-rw-r--r--.mailmap1
-rw-r--r--Documentation/00-INDEX2
-rw-r--r--Documentation/ABI/testing/sysfs-bus-iio19
-rw-r--r--Documentation/ABI/testing/sysfs-bus-iio-adc-max961117
-rw-r--r--Documentation/ABI/testing/sysfs-bus-iio-counter-104-quad-824
-rw-r--r--Documentation/ABI/testing/sysfs-bus-iio-timer-stm3271
-rw-r--r--Documentation/ABI/testing/sysfs-bus-pci22
-rw-r--r--Documentation/ABI/testing/sysfs-class-switchtec96
-rw-r--r--Documentation/ABI/testing/sysfs-class-typec276
-rw-r--r--Documentation/ABI/testing/sysfs-devices-system-cpu7
-rw-r--r--Documentation/ABI/testing/sysfs-platform-chipidea-usb29
-rw-r--r--Documentation/ABI/testing/sysfs-platform-renesas_usb315
-rw-r--r--Documentation/DocBook/rapidio.tmpl3
-rw-r--r--Documentation/PCI/00-INDEX10
-rw-r--r--Documentation/PCI/endpoint/function/binding/pci-test.txt17
-rw-r--r--Documentation/PCI/endpoint/pci-endpoint-cfs.txt105
-rw-r--r--Documentation/PCI/endpoint/pci-endpoint.txt215
-rw-r--r--Documentation/PCI/endpoint/pci-test-function.txt66
-rw-r--r--Documentation/PCI/endpoint/pci-test-howto.txt179
-rw-r--r--Documentation/PCI/pci-iov-howto.txt12
-rw-r--r--Documentation/RCU/00-INDEX2
-rw-r--r--Documentation/RCU/Design/Data-Structures/Data-Structures.html233
-rw-r--r--Documentation/RCU/Design/Data-Structures/nxtlist.svg34
-rw-r--r--Documentation/RCU/Design/Expedited-Grace-Periods/Expedited-Grace-Periods.html47
-rw-r--r--Documentation/RCU/Design/Requirements/Requirements.html195
-rw-r--r--Documentation/RCU/rcu_dereference.txt9
-rw-r--r--Documentation/RCU/rculist_nulls.txt6
-rw-r--r--Documentation/RCU/stallwarn.txt190
-rw-r--r--Documentation/RCU/whatisRCU.txt32
-rw-r--r--Documentation/admin-guide/kernel-parameters.txt23
-rw-r--r--Documentation/arm/stm32/stm32h743-overview.txt30
-rw-r--r--Documentation/arm64/cpu-feature-registers.txt12
-rw-r--r--Documentation/devicetree/bindings/arm/amlogic.txt3
-rw-r--r--Documentation/devicetree/bindings/arm/atmel-at91.txt3
-rw-r--r--Documentation/devicetree/bindings/arm/cavium-thunder2.txt8
-rw-r--r--Documentation/devicetree/bindings/arm/cpus.txt1
-rw-r--r--Documentation/devicetree/bindings/arm/firmware/linaro,optee-tz.txt31
-rw-r--r--Documentation/devicetree/bindings/arm/fsl.txt23
-rw-r--r--Documentation/devicetree/bindings/arm/gemini.txt86
-rw-r--r--Documentation/devicetree/bindings/arm/hisilicon/hisilicon.txt8
-rw-r--r--Documentation/devicetree/bindings/arm/i2se.txt22
-rw-r--r--Documentation/devicetree/bindings/arm/l2c2x0.txt3
-rw-r--r--Documentation/devicetree/bindings/arm/rockchip.txt31
-rw-r--r--Documentation/devicetree/bindings/arm/shmobile.txt4
-rw-r--r--Documentation/devicetree/bindings/arm/sprd.txt13
-rw-r--r--Documentation/devicetree/bindings/arm/tegra/nvidia,tegra186-pmc.txt34
-rw-r--r--Documentation/devicetree/bindings/arm/tegra/nvidia,tegra20-flowctrl.txt8
-rw-r--r--Documentation/devicetree/bindings/auxdisplay/hit,hd44780.txt45
-rw-r--r--Documentation/devicetree/bindings/chosen.txt45
-rw-r--r--Documentation/devicetree/bindings/clock/amlogic,gxbb-clkc.txt3
-rw-r--r--Documentation/devicetree/bindings/clock/mvebu-core-clock.txt7
-rw-r--r--Documentation/devicetree/bindings/clock/mvebu-gated-clock.txt11
-rw-r--r--Documentation/devicetree/bindings/clock/qoriq-clock.txt1
-rw-r--r--Documentation/devicetree/bindings/devfreq/exynos-bus.txt46
-rw-r--r--Documentation/devicetree/bindings/display/atmel/hlcdc-dc.txt2
-rw-r--r--Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt2
-rw-r--r--Documentation/devicetree/bindings/display/tegra/nvidia,tegra20-host1x.txt13
-rw-r--r--Documentation/devicetree/bindings/firmware/coreboot.txt33
-rw-r--r--Documentation/devicetree/bindings/fpga/altera-pr-ip.txt12
-rw-r--r--Documentation/devicetree/bindings/fpga/fpga-region.txt3
-rw-r--r--Documentation/devicetree/bindings/fpga/lattice-ice40-fpga-mgr.txt21
-rw-r--r--Documentation/devicetree/bindings/fpga/xilinx-slave-serial.txt44
-rw-r--r--Documentation/devicetree/bindings/gpio/cortina,gemini-gpio.txt24
-rw-r--r--Documentation/devicetree/bindings/gpio/faraday,ftgpio010.txt27
-rw-r--r--Documentation/devicetree/bindings/gpio/gpio-aspeed.txt3
-rw-r--r--Documentation/devicetree/bindings/gpio/gpio-mvebu.txt32
-rw-r--r--Documentation/devicetree/bindings/gpio/gpio-pca953x.txt1
-rw-r--r--Documentation/devicetree/bindings/gpio/gpio-pcf857x.txt1
-rw-r--r--Documentation/devicetree/bindings/gpio/gpio-thunderx.txt27
-rw-r--r--Documentation/devicetree/bindings/gpio/gpio-xra1403.txt46
-rw-r--r--Documentation/devicetree/bindings/gpio/moxa,moxart-gpio.txt19
-rw-r--r--Documentation/devicetree/bindings/gpio/ni,169445-nand-gpio.txt38
-rw-r--r--Documentation/devicetree/bindings/gpu/arm,mali-utgard.txt8
-rw-r--r--Documentation/devicetree/bindings/gpu/nvidia,gk20a.txt25
-rw-r--r--Documentation/devicetree/bindings/iio/accel/adxl345.txt38
-rw-r--r--Documentation/devicetree/bindings/iio/adc/amlogic,meson-saradc.txt2
-rw-r--r--Documentation/devicetree/bindings/iio/adc/aspeed_adc.txt20
-rw-r--r--Documentation/devicetree/bindings/iio/adc/cpcap-adc.txt18
-rw-r--r--Documentation/devicetree/bindings/iio/adc/ltc2497.txt13
-rw-r--r--Documentation/devicetree/bindings/iio/adc/max1118.txt21
-rw-r--r--Documentation/devicetree/bindings/iio/adc/max9611.txt27
-rw-r--r--Documentation/devicetree/bindings/iio/adc/qcom,pm8xxx-xoadc.txt76
-rw-r--r--Documentation/devicetree/bindings/iio/adc/rockchip-saradc.txt1
-rw-r--r--Documentation/devicetree/bindings/iio/adc/st,stm32-adc.txt4
-rw-r--r--Documentation/devicetree/bindings/iio/dac/ltc2632.txt23
-rw-r--r--Documentation/devicetree/bindings/iio/dac/st,stm32-dac.txt61
-rw-r--r--Documentation/devicetree/bindings/iio/health/max30102.txt30
-rw-r--r--Documentation/devicetree/bindings/iio/imu/inv_mpu6050.txt27
-rw-r--r--Documentation/devicetree/bindings/iio/imu/st_lsm6dsx.txt2
-rw-r--r--Documentation/devicetree/bindings/iio/light/vl6180.txt15
-rw-r--r--Documentation/devicetree/bindings/iio/proximity/devantech-srf04.txt28
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/arm,nvic.txt36
-rw-r--r--Documentation/devicetree/bindings/iommu/arm,smmu.txt28
-rw-r--r--Documentation/devicetree/bindings/media/atmel-isi.txt91
-rw-r--r--Documentation/devicetree/bindings/media/i2c/ov2640.txt23
-rw-r--r--Documentation/devicetree/bindings/media/i2c/ov5645.txt54
-rw-r--r--Documentation/devicetree/bindings/media/i2c/ov5647.txt35
-rw-r--r--Documentation/devicetree/bindings/media/i2c/ov7670.txt43
-rw-r--r--Documentation/devicetree/bindings/media/mediatek-jpeg-decoder.txt37
-rw-r--r--Documentation/devicetree/bindings/media/s5p-cec.txt2
-rw-r--r--Documentation/devicetree/bindings/media/s5p-mfc.txt2
-rw-r--r--Documentation/devicetree/bindings/media/stih-cec.txt2
-rw-r--r--Documentation/devicetree/bindings/media/ti,da850-vpif.txt50
-rw-r--r--Documentation/devicetree/bindings/mfd/atmel-hlcdc.txt2
-rw-r--r--Documentation/devicetree/bindings/mtd/jedec,spi-nor.txt1
-rw-r--r--Documentation/devicetree/bindings/net/faraday,ftmac.txt24
-rw-r--r--Documentation/devicetree/bindings/net/marvell,prestera.txt13
-rw-r--r--Documentation/devicetree/bindings/net/moxa,moxart-mac.txt21
-rw-r--r--Documentation/devicetree/bindings/nvmem/allwinner,sunxi-sid.txt6
-rw-r--r--Documentation/devicetree/bindings/nvmem/imx-iim.txt22
-rw-r--r--Documentation/devicetree/bindings/nvmem/imx-ocotp.txt5
-rw-r--r--Documentation/devicetree/bindings/pci/designware-pcie.txt26
-rw-r--r--Documentation/devicetree/bindings/pci/faraday,ftpci100.txt129
-rw-r--r--Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.txt14
-rw-r--r--Documentation/devicetree/bindings/pci/ti-pci.txt42
-rw-r--r--Documentation/devicetree/bindings/phy/phy-mt65xx-usb.txt93
-rw-r--r--Documentation/devicetree/bindings/phy/phy-rockchip-inno-usb2.txt6
-rw-r--r--Documentation/devicetree/bindings/phy/qcom-qmp-phy.txt106
-rw-r--r--Documentation/devicetree/bindings/phy/qcom-qusb2-phy.txt43
-rw-r--r--Documentation/devicetree/bindings/phy/rockchip-usb-phy.txt1
-rw-r--r--Documentation/devicetree/bindings/phy/sun4i-usb-phy.txt1
-rw-r--r--Documentation/devicetree/bindings/power/fsl,imx-gpc.txt85
-rw-r--r--Documentation/devicetree/bindings/power/fsl,imx-gpcv2.txt71
-rw-r--r--Documentation/devicetree/bindings/power/power_domain.txt2
-rw-r--r--Documentation/devicetree/bindings/pwm/atmel-pwm.txt1
-rw-r--r--Documentation/devicetree/bindings/pwm/nvidia,tegra20-pwm.txt45
-rw-r--r--Documentation/devicetree/bindings/pwm/pwm-mediatek.txt34
-rw-r--r--Documentation/devicetree/bindings/reset/fsl,imx7-src.txt47
-rw-r--r--Documentation/devicetree/bindings/serial/sprd-uart.txt14
-rw-r--r--Documentation/devicetree/bindings/soc/rockchip/grf.txt5
-rw-r--r--Documentation/devicetree/bindings/soc/ti/sci-pm-domain.txt57
-rw-r--r--Documentation/devicetree/bindings/trivial-devices.txt (renamed from Documentation/devicetree/bindings/i2c/trivial-devices.txt)0
-rw-r--r--Documentation/devicetree/bindings/usb/da8xx-usb.txt41
-rw-r--r--Documentation/devicetree/bindings/usb/dwc2.txt4
-rw-r--r--Documentation/devicetree/bindings/usb/ehci-orion.txt4
-rw-r--r--Documentation/devicetree/bindings/usb/generic.txt1
-rw-r--r--Documentation/devicetree/bindings/vendor-prefixes.txt7
-rw-r--r--Documentation/devicetree/bindings/watchdog/cortina,gemini-watchdog.txt17
-rw-r--r--Documentation/driver-api/vme.rst363
-rw-r--r--Documentation/driver-model/devres.txt6
-rw-r--r--Documentation/extcon/porting-android-switch-class123
-rw-r--r--Documentation/features/debug/kprobes-on-ftrace/arch-support.txt2
-rw-r--r--Documentation/filesystems/overlayfs.txt9
-rw-r--r--Documentation/filesystems/sysfs-pci.txt15
-rw-r--r--Documentation/filesystems/vfs.txt3
-rw-r--r--Documentation/gpio/consumer.txt6
-rw-r--r--Documentation/gpu/introduction.rst11
-rw-r--r--Documentation/ioctl/ioctl-number.txt2
-rw-r--r--Documentation/kdump/kdump.txt16
-rw-r--r--Documentation/kref.txt1
-rw-r--r--Documentation/media/kapi/cec-core.rst12
-rw-r--r--Documentation/media/kapi/csi2.rst9
-rw-r--r--Documentation/media/kapi/v4l2-core.rst2
-rw-r--r--Documentation/media/lirc.h.rst.exceptions1
-rw-r--r--Documentation/media/uapi/cec/cec-func-ioctl.rst2
-rw-r--r--Documentation/media/uapi/cec/cec-func-open.rst2
-rw-r--r--Documentation/media/uapi/cec/cec-func-poll.rst4
-rw-r--r--Documentation/media/uapi/cec/cec-ioc-adap-g-log-addrs.rst13
-rw-r--r--Documentation/media/uapi/cec/cec-ioc-adap-g-phys-addr.rst13
-rw-r--r--Documentation/media/uapi/cec/cec-ioc-dqevent.rst13
-rw-r--r--Documentation/media/uapi/cec/cec-ioc-g-mode.rst12
-rw-r--r--Documentation/media/uapi/cec/cec-ioc-receive.rst55
-rw-r--r--Documentation/media/uapi/mediactl/media-types.rst3
-rw-r--r--Documentation/media/uapi/rc/lirc-dev-intro.rst53
-rw-r--r--Documentation/media/uapi/rc/lirc-get-features.rst13
-rw-r--r--Documentation/media/uapi/rc/lirc-get-length.rst3
-rw-r--r--Documentation/media/uapi/rc/lirc-get-rec-mode.rst4
-rw-r--r--Documentation/media/uapi/rc/lirc-get-send-mode.rst7
-rw-r--r--Documentation/media/uapi/rc/lirc-read.rst16
-rw-r--r--Documentation/media/uapi/rc/lirc-set-rec-carrier-range.rst2
-rw-r--r--Documentation/media/uapi/rc/lirc-set-rec-timeout-reports.rst2
-rw-r--r--Documentation/media/uapi/rc/lirc-write.rst17
-rw-r--r--Documentation/media/uapi/v4l/buffer.rst122
-rw-r--r--Documentation/media/uapi/v4l/depth-formats.rst1
-rw-r--r--Documentation/media/uapi/v4l/dev-capture.rst4
-rw-r--r--Documentation/media/uapi/v4l/dev-meta.rst58
-rw-r--r--Documentation/media/uapi/v4l/dev-output.rst4
-rw-r--r--Documentation/media/uapi/v4l/devices.rst1
-rw-r--r--Documentation/media/uapi/v4l/meta-formats.rst16
-rw-r--r--Documentation/media/uapi/v4l/pixfmt-007.rst13
-rw-r--r--Documentation/media/uapi/v4l/pixfmt-inzi.rst81
-rw-r--r--Documentation/media/uapi/v4l/pixfmt-meta-vsp1-hgo.rst168
-rw-r--r--Documentation/media/uapi/v4l/pixfmt-meta-vsp1-hgt.rst120
-rw-r--r--Documentation/media/uapi/v4l/pixfmt.rst1
-rw-r--r--Documentation/media/uapi/v4l/subdev-formats.rst240
-rw-r--r--Documentation/media/uapi/v4l/video.rst7
-rw-r--r--Documentation/media/uapi/v4l/vidioc-enuminput.rst11
-rw-r--r--Documentation/media/uapi/v4l/vidioc-enumoutput.rst15
-rw-r--r--Documentation/media/uapi/v4l/vidioc-g-dv-timings.rst16
-rw-r--r--Documentation/media/uapi/v4l/vidioc-querycap.rst3
-rw-r--r--Documentation/media/uapi/v4l/vidioc-queryctrl.rst17
-rw-r--r--Documentation/media/v4l-drivers/philips.rst4
-rw-r--r--Documentation/media/v4l-drivers/vivid.rst8
-rw-r--r--Documentation/media/videodev2.h.rst.exceptions3
-rw-r--r--Documentation/memory-barriers.txt2
-rw-r--r--Documentation/misc-devices/pci-endpoint-test.txt35
-rw-r--r--Documentation/perf/qcom_l3_pmu.txt25
-rw-r--r--Documentation/powerpc/cxl.txt15
-rw-r--r--Documentation/powerpc/cxlflash.txt5
-rw-r--r--Documentation/powerpc/firmware-assisted-dump.txt57
-rw-r--r--Documentation/process/changes.rst4
-rw-r--r--Documentation/scsi/scsi_eh.txt30
-rw-r--r--Documentation/switchtec.txt80
-rw-r--r--Documentation/tee.txt118
-rw-r--r--Documentation/usb/typec.rst184
-rw-r--r--Documentation/virtual/kvm/api.txt367
-rw-r--r--Documentation/virtual/kvm/arm/hyp-abi.txt53
-rw-r--r--Documentation/virtual/kvm/devices/s390_flic.txt41
-rw-r--r--Documentation/virtual/kvm/devices/vfio.txt18
-rw-r--r--Documentation/virtual/kvm/devices/vm.txt3
-rw-r--r--Documentation/virtual/kvm/hypercalls.txt5
-rw-r--r--Documentation/vm/transhuge.txt10
-rw-r--r--Documentation/w1/slaves/00-INDEX4
-rw-r--r--Documentation/w1/slaves/w1_ds241350
-rw-r--r--Documentation/w1/slaves/w1_ds243863
-rw-r--r--MAINTAINERS164
-rw-r--r--Makefile8
-rw-r--r--arch/Kconfig7
-rw-r--r--arch/arc/Kconfig1
-rw-r--r--arch/arc/boot/dts/axs10x_mb.dtsi24
-rw-r--r--arch/arc/include/asm/cache.h6
-rw-r--r--arch/arc/include/asm/mmu.h4
-rw-r--r--arch/arc/include/asm/pgtable.h6
-rw-r--r--arch/arc/include/uapi/asm/elf.h1
-rw-r--r--arch/arc/include/uapi/asm/ptrace.h5
-rw-r--r--arch/arc/kernel/ptrace.c62
-rw-r--r--arch/arc/kernel/unwind.c2
-rw-r--r--arch/arc/mm/cache.c111
-rw-r--r--arch/arm/Kconfig34
-rw-r--r--arch/arm/boot/compressed/head.S12
-rw-r--r--arch/arm/boot/dts/Makefile73
-rw-r--r--arch/arm/boot/dts/alpine.dtsi20
-rw-r--r--arch/arm/boot/dts/am335x-baltos-ir2110.dts1
-rw-r--r--arch/arm/boot/dts/am335x-baltos-ir3220.dts1
-rw-r--r--arch/arm/boot/dts/am335x-baltos-ir5221.dts1
-rw-r--r--arch/arm/boot/dts/am335x-baltos-leds.dtsi50
-rw-r--r--arch/arm/boot/dts/am335x-boneblack.dts11
-rw-r--r--arch/arm/boot/dts/am335x-icev2.dts154
-rw-r--r--arch/arm/boot/dts/am33xx.dtsi87
-rw-r--r--arch/arm/boot/dts/am3517.dtsi12
-rw-r--r--arch/arm/boot/dts/am4372.dtsi7
-rw-r--r--arch/arm/boot/dts/am437x-gp-evm.dts15
-rw-r--r--arch/arm/boot/dts/am57xx-idk-common.dtsi24
-rw-r--r--arch/arm/boot/dts/armada-385-linksys-shelby.dts114
-rw-r--r--arch/arm/boot/dts/armada-385-linksys.dtsi18
-rw-r--r--arch/arm/boot/dts/armada-385-synology-ds116.dts321
-rw-r--r--arch/arm/boot/dts/armada-385.dtsi20
-rw-r--r--arch/arm/boot/dts/armada-388-clearfog.dts38
-rw-r--r--arch/arm/boot/dts/armada-388.dtsi9
-rw-r--r--arch/arm/boot/dts/armada-38x.dtsi49
-rw-r--r--arch/arm/boot/dts/armada-xp-98dx3236.dtsi211
-rw-r--r--arch/arm/boot/dts/armada-xp-98dx3336.dtsi2
-rw-r--r--arch/arm/boot/dts/armada-xp-98dx4251.dtsi2
-rw-r--r--arch/arm/boot/dts/armada-xp-db-dxbc2.dts2
-rw-r--r--arch/arm/boot/dts/armada-xp-db-xc3-24g4xg.dts2
-rw-r--r--arch/arm/boot/dts/armada-xp-linksys-mamba.dts18
-rw-r--r--arch/arm/boot/dts/aspeed-ast2500-evb.dts22
-rw-r--r--arch/arm/boot/dts/aspeed-bmc-opp-palmetto.dts18
-rw-r--r--arch/arm/boot/dts/aspeed-bmc-opp-romulus.dts36
-rw-r--r--arch/arm/boot/dts/aspeed-g4.dtsi109
-rw-r--r--arch/arm/boot/dts/aspeed-g5.dtsi156
-rw-r--r--arch/arm/boot/dts/at91-sama5d2_xplained.dts1
-rw-r--r--arch/arm/boot/dts/at91-sama5d3_xplained.dts5
-rw-r--r--arch/arm/boot/dts/at91-tse850-3.dts29
-rw-r--r--arch/arm/boot/dts/at91sam9261.dtsi2
-rw-r--r--arch/arm/boot/dts/at91sam9x5ek.dtsi2
-rw-r--r--arch/arm/boot/dts/axp209.dtsi5
-rw-r--r--arch/arm/boot/dts/axp22x.dtsi5
-rw-r--r--arch/arm/boot/dts/bcm-cygnus.dtsi4
-rw-r--r--arch/arm/boot/dts/bcm-nsp.dtsi36
-rw-r--r--arch/arm/boot/dts/bcm2835-rpi.dtsi14
-rw-r--r--arch/arm/boot/dts/bcm283x.dtsi60
-rw-r--r--arch/arm/boot/dts/bcm4708-asus-rt-ac56u.dts17
-rw-r--r--arch/arm/boot/dts/bcm4708-asus-rt-ac68u.dts14
-rw-r--r--arch/arm/boot/dts/bcm4708-buffalo-wzr-1750dhp.dts5
-rw-r--r--arch/arm/boot/dts/bcm4708-linksys-ea6300-v1.dts41
-rw-r--r--arch/arm/boot/dts/bcm4708-netgear-r6250.dts3
-rw-r--r--arch/arm/boot/dts/bcm4708-netgear-r6300-v2.dts15
-rw-r--r--arch/arm/boot/dts/bcm4708-smartrg-sr400ac.dts10
-rw-r--r--arch/arm/boot/dts/bcm4708.dtsi8
-rw-r--r--arch/arm/boot/dts/bcm47081-asus-rt-n18u.dts14
-rw-r--r--arch/arm/boot/dts/bcm47081-buffalo-wzr-600dhp2.dts16
-rw-r--r--arch/arm/boot/dts/bcm47081-buffalo-wzr-900dhp.dts12
-rw-r--r--arch/arm/boot/dts/bcm47081-tplink-archer-c5-v2.dts98
-rw-r--r--arch/arm/boot/dts/bcm47081.dtsi20
-rw-r--r--arch/arm/boot/dts/bcm4709-asus-rt-ac87u.dts14
-rw-r--r--arch/arm/boot/dts/bcm4709-buffalo-wxr-1900dhp.dts8
-rw-r--r--arch/arm/boot/dts/bcm4709-linksys-ea9200.dts42
-rw-r--r--arch/arm/boot/dts/bcm4709-netgear-r7000.dts19
-rw-r--r--arch/arm/boot/dts/bcm4709-netgear-r8000.dts41
-rw-r--r--arch/arm/boot/dts/bcm4709-tplink-archer-c9-v1.dts8
-rw-r--r--arch/arm/boot/dts/bcm47094-dlink-dir-885l.dts18
-rw-r--r--arch/arm/boot/dts/bcm47094-linksys-panamera.dts36
-rw-r--r--arch/arm/boot/dts/bcm47094-luxul-xwr-3100.dts8
-rw-r--r--arch/arm/boot/dts/bcm47094-netgear-r8500.dts6
-rw-r--r--arch/arm/boot/dts/bcm47189-tenda-ac9.dts39
-rw-r--r--arch/arm/boot/dts/bcm5301x.dtsi79
-rw-r--r--arch/arm/boot/dts/bcm53573.dtsi10
-rw-r--r--arch/arm/boot/dts/bcm94708.dts8
-rw-r--r--arch/arm/boot/dts/bcm94709.dts8
-rw-r--r--arch/arm/boot/dts/bcm953012er.dts8
-rw-r--r--arch/arm/boot/dts/bcm953012hr.dts97
-rw-r--r--arch/arm/boot/dts/bcm953012k.dts62
-rw-r--r--arch/arm/boot/dts/bcm958522er.dts10
-rw-r--r--arch/arm/boot/dts/bcm958525er.dts10
-rw-r--r--arch/arm/boot/dts/bcm958525xmc.dts10
-rw-r--r--arch/arm/boot/dts/bcm958622hr.dts10
-rw-r--r--arch/arm/boot/dts/bcm958623hr.dts10
-rw-r--r--arch/arm/boot/dts/bcm958625hr.dts8
-rw-r--r--arch/arm/boot/dts/bcm958625k.dts8
-rw-r--r--arch/arm/boot/dts/bcm988312hr.dts10
-rw-r--r--arch/arm/boot/dts/da850-evm.dts31
-rw-r--r--arch/arm/boot/dts/da850-lego-ev3.dts59
-rw-r--r--arch/arm/boot/dts/da850.dtsi31
-rw-r--r--arch/arm/boot/dts/dm8168-evm.dts10
-rw-r--r--arch/arm/boot/dts/dm816x.dtsi7
-rw-r--r--arch/arm/boot/dts/dra7.dtsi41
-rw-r--r--arch/arm/boot/dts/dra74x.dtsi5
-rw-r--r--arch/arm/boot/dts/exynos3250-rinato.dts2
-rw-r--r--arch/arm/boot/dts/exynos3250.dtsi46
-rw-r--r--arch/arm/boot/dts/exynos4.dtsi10
-rw-r--r--arch/arm/boot/dts/exynos4210-origen.dts4
-rw-r--r--arch/arm/boot/dts/exynos4210-trats.dts2
-rw-r--r--arch/arm/boot/dts/exynos4210.dtsi40
-rw-r--r--arch/arm/boot/dts/exynos4412-itop-scp-core.dtsi4
-rw-r--r--arch/arm/boot/dts/exynos4412-odroid-common.dtsi4
-rw-r--r--arch/arm/boot/dts/exynos4412-origen.dts4
-rw-r--r--arch/arm/boot/dts/exynos4412-prime.dtsi4
-rw-r--r--arch/arm/boot/dts/exynos4412-trats2.dts2
-rw-r--r--arch/arm/boot/dts/exynos4412.dtsi75
-rw-r--r--arch/arm/boot/dts/exynos5420-tmu-sensor-conf.dtsi25
-rw-r--r--arch/arm/boot/dts/exynos5420.dtsi50
-rw-r--r--arch/arm/boot/dts/exynos5440.dtsi32
-rw-r--r--arch/arm/boot/dts/exynos5800.dtsi56
-rw-r--r--arch/arm/boot/dts/gemini-nas4220b.dts102
-rw-r--r--arch/arm/boot/dts/gemini-rut1xx.dts65
-rw-r--r--arch/arm/boot/dts/gemini-sq201.dts118
-rw-r--r--arch/arm/boot/dts/gemini-wbd111.dts102
-rw-r--r--arch/arm/boot/dts/gemini-wbd222.dts102
-rw-r--r--arch/arm/boot/dts/gemini.dtsi156
-rw-r--r--arch/arm/boot/dts/imx25-eukrea-mbimxsd25-baseboard.dts2
-rw-r--r--arch/arm/boot/dts/imx25-pdk.dts2
-rw-r--r--arch/arm/boot/dts/imx25-pinfunc.h5
-rw-r--r--arch/arm/boot/dts/imx25.dtsi12
-rw-r--r--arch/arm/boot/dts/imx28-duckbill-2-485.dts189
-rw-r--r--arch/arm/boot/dts/imx28-duckbill-2-enocean.dts220
-rw-r--r--arch/arm/boot/dts/imx28-duckbill-2-spi.dts199
-rw-r--r--arch/arm/boot/dts/imx28-duckbill-2.dts183
-rw-r--r--arch/arm/boot/dts/imx28-duckbill.dts81
-rw-r--r--arch/arm/boot/dts/imx28-m28cu3.dts2
-rw-r--r--arch/arm/boot/dts/imx28.dtsi28
-rw-r--r--arch/arm/boot/dts/imx50.dtsi8
-rw-r--r--arch/arm/boot/dts/imx53-qsb.dts4
-rw-r--r--arch/arm/boot/dts/imx53-qsrb.dts4
-rw-r--r--arch/arm/boot/dts/imx6dl-gw5903.dts55
-rw-r--r--arch/arm/boot/dts/imx6dl-gw5904.dts55
-rw-r--r--arch/arm/boot/dts/imx6q-b450v3.dts7
-rw-r--r--arch/arm/boot/dts/imx6q-b650v3.dts7
-rw-r--r--arch/arm/boot/dts/imx6q-b850v3.dts70
-rw-r--r--arch/arm/boot/dts/imx6q-bx50v3.dtsi16
-rw-r--r--arch/arm/boot/dts/imx6q-cm-fx6.dts83
-rw-r--r--arch/arm/boot/dts/imx6q-gw5903.dts55
-rw-r--r--arch/arm/boot/dts/imx6q-gw5904.dts59
-rw-r--r--arch/arm/boot/dts/imx6q-icore-ofcap10.dts76
-rw-r--r--arch/arm/boot/dts/imx6q-icore-ofcap12.dts76
-rw-r--r--arch/arm/boot/dts/imx6q-icore.dts34
-rw-r--r--arch/arm/boot/dts/imx6q-utilite-pro.dts10
-rw-r--r--arch/arm/boot/dts/imx6q-zii-rdu2.dts50
-rw-r--r--arch/arm/boot/dts/imx6qdl-gw5903.dtsi654
-rw-r--r--arch/arm/boot/dts/imx6qdl-gw5904.dtsi641
-rw-r--r--arch/arm/boot/dts/imx6qdl-icore.dtsi19
-rw-r--r--arch/arm/boot/dts/imx6qdl-sabresd.dtsi12
-rw-r--r--arch/arm/boot/dts/imx6qdl-zii-rdu2.dtsi932
-rw-r--r--arch/arm/boot/dts/imx6qdl.dtsi3
-rw-r--r--arch/arm/boot/dts/imx6qp-nitrogen6_som2.dts55
-rw-r--r--arch/arm/boot/dts/imx6qp-sabresd.dts4
-rw-r--r--arch/arm/boot/dts/imx6qp-zii-rdu2.dts50
-rw-r--r--arch/arm/boot/dts/imx6qp.dtsi99
-rw-r--r--arch/arm/boot/dts/imx6sx.dtsi21
-rw-r--r--arch/arm/boot/dts/imx6ul-14x14-evk.dts5
-rw-r--r--arch/arm/boot/dts/imx6ul-geam.dtsi45
-rw-r--r--arch/arm/boot/dts/imx6ul-isiot-common.dtsi141
-rw-r--r--arch/arm/boot/dts/imx6ul-isiot-emmc.dts1
-rw-r--r--arch/arm/boot/dts/imx6ul-isiot-nand.dts1
-rw-r--r--arch/arm/boot/dts/imx6ul-isiot.dtsi73
-rw-r--r--arch/arm/boot/dts/imx7-colibri-eval-v3.dtsi55
-rw-r--r--arch/arm/boot/dts/imx7-colibri.dtsi45
-rw-r--r--arch/arm/boot/dts/imx7d-colibri-eval-v3.dts1
-rw-r--r--arch/arm/boot/dts/imx7d-sdb-sht11.dts74
-rw-r--r--arch/arm/boot/dts/imx7s.dtsi7
l---------arch/arm/boot/dts/include/arm1
l---------arch/arm/boot/dts/include/arm641
-rw-r--r--arch/arm/boot/dts/meson8.dtsi2
-rw-r--r--arch/arm/boot/dts/meson8b.dtsi2
-rw-r--r--arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi243
-rw-r--r--arch/arm/boot/dts/moxart-uc7112lx.dts2
-rw-r--r--arch/arm/boot/dts/moxart.dtsi21
-rw-r--r--arch/arm/boot/dts/omap3-cpu-thermal.dtsi20
-rw-r--r--arch/arm/boot/dts/omap3-igep.dtsi52
-rw-r--r--arch/arm/boot/dts/omap3-n900.dts23
-rw-r--r--arch/arm/boot/dts/omap3-n950-n9.dtsi32
-rw-r--r--arch/arm/boot/dts/omap34xx.dtsi8
-rw-r--r--arch/arm/boot/dts/omap36xx.dtsi8
-rw-r--r--arch/arm/boot/dts/omap4-droid4-xt894.dts375
-rw-r--r--arch/arm/boot/dts/omap443x.dtsi4
-rw-r--r--arch/arm/boot/dts/omap4460.dtsi4
-rw-r--r--arch/arm/boot/dts/omap5.dtsi9
-rw-r--r--arch/arm/boot/dts/qcom-apq8060-dragonboard.dts6
-rw-r--r--arch/arm/boot/dts/qcom-msm8660.dtsi30
-rw-r--r--arch/arm/boot/dts/qcom-msm8974-sony-xperia-honami.dts8
-rw-r--r--arch/arm/boot/dts/qcom-msm8974.dtsi306
-rw-r--r--arch/arm/boot/dts/r7s72100-genmai.dts8
-rw-r--r--arch/arm/boot/dts/r7s72100-rskrza1.dts8
-rw-r--r--arch/arm/boot/dts/r7s72100.dtsi76
-rw-r--r--arch/arm/boot/dts/r8a73a4.dtsi19
-rw-r--r--arch/arm/boot/dts/r8a7743.dtsi29
-rw-r--r--arch/arm/boot/dts/r8a7745.dtsi29
-rw-r--r--arch/arm/boot/dts/r8a7778-bockw.dts1
-rw-r--r--arch/arm/boot/dts/r8a7779-marzen.dts1
-rw-r--r--arch/arm/boot/dts/r8a7790-lager.dts1
-rw-r--r--arch/arm/boot/dts/r8a7790.dtsi28
-rw-r--r--arch/arm/boot/dts/r8a7791-koelsch.dts4
-rw-r--r--arch/arm/boot/dts/r8a7791-porter.dts5
-rw-r--r--arch/arm/boot/dts/r8a7791.dtsi25
-rw-r--r--arch/arm/boot/dts/r8a7792.dtsi25
-rw-r--r--arch/arm/boot/dts/r8a7793-gose.dts1
-rw-r--r--arch/arm/boot/dts/r8a7793.dtsi25
-rw-r--r--arch/arm/boot/dts/r8a7794-alt.dts3
-rw-r--r--arch/arm/boot/dts/r8a7794-silk.dts3
-rw-r--r--arch/arm/boot/dts/r8a7794.dtsi30
-rw-r--r--arch/arm/boot/dts/rk3036.dtsi6
-rw-r--r--arch/arm/boot/dts/rk3188.dtsi4
-rw-r--r--arch/arm/boot/dts/rk322x.dtsi2
-rw-r--r--arch/arm/boot/dts/rk3288-miqi.dts12
-rw-r--r--arch/arm/boot/dts/rk3288-phycore-rdk.dts298
-rw-r--r--arch/arm/boot/dts/rk3288-phycore-som.dtsi497
-rw-r--r--arch/arm/boot/dts/rk3288-rock2-som.dtsi2
-rw-r--r--arch/arm/boot/dts/rk3288-rock2-square.dts62
-rw-r--r--arch/arm/boot/dts/rk3288-tinker.dts536
-rw-r--r--arch/arm/boot/dts/rk3288.dtsi8
-rw-r--r--arch/arm/boot/dts/rk3xxx.dtsi16
-rw-r--r--arch/arm/boot/dts/s3c64xx.dtsi3
-rw-r--r--arch/arm/boot/dts/s5pv210.dtsi2
-rw-r--r--arch/arm/boot/dts/sama5d2.dtsi5
-rw-r--r--arch/arm/boot/dts/socfpga.dtsi56
-rw-r--r--arch/arm/boot/dts/socfpga_arria10.dtsi51
-rw-r--r--arch/arm/boot/dts/socfpga_arria10_socdk.dtsi7
-rw-r--r--arch/arm/boot/dts/socfpga_arria5_socdk.dts2
-rw-r--r--arch/arm/boot/dts/socfpga_cyclone5_de0_sockit.dts2
-rw-r--r--arch/arm/boot/dts/socfpga_cyclone5_mcv.dtsi2
-rw-r--r--arch/arm/boot/dts/socfpga_cyclone5_mcvevk.dts1
-rw-r--r--arch/arm/boot/dts/socfpga_cyclone5_socdk.dts2
-rw-r--r--arch/arm/boot/dts/socfpga_cyclone5_sockit.dts2
-rw-r--r--arch/arm/boot/dts/socfpga_cyclone5_socrates.dts8
-rw-r--r--arch/arm/boot/dts/socfpga_cyclone5_sodia.dts23
-rw-r--r--arch/arm/boot/dts/socfpga_cyclone5_vining_fpga.dts2
-rw-r--r--arch/arm/boot/dts/socfpga_vt.dts2
-rw-r--r--arch/arm/boot/dts/spear600-evb.dts148
-rw-r--r--arch/arm/boot/dts/spear600.dtsi28
-rw-r--r--arch/arm/boot/dts/stih407-family.dtsi30
-rw-r--r--arch/arm/boot/dts/stih410.dtsi13
-rw-r--r--arch/arm/boot/dts/stm32429i-eval.dts28
-rw-r--r--arch/arm/boot/dts/stm32746g-eval.dts4
-rw-r--r--arch/arm/boot/dts/stm32f429-disco.dts16
-rw-r--r--arch/arm/boot/dts/stm32f429.dtsi37
-rw-r--r--arch/arm/boot/dts/stm32f469-disco.dts16
-rw-r--r--arch/arm/boot/dts/stm32f746.dtsi96
-rw-r--r--arch/arm/boot/dts/stm32h743-pinctrl.dtsi156
-rw-r--r--arch/arm/boot/dts/stm32h743.dtsi83
-rw-r--r--arch/arm/boot/dts/stm32h743i-eval.dts74
-rw-r--r--arch/arm/boot/dts/sun4i-a10-a1000.dts1
-rw-r--r--arch/arm/boot/dts/sun4i-a10-cubieboard.dts1
-rw-r--r--arch/arm/boot/dts/sun4i-a10-dserve-dsrv9703c.dts1
-rw-r--r--arch/arm/boot/dts/sun4i-a10-hackberry.dts1
-rw-r--r--arch/arm/boot/dts/sun4i-a10-inet1.dts1
-rw-r--r--arch/arm/boot/dts/sun4i-a10-inet9f-rev03.dts1
-rw-r--r--arch/arm/boot/dts/sun4i-a10-jesurun-q5.dts1
-rw-r--r--arch/arm/boot/dts/sun4i-a10-marsboard.dts1
-rw-r--r--arch/arm/boot/dts/sun4i-a10-mini-xplus.dts1
-rw-r--r--arch/arm/boot/dts/sun4i-a10-mk802.dts1
-rw-r--r--arch/arm/boot/dts/sun4i-a10-olinuxino-lime.dts1
-rw-r--r--arch/arm/boot/dts/sun4i-a10-pcduino.dts1
-rw-r--r--arch/arm/boot/dts/sun4i-a10-pov-protab2-ips9.dts1
-rw-r--r--arch/arm/boot/dts/sun4i-a10.dtsi40
-rw-r--r--arch/arm/boot/dts/sun5i-a10s-auxtek-t003.dts1
-rw-r--r--arch/arm/boot/dts/sun5i-a10s-auxtek-t004.dts1
-rw-r--r--arch/arm/boot/dts/sun5i-a10s-olinuxino-micro.dts5
-rw-r--r--arch/arm/boot/dts/sun5i-a10s-r7-tv-dongle.dts1
-rw-r--r--arch/arm/boot/dts/sun5i-a10s-wobo-i5.dts3
-rw-r--r--arch/arm/boot/dts/sun5i-a10s.dtsi77
-rw-r--r--arch/arm/boot/dts/sun5i-a13-empire-electronix-d709.dts1
-rw-r--r--arch/arm/boot/dts/sun5i-a13-hsg-h702.dts1
-rw-r--r--arch/arm/boot/dts/sun5i-a13-licheepi-one.dts1
-rw-r--r--arch/arm/boot/dts/sun5i-a13-olinuxino-micro.dts1
-rw-r--r--arch/arm/boot/dts/sun5i-a13-olinuxino.dts1
-rw-r--r--arch/arm/boot/dts/sun5i-a13.dtsi140
-rw-r--r--arch/arm/boot/dts/sun5i-gr8-chip-pro.dts4
-rw-r--r--arch/arm/boot/dts/sun5i-gr8-evb.dts4
-rw-r--r--arch/arm/boot/dts/sun5i-gr8.dtsi618
-rw-r--r--arch/arm/boot/dts/sun5i-r8-chip.dts6
-rw-r--r--arch/arm/boot/dts/sun5i-r8.dtsi40
-rw-r--r--arch/arm/boot/dts/sun5i.dtsi284
-rw-r--r--arch/arm/boot/dts/sun6i-a31-app4-evb1.dts1
-rw-r--r--arch/arm/boot/dts/sun6i-a31-colombus.dts1
-rw-r--r--arch/arm/boot/dts/sun6i-a31-hummingbird.dts1
-rw-r--r--arch/arm/boot/dts/sun6i-a31-i7.dts1
-rw-r--r--arch/arm/boot/dts/sun6i-a31-m9.dts1
-rw-r--r--arch/arm/boot/dts/sun6i-a31-mele-a1000g-quad.dts1
-rw-r--r--arch/arm/boot/dts/sun6i-a31.dtsi1
-rw-r--r--arch/arm/boot/dts/sun6i-a31s-cs908.dts2
-rw-r--r--arch/arm/boot/dts/sun6i-a31s-primo81.dts1
-rw-r--r--arch/arm/boot/dts/sun6i-a31s-sina31s-core.dtsi1
-rw-r--r--arch/arm/boot/dts/sun6i-a31s-sina31s.dts23
-rw-r--r--arch/arm/boot/dts/sun6i-a31s-sinovoip-bpi-m2.dts57
-rw-r--r--arch/arm/boot/dts/sun6i-a31s-yones-toptech-bs1078-v2.dts1
-rw-r--r--arch/arm/boot/dts/sun6i-reference-design-tablet.dtsi1
-rw-r--r--arch/arm/boot/dts/sun7i-a20-bananapi.dts1
-rw-r--r--arch/arm/boot/dts/sun7i-a20-cubieboard2.dts1
-rw-r--r--arch/arm/boot/dts/sun7i-a20-cubietruck.dts9
-rw-r--r--arch/arm/boot/dts/sun7i-a20-hummingbird.dts1
-rw-r--r--arch/arm/boot/dts/sun7i-a20-i12-tvbox.dts1
-rw-r--r--arch/arm/boot/dts/sun7i-a20-icnova-swac.dts1
-rw-r--r--arch/arm/boot/dts/sun7i-a20-lamobo-r1.dts1
-rw-r--r--arch/arm/boot/dts/sun7i-a20-m3.dts1
-rw-r--r--arch/arm/boot/dts/sun7i-a20-mk808c.dts1
-rw-r--r--arch/arm/boot/dts/sun7i-a20-olimex-som-evb.dts1
-rw-r--r--arch/arm/boot/dts/sun7i-a20-olinuxino-lime.dts1
-rw-r--r--arch/arm/boot/dts/sun7i-a20-olinuxino-lime2.dts1
-rw-r--r--arch/arm/boot/dts/sun7i-a20-olinuxino-micro.dts36
-rw-r--r--arch/arm/boot/dts/sun7i-a20-orangepi-mini.dts1
-rw-r--r--arch/arm/boot/dts/sun7i-a20-orangepi.dts1
-rw-r--r--arch/arm/boot/dts/sun7i-a20-pcduino3.dts1
-rw-r--r--arch/arm/boot/dts/sun7i-a20.dtsi46
-rw-r--r--arch/arm/boot/dts/sun8i-a23-a33.dtsi2
-rw-r--r--arch/arm/boot/dts/sun8i-a23-evb.dts1
-rw-r--r--arch/arm/boot/dts/sun8i-a23-q8-tablet.dts10
-rw-r--r--arch/arm/boot/dts/sun8i-a33-sinlinx-sina33.dts23
-rw-r--r--arch/arm/boot/dts/sun8i-a33.dtsi155
-rw-r--r--arch/arm/boot/dts/sun8i-a83t.dtsi2
-rw-r--r--arch/arm/boot/dts/sun8i-h2-plus-orangepi-zero.dts21
-rw-r--r--arch/arm/boot/dts/sun8i-h3-bananapi-m2-plus.dts1
-rw-r--r--arch/arm/boot/dts/sun8i-h3-beelink-x2.dts11
-rw-r--r--arch/arm/boot/dts/sun8i-h3-nanopi-neo-air.dts96
-rw-r--r--arch/arm/boot/dts/sun8i-h3-nanopi.dtsi1
-rw-r--r--arch/arm/boot/dts/sun8i-h3-orangepi-2.dts1
-rw-r--r--arch/arm/boot/dts/sun8i-h3-orangepi-lite.dts1
-rw-r--r--arch/arm/boot/dts/sun8i-h3-orangepi-one.dts23
-rw-r--r--arch/arm/boot/dts/sun8i-h3-orangepi-pc.dts1
-rw-r--r--arch/arm/boot/dts/sun8i-h3.dtsi602
-rw-r--r--arch/arm/boot/dts/sun9i-a80-cubieboard4.dts1
-rw-r--r--arch/arm/boot/dts/sun9i-a80-optimus.dts1
-rw-r--r--arch/arm/boot/dts/sun9i-a80.dtsi2
-rw-r--r--arch/arm/boot/dts/sunxi-common-regulators.dtsi1
-rw-r--r--arch/arm/boot/dts/sunxi-h3-h5.dtsi601
-rw-r--r--arch/arm/boot/dts/sunxi-reference-design-tablet.dtsi1
-rw-r--r--arch/arm/boot/dts/uniphier-ld4-ref.dts10
-rw-r--r--arch/arm/boot/dts/uniphier-ld4.dtsi4
-rw-r--r--arch/arm/boot/dts/uniphier-ld6b-ref.dts10
-rw-r--r--arch/arm/boot/dts/uniphier-pinctrl.dtsi2
-rw-r--r--arch/arm/boot/dts/uniphier-pro4-ace.dts11
-rw-r--r--arch/arm/boot/dts/uniphier-pro4-ref.dts10
-rw-r--r--arch/arm/boot/dts/uniphier-pro4-sanji.dts11
-rw-r--r--arch/arm/boot/dts/uniphier-pro4.dtsi4
-rw-r--r--arch/arm/boot/dts/uniphier-pro5.dtsi4
-rw-r--r--arch/arm/boot/dts/uniphier-pxs2-gentil.dts11
-rw-r--r--arch/arm/boot/dts/uniphier-pxs2-vodka.dts10
-rw-r--r--arch/arm/boot/dts/uniphier-pxs2.dtsi4
-rw-r--r--arch/arm/boot/dts/uniphier-ref-daughter.dtsi4
-rw-r--r--arch/arm/boot/dts/uniphier-sld3-ref.dts12
-rw-r--r--arch/arm/boot/dts/uniphier-sld3.dtsi4
-rw-r--r--arch/arm/boot/dts/uniphier-sld8-ref.dts10
-rw-r--r--arch/arm/boot/dts/uniphier-sld8.dtsi4
-rw-r--r--arch/arm/boot/dts/uniphier-support-card.dtsi5
-rw-r--r--arch/arm/boot/dts/vexpress-v2m-rs1.dtsi24
-rw-r--r--arch/arm/boot/dts/vexpress-v2m.dtsi24
-rw-r--r--arch/arm/boot/dts/vexpress-v2p-ca15-tc1.dts2
-rw-r--r--arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts18
-rw-r--r--arch/arm/boot/dts/vexpress-v2p-ca5s.dts2
-rw-r--r--arch/arm/boot/dts/vexpress-v2p-ca9.dts2
-rw-r--r--arch/arm/boot/dts/vf610-zii-dev-rev-b.dts14
-rw-r--r--arch/arm/boot/dts/vf610-zii-dev-rev-c.dts77
-rw-r--r--arch/arm/boot/dts/vf610-zii-dev.dtsi12
-rw-r--r--arch/arm/common/Kconfig3
-rw-r--r--arch/arm/common/Makefile1
-rw-r--r--arch/arm/common/icst.c105
-rw-r--r--arch/arm/configs/aspeed_g4_defconfig112
-rw-r--r--arch/arm/configs/aspeed_g5_defconfig111
-rw-r--r--arch/arm/configs/bcm2835_defconfig5
-rw-r--r--arch/arm/configs/davinci_all_defconfig11
-rw-r--r--arch/arm/configs/dram_0xd0000000.config1
-rw-r--r--arch/arm/configs/exynos_defconfig5
-rw-r--r--arch/arm/configs/imx_v6_v7_defconfig5
-rw-r--r--arch/arm/configs/multi_v7_defconfig21
-rw-r--r--arch/arm/configs/omap2plus_defconfig24
-rw-r--r--arch/arm/configs/qcom_defconfig7
-rw-r--r--arch/arm/configs/socfpga_defconfig1
-rw-r--r--arch/arm/configs/stm32_defconfig4
-rw-r--r--arch/arm/configs/sunxi_defconfig2
-rw-r--r--arch/arm/include/asm/cacheflush.h20
-rw-r--r--arch/arm/include/asm/cpufeature.h38
-rw-r--r--arch/arm/include/asm/device.h3
-rw-r--r--arch/arm/include/asm/dma-mapping.h12
-rw-r--r--arch/arm/include/asm/fixmap.h2
-rw-r--r--arch/arm/include/asm/hardware/icst.h59
-rw-r--r--arch/arm/include/asm/io.h10
-rw-r--r--arch/arm/include/asm/kvm_asm.h7
-rw-r--r--arch/arm/include/asm/kvm_host.h9
-rw-r--r--arch/arm/include/asm/kvm_mmu.h1
-rw-r--r--arch/arm/include/asm/module.h9
-rw-r--r--arch/arm/include/asm/pci.h3
-rw-r--r--arch/arm/include/asm/proc-fns.h4
-rw-r--r--arch/arm/include/asm/set_memory.h32
-rw-r--r--arch/arm/include/asm/virt.h14
-rw-r--r--arch/arm/include/debug/brcmstb.S18
-rw-r--r--arch/arm/include/uapi/asm/kvm.h4
-rw-r--r--arch/arm/kernel/bios32.c19
-rw-r--r--arch/arm/kernel/ftrace.c12
-rw-r--r--arch/arm/kernel/hyp-stub.S43
-rw-r--r--arch/arm/kernel/kgdb.c2
-rw-r--r--arch/arm/kernel/machine_kexec.c1
-rw-r--r--arch/arm/kernel/module-plts.c87
-rw-r--r--arch/arm/kernel/module.lds1
-rw-r--r--arch/arm/kernel/reboot.c7
-rw-r--r--arch/arm/kernel/setup.c4
-rw-r--r--arch/arm/kvm/arm.c69
-rw-r--r--arch/arm/kvm/coproc.c24
-rw-r--r--arch/arm/kvm/coproc.h18
-rw-r--r--arch/arm/kvm/handle_exit.c8
-rw-r--r--arch/arm/kvm/hyp/hyp-entry.S28
-rw-r--r--arch/arm/kvm/init.S51
-rw-r--r--arch/arm/kvm/interrupts.S4
-rw-r--r--arch/arm/kvm/mmu.c36
-rw-r--r--arch/arm/kvm/psci.c8
-rw-r--r--arch/arm/mach-at91/Makefile34
-rw-r--r--arch/arm/mach-at91/at91rm9200.c15
-rw-r--r--arch/arm/mach-at91/at91sam9.c91
-rw-r--r--arch/arm/mach-at91/generic.h8
-rw-r--r--arch/arm/mach-at91/pm.c199
-rw-r--r--arch/arm/mach-at91/pm.h24
-rw-r--r--arch/arm/mach-at91/pm_data-offsets.c13
-rw-r--r--arch/arm/mach-at91/pm_suspend.S31
-rw-r--r--arch/arm/mach-at91/sama5.c52
-rw-r--r--arch/arm/mach-at91/soc.c142
-rw-r--r--arch/arm/mach-bcm/Kconfig2
-rw-r--r--arch/arm/mach-davinci/board-da850-evm.c5
-rw-r--r--arch/arm/mach-davinci/board-dm644x-evm.c3
-rw-r--r--arch/arm/mach-davinci/board-dm646x-evm.c3
-rw-r--r--arch/arm/mach-davinci/board-neuros-osd2.c3
-rw-r--r--arch/arm/mach-davinci/da830.c6
-rw-r--r--arch/arm/mach-davinci/da850.c6
-rw-r--r--arch/arm/mach-davinci/da8xx-dt.c1
-rw-r--r--arch/arm/mach-davinci/pdata-quirks.c178
-rw-r--r--arch/arm/mach-davinci/pm.c1
-rw-r--r--arch/arm/mach-ep93xx/ts72xx.c26
-rw-r--r--arch/arm/mach-gemini/Kconfig53
-rw-r--r--arch/arm/mach-gemini/Makefile15
-rw-r--r--arch/arm/mach-gemini/Makefile.boot9
-rw-r--r--arch/arm/mach-gemini/board-dt.c62
-rw-r--r--arch/arm/mach-gemini/board-nas4220b.c106
-rw-r--r--arch/arm/mach-gemini/board-rut1xx.c92
-rw-r--r--arch/arm/mach-gemini/board-wbd111.c133
-rw-r--r--arch/arm/mach-gemini/board-wbd222.c133
-rw-r--r--arch/arm/mach-gemini/common.h33
-rw-r--r--arch/arm/mach-gemini/devices.c118
-rw-r--r--arch/arm/mach-gemini/gpio.c231
-rw-r--r--arch/arm/mach-gemini/idle.c31
-rw-r--r--arch/arm/mach-gemini/include/mach/entry-macro.S33
-rw-r--r--arch/arm/mach-gemini/include/mach/global_reg.h278
-rw-r--r--arch/arm/mach-gemini/include/mach/hardware.h71
-rw-r--r--arch/arm/mach-gemini/include/mach/irqs.h53
-rw-r--r--arch/arm/mach-gemini/include/mach/uncompress.h42
-rw-r--r--arch/arm/mach-gemini/irq.c105
-rw-r--r--arch/arm/mach-gemini/mm.c82
-rw-r--r--arch/arm/mach-gemini/reset.c25
-rw-r--r--arch/arm/mach-gemini/time.c239
-rw-r--r--arch/arm/mach-hisi/platmcpm.c2
-rw-r--r--arch/arm/mach-imx/gpc.c217
-rw-r--r--arch/arm/mach-imx/mach-imx25.c6
-rw-r--r--arch/arm/mach-imx/mach-mx31_3ds.c7
-rw-r--r--arch/arm/mach-imx/mach-mx31moboard.c4
-rw-r--r--arch/arm/mach-imx/mach-pcm037_eet.c4
-rw-r--r--arch/arm/mach-imx/mmdc.c20
-rw-r--r--arch/arm/mach-ixp4xx/common-pci.c4
-rw-r--r--arch/arm/mach-keystone/Kconfig1
-rw-r--r--arch/arm/mach-keystone/pm_domain.c4
-rw-r--r--arch/arm/mach-mmp/clock.c3
-rw-r--r--arch/arm/mach-mxs/mach-mxs.c3
-rw-r--r--arch/arm/mach-omap1/pm.c1
-rw-r--r--arch/arm/mach-omap2/board-n8x0.c2
-rw-r--r--arch/arm/mach-omap2/clockdomains7xx_data.c2
-rw-r--r--arch/arm/mach-omap2/clockdomains81xx_data.c10
-rw-r--r--arch/arm/mach-omap2/cm81xx.h1
-rw-r--r--arch/arm/mach-omap2/devices.c2
-rw-r--r--arch/arm/mach-omap2/omap_hwmod.c208
-rw-r--r--arch/arm/mach-omap2/omap_hwmod.h23
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c1
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_3xxx_data.c4
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_44xx_data.c2
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_54xx_data.c2
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_7xx_data.c5
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_81xx_data.c34
-rw-r--r--arch/arm/mach-omap2/pm.c1
-rw-r--r--arch/arm/mach-oxnas/Kconfig1
-rw-r--r--arch/arm/mach-shmobile/setup-r7s72100.c2
-rw-r--r--arch/arm/mach-stm32/Kconfig26
-rw-r--r--arch/arm/mach-stm32/board-dt.c1
-rw-r--r--arch/arm/mach-sunxi/Kconfig1
-rw-r--r--arch/arm/mach-tegra/Makefile1
-rw-r--r--arch/arm/mach-tegra/cpuidle-tegra20.c3
-rw-r--r--arch/arm/mach-tegra/flowctrl.c171
-rw-r--r--arch/arm/mach-tegra/flowctrl.h66
-rw-r--r--arch/arm/mach-tegra/platsmp.c2
-rw-r--r--arch/arm/mach-tegra/pm.c2
-rw-r--r--arch/arm/mach-tegra/reset-handler.S2
-rw-r--r--arch/arm/mach-tegra/sleep-tegra20.S3
-rw-r--r--arch/arm/mach-tegra/sleep-tegra30.S2
-rw-r--r--arch/arm/mach-tegra/sleep.S2
-rw-r--r--arch/arm/mach-tegra/tegra.c2
-rw-r--r--arch/arm/mach-w90x900/clock.c3
-rw-r--r--arch/arm/mm/cache-l2x0.c13
-rw-r--r--arch/arm/mm/dma-mapping.c16
-rw-r--r--arch/arm/mm/dump.c54
-rw-r--r--arch/arm/mm/init.c13
-rw-r--r--arch/arm/mm/ioremap.c7
-rw-r--r--arch/arm/mm/mmu.c21
-rw-r--r--arch/arm/mm/nommu.c12
-rw-r--r--arch/arm/mm/pageattr.c1
-rw-r--r--arch/arm/mm/proc-v7.S15
-rw-r--r--arch/arm/mm/proc-v7m.S6
-rw-r--r--arch/arm/net/bpf_jit_32.c1
-rw-r--r--arch/arm/plat-samsung/devs.c1
-rw-r--r--arch/arm/plat-versatile/include/plat/clock.h15
-rw-r--r--arch/arm/xen/efi.c2
-rw-r--r--arch/arm/xen/enlighten.c16
-rw-r--r--arch/arm64/Kconfig11
-rw-r--r--arch/arm64/Kconfig.debug4
-rw-r--r--arch/arm64/Kconfig.platforms5
-rw-r--r--arch/arm64/Makefile4
-rw-r--r--arch/arm64/boot/dts/allwinner/Makefile1
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi29
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-pc2.dts188
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi124
l---------arch/arm64/boot/dts/allwinner/sunxi-h3-h5.dtsi1
-rw-r--r--arch/arm64/boot/dts/amlogic/Makefile2
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi39
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gx.dtsi80
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxbb-nexbox-a95x.dts40
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts80
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxbb-p200.dts25
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxbb-p201.dts11
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxbb-p20x.dtsi29
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi21
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxbb-wetek-hub.dts26
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxbb-wetek-play2.dts26
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi172
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxl-mali.dtsi43
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxl-s905d-p230.dts42
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxl-s905d.dtsi1
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxl-s905x-hwacom-amazetv.dts164
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxl-s905x-khadas-vim.dts114
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxl-s905x-nexbox-a95x.dts23
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dts21
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi173
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxl-s905x.dtsi1
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxl.dtsi194
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxm-nexbox-a1.dts25
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxm-q200.dts42
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxm.dtsi3
-rw-r--r--arch/arm64/boot/dts/arm/juno-base.dtsi4
-rw-r--r--arch/arm64/boot/dts/arm/juno-motherboard.dtsi12
-rw-r--r--arch/arm64/boot/dts/arm/juno-r1.dts42
-rw-r--r--arch/arm64/boot/dts/arm/juno-r2.dts42
-rw-r--r--arch/arm64/boot/dts/arm/juno.dts42
-rw-r--r--arch/arm64/boot/dts/broadcom/Makefile1
-rw-r--r--arch/arm64/boot/dts/broadcom/ns2-svk.dts38
-rw-r--r--arch/arm64/boot/dts/broadcom/ns2-xmc.dts20
-rw-r--r--arch/arm64/boot/dts/broadcom/ns2.dtsi24
-rw-r--r--arch/arm64/boot/dts/broadcom/vulcan-eval.dts33
-rw-r--r--arch/arm64/boot/dts/broadcom/vulcan.dtsi147
-rw-r--r--arch/arm64/boot/dts/cavium/Makefile1
-rw-r--r--arch/arm64/boot/dts/cavium/thunder2-99xx.dts34
-rw-r--r--arch/arm64/boot/dts/cavium/thunder2-99xx.dtsi148
-rw-r--r--arch/arm64/boot/dts/exynos/exynos5433-bus.dtsi48
-rw-r--r--arch/arm64/boot/dts/exynos/exynos5433-tm2-common.dtsi43
-rw-r--r--arch/arm64/boot/dts/exynos/exynos5433-tm2.dts17
-rw-r--r--arch/arm64/boot/dts/exynos/exynos5433-tm2e.dts18
-rw-r--r--arch/arm64/boot/dts/exynos/exynos5433.dtsi50
-rw-r--r--arch/arm64/boot/dts/freescale/Makefile4
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1012a-frdm.dts4
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1012a-qds.dts4
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1012a-rdb.dts4
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi182
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi4
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi4
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1088a-qds.dts123
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1088a-rdb.dts107
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi275
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls2080a-qds.dts155
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls2080a-rdb.dts110
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi863
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls2088a-qds.dts64
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls2088a-rdb.dts64
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls2088a.dtsi165
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls208xa-qds.dtsi196
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls208xa-rdb.dtsi151
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi737
-rw-r--r--arch/arm64/boot/dts/hisilicon/Makefile1
-rw-r--r--arch/arm64/boot/dts/hisilicon/hi3660-hikey960.dts1
-rw-r--r--arch/arm64/boot/dts/hisilicon/hi3798cv200-poplar.dts162
-rw-r--r--arch/arm64/boot/dts/hisilicon/hi3798cv200.dtsi411
-rw-r--r--arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts7
-rw-r--r--arch/arm64/boot/dts/hisilicon/hi6220.dtsi3
-rw-r--r--arch/arm64/boot/dts/hisilicon/hikey960-pinctrl.dtsi407
-rw-r--r--arch/arm64/boot/dts/hisilicon/hip07-d05.dts20
-rw-r--r--arch/arm64/boot/dts/hisilicon/hip07.dtsi479
l---------arch/arm64/boot/dts/include/arm1
l---------arch/arm64/boot/dts/include/arm641
-rw-r--r--arch/arm64/boot/dts/marvell/armada-3720-db.dts56
-rw-r--r--arch/arm64/boot/dts/marvell/armada-37xx.dtsi25
-rw-r--r--arch/arm64/boot/dts/marvell/armada-7040-db.dts43
-rw-r--r--arch/arm64/boot/dts/marvell/armada-8020.dtsi10
-rw-r--r--arch/arm64/boot/dts/marvell/armada-8040-db.dts32
-rw-r--r--arch/arm64/boot/dts/marvell/armada-8040.dtsi9
-rw-r--r--arch/arm64/boot/dts/marvell/armada-ap806.dtsi11
-rw-r--r--arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi70
-rw-r--r--arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi60
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra132.dtsi2
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra186-p2771-0000.dts91
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra186-p3310.dtsi319
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra186.dtsi59
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra210.dtsi19
-rw-r--r--arch/arm64/boot/dts/qcom/apq8016-sbc.dtsi11
-rw-r--r--arch/arm64/boot/dts/qcom/msm8916.dtsi26
-rw-r--r--arch/arm64/boot/dts/qcom/msm8996.dtsi46
-rw-r--r--arch/arm64/boot/dts/qcom/pm8994.dtsi7
-rw-r--r--arch/arm64/boot/dts/renesas/r8a7795-h3ulcb.dts29
-rw-r--r--arch/arm64/boot/dts/renesas/r8a7795-salvator-x.dts39
-rw-r--r--arch/arm64/boot/dts/renesas/r8a7795.dtsi181
-rw-r--r--arch/arm64/boot/dts/renesas/r8a7796-m3ulcb.dts1
-rw-r--r--arch/arm64/boot/dts/renesas/r8a7796-salvator-x.dts32
-rw-r--r--arch/arm64/boot/dts/renesas/r8a7796.dtsi311
-rw-r--r--arch/arm64/boot/dts/rockchip/Makefile2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3328-evb.dts57
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3328.dtsi1264
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3368-px5-evb.dts2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3368.dtsi94
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-gru-kevin.dts306
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi1103
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-opp.dtsi145
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399.dtsi147
-rw-r--r--arch/arm64/boot/dts/socionext/uniphier-ld11-ref.dts10
-rw-r--r--arch/arm64/boot/dts/socionext/uniphier-ld11.dtsi13
-rw-r--r--arch/arm64/boot/dts/socionext/uniphier-ld20-ref.dts10
-rw-r--r--arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi4
-rw-r--r--arch/arm64/boot/dts/sprd/Makefile3
-rw-r--r--arch/arm64/boot/dts/sprd/sc9860.dtsi569
-rw-r--r--arch/arm64/boot/dts/sprd/sp9860g-1h10.dts56
-rw-r--r--arch/arm64/boot/dts/sprd/whale2.dtsi71
-rw-r--r--arch/arm64/boot/dts/zte/zx296718-evb.dts28
-rw-r--r--arch/arm64/boot/dts/zte/zx296718.dtsi126
-rw-r--r--arch/arm64/configs/defconfig22
-rw-r--r--arch/arm64/include/asm/Kbuild1
-rw-r--r--arch/arm64/include/asm/acpi.h2
-rw-r--r--arch/arm64/include/asm/arch_gicv3.h81
-rw-r--r--arch/arm64/include/asm/bug.h32
-rw-r--r--arch/arm64/include/asm/cache.h38
-rw-r--r--arch/arm64/include/asm/cacheflush.h5
-rw-r--r--arch/arm64/include/asm/cachetype.h100
-rw-r--r--arch/arm64/include/asm/cpufeature.h4
-rw-r--r--arch/arm64/include/asm/device.h3
-rw-r--r--arch/arm64/include/asm/dma-mapping.h13
-rw-r--r--arch/arm64/include/asm/esr.h4
-rw-r--r--arch/arm64/include/asm/hardirq.h2
-rw-r--r--arch/arm64/include/asm/hw_breakpoint.h4
-rw-r--r--arch/arm64/include/asm/io.h10
-rw-r--r--arch/arm64/include/asm/kexec.h52
-rw-r--r--arch/arm64/include/asm/kvm_asm.h5
-rw-r--r--arch/arm64/include/asm/kvm_host.h10
-rw-r--r--arch/arm64/include/asm/kvm_mmu.h14
-rw-r--r--arch/arm64/include/asm/mmu.h1
-rw-r--r--arch/arm64/include/asm/module.h14
-rw-r--r--arch/arm64/include/asm/pci.h2
-rw-r--r--arch/arm64/include/asm/pgtable.h10
-rw-r--r--arch/arm64/include/asm/processor.h2
-rw-r--r--arch/arm64/include/asm/sections.h2
-rw-r--r--arch/arm64/include/asm/smp.h3
-rw-r--r--arch/arm64/include/asm/sysreg.h167
-rw-r--r--arch/arm64/include/asm/virt.h31
-rw-r--r--arch/arm64/include/uapi/asm/hwcap.h3
-rw-r--r--arch/arm64/include/uapi/asm/kvm.h4
-rw-r--r--arch/arm64/kernel/Makefile3
-rw-r--r--arch/arm64/kernel/alternative.c11
-rw-r--r--arch/arm64/kernel/cacheinfo.c38
-rw-r--r--arch/arm64/kernel/cpufeature.c30
-rw-r--r--arch/arm64/kernel/cpuinfo.c37
-rw-r--r--arch/arm64/kernel/crash_dump.c71
-rw-r--r--arch/arm64/kernel/debug-monitors.c2
-rw-r--r--arch/arm64/kernel/efi-header.S155
-rw-r--r--arch/arm64/kernel/head.S222
-rw-r--r--arch/arm64/kernel/hibernate.c10
-rw-r--r--arch/arm64/kernel/hyp-stub.S38
-rw-r--r--arch/arm64/kernel/machine_kexec.c170
-rw-r--r--arch/arm64/kernel/module-plts.c108
-rw-r--r--arch/arm64/kernel/module.c2
-rw-r--r--arch/arm64/kernel/module.lds1
-rw-r--r--arch/arm64/kernel/perf_event.c120
-rw-r--r--arch/arm64/kernel/process.c2
-rw-r--r--arch/arm64/kernel/reloc_test_core.c81
-rw-r--r--arch/arm64/kernel/reloc_test_syms.S83
-rw-r--r--arch/arm64/kernel/setup.c12
-rw-r--r--arch/arm64/kernel/smp.c79
-rw-r--r--arch/arm64/kernel/traps.c14
-rw-r--r--arch/arm64/kernel/vmlinux.lds.S27
-rw-r--r--arch/arm64/kvm/hyp-init.S46
-rw-r--r--arch/arm64/kvm/hyp.S5
-rw-r--r--arch/arm64/kvm/hyp/hyp-entry.S43
-rw-r--r--arch/arm64/kvm/hyp/tlb.c22
-rw-r--r--arch/arm64/kvm/reset.c2
-rw-r--r--arch/arm64/kvm/sys_regs.c500
-rw-r--r--arch/arm64/kvm/sys_regs.h23
-rw-r--r--arch/arm64/kvm/sys_regs_generic_v8.c4
-rw-r--r--arch/arm64/mm/context.c3
-rw-r--r--arch/arm64/mm/dma-mapping.c278
-rw-r--r--arch/arm64/mm/fault.c55
-rw-r--r--arch/arm64/mm/flush.c4
-rw-r--r--arch/arm64/mm/init.c181
-rw-r--r--arch/arm64/mm/mmu.c311
-rw-r--r--arch/arm64/mm/pageattr.c16
-rw-r--r--arch/arm64/net/bpf_jit_comp.c1
-rw-r--r--arch/cris/arch-v32/drivers/pci/bios.c22
-rw-r--r--arch/cris/include/asm/pci.h4
-rw-r--r--arch/ia64/include/asm/pci.h5
-rw-r--r--arch/ia64/kernel/asm-offsets.c4
-rw-r--r--arch/ia64/kernel/crash.c22
-rw-r--r--arch/ia64/pci/pci.c46
-rw-r--r--arch/m68k/ifpsp060/src/ilsp.S2
-rw-r--r--arch/m68k/ifpsp060/src/isp.S2
-rw-r--r--arch/microblaze/include/asm/pci.h6
-rw-r--r--arch/microblaze/pci/pci-common.c2
-rw-r--r--arch/mips/Kconfig1
-rw-r--r--arch/mips/cavium-octeon/executive/cvmx-helper-rgmii.c2
-rw-r--r--arch/mips/dec/prom/init.c6
-rw-r--r--arch/mips/include/asm/cpu-features.h10
-rw-r--r--arch/mips/include/asm/cpu-info.h2
-rw-r--r--arch/mips/include/asm/cpu.h1
-rw-r--r--arch/mips/include/asm/kvm_host.h468
-rw-r--r--arch/mips/include/asm/maar.h10
-rw-r--r--arch/mips/include/asm/mipsregs.h62
-rw-r--r--arch/mips/include/asm/octeon/cvmx-helper-rgmii.h2
-rw-r--r--arch/mips/include/asm/pci.h5
-rw-r--r--arch/mips/include/asm/tlb.h6
-rw-r--r--arch/mips/include/uapi/asm/inst.h2
-rw-r--r--arch/mips/include/uapi/asm/kvm.h22
-rw-r--r--arch/mips/kernel/cpu-probe.c13
-rw-r--r--arch/mips/kernel/perf_event_mipsxx.c2
-rw-r--r--arch/mips/kernel/time.c1
-rw-r--r--arch/mips/kernel/traps.c4
-rw-r--r--arch/mips/kvm/Kconfig27
-rw-r--r--arch/mips/kvm/Makefile9
-rw-r--r--arch/mips/kvm/emulate.c502
-rw-r--r--arch/mips/kvm/entry.c132
-rw-r--r--arch/mips/kvm/hypcall.c53
-rw-r--r--arch/mips/kvm/interrupt.h5
-rw-r--r--arch/mips/kvm/mips.c123
-rw-r--r--arch/mips/kvm/mmu.c20
-rw-r--r--arch/mips/kvm/tlb.c441
-rw-r--r--arch/mips/kvm/trace.h74
-rw-r--r--arch/mips/kvm/trap_emul.c73
-rw-r--r--arch/mips/kvm/vz.c3223
-rw-r--r--arch/mips/mm/cache.c1
-rw-r--r--arch/mips/mm/init.c2
-rw-r--r--arch/mips/pci/pci.c24
-rw-r--r--arch/mn10300/include/asm/pci.h4
-rw-r--r--arch/mn10300/unit-asb2305/pci-asb2305.c23
-rw-r--r--arch/parisc/include/asm/pci.h4
-rw-r--r--arch/parisc/kernel/entry.S2
-rw-r--r--arch/parisc/kernel/module.c2
-rw-r--r--arch/parisc/kernel/pci.c28
-rw-r--r--arch/powerpc/Kconfig77
-rw-r--r--arch/powerpc/Makefile13
-rw-r--r--arch/powerpc/Makefile.postlink34
-rw-r--r--arch/powerpc/configs/powernv_defconfig6
-rw-r--r--arch/powerpc/configs/ppc64_defconfig6
-rw-r--r--arch/powerpc/configs/pseries_defconfig6
-rw-r--r--arch/powerpc/include/asm/asm-prototypes.h4
-rw-r--r--arch/powerpc/include/asm/bitops.h8
-rw-r--r--arch/powerpc/include/asm/book3s/64/hash-4k.h2
-rw-r--r--arch/powerpc/include/asm/book3s/64/hash-64k.h10
-rw-r--r--arch/powerpc/include/asm/book3s/64/hash.h16
-rw-r--r--arch/powerpc/include/asm/book3s/64/hugetlb.h2
-rw-r--r--arch/powerpc/include/asm/book3s/64/mmu-hash.h200
-rw-r--r--arch/powerpc/include/asm/book3s/64/mmu.h9
-rw-r--r--arch/powerpc/include/asm/book3s/64/pgtable.h58
-rw-r--r--arch/powerpc/include/asm/book3s/64/radix.h8
-rw-r--r--arch/powerpc/include/asm/code-patching.h41
-rw-r--r--arch/powerpc/include/asm/cpuidle.h33
-rw-r--r--arch/powerpc/include/asm/cputable.h5
-rw-r--r--arch/powerpc/include/asm/dbell.h40
-rw-r--r--arch/powerpc/include/asm/debug.h2
-rw-r--r--arch/powerpc/include/asm/debugfs.h17
-rw-r--r--arch/powerpc/include/asm/disassemble.h5
-rw-r--r--arch/powerpc/include/asm/exception-64s.h87
-rw-r--r--arch/powerpc/include/asm/fadump.h2
-rw-r--r--arch/powerpc/include/asm/feature-fixups.h3
-rw-r--r--arch/powerpc/include/asm/head-64.h1
-rw-r--r--arch/powerpc/include/asm/hvcall.h10
-rw-r--r--arch/powerpc/include/asm/io.h102
-rw-r--r--arch/powerpc/include/asm/iommu.h32
-rw-r--r--arch/powerpc/include/asm/kprobes.h63
-rw-r--r--arch/powerpc/include/asm/kvm_book3s_64.h2
-rw-r--r--arch/powerpc/include/asm/kvm_book3s_asm.h2
-rw-r--r--arch/powerpc/include/asm/kvm_host.h35
-rw-r--r--arch/powerpc/include/asm/kvm_ppc.h32
-rw-r--r--arch/powerpc/include/asm/machdep.h2
-rw-r--r--arch/powerpc/include/asm/mce.h94
-rw-r--r--arch/powerpc/include/asm/mmu-book3e.h5
-rw-r--r--arch/powerpc/include/asm/mmu.h19
-rw-r--r--arch/powerpc/include/asm/mmu_context.h24
-rw-r--r--arch/powerpc/include/asm/nohash/64/pgtable.h5
-rw-r--r--arch/powerpc/include/asm/opal-api.h77
-rw-r--r--arch/powerpc/include/asm/opal.h41
-rw-r--r--arch/powerpc/include/asm/paca.h38
-rw-r--r--arch/powerpc/include/asm/page_64.h14
-rw-r--r--arch/powerpc/include/asm/pci.h9
-rw-r--r--arch/powerpc/include/asm/perf_event_server.h3
-rw-r--r--arch/powerpc/include/asm/powernv.h22
-rw-r--r--arch/powerpc/include/asm/ppc-opcode.h60
-rw-r--r--arch/powerpc/include/asm/processor.h41
-rw-r--r--arch/powerpc/include/asm/reg.h4
-rw-r--r--arch/powerpc/include/asm/sections.h2
-rw-r--r--arch/powerpc/include/asm/smp.h21
-rw-r--r--arch/powerpc/include/asm/syscalls.h4
-rw-r--r--arch/powerpc/include/asm/thread_info.h10
-rw-r--r--arch/powerpc/include/asm/xics.h2
-rw-r--r--arch/powerpc/include/asm/xive-regs.h97
-rw-r--r--arch/powerpc/include/asm/xive.h163
-rw-r--r--arch/powerpc/include/asm/xmon.h2
-rw-r--r--arch/powerpc/include/uapi/asm/kvm.h3
-rw-r--r--arch/powerpc/include/uapi/asm/mman.h16
-rw-r--r--arch/powerpc/kernel/Makefile12
-rw-r--r--arch/powerpc/kernel/asm-offsets.c9
-rw-r--r--arch/powerpc/kernel/cpu_setup_power.S36
-rw-r--r--arch/powerpc/kernel/crash.c2
-rw-r--r--arch/powerpc/kernel/dbell.c58
-rw-r--r--arch/powerpc/kernel/eeh.c3
-rw-r--r--arch/powerpc/kernel/eeh_driver.c55
-rw-r--r--arch/powerpc/kernel/entry_32.S107
-rw-r--r--arch/powerpc/kernel/entry_64.S380
-rw-r--r--arch/powerpc/kernel/exceptions-64s.S186
-rw-r--r--arch/powerpc/kernel/fadump.c93
-rw-r--r--arch/powerpc/kernel/ftrace.c619
-rw-r--r--arch/powerpc/kernel/head_32.S16
-rw-r--r--arch/powerpc/kernel/head_64.S3
-rw-r--r--arch/powerpc/kernel/idle_book3s.S285
-rw-r--r--arch/powerpc/kernel/iommu.c91
-rw-r--r--arch/powerpc/kernel/irq.c41
-rw-r--r--arch/powerpc/kernel/kprobes-ftrace.c104
-rw-r--r--arch/powerpc/kernel/kprobes.c214
-rw-r--r--arch/powerpc/kernel/mce.c18
-rw-r--r--arch/powerpc/kernel/mce_power.c780
-rw-r--r--arch/powerpc/kernel/optprobes.c6
-rw-r--r--arch/powerpc/kernel/paca.c21
-rw-r--r--arch/powerpc/kernel/pci-common.c11
-rw-r--r--arch/powerpc/kernel/prom.c1
-rw-r--r--arch/powerpc/kernel/prom_init.c2
-rw-r--r--arch/powerpc/kernel/setup-common.c16
-rw-r--r--arch/powerpc/kernel/setup_64.c9
-rw-r--r--arch/powerpc/kernel/smp.c299
-rw-r--r--arch/powerpc/kernel/stacktrace.c9
-rw-r--r--arch/powerpc/kernel/swsusp.c1
-rw-r--r--arch/powerpc/kernel/syscalls.c16
-rw-r--r--arch/powerpc/kernel/sysfs.c12
-rw-r--r--arch/powerpc/kernel/trace/Makefile29
-rw-r--r--arch/powerpc/kernel/trace/ftrace.c620
-rw-r--r--arch/powerpc/kernel/trace/ftrace_32.S118
-rw-r--r--arch/powerpc/kernel/trace/ftrace_64.S85
-rw-r--r--arch/powerpc/kernel/trace/ftrace_64_mprofile.S272
-rw-r--r--arch/powerpc/kernel/trace/ftrace_64_pg.S68
-rw-r--r--arch/powerpc/kernel/trace/trace_clock.c (renamed from arch/powerpc/kernel/trace_clock.c)0
-rw-r--r--arch/powerpc/kernel/traps.c27
-rw-r--r--arch/powerpc/kernel/vmlinux.lds.S2
-rw-r--r--arch/powerpc/kvm/Kconfig1
-rw-r--r--arch/powerpc/kvm/book3s.c26
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu.c1
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu_host.c17
-rw-r--r--arch/powerpc/kvm/book3s_64_vio.c315
-rw-r--r--arch/powerpc/kvm/book3s_64_vio_hv.c303
-rw-r--r--arch/powerpc/kvm/book3s_emulate.c34
-rw-r--r--arch/powerpc/kvm/book3s_hv.c26
-rw-r--r--arch/powerpc/kvm/book3s_hv_builtin.c32
-rw-r--r--arch/powerpc/kvm/book3s_hv_rm_xics.c5
-rw-r--r--arch/powerpc/kvm/book3s_pr.c14
-rw-r--r--arch/powerpc/kvm/book3s_pr_papr.c2
-rw-r--r--arch/powerpc/kvm/book3s_xics.c5
-rw-r--r--arch/powerpc/kvm/booke.c9
-rw-r--r--arch/powerpc/kvm/e500_mmu_host.c5
-rw-r--r--arch/powerpc/kvm/emulate.c8
-rw-r--r--arch/powerpc/kvm/emulate_loadstore.c472
-rw-r--r--arch/powerpc/kvm/powerpc.c327
-rw-r--r--arch/powerpc/lib/code-patching.c4
-rw-r--r--arch/powerpc/lib/sstep.c82
-rw-r--r--arch/powerpc/mm/dump_hashpagetable.c2
-rw-r--r--arch/powerpc/mm/dump_linuxpagetables.c106
-rw-r--r--arch/powerpc/mm/fault.c84
-rw-r--r--arch/powerpc/mm/hash_low_32.S2
-rw-r--r--arch/powerpc/mm/hash_utils_64.c26
-rw-r--r--arch/powerpc/mm/hugetlbpage-book3e.c7
-rw-r--r--arch/powerpc/mm/hugetlbpage-radix.c11
-rw-r--r--arch/powerpc/mm/hugetlbpage.c18
-rw-r--r--arch/powerpc/mm/icswx.c2
-rw-r--r--arch/powerpc/mm/init_64.c4
-rw-r--r--arch/powerpc/mm/mmap.c53
-rw-r--r--arch/powerpc/mm/mmu_context_book3s64.c116
-rw-r--r--arch/powerpc/mm/mmu_context_iommu.c43
-rw-r--r--arch/powerpc/mm/mmu_context_nohash.c5
-rw-r--r--arch/powerpc/mm/numa.c7
-rw-r--r--arch/powerpc/mm/slb.c4
-rw-r--r--arch/powerpc/mm/slb_low.S82
-rw-r--r--arch/powerpc/mm/slice.c258
-rw-r--r--arch/powerpc/mm/subpage-prot.c3
-rw-r--r--arch/powerpc/mm/tlb-radix.c93
-rw-r--r--arch/powerpc/mm/tlb_nohash.c2
-rw-r--r--arch/powerpc/perf/core-book3s.c8
-rw-r--r--arch/powerpc/perf/isa207-common.c82
-rw-r--r--arch/powerpc/perf/isa207-common.h26
-rw-r--r--arch/powerpc/perf/power8-events-list.h6
-rw-r--r--arch/powerpc/perf/power8-pmu.c4
-rw-r--r--arch/powerpc/perf/power9-pmu.c2
-rw-r--r--arch/powerpc/platforms/44x/sam440ep.c2
-rw-r--r--arch/powerpc/platforms/52xx/Kconfig1
-rw-r--r--arch/powerpc/platforms/85xx/smp.c12
-rw-r--r--arch/powerpc/platforms/86xx/mpc86xx_smp.c1
-rw-r--r--arch/powerpc/platforms/Kconfig1
-rw-r--r--arch/powerpc/platforms/Kconfig.cputype14
-rw-r--r--arch/powerpc/platforms/cell/axon_msi.c2
-rw-r--r--arch/powerpc/platforms/cell/interrupt.c2
-rw-r--r--arch/powerpc/platforms/cell/pervasive.c11
-rw-r--r--arch/powerpc/platforms/chrp/smp.c1
-rw-r--r--arch/powerpc/platforms/pasemi/idle.c11
-rw-r--r--arch/powerpc/platforms/powermac/smp.c3
-rw-r--r--arch/powerpc/platforms/powernv/Kconfig3
-rw-r--r--arch/powerpc/platforms/powernv/eeh-powernv.c7
-rw-r--r--arch/powerpc/platforms/powernv/idle.c109
-rw-r--r--arch/powerpc/platforms/powernv/npu-dma.c470
-rw-r--r--arch/powerpc/platforms/powernv/opal-lpc.c3
-rw-r--r--arch/powerpc/platforms/powernv/opal-sensor.c4
-rw-r--r--arch/powerpc/platforms/powernv/opal-wrappers.S71
-rw-r--r--arch/powerpc/platforms/powernv/opal-xscom.c27
-rw-r--r--arch/powerpc/platforms/powernv/opal.c79
-rw-r--r--arch/powerpc/platforms/powernv/pci-ioda.c78
-rw-r--r--arch/powerpc/platforms/powernv/pci.c6
-rw-r--r--arch/powerpc/platforms/powernv/pci.h17
-rw-r--r--arch/powerpc/platforms/powernv/powernv.h2
-rw-r--r--arch/powerpc/platforms/powernv/rng.c2
-rw-r--r--arch/powerpc/platforms/powernv/setup.c19
-rw-r--r--arch/powerpc/platforms/powernv/smp.c107
-rw-r--r--arch/powerpc/platforms/ps3/smp.c4
-rw-r--r--arch/powerpc/platforms/pseries/Kconfig3
-rw-r--r--arch/powerpc/platforms/pseries/dlpar.c1
-rw-r--r--arch/powerpc/platforms/pseries/dtl.c3
-rw-r--r--arch/powerpc/platforms/pseries/hvCall_inst.c10
-rw-r--r--arch/powerpc/platforms/pseries/ibmebus.c5
-rw-r--r--arch/powerpc/platforms/pseries/iommu.c43
-rw-r--r--arch/powerpc/platforms/pseries/lpar.c61
-rw-r--r--arch/powerpc/platforms/pseries/ras.c4
-rw-r--r--arch/powerpc/platforms/pseries/setup.c4
-rw-r--r--arch/powerpc/platforms/pseries/smp.c49
-rw-r--r--arch/powerpc/platforms/pseries/vio.c2
-rw-r--r--arch/powerpc/sysdev/Kconfig1
-rw-r--r--arch/powerpc/sysdev/Makefile1
-rw-r--r--arch/powerpc/sysdev/axonram.c45
-rw-r--r--arch/powerpc/sysdev/scom.c3
-rw-r--r--arch/powerpc/sysdev/xics/icp-hv.c2
-rw-r--r--arch/powerpc/sysdev/xics/icp-native.c20
-rw-r--r--arch/powerpc/sysdev/xics/icp-opal.c2
-rw-r--r--arch/powerpc/sysdev/xics/xics-common.c6
-rw-r--r--arch/powerpc/sysdev/xive/Kconfig11
-rw-r--r--arch/powerpc/sysdev/xive/Makefile4
-rw-r--r--arch/powerpc/sysdev/xive/common.c1302
-rw-r--r--arch/powerpc/sysdev/xive/native.c640
-rw-r--r--arch/powerpc/sysdev/xive/xive-internal.h62
-rwxr-xr-xarch/powerpc/tools/gcc-check-mprofile-kernel.sh (renamed from arch/powerpc/scripts/gcc-check-mprofile-kernel.sh)0
-rwxr-xr-xarch/powerpc/tools/relocs_check.sh (renamed from arch/powerpc/relocs_check.sh)0
-rw-r--r--arch/powerpc/xmon/xmon.c241
-rw-r--r--arch/s390/include/asm/Kbuild1
-rw-r--r--arch/s390/include/asm/cacheflush.h34
-rw-r--r--arch/s390/include/asm/kvm_host.h42
-rw-r--r--arch/s390/include/asm/sclp.h1
-rw-r--r--arch/s390/include/asm/set_memory.h31
-rw-r--r--arch/s390/include/uapi/asm/kvm.h29
-rw-r--r--arch/s390/kernel/ftrace.c1
-rw-r--r--arch/s390/kernel/kprobes.c2
-rw-r--r--arch/s390/kernel/machine_kexec.c1
-rw-r--r--arch/s390/kernel/perf_cpum_sf.c2
-rw-r--r--arch/s390/kvm/gaccess.c6
-rw-r--r--arch/s390/kvm/intercept.c27
-rw-r--r--arch/s390/kvm/interrupt.c133
-rw-r--r--arch/s390/kvm/kvm-s390.c151
-rw-r--r--arch/s390/kvm/kvm-s390.h4
-rw-r--r--arch/s390/kvm/priv.c50
-rw-r--r--arch/s390/kvm/sthyi.c3
-rw-r--r--arch/s390/kvm/trace-s390.h52
-rw-r--r--arch/s390/kvm/vsie.c78
-rw-r--r--arch/s390/mm/init.c1
-rw-r--r--arch/s390/mm/pageattr.c1
-rw-r--r--arch/s390/mm/vmem.c1
-rw-r--r--arch/s390/net/bpf_jit_comp.c1
-rw-r--r--arch/s390/tools/gen_facilities.c1
-rw-r--r--arch/sh/drivers/pci/pci.c21
-rw-r--r--arch/sh/include/asm/pci.h4
-rw-r--r--arch/sparc/include/asm/pci_64.h5
-rw-r--r--arch/sparc/kernel/head_64.S6
-rw-r--r--arch/sparc/kernel/led.c13
-rw-r--r--arch/sparc/kernel/pci.c6
-rw-r--r--arch/sparc/kernel/setup_32.c2
-rw-r--r--arch/sparc/kernel/setup_64.c2
-rw-r--r--arch/sparc/lib/GENbzero.S2
-rw-r--r--arch/sparc/lib/NGbzero.S2
-rw-r--r--arch/unicore32/include/asm/pci.h3
-rw-r--r--arch/unicore32/kernel/pci.c23
-rw-r--r--arch/x86/hyperv/hv_init.c2
-rw-r--r--arch/x86/include/asm/cacheflush.h85
-rw-r--r--arch/x86/include/asm/hypervisor.h8
-rw-r--r--arch/x86/include/asm/intel_pmc_ipc.h23
-rw-r--r--arch/x86/include/asm/intel_scu_ipc.h8
-rw-r--r--arch/x86/include/asm/kvm_emulate.h4
-rw-r--r--arch/x86/include/asm/kvm_host.h15
-rw-r--r--arch/x86/include/asm/pci.h7
-rw-r--r--arch/x86/include/asm/pmem.h5
-rw-r--r--arch/x86/include/asm/set_memory.h87
-rw-r--r--arch/x86/include/asm/string_64.h1
-rw-r--r--arch/x86/include/asm/vmx.h4
-rw-r--r--arch/x86/include/asm/xen/events.h11
-rw-r--r--arch/x86/include/asm/xen/page.h25
-rw-r--r--arch/x86/include/uapi/asm/hyperv.h7
-rw-r--r--arch/x86/include/uapi/asm/kvm.h3
-rw-r--r--arch/x86/include/uapi/asm/vmx.h25
-rw-r--r--arch/x86/kernel/amd_gart_64.c2
-rw-r--r--arch/x86/kernel/cpu/amd.c2
-rw-r--r--arch/x86/kernel/cpu/bugs.c2
-rw-r--r--arch/x86/kernel/cpu/common.c1
-rw-r--r--arch/x86/kernel/cpu/hypervisor.c15
-rw-r--r--arch/x86/kernel/cpu/mshyperv.c3
-rw-r--r--arch/x86/kernel/cpu/vmware.c39
-rw-r--r--arch/x86/kernel/ftrace.c2
-rw-r--r--arch/x86/kernel/kprobes/core.c1
-rw-r--r--arch/x86/kernel/kprobes/opt.c1
-rw-r--r--arch/x86/kernel/kvm.c4
-rw-r--r--arch/x86/kernel/machine_kexec_32.c2
-rw-r--r--arch/x86/kernel/machine_kexec_64.c1
-rw-r--r--arch/x86/kernel/module.c2
-rw-r--r--arch/x86/kernel/process_64.c2
-rw-r--r--arch/x86/kernel/tboot.c3
-rw-r--r--arch/x86/kvm/Kconfig12
-rw-r--r--arch/x86/kvm/Makefile2
-rw-r--r--arch/x86/kvm/assigned-dev.c1058
-rw-r--r--arch/x86/kvm/assigned-dev.h32
-rw-r--r--arch/x86/kvm/cpuid.c3
-rw-r--r--arch/x86/kvm/cpuid.h11
-rw-r--r--arch/x86/kvm/emulate.c23
-rw-r--r--arch/x86/kvm/i8259.c72
-rw-r--r--arch/x86/kvm/ioapic.c28
-rw-r--r--arch/x86/kvm/ioapic.h16
-rw-r--r--arch/x86/kvm/iommu.c356
-rw-r--r--arch/x86/kvm/irq.c2
-rw-r--r--arch/x86/kvm/irq.h32
-rw-r--r--arch/x86/kvm/irq_comm.c50
-rw-r--r--arch/x86/kvm/lapic.c26
-rw-r--r--arch/x86/kvm/mmu.c4
-rw-r--r--arch/x86/kvm/mmu.h3
-rw-r--r--arch/x86/kvm/page_track.c4
-rw-r--r--arch/x86/kvm/paging_tmpl.h54
-rw-r--r--arch/x86/kvm/svm.c14
-rw-r--r--arch/x86/kvm/vmx.c332
-rw-r--r--arch/x86/kvm/x86.c260
-rw-r--r--arch/x86/kvm/x86.h36
-rw-r--r--arch/x86/mm/init.c2
-rw-r--r--arch/x86/mm/init_32.c2
-rw-r--r--arch/x86/mm/init_64.c2
-rw-r--r--arch/x86/mm/ioremap.c2
-rw-r--r--arch/x86/mm/pageattr.c1
-rw-r--r--arch/x86/net/bpf_jit_comp.c1
-rw-r--r--arch/x86/pci/i386.c47
-rw-r--r--arch/x86/pci/pcbios.c2
-rw-r--r--arch/x86/pci/xen.c2
-rw-r--r--arch/x86/platform/efi/efi.c2
-rw-r--r--arch/x86/realmode/init.c2
-rw-r--r--arch/x86/xen/Kconfig33
-rw-r--r--arch/x86/xen/Makefile16
-rw-r--r--arch/x86/xen/efi.c2
-rw-r--r--arch/x86/xen/enlighten.c1833
-rw-r--r--arch/x86/xen/enlighten_hvm.c214
-rw-r--r--arch/x86/xen/enlighten_pv.c1513
-rw-r--r--arch/x86/xen/enlighten_pvh.c106
-rw-r--r--arch/x86/xen/mmu.c2788
-rw-r--r--arch/x86/xen/mmu_hvm.c79
-rw-r--r--arch/x86/xen/mmu_pv.c2730
-rw-r--r--arch/x86/xen/pmu.h5
-rw-r--r--arch/x86/xen/smp.c517
-rw-r--r--arch/x86/xen/smp.h16
-rw-r--r--arch/x86/xen/smp_hvm.c63
-rw-r--r--arch/x86/xen/smp_pv.c490
-rw-r--r--arch/x86/xen/suspend.c54
-rw-r--r--arch/x86/xen/suspend_hvm.c22
-rw-r--r--arch/x86/xen/suspend_pv.c46
-rw-r--r--arch/x86/xen/time.c8
-rw-r--r--arch/x86/xen/xen-head.S4
-rw-r--r--arch/x86/xen/xen-ops.h22
-rw-r--r--arch/xtensa/include/asm/pci.h7
-rw-r--r--arch/xtensa/include/asm/processor.h15
-rw-r--r--arch/xtensa/include/asm/ptrace.h39
-rw-r--r--arch/xtensa/include/uapi/asm/ptrace.h40
-rw-r--r--arch/xtensa/kernel/coprocessor.S24
-rw-r--r--arch/xtensa/kernel/entry.S3
-rw-r--r--arch/xtensa/kernel/pci.c24
-rw-r--r--arch/xtensa/kernel/process.c8
-rw-r--r--arch/xtensa/kernel/ptrace.c170
-rw-r--r--arch/xtensa/kernel/setup.c9
-rw-r--r--arch/xtensa/kernel/signal.c10
-rw-r--r--arch/xtensa/kernel/stacktrace.c35
-rw-r--r--arch/xtensa/platforms/iss/include/platform/simcall.h20
-rw-r--r--arch/xtensa/platforms/iss/setup.c43
-rw-r--r--block/Kconfig1
-rw-r--r--block/blk-core.c8
-rw-r--r--block/blk-mq-debugfs.c870
-rw-r--r--block/blk-mq-debugfs.h82
-rw-r--r--block/blk-mq-sched.c30
-rw-r--r--block/blk-mq-sysfs.c10
-rw-r--r--block/blk-mq.c54
-rw-r--r--block/blk-mq.h28
-rw-r--r--block/blk-sysfs.c3
-rw-r--r--block/elevator.c16
-rw-r--r--block/kyber-iosched.c130
-rw-r--r--block/mq-deadline.c123
-rw-r--r--block/partition-generic.c17
-rw-r--r--certs/blacklist.c2
-rw-r--r--crypto/lzo.c4
-rw-r--r--drivers/Kconfig2
-rw-r--r--drivers/Makefile6
-rw-r--r--drivers/acpi/Makefile1
-rw-r--r--drivers/acpi/acpi_apd.c12
-rw-r--r--drivers/acpi/acpi_lpss.c17
-rw-r--r--drivers/acpi/acpica/Makefile2
-rw-r--r--drivers/acpi/acpica/acconvert.h144
-rw-r--r--drivers/acpi/acpica/acglobal.h53
-rw-r--r--drivers/acpi/acpica/aclocal.h106
-rw-r--r--drivers/acpi/acpica/acmacros.h35
-rw-r--r--drivers/acpi/acpica/acopcode.h2
-rw-r--r--drivers/acpi/acpica/amlcode.h99
-rw-r--r--drivers/acpi/acpica/dbmethod.c1
-rw-r--r--drivers/acpi/acpica/dbxface.c5
-rw-r--r--drivers/acpi/acpica/dscontrol.c2
-rw-r--r--drivers/acpi/acpica/dsmthdat.c3
-rw-r--r--drivers/acpi/acpica/dsobject.c15
-rw-r--r--drivers/acpi/acpica/dsopcode.c4
-rw-r--r--drivers/acpi/acpica/dsutils.c8
-rw-r--r--drivers/acpi/acpica/dswexec.c2
-rw-r--r--drivers/acpi/acpica/dswload2.c2
-rw-r--r--drivers/acpi/acpica/exmisc.c16
-rw-r--r--drivers/acpi/acpica/exnames.c4
-rw-r--r--drivers/acpi/acpica/exoparg1.c17
-rw-r--r--drivers/acpi/acpica/exoparg2.c4
-rw-r--r--drivers/acpi/acpica/exoparg6.c16
-rw-r--r--drivers/acpi/acpica/exresolv.c3
-rw-r--r--drivers/acpi/acpica/exstore.c5
-rw-r--r--drivers/acpi/acpica/exstoren.c2
-rw-r--r--drivers/acpi/acpica/hwvalid.c18
-rw-r--r--drivers/acpi/acpica/nsaccess.c2
-rw-r--r--drivers/acpi/acpica/nsrepair.c16
-rw-r--r--drivers/acpi/acpica/nsrepair2.c6
-rw-r--r--drivers/acpi/acpica/nsutils.c29
-rw-r--r--drivers/acpi/acpica/psargs.c29
-rw-r--r--drivers/acpi/acpica/psloop.c34
-rw-r--r--drivers/acpi/acpica/psobject.c38
-rw-r--r--drivers/acpi/acpica/psopcode.c15
-rw-r--r--drivers/acpi/acpica/psopinfo.c2
-rw-r--r--drivers/acpi/acpica/psparse.c6
-rw-r--r--drivers/acpi/acpica/pstree.c9
-rw-r--r--drivers/acpi/acpica/psutils.c11
-rw-r--r--drivers/acpi/acpica/utalloc.c50
-rw-r--r--drivers/acpi/acpica/utcache.c2
-rw-r--r--drivers/acpi/acpica/utdebug.c1
-rw-r--r--drivers/acpi/acpica/utresrc.c9
-rw-r--r--drivers/acpi/acpica/utxferror.c16
-rw-r--r--drivers/acpi/apei/erst.c8
-rw-r--r--drivers/acpi/arm64/iort.c230
-rw-r--r--drivers/acpi/battery.c2
-rw-r--r--drivers/acpi/bus.c5
-rw-r--r--drivers/acpi/button.c5
-rw-r--r--drivers/acpi/cppc_acpi.c2
-rw-r--r--drivers/acpi/device_pm.c3
-rw-r--r--drivers/acpi/glue.c11
-rw-r--r--drivers/acpi/nfit/Kconfig12
-rw-r--r--drivers/acpi/nfit/core.c233
-rw-r--r--drivers/acpi/nfit/nfit.h4
-rw-r--r--drivers/acpi/pci_mcfg.c14
-rw-r--r--drivers/acpi/pmic/intel_pmic_xpower.c50
-rw-r--r--drivers/acpi/power.c10
-rw-r--r--drivers/acpi/scan.c11
-rw-r--r--drivers/acpi/sleep.c29
-rw-r--r--drivers/acpi/sleep.h1
-rw-r--r--drivers/acpi/x86/utils.c90
-rw-r--r--drivers/android/Kconfig2
-rw-r--r--drivers/ata/Kconfig9
-rw-r--r--drivers/ata/Makefile1
-rw-r--r--drivers/ata/pata_bk3710.c382
-rw-r--r--drivers/auxdisplay/Kconfig304
-rw-r--r--drivers/auxdisplay/Makefile4
-rw-r--r--drivers/auxdisplay/arm-charlcd.c (renamed from drivers/misc/arm-charlcd.c)0
-rw-r--r--drivers/auxdisplay/charlcd.c818
-rw-r--r--drivers/auxdisplay/hd44780.c326
-rw-r--r--drivers/auxdisplay/ht16k33.c20
-rw-r--r--drivers/auxdisplay/img-ascii-lcd.c1
-rw-r--r--drivers/auxdisplay/panel.c1796
-rw-r--r--drivers/base/core.c2
-rw-r--r--drivers/base/dd.c9
-rw-r--r--drivers/base/dma-contiguous.c5
-rw-r--r--drivers/base/dma-mapping.c46
-rw-r--r--drivers/base/platform.c2
-rw-r--r--drivers/base/power/domain.c2
-rw-r--r--drivers/base/power/main.c5
-rw-r--r--drivers/base/power/wakeup.c54
-rw-r--r--drivers/base/soc.c52
-rw-r--r--drivers/block/Kconfig1
-rw-r--r--drivers/block/brd.c48
-rw-r--r--drivers/block/drbd/drbd_bitmap.c2
-rw-r--r--drivers/block/loop.c5
-rw-r--r--drivers/block/mtip32xx/mtip32xx.c490
-rw-r--r--drivers/block/mtip32xx/mtip32xx.h10
-rw-r--r--drivers/block/nbd.c14
-rw-r--r--drivers/block/rbd.c366
-rw-r--r--drivers/block/virtio_blk.c7
-rw-r--r--drivers/char/agp/amd-k7-agp.c1
-rw-r--r--drivers/char/agp/ati-agp.c1
-rw-r--r--drivers/char/agp/generic.c12
-rw-r--r--drivers/char/agp/intel-gtt.c1
-rw-r--r--drivers/char/agp/sworks-agp.c1
-rw-r--r--drivers/char/dsp56k.c2
-rw-r--r--drivers/char/hangcheck-timer.c2
-rw-r--r--drivers/char/hpet.c2
-rw-r--r--drivers/char/misc.c11
-rw-r--r--drivers/char/mspec.c9
-rw-r--r--drivers/char/tpm/tpm-chip.c54
-rw-r--r--drivers/char/virtio_console.c2
-rw-r--r--drivers/clk/meson/gxbb.h21
-rw-r--r--drivers/clk/qcom/common.c2
-rw-r--r--drivers/clk/versatile/Kconfig3
-rw-r--r--drivers/clk/versatile/Makefile2
-rw-r--r--drivers/clk/versatile/clk-icst.c1
-rw-r--r--drivers/clk/versatile/clk-icst.h2
-rw-r--r--drivers/clk/versatile/clk-impd1.c1
-rw-r--r--drivers/clk/versatile/clk-realview.c1
-rw-r--r--drivers/clk/versatile/clk-versatile.c1
-rw-r--r--drivers/clk/versatile/icst.c105
-rw-r--r--drivers/clk/versatile/icst.h57
-rw-r--r--drivers/cpufreq/powernow-k8.c3
-rw-r--r--drivers/cpufreq/sti-cpufreq.c4
-rw-r--r--drivers/cpuidle/cpuidle.c3
-rw-r--r--drivers/dax/Kconfig10
-rw-r--r--drivers/dax/Makefile5
-rw-r--r--drivers/dax/dax-private.h57
-rw-r--r--drivers/dax/dax.c862
-rw-r--r--drivers/dax/dax.h15
-rw-r--r--drivers/dax/device-dax.h25
-rw-r--r--drivers/dax/device.c660
-rw-r--r--drivers/dax/pmem.c10
-rw-r--r--drivers/dax/super.c425
-rw-r--r--drivers/dma/Kconfig7
-rw-r--r--drivers/dma/amba-pl08x.c20
-rw-r--r--drivers/dma/cppi41.c168
-rw-r--r--drivers/dma/dmatest.c11
-rw-r--r--drivers/dma/imx-sdma.c19
-rw-r--r--drivers/dma/ioat/init.c4
-rw-r--r--drivers/dma/mv_xor.c9
-rw-r--r--drivers/dma/pl330.c42
-rw-r--r--drivers/dma/qcom/hidma.c15
-rw-r--r--drivers/dma/qcom/hidma_ll.c6
-rw-r--r--drivers/dma/sh/rcar-dmac.c52
-rw-r--r--drivers/dma/stm32-dma.c2
-rw-r--r--drivers/dma/sun4i-dma.c2
-rw-r--r--drivers/dma/virt-dma.c11
-rw-r--r--drivers/dma/xilinx/xilinx_dma.c63
-rw-r--r--drivers/extcon/Kconfig7
-rw-r--r--drivers/extcon/Makefile1
-rw-r--r--drivers/extcon/extcon-arizona.c46
-rw-r--r--drivers/extcon/extcon-intel-cht-wc.c395
-rw-r--r--drivers/extcon/extcon-palmas.c6
-rw-r--r--drivers/extcon/extcon-usb-gpio.c10
-rw-r--r--drivers/extcon/extcon.c22
-rw-r--r--drivers/firewire/core-topology.c2
-rw-r--r--drivers/firewire/core.h8
-rw-r--r--drivers/firmware/arm_scpi.c7
-rw-r--r--drivers/firmware/efi/libstub/fdt.c28
-rw-r--r--drivers/firmware/google/Kconfig59
-rw-r--r--drivers/firmware/google/Makefile10
-rw-r--r--drivers/firmware/google/coreboot_table-acpi.c88
-rw-r--r--drivers/firmware/google/coreboot_table-of.c82
-rw-r--r--drivers/firmware/google/coreboot_table.c94
-rw-r--r--drivers/firmware/google/coreboot_table.h50
-rw-r--r--drivers/firmware/google/memconsole-coreboot.c109
-rw-r--r--drivers/firmware/google/memconsole-x86-legacy.c153
-rw-r--r--drivers/firmware/google/memconsole.c155
-rw-r--r--drivers/firmware/google/memconsole.h43
-rw-r--r--drivers/firmware/google/vpd.c332
-rw-r--r--drivers/firmware/google/vpd_decode.c99
-rw-r--r--drivers/firmware/google/vpd_decode.h58
-rw-r--r--drivers/firmware/meson/meson_sm.c20
-rw-r--r--drivers/firmware/qcom_scm-32.c18
-rw-r--r--drivers/firmware/qcom_scm-64.c58
-rw-r--r--drivers/firmware/qcom_scm.c18
-rw-r--r--drivers/firmware/qcom_scm.h11
-rw-r--r--drivers/fpga/Kconfig42
-rw-r--r--drivers/fpga/Makefile6
-rw-r--r--drivers/fpga/altera-freeze-bridge.c32
-rw-r--r--drivers/fpga/altera-hps2fpga.c15
-rw-r--r--drivers/fpga/altera-pr-ip-core-plat.c68
-rw-r--r--drivers/fpga/altera-pr-ip-core.c220
-rw-r--r--drivers/fpga/fpga-bridge.c17
-rw-r--r--drivers/fpga/fpga-mgr.c2
-rw-r--r--drivers/fpga/fpga-region.c15
-rw-r--r--drivers/fpga/ice40-spi.c207
-rw-r--r--drivers/fpga/ts73xx-fpga.c156
-rw-r--r--drivers/fpga/xilinx-pr-decoupler.c161
-rw-r--r--drivers/fpga/xilinx-spi.c198
-rw-r--r--drivers/fpga/zynq-fpga.c28
-rw-r--r--drivers/gpio/Kconfig46
-rw-r--r--drivers/gpio/Makefile5
-rw-r--r--drivers/gpio/devres.c2
-rw-r--r--drivers/gpio/gpio-104-dio-48e.c38
-rw-r--r--drivers/gpio/gpio-104-idi-48.c18
-rw-r--r--drivers/gpio/gpio-104-idio-16.c24
-rw-r--r--drivers/gpio/gpio-altera.c24
-rw-r--r--drivers/gpio/gpio-arizona.c30
-rw-r--r--drivers/gpio/gpio-aspeed.c285
-rw-r--r--drivers/gpio/gpio-ath79.c28
-rw-r--r--drivers/gpio/gpio-bcm-kona.c48
-rw-r--r--drivers/gpio/gpio-bd9571mwv.c144
-rw-r--r--drivers/gpio/gpio-davinci.c2
-rw-r--r--drivers/gpio/gpio-dwapb.c93
-rw-r--r--drivers/gpio/gpio-etraxfs.c24
-rw-r--r--drivers/gpio/gpio-exar.c23
-rw-r--r--drivers/gpio/gpio-f7188x.c24
-rw-r--r--drivers/gpio/gpio-ftgpio010.c242
-rw-r--r--drivers/gpio/gpio-gemini.c236
-rw-r--r--drivers/gpio/gpio-merrifield.c2
-rw-r--r--drivers/gpio/gpio-ml-ioh.c28
-rw-r--r--drivers/gpio/gpio-mmio.c1
-rw-r--r--drivers/gpio/gpio-mockup.c16
-rw-r--r--drivers/gpio/gpio-moxart.c84
-rw-r--r--drivers/gpio/gpio-mvebu.c435
-rw-r--r--drivers/gpio/gpio-mxc.c6
-rw-r--r--drivers/gpio/gpio-mxs.c6
-rw-r--r--drivers/gpio/gpio-omap.c26
-rw-r--r--drivers/gpio/gpio-pca953x.c38
-rw-r--r--drivers/gpio/gpio-pcf857x.c2
-rw-r--r--drivers/gpio/gpio-pch.c14
-rw-r--r--drivers/gpio/gpio-pci-idio-16.c28
-rw-r--r--drivers/gpio/gpio-pl061.c28
-rw-r--r--drivers/gpio/gpio-pxa.c2
-rw-r--r--drivers/gpio/gpio-reg.c185
-rw-r--r--drivers/gpio/gpio-sa1100.c216
-rw-r--r--drivers/gpio/gpio-sodaville.c28
-rw-r--r--drivers/gpio/gpio-sta2x11.c17
-rw-r--r--drivers/gpio/gpio-twl4030.c3
-rw-r--r--drivers/gpio/gpio-wcove.c8
-rw-r--r--drivers/gpio/gpio-ws16c48.c46
-rw-r--r--drivers/gpio/gpio-xlp.c9
-rw-r--r--drivers/gpio/gpio-zx.c24
-rw-r--r--drivers/gpio/gpiolib-acpi.c53
-rw-r--r--drivers/gpio/gpiolib-of.c2
-rw-r--r--drivers/gpio/gpiolib.c39
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c3
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c2
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_dump.c4
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmi.c19
-rw-r--r--drivers/gpu/drm/gma500/gtt.c1
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.c1
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c2
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c2
-rw-r--r--drivers/gpu/drm/i915/i915_gem_request.h2
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_gem_device.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_gart.c3
-rw-r--r--drivers/gpu/drm/sti/sti_hdmi.c11
-rw-r--r--drivers/gpu/drm/sti/sti_hdmi.h3
-rw-r--r--drivers/gpu/drm/tegra/Kconfig1
-rw-r--r--drivers/gpu/drm/tegra/Makefile4
-rw-r--r--drivers/gpu/drm/tegra/drm.c283
-rw-r--r--drivers/gpu/drm/tegra/drm.h15
-rw-r--r--drivers/gpu/drm/tegra/falcon.c259
-rw-r--r--drivers/gpu/drm/tegra/falcon.h127
-rw-r--r--drivers/gpu/drm/tegra/fb.c23
-rw-r--r--drivers/gpu/drm/tegra/gem.c12
-rw-r--r--drivers/gpu/drm/tegra/vic.c396
-rw-r--r--drivers/gpu/drm/tegra/vic.h31
-rw-r--r--drivers/gpu/drm/ttm/ttm_page_alloc.c3
-rw-r--r--drivers/gpu/drm/ttm/ttm_page_alloc_dma.c3
-rw-r--r--drivers/gpu/drm/ttm/ttm_tt.c3
-rw-r--r--drivers/gpu/host1x/bus.c68
-rw-r--r--drivers/gpu/host1x/cdma.c74
-rw-r--r--drivers/gpu/host1x/cdma.h6
-rw-r--r--drivers/gpu/host1x/dev.c76
-rw-r--r--drivers/gpu/host1x/dev.h14
-rw-r--r--drivers/gpu/host1x/hw/cdma_hw.c16
-rw-r--r--drivers/gpu/host1x/job.c72
-rw-r--r--drivers/gpu/host1x/job.h1
-rw-r--r--drivers/gpu/host1x/syncpt.c2
-rw-r--r--drivers/hv/channel.c10
-rw-r--r--drivers/hv/channel_mgmt.c48
-rw-r--r--drivers/hv/connection.c65
-rw-r--r--drivers/hv/hv.c5
-rw-r--r--drivers/hv/hv_balloon.c2
-rw-r--r--drivers/hv/hv_fcopy.c2
-rw-r--r--drivers/hv/hv_kvp.c12
-rw-r--r--drivers/hv/hv_snapshot.c2
-rw-r--r--drivers/hv/hyperv_vmbus.h29
-rw-r--r--drivers/hv/ring_buffer.c22
-rw-r--r--drivers/hv/vmbus_drv.c4
-rw-r--r--drivers/hwtracing/coresight/of_coresight.c2
-rw-r--r--drivers/hwtracing/intel_th/msu.c4
-rw-r--r--drivers/i2c/busses/i2c-designware-platdrv.c2
-rw-r--r--drivers/ide/ide-io.c2
-rw-r--r--drivers/ide/ide-probe.c4
-rw-r--r--drivers/idle/intel_idle.c2
-rw-r--r--drivers/iio/accel/Kconfig31
-rw-r--r--drivers/iio/accel/Makefile3
-rw-r--r--drivers/iio/accel/adxl345.h18
-rw-r--r--drivers/iio/accel/adxl345_core.c179
-rw-r--r--drivers/iio/accel/adxl345_i2c.c73
-rw-r--r--drivers/iio/accel/adxl345_spi.c81
-rw-r--r--drivers/iio/accel/bma180.c32
-rw-r--r--drivers/iio/accel/mma7455_i2c.c8
-rw-r--r--drivers/iio/accel/mma7660.c7
-rw-r--r--drivers/iio/adc/Kconfig112
-rw-r--r--drivers/iio/adc/Makefile10
-rw-r--r--drivers/iio/adc/ad799x.c2
-rw-r--r--drivers/iio/adc/aspeed_adc.c295
-rw-r--r--drivers/iio/adc/cpcap-adc.c1007
-rw-r--r--drivers/iio/adc/exynos_adc.c2
-rw-r--r--drivers/iio/adc/hx711.c2
-rw-r--r--drivers/iio/adc/imx7d_adc.c2
-rw-r--r--drivers/iio/adc/ina2xx-adc.c34
-rw-r--r--drivers/iio/adc/lpc32xx_adc.c219
-rw-r--r--drivers/iio/adc/ltc2497.c279
-rw-r--r--drivers/iio/adc/max1027.c2
-rw-r--r--drivers/iio/adc/max11100.c5
-rw-r--r--drivers/iio/adc/max1118.c307
-rw-r--r--drivers/iio/adc/max1363.c2
-rw-r--r--drivers/iio/adc/max9611.c585
-rw-r--r--drivers/iio/adc/meson_saradc.c165
-rw-r--r--drivers/iio/adc/qcom-pm8xxx-xoadc.c1036
-rw-r--r--drivers/iio/adc/qcom-spmi-vadc.c325
-rw-r--r--drivers/iio/adc/qcom-vadc-common.c230
-rw-r--r--drivers/iio/adc/qcom-vadc-common.h108
-rw-r--r--drivers/iio/adc/rockchip_saradc.c2
-rw-r--r--drivers/iio/adc/spear_adc.c (renamed from drivers/staging/iio/adc/spear_adc.c)0
-rw-r--r--drivers/iio/adc/stm32-adc.c50
-rw-r--r--drivers/iio/adc/stx104.c1
-rw-r--r--drivers/iio/adc/sun4i-gpadc-iio.c719
-rw-r--r--drivers/iio/adc/ti-ads1015.c24
-rw-r--r--drivers/iio/adc/vf610_adc.c2
-rw-r--r--drivers/iio/chemical/ams-iaq-core.c2
-rw-r--r--drivers/iio/chemical/vz89x.c2
-rw-r--r--drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c26
-rw-r--r--drivers/iio/common/hid-sensors/hid-sensor-attributes.c37
-rw-r--r--drivers/iio/common/hid-sensors/hid-sensor-trigger.c24
-rw-r--r--drivers/iio/common/ms_sensors/ms_sensors_i2c.c4
-rw-r--r--drivers/iio/counter/104-quad-8.c1
-rw-r--r--drivers/iio/counter/Kconfig2
-rw-r--r--drivers/iio/dac/Kconfig25
-rw-r--r--drivers/iio/dac/Makefile3
-rw-r--r--drivers/iio/dac/ad5504.c4
-rw-r--r--drivers/iio/dac/ad7303.c6
-rw-r--r--drivers/iio/dac/cio-dac.c1
-rw-r--r--drivers/iio/dac/ltc2632.c314
-rw-r--r--drivers/iio/dac/max5821.c1
-rw-r--r--drivers/iio/dac/mcp4725.c24
-rw-r--r--drivers/iio/dac/stm32-dac-core.c180
-rw-r--r--drivers/iio/dac/stm32-dac-core.h51
-rw-r--r--drivers/iio/dac/stm32-dac.c334
-rw-r--r--drivers/iio/gyro/itg3200_core.c7
-rw-r--r--drivers/iio/gyro/mpu3050-i2c.c5
-rw-r--r--drivers/iio/health/Kconfig13
-rw-r--r--drivers/iio/health/Makefile1
-rw-r--r--drivers/iio/health/max30100.c1
-rw-r--r--drivers/iio/health/max30102.c486
-rw-r--r--drivers/iio/humidity/Kconfig14
-rw-r--r--drivers/iio/humidity/Makefile3
-rw-r--r--drivers/iio/humidity/hdc100x.c2
-rw-r--r--drivers/iio/humidity/hid-sensor-humidity.c315
-rw-r--r--drivers/iio/humidity/hts221_buffer.c2
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_core.c6
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c43
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h2
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c1
-rw-r--r--drivers/iio/imu/st_lsm6dsx/Kconfig2
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h11
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c13
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c105
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i2c.c12
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_spi.c12
-rw-r--r--drivers/iio/industrialio-core.c15
-rw-r--r--drivers/iio/light/Kconfig20
-rw-r--r--drivers/iio/light/Makefile2
-rw-r--r--drivers/iio/light/apds9960.c11
-rw-r--r--drivers/iio/light/bh1750.c2
-rw-r--r--drivers/iio/light/cros_ec_light_prox.c289
-rw-r--r--drivers/iio/light/hid-sensor-prox.c7
-rw-r--r--drivers/iio/light/lm3533-als.c4
-rw-r--r--drivers/iio/light/tsl2563.c10
-rw-r--r--drivers/iio/light/us5182d.c7
-rw-r--r--drivers/iio/light/vl6180.c543
-rw-r--r--drivers/iio/magnetometer/bmc150_magn_i2c.c9
-rw-r--r--drivers/iio/magnetometer/mag3110.c7
-rw-r--r--drivers/iio/potentiostat/lmp91000.c1
-rw-r--r--drivers/iio/pressure/bmp280-core.c11
-rw-r--r--drivers/iio/pressure/hp03.c7
-rw-r--r--drivers/iio/pressure/mpl3115.c7
-rw-r--r--drivers/iio/pressure/zpa2326.c4
-rw-r--r--drivers/iio/proximity/Kconfig11
-rw-r--r--drivers/iio/proximity/Makefile1
-rw-r--r--drivers/iio/proximity/as3935.c5
-rw-r--r--drivers/iio/proximity/pulsedlight-lidar-lite-v2.c1
-rw-r--r--drivers/iio/proximity/srf04.c304
-rw-r--r--drivers/iio/temperature/Kconfig14
-rw-r--r--drivers/iio/temperature/Makefile1
-rw-r--r--drivers/iio/temperature/hid-sensor-temperature.c311
-rw-r--r--drivers/iio/temperature/maxim_thermocouple.c1
-rw-r--r--drivers/iio/temperature/mlx90614.c7
-rw-r--r--drivers/iio/temperature/tmp007.c277
-rw-r--r--drivers/iio/trigger/stm32-timer-trigger.c320
-rw-r--r--drivers/infiniband/core/ucm.c35
-rw-r--r--drivers/infiniband/core/user_mad.c4
-rw-r--r--drivers/infiniband/core/uverbs_main.c2
-rw-r--r--drivers/infiniband/hw/hfi1/chip.c51
-rw-r--r--drivers/infiniband/hw/hfi1/chip.h10
-rw-r--r--drivers/infiniband/hw/hfi1/device.c2
-rw-r--r--drivers/infiniband/hw/hfi1/driver.c42
-rw-r--r--drivers/infiniband/hw/hfi1/file_ops.c425
-rw-r--r--drivers/infiniband/hw/hfi1/hfi.h106
-rw-r--r--drivers/infiniband/hw/hfi1/init.c33
-rw-r--r--drivers/infiniband/hw/hfi1/intr.c3
-rw-r--r--drivers/infiniband/hw/hfi1/pcie.c30
-rw-r--r--drivers/infiniband/hw/hfi1/qp.c4
-rw-r--r--drivers/infiniband/hw/hfi1/rc.c13
-rw-r--r--drivers/infiniband/hw/hfi1/ruc.c80
-rw-r--r--drivers/infiniband/hw/hfi1/trace_ctxts.h17
-rw-r--r--drivers/infiniband/hw/hfi1/trace_tx.h34
-rw-r--r--drivers/infiniband/hw/hfi1/user_exp_rcv.c180
-rw-r--r--drivers/infiniband/hw/hfi1/user_exp_rcv.h17
-rw-r--r--drivers/infiniband/hw/hfi1/user_sdma.c191
-rw-r--r--drivers/infiniband/hw/hfi1/user_sdma.h18
-rw-r--r--drivers/infiniband/hw/hfi1/verbs.h5
-rw-r--r--drivers/infiniband/hw/hfi1/vnic_main.c8
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v1.c2
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_mr.c2
-rw-r--r--drivers/infiniband/hw/mlx5/main.c22
-rw-r--r--drivers/infiniband/hw/qedr/main.c1
-rw-r--r--drivers/infiniband/hw/qib/qib_fs.c2
-rw-r--r--drivers/infiniband/sw/rxe/rxe_mr.c2
-rw-r--r--drivers/infiniband/sw/rxe/rxe_param.h1
-rw-r--r--drivers/infiniband/sw/rxe/rxe_verbs.c2
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ethtool.c59
-rw-r--r--drivers/input/evdev.c11
-rw-r--r--drivers/input/joydev.c11
-rw-r--r--drivers/input/misc/soc_button_array.c5
-rw-r--r--drivers/input/mousedev.c11
-rw-r--r--drivers/iommu/amd_iommu_v2.c2
-rw-r--r--drivers/iommu/arm-smmu-v3.c127
-rw-r--r--drivers/iommu/arm-smmu.c376
-rw-r--r--drivers/iommu/dma-iommu.c283
-rw-r--r--drivers/iommu/dmar.c35
-rw-r--r--drivers/iommu/exynos-iommu.c32
-rw-r--r--drivers/iommu/fsl_pamu.h1
-rw-r--r--drivers/iommu/intel-iommu.c36
-rw-r--r--drivers/iommu/intel_irq_remapping.c15
-rw-r--r--drivers/iommu/io-pgtable-arm.c2
-rw-r--r--drivers/iommu/iommu.c78
-rw-r--r--drivers/iommu/iova.c89
-rw-r--r--drivers/iommu/mtk_iommu_v1.c25
-rw-r--r--drivers/iommu/of_iommu.c126
-rw-r--r--drivers/iommu/omap-iommu.c190
-rw-r--r--drivers/iommu/omap-iommu.h34
-rw-r--r--drivers/iommu/rockchip-iommu.c31
-rw-r--r--drivers/iommu/tegra-smmu.c1
-rw-r--r--drivers/leds/leds-lp5521.c2
-rw-r--r--drivers/leds/leds-lp5523.c2
-rw-r--r--drivers/leds/leds-lp5562.c2
-rw-r--r--drivers/lightnvm/core.c4
-rw-r--r--drivers/macintosh/macio_sysfs.c7
-rw-r--r--drivers/md/Kconfig16
-rw-r--r--drivers/md/bcache/super.c8
-rw-r--r--drivers/md/bcache/util.h12
-rw-r--r--drivers/md/dm-bufio.c2
-rw-r--r--drivers/md/dm-cache-metadata.c12
-rw-r--r--drivers/md/dm-cache-policy-smq.c30
-rw-r--r--drivers/md/dm-core.h1
-rw-r--r--drivers/md/dm-ioctl.c13
-rw-r--r--drivers/md/dm-linear.c27
-rw-r--r--drivers/md/dm-rq.c7
-rw-r--r--drivers/md/dm-snap.c6
-rw-r--r--drivers/md/dm-stats.c7
-rw-r--r--drivers/md/dm-stripe.c29
-rw-r--r--drivers/md/dm-target.c6
-rw-r--r--drivers/md/dm.c67
-rw-r--r--drivers/media/Kconfig22
-rw-r--r--drivers/media/Makefile10
-rw-r--r--drivers/media/cec-edid.c171
-rw-r--r--drivers/media/cec/Kconfig19
-rw-r--r--drivers/media/cec/Makefile8
-rw-r--r--drivers/media/cec/cec-adap.c141
-rw-r--r--drivers/media/cec/cec-api.c21
-rw-r--r--drivers/media/cec/cec-core.c56
-rw-r--r--drivers/media/cec/cec-edid.c167
-rw-r--r--drivers/media/cec/cec-notifier.c130
-rw-r--r--drivers/media/common/b2c2/flexcop-fe-tuner.c2
-rw-r--r--drivers/media/common/saa7146/saa7146_vbi.c5
-rw-r--r--drivers/media/common/saa7146/saa7146_video.c5
-rw-r--r--drivers/media/common/tveeprom.c4
-rw-r--r--drivers/media/common/v4l2-tpg/v4l2-tpg-core.c9
-rw-r--r--drivers/media/dvb-core/dvb_ca_en50221.c23
-rw-r--r--drivers/media/dvb-core/dvb_frontend.h2
-rw-r--r--drivers/media/dvb-frontends/cxd2841er.c4
-rw-r--r--drivers/media/dvb-frontends/drx39xyj/drx_dap_fasi.h2
-rw-r--r--drivers/media/dvb-frontends/drxk_hard.c5
-rw-r--r--drivers/media/dvb-frontends/mn88472.c134
-rw-r--r--drivers/media/dvb-frontends/mn88472_priv.h1
-rw-r--r--drivers/media/dvb-frontends/si2168.c4
-rw-r--r--drivers/media/dvb-frontends/si2168_priv.h2
-rw-r--r--drivers/media/i2c/Kconfig43
-rw-r--r--drivers/media/i2c/Makefile3
-rw-r--r--drivers/media/i2c/ad5820.c4
-rw-r--r--drivers/media/i2c/adv7511.c6
-rw-r--r--drivers/media/i2c/adv7604.c6
-rw-r--r--drivers/media/i2c/adv7842.c6
-rw-r--r--drivers/media/i2c/et8ek8/et8ek8_driver.c2
-rw-r--r--drivers/media/i2c/ov2640.c1201
-rw-r--r--drivers/media/i2c/ov5645.c1345
-rw-r--r--drivers/media/i2c/ov5647.c634
-rw-r--r--drivers/media/i2c/ov7670.c75
-rw-r--r--drivers/media/i2c/soc_camera/Kconfig6
-rw-r--r--drivers/media/i2c/soc_camera/Makefile1
-rw-r--r--drivers/media/i2c/soc_camera/imx074.c8
-rw-r--r--drivers/media/i2c/soc_camera/mt9m001.c14
-rw-r--r--drivers/media/i2c/soc_camera/mt9t031.c6
-rw-r--r--drivers/media/i2c/soc_camera/mt9t112.c6
-rw-r--r--drivers/media/i2c/soc_camera/mt9v022.c14
-rw-r--r--drivers/media/i2c/soc_camera/ov2640.c1204
-rw-r--r--drivers/media/i2c/soc_camera/ov5642.c17
-rw-r--r--drivers/media/i2c/soc_camera/ov6650.c8
-rw-r--r--drivers/media/i2c/soc_camera/ov772x.c47
-rw-r--r--drivers/media/i2c/soc_camera/ov9640.c30
-rw-r--r--drivers/media/i2c/soc_camera/ov9740.c24
-rw-r--r--drivers/media/i2c/soc_camera/rj54n1cb0c.c6
-rw-r--r--drivers/media/i2c/soc_camera/tw9910.c6
-rw-r--r--drivers/media/i2c/tc358743.c59
-rw-r--r--drivers/media/i2c/tvp5150.c4
-rw-r--r--drivers/media/media-devnode.c20
-rw-r--r--drivers/media/media-entity.c4
-rw-r--r--drivers/media/pci/bt8xx/bttv-cards.c2
-rw-r--r--drivers/media/pci/bt8xx/bttv-driver.c4
-rw-r--r--drivers/media/pci/cx18/cx18-driver.c2
-rw-r--r--drivers/media/pci/cx18/cx18-streams.c4
-rw-r--r--drivers/media/pci/cx23885/cx23885-cards.c3
-rw-r--r--drivers/media/pci/cx88/cx88-cards.c4
-rw-r--r--drivers/media/pci/cx88/cx88-core.c4
-rw-r--r--drivers/media/pci/cx88/cx88-dvb.c2
-rw-r--r--drivers/media/pci/cx88/cx88.h3
-rw-r--r--drivers/media/pci/dm1105/dm1105.c2
-rw-r--r--drivers/media/pci/ivtv/ivtv-driver.c7
-rw-r--r--drivers/media/pci/ivtv/ivtv-ioctl.c4
-rw-r--r--drivers/media/pci/ivtv/ivtv-udma.c2
-rw-r--r--drivers/media/pci/mantis/mantis_vp1034.c2
-rw-r--r--drivers/media/pci/netup_unidvb/netup_unidvb_core.c5
-rw-r--r--drivers/media/pci/saa7134/saa7134-cards.c2
-rw-r--r--drivers/media/pci/saa7134/saa7134-dvb.c4
-rw-r--r--drivers/media/pci/saa7134/saa7134-ts.c5
-rw-r--r--drivers/media/pci/saa7134/saa7134-vbi.c5
-rw-r--r--drivers/media/pci/saa7134/saa7134-video.c5
-rw-r--r--drivers/media/pci/saa7164/saa7164-cards.c4
-rw-r--r--drivers/media/pci/saa7164/saa7164-cmd.c5
-rw-r--r--drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c5
-rw-r--r--drivers/media/pci/solo6x10/solo6x10-v4l2.c11
-rw-r--r--drivers/media/pci/ttpci/av7110_ir.c5
-rw-r--r--drivers/media/pci/ttpci/budget-av.c8
-rw-r--r--drivers/media/pci/ttpci/budget-ci.c2
-rw-r--r--drivers/media/pci/ttpci/budget.c4
-rw-r--r--drivers/media/pci/tw5864/tw5864-video.c11
-rw-r--r--drivers/media/platform/Kconfig48
-rw-r--r--drivers/media/platform/Makefile6
-rw-r--r--drivers/media/platform/atmel/Kconfig11
-rw-r--r--drivers/media/platform/atmel/Makefile1
-rw-r--r--drivers/media/platform/atmel/atmel-isc-regs.h102
-rw-r--r--drivers/media/platform/atmel/atmel-isc.c628
-rw-r--r--drivers/media/platform/atmel/atmel-isi.c1368
-rw-r--r--drivers/media/platform/atmel/atmel-isi.h (renamed from drivers/media/platform/soc_camera/atmel-isi.h)0
-rw-r--r--drivers/media/platform/coda/coda-bit.c100
-rw-r--r--drivers/media/platform/coda/coda-common.c130
-rw-r--r--drivers/media/platform/coda/coda-h264.c87
-rw-r--r--drivers/media/platform/coda/coda.h10
-rw-r--r--drivers/media/platform/coda/coda_regs.h1
-rw-r--r--drivers/media/platform/davinci/vpif_display.c2
-rw-r--r--drivers/media/platform/exynos-gsc/gsc-core.c27
-rw-r--r--drivers/media/platform/fsl-viu.c5
-rw-r--r--drivers/media/platform/m2m-deinterlace.c1
-rw-r--r--drivers/media/platform/mtk-jpeg/Makefile2
-rw-r--r--drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c1292
-rw-r--r--drivers/media/platform/mtk-jpeg/mtk_jpeg_core.h139
-rw-r--r--drivers/media/platform/mtk-jpeg/mtk_jpeg_hw.c417
-rw-r--r--drivers/media/platform/mtk-jpeg/mtk_jpeg_hw.h91
-rw-r--r--drivers/media/platform/mtk-jpeg/mtk_jpeg_parse.c160
-rw-r--r--drivers/media/platform/mtk-jpeg/mtk_jpeg_parse.h25
-rw-r--r--drivers/media/platform/mtk-jpeg/mtk_jpeg_reg.h58
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c33
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.h2
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c5
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_util.h17
-rw-r--r--drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c26
-rw-r--r--drivers/media/platform/mtk-vcodec/vdec_drv_if.h2
-rw-r--r--drivers/media/platform/mtk-vcodec/venc/venc_vp8_if.c4
-rw-r--r--drivers/media/platform/mtk-vpu/mtk_vpu.c1
-rw-r--r--drivers/media/platform/omap3isp/isp.c17
-rw-r--r--drivers/media/platform/omap3isp/isp.h1
-rw-r--r--drivers/media/platform/s5p-cec/Makefile (renamed from drivers/staging/media/s5p-cec/Makefile)0
-rw-r--r--drivers/media/platform/s5p-cec/exynos_hdmi_cec.h (renamed from drivers/staging/media/s5p-cec/exynos_hdmi_cec.h)0
-rw-r--r--drivers/media/platform/s5p-cec/exynos_hdmi_cecctrl.c (renamed from drivers/staging/media/s5p-cec/exynos_hdmi_cecctrl.c)0
-rw-r--r--drivers/media/platform/s5p-cec/regs-cec.h (renamed from drivers/staging/media/s5p-cec/regs-cec.h)0
-rw-r--r--drivers/media/platform/s5p-cec/s5p_cec.c304
-rw-r--r--drivers/media/platform/s5p-cec/s5p_cec.h79
-rw-r--r--drivers/media/platform/s5p-g2d/g2d.c2
-rw-r--r--drivers/media/platform/s5p-mfc/regs-mfc-v6.h2
-rw-r--r--drivers/media/platform/s5p-mfc/regs-mfc-v7.h2
-rw-r--r--drivers/media/platform/s5p-mfc/regs-mfc-v8.h2
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc.c245
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v5.c2
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_common.h43
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c72
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.h1
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_dec.c8
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_enc.c10
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_iommu.h51
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_opr.c93
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_opr.h12
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c48
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c16
-rw-r--r--drivers/media/platform/sh_vou.c4
-rw-r--r--drivers/media/platform/soc_camera/Kconfig12
-rw-r--r--drivers/media/platform/soc_camera/Makefile1
-rw-r--r--drivers/media/platform/soc_camera/atmel-isi.c1167
-rw-r--r--drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c13
-rw-r--r--drivers/media/platform/soc_camera/soc_camera.c103
-rw-r--r--drivers/media/platform/soc_camera/soc_scale_crop.c11
-rw-r--r--drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c5
-rw-r--r--drivers/media/platform/sti/cec/Makefile (renamed from drivers/staging/media/st-cec/Makefile)0
-rw-r--r--drivers/media/platform/sti/cec/stih-cec.c404
-rw-r--r--drivers/media/platform/sti/delta/delta-mjpeg-dec.c2
-rw-r--r--drivers/media/platform/ti-vpe/vpdma.c14
-rw-r--r--drivers/media/platform/ti-vpe/vpdma.h6
-rw-r--r--drivers/media/platform/ti-vpe/vpe.c34
-rw-r--r--drivers/media/platform/vimc/Kconfig14
-rw-r--r--drivers/media/platform/vimc/Makefile3
-rw-r--r--drivers/media/platform/vimc/vimc-capture.c498
-rw-r--r--drivers/media/platform/vimc/vimc-capture.h28
-rw-r--r--drivers/media/platform/vimc/vimc-core.c695
-rw-r--r--drivers/media/platform/vimc/vimc-core.h112
-rw-r--r--drivers/media/platform/vimc/vimc-sensor.c276
-rw-r--r--drivers/media/platform/vimc/vimc-sensor.h28
-rw-r--r--drivers/media/platform/vivid/Kconfig5
-rw-r--r--drivers/media/platform/vivid/vivid-cec.c4
-rw-r--r--drivers/media/platform/vivid/vivid-core.c32
-rw-r--r--drivers/media/platform/vivid/vivid-vid-cap.c13
-rw-r--r--drivers/media/platform/vivid/vivid-vid-common.c4
-rw-r--r--drivers/media/platform/vivid/vivid-vid-out.c26
-rw-r--r--drivers/media/platform/vsp1/Makefile1
-rw-r--r--drivers/media/platform/vsp1/vsp1.h6
-rw-r--r--drivers/media/platform/vsp1/vsp1_bru.c27
-rw-r--r--drivers/media/platform/vsp1/vsp1_dl.c27
-rw-r--r--drivers/media/platform/vsp1/vsp1_drm.c42
-rw-r--r--drivers/media/platform/vsp1/vsp1_drm.h2
-rw-r--r--drivers/media/platform/vsp1/vsp1_drv.c82
-rw-r--r--drivers/media/platform/vsp1/vsp1_entity.c163
-rw-r--r--drivers/media/platform/vsp1/vsp1_entity.h8
-rw-r--r--drivers/media/platform/vsp1/vsp1_hgo.c230
-rw-r--r--drivers/media/platform/vsp1/vsp1_hgo.h45
-rw-r--r--drivers/media/platform/vsp1/vsp1_hgt.c222
-rw-r--r--drivers/media/platform/vsp1/vsp1_hgt.h42
-rw-r--r--drivers/media/platform/vsp1/vsp1_histo.c646
-rw-r--r--drivers/media/platform/vsp1/vsp1_histo.h84
-rw-r--r--drivers/media/platform/vsp1/vsp1_hsit.c3
-rw-r--r--drivers/media/platform/vsp1/vsp1_lif.c6
-rw-r--r--drivers/media/platform/vsp1/vsp1_pipe.c59
-rw-r--r--drivers/media/platform/vsp1/vsp1_pipe.h9
-rw-r--r--drivers/media/platform/vsp1/vsp1_regs.h33
-rw-r--r--drivers/media/platform/vsp1/vsp1_rpf.c54
-rw-r--r--drivers/media/platform/vsp1/vsp1_rwpf.c11
-rw-r--r--drivers/media/platform/vsp1/vsp1_rwpf.h7
-rw-r--r--drivers/media/platform/vsp1/vsp1_sru.c3
-rw-r--r--drivers/media/platform/vsp1/vsp1_uds.c3
-rw-r--r--drivers/media/platform/vsp1/vsp1_video.c85
-rw-r--r--drivers/media/platform/vsp1/vsp1_wpf.c224
-rw-r--r--drivers/media/radio/si4713/si4713.c9
-rw-r--r--drivers/media/radio/wl128x/fmdrv_common.c5
-rw-r--r--drivers/media/rc/Kconfig9
-rw-r--r--drivers/media/rc/Makefile1
-rw-r--r--drivers/media/rc/gpio-ir-recv.c2
-rw-r--r--drivers/media/rc/igorplugusb.c2
-rw-r--r--drivers/media/rc/imon.c5
-rw-r--r--drivers/media/rc/ir-lirc-codec.c34
-rw-r--r--drivers/media/rc/ir-mce_kbd-decoder.c49
-rw-r--r--drivers/media/rc/keymaps/Makefile1
-rw-r--r--drivers/media/rc/keymaps/rc-dvico-mce.c92
-rw-r--r--drivers/media/rc/keymaps/rc-dvico-portable.c74
-rw-r--r--drivers/media/rc/keymaps/rc-lirc.c42
-rw-r--r--drivers/media/rc/lirc_dev.c122
-rw-r--r--drivers/media/rc/mceusb.c4
-rw-r--r--drivers/media/rc/rc-core-priv.h2
-rw-r--r--drivers/media/rc/rc-ir-raw.c6
-rw-r--r--drivers/media/rc/rc-main.c8
-rw-r--r--drivers/media/rc/serial_ir.c4
-rw-r--r--drivers/media/rc/sir_ir.c438
-rw-r--r--drivers/media/rc/st_rc.c15
-rw-r--r--drivers/media/rc/sunxi-cir.c21
-rw-r--r--drivers/media/rc/winbond-cir.c4
-rw-r--r--drivers/media/tuners/si2157.c23
-rw-r--r--drivers/media/tuners/si2157_priv.h2
-rw-r--r--drivers/media/tuners/xc5000.c3
-rw-r--r--drivers/media/usb/Kconfig1
-rw-r--r--drivers/media/usb/Makefile1
-rw-r--r--drivers/media/usb/au0828/au0828-cards.c2
-rw-r--r--drivers/media/usb/au0828/au0828-video.c7
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-audio.c42
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-cards.c48
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-i2c.c16
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf.c16
-rw-r--r--drivers/media/usb/dvb-usb/cxusb.c163
-rw-r--r--drivers/media/usb/dvb-usb/dib0700_core.c3
-rw-r--r--drivers/media/usb/dvb-usb/dibusb-mc-common.c2
-rw-r--r--drivers/media/usb/dvb-usb/digitv.c3
-rw-r--r--drivers/media/usb/dvb-usb/dw2102.c54
-rw-r--r--drivers/media/usb/dvb-usb/ttusb2.c19
-rw-r--r--drivers/media/usb/em28xx/Kconfig7
-rw-r--r--drivers/media/usb/em28xx/em28xx-camera.c107
-rw-r--r--drivers/media/usb/em28xx/em28xx-cards.c15
-rw-r--r--drivers/media/usb/em28xx/em28xx-reg.h18
-rw-r--r--drivers/media/usb/em28xx/em28xx-video.c13
-rw-r--r--drivers/media/usb/em28xx/em28xx.h1
-rw-r--r--drivers/media/usb/go7007/go7007-v4l2.c5
-rw-r--r--drivers/media/usb/gspca/konica.c3
-rw-r--r--drivers/media/usb/pulse8-cec/Kconfig2
-rw-r--r--drivers/media/usb/pulse8-cec/pulse8-cec.c6
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-eeprom.c13
-rw-r--r--drivers/media/usb/rainshadow-cec/Kconfig10
-rw-r--r--drivers/media/usb/rainshadow-cec/Makefile1
-rw-r--r--drivers/media/usb/rainshadow-cec/rainshadow-cec.c388
-rw-r--r--drivers/media/usb/stk1160/Kconfig6
-rw-r--r--drivers/media/usb/tm6000/tm6000-video.c2
-rw-r--r--drivers/media/usb/usbvision/usbvision-video.c9
-rw-r--r--drivers/media/usb/uvc/uvc_driver.c15
-rw-r--r--drivers/media/usb/uvc/uvc_video.c12
-rw-r--r--drivers/media/usb/uvc/uvcvideo.h9
-rw-r--r--drivers/media/usb/zr364xx/zr364xx.c8
-rw-r--r--drivers/media/v4l2-core/v4l2-compat-ioctl32.c24
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls.c8
-rw-r--r--drivers/media/v4l2-core/v4l2-dev.c16
-rw-r--r--drivers/media/v4l2-core/v4l2-device.c3
-rw-r--r--drivers/media/v4l2-core/v4l2-ioctl.c37
-rw-r--r--drivers/media/v4l2-core/videobuf2-core.c14
-rw-r--r--drivers/media/v4l2-core/videobuf2-dma-contig.c11
-rw-r--r--drivers/media/v4l2-core/videobuf2-dma-sg.c11
-rw-r--r--drivers/media/v4l2-core/videobuf2-memops.c6
-rw-r--r--drivers/media/v4l2-core/videobuf2-v4l2.c3
-rw-r--r--drivers/media/v4l2-core/videobuf2-vmalloc.c11
-rw-r--r--drivers/message/fusion/mptbase.c2
-rw-r--r--drivers/message/fusion/mptfc.c7
-rw-r--r--drivers/message/fusion/mptscsih.c2
-rw-r--r--drivers/message/fusion/mptspi.c10
-rw-r--r--drivers/misc/Kconfig296
-rw-r--r--drivers/misc/Makefile6
-rw-r--r--drivers/misc/aspeed-lpc-ctrl.c266
-rw-r--r--drivers/misc/c2port/c2port-duramar2150.c4
-rw-r--r--drivers/misc/cxl/api.c17
-rw-r--r--drivers/misc/cxl/context.c68
-rw-r--r--drivers/misc/cxl/cxl.h263
-rw-r--r--drivers/misc/cxl/debugfs.c41
-rw-r--r--drivers/misc/cxl/fault.c136
-rw-r--r--drivers/misc/cxl/file.c15
-rw-r--r--drivers/misc/cxl/guest.c10
-rw-r--r--drivers/misc/cxl/hcalls.c6
-rw-r--r--drivers/misc/cxl/irq.c53
-rw-r--r--drivers/misc/cxl/main.c12
-rw-r--r--drivers/misc/cxl/native.c339
-rw-r--r--drivers/misc/cxl/pci.c409
-rw-r--r--drivers/misc/cxl/trace.h43
-rw-r--r--drivers/misc/ds1682.c7
-rw-r--r--drivers/misc/eeprom/idt_89hpesx.c57
-rw-r--r--drivers/misc/enclosure.c7
-rw-r--r--drivers/misc/lkdtm.h1
-rw-r--r--drivers/misc/lkdtm_bugs.c13
-rw-r--r--drivers/misc/lkdtm_core.c1
-rw-r--r--drivers/misc/mei/Makefile1
-rw-r--r--drivers/misc/mei/amthif.c340
-rw-r--r--drivers/misc/mei/bus-fixup.c9
-rw-r--r--drivers/misc/mei/bus.c6
-rw-r--r--drivers/misc/mei/client.c14
-rw-r--r--drivers/misc/mei/client.h5
-rw-r--r--drivers/misc/mei/hbm.c29
-rw-r--r--drivers/misc/mei/init.c14
-rw-r--r--drivers/misc/mei/interrupt.c38
-rw-r--r--drivers/misc/mei/main.c125
-rw-r--r--drivers/misc/mei/mei_dev.h51
-rw-r--r--drivers/misc/mei/pci-me.c31
-rw-r--r--drivers/misc/mei/pci-txe.c29
-rw-r--r--drivers/misc/panel.c2439
-rw-r--r--drivers/misc/pci_endpoint_test.c534
-rw-r--r--drivers/misc/sram-exec.c3
-rw-r--r--drivers/misc/tsl2550.c7
-rw-r--r--drivers/misc/vmw_vmci/vmci_queue_pair.c10
-rw-r--r--drivers/mtd/nand/nandsim.c29
-rw-r--r--drivers/mtd/ubi/block.c7
-rw-r--r--drivers/mtd/ubi/build.c91
-rw-r--r--drivers/mtd/ubi/vmt.c49
-rw-r--r--drivers/net/bonding/bond_netlink.c3
-rw-r--r--drivers/net/can/rcar/rcar_canfd.c2
-rw-r--r--drivers/net/dsa/dsa_loop.c3
-rw-r--r--drivers/net/ethernet/amd/amd8111e.h4
-rw-r--r--drivers/net/ethernet/amd/atarilance.c4
-rw-r--r--drivers/net/ethernet/amd/declance.c2
-rw-r--r--drivers/net/ethernet/amd/sun3lance.c3
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_cfg.h2
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_nic.c6
-rw-r--r--drivers/net/ethernet/atheros/alx/main.c4
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_hw.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c3
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c4
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_ioc.c2
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad_ethtool.c4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_defs.h3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c29
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/l2t.c8
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/l2t.h1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c12
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h12
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c10
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c8
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c31
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c14
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/l2t.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sched.c12
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c38
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h6
-rw-r--r--drivers/net/ethernet/cirrus/mac89x0.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h2
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c561
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.h31
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_phy.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cmd.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mr.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/resource_tracker.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Kconfig2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib.c44
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib.h2
-rw-r--r--drivers/net/ethernet/natsemi/sonic.h2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_common.c4
-rw-r--r--drivers/net/ethernet/nvidia/forcedeth.c6
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_cxt.c1
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dev.c13
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dev_api.h4
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_int.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_main.c16
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mcp.h1
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sriov.c2
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_ethtool.c10
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_filter.c5
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_main.c25
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_dbg.c4
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c8
-rw-r--r--drivers/net/ethernet/smsc/smsc911x.c49
-rw-r--r--drivers/net/ethernet/smsc/smsc911x.h19
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c41
-rw-r--r--drivers/net/ethernet/ti/cpsw.c16
-rw-r--r--drivers/net/ethernet/toshiba/tc35815.c2
-rw-r--r--drivers/net/fddi/defxx.c2
-rw-r--r--drivers/net/hamradio/yam.c10
-rw-r--r--drivers/net/hippi/rrunner.c20
-rw-r--r--drivers/net/hyperv/netvsc.c8
-rw-r--r--drivers/net/hyperv/rndis_filter.c2
-rw-r--r--drivers/net/phy/mdio-mux-bcm-iproc.c5
-rw-r--r--drivers/net/usb/cdc_ncm.c11
-rw-r--r--drivers/net/usb/qmi_wwan.c1
-rw-r--r--drivers/net/virtio_net.c8
-rw-r--r--drivers/net/wimax/i2400m/i2400m-usb.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mac.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.c4
-rw-r--r--drivers/net/wireless/intel/iwlegacy/4965-mac.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/rx.c4
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c8
-rw-r--r--drivers/nvdimm/Kconfig1
-rw-r--r--drivers/nvdimm/btt_devs.c2
-rw-r--r--drivers/nvdimm/bus.c122
-rw-r--r--drivers/nvdimm/claim.c37
-rw-r--r--drivers/nvdimm/core.c51
-rw-r--r--drivers/nvdimm/dax_devs.c2
-rw-r--r--drivers/nvdimm/dimm.c2
-rw-r--r--drivers/nvdimm/dimm_devs.c24
-rw-r--r--drivers/nvdimm/namespace_devs.c17
-rw-r--r--drivers/nvdimm/nd-core.h1
-rw-r--r--drivers/nvdimm/nd.h2
-rw-r--r--drivers/nvdimm/pfn_devs.c8
-rw-r--r--drivers/nvdimm/pmem.c90
-rw-r--r--drivers/nvdimm/pmem.h7
-rw-r--r--drivers/nvdimm/region.c24
-rw-r--r--drivers/nvdimm/region_devs.c83
-rw-r--r--drivers/nvme/host/fc.c20
-rw-r--r--drivers/nvme/host/lightnvm.c4
-rw-r--r--drivers/nvme/host/pci.c45
-rw-r--r--drivers/nvme/host/rdma.c28
-rw-r--r--drivers/nvme/target/loop.c17
-rw-r--r--drivers/nvmem/Kconfig11
-rw-r--r--drivers/nvmem/Makefile2
-rw-r--r--drivers/nvmem/core.c3
-rw-r--r--drivers/nvmem/imx-iim.c173
-rw-r--r--drivers/nvmem/imx-ocotp.c254
-rw-r--r--drivers/nvmem/meson-efuse.c2
-rw-r--r--drivers/nvmem/sunxi_sid.c89
-rw-r--r--drivers/of/address.c2
-rw-r--r--drivers/of/base.c44
-rw-r--r--drivers/of/device.c43
-rw-r--r--drivers/of/fdt.c18
-rw-r--r--drivers/of/irq.c2
-rw-r--r--drivers/of/of_numa.c2
-rw-r--r--drivers/of/of_pci.c45
-rw-r--r--drivers/of/of_private.h12
-rw-r--r--drivers/of/of_reserved_mem.c2
-rw-r--r--drivers/of/platform.c10
-rw-r--r--drivers/of/resolver.c2
-rw-r--r--drivers/of/unittest-data/Makefile17
-rw-r--r--drivers/of/unittest-data/overlay.dts53
-rw-r--r--drivers/of/unittest-data/overlay_bad_phandle.dts20
-rw-r--r--drivers/of/unittest-data/overlay_base.dts80
-rw-r--r--drivers/of/unittest.c321
-rw-r--r--drivers/pci/Kconfig2
-rw-r--r--drivers/pci/Makefile3
-rw-r--r--drivers/pci/access.c61
-rw-r--r--drivers/pci/dwc/Kconfig36
-rw-r--r--drivers/pci/dwc/Makefile5
-rw-r--r--drivers/pci/dwc/pci-dra7xx.c293
-rw-r--r--drivers/pci/dwc/pci-exynos.c14
-rw-r--r--drivers/pci/dwc/pci-imx6.c199
-rw-r--r--drivers/pci/dwc/pci-keystone-dw.c2
-rw-r--r--drivers/pci/dwc/pci-layerscape.c3
-rw-r--r--drivers/pci/dwc/pcie-armada8k.c3
-rw-r--r--drivers/pci/dwc/pcie-artpec6.c12
-rw-r--r--drivers/pci/dwc/pcie-designware-ep.c342
-rw-r--r--drivers/pci/dwc/pcie-designware-host.c39
-rw-r--r--drivers/pci/dwc/pcie-designware-plat.c1
-rw-r--r--drivers/pci/dwc/pcie-designware.c258
-rw-r--r--drivers/pci/dwc/pcie-designware.h135
-rw-r--r--drivers/pci/dwc/pcie-hisi.c9
-rw-r--r--drivers/pci/dwc/pcie-qcom.c2
-rw-r--r--drivers/pci/dwc/pcie-spear13xx.c3
-rw-r--r--drivers/pci/ecam.c6
-rw-r--r--drivers/pci/endpoint/Kconfig31
-rw-r--r--drivers/pci/endpoint/Makefile7
-rw-r--r--drivers/pci/endpoint/functions/Kconfig12
-rw-r--r--drivers/pci/endpoint/functions/Makefile5
-rw-r--r--drivers/pci/endpoint/functions/pci-epf-test.c510
-rw-r--r--drivers/pci/endpoint/pci-ep-cfs.c509
-rw-r--r--drivers/pci/endpoint/pci-epc-core.c580
-rw-r--r--drivers/pci/endpoint/pci-epc-mem.c143
-rw-r--r--drivers/pci/endpoint/pci-epf-core.c359
-rw-r--r--drivers/pci/host/Kconfig10
-rw-r--r--drivers/pci/host/Makefile1
-rw-r--r--drivers/pci/host/pci-aardvark.c173
-rw-r--r--drivers/pci/host/pci-ftpci100.c563
-rw-r--r--drivers/pci/host/pci-host-generic.c1
-rw-r--r--drivers/pci/host/pci-hyperv.c46
-rw-r--r--drivers/pci/host/pci-mvebu.c22
-rw-r--r--drivers/pci/host/pci-tegra.c4
-rw-r--r--drivers/pci/host/pci-thunder-ecam.c1
-rw-r--r--drivers/pci/host/pci-thunder-pem.c1
-rw-r--r--drivers/pci/host/pci-versatile.c4
-rw-r--r--drivers/pci/host/pci-xgene.c5
-rw-r--r--drivers/pci/host/pcie-iproc-platform.c3
-rw-r--r--drivers/pci/host/pcie-rockchip.c91
-rw-r--r--drivers/pci/host/pcie-xilinx-nwl.c2
-rw-r--r--drivers/pci/host/pcie-xilinx.c2
-rw-r--r--drivers/pci/hotplug/pciehp_pci.c6
-rw-r--r--drivers/pci/iov.c1
-rw-r--r--drivers/pci/irq.c61
-rw-r--r--drivers/pci/mmap.c99
-rw-r--r--drivers/pci/msi.c17
-rw-r--r--drivers/pci/pci-driver.c24
-rw-r--r--drivers/pci/pci-sysfs.c104
-rw-r--r--drivers/pci/pci.c252
-rw-r--r--drivers/pci/pci.h19
-rw-r--r--drivers/pci/pcie/pcie-dpc.c5
-rw-r--r--drivers/pci/probe.c32
-rw-r--r--drivers/pci/proc.c41
-rw-r--r--drivers/pci/quirks.c82
-rw-r--r--drivers/pci/search.c4
-rw-r--r--drivers/pci/setup-bus.c4
-rw-r--r--drivers/pci/setup-res.c2
-rw-r--r--drivers/pci/switch/Kconfig13
-rw-r--r--drivers/pci/switch/Makefile1
-rw-r--r--drivers/pci/switch/switchtec.c1600
-rw-r--r--drivers/pcmcia/electra_cf.c4
-rw-r--r--drivers/perf/Kconfig14
-rw-r--r--drivers/perf/Makefile4
-rw-r--r--drivers/perf/arm_pmu.c530
-rw-r--r--drivers/perf/arm_pmu_acpi.c256
-rw-r--r--drivers/perf/arm_pmu_platform.c235
-rw-r--r--drivers/perf/qcom_l3_pmu.c849
-rw-r--r--drivers/phy/Kconfig19
-rw-r--r--drivers/phy/Makefile2
-rw-r--r--drivers/phy/phy-bcm-ns-usb3.c69
-rw-r--r--drivers/phy/phy-exynos-dp-video.c6
-rw-r--r--drivers/phy/phy-exynos-mipi-video.c71
-rw-r--r--drivers/phy/phy-exynos-pcie.c8
-rw-r--r--drivers/phy/phy-exynos5-usbdrd.c6
-rw-r--r--drivers/phy/phy-meson8b-usb2.c6
-rw-r--r--drivers/phy/phy-mt65xx-usb3.c477
-rw-r--r--drivers/phy/phy-qcom-qmp.c1153
-rw-r--r--drivers/phy/phy-qcom-qusb2.c493
-rw-r--r--drivers/phy/phy-rcar-gen3-usb2.c31
-rw-r--r--drivers/phy/phy-rockchip-inno-usb2.c53
-rw-r--r--drivers/phy/phy-rockchip-usb.c19
-rw-r--r--drivers/phy/phy-sun4i-usb.c58
-rw-r--r--drivers/platform/chrome/cros_ec_dev.c31
-rw-r--r--drivers/platform/goldfish/goldfish_pipe.c970
-rw-r--r--drivers/platform/x86/Kconfig18
-rw-r--r--drivers/platform/x86/Makefile1
-rw-r--r--drivers/platform/x86/acer-wmi.c14
-rw-r--r--drivers/platform/x86/asus-laptop.c8
-rw-r--r--drivers/platform/x86/asus-nb-wmi.c22
-rw-r--r--drivers/platform/x86/asus-wmi.c24
-rw-r--r--drivers/platform/x86/asus-wmi.h1
-rw-r--r--drivers/platform/x86/dell-laptop.c254
-rw-r--r--drivers/platform/x86/dell-smbios.c20
-rw-r--r--drivers/platform/x86/dell-smbios.h11
-rw-r--r--drivers/platform/x86/dell-wmi-aio.c6
-rw-r--r--drivers/platform/x86/dell-wmi.c20
-rw-r--r--drivers/platform/x86/eeepc-laptop.c10
-rw-r--r--drivers/platform/x86/fujitsu-laptop.c1216
-rw-r--r--drivers/platform/x86/hp-wireless.c27
-rw-r--r--drivers/platform/x86/hp-wmi.c414
-rw-r--r--drivers/platform/x86/ideapad-laptop.c19
-rw-r--r--drivers/platform/x86/intel-hid.c51
-rw-r--r--drivers/platform/x86/intel-vbtn.c4
-rw-r--r--drivers/platform/x86/intel_cht_int33fe.c143
-rw-r--r--drivers/platform/x86/intel_pmc_ipc.c156
-rw-r--r--drivers/platform/x86/intel_scu_ipc.c106
-rw-r--r--drivers/platform/x86/msi-laptop.c14
-rw-r--r--drivers/platform/x86/msi-wmi.c9
-rw-r--r--drivers/platform/x86/panasonic-laptop.c18
-rw-r--r--drivers/platform/x86/silead_dmi.c80
-rw-r--r--drivers/platform/x86/surface3_button.c5
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c99
-rw-r--r--drivers/platform/x86/topstar-laptop.c5
-rw-r--r--drivers/platform/x86/toshiba-wmi.c5
-rw-r--r--drivers/platform/x86/toshiba_acpi.c8
-rw-r--r--drivers/powercap/intel_rapl.c1
-rw-r--r--drivers/pps/pps.c123
-rw-r--r--drivers/pwm/Kconfig9
-rw-r--r--drivers/pwm/Makefile1
-rw-r--r--drivers/pwm/pwm-atmel-hlcdc.c260
-rw-r--r--drivers/pwm/pwm-atmel.c276
-rw-r--r--drivers/pwm/pwm-mediatek.c219
-rw-r--r--drivers/pwm/pwm-pca9685.c112
-rw-r--r--drivers/pwm/pwm-tegra.c37
-rw-r--r--drivers/rapidio/devices/rio_mport_cdev.c24
-rw-r--r--drivers/rapidio/rio-sysfs.c76
-rw-r--r--drivers/rapidio/rio.c3
-rw-r--r--drivers/rapidio/rio.h2
-rw-r--r--drivers/reset/Kconfig14
-rw-r--r--drivers/reset/Makefile3
-rw-r--r--drivers/reset/reset-a10sr.c138
-rw-r--r--drivers/reset/reset-ath79.c27
-rw-r--r--drivers/reset/reset-imx7.c158
-rw-r--r--drivers/reset/reset-meson.c12
-rw-r--r--drivers/reset/reset-oxnas.c6
-rw-r--r--drivers/reset/reset-pistachio.c9
-rw-r--r--drivers/reset/reset-socfpga.c13
-rw-r--r--drivers/reset/reset-sunxi.c18
-rw-r--r--drivers/reset/reset-uniphier.c37
-rw-r--r--drivers/rtc/class.c14
-rw-r--r--drivers/rtc/rtc-core.h10
-rw-r--r--drivers/rtc/rtc-dev.c17
-rw-r--r--drivers/s390/block/Kconfig1
-rw-r--r--drivers/s390/block/dcssblk.c45
-rw-r--r--drivers/s390/char/sclp_early.c4
-rw-r--r--drivers/scsi/BusLogic.c14
-rw-r--r--drivers/scsi/BusLogic.h2
-rw-r--r--drivers/scsi/aacraid/aachba.c13
-rw-r--r--drivers/scsi/aacraid/commctrl.c6
-rw-r--r--drivers/scsi/aacraid/comminit.c3
-rw-r--r--drivers/scsi/aacraid/commsup.c20
-rw-r--r--drivers/scsi/aacraid/linit.c8
-rw-r--r--drivers/scsi/aacraid/rx.c16
-rw-r--r--drivers/scsi/advansys.c21
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_pci.c4
-rw-r--r--drivers/scsi/aic94xx/aic94xx_init.c1
-rw-r--r--drivers/scsi/be2iscsi/be.h12
-rw-r--r--drivers/scsi/be2iscsi/be_cmds.c17
-rw-r--r--drivers/scsi/be2iscsi/be_cmds.h74
-rw-r--r--drivers/scsi/be2iscsi/be_iscsi.c111
-rw-r--r--drivers/scsi/be2iscsi/be_iscsi.h13
-rw-r--r--drivers/scsi/be2iscsi/be_main.c397
-rw-r--r--drivers/scsi/be2iscsi/be_main.h30
-rw-r--r--drivers/scsi/be2iscsi/be_mgmt.c140
-rw-r--r--drivers/scsi/be2iscsi/be_mgmt.h43
-rw-r--r--drivers/scsi/bfa/bfa_core.c66
-rw-r--r--drivers/scsi/bfa/bfa_fcpim.c37
-rw-r--r--drivers/scsi/bfa/bfa_fcs_lport.c31
-rw-r--r--drivers/scsi/bfa/bfa_ioc.c30
-rw-r--r--drivers/scsi/bfa/bfa_modules.h101
-rw-r--r--drivers/scsi/bfa/bfa_svc.c172
-rw-r--r--drivers/scsi/csiostor/csio_hw.h1
-rw-r--r--drivers/scsi/csiostor/csio_isr.c128
-rw-r--r--drivers/scsi/cxgbi/cxgb4i/cxgb4i.c2
-rw-r--r--drivers/scsi/cxlflash/common.h137
-rw-r--r--drivers/scsi/cxlflash/lunmgt.c4
-rw-r--r--drivers/scsi/cxlflash/main.c1162
-rw-r--r--drivers/scsi/cxlflash/main.h2
-rw-r--r--drivers/scsi/cxlflash/sislite.h124
-rw-r--r--drivers/scsi/cxlflash/superpipe.c16
-rw-r--r--drivers/scsi/cxlflash/superpipe.h56
-rw-r--r--drivers/scsi/cxlflash/vlun.c99
-rw-r--r--drivers/scsi/cxlflash/vlun.h2
-rw-r--r--drivers/scsi/esas2r/esas2r_log.c5
-rw-r--r--drivers/scsi/fcoe/fcoe.c4
-rw-r--r--drivers/scsi/fnic/fnic.h3
-rw-r--r--drivers/scsi/fnic/fnic_fcs.c23
-rw-r--r--drivers/scsi/fnic/fnic_isr.c41
-rw-r--r--drivers/scsi/fnic/fnic_scsi.c105
-rw-r--r--drivers/scsi/fnic/fnic_stats.h16
-rw-r--r--drivers/scsi/fnic/fnic_trace.c49
-rw-r--r--drivers/scsi/hisi_sas/Kconfig1
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas.h19
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_main.c471
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v1_hw.c19
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v2_hw.c1201
-rw-r--r--drivers/scsi/hpsa.c6
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.c6
-rw-r--r--drivers/scsi/ipr.c259
-rw-r--r--drivers/scsi/ipr.h4
-rw-r--r--drivers/scsi/isci/init.c1
-rw-r--r--drivers/scsi/isci/registers.h4
-rw-r--r--drivers/scsi/iscsi_tcp.c7
-rw-r--r--drivers/scsi/libfc/fc_fcp.c6
-rw-r--r--drivers/scsi/libfc/fc_lport.c20
-rw-r--r--drivers/scsi/libiscsi.c8
-rw-r--r--drivers/scsi/libsas/sas_init.c7
-rw-r--r--drivers/scsi/libsas/sas_scsi_host.c5
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c4
-rw-r--r--drivers/scsi/mac_esp.c35
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c2
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c2
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.h2
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c1
-rw-r--r--drivers/scsi/mvsas/mv_init.c1
-rw-r--r--drivers/scsi/mvumi.c85
-rw-r--r--drivers/scsi/osd/osd_uld.c64
-rw-r--r--drivers/scsi/pm8001/pm8001_init.c1
-rw-r--r--drivers/scsi/pmcraid.c234
-rw-r--r--drivers/scsi/pmcraid.h8
-rw-r--r--drivers/scsi/qedf/qedf_debugfs.c2
-rw-r--r--drivers/scsi/qedi/qedi_debugfs.c2
-rw-r--r--drivers/scsi/qedi/qedi_iscsi.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_gs.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c6
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c6
-rw-r--r--drivers/scsi/qla4xxx/ql4_init.c2
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c1
-rw-r--r--drivers/scsi/scsi_error.c184
-rw-r--r--drivers/scsi/scsi_lib.c17
-rw-r--r--drivers/scsi/scsi_priv.h3
-rw-r--r--drivers/scsi/scsi_transport_fc.c12
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c3
-rw-r--r--drivers/scsi/scsi_transport_sas.c8
-rw-r--r--drivers/scsi/sd.c127
-rw-r--r--drivers/scsi/sd.h14
-rw-r--r--drivers/scsi/sd_zbc.c58
-rw-r--r--drivers/scsi/ses.c1
-rw-r--r--drivers/scsi/sg.c284
-rw-r--r--drivers/scsi/sgiwd93.c2
-rw-r--r--drivers/scsi/sni_53c710.c2
-rw-r--r--drivers/scsi/snic/snic_debugfs.c2
-rw-r--r--drivers/scsi/stex.c287
-rw-r--r--drivers/scsi/storvsc_drv.c27
-rw-r--r--drivers/scsi/ufs/ufshcd.c102
-rw-r--r--drivers/scsi/ufs/ufshci.h6
-rw-r--r--drivers/scsi/virtio_scsi.c24
-rw-r--r--drivers/scsi/xen-scsifront.c2
-rw-r--r--drivers/scsi/zalon.c2
-rw-r--r--drivers/soc/Kconfig2
-rw-r--r--drivers/soc/Makefile2
-rw-r--r--drivers/soc/atmel/Kconfig6
-rw-r--r--drivers/soc/atmel/Makefile1
-rw-r--r--drivers/soc/atmel/soc.c231
-rw-r--r--drivers/soc/atmel/soc.h (renamed from arch/arm/mach-at91/soc.h)0
-rw-r--r--drivers/soc/bcm/brcmstb/common.c9
-rw-r--r--drivers/soc/fsl/qbman/qman_priv.h1
-rw-r--r--drivers/soc/imx/Kconfig9
-rw-r--r--drivers/soc/imx/Makefile2
-rw-r--r--drivers/soc/imx/gpc.c489
-rw-r--r--drivers/soc/imx/gpcv2.c363
-rw-r--r--drivers/soc/renesas/r8a7795-sysc.c26
-rw-r--r--drivers/soc/renesas/rcar-sysc.c25
-rw-r--r--drivers/soc/renesas/rcar-sysc.h10
-rw-r--r--drivers/soc/renesas/renesas-soc.c18
-rw-r--r--drivers/soc/samsung/Kconfig8
-rw-r--r--drivers/soc/samsung/Makefile4
-rw-r--r--drivers/soc/samsung/exynos-pmu.c22
-rw-r--r--drivers/soc/samsung/exynos-pmu.h3
-rw-r--r--drivers/soc/tegra/Kconfig22
-rw-r--r--drivers/soc/tegra/Makefile4
-rw-r--r--drivers/soc/tegra/flowctrl.c224
-rw-r--r--drivers/soc/tegra/fuse/fuse-tegra.c4
-rw-r--r--drivers/soc/tegra/pmc-tegra186.c169
-rw-r--r--drivers/soc/ti/Kconfig12
-rw-r--r--drivers/soc/ti/Makefile1
-rw-r--r--drivers/soc/ti/ti_sci_pm_domains.c202
-rw-r--r--drivers/soc/zte/zx296718_pm_domains.c1
-rw-r--r--drivers/soc/zte/zx2967_pm_domains.c4
-rw-r--r--drivers/staging/Kconfig8
-rw-r--r--drivers/staging/Makefile6
-rw-r--r--drivers/staging/android/Kconfig10
-rw-r--r--drivers/staging/android/Makefile1
-rw-r--r--drivers/staging/android/TODO21
-rw-r--r--drivers/staging/android/ion/Kconfig56
-rw-r--r--drivers/staging/android/ion/Makefile18
-rw-r--r--drivers/staging/android/ion/compat_ion.c195
-rw-r--r--drivers/staging/android/ion/compat_ion.h29
-rw-r--r--drivers/staging/android/ion/hisilicon/Kconfig5
-rw-r--r--drivers/staging/android/ion/hisilicon/Makefile1
-rw-r--r--drivers/staging/android/ion/hisilicon/hi6220_ion.c113
-rw-r--r--drivers/staging/android/ion/ion-ioctl.c85
-rw-r--r--drivers/staging/android/ion/ion.c1168
-rw-r--r--drivers/staging/android/ion/ion.h387
-rw-r--r--drivers/staging/android/ion/ion_carveout_heap.c37
-rw-r--r--drivers/staging/android/ion/ion_chunk_heap.c27
-rw-r--r--drivers/staging/android/ion/ion_cma_heap.c125
-rw-r--r--drivers/staging/android/ion/ion_dummy_driver.c156
-rw-r--r--drivers/staging/android/ion/ion_heap.c68
-rw-r--r--drivers/staging/android/ion/ion_of.c184
-rw-r--r--drivers/staging/android/ion/ion_of.h37
-rw-r--r--drivers/staging/android/ion/ion_page_pool.c6
-rw-r--r--drivers/staging/android/ion/ion_priv.h473
-rw-r--r--drivers/staging/android/ion/ion_system_heap.c53
-rw-r--r--drivers/staging/android/ion/ion_test.c305
-rw-r--r--drivers/staging/android/ion/tegra/Makefile1
-rw-r--r--drivers/staging/android/ion/tegra/tegra_ion.c80
-rw-r--r--drivers/staging/android/lowmemorykiller.c212
-rw-r--r--drivers/staging/android/uapi/ion.h86
-rw-r--r--drivers/staging/android/uapi/ion_test.h69
-rw-r--r--drivers/staging/bcm2835-audio/Kconfig7
-rw-r--r--drivers/staging/bcm2835-audio/bcm2835-ctl.c345
-rw-r--r--drivers/staging/bcm2835-audio/bcm2835-pcm.c554
-rw-r--r--drivers/staging/bcm2835-audio/bcm2835-vchiq.c912
-rw-r--r--drivers/staging/bcm2835-audio/bcm2835.c250
-rw-r--r--drivers/staging/bcm2835-audio/bcm2835.h167
-rw-r--r--drivers/staging/ccree/Documentation/devicetree/bindings/crypto/arm-cryptocell.txt27
-rw-r--r--drivers/staging/ccree/Kconfig43
-rw-r--r--drivers/staging/ccree/Makefile3
-rw-r--r--drivers/staging/ccree/TODO30
-rw-r--r--drivers/staging/ccree/cc_bitops.h62
-rw-r--r--drivers/staging/ccree/cc_crypto_ctx.h299
-rw-r--r--drivers/staging/ccree/cc_hal.h30
-rw-r--r--drivers/staging/ccree/cc_hw_queue_defs.h603
-rw-r--r--drivers/staging/ccree/cc_lli_defs.h57
-rw-r--r--drivers/staging/ccree/cc_pal_log.h188
-rw-r--r--drivers/staging/ccree/cc_pal_log_plat.h33
-rw-r--r--drivers/staging/ccree/cc_pal_types.h97
-rw-r--r--drivers/staging/ccree/cc_pal_types_plat.h29
-rw-r--r--drivers/staging/ccree/cc_regs.h106
-rw-r--r--drivers/staging/ccree/dx_crys_kernel.h180
-rw-r--r--drivers/staging/ccree/dx_env.h224
-rw-r--r--drivers/staging/ccree/dx_host.h155
-rw-r--r--drivers/staging/ccree/dx_reg_base_host.h34
-rw-r--r--drivers/staging/ccree/dx_reg_common.h26
-rw-r--r--drivers/staging/ccree/hash_defs.h78
-rw-r--r--drivers/staging/ccree/hw_queue_defs_plat.h43
-rw-r--r--drivers/staging/ccree/ssi_aead.c2832
-rw-r--r--drivers/staging/ccree/ssi_aead.h120
-rw-r--r--drivers/staging/ccree/ssi_buffer_mgr.c1873
-rw-r--r--drivers/staging/ccree/ssi_buffer_mgr.h105
-rw-r--r--drivers/staging/ccree/ssi_cipher.c1503
-rw-r--r--drivers/staging/ccree/ssi_cipher.h89
-rw-r--r--drivers/staging/ccree/ssi_config.h61
-rw-r--r--drivers/staging/ccree/ssi_driver.c556
-rw-r--r--drivers/staging/ccree/ssi_driver.h228
-rw-r--r--drivers/staging/ccree/ssi_fips.c65
-rw-r--r--drivers/staging/ccree/ssi_fips.h70
-rw-r--r--drivers/staging/ccree/ssi_fips_data.h315
-rw-r--r--drivers/staging/ccree/ssi_fips_ext.c96
-rw-r--r--drivers/staging/ccree/ssi_fips_ll.c1681
-rw-r--r--drivers/staging/ccree/ssi_fips_local.c369
-rw-r--r--drivers/staging/ccree/ssi_fips_local.h77
-rw-r--r--drivers/staging/ccree/ssi_hash.c2758
-rw-r--r--drivers/staging/ccree/ssi_hash.h101
-rw-r--r--drivers/staging/ccree/ssi_ivgen.c301
-rw-r--r--drivers/staging/ccree/ssi_ivgen.h72
-rw-r--r--drivers/staging/ccree/ssi_pm.c150
-rw-r--r--drivers/staging/ccree/ssi_pm.h46
-rw-r--r--drivers/staging/ccree/ssi_pm_ext.c60
-rw-r--r--drivers/staging/ccree/ssi_pm_ext.h33
-rw-r--r--drivers/staging/ccree/ssi_request_mgr.c713
-rw-r--r--drivers/staging/ccree/ssi_request_mgr.h60
-rw-r--r--drivers/staging/ccree/ssi_sram_mgr.c138
-rw-r--r--drivers/staging/ccree/ssi_sram_mgr.h80
-rw-r--r--drivers/staging/ccree/ssi_sysfs.c439
-rw-r--r--drivers/staging/ccree/ssi_sysfs.h54
-rw-r--r--drivers/staging/comedi/Kconfig5
-rw-r--r--drivers/staging/comedi/comedi_buf.c24
-rw-r--r--drivers/staging/comedi/comedi_fops.c22
-rw-r--r--drivers/staging/comedi/comedi_internal.h2
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_3xxx.c4
-rw-r--r--drivers/staging/comedi/drivers/amplc_pci224.c24
-rw-r--r--drivers/staging/comedi/drivers/amplc_pci230.c12
-rw-r--r--drivers/staging/comedi/drivers/cb_pcidas64.c6
-rw-r--r--drivers/staging/comedi/drivers/comedi_test.c8
-rw-r--r--drivers/staging/comedi/drivers/jr3_pci.c330
-rw-r--r--drivers/staging/comedi/drivers/jr3_pci.h18
-rw-r--r--drivers/staging/comedi/drivers/ni_atmio.c2
-rw-r--r--drivers/staging/comedi/drivers/ni_usb6501.c2
-rw-r--r--drivers/staging/comedi/drivers/s626.c50
-rw-r--r--drivers/staging/dgnc/TODO3
-rw-r--r--drivers/staging/dgnc/dgnc_cls.c154
-rw-r--r--drivers/staging/dgnc/dgnc_cls.h42
-rw-r--r--drivers/staging/dgnc/dgnc_driver.c109
-rw-r--r--drivers/staging/dgnc/dgnc_driver.h340
-rw-r--r--drivers/staging/dgnc/dgnc_mgmt.c67
-rw-r--r--drivers/staging/dgnc/dgnc_mgmt.h7
-rw-r--r--drivers/staging/dgnc/dgnc_neo.c228
-rw-r--r--drivers/staging/dgnc/dgnc_neo.h85
-rw-r--r--drivers/staging/dgnc/dgnc_pci.h13
-rw-r--r--drivers/staging/dgnc/dgnc_tty.c577
-rw-r--r--drivers/staging/dgnc/dgnc_tty.h6
-rw-r--r--drivers/staging/dgnc/dgnc_utils.c9
-rw-r--r--drivers/staging/dgnc/dgnc_utils.h6
-rw-r--r--drivers/staging/dgnc/digi.h134
-rw-r--r--drivers/staging/emxx_udc/emxx_udc.h2
-rw-r--r--drivers/staging/fbtft/Kconfig6
-rw-r--r--drivers/staging/fbtft/Makefile1
-rw-r--r--drivers/staging/fbtft/fb_agm1264k-fl.c2
-rw-r--r--drivers/staging/fbtft/fb_ili9163.c2
-rw-r--r--drivers/staging/fbtft/fb_ili9325.c2
-rw-r--r--drivers/staging/fbtft/fb_ili9481.c2
-rw-r--r--drivers/staging/fbtft/fb_ili9486.c2
-rw-r--r--drivers/staging/fbtft/fb_ra8875.c2
-rw-r--r--drivers/staging/fbtft/fb_s6d02a1.c2
-rw-r--r--drivers/staging/fbtft/fb_sh1106.c195
-rw-r--r--drivers/staging/fbtft/fb_ssd1289.c2
-rw-r--r--drivers/staging/fbtft/fb_ssd1331.c8
-rw-r--r--drivers/staging/fbtft/fb_st7735r.c2
-rw-r--r--drivers/staging/fbtft/fb_watterott.c6
-rw-r--r--drivers/staging/fbtft/fbtft-bus.c33
-rw-r--r--drivers/staging/fbtft/fbtft-core.c74
-rw-r--r--drivers/staging/fbtft/fbtft-sysfs.c16
-rw-r--r--drivers/staging/fbtft/fbtft.h5
-rw-r--r--drivers/staging/fbtft/fbtft_device.c23
-rw-r--r--drivers/staging/fbtft/flexfb.c20
-rw-r--r--drivers/staging/fsl-dpaa2/Kconfig17
-rw-r--r--drivers/staging/fsl-dpaa2/Makefile5
-rw-r--r--drivers/staging/fsl-dpaa2/ethernet/Makefile10
-rw-r--r--drivers/staging/fsl-dpaa2/ethernet/README186
-rw-r--r--drivers/staging/fsl-dpaa2/ethernet/TODO18
-rw-r--r--drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-trace.h185
-rw-r--r--drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c2520
-rw-r--r--drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h348
-rw-r--r--drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c279
-rw-r--r--drivers/staging/fsl-dpaa2/ethernet/dpkg.h176
-rw-r--r--drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h541
-rw-r--r--drivers/staging/fsl-dpaa2/ethernet/dpni.c1595
-rw-r--r--drivers/staging/fsl-dpaa2/ethernet/dpni.h832
-rw-r--r--drivers/staging/fsl-dpaa2/ethernet/net.h480
-rw-r--r--drivers/staging/fsl-mc/bus/Kconfig10
-rw-r--r--drivers/staging/fsl-mc/bus/Makefile6
-rw-r--r--drivers/staging/fsl-mc/bus/dpcon-cmd.h69
-rw-r--r--drivers/staging/fsl-mc/bus/dpcon.c317
-rw-r--r--drivers/staging/fsl-mc/bus/dpio/Makefile9
-rw-r--r--drivers/staging/fsl-mc/bus/dpio/dpio-cmd.h75
-rw-r--r--drivers/staging/fsl-mc/bus/dpio/dpio-driver.c296
-rw-r--r--drivers/staging/fsl-mc/bus/dpio/dpio-driver.txt135
-rw-r--r--drivers/staging/fsl-mc/bus/dpio/dpio-service.c618
-rw-r--r--drivers/staging/fsl-mc/bus/dpio/dpio.c224
-rw-r--r--drivers/staging/fsl-mc/bus/dpio/dpio.h109
-rw-r--r--drivers/staging/fsl-mc/bus/dpio/qbman-portal.c1035
-rw-r--r--drivers/staging/fsl-mc/bus/dpio/qbman-portal.h469
-rw-r--r--drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c4
-rw-r--r--drivers/staging/fsl-mc/include/dpaa2-fd.h451
-rw-r--r--drivers/staging/fsl-mc/include/dpaa2-global.h202
-rw-r--r--drivers/staging/fsl-mc/include/dpaa2-io.h139
-rw-r--r--drivers/staging/fsl-mc/include/dpcon.h115
-rw-r--r--drivers/staging/gdm724x/gdm_lte.c17
-rw-r--r--drivers/staging/gdm724x/gdm_lte.h2
-rw-r--r--drivers/staging/gdm724x/gdm_mux.c7
-rw-r--r--drivers/staging/gdm724x/gdm_tty.c3
-rw-r--r--drivers/staging/goldfish/goldfish_nand.c16
-rw-r--r--drivers/staging/greybus/Documentation/firmware/authenticate.c2
-rw-r--r--drivers/staging/greybus/Documentation/firmware/firmware.c21
-rw-r--r--drivers/staging/greybus/connection.c3
-rw-r--r--drivers/staging/greybus/gbphy.c1
-rw-r--r--drivers/staging/greybus/light.c1
-rw-r--r--drivers/staging/greybus/loopback.c5
-rw-r--r--drivers/staging/greybus/tools/loopback_test.c6
-rw-r--r--drivers/staging/greybus/uart.c3
-rw-r--r--drivers/staging/iio/accel/Makefile7
-rw-r--r--drivers/staging/iio/accel/adis16201.c382
-rw-r--r--drivers/staging/iio/accel/adis16201.h144
-rw-r--r--drivers/staging/iio/accel/adis16201_core.c248
-rw-r--r--drivers/staging/iio/accel/adis16203.c332
-rw-r--r--drivers/staging/iio/accel/adis16203.h125
-rw-r--r--drivers/staging/iio/accel/adis16203_core.c216
-rw-r--r--drivers/staging/iio/accel/adis16209.c383
-rw-r--r--drivers/staging/iio/accel/adis16209.h144
-rw-r--r--drivers/staging/iio/accel/adis16209_core.c248
-rw-r--r--drivers/staging/iio/accel/adis16240.c459
-rw-r--r--drivers/staging/iio/accel/adis16240.h179
-rw-r--r--drivers/staging/iio/accel/adis16240_core.c301
-rw-r--r--drivers/staging/iio/adc/Kconfig22
-rw-r--r--drivers/staging/iio/adc/Makefile2
-rw-r--r--drivers/staging/iio/adc/ad7192.c20
-rw-r--r--drivers/staging/iio/adc/ad7280a.c34
-rw-r--r--drivers/staging/iio/adc/ad7606.c9
-rw-r--r--drivers/staging/iio/adc/ad7606.h3
-rw-r--r--drivers/staging/iio/adc/ad7780.c2
-rw-r--r--drivers/staging/iio/adc/lpc32xx_adc.c215
-rw-r--r--drivers/staging/iio/addac/adt7316.c113
-rw-r--r--drivers/staging/iio/cdc/ad7150.c2
-rw-r--r--drivers/staging/iio/cdc/ad7152.c23
-rw-r--r--drivers/staging/iio/cdc/ad7746.c50
-rw-r--r--drivers/staging/iio/frequency/ad9832.c119
-rw-r--r--drivers/staging/iio/frequency/ad9832.h92
-rw-r--r--drivers/staging/iio/frequency/ad9834.c86
-rw-r--r--drivers/staging/iio/frequency/ad9834.h72
-rw-r--r--drivers/staging/iio/gyro/adis16060_core.c37
-rw-r--r--drivers/staging/iio/impedance-analyzer/ad5933.c66
-rw-r--r--drivers/staging/iio/light/isl29028.c37
-rw-r--r--drivers/staging/iio/light/tsl2x7x_core.c4
-rw-r--r--drivers/staging/iio/meter/ade7753.c82
-rw-r--r--drivers/staging/iio/meter/ade7753.h72
-rw-r--r--drivers/staging/iio/meter/ade7754.c124
-rw-r--r--drivers/staging/iio/meter/ade7754.h90
-rw-r--r--drivers/staging/iio/meter/ade7759.c74
-rw-r--r--drivers/staging/iio/meter/ade7759.h53
-rw-r--r--drivers/staging/iio/meter/ade7854.c4
-rw-r--r--drivers/staging/iio/meter/meter.h60
-rw-r--r--drivers/staging/iio/resolver/ad2s1200.c2
-rw-r--r--drivers/staging/iio/resolver/ad2s1210.c28
-rw-r--r--drivers/staging/iio/resolver/ad2s90.c2
-rw-r--r--drivers/staging/ks7010/TODO4
-rw-r--r--drivers/staging/ks7010/eap_packet.h11
-rw-r--r--drivers/staging/ks7010/ks7010_sdio.c899
-rw-r--r--drivers/staging/ks7010/ks7010_sdio.h102
-rw-r--r--drivers/staging/ks7010/ks_hostif.c1081
-rw-r--r--drivers/staging/ks7010/ks_hostif.h442
-rw-r--r--drivers/staging/ks7010/ks_wlan.h58
-rw-r--r--drivers/staging/ks7010/ks_wlan_ioctl.h68
-rw-r--r--drivers/staging/ks7010/ks_wlan_net.c1302
-rw-r--r--drivers/staging/ks7010/michael_mic.c12
-rw-r--r--drivers/staging/ks7010/michael_mic.h6
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_private.h51
-rw-r--r--drivers/staging/lustre/include/linux/lnet/api.h65
-rw-r--r--drivers/staging/lustre/include/linux/lnet/lib-lnet.h232
-rw-r--r--drivers/staging/lustre/include/linux/lnet/lib-types.h137
-rw-r--r--drivers/staging/lustre/include/linux/lnet/lnetst.h12
-rw-r--r--drivers/staging/lustre/include/linux/lnet/nidstr.h2
-rw-r--r--drivers/staging/lustre/include/linux/lnet/socklnd.h14
-rw-r--r--drivers/staging/lustre/include/linux/lnet/types.h155
-rw-r--r--drivers/staging/lustre/lnet/Kconfig1
-rw-r--r--drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c109
-rw-r--r--drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h24
-rw-r--r--drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c88
-rw-r--r--drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c14
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c69
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h76
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c62
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c4
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c38
-rw-r--r--drivers/staging/lustre/lnet/libcfs/debug.c8
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-mem.c11
-rw-r--r--drivers/staging/lustre/lnet/libcfs/tracefile.c51
-rw-r--r--drivers/staging/lustre/lnet/lnet/acceptor.c2
-rw-r--r--drivers/staging/lustre/lnet/lnet/api-ni.c151
-rw-r--r--drivers/staging/lustre/lnet/lnet/config.c8
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-eq.c26
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-md.c29
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-me.c28
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-move.c195
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-msg.c41
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-ptl.c32
-rw-r--r--drivers/staging/lustre/lnet/lnet/lo.c12
-rw-r--r--drivers/staging/lustre/lnet/lnet/nidstrings.c2
-rw-r--r--drivers/staging/lustre/lnet/lnet/peer.c38
-rw-r--r--drivers/staging/lustre/lnet/lnet/router.c164
-rw-r--r--drivers/staging/lustre/lnet/lnet/router_proc.c34
-rw-r--r--drivers/staging/lustre/lnet/selftest/brw_test.c4
-rw-r--r--drivers/staging/lustre/lnet/selftest/conrpc.c17
-rw-r--r--drivers/staging/lustre/lnet/selftest/console.c39
-rw-r--r--drivers/staging/lustre/lnet/selftest/console.h14
-rw-r--r--drivers/staging/lustre/lnet/selftest/framework.c4
-rw-r--r--drivers/staging/lustre/lnet/selftest/ping_test.c2
-rw-r--r--drivers/staging/lustre/lnet/selftest/rpc.c31
-rw-r--r--drivers/staging/lustre/lnet/selftest/rpc.h2
-rw-r--r--drivers/staging/lustre/lnet/selftest/selftest.h42
-rw-r--r--drivers/staging/lustre/lustre/include/cl_object.h13
-rw-r--r--drivers/staging/lustre/lustre/include/lprocfs_status.h120
-rw-r--r--drivers/staging/lustre/lustre/include/lu_object.h10
-rw-r--r--drivers/staging/lustre/lustre/include/lustre/lustre_idl.h8
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_dlm.h11
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_dlm_flags.h3
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_eacl.h74
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_net.h32
-rw-r--r--drivers/staging/lustre/lustre/include/obd_support.h1
-rw-r--r--drivers/staging/lustre/lustre/ldlm/interval_tree.c13
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_internal.h5
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_lock.c47
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c180
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_pool.c7
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_request.c14
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_resource.c2
-rw-r--r--drivers/staging/lustre/lustre/llite/dcache.c1
-rw-r--r--drivers/staging/lustre/lustre/llite/file.c49
-rw-r--r--drivers/staging/lustre/lustre/llite/glimpse.c4
-rw-r--r--drivers/staging/lustre/lustre/llite/lcommon_cl.c8
-rw-r--r--drivers/staging/lustre/lustre/llite/lcommon_misc.c2
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_internal.h3
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_lib.c22
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_mmap.c4
-rw-r--r--drivers/staging/lustre/lustre/llite/lproc_llite.c4
-rw-r--r--drivers/staging/lustre/lustre/llite/namei.c1
-rw-r--r--drivers/staging/lustre/lustre/llite/range_lock.c2
-rw-r--r--drivers/staging/lustre/lustre/llite/rw.c2
-rw-r--r--drivers/staging/lustre/lustre/llite/rw26.c26
-rw-r--r--drivers/staging/lustre/lustre/llite/super25.c2
-rw-r--r--drivers/staging/lustre/lustre/llite/symlink.c1
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_dev.c10
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_io.c2
-rw-r--r--drivers/staging/lustre/lustre/llite/xattr.c3
-rw-r--r--drivers/staging/lustre/lustre/lmv/lmv_obd.c5
-rw-r--r--drivers/staging/lustre/lustre/lmv/lproc_lmv.c1
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_cl_internal.h41
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_io.c24
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_obd.c1
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_object.c2
-rw-r--r--drivers/staging/lustre/lustre/obdclass/cl_object.c6
-rw-r--r--drivers/staging/lustre/lustre/obdclass/cl_page.c1
-rw-r--r--drivers/staging/lustre/lustre/obdclass/lprocfs_status.c111
-rw-r--r--drivers/staging/lustre/lustre/obdclass/obd_config.c7
-rw-r--r--drivers/staging/lustre/lustre/obdecho/echo_client.c6
-rw-r--r--drivers/staging/lustre/lustre/osc/lproc_osc.c2
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_cache.c4
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_cl_internal.h4
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_internal.h3
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_io.c52
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_lock.c60
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_object.c12
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_page.c77
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_request.c17
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/client.c7
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/connection.c6
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/events.c18
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/layout.c1
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/niobuf.c26
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/pers.c2
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/ptlrpc_internal.h2
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c2
-rw-r--r--drivers/staging/media/Kconfig8
-rw-r--r--drivers/staging/media/Makefile4
-rw-r--r--drivers/staging/media/atomisp/Kconfig11
-rw-r--r--drivers/staging/media/atomisp/Makefile6
-rw-r--r--drivers/staging/media/atomisp/TODO64
-rw-r--r--drivers/staging/media/atomisp/i2c/Kconfig106
-rw-r--r--drivers/staging/media/atomisp/i2c/Makefile23
-rw-r--r--drivers/staging/media/atomisp/i2c/ap1302.c1260
-rw-r--r--drivers/staging/media/atomisp/i2c/ap1302.h198
-rw-r--r--drivers/staging/media/atomisp/i2c/gc0310.c1490
-rw-r--r--drivers/staging/media/atomisp/i2c/gc0310.h459
-rw-r--r--drivers/staging/media/atomisp/i2c/gc2235.c1219
-rw-r--r--drivers/staging/media/atomisp/i2c/gc2235.h672
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/Kconfig9
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/Makefile8
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/ad5816g.c225
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/ad5816g.h49
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/common.h65
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/drv201.c218
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/drv201.h38
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/dw9714.c235
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/dw9714.h63
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/dw9718.c238
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/dw9718.h64
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/dw9719.c209
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/dw9719.h58
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/imx.c2512
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/imx.h766
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/imx132.h566
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/imx134.h2464
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/imx135.h3374
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/imx175.h1959
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/imx208.h550
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/imx219.h227
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/imx227.h726
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/otp.c39
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/otp_brcc064_e2prom.c80
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/otp_e2prom.c89
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/otp_imx.c191
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/vcm.c45
-rw-r--r--drivers/staging/media/atomisp/i2c/libmsrlisthelper.c209
-rw-r--r--drivers/staging/media/atomisp/i2c/lm3554.c1009
-rw-r--r--drivers/staging/media/atomisp/i2c/mt9m114.c1963
-rw-r--r--drivers/staging/media/atomisp/i2c/mt9m114.h1786
-rw-r--r--drivers/staging/media/atomisp/i2c/ov2680.c1559
-rw-r--r--drivers/staging/media/atomisp/i2c/ov2680.h940
-rw-r--r--drivers/staging/media/atomisp/i2c/ov2722.c1373
-rw-r--r--drivers/staging/media/atomisp/i2c/ov2722.h1267
-rw-r--r--drivers/staging/media/atomisp/i2c/ov5693/Kconfig11
-rw-r--r--drivers/staging/media/atomisp/i2c/ov5693/Makefile3
-rw-r--r--drivers/staging/media/atomisp/i2c/ov5693/ad5823.h67
-rw-r--r--drivers/staging/media/atomisp/i2c/ov5693/ov5693.c2066
-rw-r--r--drivers/staging/media/atomisp/i2c/ov5693/ov5693.h1381
-rw-r--r--drivers/staging/media/atomisp/i2c/ov8858.c2221
-rw-r--r--drivers/staging/media/atomisp/i2c/ov8858.h1482
-rw-r--r--drivers/staging/media/atomisp/i2c/ov8858_btns.h1284
-rw-r--r--drivers/staging/media/atomisp/include/asm/intel_mid_pcihelpers.h37
-rw-r--r--drivers/staging/media/atomisp/include/linux/atomisp.h1367
-rw-r--r--drivers/staging/media/atomisp/include/linux/atomisp_gmin_platform.h40
-rw-r--r--drivers/staging/media/atomisp/include/linux/atomisp_platform.h262
-rw-r--r--drivers/staging/media/atomisp/include/linux/libmsrlisthelper.h32
-rw-r--r--drivers/staging/media/atomisp/include/linux/vlv2_plat_clock.h30
-rw-r--r--drivers/staging/media/atomisp/include/media/lm3554.h136
-rw-r--r--drivers/staging/media/atomisp/include/media/lm3642.h153
-rw-r--r--drivers/staging/media/atomisp/pci/Kconfig13
-rw-r--r--drivers/staging/media/atomisp/pci/Makefile5
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/Makefile355
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp-regs.h209
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_acc.c608
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_acc.h124
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_cmd.c6751
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_cmd.h457
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_common.h79
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat.h668
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_css20.c4722
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_css20.h282
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_ioctl32.c1263
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_ioctl32.h369
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_csi2.c446
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_csi2.h61
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_dfs_tables.h412
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_drvfs.c209
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_drvfs.h29
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_file.c245
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_file.h47
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_fops.c1304
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_fops.h54
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_helper.h33
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_internal.h331
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_ioctl.c3130
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_ioctl.h73
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_subdev.c1437
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_subdev.h471
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_tables.h191
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_tpg.c181
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_tpg.h42
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_trace_event.h133
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_v4l2.c1610
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_v4l2.h44
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/Makefile4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/base/circbuf/interface/ia_css_circbuf.h377
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/base/circbuf/interface/ia_css_circbuf_comm.h56
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/base/circbuf/interface/ia_css_circbuf_desc.h170
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/base/circbuf/src/circbuf.c321
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/base/refcount/interface/ia_css_refcount.h83
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/base/refcount/src/refcount.c281
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/pipe/interface/ia_css_pipe_binarydesc.h297
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/pipe/interface/ia_css_pipe_stagedesc.h52
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/pipe/interface/ia_css_pipe_util.h39
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/pipe/src/pipe_binarydesc.c883
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/pipe/src/pipe_stagedesc.c115
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/pipe/src/pipe_util.c51
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/util/interface/ia_css_util.h141
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/util/src/util.c227
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hive_isp_css_2400_system_generated/ia_css_isp_configs.c360
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hive_isp_css_2400_system_generated/ia_css_isp_configs.h189
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hive_isp_css_2400_system_generated/ia_css_isp_params.c3221
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hive_isp_css_2400_system_generated/ia_css_isp_params.h399
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hive_isp_css_2400_system_generated/ia_css_isp_states.c214
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hive_isp_css_2400_system_generated/ia_css_isp_states.h72
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/bits.h104
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/cell_params.h42
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/css_receiver_2400_common_defs.h200
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/css_receiver_2400_defs.h258
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/defs.h36
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/dma_v2_defs.h199
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/gdc_v2_defs.h170
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/gp_regs_defs.h22
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/gp_timer_defs.h36
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/gpio_block_defs.h42
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/hive_isp_css_defs.h416
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/hive_isp_css_host_ids_hrt.h84
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/hive_isp_css_irq_types_hrt.h72
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/hive_isp_css_streaming_to_mipi_types_hrt.h26
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/hive_types.h128
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/if_defs.h22
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/input_formatter_subsystem_defs.h53
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/input_selector_defs.h89
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/input_switch_2400_defs.h30
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/input_system_ctrl_defs.h254
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/input_system_defs.h126
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/irq_controller_defs.h28
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/isp2400_mamoiada_params.h254
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/isp2400_support.h38
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/isp_acquisition_defs.h234
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/isp_capture_defs.h310
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/mmu_defs.h23
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/scalar_processor_2400_params.h20
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/sp_hrt.h24
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/str2mem_defs.h39
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/streaming_to_mipi_defs.h28
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/timed_controller_defs.h22
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/var.h74
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/version.h20
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/spmem_dump.c3634
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/csi_rx_global.h63
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hive_isp_css_2401_system_csi2p_generated/ia_css_isp_configs.c360
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hive_isp_css_2401_system_csi2p_generated/ia_css_isp_configs.h189
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hive_isp_css_2401_system_csi2p_generated/ia_css_isp_params.c3220
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hive_isp_css_2401_system_csi2p_generated/ia_css_isp_params.h399
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hive_isp_css_2401_system_csi2p_generated/ia_css_isp_states.c214
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hive_isp_css_2401_system_csi2p_generated/ia_css_isp_states.h72
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/csi_rx.c41
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/csi_rx_local.h61
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/csi_rx_private.h282
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/ibuf_ctrl.c22
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/ibuf_ctrl_local.h58
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/ibuf_ctrl_private.h233
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/input_system_local.h106
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/input_system_private.h128
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/isys_dma.c40
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/isys_dma_local.h20
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/isys_dma_private.h60
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/isys_irq.c39
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/isys_irq_local.h35
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/isys_irq_private.h108
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/isys_stream2mmio.c21
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/isys_stream2mmio_local.h36
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/isys_stream2mmio_private.h168
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/pixelgen_local.h50
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/pixelgen_private.h164
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/system_local.h381
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/PixelGen_SysBlock_defs.h126
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/bits.h104
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/cell_params.h42
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/css_receiver_2400_common_defs.h200
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/css_receiver_2400_defs.h258
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/defs.h36
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/dma_v2_defs.h199
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/gdc_v2_defs.h170
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/gp_regs_defs.h22
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/gp_timer_defs.h36
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/gpio_block_defs.h42
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/hive_isp_css_2401_irq_types_hrt.h68
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/hive_isp_css_defs.h435
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/hive_isp_css_host_ids_hrt.h119
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/hive_isp_css_streaming_to_mipi_types_hrt.h26
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/hive_types.h128
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/ibuf_cntrl_defs.h138
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/if_defs.h22
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/input_formatter_subsystem_defs.h53
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/input_selector_defs.h89
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/input_switch_2400_defs.h30
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/input_system_ctrl_defs.h254
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/input_system_defs.h126
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/irq_controller_defs.h28
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/isp2400_support.h38
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/isp2401_mamoiada_params.h258
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/isp_acquisition_defs.h234
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/isp_capture_defs.h310
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/mipi_backend_common_defs.h210
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/mipi_backend_defs.h215
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/mmu_defs.h23
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/rx_csi_defs.h175
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/scalar_processor_2400_params.h20
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/sp_hrt.h24
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/str2mem_defs.h39
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/stream2mmio_defs.h71
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/streaming_to_mipi_defs.h28
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/timed_controller_defs.h22
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/var.h99
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/version.h20
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/ibuf_ctrl_global.h80
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/input_system_global.h206
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/isys_dma_global.h87
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/isys_irq_global.h35
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/isys_stream2mmio_global.h39
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/pixelgen_global.h91
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/spmem_dump.c3686
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/system_global.h458
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hive_isp_css_2401_system_generated/ia_css_isp_configs.c360
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hive_isp_css_2401_system_generated/ia_css_isp_configs.h189
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hive_isp_css_2401_system_generated/ia_css_isp_params.c3220
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hive_isp_css_2401_system_generated/ia_css_isp_params.h399
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hive_isp_css_2401_system_generated/ia_css_isp_states.c214
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hive_isp_css_2401_system_generated/ia_css_isp_states.h72
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/bits.h104
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/cell_params.h42
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/css_receiver_2400_common_defs.h200
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/css_receiver_2400_defs.h258
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/defs.h36
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/dma_v2_defs.h199
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/gdc_v2_defs.h170
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/gp_regs_defs.h22
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/gp_timer_defs.h36
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/gpio_block_defs.h42
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/hive_isp_css_2401_irq_types_hrt.h69
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/hive_isp_css_defs.h435
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/hive_isp_css_host_ids_hrt.h119
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/hive_isp_css_streaming_to_mipi_types_hrt.h26
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/hive_types.h128
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/if_defs.h22
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/input_formatter_subsystem_defs.h53
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/input_selector_defs.h89
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/input_switch_2400_defs.h30
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/input_system_ctrl_defs.h254
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/input_system_defs.h126
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/irq_controller_defs.h28
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/isp2400_support.h38
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/isp2401_mamoiada_params.h258
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/isp_acquisition_defs.h234
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/isp_capture_defs.h310
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/mmu_defs.h23
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/scalar_processor_2400_params.h20
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/sp_hrt.h24
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/str2mem_defs.h39
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/streaming_to_mipi_defs.h28
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/timed_controller_defs.h22
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/var.h99
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/version.h20
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/spmem_dump.c3634
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_api_version.h673
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_trace.h388
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/debug_global.h83
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/dma_global.h255
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/event_fifo_global.h20
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/fifo_monitor_global.h32
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/gdc_global.h90
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/gp_device_global.h85
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/gp_timer_global.h33
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/gpio_global.h45
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/hmem_global.h45
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/debug.c72
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/debug_local.h21
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/debug_private.h99
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/dma.c299
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/dma_local.h207
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/dma_private.h41
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/event_fifo.c19
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/event_fifo_local.h57
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/event_fifo_private.h75
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/fifo_monitor.c567
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/fifo_monitor_local.h99
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/fifo_monitor_private.h79
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gdc.c127
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gdc_local.h20
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gdc_private.h20
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gp_device.c108
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gp_device_local.h143
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gp_device_private.h46
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gp_timer.c70
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gp_timer_local.h45
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gp_timer_private.h22
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gpio_local.h20
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gpio_private.h44
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/hive_isp_css_ddr_hrt_modified.h148
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/hive_isp_css_hrt_modified.h79
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/hmem.c19
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/hmem_local.h20
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/hmem_private.h30
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/input_formatter.c227
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/input_formatter_local.h120
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/input_formatter_private.h46
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/input_system.c1823
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/input_system_local.h533
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/input_system_private.h116
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/irq.c448
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/irq_local.h136
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/irq_private.h44
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/isp.c129
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/isp_local.h57
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/isp_private.h157
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/mmu.c50
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/mmu_local.h20
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/mmu_private.h44
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/sp.c81
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/sp_local.h101
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/sp_private.h163
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/system_local.h306
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/timed_ctrl.c74
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/timed_ctrl_local.h20
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/timed_ctrl_private.h34
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/vamem_local.h20
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/vamem_private.h37
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/vmem.c258
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/vmem_local.h55
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/vmem_private.h20
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/input_formatter_global.h130
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/input_system_global.h155
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/irq_global.h45
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/isp_global.h115
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/mmu_global.h22
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/resource_global.h35
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/sp_global.h93
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/system_global.h348
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/timed_ctrl_global.h56
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/vamem_global.h34
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/vmem_global.h28
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/xmem_global.h20
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/assert_support.h103
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/bamem.h47
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/bbb_config.h27
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/bitop_support.h25
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/cpu_mem_support.h59
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/csi_rx.h48
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/debug.h48
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/device_access/device_access.h194
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/dma.h48
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/error_support.h70
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/event_fifo.h47
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/fifo_monitor.h47
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/gdc_device.h49
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/gp_device.h47
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/gp_timer.h47
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/gpio.h47
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/hmem.h47
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/csi_rx_public.h135
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/debug_public.h99
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/dma_public.h73
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/event_fifo_public.h79
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/fifo_monitor_public.h110
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/gdc_public.h59
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/gp_device_public.h58
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/gp_timer_public.h34
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/gpio_public.h45
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/hmem_public.h32
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/ibuf_ctrl_public.h93
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/input_formatter_public.h115
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/input_system_public.h376
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/irq_public.h184
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isp2400_config.h24
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isp2500_config.h29
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isp2600_config.h34
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isp2601_config.h70
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isp_config.h24
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isp_op1w.h845
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isp_op1w_types.h54
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isp_op2w.h675
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isp_op2w_types.h49
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isp_op_count.h226
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isp_public.h186
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isys_dma_public.h38
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isys_irq_public.h45
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isys_public.h37
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isys_stream2mmio_public.h101
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/mmu_public.h82
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/osys_public.h20
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/pipeline_public.h18
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/pixelgen_public.h79
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/ref_vector_func.h1222
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/ref_vector_func_types.h385
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/sp_public.h223
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/tag_public.h41
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/timed_ctrl_public.h59
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/vamem_public.h20
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/vmem_public.h20
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/ibuf_ctrl.h49
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/input_formatter.h47
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/input_system.h47
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/irq.h47
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/isp.h47
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/isys_dma.h49
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/isys_irq.h40
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/isys_stream2mmio.h49
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/math_support.h224
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/memory_access/memory_access.h174
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/memory_realloc.h38
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/misc_support.h26
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/mmu_device.h49
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/mpmath.h330
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/osys.h48
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/pixelgen.h49
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/platform_support.h42
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/print_support.h45
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/queue.h47
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/resource.h48
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/socket.h48
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/sp.h47
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/storage_class.h34
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/stream_buffer.h48
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/string_support.h167
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/system_types.h25
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/tag.h46
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/timed_ctrl.h47
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/type_support.h82
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/vamem.h47
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/vector_func.h39
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/vector_ops.h32
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/vmem.h47
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/xmem.h47
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_shared/host/queue_local.h20
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_shared/host/queue_private.h18
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_shared/host/tag.c95
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_shared/host/tag_local.h22
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_shared/host/tag_private.h18
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_shared/queue_global.h19
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_shared/socket_global.h53
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_shared/stream_buffer_global.h26
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_shared/sw_event_global.h36
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_shared/tag_global.h56
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css.h57
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_3a.h188
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_acc_types.h468
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_buffer.h84
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_control.h157
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_device_access.c95
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_device_access.h59
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_dvs.h299
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_env.h94
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_err.h63
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_event_public.h196
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_firmware.h74
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_frac.h37
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_frame_format.h101
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_frame_public.h365
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_host_data.h46
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_input_port.h66
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_irq.h235
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_memory_access.c83
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_metadata.h71
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_mipi.h82
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_mmu.h32
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_mmu_private.h31
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_morph.h39
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_pipe.h228
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_pipe_public.h659
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_prbs.h53
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_properties.h41
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_shading.h40
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_stream.h110
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_stream_format.h94
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_stream_public.h582
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_timer.h84
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_tpg.h78
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_types.h654
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_version.h40
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_version_data.h33
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/aa/aa_2/ia_css_aa2.host.c32
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/aa/aa_2/ia_css_aa2.host.h27
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/aa/aa_2/ia_css_aa2_param.h24
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/aa/aa_2/ia_css_aa2_state.h41
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/aa/aa_2/ia_css_aa2_types.h48
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/anr/anr_1.0/ia_css_anr.host.c60
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/anr/anr_1.0/ia_css_anr.host.h39
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/anr/anr_1.0/ia_css_anr_param.h25
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/anr/anr_1.0/ia_css_anr_types.h36
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/anr/anr_2/ia_css_anr2.host.c46
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/anr/anr_2/ia_css_anr2.host.h35
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/anr/anr_2/ia_css_anr2_table.host.c52
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/anr/anr_2/ia_css_anr2_table.host.h22
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/anr/anr_2/ia_css_anr2_types.h32
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/anr/anr_2/ia_css_anr_param.h27
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bayer_ls/bayer_ls_1.0/ia_css_bayer_load_param.h20
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bayer_ls/bayer_ls_1.0/ia_css_bayer_ls_param.h42
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bayer_ls/bayer_ls_1.0/ia_css_bayer_store_param.h21
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bh/bh_2/ia_css_bh.host.c66
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bh/bh_2/ia_css_bh.host.h32
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bh/bh_2/ia_css_bh_param.h40
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bh/bh_2/ia_css_bh_types.h37
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnlm/ia_css_bnlm.host.c183
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnlm/ia_css_bnlm.host.h41
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnlm/ia_css_bnlm_default.host.c71
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnlm/ia_css_bnlm_default.host.h22
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnlm/ia_css_bnlm_param.h63
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnlm/ia_css_bnlm_state.h31
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnlm/ia_css_bnlm_types.h106
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnr/bnr2_2/ia_css_bnr2_2.host.c122
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnr/bnr2_2/ia_css_bnr2_2.host.h35
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnr/bnr2_2/ia_css_bnr2_2_param.h47
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnr/bnr2_2/ia_css_bnr2_2_types.h71
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnr/bnr_1.0/ia_css_bnr.host.c64
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnr/bnr_1.0/ia_css_bnr.host.h34
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnr/bnr_1.0/ia_css_bnr_param.h30
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/cnr/cnr_1.0/ia_css_cnr.host.c28
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/cnr/cnr_1.0/ia_css_cnr.host.h25
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/cnr/cnr_1.0/ia_css_cnr_param.h24
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/cnr/cnr_1.0/ia_css_cnr_state.h33
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/cnr/cnr_2/ia_css_cnr2.host.c76
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/cnr/cnr_2/ia_css_cnr2.host.h43
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/cnr/cnr_2/ia_css_cnr2_param.h32
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/cnr/cnr_2/ia_css_cnr2_types.h55
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/cnr/cnr_2/ia_css_cnr_param.h20
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/cnr/cnr_2/ia_css_cnr_state.h33
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/conversion/conversion_1.0/ia_css_conversion.host.c36
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/conversion/conversion_1.0/ia_css_conversion.host.h33
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/conversion/conversion_1.0/ia_css_conversion_param.h28
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/conversion/conversion_1.0/ia_css_conversion_types.h32
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/copy_output/copy_output_1.0/ia_css_copy_output.host.c47
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/copy_output/copy_output_1.0/ia_css_copy_output.host.h34
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/copy_output/copy_output_1.0/ia_css_copy_output_param.h26
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/crop/crop_1.0/ia_css_crop.host.c64
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/crop/crop_1.0/ia_css_crop.host.h41
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/crop/crop_1.0/ia_css_crop_param.h32
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/crop/crop_1.0/ia_css_crop_types.h35
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/csc/csc_1.0/ia_css_csc.host.c132
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/csc/csc_1.0/ia_css_csc.host.h54
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/csc/csc_1.0/ia_css_csc_param.h34
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/csc/csc_1.0/ia_css_csc_types.h78
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc1_5/ia_css_ctc1_5.host.c120
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc1_5/ia_css_ctc1_5.host.h33
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc1_5/ia_css_ctc1_5_param.h46
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc1_5/ia_css_ctc_param.h20
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc2/ia_css_ctc2.host.c156
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc2/ia_css_ctc2.host.h33
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc2/ia_css_ctc2_param.h49
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc2/ia_css_ctc2_types.h55
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc_1.0/ia_css_ctc.host.c63
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc_1.0/ia_css_ctc.host.h36
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc_1.0/ia_css_ctc_param.h44
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc_1.0/ia_css_ctc_table.host.c215
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc_1.0/ia_css_ctc_table.host.h24
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc_1.0/ia_css_ctc_types.h110
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/de/de_1.0/ia_css_de.host.c79
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/de/de_1.0/ia_css_de.host.h44
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/de/de_1.0/ia_css_de_param.h27
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/de/de_1.0/ia_css_de_state.h26
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/de/de_1.0/ia_css_de_types.h43
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/de/de_2/ia_css_de2.host.c54
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/de/de_2/ia_css_de2.host.h38
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/de/de_2/ia_css_de2_param.h30
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/de/de_2/ia_css_de2_types.h42
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/de/de_2/ia_css_de_param.h20
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/de/de_2/ia_css_de_state.h21
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dp/dp_1.0/ia_css_dp.host.c132
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dp/dp_1.0/ia_css_dp.host.h47
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dp/dp_1.0/ia_css_dp_param.h36
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dp/dp_1.0/ia_css_dp_state.h36
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dp/dp_1.0/ia_css_dp_types.h50
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dpc2/ia_css_dpc2.host.c65
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dpc2/ia_css_dpc2.host.h40
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dpc2/ia_css_dpc2_default.host.c26
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dpc2/ia_css_dpc2_default.host.h23
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dpc2/ia_css_dpc2_param.h53
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dpc2/ia_css_dpc2_state.h30
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dpc2/ia_css_dpc2_types.h59
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dvs/dvs_1.0/ia_css_dvs.host.c306
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dvs/dvs_1.0/ia_css_dvs.host.h60
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dvs/dvs_1.0/ia_css_dvs_param.h39
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dvs/dvs_1.0/ia_css_dvs_types.h30
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/eed1_8/ia_css_eed1_8.host.c321
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/eed1_8/ia_css_eed1_8.host.h46
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/eed1_8/ia_css_eed1_8_default.host.c94
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/eed1_8/ia_css_eed1_8_default.host.h22
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/eed1_8/ia_css_eed1_8_param.h154
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/eed1_8/ia_css_eed1_8_state.h40
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/eed1_8/ia_css_eed1_8_types.h86
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/fc/fc_1.0/ia_css_formats.host.c62
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/fc/fc_1.0/ia_css_formats.host.h45
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/fc/fc_1.0/ia_css_formats_param.h25
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/fc/fc_1.0/ia_css_formats_types.h38
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/fixedbds/fixedbds_1.0/ia_css_fixedbds_param.h33
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/fixedbds/fixedbds_1.0/ia_css_fixedbds_types.h26
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/fpn/fpn_1.0/ia_css_fpn.host.c89
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/fpn/fpn_1.0/ia_css_fpn.host.h44
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/fpn/fpn_1.0/ia_css_fpn_param.h35
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/fpn/fpn_1.0/ia_css_fpn_types.h52
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/gc/gc_1.0/ia_css_gc.host.c118
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/gc/gc_1.0/ia_css_gc.host.h65
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/gc/gc_1.0/ia_css_gc_param.h61
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/gc/gc_1.0/ia_css_gc_table.host.c214
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/gc/gc_1.0/ia_css_gc_table.host.h24
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/gc/gc_1.0/ia_css_gc_types.h97
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/gc/gc_2/ia_css_gc2.host.c110
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/gc/gc_2/ia_css_gc2.host.h79
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/gc/gc_2/ia_css_gc2_param.h43
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/gc/gc_2/ia_css_gc2_table.host.c132
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/gc/gc_2/ia_css_gc2_table.host.h26
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/gc/gc_2/ia_css_gc2_types.h54
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/hdr/ia_css_hdr.host.c41
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/hdr/ia_css_hdr.host.h31
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/hdr/ia_css_hdr_param.h53
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/hdr/ia_css_hdr_types.h64
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/io_ls/bayer_io_ls/ia_css_bayer_io.host.c86
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/io_ls/bayer_io_ls/ia_css_bayer_io.host.h31
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/io_ls/bayer_io_ls/ia_css_bayer_io_param.h22
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/io_ls/bayer_io_ls/ia_css_bayer_io_types.h22
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/io_ls/common/ia_css_common_io_param.h22
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/io_ls/common/ia_css_common_io_types.h31
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/io_ls/plane_io_ls/ia_css_plane_io_param.h22
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/io_ls/plane_io_ls/ia_css_plane_io_types.h30
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/io_ls/yuv420_io_ls/ia_css_yuv420_io_param.h22
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/io_ls/yuv420_io_ls/ia_css_yuv420_io_types.h22
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/io_ls/yuv444_io_ls/ia_css_yuv444_io_param.h22
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/io_ls/yuv444_io_ls/ia_css_yuv444_io_types.h22
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io.host.c86
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io.host.h31
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io_param.h22
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io_types.h22
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ipu2_io_ls/common/ia_css_common_io_param.h22
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ipu2_io_ls/common/ia_css_common_io_types.h31
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ipu2_io_ls/plane_io_ls/ia_css_plane_io_param.h22
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ipu2_io_ls/plane_io_ls/ia_css_plane_io_types.h30
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ipu2_io_ls/yuv420_io_ls/ia_css_yuv420_io_param.h22
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ipu2_io_ls/yuv420_io_ls/ia_css_yuv420_io_types.h22
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ipu2_io_ls/yuv444_io_ls/ia_css_yuv444_io.host.c86
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ipu2_io_ls/yuv444_io_ls/ia_css_yuv444_io.host.h31
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ipu2_io_ls/yuv444_io_ls/ia_css_yuv444_io_param.h22
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ipu2_io_ls/yuv444_io_ls/ia_css_yuv444_io_types.h22
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/iterator/iterator_1.0/ia_css_iterator.host.c80
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/iterator/iterator_1.0/ia_css_iterator.host.h34
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/iterator/iterator_1.0/ia_css_iterator_param.h38
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/macc/macc1_5/ia_css_macc1_5.host.c74
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/macc/macc1_5/ia_css_macc1_5.host.h41
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/macc/macc1_5/ia_css_macc1_5_param.h31
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/macc/macc1_5/ia_css_macc1_5_table.host.c32
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/macc/macc1_5/ia_css_macc1_5_table.host.h22
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/macc/macc1_5/ia_css_macc1_5_types.h74
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/macc/macc_1.0/ia_css_macc.host.c49
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/macc/macc_1.0/ia_css_macc.host.h42
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/macc/macc_1.0/ia_css_macc_param.h25
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/macc/macc_1.0/ia_css_macc_table.host.c47
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/macc/macc_1.0/ia_css_macc_table.host.h23
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/macc/macc_1.0/ia_css_macc_types.h63
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/norm/norm_1.0/ia_css_norm.host.c16
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/norm/norm_1.0/ia_css_norm.host.h20
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/norm/norm_1.0/ia_css_norm_param.h19
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/norm/norm_1.0/ia_css_norm_types.h21
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ob/ob2/ia_css_ob2.host.c79
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ob/ob2/ia_css_ob2.host.h40
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ob/ob2/ia_css_ob2_param.h29
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ob/ob2/ia_css_ob2_types.h45
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ob/ob_1.0/ia_css_ob.host.c159
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ob/ob_1.0/ia_css_ob.host.h53
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ob/ob_1.0/ia_css_ob_param.h48
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ob/ob_1.0/ia_css_ob_types.h69
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/output/output_1.0/ia_css_output.host.c162
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/output/output_1.0/ia_css_output.host.h75
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/output/output_1.0/ia_css_output_param.h36
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/output/output_1.0/ia_css_output_types.h48
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/qplane/qplane_2/ia_css_qplane.host.c61
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/qplane/qplane_2/ia_css_qplane.host.h43
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/qplane/qplane_2/ia_css_qplane_param.h30
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/qplane/qplane_2/ia_css_qplane_types.h33
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/raw/raw_1.0/ia_css_raw.host.c136
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/raw/raw_1.0/ia_css_raw.host.h38
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/raw/raw_1.0/ia_css_raw_param.h38
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/raw/raw_1.0/ia_css_raw_types.h37
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/raw_aa_binning/raw_aa_binning_1.0/ia_css_raa.host.c35
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/raw_aa_binning/raw_aa_binning_1.0/ia_css_raa.host.h27
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ref/ref_1.0/ia_css_ref.host.c74
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ref/ref_1.0/ia_css_ref.host.h41
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ref/ref_1.0/ia_css_ref_param.h36
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ref/ref_1.0/ia_css_ref_state.h26
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ref/ref_1.0/ia_css_ref_types.h28
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/s3a/s3a_1.0/ia_css_s3a.host.c386
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/s3a/s3a_1.0/ia_css_s3a.host.h77
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/s3a/s3a_1.0/ia_css_s3a_param.h54
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/s3a/s3a_1.0/ia_css_s3a_types.h266
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/s3a_stat_ls/ia_css_s3a_stat_ls_param.h45
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/s3a_stat_ls/ia_css_s3a_stat_store_param.h21
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sc/sc_1.0/ia_css_sc.host.c130
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sc/sc_1.0/ia_css_sc.host.h77
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sc/sc_1.0/ia_css_sc_param.h71
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sc/sc_1.0/ia_css_sc_types.h136
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/scale/scale_1.0/ia_css_scale_param.h20
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/common/ia_css_sdis_common.host.h99
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/common/ia_css_sdis_common_types.h232
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/common/ia_css_sdis_param.h22
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/sdis_1.0/ia_css_sdis.host.c424
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/sdis_1.0/ia_css_sdis.host.h101
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/sdis_1.0/ia_css_sdis_param.h21
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/sdis_1.0/ia_css_sdis_types.h53
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/sdis_2/ia_css_sdis2.host.c338
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/sdis_2/ia_css_sdis2.host.h95
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/sdis_2/ia_css_sdis2_types.h69
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/sdis_2/ia_css_sdis_param.h21
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tdf/tdf_1.0/ia_css_tdf.host.c76
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tdf/tdf_1.0/ia_css_tdf.host.h39
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tdf/tdf_1.0/ia_css_tdf_default.host.c36
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tdf/tdf_1.0/ia_css_tdf_default.host.h23
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tdf/tdf_1.0/ia_css_tdf_param.h43
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tdf/tdf_1.0/ia_css_tdf_types.h53
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tnr/tnr3/ia_css_tnr3_types.h61
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tnr/tnr_1.0/ia_css_tnr.host.c130
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tnr/tnr_1.0/ia_css_tnr.host.h56
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tnr/tnr_1.0/ia_css_tnr_param.h48
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tnr/tnr_1.0/ia_css_tnr_state.h26
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tnr/tnr_1.0/ia_css_tnr_types.h60
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/uds/uds_1.0/ia_css_uds_param.h31
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/vf/vf_1.0/ia_css_vf.host.c140
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/vf/vf_1.0/ia_css_vf.host.h47
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/vf/vf_1.0/ia_css_vf_param.h37
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/vf/vf_1.0/ia_css_vf_types.h32
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/wb/wb_1.0/ia_css_wb.host.c89
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/wb/wb_1.0/ia_css_wb.host.h39
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/wb/wb_1.0/ia_css_wb_param.h29
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/wb/wb_1.0/ia_css_wb_types.h47
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_1.0/ia_css_xnr.host.c66
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_1.0/ia_css_xnr.host.h47
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_1.0/ia_css_xnr_param.h51
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_1.0/ia_css_xnr_table.host.c81
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_1.0/ia_css_xnr_table.host.h22
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_1.0/ia_css_xnr_types.h71
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_3.0/ia_css_xnr3.host.c265
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_3.0/ia_css_xnr3.host.h42
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_3.0/ia_css_xnr3_param.h96
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_3.0/ia_css_xnr3_types.h98
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_3.0/ia_css_xnr3_wrapper_param.h20
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ynr/ynr_1.0/ia_css_ynr.host.c219
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ynr/ynr_1.0/ia_css_ynr.host.h60
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ynr/ynr_1.0/ia_css_ynr_param.h49
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ynr/ynr_1.0/ia_css_ynr_state.h26
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ynr/ynr_1.0/ia_css_ynr_types.h81
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ynr/ynr_2/ia_css_ynr2.host.c125
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ynr/ynr_2/ia_css_ynr2.host.h56
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ynr/ynr_2/ia_css_ynr2_param.h45
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ynr/ynr_2/ia_css_ynr2_types.h94
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ynr/ynr_2/ia_css_ynr_param.h20
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ynr/ynr_2/ia_css_ynr_state.h21
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/yuv_ls/yuv_ls_1.0/ia_css_yuv_load_param.h20
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/yuv_ls/yuv_ls_1.0/ia_css_yuv_ls_param.h39
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/yuv_ls/yuv_ls_1.0/ia_css_yuv_store_param.h21
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/modes/interface/input_buf.isp.h73
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/modes/interface/isp_const.h498
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/modes/interface/isp_exprs.h309
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/modes/interface/isp_types.h128
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/memory_realloc.c81
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/binary/interface/ia_css_binary.h333
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/binary/src/binary.c1873
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/bufq/interface/ia_css_bufq.h197
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/bufq/interface/ia_css_bufq_comm.h66
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/bufq/src/bufq.c590
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/debug/interface/ia_css_debug.h508
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/debug/interface/ia_css_debug_internal.h31
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/debug/interface/ia_css_debug_pipe.h84
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/debug/src/ia_css_debug.c3611
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/event/interface/ia_css_event.h46
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/event/src/event.c126
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/eventq/interface/ia_css_eventq.h69
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/eventq/src/eventq.c77
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/frame/interface/ia_css_frame.h180
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/frame/interface/ia_css_frame_comm.h132
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/frame/src/frame.c1026
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/ifmtr/interface/ia_css_ifmtr.h49
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/ifmtr/src/ifmtr.c568
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/inputfifo/interface/ia_css_inputfifo.h69
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/inputfifo/src/inputfifo.c613
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isp_param/interface/ia_css_isp_param.h118
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isp_param/interface/ia_css_isp_param_types.h107
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isp_param/src/isp_param.c227
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/interface/ia_css_isys.h201
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/interface/ia_css_isys_comm.h69
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/csi_rx_rmgr.c179
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/csi_rx_rmgr.h43
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/ibuf_ctrl_rmgr.c141
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/ibuf_ctrl_rmgr.h55
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/isys_dma_rmgr.c103
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/isys_dma_rmgr.h41
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/isys_init.c141
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/isys_stream2mmio_rmgr.c105
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/isys_stream2mmio_rmgr.h41
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/rx.c607
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/virtual_isys.c898
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/virtual_isys.h41
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/pipeline/interface/ia_css_pipeline.h308
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/pipeline/interface/ia_css_pipeline_common.h42
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/pipeline/src/pipeline.c806
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/queue/interface/ia_css_queue.h192
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/queue/interface/ia_css_queue_comm.h69
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/queue/src/queue.c412
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/queue/src/queue_access.c192
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/queue/src/queue_access.h101
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/rmgr/interface/ia_css_rmgr.h89
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/rmgr/interface/ia_css_rmgr_vbuf.h115
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/rmgr/src/rmgr.c55
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/rmgr/src/rmgr_vbuf.c330
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/spctrl/interface/ia_css_spctrl.h87
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/spctrl/interface/ia_css_spctrl_comm.h61
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/spctrl/src/spctrl.c199
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/tagger/interface/ia_css_tagger_common.h59
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/timer/src/timer.c48
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css.c11364
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_defs.h410
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_dvs_info.h36
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_firmware.c342
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_firmware.h54
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_frac.h40
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_host_data.c42
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_hrt.c84
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_hrt.h34
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_internal.h1096
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_irq.c16
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_legacy.h88
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_metadata.c16
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_metrics.c176
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_metrics.h76
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_mipi.c749
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_mipi.h49
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_mmu.c62
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_morph.c16
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_param_dvs.c267
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_param_dvs.h86
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_param_shading.c419
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_param_shading.h39
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_params.c5268
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_params.h188
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_params_internal.h21
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_pipe.c16
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_properties.c43
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_shading.c16
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_sp.c1814
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_sp.h248
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_stream.c16
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_stream_format.c76
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_stream_format.h23
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_struct.h80
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_uds.h37
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_version.c30
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm.c728
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_bo.c1544
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_dynamic_pool.c241
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_reserved_pool.c259
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_vm.c218
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/hrt/hive_isp_css_custom_host_hrt.h107
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/hrt/hive_isp_css_mm_hrt.c129
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/hrt/hive_isp_css_mm_hrt.h60
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm.h106
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm_bo.h323
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm_bo_dev.h130
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm_common.h100
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm_pool.h119
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm_vm.h68
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/include/mmu/isp_mmu.h175
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/include/mmu/sh_mmu.h76
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/include/mmu/sh_mmu_mrfld.h28
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/mmu/isp_mmu.c597
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/mmu/sh_mmu_mrfld.c93
-rw-r--r--drivers/staging/media/atomisp/platform/Makefile6
-rw-r--r--drivers/staging/media/atomisp/platform/clock/Makefile6
-rw-r--r--drivers/staging/media/atomisp/platform/clock/platform_vlv2_plat_clk.c40
-rw-r--r--drivers/staging/media/atomisp/platform/clock/platform_vlv2_plat_clk.h27
-rw-r--r--drivers/staging/media/atomisp/platform/clock/vlv2_plat_clock.c247
-rw-r--r--drivers/staging/media/atomisp/platform/intel-mid/Makefile5
-rw-r--r--drivers/staging/media/atomisp/platform/intel-mid/atomisp_gmin_platform.c758
-rw-r--r--drivers/staging/media/atomisp/platform/intel-mid/intel_mid_pcihelpers.c297
-rw-r--r--drivers/staging/media/bcm2048/radio-bcm2048.c28
-rw-r--r--drivers/staging/media/cxd2099/cxd2099.c101
-rw-r--r--drivers/staging/media/davinci_vpfe/davinci_vpfe_user.h24
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_ipipe.c12
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_ipipe_hw.c4
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_isif_regs.h20
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_resizer.c6
-rw-r--r--drivers/staging/media/davinci_vpfe/vpfe_mc_capture.c4
-rw-r--r--drivers/staging/media/lirc/Kconfig12
-rw-r--r--drivers/staging/media/lirc/Makefile2
-rw-r--r--drivers/staging/media/lirc/lirc_sasem.c899
-rw-r--r--drivers/staging/media/lirc/lirc_sir.c839
-rw-r--r--drivers/staging/media/lirc/lirc_zilog.c9
-rw-r--r--drivers/staging/media/omap4iss/iss_csi2.c2
-rw-r--r--drivers/staging/media/omap4iss/iss_ipipe.c2
-rw-r--r--drivers/staging/media/omap4iss/iss_ipipeif.c2
-rw-r--r--drivers/staging/media/omap4iss/iss_resizer.c2
-rw-r--r--drivers/staging/media/omap4iss/iss_video.c41
-rw-r--r--drivers/staging/media/platform/bcm2835/Kconfig10
-rw-r--r--drivers/staging/media/platform/bcm2835/TODO39
-rw-r--r--drivers/staging/media/platform/bcm2835/bcm2835-camera.c2024
-rw-r--r--drivers/staging/media/platform/bcm2835/bcm2835-camera.h145
-rw-r--r--drivers/staging/media/platform/bcm2835/controls.c1335
-rw-r--r--drivers/staging/media/platform/bcm2835/mmal-encodings.h127
-rw-r--r--drivers/staging/media/platform/bcm2835/mmal-msg-format.h81
-rw-r--r--drivers/staging/media/platform/bcm2835/mmal-msg-port.h107
-rw-r--r--drivers/staging/media/platform/bcm2835/mmal-msg.h404
-rw-r--r--drivers/staging/media/platform/bcm2835/mmal-parameters.h689
-rw-r--r--drivers/staging/media/platform/bcm2835/mmal-vchiq.c1916
-rw-r--r--drivers/staging/media/platform/bcm2835/mmal-vchiq.h178
-rw-r--r--drivers/staging/media/s5p-cec/Kconfig9
-rw-r--r--drivers/staging/media/s5p-cec/TODO7
-rw-r--r--drivers/staging/media/s5p-cec/s5p_cec.c280
-rw-r--r--drivers/staging/media/s5p-cec/s5p_cec.h76
-rw-r--r--drivers/staging/media/st-cec/Kconfig8
-rw-r--r--drivers/staging/media/st-cec/TODO7
-rw-r--r--drivers/staging/media/st-cec/stih-cec.c379
-rw-r--r--drivers/staging/most/aim-cdev/cdev.c16
-rw-r--r--drivers/staging/most/aim-sound/sound.c2
-rw-r--r--drivers/staging/most/hdm-dim2/dim2_hal.c7
-rw-r--r--drivers/staging/most/hdm-usb/hdm_usb.c39
-rw-r--r--drivers/staging/most/mostcore/core.c165
-rw-r--r--drivers/staging/nvec/nvec-keytable.h3
-rw-r--r--drivers/staging/nvec/nvec_kbd.c2
-rw-r--r--drivers/staging/olpc_dcon/olpc_dcon.c4
-rw-r--r--drivers/staging/olpc_dcon/olpc_dcon_xo_1_5.c4
-rw-r--r--drivers/staging/rtl8188eu/Kconfig1
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_ap.c27
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_cmd.c4
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_ieee80211.c15
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_ioctl_set.c4
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_mlme.c135
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_mlme_ext.c39
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_recv.c16
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_wlan_util.c11
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_xmit.c37
-rw-r--r--drivers/staging/rtl8188eu/hal/Hal8188ERateAdaptive.c2
-rw-r--r--drivers/staging/rtl8188eu/hal/odm.c3
-rw-r--r--drivers/staging/rtl8188eu/hal/odm_RTL8188E.c5
-rw-r--r--drivers/staging/rtl8188eu/hal/phy.c1
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c1
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c3
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c4
-rw-r--r--drivers/staging/rtl8188eu/hal/usb_halinit.c17
-rw-r--r--drivers/staging/rtl8188eu/include/Hal8188EPhyReg.h2
-rw-r--r--drivers/staging/rtl8188eu/include/drv_types.h2
-rw-r--r--drivers/staging/rtl8188eu/include/hal_intf.h1
-rw-r--r--drivers/staging/rtl8188eu/include/ieee80211.h6
-rw-r--r--drivers/staging/rtl8188eu/include/odm_RegDefine11N.h4
-rw-r--r--drivers/staging/rtl8188eu/include/odm_debug.h2
-rw-r--r--drivers/staging/rtl8188eu/include/pwrseq.h2
-rw-r--r--drivers/staging/rtl8188eu/include/rtl8188e_spec.h2
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_cmd.h2
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_ioctl.h6
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_mlme.h1
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_mp_phy_regdef.h2
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_recv.h4
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_security.h2
-rw-r--r--drivers/staging/rtl8188eu/os_dep/os_intfs.c7
-rw-r--r--drivers/staging/rtl8188eu/os_dep/xmit_linux.c2
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8190P_def.h2
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8190P_rtl8256.c2
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8190P_rtl8256.h2
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.c2
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.h2
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c34
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_dev.h2
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.c2
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.h2
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_hw.h2
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_hwimg.h2
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c2
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_phy.h2
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_phyreg.h4
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_cam.c2
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_cam.h2
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_core.c80
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_core.h2
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_dm.c17
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_dm.h2
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_eeprom.c2
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_eeprom.h2
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_pm.c2
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_pm.h2
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_ps.c22
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_wx.c2
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_wx.h2
-rw-r--r--drivers/staging/rtl8192e/rtl819x_BA.h2
-rw-r--r--drivers/staging/rtl8192e/rtl819x_BAProc.c4
-rw-r--r--drivers/staging/rtl8192e/rtl819x_HT.h2
-rw-r--r--drivers/staging/rtl8192e/rtl819x_HTProc.c2
-rw-r--r--drivers/staging/rtl8192e/rtl819x_Qos.h2
-rw-r--r--drivers/staging/rtl8192e/rtl819x_TS.h2
-rw-r--r--drivers/staging/rtl8192e/rtllib.h6
-rw-r--r--drivers/staging/rtl8192e/rtllib_debug.h2
-rw-r--r--drivers/staging/rtl8192e/rtllib_module.c52
-rw-r--r--drivers/staging/rtl8192e/rtllib_wx.c54
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211.h6
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c2
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c2
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_module.c3
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c70
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c4
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c6
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c6
-rw-r--r--drivers/staging/rtl8192u/ieee80211/rtl819x_BA.h4
-rw-r--r--drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c4
-rw-r--r--drivers/staging/rtl8192u/ieee80211/rtl819x_HTProc.c15
-rw-r--r--drivers/staging/rtl8192u/r8192U.h12
-rw-r--r--drivers/staging/rtl8192u/r8192U_core.c20
-rw-r--r--drivers/staging/rtl8192u/r8192U_dm.c77
-rw-r--r--drivers/staging/rtl8192u/r819xU_cmdpkt.c2
-rw-r--r--drivers/staging/rtl8192u/r819xU_phy.c34
-rw-r--r--drivers/staging/rtl8712/ieee80211.c5
-rw-r--r--drivers/staging/rtl8712/mlme_linux.c10
-rw-r--r--drivers/staging/rtl8712/os_intfs.c14
-rw-r--r--drivers/staging/rtl8712/rtl8712_led.c2
-rw-r--r--drivers/staging/rtl8712/rtl8712_recv.c11
-rw-r--r--drivers/staging/rtl8712/rtl871x_cmd.h2
-rw-r--r--drivers/staging/rtl8712/rtl871x_event.h8
-rw-r--r--drivers/staging/rtl8712/rtl871x_io.h14
-rw-r--r--drivers/staging/rtl8712/rtl871x_ioctl_linux.c143
-rw-r--r--drivers/staging/rtl8712/rtl871x_ioctl_rtl.c10
-rw-r--r--drivers/staging/rtl8712/rtl871x_mlme.h16
-rw-r--r--drivers/staging/rtl8712/rtl871x_mp_ioctl.h191
-rw-r--r--drivers/staging/rtl8712/rtl871x_pwrctrl.h10
-rw-r--r--drivers/staging/rtl8712/rtl871x_recv.h12
-rw-r--r--drivers/staging/rtl8712/rtl871x_xmit.c11
-rw-r--r--drivers/staging/rtl8712/wifi.h12
-rw-r--r--drivers/staging/rtl8712/wlan_bssdef.h6
-rw-r--r--drivers/staging/rtl8723bs/Kconfig11
-rw-r--r--drivers/staging/rtl8723bs/Makefile70
-rw-r--r--drivers/staging/rtl8723bs/TODO16
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_ap.c2678
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_btcoex.c243
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_cmd.c2226
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_debug.c1447
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_eeprom.c369
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_efuse.c635
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_ieee80211.c1430
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_io.c203
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_ioctl_set.c698
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_mlme.c3150
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_mlme_ext.c6941
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_odm.c197
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_pwrctrl.c1421
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_recv.c2689
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_rf.c64
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_security.c2437
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_sta_mgt.c641
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_wlan_util.c2328
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_xmit.c3100
-rw-r--r--drivers/staging/rtl8723bs/hal/Hal8723BPwrSeq.c138
-rw-r--r--drivers/staging/rtl8723bs/hal/Hal8723BReg.h442
-rw-r--r--drivers/staging/rtl8723bs/hal/HalBtc8723b1Ant.c3779
-rw-r--r--drivers/staging/rtl8723bs/hal/HalBtc8723b1Ant.h193
-rw-r--r--drivers/staging/rtl8723bs/hal/HalBtc8723b2Ant.c3729
-rw-r--r--drivers/staging/rtl8723bs/hal/HalBtc8723b2Ant.h155
-rw-r--r--drivers/staging/rtl8723bs/hal/HalBtcOutSrc.h566
-rw-r--r--drivers/staging/rtl8723bs/hal/HalHWImg8723B_BB.c643
-rw-r--r--drivers/staging/rtl8723bs/hal/HalHWImg8723B_BB.h48
-rw-r--r--drivers/staging/rtl8723bs/hal/HalHWImg8723B_MAC.c302
-rw-r--r--drivers/staging/rtl8723bs/hal/HalHWImg8723B_MAC.h28
-rw-r--r--drivers/staging/rtl8723bs/hal/HalHWImg8723B_RF.c799
-rw-r--r--drivers/staging/rtl8723bs/hal/HalHWImg8723B_RF.h49
-rw-r--r--drivers/staging/rtl8723bs/hal/HalPhyRf.c662
-rw-r--r--drivers/staging/rtl8723bs/hal/HalPhyRf.h63
-rw-r--r--drivers/staging/rtl8723bs/hal/HalPhyRf_8723B.c2091
-rw-r--r--drivers/staging/rtl8723bs/hal/HalPhyRf_8723B.h83
-rw-r--r--drivers/staging/rtl8723bs/hal/HalPwrSeqCmd.c205
-rw-r--r--drivers/staging/rtl8723bs/hal/Mp_Precomp.h33
-rw-r--r--drivers/staging/rtl8723bs/hal/hal_btcoex.c1720
-rw-r--r--drivers/staging/rtl8723bs/hal/hal_com.c1751
-rw-r--r--drivers/staging/rtl8723bs/hal/hal_com_phycfg.c3286
-rw-r--r--drivers/staging/rtl8723bs/hal/hal_intf.c474
-rw-r--r--drivers/staging/rtl8723bs/hal/hal_phy.c224
-rw-r--r--drivers/staging/rtl8723bs/hal/hal_sdio.c115
-rw-r--r--drivers/staging/rtl8723bs/hal/odm.c1446
-rw-r--r--drivers/staging/rtl8723bs/hal/odm.h1465
-rw-r--r--drivers/staging/rtl8723bs/hal/odm_AntDiv.c70
-rw-r--r--drivers/staging/rtl8723bs/hal/odm_AntDiv.h38
-rw-r--r--drivers/staging/rtl8723bs/hal/odm_CfoTracking.c338
-rw-r--r--drivers/staging/rtl8723bs/hal/odm_CfoTracking.h47
-rw-r--r--drivers/staging/rtl8723bs/hal/odm_DIG.c1221
-rw-r--r--drivers/staging/rtl8723bs/hal/odm_DIG.h195
-rw-r--r--drivers/staging/rtl8723bs/hal/odm_DynamicBBPowerSaving.c89
-rw-r--r--drivers/staging/rtl8723bs/hal/odm_DynamicBBPowerSaving.h39
-rw-r--r--drivers/staging/rtl8723bs/hal/odm_DynamicTxPower.c30
-rw-r--r--drivers/staging/rtl8723bs/hal/odm_DynamicTxPower.h37
-rw-r--r--drivers/staging/rtl8723bs/hal/odm_EdcaTurboCheck.c186
-rw-r--r--drivers/staging/rtl8723bs/hal/odm_EdcaTurboCheck.h31
-rw-r--r--drivers/staging/rtl8723bs/hal/odm_HWConfig.c535
-rw-r--r--drivers/staging/rtl8723bs/hal/odm_HWConfig.h162
-rw-r--r--drivers/staging/rtl8723bs/hal/odm_NoiseMonitor.c175
-rw-r--r--drivers/staging/rtl8723bs/hal/odm_NoiseMonitor.h47
-rw-r--r--drivers/staging/rtl8723bs/hal/odm_PathDiv.c42
-rw-r--r--drivers/staging/rtl8723bs/hal/odm_PathDiv.h29
-rw-r--r--drivers/staging/rtl8723bs/hal/odm_RTL8723B.c45
-rw-r--r--drivers/staging/rtl8723bs/hal/odm_RTL8723B.h22
-rw-r--r--drivers/staging/rtl8723bs/hal/odm_RegConfig8723B.c257
-rw-r--r--drivers/staging/rtl8723bs/hal/odm_RegConfig8723B.h65
-rw-r--r--drivers/staging/rtl8723bs/hal/odm_RegDefine11N.h172
-rw-r--r--drivers/staging/rtl8723bs/hal/odm_debug.c52
-rw-r--r--drivers/staging/rtl8723bs/hal/odm_debug.h166
-rw-r--r--drivers/staging/rtl8723bs/hal/odm_interface.h59
-rw-r--r--drivers/staging/rtl8723bs/hal/odm_precomp.h60
-rw-r--r--drivers/staging/rtl8723bs/hal/odm_reg.h103
-rw-r--r--drivers/staging/rtl8723bs/hal/odm_types.h102
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723b_cmd.c2358
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723b_dm.c300
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c4549
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723b_phycfg.c1050
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723b_rf6052.c224
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723b_rxdesc.c86
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c490
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c686
-rw-r--r--drivers/staging/rtl8723bs/hal/sdio_halinit.c1928
-rw-r--r--drivers/staging/rtl8723bs/hal/sdio_ops.c1294
-rw-r--r--drivers/staging/rtl8723bs/include/Hal8192CPhyReg.h1126
-rw-r--r--drivers/staging/rtl8723bs/include/Hal8723BPhyCfg.h128
-rw-r--r--drivers/staging/rtl8723bs/include/Hal8723BPhyReg.h77
-rw-r--r--drivers/staging/rtl8723bs/include/Hal8723BPwrSeq.h232
-rw-r--r--drivers/staging/rtl8723bs/include/HalPwrSeqCmd.h132
-rw-r--r--drivers/staging/rtl8723bs/include/HalVerDef.h127
-rw-r--r--drivers/staging/rtl8723bs/include/autoconf.h73
-rw-r--r--drivers/staging/rtl8723bs/include/basic_types.h209
-rw-r--r--drivers/staging/rtl8723bs/include/cmd_osdep.h26
-rw-r--r--drivers/staging/rtl8723bs/include/drv_conf.h37
-rw-r--r--drivers/staging/rtl8723bs/include/drv_types.h720
-rw-r--r--drivers/staging/rtl8723bs/include/drv_types_sdio.h39
-rw-r--r--drivers/staging/rtl8723bs/include/ethernet.h22
-rw-r--r--drivers/staging/rtl8723bs/include/hal_btcoex.h68
-rw-r--r--drivers/staging/rtl8723bs/include/hal_com.h309
-rw-r--r--drivers/staging/rtl8723bs/include/hal_com_h2c.h293
-rw-r--r--drivers/staging/rtl8723bs/include/hal_com_phycfg.h273
-rw-r--r--drivers/staging/rtl8723bs/include/hal_com_reg.h1725
-rw-r--r--drivers/staging/rtl8723bs/include/hal_data.h483
-rw-r--r--drivers/staging/rtl8723bs/include/hal_intf.h410
-rw-r--r--drivers/staging/rtl8723bs/include/hal_pg.h81
-rw-r--r--drivers/staging/rtl8723bs/include/hal_phy.h183
-rw-r--r--drivers/staging/rtl8723bs/include/hal_phy_reg.h25
-rw-r--r--drivers/staging/rtl8723bs/include/hal_sdio.h26
-rw-r--r--drivers/staging/rtl8723bs/include/ieee80211.h1345
-rw-r--r--drivers/staging/rtl8723bs/include/ioctl_cfg80211.h128
-rw-r--r--drivers/staging/rtl8723bs/include/mlme_osdep.h27
-rw-r--r--drivers/staging/rtl8723bs/include/osdep_intf.h88
-rw-r--r--drivers/staging/rtl8723bs/include/osdep_service.h281
-rw-r--r--drivers/staging/rtl8723bs/include/osdep_service_linux.h178
-rw-r--r--drivers/staging/rtl8723bs/include/recv_osdep.h48
-rw-r--r--drivers/staging/rtl8723bs/include/rtl8192c_recv.h50
-rw-r--r--drivers/staging/rtl8723bs/include/rtl8192c_rf.h39
-rw-r--r--drivers/staging/rtl8723bs/include/rtl8723b_cmd.h199
-rw-r--r--drivers/staging/rtl8723bs/include/rtl8723b_dm.h41
-rw-r--r--drivers/staging/rtl8723bs/include/rtl8723b_hal.h279
-rw-r--r--drivers/staging/rtl8723bs/include/rtl8723b_recv.h144
-rw-r--r--drivers/staging/rtl8723bs/include/rtl8723b_rf.h26
-rw-r--r--drivers/staging/rtl8723bs/include/rtl8723b_spec.h262
-rw-r--r--drivers/staging/rtl8723bs/include/rtl8723b_xmit.h458
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_ap.h47
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_beamforming.h135
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_br_ext.h63
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_btcoex.h64
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_byteorder.h24
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_cmd.h980
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_debug.h355
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_eeprom.h128
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_efuse.h132
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_event.h117
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_ht.h118
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_io.h373
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_ioctl.h80
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_ioctl_set.h41
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_mlme.h695
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_mlme_ext.h888
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_mp.h512
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_odm.h36
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_pwrctrl.h375
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_qos.h27
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_recv.h553
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_rf.h159
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_security.h440
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_version.h2
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_wifi_regd.h28
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_xmit.h528
-rw-r--r--drivers/staging/rtl8723bs/include/sdio_hal.h28
-rw-r--r--drivers/staging/rtl8723bs/include/sdio_ops.h49
-rw-r--r--drivers/staging/rtl8723bs/include/sdio_ops_linux.h40
-rw-r--r--drivers/staging/rtl8723bs/include/sdio_osintf.h24
-rw-r--r--drivers/staging/rtl8723bs/include/sta_info.h392
-rw-r--r--drivers/staging/rtl8723bs/include/wifi.h1158
-rw-r--r--drivers/staging/rtl8723bs/include/wlan_bssdef.h278
-rw-r--r--drivers/staging/rtl8723bs/include/xmit_osdep.h54
-rw-r--r--drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c3586
-rw-r--r--drivers/staging/rtl8723bs/os_dep/ioctl_linux.c5808
-rw-r--r--drivers/staging/rtl8723bs/os_dep/mlme_linux.c206
-rw-r--r--drivers/staging/rtl8723bs/os_dep/os_intfs.c1916
-rw-r--r--drivers/staging/rtl8723bs/os_dep/osdep_service.c481
-rw-r--r--drivers/staging/rtl8723bs/os_dep/recv_linux.c365
-rw-r--r--drivers/staging/rtl8723bs/os_dep/rtw_proc.c787
-rw-r--r--drivers/staging/rtl8723bs/os_dep/rtw_proc.h45
-rw-r--r--drivers/staging/rtl8723bs/os_dep/sdio_intf.c695
-rw-r--r--drivers/staging/rtl8723bs/os_dep/sdio_ops_linux.c599
-rw-r--r--drivers/staging/rtl8723bs/os_dep/wifi_regd.c164
-rw-r--r--drivers/staging/rtl8723bs/os_dep/xmit_linux.c296
-rw-r--r--drivers/staging/rts5208/ms.c2
-rw-r--r--drivers/staging/rts5208/rtsx_chip.c4
-rw-r--r--drivers/staging/rts5208/rtsx_transport.c3
-rw-r--r--drivers/staging/sm750fb/ddk750_chip.c16
-rw-r--r--drivers/staging/sm750fb/ddk750_chip.h2
-rw-r--r--drivers/staging/sm750fb/ddk750_display.c3
-rw-r--r--drivers/staging/sm750fb/ddk750_display.h7
-rw-r--r--drivers/staging/sm750fb/ddk750_dvi.c6
-rw-r--r--drivers/staging/sm750fb/ddk750_hwi2c.c2
-rw-r--r--drivers/staging/sm750fb/ddk750_mode.c107
-rw-r--r--drivers/staging/sm750fb/ddk750_mode.h21
-rw-r--r--drivers/staging/sm750fb/ddk750_power.c5
-rw-r--r--drivers/staging/sm750fb/ddk750_power.h3
-rw-r--r--drivers/staging/sm750fb/ddk750_reg.h4
-rw-r--r--drivers/staging/sm750fb/sm750.c57
-rw-r--r--drivers/staging/sm750fb/sm750.h6
-rw-r--r--drivers/staging/sm750fb/sm750_accel.c9
-rw-r--r--drivers/staging/sm750fb/sm750_cursor.c21
-rw-r--r--drivers/staging/sm750fb/sm750_hw.c2
-rw-r--r--drivers/staging/speakup/buffers.c36
-rw-r--r--drivers/staging/speakup/fakekey.c2
-rw-r--r--drivers/staging/speakup/i18n.c77
-rw-r--r--drivers/staging/speakup/keyhelp.c14
-rw-r--r--drivers/staging/speakup/kobjects.c96
-rw-r--r--drivers/staging/speakup/main.c309
-rw-r--r--drivers/staging/speakup/selection.c6
-rw-r--r--drivers/staging/speakup/serialio.c108
-rw-r--r--drivers/staging/speakup/speakup.h15
-rw-r--r--drivers/staging/speakup/speakup_acntpc.c21
-rw-r--r--drivers/staging/speakup/speakup_acntsa.c5
-rw-r--r--drivers/staging/speakup/speakup_apollo.c15
-rw-r--r--drivers/staging/speakup/speakup_audptr.c16
-rw-r--r--drivers/staging/speakup/speakup_bns.c3
-rw-r--r--drivers/staging/speakup/speakup_decext.c24
-rw-r--r--drivers/staging/speakup/speakup_decpc.c119
-rw-r--r--drivers/staging/speakup/speakup_dectlk.c22
-rw-r--r--drivers/staging/speakup/speakup_dtlk.c6
-rw-r--r--drivers/staging/speakup/speakup_dtlk.h10
-rw-r--r--drivers/staging/speakup/speakup_dummy.c3
-rw-r--r--drivers/staging/speakup/speakup_keypc.c9
-rw-r--r--drivers/staging/speakup/speakup_ltlk.c7
-rw-r--r--drivers/staging/speakup/speakup_soft.c93
-rw-r--r--drivers/staging/speakup/speakup_spkout.c12
-rw-r--r--drivers/staging/speakup/speakup_txprt.c3
-rw-r--r--drivers/staging/speakup/spk_priv.h17
-rw-r--r--drivers/staging/speakup/spk_types.h11
-rw-r--r--drivers/staging/speakup/synth.c105
-rw-r--r--drivers/staging/speakup/varhandlers.c18
-rw-r--r--drivers/staging/typec/Kconfig24
-rw-r--r--drivers/staging/typec/Makefile3
-rw-r--r--drivers/staging/typec/TODO15
-rw-r--r--drivers/staging/typec/fusb302/Kconfig7
-rw-r--r--drivers/staging/typec/fusb302/Makefile1
-rw-r--r--drivers/staging/typec/fusb302/TODO6
-rw-r--r--drivers/staging/typec/fusb302/fusb302.c1815
-rw-r--r--drivers/staging/typec/fusb302/fusb302_reg.h186
-rw-r--r--drivers/staging/typec/pd.h281
-rw-r--r--drivers/staging/typec/pd_bdo.h31
-rw-r--r--drivers/staging/typec/pd_vdo.h249
-rw-r--r--drivers/staging/typec/tcpci.c526
-rw-r--r--drivers/staging/typec/tcpci.h133
-rw-r--r--drivers/staging/typec/tcpm.c3465
-rw-r--r--drivers/staging/typec/tcpm.h150
-rw-r--r--drivers/staging/unisys/include/channel.h213
-rw-r--r--drivers/staging/unisys/include/iochannel.h87
-rw-r--r--drivers/staging/unisys/include/visorbus.h8
-rw-r--r--drivers/staging/unisys/visorbus/controlvmchannel.h208
-rw-r--r--drivers/staging/unisys/visorbus/vbuschannel.h57
-rw-r--r--drivers/staging/unisys/visorbus/visorbus_main.c420
-rw-r--r--drivers/staging/unisys/visorbus/visorbus_private.h26
-rw-r--r--drivers/staging/unisys/visorbus/visorchannel.c95
-rw-r--r--drivers/staging/unisys/visorbus/visorchipset.c1043
-rw-r--r--drivers/staging/unisys/visorbus/vmcallinterface.h149
-rw-r--r--drivers/staging/unisys/visorhba/visorhba_main.c95
-rw-r--r--drivers/staging/unisys/visorinput/visorinput.c21
-rw-r--r--drivers/staging/unisys/visornic/visornic_main.c258
-rw-r--r--drivers/staging/vc04_services/Kconfig32
-rw-r--r--drivers/staging/vc04_services/Makefile3
-rw-r--r--drivers/staging/vc04_services/bcm2835-audio/Kconfig8
-rw-r--r--drivers/staging/vc04_services/bcm2835-audio/Makefile (renamed from drivers/staging/bcm2835-audio/Makefile)0
-rw-r--r--drivers/staging/vc04_services/bcm2835-audio/TODO (renamed from drivers/staging/bcm2835-audio/TODO)0
-rw-r--r--drivers/staging/vc04_services/bcm2835-audio/bcm2835-ctl.c426
-rw-r--r--drivers/staging/vc04_services/bcm2835-audio/bcm2835-pcm.c568
-rw-r--r--drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c875
-rw-r--r--drivers/staging/vc04_services/bcm2835-audio/bcm2835.c472
-rw-r--r--drivers/staging/vc04_services/bcm2835-audio/bcm2835.h175
-rw-r--r--drivers/staging/vc04_services/bcm2835-audio/vc_vchi_audioserv_defs.h (renamed from drivers/staging/bcm2835-audio/vc_vchi_audioserv_defs.h)0
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/Kconfig11
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/Makefile (renamed from drivers/staging/media/platform/bcm2835/Makefile)0
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/TODO34
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c1966
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.h145
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/controls.c1333
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/mmal-common.h (renamed from drivers/staging/media/platform/bcm2835/mmal-common.h)0
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/mmal-encodings.h126
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/mmal-msg-common.h (renamed from drivers/staging/media/platform/bcm2835/mmal-msg-common.h)0
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/mmal-msg-format.h99
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/mmal-msg-port.h109
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/mmal-msg.h399
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/mmal-parameters.h687
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c2063
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.h174
-rw-r--r--drivers/staging/vc04_services/interface/vchi/TODO21
-rw-r--r--drivers/staging/vc04_services/interface/vchi/vchi_cfg.h2
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c8
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c550
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c259
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c7
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_if.h4
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_ioctl.h2
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_kern_lib.c4
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_memdrv.h12
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_pagelist.h3
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_shim.c56
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.c2
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.h2
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_version.c16
-rw-r--r--drivers/staging/vme/devices/vme_user.c10
-rw-r--r--drivers/staging/vt6655/baseband.h12
-rw-r--r--drivers/staging/vt6655/card.c25
-rw-r--r--drivers/staging/vt6655/rf.h2
-rw-r--r--drivers/staging/vt6655/rxtx.h10
-rw-r--r--drivers/staging/vt6656/main_usb.c15
-rw-r--r--drivers/staging/vt6656/rf.c13
-rw-r--r--drivers/staging/vt6656/rxtx.c32
-rw-r--r--drivers/staging/vt6656/usbpipe.c31
-rw-r--r--drivers/staging/vt6656/wcmd.c3
-rw-r--r--drivers/staging/wilc1000/coreconfigurator.h19
-rw-r--r--drivers/staging/wilc1000/host_interface.c57
-rw-r--r--drivers/staging/wilc1000/linux_mon.c2
-rw-r--r--drivers/staging/wilc1000/linux_wlan.c7
-rw-r--r--drivers/staging/wilc1000/wilc_sdio.c21
-rw-r--r--drivers/staging/wilc1000/wilc_spi.c78
-rw-r--r--drivers/staging/wilc1000/wilc_wfi_cfgoperations.c54
-rw-r--r--drivers/staging/wilc1000/wilc_wlan.c10
-rw-r--r--drivers/staging/wilc1000/wilc_wlan.h6
-rw-r--r--drivers/staging/wilc1000/wilc_wlan_cfg.c59
-rw-r--r--drivers/staging/wlan-ng/hfa384x.h58
-rw-r--r--drivers/staging/wlan-ng/hfa384x_usb.c4
-rw-r--r--drivers/staging/wlan-ng/p80211conv.c2
-rw-r--r--drivers/staging/wlan-ng/p80211conv.h28
-rw-r--r--drivers/staging/wlan-ng/p80211req.c3
-rw-r--r--drivers/staging/wlan-ng/prism2fw.c2
-rw-r--r--drivers/staging/wlan-ng/prism2mgmt.c1
-rw-r--r--drivers/staging/wlan-ng/prism2sta.c2
-rw-r--r--drivers/staging/xgifb/XGI_main_26.c55
-rw-r--r--drivers/staging/xgifb/vb_init.h3
-rw-r--r--drivers/staging/xgifb/vb_setmode.c12
-rw-r--r--drivers/tee/Kconfig18
-rw-r--r--drivers/tee/Makefile5
-rw-r--r--drivers/tee/optee/Kconfig7
-rw-r--r--drivers/tee/optee/Makefile5
-rw-r--r--drivers/tee/optee/call.c444
-rw-r--r--drivers/tee/optee/core.c622
-rw-r--r--drivers/tee/optee/optee_msg.h418
-rw-r--r--drivers/tee/optee/optee_private.h183
-rw-r--r--drivers/tee/optee/optee_smc.h450
-rw-r--r--drivers/tee/optee/rpc.c396
-rw-r--r--drivers/tee/optee/supp.c273
-rw-r--r--drivers/tee/tee_core.c893
-rw-r--r--drivers/tee/tee_private.h129
-rw-r--r--drivers/tee/tee_shm.c358
-rw-r--r--drivers/tee/tee_shm_pool.c156
-rw-r--r--drivers/tty/Makefile3
-rw-r--r--drivers/tty/hvc/hvc_console.c4
-rw-r--r--drivers/tty/hvc/hvcs.c2
-rw-r--r--drivers/tty/n_gsm.c21
-rw-r--r--drivers/tty/n_hdlc.c10
-rw-r--r--drivers/tty/pty.c7
-rw-r--r--drivers/tty/serdev/core.c41
-rw-r--r--drivers/tty/serial/8250/8250_core.c2
-rw-r--r--drivers/tty/serial/8250/8250_dw.c13
-rw-r--r--drivers/tty/serial/8250/8250_early.c24
-rw-r--r--drivers/tty/serial/8250/8250_exar.c2
-rw-r--r--drivers/tty/serial/8250/8250_fintek.c43
-rw-r--r--drivers/tty/serial/8250/8250_lpss.c3
-rw-r--r--drivers/tty/serial/8250/8250_port.c4
-rw-r--r--drivers/tty/serial/Kconfig11
-rw-r--r--drivers/tty/serial/Makefile3
-rw-r--r--drivers/tty/serial/altera_jtaguart.c20
-rw-r--r--drivers/tty/serial/altera_uart.c32
-rw-r--r--drivers/tty/serial/amba-pl011.c54
-rw-r--r--drivers/tty/serial/atmel_serial.c7
-rw-r--r--drivers/tty/serial/atmel_serial.h (renamed from include/linux/atmel_serial.h)0
-rw-r--r--drivers/tty/serial/fsl_lpuart.c20
-rw-r--r--drivers/tty/serial/imx.c99
-rw-r--r--drivers/tty/serial/omap-serial.c9
-rw-r--r--drivers/tty/serial/samsung.c44
-rw-r--r--drivers/tty/serial/sb1250-duart.c18
-rw-r--r--drivers/tty/serial/serial_core.c34
-rw-r--r--drivers/tty/serial/sh-sci.c43
-rw-r--r--drivers/tty/serial/sprd_serial.c2
-rw-r--r--drivers/tty/serial/st-asc.c12
-rw-r--r--drivers/tty/serial/uartlite.c2
-rw-r--r--drivers/tty/serial/xilinx_uartps.c71
-rw-r--r--drivers/tty/tty_baudrate.c232
-rw-r--r--drivers/tty/tty_io.c633
-rw-r--r--drivers/tty/tty_ioctl.c222
-rw-r--r--drivers/tty/tty_jobctrl.c554
-rw-r--r--drivers/tty/vt/selection.c18
-rw-r--r--drivers/tty/vt/vt.c2
-rw-r--r--drivers/uio/uio.c2
-rw-r--r--drivers/uio/uio_mf624.c48
-rw-r--r--drivers/usb/Kconfig14
-rw-r--r--drivers/usb/Makefile4
-rw-r--r--drivers/usb/atm/cxacru.c2
-rw-r--r--drivers/usb/chipidea/Kconfig2
-rw-r--r--drivers/usb/chipidea/ci.h2
-rw-r--r--drivers/usb/chipidea/core.c67
-rw-r--r--drivers/usb/chipidea/host.c3
-rw-r--r--drivers/usb/chipidea/udc.c33
-rw-r--r--drivers/usb/class/Kconfig2
-rw-r--r--drivers/usb/class/cdc-acm.c151
-rw-r--r--drivers/usb/class/cdc-acm.h4
-rw-r--r--drivers/usb/class/cdc-wdm.c103
-rw-r--r--drivers/usb/class/usblp.c37
-rw-r--r--drivers/usb/class/usbtmc.c56
-rw-r--r--drivers/usb/common/ulpi.c2
-rw-r--r--drivers/usb/common/usb-otg-fsm.c7
-rw-r--r--drivers/usb/core/Makefile2
-rw-r--r--drivers/usb/core/buffer.c12
-rw-r--r--drivers/usb/core/devices.c4
-rw-r--r--drivers/usb/core/driver.c21
-rw-r--r--drivers/usb/core/file.c9
-rw-r--r--drivers/usb/core/hcd.c82
-rw-r--r--drivers/usb/core/hub.c11
-rw-r--r--drivers/usb/core/of.c23
-rw-r--r--drivers/usb/core/usb.c152
-rw-r--r--drivers/usb/dwc2/Kconfig2
-rw-r--r--drivers/usb/dwc2/core.h5
-rw-r--r--drivers/usb/dwc2/hcd.c16
-rw-r--r--drivers/usb/dwc2/hw.h2
-rw-r--r--drivers/usb/dwc2/params.c19
-rw-r--r--drivers/usb/dwc2/platform.c18
-rw-r--r--drivers/usb/dwc3/Kconfig3
-rw-r--r--drivers/usb/dwc3/Makefile4
-rw-r--r--drivers/usb/dwc3/core.c109
-rw-r--r--drivers/usb/dwc3/core.h261
-rw-r--r--drivers/usb/dwc3/debug.h28
-rw-r--r--drivers/usb/dwc3/debugfs.c105
-rw-r--r--drivers/usb/dwc3/drd.c85
-rw-r--r--drivers/usb/dwc3/dwc3-exynos.c22
-rw-r--r--drivers/usb/dwc3/dwc3-omap.c48
-rw-r--r--drivers/usb/dwc3/dwc3-pci.c5
-rw-r--r--drivers/usb/dwc3/ep0.c151
-rw-r--r--drivers/usb/dwc3/gadget.c197
-rw-r--r--drivers/usb/dwc3/gadget.h20
-rw-r--r--drivers/usb/dwc3/trace.h58
-rw-r--r--drivers/usb/gadget/Kconfig11
-rw-r--r--drivers/usb/gadget/function/f_fs.c80
-rw-r--r--drivers/usb/gadget/function/u_ether.c24
-rw-r--r--drivers/usb/gadget/function/u_fs.h14
-rw-r--r--drivers/usb/gadget/function/uvc_configfs.c16
-rw-r--r--drivers/usb/gadget/legacy/inode.c17
-rw-r--r--drivers/usb/gadget/udc/Kconfig32
-rw-r--r--drivers/usb/gadget/udc/Makefile3
-rw-r--r--drivers/usb/gadget/udc/amd5536udc.c266
-rw-r--r--drivers/usb/gadget/udc/amd5536udc.h40
-rw-r--r--drivers/usb/gadget/udc/amd5536udc_pci.c217
-rw-r--r--drivers/usb/gadget/udc/atmel_usba_udc.c49
-rw-r--r--drivers/usb/gadget/udc/bdc/Kconfig2
-rw-r--r--drivers/usb/gadget/udc/core.c1
-rw-r--r--drivers/usb/gadget/udc/dummy_hcd.c20
-rw-r--r--drivers/usb/gadget/udc/fsl_udc_core.c2
-rw-r--r--drivers/usb/gadget/udc/mv_u3d_core.c15
-rw-r--r--drivers/usb/gadget/udc/mv_udc_core.c13
-rw-r--r--drivers/usb/gadget/udc/net2272.c8
-rw-r--r--drivers/usb/gadget/udc/net2272.h2
-rw-r--r--drivers/usb/gadget/udc/net2280.c12
-rw-r--r--drivers/usb/gadget/udc/net2280.h2
-rw-r--r--drivers/usb/gadget/udc/pch_udc.c32
-rw-r--r--drivers/usb/gadget/udc/pxa27x_udc.c3
-rw-r--r--drivers/usb/gadget/udc/renesas_usb3.c164
-rw-r--r--drivers/usb/host/Kconfig12
-rw-r--r--drivers/usb/host/Makefile4
-rw-r--r--drivers/usb/host/ehci-dbg.c2
-rw-r--r--drivers/usb/host/ehci-fsl.c4
-rw-r--r--drivers/usb/host/ehci-hcd.c2
-rw-r--r--drivers/usb/host/ehci-mem.c12
-rw-r--r--drivers/usb/host/ehci-orion.c36
-rw-r--r--drivers/usb/host/ehci-platform.c10
-rw-r--r--drivers/usb/host/fotg210-hcd.c2
-rw-r--r--drivers/usb/host/ohci-hcd.c5
-rw-r--r--drivers/usb/host/ohci-pci.c16
-rw-r--r--drivers/usb/host/ohci-platform.c3
-rw-r--r--drivers/usb/host/ohci.h3
-rw-r--r--drivers/usb/host/oxu210hp-hcd.c2
-rw-r--r--drivers/usb/host/pci-quirks.h4
-rw-r--r--drivers/usb/host/uhci-hcd.c2
-rw-r--r--drivers/usb/host/uhci-hcd.h2
-rw-r--r--drivers/usb/host/xhci-dbg.c308
-rw-r--r--drivers/usb/host/xhci-hub.c167
-rw-r--r--drivers/usb/host/xhci-mem.c53
-rw-r--r--drivers/usb/host/xhci-pci.c3
-rw-r--r--drivers/usb/host/xhci-plat.c121
-rw-r--r--drivers/usb/host/xhci-plat.h1
-rw-r--r--drivers/usb/host/xhci-rcar.c11
-rw-r--r--drivers/usb/host/xhci-rcar.h6
-rw-r--r--drivers/usb/host/xhci-ring.c266
-rw-r--r--drivers/usb/host/xhci-trace.h174
-rw-r--r--drivers/usb/host/xhci.c334
-rw-r--r--drivers/usb/host/xhci.h333
-rw-r--r--drivers/usb/isp1760/isp1760-if.c8
-rw-r--r--drivers/usb/misc/adutux.c55
-rw-r--r--drivers/usb/misc/appledisplay.c19
-rw-r--r--drivers/usb/misc/chaoskey.c22
-rw-r--r--drivers/usb/misc/ftdi-elan.c42
-rw-r--r--drivers/usb/misc/idmouse.c28
-rw-r--r--drivers/usb/misc/iowarrior.c25
-rw-r--r--drivers/usb/misc/ldusb.c53
-rw-r--r--drivers/usb/misc/legousbtower.c66
-rw-r--r--drivers/usb/misc/lvstest.c12
-rw-r--r--drivers/usb/misc/sisusbvga/sisusb_con.c2
-rw-r--r--drivers/usb/misc/usblcd.c47
-rw-r--r--drivers/usb/misc/usbtest.c50
-rw-r--r--drivers/usb/misc/uss720.c10
-rw-r--r--drivers/usb/misc/yurex.c16
-rw-r--r--drivers/usb/mtu3/mtu3_dr.c19
-rw-r--r--drivers/usb/musb/Kconfig4
-rw-r--r--drivers/usb/musb/cppi_dma.c11
-rw-r--r--drivers/usb/musb/da8xx.c43
-rw-r--r--drivers/usb/musb/musb_core.c2
-rw-r--r--drivers/usb/musb/musb_core.h1
-rw-r--r--drivers/usb/musb/musb_cppi41.c4
-rw-r--r--drivers/usb/phy/Kconfig7
-rw-r--r--drivers/usb/phy/Makefile1
-rw-r--r--drivers/usb/phy/phy-fsl-usb.c7
-rw-r--r--drivers/usb/serial/aircable.c36
-rw-r--r--drivers/usb/serial/ark3116.c17
-rw-r--r--drivers/usb/serial/cyberjack.c11
-rw-r--r--drivers/usb/serial/digi_acceleport.c23
-rw-r--r--drivers/usb/serial/f81534.c137
-rw-r--r--drivers/usb/serial/ftdi_sio.c54
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h6
-rw-r--r--drivers/usb/serial/generic.c32
-rw-r--r--drivers/usb/serial/io_edgeport.c28
-rw-r--r--drivers/usb/serial/io_ti.c49
-rw-r--r--drivers/usb/serial/ipaq.c51
-rw-r--r--drivers/usb/serial/iuu_phoenix.c22
-rw-r--r--drivers/usb/serial/keyspan_pda.c16
-rw-r--r--drivers/usb/serial/kobil_sct.c13
-rw-r--r--drivers/usb/serial/mos7720.c75
-rw-r--r--drivers/usb/serial/mos7840.c36
-rw-r--r--drivers/usb/serial/mxuport.c133
-rw-r--r--drivers/usb/serial/omninet.c130
-rw-r--r--drivers/usb/serial/opticon.c12
-rw-r--r--drivers/usb/serial/oti6858.c19
-rw-r--r--drivers/usb/serial/pl2303.c82
-rw-r--r--drivers/usb/serial/quatech2.c7
-rw-r--r--drivers/usb/serial/sierra.c3
-rw-r--r--drivers/usb/serial/spcp8x5.c16
-rw-r--r--drivers/usb/serial/symbolserial.c12
-rw-r--r--drivers/usb/serial/ti_usb_3410_5052.c10
-rw-r--r--drivers/usb/serial/usb-serial.c230
-rw-r--r--drivers/usb/serial/usb_debug.c2
-rw-r--r--drivers/usb/serial/visor.c146
-rw-r--r--drivers/usb/serial/whiteheat.c32
-rw-r--r--drivers/usb/storage/karma.c6
-rw-r--r--drivers/usb/storage/unusual_devs.h4
-rw-r--r--drivers/usb/storage/usb.c40
-rw-r--r--drivers/usb/typec/Kconfig22
-rw-r--r--drivers/usb/typec/Makefile2
-rw-r--r--drivers/usb/typec/typec.c1262
-rw-r--r--drivers/usb/typec/typec_wcove.c377
-rw-r--r--drivers/usb/usb-skeleton.c59
-rw-r--r--drivers/usb/usbip/vhci_hcd.c32
-rw-r--r--drivers/vfio/vfio_iommu_spapr_tce.c15
-rw-r--r--drivers/vfio/vfio_iommu_type1.c150
-rw-r--r--drivers/vhost/net.c9
-rw-r--r--drivers/vhost/vhost.c15
-rw-r--r--drivers/vhost/vsock.c9
-rw-r--r--drivers/video/fbdev/intelfb/intelfbdrv.c2
-rw-r--r--drivers/video/fbdev/vermilion/vermilion.c2
-rw-r--r--drivers/virt/fsl_hypervisor.c7
-rw-r--r--drivers/vme/vme.c469
-rw-r--r--drivers/w1/masters/matrox_w1.c6
-rw-r--r--drivers/w1/slaves/Kconfig6
-rw-r--r--drivers/w1/slaves/Makefile1
-rw-r--r--drivers/w1/slaves/w1_ds2438.c390
-rw-r--r--drivers/w1/slaves/w1_ds2760.h10
-rw-r--r--drivers/w1/w1.c1
-rw-r--r--drivers/w1/w1_family.h1
-rw-r--r--drivers/w1/w1_int.c1
-rw-r--r--drivers/w1/w1_io.c1
-rw-r--r--drivers/w1/w1_log.h31
-rw-r--r--drivers/w1/w1_netlink.c5
-rw-r--r--drivers/watchdog/hpwdt.c2
-rw-r--r--drivers/watchdog/iTCO_wdt.c94
-rw-r--r--drivers/xen/balloon.c30
-rw-r--r--drivers/xen/efi.c18
-rw-r--r--drivers/xen/events/events_base.c25
-rw-r--r--drivers/xen/evtchn.c14
-rw-r--r--drivers/xen/platform-pci.c14
-rw-r--r--drivers/xen/swiotlb-xen.c8
-rw-r--r--drivers/xen/xenfs/super.c4
-rw-r--r--drivers/zorro/zorro-driver.c15
-rw-r--r--drivers/zorro/zorro-sysfs.c76
-rw-r--r--drivers/zorro/zorro.c3
-rw-r--r--drivers/zorro/zorro.h3
-rw-r--r--firmware/Makefile3
-rw-r--r--fs/affs/affs.h4
-rw-r--r--fs/affs/amigaffs.h (renamed from include/linux/amigaffs.h)0
-rw-r--r--fs/affs/dir.c2
-rw-r--r--fs/affs/file.c10
-rw-r--r--fs/affs/inode.c1
-rw-r--r--fs/affs/namei.c87
-rw-r--r--fs/befs/linuxvfs.c15
-rw-r--r--fs/binfmt_misc.c2
-rw-r--r--fs/block_dev.c117
-rw-r--r--fs/btrfs/backref.c41
-rw-r--r--fs/btrfs/btrfs_inode.h7
-rw-r--r--fs/btrfs/compression.c18
-rw-r--r--fs/btrfs/ctree.c29
-rw-r--r--fs/btrfs/ctree.h34
-rw-r--r--fs/btrfs/delayed-inode.c46
-rw-r--r--fs/btrfs/delayed-inode.h6
-rw-r--r--fs/btrfs/delayed-ref.c8
-rw-r--r--fs/btrfs/delayed-ref.h8
-rw-r--r--fs/btrfs/dev-replace.c9
-rw-r--r--fs/btrfs/disk-io.c13
-rw-r--r--fs/btrfs/disk-io.h4
-rw-r--r--fs/btrfs/extent-tree.c35
-rw-r--r--fs/btrfs/extent_io.c59
-rw-r--r--fs/btrfs/extent_io.h8
-rw-r--r--fs/btrfs/extent_map.c10
-rw-r--r--fs/btrfs/extent_map.h3
-rw-r--r--fs/btrfs/file.c82
-rw-r--r--fs/btrfs/free-space-cache.c2
-rw-r--r--fs/btrfs/free-space-tree.c3
-rw-r--r--fs/btrfs/inode.c289
-rw-r--r--fs/btrfs/ioctl.c42
-rw-r--r--fs/btrfs/ordered-data.c20
-rw-r--r--fs/btrfs/ordered-data.h2
-rw-r--r--fs/btrfs/qgroup.c102
-rw-r--r--fs/btrfs/qgroup.h51
-rw-r--r--fs/btrfs/raid56.c38
-rw-r--r--fs/btrfs/reada.c37
-rw-r--r--fs/btrfs/root-tree.c3
-rw-r--r--fs/btrfs/scrub.c331
-rw-r--r--fs/btrfs/send.c50
-rw-r--r--fs/btrfs/super.c3
-rw-r--r--fs/btrfs/tests/btrfs-tests.c1
-rw-r--r--fs/btrfs/transaction.c48
-rw-r--r--fs/btrfs/transaction.h6
-rw-r--r--fs/btrfs/tree-log.c2
-rw-r--r--fs/btrfs/volumes.c854
-rw-r--r--fs/btrfs/volumes.h8
-rw-r--r--fs/buffer.c32
-rw-r--r--fs/ceph/addr.c10
-rw-r--r--fs/ceph/caps.c25
-rw-r--r--fs/ceph/debugfs.c21
-rw-r--r--fs/ceph/dir.c23
-rw-r--r--fs/ceph/file.c77
-rw-r--r--fs/ceph/inode.c17
-rw-r--r--fs/ceph/mds_client.c79
-rw-r--r--fs/ceph/mds_client.h15
-rw-r--r--fs/ceph/mdsmap.c44
-rw-r--r--fs/ceph/snap.c2
-rw-r--r--fs/ceph/super.c7
-rw-r--r--fs/ceph/super.h31
-rw-r--r--fs/ceph/xattr.c3
-rw-r--r--fs/char_dev.c86
-rw-r--r--fs/cifs/cifs_unicode.c6
-rw-r--r--fs/cifs/cifs_unicode.h5
-rw-r--r--fs/cifs/cifsencrypt.c4
-rw-r--r--fs/cifs/cifsfs.c15
-rw-r--r--fs/cifs/cifsglob.h20
-rw-r--r--fs/cifs/cifsproto.h3
-rw-r--r--fs/cifs/cifssmb.c13
-rw-r--r--fs/cifs/connect.c9
-rw-r--r--fs/cifs/file.c357
-rw-r--r--fs/cifs/inode.c28
-rw-r--r--fs/cifs/ioctl.c4
-rw-r--r--fs/cifs/misc.c122
-rw-r--r--fs/cifs/netmisc.c6
-rw-r--r--fs/cifs/smb2misc.c5
-rw-r--r--fs/cifs/smb2ops.c1
-rw-r--r--fs/cifs/smb2pdu.c14
-rw-r--r--fs/compat_ioctl.c2
-rw-r--r--fs/crypto/fname.c90
-rw-r--r--fs/crypto/fscrypt_private.h13
-rw-r--r--fs/crypto/keyinfo.c3
-rw-r--r--fs/crypto/policy.c98
-rw-r--r--fs/dax.c337
-rw-r--r--fs/dcache.c4
-rw-r--r--fs/debugfs/inode.c2
-rw-r--r--fs/exofs/dir.c3
-rw-r--r--fs/ext2/inode.c9
-rw-r--r--fs/ext4/Makefile10
-rw-r--r--fs/ext4/ext4.h13
-rw-r--r--fs/ext4/fsmap.c722
-rw-r--r--fs/ext4/fsmap.h69
-rw-r--r--fs/ext4/ialloc.c17
-rw-r--r--fs/ext4/inline.c2
-rw-r--r--fs/ext4/inode.c49
-rw-r--r--fs/ext4/ioctl.c92
-rw-r--r--fs/ext4/mballoc.c53
-rw-r--r--fs/ext4/mballoc.h17
-rw-r--r--fs/ext4/namei.c129
-rw-r--r--fs/ext4/page-io.c11
-rw-r--r--fs/ext4/super.c18
-rw-r--r--fs/ext4/sysfs.c8
-rw-r--r--fs/ext4/xattr.c63
-rw-r--r--fs/f2fs/checkpoint.c77
-rw-r--r--fs/f2fs/data.c200
-rw-r--r--fs/f2fs/debug.c62
-rw-r--r--fs/f2fs/dir.c71
-rw-r--r--fs/f2fs/extent_cache.c326
-rw-r--r--fs/f2fs/f2fs.h343
-rw-r--r--fs/f2fs/file.c177
-rw-r--r--fs/f2fs/gc.c93
-rw-r--r--fs/f2fs/hash.c7
-rw-r--r--fs/f2fs/inline.c38
-rw-r--r--fs/f2fs/inode.c23
-rw-r--r--fs/f2fs/namei.c60
-rw-r--r--fs/f2fs/node.c150
-rw-r--r--fs/f2fs/node.h31
-rw-r--r--fs/f2fs/recovery.c8
-rw-r--r--fs/f2fs/segment.c792
-rw-r--r--fs/f2fs/segment.h144
-rw-r--r--fs/f2fs/super.c48
-rw-r--r--fs/f2fs/trace.c4
-rw-r--r--fs/f2fs/xattr.c31
-rw-r--r--fs/f2fs/xattr.h8
-rw-r--r--fs/fcntl.c14
-rw-r--r--fs/file.c2
-rw-r--r--fs/fuse/control.c2
-rw-r--r--fs/fuse/dev.c24
-rw-r--r--fs/fuse/file.c30
-rw-r--r--fs/fuse/fuse_i.h11
-rw-r--r--fs/fuse/inode.c9
-rw-r--r--fs/gfs2/bmap.c741
-rw-r--r--fs/gfs2/file.c6
-rw-r--r--fs/gfs2/glock.c81
-rw-r--r--fs/gfs2/incore.h8
-rw-r--r--fs/gfs2/inode.c4
-rw-r--r--fs/gfs2/rgrp.c7
-rw-r--r--fs/gfs2/rgrp.h7
-rw-r--r--fs/gfs2/super.c11
-rw-r--r--fs/hfs/extent.c4
-rw-r--r--fs/hfsplus/extents.c5
-rw-r--r--fs/inode.c5
-rw-r--r--fs/internal.h2
-rw-r--r--fs/iomap.c19
-rw-r--r--fs/jbd2/journal.c30
-rw-r--r--fs/libfs.c2
-rw-r--r--fs/namei.c2
-rw-r--r--fs/nfs/internal.h6
-rw-r--r--fs/nfs/super.c28
-rw-r--r--fs/nfsd/nfsctl.c2
-rw-r--r--fs/nsfs.c4
-rw-r--r--fs/open.c9
-rw-r--r--fs/orangefs/devorangefs-req.c4
-rw-r--r--fs/orangefs/dir.c640
-rw-r--r--fs/orangefs/downcall.h16
-rw-r--r--fs/orangefs/file.c6
-rw-r--r--fs/orangefs/inode.c19
-rw-r--r--fs/orangefs/namei.c5
-rw-r--r--fs/orangefs/orangefs-debugfs.c3
-rw-r--r--fs/orangefs/orangefs-dev-proto.h7
-rw-r--r--fs/orangefs/orangefs-kernel.h9
-rw-r--r--fs/orangefs/orangefs-utils.c98
-rw-r--r--fs/orangefs/protocol.h9
-rw-r--r--fs/orangefs/super.c28
-rw-r--r--fs/orangefs/waitqueue.c9
-rw-r--r--fs/orangefs/xattr.c26
-rw-r--r--fs/overlayfs/copy_up.c82
-rw-r--r--fs/overlayfs/dir.c37
-rw-r--r--fs/overlayfs/inode.c103
-rw-r--r--fs/overlayfs/namei.c141
-rw-r--r--fs/overlayfs/overlayfs.h41
-rw-r--r--fs/overlayfs/ovl_entry.h2
-rw-r--r--fs/overlayfs/super.c40
-rw-r--r--fs/overlayfs/util.c19
-rw-r--r--fs/proc/base.c5
-rw-r--r--fs/proc/generic.c1
-rw-r--r--fs/proc/inode.c2
-rw-r--r--fs/proc/namespaces.c1
-rw-r--r--fs/proc/proc_sysctl.c4
-rw-r--r--fs/reiserfs/item_ops.c24
-rw-r--r--fs/select.c5
-rw-r--r--fs/seq_file.c16
-rw-r--r--fs/signalfd.c2
-rw-r--r--fs/tracefs/inode.c2
-rw-r--r--fs/ubifs/dir.c23
-rw-r--r--fs/ubifs/file.c12
-rw-r--r--fs/ubifs/ioctl.c2
-rw-r--r--fs/ubifs/misc.h10
-rw-r--r--fs/ubifs/sb.c14
-rw-r--r--fs/ubifs/xattr.c6
-rw-r--r--fs/ufs/ialloc.c6
-rw-r--r--fs/xattr.c27
-rw-r--r--fs/xfs/Makefile1
-rw-r--r--fs/xfs/kmem.c2
-rw-r--r--fs/xfs/libxfs/xfs_alloc.c57
-rw-r--r--fs/xfs/libxfs/xfs_alloc.h12
-rw-r--r--fs/xfs/libxfs/xfs_alloc_btree.c172
-rw-r--r--fs/xfs/libxfs/xfs_bmap.c354
-rw-r--r--fs/xfs/libxfs/xfs_bmap.h14
-rw-r--r--fs/xfs/libxfs/xfs_bmap_btree.c43
-rw-r--r--fs/xfs/libxfs/xfs_bmap_btree.h22
-rw-r--r--fs/xfs/libxfs/xfs_btree.c15
-rw-r--r--fs/xfs/libxfs/xfs_btree.h2
-rw-r--r--fs/xfs/libxfs/xfs_dquot_buf.c7
-rw-r--r--fs/xfs/libxfs/xfs_format.h11
-rw-r--r--fs/xfs/libxfs/xfs_fs.h13
-rw-r--r--fs/xfs/libxfs/xfs_inode_buf.c2
-rw-r--r--fs/xfs/libxfs/xfs_inode_fork.c90
-rw-r--r--fs/xfs/libxfs/xfs_rmap.c56
-rw-r--r--fs/xfs/libxfs/xfs_rmap.h4
-rw-r--r--fs/xfs/libxfs/xfs_rtbitmap.c70
-rw-r--r--fs/xfs/libxfs/xfs_trans_space.h23
-rw-r--r--fs/xfs/xfs_aops.c12
-rw-r--r--fs/xfs/xfs_bmap_item.c6
-rw-r--r--fs/xfs/xfs_bmap_util.c20
-rw-r--r--fs/xfs/xfs_buf.c24
-rw-r--r--fs/xfs/xfs_buf.h2
-rw-r--r--fs/xfs/xfs_dir2_readdir.c15
-rw-r--r--fs/xfs/xfs_discard.c10
-rw-r--r--fs/xfs/xfs_extfree_item.c1
-rw-r--r--fs/xfs/xfs_fsmap.c940
-rw-r--r--fs/xfs/xfs_fsmap.h53
-rw-r--r--fs/xfs/xfs_icache.c58
-rw-r--r--fs/xfs/xfs_icache.h8
-rw-r--r--fs/xfs/xfs_inode.c9
-rw-r--r--fs/xfs/xfs_inode.h4
-rw-r--r--fs/xfs/xfs_inode_item.c29
-rw-r--r--fs/xfs/xfs_ioctl.c89
-rw-r--r--fs/xfs/xfs_ioctl32.c2
-rw-r--r--fs/xfs/xfs_iomap.c18
-rw-r--r--fs/xfs/xfs_linux.h85
-rw-r--r--fs/xfs/xfs_log.c4
-rw-r--r--fs/xfs/xfs_log_recover.c2
-rw-r--r--fs/xfs/xfs_mount.c4
-rw-r--r--fs/xfs/xfs_mount.h5
-rw-r--r--fs/xfs/xfs_qm.c11
-rw-r--r--fs/xfs/xfs_qm_syscalls.c3
-rw-r--r--fs/xfs/xfs_refcount_item.c1
-rw-r--r--fs/xfs/xfs_reflink.c39
-rw-r--r--fs/xfs/xfs_rmap_item.c1
-rw-r--r--fs/xfs/xfs_rtalloc.h22
-rw-r--r--fs/xfs/xfs_super.c8
-rw-r--r--fs/xfs/xfs_trace.c1
-rw-r--r--fs/xfs/xfs_trace.h144
-rw-r--r--fs/xfs/xfs_trans.c39
-rw-r--r--fs/xfs/xfs_trans.h3
-rw-r--r--fs/xfs/xfs_trans_ail.c71
-rw-r--r--fs/xfs/xfs_trans_priv.h15
-rw-r--r--include/acpi/acconfig.h1
-rw-r--r--include/acpi/acpi_bus.h11
-rw-r--r--include/acpi/acpixf.h2
-rw-r--r--include/acpi/actbl2.h10
-rw-r--r--include/asm-generic/set_memory.h12
-rw-r--r--include/asm-generic/vmlinux.lds.h1
-rw-r--r--include/drm/drm_mem_util.h9
-rw-r--r--include/dt-bindings/clock/gxbb-clkc.h11
-rw-r--r--include/dt-bindings/clock/r7s72100-clock.h9
-rw-r--r--include/dt-bindings/clock/r8a73a4-clock.h1
-rw-r--r--include/dt-bindings/clock/r8a7790-clock.h1
-rw-r--r--include/dt-bindings/clock/r8a7791-clock.h1
-rw-r--r--include/dt-bindings/clock/r8a7792-clock.h2
-rw-r--r--include/dt-bindings/clock/r8a7793-clock.h5
-rw-r--r--include/dt-bindings/clock/r8a7794-clock.h2
-rw-r--r--include/dt-bindings/genpd/k2g.h90
-rw-r--r--include/dt-bindings/gpio/gpio.h12
-rw-r--r--include/dt-bindings/mfd/stm32f7-rcc.h112
-rw-r--r--include/dt-bindings/pinctrl/hisi.h15
-rw-r--r--include/dt-bindings/power/imx7-power.h16
-rw-r--r--include/dt-bindings/power/r8a7795-sysc.h2
-rw-r--r--include/dt-bindings/reset/altr,rst-mgr-a10sr.h33
-rw-r--r--include/dt-bindings/reset/imx7-reset.h62
-rw-r--r--include/kvm/arm_arch_timer.h2
-rw-r--r--include/kvm/arm_pmu.h7
-rw-r--r--include/kvm/arm_vgic.h9
-rw-r--r--include/linux/acpi.h18
-rw-r--r--include/linux/acpi_iort.h6
-rw-r--r--include/linux/amba/pl080.h50
-rw-r--r--include/linux/amba/pl330.h35
-rw-r--r--include/linux/bcma/bcma_driver_pci.h2
-rw-r--r--include/linux/blk-mq.h9
-rw-r--r--include/linux/blkdev.h21
-rw-r--r--include/linux/buffer_head.h2
-rw-r--r--include/linux/cdev.h5
-rw-r--r--include/linux/ceph/ceph_features.h4
-rw-r--r--include/linux/ceph/ceph_fs.h14
-rw-r--r--include/linux/ceph/cls_lock_client.h5
-rw-r--r--include/linux/ceph/libceph.h8
-rw-r--r--include/linux/ceph/mdsmap.h7
-rw-r--r--include/linux/ceph/osd_client.h7
-rw-r--r--include/linux/ceph/pagelist.h6
-rw-r--r--include/linux/cma.h6
-rw-r--r--include/linux/console.h2
-rw-r--r--include/linux/cpuhotplug.h2
-rw-r--r--include/linux/cpumask.h4
-rw-r--r--include/linux/crash_core.h69
-rw-r--r--include/linux/dax.h34
-rw-r--r--include/linux/debugfs.h2
-rw-r--r--include/linux/device-mapper.h8
-rw-r--r--include/linux/dma-fence.h4
-rw-r--r--include/linux/dma-iommu.h6
-rw-r--r--include/linux/dma-mapping.h12
-rw-r--r--include/linux/dma_remapping.h1
-rw-r--r--include/linux/elevator.h8
-rw-r--r--include/linux/elf.h2
-rw-r--r--include/linux/f2fs_fs.h17
-rw-r--r--include/linux/fcntl.h6
-rw-r--r--include/linux/filter.h4
-rw-r--r--include/linux/firmware/meson/meson_sm.h4
-rw-r--r--include/linux/fpga/altera-pr-ip-core.h29
-rw-r--r--include/linux/fpga/fpga-mgr.h4
-rw-r--r--include/linux/fs.h19
-rw-r--r--include/linux/fscrypt_common.h11
-rw-r--r--include/linux/fscrypt_notsupp.h9
-rw-r--r--include/linux/fscrypt_supp.h74
-rw-r--r--include/linux/ftrace.h2
-rw-r--r--include/linux/gpio/consumer.h12
-rw-r--r--include/linux/gpio/driver.h6
-rw-r--r--include/linux/gpio/gpio-reg.h13
-rw-r--r--include/linux/hid-sensor-hub.h2
-rw-r--r--include/linux/hid-sensor-ids.h8
-rw-r--r--include/linux/host1x.h1
-rw-r--r--include/linux/hyperv.h31
-rw-r--r--include/linux/intel-iommu.h18
-rw-r--r--include/linux/interrupt.h2
-rw-r--r--include/linux/io.h21
-rw-r--r--include/linux/iomap.h1
-rw-r--r--include/linux/iommu.h58
-rw-r--r--include/linux/iova.h91
-rw-r--r--include/linux/ipc.h7
-rw-r--r--include/linux/jiffies.h11
-rw-r--r--include/linux/kexec.h57
-rw-r--r--include/linux/kprobes.h1
-rw-r--r--include/linux/kref.h6
-rw-r--r--include/linux/kvm_host.h80
-rw-r--r--include/linux/libnvdimm.h8
-rw-r--r--include/linux/memblock.h2
-rw-r--r--include/linux/mfd/stm32-timers.h2
-rw-r--r--include/linux/mfd/sun4i-gpadc.h6
-rw-r--r--include/linux/mfd/syscon/exynos5-pmu.h30
-rw-r--r--include/linux/mfd/syscon/imx7-iomuxc-gpr.h4
-rw-r--r--include/linux/mlx4/device.h2
-rw-r--r--include/linux/mlx5/driver.h26
-rw-r--r--include/linux/mm.h22
-rw-r--r--include/linux/mmzone.h5
-rw-r--r--include/linux/mod_devicetable.h10
-rw-r--r--include/linux/module.h6
-rw-r--r--include/linux/netlink.h19
-rw-r--r--include/linux/of.h3
-rw-r--r--include/linux/of_device.h17
-rw-r--r--include/linux/of_gpio.h1
-rw-r--r--include/linux/of_irq.h2
-rw-r--r--include/linux/of_pci.h11
-rw-r--r--include/linux/page-isolation.h5
-rw-r--r--include/linux/pci-ecam.h3
-rw-r--r--include/linux/pci-ep-cfs.h41
-rw-r--r--include/linux/pci-epc.h144
-rw-r--r--include/linux/pci-epf.h162
-rw-r--r--include/linux/pci.h84
-rw-r--r--include/linux/pci_ids.h2
-rw-r--r--include/linux/pe.h177
-rw-r--r--include/linux/perf/arm_pmu.h29
-rw-r--r--include/linux/platform_data/iommu-omap.h20
-rw-r--r--include/linux/platform_data/itco_wdt.h4
-rw-r--r--include/linux/pm_domain.h1
-rw-r--r--include/linux/pm_wakeup.h25
-rw-r--r--include/linux/pmem.h23
-rw-r--r--include/linux/printk.h4
-rw-r--r--include/linux/proc_ns.h2
-rw-r--r--include/linux/qcom_scm.h6
-rw-r--r--include/linux/qed/qed_if.h2
-rw-r--r--include/linux/rcu_node_tree.h99
-rw-r--r--include/linux/rcu_segcblist.h90
-rw-r--r--include/linux/rculist.h3
-rw-r--r--include/linux/rcupdate.h17
-rw-r--r--include/linux/rcutiny.h24
-rw-r--r--include/linux/rcutree.h5
-rw-r--r--include/linux/sched.h4
-rw-r--r--include/linux/sched/mm.h12
-rw-r--r--include/linux/sched/signal.h1
-rw-r--r--include/linux/sem.h3
-rw-r--r--include/linux/serdev.h21
-rw-r--r--include/linux/serial_core.h1
-rw-r--r--include/linux/signal.h4
-rw-r--r--include/linux/slab.h6
-rw-r--r--include/linux/soc/samsung/exynos-regs-pmu.h16
-rw-r--r--include/linux/srcu.h84
-rw-r--r--include/linux/srcuclassic.h115
-rw-r--r--include/linux/srcutiny.h93
-rw-r--r--include/linux/srcutree.h150
-rw-r--r--include/linux/string.h18
-rw-r--r--include/linux/suspend.h7
-rw-r--r--include/linux/sysctl.h1
-rw-r--r--include/linux/tee_drv.h277
-rw-r--r--include/linux/time.h3
-rw-r--r--include/linux/tty.h13
-rw-r--r--include/linux/types.h2
-rw-r--r--include/linux/uaccess.h1
-rw-r--r--include/linux/uio_driver.h13
-rw-r--r--include/linux/usb.h73
-rw-r--r--include/linux/usb/hcd.h7
-rw-r--r--include/linux/usb/of.h5
-rw-r--r--include/linux/usb/otg-fsm.h15
-rw-r--r--include/linux/usb/serial.h42
-rw-r--r--include/linux/usb/typec.h243
-rw-r--r--include/linux/vmalloc.h20
-rw-r--r--include/linux/vme.h12
-rw-r--r--include/media/cec-edid.h104
-rw-r--r--include/media/cec-notifier.h111
-rw-r--r--include/media/cec.h119
-rw-r--r--include/media/davinci/vpif_types.h1
-rw-r--r--include/media/rc-map.h79
-rw-r--r--include/media/soc_camera.h14
-rw-r--r--include/media/tveeprom.h3
-rw-r--r--include/media/v4l2-ioctl.h17
-rw-r--r--include/media/videobuf2-core.h12
-rw-r--r--include/media/videobuf2-memops.h3
-rw-r--r--include/misc/charlcd.h42
-rw-r--r--include/net/addrconf.h2
-rw-r--r--include/net/cfg80211.h4
-rw-r--r--include/net/ip6_route.h1
-rw-r--r--include/net/mac80211.h2
-rw-r--r--include/net/secure_seq.h10
-rw-r--r--include/net/sock.h2
-rw-r--r--include/net/tcp.h9
-rw-r--r--include/rdma/rdmavt_qp.h1
-rw-r--r--include/scsi/libfc.h3
-rw-r--r--include/scsi/libiscsi.h3
-rw-r--r--include/scsi/libsas.h1
-rw-r--r--include/scsi/scsi_device.h2
-rw-r--r--include/scsi/scsi_driver.h1
-rw-r--r--include/scsi/scsi_eh.h5
-rw-r--r--include/scsi/scsi_host.h5
-rw-r--r--include/scsi/scsi_transport_fc.h1
-rw-r--r--include/scsi/sg.h1
-rw-r--r--include/soc/tegra/flowctrl.h82
-rw-r--r--include/soc/tegra/pmc.h29
-rw-r--r--include/trace/events/btrfs.h187
-rw-r--r--include/trace/events/ext4.h74
-rw-r--r--include/trace/events/f2fs.h62
-rw-r--r--include/trace/events/fs_dax.h130
-rw-r--r--include/trace/events/iommu.h1
-rw-r--r--include/trace/events/v4l2.h1
-rw-r--r--include/uapi/drm/drm_fourcc.h45
-rw-r--r--include/uapi/linux/Kbuild1
-rw-r--r--include/uapi/linux/aspeed-lpc-ctrl.h61
-rw-r--r--include/uapi/linux/btrfs.h10
-rw-r--r--include/uapi/linux/capability.h2
-rw-r--r--include/uapi/linux/cec.h2
-rw-r--r--include/uapi/linux/elf.h2
-rw-r--r--include/uapi/linux/ethtool.h1
-rw-r--r--include/uapi/linux/eventpoll.h21
-rw-r--r--include/uapi/linux/fs.h15
-rw-r--r--include/uapi/linux/fsmap.h112
-rw-r--r--include/uapi/linux/ipmi.h2
-rw-r--r--include/uapi/linux/kvm.h25
-rw-r--r--include/uapi/linux/ndctl.h1
-rw-r--r--include/uapi/linux/netfilter/nf_conntrack_common.h13
-rw-r--r--include/uapi/linux/pci_regs.h2
-rw-r--r--include/uapi/linux/pcitest.h19
-rw-r--r--include/uapi/linux/perf_event.h16
-rw-r--r--include/uapi/linux/pps.h19
-rw-r--r--include/uapi/linux/serio.h1
-rw-r--r--include/uapi/linux/switchtec_ioctl.h132
-rw-r--r--include/uapi/linux/tee.h346
-rw-r--r--include/uapi/linux/usb/ch9.h3
-rw-r--r--include/uapi/linux/usb/functionfs.h2
-rw-r--r--include/uapi/linux/videodev2.h25
-rw-r--r--include/xen/arm/page-coherent.h26
-rw-r--r--include/xen/interface/io/9pfs.h36
-rw-r--r--include/xen/interface/io/displif.h854
-rw-r--r--include/xen/interface/io/kbdif.h458
-rw-r--r--include/xen/interface/io/ring.h143
-rw-r--r--include/xen/interface/io/sndif.h793
-rw-r--r--include/xen/xen-ops.h19
-rw-r--r--init/Kconfig43
-rw-r--r--init/do_mounts.h22
-rw-r--r--init/initramfs.c30
-rw-r--r--init/main.c2
-rw-r--r--ipc/shm.c16
-rw-r--r--ipc/util.c7
-rw-r--r--ipc/util.h2
-rw-r--r--kernel/Makefile1
-rw-r--r--kernel/bpf/core.c9
-rw-r--r--kernel/bpf/inode.c2
-rw-r--r--kernel/bpf/syscall.c3
-rw-r--r--kernel/bpf/verifier.c21
-rw-r--r--kernel/crash_core.c439
-rw-r--r--kernel/fork.c29
-rw-r--r--kernel/groups.c2
-rw-r--r--kernel/hung_task.c8
-rw-r--r--kernel/irq/manage.c15
-rw-r--r--kernel/kcov.c9
-rw-r--r--kernel/kexec_core.c431
-rw-r--r--kernel/kprobes.c32
-rw-r--r--kernel/ksysfs.c8
-rw-r--r--kernel/locking/lockdep.c86
-rw-r--r--kernel/locking/rtmutex-debug.c9
-rw-r--r--kernel/module.c7
-rw-r--r--kernel/pid.c4
-rw-r--r--kernel/pid_namespace.c34
-rw-r--r--kernel/power/process.c2
-rw-r--r--kernel/power/snapshot.c3
-rw-r--r--kernel/power/suspend.c29
-rw-r--r--kernel/printk/braille.c15
-rw-r--r--kernel/printk/braille.h13
-rw-r--r--kernel/printk/printk.c30
-rw-r--r--kernel/rcu/Makefile5
-rw-r--r--kernel/rcu/rcu.h153
-rw-r--r--kernel/rcu/rcu_segcblist.c505
-rw-r--r--kernel/rcu/rcu_segcblist.h164
-rw-r--r--kernel/rcu/rcutorture.c43
-rw-r--r--kernel/rcu/srcu.c12
-rw-r--r--kernel/rcu/srcutiny.c216
-rw-r--r--kernel/rcu/srcutree.c1155
-rw-r--r--kernel/rcu/tiny.c20
-rw-r--r--kernel/rcu/tiny_plugin.h13
-rw-r--r--kernel/rcu/tree.c710
-rw-r--r--kernel/rcu/tree.h163
-rw-r--r--kernel/rcu/tree_exp.h25
-rw-r--r--kernel/rcu/tree_plugin.h64
-rw-r--r--kernel/rcu/tree_trace.c26
-rw-r--r--kernel/rcu/update.c53
-rw-r--r--kernel/sched/core.c2
-rw-r--r--kernel/signal.c4
-rw-r--r--kernel/sys.c3
-rw-r--r--kernel/sysctl.c2
-rw-r--r--kernel/taskstats.c14
-rw-r--r--kernel/time/posix-cpu-timers.c2
-rw-r--r--kernel/trace/trace_entries.h6
-rw-r--r--kernel/trace/trace_hwlat.c14
-rw-r--r--kernel/trace/trace_output.c9
-rw-r--r--lib/Kconfig.debug14
-rw-r--r--lib/Makefile1
-rw-r--r--lib/devres.c6
-rw-r--r--lib/dma-debug.c4
-rw-r--r--lib/fault-inject.c2
-rw-r--r--lib/iov_iter.c7
-rw-r--r--lib/list_sort.c149
-rw-r--r--lib/refcount.c22
-rw-r--r--lib/rhashtable.c13
-rw-r--r--lib/string.c26
-rw-r--r--lib/test_bpf.c10
-rw-r--r--lib/test_list_sort.c150
-rw-r--r--lib/test_sort.c11
-rw-r--r--lib/vsprintf.c3
-rw-r--r--lib/zlib_inflate/inftrees.c2
-rw-r--r--mm/cma.c31
-rw-r--r--mm/cma.h1
-rw-r--r--mm/cma_debug.c2
-rw-r--r--mm/compaction.c83
-rw-r--r--mm/filemap.c8
-rw-r--r--mm/frame_vector.c5
-rw-r--r--mm/huge_memory.c28
-rw-r--r--mm/internal.h12
-rw-r--r--mm/kasan/kasan.c8
-rw-r--r--mm/kmemcheck.c2
-rw-r--r--mm/memblock.c56
-rw-r--r--mm/mmu_notifier.c14
-rw-r--r--mm/nommu.c8
-rw-r--r--mm/page-writeback.c14
-rw-r--r--mm/page_alloc.c162
-rw-r--r--mm/page_isolation.c5
-rw-r--r--mm/rmap.c4
-rw-r--r--mm/slab.c6
-rw-r--r--mm/slab.h4
-rw-r--r--mm/slab_common.c6
-rw-r--r--mm/slob.c6
-rw-r--r--mm/slub.c12
-rw-r--r--mm/swap_slots.c19
-rw-r--r--mm/swap_state.c2
-rw-r--r--mm/swapfile.c10
-rw-r--r--mm/util.c57
-rw-r--r--mm/vmalloc.c33
-rw-r--r--mm/vmscan.c17
-rw-r--r--net/8021q/vlan_dev.c13
-rw-r--r--net/9p/Kconfig9
-rw-r--r--net/9p/Makefile4
-rw-r--r--net/9p/trans_xen.c545
-rw-r--r--net/bridge/br_netlink.c4
-rw-r--r--net/bridge/netfilter/ebt_dnat.c20
-rw-r--r--net/ceph/ceph_common.c29
-rw-r--r--net/ceph/cls_lock_client.c51
-rw-r--r--net/ceph/debugfs.c7
-rw-r--r--net/ceph/messenger.c6
-rw-r--r--net/ceph/osd_client.c143
-rw-r--r--net/ceph/pagelist.c2
-rw-r--r--net/ceph/snapshot.c6
-rw-r--r--net/core/dev.c31
-rw-r--r--net/core/rtnetlink.c2
-rw-r--r--net/core/secure_seq.c31
-rw-r--r--net/core/sock.c7
-rw-r--r--net/dccp/ipv4.c2
-rw-r--r--net/dccp/ipv6.c2
-rw-r--r--net/decnet/af_decnet.c3
-rw-r--r--net/decnet/dn_neigh.c12
-rw-r--r--net/ipv4/inet_connection_sock.c2
-rw-r--r--net/ipv4/inet_hashtables.c6
-rw-r--r--net/ipv4/ip_vti.c13
-rw-r--r--net/ipv4/raw.c3
-rw-r--r--net/ipv4/syncookies.c12
-rw-r--r--net/ipv4/tcp_input.c8
-rw-r--r--net/ipv4/tcp_ipv4.c34
-rw-r--r--net/ipv4/tcp_metrics.c5
-rw-r--r--net/ipv4/tcp_minisocks.c1
-rw-r--r--net/ipv4/tcp_output.c4
-rw-r--r--net/ipv6/addrconf.c8
-rw-r--r--net/ipv6/ila/ila_xlat.c8
-rw-r--r--net/ipv6/netfilter/nf_nat_l3proto_ipv6.c2
-rw-r--r--net/ipv6/raw.c2
-rw-r--r--net/ipv6/route.c39
-rw-r--r--net/ipv6/syncookies.c10
-rw-r--r--net/ipv6/tcp_ipv6.c34
-rw-r--r--net/llc/af_llc.c2
-rw-r--r--net/llc/llc_conn.c4
-rw-r--r--net/llc/llc_sap.c2
-rw-r--r--net/mac80211/ibss.c2
-rw-r--r--net/mac80211/mlme.c4
-rw-r--r--net/mpls/af_mpls.c5
-rw-r--r--net/netfilter/ipvs/ip_vs_ctl.c22
-rw-r--r--net/netfilter/nf_conntrack_core.c10
-rw-r--r--net/netfilter/nf_conntrack_helper.c26
-rw-r--r--net/netfilter/nf_conntrack_netlink.c89
-rw-r--r--net/netfilter/nf_tables_api.c5
-rw-r--r--net/netfilter/nft_dynset.c5
-rw-r--r--net/netfilter/nft_set_bitmap.c5
-rw-r--r--net/netfilter/x_tables.c28
-rw-r--r--net/netfilter/xt_CT.c11
-rw-r--r--net/netfilter/xt_recent.c5
-rw-r--r--net/netfilter/xt_socket.c2
-rw-r--r--net/openvswitch/conntrack.c30
-rw-r--r--net/sched/cls_matchall.c3
-rw-r--r--net/sched/sch_choke.c5
-rw-r--r--net/sched/sch_fq.c12
-rw-r--r--net/sched/sch_fq_codel.c26
-rw-r--r--net/sched/sch_hhf.c33
-rw-r--r--net/sched/sch_netem.c6
-rw-r--r--net/sched/sch_sfq.c6
-rw-r--r--net/smc/af_smc.c2
-rw-r--r--net/sysctl_net.c1
-rw-r--r--net/wireless/nl80211.c4
-rw-r--r--samples/bpf/bpf_load.c229
-rw-r--r--samples/bpf/bpf_load.h18
-rw-r--r--samples/bpf/map_perf_test_user.c14
-rw-r--r--samples/bpf/tracex2_user.c7
-rw-r--r--samples/bpf/tracex3_user.c7
-rw-r--r--samples/bpf/tracex4_user.c8
-rw-r--r--samples/mei/TODO2
-rw-r--r--scripts/Makefile.lib14
-rwxr-xr-xscripts/checkpatch.pl165
-rw-r--r--scripts/dtc/checks.c361
-rw-r--r--scripts/dtc/data.c16
-rw-r--r--scripts/dtc/dtc-lexer.l3
-rw-r--r--scripts/dtc/dtc-lexer.lex.c_shipped77
-rw-r--r--scripts/dtc/dtc-parser.tab.c_shipped6
-rw-r--r--scripts/dtc/dtc-parser.y6
-rw-r--r--scripts/dtc/dtc.c9
-rw-r--r--scripts/dtc/dtc.h11
-rw-r--r--scripts/dtc/flattree.c58
-rw-r--r--scripts/dtc/libfdt/fdt_rw.c3
-rw-r--r--scripts/dtc/libfdt/libfdt.h51
-rw-r--r--scripts/dtc/libfdt/libfdt_env.h26
-rw-r--r--scripts/dtc/livetree.c22
-rw-r--r--scripts/dtc/srcpos.c2
-rw-r--r--scripts/dtc/srcpos.h11
-rw-r--r--scripts/dtc/treesource.c6
-rwxr-xr-xscripts/dtc/update-dtc-source.sh20
-rw-r--r--scripts/dtc/util.c11
-rw-r--r--scripts/dtc/util.h24
-rw-r--r--scripts/dtc/version_gen.h2
-rw-r--r--scripts/module-common.lds3
-rw-r--r--scripts/spelling.txt8
-rwxr-xr-xscripts/ver_linux2
-rw-r--r--security/apparmor/apparmorfs.c4
-rw-r--r--security/apparmor/include/lib.h11
-rw-r--r--security/apparmor/lib.c30
-rw-r--r--security/apparmor/match.c2
-rw-r--r--security/apparmor/policy_unpack.c2
-rw-r--r--security/inode.c2
-rw-r--r--security/keys/keyctl.c22
-rw-r--r--security/selinux/selinuxfs.c4
-rw-r--r--security/smack/smackfs.c2
-rw-r--r--sound/pci/hda/hda_intel.c2
-rw-r--r--sound/pci/intel8x0.c4
-rw-r--r--sound/soc/soc-core.c2
-rw-r--r--sound/x86/intel_hdmi_audio.c2
-rw-r--r--tools/Makefile5
-rw-r--r--tools/arch/s390/include/uapi/asm/kvm.h3
-rw-r--r--tools/include/uapi/linux/perf_event.h16
-rwxr-xr-xtools/kvm/kvm_stat/kvm_stat381
-rw-r--r--tools/kvm/kvm_stat/kvm_stat.txt26
-rw-r--r--tools/pci/pcitest.c186
-rw-r--r--tools/pci/pcitest.sh56
-rw-r--r--tools/testing/nvdimm/Kbuild11
-rw-r--r--tools/testing/nvdimm/dax-dev.c49
-rw-r--r--tools/testing/nvdimm/pmem-dax.c21
-rw-r--r--tools/testing/nvdimm/test/nfit.c54
-rw-r--r--tools/testing/selftests/.gitignore4
-rw-r--r--tools/testing/selftests/Makefile4
-rw-r--r--tools/testing/selftests/bpf/Makefile6
-rw-r--r--tools/testing/selftests/bpf/gnu/stubs.h1
-rw-r--r--tools/testing/selftests/bpf/test_progs.c16
-rw-r--r--tools/testing/selftests/bpf/test_tcp_estats.c258
-rw-r--r--tools/testing/selftests/breakpoints/Makefile2
-rw-r--r--tools/testing/selftests/cpufreq/config15
-rw-r--r--tools/testing/selftests/ftrace/config1
-rw-r--r--tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_type.tc2
-rw-r--r--tools/testing/selftests/futex/Makefile9
-rw-r--r--tools/testing/selftests/gpio/Makefile11
-rw-r--r--tools/testing/selftests/gpio/config2
-rw-r--r--tools/testing/selftests/lib.mk6
-rw-r--r--tools/testing/selftests/lib/config3
-rw-r--r--tools/testing/selftests/powerpc/Makefile4
-rw-r--r--tools/testing/selftests/powerpc/cache_shape/.gitignore1
-rw-r--r--tools/testing/selftests/powerpc/cache_shape/Makefile10
-rw-r--r--tools/testing/selftests/powerpc/cache_shape/cache_shape.c125
-rw-r--r--tools/testing/selftests/powerpc/include/utils.h6
-rw-r--r--tools/testing/selftests/powerpc/utils.c53
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh2
-rw-r--r--tools/testing/selftests/splice/Makefile3
-rw-r--r--tools/testing/selftests/sync/Makefile3
-rw-r--r--tools/testing/selftests/timers/clocksource-switch.c2
-rw-r--r--tools/testing/selftests/vm/Makefile1
-rw-r--r--tools/testing/selftests/vm/config1
-rw-r--r--tools/testing/selftests/vm/map_hugetlb.c2
-rw-r--r--tools/testing/selftests/vm/mlock2-tests.c12
-rw-r--r--tools/testing/selftests/vm/on-fault-limit.c2
-rwxr-xr-xtools/testing/selftests/vm/run_vmtests37
-rw-r--r--tools/testing/selftests/vm/thuge-gen.c2
-rw-r--r--tools/testing/selftests/vm/virtual_address_range.c122
-rw-r--r--tools/testing/selftests/watchdog/watchdog-test.c61
-rw-r--r--tools/testing/selftests/x86/.gitignore13
-rw-r--r--tools/testing/selftests/x86/Makefile3
-rw-r--r--tools/usb/.gitignore2
-rw-r--r--tools/usb/usbip/README2
-rw-r--r--tools/usb/usbip/libsrc/usbip_common.c9
-rw-r--r--tools/usb/usbip/libsrc/usbip_host_common.c28
-rw-r--r--tools/usb/usbip/libsrc/vhci_driver.c28
-rw-r--r--tools/usb/usbip/src/usbip.c2
-rw-r--r--usr/Kconfig10
-rw-r--r--virt/kvm/arm/arch_timer.c124
-rw-r--r--virt/kvm/arm/hyp/vgic-v2-sr.c78
-rw-r--r--virt/kvm/arm/hyp/vgic-v3-sr.c87
-rw-r--r--virt/kvm/arm/pmu.c39
-rw-r--r--virt/kvm/arm/vgic/vgic-init.c108
-rw-r--r--virt/kvm/arm/vgic/vgic-v2.c90
-rw-r--r--virt/kvm/arm/vgic/vgic-v3.c87
-rw-r--r--virt/kvm/arm/vgic/vgic.c64
-rw-r--r--virt/kvm/arm/vgic/vgic.h7
-rw-r--r--virt/kvm/eventfd.c4
-rw-r--r--virt/kvm/irqchip.c16
-rw-r--r--virt/kvm/kvm_main.c122
-rw-r--r--virt/kvm/vfio.c105
5237 files changed, 522651 insertions, 82980 deletions
diff --git a/.mailmap b/.mailmap
index 1d6f4e7280dc..d2aeb146efed 100644
--- a/.mailmap
+++ b/.mailmap
@@ -111,6 +111,7 @@ Mauro Carvalho Chehab <mchehab@kernel.org> <mchehab@osg.samsung.com>
Mauro Carvalho Chehab <mchehab@kernel.org> <mchehab@s-opensource.com>
Matt Ranostay <mranostay@gmail.com> Matthew Ranostay <mranostay@embeddedalley.com>
Matt Ranostay <mranostay@gmail.com> <matt.ranostay@intel.com>
+Matt Ranostay <matt.ranostay@konsulko.com> <matt@ranostay.consulting>
Mayuresh Janorkar <mayur@ti.com>
Michael Buesch <m@bues.ch>
Michel Dänzer <michel@tungstengraphics.com>
diff --git a/Documentation/00-INDEX b/Documentation/00-INDEX
index 793acf999e9e..ed3e5e949fce 100644
--- a/Documentation/00-INDEX
+++ b/Documentation/00-INDEX
@@ -412,6 +412,8 @@ sysctl/
- directory with info on the /proc/sys/* files.
target/
- directory with info on generating TCM v4 fabric .ko modules
+tee.txt
+ - info on the TEE subsystem and drivers
this_cpu_ops.txt
- List rationale behind and the way to use this_cpu operations.
thermal/
diff --git a/Documentation/ABI/testing/sysfs-bus-iio b/Documentation/ABI/testing/sysfs-bus-iio
index 530809ccfacf..8c24d0892f61 100644
--- a/Documentation/ABI/testing/sysfs-bus-iio
+++ b/Documentation/ABI/testing/sysfs-bus-iio
@@ -55,6 +55,7 @@ Description:
then it is to be found in the base device directory.
What: /sys/bus/iio/devices/iio:deviceX/sampling_frequency_available
+What: /sys/bus/iio/devices/iio:deviceX/in_proximity_sampling_frequency_available
What: /sys/.../iio:deviceX/buffer/sampling_frequency_available
What: /sys/bus/iio/devices/triggerX/sampling_frequency_available
KernelVersion: 2.6.35
@@ -1593,7 +1594,7 @@ Description:
can be processed to siemens per meter.
What: /sys/bus/iio/devices/iio:deviceX/in_countY_raw
-KernelVersion: 4.9
+KernelVersion: 4.10
Contact: linux-iio@vger.kernel.org
Description:
Raw counter device counts from channel Y. For quadrature
@@ -1601,10 +1602,24 @@ Description:
the counts of a single quadrature signal phase from channel Y.
What: /sys/bus/iio/devices/iio:deviceX/in_indexY_raw
-KernelVersion: 4.9
+KernelVersion: 4.10
Contact: linux-iio@vger.kernel.org
Description:
Raw counter device index value from channel Y. This attribute
provides an absolute positional reference (e.g. a pulse once per
revolution) which may be used to home positional systems as
required.
+
+What: /sys/bus/iio/devices/iio:deviceX/in_count_count_direction_available
+KernelVersion: 4.12
+Contact: linux-iio@vger.kernel.org
+Description:
+ A list of possible counting directions which are:
+ - "up" : counter device is increasing.
+ - "down": counter device is decreasing.
+
+What: /sys/bus/iio/devices/iio:deviceX/in_countY_count_direction
+KernelVersion: 4.12
+Contact: linux-iio@vger.kernel.org
+Description:
+		Counting direction of the counter for channel Y.
diff --git a/Documentation/ABI/testing/sysfs-bus-iio-adc-max9611 b/Documentation/ABI/testing/sysfs-bus-iio-adc-max9611
new file mode 100644
index 000000000000..6d2d2b094941
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-iio-adc-max9611
@@ -0,0 +1,17 @@
+What: /sys/bus/iio/devices/iio:deviceX/in_power_shunt_resistor
+Date: March 2017
+KernelVersion: 4.12
+Contact: linux-iio@vger.kernel.org
+Description: The value of the shunt resistor used to compute power drain on
+ common input voltage pin (RS+). In Ohms.
+
+What: /sys/bus/iio/devices/iio:deviceX/in_current_shunt_resistor
+Date: March 2017
+KernelVersion: 4.12
+Contact: linux-iio@vger.kernel.org
+Description: The value of the shunt resistor used to compute current flowing
+ between RS+ and RS- voltage sense inputs. In Ohms.
+
+These attributes describe a single physical component, exposed as two distinct
+attributes as it is used to calculate two different values: power load and
+current flowing between RS+ and RS- inputs.
diff --git a/Documentation/ABI/testing/sysfs-bus-iio-counter-104-quad-8 b/Documentation/ABI/testing/sysfs-bus-iio-counter-104-quad-8
index ba676520b953..7fac2c268d9a 100644
--- a/Documentation/ABI/testing/sysfs-bus-iio-counter-104-quad-8
+++ b/Documentation/ABI/testing/sysfs-bus-iio-counter-104-quad-8
@@ -1,24 +1,16 @@
-What: /sys/bus/iio/devices/iio:deviceX/in_count_count_direction_available
What: /sys/bus/iio/devices/iio:deviceX/in_count_count_mode_available
What: /sys/bus/iio/devices/iio:deviceX/in_count_noise_error_available
What: /sys/bus/iio/devices/iio:deviceX/in_count_quadrature_mode_available
What: /sys/bus/iio/devices/iio:deviceX/in_index_index_polarity_available
What: /sys/bus/iio/devices/iio:deviceX/in_index_synchronous_mode_available
-KernelVersion: 4.9
+KernelVersion: 4.10
Contact: linux-iio@vger.kernel.org
Description:
Discrete set of available values for the respective counter
configuration are listed in this file.
-What: /sys/bus/iio/devices/iio:deviceX/in_countY_count_direction
-KernelVersion: 4.9
-Contact: linux-iio@vger.kernel.org
-Description:
- Read-only attribute that indicates whether the counter for
- channel Y is counting up or down.
-
What: /sys/bus/iio/devices/iio:deviceX/in_countY_count_mode
-KernelVersion: 4.9
+KernelVersion: 4.10
Contact: linux-iio@vger.kernel.org
Description:
Count mode for channel Y. Four count modes are available:
@@ -52,7 +44,7 @@ Description:
continuously throughout.
What: /sys/bus/iio/devices/iio:deviceX/in_countY_noise_error
-KernelVersion: 4.9
+KernelVersion: 4.10
Contact: linux-iio@vger.kernel.org
Description:
Read-only attribute that indicates whether excessive noise is
@@ -60,14 +52,14 @@ Description:
irrelevant in non-quadrature clock mode.
What: /sys/bus/iio/devices/iio:deviceX/in_countY_preset
-KernelVersion: 4.9
+KernelVersion: 4.10
Contact: linux-iio@vger.kernel.org
Description:
If the counter device supports preset registers, the preset
count for channel Y is provided by this attribute.
What: /sys/bus/iio/devices/iio:deviceX/in_countY_quadrature_mode
-KernelVersion: 4.9
+KernelVersion: 4.10
Contact: linux-iio@vger.kernel.org
Description:
Configure channel Y counter for non-quadrature or quadrature
@@ -88,7 +80,7 @@ Description:
decoded for UP/DN clock.
What: /sys/bus/iio/devices/iio:deviceX/in_countY_set_to_preset_on_index
-KernelVersion: 4.9
+KernelVersion: 4.10
Contact: linux-iio@vger.kernel.org
Description:
Whether to set channel Y counter with channel Y preset value
@@ -96,14 +88,14 @@ Description:
Valid attribute values are boolean.
What: /sys/bus/iio/devices/iio:deviceX/in_indexY_index_polarity
-KernelVersion: 4.9
+KernelVersion: 4.10
Contact: linux-iio@vger.kernel.org
Description:
Active level of channel Y index input; irrelevant in
non-synchronous load mode.
What: /sys/bus/iio/devices/iio:deviceX/in_indexY_synchronous_mode
-KernelVersion: 4.9
+KernelVersion: 4.10
Contact: linux-iio@vger.kernel.org
Description:
Configure channel Y counter for non-synchronous or synchronous
diff --git a/Documentation/ABI/testing/sysfs-bus-iio-timer-stm32 b/Documentation/ABI/testing/sysfs-bus-iio-timer-stm32
index 6534a60037ff..230020e06677 100644
--- a/Documentation/ABI/testing/sysfs-bus-iio-timer-stm32
+++ b/Documentation/ABI/testing/sysfs-bus-iio-timer-stm32
@@ -3,11 +3,15 @@ KernelVersion: 4.11
Contact: benjamin.gaignard@st.com
Description:
Reading returns the list possible master modes which are:
- - "reset" : The UG bit from the TIMx_EGR register is used as trigger output (TRGO).
- - "enable" : The Counter Enable signal CNT_EN is used as trigger output.
+ - "reset" : The UG bit from the TIMx_EGR register is
+ used as trigger output (TRGO).
+ - "enable" : The Counter Enable signal CNT_EN is used
+ as trigger output.
- "update" : The update event is selected as trigger output.
- For instance a master timer can then be used as a prescaler for a slave timer.
- - "compare_pulse" : The trigger output send a positive pulse when the CC1IF flag is to be set.
+ For instance a master timer can then be used
+ as a prescaler for a slave timer.
+		  - "compare_pulse" : The trigger output sends a positive pulse
+ when the CC1IF flag is to be set.
- "OC1REF" : OC1REF signal is used as trigger output.
- "OC2REF" : OC2REF signal is used as trigger output.
- "OC3REF" : OC3REF signal is used as trigger output.
@@ -27,3 +31,62 @@ Description:
Reading returns the current sampling frequency.
Writing an value different of 0 set and start sampling.
Writing 0 stop sampling.
+
+What: /sys/bus/iio/devices/iio:deviceX/in_count0_preset
+KernelVersion: 4.12
+Contact: benjamin.gaignard@st.com
+Description:
+		Reading returns the current preset value.
+		Writing sets the preset value.
+		When counting up, the counter starts from 0 and fires an
+		event when it reaches the preset value.
+		When counting down, the counter starts from the preset value
+		and fires an event when it reaches 0.
+
+What: /sys/bus/iio/devices/iio:deviceX/in_count_quadrature_mode_available
+KernelVersion: 4.12
+Contact: benjamin.gaignard@st.com
+Description:
+		Reading returns the list of possible quadrature modes.
+
+What: /sys/bus/iio/devices/iio:deviceX/in_count0_quadrature_mode
+KernelVersion: 4.12
+Contact: benjamin.gaignard@st.com
+Description:
+ Configure the device counter quadrature modes:
+ channel_A:
+			Encoder A input serves as the count input and B as
+ the UP/DOWN direction control input.
+
+ channel_B:
+ Encoder B input serves as the count input and A as
+ the UP/DOWN direction control input.
+
+ quadrature:
+ Encoder A and B inputs are mixed to get direction
+ and count with a scale of 0.25.
+
+What: /sys/bus/iio/devices/iio:deviceX/in_count_enable_mode_available
+KernelVersion: 4.12
+Contact: benjamin.gaignard@st.com
+Description:
+		Reading returns the list of possible enable modes.
+
+What: /sys/bus/iio/devices/iio:deviceX/in_count0_enable_mode
+KernelVersion: 4.12
+Contact: benjamin.gaignard@st.com
+Description:
+		Configure the device counter enable modes. In all cases the
+		counting direction is set by the in_count0_count_direction
+		attribute and the counter is clocked by the internal clock.
+ always:
+ Counter is always ON.
+
+ gated:
+			Counting is enabled while the connected trigger signal
+			level is high, otherwise counting is disabled.
+
+ triggered:
+ Counting is enabled on rising edge of the connected
+ trigger, and remains enabled for the duration of this
+ selected mode.
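+
+		The following minimal C sketch shows how user space might
+		configure the counter through these attributes; the
+		iio:device0 path and the chosen preset/mode values are
+		assumptions for illustration only.
+
+		#include <fcntl.h>
+		#include <stdio.h>
+		#include <string.h>
+		#include <unistd.h>
+
+		/* Write a string to a sysfs attribute; return 0 on success. */
+		static int sysfs_write(const char *path, const char *val)
+		{
+			int fd = open(path, O_WRONLY);
+			if (fd < 0)
+				return -1;
+			ssize_t n = write(fd, val, strlen(val));
+			close(fd);
+			return n < 0 ? -1 : 0;
+		}
+
+		int main(void)
+		{
+			const char *base = "/sys/bus/iio/devices/iio:device0";
+			char path[256];
+
+			/* Count up from 0 and fire an event at a preset of 1000. */
+			snprintf(path, sizeof(path), "%s/in_count0_preset", base);
+			sysfs_write(path, "1000");
+
+			/* Enable counting only while the trigger input is high. */
+			snprintf(path, sizeof(path), "%s/in_count0_enable_mode", base);
+			sysfs_write(path, "gated");
+			return 0;
+		}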
diff --git a/Documentation/ABI/testing/sysfs-bus-pci b/Documentation/ABI/testing/sysfs-bus-pci
index e4e90104d7c3..44d4b2be92fd 100644
--- a/Documentation/ABI/testing/sysfs-bus-pci
+++ b/Documentation/ABI/testing/sysfs-bus-pci
@@ -301,3 +301,25 @@ Contact: Emil Velikov <emil.l.velikov@gmail.com>
Description:
This file contains the revision field of the PCI device.
The value comes from device config space. The file is read only.
+
+What: /sys/bus/pci/devices/.../sriov_drivers_autoprobe
+Date: April 2017
+Contact: Bodong Wang<bodong@mellanox.com>
+Description:
+ This file is associated with the PF of a device that
+ supports SR-IOV. It determines whether newly-enabled VFs
+ are immediately bound to a driver. It initially contains
+ 1, which means the kernel automatically binds VFs to a
+ compatible driver immediately after they are enabled. If
+ an application writes 0 to the file before enabling VFs,
+ the kernel will not bind VFs to a driver.
+
+ A typical use case is to write 0 to this file, then enable
+ VFs, then assign the newly-created VFs to virtual machines.
+ Note that changing this file does not affect already-
+ enabled VFs. In this scenario, the user must first disable
+ the VFs, write 0 to sriov_drivers_autoprobe, then re-enable
+ the VFs.
+
+ This is similar to /sys/bus/pci/drivers_autoprobe, but
+ affects only the VFs associated with a specific PF.
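+
+		A minimal C sketch of the use case above (the PCI address
+		0000:03:00.0 and the VF count are illustrative assumptions):
+
+		#include <fcntl.h>
+		#include <stdio.h>
+		#include <string.h>
+		#include <unistd.h>
+
+		/* Write a value to an attribute of the PF's sysfs directory. */
+		static void pf_write(const char *attr, const char *val)
+		{
+			char path[256];
+			snprintf(path, sizeof(path),
+				 "/sys/bus/pci/devices/0000:03:00.0/%s", attr);
+			int fd = open(path, O_WRONLY);
+			if (fd >= 0) {
+				write(fd, val, strlen(val));
+				close(fd);
+			}
+		}
+
+		int main(void)
+		{
+			/* Keep the kernel from binding a driver to new VFs ... */
+			pf_write("sriov_drivers_autoprobe", "0");
+			/* ... then enable four VFs for assignment to guests. */
+			pf_write("sriov_numvfs", "4");
+			return 0;
+		}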
diff --git a/Documentation/ABI/testing/sysfs-class-switchtec b/Documentation/ABI/testing/sysfs-class-switchtec
new file mode 100644
index 000000000000..48cb4c15e430
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-class-switchtec
@@ -0,0 +1,96 @@
+switchtec - Microsemi Switchtec PCI Switch Management Endpoint
+
+For details on this subsystem look at Documentation/switchtec.txt.
+
+What: /sys/class/switchtec
+Date: 05-Jan-2017
+KernelVersion: v4.11
+Contact: Logan Gunthorpe <logang@deltatee.com>
+Description: The switchtec class subsystem folder.
+ Each registered switchtec driver is represented by a switchtecX
+ subfolder (X being an integer >= 0).
+
+
+What: /sys/class/switchtec/switchtec[0-9]+/component_id
+Date: 05-Jan-2017
+KernelVersion: v4.11
+Contact: Logan Gunthorpe <logang@deltatee.com>
+Description: Component identifier as stored in the hardware (eg. PM8543)
+ (read only)
+Values: arbitrary string.
+
+
+What: /sys/class/switchtec/switchtec[0-9]+/component_revision
+Date: 05-Jan-2017
+KernelVersion: v4.11
+Contact: Logan Gunthorpe <logang@deltatee.com>
+Description: Component revision stored in the hardware (read only)
+Values: integer.
+
+
+What: /sys/class/switchtec/switchtec[0-9]+/component_vendor
+Date: 05-Jan-2017
+KernelVersion: v4.11
+Contact: Logan Gunthorpe <logang@deltatee.com>
+Description: Component vendor as stored in the hardware (eg. MICROSEM)
+ (read only)
+Values: arbitrary string.
+
+
+What: /sys/class/switchtec/switchtec[0-9]+/device_version
+Date: 05-Jan-2017
+KernelVersion: v4.11
+Contact: Logan Gunthorpe <logang@deltatee.com>
+Description: Device version as stored in the hardware (read only)
+Values: integer.
+
+
+What: /sys/class/switchtec/switchtec[0-9]+/fw_version
+Date: 05-Jan-2017
+KernelVersion: v4.11
+Contact: Logan Gunthorpe <logang@deltatee.com>
+Description: Currently running firmware version (read only)
+Values: integer (in hexadecimal).
+
+
+What: /sys/class/switchtec/switchtec[0-9]+/partition
+Date: 05-Jan-2017
+KernelVersion: v4.11
+Contact: Logan Gunthorpe <logang@deltatee.com>
+Description: Partition number for this device in the switch (read only)
+Values: integer.
+
+
+What: /sys/class/switchtec/switchtec[0-9]+/partition_count
+Date: 05-Jan-2017
+KernelVersion: v4.11
+Contact: Logan Gunthorpe <logang@deltatee.com>
+Description: Total number of partitions in the switch (read only)
+Values: integer.
+
+
+What: /sys/class/switchtec/switchtec[0-9]+/product_id
+Date: 05-Jan-2017
+KernelVersion: v4.11
+Contact: Logan Gunthorpe <logang@deltatee.com>
+Description: Product identifier as stored in the hardware (eg. PSX 48XG3)
+ (read only)
+Values: arbitrary string.
+
+
+What: /sys/class/switchtec/switchtec[0-9]+/product_revision
+Date: 05-Jan-2017
+KernelVersion: v4.11
+Contact: Logan Gunthorpe <logang@deltatee.com>
+Description: Product revision stored in the hardware (eg. RevB)
+ (read only)
+Values: arbitrary string.
+
+
+What: /sys/class/switchtec/switchtec[0-9]+/product_vendor
+Date: 05-Jan-2017
+KernelVersion: v4.11
+Contact: Logan Gunthorpe <logang@deltatee.com>
+Description: Product vendor as stored in the hardware (eg. MICROSEM)
+ (read only)
+Values: arbitrary string.
diff --git a/Documentation/ABI/testing/sysfs-class-typec b/Documentation/ABI/testing/sysfs-class-typec
new file mode 100644
index 000000000000..d4a3d23eb09c
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-class-typec
@@ -0,0 +1,276 @@
+USB Type-C port devices (eg. /sys/class/typec/port0/)
+
+What: /sys/class/typec/<port>/data_role
+Date: April 2017
+Contact: Heikki Krogerus <heikki.krogerus@linux.intel.com>
+Description:
+ The supported USB data roles. This attribute can be used for
+ requesting data role swapping on the port. Swapping is supported
+		as a synchronous operation, so write(2) to the attribute will
+		not return until the operation has finished. The attribute is
+		notified about role changes so that poll(2) on the attribute
+		wakes up. A role change also generates a KOBJ_CHANGE uevent on
+		the port. The current role is shown in brackets, for example
+		"[host] device" when a DRP port is in host mode.
+
+ Valid values: host, device
+
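+		A rough user-space sketch of requesting a swap and reading the
+		result (the port0 name is an assumption for illustration):
+
+		#include <fcntl.h>
+		#include <poll.h>
+		#include <string.h>
+		#include <unistd.h>
+
+		int main(void)
+		{
+			const char *attr = "/sys/class/typec/port0/data_role";
+			char buf[32];
+			int fd = open(attr, O_RDWR);
+
+			if (fd < 0)
+				return 1;
+
+			/* Request the swap; write(2) blocks until it finishes. */
+			write(fd, "host", strlen("host"));
+
+			/* Re-read from the start, e.g. now "[host] device". */
+			ssize_t n = pread(fd, buf, sizeof(buf) - 1, 0);
+			if (n > 0)
+				buf[n] = '\0';
+
+			/* To watch for later role changes, poll(2) the attribute. */
+			struct pollfd pfd = { .fd = fd, .events = POLLPRI | POLLERR };
+			poll(&pfd, 1, -1);
+
+			close(fd);
+			return 0;
+		}
+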
+What: /sys/class/typec/<port>/power_role
+Date: April 2017
+Contact: Heikki Krogerus <heikki.krogerus@linux.intel.com>
+Description:
+ The supported power roles. This attribute can be used to request
+ power role swap on the port when the port supports USB Power
+		Delivery. Swapping is supported as a synchronous operation, so
+		write(2) to the attribute will not return until the operation
+		has finished. The attribute is notified about role changes so
+		that poll(2) on the attribute wakes up. A role change also
+		generates a KOBJ_CHANGE uevent. The current role is shown in
+		brackets, for example "[source] sink" when in source mode.
+
+ Valid values: source, sink
+
+What: /sys/class/typec/<port>/vconn_source
+Date: April 2017
+Contact: Heikki Krogerus <heikki.krogerus@linux.intel.com>
+Description:
+		Shows whether the port is the VCONN Source. This attribute can
+		be used to request a VCONN swap to change the VCONN Source
+		during a connection when both the port and the partner support
+		USB Power Delivery. Swapping is supported as a synchronous
+		operation, so write(2) to the attribute will not return until
+		the operation has finished. The attribute is notified about
+		VCONN Source changes so that poll(2) on the attribute wakes up.
+		A change of the VCONN Source also generates a KOBJ_CHANGE uevent.
+
+ Valid values:
+ - "no" when the port is not the VCONN Source
+ - "yes" when the port is the VCONN Source
+
+What: /sys/class/typec/<port>/power_operation_mode
+Date: April 2017
+Contact: Heikki Krogerus <heikki.krogerus@linux.intel.com>
+Description:
+		Shows the current power operation mode the port is in. The
+		power operation mode determines the current level for VBUS. If
+		USB Power Delivery communication is used to negotiate the
+		levels, the power operation mode should show
+		"usb_power_delivery".
+
+ Valid values:
+ - default
+ - 1.5A
+ - 3.0A
+ - usb_power_delivery
+
+What: /sys/class/typec/<port>/preferred_role
+Date: April 2017
+Contact: Heikki Krogerus <heikki.krogerus@linux.intel.com>
+Description:
+		User space can notify the driver about the preferred role. The
+		port drivers should handle it as enabling of Try.SRC or
+		Try.SNK, as defined in the USB Type-C specification. By
+		default the preferred role should come from the platform.
+
+ Valid values: source, sink, none (to remove preference)
+
+What: /sys/class/typec/<port>/supported_accessory_modes
+Date: April 2017
+Contact: Heikki Krogerus <heikki.krogerus@linux.intel.com>
+Description:
+		Space-separated list of the accessory modes, as defined in the
+		USB Type-C specification, that the port supports.
+
+What: /sys/class/typec/<port>/usb_power_delivery_revision
+Date: April 2017
+Contact: Heikki Krogerus <heikki.krogerus@linux.intel.com>
+Description:
+ Revision number of the supported USB Power Delivery
+ specification, or 0 when USB Power Delivery is not supported.
+
+What: /sys/class/typec/<port>/usb_typec_revision
+Date: April 2017
+Contact: Heikki Krogerus <heikki.krogerus@linux.intel.com>
+Description:
+ Revision number of the supported USB Type-C specification.
+
+
+USB Type-C partner devices (eg. /sys/class/typec/port0-partner/)
+
+What: /sys/class/typec/<port>-partner/accessory_mode
+Date: April 2017
+Contact: Heikki Krogerus <heikki.krogerus@linux.intel.com>
+Description:
+ Shows the Accessory Mode name when the partner is an Accessory.
+ The Accessory Modes are defined in USB Type-C Specification.
+
+What: /sys/class/typec/<port>-partner/supports_usb_power_delivery
+Date: April 2017
+Contact: Heikki Krogerus <heikki.krogerus@linux.intel.com>
+Description:
+ Shows if the partner supports USB Power Delivery communication:
+ Valid values: yes, no
+
+What:		/sys/class/typec/<port>-partner/identity/
+Date: April 2017
+Contact: Heikki Krogerus <heikki.krogerus@linux.intel.com>
+Description:
+ This directory appears only if the port device driver is capable
+ of showing the result of Discover Identity USB power delivery
+ command. That will not always be possible even when USB power
+ delivery is supported, for example when USB power delivery
+ communication for the port is mostly handled in firmware. If the
+ directory exists, it will have an attribute file for every VDO
+ in Discover Identity command result.
+
+What: /sys/class/typec/<port>-partner/identity/id_header
+Date: April 2017
+Contact: Heikki Krogerus <heikki.krogerus@linux.intel.com>
+Description:
+ ID Header VDO part of Discover Identity command result. The
+ value will show 0 until Discover Identity command result becomes
+ available. The value can be polled.
+
+What: /sys/class/typec/<port>-partner/identity/cert_stat
+Date: April 2017
+Contact: Heikki Krogerus <heikki.krogerus@linux.intel.com>
+Description:
+ Cert Stat VDO part of Discover Identity command result. The
+ value will show 0 until Discover Identity command result becomes
+ available. The value can be polled.
+
+What: /sys/class/typec/<port>-partner/identity/product
+Date: April 2017
+Contact: Heikki Krogerus <heikki.krogerus@linux.intel.com>
+Description:
+ Product VDO part of Discover Identity command result. The value
+ will show 0 until Discover Identity command result becomes
+ available. The value can be polled.
+
+
+USB Type-C cable devices (eg. /sys/class/typec/port0-cable/)
+
+Note: Electronically Marked Cables will have a device also for one cable plug
+(eg. /sys/class/typec/port0-plug0). If the cable is active and also has an SOP
+Double Prime controller (USB Power Delivery specification ch. 2.4), it will
+have a second device for the other plug as well. Both plugs may have alternate
+modes as described in the USB Type-C and USB Power Delivery specifications.
+
+What: /sys/class/typec/<port>-cable/type
+Date: April 2017
+Contact: Heikki Krogerus <heikki.krogerus@linux.intel.com>
+Description:
+ Shows if the cable is active.
+ Valid values: active, passive
+
+What: /sys/class/typec/<port>-cable/plug_type
+Date: April 2017
+Contact: Heikki Krogerus <heikki.krogerus@linux.intel.com>
+Description:
+ Shows type of the plug on the cable:
+ - type-a - Standard A
+ - type-b - Standard B
+ - type-c
+ - captive
+
+What: /sys/class/typec/<port>-cable/identity/
+Date: April 2017
+Contact: Heikki Krogerus <heikki.krogerus@linux.intel.com>
+Description:
+ This directory appears only if the port device driver is capable
+ of showing the result of Discover Identity USB power delivery
+ command. That will not always be possible even when USB power
+ delivery is supported. If the directory exists, it will have an
+ attribute for every VDO returned by Discover Identity command.
+
+What: /sys/class/typec/<port>-cable/identity/id_header
+Date: April 2017
+Contact: Heikki Krogerus <heikki.krogerus@linux.intel.com>
+Description:
+ ID Header VDO part of Discover Identity command result. The
+ value will show 0 until Discover Identity command result becomes
+ available. The value can be polled.
+
+What: /sys/class/typec/<port>-cable/identity/cert_stat
+Date: April 2017
+Contact: Heikki Krogerus <heikki.krogerus@linux.intel.com>
+Description:
+ Cert Stat VDO part of Discover Identity command result. The
+ value will show 0 until Discover Identity command result becomes
+ available. The value can be polled.
+
+What: /sys/class/typec/<port>-cable/identity/product
+Date: April 2017
+Contact: Heikki Krogerus <heikki.krogerus@linux.intel.com>
+Description:
+ Product VDO part of Discover Identity command result. The value
+ will show 0 until Discover Identity command result becomes
+ available. The value can be polled.
+
+
+Alternate Mode devices.
+
+The alternate modes will have Standard or Vendor ID (SVID) assigned by USB-IF.
+The ports, partners and cable plugs can have alternate modes. A supported SVID
+will consist of a set of modes. Every SVID a port/partner/plug supports will
+have a device created for it, and every supported mode for a supported SVID will
+have its own directory under that device. Below <dev> refers to the device for
+the alternate mode.
+
+What: /sys/class/typec/<port|partner|cable>/<dev>/svid
+Date: April 2017
+Contact: Heikki Krogerus <heikki.krogerus@linux.intel.com>
+Description:
+ The SVID (Standard or Vendor ID) assigned by USB-IF for this
+ alternate mode.
+
+What: /sys/class/typec/<port|partner|cable>/<dev>/mode<index>/
+Date: April 2017
+Contact: Heikki Krogerus <heikki.krogerus@linux.intel.com>
+Description:
+ Every supported mode will have its own directory. The name of
+ a mode will be "mode<index>" (for example mode1), where <index>
+ is the actual index to the mode VDO returned by Discover Modes
+ USB power delivery command.
+
+What: /sys/class/typec/<port|partner|cable>/<dev>/mode<index>/description
+Date: April 2017
+Contact: Heikki Krogerus <heikki.krogerus@linux.intel.com>
+Description:
+ Shows description of the mode. The description is optional for
+ the drivers, just like with the Billboard Devices.
+
+What: /sys/class/typec/<port|partner|cable>/<dev>/mode<index>/vdo
+Date: April 2017
+Contact: Heikki Krogerus <heikki.krogerus@linux.intel.com>
+Description:
+ Shows the VDO in hexadecimal returned by Discover Modes command
+ for this mode.
+
+What: /sys/class/typec/<port|partner|cable>/<dev>/mode<index>/active
+Date: April 2017
+Contact: Heikki Krogerus <heikki.krogerus@linux.intel.com>
+Description:
+ Shows if the mode is active or not. The attribute can be used
+ for entering/exiting the mode with partners and cable plugs, and
+ with the port alternate modes it can be used for disabling
+ support for specific alternate modes. Entering/exiting modes is
+		supported as a synchronous operation, so write(2) to the attribute
+ does not return until the enter/exit mode operation has
+ finished. The attribute is notified when the mode is
+ entered/exited so poll(2) on the attribute wakes up.
+ Entering/exiting a mode will also generate uevent KOBJ_CHANGE.
+
+ Valid values: yes, no
+
+What: /sys/class/typec/<port>/<dev>/mode<index>/supported_roles
+Date: April 2017
+Contact: Heikki Krogerus <heikki.krogerus@linux.intel.com>
+Description:
+ Space separated list of the supported roles.
+
+ This attribute is available for the devices describing the
+ alternate modes a port supports, and it will not be exposed with
+ the devices presenting the alternate modes the partners or cable
+ plugs support.
+
+ Valid values: source, sink
diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu
index 2a4a423d08e0..f3d5817c4ef0 100644
--- a/Documentation/ABI/testing/sysfs-devices-system-cpu
+++ b/Documentation/ABI/testing/sysfs-devices-system-cpu
@@ -366,3 +366,10 @@ Contact: Linux ARM Kernel Mailing list <linux-arm-kernel@lists.infradead.org>
Description: AArch64 CPU registers
'identification' directory exposes the CPU ID registers for
identifying model and revision of the CPU.
+
+What: /sys/devices/system/cpu/cpu#/cpu_capacity
+Date: December 2016
+Contact: Linux kernel mailing list <linux-kernel@vger.kernel.org>
+Description:	Information about CPU heterogeneity.
+
+ cpu_capacity: capacity of cpu#.
diff --git a/Documentation/ABI/testing/sysfs-platform-chipidea-usb2 b/Documentation/ABI/testing/sysfs-platform-chipidea-usb2
new file mode 100644
index 000000000000..b0f4684a83fe
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-platform-chipidea-usb2
@@ -0,0 +1,9 @@
+What: /sys/bus/platform/devices/ci_hdrc.0/role
+Date: Mar 2017
+Contact: Peter Chen <peter.chen@nxp.com>
+Description:
+		Reading this file returns the string "gadget" or "host",
+		indicating the current controller role.
+
+		Writing "gadget" or "host" to the file triggers a role switch.
+		Only controllers in a dual-role configuration support writing.
diff --git a/Documentation/ABI/testing/sysfs-platform-renesas_usb3 b/Documentation/ABI/testing/sysfs-platform-renesas_usb3
new file mode 100644
index 000000000000..5621c15d5dc0
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-platform-renesas_usb3
@@ -0,0 +1,15 @@
+What: /sys/devices/platform/<renesas_usb3's name>/role
+Date: March 2017
+KernelVersion: 4.13
+Contact: Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
+Description:
+		This file can be read and written.
+		It shows and changes the DRD mode of the USB controller.
+
+		Write one of the following strings to change the mode:
+		"host" - switch the mode from peripheral to host.
+		"peripheral" - switch the mode from host to peripheral.
+
+		Reading the file returns one of the following strings:
+		"host" - the controller is currently in host mode.
+		"peripheral" - the controller is currently in peripheral mode.
diff --git a/Documentation/DocBook/rapidio.tmpl b/Documentation/DocBook/rapidio.tmpl
index 50479360d845..ac3cca3399a1 100644
--- a/Documentation/DocBook/rapidio.tmpl
+++ b/Documentation/DocBook/rapidio.tmpl
@@ -129,9 +129,6 @@
<sect1 id="Device_model_support"><title>Device model support</title>
!Idrivers/rapidio/rio-driver.c
</sect1>
- <sect1 id="Sysfs_support"><title>Sysfs support</title>
-!Idrivers/rapidio/rio-sysfs.c
- </sect1>
<sect1 id="PPC32_support"><title>PPC32 support</title>
!Iarch/powerpc/sysdev/fsl_rio.c
</sect1>
diff --git a/Documentation/PCI/00-INDEX b/Documentation/PCI/00-INDEX
index 147231f1613e..00c9a90b6f38 100644
--- a/Documentation/PCI/00-INDEX
+++ b/Documentation/PCI/00-INDEX
@@ -12,3 +12,13 @@ pci.txt
- info on the PCI subsystem for device driver authors
pcieaer-howto.txt
- the PCI Express Advanced Error Reporting Driver Guide HOWTO
+endpoint/pci-endpoint.txt
+	- guide to adding an endpoint controller driver and an endpoint function driver.
+endpoint/pci-endpoint-cfs.txt
+	- guide to using configfs to configure the PCI endpoint function.
+endpoint/pci-test-function.txt
+	- specification of the *PCI test* function device.
+endpoint/pci-test-howto.txt
+	- user guide for the PCI endpoint test function.
+endpoint/function/binding/
+ - binding documentation for PCI endpoint function
diff --git a/Documentation/PCI/endpoint/function/binding/pci-test.txt b/Documentation/PCI/endpoint/function/binding/pci-test.txt
new file mode 100644
index 000000000000..3b68b955fb50
--- /dev/null
+++ b/Documentation/PCI/endpoint/function/binding/pci-test.txt
@@ -0,0 +1,17 @@
+PCI TEST ENDPOINT FUNCTION
+
+name: Should be "pci_epf_test" to bind to the pci_epf_test driver.
+
+Configurable Fields:
+vendorid : should be 0x104c
+deviceid : should be 0xb500 for DRA74x and 0xb501 for DRA72x
+revid : don't care
+progif_code : don't care
+subclass_code : don't care
+baseclass_code : should be 0xff
+cache_line_size : don't care
+subsys_vendor_id : don't care
+subsys_id : don't care
+interrupt_pin	 : Should be 1 - INTA, 2 - INTB, 3 - INTC, 4 - INTD
+msi_interrupts : Should be 1 to 32 depending on the number of MSI interrupts
+ to test
diff --git a/Documentation/PCI/endpoint/pci-endpoint-cfs.txt b/Documentation/PCI/endpoint/pci-endpoint-cfs.txt
new file mode 100644
index 000000000000..d740f29960a4
--- /dev/null
+++ b/Documentation/PCI/endpoint/pci-endpoint-cfs.txt
@@ -0,0 +1,105 @@
+ CONFIGURING PCI ENDPOINT USING CONFIGFS
+ Kishon Vijay Abraham I <kishon@ti.com>
+
+The PCI Endpoint Core exposes a configfs entry (pci_ep) to configure the
+PCI endpoint function and to bind the endpoint function
+with the endpoint controller. (For an introduction to other mechanisms to
+configure the PCI Endpoint Function, refer to [1].)
+
+*) Mounting configfs
+
+The PCI Endpoint Core layer creates a pci_ep directory in the mounted configfs
+directory. configfs can be mounted using the following command:
+
+ mount -t configfs none /sys/kernel/config
+
+*) Directory Structure
+
+The pci_ep configfs has two directories at its root: controllers and
+functions. Every EPC device present in the system will have an entry in
+the *controllers* directory and every EPF driver present in the system
+will have an entry in the *functions* directory.
+
+/sys/kernel/config/pci_ep/
+ .. controllers/
+ .. functions/
+
+*) Creating EPF Device
+
+Every registered EPF driver will be listed in the *functions* directory. The
+entries corresponding to an EPF driver will be created by the EPF core.
+
+/sys/kernel/config/pci_ep/functions/
+ .. <EPF Driver1>/
+ ... <EPF Device 11>/
+ ... <EPF Device 21>/
+ .. <EPF Driver2>/
+ ... <EPF Device 12>/
+ ... <EPF Device 22>/
+
+In order to create a <EPF device> of the type probed by <EPF Driver>, the
+user has to create a directory inside <EPF DriverN>.
+
+Every <EPF device> directory consists of the following entries that can be
+used to configure the standard configuration header of the endpoint function.
+(These entries are created by the framework when any new <EPF Device> is
+created)
+
+ .. <EPF Driver1>/
+ ... <EPF Device 11>/
+ ... vendorid
+ ... deviceid
+ ... revid
+ ... progif_code
+ ... subclass_code
+ ... baseclass_code
+ ... cache_line_size
+ ... subsys_vendor_id
+ ... subsys_id
+ ... interrupt_pin
+
+*) EPC Device
+
+Every registered EPC device will be listed in the *controllers* directory. The
+entries corresponding to an EPC device will be created by the EPC core.
+
+/sys/kernel/config/pci_ep/controllers/
+ .. <EPC Device1>/
+ ... <Symlink EPF Device11>/
+ ... <Symlink EPF Device12>/
+ ... start
+ .. <EPC Device2>/
+ ... <Symlink EPF Device21>/
+ ... <Symlink EPF Device22>/
+ ... start
+
+The <EPC Device> directory will have a list of symbolic links to
+<EPF Device>. These symbolic links should be created by the user to
+represent the functions present in the endpoint device.
+
+The <EPC Device> directory will also have a *start* field. Once
+"1" is written to this field, the endpoint device will be ready to
+establish the link with the host. This is usually done after
+all the EPF devices are created and linked with the EPC device.
+
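+A minimal C sketch of the whole sequence described above (the pci_epf_test,
+func1 and 51000000.pcie_ep names are taken from the pci-test example and are
+assumptions here):
+
+	#include <stdio.h>
+	#include <fcntl.h>
+	#include <sys/stat.h>
+	#include <unistd.h>
+
+	int main(void)
+	{
+		const char *ep = "/sys/kernel/config/pci_ep";
+		char func[256], link[256], start[256];
+
+		/* Create the EPF device under its EPF driver directory. */
+		snprintf(func, sizeof(func), "%s/functions/pci_epf_test/func1", ep);
+		mkdir(func, 0755);
+
+		/* Bind it to an EPC device by creating the symbolic link. */
+		snprintf(link, sizeof(link),
+			 "%s/controllers/51000000.pcie_ep/func1", ep);
+		symlink(func, link);
+
+		/* Finally ask the endpoint controller to start the link. */
+		snprintf(start, sizeof(start),
+			 "%s/controllers/51000000.pcie_ep/start", ep);
+		int fd = open(start, O_WRONLY);
+		if (fd >= 0) {
+			write(fd, "1", 1);
+			close(fd);
+		}
+		return 0;
+	}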
+
+ | controllers/
+ | <Directory: EPC name>/
+ | <Symbolic Link: Function>
+ | start
+ | functions/
+ | <Directory: EPF driver>/
+ | <Directory: EPF device>/
+ | vendorid
+ | deviceid
+ | revid
+ | progif_code
+ | subclass_code
+ | baseclass_code
+ | cache_line_size
+ | subsys_vendor_id
+ | subsys_id
+ | interrupt_pin
+ | function
+
+[1] -> Documentation/PCI/endpoint/pci-endpoint.txt
diff --git a/Documentation/PCI/endpoint/pci-endpoint.txt b/Documentation/PCI/endpoint/pci-endpoint.txt
new file mode 100644
index 000000000000..9b1d66829290
--- /dev/null
+++ b/Documentation/PCI/endpoint/pci-endpoint.txt
@@ -0,0 +1,215 @@
+ PCI ENDPOINT FRAMEWORK
+ Kishon Vijay Abraham I <kishon@ti.com>
+
+This document is a guide to using the PCI Endpoint Framework to create an
+endpoint controller driver and an endpoint function driver, and to using the
+configfs interface to bind the function driver to the controller driver.
+
+1. Introduction
+
+Linux has a comprehensive PCI subsystem to support PCI controllers that
+operate in Root Complex mode. The subsystem has the capability to scan the
+PCI bus, assign memory and IRQ resources, load PCI drivers (based on
+vendor ID and device ID), and support other services like hot-plug, power
+management, advanced error reporting and virtual channels.
+
+However, the PCI controller IP integrated in some SoCs is capable of operating
+either in Root Complex mode or in Endpoint mode. The PCI Endpoint Framework
+adds endpoint mode support to Linux. This makes it possible to run Linux in an
+EP system, which has a wide variety of use cases such as testing and
+validation, co-processor accelerators, etc.
+
+2. PCI Endpoint Core
+
+The PCI Endpoint Core layer comprises 3 components: the Endpoint Controller
+library, the Endpoint Function library, and the configfs layer to bind the
+endpoint function with the endpoint controller.
+
+2.1 PCI Endpoint Controller(EPC) Library
+
+The EPC library provides APIs to be used by controllers that can operate
+in endpoint mode. It also provides APIs to be used by the function
+driver/library in order to implement a particular endpoint function.
+
+2.1.1 APIs for the PCI controller Driver
+
+This section lists the APIs that the PCI Endpoint core provides to be used
+by the PCI controller driver.
+
+*) devm_pci_epc_create()/pci_epc_create()
+
+ The PCI controller driver should implement the following ops:
+ * write_header: ops to populate configuration space header
+ * set_bar: ops to configure the BAR
+ * clear_bar: ops to reset the BAR
+ * alloc_addr_space: ops to allocate in PCI controller address space
+ * free_addr_space: ops to free the allocated address space
+ * raise_irq: ops to raise a legacy or MSI interrupt
+ * start: ops to start the PCI link
+ * stop: ops to stop the PCI link
+
+ The PCI controller driver can then create a new EPC device by invoking
+ devm_pci_epc_create()/pci_epc_create().
+
+*) devm_pci_epc_destroy()/pci_epc_destroy()
+
+ The PCI controller driver can destroy the EPC device created by either
+ devm_pci_epc_create() or pci_epc_create() using devm_pci_epc_destroy() or
+ pci_epc_destroy().
+
+*) pci_epc_linkup()
+
+ In order to notify all the function devices that the EPC device to which
+ they are linked has established a link with the host, the PCI controller
+ driver should invoke pci_epc_linkup().
+
+*) pci_epc_mem_init()
+
+ Initialize the pci_epc_mem structure used for allocating EPC addr space.
+
+*) pci_epc_mem_exit()
+
+ Cleanup the pci_epc_mem structure allocated during pci_epc_mem_init().
+
+2.1.2 APIs for the PCI Endpoint Function Driver
+
+This section lists the APIs that the PCI Endpoint core provides to be used
+by the PCI endpoint function driver.
+
+*) pci_epc_write_header()
+
+ The PCI endpoint function driver should use pci_epc_write_header() to
+ write the standard configuration header to the endpoint controller.
+
+*) pci_epc_set_bar()
+
+ The PCI endpoint function driver should use pci_epc_set_bar() to configure
+ the Base Address Register in order for the host to assign PCI addr space.
+ Register space of the function driver is usually configured
+ using this API.
+
+*) pci_epc_clear_bar()
+
+ The PCI endpoint function driver should use pci_epc_clear_bar() to reset
+ the BAR.
+
+*) pci_epc_raise_irq()
+
+ The PCI endpoint function driver should use pci_epc_raise_irq() to raise
+ Legacy Interrupt or MSI Interrupt.
+
+*) pci_epc_mem_alloc_addr()
+
+  The PCI endpoint function driver should use pci_epc_mem_alloc_addr() to
+  allocate a memory address from the EPC address space, which is required
+  to access the RC's buffer.
+
+*) pci_epc_mem_free_addr()
+
+ The PCI endpoint function driver should use pci_epc_mem_free_addr() to
+ free the memory space allocated using pci_epc_mem_alloc_addr().
+
+2.1.3 Other APIs
+
+There are other APIs provided by the EPC library. These are used for binding
+the EPF device with EPC device. pci-ep-cfs.c can be used as reference for
+using these APIs.
+
+*) pci_epc_get()
+
+ Get a reference to the PCI endpoint controller based on the device name of
+ the controller.
+
+*) pci_epc_put()
+
+ Release the reference to the PCI endpoint controller obtained using
+ pci_epc_get()
+
+*) pci_epc_add_epf()
+
+ Add a PCI endpoint function to a PCI endpoint controller. A PCIe device
+ can have up to 8 functions according to the specification.
+
+*) pci_epc_remove_epf()
+
+ Remove the PCI endpoint function from PCI endpoint controller.
+
+*) pci_epc_start()
+
+ The PCI endpoint function driver should invoke pci_epc_start() once it
+ has configured the endpoint function and wants to start the PCI link.
+
+*) pci_epc_stop()
+
+ The PCI endpoint function driver should invoke pci_epc_stop() to stop
+  the PCI link.
+
+2.2 PCI Endpoint Function(EPF) Library
+
+The EPF library provides APIs to be used by the function driver and the EPC
+library to provide endpoint mode functionality.
+
+2.2.1 APIs for the PCI Endpoint Function Driver
+
+This section lists the APIs that the PCI Endpoint core provides to be used
+by the PCI endpoint function driver.
+
+*) pci_epf_register_driver()
+
+ The PCI Endpoint Function driver should implement the following ops:
+  * bind: ops to perform when an EPC device has been bound to the EPF device
+  * unbind: ops to perform when the binding between an EPC device and an
+            EPF device is lost
+ * linkup: ops to perform when the EPC device has established a
+ connection with a host system
+
+ The PCI Function driver can then register the PCI EPF driver by using
+ pci_epf_register_driver().
+
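+  A rough sketch of what such a driver might look like (structure and field
+  names follow the ops listed above; consult the in-tree pci_epf_test driver
+  for the authoritative definitions):
+
+	static int my_epf_bind(struct pci_epf *epf)
+	{
+		/* Write the config header, set up BARs, allocate space, ... */
+		return 0;
+	}
+
+	static void my_epf_unbind(struct pci_epf *epf)
+	{
+		/* Undo whatever bind() set up. */
+	}
+
+	static void my_epf_linkup(struct pci_epf *epf)
+	{
+		/* The link to the host is up; start servicing requests. */
+	}
+
+	static struct pci_epf_ops my_epf_ops = {
+		.bind	= my_epf_bind,
+		.unbind	= my_epf_unbind,
+		.linkup	= my_epf_linkup,
+	};
+
+	/* The ops are then referenced from a struct pci_epf_driver that is
+	 * registered with pci_epf_register_driver(). */
+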
+*) pci_epf_unregister_driver()
+
+ The PCI Function driver can unregister the PCI EPF driver by using
+ pci_epf_unregister_driver().
+
+*) pci_epf_alloc_space()
+
+ The PCI Function driver can allocate space for a particular BAR using
+ pci_epf_alloc_space().
+
+*) pci_epf_free_space()
+
+ The PCI Function driver can free the allocated space
+ (using pci_epf_alloc_space) by invoking pci_epf_free_space().
+
+2.2.2 APIs for the PCI Endpoint Controller Library
+
+This section lists the APIs that the PCI Endpoint core provides to be used
+by the PCI endpoint controller library.
+
+*) pci_epf_linkup()
+
+ The PCI endpoint controller library invokes pci_epf_linkup() when the
+ EPC device has established the connection to the host.
+
+2.2.3 Other APIs
+
+There are other APIs provided by the EPF library. These are used to notify
+the function driver when the EPF device is bound to the EPC device.
+pci-ep-cfs.c can be used as reference for using these APIs.
+
+*) pci_epf_create()
+
+ Create a new PCI EPF device by passing the name of the PCI EPF device.
+  This name will be used to bind the EPF device to an EPF driver.
+
+*) pci_epf_destroy()
+
+ Destroy the created PCI EPF device.
+
+*) pci_epf_bind()
+
+ pci_epf_bind() should be invoked when the EPF device has been bound to
+  an EPC device.
+
+*) pci_epf_unbind()
+
+  pci_epf_unbind() should be invoked when the binding between the EPC device
+  and the EPF device is lost.
diff --git a/Documentation/PCI/endpoint/pci-test-function.txt b/Documentation/PCI/endpoint/pci-test-function.txt
new file mode 100644
index 000000000000..0c519c9bf94a
--- /dev/null
+++ b/Documentation/PCI/endpoint/pci-test-function.txt
@@ -0,0 +1,66 @@
+ PCI TEST
+ Kishon Vijay Abraham I <kishon@ti.com>
+
+Traditionally, a PCI RC has always been validated by using standard
+PCI cards like ethernet, USB or SATA PCI cards.
+However, with the addition of the EP core in the Linux kernel, it is possible
+to configure a PCI controller that can operate in EP mode to work as
+a test device.
+
+The PCI endpoint test device is a virtual device (defined in software)
+used to test the endpoint functionality and serve as a sample driver
+for other PCI endpoint devices (to use the EP framework).
+
+The PCI endpoint test device has the following registers:
+
+ 1) PCI_ENDPOINT_TEST_MAGIC
+ 2) PCI_ENDPOINT_TEST_COMMAND
+ 3) PCI_ENDPOINT_TEST_STATUS
+ 4) PCI_ENDPOINT_TEST_SRC_ADDR
+ 5) PCI_ENDPOINT_TEST_DST_ADDR
+ 6) PCI_ENDPOINT_TEST_SIZE
+ 7) PCI_ENDPOINT_TEST_CHECKSUM
+
+*) PCI_ENDPOINT_TEST_MAGIC
+
+This register will be used to test BAR0. A known pattern will be written
+and read back from MAGIC register to verify BAR0.
+
+*) PCI_ENDPOINT_TEST_COMMAND:
+
+This register will be used by the host driver to indicate the function
+that the endpoint device must perform.
+
+Bitfield Description:
+ Bit 0 : raise legacy IRQ
+ Bit 1 : raise MSI IRQ
+ Bit 2 - 7 : MSI interrupt number
+ Bit 8 : read command (read data from RC buffer)
+ Bit 9 : write command (write data to RC buffer)
+ Bit 10 : copy command (copy data from one RC buffer to another
+ RC buffer)
+
+*) PCI_ENDPOINT_TEST_STATUS
+
+This register reflects the status of the PCI endpoint device.
+
+Bitfield Description:
+ Bit 0 : read success
+ Bit 1 : read fail
+ Bit 2 : write success
+ Bit 3 : write fail
+ Bit 4 : copy success
+ Bit 5 : copy fail
+ Bit 6 : IRQ raised
+ Bit 7 : source address is invalid
+ Bit 8 : destination address is invalid
+
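+A possible C encoding of the COMMAND and STATUS bitfields above, for use by a
+host-side test program (the macro names are illustrative, not taken from the
+in-tree driver):
+
+	/* PCI_ENDPOINT_TEST_COMMAND bits */
+	#define CMD_RAISE_LEGACY_IRQ	(1 << 0)
+	#define CMD_RAISE_MSI_IRQ	(1 << 1)
+	#define CMD_MSI_NUMBER(n)	(((n) & 0x3f) << 2)	/* bits 2 - 7 */
+	#define CMD_READ		(1 << 8)	/* read data from RC buffer */
+	#define CMD_WRITE		(1 << 9)	/* write data to RC buffer */
+	#define CMD_COPY		(1 << 10)	/* copy between RC buffers */
+
+	/* PCI_ENDPOINT_TEST_STATUS bits */
+	#define STATUS_READ_SUCCESS	(1 << 0)
+	#define STATUS_READ_FAIL	(1 << 1)
+	#define STATUS_WRITE_SUCCESS	(1 << 2)
+	#define STATUS_WRITE_FAIL	(1 << 3)
+	#define STATUS_COPY_SUCCESS	(1 << 4)
+	#define STATUS_COPY_FAIL	(1 << 5)
+	#define STATUS_IRQ_RAISED	(1 << 6)
+	#define STATUS_SRC_ADDR_INVALID	(1 << 7)
+	#define STATUS_DST_ADDR_INVALID	(1 << 8)
+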
+*) PCI_ENDPOINT_TEST_SRC_ADDR
+
+This register contains the source address (RC buffer address) for the
+COPY/READ command.
+
+*) PCI_ENDPOINT_TEST_DST_ADDR
+
+This register contains the destination address (RC buffer address) for
+the COPY/WRITE command.
diff --git a/Documentation/PCI/endpoint/pci-test-howto.txt b/Documentation/PCI/endpoint/pci-test-howto.txt
new file mode 100644
index 000000000000..75f48c3bb191
--- /dev/null
+++ b/Documentation/PCI/endpoint/pci-test-howto.txt
@@ -0,0 +1,179 @@
+ PCI TEST USERGUIDE
+ Kishon Vijay Abraham I <kishon@ti.com>
+
+This document is a guide to help users use the pci-epf-test function driver
+and the pci_endpoint_test host driver for testing PCI. The steps to be
+followed on the host side and the EP side are given below.
+
+1. Endpoint Device
+
+1.1 Endpoint Controller Devices
+
+To find the list of endpoint controller devices in the system:
+
+ # ls /sys/class/pci_epc/
+ 51000000.pcie_ep
+
+If PCI_ENDPOINT_CONFIGFS is enabled
+ # ls /sys/kernel/config/pci_ep/controllers
+ 51000000.pcie_ep
+
+1.2 Endpoint Function Drivers
+
+To find the list of endpoint function drivers in the system:
+
+ # ls /sys/bus/pci-epf/drivers
+ pci_epf_test
+
+If PCI_ENDPOINT_CONFIGFS is enabled
+ # ls /sys/kernel/config/pci_ep/functions
+ pci_epf_test
+
+1.3 Creating pci-epf-test Device
+
+A PCI endpoint function device can be created using configfs. To create a
+pci-epf-test device, the following commands can be used:
+
+ # mount -t configfs none /sys/kernel/config
+ # cd /sys/kernel/config/pci_ep/
+ # mkdir functions/pci_epf_test/func1
+
+The "mkdir func1" above creates the pci-epf-test function device that will
+be probed by pci_epf_test driver.
+
+The PCI endpoint framework populates the directory with the following
+configurable fields.
+
+ # ls functions/pci_epf_test/func1
+ baseclass_code interrupt_pin revid subsys_vendor_id
+ cache_line_size msi_interrupts subclass_code vendorid
+ deviceid progif_code subsys_id
+
+The PCI endpoint function driver populates these entries with default values
+when the device is bound to the driver. The pci-epf-test driver populates
+vendorid with 0xffff and interrupt_pin with 0x0001
+
+ # cat functions/pci_epf_test/func1/vendorid
+ 0xffff
+ # cat functions/pci_epf_test/func1/interrupt_pin
+ 0x0001
+
+1.4 Configuring pci-epf-test Device
+
+The user can configure the pci-epf-test device using configfs entry. In order
+to change the vendorid and the number of MSI interrupts used by the function
+device, the following commands can be used.
+
+ # echo 0x104c > functions/pci_epf_test/func1/vendorid
+ # echo 0xb500 > functions/pci_epf_test/func1/deviceid
+ # echo 16 > functions/pci_epf_test/func1/msi_interrupts
+
+1.5 Binding pci-epf-test Device to EP Controller
+
+In order for the endpoint function device to be useful, it has to be bound to
+a PCI endpoint controller driver. Use configfs to bind the function
+device to one of the controller drivers present in the system.
+
+ # ln -s functions/pci_epf_test/func1 controllers/51000000.pcie_ep/
+
+Once the above step is completed, the PCI endpoint is ready to establish a link
+with the host.
+
+1.6 Start the Link
+
+In order for the endpoint device to establish a link with the host, the _start_
+field should be populated with '1'.
+
+ # echo 1 > controllers/51000000.pcie_ep/start
+
+2. RootComplex Device
+
+2.1 lspci Output
+
+Note that the devices listed here correspond to the values populated in 1.4 above.
+
+ 00:00.0 PCI bridge: Texas Instruments Device 8888 (rev 01)
+ 01:00.0 Unassigned class [ff00]: Texas Instruments Device b500
+
+2.2 Using Endpoint Test function Device
+
+pcitest.sh added in tools/pci/ can be used to run all the default PCI endpoint
+tests. Before pcitest.sh can be used, pcitest.c should be compiled using the
+following commands:
+
+ cd <kernel-dir>
+ make headers_install ARCH=arm
+ arm-linux-gnueabihf-gcc -Iusr/include tools/pci/pcitest.c -o pcitest
+ cp pcitest <rootfs>/usr/sbin/
+ cp tools/pci/pcitest.sh <rootfs>
+
+2.2.1 pcitest.sh Output
+ # ./pcitest.sh
+ BAR tests
+
+ BAR0: OKAY
+ BAR1: OKAY
+ BAR2: OKAY
+ BAR3: OKAY
+ BAR4: NOT OKAY
+ BAR5: NOT OKAY
+
+ Interrupt tests
+
+ LEGACY IRQ: NOT OKAY
+ MSI1: OKAY
+ MSI2: OKAY
+ MSI3: OKAY
+ MSI4: OKAY
+ MSI5: OKAY
+ MSI6: OKAY
+ MSI7: OKAY
+ MSI8: OKAY
+ MSI9: OKAY
+ MSI10: OKAY
+ MSI11: OKAY
+ MSI12: OKAY
+ MSI13: OKAY
+ MSI14: OKAY
+ MSI15: OKAY
+ MSI16: OKAY
+ MSI17: NOT OKAY
+ MSI18: NOT OKAY
+ MSI19: NOT OKAY
+ MSI20: NOT OKAY
+ MSI21: NOT OKAY
+ MSI22: NOT OKAY
+ MSI23: NOT OKAY
+ MSI24: NOT OKAY
+ MSI25: NOT OKAY
+ MSI26: NOT OKAY
+ MSI27: NOT OKAY
+ MSI28: NOT OKAY
+ MSI29: NOT OKAY
+ MSI30: NOT OKAY
+ MSI31: NOT OKAY
+ MSI32: NOT OKAY
+
+ Read Tests
+
+ READ ( 1 bytes): OKAY
+ READ ( 1024 bytes): OKAY
+ READ ( 1025 bytes): OKAY
+ READ (1024000 bytes): OKAY
+ READ (1024001 bytes): OKAY
+
+ Write Tests
+
+ WRITE ( 1 bytes): OKAY
+ WRITE ( 1024 bytes): OKAY
+ WRITE ( 1025 bytes): OKAY
+ WRITE (1024000 bytes): OKAY
+ WRITE (1024001 bytes): OKAY
+
+ Copy Tests
+
+ COPY ( 1 bytes): OKAY
+ COPY ( 1024 bytes): OKAY
+ COPY ( 1025 bytes): OKAY
+ COPY (1024000 bytes): OKAY
+ COPY (1024001 bytes): OKAY
diff --git a/Documentation/PCI/pci-iov-howto.txt b/Documentation/PCI/pci-iov-howto.txt
index 2d91ae251982..d2a84151e99c 100644
--- a/Documentation/PCI/pci-iov-howto.txt
+++ b/Documentation/PCI/pci-iov-howto.txt
@@ -68,6 +68,18 @@ To disable SR-IOV capability:
echo 0 > \
/sys/bus/pci/devices/<DOMAIN:BUS:DEVICE.FUNCTION>/sriov_numvfs
+To enable auto probing of VFs by a compatible driver on the host, run the
+command below before enabling the SR-IOV capabilities. This is the
+default behavior.
+ echo 1 > \
+ /sys/bus/pci/devices/<DOMAIN:BUS:DEVICE.FUNCTION>/sriov_drivers_autoprobe
+
+To disable auto probing of VFs by a compatible driver on the host, run the
+command below before enabling the SR-IOV capabilities. Updating this
+entry will not affect VFs which have already been probed.
+ echo 0 > \
+ /sys/bus/pci/devices/<DOMAIN:BUS:DEVICE.FUNCTION>/sriov_drivers_autoprobe
+
3.2 Usage example
Following piece of code illustrates the usage of the SR-IOV API.
diff --git a/Documentation/RCU/00-INDEX b/Documentation/RCU/00-INDEX
index f773a264ae02..1672573b037a 100644
--- a/Documentation/RCU/00-INDEX
+++ b/Documentation/RCU/00-INDEX
@@ -17,7 +17,7 @@ rcu_dereference.txt
rcubarrier.txt
- RCU and Unloadable Modules
rculist_nulls.txt
- - RCU list primitives for use with SLAB_DESTROY_BY_RCU
+ - RCU list primitives for use with SLAB_TYPESAFE_BY_RCU
rcuref.txt
- Reference-count design for elements of lists/arrays protected by RCU
rcu.txt
diff --git a/Documentation/RCU/Design/Data-Structures/Data-Structures.html b/Documentation/RCU/Design/Data-Structures/Data-Structures.html
index d583c653a703..38d6d800761f 100644
--- a/Documentation/RCU/Design/Data-Structures/Data-Structures.html
+++ b/Documentation/RCU/Design/Data-Structures/Data-Structures.html
@@ -19,6 +19,8 @@ to each other.
The <tt>rcu_state</tt> Structure</a>
<li> <a href="#The rcu_node Structure">
The <tt>rcu_node</tt> Structure</a>
+<li> <a href="#The rcu_segcblist Structure">
+ The <tt>rcu_segcblist</tt> Structure</a>
<li> <a href="#The rcu_data Structure">
The <tt>rcu_data</tt> Structure</a>
<li> <a href="#The rcu_dynticks Structure">
@@ -841,6 +843,134 @@ for lockdep lock-class names.
Finally, lines&nbsp;64-66 produce an error if the maximum number of
CPUs is too large for the specified fanout.
+<h3><a name="The rcu_segcblist Structure">
+The <tt>rcu_segcblist</tt> Structure</a></h3>
+
+The <tt>rcu_segcblist</tt> structure maintains a segmented list of
+callbacks as follows:
+
+<pre>
+ 1 #define RCU_DONE_TAIL 0
+ 2 #define RCU_WAIT_TAIL 1
+ 3 #define RCU_NEXT_READY_TAIL 2
+ 4 #define RCU_NEXT_TAIL 3
+ 5 #define RCU_CBLIST_NSEGS 4
+ 6
+ 7 struct rcu_segcblist {
+ 8 struct rcu_head *head;
+ 9 struct rcu_head **tails[RCU_CBLIST_NSEGS];
+10 unsigned long gp_seq[RCU_CBLIST_NSEGS];
+11 long len;
+12 long len_lazy;
+13 };
+</pre>
+
+<p>
+The segments are as follows:
+
+<ol>
+<li> <tt>RCU_DONE_TAIL</tt>: Callbacks whose grace periods have elapsed.
+ These callbacks are ready to be invoked.
+<li> <tt>RCU_WAIT_TAIL</tt>: Callbacks that are waiting for the
+ current grace period.
+ Note that different CPUs can have different ideas about which
+ grace period is current, hence the <tt>-&gt;gp_seq</tt> field.
+<li> <tt>RCU_NEXT_READY_TAIL</tt>: Callbacks waiting for the next
+ grace period to start.
+<li> <tt>RCU_NEXT_TAIL</tt>: Callbacks that have not yet been
+ associated with a grace period.
+</ol>
+
+<p>
+The <tt>-&gt;head</tt> pointer references the first callback or
+is <tt>NULL</tt> if the list contains no callbacks (which is
+<i>not</i> the same as being empty).
+Each element of the <tt>-&gt;tails[]</tt> array references the
+<tt>-&gt;next</tt> pointer of the last callback in the corresponding
+segment of the list, or the list's <tt>-&gt;head</tt> pointer if
+that segment and all previous segments are empty.
+If the corresponding segment is empty but some previous segment is
+not empty, then the array element is identical to its predecessor.
+Older callbacks are closer to the head of the list, and new callbacks
+are added at the tail.
+This relationship between the <tt>-&gt;head</tt> pointer, the
+<tt>-&gt;tails[]</tt> array, and the callbacks is shown in this
+diagram:
+
+</p><p><img src="nxtlist.svg" alt="nxtlist.svg" width="40%">
+
+</p><p>In this figure, the <tt>-&gt;head</tt> pointer references the
+first
+RCU callback in the list.
+The <tt>-&gt;tails[RCU_DONE_TAIL]</tt> array element references
+the <tt>-&gt;head</tt> pointer itself, indicating that none
+of the callbacks is ready to invoke.
+The <tt>-&gt;tails[RCU_WAIT_TAIL]</tt> array element references callback
+CB&nbsp;2's <tt>-&gt;next</tt> pointer, which indicates that
+CB&nbsp;1 and CB&nbsp;2 are both waiting on the current grace period,
+give or take possible disagreements about exactly which grace period
+is the current one.
+The <tt>-&gt;tails[RCU_NEXT_READY_TAIL]</tt> array element
+references the same RCU callback that <tt>-&gt;tails[RCU_WAIT_TAIL]</tt>
+does, which indicates that there are no callbacks waiting on the next
+RCU grace period.
+The <tt>-&gt;tails[RCU_NEXT_TAIL]</tt> array element references
+CB&nbsp;4's <tt>-&gt;next</tt> pointer, indicating that all the
+remaining RCU callbacks have not yet been assigned to an RCU grace
+period.
+Note that the <tt>-&gt;tails[RCU_NEXT_TAIL]</tt> array element
+always references the last RCU callback's <tt>-&gt;next</tt> pointer
+unless the callback list is empty, in which case it references
+the <tt>-&gt;head</tt> pointer.
+
+<p>
+There is one additional important special case for the
+<tt>-&gt;tails[RCU_NEXT_TAIL]</tt> array element: It can be <tt>NULL</tt>
+when this list is <i>disabled</i>.
+Lists are disabled when the corresponding CPU is offline or when
+the corresponding CPU's callbacks are offloaded to a kthread,
+both of which are described elsewhere.
+
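+<p>
+Given this description, a test for whether a given segment contains any
+callbacks can be sketched as follows.
+This is an illustrative fragment derived from the description above,
+not necessarily the kernel's exact helper:
+
+<pre>
+/* Sketch: returns true if segment "seg" of "rsclp" is empty. */
+static bool segment_is_empty(struct rcu_segcblist *rsclp, int seg)
+{
+	if (seg == RCU_DONE_TAIL)
+		return rsclp-&gt;tails[RCU_DONE_TAIL] == &amp;rsclp-&gt;head;
+	return rsclp-&gt;tails[seg] == rsclp-&gt;tails[seg - 1];
+}
+</pre>
+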
+</p><p>CPUs advance their callbacks from the
+<tt>RCU_NEXT_TAIL</tt> to the <tt>RCU_NEXT_READY_TAIL</tt> to the
+<tt>RCU_WAIT_TAIL</tt> to the <tt>RCU_DONE_TAIL</tt> list segments
+as grace periods advance.
+
+</p><p>The <tt>-&gt;gp_seq[]</tt> array records grace-period
+numbers corresponding to the list segments.
+This is what allows different CPUs to have different ideas as to
+which is the current grace period while still avoiding premature
+invocation of their callbacks.
+In particular, this allows CPUs that go idle for extended periods
+to determine which of their callbacks are ready to be invoked after
+reawakening.
+
+</p><p>The <tt>-&gt;len</tt> counter contains the number of
+callbacks in <tt>-&gt;head</tt>, and the
+<tt>-&gt;len_lazy</tt> contains the number of those callbacks that
+are known to only free memory, and whose invocation can therefore
+be safely deferred.
+
+<p><b>Important note</b>: It is the <tt>-&gt;len</tt> field that
+determines whether or not there are callbacks associated with
+this <tt>rcu_segcblist</tt> structure, <i>not</i> the <tt>-&gt;head</tt>
+pointer.
+The reason for this is that all the ready-to-invoke callbacks
+(that is, those in the <tt>RCU_DONE_TAIL</tt> segment) are extracted
+all at once at callback-invocation time.
+If callback invocation must be postponed, for example, because a
+high-priority process just woke up on this CPU, then the remaining
+callbacks are placed back on the <tt>RCU_DONE_TAIL</tt> segment.
+Either way, the <tt>-&gt;len</tt> and <tt>-&gt;len_lazy</tt> counts
+are adjusted after the corresponding callbacks have been invoked, and so
+again it is the <tt>-&gt;len</tt> count that accurately reflects whether
+or not there are callbacks associated with this <tt>rcu_segcblist</tt>
+structure.
+Of course, off-CPU sampling of the <tt>-&gt;len</tt> count requires
+the use of appropriate synchronization, for example, memory barriers.
+This synchronization can be a bit subtle, particularly in the case
+of <tt>rcu_barrier()</tt>.
+
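+<p>
+A minimal sketch of the resulting &ldquo;are there callbacks?&rdquo; test,
+illustrative only and leaving any needed off-CPU synchronization to the
+caller, might therefore read:
+
+<pre>
+/* Sketch: callbacks are present exactly when -&gt;len is nonzero. */
+static bool cblist_has_cbs(struct rcu_segcblist *rsclp)
+{
+	return rsclp-&gt;len != 0;	/* Not rsclp-&gt;head != NULL! */
+}
+</pre>
+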
<h3><a name="The rcu_data Structure">
The <tt>rcu_data</tt> Structure</a></h3>
@@ -983,62 +1113,18 @@ choice.
as follows:
<pre>
- 1 struct rcu_head *nxtlist;
- 2 struct rcu_head **nxttail[RCU_NEXT_SIZE];
- 3 unsigned long nxtcompleted[RCU_NEXT_SIZE];
- 4 long qlen_lazy;
- 5 long qlen;
- 6 long qlen_last_fqs_check;
+ 1 struct rcu_segcblist cblist;
+ 2 long qlen_last_fqs_check;
+ 3 unsigned long n_cbs_invoked;
+ 4 unsigned long n_nocbs_invoked;
+ 5 unsigned long n_cbs_orphaned;
+ 6 unsigned long n_cbs_adopted;
7 unsigned long n_force_qs_snap;
- 8 unsigned long n_cbs_invoked;
- 9 unsigned long n_cbs_orphaned;
-10 unsigned long n_cbs_adopted;
-11 long blimit;
+ 8 long blimit;
</pre>
-<p>The <tt>-&gt;nxtlist</tt> pointer and the
-<tt>-&gt;nxttail[]</tt> array form a four-segment list with
-older callbacks near the head and newer ones near the tail.
-Each segment contains callbacks with the corresponding relationship
-to the current grace period.
-The pointer out of the end of each of the four segments is referenced
-by the element of the <tt>-&gt;nxttail[]</tt> array indexed by
-<tt>RCU_DONE_TAIL</tt> (for callbacks handled by a prior grace period),
-<tt>RCU_WAIT_TAIL</tt> (for callbacks waiting on the current grace period),
-<tt>RCU_NEXT_READY_TAIL</tt> (for callbacks that will wait on the next
-grace period), and
-<tt>RCU_NEXT_TAIL</tt> (for callbacks that are not yet associated
-with a specific grace period)
-respectively, as shown in the following figure.
-
-</p><p><img src="nxtlist.svg" alt="nxtlist.svg" width="40%">
-
-</p><p>In this figure, the <tt>-&gt;nxtlist</tt> pointer references the
-first
-RCU callback in the list.
-The <tt>-&gt;nxttail[RCU_DONE_TAIL]</tt> array element references
-the <tt>-&gt;nxtlist</tt> pointer itself, indicating that none
-of the callbacks is ready to invoke.
-The <tt>-&gt;nxttail[RCU_WAIT_TAIL]</tt> array element references callback
-CB&nbsp;2's <tt>-&gt;next</tt> pointer, which indicates that
-CB&nbsp;1 and CB&nbsp;2 are both waiting on the current grace period.
-The <tt>-&gt;nxttail[RCU_NEXT_READY_TAIL]</tt> array element
-references the same RCU callback that <tt>-&gt;nxttail[RCU_WAIT_TAIL]</tt>
-does, which indicates that there are no callbacks waiting on the next
-RCU grace period.
-The <tt>-&gt;nxttail[RCU_NEXT_TAIL]</tt> array element references
-CB&nbsp;4's <tt>-&gt;next</tt> pointer, indicating that all the
-remaining RCU callbacks have not yet been assigned to an RCU grace
-period.
-Note that the <tt>-&gt;nxttail[RCU_NEXT_TAIL]</tt> array element
-always references the last RCU callback's <tt>-&gt;next</tt> pointer
-unless the callback list is empty, in which case it references
-the <tt>-&gt;nxtlist</tt> pointer.
-
-</p><p>CPUs advance their callbacks from the
-<tt>RCU_NEXT_TAIL</tt> to the <tt>RCU_NEXT_READY_TAIL</tt> to the
-<tt>RCU_WAIT_TAIL</tt> to the <tt>RCU_DONE_TAIL</tt> list segments
-as grace periods advance.
+<p>The <tt>-&gt;cblist</tt> structure is the segmented callback list
+described earlier.
The CPU advances the callbacks in its <tt>rcu_data</tt> structure
whenever it notices that another RCU grace period has completed.
The CPU detects the completion of an RCU grace period by noticing
@@ -1049,16 +1135,7 @@ Recall that each <tt>rcu_node</tt> structure's
<tt>-&gt;completed</tt> field is updated at the end of each
grace period.
-</p><p>The <tt>-&gt;nxtcompleted[]</tt> array records grace-period
-numbers corresponding to the list segments.
-This allows CPUs that go idle for extended periods to determine
-which of their callbacks are ready to be invoked after reawakening.
-
-</p><p>The <tt>-&gt;qlen</tt> counter contains the number of
-callbacks in <tt>-&gt;nxtlist</tt>, and the
-<tt>-&gt;qlen_lazy</tt> contains the number of those callbacks that
-are known to only free memory, and whose invocation can therefore
-be safely deferred.
+<p>
The <tt>-&gt;qlen_last_fqs_check</tt> and
<tt>-&gt;n_force_qs_snap</tt> coordinate the forcing of quiescent
states from <tt>call_rcu()</tt> and friends when callback
@@ -1069,6 +1146,10 @@ lists grow excessively long.
fields count the number of callbacks invoked,
sent to other CPUs when this CPU goes offline,
and received from other CPUs when those other CPUs go offline.
+The <tt>-&gt;n_nocbs_invoked</tt> counter is used when the CPU's callbacks
+are offloaded to a kthread.
+
+<p>
Finally, the <tt>-&gt;blimit</tt> counter is the maximum number of
RCU callbacks that may be invoked at a given time.
@@ -1104,6 +1185,9 @@ Its fields are as follows:
1 int dynticks_nesting;
2 int dynticks_nmi_nesting;
3 atomic_t dynticks;
+ 4 bool rcu_need_heavy_qs;
+ 5 unsigned long rcu_qs_ctr;
+ 6 bool rcu_urgent_qs;
</pre>
<p>The <tt>-&gt;dynticks_nesting</tt> field counts the
@@ -1117,11 +1201,32 @@ NMIs are counted by the <tt>-&gt;dynticks_nmi_nesting</tt>
field, except that NMIs that interrupt non-dyntick-idle execution
are not counted.
-</p><p>Finally, the <tt>-&gt;dynticks</tt> field counts the corresponding
+</p><p>The <tt>-&gt;dynticks</tt> field counts the corresponding
CPU's transitions to and from dyntick-idle mode, so that this counter
has an even value when the CPU is in dyntick-idle mode and an odd
value otherwise.
+</p><p>The <tt>-&gt;rcu_need_heavy_qs</tt> field is used
+to record the fact that the RCU core code would really like to
+see a quiescent state from the corresponding CPU, so much so that
+it is willing to call for heavy-weight dyntick-counter operations.
+This flag is checked by RCU's context-switch and <tt>cond_resched()</tt>
+code, which provide a momentary idle sojourn in response.
+
+</p><p>The <tt>-&gt;rcu_qs_ctr</tt> field is used to record
+quiescent states from <tt>cond_resched()</tt>.
+Because <tt>cond_resched()</tt> can execute quite frequently, this
+must be quite lightweight, as in a non-atomic increment of this
+per-CPU field.
+
+</p><p>Finally, the <tt>-&gt;rcu_urgent_qs</tt> field is used to record
+the fact that the RCU core code would really like to see a quiescent
+state from the corresponding CPU, with the various other fields indicating
+just how badly RCU wants this quiescent state.
+This flag is checked by RCU's context-switch and <tt>cond_resched()</tt>
+code, which, if nothing else, non-atomically increment <tt>-&gt;rcu_qs_ctr</tt>
+in response.
+
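+<p>
+For example, given a pointer <tt>rdtp</tt> to this structure, an
+illustrative idleness check based on the even/odd encoding of
+<tt>-&gt;dynticks</tt> might read as follows, keeping in mind that the
+real code must also supply appropriate memory ordering:
+
+<pre>
+int snap = atomic_read(&amp;rdtp-&gt;dynticks);
+
+if (!(snap &amp; 0x1)) {
+	/* Even value: the CPU is in dyntick-idle mode. */
+} else {
+	/* Odd value: the CPU is not idle, at least from RCU's viewpoint. */
+}
+</pre>
+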
<table>
<tr><th>&nbsp;</th></tr>
<tr><th align="left">Quick Quiz:</th></tr>
diff --git a/Documentation/RCU/Design/Data-Structures/nxtlist.svg b/Documentation/RCU/Design/Data-Structures/nxtlist.svg
index abc4cc73a097..0223e79c38e0 100644
--- a/Documentation/RCU/Design/Data-Structures/nxtlist.svg
+++ b/Documentation/RCU/Design/Data-Structures/nxtlist.svg
@@ -19,7 +19,7 @@
id="svg2"
version="1.1"
inkscape:version="0.48.4 r9939"
- sodipodi:docname="nxtlist.fig">
+ sodipodi:docname="segcblist.svg">
<metadata
id="metadata94">
<rdf:RDF>
@@ -28,7 +28,7 @@
<dc:format>image/svg+xml</dc:format>
<dc:type
rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
- <dc:title></dc:title>
+ <dc:title />
</cc:Work>
</rdf:RDF>
</metadata>
@@ -241,61 +241,51 @@
xml:space="preserve"
x="225"
y="675"
- fill="#000000"
- font-family="Courier"
font-style="normal"
font-weight="bold"
font-size="324"
- text-anchor="start"
- id="text64">nxtlist</text>
+ id="text64"
+ style="font-size:324px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;font-family:Courier">-&gt;head</text>
<!-- Text -->
<text
xml:space="preserve"
x="225"
y="1800"
- fill="#000000"
- font-family="Courier"
font-style="normal"
font-weight="bold"
font-size="324"
- text-anchor="start"
- id="text66">nxttail[RCU_DONE_TAIL]</text>
+ id="text66"
+ style="font-size:324px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;font-family:Courier">-&gt;tails[RCU_DONE_TAIL]</text>
<!-- Text -->
<text
xml:space="preserve"
x="225"
y="2925"
- fill="#000000"
- font-family="Courier"
font-style="normal"
font-weight="bold"
font-size="324"
- text-anchor="start"
- id="text68">nxttail[RCU_WAIT_TAIL]</text>
+ id="text68"
+ style="font-size:324px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;font-family:Courier">-&gt;tails[RCU_WAIT_TAIL]</text>
<!-- Text -->
<text
xml:space="preserve"
x="225"
y="4050"
- fill="#000000"
- font-family="Courier"
font-style="normal"
font-weight="bold"
font-size="324"
- text-anchor="start"
- id="text70">nxttail[RCU_NEXT_READY_TAIL]</text>
+ id="text70"
+ style="font-size:324px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;font-family:Courier">-&gt;tails[RCU_NEXT_READY_TAIL]</text>
<!-- Text -->
<text
xml:space="preserve"
x="225"
y="5175"
- fill="#000000"
- font-family="Courier"
font-style="normal"
font-weight="bold"
font-size="324"
- text-anchor="start"
- id="text72">nxttail[RCU_NEXT_TAIL]</text>
+ id="text72"
+ style="font-size:324px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;font-family:Courier">-&gt;tails[RCU_NEXT_TAIL]</text>
<!-- Text -->
<text
xml:space="preserve"
diff --git a/Documentation/RCU/Design/Expedited-Grace-Periods/Expedited-Grace-Periods.html b/Documentation/RCU/Design/Expedited-Grace-Periods/Expedited-Grace-Periods.html
index 7a3194c5559a..e5d0bbd0230b 100644
--- a/Documentation/RCU/Design/Expedited-Grace-Periods/Expedited-Grace-Periods.html
+++ b/Documentation/RCU/Design/Expedited-Grace-Periods/Expedited-Grace-Periods.html
@@ -284,6 +284,7 @@ Expedited Grace Period Refinements</a></h2>
Funnel locking and wait/wakeup</a>.
<li> <a href="#Use of Workqueues">Use of Workqueues</a>.
<li> <a href="#Stall Warnings">Stall warnings</a>.
+<li> <a href="#Mid-Boot Operation">Mid-boot operation</a>.
</ol>
<h3><a name="Idle-CPU Checks">Idle-CPU Checks</a></h3>
@@ -524,7 +525,7 @@ their grace periods and carrying out their wakeups.
In earlier implementations, the task requesting the expedited
grace period also drove it to completion.
This straightforward approach had the disadvantage of needing to
-account for signals sent to user tasks,
+account for POSIX signals sent to user tasks,
so more recent implementations use the Linux kernel's
<a href="https://www.kernel.org/doc/Documentation/workqueue.txt">workqueues</a>.
@@ -533,8 +534,8 @@ The requesting task still does counter snapshotting and funnel-lock
processing, but the task reaching the top of the funnel lock
does a <tt>schedule_work()</tt> (from <tt>_synchronize_rcu_expedited()</tt>)
so that a workqueue kthread does the actual grace-period processing.
-Because workqueue kthreads do not accept signals, grace-period-wait
-processing need not allow for signals.
+Because workqueue kthreads do not accept POSIX signals, grace-period-wait
+processing need not allow for POSIX signals.
In addition, this approach allows wakeups for the previous expedited
grace period to be overlapped with processing for the next expedited
@@ -586,6 +587,46 @@ blocking the current grace period are printed.
Each stall warning results in another pass through the loop, but the
second and subsequent passes use longer stall times.
+<h3><a name="Mid-Boot Operation">Mid-boot operation</a></h3>
+
+<p>
+The use of workqueues has the advantage that the expedited
+grace-period code need not worry about POSIX signals.
+Unfortunately, it has the
+corresponding disadvantage that workqueues cannot be used until
+they are initialized, which does not happen until some time after
+the scheduler spawns the first task.
+Given that there are parts of the kernel that really do want to
+execute grace periods during this mid-boot &ldquo;dead zone&rdquo;,
+expedited grace periods must do something else during this time.
+
+<p>
+What they do is to fall back to the old practice of requiring that the
+requesting task drive the expedited grace period, as was the case
+before the use of workqueues.
+However, the requesting task is only required to drive the grace period
+during the mid-boot dead zone.
+Before mid-boot, a synchronous grace period is a no-op.
+Some time after mid-boot, workqueues are used.
+
+<p>
+Non-expedited non-SRCU synchronous grace periods must also operate
+normally during mid-boot.
+This is handled by causing non-expedited grace periods to take the
+expedited code path during mid-boot.
+
+<p>
+The current code assumes that there are no POSIX signals during
+the mid-boot dead zone.
+However, if an overwhelming need for POSIX signals somehow arises,
+appropriate adjustments can be made to the expedited stall-warning code.
+One such adjustment would reinstate the pre-workqueue stall-warning
+checks, but only during the mid-boot dead zone.
+
+<p>
+With this refinement, synchronous grace periods can now be used from
+task context pretty much any time during the life of the kernel.
+
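+<p>
+Schematically, and using purely illustrative helper names rather than
+actual kernel symbols, the resulting behavior can be pictured as follows:
+
+<pre>
+/* Schematic sketch only: these helpers are illustrative, not real APIs. */
+void synchronous_grace_period_sketch(void)
+{
+	if (before_scheduler_spawns_first_task())
+		return;	/* Early boot: a no-op suffices. */
+	if (in_mid_boot_dead_zone())
+		drive_expedited_gp_from_requesting_task();
+	else
+		hand_grace_period_off_to_workqueue();
+}
+</pre>
+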
<h3><a name="Summary">
Summary</a></h3>
diff --git a/Documentation/RCU/Design/Requirements/Requirements.html b/Documentation/RCU/Design/Requirements/Requirements.html
index 21593496aca6..f60adf112663 100644
--- a/Documentation/RCU/Design/Requirements/Requirements.html
+++ b/Documentation/RCU/Design/Requirements/Requirements.html
@@ -659,8 +659,9 @@ systems with more than one CPU:
In other words, a given instance of <tt>synchronize_rcu()</tt>
can avoid waiting on a given RCU read-side critical section only
if it can prove that <tt>synchronize_rcu()</tt> started first.
+ </font>
- <p>
+ <p><font color="ffffff">
A related question is &ldquo;When <tt>rcu_read_lock()</tt>
doesn't generate any code, why does it matter how it relates
to a grace period?&rdquo;
@@ -675,8 +676,9 @@ systems with more than one CPU:
within the critical section, in which case none of the accesses
within the critical section may observe the effects of any
access following the grace period.
+ </font>
- <p>
+ <p><font color="ffffff">
As of late 2016, mathematical models of RCU take this
viewpoint, for example, see slides&nbsp;62 and&nbsp;63
of the
@@ -1616,8 +1618,8 @@ CPUs should at least make reasonable forward progress.
In return for its shorter latencies, <tt>synchronize_rcu_expedited()</tt>
is permitted to impose modest degradation of real-time latency
on non-idle online CPUs.
-That said, it will likely be necessary to take further steps to reduce this
-degradation, hopefully to roughly that of a scheduling-clock interrupt.
+Here, &ldquo;modest&rdquo; means roughly the same latency
+degradation as a scheduling-clock interrupt.
<p>
There are a number of situations where even
@@ -1913,12 +1915,9 @@ This requirement is another factor driving batching of grace periods,
but it is also the driving force behind the checks for large numbers
of queued RCU callbacks in the <tt>call_rcu()</tt> code path.
Finally, high update rates should not delay RCU read-side critical
-sections, although some read-side delays can occur when using
+sections, although some small read-side delays can occur when using
<tt>synchronize_rcu_expedited()</tt>, courtesy of this function's use
-of <tt>try_stop_cpus()</tt>.
-(In the future, <tt>synchronize_rcu_expedited()</tt> will be
-converted to use lighter-weight inter-processor interrupts (IPIs),
-but this will still disturb readers, though to a much smaller degree.)
+of <tt>smp_call_function_single()</tt>.
<p>
Although all three of these corner cases were understood in the early
@@ -2154,7 +2153,8 @@ as will <tt>rcu_assign_pointer()</tt>.
<p>
Although <tt>call_rcu()</tt> may be invoked at any
time during boot, callbacks are not guaranteed to be invoked until after
-the scheduler is fully up and running.
+all of RCU's kthreads have been spawned, which occurs at
+<tt>early_initcall()</tt> time.
This delay in callback invocation is due to the fact that RCU does not
invoke callbacks until it is fully initialized, and this full initialization
cannot occur until after the scheduler has initialized itself to the
@@ -2167,8 +2167,10 @@ on what operations those callbacks could invoke.
Perhaps surprisingly, <tt>synchronize_rcu()</tt>,
<a href="#Bottom-Half Flavor"><tt>synchronize_rcu_bh()</tt></a>
(<a href="#Bottom-Half Flavor">discussed below</a>),
-and
-<a href="#Sched Flavor"><tt>synchronize_sched()</tt></a>
+<a href="#Sched Flavor"><tt>synchronize_sched()</tt></a>,
+<tt>synchronize_rcu_expedited()</tt>,
+<tt>synchronize_rcu_bh_expedited()</tt>, and
+<tt>synchronize_sched_expedited()</tt>
will all operate normally
during very early boot, the reason being that there is only one CPU
and preemption is disabled.
@@ -2178,45 +2180,59 @@ state and thus a grace period, so the early-boot implementation can
be a no-op.
<p>
-Both <tt>synchronize_rcu_bh()</tt> and <tt>synchronize_sched()</tt>
-continue to operate normally through the remainder of boot, courtesy
-of the fact that preemption is disabled across their RCU read-side
-critical sections and also courtesy of the fact that there is still
-only one CPU.
-However, once the scheduler starts initializing, preemption is enabled.
-There is still only a single CPU, but the fact that preemption is enabled
-means that the no-op implementation of <tt>synchronize_rcu()</tt> no
-longer works in <tt>CONFIG_PREEMPT=y</tt> kernels.
-Therefore, as soon as the scheduler starts initializing, the early-boot
-fastpath is disabled.
-This means that <tt>synchronize_rcu()</tt> switches to its runtime
-mode of operation where it posts callbacks, which in turn means that
-any call to <tt>synchronize_rcu()</tt> will block until the corresponding
-callback is invoked.
-Unfortunately, the callback cannot be invoked until RCU's runtime
-grace-period machinery is up and running, which cannot happen until
-the scheduler has initialized itself sufficiently to allow RCU's
-kthreads to be spawned.
-Therefore, invoking <tt>synchronize_rcu()</tt> during scheduler
-initialization can result in deadlock.
+However, once the scheduler has spawned its first kthread, this early
+boot trick fails for <tt>synchronize_rcu()</tt> (as well as for
+<tt>synchronize_rcu_expedited()</tt>) in <tt>CONFIG_PREEMPT=y</tt>
+kernels.
+The reason is that an RCU read-side critical section might be preempted,
+which means that a subsequent <tt>synchronize_rcu()</tt> really does have
+to wait for something, as opposed to simply returning immediately.
+Unfortunately, <tt>synchronize_rcu()</tt> can't do this until all of
+its kthreads are spawned, which doesn't happen until some time during
+<tt>early_initcall()</tt> time.
+But this is no excuse: RCU is nevertheless required to correctly handle
+synchronous grace periods during this time period.
+Once all of its kthreads are up and running, RCU starts running
+normally.
<table>
<tr><th>&nbsp;</th></tr>
<tr><th align="left">Quick Quiz:</th></tr>
<tr><td>
- So what happens with <tt>synchronize_rcu()</tt> during
- scheduler initialization for <tt>CONFIG_PREEMPT=n</tt>
- kernels?
+ How can RCU possibly handle grace periods before all of its
+ kthreads have been spawned???
</td></tr>
<tr><th align="left">Answer:</th></tr>
<tr><td bgcolor="#ffffff"><font color="ffffff">
- In <tt>CONFIG_PREEMPT=n</tt> kernel, <tt>synchronize_rcu()</tt>
- maps directly to <tt>synchronize_sched()</tt>.
- Therefore, <tt>synchronize_rcu()</tt> works normally throughout
- boot in <tt>CONFIG_PREEMPT=n</tt> kernels.
- However, your code must also work in <tt>CONFIG_PREEMPT=y</tt> kernels,
- so it is still necessary to avoid invoking <tt>synchronize_rcu()</tt>
- during scheduler initialization.
+ Very carefully!
+ </font>
+
+ <p><font color="ffffff">
+ During the &ldquo;dead zone&rdquo; between the time that the
+ scheduler spawns the first task and the time that all of RCU's
+ kthreads have been spawned, all synchronous grace periods are
+ handled by the expedited grace-period mechanism.
+ At runtime, this expedited mechanism relies on workqueues, but
+ during the dead zone the requesting task itself drives the
+ desired expedited grace period.
+ Because dead-zone execution takes place within task context,
+ everything works.
+ Once the dead zone ends, expedited grace periods go back to
+ using workqueues, as is required to avoid problems that would
+ otherwise occur when a user task received a POSIX signal while
+ driving an expedited grace period.
+ </font>
+
+ <p><font color="ffffff">
+ And yes, this does mean that it is unhelpful to send POSIX
+ signals to random tasks between the time that the scheduler
+ spawns its first kthread and the time that RCU's kthreads
+ have all been spawned.
+ If there ever turns out to be a good reason for sending POSIX
+ signals during that time, appropriate adjustments will be made.
+ (If it turns out that POSIX signals are sent during this time for
+ no good reason, other adjustments will be made, appropriate
+ or otherwise.)
</font></td></tr>
<tr><td>&nbsp;</td></tr>
</table>
@@ -2295,12 +2311,61 @@ situation, and Dipankar Sarma incorporated <tt>rcu_barrier()</tt> into RCU.
The need for <tt>rcu_barrier()</tt> for module unloading became
apparent later.
+<p>
+<b>Important note</b>: The <tt>rcu_barrier()</tt> function is not,
+repeat, <i>not</i>, obligated to wait for a grace period.
+It is instead only required to wait for RCU callbacks that have
+already been posted.
+Therefore, if there are no RCU callbacks posted anywhere in the system,
+<tt>rcu_barrier()</tt> is within its rights to return immediately.
+Even if there are callbacks posted, <tt>rcu_barrier()</tt> does not
+necessarily need to wait for a grace period.
+
+<table>
+<tr><th>&nbsp;</th></tr>
+<tr><th align="left">Quick Quiz:</th></tr>
+<tr><td>
+ Wait a minute!
+	Each RCU callback must wait for a grace period to complete,
+ and <tt>rcu_barrier()</tt> must wait for each pre-existing
+ callback to be invoked.
+ Doesn't <tt>rcu_barrier()</tt> therefore need to wait for
+ a full grace period if there is even one callback posted anywhere
+ in the system?
+</td></tr>
+<tr><th align="left">Answer:</th></tr>
+<tr><td bgcolor="#ffffff"><font color="ffffff">
+ Absolutely not!!!
+ </font>
+
+ <p><font color="ffffff">
+	Yes, each RCU callback must wait for a grace period to complete,
+ but it might well be partly (or even completely) finished waiting
+ by the time <tt>rcu_barrier()</tt> is invoked.
+ In that case, <tt>rcu_barrier()</tt> need only wait for the
+ remaining portion of the grace period to elapse.
+ So even if there are quite a few callbacks posted,
+ <tt>rcu_barrier()</tt> might well return quite quickly.
+ </font>
+
+ <p><font color="ffffff">
+ So if you need to wait for a grace period as well as for all
+ pre-existing callbacks, you will need to invoke both
+ <tt>synchronize_rcu()</tt> and <tt>rcu_barrier()</tt>.
+ If latency is a concern, you can always use workqueues
+ to invoke them concurrently.
+</font></td></tr>
+<tr><td>&nbsp;</td></tr>
+</table>
+
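+<p>
+For example, code that needs to wait both for a full grace period and for
+all previously posted callbacks can simply invoke both primitives, as in
+this minimal sketch:
+
+<pre>
+synchronize_rcu();	/* Wait for a full grace period... */
+rcu_barrier();		/* ...then for all previously queued callbacks. */
+</pre>
+
+<p>
+As the Quick Quiz notes, the two calls can instead be run concurrently,
+for example from separate workqueue items, if their combined latency is
+a concern.
+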
<h3><a name="Hotplug CPU">Hotplug CPU</a></h3>
<p>
The Linux kernel supports CPU hotplug, which means that CPUs
can come and go.
-It is of course illegal to use any RCU API member from an offline CPU.
+It is of course illegal to use any RCU API member from an offline CPU,
+with the exception of <a href="#Sleepable RCU">SRCU</a> read-side
+critical sections.
This requirement was present from day one in DYNIX/ptx, but
on the other hand, the Linux kernel's CPU-hotplug implementation
is &ldquo;interesting.&rdquo;
@@ -2310,19 +2375,18 @@ The Linux-kernel CPU-hotplug implementation has notifiers that
are used to allow the various kernel subsystems (including RCU)
to respond appropriately to a given CPU-hotplug operation.
Most RCU operations may be invoked from CPU-hotplug notifiers,
-including even normal synchronous grace-period operations
-such as <tt>synchronize_rcu()</tt>.
-However, expedited grace-period operations such as
-<tt>synchronize_rcu_expedited()</tt> are not supported,
-due to the fact that current implementations block CPU-hotplug
-operations, which could result in deadlock.
+including even synchronous grace-period operations such as
+<tt>synchronize_rcu()</tt> and <tt>synchronize_rcu_expedited()</tt>.
<p>
-In addition, all-callback-wait operations such as
+However, all-callback-wait operations such as
<tt>rcu_barrier()</tt> are also not supported, due to the
fact that there are phases of CPU-hotplug operations where
the outgoing CPU's callbacks will not be invoked until after
the CPU-hotplug operation ends, which could also result in deadlock.
+Furthermore, <tt>rcu_barrier()</tt> blocks CPU-hotplug operations
+during its execution, which results in another type of deadlock
+when invoked from a CPU-hotplug notifier.
<h3><a name="Scheduler and RCU">Scheduler and RCU</a></h3>
@@ -2864,6 +2928,27 @@ API, which, in combination with <tt>srcu_read_unlock()</tt>,
guarantees a full memory barrier.
<p>
+Also unlike other RCU flavors, SRCU's callbacks-wait function
+<tt>srcu_barrier()</tt> may be invoked from CPU-hotplug notifiers,
+though this is not necessarily a good idea.
+The reason that this is possible is that SRCU is insensitive
+to whether or not a CPU is online, which means that <tt>srcu_barrier()</tt>
+need not exclude CPU-hotplug operations.
+
+<p>
+As of v4.12, SRCU's callbacks are maintained per-CPU, eliminating
+a locking bottleneck present in prior kernel versions.
+Although this will allow users to put much heavier stress on
+<tt>call_srcu()</tt>, it is important to note that SRCU does not
+yet take any special steps to deal with callback flooding.
+So if you are posting (say) 10,000 SRCU callbacks per second per CPU,
+you are probably totally OK, but if you intend to post (say) 1,000,000
+SRCU callbacks per second per CPU, please run some tests first.
+SRCU just might need a few adjustments to deal with that sort of load.
+Of course, your mileage may vary based on the speed of your CPUs and
+the size of your memory.
+
+<p>
The
<a href="https://lwn.net/Articles/609973/#RCU Per-Flavor API Table">SRCU API</a>
includes
@@ -3021,8 +3106,8 @@ to do some redesign to avoid this scalability problem.
<p>
RCU disables CPU hotplug in a few places, perhaps most notably in the
-expedited grace-period and <tt>rcu_barrier()</tt> operations.
-If there is a strong reason to use expedited grace periods in CPU-hotplug
+<tt>rcu_barrier()</tt> operations.
+If there is a strong reason to use <tt>rcu_barrier()</tt> in CPU-hotplug
notifiers, it will be necessary to avoid disabling CPU hotplug.
This would introduce some complexity, so there had better be a <i>very</i>
good reason.
@@ -3096,9 +3181,5 @@ Andy Lutomirski for their help in rendering
this article human readable, and to Michelle Rankin for her support
of this effort.
Other contributions are acknowledged in the Linux kernel's git archive.
-The cartoon is copyright (c) 2013 by Melissa Broussard,
-and is provided
-under the terms of the Creative Commons Attribution-Share Alike 3.0
-United States license.
</body></html>
diff --git a/Documentation/RCU/rcu_dereference.txt b/Documentation/RCU/rcu_dereference.txt
index c0bf2441a2ba..b2a613f16d74 100644
--- a/Documentation/RCU/rcu_dereference.txt
+++ b/Documentation/RCU/rcu_dereference.txt
@@ -138,6 +138,15 @@ o Be very careful about comparing pointers obtained from
This sort of comparison occurs frequently when scanning
RCU-protected circular linked lists.
+ Note that if checks for being within an RCU read-side
+ critical section are not required and the pointer is never
+ dereferenced, rcu_access_pointer() should be used in place
+ of rcu_dereference(). The rcu_access_pointer() primitive
+ does not require an enclosing read-side critical section,
+ and also omits the smp_read_barrier_depends() included in
+ rcu_dereference(), which in turn should provide a small
+	performance gain on some CPUs (e.g., the DEC Alpha).
+
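+	For example (an illustrative fragment that assumes a global
+	RCU-protected pointer named "gp"):
+
+		/* The pointer is only tested, never dereferenced,
+		 * so no RCU read-side critical section is needed. */
+		if (!rcu_access_pointer(gp))
+			return -ENOENT;
+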
o The comparison is against a pointer that references memory
that was initialized "a long time ago." The reason
this is safe is that even if misordering occurs, the
diff --git a/Documentation/RCU/rculist_nulls.txt b/Documentation/RCU/rculist_nulls.txt
index 18f9651ff23d..8151f0195f76 100644
--- a/Documentation/RCU/rculist_nulls.txt
+++ b/Documentation/RCU/rculist_nulls.txt
@@ -1,5 +1,5 @@
Using hlist_nulls to protect read-mostly linked lists and
-objects using SLAB_DESTROY_BY_RCU allocations.
+objects using SLAB_TYPESAFE_BY_RCU allocations.
Please read the basics in Documentation/RCU/listRCU.txt
@@ -7,7 +7,7 @@ Using special markers (called 'nulls') is a convenient way
to solve following problem :
A typical RCU linked list managing objects which are
-allocated with SLAB_DESTROY_BY_RCU kmem_cache can
+allocated with SLAB_TYPESAFE_BY_RCU kmem_cache can
use following algos :
1) Lookup algo
@@ -96,7 +96,7 @@ unlock_chain(); // typically a spin_unlock()
3) Remove algo
--------------
Nothing special here, we can use a standard RCU hlist deletion.
-But thanks to SLAB_DESTROY_BY_RCU, beware a deleted object can be reused
+But thanks to SLAB_TYPESAFE_BY_RCU, beware a deleted object can be reused
very very fast (before the end of RCU grace period)
if (put_last_reference_on(obj) {
diff --git a/Documentation/RCU/stallwarn.txt b/Documentation/RCU/stallwarn.txt
index e93d04133fe7..96a3d81837e1 100644
--- a/Documentation/RCU/stallwarn.txt
+++ b/Documentation/RCU/stallwarn.txt
@@ -1,9 +1,102 @@
Using RCU's CPU Stall Detector
-The rcu_cpu_stall_suppress module parameter enables RCU's CPU stall
-detector, which detects conditions that unduly delay RCU grace periods.
-This module parameter enables CPU stall detection by default, but
-may be overridden via boot-time parameter or at runtime via sysfs.
+This document first discusses what sorts of issues RCU's CPU stall
+detector can locate, and then discusses kernel parameters and Kconfig
+options that can be used to fine-tune the detector's operation. Finally,
+this document explains the stall detector's "splat" format.
+
+
+What Causes RCU CPU Stall Warnings?
+
+So your kernel printed an RCU CPU stall warning. The next question is
+"What caused it?" The following problems can result in RCU CPU stall
+warnings:
+
+o A CPU looping in an RCU read-side critical section.
+
+o A CPU looping with interrupts disabled.
+
+o A CPU looping with preemption disabled. This condition can
+ result in RCU-sched stalls and, if ksoftirqd is in use, RCU-bh
+ stalls.
+
+o A CPU looping with bottom halves disabled. This condition can
+ result in RCU-sched and RCU-bh stalls.
+
+o For !CONFIG_PREEMPT kernels, a CPU looping anywhere in the
+ kernel without invoking schedule(). Note that cond_resched()
+ does not necessarily prevent RCU CPU stall warnings. Therefore,
+ if the looping in the kernel is really expected and desirable
+ behavior, you might need to replace some of the cond_resched()
+	calls with calls to cond_resched_rcu_qs() (see the sketch just
+	after this list).
+
+o Booting Linux using a console connection that is too slow to
+ keep up with the boot-time console-message rate. For example,
+ a 115Kbaud serial console can be -way- too slow to keep up
+ with boot-time message rates, and will frequently result in
+ RCU CPU stall warning messages. Especially if you have added
+ debug printk()s.
+
+o Anything that prevents RCU's grace-period kthreads from running.
+ This can result in the "All QSes seen" console-log message.
+ This message will include information on when the kthread last
+ ran and how often it should be expected to run.
+
+o A CPU-bound real-time task in a CONFIG_PREEMPT kernel, which might
+ happen to preempt a low-priority task in the middle of an RCU
+ read-side critical section. This is especially damaging if
+ that low-priority task is not permitted to run on any other CPU,
+ in which case the next RCU grace period can never complete, which
+ will eventually cause the system to run out of memory and hang.
+ While the system is in the process of running itself out of
+ memory, you might see stall-warning messages.
+
+o A CPU-bound real-time task in a CONFIG_PREEMPT_RT kernel that
+ is running at a higher priority than the RCU softirq threads.
+ This will prevent RCU callbacks from ever being invoked,
+ and in a CONFIG_PREEMPT_RCU kernel will further prevent
+ RCU grace periods from ever completing. Either way, the
+ system will eventually run out of memory and hang. In the
+ CONFIG_PREEMPT_RCU case, you might see stall-warning
+ messages.
+
+o A hardware or software issue shuts off the scheduler-clock
+ interrupt on a CPU that is not in dyntick-idle mode. This
+ problem really has happened, and seems to be most likely to
+ result in RCU CPU stall warnings for CONFIG_NO_HZ_COMMON=n kernels.
+
+o A bug in the RCU implementation.
+
+o A hardware failure. This is quite unlikely, but has occurred
+ at least once in real life. A CPU failed in a running system,
+ becoming unresponsive, but not causing an immediate crash.
+ This resulted in a series of RCU CPU stall warnings, eventually
+	leading to the realization that the CPU had failed.
+
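+For the cond_resched() case above, such a replacement might look like the
+following sketch, in which the loop body and termination flag are purely
+illustrative:
+
+	while (!done) {
+		do_long_running_work();	/* Expected, desirable looping. */
+		cond_resched_rcu_qs();	/* Instead of cond_resched(). */
+	}
+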
+The RCU, RCU-sched, RCU-bh, and RCU-tasks implementations have CPU stall
+warnings. Note that SRCU does -not- have CPU stall warnings. Please note
+that RCU only detects CPU stalls when there is a grace period in progress.
+No grace period, no CPU stall warnings.
+
+To diagnose the cause of the stall, inspect the stack traces.
+The offending function will usually be near the top of the stack.
+If you have a series of stall warnings from a single extended stall,
+comparing the stack traces can often help determine where the stall
+is occurring, which will usually be in the function nearest the top of
+that portion of the stack which remains the same from trace to trace.
+If you can reliably trigger the stall, ftrace can be quite helpful.
+
+RCU bugs can often be debugged with the help of CONFIG_RCU_TRACE
+and with RCU's event tracing. For information on RCU's event tracing,
+see include/trace/events/rcu.h.
+
+
+Fine-Tuning the RCU CPU Stall Detector
+
+The rcupdate.rcu_cpu_stall_suppress module parameter disables RCU's
+CPU stall detector, which detects conditions that unduly delay RCU grace
+periods. CPU stall detection is enabled by default, but it may be
+suppressed via this boot-time parameter or at runtime via sysfs.
The stall detector's idea of what constitutes "unduly delayed" is
controlled by a set of kernel configuration variables and cpp macros:
@@ -56,6 +149,9 @@ rcupdate.rcu_task_stall_timeout
And continues with the output of sched_show_task() for each
task stalling the current RCU-tasks grace period.
+
+Interpreting RCU's CPU Stall-Detector "Splats"
+
For non-RCU-tasks flavors of RCU, when a CPU detects that it is stalling,
it will print a message similar to the following:
@@ -178,89 +274,3 @@ grace period is in flight.
It is entirely possible to see stall warnings from normal and from
expedited grace periods at about the same time from the same run.
-
-
-What Causes RCU CPU Stall Warnings?
-
-So your kernel printed an RCU CPU stall warning. The next question is
-"What caused it?" The following problems can result in RCU CPU stall
-warnings:
-
-o A CPU looping in an RCU read-side critical section.
-
-o A CPU looping with interrupts disabled. This condition can
- result in RCU-sched and RCU-bh stalls.
-
-o A CPU looping with preemption disabled. This condition can
- result in RCU-sched stalls and, if ksoftirqd is in use, RCU-bh
- stalls.
-
-o A CPU looping with bottom halves disabled. This condition can
- result in RCU-sched and RCU-bh stalls.
-
-o For !CONFIG_PREEMPT kernels, a CPU looping anywhere in the
- kernel without invoking schedule(). Note that cond_resched()
- does not necessarily prevent RCU CPU stall warnings. Therefore,
- if the looping in the kernel is really expected and desirable
- behavior, you might need to replace some of the cond_resched()
- calls with calls to cond_resched_rcu_qs().
-
-o Booting Linux using a console connection that is too slow to
- keep up with the boot-time console-message rate. For example,
- a 115Kbaud serial console can be -way- too slow to keep up
- with boot-time message rates, and will frequently result in
- RCU CPU stall warning messages. Especially if you have added
- debug printk()s.
-
-o Anything that prevents RCU's grace-period kthreads from running.
- This can result in the "All QSes seen" console-log message.
- This message will include information on when the kthread last
- ran and how often it should be expected to run.
-
-o A CPU-bound real-time task in a CONFIG_PREEMPT kernel, which might
- happen to preempt a low-priority task in the middle of an RCU
- read-side critical section. This is especially damaging if
- that low-priority task is not permitted to run on any other CPU,
- in which case the next RCU grace period can never complete, which
- will eventually cause the system to run out of memory and hang.
- While the system is in the process of running itself out of
- memory, you might see stall-warning messages.
-
-o A CPU-bound real-time task in a CONFIG_PREEMPT_RT kernel that
- is running at a higher priority than the RCU softirq threads.
- This will prevent RCU callbacks from ever being invoked,
- and in a CONFIG_PREEMPT_RCU kernel will further prevent
- RCU grace periods from ever completing. Either way, the
- system will eventually run out of memory and hang. In the
- CONFIG_PREEMPT_RCU case, you might see stall-warning
- messages.
-
-o A hardware or software issue shuts off the scheduler-clock
- interrupt on a CPU that is not in dyntick-idle mode. This
- problem really has happened, and seems to be most likely to
- result in RCU CPU stall warnings for CONFIG_NO_HZ_COMMON=n kernels.
-
-o A bug in the RCU implementation.
-
-o A hardware failure. This is quite unlikely, but has occurred
- at least once in real life. A CPU failed in a running system,
- becoming unresponsive, but not causing an immediate crash.
- This resulted in a series of RCU CPU stall warnings, eventually
- leading the realization that the CPU had failed.
-
-The RCU, RCU-sched, RCU-bh, and RCU-tasks implementations have CPU stall
-warning. Note that SRCU does -not- have CPU stall warnings. Please note
-that RCU only detects CPU stalls when there is a grace period in progress.
-No grace period, no CPU stall warnings.
-
-To diagnose the cause of the stall, inspect the stack traces.
-The offending function will usually be near the top of the stack.
-If you have a series of stall warnings from a single extended stall,
-comparing the stack traces can often help determine where the stall
-is occurring, which will usually be in the function nearest the top of
-that portion of the stack which remains the same from trace to trace.
-If you can reliably trigger the stall, ftrace can be quite helpful.
-
-RCU bugs can often be debugged with the help of CONFIG_RCU_TRACE
-and with RCU's event tracing. For information on RCU's event tracing,
-see include/trace/events/rcu.h.
diff --git a/Documentation/RCU/whatisRCU.txt b/Documentation/RCU/whatisRCU.txt
index 5cbd8b2395b8..8ed6c9f6133c 100644
--- a/Documentation/RCU/whatisRCU.txt
+++ b/Documentation/RCU/whatisRCU.txt
@@ -562,7 +562,9 @@ This section presents a "toy" RCU implementation that is based on
familiar locking primitives. Its overhead makes it a non-starter for
real-life use, as does its lack of scalability. It is also unsuitable
for realtime use, since it allows scheduling latency to "bleed" from
-one read-side critical section to another.
+one read-side critical section to another. It also assumes recursive
+reader-writer locks: If you try this with non-recursive locks, and
+you allow nested rcu_read_lock() calls, you can deadlock.
However, it is probably the easiest implementation to relate to, so is
a good starting point.
@@ -587,20 +589,21 @@ It is extremely simple:
write_unlock(&rcu_gp_mutex);
}
-[You can ignore rcu_assign_pointer() and rcu_dereference() without
-missing much. But here they are anyway. And whatever you do, don't
-forget about them when submitting patches making use of RCU!]
+[You can ignore rcu_assign_pointer() and rcu_dereference() without missing
+much. But here are simplified versions anyway. And whatever you do,
+don't forget about them when submitting patches making use of RCU!]
- #define rcu_assign_pointer(p, v) ({ \
- smp_wmb(); \
- (p) = (v); \
- })
+ #define rcu_assign_pointer(p, v) \
+ ({ \
+ smp_store_release(&(p), (v)); \
+ })
- #define rcu_dereference(p) ({ \
- typeof(p) _________p1 = p; \
- smp_read_barrier_depends(); \
- (_________p1); \
- })
+ #define rcu_dereference(p) \
+ ({ \
+ typeof(p) _________p1 = p; \
+ smp_read_barrier_depends(); \
+ (_________p1); \
+ })
The rcu_read_lock() and rcu_read_unlock() primitive read-acquire
@@ -925,7 +928,8 @@ d. Do you need RCU grace periods to complete even in the face
e. Is your workload too update-intensive for normal use of
RCU, but inappropriate for other synchronization mechanisms?
- If so, consider SLAB_DESTROY_BY_RCU. But please be careful!
+ If so, consider SLAB_TYPESAFE_BY_RCU (which was originally
+ named SLAB_DESTROY_BY_RCU). But please be careful!
f. Do you need read-side critical sections that are respected
even though they are in the middle of the idle loop, during
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index e4c9e0e46b95..4e0654b56aef 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -1578,6 +1578,15 @@
extended tables themselves, and also PASID support. With
this option set, extended tables will not be used even
on hardware which claims to support them.
+ tboot_noforce [Default Off]
+ Do not force the Intel IOMMU enabled under tboot.
+ By default, tboot will force Intel IOMMU on, which
+ could harm performance of some high-throughput
+ devices like 40GBit network cards, even if identity
+ mapping is enabled.
+ Note that using this option lowers the security
+ provided by tboot because it makes the system
+ vulnerable to DMA attacks.
intel_idle.max_cstate= [KNL,HW,ACPI,X86]
0 disables intel_idle and fall back on acpi_idle.
@@ -1644,6 +1653,12 @@
nobypass [PPC/POWERNV]
Disable IOMMU bypass, using IOMMU for PCI devices.
+ iommu.passthrough=
+ [ARM64] Configure DMA to bypass the IOMMU by default.
+ Format: { "0" | "1" }
+ 0 - Use IOMMU translation for DMA.
+ 1 - Bypass the IOMMU for DMA.
+ unset - Use IOMMU translation for DMA.
io7= [HW] IO7 for Marvel based alpha systems
See comment before marvel_specify_io7 in
@@ -3785,6 +3800,14 @@
spia_pedr=
spia_peddr=
+ srcutree.exp_holdoff [KNL]
+ Specifies how many nanoseconds must elapse
+ since the end of the last SRCU grace period for
+ a given srcu_struct until the next normal SRCU
+ grace period will be considered for automatic
+ expediting. Set to zero to disable automatic
+ expediting.
+
stacktrace [FTRACE]
Enabled the stack tracer on boot up.
diff --git a/Documentation/arm/stm32/stm32h743-overview.txt b/Documentation/arm/stm32/stm32h743-overview.txt
new file mode 100644
index 000000000000..3031cbae31a5
--- /dev/null
+++ b/Documentation/arm/stm32/stm32h743-overview.txt
@@ -0,0 +1,30 @@
+ STM32H743 Overview
+ ==================
+
+ Introduction
+ ------------
+ The STM32H743 is a Cortex-M7 MCU aimed at various applications.
+ It features:
+ - Cortex-M7 core running at up to 400MHz
+ - 2MB internal flash, 1MB internal RAM
+ - FMC controller to connect SDRAM, NOR and NAND memories
+ - Dual mode QSPI
+ - SD/MMC/SDIO support
+ - Ethernet controller
+ - USB OTG FS & HS controllers
+ - I2C, SPI, CAN busses support
+ - Several 16- and 32-bit general purpose timers
+ - Serial Audio interface
+ - LCD controller
+ - HDMI-CEC
+ - SPDIFRX
+ - DFSDM
+
+ Resources
+ ---------
+ Datasheet and reference manual are publicly available on ST website:
+ - http://www.st.com/en/microcontrollers/stm32h7x3.html?querycriteria=productId=LN2033
+
+ Document Author
+ ---------------
+ Alexandre Torgue <alexandre.torgue@st.com>
diff --git a/Documentation/arm64/cpu-feature-registers.txt b/Documentation/arm64/cpu-feature-registers.txt
index 61ca21ebef1a..d1c97f9f51cc 100644
--- a/Documentation/arm64/cpu-feature-registers.txt
+++ b/Documentation/arm64/cpu-feature-registers.txt
@@ -169,6 +169,18 @@ infrastructure:
as available on the CPU where it is fetched and is not a system
wide safe value.
+ 4) ID_AA64ISAR1_EL1 - Instruction set attribute register 1
+
+ x--------------------------------------------------x
+ | Name | bits | visible |
+ |--------------------------------------------------|
+ | LRCPC | [23-20] | y |
+ |--------------------------------------------------|
+ | FCMA | [19-16] | y |
+ |--------------------------------------------------|
+ | JSCVT | [15-12] | y |
+ x--------------------------------------------------x
+
Appendix I: Example
---------------------------
diff --git a/Documentation/devicetree/bindings/arm/amlogic.txt b/Documentation/devicetree/bindings/arm/amlogic.txt
index c246cd2730d9..bfd5b558477d 100644
--- a/Documentation/devicetree/bindings/arm/amlogic.txt
+++ b/Documentation/devicetree/bindings/arm/amlogic.txt
@@ -43,8 +43,11 @@ Board compatible values:
- "wetek,hub" (Meson gxbb)
- "wetek,play2" (Meson gxbb)
- "amlogic,p212" (Meson gxl s905x)
+ - "khadas,vim" (Meson gxl s905x)
+
- "amlogic,p230" (Meson gxl s905d)
- "amlogic,p231" (Meson gxl s905d)
+ - "hwacom,amazetv" (Meson gxl s905x)
- "amlogic,q200" (Meson gxm s912)
- "amlogic,q201" (Meson gxm s912)
- "nexbox,a95x" (Meson gxbb or Meson gxl s905x)
diff --git a/Documentation/devicetree/bindings/arm/atmel-at91.txt b/Documentation/devicetree/bindings/arm/atmel-at91.txt
index 29737b9b616e..799af90dd75b 100644
--- a/Documentation/devicetree/bindings/arm/atmel-at91.txt
+++ b/Documentation/devicetree/bindings/arm/atmel-at91.txt
@@ -217,7 +217,8 @@ memory, bridge implementations, processor and other functionality not controlled
elsewhere.
required properties:
-- compatible: Should be "atmel,<chip>-sfr", "syscon".
+- compatible: Should be "atmel,<chip>-sfr", "syscon" or
+ "atmel,<chip>-sfrbu", "syscon"
<chip> can be "sama5d3", "sama5d4" or "sama5d2".
- reg: Should contain registers location and length
diff --git a/Documentation/devicetree/bindings/arm/cavium-thunder2.txt b/Documentation/devicetree/bindings/arm/cavium-thunder2.txt
new file mode 100644
index 000000000000..dc5dd65cbce7
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/cavium-thunder2.txt
@@ -0,0 +1,8 @@
+Cavium ThunderX2 CN99XX platform tree bindings
+----------------------------------------------
+
+Boards with Cavium ThunderX2 CN99XX SoC shall have the root property:
+ compatible = "cavium,thunderx2-cn9900", "brcm,vulcan-soc";
+
+These SoCs use the "cavium,thunder2" core, which is compatible
+with "brcm,vulcan".
diff --git a/Documentation/devicetree/bindings/arm/cpus.txt b/Documentation/devicetree/bindings/arm/cpus.txt
index 698ad1f097fa..1030f5f50207 100644
--- a/Documentation/devicetree/bindings/arm/cpus.txt
+++ b/Documentation/devicetree/bindings/arm/cpus.txt
@@ -170,6 +170,7 @@ nodes to be present and contain the properties described below.
"brcm,brahma-b15"
"brcm,vulcan"
"cavium,thunder"
+ "cavium,thunder2"
"faraday,fa526"
"intel,sa110"
"intel,sa1100"
diff --git a/Documentation/devicetree/bindings/arm/firmware/linaro,optee-tz.txt b/Documentation/devicetree/bindings/arm/firmware/linaro,optee-tz.txt
new file mode 100644
index 000000000000..d38834c67dff
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/firmware/linaro,optee-tz.txt
@@ -0,0 +1,31 @@
+OP-TEE Device Tree Bindings
+
+OP-TEE is a piece of software using hardware features to provide a Trusted
+Execution Environment. The security can be provided with ARM TrustZone, but
+also by virtualization or a separate chip.
+
+We're using "linaro" as the first part of the compatible property for
+the reference implementation maintained by Linaro.
+
+* OP-TEE based on ARM TrustZone required properties:
+
+- compatible : should contain "linaro,optee-tz"
+
+- method : The method of calling the OP-TEE Trusted OS. Permitted
+ values are:
+
+ "smc" : SMC #0, with the register assignments specified
+ in drivers/tee/optee/optee_smc.h
+
+ "hvc" : HVC #0, with the register assignments specified
+ in drivers/tee/optee/optee_smc.h
+
+
+
+Example:
+ firmware {
+ optee {
+ compatible = "linaro,optee-tz";
+ method = "smc";
+ };
+ };
diff --git a/Documentation/devicetree/bindings/arm/fsl.txt b/Documentation/devicetree/bindings/arm/fsl.txt
index c9c567ae227f..cdb9dd705754 100644
--- a/Documentation/devicetree/bindings/arm/fsl.txt
+++ b/Documentation/devicetree/bindings/arm/fsl.txt
@@ -179,6 +179,18 @@ LS1046A ARMv8 based RDB Board
Required root node properties:
- compatible = "fsl,ls1046a-rdb", "fsl,ls1046a";
+LS1088A SoC
+Required root node properties:
+ - compatible = "fsl,ls1088a";
+
+LS1088A ARMv8 based QDS Board
+Required root node properties:
+ - compatible = "fsl,ls1088a-qds", "fsl,ls1088a";
+
+LS1088A ARMv8 based RDB Board
+Required root node properties:
+ - compatible = "fsl,ls1088a-rdb", "fsl,ls1088a";
+
LS2080A SoC
Required root node properties:
- compatible = "fsl,ls2080a";
@@ -195,3 +207,14 @@ LS2080A ARMv8 based RDB Board
Required root node properties:
- compatible = "fsl,ls2080a-rdb", "fsl,ls2080a";
+LS2088A SoC
+Required root node properties:
+ - compatible = "fsl,ls2088a";
+
+LS2088A ARMv8 based QDS Board
+Required root node properties:
+ - compatible = "fsl,ls2088a-qds", "fsl,ls2088a";
+
+LS2088A ARMv8 based RDB Board
+Required root node properties:
+ - compatible = "fsl,ls2088a-rdb", "fsl,ls2088a";
diff --git a/Documentation/devicetree/bindings/arm/gemini.txt b/Documentation/devicetree/bindings/arm/gemini.txt
new file mode 100644
index 000000000000..0041eb031116
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/gemini.txt
@@ -0,0 +1,86 @@
+Cortina systems Gemini platforms
+
+The Gemini SoC is the project name for an ARMv4 FA525-based SoC originally
+produced by Storlink Semiconductor around 2005. The company was later
+renamed Storm Semiconductor. The chip product name is Storlink SL3516.
+It was derived from earlier products from Storm named SL3316 (Centroid) and
+SL3512 (Bulverde).
+
+Storm Semiconductor was acquired by Cortina Systems in 2008 and the SoC was
+produced and used for NAS and similar use cases. In 2014 Cortina Systems was
+in turn acquired by Inphi, who seem to have discontinued this product family.
+
+Many of the IP blocks used in the SoC come from Faraday Technology.
+
+Required properties (in root node):
+ compatible = "cortina,gemini";
+
+Required nodes:
+
+- soc: the SoC should be represented by a simple bus encompassing all the
+  onchip devices; this is referred to as the soc bus node.
+
+- syscon: the soc bus node must have a system controller node pointing to the
+ global control registers, with the compatible string
+ "cortina,gemini-syscon", "syscon";
+
+- timer: the soc bus node must have a timer node pointing to the SoC timer
+ block, with the compatible string "cortina,gemini-timer"
+ See: clocksource/cortina,gemini-timer.txt
+
+- interrupt-controller: the soc bus node must have an interrupt controller
+ node pointing to the SoC interrupt controller block, with the compatible
+ string "cortina,gemini-interrupt-controller"
+ See interrupt-controller/cortina,gemini-interrupt-controller.txt
+
+Example:
+
+/ {
+ model = "Foo Gemini Machine";
+ compatible = "cortina,gemini";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ memory {
+ device_type = "memory";
+ reg = <0x00000000 0x8000000>;
+ };
+
+ soc {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ compatible = "simple-bus";
+ interrupt-parent = <&intcon>;
+
+ syscon: syscon@40000000 {
+ compatible = "cortina,gemini-syscon", "syscon";
+ reg = <0x40000000 0x1000>;
+ };
+
+ uart0: serial@42000000 {
+ compatible = "ns16550a";
+ reg = <0x42000000 0x100>;
+ clock-frequency = <48000000>;
+ interrupts = <18 IRQ_TYPE_LEVEL_HIGH>;
+ reg-shift = <2>;
+ };
+
+ timer@43000000 {
+ compatible = "cortina,gemini-timer";
+ reg = <0x43000000 0x1000>;
+ interrupt-parent = <&intcon>;
+ interrupts = <14 IRQ_TYPE_EDGE_FALLING>, /* Timer 1 */
+ <15 IRQ_TYPE_EDGE_FALLING>, /* Timer 2 */
+ <16 IRQ_TYPE_EDGE_FALLING>; /* Timer 3 */
+ syscon = <&syscon>;
+ };
+
+ intcon: interrupt-controller@48000000 {
+ compatible = "cortina,gemini-interrupt-controller";
+ reg = <0x48000000 0x1000>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+ };
+};
diff --git a/Documentation/devicetree/bindings/arm/hisilicon/hisilicon.txt b/Documentation/devicetree/bindings/arm/hisilicon/hisilicon.txt
index f1c1e21a8110..2e732152064b 100644
--- a/Documentation/devicetree/bindings/arm/hisilicon/hisilicon.txt
+++ b/Documentation/devicetree/bindings/arm/hisilicon/hisilicon.txt
@@ -4,6 +4,14 @@ Hi3660 SoC
Required root node properties:
- compatible = "hisilicon,hi3660";
+Hi3798cv200 SoC
+Required root node properties:
+ - compatible = "hisilicon,hi3798cv200";
+
+Hi3798cv200 Poplar Board
+Required root node properties:
+ - compatible = "hisilicon,hi3798cv200-poplar", "hisilicon,hi3798cv200";
+
Hi4511 Board
Required root node properties:
- compatible = "hisilicon,hi3620-hi4511";
diff --git a/Documentation/devicetree/bindings/arm/i2se.txt b/Documentation/devicetree/bindings/arm/i2se.txt
new file mode 100644
index 000000000000..dbd54a3aa07d
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/i2se.txt
@@ -0,0 +1,22 @@
+I2SE Device Tree Bindings
+-------------------------
+
+Duckbill Board
+Required root node properties:
+ - compatible = "i2se,duckbill", "fsl,imx28";
+
+Duckbill 2 Board
+Required root node properties:
+ - compatible = "i2se,duckbill-2", "fsl,imx28";
+
+Duckbill 2 485 Board
+Required root node properties:
+ - compatible = "i2se,duckbill-2-485", "i2se,duckbill-2", "fsl,imx28";
+
+Duckbill 2 EnOcean Board
+Required root node properties:
+ - compatible = "i2se,duckbill-2-enocean", "i2se,duckbill-2", "fsl,imx28";
+
+Duckbill 2 SPI Board
+Required root node properties:
+ - compatible = "i2se,duckbill-2-spi", "i2se,duckbill-2", "fsl,imx28";
diff --git a/Documentation/devicetree/bindings/arm/l2c2x0.txt b/Documentation/devicetree/bindings/arm/l2c2x0.txt
index 917199f17965..d9650c1788f4 100644
--- a/Documentation/devicetree/bindings/arm/l2c2x0.txt
+++ b/Documentation/devicetree/bindings/arm/l2c2x0.txt
@@ -90,6 +90,9 @@ Optional properties:
- arm,standby-mode: L2 standby mode enable. Value <0> (forcibly disable),
<1> (forcibly enable), property absent (OS specific behavior,
preferably retain firmware settings)
+- arm,early-bresp-disable : Disable the CA9 optimization Early BRESP (PL310)
+- arm,full-line-zero-disable : Disable the CA9 optimization Full line of zero
+ write (PL310)
Example:
diff --git a/Documentation/devicetree/bindings/arm/rockchip.txt b/Documentation/devicetree/bindings/arm/rockchip.txt
index cc4ace6397ab..c965d99e86c2 100644
--- a/Documentation/devicetree/bindings/arm/rockchip.txt
+++ b/Documentation/devicetree/bindings/arm/rockchip.txt
@@ -1,5 +1,8 @@
Rockchip platforms device tree bindings
---------------------------------------
+- Asus Tinker board
+ Required root node properties:
+ - compatible = "asus,rk3288-tinker", "rockchip,rk3288";
- Kylin RK3036 board:
Required root node properties:
@@ -56,6 +59,17 @@ Rockchip platforms device tree bindings
- compatible = "google,veyron-brain-rev0", "google,veyron-brain",
"google,veyron", "rockchip,rk3288";
+- Google Gru (dev-board):
+ Required root node properties:
+ - compatible = "google,gru-rev15", "google,gru-rev14",
+ "google,gru-rev13", "google,gru-rev12",
+ "google,gru-rev11", "google,gru-rev10",
+ "google,gru-rev9", "google,gru-rev8",
+ "google,gru-rev7", "google,gru-rev6",
+ "google,gru-rev5", "google,gru-rev4",
+ "google,gru-rev3", "google,gru-rev2",
+ "google,gru", "rockchip,rk3399";
+
- Google Jaq (Haier Chromebook 11 and more):
Required root node properties:
- compatible = "google,veyron-jaq-rev5", "google,veyron-jaq-rev4",
@@ -70,6 +84,15 @@ Rockchip platforms device tree bindings
"google,veyron-jerry-rev3", "google,veyron-jerry",
"google,veyron", "rockchip,rk3288";
+- Google Kevin (Samsung Chromebook Plus):
+ Required root node properties:
+ - compatible = "google,kevin-rev15", "google,kevin-rev14",
+ "google,kevin-rev13", "google,kevin-rev12",
+ "google,kevin-rev11", "google,kevin-rev10",
+ "google,kevin-rev9", "google,kevin-rev8",
+ "google,kevin-rev7", "google,kevin-rev6",
+ "google,kevin", "google,gru", "rockchip,rk3399";
+
- Google Mickey (Asus Chromebit CS10):
Required root node properties:
- compatible = "google,veyron-mickey-rev8", "google,veyron-mickey-rev7",
@@ -103,6 +126,10 @@ Rockchip platforms device tree bindings
Required root node properties:
- compatible = "mqmaker,miqi", "rockchip,rk3288";
+- Phytec phyCORE-RK3288: Rapid Development Kit
+ Required root node properties:
+ - compatible = "phytec,rk3288-pcm-947", "phytec,rk3288-phycore-som", "rockchip,rk3288";
+
- Rockchip PX3 Evaluation board:
Required root node properties:
- compatible = "rockchip,px3-evb", "rockchip,px3", "rockchip,rk3188";
@@ -134,6 +161,10 @@ Rockchip platforms device tree bindings
Required root node properties:
- compatible = "rockchip,rk3288-fennec", "rockchip,rk3288";
+- Rockchip RK3328 evb:
+ Required root node properties:
+ - compatible = "rockchip,rk3328-evb", "rockchip,rk3328";
+
- Rockchip RK3399 evb:
Required root node properties:
- compatible = "rockchip,rk3399-evb", "rockchip,rk3399";
diff --git a/Documentation/devicetree/bindings/arm/shmobile.txt b/Documentation/devicetree/bindings/arm/shmobile.txt
index c9502634316d..170fe0562c63 100644
--- a/Documentation/devicetree/bindings/arm/shmobile.txt
+++ b/Documentation/devicetree/bindings/arm/shmobile.txt
@@ -13,8 +13,12 @@ SoCs:
compatible = "renesas,r8a73a4"
- R-Mobile A1 (R8A77400)
compatible = "renesas,r8a7740"
+ - RZ/G1H (R8A77420)
+ compatible = "renesas,r8a7742"
- RZ/G1M (R8A77430)
compatible = "renesas,r8a7743"
+ - RZ/G1N (R8A77440)
+ compatible = "renesas,r8a7744"
- RZ/G1E (R8A77450)
compatible = "renesas,r8a7745"
- R-Car M1A (R8A77781)
diff --git a/Documentation/devicetree/bindings/arm/sprd.txt b/Documentation/devicetree/bindings/arm/sprd.txt
index 31a629dc75b8..3df034b13e28 100644
--- a/Documentation/devicetree/bindings/arm/sprd.txt
+++ b/Documentation/devicetree/bindings/arm/sprd.txt
@@ -1,11 +1,14 @@
Spreadtrum SoC Platforms Device Tree Bindings
----------------------------------------------------
-Sharkl64 is a Spreadtrum's SoC Platform which is based
-on ARM 64-bit processor.
+SC9836 openphone Board
+Required root node properties:
+ - compatible = "sprd,sc9836-openphone", "sprd,sc9836";
-SC9836 openphone board with SC9836 SoC based on the
-Sharkl64 Platform shall have the following properties.
+SC9860 SoC
+Required root node properties:
+ - compatible = "sprd,sc9860"
+SP9860G 3GFHD Board
Required root node properties:
- - compatible = "sprd,sc9836-openphone", "sprd,sc9836";
+ - compatible = "sprd,sp9860g-1h10", "sprd,sc9860";
diff --git a/Documentation/devicetree/bindings/arm/tegra/nvidia,tegra186-pmc.txt b/Documentation/devicetree/bindings/arm/tegra/nvidia,tegra186-pmc.txt
new file mode 100644
index 000000000000..078a58b0302f
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/tegra/nvidia,tegra186-pmc.txt
@@ -0,0 +1,34 @@
+NVIDIA Tegra Power Management Controller (PMC)
+
+Required properties:
+- compatible: Should contain one of the following:
+ - "nvidia,tegra186-pmc": for Tegra186
+- reg: Must contain an (offset, length) pair of the register set for each
+ entry in reg-names.
+- reg-names: Must include the following entries:
+ - "pmc"
+ - "wake"
+ - "aotag"
+ - "scratch"
+
+Optional properties:
+- nvidia,invert-interrupt: If present, inverts the PMU interrupt signal.
+
+Example:
+
+SoC DTSI:
+
+ pmc@c3600000 {
+ compatible = "nvidia,tegra186-pmc";
+ reg = <0 0x0c360000 0 0x10000>,
+ <0 0x0c370000 0 0x10000>,
+ <0 0x0c380000 0 0x10000>,
+ <0 0x0c390000 0 0x10000>;
+ reg-names = "pmc", "wake", "aotag", "scratch";
+ };
+
+Board DTS:
+
+ pmc@c360000 {
+ nvidia,invert-interrupt;
+ };
diff --git a/Documentation/devicetree/bindings/arm/tegra/nvidia,tegra20-flowctrl.txt b/Documentation/devicetree/bindings/arm/tegra/nvidia,tegra20-flowctrl.txt
index ccf0adddc820..a855c1bffc0f 100644
--- a/Documentation/devicetree/bindings/arm/tegra/nvidia,tegra20-flowctrl.txt
+++ b/Documentation/devicetree/bindings/arm/tegra/nvidia,tegra20-flowctrl.txt
@@ -1,7 +1,13 @@
NVIDIA Tegra Flow Controller
Required properties:
-- compatible: Should be "nvidia,tegra<chip>-flowctrl"
+- compatible: Should contain one of the following:
+ - "nvidia,tegra20-flowctrl": for Tegra20
+ - "nvidia,tegra30-flowctrl": for Tegra30
+ - "nvidia,tegra114-flowctrl": for Tegra114
+ - "nvidia,tegra124-flowctrl": for Tegra124
+ - "nvidia,tegra132-flowctrl", "nvidia,tegra124-flowctrl": for Tegra132
+ - "nvidia,tegra210-flowctrl": for Tegra210
- reg: Should contain one register range (address and length)
Example:
diff --git a/Documentation/devicetree/bindings/auxdisplay/hit,hd44780.txt b/Documentation/devicetree/bindings/auxdisplay/hit,hd44780.txt
new file mode 100644
index 000000000000..2aa24b889923
--- /dev/null
+++ b/Documentation/devicetree/bindings/auxdisplay/hit,hd44780.txt
@@ -0,0 +1,45 @@
+DT bindings for the Hitachi HD44780 Character LCD Controller
+
+The Hitachi HD44780 Character LCD Controller is commonly used on character LCDs
+that can display one or more lines of text. It exposes an M6800 bus interface,
+which can be used in either 4-bit or 8-bit mode.
+
+Required properties:
+ - compatible: Must contain "hit,hd44780",
+ - data-gpios: Must contain an array of either 4 or 8 GPIO specifiers,
+ referring to the GPIO pins connected to the data signal lines DB0-DB7
+ (8-bit mode) or DB4-DB7 (4-bit mode) of the LCD Controller's bus interface,
+ - enable-gpios: Must contain a GPIO specifier, referring to the GPIO pin
+ connected to the "E" (Enable) signal line of the LCD Controller's bus
+ interface,
+ - rs-gpios: Must contain a GPIO specifier, referring to the GPIO pin
+ connected to the "RS" (Register Select) signal line of the LCD Controller's
+ bus interface,
+ - display-height-chars: Height of the display, in character cells,
+ - display-width-chars: Width of the display, in character cells.
+
+Optional properties:
+ - rw-gpios: Must contain a GPIO specifier, referring to the GPIO pin
+ connected to the "RW" (Read/Write) signal line of the LCD Controller's bus
+ interface,
+ - backlight-gpios: Must contain a GPIO specifier, referring to the GPIO pin
+ used for enabling the LCD's backlight,
+ - internal-buffer-width: Internal buffer width (default is 40 for displays
+ with 1 or 2 lines, and display-width-chars for displays with more than 2
+ lines).
+
+Example:
+
+ auxdisplay {
+ compatible = "hit,hd44780";
+
+ data-gpios = <&hc595 0 GPIO_ACTIVE_HIGH>,
+ <&hc595 1 GPIO_ACTIVE_HIGH>,
+ <&hc595 2 GPIO_ACTIVE_HIGH>,
+ <&hc595 3 GPIO_ACTIVE_HIGH>;
+ enable-gpios = <&hc595 4 GPIO_ACTIVE_HIGH>;
+ rs-gpios = <&hc595 5 GPIO_ACTIVE_HIGH>;
+
+ display-height-chars = <2>;
+ display-width-chars = <16>;
+ };
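+
+The optional properties are not shown above; a variant wired directly to SoC
+GPIOs in 8-bit mode, with R/W and backlight control, might look like the
+sketch below (the gpio phandles and pin numbers are made up for illustration):
+
+	auxdisplay {
+		compatible = "hit,hd44780";
+
+		/* 8-bit mode: DB0-DB7 */
+		data-gpios = <&gpio2 0 GPIO_ACTIVE_HIGH>,
+			     <&gpio2 1 GPIO_ACTIVE_HIGH>,
+			     <&gpio2 2 GPIO_ACTIVE_HIGH>,
+			     <&gpio2 3 GPIO_ACTIVE_HIGH>,
+			     <&gpio2 4 GPIO_ACTIVE_HIGH>,
+			     <&gpio2 5 GPIO_ACTIVE_HIGH>,
+			     <&gpio2 6 GPIO_ACTIVE_HIGH>,
+			     <&gpio2 7 GPIO_ACTIVE_HIGH>;
+		enable-gpios = <&gpio2 8 GPIO_ACTIVE_HIGH>;
+		rs-gpios = <&gpio2 9 GPIO_ACTIVE_HIGH>;
+		rw-gpios = <&gpio2 10 GPIO_ACTIVE_HIGH>;
+		backlight-gpios = <&gpio2 11 GPIO_ACTIVE_HIGH>;
+
+		display-height-chars = <4>;
+		display-width-chars = <20>;
+	};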
diff --git a/Documentation/devicetree/bindings/chosen.txt b/Documentation/devicetree/bindings/chosen.txt
index 6ae9d82d4c37..b5e39af4ddc0 100644
--- a/Documentation/devicetree/bindings/chosen.txt
+++ b/Documentation/devicetree/bindings/chosen.txt
@@ -52,3 +52,48 @@ This property is set (currently only on PowerPC, and only needed on
book3e) by some versions of kexec-tools to tell the new kernel that it
is being booted by kexec, as the booting environment may differ (e.g.
a different secondary CPU release mechanism)
+
+linux,usable-memory-range
+-------------------------
+
+This property (arm64 only) holds a base address and size, describing a
+limited region in which memory may be considered available for use by
+the kernel. Memory outside of this range is not available for use.
+
+This property describes a limitation: memory within this range is only
+valid when also described through another mechanism that the kernel
+would otherwise use to determine available memory (e.g. memory nodes
+or the EFI memory map). Valid memory may be sparse within the range.
+e.g.
+
+/ {
+ chosen {
+ linux,usable-memory-range = <0x9 0xf0000000 0x0 0x10000000>;
+ };
+};
+
+The main usage is for a crash dump kernel to identify its own usable
+memory and to exclude, at boot time, any other memory areas that are
+part of the panicked kernel's memory.
+
+While this property does not represent real hardware, the address
+and the size are expressed in #address-cells and #size-cells,
+respectively, of the root node.
+
+linux,elfcorehdr
+----------------
+
+This property (currently used only on arm64) holds the memory range,
+the address and the size, of the elf core header which mainly describes
+the panicked kernel's memory layout as PT_LOAD segments of elf format.
+e.g.
+
+/ {
+ chosen {
+ linux,elfcorehdr = <0x9 0xfffff000 0x0 0x800>;
+ };
+};
+
+While this property does not represent real hardware, the address
+and the size are expressed in #address-cells and #size-cells,
+respectively, of the root node.
diff --git a/Documentation/devicetree/bindings/clock/amlogic,gxbb-clkc.txt b/Documentation/devicetree/bindings/clock/amlogic,gxbb-clkc.txt
index ce06435d28ed..a09d627b5508 100644
--- a/Documentation/devicetree/bindings/clock/amlogic,gxbb-clkc.txt
+++ b/Documentation/devicetree/bindings/clock/amlogic,gxbb-clkc.txt
@@ -5,7 +5,8 @@ controllers within the SoC.
Required Properties:
-- compatible: should be "amlogic,gxbb-clkc"
+- compatible: should be "amlogic,gxbb-clkc" for GXBB SoC,
+ or "amlogic,gxl-clkc" for GXL and GXM SoC.
- reg: physical base address of the clock controller and length of memory
mapped region.
diff --git a/Documentation/devicetree/bindings/clock/mvebu-core-clock.txt b/Documentation/devicetree/bindings/clock/mvebu-core-clock.txt
index eb985a633d59..796c260c183d 100644
--- a/Documentation/devicetree/bindings/clock/mvebu-core-clock.txt
+++ b/Documentation/devicetree/bindings/clock/mvebu-core-clock.txt
@@ -31,6 +31,12 @@ The following is a list of provided IDs and clock names on Armada 39x:
4 = dclk (SDRAM Interface Clock)
5 = refclk (Reference Clock)
+The following is a list of provided IDs and clock names on 98dx3236:
+ 0 = tclk (Internal Bus clock)
+ 1 = cpuclk (CPU clock)
+ 2 = ddrclk (DDR clock)
+ 3 = mpll (MPLL Clock)
+
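+These IDs are referenced from consumer nodes in the usual way, e.g. (a
+sketch only; the &coreclk label and the consumer node are assumptions):
+
+	some-controller@f1000000 {
+		/* tclk, ID 0, from the 98dx3236 core clock provider */
+		clocks = <&coreclk 0>;
+	};
+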
The following is a list of provided IDs and clock names on Kirkwood and Dove:
0 = tclk (Internal Bus clock)
1 = cpuclk (CPU0 clock)
@@ -49,6 +55,7 @@ Required properties:
"marvell,armada-380-core-clock" - For Armada 380/385 SoC core clocks
"marvell,armada-390-core-clock" - For Armada 39x SoC core clocks
"marvell,armada-xp-core-clock" - For Armada XP SoC core clocks
+ "marvell,mv98dx3236-core-clock" - For 98dx3236 family SoC core clocks
"marvell,dove-core-clock" - for Dove SoC core clocks
"marvell,kirkwood-core-clock" - for Kirkwood SoC (except mv88f6180)
"marvell,mv88f6180-core-clock" - for Kirkwood MV88f6180 SoC
diff --git a/Documentation/devicetree/bindings/clock/mvebu-gated-clock.txt b/Documentation/devicetree/bindings/clock/mvebu-gated-clock.txt
index 5142efc8099d..de562da2ae77 100644
--- a/Documentation/devicetree/bindings/clock/mvebu-gated-clock.txt
+++ b/Documentation/devicetree/bindings/clock/mvebu-gated-clock.txt
@@ -119,6 +119,16 @@ ID Clock Peripheral
29 sata1lnk
30 sata1 SATA Host 1
+The following is a list of provided IDs for 98dx3236:
+ID Clock Peripheral
+-----------------------------------
+3 ge1 Gigabit Ethernet 1
+4 ge0 Gigabit Ethernet 0
+5 pex0 PCIe Cntrl 0
+17 sdio SDHCI Host
+18 usb0 USB Host 0
+22 xor0 XOR DMA 0
+
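+As with the other SoCs, a consumer references a gate by its ID, e.g. (a
+sketch only; the &gateclk label and the consumer node are assumptions):
+
+	sdhci@d8000 {
+		/* sdio gate, ID 17 */
+		clocks = <&gateclk 17>;
+	};
+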
The following is a list of provided IDs for Dove:
ID Clock Peripheral
-----------------------------------
@@ -169,6 +179,7 @@ Required properties:
"marvell,armada-380-gating-clock" - for Armada 380/385 SoC clock gating
"marvell,armada-390-gating-clock" - for Armada 39x SoC clock gating
"marvell,armada-xp-gating-clock" - for Armada XP SoC clock gating
+ "marvell,mv98dx3236-gating-clock" - for 98dx3236 SoC clock gating
"marvell,dove-gating-clock" - for Dove SoC clock gating
"marvell,kirkwood-gating-clock" - for Kirkwood SoC clock gating
- reg : shall be the register address of the Clock Gating Control register
diff --git a/Documentation/devicetree/bindings/clock/qoriq-clock.txt b/Documentation/devicetree/bindings/clock/qoriq-clock.txt
index aa3526f229a7..6ed469c66b32 100644
--- a/Documentation/devicetree/bindings/clock/qoriq-clock.txt
+++ b/Documentation/devicetree/bindings/clock/qoriq-clock.txt
@@ -35,6 +35,7 @@ Required properties:
* "fsl,ls1021a-clockgen"
* "fsl,ls1043a-clockgen"
* "fsl,ls1046a-clockgen"
+ * "fsl,ls1088a-clockgen"
* "fsl,ls2080a-clockgen"
Chassis-version clock strings include:
* "fsl,qoriq-clockgen-1.0": for chassis 1.0 clocks
diff --git a/Documentation/devicetree/bindings/devfreq/exynos-bus.txt b/Documentation/devicetree/bindings/devfreq/exynos-bus.txt
index d085ef90d27c..f8e946471a58 100644
--- a/Documentation/devicetree/bindings/devfreq/exynos-bus.txt
+++ b/Documentation/devicetree/bindings/devfreq/exynos-bus.txt
@@ -202,23 +202,23 @@ Example2 :
compatible = "operating-points-v2";
opp-shared;
- opp@50000000 {
+ opp-50000000 {
opp-hz = /bits/ 64 <50000000>;
opp-microvolt = <800000>;
};
- opp@100000000 {
+ opp-100000000 {
opp-hz = /bits/ 64 <100000000>;
opp-microvolt = <800000>;
};
- opp@134000000 {
+ opp-134000000 {
opp-hz = /bits/ 64 <134000000>;
opp-microvolt = <800000>;
};
- opp@200000000 {
+ opp-200000000 {
opp-hz = /bits/ 64 <200000000>;
opp-microvolt = <825000>;
};
- opp@400000000 {
+ opp-400000000 {
opp-hz = /bits/ 64 <400000000>;
opp-microvolt = <875000>;
};
@@ -292,23 +292,23 @@ Example2 :
compatible = "operating-points-v2";
opp-shared;
- opp@50000000 {
+ opp-50000000 {
opp-hz = /bits/ 64 <50000000>;
opp-microvolt = <900000>;
};
- opp@80000000 {
+ opp-80000000 {
opp-hz = /bits/ 64 <80000000>;
opp-microvolt = <900000>;
};
- opp@100000000 {
+ opp-100000000 {
opp-hz = /bits/ 64 <100000000>;
opp-microvolt = <1000000>;
};
- opp@134000000 {
+ opp-134000000 {
opp-hz = /bits/ 64 <134000000>;
opp-microvolt = <1000000>;
};
- opp@200000000 {
+ opp-200000000 {
opp-hz = /bits/ 64 <200000000>;
opp-microvolt = <1000000>;
};
@@ -318,19 +318,19 @@ Example2 :
compatible = "operating-points-v2";
opp-shared;
- opp@50000000 {
+ opp-50000000 {
opp-hz = /bits/ 64 <50000000>;
};
- opp@80000000 {
+ opp-80000000 {
opp-hz = /bits/ 64 <80000000>;
};
- opp@100000000 {
+ opp-100000000 {
opp-hz = /bits/ 64 <100000000>;
};
- opp@200000000 {
+ opp-200000000 {
opp-hz = /bits/ 64 <200000000>;
};
- opp@400000000 {
+ opp-400000000 {
opp-hz = /bits/ 64 <400000000>;
};
};
@@ -339,19 +339,19 @@ Example2 :
compatible = "operating-points-v2";
opp-shared;
- opp@50000000 {
+ opp-50000000 {
opp-hz = /bits/ 64 <50000000>;
};
- opp@80000000 {
+ opp-80000000 {
opp-hz = /bits/ 64 <80000000>;
};
- opp@100000000 {
+ opp-100000000 {
opp-hz = /bits/ 64 <100000000>;
};
- opp@200000000 {
+ opp-200000000 {
opp-hz = /bits/ 64 <200000000>;
};
- opp@300000000 {
+ opp-300000000 {
opp-hz = /bits/ 64 <300000000>;
};
};
@@ -360,13 +360,13 @@ Example2 :
compatible = "operating-points-v2";
opp-shared;
- opp@50000000 {
+ opp-50000000 {
opp-hz = /bits/ 64 <50000000>;
};
- opp@80000000 {
+ opp-80000000 {
opp-hz = /bits/ 64 <80000000>;
};
- opp@100000000 {
+ opp-100000000 {
opp-hz = /bits/ 64 <100000000>;
};
};
diff --git a/Documentation/devicetree/bindings/display/atmel/hlcdc-dc.txt b/Documentation/devicetree/bindings/display/atmel/hlcdc-dc.txt
index ebc1a914bda3..ec94468b35be 100644
--- a/Documentation/devicetree/bindings/display/atmel/hlcdc-dc.txt
+++ b/Documentation/devicetree/bindings/display/atmel/hlcdc-dc.txt
@@ -1,7 +1,7 @@
Device-Tree bindings for Atmel's HLCDC (High LCD Controller) DRM driver
The Atmel HLCDC Display Controller is subdevice of the HLCDC MFD device.
-See ../mfd/atmel-hlcdc.txt for more details.
+See ../../mfd/atmel-hlcdc.txt for more details.
Required properties:
- compatible: value should be "atmel,hlcdc-display-controller"
diff --git a/Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt b/Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt
index b82c00449468..57a8d0610062 100644
--- a/Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt
+++ b/Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt
@@ -94,6 +94,7 @@ Required properties:
* allwinner,sun6i-a31-display-backend
* allwinner,sun8i-a33-display-backend
- reg: base address and size of the memory-mapped region.
+ - interrupts: interrupt associated to this IP
- clocks: phandles to the clocks feeding the frontend and backend
* ahb: the backend interface clock
* mod: the backend module clock
@@ -265,6 +266,7 @@ fe0: display-frontend@1e00000 {
be0: display-backend@1e60000 {
compatible = "allwinner,sun5i-a13-display-backend";
reg = <0x01e60000 0x10000>;
+ interrupts = <47>;
clocks = <&ahb_gates 44>, <&de_be_clk>,
<&dram_gates 26>;
clock-names = "ahb", "mod",
diff --git a/Documentation/devicetree/bindings/display/tegra/nvidia,tegra20-host1x.txt b/Documentation/devicetree/bindings/display/tegra/nvidia,tegra20-host1x.txt
index 0fad7ed2ea19..74e1e8add5a1 100644
--- a/Documentation/devicetree/bindings/display/tegra/nvidia,tegra20-host1x.txt
+++ b/Documentation/devicetree/bindings/display/tegra/nvidia,tegra20-host1x.txt
@@ -249,6 +249,19 @@ of the following host1x client modules:
See ../pinctrl/nvidia,tegra124-dpaux-padctl.txt for information
regarding the DPAUX pad controller bindings.
+- vic: Video Image Compositor
+ - compatible : "nvidia,tegra<chip>-vic"
+ - reg: Physical base address and length of the controller's registers.
+ - interrupts: The interrupt outputs from the controller.
+ - clocks: Must contain an entry for each entry in clock-names.
+ See ../clocks/clock-bindings.txt for details.
+ - clock-names: Must include the following entries:
+ - vic: clock input for the VIC hardware
+ - resets: Must contain an entry for each entry in reset-names.
+ See ../reset/reset.txt for details.
+ - reset-names: Must include the following entries:
+ - vic
+
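+A VIC node following these properties might look roughly like the sketch
+below (the unit address, interrupt and clock/reset specifiers are
+placeholders, not taken from any particular chip):
+
+	vic@54340000 {
+		compatible = "nvidia,tegra124-vic";
+		reg = <0x0 0x54340000 0x0 0x00040000>;
+		interrupts = <GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&tegra_car 176>;	/* placeholder clock specifier */
+		clock-names = "vic";
+		resets = <&tegra_car 178>;	/* placeholder reset specifier */
+		reset-names = "vic";
+	};
+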
Example:
/ {
diff --git a/Documentation/devicetree/bindings/firmware/coreboot.txt b/Documentation/devicetree/bindings/firmware/coreboot.txt
new file mode 100644
index 000000000000..4c955703cea8
--- /dev/null
+++ b/Documentation/devicetree/bindings/firmware/coreboot.txt
@@ -0,0 +1,33 @@
+COREBOOT firmware information
+
+This node communicates the location of coreboot's memory-resident
+bookkeeping structures to the kernel. Since coreboot itself cannot boot a
+device-tree-based kernel (yet), this node needs to be inserted by a
+second-stage bootloader (a coreboot "payload").
+
+Required properties:
+ - compatible: Should be "coreboot"
+ - reg: Address and length of the following two memory regions, in order:
+ 1.) The coreboot table. This is a list of variable-sized descriptors
+ that contain various compile- and run-time generated firmware
+ parameters. It is identified by the magic string "LBIO" in its first
+ four bytes.
+ See coreboot's src/commonlib/include/commonlib/coreboot_tables.h for
+ details.
+ 2.) The CBMEM area. This is a downward-growing memory region used by
+ coreboot to dynamically allocate data structures that remain resident.
+ It may or may not include the coreboot table as one of its members. It
+ is identified by a root node descriptor with the magic number
+ 0xc0389481 that resides in the topmost 8 bytes of the area.
+ See coreboot's src/include/imd.h for details.
+
+Example:
+ firmware {
+ ranges;
+
+ coreboot {
+ compatible = "coreboot";
+ reg = <0xfdfea000 0x264>,
+ <0xfdfea000 0x16000>;
+                };
+ };
diff --git a/Documentation/devicetree/bindings/fpga/altera-pr-ip.txt b/Documentation/devicetree/bindings/fpga/altera-pr-ip.txt
new file mode 100644
index 000000000000..52a294cf2730
--- /dev/null
+++ b/Documentation/devicetree/bindings/fpga/altera-pr-ip.txt
@@ -0,0 +1,12 @@
+Altera Arria10 Partial Reconfiguration IP
+
+Required properties:
+- compatible : should contain "altr,a10-pr-ip"
+- reg : base address and size for memory mapped io.
+
+Example:
+
+ fpga_mgr: fpga-mgr@ff20c000 {
+ compatible = "altr,a10-pr-ip";
+ reg = <0xff20c000 0x10>;
+ };
diff --git a/Documentation/devicetree/bindings/fpga/fpga-region.txt b/Documentation/devicetree/bindings/fpga/fpga-region.txt
index 3b32ba15a717..6db8aeda461a 100644
--- a/Documentation/devicetree/bindings/fpga/fpga-region.txt
+++ b/Documentation/devicetree/bindings/fpga/fpga-region.txt
@@ -186,12 +186,15 @@ Optional properties:
otherwise full reconfiguration is done.
- external-fpga-config : boolean, set if the FPGA has already been configured
prior to OS boot up.
+- encrypted-fpga-config : boolean, set if the bitstream is encrypted
- region-unfreeze-timeout-us : The maximum time in microseconds to wait for
bridges to successfully become enabled after the region has been
programmed.
- region-freeze-timeout-us : The maximum time in microseconds to wait for
bridges to successfully become disabled before the region has been
programmed.
+- config-complete-timeout-us : The maximum time in microseconds for the
+	FPGA to enter operating mode after the region has been programmed.
- child nodes : devices in the FPGA after programming.
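+
+A region using the two new properties could look like this sketch (the
+manager phandle, label and timeout value are illustrative only):
+
+	region1: fpga-region1 {
+		compatible = "fpga-region";
+		fpga-mgr = <&fpga_mgr1>;
+		encrypted-fpga-config;
+		config-complete-timeout-us = <10000>;
+	};
+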
In the example below, when an overlay is applied targeting fpga-region0,
diff --git a/Documentation/devicetree/bindings/fpga/lattice-ice40-fpga-mgr.txt b/Documentation/devicetree/bindings/fpga/lattice-ice40-fpga-mgr.txt
new file mode 100644
index 000000000000..4dc412437b08
--- /dev/null
+++ b/Documentation/devicetree/bindings/fpga/lattice-ice40-fpga-mgr.txt
@@ -0,0 +1,21 @@
+Lattice iCE40 FPGA Manager
+
+Required properties:
+- compatible: Should contain "lattice,ice40-fpga-mgr"
+- reg: SPI chip select
+- spi-max-frequency: Maximum SPI frequency (>=1000000, <=25000000)
+- cdone-gpios: GPIO input connected to CDONE pin
+- reset-gpios: Active-low GPIO output connected to CRESET_B pin. Note
+ that unless the GPIO is held low during startup, the
+ FPGA will enter Master SPI mode and drive SCK with a
+ clock signal potentially jamming other devices on the
+ bus until the firmware is loaded.
+
+Example:
+ fpga: fpga@0 {
+ compatible = "lattice,ice40-fpga-mgr";
+ reg = <0>;
+ spi-max-frequency = <1000000>;
+ cdone-gpios = <&gpio 24 GPIO_ACTIVE_HIGH>;
+ reset-gpios = <&gpio 22 GPIO_ACTIVE_LOW>;
+ };
diff --git a/Documentation/devicetree/bindings/fpga/xilinx-slave-serial.txt b/Documentation/devicetree/bindings/fpga/xilinx-slave-serial.txt
new file mode 100644
index 000000000000..9766f7472f51
--- /dev/null
+++ b/Documentation/devicetree/bindings/fpga/xilinx-slave-serial.txt
@@ -0,0 +1,44 @@
+Xilinx Slave Serial SPI FPGA Manager
+
+Xilinx Spartan-6 FPGAs support a method of loading the bitstream over
+what is referred to as "slave serial" interface.
+The slave serial link is not technically SPI, and might require extra
+circuits in order to play nicely with other SPI slaves on the same bus.
+
+See https://www.xilinx.com/support/documentation/user_guides/ug380.pdf
+
+Required properties:
+- compatible: should contain "xlnx,fpga-slave-serial"
+- reg: spi chip select of the FPGA
+- prog_b-gpios: config pin (referred to as PROGRAM_B in the manual)
+- done-gpios: config status pin (referred to as DONE in the manual)
+
+Example for full FPGA configuration:
+
+ fpga-region0 {
+ compatible = "fpga-region";
+ fpga-mgr = <&fpga_mgr_spi>;
+ #address-cells = <0x1>;
+ #size-cells = <0x1>;
+ };
+
+ spi1: spi@10680 {
+ compatible = "marvell,armada-xp-spi", "marvell,orion-spi";
+ pinctrl-0 = <&spi0_pins>;
+ pinctrl-names = "default";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ cell-index = <1>;
+ interrupts = <92>;
+ clocks = <&coreclk 0>;
+ status = "okay";
+
+ fpga_mgr_spi: fpga-mgr@0 {
+ compatible = "xlnx,fpga-slave-serial";
+ spi-max-frequency = <60000000>;
+ spi-cpha;
+ reg = <0>;
+ done-gpios = <&gpio0 9 GPIO_ACTIVE_HIGH>;
+ prog_b-gpios = <&gpio0 29 GPIO_ACTIVE_LOW>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/gpio/cortina,gemini-gpio.txt b/Documentation/devicetree/bindings/gpio/cortina,gemini-gpio.txt
deleted file mode 100644
index 5c9246c054e5..000000000000
--- a/Documentation/devicetree/bindings/gpio/cortina,gemini-gpio.txt
+++ /dev/null
@@ -1,24 +0,0 @@
-Cortina Systems Gemini GPIO Controller
-
-Required properties:
-
-- compatible : Must be "cortina,gemini-gpio"
-- reg : Should contain registers location and length
-- interrupts : Should contain the interrupt line for the GPIO block
-- gpio-controller : marks this as a GPIO controller
-- #gpio-cells : Should be 2, see gpio/gpio.txt
-- interrupt-controller : marks this as an interrupt controller
-- #interrupt-cells : a standard two-cell interrupt flag, see
- interrupt-controller/interrupts.txt
-
-Example:
-
-gpio@4d000000 {
- compatible = "cortina,gemini-gpio";
- reg = <0x4d000000 0x100>;
- interrupts = <22 IRQ_TYPE_LEVEL_HIGH>;
- gpio-controller;
- #gpio-cells = <2>;
- interrupt-controller;
- #interrupt-cells = <2>;
-};
diff --git a/Documentation/devicetree/bindings/gpio/faraday,ftgpio010.txt b/Documentation/devicetree/bindings/gpio/faraday,ftgpio010.txt
new file mode 100644
index 000000000000..d04236558619
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpio/faraday,ftgpio010.txt
@@ -0,0 +1,27 @@
+Faraday Technology FTGPIO010 GPIO Controller
+
+Required properties:
+
+- compatible : Should be one of
+ "cortina,gemini-gpio", "faraday,ftgpio010"
+ "moxa,moxart-gpio", "faraday,ftgpio010"
+ "faraday,ftgpio010"
+- reg : Should contain registers location and length
+- interrupts : Should contain the interrupt line for the GPIO block
+- gpio-controller : marks this as a GPIO controller
+- #gpio-cells : Should be 2, see gpio/gpio.txt
+- interrupt-controller : marks this as an interrupt controller
+- #interrupt-cells : a standard two-cell interrupt flag, see
+ interrupt-controller/interrupts.txt
+
+Example:
+
+gpio@4d000000 {
+ compatible = "cortina,gemini-gpio", "faraday,ftgpio010";
+ reg = <0x4d000000 0x100>;
+ interrupts = <22 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+};
diff --git a/Documentation/devicetree/bindings/gpio/gpio-aspeed.txt b/Documentation/devicetree/bindings/gpio/gpio-aspeed.txt
index 393bb2ed8a77..c756afa88cc6 100644
--- a/Documentation/devicetree/bindings/gpio/gpio-aspeed.txt
+++ b/Documentation/devicetree/bindings/gpio/gpio-aspeed.txt
@@ -17,7 +17,8 @@ Required properties:
Optional properties:
-- interrupt-parent : The parent interrupt controller, optional if inherited
+- interrupt-parent : The parent interrupt controller, optional if inherited
+- clocks : A phandle to the HPLL clock node for debounce timings
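+
+For instance, a node using the clock for debounce timings might look like
+this sketch (register range, interrupt number and clock specifier are
+assumptions, not taken from this binding):
+
+	gpio@1e780000 {
+		compatible = "aspeed,ast2500-gpio";
+		reg = <0x1e780000 0x1000>;
+		interrupts = <20>;
+		gpio-controller;
+		#gpio-cells = <2>;
+		interrupt-controller;
+		clocks = <&syscon ASPEED_CLK_HPLL>;	/* phandle/ID assumed */
+	};
+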
The gpio and interrupt properties are further described in their respective
bindings documentation:
diff --git a/Documentation/devicetree/bindings/gpio/gpio-mvebu.txt b/Documentation/devicetree/bindings/gpio/gpio-mvebu.txt
index a6f3bec1da7d..42c3bb2d53e8 100644
--- a/Documentation/devicetree/bindings/gpio/gpio-mvebu.txt
+++ b/Documentation/devicetree/bindings/gpio/gpio-mvebu.txt
@@ -38,6 +38,24 @@ Required properties:
- #gpio-cells: Should be two. The first cell is the pin number. The
second cell is reserved for flags, unused at the moment.
+Optional properties:
+
+In order to use the GPIO lines in PWM mode, some additional optional
+properties are required. Only Armada 370 and XP support these properties.
+
+- compatible: Must contain "marvell,armada-370-xp-gpio"
+
+- reg: an additional register set is needed for the GPIO Blink
+ Counter on/off registers.
+
+- reg-names: Must contain an entry "pwm" corresponding to the
+ additional register range needed for PWM operation.
+
+- #pwm-cells: Should be two. The first cell is the GPIO line number. The
+ second cell is the period in nanoseconds.
+
+- clocks: Must be a phandle to the clock for the GPIO controller.
+
Example:
gpio0: gpio@d0018100 {
@@ -51,3 +69,17 @@ Example:
#interrupt-cells = <2>;
interrupts = <16>, <17>, <18>, <19>;
};
+
+ gpio1: gpio@18140 {
+ compatible = "marvell,armada-370-xp-gpio";
+ reg = <0x18140 0x40>, <0x181c8 0x08>;
+ reg-names = "gpio", "pwm";
+ ngpios = <17>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ #pwm-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ interrupts = <87>, <88>, <89>;
+ clocks = <&coreclk 0>;
+ };
diff --git a/Documentation/devicetree/bindings/gpio/gpio-pca953x.txt b/Documentation/devicetree/bindings/gpio/gpio-pca953x.txt
index e63935710011..7f57271df2bc 100644
--- a/Documentation/devicetree/bindings/gpio/gpio-pca953x.txt
+++ b/Documentation/devicetree/bindings/gpio/gpio-pca953x.txt
@@ -26,6 +26,7 @@ Required properties:
ti,tca6416
ti,tca6424
ti,tca9539
+ ti,tca9554
onsemi,pca9654
exar,xra1202
diff --git a/Documentation/devicetree/bindings/gpio/gpio-pcf857x.txt b/Documentation/devicetree/bindings/gpio/gpio-pcf857x.txt
index ada4e2973323..7d3bd631d011 100644
--- a/Documentation/devicetree/bindings/gpio/gpio-pcf857x.txt
+++ b/Documentation/devicetree/bindings/gpio/gpio-pcf857x.txt
@@ -25,7 +25,6 @@ Required Properties:
- "nxp,pcf8574": For the NXP PCF8574
- "nxp,pcf8574a": For the NXP PCF8574A
- "nxp,pcf8575": For the NXP PCF8575
- - "ti,tca9554": For the TI TCA9554
- reg: I2C slave address.
diff --git a/Documentation/devicetree/bindings/gpio/gpio-thunderx.txt b/Documentation/devicetree/bindings/gpio/gpio-thunderx.txt
new file mode 100644
index 000000000000..3f883ae29d11
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpio/gpio-thunderx.txt
@@ -0,0 +1,27 @@
+Cavium ThunderX/OCTEON-TX GPIO controller bindings
+
+Required Properties:
+- reg: The controller bus address.
+- gpio-controller: Marks the device node as a GPIO controller.
+- #gpio-cells: Must be 2.
+ - First cell is the GPIO pin number relative to the controller.
+ - Second cell is a standard generic flag bitfield as described in gpio.txt.
+
+Optional Properties:
+- compatible: "cavium,thunder-8890-gpio", unused as PCI driver binding is used.
+- interrupt-controller: Marks the device node as an interrupt controller.
+- #interrupt-cells: Must be present and have value of 2 if
+ "interrupt-controller" is present.
+ - First cell is the GPIO pin number relative to the controller.
+ - Second cell is triggering flags as defined in interrupts.txt.
+
+Example:
+
+gpio_6_0: gpio@6,0 {
+ compatible = "cavium,thunder-8890-gpio";
+ reg = <0x3000 0 0 0 0>; /* DEVFN = 0x30 (6:0) */
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+};
diff --git a/Documentation/devicetree/bindings/gpio/gpio-xra1403.txt b/Documentation/devicetree/bindings/gpio/gpio-xra1403.txt
new file mode 100644
index 000000000000..e13cc399b363
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpio/gpio-xra1403.txt
@@ -0,0 +1,46 @@
+GPIO Driver for XRA1403 16-BIT GPIO Expander With Reset Input from EXAR
+
+The XRA1403 is a 16-bit GPIO expander with an SPI interface. Features available:
+ - Individually programmable inputs:
+ - Internal pull-up resistors
+ - Polarity inversion
+ - Individual interrupt enable
+ - Rising edge and/or Falling edge interrupt
+ - Input filter
+ - Individually programmable outputs
+ - Output Level Control
+ - Output Three-State Control
+
+Properties
+----------
+See the SPI and GPIO controller binding documentation for the properties needed to configure the node.
+
+ - compatible = "exar,xra1403".
+ - reg - SPI id of the device.
+ - gpio-controller - marks the node as gpio.
+ - #gpio-cells - should be two where the first cell is the pin number
+ and the second one is used for optional parameters.
+
+Optional properties:
+-------------------
+ - reset-gpios: if available, the GPIO used to control the device reset line.
+ - interrupt-controller - marks the node as interrupt controller.
+ - #interrupt-cells - should be two and represents the number of cells
+ needed to encode interrupt source.
+
+Example
+--------
+
+ gpioxra0: gpio@2 {
+ compatible = "exar,xra1403";
+ reg = <2>;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ reset-gpios = <&gpio3 6 GPIO_ACTIVE_LOW>;
+ spi-max-frequency = <1000000>;
+ };
diff --git a/Documentation/devicetree/bindings/gpio/moxa,moxart-gpio.txt b/Documentation/devicetree/bindings/gpio/moxa,moxart-gpio.txt
deleted file mode 100644
index f8e8f185a3db..000000000000
--- a/Documentation/devicetree/bindings/gpio/moxa,moxart-gpio.txt
+++ /dev/null
@@ -1,19 +0,0 @@
-MOXA ART GPIO Controller
-
-Required properties:
-
-- #gpio-cells : Should be 2, The first cell is the pin number,
- the second cell is used to specify polarity:
- 0 = active high
- 1 = active low
-- compatible : Must be "moxa,moxart-gpio"
-- reg : Should contain registers location and length
-
-Example:
-
- gpio: gpio@98700000 {
- gpio-controller;
- #gpio-cells = <2>;
- compatible = "moxa,moxart-gpio";
- reg = <0x98700000 0xC>;
- };
diff --git a/Documentation/devicetree/bindings/gpio/ni,169445-nand-gpio.txt b/Documentation/devicetree/bindings/gpio/ni,169445-nand-gpio.txt
new file mode 100644
index 000000000000..ca2f8c745a27
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpio/ni,169445-nand-gpio.txt
@@ -0,0 +1,38 @@
+Bindings for the National Instruments 169445 GPIO NAND controller
+
+The 169445 GPIO NAND controller has two memory mapped GPIO registers, one
+for input (the ready signal) and one for output (control signals). It is
+intended to be used with the GPIO NAND driver.
+
+Required properties:
+ - compatible: should be "ni,169445-nand-gpio"
+ - reg-names: must contain
+ "dat" - data register
+ - reg: address + size pairs describing the GPIO register sets;
+ order must correspond with the order of entries in reg-names
+ - #gpio-cells: must be set to 2. The first cell is the pin number and
+ the second cell is used to specify the gpio polarity:
+ 0 = active high
+ 1 = active low
+ - gpio-controller: Marks the device node as a gpio controller.
+
+Optional properties:
+ - no-output: disables driving output on the pins
+
+Examples:
+ gpio1: nand-gpio-out@1f300010 {
+ compatible = "ni,169445-nand-gpio";
+ reg = <0x1f300010 0x4>;
+ reg-names = "dat";
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ gpio2: nand-gpio-in@1f300014 {
+ compatible = "ni,169445-nand-gpio";
+ reg = <0x1f300014 0x4>;
+ reg-names = "dat";
+ gpio-controller;
+ #gpio-cells = <2>;
+ no-output;
+ };
diff --git a/Documentation/devicetree/bindings/gpu/arm,mali-utgard.txt b/Documentation/devicetree/bindings/gpu/arm,mali-utgard.txt
index 476f5ea6c627..2b6243e730f6 100644
--- a/Documentation/devicetree/bindings/gpu/arm,mali-utgard.txt
+++ b/Documentation/devicetree/bindings/gpu/arm,mali-utgard.txt
@@ -35,6 +35,14 @@ Optional properties:
- interrupt-names and interrupts:
* pmu: Power Management Unit interrupt, if implemented in hardware
+ - memory-region:
+ Memory region to allocate from, as defined in
+   Documentation/devicetree/bindings/reserved-memory/reserved-memory.txt
+
+ - operating-points-v2:
+ Operating Points for the GPU, as defined in
+ Documentation/devicetree/bindings/opp/opp.txt
+
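+Both optional properties are plain phandle references, e.g. (a sketch where
+&gpu_reserved and &gpu_opp_table are assumed to be defined elsewhere in the
+tree):
+
+	gpu: gpu@1c40000 {
+		/* compatible, reg, interrupts and clocks as required above */
+		memory-region = <&gpu_reserved>;
+		operating-points-v2 = <&gpu_opp_table>;
+	};
+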
Vendor-specific bindings
------------------------
diff --git a/Documentation/devicetree/bindings/gpu/nvidia,gk20a.txt b/Documentation/devicetree/bindings/gpu/nvidia,gk20a.txt
index ff3db65e50de..b7e4c7444510 100644
--- a/Documentation/devicetree/bindings/gpu/nvidia,gk20a.txt
+++ b/Documentation/devicetree/bindings/gpu/nvidia,gk20a.txt
@@ -5,6 +5,7 @@ Required properties:
Currently recognized values:
- nvidia,gk20a
- nvidia,gm20b
+ - nvidia,gp10b
- reg: Physical base address and length of the controller's registers.
Must contain two entries:
- first entry for bar0
@@ -14,7 +15,8 @@ Required properties:
- interrupt-names: Must include the following entries:
- stall
- nonstall
-- vdd-supply: regulator for supply voltage.
+- vdd-supply: regulator for supply voltage. Only required for GPUs not using
+ power domains.
- clocks: Must contain an entry for each entry in clock-names.
See ../clocks/clock-bindings.txt for details.
- clock-names: Must include the following entries:
@@ -27,6 +29,8 @@ is also required:
See ../reset/reset.txt for details.
- reset-names: Must include the following entries:
- gpu
+- power-domains: GPUs that make use of power domains can define this property
+ instead of vdd-supply. Currently "nvidia,gp10b" makes use of this.
Optional properties:
- iommus: A reference to the IOMMU. See ../iommu/iommu.txt for details.
@@ -68,3 +72,22 @@ Example for GM20B:
iommus = <&mc TEGRA_SWGROUP_GPU>;
status = "disabled";
};
+
+Example for GP10B:
+
+ gpu@17000000 {
+ compatible = "nvidia,gp10b";
+ reg = <0x0 0x17000000 0x0 0x1000000>,
+ <0x0 0x18000000 0x0 0x1000000>;
+ interrupts = <GIC_SPI 70 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 71 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "stall", "nonstall";
+ clocks = <&bpmp TEGRA186_CLK_GPCCLK>,
+ <&bpmp TEGRA186_CLK_GPU>;
+ clock-names = "gpu", "pwr";
+ resets = <&bpmp TEGRA186_RESET_GPU>;
+ reset-names = "gpu";
+ power-domains = <&bpmp TEGRA186_POWER_DOMAIN_GPU>;
+ iommus = <&smmu TEGRA186_SID_GPU>;
+ status = "disabled";
+ };
diff --git a/Documentation/devicetree/bindings/iio/accel/adxl345.txt b/Documentation/devicetree/bindings/iio/accel/adxl345.txt
new file mode 100644
index 000000000000..e7111b02c02c
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/accel/adxl345.txt
@@ -0,0 +1,38 @@
+Analog Devices ADXL345 3-Axis, +/-(2g/4g/8g/16g) Digital Accelerometer
+
+http://www.analog.com/en/products/mems/accelerometers/adxl345.html
+
+Required properties:
+ - compatible : should be "adi,adxl345"
+ - reg : the I2C address or SPI chip select number of the sensor
+
+Required properties for SPI bus usage:
+ - spi-max-frequency : set maximum clock frequency, must be 5000000
+ - spi-cpol and spi-cpha : must be defined for adxl345 to enable SPI mode 3
+
+Optional properties:
+ - interrupt-parent : phandle to the parent interrupt controller as documented
+ in Documentation/devicetree/bindings/interrupt-controller/interrupts.txt
+ - interrupts: interrupt mapping for IRQ as documented in
+ Documentation/devicetree/bindings/interrupt-controller/interrupts.txt
+
+Example for an I2C device node:
+
+ accelerometer@2a {
+ compatible = "adi,adxl345";
+ reg = <0x53>;
+ interrupt-parent = <&gpio1>;
+ interrupts = <0 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+Example for a SPI device node:
+
+ accelerometer@0 {
+ compatible = "adi,adxl345";
+ reg = <0>;
+ spi-max-frequency = <5000000>;
+ spi-cpol;
+ spi-cpha;
+ interrupt-parent = <&gpio1>;
+ interrupts = <0 IRQ_TYPE_LEVEL_HIGH>;
+ };
diff --git a/Documentation/devicetree/bindings/iio/adc/amlogic,meson-saradc.txt b/Documentation/devicetree/bindings/iio/adc/amlogic,meson-saradc.txt
index f9e3ff2c656e..047189192aec 100644
--- a/Documentation/devicetree/bindings/iio/adc/amlogic,meson-saradc.txt
+++ b/Documentation/devicetree/bindings/iio/adc/amlogic,meson-saradc.txt
@@ -7,6 +7,7 @@ Required properties:
- "amlogic,meson-gxm-saradc" for GXM
along with the generic "amlogic,meson-saradc"
- reg: the physical base address and length of the registers
+- interrupts: the interrupt indicating end of sampling
- clocks: phandle and clock identifier (see clock-names)
- clock-names: mandatory clocks:
- "clkin" for the reference clock (typically XTAL)
@@ -23,6 +24,7 @@ Example:
compatible = "amlogic,meson-gxl-saradc", "amlogic,meson-saradc";
#io-channel-cells = <1>;
reg = <0x0 0x8680 0x0 0x34>;
+ interrupts = <GIC_SPI 73 IRQ_TYPE_EDGE_RISING>;
clocks = <&xtal>,
<&clkc CLKID_SAR_ADC>,
<&clkc CLKID_SANA>,
diff --git a/Documentation/devicetree/bindings/iio/adc/aspeed_adc.txt b/Documentation/devicetree/bindings/iio/adc/aspeed_adc.txt
new file mode 100644
index 000000000000..674e133b7cd7
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/adc/aspeed_adc.txt
@@ -0,0 +1,20 @@
+Aspeed ADC
+
+This device is a 10-bit converter for 16 voltage channels. All inputs are
+single ended.
+
+Required properties:
+- compatible: Should be "aspeed,ast2400-adc" or "aspeed,ast2500-adc"
+- reg: memory window mapping address and length
+- clocks: Input clock used to derive the sample clock. Expected to be the
+ SoC's APB clock.
+- #io-channel-cells: Must be set to <1> to indicate channels are selected
+ by index.
+
+Example:
+ adc@1e6e9000 {
+ compatible = "aspeed,ast2400-adc";
+ reg = <0x1e6e9000 0xb0>;
+ clocks = <&clk_apb>;
+ #io-channel-cells = <1>;
+ };
diff --git a/Documentation/devicetree/bindings/iio/adc/cpcap-adc.txt b/Documentation/devicetree/bindings/iio/adc/cpcap-adc.txt
new file mode 100644
index 000000000000..487ea966858e
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/adc/cpcap-adc.txt
@@ -0,0 +1,18 @@
+Motorola CPCAP PMIC ADC binding
+
+Required properties:
+- compatible: Should be "motorola,cpcap-adc" or "motorola,mapphone-cpcap-adc"
+- interrupt-parent: The interrupt controller
+- interrupts: The interrupt number for the ADC device
+- interrupt-names: Should be "adcdone"
+- #io-channel-cells: Number of cells in an IIO specifier
+
+Example:
+
+cpcap_adc: adc {
+ compatible = "motorola,mapphone-cpcap-adc";
+ interrupt-parent = <&cpcap>;
+ interrupts = <8 IRQ_TYPE_NONE>;
+ interrupt-names = "adcdone";
+ #io-channel-cells = <1>;
+};
diff --git a/Documentation/devicetree/bindings/iio/adc/ltc2497.txt b/Documentation/devicetree/bindings/iio/adc/ltc2497.txt
new file mode 100644
index 000000000000..a237ed99c0d8
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/adc/ltc2497.txt
@@ -0,0 +1,13 @@
+* Linear Technology / Analog Devices LTC2497 ADC
+
+Required properties:
+ - compatible: Must be "lltc,ltc2497"
+ - reg: Must contain the ADC I2C address
+ - vref-supply: The regulator supply for ADC reference voltage
+
+Example:
+ ltc2497: adc@76 {
+ compatible = "lltc,ltc2497";
+ reg = <0x76>;
+ vref-supply = <&ltc2497_reg>;
+ };
diff --git a/Documentation/devicetree/bindings/iio/adc/max1118.txt b/Documentation/devicetree/bindings/iio/adc/max1118.txt
new file mode 100644
index 000000000000..cf33d0b15a6d
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/adc/max1118.txt
@@ -0,0 +1,21 @@
+* MAX1117/MAX1118/MAX1119 8-bit, dual-channel ADCs
+
+Required properties:
+ - compatible: Should be one of
+ * "maxim,max1117"
+ * "maxim,max1118"
+ * "maxim,max1119"
+ - reg: spi chip select number for the device
+ - (max1118 only) vref-supply: The regulator supply for ADC reference voltage
+
+Recommended properties:
+ - spi-max-frequency: Definition as per
+ Documentation/devicetree/bindings/spi/spi-bus.txt
+
+Example:
+adc@0 {
+ compatible = "maxim,max1118";
+ reg = <0>;
+ vref-supply = <&vdd_supply>;
+ spi-max-frequency = <1000000>;
+};
diff --git a/Documentation/devicetree/bindings/iio/adc/max9611.txt b/Documentation/devicetree/bindings/iio/adc/max9611.txt
new file mode 100644
index 000000000000..ab4f43145ae5
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/adc/max9611.txt
@@ -0,0 +1,27 @@
+* Maxim max9611/max9612 current sense amplifier with 12-bits ADC interface
+
+The Maxim max9611/max9612 is a high-side current sense amplifier with an
+integrated 12-bit ADC that communicates over the I2C bus.
+The device node for this driver shall be a child of an I2C controller.
+
+Required properties
+ - compatible: Should be "maxim,max9611" or "maxim,max9612"
+ - reg: The 7-bit I2C address of the device
+ - shunt-resistor-micro-ohms: Value, in micro Ohms, of the current sense shunt
+ resistor
+
+Example:
+
+&i2c4 {
+ csa: adc@7c {
+ compatible = "maxim,max9611";
+ reg = <0x7c>;
+
+ shunt-resistor-micro-ohms = <5000>;
+ };
+};
+
+This device node describes a current sense amplifier sitting on the I2C4 bus
+at address 0x7c (read address is 0xf9, write address is 0xf8).
+A sense resistor of 0.005 Ohm is installed between the RS+ and RS-
+current-sensing inputs.
diff --git a/Documentation/devicetree/bindings/iio/adc/qcom,pm8xxx-xoadc.txt b/Documentation/devicetree/bindings/iio/adc/qcom,pm8xxx-xoadc.txt
index 53cd146d8096..3ae06127789e 100644
--- a/Documentation/devicetree/bindings/iio/adc/qcom,pm8xxx-xoadc.txt
+++ b/Documentation/devicetree/bindings/iio/adc/qcom,pm8xxx-xoadc.txt
@@ -19,32 +19,42 @@ Required properties:
with PMIC variant but is typically something like 2.2 or 1.8V.
The following required properties are standard for IO channels, see
-iio-bindings.txt for more details:
+iio-bindings.txt for more details, but notice that this particular
+ADC has a special addressing scheme that requires two cells for
+identifying each ADC channel:
-- #address-cells: should be set to <1>
+- #address-cells: should be set to <2>, the first cell is the
+ prescaler (on PM8058) or premux (on PM8921) with two valid bits
+ so legal values are 0x00, 0x01 or 0x02. The second cell
+ is the main analog mux setting (0x00..0x0f). The combination
+ of prescaler/premux and analog mux uniquely addresses a hardware
+ channel on all systems.
- #size-cells: should be set to <0>
-- #io-channel-cells: should be set to <1>
+- #io-channel-cells: should be set to <2>, again the cells are
+  prescaler or premux followed by the analog muxing line.
- interrupts: should refer to the parent PMIC interrupt controller
and reference the proper ADC interrupt.
Required subnodes:
-The ADC channels are configured as subnodes of the ADC. Since some of
-them are used for calibrating the ADC, these nodes are compulsory:
+The ADC channels are configured as subnodes of the ADC.
+
+Since some of them are used for calibrating the ADC, these nodes are
+compulsory:
adc-channel@c {
- reg = <0x0c>;
+ reg = <0x00 0x0c>;
};
adc-channel@d {
- reg = <0x0d>;
+ reg = <0x00 0x0d>;
};
adc-channel@f {
- reg = <0x0f>;
+ reg = <0x00 0x0f>;
};
These three nodes are used for absolute and ratiometric calibration
@@ -52,13 +62,13 @@ and only need to have these reg values: they are by hardware definition
1:1 ratio converters that sample 625, 1250 and 0 milliV and create
an interpolation calibration for all other ADCs.
-Optional subnodes: any channels other than channel 0x0c, 0x0d and
-0x0f are optional.
+Optional subnodes: any channels other than channels [0x00 0x0c],
+[0x00 0x0d] and [0x00 0x0f] are optional.
Required channel node properties:
- reg: should contain the hardware channel number in the range
- 0 .. 0x0f (4 bits). The hardware only supports 16 channels.
+ 0 .. 0xff (8 bits).
Optional channel node properties:
@@ -94,56 +104,54 @@ Example:
xoadc: xoadc@197 {
compatible = "qcom,pm8058-adc";
reg = <0x197>;
- interrupt-parent = <&pm8058>;
- interrupts = <76 1>;
- #address-cells = <1>;
+ interrupts-extended = <&pm8058 76 IRQ_TYPE_EDGE_RISING>;
+ #address-cells = <2>;
#size-cells = <0>;
- #io-channel-cells = <1>;
+ #io-channel-cells = <2>;
vcoin: adc-channel@0 {
- reg = <0x00>;
+ reg = <0x00 0x00>;
};
vbat: adc-channel@1 {
- reg = <0x01>;
+ reg = <0x00 0x01>;
};
dcin: adc-channel@2 {
- reg = <0x02>;
+ reg = <0x00 0x02>;
};
ichg: adc-channel@3 {
- reg = <0x03>;
+ reg = <0x00 0x03>;
};
vph_pwr: adc-channel@4 {
- reg = <0x04>;
+ reg = <0x00 0x04>;
};
usb_vbus: adc-channel@a {
- reg = <0x0a>;
+ reg = <0x00 0x0a>;
};
die_temp: adc-channel@b {
- reg = <0x0b>;
+ reg = <0x00 0x0b>;
};
ref_625mv: adc-channel@c {
- reg = <0x0c>;
+ reg = <0x00 0x0c>;
};
ref_1250mv: adc-channel@d {
- reg = <0x0d>;
+ reg = <0x00 0x0d>;
};
ref_325mv: adc-channel@e {
- reg = <0x0e>;
+ reg = <0x00 0x0e>;
};
ref_muxoff: adc-channel@f {
- reg = <0x0f>;
+ reg = <0x00 0x0f>;
};
};
-
/* IIO client node */
iio-hwmon {
compatible = "iio-hwmon";
- io-channels = <&xoadc 0x01>, /* Battery */
- <&xoadc 0x02>, /* DC in (charger) */
- <&xoadc 0x04>, /* VPH the main system voltage */
- <&xoadc 0x0b>, /* Die temperature */
- <&xoadc 0x0c>, /* Reference voltage 1.25V */
- <&xoadc 0x0d>, /* Reference voltage 0.625V */
- <&xoadc 0x0e>; /* Reference voltage 0.325V */
+ io-channels = <&xoadc 0x00 0x01>, /* Battery */
+ <&xoadc 0x00 0x02>, /* DC in (charger) */
+ <&xoadc 0x00 0x04>, /* VPH the main system voltage */
+ <&xoadc 0x00 0x0b>, /* Die temperature */
+ <&xoadc 0x00 0x0c>, /* Reference voltage 1.25V */
+ <&xoadc 0x00 0x0d>, /* Reference voltage 0.625V */
+ <&xoadc 0x00 0x0e>; /* Reference voltage 0.325V */
};
diff --git a/Documentation/devicetree/bindings/iio/adc/rockchip-saradc.txt b/Documentation/devicetree/bindings/iio/adc/rockchip-saradc.txt
index 205593f56fe7..e0a9b9d6d6fd 100644
--- a/Documentation/devicetree/bindings/iio/adc/rockchip-saradc.txt
+++ b/Documentation/devicetree/bindings/iio/adc/rockchip-saradc.txt
@@ -4,6 +4,7 @@ Required properties:
- compatible: should be "rockchip,<name>-saradc" or "rockchip,rk3066-tsadc"
- "rockchip,saradc": for rk3188, rk3288
- "rockchip,rk3066-tsadc": for rk3036
+ - "rockchip,rk3328-saradc", "rockchip,rk3399-saradc": for rk3328
- "rockchip,rk3399-saradc": for rk3399
- reg: physical base address of the controller and length of memory mapped
diff --git a/Documentation/devicetree/bindings/iio/adc/st,stm32-adc.txt b/Documentation/devicetree/bindings/iio/adc/st,stm32-adc.txt
index 5dfc88ec24a4..e35f9f1b3200 100644
--- a/Documentation/devicetree/bindings/iio/adc/st,stm32-adc.txt
+++ b/Documentation/devicetree/bindings/iio/adc/st,stm32-adc.txt
@@ -57,6 +57,9 @@ Optional properties:
- dmas: Phandle to dma channel for this ADC instance.
See ../../dma/dma.txt for details.
- dma-names: Must be "rx" when dmas property is being used.
+- assigned-resolution-bits: Resolution (bits) to use for conversions. Must
+ match device available resolutions (e.g. can be 6, 8, 10 or 12 on stm32f4).
+ Default is maximum resolution if unset.
Example:
adc: adc@40012000 {
@@ -84,6 +87,7 @@ Example:
st,adc-channels = <8>;
dmas = <&dma2 0 0 0x400 0x0>;
dma-names = "rx";
+ assigned-resolution-bits = <8>;
};
...
other adc child nodes follow...
diff --git a/Documentation/devicetree/bindings/iio/dac/ltc2632.txt b/Documentation/devicetree/bindings/iio/dac/ltc2632.txt
new file mode 100644
index 000000000000..eb911e5a8ab4
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/dac/ltc2632.txt
@@ -0,0 +1,23 @@
+Linear Technology LTC2632 DAC device driver
+
+Required properties:
+ - compatible: Has to contain one of the following:
+ lltc,ltc2632-l12
+ lltc,ltc2632-l10
+ lltc,ltc2632-l8
+ lltc,ltc2632-h12
+ lltc,ltc2632-h10
+ lltc,ltc2632-h8
+
+Property rules described in Documentation/devicetree/bindings/spi/spi-bus.txt
+apply. In particular, "reg" and "spi-max-frequency" properties must be given.
+
+Example:
+
+ spi_master {
+ dac: ltc2632@0 {
+ compatible = "lltc,ltc2632-l12";
+ reg = <0>; /* CS0 */
+ spi-max-frequency = <1000000>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/iio/dac/st,stm32-dac.txt b/Documentation/devicetree/bindings/iio/dac/st,stm32-dac.txt
new file mode 100644
index 000000000000..bcee71f808d0
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/dac/st,stm32-dac.txt
@@ -0,0 +1,61 @@
+STMicroelectronics STM32 DAC
+
+The STM32 DAC is a 12-bit voltage output digital-to-analog converter. The DAC
+may be configured in 8 or 12-bit mode. It has two output channels, each with
+its own converter.
+It has a built-in noise and triangle waveform generator and supports external
+triggers for conversions. The DAC's output buffer allows a high drive output
+current.
+
+Contents of a stm32 dac root node:
+-----------------------------------
+Required properties:
+- compatible: Must be "st,stm32h7-dac-core".
+- reg: Offset and length of the device's register set.
+- clocks: Must contain an entry for pclk (which feeds the peripheral bus
+ interface)
+- clock-names: Must be "pclk".
+- vref-supply: Phandle to the vref+ input analog reference supply.
+- #address-cells = <1>;
+- #size-cells = <0>;
+
+Optional properties:
+- resets: Must contain the phandle to the reset controller.
+- A pinctrl state named "default" for each DAC channel may be defined to set
+ DAC_OUTx pin in mode of operation for analog output on external pin.
+
+Contents of a stm32 dac child node:
+-----------------------------------
+The DAC core node should contain at least one subnode, representing a
+DAC instance/channel available on the machine.
+
+Required properties:
+- compatible: Must be "st,stm32-dac".
+- reg: Must be either 1 or 2, to define (single) channel in use
+- #io-channel-cells = <1>: See the IIO bindings section "IIO consumers" in
+ Documentation/devicetree/bindings/iio/iio-bindings.txt
+
+Example:
+ dac: dac@40007400 {
+ compatible = "st,stm32h7-dac-core";
+ reg = <0x40007400 0x400>;
+ clocks = <&clk>;
+ clock-names = "pclk";
+ vref-supply = <&reg_vref>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&dac_out1 &dac_out2>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ dac1: dac@1 {
+ compatible = "st,stm32-dac";
+		#io-channel-cells = <1>;
+ reg = <1>;
+ };
+
+ dac2: dac@2 {
+ compatible = "st,stm32-dac";
+		#io-channel-cells = <1>;
+ reg = <2>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/iio/health/max30102.txt b/Documentation/devicetree/bindings/iio/health/max30102.txt
new file mode 100644
index 000000000000..c695e7cbeefb
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/health/max30102.txt
@@ -0,0 +1,30 @@
+Maxim MAX30102 heart rate and pulse oximeter sensor
+
+* https://datasheets.maximintegrated.com/en/ds/MAX30102.pdf
+
+Required properties:
+ - compatible: must be "maxim,max30102"
+ - reg: the I2C address of the sensor
+ - interrupt-parent: should be the phandle for the interrupt controller
+ - interrupts: the sole interrupt generated by the device
+
+ Refer to interrupt-controller/interrupts.txt for generic
+ interrupt client node bindings.
+
+Optional properties:
+ - maxim,red-led-current-microamp: configuration for RED LED current
+ - maxim,ir-led-current-microamp: configuration for IR LED current
+
+ Note that each step is approximately 200 microamps, ranging from 0 uA to
+ 50800 uA.
+
+Example:
+
+max30102@57 {
+ compatible = "maxim,max30102";
+ reg = <0x57>;
+ maxim,red-led-current-microamp = <7000>;
+ maxim,ir-led-current-microamp = <7000>;
+ interrupt-parent = <&gpio1>;
+ interrupts = <16 2>;
+};
diff --git a/Documentation/devicetree/bindings/iio/imu/inv_mpu6050.txt b/Documentation/devicetree/bindings/iio/imu/inv_mpu6050.txt
index a9fc11e43b45..2b4514592f83 100644
--- a/Documentation/devicetree/bindings/iio/imu/inv_mpu6050.txt
+++ b/Documentation/devicetree/bindings/iio/imu/inv_mpu6050.txt
@@ -3,14 +3,21 @@ InvenSense MPU-6050 Six-Axis (Gyro + Accelerometer) MEMS MotionTracking Device
http://www.invensense.com/mems/gyro/mpu6050.html
Required properties:
- - compatible : should be "invensense,mpu6050"
+ - compatible : should be one of
+ "invensense,mpu6050"
+ "invensense,mpu6500"
+ "invensense,mpu9150"
+ "invensense,mpu9250"
+ "invensense,icm20608"
- reg : the I2C address of the sensor
- interrupt-parent : should be the phandle for the interrupt controller
- interrupts : interrupt mapping for GPIO IRQ
Optional properties:
- mount-matrix: an optional 3x3 mounting rotation matrix
-
+ - i2c-gate node. These devices also support an auxiliary i2c bus. This is
+ simple enough to be described using the i2c-gate binding. See
+ i2c/i2c-gate.txt for more details.
Example:
mpu6050@68 {
@@ -28,3 +35,19 @@ Example:
"0", /* y2 */
"0.984807753012208"; /* z2 */
};
+
+
+ mpu9250@68 {
+ compatible = "invensense,mpu9250";
+ reg = <0x68>;
+ interrupt-parent = <&gpio3>;
+ interrupts = <21 1>;
+ i2c-gate {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ ax8975@c {
+ compatible = "ak,ak8975";
+ reg = <0x0c>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/iio/imu/st_lsm6dsx.txt b/Documentation/devicetree/bindings/iio/imu/st_lsm6dsx.txt
index cf81afdf7803..8305fb05ffda 100644
--- a/Documentation/devicetree/bindings/iio/imu/st_lsm6dsx.txt
+++ b/Documentation/devicetree/bindings/iio/imu/st_lsm6dsx.txt
@@ -3,6 +3,8 @@
Required properties:
- compatible: must be one of:
"st,lsm6ds3"
+ "st,lsm6ds3h"
+ "st,lsm6dsl"
"st,lsm6dsm"
- reg: i2c address of the sensor / spi cs line
diff --git a/Documentation/devicetree/bindings/iio/light/vl6180.txt b/Documentation/devicetree/bindings/iio/light/vl6180.txt
new file mode 100644
index 000000000000..2c52952715a0
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/light/vl6180.txt
@@ -0,0 +1,15 @@
+STMicro VL6180 - ALS, range and proximity sensor
+
+Link to datasheet: http://www.st.com/resource/en/datasheet/vl6180x.pdf
+
+Required properties:
+
+ -compatible: should be "st,vl6180"
+ -reg: the I2C address of the sensor
+
+Example:
+
+vl6180@29 {
+ compatible = "st,vl6180";
+ reg = <0x29>;
+};
diff --git a/Documentation/devicetree/bindings/iio/proximity/devantech-srf04.txt b/Documentation/devicetree/bindings/iio/proximity/devantech-srf04.txt
new file mode 100644
index 000000000000..d4dc7a227e2e
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/proximity/devantech-srf04.txt
@@ -0,0 +1,28 @@
+* Devantech SRF04 ultrasonic range finder
+ Bit-banging driver using two GPIOs
+
+Required properties:
+ - compatible: Should be "devantech,srf04"
+
+ - trig-gpios: Definition of the GPIO for the triggering (output)
+ This GPIO is set for about 10 us by the driver to tell the
+ device it should initiate the measurement cycle.
+
+ - echo-gpios: Definition of the GPIO for the echo (input)
+ This GPIO is set by the device as soon as an ultrasonic
+ burst is sent out and reset when the first echo is
+ received.
+ Thus this GPIO is set while the ultrasonic waves are doing
+ one round trip.
+ It needs to be a GPIO which is able to deliver an
+ interrupt because the time between two interrupts is
+ measured in the driver.
+ See Documentation/devicetree/bindings/gpio/gpio.txt for
+ information on how to specify a consumer gpio.
+
+Example:
+srf04@0 {
+ compatible = "devantech,srf04";
+ trig-gpios = <&gpio1 15 GPIO_ACTIVE_HIGH>;
+ echo-gpios = <&gpio2 6 GPIO_ACTIVE_HIGH>;
+};
diff --git a/Documentation/devicetree/bindings/interrupt-controller/arm,nvic.txt b/Documentation/devicetree/bindings/interrupt-controller/arm,nvic.txt
new file mode 100644
index 000000000000..386ab37a383f
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/arm,nvic.txt
@@ -0,0 +1,36 @@
+* ARM Nested Vector Interrupt Controller (NVIC)
+
+The NVIC provides an interrupt controller that is tightly coupled to
+Cortex-M based processor cores. NVIC implementations on different SoCs
+vary in the number of interrupts and priority bits per interrupt.
+
+Main node required properties:
+
+- compatible : should be one of:
+ "arm,v6m-nvic"
+ "arm,v7m-nvic"
+ "arm,v8m-nvic"
+- interrupt-controller : Identifies the node as an interrupt controller
+- #interrupt-cells : Specifies the number of cells needed to encode an
+ interrupt source. The type shall be a <u32> and the value shall be 2.
+
+ The 1st cell contains the interrupt number for the interrupt type.
+
+ The 2nd cell is the priority of the interrupt.
+
+- reg : Specifies the base physical address(es) and size of the NVIC registers.
+ This is at a fixed address (0xe000e100) and size (0xc00).
+
+- arm,num-irq-priority-bits: The number of priority bits implemented by the
+ given SoC
+
+Example:
+
+ intc: interrupt-controller@e000e100 {
+ compatible = "arm,v7m-nvic";
+ #interrupt-cells = <2>;
+ #address-cells = <1>;
+ interrupt-controller;
+ reg = <0xe000e100 0xc00>;
+ arm,num-irq-priority-bits = <4>;
+ };
diff --git a/Documentation/devicetree/bindings/iommu/arm,smmu.txt b/Documentation/devicetree/bindings/iommu/arm,smmu.txt
index 6cdf32d037fc..8a6ffce12af5 100644
--- a/Documentation/devicetree/bindings/iommu/arm,smmu.txt
+++ b/Documentation/devicetree/bindings/iommu/arm,smmu.txt
@@ -60,6 +60,17 @@ conditions.
aliases of secure registers have to be used during
SMMU configuration.
+- stream-match-mask : For SMMUs supporting stream matching and using
+ #iommu-cells = <1>, specifies a mask of bits to ignore
+ when matching stream IDs (e.g. this may be programmed
+ into the SMRn.MASK field of every stream match register
+ used). This is useful where it is desirable to ignore some
+ portion of every Stream ID (e.g. for certain MMU-500
+ configurations given globally unique input IDs). This
+ property is not valid for SMMUs using stream indexing,
+ or using stream matching with #iommu-cells = <2>, and
+ may be ignored if present in such cases.
+
** Deprecated properties:
- mmu-masters (deprecated in favour of the generic "iommus" binding) :
@@ -109,3 +120,20 @@ conditions.
master3 {
iommus = <&smmu2 1 0x30>;
};
+
+
+ /* ARM MMU-500 with 10-bit stream ID input configuration */
+ smmu3: iommu {
+ compatible = "arm,mmu-500", "arm,smmu-v2";
+ ...
+ #iommu-cells = <1>;
+ /* always ignore appended 5-bit TBU number */
+ stream-match-mask = <0x7c00>;
+ };
+
+ bus {
+ /* bus whose child devices emit one unique 10-bit stream
+ ID each, but may master through multiple SMMU TBUs */
+ iommu-map = <0 &smmu3 0 0x400>;
+ ...
+ };
diff --git a/Documentation/devicetree/bindings/media/atmel-isi.txt b/Documentation/devicetree/bindings/media/atmel-isi.txt
index 251f008f220c..332513a151cc 100644
--- a/Documentation/devicetree/bindings/media/atmel-isi.txt
+++ b/Documentation/devicetree/bindings/media/atmel-isi.txt
@@ -1,51 +1,66 @@
-Atmel Image Sensor Interface (ISI) SoC Camera Subsystem
-----------------------------------------------
-
-Required properties:
-- compatible: must be "atmel,at91sam9g45-isi"
-- reg: physical base address and length of the registers set for the device;
-- interrupts: should contain IRQ line for the ISI;
-- clocks: list of clock specifiers, corresponding to entries in
- the clock-names property;
-- clock-names: must contain "isi_clk", which is the isi peripherial clock.
-
-ISI supports a single port node with parallel bus. It should contain one
+Atmel Image Sensor Interface (ISI)
+----------------------------------
+
+Required properties for ISI:
+- compatible: must be "atmel,at91sam9g45-isi".
+- reg: physical base address and length of the registers set for the device.
+- interrupts: should contain IRQ line for the ISI.
+- clocks: list of clock specifiers, corresponding to entries in the clock-names
+ property; please refer to clock-bindings.txt.
+- clock-names: required elements: "isi_clk".
+- pinctrl-names, pinctrl-0: please refer to pinctrl-bindings.txt.
+
+ISI supports a single port node with parallel bus. It shall contain one
'port' child node with child 'endpoint' node. Please refer to the bindings
defined in Documentation/devicetree/bindings/media/video-interfaces.txt.
-Example:
- isi: isi@f0034000 {
- compatible = "atmel,at91sam9g45-isi";
- reg = <0xf0034000 0x4000>;
- interrupts = <37 IRQ_TYPE_LEVEL_HIGH 5>;
-
- clocks = <&isi_clk>;
- clock-names = "isi_clk";
+Endpoint node properties
+------------------------
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_isi>;
+- bus-width: <8> or <10> (mandatory)
+- hsync-active (default: active high)
+- vsync-active (default: active high)
+- pclk-sample (default: sample on falling edge)
+- remote-endpoint: A phandle to the bus receiver's endpoint node (mandatory).
- port {
- #address-cells = <1>;
- #size-cells = <0>;
+Example:
- isi_0: endpoint {
- remote-endpoint = <&ov2640_0>;
- bus-width = <8>;
- };
+isi: isi@f0034000 {
+ compatible = "atmel,at91sam9g45-isi";
+ reg = <0xf0034000 0x4000>;
+ interrupts = <37 IRQ_TYPE_LEVEL_HIGH 5>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_isi_data_0_7>;
+ clocks = <&isi_clk>;
+ clock-names = "isi_clk";
+ port {
+ isi_0: endpoint {
+ remote-endpoint = <&ov2640_0>;
+ bus-width = <8>;
+ vsync-active = <1>;
+ hsync-active = <1>;
};
};
+};
- i2c1: i2c@f0018000 {
- ov2640: camera@0x30 {
- compatible = "ovti,ov2640";
- reg = <0x30>;
+i2c1: i2c@f0018000 {
+ ov2640: camera@30 {
+ compatible = "ovti,ov2640";
+ reg = <0x30>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pck0_as_isi_mck &pinctrl_sensor_power &pinctrl_sensor_reset>;
+ resetb-gpios = <&pioE 11 GPIO_ACTIVE_LOW>;
+ pwdn-gpios = <&pioE 13 GPIO_ACTIVE_HIGH>;
+ clocks = <&pck0>;
+ clock-names = "xvclk";
+ assigned-clocks = <&pck0>;
+ assigned-clock-rates = <25000000>;
- port {
- ov2640_0: endpoint {
- remote-endpoint = <&isi_0>;
- bus-width = <8>;
- };
+ port {
+ ov2640_0: endpoint {
+ remote-endpoint = <&isi_0>;
+ bus-width = <8>;
};
};
};
+};
diff --git a/Documentation/devicetree/bindings/media/i2c/ov2640.txt b/Documentation/devicetree/bindings/media/i2c/ov2640.txt
index c429b5bdcaa0..989ce6cb6ac3 100644
--- a/Documentation/devicetree/bindings/media/i2c/ov2640.txt
+++ b/Documentation/devicetree/bindings/media/i2c/ov2640.txt
@@ -1,8 +1,8 @@
* Omnivision OV2640 CMOS sensor
-The Omnivision OV2640 sensor support multiple resolutions output, such as
-CIF, SVGA, UXGA. It also can support YUV422/420, RGB565/555 or raw RGB
-output format.
+The Omnivision OV2640 sensor supports multiple output resolutions, such as
+CIF, SVGA and UXGA. It can also output the YUV422/420, RGB565/555 or raw RGB
+formats.
Required Properties:
- compatible: should be "ovti,ov2640"
@@ -20,26 +20,21 @@ Documentation/devicetree/bindings/media/video-interfaces.txt.
Example:
i2c1: i2c@f0018000 {
- ov2640: camera@0x30 {
+ ov2640: camera@30 {
compatible = "ovti,ov2640";
reg = <0x30>;
-
pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_pck1 &pinctrl_ov2640_pwdn &pinctrl_ov2640_resetb>;
-
- resetb-gpios = <&pioE 24 GPIO_ACTIVE_LOW>;
- pwdn-gpios = <&pioE 29 GPIO_ACTIVE_HIGH>;
-
- clocks = <&pck1>;
+ pinctrl-0 = <&pinctrl_pck0_as_isi_mck &pinctrl_sensor_power &pinctrl_sensor_reset>;
+ resetb-gpios = <&pioE 11 GPIO_ACTIVE_LOW>;
+ pwdn-gpios = <&pioE 13 GPIO_ACTIVE_HIGH>;
+ clocks = <&pck0>;
clock-names = "xvclk";
-
- assigned-clocks = <&pck1>;
+ assigned-clocks = <&pck0>;
assigned-clock-rates = <25000000>;
port {
ov2640_0: endpoint {
remote-endpoint = <&isi_0>;
- bus-width = <8>;
};
};
};
diff --git a/Documentation/devicetree/bindings/media/i2c/ov5645.txt b/Documentation/devicetree/bindings/media/i2c/ov5645.txt
new file mode 100644
index 000000000000..fd7aec9f8e24
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/i2c/ov5645.txt
@@ -0,0 +1,54 @@
+* Omnivision 1/4-Inch 5Mp CMOS Digital Image Sensor
+
+The Omnivision OV5645 is a 1/4-Inch CMOS active pixel digital image sensor with
+an active array size of 2592H x 1944V. It is programmable through a serial I2C
+interface.
+
+Required Properties:
+- compatible: Value should be "ovti,ov5645".
+- clocks: Reference to the xclk clock.
+- clock-names: Should be "xclk".
+- clock-frequency: Frequency of the xclk clock.
+- enable-gpios: Chip enable GPIO. Polarity is GPIO_ACTIVE_HIGH. This corresponds
+ to the hardware pin PWDNB which is physically active low.
+- reset-gpios: Chip reset GPIO. Polarity is GPIO_ACTIVE_LOW. This corresponds to
+ the hardware pin RESETB.
+- vdddo-supply: Chip digital IO regulator.
+- vdda-supply: Chip analog regulator.
+- vddd-supply: Chip digital core regulator.
+
+The device node must contain one 'port' child node for its digital output
+video port, in accordance with the video interface bindings defined in
+Documentation/devicetree/bindings/media/video-interfaces.txt.
+
+Example:
+
+ &i2c1 {
+ ...
+
+ ov5645: ov5645@78 {
+ compatible = "ovti,ov5645";
+ reg = <0x78>;
+
+ enable-gpios = <&gpio1 6 GPIO_ACTIVE_HIGH>;
+ reset-gpios = <&gpio5 20 GPIO_ACTIVE_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&camera_rear_default>;
+
+ clocks = <&clks 200>;
+ clock-names = "xclk";
+ clock-frequency = <23880000>;
+
+ vdddo-supply = <&camera_dovdd_1v8>;
+ vdda-supply = <&camera_avdd_2v8>;
+ vddd-supply = <&camera_dvdd_1v2>;
+
+ port {
+ ov5645_ep: endpoint {
+ clock-lanes = <1>;
+ data-lanes = <0 2>;
+ remote-endpoint = <&csi0_ep>;
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/media/i2c/ov5647.txt b/Documentation/devicetree/bindings/media/i2c/ov5647.txt
new file mode 100644
index 000000000000..22e44945b661
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/i2c/ov5647.txt
@@ -0,0 +1,35 @@
+Omnivision OV5647 raw image sensor
+---------------------------------
+
+OV5647 is a raw image sensor with MIPI CSI-2 and CCP2 image data interfaces
+and CCI (I2C compatible) control bus.
+
+Required properties:
+
+- compatible : "ovti,ov5647".
+- reg : I2C slave address of the sensor.
+- clocks : Reference to the xclk clock.
+
+The common video interfaces bindings (see video-interfaces.txt) should be
+used to specify link to the image data receiver. The OV5647 device
+node should contain one 'port' child node with an 'endpoint' subnode.
+
+Endpoint node mandatory properties:
+
+- remote-endpoint: A phandle to the bus receiver's endpoint node.
+
+Example:
+
+ i2c@2000 {
+ ...
+ ov: camera@36 {
+ compatible = "ovti,ov5647";
+ reg = <0x36>;
+ clocks = <&camera_clk>;
+ port {
+ camera_1: endpoint {
+ remote-endpoint = <&csi1_ep1>;
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/media/i2c/ov7670.txt b/Documentation/devicetree/bindings/media/i2c/ov7670.txt
new file mode 100644
index 000000000000..826b6563b009
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/i2c/ov7670.txt
@@ -0,0 +1,43 @@
+* Omnivision OV7670 CMOS sensor
+
+The Omnivision OV7670 sensor is a VGA camera chip supporting multiple output
+resolutions, such as VGA, CIF and QVGA. It can also output the YUV422/420,
+RGB565/555 or raw RGB formats.
+
+Required Properties:
+- compatible: should be "ovti,ov7670"
+- clocks: reference to the xclk input clock.
+- clock-names: should be "xclk".
+
+Optional Properties:
+- reset-gpios: reference to the GPIO connected to the resetb pin, if any.
+ Active low.
+- powerdown-gpios: reference to the GPIO connected to the pwdn pin, if any.
+ Active high.
+
+The device node must contain one 'port' child node for its digital output
+video port, in accordance with the video interface bindings defined in
+Documentation/devicetree/bindings/media/video-interfaces.txt.
+
+Example:
+
+ i2c1: i2c@f0018000 {
+ ov7670: camera@21 {
+ compatible = "ovti,ov7670";
+ reg = <0x21>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pck0_as_isi_mck &pinctrl_sensor_power &pinctrl_sensor_reset>;
+ reset-gpios = <&pioE 11 GPIO_ACTIVE_LOW>;
+ powerdown-gpios = <&pioE 13 GPIO_ACTIVE_HIGH>;
+ clocks = <&pck0>;
+ clock-names = "xclk";
+ assigned-clocks = <&pck0>;
+ assigned-clock-rates = <25000000>;
+
+ port {
+ ov7670_0: endpoint {
+ remote-endpoint = <&isi_0>;
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/media/mediatek-jpeg-decoder.txt b/Documentation/devicetree/bindings/media/mediatek-jpeg-decoder.txt
new file mode 100644
index 000000000000..3813947b4d4f
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/mediatek-jpeg-decoder.txt
@@ -0,0 +1,37 @@
+* Mediatek JPEG Decoder
+
+Mediatek JPEG Decoder is the JPEG decode hardware present in Mediatek SoCs
+
+Required properties:
+- compatible : must be one of the following string:
+ "mediatek,mt8173-jpgdec"
+ "mediatek,mt2701-jpgdec"
+- reg : physical base address of the jpeg decoder registers and length of
+ memory mapped region.
+- interrupts : interrupt number to the interrupt controller.
+- clocks: device clocks, see
+ Documentation/devicetree/bindings/clock/clock-bindings.txt for details.
+- clock-names: must contain "jpgdec-smi" and "jpgdec".
+- power-domains: a phandle to the power domain, see
+ Documentation/devicetree/bindings/power/power_domain.txt for details.
+- mediatek,larb: must contain the local arbiters in the current SoCs, see
+ Documentation/devicetree/bindings/memory-controllers/mediatek,smi-larb.txt
+ for details.
+- iommus: should point to the respective IOMMU block with master port as
+ argument, see Documentation/devicetree/bindings/iommu/mediatek,iommu.txt
+ for details.
+
+Example:
+ jpegdec: jpegdec@15004000 {
+ compatible = "mediatek,mt2701-jpgdec";
+ reg = <0 0x15004000 0 0x1000>;
+ interrupts = <GIC_SPI 143 IRQ_TYPE_LEVEL_LOW>;
+ clocks = <&imgsys CLK_IMG_JPGDEC_SMI>,
+ <&imgsys CLK_IMG_JPGDEC>;
+ clock-names = "jpgdec-smi",
+ "jpgdec";
+ power-domains = <&scpsys MT2701_POWER_DOMAIN_ISP>;
+ mediatek,larb = <&larb2>;
+ iommus = <&iommu MT2701_M4U_PORT_JPGDEC_WDMA>,
+ <&iommu MT2701_M4U_PORT_JPGDEC_BSDMA>;
+ };
diff --git a/Documentation/devicetree/bindings/media/s5p-cec.txt b/Documentation/devicetree/bindings/media/s5p-cec.txt
index 925ab4d72eaa..4bb08d9d940b 100644
--- a/Documentation/devicetree/bindings/media/s5p-cec.txt
+++ b/Documentation/devicetree/bindings/media/s5p-cec.txt
@@ -15,6 +15,7 @@ Required properties:
- clock-names : from common clock binding: must contain "hdmicec",
corresponding to entry in the clocks property.
- samsung,syscon-phandle - phandle to the PMU system controller
+ - hdmi-phandle - phandle to the HDMI controller
Example:
@@ -25,6 +26,7 @@ hdmicec: cec@100B0000 {
clocks = <&clock CLK_HDMI_CEC>;
clock-names = "hdmicec";
samsung,syscon-phandle = <&pmu_system_controller>;
+ hdmi-phandle = <&hdmi>;
pinctrl-names = "default";
pinctrl-0 = <&hdmi_cec>;
status = "okay";
diff --git a/Documentation/devicetree/bindings/media/s5p-mfc.txt b/Documentation/devicetree/bindings/media/s5p-mfc.txt
index 2c901286d818..d3404b5d4d17 100644
--- a/Documentation/devicetree/bindings/media/s5p-mfc.txt
+++ b/Documentation/devicetree/bindings/media/s5p-mfc.txt
@@ -28,7 +28,7 @@ Optional properties:
- memory-region : from reserved memory binding: phandles to two reserved
memory regions, first is for "left" mfc memory bus interfaces,
 second is for the "right" mfc memory bus, used when no SYSMMU
- support is available
+ support is available; used only by MFC v5 present in Exynos4 SoCs
Obsolete properties:
- samsung,mfc-r, samsung,mfc-l : support removed, please use memory-region
diff --git a/Documentation/devicetree/bindings/media/stih-cec.txt b/Documentation/devicetree/bindings/media/stih-cec.txt
index 71c4b2f4bcef..289a08b33651 100644
--- a/Documentation/devicetree/bindings/media/stih-cec.txt
+++ b/Documentation/devicetree/bindings/media/stih-cec.txt
@@ -9,6 +9,7 @@ Required properties:
- pinctrl-names: Contains only one value - "default"
- pinctrl-0: Specifies the pin control groups used for CEC hardware.
- resets: Reference to a reset controller
+ - hdmi-phandle: Phandle to the HDMI controller
Example for STIH407:
@@ -22,4 +23,5 @@ sti-cec@094a087c {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_cec0_default>;
resets = <&softreset STIH407_LPM_SOFTRESET>;
+ hdmi-phandle = <&hdmi>;
};
diff --git a/Documentation/devicetree/bindings/media/ti,da850-vpif.txt b/Documentation/devicetree/bindings/media/ti,da850-vpif.txt
index 6d25d7f23d26..df7182a63e59 100644
--- a/Documentation/devicetree/bindings/media/ti,da850-vpif.txt
+++ b/Documentation/devicetree/bindings/media/ti,da850-vpif.txt
@@ -16,8 +16,10 @@ Required properties:
Video Capture:
VPIF has a 16-bit parallel bus input, supporting 2 8-bit channels or a
-single 16-bit channel. It should contain at least one port child node
-with child 'endpoint' node. Please refer to the bindings defined in
+single 16-bit channel. It should contain one or two port child nodes
+with child 'endpoint' nodes. If there are two ports then port@0 must
+describe the input channel and port@1 the output channel. Please refer to the
+bindings defined in
Documentation/devicetree/bindings/media/video-interfaces.txt.
Example using 2 8-bit input channels, one of which is connected to an
@@ -28,17 +30,24 @@ I2C-connected TVP5147 decoder:
reg = <0x217000 0x1000>;
interrupts = <92>;
- port {
- vpif_ch0: endpoint@0 {
- reg = <0>;
- bus-width = <8>;
- remote-endpoint = <&composite>;
+ port@0 {
+ vpif_input_ch0: endpoint@0 {
+ reg = <0>;
+ bus-width = <8>;
+ remote-endpoint = <&composite_in>;
+ };
+
+ vpif_input_ch1: endpoint@1 {
+ reg = <1>;
+ bus-width = <8>;
+ data-shift = <8>;
};
+ };
- vpif_ch1: endpoint@1 {
- reg = <1>;
- bus-width = <8>;
- data-shift = <8>;
+ port@1 {
+ vpif_output_ch0: endpoint {
+ bus-width = <8>;
+ remote-endpoint = <&composite_out>;
};
};
};
@@ -53,13 +62,28 @@ I2C-connected TVP5147 decoder:
status = "okay";
port {
- composite: endpoint {
+ composite_in: endpoint {
hsync-active = <1>;
vsync-active = <1>;
pclk-sample = <0>;
/* VPIF channel 0 (lower 8-bits) */
- remote-endpoint = <&vpif_ch0>;
+ remote-endpoint = <&vpif_input_ch0>;
+ bus-width = <8>;
+ };
+ };
+ };
+
+ adv7343@2a {
+ compatible = "adi,adv7343";
+ reg = <0x2a>;
+
+ port {
+ composite_out: endpoint {
+ adi,dac-enable = <1 1 1>;
+ adi,sd-dac-enable = <1>;
+
+ remote-endpoint = <&vpif_output_ch0>;
bus-width = <8>;
};
};
diff --git a/Documentation/devicetree/bindings/mfd/atmel-hlcdc.txt b/Documentation/devicetree/bindings/mfd/atmel-hlcdc.txt
index 670831b29565..eec40be7f79a 100644
--- a/Documentation/devicetree/bindings/mfd/atmel-hlcdc.txt
+++ b/Documentation/devicetree/bindings/mfd/atmel-hlcdc.txt
@@ -15,7 +15,7 @@ Required properties:
The HLCDC IP exposes two subdevices:
- a PWM chip: see ../pwm/atmel-hlcdc-pwm.txt
- - a Display Controller: see ../display/atmel-hlcdc-dc.txt
+ - a Display Controller: see ../display/atmel/hlcdc-dc.txt
Example:
diff --git a/Documentation/devicetree/bindings/mtd/jedec,spi-nor.txt b/Documentation/devicetree/bindings/mtd/jedec,spi-nor.txt
index 3e920ec5c4d3..9ce35af8507c 100644
--- a/Documentation/devicetree/bindings/mtd/jedec,spi-nor.txt
+++ b/Documentation/devicetree/bindings/mtd/jedec,spi-nor.txt
@@ -40,6 +40,7 @@ Required properties:
w25x80
w25x32
w25q32
+ w25q64
w25q32dw
w25q80bl
w25q128
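For illustration, a flash node using the newly listed chip could be described
as below; the vendor-prefixed compatible, SPI chip select and maximum
frequency are assumptions for the sketch, not part of the patch:

	flash@0 {
		compatible = "winbond,w25q64", "jedec,spi-nor";
		reg = <0>;				/* assumed chip select */
		spi-max-frequency = <40000000>;		/* assumed bus speed */
	};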
diff --git a/Documentation/devicetree/bindings/net/faraday,ftmac.txt b/Documentation/devicetree/bindings/net/faraday,ftmac.txt
new file mode 100644
index 000000000000..be4f55e23bf7
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/faraday,ftmac.txt
@@ -0,0 +1,24 @@
+Faraday Ethernet Controller
+
+Required properties:
+
+- compatible : Must contain "faraday,ftmac", as well as one of
+ the SoC specific identifiers:
+ "andestech,atmac100"
+ "moxa,moxart-mac"
+- reg : Should contain register location and length
+- interrupts : Should contain the mac interrupt number
+
+Example:
+
+ mac0: mac@90900000 {
+ compatible = "moxa,moxart-mac";
+ reg = <0x90900000 0x100>;
+ interrupts = <25 0>;
+ };
+
+ mac1: mac@92000000 {
+ compatible = "moxa,moxart-mac";
+ reg = <0x92000000 0x100>;
+ interrupts = <27 0>;
+ };
diff --git a/Documentation/devicetree/bindings/net/marvell,prestera.txt b/Documentation/devicetree/bindings/net/marvell,prestera.txt
index 5fbab29718e8..c329608fa887 100644
--- a/Documentation/devicetree/bindings/net/marvell,prestera.txt
+++ b/Documentation/devicetree/bindings/net/marvell,prestera.txt
@@ -32,19 +32,16 @@ DFX Server bindings
-------------------
Required properties:
-- compatible: must be "marvell,dfx-server"
+- compatible: must be "marvell,dfx-server", "simple-bus"
+- ranges: describes the address mapping of a memory-mapped bus.
- reg: address and length of the register set for the device.
Example:
-dfx-registers {
- compatible = "simple-bus";
+dfx-server {
+ compatible = "marvell,dfx-server", "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
ranges = <0 MBUS_ID(0x08, 0x00) 0 0x100000>;
-
- dfx: dfx@0 {
- compatible = "marvell,dfx-server";
- reg = <0 0x100000>;
- };
+ reg = <MBUS_ID(0x08, 0x00) 0 0x100000>;
};
diff --git a/Documentation/devicetree/bindings/net/moxa,moxart-mac.txt b/Documentation/devicetree/bindings/net/moxa,moxart-mac.txt
deleted file mode 100644
index 583418b2c127..000000000000
--- a/Documentation/devicetree/bindings/net/moxa,moxart-mac.txt
+++ /dev/null
@@ -1,21 +0,0 @@
-MOXA ART Ethernet Controller
-
-Required properties:
-
-- compatible : Must be "moxa,moxart-mac"
-- reg : Should contain register location and length
-- interrupts : Should contain the mac interrupt number
-
-Example:
-
- mac0: mac@90900000 {
- compatible = "moxa,moxart-mac";
- reg = <0x90900000 0x100>;
- interrupts = <25 0>;
- };
-
- mac1: mac@92000000 {
- compatible = "moxa,moxart-mac";
- reg = <0x92000000 0x100>;
- interrupts = <27 0>;
- };
diff --git a/Documentation/devicetree/bindings/nvmem/allwinner,sunxi-sid.txt b/Documentation/devicetree/bindings/nvmem/allwinner,sunxi-sid.txt
index d543ed3f5363..ef06d061913c 100644
--- a/Documentation/devicetree/bindings/nvmem/allwinner,sunxi-sid.txt
+++ b/Documentation/devicetree/bindings/nvmem/allwinner,sunxi-sid.txt
@@ -1,7 +1,11 @@
Allwinner sunxi-sid
Required properties:
-- compatible: "allwinner,sun4i-a10-sid" or "allwinner,sun7i-a20-sid"
+- compatible: Should be one of the following:
+ "allwinner,sun4i-a10-sid"
+ "allwinner,sun7i-a20-sid"
+ "allwinner,sun8i-h3-sid"
+
- reg: Should contain registers location and length
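A node using the newly added H3 compatible might look like the following
sketch; the register base and size are assumptions drawn from typical H3
device trees, not from this patch:

	sid: eeprom@1c14000 {
		compatible = "allwinner,sun8i-h3-sid";
		reg = <0x01c14000 0x400>;	/* assumed base/size */
	};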
= Data cells =
diff --git a/Documentation/devicetree/bindings/nvmem/imx-iim.txt b/Documentation/devicetree/bindings/nvmem/imx-iim.txt
new file mode 100644
index 000000000000..1978c5bcd96d
--- /dev/null
+++ b/Documentation/devicetree/bindings/nvmem/imx-iim.txt
@@ -0,0 +1,22 @@
+Freescale i.MX IC Identification Module (IIM) device tree bindings
+
+This binding represents the IC Identification Module (IIM) found on
+i.MX25, i.MX27, i.MX31, i.MX35, i.MX51 and i.MX53 SoCs.
+
+Required properties:
+- compatible: should be one of
+ "fsl,imx25-iim", "fsl,imx27-iim",
+ "fsl,imx31-iim", "fsl,imx35-iim",
+ "fsl,imx51-iim", "fsl,imx53-iim",
+- reg: Should contain the register base and length.
+- interrupts: Should contain the interrupt for the IIM
+- clocks: Should contain a phandle pointing to the gated peripheral clock.
+
+Example:
+
+ iim: iim@63f98000 {
+ compatible = "fsl,imx53-iim", "fsl,imx27-iim";
+ reg = <0x63f98000 0x4000>;
+ interrupts = <69>;
+ clocks = <&clks IMX5_CLK_IIM_GATE>;
+ };
diff --git a/Documentation/devicetree/bindings/nvmem/imx-ocotp.txt b/Documentation/devicetree/bindings/nvmem/imx-ocotp.txt
index 966a72ecc6bd..70d791b03ea1 100644
--- a/Documentation/devicetree/bindings/nvmem/imx-ocotp.txt
+++ b/Documentation/devicetree/bindings/nvmem/imx-ocotp.txt
@@ -9,14 +9,19 @@ Required properties:
"fsl,imx6sl-ocotp" (i.MX6SL), or
"fsl,imx6sx-ocotp" (i.MX6SX),
"fsl,imx6ul-ocotp" (i.MX6UL),
+ "fsl,imx7d-ocotp" (i.MX7D/S),
followed by "syscon".
- reg: Should contain the register base and length.
- clocks: Should contain a phandle pointing to the gated peripheral clock.
+Optional properties:
+- read-only: disable write access
+
Example:
ocotp: ocotp@021bc000 {
compatible = "fsl,imx6q-ocotp", "syscon";
reg = <0x021bc000 0x4000>;
clocks = <&clks IMX6QDL_CLK_IIM>;
+ read-only;
};
diff --git a/Documentation/devicetree/bindings/pci/designware-pcie.txt b/Documentation/devicetree/bindings/pci/designware-pcie.txt
index 1392c705ceca..b2480dd38c11 100644
--- a/Documentation/devicetree/bindings/pci/designware-pcie.txt
+++ b/Documentation/devicetree/bindings/pci/designware-pcie.txt
@@ -6,30 +6,40 @@ Required properties:
- reg-names: Must be "config" for the PCIe configuration space.
(The old way of getting the configuration address space from "ranges"
is deprecated and should be avoided.)
+- num-lanes: number of lanes to use
+RC mode:
- #address-cells: set to <3>
- #size-cells: set to <2>
- device_type: set to "pci"
- ranges: ranges for the PCI memory and I/O regions
- #interrupt-cells: set to <1>
-- interrupt-map-mask and interrupt-map: standard PCI properties
- to define the mapping of the PCIe interface to interrupt
+- interrupt-map-mask and interrupt-map: standard PCI
+ properties to define the mapping of the PCIe interface to interrupt
numbers.
-- num-lanes: number of lanes to use
+EP mode:
+- num-ib-windows: number of inbound address translation
+ windows
+- num-ob-windows: number of outbound address translation
+ windows
Optional properties:
-- num-viewport: number of view ports configured in hardware. If a platform
- does not specify it, the driver assumes 2.
- num-lanes: number of lanes to use (this property should be specified unless
the link is brought already up in BIOS)
- reset-gpio: gpio pin number of power good signal
-- bus-range: PCI bus numbers covered (it is recommended for new devicetrees to
- specify this property, to keep backwards compatibility a range of 0x00-0xff
- is assumed if not present)
- clocks: Must contain an entry for each entry in clock-names.
See ../clocks/clock-bindings.txt for details.
- clock-names: Must include the following entries:
- "pcie"
- "pcie_bus"
+RC mode:
+- num-viewport: number of view ports configured in
+ hardware. If a platform does not specify it, the driver assumes 2.
+- bus-range: PCI bus numbers covered (it is recommended
+ for new devicetrees to specify this property; to keep backwards
+ compatibility, a range of 0x00-0xff is assumed if not present)
+EP mode:
+- max-functions: maximum number of functions that can be
+ configured
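A minimal sketch of an endpoint-mode node using the EP properties above might
look as follows; the compatible string, register names and layout, and window
counts are assumptions for illustration only, not part of this patch:

	pcie_ep: pcie-ep@dfc00000 {
		compatible = "snps,dw-pcie-ep";		/* assumed platform glue */
		reg = <0xdfc00000 0x1000>, <0xd0000000 0x2000000>;
		reg-names = "dbi", "addr_space";	/* assumed names */
		num-ib-windows = <4>;
		num-ob-windows = <4>;
		num-lanes = <1>;
		max-functions = /bits/ 8 <1>;
	};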
Example configuration:
diff --git a/Documentation/devicetree/bindings/pci/faraday,ftpci100.txt b/Documentation/devicetree/bindings/pci/faraday,ftpci100.txt
new file mode 100644
index 000000000000..35d4a979bb7b
--- /dev/null
+++ b/Documentation/devicetree/bindings/pci/faraday,ftpci100.txt
@@ -0,0 +1,129 @@
+Faraday Technology FTPCI100 PCI Host Bridge
+
+This PCI bridge is found inside the Cortina Systems Gemini SoC platform and
+is a generic IP block from Faraday Technology. It exists in two variants:
+plain and dual PCI. The plain version embeds a cascading interrupt controller
+into the host bridge. The dual version routes the interrupts to the host
+chip's interrupt controller.
+
+The host controller appears on the PCI bus with vendor ID 0x159b (Faraday
+Technology) and product ID 0x4321.
+
+Mandatory properties:
+
+- compatible: ranging from specific to generic, should be one of
+ "cortina,gemini-pci", "faraday,ftpci100"
+ "cortina,gemini-pci-dual", "faraday,ftpci100-dual"
+ "faraday,ftpci100"
+ "faraday,ftpci100-dual"
+- reg: memory base and size for the host bridge
+- #address-cells: set to <3>
+- #size-cells: set to <2>
+- #interrupt-cells: set to <1>
+- bus-range: set to <0x00 0xff>
+- device_type: set to "pci"
+- ranges: see pci.txt
+- interrupt-map-mask: see pci.txt
+- interrupt-map: see pci.txt
+- dma-ranges: three ranges for the inbound memory region. The ranges must
+ be aligned to a 1MB boundary, and may be 1MB, 2MB, 4MB, 8MB, 16MB, 32MB, 64MB,
+ 128MB, 256MB, 512MB, 1GB or 2GB in size. The memory should be marked as
+ pre-fetchable.
+
+Mandatory subnodes:
+- For "faraday,ftpci100" a node representing the interrupt-controller inside the
+ host bridge is mandatory. It has the following mandatory properties:
+ - interrupt: see interrupt-controller/interrupts.txt
+ - interrupt-parent: see interrupt-controller/interrupts.txt
+ - interrupt-controller: see interrupt-controller/interrupts.txt
+ - #address-cells: set to <0>
+ - #interrupt-cells: set to <1>
+
+I/O space considerations:
+
+The plain variant has 128MiB of non-prefetchable memory space, whereas the
+"dual" variant has 64MiB. Take this into account when describing the ranges.
+
+Interrupt map considerations:
+
+The "dual" variant will get INT A, B, C, D from the system interrupt controller
+and should point to respective interrupt in that controller in its
+interrupt-map.
+
+The driver code, which is the only documentation of how the plain (non-dual)
+Faraday PCI variant assigns its default interrupt mapping/swizzling, has
+typically looked like this, doing the swizzling on the interrupt controller
+side rather than in the interconnect:
+
+interrupt-map-mask = <0xf800 0 0 7>;
+interrupt-map =
+ <0x4800 0 0 1 &pci_intc 0>, /* Slot 9 */
+ <0x4800 0 0 2 &pci_intc 1>,
+ <0x4800 0 0 3 &pci_intc 2>,
+ <0x4800 0 0 4 &pci_intc 3>,
+ <0x5000 0 0 1 &pci_intc 1>, /* Slot 10 */
+ <0x5000 0 0 2 &pci_intc 2>,
+ <0x5000 0 0 3 &pci_intc 3>,
+ <0x5000 0 0 4 &pci_intc 0>,
+ <0x5800 0 0 1 &pci_intc 2>, /* Slot 11 */
+ <0x5800 0 0 2 &pci_intc 3>,
+ <0x5800 0 0 3 &pci_intc 0>,
+ <0x5800 0 0 4 &pci_intc 1>,
+ <0x6000 0 0 1 &pci_intc 3>, /* Slot 12 */
+ <0x6000 0 0 2 &pci_intc 0>,
+ <0x6000 0 0 3 &pci_intc 1>,
+ <0x6000 0 0 4 &pci_intc 2>;
+
+Example:
+
+pci@50000000 {
+ compatible = "cortina,gemini-pci", "faraday,ftpci100";
+ reg = <0x50000000 0x100>;
+ interrupts = <8 IRQ_TYPE_LEVEL_HIGH>, /* PCI A */
+ <26 IRQ_TYPE_LEVEL_HIGH>, /* PCI B */
+ <27 IRQ_TYPE_LEVEL_HIGH>, /* PCI C */
+ <28 IRQ_TYPE_LEVEL_HIGH>; /* PCI D */
+ #address-cells = <3>;
+ #size-cells = <2>;
+ #interrupt-cells = <1>;
+
+ bus-range = <0x00 0xff>;
+ ranges = /* 1MiB I/O space 0x50000000-0x500fffff */
+ <0x01000000 0 0 0x50000000 0 0x00100000>,
+ /* 128MiB non-prefetchable memory 0x58000000-0x5fffffff */
+ <0x02000000 0 0x58000000 0x58000000 0 0x08000000>;
+
+ /* DMA ranges */
+ dma-ranges =
+ /* 128MiB at 0x00000000-0x07ffffff */
+ <0x02000000 0 0x00000000 0x00000000 0 0x08000000>,
+ /* 64MiB at 0x00000000-0x03ffffff */
+ <0x02000000 0 0x00000000 0x00000000 0 0x04000000>,
+ /* 64MiB at 0x00000000-0x03ffffff */
+ <0x02000000 0 0x00000000 0x00000000 0 0x04000000>;
+
+ interrupt-map-mask = <0xf800 0 0 7>;
+ interrupt-map =
+ <0x4800 0 0 1 &pci_intc 0>, /* Slot 9 */
+ <0x4800 0 0 2 &pci_intc 1>,
+ <0x4800 0 0 3 &pci_intc 2>,
+ <0x4800 0 0 4 &pci_intc 3>,
+ <0x5000 0 0 1 &pci_intc 1>, /* Slot 10 */
+ <0x5000 0 0 2 &pci_intc 2>,
+ <0x5000 0 0 3 &pci_intc 3>,
+ <0x5000 0 0 4 &pci_intc 0>,
+ <0x5800 0 0 1 &pci_intc 2>, /* Slot 11 */
+ <0x5800 0 0 2 &pci_intc 3>,
+ <0x5800 0 0 3 &pci_intc 0>,
+ <0x5800 0 0 4 &pci_intc 1>,
+ <0x6000 0 0 1 &pci_intc 3>, /* Slot 12 */
+ <0x6000 0 0 2 &pci_intc 0>,
+ <0x6000 0 0 3 &pci_intc 1>,
+ <0x6000 0 0 4 &pci_intc 2>;
+ pci_intc: interrupt-controller {
+ interrupt-parent = <&intcon>;
+ interrupt-controller;
+ #address-cells = <0>;
+ #interrupt-cells = <1>;
+ };
+};
diff --git a/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.txt b/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.txt
index 83aeb1f5a645..e3d5680875b1 100644
--- a/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.txt
+++ b/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.txt
@@ -4,7 +4,11 @@ This PCIe host controller is based on the Synopsis Designware PCIe IP
and thus inherits all the common properties defined in designware-pcie.txt.
Required properties:
-- compatible: "fsl,imx6q-pcie", "fsl,imx6sx-pcie", "fsl,imx6qp-pcie"
+- compatible:
+ - "fsl,imx6q-pcie"
+ - "fsl,imx6sx-pcie",
+ - "fsl,imx6qp-pcie"
+ - "fsl,imx7d-pcie"
- reg: base address and length of the PCIe controller
- interrupts: A list of interrupt outputs of the controller. Must contain an
entry for each entry in the interrupt-names property.
@@ -34,6 +38,14 @@ Additional required properties for imx6sx-pcie:
- clock names: Must include the following additional entries:
- "pcie_inbound_axi"
+Additional required properties for imx7d-pcie:
+- power-domains: Must be set to a phandle pointing to PCIE_PHY power domain
+- resets: Must contain phandles to PCIe-related reset lines exposed by SRC
+ IP block
+- reset-names: Must contain the following entries:
+ - "pciephy"
+ - "apps"
+
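A fragment showing how these i.MX7D-specific properties might be wired up;
the phandle names and reset specifiers are illustrative and assume the i.MX7
power-domain and reset bindings described elsewhere in this series:

	power-domains = <&pgc_pcie_phy>;
	resets = <&src IMX7_RESET_PCIEPHY>,
		 <&src IMX7_RESET_PCIE_CTRL_APPS_EN>;
	reset-names = "pciephy", "apps";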
Example:
pcie@0x01000000 {
diff --git a/Documentation/devicetree/bindings/pci/ti-pci.txt b/Documentation/devicetree/bindings/pci/ti-pci.txt
index 60e25161f351..6a07c96227e0 100644
--- a/Documentation/devicetree/bindings/pci/ti-pci.txt
+++ b/Documentation/devicetree/bindings/pci/ti-pci.txt
@@ -1,17 +1,22 @@
TI PCI Controllers
PCIe Designware Controller
- - compatible: Should be "ti,dra7-pcie""
- - reg : Two register ranges as listed in the reg-names property
- - reg-names : The first entry must be "ti-conf" for the TI specific registers
- The second entry must be "rc-dbics" for the designware pcie
- registers
- The third entry must be "config" for the PCIe configuration space
+ - compatible: Should be "ti,dra7-pcie" for RC
+ Should be "ti,dra7-pcie-ep" for EP
- phys : list of PHY specifiers (used by generic PHY framework)
- phy-names : must be "pcie-phy0", "pcie-phy1", "pcie-phyN".. based on the
number of PHYs as specified in *phys* property.
- ti,hwmods : Name of the hwmod associated to the pcie, "pcie<X>",
where <X> is the instance number of the pcie from the HW spec.
+ - num-lanes as specified in ../designware-pcie.txt
+
+HOST MODE
+=========
+ - reg : Two register ranges as listed in the reg-names property
+ - reg-names : The first entry must be "ti-conf" for the TI specific registers
+ The second entry must be "rc-dbics" for the DesignWare PCIe
+ registers
+ The third entry must be "config" for the PCIe configuration space
- interrupts : Two interrupt entries must be specified. The first one is for
main interrupt line and the second for MSI interrupt line.
- #address-cells,
@@ -19,13 +24,36 @@ PCIe Designware Controller
#interrupt-cells,
device_type,
ranges,
- num-lanes,
interrupt-map-mask,
interrupt-map : as specified in ../designware-pcie.txt
+DEVICE MODE
+===========
+ - reg : Four register ranges as listed in the reg-names property
+ - reg-names : "ti-conf" for the TI specific registers
+ "ep_dbics" for the standard configuration registers as
+ they are locally accessed within the DIF CS space
+ "ep_dbics2" for the standard configuration registers as
+ they are locally accessed within the DIF CS2 space
+ "addr_space" used to map remote RC address space
+ - interrupts : one interrupt entry must be specified for the main interrupt.
+ - num-ib-windows : number of inbound address translation windows
+ - num-ob-windows : number of outbound address translation windows
+ - ti,syscon-unaligned-access: phandle to the syscon DT node. The 1st argument
+ should contain the register offset within syscon
+ and the 2nd argument should contain the
+ position of the bit that enables unaligned
+ access.
+
Optional Property:
- gpios : Should be added if a gpio line is required to drive PERST# line
+NOTE: Two DT nodes may be added for each PCI controller; one for host
+mode and another for device mode. In order for PCI to work in host mode,
+the EP mode DT node should be disabled, and in order for PCI to work in
+EP mode, the host mode DT node should be disabled. Host mode and EP
+mode are mutually exclusive.
+
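A device-mode node using the properties listed above might look like the
sketch below, where all addresses, interrupt numbers, phandles and the syscon
offset/bit are illustrative assumptions rather than values from this patch:

	pcie_ep: pcie_ep@51002000 {
		compatible = "ti,dra7-pcie-ep";
		reg = <0x51002000 0x14c>, <0x51000000 0x2000>,
		      <0x51001000 0x2000>, <0x1000 0x10000000>;
		reg-names = "ti-conf", "ep_dbics", "ep_dbics2", "addr_space";
		interrupts = <0 232 0x4>;			/* assumed */
		num-lanes = <1>;
		num-ib-windows = <4>;
		num-ob-windows = <16>;
		ti,hwmods = "pcie1";
		phys = <&pcie1_phy>;				/* assumed phandle */
		phy-names = "pcie-phy0";
		ti,syscon-unaligned-access = <&scm_conf1 0x14 1>; /* assumed */
	};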
Example:
axi {
compatible = "simple-bus";
diff --git a/Documentation/devicetree/bindings/phy/phy-mt65xx-usb.txt b/Documentation/devicetree/bindings/phy/phy-mt65xx-usb.txt
index 33a2b1ee3f3e..0acc5a99fb79 100644
--- a/Documentation/devicetree/bindings/phy/phy-mt65xx-usb.txt
+++ b/Documentation/devicetree/bindings/phy/phy-mt65xx-usb.txt
@@ -6,12 +6,11 @@ This binding describes a usb3.0 phy for mt65xx platforms of Medaitek SoC.
Required properties (controller (parent) node):
- compatible : should be one of
"mediatek,mt2701-u3phy"
+ "mediatek,mt2712-u3phy"
"mediatek,mt8173-u3phy"
- - reg : offset and length of register for phy, exclude port's
- register.
- - clocks : a list of phandle + clock-specifier pairs, one for each
- entry in clock-names
- - clock-names : must contain
+ - clocks : (deprecated, use port's clocks instead) a list of phandle +
+ clock-specifier pairs, one for each entry in clock-names
+ - clock-names : (deprecated, use the port's clock-names instead) must contain
"u3phya_ref": for reference clock of usb3.0 analog phy.
Required nodes : a sub-node is required for each port the controller
@@ -19,8 +18,19 @@ Required nodes : a sub-node is required for each port the controller
'reg' property is used inside these nodes to describe
the controller's topology.
+Optional properties (controller (parent) node):
+ - reg : offset and length of the register region shared by multiple
+ ports, excluding each port's private registers. It is needed on
+ mt2701 and mt8173, but not on mt2712.
+
Required properties (port (child) node):
- reg : address and length of the register set for the port.
+- clocks : a list of phandle + clock-specifier pairs, one for each
+ entry in clock-names
+- clock-names : must contain
+ "ref": 48M reference clock for HighSpeed analog phy; and 26M
+ reference clock for SuperSpeed analog phy, sometimes is
+ 24M, 25M or 27M, depended on platform.
- #phy-cells : should be 1 (See second example)
cell after port phandle is phy type from:
- PHY_TYPE_USB2
@@ -31,21 +41,31 @@ Example:
u3phy: usb-phy@11290000 {
compatible = "mediatek,mt8173-u3phy";
reg = <0 0x11290000 0 0x800>;
- clocks = <&apmixedsys CLK_APMIXED_REF2USB_TX>;
- clock-names = "u3phya_ref";
#address-cells = <2>;
#size-cells = <2>;
ranges;
status = "okay";
- phy_port0: port@11290800 {
- reg = <0 0x11290800 0 0x800>;
+ u2port0: usb-phy@11290800 {
+ reg = <0 0x11290800 0 0x100>;
+ clocks = <&apmixedsys CLK_APMIXED_REF2USB_TX>;
+ clock-names = "ref";
#phy-cells = <1>;
status = "okay";
};
- phy_port1: port@11291000 {
- reg = <0 0x11291000 0 0x800>;
+ u3port0: usb-phy@11290900 {
+ reg = <0 0x11290900 0 0x700>;
+ clocks = <&clk26m>;
+ clock-names = "ref";
+ #phy-cells = <1>;
+ status = "okay";
+ };
+
+ u2port1: usb-phy@11291000 {
+ reg = <0 0x11291000 0 0x100>;
+ clocks = <&apmixedsys CLK_APMIXED_REF2USB_TX>;
+ clock-names = "ref";
#phy-cells = <1>;
status = "okay";
};
@@ -64,7 +84,54 @@ Example:
usb30: usb@11270000 {
...
- phys = <&phy_port0 PHY_TYPE_USB3>;
- phy-names = "usb3-0";
+ phys = <&u2port0 PHY_TYPE_USB2>, <&u3port0 PHY_TYPE_USB3>;
+ phy-names = "usb2-0", "usb3-0";
...
};
+
+
+Layout differences of banks between mt8173/mt2701 and mt2712
+-------------------------------------------------------------
+mt8173 and mt2701:
+port offset bank
+shared 0x0000 SPLLC
+ 0x0100 FMREG
+u2 port0 0x0800 U2PHY_COM
+u3 port0 0x0900 U3PHYD
+ 0x0a00 U3PHYD_BANK2
+ 0x0b00 U3PHYA
+ 0x0c00 U3PHYA_DA
+u2 port1 0x1000 U2PHY_COM
+u3 port1 0x1100 U3PHYD
+ 0x1200 U3PHYD_BANK2
+ 0x1300 U3PHYA
+ 0x1400 U3PHYA_DA
+u2 port2 0x1800 U2PHY_COM
+ ...
+
+mt2712:
+port offset bank
+u2 port0 0x0000 MISC
+ 0x0100 FMREG
+ 0x0300 U2PHY_COM
+u3 port0 0x0700 SPLLC
+ 0x0800 CHIP
+ 0x0900 U3PHYD
+ 0x0a00 U3PHYD_BANK2
+ 0x0b00 U3PHYA
+ 0x0c00 U3PHYA_DA
+u2 port1 0x1000 MISC
+ 0x1100 FMREG
+ 0x1300 U2PHY_COM
+u3 port1 0x1700 SPLLC
+ 0x1800 CHIP
+ 0x1900 U3PHYD
+ 0x1a00 U3PHYD_BANK2
+ 0x1b00 U3PHYA
+ 0x1c00 U3PHYA_DA
+u2 port2 0x2000 MISC
+ ...
+
+The SPLLC bank shared by u3 ports and the FMREG bank shared by u2 ports on
+mt8173/mt2701 are instead placed in each port on mt2712, and new banks are
+added there: MISC for u2 ports and CHIP for u3 ports.
diff --git a/Documentation/devicetree/bindings/phy/phy-rockchip-inno-usb2.txt b/Documentation/devicetree/bindings/phy/phy-rockchip-inno-usb2.txt
index 3c29c77a7018..e71a8d23f4a8 100644
--- a/Documentation/devicetree/bindings/phy/phy-rockchip-inno-usb2.txt
+++ b/Documentation/devicetree/bindings/phy/phy-rockchip-inno-usb2.txt
@@ -2,6 +2,7 @@ ROCKCHIP USB2.0 PHY WITH INNO IP BLOCK
Required properties (phy (parent) node):
- compatible : should be one of the listed compatibles:
+ * "rockchip,rk3328-usb2phy"
* "rockchip,rk3366-usb2phy"
* "rockchip,rk3399-usb2phy"
- reg : the address offset of grf for usb-phy configuration.
@@ -11,6 +12,11 @@ Required properties (phy (parent) node):
Optional properties:
- clocks : phandle + phy specifier pair, for the input clock of phy.
- clock-names : input clock name of phy, must be "phyclk".
+ - assigned-clocks : phandle of usb 480m clock.
+ - assigned-clock-parents : parent of usb 480m clock, select between
+ usb-phy output 480m and xin24m.
+ Refer to clk/clock-bindings.txt for generic clock
+ consumer properties.
Required nodes : a sub-node is required for each port the phy provides.
The sub-node name is used to identify host or otg port,
diff --git a/Documentation/devicetree/bindings/phy/qcom-qmp-phy.txt b/Documentation/devicetree/bindings/phy/qcom-qmp-phy.txt
new file mode 100644
index 000000000000..e11c563a65ec
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/qcom-qmp-phy.txt
@@ -0,0 +1,106 @@
+Qualcomm QMP PHY controller
+===========================
+
+QMP phy controller supports physical layer functionality for a number of
+controllers on Qualcomm chipsets, such as PCIe, UFS, and USB.
+
+Required properties:
+ - compatible: compatible list, contains:
+ "qcom,msm8996-qmp-pcie-phy" for 14nm PCIe phy on msm8996,
+ "qcom,msm8996-qmp-usb3-phy" for 14nm USB3 phy on msm8996.
+
+ - reg: offset and length of register set for PHY's common serdes block.
+
+ - #clock-cells: must be 1
+ - The PHY PLL outputs a number of clocks for the Tx, Rx and Pipe
+ interface (for pipe based PHYs). These clocks are then gate-controlled
+ by the GCC.
+ - #address-cells: must be 1
+ - #size-cells: must be 1
+ - ranges: must be present
+
+ - clocks: a list of phandles and clock-specifier pairs,
+ one for each entry in clock-names.
+ - clock-names: "cfg_ahb" for phy config clock,
+ "aux" for phy aux clock,
+ "ref" for 19.2 MHz ref clk,
+ For "qcom,msm8996-qmp-pcie-phy" must contain:
+ "aux", "cfg_ahb", "ref".
+ For "qcom,msm8996-qmp-usb3-phy" must contain:
+ "aux", "cfg_ahb", "ref".
+
+ - resets: a list of phandles and reset controller specifier pairs,
+ one for each entry in reset-names.
+ - reset-names: "phy" for reset of phy block,
+ "common" for phy common block reset,
+ "cfg" for phy's ahb cfg block reset (Optional).
+ For "qcom,msm8996-qmp-pcie-phy" must contain:
+ "phy", "common", "cfg".
+ For "qcom,msm8996-qmp-usb3-phy" must contain
+ "phy", "common".
+
+ - vdda-phy-supply: Phandle to a regulator supply to PHY core block.
+ - vdda-pll-supply: Phandle to 1.8V regulator supply to PHY refclk pll block.
+
+Optional properties:
+ - vddp-ref-clk-supply: Phandle to a regulator supply to any specific refclk
+ pll block.
+
+Required nodes:
+ - Each device node of QMP phy is required to have as many child nodes as
+ the number of lanes the PHY has.
+
+Required properties for child node:
+ - reg: list of offset and length pairs of register sets for PHY blocks -
+ tx, rx and pcs.
+
+ - #phy-cells: must be 0
+
+ - clocks: a list of phandles and clock-specifier pairs,
+ one for each entry in clock-names.
+ - clock-names: Must contain following for pcie and usb qmp phys:
+ "pipe<lane-number>" for pipe clock specific to each lane.
+
+ - resets: a list of phandles and reset controller specifier pairs,
+ one for each entry in reset-names.
+ - reset-names: Must contain following for pcie qmp phys:
+ "lane<lane-number>" for reset specific to each lane.
+
+Example:
+ phy@34000 {
+ compatible = "qcom,msm8996-qmp-pcie-phy";
+ reg = <0x34000 0x488>;
+ #clock-cells = <1>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ clocks = <&gcc GCC_PCIE_PHY_AUX_CLK>,
+ <&gcc GCC_PCIE_PHY_CFG_AHB_CLK>,
+ <&gcc GCC_PCIE_CLKREF_CLK>;
+ clock-names = "aux", "cfg_ahb", "ref";
+
+ vdda-phy-supply = <&pm8994_l28>;
+ vdda-pll-supply = <&pm8994_l12>;
+
+ resets = <&gcc GCC_PCIE_PHY_BCR>,
+ <&gcc GCC_PCIE_PHY_COM_BCR>,
+ <&gcc GCC_PCIE_PHY_COM_NOCSR_BCR>;
+ reset-names = "phy", "common", "cfg";
+
+ pciephy_0: lane@35000 {
+ reg = <0x35000 0x130>,
+ <0x35200 0x200>,
+ <0x35400 0x1dc>;
+ #phy-cells = <0>;
+
+ clocks = <&gcc GCC_PCIE_0_PIPE_CLK>;
+ clock-names = "pipe0";
+ resets = <&gcc GCC_PCIE_0_PHY_BCR>;
+ reset-names = "lane0";
+ };
+
+ pciephy_1: lane@36000 {
+ ...
+ ...
+ };
diff --git a/Documentation/devicetree/bindings/phy/qcom-qusb2-phy.txt b/Documentation/devicetree/bindings/phy/qcom-qusb2-phy.txt
new file mode 100644
index 000000000000..aa0fcb05acb3
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/qcom-qusb2-phy.txt
@@ -0,0 +1,43 @@
+Qualcomm QUSB2 phy controller
+=============================
+
+QUSB2 controller supports LS/FS/HS usb connectivity on Qualcomm chipsets.
+
+Required properties:
+ - compatible: compatible list, contains "qcom,msm8996-qusb2-phy".
+ - reg: offset and length of the PHY register set.
+ - #phy-cells: must be 0.
+
+ - clocks: a list of phandles and clock-specifier pairs,
+ one for each entry in clock-names.
+ - clock-names: must be "cfg_ahb" for phy config clock,
+ "ref" for 19.2 MHz ref clk,
+ "iface" for phy interface clock (Optional).
+
+ - vdda-pll-supply: Phandle to 1.8V regulator supply to PHY refclk pll block.
+ - vdda-phy-dpdm-supply: Phandle to 3.1V regulator supply to Dp/Dm port signals.
+
+ - resets: Phandle to reset to phy block.
+
+Optional properties:
+ - nvmem-cells: Phandle to nvmem cell that contains 'HS Tx trim'
+ tuning parameter value for qusb2 phy.
+
+ - qcom,tcsr-syscon: Phandle to TCSR syscon register region.
+
+Example:
+ hsusb_phy: phy@7411000 {
+ compatible = "qcom,msm8996-qusb2-phy";
+ reg = <0x7411000 0x180>;
+ #phy-cells = <0>;
+
+ clocks = <&gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>,
+ <&gcc GCC_RX1_USB2_CLKREF_CLK>;
+ clock-names = "cfg_ahb", "ref";
+
+ vdda-pll-supply = <&pm8994_l12>;
+ vdda-phy-dpdm-supply = <&pm8994_l24>;
+
+ resets = <&gcc GCC_QUSB2PHY_PRIM_BCR>;
+ nvmem-cells = <&qusb2p_hstx_trim>;
+ };
diff --git a/Documentation/devicetree/bindings/phy/rockchip-usb-phy.txt b/Documentation/devicetree/bindings/phy/rockchip-usb-phy.txt
index 57dc388e2fa2..4ed569046daf 100644
--- a/Documentation/devicetree/bindings/phy/rockchip-usb-phy.txt
+++ b/Documentation/devicetree/bindings/phy/rockchip-usb-phy.txt
@@ -30,6 +30,7 @@ Optional Properties:
- reset-names: Only allow the following entries:
- phy-reset
- resets: Must contain an entry for each entry in reset-names.
+- vbus-supply: power-supply phandle for vbus power source
Example:
diff --git a/Documentation/devicetree/bindings/phy/sun4i-usb-phy.txt b/Documentation/devicetree/bindings/phy/sun4i-usb-phy.txt
index e42334258185..005bc22938ff 100644
--- a/Documentation/devicetree/bindings/phy/sun4i-usb-phy.txt
+++ b/Documentation/devicetree/bindings/phy/sun4i-usb-phy.txt
@@ -15,6 +15,7 @@ Required properties:
- reg : a list of offset + length pairs
- reg-names :
* "phy_ctrl"
+ * "pmu0" for H3, V3s and A64
* "pmu1"
* "pmu2" for sun4i, sun6i or sun7i
- #phy-cells : from the generic phy bindings, must be 1
diff --git a/Documentation/devicetree/bindings/power/fsl,imx-gpc.txt b/Documentation/devicetree/bindings/power/fsl,imx-gpc.txt
index 65cc0345747d..6c1498958d48 100644
--- a/Documentation/devicetree/bindings/power/fsl,imx-gpc.txt
+++ b/Documentation/devicetree/bindings/power/fsl,imx-gpc.txt
@@ -1,22 +1,42 @@
Freescale i.MX General Power Controller
=======================================
-The i.MX6Q General Power Control (GPC) block contains DVFS load tracking
-counters and Power Gating Control (PGC) for the CPU and PU (GPU/VPU) power
-domains.
+The i.MX6 General Power Control (GPC) block contains DVFS load tracking
+counters and Power Gating Control (PGC).
Required properties:
-- compatible: Should be "fsl,imx6q-gpc" or "fsl,imx6sl-gpc"
+- compatible: Should be one of the following:
+ - fsl,imx6q-gpc
+ - fsl,imx6qp-gpc
+ - fsl,imx6sl-gpc
- reg: should be register base and length as documented in the
datasheet
-- interrupts: Should contain GPC interrupt request 1
-- pu-supply: Link to the LDO regulator powering the PU power domain
-- clocks: Clock phandles to devices in the PU power domain that need
- to be enabled during domain power-up for reset propagation.
-- #power-domain-cells: Should be 1, see below:
+- interrupts: Should contain one interrupt specifier for the GPC interrupt
+- clocks: Must contain an entry for each entry in clock-names.
+ See Documentation/devicetree/bindings/clock/clock-bindings.txt for details.
+- clock-names: Must include the following entries:
+ - ipg
-The gpc node is a power-controller as documented by the generic power domain
-bindings in Documentation/devicetree/bindings/power/power_domain.txt.
+The power domains are generic power domain providers as documented in
+Documentation/devicetree/bindings/power/power_domain.txt. They are described as
+subnodes of the power gating controller 'pgc' node of the GPC and should
+contain the following:
+
+Required properties:
+- reg: Must contain the DOMAIN_INDEX of this power domain
+ The following DOMAIN_INDEX values are valid for i.MX6Q:
+ ARM_DOMAIN 0
+ PU_DOMAIN 1
+ The following additional DOMAIN_INDEX value is valid for i.MX6SL:
+ DISPLAY_DOMAIN 2
+
+- #power-domain-cells: Should be 0
+
+Optional properties:
+- clocks: a number of phandles to clocks that need to be enabled during domain
+ power-up sequencing to ensure reset propagation into devices located inside
+ this power domain
+- power-supply: a phandle to the regulator powering this domain
Example:
@@ -25,14 +45,30 @@ Example:
reg = <0x020dc000 0x4000>;
interrupts = <0 89 IRQ_TYPE_LEVEL_HIGH>,
<0 90 IRQ_TYPE_LEVEL_HIGH>;
- pu-supply = <&reg_pu>;
- clocks = <&clks IMX6QDL_CLK_GPU3D_CORE>,
- <&clks IMX6QDL_CLK_GPU3D_SHADER>,
- <&clks IMX6QDL_CLK_GPU2D_CORE>,
- <&clks IMX6QDL_CLK_GPU2D_AXI>,
- <&clks IMX6QDL_CLK_OPENVG_AXI>,
- <&clks IMX6QDL_CLK_VPU_AXI>;
- #power-domain-cells = <1>;
+ clocks = <&clks IMX6QDL_CLK_IPG>;
+ clock-names = "ipg";
+
+ pgc {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ power-domain@0 {
+ reg = <0>;
+ #power-domain-cells = <0>;
+ };
+
+ pd_pu: power-domain@1 {
+ reg = <1>;
+ #power-domain-cells = <0>;
+ power-supply = <&reg_pu>;
+ clocks = <&clks IMX6QDL_CLK_GPU3D_CORE>,
+ <&clks IMX6QDL_CLK_GPU3D_SHADER>,
+ <&clks IMX6QDL_CLK_GPU2D_CORE>,
+ <&clks IMX6QDL_CLK_GPU2D_AXI>,
+ <&clks IMX6QDL_CLK_OPENVG_AXI>,
+ <&clks IMX6QDL_CLK_VPU_AXI>;
+ };
+ };
};
@@ -40,20 +76,13 @@ Specifying power domain for IP modules
======================================
IP cores belonging to a power domain should contain a 'power-domains' property
-that is a phandle pointing to the gpc device node and a DOMAIN_INDEX specifying
-the power domain the device belongs to.
+that is a phandle pointing to the power domain the device belongs to.
Example of a device that is part of the PU power domain:
vpu: vpu@02040000 {
reg = <0x02040000 0x3c000>;
/* ... */
- power-domains = <&gpc 1>;
+ power-domains = <&pd_pu>;
/* ... */
};
-
-The following DOMAIN_INDEX values are valid for i.MX6Q:
-ARM_DOMAIN 0
-PU_DOMAIN 1
-The following additional DOMAIN_INDEX value is valid for i.MX6SL:
-DISPLAY_DOMAIN 2
diff --git a/Documentation/devicetree/bindings/power/fsl,imx-gpcv2.txt b/Documentation/devicetree/bindings/power/fsl,imx-gpcv2.txt
new file mode 100644
index 000000000000..02f45c65fd87
--- /dev/null
+++ b/Documentation/devicetree/bindings/power/fsl,imx-gpcv2.txt
@@ -0,0 +1,71 @@
+Freescale i.MX General Power Controller v2
+==========================================
+
+The i.MX7S/D General Power Control (GPC) block contains Power Gating
+Control (PGC) for various power domains.
+
+Required properties:
+
+- compatible: Should be "fsl,imx7d-gpc"
+
+- reg: should be register base and length as documented in the
+ datasheet
+
+- interrupts: Should contain GPC interrupt request 1
+
+The power domains contained within the GPC node are generic power domain
+providers, as documented in
+Documentation/devicetree/bindings/power/power_domain.txt. They are
+described as subnodes of the power gating controller 'pgc' node, and each
+subnode is expected to contain the following:
+
+Required properties:
+
+- reg: Power domain index. Valid values are defined in
+ include/dt-bindings/power/imx7-power.h
+
+- #power-domain-cells: Should be 0
+
+Optional properties:
+
+- power-supply: Power supply used to power the domain
+
+Example:
+
+ gpc: gpc@303a0000 {
+ compatible = "fsl,imx7d-gpc";
+ reg = <0x303a0000 0x1000>;
+ interrupt-controller;
+ interrupts = <GIC_SPI 87 IRQ_TYPE_LEVEL_HIGH>;
+ #interrupt-cells = <3>;
+ interrupt-parent = <&intc>;
+
+ pgc {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pgc_pcie_phy: power-domain@3 {
+ #power-domain-cells = <0>;
+
+ reg = <IMX7_POWER_DOMAIN_PCIE_PHY>;
+ power-supply = <&reg_1p0d>;
+ };
+ };
+ };
+
+
+Specifying power domain for IP modules
+======================================
+
+IP cores belonging to a power domain should contain a 'power-domains'
+property that is a phandle for PGC node representing the domain.
+
+Example of a device that is part of the PCIE_PHY power domain:
+
+ pcie: pcie@33800000 {
+ reg = <0x33800000 0x4000>,
+ <0x4ff00000 0x80000>;
+ /* ... */
+ power-domains = <&pgc_pcie_phy>;
+ /* ... */
+ };
diff --git a/Documentation/devicetree/bindings/power/power_domain.txt b/Documentation/devicetree/bindings/power/power_domain.txt
index 940707d095cc..14bd9e945ff6 100644
--- a/Documentation/devicetree/bindings/power/power_domain.txt
+++ b/Documentation/devicetree/bindings/power/power_domain.txt
@@ -81,7 +81,7 @@ Example 3:
child: power-controller@12341000 {
compatible = "foo,power-controller";
reg = <0x12341000 0x1000>;
- power-domains = <&parent 0>;
+ power-domains = <&parent>;
#power-domain-cells = <0>;
domain-idle-states = <&DOMAIN_PWR_DN>;
};
diff --git a/Documentation/devicetree/bindings/pwm/atmel-pwm.txt b/Documentation/devicetree/bindings/pwm/atmel-pwm.txt
index 02331b904d4e..c8c831d7b0d1 100644
--- a/Documentation/devicetree/bindings/pwm/atmel-pwm.txt
+++ b/Documentation/devicetree/bindings/pwm/atmel-pwm.txt
@@ -4,6 +4,7 @@ Required properties:
- compatible: should be one of:
- "atmel,at91sam9rl-pwm"
- "atmel,sama5d3-pwm"
+ - "atmel,sama5d2-pwm"
- reg: physical base address and length of the controller's registers
- #pwm-cells: Should be 3. See pwm.txt in this directory for a
description of the cells format.
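A node using the newly added compatible might look like the sketch below;
the register base and size are assumptions drawn from typical SAMA5D2 device
trees rather than from this patch:

	pwm0: pwm@f802c000 {
		compatible = "atmel,sama5d2-pwm";
		reg = <0xf802c000 0x4000>;	/* assumed base/size */
		#pwm-cells = <3>;
	};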
diff --git a/Documentation/devicetree/bindings/pwm/nvidia,tegra20-pwm.txt b/Documentation/devicetree/bindings/pwm/nvidia,tegra20-pwm.txt
index b4e73778dda3..c57e11b8d937 100644
--- a/Documentation/devicetree/bindings/pwm/nvidia,tegra20-pwm.txt
+++ b/Documentation/devicetree/bindings/pwm/nvidia,tegra20-pwm.txt
@@ -19,6 +19,19 @@ Required properties:
- reset-names: Must include the following entries:
- pwm
+Optional properties:
+============================
+In some interfaces, such as a PWM-based regulator device, the pins need to
+be configured differently in different states, especially in the suspend
+state of the system. The pin configuration is provided via the pinctrl
+DT node as detailed in the pinctrl DT binding document
+ Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt
+
+The PWM node can have the following optional properties:
+pinctrl-names: Pin state names. Must be "default" and "sleep".
+pinctrl-0: phandle for the default/active state of pin configurations.
+pinctrl-1: phandle for the sleep state of pin configurations.
+
Example:
pwm: pwm@7000a000 {
@@ -29,3 +42,35 @@ Example:
resets = <&tegra_car 17>;
reset-names = "pwm";
};
+
+
+Example with the pin configuration for suspend and resume:
+=========================================================
+Suppose pin PE7 (on Tegra210) is interfaced with the regulator device and
+requires the PWM output to be tristated when the system enters suspend.
+The following DT binding achieves this:
+
+#include <dt-bindings/pinctrl/pinctrl-tegra.h>
+
+ pinmux@700008d4 {
+ pwm_active_state: pwm_active_state {
+ pe7 {
+ nvidia,pins = "pe7";
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ };
+ };
+
+ pwm_sleep_state: pwm_sleep_state {
+ pe7 {
+ nvidia,pins = "pe7";
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ };
+ };
+ };
+
+ pwm@7000a000 {
+ /* Mandatory PWM properties */
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&pwm_active_state>;
+ pinctrl-1 = <&pwm_sleep_state>;
+ };
diff --git a/Documentation/devicetree/bindings/pwm/pwm-mediatek.txt b/Documentation/devicetree/bindings/pwm/pwm-mediatek.txt
new file mode 100644
index 000000000000..54c59b0560ad
--- /dev/null
+++ b/Documentation/devicetree/bindings/pwm/pwm-mediatek.txt
@@ -0,0 +1,34 @@
+MediaTek PWM controller
+
+Required properties:
+ - compatible: should be "mediatek,<name>-pwm":
+ - "mediatek,mt7623-pwm": found on mt7623 SoC.
+ - reg: physical base address and length of the controller's registers.
+ - #pwm-cells: must be 2. See pwm.txt in this directory for a description of
+ the cell format.
+ - clocks: phandles and clock specifiers of the PWM reference clocks.
+ - clock-names: must contain the following:
+ - "top": the top clock generator
+ - "main": clock used by the PWM core
+ - "pwm1-5": the five per PWM clocks
+ - pinctrl-names: Must contain a "default" entry.
+ - pinctrl-0: One property must exist for each entry in pinctrl-names.
+ See pinctrl/pinctrl-bindings.txt for details of the property values.
+
+Example:
+ pwm0: pwm@11006000 {
+ compatible = "mediatek,mt7623-pwm";
+ reg = <0 0x11006000 0 0x1000>;
+ #pwm-cells = <2>;
+ clocks = <&topckgen CLK_TOP_PWM_SEL>,
+ <&pericfg CLK_PERI_PWM>,
+ <&pericfg CLK_PERI_PWM1>,
+ <&pericfg CLK_PERI_PWM2>,
+ <&pericfg CLK_PERI_PWM3>,
+ <&pericfg CLK_PERI_PWM4>,
+ <&pericfg CLK_PERI_PWM5>;
+ clock-names = "top", "main", "pwm1", "pwm2",
+ "pwm3", "pwm4", "pwm5";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pwm0_pins>;
+ };
diff --git a/Documentation/devicetree/bindings/reset/fsl,imx7-src.txt b/Documentation/devicetree/bindings/reset/fsl,imx7-src.txt
new file mode 100644
index 000000000000..5e1afc3d8480
--- /dev/null
+++ b/Documentation/devicetree/bindings/reset/fsl,imx7-src.txt
@@ -0,0 +1,47 @@
+Freescale i.MX7 System Reset Controller
+======================================
+
+Please also refer to reset.txt in this directory for common reset
+controller binding usage.
+
+Required properties:
+- compatible: Should be "fsl,imx7d-src", "syscon"
+- reg: should be register base and length as documented in the
+ datasheet
+- interrupts: Should contain SRC interrupt
+- #reset-cells: 1, see below
+
+Example:
+
+src: reset-controller@30390000 {
+ compatible = "fsl,imx7d-src", "syscon";
+ reg = <0x30390000 0x2000>;
+ interrupts = <GIC_SPI 89 IRQ_TYPE_LEVEL_HIGH>;
+ #reset-cells = <1>;
+};
+
+
+Specifying reset lines connected to IP modules
+==============================================
+
+The system reset controller can be used to reset various sets of
+peripherals. Device nodes that need access to reset lines should
+specify them as a reset phandle in their corresponding node as
+specified in reset.txt.
+
+Example:
+
+ pcie: pcie@33800000 {
+
+ ...
+
+ resets = <&src IMX7_RESET_PCIEPHY>,
+ <&src IMX7_RESET_PCIE_CTRL_APPS_EN>;
+ reset-names = "pciephy", "apps";
+
+ ...
+ };
+
+
+For a list of all valid reset indices see
+<dt-bindings/reset/imx7-reset.h>
diff --git a/Documentation/devicetree/bindings/serial/sprd-uart.txt b/Documentation/devicetree/bindings/serial/sprd-uart.txt
index 2aff0f22c9fa..cab40f0f6f49 100644
--- a/Documentation/devicetree/bindings/serial/sprd-uart.txt
+++ b/Documentation/devicetree/bindings/serial/sprd-uart.txt
@@ -1,7 +1,19 @@
* Spreadtrum serial UART
Required properties:
-- compatible: must be "sprd,sc9836-uart"
+- compatible: must be one of:
+ * "sprd,sc9836-uart"
+ * "sprd,sc9860-uart", "sprd,sc9836-uart"
+
- reg: offset and length of the register set for the device
- interrupts: exactly one interrupt specifier
- clocks: phandles to input clocks.
+
+Example:
+ uart0: serial@0 {
+ compatible = "sprd,sc9860-uart",
+ "sprd,sc9836-uart";
+ reg = <0x0 0x100>;
+ interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&ext_26m>;
+ };
diff --git a/Documentation/devicetree/bindings/soc/rockchip/grf.txt b/Documentation/devicetree/bindings/soc/rockchip/grf.txt
index a0685c209218..cc9f05d3cbc1 100644
--- a/Documentation/devicetree/bindings/soc/rockchip/grf.txt
+++ b/Documentation/devicetree/bindings/soc/rockchip/grf.txt
@@ -8,6 +8,8 @@ From RK3368 SoCs, the GRF is divided into two sections,
- SGRF, used for general secure system,
- PMUGRF, used for always on system
+On RK3328 SoCs, the GRF adds a section for USB2PHYGRF.
+
Required Properties:
- compatible: GRF should be one of the following:
@@ -16,6 +18,7 @@ Required Properties:
- "rockchip,rk3188-grf", "syscon": for rk3188
- "rockchip,rk3228-grf", "syscon": for rk3228
- "rockchip,rk3288-grf", "syscon": for rk3288
+ - "rockchip,rk3328-grf", "syscon": for rk3328
- "rockchip,rk3368-grf", "syscon": for rk3368
- "rockchip,rk3399-grf", "syscon": for rk3399
- compatible: PMUGRF should be one of the following:
@@ -23,6 +26,8 @@ Required Properties:
- "rockchip,rk3399-pmugrf", "syscon": for rk3399
- compatible: SGRF should be one of the following
- "rockchip,rk3288-sgrf", "syscon": for rk3288
+- compatible: USB2PHYGRF should be one of the following
+ - "rockchip,rk3328-usb2phy-grf", "syscon": for rk3328
- reg: physical base address of the controller and length of memory mapped
region.
diff --git a/Documentation/devicetree/bindings/soc/ti/sci-pm-domain.txt b/Documentation/devicetree/bindings/soc/ti/sci-pm-domain.txt
new file mode 100644
index 000000000000..c705db07d820
--- /dev/null
+++ b/Documentation/devicetree/bindings/soc/ti/sci-pm-domain.txt
@@ -0,0 +1,57 @@
+Texas Instruments TI-SCI Generic Power Domain
+---------------------------------------------
+
+Some TI SoCs contain a system controller (such as the PMMC) that is
+responsible for controlling the state of the IPs that are present.
+Communication between the host processor running an OS and the system
+controller happens through a protocol known as TI-SCI [1].
+
+[1] Documentation/devicetree/bindings/arm/keystone/ti,sci.txt
+
+PM Domain Node
+==============
+The PM domain node represents the global PM domain managed by the PMMC. It is
+an implementation of the generic PM domain bindings documented in
+Documentation/devicetree/bindings/power/power_domain.txt. Because it relies on
+the TI SCI protocol to communicate with the PMMC, it must be a child of the
+pmmc node.
+
+Required Properties:
+--------------------
+- compatible: should be "ti,sci-pm-domain"
+- #power-domain-cells: Must be 1 so that an id can be provided in each
+ device node.
+
+Example (K2G):
+-------------
+ pmmc: pmmc {
+ compatible = "ti,k2g-sci";
+ ...
+
+ k2g_pds: power-controller {
+ compatible = "ti,sci-pm-domain";
+ #power-domain-cells = <1>;
+ };
+ };
+
+PM Domain Consumers
+===================
+Hardware blocks belonging to a PM domain should contain a "power-domains"
+property that is a phandle pointing to the corresponding PM domain node
+along with an index representing the device id to be passed to the PMMC
+for device control.
+
+Required Properties:
+--------------------
+- power-domains: phandle pointing to the corresponding PM domain node
+ and an ID representing the device.
+
+See dt-bindings/genpd/k2g.h for the list of valid identifiers for k2g.
+
+Example (K2G):
+--------------------
+ uart0: serial@02530c00 {
+ compatible = "ns16550a";
+ ...
+ power-domains = <&k2g_pds K2G_DEV_UART0>;
+ };
diff --git a/Documentation/devicetree/bindings/i2c/trivial-devices.txt b/Documentation/devicetree/bindings/trivial-devices.txt
index ad10fbe61562..ad10fbe61562 100644
--- a/Documentation/devicetree/bindings/i2c/trivial-devices.txt
+++ b/Documentation/devicetree/bindings/trivial-devices.txt
diff --git a/Documentation/devicetree/bindings/usb/da8xx-usb.txt b/Documentation/devicetree/bindings/usb/da8xx-usb.txt
index ccb844aba7d4..717c5f656237 100644
--- a/Documentation/devicetree/bindings/usb/da8xx-usb.txt
+++ b/Documentation/devicetree/bindings/usb/da8xx-usb.txt
@@ -18,10 +18,26 @@ Required properties:
- phy-names: Should be "usb-phy"
+ - dmas: specifies the dma channels
+
+ - dma-names: specifies the names of the channels. Use "rxN" for receive
+ and "txN" for transmit endpoints. N specifies the endpoint number.
+
Optional properties:
~~~~~~~~~~~~~~~~~~~~
- vbus-supply: Phandle to a regulator providing the USB bus power.
+DMA
+~~~
+- compatible: ti,da830-cppi41
+- reg: offset and length of the following register spaces: CPPI DMA Controller,
+ CPPI DMA Scheduler, Queue Manager
+- reg-names: "controller", "scheduler", "queuemgr"
+- #dma-cells: should be set to 2. The first number represents the
+ channel number (0 … 3 for endpoints 1 … 4).
+ The second number is 0 for RX and 1 for TX transfers.
+- #dma-channels: should be set to 4 representing the 4 endpoints.
+
Example:
usb_phy: usb-phy {
compatible = "ti,da830-usb-phy";
@@ -30,7 +46,10 @@ Example:
};
usb0: usb@200000 {
compatible = "ti,da830-musb";
- reg = <0x00200000 0x10000>;
+ reg = <0x00200000 0x1000>;
+ ranges;
+ #address-cells = <1>;
+ #size-cells = <1>;
interrupts = <58>;
interrupt-names = "mc";
@@ -39,5 +58,25 @@ Example:
phys = <&usb_phy 0>;
phy-names = "usb-phy";
+ dmas = <&cppi41dma 0 0 &cppi41dma 1 0
+ &cppi41dma 2 0 &cppi41dma 3 0
+ &cppi41dma 0 1 &cppi41dma 1 1
+ &cppi41dma 2 1 &cppi41dma 3 1>;
+ dma-names =
+ "rx1", "rx2", "rx3", "rx4",
+ "tx1", "tx2", "tx3", "tx4";
+
status = "okay";
+
+ cppi41dma: dma-controller@201000 {
+ compatible = "ti,da830-cppi41";
+ reg = <0x201000 0x1000
+ 0x202000 0x1000
+ 0x204000 0x4000>;
+ reg-names = "controller", "scheduler", "queuemgr";
+ interrupts = <58>;
+ #dma-cells = <2>;
+ #dma-channels = <4>;
+ };
+
};
diff --git a/Documentation/devicetree/bindings/usb/dwc2.txt b/Documentation/devicetree/bindings/usb/dwc2.txt
index 6c7c2bce6d0c..00bea038639e 100644
--- a/Documentation/devicetree/bindings/usb/dwc2.txt
+++ b/Documentation/devicetree/bindings/usb/dwc2.txt
@@ -14,6 +14,10 @@ Required properties:
- "amlogic,meson-gxbb-usb": The DWC2 USB controller instance in Amlogic S905 SoCs;
- "amcc,dwc-otg": The DWC2 USB controller instance in AMCC Canyonlands 460EX SoCs;
- snps,dwc2: A generic DWC2 USB controller with default parameters.
+ - "st,stm32f4x9-fsotg": The DWC2 USB FS/HS controller instance in STM32F4x9 SoCs
+ configured in FS mode;
+ - "st,stm32f4x9-hsotg": The DWC2 USB HS controller instance in STM32F4x9 SoCs
+ configured in HS mode;
- reg : Should contain 1 register range (address and length)
- interrupts : Should contain 1 interrupt
- clocks: clock provider specifier
diff --git a/Documentation/devicetree/bindings/usb/ehci-orion.txt b/Documentation/devicetree/bindings/usb/ehci-orion.txt
index 17c3bc858b86..2855bae79fda 100644
--- a/Documentation/devicetree/bindings/usb/ehci-orion.txt
+++ b/Documentation/devicetree/bindings/usb/ehci-orion.txt
@@ -1,7 +1,9 @@
* EHCI controller, Orion Marvell variants
Required properties:
-- compatible: must be "marvell,orion-ehci"
+- compatible: must be one of the following
+ "marvell,orion-ehci"
+ "marvell,armada-3700-ehci"
- reg: physical base address of the controller and length of memory mapped
region.
- interrupts: The EHCI interrupt
diff --git a/Documentation/devicetree/bindings/usb/generic.txt b/Documentation/devicetree/bindings/usb/generic.txt
index bfadeb1c3bab..0a74ab8dfdc2 100644
--- a/Documentation/devicetree/bindings/usb/generic.txt
+++ b/Documentation/devicetree/bindings/usb/generic.txt
@@ -22,6 +22,7 @@ Optional properties:
property is used if any real OTG features(HNP/SRP/ADP)
is enabled, if ADP is required, otg-rev should be
0x0200 or above.
+ - companion: phandle of a companion
- hnp-disable: tells OTG controllers we want to disable OTG HNP, normally HNP
is the basic function of real OTG except you want it
to be a srp-capable only B device.
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index bbb8174405bc..12e27844bb7b 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -80,6 +80,7 @@ denx Denx Software Engineering
devantech Devantech, Ltd.
digi Digi International Inc.
digilent Diglent, Inc.
+dioo Dioo Microcircuit Co., Ltd
dlg Dialog Semiconductor
dlink D-Link Corporation
dmo Data Modul AG
@@ -103,6 +104,7 @@ ettus NI Ettus Research
eukrea Eukréa Electromatique
everest Everest Semiconductor Co. Ltd.
everspin Everspin Technologies, Inc.
+exar Exar Corporation
excito Excito
ezchip EZchip Semiconductor
faraday Faraday Technology Corporation
@@ -137,6 +139,7 @@ holt Holt Integrated Circuits, Inc.
honeywell Honeywell
hp Hewlett Packard
holtek Holtek Semiconductor, Inc.
+hwacom HwaCom Systems Inc.
i2se I2SE GmbH
ibm International Business Machines (IBM)
idt Integrated Device Technologies, Inc.
@@ -160,6 +163,7 @@ jedec JEDEC Solid State Technology Association
karo Ka-Ro electronics GmbH
keithkoep Keith & Koep GmbH
keymile Keymile GmbH
+khadas Khadas
kinetic Kinetic Technologies
kosagi Sutajio Ko-Usagi PTE Ltd.
kyo Kyocera Corporation
@@ -169,6 +173,7 @@ lego LEGO Systems A/S
lenovo Lenovo Group Ltd.
lg LG Corporation
licheepi Lichee Pi
+linaro Linaro Limited
linux Linux-specific binding
lltc Linear Technology Corporation
lsi LSI Corp. (LSI Logic)
@@ -214,6 +219,7 @@ newhaven Newhaven Display International
ni National Instruments
nintendo Nintendo
nokia Nokia
+nordic Nordic Semiconductor
nuvoton Nuvoton Technology Corporation
nvd New Vision Display
nvidia NVIDIA
@@ -260,6 +266,7 @@ richtek Richtek Technology Corporation
ricoh Ricoh Co. Ltd.
rikomagic Rikomagic Tech Corp. Ltd
rockchip Fuzhou Rockchip Electronics Co., Ltd
+rohm ROHM Semiconductor Co., Ltd
samsung Samsung Semiconductor
samtec Samtec/Softing company
sandisk Sandisk Corporation
diff --git a/Documentation/devicetree/bindings/watchdog/cortina,gemini-watchdog.txt b/Documentation/devicetree/bindings/watchdog/cortina,gemini-watchdog.txt
new file mode 100644
index 000000000000..bc4b865d178b
--- /dev/null
+++ b/Documentation/devicetree/bindings/watchdog/cortina,gemini-watchdog.txt
@@ -0,0 +1,17 @@
+Cortina Systems Gemini SoC Watchdog
+
+Required properties:
+- compatible : must be "cortina,gemini-watchdog"
+- reg : shall contain base register location and length
+- interrupts : shall contain the interrupt for the watchdog
+
+Optional properties:
+- timeout-sec : the default watchdog timeout in seconds.
+
+Example:
+
+watchdog@41000000 {
+ compatible = "cortina,gemini-watchdog";
+ reg = <0x41000000 0x1000>;
+ interrupts = <3 IRQ_TYPE_LEVEL_HIGH>;
+};
diff --git a/Documentation/driver-api/vme.rst b/Documentation/driver-api/vme.rst
index 89776fb3c8bd..def139c13410 100644
--- a/Documentation/driver-api/vme.rst
+++ b/Documentation/driver-api/vme.rst
@@ -6,36 +6,15 @@ Driver registration
As with other subsystems within the Linux kernel, VME device drivers register
with the VME subsystem, typically called from the devices init routine. This is
-achieved via a call to the following function:
+achieved via a call to :c:func:`vme_register_driver`.
-.. code-block:: c
-
- int vme_register_driver (struct vme_driver *driver, unsigned int ndevs);
+A pointer to a structure of type :c:type:`struct vme_driver <vme_driver>` must
+be provided to the registration function, along with the maximum number of
+devices your driver is able to support.
-If driver registration is successful this function returns zero, if an error
-occurred a negative error code will be returned.
-
-A pointer to a structure of type 'vme_driver' must be provided to the
-registration function. Along with ndevs, which is the number of devices your
-driver is able to support. The structure is as follows:
-
-.. code-block:: c
-
- struct vme_driver {
- struct list_head node;
- const char *name;
- int (*match)(struct vme_dev *);
- int (*probe)(struct vme_dev *);
- int (*remove)(struct vme_dev *);
- void (*shutdown)(void);
- struct device_driver driver;
- struct list_head devices;
- unsigned int ndev;
- };
-
-At the minimum, the '.name', '.match' and '.probe' elements of this structure
-should be correctly set. The '.name' element is a pointer to a string holding
-the device driver's name.
+At the minimum, the '.name', '.match' and '.probe' elements of
+:c:type:`struct vme_driver <vme_driver>` should be correctly set. The '.name'
+element is a pointer to a string holding the device driver's name.
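+
+For illustration, a minimal sketch of such a structure (the "example" name and
+its callbacks are hypothetical):
+
+.. code-block:: c
+
+	static struct vme_driver example_driver = {
+		.name = "example",
+		.match = example_match,
+		.probe = example_probe,
+		.remove = example_remove,
+	};
+
+	/* in the module init routine, supporting at most one device */
+	err = vme_register_driver(&example_driver, 1);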
The '.match' function allows control over which VME devices should be registered
with the driver. The match function should return 1 if a device should be
@@ -54,29 +33,16 @@ the number of devices probed to one:
}
The '.probe' element should contain a pointer to the probe routine. The
-probe routine is passed a 'struct vme_dev' pointer as an argument. The
-'struct vme_dev' structure looks like the following:
-
-.. code-block:: c
-
- struct vme_dev {
- int num;
- struct vme_bridge *bridge;
- struct device dev;
- struct list_head drv_list;
- struct list_head bridge_list;
- };
+probe routine is passed a :c:type:`struct vme_dev <vme_dev>` pointer as an
+argument.
Here, the 'num' field refers to the sequential device ID for this specific
driver. The bridge number (or bus number) can be accessed using
dev->bridge->num.
-A function is also provided to unregister the driver from the VME core and is
-usually called from the device driver's exit routine:
-
-.. code-block:: c
-
- void vme_unregister_driver (struct vme_driver *driver);
+A function, :c:func:`vme_unregister_driver`, is also provided to unregister
+the driver from the VME core; it should usually be called from the device
+driver's exit routine.
Resource management
@@ -90,47 +56,29 @@ driver is called. The probe routine is passed a pointer to the devices
device structure. This pointer should be saved, it will be required for
requesting VME resources.
-The driver can request ownership of one or more master windows, slave windows
-and/or dma channels. Rather than allowing the device driver to request a
-specific window or DMA channel (which may be used by a different driver) this
-driver allows a resource to be assigned based on the required attributes of the
-driver in question:
-
-.. code-block:: c
-
- struct vme_resource * vme_master_request(struct vme_dev *dev,
- u32 aspace, u32 cycle, u32 width);
-
- struct vme_resource * vme_slave_request(struct vme_dev *dev, u32 aspace,
- u32 cycle);
-
- struct vme_resource *vme_dma_request(struct vme_dev *dev, u32 route);
-
-For slave windows these attributes are split into the VME address spaces that
-need to be accessed in 'aspace' and VME bus cycle types required in 'cycle'.
-Master windows add a further set of attributes in 'width' specifying the
-required data transfer widths. These attributes are defined as bitmasks and as
-such any combination of the attributes can be requested for a single window,
-the core will assign a window that meets the requirements, returning a pointer
-of type vme_resource that should be used to identify the allocated resource
-when it is used. For DMA controllers, the request function requires the
-potential direction of any transfers to be provided in the route attributes.
-This is typically VME-to-MEM and/or MEM-to-VME, though some hardware can
-support VME-to-VME and MEM-to-MEM transfers as well as test pattern generation.
-If an unallocated window fitting the requirements can not be found a NULL
-pointer will be returned.
+The driver can request ownership of one or more master windows
+(:c:func:`vme_master_request`), slave windows (:c:func:`vme_slave_request`)
+and/or dma channels (:c:func:`vme_dma_request`). Rather than allowing the device
+driver to request a specific window or DMA channel (which may be used by a
+different driver) the API allows a resource to be assigned based on the required
+attributes of the driver in question. For slave windows these attributes are
+split into the VME address spaces that need to be accessed in 'aspace' and VME
+bus cycle types required in 'cycle'. Master windows add a further set of
+attributes in 'width' specifying the required data transfer widths. These
+attributes are defined as bitmasks and as such any combination of the
+attributes can be requested for a single window; the core will assign a window
+that meets the requirements, returning a pointer of type vme_resource that
+should be used to identify the allocated resource when it is used. For DMA
+controllers, the request function requires the potential direction of any
+transfers to be provided in the route attributes. This is typically VME-to-MEM
+and/or MEM-to-VME, though some hardware can support VME-to-VME and MEM-to-MEM
+transfers as well as test pattern generation. If an unallocated window fitting
+the requirements cannot be found, a NULL pointer will be returned.
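+
+As a sketch only (the attribute choices are arbitrary and ``vdev`` is the
+struct vme_dev pointer passed to the probe routine), requesting a D32 master
+window in the A32 address space for single cycle transfers might look like:
+
+.. code-block:: c
+
+	struct vme_resource *res;
+
+	res = vme_master_request(vdev, VME_A32, VME_SCT, VME_D32);
+	if (!res)
+		return -EBUSY;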
Functions are also provided to free window allocations once they are no longer
-required. These functions should be passed the pointer to the resource provided
-during resource allocation:
-
-.. code-block:: c
-
- void vme_master_free(struct vme_resource *res);
-
- void vme_slave_free(struct vme_resource *res);
-
- void vme_dma_free(struct vme_resource *res);
+required. These functions (:c:func:`vme_master_free`, :c:func:`vme_slave_free`
+and :c:func:`vme_dma_free`) should be passed the pointer to the resource
+provided during resource allocation.
Master windows
@@ -144,61 +92,22 @@ the underlying chipset. A window must be configured before it can be used.
Master window configuration
~~~~~~~~~~~~~~~~~~~~~~~~~~~
-Once a master window has been assigned the following functions can be used to
-configure it and retrieve the current settings:
-
-.. code-block:: c
-
- int vme_master_set (struct vme_resource *res, int enabled,
- unsigned long long base, unsigned long long size, u32 aspace,
- u32 cycle, u32 width);
-
- int vme_master_get (struct vme_resource *res, int *enabled,
- unsigned long long *base, unsigned long long *size, u32 *aspace,
- u32 *cycle, u32 *width);
-
-The address spaces, transfer widths and cycle types are the same as described
+Once a master window has been assigned :c:func:`vme_master_set` can be used to
+configure it and :c:func:`vme_master_get` to retrieve the current settings. The
+address spaces, transfer widths and cycle types are the same as described
under resource management, however some of the options are mutually exclusive.
For example, only one address space may be specified.
-These functions return 0 on success or an error code should the call fail.
-
Master window access
~~~~~~~~~~~~~~~~~~~~
-The following functions can be used to read from and write to configured master
-windows. These functions return the number of bytes copied:
-
-.. code-block:: c
-
- ssize_t vme_master_read(struct vme_resource *res, void *buf,
- size_t count, loff_t offset);
-
- ssize_t vme_master_write(struct vme_resource *res, void *buf,
- size_t count, loff_t offset);
-
-In addition to simple reads and writes, a function is provided to do a
-read-modify-write transaction. This function returns the original value of the
-VME bus location :
-
-.. code-block:: c
-
- unsigned int vme_master_rmw (struct vme_resource *res,
- unsigned int mask, unsigned int compare, unsigned int swap,
- loff_t offset);
-
-This functions by reading the offset, applying the mask. If the bits selected in
-the mask match with the values of the corresponding bits in the compare field,
-the value of swap is written the specified offset.
-
-Parts of a VME window can be mapped into user space memory using the following
-function:
+The function :c:func:`vme_master_read` can be used to read from and
+:c:func:`vme_master_write` used to write to configured master windows.
-.. code-block:: c
-
- int vme_master_mmap(struct vme_resource *resource,
- struct vm_area_struct *vma)
+In addition to simple reads and writes, :c:func:`vme_master_rmw` is provided to
+do a read-modify-write transaction. Parts of a VME window can also be mapped
+into user space memory using :c:func:`vme_master_mmap`.
Slave windows
@@ -213,41 +122,23 @@ it can be used.
Slave window configuration
~~~~~~~~~~~~~~~~~~~~~~~~~~
-Once a slave window has been assigned the following functions can be used to
-configure it and retrieve the current settings:
-
-.. code-block:: c
-
- int vme_slave_set (struct vme_resource *res, int enabled,
- unsigned long long base, unsigned long long size,
- dma_addr_t mem, u32 aspace, u32 cycle);
-
- int vme_slave_get (struct vme_resource *res, int *enabled,
- unsigned long long *base, unsigned long long *size,
- dma_addr_t *mem, u32 *aspace, u32 *cycle);
+Once a slave window has been assigned :c:func:`vme_slave_set` can be used to
+configure it and :c:func:`vme_slave_get` to retrieve the current settings.
The address spaces, transfer widths and cycle types are the same as described
under resource management, however some of the options are mutually exclusive.
For example, only one address space may be specified.
-These functions return 0 on success or an error code should the call fail.
-
Slave window buffer allocation
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-Functions are provided to allow the user to allocate and free a contiguous
-buffers which will be accessible by the VME bridge. These functions do not have
-to be used, other methods can be used to allocate a buffer, though care must be
-taken to ensure that they are contiguous and accessible by the VME bridge:
-
-.. code-block:: c
-
- void * vme_alloc_consistent(struct vme_resource *res, size_t size,
- dma_addr_t *mem);
-
- void vme_free_consistent(struct vme_resource *res, size_t size,
- void *virt, dma_addr_t mem);
+Functions are provided to allow the user to allocate
+(:c:func:`vme_alloc_consistent`) and free (:c:func:`vme_free_consistent`)
+contiguous buffers which will be accessible by the VME bridge. These functions
+do not have to be used, other methods can be used to allocate a buffer, though
+care must be taken to ensure that they are contiguous and accessible by the VME
+bridge.
Slave window access
@@ -269,29 +160,18 @@ executed, reused and destroyed.
List Management
~~~~~~~~~~~~~~~
-The following functions are provided to create and destroy DMA lists. Execution
-of a list will not automatically destroy the list, thus enabling a list to be
-reused for repetitive tasks:
-
-.. code-block:: c
-
- struct vme_dma_list *vme_new_dma_list(struct vme_resource *res);
-
- int vme_dma_list_free(struct vme_dma_list *list);
+The function :c:func:`vme_new_dma_list` is provided to create and
+:c:func:`vme_dma_list_free` to destroy DMA lists. Execution of a list will not
+automatically destroy the list, thus enabling a list to be reused for repetitive
+tasks.
List Population
~~~~~~~~~~~~~~~
-An item can be added to a list using the following function ( the source and
+An item can be added to a list using :c:func:`vme_dma_list_add` (the source and
destination attributes need to be created before calling this function, this is
-covered under "Transfer Attributes"):
-
-.. code-block:: c
-
- int vme_dma_list_add(struct vme_dma_list *list,
- struct vme_dma_attr *src, struct vme_dma_attr *dest,
- size_t count);
+covered under "Transfer Attributes").
.. note::
@@ -310,41 +190,19 @@ an item to a list. This is due to the diverse attributes required for each type
of source and destination. There are functions to create attributes for PCI, VME
and pattern sources and destinations (where appropriate):
-Pattern source:
-
-.. code-block:: c
-
- struct vme_dma_attr *vme_dma_pattern_attribute(u32 pattern, u32 type);
-
-PCI source or destination:
-
-.. code-block:: c
-
- struct vme_dma_attr *vme_dma_pci_attribute(dma_addr_t mem);
-
-VME source or destination:
+ - PCI source or destination: :c:func:`vme_dma_pci_attribute`
+ - VME source or destination: :c:func:`vme_dma_vme_attribute`
+ - Pattern source: :c:func:`vme_dma_pattern_attribute`
-.. code-block:: c
-
- struct vme_dma_attr *vme_dma_vme_attribute(unsigned long long base,
- u32 aspace, u32 cycle, u32 width);
-
-The following function should be used to free an attribute:
-
-.. code-block:: c
-
- void vme_dma_free_attribute(struct vme_dma_attr *attr);
+The function :c:func:`vme_dma_free_attribute` should be used to free an
+attribute.
List Execution
~~~~~~~~~~~~~~
-The following function queues a list for execution. The function will return
-once the list has been executed:
-
-.. code-block:: c
-
- int vme_dma_list_exec(struct vme_dma_list *list);
+The function :c:func:`vme_dma_list_exec` queues a list for execution and will
+return once the list has been executed.
Interrupts
@@ -358,20 +216,13 @@ specific VME level and status IDs.
Attaching Interrupt Handlers
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-The following functions can be used to attach and free a specific VME level and
-status ID combination. Any given combination can only be assigned a single
-callback function. A void pointer parameter is provided, the value of which is
-passed to the callback function, the use of this pointer is user undefined:
-
-.. code-block:: c
-
- int vme_irq_request(struct vme_dev *dev, int level, int statid,
- void (*callback)(int, int, void *), void *priv);
-
- void vme_irq_free(struct vme_dev *dev, int level, int statid);
-
-The callback parameters are as follows. Care must be taken in writing a callback
-function, callback functions run in interrupt context:
+The function :c:func:`vme_irq_request` can be used to attach and
+:c:func:`vme_irq_free` to free a specific VME level and status ID combination.
+Any given combination can only be assigned a single callback function. A void
+pointer parameter is provided, the value of which is passed to the callback
+function; the use of this pointer is up to the user. The callback parameters
+are as follows. Care must be taken when writing a callback function, as
+callback functions run in interrupt context:
.. code-block:: c
@@ -381,12 +232,8 @@ function, callback functions run in interrupt context:
Interrupt Generation
~~~~~~~~~~~~~~~~~~~~
-The following function can be used to generate a VME interrupt at a given VME
-level and VME status ID:
-
-.. code-block:: c
-
- int vme_irq_generate(struct vme_dev *dev, int level, int statid);
+The function :c:func:`vme_irq_generate` can be used to generate a VME interrupt
+at a given VME level and VME status ID.
Location monitors
@@ -399,54 +246,29 @@ monitor.
Location Monitor Management
~~~~~~~~~~~~~~~~~~~~~~~~~~~
-The following functions are provided to request the use of a block of location
-monitors and to free them after they are no longer required:
-
-.. code-block:: c
-
- struct vme_resource * vme_lm_request(struct vme_dev *dev);
-
- void vme_lm_free(struct vme_resource * res);
-
-Each block may provide a number of location monitors, monitoring adjacent
-locations. The following function can be used to determine how many locations
-are provided:
-
-.. code-block:: c
-
- int vme_lm_count(struct vme_resource * res);
+The function :c:func:`vme_lm_request` is provided to request the use of a block
+of location monitors and :c:func:`vme_lm_free` to free them after they are no
+longer required. Each block may provide a number of location monitors,
+monitoring adjacent locations. The function :c:func:`vme_lm_count` can be used
+to determine how many locations are provided.
Location Monitor Configuration
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-Once a bank of location monitors has been allocated, the following functions
-are provided to configure the location and mode of the location monitor:
-
-.. code-block:: c
-
- int vme_lm_set(struct vme_resource *res, unsigned long long base,
- u32 aspace, u32 cycle);
-
- int vme_lm_get(struct vme_resource *res, unsigned long long *base,
- u32 *aspace, u32 *cycle);
+Once a bank of location monitors has been allocated, the function
+:c:func:`vme_lm_set` is provided to configure the location and mode of the
+location monitor. The function :c:func:`vme_lm_get` can be used to retrieve
+existing settings.
Location Monitor Use
~~~~~~~~~~~~~~~~~~~~
-The following functions allow a callback to be attached and detached from each
-location monitor location. Each location monitor can monitor a number of
-adjacent locations:
-
-.. code-block:: c
-
- int vme_lm_attach(struct vme_resource *res, int num,
- void (*callback)(void *));
-
- int vme_lm_detach(struct vme_resource *res, int num);
-
-The callback function is declared as follows.
+The function :c:func:`vme_lm_attach` enables a callback to be attached and
+:c:func:`vme_lm_detach` allows one to be detached from each location monitor
+location. Each location monitor can monitor a number of adjacent locations. The
+callback function is declared as follows.
.. code-block:: c
@@ -456,19 +278,20 @@ The callback function is declared as follows.
Slot Detection
--------------
-This function returns the slot ID of the provided bridge.
-
-.. code-block:: c
-
- int vme_slot_num(struct vme_dev *dev);
+The function :c:func:`vme_slot_num` returns the slot ID of the provided bridge.
Bus Detection
-------------
-This function returns the bus ID of the provided bridge.
+The function :c:func:`vme_bus_num` returns the bus ID of the provided bridge.
-.. code-block:: c
- int vme_bus_num(struct vme_dev *dev);
+VME API
+-------
+
+.. kernel-doc:: include/linux/vme.h
+ :internal:
+.. kernel-doc:: drivers/vme/vme.c
+ :export:
diff --git a/Documentation/driver-model/devres.txt b/Documentation/driver-model/devres.txt
index bf34d5b3a733..e72587fe477d 100644
--- a/Documentation/driver-model/devres.txt
+++ b/Documentation/driver-model/devres.txt
@@ -342,8 +342,10 @@ PER-CPU MEM
devm_free_percpu()
PCI
- pcim_enable_device() : after success, all PCI ops become managed
- pcim_pin_device() : keep PCI device enabled after release
+ devm_pci_remap_cfgspace() : ioremap PCI configuration space
+ devm_pci_remap_cfg_resource() : ioremap PCI configuration space resource
+ pcim_enable_device() : after success, all PCI ops become managed
+ pcim_pin_device() : keep PCI device enabled after release
PHY
devm_usb_get_phy()
diff --git a/Documentation/extcon/porting-android-switch-class b/Documentation/extcon/porting-android-switch-class
deleted file mode 100644
index 49c81caef84d..000000000000
--- a/Documentation/extcon/porting-android-switch-class
+++ /dev/null
@@ -1,123 +0,0 @@
-
- Staging/Android Switch Class Porting Guide
- (linux/drivers/staging/android/switch)
- (c) Copyright 2012 Samsung Electronics
-
-AUTHORS
-MyungJoo Ham <myungjoo.ham@samsung.com>
-
-/*****************************************************************
- * CHAPTER 1. *
- * PORTING SWITCH CLASS DEVICE DRIVERS *
- *****************************************************************/
-
-****** STEP 1. Basic Functionality
- No extcon extended feature, but switch features only.
-
-- struct switch_dev (fed to switch_dev_register/unregister)
- @name: no change
- @dev: no change
- @index: drop (not used in switch device driver side anyway)
- @state: no change
- If you have used @state with magic numbers, keep it
- at this step.
- @print_name: no change but type change (switch_dev->extcon_dev)
- @print_state: no change but type change (switch_dev->extcon_dev)
-
-- switch_dev_register(sdev, dev)
- => extcon_dev_register(edev)
- : type change (sdev->edev)
- : remove second param('dev'). if edev has parent device, should store
- 'dev' to 'edev.dev.parent' before registering extcon device
-- switch_dev_unregister(sdev)
- => extcon_dev_unregister(edev)
- : no change but type change (sdev->edev)
-- switch_get_state(sdev)
- => extcon_get_state(edev)
- : no change but type change (sdev->edev) and (return: int->u32)
-- switch_set_state(sdev, state)
- => extcon_set_state(edev, state)
- : no change but type change (sdev->edev) and (state: int->u32)
-
-With this changes, the ex-switch extcon class device works as it once
-worked as switch class device. However, it will now have additional
-interfaces (both ABI and in-kernel API) and different ABI locations.
-However, if CONFIG_ANDROID is enabled without CONFIG_ANDROID_SWITCH,
-/sys/class/switch/* will be symbolically linked to /sys/class/extcon/
-so that they are still compatible with legacy userspace processes.
-
-****** STEP 2. Multistate (no more magic numbers in state value)
- Extcon's extended features for switch device drivers with
- complex features usually required magic numbers in state
- value of switch_dev. With extcon, such magic numbers that
- support multiple cables are no more required or supported.
-
- 1. Define cable names at edev->supported_cable.
- 2. (Recommended) remove print_state callback.
- 3. Use extcon_get_cable_state_(edev, index) or
- extcon_get_cable_state(edev, cable_name) instead of
- extcon_get_state(edev) if you intend to get a state of a specific
- cable. Same for set_state. This way, you can remove the usage of
- magic numbers in state value.
- 4. Use extcon_update_state() if you are updating specific bits of
- the state value.
-
-Example: a switch device driver w/ magic numbers for two cables.
- "0x00": no cables connected.
- "0x01": cable 1 connected
- "0x02": cable 2 connected
- "0x03": cable 1 and 2 connected
- 1. edev->supported_cable = {"1", "2", NULL};
- 2. edev->print_state = NULL;
- 3. extcon_get_cable_state_(edev, 0) shows cable 1's state.
- extcon_get_cable_state(edev, "1") shows cable 1's state.
- extcon_set_cable_state_(edev, 1) sets cable 2's state.
- extcon_set_cable_state(edev, "2") sets cable 2's state
- 4. extcon_update_state(edev, 0x01, 0) sets the least bit's 0.
-
-****** STEP 3. Notify other device drivers
-
- You can notify others of the cable attach/detach events with
-notifier chains.
-
- At the side of other device drivers (the extcon device itself
-does not need to get notified of its own events), there are two
-methods to register notifier_block for cable events:
-(a) for a specific cable or (b) for every cable.
-
- (a) extcon_register_interest(obj, extcon_name, cable_name, nb)
- Example: want to get news of "MAX8997_MUIC"'s "USB" cable
-
- obj = kzalloc(sizeof(struct extcon_specific_cable_nb),
- GFP_KERNEL);
- nb->notifier_call = the_callback_to_handle_usb;
-
- extcon_register_intereset(obj, "MAX8997_MUIC", "USB", nb);
-
- (b) extcon_register_notifier(edev, nb)
- Call nb for any changes in edev.
-
- Please note that in order to properly behave with method (a),
-the extcon device driver should support multistate feature (STEP 2).
-
-****** STEP 4. Inter-cable relation (mutually exclusive)
-
- You can provide inter-cable mutually exclusiveness information
-for an extcon device. When cables A and B are declared to be mutually
-exclusive, the two cables cannot be in ATTACHED state simulteneously.
-
-
-/*****************************************************************
- * CHAPTER 2. *
- * PORTING USERSPACE w/ SWITCH CLASS DEVICE SUPPORT *
- *****************************************************************/
-
-****** ABI Location
-
- If "CONFIG_ANDROID" is enabled, /sys/class/switch/* are created
-as symbolic links to /sys/class/extcon/*.
-
- The two files of switch class, name and state, are provided with
-extcon, too. When the multistate support (STEP 2 of CHAPTER 1.) is
-not enabled or print_state callback is supplied, the output of
-state ABI is same with switch class.
diff --git a/Documentation/features/debug/kprobes-on-ftrace/arch-support.txt b/Documentation/features/debug/kprobes-on-ftrace/arch-support.txt
index f9133a921d5a..1e84be3c142e 100644
--- a/Documentation/features/debug/kprobes-on-ftrace/arch-support.txt
+++ b/Documentation/features/debug/kprobes-on-ftrace/arch-support.txt
@@ -26,7 +26,7 @@
| nios2: | TODO |
| openrisc: | TODO |
| parisc: | TODO |
- | powerpc: | TODO |
+ | powerpc: | ok |
| s390: | TODO |
| score: | TODO |
| sh: | TODO |
diff --git a/Documentation/filesystems/overlayfs.txt b/Documentation/filesystems/overlayfs.txt
index 634d03e20c2d..c9e884b52698 100644
--- a/Documentation/filesystems/overlayfs.txt
+++ b/Documentation/filesystems/overlayfs.txt
@@ -21,12 +21,19 @@ from accessing the corresponding object from the original filesystem.
This is most obvious from the 'st_dev' field returned by stat(2).
While directories will report an st_dev from the overlay-filesystem,
-all non-directory objects will report an st_dev from the lower or
+non-directory objects may report an st_dev from the lower filesystem or
upper filesystem that is providing the object. Similarly st_ino will
only be unique when combined with st_dev, and both of these can change
over the lifetime of a non-directory object. Many applications and
tools ignore these values and will not be affected.
+In the special case of all overlay layers on the same underlying
+filesystem, all objects will report an st_dev from the overlay
+filesystem and st_ino from the underlying filesystem. This will
+make the overlay mount more compliant with filesystem scanners, and
+overlay objects will be distinguishable from the corresponding
+objects in the original filesystem.
+
Upper and Lower
---------------
diff --git a/Documentation/filesystems/sysfs-pci.txt b/Documentation/filesystems/sysfs-pci.txt
index 6ea1ceda6f52..06f1d64c6f70 100644
--- a/Documentation/filesystems/sysfs-pci.txt
+++ b/Documentation/filesystems/sysfs-pci.txt
@@ -113,9 +113,18 @@ Supporting PCI access on new platforms
--------------------------------------
In order to support PCI resource mapping as described above, Linux platform
-code must define HAVE_PCI_MMAP and provide a pci_mmap_page_range function.
-Platforms are free to only support subsets of the mmap functionality, but
-useful return codes should be provided.
+code should ideally define ARCH_GENERIC_PCI_MMAP_RESOURCE and use the generic
+implementation of that functionality. To support the historical interface of
+mmap() through files in /proc/bus/pci, platforms may also set HAVE_PCI_MMAP.
+
+Alternatively, platforms which set HAVE_PCI_MMAP may provide their own
+implementation of pci_mmap_page_range() instead of defining
+ARCH_GENERIC_PCI_MMAP_RESOURCE.
+
+Platforms which support write-combining maps of PCI resources must define
+arch_can_pci_mmap_wc() which shall evaluate to non-zero at runtime when
+write-combining is permitted. Platforms which support maps of I/O resources
+define arch_can_pci_mmap_io() similarly.
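+
+For illustration only, a hypothetical <asm/pci.h> fragment (the values shown
+assume write-combining is always permitted on this imaginary platform) might
+contain:
+
+	#define ARCH_GENERIC_PCI_MMAP_RESOURCE	1
+	#define HAVE_PCI_MMAP			1
+	#define arch_can_pci_mmap_wc()		1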
Legacy resources are protected by the HAVE_PCI_LEGACY define. Platforms
wishing to support legacy functionality should define it and provide
diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt
index 94dd27ef4a76..f42b90687d40 100644
--- a/Documentation/filesystems/vfs.txt
+++ b/Documentation/filesystems/vfs.txt
@@ -694,8 +694,7 @@ struct address_space_operations {
write_end: After a successful write_begin, and data copy, write_end must
be called. len is the original len passed to write_begin, and copied
- is the amount that was able to be copied (copied == len is always true
- if write_begin was called with the AOP_FLAG_UNINTERRUPTIBLE flag).
+ is the amount that was able to be copied.
The filesystem must take care of unlocking the page and releasing it
refcount, and updating i_size.
diff --git a/Documentation/gpio/consumer.txt b/Documentation/gpio/consumer.txt
index 05676fdacfe3..912568baabb9 100644
--- a/Documentation/gpio/consumer.txt
+++ b/Documentation/gpio/consumer.txt
@@ -70,6 +70,12 @@ instead of -ENOENT if no GPIO has been assigned to the requested function:
unsigned int index,
enum gpiod_flags flags)
+Note that the gpiod_get*_optional() functions (and their managed variants),
+unlike the rest of the gpiolib API, also return NULL when gpiolib support is
+disabled. This is helpful to driver authors, since they do not need to
+special-case -ENOSYS return codes. System integrators should however be
+careful to enable gpiolib on systems that need it.
+
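+As an illustration only (a sketch for a hypothetical driver with an optional
+"reset" GPIO; the names are made up), the NULL return lets the normal code
+path be used unchanged, since gpiod calls quietly ignore a NULL descriptor:
+
+    #include <linux/device.h>
+    #include <linux/err.h>
+    #include <linux/gpio/consumer.h>
+
+    static int foo_assert_reset(struct device *dev)
+    {
+        struct gpio_desc *reset;
+
+        /* NULL if the GPIO is not described or gpiolib is disabled */
+        reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
+        if (IS_ERR(reset))
+            return PTR_ERR(reset); /* a real error, e.g. -EPROBE_DEFER */
+
+        /* safe even when reset is NULL: the call is then a no-op */
+        gpiod_set_value_cansleep(reset, 1);
+        return 0;
+    }
+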
For a function using multiple GPIOs all of those can be obtained with one call:
struct gpio_descs *gpiod_get_array(struct device *dev,
diff --git a/Documentation/gpu/introduction.rst b/Documentation/gpu/introduction.rst
index 05a82bdfbca4..fccbe375244d 100644
--- a/Documentation/gpu/introduction.rst
+++ b/Documentation/gpu/introduction.rst
@@ -85,3 +85,14 @@ This means that there's a blackout-period of about one month where feature work
can't be merged. The recommended way to deal with that is having a -next tree
that's always open, but making sure to not feed it into linux-next during the
blackout period. As an example, drm-misc works like that.
+
+Code of Conduct
+---------------
+
+As a freedesktop.org project, dri-devel and the DRM community follow the
+Contributor Covenant, found at: https://www.freedesktop.org/wiki/CodeOfConduct
+
+Please conduct yourself in a respectful and civilised manner when
+interacting with community members on mailing lists, IRC, or bug
+trackers. The community represents the project as a whole, and abusive
+or bullying behaviour is not tolerated by the project.
diff --git a/Documentation/ioctl/ioctl-number.txt b/Documentation/ioctl/ioctl-number.txt
index a77ead911956..1e9fcb4d0ec8 100644
--- a/Documentation/ioctl/ioctl-number.txt
+++ b/Documentation/ioctl/ioctl-number.txt
@@ -191,6 +191,7 @@ Code Seq#(hex) Include File Comments
'W' 00-1F linux/watchdog.h conflict!
'W' 00-1F linux/wanrouter.h conflict! (pre 3.9)
'W' 00-3F sound/asound.h conflict!
+'W' 40-5F drivers/pci/switch/switchtec.c
'X' all fs/xfs/xfs_fs.h conflict!
and fs/xfs/linux-2.6/xfs_ioctl32.h
and include/linux/falloc.h
@@ -308,6 +309,7 @@ Code Seq#(hex) Include File Comments
0xA3 80-8F Port ACL in development:
<mailto:tlewis@mindspring.com>
0xA3 90-9F linux/dtlk.h
+0xA4 00-1F uapi/linux/tee.h Generic TEE subsystem
0xAA 00-3F linux/uapi/linux/userfaultfd.h
0xAB 00-1F linux/nbd.h
0xAC 00-1F linux/raw.h
diff --git a/Documentation/kdump/kdump.txt b/Documentation/kdump/kdump.txt
index b0eb27b956d9..615434d81108 100644
--- a/Documentation/kdump/kdump.txt
+++ b/Documentation/kdump/kdump.txt
@@ -18,7 +18,7 @@ memory image to a dump file on the local disk, or across the network to
a remote system.
Kdump and kexec are currently supported on the x86, x86_64, ppc64, ia64,
-s390x and arm architectures.
+s390x, arm and arm64 architectures.
When the system kernel boots, it reserves a small section of memory for
the dump-capture kernel. This ensures that ongoing Direct Memory Access
@@ -249,6 +249,13 @@ Dump-capture kernel config options (Arch Dependent, arm)
AUTO_ZRELADDR=y
+Dump-capture kernel config options (Arch Dependent, arm64)
+----------------------------------------------------------
+
+- Please note that KVM in the dump-capture kernel will not be enabled
+ on non-VHE systems even if it is configured. This is because the CPU
+ will not be reset to EL2 on panic.
+
Extended crashkernel syntax
===========================
@@ -305,6 +312,8 @@ Boot into System Kernel
kernel will automatically locate the crash kernel image within the
first 512MB of RAM if X is not given.
+ On arm64, use "crashkernel=Y[@X]". Note that the start address of
+ the kernel, X if explicitly specified, must be aligned to 2MiB (0x200000).
Load the Dump-capture Kernel
============================
@@ -327,6 +336,8 @@ For s390x:
- Use image or bzImage
For arm:
- Use zImage
+For arm64:
+ - Use vmlinux or Image
If you are using a uncompressed vmlinux image then use following command
to load dump-capture kernel.
@@ -370,6 +381,9 @@ For s390x:
For arm:
"1 maxcpus=1 reset_devices"
+For arm64:
+ "1 maxcpus=1 reset_devices"
+
Notes on loading the dump-capture kernel:
* By default, the ELF headers are stored in ELF64 format to support
diff --git a/Documentation/kref.txt b/Documentation/kref.txt
index ddf85a5dde0c..d26a27ca964d 100644
--- a/Documentation/kref.txt
+++ b/Documentation/kref.txt
@@ -84,6 +84,7 @@ int my_data_handler(void)
task = kthread_run(more_data_handling, data, "more_data_handling");
if (task == ERR_PTR(-ENOMEM)) {
rv = -ENOMEM;
+ kref_put(&data->refcount, data_release);
goto out;
}
diff --git a/Documentation/media/kapi/cec-core.rst b/Documentation/media/kapi/cec-core.rst
index 81c6d8e93774..7a04c5386dc8 100644
--- a/Documentation/media/kapi/cec-core.rst
+++ b/Documentation/media/kapi/cec-core.rst
@@ -27,11 +27,8 @@ HDMI 1.3a specification is sufficient:
http://www.microprocessor.org/HDMISpecification13a.pdf
-The Kernel Interface
-====================
-
-CEC Adapter
------------
+CEC Adapter Interface
+---------------------
The struct cec_adapter represents the CEC adapter hardware. It is created by
calling cec_allocate_adapter() and deleted by calling cec_delete_adapter():
@@ -51,6 +48,7 @@ ops:
priv:
will be stored in adap->priv and can be used by the adapter ops.
+ Use cec_get_drvdata(adap) to get the priv pointer.
name:
the name of the CEC adapter. Note: this name will be copied.
@@ -65,6 +63,10 @@ available_las:
the number of simultaneous logical addresses that this
adapter can handle. Must be 1 <= available_las <= CEC_MAX_LOG_ADDRS.
+To obtain the priv pointer use this helper function:
+
+.. c:function::
+ void *cec_get_drvdata(const struct cec_adapter *adap);
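+
+For example (a sketch; ``struct mydrv`` is a hypothetical driver-private
+structure whose address was passed as the priv argument):
+
+.. code-block:: c
+
+	struct mydrv *state = cec_get_drvdata(adap);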
To register the /dev/cecX device node and the remote control device (if
CEC_CAP_RC is set) you call:
diff --git a/Documentation/media/kapi/csi2.rst b/Documentation/media/kapi/csi2.rst
index 2004db00b12b..e33fcb967922 100644
--- a/Documentation/media/kapi/csi2.rst
+++ b/Documentation/media/kapi/csi2.rst
@@ -45,10 +45,11 @@ where
* - bits_per_sample
- Number of bits per sample.
-The transmitter drivers must configure the CSI-2 transmitter to *LP-11
-mode* whenever the transmitter is powered on but not active. Some
-transmitters do this automatically but some have to be explicitly
-programmed to do so.
+The transmitter drivers must, if possible, configure the CSI-2
+transmitter to *LP-11 mode* whenever the transmitter is powered on but
+not active. Some transmitters do this automatically but some have to
+be explicitly programmed to do so, and some are unable to do so
+altogether due to hardware constraints.
Receiver drivers
----------------
diff --git a/Documentation/media/kapi/v4l2-core.rst b/Documentation/media/kapi/v4l2-core.rst
index e9677150ed99..d8f6c46d26d5 100644
--- a/Documentation/media/kapi/v4l2-core.rst
+++ b/Documentation/media/kapi/v4l2-core.rst
@@ -1,4 +1,4 @@
-Video2Linux devices
+Video4Linux devices
-------------------
.. toctree::
diff --git a/Documentation/media/lirc.h.rst.exceptions b/Documentation/media/lirc.h.rst.exceptions
index 246c850151d7..c130617a9986 100644
--- a/Documentation/media/lirc.h.rst.exceptions
+++ b/Documentation/media/lirc.h.rst.exceptions
@@ -35,7 +35,6 @@ ignore define PULSE_MASK
ignore define LIRC_MODE2_SPACE
ignore define LIRC_MODE2_PULSE
-ignore define LIRC_MODE2_TIMEOUT
ignore define LIRC_VALUE_MASK
ignore define LIRC_MODE2_MASK
diff --git a/Documentation/media/uapi/cec/cec-func-ioctl.rst b/Documentation/media/uapi/cec/cec-func-ioctl.rst
index 7dcfd178fb24..22fb6304a2df 100644
--- a/Documentation/media/uapi/cec/cec-func-ioctl.rst
+++ b/Documentation/media/uapi/cec/cec-func-ioctl.rst
@@ -30,7 +30,7 @@ Arguments
``request``
CEC ioctl request code as defined in the cec.h header file, for
- example :c:func:`CEC_ADAP_G_CAPS`.
+ example :ref:`CEC_ADAP_G_CAPS <CEC_ADAP_G_CAPS>`.
``argp``
Pointer to a request-specific structure.
diff --git a/Documentation/media/uapi/cec/cec-func-open.rst b/Documentation/media/uapi/cec/cec-func-open.rst
index 0304388cd159..18dfb62f2efe 100644
--- a/Documentation/media/uapi/cec/cec-func-open.rst
+++ b/Documentation/media/uapi/cec/cec-func-open.rst
@@ -33,7 +33,7 @@ Arguments
Open flags. Access mode must be ``O_RDWR``.
When the ``O_NONBLOCK`` flag is given, the
- :ref:`CEC_RECEIVE <CEC_RECEIVE>` and :c:func:`CEC_DQEVENT` ioctls
+ :ref:`CEC_RECEIVE <CEC_RECEIVE>` and :ref:`CEC_DQEVENT <CEC_DQEVENT>` ioctls
will return the ``EAGAIN`` error code when no message or event is available, and
ioctls :ref:`CEC_TRANSMIT <CEC_TRANSMIT>`,
:ref:`CEC_ADAP_S_PHYS_ADDR <CEC_ADAP_S_PHYS_ADDR>` and
diff --git a/Documentation/media/uapi/cec/cec-func-poll.rst b/Documentation/media/uapi/cec/cec-func-poll.rst
index 6a863cfda6e0..fa0abd8fb160 100644
--- a/Documentation/media/uapi/cec/cec-func-poll.rst
+++ b/Documentation/media/uapi/cec/cec-func-poll.rst
@@ -30,7 +30,7 @@ Arguments
List of FD events to be watched
``nfds``
- Number of FD efents at the \*ufds array
+ Number of FD events at the \*ufds array
``timeout``
Timeout to wait for events
@@ -49,7 +49,7 @@ is non-zero). CEC devices set the ``POLLIN`` and ``POLLRDNORM`` flags in
the ``revents`` field if there are messages in the receive queue. If the
transmit queue has room for new messages, the ``POLLOUT`` and
``POLLWRNORM`` flags are set. If there are events in the event queue,
-then the ``POLLPRI`` flag is set. When the function timed out it returns
+then the ``POLLPRI`` flag is set. When the function times out it returns
a value of zero, on failure it returns -1 and the ``errno`` variable is
set appropriately.
diff --git a/Documentation/media/uapi/cec/cec-ioc-adap-g-log-addrs.rst b/Documentation/media/uapi/cec/cec-ioc-adap-g-log-addrs.rst
index 09f09bbe28d4..fcf863ab6f43 100644
--- a/Documentation/media/uapi/cec/cec-ioc-adap-g-log-addrs.rst
+++ b/Documentation/media/uapi/cec/cec-ioc-adap-g-log-addrs.rst
@@ -351,3 +351,16 @@ On success 0 is returned, on error -1 and the ``errno`` variable is set
appropriately. The generic error codes are described at the
:ref:`Generic Error Codes <gen-errors>` chapter.
+The :ref:`ioctl CEC_ADAP_S_LOG_ADDRS <CEC_ADAP_S_LOG_ADDRS>` can return the following
+error codes:
+
+ENOTTY
+ The ``CEC_CAP_LOG_ADDRS`` capability wasn't set, so this ioctl is not supported.
+
+EBUSY
+ The CEC adapter is currently configuring itself, or it is already configured and
+ ``num_log_addrs`` is non-zero, or another filehandle is in exclusive follower or
+ initiator mode, or the filehandle is in mode ``CEC_MODE_NO_INITIATOR``.
+
+EINVAL
+ The contents of struct :c:type:`cec_log_addrs` is invalid.
diff --git a/Documentation/media/uapi/cec/cec-ioc-adap-g-phys-addr.rst b/Documentation/media/uapi/cec/cec-ioc-adap-g-phys-addr.rst
index a3cdc75cec3e..9e49d4be35d5 100644
--- a/Documentation/media/uapi/cec/cec-ioc-adap-g-phys-addr.rst
+++ b/Documentation/media/uapi/cec/cec-ioc-adap-g-phys-addr.rst
@@ -78,3 +78,16 @@ Return Value
On success 0 is returned, on error -1 and the ``errno`` variable is set
appropriately. The generic error codes are described at the
:ref:`Generic Error Codes <gen-errors>` chapter.
+
+The :ref:`ioctl CEC_ADAP_S_PHYS_ADDR <CEC_ADAP_S_PHYS_ADDR>` can return the following
+error codes:
+
+ENOTTY
+ The ``CEC_CAP_PHYS_ADDR`` capability wasn't set, so this ioctl is not supported.
+
+EBUSY
+ Another filehandle is in exclusive follower or initiator mode, or the filehandle
+ is in mode ``CEC_MODE_NO_INITIATOR``.
+
+EINVAL
+ The physical address is malformed.
diff --git a/Documentation/media/uapi/cec/cec-ioc-dqevent.rst b/Documentation/media/uapi/cec/cec-ioc-dqevent.rst
index 6e589a1fae17..4d3570c2e0b3 100644
--- a/Documentation/media/uapi/cec/cec-ioc-dqevent.rst
+++ b/Documentation/media/uapi/cec/cec-ioc-dqevent.rst
@@ -56,7 +56,7 @@ it is guaranteed that the state did change in between the two events.
* - __u16
- ``phys_addr``
- The current physical address. This is ``CEC_PHYS_ADDR_INVALID`` if no
- valid physical address is set.
+ valid physical address is set.
* - __u16
- ``log_addr_mask``
- The current set of claimed logical addresses. This is 0 if no logical
@@ -174,3 +174,14 @@ Return Value
On success 0 is returned, on error -1 and the ``errno`` variable is set
appropriately. The generic error codes are described at the
:ref:`Generic Error Codes <gen-errors>` chapter.
+
+The :ref:`ioctl CEC_DQEVENT <CEC_DQEVENT>` can return the following
+error codes:
+
+EAGAIN
+ This is returned when the filehandle is in non-blocking mode and there
+ are no pending events.
+
+ERESTARTSYS
+ An interrupt (e.g. Ctrl-C) arrived while in blocking mode waiting for
+ events to arrive.
diff --git a/Documentation/media/uapi/cec/cec-ioc-g-mode.rst b/Documentation/media/uapi/cec/cec-ioc-g-mode.rst
index e4ded9df0a84..664f0d47bbcd 100644
--- a/Documentation/media/uapi/cec/cec-ioc-g-mode.rst
+++ b/Documentation/media/uapi/cec/cec-ioc-g-mode.rst
@@ -249,3 +249,15 @@ Return Value
On success 0 is returned, on error -1 and the ``errno`` variable is set
appropriately. The generic error codes are described at the
:ref:`Generic Error Codes <gen-errors>` chapter.
+
+The :ref:`ioctl CEC_S_MODE <CEC_S_MODE>` can return the following
+error codes:
+
+EINVAL
+ The requested mode is invalid.
+
+EPERM
+    Monitor mode is requested without root permissions.
+
+EBUSY
+ Someone else is already an exclusive follower or initiator.
diff --git a/Documentation/media/uapi/cec/cec-ioc-receive.rst b/Documentation/media/uapi/cec/cec-ioc-receive.rst
index dc2adb391c0a..267044f7ac30 100644
--- a/Documentation/media/uapi/cec/cec-ioc-receive.rst
+++ b/Documentation/media/uapi/cec/cec-ioc-receive.rst
@@ -51,13 +51,13 @@ A received message can be:
be non-zero).
To send a CEC message the application has to fill in the struct
-:c:type:` cec_msg` and pass it to :ref:`ioctl CEC_TRANSMIT <CEC_TRANSMIT>`.
+:c:type:`cec_msg` and pass it to :ref:`ioctl CEC_TRANSMIT <CEC_TRANSMIT>`.
The :ref:`ioctl CEC_TRANSMIT <CEC_TRANSMIT>` is only available if
``CEC_CAP_TRANSMIT`` is set. If there is no more room in the transmit
queue, then it will return -1 and set errno to the ``EBUSY`` error code.
The transmit queue has enough room for 18 messages (about 1 second worth
of 2-byte messages). Note that the CEC kernel framework will also reply
-to core messages (see :ref:cec-core-processing), so it is not a good
+to core messages (see :ref:`cec-core-processing`), so it is not a good
idea to fully fill up the transmit queue.
If the file descriptor is in non-blocking mode then the transmit will
@@ -69,6 +69,18 @@ The ``sequence`` field is filled in for every transmit and this can be
checked against the received messages to find the corresponding transmit
result.
+Normally calling :ref:`ioctl CEC_TRANSMIT <CEC_TRANSMIT>` when the physical
+address is invalid (due to e.g. a disconnect) will return ``ENONET``.
+
+However, the CEC specification allows sending messages from 'Unregistered' to
+'TV' when the physical address is invalid since some TVs pull the hotplug detect
+pin of the HDMI connector low when they go into standby, or when switching to
+another input.
+
+When the hotplug detect pin goes low the EDID disappears, and thus the
+physical address, but the cable is still connected and CEC still works.
+In order to detect/wake up the device it is allowed to send poll and 'Image/Text
+View On' messages from initiator 0xf ('Unregistered') to destination 0 ('TV').
.. tabularcolumns:: |p{1.0cm}|p{3.5cm}|p{13.0cm}|
@@ -289,3 +301,42 @@ Return Value
On success 0 is returned, on error -1 and the ``errno`` variable is set
appropriately. The generic error codes are described at the
:ref:`Generic Error Codes <gen-errors>` chapter.
+
+The :ref:`ioctl CEC_RECEIVE <CEC_RECEIVE>` can return the following
+error codes:
+
+EAGAIN
+ No messages are in the receive queue, and the filehandle is in non-blocking mode.
+
+ETIMEDOUT
+ The ``timeout`` was reached while waiting for a message.
+
+ERESTARTSYS
+ The wait for a message was interrupted (e.g. by Ctrl-C).
+
+The :ref:`ioctl CEC_TRANSMIT <CEC_TRANSMIT>` can return the following
+error codes:
+
+ENOTTY
+ The ``CEC_CAP_TRANSMIT`` capability wasn't set, so this ioctl is not supported.
+
+EPERM
+ The CEC adapter is not configured, i.e. :ref:`ioctl CEC_ADAP_S_LOG_ADDRS <CEC_ADAP_S_LOG_ADDRS>`
+ has never been called.
+
+ENONET
+ The CEC adapter is not configured, i.e. :ref:`ioctl CEC_ADAP_S_LOG_ADDRS <CEC_ADAP_S_LOG_ADDRS>`
+ was called, but the physical address is invalid so no logical address was claimed.
+ An exception is made in this case for transmits from initiator 0xf ('Unregistered')
+ to destination 0 ('TV'). In that case the transmit will proceed as usual.
+
+EBUSY
+ Another filehandle is in exclusive follower or initiator mode, or the filehandle
+ is in mode ``CEC_MODE_NO_INITIATOR``. This is also returned if the transmit
+ queue is full.
+
+EINVAL
+    The contents of struct :c:type:`cec_msg` are invalid.
+
+ERESTARTSYS
+ The wait for a successful transmit was interrupted (e.g. by Ctrl-C).
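+
+As a minimal sketch (not a complete application), the transmit path described
+above could be exercised as follows; the device path ``/dev/cec0`` and the use
+of the 'Image View On' opcode (0x04) are assumptions made for illustration
+only::
+
+    #include <fcntl.h>
+    #include <stdio.h>
+    #include <string.h>
+    #include <errno.h>
+    #include <sys/ioctl.h>
+    #include <linux/cec.h>
+
+    int main(void)
+    {
+        struct cec_msg msg;
+        int fd = open("/dev/cec0", O_RDWR);
+
+        if (fd < 0)
+            return 1;
+
+        memset(&msg, 0, sizeof(msg));
+        /* Initiator 0xf ('Unregistered'), destination 0 ('TV') */
+        msg.msg[0] = (0xf << 4) | 0x0;
+        msg.msg[1] = 0x04; /* 'Image View On' */
+        msg.len = 2;
+
+        if (ioctl(fd, CEC_TRANSMIT, &msg)) {
+            if (errno == ENONET)
+                fprintf(stderr, "no valid physical address\n");
+            else if (errno == EBUSY)
+                fprintf(stderr, "queue full or exclusive mode active\n");
+            else
+                perror("CEC_TRANSMIT");
+        }
+        return 0;
+    }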
diff --git a/Documentation/media/uapi/mediactl/media-types.rst b/Documentation/media/uapi/mediactl/media-types.rst
index 3e03dc2e6003..2a5164aea2b4 100644
--- a/Documentation/media/uapi/mediactl/media-types.rst
+++ b/Documentation/media/uapi/mediactl/media-types.rst
@@ -284,7 +284,8 @@ Types and flags used to represent the media graph elements
supported scaling ratios is entity-specific and can differ
between the horizontal and vertical directions (in particular
scaling can be supported in one direction only). Binning and
- skipping are considered as scaling.
+ sub-sampling (occasionally also referred to as skipping) are
+ considered as scaling.
- .. row 28
diff --git a/Documentation/media/uapi/rc/lirc-dev-intro.rst b/Documentation/media/uapi/rc/lirc-dev-intro.rst
index ef97e40f2fd8..d1936eeb9ce0 100644
--- a/Documentation/media/uapi/rc/lirc-dev-intro.rst
+++ b/Documentation/media/uapi/rc/lirc-dev-intro.rst
@@ -27,6 +27,8 @@ What you should see for a chardev:
$ ls -l /dev/lirc*
crw-rw---- 1 root root 248, 0 Jul 2 22:20 /dev/lirc0
+.. _lirc_modes:
+
**********
LIRC modes
**********
@@ -38,25 +40,62 @@ on the following table.
``LIRC_MODE_MODE2``
- The driver returns a sequence of pulse and space codes to userspace.
+ The driver returns a sequence of pulse and space codes to userspace,
+ as a series of u32 values.
This mode is used only for IR receive.
+    The upper 8 bits determine the packet type, and the lower 24 bits
+    the payload. Use the ``LIRC_VALUE()`` macro to get the payload, and
+    the ``LIRC_MODE2()`` macro to get the type, which is one of the
+    following (a decoding sketch follows this list):
+
+ ``LIRC_MODE2_PULSE``
+
+ Signifies the presence of IR in microseconds.
+
+ ``LIRC_MODE2_SPACE``
+
+        Signifies the absence of IR in microseconds.
+
+ ``LIRC_MODE2_FREQUENCY``
+
+ If measurement of the carrier frequency was enabled with
+ :ref:`lirc_set_measure_carrier_mode` then this packet gives you
+ the carrier frequency in Hertz.
+
+ ``LIRC_MODE2_TIMEOUT``
+
+ If timeout reports are enabled with
+ :ref:`lirc_set_rec_timeout_reports`, when the timeout set with
+ :ref:`lirc_set_rec_timeout` expires due to no IR being detected,
+ this packet will be sent, with the number of microseconds with
+ no IR.
+
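+    As a rough decoding sketch (not taken from any driver, and assuming the
+    device node ``/dev/lirc0``), the packed u32 samples can be interpreted
+    with the macros from ``linux/lirc.h``::
+
+        #include <fcntl.h>
+        #include <stdio.h>
+        #include <unistd.h>
+        #include <linux/lirc.h>
+
+        int main(void)
+        {
+            unsigned int buf[256];
+            ssize_t n, i;
+            int fd = open("/dev/lirc0", O_RDONLY);
+
+            if (fd < 0)
+                return 1;
+
+            n = read(fd, buf, sizeof(buf));
+            if (n <= 0)
+                return 1;
+
+            for (i = 0; i < n / (ssize_t)sizeof(buf[0]); i++) {
+                unsigned int val = LIRC_VALUE(buf[i]);
+
+                switch (LIRC_MODE2(buf[i])) {
+                case LIRC_MODE2_PULSE:
+                    printf("pulse %u\n", val);
+                    break;
+                case LIRC_MODE2_SPACE:
+                    printf("space %u\n", val);
+                    break;
+                case LIRC_MODE2_FREQUENCY:
+                    printf("carrier %u Hz\n", val);
+                    break;
+                case LIRC_MODE2_TIMEOUT:
+                    printf("timeout after %u\n", val);
+                    break;
+                }
+            }
+            return 0;
+        }
+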
.. _lirc-mode-lirccode:
``LIRC_MODE_LIRCCODE``
- The IR signal is decoded internally by the receiver. The LIRC interface
- returns the scancode as an integer value. This is the usual mode used
- by several TV media cards.
+ This mode can be used for IR receive and send.
- This mode is used only for IR receive.
+    The IR signal is decoded internally by the receiver, or encoded by the
+    transmitter. The LIRC interface represents the scancode as a byte string,
+    which is not necessarily a u32; it can be any length. The value is
+    entirely driver dependent. This mode is used by some older lirc drivers.
+
+ The length of each code depends on the driver, which can be retrieved
+ with :ref:`lirc_get_length`. This length is used both
+ for transmitting and receiving IR.
.. _lirc-mode-pulse:
``LIRC_MODE_PULSE``
- On puse mode, a sequence of pulse/space integer values are written to the
- lirc device using :Ref:`lirc-write`.
+ In pulse mode, a sequence of pulse/space integer values are written to the
+ lirc device using :ref:`lirc-write`.
+
+ The values are alternating pulse and space lengths, in microseconds. The
+ first and last entry must be a pulse, so there must be an odd number
+ of entries.
This mode is used only for IR send.
diff --git a/Documentation/media/uapi/rc/lirc-get-features.rst b/Documentation/media/uapi/rc/lirc-get-features.rst
index 79e07b4d44d6..64f89a4f9d9c 100644
--- a/Documentation/media/uapi/rc/lirc-get-features.rst
+++ b/Documentation/media/uapi/rc/lirc-get-features.rst
@@ -48,8 +48,8 @@ LIRC features
``LIRC_CAN_REC_PULSE``
- The driver is capable of receiving using
- :ref:`LIRC_MODE_PULSE <lirc-mode-pulse>`.
+ Unused. Kept just to avoid breaking uAPI.
+ :ref:`LIRC_MODE_PULSE <lirc-mode-pulse>` can only be used for transmitting.
.. _LIRC-CAN-REC-MODE2:
@@ -156,19 +156,22 @@ LIRC features
``LIRC_CAN_SEND_PULSE``
- The driver supports sending using :ref:`LIRC_MODE_PULSE <lirc-mode-pulse>`.
+    The driver supports sending (also called IR blasting or IR TX) using
+ :ref:`LIRC_MODE_PULSE <lirc-mode-pulse>`.
.. _LIRC-CAN-SEND-MODE2:
``LIRC_CAN_SEND_MODE2``
- The driver supports sending using :ref:`LIRC_MODE_MODE2 <lirc-mode-mode2>`.
+ Unused. Kept just to avoid breaking uAPI.
+ :ref:`LIRC_MODE_MODE2 <lirc-mode-mode2>` can only be used for receiving.
.. _LIRC-CAN-SEND-LIRCCODE:
``LIRC_CAN_SEND_LIRCCODE``
- The driver supports sending codes (also called as IR blasting or IR TX).
+    The driver supports sending (also called IR blasting or IR TX) using
+ :ref:`LIRC_MODE_LIRCCODE <lirc-mode-LIRCCODE>`.
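+
+Whether a given driver actually implements a mode can be checked with a sketch
+like the following (illustrative only)::
+
+    #include <stdio.h>
+    #include <sys/ioctl.h>
+    #include <linux/lirc.h>
+
+    static void print_features(int fd)
+    {
+        unsigned int features = 0;
+
+        if (ioctl(fd, LIRC_GET_FEATURES, &features))
+            return;
+
+        if (features & LIRC_CAN_REC_MODE2)
+            printf("can receive raw IR (mode2)\n");
+        if (features & LIRC_CAN_SEND_PULSE)
+            printf("can transmit in pulse mode\n");
+        if (features & LIRC_CAN_SEND_LIRCCODE)
+            printf("can transmit in lirccode mode\n");
+    }
+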
Return Value
diff --git a/Documentation/media/uapi/rc/lirc-get-length.rst b/Documentation/media/uapi/rc/lirc-get-length.rst
index 8c2747c8d2c9..3990af5de0e9 100644
--- a/Documentation/media/uapi/rc/lirc-get-length.rst
+++ b/Documentation/media/uapi/rc/lirc-get-length.rst
@@ -30,7 +30,8 @@ Arguments
Description
===========
-Retrieves the code length in bits (only for ``LIRC-MODE-LIRCCODE``).
+Retrieves the code length in bits (only for
+:ref:`LIRC_MODE_LIRCCODE <lirc-mode-lirccode>`).
Reads on the device must be done in blocks matching the bit count.
 The bit count should be rounded up so that it matches full bytes.
diff --git a/Documentation/media/uapi/rc/lirc-get-rec-mode.rst b/Documentation/media/uapi/rc/lirc-get-rec-mode.rst
index a5023e0194c1..a4eb6c0a26e9 100644
--- a/Documentation/media/uapi/rc/lirc-get-rec-mode.rst
+++ b/Documentation/media/uapi/rc/lirc-get-rec-mode.rst
@@ -35,8 +35,8 @@ Description
Get/set supported receive modes. Only :ref:`LIRC_MODE_MODE2 <lirc-mode-mode2>`
and :ref:`LIRC_MODE_LIRCCODE <lirc-mode-lirccode>` are supported for IR
-receive.
-
+receive. Use :ref:`lirc_get_features` to find out which modes the driver
+supports.
Return Value
============
diff --git a/Documentation/media/uapi/rc/lirc-get-send-mode.rst b/Documentation/media/uapi/rc/lirc-get-send-mode.rst
index 51ac13428969..a169b234290e 100644
--- a/Documentation/media/uapi/rc/lirc-get-send-mode.rst
+++ b/Documentation/media/uapi/rc/lirc-get-send-mode.rst
@@ -34,9 +34,12 @@ Arguments
Description
===========
-Get/set supported transmit mode.
+Get/set current transmit mode.
-Only :ref:`LIRC_MODE_PULSE <lirc-mode-pulse>` is supported by for IR send.
+Only :ref:`LIRC_MODE_PULSE <lirc-mode-pulse>` and
+:ref:`LIRC_MODE_LIRCCODE <lirc-mode-lirccode>` are supported for IR send,
+depending on the driver. Use :ref:`lirc_get_features` to find out which
+modes the driver supports.
Return Value
============
diff --git a/Documentation/media/uapi/rc/lirc-read.rst b/Documentation/media/uapi/rc/lirc-read.rst
index 4c678f60e872..ff14a69104e5 100644
--- a/Documentation/media/uapi/rc/lirc-read.rst
+++ b/Documentation/media/uapi/rc/lirc-read.rst
@@ -44,17 +44,13 @@ descriptor ``fd`` into the buffer starting at ``buf``. If ``count`` is zero,
:ref:`read() <lirc-read>` returns zero and has no other results. If ``count``
is greater than ``SSIZE_MAX``, the result is unspecified.
-The lircd userspace daemon reads raw IR data from the LIRC chardev. The
-exact format of the data depends on what modes a driver supports, and
-what mode has been selected. lircd obtains supported modes and sets the
-active mode via the ioctl interface, detailed at :ref:`lirc_func`.
-The generally preferred mode for receive is
-:ref:`LIRC_MODE_MODE2 <lirc-mode-mode2>`, in which packets containing an
-int value describing an IR signal are read from the chardev.
+The exact format of the data depends on what :ref:`lirc_modes` a driver
+uses. Use :ref:`lirc_get_features` to get the supported mode.
-See also
-`http://www.lirc.org/html/technical.html <http://www.lirc.org/html/technical.html>`__
-for more info.
+The generally preferred mode for receive is
+:ref:`LIRC_MODE_MODE2 <lirc-mode-mode2>`,
+in which packets containing an int value describing an IR signal are
+read from the chardev.
Return Value
============
diff --git a/Documentation/media/uapi/rc/lirc-set-rec-carrier-range.rst b/Documentation/media/uapi/rc/lirc-set-rec-carrier-range.rst
index a83fbbfa0d3b..a89246806c4b 100644
--- a/Documentation/media/uapi/rc/lirc-set-rec-carrier-range.rst
+++ b/Documentation/media/uapi/rc/lirc-set-rec-carrier-range.rst
@@ -9,7 +9,7 @@ ioctl LIRC_SET_REC_CARRIER_RANGE
Name
====
-LIRC_SET_REC_CARRIER_RANGE - Set lower bond of the carrier used to modulate
+LIRC_SET_REC_CARRIER_RANGE - Set lower bound of the carrier used to modulate
IR receive.
Synopsis
diff --git a/Documentation/media/uapi/rc/lirc-set-rec-timeout-reports.rst b/Documentation/media/uapi/rc/lirc-set-rec-timeout-reports.rst
index 9c501bbf4c62..86353e602695 100644
--- a/Documentation/media/uapi/rc/lirc-set-rec-timeout-reports.rst
+++ b/Documentation/media/uapi/rc/lirc-set-rec-timeout-reports.rst
@@ -31,6 +31,8 @@ Arguments
Description
===========
+.. _lirc-mode2-timeout:
+
Enable or disable timeout reports for IR receive. By default, timeout reports
should be turned off.
diff --git a/Documentation/media/uapi/rc/lirc-write.rst b/Documentation/media/uapi/rc/lirc-write.rst
index 3b035c6613b1..2aad0fef4a5b 100644
--- a/Documentation/media/uapi/rc/lirc-write.rst
+++ b/Documentation/media/uapi/rc/lirc-write.rst
@@ -42,13 +42,16 @@ Description
referenced by the file descriptor ``fd`` from the buffer starting at
``buf``.
-The data written to the chardev is a pulse/space sequence of integer
-values. Pulses and spaces are only marked implicitly by their position.
-The data must start and end with a pulse, therefore, the data must
-always include an uneven number of samples. The write function must
-block until the data has been transmitted by the hardware. If more data
-is provided than the hardware can send, the driver returns ``EINVAL``.
-
+The exact format of the data depends on what mode a driver uses. Use
+:ref:`lirc_get_features` to get the supported mode.
+
+When in :ref:`LIRC_MODE_PULSE <lirc-mode-PULSE>` mode, the data written to
+the chardev is a pulse/space sequence of integer values. Pulses and spaces
+are only marked implicitly by their position. The data must start and end
+with a pulse; therefore, the data must always include an odd number of
+samples. The write function must block until the data has been transmitted
+by the hardware. If more data is provided than the hardware can send, the
+driver returns ``EINVAL``.
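+
+As an illustrative sketch only (the device node ``/dev/lirc0`` and the sample
+durations are made up for this example), a small transmission in this mode
+could look like::
+
+    #include <fcntl.h>
+    #include <unistd.h>
+    #include <sys/ioctl.h>
+    #include <linux/lirc.h>
+
+    int main(void)
+    {
+        /* Odd number of entries: pulse, space, ..., pulse (microseconds) */
+        unsigned int seq[] = { 9000, 4500, 560, 560, 560, 1690, 560 };
+        unsigned int mode = LIRC_MODE_PULSE;
+        int fd = open("/dev/lirc0", O_WRONLY);
+
+        if (fd < 0)
+            return 1;
+
+        /* Select pulse mode before writing, if the driver supports it */
+        ioctl(fd, LIRC_SET_SEND_MODE, &mode);
+
+        /* Blocks until the hardware has transmitted the whole sequence */
+        if (write(fd, seq, sizeof(seq)) < 0)
+            return 1;
+
+        close(fd);
+        return 0;
+    }
+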
Return Value
============
diff --git a/Documentation/media/uapi/v4l/buffer.rst b/Documentation/media/uapi/v4l/buffer.rst
index ac58966ccb9b..ae6ee73f151c 100644
--- a/Documentation/media/uapi/v4l/buffer.rst
+++ b/Documentation/media/uapi/v4l/buffer.rst
@@ -34,6 +34,125 @@ flags are copied from the OUTPUT video buffer to the CAPTURE video
buffer.
+Interactions between formats, controls and buffers
+==================================================
+
+V4L2 exposes parameters that influence the buffer size, or the way data is
+laid out in the buffer. Those parameters are exposed through both formats and
+controls. One example of such a control is the ``V4L2_CID_ROTATE`` control
+that modifies the direction in which pixels are stored in the buffer, as well
+as the buffer size when the selected format includes padding at the end of
+lines.
+
+The set of information needed to interpret the content of a buffer (e.g. the
+pixel format, the line stride, the tiling orientation or the rotation) is
+collectively referred to in the rest of this section as the buffer layout.
+
+Controls that can modify the buffer layout shall set the
+``V4L2_CTRL_FLAG_MODIFY_LAYOUT`` flag.
+
+Modifying formats or controls that influence the buffer size or layout requires
+the stream to be stopped. Any attempt at such a modification while the stream
+is active shall cause the ioctl setting the format or the control to return
+the ``EBUSY`` error code. In that case drivers shall also set the
+``V4L2_CTRL_FLAG_GRABBED`` flag when calling
+:c:func:`VIDIOC_QUERYCTRL` or :c:func:`VIDIOC_QUERY_EXT_CTRL` for such a
+control while the stream is active.
+
+.. note::
+
+ The :c:func:`VIDIOC_S_SELECTION` ioctl can, depending on the hardware (for
+ instance if the device doesn't include a scaler), modify the format in
+ addition to the selection rectangle. Similarly, the
+ :c:func:`VIDIOC_S_INPUT`, :c:func:`VIDIOC_S_OUTPUT`, :c:func:`VIDIOC_S_STD`
+ and :c:func:`VIDIOC_S_DV_TIMINGS` ioctls can also modify the format and
+ selection rectangles. When those ioctls result in a buffer size or layout
+ change, drivers shall handle that condition as they would handle it in the
+ :c:func:`VIDIOC_S_FMT` ioctl in all cases described in this section.
+
+Controls that only influence the buffer layout can be modified at any time
+when the stream is stopped. As they don't influence the buffer size, no
+special handling is needed to synchronize those controls with buffer
+allocation and the ``V4L2_CTRL_FLAG_GRABBED`` flag is cleared once the
+stream is stopped.
+
+Formats and controls that influence the buffer size interact with buffer
+allocation. The simplest way to handle this is for drivers to always require
+buffers to be reallocated in order to change those formats or controls. In
+that case, to perform such changes, userspace applications shall first stop
+the video stream with the :c:func:`VIDIOC_STREAMOFF` ioctl if it is running
+and free all buffers with the :c:func:`VIDIOC_REQBUFS` ioctl if they are
+allocated. After freeing all buffers the ``V4L2_CTRL_FLAG_GRABBED`` flag
+for controls is cleared. The format or controls can then be modified, and
+buffers shall then be reallocated and the stream restarted. A typical ioctl
+sequence is
+
+ #. VIDIOC_STREAMOFF
+ #. VIDIOC_REQBUFS(0)
+ #. VIDIOC_S_EXT_CTRLS
+ #. VIDIOC_S_FMT
+ #. VIDIOC_REQBUFS(n)
+ #. VIDIOC_QBUF
+ #. VIDIOC_STREAMON
+
+The second :c:func:`VIDIOC_REQBUFS` call will take the new format and control
+value into account to compute the buffer size to allocate. Applications can
+also retrieve the size by calling the :c:func:`VIDIOC_G_FMT` ioctl if needed.
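+
+A condensed C sketch of this reallocation sequence is shown below. It is not a
+complete capture program: buffer mapping and the ``VIDIOC_QBUF`` /
+``VIDIOC_STREAMON`` steps are omitted, and the use of ``V4L2_CID_ROTATE`` and
+of four buffers is only an example::
+
+    #include <sys/ioctl.h>
+    #include <linux/videodev2.h>
+
+    static void change_rotation(int fd)
+    {
+        struct v4l2_ext_control ctrl = { .id = V4L2_CID_ROTATE, .value = 90 };
+        struct v4l2_ext_controls ctrls = { .count = 1, .controls = &ctrl };
+        struct v4l2_requestbuffers req = { .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
+                                           .memory = V4L2_MEMORY_MMAP };
+        struct v4l2_format fmt = { .type = V4L2_BUF_TYPE_VIDEO_CAPTURE };
+        int type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+
+        ioctl(fd, VIDIOC_STREAMOFF, &type);     /* 1. stop the stream */
+        req.count = 0;
+        ioctl(fd, VIDIOC_REQBUFS, &req);        /* 2. free all buffers */
+        ioctl(fd, VIDIOC_S_EXT_CTRLS, &ctrls);  /* 3. change the control */
+        ioctl(fd, VIDIOC_G_FMT, &fmt);
+        ioctl(fd, VIDIOC_S_FMT, &fmt);          /* 4. set the format */
+        req.count = 4;
+        ioctl(fd, VIDIOC_REQBUFS, &req);        /* 5. reallocate buffers */
+        /* 6./7. queue the new buffers and restart streaming (not shown) */
+    }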
+
+.. note::
+
+ The API doesn't mandate the above order for control (3.) and format (4.)
+ changes. Format and controls can be set in a different order, or even
+ interleaved, depending on the device and use case. For instance some
+ controls might behave differently for different pixel formats, in which
+ case the format might need to be set first.
+
+When reallocation is required, any attempt to modify format or controls that
+influence the buffer size while buffers are allocated shall cause the format
+or control set ioctl to return the ``EBUSY`` error. Any attempt to queue a
+buffer too small for the current format or controls shall cause the
+:c:func:`VIDIOC_QBUF` ioctl to return an ``EINVAL`` error.
+
+Buffer reallocation is an expensive operation. To avoid that cost, drivers can
+(and are encouraged to) allow format or controls that influence the buffer
+size to be changed with buffers allocated. In that case, a typical ioctl
+sequence to modify format and controls is
+
+ #. VIDIOC_STREAMOFF
+ #. VIDIOC_S_EXT_CTRLS
+ #. VIDIOC_S_FMT
+ #. VIDIOC_QBUF
+ #. VIDIOC_STREAMON
+
+For this sequence to operate correctly, queued buffers need to be large enough
+for the new format or controls. Drivers shall return an ``ENOSPC`` error in
+response to format change (:c:func:`VIDIOC_S_FMT`) or control changes
+(:c:func:`VIDIOC_S_CTRL` or :c:func:`VIDIOC_S_EXT_CTRLS`) if buffers too small
+for the new format are currently queued. As a simplification, drivers are
+allowed to return an ``EBUSY`` error from these ioctls if any buffer is
+currently queued, without checking the queued buffers sizes.
+
+Additionally, drivers shall return an ``EINVAL`` error from the
+:c:func:`VIDIOC_QBUF` ioctl if the buffer being queued is too small for the
+current format or controls. Together, these requirements ensure that queued
+buffers will always be large enough for the configured format and controls.
+
+Userspace applications can query the buffer size required for a given format
+and controls by first setting the desired control values and then trying the
+desired format. The :c:func:`VIDIOC_TRY_FMT` ioctl will return the required
+buffer size.
+
+ #. VIDIOC_S_EXT_CTRLS(x)
+ #. VIDIOC_TRY_FMT()
+ #. VIDIOC_S_EXT_CTRLS(y)
+ #. VIDIOC_TRY_FMT()
+
+The :c:func:`VIDIOC_CREATE_BUFS` ioctl can then be used to allocate buffers
+based on the queried sizes (for instance by allocating a set of buffers large
+enough for all the desired formats and controls, or by allocating separate set
+of appropriately sized buffers for each use case).
+
+
.. c:type:: v4l2_buffer
struct v4l2_buffer
@@ -330,6 +449,9 @@ enum v4l2_buf_type
- 12
- Buffer for Software Defined Radio (SDR) output stream, see
:ref:`sdr`.
+ * - ``V4L2_BUF_TYPE_META_CAPTURE``
+ - 13
+ - Buffer for metadata capture, see :ref:`metadata`.
diff --git a/Documentation/media/uapi/v4l/depth-formats.rst b/Documentation/media/uapi/v4l/depth-formats.rst
index 82f183870aae..d1641e9687a6 100644
--- a/Documentation/media/uapi/v4l/depth-formats.rst
+++ b/Documentation/media/uapi/v4l/depth-formats.rst
@@ -12,4 +12,5 @@ Depth data provides distance to points, mapped onto the image plane
.. toctree::
:maxdepth: 1
+ pixfmt-inzi
pixfmt-z16
diff --git a/Documentation/media/uapi/v4l/dev-capture.rst b/Documentation/media/uapi/v4l/dev-capture.rst
index 32b32055d070..4218742ab5d9 100644
--- a/Documentation/media/uapi/v4l/dev-capture.rst
+++ b/Documentation/media/uapi/v4l/dev-capture.rst
@@ -42,8 +42,8 @@ Video capture devices shall support :ref:`audio input <audio>`,
:ref:`tuner`, :ref:`controls <control>`,
:ref:`cropping and scaling <crop>` and
:ref:`streaming parameter <streaming-par>` ioctls as needed. The
-:ref:`video input <video>` and :ref:`video standard <standard>`
-ioctls must be supported by all video capture devices.
+:ref:`video input <video>` ioctls must be supported by all video
+capture devices.
Image Format Negotiation
diff --git a/Documentation/media/uapi/v4l/dev-meta.rst b/Documentation/media/uapi/v4l/dev-meta.rst
new file mode 100644
index 000000000000..62518adfe37b
--- /dev/null
+++ b/Documentation/media/uapi/v4l/dev-meta.rst
@@ -0,0 +1,58 @@
+.. -*- coding: utf-8; mode: rst -*-
+
+.. _metadata:
+
+******************
+Metadata Interface
+******************
+
+Metadata refers to any non-image data that supplements video frames with
+additional information. This may include statistics computed over the image
+or frame capture parameters supplied by the image source. This interface is
+intended for transfer of metadata to userspace and control of that operation.
+
+The metadata interface is implemented on video capture device nodes. The device
+can be dedicated to metadata or can implement both video and metadata capture
+as specified in its reported capabilities.
+
+Querying Capabilities
+=====================
+
+Device nodes supporting the metadata interface set the ``V4L2_CAP_META_CAPTURE``
+flag in the ``device_caps`` field of the
+:c:type:`v4l2_capability` structure returned by the :c:func:`VIDIOC_QUERYCAP`
+ioctl. That flag means the device can capture metadata to memory.
+
+At least one of the read/write or streaming I/O methods must be supported.
+
+
+Data Format Negotiation
+=======================
+
+The metadata device uses the :ref:`format` ioctls to select the capture format.
+The metadata buffer content format is bound to that selected format. In addition
+to the basic :ref:`format` ioctls, the :c:func:`VIDIOC_ENUM_FMT` ioctl must be
+supported as well.
+
+To use the :ref:`format` ioctls applications set the ``type`` field of the
+:c:type:`v4l2_format` structure to ``V4L2_BUF_TYPE_META_CAPTURE`` and use the
+:c:type:`v4l2_meta_format` ``meta`` member of the ``fmt`` union as needed per
+the desired operation. Both drivers and applications must set the remainder of
+the :c:type:`v4l2_format` structure to 0.
+
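+A short sketch of such a negotiation is given below; the FourCC 'VSPH' (the
+VSP1 HGO histogram format) is only used as an example of a metadata format::
+
+    #include <string.h>
+    #include <sys/ioctl.h>
+    #include <linux/videodev2.h>
+
+    static int set_meta_format(int fd)
+    {
+        struct v4l2_format fmt;
+
+        memset(&fmt, 0, sizeof(fmt));
+        fmt.type = V4L2_BUF_TYPE_META_CAPTURE;
+        fmt.fmt.meta.dataformat = v4l2_fourcc('V', 'S', 'P', 'H');
+
+        if (ioctl(fd, VIDIOC_S_FMT, &fmt))
+            return -1;
+
+        /* The driver fills in the buffer size it requires */
+        return (int)fmt.fmt.meta.buffersize;
+    }
+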
+.. _v4l2-meta-format:
+
+.. flat-table:: struct v4l2_meta_format
+ :header-rows: 0
+ :stub-columns: 0
+ :widths: 1 1 2
+
+ * - __u32
+ - ``dataformat``
+ - The data format, set by the application. This is a little endian
+ :ref:`four character code <v4l2-fourcc>`. V4L2 defines metadata formats
+ in :ref:`meta-formats`.
+ * - __u32
+ - ``buffersize``
+ - Maximum buffer size in bytes required for data. The value is set by the
+ driver.
diff --git a/Documentation/media/uapi/v4l/dev-output.rst b/Documentation/media/uapi/v4l/dev-output.rst
index 25ae8ec96fdf..342eb4931f5c 100644
--- a/Documentation/media/uapi/v4l/dev-output.rst
+++ b/Documentation/media/uapi/v4l/dev-output.rst
@@ -40,8 +40,8 @@ Video output devices shall support :ref:`audio output <audio>`,
:ref:`modulator <tuner>`, :ref:`controls <control>`,
:ref:`cropping and scaling <crop>` and
:ref:`streaming parameter <streaming-par>` ioctls as needed. The
-:ref:`video output <video>` and :ref:`video standard <standard>`
-ioctls must be supported by all video output devices.
+:ref:`video output <video>` ioctls must be supported by all video
+output devices.
Image Format Negotiation
diff --git a/Documentation/media/uapi/v4l/devices.rst b/Documentation/media/uapi/v4l/devices.rst
index 5c3d6c29e12c..fb7f8c26cf09 100644
--- a/Documentation/media/uapi/v4l/devices.rst
+++ b/Documentation/media/uapi/v4l/devices.rst
@@ -25,3 +25,4 @@ Interfaces
dev-touch
dev-event
dev-subdev
+ dev-meta
diff --git a/Documentation/media/uapi/v4l/meta-formats.rst b/Documentation/media/uapi/v4l/meta-formats.rst
new file mode 100644
index 000000000000..01e24e3df571
--- /dev/null
+++ b/Documentation/media/uapi/v4l/meta-formats.rst
@@ -0,0 +1,16 @@
+.. -*- coding: utf-8; mode: rst -*-
+
+.. _meta-formats:
+
+****************
+Metadata Formats
+****************
+
+These formats are used for the :ref:`metadata` interface only.
+
+
+.. toctree::
+ :maxdepth: 1
+
+ pixfmt-meta-vsp1-hgo
+ pixfmt-meta-vsp1-hgt
diff --git a/Documentation/media/uapi/v4l/pixfmt-007.rst b/Documentation/media/uapi/v4l/pixfmt-007.rst
index 95a23a28c595..0c30ee2577d3 100644
--- a/Documentation/media/uapi/v4l/pixfmt-007.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-007.rst
@@ -174,7 +174,7 @@ this colorspace:
The xvYCC 709 encoding (``V4L2_YCBCR_ENC_XV709``, :ref:`xvycc`) is
similar to the Rec. 709 encoding, but it allows for R', G' and B' values
that are outside the range [0…1]. The resulting Y', Cb and Cr values are
-scaled and offset:
+scaled and offset according to the limited range formula:
.. math::
@@ -187,7 +187,7 @@ scaled and offset:
The xvYCC 601 encoding (``V4L2_YCBCR_ENC_XV601``, :ref:`xvycc`) is
similar to the BT.601 encoding, but it allows for R', G' and B' values
that are outside the range [0…1]. The resulting Y', Cb and Cr values are
-scaled and offset:
+scaled and offset according to the limited range formula:
.. math::
@@ -198,9 +198,14 @@ scaled and offset:
Cr = \frac{224}{256} * (0.5R' - 0.4187G' - 0.0813B')
Y' is clamped to the range [0…1] and Cb and Cr are clamped to the range
-[-0.5…0.5]. The non-standard xvYCC 709 or xvYCC 601 encodings can be
+[-0.5…0.5] and quantized without further scaling or offsets.
+The non-standard xvYCC 709 or xvYCC 601 encodings can be
used by selecting ``V4L2_YCBCR_ENC_XV709`` or ``V4L2_YCBCR_ENC_XV601``.
-The xvYCC encodings always use full range quantization.
+As can be seen from the xvYCC formulas, these encodings always use limited
+range quantization; there is no full range variant. The whole point of these
+extended gamut encodings
+is that values outside the limited range are still valid, although they
+map to R', G' and B' values outside the [0…1] range and are therefore outside
+the Rec. 709 colorspace gamut.
.. _col-srgb:
diff --git a/Documentation/media/uapi/v4l/pixfmt-inzi.rst b/Documentation/media/uapi/v4l/pixfmt-inzi.rst
new file mode 100644
index 000000000000..9849e799f205
--- /dev/null
+++ b/Documentation/media/uapi/v4l/pixfmt-inzi.rst
@@ -0,0 +1,81 @@
+.. -*- coding: utf-8; mode: rst -*-
+
+.. _V4L2-PIX-FMT-INZI:
+
+**************************
+V4L2_PIX_FMT_INZI ('INZI')
+**************************
+
+Infrared 10-bit linked with Depth 16-bit images
+
+
+Description
+===========
+
+Proprietary multi-planar format used by Intel SR300 Depth cameras, comprising an
+Infrared image followed by Depth data. The pixel definition is 32-bpp,
+with the Depth and Infrared Data split into separate continuous planes of
+identical dimensions.
+
+
+
+The first plane - Infrared data - is stored according to
+:ref:`V4L2_PIX_FMT_Y10 <V4L2-PIX-FMT-Y10>` greyscale format.
+Each pixel is a 16-bit cell, with the actual data stored in the 10 LSBs
+with values in range 0 to 1023.
+The six remaining MSBs are padded with zeros.
+
+
+The second plane provides 16-bit per-pixel Depth data arranged in
+:ref:`V4L2-PIX-FMT-Z16 <V4L2-PIX-FMT-Z16>` format.
+
+
+**Frame Structure.**
+Each cell is a 16-bit word with more significant data stored at higher
+memory address (byte order is little-endian).
+
+.. raw:: latex
+
+ \newline\newline\begin{adjustbox}{width=\columnwidth}
+
+.. tabularcolumns:: |p{4.0cm}|p{4.0cm}|p{4.0cm}|p{4.0cm}|p{4.0cm}|p{4.0cm}|
+
+.. flat-table::
+ :header-rows: 0
+ :stub-columns: 1
+ :widths: 1 1 1 1 1 1
+
+ * - Ir\ :sub:`0,0`
+ - Ir\ :sub:`0,1`
+ - Ir\ :sub:`0,2`
+ - ...
+ - ...
+ - ...
+ * - :cspan:`5` ...
+ * - :cspan:`5` Infrared Data
+ * - :cspan:`5` ...
+ * - ...
+ - ...
+ - ...
+ - Ir\ :sub:`n-1,n-3`
+ - Ir\ :sub:`n-1,n-2`
+ - Ir\ :sub:`n-1,n-1`
+ * - Depth\ :sub:`0,0`
+ - Depth\ :sub:`0,1`
+ - Depth\ :sub:`0,2`
+ - ...
+ - ...
+ - ...
+ * - :cspan:`5` ...
+ * - :cspan:`5` Depth Data
+ * - :cspan:`5` ...
+ * - ...
+ - ...
+ - ...
+ - Depth\ :sub:`n-1,n-3`
+ - Depth\ :sub:`n-1,n-2`
+ - Depth\ :sub:`n-1,n-1`
+
+.. raw:: latex
+
+ \end{adjustbox}\newline\newline
diff --git a/Documentation/media/uapi/v4l/pixfmt-meta-vsp1-hgo.rst b/Documentation/media/uapi/v4l/pixfmt-meta-vsp1-hgo.rst
new file mode 100644
index 000000000000..67796594fd48
--- /dev/null
+++ b/Documentation/media/uapi/v4l/pixfmt-meta-vsp1-hgo.rst
@@ -0,0 +1,168 @@
+.. -*- coding: utf-8; mode: rst -*-
+
+.. _v4l2-meta-fmt-vsp1-hgo:
+
+*******************************
+V4L2_META_FMT_VSP1_HGO ('VSPH')
+*******************************
+
+Renesas R-Car VSP1 1-D Histogram Data
+
+
+Description
+===========
+
+This format describes histogram data generated by the Renesas R-Car VSP1 1-D
+Histogram (HGO) engine.
+
+The VSP1 HGO is a histogram computation engine that can operate on RGB, YCrCb
+or HSV data. It operates on a possibly cropped and subsampled input image and
+computes the minimum, maximum and sum of all pixels as well as per-channel
+histograms.
+
+The HGO can compute histograms independently per channel, on the maximum of the
+three channels (RGB data only) or on the Y channel only (YCbCr only). It can
+additionally output the histogram with 64 or 256 bins, resulting in four
+possible modes of operation.
+
+- In *64 bins normal mode*, the HGO operates on the three channels independently
+ to compute three 64-bins histograms. RGB, YCbCr and HSV image formats are
+ supported.
+- In *64 bins maximum mode*, the HGO operates on the maximum of the (R, G, B)
+ channels to compute a single 64-bins histogram. Only the RGB image format is
+ supported.
+- In *256 bins normal mode*, the HGO operates on the Y channel to compute a
+ single 256-bins histogram. Only the YCbCr image format is supported.
+- In *256 bins maximum mode*, the HGO operates on the maximum of the (R, G, B)
+ channels to compute a single 256-bins histogram. Only the RGB image format is
+ supported.
+
+**Byte Order.**
+All data is stored in memory in little endian format. Each cell in the tables
+contains one byte.
+
+.. flat-table:: VSP1 HGO Data - 64 Bins, Normal Mode (792 bytes)
+ :header-rows: 2
+ :stub-columns: 0
+
+ * - Offset
+ - :cspan:`4` Memory
+ * -
+ - [31:24]
+ - [23:16]
+ - [15:8]
+ - [7:0]
+ * - 0
+ -
+ - R/Cr/H max [7:0]
+ -
+ - R/Cr/H min [7:0]
+ * - 4
+ -
+ - G/Y/S max [7:0]
+ -
+ - G/Y/S min [7:0]
+ * - 8
+ -
+ - B/Cb/V max [7:0]
+ -
+ - B/Cb/V min [7:0]
+ * - 12
+ - :cspan:`4` R/Cr/H sum [31:0]
+ * - 16
+ - :cspan:`4` G/Y/S sum [31:0]
+ * - 20
+ - :cspan:`4` B/Cb/V sum [31:0]
+ * - 24
+ - :cspan:`4` R/Cr/H bin 0 [31:0]
+ * -
+ - :cspan:`4` ...
+ * - 276
+ - :cspan:`4` R/Cr/H bin 63 [31:0]
+ * - 280
+ - :cspan:`4` G/Y/S bin 0 [31:0]
+ * -
+ - :cspan:`4` ...
+ * - 532
+ - :cspan:`4` G/Y/S bin 63 [31:0]
+ * - 536
+ - :cspan:`4` B/Cb/V bin 0 [31:0]
+ * -
+ - :cspan:`4` ...
+ * - 788
+ - :cspan:`4` B/Cb/V bin 63 [31:0]
+
+.. flat-table:: VSP1 HGO Data - 64 Bins, Max Mode (264 bytes)
+ :header-rows: 2
+ :stub-columns: 0
+
+ * - Offset
+ - :cspan:`4` Memory
+ * -
+ - [31:24]
+ - [23:16]
+ - [15:8]
+ - [7:0]
+ * - 0
+ -
+ - max(R,G,B) max [7:0]
+ -
+ - max(R,G,B) min [7:0]
+ * - 4
+ - :cspan:`4` max(R,G,B) sum [31:0]
+ * - 8
+ - :cspan:`4` max(R,G,B) bin 0 [31:0]
+ * -
+ - :cspan:`4` ...
+ * - 260
+ - :cspan:`4` max(R,G,B) bin 63 [31:0]
+
+.. flat-table:: VSP1 HGO Data - 256 Bins, Normal Mode (1032 bytes)
+ :header-rows: 2
+ :stub-columns: 0
+
+ * - Offset
+ - :cspan:`4` Memory
+ * -
+ - [31:24]
+ - [23:16]
+ - [15:8]
+ - [7:0]
+ * - 0
+ -
+ - Y max [7:0]
+ -
+ - Y min [7:0]
+ * - 4
+ - :cspan:`4` Y sum [31:0]
+ * - 8
+ - :cspan:`4` Y bin 0 [31:0]
+ * -
+ - :cspan:`4` ...
+ * - 1028
+ - :cspan:`4` Y bin 255 [31:0]
+
+.. flat-table:: VSP1 HGO Data - 256 Bins, Max Mode (1032 bytes)
+ :header-rows: 2
+ :stub-columns: 0
+
+ * - Offset
+ - :cspan:`4` Memory
+ * -
+ - [31:24]
+ - [23:16]
+ - [15:8]
+ - [7:0]
+ * - 0
+ -
+ - max(R,G,B) max [7:0]
+ -
+ - max(R,G,B) min [7:0]
+ * - 4
+ - :cspan:`4` max(R,G,B) sum [31:0]
+ * - 8
+ - :cspan:`4` max(R,G,B) bin 0 [31:0]
+ * -
+ - :cspan:`4` ...
+ * - 1028
+ - :cspan:`4` max(R,G,B) bin 255 [31:0]
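+
+As an unofficial illustration of the 64 bins normal mode layout above (792
+bytes), the buffer could be overlaid with a structure like the following; the
+struct and helper names are made up and a little-endian host is assumed::
+
+    #include <stdint.h>
+
+    struct vsp1_hgo_64_normal {
+        uint32_t minmax[3];    /* max in bits [23:16], min in bits [7:0] */
+        uint32_t sum[3];       /* per-channel sum of all pixels */
+        uint32_t bins[3][64];  /* per-channel 64-bin histograms */
+    };
+
+    static inline uint8_t hgo_max(const struct vsp1_hgo_64_normal *h, int ch)
+    {
+        return (h->minmax[ch] >> 16) & 0xff;
+    }
+
+    static inline uint8_t hgo_min(const struct vsp1_hgo_64_normal *h, int ch)
+    {
+        return h->minmax[ch] & 0xff;
+    }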
diff --git a/Documentation/media/uapi/v4l/pixfmt-meta-vsp1-hgt.rst b/Documentation/media/uapi/v4l/pixfmt-meta-vsp1-hgt.rst
new file mode 100644
index 000000000000..fb9f79466319
--- /dev/null
+++ b/Documentation/media/uapi/v4l/pixfmt-meta-vsp1-hgt.rst
@@ -0,0 +1,120 @@
+.. -*- coding: utf-8; mode: rst -*-
+
+.. _v4l2-meta-fmt-vsp1-hgt:
+
+*******************************
+V4L2_META_FMT_VSP1_HGT ('VSPT')
+*******************************
+
+Renesas R-Car VSP1 2-D Histogram Data
+
+
+Description
+===========
+
+This format describes histogram data generated by the Renesas R-Car VSP1
+2-D Histogram (HGT) engine.
+
+The VSP1 HGT is a histogram computation engine that operates on HSV
+data. It operates on a possibly cropped and subsampled input image and
+computes the sum, maximum and minimum of the S component as well as a
+weighted frequency histogram based on the H and S components.
+
+The histogram is a matrix of 6 Hue and 32 Saturation buckets, 192 in
+total. Each HSV value is added to one or more buckets with a weight
+between 1 and 16 depending on the Hue areas configuration. Finding the
+corresponding buckets is done by inspecting the H and S value independently.
+
+The Saturation position **n** (0 - 31) of the bucket in the matrix is
+found by the expression:
+
+ n = S / 8
+
+The Hue position **m** (0 - 5) of the bucket in the matrix depends on
+how the HGT Hue areas are configured. There are 6 user configurable Hue
+Areas which can be configured to cover overlapping Hue values:
+
+::
+
+ Area 0 Area 1 Area 2 Area 3 Area 4 Area 5
+ ________ ________ ________ ________ ________ ________
+ \ /| |\ /| |\ /| |\ /| |\ /| |\ /| |\ /
+ \ / | | \ / | | \ / | | \ / | | \ / | | \ / | | \ /
+ X | | X | | X | | X | | X | | X | | X
+ / \ | | / \ | | / \ | | / \ | | / \ | | / \ | | / \
+ / \| |/ \| |/ \| |/ \| |/ \| |/ \| |/ \
+ 5U 0L 0U 1L 1U 2L 2U 3L 3U 4L 4U 5L 5U 0L
+ <0..............................Hue Value............................255>
+
+When two consecutive areas don't overlap (n+1L is equal to nU) the boundary
+value is considered as part of the lower area.
+
+Pixels with a hue value included in the centre of an area (between nL and nU
+included) are attributed to that single area and given a weight of 16. Pixels
+with a hue value included in the overlapping region between two areas (between
+n+1L and nU excluded) are attributed to both areas and given a weight for each
+of these areas proportional to their position along the diagonal lines
+(rounded down).
+
+The Hue area setup must match one of the following constraints:
+
+::
+
+ 0L <= 0U <= 1L <= 1U <= 2L <= 2U <= 3L <= 3U <= 4L <= 4U <= 5L <= 5U
+
+::
+
+ 0U <= 1L <= 1U <= 2L <= 2U <= 3L <= 3U <= 4L <= 4U <= 5L <= 5U <= 0L
+
+**Byte Order.**
+All data is stored in memory in little endian format. Each cell in the tables
+contains one byte.
+
+.. flat-table:: VSP1 HGT Data - (776 bytes)
+ :header-rows: 2
+ :stub-columns: 0
+
+ * - Offset
+ - :cspan:`4` Memory
+ * -
+ - [31:24]
+ - [23:16]
+ - [15:8]
+ - [7:0]
+ * - 0
+ - -
+ - S max [7:0]
+ - -
+ - S min [7:0]
+ * - 4
+ - :cspan:`4` S sum [31:0]
+ * - 8
+ - :cspan:`4` Histogram bucket (m=0, n=0) [31:0]
+ * - 12
+ - :cspan:`4` Histogram bucket (m=0, n=1) [31:0]
+ * -
+ - :cspan:`4` ...
+ * - 132
+ - :cspan:`4` Histogram bucket (m=0, n=31) [31:0]
+ * - 136
+ - :cspan:`4` Histogram bucket (m=1, n=0) [31:0]
+ * -
+ - :cspan:`4` ...
+ * - 264
+ - :cspan:`4` Histogram bucket (m=2, n=0) [31:0]
+ * -
+ - :cspan:`4` ...
+ * - 392
+ - :cspan:`4` Histogram bucket (m=3, n=0) [31:0]
+ * -
+ - :cspan:`4` ...
+ * - 520
+ - :cspan:`4` Histogram bucket (m=4, n=0) [31:0]
+ * -
+ - :cspan:`4` ...
+ * - 648
+ - :cspan:`4` Histogram bucket (m=5, n=0) [31:0]
+ * -
+ - :cspan:`4` ...
+ * - 772
+ - :cspan:`4` Histogram bucket (m=5, n=31) [31:0]
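+
+Based on the table above, a bucket can be located in the 776 byte buffer with
+a sketch like the following (the helper names are made up and a little-endian
+host is assumed)::
+
+    #include <stdint.h>
+
+    /* Saturation component S (0 - 255) selects column n = S / 8 (0 - 31) */
+    static inline unsigned int hgt_sat_bucket(uint8_t s)
+    {
+        return s / 8;
+    }
+
+    /* Bucket (m, n): skip the S max/min word and the S sum word */
+    static inline uint32_t hgt_bucket(const uint32_t *data,
+                                      unsigned int m, unsigned int n)
+    {
+        return data[2 + m * 32 + n];
+    }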
diff --git a/Documentation/media/uapi/v4l/pixfmt.rst b/Documentation/media/uapi/v4l/pixfmt.rst
index 4f184c7aedab..00737152497b 100644
--- a/Documentation/media/uapi/v4l/pixfmt.rst
+++ b/Documentation/media/uapi/v4l/pixfmt.rst
@@ -34,4 +34,5 @@ see also :ref:`VIDIOC_G_FBUF <VIDIOC_G_FBUF>`.)
pixfmt-013
sdr-formats
tch-formats
+ meta-formats
pixfmt-reserved
diff --git a/Documentation/media/uapi/v4l/subdev-formats.rst b/Documentation/media/uapi/v4l/subdev-formats.rst
index 09e2798b4966..8e73bb00c0d5 100644
--- a/Documentation/media/uapi/v4l/subdev-formats.rst
+++ b/Documentation/media/uapi/v4l/subdev-formats.rst
@@ -1890,10 +1890,10 @@ organization is given as an example for the first pixel only.
-
-
-
- - -
- - -
- - -
- - -
+ -
+ -
+ -
+ -
- b\ :sub:`7`
- b\ :sub:`6`
- b\ :sub:`5`
@@ -1911,10 +1911,10 @@ organization is given as an example for the first pixel only.
-
-
-
- - -
- - -
- - -
- - -
+ -
+ -
+ -
+ -
- g\ :sub:`7`
- g\ :sub:`6`
- g\ :sub:`5`
@@ -1932,10 +1932,10 @@ organization is given as an example for the first pixel only.
-
-
-
- - -
- - -
- - -
- - -
+ -
+ -
+ -
+ -
- g\ :sub:`7`
- g\ :sub:`6`
- g\ :sub:`5`
@@ -1953,10 +1953,10 @@ organization is given as an example for the first pixel only.
-
-
-
- - -
- - -
- - -
- - -
+ -
+ -
+ -
+ -
- r\ :sub:`7`
- r\ :sub:`6`
- r\ :sub:`5`
@@ -1974,10 +1974,10 @@ organization is given as an example for the first pixel only.
-
-
-
- - -
- - -
- - -
- - -
+ -
+ -
+ -
+ -
- b\ :sub:`7`
- b\ :sub:`6`
- b\ :sub:`5`
@@ -1995,10 +1995,10 @@ organization is given as an example for the first pixel only.
-
-
-
- - -
- - -
- - -
- - -
+ -
+ -
+ -
+ -
- g\ :sub:`7`
- g\ :sub:`6`
- g\ :sub:`5`
@@ -2016,10 +2016,10 @@ organization is given as an example for the first pixel only.
-
-
-
- - -
- - -
- - -
- - -
+ -
+ -
+ -
+ -
- g\ :sub:`7`
- g\ :sub:`6`
- g\ :sub:`5`
@@ -2037,10 +2037,10 @@ organization is given as an example for the first pixel only.
-
-
-
- - -
- - -
- - -
- - -
+ -
+ -
+ -
+ -
- r\ :sub:`7`
- r\ :sub:`6`
- r\ :sub:`5`
@@ -2058,10 +2058,10 @@ organization is given as an example for the first pixel only.
-
-
-
- - -
- - -
- - -
- - -
+ -
+ -
+ -
+ -
- b\ :sub:`7`
- b\ :sub:`6`
- b\ :sub:`5`
@@ -2079,10 +2079,10 @@ organization is given as an example for the first pixel only.
-
-
-
- - -
- - -
- - -
- - -
+ -
+ -
+ -
+ -
- g\ :sub:`7`
- g\ :sub:`6`
- g\ :sub:`5`
@@ -2100,10 +2100,10 @@ organization is given as an example for the first pixel only.
-
-
-
- - -
- - -
- - -
- - -
+ -
+ -
+ -
+ -
- g\ :sub:`7`
- g\ :sub:`6`
- g\ :sub:`5`
@@ -2121,10 +2121,10 @@ organization is given as an example for the first pixel only.
-
-
-
- - -
- - -
- - -
- - -
+ -
+ -
+ -
+ -
- r\ :sub:`7`
- r\ :sub:`6`
- r\ :sub:`5`
@@ -2142,10 +2142,10 @@ organization is given as an example for the first pixel only.
-
-
-
- - -
- - -
- - -
- - -
+ -
+ -
+ -
+ -
- 0
- 0
- 0
@@ -2161,10 +2161,10 @@ organization is given as an example for the first pixel only.
-
-
-
- - -
- - -
- - -
- - -
+ -
+ -
+ -
+ -
- b\ :sub:`7`
- b\ :sub:`6`
- b\ :sub:`5`
@@ -2182,10 +2182,10 @@ organization is given as an example for the first pixel only.
-
-
-
- - -
- - -
- - -
- - -
+ -
+ -
+ -
+ -
- b\ :sub:`7`
- b\ :sub:`6`
- b\ :sub:`5`
@@ -2201,10 +2201,10 @@ organization is given as an example for the first pixel only.
-
-
-
- - -
- - -
- - -
- - -
+ -
+ -
+ -
+ -
- 0
- 0
- 0
@@ -2222,10 +2222,10 @@ organization is given as an example for the first pixel only.
-
-
-
- - -
- - -
- - -
- - -
+ -
+ -
+ -
+ -
- b\ :sub:`9`
- b\ :sub:`8`
- b\ :sub:`7`
@@ -2241,10 +2241,10 @@ organization is given as an example for the first pixel only.
-
-
-
- - -
- - -
- - -
- - -
+ -
+ -
+ -
+ -
- b\ :sub:`1`
- b\ :sub:`0`
- 0
@@ -2262,10 +2262,10 @@ organization is given as an example for the first pixel only.
-
-
-
- - -
- - -
- - -
- - -
+ -
+ -
+ -
+ -
- b\ :sub:`1`
- b\ :sub:`0`
- 0
@@ -2281,10 +2281,10 @@ organization is given as an example for the first pixel only.
-
-
-
- - -
- - -
- - -
- - -
+ -
+ -
+ -
+ -
- b\ :sub:`9`
- b\ :sub:`8`
- b\ :sub:`7`
@@ -2300,10 +2300,10 @@ organization is given as an example for the first pixel only.
-
-
-
- - -
- - -
- - -
- - -
+ -
+ -
+ -
+ -
- b\ :sub:`9`
- b\ :sub:`8`
- b\ :sub:`7`
@@ -2321,10 +2321,10 @@ organization is given as an example for the first pixel only.
-
-
-
- - -
- - -
- - -
- - -
+ -
+ -
+ -
+ -
- g\ :sub:`9`
- g\ :sub:`8`
- g\ :sub:`7`
@@ -2342,10 +2342,10 @@ organization is given as an example for the first pixel only.
-
-
-
- - -
- - -
- - -
- - -
+ -
+ -
+ -
+ -
- g\ :sub:`9`
- g\ :sub:`8`
- g\ :sub:`7`
@@ -2363,10 +2363,10 @@ organization is given as an example for the first pixel only.
-
-
-
- - -
- - -
- - -
- - -
+ -
+ -
+ -
+ -
- r\ :sub:`9`
- r\ :sub:`8`
- r\ :sub:`7`
@@ -2382,10 +2382,10 @@ organization is given as an example for the first pixel only.
- MEDIA_BUS_FMT_SBGGR12_1X12
- 0x3008
-
- - -
- - -
- - -
- - -
+ -
+ -
+ -
+ -
- b\ :sub:`11`
- b\ :sub:`10`
- b\ :sub:`9`
@@ -2403,10 +2403,10 @@ organization is given as an example for the first pixel only.
- MEDIA_BUS_FMT_SGBRG12_1X12
- 0x3010
-
- - -
- - -
- - -
- - -
+ -
+ -
+ -
+ -
- g\ :sub:`11`
- g\ :sub:`10`
- g\ :sub:`9`
@@ -2424,10 +2424,10 @@ organization is given as an example for the first pixel only.
- MEDIA_BUS_FMT_SGRBG12_1X12
- 0x3011
-
- - -
- - -
- - -
- - -
+ -
+ -
+ -
+ -
- g\ :sub:`11`
- g\ :sub:`10`
- g\ :sub:`9`
@@ -2445,10 +2445,10 @@ organization is given as an example for the first pixel only.
- MEDIA_BUS_FMT_SRGGB12_1X12
- 0x3012
-
- - -
- - -
- - -
- - -
+ -
+ -
+ -
+ -
- r\ :sub:`11`
- r\ :sub:`10`
- r\ :sub:`9`
@@ -2466,8 +2466,8 @@ organization is given as an example for the first pixel only.
- MEDIA_BUS_FMT_SBGGR14_1X14
- 0x3019
-
- - -
- - -
+ -
+ -
- b\ :sub:`13`
- b\ :sub:`12`
- b\ :sub:`11`
@@ -2487,8 +2487,8 @@ organization is given as an example for the first pixel only.
- MEDIA_BUS_FMT_SGBRG14_1X14
- 0x301a
-
- - -
- - -
+ -
+ -
- g\ :sub:`13`
- g\ :sub:`12`
- g\ :sub:`11`
@@ -2508,8 +2508,8 @@ organization is given as an example for the first pixel only.
- MEDIA_BUS_FMT_SGRBG14_1X14
- 0x301b
-
- - -
- - -
+ -
+ -
- g\ :sub:`13`
- g\ :sub:`12`
- g\ :sub:`11`
@@ -2529,8 +2529,8 @@ organization is given as an example for the first pixel only.
- MEDIA_BUS_FMT_SRGGB14_1X14
- 0x301c
-
- - -
- - -
+ -
+ -
- r\ :sub:`13`
- r\ :sub:`12`
- r\ :sub:`11`
diff --git a/Documentation/media/uapi/v4l/video.rst b/Documentation/media/uapi/v4l/video.rst
index a205fb87d566..d2bc06b064ad 100644
--- a/Documentation/media/uapi/v4l/video.rst
+++ b/Documentation/media/uapi/v4l/video.rst
@@ -8,9 +8,10 @@ Video Inputs and Outputs
Video inputs and outputs are physical connectors of a device. These can
be for example RF connectors (antenna/cable), CVBS a.k.a. Composite
-Video, S-Video or RGB connectors. Video and VBI capture devices have
-inputs. Video and VBI output devices have outputs, at least one each.
-Radio devices have no video inputs or outputs.
+Video, S-Video and RGB connectors. Camera sensors are also considered to
+be a video input. Video and VBI capture devices have inputs. Video and
+VBI output devices have outputs, at least one each. Radio devices have
+no video inputs or outputs.
To learn about the number and attributes of the available inputs and
outputs applications can enumerate them with the
diff --git a/Documentation/media/uapi/v4l/vidioc-enuminput.rst b/Documentation/media/uapi/v4l/vidioc-enuminput.rst
index 17aaaf939757..266e48ab237f 100644
--- a/Documentation/media/uapi/v4l/vidioc-enuminput.rst
+++ b/Documentation/media/uapi/v4l/vidioc-enuminput.rst
@@ -33,7 +33,7 @@ Description
To query the attributes of a video input applications initialize the
``index`` field of struct :c:type:`v4l2_input` and call the
-:ref:`VIDIOC_ENUMINPUT` ioctl with a pointer to this structure. Drivers
+:ref:`VIDIOC_ENUMINPUT` with a pointer to this structure. Drivers
fill the rest of the structure or return an ``EINVAL`` error code when the
index is out of bounds. To enumerate all inputs applications shall begin
at index zero, incrementing by one until the driver returns ``EINVAL``.
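+
+A minimal enumeration loop following this description might look like the
+sketch below (error handling trimmed for brevity)::
+
+    #include <stdio.h>
+    #include <string.h>
+    #include <sys/ioctl.h>
+    #include <linux/videodev2.h>
+
+    static void list_inputs(int fd)
+    {
+        struct v4l2_input input;
+
+        memset(&input, 0, sizeof(input));
+        while (ioctl(fd, VIDIOC_ENUMINPUT, &input) == 0) {
+            printf("input %u: %s\n", input.index, input.name);
+            input.index++;
+        }
+        /* the loop ends once the driver returns EINVAL */
+    }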
@@ -117,8 +117,9 @@ at index zero, incrementing by one until the driver returns ``EINVAL``.
- This input uses a tuner (RF demodulator).
* - ``V4L2_INPUT_TYPE_CAMERA``
- 2
- - Analog baseband input, for example CVBS / Composite Video,
- S-Video, RGB.
+ - Any non-tuner video input, for example Composite Video,
+        S-Video, HDMI or a camera sensor. The name ``_TYPE_CAMERA`` is historical;
+        today it would have been called ``_TYPE_VIDEO``.
* - ``V4L2_INPUT_TYPE_TOUCH``
- 3
- This input is a touch device for capturing raw touch data.
@@ -209,11 +210,11 @@ at index zero, incrementing by one until the driver returns ``EINVAL``.
* - ``V4L2_IN_CAP_DV_TIMINGS``
- 0x00000002
- This input supports setting video timings by using
- VIDIOC_S_DV_TIMINGS.
+ ``VIDIOC_S_DV_TIMINGS``.
* - ``V4L2_IN_CAP_STD``
- 0x00000004
- This input supports setting the TV standard by using
- VIDIOC_S_STD.
+ ``VIDIOC_S_STD``.
* - ``V4L2_IN_CAP_NATIVE_SIZE``
- 0x00000008
- This input supports setting the native size using the
diff --git a/Documentation/media/uapi/v4l/vidioc-enumoutput.rst b/Documentation/media/uapi/v4l/vidioc-enumoutput.rst
index d7dd2742475a..93a2cf3b310c 100644
--- a/Documentation/media/uapi/v4l/vidioc-enumoutput.rst
+++ b/Documentation/media/uapi/v4l/vidioc-enumoutput.rst
@@ -33,11 +33,11 @@ Description
To query the attributes of a video outputs applications initialize the
``index`` field of struct :c:type:`v4l2_output` and call
-the :ref:`VIDIOC_ENUMOUTPUT` ioctl with a pointer to this structure.
+the :ref:`VIDIOC_ENUMOUTPUT` with a pointer to this structure.
Drivers fill the rest of the structure or return an ``EINVAL`` error code
when the index is out of bounds. To enumerate all outputs applications
shall begin at index zero, incrementing by one until the driver returns
-EINVAL.
+``EINVAL``.
.. tabularcolumns:: |p{4.4cm}|p{4.4cm}|p{8.7cm}|
@@ -112,11 +112,12 @@ EINVAL.
- This output is an analog TV modulator.
* - ``V4L2_OUTPUT_TYPE_ANALOG``
- 2
- - Analog baseband output, for example Composite / CVBS, S-Video,
- RGB.
+ - Any non-modulator video output, for example Composite Video,
+        S-Video, HDMI. The name ``_TYPE_ANALOG`` is historical;
+        today it would have been called ``_TYPE_VIDEO``.
* - ``V4L2_OUTPUT_TYPE_ANALOGVGAOVERLAY``
- 3
- - [?]
+ - The video output will be copied to a :ref:`video overlay <overlay>`.
@@ -132,11 +133,11 @@ EINVAL.
* - ``V4L2_OUT_CAP_DV_TIMINGS``
- 0x00000002
- This output supports setting video timings by using
- VIDIOC_S_DV_TIMINGS.
+ ``VIDIOC_S_DV_TIMINGS``.
* - ``V4L2_OUT_CAP_STD``
- 0x00000004
- This output supports setting the TV standard by using
- VIDIOC_S_STD.
+ ``VIDIOC_S_STD``.
* - ``V4L2_OUT_CAP_NATIVE_SIZE``
- 0x00000008
- This output supports setting the native size using the
diff --git a/Documentation/media/uapi/v4l/vidioc-g-dv-timings.rst b/Documentation/media/uapi/v4l/vidioc-g-dv-timings.rst
index aea276502f5e..e573c74138de 100644
--- a/Documentation/media/uapi/v4l/vidioc-g-dv-timings.rst
+++ b/Documentation/media/uapi/v4l/vidioc-g-dv-timings.rst
@@ -146,8 +146,20 @@ EBUSY
- ``flags``
- Several flags giving more information about the format. See
:ref:`dv-bt-flags` for a description of the flags.
- * - __u32
- - ``reserved[14]``
+ * - struct :c:type:`v4l2_fract`
+ - ``picture_aspect``
+ - The picture aspect if the pixels are not square. Only valid if the
+ ``V4L2_DV_FL_HAS_PICTURE_ASPECT`` flag is set.
+ * - __u8
+ - ``cea861_vic``
+ - The Video Identification Code according to the CEA-861 standard.
+ Only valid if the ``V4L2_DV_FL_HAS_CEA861_VIC`` flag is set.
+ * - __u8
+ - ``hdmi_vic``
+ - The Video Identification Code according to the HDMI standard.
+ Only valid if the ``V4L2_DV_FL_HAS_HDMI_VIC`` flag is set.
+ * - __u8
+ - ``reserved[46]``
- Reserved for future extensions. Drivers and applications must set
the array to zero.
diff --git a/Documentation/media/uapi/v4l/vidioc-querycap.rst b/Documentation/media/uapi/v4l/vidioc-querycap.rst
index 165d8314327e..12e0d9a63cd8 100644
--- a/Documentation/media/uapi/v4l/vidioc-querycap.rst
+++ b/Documentation/media/uapi/v4l/vidioc-querycap.rst
@@ -236,6 +236,9 @@ specification the ioctl returns an ``EINVAL`` error code.
* - ``V4L2_CAP_SDR_OUTPUT``
- 0x00400000
- The device supports the :ref:`SDR Output <sdr>` interface.
+ * - ``V4L2_CAP_META_CAPTURE``
+ - 0x00800000
+ - The device supports the :ref:`metadata` capture interface.
* - ``V4L2_CAP_READWRITE``
- 0x01000000
- The device supports the :ref:`read() <rw>` and/or
diff --git a/Documentation/media/uapi/v4l/vidioc-queryctrl.rst b/Documentation/media/uapi/v4l/vidioc-queryctrl.rst
index 82769de801b1..41c5744a1239 100644
--- a/Documentation/media/uapi/v4l/vidioc-queryctrl.rst
+++ b/Documentation/media/uapi/v4l/vidioc-queryctrl.rst
@@ -301,12 +301,12 @@ See also the examples in :ref:`control`.
- ``name``\ [32]
- Name of the menu item, a NUL-terminated ASCII string. This
information is intended for the user. This field is valid for
- ``V4L2_CTRL_FLAG_MENU`` type controls.
+ ``V4L2_CTRL_TYPE_MENU`` type controls.
* -
- __s64
- ``value``
- Value of the integer menu item. This field is valid for
- ``V4L2_CTRL_FLAG_INTEGER_MENU`` type controls.
+ ``V4L2_CTRL_TYPE_INTEGER_MENU`` type controls.
* - __u32
-
- ``reserved``
@@ -507,6 +507,19 @@ See also the examples in :ref:`control`.
represents an action on the hardware. For example: clearing an
error flag or triggering the flash. All the controls of the type
``V4L2_CTRL_TYPE_BUTTON`` have this flag set.
+ * .. _FLAG_MODIFY_LAYOUT:
+
+ - ``V4L2_CTRL_FLAG_MODIFY_LAYOUT``
+ - 0x0400
+ - Changing this control value may modify the layout of the
+ buffer (for video devices) or the media bus format (for sub-devices).
+
+ A typical example would be the ``V4L2_CID_ROTATE`` control.
+
+ Note that typically controls with this flag will also set the
+ ``V4L2_CTRL_FLAG_GRABBED`` flag when buffers are allocated or
+ streaming is in progress since most drivers do not support changing
+ the format in that case.
Return Value
diff --git a/Documentation/media/v4l-drivers/philips.rst b/Documentation/media/v4l-drivers/philips.rst
index 620bcfea7af0..4f68947e6a13 100644
--- a/Documentation/media/v4l-drivers/philips.rst
+++ b/Documentation/media/v4l-drivers/philips.rst
@@ -145,8 +145,8 @@ dev_hint
A camera is specified by its type (the number from the camera model,
like PCA645, PCVC750VC, etc) and optionally the serial number (visible
- in /proc/bus/usb/devices). A hint consists of a string with the following
- format::
+ in /sys/kernel/debug/usb/devices). A hint consists of a string with the
+ following format::
[type[.serialnumber]:]node
diff --git a/Documentation/media/v4l-drivers/vivid.rst b/Documentation/media/v4l-drivers/vivid.rst
index c8cf371e8bb9..3e44b2217f2d 100644
--- a/Documentation/media/v4l-drivers/vivid.rst
+++ b/Documentation/media/v4l-drivers/vivid.rst
@@ -263,6 +263,14 @@ all configurable using the following module options:
removed. Unless overridden by ccs_cap_mode and/or ccs_out_mode the
will default to enabling crop, compose and scaling.
+- allocators:
+
+ memory allocator selection, default is 0. It specifies the way buffers
+ will be allocated.
+
+ - 0: vmalloc
+ - 1: dma-contig
+
Taken together, all these module options allow you to precisely customize
the driver behavior and test your application with all sorts of permutations.
It is also very suitable to emulate hardware that is not yet available, e.g.
diff --git a/Documentation/media/videodev2.h.rst.exceptions b/Documentation/media/videodev2.h.rst.exceptions
index e11a0d0a8931..a5cb0a8686ac 100644
--- a/Documentation/media/videodev2.h.rst.exceptions
+++ b/Documentation/media/videodev2.h.rst.exceptions
@@ -27,6 +27,7 @@ replace symbol V4L2_FIELD_SEQ_TB :c:type:`v4l2_field`
replace symbol V4L2_FIELD_TOP :c:type:`v4l2_field`
# Documented enum v4l2_buf_type
+replace symbol V4L2_BUF_TYPE_META_CAPTURE :c:type:`v4l2_buf_type`
replace symbol V4L2_BUF_TYPE_SDR_CAPTURE :c:type:`v4l2_buf_type`
replace symbol V4L2_BUF_TYPE_SDR_OUTPUT :c:type:`v4l2_buf_type`
replace symbol V4L2_BUF_TYPE_SLICED_VBI_CAPTURE :c:type:`v4l2_buf_type`
@@ -152,6 +153,7 @@ replace define V4L2_CAP_MODULATOR device-capabilities
replace define V4L2_CAP_SDR_CAPTURE device-capabilities
replace define V4L2_CAP_EXT_PIX_FORMAT device-capabilities
replace define V4L2_CAP_SDR_OUTPUT device-capabilities
+replace define V4L2_CAP_META_CAPTURE device-capabilities
replace define V4L2_CAP_READWRITE device-capabilities
replace define V4L2_CAP_ASYNCIO device-capabilities
replace define V4L2_CAP_STREAMING device-capabilities
@@ -339,6 +341,7 @@ replace define V4L2_CTRL_FLAG_WRITE_ONLY control-flags
replace define V4L2_CTRL_FLAG_VOLATILE control-flags
replace define V4L2_CTRL_FLAG_HAS_PAYLOAD control-flags
replace define V4L2_CTRL_FLAG_EXECUTE_ON_WRITE control-flags
+replace define V4L2_CTRL_FLAG_MODIFY_LAYOUT control-flags
replace define V4L2_CTRL_FLAG_NEXT_CTRL control
replace define V4L2_CTRL_FLAG_NEXT_COMPOUND control
diff --git a/Documentation/memory-barriers.txt b/Documentation/memory-barriers.txt
index d2b0a8d81258..08329cb857ed 100644
--- a/Documentation/memory-barriers.txt
+++ b/Documentation/memory-barriers.txt
@@ -768,7 +768,7 @@ equal to zero, in which case the compiler is within its rights to
transform the above code into the following:
q = READ_ONCE(a);
- WRITE_ONCE(b, 1);
+ WRITE_ONCE(b, 2);
do_something_else();
Given this transformation, the CPU is not required to respect the ordering
diff --git a/Documentation/misc-devices/pci-endpoint-test.txt b/Documentation/misc-devices/pci-endpoint-test.txt
new file mode 100644
index 000000000000..4ebc3594b32c
--- /dev/null
+++ b/Documentation/misc-devices/pci-endpoint-test.txt
@@ -0,0 +1,35 @@
+Driver for PCI Endpoint Test Function
+
+This driver should be used as a host side driver if the root complex is
+connected to a configurable PCI endpoint running *pci_epf_test* function
+driver configured according to [1].
+
+The "pci_endpoint_test" driver can be used to perform the following tests.
+
+The PCI driver for the test device performs the following tests
+ *) verifying addresses programmed in BAR
+ *) raise legacy IRQ
+ *) raise MSI IRQ
+ *) read data
+ *) write data
+ *) copy data
+
+This misc driver creates /dev/pci-endpoint-test.<num> for every
+*pci_epf_test* function connected to the root complex and "ioctls"
+should be used to perform the above tests.
+
+ioctl
+-----
+ PCITEST_BAR: Tests the BAR. The number of the BAR to be tested
+ should be passed as argument.
+ PCITEST_LEGACY_IRQ: Tests legacy IRQ
+ PCITEST_MSI: Tests message signalled interrupts. The MSI number
+ to be tested should be passed as argument.
+ PCITEST_WRITE: Perform write tests. The size of the buffer should be passed
+ as argument.
+ PCITEST_READ: Perform read tests. The size of the buffer should be passed
+ as argument.
+ PCITEST_COPY: Perform copy tests. The size of the buffer should be passed
+ as argument.
+
+[1] -> Documentation/PCI/endpoint/function/binding/pci-test.txt
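+
+Example
+-------
+
+A minimal userspace sketch of driving these ioctls is shown below. The device
+name follows the pattern described above; the header name <linux/pcitest.h>
+and the meaning of the return values are assumptions, so check the driver for
+the authoritative definitions.
+
+    #include <fcntl.h>
+    #include <stdio.h>
+    #include <sys/ioctl.h>
+    #include <linux/pcitest.h>
+
+    int main(void)
+    {
+        int fd = open("/dev/pci-endpoint-test.0", O_RDWR);
+
+        if (fd < 0)
+            return 1;
+
+        /* each call runs one test; the result indicates pass/fail */
+        printf("BAR0:  %ld\n", (long)ioctl(fd, PCITEST_BAR, 0));
+        printf("MSI1:  %ld\n", (long)ioctl(fd, PCITEST_MSI, 1));
+        printf("WRITE: %ld\n", (long)ioctl(fd, PCITEST_WRITE, 1024));
+        return 0;
+    }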
diff --git a/Documentation/perf/qcom_l3_pmu.txt b/Documentation/perf/qcom_l3_pmu.txt
new file mode 100644
index 000000000000..96b3a9444a0d
--- /dev/null
+++ b/Documentation/perf/qcom_l3_pmu.txt
@@ -0,0 +1,25 @@
+Qualcomm Datacenter Technologies L3 Cache Performance Monitoring Unit (PMU)
+===========================================================================
+
+This driver supports the L3 cache PMUs found in Qualcomm Datacenter Technologies
+Centriq SoCs. The L3 cache on these SOCs is composed of multiple slices, shared
+by all cores within a socket. Each slice is exposed as a separate uncore perf
+PMU with device name l3cache_<socket>_<instance>. User space is responsible
+for aggregating across slices.
+
+The driver provides a description of its available events and configuration
+options in sysfs, see /sys/devices/l3cache*. Given that these are uncore PMUs
+the driver also exposes a "cpumask" sysfs attribute which contains a mask
+consisting of one CPU per socket which will be used to handle all the PMU
+events on that socket.
+
+The hardware implements 32bit event counters and has a flat 8bit event space
+exposed via the "event" format attribute. In addition to the 32bit physical
+counters the driver supports virtual 64bit hardware counters by using hardware
+counter chaining. This feature is exposed via the "lc" (long counter) format
+flag. E.g.:
+
+ perf stat -e l3cache_0_0/read-miss,lc/
+
+Given that these are uncore PMUs the driver does not support sampling, therefore
+"perf record" will not work. Per-task perf sessions are not supported.
diff --git a/Documentation/powerpc/cxl.txt b/Documentation/powerpc/cxl.txt
index d5506ba0fef7..c5e8d5098ed3 100644
--- a/Documentation/powerpc/cxl.txt
+++ b/Documentation/powerpc/cxl.txt
@@ -21,7 +21,7 @@ Introduction
Hardware overview
=================
- POWER8 FPGA
+ POWER8/9 FPGA
+----------+ +---------+
| | | |
| CPU | | AFU |
@@ -34,7 +34,7 @@ Hardware overview
| | CAPP |<------>| |
+---+------+ PCIE +---------+
- The POWER8 chip has a Coherently Attached Processor Proxy (CAPP)
+ The POWER8/9 chip has a Coherently Attached Processor Proxy (CAPP)
unit which is part of the PCIe Host Bridge (PHB). This is managed
by Linux by calls into OPAL. Linux doesn't directly program the
CAPP.
@@ -59,6 +59,17 @@ Hardware overview
the fault. The context to which this fault is serviced is based on
who owns that acceleration function.
+ POWER8 <-----> PSL Version 8 is compliant with the CAIA Version 1.0.
+ POWER9 <-----> PSL Version 9 is compliant with the CAIA Version 2.0.
+ This PSL Version 9 provides new features such as:
+ * Interaction with the nest MMU on the P9 chip.
+ * Native DMA support.
+ * Supports sending ASB_Notify messages for host thread wakeup.
+ * Supports Atomic operations.
+ * ....
+
+ Cards with a PSL9 won't work on a POWER8 system and cards with a
+ PSL8 won't work on a POWER9 system.
AFU Modes
=========
diff --git a/Documentation/powerpc/cxlflash.txt b/Documentation/powerpc/cxlflash.txt
index 6d9a2ed32cad..66b4496d6619 100644
--- a/Documentation/powerpc/cxlflash.txt
+++ b/Documentation/powerpc/cxlflash.txt
@@ -239,6 +239,11 @@ DK_CXLFLASH_USER_VIRTUAL
resource handle that is provided is already referencing provisioned
storage. This is reflected by the last LBA being a non-zero value.
+ When a LUN is accessible from more than one port, this ioctl will
+ return with the DK_CXLFLASH_ALL_PORTS_ACTIVE return flag set. This
+ provides the user with a hint that I/O can be retried in the event
+ of an I/O error as the LUN can be reached over multiple paths.
+
DK_CXLFLASH_VLUN_RESIZE
-----------------------
This ioctl is responsible for resizing a previously created virtual
diff --git a/Documentation/powerpc/firmware-assisted-dump.txt b/Documentation/powerpc/firmware-assisted-dump.txt
index 3007bc98af28..9cabaf8a207e 100644
--- a/Documentation/powerpc/firmware-assisted-dump.txt
+++ b/Documentation/powerpc/firmware-assisted-dump.txt
@@ -55,10 +55,14 @@ as follows:
booted with restricted memory. By default, the boot memory
size will be the larger of 5% of system RAM or 256MB.
Alternatively, user can also specify boot memory size
- through boot parameter 'fadump_reserve_mem=' which will
- override the default calculated size. Use this option
- if default boot memory size is not sufficient for second
- kernel to boot successfully.
+ through boot parameter 'crashkernel=' which will override
+ the default calculated size. Use this option if default
+ boot memory size is not sufficient for second kernel to
+ boot successfully. For syntax of crashkernel= parameter,
+ refer to Documentation/kdump/kdump.txt. If any offset is
+ provided in crashkernel= parameter, it will be ignored
+ as fadump reserves memory at end of RAM for boot memory
+ dump preservation in case of a crash.
-- After the low memory (boot memory) area has been saved, the
firmware will reset PCI and other hardware state. It will
@@ -105,21 +109,21 @@ memory is held.
If there is no waiting dump data, then only the memory required
to hold CPU state, HPTE region, boot memory dump and elfcore
-header, is reserved at the top of memory (see Fig. 1). This area
-is *not* released: this region will be kept permanently reserved,
-so that it can act as a receptacle for a copy of the boot memory
-content in addition to CPU state and HPTE region, in the case a
-crash does occur.
+header, is usually reserved at an offset greater than boot memory
+size (see Fig. 1). This area is *not* released: this region will
+be kept permanently reserved, so that it can act as a receptacle
+for a copy of the boot memory content in addition to CPU state
+and HPTE region, in the case a crash does occur.
o Memory Reservation during first kernel
- Low memory Top of memory
+ Low memory Top of memory
0 boot memory size |
- | | |<--Reserved dump area -->|
- V V | Permanent Reservation V
- +-----------+----------/ /----------+---+----+-----------+----+
- | | |CPU|HPTE| DUMP |ELF |
- +-----------+----------/ /----------+---+----+-----------+----+
+ | | |<--Reserved dump area -->| |
+ V V | Permanent Reservation | V
+ +-----------+----------/ /---+---+----+-----------+----+------+
+ | | |CPU|HPTE| DUMP |ELF | |
+ +-----------+----------/ /---+---+----+-----------+----+------+
| ^
| |
\ /
@@ -135,12 +139,12 @@ crash does occur.
0 boot memory size |
| |<------------- Reserved dump area ----------- -->|
V V V
- +-----------+----------/ /----------+---+----+-----------+----+
- | | |CPU|HPTE| DUMP |ELF |
- +-----------+----------/ /----------+---+----+-----------+----+
- | |
- V V
- Used by second /proc/vmcore
+ +-----------+----------/ /---+---+----+-----------+----+------+
+ | | |CPU|HPTE| DUMP |ELF | |
+ +-----------+----------/ /---+---+----+-----------+----+------+
+ | |
+ V V
+ Used by second /proc/vmcore
kernel to boot
Fig. 2
@@ -158,13 +162,16 @@ How to enable firmware-assisted dump (fadump):
1. Set config option CONFIG_FA_DUMP=y and build kernel.
2. Boot into linux kernel with 'fadump=on' kernel cmdline option.
-3. Optionally, user can also set 'fadump_reserve_mem=' kernel cmdline
+3. Optionally, user can also set 'crashkernel=' kernel cmdline
to specify size of the memory to reserve for boot memory dump
preservation.
-NOTE: If firmware-assisted dump fails to reserve memory then it will
- fallback to existing kdump mechanism if 'crashkernel=' option
- is set at kernel cmdline.
+NOTE: 1. The 'fadump_reserve_mem=' parameter has been deprecated. Instead,
+ use 'crashkernel=' to specify the size of the memory to reserve
+ for boot memory dump preservation.
+ 2. If firmware-assisted dump fails to reserve memory, it will
+ fall back to the existing kdump mechanism if the 'crashkernel='
+ option is set on the kernel cmdline.
Sysfs/debugfs files:
------------
diff --git a/Documentation/process/changes.rst b/Documentation/process/changes.rst
index 01c5dbcd0f84..e25d63f8c0da 100644
--- a/Documentation/process/changes.rst
+++ b/Documentation/process/changes.rst
@@ -30,7 +30,7 @@ you probably needn't concern yourself with isdn4k-utils.
Program Minimal version Command to check the version
====================== =============== ========================================
GNU C 3.2 gcc --version
-GNU make 3.80 make --version
+GNU make 3.81 make --version
binutils 2.12 ld -v
util-linux 2.10o fdformat --version
module-init-tools 0.9.10 depmod -V
@@ -70,7 +70,7 @@ computer.
Make
----
-You will need GNU make 3.80 or later to build the kernel.
+You will need GNU make 3.81 or later to build the kernel.
Binutils
--------
diff --git a/Documentation/scsi/scsi_eh.txt b/Documentation/scsi/scsi_eh.txt
index 37eca00796ee..11e447bdb3a5 100644
--- a/Documentation/scsi/scsi_eh.txt
+++ b/Documentation/scsi/scsi_eh.txt
@@ -70,7 +70,7 @@ with the command.
scmd is requeued to blk queue.
- otherwise
- scsi_eh_scmd_add(scmd, 0) is invoked for the command. See
+ scsi_eh_scmd_add(scmd) is invoked for the command. See
[1-3] for details of this function.
@@ -103,13 +103,14 @@ function
eh_timed_out() callback did not handle the command.
Step #2 is taken.
- 2. If the host supports asynchronous completion (as indicated by the
- no_async_abort setting in the host template) scsi_abort_command()
- is invoked to schedule an asynchrous abort. If that fails
- Step #3 is taken.
+ 2. scsi_abort_command() is invoked to schedule an asynchronous abort.
+ Asynchronous aborts are not invoked for commands for which the
+ SCSI_EH_ABORT_SCHEDULED flag is set (this indicates that the command
+ has already been aborted once, and this is a retry which failed),
+ or when the EH deadline has expired. In these cases Step #3 is taken.
- 2. scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD) is invoked for the
- command. See [1-3] for more information.
+ 3. scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD) is invoked for the
+ command. See [1-4] for more information.
[1-3] Asynchronous command aborts
@@ -124,16 +125,13 @@ function
scmds enter EH via scsi_eh_scmd_add(), which does the following.
- 1. Turns on scmd->eh_eflags as requested. It's 0 for error
- completions and SCSI_EH_CANCEL_CMD for timeouts.
+ 1. Links scmd->eh_entry to shost->eh_cmd_q
- 2. Links scmd->eh_entry to shost->eh_cmd_q
+ 2. Sets SHOST_RECOVERY bit in shost->shost_state
- 3. Sets SHOST_RECOVERY bit in shost->shost_state
+ 3. Increments shost->host_failed
- 4. Increments shost->host_failed
-
- 5. Wakes up SCSI EH thread if shost->host_busy == shost->host_failed
+ 4. Wakes up SCSI EH thread if shost->host_busy == shost->host_failed
As can be seen above, once any scmd is added to shost->eh_cmd_q,
SHOST_RECOVERY shost_state bit is turned on. This prevents any new
@@ -249,7 +247,6 @@ scmd->allowed.
1. Error completion / time out
ACTION: scsi_eh_scmd_add() is invoked for scmd
- - set scmd->eh_eflags
- add scmd to shost->eh_cmd_q
- set SHOST_RECOVERY
- shost->host_failed++
@@ -263,7 +260,6 @@ scmd->allowed.
3. scmd recovered
ACTION: scsi_eh_finish_cmd() is invoked to EH-finish scmd
- - clear scmd->eh_eflags
- scsi_setup_cmd_retry()
- move from local eh_work_q to local eh_done_q
LOCKING: none
@@ -456,8 +452,6 @@ except for #1 must be implemented by eh_strategy_handler().
- shost->host_failed is zero.
- - Each scmd's eh_eflags field is cleared.
-
- Each scmd is in such a state that scsi_setup_cmd_retry() on the
scmd doesn't make any difference.
diff --git a/Documentation/switchtec.txt b/Documentation/switchtec.txt
new file mode 100644
index 000000000000..a0a9c7b3d4d5
--- /dev/null
+++ b/Documentation/switchtec.txt
@@ -0,0 +1,80 @@
+========================
+Linux Switchtec Support
+========================
+
+Microsemi's "Switchtec" line of PCI switch devices is already
+supported by the kernel with standard PCI switch drivers. However, the
+Switchtec device advertises a special management endpoint which
+enables some additional functionality. This includes:
+
+* Packet and Byte Counters
+* Firmware Upgrades
+* Event and Error logs
+* Querying port link status
+* Custom user firmware commands
+
+The switchtec kernel module implements this functionality.
+
+
+Interface
+=========
+
+The primary means of communicating with the Switchtec management firmware is
+through the Memory-mapped Remote Procedure Call (MRPC) interface.
+Commands are submitted to the interface with a 4-byte command
+identifier and up to 1KB of command specific data. The firmware will
+respond with a 4 bytes return code and up to 1KB of command specific
+data. The interface only processes a single command at a time.
+
+
+Userspace Interface
+===================
+
+The MRPC interface will be exposed to userspace through a simple char
+device: /dev/switchtec#, one for each management endpoint in the system.
+
+The char device has the following semantics:
+
+* A write must consist of at least 4 bytes and no more than 1028 bytes.
+ The first four bytes will be interpreted as the command to run and
+ the remainder will be used as the input data. A write will send the
+ command to the firmware to begin processing.
+
+* Each write must be followed by exactly one read. Any double write will
+ produce an error and any read that doesn't follow a write will
+ produce an error.
+
+* A read will block until the firmware completes the command and returns
+ the four bytes of status plus up to 1024 bytes of output data. (The
+ length is specified by the size parameter of the read call; reading
+ less than 4 bytes will produce an error.)
+
+* The poll call will also be supported for userspace applications that
+ need to do other things while waiting for the command to complete.
+
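+The sketch below illustrates the write-then-read flow; the command identifier
+used here is a placeholder, not a real firmware command:
+
+  #include <fcntl.h>
+  #include <stdint.h>
+  #include <stdio.h>
+  #include <string.h>
+  #include <unistd.h>
+
+  int main(void)
+  {
+          uint8_t buf[4 + 1024];
+          uint32_t cmd = 0;          /* placeholder command identifier */
+          uint32_t status;
+          ssize_t n;
+          int fd = open("/dev/switchtec0", O_RDWR);
+
+          if (fd < 0) {
+                  perror("open");
+                  return 1;
+          }
+          /* The first 4 bytes are the command; no input data here. */
+          memcpy(buf, &cmd, 4);
+          if (write(fd, buf, 4) != 4) {
+                  perror("write");
+                  return 1;
+          }
+          /* Exactly one read per write; blocks until the firmware is done. */
+          n = read(fd, buf, sizeof(buf));
+          if (n < 4) {
+                  perror("read");
+                  return 1;
+          }
+          memcpy(&status, buf, 4);
+          printf("return code 0x%x, %zd bytes of output\n",
+                 (unsigned)status, n - 4);
+          close(fd);
+          return 0;
+  }
+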
+The following IOCTLs are also supported by the device:
+
+* SWITCHTEC_IOCTL_FLASH_INFO - Retrieve firmware length and number
+ of partitions in the device.
+
+* SWITCHTEC_IOCTL_FLASH_PART_INFO - Retrieve address and length for
+ any specified partition in flash.
+
+* SWITCHTEC_IOCTL_EVENT_SUMMARY - Read a structure of bitmaps
+ indicating all uncleared events.
+
+* SWITCHTEC_IOCTL_EVENT_CTL - Get the current count, clear and set flags
+ for any event. This ioctl takes in a switchtec_ioctl_event_ctl struct
+ with the event_id, index and flags set (index being the partition or PFF
+ number for non-global events). It returns whether the event has
+ occurred, the number of times and any event specific data. The flags
+ can be used to clear the count or enable and disable actions to
+ happen when the event occurs.
+ By using the SWITCHTEC_IOCTL_EVENT_FLAG_EN_POLL flag,
+ you can set an event to trigger a poll command to return with
+ POLLPRI. In this way, userspace can wait for events to occur.
+
+* SWITCHTEC_IOCTL_PFF_TO_PORT and SWITCHTEC_IOCTL_PORT_TO_PFF convert
+ between PCI Function Framework number (used by the event system)
+ and Switchtec Logic Port ID and Partition number (which is more
+ user friendly).
diff --git a/Documentation/tee.txt b/Documentation/tee.txt
new file mode 100644
index 000000000000..718599357596
--- /dev/null
+++ b/Documentation/tee.txt
@@ -0,0 +1,118 @@
+TEE subsystem
+This document describes the TEE subsystem in Linux.
+
+A TEE (Trusted Execution Environment) is a trusted OS running in some
+secure environment, for example, TrustZone on ARM CPUs, or a separate
+secure co-processor etc. A TEE driver handles the details needed to
+communicate with the TEE.
+
+This subsystem deals with:
+
+- Registration of TEE drivers
+
+- Managing shared memory between Linux and the TEE
+
+- Providing a generic API to the TEE
+
+The TEE interface
+=================
+
+include/uapi/linux/tee.h defines the generic interface to a TEE.
+
+User space (the client) connects to the driver by opening /dev/tee[0-9]* or
+/dev/teepriv[0-9]*.
+
+- TEE_IOC_SHM_ALLOC allocates shared memory and returns a file descriptor
+ which user space can mmap. When user space doesn't need the file
+ descriptor any more, it should be closed. When shared memory isn't needed
+ any longer it should be unmapped with munmap() to allow the reuse of
+ memory.
+
+- TEE_IOC_VERSION lets user space know which TEE this driver handles and
+ its capabilities.
+
+- TEE_IOC_OPEN_SESSION opens a new session to a Trusted Application.
+
+- TEE_IOC_INVOKE invokes a function in a Trusted Application.
+
+- TEE_IOC_CANCEL may cancel an ongoing TEE_IOC_OPEN_SESSION or TEE_IOC_INVOKE.
+
+- TEE_IOC_CLOSE_SESSION closes a session to a Trusted Application.
+
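+As an illustration, a minimal client that only queries the driver could look
+like the sketch below (assuming the UAPI header above is installed as
+<linux/tee.h>):
+
+  #include <fcntl.h>
+  #include <stdio.h>
+  #include <sys/ioctl.h>
+  #include <unistd.h>
+  #include <linux/tee.h>
+
+  int main(void)
+  {
+          struct tee_ioctl_version_data vers;
+          int fd = open("/dev/tee0", O_RDWR);
+
+          if (fd < 0) {
+                  perror("open /dev/tee0");
+                  return 1;
+          }
+          if (ioctl(fd, TEE_IOC_VERSION, &vers)) {
+                  perror("TEE_IOC_VERSION");
+                  close(fd);
+                  return 1;
+          }
+          /* impl_id identifies the TEE implementation, gen_caps the
+             generic capabilities it offers. */
+          printf("impl_id=%u impl_caps=0x%x gen_caps=0x%x\n",
+                 vers.impl_id, vers.impl_caps, vers.gen_caps);
+          close(fd);
+          return 0;
+  }
+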
+There are two classes of clients, normal clients and supplicants. The latter is
+a helper process for the TEE to access resources in Linux, for example file
+system access. A normal client opens /dev/tee[0-9]* and a supplicant opens
+/dev/teepriv[0-9]*.
+
+Much of the communication between clients and the TEE is opaque to the
+driver. The main job for the driver is to receive requests from the
+clients, forward them to the TEE and send back the results. In the case of
+supplicants the communication goes in the other direction, the TEE sends
+requests to the supplicant which then sends back the result.
+
+OP-TEE driver
+=============
+
+The OP-TEE driver handles OP-TEE [1] based TEEs. Currently it is only the ARM
+TrustZone based OP-TEE solution that is supported.
+
+Lowest level of communication with OP-TEE builds on ARM SMC Calling
+Convention (SMCCC) [2], which is the foundation for OP-TEE's SMC interface
+[3] used internally by the driver. Stacked on top of that is OP-TEE Message
+Protocol [4].
+
+OP-TEE SMC interface provides the basic functions required by SMCCC and some
+additional functions specific for OP-TEE. The most interesting functions are:
+
+- OPTEE_SMC_FUNCID_CALLS_UID (part of SMCCC) returns the version information
+ which is then returned by TEE_IOC_VERSION
+
+- OPTEE_SMC_CALL_GET_OS_UUID returns the particular OP-TEE implementation, used
+ to tell, for instance, a TrustZone OP-TEE apart from an OP-TEE running on a
+ separate secure co-processor.
+
+- OPTEE_SMC_CALL_WITH_ARG drives the OP-TEE message protocol
+
+- OPTEE_SMC_GET_SHM_CONFIG lets the driver and OP-TEE agree on which memory
+ range to use for shared memory between Linux and OP-TEE.
+
+The GlobalPlatform TEE Client API [5] is implemented on top of the generic
+TEE API.
+
+Picture of the relationship between the different components in the
+OP-TEE architecture.
+
+ User space Kernel Secure world
+ ~~~~~~~~~~ ~~~~~~ ~~~~~~~~~~~~
+ +--------+ +-------------+
+ | Client | | Trusted |
+ +--------+ | Application |
+ /\ +-------------+
+ || +----------+ /\
+ || |tee- | ||
+ || |supplicant| \/
+ || +----------+ +-------------+
+ \/ /\ | TEE Internal|
+ +-------+ || | API |
+ + TEE | || +--------+--------+ +-------------+
+ | Client| || | TEE | OP-TEE | | OP-TEE |
+ | API | \/ | subsys | driver | | Trusted OS |
+ +-------+----------------+----+-------+----+-----------+-------------+
+ | Generic TEE API | | OP-TEE MSG |
+ | IOCTL (TEE_IOC_*) | | SMCCC (OPTEE_SMC_CALL_*) |
+ +-----------------------------+ +------------------------------+
+
+RPCs (Remote Procedure Calls) are requests from the secure world to the kernel
+driver or tee-supplicant. An RPC is identified by a special range of SMCCC
+return values from OPTEE_SMC_CALL_WITH_ARG. RPC messages which are intended for
+the kernel are handled by the kernel driver. Other RPC messages will be
+forwarded to tee-supplicant without further involvement of the driver, except
+for switching the shared memory buffer representation.
+
+References:
+[1] https://github.com/OP-TEE/optee_os
+[2] http://infocenter.arm.com/help/topic/com.arm.doc.den0028a/index.html
+[3] drivers/tee/optee/optee_smc.h
+[4] drivers/tee/optee/optee_msg.h
+[5] http://www.globalplatform.org/specificationsdevice.asp look for
+ "TEE Client API Specification v1.0" and click download.
diff --git a/Documentation/usb/typec.rst b/Documentation/usb/typec.rst
new file mode 100644
index 000000000000..b67a46779de9
--- /dev/null
+++ b/Documentation/usb/typec.rst
@@ -0,0 +1,184 @@
+
+USB Type-C connector class
+==========================
+
+Introduction
+------------
+
+The typec class is meant for describing the USB Type-C ports in a system to
+user space in a unified fashion. The class is designed to provide nothing but
+the user space interface implementation, in the hope that it can be utilized
+on as many platforms as possible.
+
+The platforms are expected to register every USB Type-C port they have with the
+class. In a normal case the registration will be done by a USB Type-C or PD PHY
+driver, but it may also be a driver for a firmware interface such as UCSI, a
+driver for a USB PD controller, or even a driver for a Thunderbolt 3 controller.
+This document refers to the component registering the USB Type-C ports with the
+class as the "port driver".
+
+On top of showing the capabilities, the class also offers user space control
+over the roles and alternate modes of ports, partners and cable plugs when the
+port driver is capable of supporting those features.
+
+The class provides an API for the port drivers described in this document. The
+attributes are described in Documentation/ABI/testing/sysfs-class-typec.
+
+User space interface
+--------------------
+Every port will be presented as its own device under /sys/class/typec/. The
+first port will be named "port0", the second "port1" and so on.
+
+When connected, the partner will be presented also as its own device under
+/sys/class/typec/. The parent of the partner device will always be the port it
+is attached to. The partner attached to port "port0" will be named
+"port0-partner". Full path to the device would be
+/sys/class/typec/port0/port0-partner/.
+
+The cable and the two plugs on it may also be optionally presented as their own
+devices under /sys/class/typec/. The cable attached to the port "port0" port
+will be named port0-cable and the plug on the SOP Prime end (see USB Power
+Delivery Specification ch. 2.4) will be named "port0-plug0" and on the SOP
+Double Prime end "port0-plug1". The parent of a cable will always be the port,
+and the parent of the cable plugs will always be the cable.
+
+If the port, partner or cable plug supports Alternate Modes, every supported
+Alternate Mode SVID will have its own device describing it. Note that the
+Alternate Mode devices will not be attached to the typec class. The parent of an
+alternate mode will be the device that supports it, so for example an alternate
+mode of port0-partner will be presented under /sys/class/typec/port0-partner/.
+Every mode that is supported will have its own group under the Alternate Mode
+device named "mode<index>", for example /sys/class/typec/port0/<alternate
+mode>/mode1/. Requests for entering/exiting a mode can be made with the "active"
+attribute file in that group.
+
+Driver API
+----------
+
+Registering the ports
+~~~~~~~~~~~~~~~~~~~~~
+
+The port drivers will describe every Type-C port they control with struct
+typec_capability data structure, and register them with the following API:
+
+.. kernel-doc:: drivers/usb/typec/typec.c
+ :functions: typec_register_port typec_unregister_port
+
+When registering the ports, the prefer_role member in struct typec_capability
+deserves special notice. If the port that is being registered does not have an
+initial role preference, which means the port does not execute Try.SNK or
+Try.SRC by default, the member must have the value TYPEC_NO_PREFERRED_ROLE.
+Otherwise, if the port executes Try.SNK by default, the member must have the
+value TYPEC_DEVICE, and with Try.SRC the value must be TYPEC_HOST.
+
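+As an illustration only, a hypothetical port driver could register a port
+roughly as in the sketch below; the error handling convention and the static
+lifetime of the capability structure are assumptions here, only the
+prefer_role handling follows the text above::
+
+  #include <linux/device.h>
+  #include <linux/err.h>
+  #include <linux/usb/typec.h>
+
+  static struct typec_port *example_port;
+
+  /* Kept at file scope in case the class keeps a reference to it. */
+  static struct typec_capability example_cap = {
+          .prefer_role = TYPEC_NO_PREFERRED_ROLE, /* no Try.SNK/Try.SRC */
+  };
+
+  static int example_register(struct device *parent)
+  {
+          example_port = typec_register_port(parent, &example_cap);
+          if (IS_ERR_OR_NULL(example_port))
+                  return example_port ? PTR_ERR(example_port) : -ENOMEM;
+          return 0;
+  }
+
+  static void example_unregister(void)
+  {
+          typec_unregister_port(example_port);
+  }
+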
+Registering Partners
+~~~~~~~~~~~~~~~~~~~~
+
+After successful connection of a partner, the port driver needs to register the
+partner with the class. Details about the partner need to be described in struct
+typec_partner_desc. The class copies the details of the partner during
+registration. The class offers the following API for registering/unregistering
+partners.
+
+.. kernel-doc:: drivers/usb/typec/typec.c
+ :functions: typec_register_partner typec_unregister_partner
+
+The class will provide a handle to struct typec_partner if the registration was
+successful, or NULL.
+
+If the partner is USB Power Delivery capable, and the port driver is able to
+show the result of Discover Identity command, the partner descriptor structure
+should include handle to struct usb_pd_identity instance. The class will then
+create a sysfs directory for the identity under the partner device. The result
+of Discover Identity command can then be reported with the following API:
+
+.. kernel-doc:: drivers/usb/typec/typec.c
+ :functions: typec_partner_set_identity
+
+Registering Cables
+~~~~~~~~~~~~~~~~~~
+
+After successful connection of a cable that supports USB Power Delivery
+Structured VDM "Discover Identity", the port driver needs to register the cable
+and one or two plugs, depending if there is CC Double Prime controller present
+in the cable or not. So a cable capable of SOP Prime communication, but not SOP
+Double Prime communication, should only have one plug registered. For more
+information about SOP communication, please read chapter about it from the
+latest USB Power Delivery specification.
+
+The plugs are represented as their own devices. The cable is registered first,
+followed by registration of the cable plugs. The cable will be the parent device
+for the plugs. Details about the cable need to be described in struct
+typec_cable_desc and about a plug in struct typec_plug_desc. The class copies
+the details during registration. The class offers the following API for
+registering/unregistering cables and their plugs:
+
+.. kernel-doc:: drivers/usb/typec/typec.c
+ :functions: typec_register_cable typec_unregister_cable typec_register_plug
+ typec_unregister_plug
+
+The class will provide a handle to struct typec_cable and struct typec_plug if
+the registration is successful, or NULL if it isn't.
+
+If the cable is USB Power Delivery capable, and the port driver is able to show
+the result of Discover Identity command, the cable descriptor structure should
+include handle to struct usb_pd_identity instance. The class will then create a
+sysfs directory for the identity under the cable device. The result of Discover
+Identity command can then be reported with the following API:
+
+.. kernel-doc:: drivers/usb/typec/typec.c
+ :functions: typec_cable_set_identity
+
+Notifications
+~~~~~~~~~~~~~
+
+When the partner has executed a role change, or when the default roles change
+during connection of a partner or cable, the port driver must use the following
+APIs to report it to the class:
+
+.. kernel-doc:: drivers/usb/typec/typec.c
+ :functions: typec_set_data_role typec_set_pwr_role typec_set_vconn_role
+ typec_set_pwr_opmode
+
+Alternate Modes
+~~~~~~~~~~~~~~~
+
+USB Type-C ports, partners and cable plugs may support Alternate Modes. Each
+Alternate Mode will have an identifier called SVID, which is either a Standard
+ID given by USB-IF or a vendor ID, and each supported SVID can have 1 - 6 modes.
+The class provides struct typec_mode_desc for describing an individual mode of
+an SVID, and struct typec_altmode_desc which is a container for all the
+supported modes.
+
+Ports that support Alternate Modes need to register each SVID they support with
+the following API:
+
+.. kernel-doc:: drivers/usb/typec/typec.c
+ :functions: typec_port_register_altmode
+
+If a partner or cable plug provides a list of SVIDs as response to USB Power
+Delivery Structured VDM Discover SVIDs message, each SVID needs to be
+registered.
+
+API for the partners:
+
+.. kernel-doc:: drivers/usb/typec/typec.c
+ :functions: typec_partner_register_altmode
+
+API for the Cable Plugs:
+
+.. kernel-doc:: drivers/usb/typec/typec.c
+ :functions: typec_plug_register_altmode
+
+So ports, partners and cable plugs will register the alternate modes with their
+own functions, but the registration will always return a handle to struct
+typec_altmode on success, or NULL. The unregistration will happen with the same
+function:
+
+.. kernel-doc:: drivers/usb/typec/typec.c
+ :functions: typec_unregister_altmode
+
+If a partner or cable plug enters or exits a mode, the port driver needs to
+notify the class with the following API:
+
+.. kernel-doc:: drivers/usb/typec/typec.c
+ :functions: typec_altmode_update_active
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index fd106899afd1..4029943887a3 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -110,17 +110,18 @@ Type: system ioctl
Parameters: machine type identifier (KVM_VM_*)
Returns: a VM fd that can be used to control the new virtual machine.
-The new VM has no virtual cpus and no memory. An mmap() of a VM fd
-will access the virtual machine's physical address space; offset zero
-corresponds to guest physical address zero. Use of mmap() on a VM fd
-is discouraged if userspace memory allocation (KVM_CAP_USER_MEMORY) is
-available.
-You most certainly want to use 0 as machine type.
+The new VM has no virtual cpus and no memory.
+You probably want to use 0 as machine type.
In order to create user controlled virtual machines on S390, check
KVM_CAP_S390_UCONTROL and use the flag KVM_VM_S390_UCONTROL as
privileged user (CAP_SYS_ADMIN).
+To use hardware assisted virtualization on MIPS (VZ ASE) rather than
+the default trap & emulate implementation (which changes the virtual
+memory layout to fit in user mode), check KVM_CAP_MIPS_VZ and use the
+flag KVM_VM_MIPS_VZ.
+
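+A minimal sketch of this, assuming a <linux/kvm.h> recent enough to define
+KVM_CAP_MIPS_VZ and KVM_VM_MIPS_VZ (error handling trimmed):
+
+  #include <fcntl.h>
+  #include <sys/ioctl.h>
+  #include <linux/kvm.h>
+
+  int create_mips_vm(void)
+  {
+          int type = 0;        /* default machine type */
+          int kvm = open("/dev/kvm", O_RDWR);
+
+          if (kvm < 0)
+                  return -1;
+          /* A positive return means the VZ implementation can be used. */
+          if (ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_MIPS_VZ) > 0)
+                  type = KVM_VM_MIPS_VZ;
+          return ioctl(kvm, KVM_CREATE_VM, type);        /* VM fd, or -1 */
+  }
+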
4.3 KVM_GET_MSR_INDEX_LIST
@@ -1321,130 +1322,6 @@ The flags bitmap is defined as:
/* the host supports the ePAPR idle hcall
#define KVM_PPC_PVINFO_FLAGS_EV_IDLE (1<<0)
-4.48 KVM_ASSIGN_PCI_DEVICE (deprecated)
-
-Capability: none
-Architectures: x86
-Type: vm ioctl
-Parameters: struct kvm_assigned_pci_dev (in)
-Returns: 0 on success, -1 on error
-
-Assigns a host PCI device to the VM.
-
-struct kvm_assigned_pci_dev {
- __u32 assigned_dev_id;
- __u32 busnr;
- __u32 devfn;
- __u32 flags;
- __u32 segnr;
- union {
- __u32 reserved[11];
- };
-};
-
-The PCI device is specified by the triple segnr, busnr, and devfn.
-Identification in succeeding service requests is done via assigned_dev_id. The
-following flags are specified:
-
-/* Depends on KVM_CAP_IOMMU */
-#define KVM_DEV_ASSIGN_ENABLE_IOMMU (1 << 0)
-/* The following two depend on KVM_CAP_PCI_2_3 */
-#define KVM_DEV_ASSIGN_PCI_2_3 (1 << 1)
-#define KVM_DEV_ASSIGN_MASK_INTX (1 << 2)
-
-If KVM_DEV_ASSIGN_PCI_2_3 is set, the kernel will manage legacy INTx interrupts
-via the PCI-2.3-compliant device-level mask, thus enable IRQ sharing with other
-assigned devices or host devices. KVM_DEV_ASSIGN_MASK_INTX specifies the
-guest's view on the INTx mask, see KVM_ASSIGN_SET_INTX_MASK for details.
-
-The KVM_DEV_ASSIGN_ENABLE_IOMMU flag is a mandatory option to ensure
-isolation of the device. Usages not specifying this flag are deprecated.
-
-Only PCI header type 0 devices with PCI BAR resources are supported by
-device assignment. The user requesting this ioctl must have read/write
-access to the PCI sysfs resource files associated with the device.
-
-Errors:
- ENOTTY: kernel does not support this ioctl
-
- Other error conditions may be defined by individual device types or
- have their standard meanings.
-
-
-4.49 KVM_DEASSIGN_PCI_DEVICE (deprecated)
-
-Capability: none
-Architectures: x86
-Type: vm ioctl
-Parameters: struct kvm_assigned_pci_dev (in)
-Returns: 0 on success, -1 on error
-
-Ends PCI device assignment, releasing all associated resources.
-
-See KVM_ASSIGN_PCI_DEVICE for the data structure. Only assigned_dev_id is
-used in kvm_assigned_pci_dev to identify the device.
-
-Errors:
- ENOTTY: kernel does not support this ioctl
-
- Other error conditions may be defined by individual device types or
- have their standard meanings.
-
-4.50 KVM_ASSIGN_DEV_IRQ (deprecated)
-
-Capability: KVM_CAP_ASSIGN_DEV_IRQ
-Architectures: x86
-Type: vm ioctl
-Parameters: struct kvm_assigned_irq (in)
-Returns: 0 on success, -1 on error
-
-Assigns an IRQ to a passed-through device.
-
-struct kvm_assigned_irq {
- __u32 assigned_dev_id;
- __u32 host_irq; /* ignored (legacy field) */
- __u32 guest_irq;
- __u32 flags;
- union {
- __u32 reserved[12];
- };
-};
-
-The following flags are defined:
-
-#define KVM_DEV_IRQ_HOST_INTX (1 << 0)
-#define KVM_DEV_IRQ_HOST_MSI (1 << 1)
-#define KVM_DEV_IRQ_HOST_MSIX (1 << 2)
-
-#define KVM_DEV_IRQ_GUEST_INTX (1 << 8)
-#define KVM_DEV_IRQ_GUEST_MSI (1 << 9)
-#define KVM_DEV_IRQ_GUEST_MSIX (1 << 10)
-
-It is not valid to specify multiple types per host or guest IRQ. However, the
-IRQ type of host and guest can differ or can even be null.
-
-Errors:
- ENOTTY: kernel does not support this ioctl
-
- Other error conditions may be defined by individual device types or
- have their standard meanings.
-
-
-4.51 KVM_DEASSIGN_DEV_IRQ (deprecated)
-
-Capability: KVM_CAP_ASSIGN_DEV_IRQ
-Architectures: x86
-Type: vm ioctl
-Parameters: struct kvm_assigned_irq (in)
-Returns: 0 on success, -1 on error
-
-Ends an IRQ assignment to a passed-through device.
-
-See KVM_ASSIGN_DEV_IRQ for the data structure. The target device is specified
-by assigned_dev_id, flags must correspond to the IRQ type specified on
-KVM_ASSIGN_DEV_IRQ. Partial deassignment of host or guest IRQ is allowed.
-
-
4.52 KVM_SET_GSI_ROUTING
Capability: KVM_CAP_IRQ_ROUTING
@@ -1531,52 +1408,6 @@ struct kvm_irq_routing_hv_sint {
__u32 sint;
};
-4.53 KVM_ASSIGN_SET_MSIX_NR (deprecated)
-
-Capability: none
-Architectures: x86
-Type: vm ioctl
-Parameters: struct kvm_assigned_msix_nr (in)
-Returns: 0 on success, -1 on error
-
-Set the number of MSI-X interrupts for an assigned device. The number is
-reset again by terminating the MSI-X assignment of the device via
-KVM_DEASSIGN_DEV_IRQ. Calling this service more than once at any earlier
-point will fail.
-
-struct kvm_assigned_msix_nr {
- __u32 assigned_dev_id;
- __u16 entry_nr;
- __u16 padding;
-};
-
-#define KVM_MAX_MSIX_PER_DEV 256
-
-
-4.54 KVM_ASSIGN_SET_MSIX_ENTRY (deprecated)
-
-Capability: none
-Architectures: x86
-Type: vm ioctl
-Parameters: struct kvm_assigned_msix_entry (in)
-Returns: 0 on success, -1 on error
-
-Specifies the routing of an MSI-X assigned device interrupt to a GSI. Setting
-the GSI vector to zero means disabling the interrupt.
-
-struct kvm_assigned_msix_entry {
- __u32 assigned_dev_id;
- __u32 gsi;
- __u16 entry; /* The index of entry in the MSI-X table */
- __u16 padding[3];
-};
-
-Errors:
- ENOTTY: kernel does not support this ioctl
-
- Other error conditions may be defined by individual device types or
- have their standard meanings.
-
4.55 KVM_SET_TSC_KHZ
@@ -1728,40 +1559,6 @@ should skip processing the bitmap and just invalidate everything. It must
be set to the number of set bits in the bitmap.
-4.61 KVM_ASSIGN_SET_INTX_MASK (deprecated)
-
-Capability: KVM_CAP_PCI_2_3
-Architectures: x86
-Type: vm ioctl
-Parameters: struct kvm_assigned_pci_dev (in)
-Returns: 0 on success, -1 on error
-
-Allows userspace to mask PCI INTx interrupts from the assigned device. The
-kernel will not deliver INTx interrupts to the guest between setting and
-clearing of KVM_ASSIGN_SET_INTX_MASK via this interface. This enables use of
-and emulation of PCI 2.3 INTx disable command register behavior.
-
-This may be used for both PCI 2.3 devices supporting INTx disable natively and
-older devices lacking this support. Userspace is responsible for emulating the
-read value of the INTx disable bit in the guest visible PCI command register.
-When modifying the INTx disable state, userspace should precede updating the
-physical device command register by calling this ioctl to inform the kernel of
-the new intended INTx mask state.
-
-Note that the kernel uses the device INTx disable bit to internally manage the
-device interrupt state for PCI 2.3 devices. Reads of this register may
-therefore not match the expected value. Writes should always use the guest
-intended INTx disable value rather than attempting to read-copy-update the
-current physical device state. Races between user and kernel updates to the
-INTx disable bit are handled lazily in the kernel. It's possible the device
-may generate unintended interrupts, but they will not be injected into the
-guest.
-
-See KVM_ASSIGN_DEV_IRQ for the data structure. The target device is specified
-by assigned_dev_id. In the flags field, only KVM_DEV_ASSIGN_MASK_INTX is
-evaluated.
-
-
4.62 KVM_CREATE_SPAPR_TCE
Capability: KVM_CAP_SPAPR_TCE
@@ -2068,11 +1865,23 @@ registers, find a list below:
MIPS | KVM_REG_MIPS_CP0_ENTRYLO0 | 64
MIPS | KVM_REG_MIPS_CP0_ENTRYLO1 | 64
MIPS | KVM_REG_MIPS_CP0_CONTEXT | 64
+ MIPS | KVM_REG_MIPS_CP0_CONTEXTCONFIG| 32
MIPS | KVM_REG_MIPS_CP0_USERLOCAL | 64
+ MIPS | KVM_REG_MIPS_CP0_XCONTEXTCONFIG| 64
MIPS | KVM_REG_MIPS_CP0_PAGEMASK | 32
+ MIPS | KVM_REG_MIPS_CP0_PAGEGRAIN | 32
+ MIPS | KVM_REG_MIPS_CP0_SEGCTL0 | 64
+ MIPS | KVM_REG_MIPS_CP0_SEGCTL1 | 64
+ MIPS | KVM_REG_MIPS_CP0_SEGCTL2 | 64
+ MIPS | KVM_REG_MIPS_CP0_PWBASE | 64
+ MIPS | KVM_REG_MIPS_CP0_PWFIELD | 64
+ MIPS | KVM_REG_MIPS_CP0_PWSIZE | 64
MIPS | KVM_REG_MIPS_CP0_WIRED | 32
+ MIPS | KVM_REG_MIPS_CP0_PWCTL | 32
MIPS | KVM_REG_MIPS_CP0_HWRENA | 32
MIPS | KVM_REG_MIPS_CP0_BADVADDR | 64
+ MIPS | KVM_REG_MIPS_CP0_BADINSTR | 32
+ MIPS | KVM_REG_MIPS_CP0_BADINSTRP | 32
MIPS | KVM_REG_MIPS_CP0_COUNT | 32
MIPS | KVM_REG_MIPS_CP0_ENTRYHI | 64
MIPS | KVM_REG_MIPS_CP0_COMPARE | 32
@@ -2089,6 +1898,7 @@ registers, find a list below:
MIPS | KVM_REG_MIPS_CP0_CONFIG4 | 32
MIPS | KVM_REG_MIPS_CP0_CONFIG5 | 32
MIPS | KVM_REG_MIPS_CP0_CONFIG7 | 32
+ MIPS | KVM_REG_MIPS_CP0_XCONTEXT | 64
MIPS | KVM_REG_MIPS_CP0_ERROREPC | 64
MIPS | KVM_REG_MIPS_CP0_KSCRATCH1 | 64
MIPS | KVM_REG_MIPS_CP0_KSCRATCH2 | 64
@@ -2096,6 +1906,7 @@ registers, find a list below:
MIPS | KVM_REG_MIPS_CP0_KSCRATCH4 | 64
MIPS | KVM_REG_MIPS_CP0_KSCRATCH5 | 64
MIPS | KVM_REG_MIPS_CP0_KSCRATCH6 | 64
+ MIPS | KVM_REG_MIPS_CP0_MAAR(0..63) | 64
MIPS | KVM_REG_MIPS_COUNT_CTL | 64
MIPS | KVM_REG_MIPS_COUNT_RESUME | 64
MIPS | KVM_REG_MIPS_COUNT_HZ | 64
@@ -2162,6 +1973,10 @@ hardware, host kernel, guest, and whether XPA is present in the guest, i.e.
with the RI and XI bits (if they exist) in bits 63 and 62 respectively, and
the PFNX field starting at bit 30.
+MIPS MAARs (see KVM_REG_MIPS_CP0_MAAR(*) above) have the following id bit
+patterns:
+ 0x7030 0000 0001 01 <reg:8>
+
MIPS KVM control registers (see above) have the following id bit patterns:
0x7030 0000 0002 <reg:16>
@@ -4164,6 +3979,23 @@ to take care of that.
This capability can be enabled dynamically even if VCPUs were already
created and are running.
+7.9 KVM_CAP_S390_GS
+
+Architectures: s390
+Parameters: none
+Returns: 0 on success; -EINVAL if the machine does not support
+ guarded storage; -EBUSY if a VCPU has already been created.
+
+Allows use of guarded storage for the KVM guest.
+
+7.10 KVM_CAP_S390_AIS
+
+Architectures: s390
+Parameters: none
+Returns: 0 on success; -EBUSY if a VCPU has already been created.
+
+Allows use of adapter-interruption suppression.
+
8. Other capabilities.
----------------------
@@ -4210,3 +4042,118 @@ This capability, if KVM_CHECK_EXTENSION indicates that it is
available, means that that the kernel can support guests using the
hashed page table MMU defined in Power ISA V3.00 (as implemented in
the POWER9 processor), including in-memory segment tables.
+
+8.5 KVM_CAP_MIPS_VZ
+
+Architectures: mips
+
+This capability, if KVM_CHECK_EXTENSION on the main kvm handle indicates that
+it is available, means that full hardware assisted virtualization capabilities
+of the hardware are available for use through KVM. An appropriate
+KVM_VM_MIPS_* type must be passed to KVM_CREATE_VM to create a VM which
+utilises it.
+
+If KVM_CHECK_EXTENSION on a kvm VM handle indicates that this capability is
+available, it means that the VM is using full hardware assisted virtualization
+capabilities of the hardware. This is useful to check after creating a VM with
+KVM_VM_MIPS_DEFAULT.
+
+The value returned by KVM_CHECK_EXTENSION should be compared against known
+values (see below). All other values are reserved. This is to allow for the
+possibility of other hardware assisted virtualization implementations which
+may be incompatible with the MIPS VZ ASE.
+
+ 0: The trap & emulate implementation is in use to run guest code in user
+ mode. Guest virtual memory segments are rearranged to fit the guest in the
+ user mode address space.
+
+ 1: The MIPS VZ ASE is in use, providing full hardware assisted
+ virtualization, including standard guest virtual memory segments.
+
+8.6 KVM_CAP_MIPS_TE
+
+Architectures: mips
+
+This capability, if KVM_CHECK_EXTENSION on the main kvm handle indicates that
+it is available, means that the trap & emulate implementation is available to
+run guest code in user mode, even if KVM_CAP_MIPS_VZ indicates that hardware
+assisted virtualisation is also available. KVM_VM_MIPS_TE (0) must be passed
+to KVM_CREATE_VM to create a VM which utilises it.
+
+If KVM_CHECK_EXTENSION on a kvm VM handle indicates that this capability is
+available, it means that the VM is using trap & emulate.
+
+8.7 KVM_CAP_MIPS_64BIT
+
+Architectures: mips
+
+This capability indicates the supported architecture type of the guest, i.e. the
+supported register and address width.
+
+The values returned when this capability is checked by KVM_CHECK_EXTENSION on a
+kvm VM handle correspond roughly to the CP0_Config.AT register field, and should
+be checked specifically against known values (see below). All other values are
+reserved.
+
+ 0: MIPS32 or microMIPS32.
+ Both registers and addresses are 32-bits wide.
+ It will only be possible to run 32-bit guest code.
+
+ 1: MIPS64 or microMIPS64 with access only to 32-bit compatibility segments.
+ Registers are 64-bits wide, but addresses are 32-bits wide.
+ 64-bit guest code may run but cannot access MIPS64 memory segments.
+ It will also be possible to run 32-bit guest code.
+
+ 2: MIPS64 or microMIPS64 with access to all address segments.
+ Both registers and addresses are 64-bits wide.
+ It will be possible to run 64-bit or 32-bit guest code.
+
+8.8 KVM_CAP_X86_GUEST_MWAIT
+
+Architectures: x86
+
+This capability indicates that a guest using memory monitoring instructions
+(MWAIT/MWAITX) to stop the virtual CPU will not cause a VM exit. As such, time
+spent while the virtual CPU is halted in this way will be accounted for as
+guest running time on the host (as opposed to e.g. HLT).
+
+8.9 KVM_CAP_ARM_USER_IRQ
+
+Architectures: arm, arm64
+This capability, if KVM_CHECK_EXTENSION indicates that it is available, means
+that if userspace creates a VM without an in-kernel interrupt controller, it
+will be notified of changes to the output level of in-kernel emulated devices,
+which can generate virtual interrupts, presented to the VM.
+For such VMs, on every return to userspace, the kernel
+updates the vcpu's run->s.regs.device_irq_level field to represent the actual
+output level of the device.
+
+Whenever kvm detects a change in the device output level, kvm guarantees at
+least one return to userspace before running the VM. This exit could either
+be a KVM_EXIT_INTR or any other exit event, like KVM_EXIT_MMIO. This way,
+userspace can always sample the device output level and re-compute the state of
+the userspace interrupt controller. Userspace should always check the state
+of run->s.regs.device_irq_level on every kvm exit.
+The value in run->s.regs.device_irq_level can represent both level and edge
+triggered interrupt signals, depending on the device. Edge triggered interrupt
+signals will exit to userspace with the bit in run->s.regs.device_irq_level
+set exactly once per edge signal.
+
+The field run->s.regs.device_irq_level is available independent of
+run->kvm_valid_regs or run->kvm_dirty_regs bits.
+
+If KVM_CAP_ARM_USER_IRQ is supported, the KVM_CHECK_EXTENSION ioctl returns a
+number larger than 0 indicating the version of this capability that is
+implemented, and thereby which bits in run->s.regs.device_irq_level can signal
+values.
+
+Currently the following bits are defined for the device_irq_level bitmap:
+
+ KVM_CAP_ARM_USER_IRQ >= 1:
+
+ KVM_ARM_DEV_EL1_VTIMER - EL1 virtual timer
+ KVM_ARM_DEV_EL1_PTIMER - EL1 physical timer
+ KVM_ARM_DEV_PMU - ARM PMU overflow interrupt signal
+
+Future versions of kvm may implement additional events. These will get
+indicated by returning a higher number from KVM_CHECK_EXTENSION and will be
+listed above.
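+
+A rough user-space sketch of sampling these bits (arm/arm64 only; vcpu_fd and
+the mmap'ed run structure are assumed to come from the usual KVM_CREATE_VCPU
+and KVM_GET_VCPU_MMAP_SIZE setup):
+
+  #include <stdio.h>
+  #include <sys/ioctl.h>
+  #include <linux/kvm.h>
+
+  void run_and_sample_irq_lines(int vcpu_fd, struct kvm_run *run)
+  {
+          while (ioctl(vcpu_fd, KVM_RUN, 0) == 0) {
+                  /* Valid regardless of kvm_valid_regs/kvm_dirty_regs. */
+                  __u64 level = run->s.regs.device_irq_level;
+
+                  printf("vtimer=%d ptimer=%d pmu=%d\n",
+                         !!(level & KVM_ARM_DEV_EL1_VTIMER),
+                         !!(level & KVM_ARM_DEV_EL1_PTIMER),
+                         !!(level & KVM_ARM_DEV_PMU));
+
+                  /* ... then handle run->exit_reason as usual ... */
+          }
+  }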
diff --git a/Documentation/virtual/kvm/arm/hyp-abi.txt b/Documentation/virtual/kvm/arm/hyp-abi.txt
new file mode 100644
index 000000000000..a20a0bee268d
--- /dev/null
+++ b/Documentation/virtual/kvm/arm/hyp-abi.txt
@@ -0,0 +1,53 @@
+* Internal ABI between the kernel and HYP
+
+This file documents the interaction between the Linux kernel and the
+hypervisor layer when running Linux as a hypervisor (for example
+KVM). It doesn't cover the interaction of the kernel with the
+hypervisor when running as a guest (under Xen, KVM or any other
+hypervisor), or any hypervisor-specific interaction when the kernel is
+used as a host.
+
+On arm and arm64 (without VHE), the kernel doesn't run in hypervisor
+mode, but still needs to interact with it, allowing a built-in
+hypervisor to be either installed or torn down.
+
+In order to achieve this, the kernel must be booted at HYP (arm) or
+EL2 (arm64), allowing it to install a set of stubs before dropping to
+SVC/EL1. These stubs are accessible by using a 'hvc #0' instruction,
+and only act on individual CPUs.
+
+Unless specified otherwise, any built-in hypervisor must implement
+these functions (see arch/arm{,64}/include/asm/virt.h):
+
+* r0/x0 = HVC_SET_VECTORS
+ r1/x1 = vectors
+
+ Set HVBAR/VBAR_EL2 to 'vectors' to enable a hypervisor. 'vectors'
+ must be a physical address, and respect the alignment requirements
+ of the architecture. Only implemented by the initial stubs, not by
+ Linux hypervisors.
+
+* r0/x0 = HVC_RESET_VECTORS
+
+ Turn HYP/EL2 MMU off, and reset HVBAR/VBAR_EL2 to the initial
+ stubs' exception vector value. This effectively disables an existing
+ hypervisor.
+
+* r0/x0 = HVC_SOFT_RESTART
+ r1/x1 = restart address
+ x2 = x0's value when entering the next payload (arm64)
+ x3 = x1's value when entering the next payload (arm64)
+ x4 = x2's value when entering the next payload (arm64)
+
+ Mask all exceptions, disable the MMU, move the arguments into place
+ (arm64 only), and jump to the restart address while at HYP/EL2. This
+ hypercall is not expected to return to its caller.
+
+Any other value of r0/x0 triggers a hypervisor-specific handling,
+which is not documented here.
+
+The return value of a stub hypercall is held by r0/x0, and is 0 on
+success, and HVC_STUB_ERR on error. A stub hypercall is allowed to
+clobber any of the caller-saved registers (x0-x18 on arm64, r0-r3 and
+ip on arm). It is thus recommended to use a function call to perform
+the hypercall.
diff --git a/Documentation/virtual/kvm/devices/s390_flic.txt b/Documentation/virtual/kvm/devices/s390_flic.txt
index 6b0e115301c8..c2518cea8ab4 100644
--- a/Documentation/virtual/kvm/devices/s390_flic.txt
+++ b/Documentation/virtual/kvm/devices/s390_flic.txt
@@ -14,6 +14,8 @@ FLIC provides support to
- purge one pending floating I/O interrupt (KVM_DEV_FLIC_CLEAR_IO_IRQ)
- enable/disable for the guest transparent async page faults
- register and modify adapter interrupt sources (KVM_DEV_FLIC_ADAPTER_*)
+- modify AIS (adapter-interruption-suppression) mode state (KVM_DEV_FLIC_AISM)
+- inject adapter interrupts on a specified adapter (KVM_DEV_FLIC_AIRQ_INJECT)
Groups:
KVM_DEV_FLIC_ENQUEUE
@@ -64,12 +66,18 @@ struct kvm_s390_io_adapter {
__u8 isc;
__u8 maskable;
__u8 swap;
- __u8 pad;
+ __u8 flags;
};
id contains the unique id for the adapter, isc the I/O interruption subclass
- to use, maskable whether this adapter may be masked (interrupts turned off)
- and swap whether the indicators need to be byte swapped.
+ to use, maskable whether this adapter may be masked (interrupts turned off),
+ swap whether the indicators need to be byte swapped, and flags contains
+ further characteristics of the adapter.
+ Currently defined values for 'flags' are:
+ - KVM_S390_ADAPTER_SUPPRESSIBLE: adapter is subject to AIS
+ (adapter-interrupt-suppression) facility. This flag only has an effect if
+ the AIS capability is enabled.
+ Unknown flag values are ignored.
KVM_DEV_FLIC_ADAPTER_MODIFY
@@ -101,6 +109,33 @@ struct kvm_s390_io_adapter_req {
release a userspace page for the translated address specified in addr
from the list of mappings
+ KVM_DEV_FLIC_AISM
+ modify the adapter-interruption-suppression mode for a given isc if the
+ AIS capability is enabled. Takes a kvm_s390_ais_req describing:
+
+struct kvm_s390_ais_req {
+ __u8 isc;
+ __u16 mode;
+};
+
+ isc contains the target I/O interruption subclass, mode the target
+ adapter-interruption-suppression mode. The following modes are
+ currently supported:
+ - KVM_S390_AIS_MODE_ALL: ALL-Interruptions Mode, i.e. airq injection
+ is always allowed;
+ - KVM_S390_AIS_MODE_SINGLE: SINGLE-Interruption Mode, i.e. airq
+ injection is only allowed once and the following adapter interrupts
+ will be suppressed until the mode is set again to ALL-Interruptions
+ or SINGLE-Interruption mode.
+
+ KVM_DEV_FLIC_AIRQ_INJECT
+ Inject adapter interrupts on a specified adapter.
+ attr->attr contains the unique id for the adapter, which allows for
+ adapter-specific checks and actions.
+ For adapters subject to AIS, handle the airq injection suppression for
+ an isc according to the adapter-interruption-suppression mode on condition
+ that the AIS capability is enabled.
+
Note: The KVM_SET_DEVICE_ATTR/KVM_GET_DEVICE_ATTR device ioctls executed on
FLIC with an unknown group or attribute gives the error code EINVAL (instead of
ENXIO, as specified in the API documentation). It is not possible to conclude
diff --git a/Documentation/virtual/kvm/devices/vfio.txt b/Documentation/virtual/kvm/devices/vfio.txt
index ef51740c67ca..528c77c8022c 100644
--- a/Documentation/virtual/kvm/devices/vfio.txt
+++ b/Documentation/virtual/kvm/devices/vfio.txt
@@ -16,7 +16,21 @@ Groups:
KVM_DEV_VFIO_GROUP attributes:
KVM_DEV_VFIO_GROUP_ADD: Add a VFIO group to VFIO-KVM device tracking
+ kvm_device_attr.addr points to an int32_t file descriptor
+ for the VFIO group.
KVM_DEV_VFIO_GROUP_DEL: Remove a VFIO group from VFIO-KVM device tracking
+ kvm_device_attr.addr points to an int32_t file descriptor
+ for the VFIO group.
+ KVM_DEV_VFIO_GROUP_SET_SPAPR_TCE: attaches a guest visible TCE table
+ allocated by sPAPR KVM.
+ kvm_device_attr.addr points to a struct:
-For each, kvm_device_attr.addr points to an int32_t file descriptor
-for the VFIO group.
+ struct kvm_vfio_spapr_tce {
+ __s32 groupfd;
+ __s32 tablefd;
+ };
+
+ where
+ @groupfd is a file descriptor for a VFIO group;
+ @tablefd is a file descriptor for a TCE table allocated via
+ KVM_CREATE_SPAPR_TCE.
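+
+A user-space sketch of wiring this up; the VM, VFIO group and TCE table file
+descriptors are assumed to exist already, and the group is assumed to have
+been added with KVM_DEV_VFIO_GROUP_ADD beforehand:
+
+  #include <sys/ioctl.h>
+  #include <linux/kvm.h>
+
+  int kvm_vfio_attach_tce(int vm_fd, int group_fd, int table_fd)
+  {
+          struct kvm_create_device cd = { .type = KVM_DEV_TYPE_VFIO };
+          struct kvm_vfio_spapr_tce tce = {
+                  .groupfd = group_fd,
+                  .tablefd = table_fd,    /* from KVM_CREATE_SPAPR_TCE */
+          };
+          struct kvm_device_attr attr = {
+                  .group = KVM_DEV_VFIO_GROUP,
+                  .attr = KVM_DEV_VFIO_GROUP_SET_SPAPR_TCE,
+                  .addr = (__u64)(unsigned long)&tce,
+          };
+
+          if (ioctl(vm_fd, KVM_CREATE_DEVICE, &cd) < 0)
+                  return -1;
+          return ioctl(cd.fd, KVM_SET_DEVICE_ATTR, &attr);
+  }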
diff --git a/Documentation/virtual/kvm/devices/vm.txt b/Documentation/virtual/kvm/devices/vm.txt
index b6cda49f2ba4..575ccb022aac 100644
--- a/Documentation/virtual/kvm/devices/vm.txt
+++ b/Documentation/virtual/kvm/devices/vm.txt
@@ -140,7 +140,8 @@ struct kvm_s390_vm_cpu_subfunc {
u8 kmo[16]; # valid with Message-Security-Assist-Extension 4
u8 pcc[16]; # valid with Message-Security-Assist-Extension 4
u8 ppno[16]; # valid with Message-Security-Assist-Extension 5
- u8 reserved[1824]; # reserved for future instructions
+ u8 kma[16]; # valid with Message-Security-Assist-Extension 8
+ u8 reserved[1808]; # reserved for future instructions
};
Parameters: address of a buffer to load the subfunction blocks from.
diff --git a/Documentation/virtual/kvm/hypercalls.txt b/Documentation/virtual/kvm/hypercalls.txt
index feaaa634f154..a890529c63ed 100644
--- a/Documentation/virtual/kvm/hypercalls.txt
+++ b/Documentation/virtual/kvm/hypercalls.txt
@@ -28,6 +28,11 @@ S390:
property inside the device tree's /hypervisor node.
For more information refer to Documentation/virtual/kvm/ppc-pv.txt
+MIPS:
+ KVM hypercalls use the HYPCALL instruction with code 0 and the hypercall
+ number in $2 (v0). Up to four arguments may be placed in $4-$7 (a0-a3) and
+ the return value is placed in $2 (v0).
+
KVM Hypercalls Documentation
===========================
The template for each hypercall is:
diff --git a/Documentation/vm/transhuge.txt b/Documentation/vm/transhuge.txt
index cd28d5ee5273..4dde03b44ad1 100644
--- a/Documentation/vm/transhuge.txt
+++ b/Documentation/vm/transhuge.txt
@@ -266,7 +266,7 @@ for each mapping.
The number of file transparent huge pages mapped to userspace is available
by reading ShmemPmdMapped and ShmemHugePages fields in /proc/meminfo.
-To identify what applications are mapping file transparent huge pages, it
+To identify what applications are mapping file transparent huge pages, it
is necessary to read /proc/PID/smaps and count the FileHugeMapped fields
for each mapping.
@@ -292,7 +292,7 @@ thp_collapse_alloc_failed is incremented if khugepaged found a range
the allocation.
thp_file_alloc is incremented every time a file huge page is successfully
-i allocated.
+ allocated.
thp_file_mapped is incremented every time a file huge page is mapped into
user address space.
@@ -501,7 +501,7 @@ scanner can get reference to a page is get_page_unless_zero().
All tail pages have zero ->_refcount until atomic_add(). This prevents the
scanner from getting a reference to the tail page up to that point. After the
-atomic_add() we don't care about the ->_refcount value. We already known how
+atomic_add() we don't care about the ->_refcount value. We already known how
many references should be uncharged from the head page.
For head page get_page_unless_zero() will succeed and we don't mind. It's
@@ -519,8 +519,8 @@ comes. Splitting will free up unused subpages.
Splitting the page right away is not an option due to locking context in
the place where we can detect partial unmap. It's also might be
-counterproductive since in many cases partial unmap unmap happens during
-exit(2) if an THP crosses VMA boundary.
+counterproductive since in many cases partial unmap happens during exit(2) if
+a THP crosses a VMA boundary.
Function deferred_split_huge_page() is used to queue page for splitting.
The splitting itself will happen when we get memory pressure via shrinker
diff --git a/Documentation/w1/slaves/00-INDEX b/Documentation/w1/slaves/00-INDEX
index 6e18c70c3474..8d76718e1ea2 100644
--- a/Documentation/w1/slaves/00-INDEX
+++ b/Documentation/w1/slaves/00-INDEX
@@ -2,7 +2,11 @@
- This file
w1_therm
- The Maxim/Dallas Semiconductor ds18*20 temperature sensor.
+w1_ds2413
+ - The Maxim/Dallas Semiconductor ds2413 dual channel addressable switch.
w1_ds2423
- The Maxim/Dallas Semiconductor ds2423 counter device.
+w1_ds2438
+ - The Maxim/Dallas Semiconductor ds2438 smart battery monitor.
w1_ds28e04
- The Maxim/Dallas Semiconductor ds28e04 eeprom.
diff --git a/Documentation/w1/slaves/w1_ds2413 b/Documentation/w1/slaves/w1_ds2413
new file mode 100644
index 000000000000..936263a8ccb4
--- /dev/null
+++ b/Documentation/w1/slaves/w1_ds2413
@@ -0,0 +1,50 @@
+Kernel driver w1_ds2413
+=======================
+
+Supported chips:
+ * Maxim DS2413 1-Wire Dual Channel Addressable Switch
+
+supported family codes:
+ W1_FAMILY_DS2413 0x3A
+
+Author: Mariusz Bialonczyk <manio@skyboo.net>
+
+Description
+-----------
+
+The DS2413 chip has two open-drain outputs (PIO A and PIO B).
+Support is provided through the sysfs files "output" and "state".
+
+Reading state
+-------------
+The "state" file provides one-byte value which is in the same format as for
+the chip PIO_ACCESS_READ command (refer the datasheet for details):
+
+Bit 0: PIOA Pin State
+Bit 1: PIOA Output Latch State
+Bit 2: PIOB Pin State
+Bit 3: PIOB Output Latch State
+Bit 4-7: Complement of Bit 3 to Bit 0 (verified by the kernel module)
+
+This file is readonly.
+
+Writing output
+--------------
+You can set the PIO pins using the "output" file.
+It is writable; you can write a one-byte value to this sysfs file.
+Similarly, the byte format is the same as for the PIO_ACCESS_WRITE command:
+
+Bit 0: PIOA
+Bit 1: PIOB
+Bits 2-7: Don't care (the driver will set them to "1"s)
+
+
+The chip has some basic protection against transmission errors.
+When reading the state, there are four complement bits.
+The driver checks this complement, and when it is wrong it returns an
+I/O error.
+
+When writing the output, the master must repeat the PIO Output Data byte in
+its inverted form and then wait for a confirmation.
+If the write is unsuccessful three times, the write also returns an
+I/O error.
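+
+A small sketch of decoding the "state" byte (the slave id in the sysfs path
+is only an example):
+
+  #include <fcntl.h>
+  #include <stdio.h>
+  #include <unistd.h>
+
+  int main(void)
+  {
+          unsigned char state;
+          int fd = open("/sys/bus/w1/devices/3a-0000001b5d23/state", O_RDONLY);
+
+          if (fd < 0 || read(fd, &state, 1) != 1) {
+                  perror("state");
+                  return 1;
+          }
+          printf("PIOA pin=%d latch=%d, PIOB pin=%d latch=%d\n",
+                 !!(state & 0x01), !!(state & 0x02),
+                 !!(state & 0x04), !!(state & 0x08));
+          close(fd);
+          return 0;
+  }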
diff --git a/Documentation/w1/slaves/w1_ds2438 b/Documentation/w1/slaves/w1_ds2438
new file mode 100644
index 000000000000..b99f3674c5b4
--- /dev/null
+++ b/Documentation/w1/slaves/w1_ds2438
@@ -0,0 +1,63 @@
+Kernel driver w1_ds2438
+=======================
+
+Supported chips:
+ * Maxim DS2438 Smart Battery Monitor
+
+supported family codes:
+ W1_FAMILY_DS2438 0x26
+
+Author: Mariusz Bialonczyk <manio@skyboo.net>
+
+Description
+-----------
+
+The DS2438 chip provides several functions that are desirable to carry in
+a battery pack. It also has 40 bytes of nonvolatile EEPROM.
+Because of its ability to measure temperature, current and voltage, the chip
+is also often used in weather stations and applications such as rain gauges,
+wind speed/direction measurement, humidity sensing, etc.
+
+Current support is provided through the following sysfs files (all files
+except "iad" are readonly):
+
+"iad"
+-----
+This file controls the 'Current A/D Control Bit' (IAD) in the
+Status/Configuration Register.
+Writing a zero value clears the IAD bit and disables the current
+measurements.
+Writing the value "1" sets the IAD bit (enables the measurements).
+The IAD bit is enabled by default in the DS2438.
+
+When writing to the sysfs file, bits 2-7 are ignored, so it is safe to write
+ASCII.
+An I/O error is returned when there is a problem setting the new value.
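+
+For illustration, enabling the measurements from userspace can be as simple
+as the following sketch (the slave directory name is only an example):
+
+  #include <stdio.h>
+
+  int main(void)
+  {
+      FILE *f = fopen("/sys/bus/w1/devices/26-000000abcdef/iad", "w");
+
+      if (!f)
+          return 1;
+      fputc('1', f);   /* bits 2-7 are ignored, so ASCII '1' is fine */
+      return fclose(f) ? 1 : 0;
+  }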
+
+"page0"
+-------
+This file provides the full 8 bytes of the chip's Page 0 (00h).
+This page contains the most frequently accessed information of the DS2438.
+Internally, when this file is read, an additional CRC byte is also obtained
+from the slave device. If it is correct, the 8 bytes of page data are passed
+to userspace; otherwise an I/O error is returned.
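+
+For illustration, a minimal read-back sketch could look like this (the slave
+directory name is only an example):
+
+  #include <fcntl.h>
+  #include <stdio.h>
+  #include <unistd.h>
+
+  int main(void)
+  {
+      unsigned char page[8];
+      int i;
+      int fd = open("/sys/bus/w1/devices/26-000000abcdef/page0", O_RDONLY);
+
+      if (fd < 0 || read(fd, page, sizeof(page)) != sizeof(page)) {
+          perror("page0");
+          return 1;
+      }
+      close(fd);
+
+      for (i = 0; i < 8; i++)
+          printf("%02x ", page[i]);
+      printf("\n");
+      return 0;
+  }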
+
+"temperature"
+-------------
+Opening and reading this file initiates the CONVERT_T (temperature conversion)
+command of the chip; afterwards the temperature is read from the device
+registers and provided as an ASCII decimal value.
+
+Important: The returned value has to be divided by 256 to get a real
+temperature in degrees Celsius.
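+
+For illustration, a tiny sketch doing this conversion could look like this
+(the slave directory name is only an example):
+
+  #include <stdio.h>
+
+  int main(void)
+  {
+      const char *path = "/sys/bus/w1/devices/26-000000abcdef/temperature";
+      FILE *f = fopen(path, "r");
+      long raw;
+
+      if (!f || fscanf(f, "%ld", &raw) != 1) {
+          perror(path);
+          return 1;
+      }
+      fclose(f);
+
+      printf("%.3f degrees Celsius\n", (double)raw / 256.0);
+      return 0;
+  }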
+
+"vad", "vdd"
+------------
+Opening and reading this file initiates the CONVERT_V (voltage conversion)
+command of the chip.
+
+Depending on the sysfs filename, a different input for the A/D is selected:
+vad: general purpose A/D input (VAD)
+vdd: battery input (VDD)
+
+After the voltage conversion the value is returned as decimal ASCII.
+Note: The value is in mV, so to get volts the value has to be divided by 10.
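+
+For illustration, a small sketch reading the general purpose input could look
+like this (the slave directory name is only an example; the scaling simply
+follows the note above):
+
+  #include <stdio.h>
+
+  int main(void)
+  {
+      const char *path = "/sys/bus/w1/devices/26-000000abcdef/vad";
+      FILE *f = fopen(path, "r");
+      long raw;
+
+      if (!f || fscanf(f, "%ld", &raw) != 1) {
+          perror(path);
+          return 1;
+      }
+      fclose(f);
+
+      printf("VAD raw %ld (%.2f V per the note above)\n", raw, raw / 10.0);
+      return 0;
+  }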
diff --git a/MAINTAINERS b/MAINTAINERS
index 4e9d4e91fa94..2decf40d28e1 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -653,7 +653,9 @@ M: Thor Thayer <thor.thayer@linux.intel.com>
S: Maintained
F: drivers/gpio/gpio-altera-a10sr.c
F: drivers/mfd/altera-a10sr.c
+F: drivers/reset/reset-a10sr.c
F: include/linux/mfd/altera-a10sr.h
+F: include/dt-bindings/reset/altr,rst-mgr-a10sr.h
ALTERA TRIPLE SPEED ETHERNET DRIVER
M: Vince Bridgers <vbridger@opensource.altera.com>
@@ -813,6 +815,7 @@ W: http://wiki.analog.com/
W: http://ez.analog.com/community/linux-device-drivers
S: Supported
F: drivers/iio/*/ad*
+F: drivers/iio/adc/ltc2497*
X: drivers/iio/*/adjd*
F: drivers/staging/iio/*/ad*
F: drivers/staging/iio/trigger/iio-trig-bfin-timer.c
@@ -983,6 +986,7 @@ F: arch/arm*/include/asm/perf_event.h
F: drivers/perf/*
F: include/linux/perf/arm_pmu.h
F: Documentation/devicetree/bindings/arm/pmu.txt
+F: Documentation/devicetree/bindings/perf/
ARM PORT
M: Russell King <linux@armlinux.org.uk>
@@ -1054,8 +1058,13 @@ M: Chen-Yu Tsai <wens@csie.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
N: sun[x456789]i
-F: arch/arm/boot/dts/ntc-gr8*
+N: sun50i
+F: arch/arm/mach-sunxi/
F: arch/arm64/boot/dts/allwinner/
+F: drivers/clk/sunxi-ng/
+F: drivers/pinctrl/sunxi/
+F: drivers/soc/sunxi/
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/sunxi/linux.git
ARM/Allwinner SoC Clock Support
M: Emilio López <emilio@elopez.com.ar>
@@ -1108,7 +1117,6 @@ F: drivers/*/*aspeed*
ARM/ATMEL AT91RM9200, AT91SAM9 AND SAMA5 SOC SUPPORT
M: Nicolas Ferre <nicolas.ferre@microchip.com>
M: Alexandre Belloni <alexandre.belloni@free-electrons.com>
-M: Jean-Christophe Plagniol-Villard <plagnioj@jcrosoft.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
W: http://www.linux4sam.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/nferre/linux-at91.git
@@ -1120,6 +1128,7 @@ F: arch/arm/boot/dts/at91*.dtsi
F: arch/arm/boot/dts/sama*.dts
F: arch/arm/boot/dts/sama*.dtsi
F: arch/arm/include/debug/at91.S
+F: drivers/memory/atmel*
ARM/ATMEL AT91 Clock Support
M: Boris Brezillon <boris.brezillon@free-electrons.com>
@@ -1275,6 +1284,7 @@ F: arch/arm/mach-mxs/
F: arch/arm/boot/dts/imx*
F: arch/arm/configs/imx*_defconfig
F: drivers/clk/imx/
+F: drivers/soc/imx/
F: include/soc/imx/
ARM/FREESCALE VYBRID ARM ARCHITECTURE
@@ -1495,6 +1505,7 @@ M: Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
M: Gregory Clement <gregory.clement@free-electrons.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
+F: Documentation/devicetree/bindings/soc/dove/
F: arch/arm/mach-dove/
F: arch/arm/mach-mv78xx0/
F: arch/arm/mach-orion5x/
@@ -2233,7 +2244,7 @@ ATMEL ISI DRIVER
M: Ludovic Desroches <ludovic.desroches@microchip.com>
L: linux-media@vger.kernel.org
S: Supported
-F: drivers/media/platform/soc_camera/atmel-isi.c
+F: drivers/media/platform/atmel/atmel-isi.c
F: include/media/atmel-isi.h
ATMEL LCDFB DRIVER
@@ -2665,9 +2676,9 @@ N: kona
F: arch/arm/mach-bcm/
BROADCOM BCM2835 ARM ARCHITECTURE
-M: Stephen Warren <swarren@wwwdotorg.org>
M: Lee Jones <lee@kernel.org>
M: Eric Anholt <eric@anholt.net>
+M: Stefan Wahren <stefan.wahren@i2se.com>
L: linux-rpi-kernel@lists.infradead.org (moderated for non-subscribers)
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
T: git git://github.com/anholt/linux
@@ -2687,12 +2698,14 @@ F: arch/mips/include/asm/mach-bcm47xx/*
BROADCOM BCM5301X ARM ARCHITECTURE
M: Hauke Mehrtens <hauke@hauke-m.de>
M: Rafał Miłecki <zajec5@gmail.com>
+M: Jon Mason <jonmason@broadcom.com>
M: bcm-kernel-feedback-list@broadcom.com
L: linux-arm-kernel@lists.infradead.org
S: Maintained
F: arch/arm/mach-bcm/bcm_5301x.c
F: arch/arm/boot/dts/bcm5301x*.dtsi
F: arch/arm/boot/dts/bcm470*
+F: arch/arm/boot/dts/bcm953012*
BROADCOM BCM53573 ARM ARCHITECTURE
M: Rafał Miłecki <rafal@milecki.pl>
@@ -2854,13 +2867,6 @@ L: netdev@vger.kernel.org
S: Supported
F: drivers/net/ethernet/broadcom/bcmsysport.*
-BROADCOM VULCAN ARM64 SOC
-M: Jayachandran C. <c.jayachandran@gmail.com>
-M: bcm-kernel-feedback-list@broadcom.com
-L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-S: Maintained
-F: arch/arm64/boot/dts/broadcom/vulcan*
-
BROADCOM NETXTREME-E ROCE DRIVER
M: Selvin Xavier <selvin.xavier@broadcom.com>
M: Devesh Sharma <devesh.sharma@broadcom.com>
@@ -2920,6 +2926,8 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs.git
S: Maintained
F: Documentation/filesystems/btrfs.txt
F: fs/btrfs/
+F: include/linux/btrfs*
+F: include/uapi/linux/btrfs*
BTTV VIDEO4LINUX DRIVER
M: Mauro Carvalho Chehab <mchehab@s-opensource.com>
@@ -3049,13 +3057,12 @@ CAPELLA MICROSYSTEMS LIGHT SENSOR DRIVER
M: Kevin Tsai <ktsai@capellamicro.com>
S: Maintained
F: drivers/iio/light/cm*
-F: Documentation/devicetree/bindings/i2c/trivial-admin-guide/devices.rst
CAVIUM THUNDERX2 ARM64 SOC
M: Jayachandran C <jnair@caviumnetworks.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
-F: arch/arm64/boot/dts/cavium/thunder-99xx*
+F: arch/arm64/boot/dts/cavium/thunder2-99xx*
F: Documentation/devicetree/bindings/arm/cavium-thunder2.txt
CAVIUM I2C DRIVER
@@ -3099,7 +3106,7 @@ F: drivers/net/ieee802154/cc2520.c
F: include/linux/spi/cc2520.h
F: Documentation/devicetree/bindings/net/ieee802154/cc2520.txt
-CEC DRIVER
+CEC FRAMEWORK
M: Hans Verkuil <hans.verkuil@cisco.com>
L: linux-media@vger.kernel.org
T: git git://linuxtv.org/media_tree.git
@@ -3108,10 +3115,9 @@ S: Supported
F: Documentation/media/kapi/cec-core.rst
F: Documentation/media/uapi/cec
F: drivers/media/cec/
-F: drivers/media/cec-edid.c
F: drivers/media/rc/keymaps/rc-cec.c
F: include/media/cec.h
-F: include/media/cec-edid.h
+F: include/media/cec-notifier.h
F: include/uapi/linux/cec.h
F: include/uapi/linux/cec-funcs.h
@@ -3886,6 +3892,12 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/balbi/usb.git
S: Maintained
F: drivers/usb/dwc3/
+DEVANTECH SRF ULTRASONIC RANGER IIO DRIVER
+M: Andreas Klinger <ak@it-klinger.de>
+L: linux-iio@vger.kernel.org
+S: Maintained
+F: drivers/iio/proximity/srf*.c
+
DEVICE COREDUMP (DEV_COREDUMP)
M: Johannes Berg <johannes@sipsolutions.net>
L: linux-kernel@vger.kernel.org
@@ -4131,6 +4143,18 @@ S: Maintained
F: drivers/char/dtlk.c
F: include/linux/dtlk.h
+DPAA2 DATAPATH I/O (DPIO) DRIVER
+M: Roy Pledge <Roy.Pledge@nxp.com>
+L: linux-kernel@vger.kernel.org
+S: Maintained
+F: drivers/staging/fsl-mc/bus/dpio
+
+DPAA2 ETHERNET DRIVER
+M: Ioana Radulescu <ruxandra.radulescu@nxp.com>
+L: linux-kernel@vger.kernel.org
+S: Maintained
+F: drivers/staging/fsl-dpaa2/ethernet
+
DPT_I2O SCSI RAID DRIVER
M: Adaptec OEM Raid Solutions <aacraid@adaptec.com>
L: linux-scsi@vger.kernel.org
@@ -5161,7 +5185,6 @@ F: include/uapi/linux/firewire*.h
F: tools/firewire/
FIRMWARE LOADER (request_firmware)
-M: Ming Lei <ming.lei@canonical.com>
M: Luis R. Rodriguez <mcgrof@kernel.org>
L: linux-kernel@vger.kernel.org
S: Maintained
@@ -5191,13 +5214,15 @@ F: include/linux/ipmi-fru.h
K: fmc_d.*register
FPGA MANAGER FRAMEWORK
-M: Alan Tull <atull@opensource.altera.com>
+M: Alan Tull <atull@kernel.org>
R: Moritz Fischer <moritz.fischer@ettus.com>
L: linux-fpga@vger.kernel.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/atull/linux-fpga.git
+F: Documentation/fpga/
+F: Documentation/devicetree/bindings/fpga/
F: drivers/fpga/
-F: include/linux/fpga/fpga-mgr.h
+F: include/linux/fpga/
W: http://www.rocketboards.org
FPU EMULATOR
@@ -5309,6 +5334,7 @@ M: Scott Wood <oss@buserror.net>
L: linuxppc-dev@lists.ozlabs.org
L: linux-arm-kernel@lists.infradead.org
S: Maintained
+F: Documentation/devicetree/bindings/powerpc/fsl/
F: drivers/soc/fsl/
F: include/linux/fsl/
@@ -5397,10 +5423,12 @@ F: Documentation/filesystems/caching/
F: fs/fscache/
F: include/linux/fscache*.h
-FS-CRYPTO: FILE SYSTEM LEVEL ENCRYPTION SUPPORT
+FSCRYPT: FILE SYSTEM LEVEL ENCRYPTION SUPPORT
M: Theodore Y. Ts'o <tytso@mit.edu>
M: Jaegeuk Kim <jaegeuk@kernel.org>
-L: linux-fsdevel@vger.kernel.org
+L: linux-fscrypt@vger.kernel.org
+Q: https://patchwork.kernel.org/project/linux-fscrypt/list/
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/tytso/fscrypt.git
S: Supported
F: fs/crypto/
F: include/linux/fscrypt*.h
@@ -5562,6 +5590,7 @@ L: linux-pm@vger.kernel.org
S: Supported
F: drivers/base/power/domain*.c
F: include/linux/pm_domain.h
+F: Documentation/devicetree/bindings/power/power_domain.txt
GENERIC UIO DRIVER FOR PCI DEVICES
M: "Michael S. Tsirkin" <mst@redhat.com>
@@ -6840,6 +6869,8 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu.git
S: Maintained
F: Documentation/devicetree/bindings/iommu/
F: drivers/iommu/
+F: include/linux/iommu.h
+F: include/linux/iova.h
IP MASQUERADING
M: Juanjo Ciarlante <jjciarla@raiz.uncu.edu.ar>
@@ -7567,7 +7598,7 @@ Q: http://patchwork.ozlabs.org/project/linuxppc-dev/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux.git
S: Supported
F: Documentation/ABI/stable/sysfs-firmware-opal-*
-F: Documentation/devicetree/bindings/powerpc/opal/
+F: Documentation/devicetree/bindings/powerpc/
F: Documentation/devicetree/bindings/rtc/rtc-opal.txt
F: Documentation/devicetree/bindings/i2c/i2c-opal.txt
F: Documentation/powerpc/
@@ -8168,6 +8199,7 @@ W: https://linuxtv.org
Q: http://patchwork.kernel.org/project/linux-media/list/
T: git git://linuxtv.org/media_tree.git
S: Maintained
+F: Documentation/devicetree/bindings/media/
F: Documentation/media/
F: drivers/media/
F: drivers/staging/media/
@@ -8188,6 +8220,13 @@ L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/ethernet/mediatek/
+MEDIATEK JPEG DRIVER
+M: Rick Chang <rick.chang@mediatek.com>
+M: Bin Liu <bin.liu@mediatek.com>
+S: Supported
+F: drivers/media/platform/mtk-jpeg/
+F: Documentation/devicetree/bindings/media/mediatek-jpeg-decoder.txt
+
MEDIATEK MEDIA DRIVER
M: Tiffany Lin <tiffany.lin@mediatek.com>
M: Andrew-CT Chen <andrew-ct.chen@mediatek.com>
@@ -8386,7 +8425,7 @@ MICROCHIP / ATMEL AT91 / AT32 SERIAL DRIVER
M: Richard Genoud <richard.genoud@gmail.com>
S: Maintained
F: drivers/tty/serial/atmel_serial.c
-F: include/linux/atmel_serial.h
+F: drivers/tty/serial/atmel_serial.h
MICROCHIP / ATMEL DMA DRIVER
M: Ludovic Desroches <ludovic.desroches@microchip.com>
@@ -8782,14 +8821,16 @@ F: drivers/net/ethernet/neterion/
NETFILTER
M: Pablo Neira Ayuso <pablo@netfilter.org>
M: Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+M: Florian Westphal <fw@strlen.de>
L: netfilter-devel@vger.kernel.org
L: coreteam@netfilter.org
W: http://www.netfilter.org/
W: http://www.iptables.org/
+W: http://www.nftables.org/
Q: http://patchwork.ozlabs.org/project/netfilter-devel/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf-next.git
-S: Supported
+S: Maintained
F: include/linux/netfilter*
F: include/linux/netfilter/
F: include/net/netfilter/
@@ -9129,7 +9170,6 @@ F: drivers/nvme/target/fcloop.c
NVMEM FRAMEWORK
M: Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
-M: Maxime Ripard <maxime.ripard@free-electrons.com>
S: Maintained
F: drivers/nvmem/
F: Documentation/devicetree/bindings/nvmem/
@@ -9361,12 +9401,20 @@ M: Harald Welte <laforge@gnumonks.org>
S: Maintained
F: drivers/char/pcmcia/cm4040_cs.*
+OMNIVISION OV5647 SENSOR DRIVER
+M: Ramiro Oliveira <roliveir@synopsys.com>
+L: linux-media@vger.kernel.org
+T: git git://linuxtv.org/media_tree.git
+S: Maintained
+F: drivers/media/i2c/ov5647.c
+
OMNIVISION OV7670 SENSOR DRIVER
M: Jonathan Corbet <corbet@lwn.net>
L: linux-media@vger.kernel.org
T: git git://linuxtv.org/media_tree.git
S: Maintained
F: drivers/media/i2c/ov7670.c
+F: Documentation/devicetree/bindings/media/i2c/ov7670.txt
ONENAND FLASH DRIVER
M: Kyungmin Park <kyungmin.park@samsung.com>
@@ -9470,6 +9518,11 @@ F: arch/*/oprofile/
F: drivers/oprofile/
F: include/linux/oprofile.h
+OP-TEE DRIVER
+M: Jens Wiklander <jens.wiklander@linaro.org>
+S: Maintained
+F: drivers/tee/optee/
+
ORACLE CLUSTER FILESYSTEM 2 (OCFS2)
M: Mark Fasheh <mfasheh@versity.com>
M: Joel Becker <jlbec@evilplan.org>
@@ -9682,6 +9735,15 @@ F: include/linux/pci*
F: arch/x86/pci/
F: arch/x86/kernel/quirks.c
+PCI ENDPOINT SUBSYSTEM
+M: Kishon Vijay Abraham I <kishon@ti.com>
+L: linux-pci@vger.kernel.org
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/kishon/pci-endpoint.git
+S: Supported
+F: drivers/pci/endpoint/
+F: drivers/misc/pci_endpoint_test.c
+F: tools/pci/
+
PCI DRIVER FOR ALTERA PCIE IP
M: Ley Foon Tan <lftan@altera.com>
L: rfi@lists.rocketboards.org (moderated for non-subscribers)
@@ -9756,6 +9818,17 @@ S: Maintained
F: Documentation/devicetree/bindings/pci/aardvark-pci.txt
F: drivers/pci/host/pci-aardvark.c
+PCI DRIVER FOR MICROSEMI SWITCHTEC
+M: Kurt Schwemmer <kurt.schwemmer@microsemi.com>
+M: Stephen Bates <stephen.bates@microsemi.com>
+M: Logan Gunthorpe <logang@deltatee.com>
+L: linux-pci@vger.kernel.org
+S: Maintained
+F: Documentation/switchtec.txt
+F: Documentation/ABI/testing/sysfs-class-switchtec
+F: drivers/pci/switch/switchtec*
+F: include/uapi/linux/switchtec_ioctl.h
+
PCI DRIVER FOR NVIDIA TEGRA
M: Thierry Reding <thierry.reding@gmail.com>
L: linux-tegra@vger.kernel.org
@@ -10079,7 +10152,6 @@ F: drivers/scsi/pmcraid.*
PMC SIERRA PM8001 DRIVER
M: Jack Wang <jinpu.wang@profitbricks.com>
M: lindar_liu@usish.com
-L: pmchba@pmcs.com
L: linux-scsi@vger.kernel.org
S: Supported
F: drivers/scsi/pm8001/
@@ -10315,6 +10387,8 @@ F: include/linux/pwm.h
F: drivers/pwm/
F: drivers/video/backlight/pwm_bl.c
F: include/linux/pwm_backlight.h
+F: drivers/gpio/gpio-mvebu.c
+F: Documentation/devicetree/bindings/gpio/gpio-mvebu.txt
PXA2xx/PXA3xx SUPPORT
M: Daniel Mack <daniel@zonque.org>
@@ -10552,6 +10626,13 @@ L: linux-fbdev@vger.kernel.org
S: Maintained
F: drivers/video/fbdev/aty/aty128fb.c
+RAINSHADOW-CEC DRIVER
+M: Hans Verkuil <hverkuil@xs4all.nl>
+L: linux-media@vger.kernel.org
+T: git git://linuxtv.org/media_tree.git
+S: Maintained
+F: drivers/media/usb/rainshadow-cec/*
+
RALINK MIPS ARCHITECTURE
M: John Crispin <john@phrozen.org>
L: linux-mips@linux-mips.org
@@ -11223,6 +11304,14 @@ F: drivers/hwtracing/stm/
F: include/linux/stm.h
F: include/uapi/linux/stm.h
+TEE SUBSYSTEM
+M: Jens Wiklander <jens.wiklander@linaro.org>
+S: Maintained
+F: include/linux/tee_drv.h
+F: include/uapi/linux/tee.h
+F: drivers/tee/
+F: Documentation/tee.txt
+
THUNDERBOLT DRIVER
M: Andreas Noever <andreas.noever@gmail.com>
S: Maintained
@@ -12447,9 +12536,8 @@ S: Maintained
F: drivers/media/rc/ttusbir.c
TEGRA ARCHITECTURE SUPPORT
-M: Stephen Warren <swarren@wwwdotorg.org>
M: Thierry Reding <thierry.reding@gmail.com>
-M: Alexandre Courbot <gnurou@gmail.com>
+M: Jonathan Hunter <jonathanh@nvidia.com>
L: linux-tegra@vger.kernel.org
Q: http://patchwork.ozlabs.org/project/linux-tegra/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tegra/linux.git
@@ -12528,6 +12616,9 @@ S: Maintained
F: Documentation/devicetree/bindings/arm/keystone/ti,sci.txt
F: drivers/firmware/ti_sci*
F: include/linux/soc/ti/ti_sci_protocol.h
+F: Documentation/devicetree/bindings/soc/ti/sci-pm-domain.txt
+F: include/dt-bindings/genpd/k2g.h
+F: drivers/soc/ti/ti_sci_pm_domains.c
THANKO'S RAREMONO AM/FM/SW RADIO RECEIVER USB DRIVER
M: Hans Verkuil <hverkuil@xs4all.nl>
@@ -13245,6 +13336,15 @@ F: drivers/usb/
F: include/linux/usb.h
F: include/linux/usb/
+USB TYPEC SUBSYSTEM
+M: Heikki Krogerus <heikki.krogerus@linux.intel.com>
+L: linux-usb@vger.kernel.org
+S: Maintained
+F: Documentation/ABI/testing/sysfs-class-typec
+F: Documentation/usb/typec.rst
+F: drivers/usb/typec/
+F: include/linux/usb/typec.h
+
USB UHCI DRIVER
M: Alan Stern <stern@rowland.harvard.edu>
L: linux-usb@vger.kernel.org
@@ -13539,6 +13639,14 @@ W: https://linuxtv.org
S: Maintained
F: drivers/media/platform/vivid/*
+VIMC VIRTUAL MEDIA CONTROLLER DRIVER
+M: Helen Koike <helen.koike@collabora.com>
+L: linux-media@vger.kernel.org
+T: git git://linuxtv.org/media_tree.git
+W: https://linuxtv.org
+S: Maintained
+F: drivers/media/platform/vimc/*
+
VLYNQ BUS
M: Florian Fainelli <f.fainelli@gmail.com>
L: openwrt-devel@lists.openwrt.org (subscribers-only)
diff --git a/Makefile b/Makefile
index 43534cca1de9..220121fdca4d 100644
--- a/Makefile
+++ b/Makefile
@@ -1374,7 +1374,7 @@ help:
@echo ' headers_install - Install sanitised kernel headers to INSTALL_HDR_PATH'; \
echo ' (default: $(INSTALL_HDR_PATH))'; \
echo ''
- @echo 'Static analysers'
+ @echo 'Static analysers:'
@echo ' checkstack - Generate a list of stack hogs'
@echo ' namespacecheck - Name space analysis on compiled kernel'
@echo ' versioncheck - Sanity check on version.h usage'
@@ -1384,7 +1384,7 @@ help:
@echo ' headerdep - Detect inclusion cycles in headers'
@$(MAKE) -f $(srctree)/scripts/Makefile.help checker-help
@echo ''
- @echo 'Kernel selftest'
+ @echo 'Kernel selftest:'
@echo ' kselftest - Build and run kernel selftest (run as root)'
@echo ' Build, install, and boot kernel before'
@echo ' running kselftest on it'
@@ -1392,6 +1392,10 @@ help:
@echo ' kselftest-merge - Merge all the config dependencies of kselftest to existed'
@echo ' .config.'
@echo ''
+ @echo 'Userspace tools targets:'
+ @echo ' use "make tools/help"'
+ @echo ' or "cd tools; make help"'
+ @echo ''
@echo 'Kernel packaging:'
@$(MAKE) $(build)=$(package-dir) help
@echo ''
diff --git a/arch/Kconfig b/arch/Kconfig
index 640999412d11..6c00e5b00f8b 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -2,7 +2,11 @@
# General architecture dependent options
#
+config CRASH_CORE
+ bool
+
config KEXEC_CORE
+ select CRASH_CORE
bool
config HAVE_IMA_KEXEC
@@ -320,6 +324,9 @@ config HAVE_CMPXCHG_LOCAL
config HAVE_CMPXCHG_DOUBLE
bool
+config ARCH_WEAK_RELEASE_ACQUIRE
+ bool
+
config ARCH_WANT_IPC_PARSE_VERSION
bool
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index 5d7fb3e7cb97..a5459698f0ee 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -444,6 +444,7 @@ config ARC_HAS_PAE40
bool "Support for the 40-bit Physical Address Extension"
default n
depends on ISA_ARCV2
+ select HIGHMEM
help
Enable access to physical memory beyond 4G, only supported on
ARC cores with 40 bit Physical Addressing support
diff --git a/arch/arc/boot/dts/axs10x_mb.dtsi b/arch/arc/boot/dts/axs10x_mb.dtsi
index d6c1bbc98ac3..41cfb29b62c1 100644
--- a/arch/arc/boot/dts/axs10x_mb.dtsi
+++ b/arch/arc/boot/dts/axs10x_mb.dtsi
@@ -51,7 +51,7 @@
pguclk: pguclk {
#clock-cells = <0>;
compatible = "fixed-clock";
- clock-frequency = <74440000>;
+ clock-frequency = <74250000>;
};
};
@@ -149,12 +149,13 @@
interrupts = <14>;
};
- i2c@0x1e000 {
- compatible = "snps,designware-i2c";
+ i2s: i2s@1e000 {
+ compatible = "snps,designware-i2s";
reg = <0x1e000 0x100>;
- clock-frequency = <400000>;
- clocks = <&i2cclk>;
+ clocks = <&i2sclk 0>;
+ clock-names = "i2sclk";
interrupts = <15>;
+ #sound-dai-cells = <0>;
};
i2c@0x1f000 {
@@ -174,6 +175,7 @@
adi,input-colorspace = "rgb";
adi,input-clock = "1x";
adi,clock-delay = <0x03>;
+ #sound-dai-cells = <0>;
ports {
#address-cells = <1>;
@@ -295,5 +297,17 @@
};
};
};
+
+ sound_playback {
+ compatible = "simple-audio-card";
+ simple-audio-card,name = "AXS10x HDMI Audio";
+ simple-audio-card,format = "i2s";
+ simple-audio-card,cpu {
+ sound-dai = <&i2s>;
+ };
+ simple-audio-card,codec {
+ sound-dai = <&adv7511>;
+ };
+ };
};
};
diff --git a/arch/arc/include/asm/cache.h b/arch/arc/include/asm/cache.h
index 5008021fba98..19ebddffb279 100644
--- a/arch/arc/include/asm/cache.h
+++ b/arch/arc/include/asm/cache.h
@@ -62,6 +62,8 @@ extern unsigned long perip_base, perip_end;
#define ARC_REG_IC_BCR 0x77 /* Build Config reg */
#define ARC_REG_IC_IVIC 0x10
#define ARC_REG_IC_CTRL 0x11
+#define ARC_REG_IC_IVIR 0x16
+#define ARC_REG_IC_ENDR 0x17
#define ARC_REG_IC_IVIL 0x19
#define ARC_REG_IC_PTAG 0x1E
#define ARC_REG_IC_PTAG_HI 0x1F
@@ -76,6 +78,8 @@ extern unsigned long perip_base, perip_end;
#define ARC_REG_DC_IVDL 0x4A
#define ARC_REG_DC_FLSH 0x4B
#define ARC_REG_DC_FLDL 0x4C
+#define ARC_REG_DC_STARTR 0x4D
+#define ARC_REG_DC_ENDR 0x4E
#define ARC_REG_DC_PTAG 0x5C
#define ARC_REG_DC_PTAG_HI 0x5F
@@ -83,6 +87,8 @@ extern unsigned long perip_base, perip_end;
#define DC_CTRL_DIS 0x001
#define DC_CTRL_INV_MODE_FLUSH 0x040
#define DC_CTRL_FLUSH_STATUS 0x100
+#define DC_CTRL_RGN_OP_INV 0x200
+#define DC_CTRL_RGN_OP_MSK 0x200
/*System-level cache (L2 cache) related Auxiliary registers */
#define ARC_REG_SLC_CFG 0x901
diff --git a/arch/arc/include/asm/mmu.h b/arch/arc/include/asm/mmu.h
index b144d7ca7d20..db7319e9b506 100644
--- a/arch/arc/include/asm/mmu.h
+++ b/arch/arc/include/asm/mmu.h
@@ -9,6 +9,10 @@
#ifndef _ASM_ARC_MMU_H
#define _ASM_ARC_MMU_H
+#ifndef __ASSEMBLY__
+#include <linux/threads.h> /* NR_CPUS */
+#endif
+
#if defined(CONFIG_ARC_MMU_V1)
#define CONFIG_ARC_MMU_VER 1
#elif defined(CONFIG_ARC_MMU_V2)
diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
index ee22d40afef4..08fe33830d4b 100644
--- a/arch/arc/include/asm/pgtable.h
+++ b/arch/arc/include/asm/pgtable.h
@@ -35,11 +35,11 @@
#ifndef _ASM_ARC_PGTABLE_H
#define _ASM_ARC_PGTABLE_H
-#include <asm/page.h>
-#include <asm/mmu.h>
+#include <linux/const.h>
#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>
-#include <linux/const.h>
+#include <asm/page.h>
+#include <asm/mmu.h> /* to propagate CONFIG_ARC_MMU_VER <n> */
/**************************************************************************
* Page Table Flags
diff --git a/arch/arc/include/uapi/asm/elf.h b/arch/arc/include/uapi/asm/elf.h
index 0037a587320d..06d95e611616 100644
--- a/arch/arc/include/uapi/asm/elf.h
+++ b/arch/arc/include/uapi/asm/elf.h
@@ -27,6 +27,7 @@ typedef unsigned long elf_greg_t;
typedef unsigned long elf_fpregset_t;
#define ELF_NGREG (sizeof(struct user_regs_struct) / sizeof(elf_greg_t))
+#define ELF_ARCV2REG (sizeof(struct user_regs_arcv2) / sizeof(elf_greg_t))
typedef elf_greg_t elf_gregset_t[ELF_NGREG];
diff --git a/arch/arc/include/uapi/asm/ptrace.h b/arch/arc/include/uapi/asm/ptrace.h
index 0b3ef63d4a03..dd206e6b482c 100644
--- a/arch/arc/include/uapi/asm/ptrace.h
+++ b/arch/arc/include/uapi/asm/ptrace.h
@@ -47,6 +47,11 @@ struct user_regs_struct {
unsigned long efa; /* break pt addr, for break points in delay slots */
unsigned long stop_pc; /* give dbg stop_pc after ensuring brkpt trap */
};
+
+struct user_regs_arcv2 {
+ unsigned long r30, r58, r59;
+};
+
#endif /* !__ASSEMBLY__ */
#endif /* _UAPI__ASM_ARC_PTRACE_H */
diff --git a/arch/arc/kernel/ptrace.c b/arch/arc/kernel/ptrace.c
index 31150060d38b..5ee4676f135d 100644
--- a/arch/arc/kernel/ptrace.c
+++ b/arch/arc/kernel/ptrace.c
@@ -184,19 +184,75 @@ static int genregs_set(struct task_struct *target,
return ret;
}
+#ifdef CONFIG_ISA_ARCV2
+static int arcv2regs_get(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ void *kbuf, void __user *ubuf)
+{
+ const struct pt_regs *regs = task_pt_regs(target);
+ int ret, copy_sz;
+
+ if (IS_ENABLED(CONFIG_ARC_HAS_ACCL_REGS))
+ copy_sz = sizeof(struct user_regs_arcv2);
+ else
+ copy_sz = 4; /* r30 only */
+
+ /*
+ * itemized copy not needed like above as layout of regs (r30,r58,r59)
+ * is exactly the same in kernel (pt_regs) and userspace (user_regs_arcv2)
+ */
+ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &regs->r30,
+ 0, copy_sz);
+
+ return ret;
+}
+
+static int arcv2regs_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ const struct pt_regs *regs = task_pt_regs(target);
+ int ret, copy_sz;
+
+ if (IS_ENABLED(CONFIG_ARC_HAS_ACCL_REGS))
+ copy_sz = sizeof(struct user_regs_arcv2);
+ else
+ copy_sz = 4; /* r30 only */
+
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, (void *)&regs->r30,
+ 0, copy_sz);
+
+ return ret;
+}
+
+#endif
+
enum arc_getset {
- REGSET_GENERAL,
+ REGSET_CMN,
+ REGSET_ARCV2,
};
static const struct user_regset arc_regsets[] = {
- [REGSET_GENERAL] = {
+ [REGSET_CMN] = {
.core_note_type = NT_PRSTATUS,
.n = ELF_NGREG,
.size = sizeof(unsigned long),
.align = sizeof(unsigned long),
.get = genregs_get,
.set = genregs_set,
- }
+ },
+#ifdef CONFIG_ISA_ARCV2
+ [REGSET_ARCV2] = {
+ .core_note_type = NT_ARC_V2,
+ .n = ELF_ARCV2REG,
+ .size = sizeof(unsigned long),
+ .align = sizeof(unsigned long),
+ .get = arcv2regs_get,
+ .set = arcv2regs_set,
+ },
+#endif
};
static const struct user_regset_view user_arc_view = {
diff --git a/arch/arc/kernel/unwind.c b/arch/arc/kernel/unwind.c
index b6e4f7a7419b..333daab7def0 100644
--- a/arch/arc/kernel/unwind.c
+++ b/arch/arc/kernel/unwind.c
@@ -845,7 +845,7 @@ static int processCFI(const u8 *start, const u8 *end, unsigned long targetLoc,
* state->dataAlign;
break;
case DW_CFA_def_cfa_register:
- unw_debug("cfa_def_cfa_regsiter: ");
+ unw_debug("cfa_def_cfa_register: ");
state->cfa.reg = get_uleb128(&ptr.p8, end);
break;
/*todo case DW_CFA_def_cfa_expression: */
diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c
index 928562967f3c..a867575a758b 100644
--- a/arch/arc/mm/cache.c
+++ b/arch/arc/mm/cache.c
@@ -21,6 +21,10 @@
#include <asm/cachectl.h>
#include <asm/setup.h>
+#ifdef CONFIG_ISA_ARCV2
+#define USE_RGN_FLSH 1
+#endif
+
static int l2_line_sz;
static int ioc_exists;
int slc_enable = 1, ioc_enable = 1;
@@ -28,7 +32,7 @@ unsigned long perip_base = ARC_UNCACHED_ADDR_SPACE; /* legacy value for boot */
unsigned long perip_end = 0xFFFFFFFF; /* legacy value */
void (*_cache_line_loop_ic_fn)(phys_addr_t paddr, unsigned long vaddr,
- unsigned long sz, const int cacheop);
+ unsigned long sz, const int op, const int full_page);
void (*__dma_cache_wback_inv)(phys_addr_t start, unsigned long sz);
void (*__dma_cache_inv)(phys_addr_t start, unsigned long sz);
@@ -233,11 +237,10 @@ slc_chk:
static inline
void __cache_line_loop_v2(phys_addr_t paddr, unsigned long vaddr,
- unsigned long sz, const int op)
+ unsigned long sz, const int op, const int full_page)
{
unsigned int aux_cmd;
int num_lines;
- const int full_page = __builtin_constant_p(sz) && sz == PAGE_SIZE;
if (op == OP_INV_IC) {
aux_cmd = ARC_REG_IC_IVIL;
@@ -279,11 +282,10 @@ void __cache_line_loop_v2(phys_addr_t paddr, unsigned long vaddr,
*/
static inline
void __cache_line_loop_v3(phys_addr_t paddr, unsigned long vaddr,
- unsigned long sz, const int op)
+ unsigned long sz, const int op, const int full_page)
{
unsigned int aux_cmd, aux_tag;
int num_lines;
- const int full_page = __builtin_constant_p(sz) && sz == PAGE_SIZE;
if (op == OP_INV_IC) {
aux_cmd = ARC_REG_IC_IVIL;
@@ -334,6 +336,8 @@ void __cache_line_loop_v3(phys_addr_t paddr, unsigned long vaddr,
}
}
+#ifndef USE_RGN_FLSH
+
/*
* In HS38x (MMU v4), I-cache is VIPT (can alias), D-cache is PIPT
* Here's how cache ops are implemented
@@ -349,17 +353,16 @@ void __cache_line_loop_v3(phys_addr_t paddr, unsigned long vaddr,
*/
static inline
void __cache_line_loop_v4(phys_addr_t paddr, unsigned long vaddr,
- unsigned long sz, const int cacheop)
+ unsigned long sz, const int op, const int full_page)
{
unsigned int aux_cmd;
int num_lines;
- const int full_page_op = __builtin_constant_p(sz) && sz == PAGE_SIZE;
- if (cacheop == OP_INV_IC) {
+ if (op == OP_INV_IC) {
aux_cmd = ARC_REG_IC_IVIL;
} else {
/* d$ cmd: INV (discard or wback-n-discard) OR FLUSH (wback) */
- aux_cmd = cacheop & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
+ aux_cmd = op & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
}
/* Ensure we properly floor/ceil the non-line aligned/sized requests
@@ -368,7 +371,7 @@ void __cache_line_loop_v4(phys_addr_t paddr, unsigned long vaddr,
* -@paddr will be cache-line aligned already (being page aligned)
* -@sz will be integral multiple of line size (being page sized).
*/
- if (!full_page_op) {
+ if (!full_page) {
sz += paddr & ~CACHE_LINE_MASK;
paddr &= CACHE_LINE_MASK;
}
@@ -381,7 +384,7 @@ void __cache_line_loop_v4(phys_addr_t paddr, unsigned long vaddr,
* - (and needs to be written before the lower 32 bits)
*/
if (is_pae40_enabled()) {
- if (cacheop == OP_INV_IC)
+ if (op == OP_INV_IC)
/*
* Non aliasing I-cache in HS38,
* aliasing I-cache handled in __cache_line_loop_v3()
@@ -397,6 +400,55 @@ void __cache_line_loop_v4(phys_addr_t paddr, unsigned long vaddr,
}
}
+#else
+
+/*
+ * optimized flush operation which takes a region as opposed to iterating per line
+ */
+static inline
+void __cache_line_loop_v4(phys_addr_t paddr, unsigned long vaddr,
+ unsigned long sz, const int op, const int full_page)
+{
+ unsigned int s, e;
+
+ /* Only for Non aliasing I-cache in HS38 */
+ if (op == OP_INV_IC) {
+ s = ARC_REG_IC_IVIR;
+ e = ARC_REG_IC_ENDR;
+ } else {
+ s = ARC_REG_DC_STARTR;
+ e = ARC_REG_DC_ENDR;
+ }
+
+ if (!full_page) {
+ /* for any leading gap between @paddr and start of cache line */
+ sz += paddr & ~CACHE_LINE_MASK;
+ paddr &= CACHE_LINE_MASK;
+
+ /*
+ * account for any trailing gap to end of cache line
+ * this is equivalent to DIV_ROUND_UP() in line ops above
+ */
+ sz += L1_CACHE_BYTES - 1;
+ }
+
+ if (is_pae40_enabled()) {
+ /* TBD: check if crossing 4TB boundary */
+ if (op == OP_INV_IC)
+ write_aux_reg(ARC_REG_IC_PTAG_HI, (u64)paddr >> 32);
+ else
+ write_aux_reg(ARC_REG_DC_PTAG_HI, (u64)paddr >> 32);
+ }
+
+ /* ENDR needs to be set ahead of START */
+ write_aux_reg(e, paddr + sz); /* ENDR is exclusive */
+ write_aux_reg(s, paddr);
+
+ /* caller waits on DC_CTRL.FS */
+}
+
+#endif
+
#if (CONFIG_ARC_MMU_VER < 3)
#define __cache_line_loop __cache_line_loop_v2
#elif (CONFIG_ARC_MMU_VER == 3)
@@ -411,6 +463,11 @@ void __cache_line_loop_v4(phys_addr_t paddr, unsigned long vaddr,
* Machine specific helpers for Entire D-Cache or Per Line ops
*/
+#ifndef USE_RGN_FLSH
+/*
+ * this version avoids extra read/write of DC_CTRL for flush or invalid ops
+ * in the non region flush regime (such as for ARCompact)
+ */
static inline void __before_dc_op(const int op)
{
if (op == OP_FLUSH_N_INV) {
@@ -424,6 +481,32 @@ static inline void __before_dc_op(const int op)
}
}
+#else
+
+static inline void __before_dc_op(const int op)
+{
+ const unsigned int ctl = ARC_REG_DC_CTRL;
+ unsigned int val = read_aux_reg(ctl);
+
+ if (op == OP_FLUSH_N_INV) {
+ val |= DC_CTRL_INV_MODE_FLUSH;
+ }
+
+ if (op != OP_INV_IC) {
+ /*
+ * Flush / Invalidate is provided by DC_CTRL.RNG_OP 0 or 1
+ * combined Flush-n-invalidate uses DC_CTRL.IM = 1 set above
+ */
+ val &= ~DC_CTRL_RGN_OP_MSK;
+ if (op & OP_INV)
+ val |= DC_CTRL_RGN_OP_INV;
+ }
+ write_aux_reg(ctl, val);
+}
+
+#endif
+
+
static inline void __after_dc_op(const int op)
{
if (op & OP_FLUSH) {
@@ -486,13 +569,14 @@ static void __dc_enable(void)
static inline void __dc_line_op(phys_addr_t paddr, unsigned long vaddr,
unsigned long sz, const int op)
{
+ const int full_page = __builtin_constant_p(sz) && sz == PAGE_SIZE;
unsigned long flags;
local_irq_save(flags);
__before_dc_op(op);
- __cache_line_loop(paddr, vaddr, sz, op);
+ __cache_line_loop(paddr, vaddr, sz, op, full_page);
__after_dc_op(op);
@@ -521,10 +605,11 @@ static inline void
__ic_line_inv_vaddr_local(phys_addr_t paddr, unsigned long vaddr,
unsigned long sz)
{
+ const int full_page = __builtin_constant_p(sz) && sz == PAGE_SIZE;
unsigned long flags;
local_irq_save(flags);
- (*_cache_line_loop_ic_fn)(paddr, vaddr, sz, OP_INV_IC);
+ (*_cache_line_loop_ic_fn)(paddr, vaddr, sz, OP_INV_IC, full_page);
local_irq_restore(flags);
}
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 8a7ab5e73af9..4c1a35f15838 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -27,6 +27,7 @@ config ARM
select GENERIC_ALLOCATOR
select GENERIC_ATOMIC64 if (CPU_V7M || CPU_V6 || !CPU_32v6K || !AEABI)
select GENERIC_CLOCKEVENTS_BROADCAST if SMP
+ select GENERIC_CPU_AUTOPROBE
select GENERIC_EARLY_IOREMAP
select GENERIC_IDLE_POLL_SETUP
select GENERIC_IRQ_PROBE
@@ -358,15 +359,6 @@ config ARM_SINGLE_ARMV7M
select SPARSE_IRQ
select USE_OF
-config ARCH_GEMINI
- bool "Cortina Systems Gemini"
- select CLKSRC_MMIO
- select CPU_FA526
- select GENERIC_CLOCKEVENTS
- select GPIOLIB
- help
- Support for the Cortina Systems Gemini family SoCs
-
config ARCH_EBSA110
bool "EBSA-110"
select ARCH_USES_GETTIMEOFFSET
@@ -818,6 +810,8 @@ source "arch/arm/mach-spear/Kconfig"
source "arch/arm/mach-sti/Kconfig"
+source "arch/arm/mach-stm32/Kconfig"
+
source "arch/arm/mach-s3c24xx/Kconfig"
source "arch/arm/mach-s3c64xx/Kconfig"
@@ -876,28 +870,6 @@ config ARCH_LPC18XX
Support for NXP's LPC18xx Cortex-M3 and LPC43xx Cortex-M4
high performance microcontrollers.
-config ARCH_STM32
- bool "STMicrolectronics STM32"
- depends on ARM_SINGLE_ARMV7M
- select ARCH_HAS_RESET_CONTROLLER
- select ARMV7M_SYSTICK
- select CLKSRC_STM32
- select PINCTRL
- select RESET_CONTROLLER
- select STM32_EXTI
- help
- Support for STMicroelectronics STM32 processors.
-
-config MACH_STM32F429
- bool "STMicrolectronics STM32F429"
- depends on ARCH_STM32
- default y
-
-config MACH_STM32F746
- bool "STMicrolectronics STM32F746"
- depends on ARCH_STM32
- default y
-
config ARCH_MPS2
bool "ARM MPS2 platform"
depends on ARM_SINGLE_ARMV7M
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index 9150f9732785..7c711ba61417 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -422,7 +422,17 @@ dtb_check_done:
cmp r0, #HYP_MODE
bne 1f
- bl __hyp_get_vectors
+ /*
+ * Compute the address of the hyp vectors after relocation.
+ * This requires some arithmetic since we cannot directly
+ * reference __hyp_stub_vectors in a PC-relative way.
+ * Call __hyp_set_vectors with the new address so that we
+ * can HVC again after the copy.
+ */
+0: adr r0, 0b
+ movw r1, #:lower16:__hyp_stub_vectors - 0b
+ movt r1, #:upper16:__hyp_stub_vectors - 0b
+ add r0, r0, r1
sub r0, r0, r5
add r0, r0, r10
bl __hyp_set_vectors
diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile
index 011808490fed..9c5e1d944d1c 100644
--- a/arch/arm/boot/dts/Makefile
+++ b/arch/arm/boot/dts/Makefile
@@ -77,6 +77,7 @@ dtb-$(CONFIG_ARCH_BCM_5301X) += \
bcm4708-asus-rt-ac56u.dtb \
bcm4708-asus-rt-ac68u.dtb \
bcm4708-buffalo-wzr-1750dhp.dtb \
+ bcm4708-linksys-ea6300-v1.dtb \
bcm4708-luxul-xap-1510.dtb \
bcm4708-luxul-xwc-1000.dtb \
bcm4708-netgear-r6250.dtb \
@@ -87,17 +88,21 @@ dtb-$(CONFIG_ARCH_BCM_5301X) += \
bcm47081-buffalo-wzr-900dhp.dtb \
bcm47081-luxul-xap-1410.dtb \
bcm47081-luxul-xwr-1200.dtb \
+ bcm47081-tplink-archer-c5-v2.dtb \
bcm4709-asus-rt-ac87u.dtb \
bcm4709-buffalo-wxr-1900dhp.dtb \
+ bcm4709-linksys-ea9200.dtb \
bcm4709-netgear-r7000.dtb \
bcm4709-netgear-r8000.dtb \
bcm4709-tplink-archer-c9-v1.dtb \
bcm47094-dlink-dir-885l.dtb \
+ bcm47094-linksys-panamera.dtb \
bcm47094-luxul-xwr-3100.dtb \
bcm47094-netgear-r8500.dtb \
bcm94708.dtb \
bcm94709.dtb \
bcm953012er.dtb \
+ bcm953012hr.dtb \
bcm953012k.dtb
dtb-$(CONFIG_ARCH_BCM_53573) += \
bcm47189-tenda-ac9.dtb
@@ -173,6 +178,12 @@ dtb-$(CONFIG_ARCH_EXYNOS5) += \
exynos5440-sd5v1.dtb \
exynos5440-ssdk5440.dtb \
exynos5800-peach-pi.dtb
+dtb-$(CONFIG_ARCH_GEMINI) += \
+ gemini-nas4220b.dtb \
+ gemini-rut1xx.dtb \
+ gemini-sq201.dtb \
+ gemini-wbd111.dtb \
+ gemini-wbd222.dtb
dtb-$(CONFIG_ARCH_HI3xxx) += \
hi3620-hi4511.dtb
dtb-$(CONFIG_ARCH_HIGHBANK) += \
@@ -352,6 +363,8 @@ dtb-$(CONFIG_SOC_IMX6Q) += \
imx6dl-gw551x.dtb \
imx6dl-gw552x.dtb \
imx6dl-gw553x.dtb \
+ imx6dl-gw5903.dtb \
+ imx6dl-gw5904.dtb \
imx6dl-hummingboard.dtb \
imx6dl-icore.dtb \
imx6dl-icore-rqs.dtb \
@@ -395,9 +408,13 @@ dtb-$(CONFIG_SOC_IMX6Q) += \
imx6q-gw551x.dtb \
imx6q-gw552x.dtb \
imx6q-gw553x.dtb \
+ imx6q-gw5903.dtb \
+ imx6q-gw5904.dtb \
imx6q-h100.dtb \
imx6q-hummingboard.dtb \
imx6q-icore.dtb \
+ imx6q-icore-ofcap10.dtb \
+ imx6q-icore-ofcap12.dtb \
imx6q-icore-rqs.dtb \
imx6q-marsboard.dtb \
imx6q-mccmon6.dtb \
@@ -425,9 +442,12 @@ dtb-$(CONFIG_SOC_IMX6Q) += \
imx6q-utilite-pro.dtb \
imx6q-wandboard.dtb \
imx6q-wandboard-revb1.dtb \
+ imx6q-zii-rdu2.dtb \
imx6qp-nitrogen6_max.dtb \
+ imx6qp-nitrogen6_som2.dtb \
imx6qp-sabreauto.dtb \
- imx6qp-sabresd.dtb
+ imx6qp-sabresd.dtb \
+ imx6qp-zii-rdu2.dtb
dtb-$(CONFIG_SOC_IMX6SL) += \
imx6sl-evk.dtb \
imx6sl-warp.dtb
@@ -458,6 +478,7 @@ dtb-$(CONFIG_SOC_IMX7D) += \
imx7d-nitrogen7.dtb \
imx7d-sbc-imx7.dtb \
imx7d-sdb.dtb \
+ imx7d-sdb-sht11.dtb \
imx7s-colibri-eval-v3.dtb \
imx7s-warp.dtb
dtb-$(CONFIG_SOC_LS1021A) += \
@@ -488,6 +509,10 @@ dtb-$(CONFIG_ARCH_MXS) += \
imx28-cfa10056.dtb \
imx28-cfa10057.dtb \
imx28-cfa10058.dtb \
+ imx28-duckbill-2-485.dtb \
+ imx28-duckbill-2.dtb \
+ imx28-duckbill-2-enocean.dtb \
+ imx28-duckbill-2-spi.dtb \
imx28-duckbill.dtb \
imx28-eukrea-mbmx283lc.dtb \
imx28-eukrea-mbmx287lc.dtb \
@@ -673,6 +698,25 @@ dtb-$(CONFIG_ARCH_REALVIEW) += \
arm-realview-eb-a9mp-bbrevd.dtb \
arm-realview-pba8.dtb \
arm-realview-pbx-a9.dtb
+dtb-$(CONFIG_ARCH_RENESAS) += \
+ emev2-kzm9d.dtb \
+ r7s72100-genmai.dtb \
+ r7s72100-rskrza1.dtb \
+ r8a73a4-ape6evm.dtb \
+ r8a7740-armadillo800eva.dtb \
+ r8a7743-sk-rzg1m.dtb \
+ r8a7745-sk-rzg1e.dtb \
+ r8a7778-bockw.dtb \
+ r8a7779-marzen.dtb \
+ r8a7790-lager.dtb \
+ r8a7791-koelsch.dtb \
+ r8a7791-porter.dtb \
+ r8a7792-blanche.dtb \
+ r8a7792-wheat.dtb \
+ r8a7793-gose.dtb \
+ r8a7794-alt.dtb \
+ r8a7794-silk.dtb \
+ sh73a0-kzm9g.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += \
rk1108-evb.dtb \
rk3036-evb.dtb \
@@ -692,9 +736,11 @@ dtb-$(CONFIG_ARCH_ROCKCHIP) += \
rk3288-firefly.dtb \
rk3288-firefly-reload.dtb \
rk3288-miqi.dtb \
+ rk3288-phycore-rdk.dtb \
rk3288-popmetal.dtb \
rk3288-r89.dtb \
rk3288-rock2-square.dtb \
+ rk3288-tinker.dtb \
rk3288-veyron-brain.dtb \
rk3288-veyron-jaq.dtb \
rk3288-veyron-jerry.dtb \
@@ -713,25 +759,6 @@ dtb-$(CONFIG_ARCH_S5PV210) += \
s5pv210-smdkc110.dtb \
s5pv210-smdkv210.dtb \
s5pv210-torbreck.dtb
-dtb-$(CONFIG_ARCH_SHMOBILE_MULTI) += \
- emev2-kzm9d.dtb \
- r7s72100-genmai.dtb \
- r7s72100-rskrza1.dtb \
- r8a73a4-ape6evm.dtb \
- r8a7740-armadillo800eva.dtb \
- r8a7743-sk-rzg1m.dtb \
- r8a7745-sk-rzg1e.dtb \
- r8a7778-bockw.dtb \
- r8a7779-marzen.dtb \
- r8a7790-lager.dtb \
- r8a7791-koelsch.dtb \
- r8a7791-porter.dtb \
- r8a7792-blanche.dtb \
- r8a7792-wheat.dtb \
- r8a7793-gose.dtb \
- r8a7794-alt.dtb \
- r8a7794-silk.dtb \
- sh73a0-kzm9g.dtb
dtb-$(CONFIG_ARCH_SOCFPGA) += \
socfpga_arria5_socdk.dtb \
socfpga_arria10_socdk_nand.dtb \
@@ -764,7 +791,8 @@ dtb-$(CONFIG_ARCH_STM32)+= \
stm32f429-disco.dtb \
stm32f469-disco.dtb \
stm32429i-eval.dtb \
- stm32746g-eval.dtb
+ stm32746g-eval.dtb \
+ stm32h743i-eval.dtb
dtb-$(CONFIG_MACH_SUN4I) += \
sun4i-a10-a1000.dtb \
sun4i-a10-ba10-tvbox.dtb \
@@ -868,6 +896,7 @@ dtb-$(CONFIG_MACH_SUN8I) += \
sun8i-h3-beelink-x2.dtb \
sun8i-h3-nanopi-m1.dtb \
sun8i-h3-nanopi-neo.dtb \
+ sun8i-h3-nanopi-neo-air.dtb \
sun8i-h3-orangepi-2.dtb \
sun8i-h3-orangepi-lite.dtb \
sun8i-h3-orangepi-one.dtb \
@@ -970,6 +999,8 @@ dtb-$(CONFIG_MACH_ARMADA_38X) += \
armada-385-db-ap.dtb \
armada-385-linksys-caiman.dtb \
armada-385-linksys-cobra.dtb \
+ armada-385-linksys-shelby.dtb \
+ armada-385-synology-ds116.dtb \
armada-385-turris-omnia.dtb \
armada-388-clearfog.dtb \
armada-388-clearfog-base.dtb \
diff --git a/arch/arm/boot/dts/alpine.dtsi b/arch/arm/boot/dts/alpine.dtsi
index d0eefc3b886c..731df7a8c4e6 100644
--- a/arch/arm/boot/dts/alpine.dtsi
+++ b/arch/arm/boot/dts/alpine.dtsi
@@ -41,28 +41,28 @@
compatible = "arm,cortex-a15";
device_type = "cpu";
reg = <0>;
- clock-frequency = <0>; /* Filled by loader */
+ clock-frequency = <1700000000>;
};
cpu@1 {
compatible = "arm,cortex-a15";
device_type = "cpu";
reg = <1>;
- clock-frequency = <0>; /* Filled by loader */
+ clock-frequency = <1700000000>;
};
cpu@2 {
compatible = "arm,cortex-a15";
device_type = "cpu";
reg = <2>;
- clock-frequency = <0>; /* Filled by loader */
+ clock-frequency = <1700000000>;
};
cpu@3 {
compatible = "arm,cortex-a15";
device_type = "cpu";
reg = <3>;
- clock-frequency = <0>; /* Filled by loader */
+ clock-frequency = <1700000000>;
};
};
@@ -81,7 +81,7 @@
<GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
<GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
<GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>;
- clock-frequency = <0>; /* Filled by loader */
+ clock-frequency = <50000000>;
};
/* Interrupt Controller */
@@ -120,26 +120,26 @@
<GIC_SPI 71 IRQ_TYPE_LEVEL_HIGH>;
};
- uart0:uart@fd883000 {
+ uart0: uart@fd883000 {
compatible = "ns16550a";
reg = <0x0 0xfd883000 0x0 0x1000>;
- clock-frequency = <0>; /* Filled by loader */
+ clock-frequency = <375000000>;
interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
reg-shift = <2>;
reg-io-width = <4>;
};
- uart1:uart@0xfd884000 {
+ uart1: uart@fd884000 {
compatible = "ns16550a";
reg = <0x0 0xfd884000 0x0 0x1000>;
- clock-frequency = <0>; /* Filled by loader */
+ clock-frequency = <375000000>;
interrupts = <GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH>;
reg-shift = <2>;
reg-io-width = <4>;
};
/* Internal PCIe Controller */
- pcie-internal@0xfbc00000 {
+ pcie@fbc00000 {
compatible = "pci-host-ecam-generic";
device_type = "pci";
#size-cells = <2>;
diff --git a/arch/arm/boot/dts/am335x-baltos-ir2110.dts b/arch/arm/boot/dts/am335x-baltos-ir2110.dts
index 501c7527121b..75de1e723303 100644
--- a/arch/arm/boot/dts/am335x-baltos-ir2110.dts
+++ b/arch/arm/boot/dts/am335x-baltos-ir2110.dts
@@ -14,6 +14,7 @@
/dts-v1/;
#include "am335x-baltos.dtsi"
+#include "am335x-baltos-leds.dtsi"
/ {
model = "OnRISC Baltos iR 2110";
diff --git a/arch/arm/boot/dts/am335x-baltos-ir3220.dts b/arch/arm/boot/dts/am335x-baltos-ir3220.dts
index 19f53b8569e1..46df1b22022c 100644
--- a/arch/arm/boot/dts/am335x-baltos-ir3220.dts
+++ b/arch/arm/boot/dts/am335x-baltos-ir3220.dts
@@ -14,6 +14,7 @@
/dts-v1/;
#include "am335x-baltos.dtsi"
+#include "am335x-baltos-leds.dtsi"
/ {
model = "OnRISC Baltos iR 3220";
diff --git a/arch/arm/boot/dts/am335x-baltos-ir5221.dts b/arch/arm/boot/dts/am335x-baltos-ir5221.dts
index 2b9d7f4db23f..5d56355ba040 100644
--- a/arch/arm/boot/dts/am335x-baltos-ir5221.dts
+++ b/arch/arm/boot/dts/am335x-baltos-ir5221.dts
@@ -14,6 +14,7 @@
/dts-v1/;
#include "am335x-baltos.dtsi"
+#include "am335x-baltos-leds.dtsi"
/ {
model = "OnRISC Baltos iR 5221";
diff --git a/arch/arm/boot/dts/am335x-baltos-leds.dtsi b/arch/arm/boot/dts/am335x-baltos-leds.dtsi
new file mode 100644
index 000000000000..3ab1767d5c13
--- /dev/null
+++ b/arch/arm/boot/dts/am335x-baltos-leds.dtsi
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * VScom OnRISC
+ * http://www.vscom.de
+ */
+
+/*#include "am33xx.dtsi"*/
+
+/ {
+ leds {
+ pinctrl-names = "default";
+ pinctrl-0 = <&user_leds>;
+
+ compatible = "gpio-leds";
+
+ power {
+ label = "onrisc:red:power";
+ linux,default-trigger = "default-on";
+ gpios = <&gpio3 0 GPIO_ACTIVE_LOW>;
+ default-state = "on";
+ };
+ wlan {
+ label = "onrisc:blue:wlan";
+ gpios = <&gpio0 16 GPIO_ACTIVE_HIGH>;
+ default-state = "off";
+ };
+ app {
+ label = "onrisc:green:app";
+ gpios = <&gpio0 17 GPIO_ACTIVE_HIGH>;
+ default-state = "off";
+ };
+ };
+};
+
+&am33xx_pinmux {
+ user_leds: pinmux_user_leds {
+ pinctrl-single,pins = <
+ AM33XX_IOPAD(0x908, PIN_OUTPUT_PULLDOWN | MUX_MODE7) /* mii1_col.gpio3_0 PWR LED */
+ AM33XX_IOPAD(0x91c, PIN_OUTPUT_PULLDOWN | MUX_MODE7) /* mii1_txd3.gpio0_16 WLAN LED */
+ AM33XX_IOPAD(0x920, PIN_OUTPUT_PULLDOWN | MUX_MODE7) /* mii1_txd2.gpio0_17 APP LED */
+ >;
+ };
+};
diff --git a/arch/arm/boot/dts/am335x-boneblack.dts b/arch/arm/boot/dts/am335x-boneblack.dts
index 77273df1a028..935ed17d22e4 100644
--- a/arch/arm/boot/dts/am335x-boneblack.dts
+++ b/arch/arm/boot/dts/am335x-boneblack.dts
@@ -15,3 +15,14 @@
model = "TI AM335x BeagleBone Black";
compatible = "ti,am335x-bone-black", "ti,am335x-bone", "ti,am33xx";
};
+
+&cpu0_opp_table {
+ /*
+ * Not all PG 2.0 silicon supports 1GHz, but some of the early
+ * BeagleBone Blacks have PG 2.0 silicon which is guaranteed
+ * to support the 1GHz OPP, so enable it for PG 2.0 on this board.
+ */
+ oppnitro@1000000000 {
+ opp-supported-hw = <0x06 0x0100>;
+ };
+};
diff --git a/arch/arm/boot/dts/am335x-icev2.dts b/arch/arm/boot/dts/am335x-icev2.dts
index a2ad076822db..f2005ecca74f 100644
--- a/arch/arm/boot/dts/am335x-icev2.dts
+++ b/arch/arm/boot/dts/am335x-icev2.dts
@@ -201,6 +201,69 @@
AM33XX_IOPAD(0x938, PIN_OUTPUT_PULLUP | MUX_MODE1) /* (L16) gmii1_rxd2.uart3_txd */
>;
};
+
+ cpsw_default: cpsw_default {
+ pinctrl-single,pins = <
+ /* Slave 1, RMII mode */
+ AM33XX_IOPAD(0x90c, (PIN_INPUT_PULLUP | MUX_MODE1)) /* mii1_crs.rmii1_crs_dv */
+ AM33XX_IOPAD(0x944, (PIN_INPUT_PULLUP | MUX_MODE0)) /* rmii1_refclk.rmii1_refclk */
+ AM33XX_IOPAD(0x940, (PIN_INPUT_PULLUP | MUX_MODE1)) /* mii1_rxd0.rmii1_rxd0 */
+ AM33XX_IOPAD(0x93c, (PIN_INPUT_PULLUP | MUX_MODE1)) /* mii1_rxd1.rmii1_rxd1 */
+ AM33XX_IOPAD(0x910, (PIN_INPUT_PULLUP | MUX_MODE1)) /* mii1_rxerr.rmii1_rxerr */
+ AM33XX_IOPAD(0x928, (PIN_OUTPUT_PULLDOWN | MUX_MODE1)) /* mii1_txd0.rmii1_txd0 */
+ AM33XX_IOPAD(0x924, (PIN_OUTPUT_PULLDOWN | MUX_MODE1)) /* mii1_txd1.rmii1_txd1 */
+ AM33XX_IOPAD(0x914, (PIN_OUTPUT_PULLDOWN | MUX_MODE1)) /* mii1_txen.rmii1_txen */
+ /* Slave 2, RMII mode */
+ AM33XX_IOPAD(0x870, (PIN_INPUT_PULLUP | MUX_MODE3)) /* gpmc_wait0.rmii2_crs_dv */
+ AM33XX_IOPAD(0x908, (PIN_INPUT_PULLUP | MUX_MODE1)) /* mii1_col.rmii2_refclk */
+ AM33XX_IOPAD(0x86c, (PIN_INPUT_PULLUP | MUX_MODE3)) /* gpmc_a11.rmii2_rxd0 */
+ AM33XX_IOPAD(0x868, (PIN_INPUT_PULLUP | MUX_MODE3)) /* gpmc_a10.rmii2_rxd1 */
+ AM33XX_IOPAD(0x874, (PIN_INPUT_PULLUP | MUX_MODE3)) /* gpmc_wpn.rmii2_rxerr */
+ AM33XX_IOPAD(0x854, (PIN_OUTPUT_PULLDOWN | MUX_MODE3)) /* gpmc_a5.rmii2_txd0 */
+ AM33XX_IOPAD(0x850, (PIN_OUTPUT_PULLDOWN | MUX_MODE3)) /* gpmc_a4.rmii2_txd1 */
+ AM33XX_IOPAD(0x840, (PIN_OUTPUT_PULLDOWN | MUX_MODE3)) /* gpmc_a0.rmii2_txen */
+ >;
+ };
+
+ cpsw_sleep: cpsw_sleep {
+ pinctrl-single,pins = <
+ /* Slave 1 reset value */
+ AM33XX_IOPAD(0x90c, (PIN_INPUT_PULLDOWN | MUX_MODE7))
+ AM33XX_IOPAD(0x944, (PIN_INPUT_PULLDOWN | MUX_MODE7))
+ AM33XX_IOPAD(0x940, (PIN_INPUT_PULLDOWN | MUX_MODE7))
+ AM33XX_IOPAD(0x93c, (PIN_INPUT_PULLDOWN | MUX_MODE7))
+ AM33XX_IOPAD(0x910, (PIN_INPUT_PULLDOWN | MUX_MODE7))
+ AM33XX_IOPAD(0x928, (PIN_INPUT_PULLDOWN | MUX_MODE7))
+ AM33XX_IOPAD(0x924, (PIN_INPUT_PULLDOWN | MUX_MODE7))
+ AM33XX_IOPAD(0x914, (PIN_INPUT_PULLDOWN | MUX_MODE7))
+
+ /* Slave 2 reset value */
+ AM33XX_IOPAD(0x870, (PIN_INPUT_PULLDOWN | MUX_MODE7))
+ AM33XX_IOPAD(0x908, (PIN_INPUT_PULLDOWN | MUX_MODE7))
+ AM33XX_IOPAD(0x86c, (PIN_INPUT_PULLDOWN | MUX_MODE7))
+ AM33XX_IOPAD(0x868, (PIN_INPUT_PULLDOWN | MUX_MODE7))
+ AM33XX_IOPAD(0x874, (PIN_INPUT_PULLDOWN | MUX_MODE7))
+ AM33XX_IOPAD(0x854, (PIN_INPUT_PULLDOWN | MUX_MODE7))
+ AM33XX_IOPAD(0x850, (PIN_INPUT_PULLDOWN | MUX_MODE7))
+ AM33XX_IOPAD(0x840, (PIN_INPUT_PULLDOWN | MUX_MODE7))
+ >;
+ };
+
+ davinci_mdio_default: davinci_mdio_default {
+ pinctrl-single,pins = <
+ /* MDIO */
+ AM33XX_IOPAD(0x948, (PIN_INPUT_PULLUP | SLEWCTRL_FAST | MUX_MODE0)) /* mdio_data.mdio_data */
+ AM33XX_IOPAD(0x94c, (PIN_OUTPUT_PULLUP | MUX_MODE0)) /* mdio_clk.mdio_clk */
+ >;
+ };
+
+ davinci_mdio_sleep: davinci_mdio_sleep {
+ pinctrl-single,pins = <
+ /* MDIO reset value */
+ AM33XX_IOPAD(0x948, (PIN_INPUT_PULLDOWN | MUX_MODE7))
+ AM33XX_IOPAD(0x94c, (PIN_INPUT_PULLDOWN | MUX_MODE7))
+ >;
+ };
};
&i2c0 {
@@ -245,6 +308,39 @@
spi-max-frequency = <1000000>;
spi-cpol;
};
+
+ spi_nor: flash@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "winbond,w25q64", "jedec,spi-nor";
+ spi-max-frequency = <80000000>;
+ m25p,fast-read;
+ reg = <0>;
+
+ partition@0 {
+ label = "u-boot-spl";
+ reg = <0x0 0x80000>;
+ read-only;
+ };
+
+ partition@1 {
+ label = "u-boot";
+ reg = <0x80000 0x100000>;
+ read-only;
+ };
+
+ partition@2 {
+ label = "u-boot-env";
+ reg = <0x180000 0x20000>;
+ read-only;
+ };
+
+ partition@3 {
+ label = "misc";
+ reg = <0x1A0000 0x660000>;
+ };
+ };
+
};
&tscadc {
@@ -350,3 +446,61 @@
pinctrl-0 = <&uart3_pins_default>;
status = "okay";
};
+
+&gpio3 {
+ p4 {
+ gpio-hog;
+ gpios = <4 GPIO_ACTIVE_HIGH>;
+ output-high;
+ line-name = "PR1_MII_CTRL";
+ };
+
+ p10 {
+ gpio-hog;
+ gpios = <10 GPIO_ACTIVE_HIGH>;
+ /* ETH1 mux: Low for MII-PRU, high for RMII-CPSW */
+ output-high;
+ line-name = "MUX_MII_CTL1";
+ };
+};
+
+&cpsw_emac0 {
+ phy-handle = <&ethphy0>;
+ phy-mode = "rmii";
+ dual_emac_res_vlan = <1>;
+};
+
+&cpsw_emac1 {
+ phy-handle = <&ethphy1>;
+ phy-mode = "rmii";
+ dual_emac_res_vlan = <2>;
+};
+
+&mac {
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&cpsw_default>;
+ pinctrl-1 = <&cpsw_sleep>;
+ status = "okay";
+ dual_emac;
+};
+
+&phy_sel {
+ rmii-clock-ext;
+};
+
+&davinci_mdio {
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&davinci_mdio_default>;
+ pinctrl-1 = <&davinci_mdio_sleep>;
+ status = "okay";
+ reset-gpios = <&gpio2 5 GPIO_ACTIVE_LOW>;
+ reset-delay-us = <2>; /* PHY datasheet states 1uS min */
+
+ ethphy0: ethernet-phy@1 {
+ reg = <1>;
+ };
+
+ ethphy1: ethernet-phy@3 {
+ reg = <3>;
+ };
+};
diff --git a/arch/arm/boot/dts/am33xx.dtsi b/arch/arm/boot/dts/am33xx.dtsi
index 9e96d60976b7..9e242943dcec 100644
--- a/arch/arm/boot/dts/am33xx.dtsi
+++ b/arch/arm/boot/dts/am33xx.dtsi
@@ -46,19 +46,7 @@
device_type = "cpu";
reg = <0>;
- /*
- * To consider voltage drop between PMIC and SoC,
- * tolerance value is reduced to 2% from 4% and
- * voltage value is increased as a precaution.
- */
- operating-points = <
- /* kHz uV */
- 720000 1285000
- 600000 1225000
- 500000 1125000
- 275000 1125000
- >;
- voltage-tolerance = <2>; /* 2 percentage */
+ operating-points-v2 = <&cpu0_opp_table>;
clocks = <&dpll_mpu_ck>;
clock-names = "cpu";
@@ -67,6 +55,79 @@
};
};
+ cpu0_opp_table: opp-table {
+ compatible = "operating-points-v2-ti-cpu";
+ syscon = <&scm_conf>;
+
+ /*
+ * The three following nodes are marked with opp-suspend
+ * because the can not be enabled simultaneously on a
+ * single SoC.
+ */
+ opp50@300000000 {
+ opp-hz = /bits/ 64 <300000000>;
+ opp-microvolt = <950000 931000 969000>;
+ opp-supported-hw = <0x06 0x0010>;
+ opp-suspend;
+ };
+
+ opp100@275000000 {
+ opp-hz = /bits/ 64 <275000000>;
+ opp-microvolt = <1100000 1078000 1122000>;
+ opp-supported-hw = <0x01 0x00FF>;
+ opp-suspend;
+ };
+
+ opp100@300000000 {
+ opp-hz = /bits/ 64 <300000000>;
+ opp-microvolt = <1100000 1078000 1122000>;
+ opp-supported-hw = <0x06 0x0020>;
+ opp-suspend;
+ };
+
+ opp100@500000000 {
+ opp-hz = /bits/ 64 <500000000>;
+ opp-microvolt = <1100000 1078000 1122000>;
+ opp-supported-hw = <0x01 0xFFFF>;
+ };
+
+ opp100@600000000 {
+ opp-hz = /bits/ 64 <600000000>;
+ opp-microvolt = <1100000 1078000 1122000>;
+ opp-supported-hw = <0x06 0x0040>;
+ };
+
+ opp120@600000000 {
+ opp-hz = /bits/ 64 <600000000>;
+ opp-microvolt = <1200000 1176000 1224000>;
+ opp-supported-hw = <0x01 0xFFFF>;
+ };
+
+ opp120@720000000 {
+ opp-hz = /bits/ 64 <720000000>;
+ opp-microvolt = <1200000 1176000 1224000>;
+ opp-supported-hw = <0x06 0x0080>;
+ };
+
+ oppturbo@720000000 {
+ opp-hz = /bits/ 64 <720000000>;
+ opp-microvolt = <1260000 1234800 1285200>;
+ opp-supported-hw = <0x01 0xFFFF>;
+ };
+
+ oppturbo@800000000 {
+ opp-hz = /bits/ 64 <800000000>;
+ opp-microvolt = <1260000 1234800 1285200>;
+ opp-supported-hw = <0x06 0x0100>;
+ };
+
+ oppnitro@1000000000 {
+ opp-hz = /bits/ 64 <1000000000>;
+ opp-microvolt = <1325000 1298500 1351500>;
+ opp-supported-hw = <0x04 0x0200>;
+ };
+ };
+
pmu {
compatible = "arm,cortex-a8-pmu";
interrupts = <3>;
diff --git a/arch/arm/boot/dts/am3517.dtsi b/arch/arm/boot/dts/am3517.dtsi
index 9fe545dbfa89..00da3f2c4072 100644
--- a/arch/arm/boot/dts/am3517.dtsi
+++ b/arch/arm/boot/dts/am3517.dtsi
@@ -13,6 +13,7 @@
/ {
aliases {
serial3 = &uart4;
+ can = &hecc;
};
ocp@68000000 {
@@ -72,6 +73,17 @@
pinctrl-single,register-width = <16>;
pinctrl-single,function-mask = <0xff1f>;
};
+
+ hecc: can@5c050000 {
+ compatible = "ti,am3517-hecc";
+ status = "disabled";
+ reg = <0x5c050000 0x80>,
+ <0x5c053000 0x180>,
+ <0x5c052000 0x200>;
+ reg-names = "hecc", "hecc-ram", "mbx";
+ interrupts = <24>;
+ clocks = <&hecc_ck>;
+ };
};
};
diff --git a/arch/arm/boot/dts/am4372.dtsi b/arch/arm/boot/dts/am4372.dtsi
index 97fcaf415de1..176e09e9a45e 100644
--- a/arch/arm/boot/dts/am4372.dtsi
+++ b/arch/arm/boot/dts/am4372.dtsi
@@ -50,15 +50,14 @@
clock-names = "cpu";
operating-points-v2 = <&cpu0_opp_table>;
- ti,syscon-efuse = <&scm_conf 0x610 0x3f 0>;
- ti,syscon-rev = <&scm_conf 0x600>;
clock-latency = <300000>; /* From omap-cpufreq driver */
};
};
- cpu0_opp_table: opp_table0 {
- compatible = "operating-points-v2";
+ cpu0_opp_table: opp-table {
+ compatible = "operating-points-v2-ti-cpu";
+ syscon = <&scm_conf>;
opp50@300000000 {
opp-hz = /bits/ 64 <300000000>;
diff --git a/arch/arm/boot/dts/am437x-gp-evm.dts b/arch/arm/boot/dts/am437x-gp-evm.dts
index a4f31739057f..397e98b7e246 100644
--- a/arch/arm/boot/dts/am437x-gp-evm.dts
+++ b/arch/arm/boot/dts/am437x-gp-evm.dts
@@ -501,6 +501,21 @@
AM4372_IOPAD(0x884, PIN_INPUT_PULLDOWN | MUX_MODE7) /* gpmc_csn2.gpio1_31 */
>;
};
+
+ uart0_pins_default: uart0_pins_default {
+ pinctrl-single,pins = <
+ AM4372_IOPAD(0x968, PIN_INPUT | MUX_MODE0) /* uart0_ctsn.uart0_ctsn */
+ AM4372_IOPAD(0x96C, PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* uart0_rtsn.uart0_rtsn */
+ AM4372_IOPAD(0x970, PIN_INPUT_PULLUP | MUX_MODE0) /* uart0_rxd.uart0_rxd */
+ AM4372_IOPAD(0x974, PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* uart0_txd.uart0_txd */
+ >;
+ };
+};
+
+&uart0 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart0_pins_default>;
};
&i2c0 {
diff --git a/arch/arm/boot/dts/am57xx-idk-common.dtsi b/arch/arm/boot/dts/am57xx-idk-common.dtsi
index e5ac1d81d15c..c536b2f5389f 100644
--- a/arch/arm/boot/dts/am57xx-idk-common.dtsi
+++ b/arch/arm/boot/dts/am57xx-idk-common.dtsi
@@ -101,6 +101,22 @@
};
};
+&dra7_pmx_core {
+ dcan1_pins_default: dcan1_pins_default {
+ pinctrl-single,pins = <
+ DRA7XX_CORE_IOPAD(0x37d0, PIN_OUTPUT_PULLUP | MUX_MODE0) /* dcan1_tx */
+ DRA7XX_CORE_IOPAD(0x37d4, PIN_INPUT_PULLUP | MUX_MODE0) /* dcan1_rx */
+ >;
+ };
+
+ dcan1_pins_sleep: dcan1_pins_sleep {
+ pinctrl-single,pins = <
+ DRA7XX_CORE_IOPAD(0x37d0, MUX_MODE15 | PULL_UP) /* dcan1_tx.off */
+ DRA7XX_CORE_IOPAD(0x37d4, MUX_MODE15 | PULL_UP) /* dcan1_rx.off */
+ >;
+ };
+};
+
&i2c1 {
status = "okay";
clock-frequency = <400000>;
@@ -391,6 +407,14 @@
max-frequency = <96000000>;
};
+&dcan1 {
+ status = "okay";
+ pinctrl-names = "default", "sleep", "active";
+ pinctrl-0 = <&dcan1_pins_sleep>;
+ pinctrl-1 = <&dcan1_pins_sleep>;
+ pinctrl-2 = <&dcan1_pins_default>;
+};
+
&qspi {
status = "okay";
diff --git a/arch/arm/boot/dts/armada-385-linksys-shelby.dts b/arch/arm/boot/dts/armada-385-linksys-shelby.dts
new file mode 100644
index 000000000000..c7a8ddd7f9a5
--- /dev/null
+++ b/arch/arm/boot/dts/armada-385-linksys-shelby.dts
@@ -0,0 +1,114 @@
+/*
+ * Device Tree file for the Linksys WRT1900ACS (Shelby)
+ *
+ * Copyright (C) 2015 Imre Kaloz <kaloz@openwrt.org>
+ *
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without
+ * any warranty of any kind, whether express or implied.
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+#include "armada-385-linksys.dtsi"
+
+/ {
+ model = "Linksys WRT1900ACS";
+ compatible = "linksys,shelby", "linksys,armada385", "marvell,armada385",
+ "marvell,armada380";
+
+ soc {
+ internal-regs{
+ i2c@11000 {
+
+ pca9635@68 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ wan_amber@0 {
+ label = "shelby:amber:wan";
+ reg = <0x0>;
+ };
+
+ wan_white@1 {
+ label = "shelby:white:wan";
+ reg = <0x1>;
+ };
+
+ wlan_2g@2 {
+ label = "shelby:white:wlan_2g";
+ reg = <0x2>;
+ };
+
+ wlan_5g@3 {
+ label = "shelby:white:wlan_5g";
+ reg = <0x3>;
+ };
+
+ usb2@5 {
+ label = "shelby:white:usb2";
+ reg = <0x5>;
+ };
+
+ usb3_1@6 {
+ label = "shelby:white:usb3_1";
+ reg = <0x6>;
+ };
+
+ usb3_2@7 {
+ label = "shelby:white:usb3_2";
+ reg = <0x7>;
+ };
+
+ wps_white@8 {
+ label = "shelby:white:wps";
+ reg = <0x8>;
+ };
+
+ wps_amber@9 {
+ label = "shelby:amber:wps";
+ reg = <0x9>;
+ };
+ };
+ };
+ };
+ };
+
+ gpio-leds {
+ power {
+ label = "shelby:white:power";
+ };
+
+ sata {
+ label = "shelby:white:sata";
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/armada-385-linksys.dtsi b/arch/arm/boot/dts/armada-385-linksys.dtsi
index df47bf1ea5eb..2306c45685b1 100644
--- a/arch/arm/boot/dts/armada-385-linksys.dtsi
+++ b/arch/arm/boot/dts/armada-385-linksys.dtsi
@@ -59,7 +59,8 @@
ranges = <MBUS_ID(0xf0, 0x01) 0 0xf1000000 0x100000
MBUS_ID(0x01, 0x1d) 0 0xfff00000 0x100000
MBUS_ID(0x09, 0x19) 0 0xf1100000 0x10000
- MBUS_ID(0x09, 0x15) 0 0xf1110000 0x10000>;
+ MBUS_ID(0x09, 0x15) 0 0xf1110000 0x10000
+ MBUS_ID(0x0c, 0x04) 0 0xf1200000 0x100000>;
internal-regs {
i2c@11000 {
@@ -88,6 +89,9 @@
ethernet@70000 {
status = "okay";
phy-mode = "rgmii-id";
+ buffer-manager = <&bm>;
+ bm,pool-long = <2>;
+ bm,pool-short = <3>;
fixed-link {
speed = <1000>;
full-duplex;
@@ -97,6 +101,9 @@
ethernet@34000 {
status = "okay";
phy-mode = "sgmii";
+ buffer-manager = <&bm>;
+ bm,pool-long = <0>;
+ bm,pool-short = <1>;
fixed-link {
speed = <1000>;
full-duplex;
@@ -159,6 +166,10 @@
status = "okay";
};
+ bm@c8000 {
+ status = "okay";
+ };
+
/* USB part of the eSATA/USB 2.0 port */
usb@58000 {
status = "okay";
@@ -241,6 +252,10 @@
};
};
+ bm-bppi {
+ status = "okay";
+ };
+
pcie-controller {
status = "okay";
@@ -305,6 +320,7 @@
sata {
gpios = <&gpio1 22 GPIO_ACTIVE_LOW>;
default-state = "off";
+ linux,default-trigger = "disk-activity";
};
};
diff --git a/arch/arm/boot/dts/armada-385-synology-ds116.dts b/arch/arm/boot/dts/armada-385-synology-ds116.dts
new file mode 100644
index 000000000000..31510eb56f10
--- /dev/null
+++ b/arch/arm/boot/dts/armada-385-synology-ds116.dts
@@ -0,0 +1,321 @@
+/*
+ * Device Tree file for Synology DS116 NAS
+ *
+ * Copyright (C) 2017 Willy Tarreau <w@1wt.eu>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without
+ * any warranty of any kind, whether express or implied.
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+#include "armada-385.dtsi"
+#include <dt-bindings/gpio/gpio.h>
+
+/ {
+ model = "Synology DS116";
+ compatible = "marvell,a385-gp", "marvell,armada385", "marvell,armada380";
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ memory {
+ device_type = "memory";
+ reg = <0x00000000 0x40000000>; /* 1 GB */
+ };
+
+ soc {
+ ranges = <MBUS_ID(0xf0, 0x01) 0 0xf1000000 0x100000
+ MBUS_ID(0x01, 0x1d) 0 0xfff00000 0x100000
+ MBUS_ID(0x09, 0x19) 0 0xf1100000 0x10000
+ MBUS_ID(0x09, 0x15) 0 0xf1110000 0x10000
+ MBUS_ID(0x0c, 0x04) 0 0xf1200000 0x100000>;
+
+ internal-regs {
+ i2c@11000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c0_pins>;
+ status = "okay";
+ clock-frequency = <100000>;
+
+ eeprom@57 {
+ compatible = "atmel,24c64";
+ reg = <0x57>;
+ };
+ };
+
+ serial@12000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart0_pins>;
+ status = "okay";
+ };
+
+ serial@12100 {
+ /* A PIC16F1829 is connected to uart1 at 9600 bps,
+ * and takes single-character commands:
+ * "1" : power off // already handled by the poweroff node
+ * "2" : short beep
+ * "3" : long beep
+ * "4" : turn the power LED ON
+ * "5" : flash the power LED
+ * "6" : turn the power LED OFF
+ * "7" : turn the status LED OFF
+ * "8" : turn the status LED ON
+ * "9" : flash the status LED
+ * "A" : flash the motherboard LED (D8)
+ * "B" : turn the motherboard LED OFF
+ * "C" : hard reset
+ */
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart1_pins>;
+ status = "okay";
+ };
+
+ poweroff@12100 {
+ compatible = "synology,power-off";
+ reg = <0x12100 0x100>;
+ clocks = <&coreclk 0>;
+ };
+
+ ethernet@70000 {
+ pinctrl-names = "default";
+ phy = <&phy0>;
+ phy-mode = "sgmii";
+ buffer-manager = <&bm>;
+ bm,pool-long = <0>;
+ status = "okay";
+ };
+
+
+ mdio@72004 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&mdio_pins>;
+
+ phy0: ethernet-phy@1 {
+ reg = <1>;
+ };
+ };
+
+ sata@a8000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&sata0_pins>;
+ status = "okay";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ sata0: sata-port@0 {
+ reg = <0>;
+ target-supply = <&reg_5v_sata0>;
+ };
+ };
+
+ bm@c8000 {
+ status = "okay";
+ };
+
+ usb3@f0000 {
+ usb-phy = <&usb3_0_phy>;
+ status = "okay";
+ };
+
+ usb3@f8000 {
+ usb-phy = <&usb3_1_phy>;
+ status = "okay";
+ };
+ };
+
+ bm-bppi {
+ status = "okay";
+ };
+
+ gpio-fan {
+ compatible = "gpio-fan";
+ gpios = <&gpio1 18 GPIO_ACTIVE_HIGH>,
+ <&gpio1 17 GPIO_ACTIVE_HIGH>,
+ <&gpio1 16 GPIO_ACTIVE_HIGH>;
+ gpio-fan,speed-map = < 0 0
+ 1500 1
+ 2500 2
+ 3000 3
+ 3400 4
+ 3700 5
+ 3900 6
+ 4000 7>;
+ #cooling-cells = <2>;
+ };
+
+ gpio-leds {
+ compatible = "gpio-leds";
+
+ /* The green part is on gpio0.20 which is also used by
+ * sata0, and accesses to SATA disk 0 make it blink, so it
+ * doesn't need to be declared here.
+ */
+ orange {
+ gpios = <&gpio0 13 GPIO_ACTIVE_HIGH>;
+ label = "ds116:orange:disk";
+ default-state = "off";
+ };
+ };
+ };
+
+ usb3_0_phy: usb3_0_phy {
+ compatible = "usb-nop-xceiv";
+ vcc-supply = <&reg_usb3_0_vbus>;
+ };
+
+ usb3_1_phy: usb3_1_phy {
+ compatible = "usb-nop-xceiv";
+ vcc-supply = <&reg_usb3_1_vbus>;
+ };
+
+ reg_usb3_0_vbus: usb3-vbus0 {
+ compatible = "regulator-fixed";
+ regulator-name = "usb3-vbus0";
+ pinctrl-names = "default";
+ pinctrl-0 = <&xhci0_vbus_pins>;
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ enable-active-high;
+ gpio = <&gpio1 26 GPIO_ACTIVE_HIGH>;
+ };
+
+ reg_usb3_1_vbus: usb3-vbus1 {
+ compatible = "regulator-fixed";
+ regulator-name = "usb3-vbus1";
+ pinctrl-names = "default";
+ pinctrl-0 = <&xhci1_vbus_pins>;
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ enable-active-high;
+ gpio = <&gpio1 27 GPIO_ACTIVE_HIGH>;
+ };
+
+ reg_sata0: pwr-sata0 {
+ compatible = "regulator-fixed";
+ regulator-name = "pwr_en_sata0";
+ regulator-min-microvolt = <12000000>;
+ regulator-max-microvolt = <12000000>;
+ enable-active-high;
+ regulator-boot-on;
+ gpio = <&gpio0 15 GPIO_ACTIVE_HIGH>;
+ };
+
+ reg_5v_sata0: v5-sata0 {
+ compatible = "regulator-fixed";
+ regulator-name = "v5.0-sata0";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ vin-supply = <&reg_sata0>;
+ };
+
+ reg_12v_sata0: v12-sata0 {
+ compatible = "regulator-fixed";
+ regulator-name = "v12.0-sata0";
+ regulator-min-microvolt = <12000000>;
+ regulator-max-microvolt = <12000000>;
+ vin-supply = <&reg_sata0>;
+ };
+};
+
+&spi0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&spi0_pins>;
+ status = "okay";
+
+ spi-flash@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "macronix,mx25l6405d", "jedec,spi-nor";
+ reg = <0>; /* Chip select 0 */
+ spi-max-frequency = <50000000>;
+ m25p,fast-read;
+
+ /* Note: there is a RedBoot partition table despite U-Boot
+ * being used. The names presented here are the same as those
+ * found in the FIS directory. There is also a small device
+ * tree in the last 64kB of the RedBoot partition which is not
+ * enumerated. The MAC address and the serial number are listed
+ * in the "vendor" partition.
+ */
+ partition@00000000 {
+ label = "RedBoot";
+ reg = <0x00000000 0x000f0000>;
+ read-only;
+ };
+
+ partition@000c0000 {
+ label = "zImage";
+ reg = <0x000f0000 0x002d0000>;
+ };
+
+ partition@00390000 {
+ label = "rd.gz";
+ reg = <0x003c0000 0x00410000>;
+ };
+
+ partition@007d0000 {
+ label = "vendor";
+ reg = <0x007d0000 0x00010000>;
+ read-only;
+ };
+
+ partition@007e0000 {
+ label = "RedBoot config";
+ reg = <0x007e0000 0x00010000>;
+ read-only;
+ };
+
+ partition@007f0000 {
+ label = "FIS directory";
+ reg = <0x007f0000 0x00010000>;
+ read-only;
+ };
+ };
+};
+
+&pinctrl {
+ /* use only one pin for UART1, as mpp20 is used by sata0 */
+ uart1_pins: uart-pins-1 {
+ marvell,pins = "mpp19";
+ marvell,function = "ua1";
+ };
+
+ xhci0_vbus_pins: xhci0_vbus_pins {
+ marvell,pins = "mpp58";
+ marvell,function = "gpio";
+ };
+ xhci1_vbus_pins: xhci1_vbus_pins {
+ marvell,pins = "mpp59";
+ marvell,function = "gpio";
+ };
+};
diff --git a/arch/arm/boot/dts/armada-385.dtsi b/arch/arm/boot/dts/armada-385.dtsi
index 8e63be33472e..7fcc4c4885cf 100644
--- a/arch/arm/boot/dts/armada-385.dtsi
+++ b/arch/arm/boot/dts/armada-385.dtsi
@@ -70,13 +70,7 @@
};
soc {
- internal-regs {
- pinctrl@18000 {
- compatible = "marvell,mv88f6820-pinctrl";
- };
- };
-
- pcie-controller {
+ pciec: pcie-controller {
compatible = "marvell,armada-370-pcie";
status = "disabled";
device_type = "pci";
@@ -106,7 +100,7 @@
* configured in x4 by the bootloader, then
* pcie@4,0 is not available.
*/
- pcie@1,0 {
+ pcie1: pcie@1,0 {
device_type = "pci";
assigned-addresses = <0x82000800 0 0x80000 0 0x2000>;
reg = <0x0800 0 0 0 0>;
@@ -124,7 +118,7 @@
};
/* x1 port */
- pcie@2,0 {
+ pcie2: pcie@2,0 {
device_type = "pci";
assigned-addresses = <0x82000800 0 0x40000 0 0x2000>;
reg = <0x1000 0 0 0 0>;
@@ -142,7 +136,7 @@
};
/* x1 port */
- pcie@3,0 {
+ pcie3: pcie@3,0 {
device_type = "pci";
assigned-addresses = <0x82000800 0 0x44000 0 0x2000>;
reg = <0x1800 0 0 0 0>;
@@ -163,7 +157,7 @@
* x1 port only available when pcie@1,0 is
* configured as a x1 port
*/
- pcie@4,0 {
+ pcie4: pcie@4,0 {
device_type = "pci";
assigned-addresses = <0x82000800 0 0x48000 0 0x2000>;
reg = <0x2000 0 0 0 0>;
@@ -182,3 +176,7 @@
};
};
};
+
+&pinctrl {
+ compatible = "marvell,mv88f6820-pinctrl";
+};
diff --git a/arch/arm/boot/dts/armada-388-clearfog.dts b/arch/arm/boot/dts/armada-388-clearfog.dts
index 2745b7416313..0d5f1f062275 100644
--- a/arch/arm/boot/dts/armada-388-clearfog.dts
+++ b/arch/arm/boot/dts/armada-388-clearfog.dts
@@ -186,25 +186,6 @@
};
};
-&pinctrl {
- clearfog_dsa0_clk_pins: clearfog-dsa0-clk-pins {
- marvell,pins = "mpp46";
- marvell,function = "ref";
- };
- clearfog_dsa0_pins: clearfog-dsa0-pins {
- marvell,pins = "mpp23", "mpp41";
- marvell,function = "gpio";
- };
- clearfog_spi1_cs_pins: spi1-cs-pins {
- marvell,pins = "mpp55";
- marvell,function = "spi1";
- };
- rear_button_pins: rear-button-pins {
- marvell,pins = "mpp34";
- marvell,function = "gpio";
- };
-};
-
&mdio {
status = "okay";
@@ -268,6 +249,25 @@
};
};
+&pinctrl {
+ clearfog_dsa0_clk_pins: clearfog-dsa0-clk-pins {
+ marvell,pins = "mpp46";
+ marvell,function = "ref";
+ };
+ clearfog_dsa0_pins: clearfog-dsa0-pins {
+ marvell,pins = "mpp23", "mpp41";
+ marvell,function = "gpio";
+ };
+ clearfog_spi1_cs_pins: spi1-cs-pins {
+ marvell,pins = "mpp55";
+ marvell,function = "spi1";
+ };
+ rear_button_pins: rear-button-pins {
+ marvell,pins = "mpp34";
+ marvell,function = "gpio";
+ };
+};
+
&spi1 {
/*
* Add SPI CS pins for clearfog:
diff --git a/arch/arm/boot/dts/armada-388.dtsi b/arch/arm/boot/dts/armada-388.dtsi
index 564fa5937e25..1c0d151b2aaa 100644
--- a/arch/arm/boot/dts/armada-388.dtsi
+++ b/arch/arm/boot/dts/armada-388.dtsi
@@ -50,13 +50,8 @@
model = "Marvell Armada 388 family SoC";
compatible = "marvell,armada388", "marvell,armada385",
"marvell,armada380";
-
soc {
internal-regs {
- pinctrl@18000 {
- compatible = "marvell,mv88f6828-pinctrl";
- };
-
sata@e0000 {
compatible = "marvell,armada-380-ahci";
reg = <0xe0000 0x2000>;
@@ -68,3 +63,7 @@
};
};
};
+
+&pinctrl {
+ compatible = "marvell,mv88f6828-pinctrl";
+};
diff --git a/arch/arm/boot/dts/armada-38x.dtsi b/arch/arm/boot/dts/armada-38x.dtsi
index 79b767507eab..8b165c31de1e 100644
--- a/arch/arm/boot/dts/armada-38x.dtsi
+++ b/arch/arm/boot/dts/armada-38x.dtsi
@@ -82,7 +82,7 @@
reg = <MBUS_ID(0x01, 0x1d) 0 0x200000>;
};
- devbus-bootcs {
+ devbus_bootcs: devbus-bootcs {
compatible = "marvell,mvebu-devbus";
reg = <MBUS_ID(0xf0, 0x01) 0x10400 0x8>;
ranges = <0 MBUS_ID(0x01, 0x2f) 0 0xffffffff>;
@@ -92,7 +92,7 @@
status = "disabled";
};
- devbus-cs0 {
+ devbus_cs0: devbus-cs0 {
compatible = "marvell,mvebu-devbus";
reg = <MBUS_ID(0xf0, 0x01) 0x10408 0x8>;
ranges = <0 MBUS_ID(0x01, 0x3e) 0 0xffffffff>;
@@ -102,7 +102,7 @@
status = "disabled";
};
- devbus-cs1 {
+ devbus_cs1: devbus-cs1 {
compatible = "marvell,mvebu-devbus";
reg = <MBUS_ID(0xf0, 0x01) 0x10410 0x8>;
ranges = <0 MBUS_ID(0x01, 0x3d) 0 0xffffffff>;
@@ -112,7 +112,7 @@
status = "disabled";
};
- devbus-cs2 {
+ devbus_cs2: devbus-cs2 {
compatible = "marvell,mvebu-devbus";
reg = <MBUS_ID(0xf0, 0x01) 0x10418 0x8>;
ranges = <0 MBUS_ID(0x01, 0x3b) 0 0xffffffff>;
@@ -122,7 +122,7 @@
status = "disabled";
};
- devbus-cs3 {
+ devbus_cs3: devbus-cs3 {
compatible = "marvell,mvebu-devbus";
reg = <MBUS_ID(0xf0, 0x01) 0x10420 0x8>;
ranges = <0 MBUS_ID(0x01, 0x37) 0 0xffffffff>;
@@ -339,7 +339,7 @@
<GIC_SPI 61 IRQ_TYPE_LEVEL_HIGH>;
};
- system-controller@18200 {
+ systemc: system-controller@18200 {
compatible = "marvell,armada-380-system-controller",
"marvell,armada-370-xp-system-controller";
reg = <0x18200 0x100>;
@@ -360,7 +360,8 @@
mbusc: mbus-controller@20000 {
compatible = "marvell,mbus-controller";
- reg = <0x20000 0x100>, <0x20180 0x20>;
+ reg = <0x20000 0x100>, <0x20180 0x20>,
+ <0x20250 0x8>;
};
mpic: interrupt-controller@20a00 {
@@ -373,7 +374,7 @@
interrupts = <GIC_PPI 15 IRQ_TYPE_LEVEL_HIGH>;
};
- timer@20300 {
+ timer: timer@20300 {
compatible = "marvell,armada-380-timer",
"marvell,armada-xp-timer";
reg = <0x20300 0x30>, <0x21040 0x30>;
@@ -387,14 +388,14 @@
clock-names = "nbclk", "fixed";
};
- watchdog@20300 {
+ watchdog: watchdog@20300 {
compatible = "marvell,armada-380-wdt";
reg = <0x20300 0x34>, <0x20704 0x4>, <0x18260 0x4>;
clocks = <&coreclk 2>, <&refclk>;
clock-names = "nbclk", "fixed";
};
- cpurst@20800 {
+ cpurst: cpurst@20800 {
compatible = "marvell,armada-370-cpu-reset";
reg = <0x20800 0x10>;
};
@@ -404,12 +405,12 @@
reg = <0x20d20 0x6c>;
};
- coherency-fabric@21010 {
+ coherencyfab: coherency-fabric@21010 {
compatible = "marvell,armada-380-coherency-fabric";
reg = <0x21010 0x1c>;
};
- pmsu@22000 {
+ pmsu: pmsu@22000 {
compatible = "marvell,armada-380-pmsu";
reg = <0x22000 0x1000>;
};
@@ -451,7 +452,7 @@
status = "disabled";
};
- usb@58000 {
+ usb0: usb@58000 {
compatible = "marvell,orion-ehci";
reg = <0x58000 0x500>;
interrupts = <GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH>;
@@ -459,7 +460,7 @@
status = "disabled";
};
- xor@60800 {
+ xor0: xor@60800 {
compatible = "marvell,armada-380-xor", "marvell,orion-xor";
reg = <0x60800 0x100
0x60a00 0x100>;
@@ -479,7 +480,7 @@
};
};
- xor@60900 {
+ xor1: xor@60900 {
compatible = "marvell,armada-380-xor", "marvell,orion-xor";
reg = <0x60900 0x100
0x60b00 0x100>;
@@ -507,7 +508,7 @@
clocks = <&gateclk 4>;
};
- crypto@90000 {
+ cesa: crypto@90000 {
compatible = "marvell,armada-38x-crypto";
reg = <0x90000 0x10000>;
reg-names = "regs";
@@ -522,14 +523,14 @@
marvell,crypto-sram-size = <0x800>;
};
- rtc@a3800 {
+ rtc: rtc@a3800 {
compatible = "marvell,armada-380-rtc";
reg = <0xa3800 0x20>, <0x184a0 0x0c>;
reg-names = "rtc", "rtc-soc";
interrupts = <GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>;
};
- sata@a8000 {
+ ahci0: sata@a8000 {
compatible = "marvell,armada-380-ahci";
reg = <0xa8000 0x2000>;
interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>;
@@ -545,7 +546,7 @@
status = "disabled";
};
- sata@e0000 {
+ ahci1: sata@e0000 {
compatible = "marvell,armada-380-ahci";
reg = <0xe0000 0x2000>;
interrupts = <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>;
@@ -561,13 +562,13 @@
clock-output-names = "nand";
};
- thermal@e8078 {
+ thermal: thermal@e8078 {
compatible = "marvell,armada380-thermal";
reg = <0xe4078 0x4>, <0xe4074 0x4>;
status = "okay";
};
- flash@d0000 {
+ nand: flash@d0000 {
compatible = "marvell,armada370-nand";
reg = <0xd0000 0x54>;
#address-cells = <1>;
@@ -577,7 +578,7 @@
status = "disabled";
};
- sdhci@d8000 {
+ sdhci: sdhci@d8000 {
compatible = "marvell,armada-380-sdhci";
reg-names = "sdhci", "mbus", "conf-sdio3";
reg = <0xd8000 0x1000>,
@@ -589,7 +590,7 @@
status = "disabled";
};
- usb3@f0000 {
+ usb3_0: usb3@f0000 {
compatible = "marvell,armada-380-xhci";
reg = <0xf0000 0x4000>,<0xf4000 0x4000>;
interrupts = <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>;
@@ -597,7 +598,7 @@
status = "disabled";
};
- usb3@f8000 {
+ usb3_1: usb3@f8000 {
compatible = "marvell,armada-380-xhci";
reg = <0xf8000 0x4000>,<0xfc000 0x4000>;
interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
diff --git a/arch/arm/boot/dts/armada-xp-98dx3236.dtsi b/arch/arm/boot/dts/armada-xp-98dx3236.dtsi
index f6a03dcee5ef..84cc232a29e9 100644
--- a/arch/arm/boot/dts/armada-xp-98dx3236.dtsi
+++ b/arch/arm/boot/dts/armada-xp-98dx3236.dtsi
@@ -45,11 +45,14 @@
* common to all Armada XP SoCs.
*/
-#include "armada-xp.dtsi"
+#include "armada-370-xp.dtsi"
/ {
+ #address-cells = <2>;
+ #size-cells = <2>;
+
model = "Marvell 98DX3236 SoC";
- compatible = "marvell,armadaxp-98dx3236", "marvell,armadaxp", "marvell,armada-370-xp";
+ compatible = "marvell,armadaxp-98dx3236", "marvell,armada-370-xp";
aliases {
gpio0 = &gpio0;
@@ -72,12 +75,19 @@
};
soc {
+ compatible = "marvell,armadaxp-mbus", "simple-bus";
+
ranges = <MBUS_ID(0xf0, 0x01) 0 0 0xf1000000 0x100000
MBUS_ID(0x01, 0x1d) 0 0 0xfff00000 0x100000
MBUS_ID(0x01, 0x2f) 0 0 0xf0000000 0x1000000
MBUS_ID(0x03, 0x00) 0 0 0xa8000000 0x4000000
MBUS_ID(0x08, 0x00) 0 0 0xac000000 0x100000>;
+ bootrom {
+ compatible = "marvell,bootrom";
+ reg = <MBUS_ID(0x01, 0x1d) 0 0x100000>;
+ };
+
/*
* 98DX3236 has 1 x1 PCIe unit Gen2.0
*/
@@ -95,8 +105,7 @@
ranges =
<0x82000000 0 0x40000 MBUS_ID(0xf0, 0x01) 0x40000 0 0x00002000 /* Port 0.0 registers */
0x82000000 0x1 0 MBUS_ID(0x04, 0xe8) 0 1 0 /* Port 0.0 MEM */
- 0x81000000 0x1 0 MBUS_ID(0x04, 0xe0) 0 1 0 /* Port 0.0 IO */
- 0x82000000 0x2 0 MBUS_ID(0x04, 0xd8) 0 1 0 /* Port 0.1 MEM */>;
+ 0x81000000 0x1 0 MBUS_ID(0x04, 0xe0) 0 1 0 /* Port 0.0 IO */>;
pcie1: pcie@1,0 {
device_type = "pci";
@@ -117,31 +126,86 @@
};
internal-regs {
- coreclk: mvebu-sar@18230 {
- compatible = "marvell,mv98dx3236-core-clock";
+ sdramc@1400 {
+ compatible = "marvell,armada-xp-sdram-controller";
+ reg = <0x1400 0x500>;
+ };
+
+ L2: l2-cache@8000 {
+ compatible = "marvell,aurora-system-cache";
+ reg = <0x08000 0x1000>;
+ cache-id-part = <0x100>;
+ cache-level = <2>;
+ cache-unified;
+ wt-override;
+ };
+
+ gpio0: gpio@18100 {
+ compatible = "marvell,orion-gpio";
+ reg = <0x18100 0x40>;
+ ngpios = <32>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ interrupts = <82>, <83>, <84>, <85>;
+ };
+
+ /* does not exist */
+ gpio1: gpio@18140 {
+ compatible = "marvell,orion-gpio";
+ reg = <0x18140 0x40>;
+ status = "disabled";
+ };
+
+ gpio2: gpio@18180 { /* rework some properties */
+ compatible = "marvell,orion-gpio";
+ reg = <0x18180 0x40>;
+ ngpios = <1>; /* only gpio #32 */
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ interrupts = <87>;
+ };
+
+ systemc: system-controller@18200 {
+ compatible = "marvell,armada-370-xp-system-controller";
+ reg = <0x18200 0x500>;
+ };
+
+ gateclk: clock-gating-control@18220 {
+ compatible = "marvell,mv98dx3236-gating-clock";
+ reg = <0x18220 0x4>;
+ clocks = <&coreclk 0>;
+ #clock-cells = <1>;
};
cpuclk: clock-complex@18700 {
+ #clock-cells = <1>;
compatible = "marvell,mv98dx3236-cpu-clock";
+ reg = <0x18700 0x24>, <0x1c054 0x10>;
+ clocks = <&coreclk 1>;
};
corediv-clock@18740 {
status = "disabled";
};
- xor@60900 {
- status = "disabled";
+ cpu-config@21000 {
+ compatible = "marvell,armada-xp-cpu-config";
+ reg = <0x21000 0x8>;
};
- crypto@90000 {
- status = "disabled";
+ ethernet@70000 {
+ compatible = "marvell,armada-xp-neta";
};
- xor@f0900 {
- status = "disabled";
+ ethernet@74000 {
+ compatible = "marvell,armada-xp-neta";
};
- xor@f0800 {
+ xor1: xor@f0800 {
compatible = "marvell,orion-xor";
reg = <0xf0800 0x100
0xf0a00 0x100>;
@@ -161,45 +225,43 @@
};
};
- gpio0: gpio@18100 {
- compatible = "marvell,orion-gpio";
- reg = <0x18100 0x40>;
- ngpios = <32>;
- gpio-controller;
- #gpio-cells = <2>;
- interrupt-controller;
- #interrupt-cells = <2>;
- interrupts = <82>, <83>, <84>, <85>;
- };
-
- /* does not exist */
- gpio1: gpio@18140 {
- compatible = "marvell,orion-gpio";
- reg = <0x18140 0x40>;
- status = "disabled";
+ nand: nand@d0000 {
+ clocks = <&dfx_coredivclk 0>;
};
- gpio2: gpio@18180 { /* rework some properties */
- compatible = "marvell,orion-gpio";
- reg = <0x18180 0x40>;
- ngpios = <1>; /* only gpio #32 */
- gpio-controller;
- #gpio-cells = <2>;
- interrupt-controller;
- #interrupt-cells = <2>;
- interrupts = <87>;
- };
+ xor0: xor@f0900 {
+ compatible = "marvell,orion-xor";
+ reg = <0xF0900 0x100
+ 0xF0B00 0x100>;
+ clocks = <&gateclk 28>;
+ status = "okay";
- nand: nand@d0000 {
- clocks = <&dfx_coredivclk 0>;
+ xor00 {
+ interrupts = <94>;
+ dmacap,memcpy;
+ dmacap,xor;
+ };
+ xor01 {
+ interrupts = <95>;
+ dmacap,memcpy;
+ dmacap,xor;
+ dmacap,memset;
+ };
};
};
- dfxr: dfx-registers@ac000000 {
- compatible = "simple-bus";
+ dfx: dfx-server@ac000000 {
+ compatible = "marvell,dfx-server", "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
ranges = <0 MBUS_ID(0x08, 0x00) 0 0x100000>;
+ reg = <MBUS_ID(0x08, 0x00) 0 0x100000>;
+
+ coreclk: mvebu-sar@f8204 {
+ compatible = "marvell,mv98dx3236-core-clock";
+ reg = <0xf8204 0x4>;
+ #clock-cells = <1>;
+ };
dfx_coredivclk: corediv-clock@f8268 {
compatible = "marvell,mv98dx3236-corediv-clock";
@@ -208,11 +270,6 @@
clocks = <&mainpll>;
clock-output-names = "nand";
};
-
- dfx: dfx@0 {
- compatible = "marvell,dfx-server";
- reg = <0 0x100000>;
- };
};
switch: switch@a8000000 {
@@ -229,6 +286,53 @@
};
};
};
+
+ clocks {
+ /* 25 MHz reference crystal */
+ refclk: oscillator {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <25000000>;
+ };
+ };
+};
+
+&i2c0 {
+ compatible = "marvell,mv78230-i2c", "marvell,mv64xxx-i2c";
+ reg = <0x11000 0x100>;
+};
+
+&i2c1 {
+ compatible = "marvell,mv78230-i2c", "marvell,mv64xxx-i2c";
+ reg = <0x11100 0x100>;
+};
+
+&mpic {
+ reg = <0x20a00 0x2d0>, <0x21070 0x58>;
+};
+
+&timer {
+ compatible = "marvell,armada-xp-timer";
+ clocks = <&coreclk 2>, <&refclk>;
+ clock-names = "nbclk", "fixed";
+};
+
+&watchdog {
+ compatible = "marvell,armada-xp-wdt";
+ clocks = <&coreclk 2>, <&refclk>;
+ clock-names = "nbclk", "fixed";
+};
+
+&cpurst {
+ reg = <0x20800 0x20>;
+};
+
+&usb0 {
+ clocks = <&gateclk 18>;
+};
+
+&usb1 {
+ clocks = <&gateclk 19>;
};
&pinctrl {
@@ -241,14 +345,13 @@
};
};
-&sdio {
- status = "disabled";
+&spi0 {
+ compatible = "marvell,armada-xp-spi", "marvell,orion-spi";
+ pinctrl-0 = <&spi0_pins>;
+ pinctrl-names = "default";
};
-&crypto_sram0 {
+&sdio {
status = "disabled";
};
-&crypto_sram1 {
- status = "disabled";
-};
diff --git a/arch/arm/boot/dts/armada-xp-98dx3336.dtsi b/arch/arm/boot/dts/armada-xp-98dx3336.dtsi
index e1580afdc260..a0d81bd7312b 100644
--- a/arch/arm/boot/dts/armada-xp-98dx3336.dtsi
+++ b/arch/arm/boot/dts/armada-xp-98dx3336.dtsi
@@ -49,7 +49,7 @@
/ {
model = "Marvell 98DX3336 SoC";
- compatible = "marvell,armadaxp-98dx3336", "marvell,armadaxp-98dx3236", "marvell,armadaxp", "marvell,armada-370-xp";
+ compatible = "marvell,armadaxp-98dx3336", "marvell,armadaxp-98dx3236", "marvell,armada-370-xp";
cpus {
cpu@1 {
diff --git a/arch/arm/boot/dts/armada-xp-98dx4251.dtsi b/arch/arm/boot/dts/armada-xp-98dx4251.dtsi
index b9d9b269efb4..51de91b31a9d 100644
--- a/arch/arm/boot/dts/armada-xp-98dx4251.dtsi
+++ b/arch/arm/boot/dts/armada-xp-98dx4251.dtsi
@@ -49,7 +49,7 @@
/ {
model = "Marvell 98DX4251 SoC";
- compatible = "marvell,armadaxp-98dx4251", "marvell,armadaxp-98dx3236", "marvell,armadaxp", "marvell,armada-370-xp";
+ compatible = "marvell,armadaxp-98dx4251", "marvell,armadaxp-98dx3236", "marvell,armada-370-xp";
cpus {
cpu@1 {
diff --git a/arch/arm/boot/dts/armada-xp-db-dxbc2.dts b/arch/arm/boot/dts/armada-xp-db-dxbc2.dts
index a8130805074e..1b1ff17fdd9c 100644
--- a/arch/arm/boot/dts/armada-xp-db-dxbc2.dts
+++ b/arch/arm/boot/dts/armada-xp-db-dxbc2.dts
@@ -58,7 +58,7 @@
/ {
model = "Marvell Bobcat2 Evaluation Board";
- compatible = "marvell,db-dxbc2", "marvell,armadaxp-98dx4251", "marvell,armadaxp", "marvell,armada-370-xp";
+ compatible = "marvell,db-dxbc2", "marvell,armadaxp-98dx4251", "marvell,armada-370-xp";
chosen {
bootargs = "console=ttyS0,115200 earlyprintk";
diff --git a/arch/arm/boot/dts/armada-xp-db-xc3-24g4xg.dts b/arch/arm/boot/dts/armada-xp-db-xc3-24g4xg.dts
index 4e07cb6ed800..06fce35d7491 100644
--- a/arch/arm/boot/dts/armada-xp-db-xc3-24g4xg.dts
+++ b/arch/arm/boot/dts/armada-xp-db-xc3-24g4xg.dts
@@ -58,7 +58,7 @@
/ {
model = "DB-XC3-24G4XG";
- compatible = "marvell,db-xc3-24g4xg", "marvell,armadaxp-98dx3336", "marvell,armadaxp", "marvell,armada-370-xp";
+ compatible = "marvell,db-xc3-24g4xg", "marvell,armadaxp-98dx3336", "marvell,armada-370-xp";
chosen {
bootargs = "console=ttyS0,115200 earlyprintk";
diff --git a/arch/arm/boot/dts/armada-xp-linksys-mamba.dts b/arch/arm/boot/dts/armada-xp-linksys-mamba.dts
index 42ea8764814c..9efcf59c9b44 100644
--- a/arch/arm/boot/dts/armada-xp-linksys-mamba.dts
+++ b/arch/arm/boot/dts/armada-xp-linksys-mamba.dts
@@ -71,7 +71,8 @@
ranges = <MBUS_ID(0xf0, 0x01) 0 0 0xf1000000 0x100000
MBUS_ID(0x01, 0x1d) 0 0 0xfff00000 0x100000
MBUS_ID(0x09, 0x09) 0 0 0xf1100000 0x10000
- MBUS_ID(0x09, 0x05) 0 0 0xf1110000 0x10000>;
+ MBUS_ID(0x09, 0x05) 0 0 0xf1110000 0x10000
+ MBUS_ID(0x0c, 0x04) 0 0 0xf1200000 0x100000>;
internal-regs {
@@ -95,6 +96,9 @@
pinctrl-names = "default";
status = "okay";
phy-mode = "rgmii-id";
+ buffer-manager = <&bm>;
+ bm,pool-long = <0>;
+ bm,pool-short = <1>;
fixed-link {
speed = <1000>;
full-duplex;
@@ -106,6 +110,9 @@
pinctrl-names = "default";
status = "okay";
phy-mode = "rgmii-id";
+ buffer-manager = <&bm>;
+ bm,pool-long = <2>;
+ bm,pool-short = <3>;
fixed-link {
speed = <1000>;
full-duplex;
@@ -156,6 +163,7 @@
esata@4 {
label = "mamba:white:esata";
reg = <0x4>;
+ linux,default-trigger = "disk-activity";
};
usb2@5 {
@@ -185,6 +193,10 @@
};
};
+ bm@c8000 {
+ status = "okay";
+ };
+
nand@d0000 {
status = "okay";
num-cs = <1>;
@@ -258,6 +270,10 @@
};
};
};
+
+ bm-bppi {
+ status = "okay";
+ };
};
gpio_keys {
diff --git a/arch/arm/boot/dts/aspeed-ast2500-evb.dts b/arch/arm/boot/dts/aspeed-ast2500-evb.dts
index d967603dade8..7c90dac99822 100644
--- a/arch/arm/boot/dts/aspeed-ast2500-evb.dts
+++ b/arch/arm/boot/dts/aspeed-ast2500-evb.dts
@@ -20,6 +20,28 @@
};
};
+&fmc {
+ status = "okay";
+ flash@0 {
+ status = "okay";
+ m25p,fast-read;
+ label = "bmc";
+ };
+};
+
+&spi1 {
+ status = "okay";
+ flash@0 {
+ status = "okay";
+ m25p,fast-read;
+ label = "pnor";
+ };
+};
+
+&spi2 {
+ status = "okay";
+};
+
&uart5 {
status = "okay";
};
diff --git a/arch/arm/boot/dts/aspeed-bmc-opp-palmetto.dts b/arch/arm/boot/dts/aspeed-bmc-opp-palmetto.dts
index 1d2fc1e1dc29..112551766275 100644
--- a/arch/arm/boot/dts/aspeed-bmc-opp-palmetto.dts
+++ b/arch/arm/boot/dts/aspeed-bmc-opp-palmetto.dts
@@ -31,6 +31,24 @@
};
};
+&fmc {
+ status = "okay";
+ flash@0 {
+ status = "okay";
+ m25p,fast-read;
+ label = "bmc";
+ };
+};
+
+&spi {
+ status = "okay";
+ flash@0 {
+ status = "okay";
+ m25p,fast-read;
+ label = "pnor";
+ };
+};
+
&uart5 {
status = "okay";
};
diff --git a/arch/arm/boot/dts/aspeed-bmc-opp-romulus.dts b/arch/arm/boot/dts/aspeed-bmc-opp-romulus.dts
index 7a3b2b50c884..1190fec1b5d0 100644
--- a/arch/arm/boot/dts/aspeed-bmc-opp-romulus.dts
+++ b/arch/arm/boot/dts/aspeed-bmc-opp-romulus.dts
@@ -31,6 +31,42 @@
};
};
+&fmc {
+ status = "okay";
+ flash@0 {
+ status = "okay";
+ m25p,fast-read;
+ label = "bmc";
+ };
+};
+
+&spi1 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_spi1_default>;
+
+ flash@0 {
+ status = "okay";
+ m25p,fast-read;
+ label = "pnor";
+ };
+};
+
+&uart1 {
+ /* Rear RS-232 connector */
+ status = "okay";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_txd1_default
+ &pinctrl_rxd1_default
+ &pinctrl_nrts1_default
+ &pinctrl_ndtr1_default
+ &pinctrl_ndsr1_default
+ &pinctrl_ncts1_default
+ &pinctrl_ndcd1_default
+ &pinctrl_nri1_default>;
+};
+
&uart5 {
status = "okay";
};
diff --git a/arch/arm/boot/dts/aspeed-g4.dtsi b/arch/arm/boot/dts/aspeed-g4.dtsi
index c79c937b0a8a..8c6bc29eb7f6 100644
--- a/arch/arm/boot/dts/aspeed-g4.dtsi
+++ b/arch/arm/boot/dts/aspeed-g4.dtsi
@@ -18,21 +18,41 @@
};
};
- clocks {
- clk_clkin: clk_clkin {
- #clock-cells = <0>;
- compatible = "fixed-clock";
- clock-frequency = <48000000>;
- };
-
- };
-
ahb {
compatible = "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
ranges;
+ fmc: flash-controller@1e620000 {
+ reg = < 0x1e620000 0x94
+ 0x20000000 0x02000000 >;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "aspeed,ast2400-fmc";
+ status = "disabled";
+ interrupts = <19>;
+ flash@0 {
+ reg = < 0 >;
+ compatible = "jedec,spi-nor";
+ status = "disabled";
+ };
+ };
+
+ spi: flash-controller@1e630000 {
+ reg = < 0x1e630000 0x18
+ 0x30000000 0x02000000 >;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "aspeed,ast2400-spi";
+ status = "disabled";
+ flash@0 {
+ reg = < 0 >;
+ compatible = "jedec,spi-nor";
+ status = "disabled";
+ };
+ };
+
vic: interrupt-controller@1e6c0080 {
compatible = "aspeed,ast2400-vic";
interrupt-controller;
@@ -61,16 +81,48 @@
#size-cells = <1>;
ranges;
- clk_hpll: clk_hpll@1e6e2070 {
- #clock-cells = <0>;
- compatible = "aspeed,g4-hpll-clock";
- reg = <0x1e6e2070 0x4>;
- clocks = <&clk_clkin>;
- };
-
syscon: syscon@1e6e2000 {
compatible = "aspeed,g4-scu", "syscon", "simple-mfd";
reg = <0x1e6e2000 0x1a8>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ clk_clkin: clk_clkin {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ clock-frequency = <48000000>;
+ };
+
+ clk_hpll: clk_hpll@70 {
+ #clock-cells = <0>;
+ compatible = "aspeed,g4-hpll-clock", "fixed-clock";
+ reg = <0x70>;
+ clocks = <&clk_clkin>;
+ clock-frequency = <384000000>;
+ };
+
+ clk_ahb: clk_ahb@70 {
+ #clock-cells = <0>;
+ compatible = "aspeed,g4-ahb-clock", "fixed-clock";
+ reg = <0x70>;
+ clocks = <&clk_hpll>;
+ clock-frequency = <192000000>;
+ };
+
+ clk_apb: clk_apb@08 {
+ #clock-cells = <0>;
+ compatible = "aspeed,g4-apb-clock", "fixed-clock";
+ reg = <0x08>;
+ clocks = <&clk_hpll>;
+ clock-frequency = <48000000>;
+ };
+
+ clk_uart: clk_uart@2c {
+ #clock-cells = <0>;
+ compatible = "aspeed,g4-uart-clock", "fixed-clock";
+ reg = <0x2c>;
+ clock-frequency = <24000000>;
+ };
pinctrl: pinctrl {
compatible = "aspeed,g4-pinctrl";
@@ -818,19 +870,6 @@
};
};
- clk_apb: clk_apb@1e6e2008 {
- #clock-cells = <0>;
- compatible = "aspeed,g4-apb-clock";
- reg = <0x1e6e2008 0x4>;
- clocks = <&clk_hpll>;
- };
-
- clk_uart: clk_uart@1e6e2008 {
- #clock-cells = <0>;
- compatible = "aspeed,uart-clock";
- reg = <0x1e6e202c 0x4>;
- };
-
sram@1e720000 {
compatible = "mmio-sram";
reg = <0x1e720000 0x8000>; // 32K
@@ -857,13 +896,13 @@
};
wdt1: wdt@1e785000 {
- compatible = "aspeed,wdt";
+ compatible = "aspeed,ast2400-wdt";
reg = <0x1e785000 0x1c>;
interrupts = <27>;
};
wdt2: wdt@1e785020 {
- compatible = "aspeed,wdt";
+ compatible = "aspeed,ast2400-wdt";
reg = <0x1e785020 0x1c>;
interrupts = <27>;
clocks = <&clk_apb>;
@@ -930,6 +969,14 @@
no-loopback-test;
status = "disabled";
};
+
+ adc: adc@1e6e9000 {
+ compatible = "aspeed,ast2400-adc";
+ reg = <0x1e6e9000 0xb0>;
+ clocks = <&clk_apb>;
+ #io-channel-cells = <1>;
+ status = "disabled";
+ };
};
};
};
diff --git a/arch/arm/boot/dts/aspeed-g5.dtsi b/arch/arm/boot/dts/aspeed-g5.dtsi
index b6596633036c..a0bea4a6ec77 100644
--- a/arch/arm/boot/dts/aspeed-g5.dtsi
+++ b/arch/arm/boot/dts/aspeed-g5.dtsi
@@ -24,6 +24,69 @@
#size-cells = <1>;
ranges;
+ fmc: flash-controller@1e620000 {
+ reg = < 0x1e620000 0xc4
+ 0x20000000 0x10000000 >;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "aspeed,ast2500-fmc";
+ status = "disabled";
+ interrupts = <19>;
+ flash@0 {
+ reg = < 0 >;
+ compatible = "jedec,spi-nor";
+ status = "disabled";
+ };
+ flash@1 {
+ reg = < 1 >;
+ compatible = "jedec,spi-nor";
+ status = "disabled";
+ };
+ flash@2 {
+ reg = < 2 >;
+ compatible = "jedec,spi-nor";
+ status = "disabled";
+ };
+ };
+
+ spi1: flash-controller@1e630000 {
+ reg = < 0x1e630000 0xc4
+ 0x30000000 0x08000000 >;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "aspeed,ast2500-spi";
+ status = "disabled";
+ flash@0 {
+ reg = < 0 >;
+ compatible = "jedec,spi-nor";
+ status = "disabled";
+ };
+ flash@1 {
+ reg = < 1 >;
+ compatible = "jedec,spi-nor";
+ status = "disabled";
+ };
+ };
+
+ spi2: flash-controller@1e631000 {
+ reg = < 0x1e631000 0xc4
+ 0x38000000 0x08000000 >;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "aspeed,ast2500-spi";
+ status = "disabled";
+ flash@0 {
+ reg = < 0 >;
+ compatible = "jedec,spi-nor";
+ status = "disabled";
+ };
+ flash@1 {
+ reg = < 1 >;
+ compatible = "jedec,spi-nor";
+ status = "disabled";
+ };
+ };
+
vic: interrupt-controller@1e6c0080 {
compatible = "aspeed,ast2400-vic";
interrupt-controller;
@@ -52,15 +115,49 @@
#size-cells = <1>;
ranges;
- clk_clkin: clk_clkin@1e6e2070 {
- #clock-cells = <0>;
- compatible = "aspeed,g5-clkin-clock";
- reg = <0x1e6e2070 0x04>;
- };
-
syscon: syscon@1e6e2000 {
compatible = "aspeed,g5-scu", "syscon", "simple-mfd";
reg = <0x1e6e2000 0x1a8>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ clk_clkin: clk_clkin@70 {
+ #clock-cells = <0>;
+ compatible = "aspeed,g5-clkin-clock", "fixed-clock";
+ reg = <0x70>;
+ clock-frequency = <24000000>;
+ };
+
+ clk_hpll: clk_hpll@24 {
+ #clock-cells = <0>;
+ compatible = "aspeed,g5-hpll-clock", "fixed-clock";
+ reg = <0x24>;
+ clocks = <&clk_clkin>;
+ clock-frequency = <792000000>;
+ };
+
+ clk_ahb: clk_ahb@70 {
+ #clock-cells = <0>;
+ compatible = "aspeed,g5-ahb-clock", "fixed-clock";
+ reg = <0x70>;
+ clocks = <&clk_hpll>;
+ clock-frequency = <198000000>;
+ };
+
+ clk_apb: clk_apb@08 {
+ #clock-cells = <0>;
+ compatible = "aspeed,g5-apb-clock", "fixed-clock";
+ reg = <0x08>;
+ clocks = <&clk_hpll>;
+ clock-frequency = <24750000>;
+ };
+
+ clk_uart: clk_uart@2c {
+ #clock-cells = <0>;
+ compatible = "aspeed,uart-clock", "fixed-clock";
+ reg = <0x2c>;
+ clock-frequency = <24000000>;
+ };
pinctrl: pinctrl {
compatible = "aspeed,g5-pinctrl";
@@ -285,7 +382,6 @@
function = "LAD0";
groups = "LAD0";
};
-
pinctrl_lad1_default: lad1_default {
function = "LAD1";
groups = "LAD1";
@@ -872,33 +968,7 @@
};
};
- };
-
- clk_hpll: clk_hpll@1e6e2024 {
- #clock-cells = <0>;
- compatible = "aspeed,g5-hpll-clock";
- reg = <0x1e6e2024 0x4>;
- clocks = <&clk_clkin>;
- };
-
- clk_ahb: clk_ahb@1e6e2070 {
- #clock-cells = <0>;
- compatible = "aspeed,g5-ahb-clock";
- reg = <0x1e6e2070 0x4>;
- clocks = <&clk_hpll>;
- };
- clk_apb: clk_apb@1e6e2008 {
- #clock-cells = <0>;
- compatible = "aspeed,g5-apb-clock";
- reg = <0x1e6e2008 0x4>;
- clocks = <&clk_hpll>;
- };
-
- clk_uart: clk_uart@1e6e2008 {
- #clock-cells = <0>;
- compatible = "aspeed,uart-clock";
- reg = <0x1e6e202c 0x4>;
};
gfx: display@1e6e6000 {
@@ -934,21 +1004,21 @@
wdt1: wdt@1e785000 {
- compatible = "aspeed,wdt";
- reg = <0x1e785000 0x1c>;
+ compatible = "aspeed,ast2500-wdt";
+ reg = <0x1e785000 0x20>;
interrupts = <27>;
};
wdt2: wdt@1e785020 {
- compatible = "aspeed,wdt";
- reg = <0x1e785020 0x1c>;
+ compatible = "aspeed,ast2500-wdt";
+ reg = <0x1e785020 0x20>;
interrupts = <27>;
status = "disabled";
};
wdt3: wdt@1e785040 {
- compatible = "aspeed,wdt";
- reg = <0x1e785074 0x1c>;
+ compatible = "aspeed,ast2500-wdt";
+ reg = <0x1e785040 0x20>;
status = "disabled";
};
@@ -1042,6 +1112,14 @@
no-loopback-test;
status = "disabled";
};
+
+ adc: adc@1e6e9000 {
+ compatible = "aspeed,ast2500-adc";
+ reg = <0x1e6e9000 0xb0>;
+ clocks = <&clk_apb>;
+ #io-channel-cells = <1>;
+ status = "disabled";
+ };
};
};
};
diff --git a/arch/arm/boot/dts/at91-sama5d2_xplained.dts b/arch/arm/boot/dts/at91-sama5d2_xplained.dts
index 9f7f8a7d8ff9..0bef9e0b89c6 100644
--- a/arch/arm/boot/dts/at91-sama5d2_xplained.dts
+++ b/arch/arm/boot/dts/at91-sama5d2_xplained.dts
@@ -246,6 +246,7 @@
shdwc@f8048010 {
atmel,shdwc-debouncer = <976>;
+ atmel,wakeup-rtc-timer;
input@0 {
reg = <0>;
diff --git a/arch/arm/boot/dts/at91-sama5d3_xplained.dts b/arch/arm/boot/dts/at91-sama5d3_xplained.dts
index c51fc652f6c7..5a53fcf542ab 100644
--- a/arch/arm/boot/dts/at91-sama5d3_xplained.dts
+++ b/arch/arm/boot/dts/at91-sama5d3_xplained.dts
@@ -162,9 +162,10 @@
};
adc0: adc@f8018000 {
+ atmel,adc-vref = <3300>;
+ atmel,adc-channels-used = <0xfe>;
pinctrl-0 = <
&pinctrl_adc0_adtrg
- &pinctrl_adc0_ad0
&pinctrl_adc0_ad1
&pinctrl_adc0_ad2
&pinctrl_adc0_ad3
@@ -172,8 +173,6 @@
&pinctrl_adc0_ad5
&pinctrl_adc0_ad6
&pinctrl_adc0_ad7
- &pinctrl_adc0_ad8
- &pinctrl_adc0_ad9
>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/at91-tse850-3.dts b/arch/arm/boot/dts/at91-tse850-3.dts
index 669a2c6bdefc..498fba3e52b5 100644
--- a/arch/arm/boot/dts/at91-tse850-3.dts
+++ b/arch/arm/boot/dts/at91-tse850-3.dts
@@ -86,16 +86,43 @@
#io-channel-cells = <1>;
};
- envelope-detector {
+ env_det: envelope-detector {
compatible = "axentia,tse850-envelope-detector";
io-channels = <&dac 0>;
io-channel-names = "dac";
+ #io-channel-cells = <1>;
interrupt-parent = <&pioA>;
interrupts = <3 IRQ_TYPE_EDGE_RISING>;
interrupt-names = "comp";
};
+ mux: mux-controller {
+ compatible = "gpio-mux";
+ #mux-control-cells = <0>;
+
+ mux-gpios = <&pioA 0 GPIO_ACTIVE_HIGH>,
+ <&pioA 1 GPIO_ACTIVE_HIGH>,
+ <&pioA 2 GPIO_ACTIVE_HIGH>;
+ idle-state = <0>;
+ };
+
+ envelope-detector-mux {
+ compatible = "io-channel-mux";
+ io-channels = <&env_det 0>;
+ io-channel-names = "parent";
+
+ mux-controls = <&mux>;
+
+ channels = "", "",
+ "sync-1",
+ "in",
+ "out",
+ "sync-2",
+ "sys-reg",
+ "ana-reg";
+ };
+
leds {
compatible = "gpio-leds";
diff --git a/arch/arm/boot/dts/at91sam9261.dtsi b/arch/arm/boot/dts/at91sam9261.dtsi
index 3fe77c38bd0d..7e80acda8f69 100644
--- a/arch/arm/boot/dts/at91sam9261.dtsi
+++ b/arch/arm/boot/dts/at91sam9261.dtsi
@@ -263,7 +263,7 @@
};
matrix: matrix@ffffee00 {
- compatible = "atmel,at91sam9260-bus-matrix", "syscon";
+ compatible = "atmel,at91sam9261-matrix", "syscon";
reg = <0xffffee00 0x200>;
};
diff --git a/arch/arm/boot/dts/at91sam9x5ek.dtsi b/arch/arm/boot/dts/at91sam9x5ek.dtsi
index 696b8ba064a6..9d2bbc41a7b0 100644
--- a/arch/arm/boot/dts/at91sam9x5ek.dtsi
+++ b/arch/arm/boot/dts/at91sam9x5ek.dtsi
@@ -116,7 +116,7 @@
};
spi0: spi@f0000000 {
- status = "okay";
+ status = "disabled"; /* conflicts with mmc1 */
cs-gpios = <&pioA 14 0>, <0>, <0>, <0>;
m25p80@0 {
compatible = "atmel,at25df321a";
diff --git a/arch/arm/boot/dts/axp209.dtsi b/arch/arm/boot/dts/axp209.dtsi
index 675bb0f30825..9677dd5cf6b6 100644
--- a/arch/arm/boot/dts/axp209.dtsi
+++ b/arch/arm/boot/dts/axp209.dtsi
@@ -53,6 +53,11 @@
interrupt-controller;
#interrupt-cells = <1>;
+ ac_power_supply: ac-power-supply {
+ compatible = "x-powers,axp202-ac-power-supply";
+ status = "disabled";
+ };
+
axp_gpio: gpio {
compatible = "x-powers,axp209-gpio";
gpio-controller;
diff --git a/arch/arm/boot/dts/axp22x.dtsi b/arch/arm/boot/dts/axp22x.dtsi
index 458b6681e3ec..67331c5f1714 100644
--- a/arch/arm/boot/dts/axp22x.dtsi
+++ b/arch/arm/boot/dts/axp22x.dtsi
@@ -52,6 +52,11 @@
interrupt-controller;
#interrupt-cells = <1>;
+ ac_power_supply: ac-power-supply {
+ compatible = "x-powers,axp221-ac-power-supply";
+ status = "disabled";
+ };
+
regulators {
/* Default work frequency for buck regulators */
x-powers,dcdc-freq = <3000>;
diff --git a/arch/arm/boot/dts/bcm-cygnus.dtsi b/arch/arm/boot/dts/bcm-cygnus.dtsi
index 8833a4c3cd96..9644fddb5e3c 100644
--- a/arch/arm/boot/dts/bcm-cygnus.dtsi
+++ b/arch/arm/boot/dts/bcm-cygnus.dtsi
@@ -205,7 +205,7 @@
status = "disabled";
msi-parent = <&msi0>;
- msi0: msi@18012000 {
+ msi0: msi-controller {
compatible = "brcm,iproc-msi";
msi-controller;
interrupt-parent = <&gic>;
@@ -240,7 +240,7 @@
status = "disabled";
msi-parent = <&msi1>;
- msi1: msi@18013000 {
+ msi1: msi-controller {
compatible = "brcm,iproc-msi";
msi-controller;
interrupt-parent = <&gic>;
diff --git a/arch/arm/boot/dts/bcm-nsp.dtsi b/arch/arm/boot/dts/bcm-nsp.dtsi
index 832795b0fd0f..fe6cba994a97 100644
--- a/arch/arm/boot/dts/bcm-nsp.dtsi
+++ b/arch/arm/boot/dts/bcm-nsp.dtsi
@@ -245,6 +245,15 @@
status = "disabled";
};
+ mailbox: mailbox@25000 {
+ compatible = "brcm,iproc-fa2-mbox";
+ reg = <0x25000 0x445>;
+ interrupts = <GIC_SPI 150 IRQ_TYPE_LEVEL_HIGH>;
+ #mbox-cells = <1>;
+ brcm,rx-status-len = <32>;
+ brcm,use-bcm-hdr;
+ };
+
nand: nand@26000 {
compatible = "brcm,nand-iproc", "brcm,brcmnand-v6.1";
reg = <0x026000 0x600>,
@@ -288,6 +297,12 @@
#size-cells = <0>;
};
+ crypto@2f000 {
+ compatible = "brcm,spum-nsp-crypto";
+ reg = <0x2f000 0x900>;
+ mboxes = <&mailbox 0>;
+ };
+
gpiob: gpio@30000 {
compatible = "brcm,iproc-nsp-gpio", "brcm,iproc-gpio";
reg = <0x30000 0x50>;
@@ -306,6 +321,20 @@
status = "disabled";
};
+ ehci0: usb@2a000 {
+ compatible = "generic-ehci";
+ reg = <0x2a000 0x100>;
+ interrupts = <GIC_SPI 79 IRQ_TYPE_LEVEL_HIGH>;
+ status = "disabled";
+ };
+
+ ohci0: usb@2b000 {
+ compatible = "generic-ohci";
+ reg = <0x2b000 0x100>;
+ interrupts = <GIC_SPI 79 IRQ_TYPE_LEVEL_HIGH>;
+ status = "disabled";
+ };
+
rng: rng@33000 {
compatible = "brcm,bcm-nsp-rng";
reg = <0x33000 0x14>;
@@ -347,6 +376,7 @@
#size-cells = <0>;
interrupts = <GIC_SPI 89 IRQ_TYPE_NONE>;
clock-frequency = <100000>;
+ status = "disabled";
};
watchdog@39000 {
@@ -450,7 +480,7 @@
status = "disabled";
msi-parent = <&msi0>;
- msi0: msi@18012000 {
+ msi0: msi-controller {
compatible = "brcm,iproc-msi";
msi-controller;
interrupt-parent = <&gic>;
@@ -486,7 +516,7 @@
status = "disabled";
msi-parent = <&msi1>;
- msi1: msi@18013000 {
+ msi1: msi-controller {
compatible = "brcm,iproc-msi";
msi-controller;
interrupt-parent = <&gic>;
@@ -522,7 +552,7 @@
status = "disabled";
msi-parent = <&msi2>;
- msi2: msi@18014000 {
+ msi2: msi-controller {
compatible = "brcm,iproc-msi";
msi-controller;
interrupt-parent = <&gic>;
diff --git a/arch/arm/boot/dts/bcm2835-rpi.dtsi b/arch/arm/boot/dts/bcm2835-rpi.dtsi
index 38e6050035bc..a7b5ce133784 100644
--- a/arch/arm/boot/dts/bcm2835-rpi.dtsi
+++ b/arch/arm/boot/dts/bcm2835-rpi.dtsi
@@ -69,6 +69,12 @@
bus-width = <4>;
};
+&sdhost {
+ pinctrl-names = "default";
+ pinctrl-0 = <&sdhost_gpio48>;
+ bus-width = <4>;
+};
+
&pwm {
pinctrl-names = "default";
pinctrl-0 = <&pwm0_gpio40 &pwm1_gpio45>;
@@ -92,3 +98,11 @@
power-domains = <&power RPI_POWER_DOMAIN_VEC>;
status = "okay";
};
+
+&dsi0 {
+ power-domains = <&power RPI_POWER_DOMAIN_DSI0>;
+};
+
+&dsi1 {
+ power-domains = <&power RPI_POWER_DOMAIN_DSI1>;
+};
diff --git a/arch/arm/boot/dts/bcm283x.dtsi b/arch/arm/boot/dts/bcm283x.dtsi
index a3106aa446c6..35cea3fcaf5c 100644
--- a/arch/arm/boot/dts/bcm283x.dtsi
+++ b/arch/arm/boot/dts/bcm283x.dtsi
@@ -93,10 +93,13 @@
#clock-cells = <1>;
reg = <0x7e101000 0x2000>;
- /* CPRMAN derives everything from the platform's
- * oscillator.
+ /* CPRMAN derives almost everything from the
+ * platform's oscillator. However, the DSI
+ * pixel clocks come from the DSI analog PHY.
*/
- clocks = <&clk_osc>;
+ clocks = <&clk_osc>,
+ <&dsi0 0>, <&dsi0 1>, <&dsi0 2>,
+ <&dsi1 0>, <&dsi1 1>, <&dsi1 2>;
};
rng@7e104000 {
@@ -347,6 +350,16 @@
arm,primecell-periphid = <0x00241011>;
};
+ sdhost: mmc@7e202000 {
+ compatible = "brcm,bcm2835-sdhost";
+ reg = <0x7e202000 0x100>;
+ interrupts = <2 24>;
+ clocks = <&clocks BCM2835_CLOCK_VPU>;
+ dmas = <&dma 13>;
+ dma-names = "rx-tx";
+ status = "disabled";
+ };
+
i2s: i2s@7e203000 {
compatible = "brcm,bcm2835-i2s";
reg = <0x7e203000 0x20>,
@@ -390,6 +403,25 @@
interrupts = <2 14>; /* pwa1 */
};
+ dsi0: dsi@7e209000 {
+ compatible = "brcm,bcm2835-dsi0";
+ reg = <0x7e209000 0x78>;
+ interrupts = <2 4>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ #clock-cells = <1>;
+
+ clocks = <&clocks BCM2835_PLLA_DSI0>,
+ <&clocks BCM2835_CLOCK_DSI0E>,
+ <&clocks BCM2835_CLOCK_DSI0P>;
+ clock-names = "phy", "escape", "pixel";
+
+ clock-output-names = "dsi0_byte",
+ "dsi0_ddr2",
+ "dsi0_ddr";
+
+ };
+
thermal: thermal@7e212000 {
compatible = "brcm,bcm2835-thermal";
reg = <0x7e212000 0x8>;
@@ -456,6 +488,26 @@
interrupts = <2 1>;
};
+ dsi1: dsi@7e700000 {
+ compatible = "brcm,bcm2835-dsi1";
+ reg = <0x7e700000 0x8c>;
+ interrupts = <2 12>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ #clock-cells = <1>;
+
+ clocks = <&clocks BCM2835_PLLD_DSI1>,
+ <&clocks BCM2835_CLOCK_DSI1E>,
+ <&clocks BCM2835_CLOCK_DSI1P>;
+ clock-names = "phy", "escape", "pixel";
+
+ clock-output-names = "dsi1_byte",
+ "dsi1_ddr2",
+ "dsi1_ddr";
+
+ status = "disabled";
+ };
+
i2c1: i2c@7e804000 {
compatible = "brcm,bcm2835-i2c";
reg = <0x7e804000 0x1000>;
@@ -499,6 +551,8 @@
clocks = <&clocks BCM2835_PLLH_PIX>,
<&clocks BCM2835_CLOCK_HSM>;
clock-names = "pixel", "hdmi";
+ dmas = <&dma 17>;
+ dma-names = "audio-rx";
status = "disabled";
};
diff --git a/arch/arm/boot/dts/bcm4708-asus-rt-ac56u.dts b/arch/arm/boot/dts/bcm4708-asus-rt-ac56u.dts
index d241cee4bfcc..4175174e589a 100644
--- a/arch/arm/boot/dts/bcm4708-asus-rt-ac56u.dts
+++ b/arch/arm/boot/dts/bcm4708-asus-rt-ac56u.dts
@@ -4,7 +4,17 @@
*
* Copyright (C) 2015 Rafał Miłecki <zajec5@gmail.com>
*
- * Licensed under the GNU/GPL. See COPYING for details.
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
+ * REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+ * AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
+ * INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+ * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+ * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
*/
/dts-v1/;
@@ -31,19 +41,16 @@
usb3 {
label = "bcm53xx:blue:usb3";
gpios = <&chipcommon 0 GPIO_ACTIVE_LOW>;
- linux,default-trigger = "default-off";
};
wan {
label = "bcm53xx:blue:wan";
gpios = <&chipcommon 1 GPIO_ACTIVE_LOW>;
- linux,default-trigger = "default-off";
};
lan {
label = "bcm53xx:blue:lan";
gpios = <&chipcommon 2 GPIO_ACTIVE_LOW>;
- linux,default-trigger = "default-off";
};
power {
@@ -61,14 +68,12 @@
2ghz {
label = "bcm53xx:blue:2ghz";
gpios = <&chipcommon 6 GPIO_ACTIVE_LOW>;
- linux,default-trigger = "default-off";
};
usb2 {
label = "bcm53xx:blue:usb2";
gpios = <&chipcommon 14 GPIO_ACTIVE_LOW>;
- linux,default-trigger = "default-off";
};
};
diff --git a/arch/arm/boot/dts/bcm4708-asus-rt-ac68u.dts b/arch/arm/boot/dts/bcm4708-asus-rt-ac68u.dts
index b0e62042f62f..8fa033fea959 100644
--- a/arch/arm/boot/dts/bcm4708-asus-rt-ac68u.dts
+++ b/arch/arm/boot/dts/bcm4708-asus-rt-ac68u.dts
@@ -4,7 +4,17 @@
*
* Copyright (C) 2015 Rafał Miłecki <zajec5@gmail.com>
*
- * Licensed under the GNU/GPL. See COPYING for details.
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
+ * REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+ * AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
+ * INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+ * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+ * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
*/
/dts-v1/;
@@ -31,7 +41,6 @@
usb2 {
label = "bcm53xx:blue:usb2";
gpios = <&chipcommon 0 GPIO_ACTIVE_LOW>;
- linux,default-trigger = "default-off";
};
power {
@@ -49,7 +58,6 @@
usb3 {
label = "bcm53xx:blue:usb3";
gpios = <&chipcommon 14 GPIO_ACTIVE_LOW>;
- linux,default-trigger = "default-off";
};
};
diff --git a/arch/arm/boot/dts/bcm4708-buffalo-wzr-1750dhp.dts b/arch/arm/boot/dts/bcm4708-buffalo-wzr-1750dhp.dts
index c9ba6b964b38..62e1427b3f10 100644
--- a/arch/arm/boot/dts/bcm4708-buffalo-wzr-1750dhp.dts
+++ b/arch/arm/boot/dts/bcm4708-buffalo-wzr-1750dhp.dts
@@ -52,13 +52,11 @@
usb {
label = "bcm53xx:blue:usb";
gpios = <&hc595 0 GPIO_ACTIVE_HIGH>;
- linux,default-trigger = "default-off";
};
power0 {
label = "bcm53xx:red:power";
gpios = <&hc595 1 GPIO_ACTIVE_HIGH>;
- linux,default-trigger = "default-off";
};
power1 {
@@ -76,7 +74,6 @@
router1 {
label = "bcm53xx:amber:router";
gpios = <&hc595 4 GPIO_ACTIVE_HIGH>;
- linux,default-trigger = "default-off";
};
wan {
@@ -88,13 +85,11 @@
wireless0 {
label = "bcm53xx:blue:wireless";
gpios = <&hc595 6 GPIO_ACTIVE_HIGH>;
- linux,default-trigger = "default-off";
};
wireless1 {
label = "bcm53xx:amber:wireless";
gpios = <&hc595 7 GPIO_ACTIVE_HIGH>;
- linux,default-trigger = "default-off";
};
};
diff --git a/arch/arm/boot/dts/bcm4708-linksys-ea6300-v1.dts b/arch/arm/boot/dts/bcm4708-linksys-ea6300-v1.dts
new file mode 100644
index 000000000000..126ab5867772
--- /dev/null
+++ b/arch/arm/boot/dts/bcm4708-linksys-ea6300-v1.dts
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2017 Rafał Miłecki <rafal@milecki.pl>
+ *
+ * Licensed under the ISC license.
+ */
+
+/dts-v1/;
+
+#include "bcm4708.dtsi"
+#include "bcm5301x-nand-cs0-bch8.dtsi"
+
+/ {
+ compatible = "linksys,ea6300-v1", "brcm,bcm4708";
+ model = "Linksys EA6300 V1";
+
+ chosen {
+ bootargs = "console=ttyS0,115200";
+ };
+
+ memory {
+ reg = <0x00000000 0x08000000>;
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ wps {
+ label = "WPS";
+ linux,code = <KEY_WPS_BUTTON>;
+ gpios = <&chipcommon 7 GPIO_ACTIVE_LOW>;
+ };
+
+ restart {
+ label = "Reset";
+ linux,code = <KEY_RESTART>;
+ gpios = <&chipcommon 11 GPIO_ACTIVE_LOW>;
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/bcm4708-netgear-r6250.dts b/arch/arm/boot/dts/bcm4708-netgear-r6250.dts
index b9f66c0fae27..a5647efe4118 100644
--- a/arch/arm/boot/dts/bcm4708-netgear-r6250.dts
+++ b/arch/arm/boot/dts/bcm4708-netgear-r6250.dts
@@ -43,19 +43,16 @@
power1 {
label = "bcm53xx:amber:power";
gpios = <&chipcommon 3 GPIO_ACTIVE_LOW>;
- linux,default-trigger = "default-off";
};
usb {
label = "bcm53xx:blue:usb";
gpios = <&chipcommon 8 GPIO_ACTIVE_LOW>;
- linux,default-trigger = "default-off";
};
wireless {
label = "bcm53xx:blue:wireless";
gpios = <&chipcommon 11 GPIO_ACTIVE_LOW>;
- linux,default-trigger = "default-off";
};
};
diff --git a/arch/arm/boot/dts/bcm4708-netgear-r6300-v2.dts b/arch/arm/boot/dts/bcm4708-netgear-r6300-v2.dts
index ae0199f6c7a2..bb66cebe0bd8 100644
--- a/arch/arm/boot/dts/bcm4708-netgear-r6300-v2.dts
+++ b/arch/arm/boot/dts/bcm4708-netgear-r6300-v2.dts
@@ -4,7 +4,17 @@
*
* Copyright (C) 2014 Rafał Miłecki <zajec5@gmail.com>
*
- * Licensed under the GNU/GPL. See COPYING for details.
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
+ * REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+ * AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
+ * INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+ * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+ * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
*/
/dts-v1/;
@@ -37,7 +47,6 @@
power0 {
label = "bcm53xx:green:power";
gpios = <&chipcommon 2 GPIO_ACTIVE_LOW>;
- linux,default-trigger = "default-off";
};
power1 {
@@ -49,13 +58,11 @@
usb {
label = "bcm53xx:blue:usb";
gpios = <&chipcommon 8 GPIO_ACTIVE_LOW>;
- linux,default-trigger = "default-off";
};
wireless {
label = "bcm53xx:blue:wireless";
gpios = <&chipcommon 11 GPIO_ACTIVE_LOW>;
- linux,default-trigger = "default-off";
};
};
diff --git a/arch/arm/boot/dts/bcm4708-smartrg-sr400ac.dts b/arch/arm/boot/dts/bcm4708-smartrg-sr400ac.dts
index 36b628b190d7..19ee924d7d53 100644
--- a/arch/arm/boot/dts/bcm4708-smartrg-sr400ac.dts
+++ b/arch/arm/boot/dts/bcm4708-smartrg-sr400ac.dts
@@ -37,61 +37,51 @@
power-amber {
label = "bcm53xx:amber:power";
gpios = <&chipcommon 2 GPIO_ACTIVE_HIGH>;
- linux,default-trigger = "default-off";
};
usb2 {
label = "bcm53xx:white:usb2";
gpios = <&chipcommon 3 GPIO_ACTIVE_HIGH>;
- linux,default-trigger = "default-off";
};
usb3-white {
label = "bcm53xx:white:usb3";
gpios = <&chipcommon 4 GPIO_ACTIVE_HIGH>;
- linux,default-trigger = "default-off";
};
usb3-green {
label = "bcm53xx:green:usb3";
gpios = <&chipcommon 5 GPIO_ACTIVE_HIGH>;
- linux,default-trigger = "default-off";
};
wps {
label = "bcm53xx:white:wps";
gpios = <&chipcommon 6 GPIO_ACTIVE_HIGH>;
- linux,default-trigger = "default-off";
};
status-red {
label = "bcm53xx:red:status";
gpios = <&chipcommon 8 GPIO_ACTIVE_HIGH>;
- linux,default-trigger = "default-off";
};
status-green {
label = "bcm53xx:green:status";
gpios = <&chipcommon 9 GPIO_ACTIVE_HIGH>;
- linux,default-trigger = "default-off";
};
status-blue {
label = "bcm53xx:blue:status";
gpios = <&chipcommon 10 GPIO_ACTIVE_HIGH>;
- linux,default-trigger = "default-off";
};
wan-white {
label = "bcm53xx:white:wan";
gpios = <&chipcommon 12 GPIO_ACTIVE_HIGH>;
- linux,default-trigger = "default-off";
};
wan-red {
label = "bcm53xx:red:wan";
gpios = <&chipcommon 13 GPIO_ACTIVE_HIGH>;
- linux,default-trigger = "default-off";
};
};
diff --git a/arch/arm/boot/dts/bcm4708.dtsi b/arch/arm/boot/dts/bcm4708.dtsi
index d0eec099f1f8..1a19e97a987d 100644
--- a/arch/arm/boot/dts/bcm4708.dtsi
+++ b/arch/arm/boot/dts/bcm4708.dtsi
@@ -12,6 +12,14 @@
/ {
compatible = "brcm,bcm4708";
+ aliases {
+ serial0 = &uart0;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
cpus {
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm/boot/dts/bcm47081-asus-rt-n18u.dts b/arch/arm/boot/dts/bcm47081-asus-rt-n18u.dts
index db8608be0ee7..0800a964f2fe 100644
--- a/arch/arm/boot/dts/bcm47081-asus-rt-n18u.dts
+++ b/arch/arm/boot/dts/bcm47081-asus-rt-n18u.dts
@@ -4,7 +4,17 @@
*
* Copyright (C) 2014 Rafał Miłecki <zajec5@gmail.com>
*
- * Licensed under the GNU/GPL. See COPYING for details.
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
+ * REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+ * AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
+ * INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+ * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+ * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
*/
/dts-v1/;
@@ -37,7 +47,6 @@
usb2 {
label = "bcm53xx:blue:usb2";
gpios = <&chipcommon 3 GPIO_ACTIVE_LOW>;
- linux,default-trigger = "default-off";
};
wan {
@@ -55,7 +64,6 @@
usb3 {
label = "bcm53xx:blue:usb3";
gpios = <&chipcommon 14 GPIO_ACTIVE_LOW>;
- linux,default-trigger = "default-off";
};
};
diff --git a/arch/arm/boot/dts/bcm47081-buffalo-wzr-600dhp2.dts b/arch/arm/boot/dts/bcm47081-buffalo-wzr-600dhp2.dts
index d51586d95b9a..c2af33eb47de 100644
--- a/arch/arm/boot/dts/bcm47081-buffalo-wzr-600dhp2.dts
+++ b/arch/arm/boot/dts/bcm47081-buffalo-wzr-600dhp2.dts
@@ -4,7 +4,17 @@
*
* Copyright (C) 2014 Rafał Miłecki <zajec5@gmail.com>
*
- * Licensed under the GNU/GPL. See COPYING for details.
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
+ * REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+ * AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
+ * INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+ * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+ * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
*/
/dts-v1/;
@@ -58,7 +68,6 @@
power1 {
label = "bcm53xx:red:power";
gpios = <&hc595 2 GPIO_ACTIVE_HIGH>;
- linux,default-trigger = "default-off";
};
router0 {
@@ -70,7 +79,6 @@
router1 {
label = "bcm53xx:amber:router";
gpios = <&hc595 4 GPIO_ACTIVE_HIGH>;
- linux,default-trigger = "default-off";
};
wan {
@@ -82,13 +90,11 @@
wireless0 {
label = "bcm53xx:green:wireless";
gpios = <&hc595 6 GPIO_ACTIVE_HIGH>;
- linux,default-trigger = "default-off";
};
wireless1 {
label = "bcm53xx:amber:wireless";
gpios = <&hc595 7 GPIO_ACTIVE_HIGH>;
- linux,default-trigger = "default-off";
};
};
diff --git a/arch/arm/boot/dts/bcm47081-buffalo-wzr-900dhp.dts b/arch/arm/boot/dts/bcm47081-buffalo-wzr-900dhp.dts
index de041b8c3342..8bef6429feee 100644
--- a/arch/arm/boot/dts/bcm47081-buffalo-wzr-900dhp.dts
+++ b/arch/arm/boot/dts/bcm47081-buffalo-wzr-900dhp.dts
@@ -4,7 +4,17 @@
*
* Copyright (C) 2015 Rafał Miłecki <zajec5@gmail.com>
*
- * Licensed under the GNU/GPL. See COPYING for details.
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
+ * REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+ * AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
+ * INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+ * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+ * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
*/
/dts-v1/;
diff --git a/arch/arm/boot/dts/bcm47081-tplink-archer-c5-v2.dts b/arch/arm/boot/dts/bcm47081-tplink-archer-c5-v2.dts
new file mode 100644
index 000000000000..a854a5174b7f
--- /dev/null
+++ b/arch/arm/boot/dts/bcm47081-tplink-archer-c5-v2.dts
@@ -0,0 +1,98 @@
+/*
+ * Copyright (C) 2017 Rafał Miłecki <rafal@milecki.pl>
+ *
+ * Licensed under the ISC license.
+ */
+
+/dts-v1/;
+
+#include "bcm47081.dtsi"
+
+/ {
+ compatible = "tplink,archer-c5-v2", "brcm,bcm47081", "brcm,bcm4708";
+ model = "TP-LINK Archer C5 V2";
+
+ chosen {
+ bootargs = "earlycon";
+ };
+
+ memory {
+ reg = <0x00000000 0x08000000>;
+ };
+
+ leds {
+ compatible = "gpio-leds";
+
+ 2ghz {
+ label = "bcm53xx:green:2ghz";
+ gpios = <&chipcommon 0 GPIO_ACTIVE_HIGH>;
+ };
+
+ lan {
+ label = "bcm53xx:green:lan";
+ gpios = <&chipcommon 1 GPIO_ACTIVE_HIGH>;
+ };
+
+ usb2-port1 {
+ label = "bcm53xx:green:usb2-port1";
+ gpios = <&chipcommon 2 GPIO_ACTIVE_HIGH>;
+ };
+
+ power {
+ label = "bcm53xx:green:power";
+ gpios = <&chipcommon 4 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "default-on";
+ };
+
+ wan-green {
+ label = "bcm53xx:green:wan";
+ gpios = <&chipcommon 5 GPIO_ACTIVE_HIGH>;
+ };
+
+ wps {
+ label = "bcm53xx:green:wps";
+ gpios = <&chipcommon 6 GPIO_ACTIVE_HIGH>;
+ };
+
+ wan-amber {
+ label = "bcm53xx:amber:wan";
+ gpios = <&chipcommon 8 GPIO_ACTIVE_HIGH>;
+ };
+
+ 5ghz {
+ label = "bcm53xx:green:5ghz";
+ gpios = <&chipcommon 12 GPIO_ACTIVE_HIGH>;
+ };
+
+ usb2-port2 {
+ label = "bcm53xx:green:usb2-port2";
+ gpios = <&chipcommon 13 GPIO_ACTIVE_HIGH>;
+ };
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ rfkill {
+ label = "WiFi";
+ linux,code = <KEY_RFKILL>;
+ gpios = <&chipcommon 3 GPIO_ACTIVE_LOW>;
+ };
+
+ restart {
+ label = "Reset";
+ linux,code = <KEY_RESTART>;
+ gpios = <&chipcommon 7 GPIO_ACTIVE_LOW>;
+ };
+ };
+};
+
+&spi_nor {
+ status = "okay";
+};
+
+&usb2 {
+ vcc-gpio = <&chipcommon 9 GPIO_ACTIVE_HIGH>;
+};
diff --git a/arch/arm/boot/dts/bcm47081.dtsi b/arch/arm/boot/dts/bcm47081.dtsi
index c5f7619af4a6..9829d044aaf4 100644
--- a/arch/arm/boot/dts/bcm47081.dtsi
+++ b/arch/arm/boot/dts/bcm47081.dtsi
@@ -4,7 +4,17 @@
*
* Copyright © 2014 Rafał Miłecki <zajec5@gmail.com>
*
- * Licensed under the GNU/GPL. See COPYING for details.
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
+ * REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+ * AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
+ * INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+ * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+ * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
*/
#include "bcm5301x.dtsi"
@@ -12,6 +22,14 @@
/ {
compatible = "brcm,bcm47081";
+ aliases {
+ serial0 = &uart0;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
cpus {
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm/boot/dts/bcm4709-asus-rt-ac87u.dts b/arch/arm/boot/dts/bcm4709-asus-rt-ac87u.dts
index eaca6876db0f..df473cc41572 100644
--- a/arch/arm/boot/dts/bcm4709-asus-rt-ac87u.dts
+++ b/arch/arm/boot/dts/bcm4709-asus-rt-ac87u.dts
@@ -4,7 +4,17 @@
*
* Copyright (C) 2015 Rafał Miłecki <zajec5@gmail.com>
*
- * Licensed under the GNU/GPL. See COPYING for details.
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
+ * REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+ * AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
+ * INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+ * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+ * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
*/
/dts-v1/;
@@ -31,7 +41,6 @@
wps {
label = "bcm53xx:blue:wps";
gpios = <&chipcommon 1 GPIO_ACTIVE_LOW>;
- linux,default-trigger = "default-off";
};
power {
@@ -43,7 +52,6 @@
wan {
label = "bcm53xx:red:wan";
gpios = <&chipcommon 5 GPIO_ACTIVE_LOW>;
- linux,default-trigger = "default-off";
};
};
diff --git a/arch/arm/boot/dts/bcm4709-buffalo-wxr-1900dhp.dts b/arch/arm/boot/dts/bcm4709-buffalo-wxr-1900dhp.dts
index b32957ca9443..92058c73ee59 100644
--- a/arch/arm/boot/dts/bcm4709-buffalo-wxr-1900dhp.dts
+++ b/arch/arm/boot/dts/bcm4709-buffalo-wxr-1900dhp.dts
@@ -31,13 +31,11 @@
usb {
label = "bcm53xx:green:usb";
gpios = <&chipcommon 4 GPIO_ACTIVE_HIGH>;
- linux,default-trigger = "default-off";
};
power-amber {
label = "bcm53xx:amber:power";
gpios = <&chipcommon 5 GPIO_ACTIVE_HIGH>;
- linux,default-trigger = "default-off";
};
power-white {
@@ -49,37 +47,31 @@
router-amber {
label = "bcm53xx:amber:router";
gpios = <&chipcommon 7 GPIO_ACTIVE_HIGH>;
- linux,default-trigger = "default-off";
};
router-white {
label = "bcm53xx:white:router";
gpios = <&chipcommon 8 GPIO_ACTIVE_HIGH>;
- linux,default-trigger = "default-off";
};
wan-amber {
label = "bcm53xx:amber:wan";
gpios = <&chipcommon 9 GPIO_ACTIVE_HIGH>;
- linux,default-trigger = "default-off";
};
wan-white {
label = "bcm53xx:white:wan";
gpios = <&chipcommon 10 GPIO_ACTIVE_HIGH>;
- linux,default-trigger = "default-off";
};
wireless-amber {
label = "bcm53xx:amber:wireless";
gpios = <&chipcommon 11 GPIO_ACTIVE_HIGH>;
- linux,default-trigger = "default-off";
};
wireless-white {
label = "bcm53xx:white:wireless";
gpios = <&chipcommon 12 GPIO_ACTIVE_HIGH>;
- linux,default-trigger = "default-off";
};
};
diff --git a/arch/arm/boot/dts/bcm4709-linksys-ea9200.dts b/arch/arm/boot/dts/bcm4709-linksys-ea9200.dts
new file mode 100644
index 000000000000..3d1d9c2c4efc
--- /dev/null
+++ b/arch/arm/boot/dts/bcm4709-linksys-ea9200.dts
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2017 Rafał Miłecki <rafal@milecki.pl>
+ *
+ * Licensed under the ISC license.
+ */
+
+/dts-v1/;
+
+#include "bcm4709.dtsi"
+#include "bcm5301x-nand-cs0-bch8.dtsi"
+
+/ {
+ compatible = "linksys,ea9200", "brcm,bcm4709", "brcm,bcm4708";
+ model = "Linksys EA9200";
+
+ chosen {
+ bootargs = "console=ttyS0,115200";
+ };
+
+ memory {
+ reg = <0x00000000 0x08000000
+ 0x88000000 0x08000000>;
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ wps {
+ label = "WPS";
+ linux,code = <KEY_WPS_BUTTON>;
+ gpios = <&chipcommon 3 GPIO_ACTIVE_LOW>;
+ };
+
+ restart {
+ label = "Reset";
+ linux,code = <KEY_RESTART>;
+ gpios = <&chipcommon 17 GPIO_ACTIVE_LOW>;
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/bcm4709-netgear-r7000.dts b/arch/arm/boot/dts/bcm4709-netgear-r7000.dts
index f459a98a72c6..f43ab4721456 100644
--- a/arch/arm/boot/dts/bcm4709-netgear-r7000.dts
+++ b/arch/arm/boot/dts/bcm4709-netgear-r7000.dts
@@ -4,7 +4,17 @@
*
* Copyright (C) 2015 Rafał Miłecki <zajec5@gmail.com>
*
- * Licensed under the GNU/GPL. See COPYING for details.
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
+ * REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+ * AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
+ * INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+ * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+ * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
*/
/dts-v1/;
@@ -37,43 +47,36 @@
power-amber {
label = "bcm53xx:amber:power";
gpios = <&chipcommon 3 GPIO_ACTIVE_LOW>;
- linux,default-trigger = "default-off";
};
5ghz {
label = "bcm53xx:white:5ghz";
gpios = <&chipcommon 12 GPIO_ACTIVE_LOW>;
- linux,default-trigger = "default-off";
};
2ghz {
label = "bcm53xx:white:2ghz";
gpios = <&chipcommon 13 GPIO_ACTIVE_LOW>;
- linux,default-trigger = "default-off";
};
wps {
label = "bcm53xx:white:wps";
gpios = <&chipcommon 14 GPIO_ACTIVE_HIGH>;
- linux,default-trigger = "default-off";
};
wireless {
label = "bcm53xx:white:wireless";
gpios = <&chipcommon 15 GPIO_ACTIVE_HIGH>;
- linux,default-trigger = "default-off";
};
usb3 {
label = "bcm53xx:white:usb3";
gpios = <&chipcommon 17 GPIO_ACTIVE_LOW>;
- linux,default-trigger = "default-off";
};
usb2 {
label = "bcm53xx:white:usb2";
gpios = <&chipcommon 18 GPIO_ACTIVE_LOW>;
- linux,default-trigger = "default-off";
};
};
diff --git a/arch/arm/boot/dts/bcm4709-netgear-r8000.dts b/arch/arm/boot/dts/bcm4709-netgear-r8000.dts
index 8e39a84e5bf9..d266131652ad 100644
--- a/arch/arm/boot/dts/bcm4709-netgear-r8000.dts
+++ b/arch/arm/boot/dts/bcm4709-netgear-r8000.dts
@@ -4,7 +4,17 @@
*
* Copyright (C) 2015 Rafał Miłecki <zajec5@gmail.com>
*
- * Licensed under the GNU/GPL. See COPYING for details.
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
+ * REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+ * AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
+ * INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+ * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+ * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
*/
/dts-v1/;
@@ -28,58 +38,61 @@
leds {
compatible = "gpio-leds";
- power0 {
+ power-white {
label = "bcm53xx:white:power";
gpios = <&chipcommon 2 GPIO_ACTIVE_LOW>;
linux,default-trigger = "default-on";
};
- power1 {
+ power-amber {
label = "bcm53xx:amber:power";
gpios = <&chipcommon 3 GPIO_ACTIVE_LOW>;
- linux,default-trigger = "default-off";
+ };
+
+ wan-white {
+ label = "bcm53xx:white:wan";
+ gpios = <&chipcommon 8 GPIO_ACTIVE_LOW>;
+ linux,default-trigger = "default-on";
+ };
+
+ wan-amber {
+ label = "bcm53xx:amber:wan";
+ gpios = <&chipcommon 9 GPIO_ACTIVE_HIGH>;
};
5ghz-1 {
label = "bcm53xx:white:5ghz-1";
gpios = <&chipcommon 12 GPIO_ACTIVE_LOW>;
- linux,default-trigger = "default-off";
};
2ghz {
label = "bcm53xx:white:2ghz";
gpios = <&chipcommon 13 GPIO_ACTIVE_LOW>;
- linux,default-trigger = "default-off";
};
wireless {
label = "bcm53xx:white:wireless";
gpios = <&chipcommon 14 GPIO_ACTIVE_HIGH>;
- linux,default-trigger = "default-off";
};
wps {
label = "bcm53xx:white:wps";
gpios = <&chipcommon 15 GPIO_ACTIVE_HIGH>;
- linux,default-trigger = "default-off";
};
5ghz-2 {
label = "bcm53xx:white:5ghz-2";
gpios = <&chipcommon 16 GPIO_ACTIVE_LOW>;
- linux,default-trigger = "default-off";
};
usb3 {
label = "bcm53xx:white:usb3";
gpios = <&chipcommon 17 GPIO_ACTIVE_LOW>;
- linux,default-trigger = "default-off";
};
usb2 {
label = "bcm53xx:white:usb2";
gpios = <&chipcommon 18 GPIO_ACTIVE_LOW>;
- linux,default-trigger = "default-off";
};
};
@@ -105,6 +118,12 @@
linux,code = <KEY_RESTART>;
gpios = <&chipcommon 6 GPIO_ACTIVE_LOW>;
};
+
+ brightness {
+ label = "Backlight";
+ linux,code = <KEY_BRIGHTNESS_ZERO>;
+ gpios = <&chipcommon 19 GPIO_ACTIVE_LOW>;
+ };
};
};
diff --git a/arch/arm/boot/dts/bcm4709-tplink-archer-c9-v1.dts b/arch/arm/boot/dts/bcm4709-tplink-archer-c9-v1.dts
index c67bfaa0c8e8..97aa5d59a1d8 100644
--- a/arch/arm/boot/dts/bcm4709-tplink-archer-c9-v1.dts
+++ b/arch/arm/boot/dts/bcm4709-tplink-archer-c9-v1.dts
@@ -26,49 +26,41 @@
lan {
label = "bcm53xx:blue:lan";
gpios = <&chipcommon 1 GPIO_ACTIVE_HIGH>;
- linux,default-trigger = "default-off";
};
wps {
label = "bcm53xx:blue:wps";
gpios = <&chipcommon 2 GPIO_ACTIVE_HIGH>;
- linux,default-trigger = "default-off";
};
2ghz {
label = "bcm53xx:blue:2ghz";
gpios = <&chipcommon 4 GPIO_ACTIVE_HIGH>;
- linux,default-trigger = "default-off";
};
5ghz {
label = "bcm53xx:blue:5ghz";
gpios = <&chipcommon 5 GPIO_ACTIVE_HIGH>;
- linux,default-trigger = "default-off";
};
usb3 {
label = "bcm53xx:blue:usb3";
gpios = <&chipcommon 6 GPIO_ACTIVE_HIGH>;
- linux,default-trigger = "default-off";
};
usb2 {
label = "bcm53xx:blue:usb2";
gpios = <&chipcommon 7 GPIO_ACTIVE_HIGH>;
- linux,default-trigger = "default-off";
};
wan-blue {
label = "bcm53xx:blue:wan";
gpios = <&chipcommon 14 GPIO_ACTIVE_HIGH>;
- linux,default-trigger = "default-off";
};
wan-amber {
label = "bcm53xx:amber:wan";
gpios = <&chipcommon 15 GPIO_ACTIVE_HIGH>;
- linux,default-trigger = "default-off";
};
power {
diff --git a/arch/arm/boot/dts/bcm47094-dlink-dir-885l.dts b/arch/arm/boot/dts/bcm47094-dlink-dir-885l.dts
index 64ded7643e9f..51b0641b5f79 100644
--- a/arch/arm/boot/dts/bcm47094-dlink-dir-885l.dts
+++ b/arch/arm/boot/dts/bcm47094-dlink-dir-885l.dts
@@ -4,7 +4,17 @@
*
* Copyright (C) 2016 Rafał Miłecki <zajec5@gmail.com>
*
- * Licensed under the GNU/GPL. See COPYING for details.
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
+ * REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+ * AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
+ * INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+ * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+ * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
*/
/dts-v1/;
@@ -46,37 +56,31 @@
wan-white {
label = "bcm53xx:white:wan";
gpios = <&chipcommon 1 GPIO_ACTIVE_LOW>;
- linux,default-trigger = "default-off";
};
power-amber {
label = "bcm53xx:amber:power";
gpios = <&chipcommon 2 GPIO_ACTIVE_LOW>;
- linux,default-trigger = "default-off";
};
wan-amber {
label = "bcm53xx:amber:wan";
gpios = <&chipcommon 3 GPIO_ACTIVE_LOW>;
- linux,default-trigger = "default-off";
};
usb3-white {
label = "bcm53xx:white:usb3";
gpios = <&chipcommon 8 GPIO_ACTIVE_LOW>;
- linux,default-trigger = "default-off";
};
2ghz {
label = "bcm53xx:white:2ghz";
gpios = <&chipcommon 13 GPIO_ACTIVE_LOW>;
- linux,default-trigger = "default-off";
};
5ghz {
label = "bcm53xx:white:5ghz";
gpios = <&chipcommon 14 GPIO_ACTIVE_LOW>;
- linux,default-trigger = "default-off";
};
};
diff --git a/arch/arm/boot/dts/bcm47094-linksys-panamera.dts b/arch/arm/boot/dts/bcm47094-linksys-panamera.dts
new file mode 100644
index 000000000000..b6750f70dffb
--- /dev/null
+++ b/arch/arm/boot/dts/bcm47094-linksys-panamera.dts
@@ -0,0 +1,36 @@
+/*
+ * Copyright (C) 2017 Rafał Miłecki <rafal@milecki.pl>
+ *
+ * Licensed under the ISC license.
+ */
+
+/dts-v1/;
+
+#include "bcm47094.dtsi"
+#include "bcm5301x-nand-cs0-bch8.dtsi"
+
+/ {
+ compatible = "linksys,panamera", "brcm,bcm47094", "brcm,bcm4708";
+ model = "Linksys EA9500";
+
+ chosen {
+ bootargs = "console=ttyS0,115200";
+ };
+
+ memory {
+ reg = <0x00000000 0x08000000
+ 0x88000000 0x08000000>;
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ wps {
+ label = "WPS";
+ linux,code = <KEY_WPS_BUTTON>;
+ gpios = <&chipcommon 3 GPIO_ACTIVE_LOW>;
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/bcm47094-luxul-xwr-3100.dts b/arch/arm/boot/dts/bcm47094-luxul-xwr-3100.dts
index 5cf4ab1ebe85..5f8621d00c50 100644
--- a/arch/arm/boot/dts/bcm47094-luxul-xwr-3100.dts
+++ b/arch/arm/boot/dts/bcm47094-luxul-xwr-3100.dts
@@ -34,37 +34,31 @@
lan3 {
label = "bcm53xx:green:lan3";
gpios = <&chipcommon 1 GPIO_ACTIVE_LOW>;
- linux,default-trigger = "default-off";
};
lan4 {
label = "bcm53xx:green:lan4";
gpios = <&chipcommon 2 GPIO_ACTIVE_LOW>;
- linux,default-trigger = "default-off";
};
wan {
label = "bcm53xx:green:wan";
gpios = <&chipcommon 3 GPIO_ACTIVE_LOW>;
- linux,default-trigger = "default-off";
};
lan1 {
label = "bcm53xx:green:lan1";
gpios = <&chipcommon 4 GPIO_ACTIVE_LOW>;
- linux,default-trigger = "default-off";
};
lan2 {
label = "bcm53xx:green:lan2";
gpios = <&chipcommon 6 GPIO_ACTIVE_LOW>;
- linux,default-trigger = "default-off";
};
usb3 {
label = "bcm53xx:green:usb3";
gpios = <&chipcommon 8 GPIO_ACTIVE_LOW>;
- linux,default-trigger = "default-off";
};
status {
@@ -76,13 +70,11 @@
2ghz {
label = "bcm53xx:green:2ghz";
gpios = <&chipcommon 13 GPIO_ACTIVE_LOW>;
- linux,default-trigger = "default-off";
};
5ghz {
label = "bcm53xx:green:5ghz";
gpios = <&chipcommon 14 GPIO_ACTIVE_LOW>;
- linux,default-trigger = "default-off";
};
};
diff --git a/arch/arm/boot/dts/bcm47094-netgear-r8500.dts b/arch/arm/boot/dts/bcm47094-netgear-r8500.dts
index 600795ee1aed..859929973158 100644
--- a/arch/arm/boot/dts/bcm47094-netgear-r8500.dts
+++ b/arch/arm/boot/dts/bcm47094-netgear-r8500.dts
@@ -34,37 +34,31 @@
power1 {
label = "bcm53xx:amber:power";
gpios = <&chipcommon 3 GPIO_ACTIVE_LOW>;
- linux,default-trigger = "default-off";
};
5ghz-1 {
label = "bcm53xx:white:5ghz-1";
gpios = <&chipcommon 11 GPIO_ACTIVE_LOW>;
- linux,default-trigger = "default-off";
};
5ghz-2 {
label = "bcm53xx:white:5ghz-2";
gpios = <&chipcommon 12 GPIO_ACTIVE_LOW>;
- linux,default-trigger = "default-off";
};
2ghz {
label = "bcm53xx:white:2ghz";
gpios = <&chipcommon 13 GPIO_ACTIVE_LOW>;
- linux,default-trigger = "default-off";
};
usb2 {
label = "bcm53xx:white:usb2";
gpios = <&chipcommon 17 GPIO_ACTIVE_LOW>;
- linux,default-trigger = "default-off";
};
usb3 {
label = "bcm53xx:white:usb3";
gpios = <&chipcommon 18 GPIO_ACTIVE_LOW>;
- linux,default-trigger = "default-off";
};
};
diff --git a/arch/arm/boot/dts/bcm47189-tenda-ac9.dts b/arch/arm/boot/dts/bcm47189-tenda-ac9.dts
index 4403ae8790c2..34417dac1cd0 100644
--- a/arch/arm/boot/dts/bcm47189-tenda-ac9.dts
+++ b/arch/arm/boot/dts/bcm47189-tenda-ac9.dts
@@ -26,19 +26,16 @@
usb {
label = "bcm53xx:blue:usb";
gpios = <&chipcommon 1 GPIO_ACTIVE_HIGH>;
- linux,default-trigger = "default-off";
};
wps {
label = "bcm53xx:blue:wps";
gpios = <&chipcommon 10 GPIO_ACTIVE_HIGH>;
- linux,default-trigger = "default-off";
};
5ghz {
label = "bcm53xx:blue:5ghz";
gpios = <&chipcommon 11 GPIO_ACTIVE_HIGH>;
- linux,default-trigger = "default-off";
};
system {
@@ -48,6 +45,15 @@
};
};
+ pcie0_leds {
+ compatible = "gpio-leds";
+
+ 2ghz {
+ label = "bcm53xx:blue:2ghz";
+ gpios = <&pcie0_chipcommon 3 GPIO_ACTIVE_HIGH>;
+ };
+ };
+
gpio-keys {
compatible = "gpio-keys";
#address-cells = <1>;
@@ -72,3 +78,30 @@
};
};
};
+
+&pcie0 {
+ ranges = <0x00000000 0 0 0 0 0x00100000>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+
+ bridge@0,0,0 {
+ reg = <0x0000 0 0 0 0>;
+ ranges = <0x00000000 0 0 0 0 0 0 0x00100000>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+
+ wifi@0,1,0 {
+ reg = <0x0000 0 0 0 0>;
+ ranges = <0x00000000 0 0 0 0x00100000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ pcie0_chipcommon: chipcommon@0 {
+ reg = <0 0x1000>;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/bcm5301x.dtsi b/arch/arm/boot/dts/bcm5301x.dtsi
index 00de62dc0042..acee36a61004 100644
--- a/arch/arm/boot/dts/bcm5301x.dtsi
+++ b/arch/arm/boot/dts/bcm5301x.dtsi
@@ -18,10 +18,6 @@
/ {
interrupt-parent = <&gic>;
- chosen {
- stdout-path = &uart0;
- };
-
chipcommonA {
compatible = "simple-bus";
ranges = <0x00000000 0x18000000 0x00001000>;
@@ -70,10 +66,19 @@
clocks = <&periph_clk>;
};
- local-timer@20600 {
+ timer@20600 {
compatible = "arm,cortex-a9-twd-timer";
- reg = <0x20600 0x100>;
- interrupts = <GIC_PPI 13 IRQ_TYPE_EDGE_RISING>;
+ reg = <0x20600 0x20>;
+ interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(2) |
+ IRQ_TYPE_EDGE_RISING)>;
+ clocks = <&periph_clk>;
+ };
+
+ watchdog@20620 {
+ compatible = "arm,cortex-a9-twd-wdt";
+ reg = <0x20620 0x20>;
+ interrupts = <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(2) |
+ IRQ_TYPE_EDGE_RISING)>;
clocks = <&periph_clk>;
};
@@ -298,20 +303,6 @@
};
};
- spi@29000 {
- reg = <0x00029000 0x1000>;
- #address-cells = <1>;
- #size-cells = <0>;
-
- spi_nor: spi-nor@0 {
- compatible = "jedec,spi-nor";
- reg = <0>;
- spi-max-frequency = <20000000>;
- linux,part-probe = "ofpart", "bcm47xxpart";
- status = "disabled";
- };
- };
-
gmac0: ethernet@24000 {
reg = <0x24000 0x800>;
};
@@ -329,6 +320,16 @@
};
};
+ i2c0: i2c@18009000 {
+ compatible = "brcm,iproc-i2c";
+ reg = <0x18009000 0x50>;
+ interrupts = <GIC_SPI 121 IRQ_TYPE_NONE>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clock-frequency = <100000>;
+ status = "disabled";
+ };
+
lcpll0: lcpll0@1800c100 {
#clock-cells = <1>;
compatible = "brcm,nsp-lcpll0";
@@ -375,4 +376,40 @@
brcm,nand-has-wp;
};
+
+ spi@18029200 {
+ compatible = "brcm,spi-bcm-qspi", "brcm,spi-nsp-qspi";
+ reg = <0x18029200 0x184>,
+ <0x18029000 0x124>,
+ <0x1811b408 0x004>,
+ <0x180293a0 0x01c>;
+ reg-names = "mspi", "bspi", "intr_regs", "intr_status_reg";
+ interrupts = <GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 74 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 75 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 76 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 77 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 78 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "spi_lr_fullness_reached",
+ "spi_lr_session_aborted",
+ "spi_lr_impatient",
+ "spi_lr_session_done",
+ "spi_lr_overhead",
+ "mspi_done",
+ "mspi_halted";
+ clocks = <&iprocmed>;
+ clock-names = "iprocmed";
+ num-cs = <2>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ spi_nor: spi-nor@0 {
+ compatible = "jedec,spi-nor";
+ reg = <0>;
+ spi-max-frequency = <20000000>;
+ linux,part-probe = "ofpart", "bcm47xxpart";
+ status = "disabled";
+ };
+ };
};
diff --git a/arch/arm/boot/dts/bcm53573.dtsi b/arch/arm/boot/dts/bcm53573.dtsi
index 2da04d0a7348..eae623f76401 100644
--- a/arch/arm/boot/dts/bcm53573.dtsi
+++ b/arch/arm/boot/dts/bcm53573.dtsi
@@ -13,8 +13,12 @@
/ {
interrupt-parent = <&gic>;
+ aliases {
+ serial0 = &uart0;
+ };
+
chosen {
- stdout-path = &uart0;
+ stdout-path = "serial0:115200n8";
};
cpus {
@@ -113,6 +117,10 @@
};
};
+ pcie0: pcie@2000 {
+ reg = <0x00002000 0x1000>;
+ };
+
usb2: usb2@4000 {
reg = <0x4000 0x1000>;
ranges;
diff --git a/arch/arm/boot/dts/bcm94708.dts b/arch/arm/boot/dts/bcm94708.dts
index 42855a7c1bfa..2e08c895f281 100644
--- a/arch/arm/boot/dts/bcm94708.dts
+++ b/arch/arm/boot/dts/bcm94708.dts
@@ -38,14 +38,6 @@
model = "NorthStar SVK (BCM94708)";
compatible = "brcm,bcm94708", "brcm,bcm4708";
- aliases {
- serial0 = &uart0;
- };
-
- chosen {
- stdout-path = "serial0:115200n8";
- };
-
memory {
reg = <0x00000000 0x08000000>;
};
diff --git a/arch/arm/boot/dts/bcm94709.dts b/arch/arm/boot/dts/bcm94709.dts
index 95e8be65f2f1..c37616c67edc 100644
--- a/arch/arm/boot/dts/bcm94709.dts
+++ b/arch/arm/boot/dts/bcm94709.dts
@@ -38,14 +38,6 @@
model = "NorthStar SVK (BCM94709)";
compatible = "brcm,bcm94709", "brcm,bcm4709", "brcm,bcm4708";
- aliases {
- serial0 = &uart0;
- };
-
- chosen {
- stdout-path = "serial0:115200n8";
- };
-
memory {
reg = <0x00000000 0x08000000>;
};
diff --git a/arch/arm/boot/dts/bcm953012er.dts b/arch/arm/boot/dts/bcm953012er.dts
index decd86bae901..40e694bfe5ca 100644
--- a/arch/arm/boot/dts/bcm953012er.dts
+++ b/arch/arm/boot/dts/bcm953012er.dts
@@ -39,14 +39,6 @@
model = "NorthStar Enterprise Router (BCM953012ER)";
compatible = "brcm,bcm953012er", "brcm,brcm53012", "brcm,bcm4708";
- aliases {
- serial0 = &uart0;
- };
-
- chosen {
- stdout-path = "serial0:115200n8";
- };
-
memory {
reg = <0x00000000 0x8000000>;
};
diff --git a/arch/arm/boot/dts/bcm953012hr.dts b/arch/arm/boot/dts/bcm953012hr.dts
new file mode 100644
index 000000000000..3076e81699cf
--- /dev/null
+++ b/arch/arm/boot/dts/bcm953012hr.dts
@@ -0,0 +1,97 @@
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright(c) 2017 Broadcom
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Broadcom nor the names of its contributors
+ * may be used to endorse or promote products derived from this
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/dts-v1/;
+
+#include "bcm4708.dtsi"
+#include "bcm5301x-nand-cs0-bch4.dtsi"
+
+/ {
+ model = "NorthStar HR (BCM953012HR)";
+ compatible = "brcm,bcm953012hr", "brcm,brcm53012", "brcm,bcm4708";
+
+ aliases {
+ ethernet0 = &gmac0;
+ ethernet1 = &gmac1;
+ ethernet2 = &gmac2;
+ };
+
+ memory@80000000 {
+ reg = <0x80000000 0x10000000>;
+ };
+};
+
+&nandcs {
+ partition@0 {
+ label = "nboot";
+ reg = <0x00000000 0x00200000>;
+ read-only;
+ };
+ partition@200000 {
+ label = "nenv";
+ reg = <0x00200000 0x00400000>;
+ };
+ partition@600000 {
+ label = "nsystem";
+ reg = <0x00600000 0x00a00000>;
+ };
+ partition@1000000 {
+ label = "nrootfs";
+ reg = <0x01000000 0x07000000>;
+ };
+};
+
+&spi_nor {
+ status = "okay";
+ spi-max-frequency = <62500000>;
+ m25p,default-addr-width = <3>;
+
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ partition@0 {
+ label = "boot";
+ reg = <0x00000000 0x000d0000>;
+ };
+ partition@d0000 {
+ label = "env";
+ reg = <0x000d0000 0x00030000>;
+ };
+ partition@100000 {
+ label = "system";
+ reg = <0x00100000 0x00600000>;
+ };
+ partition@700000 {
+ label = "rootfs";
+ reg = <0x00700000 0x00900000>;
+ };
+};
diff --git a/arch/arm/boot/dts/bcm953012k.dts b/arch/arm/boot/dts/bcm953012k.dts
index ae31a5826e91..79c168e2714b 100644
--- a/arch/arm/boot/dts/bcm953012k.dts
+++ b/arch/arm/boot/dts/bcm953012k.dts
@@ -43,15 +43,69 @@
serial1 = &uart1;
};
- chosen {
- stdout-path = "serial0:115200n8";
- };
-
memory {
reg = <0x80000000 0x10000000>;
};
};
+&nand {
+ nandcs@0 {
+ compatible = "brcm,nandcs";
+ reg = <0>;
+ nand-on-flash-bbt;
+
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ nand-ecc-strength = <4>;
+ nand-ecc-step-size = <512>;
+
+ partition@0 {
+ label = "nboot";
+ reg = <0x00000000 0x00200000>;
+ read-only;
+ };
+ partition@200000 {
+ label = "nenv";
+ reg = <0x00200000 0x00400000>;
+ };
+ partition@600000 {
+ label = "nsystem";
+ reg = <0x00600000 0x00a00000>;
+ };
+ partition@1000000 {
+ label = "nrootfs";
+ reg = <0x01000000 0x07000000>;
+ };
+ };
+};
+
+&spi_nor {
+ status = "okay";
+ spi-max-frequency = <62500000>;
+ m25p,default-addr-width = <3>;
+
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ partition@0 {
+ label = "boot";
+ reg = <0x00000000 0x000d0000>;
+ };
+ partition@d0000 {
+ label = "env";
+ reg = <0x000d0000 0x00030000>;
+ };
+ partition@100000 {
+ label = "system";
+ reg = <0x00100000 0x00600000>;
+ };
+ partition@700000 {
+ label = "rootfs";
+ reg = <0x00700000 0x00900000>;
+ };
+};
+
&uart0 {
status = "okay";
};
diff --git a/arch/arm/boot/dts/bcm958522er.dts b/arch/arm/boot/dts/bcm958522er.dts
index df05e7f568af..f5c42962c201 100644
--- a/arch/arm/boot/dts/bcm958522er.dts
+++ b/arch/arm/boot/dts/bcm958522er.dts
@@ -60,7 +60,7 @@
};
};
-/* USB 2/3 support needed to be complete */
+/* USB 3 support needed to be complete */
&amac0 {
status = "okay";
@@ -70,6 +70,10 @@
status = "okay";
};
+&ehci0 {
+ status = "okay";
+};
+
&nand {
nandcs@0 {
compatible = "brcm,nandcs";
@@ -108,6 +112,10 @@
};
};
+&ohci0 {
+ status = "okay";
+};
+
&pcie0 {
status = "okay";
};
diff --git a/arch/arm/boot/dts/bcm958525er.dts b/arch/arm/boot/dts/bcm958525er.dts
index 4a3ab19c6281..efcb1f67bdad 100644
--- a/arch/arm/boot/dts/bcm958525er.dts
+++ b/arch/arm/boot/dts/bcm958525er.dts
@@ -60,7 +60,7 @@
};
};
-/* USB 2/3 support needed to be complete */
+/* USB 3 support needed to be complete */
&amac0 {
status = "okay";
@@ -70,6 +70,10 @@
status = "okay";
};
+&ehci0 {
+ status = "okay";
+};
+
&nand {
nandcs@0 {
compatible = "brcm,nandcs";
@@ -108,6 +112,10 @@
};
};
+&ohci0 {
+ status = "okay";
+};
+
&pcie0 {
status = "okay";
};
diff --git a/arch/arm/boot/dts/bcm958525xmc.dts b/arch/arm/boot/dts/bcm958525xmc.dts
index 81f78435d8c7..b335ce02e32f 100644
--- a/arch/arm/boot/dts/bcm958525xmc.dts
+++ b/arch/arm/boot/dts/bcm958525xmc.dts
@@ -66,7 +66,13 @@
status = "okay";
};
+&ehci0 {
+ status = "okay";
+};
+
&i2c0 {
+ status = "okay";
+
temperature-sensor@4c {
compatible = "adi,adt7461a";
reg = <0x4c>;
@@ -122,6 +128,10 @@
};
};
+&ohci0 {
+ status = "okay";
+};
+
&pcie0 {
status = "okay";
};
diff --git a/arch/arm/boot/dts/bcm958622hr.dts b/arch/arm/boot/dts/bcm958622hr.dts
index c88b8fefcb2f..16ab2d82a14b 100644
--- a/arch/arm/boot/dts/bcm958622hr.dts
+++ b/arch/arm/boot/dts/bcm958622hr.dts
@@ -60,7 +60,7 @@
};
};
-/* USB 2/3 and SLIC support needed to be complete */
+/* USB 3 and SLIC support needed to be complete */
&amac0 {
status = "okay";
@@ -74,6 +74,10 @@
status = "okay";
};
+&ehci0 {
+ status = "okay";
+};
+
&nand {
nandcs@0 {
compatible = "brcm,nandcs";
@@ -112,6 +116,10 @@
};
};
+&ohci0 {
+ status = "okay";
+};
+
&pcie0 {
status = "okay";
};
diff --git a/arch/arm/boot/dts/bcm958623hr.dts b/arch/arm/boot/dts/bcm958623hr.dts
index d503fa0dde31..9b921c6aa8f8 100644
--- a/arch/arm/boot/dts/bcm958623hr.dts
+++ b/arch/arm/boot/dts/bcm958623hr.dts
@@ -60,7 +60,7 @@
};
};
-/* USB 2/3 and SLIC support needed to be complete */
+/* USB 3 and SLIC support needed to be complete */
&amac0 {
status = "okay";
@@ -74,6 +74,10 @@
status = "okay";
};
+&ehci0 {
+ status = "okay";
+};
+
&nand {
nandcs@0 {
compatible = "brcm,nandcs";
@@ -112,6 +116,10 @@
};
};
+&ohci0 {
+ status = "okay";
+};
+
&pcie0 {
status = "okay";
};
diff --git a/arch/arm/boot/dts/bcm958625hr.dts b/arch/arm/boot/dts/bcm958625hr.dts
index cc0363b843c1..006b08e41a3b 100644
--- a/arch/arm/boot/dts/bcm958625hr.dts
+++ b/arch/arm/boot/dts/bcm958625hr.dts
@@ -72,6 +72,10 @@
status = "okay";
};
+&ehci0 {
+ status = "okay";
+};
+
&nand {
nandcs@0 {
compatible = "brcm,nandcs";
@@ -110,6 +114,10 @@
};
};
+&ohci0 {
+ status = "okay";
+};
+
&pcie0 {
status = "okay";
};
diff --git a/arch/arm/boot/dts/bcm958625k.dts b/arch/arm/boot/dts/bcm958625k.dts
index f8d47e517e18..64740f85cf4c 100644
--- a/arch/arm/boot/dts/bcm958625k.dts
+++ b/arch/arm/boot/dts/bcm958625k.dts
@@ -65,6 +65,10 @@
status = "okay";
};
+&ehci0 {
+ status = "okay";
+};
+
&nand {
nandcs@0 {
compatible = "brcm,nandcs";
@@ -103,6 +107,10 @@
};
};
+&ohci0 {
+ status = "okay";
+};
+
&pcie0 {
status = "okay";
};
diff --git a/arch/arm/boot/dts/bcm988312hr.dts b/arch/arm/boot/dts/bcm988312hr.dts
index 74e15a3cd9f8..bce251a68591 100644
--- a/arch/arm/boot/dts/bcm988312hr.dts
+++ b/arch/arm/boot/dts/bcm988312hr.dts
@@ -60,7 +60,7 @@
};
};
-/* USB 2/3 support needed to be complete */
+/* USB 3 support needed to be complete */
&amac0 {
status = "okay";
@@ -74,6 +74,10 @@
status = "okay";
};
+&ehci0 {
+ status = "okay";
+};
+
&nand {
nandcs@0 {
compatible = "brcm,nandcs";
@@ -112,6 +116,10 @@
};
};
+&ohci0 {
+ status = "okay";
+};
+
&pcie0 {
status = "okay";
};
diff --git a/arch/arm/boot/dts/da850-evm.dts b/arch/arm/boot/dts/da850-evm.dts
index d15107cba765..8d244cd76c36 100644
--- a/arch/arm/boot/dts/da850-evm.dts
+++ b/arch/arm/boot/dts/da850-evm.dts
@@ -9,6 +9,7 @@
*/
/dts-v1/;
#include "da850.dtsi"
+#include <dt-bindings/gpio/gpio.h>
/ {
compatible = "ti,da850-evm", "ti,da850";
@@ -78,7 +79,10 @@
DRVDD-supply = <&vbat>;
DVDD-supply = <&vbat>;
};
-
+ tca6416: gpio@20 {
+ compatible = "ti,tca6416";
+ reg = <0x20>;
+ };
};
wdt: wdt@21000 {
status = "okay";
@@ -293,20 +297,27 @@
&vpif {
pinctrl-names = "default";
- pinctrl-0 = <&vpif_capture_pins>;
+ pinctrl-0 = <&vpif_capture_pins>, <&vpif_display_pins>;
status = "okay";
/* VPIF capture port */
- port {
- vpif_ch0: endpoint@0 {
- reg = <0>;
- bus-width = <8>;
+ port@0 {
+ vpif_input_ch0: endpoint@0 {
+ reg = <0>;
+ bus-width = <8>;
};
- vpif_ch1: endpoint@1 {
- reg = <1>;
- bus-width = <8>;
- data-shift = <8>;
+ vpif_input_ch1: endpoint@1 {
+ reg = <1>;
+ bus-width = <8>;
+ data-shift = <8>;
+ };
+ };
+
+ /* VPIF display port */
+ port@1 {
+ vpif_output_ch0: endpoint {
+ bus-width = <8>;
};
};
};
diff --git a/arch/arm/boot/dts/da850-lego-ev3.dts b/arch/arm/boot/dts/da850-lego-ev3.dts
index 112ec92064ce..512604ad8b71 100644
--- a/arch/arm/boot/dts/da850-lego-ev3.dts
+++ b/arch/arm/boot/dts/da850-lego-ev3.dts
@@ -123,6 +123,14 @@
pinctrl-0 = <&system_power_pin>;
};
+ sound {
+ compatible = "pwm-beeper";
+ pinctrl-names = "default";
+ pinctrl-0 = <&ehrpwm0b_pins>;
+ pwms = <&ehrpwm0 1 1000000 0>;
+ amp-supply = <&amp>;
+ };
+
/*
* This is a 5V current limiting regulator that is shared by USB,
* the sensor (input) ports, the motor (output) ports and the A/DC.
@@ -139,18 +147,36 @@
enable-active-high;
regulator-boot-on;
};
+
+ /*
+ * This is a simple voltage divider on VCC5V to provide a 2.5V
+ * reference signal to the ADC.
+ */
+ adc_ref: regulator2 {
+ compatible = "regulator-fixed";
+ regulator-name = "adc ref";
+ regulator-min-microvolt = <2500000>;
+ regulator-max-microvolt = <2500000>;
+ regulator-boot-on;
+ vin-supply = <&vcc5v>;
+ };
+
+ /*
+ * This is the amplifier for the speaker.
+ */
+ amp: regulator3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&amp_pins>;
+ compatible = "regulator-fixed";
+ regulator-name = "amp";
+ gpio = <&gpio 111 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
};
&pmx_core {
status = "okay";
- spi0_cs3_pin: pinmux_spi0_cs3_pin {
- pinctrl-single,bits = <
- /* CS3 */
- 0xc 0x01000000 0x0f000000
- >;
- };
-
mmc0_cd_pin: pinmux_mmc0_cd {
pinctrl-single,bits = <
/* GP5[14] */
@@ -195,6 +221,13 @@
0x4c 0x00008000 0x0000f000
>;
};
+
+ amp_pins: pinmux_amp_pins {
+ pinctrl-single,bits = <
+ /* GP6[15] */
+ 0x34 0x00000008 0x0000000f
+ >;
+ };
};
&pinconf {
@@ -293,6 +326,18 @@
};
};
};
+
+ adc: adc@3 {
+ compatible = "ti,ads7957";
+ reg = <3>;
+ #io-channel-cells = <1>;
+ spi-max-frequency = <10000000>;
+ vref-supply = <&adc_ref>;
+ };
+};
+
+&ehrpwm0 {
+ status = "okay";
};
&gpio {
diff --git a/arch/arm/boot/dts/da850.dtsi b/arch/arm/boot/dts/da850.dtsi
index 92d633d1da68..941d455000a7 100644
--- a/arch/arm/boot/dts/da850.dtsi
+++ b/arch/arm/boot/dts/da850.dtsi
@@ -153,6 +153,12 @@
0x10 0x00000010 0x000000f0
>;
};
+ spi0_cs3_pin: pinmux_spi0_cs3_pin {
+ pinctrl-single,bits = <
+ /* CS3 */
+ 0xc 0x01000000 0x0f000000
+ >;
+ };
spi1_pins: pinmux_spi1_pins {
pinctrl-single,bits = <
/* SIMO, SOMI, CLK */
@@ -216,8 +222,21 @@
0x3c 0x11111111 0xffffffff
/* VP_DIN[8..9] */
0x40 0x00000011 0x000000ff
- /* VP_CLKIN3, VP_CLKIN2 */
- 0x4c 0x00010100 0x000f0f00
+ >;
+ };
+ vpif_display_pins: vpif_display_pins {
+ pinctrl-single,bits = <
+ /* VP_DOUT[2..7] */
+ 0x40 0x11111100 0xffffff00
+ /* VP_DOUT[10..15,0..1] */
+ 0x44 0x11111111 0xffffffff
+ /* VP_DOUT[8..9] */
+ 0x48 0x00000011 0x000000ff
+ /*
+ * VP_CLKOUT3, VP_CLKIN3,
+ * VP_CLKOUT2, VP_CLKIN2
+ */
+ 0x4c 0x00111100 0x00ffff00
>;
};
};
@@ -345,7 +364,13 @@
status = "disabled";
/* VPIF capture port */
- port {
+ port@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ /* VPIF display port */
+ port@1 {
#address-cells = <1>;
#size-cells = <0>;
};
diff --git a/arch/arm/boot/dts/dm8168-evm.dts b/arch/arm/boot/dts/dm8168-evm.dts
index 0bf55fa72dea..1865976db5f9 100644
--- a/arch/arm/boot/dts/dm8168-evm.dts
+++ b/arch/arm/boot/dts/dm8168-evm.dts
@@ -25,6 +25,12 @@
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
};
+
+ sata_refclk: fixedclock0 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <100000000>;
+ };
};
&dm816x_pinmux {
@@ -173,3 +179,7 @@
pinctrl-0 = <&usb1_pins>;
mentor,multipoint = <0>;
};
+
+&sata {
+ clocks = <&sysclk5_ck>, <&sata_refclk>;
+};
diff --git a/arch/arm/boot/dts/dm816x.dtsi b/arch/arm/boot/dts/dm816x.dtsi
index 276211e1ee53..59cbf958fcc3 100644
--- a/arch/arm/boot/dts/dm816x.dtsi
+++ b/arch/arm/boot/dts/dm816x.dtsi
@@ -293,6 +293,13 @@
phy-handle = <&phy1>;
};
+ sata: sata@4a140000 {
+ compatible = "ti,dm816-ahci";
+ reg = <0x4a140000 0x10000>;
+ interrupts = <16>;
+ ti,hwmods = "sata";
+ };
+
mcspi1: spi@48030000 {
compatible = "ti,omap4-mcspi";
reg = <0x48030000 0x1000>;
diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi
index bbfb9d5a70a9..57892f264cea 100644
--- a/arch/arm/boot/dts/dra7.dtsi
+++ b/arch/arm/boot/dts/dra7.dtsi
@@ -81,11 +81,7 @@
compatible = "arm,cortex-a15";
reg = <0>;
- operating-points = <
- /* kHz uV */
- 1000000 1060000
- 1176000 1160000
- >;
+ operating-points-v2 = <&cpu0_opp_table>;
clocks = <&dpll_mpu_ck>;
clock-names = "cpu";
@@ -99,6 +95,24 @@
};
};
+ cpu0_opp_table: opp-table {
+ compatible = "operating-points-v2-ti-cpu";
+ syscon = <&scm_wkup>;
+
+ opp_nom@1000000000 {
+ opp-hz = /bits/ 64 <1000000000>;
+ opp-microvolt = <1060000 850000 1150000>;
+ opp-supported-hw = <0xFF 0x01>;
+ opp-suspend;
+ };
+
+ opp_od@1176000000 {
+ opp-hz = /bits/ 64 <1176000000>;
+ opp-microvolt = <1160000 885000 1160000>;
+ opp-supported-hw = <0xFF 0x02>;
+ };
+ };
+
/*
* The soc node represents the soc top level view. It is used for IPs
* that are not memory mapped in the MPU view or for the MPU itself.
@@ -1984,6 +1998,23 @@
&cpu_thermal {
polling-delay = <500>; /* milliseconds */
+ coefficients = <0 2000>;
+};
+
+&gpu_thermal {
+ coefficients = <0 2000>;
+};
+
+&core_thermal {
+ coefficients = <0 2000>;
+};
+
+&dspeve_thermal {
+ coefficients = <0 2000>;
+};
+
+&iva_thermal {
+ coefficients = <0 2000>;
};
/include/ "dra7xx-clocks.dtsi"
diff --git a/arch/arm/boot/dts/dra74x.dtsi b/arch/arm/boot/dts/dra74x.dtsi
index 0a78347e6615..24e6746c5b26 100644
--- a/arch/arm/boot/dts/dra74x.dtsi
+++ b/arch/arm/boot/dts/dra74x.dtsi
@@ -17,6 +17,7 @@
device_type = "cpu";
compatible = "arm,cortex-a15";
reg = <1>;
+ operating-points-v2 = <&cpu0_opp_table>;
};
};
@@ -79,6 +80,10 @@
};
};
+&cpu0_opp_table {
+ opp-shared;
+};
+
&dss {
reg = <0x58000000 0x80>,
<0x58004054 0x4>,
diff --git a/arch/arm/boot/dts/exynos3250-rinato.dts b/arch/arm/boot/dts/exynos3250-rinato.dts
index 548413e23c47..c9f191ca7b9c 100644
--- a/arch/arm/boot/dts/exynos3250-rinato.dts
+++ b/arch/arm/boot/dts/exynos3250-rinato.dts
@@ -215,6 +215,8 @@
&dsi_0 {
vddcore-supply = <&ldo6_reg>;
vddio-supply = <&ldo6_reg>;
+ samsung,burst-clock-frequency = <250000000>;
+ samsung,esc-clock-frequency = <20000000>;
samsung,pll-clock-frequency = <24000000>;
status = "okay";
diff --git a/arch/arm/boot/dts/exynos3250.dtsi b/arch/arm/boot/dts/exynos3250.dtsi
index 9c28ef4508e0..590ee442d0ae 100644
--- a/arch/arm/boot/dts/exynos3250.dtsi
+++ b/arch/arm/boot/dts/exynos3250.dtsi
@@ -745,23 +745,23 @@
compatible = "operating-points-v2";
opp-shared;
- opp@50000000 {
+ opp-50000000 {
opp-hz = /bits/ 64 <50000000>;
opp-microvolt = <800000>;
};
- opp@100000000 {
+ opp-100000000 {
opp-hz = /bits/ 64 <100000000>;
opp-microvolt = <800000>;
};
- opp@134000000 {
+ opp-134000000 {
opp-hz = /bits/ 64 <134000000>;
opp-microvolt = <800000>;
};
- opp@200000000 {
+ opp-200000000 {
opp-hz = /bits/ 64 <200000000>;
opp-microvolt = <825000>;
};
- opp@400000000 {
+ opp-400000000 {
opp-hz = /bits/ 64 <400000000>;
opp-microvolt = <875000>;
};
@@ -835,23 +835,23 @@
compatible = "operating-points-v2";
opp-shared;
- opp@50000000 {
+ opp-50000000 {
opp-hz = /bits/ 64 <50000000>;
opp-microvolt = <900000>;
};
- opp@80000000 {
+ opp-80000000 {
opp-hz = /bits/ 64 <80000000>;
opp-microvolt = <900000>;
};
- opp@100000000 {
+ opp-100000000 {
opp-hz = /bits/ 64 <100000000>;
opp-microvolt = <1000000>;
};
- opp@134000000 {
+ opp-134000000 {
opp-hz = /bits/ 64 <134000000>;
opp-microvolt = <1000000>;
};
- opp@200000000 {
+ opp-200000000 {
opp-hz = /bits/ 64 <200000000>;
opp-microvolt = <1000000>;
};
@@ -861,19 +861,19 @@
compatible = "operating-points-v2";
opp-shared;
- opp@50000000 {
+ opp-50000000 {
opp-hz = /bits/ 64 <50000000>;
};
- opp@80000000 {
+ opp-80000000 {
opp-hz = /bits/ 64 <80000000>;
};
- opp@100000000 {
+ opp-100000000 {
opp-hz = /bits/ 64 <100000000>;
};
- opp@200000000 {
+ opp-200000000 {
opp-hz = /bits/ 64 <200000000>;
};
- opp@400000000 {
+ opp-400000000 {
opp-hz = /bits/ 64 <400000000>;
};
};
@@ -882,19 +882,19 @@
compatible = "operating-points-v2";
opp-shared;
- opp@50000000 {
+ opp-50000000 {
opp-hz = /bits/ 64 <50000000>;
};
- opp@80000000 {
+ opp-80000000 {
opp-hz = /bits/ 64 <80000000>;
};
- opp@100000000 {
+ opp-100000000 {
opp-hz = /bits/ 64 <100000000>;
};
- opp@200000000 {
+ opp-200000000 {
opp-hz = /bits/ 64 <200000000>;
};
- opp@300000000 {
+ opp-300000000 {
opp-hz = /bits/ 64 <300000000>;
};
};
@@ -903,13 +903,13 @@
compatible = "operating-points-v2";
opp-shared;
- opp@50000000 {
+ opp-50000000 {
opp-hz = /bits/ 64 <50000000>;
};
- opp@80000000 {
+ opp-80000000 {
opp-hz = /bits/ 64 <80000000>;
};
- opp@100000000 {
+ opp-100000000 {
opp-hz = /bits/ 64 <100000000>;
};
};
diff --git a/arch/arm/boot/dts/exynos4.dtsi b/arch/arm/boot/dts/exynos4.dtsi
index 18def1c774d5..497a9470c888 100644
--- a/arch/arm/boot/dts/exynos4.dtsi
+++ b/arch/arm/boot/dts/exynos4.dtsi
@@ -283,15 +283,6 @@
};
};
- watchdog: watchdog@10060000 {
- compatible = "samsung,s3c2410-wdt";
- reg = <0x10060000 0x100>;
- interrupts = <GIC_SPI 43 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clock CLK_WDT>;
- clock-names = "watchdog";
- status = "disabled";
- };
-
rtc: rtc@10070000 {
compatible = "samsung,s3c6410-rtc";
reg = <0x10070000 0x100>;
@@ -771,6 +762,7 @@
clocks = <&clock CLK_HDMI_CEC>;
clock-names = "hdmicec";
samsung,syscon-phandle = <&pmu_system_controller>;
+ hdmi-phandle = <&hdmi>;
pinctrl-names = "default";
pinctrl-0 = <&hdmi_cec>;
status = "disabled";
diff --git a/arch/arm/boot/dts/exynos4210-origen.dts b/arch/arm/boot/dts/exynos4210-origen.dts
index a2c6a13fe67b..312650e2450f 100644
--- a/arch/arm/boot/dts/exynos4210-origen.dts
+++ b/arch/arm/boot/dts/exynos4210-origen.dts
@@ -328,7 +328,3 @@
&tmu {
status = "okay";
};
-
-&watchdog {
- status = "okay";
-};
diff --git a/arch/arm/boot/dts/exynos4210-trats.dts b/arch/arm/boot/dts/exynos4210-trats.dts
index 0ca1b4d355f2..1743ca850070 100644
--- a/arch/arm/boot/dts/exynos4210-trats.dts
+++ b/arch/arm/boot/dts/exynos4210-trats.dts
@@ -197,6 +197,8 @@
&dsi_0 {
vddcore-supply = <&vusb_reg>;
vddio-supply = <&vmipi_reg>;
+ samsung,burst-clock-frequency = <500000000>;
+ samsung,esc-clock-frequency = <20000000>;
samsung,pll-clock-frequency = <24000000>;
status = "okay";
diff --git a/arch/arm/boot/dts/exynos4210.dtsi b/arch/arm/boot/dts/exynos4210.dtsi
index f9408188f97f..768fb075b1fd 100644
--- a/arch/arm/boot/dts/exynos4210.dtsi
+++ b/arch/arm/boot/dts/exynos4210.dtsi
@@ -119,6 +119,14 @@
};
};
+ watchdog: watchdog@10060000 {
+ compatible = "samsung,s3c6410-wdt";
+ reg = <0x10060000 0x100>;
+ interrupts = <GIC_SPI 43 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clock CLK_WDT>;
+ clock-names = "watchdog";
+ };
+
clock: clock-controller@10030000 {
compatible = "samsung,exynos4210-clock";
reg = <0x10030000 0x20000>;
@@ -335,15 +343,15 @@
compatible = "operating-points-v2";
opp-shared;
- opp@134000000 {
+ opp-134000000 {
opp-hz = /bits/ 64 <134000000>;
opp-microvolt = <1025000>;
};
- opp@267000000 {
+ opp-267000000 {
opp-hz = /bits/ 64 <267000000>;
opp-microvolt = <1050000>;
};
- opp@400000000 {
+ opp-400000000 {
opp-hz = /bits/ 64 <400000000>;
opp-microvolt = <1150000>;
};
@@ -353,13 +361,13 @@
compatible = "operating-points-v2";
opp-shared;
- opp@134000000 {
+ opp-134000000 {
opp-hz = /bits/ 64 <134000000>;
};
- opp@160000000 {
+ opp-160000000 {
opp-hz = /bits/ 64 <160000000>;
};
- opp@200000000 {
+ opp-200000000 {
opp-hz = /bits/ 64 <200000000>;
};
};
@@ -368,10 +376,10 @@
compatible = "operating-points-v2";
opp-shared;
- opp@5000000 {
+ opp-5000000 {
opp-hz = /bits/ 64 <5000000>;
};
- opp@100000000 {
+ opp-100000000 {
opp-hz = /bits/ 64 <100000000>;
};
};
@@ -380,10 +388,10 @@
compatible = "operating-points-v2";
opp-shared;
- opp@10000000 {
+ opp-10000000 {
opp-hz = /bits/ 64 <10000000>;
};
- opp@134000000 {
+ opp-134000000 {
opp-hz = /bits/ 64 <134000000>;
};
};
@@ -392,13 +400,13 @@
compatible = "operating-points-v2";
opp-shared;
- opp@100000000 {
+ opp-100000000 {
opp-hz = /bits/ 64 <100000000>;
};
- opp@134000000 {
+ opp-134000000 {
opp-hz = /bits/ 64 <134000000>;
};
- opp@160000000 {
+ opp-160000000 {
opp-hz = /bits/ 64 <160000000>;
};
};
@@ -407,13 +415,13 @@
compatible = "operating-points-v2";
opp-shared;
- opp@100000000 {
+ opp-100000000 {
opp-hz = /bits/ 64 <100000000>;
};
- opp@160000000 {
+ opp-160000000 {
opp-hz = /bits/ 64 <160000000>;
};
- opp@200000000 {
+ opp-200000000 {
opp-hz = /bits/ 64 <200000000>;
};
};
diff --git a/arch/arm/boot/dts/exynos4412-itop-scp-core.dtsi b/arch/arm/boot/dts/exynos4412-itop-scp-core.dtsi
index a36cd36a26b8..4cd62487bb16 100644
--- a/arch/arm/boot/dts/exynos4412-itop-scp-core.dtsi
+++ b/arch/arm/boot/dts/exynos4412-itop-scp-core.dtsi
@@ -495,7 +495,3 @@
vtmu-supply = <&ldo16_reg>;
status = "okay";
};
-
-&watchdog {
- status = "okay";
-};
diff --git a/arch/arm/boot/dts/exynos4412-odroid-common.dtsi b/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
index 78f118cb73d4..0f1ff792fe44 100644
--- a/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
+++ b/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
@@ -555,7 +555,3 @@
vtmu-supply = <&ldo10_reg>;
status = "okay";
};
-
-&watchdog {
- status = "okay";
-};
diff --git a/arch/arm/boot/dts/exynos4412-origen.dts b/arch/arm/boot/dts/exynos4412-origen.dts
index a1ab6f94bb64..7a83e2df18a6 100644
--- a/arch/arm/boot/dts/exynos4412-origen.dts
+++ b/arch/arm/boot/dts/exynos4412-origen.dts
@@ -541,7 +541,3 @@
&serial_3 {
status = "okay";
};
-
-&watchdog {
- status = "okay";
-};
diff --git a/arch/arm/boot/dts/exynos4412-prime.dtsi b/arch/arm/boot/dts/exynos4412-prime.dtsi
index e75bc170c89c..a67bd953d754 100644
--- a/arch/arm/boot/dts/exynos4412-prime.dtsi
+++ b/arch/arm/boot/dts/exynos4412-prime.dtsi
@@ -20,12 +20,12 @@
};
&cpu0_opp_table {
- opp@1600000000 {
+ opp-1600000000 {
opp-hz = /bits/ 64 <1600000000>;
opp-microvolt = <1350000>;
clock-latency-ns = <200000>;
};
- opp@1704000000 {
+ opp-1704000000 {
opp-hz = /bits/ 64 <1704000000>;
opp-microvolt = <1350000>;
clock-latency-ns = <200000>;
diff --git a/arch/arm/boot/dts/exynos4412-trats2.dts b/arch/arm/boot/dts/exynos4412-trats2.dts
index 41ecd6d465a7..82221a00444d 100644
--- a/arch/arm/boot/dts/exynos4412-trats2.dts
+++ b/arch/arm/boot/dts/exynos4412-trats2.dts
@@ -385,6 +385,8 @@
&dsi_0 {
vddcore-supply = <&ldo8_reg>;
vddio-supply = <&ldo10_reg>;
+ samsung,burst-clock-frequency = <500000000>;
+ samsung,esc-clock-frequency = <20000000>;
samsung,pll-clock-frequency = <24000000>;
status = "okay";
diff --git a/arch/arm/boot/dts/exynos4412.dtsi b/arch/arm/boot/dts/exynos4412.dtsi
index 235bbb69ad7c..7ff03a7e8fb9 100644
--- a/arch/arm/boot/dts/exynos4412.dtsi
+++ b/arch/arm/boot/dts/exynos4412.dtsi
@@ -76,73 +76,73 @@
compatible = "operating-points-v2";
opp-shared;
- opp@200000000 {
+ opp-200000000 {
opp-hz = /bits/ 64 <200000000>;
opp-microvolt = <900000>;
clock-latency-ns = <200000>;
};
- opp@300000000 {
+ opp-300000000 {
opp-hz = /bits/ 64 <300000000>;
opp-microvolt = <900000>;
clock-latency-ns = <200000>;
};
- opp@400000000 {
+ opp-400000000 {
opp-hz = /bits/ 64 <400000000>;
opp-microvolt = <925000>;
clock-latency-ns = <200000>;
};
- opp@500000000 {
+ opp-500000000 {
opp-hz = /bits/ 64 <500000000>;
opp-microvolt = <950000>;
clock-latency-ns = <200000>;
};
- opp@600000000 {
+ opp-600000000 {
opp-hz = /bits/ 64 <600000000>;
opp-microvolt = <975000>;
clock-latency-ns = <200000>;
};
- opp@700000000 {
+ opp-700000000 {
opp-hz = /bits/ 64 <700000000>;
opp-microvolt = <987500>;
clock-latency-ns = <200000>;
};
- opp@800000000 {
+ opp-800000000 {
opp-hz = /bits/ 64 <800000000>;
opp-microvolt = <1000000>;
clock-latency-ns = <200000>;
opp-suspend;
};
- opp@900000000 {
+ opp-900000000 {
opp-hz = /bits/ 64 <900000000>;
opp-microvolt = <1037500>;
clock-latency-ns = <200000>;
};
- opp@1000000000 {
+ opp-1000000000 {
opp-hz = /bits/ 64 <1000000000>;
opp-microvolt = <1087500>;
clock-latency-ns = <200000>;
};
- opp@1100000000 {
+ opp-1100000000 {
opp-hz = /bits/ 64 <1100000000>;
opp-microvolt = <1137500>;
clock-latency-ns = <200000>;
};
- opp@1200000000 {
+ opp-1200000000 {
opp-hz = /bits/ 64 <1200000000>;
opp-microvolt = <1187500>;
clock-latency-ns = <200000>;
};
- opp@1300000000 {
+ opp-1300000000 {
opp-hz = /bits/ 64 <1300000000>;
opp-microvolt = <1250000>;
clock-latency-ns = <200000>;
};
- opp@1400000000 {
+ opp-1400000000 {
opp-hz = /bits/ 64 <1400000000>;
opp-microvolt = <1287500>;
clock-latency-ns = <200000>;
};
- cpu0_opp_1500: opp@1500000000 {
+ cpu0_opp_1500: opp-1500000000 {
opp-hz = /bits/ 64 <1500000000>;
opp-microvolt = <1350000>;
clock-latency-ns = <200000>;
@@ -215,6 +215,15 @@
};
};
+ watchdog: watchdog@10060000 {
+ compatible = "samsung,exynos5250-wdt";
+ reg = <0x10060000 0x100>;
+ interrupts = <GIC_SPI 43 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clock CLK_WDT>;
+ clock-names = "watchdog";
+ samsung,syscon-phandle = <&pmu_system_controller>;
+ };
+
adc: adc@126C0000 {
compatible = "samsung,exynos-adc-v1";
reg = <0x126C0000 0x100>;
@@ -433,23 +442,23 @@
compatible = "operating-points-v2";
opp-shared;
- opp@100000000 {
+ opp-100000000 {
opp-hz = /bits/ 64 <100000000>;
opp-microvolt = <900000>;
};
- opp@134000000 {
+ opp-134000000 {
opp-hz = /bits/ 64 <134000000>;
opp-microvolt = <900000>;
};
- opp@160000000 {
+ opp-160000000 {
opp-hz = /bits/ 64 <160000000>;
opp-microvolt = <900000>;
};
- opp@267000000 {
+ opp-267000000 {
opp-hz = /bits/ 64 <267000000>;
opp-microvolt = <950000>;
};
- opp@400000000 {
+ opp-400000000 {
opp-hz = /bits/ 64 <400000000>;
opp-microvolt = <1050000>;
};
@@ -459,16 +468,16 @@
compatible = "operating-points-v2";
opp-shared;
- opp@100000000 {
+ opp-100000000 {
opp-hz = /bits/ 64 <100000000>;
};
- opp@134000000 {
+ opp-134000000 {
opp-hz = /bits/ 64 <134000000>;
};
- opp@160000000 {
+ opp-160000000 {
opp-hz = /bits/ 64 <160000000>;
};
- opp@267000000 {
+ opp-267000000 {
opp-hz = /bits/ 64 <267000000>;
};
};
@@ -525,19 +534,19 @@
compatible = "operating-points-v2";
opp-shared;
- opp@100000000 {
+ opp-100000000 {
opp-hz = /bits/ 64 <100000000>;
opp-microvolt = <900000>;
};
- opp@134000000 {
+ opp-134000000 {
opp-hz = /bits/ 64 <134000000>;
opp-microvolt = <925000>;
};
- opp@160000000 {
+ opp-160000000 {
opp-hz = /bits/ 64 <160000000>;
opp-microvolt = <950000>;
};
- opp@200000000 {
+ opp-200000000 {
opp-hz = /bits/ 64 <200000000>;
opp-microvolt = <1000000>;
};
@@ -547,10 +556,10 @@
compatible = "operating-points-v2";
opp-shared;
- opp@160000000 {
+ opp-160000000 {
opp-hz = /bits/ 64 <160000000>;
};
- opp@200000000 {
+ opp-200000000 {
opp-hz = /bits/ 64 <200000000>;
};
};
@@ -559,10 +568,10 @@
compatible = "operating-points-v2";
opp-shared;
- opp@100000000 {
+ opp-100000000 {
opp-hz = /bits/ 64 <100000000>;
};
- opp@134000000 {
+ opp-134000000 {
opp-hz = /bits/ 64 <134000000>;
};
};
@@ -571,10 +580,10 @@
compatible = "operating-points-v2";
opp-shared;
- opp@50000000 {
+ opp-50000000 {
opp-hz = /bits/ 64 <50000000>;
};
- opp@100000000 {
+ opp-100000000 {
opp-hz = /bits/ 64 <100000000>;
};
};
diff --git a/arch/arm/boot/dts/exynos5420-tmu-sensor-conf.dtsi b/arch/arm/boot/dts/exynos5420-tmu-sensor-conf.dtsi
new file mode 100644
index 000000000000..c8771c660550
--- /dev/null
+++ b/arch/arm/boot/dts/exynos5420-tmu-sensor-conf.dtsi
@@ -0,0 +1,25 @@
+/*
+ * Device tree sources for Exynos5420 TMU sensor configuration
+ *
+ * Copyright (c) 2014 Lukasz Majewski <l.majewski@samsung.com>
+ * Copyright (c) 2017 Krzysztof Kozlowski <krzk@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <dt-bindings/thermal/thermal_exynos.h>
+
+#thermal-sensor-cells = <0>;
+samsung,tmu_gain = <8>;
+samsung,tmu_reference_voltage = <16>;
+samsung,tmu_noise_cancel_mode = <4>;
+samsung,tmu_efuse_value = <55>;
+samsung,tmu_min_efuse_value = <0>;
+samsung,tmu_max_efuse_value = <100>;
+samsung,tmu_first_point_trim = <25>;
+samsung,tmu_second_point_trim = <85>;
+samsung,tmu_default_temp_offset = <50>;
+samsung,tmu_cal_type = <TYPE_ONE_POINT_TRIMMING>;
diff --git a/arch/arm/boot/dts/exynos5420.dtsi b/arch/arm/boot/dts/exynos5420.dtsi
index 7dc9dc82afd8..0db0bcf8da36 100644
--- a/arch/arm/boot/dts/exynos5420.dtsi
+++ b/arch/arm/boot/dts/exynos5420.dtsi
@@ -49,62 +49,62 @@
cluster_a15_opp_table: opp_table0 {
compatible = "operating-points-v2";
opp-shared;
- opp@1800000000 {
+ opp-1800000000 {
opp-hz = /bits/ 64 <1800000000>;
opp-microvolt = <1250000>;
clock-latency-ns = <140000>;
};
- opp@1700000000 {
+ opp-1700000000 {
opp-hz = /bits/ 64 <1700000000>;
opp-microvolt = <1212500>;
clock-latency-ns = <140000>;
};
- opp@1600000000 {
+ opp-1600000000 {
opp-hz = /bits/ 64 <1600000000>;
opp-microvolt = <1175000>;
clock-latency-ns = <140000>;
};
- opp@1500000000 {
+ opp-1500000000 {
opp-hz = /bits/ 64 <1500000000>;
opp-microvolt = <1137500>;
clock-latency-ns = <140000>;
};
- opp@1400000000 {
+ opp-1400000000 {
opp-hz = /bits/ 64 <1400000000>;
opp-microvolt = <1112500>;
clock-latency-ns = <140000>;
};
- opp@1300000000 {
+ opp-1300000000 {
opp-hz = /bits/ 64 <1300000000>;
opp-microvolt = <1062500>;
clock-latency-ns = <140000>;
};
- opp@1200000000 {
+ opp-1200000000 {
opp-hz = /bits/ 64 <1200000000>;
opp-microvolt = <1037500>;
clock-latency-ns = <140000>;
};
- opp@1100000000 {
+ opp-1100000000 {
opp-hz = /bits/ 64 <1100000000>;
opp-microvolt = <1012500>;
clock-latency-ns = <140000>;
};
- opp@1000000000 {
+ opp-1000000000 {
opp-hz = /bits/ 64 <1000000000>;
opp-microvolt = < 987500>;
clock-latency-ns = <140000>;
};
- opp@900000000 {
+ opp-900000000 {
opp-hz = /bits/ 64 <900000000>;
opp-microvolt = < 962500>;
clock-latency-ns = <140000>;
};
- opp@800000000 {
+ opp-800000000 {
opp-hz = /bits/ 64 <800000000>;
opp-microvolt = < 937500>;
clock-latency-ns = <140000>;
};
- opp@700000000 {
+ opp-700000000 {
opp-hz = /bits/ 64 <700000000>;
opp-microvolt = < 912500>;
clock-latency-ns = <140000>;
@@ -114,42 +114,42 @@
cluster_a7_opp_table: opp_table1 {
compatible = "operating-points-v2";
opp-shared;
- opp@1300000000 {
+ opp-1300000000 {
opp-hz = /bits/ 64 <1300000000>;
opp-microvolt = <1275000>;
clock-latency-ns = <140000>;
};
- opp@1200000000 {
+ opp-1200000000 {
opp-hz = /bits/ 64 <1200000000>;
opp-microvolt = <1212500>;
clock-latency-ns = <140000>;
};
- opp@1100000000 {
+ opp-1100000000 {
opp-hz = /bits/ 64 <1100000000>;
opp-microvolt = <1162500>;
clock-latency-ns = <140000>;
};
- opp@1000000000 {
+ opp-1000000000 {
opp-hz = /bits/ 64 <1000000000>;
opp-microvolt = <1112500>;
clock-latency-ns = <140000>;
};
- opp@900000000 {
+ opp-900000000 {
opp-hz = /bits/ 64 <900000000>;
opp-microvolt = <1062500>;
clock-latency-ns = <140000>;
};
- opp@800000000 {
+ opp-800000000 {
opp-hz = /bits/ 64 <800000000>;
opp-microvolt = <1025000>;
clock-latency-ns = <140000>;
};
- opp@700000000 {
+ opp-700000000 {
opp-hz = /bits/ 64 <700000000>;
opp-microvolt = <975000>;
clock-latency-ns = <140000>;
};
- opp@600000000 {
+ opp-600000000 {
opp-hz = /bits/ 64 <600000000>;
opp-microvolt = <937500>;
clock-latency-ns = <140000>;
@@ -699,7 +699,7 @@
interrupts = <0 65 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clock CLK_TMU>;
clock-names = "tmu_apbif";
- #include "exynos4412-tmu-sensor-conf.dtsi"
+ #include "exynos5420-tmu-sensor-conf.dtsi"
};
tmu_cpu1: tmu@10064000 {
@@ -708,7 +708,7 @@
interrupts = <0 183 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clock CLK_TMU>;
clock-names = "tmu_apbif";
- #include "exynos4412-tmu-sensor-conf.dtsi"
+ #include "exynos5420-tmu-sensor-conf.dtsi"
};
tmu_cpu2: tmu@10068000 {
@@ -717,7 +717,7 @@
interrupts = <0 184 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clock CLK_TMU>, <&clock CLK_TMU>;
clock-names = "tmu_apbif", "tmu_triminfo_apbif";
- #include "exynos4412-tmu-sensor-conf.dtsi"
+ #include "exynos5420-tmu-sensor-conf.dtsi"
};
tmu_cpu3: tmu@1006c000 {
@@ -726,7 +726,7 @@
interrupts = <0 185 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clock CLK_TMU>, <&clock CLK_TMU_GPU>;
clock-names = "tmu_apbif", "tmu_triminfo_apbif";
- #include "exynos4412-tmu-sensor-conf.dtsi"
+ #include "exynos5420-tmu-sensor-conf.dtsi"
};
tmu_gpu: tmu@100a0000 {
@@ -735,7 +735,7 @@
interrupts = <0 215 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clock CLK_TMU_GPU>, <&clock CLK_TMU>;
clock-names = "tmu_apbif", "tmu_triminfo_apbif";
- #include "exynos4412-tmu-sensor-conf.dtsi"
+ #include "exynos5420-tmu-sensor-conf.dtsi"
};
sysmmu_g2dr: sysmmu@0x10A60000 {
diff --git a/arch/arm/boot/dts/exynos5440.dtsi b/arch/arm/boot/dts/exynos5440.dtsi
index 77d35bb92950..a4ea018464fc 100644
--- a/arch/arm/boot/dts/exynos5440.dtsi
+++ b/arch/arm/boot/dts/exynos5440.dtsi
@@ -189,7 +189,7 @@
};
watchdog@110000 {
- compatible = "samsung,s3c2410-wdt";
+ compatible = "samsung,s3c6410-wdt";
reg = <0x110000 0x1000>;
interrupts = <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clock CLK_B_125>;
@@ -290,11 +290,22 @@
clock-names = "usbhost";
};
+ pcie_phy0: pcie-phy@270000 {
+ #phy-cells = <0>;
+ compatible = "samsung,exynos5440-pcie-phy";
+ reg = <0x270000 0x1000>, <0x271000 0x40>;
+ };
+
+ pcie_phy1: pcie-phy@272000 {
+ #phy-cells = <0>;
+ compatible = "samsung,exynos5440-pcie-phy";
+ reg = <0x272000 0x1000>, <0x271040 0x40>;
+ };
+
pcie_0: pcie@290000 {
compatible = "samsung,exynos5440-pcie", "snps,dw-pcie";
- reg = <0x290000 0x1000
- 0x270000 0x1000
- 0x271000 0x40>;
+ reg = <0x290000 0x1000>, <0x40000000 0x1000>;
+ reg-names = "elbi", "config";
interrupts = <GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>;
@@ -303,8 +314,8 @@
#address-cells = <3>;
#size-cells = <2>;
device_type = "pci";
- ranges = <0x00000800 0 0x40000000 0x40000000 0 0x00001000 /* configuration space */
- 0x81000000 0 0 0x40001000 0 0x00010000 /* downstream I/O */
+ phys = <&pcie_phy0>;
+ ranges = <0x81000000 0 0 0x40001000 0 0x00010000 /* downstream I/O */
0x82000000 0 0x40011000 0x40011000 0 0x1ffef000>; /* non-prefetchable memory */
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 0>;
@@ -315,9 +326,8 @@
pcie_1: pcie@2a0000 {
compatible = "samsung,exynos5440-pcie", "snps,dw-pcie";
- reg = <0x2a0000 0x1000
- 0x272000 0x1000
- 0x271040 0x40>;
+ reg = <0x2a0000 0x1000>, <0x60000000 0x1000>;
+ reg-names = "elbi", "config";
interrupts = <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>;
@@ -326,8 +336,8 @@
#address-cells = <3>;
#size-cells = <2>;
device_type = "pci";
- ranges = <0x00000800 0 0x60000000 0x60000000 0 0x00001000 /* configuration space */
- 0x81000000 0 0 0x60001000 0 0x00010000 /* downstream I/O */
+ phys = <&pcie_phy1>;
+ ranges = <0x81000000 0 0 0x60001000 0 0x00010000 /* downstream I/O */
0x82000000 0 0x60011000 0x60011000 0 0x1ffef000>; /* non-prefetchable memory */
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 0>;
diff --git a/arch/arm/boot/dts/exynos5800.dtsi b/arch/arm/boot/dts/exynos5800.dtsi
index 8213016803e5..9ddb6bacac5a 100644
--- a/arch/arm/boot/dts/exynos5800.dtsi
+++ b/arch/arm/boot/dts/exynos5800.dtsi
@@ -24,60 +24,60 @@
};
&cluster_a15_opp_table {
- opp@1700000000 {
+ opp-1700000000 {
opp-microvolt = <1250000>;
};
- opp@1600000000 {
+ opp-1600000000 {
opp-microvolt = <1250000>;
};
- opp@1500000000 {
+ opp-1500000000 {
opp-microvolt = <1100000>;
};
- opp@1400000000 {
+ opp-1400000000 {
opp-microvolt = <1100000>;
};
- opp@1300000000 {
+ opp-1300000000 {
opp-microvolt = <1100000>;
};
- opp@1200000000 {
+ opp-1200000000 {
opp-microvolt = <1000000>;
};
- opp@1100000000 {
+ opp-1100000000 {
opp-microvolt = <1000000>;
};
- opp@1000000000 {
+ opp-1000000000 {
opp-microvolt = <1000000>;
};
- opp@900000000 {
+ opp-900000000 {
opp-microvolt = <1000000>;
};
- opp@800000000 {
+ opp-800000000 {
opp-microvolt = <900000>;
};
- opp@700000000 {
+ opp-700000000 {
opp-microvolt = <900000>;
};
- opp@600000000 {
+ opp-600000000 {
opp-hz = /bits/ 64 <600000000>;
opp-microvolt = <900000>;
clock-latency-ns = <140000>;
};
- opp@500000000 {
+ opp-500000000 {
opp-hz = /bits/ 64 <500000000>;
opp-microvolt = <900000>;
clock-latency-ns = <140000>;
};
- opp@400000000 {
+ opp-400000000 {
opp-hz = /bits/ 64 <400000000>;
opp-microvolt = <900000>;
clock-latency-ns = <140000>;
};
- opp@300000000 {
+ opp-300000000 {
opp-hz = /bits/ 64 <300000000>;
opp-microvolt = <900000>;
clock-latency-ns = <140000>;
};
- opp@200000000 {
+ opp-200000000 {
opp-hz = /bits/ 64 <200000000>;
opp-microvolt = <900000>;
clock-latency-ns = <140000>;
@@ -85,46 +85,46 @@
};
&cluster_a7_opp_table {
- opp@1300000000 {
+ opp-1300000000 {
opp-microvolt = <1250000>;
};
- opp@1200000000 {
+ opp-1200000000 {
opp-microvolt = <1250000>;
};
- opp@1100000000 {
+ opp-1100000000 {
opp-microvolt = <1250000>;
};
- opp@1000000000 {
+ opp-1000000000 {
opp-microvolt = <1100000>;
};
- opp@900000000 {
+ opp-900000000 {
opp-microvolt = <1100000>;
};
- opp@800000000 {
+ opp-800000000 {
opp-microvolt = <1100000>;
};
- opp@700000000 {
+ opp-700000000 {
opp-microvolt = <1000000>;
};
- opp@600000000 {
+ opp-600000000 {
opp-microvolt = <1000000>;
};
- opp@500000000 {
+ opp-500000000 {
opp-hz = /bits/ 64 <500000000>;
opp-microvolt = <1000000>;
clock-latency-ns = <140000>;
};
- opp@400000000 {
+ opp-400000000 {
opp-hz = /bits/ 64 <400000000>;
opp-microvolt = <1000000>;
clock-latency-ns = <140000>;
};
- opp@300000000 {
+ opp-300000000 {
opp-hz = /bits/ 64 <300000000>;
opp-microvolt = <900000>;
clock-latency-ns = <140000>;
};
- opp@200000000 {
+ opp-200000000 {
opp-hz = /bits/ 64 <200000000>;
opp-microvolt = <900000>;
clock-latency-ns = <140000>;
diff --git a/arch/arm/boot/dts/gemini-nas4220b.dts b/arch/arm/boot/dts/gemini-nas4220b.dts
new file mode 100644
index 000000000000..7668ba52158e
--- /dev/null
+++ b/arch/arm/boot/dts/gemini-nas4220b.dts
@@ -0,0 +1,102 @@
+/*
+ * Device Tree file for the Gemini-based Raidsonic NAS IB-4220-B
+ */
+
+/dts-v1/;
+
+#include "gemini.dtsi"
+#include <dt-bindings/input/input.h>
+
+/ {
+ model = "Raidsonic NAS IB-4220-B";
+ compatible = "raidsonic,ib-4220-b", "cortina,gemini";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ memory { /* 128 MB */
+ device_type = "memory";
+ reg = <0x00000000 0x8000000>;
+ };
+
+ chosen {
+ bootargs = "console=ttyS0,19200n8";
+ stdout-path = &uart0;
+ };
+
+ gpio_keys {
+ compatible = "gpio-keys";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ button@29 {
+ debounce_interval = <50>;
+ wakeup-source;
+ linux,code = <KEY_SETUP>;
+ label = "Backup button";
+ gpios = <&gpio1 29 GPIO_ACTIVE_LOW>;
+ };
+ button@31 {
+ debounce_interval = <50>;
+ wakeup-source;
+ linux,code = <KEY_RESTART>;
+ label = "Softreset button";
+ gpios = <&gpio1 31 GPIO_ACTIVE_LOW>;
+ };
+ };
+
+ leds {
+ compatible = "gpio-leds";
+ led@28 {
+ label = "nas4220b:orange:hdd";
+ gpios = <&gpio1 28 GPIO_ACTIVE_HIGH>;
+ default-state = "on";
+ };
+ led@30 {
+ label = "nas4220b:green:os";
+ gpios = <&gpio1 30 GPIO_ACTIVE_HIGH>;
+ default-state = "on";
+ linux,default-trigger = "heartbeat";
+ };
+ };
+
+ soc {
+ flash@30000000 {
+ status = "okay";
+ /* 16MB of flash */
+ reg = <0x30000000 0x01000000>;
+
+ partition@0 {
+ label = "RedBoot";
+ reg = <0x00000000 0x00020000>;
+ read-only;
+ };
+ partition@20000 {
+ label = "Kernel";
+ reg = <0x00020000 0x00300000>;
+ };
+ partition@320000 {
+ label = "Ramdisk";
+ reg = <0x00320000 0x00600000>;
+ };
+ partition@920000 {
+ label = "Application";
+ reg = <0x00920000 0x00600000>;
+ };
+ partition@f20000 {
+ label = "VCTL";
+ reg = <0x00f20000 0x00020000>;
+ read-only;
+ };
+ partition@f40000 {
+ label = "CurConf";
+ reg = <0x00f40000 0x000a0000>;
+ read-only;
+ };
+ partition@fe0000 {
+ label = "FIS directory";
+ reg = <0x00fe0000 0x00020000>;
+ read-only;
+ };
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/gemini-rut1xx.dts b/arch/arm/boot/dts/gemini-rut1xx.dts
new file mode 100644
index 000000000000..7b920bfbda32
--- /dev/null
+++ b/arch/arm/boot/dts/gemini-rut1xx.dts
@@ -0,0 +1,65 @@
+/*
+ * Device Tree file for Teltonika RUT1xx
+ */
+
+/dts-v1/;
+
+#include "gemini.dtsi"
+#include <dt-bindings/input/input.h>
+
+/ {
+ model = "Teltonika RUT1xx";
+ compatible = "teltonika,rut1xx", "cortina,gemini";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ memory { /* 128 MB */
+ device_type = "memory";
+ reg = <0x00000000 0x8000000>;
+ };
+
+ chosen {
+ bootargs = "console=ttyS0,115200n8";
+ stdout-path = &uart0;
+ };
+
+ gpio_keys {
+ compatible = "gpio-keys";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ button@28 {
+ debounce_interval = <50>;
+ wakeup-source;
+ linux,code = <KEY_SETUP>;
+ label = "Reset to defaults";
+ gpios = <&gpio1 28 GPIO_ACTIVE_LOW>;
+ };
+ };
+
+ leds {
+ compatible = "gpio-leds";
+ led@7 {
+ /* FIXME: add the LED color */
+ label = "rut1xx::gsm";
+ gpios = <&gpio0 7 GPIO_ACTIVE_HIGH>;
+ default-state = "on";
+ };
+ led@31 {
+ /* FIXME: add the LED color */
+ label = "rut1xx::power";
+ gpios = <&gpio0 17 GPIO_ACTIVE_HIGH>;
+ default-state = "off";
+ linux,default-trigger = "heartbeat";
+ };
+ };
+
+ soc {
+ flash@30000000 {
+ status = "okay";
+ /* 8MB of flash */
+ reg = <0x30000000 0x00800000>;
+ /* TODO: add flash partitions here */
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/gemini-sq201.dts b/arch/arm/boot/dts/gemini-sq201.dts
new file mode 100644
index 000000000000..46309e79cc7b
--- /dev/null
+++ b/arch/arm/boot/dts/gemini-sq201.dts
@@ -0,0 +1,118 @@
+/*
+ * Device Tree file for ITian Square One SQ201 NAS
+ */
+
+/dts-v1/;
+
+#include "gemini.dtsi"
+#include <dt-bindings/input/input.h>
+
+/ {
+ model = "ITian Square One SQ201";
+ compatible = "itian,sq201", "cortina,gemini";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ memory { /* 128 MB */
+ device_type = "memory";
+ reg = <0x00000000 0x8000000>;
+ };
+
+ chosen {
+ bootargs = "console=ttyS0,115200n8";
+ stdout-path = &uart0;
+ };
+
+ gpio_keys {
+ compatible = "gpio-keys";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ button@18 {
+ debounce_interval = <50>;
+ wakeup-source;
+ linux,code = <KEY_SETUP>;
+ label = "factory reset";
+ gpios = <&gpio0 18 GPIO_ACTIVE_LOW>;
+ };
+ };
+
+ leds {
+ compatible = "gpio-leds";
+ led@20 {
+ label = "sq201:green:info";
+ gpios = <&gpio0 20 GPIO_ACTIVE_HIGH>;
+ default-state = "on";
+ linux,default-trigger = "heartbeat";
+ };
+ led@31 {
+ label = "sq201:green:usb";
+ gpios = <&gpio0 31 GPIO_ACTIVE_HIGH>;
+ default-state = "off";
+ linux,default-trigger = "usb-host";
+ };
+ };
+
+ soc {
+ flash@30000000 {
+ status = "okay";
+ /* 16MB of flash */
+ reg = <0x30000000 0x01000000>;
+
+ partition@0 {
+ label = "RedBoot";
+ reg = <0x00000000 0x00120000>;
+ read-only;
+ };
+ partition@120000 {
+ label = "Kernel";
+ reg = <0x00120000 0x00200000>;
+ };
+ partition@320000 {
+ label = "Ramdisk";
+ reg = <0x00320000 0x00600000>;
+ };
+ partition@920000 {
+ label = "Application";
+ reg = <0x00920000 0x00600000>;
+ };
+ partition@f20000 {
+ label = "VCTL";
+ reg = <0x00f20000 0x00020000>;
+ read-only;
+ };
+ partition@f40000 {
+ label = "CurConf";
+ reg = <0x00f40000 0x000a0000>;
+ read-only;
+ };
+ partition@fe0000 {
+ label = "FIS directory";
+ reg = <0x00fe0000 0x00020000>;
+ read-only;
+ };
+ };
+
+ pci@50000000 {
+ status = "okay";
+ interrupt-map-mask = <0xf800 0 0 7>;
+ interrupt-map =
+ <0x4800 0 0 1 &pci_intc 0>, /* Slot 9 */
+ <0x4800 0 0 2 &pci_intc 1>,
+ <0x4800 0 0 3 &pci_intc 2>,
+ <0x4800 0 0 4 &pci_intc 3>,
+ <0x5000 0 0 1 &pci_intc 1>, /* Slot 10 */
+ <0x5000 0 0 2 &pci_intc 2>,
+ <0x5000 0 0 3 &pci_intc 3>,
+ <0x5000 0 0 4 &pci_intc 0>,
+ <0x5800 0 0 1 &pci_intc 2>, /* Slot 11 */
+ <0x5800 0 0 2 &pci_intc 3>,
+ <0x5800 0 0 3 &pci_intc 0>,
+ <0x5800 0 0 4 &pci_intc 1>,
+ <0x6000 0 0 1 &pci_intc 3>, /* Slot 12 */
+ <0x6000 0 0 2 &pci_intc 0>,
+ <0x6000 0 0 3 &pci_intc 1>,
+ <0x6000 0 0 4 &pci_intc 2>;
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/gemini-wbd111.dts b/arch/arm/boot/dts/gemini-wbd111.dts
new file mode 100644
index 000000000000..63b756e3bf5a
--- /dev/null
+++ b/arch/arm/boot/dts/gemini-wbd111.dts
@@ -0,0 +1,102 @@
+/*
+ * Device Tree file for Wiliboard WBD-111
+ */
+
+/dts-v1/;
+
+#include "gemini.dtsi"
+#include <dt-bindings/input/input.h>
+
+/ {
+ model = "Wiliboard WBD-111";
+ compatible = "wiliboard,wbd111", "cortina,gemini";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ memory { /* 128 MB */
+ device_type = "memory";
+ reg = <0x00000000 0x8000000>;
+ };
+
+ chosen {
+ bootargs = "console=ttyS0,115200n8";
+ stdout-path = &uart0;
+ };
+
+ gpio_keys {
+ compatible = "gpio-keys";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ button@5 {
+ debounce_interval = <50>;
+ wakeup-source;
+ linux,code = <KEY_SETUP>;
+ label = "reset";
+ gpios = <&gpio0 5 GPIO_ACTIVE_LOW>;
+ };
+ };
+
+ leds {
+ compatible = "gpio-leds";
+
+ led@1 {
+ label = "wbd111:red:L3";
+ gpios = <&gpio0 1 GPIO_ACTIVE_HIGH>;
+ default-state = "off";
+ };
+ led@2 {
+ label = "wbd111:green:L4";
+ gpios = <&gpio0 2 GPIO_ACTIVE_HIGH>;
+ default-state = "off";
+ };
+ led@3 {
+ label = "wbd111:red:L4";
+ gpios = <&gpio0 3 GPIO_ACTIVE_HIGH>;
+ default-state = "off";
+ };
+ led@5 {
+ label = "wbd111:green:L3";
+ gpios = <&gpio0 5 GPIO_ACTIVE_HIGH>;
+ default-state = "on";
+ linux,default-trigger = "heartbeat";
+ };
+ };
+
+ soc {
+ flash@30000000 {
+ status = "okay";
+ /* 8MB of flash */
+ reg = <0x30000000 0x00800000>;
+
+ partition@0 {
+ label = "RedBoot";
+ reg = <0x00000000 0x00020000>;
+ read-only;
+ };
+ partition@20000 {
+ label = "kernel";
+ reg = <0x00020000 0x00100000>;
+ };
+ partition@120000 {
+ label = "rootfs";
+ reg = <0x00120000 0x006a0000>;
+ };
+ partition@7c0000 {
+ label = "VCTL";
+ reg = <0x007c0000 0x00010000>;
+ read-only;
+ };
+ partition@7d0000 {
+ label = "cfg";
+ reg = <0x007d0000 0x00010000>;
+ read-only;
+ };
+ partition@7e0000 {
+ label = "FIS";
+ reg = <0x007e0000 0x00010000>;
+ read-only;
+ };
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/gemini-wbd222.dts b/arch/arm/boot/dts/gemini-wbd222.dts
new file mode 100644
index 000000000000..9747f5a47807
--- /dev/null
+++ b/arch/arm/boot/dts/gemini-wbd222.dts
@@ -0,0 +1,102 @@
+/*
+ * Device Tree file for Wiliboard WBD-222
+ */
+
+/dts-v1/;
+
+#include "gemini.dtsi"
+#include <dt-bindings/input/input.h>
+
+/ {
+ model = "Wiliboard WBD-222";
+ compatible = "wiliboard,wbd222", "cortina,gemini";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ memory { /* 128 MB */
+ device_type = "memory";
+ reg = <0x00000000 0x8000000>;
+ };
+
+ chosen {
+ bootargs = "console=ttyS0,115200n8";
+ stdout-path = &uart0;
+ };
+
+ gpio_keys {
+ compatible = "gpio-keys";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ button@5 {
+ debounce_interval = <50>;
+ wakeup-source;
+ linux,code = <KEY_SETUP>;
+ label = "reset";
+ gpios = <&gpio0 5 GPIO_ACTIVE_LOW>;
+ };
+ };
+
+ leds {
+ compatible = "gpio-leds";
+
+ led@1 {
+ label = "wbd111:red:L3";
+ gpios = <&gpio0 1 GPIO_ACTIVE_HIGH>;
+ default-state = "off";
+ };
+ led@2 {
+ label = "wbd111:green:L4";
+ gpios = <&gpio0 2 GPIO_ACTIVE_HIGH>;
+ default-state = "off";
+ };
+ led@3 {
+ label = "wbd111:red:L4";
+ gpios = <&gpio0 3 GPIO_ACTIVE_HIGH>;
+ default-state = "off";
+ };
+ led@5 {
+ label = "wbd111:green:L3";
+ gpios = <&gpio0 5 GPIO_ACTIVE_HIGH>;
+ default-state = "on";
+ linux,default-trigger = "heartbeat";
+ };
+ };
+
+ soc {
+ flash@30000000 {
+ status = "okay";
+ /* 8MB of flash */
+ reg = <0x30000000 0x00800000>;
+
+ partition@0 {
+ label = "RedBoot";
+ reg = <0x00000000 0x00020000>;
+ read-only;
+ };
+ partition@20000 {
+ label = "kernel";
+ reg = <0x00020000 0x00100000>;
+ };
+ partition@120000 {
+ label = "rootfs";
+ reg = <0x00120000 0x006a0000>;
+ };
+ partition@7c0000 {
+ label = "VCTL";
+ reg = <0x007c0000 0x00010000>;
+ read-only;
+ };
+ partition@7d0000 {
+ label = "cfg";
+ reg = <0x007d0000 0x00010000>;
+ read-only;
+ };
+ partition@7e0000 {
+ label = "FIS";
+ reg = <0x007e0000 0x00010000>;
+ read-only;
+ };
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/gemini.dtsi b/arch/arm/boot/dts/gemini.dtsi
new file mode 100644
index 000000000000..b8d011bdcc76
--- /dev/null
+++ b/arch/arm/boot/dts/gemini.dtsi
@@ -0,0 +1,156 @@
+/*
+ * Device Tree file for Cortina systems Gemini SoC
+ */
+
+/include/ "skeleton.dtsi"
+
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/gpio/gpio.h>
+
+/ {
+ soc {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ compatible = "simple-bus";
+ interrupt-parent = <&intcon>;
+
+ flash@30000000 {
+ compatible = "cortina,gemini-flash", "cfi-flash";
+ syscon = <&syscon>;
+ bank-width = <2>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ status = "disabled";
+ };
+
+ syscon: syscon@40000000 {
+ compatible = "cortina,gemini-syscon", "syscon", "simple-mfd";
+ reg = <0x40000000 0x1000>;
+
+ syscon-reboot {
+ compatible = "syscon-reboot";
+ regmap = <&syscon>;
+ /* GLOBAL_RESET register */
+ offset = <0x0c>;
+ /* RESET_GLOBAL | RESET_CPU1 */
+ mask = <0xC0000000>;
+ };
+ };
+
+ watchdog@41000000 {
+ compatible = "cortina,gemini-watchdog";
+ reg = <0x41000000 0x1000>;
+ interrupts = <3 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ uart0: serial@42000000 {
+ compatible = "ns16550a";
+ reg = <0x42000000 0x100>;
+ clock-frequency = <48000000>;
+ interrupts = <18 IRQ_TYPE_LEVEL_HIGH>;
+ reg-shift = <2>;
+ };
+
+ timer@43000000 {
+ compatible = "cortina,gemini-timer";
+ reg = <0x43000000 0x1000>;
+ interrupt-parent = <&intcon>;
+ interrupts = <14 IRQ_TYPE_EDGE_FALLING>, /* Timer 1 */
+ <15 IRQ_TYPE_EDGE_FALLING>, /* Timer 2 */
+ <16 IRQ_TYPE_EDGE_FALLING>; /* Timer 3 */
+ syscon = <&syscon>;
+ };
+
+ rtc@45000000 {
+ compatible = "cortina,gemini-rtc";
+ reg = <0x45000000 0x100>;
+ interrupts = <17 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ intcon: interrupt-controller@48000000 {
+ compatible = "faraday,ftintc010";
+ reg = <0x48000000 0x1000>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ power-controller@4b000000 {
+ compatible = "cortina,gemini-power-controller";
+ reg = <0x4b000000 0x100>;
+ interrupts = <26 IRQ_TYPE_EDGE_RISING>;
+ };
+
+ gpio0: gpio@4d000000 {
+ compatible = "cortina,gemini-gpio", "faraday,ftgpio010";
+ reg = <0x4d000000 0x100>;
+ interrupts = <22 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpio1: gpio@4e000000 {
+ compatible = "cortina,gemini-gpio", "faraday,ftgpio010";
+ reg = <0x4e000000 0x100>;
+ interrupts = <23 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpio2: gpio@4f000000 {
+ compatible = "cortina,gemini-gpio", "faraday,ftgpio010";
+ reg = <0x4f000000 0x100>;
+ interrupts = <24 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ pci@50000000 {
+ compatible = "cortina,gemini-pci", "faraday,ftpci100";
+ /*
+ * The first 256 bytes in the IO range are actually used
+ * to configure the host bridge.
+ */
+ reg = <0x50000000 0x100>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+ #interrupt-cells = <1>;
+ status = "disabled";
+
+ bus-range = <0x00 0xff>;
+ /* PCI ranges mappings */
+ ranges =
+ /* 1MiB I/O space 0x50000000-0x500fffff */
+ <0x01000000 0 0 0x50000000 0 0x00100000>,
+ /* 128MiB non-prefetchable memory 0x58000000-0x5fffffff */
+ <0x02000000 0 0x58000000 0x58000000 0 0x08000000>;
+
+ /* DMA ranges */
+ dma-ranges =
+ /* 128MiB at 0x00000000-0x07ffffff */
+ <0x02000000 0 0x00000000 0x00000000 0 0x08000000>,
+ /* 64MiB at 0x00000000-0x03ffffff */
+ <0x02000000 0 0x00000000 0x00000000 0 0x04000000>,
+ /* 64MiB at 0x00000000-0x03ffffff */
+ <0x02000000 0 0x00000000 0x00000000 0 0x04000000>;
+
+ /*
+ * This PCI host bridge variant has a cascaded interrupt
+ * controller embedded in the host bridge.
+ */
+ pci_intc: interrupt-controller {
+ interrupt-parent = <&intcon>;
+ interrupts = <8 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-controller;
+ #address-cells = <0>;
+ #interrupt-cells = <1>;
+ };
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/imx25-eukrea-mbimxsd25-baseboard.dts b/arch/arm/boot/dts/imx25-eukrea-mbimxsd25-baseboard.dts
index 9300711f1ea3..db39bd6b8e00 100644
--- a/arch/arm/boot/dts/imx25-eukrea-mbimxsd25-baseboard.dts
+++ b/arch/arm/boot/dts/imx25-eukrea-mbimxsd25-baseboard.dts
@@ -179,8 +179,6 @@
};
&usbotg {
- phy_type = "utmi";
- dr_mode = "otg";
external-vbus-divider;
status = "okay";
};
diff --git a/arch/arm/boot/dts/imx25-pdk.dts b/arch/arm/boot/dts/imx25-pdk.dts
index 70292101ba03..d921dd2ed676 100644
--- a/arch/arm/boot/dts/imx25-pdk.dts
+++ b/arch/arm/boot/dts/imx25-pdk.dts
@@ -309,8 +309,6 @@
};
&usbotg {
- phy_type = "utmi";
- dr_mode = "otg";
external-vbus-divider;
status = "okay";
};
diff --git a/arch/arm/boot/dts/imx25-pinfunc.h b/arch/arm/boot/dts/imx25-pinfunc.h
index f840f03ad171..6c63dca1b9b8 100644
--- a/arch/arm/boot/dts/imx25-pinfunc.h
+++ b/arch/arm/boot/dts/imx25-pinfunc.h
@@ -17,8 +17,6 @@
* <mux_reg conf_reg input_reg mux_mode input_val>
*/
-#define MX25_PAD_TDO__TDO 0x000 0x3e8 0x000 0x00 0x000
-
#define MX25_PAD_A10__A10 0x008 0x000 0x000 0x00 0x000
#define MX25_PAD_A10__GPIO_4_0 0x008 0x000 0x000 0x05 0x000
@@ -68,7 +66,6 @@
#define MX25_PAD_A22__A22 0x030 0x000 0x000 0x00 0x000
#define MX25_PAD_A22__GPIO_2_8 0x030 0x000 0x000 0x05 0x000
-#define MX25_PAD_A22__FEC_TDATA2 0x030 0x000 0x000 0x07 0x000
#define MX25_PAD_A22__SIM2_VEN1 0x030 0x000 0x000 0x06 0x000
#define MX25_PAD_A22__FEC_TDATA2 0x030 0x000 0x000 0x07 0x000
@@ -542,6 +539,8 @@
#define MX25_PAD_RTCK__OWIRE 0x1ec 0x3e4 0x000 0x01 0x000
#define MX25_PAD_RTCK__GPIO_3_14 0x1ec 0x3e4 0x000 0x05 0x000
+#define MX25_PAD_TDO__TDO 0x000 0x3e8 0x000 0x00 0x000
+
#define MX25_PAD_DE_B__DE_B 0x1f0 0x3ec 0x000 0x00 0x000
#define MX25_PAD_DE_B__GPIO_2_20 0x1f0 0x3ec 0x000 0x05 0x000
diff --git a/arch/arm/boot/dts/imx25.dtsi b/arch/arm/boot/dts/imx25.dtsi
index e0ba55016a04..0cdf333336cd 100644
--- a/arch/arm/boot/dts/imx25.dtsi
+++ b/arch/arm/boot/dts/imx25.dtsi
@@ -93,6 +93,11 @@
reg = <0x43f00000 0x100000>;
ranges;
+ aips1: bridge@43f00000 {
+ compatible = "fsl,imx25-aips";
+ reg = <0x43f00000 0x4000>;
+ };
+
i2c1: i2c@43f80000 {
#address-cells = <1>;
#size-cells = <0>;
@@ -342,6 +347,11 @@
reg = <0x53f00000 0x100000>;
ranges;
+ aips2: bridge@53f00000 {
+ compatible = "fsl,imx25-aips";
+ reg = <0x53f00000 0x4000>;
+ };
+
clks: ccm@53f80000 {
compatible = "fsl,imx25-ccm";
reg = <0x53f80000 0x4000>;
@@ -544,6 +554,8 @@
clock-names = "ipg", "ahb", "per";
fsl,usbmisc = <&usbmisc 0>;
fsl,usbphy = <&usbphy0>;
+ phy_type = "utmi";
+ dr_mode = "otg";
status = "disabled";
};
diff --git a/arch/arm/boot/dts/imx28-duckbill-2-485.dts b/arch/arm/boot/dts/imx28-duckbill-2-485.dts
new file mode 100644
index 000000000000..bd3fd470f9c3
--- /dev/null
+++ b/arch/arm/boot/dts/imx28-duckbill-2-485.dts
@@ -0,0 +1,189 @@
+/*
+ * Copyright (C) 2015-2017 I2SE GmbH <info@i2se.com>
+ * Copyright (C) 2016 Michael Heimpold <mhei@heimpold.de>
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/dts-v1/;
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/gpio/gpio.h>
+#include "imx28.dtsi"
+
+/ {
+ model = "I2SE Duckbill 2 485";
+ compatible = "i2se,duckbill-2-485", "i2se,duckbill-2", "fsl,imx28";
+
+ memory {
+ reg = <0x40000000 0x08000000>;
+ };
+
+ apb@80000000 {
+ apbh@80000000 {
+ ssp0: ssp@80010000 {
+ compatible = "fsl,imx28-mmc";
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmc0_8bit_pins_a
+ &mmc0_cd_cfg &mmc0_sck_cfg>;
+ bus-width = <8>;
+ vmmc-supply = <&reg_3p3v>;
+ status = "okay";
+ non-removable;
+ };
+
+ ssp2: ssp@80014000 {
+ compatible = "fsl,imx28-mmc";
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmc2_4bit_pins_b
+ &mmc2_cd_cfg &mmc2_sck_cfg_b>;
+ bus-width = <4>;
+ vmmc-supply = <&reg_3p3v>;
+ status = "okay";
+ };
+
+ pinctrl@80018000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&hog_pins_a>;
+
+ hog_pins_a: hog@0 {
+ reg = <0>;
+ fsl,pinmux-ids = <
+ MX28_PAD_LCD_D17__GPIO_1_17 /* Revision detection */
+ >;
+ fsl,drive-strength = <MXS_DRIVE_4mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
+ };
+
+ mac0_phy_reset_pin: mac0-phy-reset@0 {
+ reg = <0>;
+ fsl,pinmux-ids = <
+ MX28_PAD_GPMI_ALE__GPIO_0_26 /* PHY Reset */
+ >;
+ fsl,drive-strength = <MXS_DRIVE_4mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
+ };
+
+ mac0_phy_int_pin: mac0-phy-int@0 {
+ reg = <0>;
+ fsl,pinmux-ids = <
+ MX28_PAD_GPMI_D07__GPIO_0_7 /* PHY Interrupt */
+ >;
+ fsl,drive-strength = <MXS_DRIVE_4mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
+ };
+
+ led_pins: leds@0 {
+ reg = <0>;
+ fsl,pinmux-ids = <
+ MX28_PAD_SAIF0_MCLK__GPIO_3_20
+ MX28_PAD_SAIF0_LRCLK__GPIO_3_21
+ MX28_PAD_I2C0_SCL__GPIO_3_24
+ MX28_PAD_I2C0_SDA__GPIO_3_25
+ >;
+ fsl,drive-strength = <MXS_DRIVE_4mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
+ };
+ };
+ };
+
+ apbx@80040000 {
+ lradc@80050000 {
+ status = "okay";
+ };
+
+ auart0: serial@8006a000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&auart0_2pins_a>;
+ status = "okay";
+ };
+
+ duart: serial@80074000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&duart_pins_a>;
+ status = "okay";
+ };
+
+ usbphy0: usbphy@8007c000 {
+ status = "okay";
+ };
+ };
+ };
+
+ ahb@80080000 {
+ usb0: usb@80080000 {
+ status = "okay";
+ dr_mode = "peripheral";
+ };
+
+ mac0: ethernet@800f0000 {
+ phy-mode = "rmii";
+ pinctrl-names = "default";
+ pinctrl-0 = <&mac0_pins_a>, <&mac0_phy_reset_pin>;
+ phy-supply = <&reg_3p3v>;
+ phy-reset-gpios = <&gpio0 26 GPIO_ACTIVE_LOW>;
+ phy-reset-duration = <25>;
+ phy-handle = <&ethphy>;
+ status = "okay";
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ethphy: ethernet-phy@0 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&mac0_phy_int_pin>;
+ interrupt-parent = <&gpio0>;
+ interrupts = <7 IRQ_TYPE_EDGE_FALLING>;
+ max-speed = <100>;
+ };
+ };
+ };
+ };
+
+ reg_3p3v: regulator-3p3v {
+ compatible = "regulator-fixed";
+ regulator-name = "3P3V";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ leds {
+ compatible = "gpio-leds";
+ pinctrl-names = "default";
+ pinctrl-0 = <&led_pins>;
+
+ status-red {
+ label = "duckbill:red:status";
+ gpios = <&gpio3 21 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "default-on";
+ };
+
+ status-green {
+ label = "duckbill:green:status";
+ gpios = <&gpio3 20 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "heartbeat";
+ };
+
+ rs485-red {
+ label = "duckbill:red:rs485";
+ gpios = <&gpio3 24 GPIO_ACTIVE_LOW>;
+ };
+
+ rs485-green {
+ label = "duckbill:green:rs485";
+ gpios = <&gpio3 25 GPIO_ACTIVE_LOW>;
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/imx28-duckbill-2-enocean.dts b/arch/arm/boot/dts/imx28-duckbill-2-enocean.dts
new file mode 100644
index 000000000000..4450047885eb
--- /dev/null
+++ b/arch/arm/boot/dts/imx28-duckbill-2-enocean.dts
@@ -0,0 +1,220 @@
+/*
+ * Copyright (C) 2015-2017 I2SE GmbH <info@i2se.com>
+ * Copyright (C) 2016 Michael Heimpold <mhei@heimpold.de>
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/dts-v1/;
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/gpio/gpio.h>
+#include "imx28.dtsi"
+
+/ {
+ model = "I2SE Duckbill 2 EnOcean";
+ compatible = "i2se,duckbill-2-enocean", "i2se,duckbill-2", "fsl,imx28";
+
+ memory {
+ reg = <0x40000000 0x08000000>;
+ };
+
+ apb@80000000 {
+ apbh@80000000 {
+ ssp0: ssp@80010000 {
+ compatible = "fsl,imx28-mmc";
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmc0_8bit_pins_a
+ &mmc0_cd_cfg &mmc0_sck_cfg>;
+ bus-width = <8>;
+ vmmc-supply = <&reg_3p3v>;
+ status = "okay";
+ non-removable;
+ };
+
+ ssp2: ssp@80014000 {
+ compatible = "fsl,imx28-mmc";
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmc2_4bit_pins_b
+ &mmc2_cd_cfg &mmc2_sck_cfg_b>;
+ bus-width = <4>;
+ vmmc-supply = <&reg_3p3v>;
+ status = "okay";
+ };
+
+ pinctrl@80018000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&hog_pins_a>;
+
+ hog_pins_a: hog@0 {
+ reg = <0>;
+ fsl,pinmux-ids = <
+ MX28_PAD_LCD_D17__GPIO_1_17 /* Revision detection */
+ >;
+ fsl,drive-strength = <MXS_DRIVE_4mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
+ };
+
+ mac0_phy_reset_pin: mac0-phy-reset@0 {
+ reg = <0>;
+ fsl,pinmux-ids = <
+ MX28_PAD_GPMI_ALE__GPIO_0_26 /* PHY Reset */
+ >;
+ fsl,drive-strength = <MXS_DRIVE_4mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
+ };
+
+ mac0_phy_int_pin: mac0-phy-int@0 {
+ reg = <0>;
+ fsl,pinmux-ids = <
+ MX28_PAD_GPMI_D07__GPIO_0_7 /* PHY Interrupt */
+ >;
+ fsl,drive-strength = <MXS_DRIVE_4mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
+ };
+
+ led_pins: leds@0 {
+ reg = <0>;
+ fsl,pinmux-ids = <
+ MX28_PAD_SAIF0_MCLK__GPIO_3_20
+ MX28_PAD_SAIF0_LRCLK__GPIO_3_21
+ MX28_PAD_AUART0_CTS__GPIO_3_2
+ MX28_PAD_I2C0_SCL__GPIO_3_24
+ MX28_PAD_I2C0_SDA__GPIO_3_25
+ >;
+ fsl,drive-strength = <MXS_DRIVE_4mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
+ };
+
+ enocean_button: enocean-button@0 {
+ reg = <0>;
+ fsl,pinmux-ids = <
+ MX28_PAD_AUART0_RTS__GPIO_3_3
+ >;
+ fsl,drive-strength = <MXS_DRIVE_4mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
+ };
+ };
+ };
+
+ apbx@80040000 {
+ lradc@80050000 {
+ status = "okay";
+ };
+
+ auart0: serial@8006a000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&auart0_2pins_a>;
+ status = "okay";
+ };
+
+ duart: serial@80074000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&duart_pins_a>;
+ status = "okay";
+ };
+
+ usbphy0: usbphy@8007c000 {
+ status = "okay";
+ };
+ };
+ };
+
+ ahb@80080000 {
+ usb0: usb@80080000 {
+ status = "okay";
+ dr_mode = "peripheral";
+ };
+
+ mac0: ethernet@800f0000 {
+ phy-mode = "rmii";
+ pinctrl-names = "default";
+ pinctrl-0 = <&mac0_pins_a>, <&mac0_phy_reset_pin>;
+ phy-supply = <&reg_3p3v>;
+ phy-reset-gpios = <&gpio0 26 GPIO_ACTIVE_LOW>;
+ phy-reset-duration = <25>;
+ phy-handle = <&ethphy>;
+ status = "okay";
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ethphy: ethernet-phy@0 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&mac0_phy_int_pin>;
+ interrupt-parent = <&gpio0>;
+ interrupts = <7 IRQ_TYPE_EDGE_FALLING>;
+ max-speed = <100>;
+ };
+ };
+ };
+ };
+
+ reg_3p3v: regulator-3p3v {
+ compatible = "regulator-fixed";
+ regulator-name = "3P3V";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ leds {
+ compatible = "gpio-leds";
+ pinctrl-names = "default";
+ pinctrl-0 = <&led_pins>;
+
+ status-red {
+ label = "duckbill:red:status";
+ gpios = <&gpio3 21 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "default-on";
+ };
+
+ status-green {
+ label = "duckbill:green:status";
+ gpios = <&gpio3 20 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "heartbeat";
+ };
+
+ enocean-blue {
+ label = "duckbill:blue:enocean";
+ gpios = <&gpio3 24 GPIO_ACTIVE_LOW>;
+ };
+
+ enocean-red {
+ label = "duckbill:red:enocean";
+ gpios = <&gpio3 25 GPIO_ACTIVE_LOW>;
+ };
+
+ enocean-green {
+ label = "duckbill:green:enocean";
+ gpios = <&gpio3 2 GPIO_ACTIVE_LOW>;
+ };
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&enocean_button>;
+
+ enocean {
+ label = "EnOcean";
+ linux,code = <KEY_NEW>;
+ gpios = <&gpio3 3 GPIO_ACTIVE_HIGH>;
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/imx28-duckbill-2-spi.dts b/arch/arm/boot/dts/imx28-duckbill-2-spi.dts
new file mode 100644
index 000000000000..927732efca98
--- /dev/null
+++ b/arch/arm/boot/dts/imx28-duckbill-2-spi.dts
@@ -0,0 +1,199 @@
+/*
+ * Copyright (C) 2015-2017 I2SE GmbH <info@i2se.com>
+ * Copyright (C) 2016 Michael Heimpold <mhei@heimpold.de>
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/dts-v1/;
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/gpio/gpio.h>
+#include "imx28.dtsi"
+
+/ {
+ model = "I2SE Duckbill 2 SPI";
+ compatible = "i2se,duckbill-2-spi", "i2se,duckbill-2", "fsl,imx28";
+
+ aliases {
+ ethernet1 = &qca7000;
+ };
+
+ memory {
+ reg = <0x40000000 0x08000000>;
+ };
+
+ apb@80000000 {
+ apbh@80000000 {
+ ssp0: ssp@80010000 {
+ compatible = "fsl,imx28-mmc";
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmc0_8bit_pins_a
+ &mmc0_cd_cfg &mmc0_sck_cfg>;
+ bus-width = <8>;
+ vmmc-supply = <&reg_3p3v>;
+ status = "okay";
+ non-removable;
+ };
+
+ ssp2: ssp@80014000 {
+ compatible = "fsl,imx28-spi";
+ pinctrl-names = "default";
+ pinctrl-0 = <&spi2_pins_a>;
+ status = "okay";
+
+ qca7000: ethernet@0 {
+ reg = <0>;
+ compatible = "qca,qca7000";
+ pinctrl-names = "default";
+ pinctrl-0 = <&qca7000_pins>;
+ interrupt-parent = <&gpio3>;
+ interrupts = <3 IRQ_TYPE_EDGE_RISING>;
+ spi-cpha;
+ spi-cpol;
+ spi-max-frequency = <8000000>;
+ };
+ };
+
+ pinctrl@80018000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&hog_pins_a>;
+
+ hog_pins_a: hog@0 {
+ reg = <0>;
+ fsl,pinmux-ids = <
+ MX28_PAD_LCD_D17__GPIO_1_17 /* Revision detection */
+ >;
+ fsl,drive-strength = <MXS_DRIVE_4mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
+ };
+
+ mac0_phy_reset_pin: mac0-phy-reset@0 {
+ reg = <0>;
+ fsl,pinmux-ids = <
+ MX28_PAD_GPMI_ALE__GPIO_0_26 /* PHY Reset */
+ >;
+ fsl,drive-strength = <MXS_DRIVE_4mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
+ };
+
+ mac0_phy_int_pin: mac0-phy-int@0 {
+ reg = <0>;
+ fsl,pinmux-ids = <
+ MX28_PAD_GPMI_D07__GPIO_0_7 /* PHY Interrupt */
+ >;
+ fsl,drive-strength = <MXS_DRIVE_4mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
+ };
+
+ led_pins: led@0 {
+ reg = <0>;
+ fsl,pinmux-ids = <
+ MX28_PAD_SAIF0_MCLK__GPIO_3_20
+ MX28_PAD_SAIF0_LRCLK__GPIO_3_21
+ >;
+ fsl,drive-strength = <MXS_DRIVE_4mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
+ };
+
+ qca7000_pins: qca7000@0 {
+ reg = <0>;
+ fsl,pinmux-ids = <
+ MX28_PAD_AUART0_RTS__GPIO_3_3 /* Interrupt */
+ MX28_PAD_LCD_D13__GPIO_1_13 /* QCA7K reset */
+ MX28_PAD_LCD_D14__GPIO_1_14 /* GPIO 0 */
+ MX28_PAD_LCD_D15__GPIO_1_15 /* GPIO 1 */
+ MX28_PAD_LCD_D18__GPIO_1_18 /* GPIO 2 */
+ MX28_PAD_LCD_D21__GPIO_1_21 /* GPIO 3 */
+ >;
+ fsl,drive-strength = <MXS_DRIVE_4mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
+ };
+ };
+ };
+
+ apbx@80040000 {
+ lradc@80050000 {
+ status = "okay";
+ };
+
+ duart: serial@80074000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&duart_pins_a>;
+ status = "okay";
+ };
+
+ usbphy0: usbphy@8007c000 {
+ status = "okay";
+ };
+ };
+ };
+
+ ahb@80080000 {
+ usb0: usb@80080000 {
+ status = "okay";
+ dr_mode = "peripheral";
+ };
+
+ mac0: ethernet@800f0000 {
+ phy-mode = "rmii";
+ pinctrl-names = "default";
+ pinctrl-0 = <&mac0_pins_a>, <&mac0_phy_reset_pin>;
+ phy-supply = <&reg_3p3v>;
+ phy-reset-gpios = <&gpio0 26 GPIO_ACTIVE_LOW>;
+ phy-reset-duration = <25>;
+ phy-handle = <&ethphy>;
+ status = "okay";
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ethphy: ethernet-phy@0 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&mac0_phy_int_pin>;
+ interrupt-parent = <&gpio0>;
+ interrupts = <7 IRQ_TYPE_EDGE_FALLING>;
+ max-speed = <100>;
+ };
+ };
+ };
+ };
+
+ reg_3p3v: regulator-3p3v {
+ compatible = "regulator-fixed";
+ regulator-name = "3P3V";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ leds {
+ compatible = "gpio-leds";
+ pinctrl-names = "default";
+ pinctrl-0 = <&led_pins>;
+
+ status-red {
+ label = "duckbill:red:status";
+ gpios = <&gpio3 21 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "default-on";
+ };
+
+ status-green {
+ label = "duckbill:green:status";
+ gpios = <&gpio3 20 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "heartbeat";
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/imx28-duckbill-2.dts b/arch/arm/boot/dts/imx28-duckbill-2.dts
new file mode 100644
index 000000000000..7fa3d759505c
--- /dev/null
+++ b/arch/arm/boot/dts/imx28-duckbill-2.dts
@@ -0,0 +1,183 @@
+/*
+ * Copyright (C) 2015-2017 I2SE GmbH <info@i2se.com>
+ * Copyright (C) 2016 Michael Heimpold <mhei@heimpold.de>
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/dts-v1/;
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/gpio/gpio.h>
+#include "imx28.dtsi"
+
+/ {
+ model = "I2SE Duckbill 2";
+ compatible = "i2se,duckbill-2", "fsl,imx28";
+
+ memory {
+ reg = <0x40000000 0x08000000>;
+ };
+
+ apb@80000000 {
+ apbh@80000000 {
+ ssp0: ssp@80010000 {
+ compatible = "fsl,imx28-mmc";
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmc0_8bit_pins_a
+ &mmc0_cd_cfg &mmc0_sck_cfg>;
+ bus-width = <8>;
+ vmmc-supply = <&reg_3p3v>;
+ status = "okay";
+ non-removable;
+ };
+
+ ssp2: ssp@80014000 {
+ compatible = "fsl,imx28-mmc";
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmc2_4bit_pins_b
+ &mmc2_cd_cfg &mmc2_sck_cfg_b>;
+ bus-width = <4>;
+ vmmc-supply = <&reg_3p3v>;
+ status = "okay";
+ };
+
+ pinctrl@80018000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&hog_pins_a>;
+
+ hog_pins_a: hog@0 {
+ reg = <0>;
+ fsl,pinmux-ids = <
+ MX28_PAD_LCD_D17__GPIO_1_17 /* Revision detection */
+ >;
+ fsl,drive-strength = <MXS_DRIVE_4mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
+ };
+
+ mac0_phy_reset_pin: mac0-phy-reset@0 {
+ reg = <0>;
+ fsl,pinmux-ids = <
+ MX28_PAD_GPMI_ALE__GPIO_0_26 /* PHY Reset */
+ >;
+ fsl,drive-strength = <MXS_DRIVE_4mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
+ };
+
+ mac0_phy_int_pin: mac0-phy-int@0 {
+ reg = <0>;
+ fsl,pinmux-ids = <
+ MX28_PAD_GPMI_D07__GPIO_0_7 /* PHY Interrupt */
+ >;
+ fsl,drive-strength = <MXS_DRIVE_4mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
+ };
+
+ led_pins: leds@0 {
+ reg = <0>;
+ fsl,pinmux-ids = <
+ MX28_PAD_SAIF0_MCLK__GPIO_3_20
+ MX28_PAD_SAIF0_LRCLK__GPIO_3_21
+ >;
+ fsl,drive-strength = <MXS_DRIVE_4mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
+ };
+ };
+ };
+
+ apbx@80040000 {
+ lradc@80050000 {
+ status = "okay";
+ };
+
+ i2c0: i2c@80058000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c0_pins_a>;
+ status = "okay";
+ };
+
+ auart0: serial@8006a000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&auart0_2pins_a>;
+ status = "okay";
+ };
+
+ duart: serial@80074000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&duart_pins_a>;
+ status = "okay";
+ };
+
+ usbphy0: usbphy@8007c000 {
+ status = "okay";
+ };
+ };
+ };
+
+ ahb@80080000 {
+ usb0: usb@80080000 {
+ status = "okay";
+ dr_mode = "peripheral";
+ };
+
+ mac0: ethernet@800f0000 {
+ phy-mode = "rmii";
+ pinctrl-names = "default";
+ pinctrl-0 = <&mac0_pins_a>, <&mac0_phy_reset_pin>;
+ phy-supply = <&reg_3p3v>;
+ phy-reset-gpios = <&gpio0 26 GPIO_ACTIVE_LOW>;
+ phy-reset-duration = <25>;
+ phy-handle = <&ethphy>;
+ status = "okay";
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ethphy: ethernet-phy@0 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&mac0_phy_int_pin>;
+ interrupt-parent = <&gpio0>;
+ interrupts = <7 IRQ_TYPE_EDGE_FALLING>;
+ max-speed = <100>;
+ };
+ };
+ };
+ };
+
+ reg_3p3v: regulator-3p3v {
+ compatible = "regulator-fixed";
+ regulator-name = "3P3V";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ leds {
+ compatible = "gpio-leds";
+ pinctrl-names = "default";
+ pinctrl-0 = <&led_pins>;
+
+ status-red {
+ label = "duckbill:red:status";
+ gpios = <&gpio3 21 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "default-on";
+ };
+
+ status-green {
+ label = "duckbill:green:status";
+ gpios = <&gpio3 20 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "heartbeat";
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/imx28-duckbill.dts b/arch/arm/boot/dts/imx28-duckbill.dts
index ce1a7effba37..3e4385d4ed78 100644
--- a/arch/arm/boot/dts/imx28-duckbill.dts
+++ b/arch/arm/boot/dts/imx28-duckbill.dts
@@ -1,5 +1,6 @@
/*
- * Copyright (C) 2013 Michael Heimpold <mhei@heimpold.de>
+ * Copyright (C) 2013-2014,2016 Michael Heimpold <mhei@heimpold.de>
+ * Copyright (C) 2015-2017 I2SE GmbH <info@i2se.com>
*
* The code contained herein is licensed under the GNU General Public
* License. You may obtain a copy of the GNU General Public License
@@ -10,6 +11,7 @@
*/
/dts-v1/;
+#include <dt-bindings/gpio/gpio.h>
#include "imx28.dtsi"
/ {
@@ -32,6 +34,13 @@
status = "okay";
};
+ ssp2: ssp@80014000 {
+ compatible = "fsl,imx28-spi";
+ pinctrl-names = "default";
+ pinctrl-0 = <&spi2_pins_a>;
+ status = "okay";
+ };
+
pinctrl@80018000 {
pinctrl-names = "default";
pinctrl-0 = <&hog_pins_a>;
@@ -39,14 +48,24 @@
hog_pins_a: hog@0 {
reg = <0>;
fsl,pinmux-ids = <
- MX28_PAD_SSP0_DATA7__GPIO_2_7 /* PHY Reset */
+ MX28_PAD_LCD_D17__GPIO_1_17 /* Revision detection */
+ >;
+ fsl,drive-strength = <MXS_DRIVE_4mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
+ };
+
+ mac0_phy_reset_pin: mac0-phy-reset@0 {
+ reg = <0>;
+ fsl,pinmux-ids = <
+ MX28_PAD_SSP0_DATA7__GPIO_2_7 /* PHY Reset */
>;
fsl,drive-strength = <MXS_DRIVE_4mA>;
fsl,voltage = <MXS_VOLTAGE_HIGH>;
fsl,pull-up = <MXS_PULL_DISABLE>;
};
- led_pins_a: led_gpio@0 {
+ led_pins: leds@0 {
reg = <0>;
fsl,pinmux-ids = <
MX28_PAD_AUART1_RX__GPIO_3_4
@@ -60,6 +79,22 @@
};
apbx@80040000 {
+ lradc@80050000 {
+ status = "okay";
+ };
+
+ i2c0: i2c@80058000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c0_pins_a>;
+ status = "okay";
+ };
+
+ auart0: serial@8006a000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&auart0_2pins_a>;
+ status = "okay";
+ };
+
duart: serial@80074000 {
pinctrl-names = "default";
pinctrl-0 = <&duart_pins_a>;
@@ -75,47 +110,43 @@
ahb@80080000 {
usb0: usb@80080000 {
status = "okay";
+ dr_mode = "peripheral";
};
mac0: ethernet@800f0000 {
phy-mode = "rmii";
pinctrl-names = "default";
- pinctrl-0 = <&mac0_pins_a>;
+ pinctrl-0 = <&mac0_pins_a>, <&mac0_phy_reset_pin>;
phy-supply = <&reg_3p3v>;
phy-reset-gpios = <&gpio2 7 GPIO_ACTIVE_LOW>;
- phy-reset-duration = <100>;
+ phy-reset-duration = <25>;
status = "okay";
};
};
- regulators {
- compatible = "simple-bus";
- #address-cells = <1>;
- #size-cells = <0>;
-
- reg_3p3v: regulator@0 {
- compatible = "regulator-fixed";
- reg = <0>;
- regulator-name = "3P3V";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- regulator-always-on;
- };
+ reg_3p3v: regulator-3p3v {
+ compatible = "regulator-fixed";
+ regulator-name = "3P3V";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
};
leds {
compatible = "gpio-leds";
pinctrl-names = "default";
- pinctrl-0 = <&led_pins_a>;
+ pinctrl-0 = <&led_pins>;
- status {
- label = "duckbill:green:status";
- gpios = <&gpio3 5 GPIO_ACTIVE_HIGH>;
- };
-
- failure {
+ status-red {
label = "duckbill:red:status";
gpios = <&gpio3 4 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "default-on";
+ };
+
+ status-green {
+ label = "duckbill:green:status";
+ gpios = <&gpio3 5 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "heartbeat";
};
};
};
diff --git a/arch/arm/boot/dts/imx28-m28cu3.dts b/arch/arm/boot/dts/imx28-m28cu3.dts
index 2df63bee6f4e..bb5329479c62 100644
--- a/arch/arm/boot/dts/imx28-m28cu3.dts
+++ b/arch/arm/boot/dts/imx28-m28cu3.dts
@@ -57,7 +57,7 @@
pinctrl-names = "default";
pinctrl-0 = <&mmc2_4bit_pins_a
&mmc2_cd_cfg
- &mmc2_sck_cfg>;
+ &mmc2_sck_cfg_a>;
bus-width = <4>;
vmmc-supply = <&reg_vddio_sd1>;
status = "okay";
diff --git a/arch/arm/boot/dts/imx28.dtsi b/arch/arm/boot/dts/imx28.dtsi
index 148fcf4d3b98..2f4ebe0318d3 100644
--- a/arch/arm/boot/dts/imx28.dtsi
+++ b/arch/arm/boot/dts/imx28.dtsi
@@ -590,6 +590,22 @@
fsl,pull-up = <MXS_PULL_ENABLE>;
};
+ mmc2_4bit_pins_b: mmc2-4bit@1 {
+ reg = <1>;
+ fsl,pinmux-ids = <
+ MX28_PAD_SSP2_SCK__SSP2_SCK
+ MX28_PAD_SSP2_MOSI__SSP2_CMD
+ MX28_PAD_SSP2_MISO__SSP2_D0
+ MX28_PAD_SSP2_SS0__SSP2_D3
+ MX28_PAD_SSP2_SS1__SSP2_D1
+ MX28_PAD_SSP2_SS2__SSP2_D2
+ MX28_PAD_AUART1_RX__SSP2_CARD_DETECT
+ >;
+ fsl,drive-strength = <MXS_DRIVE_8mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_ENABLE>;
+ };
+
mmc2_cd_cfg: mmc2-cd-cfg {
fsl,pinmux-ids = <
MX28_PAD_AUART1_RX__SSP2_CARD_DETECT
@@ -597,7 +613,8 @@
fsl,pull-up = <MXS_PULL_DISABLE>;
};
- mmc2_sck_cfg: mmc2-sck-cfg {
+ mmc2_sck_cfg_a: mmc2-sck-cfg@0 {
+ reg = <0>;
fsl,pinmux-ids = <
MX28_PAD_SSP0_DATA7__SSP2_SCK
>;
@@ -605,6 +622,15 @@
fsl,pull-up = <MXS_PULL_DISABLE>;
};
+ mmc2_sck_cfg_b: mmc2-sck-cfg@1 {
+ reg = <1>;
+ fsl,pinmux-ids = <
+ MX28_PAD_SSP2_SCK__SSP2_SCK
+ >;
+ fsl,drive-strength = <MXS_DRIVE_12mA>;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
+ };
+
i2c0_pins_a: i2c0@0 {
reg = <0>;
fsl,pinmux-ids = <
diff --git a/arch/arm/boot/dts/imx50.dtsi b/arch/arm/boot/dts/imx50.dtsi
index ceae909e2201..2a98afcd8a4e 100644
--- a/arch/arm/boot/dts/imx50.dtsi
+++ b/arch/arm/boot/dts/imx50.dtsi
@@ -109,7 +109,7 @@
ranges;
esdhc1: esdhc@50004000 {
- compatible = "fsl,imx50-esdhc";
+ compatible = "fsl,imx50-esdhc", "fsl,imx53-esdhc";
reg = <0x50004000 0x4000>;
interrupts = <1>;
clocks = <&clks IMX5_CLK_ESDHC1_IPG_GATE>,
@@ -121,7 +121,7 @@
};
esdhc2: esdhc@50008000 {
- compatible = "fsl,imx50-esdhc";
+ compatible = "fsl,imx50-esdhc", "fsl,imx53-esdhc";
reg = <0x50008000 0x4000>;
interrupts = <2>;
clocks = <&clks IMX5_CLK_ESDHC2_IPG_GATE>,
@@ -170,7 +170,7 @@
};
esdhc3: esdhc@50020000 {
- compatible = "fsl,imx50-esdhc";
+ compatible = "fsl,imx50-esdhc", "fsl,imx53-esdhc";
reg = <0x50020000 0x4000>;
interrupts = <3>;
clocks = <&clks IMX5_CLK_ESDHC3_IPG_GATE>,
@@ -182,7 +182,7 @@
};
esdhc4: esdhc@50024000 {
- compatible = "fsl,imx50-esdhc";
+ compatible = "fsl,imx50-esdhc", "fsl,imx53-esdhc";
reg = <0x50024000 0x4000>;
interrupts = <4>;
clocks = <&clks IMX5_CLK_ESDHC4_IPG_GATE>,
diff --git a/arch/arm/boot/dts/imx53-qsb.dts b/arch/arm/boot/dts/imx53-qsb.dts
index f4c158cce908..d3d662e37677 100644
--- a/arch/arm/boot/dts/imx53-qsb.dts
+++ b/arch/arm/boot/dts/imx53-qsb.dts
@@ -88,8 +88,8 @@
};
ldo7_reg: ldo7 {
- regulator-min-microvolt = <1200000>;
- regulator-max-microvolt = <3600000>;
+ regulator-min-microvolt = <2750000>;
+ regulator-max-microvolt = <2750000>;
};
ldo8_reg: ldo8 {
diff --git a/arch/arm/boot/dts/imx53-qsrb.dts b/arch/arm/boot/dts/imx53-qsrb.dts
index 479ca4c9e384..de2215832372 100644
--- a/arch/arm/boot/dts/imx53-qsrb.dts
+++ b/arch/arm/boot/dts/imx53-qsrb.dts
@@ -128,8 +128,8 @@
vdac_reg: vdac {
regulator-name = "VDAC";
- regulator-min-microvolt = <2500000>;
- regulator-max-microvolt = <2775000>;
+ regulator-min-microvolt = <2750000>;
+ regulator-max-microvolt = <2750000>;
};
vgen1_reg: vgen1 {
diff --git a/arch/arm/boot/dts/imx6dl-gw5903.dts b/arch/arm/boot/dts/imx6dl-gw5903.dts
new file mode 100644
index 000000000000..103261ea9334
--- /dev/null
+++ b/arch/arm/boot/dts/imx6dl-gw5903.dts
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2017 Gateworks Corporation
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this file; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
+ * MA 02110-1301 USA
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+#include "imx6dl.dtsi"
+#include "imx6qdl-gw5903.dtsi"
+
+/ {
+ model = "Gateworks Ventana i.MX6 Duallite/Solo GW5903";
+ compatible = "gw,imx6dl-gw5903", "gw,ventana", "fsl,imx6dl";
+};
diff --git a/arch/arm/boot/dts/imx6dl-gw5904.dts b/arch/arm/boot/dts/imx6dl-gw5904.dts
new file mode 100644
index 000000000000..9c6d3cd3d6a7
--- /dev/null
+++ b/arch/arm/boot/dts/imx6dl-gw5904.dts
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2017 Gateworks Corporation
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this file; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
+ * MA 02110-1301 USA
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+#include "imx6dl.dtsi"
+#include "imx6qdl-gw5904.dtsi"
+
+/ {
+ model = "Gateworks Ventana i.MX6 DualLite/Solo GW5904";
+ compatible = "gw,imx6dl-gw5904", "gw,ventana", "fsl,imx6dl";
+};
diff --git a/arch/arm/boot/dts/imx6q-b450v3.dts b/arch/arm/boot/dts/imx6q-b450v3.dts
index 116bebb5e435..404a93d9596b 100644
--- a/arch/arm/boot/dts/imx6q-b450v3.dts
+++ b/arch/arm/boot/dts/imx6q-b450v3.dts
@@ -104,4 +104,11 @@
output-low;
line-name = "PCA9539-P05";
};
+
+ P07 {
+ gpio-hog;
+ gpios = <7 0>;
+ output-low;
+ line-name = "PCA9539-P07";
+ };
};
diff --git a/arch/arm/boot/dts/imx6q-b650v3.dts b/arch/arm/boot/dts/imx6q-b650v3.dts
index 33f5c436c09f..7f9f176901d4 100644
--- a/arch/arm/boot/dts/imx6q-b650v3.dts
+++ b/arch/arm/boot/dts/imx6q-b650v3.dts
@@ -97,6 +97,13 @@
output-low;
line-name = "PCA9539-P05";
};
+
+ P07 {
+ gpio-hog;
+ gpios = <7 0>;
+ output-low;
+ line-name = "PCA9539-P07";
+ };
};
&usbphy1 {
diff --git a/arch/arm/boot/dts/imx6q-b850v3.dts b/arch/arm/boot/dts/imx6q-b850v3.dts
index d78514c92349..2c1e98e0cf7b 100644
--- a/arch/arm/boot/dts/imx6q-b850v3.dts
+++ b/arch/arm/boot/dts/imx6q-b850v3.dts
@@ -72,6 +72,14 @@
fsl,data-mapping = "spwg";
fsl,data-width = <24>;
status = "okay";
+
+ port@4 {
+ reg = <4>;
+
+ lvds0_out: endpoint {
+ remote-endpoint = <&stdp4028_in>;
+ };
+ };
};
};
@@ -142,3 +150,65 @@
reg = <0x4a>;
};
};
+
+&mux2_i2c2 {
+ clock-frequency = <100000>;
+
+ stdp2690@72 {
+ compatible = "megachips,stdp2690-ge-b850v3-fw";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x72>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ stdp2690_in: endpoint {
+ remote-endpoint = <&stdp4028_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ stdp2690_out: endpoint {
+ /* Connector for external display */
+ };
+ };
+ };
+ };
+
+ stdp4028@73 {
+ compatible = "megachips,stdp4028-ge-b850v3-fw";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x73>;
+ interrupt-parent = <&gpio2>;
+ interrupts = <0 IRQ_TYPE_LEVEL_HIGH>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ stdp4028_in: endpoint {
+ remote-endpoint = <&lvds0_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ stdp4028_out: endpoint {
+ remote-endpoint = <&stdp2690_in>;
+ };
+ };
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/imx6q-bx50v3.dtsi b/arch/arm/boot/dts/imx6q-bx50v3.dtsi
index 36d6bb39593a..c90b26f00e24 100644
--- a/arch/arm/boot/dts/imx6q-bx50v3.dtsi
+++ b/arch/arm/boot/dts/imx6q-bx50v3.dtsi
@@ -102,7 +102,7 @@
m25_eeprom: m25p80@0 {
compatible = "atmel,at25";
- spi-max-frequency = <20000000>;
+ spi-max-frequency = <10000000>;
size = <0x8000>;
pagesize = <64>;
reg = <0>;
@@ -183,20 +183,6 @@
interrupt-parent = <&gpio2>;
interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
- P06 {
- gpio-hog;
- gpios = <6 0>;
- output-low;
- line-name = "PCA9539-P06";
- };
-
- P07 {
- gpio-hog;
- gpios = <7 0>;
- output-low;
- line-name = "PCA9539-P07";
- };
-
P10 {
gpio-hog;
gpios = <8 0>;
diff --git a/arch/arm/boot/dts/imx6q-cm-fx6.dts b/arch/arm/boot/dts/imx6q-cm-fx6.dts
index d8a5789a4bc8..66cac5328b86 100644
--- a/arch/arm/boot/dts/imx6q-cm-fx6.dts
+++ b/arch/arm/boot/dts/imx6q-cm-fx6.dts
@@ -43,6 +43,7 @@
/dts-v1/;
#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/sound/fsl-imx-audmux.h>
#include "imx6q.dtsi"
/ {
@@ -90,6 +91,34 @@
enable-active-high;
};
+ sound-analog {
+ compatible = "simple-audio-card";
+ simple-audio-card,name = "On-board analog audio";
+ simple-audio-card,widgets =
+ "Headphone", "Headphone Jack",
+ "Line", "Line Out",
+ "Microphone", "Mic Jack",
+ "Line", "Line In";
+ simple-audio-card,routing =
+ "Headphone Jack", "RHPOUT",
+ "Headphone Jack", "LHPOUT",
+ "MICIN", "Mic Bias",
+ "Mic Bias", "Mic Jack";
+ simple-audio-card,format = "i2s";
+ simple-audio-card,bitclock-master = <&sound_master>;
+ simple-audio-card,frame-master = <&sound_master>;
+ simple-audio-card,bitclock-inversion;
+
+ sound_master: simple-audio-card,cpu {
+ sound-dai = <&ssi2>;
+ system-clock-frequency = <2822400>;
+ };
+
+ simple-audio-card,codec {
+ sound-dai = <&wm8731>;
+ };
+ };
+
sound-spdif {
compatible = "fsl,imx-audio-spdif";
model = "imx-spdif";
@@ -99,6 +128,36 @@
};
};
+&audmux {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_audmux>;
+ status = "okay";
+
+ ssi2 {
+ fsl,audmux-port = <1>;
+ fsl,port-config = <
+ (IMX_AUDMUX_V2_PTCR_RCLKDIR |
+ IMX_AUDMUX_V2_PTCR_RCSEL(3 | 0x8) |
+ IMX_AUDMUX_V2_PTCR_TCLKDIR |
+ IMX_AUDMUX_V2_PTCR_TCSEL(3))
+ IMX_AUDMUX_V2_PDCR_RXDSEL(3)
+ >;
+ };
+
+ audmux4 {
+ fsl,audmux-port = <3>;
+ fsl,port-config = <
+ (IMX_AUDMUX_V2_PTCR_TFSDIR |
+ IMX_AUDMUX_V2_PTCR_TFSEL(1) |
+ IMX_AUDMUX_V2_PTCR_RCLKDIR |
+ IMX_AUDMUX_V2_PTCR_RCSEL(1 | 0x8) |
+ IMX_AUDMUX_V2_PTCR_TCLKDIR |
+ IMX_AUDMUX_V2_PTCR_TCSEL(1))
+ IMX_AUDMUX_V2_PDCR_RXDSEL(1)
+ >;
+ };
+};
+
&cpu0 {
/*
* Although the imx6q fuse indicates that 1.2GHz operation is possible,
@@ -160,9 +219,25 @@
reg = <0x50>;
pagesize = <16>;
};
+
+ wm8731: codec@1a {
+ #sound-dai-cells = <0>;
+ compatible = "wlf,wm8731";
+ reg = <0x1a>;
+ };
};
&iomuxc {
+ pinctrl_audmux: audmuxgrp {
+ fsl,pins = <
+ MX6QDL_PAD_SD2_CMD__AUD4_RXC 0x17059
+ MX6QDL_PAD_SD2_DAT0__AUD4_RXD 0x17059
+ MX6QDL_PAD_SD2_DAT3__AUD4_TXC 0x17059
+ MX6QDL_PAD_SD2_DAT2__AUD4_TXD 0x17059
+ MX6QDL_PAD_SD2_DAT1__AUD4_TXFS 0x17059
+ >;
+ };
+
pinctrl_ecspi1: ecspi1grp {
fsl,pins = <
MX6QDL_PAD_EIM_D16__ECSPI1_SCLK 0x100b1
@@ -279,6 +354,14 @@
status = "okay";
};
+&ssi2 {
+ assigned-clocks = <&clks IMX6QDL_CLK_SSI2_SEL>,
+ <&clks IMX6QDL_CLK_PLL4_AUDIO_DIV>;
+ assigned-clock-parents = <&clks IMX6QDL_CLK_PLL4_AUDIO_DIV>;
+ assigned-clock-rates = <0>, <786432000>;
+ status = "okay";
+};
+
&uart4 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart4>;
diff --git a/arch/arm/boot/dts/imx6q-gw5903.dts b/arch/arm/boot/dts/imx6q-gw5903.dts
new file mode 100644
index 000000000000..a182e4cb0e6e
--- /dev/null
+++ b/arch/arm/boot/dts/imx6q-gw5903.dts
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2017 Gateworks Corporation
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this file; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
+ * MA 02110-1301 USA
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+#include "imx6q.dtsi"
+#include "imx6qdl-gw5903.dtsi"
+
+/ {
+ model = "Gateworks Ventana i.MX6 Dual/Quad GW5903";
+ compatible = "gw,imx6q-gw5903", "gw,ventana", "fsl,imx6q";
+};
diff --git a/arch/arm/boot/dts/imx6q-gw5904.dts b/arch/arm/boot/dts/imx6q-gw5904.dts
new file mode 100644
index 000000000000..ca1e2ae3341e
--- /dev/null
+++ b/arch/arm/boot/dts/imx6q-gw5904.dts
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2017 Gateworks Corporation
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this file; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
+ * MA 02110-1301 USA
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+#include "imx6q.dtsi"
+#include "imx6qdl-gw5904.dtsi"
+
+/ {
+ model = "Gateworks Ventana i.MX6 Dual/Quad GW5904";
+ compatible = "gw,imx6q-gw5904", "gw,ventana", "fsl,imx6q";
+};
+
+&sata {
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/imx6q-icore-ofcap10.dts b/arch/arm/boot/dts/imx6q-icore-ofcap10.dts
new file mode 100644
index 000000000000..49b60ca20e6d
--- /dev/null
+++ b/arch/arm/boot/dts/imx6q-icore-ofcap10.dts
@@ -0,0 +1,76 @@
+/*
+ * Copyright (C) 2016 Amarula Solutions B.V.
+ * Copyright (C) 2016 Engicam S.r.l.
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+
+#include "imx6q.dtsi"
+#include "imx6qdl-icore.dtsi"
+
+/ {
+ model = "Engicam i.CoreM6 Quad/Dual OpenFrame Capacitive touch 10.1 Kit";
+ compatible = "engicam,imx6-icore", "fsl,imx6q";
+};
+
+&ldb {
+ status = "okay";
+
+ lvds-channel@0 {
+ fsl,data-mapping = "spwg";
+ fsl,data-width = <24>;
+ status = "okay";
+
+ display-timings {
+ native-mode = <&timing0>;
+ timing0: timing0 {
+ clock-frequency = <60000000>;
+ hactive = <1280>;
+ vactive = <800>;
+ hback-porch = <40>;
+ hfront-porch = <40>;
+ vback-porch = <10>;
+ vfront-porch = <3>;
+ hsync-len = <80>;
+ vsync-len = <10>;
+ };
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/imx6q-icore-ofcap12.dts b/arch/arm/boot/dts/imx6q-icore-ofcap12.dts
new file mode 100644
index 000000000000..9e230f56c5fb
--- /dev/null
+++ b/arch/arm/boot/dts/imx6q-icore-ofcap12.dts
@@ -0,0 +1,76 @@
+/*
+ * Copyright (C) 2016 Amarula Solutions B.V.
+ * Copyright (C) 2016 Engicam S.r.l.
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+
+#include "imx6q.dtsi"
+#include "imx6qdl-icore.dtsi"
+
+/ {
+ model = "Engicam i.CoreM6 Quad/Dual OpenFrame Capacitive touch 12 Kit";
+ compatible = "engicam,imx6-icore", "fsl,imx6q";
+};
+
+&ldb {
+ status = "okay";
+
+ lvds-channel@0 {
+ fsl,data-mapping = "spwg";
+ fsl,data-width = <18>;
+ status = "okay";
+
+ display-timings {
+ native-mode = <&timing0>;
+ timing0: timing0 {
+ clock-frequency = <46800000>;
+ hactive = <1280>;
+ vactive = <480>;
+ hback-porch = <353>;
+ hfront-porch = <47>;
+ vback-porch = <39>;
+ vfront-porch = <4>;
+ hsync-len = <8>;
+ vsync-len = <2>;
+ };
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/imx6q-icore.dts b/arch/arm/boot/dts/imx6q-icore.dts
index 59eb7adc2472..5613dd9dc469 100644
--- a/arch/arm/boot/dts/imx6q-icore.dts
+++ b/arch/arm/boot/dts/imx6q-icore.dts
@@ -57,3 +57,37 @@
&can2 {
status = "okay";
};
+
+&i2c1 {
+ max11801: touchscreen@48 {
+ compatible = "maxim,max11801";
+ reg = <0x48>;
+ interrupt-parent = <&gpio3>;
+ interrupts = <31 IRQ_TYPE_EDGE_FALLING>;
+ };
+};
+
+&ldb {
+ status = "okay";
+
+ lvds-channel@0 {
+ fsl,data-mapping = "spwg";
+ fsl,data-width = <18>;
+ status = "okay";
+
+ display-timings {
+ native-mode = <&timing0>;
+ timing0: timing0 {
+ clock-frequency = <60000000>;
+ hactive = <800>;
+ vactive = <480>;
+ hback-porch = <30>;
+ hfront-porch = <30>;
+ vback-porch = <5>;
+ vfront-porch = <5>;
+ hsync-len = <64>;
+ vsync-len = <20>;
+ };
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/imx6q-utilite-pro.dts b/arch/arm/boot/dts/imx6q-utilite-pro.dts
index 69bdd82ce21f..d900ad6ec5f8 100644
--- a/arch/arm/boot/dts/imx6q-utilite-pro.dts
+++ b/arch/arm/boot/dts/imx6q-utilite-pro.dts
@@ -101,9 +101,11 @@
hdmi-connector {
compatible = "hdmi-connector";
-
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_hpd>;
type = "a";
ddc-i2c-bus = <&i2c_dvi_ddc>;
+ hpd-gpios = <&gpio1 4 GPIO_ACTIVE_HIGH>;
port {
hdmi_connector_in: endpoint {
@@ -209,6 +211,12 @@
>;
};
+ pinctrl_hpd: hpdgrp {
+ fsl,pins = <
+ MX6QDL_PAD_GPIO_4__GPIO1_IO04 0x1b0b0
+ >;
+ };
+
pinctrl_i2c1: i2c1grp {
fsl,pins = <
MX6QDL_PAD_EIM_D21__I2C1_SCL 0x4001b8b1
diff --git a/arch/arm/boot/dts/imx6q-zii-rdu2.dts b/arch/arm/boot/dts/imx6q-zii-rdu2.dts
new file mode 100644
index 000000000000..b2d346640fd7
--- /dev/null
+++ b/arch/arm/boot/dts/imx6q-zii-rdu2.dts
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2016-2017 Zodiac Inflight Innovations
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+
+#include "imx6q.dtsi"
+#include "imx6qdl-zii-rdu2.dtsi"
+
+/ {
+ model = "ZII RDU2 Board";
+ compatible = "zii,imx6q-zii-rdu2", "fsl,imx6q";
+};
diff --git a/arch/arm/boot/dts/imx6qdl-gw5903.dtsi b/arch/arm/boot/dts/imx6qdl-gw5903.dtsi
new file mode 100644
index 000000000000..444425153fc7
--- /dev/null
+++ b/arch/arm/boot/dts/imx6qdl-gw5903.dtsi
@@ -0,0 +1,654 @@
+/*
+ * Copyright 2017 Gateworks Corporation
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this file; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
+ * MA 02110-1301 USA
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <dt-bindings/gpio/gpio.h>
+
+/ {
+ chosen {
+ stdout-path = &uart2;
+ };
+
+ backlight {
+ compatible = "pwm-backlight";
+ pwms = <&pwm1 0 5000000>;
+ brightness-levels = <
+ 0 1 2 3 4 5 6 7 8 9
+ 10 11 12 13 14 15 16 17 18 19
+ 20 21 22 23 24 25 26 27 28 29
+ 30 31 32 33 34 35 36 37 38 39
+ 40 41 42 43 44 45 46 47 48 49
+ 50 51 52 53 54 55 56 57 58 59
+ 60 61 62 63 64 65 66 67 68 69
+ 70 71 72 73 74 75 76 77 78 79
+ 80 81 82 83 84 85 86 87 88 89
+ 90 91 92 93 94 95 96 97 98 99
+ 100
+ >;
+ default-brightness-level = <100>;
+ };
+
+ leds {
+ compatible = "gpio-leds";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpio_leds>;
+
+ led0: user1 {
+ label = "user1";
+ gpios = <&gpio6 14 GPIO_ACTIVE_LOW>; /* MX6_LOCLED# */
+ default-state = "off";
+ };
+ };
+
+ memory {
+ reg = <0x10000000 0x40000000>;
+ };
+
+ reg_5p0v: regulator-5p0v {
+ compatible = "regulator-fixed";
+ regulator-name = "5P0V";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-always-on;
+ };
+
+ reg_3p3v: regulator-3p3v {
+ compatible = "regulator-fixed";
+ regulator-name = "3P3V";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ reg_2p5v: regulator-2p5v {
+ compatible = "regulator-fixed";
+ regulator-name = "2P5V";
+ regulator-min-microvolt = <2500000>;
+ regulator-max-microvolt = <2500000>;
+ regulator-always-on;
+ };
+
+ reg_usb_h1_vbus: regulator-usb-h1-vbus {
+ compatible = "regulator-fixed";
+ regulator-name = "usb_h1_vbus";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ gpio = <&gpio3 30 0>;
+ enable-active-high;
+ };
+
+ reg_usb_otg_vbus: regulator-usb-otg-vbus {
+ compatible = "regulator-fixed";
+ regulator-name = "usb_otg_vbus";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ gpio = <&gpio4 15 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
+ reg_12p0: regulator-12p0v {
+ compatible = "regulator-fixed";
+ regulator-name = "12P0V";
+ regulator-min-microvolt = <12000000>;
+ regulator-max-microvolt = <12000000>;
+ gpio = <&gpio1 7 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
+ sound {
+ compatible = "fsl,imx-audio-tlv320";
+ model = "imx-tlv320";
+ ssi-controller = <&ssi1>;
+ audio-codec = <&tlv320aic3105>;
+ /* routing of sink, source */
+ audio-routing =
+ /* TLV320 LINE1L pin <-> Mic Jack connector */
+ "LINE1L", "Mic Jack",
+ /* board Headphone Jack <-> HPOUT */
+ "Headphone Jack", "HPLOUT",
+ "Headphone Jack", "HPROUT",
+ "Mic Jack", "Mic Bias";
+ mux-int-port = <1>;
+ mux-ext-port = <6>;
+ };
+};
+
+&audmux {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_audmux>;
+ status = "okay";
+};
+
+&clks {
+ assigned-clocks = <&clks IMX6QDL_CLK_LDB_DI0_SEL>,
+ <&clks IMX6QDL_CLK_LDB_DI1_SEL>;
+ assigned-clock-parents = <&clks IMX6QDL_CLK_PLL3_USB_OTG>,
+ <&clks IMX6QDL_CLK_PLL3_USB_OTG>;
+};
+
+&fec {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_enet>;
+ phy-mode = "rgmii-id";
+ status = "okay";
+};
+
+&i2c1 {
+ clock-frequency = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c1>;
+ status = "okay";
+
+ pca9555: gpio@23 {
+ compatible = "nxp,pca9555";
+ reg = <0x23>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ eeprom1: eeprom@50 {
+ compatible = "atmel,24c02";
+ reg = <0x50>;
+ pagesize = <16>;
+ };
+
+ eeprom2: eeprom@51 {
+ compatible = "atmel,24c02";
+ reg = <0x51>;
+ pagesize = <16>;
+ };
+
+ eeprom3: eeprom@52 {
+ compatible = "atmel,24c02";
+ reg = <0x52>;
+ pagesize = <16>;
+ };
+
+ eeprom4: eeprom@53 {
+ compatible = "atmel,24c02";
+ reg = <0x53>;
+ pagesize = <16>;
+ };
+
+ ds1672: rtc@68 {
+ compatible = "dallas,ds1672";
+ reg = <0x68>;
+ };
+};
+
+&i2c2 {
+ clock-frequency = <400000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c2>;
+ status = "okay";
+
+ ltc3676: pmic@3c {
+ compatible = "lltc,ltc3676";
+ reg = <0x3c>;
+ interrupt-parent = <&gpio1>;
+ interrupts = <8 IRQ_TYPE_EDGE_FALLING>;
+
+ regulators {
+ /* VDD_1P8 (1+R1/R2 = 2.505): Aud/eMMC/microSD/Touch */
+ reg_1p8v: sw1 {
+ regulator-name = "vdd1p8";
+ regulator-min-microvolt = <1033310>;
+ regulator-max-microvolt = <2004000>;
+ lltc,fb-voltage-divider = <301000 200000>;
+ regulator-ramp-delay = <7000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ /* VDD_DDR (1+R1/R2 = 2.105) */
+ reg_vdd_ddr: sw2 {
+ regulator-name = "vddddr";
+ regulator-min-microvolt = <868310>;
+ regulator-max-microvolt = <1684000>;
+ lltc,fb-voltage-divider = <221000 200000>;
+ regulator-ramp-delay = <7000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ /* VDD_ARM (1+R1/R2 = 1.635) */
+ reg_vdd_arm: sw3 {
+ regulator-name = "vddarm";
+ regulator-min-microvolt = <674400>;
+ regulator-max-microvolt = <1308000>;
+ lltc,fb-voltage-divider = <127000 200000>;
+ regulator-ramp-delay = <7000>;
+ regulator-boot-on;
+ regulator-always-on;
+ linux,phandle = <&reg_vdd_arm>;
+ };
+
+ /* VDD_SOC (1+R1/R2 = 1.635) */
+ reg_vdd_soc: sw4 {
+ regulator-name = "vddsoc";
+ regulator-min-microvolt = <674400>;
+ regulator-max-microvolt = <1308000>;
+ lltc,fb-voltage-divider = <127000 200000>;
+ regulator-ramp-delay = <7000>;
+ regulator-boot-on;
+ regulator-always-on;
+ linux,phandle = <&reg_vdd_soc>;
+ };
+
+ /* VDD_1P0 (1+R1/R2 = 1.38): */
+ reg_1p0v: ldo2 {
+ regulator-name = "vdd1p0";
+ regulator-min-microvolt = <1002777>;
+ regulator-max-microvolt = <1002777>;
+ lltc,fb-voltage-divider = <100000 261000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ /* VDD_HIGH (1+R1/R2 = 4.17) */
+ reg_3p0v: ldo4 {
+ regulator-name = "vdd3p0";
+ regulator-min-microvolt = <3023250>;
+ regulator-max-microvolt = <3023250>;
+ lltc,fb-voltage-divider = <634000 200000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+ };
+ };
+};
+
+&i2c3 {
+ clock-frequency = <400000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c3>;
+ status = "okay";
+
+ tlv320aic3105: codec@18 {
+ compatible = "ti,tlv320aic3x";
+ reg = <0x18>;
+ gpio-reset = <&gpio5 17 GPIO_ACTIVE_LOW>;
+ clocks = <&clks IMX6QDL_CLK_CKO>;
+ ai3x-micbias-vg = <2>; /* MICBIAS_2_5V */
+ /* Regulators */
+ DRVDD-supply = <&reg_3p3v>;
+ AVDD-supply = <&reg_3p3v>;
+ IOVDD-supply = <&reg_3p3v>;
+ DVDD-supply = <&reg_1p8v>;
+ };
+
+ accelerometer@1d {
+ compatible = "fsl,mma8451";
+ reg = <0x1d>;
+ interrupt-parent = <&gpio7>;
+ interrupts = <11 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "INT2";
+ };
+
+ /* headphone detect */
+ ts3a227e@3b {
+ compatible = "ti,ts3a227e";
+ reg = <0x3b>;
+ interrupt-parent = <&gpio5>;
+ interrupts = <15 IRQ_TYPE_LEVEL_LOW>;
+ ti,micbias = <4>; /* 2.5V micbias */
+ };
+};
+
+&ldb {
+ status = "okay";
+
+ lvds-channel@0 {
+ fsl,data-mapping = "spwg";
+ fsl,data-width = <18>;
+ status = "okay";
+
+ display-timings {
+ native-mode = <&timing0>;
+ timing0: g101evn010 {
+ clock-frequency = <68930000>;
+ hactive = <1280>;
+ vactive = <800>;
+ hback-porch = <220>;
+ hfront-porch = <40>;
+ vback-porch = <21>;
+ vfront-porch = <7>;
+ hsync-len = <60>;
+ vsync-len = <10>;
+ };
+ };
+ };
+};
+
+&pwm1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pwm1>;
+ status = "okay";
+};
+
+&ssi1 {
+ status = "okay";
+};
+
+&uart1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart1>;
+ status = "okay";
+};
+
+&uart2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart2>;
+ status = "okay";
+};
+
+&usbotg {
+ vbus-supply = <&reg_usb_otg_vbus>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usbotg>;
+ disable-over-current;
+ status = "okay";
+};
+
+&usbh1 {
+ vbus-supply = <&reg_usb_h1_vbus>;
+ status = "okay";
+};
+
+&usdhc1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usdhc1_200mhz>;
+ vmmc-supply = <&reg_3p3v>;
+ non-removable;
+ bus-width = <4>;
+ status = "okay";
+};
+
+&usdhc2 {
+ pinctrl-names = "default", "state_100mhz", "state_200mhz";
+ pinctrl-0 = <&pinctrl_usdhc2>;
+ pinctrl-1 = <&pinctrl_usdhc2_100mhz>;
+ pinctrl-2 = <&pinctrl_usdhc2_200mhz>;
+ cd-gpios = <&gpio6 11 GPIO_ACTIVE_LOW>;
+ vmmc-supply = <&reg_3p3v>;
+ max-frequency = <100000000>;
+ status = "okay";
+};
+
+&usdhc3 {
+ pinctrl-names = "default", "state_100mhz", "state_200mhz";
+ pinctrl-0 = <&pinctrl_usdhc3>;
+ pinctrl-1 = <&pinctrl_usdhc3_100mhz>;
+ pinctrl-2 = <&pinctrl_usdhc3_200mhz>;
+ non-removable;
+ vmmc-supply = <&reg_3p3v>;
+ keep-power-in-suspend;
+ status = "okay";
+};
+
+&wdog1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_wdog>;
+ fsl,ext-reset-output;
+};
+
+&iomuxc {
+ pinctrl_audmux: audmuxgrp {
+ fsl,pins = <
+ MX6QDL_PAD_DI0_PIN2__AUD6_TXD 0x130b0
+ MX6QDL_PAD_DI0_PIN3__AUD6_TXFS 0x130b0
+ MX6QDL_PAD_DI0_PIN4__AUD6_RXD 0x130b0
+ MX6QDL_PAD_DI0_PIN15__AUD6_TXC 0x130b0
+ MX6QDL_PAD_GPIO_0__CCM_CLKO1 0x130b0 /* MCK */
+ >;
+ };
+
+ pinctrl_enet: enetgrp {
+ fsl,pins = <
+ MX6QDL_PAD_RGMII_RXC__RGMII_RXC 0x1b030
+ MX6QDL_PAD_RGMII_RD0__RGMII_RD0 0x1b030
+ MX6QDL_PAD_RGMII_RD1__RGMII_RD1 0x1b030
+ MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b030
+ MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b030
+ MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x1b030
+ MX6QDL_PAD_RGMII_TXC__RGMII_TXC 0x1b030
+ MX6QDL_PAD_RGMII_TD0__RGMII_TD0 0x1b030
+ MX6QDL_PAD_RGMII_TD1__RGMII_TD1 0x1b030
+ MX6QDL_PAD_RGMII_TD2__RGMII_TD2 0x1b030
+ MX6QDL_PAD_RGMII_TD3__RGMII_TD3 0x1b030
+ MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL 0x1b030
+ MX6QDL_PAD_ENET_REF_CLK__ENET_TX_CLK 0x1b0b0
+ MX6QDL_PAD_ENET_MDIO__ENET_MDIO 0x1b0b0
+ MX6QDL_PAD_ENET_MDC__ENET_MDC 0x1b0b0
+ MX6QDL_PAD_ENET_TXD0__GPIO1_IO30 0x4001b0b0 /* PHY_RST# */
+ MX6QDL_PAD_ENET_CRS_DV__GPIO1_IO25 0x4001b0b0 /* PHY_EN */
+ >;
+ };
+
+ pinctrl_gpio_leds: gpioledsgrp {
+ fsl,pins = <
+ MX6QDL_PAD_NANDF_CS1__GPIO6_IO14 0x1b0b0
+ >;
+ };
+
+ pinctrl_i2c1: i2c1grp {
+ fsl,pins = <
+ MX6QDL_PAD_EIM_D21__I2C1_SCL 0x4001b8b1
+ MX6QDL_PAD_EIM_D28__I2C1_SDA 0x4001b8b1
+ MX6QDL_PAD_GPIO_4__GPIO1_IO04 0x0001b0b0 /* GSC_IRQ# */
+ >;
+ };
+
+ pinctrl_i2c2: i2c2grp {
+ fsl,pins = <
+ MX6QDL_PAD_KEY_COL3__I2C2_SCL 0x4001b8b1
+ MX6QDL_PAD_KEY_ROW3__I2C2_SDA 0x4001b8b1
+ MX6QDL_PAD_GPIO_8__GPIO1_IO08 0x0001b0b0 /* PMIC_IRQ# */
+ >;
+ };
+
+ pinctrl_i2c3: i2c3grp {
+ fsl,pins = <
+ /* I2C3 */
+ MX6QDL_PAD_GPIO_3__I2C3_SCL 0x4001b8b1
+ MX6QDL_PAD_GPIO_6__I2C3_SDA 0x4001b8b1
+
+ /* Headphone Detect */
+ MX6QDL_PAD_DISP0_DAT21__GPIO5_IO15 0x0001b0b0 /* HPDET_IRQ# */
+ MX6QDL_PAD_DISP0_DAT22__GPIO5_IO16 0x0001b0b0 /* HPDET_MIC# */
+
+ /* Codec */
+ MX6QDL_PAD_DISP0_DAT23__GPIO5_IO17 0x0001b0b0 /* CODEC_RST# */
+
+ /* Touch Controller */
+ MX6QDL_PAD_KEY_COL0__GPIO4_IO06 0x0001b0b0 /* TOUCH_IRQ# */
+ MX6QDL_PAD_KEY_COL1__GPIO4_IO08 0x0001b0b0 /* TOUCH_RST */
+
+ /* Stow Sensor */
+ MX6QDL_PAD_GPIO_16__GPIO7_IO11 0x0001b0b0 /* ACCEL_IRQ2 */
+ MX6QDL_PAD_GPIO_18__GPIO7_IO13 0x0001b0b0 /* ACCEL_IRQ1 */
+ >;
+ };
+
+ pinctrl_pwm1: pwm1grp {
+ fsl,pins = <
+ MX6QDL_PAD_GPIO_9__PWM1_OUT 0x1b0b1
+ >;
+ };
+
+ pinctrl_uart1: uart1grp {
+ fsl,pins = <
+ MX6QDL_PAD_CSI0_DAT10__UART1_TX_DATA 0x1b0b1
+ MX6QDL_PAD_CSI0_DAT11__UART1_RX_DATA 0x1b0b1
+ MX6QDL_PAD_CSI0_DAT12__GPIO5_IO30 0x1b0b1 /* TXEN */
+ >;
+ };
+
+ pinctrl_uart2: uart2grp {
+ fsl,pins = <
+ MX6QDL_PAD_SD4_DAT7__UART2_TX_DATA 0x1b0b1
+ MX6QDL_PAD_SD4_DAT4__UART2_RX_DATA 0x1b0b1
+ >;
+ };
+
+ pinctrl_usbotg: usbotggrp {
+ fsl,pins = <
+ MX6QDL_PAD_GPIO_1__USB_OTG_ID 0x13059
+ MX6QDL_PAD_KEY_ROW4__GPIO4_IO15 0x4001b0b0 /* PWR_EN */
+ MX6QDL_PAD_KEY_COL4__GPIO4_IO14 0x1b0b0 /* OC */
+ >;
+ };
+
+ pinctrl_usdhc1_200mhz: usdhc1grp200mhz {
+ fsl,pins = <
+ MX6QDL_PAD_NANDF_D3__GPIO2_IO03 0x4001b0b0 /* EMMY_EN */
+ MX6QDL_PAD_NANDF_D4__GPIO2_IO04 0x4001b0b0 /* EMMY_CFG1# */
+ MX6QDL_PAD_NANDF_D5__GPIO2_IO05 0x4001b0b0 /* EMMY_CFG2# */
+ MX6QDL_PAD_NANDF_D6__GPIO2_IO06 0x0001b0b0 /* EMMY_BTWAKE# */
+ MX6QDL_PAD_NANDF_D7__GPIO2_IO07 0x0001b0b0 /* EMMY_WFWAKE# */
+
+ MX6QDL_PAD_SD1_CLK__SD1_CLK 0x100f9
+ MX6QDL_PAD_SD1_CMD__SD1_CMD 0x100f9
+ MX6QDL_PAD_SD1_DAT0__SD1_DATA0 0x170f9
+ MX6QDL_PAD_SD1_DAT1__SD1_DATA1 0x170f9
+ MX6QDL_PAD_SD1_DAT2__SD1_DATA2 0x170f9
+ MX6QDL_PAD_SD1_DAT3__SD1_DATA3 0x170f9
+ >;
+ };
+
+ pinctrl_usdhc2: usdhc2grp {
+ fsl,pins = <
+ MX6QDL_PAD_SD2_CMD__SD2_CMD 0x17059
+ MX6QDL_PAD_SD2_CLK__SD2_CLK 0x10059
+ MX6QDL_PAD_SD2_DAT0__SD2_DATA0 0x17059
+ MX6QDL_PAD_SD2_DAT1__SD2_DATA1 0x17059
+ MX6QDL_PAD_SD2_DAT2__SD2_DATA2 0x17059
+ MX6QDL_PAD_SD2_DAT3__SD2_DATA3 0x17059
+ MX6QDL_PAD_NANDF_CS0__GPIO6_IO11 0x17059 /* CD */
+ MX6QDL_PAD_KEY_ROW1__SD2_VSELECT 0x17059
+ >;
+ };
+
+ pinctrl_usdhc2_100mhz: usdhc2grp100mhz {
+ fsl,pins = <
+ MX6QDL_PAD_SD2_CMD__SD2_CMD 0x170b9
+ MX6QDL_PAD_SD2_CLK__SD2_CLK 0x100b9
+ MX6QDL_PAD_SD2_DAT0__SD2_DATA0 0x170b9
+ MX6QDL_PAD_SD2_DAT1__SD2_DATA1 0x170b9
+ MX6QDL_PAD_SD2_DAT2__SD2_DATA2 0x170b9
+ MX6QDL_PAD_SD2_DAT3__SD2_DATA3 0x170b9
+ MX6QDL_PAD_NANDF_CS0__GPIO6_IO11 0x170b9 /* CD */
+ MX6QDL_PAD_KEY_ROW1__SD2_VSELECT 0x170b9
+ >;
+ };
+
+ pinctrl_usdhc2_200mhz: usdhc2grp200mhz {
+ fsl,pins = <
+ MX6QDL_PAD_SD2_CMD__SD2_CMD 0x170f9
+ MX6QDL_PAD_SD2_CLK__SD2_CLK 0x100f9
+ MX6QDL_PAD_SD2_DAT0__SD2_DATA0 0x170f9
+ MX6QDL_PAD_SD2_DAT1__SD2_DATA1 0x170f9
+ MX6QDL_PAD_SD2_DAT2__SD2_DATA2 0x170f9
+ MX6QDL_PAD_SD2_DAT3__SD2_DATA3 0x170f9
+ MX6QDL_PAD_NANDF_CS0__GPIO6_IO11 0x170f9 /* CD */
+ MX6QDL_PAD_KEY_ROW1__SD2_VSELECT 0x170f9
+ >;
+ };
+
+ pinctrl_usdhc3: usdhc3grp {
+ fsl,pins = <
+ MX6QDL_PAD_SD3_CMD__SD3_CMD 0x17059
+ MX6QDL_PAD_SD3_CLK__SD3_CLK 0x10059
+ MX6QDL_PAD_SD3_RST__SD3_RESET 0x10059
+ MX6QDL_PAD_SD3_DAT0__SD3_DATA0 0x17059
+ MX6QDL_PAD_SD3_DAT1__SD3_DATA1 0x17059
+ MX6QDL_PAD_SD3_DAT2__SD3_DATA2 0x17059
+ MX6QDL_PAD_SD3_DAT3__SD3_DATA3 0x17059
+ MX6QDL_PAD_SD3_DAT4__SD3_DATA4 0x17059
+ MX6QDL_PAD_SD3_DAT5__SD3_DATA5 0x17059
+ MX6QDL_PAD_SD3_DAT6__SD3_DATA6 0x17059
+ MX6QDL_PAD_SD3_DAT7__SD3_DATA7 0x17059
+ >;
+ };
+
+ pinctrl_usdhc3_100mhz: usdhc3grp100mhz {
+ fsl,pins = <
+ MX6QDL_PAD_SD3_CMD__SD3_CMD 0x170b9
+ MX6QDL_PAD_SD3_CLK__SD3_CLK 0x100b9
+ MX6QDL_PAD_SD3_RST__SD3_RESET 0x100b9
+ MX6QDL_PAD_SD3_DAT0__SD3_DATA0 0x170b9
+ MX6QDL_PAD_SD3_DAT1__SD3_DATA1 0x170b9
+ MX6QDL_PAD_SD3_DAT2__SD3_DATA2 0x170b9
+ MX6QDL_PAD_SD3_DAT3__SD3_DATA3 0x170b9
+ MX6QDL_PAD_SD3_DAT4__SD3_DATA4 0x170b9
+ MX6QDL_PAD_SD3_DAT5__SD3_DATA5 0x170b9
+ MX6QDL_PAD_SD3_DAT6__SD3_DATA6 0x170b9
+ MX6QDL_PAD_SD3_DAT7__SD3_DATA7 0x170b9
+ >;
+ };
+
+ pinctrl_usdhc3_200mhz: usdhc3grp200mhz {
+ fsl,pins = <
+ MX6QDL_PAD_SD3_CMD__SD3_CMD 0x170f9
+ MX6QDL_PAD_SD3_CLK__SD3_CLK 0x100f9
+ MX6QDL_PAD_SD3_RST__SD3_RESET 0x100f9
+ MX6QDL_PAD_SD3_DAT0__SD3_DATA0 0x170f9
+ MX6QDL_PAD_SD3_DAT1__SD3_DATA1 0x170f9
+ MX6QDL_PAD_SD3_DAT2__SD3_DATA2 0x170f9
+ MX6QDL_PAD_SD3_DAT3__SD3_DATA3 0x170f9
+ MX6QDL_PAD_SD3_DAT4__SD3_DATA4 0x170f9
+ MX6QDL_PAD_SD3_DAT5__SD3_DATA5 0x170f9
+ MX6QDL_PAD_SD3_DAT6__SD3_DATA6 0x170f9
+ MX6QDL_PAD_SD3_DAT7__SD3_DATA7 0x170f9
+ >;
+ };
+
+ pinctrl_wdog: wdoggrp {
+ fsl,pins = <
+ MX6QDL_PAD_DISP0_DAT8__WDOG1_B 0x1b0b0
+ >;
+ };
+};
diff --git a/arch/arm/boot/dts/imx6qdl-gw5904.dtsi b/arch/arm/boot/dts/imx6qdl-gw5904.dtsi
new file mode 100644
index 000000000000..fd4b68be9fe9
--- /dev/null
+++ b/arch/arm/boot/dts/imx6qdl-gw5904.dtsi
@@ -0,0 +1,641 @@
+/*
+ * Copyright 2017 Gateworks Corporation
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this file; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
+ * MA 02110-1301 USA
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <dt-bindings/gpio/gpio.h>
+
+/ {
+ /* these are used by bootloader for disabling nodes */
+ aliases {
+ led0 = &led0;
+ led1 = &led1;
+ led2 = &led2;
+ usb0 = &usbh1;
+ usb1 = &usbotg;
+ };
+
+ chosen {
+ stdout-path = &uart2;
+ };
+
+ backlight {
+ compatible = "pwm-backlight";
+ pwms = <&pwm4 0 5000000>;
+ brightness-levels = <0 4 8 16 32 64 128 255>;
+ default-brightness-level = <7>;
+ };
+
+ leds {
+ compatible = "gpio-leds";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpio_leds>;
+
+ led0: user1 {
+ label = "user1";
+ gpios = <&gpio4 6 GPIO_ACTIVE_HIGH>; /* MX6_PANLEDG */
+ default-state = "on";
+ linux,default-trigger = "heartbeat";
+ };
+
+ led1: user2 {
+ label = "user2";
+ gpios = <&gpio4 7 GPIO_ACTIVE_HIGH>; /* MX6_PANLEDR */
+ default-state = "off";
+ };
+
+ led2: user3 {
+ label = "user3";
+ gpios = <&gpio4 15 GPIO_ACTIVE_LOW>; /* MX6_LOCLED# */
+ default-state = "off";
+ };
+ };
+
+ memory {
+ reg = <0x10000000 0x40000000>;
+ };
+
+ pps {
+ compatible = "pps-gpio";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pps>;
+ gpios = <&gpio1 26 GPIO_ACTIVE_HIGH>;
+ };
+
+ reg_1p0v: regulator-1p0v {
+ compatible = "regulator-fixed";
+ regulator-name = "1P0V";
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1000000>;
+ regulator-always-on;
+ };
+
+ reg_3p3v: regulator-3p3v {
+ compatible = "regulator-fixed";
+ regulator-name = "3P3V";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ reg_usb_h1_vbus: regulator-usb-h1-vbus {
+ compatible = "regulator-fixed";
+ regulator-name = "usb_h1_vbus";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-always-on;
+ };
+
+ reg_usb_otg_vbus: regulator-usb-otg-vbus {
+ compatible = "regulator-fixed";
+ regulator-name = "usb_otg_vbus";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ gpio = <&gpio3 22 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+};
+
+&clks {
+ assigned-clocks = <&clks IMX6QDL_CLK_LDB_DI0_SEL>,
+ <&clks IMX6QDL_CLK_LDB_DI1_SEL>;
+ assigned-clock-parents = <&clks IMX6QDL_CLK_PLL3_USB_OTG>,
+ <&clks IMX6QDL_CLK_PLL3_USB_OTG>;
+};
+
+&fec {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_enet>;
+ phy-mode = "rgmii-id";
+ status = "okay";
+
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ };
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ switch@0 {
+ compatible = "marvell,mv88e6085";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ label = "lan4";
+ };
+
+ port@1 {
+ reg = <1>;
+ label = "lan3";
+ };
+
+ port@2 {
+ reg = <2>;
+ label = "lan2";
+ };
+
+ port@3 {
+ reg = <3>;
+ label = "lan1";
+ };
+
+ port@5 {
+ reg = <5>;
+ label = "cpu";
+ ethernet = <&fec>;
+ };
+ };
+ };
+ };
+};
+
+&i2c1 {
+ clock-frequency = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c1>;
+ status = "okay";
+
+ pca9555: gpio@23 {
+ compatible = "nxp,pca9555";
+ reg = <0x23>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ eeprom1: eeprom@50 {
+ compatible = "atmel,24c02";
+ reg = <0x50>;
+ pagesize = <16>;
+ };
+
+ eeprom2: eeprom@51 {
+ compatible = "atmel,24c02";
+ reg = <0x51>;
+ pagesize = <16>;
+ };
+
+ eeprom3: eeprom@52 {
+ compatible = "atmel,24c02";
+ reg = <0x52>;
+ pagesize = <16>;
+ };
+
+ eeprom4: eeprom@53 {
+ compatible = "atmel,24c02";
+ reg = <0x53>;
+ pagesize = <16>;
+ };
+
+ ds1672: rtc@68 {
+ compatible = "dallas,ds1672";
+ reg = <0x68>;
+ };
+};
+
+&i2c2 {
+ clock-frequency = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c2>;
+ status = "okay";
+
+ ltc3676: pmic@3c {
+ compatible = "lltc,ltc3676";
+ reg = <0x3c>;
+ interrupt-parent = <&gpio1>;
+ interrupts = <8 IRQ_TYPE_EDGE_FALLING>;
+
+ regulators {
+ /* VDD_SOC (1+R1/R2 = 1.635) */
+ reg_vdd_soc: sw1 {
+ regulator-name = "vddsoc";
+ regulator-min-microvolt = <674400>;
+ regulator-max-microvolt = <1308000>;
+ lltc,fb-voltage-divider = <127000 200000>;
+ regulator-ramp-delay = <7000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ /* VDD_1P8 (1+R1/R2 = 2.505): GbE switch */
+ reg_1p8v: sw2 {
+ regulator-name = "vdd1p8";
+ regulator-min-microvolt = <1033310>;
+ regulator-max-microvolt = <2004000>;
+ lltc,fb-voltage-divider = <301000 200000>;
+ regulator-ramp-delay = <7000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ /* VDD_ARM (1+R1/R2 = 1.635) */
+ reg_vdd_arm: sw3 {
+ regulator-name = "vddarm";
+ regulator-min-microvolt = <674400>;
+ regulator-max-microvolt = <1308000>;
+ lltc,fb-voltage-divider = <127000 200000>;
+ regulator-ramp-delay = <7000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ /* VDD_DDR (1+R1/R2 = 2.105) */
+ reg_vdd_ddr: sw4 {
+ regulator-name = "vddddr";
+ regulator-min-microvolt = <868310>;
+ regulator-max-microvolt = <1684000>;
+ lltc,fb-voltage-divider = <221000 200000>;
+ regulator-ramp-delay = <7000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ /* VDD_2P5 (1+R1/R2 = 3.435): PCIe/ENET-PHY */
+ reg_2p5v: ldo2 {
+ regulator-name = "vdd2p5";
+ regulator-min-microvolt = <2490375>;
+ regulator-max-microvolt = <2490375>;
+ lltc,fb-voltage-divider = <487000 200000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ /* VDD_HIGH (1+R1/R2 = 4.17) */
+ reg_3p0v: ldo4 {
+ regulator-name = "vdd3p0";
+ regulator-min-microvolt = <3023250>;
+ regulator-max-microvolt = <3023250>;
+ lltc,fb-voltage-divider = <634000 200000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+ };
+ };
+};
+
+&i2c3 {
+ clock-frequency = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c3>;
+ status = "okay";
+
+ egalax_ts: touchscreen@4 {
+ compatible = "eeti,egalax_ts";
+ reg = <0x04>;
+ interrupt-parent = <&gpio1>;
+ interrupts = <11 IRQ_TYPE_EDGE_FALLING>;
+ wakeup-gpios = <&gpio1 11 GPIO_ACTIVE_LOW>;
+ };
+};
+
+&ldb {
+ status = "okay";
+
+ lvds-channel@0 {
+ fsl,data-mapping = "spwg";
+ fsl,data-width = <18>;
+ status = "okay";
+
+ display-timings {
+ native-mode = <&timing0>;
+ timing0: hsd100pxn1 {
+ clock-frequency = <65000000>;
+ hactive = <1024>;
+ vactive = <768>;
+ hback-porch = <220>;
+ hfront-porch = <40>;
+ vback-porch = <21>;
+ vfront-porch = <7>;
+ hsync-len = <60>;
+ vsync-len = <10>;
+ };
+ };
+ };
+};
+
+&pcie {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pcie>;
+ reset-gpio = <&gpio1 0 GPIO_ACTIVE_LOW>;
+ status = "okay";
+};
+
+&pwm2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pwm2>; /* MX6_DIO1 */
+ status = "disabled";
+};
+
+&pwm3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pwm3>; /* MX6_DIO2 */
+ status = "disabled";
+};
+
+&pwm4 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pwm4>;
+ status = "okay";
+};
+
+&uart1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart1>;
+ status = "okay";
+};
+
+&uart2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart2>;
+ status = "okay";
+};
+
+&uart3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart3>;
+ uart-has-rtscts;
+ status = "okay";
+};
+
+&uart4 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart4>;
+ uart-has-rtscts;
+ status = "okay";
+};
+
+&uart5 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart5>;
+ status = "okay";
+};
+
+&usbotg {
+ vbus-supply = <&reg_usb_otg_vbus>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usbotg>;
+ disable-over-current;
+ status = "okay";
+};
+
+&usbh1 {
+ vbus-supply = <&reg_usb_h1_vbus>;
+ status = "okay";
+};
+
+&usdhc3 {
+ pinctrl-names = "default", "state_100mhz", "state_200mhz";
+ pinctrl-0 = <&pinctrl_usdhc3>;
+ pinctrl-1 = <&pinctrl_usdhc3_100mhz>;
+ pinctrl-2 = <&pinctrl_usdhc3_200mhz>;
+ non-removable;
+ vmmc-supply = <&reg_3p3v>;
+ keep-power-in-suspend;
+ status = "okay";
+};
+
+&wdog1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_wdog>;
+ fsl,ext-reset-output;
+};
+
+&iomuxc {
+ pinctrl_enet: enetgrp {
+ fsl,pins = <
+ MX6QDL_PAD_RGMII_RXC__RGMII_RXC 0x1b030
+ MX6QDL_PAD_RGMII_RD0__RGMII_RD0 0x1b030
+ MX6QDL_PAD_RGMII_RD1__RGMII_RD1 0x1b030
+ MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b030
+ MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b030
+ MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x1b030
+ MX6QDL_PAD_RGMII_TXC__RGMII_TXC 0x1b030
+ MX6QDL_PAD_RGMII_TD0__RGMII_TD0 0x1b030
+ MX6QDL_PAD_RGMII_TD1__RGMII_TD1 0x1b030
+ MX6QDL_PAD_RGMII_TD2__RGMII_TD2 0x1b030
+ MX6QDL_PAD_RGMII_TD3__RGMII_TD3 0x1b030
+ MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL 0x1b030
+ MX6QDL_PAD_ENET_REF_CLK__ENET_TX_CLK 0x1b0b0
+ MX6QDL_PAD_ENET_MDIO__ENET_MDIO 0x1b0b0
+ MX6QDL_PAD_ENET_MDC__ENET_MDC 0x1b0b0
+ MX6QDL_PAD_GPIO_16__ENET_REF_CLK 0x4001b0a8
+ MX6QDL_PAD_ENET_TXD0__GPIO1_IO30 0x4001b0b0 /* PHY_RST# */
+ >;
+ };
+
+ pinctrl_gpio_leds: gpioledsgrp {
+ fsl,pins = <
+ MX6QDL_PAD_KEY_COL0__GPIO4_IO06 0x1b0b0
+ MX6QDL_PAD_KEY_ROW0__GPIO4_IO07 0x1b0b0
+ MX6QDL_PAD_KEY_ROW4__GPIO4_IO15 0x1b0b0
+ >;
+ };
+
+ pinctrl_i2c1: i2c1grp {
+ fsl,pins = <
+ MX6QDL_PAD_EIM_D21__I2C1_SCL 0x4001b8b1
+ MX6QDL_PAD_EIM_D28__I2C1_SDA 0x4001b8b1
+ >;
+ };
+
+ pinctrl_i2c2: i2c2grp {
+ fsl,pins = <
+ MX6QDL_PAD_KEY_COL3__I2C2_SCL 0x4001b8b1
+ MX6QDL_PAD_KEY_ROW3__I2C2_SDA 0x4001b8b1
+ >;
+ };
+
+ pinctrl_i2c3: i2c3grp {
+ fsl,pins = <
+ MX6QDL_PAD_GPIO_3__I2C3_SCL 0x4001b8b1
+ MX6QDL_PAD_GPIO_6__I2C3_SDA 0x4001b8b1
+ >;
+ };
+
+ pinctrl_pcie: pciegrp {
+ fsl,pins = <
+ MX6QDL_PAD_GPIO_0__GPIO1_IO00 0x1b0b0 /* PCIE RST */
+ >;
+ };
+
+ pinctrl_pmic: pmicgrp {
+ fsl,pins = <
+ MX6QDL_PAD_GPIO_8__GPIO1_IO08 0x1b0b0 /* PMIC_IRQ# */
+ >;
+ };
+
+ pinctrl_pps: ppsgrp {
+ fsl,pins = <
+ MX6QDL_PAD_ENET_RXD1__GPIO1_IO26 0x1b0b1
+ >;
+ };
+
+ pinctrl_pwm2: pwm2grp {
+ fsl,pins = <
+ MX6QDL_PAD_SD1_DAT2__PWM2_OUT 0x1b0b1
+ >;
+ };
+
+ pinctrl_pwm3: pwm3grp {
+ fsl,pins = <
+ MX6QDL_PAD_SD1_DAT1__PWM3_OUT 0x1b0b1
+ >;
+ };
+
+ pinctrl_pwm4: pwm4grp {
+ fsl,pins = <
+ MX6QDL_PAD_SD1_CMD__PWM4_OUT 0x1b0b1
+ >;
+ };
+
+ pinctrl_uart1: uart1grp {
+ fsl,pins = <
+ MX6QDL_PAD_CSI0_DAT10__UART1_TX_DATA 0x1b0b1
+ MX6QDL_PAD_CSI0_DAT11__UART1_RX_DATA 0x1b0b1
+ >;
+ };
+
+ pinctrl_uart2: uart2grp {
+ fsl,pins = <
+ MX6QDL_PAD_SD4_DAT7__UART2_TX_DATA 0x1b0b1
+ MX6QDL_PAD_SD4_DAT4__UART2_RX_DATA 0x1b0b1
+ >;
+ };
+
+ pinctrl_uart3: uart3grp {
+ fsl,pins = <
+ MX6QDL_PAD_EIM_D23__UART3_CTS_B 0x1b0b1
+ MX6QDL_PAD_EIM_D24__UART3_TX_DATA 0x1b0b1
+ MX6QDL_PAD_EIM_D25__UART3_RX_DATA 0x1b0b1
+ MX6QDL_PAD_EIM_D31__UART3_RTS_B 0x1b0b1
+ >;
+ };
+
+ pinctrl_uart4: uart4grp {
+ fsl,pins = <
+ MX6QDL_PAD_CSI0_DAT12__UART4_TX_DATA 0x1b0b1
+ MX6QDL_PAD_CSI0_DAT13__UART4_RX_DATA 0x1b0b1
+ MX6QDL_PAD_CSI0_DAT16__UART4_RTS_B 0x1b0b1
+ MX6QDL_PAD_CSI0_DAT17__UART4_CTS_B 0x1b0b1
+ >;
+ };
+
+ pinctrl_uart5: uart5grp {
+ fsl,pins = <
+ MX6QDL_PAD_KEY_COL1__UART5_TX_DATA 0x1b0b1
+ MX6QDL_PAD_KEY_ROW1__UART5_RX_DATA 0x1b0b1
+ >;
+ };
+
+ pinctrl_usbotg: usbotggrp {
+ fsl,pins = <
+ MX6QDL_PAD_GPIO_1__USB_OTG_ID 0x17059
+ MX6QDL_PAD_EIM_D22__GPIO3_IO22 0x1b0b0 /* PWR_EN */
+ MX6QDL_PAD_KEY_COL4__GPIO4_IO14 0x1b0b0 /* OC */
+ >;
+ };
+
+ pinctrl_usdhc3: usdhc3grp {
+ fsl,pins = <
+ MX6QDL_PAD_SD3_CMD__SD3_CMD 0x17059
+ MX6QDL_PAD_SD3_CLK__SD3_CLK 0x10059
+ MX6QDL_PAD_SD3_RST__SD3_RESET 0x10059
+ MX6QDL_PAD_SD3_DAT0__SD3_DATA0 0x17059
+ MX6QDL_PAD_SD3_DAT1__SD3_DATA1 0x17059
+ MX6QDL_PAD_SD3_DAT2__SD3_DATA2 0x17059
+ MX6QDL_PAD_SD3_DAT3__SD3_DATA3 0x17059
+ MX6QDL_PAD_SD3_DAT4__SD3_DATA4 0x17059
+ MX6QDL_PAD_SD3_DAT5__SD3_DATA5 0x17059
+ MX6QDL_PAD_SD3_DAT6__SD3_DATA6 0x17059
+ MX6QDL_PAD_SD3_DAT7__SD3_DATA7 0x17059
+ >;
+ };
+
+ pinctrl_usdhc3_100mhz: usdhc3grp100mhz {
+ fsl,pins = <
+ MX6QDL_PAD_SD3_CMD__SD3_CMD 0x170b9
+ MX6QDL_PAD_SD3_CLK__SD3_CLK 0x100b9
+ MX6QDL_PAD_SD3_RST__SD3_RESET 0x100b9
+ MX6QDL_PAD_SD3_DAT0__SD3_DATA0 0x170b9
+ MX6QDL_PAD_SD3_DAT1__SD3_DATA1 0x170b9
+ MX6QDL_PAD_SD3_DAT2__SD3_DATA2 0x170b9
+ MX6QDL_PAD_SD3_DAT3__SD3_DATA3 0x170b9
+ MX6QDL_PAD_SD3_DAT4__SD3_DATA4 0x170b9
+ MX6QDL_PAD_SD3_DAT5__SD3_DATA5 0x170b9
+ MX6QDL_PAD_SD3_DAT6__SD3_DATA6 0x170b9
+ MX6QDL_PAD_SD3_DAT7__SD3_DATA7 0x170b9
+ >;
+ };
+
+ pinctrl_usdhc3_200mhz: usdhc3grp200mhz {
+ fsl,pins = <
+ MX6QDL_PAD_SD3_CMD__SD3_CMD 0x170f9
+ MX6QDL_PAD_SD3_CLK__SD3_CLK 0x100f9
+ MX6QDL_PAD_SD3_RST__SD3_RESET 0x100f9
+ MX6QDL_PAD_SD3_DAT0__SD3_DATA0 0x170f9
+ MX6QDL_PAD_SD3_DAT1__SD3_DATA1 0x170f9
+ MX6QDL_PAD_SD3_DAT2__SD3_DATA2 0x170f9
+ MX6QDL_PAD_SD3_DAT3__SD3_DATA3 0x170f9
+ MX6QDL_PAD_SD3_DAT4__SD3_DATA4 0x170f9
+ MX6QDL_PAD_SD3_DAT5__SD3_DATA5 0x170f9
+ MX6QDL_PAD_SD3_DAT6__SD3_DATA6 0x170f9
+ MX6QDL_PAD_SD3_DAT7__SD3_DATA7 0x170f9
+ >;
+ };
+
+ pinctrl_wdog: wdoggrp {
+ fsl,pins = <
+ MX6QDL_PAD_DISP0_DAT8__WDOG1_B 0x1b0b0
+ >;
+ };
+};
diff --git a/arch/arm/boot/dts/imx6qdl-icore.dtsi b/arch/arm/boot/dts/imx6qdl-icore.dtsi
index 55bebfc9ad94..56d0c5d21cd0 100644
--- a/arch/arm/boot/dts/imx6qdl-icore.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-icore.dtsi
@@ -48,6 +48,13 @@
reg = <0x10000000 0x80000000>;
};
+ backlight {
+ compatible = "pwm-backlight";
+ pwms = <&pwm3 0 100000>;
+ brightness-levels = <0 4 8 16 32 64 128 255>;
+ default-brightness-level = <7>;
+ };
+
reg_3p3v: regulator-3p3v {
compatible = "regulator-fixed";
regulator-name = "3P3V";
@@ -136,6 +143,12 @@
status = "okay";
};
+&pwm3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pwm3>;
+ status = "okay";
+};
+
&uart4 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart4>;
@@ -246,6 +259,12 @@
>;
};
+ pinctrl_pwm3: pwm3grp {
+ fsl,pins = <
+ MX6QDL_PAD_SD4_DAT1__PWM3_OUT 0x1b0b1
+ >;
+ };
+
pinctrl_usbotg: usbotggrp {
fsl,pins = <
MX6QDL_PAD_GPIO_1__USB_OTG_ID 0x17059
diff --git a/arch/arm/boot/dts/imx6qdl-sabresd.dtsi b/arch/arm/boot/dts/imx6qdl-sabresd.dtsi
index 63bf95ed8c88..58055ceec6dc 100644
--- a/arch/arm/boot/dts/imx6qdl-sabresd.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-sabresd.dtsi
@@ -548,6 +548,18 @@
status = "okay";
};
+&reg_arm {
+ vin-supply = <&sw1a_reg>;
+};
+
+&reg_pu {
+ vin-supply = <&sw1c_reg>;
+};
+
+&reg_soc {
+ vin-supply = <&sw1c_reg>;
+};
+
&snvs_poweroff {
status = "okay";
};
diff --git a/arch/arm/boot/dts/imx6qdl-zii-rdu2.dtsi b/arch/arm/boot/dts/imx6qdl-zii-rdu2.dtsi
new file mode 100644
index 000000000000..5d94b5ee6aa0
--- /dev/null
+++ b/arch/arm/boot/dts/imx6qdl-zii-rdu2.dtsi
@@ -0,0 +1,932 @@
+/*
+ * Copyright (C) 2016-2017 Zodiac Inflight Innovations
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/sound/fsl-imx-audmux.h>
+
+/ {
+ chosen {
+ stdout-path = &uart1;
+ };
+
+ aliases {
+ mdio-gpio0 = &mdio1;
+ };
+
+ mdio1: mdio {
+ compatible = "virtual,mdio-gpio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_mdio1>;
+ gpios = <&gpio6 5 GPIO_ACTIVE_HIGH
+ &gpio6 4 GPIO_ACTIVE_HIGH>;
+ };
+
+ reg_28p0v: regulator-28p0v {
+ compatible = "regulator-fixed";
+ regulator-name = "28V_IN";
+ regulator-min-microvolt = <28000000>;
+ regulator-max-microvolt = <28000000>;
+ regulator-always-on;
+ };
+
+ reg_12p0v: regulator-12p0v {
+ compatible = "regulator-fixed";
+ vin-supply = <&reg_28p0v>;
+ regulator-name = "12V_MAIN";
+ regulator-min-microvolt = <12000000>;
+ regulator-max-microvolt = <12000000>;
+ regulator-always-on;
+ };
+
+ reg_5p0v_main: regulator-5p0v-main {
+ compatible = "regulator-fixed";
+ vin-supply = <&reg_12p0v>;
+ regulator-name = "5V_MAIN";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-always-on;
+ };
+
+ reg_5p0v_user_usb: regulator-5p0v-user-usb {
+ compatible = "regulator-fixed";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_reg_user_usb>;
+ vin-supply = <&reg_5p0v_main>;
+ regulator-name = "5V_USER_USB";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ gpio = <&gpio3 22 GPIO_ACTIVE_LOW>;
+ startup-delay-us = <1000>;
+ };
+
+ reg_3p3v_pmic: regulator-3p3v-pmic {
+ compatible = "regulator-fixed";
+ vin-supply = <&reg_12p0v>;
+ regulator-name = "PMIC_3V3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ reg_3p3v: regulator-3p3v {
+ compatible = "regulator-fixed";
+ vin-supply = <&reg_3p3v_pmic>;
+ regulator-name = "GEN_3V3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ reg_3p3v_sd: regulator-3p3v-sd {
+ compatible = "regulator-fixed";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_reg_3p3v_sd>;
+ vin-supply = <&reg_3p3v>;
+ regulator-name = "3V3_SD";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&gpio7 8 GPIO_ACTIVE_HIGH>;
+ startup-delay-us = <1000>;
+ enable-active-high;
+ regulator-always-on;
+ };
+
+ reg_3p3v_display: regulator-3p3v-display {
+ compatible = "regulator-fixed";
+ vin-supply = <&reg_12p0v>;
+ regulator-name = "3V3_DISPLAY";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ reg_3p3v_ssd: regulator-3p3v-ssd {
+ compatible = "regulator-fixed";
+ vin-supply = <&reg_12p0v>;
+ regulator-name = "3V3_SSD";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ sound1 {
+ compatible = "simple-audio-card";
+ simple-audio-card,name = "Front";
+ simple-audio-card,format = "i2s";
+ simple-audio-card,bitclock-master = <&sound1_codec>;
+ simple-audio-card,frame-master = <&sound1_codec>;
+ simple-audio-card,widgets =
+ "Headphone", "Headphone Jack";
+ simple-audio-card,routing =
+ "Headphone Jack", "HPLEFT",
+ "Headphone Jack", "HPRIGHT",
+ "LEFTIN", "HPL",
+ "RIGHTIN", "HPR";
+ simple-audio-card,aux-devs = <&hpa1>;
+
+ sound1_cpu: simple-audio-card,cpu {
+ sound-dai = <&ssi2>;
+ };
+
+ sound1_codec: simple-audio-card,codec {
+ sound-dai = <&codec1>;
+ clocks = <&cs2000>;
+ };
+ };
+
+ sound2 {
+ compatible = "simple-audio-card";
+ simple-audio-card,name = "Back";
+ simple-audio-card,format = "i2s";
+ simple-audio-card,bitclock-master = <&sound2_codec>;
+ simple-audio-card,frame-master = <&sound2_codec>;
+ simple-audio-card,widgets =
+ "Headphone", "Headphone Jack";
+ simple-audio-card,routing =
+ "Headphone Jack", "HPLEFT",
+ "Headphone Jack", "HPRIGHT",
+ "LEFTIN", "HPL",
+ "RIGHTIN", "HPR";
+ simple-audio-card,aux-devs = <&hpa2>;
+
+ sound2_cpu: simple-audio-card,cpu {
+ sound-dai = <&ssi1>;
+ };
+
+ sound2_codec: simple-audio-card,codec {
+ sound-dai = <&codec2>;
+ clocks = <&cs2000>;
+ };
+ };
+
+ panel {
+ power-supply = <&reg_3p3v_display>;
+ status = "disabled";
+
+ port {
+ panel_in: endpoint {
+ remote-endpoint = <&lvds0_out>;
+ };
+ };
+ };
+
+ disp0: disp0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,imx-parallel-display";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_disp0>;
+ status = "disabled";
+
+ port@0 {
+ reg = <0>;
+
+ disp0_in_0: endpoint {
+ remote-endpoint = <&ipu1_di0_disp0>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ disp0_out: endpoint {
+ remote-endpoint = <&tc358767_in>;
+ };
+ };
+ };
+
+ cs2000_ref: cs2000-ref {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <24576000>;
+ };
+
+ cs2000_in_dummy: cs2000-in-dummy {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <0>;
+ };
+
+ edp_refclk: edp-refclk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <19200000>;
+ };
+};
+
+&reg_arm {
+ vin-supply = <&sw1a_reg>;
+};
+
+&reg_pu {
+ vin-supply = <&sw1c_reg>;
+};
+
+&reg_soc {
+ vin-supply = <&sw1c_reg>;
+};
+
+&ldb {
+ lvds-channel@0 {
+ port@4 {
+ reg = <4>;
+
+ lvds0_out: endpoint {
+ remote-endpoint = <&panel_in>;
+ };
+ };
+ };
+};
+
+&uart1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart1>;
+ status = "okay";
+};
+
+&uart3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart3>;
+ uart-has-rtscts;
+ linux,rs485-enabled-at-boot-time;
+ status = "okay";
+};
+
+&uart4 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart4>;
+ status = "okay";
+};
+
+&ecspi1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_ecspi1>;
+ cs-gpios = <&gpio2 30 GPIO_ACTIVE_HIGH>;
+ status = "okay";
+
+ flash@0 {
+ compatible = "st,m25p128", "jedec,spi-nor";
+ spi-max-frequency = <20000000>;
+ reg = <0>;
+ };
+};
+
+&i2c1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c1>;
+ clock-frequency = <100000>;
+ status = "okay";
+
+ codec2: codec@18 {
+ compatible = "ti,tlv320dac3100";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_codec2>;
+ reg = <0x18>;
+ #sound-dai-cells = <0>;
+ HPVDD-supply = <&reg_3p3v>;
+ SPRVDD-supply = <&reg_3p3v>;
+ SPLVDD-supply = <&reg_3p3v>;
+ AVDD-supply = <&reg_3p3v>;
+ IOVDD-supply = <&reg_3p3v>;
+ DVDD-supply = <&vgen4_reg>;
+ gpio-reset = <&gpio1 2 GPIO_ACTIVE_HIGH>;
+ };
+
+ accel@1c {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_accel>;
+ compatible = "fsl,mma8451";
+ reg = <0x1c>;
+ interrupt-parent = <&gpio1>;
+ interrupt-names = "int1", "int2";
+ interrupts = <18 IRQ_TYPE_LEVEL_LOW>, <20 IRQ_TYPE_LEVEL_LOW>;
+ };
+
+ hpa2: amp@60 {
+ compatible = "ti,tpa6130a2";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_tpa2>;
+ reg = <0x60>;
+ power-gpio = <&gpio1 5 GPIO_ACTIVE_HIGH>;
+ Vdd-supply = <&reg_5p0v_main>;
+ };
+
+ edp-bridge@68 {
+ compatible = "toshiba,tc358767";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_tc358767>;
+ reg = <0x68>;
+ shutdown-gpios = <&gpio1 9 GPIO_ACTIVE_HIGH>;
+ clock-names = "ref";
+ clocks = <&edp_refclk>;
+ status = "disabled";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@1 {
+ reg = <1>;
+
+ tc358767_in: endpoint {
+ remote-endpoint = <&disp0_out>;
+ };
+ };
+ };
+ };
+};
+
+&i2c2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c2>;
+ clock-frequency = <100000>;
+ status = "okay";
+
+ pmic@08 {
+ compatible = "fsl,pfuze100";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pfuze100_irq>;
+ reg = <0x08>;
+ interrupt-parent = <&gpio7>;
+ interrupts = <13 IRQ_TYPE_LEVEL_LOW>;
+
+ regulators {
+ sw1a_reg: sw1ab {
+ regulator-min-microvolt = <300000>;
+ regulator-max-microvolt = <1875000>;
+ regulator-boot-on;
+ regulator-always-on;
+ regulator-ramp-delay = <6250>;
+ };
+
+ sw1c_reg: sw1c {
+ regulator-min-microvolt = <300000>;
+ regulator-max-microvolt = <1875000>;
+ regulator-boot-on;
+ regulator-always-on;
+ regulator-ramp-delay = <6250>;
+ };
+
+ sw2_reg: sw2 {
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ sw3a_reg: sw3a {
+ regulator-min-microvolt = <400000>;
+ regulator-max-microvolt = <1500000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ sw3b_reg: sw3b {
+ regulator-min-microvolt = <400000>;
+ regulator-max-microvolt = <1500000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ sw4_reg: sw4 {
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ snvs_reg: vsnvs {
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ vref_reg: vrefddr {
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ vgen2_reg: vgen2 {
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1500000>;
+ regulator-always-on;
+ };
+
+ vgen4_reg: vgen4 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ };
+
+ vgen5_reg: vgen5 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2500000>;
+ regulator-always-on;
+ };
+
+ vgen6_reg: vgen6 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2800000>;
+ regulator-always-on;
+ };
+ };
+ };
+
+ temp-sense@48 {
+ compatible = "national,lm75";
+ reg = <0x48>;
+ };
+
+ cs2000: clkgen@4e {
+ compatible = "cirrus,cs2000-cp";
+ reg = <0x4e>;
+ #clock-cells = <0>;
+ clock-names = "clk_in", "ref_clk";
+ clocks = <&cs2000_in_dummy>, <&cs2000_ref>;
+ assigned-clocks = <&cs2000>;
+ assigned-clock-rates = <24000000>;
+ };
+
+ eeprom@54 {
+ compatible = "at,24c128";
+ reg = <0x54>;
+ };
+
+ rtc@68 {
+ compatible = "dallas,ds1341";
+ reg = <0x68>;
+ };
+};
+
+&i2c3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c3>;
+ clock-frequency = <400000>;
+ status = "okay";
+
+ codec1: codec@18 {
+ compatible = "ti,tlv320dac3100";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_codec1>;
+ reg = <0x18>;
+ #sound-dai-cells = <0>;
+ HPVDD-supply = <&reg_3p3v>;
+ SPRVDD-supply = <&reg_3p3v>;
+ SPLVDD-supply = <&reg_3p3v>;
+ AVDD-supply = <&reg_3p3v>;
+ IOVDD-supply = <&reg_3p3v>;
+ DVDD-supply = <&vgen4_reg>;
+ gpio-reset = <&gpio1 0 GPIO_ACTIVE_HIGH>;
+ };
+
+ touchscreen@20 {
+ compatible = "syna,rmi4-i2c";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_ts>;
+ reg = <0x20>;
+ interrupt-parent = <&gpio1>;
+ interrupts = <8 IRQ_TYPE_LEVEL_LOW>;
+ vdd-supply = <&reg_5p0v_main>;
+ vio-supply = <&reg_3p3v>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ rmi4-f01@1 {
+ reg = <0x1>;
+ syna,nosleep-mode = <1>;
+ };
+
+ rmi4-f11@11 {
+ reg = <0x11>;
+ touchscreen-inverted-y;
+ touchscreen-swapped-x-y;
+ syna,sensor-type = <1>;
+ };
+
+ rmi4-f12@12 {
+ reg = <0x12>;
+ touchscreen-inverted-y;
+ touchscreen-swapped-x-y;
+ syna,sensor-type = <1>;
+ };
+ };
+
+ hpa1: amp@60 {
+ compatible = "ti,tpa6130a2";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_tpa1>;
+ reg = <0x60>;
+ power-gpio = <&gpio1 4 GPIO_ACTIVE_HIGH>;
+ Vdd-supply = <&reg_5p0v_main>;
+ };
+};
+
+&ipu1_di0_disp0 {
+ remote-endpoint = <&disp0_in_0>;
+};
+
+&pcie {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pcie>;
+ reset-gpio = <&gpio7 12 GPIO_ACTIVE_LOW>;
+ status = "okay";
+};
+
+&usdhc2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usdhc2>;
+ bus-width = <4>;
+ cd-gpios = <&gpio2 2 GPIO_ACTIVE_LOW>;
+ wp-gpios = <&gpio2 3 GPIO_ACTIVE_HIGH>;
+ vmmc-supply = <&reg_3p3v_sd>;
+ vqmmc-supply = <&reg_3p3v>;
+ status = "okay";
+};
+
+&usdhc3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usdhc3>;
+ bus-width = <4>;
+ cd-gpios = <&gpio2 0 GPIO_ACTIVE_LOW>;
+ wp-gpios = <&gpio2 1 GPIO_ACTIVE_HIGH>;
+ vmmc-supply = <&reg_3p3v_sd>;
+ vqmmc-supply = <&reg_3p3v>;
+ status = "okay";
+};
+
+&usdhc4 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usdhc4>;
+ bus-width = <8>;
+ vmmc-supply = <&reg_3p3v>;
+ vqmmc-supply = <&reg_3p3v>;
+ non-removable;
+ status = "okay";
+};
+
+&sata {
+ target-supply = <&reg_3p3v_ssd>;
+ status = "okay";
+};
+
+&fec {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_enet>;
+ phy-mode = "rmii";
+ phy-reset-gpios = <&gpio1 23 GPIO_ACTIVE_LOW>;
+ phy-reset-duration = <100>;
+ phy-supply = <&reg_3p3v>;
+ status = "okay";
+
+ fixed-link {
+ speed = <100>;
+ full-duplex;
+ };
+};
+
+&usbh1 {
+ vbus-supply = <&reg_5p0v_main>;
+ status = "okay";
+};
+
+&usbotg {
+ vbus-supply = <&reg_5p0v_user_usb>;
+ disable-over-current;
+ dr_mode = "host";
+ status = "okay";
+};
+
+&ssi1 {
+ status = "okay";
+};
+
+&ssi2 {
+ status = "okay";
+};
+
+&audmux {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_audmux>;
+ status = "okay";
+
+ ssi1 {
+ fsl,audmux-port = <0>;
+ fsl,port-config = <
+ (IMX_AUDMUX_V2_PTCR_SYN |
+ IMX_AUDMUX_V2_PTCR_TFSEL(2) |
+ IMX_AUDMUX_V2_PTCR_TCSEL(2) |
+ IMX_AUDMUX_V2_PTCR_TFSDIR |
+ IMX_AUDMUX_V2_PTCR_TCLKDIR)
+ IMX_AUDMUX_V2_PDCR_RXDSEL(2)
+ >;
+ };
+
+ aud3 {
+ fsl,audmux-port = <2>;
+ fsl,port-config = <
+ IMX_AUDMUX_V2_PTCR_SYN
+ IMX_AUDMUX_V2_PDCR_RXDSEL(0)
+ >;
+ };
+
+ ssi2 {
+ fsl,audmux-port = <1>;
+ fsl,port-config = <
+ (IMX_AUDMUX_V2_PTCR_SYN |
+ IMX_AUDMUX_V2_PTCR_TFSEL(4) |
+ IMX_AUDMUX_V2_PTCR_TCSEL(4) |
+ IMX_AUDMUX_V2_PTCR_TFSDIR |
+ IMX_AUDMUX_V2_PTCR_TCLKDIR)
+ IMX_AUDMUX_V2_PDCR_RXDSEL(4)
+ >;
+ };
+
+ aud5 {
+ fsl,audmux-port = <4>;
+ fsl,port-config = <
+ IMX_AUDMUX_V2_PTCR_SYN
+ IMX_AUDMUX_V2_PDCR_RXDSEL(1)
+ >;
+ };
+};
+
+&iomuxc {
+ pinctrl_accel: accelgrp {
+ fsl,pins = <
+ MX6QDL_PAD_SD1_CMD__GPIO1_IO18 0x4001b000
+ MX6QDL_PAD_SD1_CLK__GPIO1_IO20 0x4001b000
+ >;
+ };
+
+ pinctrl_audmux: audmuxgrp {
+ fsl,pins = <
+ MX6QDL_PAD_KEY_COL0__AUD5_TXC 0x130b0
+ MX6QDL_PAD_KEY_ROW0__AUD5_TXD 0x130b0
+ MX6QDL_PAD_KEY_COL1__AUD5_TXFS 0x130b0
+ MX6QDL_PAD_CSI0_DAT4__AUD3_TXC 0x130b0
+ MX6QDL_PAD_CSI0_DAT5__AUD3_TXD 0x130b0
+ MX6QDL_PAD_CSI0_DAT6__AUD3_TXFS 0x130b0
+ >;
+ };
+
+ pinctrl_codec1: dac1grp {
+ fsl,pins = <
+ MX6QDL_PAD_GPIO_0__GPIO1_IO00 0x40000038
+ >;
+ };
+
+ pinctrl_codec2: dac2grp {
+ fsl,pins = <
+ MX6QDL_PAD_GPIO_2__GPIO1_IO02 0x40000038
+ >;
+ };
+
+ pinctrl_disp0: disp0grp {
+ fsl,pins = <
+ MX6QDL_PAD_DI0_DISP_CLK__IPU1_DI0_DISP_CLK 0x100f9
+ MX6QDL_PAD_DI0_PIN15__IPU1_DI0_PIN15 0x100f9
+ MX6QDL_PAD_DI0_PIN2__IPU1_DI0_PIN02 0x100f9
+ MX6QDL_PAD_DI0_PIN3__IPU1_DI0_PIN03 0x100f9
+ MX6QDL_PAD_DISP0_DAT0__IPU1_DISP0_DATA00 0x100f9
+ MX6QDL_PAD_DISP0_DAT1__IPU1_DISP0_DATA01 0x100f9
+ MX6QDL_PAD_DISP0_DAT2__IPU1_DISP0_DATA02 0x100f9
+ MX6QDL_PAD_DISP0_DAT3__IPU1_DISP0_DATA03 0x100f9
+ MX6QDL_PAD_DISP0_DAT4__IPU1_DISP0_DATA04 0x100f9
+ MX6QDL_PAD_DISP0_DAT5__IPU1_DISP0_DATA05 0x100f9
+ MX6QDL_PAD_DISP0_DAT6__IPU1_DISP0_DATA06 0x100f9
+ MX6QDL_PAD_DISP0_DAT7__IPU1_DISP0_DATA07 0x100f9
+ MX6QDL_PAD_DISP0_DAT8__IPU1_DISP0_DATA08 0x100f9
+ MX6QDL_PAD_DISP0_DAT9__IPU1_DISP0_DATA09 0x100f9
+ MX6QDL_PAD_DISP0_DAT10__IPU1_DISP0_DATA10 0x100f9
+ MX6QDL_PAD_DISP0_DAT11__IPU1_DISP0_DATA11 0x100f9
+ MX6QDL_PAD_DISP0_DAT12__IPU1_DISP0_DATA12 0x100f9
+ MX6QDL_PAD_DISP0_DAT13__IPU1_DISP0_DATA13 0x100f9
+ MX6QDL_PAD_DISP0_DAT14__IPU1_DISP0_DATA14 0x100f9
+ MX6QDL_PAD_DISP0_DAT15__IPU1_DISP0_DATA15 0x100f9
+ MX6QDL_PAD_DISP0_DAT16__IPU1_DISP0_DATA16 0x100f9
+ MX6QDL_PAD_DISP0_DAT17__IPU1_DISP0_DATA17 0x100f9
+ MX6QDL_PAD_DISP0_DAT18__IPU1_DISP0_DATA18 0x100f9
+ MX6QDL_PAD_DISP0_DAT19__IPU1_DISP0_DATA19 0x100f9
+ MX6QDL_PAD_DISP0_DAT20__IPU1_DISP0_DATA20 0x100f9
+ MX6QDL_PAD_DISP0_DAT21__IPU1_DISP0_DATA21 0x100f9
+ MX6QDL_PAD_DISP0_DAT22__IPU1_DISP0_DATA22 0x100f9
+ MX6QDL_PAD_DISP0_DAT23__IPU1_DISP0_DATA23 0x100f9
+ >;
+ };
+
+ pinctrl_ecspi1: ecspi1grp {
+ fsl,pins = <
+ MX6QDL_PAD_EIM_D17__ECSPI1_MISO 0x100b1
+ MX6QDL_PAD_EIM_D18__ECSPI1_MOSI 0x100b1
+ MX6QDL_PAD_EIM_D16__ECSPI1_SCLK 0x100b1
+ MX6QDL_PAD_EIM_EB2__GPIO2_IO30 0x1b0b1
+ >;
+ };
+
+ pinctrl_enet: enetgrp {
+ fsl,pins = <
+ MX6QDL_PAD_ENET_MDC__ENET_MDC 0x000b1
+ MX6QDL_PAD_ENET_MDIO__ENET_MDIO 0x100b1
+ MX6QDL_PAD_ENET_CRS_DV__ENET_RX_EN 0x100f5
+ MX6QDL_PAD_ENET_TX_EN__ENET_TX_EN 0x100f5
+ MX6QDL_PAD_ENET_RXD0__ENET_RX_DATA0 0x100c0
+ MX6QDL_PAD_ENET_RXD1__ENET_RX_DATA1 0x100c0
+ MX6QDL_PAD_ENET_TXD0__ENET_TX_DATA0 0x100f5
+ MX6QDL_PAD_ENET_TXD1__ENET_TX_DATA1 0x100f5
+ MX6QDL_PAD_GPIO_16__ENET_REF_CLK 0x40010040
+ MX6QDL_PAD_ENET_RX_ER__ENET_RX_ER 0x100b0
+ MX6QDL_PAD_ENET_REF_CLK__GPIO1_IO23 0x1b0b0
+ >;
+ };
+
+ pinctrl_i2c1: i2c1grp {
+ fsl,pins = <
+ MX6QDL_PAD_CSI0_DAT8__I2C1_SDA 0x4001b8b1
+ MX6QDL_PAD_CSI0_DAT9__I2C1_SCL 0x4001b8b1
+ >;
+ };
+
+ pinctrl_i2c2: i2c2grp {
+ fsl,pins = <
+ MX6QDL_PAD_KEY_COL3__I2C2_SCL 0x4001b8b1
+ MX6QDL_PAD_KEY_ROW3__I2C2_SDA 0x4001b8b1
+ >;
+ };
+
+ pinctrl_i2c3: i2c3grp {
+ fsl,pins = <
+ MX6QDL_PAD_GPIO_3__I2C3_SCL 0x4001b8b1
+ MX6QDL_PAD_GPIO_6__I2C3_SDA 0x4001b8b1
+ >;
+ };
+
+ pinctrl_mdio1: bitbangmdiogrp {
+ fsl,pins = <
+ /* Bitbang MDIO for DEB Switch */
+ MX6QDL_PAD_CSI0_DAT19__GPIO6_IO05 0x4001b030
+ MX6QDL_PAD_CSI0_DAT18__GPIO6_IO04 0x40018830
+ >;
+ };
+
+ pinctrl_pcie: pciegrp {
+ fsl,pins = <
+ MX6QDL_PAD_GPIO_17__GPIO7_IO12 0x10038
+ >;
+ };
+
+ pinctrl_pfuze100_irq: pfuze100grp {
+ fsl,pins = <
+ MX6QDL_PAD_GPIO_18__GPIO7_IO13 0x40010000
+ >;
+ };
+
+ pinctrl_reg_3p3v_sd: mmcsupply1grp {
+ fsl,pins = <
+ MX6QDL_PAD_SD3_RST__GPIO7_IO08 0x858
+ >;
+ };
+
+ pinctrl_reg_user_usb: usbotggrp {
+ fsl,pins = <
+ MX6QDL_PAD_EIM_D22__GPIO3_IO22 0x40000038
+ >;
+ };
+
+ pinctrl_rmii_phy_irq: phygrp {
+ fsl,pins = <
+ MX6QDL_PAD_EIM_D30__GPIO3_IO30 0x40010000
+ >;
+ };
+
+ pinctrl_tc358767: tc358767grp {
+ fsl,pins = <
+ MX6QDL_PAD_GPIO_9__GPIO1_IO09 0x10
+ >;
+ };
+
+ pinctrl_tpa1: tpa6130-1grp {
+ fsl,pins = <
+ MX6QDL_PAD_GPIO_4__GPIO1_IO04 0x40000038
+ >;
+ };
+
+ pinctrl_tpa2: tpa6130-2grp {
+ fsl,pins = <
+ MX6QDL_PAD_GPIO_5__GPIO1_IO05 0x40000038
+ >;
+ };
+
+ pinctrl_ts: tsgrp {
+ fsl,pins = <
+ MX6QDL_PAD_GPIO_8__GPIO1_IO08 0x1b0b0
+ MX6QDL_PAD_GPIO_7__GPIO1_IO07 0x1b0b0
+ >;
+ };
+
+ pinctrl_uart1: uart1grp {
+ fsl,pins = <
+ MX6QDL_PAD_CSI0_DAT10__UART1_TX_DATA 0x1b0b1
+ MX6QDL_PAD_CSI0_DAT11__UART1_RX_DATA 0x1b0b1
+ >;
+ };
+
+ pinctrl_uart3: uart3grp {
+ fsl,pins = <
+ MX6QDL_PAD_EIM_D24__UART3_TX_DATA 0x1b0b1
+ MX6QDL_PAD_EIM_D25__UART3_RX_DATA 0x1b0b1
+ MX6QDL_PAD_EIM_D31__UART3_RTS_B 0x1b0b1
+ >;
+ };
+
+ pinctrl_uart4: uart4grp {
+ fsl,pins = <
+ MX6QDL_PAD_CSI0_DAT12__UART4_TX_DATA 0x1b0b1
+ MX6QDL_PAD_CSI0_DAT13__UART4_RX_DATA 0x1b0b1
+ >;
+ };
+
+ pinctrl_usdhc2: usdhc2grp {
+ fsl,pins = <
+ MX6QDL_PAD_SD2_CMD__SD2_CMD 0x10059
+ MX6QDL_PAD_SD2_CLK__SD2_CLK 0x10069
+ MX6QDL_PAD_SD2_DAT0__SD2_DATA0 0x17059
+ MX6QDL_PAD_SD2_DAT1__SD2_DATA1 0x17059
+ MX6QDL_PAD_SD2_DAT2__SD2_DATA2 0x17059
+ MX6QDL_PAD_SD2_DAT3__SD2_DATA3 0x17059
+ MX6QDL_PAD_NANDF_D3__GPIO2_IO03 0x40010040
+ MX6QDL_PAD_NANDF_D2__GPIO2_IO02 0x40010040
+ >;
+ };
+
+ pinctrl_usdhc3: usdhc3grp {
+ fsl,pins = <
+ MX6QDL_PAD_SD3_CMD__SD3_CMD 0x10059
+ MX6QDL_PAD_SD3_CLK__SD3_CLK 0x10069
+ MX6QDL_PAD_SD3_DAT0__SD3_DATA0 0x17059
+ MX6QDL_PAD_SD3_DAT1__SD3_DATA1 0x17059
+ MX6QDL_PAD_SD3_DAT2__SD3_DATA2 0x17059
+ MX6QDL_PAD_SD3_DAT3__SD3_DATA3 0x17059
+ MX6QDL_PAD_NANDF_D1__GPIO2_IO01 0x40010040
+ MX6QDL_PAD_NANDF_D0__GPIO2_IO00 0x40010040
+ >;
+ };
+
+ pinctrl_usdhc4: usdhc4grp {
+ fsl,pins = <
+ MX6QDL_PAD_SD4_CMD__SD4_CMD 0x17059
+ MX6QDL_PAD_SD4_CLK__SD4_CLK 0x10059
+ MX6QDL_PAD_SD4_DAT0__SD4_DATA0 0x17059
+ MX6QDL_PAD_SD4_DAT1__SD4_DATA1 0x17059
+ MX6QDL_PAD_SD4_DAT2__SD4_DATA2 0x17059
+ MX6QDL_PAD_SD4_DAT3__SD4_DATA3 0x17059
+ MX6QDL_PAD_SD4_DAT4__SD4_DATA4 0x17059
+ MX6QDL_PAD_SD4_DAT5__SD4_DATA5 0x17059
+ MX6QDL_PAD_SD4_DAT6__SD4_DATA6 0x17059
+ MX6QDL_PAD_SD4_DAT7__SD4_DATA7 0x17059
+ MX6QDL_PAD_NANDF_ALE__SD4_RESET 0x1b0b1
+ >;
+ };
+};
diff --git a/arch/arm/boot/dts/imx6qdl.dtsi b/arch/arm/boot/dts/imx6qdl.dtsi
index 6d7bf6496117..e426faa9c243 100644
--- a/arch/arm/boot/dts/imx6qdl.dtsi
+++ b/arch/arm/boot/dts/imx6qdl.dtsi
@@ -197,7 +197,7 @@
arm,shared-override;
};
- pcie: pcie@0x01000000 {
+ pcie: pcie@1ffc000 {
compatible = "fsl,imx6q-pcie", "snps,dw-pcie";
reg = <0x01ffc000 0x04000>,
<0x01f00000 0x80000>;
@@ -205,6 +205,7 @@
#address-cells = <3>;
#size-cells = <2>;
device_type = "pci";
+ bus-range = <0x00 0xff>;
ranges = <0x81000000 0 0 0x01f80000 0 0x00010000 /* downstream I/O */
0x82000000 0 0x01000000 0x01000000 0 0x00f00000>; /* non-prefetchable memory */
num-lanes = <1>;
diff --git a/arch/arm/boot/dts/imx6qp-nitrogen6_som2.dts b/arch/arm/boot/dts/imx6qp-nitrogen6_som2.dts
new file mode 100644
index 000000000000..011726c836cd
--- /dev/null
+++ b/arch/arm/boot/dts/imx6qp-nitrogen6_som2.dts
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2017 Boundary Devices, Inc.
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+
+#include "imx6qp.dtsi"
+#include "imx6qdl-nitrogen6_som2.dtsi"
+
+/ {
+ model = "Boundary Devices i.MX6 Quad Plus Nitrogen6_SOM2 Board";
+ compatible = "boundary,imx6qp-nitrogen6_som2", "fsl,imx6qp";
+};
+
+&sata {
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/imx6qp-sabresd.dts b/arch/arm/boot/dts/imx6qp-sabresd.dts
index b23458062f5e..a8a5004dd9c8 100644
--- a/arch/arm/boot/dts/imx6qp-sabresd.dts
+++ b/arch/arm/boot/dts/imx6qp-sabresd.dts
@@ -50,8 +50,8 @@
compatible = "fsl,imx6qp-sabresd", "fsl,imx6qp";
};
-&cpu0 {
- arm-supply = <&sw2_reg>;
+&reg_arm {
+ vin-supply = <&sw2_reg>;
};
&iomuxc {
diff --git a/arch/arm/boot/dts/imx6qp-zii-rdu2.dts b/arch/arm/boot/dts/imx6qp-zii-rdu2.dts
new file mode 100644
index 000000000000..882b3bd97e07
--- /dev/null
+++ b/arch/arm/boot/dts/imx6qp-zii-rdu2.dts
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2016-2017 Zodiac Inflight Innovations
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+
+#include "imx6qp.dtsi"
+#include "imx6qdl-zii-rdu2.dtsi"
+
+/ {
+ model = "ZII RDU2+ Board";
+ compatible = "zii,imx6qp-zii-rdu2", "fsl,imx6qp";
+};
diff --git a/arch/arm/boot/dts/imx6qp.dtsi b/arch/arm/boot/dts/imx6qp.dtsi
index 24d071f5d9cd..59453f2ac4ba 100644
--- a/arch/arm/boot/dts/imx6qp.dtsi
+++ b/arch/arm/boot/dts/imx6qp.dtsi
@@ -56,40 +56,59 @@
clocks = <&clks IMX6QDL_CLK_OCRAM>;
};
- ipu1: ipu@02400000 {
- compatible = "fsl,imx6qp-ipu", "fsl,imx6q-ipu";
- clocks = <&clks IMX6QDL_CLK_IPU1>,
- <&clks IMX6QDL_CLK_IPU1_DI0>, <&clks IMX6QDL_CLK_IPU1_DI1>,
- <&clks IMX6QDL_CLK_IPU1_DI0_SEL>, <&clks IMX6QDL_CLK_IPU1_DI1_SEL>,
- <&clks IMX6QDL_CLK_LDB_DI0_PODF>, <&clks IMX6QDL_CLK_LDB_DI1_PODF>,
- <&clks IMX6QDL_CLK_PRG0_APB>;
- clock-names = "bus",
- "di0", "di1",
- "di0_sel", "di1_sel",
- "ldb_di0", "ldb_di1", "prg";
- };
+ aips-bus@02100000 {
+ pre1: pre@21c8000 {
+ compatible = "fsl,imx6qp-pre";
+ reg = <0x021c8000 0x1000>;
+ interrupts = <GIC_SPI 90 IRQ_TYPE_EDGE_RISING>;
+ clocks = <&clks IMX6QDL_CLK_PRE0>;
+ clock-names = "axi";
+ fsl,iram = <&ocram2>;
+ };
- ipu2: ipu@02800000 {
- compatible = "fsl,imx6qp-ipu", "fsl,imx6q-ipu";
- clocks = <&clks IMX6QDL_CLK_IPU2>,
- <&clks IMX6QDL_CLK_IPU2_DI0>, <&clks IMX6QDL_CLK_IPU2_DI1>,
- <&clks IMX6QDL_CLK_IPU2_DI0_SEL>, <&clks IMX6QDL_CLK_IPU2_DI1_SEL>,
- <&clks IMX6QDL_CLK_LDB_DI0_PODF>, <&clks IMX6QDL_CLK_LDB_DI1_PODF>,
- <&clks IMX6QDL_CLK_PRG1_APB>;
- clock-names = "bus",
- "di0", "di1",
- "di0_sel", "di1_sel",
- "ldb_di0", "ldb_di1", "prg";
- };
+ pre2: pre@21c9000 {
+ compatible = "fsl,imx6qp-pre";
+ reg = <0x021c9000 0x1000>;
+ interrupts = <GIC_SPI 97 IRQ_TYPE_EDGE_RISING>;
+ clocks = <&clks IMX6QDL_CLK_PRE1>;
+ clock-names = "axi";
+ fsl,iram = <&ocram2>;
+ };
- pcie: pcie@0x01000000 {
- compatible = "fsl,imx6qp-pcie", "snps,dw-pcie";
- };
+ pre3: pre@21ca000 {
+ compatible = "fsl,imx6qp-pre";
+ reg = <0x021ca000 0x1000>;
+ interrupts = <GIC_SPI 98 IRQ_TYPE_EDGE_RISING>;
+ clocks = <&clks IMX6QDL_CLK_PRE2>;
+ clock-names = "axi";
+ fsl,iram = <&ocram3>;
+ };
- aips-bus@02100000 {
- mmdc0: mmdc@021b0000 { /* MMDC0 */
- compatible = "fsl,imx6qp-mmdc", "fsl,imx6q-mmdc";
- reg = <0x021b0000 0x4000>;
+ pre4: pre@21cb000 {
+ compatible = "fsl,imx6qp-pre";
+ reg = <0x021cb000 0x1000>;
+ interrupts = <GIC_SPI 99 IRQ_TYPE_EDGE_RISING>;
+ clocks = <&clks IMX6QDL_CLK_PRE3>;
+ clock-names = "axi";
+ fsl,iram = <&ocram3>;
+ };
+
+ prg1: prg@21cc000 {
+ compatible = "fsl,imx6qp-prg";
+ reg = <0x021cc000 0x1000>;
+ clocks = <&clks IMX6QDL_CLK_PRG0_APB>,
+ <&clks IMX6QDL_CLK_PRG0_AXI>;
+ clock-names = "ipg", "axi";
+ fsl,pres = <&pre1>, <&pre2>, <&pre3>;
+ };
+
+ prg2: prg@21cd000 {
+ compatible = "fsl,imx6qp-prg";
+ reg = <0x021cd000 0x1000>;
+ clocks = <&clks IMX6QDL_CLK_PRG1_APB>,
+ <&clks IMX6QDL_CLK_PRG1_AXI>;
+ clock-names = "ipg", "axi";
+ fsl,pres = <&pre4>, <&pre2>, <&pre3>;
};
};
};
@@ -101,6 +120,16 @@
<0 119 IRQ_TYPE_LEVEL_HIGH>;
};
+&ipu1 {
+ compatible = "fsl,imx6qp-ipu", "fsl,imx6q-ipu";
+ fsl,prg = <&prg1>;
+};
+
+&ipu2 {
+ compatible = "fsl,imx6qp-ipu", "fsl,imx6q-ipu";
+ fsl,prg = <&prg2>;
+};
+
&ldb {
clocks = <&clks IMX6QDL_CLK_LDB_DI0_SEL>, <&clks IMX6QDL_CLK_LDB_DI1_SEL>,
<&clks IMX6QDL_CLK_IPU1_DI0_SEL>, <&clks IMX6QDL_CLK_IPU1_DI1_SEL>,
@@ -110,3 +139,11 @@
"di0_sel", "di1_sel", "di2_sel", "di3_sel",
"di0", "di1";
};
+
+&mmdc0 {
+ compatible = "fsl,imx6qp-mmdc", "fsl,imx6q-mmdc";
+};
+
+&pcie {
+ compatible = "fsl,imx6qp-pcie", "snps,dw-pcie";
+};
diff --git a/arch/arm/boot/dts/imx6sx.dtsi b/arch/arm/boot/dts/imx6sx.dtsi
index dd4ec85ecbaa..3f1416be4c36 100644
--- a/arch/arm/boot/dts/imx6sx.dtsi
+++ b/arch/arm/boot/dts/imx6sx.dtsi
@@ -297,7 +297,8 @@
};
uart1: serial@02020000 {
- compatible = "fsl,imx6sx-uart", "fsl,imx21-uart";
+ compatible = "fsl,imx6sx-uart",
+ "fsl,imx6q-uart", "fsl,imx21-uart";
reg = <0x02020000 0x4000>;
interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clks IMX6SX_CLK_UART_IPG>,
@@ -1053,7 +1054,8 @@
};
uart2: serial@021e8000 {
- compatible = "fsl,imx6sx-uart", "fsl,imx21-uart";
+ compatible = "fsl,imx6sx-uart",
+ "fsl,imx6q-uart", "fsl,imx21-uart";
reg = <0x021e8000 0x4000>;
interrupts = <GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clks IMX6SX_CLK_UART_IPG>,
@@ -1065,7 +1067,8 @@
};
uart3: serial@021ec000 {
- compatible = "fsl,imx6sx-uart", "fsl,imx21-uart";
+ compatible = "fsl,imx6sx-uart",
+ "fsl,imx6q-uart", "fsl,imx21-uart";
reg = <0x021ec000 0x4000>;
interrupts = <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clks IMX6SX_CLK_UART_IPG>,
@@ -1077,7 +1080,8 @@
};
uart4: serial@021f0000 {
- compatible = "fsl,imx6sx-uart", "fsl,imx21-uart";
+ compatible = "fsl,imx6sx-uart",
+ "fsl,imx6q-uart", "fsl,imx21-uart";
reg = <0x021f0000 0x4000>;
interrupts = <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clks IMX6SX_CLK_UART_IPG>,
@@ -1089,7 +1093,8 @@
};
uart5: serial@021f4000 {
- compatible = "fsl,imx6sx-uart", "fsl,imx21-uart";
+ compatible = "fsl,imx6sx-uart",
+ "fsl,imx6q-uart", "fsl,imx21-uart";
reg = <0x021f4000 0x4000>;
interrupts = <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clks IMX6SX_CLK_UART_IPG>,
@@ -1229,7 +1234,8 @@
};
uart6: serial@022a0000 {
- compatible = "fsl,imx6sx-uart", "fsl,imx21-uart";
+ compatible = "fsl,imx6sx-uart",
+ "fsl,imx6q-uart", "fsl,imx21-uart";
reg = <0x022a0000 0x4000>;
interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clks IMX6SX_CLK_UART_IPG>,
@@ -1281,7 +1287,7 @@
};
};
- pcie: pcie@0x08000000 {
+ pcie: pcie@8ffc000 {
compatible = "fsl,imx6sx-pcie", "snps,dw-pcie";
reg = <0x08ffc000 0x4000>; /* DBI */
#address-cells = <3>;
@@ -1293,6 +1299,7 @@
0x81000000 0 0 0x08f80000 0 0x00010000
/* non-prefetchable memory */
0x82000000 0 0x08000000 0x08000000 0 0x00f00000>;
+ bus-range = <0x00 0xff>;
num-lanes = <1>;
interrupts = <GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clks IMX6SX_CLK_PCIE_REF_125M>,
diff --git a/arch/arm/boot/dts/imx6ul-14x14-evk.dts b/arch/arm/boot/dts/imx6ul-14x14-evk.dts
index 00f98e5bfcaf..f18e1f1d0ce2 100644
--- a/arch/arm/boot/dts/imx6ul-14x14-evk.dts
+++ b/arch/arm/boot/dts/imx6ul-14x14-evk.dts
@@ -85,11 +85,6 @@
assigned-clock-rates = <786432000>;
};
-&cpu0 {
- arm-supply = <&reg_arm>;
- soc-supply = <&reg_soc>;
-};
-
&i2c2 {
clock_frequency = <100000>;
pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/imx6ul-geam.dtsi b/arch/arm/boot/dts/imx6ul-geam.dtsi
index 940aef67313b..eb94d956808b 100644
--- a/arch/arm/boot/dts/imx6ul-geam.dtsi
+++ b/arch/arm/boot/dts/imx6ul-geam.dtsi
@@ -49,6 +49,23 @@
reg = <0x80000000 0x08000000>;
};
+ backlight {
+ compatible = "pwm-backlight";
+ pwms = <&pwm8 0 100000>;
+ brightness-levels = < 0 1 2 3 4 5 6 7 8 9
+ 10 11 12 13 14 15 16 17 18 19
+ 20 21 22 23 24 25 26 27 28 29
+ 30 31 32 33 34 35 36 37 38 39
+ 40 41 42 43 44 45 46 47 48 49
+ 50 51 52 53 54 55 56 57 58 59
+ 60 61 62 63 64 65 66 67 68 69
+ 70 71 72 73 74 75 76 77 78 79
+ 80 81 82 83 84 85 86 87 88 89
+ 90 91 92 93 94 95 96 97 98 99
+ 100>;
+ default-brightness-level = <100>;
+ };
+
chosen {
stdout-path = &uart1;
};
@@ -143,12 +160,24 @@
display = <&display0>;
};
+&pwm8 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pwm8>;
+ status = "okay";
+};
+
&tsc {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_tsc>;
xnur-gpio = <&gpio1 3 GPIO_ACTIVE_LOW>;
};
+&sai2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_sai2>;
+ status = "okay";
+};
+
&uart1 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart1>;
@@ -290,6 +319,12 @@
>;
};
+ pinctrl_pwm8: pwm8grp {
+ fsl,pins = <
+ MX6UL_PAD_ENET1_RX_ER__PWM8_OUT 0x110b0
+ >;
+ };
+
pinctrl_tsc: tscgrp {
fsl,pin = <
MX6UL_PAD_GPIO1_IO01__GPIO1_IO01 0xb0
@@ -299,6 +334,16 @@
>;
};
+ pinctrl_sai2: sai2grp {
+ fsl,pins = <
+ MX6UL_PAD_JTAG_TCK__SAI2_RX_DATA 0x130b0
+ MX6UL_PAD_JTAG_TMS__CCM_CLKO1 0x4001b031
+ MX6UL_PAD_JTAG_TDI__SAI2_TX_BCLK 0x17088
+ MX6UL_PAD_JTAG_TDO__SAI2_TX_SYNC 0x17088
+ MX6UL_PAD_JTAG_TRST_B__SAI2_TX_DATA 0x120b0
+ >;
+ };
+
pinctrl_uart1: uart1grp {
fsl,pins = <
MX6UL_PAD_UART1_TX_DATA__UART1_DCE_TX 0x1b0b1
diff --git a/arch/arm/boot/dts/imx6ul-isiot-common.dtsi b/arch/arm/boot/dts/imx6ul-isiot-common.dtsi
new file mode 100644
index 000000000000..2beaab6e272e
--- /dev/null
+++ b/arch/arm/boot/dts/imx6ul-isiot-common.dtsi
@@ -0,0 +1,141 @@
+/*
+ * Copyright (C) 2016 Amarula Solutions B.V.
+ * Copyright (C) 2016 Engicam S.r.l.
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+&i2c1 {
+ stmpe811: gpio-expander@44 {
+ compatible = "st,stmpe811";
+ reg = <0x44>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_stmpe>;
+ interrupt-parent = <&gpio1>;
+ interrupts = <18 IRQ_TYPE_EDGE_FALLING>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ stmpe: touchscreen {
+ compatible = "st,stmpe-ts";
+ st,sample-time = <4>;
+ st,mod-12b = <1>;
+ st,ref-sel = <0>;
+ st,adc-freq = <1>;
+ st,ave-ctrl = <1>;
+ st,touch-det-delay = <2>;
+ st,settling = <2>;
+ st,fraction-z = <7>;
+ st,i-drive = <1>;
+ };
+ };
+};
+
+&lcdif {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_lcdif_dat
+ &pinctrl_lcdif_ctrl>;
+ display = <&display0>;
+ status = "okay";
+
+ display0: display {
+ bits-per-pixel = <16>;
+ bus-width = <18>;
+
+ display-timings {
+ native-mode = <&timing0>;
+ timing0: timing0 {
+ clock-frequency = <28000000>;
+ hactive = <800>;
+ vactive = <480>;
+ hfront-porch = <30>;
+ hback-porch = <30>;
+ hsync-len = <64>;
+ vback-porch = <5>;
+ vfront-porch = <5>;
+ vsync-len = <20>;
+ hsync-active = <0>;
+ vsync-active = <0>;
+ de-active = <1>;
+ pixelclk-active = <0>;
+ };
+ };
+ };
+};
+
+&iomuxc {
+ pinctrl_lcdif_ctrl: lcdifctrlgrp {
+ fsl,pins = <
+ MX6UL_PAD_LCD_CLK__LCDIF_CLK 0x79
+ MX6UL_PAD_LCD_ENABLE__LCDIF_ENABLE 0x79
+ MX6UL_PAD_LCD_HSYNC__LCDIF_HSYNC 0x79
+ MX6UL_PAD_LCD_VSYNC__LCDIF_VSYNC 0x79
+ >;
+ };
+
+ pinctrl_lcdif_dat: lcdifdatgrp {
+ fsl,pins = <
+ MX6UL_PAD_LCD_DATA00__LCDIF_DATA00 0x79
+ MX6UL_PAD_LCD_DATA01__LCDIF_DATA01 0x79
+ MX6UL_PAD_LCD_DATA02__LCDIF_DATA02 0x79
+ MX6UL_PAD_LCD_DATA03__LCDIF_DATA03 0x79
+ MX6UL_PAD_LCD_DATA04__LCDIF_DATA04 0x79
+ MX6UL_PAD_LCD_DATA05__LCDIF_DATA05 0x79
+ MX6UL_PAD_LCD_DATA06__LCDIF_DATA06 0x79
+ MX6UL_PAD_LCD_DATA07__LCDIF_DATA07 0x79
+ MX6UL_PAD_LCD_DATA08__LCDIF_DATA08 0x79
+ MX6UL_PAD_LCD_DATA09__LCDIF_DATA09 0x79
+ MX6UL_PAD_LCD_DATA10__LCDIF_DATA10 0x79
+ MX6UL_PAD_LCD_DATA11__LCDIF_DATA11 0x79
+ MX6UL_PAD_LCD_DATA12__LCDIF_DATA12 0x79
+ MX6UL_PAD_LCD_DATA13__LCDIF_DATA13 0x79
+ MX6UL_PAD_LCD_DATA14__LCDIF_DATA14 0x79
+ MX6UL_PAD_LCD_DATA15__LCDIF_DATA15 0x79
+ MX6UL_PAD_LCD_DATA16__LCDIF_DATA16 0x79
+ MX6UL_PAD_LCD_DATA17__LCDIF_DATA17 0x79
+ >;
+ };
+
+ pinctrl_stmpe: stmpegrp {
+ fsl,pins = <
+ MX6UL_PAD_UART1_CTS_B__GPIO1_IO18 0x1b0b0
+ >;
+ };
+};
diff --git a/arch/arm/boot/dts/imx6ul-isiot-emmc.dts b/arch/arm/boot/dts/imx6ul-isiot-emmc.dts
index f5b422898e61..73a1d0f0b9d5 100644
--- a/arch/arm/boot/dts/imx6ul-isiot-emmc.dts
+++ b/arch/arm/boot/dts/imx6ul-isiot-emmc.dts
@@ -43,6 +43,7 @@
/dts-v1/;
#include "imx6ul-isiot.dtsi"
+#include "imx6ul-isiot-common.dtsi"
/ {
model = "Engicam Is.IoT MX6UL eMMC Starter kit";
diff --git a/arch/arm/boot/dts/imx6ul-isiot-nand.dts b/arch/arm/boot/dts/imx6ul-isiot-nand.dts
index de15e1c75dd1..da29a86eb6a8 100644
--- a/arch/arm/boot/dts/imx6ul-isiot-nand.dts
+++ b/arch/arm/boot/dts/imx6ul-isiot-nand.dts
@@ -43,6 +43,7 @@
/dts-v1/;
#include "imx6ul-isiot.dtsi"
+#include "imx6ul-isiot-common.dtsi"
/ {
model = "Engicam Is.IoT MX6UL NAND Starter kit";
diff --git a/arch/arm/boot/dts/imx6ul-isiot.dtsi b/arch/arm/boot/dts/imx6ul-isiot.dtsi
index 0b43699af3e3..ea30380ad7a4 100644
--- a/arch/arm/boot/dts/imx6ul-isiot.dtsi
+++ b/arch/arm/boot/dts/imx6ul-isiot.dtsi
@@ -52,6 +52,49 @@
chosen {
stdout-path = &uart1;
};
+
+ backlight {
+ compatible = "pwm-backlight";
+ pwms = <&pwm8 0 100000>;
+ brightness-levels = < 0 1 2 3 4 5 6 7 8 9
+ 10 11 12 13 14 15 16 17 18 19
+ 20 21 22 23 24 25 26 27 28 29
+ 30 31 32 33 34 35 36 37 38 39
+ 40 41 42 43 44 45 46 47 48 49
+ 50 51 52 53 54 55 56 57 58 59
+ 60 61 62 63 64 65 66 67 68 69
+ 70 71 72 73 74 75 76 77 78 79
+ 80 81 82 83 84 85 86 87 88 89
+ 90 91 92 93 94 95 96 97 98 99
+ 100>;
+ default-brightness-level = <100>;
+ };
+};
+
+&i2c1 {
+ clock-frequency = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c1>;
+ status = "okay";
+};
+
+&i2c2 {
+ clock-frequency = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c2>;
+ status = "okay";
+};
+
+&pwm8 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pwm8>;
+ status = "okay";
+};
+
+&sai2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_sai2>;
+ status = "okay";
};
&uart1 {
@@ -72,6 +115,36 @@
};
&iomuxc {
+ pinctrl_i2c1: i2c1grp {
+ fsl,pins = <
+ MX6UL_PAD_UART4_TX_DATA__I2C1_SCL 0x4001b8b0
+ MX6UL_PAD_UART4_RX_DATA__I2C1_SDA 0x4001b8b0
+ >;
+ };
+
+ pinctrl_i2c2: i2c2grp {
+ fsl,pins = <
+ MX6UL_PAD_GPIO1_IO00__I2C2_SCL 0x4001b8b0
+ MX6UL_PAD_GPIO1_IO01__I2C2_SDA 0x4001b8b0
+ >;
+ };
+
+ pinctrl_pwm8: pwm8grp {
+ fsl,pins = <
+ MX6UL_PAD_ENET1_RX_ER__PWM8_OUT 0x110b0
+ >;
+ };
+
+ pinctrl_sai2: sai2grp {
+ fsl,pins = <
+ MX6UL_PAD_JTAG_TCK__SAI2_RX_DATA 0x130b0
+ MX6UL_PAD_JTAG_TMS__CCM_CLKO1 0x4001b031
+ MX6UL_PAD_JTAG_TDI__SAI2_TX_BCLK 0x17088
+ MX6UL_PAD_JTAG_TDO__SAI2_TX_SYNC 0x17088
+ MX6UL_PAD_JTAG_TRST_B__SAI2_TX_DATA 0x120b0
+ >;
+ };
+
pinctrl_uart1: uart1grp {
fsl,pins = <
MX6UL_PAD_UART1_TX_DATA__UART1_DCE_TX 0x1b0b1
diff --git a/arch/arm/boot/dts/imx7-colibri-eval-v3.dtsi b/arch/arm/boot/dts/imx7-colibri-eval-v3.dtsi
index 373ee19196a6..18bebd6d8d47 100644
--- a/arch/arm/boot/dts/imx7-colibri-eval-v3.dtsi
+++ b/arch/arm/boot/dts/imx7-colibri-eval-v3.dtsi
@@ -44,11 +44,39 @@
chosen {
stdout-path = "serial0:115200n8";
};
+
+ panel: panel {
+ compatible = "edt,et057090dhu";
+ backlight = <&bl>;
+ power-supply = <&reg_3v3>;
+
+ port {
+ panel_in: endpoint {
+ remote-endpoint = <&lcdif_out>;
+ };
+ };
+ };
+
+ reg_3v3: regulator-3v3 {
+ compatible = "regulator-fixed";
+ regulator-name = "3.3V";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ reg_5v0: regulator-5v0 {
+ compatible = "regulator-fixed";
+ regulator-name = "5V";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ };
};
&bl {
brightness-levels = <0 4 8 16 32 64 128 255>;
default-brightness-level = <6>;
+ power-supply = <&reg_3v3>;
+
status = "okay";
};
@@ -75,32 +103,11 @@
};
&lcdif {
- display = <&display0>;
status = "okay";
- display0: lcd-display {
- bits-per-pixel = <16>;
- bus-width = <18>;
-
- display-timings {
- native-mode = <&timing_vga>;
-
- /* Standard VGA timing */
- timing_vga: 640x480 {
- clock-frequency = <25175000>;
- hactive = <640>;
- vactive = <480>;
- hback-porch = <40>;
- hfront-porch = <24>;
- vback-porch = <32>;
- vfront-porch = <11>;
- hsync-len = <96>;
- vsync-len = <2>;
- de-active = <1>;
- hsync-active = <0>;
- vsync-active = <0>;
- pixelclk-active = <0>;
- };
+ port {
+ lcdif_out: endpoint {
+ remote-endpoint = <&panel_in>;
};
};
};
diff --git a/arch/arm/boot/dts/imx7-colibri.dtsi b/arch/arm/boot/dts/imx7-colibri.dtsi
index a171545478be..2d87489f9105 100644
--- a/arch/arm/boot/dts/imx7-colibri.dtsi
+++ b/arch/arm/boot/dts/imx7-colibri.dtsi
@@ -60,13 +60,6 @@
regulator-max-microvolt = <3300000>;
};
- reg_vref_1v8: regulator-vref-1v8 {
- compatible = "regulator-fixed";
- regulator-name = "vref-1v8";
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- };
-
sound {
compatible = "simple-audio-card";
simple-audio-card,name = "imx7-sgtl5000";
@@ -85,11 +78,11 @@
};
&adc1 {
- vref-supply = <&reg_vref_1v8>;
+ vref-supply = <&reg_DCDC3>;
};
&adc2 {
- vref-supply = <&reg_vref_1v8>;
+ vref-supply = <&reg_DCDC3>;
};
&cpu0 {
@@ -151,29 +144,29 @@
regulators {
reg_DCDC1: DCDC1 { /* V1.0_SOC */
- regulator-min-microvolt = <975000>;
- regulator-max-microvolt = <1125000>;
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1100000>;
regulator-boot-on;
regulator-always-on;
};
reg_DCDC2: DCDC2 { /* V1.1_ARM */
- regulator-min-microvolt = <975000>;
- regulator-max-microvolt = <1125000>;
+ regulator-min-microvolt = <975000>;
+ regulator-max-microvolt = <1100000>;
regulator-boot-on;
regulator-always-on;
};
reg_DCDC3: DCDC3 { /* V1.8 */
- regulator-min-microvolt = <1775000>;
- regulator-max-microvolt = <1825000>;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
regulator-boot-on;
regulator-always-on;
};
reg_DCDC4: DCDC4 { /* V1.35_DRAM */
- regulator-min-microvolt = <1325000>;
- regulator-max-microvolt = <1375000>;
+ regulator-min-microvolt = <1350000>;
+ regulator-max-microvolt = <1350000>;
regulator-boot-on;
regulator-always-on;
};
@@ -181,33 +174,33 @@
reg_LDO1: LDO1 { /* PWR_EN_+V3.3_ETH */
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <3300000>;
- regulator-always-on;
+ regulator-boot-on;
};
reg_LDO2: LDO2 { /* +V1.8_SD */
- regulator-min-microvolt = <1775000>;
- regulator-max-microvolt = <3325000>;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
regulator-boot-on;
regulator-always-on;
};
reg_LDO3: LDO3 { /* PWR_EN_+V3.3_LPSR */
- regulator-min-microvolt = <3275000>;
- regulator-max-microvolt = <3325000>;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
regulator-boot-on;
regulator-always-on;
};
reg_LDO4: LDO4 { /* V1.8_LPSR */
- regulator-min-microvolt = <1775000>;
- regulator-max-microvolt = <1825000>;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
regulator-boot-on;
regulator-always-on;
};
reg_LDO5: LDO5 { /* PWR_EN_+V3.3 */
- regulator-min-microvolt = <1775000>;
- regulator-max-microvolt = <1825000>;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
regulator-boot-on;
regulator-always-on;
};
diff --git a/arch/arm/boot/dts/imx7d-colibri-eval-v3.dts b/arch/arm/boot/dts/imx7d-colibri-eval-v3.dts
index bd01d2cc642d..a608a14d8c85 100644
--- a/arch/arm/boot/dts/imx7d-colibri-eval-v3.dts
+++ b/arch/arm/boot/dts/imx7d-colibri-eval-v3.dts
@@ -57,6 +57,7 @@
regulator-min-microvolt = <5000000>;
regulator-max-microvolt = <5000000>;
gpio = <&gpio4 7 GPIO_ACTIVE_LOW>;
+ vin-supply = <&reg_5v0>;
};
};
diff --git a/arch/arm/boot/dts/imx7d-sdb-sht11.dts b/arch/arm/boot/dts/imx7d-sdb-sht11.dts
new file mode 100644
index 000000000000..64a20ed1713a
--- /dev/null
+++ b/arch/arm/boot/dts/imx7d-sdb-sht11.dts
@@ -0,0 +1,74 @@
+/*
+ * Copyright (C) 2015 Freescale Semiconductor, Inc.
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "imx7d-sdb.dts"
+
+/ {
+ sensor {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_sensor>;
+ compatible = "sensirion,sht15";
+ clk-gpios = <&gpio4 12 0>;
+ data-gpios = <&gpio4 13 0>;
+ vcc-supply = <&reg_sht15>;
+ };
+
+ reg_sht15: regulator-sht15 {
+ compatible = "regulator-fixed";
+ regulator-name = "reg_sht15";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+};
+
+&i2c3 {
+ status = "disabled";
+};
+
+&iomuxc {
+ pinctrl_sensor: sensorgrp {
+ fsl,pins = <
+ MX7D_PAD_I2C3_SDA__GPIO4_IO13 0x4000007f
+ MX7D_PAD_I2C3_SCL__GPIO4_IO12 0x4000007f
+ >;
+ };
+};
diff --git a/arch/arm/boot/dts/imx7s.dtsi b/arch/arm/boot/dts/imx7s.dtsi
index 5d3a43b8de20..c4f12fd2e044 100644
--- a/arch/arm/boot/dts/imx7s.dtsi
+++ b/arch/arm/boot/dts/imx7s.dtsi
@@ -493,10 +493,9 @@
};
ocotp: ocotp-ctrl@30350000 {
- compatible = "syscon";
+ compatible = "fsl,imx7d-ocotp", "syscon";
reg = <0x30350000 0x10000>;
- clocks = <&clks IMX7D_CLK_DUMMY>;
- status = "disabled";
+ clocks = <&clks IMX7D_OCOTP_CLK>;
};
anatop: anatop@30360000 {
@@ -559,7 +558,7 @@
};
src: src@30390000 {
- compatible = "fsl,imx7d-src", "fsl,imx51-src", "syscon";
+ compatible = "fsl,imx7d-src", "syscon";
reg = <0x30390000 0x10000>;
interrupts = <GIC_SPI 89 IRQ_TYPE_LEVEL_HIGH>;
#reset-cells = <1>;
diff --git a/arch/arm/boot/dts/include/arm b/arch/arm/boot/dts/include/arm
new file mode 120000
index 000000000000..a96aa0ea9d8c
--- /dev/null
+++ b/arch/arm/boot/dts/include/arm
@@ -0,0 +1 @@
+..
\ No newline at end of file
diff --git a/arch/arm/boot/dts/include/arm64 b/arch/arm/boot/dts/include/arm64
new file mode 120000
index 000000000000..074a835fca3e
--- /dev/null
+++ b/arch/arm/boot/dts/include/arm64
@@ -0,0 +1 @@
+../../../../arm64/boot/dts
\ No newline at end of file
diff --git a/arch/arm/boot/dts/meson8.dtsi b/arch/arm/boot/dts/meson8.dtsi
index 45619f6162c5..ebc763eab195 100644
--- a/arch/arm/boot/dts/meson8.dtsi
+++ b/arch/arm/boot/dts/meson8.dtsi
@@ -106,6 +106,7 @@
reg-names = "mux", "pull", "pull-enable", "gpio";
gpio-controller;
#gpio-cells = <2>;
+ gpio-ranges = <&pinctrl_cbus 0 0 120>;
};
spi_nor_pins: nor {
@@ -148,6 +149,7 @@
reg-names = "mux", "pull", "gpio";
gpio-controller;
#gpio-cells = <2>;
+ gpio-ranges = <&pinctrl_aobus 0 120 16>;
};
uart_ao_a_pins: uart_ao_a {
diff --git a/arch/arm/boot/dts/meson8b.dtsi b/arch/arm/boot/dts/meson8b.dtsi
index 41fd53671859..828aa49c678c 100644
--- a/arch/arm/boot/dts/meson8b.dtsi
+++ b/arch/arm/boot/dts/meson8b.dtsi
@@ -198,6 +198,7 @@
reg-names = "mux", "pull", "pull-enable", "gpio";
gpio-controller;
#gpio-cells = <2>;
+ gpio-ranges = <&pinctrl_cbus 0 0 130>;
};
};
@@ -215,6 +216,7 @@
reg-names = "mux", "pull", "gpio";
gpio-controller;
#gpio-cells = <2>;
+ gpio-ranges = <&pinctrl_aobus 0 130 16>;
};
uart_ao_a_pins: uart_ao_a {
diff --git a/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi b/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi
new file mode 100644
index 000000000000..f5aeb3959afd
--- /dev/null
+++ b/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi
@@ -0,0 +1,243 @@
+/*
+ * Common CPCAP configuration used on Motorola phones
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+&mcspi1 {
+ cpcap: pmic@0 {
+ compatible = "motorola,cpcap", "st,6556002";
+ reg = <0>; /* cs0 */
+ interrupt-parent = <&gpio1>;
+ interrupts = <7 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ spi-max-frequency = <3000000>;
+ spi-cs-high;
+
+ cpcap_adc: adc {
+ compatible = "motorola,mapphone-cpcap-adc";
+ interrupts-extended = <&cpcap 8 0>;
+ interrupt-names = "adcdone";
+ #io-channel-cells = <1>;
+ };
+
+ cpcap_charger: charger {
+ compatible = "motorola,mapphone-cpcap-charger";
+ interrupts-extended = <
+ &cpcap 13 0 &cpcap 12 0 &cpcap 29 0 &cpcap 28 0
+ &cpcap 22 0 &cpcap 20 0 &cpcap 19 0 &cpcap 54 0
+ >;
+ interrupt-names =
+ "chrg_det", "rvrs_chrg", "chrg_se1b", "se0conn",
+ "rvrs_mode", "chrgcurr1", "vbusvld", "battdetb";
+ mode-gpios = <&gpio3 29 GPIO_ACTIVE_LOW
+ &gpio3 23 GPIO_ACTIVE_LOW>;
+ io-channels = <&cpcap_adc 0 &cpcap_adc 1
+ &cpcap_adc 2 &cpcap_adc 5
+ &cpcap_adc 6>;
+ io-channel-names = "battdetb", "battp",
+ "vbus", "chg_isense",
+ "batti";
+ };
+
+ cpcap_regulator: regulator {
+ compatible = "motorola,mapphone-cpcap-regulator";
+
+ cpcap_regulators: regulators {
+ };
+ };
+
+ cpcap_rtc: rtc {
+ compatible = "motorola,cpcap-rtc";
+
+ interrupt-parent = <&cpcap>;
+ interrupts = <39 IRQ_TYPE_NONE>, <26 IRQ_TYPE_NONE>;
+ };
+
+ power_button: button {
+ compatible = "motorola,cpcap-pwrbutton";
+
+ interrupts = <23 IRQ_TYPE_NONE>;
+ };
+
+ cpcap_usb2_phy: phy {
+ compatible = "motorola,mapphone-cpcap-usb-phy";
+ pinctrl-0 = <&usb_gpio_mux_sel1 &usb_gpio_mux_sel2>;
+ pinctrl-1 = <&usb_ulpi_pins>;
+ pinctrl-2 = <&usb_utmi_pins>;
+ pinctrl-3 = <&uart3_pins>;
+ pinctrl-names = "default", "ulpi", "utmi", "uart";
+ #phy-cells = <0>;
+ interrupts-extended = <
+ &cpcap 15 0 &cpcap 14 0 &cpcap 28 0 &cpcap 19 0
+ &cpcap 18 0 &cpcap 17 0 &cpcap 16 0 &cpcap 49 0
+ &cpcap 48 1
+ >;
+ interrupt-names =
+ "id_ground", "id_float", "se0conn", "vbusvld",
+ "sessvld", "sessend", "se1", "dm", "dp";
+ mode-gpios = <&gpio2 28 GPIO_ACTIVE_HIGH
+ &gpio1 0 GPIO_ACTIVE_HIGH>;
+ io-channels = <&cpcap_adc 2>, <&cpcap_adc 7>;
+ io-channel-names = "vbus", "id";
+ vusb-supply = <&vusb>;
+ };
+
+ led_red: led-red {
+ compatible = "motorola,cpcap-led-red";
+ vdd-supply = <&sw5>;
+ label = "status-led:red";
+ };
+
+ led_green: led-green {
+ compatible = "motorola,cpcap-led-green";
+ vdd-supply = <&sw5>;
+ label = "status-led:green";
+ };
+
+ led_blue: led-blue {
+ compatible = "motorola,cpcap-led-blue";
+ vdd-supply = <&sw5>;
+ label = "status-led:blue";
+ };
+
+ led_adl: led-adl {
+ compatible = "motorola,cpcap-led-adl";
+ vdd-supply = <&sw5>;
+ label = "button-backlight";
+ };
+
+ led_cp: led-cp {
+ compatible = "motorola,cpcap-led-cp";
+ vdd-supply = <&sw5>;
+ label = "shift-key-light";
+ };
+ };
+};
+
+&cpcap_regulators {
+ sw5: SW5 {
+ regulator-min-microvolt = <5050000>;
+ regulator-max-microvolt = <5050000>;
+ regulator-enable-ramp-delay = <50000>;
+ regulator-boot-on;
+ };
+
+ vcam: VCAM {
+ regulator-min-microvolt = <2900000>;
+ regulator-max-microvolt = <2900000>;
+ regulator-enable-ramp-delay = <1000>;
+ };
+
+ /* Used by DSS */
+ vcsi: VCSI {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-enable-ramp-delay = <1000>;
+ regulator-boot-on;
+ };
+
+ vdac: VDAC {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-enable-ramp-delay = <1000>;
+ };
+
+ vdig: VDIG {
+ regulator-min-microvolt = <1875000>;
+ regulator-max-microvolt = <1875000>;
+ regulator-enable-ramp-delay = <1000>;
+ };
+
+ vfuse: VFUSE {
+ regulator-min-microvolt = <1500000>;
+ regulator-max-microvolt = <3150000>;
+ regulator-enable-ramp-delay = <1000>;
+ };
+
+ vhvio: VHVIO {
+ regulator-min-microvolt = <2775000>;
+ regulator-max-microvolt = <2775000>;
+ regulator-enable-ramp-delay = <1000>;
+ regulator-always-on;
+ };
+
+ /* Used by eMMC at mmc2 */
+ vsdio: VSDIO {
+ regulator-min-microvolt = <2900000>;
+ regulator-max-microvolt = <2900000>;
+ regulator-enable-ramp-delay = <1000>;
+ };
+
+ vpll: VPLL {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-enable-ramp-delay = <100>;
+ };
+
+ vrf1: VRF1 {
+ regulator-min-microvolt = <2775000>;
+ regulator-max-microvolt = <2775000>;
+ regulator-enable-ramp-delay = <1000>;
+ };
+
+ vrf2: VRF2 {
+ regulator-min-microvolt = <2775000>;
+ regulator-max-microvolt = <2775000>;
+ regulator-enable-ramp-delay = <1000>;
+ };
+
+ vrfref: VRFREF {
+ regulator-min-microvolt = <2500000>;
+ regulator-max-microvolt = <2775000>;
+ regulator-enable-ramp-delay = <100>;
+ };
+
+ vwlan1: VWLAN1 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1900000>;
+ regulator-enable-ramp-delay = <1000>;
+ };
+
+ /* Used by micro-SDIO at mmc1 */
+ vwlan2: VWLAN2 {
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-enable-ramp-delay = <1000>;
+ };
+
+ vsim: VSIM {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2900000>;
+ regulator-enable-ramp-delay = <1000>;
+ };
+
+ vsimcard: VSIMCARD {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2900000>;
+ regulator-enable-ramp-delay = <1000>;
+ };
+
+ vvib: VVIB {
+ regulator-min-microvolt = <1300000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-enable-ramp-delay = <500>;
+ };
+
+ vusb: VUSB {
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-enable-ramp-delay = <1000>;
+ };
+
+ vaudio: VAUDIO {
+ regulator-min-microvolt = <2775000>;
+ regulator-max-microvolt = <2775000>;
+ regulator-enable-ramp-delay = <1000>;
+ };
+};
diff --git a/arch/arm/boot/dts/moxart-uc7112lx.dts b/arch/arm/boot/dts/moxart-uc7112lx.dts
index 10d088df0c35..4a962a26482d 100644
--- a/arch/arm/boot/dts/moxart-uc7112lx.dts
+++ b/arch/arm/boot/dts/moxart-uc7112lx.dts
@@ -6,7 +6,7 @@
*/
/dts-v1/;
-/include/ "moxart.dtsi"
+#include "moxart.dtsi"
/ {
model = "MOXA UC-7112-LX";
diff --git a/arch/arm/boot/dts/moxart.dtsi b/arch/arm/boot/dts/moxart.dtsi
index 1fd27ed65a01..e86f8c905ac5 100644
--- a/arch/arm/boot/dts/moxart.dtsi
+++ b/arch/arm/boot/dts/moxart.dtsi
@@ -6,6 +6,7 @@
*/
/include/ "skeleton.dtsi"
+#include <dt-bindings/interrupt-controller/irq.h>
/ {
compatible = "moxa,moxart";
@@ -36,8 +37,8 @@
ranges;
intc: interrupt-controller@98800000 {
- compatible = "moxa,moxart-ic";
- reg = <0x98800000 0x38>;
+ compatible = "moxa,moxart-ic", "faraday,ftintc010";
+ reg = <0x98800000 0x100>;
interrupt-controller;
#interrupt-cells = <2>;
interrupt-mask = <0x00080000>;
@@ -59,15 +60,15 @@
timer: timer@98400000 {
compatible = "moxa,moxart-timer";
reg = <0x98400000 0x42>;
- interrupts = <19 1>;
+ interrupts = <19 IRQ_TYPE_EDGE_FALLING>;
clocks = <&clk_apb>;
};
gpio: gpio@98700000 {
gpio-controller;
#gpio-cells = <2>;
- compatible = "moxa,moxart-gpio";
- reg = <0x98700000 0xC>;
+ compatible = "moxa,moxart-gpio", "faraday,ftgpio010";
+ reg = <0x98700000 0x100>;
};
rtc: rtc {
@@ -80,7 +81,7 @@
dma: dma@90500000 {
compatible = "moxa,moxart-dma";
reg = <0x90500080 0x40>;
- interrupts = <24 0>;
+ interrupts = <24 IRQ_TYPE_LEVEL_HIGH>;
#dma-cells = <1>;
};
@@ -93,7 +94,7 @@
sdhci: sdhci@98e00000 {
compatible = "moxa,moxart-sdhci";
reg = <0x98e00000 0x5C>;
- interrupts = <5 0>;
+ interrupts = <5 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clk_apb>;
dmas = <&dma 5>,
<&dma 5>;
@@ -120,7 +121,7 @@
mac0: mac@90900000 {
compatible = "moxa,moxart-mac";
reg = <0x90900000 0x90>;
- interrupts = <25 0>;
+ interrupts = <25 IRQ_TYPE_LEVEL_HIGH>;
phy-handle = <&ethphy0>;
phy-mode = "mii";
status = "disabled";
@@ -129,7 +130,7 @@
mac1: mac@92000000 {
compatible = "moxa,moxart-mac";
reg = <0x92000000 0x90>;
- interrupts = <27 0>;
+ interrupts = <27 IRQ_TYPE_LEVEL_HIGH>;
phy-handle = <&ethphy1>;
phy-mode = "mii";
status = "disabled";
@@ -138,7 +139,7 @@
uart0: uart@98200000 {
compatible = "ns16550a";
reg = <0x98200000 0x20>;
- interrupts = <31 8>;
+ interrupts = <31 IRQ_TYPE_LEVEL_HIGH>;
reg-shift = <2>;
reg-io-width = <4>;
clock-frequency = <14745600>;
diff --git a/arch/arm/boot/dts/omap3-cpu-thermal.dtsi b/arch/arm/boot/dts/omap3-cpu-thermal.dtsi
new file mode 100644
index 000000000000..235ecfd61e2d
--- /dev/null
+++ b/arch/arm/boot/dts/omap3-cpu-thermal.dtsi
@@ -0,0 +1,20 @@
+/*
+ * Device Tree Source for OMAP3 SoC CPU thermal
+ *
+ * Copyright (C) 2017 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <dt-bindings/thermal/thermal.h>
+
+cpu_thermal: cpu_thermal {
+ polling-delay-passive = <250>; /* milliseconds */
+ polling-delay = <1000>; /* milliseconds */
+ coefficients = <0 20000>;
+
+ /* sensor ID */
+ thermal-sensors = <&bandgap 0>;
+};
diff --git a/arch/arm/boot/dts/omap3-igep.dtsi b/arch/arm/boot/dts/omap3-igep.dtsi
index e268efde6c6d..4ad7d5565906 100644
--- a/arch/arm/boot/dts/omap3-igep.dtsi
+++ b/arch/arm/boot/dts/omap3-igep.dtsi
@@ -37,6 +37,13 @@
};
&omap3_pmx_core {
+ gpmc_pins: pinmux_gpmc_pins {
+ pinctrl-single,pins = <
+ /* OneNAND seems to require PIN_INPUT on clock. */
+ OMAP3_CORE1_IOPAD(0x20be, PIN_INPUT | MUX_MODE0) /* gpmc_clk.gpmc_clk */
+ >;
+ };
+
uart1_pins: pinmux_uart1_pins {
pinctrl-single,pins = <
OMAP3_CORE1_IOPAD(0x2182, PIN_INPUT | MUX_MODE0) /* uart1_rx.uart1_rx */
@@ -98,6 +105,9 @@
};
&gpmc {
+ pinctrl-names = "default";
+ pinctrl-0 = <&gpmc_pins>;
+
nand@0,0 {
compatible = "ti,omap2-nand";
reg = <0 0 4>; /* CS0, offset 0, IO size 4 */
@@ -126,6 +136,48 @@
#address-cells = <1>;
#size-cells = <1>;
+
+ status = "okay";
+ };
+
+ onenand@0,0 {
+ compatible = "ti,omap2-onenand";
+ reg = <0 0 0x20000>; /* CS0, offset 0, IO size 128K */
+
+ gpmc,sync-read;
+ gpmc,sync-write;
+ gpmc,burst-length = <16>;
+ gpmc,burst-read;
+ gpmc,burst-wrap;
+ gpmc,burst-write;
+ gpmc,device-width = <2>; /* GPMC_DEVWIDTH_16BIT */
+ gpmc,mux-add-data = <2>; /* GPMC_MUX_AD */
+ gpmc,cs-on-ns = <0>;
+ gpmc,cs-rd-off-ns = <87>;
+ gpmc,cs-wr-off-ns = <87>;
+ gpmc,adv-on-ns = <0>;
+ gpmc,adv-rd-off-ns = <10>;
+ gpmc,adv-wr-off-ns = <10>;
+ gpmc,oe-on-ns = <15>;
+ gpmc,oe-off-ns = <87>;
+ gpmc,we-on-ns = <0>;
+ gpmc,we-off-ns = <87>;
+ gpmc,rd-cycle-ns = <112>;
+ gpmc,wr-cycle-ns = <112>;
+ gpmc,access-ns = <81>;
+ gpmc,page-burst-access-ns = <15>;
+ gpmc,bus-turnaround-ns = <0>;
+ gpmc,cycle2cycle-delay-ns = <0>;
+ gpmc,wait-monitoring-ns = <0>;
+ gpmc,clk-activation-ns = <5>;
+ gpmc,wr-data-mux-bus-ns = <30>;
+ gpmc,wr-access-ns = <81>;
+ gpmc,sync-clk-ps = <15000>;
+
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ status = "disabled";
};
};
diff --git a/arch/arm/boot/dts/omap3-n900.dts b/arch/arm/boot/dts/omap3-n900.dts
index b64cfda8dbb7..49f37084e435 100644
--- a/arch/arm/boot/dts/omap3-n900.dts
+++ b/arch/arm/boot/dts/omap3-n900.dts
@@ -155,6 +155,13 @@
compatible = "nokia,n900-ir";
pwms = <&pwm9 0 26316 0>; /* 38000 Hz */
};
+
+ /* controlled (enabled/disabled) directly by bcm2048 and wl1251 */
+ vctcxo: vctcxo {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <38400000>;
+ };
};
&omap3_pmx_core {
@@ -162,8 +169,10 @@
uart2_pins: pinmux_uart2_pins {
pinctrl-single,pins = <
- OMAP3_CORE1_IOPAD(0x217a, PIN_INPUT | MUX_MODE0) /* uart2_rx */
+ OMAP3_CORE1_IOPAD(0x2174, PIN_INPUT_PULLUP | MUX_MODE0) /* uart2_cts */
+ OMAP3_CORE1_IOPAD(0x2176, PIN_OUTPUT | MUX_MODE0) /* uart2_rts */
OMAP3_CORE1_IOPAD(0x2178, PIN_OUTPUT | MUX_MODE0) /* uart2_tx */
+ OMAP3_CORE1_IOPAD(0x217a, PIN_INPUT | MUX_MODE0) /* uart2_rx */
>;
};
@@ -920,6 +929,8 @@
interrupt-parent = <&gpio2>;
interrupts = <10 IRQ_TYPE_NONE>; /* gpio line 42 */
+
+ clocks = <&vctcxo>;
};
};
@@ -937,9 +948,17 @@
};
&uart2 {
- interrupts-extended = <&intc 73 &omap3_pmx_core OMAP3_UART2_RX>;
pinctrl-names = "default";
pinctrl-0 = <&uart2_pins>;
+
+ bcm2048: bluetooth {
+ compatible = "brcm,bcm2048-nokia", "nokia,h4p-bluetooth";
+ reset-gpios = <&gpio3 27 GPIO_ACTIVE_LOW>; /* 91 */
+ host-wakeup-gpios = <&gpio4 5 GPIO_ACTIVE_HIGH>; /* 101 */
+ bluetooth-wakeup-gpios = <&gpio2 5 GPIO_ACTIVE_HIGH>; /* 37 */
+ clocks = <&vctcxo>;
+ clock-names = "sysclk";
+ };
};
&uart3 {
diff --git a/arch/arm/boot/dts/omap3-n950-n9.dtsi b/arch/arm/boot/dts/omap3-n950-n9.dtsi
index 5d8c4b4a4205..df3366fa5409 100644
--- a/arch/arm/boot/dts/omap3-n950-n9.dtsi
+++ b/arch/arm/boot/dts/omap3-n950-n9.dtsi
@@ -58,6 +58,13 @@
pinctrl-0 = <&debug_leds>;
};
};
+
+ /* controlled (enabled/disabled) directly by wl1271 */
+ vctcxo: vctcxo {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <38400000>;
+ };
};
&omap3_pmx_core {
@@ -125,6 +132,15 @@
OMAP3_CORE1_IOPAD(0x210a, PIN_OUTPUT | MUX_MODE4) /* gpio_93 (cmt_apeslpx) */
>;
};
+
+ uart2_pins: pinmux_uart2_pins {
+ pinctrl-single,pins = <
+ OMAP3_CORE1_IOPAD(0x2174, PIN_INPUT_PULLUP | MUX_MODE0) /* uart2_cts */
+ OMAP3_CORE1_IOPAD(0x2176, PIN_OUTPUT | MUX_MODE0) /* uart2_rts */
+ OMAP3_CORE1_IOPAD(0x2178, PIN_OUTPUT | MUX_MODE0) /* uart2_tx */
+ OMAP3_CORE1_IOPAD(0x217a, PIN_INPUT | MUX_MODE0) /* uart2_rx */
+ >;
+ };
};
&omap3_pmx_core2 {
@@ -435,3 +451,19 @@
&ssi_port2 {
status = "disabled";
};
+
+&uart2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart2_pins>;
+
+ bluetooth {
+ compatible = "ti,wl1271-bluetooth-nokia", "nokia,h4p-bluetooth";
+
+ reset-gpios = <&gpio1 26 GPIO_ACTIVE_LOW>; /* 26 */
+ host-wakeup-gpios = <&gpio4 5 GPIO_ACTIVE_HIGH>; /* 101 */
+ bluetooth-wakeup-gpios = <&gpio2 5 GPIO_ACTIVE_HIGH>; /* 37 */
+
+ clocks = <&vctcxo>;
+ clock-names = "sysclk";
+ };
+};
diff --git a/arch/arm/boot/dts/omap34xx.dtsi b/arch/arm/boot/dts/omap34xx.dtsi
index 834fdf13601f..ac4f8795b756 100644
--- a/arch/arm/boot/dts/omap34xx.dtsi
+++ b/arch/arm/boot/dts/omap34xx.dtsi
@@ -14,7 +14,7 @@
/ {
cpus {
- cpu@0 {
+ cpu: cpu@0 {
/* OMAP343x/OMAP35xx variants OPP1-5 */
operating-points = <
/* kHz uV */
@@ -56,12 +56,16 @@
};
};
- bandgap@48002524 {
+ bandgap: bandgap@48002524 {
reg = <0x48002524 0x4>;
compatible = "ti,omap34xx-bandgap";
#thermal-sensor-cells = <0>;
};
};
+
+ thermal_zones: thermal-zones {
+ #include "omap3-cpu-thermal.dtsi"
+ };
};
&ssi {
diff --git a/arch/arm/boot/dts/omap36xx.dtsi b/arch/arm/boot/dts/omap36xx.dtsi
index d1a3e56b50ce..ade31d74c70c 100644
--- a/arch/arm/boot/dts/omap36xx.dtsi
+++ b/arch/arm/boot/dts/omap36xx.dtsi
@@ -19,7 +19,7 @@
cpus {
/* OMAP3630/OMAP37xx 'standard device' variants OPP50 to OPP130 */
- cpu@0 {
+ cpu: cpu@0 {
operating-points = <
/* kHz uV */
300000 1012500
@@ -88,12 +88,16 @@
};
};
- bandgap@48002524 {
+ bandgap: bandgap@48002524 {
reg = <0x48002524 0x4>;
compatible = "ti,omap36xx-bandgap";
#thermal-sensor-cells = <0>;
};
};
+
+ thermal_zones: thermal-zones {
+ #include "omap3-cpu-thermal.dtsi"
+ };
};
/* OMAP3630 needs dss_96m_fck for VENC */
diff --git a/arch/arm/boot/dts/omap4-droid4-xt894.dts b/arch/arm/boot/dts/omap4-droid4-xt894.dts
index f3ccb4ceed9e..89eb607f4a9e 100644
--- a/arch/arm/boot/dts/omap4-droid4-xt894.dts
+++ b/arch/arm/boot/dts/omap4-droid4-xt894.dts
@@ -5,7 +5,9 @@
*/
/dts-v1/;
+#include <dt-bindings/input/input.h>
#include "omap443x.dtsi"
+#include "motorola-cpcap-mapphone.dtsi"
/ {
model = "Motorola Droid 4 XT894";
@@ -15,35 +17,76 @@
stdout-path = &uart3;
};
+ aliases {
+ display0 = &lcd0;
+ display1 = &hdmi0;
+ };
+
/*
* We seem to have only 1021 MB accessible, 1021 - 1022 is locked,
- * then 1023 - 1024 seems to contain mbm. For SRAM, see the notes
- * below about SRAM and L3_ICLK2 being unused by default,
+ * then 1023 - 1024 seems to contain mbm.
*/
memory {
device_type = "memory";
reg = <0x80000000 0x3fd00000>; /* 1021 MB */
};
- /* CPCAP really supports 1650000 to 3400000 range */
- vmmc: regulator-mmc {
+ /* Poweroff GPIO probably connected to CPCAP */
+ gpio-poweroff {
+ compatible = "gpio-poweroff";
+ pinctrl-0 = <&poweroff_gpio>;
+ pinctrl-names = "default";
+ gpios = <&gpio2 18 GPIO_ACTIVE_LOW>; /* gpio50 */
+ };
+
+ hdmi0: connector {
+ compatible = "hdmi-connector";
+ pinctrl-0 = <&hdmi_hpd_gpio>;
+ pinctrl-names = "default";
+ label = "hdmi";
+ type = "d";
+
+ hpd-gpios = <&gpio2 31 GPIO_ACTIVE_HIGH>; /* gpio63 */
+
+ port {
+ hdmi_connector_in: endpoint {
+ remote-endpoint = <&hdmi_out>;
+ };
+ };
+ };
+
+ /*
+ * HDMI 5V regulator probably sourced from battery. Let's keep
+ * this always enabled for HDMI to work until we've figured out
+ * what the encoder chip is.
+ */
+ hdmi_regulator: regulator-hdmi {
compatible = "regulator-fixed";
- regulator-name = "vmmc";
- regulator-min-microvolt = <3000000>;
- regulator-max-microvolt = <3000000>;
+ regulator-name = "hdmi";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ gpio = <&gpio2 27 GPIO_ACTIVE_HIGH>; /* gpio59 */
+ enable-active-high;
regulator-always-on;
};
- /* CPCAP really supports 3000000 to 3100000 range */
- vemmc: regulator-emmc {
+ /* HS USB Host PHY on PORT 1 */
+ hsusb1_phy: hsusb1_phy {
+ compatible = "usb-nop-xceiv";
+ };
+
+ /* LCD regulator from sw5 source */
+ lcd_regulator: regulator-lcd {
compatible = "regulator-fixed";
- regulator-name = "vemmc";
- regulator-min-microvolt = <3000000>;
- regulator-max-microvolt = <3000000>;
- regulator-always-on;
+ regulator-name = "lcd";
+ regulator-min-microvolt = <5050000>;
+ regulator-max-microvolt = <5050000>;
+ gpio = <&gpio4 0 GPIO_ACTIVE_HIGH>; /* gpio96 */
+ enable-active-high;
+ vin-supply = <&sw5>;
};
- /* CPCAP really supports 1650000 to 1950000 range */
+ /* This is probably coming straight from the battery.. */
wl12xx_vmmc: regulator-wl12xx {
compatible = "regulator-fixed";
regulator-name = "vwl1271";
@@ -53,21 +96,195 @@
startup-delay-us = <70000>;
enable-active-high;
};
+
+ gpio_keys {
+ compatible = "gpio-keys";
+
+ volume_down {
+ label = "Volume Down";
+ gpios = <&gpio5 26 GPIO_ACTIVE_LOW>; /* gpio154 */
+ linux,code = <KEY_VOLUMEDOWN>;
+ linux,can-disable;
+ };
+
+ slider {
+ label = "Keypad Slide";
+ gpios = <&gpio4 26 GPIO_ACTIVE_HIGH>; /* gpio122 */
+ linux,input-type = <EV_SW>;
+ linux,code = <SW_KEYPAD_SLIDE>;
+ linux,can-disable;
+
+ };
+ };
+};
+
+&dss {
+ status = "okay";
+};
+
+&gpio6 {
+ touchscreen_reset {
+ gpio-hog;
+ gpios = <13 0>;
+ output-high;
+ line-name = "touchscreen-reset";
+ };
+};
+
+&dsi1 {
+ status = "okay";
+ vdd-supply = <&vcsi>;
+
+ port {
+ dsi1_out_ep: endpoint {
+ remote-endpoint = <&lcd0_in>;
+ lanes = <0 1 2 3 4 5>;
+ };
+ };
+
+ lcd0: display {
+ compatible = "panel-dsi-cm";
+ label = "lcd0";
+ vddi-supply = <&lcd_regulator>;
+ reset-gpios = <&gpio4 5 GPIO_ACTIVE_HIGH>; /* gpio101 */
+
+ panel-timing {
+ clock-frequency = <0>; /* Calculated by dsi */
+
+ hback-porch = <2>;
+ hactive = <540>;
+ hfront-porch = <0>;
+ hsync-len = <2>;
+
+ vback-porch = <1>;
+ vactive = <960>;
+ vfront-porch = <0>;
+ vsync-len = <1>;
+
+ hsync-active = <0>;
+ vsync-active = <0>;
+ de-active = <1>;
+ pixelclk-active = <1>;
+ };
+
+ port {
+ lcd0_in: endpoint {
+ remote-endpoint = <&dsi1_out_ep>;
+ };
+ };
+ };
};
-/* L3_2 interconnect is unused, SRAM, GPMC and L3_ICLK2 disabled */
-&gpmc {
- status = "disabled";
+&hdmi {
+ status = "okay";
+ pinctrl-0 = <&dss_hdmi_pins>;
+ pinctrl-names = "default";
+ vdda-supply = <&vdac>;
+
+ port {
+ hdmi_out: endpoint {
+ remote-endpoint = <&hdmi_connector_in>;
+ lanes = <1 0 3 2 5 4 7 6>;
+ };
+ };
+};
+
+&i2c1 {
+ tmp105@48 {
+ compatible = "ti,tmp105";
+ reg = <0x48>;
+ pinctrl-0 = <&tmp105_irq>;
+ pinctrl-names = "default";
+ /* kpd_row0.gpio_178 */
+ interrupts-extended = <&gpio6 18 IRQ_TYPE_EDGE_FALLING
+ &omap4_pmx_core 0x14e>;
+ interrupt-names = "irq", "wakeup";
+ wakeup-source;
+ };
+};
+
+&keypad {
+ keypad,num-rows = <8>;
+ keypad,num-columns = <8>;
+ linux,keymap = <
+
+ /* Row 1 */
+ MATRIX_KEY(0, 2, KEY_1)
+ MATRIX_KEY(0, 6, KEY_2)
+ MATRIX_KEY(2, 3, KEY_3)
+ MATRIX_KEY(0, 7, KEY_4)
+ MATRIX_KEY(0, 4, KEY_5)
+ MATRIX_KEY(5, 5, KEY_6)
+ MATRIX_KEY(0, 1, KEY_7)
+ MATRIX_KEY(0, 5, KEY_8)
+ MATRIX_KEY(0, 0, KEY_9)
+ MATRIX_KEY(1, 6, KEY_0)
+
+ /* Row 2 */
+ MATRIX_KEY(3, 4, KEY_APOSTROPHE)
+ MATRIX_KEY(7, 6, KEY_Q)
+ MATRIX_KEY(7, 7, KEY_W)
+ MATRIX_KEY(7, 2, KEY_E)
+ MATRIX_KEY(1, 0, KEY_R)
+ MATRIX_KEY(4, 4, KEY_T)
+ MATRIX_KEY(1, 2, KEY_Y)
+ MATRIX_KEY(6, 7, KEY_U)
+ MATRIX_KEY(2, 2, KEY_I)
+ MATRIX_KEY(5, 6, KEY_O)
+ MATRIX_KEY(3, 7, KEY_P)
+ MATRIX_KEY(6, 5, KEY_BACKSPACE)
+
+ /* Row 3 */
+ MATRIX_KEY(5, 4, KEY_TAB)
+ MATRIX_KEY(5, 7, KEY_A)
+ MATRIX_KEY(2, 7, KEY_S)
+ MATRIX_KEY(7, 0, KEY_D)
+ MATRIX_KEY(2, 6, KEY_F)
+ MATRIX_KEY(6, 2, KEY_G)
+ MATRIX_KEY(6, 6, KEY_H)
+ MATRIX_KEY(1, 4, KEY_J)
+ MATRIX_KEY(3, 1, KEY_K)
+ MATRIX_KEY(2, 1, KEY_L)
+ MATRIX_KEY(4, 6, KEY_ENTER)
+
+ /* Row 4 */
+ MATRIX_KEY(3, 6, KEY_LEFTSHIFT) /* KEY_CAPSLOCK */
+ MATRIX_KEY(6, 1, KEY_Z)
+ MATRIX_KEY(7, 4, KEY_X)
+ MATRIX_KEY(5, 1, KEY_C)
+ MATRIX_KEY(1, 7, KEY_V)
+ MATRIX_KEY(2, 4, KEY_B)
+ MATRIX_KEY(4, 1, KEY_N)
+ MATRIX_KEY(1, 1, KEY_M)
+ MATRIX_KEY(3, 5, KEY_COMMA)
+ MATRIX_KEY(5, 2, KEY_DOT)
+ MATRIX_KEY(6, 3, KEY_UP)
+ MATRIX_KEY(7, 3, KEY_OK)
+
+ /* Row 5 */
+ MATRIX_KEY(2, 5, KEY_LEFTCTRL) /* KEY_LEFTSHIFT */
+ MATRIX_KEY(4, 5, KEY_LEFTALT) /* SYM */
+ MATRIX_KEY(6, 0, KEY_MINUS)
+ MATRIX_KEY(4, 7, KEY_EQUAL)
+ MATRIX_KEY(1, 5, KEY_SPACE)
+ MATRIX_KEY(3, 2, KEY_SLASH)
+ MATRIX_KEY(4, 3, KEY_LEFT)
+ MATRIX_KEY(5, 3, KEY_DOWN)
+ MATRIX_KEY(3, 3, KEY_RIGHT)
+
+ /* Side buttons, KEY_VOLUMEDOWN and KEY_POWER are on CPCAP? */
+ MATRIX_KEY(5, 0, KEY_VOLUMEUP)
+ >;
};
&mmc1 {
- vmmc-supply = <&vmmc>;
+ vmmc-supply = <&vwlan2>;
bus-width = <4>;
- cd-gpios = <&gpio4 10 GPIO_ACTIVE_LOW>; /* gpio106 */
+ cd-gpios = <&gpio6 16 GPIO_ACTIVE_LOW>; /* gpio176 */
};
&mmc2 {
- vmmc-supply = <&vemmc>;
+ vmmc-supply = <&vsdio>;
bus-width = <8>;
non-removable;
};
@@ -93,12 +310,78 @@
};
};
-/* L3_2 interconnect is unused, SRAM, GPMC and L3_ICLK2 disabled */
-&ocmcram {
- status = "disabled";
+&i2c1 {
+ lm3532@38 {
+ compatible = "ti,lm3532";
+ reg = <0x38>;
+
+ enable-gpios = <&gpio6 12 GPIO_ACTIVE_HIGH>;
+
+ backlight {
+ compatible = "ti,lm3532-backlight";
+
+ lcd {
+ led-sources = <0 1 2>;
+ ramp-up-msec = <1>;
+ ramp-down-msec = <0>;
+ };
+ };
+ };
+};
+
+/*
+ * REVISIT: Add gpio173 reset pin handling to the driver, see gpio-hog above.
+ * If the GPIO reset is used, we probably need to have /lib/firmware/maxtouch.fw
+ * available. See "mxt-app" and "droid4-touchscreen-firmware" tools for more
+ * information.
+ */
+&i2c2 {
+ tsp@4a {
+ compatible = "atmel,maxtouch";
+ reg = <0x4a>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&touchscreen_pins>;
+
+ /* gpio_183 with sys_nirq2 pad as wakeup */
+ interrupts-extended = <&gpio6 23 IRQ_TYPE_EDGE_FALLING
+ &omap4_pmx_core 0x160>;
+ interrupt-names = "irq", "wakeup";
+ wakeup-source;
+ };
};
&omap4_pmx_core {
+
+ /* hdmi_hpd.gpio_63 */
+ hdmi_hpd_gpio: pinmux_hdmi_hpd_pins {
+ pinctrl-single,pins = <
+ OMAP4_IOPAD(0x098, PIN_INPUT | MUX_MODE3)
+ >;
+ };
+
+ /* hdmi_cec.hdmi_cec, hdmi_scl.hdmi_scl, hdmi_sda.hdmi_sda */
+ dss_hdmi_pins: pinmux_dss_hdmi_pins {
+ pinctrl-single,pins = <
+ OMAP4_IOPAD(0x09a, PIN_INPUT_PULLUP | MUX_MODE0)
+ OMAP4_IOPAD(0x09c, PIN_INPUT | MUX_MODE0)
+ OMAP4_IOPAD(0x09e, PIN_INPUT | MUX_MODE0)
+ >;
+ };
+
+ /* gpmc_ncs0.gpio_50 */
+ poweroff_gpio: pinmux_poweroff_pins {
+ pinctrl-single,pins = <
+ OMAP4_IOPAD(0x074, PIN_OUTPUT_PULLUP | MUX_MODE3)
+ >;
+ };
+
+ /* kpd_row0.gpio_178 */
+ tmp105_irq: pinmux_tmp105_irq {
+ pinctrl-single,pins = <
+ OMAP4_IOPAD(0x18e, PIN_INPUT_PULLUP | MUX_MODE3)
+ >;
+ };
+
usb_gpio_mux_sel1: pinmux_usb_gpio_mux_sel1_pins {
/* gpio_60 */
pinctrl-single,pins = <
@@ -106,6 +389,12 @@
>;
};
+ touchscreen_pins: pinmux_touchscreen_pins {
+ pinctrl-single,pins = <
+ OMAP4_IOPAD(0x1a0, PIN_INPUT_PULLUP | MUX_MODE3)
+ >;
+ };
+
usb_ulpi_pins: pinmux_usb_ulpi_pins {
pinctrl-single,pins = <
OMAP4_IOPAD(0x196, MUX_MODE7)
@@ -180,9 +469,49 @@
&omap4_pmx_core 0x17c>;
};
+&usbhsehci {
+ phys = <&hsusb1_phy>;
+};
+
+&usbhshost {
+ port1-mode = "ohci-phy-4pin-dpdm";
+ port2-mode = "ehci-tll";
+};
+
/* Internal UTMI+ PHY used for OTG, CPCAP ULPI PHY for detection and charger */
&usb_otg_hs {
interface-type = <1>;
mode = <3>;
power = <50>;
};
+
+&i2c4 {
+ ak8975: magnetometer@c {
+ compatible = "asahi-kasei,ak8975";
+ reg = <0x0c>;
+
+ vdd-supply = <&vhvio>;
+
+ interrupt-parent = <&gpio6>;
+ interrupts = <15 IRQ_TYPE_EDGE_RISING>; /* gpio175 */
+
+ rotation-matrix = "-1", "0", "0",
+ "0", "1", "0",
+ "0", "0", "-1";
+
+ };
+
+ lis3dh: accelerometer@18 {
+ compatible = "st,lis3dh-accel";
+ reg = <0x18>;
+
+ vdd-supply = <&vhvio>;
+
+ interrupt-parent = <&gpio2>;
+ interrupts = <2 IRQ_TYPE_EDGE_BOTH>; /* gpio34 */
+
+ rotation-matrix = "0", "-1", "0",
+ "1", "0", "0",
+ "0", "0", "1";
+ };
+};
diff --git a/arch/arm/boot/dts/omap443x.dtsi b/arch/arm/boot/dts/omap443x.dtsi
index fc6a8610c24c..03c8ad91ddac 100644
--- a/arch/arm/boot/dts/omap443x.dtsi
+++ b/arch/arm/boot/dts/omap443x.dtsi
@@ -71,4 +71,8 @@
};
+&cpu_thermal {
+ coefficients = <0 20000>;
+};
+
/include/ "omap443x-clocks.dtsi"
diff --git a/arch/arm/boot/dts/omap4460.dtsi b/arch/arm/boot/dts/omap4460.dtsi
index ef66e12e0a67..c43f2a2d0a1e 100644
--- a/arch/arm/boot/dts/omap4460.dtsi
+++ b/arch/arm/boot/dts/omap4460.dtsi
@@ -90,4 +90,8 @@
};
+&cpu_thermal {
+ coefficients = <348 (-9301)>;
+};
+
/include/ "omap446x-clocks.dtsi"
diff --git a/arch/arm/boot/dts/omap5.dtsi b/arch/arm/boot/dts/omap5.dtsi
index 222155ca8ad7..eaff2a5751dd 100644
--- a/arch/arm/boot/dts/omap5.dtsi
+++ b/arch/arm/boot/dts/omap5.dtsi
@@ -1127,6 +1127,15 @@
&cpu_thermal {
polling-delay = <500>; /* milliseconds */
+ coefficients = <65 (-1791)>;
};
/include/ "omap54xx-clocks.dtsi"
+
+&gpu_thermal {
+ coefficients = <117 (-2992)>;
+};
+
+&core_thermal {
+ coefficients = <0 2000>;
+};
diff --git a/arch/arm/boot/dts/qcom-apq8060-dragonboard.dts b/arch/arm/boot/dts/qcom-apq8060-dragonboard.dts
index 39d9e6ddefed..2da1413f5720 100644
--- a/arch/arm/boot/dts/qcom-apq8060-dragonboard.dts
+++ b/arch/arm/boot/dts/qcom-apq8060-dragonboard.dts
@@ -95,17 +95,17 @@
function = "sdc1";
};
clk {
- pins = "gpio167"; /* SDC5 CLK */
+ pins = "gpio167"; /* SDC1 CLK */
drive-strength = <16>;
bias-disable;
};
cmd {
- pins = "gpio168"; /* SDC5 CMD */
+ pins = "gpio168"; /* SDC1 CMD */
drive-strength = <10>;
bias-pull-up;
};
data {
- /* SDC5 D0 to D7 */
+ /* SDC1 D0 to D7 */
pins = "gpio159", "gpio160", "gpio161", "gpio162",
"gpio163", "gpio164", "gpio165", "gpio166";
drive-strength = <10>;
diff --git a/arch/arm/boot/dts/qcom-msm8660.dtsi b/arch/arm/boot/dts/qcom-msm8660.dtsi
index 91c9a62ae725..747669a62aa8 100644
--- a/arch/arm/boot/dts/qcom-msm8660.dtsi
+++ b/arch/arm/boot/dts/qcom-msm8660.dtsi
@@ -392,6 +392,21 @@
cap-mmc-highspeed;
};
+ sdcc2: sdcc@12140000 {
+ status = "disabled";
+ compatible = "arm,pl18x", "arm,primecell";
+ arm,primecell-periphid = <0x00051180>;
+ reg = <0x12140000 0x8000>;
+ interrupts = <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "cmd_irq";
+ clocks = <&gcc SDC2_CLK>, <&gcc SDC2_H_CLK>;
+ clock-names = "mclk", "apb_pclk";
+ bus-width = <8>;
+ max-frequency = <48000000>;
+ cap-sd-highspeed;
+ cap-mmc-highspeed;
+ };
+
sdcc3: sdcc@12180000 {
compatible = "arm,pl18x", "arm,primecell";
arm,primecell-periphid = <0x00051180>;
@@ -408,6 +423,21 @@
no-1-8-v;
};
+ sdcc4: sdcc@121c0000 {
+ compatible = "arm,pl18x", "arm,primecell";
+ arm,primecell-periphid = <0x00051180>;
+ status = "disabled";
+ reg = <0x121c0000 0x8000>;
+ interrupts = <GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "cmd_irq";
+ clocks = <&gcc SDC4_CLK>, <&gcc SDC4_H_CLK>;
+ clock-names = "mclk", "apb_pclk";
+ bus-width = <4>;
+ max-frequency = <48000000>;
+ cap-sd-highspeed;
+ cap-mmc-highspeed;
+ };
+
sdcc5: sdcc@12200000 {
compatible = "arm,pl18x", "arm,primecell";
arm,primecell-periphid = <0x00051180>;
diff --git a/arch/arm/boot/dts/qcom-msm8974-sony-xperia-honami.dts b/arch/arm/boot/dts/qcom-msm8974-sony-xperia-honami.dts
index 96c853bab8ba..e7c1577d56f4 100644
--- a/arch/arm/boot/dts/qcom-msm8974-sony-xperia-honami.dts
+++ b/arch/arm/boot/dts/qcom-msm8974-sony-xperia-honami.dts
@@ -413,14 +413,6 @@
dma-controller@f9944000 {
qcom,controlled-remotely;
};
-
- usb-phy@f9a55000 {
- status = "ok";
- };
-
- usb@f9a55000 {
- status = "ok";
- };
};
&spmi_bus {
diff --git a/arch/arm/boot/dts/qcom-msm8974.dtsi b/arch/arm/boot/dts/qcom-msm8974.dtsi
index d3e1a61b8671..307bf6a647b3 100644
--- a/arch/arm/boot/dts/qcom-msm8974.dtsi
+++ b/arch/arm/boot/dts/qcom-msm8974.dtsi
@@ -2,8 +2,8 @@
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/clock/qcom,gcc-msm8974.h>
+#include <dt-bindings/clock/qcom,rpmcc.h>
#include <dt-bindings/gpio/gpio.h>
-#include <dt-bindings/reset/qcom,gcc-msm8974.h>
#include "skeleton.dtsi"
/ {
@@ -67,7 +67,7 @@
#size-cells = <0>;
interrupts = <1 9 0xf04>;
- cpu@0 {
+ CPU0: cpu@0 {
compatible = "qcom,krait";
enable-method = "qcom,kpss-acc-v2";
device_type = "cpu";
@@ -78,7 +78,7 @@
cpu-idle-states = <&CPU_SPC>;
};
- cpu@1 {
+ CPU1: cpu@1 {
compatible = "qcom,krait";
enable-method = "qcom,kpss-acc-v2";
device_type = "cpu";
@@ -89,7 +89,7 @@
cpu-idle-states = <&CPU_SPC>;
};
- cpu@2 {
+ CPU2: cpu@2 {
compatible = "qcom,krait";
enable-method = "qcom,kpss-acc-v2";
device_type = "cpu";
@@ -100,7 +100,7 @@
cpu-idle-states = <&CPU_SPC>;
};
- cpu@3 {
+ CPU3: cpu@3 {
compatible = "qcom,krait";
enable-method = "qcom,kpss-acc-v2";
device_type = "cpu";
@@ -250,6 +250,9 @@
cx-supply = <&pm8841_s2>;
+ clocks = <&xo_board>;
+ clock-names = "xo";
+
memory-region = <&adsp_region>;
qcom,smem-states = <&adsp_smp2p_out 0>;
@@ -695,42 +698,276 @@
qcom,ee = <0>;
};
- usb1_phy: usb-phy@f9a55000 {
- compatible = "qcom,usb-otg-snps";
+ etr@fc322000 {
+ compatible = "arm,coresight-tmc", "arm,primecell";
+ reg = <0xfc322000 0x1000>;
- reg = <0xf9a55000 0x400>;
- interrupts-extended = <&intc 0 134 0>, <&intc 0 140 0>,
- <&spmi_bus 0 0x9 0 0>;
- interrupt-names = "core_irq", "async_irq", "pmic_id_irq";
+ clocks = <&rpmcc RPM_SMD_QDSS_CLK>, <&rpmcc RPM_SMD_QDSS_A_CLK>;
+ clock-names = "apb_pclk", "atclk";
- vddcx-supply = <&pm8841_s2>;
- v3p3-supply = <&pm8941_l24>;
- v1p8-supply = <&pm8941_l6>;
+ port {
+ etr_in: endpoint {
+ slave-mode;
+ remote-endpoint = <&replicator_out0>;
+ };
+ };
+ };
- dr_mode = "otg";
- qcom,phy-init-sequence = <0x63 0x81 0xfffffff>;
- qcom,otg-control = <1>;
- qcom,phy-num = <0>;
+ tpiu@fc318000 {
+ compatible = "arm,coresight-tpiu", "arm,primecell";
+ reg = <0xfc318000 0x1000>;
- resets = <&gcc GCC_USB2A_PHY_BCR>, <&gcc GCC_USB_HS_BCR>;
- reset-names = "phy", "link";
+ clocks = <&rpmcc RPM_SMD_QDSS_CLK>, <&rpmcc RPM_SMD_QDSS_A_CLK>;
+ clock-names = "apb_pclk", "atclk";
- clocks = <&gcc GCC_XO_CLK>, <&gcc GCC_USB_HS_SYSTEM_CLK>,
- <&gcc GCC_USB_HS_AHB_CLK>;
- clock-names = "phy", "core", "iface";
+ port {
+ tpiu_in: endpoint {
+ slave-mode;
+ remote-endpoint = <&replicator_out1>;
+ };
+ };
+ };
- status = "disabled";
+ replicator@fc31c000 {
+ compatible = "qcom,coresight-replicator1x", "arm,primecell";
+ reg = <0xfc31c000 0x1000>;
+
+ clocks = <&rpmcc RPM_SMD_QDSS_CLK>, <&rpmcc RPM_SMD_QDSS_A_CLK>;
+ clock-names = "apb_pclk", "atclk";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ replicator_out0: endpoint {
+ remote-endpoint = <&etr_in>;
+ };
+ };
+ port@1 {
+ reg = <1>;
+ replicator_out1: endpoint {
+ remote-endpoint = <&tpiu_in>;
+ };
+ };
+ port@2 {
+ reg = <0>;
+ replicator_in: endpoint {
+ slave-mode;
+ remote-endpoint = <&etf_out>;
+ };
+ };
+ };
};
- usb@f9a55000 {
- compatible = "qcom,ci-hdrc";
- reg = <0xf9a55000 0x400>;
- dr_mode = "otg";
- interrupts = <0 134 0>, <0 140 0>;
- interrupt-names = "core_irq", "async_irq";
- usb-phy = <&usb1_phy>;
+ etf@fc307000 {
+ compatible = "arm,coresight-tmc", "arm,primecell";
+ reg = <0xfc307000 0x1000>;
- status = "disabled";
+ clocks = <&rpmcc RPM_SMD_QDSS_CLK>, <&rpmcc RPM_SMD_QDSS_A_CLK>;
+ clock-names = "apb_pclk", "atclk";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ etf_out: endpoint {
+ remote-endpoint = <&replicator_in>;
+ };
+ };
+ port@1 {
+ reg = <0>;
+ etf_in: endpoint {
+ slave-mode;
+ remote-endpoint = <&merger_out>;
+ };
+ };
+ };
+ };
+
+ funnel@fc31b000 {
+ compatible = "arm,coresight-funnel", "arm,primecell";
+ reg = <0xfc31b000 0x1000>;
+
+ clocks = <&rpmcc RPM_SMD_QDSS_CLK>, <&rpmcc RPM_SMD_QDSS_A_CLK>;
+ clock-names = "apb_pclk", "atclk";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ /*
+ * Input ports not described:
+ * 0 - connected through a funnel to the Audio, Modem and
+ * Resource and Power Manager CPUs
+ * 2...7 - not-connected
+ */
+ port@1 {
+ reg = <1>;
+ merger_in1: endpoint {
+ slave-mode;
+ remote-endpoint = <&funnel1_out>;
+ };
+ };
+ port@8 {
+ reg = <0>;
+ merger_out: endpoint {
+ remote-endpoint = <&etf_in>;
+ };
+ };
+ };
+ };
+
+ funnel@fc31a000 {
+ compatible = "arm,coresight-funnel", "arm,primecell";
+ reg = <0xfc31a000 0x1000>;
+
+ clocks = <&rpmcc RPM_SMD_QDSS_CLK>, <&rpmcc RPM_SMD_QDSS_A_CLK>;
+ clock-names = "apb_pclk", "atclk";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ /*
+ * Input ports not described:
+ * 0 - not-connected
+ * 1 - connected through a funnel to the Multimedia CPU
+ * 2 - connected to Wireless CPU
+ * 3 - not-connected
+ * 4 - not-connected
+ * 6 - not-connected
+ * 7 - connected to STM
+ */
+ port@5 {
+ reg = <5>;
+ funnel1_in5: endpoint {
+ slave-mode;
+ remote-endpoint = <&kpss_out>;
+ };
+ };
+ port@8 {
+ reg = <0>;
+ funnel1_out: endpoint {
+ remote-endpoint = <&merger_in1>;
+ };
+ };
+ };
+ };
+
+ funnel@fc345000 { /* KPSS funnel, only 4 inputs are used */
+ compatible = "arm,coresight-funnel", "arm,primecell";
+ reg = <0xfc345000 0x1000>;
+
+ clocks = <&rpmcc RPM_SMD_QDSS_CLK>, <&rpmcc RPM_SMD_QDSS_A_CLK>;
+ clock-names = "apb_pclk", "atclk";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ kpss_in0: endpoint {
+ slave-mode;
+ remote-endpoint = <&etm0_out>;
+ };
+ };
+ port@1 {
+ reg = <1>;
+ kpss_in1: endpoint {
+ slave-mode;
+ remote-endpoint = <&etm1_out>;
+ };
+ };
+ port@2 {
+ reg = <2>;
+ kpss_in2: endpoint {
+ slave-mode;
+ remote-endpoint = <&etm2_out>;
+ };
+ };
+ port@3 {
+ reg = <3>;
+ kpss_in3: endpoint {
+ slave-mode;
+ remote-endpoint = <&etm3_out>;
+ };
+ };
+ port@8 {
+ reg = <0>;
+ kpss_out: endpoint {
+ remote-endpoint = <&funnel1_in5>;
+ };
+ };
+ };
+ };
+
+ etm@fc33c000 {
+ compatible = "arm,coresight-etm4x", "arm,primecell";
+ reg = <0xfc33c000 0x1000>;
+
+ clocks = <&rpmcc RPM_SMD_QDSS_CLK>, <&rpmcc RPM_SMD_QDSS_A_CLK>;
+ clock-names = "apb_pclk", "atclk";
+
+ cpu = <&CPU0>;
+
+ port {
+ etm0_out: endpoint {
+ remote-endpoint = <&kpss_in0>;
+ };
+ };
+ };
+
+ etm@fc33d000 {
+ compatible = "arm,coresight-etm4x", "arm,primecell";
+ reg = <0xfc33d000 0x1000>;
+
+ clocks = <&rpmcc RPM_SMD_QDSS_CLK>, <&rpmcc RPM_SMD_QDSS_A_CLK>;
+ clock-names = "apb_pclk", "atclk";
+
+ cpu = <&CPU1>;
+
+ port {
+ etm1_out: endpoint {
+ remote-endpoint = <&kpss_in1>;
+ };
+ };
+ };
+
+ etm@fc33e000 {
+ compatible = "arm,coresight-etm4x", "arm,primecell";
+ reg = <0xfc33e000 0x1000>;
+
+ clocks = <&rpmcc RPM_SMD_QDSS_CLK>, <&rpmcc RPM_SMD_QDSS_A_CLK>;
+ clock-names = "apb_pclk", "atclk";
+
+ cpu = <&CPU2>;
+
+ port {
+ etm2_out: endpoint {
+ remote-endpoint = <&kpss_in2>;
+ };
+ };
+ };
+
+ etm@fc33f000 {
+ compatible = "arm,coresight-etm4x", "arm,primecell";
+ reg = <0xfc33f000 0x1000>;
+
+ clocks = <&rpmcc RPM_SMD_QDSS_CLK>, <&rpmcc RPM_SMD_QDSS_A_CLK>;
+ clock-names = "apb_pclk", "atclk";
+
+ cpu = <&CPU3>;
+
+ port {
+ etm3_out: endpoint {
+ remote-endpoint = <&kpss_in3>;
+ };
+ };
};
};
@@ -760,6 +997,11 @@
compatible = "qcom,rpm-msm8974";
qcom,smd-channels = "rpm_requests";
+ rpmcc: clock-controller {
+ compatible = "qcom,rpmcc-msm8974", "qcom,rpmcc";
+ #clock-cells = <1>;
+ };
+
pm8841-regulators {
compatible = "qcom,rpm-pm8841-regulators";
diff --git a/arch/arm/boot/dts/r7s72100-genmai.dts b/arch/arm/boot/dts/r7s72100-genmai.dts
index 118a8e2b86bd..52a7b586bac7 100644
--- a/arch/arm/boot/dts/r7s72100-genmai.dts
+++ b/arch/arm/boot/dts/r7s72100-genmai.dts
@@ -44,6 +44,10 @@
clock-frequency = <48000000>;
};
+&rtc_x1_clk {
+ clock-frequency = <32768>;
+};
+
&mtu2 {
status = "okay";
};
@@ -59,6 +63,10 @@
};
};
+&rtc {
+ status = "okay";
+};
+
&scif2 {
status = "okay";
};
diff --git a/arch/arm/boot/dts/r7s72100-rskrza1.dts b/arch/arm/boot/dts/r7s72100-rskrza1.dts
index 02b59c5b3c53..72df20a04320 100644
--- a/arch/arm/boot/dts/r7s72100-rskrza1.dts
+++ b/arch/arm/boot/dts/r7s72100-rskrza1.dts
@@ -43,6 +43,10 @@
clock-frequency = <48000000>;
};
+&rtc_x1_clk {
+ clock-frequency = <32768>;
+};
+
&mtu2 {
status = "okay";
};
@@ -69,6 +73,10 @@
status = "okay";
};
+&rtc {
+ status = "okay";
+};
+
&scif2 {
status = "okay";
};
diff --git a/arch/arm/boot/dts/r7s72100.dtsi b/arch/arm/boot/dts/r7s72100.dtsi
index b8aa256bd515..0423996e4dcc 100644
--- a/arch/arm/boot/dts/r7s72100.dtsi
+++ b/arch/arm/boot/dts/r7s72100.dtsi
@@ -51,6 +51,20 @@
clock-frequency = <0>;
};
+ rtc_x1_clk: rtc_x1 {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ /* If clk present, value must be set by board to 32768 */
+ clock-frequency = <0>;
+ };
+
+ rtc_x3_clk: rtc_x3 {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ /* If clk present, value must be set by board to 4000000 */
+ clock-frequency = <0>;
+ };
+
/* Fixed factor clocks */
b_clk: b {
#clock-cells = <0>;
@@ -117,11 +131,20 @@
clock-output-names = "ostm0", "ostm1";
};
+ mstp6_clks: mstp6_clks@fcfe042c {
+ #clock-cells = <1>;
+ compatible = "renesas,r7s72100-mstp-clocks", "renesas,cpg-mstp-clocks";
+ reg = <0xfcfe042c 4>;
+ clocks = <&p0_clk>;
+ clock-indices = <R7S72100_CLK_RTC>;
+ clock-output-names = "rtc";
+ };
+
mstp7_clks: mstp7_clks@fcfe0430 {
#clock-cells = <1>;
compatible = "renesas,r7s72100-mstp-clocks", "renesas,cpg-mstp-clocks";
reg = <0xfcfe0430 4>;
- clocks = <&p0_clk>;
+ clocks = <&b_clk>;
clock-indices = <R7S72100_CLK_ETHER>;
clock-output-names = "ether";
};
@@ -162,9 +185,12 @@
#clock-cells = <1>;
compatible = "renesas,r7s72100-mstp-clocks", "renesas,cpg-mstp-clocks";
reg = <0xfcfe0444 4>;
- clocks = <&p1_clk>, <&p1_clk>;
- clock-indices = <R7S72100_CLK_SDHI1 R7S72100_CLK_SDHI0>;
- clock-output-names = "sdhi1", "sdhi0";
+ clocks = <&p1_clk>, <&p1_clk>, <&p1_clk>, <&p1_clk>;
+ clock-indices = <
+ R7S72100_CLK_SDHI00 R7S72100_CLK_SDHI01
+ R7S72100_CLK_SDHI10 R7S72100_CLK_SDHI11
+ >;
+ clock-output-names = "sdhi00", "sdhi01", "sdhi10", "sdhi11";
};
};
@@ -177,6 +203,7 @@
compatible = "arm,cortex-a9";
reg = <0>;
clock-frequency = <400000000>;
+ next-level-cache = <&L2>;
};
};
@@ -368,6 +395,23 @@
<0xe8202000 0x1000>;
};
+ L2: cache-controller@3ffff000 {
+ compatible = "arm,pl310-cache";
+ reg = <0x3ffff000 0x1000>;
+ interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>;
+ arm,early-bresp-disable;
+ arm,full-line-zero-disable;
+ cache-unified;
+ cache-level = <2>;
+ };
+
+ wdt: watchdog@fcfe0000 {
+ compatible = "renesas,r7s72100-wdt", "renesas,rza-wdt";
+ reg = <0xfcfe0000 0x6>;
+ interrupts = <GIC_SPI 106 IRQ_TYPE_EDGE_RISING>;
+ clocks = <&p0_clk>;
+ };
+
i2c0: i2c@fcfee000 {
#address-cells = <1>;
#size-cells = <0>;
@@ -488,7 +532,10 @@
GIC_SPI 271 IRQ_TYPE_LEVEL_HIGH
GIC_SPI 272 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&mstp12_clks R7S72100_CLK_SDHI0>;
+ clocks = <&mstp12_clks R7S72100_CLK_SDHI00>,
+ <&mstp12_clks R7S72100_CLK_SDHI01>;
+ clock-names = "core", "cd";
+ power-domains = <&cpg_clocks>;
cap-sd-highspeed;
cap-sdio-irq;
status = "disabled";
@@ -501,7 +548,10 @@
GIC_SPI 274 IRQ_TYPE_LEVEL_HIGH
GIC_SPI 275 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&mstp12_clks R7S72100_CLK_SDHI1>;
+ clocks = <&mstp12_clks R7S72100_CLK_SDHI10>,
+ <&mstp12_clks R7S72100_CLK_SDHI11>;
+ clock-names = "core", "cd";
+ power-domains = <&cpg_clocks>;
cap-sd-highspeed;
cap-sdio-irq;
status = "disabled";
@@ -524,4 +574,18 @@
power-domains = <&cpg_clocks>;
status = "disabled";
};
+
+ rtc: rtc@fcff1000 {
+ compatible = "renesas,r7s72100-rtc", "renesas,sh-rtc";
+ reg = <0xfcff1000 0x2e>;
+ interrupts = <GIC_SPI 276 IRQ_TYPE_EDGE_RISING
+ GIC_SPI 277 IRQ_TYPE_EDGE_RISING
+ GIC_SPI 278 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "alarm", "period", "carry";
+ clocks = <&mstp6_clks R7S72100_CLK_RTC>, <&rtc_x1_clk>,
+ <&rtc_x3_clk>, <&extal_clk>;
+ clock-names = "fck", "rtc_x1", "rtc_x3", "extal";
+ power-domains = <&cpg_clocks>;
+ status = "disabled";
+ };
};
diff --git a/arch/arm/boot/dts/r8a73a4.dtsi b/arch/arm/boot/dts/r8a73a4.dtsi
index 00eb9a7114dc..1f5c9f6dddba 100644
--- a/arch/arm/boot/dts/r8a73a4.dtsi
+++ b/arch/arm/boot/dts/r8a73a4.dtsi
@@ -32,18 +32,16 @@
next-level-cache = <&L2_CA15>;
};
- L2_CA15: cache-controller@0 {
+ L2_CA15: cache-controller-0 {
compatible = "cache";
- reg = <0>;
clocks = <&cpg_clocks R8A73A4_CLK_Z>;
power-domains = <&pd_a3sm>;
cache-unified;
cache-level = <2>;
};
- L2_CA7: cache-controller@100 {
+ L2_CA7: cache-controller-1 {
compatible = "cache";
- reg = <0x100>;
clocks = <&cpg_clocks R8A73A4_CLK_Z2>;
power-domains = <&pd_a3km>;
cache-unified;
@@ -469,6 +467,9 @@
<0 0xf1004000 0 0x2000>,
<0 0xf1006000 0 0x2000>;
interrupts = <GIC_PPI 9 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>;
+ clocks = <&mstp4_clks R8A73A4_CLK_INTC_SYS>;
+ clock-names = "clk";
+ power-domains = <&pd_c4>;
};
bsc: bus@fec10000 {
@@ -727,16 +728,18 @@
mstp4_clks: mstp4_clks@e6150140 {
compatible = "renesas,r8a73a4-mstp-clocks", "renesas,cpg-mstp-clocks";
reg = <0 0xe6150140 0 4>, <0 0xe615004c 0 4>;
- clocks = <&main_div2_clk>, <&main_div2_clk>,
+ clocks = <&main_div2_clk>, <&cpg_clocks R8A73A4_CLK_ZS>,
+ <&main_div2_clk>,
<&cpg_clocks R8A73A4_CLK_HP>,
<&cpg_clocks R8A73A4_CLK_HP>;
#clock-cells = <1>;
clock-indices = <
- R8A73A4_CLK_IRQC R8A73A4_CLK_IIC5
- R8A73A4_CLK_IIC4 R8A73A4_CLK_IIC3
+ R8A73A4_CLK_IRQC R8A73A4_CLK_INTC_SYS
+ R8A73A4_CLK_IIC5 R8A73A4_CLK_IIC4
+ R8A73A4_CLK_IIC3
>;
clock-output-names =
- "irqc", "iic5", "iic4", "iic3";
+ "irqc", "intc-sys", "iic5", "iic4", "iic3";
};
mstp5_clks: mstp5_clks@e6150144 {
compatible = "renesas,r8a73a4-mstp-clocks", "renesas,cpg-mstp-clocks";
diff --git a/arch/arm/boot/dts/r8a7743.dtsi b/arch/arm/boot/dts/r8a7743.dtsi
index d8393b97768b..0ddac81742e4 100644
--- a/arch/arm/boot/dts/r8a7743.dtsi
+++ b/arch/arm/boot/dts/r8a7743.dtsi
@@ -32,9 +32,8 @@
next-level-cache = <&L2_CA15>;
};
- L2_CA15: cache-controller@0 {
+ L2_CA15: cache-controller-0 {
compatible = "cache";
- reg = <0>;
cache-unified;
cache-level = <2>;
power-domains = <&sysc R8A7743_PD_CA15_SCU>;
@@ -63,6 +62,7 @@
clocks = <&cpg CPG_MOD 408>;
clock-names = "clk";
power-domains = <&sysc R8A7743_PD_ALWAYS_ON>;
+ resets = <&cpg 408>;
};
irqc: interrupt-controller@e61c0000 {
@@ -82,6 +82,7 @@
<GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 407>;
power-domains = <&sysc R8A7743_PD_ALWAYS_ON>;
+ resets = <&cpg 407>;
};
timer {
@@ -103,6 +104,7 @@
clock-names = "extal", "usb_extal";
#clock-cells = <2>;
#power-domain-cells = <0>;
+ #reset-cells = <1>;
};
prr: chipid@ff000044 {
@@ -149,6 +151,7 @@
clocks = <&cpg CPG_MOD 219>;
clock-names = "fck";
power-domains = <&sysc R8A7743_PD_ALWAYS_ON>;
+ resets = <&cpg 219>;
#dma-cells = <1>;
dma-channels = <15>;
};
@@ -181,6 +184,7 @@
clocks = <&cpg CPG_MOD 218>;
clock-names = "fck";
power-domains = <&sysc R8A7743_PD_ALWAYS_ON>;
+ resets = <&cpg 218>;
#dma-cells = <1>;
dma-channels = <15>;
};
@@ -196,6 +200,7 @@
<&dmac1 0x21>, <&dmac1 0x22>;
dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7743_PD_ALWAYS_ON>;
+ resets = <&cpg 204>;
status = "disabled";
};
@@ -210,6 +215,7 @@
<&dmac1 0x25>, <&dmac1 0x26>;
dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7743_PD_ALWAYS_ON>;
+ resets = <&cpg 203>;
status = "disabled";
};
@@ -224,6 +230,7 @@
<&dmac1 0x27>, <&dmac1 0x28>;
dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7743_PD_ALWAYS_ON>;
+ resets = <&cpg 202>;
status = "disabled";
};
@@ -238,6 +245,7 @@
<&dmac1 0x1b>, <&dmac1 0x1c>;
dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7743_PD_ALWAYS_ON>;
+ resets = <&cpg 1106>;
status = "disabled";
};
@@ -252,6 +260,7 @@
<&dmac1 0x1f>, <&dmac1 0x20>;
dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7743_PD_ALWAYS_ON>;
+ resets = <&cpg 1107>;
status = "disabled";
};
@@ -266,6 +275,7 @@
<&dmac1 0x23>, <&dmac1 0x24>;
dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7743_PD_ALWAYS_ON>;
+ resets = <&cpg 1108>;
status = "disabled";
};
@@ -277,9 +287,10 @@
clocks = <&cpg CPG_MOD 206>;
clock-names = "fck";
dmas = <&dmac0 0x3d>, <&dmac0 0x3e>,
- <&dmac1 0x3d>, <&dmac1 0x3e>;
+ <&dmac1 0x3d>, <&dmac1 0x3e>;
dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7743_PD_ALWAYS_ON>;
+ resets = <&cpg 206>;
status = "disabled";
};
@@ -294,6 +305,7 @@
<&dmac1 0x19>, <&dmac1 0x1a>;
dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7743_PD_ALWAYS_ON>;
+ resets = <&cpg 207>;
status = "disabled";
};
@@ -308,6 +320,7 @@
<&dmac1 0x1d>, <&dmac1 0x1e>;
dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7743_PD_ALWAYS_ON>;
+ resets = <&cpg 216>;
status = "disabled";
};
@@ -323,6 +336,7 @@
<&dmac1 0x29>, <&dmac1 0x2a>;
dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7743_PD_ALWAYS_ON>;
+ resets = <&cpg 721>;
status = "disabled";
};
@@ -338,6 +352,7 @@
<&dmac1 0x2d>, <&dmac1 0x2e>;
dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7743_PD_ALWAYS_ON>;
+ resets = <&cpg 720>;
status = "disabled";
};
@@ -353,6 +368,7 @@
<&dmac1 0x2b>, <&dmac1 0x2c>;
dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7743_PD_ALWAYS_ON>;
+ resets = <&cpg 719>;
status = "disabled";
};
@@ -368,6 +384,7 @@
<&dmac1 0x2f>, <&dmac1 0x30>;
dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7743_PD_ALWAYS_ON>;
+ resets = <&cpg 718>;
status = "disabled";
};
@@ -383,6 +400,7 @@
<&dmac1 0xfb>, <&dmac1 0xfc>;
dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7743_PD_ALWAYS_ON>;
+ resets = <&cpg 715>;
status = "disabled";
};
@@ -398,6 +416,7 @@
<&dmac1 0xfd>, <&dmac1 0xfe>;
dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7743_PD_ALWAYS_ON>;
+ resets = <&cpg 714>;
status = "disabled";
};
@@ -413,6 +432,7 @@
<&dmac1 0x39>, <&dmac1 0x3a>;
dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7743_PD_ALWAYS_ON>;
+ resets = <&cpg 717>;
status = "disabled";
};
@@ -428,6 +448,7 @@
<&dmac1 0x4d>, <&dmac1 0x4e>;
dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7743_PD_ALWAYS_ON>;
+ resets = <&cpg 716>;
status = "disabled";
};
@@ -443,6 +464,7 @@
<&dmac1 0x3b>, <&dmac1 0x3c>;
dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7743_PD_ALWAYS_ON>;
+ resets = <&cpg 713>;
status = "disabled";
};
@@ -452,6 +474,7 @@
interrupts = <GIC_SPI 162 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 813>;
power-domains = <&sysc R8A7743_PD_ALWAYS_ON>;
+ resets = <&cpg 813>;
phy-mode = "rmii";
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm/boot/dts/r8a7745.dtsi b/arch/arm/boot/dts/r8a7745.dtsi
index 1f65ff68a469..2feb0084bb3b 100644
--- a/arch/arm/boot/dts/r8a7745.dtsi
+++ b/arch/arm/boot/dts/r8a7745.dtsi
@@ -32,9 +32,8 @@
next-level-cache = <&L2_CA7>;
};
- L2_CA7: cache-controller@0 {
+ L2_CA7: cache-controller-0 {
compatible = "cache";
- reg = <0>;
cache-unified;
cache-level = <2>;
power-domains = <&sysc R8A7745_PD_CA7_SCU>;
@@ -63,6 +62,7 @@
clocks = <&cpg CPG_MOD 408>;
clock-names = "clk";
power-domains = <&sysc R8A7745_PD_ALWAYS_ON>;
+ resets = <&cpg 408>;
};
irqc: interrupt-controller@e61c0000 {
@@ -82,6 +82,7 @@
<GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 407>;
power-domains = <&sysc R8A7745_PD_ALWAYS_ON>;
+ resets = <&cpg 407>;
};
timer {
@@ -103,6 +104,7 @@
clock-names = "extal", "usb_extal";
#clock-cells = <2>;
#power-domain-cells = <0>;
+ #reset-cells = <1>;
};
prr: chipid@ff000044 {
@@ -149,6 +151,7 @@
clocks = <&cpg CPG_MOD 219>;
clock-names = "fck";
power-domains = <&sysc R8A7745_PD_ALWAYS_ON>;
+ resets = <&cpg 219>;
#dma-cells = <1>;
dma-channels = <15>;
};
@@ -181,6 +184,7 @@
clocks = <&cpg CPG_MOD 218>;
clock-names = "fck";
power-domains = <&sysc R8A7745_PD_ALWAYS_ON>;
+ resets = <&cpg 218>;
#dma-cells = <1>;
dma-channels = <15>;
};
@@ -196,6 +200,7 @@
<&dmac1 0x21>, <&dmac1 0x22>;
dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7745_PD_ALWAYS_ON>;
+ resets = <&cpg 204>;
status = "disabled";
};
@@ -210,6 +215,7 @@
<&dmac1 0x25>, <&dmac1 0x26>;
dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7745_PD_ALWAYS_ON>;
+ resets = <&cpg 203>;
status = "disabled";
};
@@ -224,6 +230,7 @@
<&dmac1 0x27>, <&dmac1 0x28>;
dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7745_PD_ALWAYS_ON>;
+ resets = <&cpg 202>;
status = "disabled";
};
@@ -238,6 +245,7 @@
<&dmac1 0x1b>, <&dmac1 0x1c>;
dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7745_PD_ALWAYS_ON>;
+ resets = <&cpg 1106>;
status = "disabled";
};
@@ -252,6 +260,7 @@
<&dmac1 0x1f>, <&dmac1 0x20>;
dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7745_PD_ALWAYS_ON>;
+ resets = <&cpg 1107>;
status = "disabled";
};
@@ -266,6 +275,7 @@
<&dmac1 0x23>, <&dmac1 0x24>;
dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7745_PD_ALWAYS_ON>;
+ resets = <&cpg 1108>;
status = "disabled";
};
@@ -277,9 +287,10 @@
clocks = <&cpg CPG_MOD 206>;
clock-names = "fck";
dmas = <&dmac0 0x3d>, <&dmac0 0x3e>,
- <&dmac1 0x3d>, <&dmac1 0x3e>;
+ <&dmac1 0x3d>, <&dmac1 0x3e>;
dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7745_PD_ALWAYS_ON>;
+ resets = <&cpg 206>;
status = "disabled";
};
@@ -294,6 +305,7 @@
<&dmac1 0x19>, <&dmac1 0x1a>;
dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7745_PD_ALWAYS_ON>;
+ resets = <&cpg 207>;
status = "disabled";
};
@@ -308,6 +320,7 @@
<&dmac1 0x1d>, <&dmac1 0x1e>;
dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7745_PD_ALWAYS_ON>;
+ resets = <&cpg 216>;
status = "disabled";
};
@@ -323,6 +336,7 @@
<&dmac1 0x29>, <&dmac1 0x2a>;
dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7745_PD_ALWAYS_ON>;
+ resets = <&cpg 721>;
status = "disabled";
};
@@ -338,6 +352,7 @@
<&dmac1 0x2d>, <&dmac1 0x2e>;
dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7745_PD_ALWAYS_ON>;
+ resets = <&cpg 720>;
status = "disabled";
};
@@ -353,6 +368,7 @@
<&dmac1 0x2b>, <&dmac1 0x2c>;
dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7745_PD_ALWAYS_ON>;
+ resets = <&cpg 719>;
status = "disabled";
};
@@ -368,6 +384,7 @@
<&dmac1 0x2f>, <&dmac1 0x30>;
dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7745_PD_ALWAYS_ON>;
+ resets = <&cpg 718>;
status = "disabled";
};
@@ -383,6 +400,7 @@
<&dmac1 0xfb>, <&dmac1 0xfc>;
dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7745_PD_ALWAYS_ON>;
+ resets = <&cpg 715>;
status = "disabled";
};
@@ -398,6 +416,7 @@
<&dmac1 0xfd>, <&dmac1 0xfe>;
dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7745_PD_ALWAYS_ON>;
+ resets = <&cpg 714>;
status = "disabled";
};
@@ -413,6 +432,7 @@
<&dmac1 0x39>, <&dmac1 0x3a>;
dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7745_PD_ALWAYS_ON>;
+ resets = <&cpg 717>;
status = "disabled";
};
@@ -428,6 +448,7 @@
<&dmac1 0x4d>, <&dmac1 0x4e>;
dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7745_PD_ALWAYS_ON>;
+ resets = <&cpg 716>;
status = "disabled";
};
@@ -443,6 +464,7 @@
<&dmac1 0x3b>, <&dmac1 0x3c>;
dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7745_PD_ALWAYS_ON>;
+ resets = <&cpg 713>;
status = "disabled";
};
@@ -452,6 +474,7 @@
interrupts = <GIC_SPI 162 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 813>;
power-domains = <&sysc R8A7745_PD_ALWAYS_ON>;
+ resets = <&cpg 813>;
phy-mode = "rmii";
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm/boot/dts/r8a7778-bockw.dts b/arch/arm/boot/dts/r8a7778-bockw.dts
index 211d239d9041..c79d55eb43c5 100644
--- a/arch/arm/boot/dts/r8a7778-bockw.dts
+++ b/arch/arm/boot/dts/r8a7778-bockw.dts
@@ -229,5 +229,4 @@
&scif_clk {
clock-frequency = <14745600>;
- status = "okay";
};
diff --git a/arch/arm/boot/dts/r8a7779-marzen.dts b/arch/arm/boot/dts/r8a7779-marzen.dts
index 89c5b24a3d03..9412a86f9b30 100644
--- a/arch/arm/boot/dts/r8a7779-marzen.dts
+++ b/arch/arm/boot/dts/r8a7779-marzen.dts
@@ -236,7 +236,6 @@
&scif_clk {
clock-frequency = <14745600>;
- status = "okay";
};
&sdhi0 {
diff --git a/arch/arm/boot/dts/r8a7790-lager.dts b/arch/arm/boot/dts/r8a7790-lager.dts
index bd512c86e852..ba100a6f67ca 100644
--- a/arch/arm/boot/dts/r8a7790-lager.dts
+++ b/arch/arm/boot/dts/r8a7790-lager.dts
@@ -581,7 +581,6 @@
&scif_clk {
clock-frequency = <14745600>;
- status = "okay";
};
&msiof1 {
diff --git a/arch/arm/boot/dts/r8a7790.dtsi b/arch/arm/boot/dts/r8a7790.dtsi
index 6d10450de6d7..99269aaca6fc 100644
--- a/arch/arm/boot/dts/r8a7790.dtsi
+++ b/arch/arm/boot/dts/r8a7790.dtsi
@@ -129,17 +129,15 @@
next-level-cache = <&L2_CA7>;
};
- L2_CA15: cache-controller@0 {
+ L2_CA15: cache-controller-0 {
compatible = "cache";
- reg = <0>;
power-domains = <&sysc R8A7790_PD_CA15_SCU>;
cache-unified;
cache-level = <2>;
};
- L2_CA7: cache-controller@100 {
+ L2_CA7: cache-controller-1 {
compatible = "cache";
- reg = <0x100>;
power-domains = <&sysc R8A7790_PD_CA7_SCU>;
cache-unified;
cache-level = <2>;
@@ -187,6 +185,9 @@
<0 0xf1004000 0 0x2000>,
<0 0xf1006000 0 0x2000>;
interrupts = <GIC_PPI 9 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>;
+ clocks = <&mstp4_clks R8A7790_CLK_INTC_SYS>;
+ clock-names = "clk";
+ power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
};
gpio0: gpio@e6050000 {
@@ -1100,7 +1101,7 @@
};
/* External CAN clock */
- can_clk: can_clk {
+ can_clk: can {
compatible = "fixed-clock";
#clock-cells = <0>;
/* This value must be overridden by the board. */
@@ -1366,10 +1367,10 @@
mstp4_clks: mstp4_clks@e6150140 {
compatible = "renesas,r8a7790-mstp-clocks", "renesas,cpg-mstp-clocks";
reg = <0 0xe6150140 0 4>, <0 0xe615004c 0 4>;
- clocks = <&cp_clk>;
+ clocks = <&cp_clk>, <&zs_clk>;
#clock-cells = <1>;
- clock-indices = <R8A7790_CLK_IRQC>;
- clock-output-names = "irqc";
+ clock-indices = <R8A7790_CLK_IRQC R8A7790_CLK_INTC_SYS>;
+ clock-output-names = "irqc", "intc-sys";
};
mstp5_clks: mstp5_clks@e6150144 {
compatible = "renesas,r8a7790-mstp-clocks", "renesas,cpg-mstp-clocks";
@@ -1442,8 +1443,11 @@
compatible = "renesas,r8a7790-mstp-clocks", "renesas,cpg-mstp-clocks";
reg = <0 0xe6150998 0 4>, <0 0xe61509a8 0 4>;
clocks = <&p_clk>,
- <&p_clk>, <&p_clk>, <&p_clk>, <&p_clk>, <&p_clk>,
- <&p_clk>, <&p_clk>, <&p_clk>, <&p_clk>, <&p_clk>,
+ <&mstp10_clks R8A7790_CLK_SSI_ALL>, <&mstp10_clks R8A7790_CLK_SSI_ALL>,
+ <&mstp10_clks R8A7790_CLK_SSI_ALL>, <&mstp10_clks R8A7790_CLK_SSI_ALL>,
+ <&mstp10_clks R8A7790_CLK_SSI_ALL>, <&mstp10_clks R8A7790_CLK_SSI_ALL>,
+ <&mstp10_clks R8A7790_CLK_SSI_ALL>, <&mstp10_clks R8A7790_CLK_SSI_ALL>,
+ <&mstp10_clks R8A7790_CLK_SSI_ALL>, <&mstp10_clks R8A7790_CLK_SSI_ALL>,
<&p_clk>,
<&mstp10_clks R8A7790_CLK_SCU_ALL>, <&mstp10_clks R8A7790_CLK_SCU_ALL>,
<&mstp10_clks R8A7790_CLK_SCU_ALL>, <&mstp10_clks R8A7790_CLK_SCU_ALL>,
@@ -1740,11 +1744,11 @@
rcar_sound,dvc {
dvc0: dvc-0 {
- dmas = <&audma0 0xbc>;
+ dmas = <&audma1 0xbc>;
dma-names = "tx";
};
dvc1: dvc-1 {
- dmas = <&audma0 0xbe>;
+ dmas = <&audma1 0xbe>;
dma-names = "tx";
};
};
diff --git a/arch/arm/boot/dts/r8a7791-koelsch.dts b/arch/arm/boot/dts/r8a7791-koelsch.dts
index 5405d337d744..001e6116c47c 100644
--- a/arch/arm/boot/dts/r8a7791-koelsch.dts
+++ b/arch/arm/boot/dts/r8a7791-koelsch.dts
@@ -292,7 +292,7 @@
x2_clk: x2-clock {
compatible = "fixed-clock";
#clock-cells = <0>;
- clock-frequency = <148500000>;
+ clock-frequency = <74250000>;
};
x13_clk: x13-clock {
@@ -516,7 +516,6 @@
&scif_clk {
clock-frequency = <14745600>;
- status = "okay";
};
&sdhi0 {
@@ -767,7 +766,6 @@
&pcie_bus_clk {
clock-frequency = <100000000>;
- status = "okay";
};
&pciec {
diff --git a/arch/arm/boot/dts/r8a7791-porter.dts b/arch/arm/boot/dts/r8a7791-porter.dts
index 6761d11d3f9e..95da5cb9d37a 100644
--- a/arch/arm/boot/dts/r8a7791-porter.dts
+++ b/arch/arm/boot/dts/r8a7791-porter.dts
@@ -226,7 +226,7 @@
phy-handle = <&phy1>;
renesas,ether-link-active-low;
- status = "ok";
+ status = "okay";
phy1: ethernet-phy@1 {
reg = <1>;
@@ -359,7 +359,7 @@
/* composite video input */
&vin0 {
- status = "ok";
+ status = "okay";
pinctrl-0 = <&vin0_pins>;
pinctrl-names = "default";
@@ -401,7 +401,6 @@
&pcie_bus_clk {
clock-frequency = <100000000>;
- status = "okay";
};
&pciec {
diff --git a/arch/arm/boot/dts/r8a7791.dtsi b/arch/arm/boot/dts/r8a7791.dtsi
index 9f9e48511836..4d0c2ce59900 100644
--- a/arch/arm/boot/dts/r8a7791.dtsi
+++ b/arch/arm/boot/dts/r8a7791.dtsi
@@ -74,9 +74,8 @@
next-level-cache = <&L2_CA15>;
};
- L2_CA15: cache-controller@0 {
+ L2_CA15: cache-controller-0 {
compatible = "cache";
- reg = <0>;
power-domains = <&sysc R8A7791_PD_CA15_SCU>;
cache-unified;
cache-level = <2>;
@@ -118,6 +117,9 @@
<0 0xf1004000 0 0x2000>,
<0 0xf1006000 0 0x2000>;
interrupts = <GIC_PPI 9 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_HIGH)>;
+ clocks = <&mstp4_clks R8A7791_CLK_INTC_SYS>;
+ clock-names = "clk";
+ power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
};
gpio0: gpio@e6050000 {
@@ -1124,7 +1126,7 @@
};
/* External CAN clock */
- can_clk: can_clk {
+ can_clk: can {
compatible = "fixed-clock";
#clock-cells = <0>;
/* This value must be overridden by the board. */
@@ -1366,10 +1368,10 @@
mstp4_clks: mstp4_clks@e6150140 {
compatible = "renesas,r8a7791-mstp-clocks", "renesas,cpg-mstp-clocks";
reg = <0 0xe6150140 0 4>, <0 0xe615004c 0 4>;
- clocks = <&cp_clk>;
+ clocks = <&cp_clk>, <&zs_clk>;
#clock-cells = <1>;
- clock-indices = <R8A7791_CLK_IRQC>;
- clock-output-names = "irqc";
+ clock-indices = <R8A7791_CLK_IRQC R8A7791_CLK_INTC_SYS>;
+ clock-output-names = "irqc", "intc-sys";
};
mstp5_clks: mstp5_clks@e6150144 {
compatible = "renesas,r8a7791-mstp-clocks", "renesas,cpg-mstp-clocks";
@@ -1445,8 +1447,11 @@
compatible = "renesas,r8a7791-mstp-clocks", "renesas,cpg-mstp-clocks";
reg = <0 0xe6150998 0 4>, <0 0xe61509a8 0 4>;
clocks = <&p_clk>,
- <&p_clk>, <&p_clk>, <&p_clk>, <&p_clk>, <&p_clk>,
- <&p_clk>, <&p_clk>, <&p_clk>, <&p_clk>, <&p_clk>,
+ <&mstp10_clks R8A7791_CLK_SSI_ALL>, <&mstp10_clks R8A7791_CLK_SSI_ALL>,
+ <&mstp10_clks R8A7791_CLK_SSI_ALL>, <&mstp10_clks R8A7791_CLK_SSI_ALL>,
+ <&mstp10_clks R8A7791_CLK_SSI_ALL>, <&mstp10_clks R8A7791_CLK_SSI_ALL>,
+ <&mstp10_clks R8A7791_CLK_SSI_ALL>, <&mstp10_clks R8A7791_CLK_SSI_ALL>,
+ <&mstp10_clks R8A7791_CLK_SSI_ALL>, <&mstp10_clks R8A7791_CLK_SSI_ALL>,
<&p_clk>,
<&mstp10_clks R8A7791_CLK_SCU_ALL>, <&mstp10_clks R8A7791_CLK_SCU_ALL>,
<&mstp10_clks R8A7791_CLK_SCU_ALL>, <&mstp10_clks R8A7791_CLK_SCU_ALL>,
@@ -1777,11 +1782,11 @@
rcar_sound,dvc {
dvc0: dvc-0 {
- dmas = <&audma0 0xbc>;
+ dmas = <&audma1 0xbc>;
dma-names = "tx";
};
dvc1: dvc-1 {
- dmas = <&audma0 0xbe>;
+ dmas = <&audma1 0xbe>;
dma-names = "tx";
};
};
diff --git a/arch/arm/boot/dts/r8a7792.dtsi b/arch/arm/boot/dts/r8a7792.dtsi
index 8ecfda7a004e..0efecb232ee5 100644
--- a/arch/arm/boot/dts/r8a7792.dtsi
+++ b/arch/arm/boot/dts/r8a7792.dtsi
@@ -46,7 +46,7 @@
compatible = "arm,cortex-a15";
reg = <0>;
clock-frequency = <1000000000>;
- clocks = <&cpg_clocks R8A7792_CLK_Z>;
+ clocks = <&z_clk>;
power-domains = <&sysc R8A7792_PD_CA15_CPU0>;
next-level-cache = <&L2_CA15>;
};
@@ -60,9 +60,8 @@
next-level-cache = <&L2_CA15>;
};
- L2_CA15: cache-controller@0 {
+ L2_CA15: cache-controller-0 {
compatible = "cache";
- reg = <0>;
cache-unified;
cache-level = <2>;
power-domains = <&sysc R8A7792_PD_CA15_SCU>;
@@ -93,6 +92,9 @@
<0 0xf1006000 0 0x2000>;
interrupts = <GIC_PPI 9 (GIC_CPU_MASK_SIMPLE(2) |
IRQ_TYPE_LEVEL_HIGH)>;
+ clocks = <&mstp4_clks R8A7792_CLK_INTC_SYS>;
+ clock-names = "clk";
+ power-domains = <&sysc R8A7792_PD_ALWAYS_ON>;
};
irqc: interrupt-controller@e61c0000 {
@@ -764,7 +766,7 @@
clocks = <&extal_clk>;
#clock-cells = <1>;
clock-output-names = "main", "pll0", "pll1", "pll3",
- "lb", "qspi", "z";
+ "lb", "qspi";
#power-domain-cells = <0>;
};
@@ -776,6 +778,13 @@
clock-div = <2>;
clock-mult = <1>;
};
+ z_clk: z {
+ compatible = "fixed-factor-clock";
+ clocks = <&cpg_clocks R8A7792_CLK_PLL0>;
+ #clock-cells = <0>;
+ clock-div = <1>;
+ clock-mult = <1>;
+ };
zx_clk: zx {
compatible = "fixed-factor-clock";
clocks = <&cpg_clocks R8A7792_CLK_PLL1>;
@@ -896,10 +905,12 @@
compatible = "renesas,r8a7792-mstp-clocks",
"renesas,cpg-mstp-clocks";
reg = <0 0xe6150140 0 4>, <0 0xe615004c 0 4>;
- clocks = <&cp_clk>;
+ clocks = <&cp_clk>, <&zs_clk>;
#clock-cells = <1>;
- clock-indices = <R8A7792_CLK_IRQC>;
- clock-output-names = "irqc";
+ clock-indices = <
+ R8A7792_CLK_IRQC R8A7792_CLK_INTC_SYS
+ >;
+ clock-output-names = "irqc", "intc-sys";
};
mstp7_clks: mstp7_clks@e615014c {
compatible = "renesas,r8a7792-mstp-clocks",
diff --git a/arch/arm/boot/dts/r8a7793-gose.dts b/arch/arm/boot/dts/r8a7793-gose.dts
index 92fff07c5e2b..806c93f6ae8b 100644
--- a/arch/arm/boot/dts/r8a7793-gose.dts
+++ b/arch/arm/boot/dts/r8a7793-gose.dts
@@ -412,7 +412,6 @@
&scif_clk {
clock-frequency = <14745600>;
- status = "okay";
};
&sdhi0 {
diff --git a/arch/arm/boot/dts/r8a7793.dtsi b/arch/arm/boot/dts/r8a7793.dtsi
index 48ce21c5e8db..4de6041d61f9 100644
--- a/arch/arm/boot/dts/r8a7793.dtsi
+++ b/arch/arm/boot/dts/r8a7793.dtsi
@@ -65,9 +65,8 @@
power-domains = <&sysc R8A7793_PD_CA15_CPU1>;
};
- L2_CA15: cache-controller@0 {
+ L2_CA15: cache-controller-0 {
compatible = "cache";
- reg = <0>;
power-domains = <&sysc R8A7793_PD_CA15_SCU>;
cache-unified;
cache-level = <2>;
@@ -109,6 +108,9 @@
<0 0xf1004000 0 0x2000>,
<0 0xf1006000 0 0x2000>;
interrupts = <GIC_PPI 9 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_HIGH)>;
+ clocks = <&mstp4_clks R8A7793_CLK_INTC_SYS>;
+ clock-names = "clk";
+ power-domains = <&sysc R8A7793_PD_ALWAYS_ON>;
};
gpio0: gpio@e6050000 {
@@ -1179,10 +1181,12 @@
mstp4_clks: mstp4_clks@e6150140 {
compatible = "renesas,r8a7793-mstp-clocks", "renesas,cpg-mstp-clocks";
reg = <0 0xe6150140 0 4>, <0 0xe615004c 0 4>;
- clocks = <&cp_clk>;
+ clocks = <&cp_clk>, <&zs_clk>;
#clock-cells = <1>;
- clock-indices = <R8A7793_CLK_IRQC>;
- clock-output-names = "irqc";
+ clock-indices = <
+ R8A7793_CLK_IRQC R8A7793_CLK_INTC_SYS
+ >;
+ clock-output-names = "irqc", "intc-sys";
};
mstp5_clks: mstp5_clks@e6150144 {
compatible = "renesas,r8a7793-mstp-clocks", "renesas,cpg-mstp-clocks";
@@ -1265,8 +1269,11 @@
compatible = "renesas,r8a7793-mstp-clocks", "renesas,cpg-mstp-clocks";
reg = <0 0xe6150998 0 4>, <0 0xe61509a8 0 4>;
clocks = <&p_clk>,
- <&p_clk>, <&p_clk>, <&p_clk>, <&p_clk>, <&p_clk>,
- <&p_clk>, <&p_clk>, <&p_clk>, <&p_clk>, <&p_clk>,
+ <&mstp10_clks R8A7793_CLK_SSI_ALL>, <&mstp10_clks R8A7793_CLK_SSI_ALL>,
+ <&mstp10_clks R8A7793_CLK_SSI_ALL>, <&mstp10_clks R8A7793_CLK_SSI_ALL>,
+ <&mstp10_clks R8A7793_CLK_SSI_ALL>, <&mstp10_clks R8A7793_CLK_SSI_ALL>,
+ <&mstp10_clks R8A7793_CLK_SSI_ALL>, <&mstp10_clks R8A7793_CLK_SSI_ALL>,
+ <&mstp10_clks R8A7793_CLK_SSI_ALL>, <&mstp10_clks R8A7793_CLK_SSI_ALL>,
<&p_clk>,
<&mstp10_clks R8A7793_CLK_SCU_ALL>, <&mstp10_clks R8A7793_CLK_SCU_ALL>,
<&mstp10_clks R8A7793_CLK_SCU_ALL>, <&mstp10_clks R8A7793_CLK_SCU_ALL>,
@@ -1426,11 +1433,11 @@
rcar_sound,dvc {
dvc0: dvc-0 {
- dmas = <&audma0 0xbc>;
+ dmas = <&audma1 0xbc>;
dma-names = "tx";
};
dvc1: dvc-1 {
- dmas = <&audma0 0xbe>;
+ dmas = <&audma1 0xbe>;
dma-names = "tx";
};
};
diff --git a/arch/arm/boot/dts/r8a7794-alt.dts b/arch/arm/boot/dts/r8a7794-alt.dts
index 569e3f0e97a5..f1eea13cdf44 100644
--- a/arch/arm/boot/dts/r8a7794-alt.dts
+++ b/arch/arm/boot/dts/r8a7794-alt.dts
@@ -168,7 +168,7 @@
status = "okay";
clocks = <&mstp7_clks R8A7794_CLK_DU0>,
- <&mstp7_clks R8A7794_CLK_DU0>,
+ <&mstp7_clks R8A7794_CLK_DU1>,
<&x13_clk>, <&x2_clk>;
clock-names = "du.0", "du.1", "dclkin.0", "dclkin.1";
@@ -375,7 +375,6 @@
&scif_clk {
clock-frequency = <14745600>;
- status = "okay";
};
&qspi {
diff --git a/arch/arm/boot/dts/r8a7794-silk.dts b/arch/arm/boot/dts/r8a7794-silk.dts
index cf880ac06f4b..4cb5278d104d 100644
--- a/arch/arm/boot/dts/r8a7794-silk.dts
+++ b/arch/arm/boot/dts/r8a7794-silk.dts
@@ -248,7 +248,6 @@
&scif_clk {
clock-frequency = <14745600>;
- status = "okay";
};
&ether {
@@ -425,7 +424,7 @@
status = "okay";
clocks = <&mstp7_clks R8A7794_CLK_DU0>,
- <&mstp7_clks R8A7794_CLK_DU0>,
+ <&mstp7_clks R8A7794_CLK_DU1>,
<&x2_clk>, <&x3_clk>;
clock-names = "du.0", "du.1", "dclkin.0", "dclkin.1";
diff --git a/arch/arm/boot/dts/r8a7794.dtsi b/arch/arm/boot/dts/r8a7794.dtsi
index 319c1069b7ee..a19b884fb258 100644
--- a/arch/arm/boot/dts/r8a7794.dtsi
+++ b/arch/arm/boot/dts/r8a7794.dtsi
@@ -43,6 +43,7 @@
compatible = "arm,cortex-a7";
reg = <0>;
clock-frequency = <1000000000>;
+ clocks = <&z2_clk>;
power-domains = <&sysc R8A7794_PD_CA7_CPU0>;
next-level-cache = <&L2_CA7>;
};
@@ -56,9 +57,8 @@
next-level-cache = <&L2_CA7>;
};
- L2_CA7: cache-controller@0 {
+ L2_CA7: cache-controller-0 {
compatible = "cache";
- reg = <0>;
power-domains = <&sysc R8A7794_PD_CA7_SCU>;
cache-unified;
cache-level = <2>;
@@ -75,6 +75,9 @@
<0 0xf1004000 0 0x2000>,
<0 0xf1006000 0 0x2000>;
interrupts = <GIC_PPI 9 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_HIGH)>;
+ clocks = <&mstp4_clks R8A7794_CLK_INTC_SYS>;
+ clock-names = "clk";
+ power-domains = <&sysc R8A7794_PD_ALWAYS_ON>;
};
gpio0: gpio@e6050000 {
@@ -923,7 +926,7 @@
interrupts = <GIC_SPI 256 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 268 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp7_clks R8A7794_CLK_DU0>,
- <&mstp7_clks R8A7794_CLK_DU0>;
+ <&mstp7_clks R8A7794_CLK_DU1>;
clock-names = "du.0", "du.1";
status = "disabled";
@@ -1062,6 +1065,13 @@
clock-div = <2>;
clock-mult = <1>;
};
+ z2_clk: z2 {
+ compatible = "fixed-factor-clock";
+ clocks = <&cpg_clocks R8A7794_CLK_PLL0>;
+ #clock-cells = <0>;
+ clock-div = <1>;
+ clock-mult = <1>;
+ };
zg_clk: zg {
compatible = "fixed-factor-clock";
clocks = <&cpg_clocks R8A7794_CLK_PLL1>;
@@ -1248,10 +1258,10 @@
mstp4_clks: mstp4_clks@e6150140 {
compatible = "renesas,r8a7794-mstp-clocks", "renesas,cpg-mstp-clocks";
reg = <0 0xe6150140 0 4>, <0 0xe615004c 0 4>;
- clocks = <&cp_clk>;
+ clocks = <&cp_clk>, <&zs_clk>;
#clock-cells = <1>;
- clock-indices = <R8A7794_CLK_IRQC>;
- clock-output-names = "irqc";
+ clock-indices = <R8A7794_CLK_IRQC R8A7794_CLK_INTC_SYS>;
+ clock-output-names = "irqc", "intc-sys";
};
mstp5_clks: mstp5_clks@e6150144 {
compatible = "renesas,r8a7794-mstp-clocks", "renesas,cpg-mstp-clocks";
@@ -1268,19 +1278,21 @@
clocks = <&mp_clk>, <&hp_clk>,
<&zs_clk>, <&p_clk>, <&p_clk>, <&zs_clk>,
<&zs_clk>, <&p_clk>, <&p_clk>, <&p_clk>, <&p_clk>,
- <&zx_clk>;
+ <&zx_clk>, <&zx_clk>;
#clock-cells = <1>;
clock-indices = <
R8A7794_CLK_EHCI R8A7794_CLK_HSUSB
R8A7794_CLK_HSCIF2 R8A7794_CLK_SCIF5
R8A7794_CLK_SCIF4 R8A7794_CLK_HSCIF1 R8A7794_CLK_HSCIF0
R8A7794_CLK_SCIF3 R8A7794_CLK_SCIF2 R8A7794_CLK_SCIF1
- R8A7794_CLK_SCIF0 R8A7794_CLK_DU0
+ R8A7794_CLK_SCIF0
+ R8A7794_CLK_DU1 R8A7794_CLK_DU0
>;
clock-output-names =
"ehci", "hsusb",
"hscif2", "scif5", "scif4", "hscif1", "hscif0",
- "scif3", "scif2", "scif1", "scif0", "du0";
+ "scif3", "scif2", "scif1", "scif0",
+ "du1", "du0";
};
mstp8_clks: mstp8_clks@e6150990 {
compatible = "renesas,r8a7794-mstp-clocks", "renesas,cpg-mstp-clocks";
diff --git a/arch/arm/boot/dts/rk3036.dtsi b/arch/arm/boot/dts/rk3036.dtsi
index ff9b90bfaefd..ec91325d3b6e 100644
--- a/arch/arm/boot/dts/rk3036.dtsi
+++ b/arch/arm/boot/dts/rk3036.dtsi
@@ -250,6 +250,8 @@
clock-names = "biu", "ciu";
fifo-depth = <0x100>;
interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>;
+ resets = <&cru SRST_MMC0>;
+ reset-names = "reset";
status = "disabled";
};
@@ -262,6 +264,8 @@
clock-names = "biu", "ciu", "ciu_drv", "ciu_sample";
fifo-depth = <0x100>;
interrupts = <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>;
+ resets = <&cru SRST_SDIO>;
+ reset-names = "reset";
status = "disabled";
};
@@ -286,6 +290,8 @@
num-slots = <1>;
pinctrl-names = "default";
pinctrl-0 = <&emmc_clk &emmc_cmd &emmc_bus8>;
+ resets = <&cru SRST_EMMC>;
+ reset-names = "reset";
status = "disabled";
};
diff --git a/arch/arm/boot/dts/rk3188.dtsi b/arch/arm/boot/dts/rk3188.dtsi
index 1aff4ad22fc4..1399bc04ea77 100644
--- a/arch/arm/boot/dts/rk3188.dtsi
+++ b/arch/arm/boot/dts/rk3188.dtsi
@@ -545,12 +545,12 @@
};
&global_timer {
- interrupts = <GIC_PPI 11 0xf04>;
+ interrupts = <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_EDGE_RISING)>;
status = "disabled";
};
&local_timer {
- interrupts = <GIC_PPI 13 0xf04>;
+ interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_EDGE_RISING)>;
};
&i2c0 {
diff --git a/arch/arm/boot/dts/rk322x.dtsi b/arch/arm/boot/dts/rk322x.dtsi
index 641607d9ad29..48a0c1cf4301 100644
--- a/arch/arm/boot/dts/rk322x.dtsi
+++ b/arch/arm/boot/dts/rk322x.dtsi
@@ -414,6 +414,8 @@
fifo-depth = <0x100>;
pinctrl-names = "default";
pinctrl-0 = <&emmc_clk &emmc_cmd &emmc_bus8>;
+ resets = <&cru SRST_EMMC>;
+ reset-names = "reset";
status = "disabled";
};
diff --git a/arch/arm/boot/dts/rk3288-miqi.dts b/arch/arm/boot/dts/rk3288-miqi.dts
index 21326f3e8564..30e93f694ae8 100644
--- a/arch/arm/boot/dts/rk3288-miqi.dts
+++ b/arch/arm/boot/dts/rk3288-miqi.dts
@@ -68,11 +68,9 @@
compatible = "gpio-leds";
work {
- gpios = <&gpio7 RK_PA4 GPIO_ACTIVE_LOW>;
+ gpios = <&gpio7 RK_PA2 GPIO_ACTIVE_HIGH>;
label = "miqi:green:user";
- linux,default-trigger = "default-on";
- pinctrl-names = "default";
- pinctrl-0 = <&led_ctl>;
+ linux,default-trigger = "timer";
};
};
@@ -363,12 +361,6 @@
};
};
- leds {
- led_ctl: led-ctl {
- rockchip,pins = <7 4 RK_FUNC_GPIO &pcfg_pull_none>;
- };
- };
-
sdmmc {
/*
* Default drive strength isn't enough to achieve even
diff --git a/arch/arm/boot/dts/rk3288-phycore-rdk.dts b/arch/arm/boot/dts/rk3288-phycore-rdk.dts
new file mode 100644
index 000000000000..3dda79579b51
--- /dev/null
+++ b/arch/arm/boot/dts/rk3288-phycore-rdk.dts
@@ -0,0 +1,298 @@
+/*
+ * Device tree file for Phytec PCM-947 carrier board
+ * Copyright (C) 2017 PHYTEC Messtechnik GmbH
+ * Author: Wadim Egorov <w.egorov@phytec.de>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/leds/leds-pca9532.h>
+#include "rk3288-phycore-som.dtsi"
+
+/ {
+ model = "Phytec RK3288 PCM-947";
+ compatible = "phytec,rk3288-pcm-947", "phytec,rk3288-phycore-som", "rockchip,rk3288";
+
+ user_buttons: user-buttons {
+ compatible = "gpio-keys";
+ pinctrl-names = "default";
+ pinctrl-0 = <&user_button_pins>;
+
+ button@0 {
+ label = "home";
+ linux,code = <KEY_HOME>;
+ gpios = <&gpio8 0 GPIO_ACTIVE_HIGH>;
+ wakeup-source;
+ };
+
+ button@1 {
+ label = "menu";
+ linux,code = <KEY_MENU>;
+ gpios = <&gpio8 3 GPIO_ACTIVE_HIGH>;
+ wakeup-source;
+ };
+ };
+
+ vcc_host0_5v: usb-host0-regulator {
+ compatible = "regulator-fixed";
+ gpio = <&gpio2 13 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&host0_vbus_drv>;
+ regulator-name = "vcc_host0_5v";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-always-on;
+ vin-supply = <&vdd_in_otg_out>;
+ };
+
+ vcc_host1_5v: usb-host1-regulator {
+ compatible = "regulator-fixed";
+ gpio = <&gpio2 0 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&host1_vbus_drv>;
+ regulator-name = "vcc_host1_5v";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-always-on;
+ vin-supply = <&vdd_in_otg_out>;
+ };
+
+ vcc_otg_5v: usb-otg-regulator {
+ compatible = "regulator-fixed";
+ gpio = <&gpio2 12 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&otg_vbus_drv>;
+ regulator-name = "vcc_otg_5v";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-always-on;
+ vin-supply = <&vdd_in_otg_out>;
+ };
+};
+
+&gmac {
+ status = "okay";
+};
+
+&hdmi {
+ status = "okay";
+};
+
+&i2c1 {
+ status = "okay";
+
+ touchscreen@44 {
+ compatible = "st,stmpe811";
+ reg = <0x44>;
+ };
+
+ adc@64 {
+ compatible = "maxim,max1037";
+ reg = <0x64>;
+ };
+
+ i2c_rtc: rtc@68 {
+ compatible = "rv4162";
+ reg = <0x68>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c_rtc_int>;
+ interrupt-parent = <&gpio5>;
+ interrupts = <10 0>;
+ };
+};
+
+&i2c3 {
+ status = "okay";
+
+ i2c_eeprom_cb: eeprom@51 {
+ compatible = "atmel,24c32";
+ reg = <0x51>;
+ pagesize = <32>;
+ };
+};
+
+&i2c4 {
+ status = "okay";
+
+ /* PCA9533 - 4-bit LED dimmer */
+ leddim: leddimmer@62 {
+ compatible = "nxp,pca9533";
+ reg = <0x62>;
+
+ led1 {
+ label = "red:user1";
+ linux,default-trigger = "none";
+ type = <PCA9532_TYPE_LED>;
+ };
+
+ led2 {
+ label = "green:user2";
+ linux,default-trigger = "none";
+ type = <PCA9532_TYPE_LED>;
+ };
+
+ led3 {
+ label = "blue:user3";
+ linux,default-trigger = "none";
+ type = <PCA9532_TYPE_LED>;
+ };
+
+ led4 {
+ label = "red:user4";
+ linux,default-trigger = "none";
+ type = <PCA9532_TYPE_LED>;
+ };
+ };
+};
+
+&i2c5 {
+ status = "okay";
+};
+
+&pinctrl {
+ pcfg_pull_up_drv_12ma: pcfg-pull-up-drv-12ma {
+ bias-pull-up;
+ drive-strength = <12>;
+ };
+
+ buttons {
+ user_button_pins: user-button-pins {
+ /* button 1 */
+ rockchip,pins = <8 3 RK_FUNC_GPIO &pcfg_pull_up>,
+ /* button 2 */
+ <8 0 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+ };
+
+ rv4162 {
+ i2c_rtc_int: i2c-rtc-int {
+ rockchip,pins = <5 10 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+ };
+
+ sdmmc {
+ /*
+ * Default drive strength isn't enough to achieve even
+ * high-speed mode on pcm-947 board so bump up to 12 mA.
+ */
+ sdmmc_bus4: sdmmc-bus4 {
+ rockchip,pins = <6 16 RK_FUNC_1 &pcfg_pull_up_drv_12ma>,
+ <6 17 RK_FUNC_1 &pcfg_pull_up_drv_12ma>,
+ <6 18 RK_FUNC_1 &pcfg_pull_up_drv_12ma>,
+ <6 19 RK_FUNC_1 &pcfg_pull_up_drv_12ma>;
+ };
+
+ sdmmc_clk: sdmmc-clk {
+ rockchip,pins = <6 20 RK_FUNC_1 &pcfg_pull_none_12ma>;
+ };
+
+ sdmmc_cmd: sdmmc-cmd {
+ rockchip,pins = <6 21 RK_FUNC_1 &pcfg_pull_up_drv_12ma>;
+ };
+
+ sdmmc_pwr: sdmmc-pwr {
+ rockchip,pins = <7 11 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ touchscreen {
+ ts_irq_pin: ts-irq-pin {
+ rockchip,pins = <5 15 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ usb_host {
+ host0_vbus_drv: host0-vbus-drv {
+ rockchip,pins = <2 13 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ host1_vbus_drv: host1-vbus-drv {
+ rockchip,pins = <2 0 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ usb_otg {
+ otg_vbus_drv: otg-vbus-drv {
+ rockchip,pins = <2 12 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+};
+
+&sdmmc {
+ bus-width = <4>;
+ cap-mmc-highspeed;
+ cap-sd-highspeed;
+ card-detect-delay = <200>;
+ disable-wp;
+ num-slots = <1>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&sdmmc_clk &sdmmc_cmd &sdmmc_cd &sdmmc_bus4>;
+ vmmc-supply = <&vdd_io_sd>;
+ vqmmc-supply = <&vdd_io_sd>;
+ status = "okay";
+};
+
+&uart0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart0_xfer &uart0_cts &uart0_rts>;
+ status = "okay";
+};
+
+&uart2 {
+ status = "okay";
+};
+
+&usbphy {
+ status = "okay";
+};
+
+&usb_host0_ehci {
+ status = "okay";
+};
+
+&usb_host1 {
+ status = "okay";
+};
+
+&usb_otg {
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/rk3288-phycore-som.dtsi b/arch/arm/boot/dts/rk3288-phycore-som.dtsi
new file mode 100644
index 000000000000..26cd3ad45160
--- /dev/null
+++ b/arch/arm/boot/dts/rk3288-phycore-som.dtsi
@@ -0,0 +1,497 @@
+/*
+ * Device tree file for Phytec phyCORE-RK3288 SoM
+ * Copyright (C) 2017 PHYTEC Messtechnik GmbH
+ * Author: Wadim Egorov <w.egorov@phytec.de>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <dt-bindings/net/ti-dp83867.h>
+#include "rk3288.dtsi"
+
+/ {
+ model = "Phytec RK3288 phyCORE";
+ compatible = "phytec,rk3288-phycore-som", "rockchip,rk3288";
+
+ /*
+ * Set the minimum memory size here and
+ * let the bootloader set the real size.
+ */
+ memory {
+ device_type = "memory";
+ reg = <0 0x8000000>;
+ };
+
+ aliases {
+ rtc0 = &i2c_rtc;
+ rtc1 = &rk818;
+ };
+
+ ext_gmac: external-gmac-clock {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <125000000>;
+ clock-output-names = "ext_gmac";
+ };
+
+ leds: user-leds {
+ compatible = "gpio-leds";
+ pinctrl-names = "default";
+ pinctrl-0 = <&user_led>;
+
+ user {
+ label = "green_led";
+ gpios = <&gpio7 2 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "heartbeat";
+ default-state = "keep";
+ };
+ };
+
+ vdd_emmc_io: vdd-emmc-io {
+ compatible = "regulator-fixed";
+ regulator-name = "vdd_emmc_io";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ vin-supply = <&vdd_3v3_io>;
+ };
+
+ vdd_in_otg_out: vdd-in-otg-out {
+ compatible = "regulator-fixed";
+ regulator-name = "vdd_in_otg_out";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ };
+
+ vdd_misc_1v8: vdd-misc-1v8 {
+ compatible = "regulator-fixed";
+ regulator-name = "vdd_misc_1v8";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+};
+
+&cpu0 {
+ cpu0-supply = <&vdd_cpu>;
+ operating-points = <
+ /* KHz uV */
+ 1800000 1400000
+ 1608000 1350000
+ 1512000 1300000
+ 1416000 1200000
+ 1200000 1100000
+ 1008000 1050000
+ 816000 1000000
+ 696000 950000
+ 600000 900000
+ 408000 900000
+ 312000 900000
+ 216000 900000
+ 126000 900000
+ >;
+};
+
+&emmc {
+ status = "okay";
+ bus-width = <8>;
+ cap-mmc-highspeed;
+ disable-wp;
+ non-removable;
+ num-slots = <1>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&emmc_clk &emmc_cmd &emmc_pwr &emmc_bus8>;
+ vmmc-supply = <&vdd_3v3_io>;
+ vqmmc-supply = <&vdd_emmc_io>;
+};
+
+&gmac {
+ assigned-clocks = <&cru SCLK_MAC>;
+ assigned-clock-parents = <&ext_gmac>;
+ clock_in_out = "input";
+ pinctrl-names = "default";
+ pinctrl-0 = <&rgmii_pins &phy_rst &phy_int>;
+ phy-handle = <&phy0>;
+ phy-supply = <&vdd_eth_2v5>;
+ phy-mode = "rgmii-id";
+ snps,reset-active-low;
+ snps,reset-delays-us = <0 10000 1000000>;
+ snps,reset-gpio = <&gpio4 8 GPIO_ACTIVE_HIGH>;
+ tx_delay = <0x0>;
+ rx_delay = <0x0>;
+
+ mdio0 {
+ compatible = "snps,dwmac-mdio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ phy0: ethernet-phy@0 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <0>;
+ interrupt-parent = <&gpio4>;
+ interrupts = <2 IRQ_TYPE_EDGE_FALLING>;
+ ti,rx-internal-delay = <DP83867_RGMIIDCTL_2_00_NS>;
+ ti,tx-internal-delay = <DP83867_RGMIIDCTL_2_00_NS>;
+ ti,fifo-depth = <DP83867_PHYCR_FIFO_DEPTH_4_B_NIB>;
+ enet-phy-lane-no-swap;
+ };
+ };
+};
+
+&hdmi {
+ ddc-i2c-bus = <&i2c5>;
+};
+
+&io_domains {
+ status = "okay";
+ sdcard-supply = <&vdd_io_sd>;
+ flash0-supply = <&vdd_emmc_io>;
+ flash1-supply = <&vdd_misc_1v8>;
+ gpio1830-supply = <&vdd_3v3_io>;
+ gpio30-supply = <&vdd_3v3_io>;
+ bb-supply = <&vdd_3v3_io>;
+ dvp-supply = <&vdd_3v3_io>;
+ lcdc-supply = <&vdd_3v3_io>;
+ wifi-supply = <&vdd_3v3_io>;
+ audio-supply = <&vdd_3v3_io>;
+};
+
+&i2c0 {
+ status = "okay";
+ clock-frequency = <400000>;
+
+ rk818: pmic@1c {
+ compatible = "rockchip,rk818";
+ reg = <0x1c>;
+ interrupt-parent = <&gpio0>;
+ interrupts = <4 IRQ_TYPE_LEVEL_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pmic_int>;
+ rockchip,system-power-controller;
+ wakeup-source;
+ #clock-cells = <1>;
+
+ vcc1-supply = <&vdd_sys>;
+ vcc2-supply = <&vdd_sys>;
+ vcc3-supply = <&vdd_sys>;
+ vcc4-supply = <&vdd_sys>;
+ boost-supply = <&vdd_in_otg_out>;
+ vcc6-supply = <&vdd_sys>;
+ vcc7-supply = <&vdd_misc_1v8>;
+ vcc8-supply = <&vdd_misc_1v8>;
+ vcc9-supply = <&vdd_3v3_io>;
+ vddio-supply = <&vdd_3v3_io>;
+
+ regulators {
+ vdd_log: DCDC_REG1 {
+ regulator-name = "vdd_log";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1100000>;
+ regulator-max-microvolt = <1100000>;
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_gpu: DCDC_REG2 {
+ regulator-name = "vdd_gpu";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <1250000>;
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1000000>;
+ };
+ };
+
+ vcc_ddr: DCDC_REG3 {
+ regulator-name = "vcc_ddr";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ };
+ };
+
+ vdd_3v3_io: DCDC_REG4 {
+ regulator-name = "vdd_3v3_io";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <3300000>;
+ };
+ };
+
+ vdd_sys: DCDC_BOOST {
+ regulator-name = "vdd_sys";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <5000000>;
+ };
+ };
+
+ /* vcc9 */
+ vdd_sd: SWITCH_REG {
+ regulator-name = "vdd_sd";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ /* vcc6 */
+ vdd_eth_2v5: LDO_REG2 {
+ regulator-name = "vdd_eth_2v5";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <2500000>;
+ regulator-max-microvolt = <2500000>;
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <2500000>;
+ };
+ };
+
+ /* vcc7 */
+ vdd_1v0: LDO_REG3 {
+ regulator-name = "vdd_1v0";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1000000>;
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1000000>;
+ };
+ };
+
+ /* vcc8 */
+ vdd_1v8_lcd_ldo: LDO_REG4 {
+ regulator-name = "vdd_1v8_lcd_ldo";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1800000>;
+ };
+ };
+
+ /* vcc8 */
+ vdd_1v0_lcd: LDO_REG6 {
+ regulator-name = "vdd_1v0_lcd";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1000000>;
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1000000>;
+ };
+ };
+
+ /* vcc7 */
+ vdd_1v8_ldo: LDO_REG7 {
+ regulator-name = "vdd_1v8_ldo";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ regulator-suspend-microvolt = <1800000>;
+ };
+ };
+
+ /* vcc9 */
+ vdd_io_sd: LDO_REG9 {
+ regulator-name = "vdd_io_sd";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <3300000>;
+ };
+ };
+ };
+ };
+
+ /* M24C32-D */
+ i2c_eeprom: eeprom@50 {
+ compatible = "atmel,24c32";
+ reg = <0x50>;
+ pagesize = <32>;
+ };
+
+ vdd_cpu: regulator@60 {
+ compatible = "fcs,fan53555";
+ reg = <0x60>;
+ fcs,suspend-voltage-selector = <1>;
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-enable-ramp-delay = <300>;
+ regulator-name = "vdd_cpu";
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <1430000>;
+ regulator-ramp-delay = <8000>;
+ vin-supply = <&vdd_sys>;
+ };
+};
+
+&pinctrl {
+ pcfg_output_high: pcfg-output-high {
+ output-high;
+ };
+
+ emmc {
+ /*
+ * We run eMMC at max speed; bump up drive strength.
+ * We also have external pulls, so disable the internal ones.
+ */
+ emmc_clk: emmc-clk {
+ rockchip,pins = <3 18 RK_FUNC_2 &pcfg_pull_none_12ma>;
+ };
+
+ emmc_cmd: emmc-cmd {
+ rockchip,pins = <3 16 RK_FUNC_2 &pcfg_pull_none_12ma>;
+ };
+
+ emmc_bus8: emmc-bus8 {
+ rockchip,pins = <3 0 RK_FUNC_2 &pcfg_pull_none_12ma>,
+ <3 1 RK_FUNC_2 &pcfg_pull_none_12ma>,
+ <3 2 RK_FUNC_2 &pcfg_pull_none_12ma>,
+ <3 3 RK_FUNC_2 &pcfg_pull_none_12ma>,
+ <3 4 RK_FUNC_2 &pcfg_pull_none_12ma>,
+ <3 5 RK_FUNC_2 &pcfg_pull_none_12ma>,
+ <3 6 RK_FUNC_2 &pcfg_pull_none_12ma>,
+ <3 7 RK_FUNC_2 &pcfg_pull_none_12ma>;
+ };
+ };
+
+ gmac {
+ phy_int: phy-int {
+ rockchip,pins = <4 2 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+
+ phy_rst: phy-rst {
+ rockchip,pins = <4 8 RK_FUNC_GPIO &pcfg_output_high>;
+ };
+ };
+
+ leds {
+ user_led: user-led {
+ rockchip,pins = <7 2 RK_FUNC_GPIO &pcfg_output_high>;
+ };
+ };
+
+ pmic {
+ pmic_int: pmic-int {
+ rockchip,pins = <RK_GPIO0 4 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+
+ /* Pin for switching state between sleep and non-sleep state */
+ pmic_sleep: pmic-sleep {
+ rockchip,pins = <RK_GPIO0 0 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+ };
+};
+
+&pwm1 {
+ status = "okay";
+};
+
+&saradc {
+ status = "okay";
+ vref-supply = <&vdd_1v8_ldo>;
+};
+
+&spi2 {
+ status = "okay";
+
+ serial_flash: flash@0 {
+ compatible = "micron,n25q128a13", "jedec,spi-nor";
+ reg = <0x0>;
+ spi-max-frequency = <50000000>;
+ m25p,fast-read;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ status = "okay";
+ };
+};
+
+&tsadc {
+ status = "okay";
+ rockchip,hw-tshut-mode = <0>;
+ rockchip,hw-tshut-polarity = <0>;
+};
+
+&vopb {
+ status = "okay";
+};
+
+&vopb_mmu {
+ status = "okay";
+};
+
+&vopl {
+ status = "okay";
+};
+
+&vopl_mmu {
+ status = "okay";
+};
+
+&wdt {
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/rk3288-rock2-som.dtsi b/arch/arm/boot/dts/rk3288-rock2-som.dtsi
index 1c0bbc9b928b..f0778a46bca9 100644
--- a/arch/arm/boot/dts/rk3288-rock2-som.dtsi
+++ b/arch/arm/boot/dts/rk3288-rock2-som.dtsi
@@ -136,7 +136,7 @@
regulator-always-on;
};
- vcc_io: REG2 {
+ vcc_io: vccio_codec: REG2 {
regulator-name = "VCC_IO";
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
diff --git a/arch/arm/boot/dts/rk3288-rock2-square.dts b/arch/arm/boot/dts/rk3288-rock2-square.dts
index 96a2e745bb93..a23a94811be8 100644
--- a/arch/arm/boot/dts/rk3288-rock2-square.dts
+++ b/arch/arm/boot/dts/rk3288-rock2-square.dts
@@ -81,11 +81,35 @@
};
};
+ sata_pwr: sata-prw-regulator {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpio = <&gpio0 13 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&sata_pwr_en>;
+ /* Always turn on the 5V sata power connector */
+ regulator-always-on;
+ regulator-name = "sata_pwr";
+ };
+
spdif_out: spdif-out {
compatible = "linux,spdif-dit";
#sound-dai-cells = <0>;
};
+ sound-i2s {
+ compatible = "rockchip,rk3288-hdmi-analog";
+ pinctrl-names = "default";
+ pinctrl-0 = <&phone_ctl>, <&hp_det>;
+ rockchip,audio-codec = <&es8388>;
+ rockchip,hp-det-gpios = <&gpio7 7 GPIO_ACTIVE_HIGH>;
+ rockchip,hp-en-gpios = <&gpio8 0 GPIO_ACTIVE_HIGH>;
+ rockchip,i2s-controller = <&i2s>;
+ rockchip,model = "I2S";
+ rockchip,routing = "Analog", "LOUT2",
+ "Analog", "ROUT2";
+ };
+
sdio_pwrseq: sdio-pwrseq {
compatible = "mmc-pwrseq-simple";
clocks = <&hym8563>;
@@ -173,10 +197,28 @@
};
};
+&i2c2 {
+ status = "okay";
+
+ es8388: es8388@10 {
+ compatible = "everest,es8388", "everest,es8328";
+ reg = <0x10>;
+ AVDD-supply = <&vccio_codec>;
+ DVDD-supply = <&vccio_codec>;
+ HPVDD-supply = <&vccio_codec>;
+ PVDD-supply = <&vccio_codec>;
+ clocks = <&cru SCLK_I2S0_OUT>;
+ };
+};
+
&i2c5 {
status = "okay";
};
+&i2s {
+ status = "okay";
+};
+
&pinctrl {
ir {
ir_int: ir-int {
@@ -190,12 +232,28 @@
};
};
+ headphone {
+ hp_det: hp-det {
+ rockchip,pins = <7 7 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ phone_ctl: phone-ctl {
+ rockchip,pins = <8 0 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+ };
+
usb {
host_vbus_drv: host-vbus-drv {
rockchip,pins = <0 14 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
+ sata {
+ sata_pwr_en: sata-pwr-en {
+ rockchip,pins = <0 13 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
sdmmc {
sdmmc_pwr: sdmmc-pwr {
rockchip,pins = <7 11 RK_FUNC_GPIO &pcfg_pull_none>;
@@ -224,3 +282,7 @@
&usb_host0_ehci {
status = "okay";
};
+
+&usb_host1 {
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/rk3288-tinker.dts b/arch/arm/boot/dts/rk3288-tinker.dts
new file mode 100644
index 000000000000..f601c78386a9
--- /dev/null
+++ b/arch/arm/boot/dts/rk3288-tinker.dts
@@ -0,0 +1,536 @@
+/*
+ * Copyright (c) 2017 Fuzhou Rockchip Electronics Co., Ltd.
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+
+#include "rk3288.dtsi"
+#include <dt-bindings/input/input.h>
+
+/ {
+ model = "Rockchip RK3288 Tinker Board";
+ compatible = "asus,rk3288-tinker", "rockchip,rk3288";
+
+ memory {
+ reg = <0x0 0x80000000>;
+ device_type = "memory";
+ };
+
+ ext_gmac: external-gmac-clock {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <125000000>;
+ clock-output-names = "ext_gmac";
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ autorepeat;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pwrbtn>;
+
+ button@0 {
+ gpios = <&gpio0 RK_PA5 GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_POWER>;
+ label = "GPIO Key Power";
+ linux,input-type = <1>;
+ wakeup-source;
+ debounce-interval = <100>;
+ };
+ };
+
+ gpio-leds {
+ compatible = "gpio-leds";
+
+ act-led {
+ gpios=<&gpio1 RK_PD0 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger="mmc0";
+ };
+
+ heartbeat-led {
+ gpios=<&gpio1 RK_PD1 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger="heartbeat";
+ };
+
+ pwr-led {
+ gpios = <&gpio0 RK_PA3 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "default-on";
+ };
+ };
+
+ sound {
+ compatible = "simple-audio-card";
+ simple-audio-card,format = "i2s";
+ simple-audio-card,name = "rockchip,tinker-codec";
+ simple-audio-card,mclk-fs = <512>;
+
+ simple-audio-card,codec {
+ sound-dai = <&hdmi>;
+ };
+
+ simple-audio-card,cpu {
+ sound-dai = <&i2s>;
+ };
+ };
+
+ vcc_sys: vsys-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc_sys";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ vcc_sd: sdmmc-regulator {
+ compatible = "regulator-fixed";
+ gpio = <&gpio7 11 GPIO_ACTIVE_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&sdmmc_pwr>;
+ regulator-name = "vcc_sd";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ startup-delay-us = <100000>;
+ vin-supply = <&vcc_io>;
+ };
+};
+
+&cpu0 {
+ cpu0-supply = <&vdd_cpu>;
+};
+
+&gmac {
+ assigned-clocks = <&cru SCLK_MAC>;
+ assigned-clock-parents = <&ext_gmac>;
+ clock_in_out = "input";
+ phy-mode = "rgmii";
+ phy-supply = <&vcc33_lan>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&rgmii_pins>;
+ snps,reset-gpio = <&gpio4 7 0>;
+ snps,reset-active-low;
+ snps,reset-delays-us = <0 10000 1000000>;
+ tx_delay = <0x30>;
+ rx_delay = <0x10>;
+ status = "ok";
+};
+
+&hdmi {
+ ddc-i2c-bus = <&i2c5>;
+ status = "okay";
+};
+
+&i2c0 {
+ clock-frequency = <400000>;
+ status = "okay";
+
+ rk808: pmic@1b {
+ compatible = "rockchip,rk808";
+ reg = <0x1b>;
+ interrupt-parent = <&gpio0>;
+ interrupts = <4 IRQ_TYPE_LEVEL_LOW>;
+ #clock-cells = <1>;
+ clock-output-names = "xin32k", "rk808-clkout2";
+ dvs-gpios = <&gpio0 11 GPIO_ACTIVE_HIGH>,
+ <&gpio0 12 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pmic_int &global_pwroff &dvs_1 &dvs_2>;
+ rockchip,system-power-controller;
+ wakeup-source;
+
+ vcc1-supply = <&vcc_sys>;
+ vcc2-supply = <&vcc_sys>;
+ vcc3-supply = <&vcc_sys>;
+ vcc4-supply = <&vcc_sys>;
+ vcc6-supply = <&vcc_sys>;
+ vcc7-supply = <&vcc_sys>;
+ vcc8-supply = <&vcc_io>;
+ vcc9-supply = <&vcc_io>;
+ vcc10-supply = <&vcc_io>;
+ vcc11-supply = <&vcc_sys>;
+ vcc12-supply = <&vcc_io>;
+ vddio-supply = <&vcc_io>;
+
+ regulators {
+ vdd_cpu: DCDC_REG1 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <750000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-name = "vdd_arm";
+ regulator-ramp-delay = <6000>;
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_gpu: DCDC_REG2 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <850000>;
+ regulator-max-microvolt = <1250000>;
+ regulator-name = "vdd_gpu";
+ regulator-ramp-delay = <6000>;
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1000000>;
+ };
+ };
+
+ vcc_ddr: DCDC_REG3 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-name = "vcc_ddr";
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ };
+ };
+
+ vcc_io: DCDC_REG4 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vcc_io";
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <3300000>;
+ };
+ };
+
+ vcc18_ldo1: LDO_REG1 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "vcc18_ldo1";
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1800000>;
+ };
+ };
+
+ vcc33_mipi: LDO_REG2 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vcc33_mipi";
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_10: LDO_REG3 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1000000>;
+ regulator-name = "vdd_10";
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1000000>;
+ };
+ };
+
+ vcc18_codec: LDO_REG4 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "vcc18_codec";
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1800000>;
+ };
+ };
+
+ vccio_sd: LDO_REG5 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vccio_sd";
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <3300000>;
+ };
+ };
+
+ vdd10_lcd: LDO_REG6 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1000000>;
+ regulator-name = "vdd10_lcd";
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1000000>;
+ };
+ };
+
+ vcc_18: LDO_REG7 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "vcc_18";
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1800000>;
+ };
+ };
+
+ vcc18_lcd: LDO_REG8 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "vcc18_lcd";
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1800000>;
+ };
+ };
+
+ vcc33_sd: SWITCH_REG1 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-name = "vcc33_sd";
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ };
+ };
+
+ vcc33_lan: SWITCH_REG2 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-name = "vcc33_lan";
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ };
+ };
+ };
+ };
+};
+
+&i2c2 {
+ status = "okay";
+};
+
+&i2c5 {
+ status = "okay";
+};
+
+&i2s {
+ #sound-dai-cells = <0>;
+ status = "okay";
+};
+
+&io_domains {
+ status = "okay";
+
+ sdcard-supply = <&vccio_sd>;
+};
+
+&pinctrl {
+ pcfg_pull_none_drv_8ma: pcfg-pull-none-drv-8ma {
+ drive-strength = <8>;
+ };
+
+ pcfg_pull_up_drv_8ma: pcfg-pull-up-drv-8ma {
+ bias-pull-up;
+ drive-strength = <8>;
+ };
+
+ backlight {
+ bl_en: bl-en {
+ rockchip,pins = <7 2 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ buttons {
+ pwrbtn: pwrbtn {
+ rockchip,pins = <0 5 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+ };
+
+ eth_phy {
+ eth_phy_pwr: eth-phy-pwr {
+ rockchip,pins = <0 6 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ pmic {
+ pmic_int: pmic-int {
+ rockchip,pins = <RK_GPIO0 4 RK_FUNC_GPIO \
+ &pcfg_pull_up>;
+ };
+
+ dvs_1: dvs-1 {
+ rockchip,pins = <RK_GPIO0 11 RK_FUNC_GPIO \
+ &pcfg_pull_down>;
+ };
+
+ dvs_2: dvs-2 {
+ rockchip,pins = <RK_GPIO0 12 RK_FUNC_GPIO \
+ &pcfg_pull_down>;
+ };
+ };
+
+ sdmmc {
+ sdmmc_bus4: sdmmc-bus4 {
+ rockchip,pins = <6 16 RK_FUNC_1 &pcfg_pull_up_drv_8ma>,
+ <6 17 RK_FUNC_1 &pcfg_pull_up_drv_8ma>,
+ <6 18 RK_FUNC_1 &pcfg_pull_up_drv_8ma>,
+ <6 19 RK_FUNC_1 &pcfg_pull_up_drv_8ma>;
+ };
+
+ sdmmc_clk: sdmmc-clk {
+ rockchip,pins = <6 20 RK_FUNC_1 \
+ &pcfg_pull_none_drv_8ma>;
+ };
+
+ sdmmc_cmd: sdmmc-cmd {
+ rockchip,pins = <6 21 RK_FUNC_1 &pcfg_pull_up_drv_8ma>;
+ };
+
+ sdmmc_pwr: sdmmc-pwr {
+ rockchip,pins = <7 11 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ usb {
+ host_vbus_drv: host-vbus-drv {
+ rockchip,pins = <0 14 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ pwr_3g: pwr-3g {
+ rockchip,pins = <7 8 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+};
+
+&pwm0 {
+ status = "okay";
+};
+
+&saradc {
+ vref-supply = <&vcc18_ldo1>;
+ status ="okay";
+};
+
+&sdmmc {
+ bus-width = <4>;
+ cap-mmc-highspeed;
+ cap-sd-highspeed;
+ card-detect-delay = <200>;
+ disable-wp; /* wp not hooked up */
+ num-slots = <1>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&sdmmc_clk &sdmmc_cmd &sdmmc_cd &sdmmc_bus4>;
+ status = "okay";
+ vmmc-supply = <&vcc33_sd>;
+ vqmmc-supply = <&vccio_sd>;
+};
+
+&tsadc {
+ rockchip,hw-tshut-mode = <1>; /* tshut mode 0:CRU 1:GPIO */
+ rockchip,hw-tshut-polarity = <1>; /* tshut polarity 0:LOW 1:HIGH */
+ status = "okay";
+};
+
+&uart0 {
+ status = "okay";
+};
+
+&uart1 {
+ status = "okay";
+};
+
+&uart2 {
+ status = "okay";
+};
+
+&uart3 {
+ status = "okay";
+};
+
+&uart4 {
+ status = "okay";
+};
+
+&usbphy {
+ status = "okay";
+};
+
+&usb_host0_ehci {
+ status = "okay";
+};
+
+&usb_host1 {
+ status = "okay";
+};
+
+&usb_otg {
+ status= "okay";
+};
+
+&vopb {
+ status = "okay";
+};
+
+&vopb_mmu {
+ status = "okay";
+};
+
+&vopl {
+ status = "okay";
+};
+
+&vopl_mmu {
+ status = "okay";
+};
+
+&wdt {
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/rk3288.dtsi b/arch/arm/boot/dts/rk3288.dtsi
index df8a0dbe9d91..ad5d6022e95f 100644
--- a/arch/arm/boot/dts/rk3288.dtsi
+++ b/arch/arm/boot/dts/rk3288.dtsi
@@ -236,6 +236,8 @@
fifo-depth = <0x100>;
interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
reg = <0xff0c0000 0x4000>;
+ resets = <&cru SRST_MMC0>;
+ reset-names = "reset";
status = "disabled";
};
@@ -248,6 +250,8 @@
fifo-depth = <0x100>;
interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>;
reg = <0xff0d0000 0x4000>;
+ resets = <&cru SRST_SDIO0>;
+ reset-names = "reset";
status = "disabled";
};
@@ -260,6 +264,8 @@
fifo-depth = <0x100>;
interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>;
reg = <0xff0e0000 0x4000>;
+ resets = <&cru SRST_SDIO1>;
+ reset-names = "reset";
status = "disabled";
};
@@ -272,6 +278,8 @@
fifo-depth = <0x100>;
interrupts = <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>;
reg = <0xff0f0000 0x4000>;
+ resets = <&cru SRST_EMMC>;
+ reset-names = "reset";
status = "disabled";
};
diff --git a/arch/arm/boot/dts/rk3xxx.dtsi b/arch/arm/boot/dts/rk3xxx.dtsi
index 0b45811cf28b..4aa6f60d6a22 100644
--- a/arch/arm/boot/dts/rk3xxx.dtsi
+++ b/arch/arm/boot/dts/rk3xxx.dtsi
@@ -132,14 +132,14 @@
global_timer: global-timer@1013c200 {
compatible = "arm,cortex-a9-global-timer";
reg = <0x1013c200 0x20>;
- interrupts = <GIC_PPI 11 0x304>;
+ interrupts = <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_EDGE_RISING)>;
clocks = <&cru CORE_PERI>;
};
local_timer: local-timer@1013c600 {
compatible = "arm,cortex-a9-twd-timer";
reg = <0x1013c600 0x20>;
- interrupts = <GIC_PPI 13 0x304>;
+ interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_EDGE_RISING)>;
clocks = <&cru CORE_PERI>;
};
@@ -223,7 +223,11 @@
interrupts = <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cru HCLK_SDMMC>, <&cru SCLK_SDMMC>;
clock-names = "biu", "ciu";
+ dmas = <&dmac2 1>;
+ dma-names = "rx-tx";
fifo-depth = <256>;
+ resets = <&cru SRST_SDMMC>;
+ reset-names = "reset";
status = "disabled";
};
@@ -233,7 +237,11 @@
interrupts = <GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cru HCLK_SDIO>, <&cru SCLK_SDIO>;
clock-names = "biu", "ciu";
+ dmas = <&dmac2 3>;
+ dma-names = "rx-tx";
fifo-depth = <256>;
+ resets = <&cru SRST_SDIO>;
+ reset-names = "reset";
status = "disabled";
};
@@ -243,7 +251,11 @@
interrupts = <GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cru HCLK_EMMC>, <&cru SCLK_EMMC>;
clock-names = "biu", "ciu";
+ dmas = <&dmac2 4>;
+ dma-names = "rx-tx";
fifo-depth = <256>;
+ resets = <&cru SRST_EMMC>;
+ reset-names = "reset";
status = "disabled";
};
diff --git a/arch/arm/boot/dts/s3c64xx.dtsi b/arch/arm/boot/dts/s3c64xx.dtsi
index 0ccb414cd268..c55cbb3af2c0 100644
--- a/arch/arm/boot/dts/s3c64xx.dtsi
+++ b/arch/arm/boot/dts/s3c64xx.dtsi
@@ -94,13 +94,12 @@
};
watchdog: watchdog@7e004000 {
- compatible = "samsung,s3c2410-wdt";
+ compatible = "samsung,s3c6410-wdt";
reg = <0x7e004000 0x1000>;
interrupt-parent = <&vic0>;
interrupts = <26>;
clock-names = "watchdog";
clocks = <&clocks PCLK_WDT>;
- status = "disabled";
};
i2c0: i2c@7f004000 {
diff --git a/arch/arm/boot/dts/s5pv210.dtsi b/arch/arm/boot/dts/s5pv210.dtsi
index a853918be43f..726c5d0dbd5b 100644
--- a/arch/arm/boot/dts/s5pv210.dtsi
+++ b/arch/arm/boot/dts/s5pv210.dtsi
@@ -310,7 +310,7 @@
};
watchdog: watchdog@e2700000 {
- compatible = "samsung,s3c2410-wdt";
+ compatible = "samsung,s3c6410-wdt";
reg = <0xe2700000 0x1000>;
interrupt-parent = <&vic0>;
interrupts = <26>;
diff --git a/arch/arm/boot/dts/sama5d2.dtsi b/arch/arm/boot/dts/sama5d2.dtsi
index 528b4e9c6d3d..8067c71c3a38 100644
--- a/arch/arm/boot/dts/sama5d2.dtsi
+++ b/arch/arm/boot/dts/sama5d2.dtsi
@@ -1305,6 +1305,11 @@
status = "okay";
};
+ sfrbu: sfr@fc05c000 {
+ compatible = "atmel,sama5d2-sfrbu", "syscon";
+ reg = <0xfc05c000 0x20>;
+ };
+
chipid@fc069000 {
compatible = "atmel,sama5d2-chipid";
reg = <0xfc069000 0x8>;
diff --git a/arch/arm/boot/dts/socfpga.dtsi b/arch/arm/boot/dts/socfpga.dtsi
index 2c43c4d85dee..b2674bdb8e6a 100644
--- a/arch/arm/boot/dts/socfpga.dtsi
+++ b/arch/arm/boot/dts/socfpga.dtsi
@@ -15,7 +15,6 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#include "skeleton.dtsi"
#include <dt-bindings/reset/altr,rst-mgr.h>
/ {
@@ -38,13 +37,13 @@
#size-cells = <0>;
enable-method = "altr,socfpga-smp";
- cpu@0 {
+ cpu0: cpu@0 {
compatible = "arm,cortex-a9";
device_type = "cpu";
reg = <0>;
next-level-cache = <&L2>;
};
- cpu@1 {
+ cpu1: cpu@1 {
compatible = "arm,cortex-a9";
device_type = "cpu";
reg = <1>;
@@ -52,6 +51,15 @@
};
};
+ pmu: pmu@ff111000 {
+ compatible = "arm,cortex-a9-pmu";
+ interrupt-parent = <&intc>;
+ interrupts = <0 176 4>, <0 177 4>;
+ interrupt-affinity = <&cpu0>, <&cpu1>;
+ reg = <0xff111000 0x1000>,
+ <0xff113000 0x1000>;
+ };
+
intc: intc@fffed000 {
compatible = "arm,cortex-a9-gic";
#interrupt-cells = <3>;
@@ -145,7 +153,7 @@
compatible = "fixed-clock";
};
- main_pll: main_pll {
+ main_pll: main_pll@40 {
#address-cells = <1>;
#size-cells = <0>;
#clock-cells = <0>;
@@ -153,7 +161,7 @@
clocks = <&osc1>;
reg = <0x40>;
- mpuclk: mpuclk {
+ mpuclk: mpuclk@48 {
#clock-cells = <0>;
compatible = "altr,socfpga-perip-clk";
clocks = <&main_pll>;
@@ -161,7 +169,7 @@
reg = <0x48>;
};
- mainclk: mainclk {
+ mainclk: mainclk@4c {
#clock-cells = <0>;
compatible = "altr,socfpga-perip-clk";
clocks = <&main_pll>;
@@ -169,7 +177,7 @@
reg = <0x4C>;
};
- dbg_base_clk: dbg_base_clk {
+ dbg_base_clk: dbg_base_clk@50 {
#clock-cells = <0>;
compatible = "altr,socfpga-perip-clk";
clocks = <&main_pll>, <&osc1>;
@@ -177,21 +185,21 @@
reg = <0x50>;
};
- main_qspi_clk: main_qspi_clk {
+ main_qspi_clk: main_qspi_clk@54 {
#clock-cells = <0>;
compatible = "altr,socfpga-perip-clk";
clocks = <&main_pll>;
reg = <0x54>;
};
- main_nand_sdmmc_clk: main_nand_sdmmc_clk {
+ main_nand_sdmmc_clk: main_nand_sdmmc_clk@58 {
#clock-cells = <0>;
compatible = "altr,socfpga-perip-clk";
clocks = <&main_pll>;
reg = <0x58>;
};
- cfg_h2f_usr0_clk: cfg_h2f_usr0_clk {
+ cfg_h2f_usr0_clk: cfg_h2f_usr0_clk@5c {
#clock-cells = <0>;
compatible = "altr,socfpga-perip-clk";
clocks = <&main_pll>;
@@ -199,7 +207,7 @@
};
};
- periph_pll: periph_pll {
+ periph_pll: periph_pll@80 {
#address-cells = <1>;
#size-cells = <0>;
#clock-cells = <0>;
@@ -207,42 +215,42 @@
clocks = <&osc1>, <&osc2>, <&f2s_periph_ref_clk>;
reg = <0x80>;
- emac0_clk: emac0_clk {
+ emac0_clk: emac0_clk@88 {
#clock-cells = <0>;
compatible = "altr,socfpga-perip-clk";
clocks = <&periph_pll>;
reg = <0x88>;
};
- emac1_clk: emac1_clk {
+ emac1_clk: emac1_clk@8c {
#clock-cells = <0>;
compatible = "altr,socfpga-perip-clk";
clocks = <&periph_pll>;
reg = <0x8C>;
};
- per_qspi_clk: per_qsi_clk {
+ per_qspi_clk: per_qsi_clk@90 {
#clock-cells = <0>;
compatible = "altr,socfpga-perip-clk";
clocks = <&periph_pll>;
reg = <0x90>;
};
- per_nand_mmc_clk: per_nand_mmc_clk {
+ per_nand_mmc_clk: per_nand_mmc_clk@94 {
#clock-cells = <0>;
compatible = "altr,socfpga-perip-clk";
clocks = <&periph_pll>;
reg = <0x94>;
};
- per_base_clk: per_base_clk {
+ per_base_clk: per_base_clk@98 {
#clock-cells = <0>;
compatible = "altr,socfpga-perip-clk";
clocks = <&periph_pll>;
reg = <0x98>;
};
- h2f_usr1_clk: h2f_usr1_clk {
+ h2f_usr1_clk: h2f_usr1_clk@9c {
#clock-cells = <0>;
compatible = "altr,socfpga-perip-clk";
clocks = <&periph_pll>;
@@ -250,7 +258,7 @@
};
};
- sdram_pll: sdram_pll {
+ sdram_pll: sdram_pll@c0 {
#address-cells = <1>;
#size-cells = <0>;
#clock-cells = <0>;
@@ -258,28 +266,28 @@
clocks = <&osc1>, <&osc2>, <&f2s_sdram_ref_clk>;
reg = <0xC0>;
- ddr_dqs_clk: ddr_dqs_clk {
+ ddr_dqs_clk: ddr_dqs_clk@c8 {
#clock-cells = <0>;
compatible = "altr,socfpga-perip-clk";
clocks = <&sdram_pll>;
reg = <0xC8>;
};
- ddr_2x_dqs_clk: ddr_2x_dqs_clk {
+ ddr_2x_dqs_clk: ddr_2x_dqs_clk@cc {
#clock-cells = <0>;
compatible = "altr,socfpga-perip-clk";
clocks = <&sdram_pll>;
reg = <0xCC>;
};
- ddr_dq_clk: ddr_dq_clk {
+ ddr_dq_clk: ddr_dq_clk@d0 {
#clock-cells = <0>;
compatible = "altr,socfpga-perip-clk";
clocks = <&sdram_pll>;
reg = <0xD0>;
};
- h2f_usr2_clk: h2f_usr2_clk {
+ h2f_usr2_clk: h2f_usr2_clk@d4 {
#clock-cells = <0>;
compatible = "altr,socfpga-perip-clk";
clocks = <&sdram_pll>;
@@ -678,7 +686,7 @@
status = "disabled";
};
- eccmgr: eccmgr@ffd08140 {
+ eccmgr: eccmgr {
compatible = "altr,socfpga-ecc-manager";
#address-cells = <1>;
#size-cells = <1>;
@@ -879,7 +887,7 @@
dma-names = "tx", "rx";
};
- usbphy0: usbphy@0 {
+ usbphy0: usbphy {
#phy-cells = <0>;
compatible = "usb-nop-xceiv";
status = "okay";
diff --git a/arch/arm/boot/dts/socfpga_arria10.dtsi b/arch/arm/boot/dts/socfpga_arria10.dtsi
index 6b0b7463f36f..bead79e4b2aa 100644
--- a/arch/arm/boot/dts/socfpga_arria10.dtsi
+++ b/arch/arm/boot/dts/socfpga_arria10.dtsi
@@ -14,7 +14,6 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#include "skeleton.dtsi"
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/reset/altr,rst-mgr-a10.h>
@@ -119,7 +118,7 @@
compatible = "fixed-clock";
};
- main_pll: main_pll {
+ main_pll: main_pll@40 {
#address-cells = <1>;
#size-cells = <0>;
#clock-cells = <0>;
@@ -142,35 +141,35 @@
div-reg = <0x144 0 11>;
};
- main_emaca_clk: main_emaca_clk {
+ main_emaca_clk: main_emaca_clk@68 {
#clock-cells = <0>;
compatible = "altr,socfpga-a10-perip-clk";
clocks = <&main_pll>;
reg = <0x68>;
};
- main_emacb_clk: main_emacb_clk {
+ main_emacb_clk: main_emacb_clk@6c {
#clock-cells = <0>;
compatible = "altr,socfpga-a10-perip-clk";
clocks = <&main_pll>;
reg = <0x6C>;
};
- main_emac_ptp_clk: main_emac_ptp_clk {
+ main_emac_ptp_clk: main_emac_ptp_clk@70 {
#clock-cells = <0>;
compatible = "altr,socfpga-a10-perip-clk";
clocks = <&main_pll>;
reg = <0x70>;
};
- main_gpio_db_clk: main_gpio_db_clk {
+ main_gpio_db_clk: main_gpio_db_clk@74 {
#clock-cells = <0>;
compatible = "altr,socfpga-a10-perip-clk";
clocks = <&main_pll>;
reg = <0x74>;
};
- main_sdmmc_clk: main_sdmmc_clk {
+ main_sdmmc_clk: main_sdmmc_clk@78 {
#clock-cells = <0>;
compatible = "altr,socfpga-a10-perip-clk"
;
@@ -178,28 +177,28 @@
reg = <0x78>;
};
- main_s2f_usr0_clk: main_s2f_usr0_clk {
+ main_s2f_usr0_clk: main_s2f_usr0_clk@7c {
#clock-cells = <0>;
compatible = "altr,socfpga-a10-perip-clk";
clocks = <&main_pll>;
reg = <0x7C>;
};
- main_s2f_usr1_clk: main_s2f_usr1_clk {
+ main_s2f_usr1_clk: main_s2f_usr1_clk@80 {
#clock-cells = <0>;
compatible = "altr,socfpga-a10-perip-clk";
clocks = <&main_pll>;
reg = <0x80>;
};
- main_hmc_pll_ref_clk: main_hmc_pll_ref_clk {
+ main_hmc_pll_ref_clk: main_hmc_pll_ref_clk@84 {
#clock-cells = <0>;
compatible = "altr,socfpga-a10-perip-clk";
clocks = <&main_pll>;
reg = <0x84>;
};
- main_periph_ref_clk: main_periph_ref_clk {
+ main_periph_ref_clk: main_periph_ref_clk@9c {
#clock-cells = <0>;
compatible = "altr,socfpga-a10-perip-clk";
clocks = <&main_pll>;
@@ -207,7 +206,7 @@
};
};
- periph_pll: periph_pll {
+ periph_pll: periph_pll@c0 {
#address-cells = <1>;
#size-cells = <0>;
#clock-cells = <0>;
@@ -230,56 +229,56 @@
div-reg = <0x144 16 11>;
};
- peri_emaca_clk: peri_emaca_clk {
+ peri_emaca_clk: peri_emaca_clk@e8 {
#clock-cells = <0>;
compatible = "altr,socfpga-a10-perip-clk";
clocks = <&periph_pll>;
reg = <0xE8>;
};
- peri_emacb_clk: peri_emacb_clk {
+ peri_emacb_clk: peri_emacb_clk@ec {
#clock-cells = <0>;
compatible = "altr,socfpga-a10-perip-clk";
clocks = <&periph_pll>;
reg = <0xEC>;
};
- peri_emac_ptp_clk: peri_emac_ptp_clk {
+ peri_emac_ptp_clk: peri_emac_ptp_clk@f0 {
#clock-cells = <0>;
compatible = "altr,socfpga-a10-perip-clk";
clocks = <&periph_pll>;
reg = <0xF0>;
};
- peri_gpio_db_clk: peri_gpio_db_clk {
+ peri_gpio_db_clk: peri_gpio_db_clk@f4 {
#clock-cells = <0>;
compatible = "altr,socfpga-a10-perip-clk";
clocks = <&periph_pll>;
reg = <0xF4>;
};
- peri_sdmmc_clk: peri_sdmmc_clk {
+ peri_sdmmc_clk: peri_sdmmc_clk@f8 {
#clock-cells = <0>;
compatible = "altr,socfpga-a10-perip-clk";
clocks = <&periph_pll>;
reg = <0xF8>;
};
- peri_s2f_usr0_clk: peri_s2f_usr0_clk {
+ peri_s2f_usr0_clk: peri_s2f_usr0_clk@fc {
#clock-cells = <0>;
compatible = "altr,socfpga-a10-perip-clk";
clocks = <&periph_pll>;
reg = <0xFC>;
};
- peri_s2f_usr1_clk: peri_s2f_usr1_clk {
+ peri_s2f_usr1_clk: peri_s2f_usr1_clk@100 {
#clock-cells = <0>;
compatible = "altr,socfpga-a10-perip-clk";
clocks = <&periph_pll>;
reg = <0x100>;
};
- peri_hmc_pll_ref_clk: peri_hmc_pll_ref_clk {
+ peri_hmc_pll_ref_clk: peri_hmc_pll_ref_clk@104 {
#clock-cells = <0>;
compatible = "altr,socfpga-a10-perip-clk";
clocks = <&periph_pll>;
@@ -287,7 +286,7 @@
};
};
- mpu_free_clk: mpu_free_clk {
+ mpu_free_clk: mpu_free_clk@60 {
#clock-cells = <0>;
compatible = "altr,socfpga-a10-perip-clk";
clocks = <&main_mpu_base_clk>, <&peri_mpu_base_clk>,
@@ -296,7 +295,7 @@
reg = <0x60>;
};
- noc_free_clk: noc_free_clk {
+ noc_free_clk: noc_free_clk@64 {
#clock-cells = <0>;
compatible = "altr,socfpga-a10-perip-clk";
clocks = <&main_noc_base_clk>, <&peri_noc_base_clk>,
@@ -305,7 +304,7 @@
reg = <0x64>;
};
- s2f_user1_free_clk: s2f_user1_free_clk {
+ s2f_user1_free_clk: s2f_user1_free_clk@104 {
#clock-cells = <0>;
compatible = "altr,socfpga-a10-perip-clk";
clocks = <&main_s2f_usr1_clk>, <&peri_s2f_usr1_clk>,
@@ -314,7 +313,7 @@
reg = <0x104>;
};
- sdmmc_free_clk: sdmmc_free_clk {
+ sdmmc_free_clk: sdmmc_free_clk@f8 {
#clock-cells = <0>;
compatible = "altr,socfpga-a10-perip-clk";
clocks = <&main_sdmmc_clk>, <&peri_sdmmc_clk>,
@@ -649,7 +648,7 @@
reg = <0xffe00000 0x40000>;
};
- eccmgr: eccmgr@ffd06000 {
+ eccmgr: eccmgr {
compatible = "altr,socfpga-a10-ecc-manager";
altr,sysmgr-syscon = <&sysmgr>;
#address-cells = <1>;
@@ -806,7 +805,7 @@
status = "disabled";
};
- usbphy0: usbphy@0 {
+ usbphy0: usbphy {
#phy-cells = <0>;
compatible = "usb-nop-xceiv";
status = "okay";
diff --git a/arch/arm/boot/dts/socfpga_arria10_socdk.dtsi b/arch/arm/boot/dts/socfpga_arria10_socdk.dtsi
index c57e6cea0d83..94e088473823 100644
--- a/arch/arm/boot/dts/socfpga_arria10_socdk.dtsi
+++ b/arch/arm/boot/dts/socfpga_arria10_socdk.dtsi
@@ -30,7 +30,7 @@
stdout-path = "serial0:115200n8";
};
- memory {
+ memory@0 {
name = "memory";
device_type = "memory";
reg = <0x0 0x40000000>; /* 1GB */
@@ -121,6 +121,11 @@
gpio-controller;
#gpio-cells = <2>;
};
+
+ a10sr_rst: reset-controller {
+ compatible = "altr,a10sr-reset";
+ #reset-cells = <1>;
+ };
};
};
diff --git a/arch/arm/boot/dts/socfpga_arria5_socdk.dts b/arch/arm/boot/dts/socfpga_arria5_socdk.dts
index 8672edf9ba4e..aac4feea86f3 100644
--- a/arch/arm/boot/dts/socfpga_arria5_socdk.dts
+++ b/arch/arm/boot/dts/socfpga_arria5_socdk.dts
@@ -26,7 +26,7 @@
stdout-path = "serial0:115200n8";
};
- memory {
+ memory@0 {
name = "memory";
device_type = "memory";
reg = <0x0 0x40000000>; /* 1GB */
diff --git a/arch/arm/boot/dts/socfpga_cyclone5_de0_sockit.dts b/arch/arm/boot/dts/socfpga_cyclone5_de0_sockit.dts
index 5ecd2ef405e3..7b49395452b6 100644
--- a/arch/arm/boot/dts/socfpga_cyclone5_de0_sockit.dts
+++ b/arch/arm/boot/dts/socfpga_cyclone5_de0_sockit.dts
@@ -25,7 +25,7 @@
stdout-path = "serial0:115200n8";
};
- memory {
+ memory@0 {
name = "memory";
device_type = "memory";
reg = <0x0 0x40000000>; /* 1GB */
diff --git a/arch/arm/boot/dts/socfpga_cyclone5_mcv.dtsi b/arch/arm/boot/dts/socfpga_cyclone5_mcv.dtsi
index 6ad3b1eb9b86..3c03da6b8b1d 100644
--- a/arch/arm/boot/dts/socfpga_cyclone5_mcv.dtsi
+++ b/arch/arm/boot/dts/socfpga_cyclone5_mcv.dtsi
@@ -21,7 +21,7 @@
model = "Aries/DENX MCV";
compatible = "altr,socfpga-cyclone5", "altr,socfpga";
- memory {
+ memory@0 {
name = "memory";
device_type = "memory";
reg = <0x0 0x40000000>; /* 1 GiB */
diff --git a/arch/arm/boot/dts/socfpga_cyclone5_mcvevk.dts b/arch/arm/boot/dts/socfpga_cyclone5_mcvevk.dts
index e5a98e5696ca..21e397287e29 100644
--- a/arch/arm/boot/dts/socfpga_cyclone5_mcvevk.dts
+++ b/arch/arm/boot/dts/socfpga_cyclone5_mcvevk.dts
@@ -71,7 +71,6 @@
stmpe_touchscreen {
compatible = "st,stmpe-ts";
- reg = <0>;
ts,sample-time = <4>;
ts,mod-12b = <1>;
ts,ref-sel = <0>;
diff --git a/arch/arm/boot/dts/socfpga_cyclone5_socdk.dts b/arch/arm/boot/dts/socfpga_cyclone5_socdk.dts
index 7ea32c81e720..155829f9eba1 100644
--- a/arch/arm/boot/dts/socfpga_cyclone5_socdk.dts
+++ b/arch/arm/boot/dts/socfpga_cyclone5_socdk.dts
@@ -26,7 +26,7 @@
stdout-path = "serial0:115200n8";
};
- memory {
+ memory@0 {
name = "memory";
device_type = "memory";
reg = <0x0 0x40000000>; /* 1GB */
diff --git a/arch/arm/boot/dts/socfpga_cyclone5_sockit.dts b/arch/arm/boot/dts/socfpga_cyclone5_sockit.dts
index a0c90b3bdfd1..a4a555c19d94 100644
--- a/arch/arm/boot/dts/socfpga_cyclone5_sockit.dts
+++ b/arch/arm/boot/dts/socfpga_cyclone5_sockit.dts
@@ -26,7 +26,7 @@
stdout-path = "serial0:115200n8";
};
- memory {
+ memory@0 {
name = "memory";
device_type = "memory";
reg = <0x0 0x40000000>; /* 1GB */
diff --git a/arch/arm/boot/dts/socfpga_cyclone5_socrates.dts b/arch/arm/boot/dts/socfpga_cyclone5_socrates.dts
index c3d52f27b21e..53bf99eef66d 100644
--- a/arch/arm/boot/dts/socfpga_cyclone5_socrates.dts
+++ b/arch/arm/boot/dts/socfpga_cyclone5_socrates.dts
@@ -25,7 +25,7 @@
bootargs = "console=ttyS0,115200";
};
- memory {
+ memory@0 {
name = "memory";
device_type = "memory";
reg = <0x0 0x40000000>; /* 1GB */
@@ -60,18 +60,18 @@
&leds {
compatible = "gpio-leds";
- led@0 {
+ led0 {
label = "led:green:heartbeat";
gpios = <&porta 28 1>;
linux,default-trigger = "heartbeat";
};
- led@1 {
+ led1 {
label = "led:green:D7";
gpios = <&portb 19 1>;
};
- led@2 {
+ led2 {
label = "led:green:D8";
gpios = <&portb 25 1>;
};
diff --git a/arch/arm/boot/dts/socfpga_cyclone5_sodia.dts b/arch/arm/boot/dts/socfpga_cyclone5_sodia.dts
index 5b7e3c27e6e9..8860dd2e242c 100644
--- a/arch/arm/boot/dts/socfpga_cyclone5_sodia.dts
+++ b/arch/arm/boot/dts/socfpga_cyclone5_sodia.dts
@@ -28,7 +28,7 @@
stdout-path = "serial0:115200n8";
};
- memory {
+ memory@0 {
name = "memory";
device_type = "memory";
reg = <0x0 0x40000000>;
@@ -121,3 +121,24 @@
&usb1 {
status = "okay";
};
+
+&qspi {
+ status = "okay";
+
+ flash0: n25q512a@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "n25q512a";
+ reg = <0>;
+ spi-max-frequency = <100000000>;
+
+ m25p,fast-read;
+ cdns,page-size = <256>;
+ cdns,block-size = <16>;
+ cdns,read-delay = <4>;
+ cdns,tshsl-ns = <50>;
+ cdns,tsd2d-ns = <50>;
+ cdns,tchsh-ns = <4>;
+ cdns,tslch-ns = <4>;
+ };
+};
diff --git a/arch/arm/boot/dts/socfpga_cyclone5_vining_fpga.dts b/arch/arm/boot/dts/socfpga_cyclone5_vining_fpga.dts
index 363ee62457fe..893198049397 100644
--- a/arch/arm/boot/dts/socfpga_cyclone5_vining_fpga.dts
+++ b/arch/arm/boot/dts/socfpga_cyclone5_vining_fpga.dts
@@ -57,7 +57,7 @@
bootargs = "console=ttyS0,115200";
};
- memory {
+ memory@0 {
name = "memory";
device_type = "memory";
reg = <0x0 0x40000000>; /* 1GB */
diff --git a/arch/arm/boot/dts/socfpga_vt.dts b/arch/arm/boot/dts/socfpga_vt.dts
index f9345e02ca49..dfe2193cd4d5 100644
--- a/arch/arm/boot/dts/socfpga_vt.dts
+++ b/arch/arm/boot/dts/socfpga_vt.dts
@@ -26,7 +26,7 @@
bootargs = "console=ttyS0,57600";
};
- memory {
+ memory@0 {
name = "memory";
device_type = "memory";
reg = <0x0 0x40000000>; /* 1 GB */
diff --git a/arch/arm/boot/dts/spear600-evb.dts b/arch/arm/boot/dts/spear600-evb.dts
index d865a891776d..c67e76c8ba5d 100644
--- a/arch/arm/boot/dts/spear600-evb.dts
+++ b/arch/arm/boot/dts/spear600-evb.dts
@@ -22,95 +22,91 @@
device_type = "memory";
reg = <0 0x10000000>;
};
+};
- ahb {
- clcd@fc200000 {
- status = "okay";
- };
+&clcd {
+ status = "okay";
+};
- dma@fc400000 {
- status = "okay";
- };
+&dmac {
+ status = "okay";
+};
- ehci@e1800000 {
- status = "okay";
- };
+&ehci_usb0 {
+ status = "okay";
+};
- ehci@e2000000 {
- status = "okay";
- };
+&ehci_usb1 {
+ status = "okay";
+};
- gmac: ethernet@e0800000 {
- phy-mode = "gmii";
- status = "okay";
- };
+&gmac {
+ phy-mode = "gmii";
+ status = "okay";
+};
- ohci@e1900000 {
- status = "okay";
- };
+&ohci_usb0 {
+ status = "okay";
+};
- ohci@e2100000 {
- status = "okay";
- };
+&ohci_usb1 {
+ status = "okay";
+};
- smi: flash@fc000000 {
- status = "okay";
- clock-rate=<50000000>;
-
- flash@f8000000 {
- #address-cells = <1>;
- #size-cells = <1>;
- reg = <0xf8000000 0x800000>;
- st,smi-fast-mode;
-
- partition@0 {
- label = "xloader";
- reg = <0x0 0x10000>;
- };
- partition@10000 {
- label = "u-boot";
- reg = <0x10000 0x50000>;
- };
- partition@60000 {
- label = "environment";
- reg = <0x60000 0x10000>;
- };
- partition@70000 {
- label = "dtb";
- reg = <0x70000 0x10000>;
- };
- partition@80000 {
- label = "linux";
- reg = <0x80000 0x310000>;
- };
- partition@390000 {
- label = "rootfs";
- reg = <0x390000 0x0>;
- };
- };
- };
+&smi {
+ status = "okay";
+ clock-rate = <50000000>;
- apb {
- serial@d0000000 {
- status = "okay";
- pinctrl-names = "default";
- pinctrl-0 = <>;
- };
+ flash@f8000000 {
+ reg = <0xf8000000 0x800000>;
+ st,smi-fast-mode;
- serial@d0080000 {
- status = "okay";
- pinctrl-names = "default";
- pinctrl-0 = <>;
- };
+ partitions {
+ compatible = "fixed-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
- rtc@fc900000 {
- status = "okay";
+ partition@0 {
+ label = "xloader";
+ reg = <0x0 0x10000>;
};
-
- i2c@d0200000 {
- clock-frequency = <400000>;
- status = "okay";
+ partition@10000 {
+ label = "u-boot";
+ reg = <0x10000 0x50000>;
+ };
+ partition@60000 {
+ label = "environment";
+ reg = <0x60000 0x10000>;
+ };
+ partition@70000 {
+ label = "dtb";
+ reg = <0x70000 0x10000>;
+ };
+ partition@80000 {
+ label = "linux";
+ reg = <0x80000 0x310000>;
+ };
+ partition@390000 {
+ label = "rootfs";
+ reg = <0x390000 0x0>;
};
};
};
};
+
+&uart0 {
+ status = "okay";
+};
+
+&uart1 {
+ status = "okay";
+};
+
+&rtc {
+ status = "okay";
+};
+
+&i2c {
+ clock-frequency = <400000>;
+ status = "okay";
+};
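The spear600-evb.dts rewrite above drops the nested ahb/apb node overrides in favour of label references, which the spear600.dtsi changes below make possible by labelling each peripheral node. The pattern, sketched for a single node:

	/* spear600.dtsi: declare once, disabled, with a label */
	clcd: clcd@fc200000 {
		compatible = "arm,pl110", "arm,primecell";
		reg = <0xfc200000 0x1000>;
		status = "disabled";
	};

	/* spear600-evb.dts: enable by label, without repeating the bus path */
	&clcd {
		status = "okay";
	};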
diff --git a/arch/arm/boot/dts/spear600.dtsi b/arch/arm/boot/dts/spear600.dtsi
index 9f60a7b6a42b..6b32d20acc9f 100644
--- a/arch/arm/boot/dts/spear600.dtsi
+++ b/arch/arm/boot/dts/spear600.dtsi
@@ -49,7 +49,7 @@
#interrupt-cells = <1>;
};
- clcd@fc200000 {
+ clcd: clcd@fc200000 {
compatible = "arm,pl110", "arm,primecell";
reg = <0xfc200000 0x1000>;
interrupt-parent = <&vic1>;
@@ -57,7 +57,7 @@
status = "disabled";
};
- dma@fc400000 {
+ dmac: dma@fc400000 {
compatible = "arm,pl080", "arm,primecell";
reg = <0xfc400000 0x1000>;
interrupt-parent = <&vic1>;
@@ -97,7 +97,7 @@
status = "disabled";
};
- ehci@e1800000 {
+ ehci_usb0: ehci@e1800000 {
compatible = "st,spear600-ehci", "usb-ehci";
reg = <0xe1800000 0x1000>;
interrupt-parent = <&vic1>;
@@ -105,7 +105,7 @@
status = "disabled";
};
- ehci@e2000000 {
+ ehci_usb1: ehci@e2000000 {
compatible = "st,spear600-ehci", "usb-ehci";
reg = <0xe2000000 0x1000>;
interrupt-parent = <&vic1>;
@@ -113,7 +113,7 @@
status = "disabled";
};
- ohci@e1900000 {
+ ohci_usb0: ohci@e1900000 {
compatible = "st,spear600-ohci", "usb-ohci";
reg = <0xe1900000 0x1000>;
interrupt-parent = <&vic1>;
@@ -121,7 +121,7 @@
status = "disabled";
};
- ohci@e2100000 {
+ ohci_usb1: ohci@e2100000 {
compatible = "st,spear600-ohci", "usb-ohci";
reg = <0xe2100000 0x1000>;
interrupt-parent = <&vic1>;
@@ -135,7 +135,7 @@
compatible = "simple-bus";
ranges = <0xd0000000 0xd0000000 0x30000000>;
- serial@d0000000 {
+ uart0: serial@d0000000 {
compatible = "arm,pl011", "arm,primecell";
reg = <0xd0000000 0x1000>;
interrupt-parent = <&vic0>;
@@ -143,7 +143,7 @@
status = "disabled";
};
- serial@d0080000 {
+ uart1: serial@d0080000 {
compatible = "arm,pl011", "arm,primecell";
reg = <0xd0080000 0x1000>;
interrupt-parent = <&vic0>;
@@ -181,7 +181,7 @@
interrupts = <4>;
};
- i2c@d0200000 {
+ i2c: i2c@d0200000 {
#address-cells = <1>;
#size-cells = <0>;
compatible = "snps,designware-i2c";
@@ -191,7 +191,7 @@
status = "disabled";
};
- rtc@fc900000 {
+ rtc: rtc@fc900000 {
compatible = "st,spear600-rtc";
reg = <0xfc900000 0x1000>;
interrupts = <10>;
@@ -204,6 +204,14 @@
interrupt-parent = <&vic0>;
interrupts = <16>;
};
+
+ adc: adc@d820b000 {
+ compatible = "st,spear600-adc";
+ reg = <0xd820b000 0x1000>;
+ interrupt-parent = <&vic1>;
+ interrupts = <6>;
+ status = "disabled";
+ };
};
};
};
diff --git a/arch/arm/boot/dts/stih407-family.dtsi b/arch/arm/boot/dts/stih407-family.dtsi
index d753ac36788f..12c0757594d7 100644
--- a/arch/arm/boot/dts/stih407-family.dtsi
+++ b/arch/arm/boot/dts/stih407-family.dtsi
@@ -464,6 +464,8 @@
clock-names = "ssc";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_spi1_default>;
+ #address-cells = <1>;
+ #size-cells = <0>;
status = "disabled";
};
@@ -476,6 +478,8 @@
clock-names = "ssc";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_spi2_default>;
+ #address-cells = <1>;
+ #size-cells = <0>;
status = "disabled";
};
@@ -488,6 +492,8 @@
clock-names = "ssc";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_spi3_default>;
+ #address-cells = <1>;
+ #size-cells = <0>;
status = "disabled";
};
@@ -500,6 +506,8 @@
clock-names = "ssc";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_spi4_default>;
+ #address-cells = <1>;
+ #size-cells = <0>;
status = "disabled";
};
@@ -513,6 +521,8 @@
clock-names = "ssc";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_spi10_default>;
+ #address-cells = <1>;
+ #size-cells = <0>;
status = "disabled";
};
@@ -525,6 +535,8 @@
clock-names = "ssc";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_spi11_default>;
+ #address-cells = <1>;
+ #size-cells = <0>;
status = "disabled";
};
@@ -537,6 +549,8 @@
clock-names = "ssc";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_spi12_default>;
+ #address-cells = <1>;
+ #size-cells = <0>;
status = "disabled";
};
@@ -742,18 +756,6 @@
<&clk_s_c0_flexgen CLK_ETH_PHY>;
};
- cec: sti-cec@094a087c {
- compatible = "st,stih-cec";
- reg = <0x94a087c 0x64>;
- clocks = <&clk_sysin>;
- clock-names = "cec-clk";
- interrupts = <GIC_SPI 140 IRQ_TYPE_NONE>;
- interrupt-names = "cec-irq";
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_cec0_default>;
- resets = <&softreset STIH407_LPM_SOFTRESET>;
- };
-
rng10: rng@08a89000 {
compatible = "st,rng";
reg = <0x08a89000 0x1000>;
@@ -801,7 +803,7 @@
status = "okay";
};
- st231_gp0: remote-processor {
+ st231_gp0: st231-gp0@0 {
compatible = "st,st231-rproc";
memory-region = <&gp0_reserved>;
resets = <&softreset STIH407_ST231_GP0_SOFTRESET>;
@@ -814,7 +816,7 @@
mboxes = <&mailbox0 0 2>, <&mailbox2 0 1>, <&mailbox0 0 3>, <&mailbox2 0 0>;
};
- st231_delta: remote-processor {
+ st231_delta: st231-delta@0 {
compatible = "st,st231-rproc";
memory-region = <&delta_reserved>;
resets = <&softreset STIH407_ST231_DMU_SOFTRESET>;
diff --git a/arch/arm/boot/dts/stih410.dtsi b/arch/arm/boot/dts/stih410.dtsi
index 3c9672c5b09f..21fe72b183d8 100644
--- a/arch/arm/boot/dts/stih410.dtsi
+++ b/arch/arm/boot/dts/stih410.dtsi
@@ -281,5 +281,18 @@
<&clk_s_c0_flexgen CLK_ST231_DMU>,
<&clk_s_c0_flexgen CLK_FLASH_PROMIP>;
};
+
+ sti-cec@094a087c {
+ compatible = "st,stih-cec";
+ reg = <0x94a087c 0x64>;
+ clocks = <&clk_sysin>;
+ clock-names = "cec-clk";
+ interrupts = <GIC_SPI 140 IRQ_TYPE_NONE>;
+ interrupt-names = "cec-irq";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_cec0_default>;
+ resets = <&softreset STIH407_LPM_SOFTRESET>;
+ hdmi-phandle = <&sti_hdmi>;
+ };
};
};
diff --git a/arch/arm/boot/dts/stm32429i-eval.dts b/arch/arm/boot/dts/stm32429i-eval.dts
index 3c99466989b1..b6331146aa02 100644
--- a/arch/arm/boot/dts/stm32429i-eval.dts
+++ b/arch/arm/boot/dts/stm32429i-eval.dts
@@ -167,6 +167,34 @@
status = "okay";
};
+&timers1 {
+ status = "okay";
+
+ pwm {
+ pinctrl-0 = <&pwm1_pins>;
+ pinctrl-names = "default";
+ status = "okay";
+ };
+
+ timer@0 {
+ status = "okay";
+ };
+};
+
+&timers3 {
+ status = "okay";
+
+ pwm {
+ pinctrl-0 = <&pwm3_pins>;
+ pinctrl-names = "default";
+ status = "okay";
+ };
+
+ timer@2 {
+ status = "okay";
+ };
+};
+
&usart1 {
pinctrl-0 = <&usart1_pins_a>;
pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/stm32746g-eval.dts b/arch/arm/boot/dts/stm32746g-eval.dts
index 0dc18a0f0940..69a957963fa8 100644
--- a/arch/arm/boot/dts/stm32746g-eval.dts
+++ b/arch/arm/boot/dts/stm32746g-eval.dts
@@ -93,6 +93,10 @@
status = "okay";
};
+&rtc {
+ status = "okay";
+};
+
&usart1 {
pinctrl-0 = <&usart1_pins_a>;
pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/stm32f429-disco.dts b/arch/arm/boot/dts/stm32f429-disco.dts
index 9222b9f37bc0..191fa50e34eb 100644
--- a/arch/arm/boot/dts/stm32f429-disco.dts
+++ b/arch/arm/boot/dts/stm32f429-disco.dts
@@ -88,6 +88,14 @@
gpios = <&gpioa 0 0>;
};
};
+
+	/* Turn on VBUS for the OTG port when used in host mode (dwc2) */
+ vcc5v_otg: vcc5v-otg-regulator {
+ compatible = "regulator-fixed";
+ gpio = <&gpioc 4 0>;
+ regulator-name = "vcc5_host1";
+ regulator-always-on;
+ };
};
&clk_hse {
@@ -105,3 +113,11 @@
pinctrl-names = "default";
status = "okay";
};
+
+&usbotg_hs {
+ compatible = "st,stm32f4x9-fsotg";
+ dr_mode = "host";
+ pinctrl-0 = <&usbotg_fs_pins_b>;
+ pinctrl-names = "default";
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/stm32f429.dtsi b/arch/arm/boot/dts/stm32f429.dtsi
index ee0da970e8ad..b2a2b5c38caa 100644
--- a/arch/arm/boot/dts/stm32f429.dtsi
+++ b/arch/arm/boot/dts/stm32f429.dtsi
@@ -450,6 +450,8 @@
clocks = <&rcc 0 STM32F4_APB2_CLOCK(ADC1)>;
interrupt-parent = <&adc>;
interrupts = <0>;
+ dmas = <&dma2 0 0 0x400 0x0>;
+ dma-names = "rx";
status = "disabled";
};
@@ -460,6 +462,8 @@
clocks = <&rcc 0 STM32F4_APB2_CLOCK(ADC2)>;
interrupt-parent = <&adc>;
interrupts = <1>;
+ dmas = <&dma2 3 1 0x400 0x0>;
+ dma-names = "rx";
status = "disabled";
};
@@ -470,6 +474,8 @@
clocks = <&rcc 0 STM32F4_APB2_CLOCK(ADC3)>;
interrupt-parent = <&adc>;
interrupts = <2>;
+ dmas = <&dma2 1 2 0x400 0x0>;
+ dma-names = "rx";
status = "disabled";
};
};
@@ -666,6 +672,28 @@
};
};
+ usbotg_fs_pins_a: usbotg_fs@0 {
+ pins {
+ pinmux = <STM32F429_PA10_FUNC_OTG_FS_ID>,
+ <STM32F429_PA11_FUNC_OTG_FS_DM>,
+ <STM32F429_PA12_FUNC_OTG_FS_DP>;
+ bias-disable;
+ drive-push-pull;
+ slew-rate = <2>;
+ };
+ };
+
+ usbotg_fs_pins_b: usbotg_fs@1 {
+ pins {
+ pinmux = <STM32F429_PB12_FUNC_OTG_HS_ID>,
+ <STM32F429_PB14_FUNC_OTG_HS_DM>,
+ <STM32F429_PB15_FUNC_OTG_HS_DP>;
+ bias-disable;
+ drive-push-pull;
+ slew-rate = <2>;
+ };
+ };
+
usbotg_hs_pins_a: usbotg_hs@0 {
pins {
pinmux = <STM32F429_PH4_FUNC_OTG_HS_ULPI_NXT>,
@@ -805,6 +833,15 @@
status = "disabled";
};
+ usbotg_fs: usb@50000000 {
+ compatible = "st,stm32f4x9-fsotg";
+ reg = <0x50000000 0x40000>;
+ interrupts = <67>;
+ clocks = <&rcc 0 39>;
+ clock-names = "otg";
+ status = "disabled";
+ };
+
rng: rng@50060800 {
compatible = "st,stm32-rng";
reg = <0x50060800 0x400>;
diff --git a/arch/arm/boot/dts/stm32f469-disco.dts b/arch/arm/boot/dts/stm32f469-disco.dts
index 0dd56ef574fa..75470c34b92c 100644
--- a/arch/arm/boot/dts/stm32f469-disco.dts
+++ b/arch/arm/boot/dts/stm32f469-disco.dts
@@ -68,6 +68,15 @@
soc {
dma-ranges = <0xc0000000 0x0 0x10000000>;
};
+
+	/* Turn on VBUS for the OTG port when used in host mode (dwc2) */
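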
+ vcc5v_otg: vcc5v-otg-regulator {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpio = <&gpiob 2 0>;
+ regulator-name = "vcc5_host1";
+ regulator-always-on;
+ };
};
&rcc {
@@ -115,3 +124,10 @@
pinctrl-names = "default";
status = "okay";
};
+
+&usbotg_fs {
+ dr_mode = "host";
+ pinctrl-0 = <&usbotg_fs_pins_a>;
+ pinctrl-names = "default";
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/stm32f746.dtsi b/arch/arm/boot/dts/stm32f746.dtsi
index 755fb923c07b..c2765ce12e2e 100644
--- a/arch/arm/boot/dts/stm32f746.dtsi
+++ b/arch/arm/boot/dts/stm32f746.dtsi
@@ -43,6 +43,8 @@
#include "skeleton.dtsi"
#include "armv7-m.dtsi"
#include <dt-bindings/pinctrl/stm32f746-pinfunc.h>
+#include <dt-bindings/clock/stm32fx-clock.h>
+#include <dt-bindings/mfd/stm32f7-rcc.h>
/ {
clocks {
@@ -51,6 +53,24 @@
compatible = "fixed-clock";
clock-frequency = <0>;
};
+
+ clk-lse {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ clock-frequency = <32768>;
+ };
+
+ clk-lsi {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ clock-frequency = <32000>;
+ };
+
+ clk_i2s_ckin: clk-i2s-ckin {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ clock-frequency = <48000000>;
+ };
};
soc {
@@ -58,7 +78,7 @@
compatible = "st,stm32-timer";
reg = <0x40000000 0x400>;
interrupts = <28>;
- clocks = <&rcc 0 128>;
+ clocks = <&rcc 0 STM32F7_APB1_CLOCK(TIM2)>;
status = "disabled";
};
@@ -66,7 +86,7 @@
compatible = "st,stm32-timer";
reg = <0x40000400 0x400>;
interrupts = <29>;
- clocks = <&rcc 0 129>;
+ clocks = <&rcc 0 STM32F7_APB1_CLOCK(TIM3)>;
status = "disabled";
};
@@ -74,7 +94,7 @@
compatible = "st,stm32-timer";
reg = <0x40000800 0x400>;
interrupts = <30>;
- clocks = <&rcc 0 130>;
+ clocks = <&rcc 0 STM32F7_APB1_CLOCK(TIM4)>;
status = "disabled";
};
@@ -82,14 +102,14 @@
compatible = "st,stm32-timer";
reg = <0x40000c00 0x400>;
interrupts = <50>;
- clocks = <&rcc 0 131>;
+ clocks = <&rcc 0 STM32F7_APB1_CLOCK(TIM5)>;
};
timer6: timer@40001000 {
compatible = "st,stm32-timer";
reg = <0x40001000 0x400>;
interrupts = <54>;
- clocks = <&rcc 0 132>;
+ clocks = <&rcc 0 STM32F7_APB1_CLOCK(TIM6)>;
status = "disabled";
};
@@ -97,7 +117,21 @@
compatible = "st,stm32-timer";
reg = <0x40001400 0x400>;
interrupts = <55>;
- clocks = <&rcc 0 133>;
+ clocks = <&rcc 0 STM32F7_APB1_CLOCK(TIM7)>;
+ status = "disabled";
+ };
+
+ rtc: rtc@40002800 {
+ compatible = "st,stm32-rtc";
+ reg = <0x40002800 0x400>;
+ clocks = <&rcc 1 CLK_RTC>;
+ clock-names = "ck_rtc";
+ assigned-clocks = <&rcc 1 CLK_RTC>;
+ assigned-clock-parents = <&rcc 1 CLK_LSE>;
+ interrupt-parent = <&exti>;
+ interrupts = <17 1>;
+ interrupt-names = "alarm";
+ st,syscfg = <&pwrcfg>;
status = "disabled";
};
@@ -105,7 +139,7 @@
compatible = "st,stm32f7-usart", "st,stm32f7-uart";
reg = <0x40004400 0x400>;
interrupts = <38>;
- clocks = <&rcc 0 145>;
+ clocks = <&rcc 1 CLK_USART2>;
status = "disabled";
};
@@ -113,7 +147,7 @@
compatible = "st,stm32f7-usart", "st,stm32f7-uart";
reg = <0x40004800 0x400>;
interrupts = <39>;
- clocks = <&rcc 0 146>;
+ clocks = <&rcc 1 CLK_USART3>;
status = "disabled";
};
@@ -121,7 +155,7 @@
compatible = "st,stm32f7-uart";
reg = <0x40004c00 0x400>;
interrupts = <52>;
- clocks = <&rcc 0 147>;
+ clocks = <&rcc 1 CLK_UART4>;
status = "disabled";
};
@@ -129,7 +163,7 @@
compatible = "st,stm32f7-uart";
reg = <0x40005000 0x400>;
interrupts = <53>;
- clocks = <&rcc 0 148>;
+ clocks = <&rcc 1 CLK_UART5>;
status = "disabled";
};
@@ -137,7 +171,7 @@
compatible = "st,stm32f7-usart", "st,stm32f7-uart";
reg = <0x40007800 0x400>;
interrupts = <82>;
- clocks = <&rcc 0 158>;
+ clocks = <&rcc 1 CLK_UART7>;
status = "disabled";
};
@@ -145,7 +179,7 @@
compatible = "st,stm32f7-usart", "st,stm32f7-uart";
reg = <0x40007c00 0x400>;
interrupts = <83>;
- clocks = <&rcc 0 159>;
+ clocks = <&rcc 1 CLK_UART8>;
status = "disabled";
};
@@ -153,7 +187,7 @@
compatible = "st,stm32f7-usart", "st,stm32f7-uart";
reg = <0x40011000 0x400>;
interrupts = <37>;
- clocks = <&rcc 0 164>;
+ clocks = <&rcc 1 CLK_USART1>;
status = "disabled";
};
@@ -161,7 +195,7 @@
compatible = "st,stm32f7-usart", "st,stm32f7-uart";
reg = <0x40011400 0x400>;
interrupts = <71>;
- clocks = <&rcc 0 165>;
+ clocks = <&rcc 1 CLK_USART6>;
status = "disabled";
};
@@ -178,6 +212,11 @@
interrupts = <1>, <2>, <3>, <6>, <7>, <8>, <9>, <10>, <23>, <40>, <41>, <42>, <62>, <76>;
};
+ pwrcfg: power-config@40007000 {
+ compatible = "syscon";
+ reg = <0x40007000 0x400>;
+ };
+
pin-controller {
#address-cells = <1>;
#size-cells = <1>;
@@ -191,7 +230,7 @@
gpio-controller;
#gpio-cells = <2>;
reg = <0x0 0x400>;
- clocks = <&rcc 0 256>;
+ clocks = <&rcc 0 STM32F7_AHB1_CLOCK(GPIOA)>;
st,bank-name = "GPIOA";
};
@@ -199,7 +238,7 @@
gpio-controller;
#gpio-cells = <2>;
reg = <0x400 0x400>;
- clocks = <&rcc 0 257>;
+ clocks = <&rcc 0 STM32F7_AHB1_CLOCK(GPIOB)>;
st,bank-name = "GPIOB";
};
@@ -207,7 +246,7 @@
gpio-controller;
#gpio-cells = <2>;
reg = <0x800 0x400>;
- clocks = <&rcc 0 258>;
+ clocks = <&rcc 0 STM32F7_AHB1_CLOCK(GPIOC)>;
st,bank-name = "GPIOC";
};
@@ -215,7 +254,7 @@
gpio-controller;
#gpio-cells = <2>;
reg = <0xc00 0x400>;
- clocks = <&rcc 0 259>;
+ clocks = <&rcc 0 STM32F7_AHB1_CLOCK(GPIOD)>;
st,bank-name = "GPIOD";
};
@@ -223,7 +262,7 @@
gpio-controller;
#gpio-cells = <2>;
reg = <0x1000 0x400>;
- clocks = <&rcc 0 260>;
+ clocks = <&rcc 0 STM32F7_AHB1_CLOCK(GPIOE)>;
st,bank-name = "GPIOE";
};
@@ -231,7 +270,7 @@
gpio-controller;
#gpio-cells = <2>;
reg = <0x1400 0x400>;
- clocks = <&rcc 0 261>;
+ clocks = <&rcc 0 STM32F7_AHB1_CLOCK(GPIOF)>;
st,bank-name = "GPIOF";
};
@@ -239,7 +278,7 @@
gpio-controller;
#gpio-cells = <2>;
reg = <0x1800 0x400>;
- clocks = <&rcc 0 262>;
+ clocks = <&rcc 0 STM32F7_AHB1_CLOCK(GPIOG)>;
st,bank-name = "GPIOG";
};
@@ -247,7 +286,7 @@
gpio-controller;
#gpio-cells = <2>;
reg = <0x1c00 0x400>;
- clocks = <&rcc 0 263>;
+ clocks = <&rcc 0 STM32F7_AHB1_CLOCK(GPIOH)>;
st,bank-name = "GPIOH";
};
@@ -255,7 +294,7 @@
gpio-controller;
#gpio-cells = <2>;
reg = <0x2000 0x400>;
- clocks = <&rcc 0 264>;
+ clocks = <&rcc 0 STM32F7_AHB1_CLOCK(GPIOI)>;
st,bank-name = "GPIOI";
};
@@ -263,7 +302,7 @@
gpio-controller;
#gpio-cells = <2>;
reg = <0x2400 0x400>;
- clocks = <&rcc 0 265>;
+ clocks = <&rcc 0 STM32F7_AHB1_CLOCK(GPIOJ)>;
st,bank-name = "GPIOJ";
};
@@ -271,7 +310,7 @@
gpio-controller;
#gpio-cells = <2>;
reg = <0x2800 0x400>;
- clocks = <&rcc 0 266>;
+ clocks = <&rcc 0 STM32F7_AHB1_CLOCK(GPIOK)>;
st,bank-name = "GPIOK";
};
@@ -298,9 +337,12 @@
rcc: rcc@40023800 {
#clock-cells = <2>;
- compatible = "st,stm32f42xx-rcc", "st,stm32-rcc";
+ compatible = "st,stm32f746-rcc", "st,stm32-rcc";
reg = <0x40023800 0x400>;
- clocks = <&clk_hse>;
+ clocks = <&clk_hse>, <&clk_i2s_ckin>;
+ st,syscfg = <&pwrcfg>;
+ assigned-clocks = <&rcc 1 CLK_HSE_RTC>;
+ assigned-clock-rates = <1000000>;
};
};
};
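The stm32f746.dtsi hunks above replace bare clock indices with the macros and CLK_* identifiers provided by the two newly included dt-bindings headers; a consumer node then reads, for example:

	#include <dt-bindings/mfd/stm32f7-rcc.h>

	timer2: timer@40000000 {
		compatible = "st,stm32-timer";
		reg = <0x40000000 0x400>;
		interrupts = <28>;
		/* APB1 gate for TIM2 instead of the former bare index 128 */
		clocks = <&rcc 0 STM32F7_APB1_CLOCK(TIM2)>;
		status = "disabled";
	};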
diff --git a/arch/arm/boot/dts/stm32h743-pinctrl.dtsi b/arch/arm/boot/dts/stm32h743-pinctrl.dtsi
new file mode 100644
index 000000000000..fcc1e0640233
--- /dev/null
+++ b/arch/arm/boot/dts/stm32h743-pinctrl.dtsi
@@ -0,0 +1,156 @@
+/*
+ * Copyright 2017 - Alexandre Torgue <alexandre.torgue@st.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <dt-bindings/pinctrl/stm32h7-pinfunc.h>
+
+/ {
+ soc {
+ pin-controller {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "st,stm32h743-pinctrl";
+ ranges = <0 0x58020000 0x3000>;
+ pins-are-numbered;
+
+ gpioa: gpio@58020000 {
+ gpio-controller;
+ #gpio-cells = <2>;
+ reg = <0x0 0x400>;
+ clocks = <&timer_clk>;
+ st,bank-name = "GPIOA";
+ };
+
+ gpiob: gpio@58020400 {
+ gpio-controller;
+ #gpio-cells = <2>;
+ reg = <0x400 0x400>;
+ clocks = <&timer_clk>;
+ st,bank-name = "GPIOB";
+ };
+
+ gpioc: gpio@58020800 {
+ gpio-controller;
+ #gpio-cells = <2>;
+ reg = <0x800 0x400>;
+ clocks = <&timer_clk>;
+ st,bank-name = "GPIOC";
+ };
+
+ gpiod: gpio@58020c00 {
+ gpio-controller;
+ #gpio-cells = <2>;
+ reg = <0xc00 0x400>;
+ clocks = <&timer_clk>;
+ st,bank-name = "GPIOD";
+ };
+
+ gpioe: gpio@58021000 {
+ gpio-controller;
+ #gpio-cells = <2>;
+ reg = <0x1000 0x400>;
+ clocks = <&timer_clk>;
+ st,bank-name = "GPIOE";
+ };
+
+ gpiof: gpio@58021400 {
+ gpio-controller;
+ #gpio-cells = <2>;
+ reg = <0x1400 0x400>;
+ clocks = <&timer_clk>;
+ st,bank-name = "GPIOF";
+ };
+
+ gpiog: gpio@58021800 {
+ gpio-controller;
+ #gpio-cells = <2>;
+ reg = <0x1800 0x400>;
+ clocks = <&timer_clk>;
+ st,bank-name = "GPIOG";
+ };
+
+ gpioh: gpio@58021c00 {
+ gpio-controller;
+ #gpio-cells = <2>;
+ reg = <0x1c00 0x400>;
+ clocks = <&timer_clk>;
+ st,bank-name = "GPIOH";
+ };
+
+ gpioi: gpio@58022000 {
+ gpio-controller;
+ #gpio-cells = <2>;
+ reg = <0x2000 0x400>;
+ clocks = <&timer_clk>;
+ st,bank-name = "GPIOI";
+ };
+
+ gpioj: gpio@58022400 {
+ gpio-controller;
+ #gpio-cells = <2>;
+ reg = <0x2400 0x400>;
+ clocks = <&timer_clk>;
+ st,bank-name = "GPIOJ";
+ };
+
+ gpiok: gpio@58022800 {
+ gpio-controller;
+ #gpio-cells = <2>;
+ reg = <0x2800 0x400>;
+ clocks = <&timer_clk>;
+ st,bank-name = "GPIOK";
+ };
+
+ usart1_pins: usart1@0 {
+ pins1 {
+ pinmux = <STM32H7_PB14_FUNC_USART1_TX>;
+ bias-disable;
+ drive-push-pull;
+ slew-rate = <0>;
+ };
+ pins2 {
+ pinmux = <STM32H7_PB15_FUNC_USART1_RX>;
+ bias-disable;
+ };
+ };
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/stm32h743.dtsi b/arch/arm/boot/dts/stm32h743.dtsi
new file mode 100644
index 000000000000..46856298ee16
--- /dev/null
+++ b/arch/arm/boot/dts/stm32h743.dtsi
@@ -0,0 +1,83 @@
+/*
+ * Copyright 2017 - Alexandre Torgue <alexandre.torgue@st.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "skeleton.dtsi"
+#include "armv7-m.dtsi"
+
+/ {
+ clocks {
+ clk_hse: clk-hse {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ clock-frequency = <0>;
+ };
+
+ timer_clk: timer-clk {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ clock-frequency = <125000000>;
+ };
+ };
+
+ soc {
+ usart1: serial@40011000 {
+ compatible = "st,stm32f7-usart", "st,stm32f7-uart";
+ reg = <0x40011000 0x400>;
+ interrupts = <37>;
+ status = "disabled";
+ clocks = <&timer_clk>;
+
+ };
+
+ timer5: timer@40000c00 {
+ compatible = "st,stm32-timer";
+ reg = <0x40000c00 0x400>;
+ interrupts = <50>;
+ clocks = <&timer_clk>;
+ };
+ };
+};
+
+&systick {
+ clock-frequency = <250000000>;
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/stm32h743i-eval.dts b/arch/arm/boot/dts/stm32h743i-eval.dts
new file mode 100644
index 000000000000..c6effbb36e4a
--- /dev/null
+++ b/arch/arm/boot/dts/stm32h743i-eval.dts
@@ -0,0 +1,74 @@
+/*
+ * Copyright 2017 - Alexandre Torgue <alexandre.torgue@st.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+#include "stm32h743.dtsi"
+#include "stm32h743-pinctrl.dtsi"
+
+/ {
+ model = "STMicroelectronics STM32H743i-EVAL board";
+ compatible = "st,stm32h743i-eval", "st,stm32h743";
+
+ chosen {
+ bootargs = "root=/dev/ram";
+ stdout-path = "serial0:115200n8";
+ };
+
+ memory {
+ reg = <0xd0000000 0x2000000>;
+ };
+
+ aliases {
+ serial0 = &usart1;
+ };
+};
+
+&clk_hse {
+ clock-frequency = <125000000>;
+};
+
+&usart1 {
+ pinctrl-0 = <&usart1_pins>;
+ pinctrl-names = "default";
+ status = "okay";
+};
+
diff --git a/arch/arm/boot/dts/sun4i-a10-a1000.dts b/arch/arm/boot/dts/sun4i-a10-a1000.dts
index f3fc27412a67..f2a01fe2bebc 100644
--- a/arch/arm/boot/dts/sun4i-a10-a1000.dts
+++ b/arch/arm/boot/dts/sun4i-a10-a1000.dts
@@ -47,7 +47,6 @@
#include "sunxi-common-regulators.dtsi"
#include <dt-bindings/gpio/gpio.h>
-#include <dt-bindings/pinctrl/sun4i-a10.h>
/ {
model = "Mele A1000";
diff --git a/arch/arm/boot/dts/sun4i-a10-cubieboard.dts b/arch/arm/boot/dts/sun4i-a10-cubieboard.dts
index 04e040e6233d..d844938e2aa7 100644
--- a/arch/arm/boot/dts/sun4i-a10-cubieboard.dts
+++ b/arch/arm/boot/dts/sun4i-a10-cubieboard.dts
@@ -46,7 +46,6 @@
#include "sunxi-common-regulators.dtsi"
#include <dt-bindings/gpio/gpio.h>
-#include <dt-bindings/pinctrl/sun4i-a10.h>
/ {
model = "Cubietech Cubieboard";
diff --git a/arch/arm/boot/dts/sun4i-a10-dserve-dsrv9703c.dts b/arch/arm/boot/dts/sun4i-a10-dserve-dsrv9703c.dts
index 8317fbfeec4a..aad3bec1cb39 100644
--- a/arch/arm/boot/dts/sun4i-a10-dserve-dsrv9703c.dts
+++ b/arch/arm/boot/dts/sun4i-a10-dserve-dsrv9703c.dts
@@ -46,7 +46,6 @@
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/input/input.h>
#include <dt-bindings/interrupt-controller/irq.h>
-#include <dt-bindings/pinctrl/sun4i-a10.h>
#include <dt-bindings/pwm/pwm.h>
/ {
diff --git a/arch/arm/boot/dts/sun4i-a10-hackberry.dts b/arch/arm/boot/dts/sun4i-a10-hackberry.dts
index a48b46474417..a1a7282199d5 100644
--- a/arch/arm/boot/dts/sun4i-a10-hackberry.dts
+++ b/arch/arm/boot/dts/sun4i-a10-hackberry.dts
@@ -47,7 +47,6 @@
#include "sunxi-common-regulators.dtsi"
#include <dt-bindings/gpio/gpio.h>
-#include <dt-bindings/pinctrl/sun4i-a10.h>
/ {
model = "Miniand Hackberry";
diff --git a/arch/arm/boot/dts/sun4i-a10-inet1.dts b/arch/arm/boot/dts/sun4i-a10-inet1.dts
index f3092703a1a6..b8923b92cb36 100644
--- a/arch/arm/boot/dts/sun4i-a10-inet1.dts
+++ b/arch/arm/boot/dts/sun4i-a10-inet1.dts
@@ -46,7 +46,6 @@
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/input/input.h>
#include <dt-bindings/interrupt-controller/irq.h>
-#include <dt-bindings/pinctrl/sun4i-a10.h>
#include <dt-bindings/pwm/pwm.h>
/ {
diff --git a/arch/arm/boot/dts/sun4i-a10-inet9f-rev03.dts b/arch/arm/boot/dts/sun4i-a10-inet9f-rev03.dts
index 4ef2a60a8cd4..4a27eb9102cd 100644
--- a/arch/arm/boot/dts/sun4i-a10-inet9f-rev03.dts
+++ b/arch/arm/boot/dts/sun4i-a10-inet9f-rev03.dts
@@ -46,7 +46,6 @@
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/input/input.h>
#include <dt-bindings/interrupt-controller/irq.h>
-#include <dt-bindings/pinctrl/sun4i-a10.h>
/ {
model = "iNet-9F Rev 03";
diff --git a/arch/arm/boot/dts/sun4i-a10-jesurun-q5.dts b/arch/arm/boot/dts/sun4i-a10-jesurun-q5.dts
index fc4d4d49e2e2..308dc1513041 100644
--- a/arch/arm/boot/dts/sun4i-a10-jesurun-q5.dts
+++ b/arch/arm/boot/dts/sun4i-a10-jesurun-q5.dts
@@ -47,7 +47,6 @@
#include "sunxi-common-regulators.dtsi"
#include <dt-bindings/gpio/gpio.h>
-#include <dt-bindings/pinctrl/sun4i-a10.h>
/ {
model = "Jesurun Q5";
diff --git a/arch/arm/boot/dts/sun4i-a10-marsboard.dts b/arch/arm/boot/dts/sun4i-a10-marsboard.dts
index a2885039d5f1..98a5f7258dca 100644
--- a/arch/arm/boot/dts/sun4i-a10-marsboard.dts
+++ b/arch/arm/boot/dts/sun4i-a10-marsboard.dts
@@ -46,7 +46,6 @@
#include "sunxi-common-regulators.dtsi"
#include <dt-bindings/gpio/gpio.h>
-#include <dt-bindings/pinctrl/sun4i-a10.h>
/ {
model = "HAOYU Electronics Marsboard A10";
diff --git a/arch/arm/boot/dts/sun4i-a10-mini-xplus.dts b/arch/arm/boot/dts/sun4i-a10-mini-xplus.dts
index af42ebb3a97b..484c57493bd2 100644
--- a/arch/arm/boot/dts/sun4i-a10-mini-xplus.dts
+++ b/arch/arm/boot/dts/sun4i-a10-mini-xplus.dts
@@ -47,7 +47,6 @@
#include "sunxi-common-regulators.dtsi"
#include <dt-bindings/gpio/gpio.h>
-#include <dt-bindings/pinctrl/sun4i-a10.h>
/ {
model = "PineRiver Mini X-Plus";
diff --git a/arch/arm/boot/dts/sun4i-a10-mk802.dts b/arch/arm/boot/dts/sun4i-a10-mk802.dts
index 9c1afd4277d7..2b75745cd246 100644
--- a/arch/arm/boot/dts/sun4i-a10-mk802.dts
+++ b/arch/arm/boot/dts/sun4i-a10-mk802.dts
@@ -44,7 +44,6 @@
#include "sun4i-a10.dtsi"
#include "sunxi-common-regulators.dtsi"
#include <dt-bindings/gpio/gpio.h>
-#include <dt-bindings/pinctrl/sun4i-a10.h>
/ {
model = "MK802";
diff --git a/arch/arm/boot/dts/sun4i-a10-olinuxino-lime.dts b/arch/arm/boot/dts/sun4i-a10-olinuxino-lime.dts
index 214a5accfe93..3a2522a9419d 100644
--- a/arch/arm/boot/dts/sun4i-a10-olinuxino-lime.dts
+++ b/arch/arm/boot/dts/sun4i-a10-olinuxino-lime.dts
@@ -45,7 +45,6 @@
#include "sunxi-common-regulators.dtsi"
#include <dt-bindings/gpio/gpio.h>
-#include <dt-bindings/pinctrl/sun4i-a10.h>
/ {
model = "Olimex A10-OLinuXino-LIME";
diff --git a/arch/arm/boot/dts/sun4i-a10-pcduino.dts b/arch/arm/boot/dts/sun4i-a10-pcduino.dts
index b0365d63ba70..83596fd2ccfc 100644
--- a/arch/arm/boot/dts/sun4i-a10-pcduino.dts
+++ b/arch/arm/boot/dts/sun4i-a10-pcduino.dts
@@ -47,7 +47,6 @@
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/input/input.h>
-#include <dt-bindings/pinctrl/sun4i-a10.h>
/ {
model = "LinkSprite pcDuino";
diff --git a/arch/arm/boot/dts/sun4i-a10-pov-protab2-ips9.dts b/arch/arm/boot/dts/sun4i-a10-pov-protab2-ips9.dts
index bfa6bbdaab27..a68c7cc53b94 100644
--- a/arch/arm/boot/dts/sun4i-a10-pov-protab2-ips9.dts
+++ b/arch/arm/boot/dts/sun4i-a10-pov-protab2-ips9.dts
@@ -46,7 +46,6 @@
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/input/input.h>
#include <dt-bindings/interrupt-controller/irq.h>
-#include <dt-bindings/pinctrl/sun4i-a10.h>
#include <dt-bindings/pwm/pwm.h>
/ {
diff --git a/arch/arm/boot/dts/sun4i-a10.dtsi b/arch/arm/boot/dts/sun4i-a10.dtsi
index ba20b48c0702..b63668ece151 100644
--- a/arch/arm/boot/dts/sun4i-a10.dtsi
+++ b/arch/arm/boot/dts/sun4i-a10.dtsi
@@ -47,7 +47,6 @@
#include <dt-bindings/clock/sun4i-a10-pll2.h>
#include <dt-bindings/dma/sun4i-a10.h>
-#include <dt-bindings/pinctrl/sun4i-a10.h>
/ {
interrupt-parent = <&intc>;
@@ -974,6 +973,11 @@
#interrupt-cells = <3>;
#gpio-cells = <3>;
+ can0_pins_a: can0@0 {
+ pins = "PH20", "PH21";
+ function = "can";
+ };
+
emac_pins_a: emac0@0 {
pins = "PA0", "PA1", "PA2",
"PA3", "PA4", "PA5", "PA6",
@@ -1283,6 +1287,22 @@
status = "disabled";
};
+ ps20: ps2@01c2a000 {
+ compatible = "allwinner,sun4i-a10-ps2";
+ reg = <0x01c2a000 0x400>;
+ interrupts = <62>;
+ clocks = <&apb1_gates 6>;
+ status = "disabled";
+ };
+
+ ps21: ps2@01c2a400 {
+ compatible = "allwinner,sun4i-a10-ps2";
+ reg = <0x01c2a400 0x400>;
+ interrupts = <63>;
+ clocks = <&apb1_gates 7>;
+ status = "disabled";
+ };
+
i2c0: i2c@01c2ac00 {
compatible = "allwinner,sun4i-a10-i2c";
reg = <0x01c2ac00 0x400>;
@@ -1313,19 +1333,11 @@
#size-cells = <0>;
};
- ps20: ps2@01c2a000 {
- compatible = "allwinner,sun4i-a10-ps2";
- reg = <0x01c2a000 0x400>;
- interrupts = <62>;
- clocks = <&apb1_gates 6>;
- status = "disabled";
- };
-
- ps21: ps2@01c2a400 {
- compatible = "allwinner,sun4i-a10-ps2";
- reg = <0x01c2a400 0x400>;
- interrupts = <63>;
- clocks = <&apb1_gates 7>;
+ can0: can@01c2bc00 {
+ compatible = "allwinner,sun4i-a10-can";
+ reg = <0x01c2bc00 0x400>;
+ interrupts = <26>;
+ clocks = <&apb1_gates 4>;
status = "disabled";
};
};
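The sun4i-a10.dtsi hunk above adds the A10 CAN controller node and its PH20/PH21 pinmux group. The controller stays disabled in the dtsi; a board with a CAN transceiver wired to those pins would typically enable it with a sketch like:

	&can0 {
		pinctrl-names = "default";
		pinctrl-0 = <&can0_pins_a>;
		status = "okay";
	};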
diff --git a/arch/arm/boot/dts/sun5i-a10s-auxtek-t003.dts b/arch/arm/boot/dts/sun5i-a10s-auxtek-t003.dts
index a539b72ce093..c6f742a7e69f 100644
--- a/arch/arm/boot/dts/sun5i-a10s-auxtek-t003.dts
+++ b/arch/arm/boot/dts/sun5i-a10s-auxtek-t003.dts
@@ -44,7 +44,6 @@
#include "sun5i-a10s.dtsi"
#include "sunxi-common-regulators.dtsi"
#include <dt-bindings/gpio/gpio.h>
-#include <dt-bindings/pinctrl/sun4i-a10.h>
/ {
model = "Auxtek t003 A10s hdmi tv-stick";
diff --git a/arch/arm/boot/dts/sun5i-a10s-auxtek-t004.dts b/arch/arm/boot/dts/sun5i-a10s-auxtek-t004.dts
index e1b5e8a446fe..a27c3fa58736 100644
--- a/arch/arm/boot/dts/sun5i-a10s-auxtek-t004.dts
+++ b/arch/arm/boot/dts/sun5i-a10s-auxtek-t004.dts
@@ -44,7 +44,6 @@
#include "sun5i-a10s.dtsi"
#include "sunxi-common-regulators.dtsi"
#include <dt-bindings/gpio/gpio.h>
-#include <dt-bindings/pinctrl/sun4i-a10.h>
/ {
model = "Auxtek t004 A10s hdmi tv-stick";
diff --git a/arch/arm/boot/dts/sun5i-a10s-olinuxino-micro.dts b/arch/arm/boot/dts/sun5i-a10s-olinuxino-micro.dts
index d8245c6314a7..894f874a5beb 100644
--- a/arch/arm/boot/dts/sun5i-a10s-olinuxino-micro.dts
+++ b/arch/arm/boot/dts/sun5i-a10s-olinuxino-micro.dts
@@ -48,7 +48,6 @@
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/input/input.h>
-#include <dt-bindings/pinctrl/sun4i-a10.h>
/ {
model = "Olimex A10s-Olinuxino Micro";
@@ -83,7 +82,7 @@
&emac {
pinctrl-names = "default";
- pinctrl-0 = <&emac_pins_a>;
+ pinctrl-0 = <&emac_pins_b>;
phy = <&phy1>;
status = "okay";
};
@@ -257,7 +256,7 @@
&uart2 {
pinctrl-names = "default";
- pinctrl-0 = <&uart2_pins_a>;
+ pinctrl-0 = <&uart2_pins_b>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/sun5i-a10s-r7-tv-dongle.dts b/arch/arm/boot/dts/sun5i-a10s-r7-tv-dongle.dts
index 51371f9b1cf0..262b3669f04d 100644
--- a/arch/arm/boot/dts/sun5i-a10s-r7-tv-dongle.dts
+++ b/arch/arm/boot/dts/sun5i-a10s-r7-tv-dongle.dts
@@ -45,7 +45,6 @@
#include "sunxi-common-regulators.dtsi"
#include <dt-bindings/gpio/gpio.h>
-#include <dt-bindings/pinctrl/sun4i-a10.h>
/ {
model = "R7 A10s hdmi tv-stick";
diff --git a/arch/arm/boot/dts/sun5i-a10s-wobo-i5.dts b/arch/arm/boot/dts/sun5i-a10s-wobo-i5.dts
index 2b8adda0deda..ea3e5655a61b 100644
--- a/arch/arm/boot/dts/sun5i-a10s-wobo-i5.dts
+++ b/arch/arm/boot/dts/sun5i-a10s-wobo-i5.dts
@@ -46,7 +46,6 @@
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/interrupt-controller/irq.h>
-#include <dt-bindings/pinctrl/sun4i-a10.h>
/ {
model = "A10s-Wobo i5";
@@ -95,7 +94,7 @@
&emac {
pinctrl-names = "default";
- pinctrl-0 = <&emac_pins_b>;
+ pinctrl-0 = <&emac_pins_a>;
phy = <&phy1>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/sun5i-a10s.dtsi b/arch/arm/boot/dts/sun5i-a10s.dtsi
index 24b0f5f556f8..1e38ff80366c 100644
--- a/arch/arm/boot/dts/sun5i-a10s.dtsi
+++ b/arch/arm/boot/dts/sun5i-a10s.dtsi
@@ -47,7 +47,6 @@
#include "sun5i.dtsi"
#include <dt-bindings/dma/sun4i-a10.h>
-#include <dt-bindings/pinctrl/sun4i-a10.h>
/ {
interrupt-parent = <&intc>;
@@ -61,7 +60,7 @@
#size-cells = <1>;
ranges;
- framebuffer@0 {
+ framebuffer@2 {
compatible = "allwinner,simple-framebuffer",
"simple-framebuffer";
allwinner,pipeline = "de_be0-lcd0-hdmi";
@@ -70,45 +69,9 @@
<&ccu CLK_DE_BE>, <&ccu CLK_HDMI>;
status = "disabled";
};
-
- framebuffer@1 {
- compatible = "allwinner,simple-framebuffer",
- "simple-framebuffer";
- allwinner,pipeline = "de_be0-lcd0";
- clocks = <&ccu CLK_AHB_LCD>, <&ccu CLK_AHB_DE_BE>, <&ccu CLK_DE_BE>,
- <&ccu CLK_TCON_CH0>, <&ccu CLK_DRAM_DE_BE>;
- status = "disabled";
- };
-
- framebuffer@2 {
- compatible = "allwinner,simple-framebuffer",
- "simple-framebuffer";
- allwinner,pipeline = "de_be0-lcd0-tve0";
- clocks = <&ccu CLK_AHB_TVE>, <&ccu CLK_AHB_LCD>,
- <&ccu CLK_AHB_DE_BE>, <&ccu CLK_DE_BE>,
- <&ccu CLK_TCON_CH1>, <&ccu CLK_DRAM_DE_BE>;
- status = "disabled";
- };
};
soc@01c00000 {
- emac: ethernet@01c0b000 {
- compatible = "allwinner,sun4i-a10-emac";
- reg = <0x01c0b000 0x1000>;
- interrupts = <55>;
- clocks = <&ccu CLK_AHB_EMAC>;
- allwinner,sram = <&emac_sram 1>;
- status = "disabled";
- };
-
- mdio: mdio@01c0b080 {
- compatible = "allwinner,sun4i-a10-mdio";
- reg = <0x01c0b080 0x14>;
- status = "disabled";
- #address-cells = <1>;
- #size-cells = <0>;
- };
-
pwm: pwm@01c20e00 {
compatible = "allwinner,sun5i-a10s-pwm";
reg = <0x01c20e00 0xc>;
@@ -116,26 +79,6 @@
#pwm-cells = <3>;
status = "disabled";
};
-
- uart0: serial@01c28000 {
- compatible = "snps,dw-apb-uart";
- reg = <0x01c28000 0x400>;
- interrupts = <1>;
- reg-shift = <2>;
- reg-io-width = <4>;
- clocks = <&ccu CLK_APB1_UART0>;
- status = "disabled";
- };
-
- uart2: serial@01c28800 {
- compatible = "snps,dw-apb-uart";
- reg = <0x01c28800 0x400>;
- interrupts = <3>;
- reg-shift = <2>;
- reg-io-width = <4>;
- clocks = <&ccu CLK_APB1_UART2>;
- status = "disabled";
- };
};
};
@@ -151,12 +94,12 @@
function = "uart0";
};
- uart2_pins_a: uart2@0 {
+ uart2_pins_b: uart2@1 {
pins = "PC18", "PC19";
function = "uart2";
};
- emac_pins_a: emac0@0 {
+ emac_pins_b: emac0@1 {
pins = "PA0", "PA1", "PA2",
"PA3", "PA4", "PA5", "PA6",
"PA7", "PA8", "PA9", "PA10",
@@ -165,15 +108,6 @@
function = "emac";
};
- emac_pins_b: emac0@1 {
- pins = "PD6", "PD7", "PD10",
- "PD11", "PD12", "PD13", "PD14",
- "PD15", "PD18", "PD19", "PD20",
- "PD21", "PD22", "PD23", "PD24",
- "PD25", "PD26", "PD27";
- function = "emac";
- };
-
mmc1_pins_a: mmc1@0 {
pins = "PG3", "PG4", "PG5",
"PG6", "PG7", "PG8";
@@ -193,9 +127,4 @@
};
&sram_a {
- emac_sram: sram-section@8000 {
- compatible = "allwinner,sun4i-a10-sram-a3-a4";
- reg = <0x8000 0x4000>;
- status = "disabled";
- };
};
diff --git a/arch/arm/boot/dts/sun5i-a13-empire-electronix-d709.dts b/arch/arm/boot/dts/sun5i-a13-empire-electronix-d709.dts
index 42435454acef..34411d27aadf 100644
--- a/arch/arm/boot/dts/sun5i-a13-empire-electronix-d709.dts
+++ b/arch/arm/boot/dts/sun5i-a13-empire-electronix-d709.dts
@@ -46,7 +46,6 @@
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/input/input.h>
#include <dt-bindings/interrupt-controller/irq.h>
-#include <dt-bindings/pinctrl/sun4i-a10.h>
#include <dt-bindings/pwm/pwm.h>
/ {
diff --git a/arch/arm/boot/dts/sun5i-a13-hsg-h702.dts b/arch/arm/boot/dts/sun5i-a13-hsg-h702.dts
index 5879a75cf97a..2489c16f7efa 100644
--- a/arch/arm/boot/dts/sun5i-a13-hsg-h702.dts
+++ b/arch/arm/boot/dts/sun5i-a13-hsg-h702.dts
@@ -46,7 +46,6 @@
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/input/input.h>
-#include <dt-bindings/pinctrl/sun4i-a10.h>
/ {
model = "HSG H702";
diff --git a/arch/arm/boot/dts/sun5i-a13-licheepi-one.dts b/arch/arm/boot/dts/sun5i-a13-licheepi-one.dts
index 566cda91a66b..bc883893f4a4 100644
--- a/arch/arm/boot/dts/sun5i-a13-licheepi-one.dts
+++ b/arch/arm/boot/dts/sun5i-a13-licheepi-one.dts
@@ -50,7 +50,6 @@
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/input/input.h>
-#include <dt-bindings/pinctrl/sun4i-a10.h>
/ {
model = "Lichee Pi One";
diff --git a/arch/arm/boot/dts/sun5i-a13-olinuxino-micro.dts b/arch/arm/boot/dts/sun5i-a13-olinuxino-micro.dts
index 60e393e28783..3a831eaf1dfc 100644
--- a/arch/arm/boot/dts/sun5i-a13-olinuxino-micro.dts
+++ b/arch/arm/boot/dts/sun5i-a13-olinuxino-micro.dts
@@ -46,7 +46,6 @@
#include "sunxi-common-regulators.dtsi"
#include <dt-bindings/gpio/gpio.h>
-#include <dt-bindings/pinctrl/sun4i-a10.h>
/ {
model = "Olimex A13-Olinuxino Micro";
diff --git a/arch/arm/boot/dts/sun5i-a13-olinuxino.dts b/arch/arm/boot/dts/sun5i-a13-olinuxino.dts
index 940d47e88056..95f591bb8ced 100644
--- a/arch/arm/boot/dts/sun5i-a13-olinuxino.dts
+++ b/arch/arm/boot/dts/sun5i-a13-olinuxino.dts
@@ -48,7 +48,6 @@
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/input/input.h>
-#include <dt-bindings/pinctrl/sun4i-a10.h>
/ {
model = "Olimex A13-Olinuxino";
diff --git a/arch/arm/boot/dts/sun5i-a13.dtsi b/arch/arm/boot/dts/sun5i-a13.dtsi
index fb2ddb9a04c9..6436bad94404 100644
--- a/arch/arm/boot/dts/sun5i-a13.dtsi
+++ b/arch/arm/boot/dts/sun5i-a13.dtsi
@@ -46,27 +46,11 @@
#include "sun5i.dtsi"
-#include <dt-bindings/pinctrl/sun4i-a10.h>
#include <dt-bindings/thermal/thermal.h>
/ {
interrupt-parent = <&intc>;
- chosen {
- #address-cells = <1>;
- #size-cells = <1>;
- ranges;
-
- framebuffer@0 {
- compatible = "allwinner,simple-framebuffer",
- "simple-framebuffer";
- allwinner,pipeline = "de_be0-lcd0";
- clocks = <&ccu CLK_AHB_LCD>, <&ccu CLK_AHB_DE_BE>, <&ccu CLK_DE_BE>,
- <&ccu CLK_TCON_CH0>, <&ccu CLK_DRAM_DE_BE>;
- status = "disabled";
- };
- };
-
thermal-zones {
cpu_thermal {
/* milliseconds */
@@ -105,44 +89,6 @@
};
soc@01c00000 {
- tcon0: lcd-controller@01c0c000 {
- compatible = "allwinner,sun5i-a13-tcon";
- reg = <0x01c0c000 0x1000>;
- interrupts = <44>;
- resets = <&ccu RST_LCD>;
- reset-names = "lcd";
- clocks = <&ccu CLK_AHB_LCD>,
- <&ccu CLK_TCON_CH0>,
- <&ccu CLK_TCON_CH1>;
- clock-names = "ahb",
- "tcon-ch0",
- "tcon-ch1";
- clock-output-names = "tcon-pixel-clock";
- status = "disabled";
-
- ports {
- #address-cells = <1>;
- #size-cells = <0>;
-
- tcon0_in: port@0 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <0>;
-
- tcon0_in_be0: endpoint@0 {
- reg = <0>;
- remote-endpoint = <&be0_out_tcon0>;
- };
- };
-
- tcon0_out: port@1 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <1>;
- };
- };
- };
-
pwm: pwm@01c20e00 {
compatible = "allwinner,sun5i-a13-pwm";
reg = <0x01c20e00 0xc>;
@@ -151,74 +97,6 @@
status = "disabled";
};
- fe0: display-frontend@01e00000 {
- compatible = "allwinner,sun5i-a13-display-frontend";
- reg = <0x01e00000 0x20000>;
- interrupts = <47>;
- clocks = <&ccu CLK_DE_FE>, <&ccu CLK_DE_FE>,
- <&ccu CLK_DRAM_DE_FE>;
- clock-names = "ahb", "mod",
- "ram";
- resets = <&ccu RST_DE_FE>;
- status = "disabled";
-
- ports {
- #address-cells = <1>;
- #size-cells = <0>;
-
- fe0_out: port@1 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <1>;
-
- fe0_out_be0: endpoint@0 {
- reg = <0>;
- remote-endpoint = <&be0_in_fe0>;
- };
- };
- };
- };
-
- be0: display-backend@01e60000 {
- compatible = "allwinner,sun5i-a13-display-backend";
- reg = <0x01e60000 0x10000>;
- clocks = <&ccu CLK_AHB_DE_BE>, <&ccu CLK_DE_BE>,
- <&ccu CLK_DRAM_DE_BE>;
- clock-names = "ahb", "mod",
- "ram";
- resets = <&ccu RST_DE_BE>;
- status = "disabled";
-
- assigned-clocks = <&ccu CLK_DE_BE>;
- assigned-clock-rates = <300000000>;
-
- ports {
- #address-cells = <1>;
- #size-cells = <0>;
-
- be0_in: port@0 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <0>;
-
- be0_in_fe0: endpoint@0 {
- reg = <0>;
- remote-endpoint = <&fe0_out_be0>;
- };
- };
-
- be0_out: port@1 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <1>;
-
- be0_out_tcon0: endpoint@0 {
- reg = <0>;
- remote-endpoint = <&tcon0_in_be0>;
- };
- };
- };
- };
};
};
@@ -244,22 +122,4 @@
&pio {
compatible = "allwinner,sun5i-a13-pinctrl";
-
- lcd_rgb666_pins: lcd_rgb666@0 {
- pins = "PD2", "PD3", "PD4", "PD5", "PD6", "PD7",
- "PD10", "PD11", "PD12", "PD13", "PD14", "PD15",
- "PD18", "PD19", "PD20", "PD21", "PD22", "PD23",
- "PD24", "PD25", "PD26", "PD27";
- function = "lcd0";
- };
-
- uart1_pins_a: uart1@0 {
- pins = "PE10", "PE11";
- function = "uart1";
- };
-
- uart1_pins_b: uart1@1 {
- pins = "PG3", "PG4";
- function = "uart1";
- };
};
diff --git a/arch/arm/boot/dts/sun5i-gr8-chip-pro.dts b/arch/arm/boot/dts/sun5i-gr8-chip-pro.dts
index 0cf0813d363a..c55b11a4d3c7 100644
--- a/arch/arm/boot/dts/sun5i-gr8-chip-pro.dts
+++ b/arch/arm/boot/dts/sun5i-gr8-chip-pro.dts
@@ -171,7 +171,7 @@
&pwm {
pinctrl-names = "default";
- pinctrl-0 = <&pwm0_pins_a>, <&pwm1_pins>;
+ pinctrl-0 = <&pwm0_pins>, <&pwm1_pins>;
status = "disabled";
};
@@ -220,7 +220,7 @@
&uart1 {
pinctrl-names = "default";
- pinctrl-0 = <&uart1_pins_a>, <&uart1_cts_rts_pins_a>;
+ pinctrl-0 = <&uart1_pins_b>, <&uart1_cts_rts_pins_a>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/sun5i-gr8-evb.dts b/arch/arm/boot/dts/sun5i-gr8-evb.dts
index 1a845af4d4db..558c16a30543 100644
--- a/arch/arm/boot/dts/sun5i-gr8-evb.dts
+++ b/arch/arm/boot/dts/sun5i-gr8-evb.dts
@@ -281,7 +281,7 @@
&pwm {
pinctrl-names = "default";
- pinctrl-0 = <&pwm0_pins_a>;
+ pinctrl-0 = <&pwm0_pins>;
status = "okay";
};
@@ -332,7 +332,7 @@
&uart1 {
pinctrl-names = "default";
- pinctrl-0 = <&uart1_pins_a>, <&uart1_cts_rts_pins_a>;
+ pinctrl-0 = <&uart1_pins_b>, <&uart1_cts_rts_pins_a>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/sun5i-gr8.dtsi b/arch/arm/boot/dts/sun5i-gr8.dtsi
index cb9b2aaf7297..3eb56cad0cea 100644
--- a/arch/arm/boot/dts/sun5i-gr8.dtsi
+++ b/arch/arm/boot/dts/sun5i-gr8.dtsi
@@ -42,429 +42,19 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
+#include "sun5i.dtsi"
+
#include <dt-bindings/clock/sun5i-ccu.h>
#include <dt-bindings/dma/sun4i-a10.h>
-#include <dt-bindings/pinctrl/sun4i-a10.h>
#include <dt-bindings/reset/sun5i-ccu.h>
/ {
- interrupt-parent = <&intc>;
- #address-cells = <1>;
- #size-cells = <1>;
-
- cpus {
- #address-cells = <1>;
- #size-cells = <0>;
-
- cpu0: cpu@0 {
- device_type = "cpu";
- compatible = "arm,cortex-a8";
- reg = <0x0>;
- clocks = <&ccu CLK_CPU>;
- };
- };
-
- clocks {
- #address-cells = <1>;
- #size-cells = <1>;
- ranges;
-
- osc24M: clk@01c20050 {
- #clock-cells = <0>;
- compatible = "fixed-clock";
- clock-frequency = <24000000>;
- clock-output-names = "osc24M";
- };
-
- osc32k: clk@0 {
- #clock-cells = <0>;
- compatible = "fixed-clock";
- clock-frequency = <32768>;
- clock-output-names = "osc32k";
- };
- };
-
display-engine {
compatible = "allwinner,sun5i-a13-display-engine";
allwinner,pipelines = <&fe0>;
};
soc@01c00000 {
- compatible = "simple-bus";
- #address-cells = <1>;
- #size-cells = <1>;
- ranges;
-
- sram-controller@01c00000 {
- compatible = "allwinner,sun4i-a10-sram-controller";
- reg = <0x01c00000 0x30>;
- #address-cells = <1>;
- #size-cells = <1>;
- ranges;
-
- sram_a: sram@00000000 {
- compatible = "mmio-sram";
- reg = <0x00000000 0xc000>;
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0 0x00000000 0xc000>;
- };
-
- sram_d: sram@00010000 {
- compatible = "mmio-sram";
- reg = <0x00010000 0x1000>;
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0 0x00010000 0x1000>;
-
- otg_sram: sram-section@0000 {
- compatible = "allwinner,sun4i-a10-sram-d";
- reg = <0x0000 0x1000>;
- status = "disabled";
- };
- };
- };
-
- dma: dma-controller@01c02000 {
- compatible = "allwinner,sun4i-a10-dma";
- reg = <0x01c02000 0x1000>;
- interrupts = <27>;
- clocks = <&ccu CLK_AHB_DMA>;
- #dma-cells = <2>;
- };
-
- nfc: nand@01c03000 {
- compatible = "allwinner,sun4i-a10-nand";
- reg = <0x01c03000 0x1000>;
- interrupts = <37>;
- clocks = <&ccu CLK_AHB_NAND>, <&ccu CLK_NAND>;
- clock-names = "ahb", "mod";
- dmas = <&dma SUN4I_DMA_DEDICATED 3>;
- dma-names = "rxtx";
- status = "disabled";
- #address-cells = <1>;
- #size-cells = <0>;
- };
-
- spi0: spi@01c05000 {
- compatible = "allwinner,sun4i-a10-spi";
- reg = <0x01c05000 0x1000>;
- interrupts = <10>;
- clocks = <&ccu CLK_AHB_SPI0>, <&ccu CLK_SPI0>;
- clock-names = "ahb", "mod";
- dmas = <&dma SUN4I_DMA_DEDICATED 27>,
- <&dma SUN4I_DMA_DEDICATED 26>;
- dma-names = "rx", "tx";
- status = "disabled";
- #address-cells = <1>;
- #size-cells = <0>;
- };
-
- spi1: spi@01c06000 {
- compatible = "allwinner,sun4i-a10-spi";
- reg = <0x01c06000 0x1000>;
- interrupts = <11>;
- clocks = <&ccu CLK_AHB_SPI1>, <&ccu CLK_SPI1>;
- clock-names = "ahb", "mod";
- dmas = <&dma SUN4I_DMA_DEDICATED 9>,
- <&dma SUN4I_DMA_DEDICATED 8>;
- dma-names = "rx", "tx";
- status = "disabled";
- #address-cells = <1>;
- #size-cells = <0>;
- };
-
- tve0: tv-encoder@01c0a000 {
- compatible = "allwinner,sun4i-a10-tv-encoder";
- reg = <0x01c0a000 0x1000>;
- clocks = <&ccu CLK_AHB_TVE>;
- resets = <&ccu RST_TVE>;
- status = "disabled";
-
- port {
- #address-cells = <1>;
- #size-cells = <0>;
-
- tve0_in_tcon0: endpoint@0 {
- reg = <0>;
- remote-endpoint = <&tcon0_out_tve0>;
- };
- };
- };
-
- tcon0: lcd-controller@01c0c000 {
- compatible = "allwinner,sun5i-a13-tcon";
- reg = <0x01c0c000 0x1000>;
- interrupts = <44>;
- resets = <&ccu RST_LCD>;
- reset-names = "lcd";
- clocks = <&ccu CLK_AHB_LCD>,
- <&ccu CLK_TCON_CH0>,
- <&ccu CLK_TCON_CH1>;
- clock-names = "ahb",
- "tcon-ch0",
- "tcon-ch1";
- clock-output-names = "tcon-pixel-clock";
- status = "disabled";
-
- ports {
- #address-cells = <1>;
- #size-cells = <0>;
-
- tcon0_in: port@0 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <0>;
-
- tcon0_in_be0: endpoint@0 {
- reg = <0>;
- remote-endpoint = <&be0_out_tcon0>;
- };
- };
-
- tcon0_out: port@1 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <1>;
-
- tcon0_out_tve0: endpoint@1 {
- reg = <1>;
- remote-endpoint = <&tve0_in_tcon0>;
- };
- };
- };
- };
-
- mmc0: mmc@01c0f000 {
- compatible = "allwinner,sun5i-a13-mmc";
- reg = <0x01c0f000 0x1000>;
- clocks = <&ccu CLK_AHB_MMC0>, <&ccu CLK_MMC0>;
- clock-names = "ahb", "mmc";
- interrupts = <32>;
- status = "disabled";
- #address-cells = <1>;
- #size-cells = <0>;
- };
-
- mmc1: mmc@01c10000 {
- compatible = "allwinner,sun5i-a13-mmc";
- reg = <0x01c10000 0x1000>;
- clocks = <&ccu CLK_AHB_MMC1>, <&ccu CLK_MMC1>;
- clock-names = "ahb", "mmc";
- interrupts = <33>;
- status = "disabled";
- #address-cells = <1>;
- #size-cells = <0>;
- };
-
- mmc2: mmc@01c11000 {
- compatible = "allwinner,sun5i-a13-mmc";
- reg = <0x01c11000 0x1000>;
- clocks = <&ccu CLK_AHB_MMC2>, <&ccu CLK_MMC2>;
- clock-names = "ahb", "mmc";
- interrupts = <34>;
- status = "disabled";
- #address-cells = <1>;
- #size-cells = <0>;
- };
-
- usb_otg: usb@01c13000 {
- compatible = "allwinner,sun4i-a10-musb";
- reg = <0x01c13000 0x0400>;
- clocks = <&ccu CLK_AHB_OTG>;
- interrupts = <38>;
- interrupt-names = "mc";
- phys = <&usbphy 0>;
- phy-names = "usb";
- extcon = <&usbphy 0>;
- allwinner,sram = <&otg_sram 1>;
- status = "disabled";
-
- dr_mode = "otg";
- };
-
- usbphy: phy@01c13400 {
- #phy-cells = <1>;
- compatible = "allwinner,sun5i-a13-usb-phy";
- reg = <0x01c13400 0x10 0x01c14800 0x4>;
- reg-names = "phy_ctrl", "pmu1";
- clocks = <&ccu CLK_USB_PHY0>;
- clock-names = "usb_phy";
- resets = <&ccu RST_USB_PHY0>, <&ccu RST_USB_PHY1>;
- reset-names = "usb0_reset", "usb1_reset";
- status = "disabled";
- };
-
- ehci0: usb@01c14000 {
- compatible = "allwinner,sun5i-a13-ehci", "generic-ehci";
- reg = <0x01c14000 0x100>;
- interrupts = <39>;
- clocks = <&ccu CLK_AHB_EHCI>;
- phys = <&usbphy 1>;
- phy-names = "usb";
- status = "disabled";
- };
-
- ohci0: usb@01c14400 {
- compatible = "allwinner,sun5i-a13-ohci", "generic-ohci";
- reg = <0x01c14400 0x100>;
- interrupts = <40>;
- clocks = <&ccu CLK_USB_OHCI>, <&ccu CLK_AHB_OHCI>;
- phys = <&usbphy 1>;
- phy-names = "usb";
- status = "disabled";
- };
-
- spi2: spi@01c17000 {
- compatible = "allwinner,sun4i-a10-spi";
- reg = <0x01c17000 0x1000>;
- interrupts = <12>;
- clocks = <&ccu CLK_AHB_SPI2>, <&ccu CLK_SPI2>;
- clock-names = "ahb", "mod";
- dmas = <&dma SUN4I_DMA_DEDICATED 29>,
- <&dma SUN4I_DMA_DEDICATED 28>;
- dma-names = "rx", "tx";
- status = "disabled";
- #address-cells = <1>;
- #size-cells = <0>;
- };
-
- ccu: clock@01c20000 {
- compatible = "nextthing,gr8-ccu";
- reg = <0x01c20000 0x400>;
- clocks = <&osc24M>, <&osc32k>;
- clock-names = "hosc", "losc";
- #clock-cells = <1>;
- #reset-cells = <1>;
- };
-
- intc: interrupt-controller@01c20400 {
- compatible = "allwinner,sun4i-a10-ic";
- reg = <0x01c20400 0x400>;
- interrupt-controller;
- #interrupt-cells = <1>;
- };
-
- pio: pinctrl@01c20800 {
- compatible = "nextthing,gr8-pinctrl";
- reg = <0x01c20800 0x400>;
- interrupts = <28>;
- clocks = <&ccu CLK_APB0_PIO>;
- gpio-controller;
- interrupt-controller;
- #interrupt-cells = <3>;
- #gpio-cells = <3>;
-
- i2c0_pins_a: i2c0@0 {
- pins = "PB0", "PB1";
- function = "i2c0";
- };
-
- i2c1_pins_a: i2c1@0 {
- pins = "PB15", "PB16";
- function = "i2c1";
- };
-
- i2c2_pins_a: i2c2@0 {
- pins = "PB17", "PB18";
- function = "i2c2";
- };
-
- i2s0_data_pins_a: i2s0-data@0 {
- pins = "PB6", "PB7", "PB8", "PB9";
- function = "i2s0";
- };
-
- i2s0_mclk_pins_a: i2s0-mclk@0 {
- pins = "PB5";
- function = "i2s0";
- };
-
- ir0_rx_pins_a: ir0@0 {
- pins = "PB4";
- function = "ir0";
- };
-
- lcd_rgb666_pins: lcd-rgb666@0 {
- pins = "PD2", "PD3", "PD4", "PD5", "PD6", "PD7",
- "PD10", "PD11", "PD12", "PD13", "PD14", "PD15",
- "PD18", "PD19", "PD20", "PD21", "PD22", "PD23",
- "PD24", "PD25", "PD26", "PD27";
- function = "lcd0";
- };
-
- mmc0_pins_a: mmc0@0 {
- pins = "PF0", "PF1", "PF2", "PF3",
- "PF4", "PF5";
- function = "mmc0";
- drive-strength = <30>;
- };
-
- nand_pins_a: nand-base0@0 {
- pins = "PC0", "PC1", "PC2",
- "PC5", "PC8", "PC9", "PC10",
- "PC11", "PC12", "PC13", "PC14",
- "PC15";
- function = "nand0";
- };
-
- nand_cs0_pins_a: nand-cs@0 {
- pins = "PC4";
- function = "nand0";
- };
-
- nand_rb0_pins_a: nand-rb@0 {
- pins = "PC6";
- function = "nand0";
- };
-
- pwm0_pins_a: pwm0@0 {
- pins = "PB2";
- function = "pwm0";
- };
-
- pwm1_pins: pwm1 {
- pins = "PG13";
- function = "pwm1";
- };
-
- spdif_tx_pins_a: spdif@0 {
- pins = "PB10";
- function = "spdif";
- bias-pull-up;
- };
-
- uart1_pins_a: uart1@1 {
- pins = "PG3", "PG4";
- function = "uart1";
- };
-
- uart1_cts_rts_pins_a: uart1-cts-rts@0 {
- pins = "PG5", "PG6";
- function = "uart1";
- };
-
- uart2_pins_a: uart2@1 {
- pins = "PD2", "PD3";
- function = "uart2";
- };
-
- uart2_cts_rts_pins_a: uart2-cts-rts@0 {
- pins = "PD4", "PD5";
- function = "uart2";
- };
-
- uart3_pins_a: uart3@1 {
- pins = "PG9", "PG10";
- function = "uart3";
- };
-
- uart3_cts_rts_pins_a: uart3-cts-rts@0 {
- pins = "PG11", "PG12";
- function = "uart3";
- };
- };
-
pwm: pwm@01c20e00 {
compatible = "allwinner,sun5i-a10s-pwm";
reg = <0x01c20e00 0xc>;
@@ -473,18 +63,6 @@
status = "disabled";
};
- timer@01c20c00 {
- compatible = "allwinner,sun4i-a10-timer";
- reg = <0x01c20c00 0x90>;
- interrupts = <22>;
- clocks = <&ccu CLK_HOSC>;
- };
-
- wdt: watchdog@01c20c90 {
- compatible = "allwinner,sun4i-a10-wdt";
- reg = <0x01c20c90 0x10>;
- };
-
spdif: spdif@01c21000 {
#sound-dai-cells = <0>;
compatible = "allwinner,sun4i-a10-spdif";
@@ -498,15 +76,6 @@
status = "disabled";
};
- ir0: ir@01c21800 {
- compatible = "allwinner,sun4i-a10-ir";
- clocks = <&ccu CLK_APB0_IR>, <&ccu CLK_IR>;
- clock-names = "apb", "ir";
- interrupts = <5>;
- reg = <0x01c21800 0x40>;
- status = "disabled";
- };
-
i2s0: i2s@01c22400 {
#sound-dai-cells = <0>;
compatible = "allwinner,sun4i-a10-i2s";
@@ -519,168 +88,39 @@
dma-names = "rx", "tx";
status = "disabled";
};
+ };
+};
- lradc: lradc@01c22800 {
- compatible = "allwinner,sun4i-a10-lradc-keys";
- reg = <0x01c22800 0x100>;
- interrupts = <31>;
- status = "disabled";
- };
-
- codec: codec@01c22c00 {
- #sound-dai-cells = <0>;
- compatible = "allwinner,sun4i-a10-codec";
- reg = <0x01c22c00 0x40>;
- interrupts = <30>;
- clocks = <&ccu CLK_APB0_CODEC>, <&ccu CLK_CODEC>;
- clock-names = "apb", "codec";
- dmas = <&dma SUN4I_DMA_NORMAL 19>,
- <&dma SUN4I_DMA_NORMAL 19>;
- dma-names = "rx", "tx";
- status = "disabled";
- };
-
- rtp: rtp@01c25000 {
- compatible = "allwinner,sun5i-a13-ts";
- reg = <0x01c25000 0x100>;
- interrupts = <29>;
- #thermal-sensor-cells = <0>;
- };
-
- uart1: serial@01c28400 {
- compatible = "snps,dw-apb-uart";
- reg = <0x01c28400 0x400>;
- interrupts = <2>;
- reg-shift = <2>;
- reg-io-width = <4>;
- clocks = <&ccu CLK_APB1_UART1>;
- status = "disabled";
- };
-
- uart2: serial@01c28800 {
- compatible = "snps,dw-apb-uart";
- reg = <0x01c28800 0x400>;
- interrupts = <3>;
- reg-shift = <2>;
- reg-io-width = <4>;
- clocks = <&ccu CLK_APB1_UART2>;
- status = "disabled";
- };
-
- uart3: serial@01c28c00 {
- compatible = "snps,dw-apb-uart";
- reg = <0x01c28c00 0x400>;
- interrupts = <4>;
- reg-shift = <2>;
- reg-io-width = <4>;
- clocks = <&ccu CLK_APB1_UART3>;
- status = "disabled";
- };
-
- i2c0: i2c@01c2ac00 {
- compatible = "allwinner,sun4i-a10-i2c";
- reg = <0x01c2ac00 0x400>;
- interrupts = <7>;
- clocks = <&ccu CLK_APB1_I2C0>;
- status = "disabled";
- #address-cells = <1>;
- #size-cells = <0>;
- };
-
- i2c1: i2c@01c2b000 {
- compatible = "allwinner,sun4i-a10-i2c";
- reg = <0x01c2b000 0x400>;
- interrupts = <8>;
- clocks = <&ccu CLK_APB1_I2C1>;
- status = "disabled";
- #address-cells = <1>;
- #size-cells = <0>;
- };
-
- i2c2: i2c@01c2b400 {
- compatible = "allwinner,sun4i-a10-i2c";
- reg = <0x01c2b400 0x400>;
- interrupts = <9>;
- clocks = <&ccu CLK_APB1_I2C2>;
- status = "disabled";
- #address-cells = <1>;
- #size-cells = <0>;
- };
-
- timer@01c60000 {
- compatible = "allwinner,sun5i-a13-hstimer";
- reg = <0x01c60000 0x1000>;
- interrupts = <82>, <83>;
- clocks = <&ccu CLK_AHB_HSTIMER>;
- };
-
- fe0: display-frontend@01e00000 {
- compatible = "allwinner,sun5i-a13-display-frontend";
- reg = <0x01e00000 0x20000>;
- interrupts = <47>;
- clocks = <&ccu CLK_AHB_DE_FE>, <&ccu CLK_DE_FE>,
- <&ccu CLK_DRAM_DE_FE>;
- clock-names = "ahb", "mod",
- "ram";
- resets = <&ccu RST_DE_FE>;
- status = "disabled";
-
- ports {
- #address-cells = <1>;
- #size-cells = <0>;
-
- fe0_out: port@1 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <1>;
-
- fe0_out_be0: endpoint@0 {
- reg = <0>;
- remote-endpoint = <&be0_in_fe0>;
- };
- };
- };
- };
-
- be0: display-backend@01e60000 {
- compatible = "allwinner,sun5i-a13-display-backend";
- reg = <0x01e60000 0x10000>;
- clocks = <&ccu CLK_AHB_DE_BE>, <&ccu CLK_DE_BE>,
- <&ccu CLK_DRAM_DE_BE>;
- clock-names = "ahb", "mod",
- "ram";
- resets = <&ccu RST_DE_BE>;
- status = "disabled";
+&ccu {
+ compatible = "nextthing,gr8-ccu";
+};
- assigned-clocks = <&ccu CLK_DE_BE>;
- assigned-clock-rates = <300000000>;
+&pio {
+ compatible = "nextthing,gr8-pinctrl";
- ports {
- #address-cells = <1>;
- #size-cells = <0>;
+ i2s0_data_pins_a: i2s0-data@0 {
+ pins = "PB6", "PB7", "PB8", "PB9";
+ function = "i2s0";
+ };
- be0_in: port@0 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <0>;
+ i2s0_mclk_pins_a: i2s0-mclk@0 {
+ pins = "PB5";
+ function = "i2s0";
+ };
- be0_in_fe0: endpoint@0 {
- reg = <0>;
- remote-endpoint = <&fe0_out_be0>;
- };
- };
+ pwm1_pins: pwm1 {
+ pins = "PG13";
+ function = "pwm1";
+ };
- be0_out: port@1 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <1>;
+ spdif_tx_pins_a: spdif@0 {
+ pins = "PB10";
+ function = "spdif";
+ bias-pull-up;
+ };
- be0_out_tcon0: endpoint@0 {
- reg = <0>;
- remote-endpoint = <&tcon0_in_be0>;
- };
- };
- };
- };
+ uart1_cts_rts_pins_a: uart1-cts-rts@0 {
+ pins = "PG5", "PG6";
+ function = "uart1";
};
};
diff --git a/arch/arm/boot/dts/sun5i-r8-chip.dts b/arch/arm/boot/dts/sun5i-r8-chip.dts
index e86fa46fdd45..d0785602663b 100644
--- a/arch/arm/boot/dts/sun5i-r8-chip.dts
+++ b/arch/arm/boot/dts/sun5i-r8-chip.dts
@@ -128,6 +128,10 @@
#include "axp209.dtsi"
+&ac_power_supply {
+ status = "okay";
+};
+
&i2c1 {
pinctrl-names = "default";
pinctrl-0 = <&i2c1_pins_a>;
@@ -281,7 +285,7 @@
&uart3 {
pinctrl-names = "default";
pinctrl-0 = <&uart3_pins_a>,
- <&uart3_pins_cts_rts_a>;
+ <&uart3_cts_rts_pins_a>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/sun5i-r8.dtsi b/arch/arm/boot/dts/sun5i-r8.dtsi
index 4c1141396c99..de35dbcd1191 100644
--- a/arch/arm/boot/dts/sun5i-r8.dtsi
+++ b/arch/arm/boot/dts/sun5i-r8.dtsi
@@ -45,43 +45,3 @@
#include "sun5i-a13.dtsi"
-/ {
- chosen {
- framebuffer@1 {
- compatible = "allwinner,simple-framebuffer",
- "simple-framebuffer";
- allwinner,pipeline = "de_be0-lcd0-tve0";
- clocks = <&ccu CLK_AHB_TVE>, <&ccu CLK_AHB_LCD>,
- <&ccu CLK_AHB_DE_BE>, <&ccu CLK_DE_BE>,
- <&ccu CLK_TCON_CH1>, <&ccu CLK_DRAM_DE_BE>;
- status = "disabled";
- };
- };
-
- soc@01c00000 {
- tve0: tv-encoder@01c0a000 {
- compatible = "allwinner,sun4i-a10-tv-encoder";
- reg = <0x01c0a000 0x1000>;
- clocks = <&ccu CLK_AHB_TVE>;
- resets = <&ccu RST_TVE>;
- status = "disabled";
-
- port {
- #address-cells = <1>;
- #size-cells = <0>;
-
- tve0_in_tcon0: endpoint@0 {
- reg = <0>;
- remote-endpoint = <&tcon0_out_tve0>;
- };
- };
- };
- };
-};
-
-&tcon0_out {
- tcon0_out_tve0: endpoint@1 {
- reg = <1>;
- remote-endpoint = <&tve0_in_tcon0>;
- };
-};
diff --git a/arch/arm/boot/dts/sun5i.dtsi b/arch/arm/boot/dts/sun5i.dtsi
index a9574a6cd95c..5175f9cc9bed 100644
--- a/arch/arm/boot/dts/sun5i.dtsi
+++ b/arch/arm/boot/dts/sun5i.dtsi
@@ -46,7 +46,6 @@
#include <dt-bindings/clock/sun5i-ccu.h>
#include <dt-bindings/dma/sun4i-a10.h>
-#include <dt-bindings/pinctrl/sun4i-a10.h>
#include <dt-bindings/reset/sun5i-ccu.h>
/ {
@@ -64,6 +63,31 @@
};
};
+ chosen {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ framebuffer@0 {
+ compatible = "allwinner,simple-framebuffer",
+ "simple-framebuffer";
+ allwinner,pipeline = "de_be0-lcd0";
+ clocks = <&ccu CLK_AHB_LCD>, <&ccu CLK_AHB_DE_BE>, <&ccu CLK_DE_BE>,
+ <&ccu CLK_TCON_CH0>, <&ccu CLK_DRAM_DE_BE>;
+ status = "disabled";
+ };
+
+ framebuffer@1 {
+ compatible = "allwinner,simple-framebuffer",
+ "simple-framebuffer";
+ allwinner,pipeline = "de_be0-lcd0-tve0";
+ clocks = <&ccu CLK_AHB_TVE>, <&ccu CLK_AHB_LCD>,
+ <&ccu CLK_AHB_DE_BE>, <&ccu CLK_DE_BE>,
+ <&ccu CLK_TCON_CH1>, <&ccu CLK_DRAM_DE_BE>;
+ status = "disabled";
+ };
+ };
+
clocks {
#address-cells = <1>;
#size-cells = <1>;
@@ -105,6 +129,12 @@
ranges = <0 0x00000000 0xc000>;
};
+ emac_sram: sram-section@8000 {
+ compatible = "allwinner,sun4i-a10-sram-a3-a4";
+ reg = <0x8000 0x4000>;
+ status = "disabled";
+ };
+
sram_d: sram@00010000 {
compatible = "mmio-sram";
reg = <0x00010000 0x1000>;
@@ -128,6 +158,19 @@
#dma-cells = <2>;
};
+ nfc: nand@01c03000 {
+ compatible = "allwinner,sun4i-a10-nand";
+ reg = <0x01c03000 0x1000>;
+ interrupts = <37>;
+ clocks = <&ccu CLK_AHB_NAND>, <&ccu CLK_NAND>;
+ clock-names = "ahb", "mod";
+ dmas = <&dma SUN4I_DMA_DEDICATED 3>;
+ dma-names = "rxtx";
+ status = "disabled";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
spi0: spi@01c05000 {
compatible = "allwinner,sun4i-a10-spi";
reg = <0x01c05000 0x1000>;
@@ -156,6 +199,84 @@
#size-cells = <0>;
};
+ tve0: tv-encoder@01c0a000 {
+ compatible = "allwinner,sun4i-a10-tv-encoder";
+ reg = <0x01c0a000 0x1000>;
+ clocks = <&ccu CLK_AHB_TVE>;
+ resets = <&ccu RST_TVE>;
+ status = "disabled";
+
+ port {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ tve0_in_tcon0: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&tcon0_out_tve0>;
+ };
+ };
+ };
+
+ emac: ethernet@01c0b000 {
+ compatible = "allwinner,sun4i-a10-emac";
+ reg = <0x01c0b000 0x1000>;
+ interrupts = <55>;
+ clocks = <&ccu CLK_AHB_EMAC>;
+ allwinner,sram = <&emac_sram 1>;
+ status = "disabled";
+ };
+
+ mdio: mdio@01c0b080 {
+ compatible = "allwinner,sun4i-a10-mdio";
+ reg = <0x01c0b080 0x14>;
+ status = "disabled";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ tcon0: lcd-controller@01c0c000 {
+ compatible = "allwinner,sun5i-a13-tcon";
+ reg = <0x01c0c000 0x1000>;
+ interrupts = <44>;
+ resets = <&ccu RST_LCD>;
+ reset-names = "lcd";
+ clocks = <&ccu CLK_AHB_LCD>,
+ <&ccu CLK_TCON_CH0>,
+ <&ccu CLK_TCON_CH1>;
+ clock-names = "ahb",
+ "tcon-ch0",
+ "tcon-ch1";
+ clock-output-names = "tcon-pixel-clock";
+ status = "disabled";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ tcon0_in: port@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+
+ tcon0_in_be0: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&be0_out_tcon0>;
+ };
+ };
+
+ tcon0_out: port@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+
+ tcon0_out_tve0: endpoint@1 {
+ reg = <1>;
+ remote-endpoint = <&tve0_in_tcon0>;
+ };
+ };
+ };
+ };
+
mmc0: mmc@01c0f000 {
compatible = "allwinner,sun5i-a13-mmc";
reg = <0x01c0f000 0x1000>;
@@ -273,6 +394,15 @@
#interrupt-cells = <3>;
#gpio-cells = <3>;
+ emac_pins_a: emac0@0 {
+ pins = "PD6", "PD7", "PD10",
+ "PD11", "PD12", "PD13", "PD14",
+ "PD15", "PD18", "PD19", "PD20",
+ "PD21", "PD22", "PD23", "PD24",
+ "PD25", "PD26", "PD27";
+ function = "emac";
+ };
+
i2c0_pins_a: i2c0@0 {
pins = "PB0", "PB1";
function = "i2c0";
@@ -288,6 +418,11 @@
function = "i2c2";
};
+ ir0_rx_pins_a: ir0@0 {
+ pins = "PB4";
+ function = "ir0";
+ };
+
lcd_rgb565_pins: lcd_rgb565@0 {
pins = "PD3", "PD4", "PD5", "PD6", "PD7",
"PD10", "PD11", "PD12", "PD13", "PD14", "PD15",
@@ -296,6 +431,14 @@
function = "lcd0";
};
+ lcd_rgb666_pins: lcd_rgb666@0 {
+ pins = "PD2", "PD3", "PD4", "PD5", "PD6", "PD7",
+ "PD10", "PD11", "PD12", "PD13", "PD14", "PD15",
+ "PD18", "PD19", "PD20", "PD21", "PD22", "PD23",
+ "PD24", "PD25", "PD26", "PD27";
+ function = "lcd0";
+ };
+
mmc0_pins_a: mmc0@0 {
pins = "PF0", "PF1", "PF2", "PF3",
"PF4", "PF5";
@@ -321,6 +464,24 @@
bias-pull-up;
};
+ nand_pins_a: nand-base0@0 {
+ pins = "PC0", "PC1", "PC2",
+ "PC5", "PC8", "PC9", "PC10",
+ "PC11", "PC12", "PC13", "PC14",
+ "PC15";
+ function = "nand0";
+ };
+
+ nand_cs0_pins_a: nand-cs@0 {
+ pins = "PC4";
+ function = "nand0";
+ };
+
+ nand_rb0_pins_a: nand-rb@0 {
+ pins = "PC6";
+ function = "nand0";
+ };
+
spi2_pins_a: spi2@0 {
pins = "PE1", "PE2", "PE3";
function = "spi2";
@@ -331,12 +492,32 @@
function = "spi2";
};
+ uart1_pins_a: uart1@0 {
+ pins = "PE10", "PE11";
+ function = "uart1";
+ };
+
+ uart1_pins_b: uart1@1 {
+ pins = "PG3", "PG4";
+ function = "uart1";
+ };
+
+ uart2_pins_a: uart2@0 {
+ pins = "PD2", "PD3";
+ function = "uart2";
+ };
+
+ uart2_cts_rts_pins_a: uart2-cts-rts@0 {
+ pins = "PD4", "PD5";
+ function = "uart2";
+ };
+
uart3_pins_a: uart3@0 {
pins = "PG9", "PG10";
function = "uart3";
};
- uart3_pins_cts_rts_a: uart3-cts-rts@0 {
+ uart3_cts_rts_pins_a: uart3-cts-rts@0 {
pins = "PG11", "PG12";
function = "uart3";
};
@@ -359,6 +540,15 @@
reg = <0x01c20c90 0x10>;
};
+ ir0: ir@01c21800 {
+ compatible = "allwinner,sun4i-a10-ir";
+ clocks = <&ccu CLK_APB0_IR>, <&ccu CLK_IR>;
+ clock-names = "apb", "ir";
+ interrupts = <5>;
+ reg = <0x01c21800 0x40>;
+ status = "disabled";
+ };
+
lradc: lradc@01c22800 {
compatible = "allwinner,sun4i-a10-lradc-keys";
reg = <0x01c22800 0x100>;
@@ -391,6 +581,16 @@
#thermal-sensor-cells = <0>;
};
+ uart0: serial@01c28000 {
+ compatible = "snps,dw-apb-uart";
+ reg = <0x01c28000 0x400>;
+ interrupts = <1>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ clocks = <&ccu CLK_APB1_UART0>;
+ status = "disabled";
+ };
+
uart1: serial@01c28400 {
compatible = "snps,dw-apb-uart";
reg = <0x01c28400 0x400>;
@@ -401,6 +601,16 @@
status = "disabled";
};
+ uart2: serial@01c28800 {
+ compatible = "snps,dw-apb-uart";
+ reg = <0x01c28800 0x400>;
+ interrupts = <3>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ clocks = <&ccu CLK_APB1_UART2>;
+ status = "disabled";
+ };
+
uart3: serial@01c28c00 {
compatible = "snps,dw-apb-uart";
reg = <0x01c28c00 0x400>;
@@ -447,5 +657,75 @@
interrupts = <82>, <83>;
clocks = <&ccu CLK_AHB_HSTIMER>;
};
+
+ fe0: display-frontend@01e00000 {
+ compatible = "allwinner,sun5i-a13-display-frontend";
+ reg = <0x01e00000 0x20000>;
+ interrupts = <47>;
+ clocks = <&ccu CLK_AHB_DE_FE>, <&ccu CLK_DE_FE>,
+ <&ccu CLK_DRAM_DE_FE>;
+ clock-names = "ahb", "mod",
+ "ram";
+ resets = <&ccu RST_DE_FE>;
+ status = "disabled";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ fe0_out: port@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+
+ fe0_out_be0: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&be0_in_fe0>;
+ };
+ };
+ };
+ };
+
+ be0: display-backend@01e60000 {
+ compatible = "allwinner,sun5i-a13-display-backend";
+ reg = <0x01e60000 0x10000>;
+ interrupts = <47>;
+ clocks = <&ccu CLK_AHB_DE_BE>, <&ccu CLK_DE_BE>,
+ <&ccu CLK_DRAM_DE_BE>;
+ clock-names = "ahb", "mod",
+ "ram";
+ resets = <&ccu RST_DE_BE>;
+ status = "disabled";
+
+ assigned-clocks = <&ccu CLK_DE_BE>;
+ assigned-clock-rates = <300000000>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ be0_in: port@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+
+ be0_in_fe0: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&fe0_out_be0>;
+ };
+ };
+
+ be0_out: port@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+
+ be0_out_tcon0: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&tcon0_in_be0>;
+ };
+ };
+ };
+ };
};
};
diff --git a/arch/arm/boot/dts/sun6i-a31-app4-evb1.dts b/arch/arm/boot/dts/sun6i-a31-app4-evb1.dts
index effbdc766938..7f34323a668c 100644
--- a/arch/arm/boot/dts/sun6i-a31-app4-evb1.dts
+++ b/arch/arm/boot/dts/sun6i-a31-app4-evb1.dts
@@ -47,7 +47,6 @@
#include "sunxi-common-regulators.dtsi"
#include <dt-bindings/gpio/gpio.h>
-#include <dt-bindings/pinctrl/sun4i-a10.h>
/ {
model = "Allwinner A31 APP4 EVB1 Evaluation Board";
diff --git a/arch/arm/boot/dts/sun6i-a31-colombus.dts b/arch/arm/boot/dts/sun6i-a31-colombus.dts
index f5ececd45bc0..85eff0307ca4 100644
--- a/arch/arm/boot/dts/sun6i-a31-colombus.dts
+++ b/arch/arm/boot/dts/sun6i-a31-colombus.dts
@@ -47,7 +47,6 @@
#include "sunxi-common-regulators.dtsi"
#include <dt-bindings/gpio/gpio.h>
-#include <dt-bindings/pinctrl/sun4i-a10.h>
/ {
model = "WITS A31 Colombus Evaluation Board";
diff --git a/arch/arm/boot/dts/sun6i-a31-hummingbird.dts b/arch/arm/boot/dts/sun6i-a31-hummingbird.dts
index f094eeb6c499..d4f74f476f25 100644
--- a/arch/arm/boot/dts/sun6i-a31-hummingbird.dts
+++ b/arch/arm/boot/dts/sun6i-a31-hummingbird.dts
@@ -47,7 +47,6 @@
#include "sunxi-common-regulators.dtsi"
#include <dt-bindings/gpio/gpio.h>
-#include <dt-bindings/pinctrl/sun4i-a10.h>
/ {
model = "Merrii A31 Hummingbird";
diff --git a/arch/arm/boot/dts/sun6i-a31-i7.dts b/arch/arm/boot/dts/sun6i-a31-i7.dts
index 2bc57d2dcd80..010a84c7c012 100644
--- a/arch/arm/boot/dts/sun6i-a31-i7.dts
+++ b/arch/arm/boot/dts/sun6i-a31-i7.dts
@@ -45,7 +45,6 @@
#include "sunxi-common-regulators.dtsi"
#include <dt-bindings/gpio/gpio.h>
-#include <dt-bindings/pinctrl/sun4i-a10.h>
/ {
model = "Mele I7 Quad top set box";
diff --git a/arch/arm/boot/dts/sun6i-a31-m9.dts b/arch/arm/boot/dts/sun6i-a31-m9.dts
index 8af5b667a46d..50605fd4449e 100644
--- a/arch/arm/boot/dts/sun6i-a31-m9.dts
+++ b/arch/arm/boot/dts/sun6i-a31-m9.dts
@@ -45,7 +45,6 @@
#include "sunxi-common-regulators.dtsi"
#include <dt-bindings/gpio/gpio.h>
-#include <dt-bindings/pinctrl/sun4i-a10.h>
/ {
model = "Mele M9 top set box";
diff --git a/arch/arm/boot/dts/sun6i-a31-mele-a1000g-quad.dts b/arch/arm/boot/dts/sun6i-a31-mele-a1000g-quad.dts
index bf0f5831126f..5219556e9f73 100644
--- a/arch/arm/boot/dts/sun6i-a31-mele-a1000g-quad.dts
+++ b/arch/arm/boot/dts/sun6i-a31-mele-a1000g-quad.dts
@@ -45,7 +45,6 @@
#include "sunxi-common-regulators.dtsi"
#include <dt-bindings/gpio/gpio.h>
-#include <dt-bindings/pinctrl/sun4i-a10.h>
/ {
model = "Mele A1000G Quad top set box";
diff --git a/arch/arm/boot/dts/sun6i-a31.dtsi b/arch/arm/boot/dts/sun6i-a31.dtsi
index a4b96184cac1..9c999d3788f6 100644
--- a/arch/arm/boot/dts/sun6i-a31.dtsi
+++ b/arch/arm/boot/dts/sun6i-a31.dtsi
@@ -48,7 +48,6 @@
#include <dt-bindings/thermal/thermal.h>
#include <dt-bindings/clock/sun6i-a31-ccu.h>
-#include <dt-bindings/pinctrl/sun4i-a10.h>
#include <dt-bindings/reset/sun6i-a31-ccu.h>
/ {
diff --git a/arch/arm/boot/dts/sun6i-a31s-cs908.dts b/arch/arm/boot/dts/sun6i-a31s-cs908.dts
index 5e8f8c4f2b30..75e578159c3a 100644
--- a/arch/arm/boot/dts/sun6i-a31s-cs908.dts
+++ b/arch/arm/boot/dts/sun6i-a31s-cs908.dts
@@ -43,8 +43,6 @@
/dts-v1/;
#include "sun6i-a31s.dtsi"
-#include <dt-bindings/pinctrl/sun4i-a10.h>
-
/ {
model = "CSQ CS908 top set box";
compatible = "csq,cs908", "allwinner,sun6i-a31s";
diff --git a/arch/arm/boot/dts/sun6i-a31s-primo81.dts b/arch/arm/boot/dts/sun6i-a31s-primo81.dts
index 2238eda318f6..f3712753fa42 100644
--- a/arch/arm/boot/dts/sun6i-a31s-primo81.dts
+++ b/arch/arm/boot/dts/sun6i-a31s-primo81.dts
@@ -48,7 +48,6 @@
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/input/input.h>
-#include <dt-bindings/pinctrl/sun4i-a10.h>
/ {
model = "MSI Primo81 tablet";
diff --git a/arch/arm/boot/dts/sun6i-a31s-sina31s-core.dtsi b/arch/arm/boot/dts/sun6i-a31s-sina31s-core.dtsi
index 4ec0c8679b2e..d7325bc4eeb4 100644
--- a/arch/arm/boot/dts/sun6i-a31s-sina31s-core.dtsi
+++ b/arch/arm/boot/dts/sun6i-a31s-sina31s-core.dtsi
@@ -45,7 +45,6 @@
#include "sunxi-common-regulators.dtsi"
#include <dt-bindings/gpio/gpio.h>
-#include <dt-bindings/pinctrl/sun4i-a10.h>
/ {
model = "Sinlinx SinA31s Core Board";
diff --git a/arch/arm/boot/dts/sun6i-a31s-sina31s.dts b/arch/arm/boot/dts/sun6i-a31s-sina31s.dts
index 7ff68bdd7109..b3d98222bd81 100644
--- a/arch/arm/boot/dts/sun6i-a31s-sina31s.dts
+++ b/arch/arm/boot/dts/sun6i-a31s-sina31s.dts
@@ -63,6 +63,23 @@
gpios = <&pio 7 13 GPIO_ACTIVE_HIGH>; /* PH13 */
};
};
+
+ sound {
+ compatible = "simple-audio-card";
+ simple-audio-card,name = "On-board SPDIF";
+ simple-audio-card,cpu {
+ sound-dai = <&spdif>;
+ };
+
+ simple-audio-card,codec {
+ sound-dai = <&spdif_out>;
+ };
+ };
+
+ spdif_out: spdif-out {
+ #sound-dai-cells = <0>;
+ compatible = "linux,spdif-dit";
+ };
};
&codec {
@@ -153,6 +170,12 @@
regulator-name = "vcc-gmac-phy";
};
+&spdif {
+ pinctrl-names = "default";
+ pinctrl-0 = <&spdif_pins_a>;
+ status = "okay";
+};
+
&usb_otg {
dr_mode = "peripheral";
status = "okay";
diff --git a/arch/arm/boot/dts/sun6i-a31s-sinovoip-bpi-m2.dts b/arch/arm/boot/dts/sun6i-a31s-sinovoip-bpi-m2.dts
index 3bd862bf82a9..bdfdce8ca6ba 100644
--- a/arch/arm/boot/dts/sun6i-a31s-sinovoip-bpi-m2.dts
+++ b/arch/arm/boot/dts/sun6i-a31s-sinovoip-bpi-m2.dts
@@ -86,6 +86,10 @@
};
};
+&cpu0 {
+ cpu-supply = <&reg_dcdc3>;
+};
+
&ehci0 {
status = "okay";
};
@@ -151,6 +155,17 @@
status = "okay";
};
+&p2wi {
+ status = "okay";
+
+ axp22x: pmic@68 {
+ compatible = "x-powers,axp221";
+ reg = <0x68>;
+ interrupt-parent = <&nmi_intc>;
+ interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
+ };
+};
+
&pio {
gmac_phy_reset_pin_bpi_m2: gmac_phy_reset_pin@0 {
pins = "PA21";
@@ -176,6 +191,48 @@
};
};
+#include "axp22x.dtsi"
+
+&reg_dc5ldo {
+ regulator-min-microvolt = <700000>;
+ regulator-max-microvolt = <1320000>;
+ regulator-name = "vdd-cpus";
+};
+
+&reg_dcdc1 {
+ regulator-always-on;
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-name = "vdd-3v0";
+};
+
+&reg_dcdc2 {
+ regulator-min-microvolt = <700000>;
+ regulator-max-microvolt = <1320000>;
+ regulator-name = "vdd-gpu";
+};
+
+&reg_dcdc3 {
+ regulator-always-on;
+ regulator-min-microvolt = <700000>;
+ regulator-max-microvolt = <1320000>;
+ regulator-name = "vdd-cpu";
+};
+
+&reg_dcdc4 {
+ regulator-always-on;
+ regulator-min-microvolt = <700000>;
+ regulator-max-microvolt = <1320000>;
+ regulator-name = "vdd-sys-dll";
+};
+
+&reg_dcdc5 {
+ regulator-always-on;
+ regulator-min-microvolt = <1500000>;
+ regulator-max-microvolt = <1500000>;
+ regulator-name = "vcc-dram";
+};
+
&uart0 {
pinctrl-names = "default";
pinctrl-0 = <&uart0_pins_a>;
diff --git a/arch/arm/boot/dts/sun6i-a31s-yones-toptech-bs1078-v2.dts b/arch/arm/boot/dts/sun6i-a31s-yones-toptech-bs1078-v2.dts
index 154ebf5082ed..f3edf9ca435c 100644
--- a/arch/arm/boot/dts/sun6i-a31s-yones-toptech-bs1078-v2.dts
+++ b/arch/arm/boot/dts/sun6i-a31s-yones-toptech-bs1078-v2.dts
@@ -45,7 +45,6 @@
#include "sunxi-common-regulators.dtsi"
#include <dt-bindings/gpio/gpio.h>
-#include <dt-bindings/pinctrl/sun4i-a10.h>
/ {
model = "Yones TopTech BS1078 v2 Tablet";
diff --git a/arch/arm/boot/dts/sun6i-reference-design-tablet.dtsi b/arch/arm/boot/dts/sun6i-reference-design-tablet.dtsi
index edaba5f904fd..3cc4046b904a 100644
--- a/arch/arm/boot/dts/sun6i-reference-design-tablet.dtsi
+++ b/arch/arm/boot/dts/sun6i-reference-design-tablet.dtsi
@@ -44,7 +44,6 @@
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/input/input.h>
-#include <dt-bindings/pinctrl/sun4i-a10.h>
/ {
aliases {
diff --git a/arch/arm/boot/dts/sun7i-a20-bananapi.dts b/arch/arm/boot/dts/sun7i-a20-bananapi.dts
index 91f2e5f9efcb..ed2f35adf542 100644
--- a/arch/arm/boot/dts/sun7i-a20-bananapi.dts
+++ b/arch/arm/boot/dts/sun7i-a20-bananapi.dts
@@ -48,7 +48,6 @@
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/interrupt-controller/irq.h>
-#include <dt-bindings/pinctrl/sun4i-a10.h>
/ {
model = "LeMaker Banana Pi";
diff --git a/arch/arm/boot/dts/sun7i-a20-cubieboard2.dts b/arch/arm/boot/dts/sun7i-a20-cubieboard2.dts
index 4dc1e10f88c4..a2eab7aa80e0 100644
--- a/arch/arm/boot/dts/sun7i-a20-cubieboard2.dts
+++ b/arch/arm/boot/dts/sun7i-a20-cubieboard2.dts
@@ -48,7 +48,6 @@
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/interrupt-controller/irq.h>
-#include <dt-bindings/pinctrl/sun4i-a10.h>
/ {
model = "Cubietech Cubieboard2";
diff --git a/arch/arm/boot/dts/sun7i-a20-cubietruck.dts b/arch/arm/boot/dts/sun7i-a20-cubietruck.dts
index f019aa3fe96d..102903e83bd2 100644
--- a/arch/arm/boot/dts/sun7i-a20-cubietruck.dts
+++ b/arch/arm/boot/dts/sun7i-a20-cubietruck.dts
@@ -48,7 +48,6 @@
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/interrupt-controller/irq.h>
-#include <dt-bindings/pinctrl/sun4i-a10.h>
/ {
model = "Cubietech Cubietruck";
@@ -268,6 +267,10 @@
#include "axp209.dtsi"
+&ac_power_supply {
+ status = "okay";
+};
+
&reg_dcdc2 {
regulator-always-on;
regulator-min-microvolt = <1000000>;
@@ -324,6 +327,10 @@
status = "okay";
};
+&usb_power_supply {
+ status = "okay";
+};
+
&usbphy {
pinctrl-names = "default";
pinctrl-0 = <&usb0_id_detect_pin>, <&usb0_vbus_detect_pin>;
diff --git a/arch/arm/boot/dts/sun7i-a20-hummingbird.dts b/arch/arm/boot/dts/sun7i-a20-hummingbird.dts
index e921ba42f170..99c00b9a1546 100644
--- a/arch/arm/boot/dts/sun7i-a20-hummingbird.dts
+++ b/arch/arm/boot/dts/sun7i-a20-hummingbird.dts
@@ -48,7 +48,6 @@
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/interrupt-controller/irq.h>
-#include <dt-bindings/pinctrl/sun4i-a10.h>
/ {
model = "Merrii A20 Hummingbird";
diff --git a/arch/arm/boot/dts/sun7i-a20-i12-tvbox.dts b/arch/arm/boot/dts/sun7i-a20-i12-tvbox.dts
index 385fd8232ae0..4da49717da21 100644
--- a/arch/arm/boot/dts/sun7i-a20-i12-tvbox.dts
+++ b/arch/arm/boot/dts/sun7i-a20-i12-tvbox.dts
@@ -46,7 +46,6 @@
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/interrupt-controller/irq.h>
-#include <dt-bindings/pinctrl/sun4i-a10.h>
/ {
model = "I12 / Q5 / QT840A A20 tvbox";
diff --git a/arch/arm/boot/dts/sun7i-a20-icnova-swac.dts b/arch/arm/boot/dts/sun7i-a20-icnova-swac.dts
index f5b5325a70e2..28d3abbdc2d4 100644
--- a/arch/arm/boot/dts/sun7i-a20-icnova-swac.dts
+++ b/arch/arm/boot/dts/sun7i-a20-icnova-swac.dts
@@ -46,7 +46,6 @@
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/interrupt-controller/irq.h>
-#include <dt-bindings/pinctrl/sun4i-a10.h>
/ {
model = "ICnova-A20 SWAC";
diff --git a/arch/arm/boot/dts/sun7i-a20-lamobo-r1.dts b/arch/arm/boot/dts/sun7i-a20-lamobo-r1.dts
index bbf1c8cbaac6..96bb0bc198ba 100644
--- a/arch/arm/boot/dts/sun7i-a20-lamobo-r1.dts
+++ b/arch/arm/boot/dts/sun7i-a20-lamobo-r1.dts
@@ -46,7 +46,6 @@
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/interrupt-controller/irq.h>
-#include <dt-bindings/pinctrl/sun4i-a10.h>
/ {
model = "Lamobo R1";
diff --git a/arch/arm/boot/dts/sun7i-a20-m3.dts b/arch/arm/boot/dts/sun7i-a20-m3.dts
index 0e074bd0e8c9..86f69813683e 100644
--- a/arch/arm/boot/dts/sun7i-a20-m3.dts
+++ b/arch/arm/boot/dts/sun7i-a20-m3.dts
@@ -48,7 +48,6 @@
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/interrupt-controller/irq.h>
-#include <dt-bindings/pinctrl/sun4i-a10.h>
/ {
model = "Mele M3";
diff --git a/arch/arm/boot/dts/sun7i-a20-mk808c.dts b/arch/arm/boot/dts/sun7i-a20-mk808c.dts
index 97d7a8b65a03..c4ee30709f3a 100644
--- a/arch/arm/boot/dts/sun7i-a20-mk808c.dts
+++ b/arch/arm/boot/dts/sun7i-a20-mk808c.dts
@@ -53,7 +53,6 @@
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/interrupt-controller/irq.h>
-#include <dt-bindings/pinctrl/sun4i-a10.h>
/ {
model = "mk808c";
diff --git a/arch/arm/boot/dts/sun7i-a20-olimex-som-evb.dts b/arch/arm/boot/dts/sun7i-a20-olimex-som-evb.dts
index a1450c10b08e..1af5b46862cb 100644
--- a/arch/arm/boot/dts/sun7i-a20-olimex-som-evb.dts
+++ b/arch/arm/boot/dts/sun7i-a20-olimex-som-evb.dts
@@ -48,7 +48,6 @@
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/input/input.h>
#include <dt-bindings/interrupt-controller/irq.h>
-#include <dt-bindings/pinctrl/sun4i-a10.h>
/ {
model = "Olimex A20-Olimex-SOM-EVB";
diff --git a/arch/arm/boot/dts/sun7i-a20-olinuxino-lime.dts b/arch/arm/boot/dts/sun7i-a20-olinuxino-lime.dts
index 1297432c2802..dcd0f7a0dffa 100644
--- a/arch/arm/boot/dts/sun7i-a20-olinuxino-lime.dts
+++ b/arch/arm/boot/dts/sun7i-a20-olinuxino-lime.dts
@@ -49,7 +49,6 @@
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/interrupt-controller/irq.h>
-#include <dt-bindings/pinctrl/sun4i-a10.h>
/ {
model = "Olimex A20-OLinuXino-LIME";
diff --git a/arch/arm/boot/dts/sun7i-a20-olinuxino-lime2.dts b/arch/arm/boot/dts/sun7i-a20-olinuxino-lime2.dts
index 71cca5360728..e7d45425758c 100644
--- a/arch/arm/boot/dts/sun7i-a20-olinuxino-lime2.dts
+++ b/arch/arm/boot/dts/sun7i-a20-olinuxino-lime2.dts
@@ -46,7 +46,6 @@
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/interrupt-controller/irq.h>
-#include <dt-bindings/pinctrl/sun4i-a10.h>
/ {
model = "Olimex A20-OLinuXino-LIME2";
diff --git a/arch/arm/boot/dts/sun7i-a20-olinuxino-micro.dts b/arch/arm/boot/dts/sun7i-a20-olinuxino-micro.dts
index 223fbd9f7c62..def0ad8395bb 100644
--- a/arch/arm/boot/dts/sun7i-a20-olinuxino-micro.dts
+++ b/arch/arm/boot/dts/sun7i-a20-olinuxino-micro.dts
@@ -49,7 +49,6 @@
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/input/input.h>
#include <dt-bindings/interrupt-controller/irq.h>
-#include <dt-bindings/pinctrl/sun4i-a10.h>
/ {
model = "Olimex A20-Olinuxino Micro";
@@ -85,6 +84,14 @@
status = "okay";
};
+&codec {
+ status = "okay";
+};
+
+&cpu0 {
+ cpu-supply = <&reg_dcdc2>;
+};
+
&ehci0 {
status = "okay";
};
@@ -111,13 +118,9 @@
status = "okay";
axp209: pmic@34 {
- compatible = "x-powers,axp209";
reg = <0x34>;
interrupt-parent = <&nmi_intc>;
interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
-
- interrupt-controller;
- #interrupt-cells = <1>;
};
};
@@ -251,6 +254,29 @@
};
};
+#include "axp209.dtsi"
+
+&reg_dcdc2 {
+ regulator-always-on;
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1400000>;
+ regulator-name = "vdd-cpu";
+};
+
+&reg_dcdc3 {
+ regulator-always-on;
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1400000>;
+ regulator-name = "vdd-int-dll";
+};
+
+&reg_ldo2 {
+ regulator-always-on;
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-name = "avcc";
+};
+
&reg_ahci_5v {
status = "okay";
};
diff --git a/arch/arm/boot/dts/sun7i-a20-orangepi-mini.dts b/arch/arm/boot/dts/sun7i-a20-orangepi-mini.dts
index a74265749227..7af4c8fc1865 100644
--- a/arch/arm/boot/dts/sun7i-a20-orangepi-mini.dts
+++ b/arch/arm/boot/dts/sun7i-a20-orangepi-mini.dts
@@ -48,7 +48,6 @@
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/interrupt-controller/irq.h>
-#include <dt-bindings/pinctrl/sun4i-a10.h>
/ {
model = "Orange Pi Mini";
diff --git a/arch/arm/boot/dts/sun7i-a20-orangepi.dts b/arch/arm/boot/dts/sun7i-a20-orangepi.dts
index 3de980c8f8ff..0a8d4a05e8a0 100644
--- a/arch/arm/boot/dts/sun7i-a20-orangepi.dts
+++ b/arch/arm/boot/dts/sun7i-a20-orangepi.dts
@@ -48,7 +48,6 @@
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/interrupt-controller/irq.h>
-#include <dt-bindings/pinctrl/sun4i-a10.h>
/ {
model = "Orange Pi";
diff --git a/arch/arm/boot/dts/sun7i-a20-pcduino3.dts b/arch/arm/boot/dts/sun7i-a20-pcduino3.dts
index 4599f98a3aee..7c96b53b76bf 100644
--- a/arch/arm/boot/dts/sun7i-a20-pcduino3.dts
+++ b/arch/arm/boot/dts/sun7i-a20-pcduino3.dts
@@ -48,7 +48,6 @@
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/input/input.h>
#include <dt-bindings/interrupt-controller/irq.h>
-#include <dt-bindings/pinctrl/sun4i-a10.h>
/ {
model = "LinkSprite pcDuino3";
diff --git a/arch/arm/boot/dts/sun7i-a20.dtsi b/arch/arm/boot/dts/sun7i-a20.dtsi
index 2db97fc820dd..93aa55970bd7 100644
--- a/arch/arm/boot/dts/sun7i-a20.dtsi
+++ b/arch/arm/boot/dts/sun7i-a20.dtsi
@@ -49,7 +49,6 @@
#include <dt-bindings/clock/sun4i-a10-pll2.h>
#include <dt-bindings/dma/sun4i-a10.h>
-#include <dt-bindings/pinctrl/sun4i-a10.h>
/ {
interrupt-parent = <&gic>;
@@ -1096,6 +1095,11 @@
#interrupt-cells = <3>;
#gpio-cells = <3>;
+ can0_pins_a: can0@0 {
+ pins = "PH20", "PH21";
+ function = "can";
+ };
+
clk_out_a_pins_a: clk_out_a@0 {
pins = "PI12";
function = "clk_out_a";
@@ -1538,6 +1542,22 @@
status = "disabled";
};
+ ps20: ps2@01c2a000 {
+ compatible = "allwinner,sun4i-a10-ps2";
+ reg = <0x01c2a000 0x400>;
+ interrupts = <GIC_SPI 62 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&apb1_gates 6>;
+ status = "disabled";
+ };
+
+ ps21: ps2@01c2a400 {
+ compatible = "allwinner,sun4i-a10-ps2";
+ reg = <0x01c2a400 0x400>;
+ interrupts = <GIC_SPI 63 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&apb1_gates 7>;
+ status = "disabled";
+ };
+
i2c0: i2c@01c2ac00 {
compatible = "allwinner,sun7i-a20-i2c",
"allwinner,sun4i-a10-i2c";
@@ -1582,6 +1602,15 @@
#size-cells = <0>;
};
+ can0: can@01c2bc00 {
+ compatible = "allwinner,sun7i-a20-can",
+ "allwinner,sun4i-a10-can";
+ reg = <0x01c2bc00 0x400>;
+ interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&apb1_gates 4>;
+ status = "disabled";
+ };
+
i2c4: i2c@01c2c000 {
compatible = "allwinner,sun7i-a20-i2c",
"allwinner,sun4i-a10-i2c";
@@ -1629,20 +1658,5 @@
interrupts = <GIC_PPI 9 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>;
};
- ps20: ps2@01c2a000 {
- compatible = "allwinner,sun4i-a10-ps2";
- reg = <0x01c2a000 0x400>;
- interrupts = <GIC_SPI 62 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&apb1_gates 6>;
- status = "disabled";
- };
-
- ps21: ps2@01c2a400 {
- compatible = "allwinner,sun4i-a10-ps2";
- reg = <0x01c2a400 0x400>;
- interrupts = <GIC_SPI 63 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&apb1_gates 7>;
- status = "disabled";
- };
};
};
diff --git a/arch/arm/boot/dts/sun8i-a23-a33.dtsi b/arch/arm/boot/dts/sun8i-a23-a33.dtsi
index 8a3ed21cb7bc..a8b978d0f35b 100644
--- a/arch/arm/boot/dts/sun8i-a23-a33.dtsi
+++ b/arch/arm/boot/dts/sun8i-a23-a33.dtsi
@@ -47,7 +47,6 @@
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/clock/sun8i-a23-a33-ccu.h>
-#include <dt-bindings/pinctrl/sun4i-a10.h>
#include <dt-bindings/reset/sun8i-a23-a33-ccu.h>
/ {
@@ -493,6 +492,7 @@
clocks = <&ccu CLK_BUS_GPU>, <&ccu CLK_GPU>;
clock-names = "bus", "core";
resets = <&ccu RST_BUS_GPU>;
+ #cooling-cells = <2>;
assigned-clocks = <&ccu CLK_GPU>;
assigned-clock-rates = <384000000>;
diff --git a/arch/arm/boot/dts/sun8i-a23-evb.dts b/arch/arm/boot/dts/sun8i-a23-evb.dts
index c21f5b1b255e..87289a60c520 100644
--- a/arch/arm/boot/dts/sun8i-a23-evb.dts
+++ b/arch/arm/boot/dts/sun8i-a23-evb.dts
@@ -48,7 +48,6 @@
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/input/input.h>
-#include <dt-bindings/pinctrl/sun4i-a10.h>
/ {
model = "Allwinner A23 Evaluation Board";
diff --git a/arch/arm/boot/dts/sun8i-a23-q8-tablet.dts b/arch/arm/boot/dts/sun8i-a23-q8-tablet.dts
index 3ab5c0c09d93..b6958e8f2f01 100644
--- a/arch/arm/boot/dts/sun8i-a23-q8-tablet.dts
+++ b/arch/arm/boot/dts/sun8i-a23-q8-tablet.dts
@@ -50,7 +50,6 @@
};
&codec {
- pinctrl-0 = <&codec_pa_pin>;
allwinner,pa-gpios = <&pio 7 9 GPIO_ACTIVE_HIGH>; /* PH9 */
allwinner,audio-routing =
"Headphone", "HP",
@@ -62,12 +61,3 @@
"Headset Mic", "HBIAS";
status = "okay";
};
-
-&pio {
- codec_pa_pin: codec_pa_pin@0 {
- allwinner,pins = "PH9";
- allwinner,function = "gpio_out";
- allwinner,drive = <SUN4I_PINCTRL_10_MA>;
- allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
- };
-};
diff --git a/arch/arm/boot/dts/sun8i-a33-sinlinx-sina33.dts b/arch/arm/boot/dts/sun8i-a33-sinlinx-sina33.dts
index 03b89bdd55ba..9b620cc1d5f1 100644
--- a/arch/arm/boot/dts/sun8i-a33-sinlinx-sina33.dts
+++ b/arch/arm/boot/dts/sun8i-a33-sinlinx-sina33.dts
@@ -48,7 +48,6 @@
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/input/input.h>
-#include <dt-bindings/pinctrl/sun4i-a10.h>
/ {
model = "Sinlinx SinA33";
@@ -84,6 +83,24 @@
status = "okay";
};
+&cpu0 {
+ cpu-supply = <&reg_dcdc3>;
+};
+
+&cpu0_opp_table {
+ opp@1104000000 {
+ opp-hz = /bits/ 64 <1104000000>;
+ opp-microvolt = <1320000>;
+ clock-latency-ns = <244144>; /* 8 32k periods */
+ };
+
+ opp@1200000000 {
+ opp-hz = /bits/ 64 <1200000000>;
+ opp-microvolt = <1320000>;
+ clock-latency-ns = <244144>; /* 8 32k periods */
+ };
+};
+
&de {
status = "okay";
};
@@ -175,6 +192,10 @@
#include "axp223.dtsi"
+&ac_power_supply {
+ status = "okay";
+};
+
&reg_aldo1 {
regulator-always-on;
regulator-min-microvolt = <3000000>;
diff --git a/arch/arm/boot/dts/sun8i-a33.dtsi b/arch/arm/boot/dts/sun8i-a33.dtsi
index 306af6cadf26..013978259372 100644
--- a/arch/arm/boot/dts/sun8i-a33.dtsi
+++ b/arch/arm/boot/dts/sun8i-a33.dtsi
@@ -43,24 +43,79 @@
*/
#include "sun8i-a23-a33.dtsi"
+#include <dt-bindings/thermal/thermal.h>
/ {
cpu0_opp_table: opp_table0 {
compatible = "operating-points-v2";
opp-shared;
+ opp@120000000 {
+ opp-hz = /bits/ 64 <120000000>;
+ opp-microvolt = <1040000>;
+ clock-latency-ns = <244144>; /* 8 32k periods */
+ };
+
+ opp@240000000 {
+ opp-hz = /bits/ 64 <240000000>;
+ opp-microvolt = <1040000>;
+ clock-latency-ns = <244144>; /* 8 32k periods */
+ };
+
+ opp@312000000 {
+ opp-hz = /bits/ 64 <312000000>;
+ opp-microvolt = <1040000>;
+ clock-latency-ns = <244144>; /* 8 32k periods */
+ };
+
+ opp@408000000 {
+ opp-hz = /bits/ 64 <408000000>;
+ opp-microvolt = <1040000>;
+ clock-latency-ns = <244144>; /* 8 32k periods */
+ };
+
+ opp@480000000 {
+ opp-hz = /bits/ 64 <480000000>;
+ opp-microvolt = <1040000>;
+ clock-latency-ns = <244144>; /* 8 32k periods */
+ };
+
+ opp@504000000 {
+ opp-hz = /bits/ 64 <504000000>;
+ opp-microvolt = <1040000>;
+ clock-latency-ns = <244144>; /* 8 32k periods */
+ };
+
+ opp@600000000 {
+ opp-hz = /bits/ 64 <600000000>;
+ opp-microvolt = <1040000>;
+ clock-latency-ns = <244144>; /* 8 32k periods */
+ };
+
opp@648000000 {
opp-hz = /bits/ 64 <648000000>;
opp-microvolt = <1040000>;
clock-latency-ns = <244144>; /* 8 32k periods */
};
+ opp@720000000 {
+ opp-hz = /bits/ 64 <720000000>;
+ opp-microvolt = <1100000>;
+ clock-latency-ns = <244144>; /* 8 32k periods */
+ };
+
opp@816000000 {
opp-hz = /bits/ 64 <816000000>;
opp-microvolt = <1100000>;
clock-latency-ns = <244144>; /* 8 32k periods */
};
+ opp@912000000 {
+ opp-hz = /bits/ 64 <912000000>;
+ opp-microvolt = <1200000>;
+ clock-latency-ns = <244144>; /* 8 32k periods */
+ };
+
opp@1008000000 {
opp-hz = /bits/ 64 <1008000000>;
opp-microvolt = <1200000>;
@@ -73,6 +128,7 @@
clocks = <&ccu CLK_CPUX>;
clock-names = "cpu";
operating-points-v2 = <&cpu0_opp_table>;
+ #cooling-cells = <2>;
};
cpu@1 {
@@ -100,6 +156,27 @@
status = "disabled";
};
+ iio-hwmon {
+ compatible = "iio-hwmon";
+ io-channels = <&ths>;
+ };
+
+ mali_opp_table: gpu-opp-table {
+ compatible = "operating-points-v2";
+
+ opp@144000000 {
+ opp-hz = /bits/ 64 <144000000>;
+ };
+
+ opp@240000000 {
+ opp-hz = /bits/ 64 <240000000>;
+ };
+
+ opp@384000000 {
+ opp-hz = /bits/ 64 <384000000>;
+ };
+ };
+
memory {
reg = <0x40000000 0x80000000>;
};
@@ -196,6 +273,13 @@
status = "disabled";
};
+ ths: ths@01c25000 {
+ compatible = "allwinner,sun8i-a33-ths";
+ reg = <0x01c25000 0x100>;
+ #thermal-sensor-cells = <0>;
+ #io-channel-cells = <0>;
+ };
+
fe0: display-frontend@01e00000 {
compatible = "allwinner,sun8i-a33-display-frontend";
reg = <0x01e00000 0x20000>;
@@ -306,12 +390,83 @@
};
};
};
+
+ thermal-zones {
+ cpu_thermal {
+ /* milliseconds */
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+ thermal-sensors = <&ths>;
+
+ cooling-maps {
+ map0 {
+ trip = <&cpu_alert0>;
+ cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ map1 {
+ trip = <&cpu_alert1>;
+ cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+
+ map2 {
+ trip = <&gpu_alert0>;
+ cooling-device = <&mali 1 THERMAL_NO_LIMIT>;
+ };
+
+ map3 {
+ trip = <&gpu_alert1>;
+ cooling-device = <&mali 2 THERMAL_NO_LIMIT>;
+ };
+ };
+
+ trips {
+ cpu_alert0: cpu_alert0 {
+ /* milliCelsius */
+ temperature = <75000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ gpu_alert0: gpu_alert0 {
+ /* milliCelsius */
+ temperature = <85000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu_alert1: cpu_alert1 {
+ /* milliCelsius */
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+
+ gpu_alert1: gpu_alert1 {
+ /* milliCelsius */
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+
+ cpu_crit: cpu_crit {
+ /* milliCelsius */
+ temperature = <110000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+ };
+ };
};
&ccu {
compatible = "allwinner,sun8i-a33-ccu";
};
+&mali {
+ operating-points-v2 = <&mali_opp_table>;
+};
+
&pio {
compatible = "allwinner,sun8i-a33-pinctrl";
interrupts = <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>,
diff --git a/arch/arm/boot/dts/sun8i-a83t.dtsi b/arch/arm/boot/dts/sun8i-a83t.dtsi
index a789a7caf217..0ec143773ee9 100644
--- a/arch/arm/boot/dts/sun8i-a83t.dtsi
+++ b/arch/arm/boot/dts/sun8i-a83t.dtsi
@@ -47,8 +47,6 @@
#include <dt-bindings/interrupt-controller/arm-gic.h>
-#include <dt-bindings/pinctrl/sun4i-a10.h>
-
/ {
interrupt-parent = <&gic>;
diff --git a/arch/arm/boot/dts/sun8i-h2-plus-orangepi-zero.dts b/arch/arm/boot/dts/sun8i-h2-plus-orangepi-zero.dts
index b7ca916d871d..9e8b082c134f 100644
--- a/arch/arm/boot/dts/sun8i-h2-plus-orangepi-zero.dts
+++ b/arch/arm/boot/dts/sun8i-h2-plus-orangepi-zero.dts
@@ -49,7 +49,6 @@
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/input/input.h>
-#include <dt-bindings/pinctrl/sun4i-a10.h>
/ {
model = "Xunlong Orange Pi Zero";
@@ -96,6 +95,10 @@
};
};
+&ehci0 {
+ status = "okay";
+};
+
&ehci1 {
status = "okay";
};
@@ -132,6 +135,10 @@
bias-pull-up;
};
+&ohci0 {
+ status = "okay";
+};
+
&ohci1 {
status = "okay";
};
@@ -154,7 +161,17 @@
status = "disabled";
};
+&usb_otg {
+ dr_mode = "peripheral";
+ status = "okay";
+};
+
&usbphy {
- /* USB VBUS is always on */
+ /*
+ * USB Type-A port VBUS is always on. However, MicroUSB VBUS can only
+ * power up the board; when it's used as OTG port, this VBUS is
+ * always off even if the board is powered via GPIO pins.
+ */
status = "okay";
+ usb0_id_det-gpios = <&pio 6 12 GPIO_ACTIVE_HIGH>; /* PG12 */
};
diff --git a/arch/arm/boot/dts/sun8i-h3-bananapi-m2-plus.dts b/arch/arm/boot/dts/sun8i-h3-bananapi-m2-plus.dts
index c0c49dd4d3b2..52acbe111cad 100644
--- a/arch/arm/boot/dts/sun8i-h3-bananapi-m2-plus.dts
+++ b/arch/arm/boot/dts/sun8i-h3-bananapi-m2-plus.dts
@@ -46,7 +46,6 @@
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/input/input.h>
-#include <dt-bindings/pinctrl/sun4i-a10.h>
/ {
model = "Banana Pi BPI-M2-Plus";
diff --git a/arch/arm/boot/dts/sun8i-h3-beelink-x2.dts b/arch/arm/boot/dts/sun8i-h3-beelink-x2.dts
index 25b225b7dfd6..e7fae65eb5d3 100644
--- a/arch/arm/boot/dts/sun8i-h3-beelink-x2.dts
+++ b/arch/arm/boot/dts/sun8i-h3-beelink-x2.dts
@@ -46,7 +46,6 @@
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/input/input.h>
-#include <dt-bindings/pinctrl/sun4i-a10.h>
/ {
model = "Beelink X2";
@@ -138,6 +137,16 @@
};
};
+&mmc2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmc2_8bit_pins>;
+ vmmc-supply = <&reg_vcc3v3>;
+ bus-width = <8>;
+ non-removable;
+ cap-mmc-hw-reset;
+ status = "okay";
+};
+
&ohci1 {
status = "okay";
};
diff --git a/arch/arm/boot/dts/sun8i-h3-nanopi-neo-air.dts b/arch/arm/boot/dts/sun8i-h3-nanopi-neo-air.dts
new file mode 100644
index 000000000000..03ff6f8b93ff
--- /dev/null
+++ b/arch/arm/boot/dts/sun8i-h3-nanopi-neo-air.dts
@@ -0,0 +1,96 @@
+/*
+ * Copyright (C) 2017 Jelle van der Waa <jelle@vdwaa.nl>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+#include "sun8i-h3.dtsi"
+#include "sunxi-common-regulators.dtsi"
+
+#include <dt-bindings/gpio/gpio.h>
+
+/ {
+ model = "FriendlyARM NanoPi NEO Air";
+ compatible = "friendlyarm,nanopi-neo-air", "allwinner,sun8i-h3";
+
+ aliases {
+ serial0 = &uart0;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ leds {
+ compatible = "gpio-leds";
+
+ pwr {
+ label = "nanopi:green:pwr";
+ gpios = <&r_pio 0 10 GPIO_ACTIVE_HIGH>; /* PL10 */
+ default-state = "on";
+ };
+
+ status {
+ label = "nanopi:blue:status";
+ gpios = <&pio 0 10 GPIO_ACTIVE_HIGH>; /* PA10 */
+ };
+ };
+};
+
+&mmc0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmc0_pins_a>, <&mmc0_cd_pin>;
+ vmmc-supply = <&reg_vcc3v3>;
+ bus-width = <4>;
+ cd-gpios = <&pio 5 6 GPIO_ACTIVE_HIGH>; /* PF6 */
+ cd-inverted;
+ status = "okay";
+};
+
+&uart0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart0_pins_a>;
+ status = "okay";
+};
+
+&usbphy {
+ /* USB VBUS is always on */
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/sun8i-h3-nanopi.dtsi b/arch/arm/boot/dts/sun8i-h3-nanopi.dtsi
index 2216e68d1838..c6decee41a27 100644
--- a/arch/arm/boot/dts/sun8i-h3-nanopi.dtsi
+++ b/arch/arm/boot/dts/sun8i-h3-nanopi.dtsi
@@ -47,7 +47,6 @@
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/input/input.h>
-#include <dt-bindings/pinctrl/sun4i-a10.h>
/ {
aliases {
diff --git a/arch/arm/boot/dts/sun8i-h3-orangepi-2.dts b/arch/arm/boot/dts/sun8i-h3-orangepi-2.dts
index 047e9e1c6093..5b6d14555b7c 100644
--- a/arch/arm/boot/dts/sun8i-h3-orangepi-2.dts
+++ b/arch/arm/boot/dts/sun8i-h3-orangepi-2.dts
@@ -46,7 +46,6 @@
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/input/input.h>
-#include <dt-bindings/pinctrl/sun4i-a10.h>
/ {
model = "Xunlong Orange Pi 2";
diff --git a/arch/arm/boot/dts/sun8i-h3-orangepi-lite.dts b/arch/arm/boot/dts/sun8i-h3-orangepi-lite.dts
index 22b99b407019..9b47a0def740 100644
--- a/arch/arm/boot/dts/sun8i-h3-orangepi-lite.dts
+++ b/arch/arm/boot/dts/sun8i-h3-orangepi-lite.dts
@@ -46,7 +46,6 @@
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/input/input.h>
-#include <dt-bindings/pinctrl/sun4i-a10.h>
/ {
model = "Xunlong Orange Pi Lite";
diff --git a/arch/arm/boot/dts/sun8i-h3-orangepi-one.dts b/arch/arm/boot/dts/sun8i-h3-orangepi-one.dts
index 34da853ee037..5fea430e0eb1 100644
--- a/arch/arm/boot/dts/sun8i-h3-orangepi-one.dts
+++ b/arch/arm/boot/dts/sun8i-h3-orangepi-one.dts
@@ -46,7 +46,6 @@
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/input/input.h>
-#include <dt-bindings/pinctrl/sun4i-a10.h>
/ {
model = "Xunlong Orange Pi One";
@@ -90,6 +89,10 @@
};
};
+&ehci0 {
+ status = "okay";
+};
+
&ehci1 {
status = "okay";
};
@@ -104,6 +107,10 @@
status = "okay";
};
+&ohci0 {
+ status = "okay";
+};
+
&ohci1 {
status = "okay";
};
@@ -127,6 +134,11 @@
};
};
+&reg_usb0_vbus {
+ gpio = <&r_pio 0 2 GPIO_ACTIVE_HIGH>; /* PL2 */
+ status = "okay";
+};
+
&uart0 {
pinctrl-names = "default";
pinctrl-0 = <&uart0_pins_a>;
@@ -151,7 +163,14 @@
status = "disabled";
};
+&usb_otg {
+ dr_mode = "otg";
+ status = "okay";
+};
+
&usbphy {
- /* USB VBUS is always on */
+ /* USB Type-A port's VBUS is always on */
+ usb0_id_det-gpios = <&pio 6 12 GPIO_ACTIVE_HIGH>; /* PG12 */
+ usb0_vbus-supply = <&reg_usb0_vbus>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/sun8i-h3-orangepi-pc.dts b/arch/arm/boot/dts/sun8i-h3-orangepi-pc.dts
index d43978d3294e..f148111c326d 100644
--- a/arch/arm/boot/dts/sun8i-h3-orangepi-pc.dts
+++ b/arch/arm/boot/dts/sun8i-h3-orangepi-pc.dts
@@ -46,7 +46,6 @@
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/input/input.h>
-#include <dt-bindings/pinctrl/sun4i-a10.h>
/ {
model = "Xunlong Orange Pi PC";
diff --git a/arch/arm/boot/dts/sun8i-h3.dtsi b/arch/arm/boot/dts/sun8i-h3.dtsi
index 27780b97c863..b36f9f423c39 100644
--- a/arch/arm/boot/dts/sun8i-h3.dtsi
+++ b/arch/arm/boot/dts/sun8i-h3.dtsi
@@ -40,16 +40,9 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include "skeleton.dtsi"
-
-#include <dt-bindings/clock/sun8i-h3-ccu.h>
-#include <dt-bindings/interrupt-controller/arm-gic.h>
-#include <dt-bindings/pinctrl/sun4i-a10.h>
-#include <dt-bindings/reset/sun8i-h3-ccu.h>
+#include "sunxi-h3-h5.dtsi"
/ {
- interrupt-parent = <&gic>;
-
cpus {
#address-cells = <1>;
#size-cells = <0>;
@@ -86,563 +79,48 @@
<GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
<GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>;
};
+};
- clocks {
- #address-cells = <1>;
- #size-cells = <1>;
- ranges;
-
- osc24M: osc24M_clk {
- #clock-cells = <0>;
- compatible = "fixed-clock";
- clock-frequency = <24000000>;
- clock-output-names = "osc24M";
- };
-
- osc32k: osc32k_clk {
- #clock-cells = <0>;
- compatible = "fixed-clock";
- clock-frequency = <32768>;
- clock-output-names = "osc32k";
- };
-
- apb0: apb0_clk {
- compatible = "fixed-factor-clock";
- #clock-cells = <0>;
- clock-div = <1>;
- clock-mult = <1>;
- clocks = <&osc24M>;
- clock-output-names = "apb0";
- };
-
- apb0_gates: clk@01f01428 {
- compatible = "allwinner,sun8i-h3-apb0-gates-clk",
- "allwinner,sun4i-a10-gates-clk";
- reg = <0x01f01428 0x4>;
- #clock-cells = <1>;
- clocks = <&apb0>;
- clock-indices = <0>, <1>;
- clock-output-names = "apb0_pio", "apb0_ir";
- };
-
- ir_clk: ir_clk@01f01454 {
- compatible = "allwinner,sun4i-a10-mod0-clk";
- reg = <0x01f01454 0x4>;
- #clock-cells = <0>;
- clocks = <&osc32k>, <&osc24M>;
- clock-output-names = "ir";
- };
- };
-
- soc {
- compatible = "simple-bus";
- #address-cells = <1>;
- #size-cells = <1>;
- ranges;
-
- dma: dma-controller@01c02000 {
- compatible = "allwinner,sun8i-h3-dma";
- reg = <0x01c02000 0x1000>;
- interrupts = <GIC_SPI 50 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&ccu CLK_BUS_DMA>;
- resets = <&ccu RST_BUS_DMA>;
- #dma-cells = <1>;
- };
-
- mmc0: mmc@01c0f000 {
- compatible = "allwinner,sun7i-a20-mmc";
- reg = <0x01c0f000 0x1000>;
- clocks = <&ccu CLK_BUS_MMC0>,
- <&ccu CLK_MMC0>,
- <&ccu CLK_MMC0_OUTPUT>,
- <&ccu CLK_MMC0_SAMPLE>;
- clock-names = "ahb",
- "mmc",
- "output",
- "sample";
- resets = <&ccu RST_BUS_MMC0>;
- reset-names = "ahb";
- interrupts = <GIC_SPI 60 IRQ_TYPE_LEVEL_HIGH>;
- status = "disabled";
- #address-cells = <1>;
- #size-cells = <0>;
- };
-
- mmc1: mmc@01c10000 {
- compatible = "allwinner,sun7i-a20-mmc";
- reg = <0x01c10000 0x1000>;
- clocks = <&ccu CLK_BUS_MMC1>,
- <&ccu CLK_MMC1>,
- <&ccu CLK_MMC1_OUTPUT>,
- <&ccu CLK_MMC1_SAMPLE>;
- clock-names = "ahb",
- "mmc",
- "output",
- "sample";
- resets = <&ccu RST_BUS_MMC1>;
- reset-names = "ahb";
- interrupts = <GIC_SPI 61 IRQ_TYPE_LEVEL_HIGH>;
- status = "disabled";
- #address-cells = <1>;
- #size-cells = <0>;
- };
-
- mmc2: mmc@01c11000 {
- compatible = "allwinner,sun7i-a20-mmc";
- reg = <0x01c11000 0x1000>;
- clocks = <&ccu CLK_BUS_MMC2>,
- <&ccu CLK_MMC2>,
- <&ccu CLK_MMC2_OUTPUT>,
- <&ccu CLK_MMC2_SAMPLE>;
- clock-names = "ahb",
- "mmc",
- "output",
- "sample";
- resets = <&ccu RST_BUS_MMC2>;
- reset-names = "ahb";
- interrupts = <GIC_SPI 62 IRQ_TYPE_LEVEL_HIGH>;
- status = "disabled";
- #address-cells = <1>;
- #size-cells = <0>;
- };
-
- usbphy: phy@01c19400 {
- compatible = "allwinner,sun8i-h3-usb-phy";
- reg = <0x01c19400 0x2c>,
- <0x01c1a800 0x4>,
- <0x01c1b800 0x4>,
- <0x01c1c800 0x4>,
- <0x01c1d800 0x4>;
- reg-names = "phy_ctrl",
- "pmu0",
- "pmu1",
- "pmu2",
- "pmu3";
- clocks = <&ccu CLK_USB_PHY0>,
- <&ccu CLK_USB_PHY1>,
- <&ccu CLK_USB_PHY2>,
- <&ccu CLK_USB_PHY3>;
- clock-names = "usb0_phy",
- "usb1_phy",
- "usb2_phy",
- "usb3_phy";
- resets = <&ccu RST_USB_PHY0>,
- <&ccu RST_USB_PHY1>,
- <&ccu RST_USB_PHY2>,
- <&ccu RST_USB_PHY3>;
- reset-names = "usb0_reset",
- "usb1_reset",
- "usb2_reset",
- "usb3_reset";
- status = "disabled";
- #phy-cells = <1>;
- };
-
- ehci1: usb@01c1b000 {
- compatible = "allwinner,sun8i-h3-ehci", "generic-ehci";
- reg = <0x01c1b000 0x100>;
- interrupts = <GIC_SPI 74 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&ccu CLK_BUS_EHCI1>, <&ccu CLK_BUS_OHCI1>;
- resets = <&ccu RST_BUS_EHCI1>, <&ccu RST_BUS_OHCI1>;
- phys = <&usbphy 1>;
- phy-names = "usb";
- status = "disabled";
- };
-
- ohci1: usb@01c1b400 {
- compatible = "allwinner,sun8i-h3-ohci", "generic-ohci";
- reg = <0x01c1b400 0x100>;
- interrupts = <GIC_SPI 75 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&ccu CLK_BUS_EHCI1>, <&ccu CLK_BUS_OHCI1>,
- <&ccu CLK_USB_OHCI1>;
- resets = <&ccu RST_BUS_EHCI1>, <&ccu RST_BUS_OHCI1>;
- phys = <&usbphy 1>;
- phy-names = "usb";
- status = "disabled";
- };
-
- ehci2: usb@01c1c000 {
- compatible = "allwinner,sun8i-h3-ehci", "generic-ehci";
- reg = <0x01c1c000 0x100>;
- interrupts = <GIC_SPI 76 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&ccu CLK_BUS_EHCI2>, <&ccu CLK_BUS_OHCI2>;
- resets = <&ccu RST_BUS_EHCI2>, <&ccu RST_BUS_OHCI2>;
- phys = <&usbphy 2>;
- phy-names = "usb";
- status = "disabled";
- };
-
- ohci2: usb@01c1c400 {
- compatible = "allwinner,sun8i-h3-ohci", "generic-ohci";
- reg = <0x01c1c400 0x100>;
- interrupts = <GIC_SPI 77 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&ccu CLK_BUS_EHCI2>, <&ccu CLK_BUS_OHCI2>,
- <&ccu CLK_USB_OHCI2>;
- resets = <&ccu RST_BUS_EHCI2>, <&ccu RST_BUS_OHCI2>;
- phys = <&usbphy 2>;
- phy-names = "usb";
- status = "disabled";
- };
-
- ehci3: usb@01c1d000 {
- compatible = "allwinner,sun8i-h3-ehci", "generic-ehci";
- reg = <0x01c1d000 0x100>;
- interrupts = <GIC_SPI 78 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&ccu CLK_BUS_EHCI3>, <&ccu CLK_BUS_OHCI3>;
- resets = <&ccu RST_BUS_EHCI3>, <&ccu RST_BUS_OHCI3>;
- phys = <&usbphy 3>;
- phy-names = "usb";
- status = "disabled";
- };
-
- ohci3: usb@01c1d400 {
- compatible = "allwinner,sun8i-h3-ohci", "generic-ohci";
- reg = <0x01c1d400 0x100>;
- interrupts = <GIC_SPI 79 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&ccu CLK_BUS_EHCI3>, <&ccu CLK_BUS_OHCI3>,
- <&ccu CLK_USB_OHCI3>;
- resets = <&ccu RST_BUS_EHCI3>, <&ccu RST_BUS_OHCI3>;
- phys = <&usbphy 3>;
- phy-names = "usb";
- status = "disabled";
- };
-
- ccu: clock@01c20000 {
- compatible = "allwinner,sun8i-h3-ccu";
- reg = <0x01c20000 0x400>;
- clocks = <&osc24M>, <&osc32k>;
- clock-names = "hosc", "losc";
- #clock-cells = <1>;
- #reset-cells = <1>;
- };
-
- pio: pinctrl@01c20800 {
- compatible = "allwinner,sun8i-h3-pinctrl";
- reg = <0x01c20800 0x400>;
- interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&ccu CLK_BUS_PIO>, <&osc24M>, <&osc32k>;
- clock-names = "apb", "hosc", "losc";
- gpio-controller;
- #gpio-cells = <3>;
- interrupt-controller;
- #interrupt-cells = <3>;
-
- i2c0_pins: i2c0 {
- pins = "PA11", "PA12";
- function = "i2c0";
- };
-
- i2c1_pins: i2c1 {
- pins = "PA18", "PA19";
- function = "i2c1";
- };
-
- i2c2_pins: i2c2 {
- pins = "PE12", "PE13";
- function = "i2c2";
- };
-
- mmc0_pins_a: mmc0@0 {
- pins = "PF0", "PF1", "PF2", "PF3",
- "PF4", "PF5";
- function = "mmc0";
- drive-strength = <30>;
- bias-pull-up;
- };
-
- mmc0_cd_pin: mmc0_cd_pin@0 {
- pins = "PF6";
- function = "gpio_in";
- bias-pull-up;
- };
-
- mmc1_pins_a: mmc1@0 {
- pins = "PG0", "PG1", "PG2", "PG3",
- "PG4", "PG5";
- function = "mmc1";
- drive-strength = <30>;
- bias-pull-up;
- };
-
- mmc2_8bit_pins: mmc2_8bit {
- pins = "PC5", "PC6", "PC8",
- "PC9", "PC10", "PC11",
- "PC12", "PC13", "PC14",
- "PC15", "PC16";
- function = "mmc2";
- drive-strength = <30>;
- bias-pull-up;
- };
-
- spdif_tx_pins_a: spdif@0 {
- pins = "PA17";
- function = "spdif";
- };
-
- spi0_pins: spi0 {
- pins = "PC0", "PC1", "PC2", "PC3";
- function = "spi0";
- };
-
- spi1_pins: spi1 {
- pins = "PA15", "PA16", "PA14", "PA13";
- function = "spi1";
- };
-
- uart0_pins_a: uart0@0 {
- pins = "PA4", "PA5";
- function = "uart0";
- };
-
- uart1_pins: uart1 {
- pins = "PG6", "PG7";
- function = "uart1";
- };
-
- uart1_rts_cts_pins: uart1_rts_cts {
- pins = "PG8", "PG9";
- function = "uart1";
- };
-
- uart2_pins: uart2 {
- pins = "PA0", "PA1";
- function = "uart2";
- };
-
- uart3_pins: uart3 {
- pins = "PA13", "PA14";
- function = "uart3";
- };
- };
-
- timer@01c20c00 {
- compatible = "allwinner,sun4i-a10-timer";
- reg = <0x01c20c00 0xa0>;
- interrupts = <GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&osc24M>;
- };
-
- spi0: spi@01c68000 {
- compatible = "allwinner,sun8i-h3-spi";
- reg = <0x01c68000 0x1000>;
- interrupts = <GIC_SPI 65 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&ccu CLK_BUS_SPI0>, <&ccu CLK_SPI0>;
- clock-names = "ahb", "mod";
- dmas = <&dma 23>, <&dma 23>;
- dma-names = "rx", "tx";
- pinctrl-names = "default";
- pinctrl-0 = <&spi0_pins>;
- resets = <&ccu RST_BUS_SPI0>;
- status = "disabled";
- #address-cells = <1>;
- #size-cells = <0>;
- };
-
- spi1: spi@01c69000 {
- compatible = "allwinner,sun8i-h3-spi";
- reg = <0x01c69000 0x1000>;
- interrupts = <GIC_SPI 66 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&ccu CLK_BUS_SPI1>, <&ccu CLK_SPI1>;
- clock-names = "ahb", "mod";
- dmas = <&dma 24>, <&dma 24>;
- dma-names = "rx", "tx";
- pinctrl-names = "default";
- pinctrl-0 = <&spi1_pins>;
- resets = <&ccu RST_BUS_SPI1>;
- status = "disabled";
- #address-cells = <1>;
- #size-cells = <0>;
- };
-
- wdt0: watchdog@01c20ca0 {
- compatible = "allwinner,sun6i-a31-wdt";
- reg = <0x01c20ca0 0x20>;
- interrupts = <GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>;
- };
-
- spdif: spdif@01c21000 {
- #sound-dai-cells = <0>;
- compatible = "allwinner,sun8i-h3-spdif";
- reg = <0x01c21000 0x400>;
- interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&ccu CLK_BUS_SPDIF>, <&ccu CLK_SPDIF>;
- resets = <&ccu RST_BUS_SPDIF>;
- clock-names = "apb", "spdif";
- dmas = <&dma 2>;
- dma-names = "tx";
- status = "disabled";
- };
-
- pwm: pwm@01c21400 {
- compatible = "allwinner,sun8i-h3-pwm";
- reg = <0x01c21400 0x8>;
- clocks = <&osc24M>;
- #pwm-cells = <3>;
- status = "disabled";
- };
-
- codec: codec@01c22c00 {
- #sound-dai-cells = <0>;
- compatible = "allwinner,sun8i-h3-codec";
- reg = <0x01c22c00 0x400>;
- interrupts = <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&ccu CLK_BUS_CODEC>, <&ccu CLK_AC_DIG>;
- clock-names = "apb", "codec";
- resets = <&ccu RST_BUS_CODEC>;
- dmas = <&dma 15>, <&dma 15>;
- dma-names = "rx", "tx";
- allwinner,codec-analog-controls = <&codec_analog>;
- status = "disabled";
- };
-
- uart0: serial@01c28000 {
- compatible = "snps,dw-apb-uart";
- reg = <0x01c28000 0x400>;
- interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>;
- reg-shift = <2>;
- reg-io-width = <4>;
- clocks = <&ccu CLK_BUS_UART0>;
- resets = <&ccu RST_BUS_UART0>;
- dmas = <&dma 6>, <&dma 6>;
- dma-names = "rx", "tx";
- status = "disabled";
- };
-
- uart1: serial@01c28400 {
- compatible = "snps,dw-apb-uart";
- reg = <0x01c28400 0x400>;
- interrupts = <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>;
- reg-shift = <2>;
- reg-io-width = <4>;
- clocks = <&ccu CLK_BUS_UART1>;
- resets = <&ccu RST_BUS_UART1>;
- dmas = <&dma 7>, <&dma 7>;
- dma-names = "rx", "tx";
- status = "disabled";
- };
-
- uart2: serial@01c28800 {
- compatible = "snps,dw-apb-uart";
- reg = <0x01c28800 0x400>;
- interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>;
- reg-shift = <2>;
- reg-io-width = <4>;
- clocks = <&ccu CLK_BUS_UART2>;
- resets = <&ccu RST_BUS_UART2>;
- dmas = <&dma 8>, <&dma 8>;
- dma-names = "rx", "tx";
- status = "disabled";
- };
-
- uart3: serial@01c28c00 {
- compatible = "snps,dw-apb-uart";
- reg = <0x01c28c00 0x400>;
- interrupts = <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>;
- reg-shift = <2>;
- reg-io-width = <4>;
- clocks = <&ccu CLK_BUS_UART3>;
- resets = <&ccu RST_BUS_UART3>;
- dmas = <&dma 9>, <&dma 9>;
- dma-names = "rx", "tx";
- status = "disabled";
- };
-
- i2c0: i2c@01c2ac00 {
- compatible = "allwinner,sun6i-a31-i2c";
- reg = <0x01c2ac00 0x400>;
- interrupts = <GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&ccu CLK_BUS_I2C0>;
- resets = <&ccu RST_BUS_I2C0>;
- pinctrl-names = "default";
- pinctrl-0 = <&i2c0_pins>;
- status = "disabled";
- #address-cells = <1>;
- #size-cells = <0>;
- };
-
- i2c1: i2c@01c2b000 {
- compatible = "allwinner,sun6i-a31-i2c";
- reg = <0x01c2b000 0x400>;
- interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&ccu CLK_BUS_I2C1>;
- resets = <&ccu RST_BUS_I2C1>;
- pinctrl-names = "default";
- pinctrl-0 = <&i2c1_pins>;
- status = "disabled";
- #address-cells = <1>;
- #size-cells = <0>;
- };
-
- i2c2: i2c@01c2b400 {
- compatible = "allwinner,sun6i-a31-i2c";
- reg = <0x01c2b400 0x400>;
- interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&ccu CLK_BUS_I2C2>;
- resets = <&ccu RST_BUS_I2C2>;
- pinctrl-names = "default";
- pinctrl-0 = <&i2c2_pins>;
- status = "disabled";
- #address-cells = <1>;
- #size-cells = <0>;
- };
-
- gic: interrupt-controller@01c81000 {
- compatible = "arm,cortex-a7-gic", "arm,cortex-a15-gic";
- reg = <0x01c81000 0x1000>,
- <0x01c82000 0x2000>,
- <0x01c84000 0x2000>,
- <0x01c86000 0x2000>;
- interrupt-controller;
- #interrupt-cells = <3>;
- interrupts = <GIC_PPI 9 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>;
- };
-
- rtc: rtc@01f00000 {
- compatible = "allwinner,sun6i-a31-rtc";
- reg = <0x01f00000 0x54>;
- interrupts = <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>;
- };
-
- apb0_reset: reset@01f014b0 {
- reg = <0x01f014b0 0x4>;
- compatible = "allwinner,sun6i-a31-clock-reset";
- #reset-cells = <1>;
- };
+&ccu {
+ compatible = "allwinner,sun8i-h3-ccu";
+};
- codec_analog: codec-analog@01f015c0 {
- compatible = "allwinner,sun8i-h3-codec-analog";
- reg = <0x01f015c0 0x4>;
- };
+&mmc0 {
+ compatible = "allwinner,sun7i-a20-mmc";
+ clocks = <&ccu CLK_BUS_MMC0>,
+ <&ccu CLK_MMC0>,
+ <&ccu CLK_MMC0_OUTPUT>,
+ <&ccu CLK_MMC0_SAMPLE>;
+ clock-names = "ahb",
+ "mmc",
+ "output",
+ "sample";
+};
- ir: ir@01f02000 {
- compatible = "allwinner,sun5i-a13-ir";
- clocks = <&apb0_gates 1>, <&ir_clk>;
- clock-names = "apb", "ir";
- resets = <&apb0_reset 1>;
- interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
- reg = <0x01f02000 0x40>;
- status = "disabled";
- };
+&mmc1 {
+ compatible = "allwinner,sun7i-a20-mmc";
+ clocks = <&ccu CLK_BUS_MMC1>,
+ <&ccu CLK_MMC1>,
+ <&ccu CLK_MMC1_OUTPUT>,
+ <&ccu CLK_MMC1_SAMPLE>;
+ clock-names = "ahb",
+ "mmc",
+ "output",
+ "sample";
+};
- r_pio: pinctrl@01f02c00 {
- compatible = "allwinner,sun8i-h3-r-pinctrl";
- reg = <0x01f02c00 0x400>;
- interrupts = <GIC_SPI 45 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&apb0_gates 0>, <&osc24M>, <&osc32k>;
- clock-names = "apb", "hosc", "losc";
- resets = <&apb0_reset 0>;
- gpio-controller;
- #gpio-cells = <3>;
- interrupt-controller;
- #interrupt-cells = <3>;
+&mmc2 {
+ compatible = "allwinner,sun7i-a20-mmc";
+ clocks = <&ccu CLK_BUS_MMC2>,
+ <&ccu CLK_MMC2>,
+ <&ccu CLK_MMC2_OUTPUT>,
+ <&ccu CLK_MMC2_SAMPLE>;
+ clock-names = "ahb",
+ "mmc",
+ "output",
+ "sample";
+};
- ir_pins_a: ir@0 {
- pins = "PL11";
- function = "s_cir_rx";
- };
- };
- };
+&pio {
+ compatible = "allwinner,sun8i-h3-pinctrl";
};
diff --git a/arch/arm/boot/dts/sun9i-a80-cubieboard4.dts b/arch/arm/boot/dts/sun9i-a80-cubieboard4.dts
index 9112a200fd5e..3741ac71c3d6 100644
--- a/arch/arm/boot/dts/sun9i-a80-cubieboard4.dts
+++ b/arch/arm/boot/dts/sun9i-a80-cubieboard4.dts
@@ -47,7 +47,6 @@
#include "sun9i-a80.dtsi"
#include <dt-bindings/gpio/gpio.h>
-#include <dt-bindings/pinctrl/sun4i-a10.h>
/ {
model = "Cubietech Cubieboard4";
diff --git a/arch/arm/boot/dts/sun9i-a80-optimus.dts b/arch/arm/boot/dts/sun9i-a80-optimus.dts
index 0fc3a87f5576..85f1ad670310 100644
--- a/arch/arm/boot/dts/sun9i-a80-optimus.dts
+++ b/arch/arm/boot/dts/sun9i-a80-optimus.dts
@@ -46,7 +46,6 @@
#include "sun9i-a80.dtsi"
#include <dt-bindings/gpio/gpio.h>
-#include <dt-bindings/pinctrl/sun4i-a10.h>
/ {
model = "Merrii A80 Optimus Board";
diff --git a/arch/arm/boot/dts/sun9i-a80.dtsi b/arch/arm/boot/dts/sun9i-a80.dtsi
index 15b6d122f878..759a72317eb8 100644
--- a/arch/arm/boot/dts/sun9i-a80.dtsi
+++ b/arch/arm/boot/dts/sun9i-a80.dtsi
@@ -46,8 +46,6 @@
#include <dt-bindings/interrupt-controller/arm-gic.h>
-#include <dt-bindings/pinctrl/sun4i-a10.h>
-
#include <dt-bindings/clock/sun9i-a80-ccu.h>
#include <dt-bindings/clock/sun9i-a80-de.h>
#include <dt-bindings/clock/sun9i-a80-usb.h>
diff --git a/arch/arm/boot/dts/sunxi-common-regulators.dtsi b/arch/arm/boot/dts/sunxi-common-regulators.dtsi
index 17c09fed9e84..ce5c53e4452f 100644
--- a/arch/arm/boot/dts/sunxi-common-regulators.dtsi
+++ b/arch/arm/boot/dts/sunxi-common-regulators.dtsi
@@ -43,7 +43,6 @@
*/
#include <dt-bindings/gpio/gpio.h>
-#include <dt-bindings/pinctrl/sun4i-a10.h>
&pio {
ahci_pwr_pin_a: ahci_pwr_pin@0 {
diff --git a/arch/arm/boot/dts/sunxi-h3-h5.dtsi b/arch/arm/boot/dts/sunxi-h3-h5.dtsi
new file mode 100644
index 000000000000..1aeeacb3a884
--- /dev/null
+++ b/arch/arm/boot/dts/sunxi-h3-h5.dtsi
@@ -0,0 +1,601 @@
+/*
+ * Copyright (C) 2015 Jens Kuske <jenskuske@gmail.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <dt-bindings/clock/sun8i-h3-ccu.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/reset/sun8i-h3-ccu.h>
+
+/ {
+ interrupt-parent = <&gic>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ clocks {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ osc24M: osc24M_clk {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ clock-frequency = <24000000>;
+ clock-output-names = "osc24M";
+ };
+
+ osc32k: osc32k_clk {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ clock-frequency = <32768>;
+ clock-output-names = "osc32k";
+ };
+
+ iosc: internal-osc-clk {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ clock-frequency = <16000000>;
+ clock-accuracy = <300000000>;
+ clock-output-names = "iosc";
+ };
+ };
+
+ soc {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ dma: dma-controller@01c02000 {
+ compatible = "allwinner,sun8i-h3-dma";
+ reg = <0x01c02000 0x1000>;
+ interrupts = <GIC_SPI 50 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&ccu CLK_BUS_DMA>;
+ resets = <&ccu RST_BUS_DMA>;
+ #dma-cells = <1>;
+ };
+
+ mmc0: mmc@01c0f000 {
+ /* compatible and clocks are in per SoC .dtsi file */
+ reg = <0x01c0f000 0x1000>;
+ resets = <&ccu RST_BUS_MMC0>;
+ reset-names = "ahb";
+ interrupts = <GIC_SPI 60 IRQ_TYPE_LEVEL_HIGH>;
+ status = "disabled";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ mmc1: mmc@01c10000 {
+ /* compatible and clocks are in per SoC .dtsi file */
+ reg = <0x01c10000 0x1000>;
+ resets = <&ccu RST_BUS_MMC1>;
+ reset-names = "ahb";
+ interrupts = <GIC_SPI 61 IRQ_TYPE_LEVEL_HIGH>;
+ status = "disabled";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ mmc2: mmc@01c11000 {
+ /* compatible and clocks are in per SoC .dtsi file */
+ reg = <0x01c11000 0x1000>;
+ resets = <&ccu RST_BUS_MMC2>;
+ reset-names = "ahb";
+ interrupts = <GIC_SPI 62 IRQ_TYPE_LEVEL_HIGH>;
+ status = "disabled";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ usb_otg: usb@01c19000 {
+ compatible = "allwinner,sun8i-h3-musb";
+ reg = <0x01c19000 0x400>;
+ clocks = <&ccu CLK_BUS_OTG>;
+ resets = <&ccu RST_BUS_OTG>;
+ interrupts = <GIC_SPI 71 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "mc";
+ phys = <&usbphy 0>;
+ phy-names = "usb";
+ extcon = <&usbphy 0>;
+ status = "disabled";
+ };
+
+ usbphy: phy@01c19400 {
+ compatible = "allwinner,sun8i-h3-usb-phy";
+ reg = <0x01c19400 0x2c>,
+ <0x01c1a800 0x4>,
+ <0x01c1b800 0x4>,
+ <0x01c1c800 0x4>,
+ <0x01c1d800 0x4>;
+ reg-names = "phy_ctrl",
+ "pmu0",
+ "pmu1",
+ "pmu2",
+ "pmu3";
+ clocks = <&ccu CLK_USB_PHY0>,
+ <&ccu CLK_USB_PHY1>,
+ <&ccu CLK_USB_PHY2>,
+ <&ccu CLK_USB_PHY3>;
+ clock-names = "usb0_phy",
+ "usb1_phy",
+ "usb2_phy",
+ "usb3_phy";
+ resets = <&ccu RST_USB_PHY0>,
+ <&ccu RST_USB_PHY1>,
+ <&ccu RST_USB_PHY2>,
+ <&ccu RST_USB_PHY3>;
+ reset-names = "usb0_reset",
+ "usb1_reset",
+ "usb2_reset",
+ "usb3_reset";
+ status = "disabled";
+ #phy-cells = <1>;
+ };
+
+ ehci0: usb@01c1a000 {
+ compatible = "allwinner,sun8i-h3-ehci", "generic-ehci";
+ reg = <0x01c1a000 0x100>;
+ interrupts = <GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&ccu CLK_BUS_EHCI0>, <&ccu CLK_BUS_OHCI0>;
+ resets = <&ccu RST_BUS_EHCI0>, <&ccu RST_BUS_OHCI0>;
+ status = "disabled";
+ };
+
+ ohci0: usb@01c1a400 {
+ compatible = "allwinner,sun8i-h3-ohci", "generic-ohci";
+ reg = <0x01c1a400 0x100>;
+ interrupts = <GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&ccu CLK_BUS_EHCI0>, <&ccu CLK_BUS_OHCI0>,
+ <&ccu CLK_USB_OHCI0>;
+ resets = <&ccu RST_BUS_EHCI0>, <&ccu RST_BUS_OHCI0>;
+ status = "disabled";
+ };
+
+ ehci1: usb@01c1b000 {
+ compatible = "allwinner,sun8i-h3-ehci", "generic-ehci";
+ reg = <0x01c1b000 0x100>;
+ interrupts = <GIC_SPI 74 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&ccu CLK_BUS_EHCI1>, <&ccu CLK_BUS_OHCI1>;
+ resets = <&ccu RST_BUS_EHCI1>, <&ccu RST_BUS_OHCI1>;
+ phys = <&usbphy 1>;
+ phy-names = "usb";
+ status = "disabled";
+ };
+
+ ohci1: usb@01c1b400 {
+ compatible = "allwinner,sun8i-h3-ohci", "generic-ohci";
+ reg = <0x01c1b400 0x100>;
+ interrupts = <GIC_SPI 75 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&ccu CLK_BUS_EHCI1>, <&ccu CLK_BUS_OHCI1>,
+ <&ccu CLK_USB_OHCI1>;
+ resets = <&ccu RST_BUS_EHCI1>, <&ccu RST_BUS_OHCI1>;
+ phys = <&usbphy 1>;
+ phy-names = "usb";
+ status = "disabled";
+ };
+
+ ehci2: usb@01c1c000 {
+ compatible = "allwinner,sun8i-h3-ehci", "generic-ehci";
+ reg = <0x01c1c000 0x100>;
+ interrupts = <GIC_SPI 76 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&ccu CLK_BUS_EHCI2>, <&ccu CLK_BUS_OHCI2>;
+ resets = <&ccu RST_BUS_EHCI2>, <&ccu RST_BUS_OHCI2>;
+ phys = <&usbphy 2>;
+ phy-names = "usb";
+ status = "disabled";
+ };
+
+ ohci2: usb@01c1c400 {
+ compatible = "allwinner,sun8i-h3-ohci", "generic-ohci";
+ reg = <0x01c1c400 0x100>;
+ interrupts = <GIC_SPI 77 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&ccu CLK_BUS_EHCI2>, <&ccu CLK_BUS_OHCI2>,
+ <&ccu CLK_USB_OHCI2>;
+ resets = <&ccu RST_BUS_EHCI2>, <&ccu RST_BUS_OHCI2>;
+ phys = <&usbphy 2>;
+ phy-names = "usb";
+ status = "disabled";
+ };
+
+ ehci3: usb@01c1d000 {
+ compatible = "allwinner,sun8i-h3-ehci", "generic-ehci";
+ reg = <0x01c1d000 0x100>;
+ interrupts = <GIC_SPI 78 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&ccu CLK_BUS_EHCI3>, <&ccu CLK_BUS_OHCI3>;
+ resets = <&ccu RST_BUS_EHCI3>, <&ccu RST_BUS_OHCI3>;
+ phys = <&usbphy 3>;
+ phy-names = "usb";
+ status = "disabled";
+ };
+
+ ohci3: usb@01c1d400 {
+ compatible = "allwinner,sun8i-h3-ohci", "generic-ohci";
+ reg = <0x01c1d400 0x100>;
+ interrupts = <GIC_SPI 79 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&ccu CLK_BUS_EHCI3>, <&ccu CLK_BUS_OHCI3>,
+ <&ccu CLK_USB_OHCI3>;
+ resets = <&ccu RST_BUS_EHCI3>, <&ccu RST_BUS_OHCI3>;
+ phys = <&usbphy 3>;
+ phy-names = "usb";
+ status = "disabled";
+ };
+
+ ccu: clock@01c20000 {
+ /* compatible is in per SoC .dtsi file */
+ reg = <0x01c20000 0x400>;
+ clocks = <&osc24M>, <&osc32k>;
+ clock-names = "hosc", "losc";
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ };
+
+ pio: pinctrl@01c20800 {
+ /* compatible is in per SoC .dtsi file */
+ reg = <0x01c20800 0x400>;
+ interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&ccu CLK_BUS_PIO>, <&osc24M>, <&osc32k>;
+ clock-names = "apb", "hosc", "losc";
+ gpio-controller;
+ #gpio-cells = <3>;
+ interrupt-controller;
+ #interrupt-cells = <3>;
+
+ i2c0_pins: i2c0 {
+ pins = "PA11", "PA12";
+ function = "i2c0";
+ };
+
+ i2c1_pins: i2c1 {
+ pins = "PA18", "PA19";
+ function = "i2c1";
+ };
+
+ i2c2_pins: i2c2 {
+ pins = "PE12", "PE13";
+ function = "i2c2";
+ };
+
+ mmc0_pins_a: mmc0@0 {
+ pins = "PF0", "PF1", "PF2", "PF3",
+ "PF4", "PF5";
+ function = "mmc0";
+ drive-strength = <30>;
+ bias-pull-up;
+ };
+
+ mmc0_cd_pin: mmc0_cd_pin@0 {
+ pins = "PF6";
+ function = "gpio_in";
+ bias-pull-up;
+ };
+
+ mmc1_pins_a: mmc1@0 {
+ pins = "PG0", "PG1", "PG2", "PG3",
+ "PG4", "PG5";
+ function = "mmc1";
+ drive-strength = <30>;
+ bias-pull-up;
+ };
+
+ mmc2_8bit_pins: mmc2_8bit {
+ pins = "PC5", "PC6", "PC8",
+ "PC9", "PC10", "PC11",
+ "PC12", "PC13", "PC14",
+ "PC15", "PC16";
+ function = "mmc2";
+ drive-strength = <30>;
+ bias-pull-up;
+ };
+
+ spdif_tx_pins_a: spdif@0 {
+ pins = "PA17";
+ function = "spdif";
+ };
+
+ spi0_pins: spi0 {
+ pins = "PC0", "PC1", "PC2", "PC3";
+ function = "spi0";
+ };
+
+ spi1_pins: spi1 {
+ pins = "PA15", "PA16", "PA14", "PA13";
+ function = "spi1";
+ };
+
+ uart0_pins_a: uart0@0 {
+ pins = "PA4", "PA5";
+ function = "uart0";
+ };
+
+ uart1_pins: uart1 {
+ pins = "PG6", "PG7";
+ function = "uart1";
+ };
+
+ uart1_rts_cts_pins: uart1_rts_cts {
+ pins = "PG8", "PG9";
+ function = "uart1";
+ };
+
+ uart2_pins: uart2 {
+ pins = "PA0", "PA1";
+ function = "uart2";
+ };
+
+ uart3_pins: uart3 {
+ pins = "PA13", "PA14";
+ function = "uart3";
+ };
+ };
+
+ timer@01c20c00 {
+ compatible = "allwinner,sun4i-a10-timer";
+ reg = <0x01c20c00 0xa0>;
+ interrupts = <GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&osc24M>;
+ };
+
+ spi0: spi@01c68000 {
+ compatible = "allwinner,sun8i-h3-spi";
+ reg = <0x01c68000 0x1000>;
+ interrupts = <GIC_SPI 65 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&ccu CLK_BUS_SPI0>, <&ccu CLK_SPI0>;
+ clock-names = "ahb", "mod";
+ dmas = <&dma 23>, <&dma 23>;
+ dma-names = "rx", "tx";
+ pinctrl-names = "default";
+ pinctrl-0 = <&spi0_pins>;
+ resets = <&ccu RST_BUS_SPI0>;
+ status = "disabled";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ spi1: spi@01c69000 {
+ compatible = "allwinner,sun8i-h3-spi";
+ reg = <0x01c69000 0x1000>;
+ interrupts = <GIC_SPI 66 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&ccu CLK_BUS_SPI1>, <&ccu CLK_SPI1>;
+ clock-names = "ahb", "mod";
+ dmas = <&dma 24>, <&dma 24>;
+ dma-names = "rx", "tx";
+ pinctrl-names = "default";
+ pinctrl-0 = <&spi1_pins>;
+ resets = <&ccu RST_BUS_SPI1>;
+ status = "disabled";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ wdt0: watchdog@01c20ca0 {
+ compatible = "allwinner,sun6i-a31-wdt";
+ reg = <0x01c20ca0 0x20>;
+ interrupts = <GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ spdif: spdif@01c21000 {
+ #sound-dai-cells = <0>;
+ compatible = "allwinner,sun8i-h3-spdif";
+ reg = <0x01c21000 0x400>;
+ interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&ccu CLK_BUS_SPDIF>, <&ccu CLK_SPDIF>;
+ resets = <&ccu RST_BUS_SPDIF>;
+ clock-names = "apb", "spdif";
+ dmas = <&dma 2>;
+ dma-names = "tx";
+ status = "disabled";
+ };
+
+ pwm: pwm@01c21400 {
+ compatible = "allwinner,sun8i-h3-pwm";
+ reg = <0x01c21400 0x8>;
+ clocks = <&osc24M>;
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
+ codec: codec@01c22c00 {
+ #sound-dai-cells = <0>;
+ compatible = "allwinner,sun8i-h3-codec";
+ reg = <0x01c22c00 0x400>;
+ interrupts = <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&ccu CLK_BUS_CODEC>, <&ccu CLK_AC_DIG>;
+ clock-names = "apb", "codec";
+ resets = <&ccu RST_BUS_CODEC>;
+ dmas = <&dma 15>, <&dma 15>;
+ dma-names = "rx", "tx";
+ allwinner,codec-analog-controls = <&codec_analog>;
+ status = "disabled";
+ };
+
+ uart0: serial@01c28000 {
+ compatible = "snps,dw-apb-uart";
+ reg = <0x01c28000 0x400>;
+ interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ clocks = <&ccu CLK_BUS_UART0>;
+ resets = <&ccu RST_BUS_UART0>;
+ dmas = <&dma 6>, <&dma 6>;
+ dma-names = "rx", "tx";
+ status = "disabled";
+ };
+
+ uart1: serial@01c28400 {
+ compatible = "snps,dw-apb-uart";
+ reg = <0x01c28400 0x400>;
+ interrupts = <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ clocks = <&ccu CLK_BUS_UART1>;
+ resets = <&ccu RST_BUS_UART1>;
+ dmas = <&dma 7>, <&dma 7>;
+ dma-names = "rx", "tx";
+ status = "disabled";
+ };
+
+ uart2: serial@01c28800 {
+ compatible = "snps,dw-apb-uart";
+ reg = <0x01c28800 0x400>;
+ interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ clocks = <&ccu CLK_BUS_UART2>;
+ resets = <&ccu RST_BUS_UART2>;
+ dmas = <&dma 8>, <&dma 8>;
+ dma-names = "rx", "tx";
+ status = "disabled";
+ };
+
+ uart3: serial@01c28c00 {
+ compatible = "snps,dw-apb-uart";
+ reg = <0x01c28c00 0x400>;
+ interrupts = <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ clocks = <&ccu CLK_BUS_UART3>;
+ resets = <&ccu RST_BUS_UART3>;
+ dmas = <&dma 9>, <&dma 9>;
+ dma-names = "rx", "tx";
+ status = "disabled";
+ };
+
+ i2c0: i2c@01c2ac00 {
+ compatible = "allwinner,sun6i-a31-i2c";
+ reg = <0x01c2ac00 0x400>;
+ interrupts = <GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&ccu CLK_BUS_I2C0>;
+ resets = <&ccu RST_BUS_I2C0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c0_pins>;
+ status = "disabled";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ i2c1: i2c@01c2b000 {
+ compatible = "allwinner,sun6i-a31-i2c";
+ reg = <0x01c2b000 0x400>;
+ interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&ccu CLK_BUS_I2C1>;
+ resets = <&ccu RST_BUS_I2C1>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c1_pins>;
+ status = "disabled";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ i2c2: i2c@01c2b400 {
+ compatible = "allwinner,sun6i-a31-i2c";
+ reg = <0x01c2b400 0x400>;
+ interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&ccu CLK_BUS_I2C2>;
+ resets = <&ccu RST_BUS_I2C2>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c2_pins>;
+ status = "disabled";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ gic: interrupt-controller@01c81000 {
+ compatible = "arm,gic-400";
+ reg = <0x01c81000 0x1000>,
+ <0x01c82000 0x2000>,
+ <0x01c84000 0x2000>,
+ <0x01c86000 0x2000>;
+ interrupt-controller;
+ #interrupt-cells = <3>;
+ interrupts = <GIC_PPI 9 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>;
+ };
+
+ rtc: rtc@01f00000 {
+ compatible = "allwinner,sun6i-a31-rtc";
+ reg = <0x01f00000 0x54>;
+ interrupts = <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ r_ccu: clock@1f01400 {
+ compatible = "allwinner,sun50i-a64-r-ccu";
+ reg = <0x01f01400 0x100>;
+ clocks = <&osc24M>, <&osc32k>, <&iosc>;
+ clock-names = "hosc", "losc", "iosc";
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ };
+
+ codec_analog: codec-analog@01f015c0 {
+ compatible = "allwinner,sun8i-h3-codec-analog";
+ reg = <0x01f015c0 0x4>;
+ };
+
+ ir: ir@01f02000 {
+ compatible = "allwinner,sun5i-a13-ir";
+ clocks = <&r_ccu 4>, <&r_ccu 11>;
+ clock-names = "apb", "ir";
+ resets = <&r_ccu 0>;
+ interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x01f02000 0x40>;
+ status = "disabled";
+ };
+
+ r_pio: pinctrl@01f02c00 {
+ compatible = "allwinner,sun8i-h3-r-pinctrl";
+ reg = <0x01f02c00 0x400>;
+ interrupts = <GIC_SPI 45 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&r_ccu 3>, <&osc24M>, <&osc32k>;
+ clock-names = "apb", "hosc", "losc";
+ gpio-controller;
+ #gpio-cells = <3>;
+ interrupt-controller;
+ #interrupt-cells = <3>;
+
+ ir_pins_a: ir@0 {
+ pins = "PL11";
+ function = "s_cir_rx";
+ };
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/sunxi-reference-design-tablet.dtsi b/arch/arm/boot/dts/sunxi-reference-design-tablet.dtsi
index b8241462fcea..245d0bcde441 100644
--- a/arch/arm/boot/dts/sunxi-reference-design-tablet.dtsi
+++ b/arch/arm/boot/dts/sunxi-reference-design-tablet.dtsi
@@ -42,7 +42,6 @@
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/input/input.h>
-#include <dt-bindings/pinctrl/sun4i-a10.h>
#include "sunxi-common-regulators.dtsi"
&i2c0 {
diff --git a/arch/arm/boot/dts/uniphier-ld4-ref.dts b/arch/arm/boot/dts/uniphier-ld4-ref.dts
index 110031bc0e7e..e0da4ee21c21 100644
--- a/arch/arm/boot/dts/uniphier-ld4-ref.dts
+++ b/arch/arm/boot/dts/uniphier-ld4-ref.dts
@@ -52,11 +52,6 @@
model = "UniPhier LD4 Reference Board";
compatible = "socionext,uniphier-ld4-ref", "socionext,uniphier-ld4";
- memory {
- device_type = "memory";
- reg = <0x80000000 0x20000000>;
- };
-
chosen {
stdout-path = "serial0:115200n8";
};
@@ -71,6 +66,11 @@
i2c2 = &i2c2;
i2c3 = &i2c3;
};
+
+ memory@80000000 {
+ device_type = "memory";
+ reg = <0x80000000 0x20000000>;
+ };
};
&ethsc {
diff --git a/arch/arm/boot/dts/uniphier-ld4.dtsi b/arch/arm/boot/dts/uniphier-ld4.dtsi
index a7c494d7c43a..4f5fe15eaee2 100644
--- a/arch/arm/boot/dts/uniphier-ld4.dtsi
+++ b/arch/arm/boot/dts/uniphier-ld4.dtsi
@@ -43,10 +43,10 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
-/include/ "skeleton.dtsi"
-
/ {
compatible = "socionext,uniphier-ld4";
+ #address-cells = <1>;
+ #size-cells = <1>;
cpus {
#address-cells = <1>;
diff --git a/arch/arm/boot/dts/uniphier-ld6b-ref.dts b/arch/arm/boot/dts/uniphier-ld6b-ref.dts
index c05d631dcf02..a397a8811c78 100644
--- a/arch/arm/boot/dts/uniphier-ld6b-ref.dts
+++ b/arch/arm/boot/dts/uniphier-ld6b-ref.dts
@@ -52,11 +52,6 @@
model = "UniPhier LD6b Reference Board";
compatible = "socionext,uniphier-ld6b-ref", "socionext,uniphier-ld6b";
- memory {
- device_type = "memory";
- reg = <0x80000000 0x80000000>;
- };
-
chosen {
stdout-path = "serial0:115200n8";
};
@@ -73,6 +68,11 @@
i2c5 = &i2c5;
i2c6 = &i2c6;
};
+
+ memory@80000000 {
+ device_type = "memory";
+ reg = <0x80000000 0x80000000>;
+ };
};
&ethsc {
diff --git a/arch/arm/boot/dts/uniphier-pinctrl.dtsi b/arch/arm/boot/dts/uniphier-pinctrl.dtsi
index 8ee79da9af7c..246f35ffb638 100644
--- a/arch/arm/boot/dts/uniphier-pinctrl.dtsi
+++ b/arch/arm/boot/dts/uniphier-pinctrl.dtsi
@@ -45,7 +45,7 @@
&pinctrl {
pinctrl_emmc: emmc_grp {
- groups = "emmc";
+ groups = "emmc", "emmc_dat8";
function = "emmc";
};
diff --git a/arch/arm/boot/dts/uniphier-pro4-ace.dts b/arch/arm/boot/dts/uniphier-pro4-ace.dts
index 0ab0a40c041e..fefc89149234 100644
--- a/arch/arm/boot/dts/uniphier-pro4-ace.dts
+++ b/arch/arm/boot/dts/uniphier-pro4-ace.dts
@@ -50,11 +50,6 @@
model = "UniPhier Pro4 Ace Board";
compatible = "socionext,uniphier-pro4-ace", "socionext,uniphier-pro4";
- memory {
- device_type = "memory";
- reg = <0x80000000 0x40000000>;
- };
-
chosen {
stdout-path = "serial0:115200n8";
};
@@ -70,6 +65,11 @@
i2c5 = &i2c5;
i2c6 = &i2c6;
};
+
+ memory@80000000 {
+ device_type = "memory";
+ reg = <0x80000000 0x40000000>;
+ };
};
&serial0 {
@@ -90,6 +90,7 @@
eeprom@54 {
compatible = "st,24c64";
reg = <0x54>;
+ pagesize = <32>;
};
};
diff --git a/arch/arm/boot/dts/uniphier-pro4-ref.dts b/arch/arm/boot/dts/uniphier-pro4-ref.dts
index 9e92e60d25ce..6077e634d14a 100644
--- a/arch/arm/boot/dts/uniphier-pro4-ref.dts
+++ b/arch/arm/boot/dts/uniphier-pro4-ref.dts
@@ -52,11 +52,6 @@
model = "UniPhier Pro4 Reference Board";
compatible = "socionext,uniphier-pro4-ref", "socionext,uniphier-pro4";
- memory {
- device_type = "memory";
- reg = <0x80000000 0x40000000>;
- };
-
chosen {
stdout-path = "serial0:115200n8";
};
@@ -73,6 +68,11 @@
i2c5 = &i2c5;
i2c6 = &i2c6;
};
+
+ memory@80000000 {
+ device_type = "memory";
+ reg = <0x80000000 0x40000000>;
+ };
};
&ethsc {
diff --git a/arch/arm/boot/dts/uniphier-pro4-sanji.dts b/arch/arm/boot/dts/uniphier-pro4-sanji.dts
index dc4ea8832ce2..6c63c8bad825 100644
--- a/arch/arm/boot/dts/uniphier-pro4-sanji.dts
+++ b/arch/arm/boot/dts/uniphier-pro4-sanji.dts
@@ -50,11 +50,6 @@
model = "UniPhier Pro4 Sanji Board";
compatible = "socionext,uniphier-pro4-sanji", "socionext,uniphier-pro4";
- memory {
- device_type = "memory";
- reg = <0x80000000 0x80000000>;
- };
-
chosen {
stdout-path = "serial0:115200n8";
};
@@ -69,6 +64,11 @@
i2c5 = &i2c5;
i2c6 = &i2c6;
};
+
+ memory@80000000 {
+ device_type = "memory";
+ reg = <0x80000000 0x80000000>;
+ };
};
&serial0 {
@@ -85,6 +85,7 @@
eeprom@54 {
compatible = "st,24c64";
reg = <0x54>;
+ pagesize = <32>;
};
};
diff --git a/arch/arm/boot/dts/uniphier-pro4.dtsi b/arch/arm/boot/dts/uniphier-pro4.dtsi
index e960b09ff01c..794a85a7068b 100644
--- a/arch/arm/boot/dts/uniphier-pro4.dtsi
+++ b/arch/arm/boot/dts/uniphier-pro4.dtsi
@@ -43,10 +43,10 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
-/include/ "skeleton.dtsi"
-
/ {
compatible = "socionext,uniphier-pro4";
+ #address-cells = <1>;
+ #size-cells = <1>;
cpus {
#address-cells = <1>;
diff --git a/arch/arm/boot/dts/uniphier-pro5.dtsi b/arch/arm/boot/dts/uniphier-pro5.dtsi
index dbc5e5333163..df07b555cbed 100644
--- a/arch/arm/boot/dts/uniphier-pro5.dtsi
+++ b/arch/arm/boot/dts/uniphier-pro5.dtsi
@@ -43,10 +43,10 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
-/include/ "skeleton.dtsi"
-
/ {
compatible = "socionext,uniphier-pro5";
+ #address-cells = <1>;
+ #size-cells = <1>;
cpus {
#address-cells = <1>;
diff --git a/arch/arm/boot/dts/uniphier-pxs2-gentil.dts b/arch/arm/boot/dts/uniphier-pxs2-gentil.dts
index 373818ace086..cccc86658d20 100644
--- a/arch/arm/boot/dts/uniphier-pxs2-gentil.dts
+++ b/arch/arm/boot/dts/uniphier-pxs2-gentil.dts
@@ -51,11 +51,6 @@
compatible = "socionext,uniphier-pxs2-gentil",
"socionext,uniphier-pxs2";
- memory {
- device_type = "memory";
- reg = <0x80000000 0x80000000>;
- };
-
chosen {
stdout-path = "serial0:115200n8";
};
@@ -70,6 +65,11 @@
i2c5 = &i2c5;
i2c6 = &i2c6;
};
+
+ memory@80000000 {
+ device_type = "memory";
+ reg = <0x80000000 0x80000000>;
+ };
};
&serial2 {
@@ -82,6 +82,7 @@
eeprom@54 {
compatible = "st,24c64";
reg = <0x54>;
+ pagesize = <32>;
};
};
diff --git a/arch/arm/boot/dts/uniphier-pxs2-vodka.dts b/arch/arm/boot/dts/uniphier-pxs2-vodka.dts
index 51a3eacddfc6..803a39aa39d0 100644
--- a/arch/arm/boot/dts/uniphier-pxs2-vodka.dts
+++ b/arch/arm/boot/dts/uniphier-pxs2-vodka.dts
@@ -50,11 +50,6 @@
model = "UniPhier PXs2 Vodka Board";
compatible = "socionext,uniphier-pxs2-vodka", "socionext,uniphier-pxs2";
- memory {
- device_type = "memory";
- reg = <0x80000000 0x80000000>;
- };
-
chosen {
stdout-path = "serial0:115200n8";
};
@@ -68,6 +63,11 @@
i2c5 = &i2c5;
i2c6 = &i2c6;
};
+
+ memory@80000000 {
+ device_type = "memory";
+ reg = <0x80000000 0x80000000>;
+ };
};
&serial2 {
diff --git a/arch/arm/boot/dts/uniphier-pxs2.dtsi b/arch/arm/boot/dts/uniphier-pxs2.dtsi
index e9e031d63c1a..58c3e2f35706 100644
--- a/arch/arm/boot/dts/uniphier-pxs2.dtsi
+++ b/arch/arm/boot/dts/uniphier-pxs2.dtsi
@@ -43,10 +43,10 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
-/include/ "skeleton.dtsi"
-
/ {
compatible = "socionext,uniphier-pxs2";
+ #address-cells = <1>;
+ #size-cells = <1>;
cpus {
#address-cells = <1>;
diff --git a/arch/arm/boot/dts/uniphier-ref-daughter.dtsi b/arch/arm/boot/dts/uniphier-ref-daughter.dtsi
index f7df0881c5e0..c62ae1a81f47 100644
--- a/arch/arm/boot/dts/uniphier-ref-daughter.dtsi
+++ b/arch/arm/boot/dts/uniphier-ref-daughter.dtsi
@@ -1,7 +1,8 @@
/*
* Device Tree Source for UniPhier Reference Daughter Board
*
- * Copyright (C) 2015 Masahiro Yamada <yamada.masahiro@socionext.com>
+ * Copyright (C) 2015-2017 Socionext Inc.
+ * Author: Masahiro Yamada <yamada.masahiro@socionext.com>
*
* This file is dual-licensed: you can use it either under the terms
* of the GPL or the X11 license, at your option. Note that this dual
@@ -46,5 +47,6 @@
eeprom@50 {
compatible = "microchip,24lc128";
reg = <0x50>;
+ pagesize = <64>;
};
};
diff --git a/arch/arm/boot/dts/uniphier-sld3-ref.dts b/arch/arm/boot/dts/uniphier-sld3-ref.dts
index ac792ae07ae0..eb63dcca92b5 100644
--- a/arch/arm/boot/dts/uniphier-sld3-ref.dts
+++ b/arch/arm/boot/dts/uniphier-sld3-ref.dts
@@ -52,12 +52,6 @@
model = "UniPhier sLD3 Reference Board";
compatible = "socionext,uniphier-sld3-ref", "socionext,uniphier-sld3";
- memory {
- device_type = "memory";
- reg = <0x80000000 0x20000000
- 0xc0000000 0x20000000>;
- };
-
chosen {
stdout-path = "serial0:115200n8";
};
@@ -72,6 +66,12 @@
i2c3 = &i2c3;
i2c4 = &i2c4;
};
+
+ memory@80000000 {
+ device_type = "memory";
+ reg = <0x80000000 0x20000000
+ 0xc0000000 0x20000000>;
+ };
};
&ethsc {
diff --git a/arch/arm/boot/dts/uniphier-sld3.dtsi b/arch/arm/boot/dts/uniphier-sld3.dtsi
index 9fad6bd2db8a..01d77edac01f 100644
--- a/arch/arm/boot/dts/uniphier-sld3.dtsi
+++ b/arch/arm/boot/dts/uniphier-sld3.dtsi
@@ -43,10 +43,10 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
-/include/ "skeleton.dtsi"
-
/ {
compatible = "socionext,uniphier-sld3";
+ #address-cells = <1>;
+ #size-cells = <1>;
cpus {
#address-cells = <1>;
diff --git a/arch/arm/boot/dts/uniphier-sld8-ref.dts b/arch/arm/boot/dts/uniphier-sld8-ref.dts
index a8291f988066..737d276349fd 100644
--- a/arch/arm/boot/dts/uniphier-sld8-ref.dts
+++ b/arch/arm/boot/dts/uniphier-sld8-ref.dts
@@ -52,11 +52,6 @@
model = "UniPhier sLD8 Reference Board";
compatible = "socionext,uniphier-sld8-ref", "socionext,uniphier-sld8";
- memory {
- device_type = "memory";
- reg = <0x80000000 0x20000000>;
- };
-
chosen {
stdout-path = "serial0:115200n8";
};
@@ -71,6 +66,11 @@
i2c2 = &i2c2;
i2c3 = &i2c3;
};
+
+ memory@80000000 {
+ device_type = "memory";
+ reg = <0x80000000 0x20000000>;
+ };
};
&ethsc {
diff --git a/arch/arm/boot/dts/uniphier-sld8.dtsi b/arch/arm/boot/dts/uniphier-sld8.dtsi
index b2c980ead7f0..eb06fdc04b02 100644
--- a/arch/arm/boot/dts/uniphier-sld8.dtsi
+++ b/arch/arm/boot/dts/uniphier-sld8.dtsi
@@ -43,10 +43,10 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
-/include/ "skeleton.dtsi"
-
/ {
compatible = "socionext,uniphier-sld8";
+ #address-cells = <1>;
+ #size-cells = <1>;
cpus {
#address-cells = <1>;
diff --git a/arch/arm/boot/dts/uniphier-support-card.dtsi b/arch/arm/boot/dts/uniphier-support-card.dtsi
index 51ecc9b9c0ce..f61dfec2807f 100644
--- a/arch/arm/boot/dts/uniphier-support-card.dtsi
+++ b/arch/arm/boot/dts/uniphier-support-card.dtsi
@@ -1,7 +1,8 @@
/*
* Device Tree Source for UniPhier Support Card (Expansion Board)
*
- * Copyright (C) 2015 Masahiro Yamada <yamada.masahiro@socionext.com>
+ * Copyright (C) 2015-2017 Socionext Inc.
+ * Author: Masahiro Yamada <yamada.masahiro@socionext.com>
*
* This file is dual-licensed: you can use it either under the terms
* of the GPL or the X11 license, at your option. Note that this dual
@@ -46,7 +47,7 @@
status = "okay";
ranges = <1 0x00000000 0x42000000 0x02000000>;
- support_card: support_card {
+ support_card: support_card@1,1f00000 {
compatible = "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
diff --git a/arch/arm/boot/dts/vexpress-v2m-rs1.dtsi b/arch/arm/boot/dts/vexpress-v2m-rs1.dtsi
index 3086efacd00e..35714ff6f467 100644
--- a/arch/arm/boot/dts/vexpress-v2m-rs1.dtsi
+++ b/arch/arm/boot/dts/vexpress-v2m-rs1.dtsi
@@ -71,7 +71,7 @@
#size-cells = <1>;
ranges = <0 3 0 0x200000>;
- v2m_sysreg: sysreg@010000 {
+ v2m_sysreg: sysreg@10000 {
compatible = "arm,vexpress-sysreg";
reg = <0x010000 0x1000>;
@@ -94,7 +94,7 @@
};
};
- v2m_sysctl: sysctl@020000 {
+ v2m_sysctl: sysctl@20000 {
compatible = "arm,sp810", "arm,primecell";
reg = <0x020000 0x1000>;
clocks = <&v2m_refclk32khz>, <&v2m_refclk1mhz>, <&smbclk>;
@@ -106,7 +106,7 @@
};
/* PCI-E I2C bus */
- v2m_i2c_pcie: i2c@030000 {
+ v2m_i2c_pcie: i2c@30000 {
compatible = "arm,versatile-i2c";
reg = <0x030000 0x1000>;
@@ -119,7 +119,7 @@
};
};
- aaci@040000 {
+ aaci@40000 {
compatible = "arm,pl041", "arm,primecell";
reg = <0x040000 0x1000>;
interrupts = <11>;
@@ -127,7 +127,7 @@
clock-names = "apb_pclk";
};
- mmci@050000 {
+ mmci@50000 {
compatible = "arm,pl180", "arm,primecell";
reg = <0x050000 0x1000>;
interrupts = <9 10>;
@@ -139,7 +139,7 @@
clock-names = "mclk", "apb_pclk";
};
- kmi@060000 {
+ kmi@60000 {
compatible = "arm,pl050", "arm,primecell";
reg = <0x060000 0x1000>;
interrupts = <12>;
@@ -147,7 +147,7 @@
clock-names = "KMIREFCLK", "apb_pclk";
};
- kmi@070000 {
+ kmi@70000 {
compatible = "arm,pl050", "arm,primecell";
reg = <0x070000 0x1000>;
interrupts = <13>;
@@ -155,7 +155,7 @@
clock-names = "KMIREFCLK", "apb_pclk";
};
- v2m_serial0: uart@090000 {
+ v2m_serial0: uart@90000 {
compatible = "arm,pl011", "arm,primecell";
reg = <0x090000 0x1000>;
interrupts = <5>;
@@ -163,7 +163,7 @@
clock-names = "uartclk", "apb_pclk";
};
- v2m_serial1: uart@0a0000 {
+ v2m_serial1: uart@a0000 {
compatible = "arm,pl011", "arm,primecell";
reg = <0x0a0000 0x1000>;
interrupts = <6>;
@@ -171,7 +171,7 @@
clock-names = "uartclk", "apb_pclk";
};
- v2m_serial2: uart@0b0000 {
+ v2m_serial2: uart@b0000 {
compatible = "arm,pl011", "arm,primecell";
reg = <0x0b0000 0x1000>;
interrupts = <7>;
@@ -179,7 +179,7 @@
clock-names = "uartclk", "apb_pclk";
};
- v2m_serial3: uart@0c0000 {
+ v2m_serial3: uart@c0000 {
compatible = "arm,pl011", "arm,primecell";
reg = <0x0c0000 0x1000>;
interrupts = <8>;
@@ -187,7 +187,7 @@
clock-names = "uartclk", "apb_pclk";
};
- wdt@0f0000 {
+ wdt@f0000 {
compatible = "arm,sp805", "arm,primecell";
reg = <0x0f0000 0x1000>;
interrupts = <0>;
diff --git a/arch/arm/boot/dts/vexpress-v2m.dtsi b/arch/arm/boot/dts/vexpress-v2m.dtsi
index c6393d3f1719..1b6f6393be93 100644
--- a/arch/arm/boot/dts/vexpress-v2m.dtsi
+++ b/arch/arm/boot/dts/vexpress-v2m.dtsi
@@ -70,7 +70,7 @@
#size-cells = <1>;
ranges = <0 7 0 0x20000>;
- v2m_sysreg: sysreg@00000 {
+ v2m_sysreg: sysreg@0 {
compatible = "arm,vexpress-sysreg";
reg = <0x00000 0x1000>;
@@ -93,7 +93,7 @@
};
};
- v2m_sysctl: sysctl@01000 {
+ v2m_sysctl: sysctl@1000 {
compatible = "arm,sp810", "arm,primecell";
reg = <0x01000 0x1000>;
clocks = <&v2m_refclk32khz>, <&v2m_refclk1mhz>, <&smbclk>;
@@ -105,7 +105,7 @@
};
/* PCI-E I2C bus */
- v2m_i2c_pcie: i2c@02000 {
+ v2m_i2c_pcie: i2c@2000 {
compatible = "arm,versatile-i2c";
reg = <0x02000 0x1000>;
@@ -118,7 +118,7 @@
};
};
- aaci@04000 {
+ aaci@4000 {
compatible = "arm,pl041", "arm,primecell";
reg = <0x04000 0x1000>;
interrupts = <11>;
@@ -126,7 +126,7 @@
clock-names = "apb_pclk";
};
- mmci@05000 {
+ mmci@5000 {
compatible = "arm,pl180", "arm,primecell";
reg = <0x05000 0x1000>;
interrupts = <9 10>;
@@ -138,7 +138,7 @@
clock-names = "mclk", "apb_pclk";
};
- kmi@06000 {
+ kmi@6000 {
compatible = "arm,pl050", "arm,primecell";
reg = <0x06000 0x1000>;
interrupts = <12>;
@@ -146,7 +146,7 @@
clock-names = "KMIREFCLK", "apb_pclk";
};
- kmi@07000 {
+ kmi@7000 {
compatible = "arm,pl050", "arm,primecell";
reg = <0x07000 0x1000>;
interrupts = <13>;
@@ -154,7 +154,7 @@
clock-names = "KMIREFCLK", "apb_pclk";
};
- v2m_serial0: uart@09000 {
+ v2m_serial0: uart@9000 {
compatible = "arm,pl011", "arm,primecell";
reg = <0x09000 0x1000>;
interrupts = <5>;
@@ -162,7 +162,7 @@
clock-names = "uartclk", "apb_pclk";
};
- v2m_serial1: uart@0a000 {
+ v2m_serial1: uart@a000 {
compatible = "arm,pl011", "arm,primecell";
reg = <0x0a000 0x1000>;
interrupts = <6>;
@@ -170,7 +170,7 @@
clock-names = "uartclk", "apb_pclk";
};
- v2m_serial2: uart@0b000 {
+ v2m_serial2: uart@b000 {
compatible = "arm,pl011", "arm,primecell";
reg = <0x0b000 0x1000>;
interrupts = <7>;
@@ -178,7 +178,7 @@
clock-names = "uartclk", "apb_pclk";
};
- v2m_serial3: uart@0c000 {
+ v2m_serial3: uart@c000 {
compatible = "arm,pl011", "arm,primecell";
reg = <0x0c000 0x1000>;
interrupts = <8>;
@@ -186,7 +186,7 @@
clock-names = "uartclk", "apb_pclk";
};
- wdt@0f000 {
+ wdt@f000 {
compatible = "arm,sp805", "arm,primecell";
reg = <0x0f000 0x1000>;
interrupts = <0>;
diff --git a/arch/arm/boot/dts/vexpress-v2p-ca15-tc1.dts b/arch/arm/boot/dts/vexpress-v2p-ca15-tc1.dts
index 15f4fd3f4695..0c8de0ca73ee 100644
--- a/arch/arm/boot/dts/vexpress-v2p-ca15-tc1.dts
+++ b/arch/arm/boot/dts/vexpress-v2p-ca15-tc1.dts
@@ -220,7 +220,7 @@
};
};
- smb@08000000 {
+ smb@8000000 {
compatible = "simple-bus";
#address-cells = <2>;
diff --git a/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts b/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts
index bd107c5a0226..65ecf206388c 100644
--- a/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts
+++ b/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts
@@ -385,7 +385,7 @@
};
};
- etb@0,20010000 {
+ etb@20010000 {
compatible = "arm,coresight-etb10", "arm,primecell";
reg = <0 0x20010000 0 0x1000>;
@@ -399,7 +399,7 @@
};
};
- tpiu@0,20030000 {
+ tpiu@20030000 {
compatible = "arm,coresight-tpiu", "arm,primecell";
reg = <0 0x20030000 0 0x1000>;
@@ -449,7 +449,7 @@
};
};
- funnel@0,20040000 {
+ funnel@20040000 {
compatible = "arm,coresight-funnel", "arm,primecell";
reg = <0 0x20040000 0 0x1000>;
@@ -513,7 +513,7 @@
};
};
- ptm@0,2201c000 {
+ ptm@2201c000 {
compatible = "arm,coresight-etm3x", "arm,primecell";
reg = <0 0x2201c000 0 0x1000>;
@@ -527,7 +527,7 @@
};
};
- ptm@0,2201d000 {
+ ptm@2201d000 {
compatible = "arm,coresight-etm3x", "arm,primecell";
reg = <0 0x2201d000 0 0x1000>;
@@ -541,7 +541,7 @@
};
};
- etm@0,2203c000 {
+ etm@2203c000 {
compatible = "arm,coresight-etm3x", "arm,primecell";
reg = <0 0x2203c000 0 0x1000>;
@@ -555,7 +555,7 @@
};
};
- etm@0,2203d000 {
+ etm@2203d000 {
compatible = "arm,coresight-etm3x", "arm,primecell";
reg = <0 0x2203d000 0 0x1000>;
@@ -569,7 +569,7 @@
};
};
- etm@0,2203e000 {
+ etm@2203e000 {
compatible = "arm,coresight-etm3x", "arm,primecell";
reg = <0 0x2203e000 0 0x1000>;
@@ -583,7 +583,7 @@
};
};
- smb@08000000 {
+ smb@8000000 {
compatible = "simple-bus";
#address-cells = <2>;
diff --git a/arch/arm/boot/dts/vexpress-v2p-ca5s.dts b/arch/arm/boot/dts/vexpress-v2p-ca5s.dts
index 1acecaf4b13d..6e69b8e6c1a7 100644
--- a/arch/arm/boot/dts/vexpress-v2p-ca5s.dts
+++ b/arch/arm/boot/dts/vexpress-v2p-ca5s.dts
@@ -190,7 +190,7 @@
};
};
- smb@08000000 {
+ smb@8000000 {
compatible = "simple-bus";
#address-cells = <2>;
diff --git a/arch/arm/boot/dts/vexpress-v2p-ca9.dts b/arch/arm/boot/dts/vexpress-v2p-ca9.dts
index b608a03ee02f..c9305b58afc2 100644
--- a/arch/arm/boot/dts/vexpress-v2p-ca9.dts
+++ b/arch/arm/boot/dts/vexpress-v2p-ca9.dts
@@ -300,7 +300,7 @@
};
};
- smb@04000000 {
+ smb@4000000 {
compatible = "simple-bus";
#address-cells = <2>;
diff --git a/arch/arm/boot/dts/vf610-zii-dev-rev-b.dts b/arch/arm/boot/dts/vf610-zii-dev-rev-b.dts
index 7940408838df..37f95427616f 100644
--- a/arch/arm/boot/dts/vf610-zii-dev-rev-b.dts
+++ b/arch/arm/boot/dts/vf610-zii-dev-rev-b.dts
@@ -239,7 +239,7 @@
#size-cells = <0>;
reg = <4>;
- switch2: switch2@0 {
+ switch2: switch@0 {
compatible = "marvell,mv88e6085";
#address-cells = <1>;
#size-cells = <0>;
@@ -459,18 +459,6 @@
>;
};
- pinctrl_gpio_switch0: pinctrl-gpio-switch0 {
- fsl,pins = <
- VF610_PAD_PTB5__GPIO_27 0x219d
- >;
- };
-
- pinctrl_gpio_switch1: pinctrl-gpio-switch1 {
- fsl,pins = <
- VF610_PAD_PTB4__GPIO_26 0x219d
- >;
- };
-
pinctrl_mdio_mux: pinctrl-mdio-mux {
fsl,pins = <
VF610_PAD_PTA18__GPIO_8 0x31c2
diff --git a/arch/arm/boot/dts/vf610-zii-dev-rev-c.dts b/arch/arm/boot/dts/vf610-zii-dev-rev-c.dts
index 6a45bd24ffe6..db3b408ea55a 100644
--- a/arch/arm/boot/dts/vf610-zii-dev-rev-c.dts
+++ b/arch/arm/boot/dts/vf610-zii-dev-rev-c.dts
@@ -67,11 +67,17 @@
switch0: switch@0 {
compatible = "marvell,mv88e6190";
+ pinctrl-0 = <&pinctrl_gpio_switch0>;
+ pinctrl-names = "default";
#address-cells = <1>;
#size-cells = <0>;
reg = <0>;
dsa,member = <0 0>;
eeprom-length = <512>;
+ interrupt-parent = <&gpio0>;
+ interrupts = <27 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
ports {
#address-cells = <1>;
@@ -91,21 +97,25 @@
port@1 {
reg = <1>;
label = "lan1";
+ phy-handle = <&switch0phy1>;
};
port@2 {
reg = <2>;
label = "lan2";
+ phy-handle = <&switch0phy2>;
};
port@3 {
reg = <3>;
label = "lan3";
+ phy-handle = <&switch0phy3>;
};
port@4 {
reg = <4>;
label = "lan4";
+ phy-handle = <&switch0phy4>;
};
switch0port10: port@10 {
@@ -115,6 +125,35 @@
link = <&switch1port10>;
};
};
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ switch0phy1: switch0phy@1 {
+ reg = <1>;
+ interrupt-parent = <&switch0>;
+ interrupts = <1 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ switch0phy2: switch0phy@2 {
+ reg = <2>;
+ interrupt-parent = <&switch0>;
+ interrupts = <2 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ switch0phy3: switch0phy@3 {
+ reg = <3>;
+ interrupt-parent = <&switch0>;
+ interrupts = <3 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ switch0phy4: switch0phy@4 {
+ reg = <4>;
+ interrupt-parent = <&switch0>;
+ interrupts = <4 IRQ_TYPE_LEVEL_HIGH>;
+ };
+ };
};
};
@@ -125,11 +164,17 @@
switch1: switch@0 {
compatible = "marvell,mv88e6190";
+ pinctrl-0 = <&pinctrl_gpio_switch1>;
+ pinctrl-names = "default";
#address-cells = <1>;
#size-cells = <0>;
reg = <0>;
dsa,member = <0 1>;
eeprom-length = <512>;
+ interrupt-parent = <&gpio0>;
+ interrupts = <26 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
ports {
#address-cells = <1>;
@@ -138,21 +183,25 @@
port@1 {
reg = <1>;
label = "lan5";
+ phy-handle = <&switch1phy1>;
};
port@2 {
reg = <2>;
label = "lan6";
+ phy-handle = <&switch1phy2>;
};
port@3 {
reg = <3>;
label = "lan7";
+ phy-handle = <&switch1phy3>;
};
port@4 {
reg = <4>;
label = "lan8";
+ phy-handle = <&switch1phy4>;
};
@@ -163,6 +212,34 @@
link = <&switch0port10>;
};
};
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ switch1phy1: switch1phy@1 {
+ reg = <1>;
+ interrupt-parent = <&switch1>;
+ interrupts = <1 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ switch1phy2: switch1phy@2 {
+ reg = <2>;
+ interrupt-parent = <&switch1>;
+ interrupts = <2 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ switch1phy3: switch1phy@3 {
+ reg = <3>;
+ interrupt-parent = <&switch1>;
+ interrupts = <3 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ switch1phy4: switch1phy@4 {
+ reg = <4>;
+ interrupt-parent = <&switch1>;
+ interrupts = <4 IRQ_TYPE_LEVEL_HIGH>;
+ };
+ };
};
};
diff --git a/arch/arm/boot/dts/vf610-zii-dev.dtsi b/arch/arm/boot/dts/vf610-zii-dev.dtsi
index ca9e1bc35e45..6b58d3a97992 100644
--- a/arch/arm/boot/dts/vf610-zii-dev.dtsi
+++ b/arch/arm/boot/dts/vf610-zii-dev.dtsi
@@ -296,6 +296,18 @@
>;
};
+ pinctrl_gpio_switch0: pinctrl-gpio-switch0 {
+ fsl,pins = <
+ VF610_PAD_PTB5__GPIO_27 0x219d
+ >;
+ };
+
+ pinctrl_gpio_switch1: pinctrl-gpio-switch1 {
+ fsl,pins = <
+ VF610_PAD_PTB4__GPIO_26 0x219d
+ >;
+ };
+
pinctrl_i2c_mux_reset: pinctrl-i2c-mux-reset {
fsl,pins = <
VF610_PAD_PTE14__GPIO_119 0x31c2
diff --git a/arch/arm/common/Kconfig b/arch/arm/common/Kconfig
index 9353184d730d..1181053e3ade 100644
--- a/arch/arm/common/Kconfig
+++ b/arch/arm/common/Kconfig
@@ -1,6 +1,3 @@
-config ICST
- bool
-
config SA1111
bool
select DMABOUNCE if !ARCH_PXA
diff --git a/arch/arm/common/Makefile b/arch/arm/common/Makefile
index 27f23b15b1ea..29fdf6a3601d 100644
--- a/arch/arm/common/Makefile
+++ b/arch/arm/common/Makefile
@@ -4,7 +4,6 @@
obj-y += firmware.o
-obj-$(CONFIG_ICST) += icst.o
obj-$(CONFIG_SA1111) += sa1111.o
obj-$(CONFIG_DMABOUNCE) += dmabounce.o
obj-$(CONFIG_SHARP_LOCOMO) += locomo.o
diff --git a/arch/arm/common/icst.c b/arch/arm/common/icst.c
deleted file mode 100644
index d7ed252708c5..000000000000
--- a/arch/arm/common/icst.c
+++ /dev/null
@@ -1,105 +0,0 @@
-/*
- * linux/arch/arm/common/icst307.c
- *
- * Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Support functions for calculating clocks/divisors for the ICST307
- * clock generators. See http://www.idt.com/ for more information
- * on these devices.
- *
- * This is an almost identical implementation to the ICST525 clock generator.
- * The s2div and idx2s files are different
- */
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <asm/div64.h>
-#include <asm/hardware/icst.h>
-
-/*
- * Divisors for each OD setting.
- */
-const unsigned char icst307_s2div[8] = { 10, 2, 8, 4, 5, 7, 3, 6 };
-const unsigned char icst525_s2div[8] = { 10, 2, 8, 4, 5, 7, 9, 6 };
-EXPORT_SYMBOL(icst307_s2div);
-EXPORT_SYMBOL(icst525_s2div);
-
-unsigned long icst_hz(const struct icst_params *p, struct icst_vco vco)
-{
- u64 dividend = p->ref * 2 * (u64)(vco.v + 8);
- u32 divisor = (vco.r + 2) * p->s2div[vco.s];
-
- do_div(dividend, divisor);
- return (unsigned long)dividend;
-}
-
-EXPORT_SYMBOL(icst_hz);
-
-/*
- * Ascending divisor S values.
- */
-const unsigned char icst307_idx2s[8] = { 1, 6, 3, 4, 7, 5, 2, 0 };
-const unsigned char icst525_idx2s[8] = { 1, 3, 4, 7, 5, 2, 6, 0 };
-EXPORT_SYMBOL(icst307_idx2s);
-EXPORT_SYMBOL(icst525_idx2s);
-
-struct icst_vco
-icst_hz_to_vco(const struct icst_params *p, unsigned long freq)
-{
- struct icst_vco vco = { .s = 1, .v = p->vd_max, .r = p->rd_max };
- unsigned long f;
- unsigned int i = 0, rd, best = (unsigned int)-1;
-
- /*
- * First, find the PLL output divisor such
- * that the PLL output is within spec.
- */
- do {
- f = freq * p->s2div[p->idx2s[i]];
-
- if (f > p->vco_min && f <= p->vco_max)
- break;
- i++;
- } while (i < 8);
-
- if (i >= 8)
- return vco;
-
- vco.s = p->idx2s[i];
-
- /*
- * Now find the closest divisor combination
- * which gives a PLL output of 'f'.
- */
- for (rd = p->rd_min; rd <= p->rd_max; rd++) {
- unsigned long fref_div, f_pll;
- unsigned int vd;
- int f_diff;
-
- fref_div = (2 * p->ref) / rd;
-
- vd = (f + fref_div / 2) / fref_div;
- if (vd < p->vd_min || vd > p->vd_max)
- continue;
-
- f_pll = fref_div * vd;
- f_diff = f_pll - f;
- if (f_diff < 0)
- f_diff = -f_diff;
-
- if ((unsigned)f_diff < best) {
- vco.v = vd - 8;
- vco.r = rd - 2;
- if (f_diff == 0)
- break;
- best = f_diff;
- }
- }
-
- return vco;
-}
-
-EXPORT_SYMBOL(icst_hz_to_vco);
diff --git a/arch/arm/configs/aspeed_g4_defconfig b/arch/arm/configs/aspeed_g4_defconfig
index d25010e3a0bc..cfc2465e8b77 100644
--- a/arch/arm/configs/aspeed_g4_defconfig
+++ b/arch/arm/configs/aspeed_g4_defconfig
@@ -1,6 +1,6 @@
CONFIG_KERNEL_XZ=y
+# CONFIG_SWAP is not set
CONFIG_SYSVIPC=y
-CONFIG_USELIB=y
CONFIG_IRQ_DOMAIN_DEBUG=y
CONFIG_NO_HZ_IDLE=y
CONFIG_HIGH_RES_TIMERS=y
@@ -8,39 +8,65 @@ CONFIG_LOG_BUF_SHIFT=14
CONFIG_CGROUPS=y
CONFIG_BLK_DEV_INITRD=y
# CONFIG_RD_BZIP2 is not set
-# CONFIG_RD_LZMA is not set
# CONFIG_RD_LZO is not set
# CONFIG_RD_LZ4 is not set
-CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_KALLSYMS_ALL=y
CONFIG_BPF_SYSCALL=y
-# CONFIG_SHMEM is not set
# CONFIG_AIO is not set
CONFIG_EMBEDDED=y
# CONFIG_COMPAT_BRK is not set
CONFIG_SLAB=y
-CONFIG_CC_STACKPROTECTOR_STRONG=y
+CONFIG_JUMP_LABEL=y
+CONFIG_GCC_PLUGINS=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
-# CONFIG_BLOCK is not set
+# CONFIG_LBDAF is not set
# CONFIG_ARCH_MULTI_V7 is not set
CONFIG_ARCH_ASPEED=y
CONFIG_MACH_ASPEED_G4=y
CONFIG_AEABI=y
-CONFIG_UACCESS_WITH_MEMCPY=y
+# CONFIG_CPU_SW_DOMAIN_PAN is not set
+# CONFIG_COMPACTION is not set
CONFIG_SECCOMP=y
+# CONFIG_ATAGS is not set
+CONFIG_ZBOOT_ROM_TEXT=0x0
+CONFIG_ZBOOT_ROM_BSS=0x0
CONFIG_ARM_APPENDED_DTB=y
CONFIG_ARM_ATAG_DTB_COMPAT=y
CONFIG_KEXEC=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_PACKET_DIAG=y
+CONFIG_UNIX=y
+CONFIG_UNIX_DIAG=y
CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_SYN_COOKIES=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_DIAG is not set
+# CONFIG_IPV6 is not set
CONFIG_NET_NCSI=y
# CONFIG_WIRELESS is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
+CONFIG_MTD=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_PARTITIONED_MASTER=y
+CONFIG_MTD_SPI_NOR=y
+CONFIG_SPI_ASPEED_SMC=y
+CONFIG_MTD_UBI=y
+CONFIG_MTD_UBI_FASTMAP=y
+CONFIG_MTD_UBI_BLOCK=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_ASPEED_LPC_CTRL=y
+CONFIG_EEPROM_AT24=y
CONFIG_NETDEVICES=y
+CONFIG_NETCONSOLE=y
# CONFIG_NET_VENDOR_ALACRITECH is not set
# CONFIG_NET_VENDOR_AMAZON is not set
# CONFIG_NET_VENDOR_ARC is not set
@@ -63,9 +89,10 @@ CONFIG_FTGMAC100=y
# CONFIG_NET_VENDOR_SOLARFLARE is not set
# CONFIG_NET_VENDOR_SMSC is not set
# CONFIG_NET_VENDOR_STMICRO is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
+CONFIG_BROADCOM_PHY=y
+CONFIG_REALTEK_PHY=y
# CONFIG_WLAN is not set
# CONFIG_INPUT is not set
# CONFIG_SERIO is not set
@@ -81,35 +108,74 @@ CONFIG_SERIAL_8250_SHARE_IRQ=y
CONFIG_SERIAL_OF_PLATFORM=y
CONFIG_ASPEED_BT_IPMI_BMC=y
# CONFIG_HW_RANDOM is not set
+CONFIG_I2C=y
+# CONFIG_I2C_COMPAT is not set
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_MUX=y
+CONFIG_I2C_MUX_PCA9541=y
+CONFIG_I2C_MUX_PCA954x=y
CONFIG_GPIOLIB=y
+CONFIG_GPIO_SYSFS=y
CONFIG_GPIO_ASPEED=y
-# CONFIG_USB_SUPPORT is not set
+CONFIG_W1=y
+CONFIG_W1_MASTER_GPIO=y
+CONFIG_W1_SLAVE_THERM=y
+CONFIG_SENSORS_ASPEED=y
+CONFIG_SENSORS_IIO_HWMON=y
+CONFIG_SENSORS_LM75=y
+CONFIG_SENSORS_NCT7904=y
+CONFIG_PMBUS=y
+CONFIG_SENSORS_ADM1275=y
+CONFIG_SENSORS_LM25066=y
+CONFIG_SENSORS_UCD9000=y
+CONFIG_SENSORS_TMP421=y
+CONFIG_USB=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_DYNAMIC_MINORS=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_ROOT_HUB_TT=y
+CONFIG_USB_EHCI_HCD_PLATFORM=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_CLASS_FLASH=y
+CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_DS1307=y
+CONFIG_RTC_DRV_PCF8523=y
+CONFIG_RTC_DRV_RV8803=y
+CONFIG_MAILBOX=y
# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_IIO=y
+CONFIG_ASPEED_ADC=y
+CONFIG_BMP280=y
CONFIG_FIRMWARE_MEMMAP=y
CONFIG_FANOTIFY=y
+CONFIG_OVERLAY_FS=y
+CONFIG_TMPFS=y
+CONFIG_JFFS2_FS=y
+CONFIG_JFFS2_SUMMARY=y
+CONFIG_JFFS2_FS_XATTR=y
+CONFIG_UBIFS_FS=y
+CONFIG_SQUASHFS=y
+CONFIG_SQUASHFS_XZ=y
CONFIG_PRINTK_TIME=y
CONFIG_DYNAMIC_DEBUG=y
CONFIG_STRIP_ASM_SYMS=y
-CONFIG_PAGE_POISONING=y
-CONFIG_DEBUG_KMEMLEAK=y
-CONFIG_DEBUG_SHIRQ=y
+CONFIG_DEBUG_FS=y
CONFIG_LOCKUP_DETECTOR=y
CONFIG_WQ_WATCHDOG=y
+CONFIG_PANIC_TIMEOUT=-1
# CONFIG_SCHED_DEBUG is not set
CONFIG_SCHED_STACK_END_CHECK=y
-CONFIG_DEBUG_RT_MUTEXES=y
-CONFIG_DEBUG_WW_MUTEX_SLOWPATH=y
+CONFIG_STACKTRACE=y
# CONFIG_FTRACE is not set
-CONFIG_MEMTEST=y
-CONFIG_UBSAN=y
CONFIG_DEBUG_USER=y
-CONFIG_DEBUG_LL=y
-CONFIG_DEBUG_LL_UART_8250=y
-CONFIG_DEBUG_UART_PHYS=0x1e784000
-CONFIG_DEBUG_UART_VIRT=0xe8784000
-CONFIG_EARLY_PRINTK=y
-CONFIG_STRICT_MODULE_RWX=y
-CONFIG_STRICT_KERNEL_RWX=y
+# CONFIG_CRYPTO_ECHAINIV is not set
+# CONFIG_CRYPTO_HW is not set
# CONFIG_XZ_DEC_X86 is not set
# CONFIG_XZ_DEC_POWERPC is not set
# CONFIG_XZ_DEC_IA64 is not set
diff --git a/arch/arm/configs/aspeed_g5_defconfig b/arch/arm/configs/aspeed_g5_defconfig
index 5f660b02abd9..3c20d93de389 100644
--- a/arch/arm/configs/aspeed_g5_defconfig
+++ b/arch/arm/configs/aspeed_g5_defconfig
@@ -1,6 +1,6 @@
CONFIG_KERNEL_XZ=y
+# CONFIG_SWAP is not set
CONFIG_SYSVIPC=y
-CONFIG_USELIB=y
CONFIG_IRQ_DOMAIN_DEBUG=y
CONFIG_NO_HZ_IDLE=y
CONFIG_HIGH_RES_TIMERS=y
@@ -8,27 +8,28 @@ CONFIG_LOG_BUF_SHIFT=14
CONFIG_CGROUPS=y
CONFIG_BLK_DEV_INITRD=y
# CONFIG_RD_BZIP2 is not set
-# CONFIG_RD_LZMA is not set
# CONFIG_RD_LZO is not set
# CONFIG_RD_LZ4 is not set
-CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_KALLSYMS_ALL=y
CONFIG_BPF_SYSCALL=y
-# CONFIG_SHMEM is not set
# CONFIG_AIO is not set
CONFIG_EMBEDDED=y
# CONFIG_COMPAT_BRK is not set
CONFIG_SLAB=y
-CONFIG_CC_STACKPROTECTOR_STRONG=y
+CONFIG_JUMP_LABEL=y
+CONFIG_GCC_PLUGINS=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
-# CONFIG_BLOCK is not set
+# CONFIG_LBDAF is not set
CONFIG_ARCH_MULTI_V6=y
# CONFIG_ARCH_MULTI_V7 is not set
CONFIG_ARCH_ASPEED=y
CONFIG_MACH_ASPEED_G5=y
+# CONFIG_CACHE_L2X0 is not set
CONFIG_VMSPLIT_2G=y
CONFIG_AEABI=y
-CONFIG_UACCESS_WITH_MEMCPY=y
+# CONFIG_CPU_SW_DOMAIN_PAN is not set
+# CONFIG_COMPACTION is not set
CONFIG_SECCOMP=y
# CONFIG_ATAGS is not set
CONFIG_ZBOOT_ROM_TEXT=0x0
@@ -38,15 +39,37 @@ CONFIG_ARM_ATAG_DTB_COMPAT=y
CONFIG_KEXEC=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_PACKET_DIAG=y
CONFIG_UNIX=y
+CONFIG_UNIX_DIAG=y
CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_SYN_COOKIES=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_DIAG is not set
+# CONFIG_IPV6 is not set
CONFIG_NET_NCSI=y
# CONFIG_WIRELESS is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
+CONFIG_MTD=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_PARTITIONED_MASTER=y
+CONFIG_MTD_SPI_NOR=y
+CONFIG_SPI_ASPEED_SMC=y
+CONFIG_MTD_UBI=y
+CONFIG_MTD_UBI_FASTMAP=y
+CONFIG_MTD_UBI_BLOCK=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_ASPEED_LPC_CTRL=y
+CONFIG_EEPROM_AT24=y
CONFIG_NETDEVICES=y
+CONFIG_NETCONSOLE=y
# CONFIG_NET_VENDOR_ALACRITECH is not set
# CONFIG_NET_VENDOR_AMAZON is not set
# CONFIG_NET_VENDOR_ARC is not set
@@ -69,9 +92,10 @@ CONFIG_FTGMAC100=y
# CONFIG_NET_VENDOR_SOLARFLARE is not set
# CONFIG_NET_VENDOR_SMSC is not set
# CONFIG_NET_VENDOR_STMICRO is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
+CONFIG_BROADCOM_PHY=y
+CONFIG_REALTEK_PHY=y
# CONFIG_WLAN is not set
# CONFIG_INPUT is not set
# CONFIG_SERIO is not set
@@ -87,37 +111,74 @@ CONFIG_SERIAL_8250_SHARE_IRQ=y
CONFIG_SERIAL_OF_PLATFORM=y
CONFIG_ASPEED_BT_IPMI_BMC=y
# CONFIG_HW_RANDOM is not set
+CONFIG_I2C=y
+# CONFIG_I2C_COMPAT is not set
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_MUX=y
+CONFIG_I2C_MUX_PCA9541=y
+CONFIG_I2C_MUX_PCA954x=y
CONFIG_GPIOLIB=y
+CONFIG_GPIO_SYSFS=y
CONFIG_GPIO_ASPEED=y
-# CONFIG_USB_SUPPORT is not set
+CONFIG_W1=y
+CONFIG_W1_MASTER_GPIO=y
+CONFIG_W1_SLAVE_THERM=y
+CONFIG_SENSORS_ASPEED=y
+CONFIG_SENSORS_IIO_HWMON=y
+CONFIG_SENSORS_LM75=y
+CONFIG_SENSORS_NCT7904=y
+CONFIG_PMBUS=y
+CONFIG_SENSORS_ADM1275=y
+CONFIG_SENSORS_LM25066=y
+CONFIG_SENSORS_UCD9000=y
+CONFIG_SENSORS_TMP421=y
+CONFIG_USB=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_DYNAMIC_MINORS=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_ROOT_HUB_TT=y
+CONFIG_USB_EHCI_HCD_PLATFORM=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_CLASS_FLASH=y
+CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_DS1307=y
+CONFIG_RTC_DRV_PCF8523=y
+CONFIG_RTC_DRV_RV8803=y
+CONFIG_MAILBOX=y
# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_IIO=y
+CONFIG_ASPEED_ADC=y
+CONFIG_BMP280=y
CONFIG_FIRMWARE_MEMMAP=y
CONFIG_FANOTIFY=y
+CONFIG_OVERLAY_FS=y
+CONFIG_TMPFS=y
+CONFIG_JFFS2_FS=y
+CONFIG_JFFS2_SUMMARY=y
+CONFIG_JFFS2_FS_XATTR=y
+CONFIG_UBIFS_FS=y
+CONFIG_SQUASHFS=y
+CONFIG_SQUASHFS_XZ=y
CONFIG_PRINTK_TIME=y
CONFIG_DYNAMIC_DEBUG=y
CONFIG_STRIP_ASM_SYMS=y
-CONFIG_PAGE_POISONING=y
-CONFIG_DEBUG_KMEMLEAK=y
-CONFIG_DEBUG_SHIRQ=y
+CONFIG_DEBUG_FS=y
CONFIG_LOCKUP_DETECTOR=y
CONFIG_WQ_WATCHDOG=y
+CONFIG_PANIC_TIMEOUT=-1
# CONFIG_SCHED_DEBUG is not set
CONFIG_SCHED_STACK_END_CHECK=y
-CONFIG_DEBUG_RT_MUTEXES=y
-CONFIG_DEBUG_WW_MUTEX_SLOWPATH=y
+CONFIG_STACKTRACE=y
# CONFIG_FTRACE is not set
-CONFIG_MEMTEST=y
-CONFIG_UBSAN=y
-CONFIG_UBSAN_ALIGNMENT=y
CONFIG_DEBUG_USER=y
-CONFIG_DEBUG_LL=y
-CONFIG_DEBUG_LL_UART_8250=y
-CONFIG_DEBUG_UART_PHYS=0x1e784000
-CONFIG_DEBUG_UART_VIRT=0xe8784000
-CONFIG_EARLY_PRINTK=y
-CONFIG_STRICT_MODULE_RWX=y
-CONFIG_STRICT_KERNEL_RWX=y
-CONFIG_CRYPTO_ECHAINIV=y
+# CONFIG_CRYPTO_ECHAINIV is not set
+# CONFIG_CRYPTO_HW is not set
# CONFIG_XZ_DEC_X86 is not set
# CONFIG_XZ_DEC_POWERPC is not set
# CONFIG_XZ_DEC_IA64 is not set
diff --git a/arch/arm/configs/bcm2835_defconfig b/arch/arm/configs/bcm2835_defconfig
index 4b89f4e6e849..3ba8cd3211f8 100644
--- a/arch/arm/configs/bcm2835_defconfig
+++ b/arch/arm/configs/bcm2835_defconfig
@@ -1,6 +1,5 @@
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_SYSVIPC=y
-CONFIG_FHANDLE=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_BSD_PROCESS_ACCT=y
@@ -32,6 +31,7 @@ CONFIG_PREEMPT_VOLUNTARY=y
CONFIG_AEABI=y
CONFIG_KSM=y
CONFIG_CLEANCACHE=y
+CONFIG_CMA=y
CONFIG_SECCOMP=y
CONFIG_KEXEC=y
CONFIG_CRASH_DUMP=y
@@ -52,6 +52,7 @@ CONFIG_MAC80211=y
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
# CONFIG_STANDALONE is not set
+CONFIG_DMA_CMA=y
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_SCSI_CONSTANTS=y
@@ -62,7 +63,6 @@ CONFIG_USB_NET_SMSC95XX=y
CONFIG_ZD1211RW=y
CONFIG_INPUT_EVDEV=y
# CONFIG_LEGACY_PTYS is not set
-# CONFIG_DEVKMEM is not set
CONFIG_SERIAL_AMBA_PL011=y
CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
CONFIG_TTY_PRINTK=y
@@ -92,6 +92,7 @@ CONFIG_MMC=y
CONFIG_MMC_SDHCI=y
CONFIG_MMC_SDHCI_PLTFM=y
CONFIG_MMC_SDHCI_IPROC=y
+CONFIG_MMC_BCM2835=y
CONFIG_NEW_LEDS=y
CONFIG_LEDS_CLASS=y
CONFIG_LEDS_GPIO=y
diff --git a/arch/arm/configs/davinci_all_defconfig b/arch/arm/configs/davinci_all_defconfig
index c8663eac9b1b..67db82999c06 100644
--- a/arch/arm/configs/davinci_all_defconfig
+++ b/arch/arm/configs/davinci_all_defconfig
@@ -74,12 +74,10 @@ CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_COUNT=1
CONFIG_BLK_DEV_RAM_SIZE=32768
CONFIG_EEPROM_AT24=y
-CONFIG_IDE=m
-CONFIG_BLK_DEV_PALMCHIP_BK3710=m
-CONFIG_SCSI=m
CONFIG_BLK_DEV_SD=m
CONFIG_ATA=m
CONFIG_AHCI_DA850=m
+CONFIG_PATA_BK3710=m
CONFIG_NETDEVICES=y
CONFIG_NETCONSOLE=y
CONFIG_TUN=m
@@ -121,6 +119,7 @@ CONFIG_PINCTRL_DA850_PUPD=m
CONFIG_PINCTRL_SINGLE=y
CONFIG_GPIO_SYSFS=y
CONFIG_GPIO_PCA953X=y
+CONFIG_GPIO_PCA953X_IRQ=y
CONFIG_POWER_RESET=y
CONFIG_POWER_RESET_GPIO=y
CONFIG_WATCHDOG=y
@@ -133,9 +132,11 @@ CONFIG_REGULATOR_TPS6507X=y
CONFIG_MEDIA_SUPPORT=m
CONFIG_MEDIA_CAMERA_SUPPORT=y
CONFIG_V4L_PLATFORM_DRIVERS=y
+CONFIG_VIDEO_DAVINCI_VPIF_DISPLAY=m
CONFIG_VIDEO_DAVINCI_VPIF_CAPTURE=m
# CONFIG_MEDIA_SUBDRV_AUTOSELECT is not set
CONFIG_VIDEO_TVP514X=m
+CONFIG_VIDEO_ADV7343=m
CONFIG_DRM=m
CONFIG_DRM_TILCDC=m
CONFIG_DRM_DUMB_VGA_DAC=m
@@ -204,12 +205,10 @@ CONFIG_MEMORY=y
CONFIG_TI_AEMIF=m
CONFIG_DA8XX_DDRCTL=y
CONFIG_IIO=m
-CONFIG_IIO_BUFFER=y
CONFIG_IIO_BUFFER_CB=m
-CONFIG_IIO_KFIFO_BUF=m
-CONFIG_IIO_TRIGGER=y
CONFIG_IIO_SW_DEVICE=m
CONFIG_IIO_SW_TRIGGER=m
+CONFIG_TI_ADS7950=m
CONFIG_IIO_HRTIMER_TRIGGER=m
CONFIG_IIO_SYSFS_TRIGGER=m
CONFIG_PWM=y
diff --git a/arch/arm/configs/dram_0xd0000000.config b/arch/arm/configs/dram_0xd0000000.config
new file mode 100644
index 000000000000..61ba7045f8a1
--- /dev/null
+++ b/arch/arm/configs/dram_0xd0000000.config
@@ -0,0 +1 @@
+CONFIG_DRAM_BASE=0xd0000000
diff --git a/arch/arm/configs/exynos_defconfig b/arch/arm/configs/exynos_defconfig
index 742baf067e1c..6dc661c4a2c1 100644
--- a/arch/arm/configs/exynos_defconfig
+++ b/arch/arm/configs/exynos_defconfig
@@ -53,7 +53,7 @@ CONFIG_RFKILL_REGULATOR=y
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
CONFIG_DMA_CMA=y
-CONFIG_CMA_SIZE_MBYTES=64
+CONFIG_CMA_SIZE_MBYTES=96
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_CRYPTOLOOP=y
CONFIG_BLK_DEV_RAM=y
@@ -240,7 +240,7 @@ CONFIG_PWM=y
CONFIG_PWM_SAMSUNG=y
CONFIG_PHY_EXYNOS5250_SATA=y
CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
+CONFIG_EXT4_FS=y
CONFIG_AUTOFS4_FS=y
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
@@ -255,6 +255,7 @@ CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ASCII=y
CONFIG_NLS_ISO8859_1=y
CONFIG_PRINTK_TIME=y
+CONFIG_DYNAMIC_DEBUG=y
CONFIG_DEBUG_INFO=y
CONFIG_DEBUG_FS=y
CONFIG_MAGIC_SYSRQ=y
diff --git a/arch/arm/configs/imx_v6_v7_defconfig b/arch/arm/configs/imx_v6_v7_defconfig
index eaba3b165d72..bb6fa568b620 100644
--- a/arch/arm/configs/imx_v6_v7_defconfig
+++ b/arch/arm/configs/imx_v6_v7_defconfig
@@ -143,6 +143,7 @@ CONFIG_SMSC911X=y
# CONFIG_NET_VENDOR_STMICRO is not set
CONFIG_AT803X_PHY=y
CONFIG_MICREL_PHY=y
+CONFIG_SMSC_PHY=y
CONFIG_USB_PEGASUS=m
CONFIG_USB_RTL8150=m
CONFIG_USB_RTL8152=m
@@ -152,7 +153,6 @@ CONFIG_BRCMFMAC=m
CONFIG_WL12XX=m
CONFIG_WLCORE_SDIO=m
# CONFIG_WILINK_PLATFORM_DATA is not set
-# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
CONFIG_INPUT_EVDEV=y
CONFIG_INPUT_EVBUG=m
CONFIG_KEYBOARD_GPIO=y
@@ -165,6 +165,7 @@ CONFIG_TOUCHSCREEN_ADS7846=y
CONFIG_TOUCHSCREEN_EGALAX=y
CONFIG_TOUCHSCREEN_IMX6UL_TSC=y
CONFIG_TOUCHSCREEN_EDT_FT5X06=y
+CONFIG_TOUCHSCREEN_MAX11801=y
CONFIG_TOUCHSCREEN_MC13783=y
CONFIG_TOUCHSCREEN_TSC2004=y
CONFIG_TOUCHSCREEN_TSC2007=y
@@ -173,6 +174,7 @@ CONFIG_TOUCHSCREEN_SX8654=y
CONFIG_TOUCHSCREEN_COLIBRI_VF50=y
CONFIG_INPUT_MISC=y
CONFIG_INPUT_MMA8450=y
+CONFIG_HID_MULTITOUCH=y
CONFIG_SERIO_SERPORT=m
# CONFIG_LEGACY_PTYS is not set
CONFIG_SERIAL_IMX=y
@@ -376,7 +378,6 @@ CONFIG_NLS_ISO8859_1=y
CONFIG_NLS_ISO8859_15=m
CONFIG_NLS_UTF8=y
CONFIG_PRINTK_TIME=y
-CONFIG_DEBUG_FS=y
CONFIG_MAGIC_SYSRQ=y
# CONFIG_SCHED_DEBUG is not set
CONFIG_PROVE_LOCKING=y
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
index 6aa7be191f1a..2685e03600b1 100644
--- a/arch/arm/configs/multi_v7_defconfig
+++ b/arch/arm/configs/multi_v7_defconfig
@@ -195,7 +195,7 @@ CONFIG_DMA_CMA=y
CONFIG_CMA_SIZE_MBYTES=64
CONFIG_OMAP_OCP2SCP=y
CONFIG_SIMPLE_PM_BUS=y
-CONFIG_SUNXI_RSB=m
+CONFIG_SUNXI_RSB=y
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_BLOCK=y
@@ -482,9 +482,10 @@ CONFIG_MFD_AS3722=y
CONFIG_MFD_ATMEL_FLEXCOM=y
CONFIG_MFD_ATMEL_HLCDC=m
CONFIG_MFD_BCM590XX=y
+CONFIG_MFD_AC100=y
CONFIG_MFD_AXP20X=y
-CONFIG_MFD_AXP20X_I2C=m
-CONFIG_MFD_AXP20X_RSB=m
+CONFIG_MFD_AXP20X_I2C=y
+CONFIG_MFD_AXP20X_RSB=y
CONFIG_MFD_CROS_EC=m
CONFIG_MFD_CROS_EC_I2C=m
CONFIG_MFD_CROS_EC_SPI=m
@@ -513,7 +514,7 @@ CONFIG_REGULATOR_ACT8865=y
CONFIG_REGULATOR_ANATOP=y
CONFIG_REGULATOR_AS3711=y
CONFIG_REGULATOR_AS3722=y
-CONFIG_REGULATOR_AXP20X=m
+CONFIG_REGULATOR_AXP20X=y
CONFIG_REGULATOR_BCM590XX=y
CONFIG_REGULATOR_DA9210=y
CONFIG_REGULATOR_FAN53555=y
@@ -593,10 +594,10 @@ CONFIG_DRM_EXYNOS_DPI=y
CONFIG_DRM_EXYNOS_DSI=y
CONFIG_DRM_EXYNOS_HDMI=y
CONFIG_DRM_ROCKCHIP=m
-CONFIG_ROCKCHIP_ANALOGIX_DP=m
-CONFIG_ROCKCHIP_DW_HDMI=m
-CONFIG_ROCKCHIP_DW_MIPI_DSI=m
-CONFIG_ROCKCHIP_INNO_HDMI=m
+CONFIG_ROCKCHIP_ANALOGIX_DP=y
+CONFIG_ROCKCHIP_DW_HDMI=y
+CONFIG_ROCKCHIP_DW_MIPI_DSI=y
+CONFIG_ROCKCHIP_INNO_HDMI=y
CONFIG_DRM_ATMEL_HLCDC=m
CONFIG_DRM_RCAR_DU=m
CONFIG_DRM_RCAR_HDMI=y
@@ -730,6 +731,7 @@ CONFIG_MMC_DW_EXYNOS=y
CONFIG_MMC_DW_ROCKCHIP=y
CONFIG_MMC_SH_MMCIF=y
CONFIG_MMC_SUNXI=y
+CONFIG_MMC_BCM2835=y
CONFIG_NEW_LEDS=y
CONFIG_LEDS_CLASS=y
CONFIG_LEDS_CLASS_FLASH=m
@@ -751,6 +753,7 @@ CONFIG_EDAC=y
CONFIG_EDAC_HIGHBANK_MC=y
CONFIG_EDAC_HIGHBANK_L2=y
CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_AC100=y
CONFIG_RTC_DRV_AS3722=y
CONFIG_RTC_DRV_DS1307=y
CONFIG_RTC_DRV_HYM8563=m
@@ -883,7 +886,7 @@ CONFIG_TI_PIPE3=y
CONFIG_PHY_BERLIN_USB=y
CONFIG_PHY_BERLIN_SATA=y
CONFIG_PHY_ROCKCHIP_DP=m
-CONFIG_PHY_ROCKCHIP_USB=m
+CONFIG_PHY_ROCKCHIP_USB=y
CONFIG_PHY_QCOM_APQ8064_SATA=m
CONFIG_PHY_MIPHY28LP=y
CONFIG_PHY_RCAR_GEN2=m
diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig
index decd388d613d..a120ae816260 100644
--- a/arch/arm/configs/omap2plus_defconfig
+++ b/arch/arm/configs/omap2plus_defconfig
@@ -64,6 +64,7 @@ CONFIG_CPU_FREQ_GOV_POWERSAVE=y
CONFIG_CPU_FREQ_GOV_USERSPACE=y
CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
CONFIG_CPUFREQ_DT=m
+CONFIG_ARM_TI_CPUFREQ=y
# CONFIG_ARM_OMAP2PLUS_CPUFREQ is not set
CONFIG_CPU_IDLE=y
CONFIG_BINFMT_MISC=y
@@ -141,6 +142,7 @@ CONFIG_BLK_DEV_SD=y
CONFIG_SCSI_SCAN_ASYNC=y
CONFIG_ATA=y
CONFIG_SATA_AHCI_PLATFORM=y
+CONFIG_AHCI_DM816=m
CONFIG_NETDEVICES=y
# CONFIG_NET_VENDOR_ARC is not set
# CONFIG_NET_CADENCE is not set
@@ -167,8 +169,18 @@ CONFIG_TI_CPTS=y
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
CONFIG_AT803X_PHY=y
+CONFIG_DP83848_PHY=y
CONFIG_MICREL_PHY=y
CONFIG_SMSC_PHY=y
+CONFIG_PPP=m
+CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_FILTER=y
+CONFIG_PPP_MPPE=m
+CONFIG_PPP_MULTILINK=y
+CONFIG_PPPOE=m
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
CONFIG_USB_USBNET=m
CONFIG_USB_NET_SMSC75XX=m
CONFIG_USB_NET_SMSC95XX=m
@@ -176,6 +188,7 @@ CONFIG_USB_ALI_M5632=y
CONFIG_USB_AN2720=y
CONFIG_USB_EPSON2888=y
CONFIG_USB_KC2190=y
+CONFIG_USB_NET_QMI_WWAN=m
CONFIG_USB_CDC_PHONET=m
CONFIG_LIBERTAS=m
CONFIG_LIBERTAS_USB=m
@@ -199,6 +212,7 @@ CONFIG_KEYBOARD_TWL4030=m
# CONFIG_INPUT_MOUSE is not set
CONFIG_INPUT_TOUCHSCREEN=y
CONFIG_TOUCHSCREEN_ADS7846=m
+CONFIG_TOUCHSCREEN_ATMEL_MXT=m
CONFIG_TOUCHSCREEN_EDT_FT5X06=m
CONFIG_TOUCHSCREEN_TI_AM335X_TSC=m
CONFIG_TOUCHSCREEN_PIXCIR=m
@@ -206,6 +220,7 @@ CONFIG_TOUCHSCREEN_TSC2004=m
CONFIG_TOUCHSCREEN_TSC2005=m
CONFIG_TOUCHSCREEN_TSC2007=m
CONFIG_INPUT_MISC=y
+CONFIG_INPUT_CPCAP_PWRBUTTON=m
CONFIG_INPUT_TPS65218_PWRBUTTON=m
CONFIG_INPUT_TWL4030_PWRBUTTON=m
CONFIG_INPUT_PALMAS_PWRBUTTON=m
@@ -241,6 +256,7 @@ CONFIG_W1=m
CONFIG_HDQ_MASTER_OMAP=m
CONFIG_POWER_AVS=y
CONFIG_POWER_RESET=y
+CONFIG_POWER_RESET_GPIO=y
CONFIG_BATTERY_BQ27XXX=m
CONFIG_CHARGER_ISP1704=m
CONFIG_CHARGER_TWL4030=m
@@ -263,6 +279,7 @@ CONFIG_DRA752_THERMAL=y
CONFIG_WATCHDOG=y
CONFIG_OMAP_WATCHDOG=m
CONFIG_TWL4030_WATCHDOG=m
+CONFIG_MFD_CPCAP=y
CONFIG_MFD_TI_AM335X_TSCADC=m
CONFIG_MFD_PALMAS=y
CONFIG_MFD_TPS65217=y
@@ -270,7 +287,9 @@ CONFIG_MFD_TI_LP873X=y
CONFIG_MFD_TPS65218=y
CONFIG_MFD_TPS65910=y
CONFIG_TWL6040_CORE=y
+CONFIG_REGULATOR_CPCAP=y
CONFIG_REGULATOR_GPIO=y
+CONFIG_REGULATOR_LM363X=m
CONFIG_REGULATOR_LP872X=y
CONFIG_REGULATOR_LP873X=y
CONFIG_REGULATOR_PALMAS=y
@@ -339,6 +358,7 @@ CONFIG_SND_SOC=m
CONFIG_SND_EDMA_SOC=m
CONFIG_SND_AM33XX_SOC_EVM=m
CONFIG_SND_OMAP_SOC=m
+CONFIG_SND_OMAP_SOC_HDMI_AUDIO=m
CONFIG_SND_OMAP_SOC_OMAP_TWL4030=m
CONFIG_SND_OMAP_SOC_OMAP_ABE_TWL6040=m
CONFIG_SND_OMAP_SOC_OMAP3_PANDORA=m
@@ -354,6 +374,7 @@ CONFIG_USB_XHCI_HCD=m
CONFIG_USB_EHCI_HCD=m
CONFIG_USB_OHCI_HCD=m
CONFIG_USB_WDM=m
+CONFIG_USB_ACM=m
CONFIG_USB_STORAGE=m
CONFIG_USB_MUSB_HDRC=m
CONFIG_USB_MUSB_TUSB6010=m
@@ -402,6 +423,7 @@ CONFIG_MMC_OMAP=y
CONFIG_MMC_OMAP_HS=y
CONFIG_NEW_LEDS=y
CONFIG_LEDS_CLASS=m
+CONFIG_LEDS_CPCAP=m
CONFIG_LEDS_GPIO=m
CONFIG_LEDS_PCA963X=m
CONFIG_LEDS_PWM=m
@@ -420,6 +442,7 @@ CONFIG_RTC_DRV_TWL92330=y
CONFIG_RTC_DRV_TWL4030=m
CONFIG_RTC_DRV_PALMAS=m
CONFIG_RTC_DRV_OMAP=m
+CONFIG_RTC_DRV_CPCAP=m
CONFIG_DMADEVICES=y
CONFIG_DMA_OMAP=y
CONFIG_TI_EDMA=y
@@ -432,6 +455,7 @@ CONFIG_IIO=m
CONFIG_IIO_SW_DEVICE=m
CONFIG_IIO_SW_TRIGGER=m
CONFIG_IIO_ST_ACCEL_3AXIS=m
+CONFIG_CPCAP_ADC=m
CONFIG_TI_AM335X_ADC=m
CONFIG_BMP280=m
CONFIG_PWM=y
diff --git a/arch/arm/configs/qcom_defconfig b/arch/arm/configs/qcom_defconfig
index 4ffdd607205d..07666a7a9de5 100644
--- a/arch/arm/configs/qcom_defconfig
+++ b/arch/arm/configs/qcom_defconfig
@@ -190,11 +190,18 @@ CONFIG_MDM_LCC_9615=y
CONFIG_MSM_MMCC_8960=y
CONFIG_MSM_MMCC_8974=y
CONFIG_HWSPINLOCK_QCOM=y
+CONFIG_REMOTEPROC=y
+CONFIG_QCOM_ADSP_PIL=y
+CONFIG_QCOM_Q6V5_PIL=y
+CONFIG_QCOM_WCNSS_PIL=y
CONFIG_QCOM_GSBI=y
CONFIG_QCOM_PM=y
CONFIG_QCOM_SMEM=y
CONFIG_QCOM_SMD=y
CONFIG_QCOM_SMD_RPM=y
+CONFIG_QCOM_SMP2P=y
+CONFIG_QCOM_SMSM=y
+CONFIG_QCOM_WCNSS_CTRL=y
CONFIG_IIO=y
CONFIG_IIO_BUFFER_CB=y
CONFIG_IIO_SW_TRIGGER=y
diff --git a/arch/arm/configs/socfpga_defconfig b/arch/arm/configs/socfpga_defconfig
index 030264c98eec..2620ce790db0 100644
--- a/arch/arm/configs/socfpga_defconfig
+++ b/arch/arm/configs/socfpga_defconfig
@@ -71,6 +71,7 @@ CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
# CONFIG_SCSI_LOWLEVEL is not set
CONFIG_NETDEVICES=y
+CONFIG_ALTERA_TSE=m
CONFIG_E1000E=m
CONFIG_IGB=m
CONFIG_IXGBE=m
diff --git a/arch/arm/configs/stm32_defconfig b/arch/arm/configs/stm32_defconfig
index 03437f8f9ad1..a0975386a96c 100644
--- a/arch/arm/configs/stm32_defconfig
+++ b/arch/arm/configs/stm32_defconfig
@@ -20,6 +20,7 @@ CONFIG_EMBEDDED=y
# CONFIG_MMU is not set
CONFIG_ARM_SINGLE_ARMV7M=y
CONFIG_ARCH_STM32=y
+CONFIG_CPU_V7M_NUM_IRQ=240
CONFIG_SET_MEM_PARAM=y
CONFIG_DRAM_BASE=0x90000000
CONFIG_FLASH_MEM_BASE=0x08000000
@@ -47,6 +48,9 @@ CONFIG_SERIAL_NONSTANDARD=y
CONFIG_SERIAL_STM32=y
CONFIG_SERIAL_STM32_CONSOLE=y
# CONFIG_HW_RANDOM is not set
+CONFIG_I2C=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_STM32F4=y
# CONFIG_HWMON is not set
CONFIG_REGULATOR=y
CONFIG_REGULATOR_FIXED_VOLTAGE=y
diff --git a/arch/arm/configs/sunxi_defconfig b/arch/arm/configs/sunxi_defconfig
index da92c25eb7cc..5cd5dd70bc83 100644
--- a/arch/arm/configs/sunxi_defconfig
+++ b/arch/arm/configs/sunxi_defconfig
@@ -87,6 +87,7 @@ CONFIG_THERMAL_OF=y
CONFIG_CPU_THERMAL=y
CONFIG_WATCHDOG=y
CONFIG_SUNXI_WATCHDOG=y
+CONFIG_MFD_AC100=y
CONFIG_MFD_AXP20X=y
CONFIG_MFD_AXP20X_I2C=y
CONFIG_MFD_AXP20X_RSB=y
@@ -129,6 +130,7 @@ CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
CONFIG_RTC_CLASS=y
# CONFIG_RTC_INTF_SYSFS is not set
# CONFIG_RTC_INTF_PROC is not set
+CONFIG_RTC_DRV_AC100=y
CONFIG_RTC_DRV_SUN6I=y
CONFIG_RTC_DRV_SUNXI=y
CONFIG_DMADEVICES=y
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 02454fa15d2c..d69bebf697e7 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -478,26 +478,6 @@ static inline void __sync_cache_range_r(volatile void *p, size_t size)
: : : "r0","r1","r2","r3","r4","r5","r6","r7", \
"r9","r10","lr","memory" )
-#ifdef CONFIG_MMU
-int set_memory_ro(unsigned long addr, int numpages);
-int set_memory_rw(unsigned long addr, int numpages);
-int set_memory_x(unsigned long addr, int numpages);
-int set_memory_nx(unsigned long addr, int numpages);
-#else
-static inline int set_memory_ro(unsigned long addr, int numpages) { return 0; }
-static inline int set_memory_rw(unsigned long addr, int numpages) { return 0; }
-static inline int set_memory_x(unsigned long addr, int numpages) { return 0; }
-static inline int set_memory_nx(unsigned long addr, int numpages) { return 0; }
-#endif
-
-#ifdef CONFIG_STRICT_KERNEL_RWX
-void set_kernel_text_rw(void);
-void set_kernel_text_ro(void);
-#else
-static inline void set_kernel_text_rw(void) { }
-static inline void set_kernel_text_ro(void) { }
-#endif
-
void flush_uprobe_xol_access(struct page *page, unsigned long uaddr,
void *kaddr, unsigned long len);
diff --git a/arch/arm/include/asm/cpufeature.h b/arch/arm/include/asm/cpufeature.h
new file mode 100644
index 000000000000..6d425191d01d
--- /dev/null
+++ b/arch/arm/include/asm/cpufeature.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2017 Linaro Ltd. <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_CPUFEATURE_H
+#define __ASM_CPUFEATURE_H
+
+#include <linux/log2.h>
+#include <asm/hwcap.h>
+
+/*
+ * Due to the fact that ELF_HWCAP is a 32-bit type on ARM, and given the number
+ * of optional CPU features it defines, ARM's CPU hardware capability bits have
+ * been distributed over separate elf_hwcap and elf_hwcap2 variables, each of
+ * which covers a subset of the available CPU features.
+ *
+ * Currently, only a few of those are suitable for automatic module loading
+ * (which is the primary use case of this facility) and those happen to be all
+ * covered by HWCAP2. So let's only cover those via the cpu_feature()
+ * convenience macro for now (which is used by module_cpu_feature_match()).
+ * However, all capabilities are exposed via the modalias, and can be matched
+ * using an explicit MODULE_DEVICE_TABLE() that uses __hwcap_feature() directly.
+ */
+#define MAX_CPU_FEATURES 64
+#define __hwcap_feature(x) ilog2(HWCAP_ ## x)
+#define __hwcap2_feature(x) (32 + ilog2(HWCAP2_ ## x))
+#define cpu_feature(x) __hwcap2_feature(x)
+
+static inline bool cpu_have_feature(unsigned int num)
+{
+ return num < 32 ? elf_hwcap & BIT(num) : elf_hwcap2 & BIT(num - 32);
+}
+
+#endif
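The cpu_feature() plumbing above exists mainly so the generic module_cpu_feature_match() helper works on 32-bit ARM. As a hedged illustration only (not part of this patch; the module and init function names are hypothetical), a module keyed on the HWCAP2 CRC32 capability would look roughly like this:

#include <linux/module.h>
#include <linux/cpufeature.h>

/* module_cpu_feature_match() emits the modalias used for autoloading
 * and refuses to initialise unless cpu_have_feature() reports the
 * CRC32 bit in elf_hwcap2. */
static int __init crc32_demo_init(void)
{
	return 0;	/* hypothetical setup work would go here */
}
module_cpu_feature_match(CRC32, crc32_demo_init);

MODULE_LICENSE("GPL");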
diff --git a/arch/arm/include/asm/device.h b/arch/arm/include/asm/device.h
index 220ba207be91..36ec9c8f6e16 100644
--- a/arch/arm/include/asm/device.h
+++ b/arch/arm/include/asm/device.h
@@ -16,6 +16,9 @@ struct dev_archdata {
#ifdef CONFIG_ARM_DMA_USE_IOMMU
struct dma_iommu_mapping *mapping;
#endif
+#ifdef CONFIG_XEN
+ const struct dma_map_ops *dev_dma_ops;
+#endif
bool dma_coherent;
};
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index 716656925975..680d3f3889e7 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -16,19 +16,9 @@
extern const struct dma_map_ops arm_dma_ops;
extern const struct dma_map_ops arm_coherent_dma_ops;
-static inline const struct dma_map_ops *__generic_dma_ops(struct device *dev)
-{
- if (dev && dev->dma_ops)
- return dev->dma_ops;
- return &arm_dma_ops;
-}
-
static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
{
- if (xen_initial_domain())
- return xen_dma_ops;
- else
- return __generic_dma_ops(NULL);
+ return &arm_dma_ops;
}
#define HAVE_ARCH_DMA_SUPPORTED 1
diff --git a/arch/arm/include/asm/fixmap.h b/arch/arm/include/asm/fixmap.h
index 5c17d2dec777..8f967d1373f6 100644
--- a/arch/arm/include/asm/fixmap.h
+++ b/arch/arm/include/asm/fixmap.h
@@ -41,7 +41,7 @@ static const enum fixed_addresses __end_of_fixed_addresses =
#define FIXMAP_PAGE_COMMON (L_PTE_YOUNG | L_PTE_PRESENT | L_PTE_XN | L_PTE_DIRTY)
-#define FIXMAP_PAGE_NORMAL (FIXMAP_PAGE_COMMON | L_PTE_MT_WRITEBACK)
+#define FIXMAP_PAGE_NORMAL (pgprot_kernel | L_PTE_XN)
#define FIXMAP_PAGE_RO (FIXMAP_PAGE_NORMAL | L_PTE_RDONLY)
/* Used by set_fixmap_(io|nocache), both meant for mapping a device */
diff --git a/arch/arm/include/asm/hardware/icst.h b/arch/arm/include/asm/hardware/icst.h
deleted file mode 100644
index 794220b087d2..000000000000
--- a/arch/arm/include/asm/hardware/icst.h
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * arch/arm/include/asm/hardware/icst.h
- *
- * Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Support functions for calculating clocks/divisors for the ICST
- * clock generators. See http://www.idt.com/ for more information
- * on these devices.
- */
-#ifndef ASMARM_HARDWARE_ICST_H
-#define ASMARM_HARDWARE_ICST_H
-
-struct icst_params {
- unsigned long ref;
- unsigned long vco_max; /* inclusive */
- unsigned long vco_min; /* exclusive */
- unsigned short vd_min; /* inclusive */
- unsigned short vd_max; /* inclusive */
- unsigned char rd_min; /* inclusive */
- unsigned char rd_max; /* inclusive */
- const unsigned char *s2div; /* chip specific s2div array */
- const unsigned char *idx2s; /* chip specific idx2s array */
-};
-
-struct icst_vco {
- unsigned short v;
- unsigned char r;
- unsigned char s;
-};
-
-unsigned long icst_hz(const struct icst_params *p, struct icst_vco vco);
-struct icst_vco icst_hz_to_vco(const struct icst_params *p, unsigned long freq);
-
-/*
- * ICST307 VCO frequency must be between 6MHz and 200MHz (3.3 or 5V).
- * This frequency is pre-output divider.
- */
-#define ICST307_VCO_MIN 6000000
-#define ICST307_VCO_MAX 200000000
-
-extern const unsigned char icst307_s2div[];
-extern const unsigned char icst307_idx2s[];
-
-/*
- * ICST525 VCO frequency must be between 10MHz and 200MHz (3V) or 320MHz (5V).
- * This frequency is pre-output divider.
- */
-#define ICST525_VCO_MIN 10000000
-#define ICST525_VCO_MAX_3V 200000000
-#define ICST525_VCO_MAX_5V 320000000
-
-extern const unsigned char icst525_s2div[];
-extern const unsigned char icst525_idx2s[];
-
-#endif
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
index 42871fb8340e..2cfbc531f63b 100644
--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h
@@ -187,6 +187,16 @@ static inline void pci_ioremap_set_mem_type(int mem_type) {}
extern int pci_ioremap_io(unsigned int offset, phys_addr_t phys_addr);
/*
+ * PCI configuration space mapping function.
+ *
+ * The PCI specification does not allow configuration write
+ * transactions to be posted. Add an arch specific
+ * pci_remap_cfgspace() definition that is implemented
+ * through strongly ordered memory mappings.
+ */
+#define pci_remap_cfgspace pci_remap_cfgspace
+void __iomem *pci_remap_cfgspace(resource_size_t res_cookie, size_t size);
+/*
* Now, pick up the machine-defined IO definitions
*/
#ifdef CONFIG_NEED_MACH_IO_H
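As a rough sketch of how the new helper is meant to be consumed (driver context and names assumed, not taken from this patch), a PCI host bridge driver would map its configuration window through pci_remap_cfgspace() rather than plain ioremap(), so that config writes go through a strongly ordered, non-posted mapping:

#include <linux/io.h>
#include <linux/ioport.h>

/* Hypothetical probe-time helper: map a controller's config space
 * resource with the non-posted mapping declared above. */
static void __iomem *demo_map_cfg_window(struct resource *res)
{
	return pci_remap_cfgspace(res->start, resource_size(res));
}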
diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h
index 8ef05381984b..14d68a4d826f 100644
--- a/arch/arm/include/asm/kvm_asm.h
+++ b/arch/arm/include/asm/kvm_asm.h
@@ -33,7 +33,7 @@
#define ARM_EXCEPTION_IRQ 5
#define ARM_EXCEPTION_FIQ 6
#define ARM_EXCEPTION_HVC 7
-
+#define ARM_EXCEPTION_HYP_GONE HVC_STUB_ERR
/*
* The rr_lo_hi macro swaps a pair of registers depending on
* current endianness. It is used in conjunction with ldrd and strd
@@ -72,10 +72,11 @@ extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
extern void __init_stage2_translation(void);
-extern void __kvm_hyp_reset(unsigned long);
-
extern u64 __vgic_v3_get_ich_vtr_el2(void);
+extern u64 __vgic_v3_read_vmcr(void);
+extern void __vgic_v3_write_vmcr(u32 vmcr);
extern void __vgic_v3_init_lrs(void);
+
#endif
#endif /* __ARM_KVM_ASM_H__ */
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index 31ee468ce667..f0e66577ce05 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -30,7 +30,6 @@
#define __KVM_HAVE_ARCH_INTC_INITIALIZED
#define KVM_USER_MEM_SLOTS 32
-#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
#define KVM_HAVE_ONE_REG
#define KVM_HALT_POLL_NS_DEFAULT 500000
@@ -45,7 +44,7 @@
#define KVM_MAX_VCPUS VGIC_V2_MAX_CPUS
#endif
-#define KVM_REQ_VCPU_EXIT 8
+#define KVM_REQ_VCPU_EXIT (8 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
u32 *kvm_vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num, u32 mode);
int __attribute_const__ kvm_target_cpu(void);
@@ -270,12 +269,6 @@ static inline void __cpu_init_stage2(void)
kvm_call_hyp(__init_stage2_translation);
}
-static inline void __cpu_reset_hyp_mode(unsigned long vector_ptr,
- phys_addr_t phys_idmap_start)
-{
- kvm_call_hyp((void *)virt_to_idmap(__kvm_hyp_reset), vector_ptr);
-}
-
static inline int kvm_arch_dev_ioctl_check_extension(struct kvm *kvm, long ext)
{
return 0;
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index 95f38dcd611d..fa6f2174276b 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -56,7 +56,6 @@ void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);
phys_addr_t kvm_mmu_get_httbr(void);
phys_addr_t kvm_get_idmap_vector(void);
-phys_addr_t kvm_get_idmap_start(void);
int kvm_mmu_init(void);
void kvm_clear_hyp_idmap(void);
diff --git a/arch/arm/include/asm/module.h b/arch/arm/include/asm/module.h
index 464748b9fd7d..ed2319663a1e 100644
--- a/arch/arm/include/asm/module.h
+++ b/arch/arm/include/asm/module.h
@@ -18,13 +18,18 @@ enum {
};
#endif
+struct mod_plt_sec {
+ struct elf32_shdr *plt;
+ int plt_count;
+};
+
struct mod_arch_specific {
#ifdef CONFIG_ARM_UNWIND
struct unwind_table *unwind[ARM_SEC_MAX];
#endif
#ifdef CONFIG_ARM_MODULE_PLTS
- struct elf32_shdr *plt;
- int plt_count;
+ struct mod_plt_sec core;
+ struct mod_plt_sec init;
#endif
};
diff --git a/arch/arm/include/asm/pci.h b/arch/arm/include/asm/pci.h
index 057d381f4e57..396c92bcc0cf 100644
--- a/arch/arm/include/asm/pci.h
+++ b/arch/arm/include/asm/pci.h
@@ -29,8 +29,7 @@ static inline int pci_proc_domain(struct pci_bus *bus)
#define PCI_DMA_BUS_IS_PHYS (1)
#define HAVE_PCI_MMAP
-extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
- enum pci_mmap_state mmap_state, int write_combine);
+#define ARCH_GENERIC_PCI_MMAP_RESOURCE
static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
{
diff --git a/arch/arm/include/asm/proc-fns.h b/arch/arm/include/asm/proc-fns.h
index 8877ad5ffe10..f2e1af45bd6f 100644
--- a/arch/arm/include/asm/proc-fns.h
+++ b/arch/arm/include/asm/proc-fns.h
@@ -43,7 +43,7 @@ extern struct processor {
/*
* Special stuff for a reset
*/
- void (*reset)(unsigned long addr) __attribute__((noreturn));
+ void (*reset)(unsigned long addr, bool hvc) __attribute__((noreturn));
/*
* Idle the processor
*/
@@ -88,7 +88,7 @@ extern void cpu_set_pte_ext(pte_t *ptep, pte_t pte);
#else
extern void cpu_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext);
#endif
-extern void cpu_reset(unsigned long addr) __attribute__((noreturn));
+extern void cpu_reset(unsigned long addr, bool hvc) __attribute__((noreturn));
/* These three are private to arch/arm/kernel/suspend.c */
extern void cpu_do_suspend(void *);
diff --git a/arch/arm/include/asm/set_memory.h b/arch/arm/include/asm/set_memory.h
new file mode 100644
index 000000000000..5aa4315abe91
--- /dev/null
+++ b/arch/arm/include/asm/set_memory.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 1999-2002 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASMARM_SET_MEMORY_H
+#define _ASMARM_SET_MEMORY_H
+
+#ifdef CONFIG_MMU
+int set_memory_ro(unsigned long addr, int numpages);
+int set_memory_rw(unsigned long addr, int numpages);
+int set_memory_x(unsigned long addr, int numpages);
+int set_memory_nx(unsigned long addr, int numpages);
+#else
+static inline int set_memory_ro(unsigned long addr, int numpages) { return 0; }
+static inline int set_memory_rw(unsigned long addr, int numpages) { return 0; }
+static inline int set_memory_x(unsigned long addr, int numpages) { return 0; }
+static inline int set_memory_nx(unsigned long addr, int numpages) { return 0; }
+#endif
+
+#ifdef CONFIG_STRICT_KERNEL_RWX
+void set_kernel_text_rw(void);
+void set_kernel_text_ro(void);
+#else
+static inline void set_kernel_text_rw(void) { }
+static inline void set_kernel_text_ro(void) { }
+#endif
+
+#endif
diff --git a/arch/arm/include/asm/virt.h b/arch/arm/include/asm/virt.h
index 6dae1956c74d..141144f333a2 100644
--- a/arch/arm/include/asm/virt.h
+++ b/arch/arm/include/asm/virt.h
@@ -53,7 +53,7 @@ static inline void sync_boot_mode(void)
}
void __hyp_set_vectors(unsigned long phys_vector_base);
-unsigned long __hyp_get_vectors(void);
+void __hyp_reset_vectors(void);
#else
#define __boot_cpu_mode (SVC_MODE)
#define sync_boot_mode()
@@ -94,6 +94,18 @@ extern char __hyp_text_start[];
extern char __hyp_text_end[];
#endif
+#else
+
+/* Only assembly code should need those */
+
+#define HVC_SET_VECTORS 0
+#define HVC_SOFT_RESTART 1
+#define HVC_RESET_VECTORS 2
+
+#define HVC_STUB_HCALL_NR 3
+
#endif /* __ASSEMBLY__ */
+#define HVC_STUB_ERR 0xbadca11
+
#endif /* ! VIRT_H */
diff --git a/arch/arm/include/debug/brcmstb.S b/arch/arm/include/debug/brcmstb.S
index 9113d7b33ae0..52aaed2b936f 100644
--- a/arch/arm/include/debug/brcmstb.S
+++ b/arch/arm/include/debug/brcmstb.S
@@ -22,7 +22,8 @@
#define UARTA_3390 REG_PHYS_ADDR(0x40a900)
#define UARTA_7250 REG_PHYS_ADDR(0x40b400)
-#define UARTA_7268 REG_PHYS_ADDR(0x40c000)
+#define UARTA_7260 REG_PHYS_ADDR(0x40c000)
+#define UARTA_7268 UARTA_7260
#define UARTA_7271 UARTA_7268
#define UARTA_7364 REG_PHYS_ADDR(0x40b000)
#define UARTA_7366 UARTA_7364
@@ -62,13 +63,14 @@
/* Chip specific detection starts here */
20: checkuart(\rp, \rv, 0x33900000, 3390)
21: checkuart(\rp, \rv, 0x72500000, 7250)
-22: checkuart(\rp, \rv, 0x72680000, 7268)
-23: checkuart(\rp, \rv, 0x72710000, 7271)
-24: checkuart(\rp, \rv, 0x73640000, 7364)
-25: checkuart(\rp, \rv, 0x73660000, 7366)
-26: checkuart(\rp, \rv, 0x07437100, 74371)
-27: checkuart(\rp, \rv, 0x74390000, 7439)
-28: checkuart(\rp, \rv, 0x74450000, 7445)
+22: checkuart(\rp, \rv, 0x72600000, 7260)
+23: checkuart(\rp, \rv, 0x72680000, 7268)
+24: checkuart(\rp, \rv, 0x72710000, 7271)
+25: checkuart(\rp, \rv, 0x73640000, 7364)
+26: checkuart(\rp, \rv, 0x73660000, 7366)
+27: checkuart(\rp, \rv, 0x07437100, 74371)
+28: checkuart(\rp, \rv, 0x74390000, 7439)
+29: checkuart(\rp, \rv, 0x74450000, 7445)
/* No valid UART found */
90: mov \rp, #0
diff --git a/arch/arm/include/uapi/asm/kvm.h b/arch/arm/include/uapi/asm/kvm.h
index 6ebd3e6a1fd1..a88726359e5f 100644
--- a/arch/arm/include/uapi/asm/kvm.h
+++ b/arch/arm/include/uapi/asm/kvm.h
@@ -27,6 +27,8 @@
#define __KVM_HAVE_IRQ_LINE
#define __KVM_HAVE_READONLY_MEM
+#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
+
#define KVM_REG_SIZE(id) \
(1U << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))
@@ -114,6 +116,8 @@ struct kvm_debug_exit_arch {
};
struct kvm_sync_regs {
+ /* Used with KVM_CAP_ARM_USER_IRQ */
+ __u64 device_irq_level;
};
struct kvm_arch_memory_slot {
diff --git a/arch/arm/kernel/bios32.c b/arch/arm/kernel/bios32.c
index 2f0e07735d1d..b259956365a0 100644
--- a/arch/arm/kernel/bios32.c
+++ b/arch/arm/kernel/bios32.c
@@ -597,25 +597,6 @@ resource_size_t pcibios_align_resource(void *data, const struct resource *res,
return start;
}
-int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
- enum pci_mmap_state mmap_state, int write_combine)
-{
- if (mmap_state == pci_mmap_io)
- return -EINVAL;
-
- /*
- * Mark this as IO
- */
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-
- if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
- vma->vm_end - vma->vm_start,
- vma->vm_page_prot))
- return -EAGAIN;
-
- return 0;
-}
-
void __init pci_map_io_early(unsigned long pfn)
{
struct map_desc pci_io_desc = {
diff --git a/arch/arm/kernel/ftrace.c b/arch/arm/kernel/ftrace.c
index 3f1759411d51..833c991075a1 100644
--- a/arch/arm/kernel/ftrace.c
+++ b/arch/arm/kernel/ftrace.c
@@ -21,6 +21,7 @@
#include <asm/opcodes.h>
#include <asm/ftrace.h>
#include <asm/insn.h>
+#include <asm/set_memory.h>
#ifdef CONFIG_THUMB2_KERNEL
#define NOP 0xf85deb04 /* pop.w {lr} */
@@ -29,11 +30,6 @@
#endif
#ifdef CONFIG_DYNAMIC_FTRACE
-#ifdef CONFIG_OLD_MCOUNT
-#define OLD_MCOUNT_ADDR ((unsigned long) mcount)
-#define OLD_FTRACE_ADDR ((unsigned long) ftrace_caller_old)
-
-#define OLD_NOP 0xe1a00000 /* mov r0, r0 */
static int __ftrace_modify_code(void *data)
{
@@ -51,6 +47,12 @@ void arch_ftrace_update_code(int command)
stop_machine(__ftrace_modify_code, &command, NULL);
}
+#ifdef CONFIG_OLD_MCOUNT
+#define OLD_MCOUNT_ADDR ((unsigned long) mcount)
+#define OLD_FTRACE_ADDR ((unsigned long) ftrace_caller_old)
+
+#define OLD_NOP 0xe1a00000 /* mov r0, r0 */
+
static unsigned long ftrace_nop_replace(struct dyn_ftrace *rec)
{
return rec->arch.old_mcount ? OLD_NOP : NOP;
diff --git a/arch/arm/kernel/hyp-stub.S b/arch/arm/kernel/hyp-stub.S
index 15d073ae5da2..ec7e7377d423 100644
--- a/arch/arm/kernel/hyp-stub.S
+++ b/arch/arm/kernel/hyp-stub.S
@@ -125,7 +125,7 @@ ENTRY(__hyp_stub_install_secondary)
* (see safe_svcmode_maskall).
*/
@ Now install the hypervisor stub:
- adr r7, __hyp_stub_vectors
+ W(adr) r7, __hyp_stub_vectors
mcr p15, 4, r7, c12, c0, 0 @ set hypervisor vector base (HVBAR)
@ Disable all traps, so we don't get any nasty surprise
@@ -202,9 +202,23 @@ ARM_BE8(orr r7, r7, #(1 << 25)) @ HSCTLR.EE
ENDPROC(__hyp_stub_install_secondary)
__hyp_stub_do_trap:
- cmp r0, #-1
- mrceq p15, 4, r0, c12, c0, 0 @ get HVBAR
- mcrne p15, 4, r0, c12, c0, 0 @ set HVBAR
+ teq r0, #HVC_SET_VECTORS
+ bne 1f
+ mcr p15, 4, r1, c12, c0, 0 @ set HVBAR
+ b __hyp_stub_exit
+
+1: teq r0, #HVC_SOFT_RESTART
+ bne 1f
+ bx r1
+
+1: teq r0, #HVC_RESET_VECTORS
+ beq __hyp_stub_exit
+
+ ldr r0, =HVC_STUB_ERR
+ __ERET
+
+__hyp_stub_exit:
+ mov r0, #0
__ERET
ENDPROC(__hyp_stub_do_trap)
@@ -230,15 +244,26 @@ ENDPROC(__hyp_stub_do_trap)
* so you will need to set that to something sensible at the new hypervisor's
* initialisation entry point.
*/
-ENTRY(__hyp_get_vectors)
- mov r0, #-1
-ENDPROC(__hyp_get_vectors)
- @ fall through
ENTRY(__hyp_set_vectors)
+ mov r1, r0
+ mov r0, #HVC_SET_VECTORS
__HVC(0)
ret lr
ENDPROC(__hyp_set_vectors)
+ENTRY(__hyp_soft_restart)
+ mov r1, r0
+ mov r0, #HVC_SOFT_RESTART
+ __HVC(0)
+ ret lr
+ENDPROC(__hyp_soft_restart)
+
+ENTRY(__hyp_reset_vectors)
+ mov r0, #HVC_RESET_VECTORS
+ __HVC(0)
+ ret lr
+ENDPROC(__hyp_reset_vectors)
+
#ifndef ZIMAGE
.align 2
.L__boot_cpu_mode_offset:
@@ -246,7 +271,7 @@ ENDPROC(__hyp_set_vectors)
#endif
.align 5
-__hyp_stub_vectors:
+ENTRY(__hyp_stub_vectors)
__hyp_stub_reset: W(b) .
__hyp_stub_und: W(b) .
__hyp_stub_svc: W(b) .
diff --git a/arch/arm/kernel/kgdb.c b/arch/arm/kernel/kgdb.c
index 9232caee7060..1bb4c40a3135 100644
--- a/arch/arm/kernel/kgdb.c
+++ b/arch/arm/kernel/kgdb.c
@@ -269,7 +269,7 @@ int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
/*
* Register our undef instruction hooks with ARM undef core.
- * We regsiter a hook specifically looking for the KGB break inst
+	 * We register a hook specifically looking for the KGDB break inst
* and we handle the normal undef case within the do_undefinstr
* handler.
*/
diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c
index b18c1ea56bed..15495887ca14 100644
--- a/arch/arm/kernel/machine_kexec.c
+++ b/arch/arm/kernel/machine_kexec.c
@@ -18,6 +18,7 @@
#include <asm/mach-types.h>
#include <asm/smp_plat.h>
#include <asm/system_misc.h>
+#include <asm/set_memory.h>
extern void relocate_new_kernel(void);
extern const unsigned int relocate_new_kernel_size;
diff --git a/arch/arm/kernel/module-plts.c b/arch/arm/kernel/module-plts.c
index 3a5cba90c971..3d0c2e4dda1d 100644
--- a/arch/arm/kernel/module-plts.c
+++ b/arch/arm/kernel/module-plts.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2014 Linaro Ltd. <ard.biesheuvel@linaro.org>
+ * Copyright (C) 2014-2017 Linaro Ltd. <ard.biesheuvel@linaro.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -31,9 +31,17 @@ struct plt_entries {
u32 lit[PLT_ENT_COUNT];
};
+static bool in_init(const struct module *mod, unsigned long loc)
+{
+ return loc - (u32)mod->init_layout.base < mod->init_layout.size;
+}
+
u32 get_module_plt(struct module *mod, unsigned long loc, Elf32_Addr val)
{
- struct plt_entries *plt = (struct plt_entries *)mod->arch.plt->sh_addr;
+ struct mod_plt_sec *pltsec = !in_init(mod, loc) ? &mod->arch.core :
+ &mod->arch.init;
+
+ struct plt_entries *plt = (struct plt_entries *)pltsec->plt->sh_addr;
int idx = 0;
/*
@@ -41,9 +49,9 @@ u32 get_module_plt(struct module *mod, unsigned long loc, Elf32_Addr val)
* relocations are sorted, this will be the last entry we allocated.
* (if one exists).
*/
- if (mod->arch.plt_count > 0) {
- plt += (mod->arch.plt_count - 1) / PLT_ENT_COUNT;
- idx = (mod->arch.plt_count - 1) % PLT_ENT_COUNT;
+ if (pltsec->plt_count > 0) {
+ plt += (pltsec->plt_count - 1) / PLT_ENT_COUNT;
+ idx = (pltsec->plt_count - 1) % PLT_ENT_COUNT;
if (plt->lit[idx] == val)
return (u32)&plt->ldr[idx];
@@ -53,8 +61,8 @@ u32 get_module_plt(struct module *mod, unsigned long loc, Elf32_Addr val)
plt++;
}
- mod->arch.plt_count++;
- BUG_ON(mod->arch.plt_count * PLT_ENT_SIZE > mod->arch.plt->sh_size);
+ pltsec->plt_count++;
+ BUG_ON(pltsec->plt_count * PLT_ENT_SIZE > pltsec->plt->sh_size);
if (!idx)
/* Populate a new set of entries */
@@ -129,7 +137,7 @@ static bool duplicate_rel(Elf32_Addr base, const Elf32_Rel *rel, int num)
/* Count how many PLT entries we may need */
static unsigned int count_plts(const Elf32_Sym *syms, Elf32_Addr base,
- const Elf32_Rel *rel, int num)
+ const Elf32_Rel *rel, int num, Elf32_Word dstidx)
{
unsigned int ret = 0;
const Elf32_Sym *s;
@@ -144,13 +152,17 @@ static unsigned int count_plts(const Elf32_Sym *syms, Elf32_Addr base,
case R_ARM_THM_JUMP24:
/*
* We only have to consider branch targets that resolve
- * to undefined symbols. This is not simply a heuristic,
- * it is a fundamental limitation, since the PLT itself
- * is part of the module, and needs to be within range
- * as well, so modules can never grow beyond that limit.
+ * to symbols that are defined in a different section.
+ * This is not simply a heuristic, it is a fundamental
+ * limitation, since there is no guaranteed way to emit
+ * PLT entries sufficiently close to the branch if the
+ * section size exceeds the range of a branch
+ * instruction. So ignore relocations against defined
+ * symbols if they live in the same section as the
+ * relocation target.
*/
s = syms + ELF32_R_SYM(rel[i].r_info);
- if (s->st_shndx != SHN_UNDEF)
+ if (s->st_shndx == dstidx)
break;
/*
@@ -161,7 +173,12 @@ static unsigned int count_plts(const Elf32_Sym *syms, Elf32_Addr base,
* So we need to support them, but there is no need to
* take them into consideration when trying to optimize
* this code. So let's only check for duplicates when
- * the addend is zero.
+ * the addend is zero. (Note that calls into the core
+ * module via init PLT entries could involve section
+ * relative symbol references with non-zero addends, for
+ * which we may end up emitting duplicates, but the init
+ * PLT is released along with the rest of the .init
+ * region as soon as module loading completes.)
*/
if (!is_zero_addend_relocation(base, rel + i) ||
!duplicate_rel(base, rel, i))
@@ -174,7 +191,8 @@ static unsigned int count_plts(const Elf32_Sym *syms, Elf32_Addr base,
int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
char *secstrings, struct module *mod)
{
- unsigned long plts = 0;
+ unsigned long core_plts = 0;
+ unsigned long init_plts = 0;
Elf32_Shdr *s, *sechdrs_end = sechdrs + ehdr->e_shnum;
Elf32_Sym *syms = NULL;
@@ -184,13 +202,15 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
*/
for (s = sechdrs; s < sechdrs_end; ++s) {
if (strcmp(".plt", secstrings + s->sh_name) == 0)
- mod->arch.plt = s;
+ mod->arch.core.plt = s;
+ else if (strcmp(".init.plt", secstrings + s->sh_name) == 0)
+ mod->arch.init.plt = s;
else if (s->sh_type == SHT_SYMTAB)
syms = (Elf32_Sym *)s->sh_addr;
}
- if (!mod->arch.plt) {
- pr_err("%s: module PLT section missing\n", mod->name);
+ if (!mod->arch.core.plt || !mod->arch.init.plt) {
+ pr_err("%s: module PLT section(s) missing\n", mod->name);
return -ENOEXEC;
}
if (!syms) {
@@ -213,16 +233,29 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
/* sort by type and symbol index */
sort(rels, numrels, sizeof(Elf32_Rel), cmp_rel, NULL);
- plts += count_plts(syms, dstsec->sh_addr, rels, numrels);
+ if (strncmp(secstrings + dstsec->sh_name, ".init", 5) != 0)
+ core_plts += count_plts(syms, dstsec->sh_addr, rels,
+ numrels, s->sh_info);
+ else
+ init_plts += count_plts(syms, dstsec->sh_addr, rels,
+ numrels, s->sh_info);
}
- mod->arch.plt->sh_type = SHT_NOBITS;
- mod->arch.plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
- mod->arch.plt->sh_addralign = L1_CACHE_BYTES;
- mod->arch.plt->sh_size = round_up(plts * PLT_ENT_SIZE,
- sizeof(struct plt_entries));
- mod->arch.plt_count = 0;
-
- pr_debug("%s: plt=%x\n", __func__, mod->arch.plt->sh_size);
+ mod->arch.core.plt->sh_type = SHT_NOBITS;
+ mod->arch.core.plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
+ mod->arch.core.plt->sh_addralign = L1_CACHE_BYTES;
+ mod->arch.core.plt->sh_size = round_up(core_plts * PLT_ENT_SIZE,
+ sizeof(struct plt_entries));
+ mod->arch.core.plt_count = 0;
+
+ mod->arch.init.plt->sh_type = SHT_NOBITS;
+ mod->arch.init.plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
+ mod->arch.init.plt->sh_addralign = L1_CACHE_BYTES;
+ mod->arch.init.plt->sh_size = round_up(init_plts * PLT_ENT_SIZE,
+ sizeof(struct plt_entries));
+ mod->arch.init.plt_count = 0;
+
+ pr_debug("%s: plt=%x, init.plt=%x\n", __func__,
+ mod->arch.core.plt->sh_size, mod->arch.init.plt->sh_size);
return 0;
}
diff --git a/arch/arm/kernel/module.lds b/arch/arm/kernel/module.lds
index 05881e2b414c..eacb5c67f61e 100644
--- a/arch/arm/kernel/module.lds
+++ b/arch/arm/kernel/module.lds
@@ -1,3 +1,4 @@
SECTIONS {
.plt : { BYTE(0) }
+ .init.plt : { BYTE(0) }
}
diff --git a/arch/arm/kernel/reboot.c b/arch/arm/kernel/reboot.c
index 3fa867a2aae6..3b2aa9a9fe26 100644
--- a/arch/arm/kernel/reboot.c
+++ b/arch/arm/kernel/reboot.c
@@ -12,10 +12,11 @@
#include <asm/cacheflush.h>
#include <asm/idmap.h>
+#include <asm/virt.h>
#include "reboot.h"
-typedef void (*phys_reset_t)(unsigned long);
+typedef void (*phys_reset_t)(unsigned long, bool);
/*
* Function pointers to optional machine specific functions
@@ -51,7 +52,9 @@ static void __soft_restart(void *addr)
/* Switch to the identity mapping. */
phys_reset = (phys_reset_t)virt_to_idmap(cpu_reset);
- phys_reset((unsigned long)addr);
+
+ /* original stub should be restored by kvm */
+ phys_reset((unsigned long)addr, is_hyp_mode_available());
/* Should never get here. */
BUG();
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index f4e54503afa9..32e1a9513dc7 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -80,7 +80,7 @@ __setup("fpe=", fpe_setup);
extern void init_default_cache_policy(unsigned long);
extern void paging_init(const struct machine_desc *desc);
-extern void early_paging_init(const struct machine_desc *);
+extern void early_mm_init(const struct machine_desc *);
extern void adjust_lowmem_bounds(void);
extern enum reboot_mode reboot_mode;
extern void setup_dma_zone(const struct machine_desc *desc);
@@ -1088,7 +1088,7 @@ void __init setup_arch(char **cmdline_p)
parse_early_param();
#ifdef CONFIG_MMU
- early_paging_init(mdesc);
+ early_mm_init(mdesc);
#endif
setup_dma_zone(mdesc);
xen_early_init();
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 314eb6abe1ff..8a31906bdc9b 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -53,7 +53,6 @@ __asm__(".arch_extension virt");
static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
static kvm_cpu_context_t __percpu *kvm_host_cpu_state;
-static unsigned long hyp_default_vectors;
/* Per-CPU variable containing the currently running vcpu. */
static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_arm_running_vcpu);
@@ -209,9 +208,6 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_IMMEDIATE_EXIT:
r = 1;
break;
- case KVM_CAP_COALESCED_MMIO:
- r = KVM_COALESCED_MMIO_PAGE_OFFSET;
- break;
case KVM_CAP_ARM_SET_DEVICE_ADDR:
r = 1;
break;
@@ -230,6 +226,13 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
else
r = kvm->arch.vgic.msis_require_devid;
break;
+ case KVM_CAP_ARM_USER_IRQ:
+ /*
+ * 1: EL1_VTIMER, EL1_PTIMER, and PMU.
+ * (bump this number if adding more devices)
+ */
+ r = 1;
+ break;
default:
r = kvm_arch_dev_ioctl_check_extension(kvm, ext);
break;
@@ -351,15 +354,14 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
vcpu->arch.host_cpu_context = this_cpu_ptr(kvm_host_cpu_state);
kvm_arm_set_running_vcpu(vcpu);
+
+ kvm_vgic_load(vcpu);
}
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
- /*
- * The arch-generic KVM code expects the cpu field of a vcpu to be -1
- * if the vcpu is no longer assigned to a cpu. This is used for the
- * optimized make_all_cpus_request path.
- */
+ kvm_vgic_put(vcpu);
+
vcpu->cpu = -1;
kvm_arm_set_running_vcpu(NULL);
@@ -517,13 +519,7 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
return ret;
}
- /*
- * Enable the arch timers only if we have an in-kernel VGIC
- * and it has been properly initialized, since we cannot handle
- * interrupts from the virtual timer with a userspace gic.
- */
- if (irqchip_in_kernel(kvm) && vgic_initialized(kvm))
- ret = kvm_timer_enable(vcpu);
+ ret = kvm_timer_enable(vcpu);
return ret;
}
@@ -633,16 +629,23 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
* non-preemptible context.
*/
preempt_disable();
+
kvm_pmu_flush_hwstate(vcpu);
+
kvm_timer_flush_hwstate(vcpu);
kvm_vgic_flush_hwstate(vcpu);
local_irq_disable();
/*
- * Re-check atomic conditions
+	 * If we have a signal pending, or need to notify a userspace
+ * irqchip about timer or PMU level changes, then we exit (and
+ * update the timer level state in kvm_timer_update_run
+ * below).
*/
- if (signal_pending(current)) {
+ if (signal_pending(current) ||
+ kvm_timer_should_notify_user(vcpu) ||
+ kvm_pmu_should_notify_user(vcpu)) {
ret = -EINTR;
run->exit_reason = KVM_EXIT_INTR;
}
@@ -714,6 +717,12 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
ret = handle_exit(vcpu, run, ret);
}
+ /* Tell userspace about in-kernel device output levels */
+ if (unlikely(!irqchip_in_kernel(vcpu->kvm))) {
+ kvm_timer_update_run(vcpu);
+ kvm_pmu_update_run(vcpu);
+ }
+
if (vcpu->sigset_active)
sigprocmask(SIG_SETMASK, &sigsaved, NULL);
return ret;
@@ -1112,8 +1121,16 @@ static void cpu_init_hyp_mode(void *dummy)
kvm_arm_init_debug();
}
+static void cpu_hyp_reset(void)
+{
+ if (!is_kernel_in_hyp_mode())
+ __hyp_reset_vectors();
+}
+
static void cpu_hyp_reinit(void)
{
+ cpu_hyp_reset();
+
if (is_kernel_in_hyp_mode()) {
/*
* __cpu_init_stage2() is safe to call even if the PM
@@ -1121,21 +1138,13 @@ static void cpu_hyp_reinit(void)
*/
__cpu_init_stage2();
} else {
- if (__hyp_get_vectors() == hyp_default_vectors)
- cpu_init_hyp_mode(NULL);
+ cpu_init_hyp_mode(NULL);
}
if (vgic_present)
kvm_vgic_init_cpu_hardware();
}
-static void cpu_hyp_reset(void)
-{
- if (!is_kernel_in_hyp_mode())
- __cpu_reset_hyp_mode(hyp_default_vectors,
- kvm_get_idmap_start());
-}
-
static void _kvm_arch_hardware_enable(void *discard)
{
if (!__this_cpu_read(kvm_arm_hardware_enabled)) {
@@ -1319,12 +1328,6 @@ static int init_hyp_mode(void)
goto out_err;
/*
- * It is probably enough to obtain the default on one
- * CPU. It's unlikely to be different on the others.
- */
- hyp_default_vectors = __hyp_get_vectors();
-
- /*
* Allocate stack pages for Hypervisor-mode
*/
for_each_possible_cpu(cpu) {
diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c
index 3e5e4194ef86..2c14b69511e9 100644
--- a/arch/arm/kvm/coproc.c
+++ b/arch/arm/kvm/coproc.c
@@ -40,6 +40,24 @@
* Co-processor emulation
*****************************************************************************/
+static bool write_to_read_only(struct kvm_vcpu *vcpu,
+ const struct coproc_params *params)
+{
+ WARN_ONCE(1, "CP15 write to read-only register\n");
+ print_cp_instr(params);
+ kvm_inject_undefined(vcpu);
+ return false;
+}
+
+static bool read_from_write_only(struct kvm_vcpu *vcpu,
+ const struct coproc_params *params)
+{
+ WARN_ONCE(1, "CP15 read to write-only register\n");
+ print_cp_instr(params);
+ kvm_inject_undefined(vcpu);
+ return false;
+}
+
/* 3 bits per cache level, as per CLIDR, but non-existent caches always 0 */
static u32 cache_levels;
@@ -502,15 +520,15 @@ static int emulate_cp15(struct kvm_vcpu *vcpu,
if (likely(r->access(vcpu, params, r))) {
/* Skip instruction, since it was emulated */
kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
- return 1;
}
- /* If access function fails, it should complain. */
} else {
+ /* If access function fails, it should complain. */
kvm_err("Unsupported guest CP15 access at: %08lx\n",
*vcpu_pc(vcpu));
print_cp_instr(params);
+ kvm_inject_undefined(vcpu);
}
- kvm_inject_undefined(vcpu);
+
return 1;
}
diff --git a/arch/arm/kvm/coproc.h b/arch/arm/kvm/coproc.h
index eef1759c2b65..3a41b7d1eb86 100644
--- a/arch/arm/kvm/coproc.h
+++ b/arch/arm/kvm/coproc.h
@@ -81,24 +81,6 @@ static inline bool read_zero(struct kvm_vcpu *vcpu,
return true;
}
-static inline bool write_to_read_only(struct kvm_vcpu *vcpu,
- const struct coproc_params *params)
-{
- kvm_debug("CP15 write to read-only register at: %08lx\n",
- *vcpu_pc(vcpu));
- print_cp_instr(params);
- return false;
-}
-
-static inline bool read_from_write_only(struct kvm_vcpu *vcpu,
- const struct coproc_params *params)
-{
- kvm_debug("CP15 read to write-only register at: %08lx\n",
- *vcpu_pc(vcpu));
- print_cp_instr(params);
- return false;
-}
-
/* Reset functions */
static inline void reset_unknown(struct kvm_vcpu *vcpu,
const struct coproc_reg *r)
diff --git a/arch/arm/kvm/handle_exit.c b/arch/arm/kvm/handle_exit.c
index 96af65a30d78..5fd7968cdae9 100644
--- a/arch/arm/kvm/handle_exit.c
+++ b/arch/arm/kvm/handle_exit.c
@@ -160,6 +160,14 @@ int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
case ARM_EXCEPTION_DATA_ABORT:
kvm_inject_vabt(vcpu);
return 1;
+ case ARM_EXCEPTION_HYP_GONE:
+ /*
+ * HYP has been reset to the hyp-stub. This happens
+ * when a guest is pre-empted by kvm_reboot()'s
+ * shutdown call.
+ */
+ run->exit_reason = KVM_EXIT_FAIL_ENTRY;
+ return 0;
default:
kvm_pr_unimpl("Unsupported exception type: %d",
exception_index);
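For context, a hedged sketch of the userspace side (VMM code assumed, not part of this patch): once HYP has been torn down, KVM_RUN completes with exit_reason set to KVM_EXIT_FAIL_ENTRY and the VMM is expected to stop running the guest:

#include <err.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Hypothetical fragment of a VMM vcpu run loop. */
static void demo_vcpu_run_once(int vcpu_fd, struct kvm_run *run)
{
	if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
		err(1, "KVM_RUN");
	if (run->exit_reason == KVM_EXIT_FAIL_ENTRY)
		errx(1, "vcpu entry failed (hypervisor no longer available)");
}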
diff --git a/arch/arm/kvm/hyp/hyp-entry.S b/arch/arm/kvm/hyp/hyp-entry.S
index 96beb53934c9..95a2faefc070 100644
--- a/arch/arm/kvm/hyp/hyp-entry.S
+++ b/arch/arm/kvm/hyp/hyp-entry.S
@@ -126,11 +126,29 @@ hyp_hvc:
*/
pop {r0, r1, r2}
- /* Check for __hyp_get_vectors */
- cmp r0, #-1
- mrceq p15, 4, r0, c12, c0, 0 @ get HVBAR
- beq 1f
+ /*
+ * Check if we have a kernel function, which is guaranteed to be
+ * bigger than the maximum hyp stub hypercall
+ */
+ cmp r0, #HVC_STUB_HCALL_NR
+ bhs 1f
+ /*
+ * Not a kernel function, treat it as a stub hypercall.
+ * Compute the physical address for __kvm_handle_stub_hvc
+	 * (as the code lives in the idmapped page) and branch there.
+ * We hijack ip (r12) as a tmp register.
+ */
+ push {r1}
+ ldr r1, =kimage_voffset
+ ldr r1, [r1]
+ ldr ip, =__kvm_handle_stub_hvc
+ sub ip, ip, r1
+ pop {r1}
+
+ bx ip
+
+1:
push {lr}
mov lr, r0
@@ -142,7 +160,7 @@ THUMB( orr lr, #1)
blx lr @ Call the HYP function
pop {lr}
-1: eret
+ eret
guest_trap:
load_vcpu r0 @ Load VCPU pointer to r0
diff --git a/arch/arm/kvm/init.S b/arch/arm/kvm/init.S
index bf89c919efc1..570ed4a9c261 100644
--- a/arch/arm/kvm/init.S
+++ b/arch/arm/kvm/init.S
@@ -23,6 +23,7 @@
#include <asm/kvm_asm.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>
+#include <asm/virt.h>
/********************************************************************
* Hypervisor initialization
@@ -39,6 +40,10 @@
* - Setup the page tables
* - Enable the MMU
* - Profit! (or eret, if you only care about the code).
+ *
+ * Another possibility is to get a HYP stub hypercall.
+ * We discriminate between the two by checking if r0 contains a value
+ * that is less than HVC_STUB_HCALL_NR.
*/
.text
@@ -58,6 +63,10 @@ __kvm_hyp_init:
W(b) .
__do_hyp_init:
+ @ Check for a stub hypercall
+ cmp r0, #HVC_STUB_HCALL_NR
+ blo __kvm_handle_stub_hvc
+
@ Set stack pointer
mov sp, r0
@@ -112,20 +121,46 @@ __do_hyp_init:
eret
- @ r0 : stub vectors address
-ENTRY(__kvm_hyp_reset)
+ENTRY(__kvm_handle_stub_hvc)
+ cmp r0, #HVC_SOFT_RESTART
+ bne 1f
+
+ /* The target is expected in r1 */
+ msr ELR_hyp, r1
+ mrs r0, cpsr
+ bic r0, r0, #MODE_MASK
+ orr r0, r0, #HYP_MODE
+THUMB( orr r0, r0, #PSR_T_BIT )
+ msr spsr_cxsf, r0
+ b reset
+
+1: cmp r0, #HVC_RESET_VECTORS
+ bne 1f
+
+reset:
/* We're now in idmap, disable MMU */
mrc p15, 4, r1, c1, c0, 0 @ HSCTLR
- ldr r2, =(HSCTLR_M | HSCTLR_A | HSCTLR_C | HSCTLR_I)
- bic r1, r1, r2
+ ldr r0, =(HSCTLR_M | HSCTLR_A | HSCTLR_C | HSCTLR_I)
+ bic r1, r1, r0
mcr p15, 4, r1, c1, c0, 0 @ HSCTLR
- /* Install stub vectors */
- mcr p15, 4, r0, c12, c0, 0 @ HVBAR
- isb
+ /*
+ * Install stub vectors, using ardb's VA->PA trick.
+ */
+0: adr r0, 0b @ PA(0)
+ movw r1, #:lower16:__hyp_stub_vectors - 0b @ VA(stub) - VA(0)
+ movt r1, #:upper16:__hyp_stub_vectors - 0b
+ add r1, r1, r0 @ PA(stub)
+ mcr p15, 4, r1, c12, c0, 0 @ HVBAR
+ b exit
+
+1: ldr r0, =HVC_STUB_ERR
+ eret
+exit:
+ mov r0, #0
eret
-ENDPROC(__kvm_hyp_reset)
+ENDPROC(__kvm_handle_stub_hvc)
.ltorg
diff --git a/arch/arm/kvm/interrupts.S b/arch/arm/kvm/interrupts.S
index b1bd316f14c0..80a1d6cd261c 100644
--- a/arch/arm/kvm/interrupts.S
+++ b/arch/arm/kvm/interrupts.S
@@ -37,10 +37,6 @@
* in Hyp mode (see init_hyp_mode in arch/arm/kvm/arm.c). Return values are
* passed in r0 (strictly 32bit).
*
- * A function pointer with a value of 0xffffffff has a special meaning,
- * and is used to implement __hyp_get_vectors in the same way as in
- * arch/arm/kernel/hyp_stub.S.
- *
* The calling convention follows the standard AAPCS:
* r0 - r3: caller save
* r12: caller save
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 582a972371cf..313ee646480f 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -1524,7 +1524,8 @@ static int handle_hva_to_gpa(struct kvm *kvm,
unsigned long start,
unsigned long end,
int (*handler)(struct kvm *kvm,
- gpa_t gpa, void *data),
+ gpa_t gpa, u64 size,
+ void *data),
void *data)
{
struct kvm_memslots *slots;
@@ -1536,7 +1537,7 @@ static int handle_hva_to_gpa(struct kvm *kvm,
/* we only care about the pages that the guest sees */
kvm_for_each_memslot(memslot, slots) {
unsigned long hva_start, hva_end;
- gfn_t gfn, gfn_end;
+ gfn_t gpa;
hva_start = max(start, memslot->userspace_addr);
hva_end = min(end, memslot->userspace_addr +
@@ -1544,25 +1545,16 @@ static int handle_hva_to_gpa(struct kvm *kvm,
if (hva_start >= hva_end)
continue;
- /*
- * {gfn(page) | page intersects with [hva_start, hva_end)} =
- * {gfn_start, gfn_start+1, ..., gfn_end-1}.
- */
- gfn = hva_to_gfn_memslot(hva_start, memslot);
- gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);
-
- for (; gfn < gfn_end; ++gfn) {
- gpa_t gpa = gfn << PAGE_SHIFT;
- ret |= handler(kvm, gpa, data);
- }
+ gpa = hva_to_gfn_memslot(hva_start, memslot) << PAGE_SHIFT;
+ ret |= handler(kvm, gpa, (u64)(hva_end - hva_start), data);
}
return ret;
}
-static int kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, void *data)
+static int kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
{
- unmap_stage2_range(kvm, gpa, PAGE_SIZE);
+ unmap_stage2_range(kvm, gpa, size);
return 0;
}
@@ -1589,10 +1581,11 @@ int kvm_unmap_hva_range(struct kvm *kvm,
return 0;
}
-static int kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, void *data)
+static int kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
{
pte_t *pte = (pte_t *)data;
+ WARN_ON(size != PAGE_SIZE);
/*
* We can always call stage2_set_pte with KVM_S2PTE_FLAG_LOGGING_ACTIVE
* flag clear because MMU notifiers will have unmapped a huge PMD before
@@ -1618,11 +1611,12 @@ void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &stage2_pte);
}
-static int kvm_age_hva_handler(struct kvm *kvm, gpa_t gpa, void *data)
+static int kvm_age_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
{
pmd_t *pmd;
pte_t *pte;
+ WARN_ON(size != PAGE_SIZE && size != PMD_SIZE);
pmd = stage2_get_pmd(kvm, NULL, gpa);
if (!pmd || pmd_none(*pmd)) /* Nothing there */
return 0;
@@ -1637,11 +1631,12 @@ static int kvm_age_hva_handler(struct kvm *kvm, gpa_t gpa, void *data)
return stage2_ptep_test_and_clear_young(pte);
}
-static int kvm_test_age_hva_handler(struct kvm *kvm, gpa_t gpa, void *data)
+static int kvm_test_age_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
{
pmd_t *pmd;
pte_t *pte;
+ WARN_ON(size != PAGE_SIZE && size != PMD_SIZE);
pmd = stage2_get_pmd(kvm, NULL, gpa);
if (!pmd || pmd_none(*pmd)) /* Nothing there */
return 0;
@@ -1686,11 +1681,6 @@ phys_addr_t kvm_get_idmap_vector(void)
return hyp_idmap_vector;
}
-phys_addr_t kvm_get_idmap_start(void)
-{
- return hyp_idmap_start;
-}
-
static int kvm_map_idmap_text(pgd_t *pgd)
{
int err;
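
For readability, here is the reshaped walker condensed from the mmu.c hunks above into one piece: handle_hva_to_gpa() now makes a single handler call per overlapping memslot range, passing the base GPA and the range size, instead of one call per guest page. This is only a sketch stitched together from the visible hunks; it leans on KVM-internal helpers (kvm_memslots(), kvm_for_each_memslot(), hva_to_gfn_memslot()) that the surrounding, partly elided context is assumed to provide.

    /* Condensed from the hunks above: one handler call per overlapping
     * memslot range instead of one per page.  kvm_memslots() is assumed
     * to be how 'slots' is filled in the elided context lines. */
    static int handle_hva_to_gpa(struct kvm *kvm,
                                 unsigned long start, unsigned long end,
                                 int (*handler)(struct kvm *kvm, gpa_t gpa,
                                                u64 size, void *data),
                                 void *data)
    {
            struct kvm_memslots *slots = kvm_memslots(kvm);
            struct kvm_memory_slot *memslot;
            int ret = 0;

            kvm_for_each_memslot(memslot, slots) {
                    unsigned long hva_start, hva_end;
                    gpa_t gpa;

                    hva_start = max(start, memslot->userspace_addr);
                    hva_end = min(end, memslot->userspace_addr +
                                       (memslot->npages << PAGE_SHIFT));
                    if (hva_start >= hva_end)
                            continue;

                    /* one range-sized call; handlers WARN on sizes they
                     * cannot handle, as seen in the hunks above */
                    gpa = hva_to_gfn_memslot(hva_start, memslot) << PAGE_SHIFT;
                    ret |= handler(kvm, gpa, (u64)(hva_end - hva_start), data);
            }

            return ret;
    }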
diff --git a/arch/arm/kvm/psci.c b/arch/arm/kvm/psci.c
index c2b131527a64..a08d7a93aebb 100644
--- a/arch/arm/kvm/psci.c
+++ b/arch/arm/kvm/psci.c
@@ -208,9 +208,10 @@ int kvm_psci_version(struct kvm_vcpu *vcpu)
static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
{
- int ret = 1;
+ struct kvm *kvm = vcpu->kvm;
unsigned long psci_fn = vcpu_get_reg(vcpu, 0) & ~((u32) 0);
unsigned long val;
+ int ret = 1;
switch (psci_fn) {
case PSCI_0_2_FN_PSCI_VERSION:
@@ -230,7 +231,9 @@ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
break;
case PSCI_0_2_FN_CPU_ON:
case PSCI_0_2_FN64_CPU_ON:
+ mutex_lock(&kvm->lock);
val = kvm_psci_vcpu_on(vcpu);
+ mutex_unlock(&kvm->lock);
break;
case PSCI_0_2_FN_AFFINITY_INFO:
case PSCI_0_2_FN64_AFFINITY_INFO:
@@ -279,6 +282,7 @@ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
{
+ struct kvm *kvm = vcpu->kvm;
unsigned long psci_fn = vcpu_get_reg(vcpu, 0) & ~((u32) 0);
unsigned long val;
@@ -288,7 +292,9 @@ static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
val = PSCI_RET_SUCCESS;
break;
case KVM_PSCI_FN_CPU_ON:
+ mutex_lock(&kvm->lock);
val = kvm_psci_vcpu_on(vcpu);
+ mutex_unlock(&kvm->lock);
break;
default:
val = PSCI_RET_NOT_SUPPORTED;
diff --git a/arch/arm/mach-at91/Makefile b/arch/arm/mach-at91/Makefile
index c5bbf8bb8c0f..cfd8f60a9268 100644
--- a/arch/arm/mach-at91/Makefile
+++ b/arch/arm/mach-at91/Makefile
@@ -1,7 +1,6 @@
#
# Makefile for the linux kernel.
#
-obj-y := soc.o
# CPU-specific support
obj-$(CONFIG_SOC_AT91RM9200) += at91rm9200.o
@@ -18,3 +17,36 @@ endif
ifeq ($(CONFIG_PM_DEBUG),y)
CFLAGS_pm.o += -DDEBUG
endif
+
+# Default sed regexp - multiline due to syntax constraints
+define sed-y
+ "/^->/{s:->#\(.*\):/* \1 */:; \
+ s:^->\([^ ]*\) [\$$#]*\([-0-9]*\) \(.*\):#define \1 \2 /* \3 */:; \
+ s:^->\([^ ]*\) [\$$#]*\([^ ]*\) \(.*\):#define \1 \2 /* \3 */:; \
+ s:->::; p;}"
+endef
+
+# Use filechk to avoid rebuilds when a header changes, but the resulting file
+# does not
+define filechk_offsets
+ (set -e; \
+ echo "#ifndef $2"; \
+ echo "#define $2"; \
+ echo "/*"; \
+ echo " * DO NOT MODIFY."; \
+ echo " *"; \
+ echo " * This file was generated by Kbuild"; \
+ echo " */"; \
+ echo ""; \
+ sed -ne $(sed-y); \
+ echo ""; \
+ echo "#endif" )
+endef
+
+arch/arm/mach-at91/pm_data-offsets.s: arch/arm/mach-at91/pm_data-offsets.c
+ $(call if_changed_dep,cc_s_c)
+
+include/generated/at91_pm_data-offsets.h: arch/arm/mach-at91/pm_data-offsets.s FORCE
+ $(call filechk,offsets,__PM_DATA_OFFSETS_H__)
+
+arch/arm/mach-at91/pm_suspend.o: include/generated/at91_pm_data-offsets.h
diff --git a/arch/arm/mach-at91/at91rm9200.c b/arch/arm/mach-at91/at91rm9200.c
index d068ec3cd1f6..656ad409a253 100644
--- a/arch/arm/mach-at91/at91rm9200.c
+++ b/arch/arm/mach-at91/at91rm9200.c
@@ -14,23 +14,10 @@
#include <asm/mach/arch.h>
#include "generic.h"
-#include "soc.h"
-
-static const struct at91_soc rm9200_socs[] = {
- AT91_SOC(AT91RM9200_CIDR_MATCH, 0, "at91rm9200 BGA", "at91rm9200"),
- { /* sentinel */ },
-};
static void __init at91rm9200_dt_device_init(void)
{
- struct soc_device *soc;
- struct device *soc_dev = NULL;
-
- soc = at91_soc_init(rm9200_socs);
- if (soc != NULL)
- soc_dev = soc_device_to_device(soc);
-
- of_platform_default_populate(NULL, NULL, soc_dev);
+ of_platform_default_populate(NULL, NULL, NULL);
at91rm9200_pm_init();
}
diff --git a/arch/arm/mach-at91/at91sam9.c b/arch/arm/mach-at91/at91sam9.c
index ba28e9cc584d..3dbdef4d3cbf 100644
--- a/arch/arm/mach-at91/at91sam9.c
+++ b/arch/arm/mach-at91/at91sam9.c
@@ -14,60 +14,12 @@
#include <asm/system_misc.h>
#include "generic.h"
-#include "soc.h"
-static const struct at91_soc at91sam9_socs[] = {
- AT91_SOC(AT91SAM9260_CIDR_MATCH, 0, "at91sam9260", NULL),
- AT91_SOC(AT91SAM9261_CIDR_MATCH, 0, "at91sam9261", NULL),
- AT91_SOC(AT91SAM9263_CIDR_MATCH, 0, "at91sam9263", NULL),
- AT91_SOC(AT91SAM9G20_CIDR_MATCH, 0, "at91sam9g20", NULL),
- AT91_SOC(AT91SAM9RL64_CIDR_MATCH, 0, "at91sam9rl64", NULL),
- AT91_SOC(AT91SAM9G45_CIDR_MATCH, AT91SAM9M11_EXID_MATCH,
- "at91sam9m11", "at91sam9g45"),
- AT91_SOC(AT91SAM9G45_CIDR_MATCH, AT91SAM9M10_EXID_MATCH,
- "at91sam9m10", "at91sam9g45"),
- AT91_SOC(AT91SAM9G45_CIDR_MATCH, AT91SAM9G46_EXID_MATCH,
- "at91sam9g46", "at91sam9g45"),
- AT91_SOC(AT91SAM9G45_CIDR_MATCH, AT91SAM9G45_EXID_MATCH,
- "at91sam9g45", "at91sam9g45"),
- AT91_SOC(AT91SAM9X5_CIDR_MATCH, AT91SAM9G15_EXID_MATCH,
- "at91sam9g15", "at91sam9x5"),
- AT91_SOC(AT91SAM9X5_CIDR_MATCH, AT91SAM9G35_EXID_MATCH,
- "at91sam9g35", "at91sam9x5"),
- AT91_SOC(AT91SAM9X5_CIDR_MATCH, AT91SAM9X35_EXID_MATCH,
- "at91sam9x35", "at91sam9x5"),
- AT91_SOC(AT91SAM9X5_CIDR_MATCH, AT91SAM9G25_EXID_MATCH,
- "at91sam9g25", "at91sam9x5"),
- AT91_SOC(AT91SAM9X5_CIDR_MATCH, AT91SAM9X25_EXID_MATCH,
- "at91sam9x25", "at91sam9x5"),
- AT91_SOC(AT91SAM9N12_CIDR_MATCH, AT91SAM9CN12_EXID_MATCH,
- "at91sam9cn12", "at91sam9n12"),
- AT91_SOC(AT91SAM9N12_CIDR_MATCH, AT91SAM9N12_EXID_MATCH,
- "at91sam9n12", "at91sam9n12"),
- AT91_SOC(AT91SAM9N12_CIDR_MATCH, AT91SAM9CN11_EXID_MATCH,
- "at91sam9cn11", "at91sam9n12"),
- AT91_SOC(AT91SAM9XE128_CIDR_MATCH, 0, "at91sam9xe128", "at91sam9xe128"),
- AT91_SOC(AT91SAM9XE256_CIDR_MATCH, 0, "at91sam9xe256", "at91sam9xe256"),
- AT91_SOC(AT91SAM9XE512_CIDR_MATCH, 0, "at91sam9xe512", "at91sam9xe512"),
- { /* sentinel */ },
-};
-
-static void __init at91sam9_common_init(void)
+static void __init at91sam9_init(void)
{
- struct soc_device *soc;
- struct device *soc_dev = NULL;
-
- soc = at91_soc_init(at91sam9_socs);
- if (soc != NULL)
- soc_dev = soc_device_to_device(soc);
+ of_platform_default_populate(NULL, NULL, NULL);
- of_platform_default_populate(NULL, NULL, soc_dev);
-}
-
-static void __init at91sam9_dt_device_init(void)
-{
- at91sam9_common_init();
- at91sam9260_pm_init();
+ at91sam9_pm_init();
}
static const char *const at91_dt_board_compat[] __initconst = {
@@ -77,41 +29,6 @@ static const char *const at91_dt_board_compat[] __initconst = {
DT_MACHINE_START(at91sam_dt, "Atmel AT91SAM9")
/* Maintainer: Atmel */
- .init_machine = at91sam9_dt_device_init,
+ .init_machine = at91sam9_init,
.dt_compat = at91_dt_board_compat,
MACHINE_END
-
-static void __init at91sam9g45_dt_device_init(void)
-{
- at91sam9_common_init();
- at91sam9g45_pm_init();
-}
-
-static const char *const at91sam9g45_board_compat[] __initconst = {
- "atmel,at91sam9g45",
- NULL
-};
-
-DT_MACHINE_START(at91sam9g45_dt, "Atmel AT91SAM9G45")
- /* Maintainer: Atmel */
- .init_machine = at91sam9g45_dt_device_init,
- .dt_compat = at91sam9g45_board_compat,
-MACHINE_END
-
-static void __init at91sam9x5_dt_device_init(void)
-{
- at91sam9_common_init();
- at91sam9x5_pm_init();
-}
-
-static const char *const at91sam9x5_board_compat[] __initconst = {
- "atmel,at91sam9x5",
- "atmel,at91sam9n12",
- NULL
-};
-
-DT_MACHINE_START(at91sam9x5_dt, "Atmel AT91SAM9")
- /* Maintainer: Atmel */
- .init_machine = at91sam9x5_dt_device_init,
- .dt_compat = at91sam9x5_board_compat,
-MACHINE_END
diff --git a/arch/arm/mach-at91/generic.h b/arch/arm/mach-at91/generic.h
index 28ca57a2060f..f1ead0f13c19 100644
--- a/arch/arm/mach-at91/generic.h
+++ b/arch/arm/mach-at91/generic.h
@@ -13,15 +13,11 @@
#ifdef CONFIG_PM
extern void __init at91rm9200_pm_init(void);
-extern void __init at91sam9260_pm_init(void);
-extern void __init at91sam9g45_pm_init(void);
-extern void __init at91sam9x5_pm_init(void);
+extern void __init at91sam9_pm_init(void);
extern void __init sama5_pm_init(void);
#else
static inline void __init at91rm9200_pm_init(void) { }
-static inline void __init at91sam9260_pm_init(void) { }
-static inline void __init at91sam9g45_pm_init(void) { }
-static inline void __init at91sam9x5_pm_init(void) { }
+static inline void __init at91sam9_pm_init(void) { }
static inline void __init sama5_pm_init(void) { }
#endif
diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c
index a277981f414d..2cd27c830ab6 100644
--- a/arch/arm/mach-at91/pm.c
+++ b/arch/arm/mach-at91/pm.c
@@ -10,35 +10,22 @@
* (at your option) any later version.
*/
-#include <linux/gpio.h>
-#include <linux/suspend.h>
-#include <linux/sched.h>
-#include <linux/proc_fs.h>
#include <linux/genalloc.h>
-#include <linux/interrupt.h>
-#include <linux/sysfs.h>
-#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/of_address.h>
#include <linux/of.h>
#include <linux/of_platform.h>
-#include <linux/of_address.h>
-#include <linux/platform_device.h>
-#include <linux/platform_data/atmel.h>
-#include <linux/io.h>
+#include <linux/suspend.h>
+
#include <linux/clk/at91_pmc.h>
-#include <asm/irq.h>
-#include <linux/atomic.h>
-#include <asm/mach/time.h>
-#include <asm/mach/irq.h>
-#include <asm/fncpy.h>
#include <asm/cacheflush.h>
+#include <asm/fncpy.h>
#include <asm/system_misc.h>
#include "generic.h"
#include "pm.h"
-static void __iomem *pmc;
-
/*
* FIXME: this is needed to communicate between the pinctrl driver and
* the PM implementation in the machine. Possibly part of the PM
@@ -50,12 +37,13 @@ extern void at91_pinctrl_gpio_suspend(void);
extern void at91_pinctrl_gpio_resume(void);
#endif
-static struct {
- unsigned long uhp_udp_mask;
- int memctrl;
-} at91_pm_data;
+static struct at91_pm_data pm_data;
-static void __iomem *at91_ramc_base[2];
+#define at91_ramc_read(id, field) \
+ __raw_readl(pm_data.ramc[id] + field)
+
+#define at91_ramc_write(id, field, value) \
+ __raw_writel(value, pm_data.ramc[id] + field)
static int at91_pm_valid_state(suspend_state_t state)
{
@@ -91,10 +79,10 @@ static int at91_pm_verify_clocks(void)
unsigned long scsr;
int i;
- scsr = readl(pmc + AT91_PMC_SCSR);
+ scsr = readl(pm_data.pmc + AT91_PMC_SCSR);
/* USB must not be using PLLB */
- if ((scsr & at91_pm_data.uhp_udp_mask) != 0) {
+ if ((scsr & pm_data.uhp_udp_mask) != 0) {
pr_err("AT91: PM - Suspend-to-RAM with USB still active\n");
return 0;
}
@@ -105,7 +93,7 @@ static int at91_pm_verify_clocks(void)
if ((scsr & (AT91_PMC_PCK0 << i)) == 0)
continue;
- css = readl(pmc + AT91_PMC_PCKR(i)) & AT91_PMC_CSS;
+ css = readl(pm_data.pmc + AT91_PMC_PCKR(i)) & AT91_PMC_CSS;
if (css != AT91_PMC_CSS_SLOW) {
pr_err("AT91: PM - Suspend-to-RAM with PCK%d src %d\n", i, css);
return 0;
@@ -131,25 +119,18 @@ int at91_suspend_entering_slow_clock(void)
}
EXPORT_SYMBOL(at91_suspend_entering_slow_clock);
-static void (*at91_suspend_sram_fn)(void __iomem *pmc, void __iomem *ramc0,
- void __iomem *ramc1, int memctrl);
-
-extern void at91_pm_suspend_in_sram(void __iomem *pmc, void __iomem *ramc0,
- void __iomem *ramc1, int memctrl);
+static void (*at91_suspend_sram_fn)(struct at91_pm_data *);
+extern void at91_pm_suspend_in_sram(struct at91_pm_data *pm_data);
extern u32 at91_pm_suspend_in_sram_sz;
static void at91_pm_suspend(suspend_state_t state)
{
- unsigned int pm_data = at91_pm_data.memctrl;
-
- pm_data |= (state == PM_SUSPEND_MEM) ?
- AT91_PM_MODE(AT91_PM_SLOW_CLOCK) : 0;
+ pm_data.mode = (state == PM_SUSPEND_MEM) ? AT91_PM_SLOW_CLOCK : 0;
flush_cache_all();
outer_disable();
- at91_suspend_sram_fn(pmc, at91_ramc_base[0],
- at91_ramc_base[1], pm_data);
+ at91_suspend_sram_fn(&pm_data);
outer_resume();
}
@@ -224,12 +205,6 @@ static struct platform_device at91_cpuidle_device = {
.name = "cpuidle-at91",
};
-static void at91_pm_set_standby(void (*at91_standby)(void))
-{
- if (at91_standby)
- at91_cpuidle_device.dev.platform_data = at91_standby;
-}
-
/*
* The AT91RM9200 goes into self-refresh mode with this command, and will
* terminate self-refresh automatically on the next SDRAM access.
@@ -241,20 +216,15 @@ static void at91_pm_set_standby(void (*at91_standby)(void))
*/
static void at91rm9200_standby(void)
{
- u32 lpr = at91_ramc_read(0, AT91_MC_SDRAMC_LPR);
-
asm volatile(
"b 1f\n\t"
".align 5\n\t"
"1: mcr p15, 0, %0, c7, c10, 4\n\t"
- " str %0, [%1, %2]\n\t"
- " str %3, [%1, %4]\n\t"
+ " str %2, [%1, %3]\n\t"
" mcr p15, 0, %0, c7, c0, 4\n\t"
- " str %5, [%1, %2]"
:
- : "r" (0), "r" (at91_ramc_base[0]), "r" (AT91_MC_SDRAMC_LPR),
- "r" (1), "r" (AT91_MC_SDRAMC_SRR),
- "r" (lpr));
+ : "r" (0), "r" (pm_data.ramc[0]),
+ "r" (1), "r" (AT91_MC_SDRAMC_SRR));
}
/* We manage both DDRAM/SDRAM controllers, we need more than one value to
@@ -265,12 +235,27 @@ static void at91_ddr_standby(void)
/* Those two values allow us to delay self-refresh activation
* to the maximum. */
u32 lpr0, lpr1 = 0;
+ u32 mdr, saved_mdr0, saved_mdr1 = 0;
u32 saved_lpr0, saved_lpr1 = 0;
- if (at91_ramc_base[1]) {
+ /* LPDDR1 --> force DDR2 mode during self-refresh */
+ saved_mdr0 = at91_ramc_read(0, AT91_DDRSDRC_MDR);
+ if ((saved_mdr0 & AT91_DDRSDRC_MD) == AT91_DDRSDRC_MD_LOW_POWER_DDR) {
+ mdr = saved_mdr0 & ~AT91_DDRSDRC_MD;
+ mdr |= AT91_DDRSDRC_MD_DDR2;
+ at91_ramc_write(0, AT91_DDRSDRC_MDR, mdr);
+ }
+
+ if (pm_data.ramc[1]) {
saved_lpr1 = at91_ramc_read(1, AT91_DDRSDRC_LPR);
lpr1 = saved_lpr1 & ~AT91_DDRSDRC_LPCB;
lpr1 |= AT91_DDRSDRC_LPCB_SELF_REFRESH;
+ saved_mdr1 = at91_ramc_read(1, AT91_DDRSDRC_MDR);
+ if ((saved_mdr1 & AT91_DDRSDRC_MD) == AT91_DDRSDRC_MD_LOW_POWER_DDR) {
+ mdr = saved_mdr1 & ~AT91_DDRSDRC_MD;
+ mdr |= AT91_DDRSDRC_MD_DDR2;
+ at91_ramc_write(1, AT91_DDRSDRC_MDR, mdr);
+ }
}
saved_lpr0 = at91_ramc_read(0, AT91_DDRSDRC_LPR);
@@ -279,14 +264,17 @@ static void at91_ddr_standby(void)
/* self-refresh mode now */
at91_ramc_write(0, AT91_DDRSDRC_LPR, lpr0);
- if (at91_ramc_base[1])
+ if (pm_data.ramc[1])
at91_ramc_write(1, AT91_DDRSDRC_LPR, lpr1);
cpu_do_idle();
+ at91_ramc_write(0, AT91_DDRSDRC_MDR, saved_mdr0);
at91_ramc_write(0, AT91_DDRSDRC_LPR, saved_lpr0);
- if (at91_ramc_base[1])
+ if (pm_data.ramc[1]) {
+ at91_ramc_write(0, AT91_DDRSDRC_MDR, saved_mdr1);
at91_ramc_write(1, AT91_DDRSDRC_LPR, saved_lpr1);
+ }
}
static void sama5d3_ddr_standby(void)
@@ -313,7 +301,7 @@ static void at91sam9_sdram_standby(void)
u32 lpr0, lpr1 = 0;
u32 saved_lpr0, saved_lpr1 = 0;
- if (at91_ramc_base[1]) {
+ if (pm_data.ramc[1]) {
saved_lpr1 = at91_ramc_read(1, AT91_SDRAMC_LPR);
lpr1 = saved_lpr1 & ~AT91_SDRAMC_LPCB;
lpr1 |= AT91_SDRAMC_LPCB_SELF_REFRESH;
@@ -325,21 +313,33 @@ static void at91sam9_sdram_standby(void)
/* self-refresh mode now */
at91_ramc_write(0, AT91_SDRAMC_LPR, lpr0);
- if (at91_ramc_base[1])
+ if (pm_data.ramc[1])
at91_ramc_write(1, AT91_SDRAMC_LPR, lpr1);
cpu_do_idle();
at91_ramc_write(0, AT91_SDRAMC_LPR, saved_lpr0);
- if (at91_ramc_base[1])
+ if (pm_data.ramc[1])
at91_ramc_write(1, AT91_SDRAMC_LPR, saved_lpr1);
}
+struct ramc_info {
+ void (*idle)(void);
+ unsigned int memctrl;
+};
+
+static const struct ramc_info ramc_infos[] __initconst = {
+ { .idle = at91rm9200_standby, .memctrl = AT91_MEMCTRL_MC},
+ { .idle = at91sam9_sdram_standby, .memctrl = AT91_MEMCTRL_SDRAMC},
+ { .idle = at91_ddr_standby, .memctrl = AT91_MEMCTRL_DDRSDR},
+ { .idle = sama5d3_ddr_standby, .memctrl = AT91_MEMCTRL_DDRSDR},
+};
+
static const struct of_device_id const ramc_ids[] __initconst = {
- { .compatible = "atmel,at91rm9200-sdramc", .data = at91rm9200_standby },
- { .compatible = "atmel,at91sam9260-sdramc", .data = at91sam9_sdram_standby },
- { .compatible = "atmel,at91sam9g45-ddramc", .data = at91_ddr_standby },
- { .compatible = "atmel,sama5d3-ddramc", .data = sama5d3_ddr_standby },
+ { .compatible = "atmel,at91rm9200-sdramc", .data = &ramc_infos[0] },
+ { .compatible = "atmel,at91sam9260-sdramc", .data = &ramc_infos[1] },
+ { .compatible = "atmel,at91sam9g45-ddramc", .data = &ramc_infos[2] },
+ { .compatible = "atmel,sama5d3-ddramc", .data = &ramc_infos[3] },
{ /*sentinel*/ }
};
@@ -348,15 +348,18 @@ static __init void at91_dt_ramc(void)
struct device_node *np;
const struct of_device_id *of_id;
int idx = 0;
- const void *standby = NULL;
+ void *standby = NULL;
+ const struct ramc_info *ramc;
for_each_matching_node_and_match(np, ramc_ids, &of_id) {
- at91_ramc_base[idx] = of_iomap(np, 0);
- if (!at91_ramc_base[idx])
+ pm_data.ramc[idx] = of_iomap(np, 0);
+ if (!pm_data.ramc[idx])
panic(pr_fmt("unable to map ramc[%d] cpu registers\n"), idx);
+ ramc = of_id->data;
if (!standby)
- standby = of_id->data;
+ standby = ramc->idle;
+ pm_data.memctrl = ramc->memctrl;
idx++;
}
@@ -369,7 +372,7 @@ static __init void at91_dt_ramc(void)
return;
}
- at91_pm_set_standby(standby);
+ at91_cpuidle_device.dev.platform_data = standby;
}
static void at91rm9200_idle(void)
@@ -378,12 +381,12 @@ static void at91rm9200_idle(void)
* Disable the processor clock. The processor will be automatically
* re-enabled by an interrupt or by a reset.
*/
- writel(AT91_PMC_PCK, pmc + AT91_PMC_SCDR);
+ writel(AT91_PMC_PCK, pm_data.pmc + AT91_PMC_SCDR);
}
static void at91sam9_idle(void)
{
- writel(AT91_PMC_PCK, pmc + AT91_PMC_SCDR);
+ writel(AT91_PMC_PCK, pm_data.pmc + AT91_PMC_SCDR);
cpu_do_idle();
}
@@ -433,31 +436,46 @@ static void __init at91_pm_sram_init(void)
&at91_pm_suspend_in_sram, at91_pm_suspend_in_sram_sz);
}
+struct pmc_info {
+ unsigned long uhp_udp_mask;
+};
+
+static const struct pmc_info pmc_infos[] __initconst = {
+ { .uhp_udp_mask = AT91RM9200_PMC_UHP | AT91RM9200_PMC_UDP },
+ { .uhp_udp_mask = AT91SAM926x_PMC_UHP | AT91SAM926x_PMC_UDP },
+ { .uhp_udp_mask = AT91SAM926x_PMC_UHP },
+};
+
static const struct of_device_id atmel_pmc_ids[] __initconst = {
- { .compatible = "atmel,at91rm9200-pmc" },
- { .compatible = "atmel,at91sam9260-pmc" },
- { .compatible = "atmel,at91sam9g45-pmc" },
- { .compatible = "atmel,at91sam9n12-pmc" },
- { .compatible = "atmel,at91sam9x5-pmc" },
- { .compatible = "atmel,sama5d3-pmc" },
- { .compatible = "atmel,sama5d2-pmc" },
+ { .compatible = "atmel,at91rm9200-pmc", .data = &pmc_infos[0] },
+ { .compatible = "atmel,at91sam9260-pmc", .data = &pmc_infos[1] },
+ { .compatible = "atmel,at91sam9g45-pmc", .data = &pmc_infos[2] },
+ { .compatible = "atmel,at91sam9n12-pmc", .data = &pmc_infos[1] },
+ { .compatible = "atmel,at91sam9x5-pmc", .data = &pmc_infos[1] },
+ { .compatible = "atmel,sama5d3-pmc", .data = &pmc_infos[1] },
+ { .compatible = "atmel,sama5d2-pmc", .data = &pmc_infos[1] },
{ /* sentinel */ },
};
static void __init at91_pm_init(void (*pm_idle)(void))
{
struct device_node *pmc_np;
+ const struct of_device_id *of_id;
+ const struct pmc_info *pmc;
if (at91_cpuidle_device.dev.platform_data)
platform_device_register(&at91_cpuidle_device);
- pmc_np = of_find_matching_node(NULL, atmel_pmc_ids);
- pmc = of_iomap(pmc_np, 0);
- if (!pmc) {
+ pmc_np = of_find_matching_node_and_match(NULL, atmel_pmc_ids, &of_id);
+ pm_data.pmc = of_iomap(pmc_np, 0);
+ if (!pm_data.pmc) {
pr_err("AT91: PM not supported, PMC not found\n");
return;
}
+ pmc = of_id->data;
+ pm_data.uhp_udp_mask = pmc->uhp_udp_mask;
+
if (pm_idle)
arm_pm_idle = pm_idle;
@@ -478,40 +496,17 @@ void __init at91rm9200_pm_init(void)
*/
at91_ramc_write(0, AT91_MC_SDRAMC_LPR, 0);
- at91_pm_data.uhp_udp_mask = AT91RM9200_PMC_UHP | AT91RM9200_PMC_UDP;
- at91_pm_data.memctrl = AT91_MEMCTRL_MC;
-
at91_pm_init(at91rm9200_idle);
}
-void __init at91sam9260_pm_init(void)
-{
- at91_dt_ramc();
- at91_pm_data.memctrl = AT91_MEMCTRL_SDRAMC;
- at91_pm_data.uhp_udp_mask = AT91SAM926x_PMC_UHP | AT91SAM926x_PMC_UDP;
- at91_pm_init(at91sam9_idle);
-}
-
-void __init at91sam9g45_pm_init(void)
-{
- at91_dt_ramc();
- at91_pm_data.uhp_udp_mask = AT91SAM926x_PMC_UHP;
- at91_pm_data.memctrl = AT91_MEMCTRL_DDRSDR;
- at91_pm_init(at91sam9_idle);
-}
-
-void __init at91sam9x5_pm_init(void)
+void __init at91sam9_pm_init(void)
{
at91_dt_ramc();
- at91_pm_data.uhp_udp_mask = AT91SAM926x_PMC_UHP | AT91SAM926x_PMC_UDP;
- at91_pm_data.memctrl = AT91_MEMCTRL_DDRSDR;
at91_pm_init(at91sam9_idle);
}
void __init sama5_pm_init(void)
{
at91_dt_ramc();
- at91_pm_data.uhp_udp_mask = AT91SAM926x_PMC_UHP | AT91SAM926x_PMC_UDP;
- at91_pm_data.memctrl = AT91_MEMCTRL_DDRSDR;
at91_pm_init(NULL);
}
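
The pm.c rework above replaces the per-SoC init functions with small const info structs hung off of_device_id.data; of_find_matching_node_and_match() (and for_each_matching_node_and_match() for the RAM controllers) return the matched table entry so the code can pull that data pointer back out. A minimal, self-contained sketch of the pattern follows; the struct contents and the 0x30 mask value are invented for illustration, only the API calls are taken as-is.

    #include <linux/init.h>
    #include <linux/of.h>

    struct pmc_quirks {                     /* per-compatible data */
            unsigned long uhp_udp_mask;
    };

    static const struct pmc_quirks rm9200_quirks = { .uhp_udp_mask = 0x30 };

    static const struct of_device_id pmc_match[] __initconst = {
            { .compatible = "atmel,at91rm9200-pmc", .data = &rm9200_quirks },
            { /* sentinel */ },
    };

    static void __init pick_quirks(void)
    {
            const struct of_device_id *id;
            const struct pmc_quirks *q;
            struct device_node *np;

            np = of_find_matching_node_and_match(NULL, pmc_match, &id);
            if (!np)
                    return;

            q = id->data;           /* .data of the entry that matched */
            /* ... use q->uhp_udp_mask ... */
            of_node_put(np);
    }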
diff --git a/arch/arm/mach-at91/pm.h b/arch/arm/mach-at91/pm.h
index bf980c6ef294..fc0f7d048187 100644
--- a/arch/arm/mach-at91/pm.h
+++ b/arch/arm/mach-at91/pm.h
@@ -17,24 +17,20 @@
#include <soc/at91/at91sam9_ddrsdr.h>
#include <soc/at91/at91sam9_sdramc.h>
-#ifndef __ASSEMBLY__
-#define at91_ramc_read(id, field) \
- __raw_readl(at91_ramc_base[id] + field)
-
-#define at91_ramc_write(id, field, value) \
- __raw_writel(value, at91_ramc_base[id] + field)
-#endif
-
#define AT91_MEMCTRL_MC 0
#define AT91_MEMCTRL_SDRAMC 1
#define AT91_MEMCTRL_DDRSDR 2
-#define AT91_PM_MEMTYPE_MASK 0x0f
-
-#define AT91_PM_MODE_OFFSET 4
-#define AT91_PM_MODE_MASK 0x01
-#define AT91_PM_MODE(x) (((x) & AT91_PM_MODE_MASK) << AT91_PM_MODE_OFFSET)
-
#define AT91_PM_SLOW_CLOCK 0x01
+#ifndef __ASSEMBLY__
+struct at91_pm_data {
+ void __iomem *pmc;
+ void __iomem *ramc[2];
+ unsigned long uhp_udp_mask;
+ unsigned int memctrl;
+ unsigned int mode;
+};
+#endif
+
#endif
diff --git a/arch/arm/mach-at91/pm_data-offsets.c b/arch/arm/mach-at91/pm_data-offsets.c
new file mode 100644
index 000000000000..30302cb16df0
--- /dev/null
+++ b/arch/arm/mach-at91/pm_data-offsets.c
@@ -0,0 +1,13 @@
+#include <linux/stddef.h>
+#include <linux/kbuild.h>
+#include "pm.h"
+
+int main(void)
+{
+ DEFINE(PM_DATA_PMC, offsetof(struct at91_pm_data, pmc));
+ DEFINE(PM_DATA_RAMC0, offsetof(struct at91_pm_data, ramc[0]));
+ DEFINE(PM_DATA_RAMC1, offsetof(struct at91_pm_data, ramc[1]));
+ DEFINE(PM_DATA_MEMCTRL, offsetof(struct at91_pm_data, memctrl));
+ DEFINE(PM_DATA_MODE, offsetof(struct at91_pm_data, mode));
+ return 0;
+}
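
pm_data-offsets.c is an asm-offsets-style helper: it is only ever compiled to assembly, and the sed-y/filechk_offsets rules added to the Makefile earlier turn each DEFINE() line of that .s file into a plain #define that pm_suspend.S can include. Roughly, the generated include/generated/at91_pm_data-offsets.h would look like the sketch below; the guard name and banner come from the Makefile rules, while the numeric offsets are shown only as an illustration of the 32-bit layout of struct at91_pm_data.

    #ifndef __PM_DATA_OFFSETS_H__
    #define __PM_DATA_OFFSETS_H__
    /*
     * DO NOT MODIFY.
     *
     * This file was generated by Kbuild
     */

    #define PM_DATA_PMC 0 /* offsetof(struct at91_pm_data, pmc) */
    #define PM_DATA_RAMC0 4 /* offsetof(struct at91_pm_data, ramc[0]) */
    #define PM_DATA_RAMC1 8 /* offsetof(struct at91_pm_data, ramc[1]) */
    #define PM_DATA_MEMCTRL 16 /* offsetof(struct at91_pm_data, memctrl) */
    #define PM_DATA_MODE 20 /* offsetof(struct at91_pm_data, mode) */

    #endif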
diff --git a/arch/arm/mach-at91/pm_suspend.S b/arch/arm/mach-at91/pm_suspend.S
index a25defda3d22..96781daa671a 100644
--- a/arch/arm/mach-at91/pm_suspend.S
+++ b/arch/arm/mach-at91/pm_suspend.S
@@ -4,7 +4,7 @@
* Copyright (C) 2006 Savin Zlobec
*
* AT91SAM9 support:
- * Copyright (C) 2007 Anti Sullin <anti.sullin@artecdesign.ee
+ * Copyright (C) 2007 Anti Sullin <anti.sullin@artecdesign.ee>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -14,6 +14,7 @@
#include <linux/linkage.h>
#include <linux/clk/at91_pmc.h>
#include "pm.h"
+#include "generated/at91_pm_data-offsets.h"
#define SRAMC_SELF_FRESH_ACTIVE 0x01
#define SRAMC_SELF_FRESH_EXIT 0x00
@@ -72,13 +73,9 @@ tmp2 .req r5
.arm
/*
- * void at91_pm_suspend_in_sram(void __iomem *pmc, void __iomem *sdramc,
- * void __iomem *ramc1, int memctrl)
+ * void at91_suspend_sram_fn(struct at91_pm_data*)
* @input param:
- * @r0: base address of AT91_PMC
- * @r1: base address of SDRAM Controller (SDRAM, DDRSDR, or AT91_SYS)
- * @r2: base address of second SDRAM Controller or 0 if not present
- * @r3: pm information
+ * @r0: base address of struct at91_pm_data
*/
/* at91_pm_suspend_in_sram must be 8-byte aligned per the requirements of fncpy() */
.align 3
@@ -90,16 +87,16 @@ ENTRY(at91_pm_suspend_in_sram)
mov tmp1, #0
mcr p15, 0, tmp1, c7, c10, 4
- str r0, .pmc_base
- str r1, .sramc_base
- str r2, .sramc1_base
-
- and r0, r3, #AT91_PM_MEMTYPE_MASK
- str r0, .memtype
-
- lsr r0, r3, #AT91_PM_MODE_OFFSET
- and r0, r0, #AT91_PM_MODE_MASK
- str r0, .pm_mode
+ ldr tmp1, [r0, #PM_DATA_PMC]
+ str tmp1, .pmc_base
+ ldr tmp1, [r0, #PM_DATA_RAMC0]
+ str tmp1, .sramc_base
+ ldr tmp1, [r0, #PM_DATA_RAMC1]
+ str tmp1, .sramc1_base
+ ldr tmp1, [r0, #PM_DATA_MEMCTRL]
+ str tmp1, .memtype
+ ldr tmp1, [r0, #PM_DATA_MODE]
+ str tmp1, .pm_mode
/* Active the self-refresh mode */
mov r0, #SRAMC_SELF_FRESH_ACTIVE
diff --git a/arch/arm/mach-at91/sama5.c b/arch/arm/mach-at91/sama5.c
index b272c45b400f..6d157d0ead8e 100644
--- a/arch/arm/mach-at91/sama5.c
+++ b/arch/arm/mach-at91/sama5.c
@@ -15,60 +15,10 @@
#include <asm/system_misc.h>
#include "generic.h"
-#include "soc.h"
-
-static const struct at91_soc sama5_socs[] = {
- AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D21CU_EXID_MATCH,
- "sama5d21", "sama5d2"),
- AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D22CU_EXID_MATCH,
- "sama5d22", "sama5d2"),
- AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D23CU_EXID_MATCH,
- "sama5d23", "sama5d2"),
- AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D24CX_EXID_MATCH,
- "sama5d24", "sama5d2"),
- AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D24CU_EXID_MATCH,
- "sama5d24", "sama5d2"),
- AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D26CU_EXID_MATCH,
- "sama5d26", "sama5d2"),
- AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D27CU_EXID_MATCH,
- "sama5d27", "sama5d2"),
- AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D27CN_EXID_MATCH,
- "sama5d27", "sama5d2"),
- AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D28CU_EXID_MATCH,
- "sama5d28", "sama5d2"),
- AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D28CN_EXID_MATCH,
- "sama5d28", "sama5d2"),
- AT91_SOC(SAMA5D3_CIDR_MATCH, SAMA5D31_EXID_MATCH,
- "sama5d31", "sama5d3"),
- AT91_SOC(SAMA5D3_CIDR_MATCH, SAMA5D33_EXID_MATCH,
- "sama5d33", "sama5d3"),
- AT91_SOC(SAMA5D3_CIDR_MATCH, SAMA5D34_EXID_MATCH,
- "sama5d34", "sama5d3"),
- AT91_SOC(SAMA5D3_CIDR_MATCH, SAMA5D35_EXID_MATCH,
- "sama5d35", "sama5d3"),
- AT91_SOC(SAMA5D3_CIDR_MATCH, SAMA5D36_EXID_MATCH,
- "sama5d36", "sama5d3"),
- AT91_SOC(SAMA5D4_CIDR_MATCH, SAMA5D41_EXID_MATCH,
- "sama5d41", "sama5d4"),
- AT91_SOC(SAMA5D4_CIDR_MATCH, SAMA5D42_EXID_MATCH,
- "sama5d42", "sama5d4"),
- AT91_SOC(SAMA5D4_CIDR_MATCH, SAMA5D43_EXID_MATCH,
- "sama5d43", "sama5d4"),
- AT91_SOC(SAMA5D4_CIDR_MATCH, SAMA5D44_EXID_MATCH,
- "sama5d44", "sama5d4"),
- { /* sentinel */ },
-};
static void __init sama5_dt_device_init(void)
{
- struct soc_device *soc;
- struct device *soc_dev = NULL;
-
- soc = at91_soc_init(sama5_socs);
- if (soc != NULL)
- soc_dev = soc_device_to_device(soc);
-
- of_platform_default_populate(NULL, NULL, soc_dev);
+ of_platform_default_populate(NULL, NULL, NULL);
sama5_pm_init();
}
diff --git a/arch/arm/mach-at91/soc.c b/arch/arm/mach-at91/soc.c
deleted file mode 100644
index c6fda75ddb89..000000000000
--- a/arch/arm/mach-at91/soc.c
+++ /dev/null
@@ -1,142 +0,0 @@
-/*
- * Copyright (C) 2015 Atmel
- *
- * Alexandre Belloni <alexandre.belloni@free-electrons.com
- * Boris Brezillon <boris.brezillon@free-electrons.com
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- *
- */
-
-#define pr_fmt(fmt) "AT91: " fmt
-
-#include <linux/io.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_platform.h>
-#include <linux/slab.h>
-#include <linux/sys_soc.h>
-
-#include "soc.h"
-
-#define AT91_DBGU_CIDR 0x40
-#define AT91_DBGU_EXID 0x44
-#define AT91_CHIPID_CIDR 0x00
-#define AT91_CHIPID_EXID 0x04
-#define AT91_CIDR_VERSION(x) ((x) & 0x1f)
-#define AT91_CIDR_EXT BIT(31)
-#define AT91_CIDR_MATCH_MASK 0x7fffffe0
-
-static int __init at91_get_cidr_exid_from_dbgu(u32 *cidr, u32 *exid)
-{
- struct device_node *np;
- void __iomem *regs;
-
- np = of_find_compatible_node(NULL, NULL, "atmel,at91rm9200-dbgu");
- if (!np)
- np = of_find_compatible_node(NULL, NULL,
- "atmel,at91sam9260-dbgu");
- if (!np)
- return -ENODEV;
-
- regs = of_iomap(np, 0);
- of_node_put(np);
-
- if (!regs) {
- pr_warn("Could not map DBGU iomem range");
- return -ENXIO;
- }
-
- *cidr = readl(regs + AT91_DBGU_CIDR);
- *exid = readl(regs + AT91_DBGU_EXID);
-
- iounmap(regs);
-
- return 0;
-}
-
-static int __init at91_get_cidr_exid_from_chipid(u32 *cidr, u32 *exid)
-{
- struct device_node *np;
- void __iomem *regs;
-
- np = of_find_compatible_node(NULL, NULL, "atmel,sama5d2-chipid");
- if (!np)
- return -ENODEV;
-
- regs = of_iomap(np, 0);
- of_node_put(np);
-
- if (!regs) {
- pr_warn("Could not map DBGU iomem range");
- return -ENXIO;
- }
-
- *cidr = readl(regs + AT91_CHIPID_CIDR);
- *exid = readl(regs + AT91_CHIPID_EXID);
-
- iounmap(regs);
-
- return 0;
-}
-
-struct soc_device * __init at91_soc_init(const struct at91_soc *socs)
-{
- struct soc_device_attribute *soc_dev_attr;
- const struct at91_soc *soc;
- struct soc_device *soc_dev;
- u32 cidr, exid;
- int ret;
-
- /*
- * With SAMA5D2 and later SoCs, CIDR and EXID registers are no more
- * in the dbgu device but in the chipid device whose purpose is only
- * to expose these two registers.
- */
- ret = at91_get_cidr_exid_from_dbgu(&cidr, &exid);
- if (ret)
- ret = at91_get_cidr_exid_from_chipid(&cidr, &exid);
- if (ret) {
- if (ret == -ENODEV)
- pr_warn("Could not find identification node");
- return NULL;
- }
-
- for (soc = socs; soc->name; soc++) {
- if (soc->cidr_match != (cidr & AT91_CIDR_MATCH_MASK))
- continue;
-
- if (!(cidr & AT91_CIDR_EXT) || soc->exid_match == exid)
- break;
- }
-
- if (!soc->name) {
- pr_warn("Could not find matching SoC description\n");
- return NULL;
- }
-
- soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
- if (!soc_dev_attr)
- return NULL;
-
- soc_dev_attr->family = soc->family;
- soc_dev_attr->soc_id = soc->name;
- soc_dev_attr->revision = kasprintf(GFP_KERNEL, "%X",
- AT91_CIDR_VERSION(cidr));
- soc_dev = soc_device_register(soc_dev_attr);
- if (IS_ERR(soc_dev)) {
- kfree(soc_dev_attr->revision);
- kfree(soc_dev_attr);
- pr_warn("Could not register SoC device\n");
- return NULL;
- }
-
- if (soc->family)
- pr_info("Detected SoC family: %s\n", soc->family);
- pr_info("Detected SoC: %s, revision %X\n", soc->name,
- AT91_CIDR_VERSION(cidr));
-
- return soc_dev;
-}
diff --git a/arch/arm/mach-bcm/Kconfig b/arch/arm/mach-bcm/Kconfig
index a0e66d8200c5..f9389c5910e7 100644
--- a/arch/arm/mach-bcm/Kconfig
+++ b/arch/arm/mach-bcm/Kconfig
@@ -198,7 +198,9 @@ config ARCH_BRCMSTB
select HAVE_ARM_ARCH_TIMER
select BRCMSTB_L2_IRQ
select BCM7120_L2_IRQ
+ select ARCH_HAS_HOLES_MEMORYMODEL
select ARCH_DMA_ADDR_T_64BIT if ARM_LPAE
+ select ZONE_DMA if ARM_LPAE
select SOC_BRCMSTB
select SOC_BUS
help
diff --git a/arch/arm/mach-davinci/board-da850-evm.c b/arch/arm/mach-davinci/board-da850-evm.c
index df3ca38778af..b5625d009288 100644
--- a/arch/arm/mach-davinci/board-da850-evm.c
+++ b/arch/arm/mach-davinci/board-da850-evm.c
@@ -828,6 +828,9 @@ static struct regulator_consumer_supply fixed_supplies[] = {
/* Baseboard 1.8V: 5V -> TPS73701DCQ -> 1.8V */
REGULATOR_SUPPLY("DVDD", "1-0018"),
+
+ /* UI card 3.3V: 5V -> TPS73701DCQ -> 3.3V */
+ REGULATOR_SUPPLY("vcc", "1-0020"),
};
/* TPS65070 voltage regulator support */
@@ -1213,6 +1216,7 @@ static struct vpif_subdev_info da850_vpif_capture_sdev_info[] = {
static struct vpif_capture_config da850_vpif_capture_config = {
.subdev_info = da850_vpif_capture_sdev_info,
.subdev_count = ARRAY_SIZE(da850_vpif_capture_sdev_info),
+ .i2c_adapter_id = 1,
.chan_config[0] = {
.inputs = da850_ch0_inputs,
.input_count = ARRAY_SIZE(da850_ch0_inputs),
@@ -1290,6 +1294,7 @@ static struct vpif_display_config da850_vpif_display_config = {
.output_count = ARRAY_SIZE(da850_ch0_outputs),
},
.card_name = "DA850/OMAP-L138 Video Display",
+ .i2c_adapter_id = 1,
};
static __init void da850_vpif_init(void)
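
For reference, the "1-0020" string added above is an I2C client's dev_name(): i2c devices are named "<adapter number>-<address as four hex digits>", so this entry binds the fixed 3.3 V rail to whatever device sits at address 0x20 on bus 1 (the UI card device the comment refers to). A sketch of the same binding in isolation, with the supply name and address simply copied from the hunk:

    #include <linux/regulator/machine.h>

    /* Consumer at I2C bus 1, address 0x20 -> dev_name() "1-0020" */
    static struct regulator_consumer_supply ui_card_3v3_consumers[] = {
            REGULATOR_SUPPLY("vcc", "1-0020"),
    };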
diff --git a/arch/arm/mach-davinci/board-dm644x-evm.c b/arch/arm/mach-davinci/board-dm644x-evm.c
index 023480b75244..20f1874a5657 100644
--- a/arch/arm/mach-davinci/board-dm644x-evm.c
+++ b/arch/arm/mach-davinci/board-dm644x-evm.c
@@ -744,7 +744,8 @@ static int davinci_phy_fixup(struct phy_device *phydev)
return 0;
}
-#define HAS_ATA IS_ENABLED(CONFIG_BLK_DEV_PALMCHIP_BK3710)
+#define HAS_ATA (IS_ENABLED(CONFIG_BLK_DEV_PALMCHIP_BK3710) || \
+ IS_ENABLED(CONFIG_PATA_BK3710))
#define HAS_NOR IS_ENABLED(CONFIG_MTD_PHYSMAP)
diff --git a/arch/arm/mach-davinci/board-dm646x-evm.c b/arch/arm/mach-davinci/board-dm646x-evm.c
index f702d4fc8eb8..cb176826d1cb 100644
--- a/arch/arm/mach-davinci/board-dm646x-evm.c
+++ b/arch/arm/mach-davinci/board-dm646x-evm.c
@@ -119,7 +119,8 @@ static struct platform_device davinci_nand_device = {
},
};
-#define HAS_ATA IS_ENABLED(CONFIG_BLK_DEV_PALMCHIP_BK3710)
+#define HAS_ATA (IS_ENABLED(CONFIG_BLK_DEV_PALMCHIP_BK3710) || \
+ IS_ENABLED(CONFIG_PATA_BK3710))
#ifdef CONFIG_I2C
/* CPLD Register 0 bits to control ATA */
diff --git a/arch/arm/mach-davinci/board-neuros-osd2.c b/arch/arm/mach-davinci/board-neuros-osd2.c
index 0a7838852649..0c02aaad0539 100644
--- a/arch/arm/mach-davinci/board-neuros-osd2.c
+++ b/arch/arm/mach-davinci/board-neuros-osd2.c
@@ -163,7 +163,8 @@ static struct davinci_mmc_config davinci_ntosd2_mmc_config = {
.wires = 4,
};
-#define HAS_ATA IS_ENABLED(CONFIG_BLK_DEV_PALMCHIP_BK3710)
+#define HAS_ATA (IS_ENABLED(CONFIG_BLK_DEV_PALMCHIP_BK3710) || \
+ IS_ENABLED(CONFIG_PATA_BK3710))
#define HAS_NAND IS_ENABLED(CONFIG_MTD_NAND_DAVINCI)
diff --git a/arch/arm/mach-davinci/da830.c b/arch/arm/mach-davinci/da830.c
index 073c458d0c67..bd88470f3e5c 100644
--- a/arch/arm/mach-davinci/da830.c
+++ b/arch/arm/mach-davinci/da830.c
@@ -304,6 +304,11 @@ static struct clk usb20_clk = {
.gpsc = 1,
};
+static struct clk cppi41_clk = {
+ .name = "cppi41",
+ .parent = &usb20_clk,
+};
+
static struct clk aemif_clk = {
.name = "aemif",
.parent = &pll0_sysclk3,
@@ -413,6 +418,7 @@ static struct clk_lookup da830_clks[] = {
CLK("davinci-mcasp.1", NULL, &mcasp1_clk),
CLK("davinci-mcasp.2", NULL, &mcasp2_clk),
CLK("musb-da8xx", "usb20", &usb20_clk),
+ CLK("cppi41-dmaengine", NULL, &cppi41_clk),
CLK(NULL, "aemif", &aemif_clk),
CLK(NULL, "aintc", &aintc_clk),
CLK(NULL, "secu_mgr", &secu_mgr_clk),
diff --git a/arch/arm/mach-davinci/da850.c b/arch/arm/mach-davinci/da850.c
index ccad2f99dfc9..07d6f0eb8c82 100644
--- a/arch/arm/mach-davinci/da850.c
+++ b/arch/arm/mach-davinci/da850.c
@@ -401,6 +401,11 @@ static struct clk usb20_clk = {
.gpsc = 1,
};
+static struct clk cppi41_clk = {
+ .name = "cppi41",
+ .parent = &usb20_clk,
+};
+
static struct clk spi0_clk = {
.name = "spi0",
.parent = &pll0_sysclk2,
@@ -560,6 +565,7 @@ static struct clk_lookup da850_clks[] = {
CLK("davinci-nand.0", "aemif", &aemif_nand_clk),
CLK("ohci-da8xx", "usb11", &usb11_clk),
CLK("musb-da8xx", "usb20", &usb20_clk),
+ CLK("cppi41-dmaengine", NULL, &cppi41_clk),
CLK("spi_davinci.0", NULL, &spi0_clk),
CLK("spi_davinci.1", NULL, &spi1_clk),
CLK("vpif", NULL, &vpif_clk),
diff --git a/arch/arm/mach-davinci/da8xx-dt.c b/arch/arm/mach-davinci/da8xx-dt.c
index e3cef503cd8f..5699ce39e64f 100644
--- a/arch/arm/mach-davinci/da8xx-dt.c
+++ b/arch/arm/mach-davinci/da8xx-dt.c
@@ -53,6 +53,7 @@ static struct of_dev_auxdata da850_auxdata_lookup[] __initdata = {
OF_DEV_AUXDATA("ti,da830-musb", 0x01e00000, "musb-da8xx", NULL),
OF_DEV_AUXDATA("ti,da830-usb-phy", 0x01c1417c, "da8xx-usb-phy", NULL),
OF_DEV_AUXDATA("ti,da850-ahci", 0x01e18000, "ahci_da850", NULL),
+ OF_DEV_AUXDATA("ti,da850-vpif", 0x01e17000, "vpif", NULL),
{}
};
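
OF_DEV_AUXDATA entries give a DT-probed device a fixed legacy name (and optionally platform data) when it is created by of_platform_populate(). The new line forces the device at 0x01e17000 to be called "vpif", which matches the dev_id used by the non-DT clock table earlier in this series (CLK("vpif", NULL, &vpif_clk)), so the clkdev lookup keeps working without a DT clock binding. A stripped-down sketch of the pattern, reduced to the one entry under discussion; the surrounding function name is illustrative.

    #include <linux/init.h>
    #include <linux/of_platform.h>

    static struct of_dev_auxdata da850_auxdata[] __initdata = {
            /* compatible, reg address, forced dev name, platform_data */
            OF_DEV_AUXDATA("ti,da850-vpif", 0x01e17000, "vpif", NULL),
            { /* sentinel */ },
    };

    static void __init da850_init_machine(void)
    {
            /* Names from the lookup table override the default
             * DT-generated device names. */
            of_platform_default_populate(NULL, da850_auxdata, NULL);
    }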
diff --git a/arch/arm/mach-davinci/pdata-quirks.c b/arch/arm/mach-davinci/pdata-quirks.c
index 5b57da475065..329f5402ad1d 100644
--- a/arch/arm/mach-davinci/pdata-quirks.c
+++ b/arch/arm/mach-davinci/pdata-quirks.c
@@ -10,26 +10,202 @@
#include <linux/kernel.h>
#include <linux/of_platform.h>
+#include <media/i2c/tvp514x.h>
+#include <media/i2c/adv7343.h>
+
#include <mach/common.h>
+#include <mach/da8xx.h>
struct pdata_init {
const char *compatible;
void (*fn)(void);
};
+#define TVP5147_CH0 "tvp514x-0"
+#define TVP5147_CH1 "tvp514x-1"
+
+/* VPIF capture configuration */
+static struct tvp514x_platform_data tvp5146_pdata = {
+ .clk_polarity = 0,
+ .hs_polarity = 1,
+ .vs_polarity = 1,
+};
+
+#define TVP514X_STD_ALL (V4L2_STD_NTSC | V4L2_STD_PAL)
+
+static const struct vpif_input da850_ch0_inputs[] = {
+ {
+ .input = {
+ .index = 0,
+ .name = "Composite",
+ .type = V4L2_INPUT_TYPE_CAMERA,
+ .capabilities = V4L2_IN_CAP_STD,
+ .std = TVP514X_STD_ALL,
+ },
+ .input_route = INPUT_CVBS_VI2B,
+ .output_route = OUTPUT_10BIT_422_EMBEDDED_SYNC,
+ .subdev_name = TVP5147_CH0,
+ },
+};
+
+static const struct vpif_input da850_ch1_inputs[] = {
+ {
+ .input = {
+ .index = 0,
+ .name = "S-Video",
+ .type = V4L2_INPUT_TYPE_CAMERA,
+ .capabilities = V4L2_IN_CAP_STD,
+ .std = TVP514X_STD_ALL,
+ },
+ .input_route = INPUT_SVIDEO_VI2C_VI1C,
+ .output_route = OUTPUT_10BIT_422_EMBEDDED_SYNC,
+ .subdev_name = TVP5147_CH1,
+ },
+};
+
+static struct vpif_subdev_info da850_vpif_capture_sdev_info[] = {
+ {
+ .name = TVP5147_CH0,
+ .board_info = {
+ I2C_BOARD_INFO("tvp5146", 0x5d),
+ .platform_data = &tvp5146_pdata,
+ },
+ },
+ {
+ .name = TVP5147_CH1,
+ .board_info = {
+ I2C_BOARD_INFO("tvp5146", 0x5c),
+ .platform_data = &tvp5146_pdata,
+ },
+ },
+};
+
+static struct vpif_capture_config da850_vpif_capture_config = {
+ .subdev_info = da850_vpif_capture_sdev_info,
+ .subdev_count = ARRAY_SIZE(da850_vpif_capture_sdev_info),
+ .chan_config[0] = {
+ .inputs = da850_ch0_inputs,
+ .input_count = ARRAY_SIZE(da850_ch0_inputs),
+ .vpif_if = {
+ .if_type = VPIF_IF_BT656,
+ .hd_pol = 1,
+ .vd_pol = 1,
+ .fid_pol = 0,
+ },
+ },
+ .chan_config[1] = {
+ .inputs = da850_ch1_inputs,
+ .input_count = ARRAY_SIZE(da850_ch1_inputs),
+ .vpif_if = {
+ .if_type = VPIF_IF_BT656,
+ .hd_pol = 1,
+ .vd_pol = 1,
+ .fid_pol = 0,
+ },
+ },
+ .card_name = "DA850/OMAP-L138 Video Capture",
+};
+
+static void __init da850_vpif_legacy_register_capture(void)
+{
+ int ret;
+
+ ret = da850_register_vpif_capture(&da850_vpif_capture_config);
+ if (ret)
+ pr_warn("%s: VPIF capture setup failed: %d\n",
+ __func__, ret);
+}
+
+static void __init da850_vpif_capture_legacy_init_lcdk(void)
+{
+ da850_vpif_capture_config.subdev_count = 1;
+ da850_vpif_legacy_register_capture();
+}
+
+static void __init da850_vpif_capture_legacy_init_evm(void)
+{
+ da850_vpif_legacy_register_capture();
+}
+
+static struct adv7343_platform_data adv7343_pdata = {
+ .mode_config = {
+ .dac = { 1, 1, 1 },
+ },
+ .sd_config = {
+ .sd_dac_out = { 1 },
+ },
+};
+
+static struct vpif_subdev_info da850_vpif_subdev[] = {
+ {
+ .name = "adv7343",
+ .board_info = {
+ I2C_BOARD_INFO("adv7343", 0x2a),
+ .platform_data = &adv7343_pdata,
+ },
+ },
+};
+
+static const struct vpif_output da850_ch0_outputs[] = {
+ {
+ .output = {
+ .index = 0,
+ .name = "Composite",
+ .type = V4L2_OUTPUT_TYPE_ANALOG,
+ .capabilities = V4L2_OUT_CAP_STD,
+ .std = V4L2_STD_ALL,
+ },
+ .subdev_name = "adv7343",
+ .output_route = ADV7343_COMPOSITE_ID,
+ },
+ {
+ .output = {
+ .index = 1,
+ .name = "S-Video",
+ .type = V4L2_OUTPUT_TYPE_ANALOG,
+ .capabilities = V4L2_OUT_CAP_STD,
+ .std = V4L2_STD_ALL,
+ },
+ .subdev_name = "adv7343",
+ .output_route = ADV7343_SVIDEO_ID,
+ },
+};
+
+static struct vpif_display_config da850_vpif_display_config = {
+ .subdevinfo = da850_vpif_subdev,
+ .subdev_count = ARRAY_SIZE(da850_vpif_subdev),
+ .chan_config[0] = {
+ .outputs = da850_ch0_outputs,
+ .output_count = ARRAY_SIZE(da850_ch0_outputs),
+ },
+ .card_name = "DA850/OMAP-L138 Video Display",
+};
+
+static void __init da850_vpif_display_legacy_init_evm(void)
+{
+ int ret;
+
+ ret = da850_register_vpif_display(&da850_vpif_display_config);
+ if (ret)
+ pr_warn("%s: VPIF display setup failed: %d\n",
+ __func__, ret);
+}
+
static void pdata_quirks_check(struct pdata_init *quirks)
{
while (quirks->compatible) {
if (of_machine_is_compatible(quirks->compatible)) {
if (quirks->fn)
quirks->fn();
- break;
}
quirks++;
}
}
static struct pdata_init pdata_quirks[] __initdata = {
+ { "ti,da850-lcdk", da850_vpif_capture_legacy_init_lcdk, },
+ { "ti,da850-evm", da850_vpif_display_legacy_init_evm, },
+ { "ti,da850-evm", da850_vpif_capture_legacy_init_evm, },
{ /* sentinel */ },
};
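
Dropping the break in pdata_quirks_check() is what allows two entries to share the "ti,da850-evm" compatible: every matching quirk now runs, so the EVM gets both the VPIF display and the VPIF capture legacy setup. A condensed sketch of the walk after the change, with the quirk functions reduced to stubs:

    #include <linux/init.h>
    #include <linux/of.h>

    struct pdata_init {
            const char *compatible;
            void (*fn)(void);
    };

    static void evm_display_init(void) { /* register VPIF display */ }
    static void evm_capture_init(void) { /* register VPIF capture */ }

    static struct pdata_init pdata_quirks[] __initdata = {
            { "ti,da850-evm", evm_display_init },
            { "ti,da850-evm", evm_capture_init },  /* same compatible, also runs */
            { /* sentinel */ },
    };

    static void pdata_quirks_check(struct pdata_init *quirks)
    {
            while (quirks->compatible) {
                    if (of_machine_is_compatible(quirks->compatible) &&
                        quirks->fn)
                            quirks->fn();      /* no break: keep scanning */
                    quirks++;
            }
    }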
diff --git a/arch/arm/mach-davinci/pm.c b/arch/arm/mach-davinci/pm.c
index 0afd201ab980..efb80354f303 100644
--- a/arch/arm/mach-davinci/pm.c
+++ b/arch/arm/mach-davinci/pm.c
@@ -108,7 +108,6 @@ static int davinci_pm_enter(suspend_state_t state)
int ret = 0;
switch (state) {
- case PM_SUSPEND_STANDBY:
case PM_SUSPEND_MEM:
davinci_pm_suspend();
break;
diff --git a/arch/arm/mach-ep93xx/ts72xx.c b/arch/arm/mach-ep93xx/ts72xx.c
index 8a5b6f059498..55b186ef863a 100644
--- a/arch/arm/mach-ep93xx/ts72xx.c
+++ b/arch/arm/mach-ep93xx/ts72xx.c
@@ -210,6 +210,28 @@ static struct ep93xx_eth_data __initdata ts72xx_eth_data = {
.phy_id = 1,
};
+#if IS_ENABLED(CONFIG_FPGA_MGR_TS73XX)
+
+/* Relative to EP93XX_CS1_PHYS_BASE */
+#define TS73XX_FPGA_LOADER_BASE 0x03c00000
+
+static struct resource ts73xx_fpga_resources[] = {
+ {
+ .start = EP93XX_CS1_PHYS_BASE + TS73XX_FPGA_LOADER_BASE,
+ .end = EP93XX_CS1_PHYS_BASE + TS73XX_FPGA_LOADER_BASE + 1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct platform_device ts73xx_fpga_device = {
+ .name = "ts73xx-fpga-mgr",
+ .id = -1,
+ .resource = ts73xx_fpga_resources,
+ .num_resources = ARRAY_SIZE(ts73xx_fpga_resources),
+};
+
+#endif
+
static void __init ts72xx_init_machine(void)
{
ep93xx_init_devices();
@@ -218,6 +240,10 @@ static void __init ts72xx_init_machine(void)
platform_device_register(&ts72xx_wdt_device);
ep93xx_register_eth(&ts72xx_eth_data, 1);
+#if IS_ENABLED(CONFIG_FPGA_MGR_TS73XX)
+ if (board_is_ts7300())
+ platform_device_register(&ts73xx_fpga_device);
+#endif
}
MACHINE_START(TS72XX, "Technologic Systems TS-72xx SBC")
diff --git a/arch/arm/mach-gemini/Kconfig b/arch/arm/mach-gemini/Kconfig
index 6f066ee4bf24..06c8b095154c 100644
--- a/arch/arm/mach-gemini/Kconfig
+++ b/arch/arm/mach-gemini/Kconfig
@@ -1,40 +1,13 @@
-if ARCH_GEMINI
-
-menu "Cortina Systems Gemini Implementations"
-
-config MACH_NAS4220B
- bool "Raidsonic NAS-4220-B"
- select GEMINI_MEM_SWAP
- help
- Say Y here if you intend to run this kernel on a
- Raidsonic NAS-4220-B.
-
-config MACH_RUT100
- bool "Teltonika RUT100"
- select GEMINI_MEM_SWAP
- help
- Say Y here if you intend to run this kernel on a
- Teltonika 3G Router RUT100.
-
-config MACH_WBD111
- bool "Wiliboard WBD-111"
- select GEMINI_MEM_SWAP
- help
- Say Y here if you intend to run this kernel on a
- Wiliboard WBD-111.
-
-config MACH_WBD222
- bool "Wiliboard WBD-222"
- select GEMINI_MEM_SWAP
- help
- Say Y here if you intend to run this kernel on a
- Wiliboard WBD-222.
-
-endmenu
-
-config GEMINI_MEM_SWAP
- bool "Gemini memory is swapped"
- help
- Say Y here if Gemini memory is swapped by bootloader.
-
-endif
+menuconfig ARCH_GEMINI
+ bool "Cortina Systems Gemini"
+ depends on ARCH_MULTI_V4
+ select ARM_APPENDED_DTB # Old Redboot bootloaders deployed
+ select FARADAY_FTINTC010
+ select FTTMR010_TIMER
+ select GPIO_FTGPIO010
+ select GPIOLIB
+ select POWER_RESET
+ select POWER_RESET_GEMINI_POWEROFF
+ select POWER_RESET_SYSCON
+ help
+ Support for the Cortina Systems Gemini family SoCs
diff --git a/arch/arm/mach-gemini/Makefile b/arch/arm/mach-gemini/Makefile
index 7963a77be637..ca0db5477180 100644
--- a/arch/arm/mach-gemini/Makefile
+++ b/arch/arm/mach-gemini/Makefile
@@ -1,13 +1,2 @@
-#
-# Makefile for the linux kernel.
-#
-
-# Object file lists.
-
-obj-y := irq.o mm.o time.o devices.o gpio.o idle.o reset.o
-
-# Board-specific support
-obj-$(CONFIG_MACH_NAS4220B) += board-nas4220b.o
-obj-$(CONFIG_MACH_RUT100) += board-rut1xx.o
-obj-$(CONFIG_MACH_WBD111) += board-wbd111.o
-obj-$(CONFIG_MACH_WBD222) += board-wbd222.o
+# Makefile for Cortina systems Gemini
+obj-y := board-dt.o
diff --git a/arch/arm/mach-gemini/Makefile.boot b/arch/arm/mach-gemini/Makefile.boot
deleted file mode 100644
index 683f52b20e3d..000000000000
--- a/arch/arm/mach-gemini/Makefile.boot
+++ /dev/null
@@ -1,9 +0,0 @@
-ifeq ($(CONFIG_GEMINI_MEM_SWAP),y)
- zreladdr-y += 0x00008000
-params_phys-y := 0x00000100
-initrd_phys-y := 0x00800000
-else
- zreladdr-y += 0x10008000
-params_phys-y := 0x10000100
-initrd_phys-y := 0x10800000
-endif
diff --git a/arch/arm/mach-gemini/board-dt.c b/arch/arm/mach-gemini/board-dt.c
new file mode 100644
index 000000000000..c0c0ebdd551e
--- /dev/null
+++ b/arch/arm/mach-gemini/board-dt.c
@@ -0,0 +1,62 @@
+/*
+ * Gemini Device Tree boot support
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/io.h>
+
+#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+#include <asm/system_misc.h>
+#include <asm/proc-fns.h>
+
+#ifdef CONFIG_DEBUG_GEMINI
+/* This is needed for LL-debug/earlyprintk/debug-macro.S */
+static struct map_desc gemini_io_desc[] __initdata = {
+ {
+ .virtual = CONFIG_DEBUG_UART_VIRT,
+ .pfn = __phys_to_pfn(CONFIG_DEBUG_UART_PHYS),
+ .length = SZ_4K,
+ .type = MT_DEVICE,
+ },
+};
+
+static void __init gemini_map_io(void)
+{
+ iotable_init(gemini_io_desc, ARRAY_SIZE(gemini_io_desc));
+}
+#else
+#define gemini_map_io NULL
+#endif
+
+static void gemini_idle(void)
+{
+ /*
+ * Because of broken hardware we have to enable interrupts or the CPU
+ * will never wake up... Actually it is not very good to enable
+ * interrupts first since the scheduler can miss a tick, but there is
+ * no other way around this. Platforms that need it for power saving
+ * should enable it in init code, since by default it is
+ * disabled.
+ */
+
+ /* FIXME: Enabling interrupts here is racy! */
+ local_irq_enable();
+ cpu_do_idle();
+}
+
+static void __init gemini_init_machine(void)
+{
+ arm_pm_idle = gemini_idle;
+}
+
+static const char *gemini_board_compat[] = {
+ "cortina,gemini",
+ NULL,
+};
+
+DT_MACHINE_START(GEMINI_DT, "Gemini (Device Tree)")
+ .map_io = gemini_map_io,
+ .init_machine = gemini_init_machine,
+ .dt_compat = gemini_board_compat,
+MACHINE_END
diff --git a/arch/arm/mach-gemini/board-nas4220b.c b/arch/arm/mach-gemini/board-nas4220b.c
deleted file mode 100644
index 18b12796acf9..000000000000
--- a/arch/arm/mach-gemini/board-nas4220b.c
+++ /dev/null
@@ -1,106 +0,0 @@
-/*
- * Support for Raidsonic NAS-4220-B
- *
- * Copyright (C) 2009 Janos Laube <janos.dev@gmail.com>
- *
- * based on rut1xx.c
- * Copyright (C) 2008 Paulius Zaleckas <paulius.zaleckas@teltonika.lt>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/leds.h>
-#include <linux/input.h>
-#include <linux/gpio_keys.h>
-#include <linux/io.h>
-
-#include <asm/setup.h>
-#include <asm/mach-types.h>
-#include <asm/mach/arch.h>
-#include <asm/mach/time.h>
-
-#include <mach/hardware.h>
-#include <mach/global_reg.h>
-
-#include "common.h"
-
-static struct gpio_led ib4220b_leds[] = {
- {
- .name = "nas4220b:orange:hdd",
- .default_trigger = "none",
- .gpio = 60,
- },
- {
- .name = "nas4220b:green:os",
- .default_trigger = "heartbeat",
- .gpio = 62,
- },
-};
-
-static struct gpio_led_platform_data ib4220b_leds_data = {
- .num_leds = ARRAY_SIZE(ib4220b_leds),
- .leds = ib4220b_leds,
-};
-
-static struct platform_device ib4220b_led_device = {
- .name = "leds-gpio",
- .id = -1,
- .dev = {
- .platform_data = &ib4220b_leds_data,
- },
-};
-
-static struct gpio_keys_button ib4220b_keys[] = {
- {
- .code = KEY_SETUP,
- .gpio = 61,
- .active_low = 1,
- .desc = "Backup Button",
- .type = EV_KEY,
- },
- {
- .code = KEY_RESTART,
- .gpio = 63,
- .active_low = 1,
- .desc = "Softreset Button",
- .type = EV_KEY,
- },
-};
-
-static struct gpio_keys_platform_data ib4220b_keys_data = {
- .buttons = ib4220b_keys,
- .nbuttons = ARRAY_SIZE(ib4220b_keys),
-};
-
-static struct platform_device ib4220b_key_device = {
- .name = "gpio-keys",
- .id = -1,
- .dev = {
- .platform_data = &ib4220b_keys_data,
- },
-};
-
-static void __init ib4220b_init(void)
-{
- gemini_gpio_init();
- platform_register_uart();
- platform_register_pflash(SZ_16M, NULL, 0);
- platform_device_register(&ib4220b_led_device);
- platform_device_register(&ib4220b_key_device);
- platform_register_rtc();
-}
-
-MACHINE_START(NAS4220B, "Raidsonic NAS IB-4220-B")
- .atag_offset = 0x100,
- .map_io = gemini_map_io,
- .init_irq = gemini_init_irq,
- .init_time = gemini_timer_init,
- .init_machine = ib4220b_init,
- .restart = gemini_restart,
-MACHINE_END
diff --git a/arch/arm/mach-gemini/board-rut1xx.c b/arch/arm/mach-gemini/board-rut1xx.c
deleted file mode 100644
index 7a675f88ffd6..000000000000
--- a/arch/arm/mach-gemini/board-rut1xx.c
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- * Support for Teltonika RUT1xx
- *
- * Copyright (C) 2008-2009 Paulius Zaleckas <paulius.zaleckas@teltonika.lt>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/leds.h>
-#include <linux/input.h>
-#include <linux/gpio_keys.h>
-#include <linux/sizes.h>
-
-#include <asm/mach-types.h>
-#include <asm/mach/arch.h>
-#include <asm/mach/time.h>
-
-#include "common.h"
-
-static struct gpio_keys_button rut1xx_keys[] = {
- {
- .code = KEY_SETUP,
- .gpio = 60,
- .active_low = 1,
- .desc = "Reset to defaults",
- .type = EV_KEY,
- },
-};
-
-static struct gpio_keys_platform_data rut1xx_keys_data = {
- .buttons = rut1xx_keys,
- .nbuttons = ARRAY_SIZE(rut1xx_keys),
-};
-
-static struct platform_device rut1xx_keys_device = {
- .name = "gpio-keys",
- .id = -1,
- .dev = {
- .platform_data = &rut1xx_keys_data,
- },
-};
-
-static struct gpio_led rut100_leds[] = {
- {
- .name = "Power",
- .default_trigger = "heartbeat",
- .gpio = 17,
- },
- {
- .name = "GSM",
- .default_trigger = "default-on",
- .gpio = 7,
- .active_low = 1,
- },
-};
-
-static struct gpio_led_platform_data rut100_leds_data = {
- .num_leds = ARRAY_SIZE(rut100_leds),
- .leds = rut100_leds,
-};
-
-static struct platform_device rut1xx_leds = {
- .name = "leds-gpio",
- .id = -1,
- .dev = {
- .platform_data = &rut100_leds_data,
- },
-};
-
-static void __init rut1xx_init(void)
-{
- gemini_gpio_init();
- platform_register_uart();
- platform_register_pflash(SZ_8M, NULL, 0);
- platform_device_register(&rut1xx_leds);
- platform_device_register(&rut1xx_keys_device);
- platform_register_rtc();
-}
-
-MACHINE_START(RUT100, "Teltonika RUT100")
- .atag_offset = 0x100,
- .map_io = gemini_map_io,
- .init_irq = gemini_init_irq,
- .init_time = gemini_timer_init,
- .init_machine = rut1xx_init,
- .restart = gemini_restart,
-MACHINE_END
diff --git a/arch/arm/mach-gemini/board-wbd111.c b/arch/arm/mach-gemini/board-wbd111.c
deleted file mode 100644
index 14c56f3f0ec2..000000000000
--- a/arch/arm/mach-gemini/board-wbd111.c
+++ /dev/null
@@ -1,133 +0,0 @@
-/*
- * Support for Wiliboard WBD-111
- *
- * Copyright (C) 2009 Imre Kaloz <kaloz@openwrt.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/leds.h>
-#include <linux/input.h>
-#include <linux/skbuff.h>
-#include <linux/gpio_keys.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/partitions.h>
-#include <asm/mach-types.h>
-#include <asm/mach/arch.h>
-#include <asm/mach/time.h>
-
-
-#include "common.h"
-
-static struct gpio_keys_button wbd111_keys[] = {
- {
- .code = KEY_SETUP,
- .gpio = 5,
- .active_low = 1,
- .desc = "reset",
- .type = EV_KEY,
- },
-};
-
-static struct gpio_keys_platform_data wbd111_keys_data = {
- .buttons = wbd111_keys,
- .nbuttons = ARRAY_SIZE(wbd111_keys),
-};
-
-static struct platform_device wbd111_keys_device = {
- .name = "gpio-keys",
- .id = -1,
- .dev = {
- .platform_data = &wbd111_keys_data,
- },
-};
-
-static struct gpio_led wbd111_leds[] = {
- {
- .name = "L3red",
- .gpio = 1,
- },
- {
- .name = "L4green",
- .gpio = 2,
- },
- {
- .name = "L4red",
- .gpio = 3,
- },
- {
- .name = "L3green",
- .gpio = 5,
- },
-};
-
-static struct gpio_led_platform_data wbd111_leds_data = {
- .num_leds = ARRAY_SIZE(wbd111_leds),
- .leds = wbd111_leds,
-};
-
-static struct platform_device wbd111_leds_device = {
- .name = "leds-gpio",
- .id = -1,
- .dev = {
- .platform_data = &wbd111_leds_data,
- },
-};
-
-static struct mtd_partition wbd111_partitions[] = {
- {
- .name = "RedBoot",
- .offset = 0,
- .size = 0x020000,
- .mask_flags = MTD_WRITEABLE,
- } , {
- .name = "kernel",
- .offset = 0x020000,
- .size = 0x100000,
- } , {
- .name = "rootfs",
- .offset = 0x120000,
- .size = 0x6a0000,
- } , {
- .name = "VCTL",
- .offset = 0x7c0000,
- .size = 0x010000,
- .mask_flags = MTD_WRITEABLE,
- } , {
- .name = "cfg",
- .offset = 0x7d0000,
- .size = 0x010000,
- .mask_flags = MTD_WRITEABLE,
- } , {
- .name = "FIS",
- .offset = 0x7e0000,
- .size = 0x010000,
- .mask_flags = MTD_WRITEABLE,
- }
-};
-#define wbd111_num_partitions ARRAY_SIZE(wbd111_partitions)
-
-static void __init wbd111_init(void)
-{
- gemini_gpio_init();
- platform_register_uart();
- platform_register_pflash(SZ_8M, wbd111_partitions,
- wbd111_num_partitions);
- platform_device_register(&wbd111_leds_device);
- platform_device_register(&wbd111_keys_device);
- platform_register_rtc();
-}
-
-MACHINE_START(WBD111, "Wiliboard WBD-111")
- .atag_offset = 0x100,
- .map_io = gemini_map_io,
- .init_irq = gemini_init_irq,
- .init_time = gemini_timer_init,
- .init_machine = wbd111_init,
- .restart = gemini_restart,
-MACHINE_END
diff --git a/arch/arm/mach-gemini/board-wbd222.c b/arch/arm/mach-gemini/board-wbd222.c
deleted file mode 100644
index 6070282ce243..000000000000
--- a/arch/arm/mach-gemini/board-wbd222.c
+++ /dev/null
@@ -1,133 +0,0 @@
-/*
- * Support for Wiliboard WBD-222
- *
- * Copyright (C) 2009 Imre Kaloz <kaloz@openwrt.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/leds.h>
-#include <linux/input.h>
-#include <linux/skbuff.h>
-#include <linux/gpio_keys.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/partitions.h>
-#include <asm/mach-types.h>
-#include <asm/mach/arch.h>
-#include <asm/mach/time.h>
-
-
-#include "common.h"
-
-static struct gpio_keys_button wbd222_keys[] = {
- {
- .code = KEY_SETUP,
- .gpio = 5,
- .active_low = 1,
- .desc = "reset",
- .type = EV_KEY,
- },
-};
-
-static struct gpio_keys_platform_data wbd222_keys_data = {
- .buttons = wbd222_keys,
- .nbuttons = ARRAY_SIZE(wbd222_keys),
-};
-
-static struct platform_device wbd222_keys_device = {
- .name = "gpio-keys",
- .id = -1,
- .dev = {
- .platform_data = &wbd222_keys_data,
- },
-};
-
-static struct gpio_led wbd222_leds[] = {
- {
- .name = "L3red",
- .gpio = 1,
- },
- {
- .name = "L4green",
- .gpio = 2,
- },
- {
- .name = "L4red",
- .gpio = 3,
- },
- {
- .name = "L3green",
- .gpio = 5,
- },
-};
-
-static struct gpio_led_platform_data wbd222_leds_data = {
- .num_leds = ARRAY_SIZE(wbd222_leds),
- .leds = wbd222_leds,
-};
-
-static struct platform_device wbd222_leds_device = {
- .name = "leds-gpio",
- .id = -1,
- .dev = {
- .platform_data = &wbd222_leds_data,
- },
-};
-
-static struct mtd_partition wbd222_partitions[] = {
- {
- .name = "RedBoot",
- .offset = 0,
- .size = 0x020000,
- .mask_flags = MTD_WRITEABLE,
- } , {
- .name = "kernel",
- .offset = 0x020000,
- .size = 0x100000,
- } , {
- .name = "rootfs",
- .offset = 0x120000,
- .size = 0x6a0000,
- } , {
- .name = "VCTL",
- .offset = 0x7c0000,
- .size = 0x010000,
- .mask_flags = MTD_WRITEABLE,
- } , {
- .name = "cfg",
- .offset = 0x7d0000,
- .size = 0x010000,
- .mask_flags = MTD_WRITEABLE,
- } , {
- .name = "FIS",
- .offset = 0x7e0000,
- .size = 0x010000,
- .mask_flags = MTD_WRITEABLE,
- }
-};
-#define wbd222_num_partitions ARRAY_SIZE(wbd222_partitions)
-
-static void __init wbd222_init(void)
-{
- gemini_gpio_init();
- platform_register_uart();
- platform_register_pflash(SZ_8M, wbd222_partitions,
- wbd222_num_partitions);
- platform_device_register(&wbd222_leds_device);
- platform_device_register(&wbd222_keys_device);
- platform_register_rtc();
-}
-
-MACHINE_START(WBD222, "Wiliboard WBD-222")
- .atag_offset = 0x100,
- .map_io = gemini_map_io,
- .init_irq = gemini_init_irq,
- .init_time = gemini_timer_init,
- .init_machine = wbd222_init,
- .restart = gemini_restart,
-MACHINE_END
diff --git a/arch/arm/mach-gemini/common.h b/arch/arm/mach-gemini/common.h
deleted file mode 100644
index dd883698ff7e..000000000000
--- a/arch/arm/mach-gemini/common.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Common Gemini architecture functions
- *
- * Copyright (C) 2008-2009 Paulius Zaleckas <paulius.zaleckas@teltonika.lt>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#ifndef __GEMINI_COMMON_H__
-#define __GEMINI_COMMON_H__
-
-#include <linux/reboot.h>
-
-struct mtd_partition;
-
-extern void gemini_map_io(void);
-extern void gemini_init_irq(void);
-extern void gemini_timer_init(void);
-extern void gemini_gpio_init(void);
-extern void platform_register_rtc(void);
-
-/* Common platform devices registration functions */
-extern int platform_register_uart(void);
-extern int platform_register_pflash(unsigned int size,
- struct mtd_partition *parts,
- unsigned int nr_parts);
-
-extern void gemini_restart(enum reboot_mode mode, const char *cmd);
-
-#endif /* __GEMINI_COMMON_H__ */
diff --git a/arch/arm/mach-gemini/devices.c b/arch/arm/mach-gemini/devices.c
deleted file mode 100644
index 5cff29818b73..000000000000
--- a/arch/arm/mach-gemini/devices.c
+++ /dev/null
@@ -1,118 +0,0 @@
-/*
- * Common devices definition for Gemini
- *
- * Copyright (C) 2008-2009 Paulius Zaleckas <paulius.zaleckas@teltonika.lt>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/io.h>
-#include <linux/platform_device.h>
-#include <linux/serial_8250.h>
-#include <linux/mtd/physmap.h>
-
-#include <mach/irqs.h>
-#include <mach/hardware.h>
-#include <mach/global_reg.h>
-
-static struct plat_serial8250_port serial_platform_data[] = {
- {
- .membase = (void *)IO_ADDRESS(GEMINI_UART_BASE),
- .mapbase = GEMINI_UART_BASE,
- .irq = IRQ_UART,
- .uartclk = UART_CLK,
- .regshift = 2,
- .iotype = UPIO_MEM,
- .type = PORT_16550A,
- .flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_FIXED_TYPE,
- },
- {},
-};
-
-static struct platform_device serial_device = {
- .name = "serial8250",
- .id = PLAT8250_DEV_PLATFORM,
- .dev = {
- .platform_data = serial_platform_data,
- },
-};
-
-int platform_register_uart(void)
-{
- return platform_device_register(&serial_device);
-}
-
-static struct resource flash_resource = {
- .start = GEMINI_FLASH_BASE,
- .flags = IORESOURCE_MEM,
-};
-
-static struct physmap_flash_data pflash_platform_data = {};
-
-static struct platform_device pflash_device = {
- .name = "physmap-flash",
- .id = 0,
- .dev = {
- .platform_data = &pflash_platform_data,
- },
- .resource = &flash_resource,
- .num_resources = 1,
-};
-
-int platform_register_pflash(unsigned int size, struct mtd_partition *parts,
- unsigned int nr_parts)
-{
- unsigned int reg;
-
- reg = __raw_readl(IO_ADDRESS(GEMINI_GLOBAL_BASE) + GLOBAL_STATUS);
-
- if ((reg & FLASH_TYPE_MASK) != FLASH_TYPE_PARALLEL)
- return -ENXIO;
-
- if (reg & FLASH_WIDTH_16BIT)
- pflash_platform_data.width = 2;
- else
- pflash_platform_data.width = 1;
-
- /* enable parallel flash pins and disable others */
- reg = __raw_readl(IO_ADDRESS(GEMINI_GLOBAL_BASE) + GLOBAL_MISC_CTRL);
- reg &= ~PFLASH_PADS_DISABLE;
- reg |= SFLASH_PADS_DISABLE | NAND_PADS_DISABLE;
- __raw_writel(reg, IO_ADDRESS(GEMINI_GLOBAL_BASE) + GLOBAL_MISC_CTRL);
-
- flash_resource.end = flash_resource.start + size - 1;
-
- pflash_platform_data.parts = parts;
- pflash_platform_data.nr_parts = nr_parts;
-
- return platform_device_register(&pflash_device);
-}
-
-static struct resource gemini_rtc_resources[] = {
- [0] = {
- .start = GEMINI_RTC_BASE,
- .end = GEMINI_RTC_BASE + 0x24,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = IRQ_RTC,
- .end = IRQ_RTC,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct platform_device gemini_rtc_device = {
- .name = "rtc-gemini",
- .id = 0,
- .num_resources = ARRAY_SIZE(gemini_rtc_resources),
- .resource = gemini_rtc_resources,
-};
-
-int __init platform_register_rtc(void)
-{
- return platform_device_register(&gemini_rtc_device);
-}
-
diff --git a/arch/arm/mach-gemini/gpio.c b/arch/arm/mach-gemini/gpio.c
deleted file mode 100644
index 469a76ea0459..000000000000
--- a/arch/arm/mach-gemini/gpio.c
+++ /dev/null
@@ -1,231 +0,0 @@
-/*
- * Gemini gpiochip and interrupt routines
- *
- * Copyright (C) 2008-2009 Paulius Zaleckas <paulius.zaleckas@teltonika.lt>
- *
- * Based on plat-mxc/gpio.c:
- * MXC GPIO support. (c) 2008 Daniel Mack <daniel@caiaq.de>
- * Copyright 2008 Juergen Beisert, kernel@pengutronix.de
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/io.h>
-#include <linux/irq.h>
-#include <linux/gpio/driver.h>
-
-#include <mach/hardware.h>
-#include <mach/irqs.h>
-
-#define GPIO_BASE(x) IO_ADDRESS(GEMINI_GPIO_BASE(x))
-#define irq_to_gpio(x) ((x) - GPIO_IRQ_BASE)
-
-/* GPIO registers definition */
-#define GPIO_DATA_OUT 0x0
-#define GPIO_DATA_IN 0x4
-#define GPIO_DIR 0x8
-#define GPIO_DATA_SET 0x10
-#define GPIO_DATA_CLR 0x14
-#define GPIO_PULL_EN 0x18
-#define GPIO_PULL_TYPE 0x1C
-#define GPIO_INT_EN 0x20
-#define GPIO_INT_STAT 0x24
-#define GPIO_INT_MASK 0x2C
-#define GPIO_INT_CLR 0x30
-#define GPIO_INT_TYPE 0x34
-#define GPIO_INT_BOTH_EDGE 0x38
-#define GPIO_INT_LEVEL 0x3C
-#define GPIO_DEBOUNCE_EN 0x40
-#define GPIO_DEBOUNCE_PRESCALE 0x44
-
-#define GPIO_PORT_NUM 3
-
-static void _set_gpio_irqenable(void __iomem *base, unsigned int index,
- int enable)
-{
- unsigned int reg;
-
- reg = __raw_readl(base + GPIO_INT_EN);
- reg = (reg & (~(1 << index))) | (!!enable << index);
- __raw_writel(reg, base + GPIO_INT_EN);
-}
-
-static void gpio_ack_irq(struct irq_data *d)
-{
- unsigned int gpio = irq_to_gpio(d->irq);
- void __iomem *base = GPIO_BASE(gpio / 32);
-
- __raw_writel(1 << (gpio % 32), base + GPIO_INT_CLR);
-}
-
-static void gpio_mask_irq(struct irq_data *d)
-{
- unsigned int gpio = irq_to_gpio(d->irq);
- void __iomem *base = GPIO_BASE(gpio / 32);
-
- _set_gpio_irqenable(base, gpio % 32, 0);
-}
-
-static void gpio_unmask_irq(struct irq_data *d)
-{
- unsigned int gpio = irq_to_gpio(d->irq);
- void __iomem *base = GPIO_BASE(gpio / 32);
-
- _set_gpio_irqenable(base, gpio % 32, 1);
-}
-
-static int gpio_set_irq_type(struct irq_data *d, unsigned int type)
-{
- unsigned int gpio = irq_to_gpio(d->irq);
- unsigned int gpio_mask = 1 << (gpio % 32);
- void __iomem *base = GPIO_BASE(gpio / 32);
- unsigned int reg_both, reg_level, reg_type;
-
- reg_type = __raw_readl(base + GPIO_INT_TYPE);
- reg_level = __raw_readl(base + GPIO_INT_LEVEL);
- reg_both = __raw_readl(base + GPIO_INT_BOTH_EDGE);
-
- switch (type) {
- case IRQ_TYPE_EDGE_BOTH:
- reg_type &= ~gpio_mask;
- reg_both |= gpio_mask;
- break;
- case IRQ_TYPE_EDGE_RISING:
- reg_type &= ~gpio_mask;
- reg_both &= ~gpio_mask;
- reg_level &= ~gpio_mask;
- break;
- case IRQ_TYPE_EDGE_FALLING:
- reg_type &= ~gpio_mask;
- reg_both &= ~gpio_mask;
- reg_level |= gpio_mask;
- break;
- case IRQ_TYPE_LEVEL_HIGH:
- reg_type |= gpio_mask;
- reg_level &= ~gpio_mask;
- break;
- case IRQ_TYPE_LEVEL_LOW:
- reg_type |= gpio_mask;
- reg_level |= gpio_mask;
- break;
- default:
- return -EINVAL;
- }
-
- __raw_writel(reg_type, base + GPIO_INT_TYPE);
- __raw_writel(reg_level, base + GPIO_INT_LEVEL);
- __raw_writel(reg_both, base + GPIO_INT_BOTH_EDGE);
-
- gpio_ack_irq(d);
-
- return 0;
-}
-
-static void gpio_irq_handler(struct irq_desc *desc)
-{
- unsigned int port = (unsigned int)irq_desc_get_handler_data(desc);
- unsigned int gpio_irq_no, irq_stat;
-
- irq_stat = __raw_readl(GPIO_BASE(port) + GPIO_INT_STAT);
-
- gpio_irq_no = GPIO_IRQ_BASE + port * 32;
- for (; irq_stat != 0; irq_stat >>= 1, gpio_irq_no++) {
-
- if ((irq_stat & 1) == 0)
- continue;
-
- generic_handle_irq(gpio_irq_no);
- }
-}
-
-static struct irq_chip gpio_irq_chip = {
- .name = "GPIO",
- .irq_ack = gpio_ack_irq,
- .irq_mask = gpio_mask_irq,
- .irq_unmask = gpio_unmask_irq,
- .irq_set_type = gpio_set_irq_type,
-};
-
-static void _set_gpio_direction(struct gpio_chip *chip, unsigned offset,
- int dir)
-{
- void __iomem *base = GPIO_BASE(offset / 32);
- unsigned int reg;
-
- reg = __raw_readl(base + GPIO_DIR);
- if (dir)
- reg |= 1 << (offset % 32);
- else
- reg &= ~(1 << (offset % 32));
- __raw_writel(reg, base + GPIO_DIR);
-}
-
-static void gemini_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
-{
- void __iomem *base = GPIO_BASE(offset / 32);
-
- if (value)
- __raw_writel(1 << (offset % 32), base + GPIO_DATA_SET);
- else
- __raw_writel(1 << (offset % 32), base + GPIO_DATA_CLR);
-}
-
-static int gemini_gpio_get(struct gpio_chip *chip, unsigned offset)
-{
- void __iomem *base = GPIO_BASE(offset / 32);
-
- return (__raw_readl(base + GPIO_DATA_IN) >> (offset % 32)) & 1;
-}
-
-static int gemini_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
-{
- _set_gpio_direction(chip, offset, 0);
- return 0;
-}
-
-static int gemini_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
- int value)
-{
- _set_gpio_direction(chip, offset, 1);
- gemini_gpio_set(chip, offset, value);
- return 0;
-}
-
-static struct gpio_chip gemini_gpio_chip = {
- .label = "Gemini",
- .direction_input = gemini_gpio_direction_input,
- .get = gemini_gpio_get,
- .direction_output = gemini_gpio_direction_output,
- .set = gemini_gpio_set,
- .base = 0,
- .ngpio = GPIO_PORT_NUM * 32,
-};
-
-void __init gemini_gpio_init(void)
-{
- int i, j;
-
- for (i = 0; i < GPIO_PORT_NUM; i++) {
- /* disable, unmask and clear all interrupts */
- __raw_writel(0x0, GPIO_BASE(i) + GPIO_INT_EN);
- __raw_writel(0x0, GPIO_BASE(i) + GPIO_INT_MASK);
- __raw_writel(~0x0, GPIO_BASE(i) + GPIO_INT_CLR);
-
- for (j = GPIO_IRQ_BASE + i * 32;
- j < GPIO_IRQ_BASE + (i + 1) * 32; j++) {
- irq_set_chip_and_handler(j, &gpio_irq_chip,
- handle_edge_irq);
- irq_clear_status_flags(j, IRQ_NOREQUEST);
- }
-
- irq_set_chained_handler_and_data(IRQ_GPIO(i), gpio_irq_handler,
- (void *)i);
- }
-
- BUG_ON(gpiochip_add_data(&gemini_gpio_chip, NULL));
-}
diff --git a/arch/arm/mach-gemini/idle.c b/arch/arm/mach-gemini/idle.c
deleted file mode 100644
index ddf8ec9d203b..000000000000
--- a/arch/arm/mach-gemini/idle.c
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * arch/arm/mach-gemini/idle.c
- */
-
-#include <linux/init.h>
-#include <asm/system_misc.h>
-#include <asm/proc-fns.h>
-
-static void gemini_idle(void)
-{
- /*
- * Because of broken hardware we have to enable interrupts or the CPU
- * will never wake up... Actually it is not very good to enable
- * interrupts first since the scheduler can miss a tick, but there is
- * no other way around this. Platforms that need it for power saving
- * should enable it in init code, since by default it is
- * disabled.
- */
-
- /* FIXME: Enabling interrupts here is racy! */
- local_irq_enable();
- cpu_do_idle();
-}
-
-static int __init gemini_idle_init(void)
-{
- arm_pm_idle = gemini_idle;
- return 0;
-}
-
-arch_initcall(gemini_idle_init);
diff --git a/arch/arm/mach-gemini/include/mach/entry-macro.S b/arch/arm/mach-gemini/include/mach/entry-macro.S
deleted file mode 100644
index f044e430bfa4..000000000000
--- a/arch/arm/mach-gemini/include/mach/entry-macro.S
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Low-level IRQ helper macros for Gemini platform.
- *
- * Copyright (C) 2001-2006 Storlink, Corp.
- * Copyright (C) 2008-2009 Paulius Zaleckas <paulius.zaleckas@teltonika.lt>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-#include <mach/hardware.h>
-
-#define IRQ_STATUS 0x14
-
- .macro get_irqnr_preamble, base, tmp
- .endm
-
- .macro get_irqnr_and_base, irqnr, irqstat, base, tmp
- ldr \irqstat, =IO_ADDRESS(GEMINI_INTERRUPT_BASE + IRQ_STATUS)
- ldr \irqnr, [\irqstat]
- cmp \irqnr, #0
- beq 2313f
- mov \tmp, \irqnr
- mov \irqnr, #0
-2312:
- tst \tmp, #1
- bne 2313f
- add \irqnr, \irqnr, #1
- mov \tmp, \tmp, lsr #1
- cmp \irqnr, #31
- bcc 2312b
-2313:
- .endm
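
For reference, a minimal standalone C sketch (not part of the patch) of what the get_irqnr_and_base loop above computes: scan the interrupt status word for the lowest set bit and return its index, or a sentinel when nothing is pending. The sample status value below is made up.

#include <stdio.h>

/* Return the lowest pending IRQ number (0..31), or -1 when none is pending. */
static int gemini_decode_irq(unsigned int status)
{
        int irqnr = 0;

        if (!status)
                return -1;                /* the "beq 2313f" early exit */

        while (!(status & 1) && irqnr < 31) {
                status >>= 1;
                irqnr++;
        }
        return irqnr;
}

int main(void)
{
        /* bit 18 set -> lowest pending interrupt is IRQ 18 */
        printf("status 0x00040000 -> IRQ %d\n", gemini_decode_irq(0x00040000));
        return 0;
}
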
diff --git a/arch/arm/mach-gemini/include/mach/global_reg.h b/arch/arm/mach-gemini/include/mach/global_reg.h
deleted file mode 100644
index de7ff7e849fc..000000000000
--- a/arch/arm/mach-gemini/include/mach/global_reg.h
+++ /dev/null
@@ -1,278 +0,0 @@
-/*
- * This file contains the hardware definitions for Gemini.
- *
- * Copyright (C) 2009 Paulius Zaleckas <paulius.zaleckas@teltonika.lt>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-#ifndef __MACH_GLOBAL_REG_H
-#define __MACH_GLOBAL_REG_H
-
-/* Global Word ID Register */
-#define GLOBAL_ID 0x00
-
-#define CHIP_ID(reg) ((reg) >> 8)
-#define CHIP_REVISION(reg) ((reg) & 0xFF)
-
-/* Global Status Register */
-#define GLOBAL_STATUS 0x04
-
-#define CPU_BIG_ENDIAN (1 << 31)
-#define PLL_OSC_30M (1 << 30) /* else 60MHz */
-
-#define OPERATION_MODE_MASK (0xF << 26)
-#define OPM_IDDQ (0xF << 26)
-#define OPM_NAND (0xE << 26)
-#define OPM_RING (0xD << 26)
-#define OPM_DIRECT_BOOT (0xC << 26)
-#define OPM_USB1_PHY_TEST (0xB << 26)
-#define OPM_USB0_PHY_TEST (0xA << 26)
-#define OPM_SATA1_PHY_TEST (0x9 << 26)
-#define OPM_SATA0_PHY_TEST (0x8 << 26)
-#define OPM_ICE_ARM (0x7 << 26)
-#define OPM_ICE_FARADAY (0x6 << 26)
-#define OPM_PLL_BYPASS (0x5 << 26)
-#define OPM_DEBUG (0x4 << 26)
-#define OPM_BURN_IN (0x3 << 26)
-#define OPM_MBIST (0x2 << 26)
-#define OPM_SCAN (0x1 << 26)
-#define OPM_REAL (0x0 << 26)
-
-#define FLASH_TYPE_MASK (0x3 << 24)
-#define FLASH_TYPE_NAND_2K (0x3 << 24)
-#define FLASH_TYPE_NAND_512 (0x2 << 24)
-#define FLASH_TYPE_PARALLEL (0x1 << 24)
-#define FLASH_TYPE_SERIAL (0x0 << 24)
-/* if parallel */
-#define FLASH_WIDTH_16BIT (1 << 23) /* else 8 bit */
-/* if serial */
-#define FLASH_ATMEL (1 << 23) /* else STM */
-
-#define FLASH_SIZE_MASK (0x3 << 21)
-#define NAND_256M (0x3 << 21) /* and more */
-#define NAND_128M (0x2 << 21)
-#define NAND_64M (0x1 << 21)
-#define NAND_32M (0x0 << 21)
-#define ATMEL_16M (0x3 << 21) /* and more */
-#define ATMEL_8M (0x2 << 21)
-#define ATMEL_4M_2M (0x1 << 21)
-#define ATMEL_1M (0x0 << 21) /* and less */
-#define STM_32M (1 << 22) /* and more */
-#define STM_16M (0 << 22) /* and less */
-
-#define FLASH_PARALLEL_HIGH_PIN_CNT (1 << 20) /* else low pin cnt */
-
-#define CPU_AHB_RATIO_MASK (0x3 << 18)
-#define CPU_AHB_1_1 (0x0 << 18)
-#define CPU_AHB_3_2 (0x1 << 18)
-#define CPU_AHB_24_13 (0x2 << 18)
-#define CPU_AHB_2_1 (0x3 << 18)
-
-#define REG_TO_AHB_SPEED(reg) ((((reg) >> 15) & 0x7) * 10 + 130)
-#define AHB_SPEED_TO_REG(x) ((((x - 130)) / 10) << 15)
-
-/* it is possible to override some settings, use >> OVERRIDE_xxxx_SHIFT */
-#define OVERRIDE_FLASH_TYPE_SHIFT 16
-#define OVERRIDE_FLASH_WIDTH_SHIFT 16
-#define OVERRIDE_FLASH_SIZE_SHIFT 16
-#define OVERRIDE_CPU_AHB_RATIO_SHIFT 15
-#define OVERRIDE_AHB_SPEED_SHIFT 15
-
-/* Global PLL Control Register */
-#define GLOBAL_PLL_CTRL 0x08
-
-#define PLL_BYPASS (1 << 31)
-#define PLL_POWER_DOWN (1 << 8)
-#define PLL_CONTROL_Q (0x1F << 0)
-
-/* Global Soft Reset Control Register */
-#define GLOBAL_RESET 0x0C
-
-#define RESET_GLOBAL (1 << 31)
-#define RESET_CPU1 (1 << 30)
-#define RESET_TVE (1 << 28)
-#define RESET_SATA1 (1 << 27)
-#define RESET_SATA0 (1 << 26)
-#define RESET_CIR (1 << 25)
-#define RESET_EXT_DEV (1 << 24)
-#define RESET_WD (1 << 23)
-#define RESET_GPIO2 (1 << 22)
-#define RESET_GPIO1 (1 << 21)
-#define RESET_GPIO0 (1 << 20)
-#define RESET_SSP (1 << 19)
-#define RESET_UART (1 << 18)
-#define RESET_TIMER (1 << 17)
-#define RESET_RTC (1 << 16)
-#define RESET_INT1 (1 << 15)
-#define RESET_INT0 (1 << 14)
-#define RESET_LCD (1 << 13)
-#define RESET_LPC (1 << 12)
-#define RESET_APB (1 << 11)
-#define RESET_DMA (1 << 10)
-#define RESET_USB1 (1 << 9)
-#define RESET_USB0 (1 << 8)
-#define RESET_PCI (1 << 7)
-#define RESET_GMAC1 (1 << 6)
-#define RESET_GMAC0 (1 << 5)
-#define RESET_SECURITY (1 << 4)
-#define RESET_RAID (1 << 3)
-#define RESET_IDE (1 << 2)
-#define RESET_FLASH (1 << 1)
-#define RESET_DRAM (1 << 0)
-
-/* Global IO Pad Driving Capability Control Register */
-#define GLOBAL_IO_DRIVING_CTRL 0x10
-
-#define DRIVING_CURRENT_MASK 0x3
-
-/* here 00-4mA, 01-8mA, 10-12mA, 11-16mA */
-#define GPIO1_PADS_31_28_SHIFT 28
-#define GPIO0_PADS_31_16_SHIFT 26
-#define GPIO0_PADS_15_0_SHIFT 24
-#define PCI_AND_EXT_RESET_PADS_SHIFT 22
-#define IDE_PADS_SHIFT 20
-#define GMAC1_PADS_SHIFT 18
-#define GMAC0_PADS_SHIFT 16
-/* DRAM is not in mA and poorly documented */
-#define DRAM_CLOCK_PADS_SHIFT 8
-#define DRAM_DATA_PADS_SHIFT 4
-#define DRAM_CONTROL_PADS_SHIFT 0
-
-/* Global IO Pad Slew Rate Control Register */
-#define GLOBAL_IO_SLEW_RATE_CTRL 0x14
-
-#define GPIO1_PADS_31_28_SLOW (1 << 10)
-#define GPIO0_PADS_31_16_SLOW (1 << 9)
-#define GPIO0_PADS_15_0_SLOW (1 << 8)
-#define PCI_PADS_SLOW (1 << 7)
-#define IDE_PADS_SLOW (1 << 6)
-#define GMAC1_PADS_SLOW (1 << 5)
-#define GMAC0_PADS_SLOW (1 << 4)
-#define DRAM_CLOCK_PADS_SLOW (1 << 1)
-#define DRAM_IO_PADS_SLOW (1 << 0)
-
-/*
- * General skew control defines
- * 16 steps, each step is around 0.2ns
- */
-#define SKEW_MASK 0xF
-
-/* Global IDE PAD Skew Control Register */
-#define GLOBAL_IDE_SKEW_CTRL 0x18
-
-#define IDE1_HOST_STROBE_DELAY_SHIFT 28
-#define IDE1_DEVICE_STROBE_DELAY_SHIFT 24
-#define IDE1_OUTPUT_IO_SKEW_SHIFT 20
-#define IDE1_INPUT_IO_SKEW_SHIFT 16
-#define IDE0_HOST_STROBE_DELAY_SHIFT 12
-#define IDE0_DEVICE_STROBE_DELAY_SHIFT 8
-#define IDE0_OUTPUT_IO_SKEW_SHIFT 4
-#define IDE0_INPUT_IO_SKEW_SHIFT 0
-
-/* Global GMAC Control Pad Skew Control Register */
-#define GLOBAL_GMAC_CTRL_SKEW_CTRL 0x1C
-
-#define GMAC1_TXC_SKEW_SHIFT 28
-#define GMAC1_TXEN_SKEW_SHIFT 24
-#define GMAC1_RXC_SKEW_SHIFT 20
-#define GMAC1_RXDV_SKEW_SHIFT 16
-#define GMAC0_TXC_SKEW_SHIFT 12
-#define GMAC0_TXEN_SKEW_SHIFT 8
-#define GMAC0_RXC_SKEW_SHIFT 4
-#define GMAC0_RXDV_SKEW_SHIFT 0
-
-/* Global GMAC0 Data PAD Skew Control Register */
-#define GLOBAL_GMAC0_DATA_SKEW_CTRL 0x20
-/* Global GMAC1 Data PAD Skew Control Register */
-#define GLOBAL_GMAC1_DATA_SKEW_CTRL 0x24
-
-#define GMAC_TXD_SKEW_SHIFT(x) (((x) * 4) + 16)
-#define GMAC_RXD_SKEW_SHIFT(x) ((x) * 4)
-
-/* CPU has two AHB busses. */
-
-/* Global Arbitration0 Control Register */
-#define GLOBAL_ARBITRATION0_CTRL 0x28
-
-#define BOOT_CONTROLLER_HIGH_PRIO (1 << 3)
-#define DMA_BUS1_HIGH_PRIO (1 << 2)
-#define CPU0_HIGH_PRIO (1 << 0)
-
-/* Global Arbitration1 Control Register */
-#define GLOBAL_ARBITRATION1_CTRL 0x2C
-
-#define TVE_HIGH_PRIO (1 << 9)
-#define PCI_HIGH_PRIO (1 << 8)
-#define USB1_HIGH_PRIO (1 << 7)
-#define USB0_HIGH_PRIO (1 << 6)
-#define GMAC1_HIGH_PRIO (1 << 5)
-#define GMAC0_HIGH_PRIO (1 << 4)
-#define SECURITY_HIGH_PRIO (1 << 3)
-#define RAID_HIGH_PRIO (1 << 2)
-#define IDE_HIGH_PRIO (1 << 1)
-#define DMA_BUS2_HIGH_PRIO (1 << 0)
-
-/* Common bits for both arbitration registers */
-#define BURST_LENGTH_SHIFT 16
-#define BURST_LENGTH_MASK (0x3F << 16)
-
-/* Miscellaneous Control Register */
-#define GLOBAL_MISC_CTRL 0x30
-
-#define MEMORY_SPACE_SWAP (1 << 31)
-#define USB1_PLUG_MINIB (1 << 30) /* else plug is mini-A */
-#define USB0_PLUG_MINIB (1 << 29)
-#define GMAC_GMII (1 << 28)
-#define GMAC_1_ENABLE (1 << 27)
-/* TODO: define ATA/SATA bits */
-#define USB1_VBUS_ON (1 << 23)
-#define USB0_VBUS_ON (1 << 22)
-#define APB_CLKOUT_ENABLE (1 << 21)
-#define TVC_CLKOUT_ENABLE (1 << 20)
-#define EXT_CLKIN_ENABLE (1 << 19)
-#define PCI_66MHZ (1 << 18) /* else 33 MHz */
-#define PCI_CLKOUT_ENABLE (1 << 17)
-#define LPC_CLKOUT_ENABLE (1 << 16)
-#define USB1_WAKEUP_ON (1 << 15)
-#define USB0_WAKEUP_ON (1 << 14)
-/* TODO: define PCI idle detect bits */
-#define TVC_PADS_ENABLE (1 << 9)
-#define SSP_PADS_ENABLE (1 << 8)
-#define LCD_PADS_ENABLE (1 << 7)
-#define LPC_PADS_ENABLE (1 << 6)
-#define PCI_PADS_ENABLE (1 << 5)
-#define IDE_PADS_ENABLE (1 << 4)
-#define DRAM_PADS_POWER_DOWN (1 << 3)
-#define NAND_PADS_DISABLE (1 << 2)
-#define PFLASH_PADS_DISABLE (1 << 1)
-#define SFLASH_PADS_DISABLE (1 << 0)
-
-/* Global Clock Control Register */
-#define GLOBAL_CLOCK_CTRL 0x34
-
-#define POWER_STATE_G0 (1 << 31)
-#define POWER_STATE_S1 (1 << 30) /* else it is S3/S4 state */
-#define SECURITY_APB_AHB (1 << 29)
-/* else Security APB clk will be 0.75xAHB */
-/* TODO: TVC clock divider */
-#define PCI_CLKRUN_ENABLE (1 << 16)
-#define BOOT_CLK_DISABLE (1 << 13)
-#define TVC_CLK_DISABLE (1 << 12)
-#define FLASH_CLK_DISABLE (1 << 11)
-#define DDR_CLK_DISABLE (1 << 10)
-#define PCI_CLK_DISABLE (1 << 9)
-#define IDE_CLK_DISABLE (1 << 8)
-#define USB1_CLK_DISABLE (1 << 7)
-#define USB0_CLK_DISABLE (1 << 6)
-#define SATA1_CLK_DISABLE (1 << 5)
-#define SATA0_CLK_DISABLE (1 << 4)
-#define GMAC1_CLK_DISABLE (1 << 3)
-#define GMAC0_CLK_DISABLE (1 << 2)
-#define SECURITY_CLK_DISABLE (1 << 1)
-
-/* TODO: other registers definitions if needed */
-
-#endif /* __MACH_GLOBAL_REG_H */
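
For reference, a minimal standalone C sketch (not part of the patch) of the AHB speed encoding behind the REG_TO_AHB_SPEED()/AHB_SPEED_TO_REG() helpers above: GLOBAL_STATUS bits 17:15 hold a 3-bit step count, giving 130 MHz plus 10 MHz per step. The macro bodies are copied from global_reg.h; the main() harness is illustration only.

#include <stdio.h>

#define REG_TO_AHB_SPEED(reg)        ((((reg) >> 15) & 0x7) * 10 + 130)
#define AHB_SPEED_TO_REG(x)          ((((x - 130)) / 10) << 15)

int main(void)
{
        unsigned int field;

        /* field 0 -> 130 MHz ... field 7 -> 200 MHz, in 10 MHz steps */
        for (field = 0; field < 8; field++) {
                unsigned int reg = field << 15;
                unsigned int mhz = REG_TO_AHB_SPEED(reg);

                printf("field %u -> %u MHz (round trip: 0x%08x)\n",
                       field, mhz, AHB_SPEED_TO_REG(mhz));
        }
        return 0;
}
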
diff --git a/arch/arm/mach-gemini/include/mach/hardware.h b/arch/arm/mach-gemini/include/mach/hardware.h
deleted file mode 100644
index f0390f184742..000000000000
--- a/arch/arm/mach-gemini/include/mach/hardware.h
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * This file contains the hardware definitions for Gemini.
- *
- * Copyright (C) 2001-2006 Storlink, Corp.
- * Copyright (C) 2008-2009 Paulius Zaleckas <paulius.zaleckas@teltonika.lt>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-#ifndef __MACH_HARDWARE_H
-#define __MACH_HARDWARE_H
-
-/*
- * Memory Map definitions
- */
-#ifdef CONFIG_GEMINI_MEM_SWAP
-# define GEMINI_DRAM_BASE 0x00000000
-# define GEMINI_SRAM_BASE 0x70000000
-#else
-# define GEMINI_SRAM_BASE 0x00000000
-# define GEMINI_DRAM_BASE 0x10000000
-#endif
-#define GEMINI_FLASH_BASE 0x30000000
-#define GEMINI_GLOBAL_BASE 0x40000000
-#define GEMINI_WAQTCHDOG_BASE 0x41000000
-#define GEMINI_UART_BASE 0x42000000
-#define GEMINI_TIMER_BASE 0x43000000
-#define GEMINI_LCD_BASE 0x44000000
-#define GEMINI_RTC_BASE 0x45000000
-#define GEMINI_SATA_BASE 0x46000000
-#define GEMINI_LPC_HOST_BASE 0x47000000
-#define GEMINI_LPC_IO_BASE 0x47800000
-#define GEMINI_INTERRUPT_BASE 0x48000000
-/* TODO: Different interrupt controllers when SMP
- * #define GEMINI_INTERRUPT0_BASE 0x48000000
- * #define GEMINI_INTERRUPT1_BASE 0x49000000
- */
-#define GEMINI_SSP_CTRL_BASE 0x4A000000
-#define GEMINI_POWER_CTRL_BASE 0x4B000000
-#define GEMINI_CIR_BASE 0x4C000000
-#define GEMINI_GPIO_BASE(x) (0x4D000000 + (x) * 0x1000000)
-#define GEMINI_PCI_IO_BASE 0x50000000
-#define GEMINI_PCI_MEM_BASE 0x58000000
-#define GEMINI_TOE_BASE 0x60000000
-#define GEMINI_GMAC0_BASE 0x6000A000
-#define GEMINI_GMAC1_BASE 0x6000E000
-#define GEMINI_SECURITY_BASE 0x62000000
-#define GEMINI_IDE0_BASE 0x63000000
-#define GEMINI_IDE1_BASE 0x63400000
-#define GEMINI_RAID_BASE 0x64000000
-#define GEMINI_FLASH_CTRL_BASE 0x65000000
-#define GEMINI_DRAM_CTRL_BASE 0x66000000
-#define GEMINI_GENERAL_DMA_BASE 0x67000000
-#define GEMINI_USB0_BASE 0x68000000
-#define GEMINI_USB1_BASE 0x69000000
-#define GEMINI_BIG_ENDIAN_BASE 0x80000000
-
-
-/*
- * UART Clock when System clk is 150MHz
- */
-#define UART_CLK 48000000
-
-/*
- * macro to get at IO space when running virtually
- */
-#define IO_ADDRESS(x) IOMEM((((x) & 0xFFF00000) >> 4) | ((x) & 0x000FFFFF) | 0xF0000000)
-
-#endif
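
For reference, a minimal standalone C sketch (not part of the patch) of the static IO_ADDRESS() mapping above: the 0x4xxxxxxx peripheral window is folded into the 0xFxxxxxxx virtual window by shifting the top 12 address bits right by four and OR-ing in 0xF0000000. The IOMEM() cast is dropped so the sketch builds as a plain host program; the base addresses come from hardware.h.

#include <stdio.h>

#define IO_ADDRESS(x)        ((((x) & 0xFFF00000UL) >> 4) | ((x) & 0x000FFFFFUL) | 0xF0000000UL)

#define GEMINI_UART_BASE     0x42000000UL
#define GEMINI_GPIO_BASE(x)  (0x4D000000UL + (x) * 0x1000000UL)

int main(void)
{
        /* 0x42000000 -> 0xF4200000, 0x4E000000 -> 0xF4E00000 */
        printf("UART:  phys 0x%08lx -> virt 0x%08lx\n",
               GEMINI_UART_BASE, IO_ADDRESS(GEMINI_UART_BASE));
        printf("GPIO1: phys 0x%08lx -> virt 0x%08lx\n",
               GEMINI_GPIO_BASE(1), IO_ADDRESS(GEMINI_GPIO_BASE(1)));
        return 0;
}
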
diff --git a/arch/arm/mach-gemini/include/mach/irqs.h b/arch/arm/mach-gemini/include/mach/irqs.h
deleted file mode 100644
index 06bc47e77e8b..000000000000
--- a/arch/arm/mach-gemini/include/mach/irqs.h
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Copyright (C) 2001-2006 Storlink, Corp.
- * Copyright (C) 2008-2009 Paulius Zaleckas <paulius.zaleckas@teltonika.lt>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#ifndef __MACH_IRQS_H__
-#define __MACH_IRQS_H__
-
-#define IRQ_SERIRQ1 31
-#define IRQ_SERIRQ0 30
-#define IRQ_PCID 29
-#define IRQ_PCIC 28
-#define IRQ_PCIB 27
-#define IRQ_PWR 26
-#define IRQ_CIR 25
-#define IRQ_GPIO(x) (22 + (x))
-#define IRQ_SSP 21
-#define IRQ_LPC 20
-#define IRQ_LCD 19
-#define IRQ_UART 18
-#define IRQ_RTC 17
-#define IRQ_TIMER3 16
-#define IRQ_TIMER2 15
-#define IRQ_TIMER1 14
-#define IRQ_FLASH 12
-#define IRQ_USB1 11
-#define IRQ_USB0 10
-#define IRQ_DMA 9
-#define IRQ_PCI 8
-#define IRQ_IPSEC 7
-#define IRQ_RAID 6
-#define IRQ_IDE1 5
-#define IRQ_IDE0 4
-#define IRQ_WATCHDOG 3
-#define IRQ_GMAC1 2
-#define IRQ_GMAC0 1
-#define IRQ_IPI 0
-
-#define NORMAL_IRQ_NUM 32
-
-#define GPIO_IRQ_BASE NORMAL_IRQ_NUM
-#define GPIO_IRQ_NUM (3 * 32)
-
-#define ARCH_TIMER_IRQ IRQ_TIMER2
-
-#define NR_IRQS (NORMAL_IRQ_NUM + GPIO_IRQ_NUM)
-
-#endif /* __MACH_IRQS_H__ */
diff --git a/arch/arm/mach-gemini/include/mach/uncompress.h b/arch/arm/mach-gemini/include/mach/uncompress.h
deleted file mode 100644
index 02e225673acb..000000000000
--- a/arch/arm/mach-gemini/include/mach/uncompress.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Copyright (C) 2008-2009 Paulius Zaleckas <paulius.zaleckas@teltonika.lt>
- *
- * Based on mach-pxa/include/mach/uncompress.h:
- * Copyright: (C) 2001 MontaVista Software Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#ifndef __MACH_UNCOMPRESS_H
-#define __MACH_UNCOMPRESS_H
-
-#include <linux/serial_reg.h>
-#include <mach/hardware.h>
-
-static volatile unsigned long * const UART = (unsigned long *)GEMINI_UART_BASE;
-
-/*
- * The following code assumes the serial port has already been
- * initialized by the bootloader. If you didn't set up a port in
- * your bootloader then nothing will appear (which might be desired).
- */
-static inline void putc(char c)
-{
- while (!(UART[UART_LSR] & UART_LSR_THRE))
- barrier();
- UART[UART_TX] = c;
-}
-
-static inline void flush(void)
-{
-}
-
-/*
- * nothing to do
- */
-#define arch_decomp_setup()
-
-#endif /* __MACH_UNCOMPRESS_H */
diff --git a/arch/arm/mach-gemini/irq.c b/arch/arm/mach-gemini/irq.c
deleted file mode 100644
index d929b3ff18fd..000000000000
--- a/arch/arm/mach-gemini/irq.c
+++ /dev/null
@@ -1,105 +0,0 @@
-/*
- * Interrupt routines for Gemini
- *
- * Copyright (C) 2001-2006 Storlink, Corp.
- * Copyright (C) 2008-2009 Paulius Zaleckas <paulius.zaleckas@teltonika.lt>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-#include <linux/init.h>
-#include <linux/io.h>
-#include <linux/ioport.h>
-#include <linux/stddef.h>
-#include <linux/list.h>
-#include <linux/sched.h>
-#include <linux/cpu.h>
-
-#include <asm/irq.h>
-#include <asm/mach/irq.h>
-#include <asm/system_misc.h>
-#include <mach/hardware.h>
-
-#define IRQ_SOURCE(base_addr) (base_addr + 0x00)
-#define IRQ_MASK(base_addr) (base_addr + 0x04)
-#define IRQ_CLEAR(base_addr) (base_addr + 0x08)
-#define IRQ_TMODE(base_addr) (base_addr + 0x0C)
-#define IRQ_TLEVEL(base_addr) (base_addr + 0x10)
-#define IRQ_STATUS(base_addr) (base_addr + 0x14)
-#define FIQ_SOURCE(base_addr) (base_addr + 0x20)
-#define FIQ_MASK(base_addr) (base_addr + 0x24)
-#define FIQ_CLEAR(base_addr) (base_addr + 0x28)
-#define FIQ_TMODE(base_addr) (base_addr + 0x2C)
-#define FIQ_LEVEL(base_addr) (base_addr + 0x30)
-#define FIQ_STATUS(base_addr) (base_addr + 0x34)
-
-static void gemini_ack_irq(struct irq_data *d)
-{
- __raw_writel(1 << d->irq, IRQ_CLEAR(IO_ADDRESS(GEMINI_INTERRUPT_BASE)));
-}
-
-static void gemini_mask_irq(struct irq_data *d)
-{
- unsigned int mask;
-
- mask = __raw_readl(IRQ_MASK(IO_ADDRESS(GEMINI_INTERRUPT_BASE)));
- mask &= ~(1 << d->irq);
- __raw_writel(mask, IRQ_MASK(IO_ADDRESS(GEMINI_INTERRUPT_BASE)));
-}
-
-static void gemini_unmask_irq(struct irq_data *d)
-{
- unsigned int mask;
-
- mask = __raw_readl(IRQ_MASK(IO_ADDRESS(GEMINI_INTERRUPT_BASE)));
- mask |= (1 << d->irq);
- __raw_writel(mask, IRQ_MASK(IO_ADDRESS(GEMINI_INTERRUPT_BASE)));
-}
-
-static struct irq_chip gemini_irq_chip = {
- .name = "INTC",
- .irq_ack = gemini_ack_irq,
- .irq_mask = gemini_mask_irq,
- .irq_unmask = gemini_unmask_irq,
-};
-
-static struct resource irq_resource = {
- .name = "irq_handler",
- .start = GEMINI_INTERRUPT_BASE,
- .end = FIQ_STATUS(GEMINI_INTERRUPT_BASE) + 4,
-};
-
-void __init gemini_init_irq(void)
-{
- unsigned int i, mode = 0, level = 0;
-
- /*
- * Disable the idle handler by default since it is buggy
- * For more info see arch/arm/mach-gemini/idle.c
- */
- cpu_idle_poll_ctrl(true);
-
- request_resource(&iomem_resource, &irq_resource);
-
- for (i = 0; i < NR_IRQS; i++) {
- irq_set_chip(i, &gemini_irq_chip);
- if((i >= IRQ_TIMER1 && i <= IRQ_TIMER3) || (i >= IRQ_SERIRQ0 && i <= IRQ_SERIRQ1)) {
- irq_set_handler(i, handle_edge_irq);
- mode |= 1 << i;
- level |= 1 << i;
- } else {
- irq_set_handler(i, handle_level_irq);
- }
- irq_clear_status_flags(i, IRQ_NOREQUEST | IRQ_NOPROBE);
- }
-
- /* Disable all interrupts */
- __raw_writel(0, IRQ_MASK(IO_ADDRESS(GEMINI_INTERRUPT_BASE)));
- __raw_writel(0, FIQ_MASK(IO_ADDRESS(GEMINI_INTERRUPT_BASE)));
-
- /* Set interrupt mode */
- __raw_writel(mode, IRQ_TMODE(IO_ADDRESS(GEMINI_INTERRUPT_BASE)));
- __raw_writel(level, IRQ_TLEVEL(IO_ADDRESS(GEMINI_INTERRUPT_BASE)));
-}
diff --git a/arch/arm/mach-gemini/mm.c b/arch/arm/mach-gemini/mm.c
deleted file mode 100644
index 2c2cd284bb6a..000000000000
--- a/arch/arm/mach-gemini/mm.c
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- * Static mappings for Gemini
- *
- * Copyright (C) 2001-2006 Storlink, Corp.
- * Copyright (C) 2008-2009 Paulius Zaleckas <paulius.zaleckas@teltonika.lt>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-#include <linux/mm.h>
-#include <linux/init.h>
-
-#include <asm/mach/map.h>
-
-#include <mach/hardware.h>
-
-/* Page table mapping for I/O region */
-static struct map_desc gemini_io_desc[] __initdata = {
- {
- .virtual = (unsigned long)IO_ADDRESS(GEMINI_GLOBAL_BASE),
- .pfn =__phys_to_pfn(GEMINI_GLOBAL_BASE),
- .length = SZ_512K,
- .type = MT_DEVICE,
- }, {
- .virtual = (unsigned long)IO_ADDRESS(GEMINI_UART_BASE),
- .pfn = __phys_to_pfn(GEMINI_UART_BASE),
- .length = SZ_512K,
- .type = MT_DEVICE,
- }, {
- .virtual = (unsigned long)IO_ADDRESS(GEMINI_TIMER_BASE),
- .pfn = __phys_to_pfn(GEMINI_TIMER_BASE),
- .length = SZ_512K,
- .type = MT_DEVICE,
- }, {
- .virtual = (unsigned long)IO_ADDRESS(GEMINI_INTERRUPT_BASE),
- .pfn = __phys_to_pfn(GEMINI_INTERRUPT_BASE),
- .length = SZ_512K,
- .type = MT_DEVICE,
- }, {
- .virtual = (unsigned long)IO_ADDRESS(GEMINI_POWER_CTRL_BASE),
- .pfn = __phys_to_pfn(GEMINI_POWER_CTRL_BASE),
- .length = SZ_512K,
- .type = MT_DEVICE,
- }, {
- .virtual = (unsigned long)IO_ADDRESS(GEMINI_GPIO_BASE(0)),
- .pfn = __phys_to_pfn(GEMINI_GPIO_BASE(0)),
- .length = SZ_512K,
- .type = MT_DEVICE,
- }, {
- .virtual = (unsigned long)IO_ADDRESS(GEMINI_GPIO_BASE(1)),
- .pfn = __phys_to_pfn(GEMINI_GPIO_BASE(1)),
- .length = SZ_512K,
- .type = MT_DEVICE,
- }, {
- .virtual = (unsigned long)IO_ADDRESS(GEMINI_GPIO_BASE(2)),
- .pfn = __phys_to_pfn(GEMINI_GPIO_BASE(2)),
- .length = SZ_512K,
- .type = MT_DEVICE,
- }, {
- .virtual = (unsigned long)IO_ADDRESS(GEMINI_FLASH_CTRL_BASE),
- .pfn = __phys_to_pfn(GEMINI_FLASH_CTRL_BASE),
- .length = SZ_512K,
- .type = MT_DEVICE,
- }, {
- .virtual = (unsigned long)IO_ADDRESS(GEMINI_DRAM_CTRL_BASE),
- .pfn = __phys_to_pfn(GEMINI_DRAM_CTRL_BASE),
- .length = SZ_512K,
- .type = MT_DEVICE,
- }, {
- .virtual = (unsigned long)IO_ADDRESS(GEMINI_GENERAL_DMA_BASE),
- .pfn = __phys_to_pfn(GEMINI_GENERAL_DMA_BASE),
- .length = SZ_512K,
- .type = MT_DEVICE,
- },
-};
-
-void __init gemini_map_io(void)
-{
- iotable_init(gemini_io_desc, ARRAY_SIZE(gemini_io_desc));
-}
diff --git a/arch/arm/mach-gemini/reset.c b/arch/arm/mach-gemini/reset.c
deleted file mode 100644
index 21a6d6d4f9c4..000000000000
--- a/arch/arm/mach-gemini/reset.c
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * Copyright (C) 2001-2006 Storlink, Corp.
- * Copyright (C) 2008-2009 Paulius Zaleckas <paulius.zaleckas@teltonika.lt>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-#ifndef __MACH_SYSTEM_H
-#define __MACH_SYSTEM_H
-
-#include <linux/io.h>
-#include <mach/hardware.h>
-#include <mach/global_reg.h>
-
-#include "common.h"
-
-void gemini_restart(enum reboot_mode mode, const char *cmd)
-{
- __raw_writel(RESET_GLOBAL | RESET_CPU1,
- IO_ADDRESS(GEMINI_GLOBAL_BASE) + GLOBAL_RESET);
-}
-
-#endif /* __MACH_SYSTEM_H */
diff --git a/arch/arm/mach-gemini/time.c b/arch/arm/mach-gemini/time.c
deleted file mode 100644
index f5f18df5aacd..000000000000
--- a/arch/arm/mach-gemini/time.c
+++ /dev/null
@@ -1,239 +0,0 @@
-/*
- * Copyright (C) 2001-2006 Storlink, Corp.
- * Copyright (C) 2008-2009 Paulius Zaleckas <paulius.zaleckas@teltonika.lt>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/io.h>
-#include <mach/hardware.h>
-#include <mach/global_reg.h>
-#include <asm/mach/time.h>
-#include <linux/clockchips.h>
-#include <linux/clocksource.h>
-#include <linux/sched_clock.h>
-
-/*
- * Register definitions for the timers
- */
-
-#define TIMER1_BASE GEMINI_TIMER_BASE
-#define TIMER2_BASE (GEMINI_TIMER_BASE + 0x10)
-#define TIMER3_BASE (GEMINI_TIMER_BASE + 0x20)
-
-#define TIMER_COUNT(BASE) (IO_ADDRESS(BASE) + 0x00)
-#define TIMER_LOAD(BASE) (IO_ADDRESS(BASE) + 0x04)
-#define TIMER_MATCH1(BASE) (IO_ADDRESS(BASE) + 0x08)
-#define TIMER_MATCH2(BASE) (IO_ADDRESS(BASE) + 0x0C)
-#define TIMER_CR (IO_ADDRESS(GEMINI_TIMER_BASE) + 0x30)
-#define TIMER_INTR_STATE (IO_ADDRESS(GEMINI_TIMER_BASE) + 0x34)
-#define TIMER_INTR_MASK (IO_ADDRESS(GEMINI_TIMER_BASE) + 0x38)
-
-#define TIMER_1_CR_ENABLE (1 << 0)
-#define TIMER_1_CR_CLOCK (1 << 1)
-#define TIMER_1_CR_INT (1 << 2)
-#define TIMER_2_CR_ENABLE (1 << 3)
-#define TIMER_2_CR_CLOCK (1 << 4)
-#define TIMER_2_CR_INT (1 << 5)
-#define TIMER_3_CR_ENABLE (1 << 6)
-#define TIMER_3_CR_CLOCK (1 << 7)
-#define TIMER_3_CR_INT (1 << 8)
-#define TIMER_1_CR_UPDOWN (1 << 9)
-#define TIMER_2_CR_UPDOWN (1 << 10)
-#define TIMER_3_CR_UPDOWN (1 << 11)
-#define TIMER_DEFAULT_FLAGS (TIMER_1_CR_UPDOWN | \
- TIMER_3_CR_ENABLE | \
- TIMER_3_CR_UPDOWN)
-
-#define TIMER_1_INT_MATCH1 (1 << 0)
-#define TIMER_1_INT_MATCH2 (1 << 1)
-#define TIMER_1_INT_OVERFLOW (1 << 2)
-#define TIMER_2_INT_MATCH1 (1 << 3)
-#define TIMER_2_INT_MATCH2 (1 << 4)
-#define TIMER_2_INT_OVERFLOW (1 << 5)
-#define TIMER_3_INT_MATCH1 (1 << 6)
-#define TIMER_3_INT_MATCH2 (1 << 7)
-#define TIMER_3_INT_OVERFLOW (1 << 8)
-#define TIMER_INT_ALL_MASK 0x1ff
-
-
-static unsigned int tick_rate;
-
-static u64 notrace gemini_read_sched_clock(void)
-{
- return readl(TIMER_COUNT(TIMER3_BASE));
-}
-
-static int gemini_timer_set_next_event(unsigned long cycles,
- struct clock_event_device *evt)
-{
- u32 cr;
-
- /* Setup the match register */
- cr = readl(TIMER_COUNT(TIMER1_BASE));
- writel(cr + cycles, TIMER_MATCH1(TIMER1_BASE));
- if (readl(TIMER_COUNT(TIMER1_BASE)) - cr > cycles)
- return -ETIME;
-
- return 0;
-}
-
-static int gemini_timer_shutdown(struct clock_event_device *evt)
-{
- u32 cr;
-
- /*
- * Disable also for oneshot: the set_next() call will arm the timer
- * instead.
- */
- /* Stop timer and interrupt. */
- cr = readl(TIMER_CR);
- cr &= ~(TIMER_1_CR_ENABLE | TIMER_1_CR_INT);
- writel(cr, TIMER_CR);
-
- /* Setup counter start from 0 */
- writel(0, TIMER_COUNT(TIMER1_BASE));
- writel(0, TIMER_LOAD(TIMER1_BASE));
-
- /* enable interrupt */
- cr = readl(TIMER_INTR_MASK);
- cr &= ~(TIMER_1_INT_OVERFLOW | TIMER_1_INT_MATCH2);
- cr |= TIMER_1_INT_MATCH1;
- writel(cr, TIMER_INTR_MASK);
-
- /* start the timer */
- cr = readl(TIMER_CR);
- cr |= TIMER_1_CR_ENABLE;
- writel(cr, TIMER_CR);
-
- return 0;
-}
-
-static int gemini_timer_set_periodic(struct clock_event_device *evt)
-{
- u32 period = DIV_ROUND_CLOSEST(tick_rate, HZ);
- u32 cr;
-
- /* Stop timer and interrupt */
- cr = readl(TIMER_CR);
- cr &= ~(TIMER_1_CR_ENABLE | TIMER_1_CR_INT);
- writel(cr, TIMER_CR);
-
- /* Set up the timer to fire at 1/HZ intervals. */
- cr = 0xffffffff - (period - 1);
- writel(cr, TIMER_COUNT(TIMER1_BASE));
- writel(cr, TIMER_LOAD(TIMER1_BASE));
-
- /* enable interrupt on overflow */
- cr = readl(TIMER_INTR_MASK);
- cr &= ~(TIMER_1_INT_MATCH1 | TIMER_1_INT_MATCH2);
- cr |= TIMER_1_INT_OVERFLOW;
- writel(cr, TIMER_INTR_MASK);
-
- /* Start the timer */
- cr = readl(TIMER_CR);
- cr |= TIMER_1_CR_ENABLE;
- cr |= TIMER_1_CR_INT;
- writel(cr, TIMER_CR);
-
- return 0;
-}
-
-/* Use TIMER1 as clock event */
-static struct clock_event_device gemini_clockevent = {
- .name = "TIMER1",
- /* Reasonably fast and accurate clock event */
- .rating = 300,
- .shift = 32,
- .features = CLOCK_EVT_FEAT_PERIODIC |
- CLOCK_EVT_FEAT_ONESHOT,
- .set_next_event = gemini_timer_set_next_event,
- .set_state_shutdown = gemini_timer_shutdown,
- .set_state_periodic = gemini_timer_set_periodic,
- .set_state_oneshot = gemini_timer_shutdown,
- .tick_resume = gemini_timer_shutdown,
-};
-
-/*
- * IRQ handler for the timer
- */
-static irqreturn_t gemini_timer_interrupt(int irq, void *dev_id)
-{
- struct clock_event_device *evt = &gemini_clockevent;
-
- evt->event_handler(evt);
- return IRQ_HANDLED;
-}
-
-static struct irqaction gemini_timer_irq = {
- .name = "Gemini Timer Tick",
- .flags = IRQF_TIMER,
- .handler = gemini_timer_interrupt,
-};
-
-/*
- * Set up the clocksource, sched_clock and the tick clockevent.
- */
-void __init gemini_timer_init(void)
-{
- u32 reg_v;
-
- reg_v = readl(IO_ADDRESS(GEMINI_GLOBAL_BASE + GLOBAL_STATUS));
- tick_rate = REG_TO_AHB_SPEED(reg_v) * 1000000;
-
- printk(KERN_INFO "Bus: %dMHz", tick_rate / 1000000);
-
- tick_rate /= 6; /* APB bus runs at AHB*(1/6) */
-
- switch(reg_v & CPU_AHB_RATIO_MASK) {
- case CPU_AHB_1_1:
- printk(KERN_CONT "(1/1)\n");
- break;
- case CPU_AHB_3_2:
- printk(KERN_CONT "(3/2)\n");
- break;
- case CPU_AHB_24_13:
- printk(KERN_CONT "(24/13)\n");
- break;
- case CPU_AHB_2_1:
- printk(KERN_CONT "(2/1)\n");
- break;
- }
-
- /*
- * Reset the interrupt mask and status
- */
- writel(TIMER_INT_ALL_MASK, TIMER_INTR_MASK);
- writel(0, TIMER_INTR_STATE);
- writel(TIMER_DEFAULT_FLAGS, TIMER_CR);
-
- /*
- * Setup free-running clocksource timer (interrupts
- * disabled.)
- */
- writel(0, TIMER_COUNT(TIMER3_BASE));
- writel(0, TIMER_LOAD(TIMER3_BASE));
- writel(0, TIMER_MATCH1(TIMER3_BASE));
- writel(0, TIMER_MATCH2(TIMER3_BASE));
- clocksource_mmio_init(TIMER_COUNT(TIMER3_BASE),
- "gemini_clocksource", tick_rate,
- 300, 32, clocksource_mmio_readl_up);
- sched_clock_register(gemini_read_sched_clock, 32, tick_rate);
-
- /*
- * Setup clockevent timer (interrupt-driven.)
- */
- writel(0, TIMER_COUNT(TIMER1_BASE));
- writel(0, TIMER_LOAD(TIMER1_BASE));
- writel(0, TIMER_MATCH1(TIMER1_BASE));
- writel(0, TIMER_MATCH2(TIMER1_BASE));
- setup_irq(IRQ_TIMER1, &gemini_timer_irq);
- gemini_clockevent.cpumask = cpumask_of(0);
- clockevents_config_and_register(&gemini_clockevent, tick_rate,
- 1, 0xffffffff);
-
-}
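
For reference, a minimal standalone C sketch (not part of the patch) of the periodic-tick arithmetic in gemini_timer_set_periodic() above: the timer runs from the APB clock at AHB/6, the period is DIV_ROUND_CLOSEST(tick_rate, HZ), and the up-counter is preloaded with 0xffffffff - (period - 1) so it overflows once per tick. The 150 MHz AHB figure and HZ=100 are assumptions made for the example.

#include <stdio.h>

#define HZ 100                                /* assumed tick frequency */

int main(void)
{
        unsigned int ahb_hz = 150000000;      /* example AHB bus speed */
        unsigned int tick_rate = ahb_hz / 6;  /* APB/timer clock is AHB/6 */
        unsigned int period = (tick_rate + HZ / 2) / HZ;   /* DIV_ROUND_CLOSEST */
        unsigned int preload = 0xffffffff - (period - 1);

        /* Expect: tick_rate 25000000, period 250000, preload 0xfffc2f70 */
        printf("tick_rate %u Hz, period %u cycles, preload 0x%08x\n",
               tick_rate, period, preload);
        return 0;
}
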
diff --git a/arch/arm/mach-hisi/platmcpm.c b/arch/arm/mach-hisi/platmcpm.c
index a6c117622d67..f66815c3dd07 100644
--- a/arch/arm/mach-hisi/platmcpm.c
+++ b/arch/arm/mach-hisi/platmcpm.c
@@ -279,6 +279,8 @@ static int __init hip04_smp_init(void)
&hip04_boot_method[0], 4);
if (ret)
goto err;
+
+ ret = -ENODEV;
np_sctl = of_find_compatible_node(NULL, NULL, "hisilicon,sysctrl");
if (!np_sctl)
goto err;
diff --git a/arch/arm/mach-imx/gpc.c b/arch/arm/mach-imx/gpc.c
index 1dc2a34b9dbd..93f584ba0130 100644
--- a/arch/arm/mach-imx/gpc.c
+++ b/arch/arm/mach-imx/gpc.c
@@ -10,26 +10,17 @@
* http://www.gnu.org/copyleft/gpl.html
*/
-#include <linux/clk.h>
-#include <linux/delay.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
-#include <linux/platform_device.h>
-#include <linux/pm_domain.h>
-#include <linux/regulator/consumer.h>
#include <linux/irqchip/arm-gic.h>
#include "common.h"
#include "hardware.h"
-#define GPC_CNTR 0x000
#define GPC_IMR1 0x008
-#define GPC_PGC_GPU_PDN 0x260
-#define GPC_PGC_GPU_PUPSCR 0x264
-#define GPC_PGC_GPU_PDNSCR 0x268
#define GPC_PGC_CPU_PDN 0x2a0
#define GPC_PGC_CPU_PUPSCR 0x2a4
#define GPC_PGC_CPU_PDNSCR 0x2a8
@@ -39,18 +30,6 @@
#define IMR_NUM 4
#define GPC_MAX_IRQS (IMR_NUM * 32)
-#define GPU_VPU_PUP_REQ BIT(1)
-#define GPU_VPU_PDN_REQ BIT(0)
-
-#define GPC_CLK_MAX 6
-
-struct pu_domain {
- struct generic_pm_domain base;
- struct regulator *reg;
- struct clk *clk[GPC_CLK_MAX];
- int num_clks;
-};
-
static void __iomem *gpc_base;
static u32 gpc_wake_irqs[IMR_NUM];
static u32 gpc_saved_imrs[IMR_NUM];
@@ -296,199 +275,3 @@ void __init imx_gpc_check_dt(void)
gpc_base = of_iomap(np, 0);
}
}
-
-static void _imx6q_pm_pu_power_off(struct generic_pm_domain *genpd)
-{
- int iso, iso2sw;
- u32 val;
-
- /* Read ISO and ISO2SW power down delays */
- val = readl_relaxed(gpc_base + GPC_PGC_GPU_PDNSCR);
- iso = val & 0x3f;
- iso2sw = (val >> 8) & 0x3f;
-
- /* Gate off PU domain when GPU/VPU is powered down */
- writel_relaxed(0x1, gpc_base + GPC_PGC_GPU_PDN);
-
- /* Request GPC to power down GPU/VPU */
- val = readl_relaxed(gpc_base + GPC_CNTR);
- val |= GPU_VPU_PDN_REQ;
- writel_relaxed(val, gpc_base + GPC_CNTR);
-
- /* Wait ISO + ISO2SW IPG clock cycles */
- ndelay((iso + iso2sw) * 1000 / 66);
-}
-
-static int imx6q_pm_pu_power_off(struct generic_pm_domain *genpd)
-{
- struct pu_domain *pu = container_of(genpd, struct pu_domain, base);
-
- _imx6q_pm_pu_power_off(genpd);
-
- if (pu->reg)
- regulator_disable(pu->reg);
-
- return 0;
-}
-
-static int imx6q_pm_pu_power_on(struct generic_pm_domain *genpd)
-{
- struct pu_domain *pu = container_of(genpd, struct pu_domain, base);
- int i, ret, sw, sw2iso;
- u32 val;
-
- if (pu->reg)
- ret = regulator_enable(pu->reg);
- if (pu->reg && ret) {
- pr_err("%s: failed to enable regulator: %d\n", __func__, ret);
- return ret;
- }
-
- /* Enable reset clocks for all devices in the PU domain */
- for (i = 0; i < pu->num_clks; i++)
- clk_prepare_enable(pu->clk[i]);
-
- /* Gate off PU domain when GPU/VPU is powered down */
- writel_relaxed(0x1, gpc_base + GPC_PGC_GPU_PDN);
-
- /* Read ISO and ISO2SW power down delays */
- val = readl_relaxed(gpc_base + GPC_PGC_GPU_PUPSCR);
- sw = val & 0x3f;
- sw2iso = (val >> 8) & 0x3f;
-
- /* Request GPC to power up GPU/VPU */
- val = readl_relaxed(gpc_base + GPC_CNTR);
- val |= GPU_VPU_PUP_REQ;
- writel_relaxed(val, gpc_base + GPC_CNTR);
-
- /* Wait ISO + ISO2SW IPG clock cycles */
- ndelay((sw + sw2iso) * 1000 / 66);
-
- /* Disable reset clocks for all devices in the PU domain */
- for (i = 0; i < pu->num_clks; i++)
- clk_disable_unprepare(pu->clk[i]);
-
- return 0;
-}
-
-static struct generic_pm_domain imx6q_arm_domain = {
- .name = "ARM",
-};
-
-static struct pu_domain imx6q_pu_domain = {
- .base = {
- .name = "PU",
- .power_off = imx6q_pm_pu_power_off,
- .power_on = imx6q_pm_pu_power_on,
- },
-};
-
-static struct generic_pm_domain imx6sl_display_domain = {
- .name = "DISPLAY",
-};
-
-static struct generic_pm_domain *imx_gpc_domains[] = {
- &imx6q_arm_domain,
- &imx6q_pu_domain.base,
- &imx6sl_display_domain,
-};
-
-static struct genpd_onecell_data imx_gpc_onecell_data = {
- .domains = imx_gpc_domains,
- .num_domains = ARRAY_SIZE(imx_gpc_domains),
-};
-
-static int imx_gpc_genpd_init(struct device *dev, struct regulator *pu_reg)
-{
- struct clk *clk;
- int i, ret;
-
- imx6q_pu_domain.reg = pu_reg;
-
- for (i = 0; ; i++) {
- clk = of_clk_get(dev->of_node, i);
- if (IS_ERR(clk))
- break;
- if (i >= GPC_CLK_MAX) {
- dev_err(dev, "more than %d clocks\n", GPC_CLK_MAX);
- goto clk_err;
- }
- imx6q_pu_domain.clk[i] = clk;
- }
- imx6q_pu_domain.num_clks = i;
-
- /* Enable power always in case bootloader disabled it. */
- imx6q_pm_pu_power_on(&imx6q_pu_domain.base);
-
- if (!IS_ENABLED(CONFIG_PM_GENERIC_DOMAINS))
- return 0;
-
- imx6q_pu_domain.base.states = devm_kzalloc(dev,
- sizeof(*imx6q_pu_domain.base.states),
- GFP_KERNEL);
- if (!imx6q_pu_domain.base.states)
- return -ENOMEM;
-
- imx6q_pu_domain.base.states[0].power_off_latency_ns = 25000;
- imx6q_pu_domain.base.states[0].power_on_latency_ns = 2000000;
- imx6q_pu_domain.base.state_count = 1;
-
- for (i = 0; i < ARRAY_SIZE(imx_gpc_domains); i++)
- pm_genpd_init(imx_gpc_domains[i], NULL, false);
-
- ret = of_genpd_add_provider_onecell(dev->of_node,
- &imx_gpc_onecell_data);
- if (ret)
- goto power_off;
-
- return 0;
-
-power_off:
- imx6q_pm_pu_power_off(&imx6q_pu_domain.base);
-clk_err:
- while (i--)
- clk_put(imx6q_pu_domain.clk[i]);
- imx6q_pu_domain.reg = NULL;
- return -EINVAL;
-}
-
-static int imx_gpc_probe(struct platform_device *pdev)
-{
- struct regulator *pu_reg;
- int ret;
-
- /* bail out if DT too old and doesn't provide the necessary info */
- if (!of_property_read_bool(pdev->dev.of_node, "#power-domain-cells"))
- return 0;
-
- pu_reg = devm_regulator_get_optional(&pdev->dev, "pu");
- if (PTR_ERR(pu_reg) == -ENODEV)
- pu_reg = NULL;
- if (IS_ERR(pu_reg)) {
- ret = PTR_ERR(pu_reg);
- dev_err(&pdev->dev, "failed to get pu regulator: %d\n", ret);
- return ret;
- }
-
- return imx_gpc_genpd_init(&pdev->dev, pu_reg);
-}
-
-static const struct of_device_id imx_gpc_dt_ids[] = {
- { .compatible = "fsl,imx6q-gpc" },
- { .compatible = "fsl,imx6sl-gpc" },
- { }
-};
-
-static struct platform_driver imx_gpc_driver = {
- .driver = {
- .name = "imx-gpc",
- .of_match_table = imx_gpc_dt_ids,
- },
- .probe = imx_gpc_probe,
-};
-
-static int __init imx_pgc_init(void)
-{
- return platform_driver_register(&imx_gpc_driver);
-}
-subsys_initcall(imx_pgc_init);
diff --git a/arch/arm/mach-imx/mach-imx25.c b/arch/arm/mach-imx/mach-imx25.c
index 32dcb5e99e23..353b86e3808f 100644
--- a/arch/arm/mach-imx/mach-imx25.c
+++ b/arch/arm/mach-imx/mach-imx25.c
@@ -23,6 +23,11 @@ static void __init imx25_init_early(void)
mxc_set_cpu_type(MXC_CPU_MX25);
}
+static void __init imx25_dt_init(void)
+{
+ imx_aips_allow_unprivileged_access("fsl,imx25-aips");
+}
+
static void __init mx25_init_irq(void)
{
struct device_node *np;
@@ -41,6 +46,7 @@ static const char * const imx25_dt_board_compat[] __initconst = {
DT_MACHINE_START(IMX25_DT, "Freescale i.MX25 (Device Tree Support)")
.init_early = imx25_init_early,
+ .init_machine = imx25_dt_init,
.init_late = imx25_pm_init,
.init_irq = mx25_init_irq,
.dt_compat = imx25_dt_board_compat,
diff --git a/arch/arm/mach-imx/mach-mx31_3ds.c b/arch/arm/mach-imx/mach-mx31_3ds.c
index 558e5f8589cb..68c3f0799d5b 100644
--- a/arch/arm/mach-imx/mach-mx31_3ds.c
+++ b/arch/arm/mach-imx/mach-mx31_3ds.c
@@ -375,6 +375,8 @@ static struct imx_ssi_platform_data mx31_3ds_ssi_pdata = {
/* SPI */
static int spi0_internal_chipselect[] = {
+ MXC_SPI_CS(0),
+ MXC_SPI_CS(1),
MXC_SPI_CS(2),
};
@@ -385,6 +387,7 @@ static const struct spi_imx_master spi0_pdata __initconst = {
static int spi1_internal_chipselect[] = {
MXC_SPI_CS(0),
+ MXC_SPI_CS(1),
MXC_SPI_CS(2),
};
@@ -398,7 +401,7 @@ static struct spi_board_info mx31_3ds_spi_devs[] __initdata = {
.modalias = "mc13783",
.max_speed_hz = 1000000,
.bus_num = 1,
- .chip_select = 1, /* SS2 */
+ .chip_select = 2, /* SS2 */
.platform_data = &mc13783_pdata,
/* irq number is run-time assigned */
.mode = SPI_CS_HIGH,
@@ -406,7 +409,7 @@ static struct spi_board_info mx31_3ds_spi_devs[] __initdata = {
.modalias = "l4f00242t03",
.max_speed_hz = 5000000,
.bus_num = 0,
- .chip_select = 0, /* SS2 */
+ .chip_select = 2, /* SS2 */
.platform_data = &mx31_3ds_l4f00242t03_pdata,
},
};
diff --git a/arch/arm/mach-imx/mach-mx31moboard.c b/arch/arm/mach-imx/mach-mx31moboard.c
index cc867682520e..bde9a9af6714 100644
--- a/arch/arm/mach-imx/mach-mx31moboard.c
+++ b/arch/arm/mach-imx/mach-mx31moboard.c
@@ -296,14 +296,14 @@ static struct spi_board_info moboard_spi_board_info[] __initdata = {
/* irq number is run-time assigned */
.max_speed_hz = 300000,
.bus_num = 1,
- .chip_select = 0,
+ .chip_select = 1,
.platform_data = &moboard_pmic,
.mode = SPI_CS_HIGH,
},
};
static int moboard_spi2_cs[] = {
- MXC_SPI_CS(1),
+ MXC_SPI_CS(0), MXC_SPI_CS(1),
};
static const struct spi_imx_master moboard_spi2_pdata __initconst = {
diff --git a/arch/arm/mach-imx/mach-pcm037_eet.c b/arch/arm/mach-imx/mach-pcm037_eet.c
index 8fd8255068ee..95bd97710494 100644
--- a/arch/arm/mach-imx/mach-pcm037_eet.c
+++ b/arch/arm/mach-imx/mach-pcm037_eet.c
@@ -50,13 +50,13 @@ static struct spi_board_info pcm037_spi_dev[] = {
.modalias = "dac124s085",
.max_speed_hz = 400000,
.bus_num = 0,
- .chip_select = 0, /* Index in pcm037_spi1_cs[] */
+ .chip_select = 1, /* Index in pcm037_spi1_cs[] */
.mode = SPI_CPHA,
},
};
/* Platform Data for MXC CSPI */
-static int pcm037_spi1_cs[] = {MXC_SPI_CS(1), IOMUX_TO_GPIO(MX31_PIN_KEY_COL7)};
+static int pcm037_spi1_cs[] = { MXC_SPI_CS(0), MXC_SPI_CS(1), };
static const struct spi_imx_master pcm037_spi1_pdata __initconst = {
.chipselect = pcm037_spi1_cs,
diff --git a/arch/arm/mach-imx/mmdc.c b/arch/arm/mach-imx/mmdc.c
index c03bf28d8bbc..78262899a590 100644
--- a/arch/arm/mach-imx/mmdc.c
+++ b/arch/arm/mach-imx/mmdc.c
@@ -1,4 +1,5 @@
/*
+ * Copyright 2017 NXP
* Copyright 2011,2016 Freescale Semiconductor, Inc.
* Copyright 2011 Linaro Ltd.
*
@@ -47,6 +48,7 @@
#define PROFILE_SEL 0x10
#define MMDC_MADPCR0 0x410
+#define MMDC_MADPCR1 0x414
#define MMDC_MADPSR0 0x418
#define MMDC_MADPSR1 0x41C
#define MMDC_MADPSR2 0x420
@@ -57,6 +59,7 @@
#define MMDC_NUM_COUNTERS 6
#define MMDC_FLAG_PROFILE_SEL 0x1
+#define MMDC_PRF_AXI_ID_CLEAR 0x0
#define to_mmdc_pmu(p) container_of(p, struct mmdc_pmu, pmu)
@@ -87,7 +90,7 @@ static DEFINE_IDA(mmdc_ida);
PMU_EVENT_ATTR_STRING(total-cycles, mmdc_pmu_total_cycles, "event=0x00")
PMU_EVENT_ATTR_STRING(busy-cycles, mmdc_pmu_busy_cycles, "event=0x01")
PMU_EVENT_ATTR_STRING(read-accesses, mmdc_pmu_read_accesses, "event=0x02")
-PMU_EVENT_ATTR_STRING(write-accesses, mmdc_pmu_write_accesses, "config=0x03")
+PMU_EVENT_ATTR_STRING(write-accesses, mmdc_pmu_write_accesses, "event=0x03")
PMU_EVENT_ATTR_STRING(read-bytes, mmdc_pmu_read_bytes, "event=0x04")
PMU_EVENT_ATTR_STRING(read-bytes.unit, mmdc_pmu_read_bytes_unit, "MB");
PMU_EVENT_ATTR_STRING(read-bytes.scale, mmdc_pmu_read_bytes_scale, "0.000001");
@@ -161,8 +164,11 @@ static struct attribute_group mmdc_pmu_events_attr_group = {
};
PMU_FORMAT_ATTR(event, "config:0-63");
+PMU_FORMAT_ATTR(axi_id, "config1:0-63");
+
static struct attribute *mmdc_pmu_format_attrs[] = {
&format_attr_event.attr,
+ &format_attr_axi_id.attr,
NULL,
};
@@ -345,6 +351,14 @@ static void mmdc_pmu_event_start(struct perf_event *event, int flags)
writel(DBG_RST, reg);
+ /*
+ * Write the AXI id parameter to MADPCR1.
+ */
+ val = event->attr.config1;
+ reg = mmdc_base + MMDC_MADPCR1;
+ writel(val, reg);
+
+ reg = mmdc_base + MMDC_MADPCR0;
val = DBG_EN;
if (pmu_mmdc->devtype_data->flags & MMDC_FLAG_PROFILE_SEL)
val |= PROFILE_SEL;
@@ -382,6 +396,10 @@ static void mmdc_pmu_event_stop(struct perf_event *event, int flags)
reg = mmdc_base + MMDC_MADPCR0;
writel(PRF_FRZ, reg);
+
+ reg = mmdc_base + MMDC_MADPCR1;
+ writel(MMDC_PRF_AXI_ID_CLEAR, reg);
+
mmdc_pmu_event_update(event);
}
diff --git a/arch/arm/mach-ixp4xx/common-pci.c b/arch/arm/mach-ixp4xx/common-pci.c
index 4977296f0c78..bcf3df59f71b 100644
--- a/arch/arm/mach-ixp4xx/common-pci.c
+++ b/arch/arm/mach-ixp4xx/common-pci.c
@@ -43,14 +43,14 @@
int (*ixp4xx_pci_read)(u32 addr, u32 cmd, u32* data);
/*
- * Base address for PCI regsiter region
+ * Base address for PCI register region
*/
unsigned long ixp4xx_pci_reg_base = 0;
/*
* PCI cfg and I/O routines are done by programming a
* command/byte enable register, and then read/writing
- * the data from a data regsiter. We need to ensure
+ * the data from a data register. We need to ensure
* these transactions are atomic or we will end up
* with corrupt data on the bus or in a driver.
*/
diff --git a/arch/arm/mach-keystone/Kconfig b/arch/arm/mach-keystone/Kconfig
index 554357035f30..db122356b410 100644
--- a/arch/arm/mach-keystone/Kconfig
+++ b/arch/arm/mach-keystone/Kconfig
@@ -10,6 +10,7 @@ config ARCH_KEYSTONE
select ARCH_SUPPORTS_BIG_ENDIAN
select ZONE_DMA if ARM_LPAE
select PINCTRL
+ select PM_GENERIC_DOMAINS if PM
help
Support for boards based on the Texas Instruments Keystone family of
SoCs.
diff --git a/arch/arm/mach-keystone/pm_domain.c b/arch/arm/mach-keystone/pm_domain.c
index 8cbb35765a19..fe57e2692629 100644
--- a/arch/arm/mach-keystone/pm_domain.c
+++ b/arch/arm/mach-keystone/pm_domain.c
@@ -32,7 +32,9 @@ static struct pm_clk_notifier_block platform_domain_notifier = {
};
static const struct of_device_id of_keystone_table[] = {
- {.compatible = "ti,keystone"},
+ {.compatible = "ti,k2hk"},
+ {.compatible = "ti,k2e"},
+ {.compatible = "ti,k2l"},
{ /* end of list */ },
};
diff --git a/arch/arm/mach-mmp/clock.c b/arch/arm/mach-mmp/clock.c
index ac6633d0b69b..28fe64c6e2f5 100644
--- a/arch/arm/mach-mmp/clock.c
+++ b/arch/arm/mach-mmp/clock.c
@@ -67,6 +67,9 @@ void clk_disable(struct clk *clk)
{
unsigned long flags;
+ if (!clk)
+ return;
+
WARN_ON(clk->enabled == 0);
spin_lock_irqsave(&clocks_lock, flags);
diff --git a/arch/arm/mach-mxs/mach-mxs.c b/arch/arm/mach-mxs/mach-mxs.c
index e4f21086b42b..1c6062d240c8 100644
--- a/arch/arm/mach-mxs/mach-mxs.c
+++ b/arch/arm/mach-mxs/mach-mxs.c
@@ -419,7 +419,8 @@ static void __init mxs_machine_init(void)
crystalfontz_init();
else if (of_machine_is_compatible("eukrea,mbmx283lc"))
eukrea_mbmx283lc_init();
- else if (of_machine_is_compatible("i2se,duckbill"))
+ else if (of_machine_is_compatible("i2se,duckbill") ||
+ of_machine_is_compatible("i2se,duckbill-2"))
duckbill_init();
else if (of_machine_is_compatible("msr,m28cu3"))
m28cu3_init();
diff --git a/arch/arm/mach-omap1/pm.c b/arch/arm/mach-omap1/pm.c
index ee5460b8ec2e..f1135bf8940e 100644
--- a/arch/arm/mach-omap1/pm.c
+++ b/arch/arm/mach-omap1/pm.c
@@ -581,7 +581,6 @@ static int omap_pm_enter(suspend_state_t state)
{
switch (state)
{
- case PM_SUSPEND_STANDBY:
case PM_SUSPEND_MEM:
omap1_pm_suspend();
break;
diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
index 6b6fda65fb3b..91272db09fa3 100644
--- a/arch/arm/mach-omap2/board-n8x0.c
+++ b/arch/arm/mach-omap2/board-n8x0.c
@@ -117,7 +117,7 @@ static struct musb_hdrc_platform_data tusb_data = {
static void __init n8x0_usb_init(void)
{
int ret = 0;
- static char announce[] __initdata = KERN_INFO "TUSB 6010\n";
+ static const char announce[] __initconst = KERN_INFO "TUSB 6010\n";
/* PM companion chip power control pin */
ret = gpio_request_one(TUSB6010_GPIO_ENABLE, GPIOF_OUT_INIT_LOW,
diff --git a/arch/arm/mach-omap2/clockdomains7xx_data.c b/arch/arm/mach-omap2/clockdomains7xx_data.c
index 6c679659cda5..67ebff829cf2 100644
--- a/arch/arm/mach-omap2/clockdomains7xx_data.c
+++ b/arch/arm/mach-omap2/clockdomains7xx_data.c
@@ -524,7 +524,7 @@ static struct clockdomain pcie_7xx_clkdm = {
.dep_bit = DRA7XX_PCIE_STATDEP_SHIFT,
.wkdep_srcs = pcie_wkup_sleep_deps,
.sleepdep_srcs = pcie_wkup_sleep_deps,
- .flags = CLKDM_CAN_HWSUP_SWSUP,
+ .flags = CLKDM_CAN_SWSUP,
};
static struct clockdomain atl_7xx_clkdm = {
diff --git a/arch/arm/mach-omap2/clockdomains81xx_data.c b/arch/arm/mach-omap2/clockdomains81xx_data.c
index 3b5fb05ae701..65fbd136b20c 100644
--- a/arch/arm/mach-omap2/clockdomains81xx_data.c
+++ b/arch/arm/mach-omap2/clockdomains81xx_data.c
@@ -91,6 +91,14 @@ static struct clockdomain default_l3_slow_81xx_clkdm = {
.flags = CLKDM_CAN_SWSUP,
};
+static struct clockdomain default_sata_81xx_clkdm = {
+ .name = "default_clkdm",
+ .pwrdm = { .name = "default_pwrdm" },
+ .cm_inst = TI81XX_CM_DEFAULT_MOD,
+ .clkdm_offs = TI816X_CM_DEFAULT_SATA_CLKDM,
+ .flags = CLKDM_CAN_SWSUP,
+};
+
/* 816x only */
static struct clockdomain alwon_mpu_816x_clkdm = {
@@ -173,6 +181,7 @@ static struct clockdomain *clockdomains_ti814x[] __initdata = {
&mmu_81xx_clkdm,
&mmu_cfg_81xx_clkdm,
&default_l3_slow_81xx_clkdm,
+ &default_sata_81xx_clkdm,
NULL,
};
@@ -200,6 +209,7 @@ static struct clockdomain *clockdomains_ti816x[] __initdata = {
&default_ducati_816x_clkdm,
&default_pci_816x_clkdm,
&default_l3_slow_81xx_clkdm,
+ &default_sata_81xx_clkdm,
NULL,
};
diff --git a/arch/arm/mach-omap2/cm81xx.h b/arch/arm/mach-omap2/cm81xx.h
index 3a0ccf07c76f..5d73a1057c82 100644
--- a/arch/arm/mach-omap2/cm81xx.h
+++ b/arch/arm/mach-omap2/cm81xx.h
@@ -57,5 +57,6 @@
#define TI816X_CM_DEFAULT_PCI_CLKDM 0x0010
#define TI816X_CM_DEFAULT_L3_SLOW_CLKDM 0x0014
#define TI816X_CM_DEFAULT_DUCATI_CLKDM 0x0018
+#define TI816X_CM_DEFAULT_SATA_CLKDM 0x0060
#endif
diff --git a/arch/arm/mach-omap2/devices.c b/arch/arm/mach-omap2/devices.c
index 3fdb94599184..473951203104 100644
--- a/arch/arm/mach-omap2/devices.c
+++ b/arch/arm/mach-omap2/devices.c
@@ -121,7 +121,7 @@ static inline void omap_init_mcspi(void) {}
*
* Bind the RNG hwmod to the RNG omap_device. No return value.
*/
-static void omap_init_rng(void)
+static void __init omap_init_rng(void)
{
struct omap_hwmod *oh;
struct platform_device *pdev;
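omap_init_rng() is only reachable from the boot-time device registration path, so marking it __init lets its text be reclaimed with the rest of the init sections. A minimal sketch of the general rule, with hypothetical names — a function called only from an initcall can itself be __init:

        #include <linux/init.h>

        static int __init example_register_devices(void)
        {
                /* only ever called during boot, so __init is safe */
                return 0;
        }
        arch_initcall(example_register_devices);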
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index 0da4f2ea76c4..8bcea0d83fa0 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -138,7 +138,6 @@
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
-#include <linux/bootmem.h>
#include <linux/cpu.h>
#include <linux/of.h>
#include <linux/of_address.h>
@@ -216,49 +215,12 @@ static LIST_HEAD(omap_hwmod_list);
/* mpu_oh: used to add/remove MPU initiator from sleepdep list */
static struct omap_hwmod *mpu_oh;
-/*
- * linkspace: ptr to a buffer that struct omap_hwmod_link records are
- * allocated from - used to reduce the number of small memory
- * allocations, which has a significant impact on performance
- */
-static struct omap_hwmod_link *linkspace;
-
-/*
- * free_ls, max_ls: array indexes into linkspace; representing the
- * next free struct omap_hwmod_link index, and the maximum number of
- * struct omap_hwmod_link records allocated (respectively)
- */
-static unsigned short free_ls, max_ls, ls_supp;
-
/* inited: set to true once the hwmod code is initialized */
static bool inited;
/* Private functions */
/**
- * _fetch_next_ocp_if - return the next OCP interface in a list
- * @p: ptr to a ptr to the list_head inside the ocp_if to return
- * @i: pointer to the index of the element pointed to by @p in the list
- *
- * Return a pointer to the struct omap_hwmod_ocp_if record
- * containing the struct list_head pointed to by @p, and increment
- * @p such that a future call to this routine will return the next
- * record.
- */
-static struct omap_hwmod_ocp_if *_fetch_next_ocp_if(struct list_head **p,
- int *i)
-{
- struct omap_hwmod_ocp_if *oi;
-
- oi = list_entry(*p, struct omap_hwmod_link, node)->ocp_if;
- *p = (*p)->next;
-
- *i = *i + 1;
-
- return oi;
-}
-
-/**
* _update_sysc_cache - return the module OCP_SYSCONFIG register, keep copy
* @oh: struct omap_hwmod *
*
@@ -794,15 +756,10 @@ static int _init_main_clk(struct omap_hwmod *oh)
static int _init_interface_clks(struct omap_hwmod *oh)
{
struct omap_hwmod_ocp_if *os;
- struct list_head *p;
struct clk *c;
- int i = 0;
int ret = 0;
- p = oh->slave_ports.next;
-
- while (i < oh->slaves_cnt) {
- os = _fetch_next_ocp_if(&p, &i);
+ list_for_each_entry(os, &oh->slave_ports, node) {
if (!os->clk)
continue;
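The omap_hwmod.c hunks above and below replace the hand-rolled _fetch_next_ocp_if()/slaves_cnt iterator with list_for_each_entry() over the new node member embedded directly in struct omap_hwmod_ocp_if (see the omap_hwmod.h change further down). A stand-alone sketch of the same pattern, using hypothetical structure names:

        #include <linux/list.h>

        struct example_port {
                const char *clk;
                struct list_head node;          /* links the port into its owner's list */
        };

        struct example_owner {
                struct list_head slave_ports;   /* initialised with INIT_LIST_HEAD() */
        };

        static void example_walk_ports(struct example_owner *owner)
        {
                struct example_port *p;

                /* walks every port without keeping a separate element count */
                list_for_each_entry(p, &owner->slave_ports, node) {
                        if (!p->clk)
                                continue;
                        /* ... act on the port ... */
                }
        }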
@@ -905,19 +862,13 @@ static void _disable_optional_clocks(struct omap_hwmod *oh)
static int _enable_clocks(struct omap_hwmod *oh)
{
struct omap_hwmod_ocp_if *os;
- struct list_head *p;
- int i = 0;
pr_debug("omap_hwmod: %s: enabling clocks\n", oh->name);
if (oh->_clk)
clk_enable(oh->_clk);
- p = oh->slave_ports.next;
-
- while (i < oh->slaves_cnt) {
- os = _fetch_next_ocp_if(&p, &i);
-
+ list_for_each_entry(os, &oh->slave_ports, node) {
if (os->_clk && (os->flags & OCPIF_SWSUP_IDLE))
clk_enable(os->_clk);
}
@@ -939,19 +890,13 @@ static int _enable_clocks(struct omap_hwmod *oh)
static int _disable_clocks(struct omap_hwmod *oh)
{
struct omap_hwmod_ocp_if *os;
- struct list_head *p;
- int i = 0;
pr_debug("omap_hwmod: %s: disabling clocks\n", oh->name);
if (oh->_clk)
clk_disable(oh->_clk);
- p = oh->slave_ports.next;
-
- while (i < oh->slaves_cnt) {
- os = _fetch_next_ocp_if(&p, &i);
-
+ list_for_each_entry(os, &oh->slave_ports, node) {
if (os->_clk && (os->flags & OCPIF_SWSUP_IDLE))
clk_disable(os->_clk);
}
@@ -1190,16 +1135,11 @@ static int _get_sdma_req_by_name(struct omap_hwmod *oh, const char *name,
static int _get_addr_space_by_name(struct omap_hwmod *oh, const char *name,
u32 *pa_start, u32 *pa_end)
{
- int i, j;
+ int j;
struct omap_hwmod_ocp_if *os;
- struct list_head *p = NULL;
bool found = false;
- p = oh->slave_ports.next;
-
- i = 0;
- while (i < oh->slaves_cnt) {
- os = _fetch_next_ocp_if(&p, &i);
+ list_for_each_entry(os, &oh->slave_ports, node) {
if (!os->addr)
return -ENOENT;
@@ -1239,18 +1179,13 @@ static int _get_addr_space_by_name(struct omap_hwmod *oh, const char *name,
static void __init _save_mpu_port_index(struct omap_hwmod *oh)
{
struct omap_hwmod_ocp_if *os = NULL;
- struct list_head *p;
- int i = 0;
if (!oh)
return;
oh->_int_flags |= _HWMOD_NO_MPU_PORT;
- p = oh->slave_ports.next;
-
- while (i < oh->slaves_cnt) {
- os = _fetch_next_ocp_if(&p, &i);
+ list_for_each_entry(os, &oh->slave_ports, node) {
if (os->user & OCP_USER_MPU) {
oh->_mpu_port = os;
oh->_int_flags &= ~_HWMOD_NO_MPU_PORT;
@@ -1393,7 +1328,7 @@ static void _enable_sysc(struct omap_hwmod *oh)
*/
if ((oh->flags & HWMOD_SET_DEFAULT_CLOCKACT) &&
(sf & SYSC_HAS_CLOCKACTIVITY))
- _set_clockactivity(oh, oh->class->sysc->clockact, &v);
+ _set_clockactivity(oh, CLOCKACT_TEST_ICLK, &v);
_write_sysconfig(v, oh);
@@ -2092,7 +2027,7 @@ static int _enable(struct omap_hwmod *oh)
r = (soc_ops.wait_target_ready) ? soc_ops.wait_target_ready(oh) :
-EINVAL;
- if (oh->clkdm)
+ if (oh->clkdm && !(oh->flags & HWMOD_CLKDM_NOAUTO))
clkdm_allow_idle(oh->clkdm);
if (!r) {
@@ -2149,7 +2084,12 @@ static int _idle(struct omap_hwmod *oh)
_idle_sysc(oh);
_del_initiator_dep(oh, mpu_oh);
- if (oh->clkdm)
+ /*
+ * If HWMOD_CLKDM_NOAUTO is set then we don't
+ * deny idle the clkdm again since idle was already denied
+ * in _enable()
+ */
+ if (oh->clkdm && !(oh->flags & HWMOD_CLKDM_NOAUTO))
clkdm_deny_idle(oh->clkdm);
if (oh->flags & HWMOD_BLOCK_WFI)
@@ -2451,15 +2391,11 @@ static int __init _init(struct omap_hwmod *oh, void *data)
static void __init _setup_iclk_autoidle(struct omap_hwmod *oh)
{
struct omap_hwmod_ocp_if *os;
- struct list_head *p;
- int i = 0;
+
if (oh->_state != _HWMOD_STATE_INITIALIZED)
return;
- p = oh->slave_ports.next;
-
- while (i < oh->slaves_cnt) {
- os = _fetch_next_ocp_if(&p, &i);
+ list_for_each_entry(os, &oh->slave_ports, node) {
if (!os->_clk)
continue;
@@ -2657,7 +2593,6 @@ static int __init _register(struct omap_hwmod *oh)
list_add_tail(&oh->node, &omap_hwmod_list);
- INIT_LIST_HEAD(&oh->master_ports);
INIT_LIST_HEAD(&oh->slave_ports);
spin_lock_init(&oh->_lock);
lockdep_set_class(&oh->_lock, &oh->hwmod_key);
@@ -2675,49 +2610,10 @@ static int __init _register(struct omap_hwmod *oh)
}
/**
- * _alloc_links - return allocated memory for hwmod links
- * @ml: pointer to a struct omap_hwmod_link * for the master link
- * @sl: pointer to a struct omap_hwmod_link * for the slave link
- *
- * Return pointers to two struct omap_hwmod_link records, via the
- * addresses pointed to by @ml and @sl. Will first attempt to return
- * memory allocated as part of a large initial block, but if that has
- * been exhausted, will allocate memory itself. Since ideally this
- * second allocation path will never occur, the number of these
- * 'supplemental' allocations will be logged when debugging is
- * enabled. Returns 0.
- */
-static int __init _alloc_links(struct omap_hwmod_link **ml,
- struct omap_hwmod_link **sl)
-{
- unsigned int sz;
-
- if ((free_ls + LINKS_PER_OCP_IF) <= max_ls) {
- *ml = &linkspace[free_ls++];
- *sl = &linkspace[free_ls++];
- return 0;
- }
-
- sz = sizeof(struct omap_hwmod_link) * LINKS_PER_OCP_IF;
-
- *sl = NULL;
- *ml = memblock_virt_alloc(sz, 0);
-
- *sl = (void *)(*ml) + sizeof(struct omap_hwmod_link);
-
- ls_supp++;
- pr_debug("omap_hwmod: supplemental link allocations needed: %d\n",
- ls_supp * LINKS_PER_OCP_IF);
-
- return 0;
-};
-
-/**
* _add_link - add an interconnect between two IP blocks
* @oi: pointer to a struct omap_hwmod_ocp_if record
*
- * Add struct omap_hwmod_link records connecting the master IP block
- * specified in @oi->master to @oi, and connecting the slave IP block
+ * Add struct omap_hwmod_link records connecting the slave IP block
* specified in @oi->slave to @oi. This code is assumed to run before
* preemption or SMP has been enabled, thus avoiding the need for
* locking in this code. Changes to this assumption will require
@@ -2725,19 +2621,10 @@ static int __init _alloc_links(struct omap_hwmod_link **ml,
*/
static int __init _add_link(struct omap_hwmod_ocp_if *oi)
{
- struct omap_hwmod_link *ml, *sl;
-
pr_debug("omap_hwmod: %s -> %s: adding link\n", oi->master->name,
oi->slave->name);
- _alloc_links(&ml, &sl);
-
- ml->ocp_if = oi;
- list_add(&ml->node, &oi->master->master_ports);
- oi->master->masters_cnt++;
-
- sl->ocp_if = oi;
- list_add(&sl->node, &oi->slave->slave_ports);
+ list_add(&oi->node, &oi->slave->slave_ports);
oi->slave->slaves_cnt++;
return 0;
@@ -2784,45 +2671,6 @@ static int __init _register_link(struct omap_hwmod_ocp_if *oi)
return 0;
}
-/**
- * _alloc_linkspace - allocate large block of hwmod links
- * @ois: pointer to an array of struct omap_hwmod_ocp_if records to count
- *
- * Allocate a large block of struct omap_hwmod_link records. This
- * improves boot time significantly by avoiding the need to allocate
- * individual records one by one. If the number of records to
- * allocate in the block hasn't been manually specified, this function
- * will count the number of struct omap_hwmod_ocp_if records in @ois
- * and use that to determine the allocation size. For SoC families
- * that require multiple list registrations, such as OMAP3xxx, this
- * estimation process isn't optimal, so manual estimation is advised
- * in those cases. Returns -EEXIST if the allocation has already occurred
- * or 0 upon success.
- */
-static int __init _alloc_linkspace(struct omap_hwmod_ocp_if **ois)
-{
- unsigned int i = 0;
- unsigned int sz;
-
- if (linkspace) {
- WARN(1, "linkspace already allocated\n");
- return -EEXIST;
- }
-
- if (max_ls == 0)
- while (ois[i++])
- max_ls += LINKS_PER_OCP_IF;
-
- sz = sizeof(struct omap_hwmod_link) * max_ls;
-
- pr_debug("omap_hwmod: %s: allocating %d byte linkspace (%d links)\n",
- __func__, sz, max_ls);
-
- linkspace = memblock_virt_alloc(sz, 0);
-
- return 0;
-}
-
/* Static functions intended only for use in soc_ops field function pointers */
/**
@@ -3180,13 +3028,6 @@ int __init omap_hwmod_register_links(struct omap_hwmod_ocp_if **ois)
if (ois[0] == NULL) /* Empty list */
return 0;
- if (!linkspace) {
- if (_alloc_linkspace(ois)) {
- pr_err("omap_hwmod: could not allocate link space\n");
- return -ENOMEM;
- }
- }
-
i = 0;
do {
r = _register_link(ois[i]);
@@ -3398,14 +3239,10 @@ int omap_hwmod_count_resources(struct omap_hwmod *oh, unsigned long flags)
ret += _count_sdma_reqs(oh);
if (flags & IORESOURCE_MEM) {
- int i = 0;
struct omap_hwmod_ocp_if *os;
- struct list_head *p = oh->slave_ports.next;
- while (i < oh->slaves_cnt) {
- os = _fetch_next_ocp_if(&p, &i);
+ list_for_each_entry(os, &oh->slave_ports, node)
ret += _count_ocp_if_addr_spaces(os);
- }
}
return ret;
@@ -3424,7 +3261,6 @@ int omap_hwmod_count_resources(struct omap_hwmod *oh, unsigned long flags)
int omap_hwmod_fill_resources(struct omap_hwmod *oh, struct resource *res)
{
struct omap_hwmod_ocp_if *os;
- struct list_head *p;
int i, j, mpu_irqs_cnt, sdma_reqs_cnt, addr_cnt;
int r = 0;
@@ -3454,11 +3290,7 @@ int omap_hwmod_fill_resources(struct omap_hwmod *oh, struct resource *res)
r++;
}
- p = oh->slave_ports.next;
-
- i = 0;
- while (i < oh->slaves_cnt) {
- os = _fetch_next_ocp_if(&p, &i);
+ list_for_each_entry(os, &oh->slave_ports, node) {
addr_cnt = _count_ocp_if_addr_spaces(os);
for (j = 0; j < addr_cnt; j++) {
diff --git a/arch/arm/mach-omap2/omap_hwmod.h b/arch/arm/mach-omap2/omap_hwmod.h
index 78904017f18c..a8f779381fd8 100644
--- a/arch/arm/mach-omap2/omap_hwmod.h
+++ b/arch/arm/mach-omap2/omap_hwmod.h
@@ -313,6 +313,7 @@ struct omap_hwmod_ocp_if {
struct omap_hwmod_addr_space *addr;
const char *clk;
struct clk *_clk;
+ struct list_head node;
union {
struct omap_hwmod_omap2_firewall omap2;
} fw;
@@ -410,7 +411,6 @@ struct omap_hwmod_class_sysconfig {
struct omap_hwmod_sysc_fields *sysc_fields;
u8 srst_udelay;
u8 idlemodes;
- u8 clockact;
};
/**
@@ -531,6 +531,10 @@ struct omap_hwmod_omap4_prcm {
* operate and they need to be handled at the same time as the main_clk.
* HWMOD_NO_IDLE: Do not idle the hwmod at all. Useful to handle certain
* IPs like CPSW on DRA7, where clocks to this module cannot be disabled.
+ * HWMOD_CLKDM_NOAUTO: Allows the hwmod's clockdomain to be prevented from
+ * entering HW_AUTO while hwmod is active. This is needed to workaround
+ * some modules which don't function correctly with HW_AUTO. For example,
+ * DCAN on DRA7x SoC needs this to workaround errata i893.
*/
#define HWMOD_SWSUP_SIDLE (1 << 0)
#define HWMOD_SWSUP_MSTANDBY (1 << 1)
@@ -548,6 +552,7 @@ struct omap_hwmod_omap4_prcm {
#define HWMOD_RECONFIG_IO_CHAIN (1 << 13)
#define HWMOD_OPT_CLKS_NEEDED (1 << 14)
#define HWMOD_NO_IDLE (1 << 15)
+#define HWMOD_CLKDM_NOAUTO (1 << 16)
/*
* omap_hwmod._int_flags definitions
@@ -617,16 +622,6 @@ struct omap_hwmod_class {
};
/**
- * struct omap_hwmod_link - internal structure linking hwmods with ocp_ifs
- * @ocp_if: OCP interface structure record pointer
- * @node: list_head pointing to next struct omap_hwmod_link in a list
- */
-struct omap_hwmod_link {
- struct omap_hwmod_ocp_if *ocp_if;
- struct list_head node;
-};
-
-/**
* struct omap_hwmod - integration data for OMAP hardware "modules" (IP blocks)
* @name: name of the hwmod
* @class: struct omap_hwmod_class * to the class of this hwmod
@@ -686,9 +681,8 @@ struct omap_hwmod {
const char *main_clk;
struct clk *_clk;
struct omap_hwmod_opt_clk *opt_clks;
- char *clkdm_name;
+ const char *clkdm_name;
struct clockdomain *clkdm;
- struct list_head master_ports; /* connect to *_IA */
struct list_head slave_ports; /* connect to *_TA */
void *dev_attr;
u32 _sysc_cache;
@@ -698,12 +692,11 @@ struct omap_hwmod {
struct list_head node;
struct omap_hwmod_ocp_if *_mpu_port;
unsigned int (*xlate_irq)(unsigned int);
- u16 flags;
+ u32 flags;
u8 mpu_rt_idx;
u8 response_lat;
u8 rst_lines_cnt;
u8 opt_clks_cnt;
- u8 masters_cnt;
u8 slaves_cnt;
u8 hwmods_cnt;
u8 _int_flags;
diff --git a/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c b/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c
index e047033caa3e..d190f1ad97b7 100644
--- a/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c
@@ -55,7 +55,6 @@ static struct omap_hwmod_class_sysconfig omap2xxx_timer_sysc = {
SYSC_HAS_ENAWAKEUP | SYSC_HAS_SOFTRESET |
SYSC_HAS_AUTOIDLE | SYSS_HAS_RESET_STATUS),
.idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
- .clockact = CLOCKACT_TEST_ICLK,
.sysc_fields = &omap_hwmod_sysc_type1,
};
diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
index 1435fee39a89..1c6ca4d5fa2d 100644
--- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
@@ -149,7 +149,6 @@ static struct omap_hwmod_class_sysconfig omap3xxx_timer_sysc = {
SYSC_HAS_EMUFREE | SYSC_HAS_AUTOIDLE |
SYSS_HAS_RESET_STATUS),
.idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
- .clockact = CLOCKACT_TEST_ICLK,
.sysc_fields = &omap_hwmod_sysc_type1,
};
@@ -424,7 +423,6 @@ static struct omap_hwmod_class_sysconfig i2c_sysc = {
SYSC_HAS_ENAWAKEUP | SYSC_HAS_SOFTRESET |
SYSC_HAS_AUTOIDLE | SYSS_HAS_RESET_STATUS),
.idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
- .clockact = CLOCKACT_TEST_ICLK,
.sysc_fields = &omap_hwmod_sysc_type1,
};
@@ -1045,7 +1043,6 @@ static struct omap_hwmod_class_sysconfig omap3xxx_mcbsp_sysc = {
SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET),
.idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
.sysc_fields = &omap_hwmod_sysc_type1,
- .clockact = 0x2,
};
static struct omap_hwmod_class omap3xxx_mcbsp_hwmod_class = {
@@ -1210,7 +1207,6 @@ static struct omap_hwmod_sysc_fields omap34xx_sr_sysc_fields = {
static struct omap_hwmod_class_sysconfig omap34xx_sr_sysc = {
.sysc_offs = 0x24,
.sysc_flags = (SYSC_HAS_CLOCKACTIVITY | SYSC_NO_CACHE),
- .clockact = CLOCKACT_TEST_ICLK,
.sysc_fields = &omap34xx_sr_sysc_fields,
};
diff --git a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
index dad871a4cd96..94f09c720f29 100644
--- a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
@@ -1320,7 +1320,6 @@ static struct omap_hwmod_class_sysconfig omap44xx_i2c_sysc = {
SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS),
.idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
SIDLE_SMART_WKUP),
- .clockact = CLOCKACT_TEST_ICLK,
.sysc_fields = &omap_hwmod_sysc_type1,
};
@@ -2548,7 +2547,6 @@ static struct omap_hwmod_class_sysconfig omap44xx_timer_1ms_sysc = {
SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET |
SYSS_HAS_RESET_STATUS),
.idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
- .clockact = CLOCKACT_TEST_ICLK,
.sysc_fields = &omap_hwmod_sysc_type1,
};
diff --git a/arch/arm/mach-omap2/omap_hwmod_54xx_data.c b/arch/arm/mach-omap2/omap_hwmod_54xx_data.c
index a2d763a4cc57..9a67f013ebad 100644
--- a/arch/arm/mach-omap2/omap_hwmod_54xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_54xx_data.c
@@ -839,7 +839,6 @@ static struct omap_hwmod_class_sysconfig omap54xx_i2c_sysc = {
SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS),
.idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
SIDLE_SMART_WKUP),
- .clockact = CLOCKACT_TEST_ICLK,
.sysc_fields = &omap_hwmod_sysc_type1,
};
@@ -1530,7 +1529,6 @@ static struct omap_hwmod_class_sysconfig omap54xx_timer_1ms_sysc = {
.idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
SIDLE_SMART_WKUP),
.sysc_fields = &omap_hwmod_sysc_type2,
- .clockact = CLOCKACT_TEST_ICLK,
};
static struct omap_hwmod_class omap54xx_timer_1ms_hwmod_class = {
diff --git a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
index d0585293a381..b3abb8d8b2f6 100644
--- a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
@@ -359,6 +359,7 @@ static struct omap_hwmod dra7xx_dcan1_hwmod = {
.class = &dra7xx_dcan_hwmod_class,
.clkdm_name = "wkupaon_clkdm",
.main_clk = "dcan1_sys_clk_mux",
+ .flags = HWMOD_CLKDM_NOAUTO,
.prcm = {
.omap4 = {
.clkctrl_offs = DRA7XX_CM_WKUPAON_DCAN1_CLKCTRL_OFFSET,
@@ -374,6 +375,7 @@ static struct omap_hwmod dra7xx_dcan2_hwmod = {
.class = &dra7xx_dcan_hwmod_class,
.clkdm_name = "l4per2_clkdm",
.main_clk = "sys_clkin1",
+ .flags = HWMOD_CLKDM_NOAUTO,
.prcm = {
.omap4 = {
.clkctrl_offs = DRA7XX_CM_L4PER2_DCAN2_CLKCTRL_OFFSET,
@@ -1098,7 +1100,6 @@ static struct omap_hwmod_class_sysconfig dra7xx_i2c_sysc = {
SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS),
.idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
SIDLE_SMART_WKUP),
- .clockact = CLOCKACT_TEST_ICLK,
.sysc_fields = &omap_hwmod_sysc_type1,
};
@@ -2700,6 +2701,7 @@ static struct omap_hwmod dra7xx_usb_otg_ss1_hwmod = {
.class = &dra7xx_usb_otg_ss_hwmod_class,
.clkdm_name = "l3init_clkdm",
.main_clk = "dpll_core_h13x2_ck",
+ .flags = HWMOD_CLKDM_NOAUTO,
.prcm = {
.omap4 = {
.clkctrl_offs = DRA7XX_CM_L3INIT_USB_OTG_SS1_CLKCTRL_OFFSET,
@@ -2721,6 +2723,7 @@ static struct omap_hwmod dra7xx_usb_otg_ss2_hwmod = {
.class = &dra7xx_usb_otg_ss_hwmod_class,
.clkdm_name = "l3init_clkdm",
.main_clk = "dpll_core_h13x2_ck",
+ .flags = HWMOD_CLKDM_NOAUTO,
.prcm = {
.omap4 = {
.clkctrl_offs = DRA7XX_CM_L3INIT_USB_OTG_SS2_CLKCTRL_OFFSET,
diff --git a/arch/arm/mach-omap2/omap_hwmod_81xx_data.c b/arch/arm/mach-omap2/omap_hwmod_81xx_data.c
index b82b77cff24c..310afe474ec4 100644
--- a/arch/arm/mach-omap2/omap_hwmod_81xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_81xx_data.c
@@ -106,6 +106,7 @@
*/
#define DM81XX_CM_DEFAULT_OFFSET 0x500
#define DM81XX_CM_DEFAULT_USB_CLKCTRL (0x558 - DM81XX_CM_DEFAULT_OFFSET)
+#define DM81XX_CM_DEFAULT_SATA_CLKCTRL (0x560 - DM81XX_CM_DEFAULT_OFFSET)
/* L3 Interconnect entries clocked at 125, 250 and 500MHz */
static struct omap_hwmod dm81xx_alwon_l3_slow_hwmod = {
@@ -973,6 +974,38 @@ static struct omap_hwmod_ocp_if dm816x_l4_hs__emac1 = {
.user = OCP_USER_MPU,
};
+static struct omap_hwmod_class_sysconfig dm81xx_sata_sysc = {
+ .sysc_offs = 0x1100,
+ .sysc_flags = SYSC_HAS_SIDLEMODE,
+ .idlemodes = SIDLE_FORCE,
+ .sysc_fields = &omap_hwmod_sysc_type3,
+};
+
+static struct omap_hwmod_class dm81xx_sata_hwmod_class = {
+ .name = "sata",
+ .sysc = &dm81xx_sata_sysc,
+};
+
+static struct omap_hwmod dm81xx_sata_hwmod = {
+ .name = "sata",
+ .clkdm_name = "default_sata_clkdm",
+ .flags = HWMOD_NO_IDLEST,
+ .prcm = {
+ .omap4 = {
+ .clkctrl_offs = DM81XX_CM_DEFAULT_SATA_CLKCTRL,
+ .modulemode = MODULEMODE_SWCTRL,
+ },
+ },
+ .class = &dm81xx_sata_hwmod_class,
+};
+
+static struct omap_hwmod_ocp_if dm81xx_l4_hs__sata = {
+ .master = &dm81xx_l4_hs_hwmod,
+ .slave = &dm81xx_sata_hwmod,
+ .clk = "sysclk5_ck",
+ .user = OCP_USER_MPU,
+};
+
static struct omap_hwmod_class_sysconfig dm81xx_mmc_sysc = {
.rev_offs = 0x0,
.sysc_offs = 0x110,
@@ -1474,6 +1507,7 @@ static struct omap_hwmod_ocp_if *dm816x_hwmod_ocp_ifs[] __initdata = {
&dm81xx_l4_hs__emac0,
&dm81xx_emac0__mdio,
&dm816x_l4_hs__emac1,
+ &dm81xx_l4_hs__sata,
&dm81xx_alwon_l3_fast__tpcc,
&dm81xx_alwon_l3_fast__tptc0,
&dm81xx_alwon_l3_fast__tptc1,
diff --git a/arch/arm/mach-omap2/pm.c b/arch/arm/mach-omap2/pm.c
index 0598630c1778..63027e60cc20 100644
--- a/arch/arm/mach-omap2/pm.c
+++ b/arch/arm/mach-omap2/pm.c
@@ -163,7 +163,6 @@ static int omap_pm_enter(suspend_state_t suspend_state)
return -ENOENT; /* XXX doublecheck */
switch (suspend_state) {
- case PM_SUSPEND_STANDBY:
case PM_SUSPEND_MEM:
ret = omap_pm_suspend();
break;
diff --git a/arch/arm/mach-oxnas/Kconfig b/arch/arm/mach-oxnas/Kconfig
index 8fa4557e27a9..e3610c5b309b 100644
--- a/arch/arm/mach-oxnas/Kconfig
+++ b/arch/arm/mach-oxnas/Kconfig
@@ -28,7 +28,6 @@ config MACH_OX820
depends on ARCH_MULTI_V6
select ARM_GIC
select DMA_CACHE_RWFO if SMP
- select CPU_V6K
select HAVE_SMP
select HAVE_ARM_SCU if SMP
select HAVE_ARM_TWD if SMP
diff --git a/arch/arm/mach-shmobile/setup-r7s72100.c b/arch/arm/mach-shmobile/setup-r7s72100.c
index d46639fc6849..319ca9508ec6 100644
--- a/arch/arm/mach-shmobile/setup-r7s72100.c
+++ b/arch/arm/mach-shmobile/setup-r7s72100.c
@@ -26,6 +26,8 @@ static const char *const r7s72100_boards_compat_dt[] __initconst = {
};
DT_MACHINE_START(R7S72100_DT, "Generic R7S72100 (Flattened Device Tree)")
+ .l2c_aux_val = 0,
+ .l2c_aux_mask = ~0,
.init_early = shmobile_init_delay,
.init_late = shmobile_init_late,
.dt_compat = r7s72100_boards_compat_dt,
diff --git a/arch/arm/mach-stm32/Kconfig b/arch/arm/mach-stm32/Kconfig
new file mode 100644
index 000000000000..2d1419eb0896
--- /dev/null
+++ b/arch/arm/mach-stm32/Kconfig
@@ -0,0 +1,26 @@
+config ARCH_STM32
+ bool "STMicrolectronics STM32"
+ depends on ARM_SINGLE_ARMV7M
+ select ARCH_HAS_RESET_CONTROLLER
+ select ARMV7M_SYSTICK
+ select CLKSRC_STM32
+ select PINCTRL
+ select RESET_CONTROLLER
+ select STM32_EXTI
+ help
+ Support for STMicroelectronics STM32 processors.
+
+config MACH_STM32F429
+ bool "STMicrolectronics STM32F429"
+ depends on ARCH_STM32
+ default y
+
+config MACH_STM32F746
+ bool "STMicrolectronics STM32F746"
+ depends on ARCH_STM32
+ default y
+
+config MACH_STM32H743
+ bool "STMicrolectronics STM32H743"
+ depends on ARCH_STM32
+ default y
diff --git a/arch/arm/mach-stm32/board-dt.c b/arch/arm/mach-stm32/board-dt.c
index c354222a4158..e918686e4191 100644
--- a/arch/arm/mach-stm32/board-dt.c
+++ b/arch/arm/mach-stm32/board-dt.c
@@ -12,6 +12,7 @@ static const char *const stm32_compat[] __initconst = {
"st,stm32f429",
"st,stm32f469",
"st,stm32f746",
+ "st,stm32h743",
NULL
};
diff --git a/arch/arm/mach-sunxi/Kconfig b/arch/arm/mach-sunxi/Kconfig
index b9863f9a35fa..58153cdf025b 100644
--- a/arch/arm/mach-sunxi/Kconfig
+++ b/arch/arm/mach-sunxi/Kconfig
@@ -6,6 +6,7 @@ menuconfig ARCH_SUNXI
select GENERIC_IRQ_CHIP
select GPIOLIB
select PINCTRL
+ select PM_OPP
select SUN4I_TIMER
select RESET_CONTROLLER
diff --git a/arch/arm/mach-tegra/Makefile b/arch/arm/mach-tegra/Makefile
index fffad2426ee4..3b33f0bb78ae 100644
--- a/arch/arm/mach-tegra/Makefile
+++ b/arch/arm/mach-tegra/Makefile
@@ -2,7 +2,6 @@ asflags-y += -march=armv7-a
obj-y += io.o
obj-y += irq.o
-obj-y += flowctrl.o
obj-y += pm.o
obj-y += reset.o
obj-y += reset-handler.o
diff --git a/arch/arm/mach-tegra/cpuidle-tegra20.c b/arch/arm/mach-tegra/cpuidle-tegra20.c
index afcee04f2616..76e4c83cd5c8 100644
--- a/arch/arm/mach-tegra/cpuidle-tegra20.c
+++ b/arch/arm/mach-tegra/cpuidle-tegra20.c
@@ -26,12 +26,13 @@
#include <linux/kernel.h>
#include <linux/module.h>
+#include <soc/tegra/flowctrl.h>
+
#include <asm/cpuidle.h>
#include <asm/smp_plat.h>
#include <asm/suspend.h>
#include "cpuidle.h"
-#include "flowctrl.h"
#include "iomap.h"
#include "irq.h"
#include "pm.h"
diff --git a/arch/arm/mach-tegra/flowctrl.c b/arch/arm/mach-tegra/flowctrl.c
deleted file mode 100644
index 475e783992fd..000000000000
--- a/arch/arm/mach-tegra/flowctrl.c
+++ /dev/null
@@ -1,171 +0,0 @@
-/*
- * arch/arm/mach-tegra/flowctrl.c
- *
- * functions and macros to control the flowcontroller
- *
- * Copyright (c) 2010-2012, NVIDIA Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <linux/cpumask.h>
-#include <linux/init.h>
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-
-#include <soc/tegra/fuse.h>
-
-#include "flowctrl.h"
-
-static u8 flowctrl_offset_halt_cpu[] = {
- FLOW_CTRL_HALT_CPU0_EVENTS,
- FLOW_CTRL_HALT_CPU1_EVENTS,
- FLOW_CTRL_HALT_CPU1_EVENTS + 8,
- FLOW_CTRL_HALT_CPU1_EVENTS + 16,
-};
-
-static u8 flowctrl_offset_cpu_csr[] = {
- FLOW_CTRL_CPU0_CSR,
- FLOW_CTRL_CPU1_CSR,
- FLOW_CTRL_CPU1_CSR + 8,
- FLOW_CTRL_CPU1_CSR + 16,
-};
-
-static void __iomem *tegra_flowctrl_base;
-
-static void flowctrl_update(u8 offset, u32 value)
-{
- writel(value, tegra_flowctrl_base + offset);
-
- /* ensure the update has reached the flow controller */
- wmb();
- readl_relaxed(tegra_flowctrl_base + offset);
-}
-
-u32 flowctrl_read_cpu_csr(unsigned int cpuid)
-{
- u8 offset = flowctrl_offset_cpu_csr[cpuid];
-
- return readl(tegra_flowctrl_base + offset);
-}
-
-void flowctrl_write_cpu_csr(unsigned int cpuid, u32 value)
-{
- return flowctrl_update(flowctrl_offset_cpu_csr[cpuid], value);
-}
-
-void flowctrl_write_cpu_halt(unsigned int cpuid, u32 value)
-{
- return flowctrl_update(flowctrl_offset_halt_cpu[cpuid], value);
-}
-
-void flowctrl_cpu_suspend_enter(unsigned int cpuid)
-{
- unsigned int reg;
- int i;
-
- reg = flowctrl_read_cpu_csr(cpuid);
- switch (tegra_get_chip_id()) {
- case TEGRA20:
- /* clear wfe bitmap */
- reg &= ~TEGRA20_FLOW_CTRL_CSR_WFE_BITMAP;
- /* clear wfi bitmap */
- reg &= ~TEGRA20_FLOW_CTRL_CSR_WFI_BITMAP;
- /* pwr gating on wfe */
- reg |= TEGRA20_FLOW_CTRL_CSR_WFE_CPU0 << cpuid;
- break;
- case TEGRA30:
- case TEGRA114:
- case TEGRA124:
- /* clear wfe bitmap */
- reg &= ~TEGRA30_FLOW_CTRL_CSR_WFE_BITMAP;
- /* clear wfi bitmap */
- reg &= ~TEGRA30_FLOW_CTRL_CSR_WFI_BITMAP;
- /* pwr gating on wfi */
- reg |= TEGRA30_FLOW_CTRL_CSR_WFI_CPU0 << cpuid;
- break;
- }
- reg |= FLOW_CTRL_CSR_INTR_FLAG; /* clear intr flag */
- reg |= FLOW_CTRL_CSR_EVENT_FLAG; /* clear event flag */
- reg |= FLOW_CTRL_CSR_ENABLE; /* pwr gating */
- flowctrl_write_cpu_csr(cpuid, reg);
-
- for (i = 0; i < num_possible_cpus(); i++) {
- if (i == cpuid)
- continue;
- reg = flowctrl_read_cpu_csr(i);
- reg |= FLOW_CTRL_CSR_EVENT_FLAG;
- reg |= FLOW_CTRL_CSR_INTR_FLAG;
- flowctrl_write_cpu_csr(i, reg);
- }
-}
-
-void flowctrl_cpu_suspend_exit(unsigned int cpuid)
-{
- unsigned int reg;
-
- /* Disable powergating via flow controller for CPU0 */
- reg = flowctrl_read_cpu_csr(cpuid);
- switch (tegra_get_chip_id()) {
- case TEGRA20:
- /* clear wfe bitmap */
- reg &= ~TEGRA20_FLOW_CTRL_CSR_WFE_BITMAP;
- /* clear wfi bitmap */
- reg &= ~TEGRA20_FLOW_CTRL_CSR_WFI_BITMAP;
- break;
- case TEGRA30:
- case TEGRA114:
- case TEGRA124:
- /* clear wfe bitmap */
- reg &= ~TEGRA30_FLOW_CTRL_CSR_WFE_BITMAP;
- /* clear wfi bitmap */
- reg &= ~TEGRA30_FLOW_CTRL_CSR_WFI_BITMAP;
- break;
- }
- reg &= ~FLOW_CTRL_CSR_ENABLE; /* clear enable */
- reg |= FLOW_CTRL_CSR_INTR_FLAG; /* clear intr */
- reg |= FLOW_CTRL_CSR_EVENT_FLAG; /* clear event */
- flowctrl_write_cpu_csr(cpuid, reg);
-}
-
-static const struct of_device_id matches[] __initconst = {
- { .compatible = "nvidia,tegra124-flowctrl" },
- { .compatible = "nvidia,tegra114-flowctrl" },
- { .compatible = "nvidia,tegra30-flowctrl" },
- { .compatible = "nvidia,tegra20-flowctrl" },
- { }
-};
-
-void __init tegra_flowctrl_init(void)
-{
- /* hardcoded fallback if device tree node is missing */
- unsigned long base = 0x60007000;
- unsigned long size = SZ_4K;
- struct device_node *np;
-
- np = of_find_matching_node(NULL, matches);
- if (np) {
- struct resource res;
-
- if (of_address_to_resource(np, 0, &res) == 0) {
- size = resource_size(&res);
- base = res.start;
- }
-
- of_node_put(np);
- }
-
- tegra_flowctrl_base = ioremap_nocache(base, size);
-}
diff --git a/arch/arm/mach-tegra/flowctrl.h b/arch/arm/mach-tegra/flowctrl.h
deleted file mode 100644
index 73a9c5016c1a..000000000000
--- a/arch/arm/mach-tegra/flowctrl.h
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * arch/arm/mach-tegra/flowctrl.h
- *
- * functions and macros to control the flowcontroller
- *
- * Copyright (c) 2010-2012, NVIDIA Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef __MACH_TEGRA_FLOWCTRL_H
-#define __MACH_TEGRA_FLOWCTRL_H
-
-#define FLOW_CTRL_HALT_CPU0_EVENTS 0x0
-#define FLOW_CTRL_WAITEVENT (2 << 29)
-#define FLOW_CTRL_WAIT_FOR_INTERRUPT (4 << 29)
-#define FLOW_CTRL_JTAG_RESUME (1 << 28)
-#define FLOW_CTRL_SCLK_RESUME (1 << 27)
-#define FLOW_CTRL_HALT_CPU_IRQ (1 << 10)
-#define FLOW_CTRL_HALT_CPU_FIQ (1 << 8)
-#define FLOW_CTRL_HALT_LIC_IRQ (1 << 11)
-#define FLOW_CTRL_HALT_LIC_FIQ (1 << 10)
-#define FLOW_CTRL_HALT_GIC_IRQ (1 << 9)
-#define FLOW_CTRL_HALT_GIC_FIQ (1 << 8)
-#define FLOW_CTRL_CPU0_CSR 0x8
-#define FLOW_CTRL_CSR_INTR_FLAG (1 << 15)
-#define FLOW_CTRL_CSR_EVENT_FLAG (1 << 14)
-#define FLOW_CTRL_CSR_ENABLE_EXT_CRAIL (1 << 13)
-#define FLOW_CTRL_CSR_ENABLE_EXT_NCPU (1 << 12)
-#define FLOW_CTRL_CSR_ENABLE_EXT_MASK ( \
- FLOW_CTRL_CSR_ENABLE_EXT_NCPU | \
- FLOW_CTRL_CSR_ENABLE_EXT_CRAIL)
-#define FLOW_CTRL_CSR_ENABLE (1 << 0)
-#define FLOW_CTRL_HALT_CPU1_EVENTS 0x14
-#define FLOW_CTRL_CPU1_CSR 0x18
-
-#define TEGRA20_FLOW_CTRL_CSR_WFE_CPU0 (1 << 4)
-#define TEGRA20_FLOW_CTRL_CSR_WFE_BITMAP (3 << 4)
-#define TEGRA20_FLOW_CTRL_CSR_WFI_BITMAP 0
-
-#define TEGRA30_FLOW_CTRL_CSR_WFI_CPU0 (1 << 8)
-#define TEGRA30_FLOW_CTRL_CSR_WFE_BITMAP (0xF << 4)
-#define TEGRA30_FLOW_CTRL_CSR_WFI_BITMAP (0xF << 8)
-
-#ifndef __ASSEMBLY__
-u32 flowctrl_read_cpu_csr(unsigned int cpuid);
-void flowctrl_write_cpu_csr(unsigned int cpuid, u32 value);
-void flowctrl_write_cpu_halt(unsigned int cpuid, u32 value);
-
-void flowctrl_cpu_suspend_enter(unsigned int cpuid);
-void flowctrl_cpu_suspend_exit(unsigned int cpuid);
-
-void tegra_flowctrl_init(void);
-#endif
-
-#endif
diff --git a/arch/arm/mach-tegra/platsmp.c b/arch/arm/mach-tegra/platsmp.c
index 75620ae73913..b5a2afe99101 100644
--- a/arch/arm/mach-tegra/platsmp.c
+++ b/arch/arm/mach-tegra/platsmp.c
@@ -21,6 +21,7 @@
#include <linux/jiffies.h>
#include <linux/smp.h>
+#include <soc/tegra/flowctrl.h>
#include <soc/tegra/fuse.h>
#include <soc/tegra/pmc.h>
@@ -30,7 +31,6 @@
#include <asm/smp_scu.h>
#include "common.h"
-#include "flowctrl.h"
#include "iomap.h"
#include "reset.h"
diff --git a/arch/arm/mach-tegra/pm.c b/arch/arm/mach-tegra/pm.c
index b0f48a3946fa..1ad5719779b0 100644
--- a/arch/arm/mach-tegra/pm.c
+++ b/arch/arm/mach-tegra/pm.c
@@ -27,6 +27,7 @@
#include <linux/spinlock.h>
#include <linux/suspend.h>
+#include <soc/tegra/flowctrl.h>
#include <soc/tegra/fuse.h>
#include <soc/tegra/pm.h>
#include <soc/tegra/pmc.h>
@@ -38,7 +39,6 @@
#include <asm/suspend.h>
#include <asm/tlbflush.h>
-#include "flowctrl.h"
#include "iomap.h"
#include "pm.h"
#include "reset.h"
diff --git a/arch/arm/mach-tegra/reset-handler.S b/arch/arm/mach-tegra/reset-handler.S
index e3070fdab80b..805f306fa6f7 100644
--- a/arch/arm/mach-tegra/reset-handler.S
+++ b/arch/arm/mach-tegra/reset-handler.S
@@ -17,12 +17,12 @@
#include <linux/init.h>
#include <linux/linkage.h>
+#include <soc/tegra/flowctrl.h>
#include <soc/tegra/fuse.h>
#include <asm/asm-offsets.h>
#include <asm/cache.h>
-#include "flowctrl.h"
#include "iomap.h"
#include "reset.h"
#include "sleep.h"
diff --git a/arch/arm/mach-tegra/sleep-tegra20.S b/arch/arm/mach-tegra/sleep-tegra20.S
index f5d19667484e..5c8e638ee51a 100644
--- a/arch/arm/mach-tegra/sleep-tegra20.S
+++ b/arch/arm/mach-tegra/sleep-tegra20.S
@@ -20,6 +20,8 @@
#include <linux/linkage.h>
+#include <soc/tegra/flowctrl.h>
+
#include <asm/assembler.h>
#include <asm/proc-fns.h>
#include <asm/cp15.h>
@@ -27,7 +29,6 @@
#include "irammap.h"
#include "sleep.h"
-#include "flowctrl.h"
#define EMC_CFG 0xc
#define EMC_ADR_CFG 0x10
diff --git a/arch/arm/mach-tegra/sleep-tegra30.S b/arch/arm/mach-tegra/sleep-tegra30.S
index 16e5ff03383c..dd4a67dabd91 100644
--- a/arch/arm/mach-tegra/sleep-tegra30.S
+++ b/arch/arm/mach-tegra/sleep-tegra30.S
@@ -16,13 +16,13 @@
#include <linux/linkage.h>
+#include <soc/tegra/flowctrl.h>
#include <soc/tegra/fuse.h>
#include <asm/asm-offsets.h>
#include <asm/assembler.h>
#include <asm/cache.h>
-#include "flowctrl.h"
#include "irammap.h"
#include "sleep.h"
diff --git a/arch/arm/mach-tegra/sleep.S b/arch/arm/mach-tegra/sleep.S
index f024a5109e8e..5e3496753df1 100644
--- a/arch/arm/mach-tegra/sleep.S
+++ b/arch/arm/mach-tegra/sleep.S
@@ -30,8 +30,6 @@
#include <asm/hardware/cache-l2x0.h>
#include "iomap.h"
-
-#include "flowctrl.h"
#include "sleep.h"
#define CLK_RESET_CCLK_BURST 0x20
diff --git a/arch/arm/mach-tegra/tegra.c b/arch/arm/mach-tegra/tegra.c
index e01cbca196b5..649e9e8c7bcc 100644
--- a/arch/arm/mach-tegra/tegra.c
+++ b/arch/arm/mach-tegra/tegra.c
@@ -48,7 +48,6 @@
#include "board.h"
#include "common.h"
#include "cpuidle.h"
-#include "flowctrl.h"
#include "iomap.h"
#include "irq.h"
#include "pm.h"
@@ -75,7 +74,6 @@ static void __init tegra_init_early(void)
{
of_register_trusted_foundations();
tegra_cpu_reset_handler_init();
- tegra_flowctrl_init();
}
static void __init tegra_dt_init_irq(void)
diff --git a/arch/arm/mach-w90x900/clock.c b/arch/arm/mach-w90x900/clock.c
index 2c371ff22e51..ac6fd1a2cb59 100644
--- a/arch/arm/mach-w90x900/clock.c
+++ b/arch/arm/mach-w90x900/clock.c
@@ -46,6 +46,9 @@ void clk_disable(struct clk *clk)
{
unsigned long flags;
+ if (!clk)
+ return;
+
WARN_ON(clk->enabled == 0);
spin_lock_irqsave(&clocks_lock, flags);
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index 2290be390f87..808efbb89b88 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -57,6 +57,9 @@ static unsigned long sync_reg_offset = L2X0_CACHE_SYNC;
struct l2x0_regs l2x0_saved_regs;
+static bool l2x0_bresp_disable;
+static bool l2x0_flz_disable;
+
/*
* Common code for all cache controllers.
*/
@@ -620,7 +623,7 @@ static void __init l2c310_enable(void __iomem *base, unsigned num_lock)
u32 aux = l2x0_saved_regs.aux_ctrl;
if (rev >= L310_CACHE_ID_RTL_R2P0) {
- if (cortex_a9) {
+ if (cortex_a9 && !l2x0_bresp_disable) {
aux |= L310_AUX_CTRL_EARLY_BRESP;
pr_info("L2C-310 enabling early BRESP for Cortex-A9\n");
} else if (aux & L310_AUX_CTRL_EARLY_BRESP) {
@@ -629,7 +632,7 @@ static void __init l2c310_enable(void __iomem *base, unsigned num_lock)
}
}
- if (cortex_a9) {
+ if (cortex_a9 && !l2x0_flz_disable) {
u32 aux_cur = readl_relaxed(base + L2X0_AUX_CTRL);
u32 acr = get_auxcr();
@@ -1200,6 +1203,12 @@ static void __init l2c310_of_parse(const struct device_node *np,
*aux_mask &= ~L2C_AUX_CTRL_PARITY_ENABLE;
}
+ if (of_property_read_bool(np, "arm,early-bresp-disable"))
+ l2x0_bresp_disable = true;
+
+ if (of_property_read_bool(np, "arm,full-line-zero-disable"))
+ l2x0_flz_disable = true;
+
prefetch = l2x0_saved_regs.prefetch_ctrl;
ret = of_property_read_u32(np, "arm,double-linefill", &val);
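The two new L2C-310 properties are plain boolean device-tree flags, so parsing them reduces to of_property_read_bool(), which returns true when the property is present. A minimal sketch of the same parse step (the function is illustrative; the property names are the ones added in the hunk above):

        #include <linux/of.h>

        static bool example_parse_l2c_quirks(const struct device_node *np)
        {
                bool bresp_disable, flz_disable;

                bresp_disable = of_property_read_bool(np, "arm,early-bresp-disable");
                flz_disable = of_property_read_bool(np, "arm,full-line-zero-disable");

                /* true if either Cortex-A9 optimisation was disabled via DT */
                return bresp_disable || flz_disable;
        }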
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 475811f5383a..c742dfd2967b 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -2408,12 +2408,28 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
const struct dma_map_ops *dma_ops;
dev->archdata.dma_coherent = coherent;
+
+ /*
+ * Don't override the dma_ops if they have already been set. Ideally
+ * this should be the only location where dma_ops are set, remove this
+ * check when all other callers of set_dma_ops will have disappeared.
+ */
+ if (dev->dma_ops)
+ return;
+
if (arm_setup_iommu_dma_ops(dev, dma_base, size, iommu))
dma_ops = arm_get_iommu_dma_map_ops(coherent);
else
dma_ops = arm_get_dma_map_ops(coherent);
set_dma_ops(dev, dma_ops);
+
+#ifdef CONFIG_XEN
+ if (xen_initial_domain()) {
+ dev->archdata.dev_dma_ops = dev->dma_ops;
+ dev->dma_ops = xen_dma_ops;
+ }
+#endif
}
void arch_teardown_dma_ops(struct device *dev)
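The early return added above protects dma_ops that a bus or IOMMU layer installed before arch_setup_dma_ops() runs. A hypothetical earlier caller — the ops structure and names below are illustrative only — would now keep its ops instead of having them overridden:

        #include <linux/dma-mapping.h>

        /* illustrative only: a bus-specific dma_map_ops, callbacks elided */
        static const struct dma_map_ops example_bus_dma_ops = { };

        static void example_bus_setup(struct device *dev)
        {
                /*
                 * Installed before arch_setup_dma_ops(); the new dev->dma_ops
                 * check keeps the arch code from replacing it.
                 */
                set_dma_ops(dev, &example_bus_dma_ops);
        }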
diff --git a/arch/arm/mm/dump.c b/arch/arm/mm/dump.c
index 21192d6eda40..35ff45470dbf 100644
--- a/arch/arm/mm/dump.c
+++ b/arch/arm/mm/dump.c
@@ -17,6 +17,7 @@
#include <linux/mm.h>
#include <linux/seq_file.h>
+#include <asm/domain.h>
#include <asm/fixmap.h>
#include <asm/memory.h>
#include <asm/pgtable.h>
@@ -43,6 +44,7 @@ struct pg_state {
unsigned long start_address;
unsigned level;
u64 current_prot;
+ const char *current_domain;
};
struct prot_bits {
@@ -216,7 +218,8 @@ static void dump_prot(struct pg_state *st, const struct prot_bits *bits, size_t
}
}
-static void note_page(struct pg_state *st, unsigned long addr, unsigned level, u64 val)
+static void note_page(struct pg_state *st, unsigned long addr,
+ unsigned int level, u64 val, const char *domain)
{
static const char units[] = "KMGTPE";
u64 prot = val & pg_level[level].mask;
@@ -224,8 +227,10 @@ static void note_page(struct pg_state *st, unsigned long addr, unsigned level, u
if (!st->level) {
st->level = level;
st->current_prot = prot;
+ st->current_domain = domain;
seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
} else if (prot != st->current_prot || level != st->level ||
+ domain != st->current_domain ||
addr >= st->marker[1].start_address) {
const char *unit = units;
unsigned long delta;
@@ -240,6 +245,8 @@ static void note_page(struct pg_state *st, unsigned long addr, unsigned level, u
unit++;
}
seq_printf(st->seq, "%9lu%c", delta, *unit);
+ if (st->current_domain)
+ seq_printf(st->seq, " %s", st->current_domain);
if (pg_level[st->level].bits)
dump_prot(st, pg_level[st->level].bits, pg_level[st->level].num);
seq_printf(st->seq, "\n");
@@ -251,11 +258,13 @@ static void note_page(struct pg_state *st, unsigned long addr, unsigned level, u
}
st->start_address = addr;
st->current_prot = prot;
+ st->current_domain = domain;
st->level = level;
}
}
-static void walk_pte(struct pg_state *st, pmd_t *pmd, unsigned long start)
+static void walk_pte(struct pg_state *st, pmd_t *pmd, unsigned long start,
+ const char *domain)
{
pte_t *pte = pte_offset_kernel(pmd, 0);
unsigned long addr;
@@ -263,25 +272,50 @@ static void walk_pte(struct pg_state *st, pmd_t *pmd, unsigned long start)
for (i = 0; i < PTRS_PER_PTE; i++, pte++) {
addr = start + i * PAGE_SIZE;
- note_page(st, addr, 4, pte_val(*pte));
+ note_page(st, addr, 4, pte_val(*pte), domain);
}
}
+static const char *get_domain_name(pmd_t *pmd)
+{
+#ifndef CONFIG_ARM_LPAE
+ switch (pmd_val(*pmd) & PMD_DOMAIN_MASK) {
+ case PMD_DOMAIN(DOMAIN_KERNEL):
+ return "KERNEL ";
+ case PMD_DOMAIN(DOMAIN_USER):
+ return "USER ";
+ case PMD_DOMAIN(DOMAIN_IO):
+ return "IO ";
+ case PMD_DOMAIN(DOMAIN_VECTORS):
+ return "VECTORS";
+ default:
+ return "unknown";
+ }
+#endif
+ return NULL;
+}
+
static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start)
{
pmd_t *pmd = pmd_offset(pud, 0);
unsigned long addr;
unsigned i;
+ const char *domain;
for (i = 0; i < PTRS_PER_PMD; i++, pmd++) {
addr = start + i * PMD_SIZE;
+ domain = get_domain_name(pmd);
if (pmd_none(*pmd) || pmd_large(*pmd) || !pmd_present(*pmd))
- note_page(st, addr, 3, pmd_val(*pmd));
+ note_page(st, addr, 3, pmd_val(*pmd), domain);
else
- walk_pte(st, pmd, addr);
+ walk_pte(st, pmd, addr, domain);
- if (SECTION_SIZE < PMD_SIZE && pmd_large(pmd[1]))
- note_page(st, addr + SECTION_SIZE, 3, pmd_val(pmd[1]));
+ if (SECTION_SIZE < PMD_SIZE && pmd_large(pmd[1])) {
+ addr += SECTION_SIZE;
+ pmd++;
+ domain = get_domain_name(pmd);
+ note_page(st, addr, 3, pmd_val(*pmd), domain);
+ }
}
}
@@ -296,7 +330,7 @@ static void walk_pud(struct pg_state *st, pgd_t *pgd, unsigned long start)
if (!pud_none(*pud)) {
walk_pmd(st, pud, addr);
} else {
- note_page(st, addr, 2, pud_val(*pud));
+ note_page(st, addr, 2, pud_val(*pud), NULL);
}
}
}
@@ -317,11 +351,11 @@ static void walk_pgd(struct seq_file *m)
if (!pgd_none(*pgd)) {
walk_pud(&st, pgd, addr);
} else {
- note_page(&st, addr, 1, pgd_val(*pgd));
+ note_page(&st, addr, 1, pgd_val(*pgd), NULL);
}
}
- note_page(&st, 0, 0, 0);
+ note_page(&st, 0, 0, 0, NULL);
}
static int ptdump_show(struct seq_file *m, void *v)
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 1d8558ff9827..ad80548325fe 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -709,34 +709,37 @@ void set_section_perms(struct section_perm *perms, int n, bool set,
}
+/**
+ * update_sections_early intended to be called only through stop_machine
+ * framework and executed by only one CPU while all other CPUs will spin and
+ * wait, so no locking is required in this function.
+ */
static void update_sections_early(struct section_perm perms[], int n)
{
struct task_struct *t, *s;
- read_lock(&tasklist_lock);
for_each_process(t) {
if (t->flags & PF_KTHREAD)
continue;
for_each_thread(t, s)
set_section_perms(perms, n, true, s->mm);
}
- read_unlock(&tasklist_lock);
set_section_perms(perms, n, true, current->active_mm);
set_section_perms(perms, n, true, &init_mm);
}
-int __fix_kernmem_perms(void *unused)
+static int __fix_kernmem_perms(void *unused)
{
update_sections_early(nx_perms, ARRAY_SIZE(nx_perms));
return 0;
}
-void fix_kernmem_perms(void)
+static void fix_kernmem_perms(void)
{
stop_machine(__fix_kernmem_perms, NULL, NULL);
}
-int __mark_rodata_ro(void *unused)
+static int __mark_rodata_ro(void *unused)
{
update_sections_early(ro_perms, ARRAY_SIZE(ro_perms));
return 0;
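As the new comment explains, dropping tasklist_lock is safe because update_sections_early() only ever runs inside stop_machine(), which executes the callback on one CPU while all others spin with interrupts disabled. A minimal sketch of that calling convention (callback names are illustrative):

        #include <linux/stop_machine.h>

        static int __example_apply(void *unused)
        {
                /* runs on a single CPU; every other CPU is held in a spin loop */
                return 0;
        }

        static void example_apply(void)
        {
                /* NULL cpumask: let stop_machine pick the executing CPU */
                stop_machine(__example_apply, NULL, NULL);
        }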
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index ff0eed23ddf1..fc91205ff46c 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -481,6 +481,13 @@ int pci_ioremap_io(unsigned int offset, phys_addr_t phys_addr)
__pgprot(get_mem_type(pci_ioremap_mem_type)->prot_pte));
}
EXPORT_SYMBOL_GPL(pci_ioremap_io);
+
+void __iomem *pci_remap_cfgspace(resource_size_t res_cookie, size_t size)
+{
+ return arch_ioremap_caller(res_cookie, size, MT_UNCACHED,
+ __builtin_return_address(0));
+}
+EXPORT_SYMBOL_GPL(pci_remap_cfgspace);
#endif
/*
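pci_remap_cfgspace() gives PCI host-controller drivers a mapping with the uncached, strongly-ordered attributes that config space accesses require. A hypothetical caller might look like the sketch below (resource handling elided, and assuming the usual declaration reachable via <linux/io.h>):

        #include <linux/io.h>
        #include <linux/ioport.h>

        static void __iomem *example_map_cfg(struct resource *res)
        {
                /* MT_UNCACHED mapping suitable for config-space accesses */
                return pci_remap_cfgspace(res->start, resource_size(res));
        }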
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 4e016d7f37b3..31af3cb59a60 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -87,6 +87,8 @@ struct cachepolicy {
#define s2_policy(policy) 0
#endif
+unsigned long kimage_voffset __ro_after_init;
+
static struct cachepolicy cache_policies[] __initdata = {
{
.policy = "uncached",
@@ -414,6 +416,11 @@ void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot)
FIXADDR_END);
BUG_ON(idx >= __end_of_fixed_addresses);
+ /* we only support device mappings until pgprot_kernel has been set */
+ if (WARN_ON(pgprot_val(prot) != pgprot_val(FIXMAP_PAGE_IO) &&
+ pgprot_val(pgprot_kernel) == 0))
+ return;
+
if (pgprot_val(prot))
set_pte_at(NULL, vaddr, pte,
pfn_pte(phys >> PAGE_SHIFT, prot));
@@ -1492,7 +1499,7 @@ pgtables_remap lpae_pgtables_remap_asm;
* early_paging_init() recreates boot time page table setup, allowing machines
* to switch over to a high (>4G) address space on LPAE systems
*/
-void __init early_paging_init(const struct machine_desc *mdesc)
+static void __init early_paging_init(const struct machine_desc *mdesc)
{
pgtables_remap *lpae_pgtables_remap;
unsigned long pa_pgd;
@@ -1560,7 +1567,7 @@ void __init early_paging_init(const struct machine_desc *mdesc)
#else
-void __init early_paging_init(const struct machine_desc *mdesc)
+static void __init early_paging_init(const struct machine_desc *mdesc)
{
long long offset;
@@ -1616,7 +1623,6 @@ void __init paging_init(const struct machine_desc *mdesc)
{
void *zero_page;
- build_mem_type_table();
prepare_page_table();
map_lowmem();
memblock_set_current_limit(arm_lowmem_limit);
@@ -1635,4 +1641,13 @@ void __init paging_init(const struct machine_desc *mdesc)
empty_zero_page = virt_to_page(zero_page);
__flush_dcache_page(NULL, empty_zero_page);
+
+ /* Compute the virt/idmap offset, mostly for the sake of KVM */
+ kimage_voffset = (unsigned long)&kimage_voffset - virt_to_idmap(&kimage_voffset);
+}
+
+void __init early_mm_init(const struct machine_desc *mdesc)
+{
+ build_mem_type_table();
+ early_paging_init(mdesc);
}
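kimage_voffset records the fixed offset between the kernel image's virtual addresses and their identity-mapped (physical) counterparts, which is what lets KVM translate kernel symbols at runtime. The relation in a tiny illustrative sketch (helper name is hypothetical):

        extern unsigned long kimage_voffset;    /* defined in the hunk above */

        /* convert a kernel-image virtual address to its idmap view */
        static inline unsigned long example_kimage_to_idmap(unsigned long virt)
        {
                return virt - kimage_voffset;
        }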
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
index 33a45bd96860..3b8e728cc944 100644
--- a/arch/arm/mm/nommu.c
+++ b/arch/arm/mm/nommu.c
@@ -436,6 +436,18 @@ void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size)
}
EXPORT_SYMBOL(ioremap_wc);
+#ifdef CONFIG_PCI
+
+#include <asm/mach/map.h>
+
+void __iomem *pci_remap_cfgspace(resource_size_t res_cookie, size_t size)
+{
+ return arch_ioremap_caller(res_cookie, size, MT_UNCACHED,
+ __builtin_return_address(0));
+}
+EXPORT_SYMBOL_GPL(pci_remap_cfgspace);
+#endif
+
void *arch_memremap_wb(phys_addr_t phys_addr, size_t size)
{
return (void *)phys_addr;
diff --git a/arch/arm/mm/pageattr.c b/arch/arm/mm/pageattr.c
index 3b69f2642513..1403cb4a0c3d 100644
--- a/arch/arm/mm/pageattr.c
+++ b/arch/arm/mm/pageattr.c
@@ -15,6 +15,7 @@
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
+#include <asm/set_memory.h>
struct page_change_data {
pgprot_t set_mask;
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index d00d52c9de3e..01d64c0b2563 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -39,13 +39,14 @@ ENTRY(cpu_v7_proc_fin)
ENDPROC(cpu_v7_proc_fin)
/*
- * cpu_v7_reset(loc)
+ * cpu_v7_reset(loc, hyp)
*
* Perform a soft reset of the system. Put the CPU into the
* same state as it would be if it had been reset, and branch
* to what would be the reset vector.
*
* - loc - location to jump to for soft reset
+ * - hyp - indicate if restart occurs in HYP mode
*
* This code must be executed using a flat identity mapping with
* caches disabled.
@@ -53,11 +54,15 @@ ENDPROC(cpu_v7_proc_fin)
.align 5
.pushsection .idmap.text, "ax"
ENTRY(cpu_v7_reset)
- mrc p15, 0, r1, c1, c0, 0 @ ctrl register
- bic r1, r1, #0x1 @ ...............m
- THUMB( bic r1, r1, #1 << 30 ) @ SCTLR.TE (Thumb exceptions)
- mcr p15, 0, r1, c1, c0, 0 @ disable MMU
+ mrc p15, 0, r2, c1, c0, 0 @ ctrl register
+ bic r2, r2, #0x1 @ ...............m
+ THUMB( bic r2, r2, #1 << 30 ) @ SCTLR.TE (Thumb exceptions)
+ mcr p15, 0, r2, c1, c0, 0 @ disable MMU
isb
+#ifdef CONFIG_ARM_VIRT_EXT
+ teq r1, #0
+ bne __hyp_soft_restart
+#endif
bx r0
ENDPROC(cpu_v7_reset)
.popsection
diff --git a/arch/arm/mm/proc-v7m.S b/arch/arm/mm/proc-v7m.S
index 8dea61640cc1..47a5acc64433 100644
--- a/arch/arm/mm/proc-v7m.S
+++ b/arch/arm/mm/proc-v7m.S
@@ -135,9 +135,11 @@ __v7m_setup_cont:
dsb
mov r6, lr @ save LR
ldr sp, =init_thread_union + THREAD_START_SP
+ stmia sp, {r0-r3, r12}
cpsie i
svc #0
1: cpsid i
+ ldmia sp, {r0-r3, r12}
str r5, [r12, #11 * 4] @ restore the original SVC vector entry
mov lr, r6 @ restore LR
@@ -147,10 +149,10 @@ __v7m_setup_cont:
@ Configure caches (if implemented)
teq r8, #0
- stmneia r12, {r0-r6, lr} @ v7m_invalidate_l1 touches r0-r6
+ stmneia sp, {r0-r6, lr} @ v7m_invalidate_l1 touches r0-r6
blne v7m_invalidate_l1
teq r8, #0 @ re-evalutae condition
- ldmneia r12, {r0-r6, lr}
+ ldmneia sp, {r0-r6, lr}
@ Configure the System Control Register to ensure 8-byte stack alignment
@ Note the STKALIGN bit is either RW or RAO.
diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
index 93d0b6d0b63e..d5b9fa19b684 100644
--- a/arch/arm/net/bpf_jit_32.c
+++ b/arch/arm/net/bpf_jit_32.c
@@ -18,6 +18,7 @@
#include <linux/if_vlan.h>
#include <asm/cacheflush.h>
+#include <asm/set_memory.h>
#include <asm/hwcap.h>
#include <asm/opcodes.h>
diff --git a/arch/arm/plat-samsung/devs.c b/arch/arm/plat-samsung/devs.c
index 03fac123676d..dc269d9143bc 100644
--- a/arch/arm/plat-samsung/devs.c
+++ b/arch/arm/plat-samsung/devs.c
@@ -10,7 +10,6 @@
* published by the Free Software Foundation.
*/
-#include <linux/amba/pl330.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/interrupt.h>
diff --git a/arch/arm/plat-versatile/include/plat/clock.h b/arch/arm/plat-versatile/include/plat/clock.h
deleted file mode 100644
index 3cfb024ccd70..000000000000
--- a/arch/arm/plat-versatile/include/plat/clock.h
+++ /dev/null
@@ -1,15 +0,0 @@
-#ifndef PLAT_CLOCK_H
-#define PLAT_CLOCK_H
-
-#include <asm/hardware/icst.h>
-
-struct clk_ops {
- long (*round)(struct clk *, unsigned long);
- int (*set)(struct clk *, unsigned long);
- void (*setvco)(struct clk *, struct icst_vco);
-};
-
-int icst_clk_set(struct clk *, unsigned long);
-long icst_clk_round(struct clk *, unsigned long);
-
-#endif
diff --git a/arch/arm/xen/efi.c b/arch/arm/xen/efi.c
index 16db419f9e90..b4d78959cadf 100644
--- a/arch/arm/xen/efi.c
+++ b/arch/arm/xen/efi.c
@@ -35,6 +35,6 @@ void __init xen_efi_runtime_setup(void)
efi.update_capsule = xen_efi_update_capsule;
efi.query_capsule_caps = xen_efi_query_capsule_caps;
efi.get_next_high_mono_count = xen_efi_get_next_high_mono_count;
- efi.reset_system = NULL; /* Functionality provided by Xen. */
+ efi.reset_system = xen_efi_reset_system;
}
EXPORT_SYMBOL_GPL(xen_efi_runtime_setup);
diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
index 81e3217b12d3..ba7f4c8f5c3e 100644
--- a/arch/arm/xen/enlighten.c
+++ b/arch/arm/xen/enlighten.c
@@ -191,20 +191,24 @@ static int xen_dying_cpu(unsigned int cpu)
return 0;
}
-static void xen_restart(enum reboot_mode reboot_mode, const char *cmd)
+void xen_reboot(int reason)
{
- struct sched_shutdown r = { .reason = SHUTDOWN_reboot };
+ struct sched_shutdown r = { .reason = reason };
int rc;
+
rc = HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
BUG_ON(rc);
}
+static void xen_restart(enum reboot_mode reboot_mode, const char *cmd)
+{
+ xen_reboot(SHUTDOWN_reboot);
+}
+
+
static void xen_power_off(void)
{
- struct sched_shutdown r = { .reason = SHUTDOWN_poweroff };
- int rc;
- rc = HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
- BUG_ON(rc);
+ xen_reboot(SHUTDOWN_poweroff);
}
static irqreturn_t xen_arm_callback(int irq, void *arg)
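Factoring the SCHEDOP_shutdown hypercall into xen_reboot(reason) lets restart and power-off share one code path and leaves room for other shutdown reasons. A hypothetical additional caller, with the reason constant taken from the Xen interface headers:

        #include <xen/interface/sched.h>

        /* illustrative only: report a crash instead of a clean reboot */
        static void example_xen_crash(void)
        {
                xen_reboot(SHUTDOWN_crash);
        }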
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 67695fadae96..3dcd7ec69bca 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -736,6 +736,17 @@ config KEXEC
but it is independent of the system firmware. And like a reboot
you can start any kernel with it, not just Linux.
+config CRASH_DUMP
+ bool "Build kdump crash kernel"
+ help
+ Generate crash dump after being started by kexec. This should
+ be normally only set in special crash dump kernels which are
+ loaded in the main kernel with kexec-tools into a specially
+ reserved region and then later executed after a crash by
+ kdump/kexec.
+
+ For more details see Documentation/kdump/kdump.txt
+
config XEN_DOM0
def_bool y
depends on XEN
diff --git a/arch/arm64/Kconfig.debug b/arch/arm64/Kconfig.debug
index fca2f02cde68..cc6bd559af85 100644
--- a/arch/arm64/Kconfig.debug
+++ b/arch/arm64/Kconfig.debug
@@ -92,6 +92,10 @@ config DEBUG_EFI
the kernel that are only useful when using a debug build of the
UEFI firmware
+config ARM64_RELOC_TEST
+ depends on m
+ tristate "Relocation testing module"
+
source "drivers/hwtracing/coresight/Kconfig"
endmenu
diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms
index 129cc5ae4091..4afcffcb46cb 100644
--- a/arch/arm64/Kconfig.platforms
+++ b/arch/arm64/Kconfig.platforms
@@ -2,9 +2,10 @@ menu "Platform selection"
config ARCH_SUNXI
bool "Allwinner sunxi 64-bit SoC Family"
+ select ARCH_HAS_RESET_CONTROLLER
select GENERIC_IRQ_CHIP
select PINCTRL
- select PINCTRL_SUN50I_A64
+ select RESET_CONTROLLER
help
This enables support for Allwinner sunxi based SoCs like the A64.
@@ -54,6 +55,8 @@ config ARCH_BRCMSTB
config ARCH_EXYNOS
bool "ARMv8 based Samsung Exynos SoC family"
select COMMON_CLK_SAMSUNG
+ select EXYNOS_PM_DOMAINS if PM_GENERIC_DOMAINS
+ select EXYNOS_PMU
select HAVE_S3C2410_WATCHDOG if WATCHDOG
select HAVE_S3C_RTC if RTC_CLASS
select PINCTRL
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index b9a4a934ca05..7dedf2d8494e 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -37,10 +37,12 @@ $(warning LSE atomics not supported by binutils)
endif
endif
+ifeq ($(CONFIG_ARM64), y)
brokengasinst := $(call as-instr,1:\n.inst 0\n.rept . - 1b\n\nnop\n.endr\n,,-DCONFIG_BROKEN_GAS_INST=1)
-ifneq ($(brokengasinst),)
+ ifneq ($(brokengasinst),)
$(warning Detected assembler with broken .inst; disassembly will be unreliable)
+ endif
endif
KBUILD_CFLAGS += -mgeneral-regs-only $(lseinstr) $(brokengasinst)
diff --git a/arch/arm64/boot/dts/allwinner/Makefile b/arch/arm64/boot/dts/allwinner/Makefile
index bc6f342be59f..244e8b7565f9 100644
--- a/arch/arm64/boot/dts/allwinner/Makefile
+++ b/arch/arm64/boot/dts/allwinner/Makefile
@@ -1,5 +1,6 @@
dtb-$(CONFIG_ARCH_SUNXI) += sun50i-a64-bananapi-m64.dtb
dtb-$(CONFIG_ARCH_SUNXI) += sun50i-a64-pine64-plus.dtb sun50i-a64-pine64.dtb
+dtb-$(CONFIG_ARCH_SUNXI) += sun50i-h5-orangepi-pc2.dtb
always := $(dtb-y)
subdir-y := $(dts-dirs)
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
index 0565779e66fa..c7f669f5884f 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
@@ -98,6 +98,14 @@
clock-output-names = "osc32k";
};
+ iosc: internal-osc-clk {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ clock-frequency = <16000000>;
+ clock-accuracy = <300000000>;
+ clock-output-names = "iosc";
+ };
+
psci {
compatible = "arm,psci-0.2";
method = "smc";
@@ -394,5 +402,26 @@
interrupts = <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>;
};
+
+ r_ccu: clock@1f01400 {
+ compatible = "allwinner,sun50i-a64-r-ccu";
+ reg = <0x01f01400 0x100>;
+ clocks = <&osc24M>, <&osc32k>, <&iosc>;
+ clock-names = "hosc", "losc", "iosc";
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ };
+
+ r_pio: pinctrl@01f02c00 {
+ compatible = "allwinner,sun50i-a64-r-pinctrl";
+ reg = <0x01f02c00 0x400>;
+ interrupts = <GIC_SPI 45 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&r_ccu 3>, <&osc24M>, <&osc32k>;
+ clock-names = "apb", "hosc", "losc";
+ gpio-controller;
+ #gpio-cells = <3>;
+ interrupt-controller;
+ #interrupt-cells = <3>;
+ };
};
};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-pc2.dts b/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-pc2.dts
new file mode 100644
index 000000000000..dfecc17dcc92
--- /dev/null
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-pc2.dts
@@ -0,0 +1,188 @@
+/*
+ * Copyright (C) 2016 ARM Ltd.
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+#include "sun50i-h5.dtsi"
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/pinctrl/sun4i-a10.h>
+
+/ {
+ model = "Xunlong Orange Pi PC 2";
+ compatible = "xunlong,orangepi-pc2", "allwinner,sun50i-h5";
+
+ reg_vcc3v3: vcc3v3 {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc3v3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ aliases {
+ serial0 = &uart0;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ leds {
+ compatible = "gpio-leds";
+
+ pwr {
+ label = "orangepi:green:pwr";
+ gpios = <&r_pio 0 10 GPIO_ACTIVE_HIGH>;
+ default-state = "on";
+ };
+
+ status {
+ label = "orangepi:red:status";
+ gpios = <&pio 0 20 GPIO_ACTIVE_HIGH>;
+ };
+ };
+
+ r-gpio-keys {
+ compatible = "gpio-keys";
+
+ sw4 {
+ label = "sw4";
+ linux,code = <BTN_0>;
+ gpios = <&r_pio 0 3 GPIO_ACTIVE_LOW>;
+ };
+ };
+
+ reg_usb0_vbus: usb0-vbus {
+ compatible = "regulator-fixed";
+ regulator-name = "usb0-vbus";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ enable-active-high;
+ gpio = <&r_pio 0 2 GPIO_ACTIVE_HIGH>; /* PL2 */
+ status = "okay";
+ };
+};
+
+&codec {
+ allwinner,audio-routing =
+ "Line Out", "LINEOUT",
+ "MIC1", "Mic",
+ "Mic", "MBIAS";
+ status = "okay";
+};
+
+&ehci0 {
+ status = "okay";
+};
+
+&ehci1 {
+ status = "okay";
+};
+
+&ehci2 {
+ status = "okay";
+};
+
+&ehci3 {
+ status = "okay";
+};
+
+&ir {
+ pinctrl-names = "default";
+ pinctrl-0 = <&ir_pins_a>;
+ status = "okay";
+};
+
+&mmc0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmc0_pins_a>, <&mmc0_cd_pin>;
+ vmmc-supply = <&reg_vcc3v3>;
+ bus-width = <4>;
+ cd-gpios = <&pio 5 6 GPIO_ACTIVE_LOW>; /* PF6 */
+ status = "okay";
+};
+
+&ohci0 {
+ status = "okay";
+};
+
+&ohci1 {
+ status = "okay";
+};
+
+&ohci2 {
+ status = "okay";
+};
+
+&ohci3 {
+ status = "okay";
+};
+
+&uart0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart0_pins_a>;
+ status = "okay";
+};
+
+&uart1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart1_pins>;
+ status = "disabled";
+};
+
+&uart2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart2_pins>;
+ status = "disabled";
+};
+
+&usb_otg {
+ dr_mode = "otg";
+ status = "okay";
+};
+
+&usbphy {
+ /* USB Type-A ports' VBUS is always on */
+ usb0_id_det-gpios = <&pio 6 12 GPIO_ACTIVE_HIGH>; /* PG12 */
+ usb0_vbus-supply = <&reg_usb0_vbus>;
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi
new file mode 100644
index 000000000000..4d314a253fd9
--- /dev/null
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi
@@ -0,0 +1,124 @@
+/*
+ * Copyright (C) 2016 ARM Ltd.
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "sunxi-h3-h5.dtsi"
+
+/ {
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu@0 {
+ compatible = "arm,cortex-a53", "arm,armv8";
+ device_type = "cpu";
+ reg = <0>;
+ enable-method = "psci";
+ };
+
+ cpu@1 {
+ compatible = "arm,cortex-a53", "arm,armv8";
+ device_type = "cpu";
+ reg = <1>;
+ enable-method = "psci";
+ };
+
+ cpu@2 {
+ compatible = "arm,cortex-a53", "arm,armv8";
+ device_type = "cpu";
+ reg = <2>;
+ enable-method = "psci";
+ };
+
+ cpu@3 {
+ compatible = "arm,cortex-a53", "arm,armv8";
+ device_type = "cpu";
+ reg = <3>;
+ enable-method = "psci";
+ };
+ };
+
+ psci {
+ compatible = "arm,psci-0.2";
+ method = "smc";
+ };
+
+ timer {
+ compatible = "arm,armv8-timer";
+ interrupts = <GIC_PPI 13
+ (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 14
+ (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 11
+ (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 10
+ (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>;
+ };
+};
+
+&ccu {
+ compatible = "allwinner,sun50i-h5-ccu";
+};
+
+&mmc0 {
+ compatible = "allwinner,sun50i-h5-mmc",
+ "allwinner,sun50i-a64-mmc";
+ clocks = <&ccu CLK_BUS_MMC0>, <&ccu CLK_MMC0>;
+ clock-names = "ahb", "mmc";
+};
+
+&mmc1 {
+ compatible = "allwinner,sun50i-h5-mmc",
+ "allwinner,sun50i-a64-mmc";
+ clocks = <&ccu CLK_BUS_MMC1>, <&ccu CLK_MMC1>;
+ clock-names = "ahb", "mmc";
+};
+
+&mmc2 {
+ compatible = "allwinner,sun50i-h5-emmc",
+ "allwinner,sun50i-a64-emmc";
+ clocks = <&ccu CLK_BUS_MMC2>, <&ccu CLK_MMC2>;
+ clock-names = "ahb", "mmc";
+};
+
+&pio {
+ compatible = "allwinner,sun50i-h5-pinctrl";
+};
diff --git a/arch/arm64/boot/dts/allwinner/sunxi-h3-h5.dtsi b/arch/arm64/boot/dts/allwinner/sunxi-h3-h5.dtsi
new file mode 120000
index 000000000000..036f01dc2b9b
--- /dev/null
+++ b/arch/arm64/boot/dts/allwinner/sunxi-h3-h5.dtsi
@@ -0,0 +1 @@
+../../../../arm/boot/dts/sunxi-h3-h5.dtsi
\ No newline at end of file
diff --git a/arch/arm64/boot/dts/amlogic/Makefile b/arch/arm64/boot/dts/amlogic/Makefile
index 3f94bce33b7f..b9ad2db7398b 100644
--- a/arch/arm64/boot/dts/amlogic/Makefile
+++ b/arch/arm64/boot/dts/amlogic/Makefile
@@ -7,9 +7,11 @@ dtb-$(CONFIG_ARCH_MESON) += meson-gxbb-vega-s95-meta.dtb
dtb-$(CONFIG_ARCH_MESON) += meson-gxbb-vega-s95-telos.dtb
dtb-$(CONFIG_ARCH_MESON) += meson-gxbb-wetek-hub.dtb
dtb-$(CONFIG_ARCH_MESON) += meson-gxbb-wetek-play2.dtb
+dtb-$(CONFIG_ARCH_MESON) += meson-gxl-s905x-khadas-vim.dtb
dtb-$(CONFIG_ARCH_MESON) += meson-gxl-s905x-p212.dtb
dtb-$(CONFIG_ARCH_MESON) += meson-gxl-s905d-p230.dtb
dtb-$(CONFIG_ARCH_MESON) += meson-gxl-s905d-p231.dtb
+dtb-$(CONFIG_ARCH_MESON) += meson-gxl-s905x-hwacom-amazetv.dtb
dtb-$(CONFIG_ARCH_MESON) += meson-gxl-s905x-nexbox-a95x.dtb
dtb-$(CONFIG_ARCH_MESON) += meson-gxm-q200.dtb
dtb-$(CONFIG_ARCH_MESON) += meson-gxm-q201.dtb
diff --git a/arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi b/arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi
index 7a078bef04cd..a84e27622639 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi
@@ -98,6 +98,27 @@
clocks = <&wifi32k>;
clock-names = "ext_clock";
};
+
+ cvbs-connector {
+ compatible = "composite-video-connector";
+
+ port {
+ cvbs_connector_in: endpoint {
+ remote-endpoint = <&cvbs_vdac_out>;
+ };
+ };
+ };
+
+ hdmi-connector {
+ compatible = "hdmi-connector";
+ type = "a";
+
+ port {
+ hdmi_connector_in: endpoint {
+ remote-endpoint = <&hdmi_tx_tmds_out>;
+ };
+ };
+ };
};
/* This UART is brought out to the DB9 connector */
@@ -188,3 +209,21 @@
&ethmac {
status = "okay";
};
+
+&cvbs_vdac_port {
+ cvbs_vdac_out: endpoint {
+ remote-endpoint = <&cvbs_connector_in>;
+ };
+};
+
+&hdmi_tx {
+ status = "okay";
+ pinctrl-0 = <&hdmi_hpd_pins>, <&hdmi_i2c_pins>;
+ pinctrl-names = "default";
+};
+
+&hdmi_tx_tmds_port {
+ hdmi_tx_tmds_out: endpoint {
+ remote-endpoint = <&hdmi_connector_in>;
+ };
+};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gx.dtsi b/arch/arm64/boot/dts/amlogic/meson-gx.dtsi
index 620495a43363..436b875060e7 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gx.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gx.dtsi
@@ -71,6 +71,14 @@
reg = <0x0 0x10000000 0x0 0x200000>;
no-map;
};
+
+ linux,cma {
+ compatible = "shared-dma-pool";
+ reusable;
+ size = <0x0 0xbc00000>;
+ alignment = <0x0 0x400000>;
+ linux,cma-default;
+ };
};
cpus {
@@ -233,7 +241,7 @@
};
i2c_A: i2c@8500 {
- compatible = "amlogic,meson-gxbb-i2c";
+ compatible = "amlogic,meson-gx-i2c", "amlogic,meson-gxbb-i2c";
reg = <0x0 0x08500 0x0 0x20>;
interrupts = <GIC_SPI 21 IRQ_TYPE_EDGE_RISING>;
#address-cells = <1>;
@@ -279,7 +287,7 @@
};
i2c_B: i2c@87c0 {
- compatible = "amlogic,meson-gxbb-i2c";
+ compatible = "amlogic,meson-gx-i2c", "amlogic,meson-gxbb-i2c";
reg = <0x0 0x087c0 0x0 0x20>;
interrupts = <GIC_SPI 214 IRQ_TYPE_EDGE_RISING>;
#address-cells = <1>;
@@ -288,7 +296,7 @@
};
i2c_C: i2c@87e0 {
- compatible = "amlogic,meson-gxbb-i2c";
+ compatible = "amlogic,meson-gx-i2c", "amlogic,meson-gxbb-i2c";
reg = <0x0 0x087e0 0x0 0x20>;
interrupts = <GIC_SPI 215 IRQ_TYPE_EDGE_RISING>;
#address-cells = <1>;
@@ -296,6 +304,14 @@
status = "disabled";
};
+ spifc: spi@8c80 {
+ compatible = "amlogic,meson-gx-spifc", "amlogic,meson-gxbb-spifc";
+ reg = <0x0 0x08c80 0x0 0x80>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
watchdog@98d0 {
compatible = "amlogic,meson-gx-wdt", "amlogic,meson-gxbb-wdt";
reg = <0x0 0x098d0 0x0 0x10>;
@@ -317,7 +333,7 @@
};
sram: sram@c8000000 {
- compatible = "amlogic,meson-gxbb-sram", "mmio-sram";
+ compatible = "amlogic,meson-gx-sram", "amlogic,meson-gxbb-sram", "mmio-sram";
reg = <0x0 0xc8000000 0x0 0x14000>;
#address-cells = <1>;
@@ -325,12 +341,12 @@
ranges = <0 0x0 0xc8000000 0x14000>;
cpu_scp_lpri: scp-shmem@0 {
- compatible = "amlogic,meson-gxbb-scp-shmem";
+ compatible = "amlogic,meson-gx-scp-shmem", "amlogic,meson-gxbb-scp-shmem";
reg = <0x13000 0x400>;
};
cpu_scp_hpri: scp-shmem@200 {
- compatible = "amlogic,meson-gxbb-scp-shmem";
+ compatible = "amlogic,meson-gx-scp-shmem", "amlogic,meson-gxbb-scp-shmem";
reg = <0x13400 0x400>;
};
};
@@ -342,6 +358,13 @@
#size-cells = <2>;
ranges = <0x0 0x0 0x0 0xc8100000 0x0 0x100000>;
+ clkc_AO: clock-controller@040 {
+ compatible = "amlogic,gx-aoclkc", "amlogic,gxbb-aoclkc";
+ reg = <0x0 0x00040 0x0 0x4>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ };
+
uart_AO: serial@4c0 {
compatible = "amlogic,meson-uart";
reg = <0x0 0x004c0 0x0 0x14>;
@@ -358,6 +381,15 @@
status = "disabled";
};
+ i2c_AO: i2c@500 {
+ compatible = "amlogic,meson-gx-i2c", "amlogic,meson-gxbb-i2c";
+ reg = <0x0 0x500 0x0 0x20>;
+ interrupts = <GIC_SPI 195 IRQ_TYPE_EDGE_RISING>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
pwm_AO_ab: pwm@550 {
compatible = "amlogic,meson-gx-pwm", "amlogic,meson-gxbb-pwm";
reg = <0x0 0x00550 0x0 0x10>;
@@ -366,7 +398,7 @@
};
ir: ir@580 {
- compatible = "amlogic,meson-gxbb-ir";
+ compatible = "amlogic,meson-gx-ir", "amlogic,meson-gxbb-ir";
reg = <0x0 0x00580 0x0 0x40>;
interrupts = <GIC_SPI 196 IRQ_TYPE_EDGE_RISING>;
status = "disabled";
@@ -386,7 +418,6 @@
};
};
-
hiubus: hiubus@c883c000 {
compatible = "simple-bus";
reg = <0x0 0xc883c000 0x0 0x2000>;
@@ -410,7 +441,6 @@
0x0 0xc8834540 0x0 0x4>;
interrupts = <0 8 1>;
interrupt-names = "macirq";
- phy-mode = "rgmii";
status = "disabled";
};
@@ -457,6 +487,38 @@
cvbs_vdac_port: port@0 {
reg = <0>;
};
+
+ /* HDMI-TX output port */
+ hdmi_tx_port: port@1 {
+ reg = <1>;
+
+ hdmi_tx_out: endpoint {
+ remote-endpoint = <&hdmi_tx_in>;
+ };
+ };
+ };
+
+ hdmi_tx: hdmi-tx@c883a000 {
+ compatible = "amlogic,meson-gx-dw-hdmi";
+ reg = <0x0 0xc883a000 0x0 0x1c>;
+ interrupts = <GIC_SPI 57 IRQ_TYPE_EDGE_RISING>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+
+ /* VPU VENC Input */
+ hdmi_tx_venc_port: port@0 {
+ reg = <0>;
+
+ hdmi_tx_in: endpoint {
+ remote-endpoint = <&hdmi_tx_out>;
+ };
+ };
+
+ /* TMDS Output */
+ hdmi_tx_tmds_port: port@1 {
+ reg = <1>;
+ };
};
};
};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-nexbox-a95x.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-nexbox-a95x.dts
index 4cbd626a9e88..87198eafb04b 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb-nexbox-a95x.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-nexbox-a95x.dts
@@ -152,6 +152,17 @@
};
};
};
+
+ hdmi-connector {
+ compatible = "hdmi-connector";
+ type = "a";
+
+ port {
+ hdmi_connector_in: endpoint {
+ remote-endpoint = <&hdmi_tx_tmds_out>;
+ };
+ };
+ };
};
&uart_AO {
@@ -164,7 +175,24 @@
status = "okay";
pinctrl-0 = <&eth_rmii_pins>;
pinctrl-names = "default";
+
+ phy-handle = <&eth_phy0>;
phy-mode = "rmii";
+
+ snps,reset-gpio = <&gpio GPIOZ_14 0>;
+ snps,reset-delays-us = <0 10000 1000000>;
+ snps,reset-active-low;
+
+ mdio {
+ compatible = "snps,dwmac-mdio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eth_phy0: ethernet-phy@0 {
+ /* IC Plus IP101GR (0x02430c54) */
+ reg = <0>;
+ };
+ };
};
&ir {
@@ -245,3 +273,15 @@
remote-endpoint = <&cvbs_connector_in>;
};
};
+
+&hdmi_tx {
+ status = "okay";
+ pinctrl-0 = <&hdmi_hpd_pins>, <&hdmi_i2c_pins>;
+ pinctrl-names = "default";
+};
+
+&hdmi_tx_tmds_port {
+ hdmi_tx_tmds_out: endpoint {
+ remote-endpoint = <&hdmi_connector_in>;
+ };
+};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts
index c59403adb387..54a9c6a6b392 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts
@@ -96,7 +96,7 @@
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
- gpio = <&gpio_ao GPIOAO_12 GPIO_ACTIVE_HIGH>;
+ gpio = <&gpio GPIOY_12 GPIO_ACTIVE_HIGH>;
enable-active-high;
};
@@ -152,6 +152,13 @@
pinctrl-0 = <&eth_rgmii_pins>;
pinctrl-names = "default";
phy-handle = <&eth_phy0>;
+ phy-mode = "rgmii";
+
+ snps,reset-gpio = <&gpio GPIOZ_14 0>;
+ snps,reset-delays-us = <0 10000 1000000>;
+ snps,reset-active-low;
+
+ amlogic,tx-delay-ns = <2>;
mdio {
compatible = "snps,dwmac-mdio";
@@ -165,6 +172,57 @@
};
};
+&pinctrl_aobus {
+ gpio-line-names = "UART TX", "UART RX", "VCCK En", "TF 3V3/1V8 En",
+ "USB HUB nRESET", "USB OTG Power En",
+ "J7 Header Pin2", "IR In", "J7 Header Pin4",
+ "J7 Header Pin6", "J7 Header Pin5", "J7 Header Pin7",
+ "HDMI CEC", "SYS LED";
+};
+
+&pinctrl_periphs {
+ gpio-line-names = /* Bank GPIOZ */
+ "Eth MDIO", "Eth MDC", "Eth RGMII RX Clk",
+ "Eth RX DV", "Eth RX D0", "Eth RX D1", "Eth RX D2",
+ "Eth RX D3", "Eth RGMII TX Clk", "Eth TX En",
+ "Eth TX D0", "Eth TX D1", "Eth TX D2", "Eth TX D3",
+ "Eth PHY nRESET", "Eth PHY Intc",
+ /* Bank GPIOH */
+ "HDMI HPD", "HDMI DDC SDA", "HDMI DDC SCL", "",
+ /* Bank BOOT */
+ "eMMC D0", "eMMC D1", "eMMC D2", "eMMC D3", "eMMC D4",
+ "eMMC D5", "eMMC D6", "eMMC D7", "eMMC Clk",
+ "eMMC Reset", "eMMC CMD",
+ "", "", "", "", "", "", "",
+ /* Bank CARD */
+ "SDCard D1", "SDCard D0", "SDCard CLK", "SDCard CMD",
+ "SDCard D3", "SDCard D2", "SDCard Det",
+ /* Bank GPIODV */
+ "", "", "", "", "", "", "", "", "", "", "", "", "",
+ "", "", "", "", "", "", "", "", "", "", "",
+ "I2C A SDA", "I2C A SCK", "I2C B SDA", "I2C B SCK",
+ "PWM D", "PWM B",
+ /* Bank GPIOY */
+ "Revision Bit0", "Revision Bit1", "",
+ "J2 Header Pin35", "", "", "", "J2 Header Pin36",
+ "J2 Header Pin31", "", "", "", "TF VDD En",
+ "J2 Header Pin32", "J2 Header Pin26", "", "",
+ /* Bank GPIOX */
+ "J2 Header Pin29", "J2 Header Pin24",
+ "J2 Header Pin23", "J2 Header Pin22",
+ "J2 Header Pin21", "J2 Header Pin18",
+ "J2 Header Pin33", "J2 Header Pin19",
+ "J2 Header Pin16", "J2 Header Pin15",
+ "J2 Header Pin12", "J2 Header Pin13",
+ "J2 Header Pin8", "J2 Header Pin10",
+ "", "", "", "", "",
+ "J2 Header Pin11", "", "J2 Header Pin7",
+ /* Bank GPIOCLK */
+ "", "", "", "",
+ /* GPIO_TEST_N */
+ "";
+};
+
&ir {
status = "okay";
pinctrl-0 = <&remote_input_ao_pins>;
@@ -177,6 +235,21 @@
pinctrl-names = "default";
};
+&gpio_ao {
+ /*
+ * WARNING: The USB Hub on the Odroid-C2 needs a reset signal
+ * to be turned high in order to be detected by the USB Controller
+ * This signal should be handled by a USB specific power sequence
+ * in order to reset the Hub when USB bus is powered down.
+ */
+ usb-hub {
+ gpio-hog;
+ gpios = <GPIOAO_4 GPIO_ACTIVE_HIGH>;
+ output-high;
+ line-name = "usb-hub-reset";
+ };
+};
+
&usb0_phy {
status = "okay";
phy-supply = <&usb_otg_pwr>;
@@ -194,6 +267,11 @@
status = "okay";
};
+&saradc {
+ status = "okay";
+ vref-supply = <&vcc1v8>;
+};
+
/* SD */
&sd_emmc_b {
status = "okay";
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-p200.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-p200.dts
index fc0e86cb4cde..2054a474e0a9 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb-p200.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-p200.dts
@@ -96,6 +96,31 @@
};
};
+&ethmac {
+ status = "okay";
+ pinctrl-0 = <&eth_rgmii_pins>;
+ pinctrl-names = "default";
+ phy-handle = <&eth_phy0>;
+ phy-mode = "rgmii";
+
+ amlogic,tx-delay-ns = <2>;
+
+ snps,reset-gpio = <&gpio GPIOZ_14 0>;
+ snps,reset-delays-us = <0 10000 1000000>;
+ snps,reset-active-low;
+
+ mdio {
+ compatible = "snps,dwmac-mdio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eth_phy0: ethernet-phy@3 {
+ /* Micrel KSZ9031 (0x00221620) */
+ reg = <3>;
+ };
+ };
+};
+
&i2c_B {
status = "okay";
pinctrl-0 = <&i2c_b_pins>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-p201.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-p201.dts
index 39bb037a3e47..ae3194663d64 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb-p201.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-p201.dts
@@ -50,3 +50,14 @@
compatible = "amlogic,p201", "amlogic,meson-gxbb";
model = "Amlogic Meson GXBB P201 Development Board";
};
+
+&ethmac {
+ status = "okay";
+ pinctrl-0 = <&eth_rmii_pins>;
+ pinctrl-names = "default";
+ phy-mode = "rmii";
+
+ snps,reset-gpio = <&gpio GPIOZ_14 0>;
+ snps,reset-delays-us = <0 10000 1000000>;
+ snps,reset-active-low;
+};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-p20x.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxbb-p20x.dtsi
index 4a96e0f6f926..3c6c0b7f4187 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb-p20x.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-p20x.dtsi
@@ -135,6 +135,17 @@
};
};
};
+
+ hdmi-connector {
+ compatible = "hdmi-connector";
+ type = "a";
+
+ port {
+ hdmi_connector_in: endpoint {
+ remote-endpoint = <&hdmi_tx_tmds_out>;
+ };
+ };
+ };
};
/* This UART is brought out to the DB9 connector */
@@ -144,12 +155,6 @@
pinctrl-names = "default";
};
-&ethmac {
- status = "okay";
- pinctrl-0 = <&eth_rgmii_pins>;
- pinctrl-names = "default";
-};
-
&ir {
status = "okay";
pinctrl-0 = <&remote_input_ao_pins>;
@@ -250,3 +255,15 @@
remote-endpoint = <&cvbs_connector_in>;
};
};
+
+&hdmi_tx {
+ status = "okay";
+ pinctrl-0 = <&hdmi_hpd_pins>, <&hdmi_i2c_pins>;
+ pinctrl-names = "default";
+};
+
+&hdmi_tx_tmds_port {
+ hdmi_tx_tmds_out: endpoint {
+ remote-endpoint = <&hdmi_connector_in>;
+ };
+};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi
index 86709929fd20..aefa66dff72d 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi
@@ -115,7 +115,6 @@
status = "okay";
pinctrl-0 = <&uart_ao_a_pins>;
pinctrl-names = "default";
-
};
&ir {
@@ -128,6 +127,26 @@
status = "okay";
pinctrl-0 = <&eth_rgmii_pins>;
pinctrl-names = "default";
+
+ phy-handle = <&eth_phy0>;
+ phy-mode = "rgmii";
+
+ amlogic,tx-delay-ns = <2>;
+
+ snps,reset-gpio = <&gpio GPIOZ_14 0>;
+ snps,reset-delays-us = <0 10000 1000000>;
+ snps,reset-active-low;
+
+ mdio {
+ compatible = "snps,dwmac-mdio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eth_phy0: ethernet-phy@0 {
+ /* Realtek RTL8211F (0x001cc916) */
+ reg = <0>;
+ };
+ };
};
&usb0_phy {
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek-hub.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek-hub.dts
index 56f855901262..f057fb48fee5 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek-hub.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek-hub.dts
@@ -64,3 +64,29 @@
status = "disabled";
};
};
+
+&ethmac {
+ status = "okay";
+ pinctrl-0 = <&eth_rgmii_pins>;
+ pinctrl-names = "default";
+
+ phy-handle = <&eth_phy0>;
+ phy-mode = "rgmii";
+
+ amlogic,tx-delay-ns = <2>;
+
+ snps,reset-gpio = <&gpio GPIOZ_14 0>;
+ snps,reset-delays-us = <0 10000 1000000>;
+ snps,reset-active-low;
+
+ mdio {
+ compatible = "snps,dwmac-mdio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eth_phy0: ethernet-phy@0 {
+ /* Realtek RTL8211F (0x001cc916) */
+ reg = <0>;
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek-play2.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek-play2.dts
index ea79fdd2c248..743acb5f5d06 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek-play2.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek-play2.dts
@@ -87,6 +87,32 @@
};
};
+&ethmac {
+ status = "okay";
+ pinctrl-0 = <&eth_rgmii_pins>;
+ pinctrl-names = "default";
+
+ phy-handle = <&eth_phy0>;
+ phy-mode = "rgmii";
+
+ amlogic,tx-delay-ns = <2>;
+
+ snps,reset-gpio = <&gpio GPIOZ_14 0>;
+ snps,reset-delays-us = <0 10000 1000000>;
+ snps,reset-active-low;
+
+ mdio {
+ compatible = "snps,dwmac-mdio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eth_phy0: ethernet-phy@0 {
+ /* Realtek RTL8211F (0x001cc916) */
+ reg = <0>;
+ };
+ };
+};
+
&i2c_A {
status = "okay";
pinctrl-0 = <&i2c_a_pins>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi
index a375cb21cc8b..86105a69690a 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi
@@ -97,17 +97,6 @@
};
};
-&cbus {
- spifc: spi@8c80 {
- compatible = "amlogic,meson-gxbb-spifc";
- reg = <0x0 0x08c80 0x0 0x80>;
- #address-cells = <1>;
- #size-cells = <0>;
- clocks = <&clkc CLKID_SPI>;
- status = "disabled";
- };
-};
-
&ethmac {
clocks = <&clkc CLKID_ETH>,
<&clkc CLKID_FCLK_DIV2>,
@@ -129,6 +118,7 @@
reg-names = "mux", "pull", "gpio";
gpio-controller;
#gpio-cells = <2>;
+ gpio-ranges = <&pinctrl_aobus 0 0 14>;
};
uart_ao_a_pins: uart_ao_a {
@@ -203,30 +193,62 @@
function = "pwm_ao_b";
};
};
- };
- clkc_AO: clock-controller@040 {
- compatible = "amlogic,gxbb-aoclkc";
- reg = <0x0 0x00040 0x0 0x4>;
- #clock-cells = <1>;
- #reset-cells = <1>;
- };
+ i2s_am_clk_pins: i2s_am_clk {
+ mux {
+ groups = "i2s_am_clk";
+ function = "i2s_out_ao";
+ };
+ };
- pwm_ab_AO: pwm@550 {
- compatible = "amlogic,meson-gxbb-pwm";
- reg = <0x0 0x0550 0x0 0x10>;
- #pwm-cells = <3>;
- status = "disabled";
- };
+ i2s_out_ao_clk_pins: i2s_out_ao_clk {
+ mux {
+ groups = "i2s_out_ao_clk";
+ function = "i2s_out_ao";
+ };
+ };
- i2c_AO: i2c@500 {
- compatible = "amlogic,meson-gxbb-i2c";
- reg = <0x0 0x500 0x0 0x20>;
- interrupts = <GIC_SPI 195 IRQ_TYPE_EDGE_RISING>;
- clocks = <&clkc CLKID_AO_I2C>;
- #address-cells = <1>;
- #size-cells = <0>;
- status = "disabled";
+ i2s_out_lr_clk_pins: i2s_out_lr_clk {
+ mux {
+ groups = "i2s_out_lr_clk";
+ function = "i2s_out_ao";
+ };
+ };
+
+ i2s_out_ch01_ao_pins: i2s_out_ch01_ao {
+ mux {
+ groups = "i2s_out_ch01_ao";
+ function = "i2s_out_ao";
+ };
+ };
+
+ i2s_out_ch23_ao_pins: i2s_out_ch23_ao {
+ mux {
+ groups = "i2s_out_ch23_ao";
+ function = "i2s_out_ao";
+ };
+ };
+
+ i2s_out_ch45_ao_pins: i2s_out_ch45_ao {
+ mux {
+ groups = "i2s_out_ch45_ao";
+ function = "i2s_out_ao";
+ };
+ };
+
+ spdif_out_ao_6_pins: spdif_out_ao_6 {
+ mux {
+ groups = "spdif_out_ao_6";
+ function = "spdif_out_ao";
+ };
+ };
+
+ spdif_out_ao_13_pins: spdif_out_ao_13 {
+ mux {
+ groups = "spdif_out_ao_13";
+ function = "spdif_out_ao";
+ };
+ };
};
};
@@ -245,6 +267,7 @@
reg-names = "mux", "pull", "pull-enable", "gpio";
gpio-controller;
#gpio-cells = <2>;
+ gpio-ranges = <&pinctrl_periphs 0 14 120>;
};
emmc_pins: emmc {
@@ -467,6 +490,34 @@
function = "hdmi_i2c";
};
};
+
+ i2sout_ch23_y_pins: i2sout_ch23_y {
+ mux {
+ groups = "i2sout_ch23_y";
+ function = "i2s_out";
+ };
+ };
+
+ i2sout_ch45_y_pins: i2sout_ch45_y {
+ mux {
+ groups = "i2sout_ch45_y";
+ function = "i2s_out";
+ };
+ };
+
+ i2sout_ch67_y_pins: i2sout_ch67_y {
+ mux {
+ groups = "i2sout_ch67_y";
+ function = "i2s_out";
+ };
+ };
+
+ spdif_out_y_pins: spdif_out_y {
+ mux {
+ groups = "spdif_out_y";
+ function = "spdif_out";
+ };
+ };
};
};
@@ -478,10 +529,51 @@
};
};
+&apb {
+ mali: gpu@c0000 {
+ compatible = "amlogic,meson-gxbb-mali", "arm,mali-450";
+ reg = <0x0 0xc0000 0x0 0x40000>;
+ interrupts = <GIC_SPI 160 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 161 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 162 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 163 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 164 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 165 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 166 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 167 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 169 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "gp", "gpmmu", "pp", "pmu",
+ "pp0", "ppmmu0", "pp1", "ppmmu1",
+ "pp2", "ppmmu2";
+ clocks = <&clkc CLKID_CLK81>, <&clkc CLKID_MALI>;
+ clock-names = "bus", "core";
+
+ /*
+ * Mali clocking is provided by two identical clock paths
+ * MALI_0 and MALI_1 muxed to a single clock by a glitch
+ * free mux to safely change frequency while running.
+ */
+ assigned-clocks = <&clkc CLKID_MALI_0_SEL>,
+ <&clkc CLKID_MALI_0>,
+ <&clkc CLKID_MALI>; /* Glitch free mux */
+ assigned-clock-parents = <&clkc CLKID_FCLK_DIV3>,
+ <0>, /* Do Nothing */
+ <&clkc CLKID_MALI_0>;
+ assigned-clock-rates = <0>, /* Do Nothing */
+ <666666666>,
+ <0>; /* Do Nothing */
+ };
+};
+
&i2c_A {
clocks = <&clkc CLKID_I2C>;
};
+&i2c_AO {
+ clocks = <&clkc CLKID_AO_I2C>;
+};
+
&i2c_B {
clocks = <&clkc CLKID_I2C>;
};
@@ -521,6 +613,10 @@
clock-names = "core", "clkin0", "clkin1";
};
+&spifc {
+ clocks = <&clkc CLKID_SPI>;
+};
+
&vpu {
compatible = "amlogic,meson-gxbb-vpu", "amlogic,meson-gx-vpu";
};
@@ -529,3 +625,15 @@
clocks = <&clkc CLKID_RNG0>;
clock-names = "core";
};
+
+&hdmi_tx {
+ compatible = "amlogic,meson-gxbb-dw-hdmi", "amlogic,meson-gx-dw-hdmi";
+ resets = <&reset RESET_HDMITX_CAPB3>,
+ <&reset RESET_HDMI_SYSTEM_RESET>,
+ <&reset RESET_HDMI_TX>;
+ reset-names = "hdmitx_apb", "hdmitx", "hdmitx_phy";
+ clocks = <&clkc CLKID_HDMI_PCLK>,
+ <&clkc CLKID_CLK81>,
+ <&clkc CLKID_GCLK_VENCI_INT0>;
+ clock-names = "isfr", "iahb", "venci";
+};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-mali.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxl-mali.dtsi
new file mode 100644
index 000000000000..f06cc234693b
--- /dev/null
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl-mali.dtsi
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2017 BayLibre SAS
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ *
+ * SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+ */
+
+&apb {
+ mali: gpu@c0000 {
+ compatible = "amlogic,meson-gxbb-mali", "arm,mali-450";
+ reg = <0x0 0xc0000 0x0 0x40000>;
+ interrupts = <GIC_SPI 160 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 161 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 162 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 163 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 164 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 165 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 166 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 167 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 169 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "gp", "gpmmu", "pp", "pmu",
+ "pp0", "ppmmu0", "pp1", "ppmmu1",
+ "pp2", "ppmmu2";
+ clocks = <&clkc CLKID_CLK81>, <&clkc CLKID_MALI>;
+ clock-names = "bus", "core";
+
+ /*
+ * Mali clocking is provided by two identical clock paths
+ * MALI_0 and MALI_1 muxed to a single clock by a glitch
+ * free mux to safely change frequency while running.
+ */
+ assigned-clocks = <&clkc CLKID_MALI_0_SEL>,
+ <&clkc CLKID_MALI_0>,
+ <&clkc CLKID_MALI>; /* Glitch free mux */
+ assigned-clock-parents = <&clkc CLKID_FCLK_DIV3>,
+ <0>, /* Do Nothing */
+ <&clkc CLKID_MALI_0>;
+ assigned-clock-rates = <0>, /* Do Nothing */
+ <666666666>,
+ <0>; /* Do Nothing */
+ };
+};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905d-p230.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905d-p230.dts
index f66939cacd37..f9fbfdad8dde 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905d-p230.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905d-p230.dts
@@ -43,12 +43,47 @@
/dts-v1/;
+#include <dt-bindings/input/input.h>
+
#include "meson-gxl-s905d.dtsi"
#include "meson-gx-p23x-q20x.dtsi"
/ {
compatible = "amlogic,p230", "amlogic,s905d", "amlogic,meson-gxl";
model = "Amlogic Meson GXL (S905D) P230 Development Board";
+
+ adc-keys {
+ compatible = "adc-keys";
+ io-channels = <&saradc 0>;
+ io-channel-names = "buttons";
+ keyup-threshold-microvolt = <1710000>;
+
+ button-function {
+ label = "Update";
+ linux,code = <KEY_VENDOR>;
+ press-threshold-microvolt = <10000>;
+ };
+ };
+
+ gpio-keys-polled {
+ compatible = "gpio-keys-polled";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ poll-interval = <100>;
+
+ button@0 {
+ label = "power";
+ linux,code = <KEY_POWER>;
+ gpios = <&gpio_ao GPIOAO_2 GPIO_ACTIVE_LOW>;
+ };
+ };
+
+ vddio_ao18: regulator-vddio_ao18 {
+ compatible = "regulator-fixed";
+ regulator-name = "VDDIO_AO18";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
};
/* P230 has exclusive choice between internal or external PHY */
@@ -59,6 +94,8 @@
/* Select external PHY by default */
phy-handle = <&external_phy>;
+ amlogic,tx-delay-ns = <2>;
+
/* External PHY reset is shared with internal PHY Led signals */
snps,reset-gpio = <&gpio GPIOZ_14 0>;
snps,reset-delays-us = <0 10000 1000000>;
@@ -75,3 +112,8 @@
max-speed = <1000>;
};
};
+
+&saradc {
+ status = "okay";
+ vref-supply = <&vddio_ao18>;
+};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905d.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxl-s905d.dtsi
index 615308e55576..5a90e30c1006 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905d.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905d.dtsi
@@ -42,6 +42,7 @@
*/
#include "meson-gxl.dtsi"
+#include "meson-gxl-mali.dtsi"
/ {
compatible = "amlogic,s905d", "amlogic,meson-gxl";
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-hwacom-amazetv.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-hwacom-amazetv.dts
new file mode 100644
index 000000000000..2a5804ce7f4b
--- /dev/null
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-hwacom-amazetv.dts
@@ -0,0 +1,164 @@
+/*
+ * Copyright (c) 2017 Carlo Caione
+ * Copyright (c) 2016 BayLibre, Inc.
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ *
+ * SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+ */
+
+/dts-v1/;
+
+#include "meson-gxl-s905x.dtsi"
+
+/ {
+ compatible = "hwacom,amazetv", "amlogic,s905x", "amlogic,meson-gxl";
+ model = "Hwacom AmazeTV (S905X)";
+
+ aliases {
+ serial0 = &uart_AO;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ memory@0 {
+ device_type = "memory";
+ reg = <0x0 0x0 0x0 0x80000000>;
+ };
+
+ vddio_card: gpio-regulator {
+ compatible = "regulator-gpio";
+
+ regulator-name = "VDDIO_CARD";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+
+ gpios = <&gpio_ao GPIOAO_5 GPIO_ACTIVE_HIGH>;
+ gpios-states = <1>;
+
+ /* Based on P200 schematics, signal CARD_1.8V/3.3V_CTR */
+ states = <1800000 0
+ 3300000 1>;
+ };
+
+ vddio_boot: regulator-vddio_boot {
+ compatible = "regulator-fixed";
+ regulator-name = "VDDIO_BOOT";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ vddao_3v3: regulator-vddao_3v3 {
+ compatible = "regulator-fixed";
+ regulator-name = "VDDAO_3V3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ vcc_3v3: regulator-vcc_3v3 {
+ compatible = "regulator-fixed";
+ regulator-name = "VCC_3V3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ emmc_pwrseq: emmc-pwrseq {
+ compatible = "mmc-pwrseq-emmc";
+ reset-gpios = <&gpio BOOT_9 GPIO_ACTIVE_LOW>;
+ };
+
+ wifi32k: wifi32k {
+ compatible = "pwm-clock";
+ #clock-cells = <0>;
+ clock-frequency = <32768>;
+ pwms = <&pwm_ef 0 30518 0>; /* PWM_E at 32.768KHz */
+ };
+
+ sdio_pwrseq: sdio-pwrseq {
+ compatible = "mmc-pwrseq-simple";
+ reset-gpios = <&gpio GPIOX_6 GPIO_ACTIVE_LOW>;
+ clocks = <&wifi32k>;
+ clock-names = "ext_clock";
+ };
+
+ cvbs-connector {
+ compatible = "composite-video-connector";
+
+ port {
+ cvbs_connector_in: endpoint {
+ remote-endpoint = <&cvbs_vdac_out>;
+ };
+ };
+ };
+};
+
+&cvbs_vdac_port {
+ cvbs_vdac_out: endpoint {
+ remote-endpoint = <&cvbs_connector_in>;
+ };
+};
+
+&ethmac {
+ status = "okay";
+ phy-mode = "rmii";
+ phy-handle = <&internal_phy>;
+};
+
+&ir {
+ status = "okay";
+ pinctrl-0 = <&remote_input_ao_pins>;
+ pinctrl-names = "default";
+};
+
+&pwm_ef {
+ status = "okay";
+ pinctrl-0 = <&pwm_e_pins>;
+ pinctrl-names = "default";
+ clocks = <&clkc CLKID_FCLK_DIV4>;
+ clock-names = "clkin0";
+};
+
+/* SD card */
+&sd_emmc_b {
+ status = "okay";
+ pinctrl-0 = <&sdcard_pins>;
+ pinctrl-names = "default";
+
+ bus-width = <4>;
+ cap-sd-highspeed;
+ max-frequency = <100000000>;
+ disable-wp;
+
+ cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>;
+ cd-inverted;
+
+ vmmc-supply = <&vddao_3v3>;
+ vqmmc-supply = <&vddio_card>;
+};
+
+/* eMMC */
+&sd_emmc_c {
+ status = "okay";
+ pinctrl-0 = <&emmc_pins>;
+ pinctrl-names = "default";
+
+ bus-width = <8>;
+ cap-sd-highspeed;
+ cap-mmc-highspeed;
+ max-frequency = <100000000>;
+ non-removable;
+ disable-wp;
+ mmc-ddr-1_8v;
+ mmc-hs200-1_8v;
+
+ mmc-pwrseq = <&emmc_pwrseq>;
+ vmmc-supply = <&vcc_3v3>;
+ vqmmc-supply = <&vddio_boot>;
+};
+
+&uart_AO {
+ status = "okay";
+ pinctrl-0 = <&uart_ao_a_pins>;
+ pinctrl-names = "default";
+};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-khadas-vim.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-khadas-vim.dts
new file mode 100644
index 000000000000..3c8b0b51ef27
--- /dev/null
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-khadas-vim.dts
@@ -0,0 +1,114 @@
+/*
+ * Copyright (c) 2017 Martin Blumenstingl <martin.blumenstingl@googlemail.com>.
+ *
+ * SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/input/input.h>
+
+#include "meson-gxl-s905x-p212.dtsi"
+
+/ {
+ compatible = "khadas,vim", "amlogic,s905x", "amlogic,meson-gxl";
+ model = "Khadas VIM";
+
+ adc-keys {
+ compatible = "adc-keys";
+ io-channels = <&saradc 0>;
+ io-channel-names = "buttons";
+ keyup-threshold-microvolt = <1710000>;
+
+ button-function {
+ label = "Function";
+ linux,code = <KEY_FN>;
+ press-threshold-microvolt = <10000>;
+ };
+ };
+
+ aliases {
+ serial2 = &uart_AO_B;
+ };
+
+ gpio-keys-polled {
+ compatible = "gpio-keys-polled";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ poll-interval = <100>;
+
+ button@0 {
+ label = "power";
+ linux,code = <KEY_POWER>;
+ gpios = <&gpio_ao GPIOAO_2 GPIO_ACTIVE_LOW>;
+ };
+ };
+
+ pwmleds {
+ compatible = "pwm-leds";
+
+ power {
+ label = "vim:red:power";
+ pwms = <&pwm_AO_ab 1 7812500 0>;
+ max-brightness = <255>;
+ linux,default-trigger = "default-on";
+ };
+ };
+};
+
+&i2c_A {
+ status = "okay";
+ pinctrl-0 = <&i2c_a_pins>;
+ pinctrl-names = "default";
+};
+
+&i2c_B {
+ status = "okay";
+ pinctrl-0 = <&i2c_b_pins>;
+ pinctrl-names = "default";
+
+ rtc: rtc@51 {
+ /* has to be enabled manually when a battery is connected: */
+ status = "disabled";
+ compatible = "haoyu,hym8563";
+ reg = <0x51>;
+ #clock-cells = <0>;
+ clock-frequency = <32768>;
+ clock-output-names = "xin32k";
+ };
+};
+
+&ir {
+ linux,rc-map-name = "rc-geekbox";
+};
+
+&pwm_AO_ab {
+ status = "okay";
+ pinctrl-0 = <&pwm_ao_a_3_pins>, <&pwm_ao_b_pins>;
+ pinctrl-names = "default";
+ clocks = <&clkc CLKID_FCLK_DIV4>;
+ clock-names = "clkin0";
+};
+
+&pwm_ef {
+ pinctrl-0 = <&pwm_e_pins>, <&pwm_f_clk_pins>;
+};
+
+&sd_emmc_a {
+ brcmf: bcrmf@1 {
+ reg = <1>;
+ compatible = "brcm,bcm4329-fmac";
+ };
+};
+
+/* This is brought out on the Linux_RX (18) and Linux_TX (19) pins: */
+&uart_AO {
+ status = "okay";
+};
+
+/* This is brought out on the UART_RX_AO_B (15) and UART_TX_AO_B (16) pins: */
+&uart_AO_B {
+ status = "okay";
+ pinctrl-0 = <&uart_ao_b_pins>;
+ pinctrl-names = "default";
+};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-nexbox-a95x.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-nexbox-a95x.dts
index cea4a3eded9b..8873c058fad2 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-nexbox-a95x.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-nexbox-a95x.dts
@@ -127,6 +127,17 @@
};
};
};
+
+ hdmi-connector {
+ compatible = "hdmi-connector";
+ type = "a";
+
+ port {
+ hdmi_connector_in: endpoint {
+ remote-endpoint = <&hdmi_tx_tmds_out>;
+ };
+ };
+ };
};
&uart_AO {
@@ -219,3 +230,15 @@
remote-endpoint = <&cvbs_connector_in>;
};
};
+
+&hdmi_tx {
+ status = "okay";
+ pinctrl-0 = <&hdmi_hpd_pins>, <&hdmi_i2c_pins>;
+ pinctrl-names = "default";
+};
+
+&hdmi_tx_tmds_port {
+ hdmi_tx_tmds_out: endpoint {
+ remote-endpoint = <&hdmi_connector_in>;
+ };
+};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dts
index 9639f012b02b..db31e093f40e 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dts
@@ -43,23 +43,26 @@
/dts-v1/;
-#include "meson-gxl-s905x.dtsi"
+#include "meson-gxl-s905x-p212.dtsi"
/ {
compatible = "amlogic,p212", "amlogic,s905x", "amlogic,meson-gxl";
model = "Amlogic Meson GXL (S905X) P212 Development Board";
- aliases {
- serial0 = &uart_AO;
- };
+ cvbs-connector {
+ compatible = "composite-video-connector";
- chosen {
- stdout-path = "serial0:115200n8";
+ port {
+ cvbs_connector_in: endpoint {
+ remote-endpoint = <&cvbs_vdac_out>;
+ };
+ };
};
+};
- memory@0 {
- device_type = "memory";
- reg = <0x0 0x0 0x0 0x80000000>;
+&cvbs_vdac_port {
+ cvbs_vdac_out: endpoint {
+ remote-endpoint = <&cvbs_connector_in>;
};
};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi
new file mode 100644
index 000000000000..f3eea8e89d12
--- /dev/null
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi
@@ -0,0 +1,173 @@
+/*
+ * Copyright (c) 2016 Martin Blumenstingl <martin.blumenstingl@googlemail.com>.
+ * Based on meson-gx-p23x-q20x.dtsi:
+ * - Copyright (c) 2016 Endless Computers, Inc.
+ * Author: Carlo Caione <carlo@endlessm.com>
+ * - Copyright (c) 2016 BayLibre, SAS.
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ *
+ * SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+ */
+
+/* Common DTSI for devices which are based on the P212 reference board. */
+
+#include "meson-gxl-s905x.dtsi"
+
+/ {
+ aliases {
+ serial0 = &uart_AO;
+ serial1 = &uart_A;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ memory@0 {
+ device_type = "memory";
+ reg = <0x0 0x0 0x0 0x80000000>;
+ };
+
+ vddio_boot: regulator-vddio_boot {
+ compatible = "regulator-fixed";
+ regulator-name = "VDDIO_BOOT";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ vddao_3v3: regulator-vddao_3v3 {
+ compatible = "regulator-fixed";
+ regulator-name = "VDDAO_3V3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ vddio_ao18: regulator-vddio_ao18 {
+ compatible = "regulator-fixed";
+ regulator-name = "VDDIO_AO18";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ vcc_3v3: regulator-vcc_3v3 {
+ compatible = "regulator-fixed";
+ regulator-name = "VCC_3V3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ emmc_pwrseq: emmc-pwrseq {
+ compatible = "mmc-pwrseq-emmc";
+ reset-gpios = <&gpio BOOT_9 GPIO_ACTIVE_LOW>;
+ };
+
+ wifi32k: wifi32k {
+ compatible = "pwm-clock";
+ #clock-cells = <0>;
+ clock-frequency = <32768>;
+ pwms = <&pwm_ef 0 30518 0>; /* PWM_E at 32.768KHz */
+ };
+
+ sdio_pwrseq: sdio-pwrseq {
+ compatible = "mmc-pwrseq-simple";
+ reset-gpios = <&gpio GPIOX_6 GPIO_ACTIVE_LOW>;
+ clocks = <&wifi32k>;
+ clock-names = "ext_clock";
+ };
+};
+
+&ethmac {
+ status = "okay";
+};
+
+&ir {
+ status = "okay";
+ pinctrl-0 = <&remote_input_ao_pins>;
+ pinctrl-names = "default";
+};
+
+&saradc {
+ status = "okay";
+ vref-supply = <&vddio_ao18>;
+};
+
+/* Wireless SDIO Module */
+&sd_emmc_a {
+ status = "okay";
+ pinctrl-0 = <&sdio_pins>;
+ pinctrl-names = "default";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ bus-width = <4>;
+ cap-sd-highspeed;
+ max-frequency = <100000000>;
+
+ non-removable;
+ disable-wp;
+
+ mmc-pwrseq = <&sdio_pwrseq>;
+
+ vmmc-supply = <&vddao_3v3>;
+ vqmmc-supply = <&vddio_boot>;
+};
+
+/* SD card */
+&sd_emmc_b {
+ status = "okay";
+ pinctrl-0 = <&sdcard_pins>;
+ pinctrl-names = "default";
+
+ bus-width = <4>;
+ cap-sd-highspeed;
+ max-frequency = <100000000>;
+ disable-wp;
+
+ cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>;
+ cd-inverted;
+
+ vmmc-supply = <&vddao_3v3>;
+ vqmmc-supply = <&vddio_boot>;
+};
+
+/* eMMC */
+&sd_emmc_c {
+ status = "okay";
+ pinctrl-0 = <&emmc_pins>;
+ pinctrl-names = "default";
+
+ bus-width = <8>;
+ cap-sd-highspeed;
+ cap-mmc-highspeed;
+ max-frequency = <200000000>;
+ non-removable;
+ disable-wp;
+ mmc-ddr-1_8v;
+ mmc-hs200-1_8v;
+
+ mmc-pwrseq = <&emmc_pwrseq>;
+ vmmc-supply = <&vcc_3v3>;
+ vqmmc-supply = <&vddio_boot>;
+};
+
+&pwm_ef {
+ status = "okay";
+ pinctrl-0 = <&pwm_e_pins>;
+ pinctrl-names = "default";
+ clocks = <&clkc CLKID_FCLK_DIV4>;
+ clock-names = "clkin0";
+};
+
+/* This is connected to the Bluetooth module: */
+&uart_A {
+ status = "okay";
+ pinctrl-0 = <&uart_a_pins>, <&uart_a_cts_rts_pins>;
+ pinctrl-names = "default";
+ uart-has-rtscts;
+};
+
+&uart_AO {
+ status = "okay";
+ pinctrl-0 = <&uart_ao_a_pins>;
+ pinctrl-names = "default";
+};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x.dtsi
index 08237ee1e362..0f78d836edaf 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x.dtsi
@@ -42,6 +42,7 @@
*/
#include "meson-gxl.dtsi"
+#include "meson-gxl-mali.dtsi"
/ {
compatible = "amlogic,s905x", "amlogic,meson-gxl";
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi
index fe11b5fc61f7..d8e096dff10a 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi
@@ -44,6 +44,7 @@
#include "meson-gx.dtsi"
#include <dt-bindings/clock/gxbb-clkc.h>
#include <dt-bindings/gpio/meson-gxl-gpio.h>
+#include <dt-bindings/reset/amlogic,meson-gxbb-reset.h>
/ {
compatible = "amlogic,meson-gxl";
@@ -79,6 +80,7 @@
reg-names = "mux", "pull", "gpio";
gpio-controller;
#gpio-cells = <2>;
+ gpio-ranges = <&pinctrl_aobus 0 0 14>;
};
uart_ao_a_pins: uart_ao_a {
@@ -103,6 +105,13 @@
};
};
+ uart_ao_b_0_1_pins: uart_ao_b_0_1 {
+ mux {
+ groups = "uart_tx_ao_b_0", "uart_rx_ao_b_1";
+ function = "uart_ao_b";
+ };
+ };
+
uart_ao_b_cts_rts_pins: uart_ao_b_cts_rts {
mux {
groups = "uart_cts_ao_b",
@@ -118,12 +127,69 @@
};
};
+ i2c_ao_pins: i2c_ao {
+ mux {
+ groups = "i2c_sck_ao",
+ "i2c_sda_ao";
+ function = "i2c_ao";
+ };
+ };
+
+ pwm_ao_a_3_pins: pwm_ao_a_3 {
+ mux {
+ groups = "pwm_ao_a_3";
+ function = "pwm_ao_a";
+ };
+ };
+
+ pwm_ao_a_8_pins: pwm_ao_a_8 {
+ mux {
+ groups = "pwm_ao_a_8";
+ function = "pwm_ao_a";
+ };
+ };
+
pwm_ao_b_pins: pwm_ao_b {
mux {
groups = "pwm_ao_b";
function = "pwm_ao_b";
};
};
+
+ pwm_ao_b_6_pins: pwm_ao_b_6 {
+ mux {
+ groups = "pwm_ao_b_6";
+ function = "pwm_ao_b";
+ };
+ };
+
+ i2s_out_ch23_ao_pins: i2s_out_ch23_ao {
+ mux {
+ groups = "i2s_out_ch23_ao";
+ function = "i2s_out_ao";
+ };
+ };
+
+ i2s_out_ch45_ao_pins: i2s_out_ch45_ao {
+ mux {
+ groups = "i2s_out_ch45_ao";
+ function = "i2s_out_ao";
+ };
+ };
+
+ spdif_out_ao_6_pins: spdif_out_ao_6 {
+ mux {
+ groups = "spdif_out_ao_6";
+ function = "spdif_out_ao";
+ };
+ };
+
+ spdif_out_ao_9_pins: spdif_out_ao_9 {
+ mux {
+ groups = "spdif_out_ao_9";
+ function = "spdif_out_ao";
+ };
+ };
};
};
@@ -142,6 +208,7 @@
reg-names = "mux", "pull", "pull-enable", "gpio";
gpio-controller;
#gpio-cells = <2>;
+ gpio-ranges = <&pinctrl_periphs 0 14 101>;
};
emmc_pins: emmc {
@@ -154,6 +221,16 @@
};
};
+ nor_pins: nor {
+ mux {
+ groups = "nor_d",
+ "nor_q",
+ "nor_c",
+ "nor_cs";
+ function = "nor";
+ };
+ };
+
sdcard_pins: sdcard {
mux {
groups = "sdcard_d0",
@@ -277,6 +354,34 @@
};
};
+ pwm_a_pins: pwm_a {
+ mux {
+ groups = "pwm_a";
+ function = "pwm_a";
+ };
+ };
+
+ pwm_b_pins: pwm_b {
+ mux {
+ groups = "pwm_b";
+ function = "pwm_b";
+ };
+ };
+
+ pwm_c_pins: pwm_c {
+ mux {
+ groups = "pwm_c";
+ function = "pwm_c";
+ };
+ };
+
+ pwm_d_pins: pwm_d {
+ mux {
+ groups = "pwm_d";
+ function = "pwm_d";
+ };
+ };
+
pwm_e_pins: pwm_e {
mux {
groups = "pwm_e";
@@ -284,6 +389,20 @@
};
};
+ pwm_f_clk_pins: pwm_f_clk {
+ mux {
+ groups = "pwm_f_clk";
+ function = "pwm_f";
+ };
+ };
+
+ pwm_f_x_pins: pwm_f_x {
+ mux {
+ groups = "pwm_f_x";
+ function = "pwm_f";
+ };
+ };
+
hdmi_hpd_pins: hdmi_hpd {
mux {
groups = "hdmi_hpd";
@@ -297,6 +416,61 @@
function = "hdmi_i2c";
};
};
+
+ i2s_am_clk_pins: i2s_am_clk {
+ mux {
+ groups = "i2s_am_clk";
+ function = "i2s_out";
+ };
+ };
+
+ i2s_out_ao_clk_pins: i2s_out_ao_clk {
+ mux {
+ groups = "i2s_out_ao_clk";
+ function = "i2s_out";
+ };
+ };
+
+ i2s_out_lr_clk_pins: i2s_out_lr_clk {
+ mux {
+ groups = "i2s_out_lr_clk";
+ function = "i2s_out";
+ };
+ };
+
+ i2s_out_ch01_pins: i2s_out_ch01 {
+ mux {
+ groups = "i2s_out_ch01";
+ function = "i2s_out";
+ };
+ };
+ i2sout_ch23_z_pins: i2sout_ch23_z {
+ mux {
+ groups = "i2sout_ch23_z";
+ function = "i2s_out";
+ };
+ };
+
+ i2sout_ch45_z_pins: i2sout_ch45_z {
+ mux {
+ groups = "i2sout_ch45_z";
+ function = "i2s_out";
+ };
+ };
+
+ i2sout_ch67_z_pins: i2sout_ch67_z {
+ mux {
+ groups = "i2sout_ch67_z";
+ function = "i2s_out";
+ };
+ };
+
+ spdif_out_h_pins: spdif_out_ao_h {
+ mux {
+ groups = "spdif_out_h";
+ function = "spdif_out";
+ };
+ };
};
eth-phy-mux {
@@ -339,6 +513,10 @@
clocks = <&clkc CLKID_I2C>;
};
+&i2c_AO {
+ clocks = <&clkc CLKID_AO_I2C>;
+};
+
&i2c_B {
clocks = <&clkc CLKID_I2C>;
};
@@ -378,6 +556,22 @@
clock-names = "core", "clkin0", "clkin1";
};
+&spifc {
+ clocks = <&clkc CLKID_SPI>;
+};
+
&vpu {
compatible = "amlogic,meson-gxl-vpu", "amlogic,meson-gx-vpu";
};
+
+&hdmi_tx {
+ compatible = "amlogic,meson-gxl-dw-hdmi", "amlogic,meson-gx-dw-hdmi";
+ resets = <&reset RESET_HDMITX_CAPB3>,
+ <&reset RESET_HDMI_SYSTEM_RESET>,
+ <&reset RESET_HDMI_TX>;
+ reset-names = "hdmitx_apb", "hdmitx", "hdmitx_phy";
+ clocks = <&clkc CLKID_HDMI_PCLK>,
+ <&clkc CLKID_CLK81>,
+ <&clkc CLKID_GCLK_VENCI_INT0>;
+ clock-names = "isfr", "iahb", "venci";
+};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxm-nexbox-a1.dts b/arch/arm64/boot/dts/amlogic/meson-gxm-nexbox-a1.dts
index 5a337d339df1..11b0bf46a95c 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxm-nexbox-a1.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxm-nexbox-a1.dts
@@ -100,6 +100,17 @@
};
};
};
+
+ hdmi-connector {
+ compatible = "hdmi-connector";
+ type = "a";
+
+ port {
+ hdmi_connector_in: endpoint {
+ remote-endpoint = <&hdmi_tx_tmds_out>;
+ };
+ };
+ };
};
/* This UART is brought out to the DB9 connector */
@@ -162,6 +173,8 @@
/* Select external PHY by default */
phy-handle = <&external_phy>;
+ amlogic,tx-delay-ns = <2>;
+
snps,reset-gpio = <&gpio GPIOZ_14 0>;
snps,reset-delays-us = <0 10000 1000000>;
snps,reset-active-low;
@@ -183,3 +196,15 @@
remote-endpoint = <&cvbs_connector_in>;
};
};
+
+&hdmi_tx {
+ status = "okay";
+ pinctrl-0 = <&hdmi_hpd_pins>, <&hdmi_i2c_pins>;
+ pinctrl-names = "default";
+};
+
+&hdmi_tx_tmds_port {
+ hdmi_tx_tmds_out: endpoint {
+ remote-endpoint = <&hdmi_connector_in>;
+ };
+};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxm-q200.dts b/arch/arm64/boot/dts/amlogic/meson-gxm-q200.dts
index 5dbc66088355..b65776b01911 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxm-q200.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxm-q200.dts
@@ -43,12 +43,47 @@
/dts-v1/;
+#include <dt-bindings/input/input.h>
+
#include "meson-gxm.dtsi"
#include "meson-gx-p23x-q20x.dtsi"
/ {
compatible = "amlogic,q200", "amlogic,s912", "amlogic,meson-gxm";
model = "Amlogic Meson GXM (S912) Q200 Development Board";
+
+ adc-keys {
+ compatible = "adc-keys";
+ io-channels = <&saradc 0>;
+ io-channel-names = "buttons";
+ keyup-threshold-microvolt = <1710000>;
+
+ button-function {
+ label = "Update";
+ linux,code = <KEY_VENDOR>;
+ press-threshold-microvolt = <10000>;
+ };
+ };
+
+ gpio-keys-polled {
+ compatible = "gpio-keys-polled";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ poll-interval = <100>;
+
+ button@0 {
+ label = "power";
+ linux,code = <KEY_POWER>;
+ gpios = <&gpio_ao GPIOAO_2 GPIO_ACTIVE_LOW>;
+ };
+ };
+
+ vddio_ao18: regulator-vddio_ao18 {
+ compatible = "regulator-fixed";
+ regulator-name = "VDDIO_AO18";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
};
/* Q200 has exclusive choice between internal or external PHY */
@@ -59,6 +94,8 @@
/* Select external PHY by default */
phy-handle = <&external_phy>;
+ amlogic,tx-delay-ns = <2>;
+
/* External PHY reset is shared with internal PHY Led signals */
snps,reset-gpio = <&gpio GPIOZ_14 0>;
snps,reset-delays-us = <0 10000 1000000>;
@@ -75,3 +112,8 @@
max-speed = <1000>;
};
};
+
+&saradc {
+ status = "okay";
+ vref-supply = <&vddio_ao18>;
+};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxm.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxm.dtsi
index ddea7305c644..fe451cce93e7 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxm.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gxm.dtsi
@@ -130,3 +130,6 @@
compatible = "amlogic,meson-gxm-vpu", "amlogic,meson-gx-vpu";
};
+&hdmi_tx {
+ compatible = "amlogic,meson-gxm-dw-hdmi", "amlogic,meson-gx-dw-hdmi";
+};
diff --git a/arch/arm64/boot/dts/arm/juno-base.dtsi b/arch/arm64/boot/dts/arm/juno-base.dtsi
index df539e865b90..bfe7d683a42e 100644
--- a/arch/arm64/boot/dts/arm/juno-base.dtsi
+++ b/arch/arm64/boot/dts/arm/juno-base.dtsi
@@ -428,7 +428,7 @@
};
};
- pcie_ctlr: pcie-controller@40000000 {
+ pcie_ctlr: pcie@40000000 {
compatible = "arm,juno-r1-pcie", "plda,xpressrich3-axi", "pci-host-ecam-generic";
device_type = "pci";
reg = <0 0x40000000 0 0x10000000>; /* ECAM config space */
@@ -699,7 +699,7 @@
<0x00000008 0x80000000 0x1 0x80000000>;
};
- smb@08000000 {
+ smb@8000000 {
compatible = "simple-bus";
#address-cells = <2>;
#size-cells = <1>;
diff --git a/arch/arm64/boot/dts/arm/juno-motherboard.dtsi b/arch/arm64/boot/dts/arm/juno-motherboard.dtsi
index 098601657f82..2ac43221ddb6 100644
--- a/arch/arm64/boot/dts/arm/juno-motherboard.dtsi
+++ b/arch/arm64/boot/dts/arm/juno-motherboard.dtsi
@@ -137,7 +137,7 @@
#size-cells = <1>;
ranges = <0 3 0 0x200000>;
- v2m_sysctl: sysctl@020000 {
+ v2m_sysctl: sysctl@20000 {
compatible = "arm,sp810", "arm,primecell";
reg = <0x020000 0x1000>;
clocks = <&v2m_refclk32khz>, <&v2m_refclk1mhz>, <&mb_clk24mhz>;
@@ -148,7 +148,7 @@
assigned-clock-parents = <&v2m_refclk1mhz>, <&v2m_refclk1mhz>, <&v2m_refclk1mhz>, <&v2m_refclk1mhz>;
};
- apbregs@010000 {
+ apbregs@10000 {
compatible = "syscon", "simple-mfd";
reg = <0x010000 0x1000>;
@@ -216,7 +216,7 @@
};
};
- mmci@050000 {
+ mmci@50000 {
compatible = "arm,pl180", "arm,primecell";
reg = <0x050000 0x1000>;
interrupts = <5>;
@@ -228,7 +228,7 @@
clock-names = "mclk", "apb_pclk";
};
- kmi@060000 {
+ kmi@60000 {
compatible = "arm,pl050", "arm,primecell";
reg = <0x060000 0x1000>;
interrupts = <8>;
@@ -236,7 +236,7 @@
clock-names = "KMIREFCLK", "apb_pclk";
};
- kmi@070000 {
+ kmi@70000 {
compatible = "arm,pl050", "arm,primecell";
reg = <0x070000 0x1000>;
interrupts = <8>;
@@ -244,7 +244,7 @@
clock-names = "KMIREFCLK", "apb_pclk";
};
- wdt@0f0000 {
+ wdt@f0000 {
compatible = "arm,sp805", "arm,primecell";
reg = <0x0f0000 0x10000>;
interrupts = <7>;
diff --git a/arch/arm64/boot/dts/arm/juno-r1.dts b/arch/arm64/boot/dts/arm/juno-r1.dts
index 0033c59a64b5..0e8943ab94d7 100644
--- a/arch/arm64/boot/dts/arm/juno-r1.dts
+++ b/arch/arm64/boot/dts/arm/juno-r1.dts
@@ -89,6 +89,12 @@
reg = <0x0 0x0>;
device_type = "cpu";
enable-method = "psci";
+ i-cache-size = <0xc000>;
+ i-cache-line-size = <64>;
+ i-cache-sets = <256>;
+ d-cache-size = <0x8000>;
+ d-cache-line-size = <64>;
+ d-cache-sets = <256>;
next-level-cache = <&A57_L2>;
clocks = <&scpi_dvfs 0>;
cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
@@ -100,6 +106,12 @@
reg = <0x0 0x1>;
device_type = "cpu";
enable-method = "psci";
+ i-cache-size = <0xc000>;
+ i-cache-line-size = <64>;
+ i-cache-sets = <256>;
+ d-cache-size = <0x8000>;
+ d-cache-line-size = <64>;
+ d-cache-sets = <256>;
next-level-cache = <&A57_L2>;
clocks = <&scpi_dvfs 0>;
cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
@@ -111,6 +123,12 @@
reg = <0x0 0x100>;
device_type = "cpu";
enable-method = "psci";
+ i-cache-size = <0x8000>;
+ i-cache-line-size = <64>;
+ i-cache-sets = <256>;
+ d-cache-size = <0x8000>;
+ d-cache-line-size = <64>;
+ d-cache-sets = <128>;
next-level-cache = <&A53_L2>;
clocks = <&scpi_dvfs 1>;
cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
@@ -122,6 +140,12 @@
reg = <0x0 0x101>;
device_type = "cpu";
enable-method = "psci";
+ i-cache-size = <0x8000>;
+ i-cache-line-size = <64>;
+ i-cache-sets = <256>;
+ d-cache-size = <0x8000>;
+ d-cache-line-size = <64>;
+ d-cache-sets = <128>;
next-level-cache = <&A53_L2>;
clocks = <&scpi_dvfs 1>;
cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
@@ -133,6 +157,12 @@
reg = <0x0 0x102>;
device_type = "cpu";
enable-method = "psci";
+ i-cache-size = <0x8000>;
+ i-cache-line-size = <64>;
+ i-cache-sets = <256>;
+ d-cache-size = <0x8000>;
+ d-cache-line-size = <64>;
+ d-cache-sets = <128>;
next-level-cache = <&A53_L2>;
clocks = <&scpi_dvfs 1>;
cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
@@ -144,6 +174,12 @@
reg = <0x0 0x103>;
device_type = "cpu";
enable-method = "psci";
+ i-cache-size = <0x8000>;
+ i-cache-line-size = <64>;
+ i-cache-sets = <256>;
+ d-cache-size = <0x8000>;
+ d-cache-line-size = <64>;
+ d-cache-sets = <128>;
next-level-cache = <&A53_L2>;
clocks = <&scpi_dvfs 1>;
cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
@@ -152,10 +188,16 @@
A57_L2: l2-cache0 {
compatible = "cache";
+ cache-size = <0x200000>;
+ cache-line-size = <64>;
+ cache-sets = <2048>;
};
A53_L2: l2-cache1 {
compatible = "cache";
+ cache-size = <0x100000>;
+ cache-line-size = <64>;
+ cache-sets = <1024>;
};
};
diff --git a/arch/arm64/boot/dts/arm/juno-r2.dts b/arch/arm64/boot/dts/arm/juno-r2.dts
index 218d0e4736a8..405e2fba025b 100644
--- a/arch/arm64/boot/dts/arm/juno-r2.dts
+++ b/arch/arm64/boot/dts/arm/juno-r2.dts
@@ -89,6 +89,12 @@
reg = <0x0 0x0>;
device_type = "cpu";
enable-method = "psci";
+ i-cache-size = <0xc000>;
+ i-cache-line-size = <64>;
+ i-cache-sets = <256>;
+ d-cache-size = <0x8000>;
+ d-cache-line-size = <64>;
+ d-cache-sets = <256>;
next-level-cache = <&A72_L2>;
clocks = <&scpi_dvfs 0>;
cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
@@ -100,6 +106,12 @@
reg = <0x0 0x1>;
device_type = "cpu";
enable-method = "psci";
+ i-cache-size = <0xc000>;
+ i-cache-line-size = <64>;
+ i-cache-sets = <256>;
+ d-cache-size = <0x8000>;
+ d-cache-line-size = <64>;
+ d-cache-sets = <256>;
next-level-cache = <&A72_L2>;
clocks = <&scpi_dvfs 0>;
cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
@@ -111,6 +123,12 @@
reg = <0x0 0x100>;
device_type = "cpu";
enable-method = "psci";
+ i-cache-size = <0x8000>;
+ i-cache-line-size = <64>;
+ i-cache-sets = <256>;
+ d-cache-size = <0x8000>;
+ d-cache-line-size = <64>;
+ d-cache-sets = <128>;
next-level-cache = <&A53_L2>;
clocks = <&scpi_dvfs 1>;
cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
@@ -122,6 +140,12 @@
reg = <0x0 0x101>;
device_type = "cpu";
enable-method = "psci";
+ i-cache-size = <0x8000>;
+ i-cache-line-size = <64>;
+ i-cache-sets = <256>;
+ d-cache-size = <0x8000>;
+ d-cache-line-size = <64>;
+ d-cache-sets = <128>;
next-level-cache = <&A53_L2>;
clocks = <&scpi_dvfs 1>;
cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
@@ -133,6 +157,12 @@
reg = <0x0 0x102>;
device_type = "cpu";
enable-method = "psci";
+ i-cache-size = <0x8000>;
+ i-cache-line-size = <64>;
+ i-cache-sets = <256>;
+ d-cache-size = <0x8000>;
+ d-cache-line-size = <64>;
+ d-cache-sets = <128>;
next-level-cache = <&A53_L2>;
clocks = <&scpi_dvfs 1>;
cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
@@ -144,6 +174,12 @@
reg = <0x0 0x103>;
device_type = "cpu";
enable-method = "psci";
+ i-cache-size = <0x8000>;
+ i-cache-line-size = <64>;
+ i-cache-sets = <256>;
+ d-cache-size = <0x8000>;
+ d-cache-line-size = <64>;
+ d-cache-sets = <128>;
next-level-cache = <&A53_L2>;
clocks = <&scpi_dvfs 1>;
cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
@@ -152,10 +188,16 @@
A72_L2: l2-cache0 {
compatible = "cache";
+ cache-size = <0x200000>;
+ cache-line-size = <64>;
+ cache-sets = <2048>;
};
A53_L2: l2-cache1 {
compatible = "cache";
+ cache-size = <0x100000>;
+ cache-line-size = <64>;
+ cache-sets = <1024>;
};
};
diff --git a/arch/arm64/boot/dts/arm/juno.dts b/arch/arm64/boot/dts/arm/juno.dts
index bb2820ef3d5b..0220494c9b80 100644
--- a/arch/arm64/boot/dts/arm/juno.dts
+++ b/arch/arm64/boot/dts/arm/juno.dts
@@ -88,6 +88,12 @@
reg = <0x0 0x0>;
device_type = "cpu";
enable-method = "psci";
+ i-cache-size = <0xc000>;
+ i-cache-line-size = <64>;
+ i-cache-sets = <256>;
+ d-cache-size = <0x8000>;
+ d-cache-line-size = <64>;
+ d-cache-sets = <256>;
next-level-cache = <&A57_L2>;
clocks = <&scpi_dvfs 0>;
cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
@@ -99,6 +105,12 @@
reg = <0x0 0x1>;
device_type = "cpu";
enable-method = "psci";
+ i-cache-size = <0xc000>;
+ i-cache-line-size = <64>;
+ i-cache-sets = <256>;
+ d-cache-size = <0x8000>;
+ d-cache-line-size = <64>;
+ d-cache-sets = <256>;
next-level-cache = <&A57_L2>;
clocks = <&scpi_dvfs 0>;
cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
@@ -110,6 +122,12 @@
reg = <0x0 0x100>;
device_type = "cpu";
enable-method = "psci";
+ i-cache-size = <0x8000>;
+ i-cache-line-size = <64>;
+ i-cache-sets = <256>;
+ d-cache-size = <0x8000>;
+ d-cache-line-size = <64>;
+ d-cache-sets = <128>;
next-level-cache = <&A53_L2>;
clocks = <&scpi_dvfs 1>;
cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
@@ -121,6 +139,12 @@
reg = <0x0 0x101>;
device_type = "cpu";
enable-method = "psci";
+ i-cache-size = <0x8000>;
+ i-cache-line-size = <64>;
+ i-cache-sets = <256>;
+ d-cache-size = <0x8000>;
+ d-cache-line-size = <64>;
+ d-cache-sets = <128>;
next-level-cache = <&A53_L2>;
clocks = <&scpi_dvfs 1>;
cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
@@ -132,6 +156,12 @@
reg = <0x0 0x102>;
device_type = "cpu";
enable-method = "psci";
+ i-cache-size = <0x8000>;
+ i-cache-line-size = <64>;
+ i-cache-sets = <256>;
+ d-cache-size = <0x8000>;
+ d-cache-line-size = <64>;
+ d-cache-sets = <128>;
next-level-cache = <&A53_L2>;
clocks = <&scpi_dvfs 1>;
cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
@@ -143,6 +173,12 @@
reg = <0x0 0x103>;
device_type = "cpu";
enable-method = "psci";
+ i-cache-size = <0x8000>;
+ i-cache-line-size = <64>;
+ i-cache-sets = <256>;
+ d-cache-size = <0x8000>;
+ d-cache-line-size = <64>;
+ d-cache-sets = <128>;
next-level-cache = <&A53_L2>;
clocks = <&scpi_dvfs 1>;
cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
@@ -151,10 +187,16 @@
A57_L2: l2-cache0 {
compatible = "cache";
+ cache-size = <0x200000>;
+ cache-line-size = <64>;
+ cache-sets = <2048>;
};
A53_L2: l2-cache1 {
compatible = "cache";
+ cache-size = <0x100000>;
+ cache-line-size = <64>;
+ cache-sets = <1024>;
};
};
diff --git a/arch/arm64/boot/dts/broadcom/Makefile b/arch/arm64/boot/dts/broadcom/Makefile
index f1caece9d3a7..bfa8f8e4c5af 100644
--- a/arch/arm64/boot/dts/broadcom/Makefile
+++ b/arch/arm64/boot/dts/broadcom/Makefile
@@ -1,6 +1,5 @@
dtb-$(CONFIG_ARCH_BCM2835) += bcm2837-rpi-3-b.dtb
dtb-$(CONFIG_ARCH_BCM_IPROC) += ns2-svk.dtb ns2-xmc.dtb
-dtb-$(CONFIG_ARCH_VULCAN) += vulcan-eval.dtb
always := $(dtb-y)
subdir-y := $(dts-dirs)
diff --git a/arch/arm64/boot/dts/broadcom/ns2-svk.dts b/arch/arm64/boot/dts/broadcom/ns2-svk.dts
index 5ae08161649e..ec19fbf928a1 100644
--- a/arch/arm64/boot/dts/broadcom/ns2-svk.dts
+++ b/arch/arm64/boot/dts/broadcom/ns2-svk.dts
@@ -57,55 +57,55 @@
};
&enet {
- status = "ok";
+ status = "okay";
};
&pci_phy0 {
- status = "ok";
+ status = "okay";
};
&pci_phy1 {
- status = "ok";
+ status = "okay";
};
&pcie0 {
- status = "ok";
+ status = "okay";
};
&pcie4 {
- status = "ok";
+ status = "okay";
};
&pcie8 {
- status = "ok";
+ status = "okay";
};
&i2c0 {
- status = "ok";
+ status = "okay";
};
&i2c1 {
- status = "ok";
+ status = "okay";
};
&uart0 {
- status = "ok";
+ status = "okay";
};
&uart1 {
- status = "ok";
+ status = "okay";
};
&uart2 {
- status = "ok";
+ status = "okay";
};
&uart3 {
- status = "ok";
+ status = "okay";
};
&ssp0 {
- status = "ok";
+ status = "okay";
slic@0 {
compatible = "silabs,si3226x";
@@ -126,7 +126,7 @@
};
&ssp1 {
- status = "ok";
+ status = "okay";
at25@0 {
compatible = "atmel,at25";
@@ -150,23 +150,23 @@
};
&sata_phy0 {
- status = "ok";
+ status = "okay";
};
&sata_phy1 {
- status = "ok";
+ status = "okay";
};
&sata {
- status = "ok";
+ status = "okay";
};
&sdio0 {
- status = "ok";
+ status = "okay";
};
&sdio1 {
- status = "ok";
+ status = "okay";
};
&nand {
diff --git a/arch/arm64/boot/dts/broadcom/ns2-xmc.dts b/arch/arm64/boot/dts/broadcom/ns2-xmc.dts
index 99a2723cccd2..ab4ae1a32fab 100644
--- a/arch/arm64/boot/dts/broadcom/ns2-xmc.dts
+++ b/arch/arm64/boot/dts/broadcom/ns2-xmc.dts
@@ -54,15 +54,15 @@
};
&enet {
- status = "ok";
+ status = "okay";
};
&i2c0 {
- status = "ok";
+ status = "okay";
};
&i2c1 {
- status = "ok";
+ status = "okay";
};
&mdio_mux_iproc {
@@ -122,27 +122,27 @@
};
&pci_phy0 {
- status = "ok";
+ status = "okay";
};
&pcie0 {
- status = "ok";
+ status = "okay";
};
&pcie8 {
- status = "ok";
+ status = "okay";
};
&sata_phy0 {
- status = "ok";
+ status = "okay";
};
&sata_phy1 {
- status = "ok";
+ status = "okay";
};
&sata {
- status = "ok";
+ status = "okay";
};
&qspi {
@@ -187,5 +187,5 @@
};
&uart3 {
- status = "ok";
+ status = "okay";
};
diff --git a/arch/arm64/boot/dts/broadcom/ns2.dtsi b/arch/arm64/boot/dts/broadcom/ns2.dtsi
index bcb03fc32665..35a309ae3ed8 100644
--- a/arch/arm64/boot/dts/broadcom/ns2.dtsi
+++ b/arch/arm64/boot/dts/broadcom/ns2.dtsi
@@ -222,6 +222,12 @@
brcm,use-bcm-hdr;
};
+ crypto0: crypto@612d0000 {
+ compatible = "brcm,spum-crypto";
+ reg = <0x612d0000 0x900>;
+ mboxes = <&pdc0 0>;
+ };
+
pdc1: iproc-pdc1@612e0000 {
compatible = "brcm,iproc-pdc-mbox";
reg = <0x612e0000 0x445>; /* PDC FS1 regs */
@@ -232,6 +238,12 @@
brcm,use-bcm-hdr;
};
+ crypto1: crypto@612f0000 {
+ compatible = "brcm,spum-crypto";
+ reg = <0x612f0000 0x900>;
+ mboxes = <&pdc1 0>;
+ };
+
pdc2: iproc-pdc2@61300000 {
compatible = "brcm,iproc-pdc-mbox";
reg = <0x61300000 0x445>; /* PDC FS2 regs */
@@ -242,6 +254,12 @@
brcm,use-bcm-hdr;
};
+ crypto2: crypto@61310000 {
+ compatible = "brcm,spum-crypto";
+ reg = <0x61310000 0x900>;
+ mboxes = <&pdc2 0>;
+ };
+
pdc3: iproc-pdc3@61320000 {
compatible = "brcm,iproc-pdc-mbox";
reg = <0x61320000 0x445>; /* PDC FS3 regs */
@@ -252,6 +270,12 @@
brcm,use-bcm-hdr;
};
+ crypto3: crypto@61330000 {
+ compatible = "brcm,spum-crypto";
+ reg = <0x61330000 0x900>;
+ mboxes = <&pdc3 0>;
+ };
+
dma0: dma@61360000 {
compatible = "arm,pl330", "arm,primecell";
reg = <0x61360000 0x1000>;
diff --git a/arch/arm64/boot/dts/broadcom/vulcan-eval.dts b/arch/arm64/boot/dts/broadcom/vulcan-eval.dts
deleted file mode 100644
index 9ee8d3da0e3f..000000000000
--- a/arch/arm64/boot/dts/broadcom/vulcan-eval.dts
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * dts file for Broadcom (BRCM) Vulcan Evaluation Platform
- *
- * Copyright (c) 2013-2016 Broadcom
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of
- * the License, or (at your option) any later version.
- */
-
-/dts-v1/;
-
-#include "vulcan.dtsi"
-
-/ {
- model = "Broadcom Vulcan Eval Platform";
- compatible = "brcm,vulcan-eval", "brcm,vulcan-soc";
-
- memory {
- device_type = "memory";
- reg = <0x00000000 0x80000000 0x0 0x80000000>, /* 2G @ 2G */
- <0x00000008 0x80000000 0x0 0x80000000>; /* 2G @ 34G */
- };
-
- aliases {
- serial0 = &uart0;
- };
-
- chosen {
- stdout-path = "serial0:115200n8";
- };
-};
diff --git a/arch/arm64/boot/dts/broadcom/vulcan.dtsi b/arch/arm64/boot/dts/broadcom/vulcan.dtsi
deleted file mode 100644
index 34e11a9db2a0..000000000000
--- a/arch/arm64/boot/dts/broadcom/vulcan.dtsi
+++ /dev/null
@@ -1,147 +0,0 @@
-/*
- * dtsi file for Broadcom (BRCM) Vulcan processor
- *
- * Copyright (c) 2013-2016 Broadcom
- * Author: Zi Shen Lim <zlim@broadcom.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of
- * the License, or (at your option) any later version.
- */
-
-#include <dt-bindings/interrupt-controller/arm-gic.h>
-
-/ {
- model = "Broadcom Vulcan";
- compatible = "brcm,vulcan-soc";
- interrupt-parent = <&gic>;
- #address-cells = <2>;
- #size-cells = <2>;
-
- /* just 4 cpus now, 128 needed in full config */
- cpus {
- #address-cells = <0x2>;
- #size-cells = <0x0>;
-
- cpu@0 {
- device_type = "cpu";
- compatible = "brcm,vulcan", "arm,armv8";
- reg = <0x0 0x0>;
- enable-method = "psci";
- };
-
- cpu@1 {
- device_type = "cpu";
- compatible = "brcm,vulcan", "arm,armv8";
- reg = <0x0 0x1>;
- enable-method = "psci";
- };
-
- cpu@2 {
- device_type = "cpu";
- compatible = "brcm,vulcan", "arm,armv8";
- reg = <0x0 0x2>;
- enable-method = "psci";
- };
-
- cpu@3 {
- device_type = "cpu";
- compatible = "brcm,vulcan", "arm,armv8";
- reg = <0x0 0x3>;
- enable-method = "psci";
- };
- };
-
- psci {
- compatible = "arm,psci-0.2";
- method = "smc";
- };
-
- gic: interrupt-controller@400080000 {
- compatible = "arm,gic-v3";
- #interrupt-cells = <3>;
- #address-cells = <2>;
- #size-cells = <2>;
- ranges;
- interrupt-controller;
- #redistributor-regions = <1>;
- reg = <0x04 0x00080000 0x0 0x20000>, /* GICD */
- <0x04 0x01000000 0x0 0x1000000>; /* GICR */
- interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH>;
-
- gicits: gic-its@40010000 {
- compatible = "arm,gic-v3-its";
- msi-controller;
- reg = <0x04 0x00100000 0x0 0x20000>; /* GIC ITS */
- };
- };
-
- timer {
- compatible = "arm,armv8-timer";
- interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_PPI 14 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_PPI 11 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_PPI 10 IRQ_TYPE_LEVEL_HIGH>;
- };
-
- pmu {
- compatible = "brcm,vulcan-pmu", "arm,armv8-pmuv3";
- interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_HIGH>; /* PMU overflow */
- };
-
- clk125mhz: uart_clk125mhz {
- compatible = "fixed-clock";
- #clock-cells = <0>;
- clock-frequency = <125000000>;
- clock-output-names = "clk125mhz";
- };
-
- pci {
- compatible = "pci-host-ecam-generic";
- device_type = "pci";
- #interrupt-cells = <1>;
- #address-cells = <3>;
- #size-cells = <2>;
-
- /* ECAM at 0x3000_0000 - 0x4000_0000 */
- reg = <0x0 0x30000000 0x0 0x10000000>;
- reg-names = "PCI ECAM";
-
- /*
- * PCI ranges:
- * IO no supported
- * MEM 0x4000_0000 - 0x6000_0000
- * MEM64 pref 0x40_0000_0000 - 0x60_0000_0000
- */
- ranges =
- <0x02000000 0 0x40000000 0 0x40000000 0 0x20000000
- 0x43000000 0x40 0x00000000 0x40 0x00000000 0x20 0x00000000>;
- interrupt-map-mask = <0 0 0 7>;
- interrupt-map =
- /* addr pin ic icaddr icintr */
- <0 0 0 1 &gic 0 0 GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH
- 0 0 0 2 &gic 0 0 GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH
- 0 0 0 3 &gic 0 0 GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH
- 0 0 0 4 &gic 0 0 GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>;
- msi-parent = <&gicits>;
- dma-coherent;
- };
-
- soc {
- compatible = "simple-bus";
- #address-cells = <2>;
- #size-cells = <2>;
- ranges;
-
- uart0: serial@402020000 {
- compatible = "arm,pl011", "arm,primecell";
- reg = <0x04 0x02020000 0x0 0x1000>;
- interrupt-parent = <&gic>;
- interrupts = <GIC_SPI 49 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clk125mhz>;
- clock-names = "apb_pclk";
- };
- };
-
-};
diff --git a/arch/arm64/boot/dts/cavium/Makefile b/arch/arm64/boot/dts/cavium/Makefile
index e34f89ddabb2..581b2c1c400a 100644
--- a/arch/arm64/boot/dts/cavium/Makefile
+++ b/arch/arm64/boot/dts/cavium/Makefile
@@ -1,4 +1,5 @@
dtb-$(CONFIG_ARCH_THUNDER) += thunder-88xx.dtb
+dtb-$(CONFIG_ARCH_THUNDER2) += thunder2-99xx.dtb
always := $(dtb-y)
subdir-y := $(dts-dirs)
diff --git a/arch/arm64/boot/dts/cavium/thunder2-99xx.dts b/arch/arm64/boot/dts/cavium/thunder2-99xx.dts
new file mode 100644
index 000000000000..6c6fb8692fde
--- /dev/null
+++ b/arch/arm64/boot/dts/cavium/thunder2-99xx.dts
@@ -0,0 +1,34 @@
+/*
+ * dts file for Cavium ThunderX2 CN99XX Evaluation Platform
+ *
+ * Copyright (c) 2017 Cavium Inc.
+ * Copyright (c) 2013-2016 Broadcom
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ */
+
+/dts-v1/;
+
+#include "thunder2-99xx.dtsi"
+
+/ {
+ model = "Cavium ThunderX2 CN99XX";
+ compatible = "cavium,thunderx2-cn9900", "brcm,vulcan-soc";
+
+ memory {
+ device_type = "memory";
+ reg = <0x00000000 0x80000000 0x0 0x80000000>, /* 2G @ 2G */
+ <0x00000008 0x80000000 0x0 0x80000000>; /* 2G @ 34G */
+ };
+
+ aliases {
+ serial0 = &uart0;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+};
diff --git a/arch/arm64/boot/dts/cavium/thunder2-99xx.dtsi b/arch/arm64/boot/dts/cavium/thunder2-99xx.dtsi
new file mode 100644
index 000000000000..4220fbdcb24a
--- /dev/null
+++ b/arch/arm64/boot/dts/cavium/thunder2-99xx.dtsi
@@ -0,0 +1,148 @@
+/*
+ * dtsi file for Cavium ThunderX2 CN99XX processor
+ *
+ * Copyright (c) 2017 Cavium Inc.
+ * Copyright (c) 2013-2016 Broadcom
+ * Author: Zi Shen Lim <zlim@broadcom.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ */
+
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+/ {
+ model = "Cavium ThunderX2 CN99XX";
+ compatible = "cavium,thunderx2-cn9900", "brcm,vulcan-soc";
+ interrupt-parent = <&gic>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ /* just 4 cpus now, 128 needed in full config */
+ cpus {
+ #address-cells = <0x2>;
+ #size-cells = <0x0>;
+
+ cpu@0 {
+ device_type = "cpu";
+ compatible = "cavium,thunder2", "brcm,vulcan", "arm,armv8";
+ reg = <0x0 0x0>;
+ enable-method = "psci";
+ };
+
+ cpu@1 {
+ device_type = "cpu";
+ compatible = "cavium,thunder2", "brcm,vulcan", "arm,armv8";
+ reg = <0x0 0x1>;
+ enable-method = "psci";
+ };
+
+ cpu@2 {
+ device_type = "cpu";
+ compatible = "cavium,thunder2", "brcm,vulcan", "arm,armv8";
+ reg = <0x0 0x2>;
+ enable-method = "psci";
+ };
+
+ cpu@3 {
+ device_type = "cpu";
+ compatible = "cavium,thunder2", "brcm,vulcan", "arm,armv8";
+ reg = <0x0 0x3>;
+ enable-method = "psci";
+ };
+ };
+
+ psci {
+ compatible = "arm,psci-0.2";
+ method = "smc";
+ };
+
+ gic: interrupt-controller@400080000 {
+ compatible = "arm,gic-v3";
+ #interrupt-cells = <3>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+ interrupt-controller;
+ #redistributor-regions = <1>;
+ reg = <0x04 0x00080000 0x0 0x20000>, /* GICD */
+ <0x04 0x01000000 0x0 0x1000000>; /* GICR */
+ interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH>;
+
+ gicits: gic-its@40010000 {
+ compatible = "arm,gic-v3-its";
+ msi-controller;
+ reg = <0x04 0x00100000 0x0 0x20000>; /* GIC ITS */
+ };
+ };
+
+ timer {
+ compatible = "arm,armv8-timer";
+ interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_PPI 14 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_PPI 11 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_PPI 10 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ pmu {
+ compatible = "brcm,vulcan-pmu", "arm,armv8-pmuv3";
+ interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_HIGH>; /* PMU overflow */
+ };
+
+ clk125mhz: uart_clk125mhz {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <125000000>;
+ clock-output-names = "clk125mhz";
+ };
+
+ pci {
+ compatible = "pci-host-ecam-generic";
+ device_type = "pci";
+ #interrupt-cells = <1>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+
+ /* ECAM at 0x3000_0000 - 0x4000_0000 */
+ reg = <0x0 0x30000000 0x0 0x10000000>;
+ reg-names = "PCI ECAM";
+
+ /*
+ * PCI ranges:
+ * IO not supported
+ * MEM 0x4000_0000 - 0x6000_0000
+ * MEM64 pref 0x40_0000_0000 - 0x60_0000_0000
+ */
+ ranges =
+ <0x02000000 0 0x40000000 0 0x40000000 0 0x20000000
+ 0x43000000 0x40 0x00000000 0x40 0x00000000 0x20 0x00000000>;
+ interrupt-map-mask = <0 0 0 7>;
+ interrupt-map =
+ /* addr pin ic icaddr icintr */
+ <0 0 0 1 &gic 0 0 GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH
+ 0 0 0 2 &gic 0 0 GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH
+ 0 0 0 3 &gic 0 0 GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH
+ 0 0 0 4 &gic 0 0 GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>;
+ msi-parent = <&gicits>;
+ dma-coherent;
+ };
+
+ soc {
+ compatible = "simple-bus";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ uart0: serial@402020000 {
+ compatible = "arm,pl011", "arm,primecell";
+ reg = <0x04 0x02020000 0x0 0x1000>;
+ interrupt-parent = <&gic>;
+ interrupts = <GIC_SPI 49 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk125mhz>;
+ clock-names = "apb_pclk";
+ };
+ };
+
+};
diff --git a/arch/arm64/boot/dts/exynos/exynos5433-bus.dtsi b/arch/arm64/boot/dts/exynos/exynos5433-bus.dtsi
index c42dc39c3223..ec11343dc528 100644
--- a/arch/arm64/boot/dts/exynos/exynos5433-bus.dtsi
+++ b/arch/arm64/boot/dts/exynos/exynos5433-bus.dtsi
@@ -94,27 +94,27 @@
compatible = "operating-points-v2";
opp-shared;
- opp@400000000 {
+ opp-400000000 {
opp-hz = /bits/ 64 <400000000>;
opp-microvolt = <1075000>;
};
- opp@267000000 {
+ opp-267000000 {
opp-hz = /bits/ 64 <267000000>;
opp-microvolt = <1000000>;
};
- opp@200000000 {
+ opp-200000000 {
opp-hz = /bits/ 64 <200000000>;
opp-microvolt = <975000>;
};
- opp@160000000 {
+ opp-160000000 {
opp-hz = /bits/ 64 <160000000>;
opp-microvolt = <962500>;
};
- opp@134000000 {
+ opp-134000000 {
opp-hz = /bits/ 64 <134000000>;
opp-microvolt = <950000>;
};
- opp@100000000 {
+ opp-100000000 {
opp-hz = /bits/ 64 <100000000>;
opp-microvolt = <937500>;
};
@@ -123,19 +123,19 @@
bus_g2d_266_opp_table: opp_table3 {
compatible = "operating-points-v2";
- opp@267000000 {
+ opp-267000000 {
opp-hz = /bits/ 64 <267000000>;
};
- opp@200000000 {
+ opp-200000000 {
opp-hz = /bits/ 64 <200000000>;
};
- opp@160000000 {
+ opp-160000000 {
opp-hz = /bits/ 64 <160000000>;
};
- opp@134000000 {
+ opp-134000000 {
opp-hz = /bits/ 64 <134000000>;
};
- opp@100000000 {
+ opp-100000000 {
opp-hz = /bits/ 64 <100000000>;
};
};
@@ -143,13 +143,13 @@
bus_gscl_opp_table: opp_table4 {
compatible = "operating-points-v2";
- opp@333000000 {
+ opp-333000000 {
opp-hz = /bits/ 64 <333000000>;
};
- opp@222000000 {
+ opp-222000000 {
opp-hz = /bits/ 64 <222000000>;
};
- opp@166500000 {
+ opp-166500000 {
opp-hz = /bits/ 64 <166500000>;
};
};
@@ -158,22 +158,22 @@
compatible = "operating-points-v2";
opp-shared;
- opp@400000000 {
+ opp-400000000 {
opp-hz = /bits/ 64 <400000000>;
};
- opp@267000000 {
+ opp-267000000 {
opp-hz = /bits/ 64 <267000000>;
};
- opp@200000000 {
+ opp-200000000 {
opp-hz = /bits/ 64 <200000000>;
};
- opp@160000000 {
+ opp-160000000 {
opp-hz = /bits/ 64 <160000000>;
};
- opp@134000000 {
+ opp-134000000 {
opp-hz = /bits/ 64 <134000000>;
};
- opp@100000000 {
+ opp-100000000 {
opp-hz = /bits/ 64 <100000000>;
};
};
@@ -181,16 +181,16 @@
bus_noc2_opp_table: opp_table6 {
compatible = "operating-points-v2";
- opp@400000000 {
+ opp-400000000 {
opp-hz = /bits/ 64 <400000000>;
};
- opp@200000000 {
+ opp-200000000 {
opp-hz = /bits/ 64 <200000000>;
};
- opp@134000000 {
+ opp-134000000 {
opp-hz = /bits/ 64 <134000000>;
};
- opp@100000000 {
+ opp-100000000 {
opp-hz = /bits/ 64 <100000000>;
};
};
diff --git a/arch/arm64/boot/dts/exynos/exynos5433-tm2-common.dtsi b/arch/arm64/boot/dts/exynos/exynos5433-tm2-common.dtsi
index 098ad557fee3..e2b0da2c0bc7 100644
--- a/arch/arm64/boot/dts/exynos/exynos5433-tm2-common.dtsi
+++ b/arch/arm64/boot/dts/exynos/exynos5433-tm2-common.dtsi
@@ -106,6 +106,13 @@
};
};
+ irda_regulator: irda-regulator {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpio = <&gpr3 3 GPIO_ACTIVE_HIGH>;
+ regulator-name = "irda_regulator";
+ };
+
sound {
compatible = "samsung,tm2-audio";
audio-codec = <&wm5110>;
@@ -298,6 +305,8 @@
status = "okay";
vddcore-supply = <&ldo6_reg>;
vddio-supply = <&ldo7_reg>;
+ samsung,burst-clock-frequency = <512000000>;
+ samsung,esc-clock-frequency = <16000000>;
samsung,pll-clock-frequency = <24000000>;
pinctrl-names = "default";
pinctrl-0 = <&te_irq>;
@@ -749,6 +758,19 @@
};
};
+&hsi2c_5 {
+ status = "okay";
+
+ stmfts: touchscreen@49 {
+ compatible = "st,stmfts";
+ reg = <0x49>;
+ interrupt-parent = <&gpa1>;
+ interrupts = <1 IRQ_TYPE_LEVEL_LOW>;
+ avdd-supply = <&ldo30_reg>;
+ vdd-supply = <&ldo31_reg>;
+ };
+};
+
&hsi2c_7 {
status = "okay";
@@ -894,7 +916,7 @@
PIN(INPUT, gpa0-7, NONE, FAST_SR1);
PIN(INPUT, gpa1-0, UP, FAST_SR1);
- PIN(INPUT, gpa1-1, NONE, FAST_SR1);
+ PIN(INPUT, gpa1-1, UP, FAST_SR1);
PIN(INPUT, gpa1-2, NONE, FAST_SR1);
PIN(INPUT, gpa1-3, DOWN, FAST_SR1);
PIN(INPUT, gpa1-4, DOWN, FAST_SR1);
@@ -1074,7 +1096,6 @@
PIN(INPUT, gpg3-0, DOWN, FAST_SR1);
PIN(INPUT, gpg3-1, DOWN, FAST_SR1);
PIN(INPUT, gpg3-5, DOWN, FAST_SR1);
- PIN(INPUT, gpg3-7, DOWN, FAST_SR1);
};
};
@@ -1152,6 +1173,24 @@
};
};
+&spi_3 {
+ status = "okay";
+ no-cs-readback;
+
+ irled@0 {
+ compatible = "ir-spi-led";
+ reg = <0x0>;
+ spi-max-frequency = <5000000>;
+ power-supply = <&irda_regulator>;
+ duty-cycle = <60>;
+ led-active-low;
+
+ controller-data {
+ samsung,spi-feedback-delay = <0>;
+ };
+ };
+};
+
&timer {
clock-frequency = <24000000>;
};
diff --git a/arch/arm64/boot/dts/exynos/exynos5433-tm2.dts b/arch/arm64/boot/dts/exynos/exynos5433-tm2.dts
index dea0a6f5bc18..3ff95277a8ec 100644
--- a/arch/arm64/boot/dts/exynos/exynos5433-tm2.dts
+++ b/arch/arm64/boot/dts/exynos/exynos5433-tm2.dts
@@ -52,6 +52,18 @@
assigned-clock-rates = <250000000>, <400000000>;
};
+&dsi {
+ panel@0 {
+ compatible = "samsung,s6e3ha2";
+ reg = <0>;
+ vdd3-supply = <&ldo27_reg>;
+ vci-supply = <&ldo28_reg>;
+ reset-gpios = <&gpg0 0 GPIO_ACTIVE_LOW>;
+ enable-gpios = <&gpf1 5 GPIO_ACTIVE_HIGH>;
+ te-gpios = <&gpf1 3 GPIO_ACTIVE_HIGH>;
+ };
+};
+
&hsi2c_9 {
status = "okay";
@@ -76,3 +88,8 @@
regulator-min-microvolt = <3000000>;
regulator-max-microvolt = <3000000>;
};
+
+&stmfts {
+ touchscreen-size-x = <1439>;
+ touchscreen-size-y = <2559>;
+};
diff --git a/arch/arm64/boot/dts/exynos/exynos5433-tm2e.dts b/arch/arm64/boot/dts/exynos/exynos5433-tm2e.dts
index 7891a31adc17..b73e1231a86f 100644
--- a/arch/arm64/boot/dts/exynos/exynos5433-tm2e.dts
+++ b/arch/arm64/boot/dts/exynos/exynos5433-tm2e.dts
@@ -52,6 +52,17 @@
assigned-clock-rates = <278000000>, <400000000>;
};
+&dsi {
+ panel@0 {
+ compatible = "samsung,s6e3hf2";
+ reg = <0>;
+ vdd3-supply = <&ldo27_reg>;
+ vci-supply = <&ldo28_reg>;
+ reset-gpios = <&gpg0 0 GPIO_ACTIVE_LOW>;
+ enable-gpios = <&gpf1 5 GPIO_ACTIVE_HIGH>;
+ };
+};
+
&ldo31_reg {
regulator-name = "TSP_VDD_1.8V_AP";
regulator-min-microvolt = <1800000>;
@@ -63,3 +74,10 @@
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
};
+
+&stmfts {
+ touchscreen-size-x = <1599>;
+ touchscreen-size-y = <2559>;
+ touch-key-connected;
+ ledvdd-supply = <&ldo33_reg>;
+};
diff --git a/arch/arm64/boot/dts/exynos/exynos5433.dtsi b/arch/arm64/boot/dts/exynos/exynos5433.dtsi
index 16072c1c3ed3..727f36abf3d4 100644
--- a/arch/arm64/boot/dts/exynos/exynos5433.dtsi
+++ b/arch/arm64/boot/dts/exynos/exynos5433.dtsi
@@ -119,43 +119,43 @@
compatible = "operating-points-v2";
opp-shared;
- opp@400000000 {
+ opp-400000000 {
opp-hz = /bits/ 64 <400000000>;
opp-microvolt = <900000>;
};
- opp@500000000 {
+ opp-500000000 {
opp-hz = /bits/ 64 <500000000>;
opp-microvolt = <925000>;
};
- opp@600000000 {
+ opp-600000000 {
opp-hz = /bits/ 64 <600000000>;
opp-microvolt = <950000>;
};
- opp@700000000 {
+ opp-700000000 {
opp-hz = /bits/ 64 <700000000>;
opp-microvolt = <975000>;
};
- opp@800000000 {
+ opp-800000000 {
opp-hz = /bits/ 64 <800000000>;
opp-microvolt = <1000000>;
};
- opp@900000000 {
+ opp-900000000 {
opp-hz = /bits/ 64 <900000000>;
opp-microvolt = <1050000>;
};
- opp@1000000000 {
+ opp-1000000000 {
opp-hz = /bits/ 64 <1000000000>;
opp-microvolt = <1075000>;
};
- opp@1100000000 {
+ opp-1100000000 {
opp-hz = /bits/ 64 <1100000000>;
opp-microvolt = <1112500>;
};
- opp@1200000000 {
+ opp-1200000000 {
opp-hz = /bits/ 64 <1200000000>;
opp-microvolt = <1112500>;
};
- opp@1300000000 {
+ opp-1300000000 {
opp-hz = /bits/ 64 <1300000000>;
opp-microvolt = <1150000>;
};
@@ -165,63 +165,63 @@
compatible = "operating-points-v2";
opp-shared;
- opp@500000000 {
+ opp-500000000 {
opp-hz = /bits/ 64 <500000000>;
opp-microvolt = <900000>;
};
- opp@600000000 {
+ opp-600000000 {
opp-hz = /bits/ 64 <600000000>;
opp-microvolt = <900000>;
};
- opp@700000000 {
+ opp-700000000 {
opp-hz = /bits/ 64 <700000000>;
opp-microvolt = <912500>;
};
- opp@800000000 {
+ opp-800000000 {
opp-hz = /bits/ 64 <800000000>;
opp-microvolt = <912500>;
};
- opp@900000000 {
+ opp-900000000 {
opp-hz = /bits/ 64 <900000000>;
opp-microvolt = <937500>;
};
- opp@1000000000 {
+ opp-1000000000 {
opp-hz = /bits/ 64 <1000000000>;
opp-microvolt = <975000>;
};
- opp@1100000000 {
+ opp-1100000000 {
opp-hz = /bits/ 64 <1100000000>;
opp-microvolt = <1012500>;
};
- opp@1200000000 {
+ opp-1200000000 {
opp-hz = /bits/ 64 <1200000000>;
opp-microvolt = <1037500>;
};
- opp@1300000000 {
+ opp-1300000000 {
opp-hz = /bits/ 64 <1300000000>;
opp-microvolt = <1062500>;
};
- opp@1400000000 {
+ opp-1400000000 {
opp-hz = /bits/ 64 <1400000000>;
opp-microvolt = <1087500>;
};
- opp@1500000000 {
+ opp-1500000000 {
opp-hz = /bits/ 64 <1500000000>;
opp-microvolt = <1125000>;
};
- opp@1600000000 {
+ opp-1600000000 {
opp-hz = /bits/ 64 <1600000000>;
opp-microvolt = <1137500>;
};
- opp@1700000000 {
+ opp-1700000000 {
opp-hz = /bits/ 64 <1700000000>;
opp-microvolt = <1175000>;
};
- opp@1800000000 {
+ opp-1800000000 {
opp-hz = /bits/ 64 <1800000000>;
opp-microvolt = <1212500>;
};
- opp@1900000000 {
+ opp-1900000000 {
opp-hz = /bits/ 64 <1900000000>;
opp-microvolt = <1262500>;
};
diff --git a/arch/arm64/boot/dts/freescale/Makefile b/arch/arm64/boot/dts/freescale/Makefile
index 39db645b268e..72c4b525726f 100644
--- a/arch/arm64/boot/dts/freescale/Makefile
+++ b/arch/arm64/boot/dts/freescale/Makefile
@@ -5,9 +5,13 @@ dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1043a-qds.dtb
dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1043a-rdb.dtb
dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1046a-qds.dtb
dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1046a-rdb.dtb
+dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1088a-qds.dtb
+dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1088a-rdb.dtb
dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls2080a-qds.dtb
dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls2080a-rdb.dtb
dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls2080a-simu.dtb
+dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls2088a-qds.dtb
+dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls2088a-rdb.dtb
always := $(dtb-y)
subdir-y := $(dts-dirs)
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1012a-frdm.dts b/arch/arm64/boot/dts/freescale/fsl-ls1012a-frdm.dts
index a619f6496a4c..17fae8112e4d 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1012a-frdm.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1012a-frdm.dts
@@ -113,3 +113,7 @@
&sai2 {
status = "okay";
};
+
+&sata {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1012a-qds.dts b/arch/arm64/boot/dts/freescale/fsl-ls1012a-qds.dts
index 14a67f1709e7..e2a93d53d3d8 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1012a-qds.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1012a-qds.dts
@@ -126,3 +126,7 @@
&sai2 {
status = "okay";
};
+
+&sata {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1012a-rdb.dts b/arch/arm64/boot/dts/freescale/fsl-ls1012a-rdb.dts
index 62c5c7123a15..ed77f6b0937b 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1012a-rdb.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1012a-rdb.dts
@@ -57,3 +57,7 @@
&i2c0 {
status = "okay";
};
+
+&sata {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi
index cffebb4b3df1..b497ac196ccc 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi
@@ -42,7 +42,8 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/thermal/thermal.h>
/ {
compatible = "fsl,ls1012a";
@@ -50,6 +51,15 @@
#address-cells = <2>;
#size-cells = <2>;
+ aliases {
+ crypto = &crypto;
+ rtic_a = &rtic_a;
+ rtic_b = &rtic_b;
+ rtic_c = &rtic_c;
+ rtic_d = &rtic_d;
+ sec_mon = &sec_mon;
+ };
+
cpus {
#address-cells = <1>;
#size-cells = <0>;
@@ -113,6 +123,95 @@
big-endian;
};
+ crypto: crypto@1700000 {
+ compatible = "fsl,sec-v5.4", "fsl,sec-v5.0",
+ "fsl,sec-v4.0";
+ fsl,sec-era = <8>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x00 0x1700000 0x100000>;
+ reg = <0x00 0x1700000 0x0 0x100000>;
+ interrupts = <GIC_SPI 75 IRQ_TYPE_LEVEL_HIGH>;
+
+ sec_jr0: jr@10000 {
+ compatible = "fsl,sec-v5.4-job-ring",
+ "fsl,sec-v5.0-job-ring",
+ "fsl,sec-v4.0-job-ring";
+ reg = <0x10000 0x10000>;
+ interrupts = <GIC_SPI 71 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ sec_jr1: jr@20000 {
+ compatible = "fsl,sec-v5.4-job-ring",
+ "fsl,sec-v5.0-job-ring",
+ "fsl,sec-v4.0-job-ring";
+ reg = <0x20000 0x10000>;
+ interrupts = <GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ sec_jr2: jr@30000 {
+ compatible = "fsl,sec-v5.4-job-ring",
+ "fsl,sec-v5.0-job-ring",
+ "fsl,sec-v4.0-job-ring";
+ reg = <0x30000 0x10000>;
+ interrupts = <GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ sec_jr3: jr@40000 {
+ compatible = "fsl,sec-v5.4-job-ring",
+ "fsl,sec-v5.0-job-ring",
+ "fsl,sec-v4.0-job-ring";
+ reg = <0x40000 0x10000>;
+ interrupts = <GIC_SPI 74 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ rtic@60000 {
+ compatible = "fsl,sec-v5.4-rtic",
+ "fsl,sec-v5.0-rtic",
+ "fsl,sec-v4.0-rtic";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x60000 0x100 0x60e00 0x18>;
+ ranges = <0x0 0x60100 0x500>;
+
+ rtic_a: rtic-a@0 {
+ compatible = "fsl,sec-v5.4-rtic-memory",
+ "fsl,sec-v5.0-rtic-memory",
+ "fsl,sec-v4.0-rtic-memory";
+ reg = <0x00 0x20 0x100 0x100>;
+ };
+
+ rtic_b: rtic-b@20 {
+ compatible = "fsl,sec-v5.4-rtic-memory",
+ "fsl,sec-v5.0-rtic-memory",
+ "fsl,sec-v4.0-rtic-memory";
+ reg = <0x20 0x20 0x200 0x100>;
+ };
+
+ rtic_c: rtic-c@40 {
+ compatible = "fsl,sec-v5.4-rtic-memory",
+ "fsl,sec-v5.0-rtic-memory",
+ "fsl,sec-v4.0-rtic-memory";
+ reg = <0x40 0x20 0x300 0x100>;
+ };
+
+ rtic_d: rtic-d@60 {
+ compatible = "fsl,sec-v5.4-rtic-memory",
+ "fsl,sec-v5.0-rtic-memory",
+ "fsl,sec-v4.0-rtic-memory";
+ reg = <0x60 0x20 0x400 0x100>;
+ };
+ };
+ };
+
+ sec_mon: sec_mon@1e90000 {
+ compatible = "fsl,sec-v5.4-mon", "fsl,sec-v5.0-mon",
+ "fsl,sec-v4.0-mon";
+ reg = <0x0 0x1e90000 0x0 0x10000>;
+ interrupts = <GIC_SPI 78 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 79 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
dcfg: dcfg@1ee0000 {
compatible = "fsl,ls1012a-dcfg",
"syscon";
@@ -127,6 +226,82 @@
clocks = <&sysclk>;
};
+ tmu: tmu@1f00000 {
+ compatible = "fsl,qoriq-tmu";
+ reg = <0x0 0x1f00000 0x0 0x10000>;
+ interrupts = <0 33 0x4>;
+ fsl,tmu-range = <0xb0000 0x9002a 0x6004c 0x30062>;
+ fsl,tmu-calibration = <0x00000000 0x00000026
+ 0x00000001 0x0000002d
+ 0x00000002 0x00000032
+ 0x00000003 0x00000039
+ 0x00000004 0x0000003f
+ 0x00000005 0x00000046
+ 0x00000006 0x0000004d
+ 0x00000007 0x00000054
+ 0x00000008 0x0000005a
+ 0x00000009 0x00000061
+ 0x0000000a 0x0000006a
+ 0x0000000b 0x00000071
+
+ 0x00010000 0x00000025
+ 0x00010001 0x0000002c
+ 0x00010002 0x00000035
+ 0x00010003 0x0000003d
+ 0x00010004 0x00000045
+ 0x00010005 0x0000004e
+ 0x00010006 0x00000057
+ 0x00010007 0x00000061
+ 0x00010008 0x0000006b
+ 0x00010009 0x00000076
+
+ 0x00020000 0x00000029
+ 0x00020001 0x00000033
+ 0x00020002 0x0000003d
+ 0x00020003 0x00000049
+ 0x00020004 0x00000056
+ 0x00020005 0x00000061
+ 0x00020006 0x0000006d
+
+ 0x00030000 0x00000021
+ 0x00030001 0x0000002a
+ 0x00030002 0x0000003c
+ 0x00030003 0x0000004e>;
+ big-endian;
+ #thermal-sensor-cells = <1>;
+ };
+
+ thermal-zones {
+ cpu_thermal: cpu-thermal {
+ polling-delay-passive = <1000>;
+ polling-delay = <5000>;
+ thermal-sensors = <&tmu 0>;
+
+ trips {
+ cpu_alert: cpu-alert {
+ temperature = <85000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu_crit: cpu-crit {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+
+ cooling-maps {
+ map0 {
+ trip = <&cpu_alert>;
+ cooling-device =
+ <&cpu0 THERMAL_NO_LIMIT
+ THERMAL_NO_LIMIT>;
+ };
+ };
+ };
+ };
+
i2c0: i2c@2180000 {
compatible = "fsl,vf610-i2c";
#address-cells = <1>;
@@ -238,9 +413,12 @@
sata: sata@3200000 {
compatible = "fsl,ls1012a-ahci", "fsl,ls1043a-ahci";
- reg = <0x0 0x3200000 0x0 0x10000>;
+ reg = <0x0 0x3200000 0x0 0x10000>,
+ <0x0 0x20140520 0x0 0x4>;
+ reg-names = "ahci", "sata-ecc";
interrupts = <0 69 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clockgen 4 0>;
+ dma-coherent;
status = "disabled";
};
};
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
index ec13a6ecb754..45cface08cbb 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
@@ -582,7 +582,9 @@
sata: sata@3200000 {
compatible = "fsl,ls1043a-ahci";
- reg = <0x0 0x3200000 0x0 0x10000>;
+ reg = <0x0 0x3200000 0x0 0x10000>,
+ <0x0 0x20140520 0x0 0x4>;
+ reg-names = "ahci", "sata-ecc";
interrupts = <0 69 0x4>;
clocks = <&clockgen 4 0>;
dma-coherent;
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi
index 4a164b801882..f4b8b7edaf9d 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi
@@ -587,7 +587,9 @@
sata: sata@3200000 {
compatible = "fsl,ls1046a-ahci";
- reg = <0x0 0x3200000 0x0 0x10000>;
+ reg = <0x0 0x3200000 0x0 0x10000>,
+ <0x0 0x20140520 0x0 0x4>;
+ reg-names = "ahci", "sata-ecc";
interrupts = <GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clockgen 4 1>;
};
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1088a-qds.dts b/arch/arm64/boot/dts/freescale/fsl-ls1088a-qds.dts
new file mode 100644
index 000000000000..8c3cae530f8f
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1088a-qds.dts
@@ -0,0 +1,123 @@
+/*
+ * Device Tree file for NXP LS1088A QDS Board.
+ *
+ * Copyright 2017 NXP
+ *
+ * Harninder Rai <harninder.rai@nxp.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPLv2 or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+
+#include "fsl-ls1088a.dtsi"
+
+/ {
+ model = "LS1088A QDS Board";
+ compatible = "fsl,ls1088a-qds", "fsl,ls1088a";
+};
+
+&i2c0 {
+ status = "okay";
+
+ i2c-switch@77 {
+ compatible = "nxp,pca9547";
+ reg = <0x77>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ i2c@2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x2>;
+
+ ina220@40 {
+ compatible = "ti,ina220";
+ reg = <0x40>;
+ shunt-resistor = <1000>;
+ };
+
+ ina220@41 {
+ compatible = "ti,ina220";
+ reg = <0x41>;
+ shunt-resistor = <1000>;
+ };
+ };
+
+ i2c@3 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x3>;
+
+ temp-sensor@4c {
+ compatible = "adi,adt7461a";
+ reg = <0x4c>;
+ };
+
+ rtc@51 {
+ compatible = "nxp,pcf2129";
+ reg = <0x51>;
+ /* IRQ10_B */
+ interrupts = <0 150 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ eeprom@56 {
+ compatible = "atmel,24c512";
+ reg = <0x56>;
+ };
+
+ eeprom@57 {
+ compatible = "atmel,24c512";
+ reg = <0x57>;
+ };
+ };
+ };
+};
+
+&duart0 {
+ status = "okay";
+};
+
+&duart1 {
+ status = "okay";
+};
+
+&sata {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1088a-rdb.dts b/arch/arm64/boot/dts/freescale/fsl-ls1088a-rdb.dts
new file mode 100644
index 000000000000..8a04fbb25cb4
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1088a-rdb.dts
@@ -0,0 +1,107 @@
+/*
+ * Device Tree file for NXP LS1088A RDB Board.
+ *
+ * Copyright 2017 NXP
+ *
+ * Harninder Rai <harninder.rai@nxp.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPLv2 or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+
+#include "fsl-ls1088a.dtsi"
+
+/ {
+ model = "L1088A RDB Board";
+ compatible = "fsl,ls1088a-rdb", "fsl,ls1088a";
+};
+
+&i2c0 {
+ status = "okay";
+
+ i2c-switch@77 {
+ compatible = "nxp,pca9547";
+ reg = <0x77>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ i2c@2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x2>;
+
+ ina220@40 {
+ compatible = "ti,ina220";
+ reg = <0x40>;
+ shunt-resistor = <1000>;
+ };
+ };
+
+ i2c@3 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x3>;
+
+ temp-sensor@4c {
+ compatible = "adi,adt7461a";
+ reg = <0x4c>;
+ };
+
+ rtc@51 {
+ compatible = "nxp,pcf2129";
+ reg = <0x51>;
+ /* IRQ10_B */
+ interrupts = <0 150 IRQ_TYPE_LEVEL_HIGH>;
+ };
+ };
+ };
+};
+
+&duart0 {
+ status = "okay";
+};
+
+&duart1 {
+ status = "okay";
+};
+
+&sata {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi
new file mode 100644
index 000000000000..2946fd797121
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi
@@ -0,0 +1,275 @@
+/*
+ * Device Tree Include file for NXP Layerscape-1088A family SoC.
+ *
+ * Copyright 2017 NXP
+ *
+ * Harninder Rai <harninder.rai@nxp.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPLv2 or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+/ {
+ compatible = "fsl,ls1088a";
+ interrupt-parent = <&gic>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ /* We have 2 clusters having 4 Cortex-A53 cores each */
+ cpu0: cpu@0 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53";
+ reg = <0x0>;
+ clocks = <&clockgen 1 0>;
+ };
+
+ cpu1: cpu@1 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53";
+ reg = <0x1>;
+ clocks = <&clockgen 1 0>;
+ };
+
+ cpu2: cpu@2 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53";
+ reg = <0x2>;
+ clocks = <&clockgen 1 0>;
+ };
+
+ cpu3: cpu@3 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53";
+ reg = <0x3>;
+ clocks = <&clockgen 1 0>;
+ };
+
+ cpu4: cpu@100 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53";
+ reg = <0x100>;
+ clocks = <&clockgen 1 1>;
+ };
+
+ cpu5: cpu@101 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53";
+ reg = <0x101>;
+ clocks = <&clockgen 1 1>;
+ };
+
+ cpu6: cpu@102 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53";
+ reg = <0x102>;
+ clocks = <&clockgen 1 1>;
+ };
+
+ cpu7: cpu@103 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53";
+ reg = <0x103>;
+ clocks = <&clockgen 1 1>;
+ };
+ };
+
+ gic: interrupt-controller@6000000 {
+ compatible = "arm,gic-v3";
+ #interrupt-cells = <3>;
+ interrupt-controller;
+ reg = <0x0 0x06000000 0 0x10000>, /* GIC Dist */
+ <0x0 0x06100000 0 0x100000>, /* GICR(RD_base+SGI_base)*/
+ <0x0 0x0c0c0000 0 0x2000>, /* GICC */
+ <0x0 0x0c0d0000 0 0x1000>, /* GICH */
+ <0x0 0x0c0e0000 0 0x20000>; /* GICV */
+ interrupts = <1 9 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ timer {
+ compatible = "arm,armv8-timer";
+ interrupts = <1 13 IRQ_TYPE_LEVEL_LOW>,/* Physical Secure PPI */
+ <1 14 IRQ_TYPE_LEVEL_LOW>,/* Physical Non-Secure PPI */
+ <1 11 IRQ_TYPE_LEVEL_LOW>,/* Virtual PPI */
+ <1 10 IRQ_TYPE_LEVEL_LOW>;/* Hypervisor PPI */
+ };
+
+ sysclk: sysclk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <100000000>;
+ clock-output-names = "sysclk";
+ };
+
+ soc {
+ compatible = "simple-bus";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ clockgen: clocking@1300000 {
+ compatible = "fsl,ls1088a-clockgen";
+ reg = <0 0x1300000 0 0xa0000>;
+ #clock-cells = <2>;
+ clocks = <&sysclk>;
+ };
+
+ duart0: serial@21c0500 {
+ compatible = "fsl,ns16550", "ns16550a";
+ reg = <0x0 0x21c0500 0x0 0x100>;
+ clocks = <&clockgen 4 3>;
+ interrupts = <0 32 IRQ_TYPE_LEVEL_HIGH>;
+ status = "disabled";
+ };
+
+ duart1: serial@21c0600 {
+ compatible = "fsl,ns16550", "ns16550a";
+ reg = <0x0 0x21c0600 0x0 0x100>;
+ clocks = <&clockgen 4 3>;
+ interrupts = <0 32 IRQ_TYPE_LEVEL_HIGH>;
+ status = "disabled";
+ };
+
+ gpio0: gpio@2300000 {
+ compatible = "fsl,qoriq-gpio";
+ reg = <0x0 0x2300000 0x0 0x10000>;
+ interrupts = <0 36 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpio1: gpio@2310000 {
+ compatible = "fsl,qoriq-gpio";
+ reg = <0x0 0x2310000 0x0 0x10000>;
+ interrupts = <0 36 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpio2: gpio@2320000 {
+ compatible = "fsl,qoriq-gpio";
+ reg = <0x0 0x2320000 0x0 0x10000>;
+ interrupts = <0 37 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpio3: gpio@2330000 {
+ compatible = "fsl,qoriq-gpio";
+ reg = <0x0 0x2330000 0x0 0x10000>;
+ interrupts = <0 37 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ ifc: ifc@2240000 {
+ compatible = "fsl,ifc", "simple-bus";
+ reg = <0x0 0x2240000 0x0 0x20000>;
+ interrupts = <0 21 IRQ_TYPE_LEVEL_HIGH>;
+ little-endian;
+ #address-cells = <2>;
+ #size-cells = <1>;
+
+ ranges = <0 0 0x5 0x80000000 0x08000000
+ 2 0 0x5 0x30000000 0x00010000
+ 3 0 0x5 0x20000000 0x00010000>;
+ status = "disabled";
+ };
+
+ i2c0: i2c@2000000 {
+ compatible = "fsl,vf610-i2c";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x0 0x2000000 0x0 0x10000>;
+ interrupts = <0 34 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clockgen 4 3>;
+ status = "disabled";
+ };
+
+ i2c1: i2c@2010000 {
+ compatible = "fsl,vf610-i2c";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x0 0x2010000 0x0 0x10000>;
+ interrupts = <0 34 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clockgen 4 3>;
+ status = "disabled";
+ };
+
+ i2c2: i2c@2020000 {
+ compatible = "fsl,vf610-i2c";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x0 0x2020000 0x0 0x10000>;
+ interrupts = <0 35 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clockgen 4 3>;
+ status = "disabled";
+ };
+
+ i2c3: i2c@2030000 {
+ compatible = "fsl,vf610-i2c";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x0 0x2030000 0x0 0x10000>;
+ interrupts = <0 35 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clockgen 4 3>;
+ status = "disabled";
+ };
+
+ sata: sata@3200000 {
+ compatible = "fsl,ls1088a-ahci", "fsl,ls1043a-ahci";
+ reg = <0x0 0x3200000 0x0 0x10000>;
+ interrupts = <0 133 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clockgen 4 3>;
+ status = "disabled";
+ };
+ };
+
+};
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls2080a-qds.dts b/arch/arm64/boot/dts/freescale/fsl-ls2080a-qds.dts
index 8bc1f8f6fcfc..c1e76dfca48e 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls2080a-qds.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls2080a-qds.dts
@@ -1,8 +1,9 @@
/*
* Device Tree file for Freescale LS2080a QDS Board.
*
- * Copyright (C) 2015, Freescale Semiconductor
+ * Copyright (C) 2015-17, Freescale Semiconductor
*
+ * Abhimanyu Saini <abhimanyu.saini@nxp.com>
* Bhupesh Sharma <bhupesh.sharma@freescale.com>
*
* This file is dual-licensed: you can use it either under the terms
@@ -47,6 +48,7 @@
/dts-v1/;
#include "fsl-ls2080a.dtsi"
+#include "fsl-ls208xa-qds.dtsi"
/ {
model = "Freescale Layerscape 2080a QDS Board";
@@ -61,154 +63,3 @@
stdout-path = "serial0:115200n8";
};
};
-
-&esdhc {
- status = "okay";
-};
-
-&ifc {
- status = "okay";
- #address-cells = <2>;
- #size-cells = <1>;
- ranges = <0x0 0x0 0x5 0x80000000 0x08000000
- 0x2 0x0 0x5 0x30000000 0x00010000
- 0x3 0x0 0x5 0x20000000 0x00010000>;
-
- nor@0,0 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "cfi-flash";
- reg = <0x0 0x0 0x8000000>;
- bank-width = <2>;
- device-width = <1>;
- };
-
- nand@2,0 {
- compatible = "fsl,ifc-nand";
- reg = <0x2 0x0 0x10000>;
- };
-
- cpld@3,0 {
- reg = <0x3 0x0 0x10000>;
- compatible = "fsl,ls2080aqds-fpga", "fsl,fpga-qixis";
- };
-};
-
-&i2c0 {
- status = "okay";
- pca9547@77 {
- compatible = "nxp,pca9547";
- reg = <0x77>;
- #address-cells = <1>;
- #size-cells = <0>;
- i2c@0 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <0x00>;
- rtc@68 {
- compatible = "dallas,ds3232";
- reg = <0x68>;
- };
- };
-
- i2c@2 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <0x02>;
-
- ina220@40 {
- compatible = "ti,ina220";
- reg = <0x40>;
- shunt-resistor = <500>;
- };
-
- ina220@41 {
- compatible = "ti,ina220";
- reg = <0x41>;
- shunt-resistor = <1000>;
- };
- };
-
- i2c@3 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <0x3>;
-
- adt7481@4c {
- compatible = "adi,adt7461";
- reg = <0x4c>;
- };
- };
- };
-};
-
-&i2c1 {
- status = "disabled";
-};
-
-&i2c2 {
- status = "disabled";
-};
-
-&i2c3 {
- status = "disabled";
-};
-
-&dspi {
- status = "okay";
- dflash0: n25q128a {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "st,m25p80";
- spi-max-frequency = <3000000>;
- reg = <0>;
- };
- dflash1: sst25wf040b {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "st,m25p80";
- spi-max-frequency = <3000000>;
- reg = <1>;
- };
- dflash2: en25s64 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "st,m25p80";
- spi-max-frequency = <3000000>;
- reg = <2>;
- };
-};
-
-&qspi {
- status = "okay";
- flash0: s25fl256s1@0 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "st,m25p80";
- spi-max-frequency = <20000000>;
- reg = <0>;
- };
- flash2: s25fl256s1@2 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "st,m25p80";
- spi-max-frequency = <20000000>;
- reg = <0>;
- };
-};
-
-&sata0 {
- status = "okay";
-};
-
-&sata1 {
- status = "okay";
-};
-
-&usb0 {
- status = "okay";
-};
-
-&usb1 {
- status = "okay";
-};
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls2080a-rdb.dts b/arch/arm64/boot/dts/freescale/fsl-ls2080a-rdb.dts
index 2ff46ca450b1..18ad19587311 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls2080a-rdb.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls2080a-rdb.dts
@@ -1,8 +1,9 @@
/*
* Device Tree file for Freescale LS2080a RDB Board.
*
- * Copyright (C) 2015, Freescale Semiconductor
+ * Copyright (C) 2016-17, Freescale Semiconductor
*
+ * Abhimanyu Saini <abhimanyu.saini@nxp.com>
* Bhupesh Sharma <bhupesh.sharma@freescale.com>
*
* This file is dual-licensed: you can use it either under the terms
@@ -47,6 +48,7 @@
/dts-v1/;
#include "fsl-ls2080a.dtsi"
+#include "fsl-ls208xa-rdb.dtsi"
/ {
model = "Freescale Layerscape 2080a RDB Board";
@@ -61,109 +63,3 @@
stdout-path = "serial1:115200n8";
};
};
-
-&esdhc {
- status = "okay";
-};
-
-&ifc {
- status = "okay";
- #address-cells = <2>;
- #size-cells = <1>;
- ranges = <0x0 0x0 0x5 0x80000000 0x08000000
- 0x2 0x0 0x5 0x30000000 0x00010000
- 0x3 0x0 0x5 0x20000000 0x00010000>;
-
- nor@0,0 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "cfi-flash";
- reg = <0x0 0x0 0x8000000>;
- bank-width = <2>;
- device-width = <1>;
- };
-
- nand@2,0 {
- compatible = "fsl,ifc-nand";
- reg = <0x2 0x0 0x10000>;
- };
-
- cpld@3,0 {
- reg = <0x3 0x0 0x10000>;
- compatible = "fsl,ls2080aqds-fpga", "fsl,fpga-qixis";
- };
-
-};
-
-&i2c0 {
- status = "okay";
- pca9547@75 {
- compatible = "nxp,pca9547";
- reg = <0x75>;
- #address-cells = <1>;
- #size-cells = <0>;
- i2c@1 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <0x01>;
- rtc@68 {
- compatible = "dallas,ds3232";
- reg = <0x68>;
- };
- };
-
- i2c@3 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <0x3>;
-
- adt7481@4c {
- compatible = "adi,adt7461";
- reg = <0x4c>;
- };
- };
- };
-};
-
-&i2c1 {
- status = "disabled";
-};
-
-&i2c2 {
- status = "disabled";
-};
-
-&i2c3 {
- status = "disabled";
-};
-
-&dspi {
- status = "okay";
- dflash0: n25q512a {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "st,m25p80";
- spi-max-frequency = <3000000>;
- reg = <0>;
- };
-};
-
-&qspi {
- status = "disabled";
-};
-
-&sata0 {
- status = "okay";
-};
-
-&sata1 {
- status = "okay";
-};
-
-&usb0 {
- status = "okay";
-};
-
-&usb1 {
- status = "okay";
-};
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi
index e5935f28848c..46a26c021421 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi
@@ -1,8 +1,9 @@
/*
* Device Tree Include file for Freescale Layerscape-2080A family SoC.
*
- * Copyright (C) 2014-2015, Freescale Semiconductor
+ * Copyright (C) 2014-2016, Freescale Semiconductor
*
+ * Abhimanyu Saini <abhimanyu.saini@nxp.com>
* Bhupesh Sharma <bhupesh.sharma@freescale.com>
*
* This file is dual-licensed: you can use it either under the terms
@@ -44,802 +45,122 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include <dt-bindings/thermal/thermal.h>
+#include "fsl-ls208xa.dtsi"
-/ {
- compatible = "fsl,ls2080a";
- interrupt-parent = <&gic>;
- #address-cells = <2>;
- #size-cells = <2>;
-
- cpus {
- #address-cells = <1>;
- #size-cells = <0>;
-
- /*
- * We expect the enable-method for cpu's to be "psci", but this
- * is dependent on the SoC FW, which will fill this in.
- *
- * Currently supported enable-method is psci v0.2
- */
-
- /* We have 4 clusters having 2 Cortex-A57 cores each */
- cpu0: cpu@0 {
- device_type = "cpu";
- compatible = "arm,cortex-a57";
- reg = <0x0>;
- clocks = <&clockgen 1 0>;
- next-level-cache = <&cluster0_l2>;
- #cooling-cells = <2>;
- };
-
- cpu1: cpu@1 {
- device_type = "cpu";
- compatible = "arm,cortex-a57";
- reg = <0x1>;
- clocks = <&clockgen 1 0>;
- next-level-cache = <&cluster0_l2>;
- };
-
- cpu2: cpu@100 {
- device_type = "cpu";
- compatible = "arm,cortex-a57";
- reg = <0x100>;
- clocks = <&clockgen 1 1>;
- next-level-cache = <&cluster1_l2>;
- #cooling-cells = <2>;
- };
-
- cpu3: cpu@101 {
- device_type = "cpu";
- compatible = "arm,cortex-a57";
- reg = <0x101>;
- clocks = <&clockgen 1 1>;
- next-level-cache = <&cluster1_l2>;
- };
-
- cpu4: cpu@200 {
- device_type = "cpu";
- compatible = "arm,cortex-a57";
- reg = <0x200>;
- clocks = <&clockgen 1 2>;
- next-level-cache = <&cluster2_l2>;
- #cooling-cells = <2>;
- };
-
- cpu5: cpu@201 {
- device_type = "cpu";
- compatible = "arm,cortex-a57";
- reg = <0x201>;
- clocks = <&clockgen 1 2>;
- next-level-cache = <&cluster2_l2>;
- };
-
- cpu6: cpu@300 {
- device_type = "cpu";
- compatible = "arm,cortex-a57";
- reg = <0x300>;
- clocks = <&clockgen 1 3>;
- next-level-cache = <&cluster3_l2>;
- #cooling-cells = <2>;
- };
-
- cpu7: cpu@301 {
- device_type = "cpu";
- compatible = "arm,cortex-a57";
- reg = <0x301>;
- clocks = <&clockgen 1 3>;
- next-level-cache = <&cluster3_l2>;
- };
-
- cluster0_l2: l2-cache0 {
- compatible = "cache";
- };
-
- cluster1_l2: l2-cache1 {
- compatible = "cache";
- };
-
- cluster2_l2: l2-cache2 {
- compatible = "cache";
- };
-
- cluster3_l2: l2-cache3 {
- compatible = "cache";
- };
+&cpu {
+ cpu0: cpu@0 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a57";
+ reg = <0x0>;
+ clocks = <&clockgen 1 0>;
+ next-level-cache = <&cluster0_l2>;
+ #cooling-cells = <2>;
};
- memory@80000000 {
- device_type = "memory";
- reg = <0x00000000 0x80000000 0 0x80000000>;
- /* DRAM space - 1, size : 2 GB DRAM */
+ cpu1: cpu@1 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a57";
+ reg = <0x1>;
+ clocks = <&clockgen 1 0>;
+ next-level-cache = <&cluster0_l2>;
};
- sysclk: sysclk {
- compatible = "fixed-clock";
- #clock-cells = <0>;
- clock-frequency = <100000000>;
- clock-output-names = "sysclk";
+ cpu2: cpu@100 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a57";
+ reg = <0x100>;
+ clocks = <&clockgen 1 1>;
+ next-level-cache = <&cluster1_l2>;
+ #cooling-cells = <2>;
};
- gic: interrupt-controller@6000000 {
- compatible = "arm,gic-v3";
- reg = <0x0 0x06000000 0 0x10000>, /* GIC Dist */
- <0x0 0x06100000 0 0x100000>, /* GICR (RD_base + SGI_base) */
- <0x0 0x0c0c0000 0 0x2000>, /* GICC */
- <0x0 0x0c0d0000 0 0x1000>, /* GICH */
- <0x0 0x0c0e0000 0 0x20000>; /* GICV */
- #interrupt-cells = <3>;
- #address-cells = <2>;
- #size-cells = <2>;
- ranges;
- interrupt-controller;
- interrupts = <1 9 0x4>;
-
- its: gic-its@6020000 {
- compatible = "arm,gic-v3-its";
- msi-controller;
- reg = <0x0 0x6020000 0 0x20000>;
- };
+ cpu3: cpu@101 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a57";
+ reg = <0x101>;
+ clocks = <&clockgen 1 1>;
+ next-level-cache = <&cluster1_l2>;
};
- rstcr: syscon@1e60000 {
- compatible = "fsl,ls2080a-rstcr", "syscon";
- reg = <0x0 0x1e60000 0x0 0x4>;
+ cpu4: cpu@200 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a57";
+ reg = <0x200>;
+ clocks = <&clockgen 1 2>;
+ next-level-cache = <&cluster2_l2>;
+ #cooling-cells = <2>;
};
- reboot {
- compatible ="syscon-reboot";
- regmap = <&rstcr>;
- offset = <0x0>;
- mask = <0x2>;
+ cpu5: cpu@201 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a57";
+ reg = <0x201>;
+ clocks = <&clockgen 1 2>;
+ next-level-cache = <&cluster2_l2>;
};
- timer {
- compatible = "arm,armv8-timer";
- interrupts = <1 13 4>, /* Physical Secure PPI, active-low */
- <1 14 4>, /* Physical Non-Secure PPI, active-low */
- <1 11 4>, /* Virtual PPI, active-low */
- <1 10 4>; /* Hypervisor PPI, active-low */
- fsl,erratum-a008585;
+ cpu6: cpu@300 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a57";
+ reg = <0x300>;
+ clocks = <&clockgen 1 3>;
+ next-level-cache = <&cluster3_l2>;
+ #cooling-cells = <2>;
};
- pmu {
- compatible = "arm,armv8-pmuv3";
- interrupts = <1 7 0x8>; /* PMU PPI, Level low type */
+ cpu7: cpu@301 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a57";
+ reg = <0x301>;
+ clocks = <&clockgen 1 3>;
+ next-level-cache = <&cluster3_l2>;
};
- soc {
- compatible = "simple-bus";
- #address-cells = <2>;
- #size-cells = <2>;
- ranges;
-
- clockgen: clocking@1300000 {
- compatible = "fsl,ls2080a-clockgen";
- reg = <0 0x1300000 0 0xa0000>;
- #clock-cells = <2>;
- clocks = <&sysclk>;
- };
-
- dcfg: dcfg@1e00000 {
- compatible = "fsl,ls2080a-dcfg", "syscon";
- reg = <0x0 0x1e00000 0x0 0x10000>;
- little-endian;
- };
-
- tmu: tmu@1f80000 {
- compatible = "fsl,qoriq-tmu";
- reg = <0x0 0x1f80000 0x0 0x10000>;
- interrupts = <0 23 0x4>;
- fsl,tmu-range = <0xb0000 0x9002a 0x6004c 0x30062>;
- fsl,tmu-calibration = <0x00000000 0x00000026
- 0x00000001 0x0000002d
- 0x00000002 0x00000032
- 0x00000003 0x00000039
- 0x00000004 0x0000003f
- 0x00000005 0x00000046
- 0x00000006 0x0000004d
- 0x00000007 0x00000054
- 0x00000008 0x0000005a
- 0x00000009 0x00000061
- 0x0000000a 0x0000006a
- 0x0000000b 0x00000071
-
- 0x00010000 0x00000025
- 0x00010001 0x0000002c
- 0x00010002 0x00000035
- 0x00010003 0x0000003d
- 0x00010004 0x00000045
- 0x00010005 0x0000004e
- 0x00010006 0x00000057
- 0x00010007 0x00000061
- 0x00010008 0x0000006b
- 0x00010009 0x00000076
-
- 0x00020000 0x00000029
- 0x00020001 0x00000033
- 0x00020002 0x0000003d
- 0x00020003 0x00000049
- 0x00020004 0x00000056
- 0x00020005 0x00000061
- 0x00020006 0x0000006d
-
- 0x00030000 0x00000021
- 0x00030001 0x0000002a
- 0x00030002 0x0000003c
- 0x00030003 0x0000004e>;
- little-endian;
- #thermal-sensor-cells = <1>;
- };
-
- thermal-zones {
- cpu_thermal: cpu-thermal {
- polling-delay-passive = <1000>;
- polling-delay = <5000>;
-
- thermal-sensors = <&tmu 4>;
-
- trips {
- cpu_alert: cpu-alert {
- temperature = <75000>;
- hysteresis = <2000>;
- type = "passive";
- };
- cpu_crit: cpu-crit {
- temperature = <85000>;
- hysteresis = <2000>;
- type = "critical";
- };
- };
-
- cooling-maps {
- map0 {
- trip = <&cpu_alert>;
- cooling-device =
- <&cpu0 THERMAL_NO_LIMIT
- THERMAL_NO_LIMIT>;
- };
- map1 {
- trip = <&cpu_alert>;
- cooling-device =
- <&cpu2 THERMAL_NO_LIMIT
- THERMAL_NO_LIMIT>;
- };
- map2 {
- trip = <&cpu_alert>;
- cooling-device =
- <&cpu4 THERMAL_NO_LIMIT
- THERMAL_NO_LIMIT>;
- };
- map3 {
- trip = <&cpu_alert>;
- cooling-device =
- <&cpu6 THERMAL_NO_LIMIT
- THERMAL_NO_LIMIT>;
- };
- };
- };
- };
-
- serial0: serial@21c0500 {
- compatible = "fsl,ns16550", "ns16550a";
- reg = <0x0 0x21c0500 0x0 0x100>;
- clocks = <&clockgen 4 3>;
- interrupts = <0 32 0x4>; /* Level high type */
- };
-
- serial1: serial@21c0600 {
- compatible = "fsl,ns16550", "ns16550a";
- reg = <0x0 0x21c0600 0x0 0x100>;
- clocks = <&clockgen 4 3>;
- interrupts = <0 32 0x4>; /* Level high type */
- };
-
- cluster1_core0_watchdog: wdt@c000000 {
- compatible = "arm,sp805-wdt", "arm,primecell";
- reg = <0x0 0xc000000 0x0 0x1000>;
- clocks = <&clockgen 4 3>, <&clockgen 4 3>;
- clock-names = "apb_pclk", "wdog_clk";
- };
-
- cluster1_core1_watchdog: wdt@c010000 {
- compatible = "arm,sp805-wdt", "arm,primecell";
- reg = <0x0 0xc010000 0x0 0x1000>;
- clocks = <&clockgen 4 3>, <&clockgen 4 3>;
- clock-names = "apb_pclk", "wdog_clk";
- };
-
- cluster2_core0_watchdog: wdt@c100000 {
- compatible = "arm,sp805-wdt", "arm,primecell";
- reg = <0x0 0xc100000 0x0 0x1000>;
- clocks = <&clockgen 4 3>, <&clockgen 4 3>;
- clock-names = "apb_pclk", "wdog_clk";
- };
-
- cluster2_core1_watchdog: wdt@c110000 {
- compatible = "arm,sp805-wdt", "arm,primecell";
- reg = <0x0 0xc110000 0x0 0x1000>;
- clocks = <&clockgen 4 3>, <&clockgen 4 3>;
- clock-names = "apb_pclk", "wdog_clk";
- };
-
- cluster3_core0_watchdog: wdt@c200000 {
- compatible = "arm,sp805-wdt", "arm,primecell";
- reg = <0x0 0xc200000 0x0 0x1000>;
- clocks = <&clockgen 4 3>, <&clockgen 4 3>;
- clock-names = "apb_pclk", "wdog_clk";
- };
-
- cluster3_core1_watchdog: wdt@c210000 {
- compatible = "arm,sp805-wdt", "arm,primecell";
- reg = <0x0 0xc210000 0x0 0x1000>;
- clocks = <&clockgen 4 3>, <&clockgen 4 3>;
- clock-names = "apb_pclk", "wdog_clk";
- };
-
- cluster4_core0_watchdog: wdt@c300000 {
- compatible = "arm,sp805-wdt", "arm,primecell";
- reg = <0x0 0xc300000 0x0 0x1000>;
- clocks = <&clockgen 4 3>, <&clockgen 4 3>;
- clock-names = "apb_pclk", "wdog_clk";
- };
-
- cluster4_core1_watchdog: wdt@c310000 {
- compatible = "arm,sp805-wdt", "arm,primecell";
- reg = <0x0 0xc310000 0x0 0x1000>;
- clocks = <&clockgen 4 3>, <&clockgen 4 3>;
- clock-names = "apb_pclk", "wdog_clk";
- };
-
- fsl_mc: fsl-mc@80c000000 {
- compatible = "fsl,qoriq-mc";
- reg = <0x00000008 0x0c000000 0 0x40>, /* MC portal base */
- <0x00000000 0x08340000 0 0x40000>; /* MC control reg */
- msi-parent = <&its>;
- #address-cells = <3>;
- #size-cells = <1>;
-
- /*
- * Region type 0x0 - MC portals
- * Region type 0x1 - QBMAN portals
- */
- ranges = <0x0 0x0 0x0 0x8 0x0c000000 0x4000000
- 0x1 0x0 0x0 0x8 0x18000000 0x8000000>;
-
- /*
- * Define the maximum number of MACs present on the SoC.
- */
- dpmacs {
- #address-cells = <1>;
- #size-cells = <0>;
-
- dpmac1: dpmac@1 {
- compatible = "fsl,qoriq-mc-dpmac";
- reg = <0x1>;
- };
-
- dpmac2: dpmac@2 {
- compatible = "fsl,qoriq-mc-dpmac";
- reg = <0x2>;
- };
-
- dpmac3: dpmac@3 {
- compatible = "fsl,qoriq-mc-dpmac";
- reg = <0x3>;
- };
-
- dpmac4: dpmac@4 {
- compatible = "fsl,qoriq-mc-dpmac";
- reg = <0x4>;
- };
-
- dpmac5: dpmac@5 {
- compatible = "fsl,qoriq-mc-dpmac";
- reg = <0x5>;
- };
-
- dpmac6: dpmac@6 {
- compatible = "fsl,qoriq-mc-dpmac";
- reg = <0x6>;
- };
-
- dpmac7: dpmac@7 {
- compatible = "fsl,qoriq-mc-dpmac";
- reg = <0x7>;
- };
-
- dpmac8: dpmac@8 {
- compatible = "fsl,qoriq-mc-dpmac";
- reg = <0x8>;
- };
-
- dpmac9: dpmac@9 {
- compatible = "fsl,qoriq-mc-dpmac";
- reg = <0x9>;
- };
-
- dpmac10: dpmac@a {
- compatible = "fsl,qoriq-mc-dpmac";
- reg = <0xa>;
- };
-
- dpmac11: dpmac@b {
- compatible = "fsl,qoriq-mc-dpmac";
- reg = <0xb>;
- };
-
- dpmac12: dpmac@c {
- compatible = "fsl,qoriq-mc-dpmac";
- reg = <0xc>;
- };
-
- dpmac13: dpmac@d {
- compatible = "fsl,qoriq-mc-dpmac";
- reg = <0xd>;
- };
-
- dpmac14: dpmac@e {
- compatible = "fsl,qoriq-mc-dpmac";
- reg = <0xe>;
- };
-
- dpmac15: dpmac@f {
- compatible = "fsl,qoriq-mc-dpmac";
- reg = <0xf>;
- };
-
- dpmac16: dpmac@10 {
- compatible = "fsl,qoriq-mc-dpmac";
- reg = <0x10>;
- };
- };
- };
-
- smmu: iommu@5000000 {
- compatible = "arm,mmu-500";
- reg = <0 0x5000000 0 0x800000>;
- #global-interrupts = <12>;
- interrupts = <0 13 4>, /* global secure fault */
- <0 14 4>, /* combined secure interrupt */
- <0 15 4>, /* global non-secure fault */
- <0 16 4>, /* combined non-secure interrupt */
- /* performance counter interrupts 0-7 */
- <0 211 4>, <0 212 4>,
- <0 213 4>, <0 214 4>,
- <0 215 4>, <0 216 4>,
- <0 217 4>, <0 218 4>,
- /* per context interrupt, 64 interrupts */
- <0 146 4>, <0 147 4>,
- <0 148 4>, <0 149 4>,
- <0 150 4>, <0 151 4>,
- <0 152 4>, <0 153 4>,
- <0 154 4>, <0 155 4>,
- <0 156 4>, <0 157 4>,
- <0 158 4>, <0 159 4>,
- <0 160 4>, <0 161 4>,
- <0 162 4>, <0 163 4>,
- <0 164 4>, <0 165 4>,
- <0 166 4>, <0 167 4>,
- <0 168 4>, <0 169 4>,
- <0 170 4>, <0 171 4>,
- <0 172 4>, <0 173 4>,
- <0 174 4>, <0 175 4>,
- <0 176 4>, <0 177 4>,
- <0 178 4>, <0 179 4>,
- <0 180 4>, <0 181 4>,
- <0 182 4>, <0 183 4>,
- <0 184 4>, <0 185 4>,
- <0 186 4>, <0 187 4>,
- <0 188 4>, <0 189 4>,
- <0 190 4>, <0 191 4>,
- <0 192 4>, <0 193 4>,
- <0 194 4>, <0 195 4>,
- <0 196 4>, <0 197 4>,
- <0 198 4>, <0 199 4>,
- <0 200 4>, <0 201 4>,
- <0 202 4>, <0 203 4>,
- <0 204 4>, <0 205 4>,
- <0 206 4>, <0 207 4>,
- <0 208 4>, <0 209 4>;
- mmu-masters = <&fsl_mc 0x300 0>;
- };
-
- dspi: dspi@2100000 {
- status = "disabled";
- compatible = "fsl,ls2080a-dspi", "fsl,ls2085a-dspi";
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <0x0 0x2100000 0x0 0x10000>;
- interrupts = <0 26 0x4>; /* Level high type */
- clocks = <&clockgen 4 3>;
- clock-names = "dspi";
- spi-num-chipselects = <5>;
- bus-num = <0>;
- };
-
- esdhc: esdhc@2140000 {
- status = "disabled";
- compatible = "fsl,ls2080a-esdhc", "fsl,esdhc";
- reg = <0x0 0x2140000 0x0 0x10000>;
- interrupts = <0 28 0x4>; /* Level high type */
- clock-frequency = <0>; /* Updated by bootloader */
- voltage-ranges = <1800 1800 3300 3300>;
- sdhci,auto-cmd12;
- little-endian;
- bus-width = <4>;
- };
-
- gpio0: gpio@2300000 {
- compatible = "fsl,ls2080a-gpio", "fsl,qoriq-gpio";
- reg = <0x0 0x2300000 0x0 0x10000>;
- interrupts = <0 36 0x4>; /* Level high type */
- gpio-controller;
- little-endian;
- #gpio-cells = <2>;
- interrupt-controller;
- #interrupt-cells = <2>;
- };
-
- gpio1: gpio@2310000 {
- compatible = "fsl,ls2080a-gpio", "fsl,qoriq-gpio";
- reg = <0x0 0x2310000 0x0 0x10000>;
- interrupts = <0 36 0x4>; /* Level high type */
- gpio-controller;
- little-endian;
- #gpio-cells = <2>;
- interrupt-controller;
- #interrupt-cells = <2>;
- };
-
- gpio2: gpio@2320000 {
- compatible = "fsl,ls2080a-gpio", "fsl,qoriq-gpio";
- reg = <0x0 0x2320000 0x0 0x10000>;
- interrupts = <0 37 0x4>; /* Level high type */
- gpio-controller;
- little-endian;
- #gpio-cells = <2>;
- interrupt-controller;
- #interrupt-cells = <2>;
- };
-
- gpio3: gpio@2330000 {
- compatible = "fsl,ls2080a-gpio", "fsl,qoriq-gpio";
- reg = <0x0 0x2330000 0x0 0x10000>;
- interrupts = <0 37 0x4>; /* Level high type */
- gpio-controller;
- little-endian;
- #gpio-cells = <2>;
- interrupt-controller;
- #interrupt-cells = <2>;
- };
-
- i2c0: i2c@2000000 {
- status = "disabled";
- compatible = "fsl,vf610-i2c";
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <0x0 0x2000000 0x0 0x10000>;
- interrupts = <0 34 0x4>; /* Level high type */
- clock-names = "i2c";
- clocks = <&clockgen 4 3>;
- };
-
- i2c1: i2c@2010000 {
- status = "disabled";
- compatible = "fsl,vf610-i2c";
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <0x0 0x2010000 0x0 0x10000>;
- interrupts = <0 34 0x4>; /* Level high type */
- clock-names = "i2c";
- clocks = <&clockgen 4 3>;
- };
-
- i2c2: i2c@2020000 {
- status = "disabled";
- compatible = "fsl,vf610-i2c";
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <0x0 0x2020000 0x0 0x10000>;
- interrupts = <0 35 0x4>; /* Level high type */
- clock-names = "i2c";
- clocks = <&clockgen 4 3>;
- };
-
- i2c3: i2c@2030000 {
- status = "disabled";
- compatible = "fsl,vf610-i2c";
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <0x0 0x2030000 0x0 0x10000>;
- interrupts = <0 35 0x4>; /* Level high type */
- clock-names = "i2c";
- clocks = <&clockgen 4 3>;
- };
-
- ifc: ifc@2240000 {
- compatible = "fsl,ifc", "simple-bus";
- reg = <0x0 0x2240000 0x0 0x20000>;
- interrupts = <0 21 0x4>; /* Level high type */
- little-endian;
- #address-cells = <2>;
- #size-cells = <1>;
-
- ranges = <0 0 0x5 0x80000000 0x08000000
- 2 0 0x5 0x30000000 0x00010000
- 3 0 0x5 0x20000000 0x00010000>;
- };
-
- qspi: quadspi@20c0000 {
- status = "disabled";
- compatible = "fsl,ls2080a-qspi", "fsl,ls1021a-qspi";
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <0x0 0x20c0000 0x0 0x10000>,
- <0x0 0x20000000 0x0 0x10000000>;
- reg-names = "QuadSPI", "QuadSPI-memory";
- interrupts = <0 25 0x4>; /* Level high type */
- clocks = <&clockgen 4 3>, <&clockgen 4 3>;
- clock-names = "qspi_en", "qspi";
- };
+ cluster0_l2: l2-cache0 {
+ compatible = "cache";
+ };
- pcie@3400000 {
- compatible = "fsl,ls2080a-pcie", "fsl,ls2085a-pcie",
- "snps,dw-pcie";
- reg = <0x00 0x03400000 0x0 0x00100000 /* controller registers */
- 0x10 0x00000000 0x0 0x00002000>; /* configuration space */
- reg-names = "regs", "config";
- interrupts = <0 108 0x4>; /* Level high type */
- interrupt-names = "intr";
- #address-cells = <3>;
- #size-cells = <2>;
- device_type = "pci";
- dma-coherent;
- num-lanes = <4>;
- bus-range = <0x0 0xff>;
- ranges = <0x81000000 0x0 0x00000000 0x10 0x00010000 0x0 0x00010000 /* downstream I/O */
- 0x82000000 0x0 0x40000000 0x10 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
- msi-parent = <&its>;
- #interrupt-cells = <1>;
- interrupt-map-mask = <0 0 0 7>;
- interrupt-map = <0000 0 0 1 &gic 0 0 0 109 4>,
- <0000 0 0 2 &gic 0 0 0 110 4>,
- <0000 0 0 3 &gic 0 0 0 111 4>,
- <0000 0 0 4 &gic 0 0 0 112 4>;
- };
+ cluster1_l2: l2-cache1 {
+ compatible = "cache";
+ };
- pcie@3500000 {
- compatible = "fsl,ls2080a-pcie", "fsl,ls2085a-pcie",
- "snps,dw-pcie";
- reg = <0x00 0x03500000 0x0 0x00100000 /* controller registers */
- 0x12 0x00000000 0x0 0x00002000>; /* configuration space */
- reg-names = "regs", "config";
- interrupts = <0 113 0x4>; /* Level high type */
- interrupt-names = "intr";
- #address-cells = <3>;
- #size-cells = <2>;
- device_type = "pci";
- dma-coherent;
- num-lanes = <4>;
- bus-range = <0x0 0xff>;
- ranges = <0x81000000 0x0 0x00000000 0x12 0x00010000 0x0 0x00010000 /* downstream I/O */
- 0x82000000 0x0 0x40000000 0x12 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
- msi-parent = <&its>;
- #interrupt-cells = <1>;
- interrupt-map-mask = <0 0 0 7>;
- interrupt-map = <0000 0 0 1 &gic 0 0 0 114 4>,
- <0000 0 0 2 &gic 0 0 0 115 4>,
- <0000 0 0 3 &gic 0 0 0 116 4>,
- <0000 0 0 4 &gic 0 0 0 117 4>;
- };
+ cluster2_l2: l2-cache2 {
+ compatible = "cache";
+ };
- pcie@3600000 {
- compatible = "fsl,ls2080a-pcie", "fsl,ls2085a-pcie",
- "snps,dw-pcie";
- reg = <0x00 0x03600000 0x0 0x00100000 /* controller registers */
- 0x14 0x00000000 0x0 0x00002000>; /* configuration space */
- reg-names = "regs", "config";
- interrupts = <0 118 0x4>; /* Level high type */
- interrupt-names = "intr";
- #address-cells = <3>;
- #size-cells = <2>;
- device_type = "pci";
- dma-coherent;
- num-lanes = <8>;
- bus-range = <0x0 0xff>;
- ranges = <0x81000000 0x0 0x00000000 0x14 0x00010000 0x0 0x00010000 /* downstream I/O */
- 0x82000000 0x0 0x40000000 0x14 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
- msi-parent = <&its>;
- #interrupt-cells = <1>;
- interrupt-map-mask = <0 0 0 7>;
- interrupt-map = <0000 0 0 1 &gic 0 0 0 119 4>,
- <0000 0 0 2 &gic 0 0 0 120 4>,
- <0000 0 0 3 &gic 0 0 0 121 4>,
- <0000 0 0 4 &gic 0 0 0 122 4>;
- };
+ cluster3_l2: l2-cache3 {
+ compatible = "cache";
+ };
+};
- pcie@3700000 {
- compatible = "fsl,ls2080a-pcie", "fsl,ls2085a-pcie",
- "snps,dw-pcie";
- reg = <0x00 0x03700000 0x0 0x00100000 /* controller registers */
- 0x16 0x00000000 0x0 0x00002000>; /* configuration space */
- reg-names = "regs", "config";
- interrupts = <0 123 0x4>; /* Level high type */
- interrupt-names = "intr";
- #address-cells = <3>;
- #size-cells = <2>;
- device_type = "pci";
- dma-coherent;
- num-lanes = <4>;
- bus-range = <0x0 0xff>;
- ranges = <0x81000000 0x0 0x00000000 0x16 0x00010000 0x0 0x00010000 /* downstream I/O */
- 0x82000000 0x0 0x40000000 0x16 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
- msi-parent = <&its>;
- #interrupt-cells = <1>;
- interrupt-map-mask = <0 0 0 7>;
- interrupt-map = <0000 0 0 1 &gic 0 0 0 124 4>,
- <0000 0 0 2 &gic 0 0 0 125 4>,
- <0000 0 0 3 &gic 0 0 0 126 4>,
- <0000 0 0 4 &gic 0 0 0 127 4>;
- };
+&pcie1 {
+ reg = <0x00 0x03400000 0x0 0x00100000 /* controller registers */
+ 0x10 0x00000000 0x0 0x00002000>; /* configuration space */
- sata0: sata@3200000 {
- status = "disabled";
- compatible = "fsl,ls2080a-ahci";
- reg = <0x0 0x3200000 0x0 0x10000>;
- interrupts = <0 133 0x4>; /* Level high type */
- clocks = <&clockgen 4 3>;
- dma-coherent;
- };
+ ranges = <0x81000000 0x0 0x00000000 0x10 0x00010000 0x0 0x00010000 /* downstream I/O */
+ 0x82000000 0x0 0x40000000 0x10 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
+};
- sata1: sata@3210000 {
- status = "disabled";
- compatible = "fsl,ls2080a-ahci";
- reg = <0x0 0x3210000 0x0 0x10000>;
- interrupts = <0 136 0x4>; /* Level high type */
- clocks = <&clockgen 4 3>;
- dma-coherent;
- };
+&pcie2 {
+ reg = <0x00 0x03500000 0x0 0x00100000 /* controller registers */
+ 0x12 0x00000000 0x0 0x00002000>; /* configuration space */
- usb0: usb3@3100000 {
- status = "disabled";
- compatible = "snps,dwc3";
- reg = <0x0 0x3100000 0x0 0x10000>;
- interrupts = <0 80 0x4>; /* Level high type */
- dr_mode = "host";
- snps,quirk-frame-length-adjustment = <0x20>;
- snps,dis_rxdet_inp3_quirk;
- };
+ ranges = <0x81000000 0x0 0x00000000 0x12 0x00010000 0x0 0x00010000 /* downstream I/O */
+ 0x82000000 0x0 0x40000000 0x12 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
+};
- usb1: usb3@3110000 {
- status = "disabled";
- compatible = "snps,dwc3";
- reg = <0x0 0x3110000 0x0 0x10000>;
- interrupts = <0 81 0x4>; /* Level high type */
- dr_mode = "host";
- snps,quirk-frame-length-adjustment = <0x20>;
- snps,dis_rxdet_inp3_quirk;
- };
+&pcie3 {
+ reg = <0x00 0x03600000 0x0 0x00100000 /* controller registers */
+ 0x14 0x00000000 0x0 0x00002000>; /* configuration space */
- ccn@4000000 {
- compatible = "arm,ccn-504";
- reg = <0x0 0x04000000 0x0 0x01000000>;
- interrupts = <0 12 4>;
- };
- };
+ ranges = <0x81000000 0x0 0x00000000 0x14 0x00010000 0x0 0x00010000 /* downstream I/O */
+ 0x82000000 0x0 0x40000000 0x14 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
+};
- ddr1: memory-controller@1080000 {
- compatible = "fsl,qoriq-memory-controller";
- reg = <0x0 0x1080000 0x0 0x1000>;
- interrupts = <0 17 0x4>;
- little-endian;
- };
+&pcie4 {
+ reg = <0x00 0x03700000 0x0 0x00100000 /* controller registers */
+ 0x16 0x00000000 0x0 0x00002000>; /* configuration space */
- ddr2: memory-controller@1090000 {
- compatible = "fsl,qoriq-memory-controller";
- reg = <0x0 0x1090000 0x0 0x1000>;
- interrupts = <0 18 0x4>;
- little-endian;
- };
+ ranges = <0x81000000 0x0 0x00000000 0x16 0x00010000 0x0 0x00010000 /* downstream I/O */
+ 0x82000000 0x0 0x40000000 0x16 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
};
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls2088a-qds.dts b/arch/arm64/boot/dts/freescale/fsl-ls2088a-qds.dts
new file mode 100644
index 000000000000..ebcd6ee4da0d
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/fsl-ls2088a-qds.dts
@@ -0,0 +1,64 @@
+/*
+ * Device Tree file for Freescale LS2088A QDS Board.
+ *
+ * Copyright (C) 2016-17, Freescale Semiconductor
+ *
+ * Abhimanyu Saini <abhimanyu.saini@nxp.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPLv2 or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+
+#include "fsl-ls2088a.dtsi"
+#include "fsl-ls208xa-qds.dtsi"
+
+/ {
+ model = "Freescale Layerscape 2088A QDS Board";
+ compatible = "fsl,ls2088a-qds", "fsl,ls2088a";
+
+ aliases {
+ serial0 = &serial0;
+ serial1 = &serial1;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+};
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls2088a-rdb.dts b/arch/arm64/boot/dts/freescale/fsl-ls2088a-rdb.dts
new file mode 100644
index 000000000000..5992dc130faa
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/fsl-ls2088a-rdb.dts
@@ -0,0 +1,64 @@
+/*
+ * Device Tree file for Freescale LS2088A RDB Board.
+ *
+ * Copyright (C) 2016-17, Freescale Semiconductor
+ *
+ * Abhimanyu Saini <abhimanyu.saini@nxp.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPLv2 or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+
+#include "fsl-ls2088a.dtsi"
+#include "fsl-ls208xa-rdb.dtsi"
+
+/ {
+ model = "Freescale Layerscape 2088A RDB Board";
+ compatible = "fsl,ls2088a-rdb", "fsl,ls2088a";
+
+ aliases {
+ serial0 = &serial0;
+ serial1 = &serial1;
+ };
+
+ chosen {
+ stdout-path = "serial1:115200n8";
+ };
+};
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls2088a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls2088a.dtsi
new file mode 100644
index 000000000000..33ce404cf7e4
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/fsl-ls2088a.dtsi
@@ -0,0 +1,165 @@
+/*
+ * Device Tree Include file for Freescale Layerscape-2088A family SoC.
+ *
+ * Copyright (C) 2016-17, Freescale Semiconductor
+ *
+ * Abhimanyu Saini <abhimanyu.saini@nxp.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPLv2 or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "fsl-ls208xa.dtsi"
+
+&cpu {
+ cpu0: cpu@0 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a72";
+ reg = <0x0>;
+ clocks = <&clockgen 1 0>;
+ next-level-cache = <&cluster0_l2>;
+ #cooling-cells = <2>;
+ };
+
+ cpu1: cpu@1 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a72";
+ reg = <0x1>;
+ clocks = <&clockgen 1 0>;
+ next-level-cache = <&cluster0_l2>;
+ };
+
+ cpu2: cpu@100 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a72";
+ reg = <0x100>;
+ clocks = <&clockgen 1 1>;
+ next-level-cache = <&cluster1_l2>;
+ #cooling-cells = <2>;
+ };
+
+ cpu3: cpu@101 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a72";
+ reg = <0x101>;
+ clocks = <&clockgen 1 1>;
+ next-level-cache = <&cluster1_l2>;
+ };
+
+ cpu4: cpu@200 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a72";
+ reg = <0x200>;
+ clocks = <&clockgen 1 2>;
+ next-level-cache = <&cluster2_l2>;
+ #cooling-cells = <2>;
+ };
+
+ cpu5: cpu@201 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a72";
+ reg = <0x201>;
+ clocks = <&clockgen 1 2>;
+ next-level-cache = <&cluster2_l2>;
+ };
+
+ cpu6: cpu@300 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a72";
+ reg = <0x300>;
+ clocks = <&clockgen 1 3>;
+ next-level-cache = <&cluster3_l2>;
+ #cooling-cells = <2>;
+ };
+
+ cpu7: cpu@301 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a72";
+ reg = <0x301>;
+ clocks = <&clockgen 1 3>;
+ next-level-cache = <&cluster3_l2>;
+ };
+
+ cluster0_l2: l2-cache0 {
+ compatible = "cache";
+ };
+
+ cluster1_l2: l2-cache1 {
+ compatible = "cache";
+ };
+
+ cluster2_l2: l2-cache2 {
+ compatible = "cache";
+ };
+
+ cluster3_l2: l2-cache3 {
+ compatible = "cache";
+ };
+};
+
+&pcie1 {
+ reg = <0x00 0x03400000 0x0 0x00100000 /* controller registers */
+ 0x20 0x00000000 0x0 0x00002000>; /* configuration space */
+
+ ranges = <0x81000000 0x0 0x00000000 0x20 0x00010000 0x0 0x00010000
+ 0x82000000 0x0 0x40000000 0x20 0x40000000 0x0 0x40000000>;
+};
+
+&pcie2 {
+ reg = <0x00 0x03500000 0x0 0x00100000 /* controller registers */
+ 0x28 0x00000000 0x0 0x00002000>; /* configuration space */
+
+ ranges = <0x81000000 0x0 0x00000000 0x28 0x00010000 0x0 0x00010000
+ 0x82000000 0x0 0x40000000 0x28 0x40000000 0x0 0x40000000>;
+};
+
+&pcie3 {
+ reg = <0x00 0x03600000 0x0 0x00100000 /* controller registers */
+ 0x30 0x00000000 0x0 0x00002000>; /* configuration space */
+
+ ranges = <0x81000000 0x0 0x00000000 0x30 0x00010000 0x0 0x00010000
+ 0x82000000 0x0 0x40000000 0x30 0x40000000 0x0 0x40000000>;
+};
+
+&pcie4 {
+ reg = <0x00 0x03700000 0x0 0x00100000 /* controller registers */
+ 0x38 0x00000000 0x0 0x00002000>; /* configuration space */
+
+ ranges = <0x81000000 0x0 0x00000000 0x38 0x00010000 0x0 0x00010000
+ 0x82000000 0x0 0x40000000 0x38 0x40000000 0x0 0x40000000>;
+};
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls208xa-qds.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls208xa-qds.dtsi
new file mode 100644
index 000000000000..8b6204845973
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/fsl-ls208xa-qds.dtsi
@@ -0,0 +1,196 @@
+/*
+ * Device Tree file for Freescale LS2080A QDS Board.
+ *
+ * Copyright (C) 2016-17, Freescale Semiconductor
+ *
+ * Abhimanyu Saini <abhimanyu.saini@nxp.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPLv2 or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+&esdhc {
+ status = "okay";
+};
+
+&ifc {
+ status = "okay";
+ #address-cells = <2>;
+ #size-cells = <1>;
+ ranges = <0x0 0x0 0x5 0x80000000 0x08000000
+ 0x2 0x0 0x5 0x30000000 0x00010000
+ 0x3 0x0 0x5 0x20000000 0x00010000>;
+
+ nor@0,0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "cfi-flash";
+ reg = <0x0 0x0 0x8000000>;
+ bank-width = <2>;
+ device-width = <1>;
+ };
+
+ nand@2,0 {
+ compatible = "fsl,ifc-nand";
+ reg = <0x2 0x0 0x10000>;
+ };
+
+ cpld@3,0 {
+ reg = <0x3 0x0 0x10000>;
+ compatible = "fsl,ls2080aqds-fpga", "fsl,fpga-qixis";
+ };
+};
+
+&i2c0 {
+ status = "okay";
+ pca9547@77 {
+ compatible = "nxp,pca9547";
+ reg = <0x77>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ i2c@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x00>;
+ rtc@68 {
+ compatible = "dallas,ds3232";
+ reg = <0x68>;
+ };
+ };
+
+ i2c@2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x02>;
+
+ ina220@40 {
+ compatible = "ti,ina220";
+ reg = <0x40>;
+ shunt-resistor = <500>;
+ };
+
+ ina220@41 {
+ compatible = "ti,ina220";
+ reg = <0x41>;
+ shunt-resistor = <1000>;
+ };
+ };
+
+ i2c@3 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x3>;
+
+ adt7481@4c {
+ compatible = "adi,adt7461";
+ reg = <0x4c>;
+ };
+ };
+ };
+};
+
+&i2c1 {
+ status = "disabled";
+};
+
+&i2c2 {
+ status = "disabled";
+};
+
+&i2c3 {
+ status = "disabled";
+};
+
+&dspi {
+ status = "okay";
+ dflash0: n25q128a {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "st,m25p80";
+ spi-max-frequency = <3000000>;
+ reg = <0>;
+ };
+ dflash1: sst25wf040b {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "st,m25p80";
+ spi-max-frequency = <3000000>;
+ reg = <1>;
+ };
+ dflash2: en25s64 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "st,m25p80";
+ spi-max-frequency = <3000000>;
+ reg = <2>;
+ };
+};
+
+&qspi {
+ status = "okay";
+ flash0: s25fl256s1@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "st,m25p80";
+ spi-max-frequency = <20000000>;
+ reg = <0>;
+ };
+ flash2: s25fl256s1@2 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "st,m25p80";
+ spi-max-frequency = <20000000>;
+		reg = <2>;
+ };
+};
+
+&sata0 {
+ status = "okay";
+};
+
+&sata1 {
+ status = "okay";
+};
+
+&usb0 {
+ status = "okay";
+};
+
+&usb1 {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls208xa-rdb.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls208xa-rdb.dtsi
new file mode 100644
index 000000000000..3737587ffb33
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/fsl-ls208xa-rdb.dtsi
@@ -0,0 +1,151 @@
+/*
+ * Device Tree file for Freescale LS2080A RDB Board.
+ *
+ * Copyright (C) 2016-17, Freescale Semiconductor
+ *
+ * Abhimanyu Saini <abhimanyu.saini@nxp.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPLv2 or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+&esdhc {
+ status = "okay";
+};
+
+&ifc {
+ status = "okay";
+ #address-cells = <2>;
+ #size-cells = <1>;
+ ranges = <0x0 0x0 0x5 0x80000000 0x08000000
+ 0x2 0x0 0x5 0x30000000 0x00010000
+ 0x3 0x0 0x5 0x20000000 0x00010000>;
+
+ nor@0,0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "cfi-flash";
+ reg = <0x0 0x0 0x8000000>;
+ bank-width = <2>;
+ device-width = <1>;
+ };
+
+ nand@2,0 {
+ compatible = "fsl,ifc-nand";
+ reg = <0x2 0x0 0x10000>;
+ };
+
+ cpld@3,0 {
+ reg = <0x3 0x0 0x10000>;
+ compatible = "fsl,ls2080aqds-fpga", "fsl,fpga-qixis";
+ };
+
+};
+
+&i2c0 {
+ status = "okay";
+ pca9547@75 {
+ compatible = "nxp,pca9547";
+ reg = <0x75>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ i2c@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x01>;
+ rtc@68 {
+ compatible = "dallas,ds3232";
+ reg = <0x68>;
+ };
+ };
+
+ i2c@3 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x3>;
+
+ adt7481@4c {
+ compatible = "adi,adt7461";
+ reg = <0x4c>;
+ };
+ };
+ };
+};
+
+&i2c1 {
+ status = "disabled";
+};
+
+&i2c2 {
+ status = "disabled";
+};
+
+&i2c3 {
+ status = "disabled";
+};
+
+&dspi {
+ status = "okay";
+ dflash0: n25q512a {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "st,m25p80";
+ spi-max-frequency = <3000000>;
+ reg = <0>;
+ };
+};
+
+&qspi {
+ status = "disabled";
+};
+
+&sata0 {
+ status = "okay";
+};
+
+&sata1 {
+ status = "okay";
+};
+
+&usb0 {
+ status = "okay";
+};
+
+&usb1 {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi
new file mode 100644
index 000000000000..abb2fff7d162
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi
@@ -0,0 +1,737 @@
+/*
+ * Device Tree Include file for Freescale Layerscape-2080A family SoC.
+ *
+ * Copyright (C) 2016-2017, Freescale Semiconductor
+ *
+ * Abhimanyu Saini <abhimanyu.saini@nxp.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPLv2 or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <dt-bindings/thermal/thermal.h>
+
+/ {
+ compatible = "fsl,ls2080a";
+ interrupt-parent = <&gic>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ cpu: cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ memory@80000000 {
+ device_type = "memory";
+ reg = <0x00000000 0x80000000 0 0x80000000>;
+ /* DRAM space - 1, size : 2 GB DRAM */
+ };
+
+ sysclk: sysclk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <100000000>;
+ clock-output-names = "sysclk";
+ };
+
+ gic: interrupt-controller@6000000 {
+ compatible = "arm,gic-v3";
+ reg = <0x0 0x06000000 0 0x10000>, /* GIC Dist */
+ <0x0 0x06100000 0 0x100000>, /* GICR (RD_base + SGI_base) */
+ <0x0 0x0c0c0000 0 0x2000>, /* GICC */
+ <0x0 0x0c0d0000 0 0x1000>, /* GICH */
+ <0x0 0x0c0e0000 0 0x20000>; /* GICV */
+ #interrupt-cells = <3>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+ interrupt-controller;
+ interrupts = <1 9 0x4>;
+
+ its: gic-its@6020000 {
+ compatible = "arm,gic-v3-its";
+ msi-controller;
+ reg = <0x0 0x6020000 0 0x20000>;
+ };
+ };
+
+ rstcr: syscon@1e60000 {
+ compatible = "fsl,ls2080a-rstcr", "syscon";
+ reg = <0x0 0x1e60000 0x0 0x4>;
+ };
+
+ reboot {
+		compatible = "syscon-reboot";
+ regmap = <&rstcr>;
+ offset = <0x0>;
+ mask = <0x2>;
+ };
+
+ timer {
+ compatible = "arm,armv8-timer";
+ interrupts = <1 13 4>, /* Physical Secure PPI, active-low */
+ <1 14 4>, /* Physical Non-Secure PPI, active-low */
+ <1 11 4>, /* Virtual PPI, active-low */
+ <1 10 4>; /* Hypervisor PPI, active-low */
+ fsl,erratum-a008585;
+ };
+
+ pmu {
+ compatible = "arm,armv8-pmuv3";
+ interrupts = <1 7 0x8>; /* PMU PPI, Level low type */
+ };
+
+ soc {
+ compatible = "simple-bus";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ clockgen: clocking@1300000 {
+ compatible = "fsl,ls2080a-clockgen";
+ reg = <0 0x1300000 0 0xa0000>;
+ #clock-cells = <2>;
+ clocks = <&sysclk>;
+ };
+
+ dcfg: dcfg@1e00000 {
+ compatible = "fsl,ls2080a-dcfg", "syscon";
+ reg = <0x0 0x1e00000 0x0 0x10000>;
+ little-endian;
+ };
+
+ tmu: tmu@1f80000 {
+ compatible = "fsl,qoriq-tmu";
+ reg = <0x0 0x1f80000 0x0 0x10000>;
+ interrupts = <0 23 0x4>;
+ fsl,tmu-range = <0xb0000 0x9002a 0x6004c 0x30062>;
+ fsl,tmu-calibration = <0x00000000 0x00000026
+ 0x00000001 0x0000002d
+ 0x00000002 0x00000032
+ 0x00000003 0x00000039
+ 0x00000004 0x0000003f
+ 0x00000005 0x00000046
+ 0x00000006 0x0000004d
+ 0x00000007 0x00000054
+ 0x00000008 0x0000005a
+ 0x00000009 0x00000061
+ 0x0000000a 0x0000006a
+ 0x0000000b 0x00000071
+
+ 0x00010000 0x00000025
+ 0x00010001 0x0000002c
+ 0x00010002 0x00000035
+ 0x00010003 0x0000003d
+ 0x00010004 0x00000045
+ 0x00010005 0x0000004e
+ 0x00010006 0x00000057
+ 0x00010007 0x00000061
+ 0x00010008 0x0000006b
+ 0x00010009 0x00000076
+
+ 0x00020000 0x00000029
+ 0x00020001 0x00000033
+ 0x00020002 0x0000003d
+ 0x00020003 0x00000049
+ 0x00020004 0x00000056
+ 0x00020005 0x00000061
+ 0x00020006 0x0000006d
+
+ 0x00030000 0x00000021
+ 0x00030001 0x0000002a
+ 0x00030002 0x0000003c
+ 0x00030003 0x0000004e>;
+ little-endian;
+ #thermal-sensor-cells = <1>;
+ };
+
+ thermal-zones {
+ cpu_thermal: cpu-thermal {
+ polling-delay-passive = <1000>;
+ polling-delay = <5000>;
+
+ thermal-sensors = <&tmu 4>;
+
+ trips {
+ cpu_alert: cpu-alert {
+ temperature = <75000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+ cpu_crit: cpu-crit {
+ temperature = <85000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+
+ cooling-maps {
+ map0 {
+ trip = <&cpu_alert>;
+ cooling-device =
+ <&cpu0 THERMAL_NO_LIMIT
+ THERMAL_NO_LIMIT>;
+ };
+ map1 {
+ trip = <&cpu_alert>;
+ cooling-device =
+ <&cpu2 THERMAL_NO_LIMIT
+ THERMAL_NO_LIMIT>;
+ };
+ map2 {
+ trip = <&cpu_alert>;
+ cooling-device =
+ <&cpu4 THERMAL_NO_LIMIT
+ THERMAL_NO_LIMIT>;
+ };
+ map3 {
+ trip = <&cpu_alert>;
+ cooling-device =
+ <&cpu6 THERMAL_NO_LIMIT
+ THERMAL_NO_LIMIT>;
+ };
+ };
+ };
+ };
+
+ serial0: serial@21c0500 {
+ compatible = "fsl,ns16550", "ns16550a";
+ reg = <0x0 0x21c0500 0x0 0x100>;
+ clocks = <&clockgen 4 3>;
+ interrupts = <0 32 0x4>; /* Level high type */
+ };
+
+ serial1: serial@21c0600 {
+ compatible = "fsl,ns16550", "ns16550a";
+ reg = <0x0 0x21c0600 0x0 0x100>;
+ clocks = <&clockgen 4 3>;
+ interrupts = <0 32 0x4>; /* Level high type */
+ };
+
+ cluster1_core0_watchdog: wdt@c000000 {
+ compatible = "arm,sp805-wdt", "arm,primecell";
+ reg = <0x0 0xc000000 0x0 0x1000>;
+ clocks = <&clockgen 4 3>, <&clockgen 4 3>;
+ clock-names = "apb_pclk", "wdog_clk";
+ };
+
+ cluster1_core1_watchdog: wdt@c010000 {
+ compatible = "arm,sp805-wdt", "arm,primecell";
+ reg = <0x0 0xc010000 0x0 0x1000>;
+ clocks = <&clockgen 4 3>, <&clockgen 4 3>;
+ clock-names = "apb_pclk", "wdog_clk";
+ };
+
+ cluster2_core0_watchdog: wdt@c100000 {
+ compatible = "arm,sp805-wdt", "arm,primecell";
+ reg = <0x0 0xc100000 0x0 0x1000>;
+ clocks = <&clockgen 4 3>, <&clockgen 4 3>;
+ clock-names = "apb_pclk", "wdog_clk";
+ };
+
+ cluster2_core1_watchdog: wdt@c110000 {
+ compatible = "arm,sp805-wdt", "arm,primecell";
+ reg = <0x0 0xc110000 0x0 0x1000>;
+ clocks = <&clockgen 4 3>, <&clockgen 4 3>;
+ clock-names = "apb_pclk", "wdog_clk";
+ };
+
+ cluster3_core0_watchdog: wdt@c200000 {
+ compatible = "arm,sp805-wdt", "arm,primecell";
+ reg = <0x0 0xc200000 0x0 0x1000>;
+ clocks = <&clockgen 4 3>, <&clockgen 4 3>;
+ clock-names = "apb_pclk", "wdog_clk";
+ };
+
+ cluster3_core1_watchdog: wdt@c210000 {
+ compatible = "arm,sp805-wdt", "arm,primecell";
+ reg = <0x0 0xc210000 0x0 0x1000>;
+ clocks = <&clockgen 4 3>, <&clockgen 4 3>;
+ clock-names = "apb_pclk", "wdog_clk";
+ };
+
+ cluster4_core0_watchdog: wdt@c300000 {
+ compatible = "arm,sp805-wdt", "arm,primecell";
+ reg = <0x0 0xc300000 0x0 0x1000>;
+ clocks = <&clockgen 4 3>, <&clockgen 4 3>;
+ clock-names = "apb_pclk", "wdog_clk";
+ };
+
+ cluster4_core1_watchdog: wdt@c310000 {
+ compatible = "arm,sp805-wdt", "arm,primecell";
+ reg = <0x0 0xc310000 0x0 0x1000>;
+ clocks = <&clockgen 4 3>, <&clockgen 4 3>;
+ clock-names = "apb_pclk", "wdog_clk";
+ };
+
+ fsl_mc: fsl-mc@80c000000 {
+ compatible = "fsl,qoriq-mc";
+ reg = <0x00000008 0x0c000000 0 0x40>, /* MC portal base */
+ <0x00000000 0x08340000 0 0x40000>; /* MC control reg */
+ msi-parent = <&its>;
+ #address-cells = <3>;
+ #size-cells = <1>;
+
+ /*
+ * Region type 0x0 - MC portals
+ * Region type 0x1 - QBMAN portals
+ */
+ ranges = <0x0 0x0 0x0 0x8 0x0c000000 0x4000000
+ 0x1 0x0 0x0 0x8 0x18000000 0x8000000>;
+
+ /*
+ * Define the maximum number of MACs present on the SoC.
+ */
+ dpmacs {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ dpmac1: dpmac@1 {
+ compatible = "fsl,qoriq-mc-dpmac";
+ reg = <0x1>;
+ };
+
+ dpmac2: dpmac@2 {
+ compatible = "fsl,qoriq-mc-dpmac";
+ reg = <0x2>;
+ };
+
+ dpmac3: dpmac@3 {
+ compatible = "fsl,qoriq-mc-dpmac";
+ reg = <0x3>;
+ };
+
+ dpmac4: dpmac@4 {
+ compatible = "fsl,qoriq-mc-dpmac";
+ reg = <0x4>;
+ };
+
+ dpmac5: dpmac@5 {
+ compatible = "fsl,qoriq-mc-dpmac";
+ reg = <0x5>;
+ };
+
+ dpmac6: dpmac@6 {
+ compatible = "fsl,qoriq-mc-dpmac";
+ reg = <0x6>;
+ };
+
+ dpmac7: dpmac@7 {
+ compatible = "fsl,qoriq-mc-dpmac";
+ reg = <0x7>;
+ };
+
+ dpmac8: dpmac@8 {
+ compatible = "fsl,qoriq-mc-dpmac";
+ reg = <0x8>;
+ };
+
+ dpmac9: dpmac@9 {
+ compatible = "fsl,qoriq-mc-dpmac";
+ reg = <0x9>;
+ };
+
+ dpmac10: dpmac@a {
+ compatible = "fsl,qoriq-mc-dpmac";
+ reg = <0xa>;
+ };
+
+ dpmac11: dpmac@b {
+ compatible = "fsl,qoriq-mc-dpmac";
+ reg = <0xb>;
+ };
+
+ dpmac12: dpmac@c {
+ compatible = "fsl,qoriq-mc-dpmac";
+ reg = <0xc>;
+ };
+
+ dpmac13: dpmac@d {
+ compatible = "fsl,qoriq-mc-dpmac";
+ reg = <0xd>;
+ };
+
+ dpmac14: dpmac@e {
+ compatible = "fsl,qoriq-mc-dpmac";
+ reg = <0xe>;
+ };
+
+ dpmac15: dpmac@f {
+ compatible = "fsl,qoriq-mc-dpmac";
+ reg = <0xf>;
+ };
+
+ dpmac16: dpmac@10 {
+ compatible = "fsl,qoriq-mc-dpmac";
+ reg = <0x10>;
+ };
+ };
+ };
+
+ smmu: iommu@5000000 {
+ compatible = "arm,mmu-500";
+ reg = <0 0x5000000 0 0x800000>;
+ #global-interrupts = <12>;
+ interrupts = <0 13 4>, /* global secure fault */
+ <0 14 4>, /* combined secure interrupt */
+ <0 15 4>, /* global non-secure fault */
+ <0 16 4>, /* combined non-secure interrupt */
+ /* performance counter interrupts 0-7 */
+ <0 211 4>, <0 212 4>,
+ <0 213 4>, <0 214 4>,
+ <0 215 4>, <0 216 4>,
+ <0 217 4>, <0 218 4>,
+ /* per context interrupt, 64 interrupts */
+ <0 146 4>, <0 147 4>,
+ <0 148 4>, <0 149 4>,
+ <0 150 4>, <0 151 4>,
+ <0 152 4>, <0 153 4>,
+ <0 154 4>, <0 155 4>,
+ <0 156 4>, <0 157 4>,
+ <0 158 4>, <0 159 4>,
+ <0 160 4>, <0 161 4>,
+ <0 162 4>, <0 163 4>,
+ <0 164 4>, <0 165 4>,
+ <0 166 4>, <0 167 4>,
+ <0 168 4>, <0 169 4>,
+ <0 170 4>, <0 171 4>,
+ <0 172 4>, <0 173 4>,
+ <0 174 4>, <0 175 4>,
+ <0 176 4>, <0 177 4>,
+ <0 178 4>, <0 179 4>,
+ <0 180 4>, <0 181 4>,
+ <0 182 4>, <0 183 4>,
+ <0 184 4>, <0 185 4>,
+ <0 186 4>, <0 187 4>,
+ <0 188 4>, <0 189 4>,
+ <0 190 4>, <0 191 4>,
+ <0 192 4>, <0 193 4>,
+ <0 194 4>, <0 195 4>,
+ <0 196 4>, <0 197 4>,
+ <0 198 4>, <0 199 4>,
+ <0 200 4>, <0 201 4>,
+ <0 202 4>, <0 203 4>,
+ <0 204 4>, <0 205 4>,
+ <0 206 4>, <0 207 4>,
+ <0 208 4>, <0 209 4>;
+ mmu-masters = <&fsl_mc 0x300 0>;
+ };
+
+ dspi: dspi@2100000 {
+ status = "disabled";
+ compatible = "fsl,ls2080a-dspi", "fsl,ls2085a-dspi";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x0 0x2100000 0x0 0x10000>;
+ interrupts = <0 26 0x4>; /* Level high type */
+ clocks = <&clockgen 4 3>;
+ clock-names = "dspi";
+ spi-num-chipselects = <5>;
+ bus-num = <0>;
+ };
+
+ esdhc: esdhc@2140000 {
+ status = "disabled";
+ compatible = "fsl,ls2080a-esdhc", "fsl,esdhc";
+ reg = <0x0 0x2140000 0x0 0x10000>;
+ interrupts = <0 28 0x4>; /* Level high type */
+ clock-frequency = <0>; /* Updated by bootloader */
+ voltage-ranges = <1800 1800 3300 3300>;
+ sdhci,auto-cmd12;
+ little-endian;
+ bus-width = <4>;
+ };
+
+ gpio0: gpio@2300000 {
+ compatible = "fsl,ls2080a-gpio", "fsl,qoriq-gpio";
+ reg = <0x0 0x2300000 0x0 0x10000>;
+ interrupts = <0 36 0x4>; /* Level high type */
+ gpio-controller;
+ little-endian;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpio1: gpio@2310000 {
+ compatible = "fsl,ls2080a-gpio", "fsl,qoriq-gpio";
+ reg = <0x0 0x2310000 0x0 0x10000>;
+ interrupts = <0 36 0x4>; /* Level high type */
+ gpio-controller;
+ little-endian;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpio2: gpio@2320000 {
+ compatible = "fsl,ls2080a-gpio", "fsl,qoriq-gpio";
+ reg = <0x0 0x2320000 0x0 0x10000>;
+ interrupts = <0 37 0x4>; /* Level high type */
+ gpio-controller;
+ little-endian;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpio3: gpio@2330000 {
+ compatible = "fsl,ls2080a-gpio", "fsl,qoriq-gpio";
+ reg = <0x0 0x2330000 0x0 0x10000>;
+ interrupts = <0 37 0x4>; /* Level high type */
+ gpio-controller;
+ little-endian;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ i2c0: i2c@2000000 {
+ status = "disabled";
+ compatible = "fsl,vf610-i2c";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x0 0x2000000 0x0 0x10000>;
+ interrupts = <0 34 0x4>; /* Level high type */
+ clock-names = "i2c";
+ clocks = <&clockgen 4 3>;
+ };
+
+ i2c1: i2c@2010000 {
+ status = "disabled";
+ compatible = "fsl,vf610-i2c";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x0 0x2010000 0x0 0x10000>;
+ interrupts = <0 34 0x4>; /* Level high type */
+ clock-names = "i2c";
+ clocks = <&clockgen 4 3>;
+ };
+
+ i2c2: i2c@2020000 {
+ status = "disabled";
+ compatible = "fsl,vf610-i2c";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x0 0x2020000 0x0 0x10000>;
+ interrupts = <0 35 0x4>; /* Level high type */
+ clock-names = "i2c";
+ clocks = <&clockgen 4 3>;
+ };
+
+ i2c3: i2c@2030000 {
+ status = "disabled";
+ compatible = "fsl,vf610-i2c";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x0 0x2030000 0x0 0x10000>;
+ interrupts = <0 35 0x4>; /* Level high type */
+ clock-names = "i2c";
+ clocks = <&clockgen 4 3>;
+ };
+
+ ifc: ifc@2240000 {
+ compatible = "fsl,ifc", "simple-bus";
+ reg = <0x0 0x2240000 0x0 0x20000>;
+ interrupts = <0 21 0x4>; /* Level high type */
+ little-endian;
+ #address-cells = <2>;
+ #size-cells = <1>;
+
+ ranges = <0 0 0x5 0x80000000 0x08000000
+ 2 0 0x5 0x30000000 0x00010000
+ 3 0 0x5 0x20000000 0x00010000>;
+ };
+
+ qspi: quadspi@20c0000 {
+ status = "disabled";
+ compatible = "fsl,ls2080a-qspi", "fsl,ls1021a-qspi";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x0 0x20c0000 0x0 0x10000>,
+ <0x0 0x20000000 0x0 0x10000000>;
+ reg-names = "QuadSPI", "QuadSPI-memory";
+ interrupts = <0 25 0x4>; /* Level high type */
+ clocks = <&clockgen 4 3>, <&clockgen 4 3>;
+ clock-names = "qspi_en", "qspi";
+ };
+
+ pcie1: pcie@3400000 {
+ compatible = "fsl,ls2080a-pcie", "fsl,ls2085a-pcie",
+ "snps,dw-pcie";
+ reg-names = "regs", "config";
+ interrupts = <0 108 0x4>; /* Level high type */
+ interrupt-names = "intr";
+ #address-cells = <3>;
+ #size-cells = <2>;
+ device_type = "pci";
+ dma-coherent;
+ num-lanes = <4>;
+ bus-range = <0x0 0xff>;
+ msi-parent = <&its>;
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 7>;
+ interrupt-map = <0000 0 0 1 &gic 0 0 0 109 4>,
+ <0000 0 0 2 &gic 0 0 0 110 4>,
+ <0000 0 0 3 &gic 0 0 0 111 4>,
+ <0000 0 0 4 &gic 0 0 0 112 4>;
+ };
+
+ pcie2: pcie@3500000 {
+ compatible = "fsl,ls2080a-pcie", "fsl,ls2085a-pcie",
+ "snps,dw-pcie";
+ reg-names = "regs", "config";
+ interrupts = <0 113 0x4>; /* Level high type */
+ interrupt-names = "intr";
+ #address-cells = <3>;
+ #size-cells = <2>;
+ device_type = "pci";
+ dma-coherent;
+ num-lanes = <4>;
+ bus-range = <0x0 0xff>;
+ msi-parent = <&its>;
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 7>;
+ interrupt-map = <0000 0 0 1 &gic 0 0 0 114 4>,
+ <0000 0 0 2 &gic 0 0 0 115 4>,
+ <0000 0 0 3 &gic 0 0 0 116 4>,
+ <0000 0 0 4 &gic 0 0 0 117 4>;
+ };
+
+ pcie3: pcie@3600000 {
+ compatible = "fsl,ls2080a-pcie", "fsl,ls2085a-pcie",
+ "snps,dw-pcie";
+ reg-names = "regs", "config";
+ interrupts = <0 118 0x4>; /* Level high type */
+ interrupt-names = "intr";
+ #address-cells = <3>;
+ #size-cells = <2>;
+ device_type = "pci";
+ dma-coherent;
+ num-lanes = <8>;
+ bus-range = <0x0 0xff>;
+ msi-parent = <&its>;
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 7>;
+ interrupt-map = <0000 0 0 1 &gic 0 0 0 119 4>,
+ <0000 0 0 2 &gic 0 0 0 120 4>,
+ <0000 0 0 3 &gic 0 0 0 121 4>,
+ <0000 0 0 4 &gic 0 0 0 122 4>;
+ };
+
+ pcie4: pcie@3700000 {
+ compatible = "fsl,ls2080a-pcie", "fsl,ls2085a-pcie",
+ "snps,dw-pcie";
+ reg-names = "regs", "config";
+ interrupts = <0 123 0x4>; /* Level high type */
+ interrupt-names = "intr";
+ #address-cells = <3>;
+ #size-cells = <2>;
+ device_type = "pci";
+ dma-coherent;
+ num-lanes = <4>;
+ bus-range = <0x0 0xff>;
+ msi-parent = <&its>;
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 7>;
+ interrupt-map = <0000 0 0 1 &gic 0 0 0 124 4>,
+ <0000 0 0 2 &gic 0 0 0 125 4>,
+ <0000 0 0 3 &gic 0 0 0 126 4>,
+ <0000 0 0 4 &gic 0 0 0 127 4>;
+ };
+
+ sata0: sata@3200000 {
+ status = "disabled";
+ compatible = "fsl,ls2080a-ahci";
+ reg = <0x0 0x3200000 0x0 0x10000>;
+ interrupts = <0 133 0x4>; /* Level high type */
+ clocks = <&clockgen 4 3>;
+ dma-coherent;
+ };
+
+ sata1: sata@3210000 {
+ status = "disabled";
+ compatible = "fsl,ls2080a-ahci";
+ reg = <0x0 0x3210000 0x0 0x10000>;
+ interrupts = <0 136 0x4>; /* Level high type */
+ clocks = <&clockgen 4 3>;
+ dma-coherent;
+ };
+
+ usb0: usb3@3100000 {
+ status = "disabled";
+ compatible = "snps,dwc3";
+ reg = <0x0 0x3100000 0x0 0x10000>;
+ interrupts = <0 80 0x4>; /* Level high type */
+ dr_mode = "host";
+ snps,quirk-frame-length-adjustment = <0x20>;
+ snps,dis_rxdet_inp3_quirk;
+ };
+
+ usb1: usb3@3110000 {
+ status = "disabled";
+ compatible = "snps,dwc3";
+ reg = <0x0 0x3110000 0x0 0x10000>;
+ interrupts = <0 81 0x4>; /* Level high type */
+ dr_mode = "host";
+ snps,quirk-frame-length-adjustment = <0x20>;
+ snps,dis_rxdet_inp3_quirk;
+ };
+
+ ccn@4000000 {
+ compatible = "arm,ccn-504";
+ reg = <0x0 0x04000000 0x0 0x01000000>;
+ interrupts = <0 12 4>;
+ };
+ };
+
+ ddr1: memory-controller@1080000 {
+ compatible = "fsl,qoriq-memory-controller";
+ reg = <0x0 0x1080000 0x0 0x1000>;
+ interrupts = <0 17 0x4>;
+ little-endian;
+ };
+
+ ddr2: memory-controller@1090000 {
+ compatible = "fsl,qoriq-memory-controller";
+ reg = <0x0 0x1090000 0x0 0x1000>;
+ interrupts = <0 18 0x4>;
+ little-endian;
+ };
+};
diff --git a/arch/arm64/boot/dts/hisilicon/Makefile b/arch/arm64/boot/dts/hisilicon/Makefile
index c3a6c1943038..8960ecafd37d 100644
--- a/arch/arm64/boot/dts/hisilicon/Makefile
+++ b/arch/arm64/boot/dts/hisilicon/Makefile
@@ -1,4 +1,5 @@
dtb-$(CONFIG_ARCH_HISI) += hi3660-hikey960.dtb
+dtb-$(CONFIG_ARCH_HISI) += hi3798cv200-poplar.dtb
dtb-$(CONFIG_ARCH_HISI) += hi6220-hikey.dtb
dtb-$(CONFIG_ARCH_HISI) += hip05-d02.dtb
dtb-$(CONFIG_ARCH_HISI) += hip06-d03.dtb
diff --git a/arch/arm64/boot/dts/hisilicon/hi3660-hikey960.dts b/arch/arm64/boot/dts/hisilicon/hi3660-hikey960.dts
index ff37f0a0aa93..186251ffc6b2 100644
--- a/arch/arm64/boot/dts/hisilicon/hi3660-hikey960.dts
+++ b/arch/arm64/boot/dts/hisilicon/hi3660-hikey960.dts
@@ -8,6 +8,7 @@
/dts-v1/;
#include "hi3660.dtsi"
+#include "hikey960-pinctrl.dtsi"
/ {
model = "HiKey960";
diff --git a/arch/arm64/boot/dts/hisilicon/hi3798cv200-poplar.dts b/arch/arm64/boot/dts/hisilicon/hi3798cv200-poplar.dts
new file mode 100644
index 000000000000..b9142871d6fe
--- /dev/null
+++ b/arch/arm64/boot/dts/hisilicon/hi3798cv200-poplar.dts
@@ -0,0 +1,162 @@
+/*
+ * DTS File for HiSilicon Poplar Development Board
+ *
+ * Copyright (c) 2016-2017 HiSilicon Technologies Co., Ltd.
+ *
+ * Released under the GPLv2 only.
+ * SPDX-License-Identifier: GPL-2.0
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include "hi3798cv200.dtsi"
+
+/ {
+ model = "HiSilicon Poplar Development Board";
+ compatible = "hisilicon,hi3798cv200-poplar", "hisilicon,hi3798cv200";
+
+ aliases {
+ serial0 = &uart0;
+ serial2 = &uart2;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ memory@0 {
+ device_type = "memory";
+ reg = <0x0 0x0 0x0 0x80000000>;
+ };
+
+ leds {
+ compatible = "gpio-leds";
+
+ user-led0 {
+ label = "USER-LED0";
+ gpios = <&gpio6 3 GPIO_ACTIVE_LOW>;
+ linux,default-trigger = "heartbeat";
+ default-state = "off";
+ };
+
+ user-led1 {
+ label = "USER-LED1";
+ gpios = <&gpio5 1 GPIO_ACTIVE_LOW>;
+ linux,default-trigger = "mmc0";
+ default-state = "off";
+ };
+
+ user-led2 {
+ label = "USER-LED2";
+ gpios = <&gpio5 2 GPIO_ACTIVE_LOW>;
+ linux,default-trigger = "none";
+ default-state = "off";
+ };
+
+ user-led3 {
+ label = "USER-LED3";
+ gpios = <&gpio10 6 GPIO_ACTIVE_LOW>;
+ linux,default-trigger = "cpu0";
+ default-state = "off";
+ };
+ };
+};
+
+&gmac1 {
+ status = "okay";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ phy-handle = <&eth_phy1>;
+ phy-mode = "rgmii";
+ hisilicon,phy-reset-delays-us = <10000 10000 30000>;
+
+ eth_phy1: phy@3 {
+ reg = <3>;
+ };
+};
+
+&gpio1 {
+ status = "okay";
+ gpio-line-names = "LS-GPIO-E", "",
+ "", "",
+ "", "LS-GPIO-F",
+ "", "LS-GPIO-J";
+};
+
+&gpio2 {
+ status = "okay";
+ gpio-line-names = "LS-GPIO-H", "LS-GPIO-I",
+ "LS-GPIO-L", "LS-GPIO-G",
+ "LS-GPIO-K", "",
+ "", "";
+};
+
+&gpio3 {
+ status = "okay";
+ gpio-line-names = "", "",
+ "", "",
+ "LS-GPIO-C", "",
+ "", "LS-GPIO-B";
+};
+
+&gpio4 {
+ status = "okay";
+ gpio-line-names = "", "",
+ "", "",
+ "", "LS-GPIO-D",
+ "", "";
+};
+
+&gpio5 {
+ status = "okay";
+ gpio-line-names = "", "USER-LED-1",
+ "USER-LED-2", "",
+ "", "LS-GPIO-A",
+ "", "";
+};
+
+&gpio6 {
+ status = "okay";
+ gpio-line-names = "", "",
+ "", "USER-LED-0",
+ "", "",
+ "", "";
+};
+
+&gpio10 {
+ status = "okay";
+ gpio-line-names = "", "",
+ "", "",
+ "", "",
+ "USER-LED-3", "";
+};
+
+&i2c0 {
+ status = "okay";
+ label = "LS-I2C0";
+};
+
+&i2c2 {
+ status = "okay";
+ label = "LS-I2C1";
+};
+
+&ir {
+ status = "okay";
+};
+
+&spi0 {
+ status = "okay";
+ label = "LS-SPI0";
+};
+
+&uart0 {
+ status = "okay";
+};
+
+&uart2 {
+ status = "okay";
+ label = "LS-UART0";
+};
+/* No optional LS-UART1 on Low Speed Expansion Connector. */
diff --git a/arch/arm64/boot/dts/hisilicon/hi3798cv200.dtsi b/arch/arm64/boot/dts/hisilicon/hi3798cv200.dtsi
new file mode 100644
index 000000000000..75865f8a862a
--- /dev/null
+++ b/arch/arm64/boot/dts/hisilicon/hi3798cv200.dtsi
@@ -0,0 +1,411 @@
+/*
+ * DTS File for HiSilicon Hi3798cv200 SoC.
+ *
+ * Copyright (c) 2016-2017 HiSilicon Technologies Co., Ltd.
+ *
+ * Released under the GPLv2 only.
+ * SPDX-License-Identifier: GPL-2.0
+ */
+
+#include <dt-bindings/clock/histb-clock.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/reset/ti-syscon.h>
+
+/ {
+ compatible = "hisilicon,hi3798cv200";
+ interrupt-parent = <&gic>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ psci {
+ compatible = "arm,psci-0.2";
+ method = "smc";
+ };
+
+ cpus {
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ cpu@0 {
+ compatible = "arm,cortex-a53";
+ device_type = "cpu";
+ reg = <0x0 0x0>;
+ enable-method = "psci";
+ };
+
+ cpu@1 {
+ compatible = "arm,cortex-a53";
+ device_type = "cpu";
+ reg = <0x0 0x1>;
+ enable-method = "psci";
+ };
+
+ cpu@2 {
+ compatible = "arm,cortex-a53";
+ device_type = "cpu";
+ reg = <0x0 0x2>;
+ enable-method = "psci";
+ };
+
+ cpu@3 {
+ compatible = "arm,cortex-a53";
+ device_type = "cpu";
+ reg = <0x0 0x3>;
+ enable-method = "psci";
+ };
+ };
+
+ gic: interrupt-controller@f1001000 {
+ compatible = "arm,gic-400";
+ reg = <0x0 0xf1001000 0x0 0x1000>, /* GICD */
+ <0x0 0xf1002000 0x0 0x100>; /* GICC */
+ #address-cells = <0>;
+ #interrupt-cells = <3>;
+ interrupt-controller;
+ };
+
+ timer {
+ compatible = "arm,armv8-timer";
+ interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(4) |
+ IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(4) |
+ IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(4) |
+ IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(4) |
+ IRQ_TYPE_LEVEL_LOW)>;
+ };
+
+ soc: soc@f0000000 {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x0 0xf0000000 0x10000000>;
+
+ crg: clock-reset-controller@8a22000 {
+ compatible = "hisilicon,hi3798cv200-crg", "syscon", "simple-mfd";
+ reg = <0x8a22000 0x1000>;
+ #clock-cells = <1>;
+ #reset-cells = <2>;
+
+ gmacphyrst: reset-controller {
+ compatible = "ti,syscon-reset";
+ #reset-cells = <1>;
+ ti,reset-bits =
+ <0xcc 12 0xcc 12 0 0 (ASSERT_CLEAR |
+ DEASSERT_SET|STATUS_NONE)>,
+ <0xcc 13 0xcc 13 0 0 (ASSERT_CLEAR |
+ DEASSERT_SET|STATUS_NONE)>;
+ };
+ };
+
+ sysctrl: system-controller@8000000 {
+ compatible = "hisilicon,hi3798cv200-sysctrl", "syscon";
+ reg = <0x8000000 0x1000>;
+ #clock-cells = <1>;
+ #reset-cells = <2>;
+ };
+
+ uart0: serial@8b00000 {
+ compatible = "arm,pl011", "arm,primecell";
+ reg = <0x8b00000 0x1000>;
+ interrupts = <GIC_SPI 49 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&sysctrl HISTB_UART0_CLK>;
+ clock-names = "apb_pclk";
+ status = "disabled";
+ };
+
+ uart2: serial@8b02000 {
+ compatible = "arm,pl011", "arm,primecell";
+ reg = <0x8b02000 0x1000>;
+ interrupts = <GIC_SPI 51 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&crg HISTB_UART2_CLK>;
+ clock-names = "apb_pclk";
+ status = "disabled";
+ };
+
+ i2c0: i2c@8b10000 {
+ compatible = "hisilicon,hix5hd2-i2c";
+ reg = <0x8b10000 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupts = <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>;
+ clock-frequency = <400000>;
+ clocks = <&crg HISTB_I2C0_CLK>;
+ status = "disabled";
+ };
+
+ i2c1: i2c@8b11000 {
+ compatible = "hisilicon,hix5hd2-i2c";
+ reg = <0x8b11000 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupts = <GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>;
+ clock-frequency = <400000>;
+ clocks = <&crg HISTB_I2C1_CLK>;
+ status = "disabled";
+ };
+
+ i2c2: i2c@8b12000 {
+ compatible = "hisilicon,hix5hd2-i2c";
+ reg = <0x8b12000 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupts = <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>;
+ clock-frequency = <400000>;
+ clocks = <&crg HISTB_I2C2_CLK>;
+ status = "disabled";
+ };
+
+ i2c3: i2c@8b13000 {
+ compatible = "hisilicon,hix5hd2-i2c";
+ reg = <0x8b13000 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupts = <GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>;
+ clock-frequency = <400000>;
+ clocks = <&crg HISTB_I2C3_CLK>;
+ status = "disabled";
+ };
+
+ i2c4: i2c@8b14000 {
+ compatible = "hisilicon,hix5hd2-i2c";
+ reg = <0x8b14000 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupts = <GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>;
+ clock-frequency = <400000>;
+ clocks = <&crg HISTB_I2C4_CLK>;
+ status = "disabled";
+ };
+
+ spi0: spi@8b1a000 {
+ compatible = "arm,pl022", "arm,primecell";
+ reg = <0x8b1a000 0x1000>;
+ interrupts = <GIC_SPI 45 IRQ_TYPE_LEVEL_HIGH>;
+ num-cs = <1>;
+ cs-gpios = <&gpio7 1 0>;
+ clocks = <&crg HISTB_SPI0_CLK>;
+ clock-names = "apb_pclk";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ emmc: mmc@9830000 {
+ compatible = "snps,dw-mshc";
+ reg = <0x9830000 0x10000>;
+ interrupts = <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&crg HISTB_MMC_CIU_CLK>,
+ <&crg HISTB_MMC_BIU_CLK>;
+ clock-names = "ciu", "biu";
+ };
+
+ gpio0: gpio@8b20000 {
+ compatible = "arm,pl061", "arm,primecell";
+ reg = <0x8b20000 0x1000>;
+ interrupts = <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ clocks = <&crg HISTB_APB_CLK>;
+ clock-names = "apb_pclk";
+ status = "disabled";
+ };
+
+ gpio1: gpio@8b21000 {
+ compatible = "arm,pl061", "arm,primecell";
+ reg = <0x8b21000 0x1000>;
+ interrupts = <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ clocks = <&crg HISTB_APB_CLK>;
+ clock-names = "apb_pclk";
+ status = "disabled";
+ };
+
+ gpio2: gpio@8b22000 {
+ compatible = "arm,pl061", "arm,primecell";
+ reg = <0x8b22000 0x1000>;
+ interrupts = <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ clocks = <&crg HISTB_APB_CLK>;
+ clock-names = "apb_pclk";
+ status = "disabled";
+ };
+
+ gpio3: gpio@8b23000 {
+ compatible = "arm,pl061", "arm,primecell";
+ reg = <0x8b23000 0x1000>;
+ interrupts = <GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ clocks = <&crg HISTB_APB_CLK>;
+ clock-names = "apb_pclk";
+ status = "disabled";
+ };
+
+ gpio4: gpio@8b24000 {
+ compatible = "arm,pl061", "arm,primecell";
+ reg = <0x8b24000 0x1000>;
+ interrupts = <GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ clocks = <&crg HISTB_APB_CLK>;
+ clock-names = "apb_pclk";
+ status = "disabled";
+ };
+
+ gpio5: gpio@8004000 {
+ compatible = "arm,pl061", "arm,primecell";
+ reg = <0x8004000 0x1000>;
+ interrupts = <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ clocks = <&crg HISTB_APB_CLK>;
+ clock-names = "apb_pclk";
+ status = "disabled";
+ };
+
+ gpio6: gpio@8b26000 {
+ compatible = "arm,pl061", "arm,primecell";
+ reg = <0x8b26000 0x1000>;
+ interrupts = <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ clocks = <&crg HISTB_APB_CLK>;
+ clock-names = "apb_pclk";
+ status = "disabled";
+ };
+
+ gpio7: gpio@8b27000 {
+ compatible = "arm,pl061", "arm,primecell";
+ reg = <0x8b27000 0x1000>;
+ interrupts = <GIC_SPI 115 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ clocks = <&crg HISTB_APB_CLK>;
+ clock-names = "apb_pclk";
+ status = "disabled";
+ };
+
+ gpio8: gpio@8b28000 {
+ compatible = "arm,pl061", "arm,primecell";
+ reg = <0x8b28000 0x1000>;
+ interrupts = <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ clocks = <&crg HISTB_APB_CLK>;
+ clock-names = "apb_pclk";
+ status = "disabled";
+ };
+
+ gpio9: gpio@8b29000 {
+ compatible = "arm,pl061", "arm,primecell";
+ reg = <0x8b29000 0x1000>;
+ interrupts = <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ clocks = <&crg HISTB_APB_CLK>;
+ clock-names = "apb_pclk";
+ status = "disabled";
+ };
+
+ gpio10: gpio@8b2a000 {
+ compatible = "arm,pl061", "arm,primecell";
+ reg = <0x8b2a000 0x1000>;
+ interrupts = <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ clocks = <&crg HISTB_APB_CLK>;
+ clock-names = "apb_pclk";
+ status = "disabled";
+ };
+
+ gpio11: gpio@8b2b000 {
+ compatible = "arm,pl061", "arm,primecell";
+ reg = <0x8b2b000 0x1000>;
+ interrupts = <GIC_SPI 119 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ clocks = <&crg HISTB_APB_CLK>;
+ clock-names = "apb_pclk";
+ status = "disabled";
+ };
+
+ gpio12: gpio@8b2c000 {
+ compatible = "arm,pl061", "arm,primecell";
+ reg = <0x8b2c000 0x1000>;
+ interrupts = <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ clocks = <&crg HISTB_APB_CLK>;
+ clock-names = "apb_pclk";
+ status = "disabled";
+ };
+
+ gmac0: ethernet@9840000 {
+ compatible = "hisilicon,hi3798cv200-gmac", "hisilicon,hisi-gmac-v2";
+ reg = <0x9840000 0x1000>,
+ <0x984300c 0x4>;
+ interrupts = <GIC_SPI 71 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&crg HISTB_ETH0_MAC_CLK>,
+ <&crg HISTB_ETH0_MACIF_CLK>;
+ clock-names = "mac_core", "mac_ifc";
+ resets = <&crg 0xcc 8>,
+ <&crg 0xcc 10>,
+ <&gmacphyrst 0>;
+ reset-names = "mac_core", "mac_ifc", "phy";
+ status = "disabled";
+ };
+
+ gmac1: ethernet@9841000 {
+ compatible = "hisilicon,hi3798cv200-gmac", "hisilicon,hisi-gmac-v2";
+ reg = <0x9841000 0x1000>,
+ <0x9843010 0x4>;
+ interrupts = <GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&crg HISTB_ETH1_MAC_CLK>,
+ <&crg HISTB_ETH1_MACIF_CLK>;
+ clock-names = "mac_core", "mac_ifc";
+ resets = <&crg 0xcc 9>,
+ <&crg 0xcc 11>,
+ <&gmacphyrst 1>;
+ reset-names = "mac_core", "mac_ifc", "phy";
+ status = "disabled";
+ };
+
+ ir: ir@8001000 {
+ compatible = "hisilicon,hix5hd2-ir";
+ reg = <0x8001000 0x1000>;
+ interrupts = <GIC_SPI 47 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&sysctrl HISTB_IR_CLK>;
+ status = "disabled";
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts b/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts
index 9b4ba7169210..75bce2d0b1a8 100644
--- a/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts
+++ b/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts
@@ -411,6 +411,13 @@
};
};
};
+
+ firmware {
+ optee {
+ compatible = "linaro,optee-tz";
+ method = "smc";
+ };
+ };
};
&uart2 {
diff --git a/arch/arm64/boot/dts/hisilicon/hi6220.dtsi b/arch/arm64/boot/dts/hisilicon/hi6220.dtsi
index 470461ddd427..1e5129b19280 100644
--- a/arch/arm64/boot/dts/hisilicon/hi6220.dtsi
+++ b/arch/arm64/boot/dts/hisilicon/hi6220.dtsi
@@ -774,6 +774,7 @@
clocks = <&sys_ctrl 2>, <&sys_ctrl 1>;
clock-names = "ciu", "biu";
resets = <&sys_ctrl PERIPH_RSTDIS0_MMC0>;
+ reset-names = "reset";
bus-width = <0x8>;
vmmc-supply = <&ldo19>;
pinctrl-names = "default";
@@ -797,6 +798,7 @@
clocks = <&sys_ctrl 4>, <&sys_ctrl 3>;
clock-names = "ciu", "biu";
resets = <&sys_ctrl PERIPH_RSTDIS0_MMC1>;
+ reset-names = "reset";
vqmmc-supply = <&ldo7>;
vmmc-supply = <&ldo10>;
bus-width = <0x4>;
@@ -815,6 +817,7 @@
clocks = <&sys_ctrl HI6220_MMC2_CIUCLK>, <&sys_ctrl HI6220_MMC2_CLK>;
clock-names = "ciu", "biu";
resets = <&sys_ctrl PERIPH_RSTDIS0_MMC2>;
+ reset-names = "reset";
bus-width = <0x4>;
broken-cd;
pinctrl-names = "default", "idle";
diff --git a/arch/arm64/boot/dts/hisilicon/hikey960-pinctrl.dtsi b/arch/arm64/boot/dts/hisilicon/hikey960-pinctrl.dtsi
new file mode 100644
index 000000000000..719c4bc937a4
--- /dev/null
+++ b/arch/arm64/boot/dts/hisilicon/hikey960-pinctrl.dtsi
@@ -0,0 +1,407 @@
+/*
+ * pinctrl dts file for HiSilicon HiKey960 development board
+ *
+ */
+
+#include <dt-bindings/pinctrl/hisi.h>
+
+/ {
+ soc {
+ /* [IOMG_000, IOMG_123] */
+ range: gpio-range {
+ #pinctrl-single,gpio-range-cells = <3>;
+ };
+
+ pmx0: pinmux@e896c000 {
+ compatible = "pinctrl-single";
+ reg = <0x0 0xe896c000 0x0 0x1f0>;
+ #pinctrl-cells = <1>;
+ #gpio-range-cells = <0x3>;
+ pinctrl-single,register-width = <0x20>;
+ pinctrl-single,function-mask = <0x7>;
+ /* pin base, nr pins & gpio function */
+ pinctrl-single,gpio-range = <
+ &range 0 7 0
+ &range 8 116 0>;
+
+ isp0_pmx_func: isp0_pmx_func {
+ pinctrl-single,pins = <
+ 0x058 MUX_M1 /* ISP_CLK0 */
+ 0x064 MUX_M1 /* ISP_SCL0 */
+ 0x068 MUX_M1 /* ISP_SDA0 */
+ >;
+ };
+
+ isp1_pmx_func: isp1_pmx_func {
+ pinctrl-single,pins = <
+ 0x05c MUX_M1 /* ISP_CLK1 */
+ 0x06c MUX_M1 /* ISP_SCL1 */
+ 0x070 MUX_M1 /* ISP_SDA1 */
+ >;
+ };
+
+ i2c3_pmx_func: i2c3_pmx_func {
+ pinctrl-single,pins = <
+ 0x02c MUX_M1 /* I2C3_SCL */
+ 0x030 MUX_M1 /* I2C3_SDA */
+ >;
+ };
+
+ i2c4_pmx_func: i2c4_pmx_func {
+ pinctrl-single,pins = <
+ 0x090 MUX_M1 /* I2C4_SCL */
+ 0x094 MUX_M1 /* I2C4_SDA */
+ >;
+ };
+
+ pcie_perstn_pmx_func: pcie_perstn_pmx_func {
+ pinctrl-single,pins = <
+ 0x15c MUX_M1 /* PCIE_PERST_N */
+ >;
+ };
+
+ usbhub5734_pmx_func: usbhub5734_pmx_func {
+ pinctrl-single,pins = <
+ 0x11c MUX_M0 /* GPIO_073 */
+ 0x120 MUX_M0 /* GPIO_074 */
+ >;
+ };
+
+ spi1_pmx_func: spi1_pmx_func {
+ pinctrl-single,pins = <
+ 0x034 MUX_M1 /* SPI1_CLK */
+ 0x038 MUX_M1 /* SPI1_DI */
+ 0x03c MUX_M1 /* SPI1_DO */
+ 0x040 MUX_M1 /* SPI1_CS_N */
+ >;
+ };
+
+ uart0_pmx_func: uart0_pmx_func {
+ pinctrl-single,pins = <
+ 0x0cc MUX_M2 /* UART0_RXD */
+ 0x0d0 MUX_M2 /* UART0_TXD */
+ 0x0d4 MUX_M2 /* UART0_RXD_M */
+ 0x0d8 MUX_M2 /* UART0_TXD_M */
+ >;
+ };
+
+ uart1_pmx_func: uart1_pmx_func {
+ pinctrl-single,pins = <
+ 0x0b0 MUX_M2 /* UART1_CTS_N */
+ 0x0b4 MUX_M2 /* UART1_RTS_N */
+ 0x0a8 MUX_M2 /* UART1_RXD */
+ 0x0ac MUX_M2 /* UART1_TXD */
+ >;
+ };
+
+ uart2_pmx_func: uart2_pmx_func {
+ pinctrl-single,pins = <
+ 0x0bc MUX_M2 /* UART2_CTS_N */
+ 0x0c0 MUX_M2 /* UART2_RTS_N */
+ 0x0c8 MUX_M2 /* UART2_RXD */
+ 0x0c4 MUX_M2 /* UART2_TXD */
+ >;
+ };
+
+ uart3_pmx_func: uart3_pmx_func {
+ pinctrl-single,pins = <
+ 0x0dc MUX_M1 /* UART3_CTS_N */
+ 0x0e0 MUX_M1 /* UART3_RTS_N */
+ 0x0e4 MUX_M1 /* UART3_RXD */
+ 0x0e8 MUX_M1 /* UART3_TXD */
+ >;
+ };
+
+ uart4_pmx_func: uart4_pmx_func {
+ pinctrl-single,pins = <
+ 0x0ec MUX_M1 /* UART4_CTS_N */
+ 0x0f0 MUX_M1 /* UART4_RTS_N */
+ 0x0f4 MUX_M1 /* UART4_RXD */
+ 0x0f8 MUX_M1 /* UART4_TXD */
+ >;
+ };
+
+ uart5_pmx_func: uart5_pmx_func {
+ pinctrl-single,pins = <
+ 0x0c4 MUX_M3 /* UART5_CTS_N */
+ 0x0c8 MUX_M3 /* UART5_RTS_N */
+ 0x0bc MUX_M3 /* UART5_RXD */
+ 0x0c0 MUX_M3 /* UART5_TXD */
+ >;
+ };
+
+ uart6_pmx_func: uart6_pmx_func {
+ pinctrl-single,pins = <
+ 0x0cc MUX_M1 /* UART6_CTS_N */
+ 0x0d0 MUX_M1 /* UART6_RTS_N */
+ 0x0d4 MUX_M1 /* UART6_RXD */
+ 0x0d8 MUX_M1 /* UART6_TXD */
+ >;
+ };
+ };
+
+ /* [IOMG_MMC0_000, IOMG_MMC0_005] */
+ pmx1: pinmux@ff37e000 {
+ compatible = "pinctrl-single";
+ reg = <0x0 0xff37e000 0x0 0x18>;
+ #gpio-range-cells = <0x3>;
+ #pinctrl-cells = <1>;
+ pinctrl-single,register-width = <0x20>;
+ pinctrl-single,function-mask = <0x7>;
+ /* pin base, nr pins & gpio function */
+ pinctrl-single,gpio-range = <&range 0 6 0>;
+
+ sd_pmx_func: sd_pmx_func {
+ pinctrl-single,pins = <
+ 0x000 MUX_M1 /* SD_CLK */
+ 0x004 MUX_M1 /* SD_CMD */
+ 0x008 MUX_M1 /* SD_DATA0 */
+ 0x00c MUX_M1 /* SD_DATA1 */
+ 0x010 MUX_M1 /* SD_DATA2 */
+ 0x014 MUX_M1 /* SD_DATA3 */
+ >;
+ };
+ };
+
+ /* [IOMG_FIX_000, IOMG_FIX_011] */
+ pmx2: pinmux@ff3b6000 {
+ compatible = "pinctrl-single";
+ reg = <0x0 0xff3b6000 0x0 0x30>;
+ #pinctrl-cells = <1>;
+ #gpio-range-cells = <0x3>;
+ pinctrl-single,register-width = <0x20>;
+ pinctrl-single,function-mask = <0x7>;
+ /* pin base, nr pins & gpio function */
+ pinctrl-single,gpio-range = <&range 0 12 0>;
+
+ spi3_pmx_func: spi3_pmx_func {
+ pinctrl-single,pins = <
+ 0x008 MUX_M1 /* SPI3_CLK */
+ 0x00c MUX_M1 /* SPI3_DI */
+ 0x010 MUX_M1 /* SPI3_DO */
+ 0x014 MUX_M1 /* SPI3_CS0_N */
+ >;
+ };
+ };
+
+ /* [IOMG_MMC1_000, IOMG_MMC1_005] */
+ pmx3: pinmux@ff3fd000 {
+ compatible = "pinctrl-single";
+ reg = <0x0 0xff3fd000 0x0 0x18>;
+ #pinctrl-cells = <1>;
+ #gpio-range-cells = <0x3>;
+ pinctrl-single,register-width = <0x20>;
+ pinctrl-single,function-mask = <0x7>;
+ /* pin base, nr pins & gpio function */
+ pinctrl-single,gpio-range = <&range 0 6 0>;
+
+ sdio_pmx_func: sdio_pmx_func {
+ pinctrl-single,pins = <
+ 0x000 MUX_M1 /* SDIO_CLK */
+ 0x004 MUX_M1 /* SDIO_CMD */
+ 0x008 MUX_M1 /* SDIO_DATA0 */
+ 0x00c MUX_M1 /* SDIO_DATA1 */
+ 0x010 MUX_M1 /* SDIO_DATA2 */
+ 0x014 MUX_M1 /* SDIO_DATA3 */
+ >;
+ };
+ };
+
+ /* [IOMG_AO_000, IOMG_AO_041] */
+ pmx4: pinmux@fff11000 {
+ compatible = "pinctrl-single";
+ reg = <0x0 0xfff11000 0x0 0xa8>;
+ #pinctrl-cells = <1>;
+ #gpio-range-cells = <0x3>;
+ pinctrl-single,register-width = <0x20>;
+ pinctrl-single,function-mask = <0x7>;
+ /* pin base in node, nr pins & gpio function */
+ pinctrl-single,gpio-range = <&range 0 42 0>;
+
+ i2s2_pmx_func: i2s2_pmx_func {
+ pinctrl-single,pins = <
+ 0x044 MUX_M1 /* I2S2_DI */
+ 0x048 MUX_M1 /* I2S2_DO */
+ 0x04c MUX_M1 /* I2S2_XCLK */
+ 0x050 MUX_M1 /* I2S2_XFS */
+ >;
+ };
+
+ slimbus_pmx_func: slimbus_pmx_func {
+ pinctrl-single,pins = <
+ 0x02c MUX_M1 /* SLIMBUS_CLK */
+ 0x030 MUX_M1 /* SLIMBUS_DATA */
+ >;
+ };
+
+ i2c0_pmx_func: i2c0_pmx_func {
+ pinctrl-single,pins = <
+ 0x014 MUX_M1 /* I2C0_SCL */
+ 0x018 MUX_M1 /* I2C0_SDA */
+ >;
+ };
+
+ i2c1_pmx_func: i2c1_pmx_func {
+ pinctrl-single,pins = <
+ 0x01c MUX_M1 /* I2C1_SCL */
+ 0x020 MUX_M1 /* I2C1_SDA */
+ >;
+ };
+
+ i2c2_pmx_func: i2c2_pmx_func {
+ pinctrl-single,pins = <
+ 0x024 MUX_M1 /* I2C2_SCL */
+ 0x028 MUX_M1 /* I2C2_SDA */
+ >;
+ };
+
+ i2c7_pmx_func: i2c7_pmx_func {
+ pinctrl-single,pins = <
+ 0x024 MUX_M3 /* I2C7_SCL */
+ 0x028 MUX_M3 /* I2C7_SDA */
+ >;
+ };
+
+ spi2_pmx_func: spi2_pmx_func {
+ pinctrl-single,pins = <
+ 0x08c MUX_M1 /* SPI2_CLK */
+ 0x090 MUX_M1 /* SPI2_DI */
+ 0x094 MUX_M1 /* SPI2_DO */
+ 0x098 MUX_M1 /* SPI2_CS0_N */
+ >;
+ };
+
+ spi4_pmx_func: spi4_pmx_func {
+ pinctrl-single,pins = <
+ 0x08c MUX_M4 /* SPI4_CLK */
+ 0x090 MUX_M4 /* SPI4_DI */
+ 0x094 MUX_M4 /* SPI4_DO */
+ 0x098 MUX_M4 /* SPI4_CS0_N */
+ >;
+ };
+
+ i2s0_pmx_func: i2s0_pmx_func {
+ pinctrl-single,pins = <
+ 0x034 MUX_M1 /* I2S0_DI */
+ 0x038 MUX_M1 /* I2S0_DO */
+ 0x03c MUX_M1 /* I2S0_XCLK */
+ 0x040 MUX_M1 /* I2S0_XFS */
+ >;
+ };
+ };
+
+ pmx5: pinmux@ff3fd800 {
+ compatible = "pinconf-single";
+ reg = <0x0 0xff3fd800 0x0 0x18>;
+ #pinctrl-cells = <1>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ pinctrl-single,register-width = <32>;
+
+ sdio_clk_cfg_func: sdio_clk_cfg_func {
+ pinctrl-single,pins = <
+ 0x000 0x0 /* SDIO_CLK */
+ >;
+ pinctrl-single,bias-pulldown = <
+ PULL_DIS
+ PULL_DOWN
+ PULL_DIS
+ PULL_DOWN
+ >;
+ pinctrl-single,bias-pullup = <
+ PULL_DIS
+ PULL_UP
+ PULL_DIS
+ PULL_UP
+ >;
+ pinctrl-single,drive-strength = <
+ DRIVE6_32MA
+ DRIVE6_MASK
+ >;
+ };
+
+ sdio_cfg_func: sdio_cfg_func {
+ pinctrl-single,pins = <
+ 0x004 0x0 /* SDIO_CMD */
+ 0x008 0x0 /* SDIO_DATA0 */
+ 0x00c 0x0 /* SDIO_DATA1 */
+ 0x010 0x0 /* SDIO_DATA2 */
+ 0x014 0x0 /* SDIO_DATA3 */
+ >;
+ pinctrl-single,bias-pulldown = <
+ PULL_DIS
+ PULL_DOWN
+ PULL_DIS
+ PULL_DOWN
+ >;
+ pinctrl-single,bias-pullup = <
+ PULL_UP
+ PULL_UP
+ PULL_DIS
+ PULL_UP
+ >;
+ pinctrl-single,drive-strength = <
+ DRIVE6_19MA
+ DRIVE6_MASK
+ >;
+ };
+ };
+
+ pmx6: pinmux@ff37e800 {
+ compatible = "pinconf-single";
+ reg = <0x0 0xff37e800 0x0 0x18>;
+ #pinctrl-cells = <1>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ pinctrl-single,register-width = <32>;
+
+ sd_clk_cfg_func: sd_clk_cfg_func {
+ pinctrl-single,pins = <
+ 0x000 0x0 /* SD_CLK */
+ >;
+ pinctrl-single,bias-pulldown = <
+ PULL_DIS
+ PULL_DOWN
+ PULL_DIS
+ PULL_DOWN
+ >;
+ pinctrl-single,bias-pullup = <
+ PULL_DIS
+ PULL_UP
+ PULL_DIS
+ PULL_UP
+ >;
+ pinctrl-single,drive-strength = <
+ DRIVE6_32MA
+ DRIVE6_MASK
+ >;
+ };
+
+ sd_cfg_func: sd_cfg_func {
+ pinctrl-single,pins = <
+ 0x004 0x0 /* SD_CMD */
+ 0x008 0x0 /* SD_DATA0 */
+ 0x00c 0x0 /* SD_DATA1 */
+ 0x010 0x0 /* SD_DATA2 */
+ 0x014 0x0 /* SD_DATA3 */
+ >;
+ pinctrl-single,bias-pulldown = <
+ PULL_DIS
+ PULL_DOWN
+ PULL_DIS
+ PULL_DOWN
+ >;
+ pinctrl-single,bias-pullup = <
+ PULL_UP
+ PULL_UP
+ PULL_DIS
+ PULL_UP
+ >;
+ pinctrl-single,drive-strength = <
+ DRIVE6_19MA
+ DRIVE6_MASK
+ >;
+ };
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/hisilicon/hip07-d05.dts b/arch/arm64/boot/dts/hisilicon/hip07-d05.dts
index e05844230583..f5d7f0889b41 100644
--- a/arch/arm64/boot/dts/hisilicon/hip07-d05.dts
+++ b/arch/arm64/boot/dts/hisilicon/hip07-d05.dts
@@ -64,3 +64,23 @@
&usb_ehci {
status = "ok";
};
+
+&eth0 {
+ status = "ok";
+};
+
+&eth1 {
+ status = "ok";
+};
+
+&eth2 {
+ status = "ok";
+};
+
+&eth3 {
+ status = "ok";
+};
+
+&sas1 {
+ status = "ok";
+};
diff --git a/arch/arm64/boot/dts/hisilicon/hip07.dtsi b/arch/arm64/boot/dts/hisilicon/hip07.dtsi
index 5144eb1c179d..283d7b532e16 100644
--- a/arch/arm64/boot/dts/hisilicon/hip07.dtsi
+++ b/arch/arm64/boot/dts/hisilicon/hip07.dtsi
@@ -1014,6 +1014,34 @@
compatible = "hisilicon,mbigen-v2";
reg = <0x0 0xa0080000 0x0 0x10000>;
+ mbigen_pcie2_a: intc_pcie2_a {
+ msi-parent = <&p0_its_dsa_a 0x40087>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ num-pins = <10>;
+ };
+
+ mbigen_sas1: intc_sas1 {
+ msi-parent = <&p0_its_dsa_a 0x40000>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ num-pins = <128>;
+ };
+
+ mbigen_sas2: intc_sas2 {
+ msi-parent = <&p0_its_dsa_a 0x40040>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ num-pins = <128>;
+ };
+
+ mbigen_smmu_pcie: intc_smmu_pcie {
+ msi-parent = <&p0_its_dsa_a 0x40b0c>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ num-pins = <3>;
+ };
+
mbigen_usb: intc_usb {
msi-parent = <&p0_its_dsa_a 0x40080>;
interrupt-controller;
@@ -1022,6 +1050,39 @@
};
};
+ p0_mbigen_dsa_a: interrupt-controller@c0080000 {
+ compatible = "hisilicon,mbigen-v2";
+ reg = <0x0 0xc0080000 0x0 0x10000>;
+
+ mbigen_dsaf0: intc_dsaf0 {
+ msi-parent = <&p0_its_dsa_a 0x40800>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ num-pins = <409>;
+ };
+
+ mbigen_dsa_roce: intc-roce {
+ msi-parent = <&p0_its_dsa_a 0x40B1E>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ num-pins = <34>;
+ };
+
+ mbigen_sas0: intc-sas0 {
+ msi-parent = <&p0_its_dsa_a 0x40900>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ num-pins = <128>;
+ };
+
+ mbigen_smmu_dsa: intc_smmu_dsa {
+ msi-parent = <&p0_its_dsa_a 0x40b20>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ num-pins = <3>;
+ };
+ };
+
soc {
compatible = "simple-bus";
#address-cells = <2>;
@@ -1055,5 +1116,423 @@
dma-coherent;
status = "disabled";
};
+
+ peri_c_subctrl: sub_ctrl_c@60000000 {
+ compatible = "hisilicon,peri-subctrl","syscon";
+ reg = <0 0x60000000 0x0 0x10000>;
+ };
+
+ dsa_subctrl: dsa_subctrl@c0000000 {
+ compatible = "hisilicon,dsa-subctrl", "syscon";
+ reg = <0x0 0xc0000000 0x0 0x10000>;
+ };
+
+ pcie_subctl: pcie_subctl@a0000000 {
+ compatible = "hisilicon,pcie-sas-subctrl", "syscon";
+ reg = <0x0 0xa0000000 0x0 0x10000>;
+ };
+
+ serdes_ctrl: sds_ctrl@c2200000 {
+ compatible = "syscon";
+ reg = <0 0xc2200000 0x0 0x80000>;
+ };
+
+ mdio@603c0000 {
+ compatible = "hisilicon,hns-mdio";
+ reg = <0x0 0x603c0000 0x0 0x1000>;
+ subctrl-vbase = <&peri_c_subctrl 0x338 0xa38
+ 0x531c 0x5a1c>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ phy0: ethernet-phy@0 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <0>;
+ };
+
+ phy1: ethernet-phy@1 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <1>;
+ };
+ };
+
+ dsaf0: dsa@c7000000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "hisilicon,hns-dsaf-v2";
+ mode = "6port-16rss";
+ reg = <0x0 0xc5000000 0x0 0x890000
+ 0x0 0xc7000000 0x0 0x600000>;
+ reg-names = "ppe-base", "dsaf-base";
+ interrupt-parent = <&mbigen_dsaf0>;
+ subctrl-syscon = <&dsa_subctrl>;
+ reset-field-offset = <0>;
+ interrupts =
+ <576 1>, <577 1>, <578 1>, <579 1>, <580 1>,
+ <581 1>, <582 1>, <583 1>, <584 1>, <585 1>,
+ <586 1>, <587 1>, <588 1>, <589 1>, <590 1>,
+ <591 1>, <592 1>, <593 1>, <594 1>, <595 1>,
+ <596 1>, <597 1>, <598 1>, <599 1>, <600 1>,
+ <960 1>, <961 1>, <962 1>, <963 1>, <964 1>,
+ <965 1>, <966 1>, <967 1>, <968 1>, <969 1>,
+ <970 1>, <971 1>, <972 1>, <973 1>, <974 1>,
+ <975 1>, <976 1>, <977 1>, <978 1>, <979 1>,
+ <980 1>, <981 1>, <982 1>, <983 1>, <984 1>,
+ <985 1>, <986 1>, <987 1>, <988 1>, <989 1>,
+ <990 1>, <991 1>, <992 1>, <993 1>, <994 1>,
+ <995 1>, <996 1>, <997 1>, <998 1>, <999 1>,
+ <1000 1>, <1001 1>, <1002 1>, <1003 1>, <1004 1>,
+ <1005 1>, <1006 1>, <1007 1>, <1008 1>, <1009 1>,
+ <1010 1>, <1011 1>, <1012 1>, <1013 1>, <1014 1>,
+ <1015 1>, <1016 1>, <1017 1>, <1018 1>, <1019 1>,
+ <1020 1>, <1021 1>, <1022 1>, <1023 1>, <1024 1>,
+ <1025 1>, <1026 1>, <1027 1>, <1028 1>, <1029 1>,
+ <1030 1>, <1031 1>, <1032 1>, <1033 1>, <1034 1>,
+ <1035 1>, <1036 1>, <1037 1>, <1038 1>, <1039 1>,
+ <1040 1>, <1041 1>, <1042 1>, <1043 1>, <1044 1>,
+ <1045 1>, <1046 1>, <1047 1>, <1048 1>, <1049 1>,
+ <1050 1>, <1051 1>, <1052 1>, <1053 1>, <1054 1>,
+ <1055 1>, <1056 1>, <1057 1>, <1058 1>, <1059 1>,
+ <1060 1>, <1061 1>, <1062 1>, <1063 1>, <1064 1>,
+ <1065 1>, <1066 1>, <1067 1>, <1068 1>, <1069 1>,
+ <1070 1>, <1071 1>, <1072 1>, <1073 1>, <1074 1>,
+ <1075 1>, <1076 1>, <1077 1>, <1078 1>, <1079 1>,
+ <1080 1>, <1081 1>, <1082 1>, <1083 1>, <1084 1>,
+ <1085 1>, <1086 1>, <1087 1>, <1088 1>, <1089 1>,
+ <1090 1>, <1091 1>, <1092 1>, <1093 1>, <1094 1>,
+ <1095 1>, <1096 1>, <1097 1>, <1098 1>, <1099 1>,
+ <1100 1>, <1101 1>, <1102 1>, <1103 1>, <1104 1>,
+ <1105 1>, <1106 1>, <1107 1>, <1108 1>, <1109 1>,
+ <1110 1>, <1111 1>, <1112 1>, <1113 1>, <1114 1>,
+ <1115 1>, <1116 1>, <1117 1>, <1118 1>, <1119 1>,
+ <1120 1>, <1121 1>, <1122 1>, <1123 1>, <1124 1>,
+ <1125 1>, <1126 1>, <1127 1>, <1128 1>, <1129 1>,
+ <1130 1>, <1131 1>, <1132 1>, <1133 1>, <1134 1>,
+ <1135 1>, <1136 1>, <1137 1>, <1138 1>, <1139 1>,
+ <1140 1>, <1141 1>, <1142 1>, <1143 1>, <1144 1>,
+ <1145 1>, <1146 1>, <1147 1>, <1148 1>, <1149 1>,
+ <1150 1>, <1151 1>, <1152 1>, <1153 1>, <1154 1>,
+ <1155 1>, <1156 1>, <1157 1>, <1158 1>, <1159 1>,
+ <1160 1>, <1161 1>, <1162 1>, <1163 1>, <1164 1>,
+ <1165 1>, <1166 1>, <1167 1>, <1168 1>, <1169 1>,
+ <1170 1>, <1171 1>, <1172 1>, <1173 1>, <1174 1>,
+ <1175 1>, <1176 1>, <1177 1>, <1178 1>, <1179 1>,
+ <1180 1>, <1181 1>, <1182 1>, <1183 1>, <1184 1>,
+ <1185 1>, <1186 1>, <1187 1>, <1188 1>, <1189 1>,
+ <1190 1>, <1191 1>, <1192 1>, <1193 1>, <1194 1>,
+ <1195 1>, <1196 1>, <1197 1>, <1198 1>, <1199 1>,
+ <1200 1>, <1201 1>, <1202 1>, <1203 1>, <1204 1>,
+ <1205 1>, <1206 1>, <1207 1>, <1208 1>, <1209 1>,
+ <1210 1>, <1211 1>, <1212 1>, <1213 1>, <1214 1>,
+ <1215 1>, <1216 1>, <1217 1>, <1218 1>, <1219 1>,
+ <1220 1>, <1221 1>, <1222 1>, <1223 1>, <1224 1>,
+ <1225 1>, <1226 1>, <1227 1>, <1228 1>, <1229 1>,
+ <1230 1>, <1231 1>, <1232 1>, <1233 1>, <1234 1>,
+ <1235 1>, <1236 1>, <1237 1>, <1238 1>, <1239 1>,
+ <1240 1>, <1241 1>, <1242 1>, <1243 1>, <1244 1>,
+ <1245 1>, <1246 1>, <1247 1>, <1248 1>, <1249 1>,
+ <1250 1>, <1251 1>, <1252 1>, <1253 1>, <1254 1>,
+ <1255 1>, <1256 1>, <1257 1>, <1258 1>, <1259 1>,
+ <1260 1>, <1261 1>, <1262 1>, <1263 1>, <1264 1>,
+ <1265 1>, <1266 1>, <1267 1>, <1268 1>, <1269 1>,
+ <1270 1>, <1271 1>, <1272 1>, <1273 1>, <1274 1>,
+ <1275 1>, <1276 1>, <1277 1>, <1278 1>, <1279 1>,
+ <1280 1>, <1281 1>, <1282 1>, <1283 1>, <1284 1>,
+ <1285 1>, <1286 1>, <1287 1>, <1288 1>, <1289 1>,
+ <1290 1>, <1291 1>, <1292 1>, <1293 1>, <1294 1>,
+ <1295 1>, <1296 1>, <1297 1>, <1298 1>, <1299 1>,
+ <1300 1>, <1301 1>, <1302 1>, <1303 1>, <1304 1>,
+ <1305 1>, <1306 1>, <1307 1>, <1308 1>, <1309 1>,
+ <1310 1>, <1311 1>, <1312 1>, <1313 1>, <1314 1>,
+ <1315 1>, <1316 1>, <1317 1>, <1318 1>, <1319 1>,
+ <1320 1>, <1321 1>, <1322 1>, <1323 1>, <1324 1>,
+ <1325 1>, <1326 1>, <1327 1>, <1328 1>, <1329 1>,
+ <1330 1>, <1331 1>, <1332 1>, <1333 1>, <1334 1>,
+ <1335 1>, <1336 1>, <1337 1>, <1338 1>, <1339 1>,
+ <1340 1>, <1341 1>, <1342 1>, <1343 1>;
+
+ desc-num = <0x400>;
+ buf-size = <0x1000>;
+ dma-coherent;
+
+ port@0 {
+ reg = <0>;
+ serdes-syscon = <&serdes_ctrl>;
+ port-rst-offset = <0>;
+ port-mode-offset = <0>;
+ mc-mac-mask = [ff f0 00 00 00 00];
+ media-type = "fiber";
+ };
+
+ port@1 {
+ reg = <1>;
+ serdes-syscon= <&serdes_ctrl>;
+ port-rst-offset = <1>;
+ port-mode-offset = <1>;
+ mc-mac-mask = [ff f0 00 00 00 00];
+ media-type = "fiber";
+ };
+
+ port@4 {
+ reg = <4>;
+ phy-handle = <&phy0>;
+ serdes-syscon= <&serdes_ctrl>;
+ port-rst-offset = <4>;
+ port-mode-offset = <2>;
+ mc-mac-mask = [ff f0 00 00 00 00];
+ media-type = "copper";
+ };
+
+ port@5 {
+ reg = <5>;
+ phy-handle = <&phy1>;
+ serdes-syscon= <&serdes_ctrl>;
+ port-rst-offset = <5>;
+ port-mode-offset = <3>;
+ mc-mac-mask = [ff f0 00 00 00 00];
+ media-type = "copper";
+ };
+ };
+
+ eth0: ethernet@4{
+ compatible = "hisilicon,hns-nic-v2";
+ ae-handle = <&dsaf0>;
+ port-idx-in-ae = <4>;
+ local-mac-address = [00 00 00 00 00 00];
+ status = "disabled";
+ dma-coherent;
+ };
+
+ eth1: ethernet@5{
+ compatible = "hisilicon,hns-nic-v2";
+ ae-handle = <&dsaf0>;
+ port-idx-in-ae = <5>;
+ local-mac-address = [00 00 00 00 00 00];
+ status = "disabled";
+ dma-coherent;
+ };
+
+ eth2: ethernet@0{
+ compatible = "hisilicon,hns-nic-v2";
+ ae-handle = <&dsaf0>;
+ port-idx-in-ae = <0>;
+ local-mac-address = [00 00 00 00 00 00];
+ status = "disabled";
+ dma-coherent;
+ };
+
+ eth3: ethernet@1{
+ compatible = "hisilicon,hns-nic-v2";
+ ae-handle = <&dsaf0>;
+ port-idx-in-ae = <1>;
+ local-mac-address = [00 00 00 00 00 00];
+ status = "disabled";
+ dma-coherent;
+ };
+
+ infiniband@c4000000 {
+ compatible = "hisilicon,hns-roce-v1";
+ reg = <0x0 0xc4000000 0x0 0x100000>;
+ dma-coherent;
+ eth-handle = <&eth2 &eth3 0 0 &eth0 &eth1>;
+ dsaf-handle = <&dsaf0>;
+ node-guid = [00 9A CD 00 00 01 02 03];
+ #address-cells = <2>;
+ #size-cells = <2>;
+ interrupt-parent = <&mbigen_dsa_roce>;
+ interrupts = <722 1>,
+ <723 1>,
+ <724 1>,
+ <725 1>,
+ <726 1>,
+ <727 1>,
+ <728 1>,
+ <729 1>,
+ <730 1>,
+ <731 1>,
+ <732 1>,
+ <733 1>,
+ <734 1>,
+ <735 1>,
+ <736 1>,
+ <737 1>,
+ <738 1>,
+ <739 1>,
+ <740 1>,
+ <741 1>,
+ <742 1>,
+ <743 1>,
+ <744 1>,
+ <745 1>,
+ <746 1>,
+ <747 1>,
+ <748 1>,
+ <749 1>,
+ <750 1>,
+ <751 1>,
+ <752 1>,
+ <753 1>,
+ <785 1>,
+ <754 4>;
+
+ interrupt-names = "hns-roce-comp-0",
+ "hns-roce-comp-1",
+ "hns-roce-comp-2",
+ "hns-roce-comp-3",
+ "hns-roce-comp-4",
+ "hns-roce-comp-5",
+ "hns-roce-comp-6",
+ "hns-roce-comp-7",
+ "hns-roce-comp-8",
+ "hns-roce-comp-9",
+ "hns-roce-comp-10",
+ "hns-roce-comp-11",
+ "hns-roce-comp-12",
+ "hns-roce-comp-13",
+ "hns-roce-comp-14",
+ "hns-roce-comp-15",
+ "hns-roce-comp-16",
+ "hns-roce-comp-17",
+ "hns-roce-comp-18",
+ "hns-roce-comp-19",
+ "hns-roce-comp-20",
+ "hns-roce-comp-21",
+ "hns-roce-comp-22",
+ "hns-roce-comp-23",
+ "hns-roce-comp-24",
+ "hns-roce-comp-25",
+ "hns-roce-comp-26",
+ "hns-roce-comp-27",
+ "hns-roce-comp-28",
+ "hns-roce-comp-29",
+ "hns-roce-comp-30",
+ "hns-roce-comp-31",
+ "hns-roce-async",
+ "hns-roce-common";
+ };
+
+ sas0: sas@c3000000 {
+ compatible = "hisilicon,hip07-sas-v2";
+ reg = <0 0xc3000000 0 0x10000>;
+ sas-addr = [50 01 88 20 16 00 00 00];
+ hisilicon,sas-syscon = <&dsa_subctrl>;
+ ctrl-reset-reg = <0xa60>;
+ ctrl-reset-sts-reg = <0x5a30>;
+ ctrl-clock-ena-reg = <0x338>;
+ queue-count = <16>;
+ phy-count = <8>;
+ dma-coherent;
+ interrupt-parent = <&mbigen_sas0>;
+ interrupts = <64 4>,<65 4>,<66 4>,<67 4>,<68 4>,
+ <69 4>,<70 4>,<71 4>,<72 4>,<73 4>,
+ <74 4>,<75 4>,<76 4>,<77 4>,<78 4>,
+ <79 4>,<80 4>,<81 4>,<82 4>,<83 4>,
+ <84 4>,<85 4>,<86 4>,<87 4>,<88 4>,
+ <89 4>,<90 4>,<91 4>,<92 4>,<93 4>,
+ <94 4>,<95 4>,<96 4>,<97 4>,<98 4>,
+ <99 4>,<100 4>,<101 4>,<102 4>,<103 4>,
+ <104 4>,<105 4>,<106 4>,<107 4>,<108 4>,
+ <109 4>,<110 4>,<111 4>,<112 4>,<113 4>,
+ <114 4>,<115 4>,<116 4>,<117 4>,<118 4>,
+ <119 4>,<120 4>,<121 4>,<122 4>,<123 4>,
+ <124 4>,<125 4>,<126 4>,<127 4>,<128 4>,
+ <129 4>,<130 4>,<131 4>,<132 4>,<133 4>,
+ <134 4>,<135 4>,<136 4>,<137 4>,<138 4>,
+ <139 4>,<140 4>,<141 4>,<142 4>,<143 4>,
+ <144 4>,<145 4>,<146 4>,<147 4>,<148 4>,
+ <149 4>,<150 4>,<151 4>,<152 4>,<153 4>,
+ <154 4>,<155 4>,<156 4>,<157 4>,<158 4>,
+ <159 4>,<601 1>,<602 1>,<603 1>,<604 1>,
+ <605 1>,<606 1>,<607 1>,<608 1>,<609 1>,
+ <610 1>,<611 1>,<612 1>,<613 1>,<614 1>,
+ <615 1>,<616 1>,<617 1>,<618 1>,<619 1>,
+ <620 1>,<621 1>,<622 1>,<623 1>,<624 1>,
+ <625 1>,<626 1>,<627 1>,<628 1>,<629 1>,
+ <630 1>,<631 1>,<632 1>;
+ status = "disabled";
+ };
+
+ sas1: sas@a2000000 {
+ compatible = "hisilicon,hip07-sas-v2";
+ reg = <0 0xa2000000 0 0x10000>;
+ sas-addr = [50 01 88 20 16 00 00 00];
+ hisilicon,sas-syscon = <&pcie_subctl>;
+ hip06-sas-v2-quirk-amt;
+ ctrl-reset-reg = <0xa18>;
+ ctrl-reset-sts-reg = <0x5a0c>;
+ ctrl-clock-ena-reg = <0x318>;
+ queue-count = <16>;
+ phy-count = <8>;
+ dma-coherent;
+ interrupt-parent = <&mbigen_sas1>;
+ interrupts = <64 4>,<65 4>,<66 4>,<67 4>,<68 4>,
+ <69 4>,<70 4>,<71 4>,<72 4>,<73 4>,
+ <74 4>,<75 4>,<76 4>,<77 4>,<78 4>,
+ <79 4>,<80 4>,<81 4>,<82 4>,<83 4>,
+ <84 4>,<85 4>,<86 4>,<87 4>,<88 4>,
+ <89 4>,<90 4>,<91 4>,<92 4>,<93 4>,
+ <94 4>,<95 4>,<96 4>,<97 4>,<98 4>,
+ <99 4>,<100 4>,<101 4>,<102 4>,<103 4>,
+ <104 4>,<105 4>,<106 4>,<107 4>,<108 4>,
+ <109 4>,<110 4>,<111 4>,<112 4>,<113 4>,
+ <114 4>,<115 4>,<116 4>,<117 4>,<118 4>,
+ <119 4>,<120 4>,<121 4>,<122 4>,<123 4>,
+ <124 4>,<125 4>,<126 4>,<127 4>,<128 4>,
+ <129 4>,<130 4>,<131 4>,<132 4>,<133 4>,
+ <134 4>,<135 4>,<136 4>,<137 4>,<138 4>,
+ <139 4>,<140 4>,<141 4>,<142 4>,<143 4>,
+ <144 4>,<145 4>,<146 4>,<147 4>,<148 4>,
+ <149 4>,<150 4>,<151 4>,<152 4>,<153 4>,
+ <154 4>,<155 4>,<156 4>,<157 4>,<158 4>,
+ <159 4>,<576 1>,<577 1>,<578 1>,<579 1>,
+ <580 1>,<581 1>,<582 1>,<583 1>,<584 1>,
+ <585 1>,<586 1>,<587 1>,<588 1>,<589 1>,
+ <590 1>,<591 1>,<592 1>,<593 1>,<594 1>,
+ <595 1>,<596 1>,<597 1>,<598 1>,<599 1>,
+ <600 1>,<601 1>,<602 1>,<603 1>,<604 1>,
+ <605 1>,<606 1>,<607 1>;
+ status = "disabled";
+ };
+
+ sas2: sas@a3000000 {
+ compatible = "hisilicon,hip07-sas-v2";
+ reg = <0 0xa3000000 0 0x10000>;
+ sas-addr = [50 01 88 20 16 00 00 00];
+ hisilicon,sas-syscon = <&pcie_subctl>;
+ ctrl-reset-reg = <0xae0>;
+ ctrl-reset-sts-reg = <0x5a70>;
+ ctrl-clock-ena-reg = <0x3a8>;
+ queue-count = <16>;
+ phy-count = <9>;
+ dma-coherent;
+ interrupt-parent = <&mbigen_sas2>;
+ interrupts = <192 4>,<193 4>,<194 4>,<195 4>,<196 4>,
+ <197 4>,<198 4>,<199 4>,<200 4>,<201 4>,
+ <202 4>,<203 4>,<204 4>,<205 4>,<206 4>,
+ <207 4>,<208 4>,<209 4>,<210 4>,<211 4>,
+ <212 4>,<213 4>,<214 4>,<215 4>,<216 4>,
+ <217 4>,<218 4>,<219 4>,<220 4>,<221 4>,
+ <222 4>,<223 4>,<224 4>,<225 4>,<226 4>,
+ <227 4>,<228 4>,<229 4>,<230 4>,<231 4>,
+ <232 4>,<233 4>,<234 4>,<235 4>,<236 4>,
+ <237 4>,<238 4>,<239 4>,<240 4>,<241 4>,
+ <242 4>,<243 4>,<244 4>,<245 4>,<246 4>,
+ <247 4>,<248 4>,<249 4>,<250 4>,<251 4>,
+ <252 4>,<253 4>,<254 4>,<255 4>,<256 4>,
+ <257 4>,<258 4>,<259 4>,<260 4>,<261 4>,
+ <262 4>,<263 4>,<264 4>,<265 4>,<266 4>,
+ <267 4>,<268 4>,<269 4>,<270 4>,<271 4>,
+ <272 4>,<273 4>,<274 4>,<275 4>,<276 4>,
+ <277 4>,<278 4>,<279 4>,<280 4>,<281 4>,
+ <282 4>,<283 4>,<284 4>,<285 4>,<286 4>,
+ <287 4>,<608 1>,<609 1>,<610 1>,<611 1>,
+ <612 1>,<613 1>,<614 1>,<615 1>,<616 1>,
+ <617 1>,<618 1>,<619 1>,<620 1>,<621 1>,
+ <622 1>,<623 1>,<624 1>,<625 1>,<626 1>,
+ <627 1>,<628 1>,<629 1>,<630 1>,<631 1>,
+ <632 1>,<633 1>,<634 1>,<635 1>,<636 1>,
+ <637 1>,<638 1>,<639 1>;
+ status = "disabled";
+ };
};
};
diff --git a/arch/arm64/boot/dts/include/arm b/arch/arm64/boot/dts/include/arm
new file mode 120000
index 000000000000..cf63d80e2b93
--- /dev/null
+++ b/arch/arm64/boot/dts/include/arm
@@ -0,0 +1 @@
+../../../../arm/boot/dts
\ No newline at end of file
diff --git a/arch/arm64/boot/dts/include/arm64 b/arch/arm64/boot/dts/include/arm64
new file mode 120000
index 000000000000..a96aa0ea9d8c
--- /dev/null
+++ b/arch/arm64/boot/dts/include/arm64
@@ -0,0 +1 @@
+..
\ No newline at end of file
diff --git a/arch/arm64/boot/dts/marvell/armada-3720-db.dts b/arch/arm64/boot/dts/marvell/armada-3720-db.dts
index 86602c907a61..cef5f976bc0f 100644
--- a/arch/arm64/boot/dts/marvell/armada-3720-db.dts
+++ b/arch/arm64/boot/dts/marvell/armada-3720-db.dts
@@ -46,6 +46,7 @@
/dts-v1/;
+#include <dt-bindings/gpio/gpio.h>
#include "armada-372x.dtsi"
/ {
@@ -60,10 +61,49 @@
device_type = "memory";
reg = <0x00000000 0x00000000 0x00000000 0x20000000>;
};
+
+ exp_usb3_vbus: usb3-vbus {
+ compatible = "regulator-fixed";
+ regulator-name = "usb3-vbus";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ enable-active-high;
+ regulator-always-on;
+ gpio = <&gpio_exp 1 GPIO_ACTIVE_HIGH>;
+ };
+
+ usb3_phy: usb3-phy {
+ compatible = "usb-nop-xceiv";
+ vcc-supply = <&exp_usb3_vbus>;
+ };
};
&i2c0 {
status = "okay";
+
+ gpio_exp: pca9555@22 {
+ compatible = "nxp,pca9555";
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ reg = <0x22>;
+ /*
+ * IO0_0: PWR_EN_USB2 IO1_0: PWR_EN_VTT
+ * IO0_1: PWR_EN_USB23 IO1_1: MPCIE_WDISABLE
+ * IO0_2: PWR_EN_SATA IO1_2: RGMII_DEV_RSTN
+ * IO0_3: PWR_EN_PCIE IO1_3: SGMII_DEV_RSTN
+ * IO0_4: PWR_EN_SD
+ * IO0_5: PWR_EN_EMMC
+ * IO0_6: PWR_EN_RGMII IO1_6: SATA_USB3.0_SEL
+ * IO0_7: PWR_EN_SGMII IO1_7: PWR_MCI_PS
+ */
+ };
+
+ rtc@68 {
+ /* PT7C4337A from Pericom, fully compatible with the DS1337 */
+ compatible = "dallas,ds1337";
+ reg = <0x68>;
+ };
};
/* CON3 */
@@ -106,9 +146,19 @@
status = "okay";
};
+&sdhci0 {
+ non-removable;
+ bus-width = <8>;
+ mmc-ddr-1_8v;
+ mmc-hs400-1_8v;
+ marvell,pad-type = "fixed-1-8v";
+ status = "okay";
+};
+
/* CON31 */
&usb3 {
status = "okay";
+ usb-phy = <&usb3_phy>;
};
/* CON17 (PCIe) / CON12 (mini-PCIe) */
@@ -116,6 +166,12 @@
status = "okay";
};
+/* CON27 */
+&usb2 {
+ status = "okay";
+};
+
+
&mdio {
status = "okay";
phy0: ethernet-phy@0 {
diff --git a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
index b48d668a6ab6..58ae9e095af2 100644
--- a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
@@ -112,6 +112,8 @@
i2c0: i2c@11000 {
compatible = "marvell,armada-3700-i2c";
reg = <0x11000 0x24>;
+ #address-cells = <1>;
+ #size-cells = <0>;
clocks = <&nb_periph_clk 10>;
interrupts = <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>;
mrvl,i2c-fast-mode;
@@ -121,6 +123,8 @@
i2c1: i2c@11080 {
compatible = "marvell,armada-3700-i2c";
reg = <0x11080 0x24>;
+ #address-cells = <1>;
+ #size-cells = <0>;
clocks = <&nb_periph_clk 9>;
interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>;
mrvl,i2c-fast-mode;
@@ -196,7 +200,15 @@
compatible = "marvell,armada3700-xhci",
"generic-xhci";
reg = <0x58000 0x4000>;
- interrupts = <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&sb_periph_clk 12>;
+ status = "disabled";
+ };
+
+ usb2: usb@5e000 {
+ compatible = "marvell,armada-3700-ehci";
+ reg = <0x5e000 0x2000>;
+ interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
};
@@ -213,6 +225,17 @@
};
};
+ sdhci0: sdhci@d8000 {
+ compatible = "marvell,armada-3700-sdhci",
+ "marvell,sdhci-xenon";
+ reg = <0xd8000 0x300
+ 0x17808 0x4>;
+ interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&nb_periph_clk 0>;
+ clock-names = "core";
+ status = "disabled";
+ };
+
sata: sata@e0000 {
compatible = "marvell,armada-3700-ahci";
reg = <0xe0000 0x2000>;
diff --git a/arch/arm64/boot/dts/marvell/armada-7040-db.dts b/arch/arm64/boot/dts/marvell/armada-7040-db.dts
index 070b589680c5..12442329b80f 100644
--- a/arch/arm64/boot/dts/marvell/armada-7040-db.dts
+++ b/arch/arm64/boot/dts/marvell/armada-7040-db.dts
@@ -146,3 +146,46 @@
&cpm_usb3_1 {
status = "okay";
};
+
+&ap_sdhci0 {
+ status = "okay";
+ bus-width = <4>;
+ no-1-8-v;
+ non-removable;
+};
+
+&cpm_sdhci0 {
+ status = "okay";
+ bus-width = <4>;
+ no-1-8-v;
+ non-removable;
+};
+
+&cpm_mdio {
+ phy0: ethernet-phy@0 {
+ reg = <0>;
+ };
+ phy1: ethernet-phy@1 {
+ reg = <1>;
+ };
+};
+
+&cpm_ethernet {
+ status = "okay";
+};
+
+&cpm_eth1 {
+ status = "okay";
+ phy = <&phy0>;
+ phy-mode = "sgmii";
+};
+
+&cpm_eth2 {
+ status = "okay";
+ phy = <&phy1>;
+ phy-mode = "rgmii-id";
+};
+
+&cpm_crypto {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/marvell/armada-8020.dtsi b/arch/arm64/boot/dts/marvell/armada-8020.dtsi
index 048e5cf5160e..7c08f1f28d9e 100644
--- a/arch/arm64/boot/dts/marvell/armada-8020.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-8020.dtsi
@@ -54,3 +54,13 @@
compatible = "marvell,armada8020", "marvell,armada-ap806-dual",
"marvell,armada-ap806";
};
+
+/* The RTC requires an external oscillator. But on Armada 80x0, the RTC
+ * clock in the CP master is not connected (by the package) to the
+ * oscillator, so it is disabled here. However, the RTC clock in the CP
+ * slave is connected to the oscillator, so that one is left enabled.
+ */
+
+&cpm_rtc {
+ status = "disabled";
+};
diff --git a/arch/arm64/boot/dts/marvell/armada-8040-db.dts b/arch/arm64/boot/dts/marvell/armada-8040-db.dts
index 6e6f182fb297..dc0d084005b2 100644
--- a/arch/arm64/boot/dts/marvell/armada-8040-db.dts
+++ b/arch/arm64/boot/dts/marvell/armada-8040-db.dts
@@ -124,6 +124,26 @@
status = "okay";
};
+&cpm_mdio {
+ phy1: ethernet-phy@1 {
+ reg = <1>;
+ };
+};
+
+&cpm_ethernet {
+ status = "okay";
+};
+
+&cpm_eth2 {
+ status = "okay";
+ phy = <&phy1>;
+ phy-mode = "rgmii-id";
+};
+
+&cpm_crypto {
+ status = "okay";
+};
+
/* CON5 on CP1 expansion */
&cps_pcie2 {
status = "okay";
@@ -148,3 +168,15 @@
&cps_usb3_1 {
status = "okay";
};
+
+&ap_sdhci0 {
+ status = "okay";
+ bus-width = <4>;
+ non-removable;
+};
+
+&cpm_sdhci0 {
+ status = "okay";
+ bus-width = <8>;
+ non-removable;
+};
diff --git a/arch/arm64/boot/dts/marvell/armada-8040.dtsi b/arch/arm64/boot/dts/marvell/armada-8040.dtsi
index 9c1b28c47683..33813a75bc30 100644
--- a/arch/arm64/boot/dts/marvell/armada-8040.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-8040.dtsi
@@ -54,3 +54,12 @@
compatible = "marvell,armada8040", "marvell,armada-ap806-quad",
"marvell,armada-ap806";
};
+
+/* The RTC requires an external oscillator. But on Armada 80x0, the RTC
+ * clock in the CP master is not connected (by the package) to the
+ * oscillator, so it is disabled here. However, the RTC clock in the CP
+ * slave is connected to the oscillator, so that one is left enabled.
+ */
+&cpm_rtc {
+ status = "disabled";
+};
diff --git a/arch/arm64/boot/dts/marvell/armada-ap806.dtsi b/arch/arm64/boot/dts/marvell/armada-ap806.dtsi
index 5019c8f4acd0..fe41bf9c301e 100644
--- a/arch/arm64/boot/dts/marvell/armada-ap806.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-ap806.dtsi
@@ -229,6 +229,17 @@
};
+ ap_sdhci0: sdhci@6e0000 {
+ compatible = "marvell,armada-ap806-sdhci";
+ reg = <0x6e0000 0x300>;
+ interrupts = <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>;
+ clock-names = "core";
+ clocks = <&ap_syscon 4>;
+ dma-coherent;
+ marvell,xenon-phy-slow-mode;
+ status = "disabled";
+ };
+
ap_syscon: system-controller@6f4000 {
compatible = "marvell,ap806-system-controller",
"syscon";
diff --git a/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi b/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi
index 3a99c36433d6..ac8df5201cd6 100644
--- a/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi
@@ -59,6 +59,43 @@
interrupt-parent = <&gic>;
ranges = <0x0 0x0 0xf2000000 0x2000000>;
+ cpm_ethernet: ethernet@0 {
+ compatible = "marvell,armada-7k-pp22";
+ reg = <0x0 0x100000>, <0x129000 0xb000>;
+ clocks = <&cpm_syscon0 1 3>, <&cpm_syscon0 1 9>, <&cpm_syscon0 1 5>;
+ clock-names = "pp_clk", "gop_clk", "mg_clk";
+ status = "disabled";
+ dma-coherent;
+
+ cpm_eth0: eth0 {
+ interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
+ port-id = <0>;
+ gop-port-id = <0>;
+ status = "disabled";
+ };
+
+ cpm_eth1: eth1 {
+ interrupts = <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>;
+ port-id = <1>;
+ gop-port-id = <2>;
+ status = "disabled";
+ };
+
+ cpm_eth2: eth2 {
+ interrupts = <GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>;
+ port-id = <2>;
+ gop-port-id = <3>;
+ status = "disabled";
+ };
+ };
+
+ cpm_mdio: mdio@12a200 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "marvell,orion-mdio";
+ reg = <0x12a200 0x10>;
+ };
+
cpm_syscon0: system-controller@440000 {
compatible = "marvell,cp110-system-controller0",
"syscon";
@@ -79,6 +116,13 @@
"cpm-usb3dev", "cpm-eip150", "cpm-eip197";
};
+ cpm_rtc: rtc@284000 {
+ compatible = "marvell,armada-8k-rtc";
+ reg = <0x284000 0x20>, <0x284080 0x24>;
+ reg-names = "rtc", "rtc-soc";
+ interrupts = <GIC_SPI 71 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
cpm_sata0: sata@540000 {
compatible = "marvell,armada-8k-ahci",
"generic-ahci";
@@ -173,6 +217,32 @@
clocks = <&cpm_syscon0 1 25>;
status = "okay";
};
+
+ cpm_sdhci0: sdhci@780000 {
+ compatible = "marvell,armada-cp110-sdhci";
+ reg = <0x780000 0x300>;
+ interrupts = <GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>;
+ clock-names = "core";
+ clocks = <&cpm_syscon0 1 4>;
+ dma-coherent;
+ status = "disabled";
+ };
+
+ cpm_crypto: crypto@800000 {
+ compatible = "inside-secure,safexcel-eip197";
+ reg = <0x800000 0x200000>;
+ interrupts = <GIC_SPI 34 (IRQ_TYPE_EDGE_RISING
+ | IRQ_TYPE_LEVEL_HIGH)>,
+ <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 57 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 58 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "mem", "ring0", "ring1",
+ "ring2", "ring3", "eip";
+ clocks = <&cpm_syscon0 1 26>;
+ status = "disabled";
+ };
};
cpm_pcie0: pcie@f2600000 {
diff --git a/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi b/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi
index 9e09c4d3b6bd..7740a75a8230 100644
--- a/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi
@@ -59,6 +59,50 @@
interrupt-parent = <&gic>;
ranges = <0x0 0x0 0xf4000000 0x2000000>;
+ cps_rtc: rtc@284000 {
+ compatible = "marvell,armada-8k-rtc";
+ reg = <0x284000 0x20>, <0x284080 0x24>;
+ reg-names = "rtc", "rtc-soc";
+ interrupts = <GIC_SPI 71 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ cps_ethernet: ethernet@0 {
+ compatible = "marvell,armada-7k-pp22";
+ reg = <0x0 0x100000>, <0x129000 0xb000>;
+ clocks = <&cps_syscon0 1 3>, <&cps_syscon0 1 9>, <&cps_syscon0 1 5>;
+ clock-names = "pp_clk", "gop_clk", "mg_clk";
+ status = "disabled";
+ dma-coherent;
+
+ cps_eth0: eth0 {
+ interrupts = <GIC_SPI 261 IRQ_TYPE_LEVEL_HIGH>;
+ port-id = <0>;
+ gop-port-id = <0>;
+ status = "disabled";
+ };
+
+ cps_eth1: eth1 {
+ interrupts = <GIC_SPI 262 IRQ_TYPE_LEVEL_HIGH>;
+ port-id = <1>;
+ gop-port-id = <2>;
+ status = "disabled";
+ };
+
+ cps_eth2: eth2 {
+ interrupts = <GIC_SPI 263 IRQ_TYPE_LEVEL_HIGH>;
+ port-id = <2>;
+ gop-port-id = <3>;
+ status = "disabled";
+ };
+ };
+
+ cps_mdio: mdio@12a200 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "marvell,orion-mdio";
+ reg = <0x12a200 0x10>;
+ };
+
cps_syscon0: system-controller@440000 {
compatible = "marvell,cp110-system-controller0",
"syscon";
@@ -173,6 +217,22 @@
clocks = <&cps_syscon0 1 25>;
status = "okay";
};
+
+ cps_crypto: crypto@800000 {
+ compatible = "inside-secure,safexcel-eip197";
+ reg = <0x800000 0x200000>;
+ interrupts = <GIC_SPI 34 (IRQ_TYPE_EDGE_RISING
+ | IRQ_TYPE_LEVEL_HIGH)>,
+ <GIC_SPI 278 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 279 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 280 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 281 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 282 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "mem", "ring0", "ring1",
+ "ring2", "ring3", "eip";
+ clocks = <&cps_syscon0 1 26>;
+ status = "disabled";
+ };
};
cps_pcie0: pcie@f4600000 {
diff --git a/arch/arm64/boot/dts/nvidia/tegra132.dtsi b/arch/arm64/boot/dts/nvidia/tegra132.dtsi
index 3f3a46a4bd01..2b17936ac5be 100644
--- a/arch/arm64/boot/dts/nvidia/tegra132.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra132.dtsi
@@ -224,7 +224,7 @@
};
flow-controller@60007000 {
- compatible = "nvidia,tegra124-flowctrl";
+ compatible = "nvidia,tegra132-flowctrl", "nvidia,tegra124-flowctrl";
reg = <0x0 0x60007000 0x0 0x1000>;
};
diff --git a/arch/arm64/boot/dts/nvidia/tegra186-p2771-0000.dts b/arch/arm64/boot/dts/nvidia/tegra186-p2771-0000.dts
index 0d3c0996d832..8daadadec63a 100644
--- a/arch/arm64/boot/dts/nvidia/tegra186-p2771-0000.dts
+++ b/arch/arm64/boot/dts/nvidia/tegra186-p2771-0000.dts
@@ -1,8 +1,99 @@
/dts-v1/;
+#include <dt-bindings/input/linux-event-codes.h>
+
#include "tegra186-p3310.dtsi"
/ {
model = "NVIDIA Tegra186 P2771-0000 Development Board";
compatible = "nvidia,p2771-0000", "nvidia,tegra186";
+
+ i2c@3160000 {
+ power-monitor@42 {
+ compatible = "ti,ina3221";
+ reg = <0x42>;
+ };
+
+ power-monitor@43 {
+ compatible = "ti,ina3221";
+ reg = <0x43>;
+ };
+
+ exp1: gpio@74 {
+ compatible = "ti,tca9539";
+ reg = <0x74>;
+
+ interrupt-parent = <&gpio>;
+ interrupts = <TEGRA_MAIN_GPIO(Y, 0) GPIO_ACTIVE_LOW>;
+
+ #gpio-cells = <2>;
+ gpio-controller;
+ };
+
+ exp2: gpio@77 {
+ compatible = "ti,tca9539";
+ reg = <0x77>;
+
+ interrupt-parent = <&gpio>;
+ interrupts = <TEGRA_MAIN_GPIO(Y, 6) GPIO_ACTIVE_LOW>;
+
+ #gpio-cells = <2>;
+ gpio-controller;
+ };
+ };
+
+ /* SDMMC1 (SD/MMC) */
+ sdhci@3400000 {
+ status = "okay";
+
+ vmmc-supply = <&vdd_sd>;
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+
+ power {
+ label = "Power";
+ gpios = <&gpio_aon TEGRA_AON_GPIO(FF, 0)
+ GPIO_ACTIVE_LOW>;
+ linux,input-type = <EV_KEY>;
+ linux,code = <KEY_POWER>;
+ debounce-interval = <10>;
+ wakeup-source;
+ };
+
+ volume-up {
+ label = "Volume Up";
+ gpios = <&gpio_aon TEGRA_AON_GPIO(FF, 1)
+ GPIO_ACTIVE_LOW>;
+ linux,input-type = <EV_KEY>;
+ linux,code = <KEY_VOLUMEUP>;
+ debounce-interval = <10>;
+ };
+
+ volume-down {
+ label = "Volume Down";
+ gpios = <&gpio_aon TEGRA_AON_GPIO(FF, 2)
+ GPIO_ACTIVE_LOW>;
+ linux,input-type = <EV_KEY>;
+ linux,code = <KEY_VOLUMEDOWN>;
+ debounce-interval = <10>;
+ };
+ };
+
+ regulators {
+ vdd_sd: regulator@100 {
+ compatible = "regulator-fixed";
+ reg = <100>;
+
+ regulator-name = "SD_CARD_SW_PWR";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+
+ gpio = <&gpio TEGRA_MAIN_GPIO(P, 6) GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ vin-supply = <&vdd_3v3_sys>;
+ };
+ };
};
diff --git a/arch/arm64/boot/dts/nvidia/tegra186-p3310.dtsi b/arch/arm64/boot/dts/nvidia/tegra186-p3310.dtsi
index 1abe2eceb3d1..cf84d7046ad5 100644
--- a/arch/arm64/boot/dts/nvidia/tegra186-p3310.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra186-p3310.dtsi
@@ -1,11 +1,23 @@
#include "tegra186.dtsi"
+#include <dt-bindings/mfd/max77620.h>
+
/ {
model = "NVIDIA Tegra186 P3310 Processor Module";
compatible = "nvidia,p3310", "nvidia,tegra186";
aliases {
+ sdhci0 = "/sdhci@3460000";
+ sdhci1 = "/sdhci@3400000";
serial0 = &uarta;
+ i2c0 = "/bpmp/i2c";
+ i2c1 = "/i2c@3160000";
+ i2c2 = "/i2c@c240000";
+ i2c3 = "/i2c@3180000";
+ i2c4 = "/i2c@3190000";
+ i2c5 = "/i2c@31c0000";
+ i2c6 = "/i2c@c250000";
+ i2c7 = "/i2c@31e0000";
};
chosen {
@@ -18,14 +30,99 @@
reg = <0x0 0x80000000 0x2 0x00000000>;
};
+ ethernet@2490000 {
+ status = "okay";
+
+ phy-reset-gpios = <&gpio TEGRA_MAIN_GPIO(M, 4) GPIO_ACTIVE_LOW>;
+ phy-handle = <&phy>;
+ phy-mode = "rgmii";
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ phy: phy@0 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <0x0>;
+ interrupt-parent = <&gpio>;
+ interrupts = <TEGRA_MAIN_GPIO(M, 5) IRQ_TYPE_LEVEL_HIGH>;
+ };
+ };
+ };
+
serial@3100000 {
status = "okay";
};
+ i2c@3160000 {
+ status = "okay";
+
+ power-monitor@40 {
+ compatible = "ti,ina3221";
+ reg = <0x40>;
+ };
+
+ power-monitor@41 {
+ compatible = "ti,ina3221";
+ reg = <0x41>;
+ };
+ };
+
+ i2c@3180000 {
+ status = "okay";
+ };
+
+ i2c@3190000 {
+ status = "okay";
+ };
+
+ i2c@31c0000 {
+ status = "okay";
+ };
+
+ i2c@31e0000 {
+ status = "okay";
+ };
+
+ /* SDMMC1 (SD/MMC) */
+ sdhci@3400000 {
+ cd-gpios = <&gpio TEGRA_MAIN_GPIO(P, 5) GPIO_ACTIVE_LOW>;
+ wp-gpios = <&gpio TEGRA_MAIN_GPIO(P, 4) GPIO_ACTIVE_LOW>;
+
+ vqmmc-supply = <&vddio_sdmmc1>;
+ };
+
+ /* SDMMC3 (SDIO) */
+ sdhci@3440000 {
+ status = "okay";
+ };
+
+ /* SDMMC4 (eMMC) */
+ sdhci@3460000 {
+ status = "okay";
+ bus-width = <8>;
+ non-removable;
+
+ vqmmc-supply = <&vdd_1v8_ap>;
+ vmmc-supply = <&vdd_3v3_sys>;
+ };
+
hsp@3c00000 {
status = "okay";
};
+ i2c@c240000 {
+ status = "okay";
+ };
+
+ i2c@c250000 {
+ status = "okay";
+ };
+
+ pmc@c360000 {
+ nvidia,invert-interrupt;
+ };
+
cpus {
cpu@0 {
enable-method = "psci";
@@ -53,7 +150,192 @@
};
bpmp {
- status = "okay";
+ i2c {
+ status = "okay";
+
+ pmic: pmic@3c {
+ compatible = "maxim,max77620";
+ reg = <0x3c>;
+
+ interrupts = <GIC_SPI 209 IRQ_TYPE_LEVEL_HIGH>;
+ #interrupt-cells = <2>;
+ interrupt-controller;
+
+ #gpio-cells = <2>;
+ gpio-controller;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&max77620_default>;
+
+ max77620_default: pinmux {
+ gpio0 {
+ pins = "gpio0";
+ function = "gpio";
+ };
+
+ gpio1 {
+ pins = "gpio1";
+ function = "fps-out";
+ maxim,active-fps-source = <MAX77620_FPS_SRC_0>;
+ };
+
+ gpio2 {
+ pins = "gpio2";
+ function = "fps-out";
+ maxim,active-fps-source = <MAX77620_FPS_SRC_1>;
+ };
+
+ gpio3 {
+ pins = "gpio3";
+ function = "fps-out";
+ maxim,active-fps-source = <MAX77620_FPS_SRC_1>;
+ };
+
+ gpio4 {
+ pins = "gpio4";
+ function = "32k-out1";
+ drive-push-pull = <1>;
+ };
+
+ gpio5 {
+ pins = "gpio5";
+ function = "gpio";
+ drive-push-pull = <0>;
+ };
+
+ gpio6 {
+ pins = "gpio6";
+ function = "gpio";
+ drive-push-pull = <1>;
+ };
+
+ gpio7 {
+ pins = "gpio7";
+ function = "gpio";
+ drive-push-pull = <0>;
+ };
+ };
+
+ fps {
+ fps0 {
+ maxim,fps-event-source = <MAX77620_FPS_EVENT_SRC_EN0>;
+ maxim,shutdown-fps-time-period-us = <640>;
+ };
+
+ fps1 {
+ maxim,fps-event-source = <MAX77620_FPS_EVENT_SRC_EN1>;
+ maxim,shutdown-fps-time-period-us = <640>;
+ };
+
+ fps2 {
+ maxim,fps-event-source = <MAX77620_FPS_EVENT_SRC_EN0>;
+ maxim,shutdown-fps-time-period-us = <640>;
+ };
+ };
+
+ regulators {
+ in-sd0-supply = <&vdd_5v0_sys>;
+ in-sd1-supply = <&vdd_5v0_sys>;
+ in-sd2-supply = <&vdd_5v0_sys>;
+ in-sd3-supply = <&vdd_5v0_sys>;
+
+ in-ldo0-1-supply = <&vdd_5v0_sys>;
+ in-ldo2-supply = <&vdd_5v0_sys>;
+ in-ldo3-5-supply = <&vdd_5v0_sys>;
+ in-ldo4-6-supply = <&vdd_1v8>;
+ in-ldo7-8-supply = <&avdd_dsi_csi>;
+
+ sd0 {
+ regulator-name = "VDD_DDR_1V1_PMIC";
+ regulator-min-microvolt = <1100000>;
+ regulator-max-microvolt = <1100000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ avdd_dsi_csi: sd1 {
+ regulator-name = "AVDD_DSI_CSI_1V2";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ /* XXX */
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ vdd_1v8: sd2 {
+ regulator-name = "VDD_1V8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ /* XXX */
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ vdd_3v3_sys: sd3 {
+ regulator-name = "VDD_3V3_SYS";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ /* XXX */
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ ldo0 {
+ regulator-name = "VDD_1V8_AP_PLL";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ /* XXX */
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ ldo2 {
+ regulator-name = "VDDIO_3V3_AOHV";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ /* XXX */
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ vddio_sdmmc1: ldo3 {
+ regulator-name = "VDDIO_SDMMC1_AP";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ ldo4 {
+ regulator-name = "VDD_RTC";
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1000000>;
+ };
+
+ vddio_sdmmc3: ldo5 {
+ regulator-name = "VDDIO_SDMMC3_AP";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ };
+
+ avdd_1v05: ldo7 {
+ regulator-name = "VDD_HDMI_1V05";
+ regulator-min-microvolt = <1050000>;
+ regulator-max-microvolt = <1050000>;
+ /* XXX */
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ vdd_pex: ldo8 {
+ regulator-name = "VDD_PEX_1V05";
+ regulator-min-microvolt = <1050000>;
+ regulator-max-microvolt = <1050000>;
+ /* XXX */
+ regulator-always-on;
+ regulator-boot-on;
+ };
+ };
+ };
+ };
};
psci {
@@ -61,4 +343,39 @@
status = "okay";
method = "smc";
};
+
+ regulators {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ vdd_5v0_sys: regulator@0 {
+ compatible = "regulator-fixed";
+ reg = <0>;
+
+ regulator-name = "VDD_5V0_SYS";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ vdd_1v8_ap: regulator@1 {
+ compatible = "regulator-fixed";
+ reg = <1>;
+
+ regulator-name = "VDD_1V8_AP";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ /* XXX */
+ regulator-always-on;
+ regulator-boot-on;
+
+ gpio = <&pmic 1 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ vin-supply = <&vdd_1v8>;
+ };
+ };
};
diff --git a/arch/arm64/boot/dts/nvidia/tegra186.dtsi b/arch/arm64/boot/dts/nvidia/tegra186.dtsi
index 62fa85ae0271..5e62e68ac053 100644
--- a/arch/arm64/boot/dts/nvidia/tegra186.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra186.dtsi
@@ -2,6 +2,7 @@
#include <dt-bindings/gpio/tegra186-gpio.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/mailbox/tegra186-hsp.h>
+#include <dt-bindings/power/tegra186-powergate.h>
#include <dt-bindings/reset/tegra186-reset.h>
/ {
@@ -27,6 +28,37 @@
gpio-controller;
};
+ ethernet@2490000 {
+ compatible = "nvidia,tegra186-eqos",
+ "snps,dwc-qos-ethernet-4.10";
+ reg = <0x0 0x02490000 0x0 0x10000>;
+ interrupts = <GIC_SPI 194 IRQ_TYPE_LEVEL_HIGH>, /* common */
+ <GIC_SPI 195 IRQ_TYPE_LEVEL_HIGH>, /* power */
+ <GIC_SPI 190 IRQ_TYPE_LEVEL_HIGH>, /* rx0 */
+ <GIC_SPI 186 IRQ_TYPE_LEVEL_HIGH>, /* tx0 */
+ <GIC_SPI 191 IRQ_TYPE_LEVEL_HIGH>, /* rx1 */
+ <GIC_SPI 187 IRQ_TYPE_LEVEL_HIGH>, /* tx1 */
+ <GIC_SPI 192 IRQ_TYPE_LEVEL_HIGH>, /* rx2 */
+ <GIC_SPI 188 IRQ_TYPE_LEVEL_HIGH>, /* tx2 */
+ <GIC_SPI 193 IRQ_TYPE_LEVEL_HIGH>, /* rx3 */
+ <GIC_SPI 189 IRQ_TYPE_LEVEL_HIGH>; /* tx3 */
+ clocks = <&bpmp TEGRA186_CLK_AXI_CBB>,
+ <&bpmp TEGRA186_CLK_EQOS_AXI>,
+ <&bpmp TEGRA186_CLK_EQOS_RX>,
+ <&bpmp TEGRA186_CLK_EQOS_TX>,
+ <&bpmp TEGRA186_CLK_EQOS_PTP_REF>;
+ clock-names = "master_bus", "slave_bus", "rx", "tx", "ptp_ref";
+ resets = <&bpmp TEGRA186_RESET_EQOS>;
+ reset-names = "eqos";
+ status = "disabled";
+
+ snps,write-requests = <1>;
+ snps,read-requests = <3>;
+ snps,burst-map = <0x7>;
+ snps,txpbl = <32>;
+ snps,rxpbl = <8>;
+ };
+
uarta: serial@3100000 {
compatible = "nvidia,tegra186-uart", "nvidia,tegra20-uart";
reg = <0x0 0x03100000 0x0 0x40>;
@@ -307,6 +339,33 @@
#interrupt-cells = <2>;
};
+ pmc@c360000 {
+ compatible = "nvidia,tegra186-pmc";
+ reg = <0 0x0c360000 0 0x10000>,
+ <0 0x0c370000 0 0x10000>,
+ <0 0x0c380000 0 0x10000>,
+ <0 0x0c390000 0 0x10000>;
+ reg-names = "pmc", "wake", "aotag", "scratch";
+ };
+
+ gpu@17000000 {
+ compatible = "nvidia,gp10b";
+ reg = <0x0 0x17000000 0x0 0x1000000>,
+ <0x0 0x18000000 0x0 0x1000000>;
+ interrupts = <GIC_SPI 70 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 71 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "stall", "nonstall";
+
+ clocks = <&bpmp TEGRA186_CLK_GPCCLK>,
+ <&bpmp TEGRA186_CLK_GPU>;
+ clock-names = "gpu", "pwr";
+ resets = <&bpmp TEGRA186_RESET_GPU>;
+ reset-names = "gpu";
+ status = "disabled";
+
+ power-domains = <&bpmp TEGRA186_POWER_DOMAIN_GPU>;
+ };
+
sysram@30000000 {
compatible = "nvidia,tegra186-sysram", "mmio-sram";
reg = <0x0 0x30000000 0x0 0x50000>;
diff --git a/arch/arm64/boot/dts/nvidia/tegra210.dtsi b/arch/arm64/boot/dts/nvidia/tegra210.dtsi
index 2f832df29da8..8f26c4d4409a 100644
--- a/arch/arm64/boot/dts/nvidia/tegra210.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra210.dtsi
@@ -89,6 +89,8 @@
ranges = <0x0 0x54000000 0x0 0x54000000 0x0 0x01000000>;
+ iommus = <&mc TEGRA_SWGROUP_HC>;
+
dpaux1: dpaux@54040000 {
compatible = "nvidia,tegra210-dpaux";
reg = <0x0 0x54040000 0x0 0x00040000>;
@@ -185,7 +187,14 @@
vic@54340000 {
compatible = "nvidia,tegra210-vic";
reg = <0x0 0x54340000 0x0 0x00040000>;
- status = "disabled";
+ interrupts = <GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&tegra_car TEGRA210_CLK_VIC03>;
+ clock-names = "vic";
+ resets = <&tegra_car 178>;
+ reset-names = "vic";
+
+ iommus = <&mc TEGRA_SWGROUP_VIC>;
+ power-domains = <&pd_vic>;
};
nvjpg@54380000 {
@@ -755,6 +764,14 @@
resets = <&tegra_car TEGRA210_CLK_XUSB_HOST>;
#power-domain-cells = <0>;
};
+
+ pd_vic: vic {
+ clocks = <&tegra_car TEGRA210_CLK_VIC03>;
+ clock-names = "vic";
+ resets = <&tegra_car 178>;
+ reset-names = "vic";
+ #power-domain-cells = <0>;
+ };
};
};
diff --git a/arch/arm64/boot/dts/qcom/apq8016-sbc.dtsi b/arch/arm64/boot/dts/qcom/apq8016-sbc.dtsi
index eac5389f2f38..a17f5b9a5de6 100644
--- a/arch/arm64/boot/dts/qcom/apq8016-sbc.dtsi
+++ b/arch/arm64/boot/dts/qcom/apq8016-sbc.dtsi
@@ -35,6 +35,17 @@
stdout-path = "serial0";
};
+ reserved-memory {
+ ramoops@bff00000{
+ compatible = "ramoops";
+ reg = <0x0 0xbff00000 0x0 0x100000>;
+
+ record-size = <0x20000>;
+ console-size = <0x20000>;
+ ftrace-size = <0x20000>;
+ };
+ };
+
soc {
dma@7884000 {
status = "okay";
diff --git a/arch/arm64/boot/dts/qcom/msm8916.dtsi b/arch/arm64/boot/dts/qcom/msm8916.dtsi
index 68a8e67cba29..ab3093995ded 100644
--- a/arch/arm64/boot/dts/qcom/msm8916.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8916.dtsi
@@ -157,7 +157,7 @@
};
pmu {
- compatible = "arm,armv8-pmuv3";
+ compatible = "arm,cortex-a53-pmu";
interrupts = <GIC_PPI 7 GIC_CPU_MASK_SIMPLE(4)>;
};
@@ -833,8 +833,9 @@
clocks = <&gcc GCC_MSS_CFG_AHB_CLK>,
<&gcc GCC_MSS_Q6_BIMC_AXI_CLK>,
- <&gcc GCC_BOOT_ROM_AHB_CLK>;
- clock-names = "iface", "bus", "mem";
+ <&gcc GCC_BOOT_ROM_AHB_CLK>,
+ <&xo_board>;
+ clock-names = "iface", "bus", "mem", "xo";
qcom,smem-states = <&hexagon_smp2p_out 0>;
qcom,smem-state-names = "stop";
@@ -842,6 +843,7 @@
resets = <&scm 0>;
reset-names = "mss_restart";
+ cx-supply = <&pm8916_s1>;
mx-supply = <&pm8916_l3>;
pll-supply = <&pm8916_l7>;
@@ -856,6 +858,16 @@
mpss {
memory-region = <&mpss_mem>;
};
+
+ smd-edge {
+ interrupts = <0 25 IRQ_TYPE_EDGE_RISING>;
+
+ qcom,smd-edge = <0>;
+ qcom,ipc = <&apcs 8 12>;
+ qcom,remote-pid = <1>;
+
+ label = "hexagon";
+ };
};
pronto: wcnss@a21b000 {
@@ -1214,14 +1226,6 @@
};
};
};
-
- hexagon {
- interrupts = <0 25 IRQ_TYPE_EDGE_RISING>;
-
- qcom,smd-edge = <0>;
- qcom,ipc = <&apcs 8 12>;
- qcom,remote-pid = <1>;
- };
};
hexagon-smp2p {
diff --git a/arch/arm64/boot/dts/qcom/msm8996.dtsi b/arch/arm64/boot/dts/qcom/msm8996.dtsi
index ed7223d3c8cb..9bc9c857a000 100644
--- a/arch/arm64/boot/dts/qcom/msm8996.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8996.dtsi
@@ -534,6 +534,26 @@
};
};
+ adsp-pil {
+ compatible = "qcom,msm8996-adsp-pil";
+
+ interrupts-extended = <&intc 0 162 IRQ_TYPE_EDGE_RISING>,
+ <&adsp_smp2p_in 0 IRQ_TYPE_EDGE_RISING>,
+ <&adsp_smp2p_in 1 IRQ_TYPE_EDGE_RISING>,
+ <&adsp_smp2p_in 2 IRQ_TYPE_EDGE_RISING>,
+ <&adsp_smp2p_in 3 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "wdog", "fatal", "ready",
+ "handover", "stop-ack";
+
+ clocks = <&xo_board>;
+ clock-names = "xo";
+
+ memory-region = <&adsp_region>;
+
+ qcom,smem-states = <&adsp_smp2p_out 0>;
+ qcom,smem-state-names = "stop";
+ };
+
adsp-smp2p {
compatible = "qcom,smp2p";
qcom,smem = <443>, <429>;
@@ -547,7 +567,7 @@
adsp_smp2p_out: master-kernel {
qcom,entry-name = "master-kernel";
- #qcom,state-cells = <1>;
+ #qcom,smem-state-cells = <1>;
};
adsp_smp2p_in: slave-kernel {
@@ -557,5 +577,29 @@
#interrupt-cells = <2>;
};
};
+
+ smp2p-slpi {
+ compatible = "qcom,smp2p";
+ qcom,smem = <481>, <430>;
+
+ interrupts = <GIC_SPI 178 IRQ_TYPE_EDGE_RISING>;
+
+ qcom,ipc = <&apcs 16 26>;
+
+ qcom,local-pid = <0>;
+ qcom,remote-pid = <3>;
+
+ slpi_smp2p_in: slave-kernel {
+ qcom,entry-name = "slave-kernel";
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ slpi_smp2p_out: master-kernel {
+ qcom,entry-name = "master-kernel";
+ #qcom,smem-state-cells = <1>;
+ };
+ };
+
};
#include "msm8996-pins.dtsi"
diff --git a/arch/arm64/boot/dts/qcom/pm8994.dtsi b/arch/arm64/boot/dts/qcom/pm8994.dtsi
index 0f1866024ae3..b413e44fd09e 100644
--- a/arch/arm64/boot/dts/qcom/pm8994.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm8994.dtsi
@@ -9,6 +9,13 @@
#address-cells = <1>;
#size-cells = <0>;
+ rtc@6000 {
+ compatible = "qcom,pm8941-rtc";
+ reg = <0x6000>, <0x6100>;
+ reg-names = "rtc", "alarm";
+ interrupts = <0x0 0x61 0x1 IRQ_TYPE_EDGE_RISING>;
+ };
+
pm8994_gpios: gpios@c000 {
compatible = "qcom,pm8994-gpio";
reg = <0xc000>;
diff --git a/arch/arm64/boot/dts/renesas/r8a7795-h3ulcb.dts b/arch/arm64/boot/dts/renesas/r8a7795-h3ulcb.dts
index c5f8f69a4f5f..ab352159de65 100644
--- a/arch/arm64/boot/dts/renesas/r8a7795-h3ulcb.dts
+++ b/arch/arm64/boot/dts/renesas/r8a7795-h3ulcb.dts
@@ -33,6 +33,21 @@
reg = <0x0 0x48000000 0x0 0x38000000>;
};
+ memory@500000000 {
+ device_type = "memory";
+ reg = <0x5 0x00000000 0x0 0x40000000>;
+ };
+
+ memory@600000000 {
+ device_type = "memory";
+ reg = <0x6 0x00000000 0x0 0x40000000>;
+ };
+
+ memory@700000000 {
+ device_type = "memory";
+ reg = <0x7 0x00000000 0x0 0x40000000>;
+ };
+
leds {
compatible = "gpio-leds";
@@ -213,7 +228,6 @@
&scif_clk {
clock-frequency = <14745600>;
- status = "okay";
};
&i2c2 {
@@ -339,18 +353,7 @@
status = "okay";
phy0: ethernet-phy@0 {
- rxc-skew-ps = <900>;
- rxdv-skew-ps = <0>;
- rxd0-skew-ps = <0>;
- rxd1-skew-ps = <0>;
- rxd2-skew-ps = <0>;
- rxd3-skew-ps = <0>;
- txc-skew-ps = <900>;
- txen-skew-ps = <0>;
- txd0-skew-ps = <0>;
- txd1-skew-ps = <0>;
- txd2-skew-ps = <0>;
- txd3-skew-ps = <0>;
+ rxc-skew-ps = <1500>;
reg = <0>;
interrupt-parent = <&gpio2>;
interrupts = <11 IRQ_TYPE_LEVEL_LOW>;
diff --git a/arch/arm64/boot/dts/renesas/r8a7795-salvator-x.dts b/arch/arm64/boot/dts/renesas/r8a7795-salvator-x.dts
index 7a8986edcdc0..639aa085d996 100644
--- a/arch/arm64/boot/dts/renesas/r8a7795-salvator-x.dts
+++ b/arch/arm64/boot/dts/renesas/r8a7795-salvator-x.dts
@@ -56,7 +56,7 @@
reg = <0x0 0x48000000 0x0 0x38000000>;
};
- x12_clk: x12_clk {
+ x12_clk: x12 {
compatible = "fixed-clock";
#clock-cells = <0>;
clock-frequency = <24576000>;
@@ -247,8 +247,22 @@
};
avb_pins: avb {
- groups = "avb_mdc";
- function = "avb";
+ mux {
+ groups = "avb_link", "avb_phy_int", "avb_mdc",
+ "avb_mii";
+ function = "avb";
+ };
+
+ pins_mdc {
+ groups = "avb_mdc";
+ drive-strength = <24>;
+ };
+
+ pins_mii_tx {
+ pins = "PIN_AVB_TX_CTL", "PIN_AVB_TXC", "PIN_AVB_TD0",
+ "PIN_AVB_TD1", "PIN_AVB_TD2", "PIN_AVB_TD3";
+ drive-strength = <12>;
+ };
};
du_pins: du {
@@ -348,7 +362,6 @@
&scif_clk {
clock-frequency = <14745600>;
- status = "okay";
};
&i2c2 {
@@ -485,6 +498,10 @@
clock-frequency = <22579200>;
};
+&i2c_dvfs {
+ status = "okay";
+};
+
&avb {
pinctrl-0 = <&avb_pins>;
pinctrl-names = "default";
@@ -493,18 +510,7 @@
status = "okay";
phy0: ethernet-phy@0 {
- rxc-skew-ps = <900>;
- rxdv-skew-ps = <0>;
- rxd0-skew-ps = <0>;
- rxd1-skew-ps = <0>;
- rxd2-skew-ps = <0>;
- rxd3-skew-ps = <0>;
- txc-skew-ps = <900>;
- txen-skew-ps = <0>;
- txd0-skew-ps = <0>;
- txd1-skew-ps = <0>;
- txd2-skew-ps = <0>;
- txd3-skew-ps = <0>;
+ rxc-skew-ps = <1500>;
reg = <0>;
interrupt-parent = <&gpio2>;
interrupts = <11 IRQ_TYPE_LEVEL_LOW>;
@@ -567,7 +573,6 @@
&pcie_bus_clk {
clock-frequency = <100000000>;
- status = "okay";
};
&pciec0 {
diff --git a/arch/arm64/boot/dts/renesas/r8a7795.dtsi b/arch/arm64/boot/dts/renesas/r8a7795.dtsi
index eac4f29aa5cd..e99d6443b3e4 100644
--- a/arch/arm64/boot/dts/renesas/r8a7795.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a7795.dtsi
@@ -25,10 +25,11 @@
i2c4 = &i2c4;
i2c5 = &i2c5;
i2c6 = &i2c6;
+ i2c7 = &i2c_dvfs;
};
psci {
- compatible = "arm,psci-0.2";
+ compatible = "arm,psci-1.0", "arm,psci-0.2";
method = "smc";
};
@@ -72,17 +73,51 @@
enable-method = "psci";
};
- L2_CA57: cache-controller@0 {
+ a53_0: cpu@100 {
+ compatible = "arm,cortex-a53", "arm,armv8";
+ reg = <0x100>;
+ device_type = "cpu";
+ power-domains = <&sysc R8A7795_PD_CA53_CPU0>;
+ next-level-cache = <&L2_CA53>;
+ enable-method = "psci";
+ };
+
+ a53_1: cpu@101 {
+ compatible = "arm,cortex-a53","arm,armv8";
+ reg = <0x101>;
+ device_type = "cpu";
+ power-domains = <&sysc R8A7795_PD_CA53_CPU1>;
+ next-level-cache = <&L2_CA53>;
+ enable-method = "psci";
+ };
+
+ a53_2: cpu@102 {
+ compatible = "arm,cortex-a53","arm,armv8";
+ reg = <0x102>;
+ device_type = "cpu";
+ power-domains = <&sysc R8A7795_PD_CA53_CPU2>;
+ next-level-cache = <&L2_CA53>;
+ enable-method = "psci";
+ };
+
+ a53_3: cpu@103 {
+ compatible = "arm,cortex-a53","arm,armv8";
+ reg = <0x103>;
+ device_type = "cpu";
+ power-domains = <&sysc R8A7795_PD_CA53_CPU3>;
+ next-level-cache = <&L2_CA53>;
+ enable-method = "psci";
+ };
+
+ L2_CA57: cache-controller-0 {
compatible = "cache";
- reg = <0>;
power-domains = <&sysc R8A7795_PD_CA57_SCU>;
cache-unified;
cache-level = <2>;
};
- L2_CA53: cache-controller@100 {
+ L2_CA53: cache-controller-1 {
compatible = "cache";
- reg = <0x100>;
power-domains = <&sysc R8A7795_PD_CA53_SCU>;
cache-unified;
cache-level = <2>;
@@ -165,10 +200,11 @@
<0x0 0xf1040000 0 0x20000>,
<0x0 0xf1060000 0 0x20000>;
interrupts = <GIC_PPI 9
- (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>;
+ (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_HIGH)>;
clocks = <&cpg CPG_MOD 408>;
clock-names = "clk";
power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+ resets = <&cpg 408>;
};
wdt0: watchdog@e6020000 {
@@ -176,6 +212,7 @@
reg = <0 0xe6020000 0 0x0c>;
clocks = <&cpg CPG_MOD 402>;
power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+ resets = <&cpg 402>;
status = "disabled";
};
@@ -191,6 +228,7 @@
interrupt-controller;
clocks = <&cpg CPG_MOD 912>;
power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+ resets = <&cpg 912>;
};
gpio1: gpio@e6051000 {
@@ -205,6 +243,7 @@
interrupt-controller;
clocks = <&cpg CPG_MOD 911>;
power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+ resets = <&cpg 911>;
};
gpio2: gpio@e6052000 {
@@ -219,6 +258,7 @@
interrupt-controller;
clocks = <&cpg CPG_MOD 910>;
power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+ resets = <&cpg 910>;
};
gpio3: gpio@e6053000 {
@@ -233,6 +273,7 @@
interrupt-controller;
clocks = <&cpg CPG_MOD 909>;
power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+ resets = <&cpg 909>;
};
gpio4: gpio@e6054000 {
@@ -247,6 +288,7 @@
interrupt-controller;
clocks = <&cpg CPG_MOD 908>;
power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+ resets = <&cpg 908>;
};
gpio5: gpio@e6055000 {
@@ -261,6 +303,7 @@
interrupt-controller;
clocks = <&cpg CPG_MOD 907>;
power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+ resets = <&cpg 907>;
};
gpio6: gpio@e6055400 {
@@ -275,6 +318,7 @@
interrupt-controller;
clocks = <&cpg CPG_MOD 906>;
power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+ resets = <&cpg 906>;
};
gpio7: gpio@e6055800 {
@@ -289,6 +333,7 @@
interrupt-controller;
clocks = <&cpg CPG_MOD 905>;
power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+ resets = <&cpg 905>;
};
pmu_a57 {
@@ -303,16 +348,28 @@
<&a57_3>;
};
+ pmu_a53 {
+ compatible = "arm,cortex-a53-pmu";
+ interrupts = <GIC_SPI 84 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 85 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 87 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-affinity = <&a53_0>,
+ <&a53_1>,
+ <&a53_2>,
+ <&a53_3>;
+ };
+
timer {
compatible = "arm,armv8-timer";
interrupts = <GIC_PPI 13
- (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+ (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>,
<GIC_PPI 14
- (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+ (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>,
<GIC_PPI 11
- (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+ (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>,
<GIC_PPI 10
- (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>;
+ (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>;
};
cpg: clock-controller@e6150000 {
@@ -322,6 +379,7 @@
clock-names = "extal", "extalr";
#clock-cells = <2>;
#power-domain-cells = <0>;
+ #reset-cells = <1>;
};
rst: reset-controller@e6160000 {
@@ -358,6 +416,7 @@
GIC_SPI 161 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 407>;
power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+ resets = <&cpg 407>;
};
dmac0: dma-controller@e6700000 {
@@ -389,6 +448,7 @@
clocks = <&cpg CPG_MOD 219>;
clock-names = "fck";
power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+ resets = <&cpg 219>;
#dma-cells = <1>;
dma-channels = <16>;
};
@@ -422,6 +482,7 @@
clocks = <&cpg CPG_MOD 218>;
clock-names = "fck";
power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+ resets = <&cpg 218>;
#dma-cells = <1>;
dma-channels = <16>;
};
@@ -455,6 +516,7 @@
clocks = <&cpg CPG_MOD 217>;
clock-names = "fck";
power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+ resets = <&cpg 217>;
#dma-cells = <1>;
dma-channels = <16>;
};
@@ -488,6 +550,7 @@
clocks = <&cpg CPG_MOD 502>;
clock-names = "fck";
power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+ resets = <&cpg 502>;
#dma-cells = <1>;
dma-channels = <16>;
};
@@ -521,6 +584,7 @@
clocks = <&cpg CPG_MOD 501>;
clock-names = "fck";
power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+ resets = <&cpg 501>;
#dma-cells = <1>;
dma-channels = <16>;
};
@@ -563,7 +627,8 @@
"ch24";
clocks = <&cpg CPG_MOD 812>;
power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
- phy-mode = "rgmii-id";
+ resets = <&cpg 812>;
+ phy-mode = "rgmii-txid";
#address-cells = <1>;
#size-cells = <0>;
status = "disabled";
@@ -581,6 +646,7 @@
assigned-clocks = <&cpg CPG_CORE R8A7795_CLK_CANFD>;
assigned-clock-rates = <40000000>;
power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+ resets = <&cpg 916>;
status = "disabled";
};
@@ -596,6 +662,7 @@
assigned-clocks = <&cpg CPG_CORE R8A7795_CLK_CANFD>;
assigned-clock-rates = <40000000>;
power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+ resets = <&cpg 915>;
status = "disabled";
};
@@ -612,6 +679,7 @@
assigned-clocks = <&cpg CPG_CORE R8A7795_CLK_CANFD>;
assigned-clock-rates = <40000000>;
power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+ resets = <&cpg 914>;
status = "disabled";
channel0 {
@@ -636,6 +704,7 @@
dmas = <&dmac1 0x31>, <&dmac1 0x30>;
dma-names = "tx", "rx";
power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+ resets = <&cpg 520>;
status = "disabled";
};
@@ -652,6 +721,7 @@
dmas = <&dmac1 0x33>, <&dmac1 0x32>;
dma-names = "tx", "rx";
power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+ resets = <&cpg 519>;
status = "disabled";
};
@@ -668,6 +738,7 @@
dmas = <&dmac1 0x35>, <&dmac1 0x34>;
dma-names = "tx", "rx";
power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+ resets = <&cpg 518>;
status = "disabled";
};
@@ -684,6 +755,7 @@
dmas = <&dmac0 0x37>, <&dmac0 0x36>;
dma-names = "tx", "rx";
power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+ resets = <&cpg 517>;
status = "disabled";
};
@@ -700,6 +772,7 @@
dmas = <&dmac0 0x39>, <&dmac0 0x38>;
dma-names = "tx", "rx";
power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+ resets = <&cpg 516>;
status = "disabled";
};
@@ -715,6 +788,7 @@
dmas = <&dmac1 0x51>, <&dmac1 0x50>;
dma-names = "tx", "rx";
power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+ resets = <&cpg 207>;
status = "disabled";
};
@@ -730,6 +804,7 @@
dmas = <&dmac1 0x53>, <&dmac1 0x52>;
dma-names = "tx", "rx";
power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+ resets = <&cpg 206>;
status = "disabled";
};
@@ -745,6 +820,7 @@
dmas = <&dmac1 0x13>, <&dmac1 0x12>;
dma-names = "tx", "rx";
power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+ resets = <&cpg 310>;
status = "disabled";
};
@@ -760,6 +836,7 @@
dmas = <&dmac0 0x57>, <&dmac0 0x56>;
dma-names = "tx", "rx";
power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+ resets = <&cpg 204>;
status = "disabled";
};
@@ -775,6 +852,7 @@
dmas = <&dmac0 0x59>, <&dmac0 0x58>;
dma-names = "tx", "rx";
power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+ resets = <&cpg 203>;
status = "disabled";
};
@@ -790,6 +868,21 @@
dmas = <&dmac1 0x5b>, <&dmac1 0x5a>;
dma-names = "tx", "rx";
power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+ resets = <&cpg 202>;
+ status = "disabled";
+ };
+
+ i2c_dvfs: i2c@e60b0000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "renesas,iic-r8a7795",
+ "renesas,rcar-gen3-iic",
+ "renesas,rmobile-iic";
+ reg = <0 0xe60b0000 0 0x425>;
+ interrupts = <GIC_SPI 173 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 926>;
+ power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+ resets = <&cpg 926>;
status = "disabled";
};
@@ -802,6 +895,7 @@
interrupts = <GIC_SPI 287 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 931>;
power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+ resets = <&cpg 931>;
dmas = <&dmac1 0x91>, <&dmac1 0x90>;
dma-names = "tx", "rx";
i2c-scl-internal-delay-ns = <110>;
@@ -817,6 +911,7 @@
interrupts = <GIC_SPI 288 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 930>;
power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+ resets = <&cpg 930>;
dmas = <&dmac1 0x93>, <&dmac1 0x92>;
dma-names = "tx", "rx";
i2c-scl-internal-delay-ns = <6>;
@@ -832,6 +927,7 @@
interrupts = <GIC_SPI 286 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 929>;
power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+ resets = <&cpg 929>;
dmas = <&dmac1 0x95>, <&dmac1 0x94>;
dma-names = "tx", "rx";
i2c-scl-internal-delay-ns = <6>;
@@ -847,6 +943,7 @@
interrupts = <GIC_SPI 290 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 928>;
power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+ resets = <&cpg 928>;
dmas = <&dmac0 0x97>, <&dmac0 0x96>;
dma-names = "tx", "rx";
i2c-scl-internal-delay-ns = <110>;
@@ -862,6 +959,7 @@
interrupts = <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 927>;
power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+ resets = <&cpg 927>;
dmas = <&dmac0 0x99>, <&dmac0 0x98>;
dma-names = "tx", "rx";
i2c-scl-internal-delay-ns = <110>;
@@ -877,6 +975,7 @@
interrupts = <GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 919>;
power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+ resets = <&cpg 919>;
dmas = <&dmac0 0x9b>, <&dmac0 0x9a>;
dma-names = "tx", "rx";
i2c-scl-internal-delay-ns = <110>;
@@ -892,6 +991,7 @@
interrupts = <GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 918>;
power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+ resets = <&cpg 918>;
dmas = <&dmac0 0x9d>, <&dmac0 0x9c>;
dma-names = "tx", "rx";
i2c-scl-internal-delay-ns = <6>;
@@ -903,6 +1003,7 @@
reg = <0 0xe6e30000 0 0x8>;
clocks = <&cpg CPG_MOD 523>;
power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+ resets = <&cpg 523>;
#pwm-cells = <2>;
status = "disabled";
};
@@ -912,6 +1013,7 @@
reg = <0 0xe6e31000 0 0x8>;
clocks = <&cpg CPG_MOD 523>;
power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+ resets = <&cpg 523>;
#pwm-cells = <2>;
status = "disabled";
};
@@ -921,6 +1023,7 @@
reg = <0 0xe6e32000 0 0x8>;
clocks = <&cpg CPG_MOD 523>;
power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+ resets = <&cpg 523>;
#pwm-cells = <2>;
status = "disabled";
};
@@ -930,6 +1033,7 @@
reg = <0 0xe6e33000 0 0x8>;
clocks = <&cpg CPG_MOD 523>;
power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+ resets = <&cpg 523>;
#pwm-cells = <2>;
status = "disabled";
};
@@ -939,6 +1043,7 @@
reg = <0 0xe6e34000 0 0x8>;
clocks = <&cpg CPG_MOD 523>;
power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+ resets = <&cpg 523>;
#pwm-cells = <2>;
status = "disabled";
};
@@ -948,6 +1053,7 @@
reg = <0 0xe6e35000 0 0x8>;
clocks = <&cpg CPG_MOD 523>;
power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+ resets = <&cpg 523>;
#pwm-cells = <2>;
status = "disabled";
};
@@ -957,6 +1063,7 @@
reg = <0 0xe6e36000 0 0x8>;
clocks = <&cpg CPG_MOD 523>;
power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+ resets = <&cpg 523>;
#pwm-cells = <2>;
status = "disabled";
};
@@ -1015,11 +1122,11 @@
rcar_sound,dvc {
dvc0: dvc-0 {
- dmas = <&audma0 0xbc>;
+ dmas = <&audma1 0xbc>;
dma-names = "tx";
};
dvc1: dvc-1 {
- dmas = <&audma0 0xbe>;
+ dmas = <&audma1 0xbe>;
dma-names = "tx";
};
};
@@ -1149,10 +1256,11 @@
sata: sata@ee300000 {
compatible = "renesas,sata-r8a7795";
- reg = <0 0xee300000 0 0x1fff>;
+ reg = <0 0xee300000 0 0x200000>;
interrupts = <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 815>;
power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+ resets = <&cpg 815>;
status = "disabled";
};
@@ -1162,6 +1270,7 @@
interrupts = <GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 328>;
power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+ resets = <&cpg 328>;
status = "disabled";
};
@@ -1171,6 +1280,7 @@
interrupts = <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 327>;
power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+ resets = <&cpg 327>;
status = "disabled";
};
@@ -1183,6 +1293,7 @@
interrupt-names = "ch0", "ch1";
clocks = <&cpg CPG_MOD 330>;
power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+ resets = <&cpg 330>;
#dma-cells = <1>;
dma-channels = <2>;
};
@@ -1196,6 +1307,7 @@
interrupt-names = "ch0", "ch1";
clocks = <&cpg CPG_MOD 331>;
power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+ resets = <&cpg 331>;
#dma-cells = <1>;
dma-channels = <2>;
};
@@ -1207,6 +1319,7 @@
clocks = <&cpg CPG_MOD 314>;
max-frequency = <200000000>;
power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+ resets = <&cpg 314>;
status = "disabled";
};
@@ -1217,6 +1330,7 @@
clocks = <&cpg CPG_MOD 313>;
max-frequency = <200000000>;
power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+ resets = <&cpg 313>;
status = "disabled";
};
@@ -1227,6 +1341,7 @@
clocks = <&cpg CPG_MOD 312>;
max-frequency = <200000000>;
power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+ resets = <&cpg 312>;
status = "disabled";
};
@@ -1237,6 +1352,7 @@
clocks = <&cpg CPG_MOD 311>;
max-frequency = <200000000>;
power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+ resets = <&cpg 311>;
status = "disabled";
};
@@ -1247,6 +1363,7 @@
interrupts = <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 703>;
power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+ resets = <&cpg 703>;
#phy-cells = <0>;
status = "disabled";
};
@@ -1257,6 +1374,7 @@
reg = <0 0xee0a0200 0 0x700>;
clocks = <&cpg CPG_MOD 702>;
power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+ resets = <&cpg 702>;
#phy-cells = <0>;
status = "disabled";
};
@@ -1267,6 +1385,7 @@
reg = <0 0xee0c0200 0 0x700>;
clocks = <&cpg CPG_MOD 701>;
power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+ resets = <&cpg 701>;
#phy-cells = <0>;
status = "disabled";
};
@@ -1279,6 +1398,7 @@
phys = <&usb2_phy0>;
phy-names = "usb";
power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+ resets = <&cpg 703>;
status = "disabled";
};
@@ -1290,6 +1410,7 @@
phys = <&usb2_phy1>;
phy-names = "usb";
power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+ resets = <&cpg 702>;
status = "disabled";
};
@@ -1301,6 +1422,7 @@
phys = <&usb2_phy2>;
phy-names = "usb";
power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+ resets = <&cpg 701>;
status = "disabled";
};
@@ -1312,6 +1434,7 @@
phys = <&usb2_phy0>;
phy-names = "usb";
power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+ resets = <&cpg 703>;
status = "disabled";
};
@@ -1323,6 +1446,7 @@
phys = <&usb2_phy1>;
phy-names = "usb";
power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+ resets = <&cpg 702>;
status = "disabled";
};
@@ -1334,6 +1458,7 @@
phys = <&usb2_phy2>;
phy-names = "usb";
power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+ resets = <&cpg 701>;
status = "disabled";
};
@@ -1350,6 +1475,7 @@
phys = <&usb2_phy0>;
phy-names = "usb";
power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+ resets = <&cpg 704>;
status = "disabled";
};
@@ -1376,6 +1502,7 @@
clocks = <&cpg CPG_MOD 319>, <&pcie_bus_clk>;
clock-names = "pcie", "pcie_bus";
power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+ resets = <&cpg 319>;
status = "disabled";
};
@@ -1402,6 +1529,7 @@
clocks = <&cpg CPG_MOD 318>, <&pcie_bus_clk>;
clock-names = "pcie", "pcie_bus";
power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+ resets = <&cpg 318>;
status = "disabled";
};
@@ -1411,6 +1539,7 @@
interrupts = <GIC_SPI 465 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 624>;
power-domains = <&sysc R8A7795_PD_A3VP>;
+ resets = <&cpg 624>;
renesas,fcp = <&fcpvb1>;
};
@@ -1420,6 +1549,7 @@
reg = <0 0xfe92f000 0 0x200>;
clocks = <&cpg CPG_MOD 606>;
power-domains = <&sysc R8A7795_PD_A3VP>;
+ resets = <&cpg 606>;
};
fcpf0: fcp@fe950000 {
@@ -1427,6 +1557,7 @@
reg = <0 0xfe950000 0 0x200>;
clocks = <&cpg CPG_MOD 615>;
power-domains = <&sysc R8A7795_PD_A3VP>;
+ resets = <&cpg 615>;
};
fcpf1: fcp@fe951000 {
@@ -1434,6 +1565,7 @@
reg = <0 0xfe951000 0 0x200>;
clocks = <&cpg CPG_MOD 614>;
power-domains = <&sysc R8A7795_PD_A3VP>;
+ resets = <&cpg 614>;
};
fcpf2: fcp@fe952000 {
@@ -1441,6 +1573,7 @@
reg = <0 0xfe952000 0 0x200>;
clocks = <&cpg CPG_MOD 613>;
power-domains = <&sysc R8A7795_PD_A3VP>;
+ resets = <&cpg 613>;
};
vspbd: vsp@fe960000 {
@@ -1449,6 +1582,7 @@
interrupts = <GIC_SPI 266 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 626>;
power-domains = <&sysc R8A7795_PD_A3VP>;
+ resets = <&cpg 626>;
renesas,fcp = <&fcpvb0>;
};
@@ -1458,6 +1592,7 @@
reg = <0 0xfe96f000 0 0x200>;
clocks = <&cpg CPG_MOD 607>;
power-domains = <&sysc R8A7795_PD_A3VP>;
+ resets = <&cpg 607>;
};
vspi0: vsp@fe9a0000 {
@@ -1466,6 +1601,7 @@
interrupts = <GIC_SPI 444 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 631>;
power-domains = <&sysc R8A7795_PD_A3VP>;
+ resets = <&cpg 631>;
renesas,fcp = <&fcpvi0>;
};
@@ -1475,6 +1611,7 @@
reg = <0 0xfe9af000 0 0x200>;
clocks = <&cpg CPG_MOD 611>;
power-domains = <&sysc R8A7795_PD_A3VP>;
+ resets = <&cpg 611>;
};
vspi1: vsp@fe9b0000 {
@@ -1483,6 +1620,7 @@
interrupts = <GIC_SPI 445 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 630>;
power-domains = <&sysc R8A7795_PD_A3VP>;
+ resets = <&cpg 630>;
renesas,fcp = <&fcpvi1>;
};
@@ -1492,6 +1630,7 @@
reg = <0 0xfe9bf000 0 0x200>;
clocks = <&cpg CPG_MOD 610>;
power-domains = <&sysc R8A7795_PD_A3VP>;
+ resets = <&cpg 610>;
};
vspi2: vsp@fe9c0000 {
@@ -1500,6 +1639,7 @@
interrupts = <GIC_SPI 446 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 629>;
power-domains = <&sysc R8A7795_PD_A3VP>;
+ resets = <&cpg 629>;
renesas,fcp = <&fcpvi2>;
};
@@ -1509,6 +1649,7 @@
reg = <0 0xfe9cf000 0 0x200>;
clocks = <&cpg CPG_MOD 609>;
power-domains = <&sysc R8A7795_PD_A3VP>;
+ resets = <&cpg 609>;
};
vspd0: vsp@fea20000 {
@@ -1517,6 +1658,7 @@
interrupts = <GIC_SPI 466 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 623>;
power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+ resets = <&cpg 623>;
renesas,fcp = <&fcpvd0>;
};
@@ -1526,6 +1668,7 @@
reg = <0 0xfea27000 0 0x200>;
clocks = <&cpg CPG_MOD 603>;
power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+ resets = <&cpg 603>;
};
vspd1: vsp@fea28000 {
@@ -1534,6 +1677,7 @@
interrupts = <GIC_SPI 467 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 622>;
power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+ resets = <&cpg 622>;
renesas,fcp = <&fcpvd1>;
};
@@ -1543,6 +1687,7 @@
reg = <0 0xfea2f000 0 0x200>;
clocks = <&cpg CPG_MOD 602>;
power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+ resets = <&cpg 602>;
};
vspd2: vsp@fea30000 {
@@ -1551,6 +1696,7 @@
interrupts = <GIC_SPI 468 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 621>;
power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+ resets = <&cpg 621>;
renesas,fcp = <&fcpvd2>;
};
@@ -1560,6 +1706,7 @@
reg = <0 0xfea37000 0 0x200>;
clocks = <&cpg CPG_MOD 601>;
power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+ resets = <&cpg 601>;
};
vspd3: vsp@fea38000 {
@@ -1568,6 +1715,7 @@
interrupts = <GIC_SPI 469 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 620>;
power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+ resets = <&cpg 620>;
renesas,fcp = <&fcpvd3>;
};
@@ -1577,6 +1725,7 @@
reg = <0 0xfea3f000 0 0x200>;
clocks = <&cpg CPG_MOD 600>;
power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+ resets = <&cpg 600>;
};
fdp1@fe940000 {
@@ -1585,6 +1734,7 @@
interrupts = <GIC_SPI 262 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 119>;
power-domains = <&sysc R8A7795_PD_A3VP>;
+ resets = <&cpg 119>;
renesas,fcp = <&fcpf0>;
};
@@ -1594,6 +1744,7 @@
interrupts = <GIC_SPI 263 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 118>;
power-domains = <&sysc R8A7795_PD_A3VP>;
+ resets = <&cpg 118>;
renesas,fcp = <&fcpf1>;
};
@@ -1603,6 +1754,7 @@
interrupts = <GIC_SPI 264 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 117>;
power-domains = <&sysc R8A7795_PD_A3VP>;
+ resets = <&cpg 117>;
renesas,fcp = <&fcpf2>;
};
@@ -1662,6 +1814,7 @@
<GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 522>;
power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+ resets = <&cpg 522>;
#thermal-sensor-cells = <1>;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/renesas/r8a7796-m3ulcb.dts b/arch/arm64/boot/dts/renesas/r8a7796-m3ulcb.dts
index c3f064ac2cb4..372b2a944716 100644
--- a/arch/arm64/boot/dts/renesas/r8a7796-m3ulcb.dts
+++ b/arch/arm64/boot/dts/renesas/r8a7796-m3ulcb.dts
@@ -180,7 +180,6 @@
&scif_clk {
clock-frequency = <14745600>;
- status = "okay";
};
&wdt0 {
diff --git a/arch/arm64/boot/dts/renesas/r8a7796-salvator-x.dts b/arch/arm64/boot/dts/renesas/r8a7796-salvator-x.dts
index c7f40f8f3169..c9f59b6ce33f 100644
--- a/arch/arm64/boot/dts/renesas/r8a7796-salvator-x.dts
+++ b/arch/arm64/boot/dts/renesas/r8a7796-salvator-x.dts
@@ -18,6 +18,7 @@
aliases {
serial0 = &scif2;
+ serial1 = &scif1;
ethernet0 = &avb;
};
@@ -113,6 +114,11 @@
function = "avb";
};
+ scif1_pins: scif1 {
+ groups = "scif1_data_a", "scif1_ctrl";
+ function = "scif1";
+ };
+
scif2_pins: scif2 {
groups = "scif2_data_a";
function = "scif2";
@@ -172,18 +178,7 @@
status = "okay";
phy0: ethernet-phy@0 {
- rxc-skew-ps = <900>;
- rxdv-skew-ps = <0>;
- rxd0-skew-ps = <0>;
- rxd1-skew-ps = <0>;
- rxd2-skew-ps = <0>;
- rxd3-skew-ps = <0>;
- txc-skew-ps = <900>;
- txen-skew-ps = <0>;
- txd0-skew-ps = <0>;
- txd1-skew-ps = <0>;
- txd2-skew-ps = <0>;
- txd3-skew-ps = <0>;
+ rxc-skew-ps = <1500>;
reg = <0>;
interrupt-parent = <&gpio2>;
interrupts = <11 IRQ_TYPE_LEVEL_LOW>;
@@ -239,6 +234,14 @@
status = "okay";
};
+&scif1 {
+ pinctrl-0 = <&scif1_pins>;
+ pinctrl-names = "default";
+
+ uart-has-rtscts;
+ status = "okay";
+};
+
&scif2 {
pinctrl-0 = <&scif2_pins>;
pinctrl-names = "default";
@@ -247,7 +250,6 @@
&scif_clk {
clock-frequency = <14745600>;
- status = "okay";
};
&i2c2 {
@@ -261,3 +263,7 @@
timeout-sec = <60>;
status = "okay";
};
+
+&i2c_dvfs {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/renesas/r8a7796.dtsi b/arch/arm64/boot/dts/renesas/r8a7796.dtsi
index f7120cdedd0d..2ec1ed5f4991 100644
--- a/arch/arm64/boot/dts/renesas/r8a7796.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a7796.dtsi
@@ -25,10 +25,11 @@
i2c4 = &i2c4;
i2c5 = &i2c5;
i2c6 = &i2c6;
+ i2c7 = &i2c_dvfs;
};
psci {
- compatible = "arm,psci-0.2";
+ compatible = "arm,psci-1.0", "arm,psci-0.2";
method = "smc";
};
@@ -36,7 +37,6 @@
#address-cells = <1>;
#size-cells = <0>;
- /* 1 core only at this point */
a57_0: cpu@0 {
compatible = "arm,cortex-a57", "arm,armv8";
reg = <0x0>;
@@ -46,13 +46,64 @@
enable-method = "psci";
};
- L2_CA57: cache-controller@0 {
+ a57_1: cpu@1 {
+ compatible = "arm,cortex-a57","arm,armv8";
+ reg = <0x1>;
+ device_type = "cpu";
+ power-domains = <&sysc R8A7796_PD_CA57_CPU1>;
+ next-level-cache = <&L2_CA57>;
+ enable-method = "psci";
+ };
+
+ a53_0: cpu@100 {
+ compatible = "arm,cortex-a53", "arm,armv8";
+ reg = <0x100>;
+ device_type = "cpu";
+ power-domains = <&sysc R8A7796_PD_CA53_CPU0>;
+ next-level-cache = <&L2_CA53>;
+ enable-method = "psci";
+ };
+
+ a53_1: cpu@101 {
+ compatible = "arm,cortex-a53","arm,armv8";
+ reg = <0x101>;
+ device_type = "cpu";
+ power-domains = <&sysc R8A7796_PD_CA53_CPU1>;
+ next-level-cache = <&L2_CA53>;
+ enable-method = "psci";
+ };
+
+ a53_2: cpu@102 {
+ compatible = "arm,cortex-a53","arm,armv8";
+ reg = <0x102>;
+ device_type = "cpu";
+ power-domains = <&sysc R8A7796_PD_CA53_CPU2>;
+ next-level-cache = <&L2_CA53>;
+ enable-method = "psci";
+ };
+
+ a53_3: cpu@103 {
+ compatible = "arm,cortex-a53","arm,armv8";
+ reg = <0x103>;
+ device_type = "cpu";
+ power-domains = <&sysc R8A7796_PD_CA53_CPU3>;
+ next-level-cache = <&L2_CA53>;
+ enable-method = "psci";
+ };
+
+ L2_CA57: cache-controller-0 {
compatible = "cache";
- reg = <0>;
power-domains = <&sysc R8A7796_PD_CA57_SCU>;
cache-unified;
cache-level = <2>;
};
+
+ L2_CA53: cache-controller-1 {
+ compatible = "cache";
+ power-domains = <&sysc R8A7796_PD_CA53_SCU>;
+ cache-unified;
+ cache-level = <2>;
+ };
};
extal_clk: extal {
@@ -100,22 +151,23 @@
<0x0 0xf1040000 0 0x20000>,
<0x0 0xf1060000 0 0x20000>;
interrupts = <GIC_PPI 9
- (GIC_CPU_MASK_SIMPLE(1) | IRQ_TYPE_LEVEL_HIGH)>;
+ (GIC_CPU_MASK_SIMPLE(6) | IRQ_TYPE_LEVEL_HIGH)>;
clocks = <&cpg CPG_MOD 408>;
clock-names = "clk";
power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+ resets = <&cpg 408>;
};
timer {
compatible = "arm,armv8-timer";
interrupts = <GIC_PPI 13
- (GIC_CPU_MASK_SIMPLE(1) | IRQ_TYPE_LEVEL_LOW)>,
+ (GIC_CPU_MASK_SIMPLE(6) | IRQ_TYPE_LEVEL_LOW)>,
<GIC_PPI 14
- (GIC_CPU_MASK_SIMPLE(1) | IRQ_TYPE_LEVEL_LOW)>,
+ (GIC_CPU_MASK_SIMPLE(6) | IRQ_TYPE_LEVEL_LOW)>,
<GIC_PPI 11
- (GIC_CPU_MASK_SIMPLE(1) | IRQ_TYPE_LEVEL_LOW)>,
+ (GIC_CPU_MASK_SIMPLE(6) | IRQ_TYPE_LEVEL_LOW)>,
<GIC_PPI 10
- (GIC_CPU_MASK_SIMPLE(1) | IRQ_TYPE_LEVEL_LOW)>;
+ (GIC_CPU_MASK_SIMPLE(6) | IRQ_TYPE_LEVEL_LOW)>;
};
wdt0: watchdog@e6020000 {
@@ -124,6 +176,7 @@
reg = <0 0xe6020000 0 0x0c>;
clocks = <&cpg CPG_MOD 402>;
power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+ resets = <&cpg 402>;
status = "disabled";
};
@@ -139,6 +192,7 @@
interrupt-controller;
clocks = <&cpg CPG_MOD 912>;
power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+ resets = <&cpg 912>;
};
gpio1: gpio@e6051000 {
@@ -153,6 +207,7 @@
interrupt-controller;
clocks = <&cpg CPG_MOD 911>;
power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+ resets = <&cpg 911>;
};
gpio2: gpio@e6052000 {
@@ -167,6 +222,7 @@
interrupt-controller;
clocks = <&cpg CPG_MOD 910>;
power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+ resets = <&cpg 910>;
};
gpio3: gpio@e6053000 {
@@ -181,6 +237,7 @@
interrupt-controller;
clocks = <&cpg CPG_MOD 909>;
power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+ resets = <&cpg 909>;
};
gpio4: gpio@e6054000 {
@@ -195,6 +252,7 @@
interrupt-controller;
clocks = <&cpg CPG_MOD 908>;
power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+ resets = <&cpg 908>;
};
gpio5: gpio@e6055000 {
@@ -209,6 +267,7 @@
interrupt-controller;
clocks = <&cpg CPG_MOD 907>;
power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+ resets = <&cpg 907>;
};
gpio6: gpio@e6055400 {
@@ -223,6 +282,7 @@
interrupt-controller;
clocks = <&cpg CPG_MOD 906>;
power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+ resets = <&cpg 906>;
};
gpio7: gpio@e6055800 {
@@ -237,6 +297,7 @@
interrupt-controller;
clocks = <&cpg CPG_MOD 905>;
power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+ resets = <&cpg 905>;
};
pfc: pin-controller@e6060000 {
@@ -244,6 +305,26 @@
reg = <0 0xe6060000 0 0x50c>;
};
+ pmu_a57 {
+ compatible = "arm,cortex-a57-pmu";
+ interrupts = <GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-affinity = <&a57_0>,
+ <&a57_1>;
+ };
+
+ pmu_a53 {
+ compatible = "arm,cortex-a53-pmu";
+ interrupts = <GIC_SPI 84 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 85 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 87 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-affinity = <&a53_0>,
+ <&a53_1>,
+ <&a53_2>,
+ <&a53_3>;
+ };
+
cpg: clock-controller@e6150000 {
compatible = "renesas,r8a7796-cpg-mssr";
reg = <0 0xe6150000 0 0x1000>;
@@ -251,6 +332,7 @@
clock-names = "extal", "extalr";
#clock-cells = <2>;
#power-domain-cells = <0>;
+ #reset-cells = <1>;
};
rst: reset-controller@e6160000 {
@@ -269,6 +351,20 @@
#power-domain-cells = <1>;
};
+ i2c_dvfs: i2c@e60b0000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "renesas,iic-r8a7796",
+ "renesas,rcar-gen3-iic",
+ "renesas,rmobile-iic";
+ reg = <0 0xe60b0000 0 0x425>;
+ interrupts = <GIC_SPI 173 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 926>;
+ power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+ resets = <&cpg 926>;
+ status = "disabled";
+ };
+
i2c0: i2c@e6500000 {
#address-cells = <1>;
#size-cells = <0>;
@@ -278,6 +374,7 @@
interrupts = <GIC_SPI 287 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 931>;
power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+ resets = <&cpg 931>;
dmas = <&dmac1 0x91>, <&dmac1 0x90>,
<&dmac2 0x91>, <&dmac2 0x90>;
dma-names = "tx", "rx", "tx", "rx";
@@ -294,6 +391,7 @@
interrupts = <GIC_SPI 288 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 930>;
power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+ resets = <&cpg 930>;
dmas = <&dmac1 0x93>, <&dmac1 0x92>,
<&dmac2 0x93>, <&dmac2 0x92>;
dma-names = "tx", "rx", "tx", "rx";
@@ -310,6 +408,7 @@
interrupts = <GIC_SPI 286 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 929>;
power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+ resets = <&cpg 929>;
dmas = <&dmac1 0x95>, <&dmac1 0x94>,
<&dmac2 0x95>, <&dmac2 0x94>;
dma-names = "tx", "rx", "tx", "rx";
@@ -326,6 +425,7 @@
interrupts = <GIC_SPI 290 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 928>;
power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+ resets = <&cpg 928>;
dmas = <&dmac0 0x97>, <&dmac0 0x96>;
dma-names = "tx", "rx";
i2c-scl-internal-delay-ns = <110>;
@@ -341,6 +441,7 @@
interrupts = <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 927>;
power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+ resets = <&cpg 927>;
dmas = <&dmac0 0x99>, <&dmac0 0x98>;
dma-names = "tx", "rx";
i2c-scl-internal-delay-ns = <110>;
@@ -356,6 +457,7 @@
interrupts = <GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 919>;
power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+ resets = <&cpg 919>;
dmas = <&dmac0 0x9b>, <&dmac0 0x9a>;
dma-names = "tx", "rx";
i2c-scl-internal-delay-ns = <110>;
@@ -371,6 +473,7 @@
interrupts = <GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 918>;
power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+ resets = <&cpg 918>;
dmas = <&dmac0 0x9d>, <&dmac0 0x9c>;
dma-names = "tx", "rx";
i2c-scl-internal-delay-ns = <6>;
@@ -389,6 +492,7 @@
assigned-clocks = <&cpg CPG_CORE R8A7796_CLK_CANFD>;
assigned-clock-rates = <40000000>;
power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+ resets = <&cpg 916>;
status = "disabled";
};
@@ -404,6 +508,7 @@
assigned-clocks = <&cpg CPG_CORE R8A7796_CLK_CANFD>;
assigned-clock-rates = <40000000>;
power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+ resets = <&cpg 915>;
status = "disabled";
};
@@ -420,6 +525,7 @@
assigned-clocks = <&cpg CPG_CORE R8A7796_CLK_CANFD>;
assigned-clock-rates = <40000000>;
power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+ resets = <&cpg 914>;
status = "disabled";
channel0 {
@@ -469,12 +575,135 @@
"ch24";
clocks = <&cpg CPG_MOD 812>;
power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
- phy-mode = "rgmii-id";
+ resets = <&cpg 812>;
+ phy-mode = "rgmii-txid";
#address-cells = <1>;
#size-cells = <0>;
status = "disabled";
};
+ hscif0: serial@e6540000 {
+ compatible = "renesas,hscif-r8a7796",
+ "renesas,rcar-gen3-hscif",
+ "renesas,hscif";
+ reg = <0 0xe6540000 0 0x60>;
+ interrupts = <GIC_SPI 154 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 520>,
+ <&cpg CPG_CORE R8A7796_CLK_S3D1>,
+ <&scif_clk>;
+ clock-names = "fck", "brg_int", "scif_clk";
+ dmas = <&dmac1 0x31>, <&dmac1 0x30>,
+ <&dmac2 0x31>, <&dmac2 0x30>;
+ dma-names = "tx", "rx", "tx", "rx";
+ power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+ resets = <&cpg 520>;
+ status = "disabled";
+ };
+
+ hscif1: serial@e6550000 {
+ compatible = "renesas,hscif-r8a7796",
+ "renesas,rcar-gen3-hscif",
+ "renesas,hscif";
+ reg = <0 0xe6550000 0 0x60>;
+ interrupts = <GIC_SPI 155 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 519>,
+ <&cpg CPG_CORE R8A7796_CLK_S3D1>,
+ <&scif_clk>;
+ clock-names = "fck", "brg_int", "scif_clk";
+ dmas = <&dmac1 0x33>, <&dmac1 0x32>,
+ <&dmac2 0x33>, <&dmac2 0x32>;
+ dma-names = "tx", "rx", "tx", "rx";
+ power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+ resets = <&cpg 519>;
+ status = "disabled";
+ };
+
+ hscif2: serial@e6560000 {
+ compatible = "renesas,hscif-r8a7796",
+ "renesas,rcar-gen3-hscif",
+ "renesas,hscif";
+ reg = <0 0xe6560000 0 0x60>;
+ interrupts = <GIC_SPI 144 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 518>,
+ <&cpg CPG_CORE R8A7796_CLK_S3D1>,
+ <&scif_clk>;
+ clock-names = "fck", "brg_int", "scif_clk";
+ dmas = <&dmac1 0x35>, <&dmac1 0x34>,
+ <&dmac2 0x35>, <&dmac2 0x34>;
+ dma-names = "tx", "rx", "tx", "rx";
+ power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+ resets = <&cpg 518>;
+ status = "disabled";
+ };
+
+ hscif3: serial@e66a0000 {
+ compatible = "renesas,hscif-r8a7796",
+ "renesas,rcar-gen3-hscif",
+ "renesas,hscif";
+ reg = <0 0xe66a0000 0 0x60>;
+ interrupts = <GIC_SPI 145 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 517>,
+ <&cpg CPG_CORE R8A7796_CLK_S3D1>,
+ <&scif_clk>;
+ clock-names = "fck", "brg_int", "scif_clk";
+ dmas = <&dmac0 0x37>, <&dmac0 0x36>;
+ dma-names = "tx", "rx";
+ power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+ resets = <&cpg 517>;
+ status = "disabled";
+ };
+
+ hscif4: serial@e66b0000 {
+ compatible = "renesas,hscif-r8a7796",
+ "renesas,rcar-gen3-hscif",
+ "renesas,hscif";
+ reg = <0 0xe66b0000 0 0x60>;
+ interrupts = <GIC_SPI 146 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 516>,
+ <&cpg CPG_CORE R8A7796_CLK_S3D1>,
+ <&scif_clk>;
+ clock-names = "fck", "brg_int", "scif_clk";
+ dmas = <&dmac0 0x39>, <&dmac0 0x38>;
+ dma-names = "tx", "rx";
+ power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+ resets = <&cpg 516>;
+ status = "disabled";
+ };
+
+ scif0: serial@e6e60000 {
+ compatible = "renesas,scif-r8a7796",
+ "renesas,rcar-gen3-scif", "renesas,scif";
+ reg = <0 0xe6e60000 0 64>;
+ interrupts = <GIC_SPI 152 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 207>,
+ <&cpg CPG_CORE R8A7796_CLK_S3D1>,
+ <&scif_clk>;
+ clock-names = "fck", "brg_int", "scif_clk";
+ dmas = <&dmac1 0x51>, <&dmac1 0x50>,
+ <&dmac2 0x51>, <&dmac2 0x50>;
+ dma-names = "tx", "rx", "tx", "rx";
+ power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+ resets = <&cpg 207>;
+ status = "disabled";
+ };
+
+ scif1: serial@e6e68000 {
+ compatible = "renesas,scif-r8a7796",
+ "renesas,rcar-gen3-scif", "renesas,scif";
+ reg = <0 0xe6e68000 0 64>;
+ interrupts = <GIC_SPI 153 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 206>,
+ <&cpg CPG_CORE R8A7796_CLK_S3D1>,
+ <&scif_clk>;
+ clock-names = "fck", "brg_int", "scif_clk";
+ dmas = <&dmac1 0x53>, <&dmac1 0x52>,
+ <&dmac2 0x53>, <&dmac2 0x52>;
+ dma-names = "tx", "rx", "tx", "rx";
+ power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+ resets = <&cpg 206>;
+ status = "disabled";
+ };
+
scif2: serial@e6e88000 {
compatible = "renesas,scif-r8a7796",
"renesas,rcar-gen3-scif", "renesas,scif";
@@ -485,6 +714,56 @@
<&scif_clk>;
clock-names = "fck", "brg_int", "scif_clk";
power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+ resets = <&cpg 310>;
+ status = "disabled";
+ };
+
+ scif3: serial@e6c50000 {
+ compatible = "renesas,scif-r8a7796",
+ "renesas,rcar-gen3-scif", "renesas,scif";
+ reg = <0 0xe6c50000 0 64>;
+ interrupts = <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 204>,
+ <&cpg CPG_CORE R8A7796_CLK_S3D1>,
+ <&scif_clk>;
+ clock-names = "fck", "brg_int", "scif_clk";
+ dmas = <&dmac0 0x57>, <&dmac0 0x56>;
+ dma-names = "tx", "rx";
+ power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+ resets = <&cpg 204>;
+ status = "disabled";
+ };
+
+ scif4: serial@e6c40000 {
+ compatible = "renesas,scif-r8a7796",
+ "renesas,rcar-gen3-scif", "renesas,scif";
+ reg = <0 0xe6c40000 0 64>;
+ interrupts = <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 203>,
+ <&cpg CPG_CORE R8A7796_CLK_S3D1>,
+ <&scif_clk>;
+ clock-names = "fck", "brg_int", "scif_clk";
+ dmas = <&dmac0 0x59>, <&dmac0 0x58>;
+ dma-names = "tx", "rx";
+ power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+ resets = <&cpg 203>;
+ status = "disabled";
+ };
+
+ scif5: serial@e6f30000 {
+ compatible = "renesas,scif-r8a7796",
+ "renesas,rcar-gen3-scif", "renesas,scif";
+ reg = <0 0xe6f30000 0 64>;
+ interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 202>,
+ <&cpg CPG_CORE R8A7796_CLK_S3D1>,
+ <&scif_clk>;
+ clock-names = "fck", "brg_int", "scif_clk";
+ dmas = <&dmac1 0x5b>, <&dmac1 0x5a>,
+ <&dmac2 0x5b>, <&dmac2 0x5a>;
+ dma-names = "tx", "rx", "tx", "rx";
+ power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+ resets = <&cpg 202>;
status = "disabled";
};
@@ -498,6 +777,7 @@
<&dmac2 0x41>, <&dmac2 0x40>;
dma-names = "tx", "rx";
power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+ resets = <&cpg 211>;
#address-cells = <1>;
#size-cells = <0>;
status = "disabled";
@@ -513,6 +793,7 @@
<&dmac2 0x43>, <&dmac2 0x42>;
dma-names = "tx", "rx";
power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+ resets = <&cpg 210>;
#address-cells = <1>;
#size-cells = <0>;
status = "disabled";
@@ -527,6 +808,7 @@
dmas = <&dmac0 0x45>, <&dmac0 0x44>;
dma-names = "tx", "rx";
power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+ resets = <&cpg 209>;
#address-cells = <1>;
#size-cells = <0>;
status = "disabled";
@@ -541,6 +823,7 @@
dmas = <&dmac0 0x47>, <&dmac0 0x46>;
dma-names = "tx", "rx";
power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+ resets = <&cpg 208>;
#address-cells = <1>;
#size-cells = <0>;
status = "disabled";
@@ -575,6 +858,7 @@
clocks = <&cpg CPG_MOD 219>;
clock-names = "fck";
power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+ resets = <&cpg 219>;
#dma-cells = <1>;
dma-channels = <16>;
};
@@ -608,6 +892,7 @@
clocks = <&cpg CPG_MOD 218>;
clock-names = "fck";
power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+ resets = <&cpg 218>;
#dma-cells = <1>;
dma-channels = <16>;
};
@@ -641,6 +926,7 @@
clocks = <&cpg CPG_MOD 217>;
clock-names = "fck";
power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+ resets = <&cpg 217>;
#dma-cells = <1>;
dma-channels = <16>;
};
@@ -652,6 +938,7 @@
clocks = <&cpg CPG_MOD 314>;
max-frequency = <200000000>;
power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+ resets = <&cpg 314>;
status = "disabled";
};
@@ -662,6 +949,7 @@
clocks = <&cpg CPG_MOD 313>;
max-frequency = <200000000>;
power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+ resets = <&cpg 313>;
status = "disabled";
};
@@ -672,6 +960,7 @@
clocks = <&cpg CPG_MOD 312>;
max-frequency = <200000000>;
power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+ resets = <&cpg 312>;
status = "disabled";
};
@@ -682,6 +971,7 @@
clocks = <&cpg CPG_MOD 311>;
max-frequency = <200000000>;
power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+ resets = <&cpg 311>;
status = "disabled";
};
@@ -695,6 +985,7 @@
<GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 522>;
power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+ resets = <&cpg 522>;
#thermal-sensor-cells = <1>;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/rockchip/Makefile b/arch/arm64/boot/dts/rockchip/Makefile
index 3a862894ea44..b5636bba6b1c 100644
--- a/arch/arm64/boot/dts/rockchip/Makefile
+++ b/arch/arm64/boot/dts/rockchip/Makefile
@@ -1,9 +1,11 @@
+dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3328-evb.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3368-evb-act8846.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3368-geekbox.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3368-orion-r68-meta.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3368-px5-evb.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3368-r88.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399-evb.dtb
+dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399-gru-kevin.dtb
always := $(dtb-y)
subdir-y := $(dts-dirs)
diff --git a/arch/arm64/boot/dts/rockchip/rk3328-evb.dts b/arch/arm64/boot/dts/rockchip/rk3328-evb.dts
new file mode 100644
index 000000000000..cf272392cebf
--- /dev/null
+++ b/arch/arm64/boot/dts/rockchip/rk3328-evb.dts
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2017 Fuzhou Rockchip Electronics Co., Ltd
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+#include "rk3328.dtsi"
+
+/ {
+ model = "Rockchip RK3328 EVB";
+ compatible = "rockchip,rk3328-evb", "rockchip,rk3328";
+
+ chosen {
+ stdout-path = "serial2:1500000n8";
+ };
+};
+
+&uart2 {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/rockchip/rk3328.dtsi b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
new file mode 100644
index 000000000000..7e69f1fe78d6
--- /dev/null
+++ b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
@@ -0,0 +1,1264 @@
+/*
+ * Copyright (c) 2017 Fuzhou Rockchip Electronics Co., Ltd
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <dt-bindings/clock/rk3328-cru.h>
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/pinctrl/rockchip.h>
+#include <dt-bindings/power/rk3328-power.h>
+#include <dt-bindings/soc/rockchip,boot-mode.h>
+
+/ {
+ compatible = "rockchip,rk3328";
+
+ interrupt-parent = <&gic>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ aliases {
+ serial0 = &uart0;
+ serial1 = &uart1;
+ serial2 = &uart2;
+ i2c0 = &i2c0;
+ i2c1 = &i2c1;
+ i2c2 = &i2c2;
+ i2c3 = &i2c3;
+ };
+
+ cpus {
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ cpu0: cpu@0 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53", "arm,armv8";
+ reg = <0x0 0x0>;
+ clocks = <&cru ARMCLK>;
+ enable-method = "psci";
+ next-level-cache = <&l2>;
+ };
+
+ cpu1: cpu@1 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53", "arm,armv8";
+ reg = <0x0 0x1>;
+ clocks = <&cru ARMCLK>;
+ enable-method = "psci";
+ next-level-cache = <&l2>;
+ };
+
+ cpu2: cpu@2 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53", "arm,armv8";
+ reg = <0x0 0x2>;
+ clocks = <&cru ARMCLK>;
+ enable-method = "psci";
+ next-level-cache = <&l2>;
+ };
+
+ cpu3: cpu@3 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53", "arm,armv8";
+ reg = <0x0 0x3>;
+ clocks = <&cru ARMCLK>;
+ enable-method = "psci";
+ next-level-cache = <&l2>;
+ };
+
+ l2: l2-cache0 {
+ compatible = "cache";
+ };
+ };
+
+ amba {
+ compatible = "simple-bus";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ dmac: dmac@ff1f0000 {
+ compatible = "arm,pl330", "arm,primecell";
+ reg = <0x0 0xff1f0000 0x0 0x4000>;
+ interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru ACLK_DMAC>;
+ clock-names = "apb_pclk";
+ #dma-cells = <1>;
+ };
+ };
+
+ arm-pmu {
+ compatible = "arm,cortex-a53-pmu";
+ interrupts = <GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-affinity = <&cpu0>, <&cpu1>, <&cpu2>, <&cpu3>;
+ };
+
+ psci {
+ compatible = "arm,psci-1.0", "arm,psci-0.2";
+ method = "smc";
+ };
+
+ timer {
+ compatible = "arm,armv8-timer";
+ interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>;
+ };
+
+ xin24m: xin24m {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <24000000>;
+ clock-output-names = "xin24m";
+ };
+
+ grf: syscon@ff100000 {
+ compatible = "rockchip,rk3328-grf", "syscon", "simple-mfd";
+ reg = <0x0 0xff100000 0x0 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ power: power-controller {
+ compatible = "rockchip,rk3328-power-controller";
+ #power-domain-cells = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pd_hevc@RK3328_PD_HEVC {
+ reg = <RK3328_PD_HEVC>;
+ };
+ pd_video@RK3328_PD_VIDEO {
+ reg = <RK3328_PD_VIDEO>;
+ };
+ pd_vpu@RK3328_PD_VPU {
+ reg = <RK3328_PD_VPU>;
+ };
+ };
+
+ reboot-mode {
+ compatible = "syscon-reboot-mode";
+ offset = <0x5c8>;
+ mode-normal = <BOOT_NORMAL>;
+ mode-recovery = <BOOT_RECOVERY>;
+ mode-bootloader = <BOOT_FASTBOOT>;
+ mode-loader = <BOOT_BL_DOWNLOAD>;
+ };
+
+ };
+
+ uart0: serial@ff110000 {
+ compatible = "rockchip,rk3328-uart", "snps,dw-apb-uart";
+ reg = <0x0 0xff110000 0x0 0x100>;
+ interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru SCLK_UART0>, <&cru PCLK_UART0>;
+ clock-names = "baudclk", "apb_pclk";
+ dmas = <&dmac 2>, <&dmac 3>;
+ #dma-cells = <2>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart0_xfer &uart0_cts &uart0_rts>;
+ reg-io-width = <4>;
+ reg-shift = <2>;
+ status = "disabled";
+ };
+
+ uart1: serial@ff120000 {
+ compatible = "rockchip,rk3328-uart", "snps,dw-apb-uart";
+ reg = <0x0 0xff120000 0x0 0x100>;
+ interrupts = <GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru SCLK_UART1>, <&cru PCLK_UART1>;
+ clock-names = "sclk_uart", "pclk_uart";
+ dmas = <&dmac 4>, <&dmac 5>;
+ #dma-cells = <2>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart1_xfer &uart1_cts &uart1_rts>;
+ reg-io-width = <4>;
+ reg-shift = <2>;
+ status = "disabled";
+ };
+
+ uart2: serial@ff130000 {
+ compatible = "rockchip,rk3328-uart", "snps,dw-apb-uart";
+ reg = <0x0 0xff130000 0x0 0x100>;
+ interrupts = <GIC_SPI 57 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru SCLK_UART2>, <&cru PCLK_UART2>;
+ clock-names = "baudclk", "apb_pclk";
+ dmas = <&dmac 6>, <&dmac 7>;
+ #dma-cells = <2>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart2m1_xfer>;
+ reg-io-width = <4>;
+ reg-shift = <2>;
+ status = "disabled";
+ };
+
+ i2c0: i2c@ff150000 {
+ compatible = "rockchip,rk3328-i2c", "rockchip,rk3399-i2c";
+ reg = <0x0 0xff150000 0x0 0x1000>;
+ interrupts = <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&cru SCLK_I2C0>, <&cru PCLK_I2C0>;
+ clock-names = "i2c", "pclk";
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c0_xfer>;
+ status = "disabled";
+ };
+
+ i2c1: i2c@ff160000 {
+ compatible = "rockchip,rk3328-i2c", "rockchip,rk3399-i2c";
+ reg = <0x0 0xff160000 0x0 0x1000>;
+ interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&cru SCLK_I2C1>, <&cru PCLK_I2C1>;
+ clock-names = "i2c", "pclk";
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c1_xfer>;
+ status = "disabled";
+ };
+
+ i2c2: i2c@ff170000 {
+ compatible = "rockchip,rk3328-i2c", "rockchip,rk3399-i2c";
+ reg = <0x0 0xff170000 0x0 0x1000>;
+ interrupts = <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&cru SCLK_I2C2>, <&cru PCLK_I2C2>;
+ clock-names = "i2c", "pclk";
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c2_xfer>;
+ status = "disabled";
+ };
+
+ i2c3: i2c@ff180000 {
+ compatible = "rockchip,rk3328-i2c", "rockchip,rk3399-i2c";
+ reg = <0x0 0xff180000 0x0 0x1000>;
+ interrupts = <GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&cru SCLK_I2C3>, <&cru PCLK_I2C3>;
+ clock-names = "i2c", "pclk";
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c3_xfer>;
+ status = "disabled";
+ };
+
+ spi0: spi@ff190000 {
+ compatible = "rockchip,rk3328-spi", "rockchip,rk3066-spi";
+ reg = <0x0 0xff190000 0x0 0x1000>;
+ interrupts = <GIC_SPI 49 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&cru SCLK_SPI>, <&cru PCLK_SPI>;
+ clock-names = "spiclk", "apb_pclk";
+ dmas = <&dmac 8>, <&dmac 9>;
+ dma-names = "tx", "rx";
+ pinctrl-names = "default";
+ pinctrl-0 = <&spi0m2_clk &spi0m2_tx &spi0m2_rx &spi0m2_cs0>;
+ status = "disabled";
+ };
+
+ wdt: watchdog@ff1a0000 {
+ compatible = "snps,dw-wdt";
+ reg = <0x0 0xff1a0000 0x0 0x100>;
+ interrupts = <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ saradc: adc@ff280000 {
+ compatible = "rockchip,rk3328-saradc", "rockchip,rk3399-saradc";
+ reg = <0x0 0xff280000 0x0 0x100>;
+ interrupts = <GIC_SPI 80 IRQ_TYPE_LEVEL_HIGH>;
+ #io-channel-cells = <1>;
+ clocks = <&cru SCLK_SARADC>, <&cru PCLK_SARADC>;
+ clock-names = "saradc", "apb_pclk";
+ resets = <&cru SRST_SARADC_P>;
+ reset-names = "saradc-apb";
+ status = "disabled";
+ };
+
+ cru: clock-controller@ff440000 {
+ compatible = "rockchip,rk3328-cru", "rockchip,cru", "syscon";
+ reg = <0x0 0xff440000 0x0 0x1000>;
+ rockchip,grf = <&grf>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ assigned-clocks =
+ /*
+ * CPLL should run at 1200 MHz, but that is too high for
+ * the initial dividers of most of its children.
+ * We need to set the CPLL child clock dividers first,
+ * and then set the CPLL frequency.
+ */
+ <&cru DCLK_LCDC>, <&cru SCLK_PDM>,
+ <&cru SCLK_RTC32K>, <&cru SCLK_UART0>,
+ <&cru SCLK_UART1>, <&cru SCLK_UART2>,
+ <&cru ACLK_BUS_PRE>, <&cru ACLK_PERI_PRE>,
+ <&cru ACLK_VIO_PRE>, <&cru ACLK_RGA_PRE>,
+ <&cru ACLK_VOP_PRE>, <&cru ACLK_RKVDEC_PRE>,
+ <&cru ACLK_RKVENC>, <&cru ACLK_VPU_PRE>,
+ <&cru SCLK_VDEC_CABAC>, <&cru SCLK_VDEC_CORE>,
+ <&cru SCLK_VENC_CORE>, <&cru SCLK_VENC_DSP>,
+ <&cru SCLK_SDIO>, <&cru SCLK_TSP>,
+ <&cru SCLK_WIFI>, <&cru ARMCLK>,
+ <&cru PLL_GPLL>, <&cru PLL_CPLL>,
+ <&cru ACLK_BUS_PRE>, <&cru HCLK_BUS_PRE>,
+ <&cru PCLK_BUS_PRE>, <&cru ACLK_PERI_PRE>,
+ <&cru HCLK_PERI>, <&cru PCLK_PERI>,
+ <&cru SCLK_RTC32K>;
+ assigned-clock-parents =
+ <&cru HDMIPHY>, <&cru PLL_APLL>,
+ <&cru PLL_GPLL>, <&xin24m>,
+ <&xin24m>, <&xin24m>;
+ assigned-clock-rates =
+ <0>, <61440000>,
+ <0>, <24000000>,
+ <24000000>, <24000000>,
+ <15000000>, <15000000>,
+ <100000000>, <100000000>,
+ <100000000>, <100000000>,
+ <50000000>, <100000000>,
+ <100000000>, <100000000>,
+ <50000000>, <50000000>,
+ <50000000>, <50000000>,
+ <24000000>, <600000000>,
+ <491520000>, <1200000000>,
+ <150000000>, <75000000>,
+ <75000000>, <150000000>,
+ <75000000>, <75000000>,
+ <32768>;
+ };
+
+ gmac2io: ethernet@ff540000 {
+ compatible = "rockchip,rk3328-gmac";
+ reg = <0x0 0xff540000 0x0 0x10000>;
+ interrupts = <GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "macirq";
+ clocks = <&cru SCLK_MAC2IO>, <&cru SCLK_MAC2IO_RX>,
+ <&cru SCLK_MAC2IO_TX>, <&cru SCLK_MAC2IO_REF>,
+ <&cru SCLK_MAC2IO_REFOUT>, <&cru ACLK_MAC2IO>,
+ <&cru PCLK_MAC2IO>;
+ clock-names = "stmmaceth", "mac_clk_rx",
+ "mac_clk_tx", "clk_mac_ref",
+ "clk_mac_refout", "aclk_mac",
+ "pclk_mac";
+ resets = <&cru SRST_GMAC2IO_A>;
+ reset-names = "stmmaceth";
+ rockchip,grf = <&grf>;
+ status = "disabled";
+ };
+
+ gic: interrupt-controller@ff811000 {
+ compatible = "arm,gic-400";
+ #interrupt-cells = <3>;
+ #address-cells = <0>;
+ interrupt-controller;
+ reg = <0x0 0xff811000 0 0x1000>,
+ <0x0 0xff812000 0 0x2000>,
+ <0x0 0xff814000 0 0x2000>,
+ <0x0 0xff816000 0 0x2000>;
+ interrupts = <GIC_PPI 9
+ (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>;
+ };
+
+ pinctrl: pinctrl {
+ compatible = "rockchip,rk3328-pinctrl";
+ rockchip,grf = <&grf>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ gpio0: gpio0@ff210000 {
+ compatible = "rockchip,gpio-bank";
+ reg = <0x0 0xff210000 0x0 0x100>;
+ interrupts = <GIC_SPI 51 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru PCLK_GPIO0>;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpio1: gpio1@ff220000 {
+ compatible = "rockchip,gpio-bank";
+ reg = <0x0 0xff220000 0x0 0x100>;
+ interrupts = <GIC_SPI 52 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru PCLK_GPIO1>;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpio2: gpio2@ff230000 {
+ compatible = "rockchip,gpio-bank";
+ reg = <0x0 0xff230000 0x0 0x100>;
+ interrupts = <GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru PCLK_GPIO2>;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpio3: gpio3@ff240000 {
+ compatible = "rockchip,gpio-bank";
+ reg = <0x0 0xff240000 0x0 0x100>;
+ interrupts = <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru PCLK_GPIO3>;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ pcfg_pull_up: pcfg-pull-up {
+ bias-pull-up;
+ };
+
+ pcfg_pull_down: pcfg-pull-down {
+ bias-pull-down;
+ };
+
+ pcfg_pull_none: pcfg-pull-none {
+ bias-disable;
+ };
+
+ pcfg_pull_none_2ma: pcfg-pull-none-2ma {
+ bias-disable;
+ drive-strength = <2>;
+ };
+
+ pcfg_pull_up_2ma: pcfg-pull-up-2ma {
+ bias-pull-up;
+ drive-strength = <2>;
+ };
+
+ pcfg_pull_up_4ma: pcfg-pull-up-4ma {
+ bias-pull-up;
+ drive-strength = <4>;
+ };
+
+ pcfg_pull_none_4ma: pcfg-pull-none-4ma {
+ bias-disable;
+ drive-strength = <4>;
+ };
+
+ pcfg_pull_down_4ma: pcfg-pull-down-4ma {
+ bias-pull-down;
+ drive-strength = <4>;
+ };
+
+ pcfg_pull_none_8ma: pcfg-pull-none-8ma {
+ bias-disable;
+ drive-strength = <8>;
+ };
+
+ pcfg_pull_up_8ma: pcfg-pull-up-8ma {
+ bias-pull-up;
+ drive-strength = <8>;
+ };
+
+ pcfg_pull_none_12ma: pcfg-pull-none-12ma {
+ bias-disable;
+ drive-strength = <12>;
+ };
+
+ pcfg_pull_up_12ma: pcfg-pull-up-12ma {
+ bias-pull-up;
+ drive-strength = <12>;
+ };
+
+ pcfg_output_high: pcfg-output-high {
+ output-high;
+ };
+
+ pcfg_output_low: pcfg-output-low {
+ output-low;
+ };
+
+ pcfg_input_high: pcfg-input-high {
+ bias-pull-up;
+ input-enable;
+ };
+
+ pcfg_input: pcfg-input {
+ input-enable;
+ };
+
+ i2c0 {
+ i2c0_xfer: i2c0-xfer {
+ rockchip,pins = <2 RK_PD0 1 &pcfg_pull_none>,
+ <2 RK_PD1 1 &pcfg_pull_none>;
+ };
+ };
+
+ i2c1 {
+ i2c1_xfer: i2c1-xfer {
+ rockchip,pins = <2 RK_PA4 2 &pcfg_pull_none>,
+ <2 RK_PA5 2 &pcfg_pull_none>;
+ };
+ };
+
+ i2c2 {
+ i2c2_xfer: i2c2-xfer {
+ rockchip,pins = <2 RK_PB5 1 &pcfg_pull_none>,
+ <2 RK_PB6 1 &pcfg_pull_none>;
+ };
+ };
+
+ i2c3 {
+ i2c3_xfer: i2c3-xfer {
+ rockchip,pins = <0 RK_PA5 2 &pcfg_pull_none>,
+ <0 RK_PA6 2 &pcfg_pull_none>;
+ };
+ i2c3_gpio: i2c3-gpio {
+ rockchip,pins =
+ <0 RK_PA5 RK_FUNC_GPIO &pcfg_pull_none>,
+ <0 RK_PA6 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ hdmi_i2c {
+ hdmii2c_xfer: hdmii2c-xfer {
+ rockchip,pins = <0 RK_PA5 1 &pcfg_pull_none>,
+ <0 RK_PA6 1 &pcfg_pull_none>;
+ };
+ };
+
+ tsadc {
+ otp_gpio: otp-gpio {
+ rockchip,pins = <2 RK_PB5 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ otp_out: otp-out {
+ rockchip,pins = <2 RK_PB5 1 &pcfg_pull_none>;
+ };
+ };
+
+ uart0 {
+ uart0_xfer: uart0-xfer {
+ rockchip,pins = <1 RK_PB1 1 &pcfg_pull_up>,
+ <1 RK_PB0 1 &pcfg_pull_none>;
+ };
+
+ uart0_cts: uart0-cts {
+ rockchip,pins = <1 RK_PB3 1 &pcfg_pull_none>;
+ };
+
+ uart0_rts: uart0-rts {
+ rockchip,pins = <1 RK_PB2 1 &pcfg_pull_none>;
+ };
+
+ uart0_rts_gpio: uart0-rts-gpio {
+ rockchip,pins = <1 RK_PB2 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ uart1 {
+ uart1_xfer: uart1-xfer {
+ rockchip,pins = <3 RK_PA4 4 &pcfg_pull_up>,
+ <3 RK_PA6 4 &pcfg_pull_none>;
+ };
+
+ uart1_cts: uart1-cts {
+ rockchip,pins = <3 RK_PA7 4 &pcfg_pull_none>;
+ };
+
+ uart1_rts: uart1-rts {
+ rockchip,pins = <3 RK_PA5 4 &pcfg_pull_none>;
+ };
+
+ uart1_rts_gpio: uart1-rts-gpio {
+ rockchip,pins = <3 RK_PA5 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ uart2-0 {
+ uart2m0_xfer: uart2m0-xfer {
+ rockchip,pins = <1 RK_PA0 2 &pcfg_pull_up>,
+ <1 RK_PA1 2 &pcfg_pull_none>;
+ };
+ };
+
+ uart2-1 {
+ uart2m1_xfer: uart2m1-xfer {
+ rockchip,pins = <2 RK_PA0 1 &pcfg_pull_up>,
+ <2 RK_PA1 1 &pcfg_pull_none>;
+ };
+ };
+
+ spi0-0 {
+ spi0m0_clk: spi0m0-clk {
+ rockchip,pins = <2 RK_PB0 1 &pcfg_pull_up>;
+ };
+
+ spi0m0_cs0: spi0m0-cs0 {
+ rockchip,pins = <2 RK_PB3 1 &pcfg_pull_up>;
+ };
+
+ spi0m0_tx: spi0m0-tx {
+ rockchip,pins = <2 RK_PB1 1 &pcfg_pull_up>;
+ };
+
+ spi0m0_rx: spi0m0-rx {
+ rockchip,pins = <2 RK_PB2 1 &pcfg_pull_up>;
+ };
+
+ spi0m0_cs1: spi0m0-cs1 {
+ rockchip,pins = <2 RK_PB4 1 &pcfg_pull_up>;
+ };
+ };
+
+ spi0-1 {
+ spi0m1_clk: spi0m1-clk {
+ rockchip,pins = <3 RK_PC7 2 &pcfg_pull_up>;
+ };
+
+ spi0m1_cs0: spi0m1-cs0 {
+ rockchip,pins = <3 RK_PD2 2 &pcfg_pull_up>;
+ };
+
+ spi0m1_tx: spi0m1-tx {
+ rockchip,pins = <3 RK_PD1 2 &pcfg_pull_up>;
+ };
+
+ spi0m1_rx: spi0m1-rx {
+ rockchip,pins = <3 RK_PD0 2 &pcfg_pull_up>;
+ };
+
+ spi0m1_cs1: spi0m1-cs1 {
+ rockchip,pins = <3 RK_PD3 2 &pcfg_pull_up>;
+ };
+ };
+
+ spi0-2 {
+ spi0m2_clk: spi0m2-clk {
+ rockchip,pins = <3 RK_PA0 4 &pcfg_pull_up>;
+ };
+
+ spi0m2_cs0: spi0m2-cs0 {
+ rockchip,pins = <3 RK_PB0 3 &pcfg_pull_up>;
+ };
+
+ spi0m2_tx: spi0m2-tx {
+ rockchip,pins = <3 RK_PA1 4 &pcfg_pull_up>;
+ };
+
+ spi0m2_rx: spi0m2-rx {
+ rockchip,pins = <3 RK_PA2 4 &pcfg_pull_up>;
+ };
+ };
+
+ i2s1 {
+ i2s1_mclk: i2s1-mclk {
+ rockchip,pins = <2 RK_PB7 1 &pcfg_pull_none>;
+ };
+
+ i2s1_sclk: i2s1-sclk {
+ rockchip,pins = <2 RK_PC2 1 &pcfg_pull_none>;
+ };
+
+ i2s1_lrckrx: i2s1-lrckrx {
+ rockchip,pins = <2 RK_PC0 1 &pcfg_pull_none>;
+ };
+
+ i2s1_lrcktx: i2s1-lrcktx {
+ rockchip,pins = <2 RK_PC1 1 &pcfg_pull_none>;
+ };
+
+ i2s1_sdi: i2s1-sdi {
+ rockchip,pins = <2 RK_PC3 1 &pcfg_pull_none>;
+ };
+
+ i2s1_sdo: i2s1-sdo {
+ rockchip,pins = <2 RK_PC7 1 &pcfg_pull_none>;
+ };
+
+ i2s1_sdio1: i2s1-sdio1 {
+ rockchip,pins = <2 RK_PC4 1 &pcfg_pull_none>;
+ };
+
+ i2s1_sdio2: i2s1-sdio2 {
+ rockchip,pins = <2 RK_PC5 1 &pcfg_pull_none>;
+ };
+
+ i2s1_sdio3: i2s1-sdio3 {
+ rockchip,pins = <2 RK_PC6 1 &pcfg_pull_none>;
+ };
+
+ i2s1_sleep: i2s1-sleep {
+ rockchip,pins =
+ <2 RK_PB7 RK_FUNC_GPIO &pcfg_input_high>,
+ <2 RK_PC0 RK_FUNC_GPIO &pcfg_input_high>,
+ <2 RK_PC1 RK_FUNC_GPIO &pcfg_input_high>,
+ <2 RK_PC2 RK_FUNC_GPIO &pcfg_input_high>,
+ <2 RK_PC3 RK_FUNC_GPIO &pcfg_input_high>,
+ <2 RK_PC4 RK_FUNC_GPIO &pcfg_input_high>,
+ <2 RK_PC5 RK_FUNC_GPIO &pcfg_input_high>,
+ <2 RK_PC6 RK_FUNC_GPIO &pcfg_input_high>,
+ <2 RK_PC7 RK_FUNC_GPIO &pcfg_input_high>;
+ };
+ };
+
+ i2s2-0 {
+ i2s2m0_mclk: i2s2m0-mclk {
+ rockchip,pins = <1 RK_PC5 1 &pcfg_pull_none>;
+ };
+
+ i2s2m0_sclk: i2s2m0-sclk {
+ rockchip,pins = <1 RK_PC6 1 &pcfg_pull_none>;
+ };
+
+ i2s2m0_lrckrx: i2s2m0-lrckrx {
+ rockchip,pins = <1 RK_PD2 1 &pcfg_pull_none>;
+ };
+
+ i2s2m0_lrcktx: i2s2m0-lrcktx {
+ rockchip,pins = <1 RK_PC7 1 &pcfg_pull_none>;
+ };
+
+ i2s2m0_sdi: i2s2m0-sdi {
+ rockchip,pins = <1 RK_PD0 1 &pcfg_pull_none>;
+ };
+
+ i2s2m0_sdo: i2s2m0-sdo {
+ rockchip,pins = <1 RK_PD1 1 &pcfg_pull_none>;
+ };
+
+ i2s2m0_sleep: i2s2m0-sleep {
+ rockchip,pins =
+ <1 RK_PC5 RK_FUNC_GPIO &pcfg_input_high>,
+ <1 RK_PC6 RK_FUNC_GPIO &pcfg_input_high>,
+ <1 RK_PD2 RK_FUNC_GPIO &pcfg_input_high>,
+ <1 RK_PC7 RK_FUNC_GPIO &pcfg_input_high>,
+ <1 RK_PD0 RK_FUNC_GPIO &pcfg_input_high>,
+ <1 RK_PD1 RK_FUNC_GPIO &pcfg_input_high>;
+ };
+ };
+
+ i2s2-1 {
+ i2s2m1_mclk: i2s2m1-mclk {
+ rockchip,pins = <1 RK_PC5 1 &pcfg_pull_none>;
+ };
+
+ i2s2m1_sclk: i2s2m1-sclk {
+ rockchip,pins = <3 RK_PA0 6 &pcfg_pull_none>;
+ };
+
+ i2s2m1_lrckrx: i2s2m1-lrckrx {
+ rockchip,pins = <3 RK_PB0 6 &pcfg_pull_none>;
+ };
+
+ i2s2m1_lrcktx: i2s2m1-lrcktx {
+ rockchip,pins = <3 RK_PB0 4 &pcfg_pull_none>;
+ };
+
+ i2s2m1_sdi: i2s2m1-sdi {
+ rockchip,pins = <3 RK_PA2 6 &pcfg_pull_none>;
+ };
+
+ i2s2m1_sdo: i2s2m1-sdo {
+ rockchip,pins = <3 RK_PA1 6 &pcfg_pull_none>;
+ };
+
+ i2s2m1_sleep: i2s2m1-sleep {
+ rockchip,pins =
+ <1 RK_PC5 RK_FUNC_GPIO &pcfg_input_high>,
+ <3 RK_PA0 RK_FUNC_GPIO &pcfg_input_high>,
+ <3 RK_PB0 RK_FUNC_GPIO &pcfg_input_high>,
+ <3 RK_PA2 RK_FUNC_GPIO &pcfg_input_high>,
+ <3 RK_PA1 RK_FUNC_GPIO &pcfg_input_high>;
+ };
+ };
+
+ spdif-0 {
+ spdifm0_tx: spdifm0-tx {
+ rockchip,pins = <0 RK_PD3 1 &pcfg_pull_none>;
+ };
+ };
+
+ spdif-1 {
+ spdifm1_tx: spdifm1-tx {
+ rockchip,pins = <2 RK_PC1 2 &pcfg_pull_none>;
+ };
+ };
+
+ spdif-2 {
+ spdifm2_tx: spdifm2-tx {
+ rockchip,pins = <0 RK_PA2 2 &pcfg_pull_none>;
+ };
+ };
+
+ sdmmc0-0 {
+ sdmmc0m0_pwren: sdmmc0m0-pwren {
+ rockchip,pins = <2 RK_PA7 1 &pcfg_pull_up_4ma>;
+ };
+
+ sdmmc0m0_gpio: sdmmc0m0-gpio {
+ rockchip,pins = <2 RK_PA7 RK_FUNC_GPIO &pcfg_pull_up_4ma>;
+ };
+ };
+
+ sdmmc0-1 {
+ sdmmc0m1_pwren: sdmmc0m1-pwren {
+ rockchip,pins = <0 RK_PD6 3 &pcfg_pull_up_4ma>;
+ };
+
+ sdmmc0m1_gpio: sdmmc0m1-gpio {
+ rockchip,pins = <0 RK_PD6 RK_FUNC_GPIO &pcfg_pull_up_4ma>;
+ };
+ };
+
+ sdmmc0 {
+ sdmmc0_clk: sdmmc0-clk {
+ rockchip,pins = <1 RK_PA6 1 &pcfg_pull_none_4ma>;
+ };
+
+ sdmmc0_cmd: sdmmc0-cmd {
+ rockchip,pins = <1 RK_PA4 1 &pcfg_pull_up_4ma>;
+ };
+
+ sdmmc0_dectn: sdmmc0-dectn {
+ rockchip,pins = <1 RK_PA5 1 &pcfg_pull_up_4ma>;
+ };
+
+ sdmmc0_wrprt: sdmmc0-wrprt {
+ rockchip,pins = <1 RK_PA7 1 &pcfg_pull_up_4ma>;
+ };
+
+ sdmmc0_bus1: sdmmc0-bus1 {
+ rockchip,pins = <1 RK_PA0 1 &pcfg_pull_up_4ma>;
+ };
+
+ sdmmc0_bus4: sdmmc0-bus4 {
+ rockchip,pins = <1 RK_PA0 1 &pcfg_pull_up_4ma>,
+ <1 RK_PA1 1 &pcfg_pull_up_4ma>,
+ <1 RK_PA2 1 &pcfg_pull_up_4ma>,
+ <1 RK_PA3 1 &pcfg_pull_up_4ma>;
+ };
+
+ sdmmc0_gpio: sdmmc0-gpio {
+ rockchip,pins =
+ <1 RK_PA6 RK_FUNC_GPIO &pcfg_pull_up_4ma>,
+ <1 RK_PA4 RK_FUNC_GPIO &pcfg_pull_up_4ma>,
+ <1 RK_PA5 RK_FUNC_GPIO &pcfg_pull_up_4ma>,
+ <1 RK_PA7 RK_FUNC_GPIO &pcfg_pull_up_4ma>,
+ <1 RK_PA3 RK_FUNC_GPIO &pcfg_pull_up_4ma>,
+ <1 RK_PA2 RK_FUNC_GPIO &pcfg_pull_up_4ma>,
+ <1 RK_PA1 RK_FUNC_GPIO &pcfg_pull_up_4ma>,
+ <1 RK_PA0 RK_FUNC_GPIO &pcfg_pull_up_4ma>;
+ };
+ };
+
+ sdmmc0ext {
+ sdmmc0ext_clk: sdmmc0ext-clk {
+ rockchip,pins = <3 RK_PA2 3 &pcfg_pull_none_4ma>;
+ };
+
+ sdmmc0ext_cmd: sdmmc0ext-cmd {
+ rockchip,pins = <3 RK_PA0 3 &pcfg_pull_up_4ma>;
+ };
+
+ sdmmc0ext_wrprt: sdmmc0ext-wrprt {
+ rockchip,pins = <3 RK_PA3 3 &pcfg_pull_up_4ma>;
+ };
+
+ sdmmc0ext_dectn: sdmmc0ext-dectn {
+ rockchip,pins = <3 RK_PA1 3 &pcfg_pull_up_4ma>;
+ };
+
+ sdmmc0ext_bus1: sdmmc0ext-bus1 {
+ rockchip,pins = <3 RK_PA4 3 &pcfg_pull_up_4ma>;
+ };
+
+ sdmmc0ext_bus4: sdmmc0ext-bus4 {
+ rockchip,pins =
+ <3 RK_PA4 3 &pcfg_pull_up_4ma>,
+ <3 RK_PA5 3 &pcfg_pull_up_4ma>,
+ <3 RK_PA6 3 &pcfg_pull_up_4ma>,
+ <3 RK_PA7 3 &pcfg_pull_up_4ma>;
+ };
+
+ sdmmc0ext_gpio: sdmmc0ext-gpio {
+ rockchip,pins =
+ <3 RK_PA0 RK_FUNC_GPIO &pcfg_pull_up_4ma>,
+ <3 RK_PA1 RK_FUNC_GPIO &pcfg_pull_up_4ma>,
+ <3 RK_PA2 RK_FUNC_GPIO &pcfg_pull_up_4ma>,
+ <3 RK_PA3 RK_FUNC_GPIO &pcfg_pull_up_4ma>,
+ <3 RK_PA4 RK_FUNC_GPIO &pcfg_pull_up_4ma>,
+ <3 RK_PA5 RK_FUNC_GPIO &pcfg_pull_up_4ma>,
+ <3 RK_PA6 RK_FUNC_GPIO &pcfg_pull_up_4ma>,
+ <3 RK_PA7 RK_FUNC_GPIO &pcfg_pull_up_4ma>;
+ };
+ };
+
+ sdmmc1 {
+ sdmmc1_clk: sdmmc1-clk {
+ rockchip,pins = <1 RK_PB4 1 &pcfg_pull_none_8ma>;
+ };
+
+ sdmmc1_cmd: sdmmc1-cmd {
+ rockchip,pins = <1 RK_PB5 1 &pcfg_pull_up_8ma>;
+ };
+
+ sdmmc1_pwren: sdmmc1-pwren {
+ rockchip,pins = <1 RK_PC2 1 &pcfg_pull_up_8ma>;
+ };
+
+ sdmmc1_wrprt: sdmmc1-wrprt {
+ rockchip,pins = <1 RK_PC4 1 &pcfg_pull_up_8ma>;
+ };
+
+ sdmmc1_dectn: sdmmc1-dectn {
+ rockchip,pins = <1 RK_PC3 1 &pcfg_pull_up_8ma>;
+ };
+
+ sdmmc1_bus1: sdmmc1-bus1 {
+ rockchip,pins = <1 RK_PB6 1 &pcfg_pull_up_8ma>;
+ };
+
+ sdmmc1_bus4: sdmmc1-bus4 {
+ rockchip,pins = <1 RK_PB6 1 &pcfg_pull_up_8ma>,
+ <1 RK_PB7 1 &pcfg_pull_up_8ma>,
+ <1 RK_PC0 1 &pcfg_pull_up_8ma>,
+ <1 RK_PC1 1 &pcfg_pull_up_8ma>;
+ };
+
+ sdmmc1_gpio: sdmmc1-gpio {
+ rockchip,pins =
+ <1 RK_PB4 RK_FUNC_GPIO &pcfg_pull_up_4ma>,
+ <1 RK_PB5 RK_FUNC_GPIO &pcfg_pull_up_4ma>,
+ <1 RK_PB6 RK_FUNC_GPIO &pcfg_pull_up_4ma>,
+ <1 RK_PB7 RK_FUNC_GPIO &pcfg_pull_up_4ma>,
+ <1 RK_PC0 RK_FUNC_GPIO &pcfg_pull_up_4ma>,
+ <1 RK_PC1 RK_FUNC_GPIO &pcfg_pull_up_4ma>,
+ <1 RK_PC2 RK_FUNC_GPIO &pcfg_pull_up_4ma>,
+ <1 RK_PC3 RK_FUNC_GPIO &pcfg_pull_up_4ma>,
+ <1 RK_PC4 RK_FUNC_GPIO &pcfg_pull_up_4ma>;
+ };
+ };
+
+ emmc {
+ emmc_clk: emmc-clk {
+ rockchip,pins = <3 RK_PC5 2 &pcfg_pull_none_12ma>;
+ };
+
+ emmc_cmd: emmc-cmd {
+ rockchip,pins = <3 RK_PC3 2 &pcfg_pull_up_12ma>;
+ };
+
+ emmc_pwren: emmc-pwren {
+ rockchip,pins = <3 RK_PC6 2 &pcfg_pull_none>;
+ };
+
+ emmc_rstnout: emmc-rstnout {
+ rockchip,pins = <3 RK_PC4 2 &pcfg_pull_none>;
+ };
+
+ emmc_bus1: emmc-bus1 {
+ rockchip,pins = <0 RK_PA7 2 &pcfg_pull_up_12ma>;
+ };
+
+ emmc_bus4: emmc-bus4 {
+ rockchip,pins =
+ <0 RK_PA7 2 &pcfg_pull_up_12ma>,
+ <2 RK_PD4 2 &pcfg_pull_up_12ma>,
+ <2 RK_PD5 2 &pcfg_pull_up_12ma>,
+ <2 RK_PD6 2 &pcfg_pull_up_12ma>;
+ };
+
+ emmc_bus8: emmc-bus8 {
+ rockchip,pins =
+ <0 RK_PA7 2 &pcfg_pull_up_12ma>,
+ <2 RK_PD4 2 &pcfg_pull_up_12ma>,
+ <2 RK_PD5 2 &pcfg_pull_up_12ma>,
+ <2 RK_PD6 2 &pcfg_pull_up_12ma>,
+ <2 RK_PD7 2 &pcfg_pull_up_12ma>,
+ <3 RK_PC0 2 &pcfg_pull_up_12ma>,
+ <3 RK_PC1 2 &pcfg_pull_up_12ma>,
+ <3 RK_PC2 2 &pcfg_pull_up_12ma>;
+ };
+ };
+
+ pwm0 {
+ pwm0_pin: pwm0-pin {
+ rockchip,pins = <2 RK_PA4 1 &pcfg_pull_none>;
+ };
+ };
+
+ pwm1 {
+ pwm1_pin: pwm1-pin {
+ rockchip,pins = <2 RK_PA5 1 &pcfg_pull_none>;
+ };
+ };
+
+ pwm2 {
+ pwm2_pin: pwm2-pin {
+ rockchip,pins = <2 RK_PA6 1 &pcfg_pull_none>;
+ };
+ };
+
+ pwmir {
+ pwmir_pin: pwmir-pin {
+ rockchip,pins = <2 RK_PA2 1 &pcfg_pull_none>;
+ };
+ };
+
+ gmac-1 {
+ rgmiim1_pins: rgmiim1-pins {
+ rockchip,pins =
+ /* mac_txclk */
+ <1 RK_PB4 2 &pcfg_pull_none_12ma>,
+ /* mac_rxclk */
+ <1 RK_PB5 2 &pcfg_pull_none_2ma>,
+ /* mac_mdio */
+ <1 RK_PC3 2 &pcfg_pull_none_2ma>,
+ /* mac_txen */
+ <1 RK_PD1 2 &pcfg_pull_none_12ma>,
+ /* mac_clk */
+ <1 RK_PC5 2 &pcfg_pull_none_2ma>,
+ /* mac_rxdv */
+ <1 RK_PC6 2 &pcfg_pull_none_2ma>,
+ /* mac_mdc */
+ <1 RK_PC7 2 &pcfg_pull_none_2ma>,
+ /* mac_rxd1 */
+ <1 RK_PB2 2 &pcfg_pull_none_2ma>,
+ /* mac_rxd0 */
+ <1 RK_PB3 2 &pcfg_pull_none_2ma>,
+ /* mac_txd1 */
+ <1 RK_PB0 2 &pcfg_pull_none_12ma>,
+ /* mac_txd0 */
+ <1 RK_PB1 2 &pcfg_pull_none_12ma>,
+ /* mac_rxd3 */
+ <1 RK_PB6 2 &pcfg_pull_none_2ma>,
+ /* mac_rxd2 */
+ <1 RK_PB7 2 &pcfg_pull_none_2ma>,
+ /* mac_txd3 */
+ <1 RK_PC0 2 &pcfg_pull_none_12ma>,
+ /* mac_txd2 */
+ <1 RK_PC1 2 &pcfg_pull_none_12ma>,
+
+ /* mac_txclk */
+ <0 RK_PB0 1 &pcfg_pull_none>,
+ /* mac_txen */
+ <0 RK_PB4 1 &pcfg_pull_none>,
+ /* mac_clk */
+ <0 RK_PD0 1 &pcfg_pull_none>,
+ /* mac_txd1 */
+ <0 RK_PC0 1 &pcfg_pull_none>,
+ /* mac_txd0 */
+ <0 RK_PC1 1 &pcfg_pull_none>,
+ /* mac_txd3 */
+ <0 RK_PC7 1 &pcfg_pull_none>,
+ /* mac_txd2 */
+ <0 RK_PC6 1 &pcfg_pull_none>;
+ };
+
+ rmiim1_pins: rmiim1-pins {
+ rockchip,pins =
+ /* mac_mdio */
+ <1 RK_PC3 2 &pcfg_pull_none_2ma>,
+ /* mac_txen */
+ <1 RK_PD1 2 &pcfg_pull_none_12ma>,
+ /* mac_clk */
+ <1 RK_PC5 2 &pcfg_pull_none_2ma>,
+ /* mac_rxer */
+ <1 RK_PD0 2 &pcfg_pull_none_2ma>,
+ /* mac_rxdv */
+ <1 RK_PC6 2 &pcfg_pull_none_2ma>,
+ /* mac_mdc */
+ <1 RK_PC7 2 &pcfg_pull_none_2ma>,
+ /* mac_rxd1 */
+ <1 RK_PB2 2 &pcfg_pull_none_2ma>,
+ /* mac_rxd0 */
+ <1 RK_PB3 2 &pcfg_pull_none_2ma>,
+ /* mac_txd1 */
+ <1 RK_PB0 2 &pcfg_pull_none_12ma>,
+ /* mac_txd0 */
+ <1 RK_PB1 2 &pcfg_pull_none_12ma>,
+
+ /* mac_mdio */
+ <0 RK_PB3 1 &pcfg_pull_none>,
+ /* mac_txen */
+ <0 RK_PB4 1 &pcfg_pull_none>,
+ /* mac_clk */
+ <0 RK_PD0 1 &pcfg_pull_none>,
+ /* mac_mdc */
+ <0 RK_PC3 1 &pcfg_pull_none>,
+ /* mac_txd1 */
+ <0 RK_PC0 1 &pcfg_pull_none>,
+ /* mac_txd0 */
+ <0 RK_PC1 1 &pcfg_pull_none>;
+ };
+ };
+
+ gmac2phy {
+ fephyled_speed100: fephyled-speed100 {
+ rockchip,pins = <0 RK_PD7 1 &pcfg_pull_none>;
+ };
+
+ fephyled_speed10: fephyled-speed10 {
+ rockchip,pins = <0 RK_PD6 1 &pcfg_pull_none>;
+ };
+
+ fephyled_duplex: fephyled-duplex {
+ rockchip,pins = <0 RK_PD6 2 &pcfg_pull_none>;
+ };
+
+ fephyled_rxm0: fephyled-rxm0 {
+ rockchip,pins = <0 RK_PD5 1 &pcfg_pull_none>;
+ };
+
+ fephyled_txm0: fephyled-txm0 {
+ rockchip,pins = <0 RK_PD5 2 &pcfg_pull_none>;
+ };
+
+ fephyled_linkm0: fephyled-linkm0 {
+ rockchip,pins = <0 RK_PD4 1 &pcfg_pull_none>;
+ };
+
+ fephyled_rxm1: fephyled-rxm1 {
+ rockchip,pins = <2 RK_PD1 2 &pcfg_pull_none>;
+ };
+
+ fephyled_txm1: fephyled-txm1 {
+ rockchip,pins = <2 RK_PD1 3 &pcfg_pull_none>;
+ };
+
+ fephyled_linkm1: fephyled-linkm1 {
+ rockchip,pins = <2 RK_PD0 2 &pcfg_pull_none>;
+ };
+ };
+
+ tsadc_pin {
+ tsadc_int: tsadc-int {
+ rockchip,pins = <2 RK_PB5 2 &pcfg_pull_none>;
+ };
+ tsadc_gpio: tsadc-gpio {
+ rockchip,pins = <2 RK_PB5 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ hdmi_pin {
+ hdmi_cec: hdmi-cec {
+ rockchip,pins = <0 RK_PA3 1 &pcfg_pull_none>;
+ };
+
+ hdmi_hpd: hdmi-hpd {
+ rockchip,pins = <0 RK_PA4 1 &pcfg_pull_down>;
+ };
+ };
+
+ cif-0 {
+ dvp_d2d9_m0: dvp-d2d9-m0 {
+ rockchip,pins =
+ /* cif_d0 */
+ <3 RK_PA4 2 &pcfg_pull_none>,
+ /* cif_d1 */
+ <3 RK_PA5 2 &pcfg_pull_none>,
+ /* cif_d2 */
+ <3 RK_PA6 2 &pcfg_pull_none>,
+ /* cif_d3 */
+ <3 RK_PA7 2 &pcfg_pull_none>,
+ /* cif_d4 */
+ <3 RK_PB0 2 &pcfg_pull_none>,
+ /* cif_d5m0 */
+ <3 RK_PB1 2 &pcfg_pull_none>,
+ /* cif_d6m0 */
+ <3 RK_PB2 2 &pcfg_pull_none>,
+ /* cif_d7m0 */
+ <3 RK_PB3 2 &pcfg_pull_none>,
+ /* cif_href */
+ <3 RK_PA1 2 &pcfg_pull_none>,
+ /* cif_vsync */
+ <3 RK_PA0 2 &pcfg_pull_none>,
+ /* cif_clkoutm0 */
+ <3 RK_PA3 2 &pcfg_pull_none>,
+ /* cif_clkin */
+ <3 RK_PA2 2 &pcfg_pull_none>;
+ };
+ };
+
+ cif-1 {
+ dvp_d2d9_m1: dvp-d2d9-m1 {
+ rockchip,pins =
+ /* cif_d0 */
+ <3 RK_PA4 2 &pcfg_pull_none>,
+ /* cif_d1 */
+ <3 RK_PA5 2 &pcfg_pull_none>,
+ /* cif_d2 */
+ <3 RK_PA6 2 &pcfg_pull_none>,
+ /* cif_d3 */
+ <3 RK_PA7 2 &pcfg_pull_none>,
+ /* cif_d4 */
+ <3 RK_PB0 2 &pcfg_pull_none>,
+ /* cif_d5m1 */
+ <2 RK_PC0 4 &pcfg_pull_none>,
+ /* cif_d6m1 */
+ <2 RK_PC1 4 &pcfg_pull_none>,
+ /* cif_d7m1 */
+ <2 RK_PC2 4 &pcfg_pull_none>,
+ /* cif_href */
+ <3 RK_PA1 2 &pcfg_pull_none>,
+ /* cif_vsync */
+ <3 RK_PA0 2 &pcfg_pull_none>,
+ /* cif_clkoutm1 */
+ <2 RK_PB7 4 &pcfg_pull_none>,
+ /* cif_clkin */
+ <3 RK_PA2 2 &pcfg_pull_none>;
+ };
+ };
+ };
+};
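For context, the I/O peripherals declared in the new rk3328.dtsi above (the UARTs, I2Cs, SPI, SARADC and the GMAC) are all left with status = "disabled"; the rk3328-evb.dts added above only turns on uart2, and any other board .dts is expected to reference the labels defined here and flip the status itself. A minimal sketch of such a board fragment, assuming a hypothetical design with an external RGMII PHY — the PHY node, its MDIO address and the delay values are illustrative only and not part of this series:

/* Hypothetical board fragment: enable the RGMII MAC using the clocks,
 * resets and the rgmiim1 pin group declared in rk3328.dtsi above. */
&gmac2io {
	phy-mode = "rgmii";
	phy-handle = <&rtl8211e>;
	pinctrl-names = "default";
	pinctrl-0 = <&rgmiim1_pins>;
	tx_delay = <0x26>;	/* board-tuned RGMII delays (assumed values) */
	rx_delay = <0x11>;
	status = "okay";

	mdio {
		compatible = "snps,dwmac-mdio";
		#address-cells = <1>;
		#size-cells = <0>;

		rtl8211e: ethernet-phy@1 {
			reg = <1>;	/* assumed PHY address */
		};
	};
};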
diff --git a/arch/arm64/boot/dts/rockchip/rk3368-px5-evb.dts b/arch/arm64/boot/dts/rockchip/rk3368-px5-evb.dts
index 8cdb3bff9c55..ff48edd8e348 100644
--- a/arch/arm64/boot/dts/rockchip/rk3368-px5-evb.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3368-px5-evb.dts
@@ -53,7 +53,7 @@
};
memory@0 {
- reg = <0x0 0x0 0x0 0x80000000>;
+ reg = <0x0 0x0 0x0 0x40000000>;
device_type = "memory";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3368.dtsi b/arch/arm64/boot/dts/rockchip/rk3368.dtsi
index a635adc47e74..6d5dc0587e59 100644
--- a/arch/arm64/boot/dts/rockchip/rk3368.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3368.dtsi
@@ -108,23 +108,10 @@
};
};
- idle-states {
- entry-method = "psci";
-
- cpu_sleep: cpu-sleep-0 {
- compatible = "arm,idle-state";
- arm,psci-suspend-param = <0x1010000>;
- entry-latency-us = <0x3fffffff>;
- exit-latency-us = <0x40000000>;
- min-residency-us = <0xffffffff>;
- };
- };
-
cpu_l0: cpu@0 {
device_type = "cpu";
compatible = "arm,cortex-a53", "arm,armv8";
reg = <0x0 0x0>;
- cpu-idle-states = <&cpu_sleep>;
enable-method = "psci";
#cooling-cells = <2>; /* min followed by max */
@@ -134,7 +121,6 @@
device_type = "cpu";
compatible = "arm,cortex-a53", "arm,armv8";
reg = <0x0 0x1>;
- cpu-idle-states = <&cpu_sleep>;
enable-method = "psci";
};
@@ -142,7 +128,6 @@
device_type = "cpu";
compatible = "arm,cortex-a53", "arm,armv8";
reg = <0x0 0x2>;
- cpu-idle-states = <&cpu_sleep>;
enable-method = "psci";
};
@@ -150,7 +135,6 @@
device_type = "cpu";
compatible = "arm,cortex-a53", "arm,armv8";
reg = <0x0 0x3>;
- cpu-idle-states = <&cpu_sleep>;
enable-method = "psci";
};
@@ -158,7 +142,6 @@
device_type = "cpu";
compatible = "arm,cortex-a53", "arm,armv8";
reg = <0x0 0x100>;
- cpu-idle-states = <&cpu_sleep>;
enable-method = "psci";
#cooling-cells = <2>; /* min followed by max */
@@ -168,7 +151,6 @@
device_type = "cpu";
compatible = "arm,cortex-a53", "arm,armv8";
reg = <0x0 0x101>;
- cpu-idle-states = <&cpu_sleep>;
enable-method = "psci";
};
@@ -176,7 +158,6 @@
device_type = "cpu";
compatible = "arm,cortex-a53", "arm,armv8";
reg = <0x0 0x102>;
- cpu-idle-states = <&cpu_sleep>;
enable-method = "psci";
};
@@ -184,11 +165,39 @@
device_type = "cpu";
compatible = "arm,cortex-a53", "arm,armv8";
reg = <0x0 0x103>;
- cpu-idle-states = <&cpu_sleep>;
enable-method = "psci";
};
};
+ amba {
+ compatible = "simple-bus";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ dmac_peri: dma-controller@ff250000 {
+ compatible = "arm,pl330", "arm,primecell";
+ reg = <0x0 0xff250000 0x0 0x4000>;
+ interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>;
+ #dma-cells = <1>;
+ arm,pl330-broken-no-flushp;
+ clocks = <&cru ACLK_DMAC_PERI>;
+ clock-names = "apb_pclk";
+ };
+
+ dmac_bus: dma-controller@ff600000 {
+ compatible = "arm,pl330", "arm,primecell";
+ reg = <0x0 0xff600000 0x0 0x4000>;
+ interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>;
+ #dma-cells = <1>;
+ arm,pl330-broken-no-flushp;
+ clocks = <&cru ACLK_DMAC_BUS>;
+ clock-names = "apb_pclk";
+ };
+ };
+
arm-pmu {
compatible = "arm,armv8-pmuv3";
interrupts = <GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>,
@@ -237,6 +246,8 @@
clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
fifo-depth = <0x100>;
interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
+ resets = <&cru SRST_MMC0>;
+ reset-names = "reset";
status = "disabled";
};
@@ -249,6 +260,8 @@
clock-names = "biu", "ciu", "ciu_drv", "ciu_sample";
fifo-depth = <0x100>;
interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>;
+ resets = <&cru SRST_SDIO0>;
+ reset-names = "reset";
status = "disabled";
};
@@ -261,6 +274,8 @@
clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
fifo-depth = <0x100>;
interrupts = <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>;
+ resets = <&cru SRST_EMMC>;
+ reset-names = "reset";
status = "disabled";
};
@@ -631,6 +646,7 @@
clocks = <&cru PCLK_MAILBOX>;
clock-names = "pclk_mailbox";
#mbox-cells = <1>;
+ status = "disabled";
};
pmugrf: syscon@ff738000 {
@@ -684,6 +700,30 @@
interrupts = <GIC_SPI 66 IRQ_TYPE_LEVEL_HIGH>;
};
+ i2s_2ch: i2s-2ch@ff890000 {
+ compatible = "rockchip,rk3368-i2s", "rockchip,rk3066-i2s";
+ reg = <0x0 0xff890000 0x0 0x1000>;
+ interrupts = <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>;
+ clock-names = "i2s_clk", "i2s_hclk";
+ clocks = <&cru SCLK_I2S_2CH>, <&cru HCLK_I2S_2CH>;
+ dmas = <&dmac_bus 6>, <&dmac_bus 7>;
+ dma-names = "tx", "rx";
+ status = "disabled";
+ };
+
+ i2s_8ch: i2s-8ch@ff898000 {
+ compatible = "rockchip,rk3368-i2s", "rockchip,rk3066-i2s";
+ reg = <0x0 0xff898000 0x0 0x1000>;
+ interrupts = <GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>;
+ clock-names = "i2s_clk", "i2s_hclk";
+ clocks = <&cru SCLK_I2S_8CH>, <&cru HCLK_I2S_8CH>;
+ dmas = <&dmac_bus 0>, <&dmac_bus 1>;
+ dma-names = "tx", "rx";
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2s_8ch_bus>;
+ status = "disabled";
+ };
+
gic: interrupt-controller@ffb71000 {
compatible = "arm,gic-400";
interrupt-controller;
@@ -886,6 +926,20 @@
};
};
+ i2s {
+ i2s_8ch_bus: i2s-8ch-bus {
+ rockchip,pins = <2 12 RK_FUNC_1 &pcfg_pull_none>,
+ <2 13 RK_FUNC_1 &pcfg_pull_none>,
+ <2 14 RK_FUNC_1 &pcfg_pull_none>,
+ <2 15 RK_FUNC_1 &pcfg_pull_none>,
+ <2 16 RK_FUNC_1 &pcfg_pull_none>,
+ <2 17 RK_FUNC_1 &pcfg_pull_none>,
+ <2 18 RK_FUNC_1 &pcfg_pull_none>,
+ <2 19 RK_FUNC_1 &pcfg_pull_none>,
+ <2 20 RK_FUNC_1 &pcfg_pull_none>;
+ };
+ };
+
pwm0 {
pwm0_pin: pwm0-pin {
rockchip,pins = <3 8 RK_FUNC_2 &pcfg_pull_none>;
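Similarly, the rk3368.dtsi hunks above add the two PL330 DMA controllers and the i2s_2ch/i2s_8ch controllers (i2s_8ch already claims the new i2s_8ch_bus pin group as its default pinctrl) but leave both I2S nodes disabled. A board wanting audio would enable i2s_8ch and bind it to a codec; a minimal sketch, assuming a hypothetical simple-audio-card setup with an external codec labelled codec — none of this is part of the patch:

/* Hypothetical board fragment: enable i2s_8ch and hook it to a codec. */
&i2s_8ch {
	/* simple-audio-card needs a DAI specifier; the dtsi hunk above
	 * does not set this, so the board override adds it. */
	#sound-dai-cells = <0>;
	status = "okay";
};

/ {
	sound {
		compatible = "simple-audio-card";
		simple-audio-card,name = "rk3368-i2s-audio";
		simple-audio-card,format = "i2s";
		simple-audio-card,mclk-fs = <256>;

		simple-audio-card,cpu {
			sound-dai = <&i2s_8ch>;
		};

		simple-audio-card,codec {
			sound-dai = <&codec>;	/* assumed external codec node */
		};
	};
};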
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru-kevin.dts b/arch/arm64/boot/dts/rockchip/rk3399-gru-kevin.dts
new file mode 100644
index 000000000000..658bb9dc9dfd
--- /dev/null
+++ b/arch/arm64/boot/dts/rockchip/rk3399-gru-kevin.dts
@@ -0,0 +1,306 @@
+/*
+ * Google Gru-Kevin Rev 6+ board device tree source
+ *
+ * Copyright 2016-2017 Google, Inc
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+#include "rk3399-gru.dtsi"
+#include <dt-bindings/input/linux-event-codes.h>
+
+/*
+ * Kevin-specific things
+ *
+ * Things in this section should use names from the Kevin schematic since no
+ * equivalent exists in the Gru schematic. If referring to signals that exist
+ * in Gru we use the Gru names, though. Confusing enough for you?
+ */
+/ {
+ model = "Google Kevin";
+ compatible = "google,kevin-rev15", "google,kevin-rev14",
+ "google,kevin-rev13", "google,kevin-rev12",
+ "google,kevin-rev11", "google,kevin-rev10",
+ "google,kevin-rev9", "google,kevin-rev8",
+ "google,kevin-rev7", "google,kevin-rev6",
+ "google,kevin", "google,gru", "rockchip,rk3399";
+
+ /* Power tree */
+
+ p3_3v_dig: p3-3v-dig {
+ compatible = "regulator-fixed";
+ regulator-name = "p3.3v_dig";
+ pinctrl-names = "default";
+ pinctrl-0 = <&cpu3_pen_pwr_en>;
+
+ enable-active-high;
+ gpio = <&gpio4 30 GPIO_ACTIVE_HIGH>;
+ vin-supply = <&pp3300>;
+ };
+
+ backlight: backlight {
+ compatible = "pwm-backlight";
+ pwms = <&cros_ec_pwm 1>;
+ brightness-levels = <0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16
+ 17 18 19 20 21 22 23 24 25 26 27 28 29 30
+ 31 32 33 34 35 36 37 38 39 40 41 42 43 44
+ 45 46 47 48 49 50 51 52 53 54 55 56 57 58
+ 59 60 61 62 63 64 65 66 67 68 69 70 71 72
+ 73 74 75 76 77 78 79 80 81 82 83 84 85 86
+ 87 88 89 90 91 92 93 94 95 96 97 98 99 100>;
+ default-brightness-level = <51>;
+ enable-gpios = <&gpio1 17 GPIO_ACTIVE_HIGH>;
+ power-supply = <&pp3300_disp>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&bl_en>;
+ pwm-delay-us = <10000>;
+ };
+
+ thermistor_ppvar_bigcpu: thermistor-ppvar-bigcpu {
+ compatible = "murata,ncp15wb473";
+ pullup-uv = <1800000>;
+ pullup-ohm = <25500>;
+ pulldown-ohm = <0>;
+ io-channels = <&saradc 2>;
+ #thermal-sensor-cells = <0>;
+ };
+
+ thermistor_ppvar_litcpu: thermistor-ppvar-litcpu {
+ compatible = "murata,ncp15wb473";
+ pullup-uv = <1800000>;
+ pullup-ohm = <25500>;
+ pulldown-ohm = <0>;
+ io-channels = <&saradc 3>;
+ #thermal-sensor-cells = <0>;
+ };
+};
+
+&gpio_keys {
+ pinctrl-names = "default";
+ pinctrl-0 = <&bt_host_wake_l>, <&cpu1_pen_eject>;
+
+ pen-insert {
+ label = "Pen Insert";
+ /* Insert = low, eject = high */
+ gpios = <&gpio0 13 GPIO_ACTIVE_LOW>;
+ linux,code = <SW_PEN_INSERTED>;
+ linux,input-type = <EV_SW>;
+ wakeup-source;
+ };
+};
+
+&thermal_zones {
+ bigcpu_reg_thermal: bigcpu-reg-thermal {
+ polling-delay-passive = <100>; /* milliseconds */
+ polling-delay = <1000>; /* milliseconds */
+ thermal-sensors = <&thermistor_ppvar_bigcpu 0>;
+ sustainable-power = <4000>;
+
+ ppvar_bigcpu_trips: trips {
+ ppvar_bigcpu_on: ppvar-bigcpu-on {
+ temperature = <40000>; /* millicelsius */
+ hysteresis = <2000>; /* millicelsius */
+ type = "passive";
+ };
+
+ ppvar_bigcpu_alert: ppvar-bigcpu-alert {
+ temperature = <50000>; /* millicelsius */
+ hysteresis = <2000>; /* millicelsius */
+ type = "passive";
+ };
+
+ ppvar_bigcpu_crit: ppvar-bigcpu-crit {
+ temperature = <90000>; /* millicelsius */
+ hysteresis = <0>; /* millicelsius */
+ type = "critical";
+ };
+ };
+
+ cooling-maps {
+ map0 {
+ trip = <&ppvar_bigcpu_alert>;
+ cooling-device =
+ <&cpu_l0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ contribution = <4096>;
+ };
+ map1 {
+ trip = <&ppvar_bigcpu_alert>;
+ cooling-device =
+ <&cpu_b0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ contribution = <1024>;
+ };
+ };
+ };
+
+ litcpu_reg_thermal: litcpu-reg-thermal {
+ polling-delay-passive = <100>; /* milliseconds */
+ polling-delay = <1000>; /* milliseconds */
+ thermal-sensors = <&thermistor_ppvar_litcpu 0>;
+ sustainable-power = <4000>;
+
+ ppvar_litcpu_trips: trips {
+ ppvar_litcpu_on: ppvar-litcpu-on {
+ temperature = <40000>; /* millicelsius */
+ hysteresis = <2000>; /* millicelsius */
+ type = "passive";
+ };
+
+ ppvar_litcpu_alert: ppvar-litcpu-alert {
+ temperature = <50000>; /* millicelsius */
+ hysteresis = <2000>; /* millicelsius */
+ type = "passive";
+ };
+
+ ppvar_litcpu_crit: ppvar-litcpu-crit {
+ temperature = <90000>; /* millicelsius */
+ hysteresis = <0>; /* millicelsius */
+ type = "critical";
+ };
+ };
+ };
+};
+
+ap_i2c_tpm: &i2c0 {
+ status = "okay";
+
+ clock-frequency = <400000>;
+
+ /* These are relatively safe rise/fall times. */
+ i2c-scl-falling-time-ns = <50>;
+ i2c-scl-rising-time-ns = <300>;
+
+ tpm: tpm@20 {
+ compatible = "infineon,slb9645tt";
+ reg = <0x20>;
+ powered-while-suspended;
+ };
+};
+
+ap_i2c_dig: &i2c2 {
+ status = "okay";
+
+ clock-frequency = <400000>;
+
+ /* These are relatively safe rise/fall times. */
+ i2c-scl-falling-time-ns = <50>;
+ i2c-scl-rising-time-ns = <300>;
+
+ digitizer: digitizer@9 {
+ /* wacom,w9013 */
+ compatible = "hid-over-i2c";
+ reg = <0x9>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&cpu1_dig_irq_l &cpu1_dig_pdct_l>;
+
+ vdd-supply = <&p3_3v_dig>;
+ post-power-on-delay-ms = <100>;
+
+ interrupt-parent = <&gpio2>;
+ interrupts = <4 IRQ_TYPE_LEVEL_LOW>;
+
+ hid-descr-addr = <0x1>;
+ };
+};
+
+/* Adjustments to things in the gru baseboard */
+
+&ap_i2c_tp {
+ trackpad@4a {
+ compatible = "atmel,atmel_mxt_tp";
+ reg = <0x4a>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&trackpad_int_l>;
+ interrupt-parent = <&gpio1>;
+ interrupts = <4 IRQ_TYPE_LEVEL_LOW>;
+ wakeup-source;
+ };
+};
+
+&ap_i2c_ts {
+ touchscreen@4b {
+ compatible = "atmel,atmel_mxt_ts";
+ reg = <0x4b>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&touch_int_l>;
+ interrupt-parent = <&gpio3>;
+ interrupts = <13 IRQ_TYPE_LEVEL_LOW>;
+ };
+};
+
+&saradc {
+ status = "okay";
+ vref-supply = <&pp1800_ap_io>;
+};
+
+&mvl_wifi {
+ marvell,wakeup-pin = <14>; /* GPIO_14 on Marvell */
+};
+
+&pinctrl {
+ digitizer {
+ /* Has external pullup */
+ cpu1_dig_irq_l: cpu1-dig-irq-l {
+ rockchip,pins = <2 4 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ /* Has external pullup */
+ cpu1_dig_pdct_l: cpu1-dig-pdct-l {
+ rockchip,pins = <2 5 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ discrete-regulators {
+ cpu3_pen_pwr_en: cpu3-pen-pwr-en {
+ rockchip,pins = <4 30 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ pen {
+ cpu1_pen_eject: cpu1-pen-eject {
+ rockchip,pins = <0 13 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+ };
+
+ wifi {
+ wlan_host_wake_l: wlan-host-wake-l {
+ rockchip,pins = <0 8 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi
new file mode 100644
index 000000000000..0d960b7f7625
--- /dev/null
+++ b/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi
@@ -0,0 +1,1103 @@
+/*
+ * Google Gru (and derivatives) board device tree source
+ *
+ * Copyright 2016-2017 Google, Inc
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <dt-bindings/input/input.h>
+#include "rk3399.dtsi"
+#include "rk3399-opp.dtsi"
+
+/ {
+ chosen {
+ stdout-path = "serial2:115200n8";
+ };
+
+ /*
+ * Power Tree
+ *
+ * In general an attempt is made to include all rails called out by
+ * the schematic as long as those rails interact in some way with
+ * the AP. AKA:
+ * - Rails that only connect to the EC (or devices that the EC talks to)
+ * are not included.
+ * - Rails _are_ included if the rails go to the AP even if the AP
+ * doesn't currently care about them / they are always on. The idea
+ * here is that it makes it easier to map to the schematic or extend
+ * later.
+ *
+ * If two rails are substantially the same from the AP's point of
+ * view, though, we won't create a full fixed regulator. We'll just
+ * put the child rail as an alias of the parent rail. Sometimes rails
+ * look the same to the AP because one of these is true:
+ * - The EC controls the enable and the EC always enables a rail as
+ * long as the AP is running.
+ * - The rails are actually connected to each other by a jumper and
+ * the distinction is just there to add clarity/flexibility to the
+ * schematic.
+ */
+
+ ppvar_sys: ppvar-sys {
+ compatible = "regulator-fixed";
+ regulator-name = "ppvar_sys";
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ pp900_ap: pp900-ap {
+ compatible = "regulator-fixed";
+ regulator-name = "pp900_ap";
+
+ /* EC turns on w/ pp900_ap_en; always on for AP */
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <900000>;
+
+ vin-supply = <&ppvar_sys>;
+ };
+
+ pp1200_lpddr: pp1200-lpddr {
+ compatible = "regulator-fixed";
+ regulator-name = "pp1200_lpddr";
+
+ /* EC turns on w/ lpddr_pwr_en; always on for AP */
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+
+ vin-supply = <&ppvar_sys>;
+ };
+
+ pp1800: pp1800 {
+ compatible = "regulator-fixed";
+ regulator-name = "pp1800";
+
+ /* Always on when ppvar_sys shows power good */
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ vin-supply = <&ppvar_sys>;
+ };
+
+ pp3000: pp3000 {
+ compatible = "regulator-fixed";
+ regulator-name = "pp3000";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pp3000_en>;
+
+ enable-active-high;
+ gpio = <&gpio0 12 GPIO_ACTIVE_HIGH>;
+
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+
+ vin-supply = <&ppvar_sys>;
+ };
+
+ pp3300: pp3300 {
+ compatible = "regulator-fixed";
+ regulator-name = "pp3300";
+
+ /* Always on; plain and simple */
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+
+ vin-supply = <&ppvar_sys>;
+ };
+
+ pp5000: pp5000 {
+ compatible = "regulator-fixed";
+ regulator-name = "pp5000";
+
+ /* EC turns on w/ pp5000_en; always on for AP */
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+
+ vin-supply = <&ppvar_sys>;
+ };
+
+ ppvar_bigcpu: ppvar-bigcpu {
+ compatible = "pwm-regulator";
+ regulator-name = "ppvar_bigcpu";
+ /*
+ * OVP circuit requires special handling which is not yet
+ * represented. Keep disabled for now.
+ */
+ status = "disabled";
+
+ pwms = <&pwm1 0 3337 0>;
+ pwm-supply = <&ppvar_sys>;
+ pwm-dutycycle-range = <100 0>;
+ pwm-dutycycle-unit = <100>;
+
+ /* EC turns on w/ ap_core_en; always on for AP */
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <798674>;
+ regulator-max-microvolt = <1302172>;
+ };
+
+ ppvar_litcpu: ppvar-litcpu {
+ compatible = "pwm-regulator";
+ regulator-name = "ppvar_litcpu";
+ /*
+ * OVP circuit requires special handling which is not yet
+ * represented. Keep disabled for now.
+ */
+ status = "disabled";
+
+ pwms = <&pwm2 0 3337 0>;
+ pwm-supply = <&ppvar_sys>;
+ pwm-dutycycle-range = <100 0>;
+ pwm-dutycycle-unit = <100>;
+
+ /* EC turns on w/ ap_core_en; always on for AP */
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <799065>;
+ regulator-max-microvolt = <1303738>;
+ };
+
+ ppvar_gpu: ppvar-gpu {
+ compatible = "pwm-regulator";
+ regulator-name = "ppvar_gpu";
+ /*
+ * OVP circuit requires special handling which is not yet
+ * represented. Keep disabled for now.
+ */
+ status = "disabled";
+
+ pwms = <&pwm0 0 3337 0>;
+ pwm-supply = <&ppvar_sys>;
+ pwm-dutycycle-range = <100 0>;
+ pwm-dutycycle-unit = <100>;
+
+ /* EC turns on w/ ap_core_en; always on for AP */
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <785782>;
+ regulator-max-microvolt = <1217729>;
+ };
+
+ ppvar_centerlogic: ppvar-centerlogic {
+ compatible = "pwm-regulator";
+ regulator-name = "ppvar_centerlogic";
+ /*
+ * OVP circuit requires special handling which is not yet
+ * represented. Keep disabled for now.
+ */
+ status = "disabled";
+
+ pwms = <&pwm3 0 3337 0>;
+ pwm-supply = <&ppvar_sys>;
+ pwm-dutycycle-range = <100 0>;
+ pwm-dutycycle-unit = <100>;
+
+ /* EC turns on w/ ppvar_centerlogic_en; always on for AP */
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <800069>;
+ regulator-max-microvolt = <1049692>;
+ };
+
+ /* Schematics call this PPVAR even though it's fixed */
+ ppvar_logic: ppvar-logic {
+ compatible = "regulator-fixed";
+ regulator-name = "ppvar_logic";
+
+ /* EC turns on w/ ppvar_logic_en; always on for AP */
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <900000>;
+
+ vin-supply = <&ppvar_sys>;
+ };
+
+ /* EC turns on w/ pp900_ddrpll_en */
+ pp900_ddrpll: pp900-ap {
+ };
+
+ /* EC turns on w/ pp900_pcie_en */
+ pp900_pcie: pp900-ap {
+ };
+
+ /* EC turns on w/ pp900_pll_en */
+ pp900_pll: pp900-ap {
+ };
+
+ /* EC turns on w/ pp900_pmu_en */
+ pp900_pmu: pp900-ap {
+ };
+
+ /* EC turns on w/ pp900_usb_en */
+ pp900_usb: pp900-ap {
+ };
+
+ /* EC turns on w/ pp1800_s0_en_l */
+ pp1800_ap_io: pp1800_emmc: pp1800_nfc: pp1800_s0: pp1800 {
+ };
+
+ /* EC turns on w/ pp1800_avdd_en_l */
+ pp1800_avdd: pp1800 {
+ };
+
+ /* EC turns on w/ pp1800_lid_en_l */
+ pp1800_lid: pp1800_mic: pp1800 {
+ };
+
+ /* EC turns on w/ lpddr_pwr_en */
+ pp1800_lpddr: pp1800 {
+ };
+
+ /* EC turns on w/ pp1800_pmu_en_l */
+ pp1800_pmu: pp1800 {
+ };
+
+ /* EC turns on w/ pp1800_usb_en_l */
+ pp1800_usb: pp1800 {
+ };
+
+ pp1500_ap_io: pp1500-ap-io {
+ compatible = "regulator-fixed";
+ regulator-name = "pp1500_ap_io";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pp1500_en>;
+
+ enable-active-high;
+ gpio = <&gpio0 10 GPIO_ACTIVE_HIGH>;
+
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1500000>;
+ regulator-max-microvolt = <1500000>;
+
+ vin-supply = <&pp1800>;
+ };
+
+ pp1800_audio: pp1800-audio {
+ compatible = "regulator-fixed";
+ regulator-name = "pp1800_audio";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pp1800_audio_en>;
+
+ enable-active-high;
+ gpio = <&gpio0 2 GPIO_ACTIVE_HIGH>;
+
+ regulator-always-on;
+ regulator-boot-on;
+
+ vin-supply = <&pp1800>;
+ };
+
+ /* gpio is shared with pp3300_wifi_bt */
+ pp1800_pcie: pp1800-pcie {
+ compatible = "regulator-fixed";
+ regulator-name = "pp1800_pcie";
+ pinctrl-names = "default";
+ pinctrl-0 = <&wlan_module_pd_l>;
+
+ enable-active-high;
+ gpio = <&gpio0 4 GPIO_ACTIVE_HIGH>;
+
+ /*
+ * Need to wait 1ms + ramp-up time before we can power on WiFi.
+ * This has been approximated as 8ms total.
+ */
+ regulator-enable-ramp-delay = <8000>;
+
+ vin-supply = <&pp1800>;
+ };
+
+ /*
+ * This is a bit of a hack. The WiFi module should be reset at least
+ * 1ms after its regulators have ramped up (max rampup time is ~7ms).
+ * With some stretching of the imagination, we can call the 1.8V
+ * regulator a supply.
+ */
+ wlan_pd_n: wlan-pd-n {
+ compatible = "regulator-fixed";
+ regulator-name = "wlan_pd_n";
+
+ /* Note the wlan_module_reset_l pinctrl */
+ enable-active-high;
+ gpio = <&gpio1 11 GPIO_ACTIVE_HIGH>;
+
+ vin-supply = <&pp1800_pcie>;
+ };
+
+ /* Always on; plain and simple */
+ pp3000_ap: pp3000_emmc: pp3000 {
+ };
+
+ pp3000_sd_slot: pp3000-sd-slot {
+ compatible = "regulator-fixed";
+ regulator-name = "pp3000_sd_slot";
+ pinctrl-names = "default";
+ pinctrl-0 = <&sd_slot_pwr_en>;
+
+ enable-active-high;
+ gpio = <&gpio4 29 GPIO_ACTIVE_HIGH>;
+
+ vin-supply = <&pp3000>;
+ };
+
+ /*
+ * Technically, this is a small abuse of 'regulator-gpio'; this
+ * regulator is a mux between pp1800 and pp3300. pp1800 and pp3300 are
+ * always on though, so it is sufficient to simply control the mux
+ * here.
+ */
+ ppvar_sd_card_io: ppvar-sd-card-io {
+ compatible = "regulator-gpio";
+ regulator-name = "ppvar_sd_card_io";
+ pinctrl-names = "default";
+ pinctrl-0 = <&sd_io_pwr_en &sd_pwr_1800_sel>;
+
+ enable-active-high;
+ enable-gpio = <&gpio2 2 GPIO_ACTIVE_HIGH>;
+ gpios = <&gpio2 28 GPIO_ACTIVE_HIGH>;
+ states = <1800000 0x1
+ 3000000 0x0>;
+
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3000000>;
+ };
+
+ /* EC turns on w/ pp3300_trackpad_en_l */
+ pp3300_trackpad: pp3300-trackpad {
+ };
+
+ /* EC turns on w/ pp3300_usb_en_l */
+ pp3300_usb: pp3300 {
+ };
+
+ pp3300_disp: pp3300-disp {
+ compatible = "regulator-fixed";
+ regulator-name = "pp3300_disp";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pp3300_disp_en>;
+
+ enable-active-high;
+ gpio = <&gpio4 27 GPIO_ACTIVE_HIGH>;
+
+ startup-delay-us = <2000>;
+ vin-supply = <&pp3300>;
+ };
+
+ /* gpio is shared with pp1800_pcie and pinctrl is set there */
+ pp3300_wifi_bt: pp3300-wifi-bt {
+ compatible = "regulator-fixed";
+ regulator-name = "pp3300_wifi_bt";
+
+ enable-active-high;
+ gpio = <&gpio0 4 GPIO_ACTIVE_HIGH>;
+
+ vin-supply = <&pp3300>;
+ };
+
+ /* EC turns on w/ usb_a_en */
+ pp5000_usb_a_vbus: pp5000 {
+ };
+
+ gpio_keys: gpio-keys {
+ compatible = "gpio-keys";
+ pinctrl-names = "default";
+ pinctrl-0 = <&bt_host_wake_l>;
+
+ wake-on-bt {
+ label = "Wake-on-Bluetooth";
+ gpios = <&gpio0 3 GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_WAKEUP>;
+ wakeup-source;
+ };
+ };
+
+ max98357a: max98357a {
+ compatible = "maxim,max98357a";
+ pinctrl-names = "default";
+ pinctrl-0 = <&sdmode_en>;
+ sdmode-gpios = <&gpio1 2 GPIO_ACTIVE_HIGH>;
+ sdmode-delay = <2>;
+ #sound-dai-cells = <0>;
+ status = "okay";
+ };
+
+ sound {
+ compatible = "rockchip,rk3399-gru-sound";
+ rockchip,cpu = <&i2s0 &i2s2>;
+ rockchip,codec = <&max98357a &headsetcodec &codec>;
+ };
+};
+
+/*
+ * Set some suspend operating points to avoid OVP in suspend
+ *
+ * When we go into S3, ARM Trusted Firmware will transition our PWM
+ * regulators from wherever they are back to the "default" operating point
+ * (whatever voltage we get when we set the PWM pins to "input").
+ *
+ * This quick transition under light load has the possibility to trigger the
+ * regulator "over voltage protection" (OVP).
+ *
+ * To make extra certain that we don't hit this OVP at suspend time, we'll
+ * transition to a voltage that's much closer to the default (~1.0 V) so that
+ * there will not be a big jump. Technically we only need to get within 200 mV
+ * of the default voltage, but the speed here should be fast enough and we need
+ * suspend/resume to be rock solid.
+ */
+
+&cluster0_opp {
+ opp05 {
+ opp-suspend;
+ };
+};
+
+&cluster1_opp {
+ opp06 {
+ opp-suspend;
+ };
+};
+
+&cpu_l0 {
+ cpu-supply = <&ppvar_litcpu>;
+};
+
+&cpu_l1 {
+ cpu-supply = <&ppvar_litcpu>;
+};
+
+&cpu_l2 {
+ cpu-supply = <&ppvar_litcpu>;
+};
+
+&cpu_l3 {
+ cpu-supply = <&ppvar_litcpu>;
+};
+
+&cpu_b0 {
+ cpu-supply = <&ppvar_bigcpu>;
+};
+
+&cpu_b1 {
+ cpu-supply = <&ppvar_bigcpu>;
+};
+
+&cru {
+ assigned-clocks =
+ <&cru PLL_GPLL>, <&cru PLL_CPLL>,
+ <&cru PLL_NPLL>,
+ <&cru ACLK_PERIHP>, <&cru HCLK_PERIHP>,
+ <&cru PCLK_PERIHP>,
+ <&cru ACLK_PERILP0>, <&cru HCLK_PERILP0>,
+ <&cru PCLK_PERILP0>, <&cru ACLK_CCI>,
+ <&cru HCLK_PERILP1>, <&cru PCLK_PERILP1>;
+ assigned-clock-rates =
+ <600000000>, <800000000>,
+ <1000000000>,
+ <150000000>, <75000000>,
+ <37500000>,
+ <100000000>, <100000000>,
+ <50000000>, <800000000>,
+ <100000000>, <50000000>;
+};
+
+&emmc_phy {
+ status = "okay";
+};
+
+ap_i2c_mic: &i2c1 {
+ status = "okay";
+
+ clock-frequency = <400000>;
+
+ /* These are relatively safe rise/fall times */
+ i2c-scl-falling-time-ns = <50>;
+ i2c-scl-rising-time-ns = <300>;
+
+ headsetcodec: rt5514@57 {
+ compatible = "realtek,rt5514";
+ reg = <0x57>;
+ interrupt-parent = <&gpio1>;
+ interrupts = <13 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&mic_int>;
+ realtek,dmic-init-delay = <20>;
+ wakeup-source;
+ };
+};
+
+ap_i2c_ts: &i2c3 {
+ status = "okay";
+
+ clock-frequency = <400000>;
+
+ /* These are relatively safe rise/fall times */
+ i2c-scl-falling-time-ns = <50>;
+ i2c-scl-rising-time-ns = <300>;
+};
+
+ap_i2c_tp: &i2c5 {
+ status = "okay";
+
+ clock-frequency = <400000>;
+
+ /* These are relatively safe rise/fall times */
+ i2c-scl-falling-time-ns = <50>;
+ i2c-scl-rising-time-ns = <300>;
+
+ /*
+ * Note strange pullup enable. Apparently this avoids leakage but
+ * still allows us to get nice 4.7K pullups for high speed i2c
+ * transfers. Basically we want the pullup on whenever the ap is
+ * alive, so the "en" pin just gets set to output high.
+ */
+ pinctrl-0 = <&i2c5_xfer &ap_i2c_tp_pu_en>;
+};
+
+ap_i2c_audio: &i2c8 {
+ status = "okay";
+
+ clock-frequency = <400000>;
+
+ /* These are relatively safe rise/fall times */
+ i2c-scl-falling-time-ns = <50>;
+ i2c-scl-rising-time-ns = <300>;
+
+ codec: da7219@1a {
+ compatible = "dlg,da7219";
+ reg = <0x1a>;
+ interrupt-parent = <&gpio1>;
+ interrupts = <23 IRQ_TYPE_LEVEL_LOW>;
+ clocks = <&cru SCLK_I2S_8CH_OUT>;
+ clock-names = "mclk";
+ dlg,micbias-lvl = <2600>;
+ dlg,mic-amp-in-sel = "diff";
+ pinctrl-names = "default";
+ pinctrl-0 = <&headset_int_l>;
+ VDD-supply = <&pp1800>;
+ VDDMIC-supply = <&pp3300>;
+ VDDIO-supply = <&pp1800>;
+
+ da7219_aad {
+ dlg,adc-1bit-rpt = <1>;
+ dlg,btn-avg = <4>;
+ dlg,btn-cfg = <50>;
+ dlg,mic-det-thr = <500>;
+ dlg,jack-ins-deb = <20>;
+ dlg,jack-det-rate = "32ms_64ms";
+ dlg,jack-rem-deb = <1>;
+
+ dlg,a-d-btn-thr = <0xa>;
+ dlg,d-b-btn-thr = <0x16>;
+ dlg,b-c-btn-thr = <0x21>;
+ dlg,c-mic-btn-thr = <0x3E>;
+ };
+ };
+};
+
+&i2s0 {
+ status = "okay";
+};
+
+&i2s2 {
+ status = "okay";
+};
+
+&io_domains {
+ status = "okay";
+
+ audio-supply = <&pp1800_audio>; /* APIO5_VDD; 3d 4a */
+ bt656-supply = <&pp1800_ap_io>; /* APIO2_VDD; 2a 2b */
+ gpio1830-supply = <&pp3000_ap>; /* APIO4_VDD; 4c 4d */
+ sdmmc-supply = <&ppvar_sd_card_io>; /* SDMMC0_VDD; 4b */
+};
+
+&pcie0 {
+ status = "okay";
+
+ ep-gpios = <&gpio2 27 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pcie_clkreqn_cpm>, <&wifi_perst_l>;
+ vpcie3v3-supply = <&pp3300_wifi_bt>;
+ vpcie1v8-supply = <&wlan_pd_n>; /* HACK: see &wlan_pd_n */
+ vpcie0v9-supply = <&pp900_pcie>;
+
+ pci_rootport: pcie@0,0 {
+ reg = <0x83000000 0x0 0x00000000 0x0 0x00000000>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+ ranges;
+
+ mvl_wifi: wifi@0,0 {
+ compatible = "pci1b4b,2b42";
+ reg = <0x83010000 0x0 0x00000000 0x0 0x00100000
+ 0x83010000 0x0 0x00100000 0x0 0x00100000>;
+ interrupt-parent = <&gpio0>;
+ interrupts = <8 IRQ_TYPE_LEVEL_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&wlan_host_wake_l>;
+ wakeup-source;
+ };
+ };
+};
+
+&pcie_phy {
+ status = "okay";
+};
+
+&pmu_io_domains {
+ status = "okay";
+
+ pmu1830-supply = <&pp1800_pmu>; /* PMUIO2_VDD */
+};
+
+&pwm0 {
+ status = "okay";
+};
+
+&pwm1 {
+ status = "okay";
+};
+
+&pwm2 {
+ status = "okay";
+};
+
+&pwm3 {
+ status = "okay";
+};
+
+&sdhci {
+ /*
+ * Signal integrity isn't great at 200 MHz, and 150 MHz (DDR) gives the
+ * same (or nearly the same) performance for all eMMC parts that are
+ * intended to be used.
+ */
+ assigned-clock-rates = <150000000>;
+
+ bus-width = <8>;
+ mmc-hs400-1_8v;
+ mmc-hs400-enhanced-strobe;
+ non-removable;
+ status = "okay";
+};
+
+&sdmmc {
+ status = "okay";
+
+ /*
+ * Note: configure "sdmmc_cd" as card detect even though it's actually
+ * hooked to ground. Because we specify "cd-gpios" below, dw_mmc
+ * should ignore card detect anyway. Specifying the pin as
+ * sdmmc_cd means that even if GRF_SOC_CON7[12] (force_jtag) is
+ * turned on, the system will still make sure the port is
+ * configured as SDMMC and not JTAG.
+ */
+ pinctrl-names = "default";
+ pinctrl-0 = <&sdmmc_clk &sdmmc_cmd &sdmmc_cd &sdmmc_cd_gpio
+ &sdmmc_bus4>;
+
+ bus-width = <4>;
+ cap-mmc-highspeed;
+ cap-sd-highspeed;
+ cd-gpios = <&gpio4 24 GPIO_ACTIVE_LOW>;
+ disable-wp;
+ sd-uhs-sdr12;
+ sd-uhs-sdr25;
+ sd-uhs-sdr50;
+ sd-uhs-sdr104;
+ vmmc-supply = <&pp3000_sd_slot>;
+ vqmmc-supply = <&ppvar_sd_card_io>;
+};
+
+&spi1 {
+ status = "okay";
+
+ pinctrl-names = "default", "sleep";
+ pinctrl-1 = <&spi1_sleep>;
+
+ spiflash@0 {
+ compatible = "jedec,spi-nor";
+ reg = <0>;
+
+ /* May run faster once verified. */
+ spi-max-frequency = <10000000>;
+ };
+};
+
+&spi2 {
+ status = "okay";
+
+ wacky_spi_audio: spi2@0 {
+ compatible = "realtek,rt5514";
+ reg = <0>;
+
+ /* May run faster once verified. */
+ spi-max-frequency = <10000000>;
+ };
+};
+
+&spi5 {
+ status = "okay";
+
+ cros_ec: ec@0 {
+ compatible = "google,cros-ec-spi";
+ reg = <0>;
+ interrupt-parent = <&gpio0>;
+ interrupts = <1 IRQ_TYPE_LEVEL_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&ec_ap_int_l>;
+ spi-max-frequency = <3000000>;
+
+ i2c_tunnel: i2c-tunnel {
+ compatible = "google,cros-ec-i2c-tunnel";
+ google,remote-bus = <4>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ cros_ec_pwm: ec-pwm {
+ compatible = "google,cros-ec-pwm";
+ #pwm-cells = <1>;
+ };
+ };
+};
+
+&tsadc {
+ status = "okay";
+
+ rockchip,hw-tshut-mode = <1>; /* tshut mode 0:CRU 1:GPIO */
+ rockchip,hw-tshut-polarity = <1>; /* tshut polarity 0:LOW 1:HIGH */
+};
+
+&u2phy0 {
+ status = "okay";
+};
+
+&u2phy1 {
+ status = "okay";
+};
+
+&u2phy0_host {
+ status = "okay";
+};
+
+&u2phy1_host {
+ status = "okay";
+};
+
+&u2phy0_otg {
+ status = "okay";
+};
+
+&u2phy1_otg {
+ status = "okay";
+};
+
+&uart2 {
+ status = "okay";
+};
+
+&usb_host0_ehci {
+ status = "okay";
+};
+
+&usb_host0_ohci {
+ status = "okay";
+};
+
+&usb_host1_ehci {
+ status = "okay";
+};
+
+&usb_host1_ohci {
+ status = "okay";
+};
+
+&usbdrd3_0 {
+ status = "okay";
+};
+
+&usbdrd_dwc3_0 {
+ status = "okay";
+ dr_mode = "host";
+};
+
+&usbdrd3_1 {
+ status = "okay";
+};
+
+&usbdrd_dwc3_1 {
+ status = "okay";
+ dr_mode = "host";
+};
+
+#include <arm/cros-ec-keyboard.dtsi>
+#include <arm/cros-ec-sbs.dtsi>
+
+&pinctrl {
+ /*
+ * pinctrl settings for pins that have no real owners.
+ *
+ * At the moment settings are identical for S0 and S3, but if we later
+ * need to configure things differently for S3 we'll adjust here.
+ */
+ pinctrl-names = "default";
+ pinctrl-0 = <
+ &ap_pwroff /* AP will auto-assert this when in S3 */
+ &clk_32k /* This pin is always 32k on gru boards */
+
+ /*
+ * We want this driven low ASAP; firmware should help us, but
+ * we can help ourselves too.
+ */
+ &wlan_module_reset_l
+ >;
+
+ pcfg_output_low: pcfg-output-low {
+ output-low;
+ };
+
+ pcfg_output_high: pcfg-output-high {
+ output-high;
+ };
+
+ pcfg_pull_none_8ma: pcfg-pull-none-8ma {
+ bias-disable;
+ drive-strength = <8>;
+ };
+
+ backlight-enable {
+ bl_en: bl-en {
+ rockchip,pins = <1 17 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ cros-ec {
+ ec_ap_int_l: ec-ap-int-l {
+ rockchip,pins = <RK_GPIO0 1 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+ };
+
+ discrete-regulators {
+ pp1500_en: pp1500-en {
+ rockchip,pins = <RK_GPIO0 10 RK_FUNC_GPIO
+ &pcfg_pull_none>;
+ };
+
+ pp1800_audio_en: pp1800-audio-en {
+ rockchip,pins = <RK_GPIO0 2 RK_FUNC_GPIO
+ &pcfg_pull_down>;
+ };
+
+ pp3300_disp_en: pp3300-disp-en {
+ rockchip,pins = <RK_GPIO4 27 RK_FUNC_GPIO
+ &pcfg_pull_none>;
+ };
+
+ pp3000_en: pp3000-en {
+ rockchip,pins = <RK_GPIO0 12 RK_FUNC_GPIO
+ &pcfg_pull_none>;
+ };
+
+ sd_io_pwr_en: sd-io-pwr-en {
+ rockchip,pins = <RK_GPIO2 2 RK_FUNC_GPIO
+ &pcfg_pull_none>;
+ };
+
+ sd_pwr_1800_sel: sd-pwr-1800-sel {
+ rockchip,pins = <RK_GPIO2 28 RK_FUNC_GPIO
+ &pcfg_pull_none>;
+ };
+
+ sd_slot_pwr_en: sd-slot-pwr-en {
+ rockchip,pins = <RK_GPIO4 29 RK_FUNC_GPIO
+ &pcfg_pull_none>;
+ };
+
+ wlan_module_pd_l: wlan-module-pd-l {
+ rockchip,pins = <RK_GPIO0 4 RK_FUNC_GPIO
+ &pcfg_pull_down>;
+ };
+ };
+
+ codec {
+ /* Has external pullup */
+ headset_int_l: headset-int-l {
+ rockchip,pins = <1 23 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ mic_int: mic-int {
+ rockchip,pins = <1 13 RK_FUNC_GPIO &pcfg_pull_down>;
+ };
+ };
+
+ max98357a {
+ sdmode_en: sdmode-en {
+ rockchip,pins = <1 2 RK_FUNC_GPIO &pcfg_pull_down>;
+ };
+ };
+
+ pcie {
+ pcie_clkreqn_cpm: pci-clkreqn-cpm {
+ /*
+ * Since our PCIe controller doesn't support ClockPM (CPM), we
+ * want to hack this pin as a GPIO, so the EP can still de-assert
+ * it and make ClockPM (CPM) work.
+ */
+ rockchip,pins = <2 26 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ sdmmc {
+ /*
+ * We run sdmmc at max speed; bump up drive strength.
+ * We also have external pulls, so disable the internal ones.
+ */
+ sdmmc_bus4: sdmmc-bus4 {
+ rockchip,pins =
+ <4 8 RK_FUNC_1 &pcfg_pull_none_8ma>,
+ <4 9 RK_FUNC_1 &pcfg_pull_none_8ma>,
+ <4 10 RK_FUNC_1 &pcfg_pull_none_8ma>,
+ <4 11 RK_FUNC_1 &pcfg_pull_none_8ma>;
+ };
+
+ sdmmc_clk: sdmmc-clk {
+ rockchip,pins =
+ <4 12 RK_FUNC_1 &pcfg_pull_none_8ma>;
+ };
+
+ sdmmc_cmd: sdmmc-cmd {
+ rockchip,pins =
+ <4 13 RK_FUNC_1 &pcfg_pull_none_8ma>;
+ };
+
+ /*
+ * In our case the official card detect is hooked to ground
+ * to avoid getting access to JTAG just by sticking something
+ * in the SD card slot (see the force_jtag bit in the TRM).
+ *
+ * We still configure it as card detect because it doesn't
+ * hurt and dw_mmc will ignore it. We make sure to disable
+ * the pull though so we don't burn needless power.
+ */
+ sdmmc_cd: sdmmc-cd {
+ rockchip,pins =
+ <0 7 RK_FUNC_1 &pcfg_pull_none>;
+ };
+
+ /* This is where we actually hook up CD; has external pull */
+ sdmmc_cd_gpio: sdmmc-cd-gpio {
+ rockchip,pins = <4 24 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ spi1 {
+ spi1_sleep: spi1-sleep {
+ /*
+ * Pull down SPI1 CLK/CS/RX/TX during suspend, to
+ * prevent leakage.
+ */
+ rockchip,pins = <1 9 RK_FUNC_GPIO &pcfg_pull_down>,
+ <1 10 RK_FUNC_GPIO &pcfg_pull_down>,
+ <1 7 RK_FUNC_GPIO &pcfg_pull_down>,
+ <1 8 RK_FUNC_GPIO &pcfg_pull_down>;
+ };
+ };
+
+ touchscreen {
+ touch_int_l: touch-int-l {
+ rockchip,pins = <3 13 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+
+ touch_reset_l: touch-reset-l {
+ rockchip,pins = <4 26 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ trackpad {
+ ap_i2c_tp_pu_en: ap-i2c-tp-pu-en {
+ rockchip,pins = <3 12 RK_FUNC_GPIO &pcfg_output_high>;
+ };
+
+ trackpad_int_l: trackpad-int-l {
+ rockchip,pins = <1 4 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+ };
+
+ wifi {
+ wifi_perst_l: wifi-perst-l {
+ rockchip,pins = <2 27 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ wlan_module_reset_l: wlan-module-reset-l {
+ /*
+ * We want this driven low ASAP (As {Soon,Strongly} As
+ * Possible), to avoid leakage through the powered-down
+ * WiFi.
+ */
+ rockchip,pins = <1 11 RK_FUNC_GPIO &pcfg_output_low>;
+ };
+
+ bt_host_wake_l: bt-host-wake-l {
+ /* Kevin has an external pull up, but Gru does not */
+ rockchip,pins = <0 3 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+ };
+
+ write-protect {
+ ap_fw_wp: ap-fw-wp {
+ rockchip,pins = <1 18 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+ };
+};
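
The cros_ec_pwm node above only advertises #pwm-cells = <1>, and the bl_en
pinctrl entry is left without a consumer in this hunk; the natural consumer is
a pwm-backlight node in the board file. A minimal sketch, assuming EC PWM
channel 1 and the usual pwm-backlight properties (channel number, brightness
table and enable GPIO are illustrative, not taken from this patch):

	backlight: backlight {
		compatible = "pwm-backlight";
		pwms = <&cros_ec_pwm 1>;	/* assumed EC PWM channel */
		brightness-levels = <0 4 8 16 32 64 128 255>;
		default-brightness-level = <7>;
		enable-gpios = <&gpio1 17 GPIO_ACTIVE_HIGH>;	/* matches bl_en */
		power-supply = <&pp3300_disp>;
		pinctrl-names = "default";
		pinctrl-0 = <&bl_en>;
	};

With #pwm-cells = <1> the specifier is just the EC PWM channel index; there is
no period cell because the EC itself fixes the PWM period.
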
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-opp.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-opp.dtsi
new file mode 100644
index 000000000000..dd82e16236a8
--- /dev/null
+++ b/arch/arm64/boot/dts/rockchip/rk3399-opp.dtsi
@@ -0,0 +1,145 @@
+/*
+ * Copyright (c) 2016-2017 Fuzhou Rockchip Electronics Co., Ltd
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/ {
+ cluster0_opp: opp-table0 {
+ compatible = "operating-points-v2";
+ opp-shared;
+
+ opp00 {
+ opp-hz = /bits/ 64 <408000000>;
+ opp-microvolt = <800000>;
+ clock-latency-ns = <40000>;
+ };
+ opp01 {
+ opp-hz = /bits/ 64 <600000000>;
+ opp-microvolt = <800000>;
+ };
+ opp02 {
+ opp-hz = /bits/ 64 <816000000>;
+ opp-microvolt = <800000>;
+ };
+ opp03 {
+ opp-hz = /bits/ 64 <1008000000>;
+ opp-microvolt = <875000>;
+ };
+ opp04 {
+ opp-hz = /bits/ 64 <1200000000>;
+ opp-microvolt = <925000>;
+ };
+ opp05 {
+ opp-hz = /bits/ 64 <1416000000>;
+ opp-microvolt = <1050000>;
+ };
+ opp06 {
+ opp-hz = /bits/ 64 <1512000000>;
+ opp-microvolt = <1125000>;
+ };
+ };
+
+ cluster1_opp: opp-table1 {
+ compatible = "operating-points-v2";
+ opp-shared;
+
+ opp00 {
+ opp-hz = /bits/ 64 <408000000>;
+ opp-microvolt = <800000>;
+ clock-latency-ns = <40000>;
+ };
+ opp01 {
+ opp-hz = /bits/ 64 <600000000>;
+ opp-microvolt = <800000>;
+ };
+ opp02 {
+ opp-hz = /bits/ 64 <816000000>;
+ opp-microvolt = <825000>;
+ };
+ opp03 {
+ opp-hz = /bits/ 64 <1008000000>;
+ opp-microvolt = <875000>;
+ };
+ opp04 {
+ opp-hz = /bits/ 64 <1200000000>;
+ opp-microvolt = <950000>;
+ };
+ opp05 {
+ opp-hz = /bits/ 64 <1416000000>;
+ opp-microvolt = <1025000>;
+ };
+ opp06 {
+ opp-hz = /bits/ 64 <1608000000>;
+ opp-microvolt = <1075000>;
+ };
+ opp07 {
+ opp-hz = /bits/ 64 <1800000000>;
+ opp-microvolt = <1150000>;
+ };
+ opp08 {
+ opp-hz = /bits/ 64 <2016000000>;
+ opp-microvolt = <1250000>;
+ };
+ };
+};
+
+&cpu_l0 {
+ operating-points-v2 = <&cluster0_opp>;
+};
+
+&cpu_l1 {
+ operating-points-v2 = <&cluster0_opp>;
+};
+
+&cpu_l2 {
+ operating-points-v2 = <&cluster0_opp>;
+};
+
+&cpu_l3 {
+ operating-points-v2 = <&cluster0_opp>;
+};
+
+&cpu_b0 {
+ operating-points-v2 = <&cluster1_opp>;
+};
+
+&cpu_b1 {
+ operating-points-v2 = <&cluster1_opp>;
+};
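
rk3399-opp.dtsi intentionally carries only the shared frequency/voltage tables
and the operating-points-v2 hookup; the regulators feeding the cores stay in
the board file (as the Gru board does above with ppvar_litcpu/ppvar_bigcpu).
A minimal sketch of another board reusing the tables, with vdd_cpu_l and
vdd_cpu_b as placeholder regulator names:

	#include "rk3399.dtsi"
	#include "rk3399-opp.dtsi"

	&cpu_l0 {
		cpu-supply = <&vdd_cpu_l>;	/* placeholder board regulator */
	};

	&cpu_b0 {
		cpu-supply = <&vdd_cpu_b>;	/* placeholder board regulator */
	};

(and likewise for cpu_l1..cpu_l3 and cpu_b1, as the board file above does).
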
diff --git a/arch/arm64/boot/dts/rockchip/rk3399.dtsi b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
index 8e6d1bdeb9c3..f4f3c96c798d 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
@@ -211,6 +211,51 @@
};
};
+ pcie0: pcie@f8000000 {
+ compatible = "rockchip,rk3399-pcie";
+ reg = <0x0 0xf8000000 0x0 0x2000000>,
+ <0x0 0xfd000000 0x0 0x1000000>;
+ reg-names = "axi-base", "apb-base";
+ #address-cells = <3>;
+ #size-cells = <2>;
+ #interrupt-cells = <1>;
+ aspm-no-l0s;
+ bus-range = <0x0 0x1>;
+ clocks = <&cru ACLK_PCIE>, <&cru ACLK_PERF_PCIE>,
+ <&cru PCLK_PCIE>, <&cru SCLK_PCIE_PM>;
+ clock-names = "aclk", "aclk-perf",
+ "hclk", "pm";
+ interrupts = <GIC_SPI 49 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 50 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 51 IRQ_TYPE_LEVEL_HIGH 0>;
+ interrupt-names = "sys", "legacy", "client";
+ interrupt-map-mask = <0 0 0 7>;
+ interrupt-map = <0 0 0 1 &pcie0_intc 0>,
+ <0 0 0 2 &pcie0_intc 1>,
+ <0 0 0 3 &pcie0_intc 2>,
+ <0 0 0 4 &pcie0_intc 3>;
+ linux,pci-domain = <0>;
+ max-link-speed = <1>;
+ msi-map = <0x0 &its 0x0 0x1000>;
+ phys = <&pcie_phy>;
+ phy-names = "pcie-phy";
+ ranges = <0x83000000 0x0 0xfa000000 0x0 0xfa000000 0x0 0x600000
+ 0x81000000 0x0 0xfa600000 0x0 0xfa600000 0x0 0x100000>;
+ resets = <&cru SRST_PCIE_CORE>, <&cru SRST_PCIE_MGMT>,
+ <&cru SRST_PCIE_MGMT_STICKY>, <&cru SRST_PCIE_PIPE>,
+ <&cru SRST_PCIE_PM>, <&cru SRST_P_PCIE>,
+ <&cru SRST_A_PCIE>;
+ reset-names = "core", "mgmt", "mgmt-sticky", "pipe",
+ "pm", "pclk", "aclk";
+ status = "disabled";
+
+ pcie0_intc: interrupt-controller {
+ interrupt-controller;
+ #address-cells = <0>;
+ #interrupt-cells = <1>;
+ };
+ };
+
gmac: ethernet@fe300000 {
compatible = "rockchip,rk3399-gmac";
reg = <0x0 0xfe300000 0x0 0x10000>;
@@ -241,6 +286,8 @@
<&cru SCLK_SDIO_DRV>, <&cru SCLK_SDIO_SAMPLE>;
clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
fifo-depth = <0x100>;
+ resets = <&cru SRST_SDIO0>;
+ reset-names = "reset";
status = "disabled";
};
@@ -255,6 +302,8 @@
clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
fifo-depth = <0x100>;
power-domains = <&power RK3399_PD_SD>;
+ resets = <&cru SRST_SDMMC>;
+ reset-names = "reset";
status = "disabled";
};
@@ -275,50 +324,6 @@
status = "disabled";
};
- pcie0: pcie@f8000000 {
- compatible = "rockchip,rk3399-pcie";
- reg = <0x0 0xf8000000 0x0 0x2000000>,
- <0x0 0xfd000000 0x0 0x1000000>;
- reg-names = "axi-base", "apb-base";
- #address-cells = <3>;
- #size-cells = <2>;
- #interrupt-cells = <1>;
- aspm-no-l0s;
- bus-range = <0x0 0x1>;
- clocks = <&cru ACLK_PCIE>, <&cru ACLK_PERF_PCIE>,
- <&cru PCLK_PCIE>, <&cru SCLK_PCIE_PM>;
- clock-names = "aclk", "aclk-perf",
- "hclk", "pm";
- interrupts = <GIC_SPI 49 IRQ_TYPE_LEVEL_HIGH 0>,
- <GIC_SPI 50 IRQ_TYPE_LEVEL_HIGH 0>,
- <GIC_SPI 51 IRQ_TYPE_LEVEL_HIGH 0>;
- interrupt-names = "sys", "legacy", "client";
- interrupt-map-mask = <0 0 0 7>;
- interrupt-map = <0 0 0 1 &pcie0_intc 0>,
- <0 0 0 2 &pcie0_intc 1>,
- <0 0 0 3 &pcie0_intc 2>,
- <0 0 0 4 &pcie0_intc 3>;
- max-link-speed = <1>;
- msi-map = <0x0 &its 0x0 0x1000>;
- phys = <&pcie_phy>;
- phy-names = "pcie-phy";
- ranges = <0x83000000 0x0 0xfa000000 0x0 0xfa000000 0x0 0x600000
- 0x81000000 0x0 0xfa600000 0x0 0xfa600000 0x0 0x100000>;
- resets = <&cru SRST_PCIE_CORE>, <&cru SRST_PCIE_MGMT>,
- <&cru SRST_PCIE_MGMT_STICKY>, <&cru SRST_PCIE_PIPE>,
- <&cru SRST_PCIE_PM>, <&cru SRST_P_PCIE>,
- <&cru SRST_A_PCIE>;
- reset-names = "core", "mgmt", "mgmt-sticky", "pipe",
- "pm", "pclk", "aclk";
- status = "disabled";
-
- pcie0_intc: interrupt-controller {
- interrupt-controller;
- #address-cells = <0>;
- #interrupt-cells = <1>;
- };
- };
-
usb_host0_ehci: usb@fe380000 {
compatible = "generic-ehci";
reg = <0x0 0xfe380000 0x0 0x20000>;
@@ -371,6 +376,60 @@
status = "disabled";
};
+ usbdrd3_0: usb@fe800000 {
+ compatible = "rockchip,rk3399-dwc3";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+ clocks = <&cru SCLK_USB3OTG0_REF>, <&cru SCLK_USB3OTG0_SUSPEND>,
+ <&cru ACLK_USB3OTG0>, <&cru ACLK_USB3_GRF>;
+ clock-names = "ref_clk", "suspend_clk",
+ "bus_clk", "grf_clk";
+ status = "disabled";
+
+ usbdrd_dwc3_0: dwc3 {
+ compatible = "snps,dwc3";
+ reg = <0x0 0xfe800000 0x0 0x100000>;
+ interrupts = <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH 0>;
+ dr_mode = "otg";
+ phys = <&u2phy0_otg>;
+ phy-names = "usb2-phy";
+ phy_type = "utmi_wide";
+ snps,dis_enblslpm_quirk;
+ snps,dis-u2-freeclk-exists-quirk;
+ snps,dis_u2_susphy_quirk;
+ snps,dis-del-phy-power-chg-quirk;
+ status = "disabled";
+ };
+ };
+
+ usbdrd3_1: usb@fe900000 {
+ compatible = "rockchip,rk3399-dwc3";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+ clocks = <&cru SCLK_USB3OTG1_REF>, <&cru SCLK_USB3OTG1_SUSPEND>,
+ <&cru ACLK_USB3OTG1>, <&cru ACLK_USB3_GRF>;
+ clock-names = "ref_clk", "suspend_clk",
+ "bus_clk", "grf_clk";
+ status = "disabled";
+
+ usbdrd_dwc3_1: dwc3 {
+ compatible = "snps,dwc3";
+ reg = <0x0 0xfe900000 0x0 0x100000>;
+ interrupts = <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH 0>;
+ dr_mode = "otg";
+ phys = <&u2phy1_otg>;
+ phy-names = "usb2-phy";
+ phy_type = "utmi_wide";
+ snps,dis_enblslpm_quirk;
+ snps,dis-u2-freeclk-exists-quirk;
+ snps,dis_u2_susphy_quirk;
+ snps,dis-del-phy-power-chg-quirk;
+ status = "disabled";
+ };
+ };
+
gic: interrupt-controller@fee00000 {
compatible = "arm,gic-v3";
#interrupt-cells = <4>;
diff --git a/arch/arm64/boot/dts/socionext/uniphier-ld11-ref.dts b/arch/arm64/boot/dts/socionext/uniphier-ld11-ref.dts
index 7168cf818ad8..0173e93ab141 100644
--- a/arch/arm64/boot/dts/socionext/uniphier-ld11-ref.dts
+++ b/arch/arm64/boot/dts/socionext/uniphier-ld11-ref.dts
@@ -52,11 +52,6 @@
model = "UniPhier LD11 Reference Board";
compatible = "socionext,uniphier-ld11-ref", "socionext,uniphier-ld11";
- memory {
- device_type = "memory";
- reg = <0 0x80000000 0 0x40000000>;
- };
-
chosen {
stdout-path = "serial0:115200n8";
};
@@ -73,6 +68,11 @@
i2c4 = &i2c4;
i2c5 = &i2c5;
};
+
+ memory@80000000 {
+ device_type = "memory";
+ reg = <0 0x80000000 0 0x40000000>;
+ };
};
&ethsc {
diff --git a/arch/arm64/boot/dts/socionext/uniphier-ld11.dtsi b/arch/arm64/boot/dts/socionext/uniphier-ld11.dtsi
index da881f5b6ed4..151c043b4835 100644
--- a/arch/arm64/boot/dts/socionext/uniphier-ld11.dtsi
+++ b/arch/arm64/boot/dts/socionext/uniphier-ld11.dtsi
@@ -140,7 +140,7 @@
<1 10 4>;
};
- soc {
+ soc@0 {
compatible = "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
@@ -304,6 +304,8 @@
compatible = "socionext,uniphier-sd4hc", "cdns,sd4hc";
reg = <0x5a000000 0x400>;
interrupts = <0 78 4>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_emmc>;
clocks = <&sys_clk 4>;
bus-width = <8>;
mmc-ddr-1_8v;
@@ -318,7 +320,8 @@
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usb0>;
clocks = <&mio_clk 7>, <&mio_clk 8>, <&mio_clk 12>;
- resets = <&mio_rst 7>, <&mio_rst 8>, <&mio_rst 12>, <&sys_rst 8>;
+ resets = <&sys_rst 8>, <&mio_rst 7>, <&mio_rst 8>,
+ <&mio_rst 12>;
};
usb1: usb@5a810100 {
@@ -329,7 +332,8 @@
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usb1>;
clocks = <&mio_clk 7>, <&mio_clk 9>, <&mio_clk 13>;
- resets = <&mio_rst 7>, <&mio_rst 9>, <&mio_rst 13>, <&sys_rst 8>;
+ resets = <&sys_rst 8>, <&mio_rst 7>, <&mio_rst 9>,
+ <&mio_rst 13>;
};
usb2: usb@5a820100 {
@@ -340,7 +344,8 @@
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usb2>;
clocks = <&mio_clk 7>, <&mio_clk 10>, <&mio_clk 14>;
- resets = <&mio_rst 7>, <&mio_rst 10>, <&mio_rst 14>, <&sys_rst 8>;
+ resets = <&sys_rst 8>, <&mio_rst 7>, <&mio_rst 10>,
+ <&mio_rst 14>;
};
mioctrl@5b3e0000 {
diff --git a/arch/arm64/boot/dts/socionext/uniphier-ld20-ref.dts b/arch/arm64/boot/dts/socionext/uniphier-ld20-ref.dts
index 609162a1a322..fca4c479b469 100644
--- a/arch/arm64/boot/dts/socionext/uniphier-ld20-ref.dts
+++ b/arch/arm64/boot/dts/socionext/uniphier-ld20-ref.dts
@@ -52,11 +52,6 @@
model = "UniPhier LD20 Reference Board";
compatible = "socionext,uniphier-ld20-ref", "socionext,uniphier-ld20";
- memory {
- device_type = "memory";
- reg = <0 0x80000000 0 0xc0000000>;
- };
-
chosen {
stdout-path = "serial0:115200n8";
};
@@ -73,6 +68,11 @@
i2c4 = &i2c4;
i2c5 = &i2c5;
};
+
+ memory@80000000 {
+ device_type = "memory";
+ reg = <0 0x80000000 0 0xc0000000>;
+ };
};
&ethsc {
diff --git a/arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi b/arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi
index a6b3a70dae83..6193f11acb78 100644
--- a/arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi
+++ b/arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi
@@ -209,7 +209,7 @@
<1 10 4>;
};
- soc {
+ soc@0 {
compatible = "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
@@ -378,6 +378,8 @@
compatible = "socionext,uniphier-sd4hc", "cdns,sd4hc";
reg = <0x5a000000 0x400>;
interrupts = <0 78 4>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_emmc>;
clocks = <&sys_clk 4>;
bus-width = <8>;
mmc-ddr-1_8v;
diff --git a/arch/arm64/boot/dts/sprd/Makefile b/arch/arm64/boot/dts/sprd/Makefile
index b658c5e09b15..f0535e6eaaaa 100644
--- a/arch/arm64/boot/dts/sprd/Makefile
+++ b/arch/arm64/boot/dts/sprd/Makefile
@@ -1,4 +1,5 @@
-dtb-$(CONFIG_ARCH_SPRD) += sc9836-openphone.dtb
+dtb-$(CONFIG_ARCH_SPRD) += sc9836-openphone.dtb \
+ sp9860g-1h10.dtb
always := $(dtb-y)
subdir-y := $(dts-dirs)
diff --git a/arch/arm64/boot/dts/sprd/sc9860.dtsi b/arch/arm64/boot/dts/sprd/sc9860.dtsi
new file mode 100644
index 000000000000..7b7d8cedacda
--- /dev/null
+++ b/arch/arm64/boot/dts/sprd/sc9860.dtsi
@@ -0,0 +1,569 @@
+/*
+ * Spreadtrum SC9860 SoC
+ *
+ * Copyright (C) 2016, Spreadtrum Communications Inc.
+ *
+ * SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+ */
+
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include "whale2.dtsi"
+
+/ {
+ cpus {
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ cpu-map {
+ cluster0 {
+ core0 {
+ cpu = <&CPU0>;
+ };
+ core1 {
+ cpu = <&CPU1>;
+ };
+ core2 {
+ cpu = <&CPU2>;
+ };
+ core3 {
+ cpu = <&CPU3>;
+ };
+ };
+
+ cluster1 {
+ core0 {
+ cpu = <&CPU4>;
+ };
+ core1 {
+ cpu = <&CPU5>;
+ };
+ core2 {
+ cpu = <&CPU6>;
+ };
+ core3 {
+ cpu = <&CPU7>;
+ };
+ };
+ };
+
+ CPU0: cpu@530000 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53", "arm,armv8";
+ reg = <0x0 0x530000>;
+ enable-method = "psci";
+ cpu-idle-states = <&CORE_PD &CLUSTER_PD>;
+ };
+
+ CPU1: cpu@530001 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53", "arm,armv8";
+ reg = <0x0 0x530001>;
+ enable-method = "psci";
+ cpu-idle-states = <&CORE_PD &CLUSTER_PD>;
+ };
+
+ CPU2: cpu@530002 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53", "arm,armv8";
+ reg = <0x0 0x530002>;
+ enable-method = "psci";
+ cpu-idle-states = <&CORE_PD &CLUSTER_PD>;
+ };
+
+ CPU3: cpu@530003 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53", "arm,armv8";
+ reg = <0x0 0x530003>;
+ enable-method = "psci";
+ cpu-idle-states = <&CORE_PD &CLUSTER_PD>;
+ };
+
+ CPU4: cpu@530100 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53", "arm,armv8";
+ reg = <0x0 0x530100>;
+ enable-method = "psci";
+ cpu-idle-states = <&CORE_PD &CLUSTER_PD>;
+ };
+
+ CPU5: cpu@530101 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53", "arm,armv8";
+ reg = <0x0 0x530101>;
+ enable-method = "psci";
+ cpu-idle-states = <&CORE_PD &CLUSTER_PD>;
+ };
+
+ CPU6: cpu@530102 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53", "arm,armv8";
+ reg = <0x0 0x530102>;
+ enable-method = "psci";
+ cpu-idle-states = <&CORE_PD &CLUSTER_PD>;
+ };
+
+ CPU7: cpu@530103 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53", "arm,armv8";
+ reg = <0x0 0x530103>;
+ enable-method = "psci";
+ cpu-idle-states = <&CORE_PD &CLUSTER_PD>;
+ };
+ };
+
+ idle-states {
+ entry-method = "arm,psci";
+
+ CORE_PD: core_pd {
+ compatible = "arm,idle-state";
+ entry-latency-us = <1000>;
+ exit-latency-us = <700>;
+ min-residency-us = <2500>;
+ local-timer-stop;
+ arm,psci-suspend-param = <0x00010002>;
+ };
+
+ CLUSTER_PD: cluster_pd {
+ compatible = "arm,idle-state";
+ entry-latency-us = <1000>;
+ exit-latency-us = <1000>;
+ min-residency-us = <3000>;
+ local-timer-stop;
+ arm,psci-suspend-param = <0x01010003>;
+ };
+ };
+
+ gic: interrupt-controller@12001000 {
+ compatible = "arm,gic-400";
+ reg = <0 0x12001000 0 0x1000>,
+ <0 0x12002000 0 0x2000>,
+ <0 0x12004000 0 0x2000>,
+ <0 0x12006000 0 0x2000>;
+ #interrupt-cells = <3>;
+ interrupt-controller;
+ interrupts = <GIC_PPI 9 (GIC_CPU_MASK_SIMPLE(8)
+ | IRQ_TYPE_LEVEL_HIGH)>;
+ };
+
+ psci {
+ compatible = "arm,psci-0.2";
+ method = "smc";
+ };
+
+ timer {
+ compatible = "arm,armv8-timer";
+ interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(8)
+ | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(8)
+ | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(8)
+ | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(8)
+ | IRQ_TYPE_LEVEL_LOW)>;
+ };
+
+ pmu {
+ compatible = "arm,cortex-a53-pmu", "arm,armv8-pmuv3";
+ interrupts = <GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 124 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 154 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 155 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 156 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 157 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-affinity = <&CPU0>,
+ <&CPU1>,
+ <&CPU2>,
+ <&CPU3>,
+ <&CPU4>,
+ <&CPU5>,
+ <&CPU6>,
+ <&CPU7>;
+ };
+
+ soc {
+ funnel@10001000 { /* SoC Funnel */
+ compatible = "arm,coresight-funnel", "arm,primecell";
+ reg = <0 0x10001000 0 0x1000>;
+ clocks = <&ext_26m>;
+ clock-names = "apb_pclk";
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ soc_funnel_out_port: endpoint {
+ remote-endpoint = <&etb_in>;
+ };
+ };
+
+ port@1 {
+ reg = <0>;
+ soc_funnel_in_port0: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&main_funnel_out_port>;
+ };
+ };
+
+ port@2 {
+ reg = <4>;
+ soc_funnel_in_port1: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&stm_out_port>;
+ };
+ };
+ };
+ };
+
+ etb@10003000 {
+ compatible = "arm,coresight-tmc", "arm,primecell";
+ reg = <0 0x10003000 0 0x1000>;
+ clocks = <&ext_26m>;
+ clock-names = "apb_pclk";
+ port {
+ etb_in: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&soc_funnel_out_port>;
+ };
+ };
+ };
+
+ stm@10006000 {
+ compatible = "arm,coresight-stm", "arm,primecell";
+ reg = <0 0x10006000 0 0x1000>,
+ <0 0x01000000 0 0x180000>;
+ reg-names = "stm-base", "stm-stimulus-base";
+ clocks = <&ext_26m>;
+ clock-names = "apb_pclk";
+ port {
+ stm_out_port: endpoint {
+ remote-endpoint =
+ <&soc_funnel_in_port1>;
+ };
+ };
+ };
+
+ funnel@11001000 { /* Cluster0 Funnel */
+ compatible = "arm,coresight-funnel", "arm,primecell";
+ reg = <0 0x11001000 0 0x1000>;
+ clocks = <&ext_26m>;
+ clock-names = "apb_pclk";
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ cluster0_funnel_out_port: endpoint {
+ remote-endpoint =
+ <&cluster0_etf_in>;
+ };
+ };
+
+ port@1 {
+ reg = <0>;
+ cluster0_funnel_in_port0: endpoint {
+ slave-mode;
+ remote-endpoint = <&etm0_out>;
+ };
+ };
+
+ port@2 {
+ reg = <1>;
+ cluster0_funnel_in_port1: endpoint {
+ slave-mode;
+ remote-endpoint = <&etm1_out>;
+ };
+ };
+
+ port@3 {
+ reg = <2>;
+ cluster0_funnel_in_port2: endpoint {
+ slave-mode;
+ remote-endpoint = <&etm2_out>;
+ };
+ };
+
+ port@4 {
+ reg = <4>;
+ cluster0_funnel_in_port3: endpoint {
+ slave-mode;
+ remote-endpoint = <&etm3_out>;
+ };
+ };
+ };
+ };
+
+ funnel@11002000 { /* Cluster1 Funnel */
+ compatible = "arm,coresight-funnel", "arm,primecell";
+ reg = <0 0x11002000 0 0x1000>;
+ clocks = <&ext_26m>;
+ clock-names = "apb_pclk";
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ cluster1_funnel_out_port: endpoint {
+ remote-endpoint =
+ <&cluster1_etf_in>;
+ };
+ };
+
+ port@1 {
+ reg = <0>;
+ cluster1_funnel_in_port0: endpoint {
+ slave-mode;
+ remote-endpoint = <&etm4_out>;
+ };
+ };
+
+ port@2 {
+ reg = <1>;
+ cluster1_funnel_in_port1: endpoint {
+ slave-mode;
+ remote-endpoint = <&etm5_out>;
+ };
+ };
+
+ port@3 {
+ reg = <2>;
+ cluster1_funnel_in_port2: endpoint {
+ slave-mode;
+ remote-endpoint = <&etm6_out>;
+ };
+ };
+
+ port@4 {
+ reg = <3>;
+ cluster1_funnel_in_port3: endpoint {
+ slave-mode;
+ remote-endpoint = <&etm7_out>;
+ };
+ };
+ };
+ };
+
+ etf@11003000 { /* ETF on Cluster0 */
+ compatible = "arm,coresight-tmc", "arm,primecell";
+ reg = <0 0x11003000 0 0x1000>;
+ clocks = <&ext_26m>;
+ clock-names = "apb_pclk";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ cluster0_etf_out: endpoint {
+ remote-endpoint =
+ <&main_funnel_in_port0>;
+ };
+ };
+
+ port@1 {
+ reg = <0>;
+ cluster0_etf_in: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&cluster0_funnel_out_port>;
+ };
+ };
+ };
+ };
+
+ etf@11004000 { /* ETF on Cluster1 */
+ compatible = "arm,coresight-tmc", "arm,primecell";
+ reg = <0 0x11004000 0 0x1000>;
+ clocks = <&ext_26m>;
+ clock-names = "apb_pclk";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ cluster1_etf_out: endpoint {
+ remote-endpoint =
+ <&main_funnel_in_port1>;
+ };
+ };
+
+ port@1 {
+ reg = <0>;
+ cluster1_etf_in: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&cluster1_funnel_out_port>;
+ };
+ };
+ };
+ };
+
+ funnel@11005000 { /* Main Funnel */
+ compatible = "arm,coresight-funnel", "arm,primecell";
+ reg = <0 0x11005000 0 0x1000>;
+ clocks = <&ext_26m>;
+ clock-names = "apb_pclk";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ main_funnel_out_port: endpoint {
+ remote-endpoint =
+ <&soc_funnel_in_port0>;
+ };
+ };
+
+ port@1 {
+ reg = <0>;
+ main_funnel_in_port0: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&cluster0_etf_out>;
+ };
+ };
+
+ port@2 {
+ reg = <1>;
+ main_funnel_in_port1: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&cluster1_etf_out>;
+ };
+ };
+ };
+ };
+
+ etm@11440000 {
+ compatible = "arm,coresight-etm4x", "arm,primecell";
+ reg = <0 0x11440000 0 0x1000>;
+ cpu = <&CPU0>;
+ clocks = <&ext_26m>;
+ clock-names = "apb_pclk";
+
+ port {
+ etm0_out: endpoint {
+ remote-endpoint =
+ <&cluster0_funnel_in_port0>;
+ };
+ };
+ };
+
+ etm@11540000 {
+ compatible = "arm,coresight-etm4x", "arm,primecell";
+ reg = <0 0x11540000 0 0x1000>;
+ cpu = <&CPU1>;
+ clocks = <&ext_26m>;
+ clock-names = "apb_pclk";
+
+ port {
+ etm1_out: endpoint {
+ remote-endpoint =
+ <&cluster0_funnel_in_port1>;
+ };
+ };
+ };
+
+ etm@11640000 {
+ compatible = "arm,coresight-etm4x", "arm,primecell";
+ reg = <0 0x11640000 0 0x1000>;
+ cpu = <&CPU2>;
+ clocks = <&ext_26m>;
+ clock-names = "apb_pclk";
+
+ port {
+ etm2_out: endpoint {
+ remote-endpoint =
+ <&cluster0_funnel_in_port2>;
+ };
+ };
+ };
+
+ etm@11740000 {
+ compatible = "arm,coresight-etm4x", "arm,primecell";
+ reg = <0 0x11740000 0 0x1000>;
+ cpu = <&CPU3>;
+ clocks = <&ext_26m>;
+ clock-names = "apb_pclk";
+
+ port {
+ etm3_out: endpoint {
+ remote-endpoint =
+ <&cluster0_funnel_in_port3>;
+ };
+ };
+ };
+
+ etm@11840000 {
+ compatible = "arm,coresight-etm4x", "arm,primecell";
+ reg = <0 0x11840000 0 0x1000>;
+ cpu = <&CPU4>;
+ clocks = <&ext_26m>;
+ clock-names = "apb_pclk";
+
+ port {
+ etm4_out: endpoint {
+ remote-endpoint =
+ <&cluster1_funnel_in_port0>;
+ };
+ };
+ };
+
+ etm@11940000 {
+ compatible = "arm,coresight-etm4x", "arm,primecell";
+ reg = <0 0x11940000 0 0x1000>;
+ cpu = <&CPU5>;
+ clocks = <&ext_26m>;
+ clock-names = "apb_pclk";
+
+ port {
+ etm5_out: endpoint {
+ remote-endpoint =
+ <&cluster1_funnel_in_port1>;
+ };
+ };
+ };
+
+ etm@11a40000 {
+ compatible = "arm,coresight-etm4x", "arm,primecell";
+ reg = <0 0x11a40000 0 0x1000>;
+ cpu = <&CPU6>;
+ clocks = <&ext_26m>;
+ clock-names = "apb_pclk";
+
+ port {
+ etm6_out: endpoint {
+ remote-endpoint =
+ <&cluster1_funnel_in_port2>;
+ };
+ };
+ };
+
+ etm@11b40000 {
+ compatible = "arm,coresight-etm4x", "arm,primecell";
+ reg = <0 0x11b40000 0 0x1000>;
+ cpu = <&CPU7>;
+ clocks = <&ext_26m>;
+ clock-names = "apb_pclk";
+
+ port {
+ etm7_out: endpoint {
+ remote-endpoint =
+ <&cluster1_funnel_in_port3>;
+ };
+ };
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/sprd/sp9860g-1h10.dts b/arch/arm64/boot/dts/sprd/sp9860g-1h10.dts
new file mode 100644
index 000000000000..ae0b28ce6319
--- /dev/null
+++ b/arch/arm64/boot/dts/sprd/sp9860g-1h10.dts
@@ -0,0 +1,56 @@
+/*
+ * Spreadtrum SP9860g board
+ *
+ * Copyright (C) 2017, Spreadtrum Communications Inc.
+ *
+ * SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+ */
+
+/dts-v1/;
+
+#include "sc9860.dtsi"
+
+/ {
+ model = "Spreadtrum SP9860G 3GFHD Board";
+
+ compatible = "sprd,sp9860g-1h10", "sprd,sc9860";
+
+ aliases {
+ serial0 = &uart0; /* for Bluetooth */
+ serial1 = &uart1; /* UART console */
+ serial2 = &uart2; /* Reserved */
+ serial3 = &uart3; /* for GPS */
+ };
+
+ memory {
+ device_type = "memory";
+ reg = <0x0 0x80000000 0 0x60000000>,
+ <0x1 0x80000000 0 0x60000000>;
+ };
+
+ chosen {
+ stdout-path = "serial1:115200n8";
+ };
+
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+ };
+};
+
+&uart0 {
+ status = "okay";
+};
+
+&uart1 {
+ status = "okay";
+};
+
+&uart2 {
+ status = "okay";
+};
+
+&uart3 {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/sprd/whale2.dtsi b/arch/arm64/boot/dts/sprd/whale2.dtsi
new file mode 100644
index 000000000000..7c217c547f85
--- /dev/null
+++ b/arch/arm64/boot/dts/sprd/whale2.dtsi
@@ -0,0 +1,71 @@
+/*
+ * Spreadtrum Whale2 platform peripherals
+ *
+ * Copyright (C) 2016, Spreadtrum Communications Inc.
+ *
+ * SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+ */
+
+/ {
+ interrupt-parent = <&gic>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ soc: soc {
+ compatible = "simple-bus";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ ap-apb {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0x0 0x70000000 0x10000000>;
+
+ uart0: serial@0 {
+ compatible = "sprd,sc9860-uart",
+ "sprd,sc9836-uart";
+ reg = <0x0 0x100>;
+ interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&ext_26m>;
+ status = "disabled";
+ };
+
+ uart1: serial@100000 {
+ compatible = "sprd,sc9860-uart",
+ "sprd,sc9836-uart";
+ reg = <0x100000 0x100>;
+ interrupts = <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&ext_26m>;
+ status = "disabled";
+ };
+
+ uart2: serial@200000 {
+ compatible = "sprd,sc9860-uart",
+ "sprd,sc9836-uart";
+ reg = <0x200000 0x100>;
+ interrupts = <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&ext_26m>;
+ status = "disabled";
+ };
+
+ uart3: serial@300000 {
+ compatible = "sprd,sc9860-uart",
+ "sprd,sc9836-uart";
+ reg = <0x300000 0x100>;
+ interrupts = <GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&ext_26m>;
+ status = "disabled";
+ };
+ };
+
+ };
+
+ ext_26m: ext-26m {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <26000000>;
+ clock-output-names = "ext_26m";
+ };
+};
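
One thing worth spelling out in the ap-apb bus above: the single-entry ranges
maps child offsets straight into a 256 MB window at 0x70000000, which is why
the UART reg values look so small. Worked out for uart1:

	/*
	 * ranges = <0 0x0 0x70000000 0x10000000>
	 *   child offsets 0x0..0x0fffffff -> CPU addresses 0x70000000..0x7fffffff
	 *
	 * uart1: reg = <0x100000 0x100>
	 *   0x70000000 + 0x100000 = 0x70100000 (physical base), length 0x100
	 */
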
diff --git a/arch/arm64/boot/dts/zte/zx296718-evb.dts b/arch/arm64/boot/dts/zte/zx296718-evb.dts
index e164ff6de5fc..bb900d2bbcfb 100644
--- a/arch/arm64/boot/dts/zte/zx296718-evb.dts
+++ b/arch/arm64/boot/dts/zte/zx296718-evb.dts
@@ -57,6 +57,34 @@
reg = <0x40000000 0x40000000>;
};
+ sound0 {
+ compatible = "simple-audio-card";
+ simple-audio-card,name = "zx_snd_spdif0";
+
+ simple-audio-card,cpu {
+ sound-dai = <&spdif0>;
+ };
+
+ simple-audio-card,codec {
+ sound-dai = <&hdmi>;
+ };
+ };
+};
+
+&emmc {
+ status = "okay";
+};
+
+&hdmi {
+ status = "okay";
+};
+
+&sd1 {
+ status = "okay";
+};
+
+&spdif0 {
+ status = "okay";
};
&uart0 {
diff --git a/arch/arm64/boot/dts/zte/zx296718.dtsi b/arch/arm64/boot/dts/zte/zx296718.dtsi
index b850b2cd0adc..316dc713268c 100644
--- a/arch/arm64/boot/dts/zte/zx296718.dtsi
+++ b/arch/arm64/boot/dts/zte/zx296718.dtsi
@@ -235,13 +235,6 @@
clock-output-names = "pll_mac";
};
- pll_vga: clk-pll-1073m {
- compatible = "fixed-clock";
- #clock-cells = <0>;
- clock-frequency = <1073000000>;
- clock-output-names = "pll_vga";
- };
-
pll_mm0: clk-pll-1188m {
compatible = "fixed-clock";
#clock-cells = <0>;
@@ -305,6 +298,51 @@
status = "disabled";
};
+ sd0: mmc@1110000 {
+ compatible = "zte,zx296718-dw-mshc";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x01110000 0x1000>;
+ interrupts = <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>;
+ fifo-depth = <32>;
+ data-addr = <0x200>;
+ fifo-watermark-aligned;
+ bus-width = <4>;
+ clock-frequency = <50000000>;
+ clocks = <&topcrm SD0_AHB>, <&topcrm SD0_WCLK>;
+ clock-names = "biu", "ciu";
+ num-slots = <1>;
+ max-frequency = <50000000>;
+ cap-sdio-irq;
+ cap-sd-highspeed;
+ sd-uhs-sdr12;
+ sd-uhs-sdr25;
+ sd-uhs-sdr50;
+ sd-uhs-sdr104;
+ sd-uhs-ddr50;
+ status = "disabled";
+ };
+
+ sd1: mmc@1111000 {
+ compatible = "zte,zx296718-dw-mshc";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x01111000 0x1000>;
+ interrupts = <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>;
+ fifo-depth = <32>;
+ data-addr = <0x200>;
+ fifo-watermark-aligned;
+ bus-width = <4>;
+ clock-frequency = <167000000>;
+ clocks = <&topcrm SD1_AHB>, <&topcrm SD1_WCLK>;
+ clock-names = "biu", "ciu";
+ num-slots = <1>;
+ max-frequency = <167000000>;
+ cap-sdio-irq;
+ cap-sd-highspeed;
+ status = "disabled";
+ };
+
dma: dma-controller@1460000 {
compatible = "zte,zx296702-dma";
reg = <0x01460000 0x1000>;
@@ -328,6 +366,47 @@
#clock-cells = <1>;
};
+ vou: vou@1440000 {
+ compatible = "zte,zx296718-vou";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0x1440000 0x10000>;
+
+ dpc: dpc@0 {
+ compatible = "zte,zx296718-dpc";
+ reg = <0x0000 0x1000>, <0x1000 0x1000>,
+ <0x5000 0x1000>, <0x6000 0x1000>,
+ <0xa000 0x1000>;
+ reg-names = "osd", "timing_ctrl",
+ "dtrc", "vou_ctrl",
+ "otfppu";
+ interrupts = <GIC_SPI 81 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&topcrm VOU_ACLK>, <&topcrm VOU_PPU_WCLK>,
+ <&topcrm VOU_MAIN_WCLK>, <&topcrm VOU_AUX_WCLK>;
+ clock-names = "aclk", "ppu_wclk",
+ "main_wclk", "aux_wclk";
+ };
+
+ hdmi: hdmi@c000 {
+ compatible = "zte,zx296718-hdmi";
+ reg = <0xc000 0x4000>;
+ interrupts = <GIC_SPI 82 IRQ_TYPE_EDGE_RISING>;
+ clocks = <&topcrm HDMI_OSC_CEC>,
+ <&topcrm HDMI_OSC_CLK>,
+ <&topcrm HDMI_XCLK>;
+ clock-names = "osc_cec", "osc_clk", "xclk";
+ #sound-dai-cells = <0>;
+ status = "disabled";
+ };
+
+ tvenc: tvenc@2000 {
+ compatible = "zte,zx296718-tvenc";
+ reg = <0x2000 0x1000>;
+ zte,tvenc-power-control = <&sysctrl 0x170 0x10>;
+ status = "disabled";
+ };
+ };
+
topcrm: clock-controller@1461000 {
compatible = "zte,zx296718-topcrm";
reg = <0x01461000 0x1000>;
@@ -339,10 +418,43 @@
reg = <0x1463000 0x1000>;
};
+ emmc: mmc@1470000 {
+ compatible = "zte,zx296718-dw-mshc";
+ reg = <0x01470000 0x1000>;
+ interrupts = <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>;
+ zte,aon-syscon = <&aon_sysctrl>;
+ bus-width = <8>;
+ fifo-depth = <128>;
+ data-addr = <0x200>;
+ fifo-watermark-aligned;
+ clock-frequency = <167000000>;
+ clocks = <&topcrm EMMC_NAND_AHB>, <&topcrm EMMC_WCLK>;
+ clock-names = "biu", "ciu";
+ max-frequency = <167000000>;
+ cap-mmc-highspeed;
+ mmc-ddr-1_8v;
+ mmc-hs200-1_8v;
+ non-removable;
+ disable-wp;
+ status = "disabled";
+ };
+
audiocrm: clock-controller@1480000 {
compatible = "zte,zx296718-audiocrm";
reg = <0x01480000 0x1000>;
#clock-cells = <1>;
};
+
+ spdif0: spdif@1488000 {
+ compatible = "zte,zx296702-spdif";
+ reg = <0x1488000 0x1000>;
+ clocks = <&audiocrm AUDIO_SPDIF0_WCLK>;
+ clock-names = "tx";
+ interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>;
+ #sound-dai-cells = <0>;
+ dmas = <&dma 30>;
+ dma-names = "tx";
+ status = "disabled";
+ };
};
};
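
The evaluation board above enables sd1, emmc, hdmi and spdif0 but leaves the
new sd0 controller disabled; a board that wires up the first slot only needs
a small overlay on top of this .dtsi. A sketch, with vcc_sd standing in for
whatever supply the board actually has:

	&sd0 {
		status = "okay";
		vmmc-supply = <&vcc_sd>;	/* placeholder board regulator */
	};
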
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index 7c48028ec64a..ce072859e3b2 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -82,6 +82,7 @@ CONFIG_CMA=y
CONFIG_SECCOMP=y
CONFIG_XEN=y
CONFIG_KEXEC=y
+CONFIG_CRASH_DUMP=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_COMPAT=y
CONFIG_CPU_IDLE=y
@@ -185,6 +186,8 @@ CONFIG_HNS_ENET=y
CONFIG_E1000E=y
CONFIG_IGB=y
CONFIG_IGBVF=y
+CONFIG_MVPP2=y
+CONFIG_MVNETA=y
CONFIG_SKY2=y
CONFIG_RAVB=y
CONFIG_SMC91X=y
@@ -249,8 +252,10 @@ CONFIG_I2C_DESIGNWARE_PLATFORM=y
CONFIG_I2C_IMX=y
CONFIG_I2C_MESON=y
CONFIG_I2C_MV64XXX=y
+CONFIG_I2C_PXA=y
CONFIG_I2C_QUP=y
CONFIG_I2C_RK3X=y
+CONFIG_I2C_SH_MOBILE=y
CONFIG_I2C_TEGRA=y
CONFIG_I2C_UNIPHIER_F=y
CONFIG_I2C_RCAR=y
@@ -299,6 +304,7 @@ CONFIG_RENESAS_WDT=y
CONFIG_S3C2410_WATCHDOG=y
CONFIG_MESON_GXBB_WATCHDOG=m
CONFIG_MESON_WATCHDOG=m
+CONFIG_MFD_EXYNOS_LPASS=m
CONFIG_MFD_MAX77620=y
CONFIG_MFD_RK808=y
CONFIG_MFD_SPMI_PMIC=y
@@ -324,10 +330,20 @@ CONFIG_MEDIA_CONTROLLER=y
CONFIG_VIDEO_V4L2_SUBDEV_API=y
# CONFIG_DVB_NET is not set
CONFIG_V4L_MEM2MEM_DRIVERS=y
+CONFIG_VIDEO_SAMSUNG_S5P_JPEG=m
+CONFIG_VIDEO_SAMSUNG_S5P_MFC=m
+CONFIG_VIDEO_SAMSUNG_EXYNOS_GSC=m
CONFIG_VIDEO_RENESAS_FCP=m
CONFIG_VIDEO_RENESAS_VSP1=m
CONFIG_DRM=m
CONFIG_DRM_NOUVEAU=m
+CONFIG_DRM_EXYNOS=m
+CONFIG_DRM_EXYNOS5433_DECON=y
+CONFIG_DRM_EXYNOS7_DECON=y
+CONFIG_DRM_EXYNOS_DSI=y
+# CONFIG_DRM_EXYNOS_DP is not set
+CONFIG_DRM_EXYNOS_HDMI=y
+CONFIG_DRM_EXYNOS_MIC=y
CONFIG_DRM_RCAR_DU=m
CONFIG_DRM_RCAR_HDMI=y
CONFIG_DRM_RCAR_LVDS=y
@@ -398,11 +414,15 @@ CONFIG_MMC_DW_EXYNOS=y
CONFIG_MMC_DW_K3=y
CONFIG_MMC_DW_ROCKCHIP=y
CONFIG_MMC_SUNXI=y
+CONFIG_MMC_SDHCI_XENON=y
+CONFIG_MMC_BCM2835=y
CONFIG_NEW_LEDS=y
CONFIG_LEDS_CLASS=y
CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_PWM=y
CONFIG_LEDS_SYSCON=y
CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
CONFIG_LEDS_TRIGGER_HEARTBEAT=y
CONFIG_LEDS_TRIGGER_CPU=y
CONFIG_RTC_CLASS=y
@@ -469,6 +489,7 @@ CONFIG_PHY_RCAR_GEN3_USB2=y
CONFIG_PHY_HI6220_USB=y
CONFIG_PHY_ROCKCHIP_INNO_USB2=y
CONFIG_PHY_ROCKCHIP_EMMC=y
+CONFIG_PHY_SUN4I_USB=y
CONFIG_PHY_XGENE=y
CONFIG_PHY_TEGRA_XUSB=y
CONFIG_ARM_SCPI_PROTOCOL=y
@@ -518,6 +539,7 @@ CONFIG_MEMTEST=y
CONFIG_SECURITY=y
CONFIG_CRYPTO_ECHAINIV=y
CONFIG_CRYPTO_ANSI_CPRNG=y
+CONFIG_CRYPTO_DEV_SAFEXCEL=m
CONFIG_ARM64_CRYPTO=y
CONFIG_CRYPTO_SHA1_ARM64_CE=y
CONFIG_CRYPTO_SHA2_ARM64_CE=y
diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild
index a12f1afc95a3..a7a97a608033 100644
--- a/arch/arm64/include/asm/Kbuild
+++ b/arch/arm64/include/asm/Kbuild
@@ -29,6 +29,7 @@ generic-y += rwsem.h
generic-y += segment.h
generic-y += sembuf.h
generic-y += serial.h
+generic-y += set_memory.h
generic-y += shmbuf.h
generic-y += simd.h
generic-y += sizes.h
diff --git a/arch/arm64/include/asm/acpi.h b/arch/arm64/include/asm/acpi.h
index c1976c0adca7..0e99978da3f0 100644
--- a/arch/arm64/include/asm/acpi.h
+++ b/arch/arm64/include/asm/acpi.h
@@ -85,6 +85,8 @@ static inline bool acpi_has_cpu_in_madt(void)
return true;
}
+struct acpi_madt_generic_interrupt *acpi_cpu_get_madt_gicc(int cpu);
+
static inline void arch_fix_phys_package_id(int num, u32 slot) { }
void __init acpi_init_cpus(void);
diff --git a/arch/arm64/include/asm/arch_gicv3.h b/arch/arm64/include/asm/arch_gicv3.h
index f37e3a21f6e7..1a98bc8602a2 100644
--- a/arch/arm64/include/asm/arch_gicv3.h
+++ b/arch/arm64/include/asm/arch_gicv3.h
@@ -20,69 +20,14 @@
#include <asm/sysreg.h>
-#define ICC_EOIR1_EL1 sys_reg(3, 0, 12, 12, 1)
-#define ICC_DIR_EL1 sys_reg(3, 0, 12, 11, 1)
-#define ICC_IAR1_EL1 sys_reg(3, 0, 12, 12, 0)
-#define ICC_SGI1R_EL1 sys_reg(3, 0, 12, 11, 5)
-#define ICC_PMR_EL1 sys_reg(3, 0, 4, 6, 0)
-#define ICC_CTLR_EL1 sys_reg(3, 0, 12, 12, 4)
-#define ICC_SRE_EL1 sys_reg(3, 0, 12, 12, 5)
-#define ICC_GRPEN1_EL1 sys_reg(3, 0, 12, 12, 7)
-#define ICC_BPR1_EL1 sys_reg(3, 0, 12, 12, 3)
-
-#define ICC_SRE_EL2 sys_reg(3, 4, 12, 9, 5)
-
-/*
- * System register definitions
- */
-#define ICH_VSEIR_EL2 sys_reg(3, 4, 12, 9, 4)
-#define ICH_HCR_EL2 sys_reg(3, 4, 12, 11, 0)
-#define ICH_VTR_EL2 sys_reg(3, 4, 12, 11, 1)
-#define ICH_MISR_EL2 sys_reg(3, 4, 12, 11, 2)
-#define ICH_EISR_EL2 sys_reg(3, 4, 12, 11, 3)
-#define ICH_ELSR_EL2 sys_reg(3, 4, 12, 11, 5)
-#define ICH_VMCR_EL2 sys_reg(3, 4, 12, 11, 7)
-
-#define __LR0_EL2(x) sys_reg(3, 4, 12, 12, x)
-#define __LR8_EL2(x) sys_reg(3, 4, 12, 13, x)
-
-#define ICH_LR0_EL2 __LR0_EL2(0)
-#define ICH_LR1_EL2 __LR0_EL2(1)
-#define ICH_LR2_EL2 __LR0_EL2(2)
-#define ICH_LR3_EL2 __LR0_EL2(3)
-#define ICH_LR4_EL2 __LR0_EL2(4)
-#define ICH_LR5_EL2 __LR0_EL2(5)
-#define ICH_LR6_EL2 __LR0_EL2(6)
-#define ICH_LR7_EL2 __LR0_EL2(7)
-#define ICH_LR8_EL2 __LR8_EL2(0)
-#define ICH_LR9_EL2 __LR8_EL2(1)
-#define ICH_LR10_EL2 __LR8_EL2(2)
-#define ICH_LR11_EL2 __LR8_EL2(3)
-#define ICH_LR12_EL2 __LR8_EL2(4)
-#define ICH_LR13_EL2 __LR8_EL2(5)
-#define ICH_LR14_EL2 __LR8_EL2(6)
-#define ICH_LR15_EL2 __LR8_EL2(7)
-
-#define __AP0Rx_EL2(x) sys_reg(3, 4, 12, 8, x)
-#define ICH_AP0R0_EL2 __AP0Rx_EL2(0)
-#define ICH_AP0R1_EL2 __AP0Rx_EL2(1)
-#define ICH_AP0R2_EL2 __AP0Rx_EL2(2)
-#define ICH_AP0R3_EL2 __AP0Rx_EL2(3)
-
-#define __AP1Rx_EL2(x) sys_reg(3, 4, 12, 9, x)
-#define ICH_AP1R0_EL2 __AP1Rx_EL2(0)
-#define ICH_AP1R1_EL2 __AP1Rx_EL2(1)
-#define ICH_AP1R2_EL2 __AP1Rx_EL2(2)
-#define ICH_AP1R3_EL2 __AP1Rx_EL2(3)
-
#ifndef __ASSEMBLY__
#include <linux/stringify.h>
#include <asm/barrier.h>
#include <asm/cacheflush.h>
-#define read_gicreg read_sysreg_s
-#define write_gicreg write_sysreg_s
+#define read_gicreg(r) read_sysreg_s(SYS_ ## r)
+#define write_gicreg(v, r) write_sysreg_s(v, SYS_ ## r)
/*
* Low-level accessors
@@ -93,13 +38,13 @@
static inline void gic_write_eoir(u32 irq)
{
- write_sysreg_s(irq, ICC_EOIR1_EL1);
+ write_sysreg_s(irq, SYS_ICC_EOIR1_EL1);
isb();
}
static inline void gic_write_dir(u32 irq)
{
- write_sysreg_s(irq, ICC_DIR_EL1);
+ write_sysreg_s(irq, SYS_ICC_DIR_EL1);
isb();
}
@@ -107,7 +52,7 @@ static inline u64 gic_read_iar_common(void)
{
u64 irqstat;
- irqstat = read_sysreg_s(ICC_IAR1_EL1);
+ irqstat = read_sysreg_s(SYS_ICC_IAR1_EL1);
dsb(sy);
return irqstat;
}
@@ -124,7 +69,7 @@ static inline u64 gic_read_iar_cavium_thunderx(void)
u64 irqstat;
nops(8);
- irqstat = read_sysreg_s(ICC_IAR1_EL1);
+ irqstat = read_sysreg_s(SYS_ICC_IAR1_EL1);
nops(4);
mb();
@@ -133,40 +78,40 @@ static inline u64 gic_read_iar_cavium_thunderx(void)
static inline void gic_write_pmr(u32 val)
{
- write_sysreg_s(val, ICC_PMR_EL1);
+ write_sysreg_s(val, SYS_ICC_PMR_EL1);
}
static inline void gic_write_ctlr(u32 val)
{
- write_sysreg_s(val, ICC_CTLR_EL1);
+ write_sysreg_s(val, SYS_ICC_CTLR_EL1);
isb();
}
static inline void gic_write_grpen1(u32 val)
{
- write_sysreg_s(val, ICC_GRPEN1_EL1);
+ write_sysreg_s(val, SYS_ICC_GRPEN1_EL1);
isb();
}
static inline void gic_write_sgi1r(u64 val)
{
- write_sysreg_s(val, ICC_SGI1R_EL1);
+ write_sysreg_s(val, SYS_ICC_SGI1R_EL1);
}
static inline u32 gic_read_sre(void)
{
- return read_sysreg_s(ICC_SRE_EL1);
+ return read_sysreg_s(SYS_ICC_SRE_EL1);
}
static inline void gic_write_sre(u32 val)
{
- write_sysreg_s(val, ICC_SRE_EL1);
+ write_sysreg_s(val, SYS_ICC_SRE_EL1);
isb();
}
static inline void gic_write_bpr1(u32 val)
{
- asm volatile("msr_s " __stringify(ICC_BPR1_EL1) ", %0" : : "r" (val));
+ write_sysreg_s(val, SYS_ICC_BPR1_EL1);
}
#define gic_read_typer(c) readq_relaxed(c)
diff --git a/arch/arm64/include/asm/bug.h b/arch/arm64/include/asm/bug.h
index a9be1072933c..366448eb0fb7 100644
--- a/arch/arm64/include/asm/bug.h
+++ b/arch/arm64/include/asm/bug.h
@@ -20,9 +20,6 @@
#include <asm/brk-imm.h>
-#ifdef CONFIG_GENERIC_BUG
-#define HAVE_ARCH_BUG
-
#ifdef CONFIG_DEBUG_BUGVERBOSE
#define _BUGVERBOSE_LOCATION(file, line) __BUGVERBOSE_LOCATION(file, line)
#define __BUGVERBOSE_LOCATION(file, line) \
@@ -36,28 +33,35 @@
#define _BUGVERBOSE_LOCATION(file, line)
#endif
-#define _BUG_FLAGS(flags) __BUG_FLAGS(flags)
+#ifdef CONFIG_GENERIC_BUG
-#define __BUG_FLAGS(flags) asm volatile ( \
+#define __BUG_ENTRY(flags) \
".pushsection __bug_table,\"a\"\n\t" \
".align 2\n\t" \
"0: .long 1f - 0b\n\t" \
_BUGVERBOSE_LOCATION(__FILE__, __LINE__) \
".short " #flags "\n\t" \
".popsection\n" \
- \
- "1: brk %[imm]" \
- :: [imm] "i" (BUG_BRK_IMM) \
-)
+ "1: "
+#else
+#define __BUG_ENTRY(flags) ""
+#endif
+
+#define __BUG_FLAGS(flags) \
+ asm volatile ( \
+ __BUG_ENTRY(flags) \
+ "brk %[imm]" :: [imm] "i" (BUG_BRK_IMM) \
+ );
-#define BUG() do { \
- _BUG_FLAGS(0); \
- unreachable(); \
+
+#define BUG() do { \
+ __BUG_FLAGS(0); \
+ unreachable(); \
} while (0)
-#define __WARN_FLAGS(flags) _BUG_FLAGS(BUGFLAG_WARNING|(flags))
+#define __WARN_FLAGS(flags) __BUG_FLAGS(BUGFLAG_WARNING|(flags))
-#endif /* ! CONFIG_GENERIC_BUG */
+#define HAVE_ARCH_BUG
#include <asm-generic/bug.h>
diff --git a/arch/arm64/include/asm/cache.h b/arch/arm64/include/asm/cache.h
index 5082b30bc2c0..ea9bb4e0e9bb 100644
--- a/arch/arm64/include/asm/cache.h
+++ b/arch/arm64/include/asm/cache.h
@@ -16,7 +16,18 @@
#ifndef __ASM_CACHE_H
#define __ASM_CACHE_H
-#include <asm/cachetype.h>
+#include <asm/cputype.h>
+
+#define CTR_L1IP_SHIFT 14
+#define CTR_L1IP_MASK 3
+#define CTR_CWG_SHIFT 24
+#define CTR_CWG_MASK 15
+
+#define CTR_L1IP(ctr) (((ctr) >> CTR_L1IP_SHIFT) & CTR_L1IP_MASK)
+
+#define ICACHE_POLICY_VPIPT 0
+#define ICACHE_POLICY_VIPT 2
+#define ICACHE_POLICY_PIPT 3
#define L1_CACHE_SHIFT 7
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
@@ -32,6 +43,31 @@
#ifndef __ASSEMBLY__
+#include <linux/bitops.h>
+
+#define ICACHEF_ALIASING 0
+#define ICACHEF_VPIPT 1
+extern unsigned long __icache_flags;
+
+/*
+ * Whilst the D-side always behaves as PIPT on AArch64, aliasing is
+ * permitted in the I-cache.
+ */
+static inline int icache_is_aliasing(void)
+{
+ return test_bit(ICACHEF_ALIASING, &__icache_flags);
+}
+
+static inline int icache_is_vpipt(void)
+{
+ return test_bit(ICACHEF_VPIPT, &__icache_flags);
+}
+
+static inline u32 cache_type_cwg(void)
+{
+ return (read_cpuid_cachetype() >> CTR_CWG_SHIFT) & CTR_CWG_MASK;
+}
+
#define __read_mostly __attribute__((__section__(".data..read_mostly")))
static inline int cache_line_size(void)
diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
index 5a2a6ee65f65..d74a284abdc2 100644
--- a/arch/arm64/include/asm/cacheflush.h
+++ b/arch/arm64/include/asm/cacheflush.h
@@ -150,9 +150,6 @@ static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
{
}
-int set_memory_ro(unsigned long addr, int numpages);
-int set_memory_rw(unsigned long addr, int numpages);
-int set_memory_x(unsigned long addr, int numpages);
-int set_memory_nx(unsigned long addr, int numpages);
+int set_memory_valid(unsigned long addr, unsigned long size, int enable);
#endif
diff --git a/arch/arm64/include/asm/cachetype.h b/arch/arm64/include/asm/cachetype.h
deleted file mode 100644
index f5588692f1d4..000000000000
--- a/arch/arm64/include/asm/cachetype.h
+++ /dev/null
@@ -1,100 +0,0 @@
-/*
- * Copyright (C) 2012 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-#ifndef __ASM_CACHETYPE_H
-#define __ASM_CACHETYPE_H
-
-#include <asm/cputype.h>
-
-#define CTR_L1IP_SHIFT 14
-#define CTR_L1IP_MASK 3
-#define CTR_CWG_SHIFT 24
-#define CTR_CWG_MASK 15
-
-#define ICACHE_POLICY_RESERVED 0
-#define ICACHE_POLICY_AIVIVT 1
-#define ICACHE_POLICY_VIPT 2
-#define ICACHE_POLICY_PIPT 3
-
-#ifndef __ASSEMBLY__
-
-#include <linux/bitops.h>
-
-#define CTR_L1IP(ctr) (((ctr) >> CTR_L1IP_SHIFT) & CTR_L1IP_MASK)
-
-#define ICACHEF_ALIASING 0
-#define ICACHEF_AIVIVT 1
-
-extern unsigned long __icache_flags;
-
-/*
- * NumSets, bits[27:13] - (Number of sets in cache) - 1
- * Associativity, bits[12:3] - (Associativity of cache) - 1
- * LineSize, bits[2:0] - (Log2(Number of words in cache line)) - 2
- */
-#define CCSIDR_EL1_WRITE_THROUGH BIT(31)
-#define CCSIDR_EL1_WRITE_BACK BIT(30)
-#define CCSIDR_EL1_READ_ALLOCATE BIT(29)
-#define CCSIDR_EL1_WRITE_ALLOCATE BIT(28)
-#define CCSIDR_EL1_LINESIZE_MASK 0x7
-#define CCSIDR_EL1_LINESIZE(x) ((x) & CCSIDR_EL1_LINESIZE_MASK)
-#define CCSIDR_EL1_ASSOCIATIVITY_SHIFT 3
-#define CCSIDR_EL1_ASSOCIATIVITY_MASK 0x3ff
-#define CCSIDR_EL1_ASSOCIATIVITY(x) \
- (((x) >> CCSIDR_EL1_ASSOCIATIVITY_SHIFT) & CCSIDR_EL1_ASSOCIATIVITY_MASK)
-#define CCSIDR_EL1_NUMSETS_SHIFT 13
-#define CCSIDR_EL1_NUMSETS_MASK 0x7fff
-#define CCSIDR_EL1_NUMSETS(x) \
- (((x) >> CCSIDR_EL1_NUMSETS_SHIFT) & CCSIDR_EL1_NUMSETS_MASK)
-
-#define CACHE_LINESIZE(x) (16 << CCSIDR_EL1_LINESIZE(x))
-#define CACHE_NUMSETS(x) (CCSIDR_EL1_NUMSETS(x) + 1)
-#define CACHE_ASSOCIATIVITY(x) (CCSIDR_EL1_ASSOCIATIVITY(x) + 1)
-
-extern u64 __attribute_const__ cache_get_ccsidr(u64 csselr);
-
-/* Helpers for Level 1 Instruction cache csselr = 1L */
-static inline int icache_get_linesize(void)
-{
- return CACHE_LINESIZE(cache_get_ccsidr(1L));
-}
-
-static inline int icache_get_numsets(void)
-{
- return CACHE_NUMSETS(cache_get_ccsidr(1L));
-}
-
-/*
- * Whilst the D-side always behaves as PIPT on AArch64, aliasing is
- * permitted in the I-cache.
- */
-static inline int icache_is_aliasing(void)
-{
- return test_bit(ICACHEF_ALIASING, &__icache_flags);
-}
-
-static inline int icache_is_aivivt(void)
-{
- return test_bit(ICACHEF_AIVIVT, &__icache_flags);
-}
-
-static inline u32 cache_type_cwg(void)
-{
- return (read_cpuid_cachetype() >> CTR_CWG_SHIFT) & CTR_CWG_MASK;
-}
-
-#endif /* __ASSEMBLY__ */
-
-#endif /* __ASM_CACHETYPE_H */
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index f31c48d0cd68..e7f84a7b4465 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -226,7 +226,7 @@ void update_cpu_errata_workarounds(void);
void __init enable_errata_workarounds(void);
void verify_local_cpu_errata_workarounds(void);
-u64 read_system_reg(u32 id);
+u64 read_sanitised_ftr_reg(u32 id);
static inline bool cpu_supports_mixed_endian_el0(void)
{
@@ -240,7 +240,7 @@ static inline bool system_supports_32bit_el0(void)
static inline bool system_supports_mixed_endian_el0(void)
{
- return id_aa64mmfr0_mixed_endian_el0(read_system_reg(SYS_ID_AA64MMFR0_EL1));
+ return id_aa64mmfr0_mixed_endian_el0(read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1));
}
static inline bool system_supports_fpsimd(void)
diff --git a/arch/arm64/include/asm/device.h b/arch/arm64/include/asm/device.h
index 73d5bab015eb..5a5fa47a6b18 100644
--- a/arch/arm64/include/asm/device.h
+++ b/arch/arm64/include/asm/device.h
@@ -20,6 +20,9 @@ struct dev_archdata {
#ifdef CONFIG_IOMMU_API
void *iommu; /* private IOMMU data */
#endif
+#ifdef CONFIG_XEN
+ const struct dma_map_ops *dev_dma_ops;
+#endif
bool dma_coherent;
};
diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h
index 505756cdc67a..5392dbeffa45 100644
--- a/arch/arm64/include/asm/dma-mapping.h
+++ b/arch/arm64/include/asm/dma-mapping.h
@@ -27,11 +27,8 @@
#define DMA_ERROR_CODE (~(dma_addr_t)0)
extern const struct dma_map_ops dummy_dma_ops;
-static inline const struct dma_map_ops *__generic_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
{
- if (dev && dev->dma_ops)
- return dev->dma_ops;
-
/*
* We expect no ISA devices, and all other DMA masters are expected to
* have someone call arch_setup_dma_ops at device creation time.
@@ -39,14 +36,6 @@ static inline const struct dma_map_ops *__generic_dma_ops(struct device *dev)
return &dummy_dma_ops;
}
-static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
-{
- if (xen_initial_domain())
- return xen_dma_ops;
- else
- return __generic_dma_ops(NULL);
-}
-
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
const struct iommu_ops *iommu, bool coherent);
#define arch_setup_dma_ops arch_setup_dma_ops
diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h
index ad42e79a5d4d..85997c0e5443 100644
--- a/arch/arm64/include/asm/esr.h
+++ b/arch/arm64/include/asm/esr.h
@@ -177,6 +177,10 @@
#define ESR_ELx_SYS64_ISS_SYS_CNTVCT (ESR_ELx_SYS64_ISS_SYS_VAL(3, 3, 2, 14, 0) | \
ESR_ELx_SYS64_ISS_DIR_READ)
+
+#define ESR_ELx_SYS64_ISS_SYS_CNTFRQ (ESR_ELx_SYS64_ISS_SYS_VAL(3, 3, 0, 14, 0) | \
+ ESR_ELx_SYS64_ISS_DIR_READ)
+
#ifndef __ASSEMBLY__
#include <asm/types.h>
diff --git a/arch/arm64/include/asm/hardirq.h b/arch/arm64/include/asm/hardirq.h
index 8740297dac77..1473fc2f7ab7 100644
--- a/arch/arm64/include/asm/hardirq.h
+++ b/arch/arm64/include/asm/hardirq.h
@@ -20,7 +20,7 @@
#include <linux/threads.h>
#include <asm/irq.h>
-#define NR_IPI 6
+#define NR_IPI 7
typedef struct {
unsigned int __softirq_pending;
diff --git a/arch/arm64/include/asm/hw_breakpoint.h b/arch/arm64/include/asm/hw_breakpoint.h
index b6b167ac082b..41770766d964 100644
--- a/arch/arm64/include/asm/hw_breakpoint.h
+++ b/arch/arm64/include/asm/hw_breakpoint.h
@@ -149,7 +149,7 @@ static inline void ptrace_hw_copy_thread(struct task_struct *task)
/* Determine number of BRP registers available. */
static inline int get_num_brps(void)
{
- u64 dfr0 = read_system_reg(SYS_ID_AA64DFR0_EL1);
+ u64 dfr0 = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1);
return 1 +
cpuid_feature_extract_unsigned_field(dfr0,
ID_AA64DFR0_BRPS_SHIFT);
@@ -158,7 +158,7 @@ static inline int get_num_brps(void)
/* Determine number of WRP registers available. */
static inline int get_num_wrps(void)
{
- u64 dfr0 = read_system_reg(SYS_ID_AA64DFR0_EL1);
+ u64 dfr0 = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1);
return 1 +
cpuid_feature_extract_unsigned_field(dfr0,
ID_AA64DFR0_WRPS_SHIFT);
diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
index 0c00c87bb9dd..35b2e50f17fb 100644
--- a/arch/arm64/include/asm/io.h
+++ b/arch/arm64/include/asm/io.h
@@ -173,6 +173,16 @@ extern void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size);
#define iounmap __iounmap
/*
+ * PCI configuration space mapping function.
+ *
+ * The PCI specification disallows posted write configuration transactions.
+ * Add an arch-specific pci_remap_cfgspace() definition that is implemented
+ * using the nGnRnE device memory attribute, as recommended by the ARM v8
+ * Architecture Reference Manual, Issue A.k, B2.8.2 "Device memory".
+ */
+#define pci_remap_cfgspace(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRnE))
+
+/*
* io{read,write}{16,32,64}be() macros
*/
#define ioread16be(p) ({ __u16 __v = be16_to_cpu((__force __be16)__raw_readw(p)); __iormb(); __v; })
diff --git a/arch/arm64/include/asm/kexec.h b/arch/arm64/include/asm/kexec.h
index 04744dc5fb61..e17f0529a882 100644
--- a/arch/arm64/include/asm/kexec.h
+++ b/arch/arm64/include/asm/kexec.h
@@ -40,9 +40,59 @@
static inline void crash_setup_regs(struct pt_regs *newregs,
struct pt_regs *oldregs)
{
- /* Empty routine needed to avoid build errors. */
+ if (oldregs) {
+ memcpy(newregs, oldregs, sizeof(*newregs));
+ } else {
+ u64 tmp1, tmp2;
+
+ __asm__ __volatile__ (
+ "stp x0, x1, [%2, #16 * 0]\n"
+ "stp x2, x3, [%2, #16 * 1]\n"
+ "stp x4, x5, [%2, #16 * 2]\n"
+ "stp x6, x7, [%2, #16 * 3]\n"
+ "stp x8, x9, [%2, #16 * 4]\n"
+ "stp x10, x11, [%2, #16 * 5]\n"
+ "stp x12, x13, [%2, #16 * 6]\n"
+ "stp x14, x15, [%2, #16 * 7]\n"
+ "stp x16, x17, [%2, #16 * 8]\n"
+ "stp x18, x19, [%2, #16 * 9]\n"
+ "stp x20, x21, [%2, #16 * 10]\n"
+ "stp x22, x23, [%2, #16 * 11]\n"
+ "stp x24, x25, [%2, #16 * 12]\n"
+ "stp x26, x27, [%2, #16 * 13]\n"
+ "stp x28, x29, [%2, #16 * 14]\n"
+ "mov %0, sp\n"
+ "stp x30, %0, [%2, #16 * 15]\n"
+
+ "/* faked current PSTATE */\n"
+ "mrs %0, CurrentEL\n"
+ "mrs %1, SPSEL\n"
+ "orr %0, %0, %1\n"
+ "mrs %1, DAIF\n"
+ "orr %0, %0, %1\n"
+ "mrs %1, NZCV\n"
+ "orr %0, %0, %1\n"
+ /* pc */
+ "adr %1, 1f\n"
+ "1:\n"
+ "stp %1, %0, [%2, #16 * 16]\n"
+ : "=&r" (tmp1), "=&r" (tmp2)
+ : "r" (newregs)
+ : "memory"
+ );
+ }
}
+#if defined(CONFIG_KEXEC_CORE) && defined(CONFIG_HIBERNATION)
+extern bool crash_is_nosave(unsigned long pfn);
+extern void crash_prepare_suspend(void);
+extern void crash_post_resume(void);
+#else
+static inline bool crash_is_nosave(unsigned long pfn) {return false; }
+static inline void crash_prepare_suspend(void) {}
+static inline void crash_post_resume(void) {}
+#endif
+
#endif /* __ASSEMBLY__ */
#endif
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index ec3553eb9349..26a64d0f9ab9 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -28,7 +28,7 @@
#define ARM_EXCEPTION_EL1_SERROR 1
#define ARM_EXCEPTION_TRAP 2
/* The hyp-stub will return this for any kvm_call_hyp() call */
-#define ARM_EXCEPTION_HYP_GONE 3
+#define ARM_EXCEPTION_HYP_GONE HVC_STUB_ERR
#define KVM_ARM64_DEBUG_DIRTY_SHIFT 0
#define KVM_ARM64_DEBUG_DIRTY (1 << KVM_ARM64_DEBUG_DIRTY_SHIFT)
@@ -47,7 +47,6 @@ struct kvm_vcpu;
extern char __kvm_hyp_init[];
extern char __kvm_hyp_init_end[];
-extern char __kvm_hyp_reset[];
extern char __kvm_hyp_vector[];
@@ -59,6 +58,8 @@ extern void __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu);
extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
extern u64 __vgic_v3_get_ich_vtr_el2(void);
+extern u64 __vgic_v3_read_vmcr(void);
+extern void __vgic_v3_write_vmcr(u32 vmcr);
extern void __vgic_v3_init_lrs(void);
extern u32 __kvm_get_mdcr_el2(void);
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index e7705e7bb07b..5e19165c5fa8 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -31,7 +31,6 @@
#define __KVM_HAVE_ARCH_INTC_INITIALIZED
#define KVM_USER_MEM_SLOTS 512
-#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
#define KVM_HALT_POLL_NS_DEFAULT 500000
#include <kvm/arm_vgic.h>
@@ -42,7 +41,7 @@
#define KVM_VCPU_MAX_FEATURES 4
-#define KVM_REQ_VCPU_EXIT 8
+#define KVM_REQ_VCPU_EXIT (8 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
int __attribute_const__ kvm_target_cpu(void);
int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
@@ -362,13 +361,6 @@ static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr,
__kvm_call_hyp((void *)pgd_ptr, hyp_stack_ptr, vector_ptr);
}
-void __kvm_hyp_teardown(void);
-static inline void __cpu_reset_hyp_mode(unsigned long vector_ptr,
- phys_addr_t phys_idmap_start)
-{
- kvm_call_hyp(__kvm_hyp_teardown, phys_idmap_start);
-}
-
static inline void kvm_arch_hardware_unsetup(void) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {}
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index ed1246014901..a89cc22abadc 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -108,7 +108,7 @@ alternative_else_nop_endif
#else
#include <asm/pgalloc.h>
-#include <asm/cachetype.h>
+#include <asm/cache.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
@@ -155,7 +155,6 @@ void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);
phys_addr_t kvm_mmu_get_httbr(void);
phys_addr_t kvm_get_idmap_vector(void);
-phys_addr_t kvm_get_idmap_start(void);
int kvm_mmu_init(void);
void kvm_clear_hyp_idmap(void);
@@ -242,12 +241,13 @@ static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu,
kvm_flush_dcache_to_poc(va, size);
- if (!icache_is_aliasing()) { /* PIPT */
- flush_icache_range((unsigned long)va,
- (unsigned long)va + size);
- } else if (!icache_is_aivivt()) { /* non ASID-tagged VIVT */
+ if (icache_is_aliasing()) {
/* any kind of VIPT cache */
__flush_icache_all();
+ } else if (is_kernel_in_hyp_mode() || !icache_is_vpipt()) {
+ /* PIPT or VPIPT at EL2 (see comment in __kvm_tlb_flush_vmid_ipa) */
+ flush_icache_range((unsigned long)va,
+ (unsigned long)va + size);
}
}
@@ -307,7 +307,7 @@ static inline void __kvm_extend_hypmap(pgd_t *boot_hyp_pgd,
static inline unsigned int kvm_get_vmid_bits(void)
{
- int reg = read_system_reg(SYS_ID_AA64MMFR1_EL1);
+ int reg = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
return (cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR1_VMIDBITS_SHIFT) == 2) ? 16 : 8;
}
diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
index 47619411f0ff..5468c834b072 100644
--- a/arch/arm64/include/asm/mmu.h
+++ b/arch/arm64/include/asm/mmu.h
@@ -37,5 +37,6 @@ extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
unsigned long virt, phys_addr_t size,
pgprot_t prot, bool page_mappings_only);
extern void *fixmap_remap_fdt(phys_addr_t dt_phys);
+extern void mark_linear_text_alias_ro(void);
#endif
diff --git a/arch/arm64/include/asm/module.h b/arch/arm64/include/asm/module.h
index 06ff7fd9e81f..d57693f5d4ec 100644
--- a/arch/arm64/include/asm/module.h
+++ b/arch/arm64/include/asm/module.h
@@ -17,26 +17,26 @@
#define __ASM_MODULE_H
#include <asm-generic/module.h>
-#include <asm/memory.h>
#define MODULE_ARCH_VERMAGIC "aarch64"
#ifdef CONFIG_ARM64_MODULE_PLTS
-struct mod_arch_specific {
+struct mod_plt_sec {
struct elf64_shdr *plt;
int plt_num_entries;
int plt_max_entries;
};
+
+struct mod_arch_specific {
+ struct mod_plt_sec core;
+ struct mod_plt_sec init;
+};
#endif
-u64 module_emit_plt_entry(struct module *mod, const Elf64_Rela *rela,
+u64 module_emit_plt_entry(struct module *mod, void *loc, const Elf64_Rela *rela,
Elf64_Sym *sym);
#ifdef CONFIG_RANDOMIZE_BASE
-#ifdef CONFIG_MODVERSIONS
-#define ARCH_RELOCATES_KCRCTAB
-#define reloc_start (kimage_vaddr - KIMAGE_VADDR)
-#endif
extern u64 module_alloc_base;
#else
#define module_alloc_base ((u64)_etext - MODULES_VSIZE)
diff --git a/arch/arm64/include/asm/pci.h b/arch/arm64/include/asm/pci.h
index b9a7ba9ca44c..1fc19744ffe9 100644
--- a/arch/arm64/include/asm/pci.h
+++ b/arch/arm64/include/asm/pci.h
@@ -22,6 +22,8 @@
*/
#define PCI_DMA_BUS_IS_PHYS (0)
+#define ARCH_GENERIC_PCI_MMAP_RESOURCE 1
+
extern int isa_dma_bridge_buggy;
#ifdef CONFIG_PCI
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 0eef6064bf3b..c213fdbd056c 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -74,6 +74,16 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define pte_user_exec(pte) (!(pte_val(pte) & PTE_UXN))
#define pte_cont(pte) (!!(pte_val(pte) & PTE_CONT))
+#define pte_cont_addr_end(addr, end) \
+({ unsigned long __boundary = ((addr) + CONT_PTE_SIZE) & CONT_PTE_MASK; \
+ (__boundary - 1 < (end) - 1) ? __boundary : (end); \
+})
+
+#define pmd_cont_addr_end(addr, end) \
+({ unsigned long __boundary = ((addr) + CONT_PMD_SIZE) & CONT_PMD_MASK; \
+ (__boundary - 1 < (end) - 1) ? __boundary : (end); \
+})
+
#ifdef CONFIG_ARM64_HW_AFDBM
#define pte_hw_dirty(pte) (pte_write(pte) && !(pte_val(pte) & PTE_RDONLY))
#else
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index c97b8bd2acba..9428b93fefb2 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -50,6 +50,7 @@ extern phys_addr_t arm64_dma_phys_limit;
#define ARCH_LOW_ADDRESS_LIMIT (arm64_dma_phys_limit - 1)
struct debug_info {
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
/* Have we suspended stepping by a debugger? */
int suspended_step;
/* Allow breakpoints and watchpoints to be disabled for this thread. */
@@ -58,6 +59,7 @@ struct debug_info {
/* Hardware breakpoints pinned to this task. */
struct perf_event *hbp_break[ARM_MAX_BRP];
struct perf_event *hbp_watch[ARM_MAX_WRP];
+#endif
};
struct cpu_context {
diff --git a/arch/arm64/include/asm/sections.h b/arch/arm64/include/asm/sections.h
index 4e7e7067afdb..941267caa39c 100644
--- a/arch/arm64/include/asm/sections.h
+++ b/arch/arm64/include/asm/sections.h
@@ -24,6 +24,8 @@ extern char __hibernate_exit_text_start[], __hibernate_exit_text_end[];
extern char __hyp_idmap_text_start[], __hyp_idmap_text_end[];
extern char __hyp_text_start[], __hyp_text_end[];
extern char __idmap_text_start[], __idmap_text_end[];
+extern char __initdata_begin[], __initdata_end[];
+extern char __inittext_begin[], __inittext_end[];
extern char __irqentry_text_start[], __irqentry_text_end[];
extern char __mmuoff_data_start[], __mmuoff_data_end[];
diff --git a/arch/arm64/include/asm/smp.h b/arch/arm64/include/asm/smp.h
index d050d720a1b4..55f08c5acfad 100644
--- a/arch/arm64/include/asm/smp.h
+++ b/arch/arm64/include/asm/smp.h
@@ -148,6 +148,9 @@ static inline void cpu_panic_kernel(void)
*/
bool cpus_are_stuck_in_kernel(void);
+extern void smp_send_crash_stop(void);
+extern bool smp_crash_stop_failed(void);
+
#endif /* ifndef __ASSEMBLY__ */
#endif /* ifndef __ASM_SMP_H */
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index ac24b6e798b1..15c142ce991c 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -48,6 +48,8 @@
((crn) << CRn_shift) | ((crm) << CRm_shift) | \
((op2) << Op2_shift))
+#define sys_insn sys_reg
+
#define sys_reg_Op0(id) (((id) >> Op0_shift) & Op0_mask)
#define sys_reg_Op1(id) (((id) >> Op1_shift) & Op1_mask)
#define sys_reg_CRn(id) (((id) >> CRn_shift) & CRn_mask)
@@ -81,6 +83,41 @@
#endif /* CONFIG_BROKEN_GAS_INST */
+#define REG_PSTATE_PAN_IMM sys_reg(0, 0, 4, 0, 4)
+#define REG_PSTATE_UAO_IMM sys_reg(0, 0, 4, 0, 3)
+
+#define SET_PSTATE_PAN(x) __emit_inst(0xd5000000 | REG_PSTATE_PAN_IMM | \
+ (!!x)<<8 | 0x1f)
+#define SET_PSTATE_UAO(x) __emit_inst(0xd5000000 | REG_PSTATE_UAO_IMM | \
+ (!!x)<<8 | 0x1f)
+
+#define SYS_DC_ISW sys_insn(1, 0, 7, 6, 2)
+#define SYS_DC_CSW sys_insn(1, 0, 7, 10, 2)
+#define SYS_DC_CISW sys_insn(1, 0, 7, 14, 2)
+
+#define SYS_OSDTRRX_EL1 sys_reg(2, 0, 0, 0, 2)
+#define SYS_MDCCINT_EL1 sys_reg(2, 0, 0, 2, 0)
+#define SYS_MDSCR_EL1 sys_reg(2, 0, 0, 2, 2)
+#define SYS_OSDTRTX_EL1 sys_reg(2, 0, 0, 3, 2)
+#define SYS_OSECCR_EL1 sys_reg(2, 0, 0, 6, 2)
+#define SYS_DBGBVRn_EL1(n) sys_reg(2, 0, 0, n, 4)
+#define SYS_DBGBCRn_EL1(n) sys_reg(2, 0, 0, n, 5)
+#define SYS_DBGWVRn_EL1(n) sys_reg(2, 0, 0, n, 6)
+#define SYS_DBGWCRn_EL1(n) sys_reg(2, 0, 0, n, 7)
+#define SYS_MDRAR_EL1 sys_reg(2, 0, 1, 0, 0)
+#define SYS_OSLAR_EL1 sys_reg(2, 0, 1, 0, 4)
+#define SYS_OSLSR_EL1 sys_reg(2, 0, 1, 1, 4)
+#define SYS_OSDLR_EL1 sys_reg(2, 0, 1, 3, 4)
+#define SYS_DBGPRCR_EL1 sys_reg(2, 0, 1, 4, 4)
+#define SYS_DBGCLAIMSET_EL1 sys_reg(2, 0, 7, 8, 6)
+#define SYS_DBGCLAIMCLR_EL1 sys_reg(2, 0, 7, 9, 6)
+#define SYS_DBGAUTHSTATUS_EL1 sys_reg(2, 0, 7, 14, 6)
+#define SYS_MDCCSR_EL0 sys_reg(2, 3, 0, 1, 0)
+#define SYS_DBGDTR_EL0 sys_reg(2, 3, 0, 4, 0)
+#define SYS_DBGDTRRX_EL0 sys_reg(2, 3, 0, 5, 0)
+#define SYS_DBGDTRTX_EL0 sys_reg(2, 3, 0, 5, 0)
+#define SYS_DBGVCR32_EL2 sys_reg(2, 4, 0, 7, 0)
+
#define SYS_MIDR_EL1 sys_reg(3, 0, 0, 0, 0)
#define SYS_MPIDR_EL1 sys_reg(3, 0, 0, 0, 5)
#define SYS_REVIDR_EL1 sys_reg(3, 0, 0, 0, 6)
@@ -88,6 +125,7 @@
#define SYS_ID_PFR0_EL1 sys_reg(3, 0, 0, 1, 0)
#define SYS_ID_PFR1_EL1 sys_reg(3, 0, 0, 1, 1)
#define SYS_ID_DFR0_EL1 sys_reg(3, 0, 0, 1, 2)
+#define SYS_ID_AFR0_EL1 sys_reg(3, 0, 0, 1, 3)
#define SYS_ID_MMFR0_EL1 sys_reg(3, 0, 0, 1, 4)
#define SYS_ID_MMFR1_EL1 sys_reg(3, 0, 0, 1, 5)
#define SYS_ID_MMFR2_EL1 sys_reg(3, 0, 0, 1, 6)
@@ -118,17 +156,127 @@
#define SYS_ID_AA64MMFR1_EL1 sys_reg(3, 0, 0, 7, 1)
#define SYS_ID_AA64MMFR2_EL1 sys_reg(3, 0, 0, 7, 2)
-#define SYS_CNTFRQ_EL0 sys_reg(3, 3, 14, 0, 0)
+#define SYS_SCTLR_EL1 sys_reg(3, 0, 1, 0, 0)
+#define SYS_ACTLR_EL1 sys_reg(3, 0, 1, 0, 1)
+#define SYS_CPACR_EL1 sys_reg(3, 0, 1, 0, 2)
+
+#define SYS_TTBR0_EL1 sys_reg(3, 0, 2, 0, 0)
+#define SYS_TTBR1_EL1 sys_reg(3, 0, 2, 0, 1)
+#define SYS_TCR_EL1 sys_reg(3, 0, 2, 0, 2)
+
+#define SYS_ICC_PMR_EL1 sys_reg(3, 0, 4, 6, 0)
+
+#define SYS_AFSR0_EL1 sys_reg(3, 0, 5, 1, 0)
+#define SYS_AFSR1_EL1 sys_reg(3, 0, 5, 1, 1)
+#define SYS_ESR_EL1 sys_reg(3, 0, 5, 2, 0)
+#define SYS_FAR_EL1 sys_reg(3, 0, 6, 0, 0)
+#define SYS_PAR_EL1 sys_reg(3, 0, 7, 4, 0)
+
+#define SYS_PMINTENSET_EL1 sys_reg(3, 0, 9, 14, 1)
+#define SYS_PMINTENCLR_EL1 sys_reg(3, 0, 9, 14, 2)
+
+#define SYS_MAIR_EL1 sys_reg(3, 0, 10, 2, 0)
+#define SYS_AMAIR_EL1 sys_reg(3, 0, 10, 3, 0)
+
+#define SYS_VBAR_EL1 sys_reg(3, 0, 12, 0, 0)
+
+#define SYS_ICC_DIR_EL1 sys_reg(3, 0, 12, 11, 1)
+#define SYS_ICC_SGI1R_EL1 sys_reg(3, 0, 12, 11, 5)
+#define SYS_ICC_IAR1_EL1 sys_reg(3, 0, 12, 12, 0)
+#define SYS_ICC_EOIR1_EL1 sys_reg(3, 0, 12, 12, 1)
+#define SYS_ICC_BPR1_EL1 sys_reg(3, 0, 12, 12, 3)
+#define SYS_ICC_CTLR_EL1 sys_reg(3, 0, 12, 12, 4)
+#define SYS_ICC_SRE_EL1 sys_reg(3, 0, 12, 12, 5)
+#define SYS_ICC_GRPEN1_EL1 sys_reg(3, 0, 12, 12, 7)
+
+#define SYS_CONTEXTIDR_EL1 sys_reg(3, 0, 13, 0, 1)
+#define SYS_TPIDR_EL1 sys_reg(3, 0, 13, 0, 4)
+
+#define SYS_CNTKCTL_EL1 sys_reg(3, 0, 14, 1, 0)
+
+#define SYS_CLIDR_EL1 sys_reg(3, 1, 0, 0, 1)
+#define SYS_AIDR_EL1 sys_reg(3, 1, 0, 0, 7)
+
+#define SYS_CSSELR_EL1 sys_reg(3, 2, 0, 0, 0)
+
#define SYS_CTR_EL0 sys_reg(3, 3, 0, 0, 1)
#define SYS_DCZID_EL0 sys_reg(3, 3, 0, 0, 7)
-#define REG_PSTATE_PAN_IMM sys_reg(0, 0, 4, 0, 4)
-#define REG_PSTATE_UAO_IMM sys_reg(0, 0, 4, 0, 3)
+#define SYS_PMCR_EL0 sys_reg(3, 3, 9, 12, 0)
+#define SYS_PMCNTENSET_EL0 sys_reg(3, 3, 9, 12, 1)
+#define SYS_PMCNTENCLR_EL0 sys_reg(3, 3, 9, 12, 2)
+#define SYS_PMOVSCLR_EL0 sys_reg(3, 3, 9, 12, 3)
+#define SYS_PMSWINC_EL0 sys_reg(3, 3, 9, 12, 4)
+#define SYS_PMSELR_EL0 sys_reg(3, 3, 9, 12, 5)
+#define SYS_PMCEID0_EL0 sys_reg(3, 3, 9, 12, 6)
+#define SYS_PMCEID1_EL0 sys_reg(3, 3, 9, 12, 7)
+#define SYS_PMCCNTR_EL0 sys_reg(3, 3, 9, 13, 0)
+#define SYS_PMXEVTYPER_EL0 sys_reg(3, 3, 9, 13, 1)
+#define SYS_PMXEVCNTR_EL0 sys_reg(3, 3, 9, 13, 2)
+#define SYS_PMUSERENR_EL0 sys_reg(3, 3, 9, 14, 0)
+#define SYS_PMOVSSET_EL0 sys_reg(3, 3, 9, 14, 3)
+
+#define SYS_TPIDR_EL0 sys_reg(3, 3, 13, 0, 2)
+#define SYS_TPIDRRO_EL0 sys_reg(3, 3, 13, 0, 3)
-#define SET_PSTATE_PAN(x) __emit_inst(0xd5000000 | REG_PSTATE_PAN_IMM | \
- (!!x)<<8 | 0x1f)
-#define SET_PSTATE_UAO(x) __emit_inst(0xd5000000 | REG_PSTATE_UAO_IMM | \
- (!!x)<<8 | 0x1f)
+#define SYS_CNTFRQ_EL0 sys_reg(3, 3, 14, 0, 0)
+
+#define SYS_CNTP_TVAL_EL0 sys_reg(3, 3, 14, 2, 0)
+#define SYS_CNTP_CTL_EL0 sys_reg(3, 3, 14, 2, 1)
+#define SYS_CNTP_CVAL_EL0 sys_reg(3, 3, 14, 2, 2)
+
+#define __PMEV_op2(n) ((n) & 0x7)
+#define __CNTR_CRm(n) (0x8 | (((n) >> 3) & 0x3))
+#define SYS_PMEVCNTRn_EL0(n) sys_reg(3, 3, 14, __CNTR_CRm(n), __PMEV_op2(n))
+#define __TYPER_CRm(n) (0xc | (((n) >> 3) & 0x3))
+#define SYS_PMEVTYPERn_EL0(n) sys_reg(3, 3, 14, __TYPER_CRm(n), __PMEV_op2(n))
+
+#define SYS_PMCCFILTR_EL0 sys_reg (3, 3, 14, 15, 7)
+
+#define SYS_DACR32_EL2 sys_reg(3, 4, 3, 0, 0)
+#define SYS_IFSR32_EL2 sys_reg(3, 4, 5, 0, 1)
+#define SYS_FPEXC32_EL2 sys_reg(3, 4, 5, 3, 0)
+
+#define __SYS__AP0Rx_EL2(x) sys_reg(3, 4, 12, 8, x)
+#define SYS_ICH_AP0R0_EL2 __SYS__AP0Rx_EL2(0)
+#define SYS_ICH_AP0R1_EL2 __SYS__AP0Rx_EL2(1)
+#define SYS_ICH_AP0R2_EL2 __SYS__AP0Rx_EL2(2)
+#define SYS_ICH_AP0R3_EL2 __SYS__AP0Rx_EL2(3)
+
+#define __SYS__AP1Rx_EL2(x) sys_reg(3, 4, 12, 9, x)
+#define SYS_ICH_AP1R0_EL2 __SYS__AP1Rx_EL2(0)
+#define SYS_ICH_AP1R1_EL2 __SYS__AP1Rx_EL2(1)
+#define SYS_ICH_AP1R2_EL2 __SYS__AP1Rx_EL2(2)
+#define SYS_ICH_AP1R3_EL2 __SYS__AP1Rx_EL2(3)
+
+#define SYS_ICH_VSEIR_EL2 sys_reg(3, 4, 12, 9, 4)
+#define SYS_ICC_SRE_EL2 sys_reg(3, 4, 12, 9, 5)
+#define SYS_ICH_HCR_EL2 sys_reg(3, 4, 12, 11, 0)
+#define SYS_ICH_VTR_EL2 sys_reg(3, 4, 12, 11, 1)
+#define SYS_ICH_MISR_EL2 sys_reg(3, 4, 12, 11, 2)
+#define SYS_ICH_EISR_EL2 sys_reg(3, 4, 12, 11, 3)
+#define SYS_ICH_ELSR_EL2 sys_reg(3, 4, 12, 11, 5)
+#define SYS_ICH_VMCR_EL2 sys_reg(3, 4, 12, 11, 7)
+
+#define __SYS__LR0_EL2(x) sys_reg(3, 4, 12, 12, x)
+#define SYS_ICH_LR0_EL2 __SYS__LR0_EL2(0)
+#define SYS_ICH_LR1_EL2 __SYS__LR0_EL2(1)
+#define SYS_ICH_LR2_EL2 __SYS__LR0_EL2(2)
+#define SYS_ICH_LR3_EL2 __SYS__LR0_EL2(3)
+#define SYS_ICH_LR4_EL2 __SYS__LR0_EL2(4)
+#define SYS_ICH_LR5_EL2 __SYS__LR0_EL2(5)
+#define SYS_ICH_LR6_EL2 __SYS__LR0_EL2(6)
+#define SYS_ICH_LR7_EL2 __SYS__LR0_EL2(7)
+
+#define __SYS__LR8_EL2(x) sys_reg(3, 4, 12, 13, x)
+#define SYS_ICH_LR8_EL2 __SYS__LR8_EL2(0)
+#define SYS_ICH_LR9_EL2 __SYS__LR8_EL2(1)
+#define SYS_ICH_LR10_EL2 __SYS__LR8_EL2(2)
+#define SYS_ICH_LR11_EL2 __SYS__LR8_EL2(3)
+#define SYS_ICH_LR12_EL2 __SYS__LR8_EL2(4)
+#define SYS_ICH_LR13_EL2 __SYS__LR8_EL2(5)
+#define SYS_ICH_LR14_EL2 __SYS__LR8_EL2(6)
+#define SYS_ICH_LR15_EL2 __SYS__LR8_EL2(7)
/* Common SCTLR_ELx flags. */
#define SCTLR_ELx_EE (1 << 25)
@@ -156,6 +304,11 @@
#define ID_AA64ISAR0_SHA1_SHIFT 8
#define ID_AA64ISAR0_AES_SHIFT 4
+/* id_aa64isar1 */
+#define ID_AA64ISAR1_LRCPC_SHIFT 20
+#define ID_AA64ISAR1_FCMA_SHIFT 16
+#define ID_AA64ISAR1_JSCVT_SHIFT 12
+
/* id_aa64pfr0 */
#define ID_AA64PFR0_GIC_SHIFT 24
#define ID_AA64PFR0_ASIMD_SHIFT 20
diff --git a/arch/arm64/include/asm/virt.h b/arch/arm64/include/asm/virt.h
index 439f6b5d31f6..c5f89442785c 100644
--- a/arch/arm64/include/asm/virt.h
+++ b/arch/arm64/include/asm/virt.h
@@ -19,25 +19,38 @@
#define __ASM__VIRT_H
/*
- * The arm64 hcall implementation uses x0 to specify the hcall type. A value
- * less than 0xfff indicates a special hcall, such as get/set vector.
- * Any other value is used as a pointer to the function to call.
+ * The arm64 hcall implementation uses x0 to specify the hcall
+ * number. A value less than HVC_STUB_HCALL_NR indicates a special
+ * hcall, such as set vector. Any other value is handled in a
+ * hypervisor specific way.
+ *
+ * The hypercall is allowed to clobber any of the caller-saved
+ * registers (x0-x18), so it is advisable to use it through the
+ * indirection of a function call (as implemented in hyp-stub.S).
*/
-/* HVC_GET_VECTORS - Return the value of the vbar_el2 register. */
-#define HVC_GET_VECTORS 0
-
/*
* HVC_SET_VECTORS - Set the value of the vbar_el2 register.
*
* @x1: Physical address of the new vector table.
*/
-#define HVC_SET_VECTORS 1
+#define HVC_SET_VECTORS 0
/*
* HVC_SOFT_RESTART - CPU soft reset, used by the cpu_soft_restart routine.
*/
-#define HVC_SOFT_RESTART 2
+#define HVC_SOFT_RESTART 1
+
+/*
+ * HVC_RESET_VECTORS - Restore the vectors to the original HYP stubs
+ */
+#define HVC_RESET_VECTORS 2
+
+/* Max number of HYP stub hypercalls */
+#define HVC_STUB_HCALL_NR 3
+
+/* Error returned when an invalid stub number is passed into x0 */
+#define HVC_STUB_ERR 0xbadca11
#define BOOT_CPU_MODE_EL1 (0xe11)
#define BOOT_CPU_MODE_EL2 (0xe12)
@@ -61,7 +74,7 @@
extern u32 __boot_cpu_mode[2];
void __hyp_set_vectors(phys_addr_t phys_vector_base);
-phys_addr_t __hyp_get_vectors(void);
+void __hyp_reset_vectors(void);
/* Reports the availability of HYP mode */
static inline bool is_hyp_mode_available(void)
diff --git a/arch/arm64/include/uapi/asm/hwcap.h b/arch/arm64/include/uapi/asm/hwcap.h
index 61c263cba272..4e187ce2a811 100644
--- a/arch/arm64/include/uapi/asm/hwcap.h
+++ b/arch/arm64/include/uapi/asm/hwcap.h
@@ -32,5 +32,8 @@
#define HWCAP_ASIMDHP (1 << 10)
#define HWCAP_CPUID (1 << 11)
#define HWCAP_ASIMDRDM (1 << 12)
+#define HWCAP_JSCVT (1 << 13)
+#define HWCAP_FCMA (1 << 14)
+#define HWCAP_LRCPC (1 << 15)
#endif /* _UAPI__ASM_HWCAP_H */
diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h
index c2860358ae3e..869ee480deed 100644
--- a/arch/arm64/include/uapi/asm/kvm.h
+++ b/arch/arm64/include/uapi/asm/kvm.h
@@ -39,6 +39,8 @@
#define __KVM_HAVE_IRQ_LINE
#define __KVM_HAVE_READONLY_MEM
+#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
+
#define KVM_REG_SIZE(id) \
(1U << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))
@@ -143,6 +145,8 @@ struct kvm_debug_exit_arch {
#define KVM_GUESTDBG_USE_HW (1 << 17)
struct kvm_sync_regs {
+ /* Used with KVM_CAP_ARM_USER_IRQ */
+ __u64 device_irq_level;
};
struct kvm_arch_memory_slot {
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index 1606c6b2a280..1dcb69d3d0e5 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -50,6 +50,9 @@ arm64-obj-$(CONFIG_RANDOMIZE_BASE) += kaslr.o
arm64-obj-$(CONFIG_HIBERNATION) += hibernate.o hibernate-asm.o
arm64-obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o \
cpu-reset.o
+arm64-obj-$(CONFIG_ARM64_RELOC_TEST) += arm64-reloc-test.o
+arm64-reloc-test-y := reloc_test_core.o reloc_test_syms.o
+arm64-obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
obj-y += $(arm64-obj-y) vdso/ probes/
obj-m += $(arm64-obj-m)
diff --git a/arch/arm64/kernel/alternative.c b/arch/arm64/kernel/alternative.c
index 06d650f61da7..8840c109c5d6 100644
--- a/arch/arm64/kernel/alternative.c
+++ b/arch/arm64/kernel/alternative.c
@@ -105,11 +105,11 @@ static u32 get_alt_insn(struct alt_instr *alt, u32 *insnptr, u32 *altinsnptr)
return insn;
}
-static void __apply_alternatives(void *alt_region)
+static void __apply_alternatives(void *alt_region, bool use_linear_alias)
{
struct alt_instr *alt;
struct alt_region *region = alt_region;
- u32 *origptr, *replptr;
+ u32 *origptr, *replptr, *updptr;
for (alt = region->begin; alt < region->end; alt++) {
u32 insn;
@@ -124,11 +124,12 @@ static void __apply_alternatives(void *alt_region)
origptr = ALT_ORIG_PTR(alt);
replptr = ALT_REPL_PTR(alt);
+ updptr = use_linear_alias ? (u32 *)lm_alias(origptr) : origptr;
nr_inst = alt->alt_len / sizeof(insn);
for (i = 0; i < nr_inst; i++) {
insn = get_alt_insn(alt, origptr + i, replptr + i);
- *(origptr + i) = cpu_to_le32(insn);
+ updptr[i] = cpu_to_le32(insn);
}
flush_icache_range((uintptr_t)origptr,
@@ -155,7 +156,7 @@ static int __apply_alternatives_multi_stop(void *unused)
isb();
} else {
BUG_ON(patched);
- __apply_alternatives(&region);
+ __apply_alternatives(&region, true);
/* Barriers provided by the cache flushing */
WRITE_ONCE(patched, 1);
}
@@ -176,5 +177,5 @@ void apply_alternatives(void *start, size_t length)
.end = start + length,
};
- __apply_alternatives(&region);
+ __apply_alternatives(&region, false);
}
diff --git a/arch/arm64/kernel/cacheinfo.c b/arch/arm64/kernel/cacheinfo.c
index 3f2250fc391b..380f2e2fbed5 100644
--- a/arch/arm64/kernel/cacheinfo.c
+++ b/arch/arm64/kernel/cacheinfo.c
@@ -17,15 +17,9 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#include <linux/bitops.h>
#include <linux/cacheinfo.h>
-#include <linux/cpu.h>
-#include <linux/compiler.h>
#include <linux/of.h>
-#include <asm/cachetype.h>
-#include <asm/processor.h>
-
#define MAX_CACHE_LEVEL 7 /* Max 7 level supported */
/* Ctypen, bits[3(n - 1) + 2 : 3(n - 1)], for n = 1 to 7 */
#define CLIDR_CTYPE_SHIFT(level) (3 * (level - 1))
@@ -43,43 +37,11 @@ static inline enum cache_type get_cache_type(int level)
return CLIDR_CTYPE(clidr, level);
}
-/*
- * Cache Size Selection Register(CSSELR) selects which Cache Size ID
- * Register(CCSIDR) is accessible by specifying the required cache
- * level and the cache type. We need to ensure that no one else changes
- * CSSELR by calling this in non-preemtible context
- */
-u64 __attribute_const__ cache_get_ccsidr(u64 csselr)
-{
- u64 ccsidr;
-
- WARN_ON(preemptible());
-
- write_sysreg(csselr, csselr_el1);
- isb();
- ccsidr = read_sysreg(ccsidr_el1);
-
- return ccsidr;
-}
-
static void ci_leaf_init(struct cacheinfo *this_leaf,
enum cache_type type, unsigned int level)
{
- bool is_icache = type & CACHE_TYPE_INST;
- u64 tmp = cache_get_ccsidr((level - 1) << 1 | is_icache);
-
this_leaf->level = level;
this_leaf->type = type;
- this_leaf->coherency_line_size = CACHE_LINESIZE(tmp);
- this_leaf->number_of_sets = CACHE_NUMSETS(tmp);
- this_leaf->ways_of_associativity = CACHE_ASSOCIATIVITY(tmp);
- this_leaf->size = this_leaf->number_of_sets *
- this_leaf->coherency_line_size * this_leaf->ways_of_associativity;
- this_leaf->attributes =
- ((tmp & CCSIDR_EL1_WRITE_THROUGH) ? CACHE_WRITE_THROUGH : 0) |
- ((tmp & CCSIDR_EL1_WRITE_BACK) ? CACHE_WRITE_BACK : 0) |
- ((tmp & CCSIDR_EL1_READ_ALLOCATE) ? CACHE_READ_ALLOCATE : 0) |
- ((tmp & CCSIDR_EL1_WRITE_ALLOCATE) ? CACHE_WRITE_ALLOCATE : 0);
}
static int __init_cache_level(unsigned int cpu)
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 6eb77ae99b79..94b8f7fc3310 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -97,6 +97,13 @@ static const struct arm64_ftr_bits ftr_id_aa64isar0[] = {
ARM64_FTR_END,
};
+static const struct arm64_ftr_bits ftr_id_aa64isar1[] = {
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, ID_AA64ISAR1_LRCPC_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, ID_AA64ISAR1_FCMA_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, ID_AA64ISAR1_JSCVT_SHIFT, 4, 0),
+ ARM64_FTR_END,
+};
+
static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64PFR0_GIC_SHIFT, 4, 0),
S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_ASIMD_SHIFT, 4, ID_AA64PFR0_ASIMD_NI),
@@ -153,9 +160,9 @@ static const struct arm64_ftr_bits ftr_ctr[] = {
/*
* Linux can handle differing I-cache policies. Userspace JITs will
* make use of *minLine.
- * If we have differing I-cache policies, report it as the weakest - AIVIVT.
+ * If we have differing I-cache policies, report it as the weakest - VIPT.
*/
- ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_EXACT, 14, 2, ICACHE_POLICY_AIVIVT), /* L1Ip */
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_EXACT, 14, 2, ICACHE_POLICY_VIPT), /* L1Ip */
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0), /* IminLine */
ARM64_FTR_END,
};
@@ -314,7 +321,7 @@ static const struct __ftr_reg_entry {
/* Op1 = 0, CRn = 0, CRm = 6 */
ARM64_FTR_REG(SYS_ID_AA64ISAR0_EL1, ftr_id_aa64isar0),
- ARM64_FTR_REG(SYS_ID_AA64ISAR1_EL1, ftr_raz),
+ ARM64_FTR_REG(SYS_ID_AA64ISAR1_EL1, ftr_id_aa64isar1),
/* Op1 = 0, CRn = 0, CRm = 7 */
ARM64_FTR_REG(SYS_ID_AA64MMFR0_EL1, ftr_id_aa64mmfr0),
@@ -585,7 +592,7 @@ void update_cpu_features(int cpu,
* If we have AArch32, we care about 32-bit features for compat.
* If the system doesn't support AArch32, don't update them.
*/
- if (id_aa64pfr0_32bit_el0(read_system_reg(SYS_ID_AA64PFR0_EL1)) &&
+ if (id_aa64pfr0_32bit_el0(read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1)) &&
id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) {
taint |= check_update_ftr_reg(SYS_ID_DFR0_EL1, cpu,
@@ -636,7 +643,7 @@ void update_cpu_features(int cpu,
"Unsupported CPU feature variation.\n");
}
-u64 read_system_reg(u32 id)
+u64 read_sanitised_ftr_reg(u32 id)
{
struct arm64_ftr_reg *regp = get_arm64_ftr_reg(id);
@@ -649,10 +656,10 @@ u64 read_system_reg(u32 id)
case r: return read_sysreg_s(r)
/*
- * __raw_read_system_reg() - Used by a STARTING cpu before cpuinfo is populated.
+ * __read_sysreg_by_encoding() - Used by a STARTING cpu before cpuinfo is populated.
* Read the system register on the current CPU
*/
-static u64 __raw_read_system_reg(u32 sys_id)
+static u64 __read_sysreg_by_encoding(u32 sys_id)
{
switch (sys_id) {
read_sysreg_case(SYS_ID_PFR0_EL1);
@@ -709,9 +716,9 @@ has_cpuid_feature(const struct arm64_cpu_capabilities *entry, int scope)
WARN_ON(scope == SCOPE_LOCAL_CPU && preemptible());
if (scope == SCOPE_SYSTEM)
- val = read_system_reg(entry->sys_reg);
+ val = read_sanitised_ftr_reg(entry->sys_reg);
else
- val = __raw_read_system_reg(entry->sys_reg);
+ val = __read_sysreg_by_encoding(entry->sys_reg);
return feature_matches(val, entry);
}
@@ -761,7 +768,7 @@ static bool hyp_offset_low(const struct arm64_cpu_capabilities *entry,
static bool has_no_fpsimd(const struct arm64_cpu_capabilities *entry, int __unused)
{
- u64 pfr0 = read_system_reg(SYS_ID_AA64PFR0_EL1);
+ u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
return cpuid_feature_extract_signed_field(pfr0,
ID_AA64PFR0_FP_SHIFT) < 0;
@@ -888,6 +895,9 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, HWCAP_FPHP),
HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, HWCAP_ASIMD),
HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, HWCAP_ASIMDHP),
+ HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_JSCVT_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_JSCVT),
+ HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_FCMA_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_FCMA),
+ HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_LRCPC_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_LRCPC),
{},
};
diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
index 5b22c687f02a..68b1f364c515 100644
--- a/arch/arm64/kernel/cpuinfo.c
+++ b/arch/arm64/kernel/cpuinfo.c
@@ -15,7 +15,7 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <asm/arch_timer.h>
-#include <asm/cachetype.h>
+#include <asm/cache.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/cpufeature.h>
@@ -43,10 +43,10 @@ DEFINE_PER_CPU(struct cpuinfo_arm64, cpu_data);
static struct cpuinfo_arm64 boot_cpu_data;
static char *icache_policy_str[] = {
- [ICACHE_POLICY_RESERVED] = "RESERVED/UNKNOWN",
- [ICACHE_POLICY_AIVIVT] = "AIVIVT",
- [ICACHE_POLICY_VIPT] = "VIPT",
- [ICACHE_POLICY_PIPT] = "PIPT",
+ [0 ... ICACHE_POLICY_PIPT] = "RESERVED/UNKNOWN",
+ [ICACHE_POLICY_VIPT] = "VIPT",
+ [ICACHE_POLICY_PIPT] = "PIPT",
+ [ICACHE_POLICY_VPIPT] = "VPIPT",
};
unsigned long __icache_flags;
@@ -65,6 +65,9 @@ static const char *const hwcap_str[] = {
"asimdhp",
"cpuid",
"asimdrdm",
+ "jscvt",
+ "fcma",
+ "lrcpc",
NULL
};
@@ -289,20 +292,18 @@ static void cpuinfo_detect_icache_policy(struct cpuinfo_arm64 *info)
unsigned int cpu = smp_processor_id();
u32 l1ip = CTR_L1IP(info->reg_ctr);
- if (l1ip != ICACHE_POLICY_PIPT) {
- /*
- * VIPT caches are non-aliasing if the VA always equals the PA
- * in all bit positions that are covered by the index. This is
- * the case if the size of a way (# of sets * line size) does
- * not exceed PAGE_SIZE.
- */
- u32 waysize = icache_get_numsets() * icache_get_linesize();
-
- if (l1ip != ICACHE_POLICY_VIPT || waysize > PAGE_SIZE)
- set_bit(ICACHEF_ALIASING, &__icache_flags);
+ switch (l1ip) {
+ case ICACHE_POLICY_PIPT:
+ break;
+ case ICACHE_POLICY_VPIPT:
+ set_bit(ICACHEF_VPIPT, &__icache_flags);
+ break;
+ default:
+ /* Fallthrough */
+ case ICACHE_POLICY_VIPT:
+ /* Assume aliasing */
+ set_bit(ICACHEF_ALIASING, &__icache_flags);
}
- if (l1ip == ICACHE_POLICY_AIVIVT)
- set_bit(ICACHEF_AIVIVT, &__icache_flags);
pr_info("Detected %s I-cache on CPU%d\n", icache_policy_str[l1ip], cpu);
}
diff --git a/arch/arm64/kernel/crash_dump.c b/arch/arm64/kernel/crash_dump.c
new file mode 100644
index 000000000000..f46d57c31443
--- /dev/null
+++ b/arch/arm64/kernel/crash_dump.c
@@ -0,0 +1,71 @@
+/*
+ * Routines for doing kexec-based kdump
+ *
+ * Copyright (C) 2017 Linaro Limited
+ * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/crash_dump.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/memblock.h>
+#include <linux/uaccess.h>
+#include <asm/memory.h>
+
+/**
+ * copy_oldmem_page() - copy one page from old kernel memory
+ * @pfn: page frame number to be copied
+ * @buf: buffer where the copied page is placed
+ * @csize: number of bytes to copy
+ * @offset: offset in bytes into the page
+ * @userbuf: if set, @buf is in a user address space
+ *
+ * This function copies one page from old kernel memory into the buffer
+ * pointed to by @buf. If @buf is in user space, set @userbuf to %1. Returns
+ * the number of bytes copied, or a negative error code on failure.
+ */
+ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
+ size_t csize, unsigned long offset,
+ int userbuf)
+{
+ void *vaddr;
+
+ if (!csize)
+ return 0;
+
+ vaddr = memremap(__pfn_to_phys(pfn), PAGE_SIZE, MEMREMAP_WB);
+ if (!vaddr)
+ return -ENOMEM;
+
+ if (userbuf) {
+ if (copy_to_user((char __user *)buf, vaddr + offset, csize)) {
+ memunmap(vaddr);
+ return -EFAULT;
+ }
+ } else {
+ memcpy(buf, vaddr + offset, csize);
+ }
+
+ memunmap(vaddr);
+
+ return csize;
+}
+
+/**
+ * elfcorehdr_read - read from ELF core header
+ * @buf: buffer where the data is placed
+ * @count: number of bytes to read
+ * @ppos: physical address to read from
+ *
+ * This function reads @count bytes from the ELF core header, which resides
+ * in the crash dump kernel's memory.
+ */
+ssize_t elfcorehdr_read(char *buf, size_t count, u64 *ppos)
+{
+ memcpy(buf, phys_to_virt((phys_addr_t)*ppos), count);
+ return count;
+}
diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
index 32913567da08..d618e25c3de1 100644
--- a/arch/arm64/kernel/debug-monitors.c
+++ b/arch/arm64/kernel/debug-monitors.c
@@ -36,7 +36,7 @@
/* Determine debug architecture. */
u8 debug_monitors_arch(void)
{
- return cpuid_feature_extract_unsigned_field(read_system_reg(SYS_ID_AA64DFR0_EL1),
+ return cpuid_feature_extract_unsigned_field(read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1),
ID_AA64DFR0_DEBUGVER_SHIFT);
}
diff --git a/arch/arm64/kernel/efi-header.S b/arch/arm64/kernel/efi-header.S
new file mode 100644
index 000000000000..613fc3000677
--- /dev/null
+++ b/arch/arm64/kernel/efi-header.S
@@ -0,0 +1,155 @@
+/*
+ * Copyright (C) 2013 - 2017 Linaro, Ltd.
+ * Copyright (C) 2013, 2014 Red Hat, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/pe.h>
+#include <linux/sizes.h>
+
+ .macro __EFI_PE_HEADER
+ .long PE_MAGIC
+coff_header:
+ .short IMAGE_FILE_MACHINE_ARM64 // Machine
+ .short section_count // NumberOfSections
+ .long 0 // TimeDateStamp
+ .long 0 // PointerToSymbolTable
+ .long 0 // NumberOfSymbols
+ .short section_table - optional_header // SizeOfOptionalHeader
+ .short IMAGE_FILE_DEBUG_STRIPPED | \
+ IMAGE_FILE_EXECUTABLE_IMAGE | \
+ IMAGE_FILE_LINE_NUMS_STRIPPED // Characteristics
+
+optional_header:
+ .short PE_OPT_MAGIC_PE32PLUS // PE32+ format
+ .byte 0x02 // MajorLinkerVersion
+ .byte 0x14 // MinorLinkerVersion
+ .long __initdata_begin - efi_header_end // SizeOfCode
+ .long __pecoff_data_size // SizeOfInitializedData
+ .long 0 // SizeOfUninitializedData
+ .long __efistub_entry - _head // AddressOfEntryPoint
+ .long efi_header_end - _head // BaseOfCode
+
+extra_header_fields:
+ .quad 0 // ImageBase
+ .long SZ_4K // SectionAlignment
+ .long PECOFF_FILE_ALIGNMENT // FileAlignment
+ .short 0 // MajorOperatingSystemVersion
+ .short 0 // MinorOperatingSystemVersion
+ .short 0 // MajorImageVersion
+ .short 0 // MinorImageVersion
+ .short 0 // MajorSubsystemVersion
+ .short 0 // MinorSubsystemVersion
+ .long 0 // Win32VersionValue
+
+ .long _end - _head // SizeOfImage
+
+ // Everything before the kernel image is considered part of the header
+ .long efi_header_end - _head // SizeOfHeaders
+ .long 0 // CheckSum
+ .short IMAGE_SUBSYSTEM_EFI_APPLICATION // Subsystem
+ .short 0 // DllCharacteristics
+ .quad 0 // SizeOfStackReserve
+ .quad 0 // SizeOfStackCommit
+ .quad 0 // SizeOfHeapReserve
+ .quad 0 // SizeOfHeapCommit
+ .long 0 // LoaderFlags
+ .long (section_table - .) / 8 // NumberOfRvaAndSizes
+
+ .quad 0 // ExportTable
+ .quad 0 // ImportTable
+ .quad 0 // ResourceTable
+ .quad 0 // ExceptionTable
+ .quad 0 // CertificationTable
+ .quad 0 // BaseRelocationTable
+
+#ifdef CONFIG_DEBUG_EFI
+ .long efi_debug_table - _head // DebugTable
+ .long efi_debug_table_size
+#endif
+
+ // Section table
+section_table:
+ .ascii ".text\0\0\0"
+ .long __initdata_begin - efi_header_end // VirtualSize
+ .long efi_header_end - _head // VirtualAddress
+ .long __initdata_begin - efi_header_end // SizeOfRawData
+ .long efi_header_end - _head // PointerToRawData
+
+ .long 0 // PointerToRelocations
+ .long 0 // PointerToLineNumbers
+ .short 0 // NumberOfRelocations
+ .short 0 // NumberOfLineNumbers
+ .long IMAGE_SCN_CNT_CODE | \
+ IMAGE_SCN_MEM_READ | \
+ IMAGE_SCN_MEM_EXECUTE // Characteristics
+
+ .ascii ".data\0\0\0"
+ .long __pecoff_data_size // VirtualSize
+ .long __initdata_begin - _head // VirtualAddress
+ .long __pecoff_data_rawsize // SizeOfRawData
+ .long __initdata_begin - _head // PointerToRawData
+
+ .long 0 // PointerToRelocations
+ .long 0 // PointerToLineNumbers
+ .short 0 // NumberOfRelocations
+ .short 0 // NumberOfLineNumbers
+ .long IMAGE_SCN_CNT_INITIALIZED_DATA | \
+ IMAGE_SCN_MEM_READ | \
+ IMAGE_SCN_MEM_WRITE // Characteristics
+
+ .set section_count, (. - section_table) / 40
+
+#ifdef CONFIG_DEBUG_EFI
+ /*
+ * The debug table is referenced via its Relative Virtual Address (RVA),
+ * which is only defined for those parts of the image that are covered
+ * by a section declaration. Since this header is not covered by any
+ * section, the debug table must be emitted elsewhere. So stick it in
+ * the .init.rodata section instead.
+ *
+ * Note that the EFI debug entry itself may legally have a zero RVA,
+ * which means we can simply put it right after the section headers.
+ */
+ __INITRODATA
+
+ .align 2
+efi_debug_table:
+ // EFI_IMAGE_DEBUG_DIRECTORY_ENTRY
+ .long 0 // Characteristics
+ .long 0 // TimeDateStamp
+ .short 0 // MajorVersion
+ .short 0 // MinorVersion
+ .long IMAGE_DEBUG_TYPE_CODEVIEW // Type
+ .long efi_debug_entry_size // SizeOfData
+ .long 0 // RVA
+ .long efi_debug_entry - _head // FileOffset
+
+ .set efi_debug_table_size, . - efi_debug_table
+ .previous
+
+efi_debug_entry:
+ // EFI_IMAGE_DEBUG_CODEVIEW_NB10_ENTRY
+ .ascii "NB10" // Signature
+ .long 0 // Unknown
+ .long 0 // Unknown2
+ .long 0 // Unknown3
+
+ .asciz VMLINUX_PATH
+
+ .set efi_debug_entry_size, . - efi_debug_entry
+#endif
+
+ /*
+ * EFI will load .text onwards at the 4k section alignment
+ * described in the PE/COFF header. To ensure that instruction
+ * sequences using an adrp and a :lo12: immediate will function
+ * correctly at this alignment, we must ensure that .text is
+ * placed at a 4k boundary in the Image to begin with.
+ */
+ .align 12
+efi_header_end:
+ .endm
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 4fb6ccd886d1..973df7de7bf8 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -42,6 +42,8 @@
#include <asm/thread_info.h>
#include <asm/virt.h>
+#include "efi-header.S"
+
#define __PHYS_OFFSET (KERNEL_START - TEXT_OFFSET)
#if (TEXT_OFFSET & 0xfff) != 0
@@ -89,166 +91,14 @@ _head:
.quad 0 // reserved
.quad 0 // reserved
.quad 0 // reserved
- .byte 0x41 // Magic number, "ARM\x64"
- .byte 0x52
- .byte 0x4d
- .byte 0x64
+ .ascii "ARM\x64" // Magic number
#ifdef CONFIG_EFI
.long pe_header - _head // Offset to the PE header.
-#else
- .word 0 // reserved
-#endif
-#ifdef CONFIG_EFI
- .align 3
pe_header:
- .ascii "PE"
- .short 0
-coff_header:
- .short 0xaa64 // AArch64
- .short 2 // nr_sections
- .long 0 // TimeDateStamp
- .long 0 // PointerToSymbolTable
- .long 1 // NumberOfSymbols
- .short section_table - optional_header // SizeOfOptionalHeader
- .short 0x206 // Characteristics.
- // IMAGE_FILE_DEBUG_STRIPPED |
- // IMAGE_FILE_EXECUTABLE_IMAGE |
- // IMAGE_FILE_LINE_NUMS_STRIPPED
-optional_header:
- .short 0x20b // PE32+ format
- .byte 0x02 // MajorLinkerVersion
- .byte 0x14 // MinorLinkerVersion
- .long _end - efi_header_end // SizeOfCode
- .long 0 // SizeOfInitializedData
- .long 0 // SizeOfUninitializedData
- .long __efistub_entry - _head // AddressOfEntryPoint
- .long efi_header_end - _head // BaseOfCode
-
-extra_header_fields:
- .quad 0 // ImageBase
- .long 0x1000 // SectionAlignment
- .long PECOFF_FILE_ALIGNMENT // FileAlignment
- .short 0 // MajorOperatingSystemVersion
- .short 0 // MinorOperatingSystemVersion
- .short 0 // MajorImageVersion
- .short 0 // MinorImageVersion
- .short 0 // MajorSubsystemVersion
- .short 0 // MinorSubsystemVersion
- .long 0 // Win32VersionValue
-
- .long _end - _head // SizeOfImage
-
- // Everything before the kernel image is considered part of the header
- .long efi_header_end - _head // SizeOfHeaders
- .long 0 // CheckSum
- .short 0xa // Subsystem (EFI application)
- .short 0 // DllCharacteristics
- .quad 0 // SizeOfStackReserve
- .quad 0 // SizeOfStackCommit
- .quad 0 // SizeOfHeapReserve
- .quad 0 // SizeOfHeapCommit
- .long 0 // LoaderFlags
- .long (section_table - .) / 8 // NumberOfRvaAndSizes
-
- .quad 0 // ExportTable
- .quad 0 // ImportTable
- .quad 0 // ResourceTable
- .quad 0 // ExceptionTable
- .quad 0 // CertificationTable
- .quad 0 // BaseRelocationTable
-
-#ifdef CONFIG_DEBUG_EFI
- .long efi_debug_table - _head // DebugTable
- .long efi_debug_table_size
-#endif
-
- // Section table
-section_table:
-
- /*
- * The EFI application loader requires a relocation section
- * because EFI applications must be relocatable. This is a
- * dummy section as far as we are concerned.
- */
- .ascii ".reloc"
- .byte 0
- .byte 0 // end of 0 padding of section name
- .long 0
- .long 0
- .long 0 // SizeOfRawData
- .long 0 // PointerToRawData
- .long 0 // PointerToRelocations
- .long 0 // PointerToLineNumbers
- .short 0 // NumberOfRelocations
- .short 0 // NumberOfLineNumbers
- .long 0x42100040 // Characteristics (section flags)
-
-
- .ascii ".text"
- .byte 0
- .byte 0
- .byte 0 // end of 0 padding of section name
- .long _end - efi_header_end // VirtualSize
- .long efi_header_end - _head // VirtualAddress
- .long _edata - efi_header_end // SizeOfRawData
- .long efi_header_end - _head // PointerToRawData
-
- .long 0 // PointerToRelocations (0 for executables)
- .long 0 // PointerToLineNumbers (0 for executables)
- .short 0 // NumberOfRelocations (0 for executables)
- .short 0 // NumberOfLineNumbers (0 for executables)
- .long 0xe0500020 // Characteristics (section flags)
-
-#ifdef CONFIG_DEBUG_EFI
- /*
- * The debug table is referenced via its Relative Virtual Address (RVA),
- * which is only defined for those parts of the image that are covered
- * by a section declaration. Since this header is not covered by any
- * section, the debug table must be emitted elsewhere. So stick it in
- * the .init.rodata section instead.
- *
- * Note that the EFI debug entry itself may legally have a zero RVA,
- * which means we can simply put it right after the section headers.
- */
- __INITRODATA
-
- .align 2
-efi_debug_table:
- // EFI_IMAGE_DEBUG_DIRECTORY_ENTRY
- .long 0 // Characteristics
- .long 0 // TimeDateStamp
- .short 0 // MajorVersion
- .short 0 // MinorVersion
- .long 2 // Type == EFI_IMAGE_DEBUG_TYPE_CODEVIEW
- .long efi_debug_entry_size // SizeOfData
- .long 0 // RVA
- .long efi_debug_entry - _head // FileOffset
-
- .set efi_debug_table_size, . - efi_debug_table
- .previous
-
-efi_debug_entry:
- // EFI_IMAGE_DEBUG_CODEVIEW_NB10_ENTRY
- .ascii "NB10" // Signature
- .long 0 // Unknown
- .long 0 // Unknown2
- .long 0 // Unknown3
-
- .asciz VMLINUX_PATH
-
- .set efi_debug_entry_size, . - efi_debug_entry
-#endif
-
- /*
- * EFI will load .text onwards at the 4k section alignment
- * described in the PE/COFF header. To ensure that instruction
- * sequences using an adrp and a :lo12: immediate will function
- * correctly at this alignment, we must ensure that .text is
- * placed at a 4k boundary in the Image to begin with.
- */
- .align 12
-efi_header_end:
+ __EFI_PE_HEADER
+#else
+ .long 0 // reserved
#endif
__INIT
@@ -534,13 +384,8 @@ ENTRY(kimage_vaddr)
ENTRY(el2_setup)
mrs x0, CurrentEL
cmp x0, #CurrentEL_EL2
- b.ne 1f
- mrs x0, sctlr_el2
-CPU_BE( orr x0, x0, #(1 << 25) ) // Set the EE bit for EL2
-CPU_LE( bic x0, x0, #(1 << 25) ) // Clear the EE bit for EL2
- msr sctlr_el2, x0
- b 2f
-1: mrs x0, sctlr_el1
+ b.eq 1f
+ mrs x0, sctlr_el1
CPU_BE( orr x0, x0, #(3 << 24) ) // Set the EE and E0E bits for EL1
CPU_LE( bic x0, x0, #(3 << 24) ) // Clear the EE and E0E bits for EL1
msr sctlr_el1, x0
@@ -548,7 +393,11 @@ CPU_LE( bic x0, x0, #(3 << 24) ) // Clear the EE and E0E bits for EL1
isb
ret
-2:
+1: mrs x0, sctlr_el2
+CPU_BE( orr x0, x0, #(1 << 25) ) // Set the EE bit for EL2
+CPU_LE( bic x0, x0, #(1 << 25) ) // Clear the EE bit for EL2
+ msr sctlr_el2, x0
+
#ifdef CONFIG_ARM64_VHE
/*
* Check for VHE being present. For the rest of the EL2 setup,
@@ -594,14 +443,14 @@ set_hcr:
cmp x0, #1
b.ne 3f
- mrs_s x0, ICC_SRE_EL2
+ mrs_s x0, SYS_ICC_SRE_EL2
orr x0, x0, #ICC_SRE_EL2_SRE // Set ICC_SRE_EL2.SRE==1
orr x0, x0, #ICC_SRE_EL2_ENABLE // Set ICC_SRE_EL2.Enable==1
- msr_s ICC_SRE_EL2, x0
+ msr_s SYS_ICC_SRE_EL2, x0
isb // Make sure SRE is now set
- mrs_s x0, ICC_SRE_EL2 // Read SRE back,
+ mrs_s x0, SYS_ICC_SRE_EL2 // Read SRE back,
tbz x0, #0, 3f // and check that it sticks
- msr_s ICH_HCR_EL2, xzr // Reset ICC_HCR_EL2 to defaults
+ msr_s SYS_ICH_HCR_EL2, xzr // Reset ICH_HCR_EL2 to defaults
3:
#endif
@@ -612,26 +461,6 @@ set_hcr:
msr vpidr_el2, x0
msr vmpidr_el2, x1
- /*
- * When VHE is not in use, early init of EL2 and EL1 needs to be
- * done here.
- * When VHE _is_ in use, EL1 will not be used in the host and
- * requires no configuration, and all non-hyp-specific EL2 setup
- * will be done via the _EL1 system register aliases in __cpu_setup.
- */
- cbnz x2, 1f
-
- /* sctlr_el1 */
- mov x0, #0x0800 // Set/clear RES{1,0} bits
-CPU_BE( movk x0, #0x33d0, lsl #16 ) // Set EE and E0E on BE systems
-CPU_LE( movk x0, #0x30d0, lsl #16 ) // Clear EE and E0E on LE systems
- msr sctlr_el1, x0
-
- /* Coprocessor traps. */
- mov x0, #0x33ff
- msr cptr_el2, x0 // Disable copro. traps to EL2
-1:
-
#ifdef CONFIG_COMPAT
msr hstr_el2, xzr // Disable CP15 traps to EL2
#endif
@@ -668,6 +497,23 @@ CPU_LE( movk x0, #0x30d0, lsl #16 ) // Clear EE and E0E on LE systems
ret
install_el2_stub:
+ /*
+ * When VHE is not in use, early init of EL2 and EL1 needs to be
+ * done here.
+ * When VHE _is_ in use, EL1 will not be used in the host and
+ * requires no configuration, and all non-hyp-specific EL2 setup
+ * will be done via the _EL1 system register aliases in __cpu_setup.
+ */
+ /* sctlr_el1 */
+ mov x0, #0x0800 // Set/clear RES{1,0} bits
+CPU_BE( movk x0, #0x33d0, lsl #16 ) // Set EE and E0E on BE systems
+CPU_LE( movk x0, #0x30d0, lsl #16 ) // Clear EE and E0E on LE systems
+ msr sctlr_el1, x0
+
+ /* Coprocessor traps. */
+ mov x0, #0x33ff
+ msr cptr_el2, x0 // Disable copro. traps to EL2
+
/* Hypervisor stub */
adr_l x0, __hyp_stub_vectors
msr vbar_el2, x0
diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c
index 97a7384100f3..a44e13942d30 100644
--- a/arch/arm64/kernel/hibernate.c
+++ b/arch/arm64/kernel/hibernate.c
@@ -28,6 +28,7 @@
#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/irqflags.h>
+#include <asm/kexec.h>
#include <asm/memory.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
@@ -102,7 +103,8 @@ int pfn_is_nosave(unsigned long pfn)
unsigned long nosave_begin_pfn = sym_to_pfn(&__nosave_begin);
unsigned long nosave_end_pfn = sym_to_pfn(&__nosave_end - 1);
- return (pfn >= nosave_begin_pfn) && (pfn <= nosave_end_pfn);
+ return ((pfn >= nosave_begin_pfn) && (pfn <= nosave_end_pfn)) ||
+ crash_is_nosave(pfn);
}
void notrace save_processor_state(void)
@@ -286,6 +288,9 @@ int swsusp_arch_suspend(void)
local_dbg_save(flags);
if (__cpu_suspend_enter(&state)) {
+ /* make the crash dump kernel image visible/saveable */
+ crash_prepare_suspend();
+
sleep_cpu = smp_processor_id();
ret = swsusp_save();
} else {
@@ -297,6 +302,9 @@ int swsusp_arch_suspend(void)
if (el2_reset_needed())
dcache_clean_range(__hyp_idmap_text_start, __hyp_idmap_text_end);
+ /* make the crash dump kernel image protected again */
+ crash_post_resume();
+
/*
* Tell the hibernation core that we've just restored
* the memory
diff --git a/arch/arm64/kernel/hyp-stub.S b/arch/arm64/kernel/hyp-stub.S
index d3b5f75e652e..e1261fbaa374 100644
--- a/arch/arm64/kernel/hyp-stub.S
+++ b/arch/arm64/kernel/hyp-stub.S
@@ -55,18 +55,7 @@ ENDPROC(__hyp_stub_vectors)
.align 11
el1_sync:
- mrs x30, esr_el2
- lsr x30, x30, #ESR_ELx_EC_SHIFT
-
- cmp x30, #ESR_ELx_EC_HVC64
- b.ne 9f // Not an HVC trap
-
- cmp x0, #HVC_GET_VECTORS
- b.ne 1f
- mrs x0, vbar_el2
- b 9f
-
-1: cmp x0, #HVC_SET_VECTORS
+ cmp x0, #HVC_SET_VECTORS
b.ne 2f
msr vbar_el2, x1
b 9f
@@ -79,10 +68,15 @@ el1_sync:
mov x1, x3
br x4 // no return
+3: cmp x0, #HVC_RESET_VECTORS
+ beq 9f // Nothing to reset!
+
/* Someone called kvm_call_hyp() against the hyp-stub... */
-3: mov x0, #ARM_EXCEPTION_HYP_GONE
+ ldr x0, =HVC_STUB_ERR
+ eret
-9: eret
+9: mov x0, xzr
+ eret
ENDPROC(el1_sync)
.macro invalid_vector label
@@ -121,19 +115,15 @@ ENDPROC(\label)
* initialisation entry point.
*/
-ENTRY(__hyp_get_vectors)
- str lr, [sp, #-16]!
- mov x0, #HVC_GET_VECTORS
- hvc #0
- ldr lr, [sp], #16
- ret
-ENDPROC(__hyp_get_vectors)
-
ENTRY(__hyp_set_vectors)
- str lr, [sp, #-16]!
mov x1, x0
mov x0, #HVC_SET_VECTORS
hvc #0
- ldr lr, [sp], #16
ret
ENDPROC(__hyp_set_vectors)
+
+ENTRY(__hyp_reset_vectors)
+ mov x0, #HVC_RESET_VECTORS
+ hvc #0
+ ret
+ENDPROC(__hyp_reset_vectors)
diff --git a/arch/arm64/kernel/machine_kexec.c b/arch/arm64/kernel/machine_kexec.c
index bc96c8a7fc79..481f54a866c5 100644
--- a/arch/arm64/kernel/machine_kexec.c
+++ b/arch/arm64/kernel/machine_kexec.c
@@ -9,12 +9,19 @@
* published by the Free Software Foundation.
*/
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
#include <linux/kexec.h>
+#include <linux/page-flags.h>
#include <linux/smp.h>
#include <asm/cacheflush.h>
#include <asm/cpu_ops.h>
+#include <asm/memory.h>
+#include <asm/mmu.h>
#include <asm/mmu_context.h>
+#include <asm/page.h>
#include "cpu-reset.h"
@@ -22,8 +29,6 @@
extern const unsigned char arm64_relocate_new_kernel[];
extern const unsigned long arm64_relocate_new_kernel_size;
-static unsigned long kimage_start;
-
/**
* kexec_image_info - For debugging output.
*/
@@ -64,8 +69,6 @@ void machine_kexec_cleanup(struct kimage *kimage)
*/
int machine_kexec_prepare(struct kimage *kimage)
{
- kimage_start = kimage->start;
-
kexec_image_info(kimage);
if (kimage->type != KEXEC_TYPE_CRASH && cpus_are_stuck_in_kernel()) {
@@ -144,11 +147,15 @@ void machine_kexec(struct kimage *kimage)
{
phys_addr_t reboot_code_buffer_phys;
void *reboot_code_buffer;
+ bool in_kexec_crash = (kimage == kexec_crash_image);
+ bool stuck_cpus = cpus_are_stuck_in_kernel();
/*
* New cpus may have become stuck_in_kernel after we loaded the image.
*/
- BUG_ON(cpus_are_stuck_in_kernel() || (num_online_cpus() > 1));
+ BUG_ON(!in_kexec_crash && (stuck_cpus || (num_online_cpus() > 1)));
+ WARN(in_kexec_crash && (stuck_cpus || smp_crash_stop_failed()),
+ "Some CPUs may be stale, kdump will be unreliable.\n");
reboot_code_buffer_phys = page_to_phys(kimage->control_code_page);
reboot_code_buffer = phys_to_virt(reboot_code_buffer_phys);
@@ -183,7 +190,7 @@ void machine_kexec(struct kimage *kimage)
kexec_list_flush(kimage);
/* Flush the new image if already in place. */
- if (kimage->head & IND_DONE)
+ if ((kimage != kexec_crash_image) && (kimage->head & IND_DONE))
kexec_segment_flush(kimage);
pr_info("Bye!\n");
@@ -200,13 +207,158 @@ void machine_kexec(struct kimage *kimage)
* relocation is complete.
*/
- cpu_soft_restart(1, reboot_code_buffer_phys, kimage->head,
- kimage_start, 0);
+ cpu_soft_restart(kimage != kexec_crash_image,
+ reboot_code_buffer_phys, kimage->head, kimage->start, 0);
BUG(); /* Should never get here. */
}
+static void machine_kexec_mask_interrupts(void)
+{
+ unsigned int i;
+ struct irq_desc *desc;
+
+ for_each_irq_desc(i, desc) {
+ struct irq_chip *chip;
+ int ret;
+
+ chip = irq_desc_get_chip(desc);
+ if (!chip)
+ continue;
+
+ /*
+ * First try to remove the active state. If this
+ * fails, try to EOI the interrupt.
+ */
+ ret = irq_set_irqchip_state(i, IRQCHIP_STATE_ACTIVE, false);
+
+ if (ret && irqd_irq_inprogress(&desc->irq_data) &&
+ chip->irq_eoi)
+ chip->irq_eoi(&desc->irq_data);
+
+ if (chip->irq_mask)
+ chip->irq_mask(&desc->irq_data);
+
+ if (chip->irq_disable && !irqd_irq_disabled(&desc->irq_data))
+ chip->irq_disable(&desc->irq_data);
+ }
+}
+
+/**
+ * machine_crash_shutdown - shutdown non-crashing cpus and save registers
+ */
void machine_crash_shutdown(struct pt_regs *regs)
{
- /* Empty routine needed to avoid build errors. */
+ local_irq_disable();
+
+ /* shutdown non-crashing cpus */
+ smp_send_crash_stop();
+
+ /* for crashing cpu */
+ crash_save_cpu(regs, smp_processor_id());
+ machine_kexec_mask_interrupts();
+
+ pr_info("Starting crashdump kernel...\n");
+}
+
+void arch_kexec_protect_crashkres(void)
+{
+ int i;
+
+ kexec_segment_flush(kexec_crash_image);
+
+ for (i = 0; i < kexec_crash_image->nr_segments; i++)
+ set_memory_valid(
+ __phys_to_virt(kexec_crash_image->segment[i].mem),
+ kexec_crash_image->segment[i].memsz >> PAGE_SHIFT, 0);
+}
+
+void arch_kexec_unprotect_crashkres(void)
+{
+ int i;
+
+ for (i = 0; i < kexec_crash_image->nr_segments; i++)
+ set_memory_valid(
+ __phys_to_virt(kexec_crash_image->segment[i].mem),
+ kexec_crash_image->segment[i].memsz >> PAGE_SHIFT, 1);
+}
+
+#ifdef CONFIG_HIBERNATION
+/*
+ * To preserve the crash dump kernel image, the relevant memory segments
+ * should be mapped again around the hibernation.
+ */
+void crash_prepare_suspend(void)
+{
+ if (kexec_crash_image)
+ arch_kexec_unprotect_crashkres();
+}
+
+void crash_post_resume(void)
+{
+ if (kexec_crash_image)
+ arch_kexec_protect_crashkres();
+}
+
+/*
+ * crash_is_nosave
+ *
+ * Return true only if a page is part of reserved memory for crash dump kernel,
+ * but does not hold any data of loaded kernel image.
+ *
+ * Note that all the pages in crash dump kernel memory have been initially
+ * marked as Reserved in kexec_reserve_crashkres_pages().
+ *
+ * In hibernation, the pages which are Reserved and yet "nosave" are excluded
+ * from the hibernation image. crash_is_nosave() does this check for crash
+ * dump kernel and will reduce the total size of hibernation image.
+ */
+
+bool crash_is_nosave(unsigned long pfn)
+{
+ int i;
+ phys_addr_t addr;
+
+ if (!crashk_res.end)
+ return false;
+
+ /* in reserved memory? */
+ addr = __pfn_to_phys(pfn);
+ if ((addr < crashk_res.start) || (crashk_res.end < addr))
+ return false;
+
+ if (!kexec_crash_image)
+ return true;
+
+ /* not part of loaded kernel image? */
+ for (i = 0; i < kexec_crash_image->nr_segments; i++)
+ if (addr >= kexec_crash_image->segment[i].mem &&
+ addr < (kexec_crash_image->segment[i].mem +
+ kexec_crash_image->segment[i].memsz))
+ return false;
+
+ return true;
+}
+
+void crash_free_reserved_phys_range(unsigned long begin, unsigned long end)
+{
+ unsigned long addr;
+ struct page *page;
+
+ for (addr = begin; addr < end; addr += PAGE_SIZE) {
+ page = phys_to_page(addr);
+ ClearPageReserved(page);
+ free_reserved_page(page);
+ }
+}
+#endif /* CONFIG_HIBERNATION */
+
+void arch_crash_save_vmcoreinfo(void)
+{
+ VMCOREINFO_NUMBER(VA_BITS);
+ /* Please note VMCOREINFO_NUMBER() uses "%d", not "%x" */
+ vmcoreinfo_append_str("NUMBER(kimage_voffset)=0x%llx\n",
+ kimage_voffset);
+ vmcoreinfo_append_str("NUMBER(PHYS_OFFSET)=0x%llx\n",
+ PHYS_OFFSET);
}
diff --git a/arch/arm64/kernel/module-plts.c b/arch/arm64/kernel/module-plts.c
index 1ce90d8450ae..d05dbe658409 100644
--- a/arch/arm64/kernel/module-plts.c
+++ b/arch/arm64/kernel/module-plts.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2014-2016 Linaro Ltd. <ard.biesheuvel@linaro.org>
+ * Copyright (C) 2014-2017 Linaro Ltd. <ard.biesheuvel@linaro.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -26,35 +26,21 @@ struct plt_entry {
__le32 br; /* br x16 */
};
-u64 module_emit_plt_entry(struct module *mod, const Elf64_Rela *rela,
+static bool in_init(const struct module *mod, void *loc)
+{
+ return (u64)loc - (u64)mod->init_layout.base < mod->init_layout.size;
+}
+
+u64 module_emit_plt_entry(struct module *mod, void *loc, const Elf64_Rela *rela,
Elf64_Sym *sym)
{
- struct plt_entry *plt = (struct plt_entry *)mod->arch.plt->sh_addr;
- int i = mod->arch.plt_num_entries;
+ struct mod_plt_sec *pltsec = !in_init(mod, loc) ? &mod->arch.core :
+ &mod->arch.init;
+ struct plt_entry *plt = (struct plt_entry *)pltsec->plt->sh_addr;
+ int i = pltsec->plt_num_entries;
u64 val = sym->st_value + rela->r_addend;
/*
- * We only emit PLT entries against undefined (SHN_UNDEF) symbols,
- * which are listed in the ELF symtab section, but without a type
- * or a size.
- * So, similar to how the module loader uses the Elf64_Sym::st_value
- * field to store the resolved addresses of undefined symbols, let's
- * borrow the Elf64_Sym::st_size field (whose value is never used by
- * the module loader, even for symbols that are defined) to record
- * the address of a symbol's associated PLT entry as we emit it for a
- * zero addend relocation (which is the only kind we have to deal with
- * in practice). This allows us to find duplicates without having to
- * go through the table every time.
- */
- if (rela->r_addend == 0 && sym->st_size != 0) {
- BUG_ON(sym->st_size < (u64)plt || sym->st_size >= (u64)&plt[i]);
- return sym->st_size;
- }
-
- mod->arch.plt_num_entries++;
- BUG_ON(mod->arch.plt_num_entries > mod->arch.plt_max_entries);
-
- /*
* MOVK/MOVN/MOVZ opcode:
* +--------+------------+--------+-----------+-------------+---------+
* | sf[31] | opc[30:29] | 100101 | hw[22:21] | imm16[20:5] | Rd[4:0] |
@@ -72,8 +58,19 @@ u64 module_emit_plt_entry(struct module *mod, const Elf64_Rela *rela,
cpu_to_le32(0xd61f0200)
};
- if (rela->r_addend == 0)
- sym->st_size = (u64)&plt[i];
+ /*
+ * Check if the entry we just created is a duplicate. Given that the
+	 * relocations are sorted, this will be the last entry we allocated,
+	 * if one exists.
+ */
+ if (i > 0 &&
+ plt[i].mov0 == plt[i - 1].mov0 &&
+ plt[i].mov1 == plt[i - 1].mov1 &&
+ plt[i].mov2 == plt[i - 1].mov2)
+ return (u64)&plt[i - 1];
+
+ pltsec->plt_num_entries++;
+ BUG_ON(pltsec->plt_num_entries > pltsec->plt_max_entries);
return (u64)&plt[i];
}
@@ -104,7 +101,8 @@ static bool duplicate_rel(const Elf64_Rela *rela, int num)
return num > 0 && cmp_rela(rela + num, rela + num - 1) == 0;
}
-static unsigned int count_plts(Elf64_Sym *syms, Elf64_Rela *rela, int num)
+static unsigned int count_plts(Elf64_Sym *syms, Elf64_Rela *rela, int num,
+ Elf64_Word dstidx)
{
unsigned int ret = 0;
Elf64_Sym *s;
@@ -116,13 +114,17 @@ static unsigned int count_plts(Elf64_Sym *syms, Elf64_Rela *rela, int num)
case R_AARCH64_CALL26:
/*
* We only have to consider branch targets that resolve
- * to undefined symbols. This is not simply a heuristic,
- * it is a fundamental limitation, since the PLT itself
- * is part of the module, and needs to be within 128 MB
- * as well, so modules can never grow beyond that limit.
+ * to symbols that are defined in a different section.
+ * This is not simply a heuristic, it is a fundamental
+ * limitation, since there is no guaranteed way to emit
+ * PLT entries sufficiently close to the branch if the
+ * section size exceeds the range of a branch
+ * instruction. So ignore relocations against defined
+ * symbols if they live in the same section as the
+ * relocation target.
*/
s = syms + ELF64_R_SYM(rela[i].r_info);
- if (s->st_shndx != SHN_UNDEF)
+ if (s->st_shndx == dstidx)
break;
/*
@@ -149,7 +151,8 @@ static unsigned int count_plts(Elf64_Sym *syms, Elf64_Rela *rela, int num)
int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
char *secstrings, struct module *mod)
{
- unsigned long plt_max_entries = 0;
+ unsigned long core_plts = 0;
+ unsigned long init_plts = 0;
Elf64_Sym *syms = NULL;
int i;
@@ -158,14 +161,16 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
* entries. Record the symtab address as well.
*/
for (i = 0; i < ehdr->e_shnum; i++) {
- if (strcmp(".plt", secstrings + sechdrs[i].sh_name) == 0)
- mod->arch.plt = sechdrs + i;
+ if (!strcmp(secstrings + sechdrs[i].sh_name, ".plt"))
+ mod->arch.core.plt = sechdrs + i;
+ else if (!strcmp(secstrings + sechdrs[i].sh_name, ".init.plt"))
+ mod->arch.init.plt = sechdrs + i;
else if (sechdrs[i].sh_type == SHT_SYMTAB)
syms = (Elf64_Sym *)sechdrs[i].sh_addr;
}
- if (!mod->arch.plt) {
- pr_err("%s: module PLT section missing\n", mod->name);
+ if (!mod->arch.core.plt || !mod->arch.init.plt) {
+ pr_err("%s: module PLT section(s) missing\n", mod->name);
return -ENOEXEC;
}
if (!syms) {
@@ -188,14 +193,27 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
/* sort by type, symbol index and addend */
sort(rels, numrels, sizeof(Elf64_Rela), cmp_rela, NULL);
- plt_max_entries += count_plts(syms, rels, numrels);
+ if (strncmp(secstrings + dstsec->sh_name, ".init", 5) != 0)
+ core_plts += count_plts(syms, rels, numrels,
+ sechdrs[i].sh_info);
+ else
+ init_plts += count_plts(syms, rels, numrels,
+ sechdrs[i].sh_info);
}
- mod->arch.plt->sh_type = SHT_NOBITS;
- mod->arch.plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
- mod->arch.plt->sh_addralign = L1_CACHE_BYTES;
- mod->arch.plt->sh_size = plt_max_entries * sizeof(struct plt_entry);
- mod->arch.plt_num_entries = 0;
- mod->arch.plt_max_entries = plt_max_entries;
+ mod->arch.core.plt->sh_type = SHT_NOBITS;
+ mod->arch.core.plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
+ mod->arch.core.plt->sh_addralign = L1_CACHE_BYTES;
+ mod->arch.core.plt->sh_size = (core_plts + 1) * sizeof(struct plt_entry);
+ mod->arch.core.plt_num_entries = 0;
+ mod->arch.core.plt_max_entries = core_plts;
+
+ mod->arch.init.plt->sh_type = SHT_NOBITS;
+ mod->arch.init.plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
+ mod->arch.init.plt->sh_addralign = L1_CACHE_BYTES;
+ mod->arch.init.plt->sh_size = (init_plts + 1) * sizeof(struct plt_entry);
+ mod->arch.init.plt_num_entries = 0;
+ mod->arch.init.plt_max_entries = init_plts;
+
return 0;
}
diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c
index 7f316982ce00..c9a2ab446dc6 100644
--- a/arch/arm64/kernel/module.c
+++ b/arch/arm64/kernel/module.c
@@ -380,7 +380,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
ovf == -ERANGE) {
- val = module_emit_plt_entry(me, &rel[i], sym);
+ val = module_emit_plt_entry(me, loc, &rel[i], sym);
ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2,
26, AARCH64_INSN_IMM_26);
}
diff --git a/arch/arm64/kernel/module.lds b/arch/arm64/kernel/module.lds
index 8949f6c6f729..f7c9781a9d48 100644
--- a/arch/arm64/kernel/module.lds
+++ b/arch/arm64/kernel/module.lds
@@ -1,3 +1,4 @@
SECTIONS {
.plt (NOLOAD) : { BYTE(0) }
+ .init.plt (NOLOAD) : { BYTE(0) }
}
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index 57ae9d9ed9bb..bcc79471b38e 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -290,6 +290,12 @@ static const unsigned armv8_a53_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1I_CACHE,
[C(L1I)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1I_CACHE_REFILL,
+ [C(LL)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L2D_CACHE,
+ [C(LL)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L2D_CACHE_REFILL,
+ [C(LL)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L2D_CACHE,
+ [C(LL)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L2D_CACHE_REFILL,
+
+ [C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1D_TLB_REFILL,
[C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1I_TLB_REFILL,
[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_BR_PRED,
@@ -957,10 +963,26 @@ static int armv8_vulcan_map_event(struct perf_event *event)
ARMV8_PMU_EVTYPE_EVENT);
}
+struct armv8pmu_probe_info {
+ struct arm_pmu *pmu;
+ bool present;
+};
+
static void __armv8pmu_probe_pmu(void *info)
{
- struct arm_pmu *cpu_pmu = info;
+ struct armv8pmu_probe_info *probe = info;
+ struct arm_pmu *cpu_pmu = probe->pmu;
+ u64 dfr0;
u32 pmceid[2];
+ int pmuver;
+
+ dfr0 = read_sysreg(id_aa64dfr0_el1);
+ pmuver = cpuid_feature_extract_signed_field(dfr0,
+ ID_AA64DFR0_PMUVER_SHIFT);
+ if (pmuver < 1)
+ return;
+
+ probe->present = true;
/* Read the nb of CNTx counters supported from PMNC */
cpu_pmu->num_events = (armv8pmu_pmcr_read() >> ARMV8_PMU_PMCR_N_SHIFT)
@@ -979,13 +1001,27 @@ static void __armv8pmu_probe_pmu(void *info)
static int armv8pmu_probe_pmu(struct arm_pmu *cpu_pmu)
{
- return smp_call_function_any(&cpu_pmu->supported_cpus,
+ struct armv8pmu_probe_info probe = {
+ .pmu = cpu_pmu,
+ .present = false,
+ };
+ int ret;
+
+ ret = smp_call_function_any(&cpu_pmu->supported_cpus,
__armv8pmu_probe_pmu,
- cpu_pmu, 1);
+ &probe, 1);
+ if (ret)
+ return ret;
+
+ return probe.present ? 0 : -ENODEV;
}
-static void armv8_pmu_init(struct arm_pmu *cpu_pmu)
+static int armv8_pmu_init(struct arm_pmu *cpu_pmu)
{
+ int ret = armv8pmu_probe_pmu(cpu_pmu);
+ if (ret)
+ return ret;
+
cpu_pmu->handle_irq = armv8pmu_handle_irq,
cpu_pmu->enable = armv8pmu_enable_event,
cpu_pmu->disable = armv8pmu_disable_event,
@@ -997,78 +1033,104 @@ static void armv8_pmu_init(struct arm_pmu *cpu_pmu)
cpu_pmu->reset = armv8pmu_reset,
cpu_pmu->max_period = (1LLU << 32) - 1,
cpu_pmu->set_event_filter = armv8pmu_set_event_filter;
+
+ return 0;
}
static int armv8_pmuv3_init(struct arm_pmu *cpu_pmu)
{
- armv8_pmu_init(cpu_pmu);
+ int ret = armv8_pmu_init(cpu_pmu);
+ if (ret)
+ return ret;
+
cpu_pmu->name = "armv8_pmuv3";
cpu_pmu->map_event = armv8_pmuv3_map_event;
cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
&armv8_pmuv3_events_attr_group;
cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
&armv8_pmuv3_format_attr_group;
- return armv8pmu_probe_pmu(cpu_pmu);
+
+ return 0;
}
static int armv8_a53_pmu_init(struct arm_pmu *cpu_pmu)
{
- armv8_pmu_init(cpu_pmu);
+ int ret = armv8_pmu_init(cpu_pmu);
+ if (ret)
+ return ret;
+
cpu_pmu->name = "armv8_cortex_a53";
cpu_pmu->map_event = armv8_a53_map_event;
cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
&armv8_pmuv3_events_attr_group;
cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
&armv8_pmuv3_format_attr_group;
- return armv8pmu_probe_pmu(cpu_pmu);
+
+ return 0;
}
static int armv8_a57_pmu_init(struct arm_pmu *cpu_pmu)
{
- armv8_pmu_init(cpu_pmu);
+ int ret = armv8_pmu_init(cpu_pmu);
+ if (ret)
+ return ret;
+
cpu_pmu->name = "armv8_cortex_a57";
cpu_pmu->map_event = armv8_a57_map_event;
cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
&armv8_pmuv3_events_attr_group;
cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
&armv8_pmuv3_format_attr_group;
- return armv8pmu_probe_pmu(cpu_pmu);
+
+ return 0;
}
static int armv8_a72_pmu_init(struct arm_pmu *cpu_pmu)
{
- armv8_pmu_init(cpu_pmu);
+ int ret = armv8_pmu_init(cpu_pmu);
+ if (ret)
+ return ret;
+
cpu_pmu->name = "armv8_cortex_a72";
cpu_pmu->map_event = armv8_a57_map_event;
cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
&armv8_pmuv3_events_attr_group;
cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
&armv8_pmuv3_format_attr_group;
- return armv8pmu_probe_pmu(cpu_pmu);
+
+ return 0;
}
static int armv8_thunder_pmu_init(struct arm_pmu *cpu_pmu)
{
- armv8_pmu_init(cpu_pmu);
+ int ret = armv8_pmu_init(cpu_pmu);
+ if (ret)
+ return ret;
+
cpu_pmu->name = "armv8_cavium_thunder";
cpu_pmu->map_event = armv8_thunder_map_event;
cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
&armv8_pmuv3_events_attr_group;
cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
&armv8_pmuv3_format_attr_group;
- return armv8pmu_probe_pmu(cpu_pmu);
+
+ return 0;
}
static int armv8_vulcan_pmu_init(struct arm_pmu *cpu_pmu)
{
- armv8_pmu_init(cpu_pmu);
+ int ret = armv8_pmu_init(cpu_pmu);
+ if (ret)
+ return ret;
+
cpu_pmu->name = "armv8_brcm_vulcan";
cpu_pmu->map_event = armv8_vulcan_map_event;
cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
&armv8_pmuv3_events_attr_group;
cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
&armv8_pmuv3_format_attr_group;
- return armv8pmu_probe_pmu(cpu_pmu);
+
+ return 0;
}
static const struct of_device_id armv8_pmu_of_device_ids[] = {
@@ -1081,24 +1143,9 @@ static const struct of_device_id armv8_pmu_of_device_ids[] = {
{},
};
-/*
- * Non DT systems have their micro/arch events probed at run-time.
- * A fairly complete list of generic events are provided and ones that
- * aren't supported by the current PMU are disabled.
- */
-static const struct pmu_probe_info armv8_pmu_probe_table[] = {
- PMU_PROBE(0, 0, armv8_pmuv3_init), /* enable all defined counters */
- { /* sentinel value */ }
-};
-
static int armv8_pmu_device_probe(struct platform_device *pdev)
{
- if (acpi_disabled)
- return arm_pmu_device_probe(pdev, armv8_pmu_of_device_ids,
- NULL);
-
- return arm_pmu_device_probe(pdev, armv8_pmu_of_device_ids,
- armv8_pmu_probe_table);
+ return arm_pmu_device_probe(pdev, armv8_pmu_of_device_ids, NULL);
}
static struct platform_driver armv8_pmu_driver = {
@@ -1109,4 +1156,11 @@ static struct platform_driver armv8_pmu_driver = {
.probe = armv8_pmu_device_probe,
};
-builtin_platform_driver(armv8_pmu_driver);
+static int __init armv8_pmu_driver_init(void)
+{
+ if (acpi_disabled)
+ return platform_driver_register(&armv8_pmu_driver);
+ else
+ return arm_pmu_acpi_probe(armv8_pmuv3_init);
+}
+device_initcall(armv8_pmu_driver_init)
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 043d373b8369..ae2a835898d7 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -205,12 +205,10 @@ void __show_regs(struct pt_regs *regs)
pr_cont("\n");
}
- printk("\n");
}
void show_regs(struct pt_regs * regs)
{
- printk("\n");
__show_regs(regs);
}
diff --git a/arch/arm64/kernel/reloc_test_core.c b/arch/arm64/kernel/reloc_test_core.c
new file mode 100644
index 000000000000..c124752a8bd3
--- /dev/null
+++ b/arch/arm64/kernel/reloc_test_core.c
@@ -0,0 +1,81 @@
+/*
+ * Copyright (C) 2017 Linaro, Ltd. <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/module.h>
+
+int sym64_rel;
+
+#define SYM64_ABS_VAL 0xffff880000cccccc
+#define SYM32_ABS_VAL 0xf800cccc
+#define SYM16_ABS_VAL 0xf8cc
+
+#define __SET_ABS(name, val) asm(".globl " #name "; .set "#name ", " #val)
+#define SET_ABS(name, val) __SET_ABS(name, val)
+
+SET_ABS(sym64_abs, SYM64_ABS_VAL);
+SET_ABS(sym32_abs, SYM32_ABS_VAL);
+SET_ABS(sym16_abs, SYM16_ABS_VAL);
+
+asmlinkage u64 absolute_data64(void);
+asmlinkage u64 absolute_data32(void);
+asmlinkage u64 absolute_data16(void);
+asmlinkage u64 signed_movw(void);
+asmlinkage u64 unsigned_movw(void);
+asmlinkage u64 relative_adrp(void);
+asmlinkage u64 relative_adr(void);
+asmlinkage u64 relative_data64(void);
+asmlinkage u64 relative_data32(void);
+asmlinkage u64 relative_data16(void);
+
+static struct {
+ char name[32];
+ u64 (*f)(void);
+ u64 expect;
+} const funcs[] = {
+ { "R_AARCH64_ABS64", absolute_data64, UL(SYM64_ABS_VAL) },
+ { "R_AARCH64_ABS32", absolute_data32, UL(SYM32_ABS_VAL) },
+ { "R_AARCH64_ABS16", absolute_data16, UL(SYM16_ABS_VAL) },
+ { "R_AARCH64_MOVW_SABS_Gn", signed_movw, UL(SYM64_ABS_VAL) },
+ { "R_AARCH64_MOVW_UABS_Gn", unsigned_movw, UL(SYM64_ABS_VAL) },
+#ifndef CONFIG_ARM64_ERRATUM_843419
+ { "R_AARCH64_ADR_PREL_PG_HI21", relative_adrp, (u64)&sym64_rel },
+#endif
+ { "R_AARCH64_ADR_PREL_LO21", relative_adr, (u64)&sym64_rel },
+ { "R_AARCH64_PREL64", relative_data64, (u64)&sym64_rel },
+ { "R_AARCH64_PREL32", relative_data32, (u64)&sym64_rel },
+ { "R_AARCH64_PREL16", relative_data16, (u64)&sym64_rel },
+};
+
+static int reloc_test_init(void)
+{
+ int i;
+
+ pr_info("Relocation test:\n");
+ pr_info("-------------------------------------------------------\n");
+
+ for (i = 0; i < ARRAY_SIZE(funcs); i++) {
+ u64 ret = funcs[i].f();
+
+ pr_info("%-31s 0x%016llx %s\n", funcs[i].name, ret,
+ ret == funcs[i].expect ? "pass" : "fail");
+ if (ret != funcs[i].expect)
+ pr_err("Relocation failed, expected 0x%016llx, not 0x%016llx\n",
+ funcs[i].expect, ret);
+ }
+ return 0;
+}
+
+static void reloc_test_exit(void)
+{
+}
+
+module_init(reloc_test_init);
+module_exit(reloc_test_exit);
+
+MODULE_LICENSE("GPL v2");
diff --git a/arch/arm64/kernel/reloc_test_syms.S b/arch/arm64/kernel/reloc_test_syms.S
new file mode 100644
index 000000000000..e1edcefeb02d
--- /dev/null
+++ b/arch/arm64/kernel/reloc_test_syms.S
@@ -0,0 +1,83 @@
+/*
+ * Copyright (C) 2017 Linaro, Ltd. <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/linkage.h>
+
+ENTRY(absolute_data64)
+ ldr x0, 0f
+ ret
+0: .quad sym64_abs
+ENDPROC(absolute_data64)
+
+ENTRY(absolute_data32)
+ ldr w0, 0f
+ ret
+0: .long sym32_abs
+ENDPROC(absolute_data32)
+
+ENTRY(absolute_data16)
+ adr x0, 0f
+ ldrh w0, [x0]
+ ret
+0: .short sym16_abs, 0
+ENDPROC(absolute_data16)
+
+ENTRY(signed_movw)
+ movz x0, #:abs_g2_s:sym64_abs
+ movk x0, #:abs_g1_nc:sym64_abs
+ movk x0, #:abs_g0_nc:sym64_abs
+ ret
+ENDPROC(signed_movw)
+
+ENTRY(unsigned_movw)
+ movz x0, #:abs_g3:sym64_abs
+ movk x0, #:abs_g2_nc:sym64_abs
+ movk x0, #:abs_g1_nc:sym64_abs
+ movk x0, #:abs_g0_nc:sym64_abs
+ ret
+ENDPROC(unsigned_movw)
+
+#ifndef CONFIG_ARM64_ERRATUM_843419
+
+ENTRY(relative_adrp)
+ adrp x0, sym64_rel
+ add x0, x0, #:lo12:sym64_rel
+ ret
+ENDPROC(relative_adrp)
+
+#endif
+
+ENTRY(relative_adr)
+ adr x0, sym64_rel
+ ret
+ENDPROC(relative_adr)
+
+ENTRY(relative_data64)
+ adr x1, 0f
+ ldr x0, [x1]
+ add x0, x0, x1
+ ret
+0: .quad sym64_rel - .
+ENDPROC(relative_data64)
+
+ENTRY(relative_data32)
+ adr x1, 0f
+ ldr w0, [x1]
+ add x0, x0, x1
+ ret
+0: .long sym64_rel - .
+ENDPROC(relative_data32)
+
+ENTRY(relative_data16)
+ adr x1, 0f
+ ldrsh w0, [x1]
+ add x0, x0, x1
+ ret
+0: .short sym64_rel - ., 0
+ENDPROC(relative_data16)
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 42274bda0ccb..2c822ef94f34 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -31,7 +31,6 @@
#include <linux/screen_info.h>
#include <linux/init.h>
#include <linux/kexec.h>
-#include <linux/crash_dump.h>
#include <linux/root_dev.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
@@ -181,6 +180,7 @@ static void __init smp_build_mpidr_hash(void)
static void __init setup_machine_fdt(phys_addr_t dt_phys)
{
void *dt_virt = fixmap_remap_fdt(dt_phys);
+ const char *name;
if (!dt_virt || !early_init_dt_scan(dt_virt)) {
pr_crit("\n"
@@ -193,7 +193,9 @@ static void __init setup_machine_fdt(phys_addr_t dt_phys)
cpu_relax();
}
- dump_stack_set_arch_desc("%s (DT)", of_flat_dt_get_machine_name());
+ name = of_flat_dt_get_machine_name();
+ pr_info("Machine model: %s\n", name);
+ dump_stack_set_arch_desc("%s (DT)", name);
}
static void __init request_standard_resources(void)
@@ -226,6 +228,12 @@ static void __init request_standard_resources(void)
if (kernel_data.start >= res->start &&
kernel_data.end <= res->end)
request_resource(res, &kernel_data);
+#ifdef CONFIG_KEXEC_CORE
+ /* Userspace will find "Crash kernel" region in /proc/iomem. */
+ if (crashk_res.end && crashk_res.start >= res->start &&
+ crashk_res.end <= res->end)
+ request_resource(res, &crashk_res);
+#endif
}
}
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 9b1036570586..6e0e16a3a7d4 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -39,6 +39,7 @@
#include <linux/completion.h>
#include <linux/of.h>
#include <linux/irq_work.h>
+#include <linux/kexec.h>
#include <asm/alternative.h>
#include <asm/atomic.h>
@@ -76,6 +77,7 @@ enum ipi_msg_type {
IPI_RESCHEDULE,
IPI_CALL_FUNC,
IPI_CPU_STOP,
+ IPI_CPU_CRASH_STOP,
IPI_TIMER,
IPI_IRQ_WORK,
IPI_WAKEUP
@@ -434,6 +436,7 @@ void __init smp_cpus_done(unsigned int max_cpus)
setup_cpu_features();
hyp_mode_check();
apply_alternatives_all();
+ mark_linear_text_alias_ro();
}
void __init smp_prepare_boot_cpu(void)
@@ -518,6 +521,13 @@ static bool bootcpu_valid __initdata;
static unsigned int cpu_count = 1;
#ifdef CONFIG_ACPI
+static struct acpi_madt_generic_interrupt cpu_madt_gicc[NR_CPUS];
+
+struct acpi_madt_generic_interrupt *acpi_cpu_get_madt_gicc(int cpu)
+{
+ return &cpu_madt_gicc[cpu];
+}
+
/*
* acpi_map_gic_cpu_interface - parse processor MADT entry
*
@@ -552,6 +562,7 @@ acpi_map_gic_cpu_interface(struct acpi_madt_generic_interrupt *processor)
return;
}
bootcpu_valid = true;
+ cpu_madt_gicc[0] = *processor;
early_map_cpu_to_node(0, acpi_numa_get_nid(0, hwid));
return;
}
@@ -562,6 +573,8 @@ acpi_map_gic_cpu_interface(struct acpi_madt_generic_interrupt *processor)
/* map the logical cpu id to cpu MPIDR */
cpu_logical_map(cpu_count) = hwid;
+ cpu_madt_gicc[cpu_count] = *processor;
+
/*
* Set-up the ACPI parking protocol cpu entries
* while initializing the cpu_logical_map to
@@ -755,6 +768,7 @@ static const char *ipi_types[NR_IPI] __tracepoint_string = {
S(IPI_RESCHEDULE, "Rescheduling interrupts"),
S(IPI_CALL_FUNC, "Function call interrupts"),
S(IPI_CPU_STOP, "CPU stop interrupts"),
+ S(IPI_CPU_CRASH_STOP, "CPU stop (for crash dump) interrupts"),
S(IPI_TIMER, "Timer broadcast interrupts"),
S(IPI_IRQ_WORK, "IRQ work interrupts"),
S(IPI_WAKEUP, "CPU wake-up interrupts"),
@@ -829,6 +843,29 @@ static void ipi_cpu_stop(unsigned int cpu)
cpu_relax();
}
+#ifdef CONFIG_KEXEC_CORE
+static atomic_t waiting_for_crash_ipi = ATOMIC_INIT(0);
+#endif
+
+static void ipi_cpu_crash_stop(unsigned int cpu, struct pt_regs *regs)
+{
+#ifdef CONFIG_KEXEC_CORE
+ crash_save_cpu(regs, cpu);
+
+ atomic_dec(&waiting_for_crash_ipi);
+
+ local_irq_disable();
+
+#ifdef CONFIG_HOTPLUG_CPU
+ if (cpu_ops[cpu]->cpu_die)
+ cpu_ops[cpu]->cpu_die(cpu);
+#endif
+
+ /* just in case */
+ cpu_park_loop();
+#endif
+}
+
/*
* Main handler for inter-processor interrupts
*/
@@ -859,6 +896,15 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
irq_exit();
break;
+ case IPI_CPU_CRASH_STOP:
+ if (IS_ENABLED(CONFIG_KEXEC_CORE)) {
+ irq_enter();
+ ipi_cpu_crash_stop(cpu, regs);
+
+ unreachable();
+ }
+ break;
+
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
case IPI_TIMER:
irq_enter();
@@ -931,6 +977,39 @@ void smp_send_stop(void)
cpumask_pr_args(cpu_online_mask));
}
+#ifdef CONFIG_KEXEC_CORE
+void smp_send_crash_stop(void)
+{
+ cpumask_t mask;
+ unsigned long timeout;
+
+ if (num_online_cpus() == 1)
+ return;
+
+ cpumask_copy(&mask, cpu_online_mask);
+ cpumask_clear_cpu(smp_processor_id(), &mask);
+
+ atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1);
+
+ pr_crit("SMP: stopping secondary CPUs\n");
+ smp_cross_call(&mask, IPI_CPU_CRASH_STOP);
+
+ /* Wait up to one second for other CPUs to stop */
+ timeout = USEC_PER_SEC;
+ while ((atomic_read(&waiting_for_crash_ipi) > 0) && timeout--)
+ udelay(1);
+
+ if (atomic_read(&waiting_for_crash_ipi) > 0)
+ pr_warning("SMP: failed to stop secondary CPUs %*pbl\n",
+ cpumask_pr_args(&mask));
+}
+
+bool smp_crash_stop_failed(void)
+{
+ return (atomic_read(&waiting_for_crash_ipi) > 0);
+}
+#endif
+
/*
* not supported here
*/
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index 1de444e6c669..d4d6ae02cd55 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -513,6 +513,14 @@ static void cntvct_read_handler(unsigned int esr, struct pt_regs *regs)
regs->pc += 4;
}
+static void cntfrq_read_handler(unsigned int esr, struct pt_regs *regs)
+{
+ int rt = (esr & ESR_ELx_SYS64_ISS_RT_MASK) >> ESR_ELx_SYS64_ISS_RT_SHIFT;
+
+ pt_regs_write_reg(regs, rt, read_sysreg(cntfrq_el0));
+ regs->pc += 4;
+}
+
struct sys64_hook {
unsigned int esr_mask;
unsigned int esr_val;
@@ -537,6 +545,12 @@ static struct sys64_hook sys64_hooks[] = {
.esr_val = ESR_ELx_SYS64_ISS_SYS_CNTVCT,
.handler = cntvct_read_handler,
},
+ {
+ /* Trap read access to CNTFRQ_EL0 */
+ .esr_mask = ESR_ELx_SYS64_ISS_SYS_OP_MASK,
+ .esr_val = ESR_ELx_SYS64_ISS_SYS_CNTFRQ,
+ .handler = cntfrq_read_handler,
+ },
{},
};
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index b8deffa9e1bf..987a00ee446c 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -143,12 +143,27 @@ SECTIONS
. = ALIGN(SEGMENT_ALIGN);
__init_begin = .;
+ __inittext_begin = .;
INIT_TEXT_SECTION(8)
.exit.text : {
ARM_EXIT_KEEP(EXIT_TEXT)
}
+ . = ALIGN(4);
+ .altinstructions : {
+ __alt_instructions = .;
+ *(.altinstructions)
+ __alt_instructions_end = .;
+ }
+ .altinstr_replacement : {
+ *(.altinstr_replacement)
+ }
+
+ . = ALIGN(PAGE_SIZE);
+ __inittext_end = .;
+ __initdata_begin = .;
+
.init.data : {
INIT_DATA
INIT_SETUP(16)
@@ -164,15 +179,6 @@ SECTIONS
PERCPU_SECTION(L1_CACHE_BYTES)
- . = ALIGN(4);
- .altinstructions : {
- __alt_instructions = .;
- *(.altinstructions)
- __alt_instructions_end = .;
- }
- .altinstr_replacement : {
- *(.altinstr_replacement)
- }
.rela : ALIGN(8) {
*(.rela .rela*)
}
@@ -181,6 +187,7 @@ SECTIONS
__rela_size = SIZEOF(.rela);
. = ALIGN(SEGMENT_ALIGN);
+ __initdata_end = .;
__init_end = .;
_data = .;
@@ -206,6 +213,7 @@ SECTIONS
}
PECOFF_EDATA_PADDING
+ __pecoff_data_rawsize = ABSOLUTE(. - __initdata_begin);
_edata = .;
BSS_SECTION(0, 0, 0)
@@ -221,6 +229,7 @@ SECTIONS
. += RESERVED_TTBR0_SIZE;
#endif
+ __pecoff_data_size = ABSOLUTE(. - __initdata_begin);
_end = .;
STABS_DEBUG
diff --git a/arch/arm64/kvm/hyp-init.S b/arch/arm64/kvm/hyp-init.S
index 6b29d3d9e1f2..839425c24b1c 100644
--- a/arch/arm64/kvm/hyp-init.S
+++ b/arch/arm64/kvm/hyp-init.S
@@ -22,6 +22,7 @@
#include <asm/kvm_mmu.h>
#include <asm/pgtable-hwdef.h>
#include <asm/sysreg.h>
+#include <asm/virt.h>
.text
.pushsection .hyp.idmap.text, "ax"
@@ -58,6 +59,9 @@ __invalid:
* x2: HYP vectors
*/
__do_hyp_init:
+ /* Check for a stub HVC call */
+ cmp x0, #HVC_STUB_HCALL_NR
+ b.lo __kvm_handle_stub_hvc
msr ttbr0_el2, x0
@@ -119,23 +123,45 @@ __do_hyp_init:
eret
ENDPROC(__kvm_hyp_init)
+ENTRY(__kvm_handle_stub_hvc)
+ cmp x0, #HVC_SOFT_RESTART
+ b.ne 1f
+
+ /* This is where we're about to jump, staying at EL2 */
+ msr elr_el2, x1
+ mov x0, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT | PSR_MODE_EL2h)
+ msr spsr_el2, x0
+
+ /* Shuffle the arguments, and don't come back */
+ mov x0, x2
+ mov x1, x3
+ mov x2, x4
+ b reset
+
+1: cmp x0, #HVC_RESET_VECTORS
+ b.ne 1f
+reset:
/*
- * Reset kvm back to the hyp stub.
+ * Reset kvm back to the hyp stub. Do not clobber x0-x4 in
+	 * case we are coming via HVC_SOFT_RESTART.
*/
-ENTRY(__kvm_hyp_reset)
- /* We're now in idmap, disable MMU */
- mrs x0, sctlr_el2
- ldr x1, =SCTLR_ELx_FLAGS
- bic x0, x0, x1 // Clear SCTL_M and etc
- msr sctlr_el2, x0
+ mrs x5, sctlr_el2
+ ldr x6, =SCTLR_ELx_FLAGS
+ bic x5, x5, x6 // Clear SCTL_M and etc
+ msr sctlr_el2, x5
isb
/* Install stub vectors */
- adr_l x0, __hyp_stub_vectors
- msr vbar_el2, x0
+ adr_l x5, __hyp_stub_vectors
+ msr vbar_el2, x5
+ mov x0, xzr
+ eret
+1: /* Bad stub call */
+ ldr x0, =HVC_STUB_ERR
eret
-ENDPROC(__kvm_hyp_reset)
+
+ENDPROC(__kvm_handle_stub_hvc)
.ltorg
diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S
index 2726635dceba..952f6cb9cf72 100644
--- a/arch/arm64/kvm/hyp.S
+++ b/arch/arm64/kvm/hyp.S
@@ -36,15 +36,12 @@
* passed in x0.
*
* A function pointer with a value less than 0xfff has a special meaning,
- * and is used to implement __hyp_get_vectors in the same way as in
+ * and is used to implement hyp stubs in the same way as in
* arch/arm64/kernel/hyp_stub.S.
- * HVC behaves as a 'bl' call and will clobber lr.
*/
ENTRY(__kvm_call_hyp)
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
- str lr, [sp, #-16]!
hvc #0
- ldr lr, [sp], #16
ret
alternative_else_nop_endif
b __vhe_hyp_call
diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S
index 5e9052f087f2..5170ce1021da 100644
--- a/arch/arm64/kvm/hyp/hyp-entry.S
+++ b/arch/arm64/kvm/hyp/hyp-entry.S
@@ -32,17 +32,17 @@
* Shuffle the parameters before calling the function
* pointed to in x0. Assumes parameters in x[1,2,3].
*/
+ str lr, [sp, #-16]!
mov lr, x0
mov x0, x1
mov x1, x2
mov x2, x3
blr lr
+ ldr lr, [sp], #16
.endm
ENTRY(__vhe_hyp_call)
- str lr, [sp, #-16]!
do_el2_call
- ldr lr, [sp], #16
/*
* We used to rely on having an exception return to get
* an implicit isb. In the E2H case, we don't have it anymore.
@@ -53,21 +53,6 @@ ENTRY(__vhe_hyp_call)
ret
ENDPROC(__vhe_hyp_call)
-/*
- * Compute the idmap address of __kvm_hyp_reset based on the idmap
- * start passed as a parameter, and jump there.
- *
- * x0: HYP phys_idmap_start
- */
-ENTRY(__kvm_hyp_teardown)
- mov x4, x0
- adr_l x3, __kvm_hyp_reset
-
- /* insert __kvm_hyp_reset()s offset into phys_idmap_start */
- bfi x4, x3, #0, #PAGE_SHIFT
- br x4
-ENDPROC(__kvm_hyp_teardown)
-
el1_sync: // Guest trapped into EL2
stp x0, x1, [sp, #-16]!
@@ -87,10 +72,24 @@ alternative_endif
/* Here, we're pretty sure the host called HVC. */
ldp x0, x1, [sp], #16
- cmp x0, #HVC_GET_VECTORS
- b.ne 1f
- mrs x0, vbar_el2
- b 2f
+ /* Check for a stub HVC call */
+ cmp x0, #HVC_STUB_HCALL_NR
+ b.hs 1f
+
+ /*
+ * Compute the idmap address of __kvm_handle_stub_hvc and
+ * jump there. Since we use kimage_voffset, do not use the
+ * HYP VA for __kvm_handle_stub_hvc, but the kernel VA instead
+ * (by loading it from the constant pool).
+ *
+ * Preserve x0-x4, which may contain stub parameters.
+ */
+ ldr x5, =__kvm_handle_stub_hvc
+ ldr_l x6, kimage_voffset
+
+ /* x5 = __pa(x5) */
+ sub x5, x5, x6
+ br x5
1:
/*
@@ -99,7 +98,7 @@ alternative_endif
kern_hyp_va x0
do_el2_call
-2: eret
+ eret
el1_trap:
/*
diff --git a/arch/arm64/kvm/hyp/tlb.c b/arch/arm64/kvm/hyp/tlb.c
index 9e1d2b75eecd..73464a96c365 100644
--- a/arch/arm64/kvm/hyp/tlb.c
+++ b/arch/arm64/kvm/hyp/tlb.c
@@ -94,6 +94,28 @@ void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
dsb(ish);
isb();
+ /*
+ * If the host is running at EL1 and we have a VPIPT I-cache,
+ * then we must perform I-cache maintenance at EL2 in order for
+ * it to have an effect on the guest. Since the guest cannot hit
+ * I-cache lines allocated with a different VMID, we don't need
+ * to worry about junk out of guest reset (we nuke the I-cache on
+ * VMID rollover), but we do need to be careful when remapping
+ * executable pages for the same guest. This can happen when KSM
+ * takes a CoW fault on an executable page, copies the page into
+ * a page that was previously mapped in the guest and then needs
+ * to invalidate the guest view of the I-cache for that page
+ * from EL1. To solve this, we invalidate the entire I-cache when
+ * unmapping a page from a guest if we have a VPIPT I-cache but
+ * the host is running at EL1. As above, we could do better if
+ * we had the VA.
+ *
+ * The moral of this story is: if you have a VPIPT I-cache, then
+ * you should be running with VHE enabled.
+ */
+ if (!has_vhe() && icache_is_vpipt())
+ __flush_icache_all();
+
__tlb_switch_to_host()(kvm);
}
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
index d9e9697de1b2..561badf93de8 100644
--- a/arch/arm64/kvm/reset.c
+++ b/arch/arm64/kvm/reset.c
@@ -60,7 +60,7 @@ static bool cpu_has_32bit_el1(void)
{
u64 pfr0;
- pfr0 = read_system_reg(SYS_ID_AA64PFR0_EL1);
+ pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
return !!(pfr0 & 0x20);
}
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 0e26f8c2b56f..efbe9e8e7a78 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -55,6 +55,15 @@
* 64bit interface.
*/
+static bool read_from_write_only(struct kvm_vcpu *vcpu,
+ const struct sys_reg_params *params)
+{
+ WARN_ONCE(1, "Unexpected sys_reg read to write-only register\n");
+ print_sys_reg_instr(params);
+ kvm_inject_undefined(vcpu);
+ return false;
+}
+
/* 3 bits per cache level, as per CLIDR, but non-existent caches always 0 */
static u32 cache_levels;
@@ -460,35 +469,35 @@ static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
vcpu_sys_reg(vcpu, PMCR_EL0) = val;
}
-static bool pmu_access_el0_disabled(struct kvm_vcpu *vcpu)
+static bool check_pmu_access_disabled(struct kvm_vcpu *vcpu, u64 flags)
{
u64 reg = vcpu_sys_reg(vcpu, PMUSERENR_EL0);
+ bool enabled = (reg & flags) || vcpu_mode_priv(vcpu);
- return !((reg & ARMV8_PMU_USERENR_EN) || vcpu_mode_priv(vcpu));
+ if (!enabled)
+ kvm_inject_undefined(vcpu);
+
+ return !enabled;
}
-static bool pmu_write_swinc_el0_disabled(struct kvm_vcpu *vcpu)
+static bool pmu_access_el0_disabled(struct kvm_vcpu *vcpu)
{
- u64 reg = vcpu_sys_reg(vcpu, PMUSERENR_EL0);
+ return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_EN);
+}
- return !((reg & (ARMV8_PMU_USERENR_SW | ARMV8_PMU_USERENR_EN))
- || vcpu_mode_priv(vcpu));
+static bool pmu_write_swinc_el0_disabled(struct kvm_vcpu *vcpu)
+{
+ return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_SW | ARMV8_PMU_USERENR_EN);
}
static bool pmu_access_cycle_counter_el0_disabled(struct kvm_vcpu *vcpu)
{
- u64 reg = vcpu_sys_reg(vcpu, PMUSERENR_EL0);
-
- return !((reg & (ARMV8_PMU_USERENR_CR | ARMV8_PMU_USERENR_EN))
- || vcpu_mode_priv(vcpu));
+ return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_CR | ARMV8_PMU_USERENR_EN);
}
static bool pmu_access_event_counter_el0_disabled(struct kvm_vcpu *vcpu)
{
- u64 reg = vcpu_sys_reg(vcpu, PMUSERENR_EL0);
-
- return !((reg & (ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_EN))
- || vcpu_mode_priv(vcpu));
+ return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_EN);
}
static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
@@ -567,8 +576,10 @@ static bool pmu_counter_idx_valid(struct kvm_vcpu *vcpu, u64 idx)
pmcr = vcpu_sys_reg(vcpu, PMCR_EL0);
val = (pmcr >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK;
- if (idx >= val && idx != ARMV8_PMU_CYCLE_IDX)
+ if (idx >= val && idx != ARMV8_PMU_CYCLE_IDX) {
+ kvm_inject_undefined(vcpu);
return false;
+ }
return true;
}
@@ -707,8 +718,10 @@ static bool access_pminten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
if (!kvm_arm_pmu_v3_ready(vcpu))
return trap_raz_wi(vcpu, p, r);
- if (!vcpu_mode_priv(vcpu))
+ if (!vcpu_mode_priv(vcpu)) {
+ kvm_inject_undefined(vcpu);
return false;
+ }
if (p->is_write) {
u64 val = p->regval & mask;
@@ -759,16 +772,15 @@ static bool access_pmswinc(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
if (!kvm_arm_pmu_v3_ready(vcpu))
return trap_raz_wi(vcpu, p, r);
+ if (!p->is_write)
+ return read_from_write_only(vcpu, p);
+
if (pmu_write_swinc_el0_disabled(vcpu))
return false;
- if (p->is_write) {
- mask = kvm_pmu_valid_counter_mask(vcpu);
- kvm_pmu_software_increment(vcpu, p->regval & mask);
- return true;
- }
-
- return false;
+ mask = kvm_pmu_valid_counter_mask(vcpu);
+ kvm_pmu_software_increment(vcpu, p->regval & mask);
+ return true;
}
static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
@@ -778,8 +790,10 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
return trap_raz_wi(vcpu, p, r);
if (p->is_write) {
- if (!vcpu_mode_priv(vcpu))
+ if (!vcpu_mode_priv(vcpu)) {
+ kvm_inject_undefined(vcpu);
return false;
+ }
vcpu_sys_reg(vcpu, PMUSERENR_EL0) = p->regval
& ARMV8_PMU_USERENR_MASK;
@@ -793,31 +807,23 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
/* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
#define DBG_BCR_BVR_WCR_WVR_EL1(n) \
- /* DBGBVRn_EL1 */ \
- { Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b100), \
+ { SYS_DESC(SYS_DBGBVRn_EL1(n)), \
trap_bvr, reset_bvr, n, 0, get_bvr, set_bvr }, \
- /* DBGBCRn_EL1 */ \
- { Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b101), \
+ { SYS_DESC(SYS_DBGBCRn_EL1(n)), \
trap_bcr, reset_bcr, n, 0, get_bcr, set_bcr }, \
- /* DBGWVRn_EL1 */ \
- { Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b110), \
+ { SYS_DESC(SYS_DBGWVRn_EL1(n)), \
trap_wvr, reset_wvr, n, 0, get_wvr, set_wvr }, \
- /* DBGWCRn_EL1 */ \
- { Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b111), \
+ { SYS_DESC(SYS_DBGWCRn_EL1(n)), \
trap_wcr, reset_wcr, n, 0, get_wcr, set_wcr }
/* Macro to expand the PMEVCNTRn_EL0 register */
#define PMU_PMEVCNTR_EL0(n) \
- /* PMEVCNTRn_EL0 */ \
- { Op0(0b11), Op1(0b011), CRn(0b1110), \
- CRm((0b1000 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)), \
+ { SYS_DESC(SYS_PMEVCNTRn_EL0(n)), \
access_pmu_evcntr, reset_unknown, (PMEVCNTR0_EL0 + n), }
/* Macro to expand the PMEVTYPERn_EL0 register */
#define PMU_PMEVTYPER_EL0(n) \
- /* PMEVTYPERn_EL0 */ \
- { Op0(0b11), Op1(0b011), CRn(0b1110), \
- CRm((0b1100 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)), \
+ { SYS_DESC(SYS_PMEVTYPERn_EL0(n)), \
access_pmu_evtyper, reset_unknown, (PMEVTYPER0_EL0 + n), }
static bool access_cntp_tval(struct kvm_vcpu *vcpu,
@@ -887,24 +893,14 @@ static bool access_cntp_cval(struct kvm_vcpu *vcpu,
* more demanding guest...
*/
static const struct sys_reg_desc sys_reg_descs[] = {
- /* DC ISW */
- { Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b0110), Op2(0b010),
- access_dcsw },
- /* DC CSW */
- { Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b1010), Op2(0b010),
- access_dcsw },
- /* DC CISW */
- { Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b1110), Op2(0b010),
- access_dcsw },
+ { SYS_DESC(SYS_DC_ISW), access_dcsw },
+ { SYS_DESC(SYS_DC_CSW), access_dcsw },
+ { SYS_DESC(SYS_DC_CISW), access_dcsw },
DBG_BCR_BVR_WCR_WVR_EL1(0),
DBG_BCR_BVR_WCR_WVR_EL1(1),
- /* MDCCINT_EL1 */
- { Op0(0b10), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b000),
- trap_debug_regs, reset_val, MDCCINT_EL1, 0 },
- /* MDSCR_EL1 */
- { Op0(0b10), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b010),
- trap_debug_regs, reset_val, MDSCR_EL1, 0 },
+ { SYS_DESC(SYS_MDCCINT_EL1), trap_debug_regs, reset_val, MDCCINT_EL1, 0 },
+ { SYS_DESC(SYS_MDSCR_EL1), trap_debug_regs, reset_val, MDSCR_EL1, 0 },
DBG_BCR_BVR_WCR_WVR_EL1(2),
DBG_BCR_BVR_WCR_WVR_EL1(3),
DBG_BCR_BVR_WCR_WVR_EL1(4),
@@ -920,179 +916,77 @@ static const struct sys_reg_desc sys_reg_descs[] = {
DBG_BCR_BVR_WCR_WVR_EL1(14),
DBG_BCR_BVR_WCR_WVR_EL1(15),
- /* MDRAR_EL1 */
- { Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b000),
- trap_raz_wi },
- /* OSLAR_EL1 */
- { Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b100),
- trap_raz_wi },
- /* OSLSR_EL1 */
- { Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0001), Op2(0b100),
- trap_oslsr_el1 },
- /* OSDLR_EL1 */
- { Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0011), Op2(0b100),
- trap_raz_wi },
- /* DBGPRCR_EL1 */
- { Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0100), Op2(0b100),
- trap_raz_wi },
- /* DBGCLAIMSET_EL1 */
- { Op0(0b10), Op1(0b000), CRn(0b0111), CRm(0b1000), Op2(0b110),
- trap_raz_wi },
- /* DBGCLAIMCLR_EL1 */
- { Op0(0b10), Op1(0b000), CRn(0b0111), CRm(0b1001), Op2(0b110),
- trap_raz_wi },
- /* DBGAUTHSTATUS_EL1 */
- { Op0(0b10), Op1(0b000), CRn(0b0111), CRm(0b1110), Op2(0b110),
- trap_dbgauthstatus_el1 },
-
- /* MDCCSR_EL1 */
- { Op0(0b10), Op1(0b011), CRn(0b0000), CRm(0b0001), Op2(0b000),
- trap_raz_wi },
- /* DBGDTR_EL0 */
- { Op0(0b10), Op1(0b011), CRn(0b0000), CRm(0b0100), Op2(0b000),
- trap_raz_wi },
- /* DBGDTR[TR]X_EL0 */
- { Op0(0b10), Op1(0b011), CRn(0b0000), CRm(0b0101), Op2(0b000),
- trap_raz_wi },
-
- /* DBGVCR32_EL2 */
- { Op0(0b10), Op1(0b100), CRn(0b0000), CRm(0b0111), Op2(0b000),
- NULL, reset_val, DBGVCR32_EL2, 0 },
-
- /* MPIDR_EL1 */
- { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0000), Op2(0b101),
- NULL, reset_mpidr, MPIDR_EL1 },
- /* SCTLR_EL1 */
- { Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b000),
- access_vm_reg, reset_val, SCTLR_EL1, 0x00C50078 },
- /* CPACR_EL1 */
- { Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b010),
- NULL, reset_val, CPACR_EL1, 0 },
- /* TTBR0_EL1 */
- { Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b000),
- access_vm_reg, reset_unknown, TTBR0_EL1 },
- /* TTBR1_EL1 */
- { Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b001),
- access_vm_reg, reset_unknown, TTBR1_EL1 },
- /* TCR_EL1 */
- { Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b010),
- access_vm_reg, reset_val, TCR_EL1, 0 },
-
- /* AFSR0_EL1 */
- { Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0001), Op2(0b000),
- access_vm_reg, reset_unknown, AFSR0_EL1 },
- /* AFSR1_EL1 */
- { Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0001), Op2(0b001),
- access_vm_reg, reset_unknown, AFSR1_EL1 },
- /* ESR_EL1 */
- { Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0010), Op2(0b000),
- access_vm_reg, reset_unknown, ESR_EL1 },
- /* FAR_EL1 */
- { Op0(0b11), Op1(0b000), CRn(0b0110), CRm(0b0000), Op2(0b000),
- access_vm_reg, reset_unknown, FAR_EL1 },
- /* PAR_EL1 */
- { Op0(0b11), Op1(0b000), CRn(0b0111), CRm(0b0100), Op2(0b000),
- NULL, reset_unknown, PAR_EL1 },
-
- /* PMINTENSET_EL1 */
- { Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b001),
- access_pminten, reset_unknown, PMINTENSET_EL1 },
- /* PMINTENCLR_EL1 */
- { Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b010),
- access_pminten, NULL, PMINTENSET_EL1 },
-
- /* MAIR_EL1 */
- { Op0(0b11), Op1(0b000), CRn(0b1010), CRm(0b0010), Op2(0b000),
- access_vm_reg, reset_unknown, MAIR_EL1 },
- /* AMAIR_EL1 */
- { Op0(0b11), Op1(0b000), CRn(0b1010), CRm(0b0011), Op2(0b000),
- access_vm_reg, reset_amair_el1, AMAIR_EL1 },
-
- /* VBAR_EL1 */
- { Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b0000), Op2(0b000),
- NULL, reset_val, VBAR_EL1, 0 },
-
- /* ICC_SGI1R_EL1 */
- { Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b1011), Op2(0b101),
- access_gic_sgi },
- /* ICC_SRE_EL1 */
- { Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b1100), Op2(0b101),
- access_gic_sre },
-
- /* CONTEXTIDR_EL1 */
- { Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b001),
- access_vm_reg, reset_val, CONTEXTIDR_EL1, 0 },
- /* TPIDR_EL1 */
- { Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b100),
- NULL, reset_unknown, TPIDR_EL1 },
-
- /* CNTKCTL_EL1 */
- { Op0(0b11), Op1(0b000), CRn(0b1110), CRm(0b0001), Op2(0b000),
- NULL, reset_val, CNTKCTL_EL1, 0},
-
- /* CSSELR_EL1 */
- { Op0(0b11), Op1(0b010), CRn(0b0000), CRm(0b0000), Op2(0b000),
- NULL, reset_unknown, CSSELR_EL1 },
-
- /* PMCR_EL0 */
- { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b000),
- access_pmcr, reset_pmcr, },
- /* PMCNTENSET_EL0 */
- { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b001),
- access_pmcnten, reset_unknown, PMCNTENSET_EL0 },
- /* PMCNTENCLR_EL0 */
- { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b010),
- access_pmcnten, NULL, PMCNTENSET_EL0 },
- /* PMOVSCLR_EL0 */
- { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b011),
- access_pmovs, NULL, PMOVSSET_EL0 },
- /* PMSWINC_EL0 */
- { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b100),
- access_pmswinc, reset_unknown, PMSWINC_EL0 },
- /* PMSELR_EL0 */
- { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b101),
- access_pmselr, reset_unknown, PMSELR_EL0 },
- /* PMCEID0_EL0 */
- { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b110),
- access_pmceid },
- /* PMCEID1_EL0 */
- { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b111),
- access_pmceid },
- /* PMCCNTR_EL0 */
- { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b000),
- access_pmu_evcntr, reset_unknown, PMCCNTR_EL0 },
- /* PMXEVTYPER_EL0 */
- { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b001),
- access_pmu_evtyper },
- /* PMXEVCNTR_EL0 */
- { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b010),
- access_pmu_evcntr },
- /* PMUSERENR_EL0
- * This register resets as unknown in 64bit mode while it resets as zero
+ { SYS_DESC(SYS_MDRAR_EL1), trap_raz_wi },
+ { SYS_DESC(SYS_OSLAR_EL1), trap_raz_wi },
+ { SYS_DESC(SYS_OSLSR_EL1), trap_oslsr_el1 },
+ { SYS_DESC(SYS_OSDLR_EL1), trap_raz_wi },
+ { SYS_DESC(SYS_DBGPRCR_EL1), trap_raz_wi },
+ { SYS_DESC(SYS_DBGCLAIMSET_EL1), trap_raz_wi },
+ { SYS_DESC(SYS_DBGCLAIMCLR_EL1), trap_raz_wi },
+ { SYS_DESC(SYS_DBGAUTHSTATUS_EL1), trap_dbgauthstatus_el1 },
+
+ { SYS_DESC(SYS_MDCCSR_EL0), trap_raz_wi },
+ { SYS_DESC(SYS_DBGDTR_EL0), trap_raz_wi },
+ // DBGDTR[TR]X_EL0 share the same encoding
+ { SYS_DESC(SYS_DBGDTRTX_EL0), trap_raz_wi },
+
+ { SYS_DESC(SYS_DBGVCR32_EL2), NULL, reset_val, DBGVCR32_EL2, 0 },
+
+ { SYS_DESC(SYS_MPIDR_EL1), NULL, reset_mpidr, MPIDR_EL1 },
+ { SYS_DESC(SYS_SCTLR_EL1), access_vm_reg, reset_val, SCTLR_EL1, 0x00C50078 },
+ { SYS_DESC(SYS_CPACR_EL1), NULL, reset_val, CPACR_EL1, 0 },
+ { SYS_DESC(SYS_TTBR0_EL1), access_vm_reg, reset_unknown, TTBR0_EL1 },
+ { SYS_DESC(SYS_TTBR1_EL1), access_vm_reg, reset_unknown, TTBR1_EL1 },
+ { SYS_DESC(SYS_TCR_EL1), access_vm_reg, reset_val, TCR_EL1, 0 },
+
+ { SYS_DESC(SYS_AFSR0_EL1), access_vm_reg, reset_unknown, AFSR0_EL1 },
+ { SYS_DESC(SYS_AFSR1_EL1), access_vm_reg, reset_unknown, AFSR1_EL1 },
+ { SYS_DESC(SYS_ESR_EL1), access_vm_reg, reset_unknown, ESR_EL1 },
+ { SYS_DESC(SYS_FAR_EL1), access_vm_reg, reset_unknown, FAR_EL1 },
+ { SYS_DESC(SYS_PAR_EL1), NULL, reset_unknown, PAR_EL1 },
+
+ { SYS_DESC(SYS_PMINTENSET_EL1), access_pminten, reset_unknown, PMINTENSET_EL1 },
+ { SYS_DESC(SYS_PMINTENCLR_EL1), access_pminten, NULL, PMINTENSET_EL1 },
+
+ { SYS_DESC(SYS_MAIR_EL1), access_vm_reg, reset_unknown, MAIR_EL1 },
+ { SYS_DESC(SYS_AMAIR_EL1), access_vm_reg, reset_amair_el1, AMAIR_EL1 },
+
+ { SYS_DESC(SYS_VBAR_EL1), NULL, reset_val, VBAR_EL1, 0 },
+
+ { SYS_DESC(SYS_ICC_SGI1R_EL1), access_gic_sgi },
+ { SYS_DESC(SYS_ICC_SRE_EL1), access_gic_sre },
+
+ { SYS_DESC(SYS_CONTEXTIDR_EL1), access_vm_reg, reset_val, CONTEXTIDR_EL1, 0 },
+ { SYS_DESC(SYS_TPIDR_EL1), NULL, reset_unknown, TPIDR_EL1 },
+
+ { SYS_DESC(SYS_CNTKCTL_EL1), NULL, reset_val, CNTKCTL_EL1, 0},
+
+ { SYS_DESC(SYS_CSSELR_EL1), NULL, reset_unknown, CSSELR_EL1 },
+
+ { SYS_DESC(SYS_PMCR_EL0), access_pmcr, reset_pmcr, },
+ { SYS_DESC(SYS_PMCNTENSET_EL0), access_pmcnten, reset_unknown, PMCNTENSET_EL0 },
+ { SYS_DESC(SYS_PMCNTENCLR_EL0), access_pmcnten, NULL, PMCNTENSET_EL0 },
+ { SYS_DESC(SYS_PMOVSCLR_EL0), access_pmovs, NULL, PMOVSSET_EL0 },
+ { SYS_DESC(SYS_PMSWINC_EL0), access_pmswinc, reset_unknown, PMSWINC_EL0 },
+ { SYS_DESC(SYS_PMSELR_EL0), access_pmselr, reset_unknown, PMSELR_EL0 },
+ { SYS_DESC(SYS_PMCEID0_EL0), access_pmceid },
+ { SYS_DESC(SYS_PMCEID1_EL0), access_pmceid },
+ { SYS_DESC(SYS_PMCCNTR_EL0), access_pmu_evcntr, reset_unknown, PMCCNTR_EL0 },
+ { SYS_DESC(SYS_PMXEVTYPER_EL0), access_pmu_evtyper },
+ { SYS_DESC(SYS_PMXEVCNTR_EL0), access_pmu_evcntr },
+ /*
+ * PMUSERENR_EL0 resets as unknown in 64bit mode while it resets as zero
* in 32bit mode. Here we choose to reset it as zero for consistency.
*/
- { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1110), Op2(0b000),
- access_pmuserenr, reset_val, PMUSERENR_EL0, 0 },
- /* PMOVSSET_EL0 */
- { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1110), Op2(0b011),
- access_pmovs, reset_unknown, PMOVSSET_EL0 },
-
- /* TPIDR_EL0 */
- { Op0(0b11), Op1(0b011), CRn(0b1101), CRm(0b0000), Op2(0b010),
- NULL, reset_unknown, TPIDR_EL0 },
- /* TPIDRRO_EL0 */
- { Op0(0b11), Op1(0b011), CRn(0b1101), CRm(0b0000), Op2(0b011),
- NULL, reset_unknown, TPIDRRO_EL0 },
-
- /* CNTP_TVAL_EL0 */
- { Op0(0b11), Op1(0b011), CRn(0b1110), CRm(0b0010), Op2(0b000),
- access_cntp_tval },
- /* CNTP_CTL_EL0 */
- { Op0(0b11), Op1(0b011), CRn(0b1110), CRm(0b0010), Op2(0b001),
- access_cntp_ctl },
- /* CNTP_CVAL_EL0 */
- { Op0(0b11), Op1(0b011), CRn(0b1110), CRm(0b0010), Op2(0b010),
- access_cntp_cval },
+ { SYS_DESC(SYS_PMUSERENR_EL0), access_pmuserenr, reset_val, PMUSERENR_EL0, 0 },
+ { SYS_DESC(SYS_PMOVSSET_EL0), access_pmovs, reset_unknown, PMOVSSET_EL0 },
+
+ { SYS_DESC(SYS_TPIDR_EL0), NULL, reset_unknown, TPIDR_EL0 },
+ { SYS_DESC(SYS_TPIDRRO_EL0), NULL, reset_unknown, TPIDRRO_EL0 },
+
+ { SYS_DESC(SYS_CNTP_TVAL_EL0), access_cntp_tval },
+ { SYS_DESC(SYS_CNTP_CTL_EL0), access_cntp_ctl },
+ { SYS_DESC(SYS_CNTP_CVAL_EL0), access_cntp_cval },
/* PMEVCNTRn_EL0 */
PMU_PMEVCNTR_EL0(0),
@@ -1158,22 +1052,15 @@ static const struct sys_reg_desc sys_reg_descs[] = {
PMU_PMEVTYPER_EL0(28),
PMU_PMEVTYPER_EL0(29),
PMU_PMEVTYPER_EL0(30),
- /* PMCCFILTR_EL0
- * This register resets as unknown in 64bit mode while it resets as zero
+ /*
+ * PMCCFILTR_EL0 resets as unknown in 64bit mode while it resets as zero
* in 32bit mode. Here we choose to reset it as zero for consistency.
*/
- { Op0(0b11), Op1(0b011), CRn(0b1110), CRm(0b1111), Op2(0b111),
- access_pmu_evtyper, reset_val, PMCCFILTR_EL0, 0 },
-
- /* DACR32_EL2 */
- { Op0(0b11), Op1(0b100), CRn(0b0011), CRm(0b0000), Op2(0b000),
- NULL, reset_unknown, DACR32_EL2 },
- /* IFSR32_EL2 */
- { Op0(0b11), Op1(0b100), CRn(0b0101), CRm(0b0000), Op2(0b001),
- NULL, reset_unknown, IFSR32_EL2 },
- /* FPEXC32_EL2 */
- { Op0(0b11), Op1(0b100), CRn(0b0101), CRm(0b0011), Op2(0b000),
- NULL, reset_val, FPEXC32_EL2, 0x70 },
+ { SYS_DESC(SYS_PMCCFILTR_EL0), access_pmu_evtyper, reset_val, PMCCFILTR_EL0, 0 },
+
+ { SYS_DESC(SYS_DACR32_EL2), NULL, reset_unknown, DACR32_EL2 },
+ { SYS_DESC(SYS_IFSR32_EL2), NULL, reset_unknown, IFSR32_EL2 },
+ { SYS_DESC(SYS_FPEXC32_EL2), NULL, reset_val, FPEXC32_EL2, 0x70 },
};
static bool trap_dbgidr(struct kvm_vcpu *vcpu,
@@ -1183,8 +1070,8 @@ static bool trap_dbgidr(struct kvm_vcpu *vcpu,
if (p->is_write) {
return ignore_write(vcpu, p);
} else {
- u64 dfr = read_system_reg(SYS_ID_AA64DFR0_EL1);
- u64 pfr = read_system_reg(SYS_ID_AA64PFR0_EL1);
+ u64 dfr = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1);
+ u64 pfr = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
u32 el3 = !!cpuid_feature_extract_unsigned_field(pfr, ID_AA64PFR0_EL3_SHIFT);
p->regval = ((((dfr >> ID_AA64DFR0_WRPS_SHIFT) & 0xf) << 28) |
@@ -1557,6 +1444,22 @@ int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run)
return 1;
}
+static void perform_access(struct kvm_vcpu *vcpu,
+ struct sys_reg_params *params,
+ const struct sys_reg_desc *r)
+{
+ /*
+ * Not having an accessor means that we have configured a trap
+ * that we don't know how to handle. This certainly qualifies
+ * as a gross bug that should be fixed right away.
+ */
+ BUG_ON(!r->access);
+
+	/* Skip the instruction if the handler emulated the access */
+ if (likely(r->access(vcpu, params, r)))
+ kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
+}
+
/*
* emulate_cp -- tries to match a sys_reg access in a handling table, and
* call the corresponding trap handler.
@@ -1580,20 +1483,8 @@ static int emulate_cp(struct kvm_vcpu *vcpu,
r = find_reg(params, table, num);
if (r) {
- /*
- * Not having an accessor means that we have
- * configured a trap that we don't know how to
- * handle. This certainly qualifies as a gross bug
- * that should be fixed right away.
- */
- BUG_ON(!r->access);
-
- if (likely(r->access(vcpu, params, r))) {
- /* Skip instruction, since it was emulated */
- kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
- /* Handled */
- return 0;
- }
+ perform_access(vcpu, params, r);
+ return 0;
}
/* Not handled */
@@ -1660,20 +1551,25 @@ static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
params.regval |= vcpu_get_reg(vcpu, Rt2) << 32;
}
- if (!emulate_cp(vcpu, &params, target_specific, nr_specific))
- goto out;
- if (!emulate_cp(vcpu, &params, global, nr_global))
- goto out;
-
- unhandled_cp_access(vcpu, &params);
+ /*
+ * Try to emulate the coprocessor access using the target
+ * specific table first, and using the global table afterwards.
+	 * If either table contains a handler, write the value that a read
+	 * produced back to the guest registers (splitting it across Rt and
+	 * Rt2) and return with success.
+ */
+ if (!emulate_cp(vcpu, &params, target_specific, nr_specific) ||
+ !emulate_cp(vcpu, &params, global, nr_global)) {
+ /* Split up the value between registers for the read side */
+ if (!params.is_write) {
+ vcpu_set_reg(vcpu, Rt, lower_32_bits(params.regval));
+ vcpu_set_reg(vcpu, Rt2, upper_32_bits(params.regval));
+ }
-out:
- /* Split up the value between registers for the read side */
- if (!params.is_write) {
- vcpu_set_reg(vcpu, Rt, lower_32_bits(params.regval));
- vcpu_set_reg(vcpu, Rt2, upper_32_bits(params.regval));
+ return 1;
}
+ unhandled_cp_access(vcpu, &params);
return 1;
}
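
A quick worked example of the read path above, with a hypothetical value: if the emulated access leaves params.regval = 0x1122334455667788, the split writes 0x55667788 (lower_32_bits) to Rt and 0x11223344 (upper_32_bits) to Rt2, mirroring how the write path assembled regval from the two GPRs before the handler ran.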
@@ -1763,26 +1659,13 @@ static int emulate_sys_reg(struct kvm_vcpu *vcpu,
r = find_reg(params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
if (likely(r)) {
- /*
- * Not having an accessor means that we have
- * configured a trap that we don't know how to
- * handle. This certainly qualifies as a gross bug
- * that should be fixed right away.
- */
- BUG_ON(!r->access);
-
- if (likely(r->access(vcpu, params, r))) {
- /* Skip instruction, since it was emulated */
- kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
- return 1;
- }
- /* If access function fails, it should complain. */
+ perform_access(vcpu, params, r);
} else {
kvm_err("Unsupported guest sys_reg access at: %lx\n",
*vcpu_pc(vcpu));
print_sys_reg_instr(params);
+ kvm_inject_undefined(vcpu);
}
- kvm_inject_undefined(vcpu);
return 1;
}
@@ -1932,44 +1815,25 @@ FUNCTION_INVARIANT(aidr_el1)
/* ->val is filled in by kvm_sys_reg_table_init() */
static struct sys_reg_desc invariant_sys_regs[] = {
- { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0000), Op2(0b000),
- NULL, get_midr_el1 },
- { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0000), Op2(0b110),
- NULL, get_revidr_el1 },
- { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b000),
- NULL, get_id_pfr0_el1 },
- { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b001),
- NULL, get_id_pfr1_el1 },
- { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b010),
- NULL, get_id_dfr0_el1 },
- { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b011),
- NULL, get_id_afr0_el1 },
- { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b100),
- NULL, get_id_mmfr0_el1 },
- { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b101),
- NULL, get_id_mmfr1_el1 },
- { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b110),
- NULL, get_id_mmfr2_el1 },
- { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b111),
- NULL, get_id_mmfr3_el1 },
- { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b000),
- NULL, get_id_isar0_el1 },
- { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b001),
- NULL, get_id_isar1_el1 },
- { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b010),
- NULL, get_id_isar2_el1 },
- { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b011),
- NULL, get_id_isar3_el1 },
- { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b100),
- NULL, get_id_isar4_el1 },
- { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b101),
- NULL, get_id_isar5_el1 },
- { Op0(0b11), Op1(0b001), CRn(0b0000), CRm(0b0000), Op2(0b001),
- NULL, get_clidr_el1 },
- { Op0(0b11), Op1(0b001), CRn(0b0000), CRm(0b0000), Op2(0b111),
- NULL, get_aidr_el1 },
- { Op0(0b11), Op1(0b011), CRn(0b0000), CRm(0b0000), Op2(0b001),
- NULL, get_ctr_el0 },
+ { SYS_DESC(SYS_MIDR_EL1), NULL, get_midr_el1 },
+ { SYS_DESC(SYS_REVIDR_EL1), NULL, get_revidr_el1 },
+ { SYS_DESC(SYS_ID_PFR0_EL1), NULL, get_id_pfr0_el1 },
+ { SYS_DESC(SYS_ID_PFR1_EL1), NULL, get_id_pfr1_el1 },
+ { SYS_DESC(SYS_ID_DFR0_EL1), NULL, get_id_dfr0_el1 },
+ { SYS_DESC(SYS_ID_AFR0_EL1), NULL, get_id_afr0_el1 },
+ { SYS_DESC(SYS_ID_MMFR0_EL1), NULL, get_id_mmfr0_el1 },
+ { SYS_DESC(SYS_ID_MMFR1_EL1), NULL, get_id_mmfr1_el1 },
+ { SYS_DESC(SYS_ID_MMFR2_EL1), NULL, get_id_mmfr2_el1 },
+ { SYS_DESC(SYS_ID_MMFR3_EL1), NULL, get_id_mmfr3_el1 },
+ { SYS_DESC(SYS_ID_ISAR0_EL1), NULL, get_id_isar0_el1 },
+ { SYS_DESC(SYS_ID_ISAR1_EL1), NULL, get_id_isar1_el1 },
+ { SYS_DESC(SYS_ID_ISAR2_EL1), NULL, get_id_isar2_el1 },
+ { SYS_DESC(SYS_ID_ISAR3_EL1), NULL, get_id_isar3_el1 },
+ { SYS_DESC(SYS_ID_ISAR4_EL1), NULL, get_id_isar4_el1 },
+ { SYS_DESC(SYS_ID_ISAR5_EL1), NULL, get_id_isar5_el1 },
+ { SYS_DESC(SYS_CLIDR_EL1), NULL, get_clidr_el1 },
+ { SYS_DESC(SYS_AIDR_EL1), NULL, get_aidr_el1 },
+ { SYS_DESC(SYS_CTR_EL0), NULL, get_ctr_el0 },
};
static int reg_from_user(u64 *val, const void __user *uaddr, u64 id)
diff --git a/arch/arm64/kvm/sys_regs.h b/arch/arm64/kvm/sys_regs.h
index 9c6ffd0f0196..060f5348ef25 100644
--- a/arch/arm64/kvm/sys_regs.h
+++ b/arch/arm64/kvm/sys_regs.h
@@ -83,24 +83,6 @@ static inline bool read_zero(struct kvm_vcpu *vcpu,
return true;
}
-static inline bool write_to_read_only(struct kvm_vcpu *vcpu,
- const struct sys_reg_params *params)
-{
- kvm_debug("sys_reg write to read-only register at: %lx\n",
- *vcpu_pc(vcpu));
- print_sys_reg_instr(params);
- return false;
-}
-
-static inline bool read_from_write_only(struct kvm_vcpu *vcpu,
- const struct sys_reg_params *params)
-{
- kvm_debug("sys_reg read to write-only register at: %lx\n",
- *vcpu_pc(vcpu));
- print_sys_reg_instr(params);
- return false;
-}
-
/* Reset functions */
static inline void reset_unknown(struct kvm_vcpu *vcpu,
const struct sys_reg_desc *r)
@@ -147,4 +129,9 @@ const struct sys_reg_desc *find_reg_by_id(u64 id,
#define CRm(_x) .CRm = _x
#define Op2(_x) .Op2 = _x
+#define SYS_DESC(reg) \
+ Op0(sys_reg_Op0(reg)), Op1(sys_reg_Op1(reg)), \
+ CRn(sys_reg_CRn(reg)), CRm(sys_reg_CRm(reg)), \
+ Op2(sys_reg_Op2(reg))
+
#endif /* __ARM64_KVM_SYS_REGS_LOCAL_H__ */
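
To see what the SYS_DESC() shorthand buys, here is a minimal sketch of its expansion for ACTLR_EL1 (converted in sys_regs_generic_v8.c below), assuming the usual S3_0_C1_C0_1 encoding, i.e. op0=3, op1=0, CRn=1, CRm=0, op2=1:

	/* { SYS_DESC(SYS_ACTLR_EL1), access_actlr, reset_actlr, ACTLR_EL1 }, */
	/* expands, via the Op0()..Op2() designated-initializer macros, to:   */
	{ .Op0 = 0b11, .Op1 = 0b000, .CRn = 0b0001, .CRm = 0b0000, .Op2 = 0b001,
	  access_actlr, reset_actlr, ACTLR_EL1 },

which is exactly the open-coded entry being deleted, so the table conversions in this series are purely mechanical.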
diff --git a/arch/arm64/kvm/sys_regs_generic_v8.c b/arch/arm64/kvm/sys_regs_generic_v8.c
index 46af7186bca6..969ade1d333d 100644
--- a/arch/arm64/kvm/sys_regs_generic_v8.c
+++ b/arch/arm64/kvm/sys_regs_generic_v8.c
@@ -52,9 +52,7 @@ static void reset_actlr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
* Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
*/
static const struct sys_reg_desc genericv8_sys_regs[] = {
- /* ACTLR_EL1 */
- { Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b001),
- access_actlr, reset_actlr, ACTLR_EL1 },
+ { SYS_DESC(SYS_ACTLR_EL1), access_actlr, reset_actlr, ACTLR_EL1 },
};
static const struct sys_reg_desc genericv8_cp15_regs[] = {
diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
index 68634c630cdd..ab9f5f0fb2c7 100644
--- a/arch/arm64/mm/context.c
+++ b/arch/arm64/mm/context.c
@@ -119,9 +119,6 @@ static void flush_context(unsigned int cpu)
/* Queue a TLB invalidate and flush the I-cache if necessary. */
cpumask_setall(&tlb_flush_pending);
-
- if (icache_is_aivivt())
- __flush_icache_all();
}
static bool check_update_reserved_asid(u64 asid, u64 newasid)
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 81cdb2e844ed..3216e098c058 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -28,6 +28,7 @@
#include <linux/dma-contiguous.h>
#include <linux/vmalloc.h>
#include <linux/swiotlb.h>
+#include <linux/pci.h>
#include <asm/cacheflush.h>
@@ -308,24 +309,15 @@ static void __swiotlb_sync_sg_for_device(struct device *dev,
sg->length, dir);
}
-static int __swiotlb_mmap(struct device *dev,
- struct vm_area_struct *vma,
- void *cpu_addr, dma_addr_t dma_addr, size_t size,
- unsigned long attrs)
+static int __swiotlb_mmap_pfn(struct vm_area_struct *vma,
+ unsigned long pfn, size_t size)
{
int ret = -ENXIO;
unsigned long nr_vma_pages = (vma->vm_end - vma->vm_start) >>
PAGE_SHIFT;
unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
- unsigned long pfn = dma_to_phys(dev, dma_addr) >> PAGE_SHIFT;
unsigned long off = vma->vm_pgoff;
- vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
- is_device_dma_coherent(dev));
-
- if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
- return ret;
-
if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
ret = remap_pfn_range(vma, vma->vm_start,
pfn + off,
@@ -336,19 +328,43 @@ static int __swiotlb_mmap(struct device *dev,
return ret;
}
-static int __swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt,
- void *cpu_addr, dma_addr_t handle, size_t size,
- unsigned long attrs)
+static int __swiotlb_mmap(struct device *dev,
+ struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ unsigned long attrs)
+{
+ int ret;
+ unsigned long pfn = dma_to_phys(dev, dma_addr) >> PAGE_SHIFT;
+
+ vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
+ is_device_dma_coherent(dev));
+
+ if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
+ return ret;
+
+ return __swiotlb_mmap_pfn(vma, pfn, size);
+}
+
+static int __swiotlb_get_sgtable_page(struct sg_table *sgt,
+ struct page *page, size_t size)
{
int ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
if (!ret)
- sg_set_page(sgt->sgl, phys_to_page(dma_to_phys(dev, handle)),
- PAGE_ALIGN(size), 0);
+ sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
return ret;
}
+static int __swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt,
+ void *cpu_addr, dma_addr_t handle, size_t size,
+ unsigned long attrs)
+{
+ struct page *page = phys_to_page(dma_to_phys(dev, handle));
+
+ return __swiotlb_get_sgtable_page(sgt, page, size);
+}
+
static int __swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
if (swiotlb)
@@ -584,20 +600,7 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
*/
gfp |= __GFP_ZERO;
- if (gfpflags_allow_blocking(gfp)) {
- struct page **pages;
- pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, coherent);
-
- pages = iommu_dma_alloc(dev, iosize, gfp, attrs, ioprot,
- handle, flush_page);
- if (!pages)
- return NULL;
-
- addr = dma_common_pages_remap(pages, size, VM_USERMAP, prot,
- __builtin_return_address(0));
- if (!addr)
- iommu_dma_free(dev, pages, iosize, handle);
- } else {
+ if (!gfpflags_allow_blocking(gfp)) {
struct page *page;
/*
* In atomic context we can't remap anything, so we'll only
@@ -621,6 +624,45 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
__free_from_pool(addr, size);
addr = NULL;
}
+ } else if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
+ pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, coherent);
+ struct page *page;
+
+ page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
+ get_order(size), gfp);
+ if (!page)
+ return NULL;
+
+ *handle = iommu_dma_map_page(dev, page, 0, iosize, ioprot);
+ if (iommu_dma_mapping_error(dev, *handle)) {
+ dma_release_from_contiguous(dev, page,
+ size >> PAGE_SHIFT);
+ return NULL;
+ }
+ if (!coherent)
+ __dma_flush_area(page_to_virt(page), iosize);
+
+ addr = dma_common_contiguous_remap(page, size, VM_USERMAP,
+ prot,
+ __builtin_return_address(0));
+ if (!addr) {
+ iommu_dma_unmap_page(dev, *handle, iosize, 0, attrs);
+ dma_release_from_contiguous(dev, page,
+ size >> PAGE_SHIFT);
+ }
+ } else {
+ pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, coherent);
+ struct page **pages;
+
+ pages = iommu_dma_alloc(dev, iosize, gfp, attrs, ioprot,
+ handle, flush_page);
+ if (!pages)
+ return NULL;
+
+ addr = dma_common_pages_remap(pages, size, VM_USERMAP, prot,
+ __builtin_return_address(0));
+ if (!addr)
+ iommu_dma_free(dev, pages, iosize, handle);
}
return addr;
}
@@ -632,7 +674,8 @@ static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
size = PAGE_ALIGN(size);
/*
- * @cpu_addr will be one of 3 things depending on how it was allocated:
+ * @cpu_addr will be one of 4 things depending on how it was allocated:
+ * - A remapped array of pages for contiguous allocations.
* - A remapped array of pages from iommu_dma_alloc(), for all
* non-atomic allocations.
* - A non-cacheable alias from the atomic pool, for atomic
@@ -644,6 +687,12 @@ static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
if (__in_atomic_pool(cpu_addr, size)) {
iommu_dma_unmap_page(dev, handle, iosize, 0, 0);
__free_from_pool(cpu_addr, size);
+ } else if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
+ struct page *page = vmalloc_to_page(cpu_addr);
+
+ iommu_dma_unmap_page(dev, handle, iosize, 0, attrs);
+ dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
+ dma_common_free_remap(cpu_addr, size, VM_USERMAP);
} else if (is_vmalloc_addr(cpu_addr)){
struct vm_struct *area = find_vm_area(cpu_addr);
@@ -670,6 +719,15 @@ static int __iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
return ret;
+ if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
+ /*
+ * DMA_ATTR_FORCE_CONTIGUOUS allocations are always remapped,
+ * hence in the vmalloc space.
+ */
+ unsigned long pfn = vmalloc_to_pfn(cpu_addr);
+ return __swiotlb_mmap_pfn(vma, pfn, size);
+ }
+
area = find_vm_area(cpu_addr);
if (WARN_ON(!area || !area->pages))
return -ENXIO;
@@ -684,6 +742,15 @@ static int __iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
struct vm_struct *area = find_vm_area(cpu_addr);
+ if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
+ /*
+ * DMA_ATTR_FORCE_CONTIGUOUS allocations are always remapped,
+ * hence in the vmalloc space.
+ */
+ struct page *page = vmalloc_to_page(cpu_addr);
+ return __swiotlb_get_sgtable_page(sgt, page, size);
+ }
+
if (WARN_ON(!area || !area->pages))
return -ENXIO;
@@ -813,34 +880,26 @@ static const struct dma_map_ops iommu_dma_ops = {
.mapping_error = iommu_dma_mapping_error,
};
-/*
- * TODO: Right now __iommu_setup_dma_ops() gets called too early to do
- * everything it needs to - the device is only partially created and the
- * IOMMU driver hasn't seen it yet, so it can't have a group. Thus we
- * need this delayed attachment dance. Once IOMMU probe ordering is sorted
- * to move the arch_setup_dma_ops() call later, all the notifier bits below
- * become unnecessary, and will go away.
- */
-struct iommu_dma_notifier_data {
- struct list_head list;
- struct device *dev;
- const struct iommu_ops *ops;
- u64 dma_base;
- u64 size;
-};
-static LIST_HEAD(iommu_dma_masters);
-static DEFINE_MUTEX(iommu_dma_notifier_lock);
+static int __init __iommu_dma_init(void)
+{
+ return iommu_dma_init();
+}
+arch_initcall(__iommu_dma_init);
-static bool do_iommu_attach(struct device *dev, const struct iommu_ops *ops,
- u64 dma_base, u64 size)
+static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
+ const struct iommu_ops *ops)
{
- struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
+ struct iommu_domain *domain;
+
+ if (!ops)
+ return;
/*
- * If the IOMMU driver has the DMA domain support that we require,
- * then the IOMMU core will have already configured a group for this
- * device, and allocated the default domain for that group.
+ * The IOMMU core code allocates the default DMA domain, which the
+ * underlying IOMMU driver needs to support via the dma-iommu layer.
*/
+ domain = iommu_get_domain_for_dev(dev);
+
if (!domain)
goto out_err;
@@ -851,109 +910,11 @@ static bool do_iommu_attach(struct device *dev, const struct iommu_ops *ops,
dev->dma_ops = &iommu_dma_ops;
}
- return true;
+ return;
+
out_err:
- pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
+ pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
dev_name(dev));
- return false;
-}
-
-static void queue_iommu_attach(struct device *dev, const struct iommu_ops *ops,
- u64 dma_base, u64 size)
-{
- struct iommu_dma_notifier_data *iommudata;
-
- iommudata = kzalloc(sizeof(*iommudata), GFP_KERNEL);
- if (!iommudata)
- return;
-
- iommudata->dev = dev;
- iommudata->ops = ops;
- iommudata->dma_base = dma_base;
- iommudata->size = size;
-
- mutex_lock(&iommu_dma_notifier_lock);
- list_add(&iommudata->list, &iommu_dma_masters);
- mutex_unlock(&iommu_dma_notifier_lock);
-}
-
-static int __iommu_attach_notifier(struct notifier_block *nb,
- unsigned long action, void *data)
-{
- struct iommu_dma_notifier_data *master, *tmp;
-
- if (action != BUS_NOTIFY_BIND_DRIVER)
- return 0;
-
- mutex_lock(&iommu_dma_notifier_lock);
- list_for_each_entry_safe(master, tmp, &iommu_dma_masters, list) {
- if (data == master->dev && do_iommu_attach(master->dev,
- master->ops, master->dma_base, master->size)) {
- list_del(&master->list);
- kfree(master);
- break;
- }
- }
- mutex_unlock(&iommu_dma_notifier_lock);
- return 0;
-}
-
-static int __init register_iommu_dma_ops_notifier(struct bus_type *bus)
-{
- struct notifier_block *nb = kzalloc(sizeof(*nb), GFP_KERNEL);
- int ret;
-
- if (!nb)
- return -ENOMEM;
-
- nb->notifier_call = __iommu_attach_notifier;
-
- ret = bus_register_notifier(bus, nb);
- if (ret) {
- pr_warn("Failed to register DMA domain notifier; IOMMU DMA ops unavailable on bus '%s'\n",
- bus->name);
- kfree(nb);
- }
- return ret;
-}
-
-static int __init __iommu_dma_init(void)
-{
- int ret;
-
- ret = iommu_dma_init();
- if (!ret)
- ret = register_iommu_dma_ops_notifier(&platform_bus_type);
- if (!ret)
- ret = register_iommu_dma_ops_notifier(&amba_bustype);
-#ifdef CONFIG_PCI
- if (!ret)
- ret = register_iommu_dma_ops_notifier(&pci_bus_type);
-#endif
- return ret;
-}
-arch_initcall(__iommu_dma_init);
-
-static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
- const struct iommu_ops *ops)
-{
- struct iommu_group *group;
-
- if (!ops)
- return;
- /*
- * TODO: As a concession to the future, we're ready to handle being
- * called both early and late (i.e. after bus_add_device). Once all
- * the platform bus code is reworked to call us late and the notifier
- * junk above goes away, move the body of do_iommu_attach here.
- */
- group = iommu_group_get(dev);
- if (group) {
- do_iommu_attach(dev, ops, dma_base, size);
- iommu_group_put(group);
- } else {
- queue_iommu_attach(dev, ops, dma_base, size);
- }
}
void arch_teardown_dma_ops(struct device *dev)
@@ -977,4 +938,11 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
dev->archdata.dma_coherent = coherent;
__iommu_setup_dma_ops(dev, dma_base, size, iommu);
+
+#ifdef CONFIG_XEN
+ if (xen_initial_domain()) {
+ dev->archdata.dev_dma_ops = dev->dma_ops;
+ dev->dma_ops = xen_dma_ops;
+ }
+#endif
}
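
For context, a minimal driver-side sketch (not part of the patch; the helper names are made up) of how the new DMA_ATTR_FORCE_CONTIGUOUS path in __iommu_alloc_attrs() gets exercised. The buffer is carved out of CMA, remapped into vmalloc space and mapped through the IOMMU as one contiguous block:

	#include <linux/dma-mapping.h>

	/* Hypothetical helpers; dev, size and error handling belong to the caller. */
	static void *demo_alloc_contig(struct device *dev, size_t size,
				       dma_addr_t *iova)
	{
		return dma_alloc_attrs(dev, size, iova, GFP_KERNEL,
				       DMA_ATTR_FORCE_CONTIGUOUS);
	}

	static void demo_free_contig(struct device *dev, size_t size,
				     void *cpu_addr, dma_addr_t iova)
	{
		dma_free_attrs(dev, size, cpu_addr, iova,
			       DMA_ATTR_FORCE_CONTIGUOUS);
	}

The matching __iommu_mmap_attrs()/__iommu_get_sgtable() hunks above exist precisely because such a buffer lives in vmalloc space rather than behind an area->pages array.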
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 1b35b8bddbfb..37b95dff0b07 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -174,12 +174,33 @@ static bool is_el1_instruction_abort(unsigned int esr)
return ESR_ELx_EC(esr) == ESR_ELx_EC_IABT_CUR;
}
+static inline bool is_permission_fault(unsigned int esr, struct pt_regs *regs,
+ unsigned long addr)
+{
+ unsigned int ec = ESR_ELx_EC(esr);
+ unsigned int fsc_type = esr & ESR_ELx_FSC_TYPE;
+
+ if (ec != ESR_ELx_EC_DABT_CUR && ec != ESR_ELx_EC_IABT_CUR)
+ return false;
+
+ if (fsc_type == ESR_ELx_FSC_PERM)
+ return true;
+
+ if (addr < USER_DS && system_uses_ttbr0_pan())
+ return fsc_type == ESR_ELx_FSC_FAULT &&
+ (regs->pstate & PSR_PAN_BIT);
+
+ return false;
+}
+
/*
* The kernel tried to access some page that wasn't present.
*/
static void __do_kernel_fault(struct mm_struct *mm, unsigned long addr,
unsigned int esr, struct pt_regs *regs)
{
+ const char *msg;
+
/*
* Are we prepared to handle this kernel fault?
* We are almost certainly not prepared to handle instruction faults.
@@ -191,9 +212,20 @@ static void __do_kernel_fault(struct mm_struct *mm, unsigned long addr,
* No handler, we'll have to terminate things with extreme prejudice.
*/
bust_spinlocks(1);
- pr_alert("Unable to handle kernel %s at virtual address %08lx\n",
- (addr < PAGE_SIZE) ? "NULL pointer dereference" :
- "paging request", addr);
+
+ if (is_permission_fault(esr, regs, addr)) {
+ if (esr & ESR_ELx_WNR)
+ msg = "write to read-only memory";
+ else
+ msg = "read from unreadable memory";
+ } else if (addr < PAGE_SIZE) {
+ msg = "NULL pointer dereference";
+ } else {
+ msg = "paging request";
+ }
+
+ pr_alert("Unable to handle kernel %s at virtual address %08lx\n", msg,
+ addr);
show_pte(mm, addr);
die("Oops", regs, esr);
@@ -287,21 +319,6 @@ out:
return fault;
}
-static inline bool is_permission_fault(unsigned int esr, struct pt_regs *regs)
-{
- unsigned int ec = ESR_ELx_EC(esr);
- unsigned int fsc_type = esr & ESR_ELx_FSC_TYPE;
-
- if (ec != ESR_ELx_EC_DABT_CUR && ec != ESR_ELx_EC_IABT_CUR)
- return false;
-
- if (system_uses_ttbr0_pan())
- return fsc_type == ESR_ELx_FSC_FAULT &&
- (regs->pstate & PSR_PAN_BIT);
- else
- return fsc_type == ESR_ELx_FSC_PERM;
-}
-
static bool is_el0_instruction_abort(unsigned int esr)
{
return ESR_ELx_EC(esr) == ESR_ELx_EC_IABT_LOW;
@@ -339,7 +356,7 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
mm_flags |= FAULT_FLAG_WRITE;
}
- if (addr < USER_DS && is_permission_fault(esr, regs)) {
+ if (addr < USER_DS && is_permission_fault(esr, regs, addr)) {
/* regs->orig_addr_limit may be 0 if we entered from EL0 */
if (regs->orig_addr_limit == KERNEL_DS)
die("Accessing user space memory with fs=KERNEL_DS", regs, esr);
diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c
index 554a2558c12e..21a8d828cbf4 100644
--- a/arch/arm64/mm/flush.c
+++ b/arch/arm64/mm/flush.c
@@ -22,7 +22,7 @@
#include <linux/pagemap.h>
#include <asm/cacheflush.h>
-#include <asm/cachetype.h>
+#include <asm/cache.h>
#include <asm/tlbflush.h>
void sync_icache_aliases(void *kaddr, unsigned long len)
@@ -65,8 +65,6 @@ void __sync_icache_dcache(pte_t pte, unsigned long addr)
if (!test_and_set_bit(PG_dcache_clean, &page->flags))
sync_icache_aliases(page_address(page),
PAGE_SIZE << compound_order(page));
- else if (icache_is_aivivt())
- __flush_icache_all();
}
/*
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index e19e06593e37..5960bef0170d 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -30,6 +30,7 @@
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/sort.h>
+#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/dma-mapping.h>
#include <linux/dma-contiguous.h>
@@ -37,6 +38,8 @@
#include <linux/swiotlb.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
+#include <linux/kexec.h>
+#include <linux/crash_dump.h>
#include <asm/boot.h>
#include <asm/fixmap.h>
@@ -77,6 +80,142 @@ static int __init early_initrd(char *p)
early_param("initrd", early_initrd);
#endif
+#ifdef CONFIG_KEXEC_CORE
+/*
+ * reserve_crashkernel() - reserves memory for crash kernel
+ *
+ * This function reserves the memory area given by the "crashkernel=" kernel
+ * command line parameter. The reserved memory is used by the dump capture
+ * kernel when the primary kernel crashes.
+ */
+static void __init reserve_crashkernel(void)
+{
+ unsigned long long crash_base, crash_size;
+ int ret;
+
+ ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
+ &crash_size, &crash_base);
+ /* no crashkernel= or invalid value specified */
+ if (ret || !crash_size)
+ return;
+
+ crash_size = PAGE_ALIGN(crash_size);
+
+ if (crash_base == 0) {
+ /* Current arm64 boot protocol requires 2MB alignment */
+ crash_base = memblock_find_in_range(0, ARCH_LOW_ADDRESS_LIMIT,
+ crash_size, SZ_2M);
+ if (crash_base == 0) {
+ pr_warn("cannot allocate crashkernel (size:0x%llx)\n",
+ crash_size);
+ return;
+ }
+ } else {
+ /* User specifies base address explicitly. */
+ if (!memblock_is_region_memory(crash_base, crash_size)) {
+ pr_warn("cannot reserve crashkernel: region is not memory\n");
+ return;
+ }
+
+ if (memblock_is_region_reserved(crash_base, crash_size)) {
+ pr_warn("cannot reserve crashkernel: region overlaps reserved memory\n");
+ return;
+ }
+
+ if (!IS_ALIGNED(crash_base, SZ_2M)) {
+ pr_warn("cannot reserve crashkernel: base address is not 2MB aligned\n");
+ return;
+ }
+ }
+ memblock_reserve(crash_base, crash_size);
+
+ pr_info("crashkernel reserved: 0x%016llx - 0x%016llx (%lld MB)\n",
+ crash_base, crash_base + crash_size, crash_size >> 20);
+
+ crashk_res.start = crash_base;
+ crashk_res.end = crash_base + crash_size - 1;
+}
+
+static void __init kexec_reserve_crashkres_pages(void)
+{
+#ifdef CONFIG_HIBERNATION
+ phys_addr_t addr;
+ struct page *page;
+
+ if (!crashk_res.end)
+ return;
+
+ /*
+	 * To reduce the size of the hibernation image, all the crash kernel
+	 * pages are marked as Reserved initially.
+ */
+ for (addr = crashk_res.start; addr < (crashk_res.end + 1);
+ addr += PAGE_SIZE) {
+ page = phys_to_page(addr);
+ SetPageReserved(page);
+ }
+#endif
+}
+#else
+static void __init reserve_crashkernel(void)
+{
+}
+
+static void __init kexec_reserve_crashkres_pages(void)
+{
+}
+#endif /* CONFIG_KEXEC_CORE */
+
+#ifdef CONFIG_CRASH_DUMP
+static int __init early_init_dt_scan_elfcorehdr(unsigned long node,
+ const char *uname, int depth, void *data)
+{
+ const __be32 *reg;
+ int len;
+
+ if (depth != 1 || strcmp(uname, "chosen") != 0)
+ return 0;
+
+ reg = of_get_flat_dt_prop(node, "linux,elfcorehdr", &len);
+ if (!reg || (len < (dt_root_addr_cells + dt_root_size_cells)))
+ return 1;
+
+ elfcorehdr_addr = dt_mem_next_cell(dt_root_addr_cells, &reg);
+ elfcorehdr_size = dt_mem_next_cell(dt_root_size_cells, &reg);
+
+ return 1;
+}
+
+/*
+ * reserve_elfcorehdr() - reserves memory for elf core header
+ *
+ * This function reserves the memory occupied by an elf core header
+ * described in the device tree. This region contains all the
+ * information about the primary kernel's core image and is used by a dump
+ * capture kernel to access the primary kernel's system memory.
+ */
+static void __init reserve_elfcorehdr(void)
+{
+ of_scan_flat_dt(early_init_dt_scan_elfcorehdr, NULL);
+
+ if (!elfcorehdr_size)
+ return;
+
+ if (memblock_is_region_reserved(elfcorehdr_addr, elfcorehdr_size)) {
+ pr_warn("elfcorehdr is overlapped\n");
+ return;
+ }
+
+ memblock_reserve(elfcorehdr_addr, elfcorehdr_size);
+
+ pr_info("Reserving %lldKB of memory at 0x%llx for elfcorehdr\n",
+ elfcorehdr_size >> 10, elfcorehdr_addr);
+}
+#else
+static void __init reserve_elfcorehdr(void)
+{
+}
+#endif /* CONFIG_CRASH_DUMP */
/*
* Return the maximum physical address for ZONE_DMA (DMA_BIT_MASK(32)). It
* currently assumes that for memory starting above 4G, 32-bit devices will
@@ -188,10 +327,45 @@ static int __init early_mem(char *p)
}
early_param("mem", early_mem);
+static int __init early_init_dt_scan_usablemem(unsigned long node,
+ const char *uname, int depth, void *data)
+{
+ struct memblock_region *usablemem = data;
+ const __be32 *reg;
+ int len;
+
+ if (depth != 1 || strcmp(uname, "chosen") != 0)
+ return 0;
+
+ reg = of_get_flat_dt_prop(node, "linux,usable-memory-range", &len);
+ if (!reg || (len < (dt_root_addr_cells + dt_root_size_cells)))
+ return 1;
+
+ usablemem->base = dt_mem_next_cell(dt_root_addr_cells, &reg);
+ usablemem->size = dt_mem_next_cell(dt_root_size_cells, &reg);
+
+ return 1;
+}
+
+static void __init fdt_enforce_memory_region(void)
+{
+ struct memblock_region reg = {
+ .size = 0,
+ };
+
+ of_scan_flat_dt(early_init_dt_scan_usablemem, &reg);
+
+ if (reg.size)
+ memblock_cap_memory_range(reg.base, reg.size);
+}
+
void __init arm64_memblock_init(void)
{
const s64 linear_region_size = -(s64)PAGE_OFFSET;
+ /* Handle linux,usable-memory-range property */
+ fdt_enforce_memory_region();
+
/*
* Ensure that the linear region takes up exactly half of the kernel
* virtual address space. This way, we can distinguish a linear address
@@ -297,6 +471,11 @@ void __init arm64_memblock_init(void)
arm64_dma_phys_limit = max_zone_dma_phys();
else
arm64_dma_phys_limit = PHYS_MASK + 1;
+
+ reserve_crashkernel();
+
+ reserve_elfcorehdr();
+
dma_contiguous_reserve(arm64_dma_phys_limit);
memblock_allow_resize();
@@ -416,6 +595,8 @@ void __init mem_init(void)
/* this will put all unused low memory onto the freelists */
free_all_bootmem();
+ kexec_reserve_crashkres_pages();
+
mem_init_print_info(NULL);
#define MLK(b, t) b, t, ((t) - (b)) >> 10
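
A brief illustration of how these reservations are driven (all values hypothetical): booting the primary kernel with crashkernel=256M lets reserve_crashkernel() pick a 2MB-aligned region below ARCH_LOW_ADDRESS_LIMIT, while crashkernel=256M@0x60000000 requests a fixed base, which must itself be 2MB aligned and lie entirely within memory. The dump-capture kernel then learns its restricted RAM window and the location of the ELF core header from the /chosen properties scanned above, linux,usable-memory-range and linux,elfcorehdr, each encoded as an (address, size) pair using the root #address-cells/#size-cells.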
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index d28dbcf596b6..0c429ec6fde8 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -22,6 +22,8 @@
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/kexec.h>
#include <linux/libfdt.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
@@ -43,6 +45,9 @@
#include <asm/mmu_context.h>
#include <asm/ptdump.h>
+#define NO_BLOCK_MAPPINGS BIT(0)
+#define NO_CONT_MAPPINGS BIT(1)
+
u64 idmap_t0sz = TCR_T0SZ(VA_BITS);
u64 kimage_voffset __ro_after_init;
@@ -103,33 +108,27 @@ static bool pgattr_change_is_safe(u64 old, u64 new)
*/
static const pteval_t mask = PTE_PXN | PTE_RDONLY | PTE_WRITE;
- return old == 0 || new == 0 || ((old ^ new) & ~mask) == 0;
+ /* creating or taking down mappings is always safe */
+ if (old == 0 || new == 0)
+ return true;
+
+ /* live contiguous mappings may not be manipulated at all */
+ if ((old | new) & PTE_CONT)
+ return false;
+
+ return ((old ^ new) & ~mask) == 0;
}
-static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
- unsigned long end, unsigned long pfn,
- pgprot_t prot,
- phys_addr_t (*pgtable_alloc)(void))
+static void init_pte(pmd_t *pmd, unsigned long addr, unsigned long end,
+ phys_addr_t phys, pgprot_t prot)
{
pte_t *pte;
- BUG_ON(pmd_sect(*pmd));
- if (pmd_none(*pmd)) {
- phys_addr_t pte_phys;
- BUG_ON(!pgtable_alloc);
- pte_phys = pgtable_alloc();
- pte = pte_set_fixmap(pte_phys);
- __pmd_populate(pmd, pte_phys, PMD_TYPE_TABLE);
- pte_clear_fixmap();
- }
- BUG_ON(pmd_bad(*pmd));
-
pte = pte_set_fixmap_offset(pmd, addr);
do {
pte_t old_pte = *pte;
- set_pte(pte, pfn_pte(pfn, prot));
- pfn++;
+ set_pte(pte, pfn_pte(__phys_to_pfn(phys), prot));
/*
* After the PTE entry has been populated once, we
@@ -137,32 +136,51 @@ static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
*/
BUG_ON(!pgattr_change_is_safe(pte_val(old_pte), pte_val(*pte)));
+ phys += PAGE_SIZE;
} while (pte++, addr += PAGE_SIZE, addr != end);
pte_clear_fixmap();
}
-static void alloc_init_pmd(pud_t *pud, unsigned long addr, unsigned long end,
- phys_addr_t phys, pgprot_t prot,
- phys_addr_t (*pgtable_alloc)(void),
- bool page_mappings_only)
+static void alloc_init_cont_pte(pmd_t *pmd, unsigned long addr,
+ unsigned long end, phys_addr_t phys,
+ pgprot_t prot,
+ phys_addr_t (*pgtable_alloc)(void),
+ int flags)
{
- pmd_t *pmd;
unsigned long next;
- /*
- * Check for initial section mappings in the pgd/pud and remove them.
- */
- BUG_ON(pud_sect(*pud));
- if (pud_none(*pud)) {
- phys_addr_t pmd_phys;
+ BUG_ON(pmd_sect(*pmd));
+ if (pmd_none(*pmd)) {
+ phys_addr_t pte_phys;
BUG_ON(!pgtable_alloc);
- pmd_phys = pgtable_alloc();
- pmd = pmd_set_fixmap(pmd_phys);
- __pud_populate(pud, pmd_phys, PUD_TYPE_TABLE);
- pmd_clear_fixmap();
+ pte_phys = pgtable_alloc();
+ __pmd_populate(pmd, pte_phys, PMD_TYPE_TABLE);
}
- BUG_ON(pud_bad(*pud));
+ BUG_ON(pmd_bad(*pmd));
+
+ do {
+ pgprot_t __prot = prot;
+
+ next = pte_cont_addr_end(addr, end);
+
+ /* use a contiguous mapping if the range is suitably aligned */
+ if ((((addr | next | phys) & ~CONT_PTE_MASK) == 0) &&
+ (flags & NO_CONT_MAPPINGS) == 0)
+ __prot = __pgprot(pgprot_val(prot) | PTE_CONT);
+
+ init_pte(pmd, addr, next, phys, __prot);
+
+ phys += next - addr;
+ } while (addr = next, addr != end);
+}
+
+static void init_pmd(pud_t *pud, unsigned long addr, unsigned long end,
+ phys_addr_t phys, pgprot_t prot,
+ phys_addr_t (*pgtable_alloc)(void), int flags)
+{
+ unsigned long next;
+ pmd_t *pmd;
pmd = pmd_set_fixmap_offset(pud, addr);
do {
@@ -172,7 +190,7 @@ static void alloc_init_pmd(pud_t *pud, unsigned long addr, unsigned long end,
/* try section mapping first */
if (((addr | next | phys) & ~SECTION_MASK) == 0 &&
- !page_mappings_only) {
+ (flags & NO_BLOCK_MAPPINGS) == 0) {
pmd_set_huge(pmd, phys, prot);
/*
@@ -182,8 +200,8 @@ static void alloc_init_pmd(pud_t *pud, unsigned long addr, unsigned long end,
BUG_ON(!pgattr_change_is_safe(pmd_val(old_pmd),
pmd_val(*pmd)));
} else {
- alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys),
- prot, pgtable_alloc);
+ alloc_init_cont_pte(pmd, addr, next, phys, prot,
+ pgtable_alloc, flags);
BUG_ON(pmd_val(old_pmd) != 0 &&
pmd_val(old_pmd) != pmd_val(*pmd));
@@ -194,6 +212,41 @@ static void alloc_init_pmd(pud_t *pud, unsigned long addr, unsigned long end,
pmd_clear_fixmap();
}
+static void alloc_init_cont_pmd(pud_t *pud, unsigned long addr,
+ unsigned long end, phys_addr_t phys,
+ pgprot_t prot,
+ phys_addr_t (*pgtable_alloc)(void), int flags)
+{
+ unsigned long next;
+
+ /*
+ * Check for initial section mappings in the pgd/pud.
+ */
+ BUG_ON(pud_sect(*pud));
+ if (pud_none(*pud)) {
+ phys_addr_t pmd_phys;
+ BUG_ON(!pgtable_alloc);
+ pmd_phys = pgtable_alloc();
+ __pud_populate(pud, pmd_phys, PUD_TYPE_TABLE);
+ }
+ BUG_ON(pud_bad(*pud));
+
+ do {
+ pgprot_t __prot = prot;
+
+ next = pmd_cont_addr_end(addr, end);
+
+ /* use a contiguous mapping if the range is suitably aligned */
+ if ((((addr | next | phys) & ~CONT_PMD_MASK) == 0) &&
+ (flags & NO_CONT_MAPPINGS) == 0)
+ __prot = __pgprot(pgprot_val(prot) | PTE_CONT);
+
+ init_pmd(pud, addr, next, phys, __prot, pgtable_alloc, flags);
+
+ phys += next - addr;
+ } while (addr = next, addr != end);
+}
+
static inline bool use_1G_block(unsigned long addr, unsigned long next,
unsigned long phys)
{
@@ -209,7 +262,7 @@ static inline bool use_1G_block(unsigned long addr, unsigned long next,
static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
phys_addr_t phys, pgprot_t prot,
phys_addr_t (*pgtable_alloc)(void),
- bool page_mappings_only)
+ int flags)
{
pud_t *pud;
unsigned long next;
@@ -231,7 +284,8 @@ static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
/*
* For 4K granule only, attempt to put down a 1GB block
*/
- if (use_1G_block(addr, next, phys) && !page_mappings_only) {
+ if (use_1G_block(addr, next, phys) &&
+ (flags & NO_BLOCK_MAPPINGS) == 0) {
pud_set_huge(pud, phys, prot);
/*
@@ -241,8 +295,8 @@ static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
BUG_ON(!pgattr_change_is_safe(pud_val(old_pud),
pud_val(*pud)));
} else {
- alloc_init_pmd(pud, addr, next, phys, prot,
- pgtable_alloc, page_mappings_only);
+ alloc_init_cont_pmd(pud, addr, next, phys, prot,
+ pgtable_alloc, flags);
BUG_ON(pud_val(old_pud) != 0 &&
pud_val(old_pud) != pud_val(*pud));
@@ -257,7 +311,7 @@ static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
unsigned long virt, phys_addr_t size,
pgprot_t prot,
phys_addr_t (*pgtable_alloc)(void),
- bool page_mappings_only)
+ int flags)
{
unsigned long addr, length, end, next;
pgd_t *pgd = pgd_offset_raw(pgdir, virt);
@@ -277,7 +331,7 @@ static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
do {
next = pgd_addr_end(addr, end);
alloc_init_pud(pgd, addr, next, phys, prot, pgtable_alloc,
- page_mappings_only);
+ flags);
phys += next - addr;
} while (pgd++, addr = next, addr != end);
}
@@ -306,82 +360,80 @@ static void __init create_mapping_noalloc(phys_addr_t phys, unsigned long virt,
&phys, virt);
return;
}
- __create_pgd_mapping(init_mm.pgd, phys, virt, size, prot, NULL, false);
+ __create_pgd_mapping(init_mm.pgd, phys, virt, size, prot, NULL,
+ NO_CONT_MAPPINGS);
}
void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
unsigned long virt, phys_addr_t size,
pgprot_t prot, bool page_mappings_only)
{
+ int flags = 0;
+
BUG_ON(mm == &init_mm);
+ if (page_mappings_only)
+ flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
+
__create_pgd_mapping(mm->pgd, phys, virt, size, prot,
- pgd_pgtable_alloc, page_mappings_only);
+ pgd_pgtable_alloc, flags);
}
-static void create_mapping_late(phys_addr_t phys, unsigned long virt,
- phys_addr_t size, pgprot_t prot)
+static void update_mapping_prot(phys_addr_t phys, unsigned long virt,
+ phys_addr_t size, pgprot_t prot)
{
if (virt < VMALLOC_START) {
- pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
+ pr_warn("BUG: not updating mapping for %pa at 0x%016lx - outside kernel range\n",
&phys, virt);
return;
}
- __create_pgd_mapping(init_mm.pgd, phys, virt, size, prot,
- NULL, debug_pagealloc_enabled());
+ __create_pgd_mapping(init_mm.pgd, phys, virt, size, prot, NULL,
+ NO_CONT_MAPPINGS);
+
+ /* flush the TLBs after updating live kernel mappings */
+ flush_tlb_kernel_range(virt, virt + size);
}
-static void __init __map_memblock(pgd_t *pgd, phys_addr_t start, phys_addr_t end)
+static void __init __map_memblock(pgd_t *pgd, phys_addr_t start,
+ phys_addr_t end, pgprot_t prot, int flags)
{
- phys_addr_t kernel_start = __pa_symbol(_text);
- phys_addr_t kernel_end = __pa_symbol(__init_begin);
-
- /*
- * Take care not to create a writable alias for the
- * read-only text and rodata sections of the kernel image.
- */
-
- /* No overlap with the kernel text/rodata */
- if (end < kernel_start || start >= kernel_end) {
- __create_pgd_mapping(pgd, start, __phys_to_virt(start),
- end - start, PAGE_KERNEL,
- early_pgtable_alloc,
- debug_pagealloc_enabled());
- return;
- }
-
- /*
- * This block overlaps the kernel text/rodata mappings.
- * Map the portion(s) which don't overlap.
- */
- if (start < kernel_start)
- __create_pgd_mapping(pgd, start,
- __phys_to_virt(start),
- kernel_start - start, PAGE_KERNEL,
- early_pgtable_alloc,
- debug_pagealloc_enabled());
- if (kernel_end < end)
- __create_pgd_mapping(pgd, kernel_end,
- __phys_to_virt(kernel_end),
- end - kernel_end, PAGE_KERNEL,
- early_pgtable_alloc,
- debug_pagealloc_enabled());
+ __create_pgd_mapping(pgd, start, __phys_to_virt(start), end - start,
+ prot, early_pgtable_alloc, flags);
+}
+void __init mark_linear_text_alias_ro(void)
+{
/*
- * Map the linear alias of the [_text, __init_begin) interval as
- * read-only/non-executable. This makes the contents of the
- * region accessible to subsystems such as hibernate, but
- * protects it from inadvertent modification or execution.
+ * Remove the write permissions from the linear alias of .text/.rodata
*/
- __create_pgd_mapping(pgd, kernel_start, __phys_to_virt(kernel_start),
- kernel_end - kernel_start, PAGE_KERNEL_RO,
- early_pgtable_alloc, debug_pagealloc_enabled());
+ update_mapping_prot(__pa_symbol(_text), (unsigned long)lm_alias(_text),
+ (unsigned long)__init_begin - (unsigned long)_text,
+ PAGE_KERNEL_RO);
}
static void __init map_mem(pgd_t *pgd)
{
+ phys_addr_t kernel_start = __pa_symbol(_text);
+ phys_addr_t kernel_end = __pa_symbol(__init_begin);
struct memblock_region *reg;
+ int flags = 0;
+
+ if (debug_pagealloc_enabled())
+ flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
+
+ /*
+ * Take care not to create a writable alias for the
+ * read-only text and rodata sections of the kernel image.
+ * So temporarily mark them as NOMAP to skip mappings in
+ * the following for-loop
+ */
+ memblock_mark_nomap(kernel_start, kernel_end - kernel_start);
+#ifdef CONFIG_KEXEC_CORE
+ if (crashk_res.end)
+ memblock_mark_nomap(crashk_res.start,
+ resource_size(&crashk_res));
+#endif
/* map all the memory banks */
for_each_memblock(memory, reg) {
@@ -393,33 +445,57 @@ static void __init map_mem(pgd_t *pgd)
if (memblock_is_nomap(reg))
continue;
- __map_memblock(pgd, start, end);
+ __map_memblock(pgd, start, end, PAGE_KERNEL, flags);
+ }
+
+ /*
+ * Map the linear alias of the [_text, __init_begin) interval
+ * as non-executable now, and remove the write permission in
+ * mark_linear_text_alias_ro() below (which will be called after
+ * alternative patching has completed). This makes the contents
+ * of the region accessible to subsystems such as hibernate,
+ * but protects it from inadvertent modification or execution.
+ * Note that contiguous mappings cannot be remapped in this way,
+ * so we should avoid them here.
+ */
+ __map_memblock(pgd, kernel_start, kernel_end,
+ PAGE_KERNEL, NO_CONT_MAPPINGS);
+ memblock_clear_nomap(kernel_start, kernel_end - kernel_start);
+
+#ifdef CONFIG_KEXEC_CORE
+ /*
+ * Use page-level mappings here so that we can shrink the region
+	 * at page granularity and return unused memory to the buddy allocator
+	 * through the /sys/kernel/kexec_crash_size interface.
+ */
+ if (crashk_res.end) {
+ __map_memblock(pgd, crashk_res.start, crashk_res.end + 1,
+ PAGE_KERNEL,
+ NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS);
+ memblock_clear_nomap(crashk_res.start,
+ resource_size(&crashk_res));
}
+#endif
}
void mark_rodata_ro(void)
{
unsigned long section_size;
- section_size = (unsigned long)_etext - (unsigned long)_text;
- create_mapping_late(__pa_symbol(_text), (unsigned long)_text,
- section_size, PAGE_KERNEL_ROX);
/*
* mark .rodata as read only. Use __init_begin rather than __end_rodata
* to cover NOTES and EXCEPTION_TABLE.
*/
section_size = (unsigned long)__init_begin - (unsigned long)__start_rodata;
- create_mapping_late(__pa_symbol(__start_rodata), (unsigned long)__start_rodata,
+ update_mapping_prot(__pa_symbol(__start_rodata), (unsigned long)__start_rodata,
section_size, PAGE_KERNEL_RO);
- /* flush the TLBs after updating live kernel mappings */
- flush_tlb_all();
-
debug_checkwx();
}
static void __init map_kernel_segment(pgd_t *pgd, void *va_start, void *va_end,
- pgprot_t prot, struct vm_struct *vma)
+ pgprot_t prot, struct vm_struct *vma,
+ int flags)
{
phys_addr_t pa_start = __pa_symbol(va_start);
unsigned long size = va_end - va_start;
@@ -428,7 +504,7 @@ static void __init map_kernel_segment(pgd_t *pgd, void *va_start, void *va_end,
BUG_ON(!PAGE_ALIGNED(size));
__create_pgd_mapping(pgd, pa_start, (unsigned long)va_start, size, prot,
- early_pgtable_alloc, debug_pagealloc_enabled());
+ early_pgtable_alloc, flags);
vma->addr = va_start;
vma->phys_addr = pa_start;
@@ -439,18 +515,39 @@ static void __init map_kernel_segment(pgd_t *pgd, void *va_start, void *va_end,
vm_area_add_early(vma);
}
+static int __init parse_rodata(char *arg)
+{
+ return strtobool(arg, &rodata_enabled);
+}
+early_param("rodata", parse_rodata);
+
/*
* Create fine-grained mappings for the kernel.
*/
static void __init map_kernel(pgd_t *pgd)
{
- static struct vm_struct vmlinux_text, vmlinux_rodata, vmlinux_init, vmlinux_data;
+ static struct vm_struct vmlinux_text, vmlinux_rodata, vmlinux_inittext,
+ vmlinux_initdata, vmlinux_data;
- map_kernel_segment(pgd, _text, _etext, PAGE_KERNEL_EXEC, &vmlinux_text);
- map_kernel_segment(pgd, __start_rodata, __init_begin, PAGE_KERNEL, &vmlinux_rodata);
- map_kernel_segment(pgd, __init_begin, __init_end, PAGE_KERNEL_EXEC,
- &vmlinux_init);
- map_kernel_segment(pgd, _data, _end, PAGE_KERNEL, &vmlinux_data);
+ /*
+ * External debuggers may need to write directly to the text
+ * mapping to install SW breakpoints. Allow this (only) when
+ * explicitly requested with rodata=off.
+ */
+ pgprot_t text_prot = rodata_enabled ? PAGE_KERNEL_ROX : PAGE_KERNEL_EXEC;
+
+ /*
+ * Only rodata will be remapped with different permissions later on,
+ * all other segments are allowed to use contiguous mappings.
+ */
+ map_kernel_segment(pgd, _text, _etext, text_prot, &vmlinux_text, 0);
+ map_kernel_segment(pgd, __start_rodata, __inittext_begin, PAGE_KERNEL,
+ &vmlinux_rodata, NO_CONT_MAPPINGS);
+ map_kernel_segment(pgd, __inittext_begin, __inittext_end, text_prot,
+ &vmlinux_inittext, 0);
+ map_kernel_segment(pgd, __initdata_begin, __initdata_end, PAGE_KERNEL,
+ &vmlinux_initdata, 0);
+ map_kernel_segment(pgd, _data, _end, PAGE_KERNEL, &vmlinux_data, 0);
if (!pgd_val(*pgd_offset_raw(pgd, FIXADDR_START))) {
/*
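
A rough sketch (not from the patch) of the eligibility test that alloc_init_cont_pte() and alloc_init_cont_pmd() apply above. With 4K pages a PTE_CONT block spans 16 entries (64KB), so the virtual range and the physical address must share that alignment, and the caller must not have asked for NO_CONT_MAPPINGS:

	static bool can_use_cont_mapping(unsigned long addr, unsigned long next,
					 phys_addr_t phys, unsigned long cont_mask,
					 int flags)
	{
		if (flags & NO_CONT_MAPPINGS)
			return false;
		/* cont_mask is CONT_PTE_MASK or CONT_PMD_MASK for the level */
		return ((addr | next | phys) & ~cont_mask) == 0;
	}

This is also why pgattr_change_is_safe() now refuses to touch live PTE_CONT entries, and why the rodata segment and the kernel-text linear alias are mapped with NO_CONT_MAPPINGS: their permissions are rewritten later through update_mapping_prot(), which can only do that safely on non-contiguous entries.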
diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
index 8def55e7249b..a682a0a2a0fa 100644
--- a/arch/arm64/mm/pageattr.c
+++ b/arch/arm64/mm/pageattr.c
@@ -17,6 +17,7 @@
#include <linux/vmalloc.h>
#include <asm/pgtable.h>
+#include <asm/set_memory.h>
#include <asm/tlbflush.h>
struct page_change_data {
@@ -125,20 +126,23 @@ int set_memory_x(unsigned long addr, int numpages)
}
EXPORT_SYMBOL_GPL(set_memory_x);
-#ifdef CONFIG_DEBUG_PAGEALLOC
-void __kernel_map_pages(struct page *page, int numpages, int enable)
+int set_memory_valid(unsigned long addr, int numpages, int enable)
{
- unsigned long addr = (unsigned long) page_address(page);
-
if (enable)
- __change_memory_common(addr, PAGE_SIZE * numpages,
+ return __change_memory_common(addr, PAGE_SIZE * numpages,
__pgprot(PTE_VALID),
__pgprot(0));
else
- __change_memory_common(addr, PAGE_SIZE * numpages,
+ return __change_memory_common(addr, PAGE_SIZE * numpages,
__pgprot(0),
__pgprot(PTE_VALID));
}
+
+#ifdef CONFIG_DEBUG_PAGEALLOC
+void __kernel_map_pages(struct page *page, int numpages, int enable)
+{
+ set_memory_valid((unsigned long)page_address(page), numpages, enable);
+}
#ifdef CONFIG_HIBERNATION
/*
* When built with CONFIG_DEBUG_PAGEALLOC and CONFIG_HIBERNATION, this function
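
The new set_memory_valid() above exposes the PTE_VALID toggling that used to be private to __kernel_map_pages(). A hedged usage sketch (the range is hypothetical and must be mapped at page granularity):

	set_memory_valid(addr, nr_pages, 0);	/* clear PTE_VALID: accesses now fault */
	/* ... window in which stray accesses to the range are caught ... */
	set_memory_valid(addr, nr_pages, 1);	/* make the range accessible again */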
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index d68abde52740..c6e53580aefe 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -27,6 +27,7 @@
#include <asm/byteorder.h>
#include <asm/cacheflush.h>
#include <asm/debug-monitors.h>
+#include <asm/set_memory.h>
#include "bpf_jit.h"
diff --git a/arch/cris/arch-v32/drivers/pci/bios.c b/arch/cris/arch-v32/drivers/pci/bios.c
index 212266a2c5d9..394c2a73d5e2 100644
--- a/arch/cris/arch-v32/drivers/pci/bios.c
+++ b/arch/cris/arch-v32/drivers/pci/bios.c
@@ -14,28 +14,6 @@ void pcibios_set_master(struct pci_dev *dev)
pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
}
-int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
- enum pci_mmap_state mmap_state, int write_combine)
-{
- unsigned long prot;
-
- /* Leave vm_pgoff as-is, the PCI space address is the physical
- * address on this platform.
- */
- prot = pgprot_val(vma->vm_page_prot);
- vma->vm_page_prot = __pgprot(prot);
-
- /* Write-combine setting is ignored, it is changed via the mtrr
- * interfaces on this platform.
- */
- if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
- vma->vm_end - vma->vm_start,
- vma->vm_page_prot))
- return -EAGAIN;
-
- return 0;
-}
-
resource_size_t
pcibios_align_resource(void *data, const struct resource *res,
resource_size_t size, resource_size_t align)
diff --git a/arch/cris/include/asm/pci.h b/arch/cris/include/asm/pci.h
index b1b289df04c7..6e505332b3e3 100644
--- a/arch/cris/include/asm/pci.h
+++ b/arch/cris/include/asm/pci.h
@@ -42,9 +42,7 @@ struct pci_dev;
#define PCI_DMA_BUS_IS_PHYS (1)
#define HAVE_PCI_MMAP
-extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
- enum pci_mmap_state mmap_state, int write_combine);
-
+#define ARCH_GENERIC_PCI_MMAP_RESOURCE
#endif /* __KERNEL__ */
diff --git a/arch/ia64/include/asm/pci.h b/arch/ia64/include/asm/pci.h
index c0835b0dc722..6459f2d46200 100644
--- a/arch/ia64/include/asm/pci.h
+++ b/arch/ia64/include/asm/pci.h
@@ -51,8 +51,9 @@ extern unsigned long ia64_max_iommu_merge_mask;
#define PCI_DMA_BUS_IS_PHYS (ia64_max_iommu_merge_mask == ~0UL)
#define HAVE_PCI_MMAP
-extern int pci_mmap_page_range (struct pci_dev *dev, struct vm_area_struct *vma,
- enum pci_mmap_state mmap_state, int write_combine);
+#define ARCH_GENERIC_PCI_MMAP_RESOURCE
+#define arch_can_pci_mmap_wc() 1
+
#define HAVE_PCI_LEGACY
extern int pci_mmap_legacy_page_range(struct pci_bus *bus,
struct vm_area_struct *vma,
diff --git a/arch/ia64/kernel/asm-offsets.c b/arch/ia64/kernel/asm-offsets.c
index 8786c8b4f187..798bdb209d00 100644
--- a/arch/ia64/kernel/asm-offsets.c
+++ b/arch/ia64/kernel/asm-offsets.c
@@ -56,7 +56,6 @@ void foo(void)
DEFINE(IA64_TASK_PENDING_OFFSET,offsetof (struct task_struct, pending));
DEFINE(IA64_TASK_PID_OFFSET, offsetof (struct task_struct, pid));
DEFINE(IA64_TASK_REAL_PARENT_OFFSET, offsetof (struct task_struct, real_parent));
- DEFINE(IA64_TASK_SIGHAND_OFFSET,offsetof (struct task_struct, sighand));
DEFINE(IA64_TASK_SIGNAL_OFFSET,offsetof (struct task_struct, signal));
DEFINE(IA64_TASK_TGID_OFFSET, offsetof (struct task_struct, tgid));
DEFINE(IA64_TASK_THREAD_KSP_OFFSET, offsetof (struct task_struct, thread.ksp));
@@ -64,9 +63,6 @@ void foo(void)
BLANK();
- DEFINE(IA64_SIGHAND_SIGLOCK_OFFSET,offsetof (struct sighand_struct, siglock));
-
- BLANK();
DEFINE(IA64_SIGNAL_GROUP_STOP_COUNT_OFFSET,offsetof (struct signal_struct,
group_stop_count));
diff --git a/arch/ia64/kernel/crash.c b/arch/ia64/kernel/crash.c
index 2955f359e2a7..75859a07d75b 100644
--- a/arch/ia64/kernel/crash.c
+++ b/arch/ia64/kernel/crash.c
@@ -27,28 +27,6 @@ static int kdump_freeze_monarch;
static int kdump_on_init = 1;
static int kdump_on_fatal_mca = 1;
-static inline Elf64_Word
-*append_elf_note(Elf64_Word *buf, char *name, unsigned type, void *data,
- size_t data_len)
-{
- struct elf_note *note = (struct elf_note *)buf;
- note->n_namesz = strlen(name) + 1;
- note->n_descsz = data_len;
- note->n_type = type;
- buf += (sizeof(*note) + 3)/4;
- memcpy(buf, name, note->n_namesz);
- buf += (note->n_namesz + 3)/4;
- memcpy(buf, data, data_len);
- buf += (data_len + 3)/4;
- return buf;
-}
-
-static void
-final_note(void *buf)
-{
- memset(buf, 0, sizeof(struct elf_note));
-}
-
extern void ia64_dump_cpu_regs(void *);
static DEFINE_PER_CPU(struct elf_prstatus, elf_prstatus);
diff --git a/arch/ia64/pci/pci.c b/arch/ia64/pci/pci.c
index 8f6ac2f8ae4c..4068bde623dc 100644
--- a/arch/ia64/pci/pci.c
+++ b/arch/ia64/pci/pci.c
@@ -418,52 +418,6 @@ pcibios_align_resource (void *data, const struct resource *res,
return res->start;
}
-int
-pci_mmap_page_range (struct pci_dev *dev, struct vm_area_struct *vma,
- enum pci_mmap_state mmap_state, int write_combine)
-{
- unsigned long size = vma->vm_end - vma->vm_start;
- pgprot_t prot;
-
- /*
- * I/O space cannot be accessed via normal processor loads and
- * stores on this platform.
- */
- if (mmap_state == pci_mmap_io)
- /*
- * XXX we could relax this for I/O spaces for which ACPI
- * indicates that the space is 1-to-1 mapped. But at the
- * moment, we don't support multiple PCI address spaces and
- * the legacy I/O space is not 1-to-1 mapped, so this is moot.
- */
- return -EINVAL;
-
- if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
- return -EINVAL;
-
- prot = phys_mem_access_prot(NULL, vma->vm_pgoff, size,
- vma->vm_page_prot);
-
- /*
- * If the user requested WC, the kernel uses UC or WC for this region,
- * and the chipset supports WC, we can use WC. Otherwise, we have to
- * use the same attribute the kernel uses.
- */
- if (write_combine &&
- ((pgprot_val(prot) & _PAGE_MA_MASK) == _PAGE_MA_UC ||
- (pgprot_val(prot) & _PAGE_MA_MASK) == _PAGE_MA_WC) &&
- efi_range_is_wc(vma->vm_start, vma->vm_end - vma->vm_start))
- vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
- else
- vma->vm_page_prot = prot;
-
- if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
- vma->vm_end - vma->vm_start, vma->vm_page_prot))
- return -EAGAIN;
-
- return 0;
-}
-
/**
* ia64_pci_get_legacy_mem - generic legacy mem routine
* @bus: bus to get legacy memory base address for
diff --git a/arch/m68k/ifpsp060/src/ilsp.S b/arch/m68k/ifpsp060/src/ilsp.S
index 970abaf3303e..dd5b2c357e95 100644
--- a/arch/m68k/ifpsp060/src/ilsp.S
+++ b/arch/m68k/ifpsp060/src/ilsp.S
@@ -776,7 +776,7 @@ muls64_zero:
# ALGORITHM *********************************************************** #
# In the interest of simplicity, all operands are converted to #
# longword size whether the operation is byte, word, or long. The #
-# bounds are sign extended accordingly. If Rn is a data regsiter, Rn is #
+# bounds are sign extended accordingly. If Rn is a data register, Rn is #
# also sign extended. If Rn is an address register, it need not be sign #
# extended since the full register is always used. #
# The condition codes are set correctly before the final "rts". #
diff --git a/arch/m68k/ifpsp060/src/isp.S b/arch/m68k/ifpsp060/src/isp.S
index b865c1a052ba..29a9f8629b9d 100644
--- a/arch/m68k/ifpsp060/src/isp.S
+++ b/arch/m68k/ifpsp060/src/isp.S
@@ -1876,7 +1876,7 @@ movp_read_err:
# word, or longword sized operands. Then, in the interest of #
# simplicity, all operands are converted to longword size whether the #
# operation is byte, word, or long. The bounds are sign extended #
-# accordingly. If Rn is a data regsiter, Rn is also sign extended. If #
+# accordingly. If Rn is a data register, Rn is also sign extended. If #
# Rn is an address register, it need not be sign extended since the #
# full register is always used. #
# The comparisons are made and the condition codes calculated. #
diff --git a/arch/microblaze/include/asm/pci.h b/arch/microblaze/include/asm/pci.h
index 2a120bb70e54..efd4983cb697 100644
--- a/arch/microblaze/include/asm/pci.h
+++ b/arch/microblaze/include/asm/pci.h
@@ -46,12 +46,10 @@ extern int pci_domain_nr(struct pci_bus *bus);
extern int pci_proc_domain(struct pci_bus *bus);
struct vm_area_struct;
-/* Map a range of PCI memory or I/O space for a device into user space */
-int pci_mmap_page_range(struct pci_dev *pdev, struct vm_area_struct *vma,
- enum pci_mmap_state mmap_state, int write_combine);
/* Tell drivers/pci/proc.c that we have pci_mmap_page_range() */
-#define HAVE_PCI_MMAP 1
+#define HAVE_PCI_MMAP 1
+#define arch_can_pci_mmap_io() 1
extern int pci_legacy_read(struct pci_bus *bus, loff_t port, u32 *val,
size_t count);
diff --git a/arch/microblaze/pci/pci-common.c b/arch/microblaze/pci/pci-common.c
index 13bc93242c0c..404fb38d06b7 100644
--- a/arch/microblaze/pci/pci-common.c
+++ b/arch/microblaze/pci/pci-common.c
@@ -278,7 +278,7 @@ pgprot_t pci_phys_mem_access_prot(struct file *file,
*
* Returns a negative error code on failure, zero on success.
*/
-int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
+int pci_mmap_page_range(struct pci_dev *dev, int bar, struct vm_area_struct *vma,
enum pci_mmap_state mmap_state, int write_combine)
{
resource_size_t offset =
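
The cris and ia64 hunks above drop their private pci_mmap_page_range() in favour of the generic ARCH_GENERIC_PCI_MMAP_RESOURCE implementation (ia64 also advertising write-combine via arch_can_pci_mmap_wc()), while microblaze keeps its own copy but picks up the new bar argument and arch_can_pci_mmap_io(). From user space the interface is unchanged; a BAR is still mapped through the sysfs resource files, as in this hedged sketch (device path and mapping length hypothetical):

	#include <fcntl.h>
	#include <sys/mman.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = open("/sys/bus/pci/devices/0000:01:00.0/resource0", O_RDWR);
		void *bar;

		if (fd < 0)
			return 1;
		/* Length must not exceed the BAR size listed in the 'resource' file. */
		bar = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
		if (bar == MAP_FAILED) {
			close(fd);
			return 1;
		}
		/* ... access device registers through 'bar' ... */
		munmap(bar, 4096);
		close(fd);
		return 0;
	}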
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index d6d545a45118..4e9ebf65d071 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -1686,6 +1686,7 @@ config CPU_CAVIUM_OCTEON
select USB_EHCI_BIG_ENDIAN_MMIO if CPU_BIG_ENDIAN
select USB_OHCI_BIG_ENDIAN_MMIO if CPU_BIG_ENDIAN
select MIPS_L1_CACHE_SHIFT_7
+ select HAVE_KVM
help
The Cavium Octeon processor is a highly integrated chip containing
many ethernet hardware widgets for networking tasks. The processor
diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper-rgmii.c b/arch/mips/cavium-octeon/executive/cvmx-helper-rgmii.c
index ba4753c23b03..d18ed5af62f4 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-helper-rgmii.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-helper-rgmii.c
@@ -152,7 +152,7 @@ static int __cvmx_helper_errata_asx_pass1(int interface, int port,
}
/**
- * Configure all of the ASX, GMX, and PKO regsiters required
+ * Configure all of the ASX, GMX, and PKO registers required
* to get RGMII to function on the supplied interface.
*
* @interface: PKO Interface to configure (0 or 1)
diff --git a/arch/mips/dec/prom/init.c b/arch/mips/dec/prom/init.c
index 4e1761e0a09a..d88eb7a6662b 100644
--- a/arch/mips/dec/prom/init.c
+++ b/arch/mips/dec/prom/init.c
@@ -88,7 +88,7 @@ void __init which_prom(s32 magic, s32 *prom_vec)
void __init prom_init(void)
{
extern void dec_machine_halt(void);
- static char cpu_msg[] __initdata =
+ static const char cpu_msg[] __initconst =
"Sorry, this kernel is compiled for a wrong CPU type!\n";
s32 argc = fw_arg0;
s32 *argv = (void *)fw_arg1;
@@ -111,7 +111,7 @@ void __init prom_init(void)
#if defined(CONFIG_CPU_R3000)
if ((current_cpu_type() == CPU_R4000SC) ||
(current_cpu_type() == CPU_R4400SC)) {
- static char r4k_msg[] __initdata =
+ static const char r4k_msg[] __initconst =
"Please recompile with \"CONFIG_CPU_R4x00 = y\".\n";
printk(cpu_msg);
printk(r4k_msg);
@@ -122,7 +122,7 @@ void __init prom_init(void)
#if defined(CONFIG_CPU_R4X00)
if ((current_cpu_type() == CPU_R3000) ||
(current_cpu_type() == CPU_R3000A)) {
- static char r3k_msg[] __initdata =
+ static const char r3k_msg[] __initconst =
"Please recompile with \"CONFIG_CPU_R3000 = y\".\n";
printk(cpu_msg);
printk(r3k_msg);
diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h
index e961c8a7ea66..494d38274142 100644
--- a/arch/mips/include/asm/cpu-features.h
+++ b/arch/mips/include/asm/cpu-features.h
@@ -444,6 +444,10 @@
# define cpu_has_msa 0
#endif
+#ifndef cpu_has_ufr
+# define cpu_has_ufr (cpu_data[0].options & MIPS_CPU_UFR)
+#endif
+
#ifndef cpu_has_fre
# define cpu_has_fre (cpu_data[0].options & MIPS_CPU_FRE)
#endif
@@ -528,6 +532,9 @@
#ifndef cpu_guest_has_htw
#define cpu_guest_has_htw (cpu_data[0].guest.options & MIPS_CPU_HTW)
#endif
+#ifndef cpu_guest_has_mvh
+#define cpu_guest_has_mvh (cpu_data[0].guest.options & MIPS_CPU_MVH)
+#endif
#ifndef cpu_guest_has_msa
#define cpu_guest_has_msa (cpu_data[0].guest.ases & MIPS_ASE_MSA)
#endif
@@ -543,6 +550,9 @@
#ifndef cpu_guest_has_maar
#define cpu_guest_has_maar (cpu_data[0].guest.options & MIPS_CPU_MAAR)
#endif
+#ifndef cpu_guest_has_userlocal
+#define cpu_guest_has_userlocal (cpu_data[0].guest.options & MIPS_CPU_ULRI)
+#endif
/*
* Guest dynamic capabilities
diff --git a/arch/mips/include/asm/cpu-info.h b/arch/mips/include/asm/cpu-info.h
index edbe2734a1bf..be3b4c25f335 100644
--- a/arch/mips/include/asm/cpu-info.h
+++ b/arch/mips/include/asm/cpu-info.h
@@ -33,6 +33,7 @@ struct guest_info {
unsigned long ases_dyn;
unsigned long long options;
unsigned long long options_dyn;
+ int tlbsize;
u8 conf;
u8 kscratch_mask;
};
@@ -109,6 +110,7 @@ struct cpuinfo_mips {
struct guest_info guest;
unsigned int gtoffset_mask;
unsigned int guestid_mask;
+ unsigned int guestid_cache;
} __attribute__((aligned(SMP_CACHE_BYTES)));
extern struct cpuinfo_mips cpu_data[];
diff --git a/arch/mips/include/asm/cpu.h b/arch/mips/include/asm/cpu.h
index 9a8372484edc..98f59307e6a3 100644
--- a/arch/mips/include/asm/cpu.h
+++ b/arch/mips/include/asm/cpu.h
@@ -415,6 +415,7 @@ enum cpu_type_enum {
#define MIPS_CPU_GUESTCTL2 MBIT_ULL(50) /* CPU has VZ GuestCtl2 register */
#define MIPS_CPU_GUESTID MBIT_ULL(51) /* CPU uses VZ ASE GuestID feature */
#define MIPS_CPU_DRG MBIT_ULL(52) /* CPU has VZ Direct Root to Guest (DRG) */
+#define MIPS_CPU_UFR MBIT_ULL(53) /* CPU supports User mode FR switching */
/*
* CPU ASE encodings
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index 05e785fc061d..2998479fd4e8 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -10,6 +10,7 @@
#ifndef __MIPS_KVM_HOST_H__
#define __MIPS_KVM_HOST_H__
+#include <linux/cpumask.h>
#include <linux/mutex.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
@@ -33,12 +34,23 @@
#define KVM_REG_MIPS_CP0_ENTRYLO0 MIPS_CP0_64(2, 0)
#define KVM_REG_MIPS_CP0_ENTRYLO1 MIPS_CP0_64(3, 0)
#define KVM_REG_MIPS_CP0_CONTEXT MIPS_CP0_64(4, 0)
+#define KVM_REG_MIPS_CP0_CONTEXTCONFIG MIPS_CP0_32(4, 1)
#define KVM_REG_MIPS_CP0_USERLOCAL MIPS_CP0_64(4, 2)
+#define KVM_REG_MIPS_CP0_XCONTEXTCONFIG MIPS_CP0_64(4, 3)
#define KVM_REG_MIPS_CP0_PAGEMASK MIPS_CP0_32(5, 0)
#define KVM_REG_MIPS_CP0_PAGEGRAIN MIPS_CP0_32(5, 1)
+#define KVM_REG_MIPS_CP0_SEGCTL0 MIPS_CP0_64(5, 2)
+#define KVM_REG_MIPS_CP0_SEGCTL1 MIPS_CP0_64(5, 3)
+#define KVM_REG_MIPS_CP0_SEGCTL2 MIPS_CP0_64(5, 4)
+#define KVM_REG_MIPS_CP0_PWBASE MIPS_CP0_64(5, 5)
+#define KVM_REG_MIPS_CP0_PWFIELD MIPS_CP0_64(5, 6)
+#define KVM_REG_MIPS_CP0_PWSIZE MIPS_CP0_64(5, 7)
#define KVM_REG_MIPS_CP0_WIRED MIPS_CP0_32(6, 0)
+#define KVM_REG_MIPS_CP0_PWCTL MIPS_CP0_32(6, 6)
#define KVM_REG_MIPS_CP0_HWRENA MIPS_CP0_32(7, 0)
#define KVM_REG_MIPS_CP0_BADVADDR MIPS_CP0_64(8, 0)
+#define KVM_REG_MIPS_CP0_BADINSTR MIPS_CP0_32(8, 1)
+#define KVM_REG_MIPS_CP0_BADINSTRP MIPS_CP0_32(8, 2)
#define KVM_REG_MIPS_CP0_COUNT MIPS_CP0_32(9, 0)
#define KVM_REG_MIPS_CP0_ENTRYHI MIPS_CP0_64(10, 0)
#define KVM_REG_MIPS_CP0_COMPARE MIPS_CP0_32(11, 0)
@@ -55,6 +67,7 @@
#define KVM_REG_MIPS_CP0_CONFIG4 MIPS_CP0_32(16, 4)
#define KVM_REG_MIPS_CP0_CONFIG5 MIPS_CP0_32(16, 5)
#define KVM_REG_MIPS_CP0_CONFIG7 MIPS_CP0_32(16, 7)
+#define KVM_REG_MIPS_CP0_MAARI MIPS_CP0_64(17, 2)
#define KVM_REG_MIPS_CP0_XCONTEXT MIPS_CP0_64(20, 0)
#define KVM_REG_MIPS_CP0_ERROREPC MIPS_CP0_64(30, 0)
#define KVM_REG_MIPS_CP0_KSCRATCH1 MIPS_CP0_64(31, 2)
@@ -70,9 +83,13 @@
/* memory slots that does not exposed to userspace */
#define KVM_PRIVATE_MEM_SLOTS 0
-#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
#define KVM_HALT_POLL_NS_DEFAULT 500000
+#ifdef CONFIG_KVM_MIPS_VZ
+extern unsigned long GUESTID_MASK;
+extern unsigned long GUESTID_FIRST_VERSION;
+extern unsigned long GUESTID_VERSION_MASK;
+#endif
/*
@@ -145,6 +162,16 @@ struct kvm_vcpu_stat {
u64 fpe_exits;
u64 msa_disabled_exits;
u64 flush_dcache_exits;
+#ifdef CONFIG_KVM_MIPS_VZ
+ u64 vz_gpsi_exits;
+ u64 vz_gsfc_exits;
+ u64 vz_hc_exits;
+ u64 vz_grr_exits;
+ u64 vz_gva_exits;
+ u64 vz_ghfc_exits;
+ u64 vz_gpa_exits;
+ u64 vz_resvd_exits;
+#endif
u64 halt_successful_poll;
u64 halt_attempted_poll;
u64 halt_poll_invalid;
@@ -157,6 +184,8 @@ struct kvm_arch_memory_slot {
struct kvm_arch {
/* Guest physical mm */
struct mm_struct gpa_mm;
+ /* Mask of CPUs needing GPA ASID flush */
+ cpumask_t asid_flush_mask;
};
#define N_MIPS_COPROC_REGS 32
@@ -214,6 +243,11 @@ struct mips_coproc {
#define MIPS_CP0_CONFIG4_SEL 4
#define MIPS_CP0_CONFIG5_SEL 5
+#define MIPS_CP0_GUESTCTL2 10
+#define MIPS_CP0_GUESTCTL2_SEL 5
+#define MIPS_CP0_GTOFFSET 12
+#define MIPS_CP0_GTOFFSET_SEL 7
+
/* Resume Flags */
#define RESUME_FLAG_DR (1<<0) /* Reload guest nonvolatile state? */
#define RESUME_FLAG_HOST (1<<1) /* Resume host? */
@@ -229,6 +263,7 @@ enum emulation_result {
EMULATE_WAIT, /* WAIT instruction */
EMULATE_PRIV_FAIL,
EMULATE_EXCEPT, /* A guest exception has been generated */
+ EMULATE_HYPERCALL, /* HYPCALL instruction */
};
#define mips3_paddr_to_tlbpfn(x) \
@@ -276,13 +311,18 @@ struct kvm_mmu_memory_cache {
struct kvm_vcpu_arch {
void *guest_ebase;
int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
+
+ /* Host registers preserved across guest mode execution */
unsigned long host_stack;
unsigned long host_gp;
+ unsigned long host_pgd;
+ unsigned long host_entryhi;
/* Host CP0 registers used when handling exits from guest */
unsigned long host_cp0_badvaddr;
unsigned long host_cp0_epc;
u32 host_cp0_cause;
+ u32 host_cp0_guestctl0;
u32 host_cp0_badinstr;
u32 host_cp0_badinstrp;
@@ -340,7 +380,23 @@ struct kvm_vcpu_arch {
/* Cache some mmu pages needed inside spinlock regions */
struct kvm_mmu_memory_cache mmu_page_cache;
+#ifdef CONFIG_KVM_MIPS_VZ
+ /* vcpu's vzguestid is different on each host cpu in an smp system */
+ u32 vzguestid[NR_CPUS];
+
+ /* wired guest TLB entries */
+ struct kvm_mips_tlb *wired_tlb;
+ unsigned int wired_tlb_limit;
+ unsigned int wired_tlb_used;
+
+ /* emulated guest MAAR registers */
+ unsigned long maar[6];
+#endif
+
+ /* Last CPU the VCPU state was loaded on */
int last_sched_cpu;
+ /* Last CPU the VCPU actually executed guest code on */
+ int last_exec_cpu;
/* WAIT executed */
int wait;
@@ -349,78 +405,6 @@ struct kvm_vcpu_arch {
u8 msa_enabled;
};
-
-#define kvm_read_c0_guest_index(cop0) (cop0->reg[MIPS_CP0_TLB_INDEX][0])
-#define kvm_write_c0_guest_index(cop0, val) (cop0->reg[MIPS_CP0_TLB_INDEX][0] = val)
-#define kvm_read_c0_guest_entrylo0(cop0) (cop0->reg[MIPS_CP0_TLB_LO0][0])
-#define kvm_write_c0_guest_entrylo0(cop0, val) (cop0->reg[MIPS_CP0_TLB_LO0][0] = (val))
-#define kvm_read_c0_guest_entrylo1(cop0) (cop0->reg[MIPS_CP0_TLB_LO1][0])
-#define kvm_write_c0_guest_entrylo1(cop0, val) (cop0->reg[MIPS_CP0_TLB_LO1][0] = (val))
-#define kvm_read_c0_guest_context(cop0) (cop0->reg[MIPS_CP0_TLB_CONTEXT][0])
-#define kvm_write_c0_guest_context(cop0, val) (cop0->reg[MIPS_CP0_TLB_CONTEXT][0] = (val))
-#define kvm_read_c0_guest_userlocal(cop0) (cop0->reg[MIPS_CP0_TLB_CONTEXT][2])
-#define kvm_write_c0_guest_userlocal(cop0, val) (cop0->reg[MIPS_CP0_TLB_CONTEXT][2] = (val))
-#define kvm_read_c0_guest_pagemask(cop0) (cop0->reg[MIPS_CP0_TLB_PG_MASK][0])
-#define kvm_write_c0_guest_pagemask(cop0, val) (cop0->reg[MIPS_CP0_TLB_PG_MASK][0] = (val))
-#define kvm_read_c0_guest_wired(cop0) (cop0->reg[MIPS_CP0_TLB_WIRED][0])
-#define kvm_write_c0_guest_wired(cop0, val) (cop0->reg[MIPS_CP0_TLB_WIRED][0] = (val))
-#define kvm_read_c0_guest_hwrena(cop0) (cop0->reg[MIPS_CP0_HWRENA][0])
-#define kvm_write_c0_guest_hwrena(cop0, val) (cop0->reg[MIPS_CP0_HWRENA][0] = (val))
-#define kvm_read_c0_guest_badvaddr(cop0) (cop0->reg[MIPS_CP0_BAD_VADDR][0])
-#define kvm_write_c0_guest_badvaddr(cop0, val) (cop0->reg[MIPS_CP0_BAD_VADDR][0] = (val))
-#define kvm_read_c0_guest_count(cop0) (cop0->reg[MIPS_CP0_COUNT][0])
-#define kvm_write_c0_guest_count(cop0, val) (cop0->reg[MIPS_CP0_COUNT][0] = (val))
-#define kvm_read_c0_guest_entryhi(cop0) (cop0->reg[MIPS_CP0_TLB_HI][0])
-#define kvm_write_c0_guest_entryhi(cop0, val) (cop0->reg[MIPS_CP0_TLB_HI][0] = (val))
-#define kvm_read_c0_guest_compare(cop0) (cop0->reg[MIPS_CP0_COMPARE][0])
-#define kvm_write_c0_guest_compare(cop0, val) (cop0->reg[MIPS_CP0_COMPARE][0] = (val))
-#define kvm_read_c0_guest_status(cop0) (cop0->reg[MIPS_CP0_STATUS][0])
-#define kvm_write_c0_guest_status(cop0, val) (cop0->reg[MIPS_CP0_STATUS][0] = (val))
-#define kvm_read_c0_guest_intctl(cop0) (cop0->reg[MIPS_CP0_STATUS][1])
-#define kvm_write_c0_guest_intctl(cop0, val) (cop0->reg[MIPS_CP0_STATUS][1] = (val))
-#define kvm_read_c0_guest_cause(cop0) (cop0->reg[MIPS_CP0_CAUSE][0])
-#define kvm_write_c0_guest_cause(cop0, val) (cop0->reg[MIPS_CP0_CAUSE][0] = (val))
-#define kvm_read_c0_guest_epc(cop0) (cop0->reg[MIPS_CP0_EXC_PC][0])
-#define kvm_write_c0_guest_epc(cop0, val) (cop0->reg[MIPS_CP0_EXC_PC][0] = (val))
-#define kvm_read_c0_guest_prid(cop0) (cop0->reg[MIPS_CP0_PRID][0])
-#define kvm_write_c0_guest_prid(cop0, val) (cop0->reg[MIPS_CP0_PRID][0] = (val))
-#define kvm_read_c0_guest_ebase(cop0) (cop0->reg[MIPS_CP0_PRID][1])
-#define kvm_write_c0_guest_ebase(cop0, val) (cop0->reg[MIPS_CP0_PRID][1] = (val))
-#define kvm_read_c0_guest_config(cop0) (cop0->reg[MIPS_CP0_CONFIG][0])
-#define kvm_read_c0_guest_config1(cop0) (cop0->reg[MIPS_CP0_CONFIG][1])
-#define kvm_read_c0_guest_config2(cop0) (cop0->reg[MIPS_CP0_CONFIG][2])
-#define kvm_read_c0_guest_config3(cop0) (cop0->reg[MIPS_CP0_CONFIG][3])
-#define kvm_read_c0_guest_config4(cop0) (cop0->reg[MIPS_CP0_CONFIG][4])
-#define kvm_read_c0_guest_config5(cop0) (cop0->reg[MIPS_CP0_CONFIG][5])
-#define kvm_read_c0_guest_config7(cop0) (cop0->reg[MIPS_CP0_CONFIG][7])
-#define kvm_write_c0_guest_config(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][0] = (val))
-#define kvm_write_c0_guest_config1(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][1] = (val))
-#define kvm_write_c0_guest_config2(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][2] = (val))
-#define kvm_write_c0_guest_config3(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][3] = (val))
-#define kvm_write_c0_guest_config4(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][4] = (val))
-#define kvm_write_c0_guest_config5(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][5] = (val))
-#define kvm_write_c0_guest_config7(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][7] = (val))
-#define kvm_read_c0_guest_errorepc(cop0) (cop0->reg[MIPS_CP0_ERROR_PC][0])
-#define kvm_write_c0_guest_errorepc(cop0, val) (cop0->reg[MIPS_CP0_ERROR_PC][0] = (val))
-#define kvm_read_c0_guest_kscratch1(cop0) (cop0->reg[MIPS_CP0_DESAVE][2])
-#define kvm_read_c0_guest_kscratch2(cop0) (cop0->reg[MIPS_CP0_DESAVE][3])
-#define kvm_read_c0_guest_kscratch3(cop0) (cop0->reg[MIPS_CP0_DESAVE][4])
-#define kvm_read_c0_guest_kscratch4(cop0) (cop0->reg[MIPS_CP0_DESAVE][5])
-#define kvm_read_c0_guest_kscratch5(cop0) (cop0->reg[MIPS_CP0_DESAVE][6])
-#define kvm_read_c0_guest_kscratch6(cop0) (cop0->reg[MIPS_CP0_DESAVE][7])
-#define kvm_write_c0_guest_kscratch1(cop0, val) (cop0->reg[MIPS_CP0_DESAVE][2] = (val))
-#define kvm_write_c0_guest_kscratch2(cop0, val) (cop0->reg[MIPS_CP0_DESAVE][3] = (val))
-#define kvm_write_c0_guest_kscratch3(cop0, val) (cop0->reg[MIPS_CP0_DESAVE][4] = (val))
-#define kvm_write_c0_guest_kscratch4(cop0, val) (cop0->reg[MIPS_CP0_DESAVE][5] = (val))
-#define kvm_write_c0_guest_kscratch5(cop0, val) (cop0->reg[MIPS_CP0_DESAVE][6] = (val))
-#define kvm_write_c0_guest_kscratch6(cop0, val) (cop0->reg[MIPS_CP0_DESAVE][7] = (val))
-
-/*
- * Some of the guest registers may be modified asynchronously (e.g. from a
- * hrtimer callback in hard irq context) and therefore need stronger atomicity
- * guarantees than other registers.
- */
-
static inline void _kvm_atomic_set_c0_guest_reg(unsigned long *reg,
unsigned long val)
{
@@ -471,26 +455,286 @@ static inline void _kvm_atomic_change_c0_guest_reg(unsigned long *reg,
} while (unlikely(!temp));
}
-#define kvm_set_c0_guest_status(cop0, val) (cop0->reg[MIPS_CP0_STATUS][0] |= (val))
-#define kvm_clear_c0_guest_status(cop0, val) (cop0->reg[MIPS_CP0_STATUS][0] &= ~(val))
+/* Guest register types, used in accessor build below */
+#define __KVMT32 u32
+#define __KVMTl unsigned long
-/* Cause can be modified asynchronously from hardirq hrtimer callback */
-#define kvm_set_c0_guest_cause(cop0, val) \
- _kvm_atomic_set_c0_guest_reg(&cop0->reg[MIPS_CP0_CAUSE][0], val)
-#define kvm_clear_c0_guest_cause(cop0, val) \
- _kvm_atomic_clear_c0_guest_reg(&cop0->reg[MIPS_CP0_CAUSE][0], val)
-#define kvm_change_c0_guest_cause(cop0, change, val) \
- _kvm_atomic_change_c0_guest_reg(&cop0->reg[MIPS_CP0_CAUSE][0], \
- change, val)
-
-#define kvm_set_c0_guest_ebase(cop0, val) (cop0->reg[MIPS_CP0_PRID][1] |= (val))
-#define kvm_clear_c0_guest_ebase(cop0, val) (cop0->reg[MIPS_CP0_PRID][1] &= ~(val))
-#define kvm_change_c0_guest_ebase(cop0, change, val) \
+/*
+ * __BUILD_KVM_$ops_SAVED(): kvm_$op_sw_gc0_$reg()
+ * These operate on the saved guest C0 state in RAM.
+ */
+
+/* Generate saved context simple accessors */
+#define __BUILD_KVM_RW_SAVED(name, type, _reg, sel) \
+static inline __KVMT##type kvm_read_sw_gc0_##name(struct mips_coproc *cop0) \
+{ \
+ return cop0->reg[(_reg)][(sel)]; \
+} \
+static inline void kvm_write_sw_gc0_##name(struct mips_coproc *cop0, \
+ __KVMT##type val) \
+{ \
+ cop0->reg[(_reg)][(sel)] = val; \
+}
+
+/* Generate saved context bitwise modifiers */
+#define __BUILD_KVM_SET_SAVED(name, type, _reg, sel) \
+static inline void kvm_set_sw_gc0_##name(struct mips_coproc *cop0, \
+ __KVMT##type val) \
+{ \
+ cop0->reg[(_reg)][(sel)] |= val; \
+} \
+static inline void kvm_clear_sw_gc0_##name(struct mips_coproc *cop0, \
+ __KVMT##type val) \
+{ \
+ cop0->reg[(_reg)][(sel)] &= ~val; \
+} \
+static inline void kvm_change_sw_gc0_##name(struct mips_coproc *cop0, \
+ __KVMT##type mask, \
+ __KVMT##type val) \
+{ \
+ unsigned long _mask = mask; \
+ cop0->reg[(_reg)][(sel)] &= ~_mask; \
+ cop0->reg[(_reg)][(sel)] |= val & _mask; \
+}
+
+/* Generate saved context atomic bitwise modifiers */
+#define __BUILD_KVM_ATOMIC_SAVED(name, type, _reg, sel) \
+static inline void kvm_set_sw_gc0_##name(struct mips_coproc *cop0, \
+ __KVMT##type val) \
+{ \
+ _kvm_atomic_set_c0_guest_reg(&cop0->reg[(_reg)][(sel)], val); \
+} \
+static inline void kvm_clear_sw_gc0_##name(struct mips_coproc *cop0, \
+ __KVMT##type val) \
+{ \
+ _kvm_atomic_clear_c0_guest_reg(&cop0->reg[(_reg)][(sel)], val); \
+} \
+static inline void kvm_change_sw_gc0_##name(struct mips_coproc *cop0, \
+ __KVMT##type mask, \
+ __KVMT##type val) \
+{ \
+ _kvm_atomic_change_c0_guest_reg(&cop0->reg[(_reg)][(sel)], mask, \
+ val); \
+}
+
+/*
+ * __BUILD_KVM_$ops_VZ(): kvm_$op_vz_gc0_$reg()
+ * These operate on the VZ guest C0 context in hardware.
+ */
+
+/* Generate VZ guest context simple accessors */
+#define __BUILD_KVM_RW_VZ(name, type, _reg, sel) \
+static inline __KVMT##type kvm_read_vz_gc0_##name(struct mips_coproc *cop0) \
+{ \
+ return read_gc0_##name(); \
+} \
+static inline void kvm_write_vz_gc0_##name(struct mips_coproc *cop0, \
+ __KVMT##type val) \
+{ \
+ write_gc0_##name(val); \
+}
+
+/* Generate VZ guest context bitwise modifiers */
+#define __BUILD_KVM_SET_VZ(name, type, _reg, sel) \
+static inline void kvm_set_vz_gc0_##name(struct mips_coproc *cop0, \
+ __KVMT##type val) \
+{ \
+ set_gc0_##name(val); \
+} \
+static inline void kvm_clear_vz_gc0_##name(struct mips_coproc *cop0, \
+ __KVMT##type val) \
+{ \
+ clear_gc0_##name(val); \
+} \
+static inline void kvm_change_vz_gc0_##name(struct mips_coproc *cop0, \
+ __KVMT##type mask, \
+ __KVMT##type val) \
+{ \
+ change_gc0_##name(mask, val); \
+}
+
+/* Generate VZ guest context save/restore to/from saved context */
+#define __BUILD_KVM_SAVE_VZ(name, _reg, sel) \
+static inline void kvm_restore_gc0_##name(struct mips_coproc *cop0) \
+{ \
+ write_gc0_##name(cop0->reg[(_reg)][(sel)]); \
+} \
+static inline void kvm_save_gc0_##name(struct mips_coproc *cop0) \
+{ \
+ cop0->reg[(_reg)][(sel)] = read_gc0_##name(); \
+}
+
+/*
+ * __BUILD_KVM_$ops_WRAP(): kvm_$op_$name1() -> kvm_$op_$name2()
+ * These wrap a set of operations to provide them with a different name.
+ */
+
+/* Generate simple accessor wrapper */
+#define __BUILD_KVM_RW_WRAP(name1, name2, type) \
+static inline __KVMT##type kvm_read_##name1(struct mips_coproc *cop0) \
+{ \
+ return kvm_read_##name2(cop0); \
+} \
+static inline void kvm_write_##name1(struct mips_coproc *cop0, \
+ __KVMT##type val) \
+{ \
+ kvm_write_##name2(cop0, val); \
+}
+
+/* Generate bitwise modifier wrapper */
+#define __BUILD_KVM_SET_WRAP(name1, name2, type) \
+static inline void kvm_set_##name1(struct mips_coproc *cop0, \
+ __KVMT##type val) \
{ \
- kvm_clear_c0_guest_ebase(cop0, change); \
- kvm_set_c0_guest_ebase(cop0, ((val) & (change))); \
+ kvm_set_##name2(cop0, val); \
+} \
+static inline void kvm_clear_##name1(struct mips_coproc *cop0, \
+ __KVMT##type val) \
+{ \
+ kvm_clear_##name2(cop0, val); \
+} \
+static inline void kvm_change_##name1(struct mips_coproc *cop0, \
+ __KVMT##type mask, \
+ __KVMT##type val) \
+{ \
+ kvm_change_##name2(cop0, mask, val); \
}
+/*
+ * __BUILD_KVM_$ops_SW(): kvm_$op_c0_guest_$reg() -> kvm_$op_sw_gc0_$reg()
+ * These generate accessors operating on the saved context in RAM, and wrap them
+ * with the common guest C0 accessors (for use by common emulation code).
+ */
+
+#define __BUILD_KVM_RW_SW(name, type, _reg, sel) \
+ __BUILD_KVM_RW_SAVED(name, type, _reg, sel) \
+ __BUILD_KVM_RW_WRAP(c0_guest_##name, sw_gc0_##name, type)
+
+#define __BUILD_KVM_SET_SW(name, type, _reg, sel) \
+ __BUILD_KVM_SET_SAVED(name, type, _reg, sel) \
+ __BUILD_KVM_SET_WRAP(c0_guest_##name, sw_gc0_##name, type)
+
+#define __BUILD_KVM_ATOMIC_SW(name, type, _reg, sel) \
+ __BUILD_KVM_ATOMIC_SAVED(name, type, _reg, sel) \
+ __BUILD_KVM_SET_WRAP(c0_guest_##name, sw_gc0_##name, type)
+
+#ifndef CONFIG_KVM_MIPS_VZ
+
+/*
+ * T&E (trap & emulate software-based virtualisation)
+ * We generate the common accessors operating exclusively on the saved context
+ * in RAM.
+ */
+
+#define __BUILD_KVM_RW_HW __BUILD_KVM_RW_SW
+#define __BUILD_KVM_SET_HW __BUILD_KVM_SET_SW
+#define __BUILD_KVM_ATOMIC_HW __BUILD_KVM_ATOMIC_SW
+
+#else
+
+/*
+ * VZ (hardware assisted virtualisation)
+ * These macros use the active guest state in VZ mode (hardware registers).
+ */
+
+/*
+ * __BUILD_KVM_$ops_HW(): kvm_$op_c0_guest_$reg() -> kvm_$op_vz_gc0_$reg()
+ * These generate accessors operating on the VZ guest context in hardware, and
+ * wrap them with the common guest C0 accessors (for use by common emulation
+ * code).
+ *
+ * Accessors operating on the saved context in RAM are also generated to allow
+ * convenient explicit saving and restoring of the state.
+ */
+
+#define __BUILD_KVM_RW_HW(name, type, _reg, sel) \
+ __BUILD_KVM_RW_SAVED(name, type, _reg, sel) \
+ __BUILD_KVM_RW_VZ(name, type, _reg, sel) \
+ __BUILD_KVM_RW_WRAP(c0_guest_##name, vz_gc0_##name, type) \
+ __BUILD_KVM_SAVE_VZ(name, _reg, sel)
+
+#define __BUILD_KVM_SET_HW(name, type, _reg, sel) \
+ __BUILD_KVM_SET_SAVED(name, type, _reg, sel) \
+ __BUILD_KVM_SET_VZ(name, type, _reg, sel) \
+ __BUILD_KVM_SET_WRAP(c0_guest_##name, vz_gc0_##name, type)
+
+/*
+ * We can't do atomic modifications of COP0 state if hardware can modify it.
+ * Races must be handled explicitly.
+ */
+#define __BUILD_KVM_ATOMIC_HW __BUILD_KVM_SET_HW
+
+#endif
+
+/*
+ * Define accessors for CP0 registers that are accessible to the guest. These
+ * are primarily used by common emulation code, which may need to access the
+ * registers differently depending on the implementation.
+ *
+ * fns_hw/sw name type reg num select
+ */
+__BUILD_KVM_RW_HW(index, 32, MIPS_CP0_TLB_INDEX, 0)
+__BUILD_KVM_RW_HW(entrylo0, l, MIPS_CP0_TLB_LO0, 0)
+__BUILD_KVM_RW_HW(entrylo1, l, MIPS_CP0_TLB_LO1, 0)
+__BUILD_KVM_RW_HW(context, l, MIPS_CP0_TLB_CONTEXT, 0)
+__BUILD_KVM_RW_HW(contextconfig, 32, MIPS_CP0_TLB_CONTEXT, 1)
+__BUILD_KVM_RW_HW(userlocal, l, MIPS_CP0_TLB_CONTEXT, 2)
+__BUILD_KVM_RW_HW(xcontextconfig, l, MIPS_CP0_TLB_CONTEXT, 3)
+__BUILD_KVM_RW_HW(pagemask, l, MIPS_CP0_TLB_PG_MASK, 0)
+__BUILD_KVM_RW_HW(pagegrain, 32, MIPS_CP0_TLB_PG_MASK, 1)
+__BUILD_KVM_RW_HW(segctl0, l, MIPS_CP0_TLB_PG_MASK, 2)
+__BUILD_KVM_RW_HW(segctl1, l, MIPS_CP0_TLB_PG_MASK, 3)
+__BUILD_KVM_RW_HW(segctl2, l, MIPS_CP0_TLB_PG_MASK, 4)
+__BUILD_KVM_RW_HW(pwbase, l, MIPS_CP0_TLB_PG_MASK, 5)
+__BUILD_KVM_RW_HW(pwfield, l, MIPS_CP0_TLB_PG_MASK, 6)
+__BUILD_KVM_RW_HW(pwsize, l, MIPS_CP0_TLB_PG_MASK, 7)
+__BUILD_KVM_RW_HW(wired, 32, MIPS_CP0_TLB_WIRED, 0)
+__BUILD_KVM_RW_HW(pwctl, 32, MIPS_CP0_TLB_WIRED, 6)
+__BUILD_KVM_RW_HW(hwrena, 32, MIPS_CP0_HWRENA, 0)
+__BUILD_KVM_RW_HW(badvaddr, l, MIPS_CP0_BAD_VADDR, 0)
+__BUILD_KVM_RW_HW(badinstr, 32, MIPS_CP0_BAD_VADDR, 1)
+__BUILD_KVM_RW_HW(badinstrp, 32, MIPS_CP0_BAD_VADDR, 2)
+__BUILD_KVM_RW_SW(count, 32, MIPS_CP0_COUNT, 0)
+__BUILD_KVM_RW_HW(entryhi, l, MIPS_CP0_TLB_HI, 0)
+__BUILD_KVM_RW_HW(compare, 32, MIPS_CP0_COMPARE, 0)
+__BUILD_KVM_RW_HW(status, 32, MIPS_CP0_STATUS, 0)
+__BUILD_KVM_RW_HW(intctl, 32, MIPS_CP0_STATUS, 1)
+__BUILD_KVM_RW_HW(cause, 32, MIPS_CP0_CAUSE, 0)
+__BUILD_KVM_RW_HW(epc, l, MIPS_CP0_EXC_PC, 0)
+__BUILD_KVM_RW_SW(prid, 32, MIPS_CP0_PRID, 0)
+__BUILD_KVM_RW_HW(ebase, l, MIPS_CP0_PRID, 1)
+__BUILD_KVM_RW_HW(config, 32, MIPS_CP0_CONFIG, 0)
+__BUILD_KVM_RW_HW(config1, 32, MIPS_CP0_CONFIG, 1)
+__BUILD_KVM_RW_HW(config2, 32, MIPS_CP0_CONFIG, 2)
+__BUILD_KVM_RW_HW(config3, 32, MIPS_CP0_CONFIG, 3)
+__BUILD_KVM_RW_HW(config4, 32, MIPS_CP0_CONFIG, 4)
+__BUILD_KVM_RW_HW(config5, 32, MIPS_CP0_CONFIG, 5)
+__BUILD_KVM_RW_HW(config6, 32, MIPS_CP0_CONFIG, 6)
+__BUILD_KVM_RW_HW(config7, 32, MIPS_CP0_CONFIG, 7)
+__BUILD_KVM_RW_SW(maari, l, MIPS_CP0_LLADDR, 2)
+__BUILD_KVM_RW_HW(xcontext, l, MIPS_CP0_TLB_XCONTEXT, 0)
+__BUILD_KVM_RW_HW(errorepc, l, MIPS_CP0_ERROR_PC, 0)
+__BUILD_KVM_RW_HW(kscratch1, l, MIPS_CP0_DESAVE, 2)
+__BUILD_KVM_RW_HW(kscratch2, l, MIPS_CP0_DESAVE, 3)
+__BUILD_KVM_RW_HW(kscratch3, l, MIPS_CP0_DESAVE, 4)
+__BUILD_KVM_RW_HW(kscratch4, l, MIPS_CP0_DESAVE, 5)
+__BUILD_KVM_RW_HW(kscratch5, l, MIPS_CP0_DESAVE, 6)
+__BUILD_KVM_RW_HW(kscratch6, l, MIPS_CP0_DESAVE, 7)
+
+/* Bitwise operations (on HW state) */
+__BUILD_KVM_SET_HW(status, 32, MIPS_CP0_STATUS, 0)
+/* Cause can be modified asynchronously from hardirq hrtimer callback */
+__BUILD_KVM_ATOMIC_HW(cause, 32, MIPS_CP0_CAUSE, 0)
+__BUILD_KVM_SET_HW(ebase, l, MIPS_CP0_PRID, 1)
+
+/* Bitwise operations (on saved state) */
+__BUILD_KVM_SET_SAVED(config, 32, MIPS_CP0_CONFIG, 0)
+__BUILD_KVM_SET_SAVED(config1, 32, MIPS_CP0_CONFIG, 1)
+__BUILD_KVM_SET_SAVED(config2, 32, MIPS_CP0_CONFIG, 2)
+__BUILD_KVM_SET_SAVED(config3, 32, MIPS_CP0_CONFIG, 3)
+__BUILD_KVM_SET_SAVED(config4, 32, MIPS_CP0_CONFIG, 4)
+__BUILD_KVM_SET_SAVED(config5, 32, MIPS_CP0_CONFIG, 5)
+
/* Helpers */
static inline bool kvm_mips_guest_can_have_fpu(struct kvm_vcpu_arch *vcpu)
@@ -531,6 +775,10 @@ struct kvm_mips_callbacks {
int (*handle_msa_fpe)(struct kvm_vcpu *vcpu);
int (*handle_fpe)(struct kvm_vcpu *vcpu);
int (*handle_msa_disabled)(struct kvm_vcpu *vcpu);
+ int (*handle_guest_exit)(struct kvm_vcpu *vcpu);
+ int (*hardware_enable)(void);
+ void (*hardware_disable)(void);
+ int (*check_extension)(struct kvm *kvm, long ext);
int (*vcpu_init)(struct kvm_vcpu *vcpu);
void (*vcpu_uninit)(struct kvm_vcpu *vcpu);
int (*vcpu_setup)(struct kvm_vcpu *vcpu);
@@ -599,6 +847,10 @@ u32 kvm_get_user_asid(struct kvm_vcpu *vcpu);
u32 kvm_get_commpage_asid (struct kvm_vcpu *vcpu);
+#ifdef CONFIG_KVM_MIPS_VZ
+int kvm_mips_handle_vz_root_tlb_fault(unsigned long badvaddr,
+ struct kvm_vcpu *vcpu, bool write_fault);
+#endif
extern int kvm_mips_handle_kseg0_tlb_fault(unsigned long badbaddr,
struct kvm_vcpu *vcpu,
bool write_fault);
@@ -625,6 +877,18 @@ extern int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long entryhi,
extern int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu,
unsigned long entryhi);
+#ifdef CONFIG_KVM_MIPS_VZ
+int kvm_vz_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long entryhi);
+int kvm_vz_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long gva,
+ unsigned long *gpa);
+void kvm_vz_local_flush_roottlb_all_guests(void);
+void kvm_vz_local_flush_guesttlb_all(void);
+void kvm_vz_save_guesttlb(struct kvm_mips_tlb *buf, unsigned int index,
+ unsigned int count);
+void kvm_vz_load_guesttlb(const struct kvm_mips_tlb *buf, unsigned int index,
+ unsigned int count);
+#endif
+
void kvm_mips_suspend_mm(int cpu);
void kvm_mips_resume_mm(int cpu);
@@ -795,7 +1059,7 @@ extern enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
u32 kvm_mips_read_count(struct kvm_vcpu *vcpu);
void kvm_mips_write_count(struct kvm_vcpu *vcpu, u32 count);
void kvm_mips_write_compare(struct kvm_vcpu *vcpu, u32 compare, bool ack);
-void kvm_mips_init_count(struct kvm_vcpu *vcpu);
+void kvm_mips_init_count(struct kvm_vcpu *vcpu, unsigned long count_hz);
int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl);
int kvm_mips_set_count_resume(struct kvm_vcpu *vcpu, s64 count_resume);
int kvm_mips_set_count_hz(struct kvm_vcpu *vcpu, s64 count_hz);
@@ -803,6 +1067,20 @@ void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu);
void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu);
enum hrtimer_restart kvm_mips_count_timeout(struct kvm_vcpu *vcpu);
+/* fairly internal functions requiring some care to use */
+int kvm_mips_count_disabled(struct kvm_vcpu *vcpu);
+ktime_t kvm_mips_freeze_hrtimer(struct kvm_vcpu *vcpu, u32 *count);
+int kvm_mips_restore_hrtimer(struct kvm_vcpu *vcpu, ktime_t before,
+ u32 count, int min_drift);
+
+#ifdef CONFIG_KVM_MIPS_VZ
+void kvm_vz_acquire_htimer(struct kvm_vcpu *vcpu);
+void kvm_vz_lose_htimer(struct kvm_vcpu *vcpu);
+#else
+static inline void kvm_vz_acquire_htimer(struct kvm_vcpu *vcpu) {}
+static inline void kvm_vz_lose_htimer(struct kvm_vcpu *vcpu) {}
+#endif
+
enum emulation_result kvm_mips_check_privilege(u32 cause,
u32 *opc,
struct kvm_run *run,
@@ -827,11 +1105,20 @@ enum emulation_result kvm_mips_emulate_load(union mips_instruction inst,
struct kvm_run *run,
struct kvm_vcpu *vcpu);
+/* COP0 */
+enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu);
+
unsigned int kvm_mips_config1_wrmask(struct kvm_vcpu *vcpu);
unsigned int kvm_mips_config3_wrmask(struct kvm_vcpu *vcpu);
unsigned int kvm_mips_config4_wrmask(struct kvm_vcpu *vcpu);
unsigned int kvm_mips_config5_wrmask(struct kvm_vcpu *vcpu);
+/* Hypercalls (hypcall.c) */
+
+enum emulation_result kvm_mips_emul_hypcall(struct kvm_vcpu *vcpu,
+ union mips_instruction inst);
+int kvm_mips_handle_hypcall(struct kvm_vcpu *vcpu);
+
/* Dynamic binary translation */
extern int kvm_mips_trans_cache_index(union mips_instruction inst,
u32 *opc, struct kvm_vcpu *vcpu);
@@ -846,7 +1133,6 @@ extern int kvm_mips_trans_mtc0(union mips_instruction inst, u32 *opc,
extern void kvm_mips_dump_stats(struct kvm_vcpu *vcpu);
extern unsigned long kvm_mips_get_ramsize(struct kvm *kvm);
-static inline void kvm_arch_hardware_disable(void) {}
static inline void kvm_arch_hardware_unsetup(void) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_free_memslot(struct kvm *kvm,
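For illustration (not part of the patch), hand-expanding one of the accessor builders above shows what the common emulation code ends up calling. A minimal sketch of __BUILD_KVM_RW_SW(count, 32, MIPS_CP0_COUNT, 0), with __KVMT32 resolving to u32 (the expansion is the same in the T&E and VZ configurations, since Count stays in the saved context):

/* __BUILD_KVM_RW_SAVED() part: accessors on the saved guest C0 state in RAM */
static inline u32 kvm_read_sw_gc0_count(struct mips_coproc *cop0)
{
	return cop0->reg[MIPS_CP0_COUNT][0];
}
static inline void kvm_write_sw_gc0_count(struct mips_coproc *cop0, u32 val)
{
	cop0->reg[MIPS_CP0_COUNT][0] = val;
}

/* __BUILD_KVM_RW_WRAP() part: the mode-independent names used by emulation code */
static inline u32 kvm_read_c0_guest_count(struct mips_coproc *cop0)
{
	return kvm_read_sw_gc0_count(cop0);
}
static inline void kvm_write_c0_guest_count(struct mips_coproc *cop0, u32 val)
{
	kvm_write_sw_gc0_count(cop0, val);
}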
diff --git a/arch/mips/include/asm/maar.h b/arch/mips/include/asm/maar.h
index 21d9607c80d7..e10f78befbd9 100644
--- a/arch/mips/include/asm/maar.h
+++ b/arch/mips/include/asm/maar.h
@@ -36,7 +36,7 @@ unsigned platform_maar_init(unsigned num_pairs);
* @upper: The highest address that the MAAR pair will affect. Must be
* aligned to one byte before a 2^16 byte boundary.
* @attrs: The accessibility attributes to program, eg. MIPS_MAAR_S. The
- * MIPS_MAAR_V attribute will automatically be set.
+ * MIPS_MAAR_VL attribute will automatically be set.
*
* Program the pair of MAAR registers specified by idx to apply the attributes
* specified by attrs to the range of addresses from lower to higher.
@@ -49,10 +49,10 @@ static inline void write_maar_pair(unsigned idx, phys_addr_t lower,
BUG_ON(((upper & 0xffff) != 0xffff)
|| ((upper & ~0xffffull) & ~(MIPS_MAAR_ADDR << 4)));
- /* Automatically set MIPS_MAAR_V */
- attrs |= MIPS_MAAR_V;
+ /* Automatically set MIPS_MAAR_VL */
+ attrs |= MIPS_MAAR_VL;
- /* Write the upper address & attributes (only MIPS_MAAR_V matters) */
+ /* Write the upper address & attributes (only MIPS_MAAR_VL matters) */
write_c0_maari(idx << 1);
back_to_back_c0_hazard();
write_c0_maar(((upper >> 4) & MIPS_MAAR_ADDR) | attrs);
@@ -81,7 +81,7 @@ extern void maar_init(void);
* @upper: The highest address that the MAAR pair will affect. Must be
* aligned to one byte before a 2^16 byte boundary.
* @attrs: The accessibility attributes to program, eg. MIPS_MAAR_S. The
- * MIPS_MAAR_V attribute will automatically be set.
+ * MIPS_MAAR_VL attribute will automatically be set.
*
* Describes the configuration of a pair of Memory Accessibility Attribute
* Registers - applying attributes from attrs to the range of physical
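For illustration (not part of the patch): callers of write_maar_pair() keep passing only the attribute bits; the valid bit (now named MIPS_MAAR_VL) is still OR'd in internally. A minimal sketch marking the first 256 MiB of physical memory speculatable (the index and range are invented for the example):

static void example_maar_setup(void)
{
	/*
	 * lower must be 2^16-byte aligned, upper must end one byte before a
	 * 2^16-byte boundary; MIPS_MAAR_VL is set by write_maar_pair() itself.
	 */
	write_maar_pair(0, 0x00000000, 0x0fffffff, MIPS_MAAR_S);
}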
diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
index f8d1d2f1d80d..6875b69f59f7 100644
--- a/arch/mips/include/asm/mipsregs.h
+++ b/arch/mips/include/asm/mipsregs.h
@@ -34,8 +34,10 @@
*/
#ifdef __ASSEMBLY__
#define _ULCAST_
+#define _U64CAST_
#else
#define _ULCAST_ (unsigned long)
+#define _U64CAST_ (u64)
#endif
/*
@@ -217,8 +219,10 @@
/*
* Wired register bits
*/
-#define MIPSR6_WIRED_LIMIT (_ULCAST_(0xffff) << 16)
-#define MIPSR6_WIRED_WIRED (_ULCAST_(0xffff) << 0)
+#define MIPSR6_WIRED_LIMIT_SHIFT 16
+#define MIPSR6_WIRED_LIMIT (_ULCAST_(0xffff) << MIPSR6_WIRED_LIMIT_SHIFT)
+#define MIPSR6_WIRED_WIRED_SHIFT 0
+#define MIPSR6_WIRED_WIRED (_ULCAST_(0xffff) << MIPSR6_WIRED_WIRED_SHIFT)
/*
* Values used for computation of new tlb entries
@@ -645,6 +649,7 @@
#define MIPS_CONF5_LLB (_ULCAST_(1) << 4)
#define MIPS_CONF5_MVH (_ULCAST_(1) << 5)
#define MIPS_CONF5_VP (_ULCAST_(1) << 7)
+#define MIPS_CONF5_SBRI (_ULCAST_(1) << 6)
#define MIPS_CONF5_FRE (_ULCAST_(1) << 8)
#define MIPS_CONF5_UFE (_ULCAST_(1) << 9)
#define MIPS_CONF5_MSAEN (_ULCAST_(1) << 27)
@@ -719,10 +724,14 @@
#define XLR_PERFCTRL_ALLTHREADS (_ULCAST_(1) << 13)
/* MAAR bit definitions */
+#define MIPS_MAAR_VH (_U64CAST_(1) << 63)
#define MIPS_MAAR_ADDR ((BIT_ULL(BITS_PER_LONG - 12) - 1) << 12)
#define MIPS_MAAR_ADDR_SHIFT 12
#define MIPS_MAAR_S (_ULCAST_(1) << 1)
-#define MIPS_MAAR_V (_ULCAST_(1) << 0)
+#define MIPS_MAAR_VL (_ULCAST_(1) << 0)
+
+/* MAARI bit definitions */
+#define MIPS_MAARI_INDEX (_ULCAST_(0x3f) << 0)
/* EBase bit definitions */
#define MIPS_EBASE_CPUNUM_SHIFT 0
@@ -736,6 +745,10 @@
#define MIPS_CMGCRB_BASE 11
#define MIPS_CMGCRF_BASE (~_ULCAST_((1 << MIPS_CMGCRB_BASE) - 1))
+/* LLAddr bit definitions */
+#define MIPS_LLADDR_LLB_SHIFT 0
+#define MIPS_LLADDR_LLB (_ULCAST_(1) << MIPS_LLADDR_LLB_SHIFT)
+
/*
* Bits in the MIPS32 Memory Segmentation registers.
*/
@@ -961,6 +974,22 @@
/* Flush FTLB */
#define LOONGSON_DIAG_FTLB (_ULCAST_(1) << 13)
+/* CvmCtl register field definitions */
+#define CVMCTL_IPPCI_SHIFT 7
+#define CVMCTL_IPPCI (_U64CAST_(0x7) << CVMCTL_IPPCI_SHIFT)
+#define CVMCTL_IPTI_SHIFT 4
+#define CVMCTL_IPTI (_U64CAST_(0x7) << CVMCTL_IPTI_SHIFT)
+
+/* CvmMemCtl2 register field definitions */
+#define CVMMEMCTL2_INHIBITTS (_U64CAST_(1) << 17)
+
+/* CvmVMConfig register field definitions */
+#define CVMVMCONF_DGHT (_U64CAST_(1) << 60)
+#define CVMVMCONF_MMUSIZEM1_S 12
+#define CVMVMCONF_MMUSIZEM1 (_U64CAST_(0xff) << CVMVMCONF_MMUSIZEM1_S)
+#define CVMVMCONF_RMMUSIZEM1_S 0
+#define CVMVMCONF_RMMUSIZEM1 (_U64CAST_(0xff) << CVMVMCONF_RMMUSIZEM1_S)
+
/*
* Coprocessor 1 (FPU) register names
*/
@@ -1720,6 +1749,13 @@ do { \
#define read_c0_cvmmemctl() __read_64bit_c0_register($11, 7)
#define write_c0_cvmmemctl(val) __write_64bit_c0_register($11, 7, val)
+
+#define read_c0_cvmmemctl2() __read_64bit_c0_register($16, 6)
+#define write_c0_cvmmemctl2(val) __write_64bit_c0_register($16, 6, val)
+
+#define read_c0_cvmvmconfig() __read_64bit_c0_register($16, 7)
+#define write_c0_cvmvmconfig(val) __write_64bit_c0_register($16, 7, val)
+
/*
* The cacheerr registers are not standardized. On OCTEON, they are
* 64 bits wide.
@@ -1989,6 +2025,8 @@ do { \
#define read_gc0_epc() __read_ulong_gc0_register(14, 0)
#define write_gc0_epc(val) __write_ulong_gc0_register(14, 0, val)
+#define read_gc0_prid() __read_32bit_gc0_register(15, 0)
+
#define read_gc0_ebase() __read_32bit_gc0_register(15, 1)
#define write_gc0_ebase(val) __write_32bit_gc0_register(15, 1, val)
@@ -2012,6 +2050,9 @@ do { \
#define write_gc0_config6(val) __write_32bit_gc0_register(16, 6, val)
#define write_gc0_config7(val) __write_32bit_gc0_register(16, 7, val)
+#define read_gc0_lladdr() __read_ulong_gc0_register(17, 0)
+#define write_gc0_lladdr(val) __write_ulong_gc0_register(17, 0, val)
+
#define read_gc0_watchlo0() __read_ulong_gc0_register(18, 0)
#define read_gc0_watchlo1() __read_ulong_gc0_register(18, 1)
#define read_gc0_watchlo2() __read_ulong_gc0_register(18, 2)
@@ -2090,6 +2131,19 @@ do { \
#define write_gc0_kscratch5(val) __write_ulong_gc0_register(31, 6, val)
#define write_gc0_kscratch6(val) __write_ulong_gc0_register(31, 7, val)
+/* Cavium OCTEON (cnMIPS) */
+#define read_gc0_cvmcount() __read_ulong_gc0_register(9, 6)
+#define write_gc0_cvmcount(val) __write_ulong_gc0_register(9, 6, val)
+
+#define read_gc0_cvmctl() __read_64bit_gc0_register(9, 7)
+#define write_gc0_cvmctl(val) __write_64bit_gc0_register(9, 7, val)
+
+#define read_gc0_cvmmemctl() __read_64bit_gc0_register(11, 7)
+#define write_gc0_cvmmemctl(val) __write_64bit_gc0_register(11, 7, val)
+
+#define read_gc0_cvmmemctl2() __read_64bit_gc0_register(16, 6)
+#define write_gc0_cvmmemctl2(val) __write_64bit_gc0_register(16, 6, val)
+
/*
* Macros to access the floating point coprocessor control registers
*/
@@ -2696,9 +2750,11 @@ __BUILD_SET_C0(brcm_mode)
*/
#define __BUILD_SET_GC0(name) __BUILD_SET_COMMON(gc0_##name)
+__BUILD_SET_GC0(wired)
__BUILD_SET_GC0(status)
__BUILD_SET_GC0(cause)
__BUILD_SET_GC0(ebase)
+__BUILD_SET_GC0(config1)
/*
* Return low 10 bits of ebase.
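For illustration (not part of the patch): the new CvmVMConfig field macros are meant to be combined with the read_c0_cvmvmconfig() accessor added above, for example to recover the guest MMU size (the helper name is hypothetical):

static unsigned int example_octeon_guest_mmu_size(void)
{
	u64 conf = read_c0_cvmvmconfig();

	/* MMUSizeM1 encodes "number of guest MMU entries minus one" */
	return ((conf & CVMVMCONF_MMUSIZEM1) >> CVMVMCONF_MMUSIZEM1_S) + 1;
}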
diff --git a/arch/mips/include/asm/octeon/cvmx-helper-rgmii.h b/arch/mips/include/asm/octeon/cvmx-helper-rgmii.h
index f89775be7654..f7a95d7de140 100644
--- a/arch/mips/include/asm/octeon/cvmx-helper-rgmii.h
+++ b/arch/mips/include/asm/octeon/cvmx-helper-rgmii.h
@@ -55,7 +55,7 @@ extern int __cvmx_helper_rgmii_probe(int interface);
extern void cvmx_helper_rgmii_internal_loopback(int port);
/**
- * Configure all of the ASX, GMX, and PKO regsiters required
+ * Configure all of the ASX, GMX, and PKO registers required
* to get RGMII to function on the supplied interface.
*
* @interface: PKO Interface to configure (0 or 1)
diff --git a/arch/mips/include/asm/pci.h b/arch/mips/include/asm/pci.h
index 30d1129d8624..1000c1b4c875 100644
--- a/arch/mips/include/asm/pci.h
+++ b/arch/mips/include/asm/pci.h
@@ -110,10 +110,7 @@ extern unsigned long PCIBIOS_MIN_MEM;
extern void pcibios_set_master(struct pci_dev *dev);
#define HAVE_PCI_MMAP
-
-extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
- enum pci_mmap_state mmap_state, int write_combine);
-
+#define ARCH_GENERIC_PCI_MMAP_RESOURCE
#define HAVE_ARCH_PCI_RESOURCE_TO_USER
/*
diff --git a/arch/mips/include/asm/tlb.h b/arch/mips/include/asm/tlb.h
index dd179fd8acda..939734de4359 100644
--- a/arch/mips/include/asm/tlb.h
+++ b/arch/mips/include/asm/tlb.h
@@ -21,9 +21,11 @@
*/
#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
-#define UNIQUE_ENTRYHI(idx) \
- ((CKSEG0 + ((idx) << (PAGE_SHIFT + 1))) | \
+#define _UNIQUE_ENTRYHI(base, idx) \
+ (((base) + ((idx) << (PAGE_SHIFT + 1))) | \
(cpu_has_tlbinv ? MIPS_ENTRYHI_EHINV : 0))
+#define UNIQUE_ENTRYHI(idx) _UNIQUE_ENTRYHI(CKSEG0, idx)
+#define UNIQUE_GUEST_ENTRYHI(idx) _UNIQUE_ENTRYHI(CKSEG1, idx)
static inline unsigned int num_wired_entries(void)
{
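For illustration (not part of the patch): the refactored macro keeps the CKSEG0-based dummy EntryHi values for the root TLB and adds CKSEG1-based ones for guest TLB entries, so the two sets can never alias. A minimal sketch (the helper name is hypothetical):

/* Dummy EntryHi used to park guest TLB entry 'idx'; like UNIQUE_ENTRYHI()
 * it sets EHINV on CPUs that support explicit TLB invalidation. */
static inline unsigned long example_guest_dummy_entryhi(unsigned int idx)
{
	return UNIQUE_GUEST_ENTRYHI(idx);
}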
diff --git a/arch/mips/include/uapi/asm/inst.h b/arch/mips/include/uapi/asm/inst.h
index 77429d1622b3..b5e46ae872d3 100644
--- a/arch/mips/include/uapi/asm/inst.h
+++ b/arch/mips/include/uapi/asm/inst.h
@@ -179,7 +179,7 @@ enum cop0_coi_func {
tlbr_op = 0x01, tlbwi_op = 0x02,
tlbwr_op = 0x06, tlbp_op = 0x08,
rfe_op = 0x10, eret_op = 0x18,
- wait_op = 0x20,
+ wait_op = 0x20, hypcall_op = 0x28
};
/*
diff --git a/arch/mips/include/uapi/asm/kvm.h b/arch/mips/include/uapi/asm/kvm.h
index a8a0199bf760..0318c6b442ab 100644
--- a/arch/mips/include/uapi/asm/kvm.h
+++ b/arch/mips/include/uapi/asm/kvm.h
@@ -21,6 +21,8 @@
#define __KVM_HAVE_READONLY_MEM
+#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
+
/*
* for KVM_GET_REGS and KVM_SET_REGS
*
@@ -54,9 +56,14 @@ struct kvm_fpu {
* Register set = 0: GP registers from kvm_regs (see definitions below).
*
* Register set = 1: CP0 registers.
- * bits[15..8] - Must be zero.
- * bits[7..3] - Register 'rd' index.
- * bits[2..0] - Register 'sel' index.
+ * bits[15..8] - COP0 register set.
+ *
+ * COP0 register set = 0: Main CP0 registers.
+ * bits[7..3] - Register 'rd' index.
+ * bits[2..0] - Register 'sel' index.
+ *
+ * COP0 register set = 1: MAARs.
+ * bits[7..0] - MAAR index.
*
* Register set = 2: KVM specific registers (see definitions below).
*
@@ -115,6 +122,15 @@ struct kvm_fpu {
/*
+ * KVM_REG_MIPS_CP0 - Coprocessor 0 registers.
+ */
+
+#define KVM_REG_MIPS_MAAR (KVM_REG_MIPS_CP0 | (1 << 8))
+#define KVM_REG_MIPS_CP0_MAAR(n) (KVM_REG_MIPS_MAAR | \
+ KVM_REG_SIZE_U64 | (n))
+
+
+/*
* KVM_REG_MIPS_KVM - KVM specific control registers.
*/
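For illustration (not part of the patch): with the new MAAR register set, userspace can reach the emulated MAARs through the existing one-reg interface. A minimal sketch assuming an already-created vcpu file descriptor (error handling omitted):

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static uint64_t example_get_guest_maar(int vcpu_fd, unsigned int n)
{
	uint64_t val = 0;
	struct kvm_one_reg reg = {
		.id   = KVM_REG_MIPS_CP0_MAAR(n),	/* COP0 set 1, MAAR index n */
		.addr = (uintptr_t)&val,
	};

	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);	/* 0 on success */
	return val;
}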
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index 12422fd4af23..3382892544f0 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -289,6 +289,8 @@ static void cpu_set_fpu_opts(struct cpuinfo_mips *c)
MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R6)) {
if (c->fpu_id & MIPS_FPIR_3D)
c->ases |= MIPS_ASE_MIPS3D;
+ if (c->fpu_id & MIPS_FPIR_UFRP)
+ c->options |= MIPS_CPU_UFR;
if (c->fpu_id & MIPS_FPIR_FREP)
c->options |= MIPS_CPU_FRE;
}
@@ -1003,7 +1005,8 @@ static inline unsigned int decode_guest_config3(struct cpuinfo_mips *c)
unsigned int config3, config3_dyn;
probe_gc0_config_dyn(config3, config3, config3_dyn,
- MIPS_CONF_M | MIPS_CONF3_MSA | MIPS_CONF3_CTXTC);
+ MIPS_CONF_M | MIPS_CONF3_MSA | MIPS_CONF3_ULRI |
+ MIPS_CONF3_CTXTC);
if (config3 & MIPS_CONF3_CTXTC)
c->guest.options |= MIPS_CPU_CTXTC;
@@ -1013,6 +1016,9 @@ static inline unsigned int decode_guest_config3(struct cpuinfo_mips *c)
if (config3 & MIPS_CONF3_PW)
c->guest.options |= MIPS_CPU_HTW;
+ if (config3 & MIPS_CONF3_ULRI)
+ c->guest.options |= MIPS_CPU_ULRI;
+
if (config3 & MIPS_CONF3_SC)
c->guest.options |= MIPS_CPU_SEGMENTS;
@@ -1051,7 +1057,7 @@ static inline unsigned int decode_guest_config5(struct cpuinfo_mips *c)
unsigned int config5, config5_dyn;
probe_gc0_config_dyn(config5, config5, config5_dyn,
- MIPS_CONF_M | MIPS_CONF5_MRP);
+ MIPS_CONF_M | MIPS_CONF5_MVH | MIPS_CONF5_MRP);
if (config5 & MIPS_CONF5_MRP)
c->guest.options |= MIPS_CPU_MAAR;
@@ -1061,6 +1067,9 @@ static inline unsigned int decode_guest_config5(struct cpuinfo_mips *c)
if (config5 & MIPS_CONF5_LLB)
c->guest.options |= MIPS_CPU_RW_LLB;
+ if (config5 & MIPS_CONF5_MVH)
+ c->guest.options |= MIPS_CPU_MVH;
+
if (config5 & MIPS_CONF_M)
c->guest.conf |= BIT(6);
return config5 & MIPS_CONF_M;
diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c
index 9452b02ce079..313a88b2973f 100644
--- a/arch/mips/kernel/perf_event_mipsxx.c
+++ b/arch/mips/kernel/perf_event_mipsxx.c
@@ -618,7 +618,7 @@ static int mipspmu_event_init(struct perf_event *event)
return -ENOENT;
}
- if (event->cpu >= nr_cpumask_bits ||
+ if ((unsigned int)event->cpu >= nr_cpumask_bits ||
(event->cpu >= 0 && !cpu_online(event->cpu)))
return -ENODEV;
diff --git a/arch/mips/kernel/time.c b/arch/mips/kernel/time.c
index a7f81261c781..c036157fb891 100644
--- a/arch/mips/kernel/time.c
+++ b/arch/mips/kernel/time.c
@@ -70,6 +70,7 @@ EXPORT_SYMBOL(perf_irq);
*/
unsigned int mips_hpt_frequency;
+EXPORT_SYMBOL_GPL(mips_hpt_frequency);
/*
* This function exists in order to cause an error due to a duplicate
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index b49e7bf9f950..9681b5877140 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -2256,8 +2256,8 @@ void set_handler(unsigned long offset, void *addr, unsigned long size)
local_flush_icache_range(ebase + offset, ebase + offset + size);
}
-static char panic_null_cerr[] =
- "Trying to set NULL cache error exception handler";
+static const char panic_null_cerr[] =
+ "Trying to set NULL cache error exception handler\n";
/*
* Install uncached CPU exception handler.
diff --git a/arch/mips/kvm/Kconfig b/arch/mips/kvm/Kconfig
index 65067327db12..50a722dfb236 100644
--- a/arch/mips/kvm/Kconfig
+++ b/arch/mips/kvm/Kconfig
@@ -26,11 +26,34 @@ config KVM
select SRCU
---help---
Support for hosting Guest kernels.
- Currently supported on MIPS32 processors.
+
+choice
+ prompt "Virtualization mode"
+ depends on KVM
+ default KVM_MIPS_TE
+
+config KVM_MIPS_TE
+ bool "Trap & Emulate"
+ ---help---
+ Use trap and emulate to virtualize 32-bit guests in user mode. This
+ does not require any special hardware Virtualization support beyond
+ standard MIPS32/64 r2 or later, but it does require the guest kernel
+ to be configured with CONFIG_KVM_GUEST=y so that it resides in the
+ user address segment.
+
+config KVM_MIPS_VZ
+ bool "MIPS Virtualization (VZ) ASE"
+ ---help---
+ Use the MIPS Virtualization (VZ) ASE to virtualize guests. This
+ supports running unmodified guest kernels (with CONFIG_KVM_GUEST=n),
+ but requires hardware support.
+
+endchoice
config KVM_MIPS_DYN_TRANS
bool "KVM/MIPS: Dynamic binary translation to reduce traps"
- depends on KVM
+ depends on KVM_MIPS_TE
+ default y
---help---
When running in Trap & Emulate mode patch privileged
instructions to reduce the number of traps.
diff --git a/arch/mips/kvm/Makefile b/arch/mips/kvm/Makefile
index 847429de780d..45d90f5d5177 100644
--- a/arch/mips/kvm/Makefile
+++ b/arch/mips/kvm/Makefile
@@ -9,8 +9,15 @@ common-objs-$(CONFIG_CPU_HAS_MSA) += msa.o
kvm-objs := $(common-objs-y) mips.o emulate.o entry.o \
interrupt.o stats.o commpage.o \
- dyntrans.o trap_emul.o fpu.o
+ fpu.o
+kvm-objs += hypcall.o
kvm-objs += mmu.o
+ifdef CONFIG_KVM_MIPS_VZ
+kvm-objs += vz.o
+else
+kvm-objs += dyntrans.o
+kvm-objs += trap_emul.o
+endif
obj-$(CONFIG_KVM) += kvm.o
obj-y += callback.o tlb.o
diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c
index d40cfaad4529..4144bfaef137 100644
--- a/arch/mips/kvm/emulate.c
+++ b/arch/mips/kvm/emulate.c
@@ -308,7 +308,7 @@ int kvm_get_badinstrp(u32 *opc, struct kvm_vcpu *vcpu, u32 *out)
* CP0_Cause.DC bit or the count_ctl.DC bit.
* 0 otherwise (in which case CP0_Count timer is running).
*/
-static inline int kvm_mips_count_disabled(struct kvm_vcpu *vcpu)
+int kvm_mips_count_disabled(struct kvm_vcpu *vcpu)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
@@ -467,7 +467,7 @@ u32 kvm_mips_read_count(struct kvm_vcpu *vcpu)
*
* Returns: The ktime at the point of freeze.
*/
-static ktime_t kvm_mips_freeze_hrtimer(struct kvm_vcpu *vcpu, u32 *count)
+ktime_t kvm_mips_freeze_hrtimer(struct kvm_vcpu *vcpu, u32 *count)
{
ktime_t now;
@@ -517,6 +517,82 @@ static void kvm_mips_resume_hrtimer(struct kvm_vcpu *vcpu,
}
/**
+ * kvm_mips_restore_hrtimer() - Restore hrtimer after a gap, updating expiry.
+ * @vcpu: Virtual CPU.
+ * @before: Time before Count was saved, lower bound of drift calculation.
+ * @count: CP0_Count at point of restore.
+ * @min_drift: Minimum amount of drift permitted before correction.
+ * Must be <= 0.
+ *
+ * Restores the timer from a particular @count, accounting for drift. This can
+ * be used in conjunction with kvm_mips_freeze_hrtimer() when a hardware timer is
+ * to be used for a period of time, but the exact ktime corresponding to the
+ * final Count that must be restored is not known.
+ *
+ * It is guaranteed that a timer interrupt immediately after restore will be
+ * handled, but not if CP0_Compare is exactly at @count. That case should
+ * already be handled when the hardware timer state is saved.
+ *
+ * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is not
+ * stopped).
+ *
+ * Returns: Amount of correction to count_bias due to drift.
+ */
+int kvm_mips_restore_hrtimer(struct kvm_vcpu *vcpu, ktime_t before,
+ u32 count, int min_drift)
+{
+ ktime_t now, count_time;
+ u32 now_count, before_count;
+ u64 delta;
+ int drift, ret = 0;
+
+ /* Calculate expected count at before */
+ before_count = vcpu->arch.count_bias +
+ kvm_mips_ktime_to_count(vcpu, before);
+
+ /*
+ * Detect significantly negative drift, where count is lower than
+ * expected. Some negative drift is expected when the hardware counter is
+ * set after kvm_mips_freeze_hrtimer(), and it is harmless to allow the
+ * time to jump forwards a little, within reason. If the drift is too
+ * significant, adjust the bias to avoid a big Guest.CP0_Count jump.
+ */
+ drift = count - before_count;
+ if (drift < min_drift) {
+ count_time = before;
+ vcpu->arch.count_bias += drift;
+ ret = drift;
+ goto resume;
+ }
+
+ /* Calculate expected count right now */
+ now = ktime_get();
+ now_count = vcpu->arch.count_bias + kvm_mips_ktime_to_count(vcpu, now);
+
+ /*
+ * Detect positive drift, where count is higher than expected, and
+ * adjust the bias to avoid guest time going backwards.
+ */
+ drift = count - now_count;
+ if (drift > 0) {
+ count_time = now;
+ vcpu->arch.count_bias += drift;
+ ret = drift;
+ goto resume;
+ }
+
+ /* Subtract nanosecond delta to find ktime when count was read */
+ delta = (u64)(u32)(now_count - count);
+ delta = div_u64(delta * NSEC_PER_SEC, vcpu->arch.count_hz);
+ count_time = ktime_sub_ns(now, delta);
+
+resume:
+ /* Resume using the calculated ktime */
+ kvm_mips_resume_hrtimer(vcpu, count_time, count);
+ return ret;
+}
+
+/**
* kvm_mips_write_count() - Modify the count and update timer.
* @vcpu: Virtual CPU.
* @count: Guest CP0_Count value to set.
@@ -543,16 +619,15 @@ void kvm_mips_write_count(struct kvm_vcpu *vcpu, u32 count)
/**
* kvm_mips_init_count() - Initialise timer.
* @vcpu: Virtual CPU.
+ * @count_hz: Frequency of timer.
*
- * Initialise the timer to a sensible frequency, namely 100MHz, zero it, and set
- * it going if it's enabled.
+ * Initialise the timer to the specified frequency, zero it, and set it going if
+ * it's enabled.
*/
-void kvm_mips_init_count(struct kvm_vcpu *vcpu)
+void kvm_mips_init_count(struct kvm_vcpu *vcpu, unsigned long count_hz)
{
- /* 100 MHz */
- vcpu->arch.count_hz = 100*1000*1000;
- vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32,
- vcpu->arch.count_hz);
+ vcpu->arch.count_hz = count_hz;
+ vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32, count_hz);
vcpu->arch.count_dyn_bias = 0;
/* Starting at 0 */
@@ -622,7 +697,9 @@ void kvm_mips_write_compare(struct kvm_vcpu *vcpu, u32 compare, bool ack)
struct mips_coproc *cop0 = vcpu->arch.cop0;
int dc;
u32 old_compare = kvm_read_c0_guest_compare(cop0);
- ktime_t now;
+ s32 delta = compare - old_compare;
+ u32 cause;
+ ktime_t now = ktime_set(0, 0); /* silence bogus GCC warning */
u32 count;
/* if unchanged, must just be an ack */
@@ -634,6 +711,21 @@ void kvm_mips_write_compare(struct kvm_vcpu *vcpu, u32 compare, bool ack)
return;
}
+ /*
+ * If guest CP0_Compare moves forward, CP0_GTOffset should be adjusted
+ * too to prevent guest CP0_Count hitting guest CP0_Compare.
+ *
+ * The new GTOffset corresponds to the new value of CP0_Compare, and is
+ * set prior to it being written into the guest context. We disable
+ * preemption until the new value is written to prevent restore of a
+ * GTOffset corresponding to the old CP0_Compare value.
+ */
+ if (IS_ENABLED(CONFIG_KVM_MIPS_VZ) && delta > 0) {
+ preempt_disable();
+ write_c0_gtoffset(compare - read_c0_count());
+ back_to_back_c0_hazard();
+ }
+
/* freeze_hrtimer() takes care of timer interrupts <= count */
dc = kvm_mips_count_disabled(vcpu);
if (!dc)
@@ -641,12 +733,36 @@ void kvm_mips_write_compare(struct kvm_vcpu *vcpu, u32 compare, bool ack)
if (ack)
kvm_mips_callbacks->dequeue_timer_int(vcpu);
+ else if (IS_ENABLED(CONFIG_KVM_MIPS_VZ))
+ /*
+ * With VZ, writing CP0_Compare acks (clears) CP0_Cause.TI, so
+ * preserve guest CP0_Cause.TI if we don't want to ack it.
+ */
+ cause = kvm_read_c0_guest_cause(cop0);
kvm_write_c0_guest_compare(cop0, compare);
+ if (IS_ENABLED(CONFIG_KVM_MIPS_VZ)) {
+ if (delta > 0)
+ preempt_enable();
+
+ back_to_back_c0_hazard();
+
+ if (!ack && cause & CAUSEF_TI)
+ kvm_write_c0_guest_cause(cop0, cause);
+ }
+
/* resume_hrtimer() takes care of timer interrupts > count */
if (!dc)
kvm_mips_resume_hrtimer(vcpu, now, count);
+
+ /*
+ * If guest CP0_Compare is moving backward, we delay CP0_GTOffset change
+ * until after the new CP0_Compare is written, otherwise new guest
+ * CP0_Count could hit new guest CP0_Compare.
+ */
+ if (IS_ENABLED(CONFIG_KVM_MIPS_VZ) && delta <= 0)
+ write_c0_gtoffset(compare - read_c0_count());
}
/**
@@ -857,6 +973,7 @@ enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu)
++vcpu->stat.wait_exits;
trace_kvm_exit(vcpu, KVM_TRACE_EXIT_WAIT);
if (!vcpu->arch.pending_exceptions) {
+ kvm_vz_lose_htimer(vcpu);
vcpu->arch.wait = 1;
kvm_vcpu_block(vcpu);
@@ -865,7 +982,7 @@ enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu)
* check if any I/O interrupts are pending.
*/
if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) {
- clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
+ kvm_clear_request(KVM_REQ_UNHALT, vcpu);
vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
}
}
@@ -873,17 +990,62 @@ enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu)
return EMULATE_DONE;
}
-/*
- * XXXKYMA: Linux doesn't seem to use TLBR, return EMULATE_FAIL for now so that
- * we can catch this, if things ever change
- */
+static void kvm_mips_change_entryhi(struct kvm_vcpu *vcpu,
+ unsigned long entryhi)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
+ int cpu, i;
+ u32 nasid = entryhi & KVM_ENTRYHI_ASID;
+
+ if (((kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID) != nasid)) {
+ trace_kvm_asid_change(vcpu, kvm_read_c0_guest_entryhi(cop0) &
+ KVM_ENTRYHI_ASID, nasid);
+
+ /*
+ * Flush entries from the GVA page tables.
+ * Guest user page table will get flushed lazily on re-entry to
+ * guest user if the guest ASID actually changes.
+ */
+ kvm_mips_flush_gva_pt(kern_mm->pgd, KMF_KERN);
+
+ /*
+ * Regenerate/invalidate kernel MMU context.
+ * The user MMU context will be regenerated lazily on re-entry
+ * to guest user if the guest ASID actually changes.
+ */
+ preempt_disable();
+ cpu = smp_processor_id();
+ get_new_mmu_context(kern_mm, cpu);
+ for_each_possible_cpu(i)
+ if (i != cpu)
+ cpu_context(i, kern_mm) = 0;
+ preempt_enable();
+ }
+ kvm_write_c0_guest_entryhi(cop0, entryhi);
+}
+
enum emulation_result kvm_mips_emul_tlbr(struct kvm_vcpu *vcpu)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
+ struct kvm_mips_tlb *tlb;
unsigned long pc = vcpu->arch.pc;
+ int index;
- kvm_err("[%#lx] COP0_TLBR [%ld]\n", pc, kvm_read_c0_guest_index(cop0));
- return EMULATE_FAIL;
+ index = kvm_read_c0_guest_index(cop0);
+ if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) {
+ /* UNDEFINED */
+ kvm_debug("[%#lx] TLBR Index %#x out of range\n", pc, index);
+ index &= KVM_MIPS_GUEST_TLB_SIZE - 1;
+ }
+
+ tlb = &vcpu->arch.guest_tlb[index];
+ kvm_write_c0_guest_pagemask(cop0, tlb->tlb_mask);
+ kvm_write_c0_guest_entrylo0(cop0, tlb->tlb_lo[0]);
+ kvm_write_c0_guest_entrylo1(cop0, tlb->tlb_lo[1]);
+ kvm_mips_change_entryhi(vcpu, tlb->tlb_hi);
+
+ return EMULATE_DONE;
}
/**
@@ -1105,11 +1267,9 @@ enum emulation_result kvm_mips_emulate_CP0(union mips_instruction inst,
struct kvm_vcpu *vcpu)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
- struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
enum emulation_result er = EMULATE_DONE;
u32 rt, rd, sel;
unsigned long curr_pc;
- int cpu, i;
/*
* Update PC and hold onto current PC in case there is
@@ -1143,6 +1303,9 @@ enum emulation_result kvm_mips_emulate_CP0(union mips_instruction inst,
case wait_op:
er = kvm_mips_emul_wait(vcpu);
break;
+ case hypcall_op:
+ er = kvm_mips_emul_hypcall(vcpu, inst);
+ break;
}
} else {
rt = inst.c0r_format.rt;
@@ -1208,44 +1371,8 @@ enum emulation_result kvm_mips_emulate_CP0(union mips_instruction inst,
kvm_change_c0_guest_ebase(cop0, 0x1ffff000,
vcpu->arch.gprs[rt]);
} else if (rd == MIPS_CP0_TLB_HI && sel == 0) {
- u32 nasid =
- vcpu->arch.gprs[rt] & KVM_ENTRYHI_ASID;
- if (((kvm_read_c0_guest_entryhi(cop0) &
- KVM_ENTRYHI_ASID) != nasid)) {
- trace_kvm_asid_change(vcpu,
- kvm_read_c0_guest_entryhi(cop0)
- & KVM_ENTRYHI_ASID,
- nasid);
-
- /*
- * Flush entries from the GVA page
- * tables.
- * Guest user page table will get
- * flushed lazily on re-entry to guest
- * user if the guest ASID actually
- * changes.
- */
- kvm_mips_flush_gva_pt(kern_mm->pgd,
- KMF_KERN);
-
- /*
- * Regenerate/invalidate kernel MMU
- * context.
- * The user MMU context will be
- * regenerated lazily on re-entry to
- * guest user if the guest ASID actually
- * changes.
- */
- preempt_disable();
- cpu = smp_processor_id();
- get_new_mmu_context(kern_mm, cpu);
- for_each_possible_cpu(i)
- if (i != cpu)
- cpu_context(i, kern_mm) = 0;
- preempt_enable();
- }
- kvm_write_c0_guest_entryhi(cop0,
- vcpu->arch.gprs[rt]);
+ kvm_mips_change_entryhi(vcpu,
+ vcpu->arch.gprs[rt]);
}
/* Are we writing to COUNT */
else if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
@@ -1474,9 +1601,8 @@ enum emulation_result kvm_mips_emulate_store(union mips_instruction inst,
struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
- enum emulation_result er = EMULATE_DO_MMIO;
+ enum emulation_result er;
u32 rt;
- u32 bytes;
void *data = run->mmio.data;
unsigned long curr_pc;
@@ -1491,103 +1617,74 @@ enum emulation_result kvm_mips_emulate_store(union mips_instruction inst,
rt = inst.i_format.rt;
+ run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
+ vcpu->arch.host_cp0_badvaddr);
+ if (run->mmio.phys_addr == KVM_INVALID_ADDR)
+ goto out_fail;
+
switch (inst.i_format.opcode) {
- case sb_op:
- bytes = 1;
- if (bytes > sizeof(run->mmio.data)) {
- kvm_err("%s: bad MMIO length: %d\n", __func__,
- run->mmio.len);
- }
- run->mmio.phys_addr =
- kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
- host_cp0_badvaddr);
- if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
- er = EMULATE_FAIL;
- break;
- }
- run->mmio.len = bytes;
- run->mmio.is_write = 1;
- vcpu->mmio_needed = 1;
- vcpu->mmio_is_write = 1;
- *(u8 *) data = vcpu->arch.gprs[rt];
- kvm_debug("OP_SB: eaddr: %#lx, gpr: %#lx, data: %#x\n",
- vcpu->arch.host_cp0_badvaddr, vcpu->arch.gprs[rt],
- *(u8 *) data);
+#if defined(CONFIG_64BIT) && defined(CONFIG_KVM_MIPS_VZ)
+ case sd_op:
+ run->mmio.len = 8;
+ *(u64 *)data = vcpu->arch.gprs[rt];
+ kvm_debug("[%#lx] OP_SD: eaddr: %#lx, gpr: %#lx, data: %#llx\n",
+ vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
+ vcpu->arch.gprs[rt], *(u64 *)data);
break;
+#endif
case sw_op:
- bytes = 4;
- if (bytes > sizeof(run->mmio.data)) {
- kvm_err("%s: bad MMIO length: %d\n", __func__,
- run->mmio.len);
- }
- run->mmio.phys_addr =
- kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
- host_cp0_badvaddr);
- if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
- er = EMULATE_FAIL;
- break;
- }
-
- run->mmio.len = bytes;
- run->mmio.is_write = 1;
- vcpu->mmio_needed = 1;
- vcpu->mmio_is_write = 1;
- *(u32 *) data = vcpu->arch.gprs[rt];
+ run->mmio.len = 4;
+ *(u32 *)data = vcpu->arch.gprs[rt];
kvm_debug("[%#lx] OP_SW: eaddr: %#lx, gpr: %#lx, data: %#x\n",
vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
- vcpu->arch.gprs[rt], *(u32 *) data);
+ vcpu->arch.gprs[rt], *(u32 *)data);
break;
case sh_op:
- bytes = 2;
- if (bytes > sizeof(run->mmio.data)) {
- kvm_err("%s: bad MMIO length: %d\n", __func__,
- run->mmio.len);
- }
- run->mmio.phys_addr =
- kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
- host_cp0_badvaddr);
- if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
- er = EMULATE_FAIL;
- break;
- }
-
- run->mmio.len = bytes;
- run->mmio.is_write = 1;
- vcpu->mmio_needed = 1;
- vcpu->mmio_is_write = 1;
- *(u16 *) data = vcpu->arch.gprs[rt];
+ run->mmio.len = 2;
+ *(u16 *)data = vcpu->arch.gprs[rt];
kvm_debug("[%#lx] OP_SH: eaddr: %#lx, gpr: %#lx, data: %#x\n",
vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
- vcpu->arch.gprs[rt], *(u32 *) data);
+ vcpu->arch.gprs[rt], *(u16 *)data);
+ break;
+
+ case sb_op:
+ run->mmio.len = 1;
+ *(u8 *)data = vcpu->arch.gprs[rt];
+
+ kvm_debug("[%#lx] OP_SB: eaddr: %#lx, gpr: %#lx, data: %#x\n",
+ vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
+ vcpu->arch.gprs[rt], *(u8 *)data);
break;
default:
kvm_err("Store not yet supported (inst=0x%08x)\n",
inst.word);
- er = EMULATE_FAIL;
- break;
+ goto out_fail;
}
- /* Rollback PC if emulation was unsuccessful */
- if (er == EMULATE_FAIL)
- vcpu->arch.pc = curr_pc;
+ run->mmio.is_write = 1;
+ vcpu->mmio_needed = 1;
+ vcpu->mmio_is_write = 1;
+ return EMULATE_DO_MMIO;
- return er;
+out_fail:
+ /* Rollback PC if emulation was unsuccessful */
+ vcpu->arch.pc = curr_pc;
+ return EMULATE_FAIL;
}
enum emulation_result kvm_mips_emulate_load(union mips_instruction inst,
u32 cause, struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
- enum emulation_result er = EMULATE_DO_MMIO;
+ enum emulation_result er;
unsigned long curr_pc;
u32 op, rt;
- u32 bytes;
rt = inst.i_format.rt;
op = inst.i_format.opcode;
@@ -1606,96 +1703,53 @@ enum emulation_result kvm_mips_emulate_load(union mips_instruction inst,
vcpu->arch.io_gpr = rt;
+ run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
+ vcpu->arch.host_cp0_badvaddr);
+ if (run->mmio.phys_addr == KVM_INVALID_ADDR)
+ return EMULATE_FAIL;
+
+ vcpu->mmio_needed = 2; /* signed */
switch (op) {
- case lw_op:
- bytes = 4;
- if (bytes > sizeof(run->mmio.data)) {
- kvm_err("%s: bad MMIO length: %d\n", __func__,
- run->mmio.len);
- er = EMULATE_FAIL;
- break;
- }
- run->mmio.phys_addr =
- kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
- host_cp0_badvaddr);
- if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
- er = EMULATE_FAIL;
- break;
- }
+#if defined(CONFIG_64BIT) && defined(CONFIG_KVM_MIPS_VZ)
+ case ld_op:
+ run->mmio.len = 8;
+ break;
- run->mmio.len = bytes;
- run->mmio.is_write = 0;
- vcpu->mmio_needed = 1;
- vcpu->mmio_is_write = 0;
+ case lwu_op:
+ vcpu->mmio_needed = 1; /* unsigned */
+ /* fall through */
+#endif
+ case lw_op:
+ run->mmio.len = 4;
break;
- case lh_op:
case lhu_op:
- bytes = 2;
- if (bytes > sizeof(run->mmio.data)) {
- kvm_err("%s: bad MMIO length: %d\n", __func__,
- run->mmio.len);
- er = EMULATE_FAIL;
- break;
- }
- run->mmio.phys_addr =
- kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
- host_cp0_badvaddr);
- if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
- er = EMULATE_FAIL;
- break;
- }
-
- run->mmio.len = bytes;
- run->mmio.is_write = 0;
- vcpu->mmio_needed = 1;
- vcpu->mmio_is_write = 0;
-
- if (op == lh_op)
- vcpu->mmio_needed = 2;
- else
- vcpu->mmio_needed = 1;
-
+ vcpu->mmio_needed = 1; /* unsigned */
+ /* fall through */
+ case lh_op:
+ run->mmio.len = 2;
break;
case lbu_op:
+ vcpu->mmio_needed = 1; /* unsigned */
+ /* fall through */
case lb_op:
- bytes = 1;
- if (bytes > sizeof(run->mmio.data)) {
- kvm_err("%s: bad MMIO length: %d\n", __func__,
- run->mmio.len);
- er = EMULATE_FAIL;
- break;
- }
- run->mmio.phys_addr =
- kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
- host_cp0_badvaddr);
- if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
- er = EMULATE_FAIL;
- break;
- }
-
- run->mmio.len = bytes;
- run->mmio.is_write = 0;
- vcpu->mmio_is_write = 0;
-
- if (op == lb_op)
- vcpu->mmio_needed = 2;
- else
- vcpu->mmio_needed = 1;
-
+ run->mmio.len = 1;
break;
default:
kvm_err("Load not yet supported (inst=0x%08x)\n",
inst.word);
- er = EMULATE_FAIL;
- break;
+ vcpu->mmio_needed = 0;
+ return EMULATE_FAIL;
}
- return er;
+ run->mmio.is_write = 0;
+ vcpu->mmio_is_write = 0;
+ return EMULATE_DO_MMIO;
}
+#ifndef CONFIG_KVM_MIPS_VZ
static enum emulation_result kvm_mips_guest_cache_op(int (*fn)(unsigned long),
unsigned long curr_pc,
unsigned long addr,
@@ -1786,11 +1840,35 @@ enum emulation_result kvm_mips_emulate_cache(union mips_instruction inst,
vcpu->arch.pc, vcpu->arch.gprs[31], cache, op, base,
arch->gprs[base], offset);
- if (cache == Cache_D)
+ if (cache == Cache_D) {
+#ifdef CONFIG_CPU_R4K_CACHE_TLB
r4k_blast_dcache();
- else if (cache == Cache_I)
+#else
+ switch (boot_cpu_type()) {
+ case CPU_CAVIUM_OCTEON3:
+ /* locally flush icache */
+ local_flush_icache_range(0, 0);
+ break;
+ default:
+ __flush_cache_all();
+ break;
+ }
+#endif
+ } else if (cache == Cache_I) {
+#ifdef CONFIG_CPU_R4K_CACHE_TLB
r4k_blast_icache();
- else {
+#else
+ switch (boot_cpu_type()) {
+ case CPU_CAVIUM_OCTEON3:
+ /* locally flush icache */
+ local_flush_icache_range(0, 0);
+ break;
+ default:
+ flush_icache_all();
+ break;
+ }
+#endif
+ } else {
kvm_err("%s: unsupported CACHE INDEX operation\n",
__func__);
return EMULATE_FAIL;
@@ -1870,18 +1948,6 @@ enum emulation_result kvm_mips_emulate_inst(u32 cause, u32 *opc,
case cop0_op:
er = kvm_mips_emulate_CP0(inst, opc, cause, run, vcpu);
break;
- case sb_op:
- case sh_op:
- case sw_op:
- er = kvm_mips_emulate_store(inst, cause, run, vcpu);
- break;
- case lb_op:
- case lbu_op:
- case lhu_op:
- case lh_op:
- case lw_op:
- er = kvm_mips_emulate_load(inst, cause, run, vcpu);
- break;
#ifndef CONFIG_CPU_MIPSR6
case cache_op:
@@ -1915,6 +1981,7 @@ unknown:
return er;
}
+#endif /* CONFIG_KVM_MIPS_VZ */
/**
* kvm_mips_guest_exception_base() - Find guest exception vector base address.
@@ -2524,8 +2591,15 @@ enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
vcpu->arch.pc = vcpu->arch.io_pc;
switch (run->mmio.len) {
+ case 8:
+ *gpr = *(s64 *)run->mmio.data;
+ break;
+
case 4:
- *gpr = *(s32 *) run->mmio.data;
+ if (vcpu->mmio_needed == 2)
+ *gpr = *(s32 *)run->mmio.data;
+ else
+ *gpr = *(u32 *)run->mmio.data;
break;
case 2:
diff --git a/arch/mips/kvm/entry.c b/arch/mips/kvm/entry.c
index c5b254c4d0da..16e1c93b484f 100644
--- a/arch/mips/kvm/entry.c
+++ b/arch/mips/kvm/entry.c
@@ -51,12 +51,15 @@
#define RA 31
/* Some CP0 registers */
+#define C0_PWBASE 5, 5
#define C0_HWRENA 7, 0
#define C0_BADVADDR 8, 0
#define C0_BADINSTR 8, 1
#define C0_BADINSTRP 8, 2
#define C0_ENTRYHI 10, 0
+#define C0_GUESTCTL1 10, 4
#define C0_STATUS 12, 0
+#define C0_GUESTCTL0 12, 6
#define C0_CAUSE 13, 0
#define C0_EPC 14, 0
#define C0_EBASE 15, 1
@@ -292,8 +295,8 @@ static void *kvm_mips_build_enter_guest(void *addr)
unsigned int i;
struct uasm_label labels[2];
struct uasm_reloc relocs[2];
- struct uasm_label *l = labels;
- struct uasm_reloc *r = relocs;
+ struct uasm_label __maybe_unused *l = labels;
+ struct uasm_reloc __maybe_unused *r = relocs;
memset(labels, 0, sizeof(labels));
memset(relocs, 0, sizeof(relocs));
@@ -302,7 +305,67 @@ static void *kvm_mips_build_enter_guest(void *addr)
UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, pc), K1);
UASM_i_MTC0(&p, T0, C0_EPC);
- /* Set the ASID for the Guest Kernel */
+#ifdef CONFIG_KVM_MIPS_VZ
+ /* Save normal linux process pgd (VZ guarantees pgd_reg is set) */
+ UASM_i_MFC0(&p, K0, c0_kscratch(), pgd_reg);
+ UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, host_pgd), K1);
+
+ /*
+ * Set up KVM GPA pgd.
+ * This does roughly the same as TLBMISS_HANDLER_SETUP_PGD():
+ * - call tlbmiss_handler_setup_pgd(mm->pgd)
+ * - write mm->pgd into CP0_PWBase
+ *
+ * We keep S0 pointing at struct kvm so we can load the ASID below.
+ */
+ UASM_i_LW(&p, S0, (int)offsetof(struct kvm_vcpu, kvm) -
+ (int)offsetof(struct kvm_vcpu, arch), K1);
+ UASM_i_LW(&p, A0, offsetof(struct kvm, arch.gpa_mm.pgd), S0);
+ UASM_i_LA(&p, T9, (unsigned long)tlbmiss_handler_setup_pgd);
+ uasm_i_jalr(&p, RA, T9);
+ /* delay slot */
+ if (cpu_has_htw)
+ UASM_i_MTC0(&p, A0, C0_PWBASE);
+ else
+ uasm_i_nop(&p);
+
+ /* Set GM bit to setup eret to VZ guest context */
+ uasm_i_addiu(&p, V1, ZERO, 1);
+ uasm_i_mfc0(&p, K0, C0_GUESTCTL0);
+ uasm_i_ins(&p, K0, V1, MIPS_GCTL0_GM_SHIFT, 1);
+ uasm_i_mtc0(&p, K0, C0_GUESTCTL0);
+
+ if (cpu_has_guestid) {
+ /*
+ * Set root mode GuestID, so that root TLB refill handler can
+ * use the correct GuestID in the root TLB.
+ */
+
+ /* Get current GuestID */
+ uasm_i_mfc0(&p, T0, C0_GUESTCTL1);
+ /* Set GuestCtl1.RID = GuestCtl1.ID */
+ uasm_i_ext(&p, T1, T0, MIPS_GCTL1_ID_SHIFT,
+ MIPS_GCTL1_ID_WIDTH);
+ uasm_i_ins(&p, T0, T1, MIPS_GCTL1_RID_SHIFT,
+ MIPS_GCTL1_RID_WIDTH);
+ uasm_i_mtc0(&p, T0, C0_GUESTCTL1);
+
+ /* GuestID handles dealiasing so we don't need to touch ASID */
+ goto skip_asid_restore;
+ }
+
+ /* Root ASID Dealias (RAD) */
+
+ /* Save host ASID */
+ UASM_i_MFC0(&p, K0, C0_ENTRYHI);
+ UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, host_entryhi),
+ K1);
+
+ /* Set the root ASID for the Guest */
+ UASM_i_ADDIU(&p, T1, S0,
+ offsetof(struct kvm, arch.gpa_mm.context.asid));
+#else
+ /* Set the ASID for the Guest Kernel or User */
UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, cop0), K1);
UASM_i_LW(&p, T0, offsetof(struct mips_coproc, reg[MIPS_CP0_STATUS][0]),
T0);
@@ -315,6 +378,7 @@ static void *kvm_mips_build_enter_guest(void *addr)
UASM_i_ADDIU(&p, T1, K1, offsetof(struct kvm_vcpu_arch,
guest_user_mm.context.asid));
uasm_l_kernel_asid(&l, p);
+#endif
/* t1: contains the base of the ASID array, need to get the cpu id */
/* smp_processor_id */
@@ -339,6 +403,7 @@ static void *kvm_mips_build_enter_guest(void *addr)
uasm_i_andi(&p, K0, K0, MIPS_ENTRYHI_ASID);
#endif
+#ifndef CONFIG_KVM_MIPS_VZ
/*
* Set up KVM T&E GVA pgd.
* This does roughly the same as TLBMISS_HANDLER_SETUP_PGD():
@@ -351,7 +416,11 @@ static void *kvm_mips_build_enter_guest(void *addr)
UASM_i_LA(&p, T9, (unsigned long)tlbmiss_handler_setup_pgd);
uasm_i_jalr(&p, RA, T9);
uasm_i_mtc0(&p, K0, C0_ENTRYHI);
-
+#else
+ /* Set up KVM VZ root ASID (!guestid) */
+ uasm_i_mtc0(&p, K0, C0_ENTRYHI);
+skip_asid_restore:
+#endif
uasm_i_ehb(&p);
/* Disable RDHWR access */
@@ -559,13 +628,10 @@ void *kvm_mips_build_exit(void *addr)
/* Now that context has been saved, we can use other registers */
/* Restore vcpu */
- UASM_i_MFC0(&p, A1, scratch_vcpu[0], scratch_vcpu[1]);
- uasm_i_move(&p, S1, A1);
+ UASM_i_MFC0(&p, S1, scratch_vcpu[0], scratch_vcpu[1]);
/* Restore run (vcpu->run) */
- UASM_i_LW(&p, A0, offsetof(struct kvm_vcpu, run), A1);
- /* Save pointer to run in s0, will be saved by the compiler */
- uasm_i_move(&p, S0, A0);
+ UASM_i_LW(&p, S0, offsetof(struct kvm_vcpu, run), S1);
/*
* Save Host level EPC, BadVaddr and Cause to VCPU, useful to process
@@ -641,6 +707,52 @@ void *kvm_mips_build_exit(void *addr)
uasm_l_msa_1(&l, p);
}
+#ifdef CONFIG_KVM_MIPS_VZ
+ /* Restore host ASID */
+ if (!cpu_has_guestid) {
+ UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, host_entryhi),
+ K1);
+ UASM_i_MTC0(&p, K0, C0_ENTRYHI);
+ }
+
+ /*
+ * Set up normal Linux process pgd.
+ * This does roughly the same as TLBMISS_HANDLER_SETUP_PGD():
+ * - call tlbmiss_handler_setup_pgd(mm->pgd)
+ * - write mm->pgd into CP0_PWBase
+ */
+ UASM_i_LW(&p, A0,
+ offsetof(struct kvm_vcpu_arch, host_pgd), K1);
+ UASM_i_LA(&p, T9, (unsigned long)tlbmiss_handler_setup_pgd);
+ uasm_i_jalr(&p, RA, T9);
+ /* delay slot */
+ if (cpu_has_htw)
+ UASM_i_MTC0(&p, A0, C0_PWBASE);
+ else
+ uasm_i_nop(&p);
+
+ /* Clear GM bit so we don't enter guest mode when EXL is cleared */
+ uasm_i_mfc0(&p, K0, C0_GUESTCTL0);
+ uasm_i_ins(&p, K0, ZERO, MIPS_GCTL0_GM_SHIFT, 1);
+ uasm_i_mtc0(&p, K0, C0_GUESTCTL0);
+
+ /* Save GuestCtl0 so we can access GExcCode after CPU migration */
+ uasm_i_sw(&p, K0,
+ offsetof(struct kvm_vcpu_arch, host_cp0_guestctl0), K1);
+
+ if (cpu_has_guestid) {
+ /*
+ * Clear root mode GuestID, so that root TLB operations use the
+ * root GuestID in the root TLB.
+ */
+ uasm_i_mfc0(&p, T0, C0_GUESTCTL1);
+ /* Set GuestCtl1.RID = MIPS_GCTL1_ROOT_GUESTID (i.e. 0) */
+ uasm_i_ins(&p, T0, ZERO, MIPS_GCTL1_RID_SHIFT,
+ MIPS_GCTL1_RID_WIDTH);
+ uasm_i_mtc0(&p, T0, C0_GUESTCTL1);
+ }
+#endif
+
/* Now that the new EBASE has been loaded, unset BEV and KSU_USER */
uasm_i_addiu(&p, AT, ZERO, ~(ST0_EXL | KSU_USER | ST0_IE));
uasm_i_and(&p, V0, V0, AT);
@@ -680,6 +792,8 @@ void *kvm_mips_build_exit(void *addr)
* Now jump to the kvm_mips_handle_exit() to see if we can deal
* with this in the kernel
*/
+ uasm_i_move(&p, A0, S0);
+ uasm_i_move(&p, A1, S1);
UASM_i_LA(&p, T9, (unsigned long)kvm_mips_handle_exit);
uasm_i_jalr(&p, RA, T9);
UASM_i_ADDIU(&p, SP, SP, -CALLFRAME_SIZ);
diff --git a/arch/mips/kvm/hypcall.c b/arch/mips/kvm/hypcall.c
new file mode 100644
index 000000000000..83063435195f
--- /dev/null
+++ b/arch/mips/kvm/hypcall.c
@@ -0,0 +1,53 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * KVM/MIPS: Hypercall handling.
+ *
+ * Copyright (C) 2015 Imagination Technologies Ltd.
+ */
+
+#include <linux/kernel.h>
+#include <linux/kvm_host.h>
+#include <linux/kvm_para.h>
+
+#define MAX_HYPCALL_ARGS 4
+
+enum emulation_result kvm_mips_emul_hypcall(struct kvm_vcpu *vcpu,
+ union mips_instruction inst)
+{
+ unsigned int code = (inst.co_format.code >> 5) & 0x3ff;
+
+ kvm_debug("[%#lx] HYPCALL %#03x\n", vcpu->arch.pc, code);
+
+ switch (code) {
+ case 0:
+ return EMULATE_HYPERCALL;
+ default:
+ return EMULATE_FAIL;
+	}
+}
+
+static int kvm_mips_hypercall(struct kvm_vcpu *vcpu, unsigned long num,
+ const unsigned long *args, unsigned long *hret)
+{
+ /* Report unimplemented hypercall to guest */
+ *hret = -KVM_ENOSYS;
+ return RESUME_GUEST;
+}
+
+int kvm_mips_handle_hypcall(struct kvm_vcpu *vcpu)
+{
+ unsigned long num, args[MAX_HYPCALL_ARGS];
+
+ /* read hypcall number and arguments */
+ num = vcpu->arch.gprs[2]; /* v0 */
+ args[0] = vcpu->arch.gprs[4]; /* a0 */
+ args[1] = vcpu->arch.gprs[5]; /* a1 */
+ args[2] = vcpu->arch.gprs[6]; /* a2 */
+ args[3] = vcpu->arch.gprs[7]; /* a3 */
+
+ return kvm_mips_hypercall(vcpu, num,
+ args, &vcpu->arch.gprs[2] /* v0 */);
+}
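The ABI above passes the hypercall service number in v0, up to four arguments in a0-a3, and returns the result to the guest in v0; kvm_mips_hypercall() itself is still a stub that reports -KVM_ENOSYS. A minimal sketch of how the dispatcher could route service numbers once any are defined (the service number below is invented purely for illustration and is not part of this patch):

static int example_hypercall_dispatch(struct kvm_vcpu *vcpu, unsigned long num,
				      const unsigned long *args,
				      unsigned long *hret)
{
	switch (num) {
	case 0x100:				/* hypothetical service */
		*hret = args[0] + args[1];	/* handed back to the guest in v0 */
		break;
	default:
		*hret = -KVM_ENOSYS;		/* unknown service */
		break;
	}
	return RESUME_GUEST;
}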
diff --git a/arch/mips/kvm/interrupt.h b/arch/mips/kvm/interrupt.h
index fb118a2c8379..3bf0a49725e8 100644
--- a/arch/mips/kvm/interrupt.h
+++ b/arch/mips/kvm/interrupt.h
@@ -30,8 +30,13 @@
#define C_TI (_ULCAST_(1) << 30)
+#ifdef CONFIG_KVM_MIPS_VZ
+#define KVM_MIPS_IRQ_DELIVER_ALL_AT_ONCE (1)
+#define KVM_MIPS_IRQ_CLEAR_ALL_AT_ONCE (1)
+#else
#define KVM_MIPS_IRQ_DELIVER_ALL_AT_ONCE (0)
#define KVM_MIPS_IRQ_CLEAR_ALL_AT_ONCE (0)
+#endif
void kvm_mips_queue_irq(struct kvm_vcpu *vcpu, unsigned int priority);
void kvm_mips_dequeue_irq(struct kvm_vcpu *vcpu, unsigned int priority);
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index 15a1b1716c2e..d4b2ad18eef2 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -59,6 +59,16 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
{ "fpe", VCPU_STAT(fpe_exits), KVM_STAT_VCPU },
{ "msa_disabled", VCPU_STAT(msa_disabled_exits), KVM_STAT_VCPU },
{ "flush_dcache", VCPU_STAT(flush_dcache_exits), KVM_STAT_VCPU },
+#ifdef CONFIG_KVM_MIPS_VZ
+ { "vz_gpsi", VCPU_STAT(vz_gpsi_exits), KVM_STAT_VCPU },
+ { "vz_gsfc", VCPU_STAT(vz_gsfc_exits), KVM_STAT_VCPU },
+ { "vz_hc", VCPU_STAT(vz_hc_exits), KVM_STAT_VCPU },
+ { "vz_grr", VCPU_STAT(vz_grr_exits), KVM_STAT_VCPU },
+ { "vz_gva", VCPU_STAT(vz_gva_exits), KVM_STAT_VCPU },
+ { "vz_ghfc", VCPU_STAT(vz_ghfc_exits), KVM_STAT_VCPU },
+ { "vz_gpa", VCPU_STAT(vz_gpa_exits), KVM_STAT_VCPU },
+ { "vz_resvd", VCPU_STAT(vz_resvd_exits), KVM_STAT_VCPU },
+#endif
{ "halt_successful_poll", VCPU_STAT(halt_successful_poll), KVM_STAT_VCPU },
{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll), KVM_STAT_VCPU },
{ "halt_poll_invalid", VCPU_STAT(halt_poll_invalid), KVM_STAT_VCPU },
@@ -66,6 +76,19 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
{NULL}
};
+bool kvm_trace_guest_mode_change;
+
+int kvm_guest_mode_change_trace_reg(void)
+{
+ kvm_trace_guest_mode_change = 1;
+ return 0;
+}
+
+void kvm_guest_mode_change_trace_unreg(void)
+{
+ kvm_trace_guest_mode_change = 0;
+}
+
/*
 * XXXKYMA: We are simulating a processor that has the WII bit set in
* Config7, so we are "runnable" if interrupts are pending
@@ -82,7 +105,12 @@ int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
int kvm_arch_hardware_enable(void)
{
- return 0;
+ return kvm_mips_callbacks->hardware_enable();
+}
+
+void kvm_arch_hardware_disable(void)
+{
+ kvm_mips_callbacks->hardware_disable();
}
int kvm_arch_hardware_setup(void)
@@ -97,6 +125,18 @@ void kvm_arch_check_processor_compat(void *rtn)
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
+ switch (type) {
+#ifdef CONFIG_KVM_MIPS_VZ
+ case KVM_VM_MIPS_VZ:
+#else
+ case KVM_VM_MIPS_TE:
+#endif
+ break;
+ default:
+ /* Unsupported KVM type */
+ return -EINVAL;
+	}
+
/* Allocate page table to map GPA -> RPA */
kvm->arch.gpa_mm.pgd = kvm_pgd_alloc();
if (!kvm->arch.gpa_mm.pgd)
@@ -301,8 +341,10 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
/* Build guest exception vectors dynamically in unmapped memory */
handler = gebase + 0x2000;
- /* TLB refill */
+ /* TLB refill (or XTLB refill on 64-bit VZ where KX=1) */
refill_start = gebase;
+ if (IS_ENABLED(CONFIG_KVM_MIPS_VZ) && IS_ENABLED(CONFIG_64BIT))
+ refill_start += 0x080;
refill_end = kvm_mips_build_tlb_refill_exception(refill_start, handler);
/* General Exception Entry point */
@@ -353,9 +395,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
/* Init */
vcpu->arch.last_sched_cpu = -1;
-
- /* Start off the timer */
- kvm_mips_init_count(vcpu);
+ vcpu->arch.last_exec_cpu = -1;
return vcpu;
@@ -1030,9 +1070,6 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_IMMEDIATE_EXIT:
r = 1;
break;
- case KVM_CAP_COALESCED_MMIO:
- r = KVM_COALESCED_MMIO_PAGE_OFFSET;
- break;
case KVM_CAP_NR_VCPUS:
r = num_online_cpus();
break;
@@ -1059,7 +1096,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
r = cpu_has_msa && !(boot_cpu_data.msa_id & MSA_IR_WRPF);
break;
default:
- r = 0;
+ r = kvm_mips_callbacks->check_extension(kvm, ext);
break;
}
return r;
@@ -1067,7 +1104,8 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
- return kvm_mips_pending_timer(vcpu);
+ return kvm_mips_pending_timer(vcpu) ||
+ kvm_read_c0_guest_cause(vcpu->arch.cop0) & C_TI;
}
int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu)
@@ -1092,7 +1130,7 @@ int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu)
kvm_debug("\tlo: 0x%08lx\n", vcpu->arch.lo);
cop0 = vcpu->arch.cop0;
- kvm_debug("\tStatus: 0x%08lx, Cause: 0x%08lx\n",
+ kvm_debug("\tStatus: 0x%08x, Cause: 0x%08x\n",
kvm_read_c0_guest_status(cop0),
kvm_read_c0_guest_cause(cop0));
@@ -1208,7 +1246,8 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
vcpu->mode = OUTSIDE_GUEST_MODE;
/* re-enable HTW before enabling interrupts */
- htw_start();
+ if (!IS_ENABLED(CONFIG_KVM_MIPS_VZ))
+ htw_start();
/* Set a default exit reason */
run->exit_reason = KVM_EXIT_UNKNOWN;
@@ -1226,17 +1265,20 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
cause, opc, run, vcpu);
trace_kvm_exit(vcpu, exccode);
- /*
- * Do a privilege check, if in UM most of these exit conditions end up
- * causing an exception to be delivered to the Guest Kernel
- */
- er = kvm_mips_check_privilege(cause, opc, run, vcpu);
- if (er == EMULATE_PRIV_FAIL) {
- goto skip_emul;
- } else if (er == EMULATE_FAIL) {
- run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
- ret = RESUME_HOST;
- goto skip_emul;
+ if (!IS_ENABLED(CONFIG_KVM_MIPS_VZ)) {
+ /*
+ * Do a privilege check, if in UM most of these exit conditions
+ * end up causing an exception to be delivered to the Guest
+ * Kernel
+ */
+ er = kvm_mips_check_privilege(cause, opc, run, vcpu);
+ if (er == EMULATE_PRIV_FAIL) {
+ goto skip_emul;
+ } else if (er == EMULATE_FAIL) {
+ run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ ret = RESUME_HOST;
+ goto skip_emul;
+ }
}
switch (exccode) {
@@ -1267,7 +1309,7 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
break;
case EXCCODE_TLBS:
- kvm_debug("TLB ST fault: cause %#x, status %#lx, PC: %p, BadVaddr: %#lx\n",
+ kvm_debug("TLB ST fault: cause %#x, status %#x, PC: %p, BadVaddr: %#lx\n",
cause, kvm_read_c0_guest_status(vcpu->arch.cop0), opc,
badvaddr);
@@ -1328,12 +1370,17 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
ret = kvm_mips_callbacks->handle_msa_disabled(vcpu);
break;
+ case EXCCODE_GE:
+ /* defer exit accounting to handler */
+ ret = kvm_mips_callbacks->handle_guest_exit(vcpu);
+ break;
+
default:
if (cause & CAUSEF_BD)
opc += 1;
inst = 0;
kvm_get_badinstr(opc, vcpu, &inst);
- kvm_err("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#lx\n",
+ kvm_err("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#x\n",
exccode, opc, inst, badvaddr,
kvm_read_c0_guest_status(vcpu->arch.cop0));
kvm_arch_vcpu_dump_regs(vcpu);
@@ -1346,6 +1393,9 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
skip_emul:
local_irq_disable();
+ if (ret == RESUME_GUEST)
+ kvm_vz_acquire_htimer(vcpu);
+
if (er == EMULATE_DONE && !(ret & RESUME_HOST))
kvm_mips_deliver_interrupts(vcpu, cause);
@@ -1391,7 +1441,8 @@ skip_emul:
}
/* Disable HTW before returning to guest or host */
- htw_stop();
+ if (!IS_ENABLED(CONFIG_KVM_MIPS_VZ))
+ htw_stop();
return ret;
}
@@ -1527,16 +1578,18 @@ void kvm_drop_fpu(struct kvm_vcpu *vcpu)
void kvm_lose_fpu(struct kvm_vcpu *vcpu)
{
/*
- * FPU & MSA get disabled in root context (hardware) when it is disabled
- * in guest context (software), but the register state in the hardware
- * may still be in use. This is why we explicitly re-enable the hardware
- * before saving.
+ * With T&E, FPU & MSA get disabled in root context (hardware) when it
+ * is disabled in guest context (software), but the register state in
+ * the hardware may still be in use.
+ * This is why we explicitly re-enable the hardware before saving.
*/
preempt_disable();
if (cpu_has_msa && vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) {
- set_c0_config5(MIPS_CONF5_MSAEN);
- enable_fpu_hazard();
+ if (!IS_ENABLED(CONFIG_KVM_MIPS_VZ)) {
+ set_c0_config5(MIPS_CONF5_MSAEN);
+ enable_fpu_hazard();
+ }
__kvm_save_msa(&vcpu->arch);
trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU_MSA);
@@ -1549,8 +1602,10 @@ void kvm_lose_fpu(struct kvm_vcpu *vcpu)
}
vcpu->arch.aux_inuse &= ~(KVM_MIPS_AUX_FPU | KVM_MIPS_AUX_MSA);
} else if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) {
- set_c0_status(ST0_CU1);
- enable_fpu_hazard();
+ if (!IS_ENABLED(CONFIG_KVM_MIPS_VZ)) {
+ set_c0_status(ST0_CU1);
+ enable_fpu_hazard();
+ }
__kvm_save_fpu(&vcpu->arch);
vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_FPU;
diff --git a/arch/mips/kvm/mmu.c b/arch/mips/kvm/mmu.c
index cb0faade311e..ee64db032793 100644
--- a/arch/mips/kvm/mmu.c
+++ b/arch/mips/kvm/mmu.c
@@ -992,6 +992,22 @@ static pte_t kvm_mips_gpa_pte_to_gva_mapped(pte_t pte, long entrylo)
return kvm_mips_gpa_pte_to_gva_unmapped(pte);
}
+#ifdef CONFIG_KVM_MIPS_VZ
+int kvm_mips_handle_vz_root_tlb_fault(unsigned long badvaddr,
+ struct kvm_vcpu *vcpu,
+ bool write_fault)
+{
+ int ret;
+
+ ret = kvm_mips_map_page(vcpu, badvaddr, write_fault, NULL, NULL);
+ if (ret)
+ return ret;
+
+ /* Invalidate this entry in the TLB */
+ return kvm_vz_host_tlb_inv(vcpu, badvaddr);
+}
+#endif
+
/* XXXKYMA: Must be called with interrupts disabled */
int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
struct kvm_vcpu *vcpu,
@@ -1225,6 +1241,10 @@ int kvm_get_inst(u32 *opc, struct kvm_vcpu *vcpu, u32 *out)
{
int err;
+ if (WARN(IS_ENABLED(CONFIG_KVM_MIPS_VZ),
+ "Expect BadInstr/BadInstrP registers to be used with VZ\n"))
+ return -EINVAL;
+
retry:
kvm_trap_emul_gva_lockless_begin(vcpu);
err = get_user(*out, opc);
diff --git a/arch/mips/kvm/tlb.c b/arch/mips/kvm/tlb.c
index 2819eb793345..7c6336dd2638 100644
--- a/arch/mips/kvm/tlb.c
+++ b/arch/mips/kvm/tlb.c
@@ -33,6 +33,25 @@
#define KVM_GUEST_PC_TLB 0
#define KVM_GUEST_SP_TLB 1
+#ifdef CONFIG_KVM_MIPS_VZ
+unsigned long GUESTID_MASK;
+EXPORT_SYMBOL_GPL(GUESTID_MASK);
+unsigned long GUESTID_FIRST_VERSION;
+EXPORT_SYMBOL_GPL(GUESTID_FIRST_VERSION);
+unsigned long GUESTID_VERSION_MASK;
+EXPORT_SYMBOL_GPL(GUESTID_VERSION_MASK);
+
+static u32 kvm_mips_get_root_asid(struct kvm_vcpu *vcpu)
+{
+ struct mm_struct *gpa_mm = &vcpu->kvm->arch.gpa_mm;
+
+ if (cpu_has_guestid)
+ return 0;
+ else
+ return cpu_asid(smp_processor_id(), gpa_mm);
+}
+#endif
+
static u32 kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
{
struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
@@ -166,6 +185,13 @@ int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va,
local_irq_restore(flags);
+ /*
+ * We don't want to get reserved instruction exceptions for missing tlb
+ * entries.
+ */
+ if (cpu_has_vtag_icache)
+ flush_icache_all();
+
if (user && idx_user >= 0)
kvm_debug("%s: Invalidated guest user entryhi %#lx @ idx %d\n",
__func__, (va & VPN2_MASK) |
@@ -179,6 +205,421 @@ int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va,
}
EXPORT_SYMBOL_GPL(kvm_mips_host_tlb_inv);
+#ifdef CONFIG_KVM_MIPS_VZ
+
+/* GuestID management */
+
+/**
+ * clear_root_gid() - Set GuestCtl1.RID for normal root operation.
+ */
+static inline void clear_root_gid(void)
+{
+ if (cpu_has_guestid) {
+ clear_c0_guestctl1(MIPS_GCTL1_RID);
+ mtc0_tlbw_hazard();
+ }
+}
+
+/**
+ * set_root_gid_to_guest_gid() - Set GuestCtl1.RID to match GuestCtl1.ID.
+ *
+ * Sets the root GuestID to match the current guest GuestID, for TLB operation
+ * on the GPA->RPA mappings in the root TLB.
+ *
+ * The caller must be sure to disable HTW while the root GID is set, and
+ * possibly longer if TLB registers are modified.
+ */
+static inline void set_root_gid_to_guest_gid(void)
+{
+ unsigned int guestctl1;
+
+ if (cpu_has_guestid) {
+ back_to_back_c0_hazard();
+ guestctl1 = read_c0_guestctl1();
+ guestctl1 = (guestctl1 & ~MIPS_GCTL1_RID) |
+ ((guestctl1 & MIPS_GCTL1_ID) >> MIPS_GCTL1_ID_SHIFT)
+ << MIPS_GCTL1_RID_SHIFT;
+ write_c0_guestctl1(guestctl1);
+ mtc0_tlbw_hazard();
+ }
+}
+
+int kvm_vz_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va)
+{
+ int idx;
+ unsigned long flags, old_entryhi;
+
+ local_irq_save(flags);
+ htw_stop();
+
+ /* Set root GuestID for root probe and write of guest TLB entry */
+ set_root_gid_to_guest_gid();
+
+ old_entryhi = read_c0_entryhi();
+
+ idx = _kvm_mips_host_tlb_inv((va & VPN2_MASK) |
+ kvm_mips_get_root_asid(vcpu));
+
+ write_c0_entryhi(old_entryhi);
+ clear_root_gid();
+ mtc0_tlbw_hazard();
+
+ htw_start();
+ local_irq_restore(flags);
+
+ /*
+ * We don't want to get reserved instruction exceptions for missing tlb
+ * entries.
+ */
+ if (cpu_has_vtag_icache)
+ flush_icache_all();
+
+ if (idx > 0)
+ kvm_debug("%s: Invalidated root entryhi %#lx @ idx %d\n",
+ __func__, (va & VPN2_MASK) |
+ kvm_mips_get_root_asid(vcpu), idx);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(kvm_vz_host_tlb_inv);
+
+/**
+ * kvm_vz_guest_tlb_lookup() - Lookup a guest VZ TLB mapping.
+ * @vcpu: KVM VCPU pointer.
+ * @gva: Guest virtual address in a TLB mapped guest segment.
+ * @gpa: Pointer to output guest physical address it maps to.
+ *
+ * Converts a guest virtual address in a guest TLB mapped segment to a guest
+ * physical address, by probing the guest TLB.
+ *
+ * Returns: 0 if guest TLB mapping exists for @gva. *@gpa will have been
+ * written.
+ * -EFAULT if no guest TLB mapping exists for @gva. *@gpa may not
+ * have been written.
+ */
+int kvm_vz_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long gva,
+ unsigned long *gpa)
+{
+ unsigned long o_entryhi, o_entrylo[2], o_pagemask;
+ unsigned int o_index;
+ unsigned long entrylo[2], pagemask, pagemaskbit, pa;
+ unsigned long flags;
+ int index;
+
+ /* Probe the guest TLB for a mapping */
+ local_irq_save(flags);
+ /* Set root GuestID for root probe of guest TLB entry */
+ htw_stop();
+ set_root_gid_to_guest_gid();
+
+ o_entryhi = read_gc0_entryhi();
+ o_index = read_gc0_index();
+
+ write_gc0_entryhi((o_entryhi & 0x3ff) | (gva & ~0xfffl));
+ mtc0_tlbw_hazard();
+ guest_tlb_probe();
+ tlb_probe_hazard();
+
+ index = read_gc0_index();
+ if (index < 0) {
+ /* No match, fail */
+ write_gc0_entryhi(o_entryhi);
+ write_gc0_index(o_index);
+
+ clear_root_gid();
+ htw_start();
+ local_irq_restore(flags);
+ return -EFAULT;
+ }
+
+ /* Match! read the TLB entry */
+ o_entrylo[0] = read_gc0_entrylo0();
+ o_entrylo[1] = read_gc0_entrylo1();
+ o_pagemask = read_gc0_pagemask();
+
+ mtc0_tlbr_hazard();
+ guest_tlb_read();
+ tlb_read_hazard();
+
+ entrylo[0] = read_gc0_entrylo0();
+ entrylo[1] = read_gc0_entrylo1();
+ pagemask = ~read_gc0_pagemask() & ~0x1fffl;
+
+ write_gc0_entryhi(o_entryhi);
+ write_gc0_index(o_index);
+ write_gc0_entrylo0(o_entrylo[0]);
+ write_gc0_entrylo1(o_entrylo[1]);
+ write_gc0_pagemask(o_pagemask);
+
+ clear_root_gid();
+ htw_start();
+ local_irq_restore(flags);
+
+ /* Select one of the EntryLo values and interpret the GPA */
+ pagemaskbit = (pagemask ^ (pagemask & (pagemask - 1))) >> 1;
+ pa = entrylo[!!(gva & pagemaskbit)];
+
+ /*
+ * TLB entry may have become invalid since TLB probe if physical FTLB
+ * entries are shared between threads (e.g. I6400).
+ */
+ if (!(pa & ENTRYLO_V))
+ return -EFAULT;
+
+ /*
+ * Note, this doesn't take guest MIPS32 XPA into account, where PFN is
+ * split with XI/RI in the middle.
+ */
+ pa = (pa << 6) & ~0xfffl;
+ pa |= gva & ~(pagemask | pagemaskbit);
+
+ *gpa = pa;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(kvm_vz_guest_tlb_lookup);
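A worked example of the EntryLo selection in kvm_vz_guest_tlb_lookup(), with illustrative values only: for 4K guest pages the guest PageMask is 0, so pagemask = ~0 & ~0x1fff = ...ffffe000 and pagemaskbit = 0x2000 >> 1 = 0x1000, the bit that distinguishes the even page (EntryLo0) from the odd page (EntryLo1) of the TLB pair. For gva = 0x7fff5678, gva & 0x1000 is non-zero, so entrylo[1] is used; if it maps physical frame 0x12345000 (PFN field 0x12345) with V set, then (pa << 6) & ~0xfff recovers 0x12345000, and pa |= gva & ~(pagemask | pagemaskbit) adds the 0x678 page offset, giving *gpa = 0x12345678.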
+
+/**
+ * kvm_vz_local_flush_roottlb_all_guests() - Flush all root TLB entries for
+ * guests.
+ *
+ * Invalidate all entries in root tlb which are GPA mappings.
+ */
+void kvm_vz_local_flush_roottlb_all_guests(void)
+{
+ unsigned long flags;
+ unsigned long old_entryhi, old_pagemask, old_guestctl1;
+ int entry;
+
+ if (WARN_ON(!cpu_has_guestid))
+ return;
+
+ local_irq_save(flags);
+ htw_stop();
+
+ /* TLBR may clobber EntryHi.ASID, PageMask, and GuestCtl1.RID */
+ old_entryhi = read_c0_entryhi();
+ old_pagemask = read_c0_pagemask();
+ old_guestctl1 = read_c0_guestctl1();
+
+ /*
+ * Invalidate guest entries in root TLB while leaving root entries
+ * intact when possible.
+ */
+ for (entry = 0; entry < current_cpu_data.tlbsize; entry++) {
+ write_c0_index(entry);
+ mtc0_tlbw_hazard();
+ tlb_read();
+ tlb_read_hazard();
+
+ /* Don't invalidate non-guest (RVA) mappings in the root TLB */
+ if (!(read_c0_guestctl1() & MIPS_GCTL1_RID))
+ continue;
+
+ /* Make sure all entries differ. */
+ write_c0_entryhi(UNIQUE_ENTRYHI(entry));
+ write_c0_entrylo0(0);
+ write_c0_entrylo1(0);
+ write_c0_guestctl1(0);
+ mtc0_tlbw_hazard();
+ tlb_write_indexed();
+ }
+
+ write_c0_entryhi(old_entryhi);
+ write_c0_pagemask(old_pagemask);
+ write_c0_guestctl1(old_guestctl1);
+ tlbw_use_hazard();
+
+ htw_start();
+ local_irq_restore(flags);
+}
+EXPORT_SYMBOL_GPL(kvm_vz_local_flush_roottlb_all_guests);
+
+/**
+ * kvm_vz_local_flush_guesttlb_all() - Flush all guest TLB entries.
+ *
+ * Invalidate all entries in guest tlb irrespective of guestid.
+ */
+void kvm_vz_local_flush_guesttlb_all(void)
+{
+ unsigned long flags;
+ unsigned long old_index;
+ unsigned long old_entryhi;
+ unsigned long old_entrylo[2];
+ unsigned long old_pagemask;
+ int entry;
+ u64 cvmmemctl2 = 0;
+
+ local_irq_save(flags);
+
+ /* Preserve all clobbered guest registers */
+ old_index = read_gc0_index();
+ old_entryhi = read_gc0_entryhi();
+ old_entrylo[0] = read_gc0_entrylo0();
+ old_entrylo[1] = read_gc0_entrylo1();
+ old_pagemask = read_gc0_pagemask();
+
+ switch (current_cpu_type()) {
+ case CPU_CAVIUM_OCTEON3:
+ /* Inhibit machine check due to multiple matching TLB entries */
+ cvmmemctl2 = read_c0_cvmmemctl2();
+ cvmmemctl2 |= CVMMEMCTL2_INHIBITTS;
+ write_c0_cvmmemctl2(cvmmemctl2);
+ break;
+	}
+
+ /* Invalidate guest entries in guest TLB */
+ write_gc0_entrylo0(0);
+ write_gc0_entrylo1(0);
+ write_gc0_pagemask(0);
+ for (entry = 0; entry < current_cpu_data.guest.tlbsize; entry++) {
+ /* Make sure all entries differ. */
+ write_gc0_index(entry);
+ write_gc0_entryhi(UNIQUE_GUEST_ENTRYHI(entry));
+ mtc0_tlbw_hazard();
+ guest_tlb_write_indexed();
+ }
+
+ if (cvmmemctl2) {
+ cvmmemctl2 &= ~CVMMEMCTL2_INHIBITTS;
+ write_c0_cvmmemctl2(cvmmemctl2);
+	}
+
+ write_gc0_index(old_index);
+ write_gc0_entryhi(old_entryhi);
+ write_gc0_entrylo0(old_entrylo[0]);
+ write_gc0_entrylo1(old_entrylo[1]);
+ write_gc0_pagemask(old_pagemask);
+ tlbw_use_hazard();
+
+ local_irq_restore(flags);
+}
+EXPORT_SYMBOL_GPL(kvm_vz_local_flush_guesttlb_all);
+
+/**
+ * kvm_vz_save_guesttlb() - Save a range of guest TLB entries.
+ * @buf: Buffer to write TLB entries into.
+ * @index: Start index.
+ * @count: Number of entries to save.
+ *
+ * Save a range of guest TLB entries. The caller must ensure interrupts are
+ * disabled.
+ */
+void kvm_vz_save_guesttlb(struct kvm_mips_tlb *buf, unsigned int index,
+ unsigned int count)
+{
+ unsigned int end = index + count;
+ unsigned long old_entryhi, old_entrylo0, old_entrylo1, old_pagemask;
+ unsigned int guestctl1 = 0;
+ int old_index, i;
+
+ /* Save registers we're about to clobber */
+ old_index = read_gc0_index();
+ old_entryhi = read_gc0_entryhi();
+ old_entrylo0 = read_gc0_entrylo0();
+ old_entrylo1 = read_gc0_entrylo1();
+ old_pagemask = read_gc0_pagemask();
+
+ /* Set root GuestID for root probe */
+ htw_stop();
+ set_root_gid_to_guest_gid();
+ if (cpu_has_guestid)
+ guestctl1 = read_c0_guestctl1();
+
+ /* Read each entry from guest TLB */
+ for (i = index; i < end; ++i, ++buf) {
+ write_gc0_index(i);
+
+ mtc0_tlbr_hazard();
+ guest_tlb_read();
+ tlb_read_hazard();
+
+ if (cpu_has_guestid &&
+ (read_c0_guestctl1() ^ guestctl1) & MIPS_GCTL1_RID) {
+ /* Entry invalid or belongs to another guest */
+ buf->tlb_hi = UNIQUE_GUEST_ENTRYHI(i);
+ buf->tlb_lo[0] = 0;
+ buf->tlb_lo[1] = 0;
+ buf->tlb_mask = 0;
+ } else {
+ /* Entry belongs to the right guest */
+ buf->tlb_hi = read_gc0_entryhi();
+ buf->tlb_lo[0] = read_gc0_entrylo0();
+ buf->tlb_lo[1] = read_gc0_entrylo1();
+ buf->tlb_mask = read_gc0_pagemask();
+ }
+ }
+
+ /* Clear root GuestID again */
+ clear_root_gid();
+ htw_start();
+
+ /* Restore clobbered registers */
+ write_gc0_index(old_index);
+ write_gc0_entryhi(old_entryhi);
+ write_gc0_entrylo0(old_entrylo0);
+ write_gc0_entrylo1(old_entrylo1);
+ write_gc0_pagemask(old_pagemask);
+
+ tlbw_use_hazard();
+}
+EXPORT_SYMBOL_GPL(kvm_vz_save_guesttlb);
+
+/**
+ * kvm_vz_load_guesttlb() - Load a range of guest TLB entries.
+ * @buf: Buffer to read TLB entries from.
+ * @index: Start index.
+ * @count: Number of entries to load.
+ *
+ * Load a range of guest TLB entries. The caller must ensure interrupts are
+ * disabled.
+ */
+void kvm_vz_load_guesttlb(const struct kvm_mips_tlb *buf, unsigned int index,
+ unsigned int count)
+{
+ unsigned int end = index + count;
+ unsigned long old_entryhi, old_entrylo0, old_entrylo1, old_pagemask;
+ int old_index, i;
+
+ /* Save registers we're about to clobber */
+ old_index = read_gc0_index();
+ old_entryhi = read_gc0_entryhi();
+ old_entrylo0 = read_gc0_entrylo0();
+ old_entrylo1 = read_gc0_entrylo1();
+ old_pagemask = read_gc0_pagemask();
+
+ /* Set root GuestID for root probe */
+ htw_stop();
+ set_root_gid_to_guest_gid();
+
+ /* Write each entry to guest TLB */
+ for (i = index; i < end; ++i, ++buf) {
+ write_gc0_index(i);
+ write_gc0_entryhi(buf->tlb_hi);
+ write_gc0_entrylo0(buf->tlb_lo[0]);
+ write_gc0_entrylo1(buf->tlb_lo[1]);
+ write_gc0_pagemask(buf->tlb_mask);
+
+ mtc0_tlbw_hazard();
+ guest_tlb_write_indexed();
+ }
+
+ /* Clear root GuestID again */
+ clear_root_gid();
+ htw_start();
+
+ /* Restore clobbered registers */
+ write_gc0_index(old_index);
+ write_gc0_entryhi(old_entryhi);
+ write_gc0_entrylo0(old_entrylo0);
+ write_gc0_entrylo1(old_entrylo1);
+ write_gc0_pagemask(old_pagemask);
+
+ tlbw_use_hazard();
+}
+EXPORT_SYMBOL_GPL(kvm_vz_load_guesttlb);
+
+#endif
+
/**
* kvm_mips_suspend_mm() - Suspend the active mm.
* @cpu The CPU we're running on.
diff --git a/arch/mips/kvm/trace.h b/arch/mips/kvm/trace.h
index c858cf168078..a8c7fd7bf6d2 100644
--- a/arch/mips/kvm/trace.h
+++ b/arch/mips/kvm/trace.h
@@ -18,6 +18,13 @@
#define TRACE_INCLUDE_FILE trace
/*
+ * arch/mips/kvm/mips.c
+ */
+extern bool kvm_trace_guest_mode_change;
+int kvm_guest_mode_change_trace_reg(void);
+void kvm_guest_mode_change_trace_unreg(void);
+
+/*
* Tracepoints for VM enters
*/
DECLARE_EVENT_CLASS(kvm_transition,
@@ -62,10 +69,20 @@ DEFINE_EVENT(kvm_transition, kvm_out,
#define KVM_TRACE_EXIT_MSA_FPE 14
#define KVM_TRACE_EXIT_FPE 15
#define KVM_TRACE_EXIT_MSA_DISABLED 21
+#define KVM_TRACE_EXIT_GUEST_EXIT 27
/* Further exit reasons */
#define KVM_TRACE_EXIT_WAIT 32
#define KVM_TRACE_EXIT_CACHE 33
#define KVM_TRACE_EXIT_SIGNAL 34
+/* 32 exit reasons correspond to GuestCtl0.GExcCode (VZ) */
+#define KVM_TRACE_EXIT_GEXCCODE_BASE 64
+#define KVM_TRACE_EXIT_GPSI 64 /* 0 */
+#define KVM_TRACE_EXIT_GSFC 65 /* 1 */
+#define KVM_TRACE_EXIT_HC 66 /* 2 */
+#define KVM_TRACE_EXIT_GRR 67 /* 3 */
+#define KVM_TRACE_EXIT_GVA 72 /* 8 */
+#define KVM_TRACE_EXIT_GHFC 73 /* 9 */
+#define KVM_TRACE_EXIT_GPA 74 /* 10 */
/* Tracepoints for VM exits */
#define kvm_trace_symbol_exit_types \
@@ -83,9 +100,17 @@ DEFINE_EVENT(kvm_transition, kvm_out,
{ KVM_TRACE_EXIT_MSA_FPE, "MSA FPE" }, \
{ KVM_TRACE_EXIT_FPE, "FPE" }, \
{ KVM_TRACE_EXIT_MSA_DISABLED, "MSA Disabled" }, \
+ { KVM_TRACE_EXIT_GUEST_EXIT, "Guest Exit" }, \
{ KVM_TRACE_EXIT_WAIT, "WAIT" }, \
{ KVM_TRACE_EXIT_CACHE, "CACHE" }, \
- { KVM_TRACE_EXIT_SIGNAL, "Signal" }
+ { KVM_TRACE_EXIT_SIGNAL, "Signal" }, \
+ { KVM_TRACE_EXIT_GPSI, "GPSI" }, \
+ { KVM_TRACE_EXIT_GSFC, "GSFC" }, \
+ { KVM_TRACE_EXIT_HC, "HC" }, \
+ { KVM_TRACE_EXIT_GRR, "GRR" }, \
+ { KVM_TRACE_EXIT_GVA, "GVA" }, \
+ { KVM_TRACE_EXIT_GHFC, "GHFC" }, \
+ { KVM_TRACE_EXIT_GPA, "GPA" }
TRACE_EVENT(kvm_exit,
TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason),
@@ -158,6 +183,8 @@ TRACE_EVENT(kvm_exit,
{ KVM_TRACE_COP0(16, 4), "Config4" }, \
{ KVM_TRACE_COP0(16, 5), "Config5" }, \
{ KVM_TRACE_COP0(16, 7), "Config7" }, \
+ { KVM_TRACE_COP0(17, 1), "MAAR" }, \
+ { KVM_TRACE_COP0(17, 2), "MAARI" }, \
{ KVM_TRACE_COP0(26, 0), "ECC" }, \
{ KVM_TRACE_COP0(30, 0), "ErrorEPC" }, \
{ KVM_TRACE_COP0(31, 2), "KScratch1" }, \
@@ -268,6 +295,51 @@ TRACE_EVENT(kvm_asid_change,
__entry->new_asid)
);
+TRACE_EVENT(kvm_guestid_change,
+ TP_PROTO(struct kvm_vcpu *vcpu, unsigned int guestid),
+ TP_ARGS(vcpu, guestid),
+ TP_STRUCT__entry(
+ __field(unsigned int, guestid)
+ ),
+
+ TP_fast_assign(
+ __entry->guestid = guestid;
+ ),
+
+ TP_printk("GuestID: 0x%02x",
+ __entry->guestid)
+);
+
+TRACE_EVENT_FN(kvm_guest_mode_change,
+ TP_PROTO(struct kvm_vcpu *vcpu),
+ TP_ARGS(vcpu),
+ TP_STRUCT__entry(
+ __field(unsigned long, epc)
+ __field(unsigned long, pc)
+ __field(unsigned long, badvaddr)
+ __field(unsigned int, status)
+ __field(unsigned int, cause)
+ ),
+
+ TP_fast_assign(
+ __entry->epc = kvm_read_c0_guest_epc(vcpu->arch.cop0);
+ __entry->pc = vcpu->arch.pc;
+ __entry->badvaddr = kvm_read_c0_guest_badvaddr(vcpu->arch.cop0);
+ __entry->status = kvm_read_c0_guest_status(vcpu->arch.cop0);
+ __entry->cause = kvm_read_c0_guest_cause(vcpu->arch.cop0);
+ ),
+
+ TP_printk("EPC: 0x%08lx PC: 0x%08lx Status: 0x%08x Cause: 0x%08x BadVAddr: 0x%08lx",
+ __entry->epc,
+ __entry->pc,
+ __entry->status,
+ __entry->cause,
+ __entry->badvaddr),
+
+ kvm_guest_mode_change_trace_reg,
+ kvm_guest_mode_change_trace_unreg
+);
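TRACE_EVENT_FN() attaches the register/unregister callbacks declared above, so kvm_trace_guest_mode_change is only true while the tracepoint is enabled. The intended call-site shape (an assumption here, this hunk does not show a caller) is to guard the relatively expensive guest CP0 reads behind that flag:

	/* Only sample guest CP0 state for the tracepoint when it is enabled */
	if (kvm_trace_guest_mode_change)
		trace_kvm_guest_mode_change(vcpu);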
+
#endif /* _TRACE_KVM_H */
/* This part must be outside protection */
diff --git a/arch/mips/kvm/trap_emul.c b/arch/mips/kvm/trap_emul.c
index b1fa53b252ea..a563759fd142 100644
--- a/arch/mips/kvm/trap_emul.c
+++ b/arch/mips/kvm/trap_emul.c
@@ -12,6 +12,7 @@
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
+#include <linux/log2.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <asm/mmu_context.h>
@@ -40,6 +41,29 @@ static gpa_t kvm_trap_emul_gva_to_gpa_cb(gva_t gva)
return gpa;
}
+static int kvm_trap_emul_no_handler(struct kvm_vcpu *vcpu)
+{
+ u32 __user *opc = (u32 __user *) vcpu->arch.pc;
+ u32 cause = vcpu->arch.host_cp0_cause;
+ u32 exccode = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE;
+ unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
+ u32 inst = 0;
+
+ /*
+ * Fetch the instruction.
+ */
+ if (cause & CAUSEF_BD)
+ opc += 1;
+ kvm_get_badinstr(opc, vcpu, &inst);
+
+ kvm_err("Exception Code: %d not handled @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#x\n",
+ exccode, opc, inst, badvaddr,
+ kvm_read_c0_guest_status(vcpu->arch.cop0));
+ kvm_arch_vcpu_dump_regs(vcpu);
+ vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ return RESUME_HOST;
+}
+
static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
@@ -82,6 +106,10 @@ static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu)
ret = RESUME_HOST;
break;
+ case EMULATE_HYPERCALL:
+ ret = kvm_mips_handle_hypcall(vcpu);
+ break;
+
default:
BUG();
}
@@ -484,6 +512,31 @@ static int kvm_trap_emul_handle_msa_disabled(struct kvm_vcpu *vcpu)
return ret;
}
+static int kvm_trap_emul_hardware_enable(void)
+{
+ return 0;
+}
+
+static void kvm_trap_emul_hardware_disable(void)
+{
+}
+
+static int kvm_trap_emul_check_extension(struct kvm *kvm, long ext)
+{
+ int r;
+
+ switch (ext) {
+ case KVM_CAP_MIPS_TE:
+ r = 1;
+ break;
+ default:
+ r = 0;
+ break;
+ }
+
+ return r;
+}
+
static int kvm_trap_emul_vcpu_init(struct kvm_vcpu *vcpu)
{
struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
@@ -561,6 +614,9 @@ static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu)
u32 config, config1;
int vcpu_id = vcpu->vcpu_id;
+ /* Start off the timer at 100 MHz */
+ kvm_mips_init_count(vcpu, 100*1000*1000);
+
/*
* Arch specific stuff, set up config registers properly so that the
* guest will come up as expected
@@ -589,6 +645,13 @@ static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu)
/* Read the cache characteristics from the host Config1 Register */
config1 = (read_c0_config1() & ~0x7f);
+ /* DCache line size not correctly reported in Config1 on Octeon CPUs */
+ if (cpu_dcache_line_size()) {
+ config1 &= ~MIPS_CONF1_DL;
+ config1 |= ((ilog2(cpu_dcache_line_size()) - 1) <<
+ MIPS_CONF1_DL_SHF) & MIPS_CONF1_DL;
+ }
+
/* Set up MMU size */
config1 &= ~(0x3f << 25);
config1 |= ((KVM_MIPS_GUEST_TLB_SIZE - 1) << 25);
@@ -892,10 +955,12 @@ static int kvm_trap_emul_set_one_reg(struct kvm_vcpu *vcpu,
if (v & CAUSEF_DC) {
/* disable timer first */
kvm_mips_count_disable_cause(vcpu);
- kvm_change_c0_guest_cause(cop0, ~CAUSEF_DC, v);
+ kvm_change_c0_guest_cause(cop0, (u32)~CAUSEF_DC,
+ v);
} else {
/* enable timer last */
- kvm_change_c0_guest_cause(cop0, ~CAUSEF_DC, v);
+ kvm_change_c0_guest_cause(cop0, (u32)~CAUSEF_DC,
+ v);
kvm_mips_count_enable_cause(vcpu);
}
} else {
@@ -1230,7 +1295,11 @@ static struct kvm_mips_callbacks kvm_trap_emul_callbacks = {
.handle_msa_fpe = kvm_trap_emul_handle_msa_fpe,
.handle_fpe = kvm_trap_emul_handle_fpe,
.handle_msa_disabled = kvm_trap_emul_handle_msa_disabled,
+ .handle_guest_exit = kvm_trap_emul_no_handler,
+ .hardware_enable = kvm_trap_emul_hardware_enable,
+ .hardware_disable = kvm_trap_emul_hardware_disable,
+ .check_extension = kvm_trap_emul_check_extension,
.vcpu_init = kvm_trap_emul_vcpu_init,
.vcpu_uninit = kvm_trap_emul_vcpu_uninit,
.vcpu_setup = kvm_trap_emul_vcpu_setup,
diff --git a/arch/mips/kvm/vz.c b/arch/mips/kvm/vz.c
new file mode 100644
index 000000000000..71d8856ade64
--- /dev/null
+++ b/arch/mips/kvm/vz.c
@@ -0,0 +1,3223 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * KVM/MIPS: Support for hardware virtualization extensions
+ *
+ * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
+ * Authors: Yann Le Du <ledu@kymasys.com>
+ */
+
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/preempt.h>
+#include <linux/vmalloc.h>
+#include <asm/cacheflush.h>
+#include <asm/cacheops.h>
+#include <asm/cmpxchg.h>
+#include <asm/fpu.h>
+#include <asm/hazards.h>
+#include <asm/inst.h>
+#include <asm/mmu_context.h>
+#include <asm/r4kcache.h>
+#include <asm/time.h>
+#include <asm/tlb.h>
+#include <asm/tlbex.h>
+
+#include <linux/kvm_host.h>
+
+#include "interrupt.h"
+
+#include "trace.h"
+
+/* Pointers to last VCPU loaded on each physical CPU */
+static struct kvm_vcpu *last_vcpu[NR_CPUS];
+/* Pointers to last VCPU executed on each physical CPU */
+static struct kvm_vcpu *last_exec_vcpu[NR_CPUS];
+
+/*
+ * Number of guest VTLB entries to use, so we can catch inconsistency between
+ * CPUs.
+ */
+static unsigned int kvm_vz_guest_vtlb_size;
+
+static inline long kvm_vz_read_gc0_ebase(void)
+{
+ if (sizeof(long) == 8 && cpu_has_ebase_wg)
+ return read_gc0_ebase_64();
+ else
+ return read_gc0_ebase();
+}
+
+static inline void kvm_vz_write_gc0_ebase(long v)
+{
+ /*
+ * First write with WG=1 to write upper bits, then write again in case
+ * WG should be left at 0.
+ * write_gc0_ebase_64() is no longer UNDEFINED since R6.
+ */
+ if (sizeof(long) == 8 &&
+ (cpu_has_mips64r6 || cpu_has_ebase_wg)) {
+ write_gc0_ebase_64(v | MIPS_EBASE_WG);
+ write_gc0_ebase_64(v);
+ } else {
+ write_gc0_ebase(v | MIPS_EBASE_WG);
+ write_gc0_ebase(v);
+ }
+}
+
+/*
+ * These Config bits may be writable by the guest:
+ * Config: [K23, KU] (!TLB), K0
+ * Config1: (none)
+ * Config2: [TU, SU] (impl)
+ * Config3: ISAOnExc
+ * Config4: FTLBPageSize
+ * Config5: K, CV, MSAEn, UFE, FRE, SBRI, UFR
+ */
+
+static inline unsigned int kvm_vz_config_guest_wrmask(struct kvm_vcpu *vcpu)
+{
+ return CONF_CM_CMASK;
+}
+
+static inline unsigned int kvm_vz_config1_guest_wrmask(struct kvm_vcpu *vcpu)
+{
+ return 0;
+}
+
+static inline unsigned int kvm_vz_config2_guest_wrmask(struct kvm_vcpu *vcpu)
+{
+ return 0;
+}
+
+static inline unsigned int kvm_vz_config3_guest_wrmask(struct kvm_vcpu *vcpu)
+{
+ return MIPS_CONF3_ISA_OE;
+}
+
+static inline unsigned int kvm_vz_config4_guest_wrmask(struct kvm_vcpu *vcpu)
+{
+ /* no need to be exact */
+ return MIPS_CONF4_VFTLBPAGESIZE;
+}
+
+static inline unsigned int kvm_vz_config5_guest_wrmask(struct kvm_vcpu *vcpu)
+{
+ unsigned int mask = MIPS_CONF5_K | MIPS_CONF5_CV | MIPS_CONF5_SBRI;
+
+ /* Permit MSAEn changes if MSA supported and enabled */
+ if (kvm_mips_guest_has_msa(&vcpu->arch))
+ mask |= MIPS_CONF5_MSAEN;
+
+ /*
+ * Permit guest FPU mode changes if FPU is enabled and the relevant
+ * feature exists according to FIR register.
+ */
+ if (kvm_mips_guest_has_fpu(&vcpu->arch)) {
+ if (cpu_has_ufr)
+ mask |= MIPS_CONF5_UFR;
+ if (cpu_has_fre)
+ mask |= MIPS_CONF5_FRE | MIPS_CONF5_UFE;
+ }
+
+ return mask;
+}
+
+/*
+ * VZ optionally allows these additional Config bits to be written by root:
+ * Config: M, [MT]
+ * Config1: M, [MMUSize-1, C2, MD, PC, WR, CA], FP
+ * Config2: M
+ * Config3: M, MSAP, [BPG], ULRI, [DSP2P, DSPP], CTXTC, [ITL, LPA, VEIC,
+ * VInt, SP, CDMM, MT, SM, TL]
+ * Config4: M, [VTLBSizeExt, MMUSizeExt]
+ * Config5: MRP
+ */
+
+static inline unsigned int kvm_vz_config_user_wrmask(struct kvm_vcpu *vcpu)
+{
+ return kvm_vz_config_guest_wrmask(vcpu) | MIPS_CONF_M;
+}
+
+static inline unsigned int kvm_vz_config1_user_wrmask(struct kvm_vcpu *vcpu)
+{
+ unsigned int mask = kvm_vz_config1_guest_wrmask(vcpu) | MIPS_CONF_M;
+
+ /* Permit FPU to be present if FPU is supported */
+ if (kvm_mips_guest_can_have_fpu(&vcpu->arch))
+ mask |= MIPS_CONF1_FP;
+
+ return mask;
+}
+
+static inline unsigned int kvm_vz_config2_user_wrmask(struct kvm_vcpu *vcpu)
+{
+ return kvm_vz_config2_guest_wrmask(vcpu) | MIPS_CONF_M;
+}
+
+static inline unsigned int kvm_vz_config3_user_wrmask(struct kvm_vcpu *vcpu)
+{
+ unsigned int mask = kvm_vz_config3_guest_wrmask(vcpu) | MIPS_CONF_M |
+ MIPS_CONF3_ULRI | MIPS_CONF3_CTXTC;
+
+ /* Permit MSA to be present if MSA is supported */
+ if (kvm_mips_guest_can_have_msa(&vcpu->arch))
+ mask |= MIPS_CONF3_MSA;
+
+ return mask;
+}
+
+static inline unsigned int kvm_vz_config4_user_wrmask(struct kvm_vcpu *vcpu)
+{
+ return kvm_vz_config4_guest_wrmask(vcpu) | MIPS_CONF_M;
+}
+
+static inline unsigned int kvm_vz_config5_user_wrmask(struct kvm_vcpu *vcpu)
+{
+ return kvm_vz_config5_guest_wrmask(vcpu) | MIPS_CONF5_MRP;
+}
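These *_guest_wrmask()/*_user_wrmask() helpers only describe which Config bits are allowed to change; a typical way such a mask is consumed is a masked read-modify-write. A sketch of that general pattern (not code from this patch; wrval stands for the value the guest or userland attempted to write):

	unsigned int mask = kvm_vz_config5_guest_wrmask(vcpu);
	unsigned int val = read_gc0_config5();

	/* Fold in only the writable bits, leaving the rest untouched */
	write_gc0_config5((val & ~mask) | (wrval & mask));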
+
+static gpa_t kvm_vz_gva_to_gpa_cb(gva_t gva)
+{
+ /* VZ guest has already converted gva to gpa */
+ return gva;
+}
+
+static void kvm_vz_queue_irq(struct kvm_vcpu *vcpu, unsigned int priority)
+{
+ set_bit(priority, &vcpu->arch.pending_exceptions);
+ clear_bit(priority, &vcpu->arch.pending_exceptions_clr);
+}
+
+static void kvm_vz_dequeue_irq(struct kvm_vcpu *vcpu, unsigned int priority)
+{
+ clear_bit(priority, &vcpu->arch.pending_exceptions);
+ set_bit(priority, &vcpu->arch.pending_exceptions_clr);
+}
+
+static void kvm_vz_queue_timer_int_cb(struct kvm_vcpu *vcpu)
+{
+ /*
+ * timer expiry is asynchronous to vcpu execution therefore defer guest
+ * cp0 accesses
+ */
+ kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_TIMER);
+}
+
+static void kvm_vz_dequeue_timer_int_cb(struct kvm_vcpu *vcpu)
+{
+ /*
+ * timer expiry is asynchronous to vcpu execution therefore defer guest
+ * cp0 accesses
+ */
+ kvm_vz_dequeue_irq(vcpu, MIPS_EXC_INT_TIMER);
+}
+
+static void kvm_vz_queue_io_int_cb(struct kvm_vcpu *vcpu,
+ struct kvm_mips_interrupt *irq)
+{
+ int intr = (int)irq->irq;
+
+ /*
+ * interrupts are asynchronous to vcpu execution therefore defer guest
+ * cp0 accesses
+ */
+ switch (intr) {
+ case 2:
+ kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_IO);
+ break;
+
+ case 3:
+ kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_IPI_1);
+ break;
+
+ case 4:
+ kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_IPI_2);
+ break;
+
+ default:
+ break;
+ }
+
+}
+
+static void kvm_vz_dequeue_io_int_cb(struct kvm_vcpu *vcpu,
+ struct kvm_mips_interrupt *irq)
+{
+ int intr = (int)irq->irq;
+
+ /*
+ * interrupts are asynchronous to vcpu execution therefore defer guest
+ * cp0 accesses
+ */
+ switch (intr) {
+ case -2:
+ kvm_vz_dequeue_irq(vcpu, MIPS_EXC_INT_IO);
+ break;
+
+ case -3:
+ kvm_vz_dequeue_irq(vcpu, MIPS_EXC_INT_IPI_1);
+ break;
+
+ case -4:
+ kvm_vz_dequeue_irq(vcpu, MIPS_EXC_INT_IPI_2);
+ break;
+
+ default:
+ break;
+ }
+
+}
+
+static u32 kvm_vz_priority_to_irq[MIPS_EXC_MAX] = {
+ [MIPS_EXC_INT_TIMER] = C_IRQ5,
+ [MIPS_EXC_INT_IO] = C_IRQ0,
+ [MIPS_EXC_INT_IPI_1] = C_IRQ1,
+ [MIPS_EXC_INT_IPI_2] = C_IRQ2,
+};
+
+static int kvm_vz_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority,
+ u32 cause)
+{
+ u32 irq = (priority < MIPS_EXC_MAX) ?
+ kvm_vz_priority_to_irq[priority] : 0;
+
+ switch (priority) {
+ case MIPS_EXC_INT_TIMER:
+ set_gc0_cause(C_TI);
+ break;
+
+ case MIPS_EXC_INT_IO:
+ case MIPS_EXC_INT_IPI_1:
+ case MIPS_EXC_INT_IPI_2:
+ if (cpu_has_guestctl2)
+ set_c0_guestctl2(irq);
+ else
+ set_gc0_cause(irq);
+ break;
+
+ default:
+ break;
+ }
+
+ clear_bit(priority, &vcpu->arch.pending_exceptions);
+ return 1;
+}
+
+static int kvm_vz_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority,
+ u32 cause)
+{
+ u32 irq = (priority < MIPS_EXC_MAX) ?
+ kvm_vz_priority_to_irq[priority] : 0;
+
+ switch (priority) {
+ case MIPS_EXC_INT_TIMER:
+ /*
+ * Call to kvm_write_c0_guest_compare() clears Cause.TI in
+ * kvm_mips_emulate_CP0(). Explicitly clear irq associated with
+ * Cause.IP[IPTI] if GuestCtl2 virtual interrupt register not
+ * supported or if not using GuestCtl2 Hardware Clear.
+ */
+ if (cpu_has_guestctl2) {
+ if (!(read_c0_guestctl2() & (irq << 14)))
+ clear_c0_guestctl2(irq);
+ } else {
+ clear_gc0_cause(irq);
+ }
+ break;
+
+ case MIPS_EXC_INT_IO:
+ case MIPS_EXC_INT_IPI_1:
+ case MIPS_EXC_INT_IPI_2:
+ /* Clear GuestCtl2.VIP irq if not using Hardware Clear */
+ if (cpu_has_guestctl2) {
+ if (!(read_c0_guestctl2() & (irq << 14)))
+ clear_c0_guestctl2(irq);
+ } else {
+ clear_gc0_cause(irq);
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ clear_bit(priority, &vcpu->arch.pending_exceptions_clr);
+ return 1;
+}
+
+/*
+ * VZ guest timer handling.
+ */
+
+/**
+ * kvm_vz_should_use_htimer() - Find whether to use the VZ hard guest timer.
+ * @vcpu: Virtual CPU.
+ *
+ * Returns: true if the VZ GTOffset & real guest CP0_Count should be used
+ * instead of software emulation of guest timer.
+ * false otherwise.
+ */
+static bool kvm_vz_should_use_htimer(struct kvm_vcpu *vcpu)
+{
+ if (kvm_mips_count_disabled(vcpu))
+ return false;
+
+ /* Chosen frequency must match real frequency */
+ if (mips_hpt_frequency != vcpu->arch.count_hz)
+ return false;
+
+ /* We don't support a CP0_GTOffset with fewer bits than CP0_Count */
+ if (current_cpu_data.gtoffset_mask != 0xffffffff)
+ return false;
+
+ return true;
+}
+
+/**
+ * _kvm_vz_restore_stimer() - Restore soft timer state.
+ * @vcpu: Virtual CPU.
+ * @compare: CP0_Compare register value, restored by caller.
+ * @cause: CP0_Cause register to restore.
+ *
+ * Restore VZ state relating to the soft timer. The hard timer can be enabled
+ * later.
+ */
+static void _kvm_vz_restore_stimer(struct kvm_vcpu *vcpu, u32 compare,
+ u32 cause)
+{
+ /*
+ * Avoid spurious counter interrupts by setting Guest CP0_Count to just
+ * after Guest CP0_Compare.
+ */
+ write_c0_gtoffset(compare - read_c0_count());
+
+ back_to_back_c0_hazard();
+ write_gc0_cause(cause);
+}
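The guest's CP0_Count reads as the root CP0_Count plus CP0_GTOffset, so writing GTOffset = compare - root_count makes the guest Count equal to Compare at that instant; the count then moves just past the compare point instead of sitting a whole timer period short of it. With illustrative numbers: root Count = 0x1000 and a saved Compare of 0x9000 gives GTOffset = 0x8000, and the guest immediately reads Count == 0x9000.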
+
+/**
+ * _kvm_vz_restore_htimer() - Restore hard timer state.
+ * @vcpu: Virtual CPU.
+ * @compare: CP0_Compare register value, restored by caller.
+ * @cause: CP0_Cause register to restore.
+ *
+ * Restore hard timer Guest.Count & Guest.Cause taking care to preserve the
+ * value of Guest.CP0_Cause.TI while restoring Guest.CP0_Cause.
+ */
+static void _kvm_vz_restore_htimer(struct kvm_vcpu *vcpu,
+ u32 compare, u32 cause)
+{
+ u32 start_count, after_count;
+ ktime_t freeze_time;
+ unsigned long flags;
+
+ /*
+ * Freeze the soft-timer and sync the guest CP0_Count with it. We do
+ * this with interrupts disabled to avoid latency.
+ */
+ local_irq_save(flags);
+ freeze_time = kvm_mips_freeze_hrtimer(vcpu, &start_count);
+ write_c0_gtoffset(start_count - read_c0_count());
+ local_irq_restore(flags);
+
+ /* restore guest CP0_Cause, as TI may already be set */
+ back_to_back_c0_hazard();
+ write_gc0_cause(cause);
+
+ /*
+ * The above sequence isn't atomic and would result in lost timer
+ * interrupts if we're not careful. Detect if a timer interrupt is due
+ * and assert it.
+ */
+ back_to_back_c0_hazard();
+ after_count = read_gc0_count();
+ if (after_count - start_count > compare - start_count - 1)
+ kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_TIMER);
+}
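The wrap-safe comparison above (repeated in _kvm_vz_save_htimer() below) asks whether CP0_Compare was crossed between the two Count samples, i.e. whether compare falls in the modulo-2^32 interval (start_count, after_count]. A minimal self-contained sketch with illustrative values:

	/* Illustrative only: wrap-safe "did the timer target pass?" test */
	static bool count_passed_compare(u32 start, u32 after, u32 compare)
	{
		/*
		 * e.g. start = 0xfffffff0, compare = 0x5, after = 0x8:
		 * after - start = 0x18, compare - start - 1 = 0x14,
		 * 0x18 > 0x14, so the target was crossed and a timer
		 * interrupt must be queued by hand.
		 */
		return after - start > compare - start - 1;
	}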
+
+/**
+ * kvm_vz_restore_timer() - Restore timer state.
+ * @vcpu: Virtual CPU.
+ *
+ * Restore soft timer state from saved context.
+ */
+static void kvm_vz_restore_timer(struct kvm_vcpu *vcpu)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ u32 cause, compare;
+
+ compare = kvm_read_sw_gc0_compare(cop0);
+ cause = kvm_read_sw_gc0_cause(cop0);
+
+ write_gc0_compare(compare);
+ _kvm_vz_restore_stimer(vcpu, compare, cause);
+}
+
+/**
+ * kvm_vz_acquire_htimer() - Switch to hard timer state.
+ * @vcpu: Virtual CPU.
+ *
+ * Restore hard timer state on top of existing soft timer state if possible.
+ *
+ * Since hard timer won't remain active over preemption, preemption should be
+ * disabled by the caller.
+ */
+void kvm_vz_acquire_htimer(struct kvm_vcpu *vcpu)
+{
+ u32 gctl0;
+
+ gctl0 = read_c0_guestctl0();
+ if (!(gctl0 & MIPS_GCTL0_GT) && kvm_vz_should_use_htimer(vcpu)) {
+ /* enable guest access to hard timer */
+ write_c0_guestctl0(gctl0 | MIPS_GCTL0_GT);
+
+ _kvm_vz_restore_htimer(vcpu, read_gc0_compare(),
+ read_gc0_cause());
+ }
+}
+
+/**
+ * _kvm_vz_save_htimer() - Switch to software emulation of guest timer.
+ * @vcpu: Virtual CPU.
+ * @compare: Pointer to write compare value to.
+ * @cause: Pointer to write cause value to.
+ *
+ * Save VZ guest timer state and switch to software emulation of guest CP0
+ * timer. The hard timer must already be in use, so preemption should be
+ * disabled.
+ */
+static void _kvm_vz_save_htimer(struct kvm_vcpu *vcpu,
+ u32 *out_compare, u32 *out_cause)
+{
+ u32 cause, compare, before_count, end_count;
+ ktime_t before_time;
+
+ compare = read_gc0_compare();
+ *out_compare = compare;
+
+ before_time = ktime_get();
+
+ /*
+ * Record the CP0_Count *prior* to saving CP0_Cause, so we have a time
+ * at which no pending timer interrupt is missing.
+ */
+ before_count = read_gc0_count();
+ back_to_back_c0_hazard();
+ cause = read_gc0_cause();
+ *out_cause = cause;
+
+ /*
+ * Record a final CP0_Count which we will transfer to the soft-timer.
+ * This is recorded *after* saving CP0_Cause, so we don't get any timer
+ * interrupts from just after the final CP0_Count point.
+ */
+ back_to_back_c0_hazard();
+ end_count = read_gc0_count();
+
+ /*
+ * The above sequence isn't atomic, so we could miss a timer interrupt
+ * between reading CP0_Cause and end_count. Detect and record any timer
+ * interrupt due between before_count and end_count.
+ */
+ if (end_count - before_count > compare - before_count - 1)
+ kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_TIMER);
+
+ /*
+ * Restore soft-timer, ignoring a small amount of negative drift due to
+ * delay between freeze_hrtimer and setting CP0_GTOffset.
+ */
+ kvm_mips_restore_hrtimer(vcpu, before_time, end_count, -0x10000);
+}
+
+/**
+ * kvm_vz_save_timer() - Save guest timer state.
+ * @vcpu: Virtual CPU.
+ *
+ * Save VZ guest timer state and switch to soft guest timer if hard timer was in
+ * use.
+ */
+static void kvm_vz_save_timer(struct kvm_vcpu *vcpu)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ u32 gctl0, compare, cause;
+
+ gctl0 = read_c0_guestctl0();
+ if (gctl0 & MIPS_GCTL0_GT) {
+ /* disable guest use of hard timer */
+ write_c0_guestctl0(gctl0 & ~MIPS_GCTL0_GT);
+
+ /* save hard timer state */
+ _kvm_vz_save_htimer(vcpu, &compare, &cause);
+ } else {
+ compare = read_gc0_compare();
+ cause = read_gc0_cause();
+ }
+
+ /* save timer-related state to VCPU context */
+ kvm_write_sw_gc0_cause(cop0, cause);
+ kvm_write_sw_gc0_compare(cop0, compare);
+}
+
+/**
+ * kvm_vz_lose_htimer() - Ensure hard guest timer is not in use.
+ * @vcpu: Virtual CPU.
+ *
+ * Transfers the state of the hard guest timer to the soft guest timer, leaving
+ * guest state intact so it can continue to be used with the soft timer.
+ */
+void kvm_vz_lose_htimer(struct kvm_vcpu *vcpu)
+{
+ u32 gctl0, compare, cause;
+
+ preempt_disable();
+ gctl0 = read_c0_guestctl0();
+ if (gctl0 & MIPS_GCTL0_GT) {
+ /* disable guest use of timer */
+ write_c0_guestctl0(gctl0 & ~MIPS_GCTL0_GT);
+
+ /* switch to soft timer */
+ _kvm_vz_save_htimer(vcpu, &compare, &cause);
+
+ /* leave soft timer in usable state */
+ _kvm_vz_restore_stimer(vcpu, compare, cause);
+ }
+ preempt_enable();
+}
+
+/**
+ * is_eva_access() - Find whether an instruction is an EVA memory accessor.
+ * @inst: 32-bit instruction encoding.
+ *
+ * Finds whether @inst encodes an EVA memory access instruction, which would
+ * indicate that emulation of it should access the user mode address space
+ * instead of the kernel mode address space. This matters for MUSUK segments
+ * which are TLB mapped for user mode but unmapped for kernel mode.
+ *
+ * Returns: Whether @inst encodes an EVA accessor instruction.
+ */
+static bool is_eva_access(union mips_instruction inst)
+{
+ if (inst.spec3_format.opcode != spec3_op)
+ return false;
+
+ switch (inst.spec3_format.func) {
+ case lwle_op:
+ case lwre_op:
+ case cachee_op:
+ case sbe_op:
+ case she_op:
+ case sce_op:
+ case swe_op:
+ case swle_op:
+ case swre_op:
+ case prefe_op:
+ case lbue_op:
+ case lhue_op:
+ case lbe_op:
+ case lhe_op:
+ case lle_op:
+ case lwe_op:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
+ * is_eva_am_mapped() - Find whether an access mode is mapped.
+ * @vcpu: KVM VCPU state.
+ * @am: 3-bit encoded access mode.
+ * @eu: Segment becomes unmapped and uncached when Status.ERL=1.
+ *
+ * Decode @am to find whether it encodes a mapped segment for the current VCPU
+ * state. Where necessary @eu and the actual instruction causing the fault are
+ * taken into account to make the decision.
+ *
+ * Returns: Whether the VCPU faulted on a TLB mapped address.
+ */
+static bool is_eva_am_mapped(struct kvm_vcpu *vcpu, unsigned int am, bool eu)
+{
+ u32 am_lookup;
+ int err;
+
+ /*
+ * Interpret access control mode. We assume address errors will already
+ * have been caught by the guest, leaving us with:
+ * AM UM SM KM 31..24 23..16
+ * UK 0 000 Unm 0 0
+ * MK 1 001 TLB 1
+ * MSK 2 010 TLB TLB 1
+ * MUSK 3 011 TLB TLB TLB 1
+ * MUSUK 4 100 TLB TLB Unm 0 1
+ * USK 5 101 Unm Unm 0 0
+ * - 6 110 0 0
+ * UUSK 7 111 Unm Unm Unm 0 0
+ *
+ * We shift a magic value by AM across the sign bit to find if always
+ * TLB mapped, and if not shift by 8 again to find if it depends on KM.
+ */
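+ /*
+ * 0x70080000 packs the two flag columns of the table above: bits 31..24
+ * mark the "always TLB mapped" modes (MK/MSK/MUSK) and bits 23..16 mark
+ * the "mapped unless kernel mode" mode (MUSUK).
+ */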
+ am_lookup = 0x70080000 << am;
+ if ((s32)am_lookup < 0) {
+ /*
+ * MK, MSK, MUSK
+ * Always TLB mapped, unless SegCtl.EU && ERL
+ */
+ if (!eu || !(read_gc0_status() & ST0_ERL))
+ return true;
+ } else {
+ am_lookup <<= 8;
+ if ((s32)am_lookup < 0) {
+ union mips_instruction inst;
+ unsigned int status;
+ u32 *opc;
+
+ /*
+ * MUSUK
+ * TLB mapped if not in kernel mode
+ */
+ status = read_gc0_status();
+ if (!(status & (ST0_EXL | ST0_ERL)) &&
+ (status & ST0_KSU))
+ return true;
+ /*
+ * EVA access instructions in kernel
+ * mode access user address space.
+ */
+ opc = (u32 *)vcpu->arch.pc;
+ if (vcpu->arch.host_cp0_cause & CAUSEF_BD)
+ opc += 1;
+ err = kvm_get_badinstr(opc, vcpu, &inst.word);
+ if (!err && is_eva_access(inst))
+ return true;
+ }
+ }
+
+ return false;
+}
+
+/**
+ * kvm_vz_gva_to_gpa() - Convert valid GVA to GPA.
+ * @vcpu: KVM VCPU state.
+ * @gva: Guest virtual address to convert.
+ * @gpa: Output guest physical address.
+ *
+ * Convert a guest virtual address (GVA) which is valid according to the guest
+ * context, to a guest physical address (GPA).
+ *
+ * Returns: 0 on success.
+ * -errno on failure.
+ */
+static int kvm_vz_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
+ unsigned long *gpa)
+{
+ u32 gva32 = gva;
+ unsigned long segctl;
+
+ if ((long)gva == (s32)gva32) {
+ /* Handle canonical 32-bit virtual address */
+ if (cpu_guest_has_segments) {
+ unsigned long mask, pa;
+
+ switch (gva32 >> 29) {
+ case 0:
+ case 1: /* CFG5 (1GB) */
+ segctl = read_gc0_segctl2() >> 16;
+ mask = (unsigned long)0xfc0000000ull;
+ break;
+ case 2:
+ case 3: /* CFG4 (1GB) */
+ segctl = read_gc0_segctl2();
+ mask = (unsigned long)0xfc0000000ull;
+ break;
+ case 4: /* CFG3 (512MB) */
+ segctl = read_gc0_segctl1() >> 16;
+ mask = (unsigned long)0xfe0000000ull;
+ break;
+ case 5: /* CFG2 (512MB) */
+ segctl = read_gc0_segctl1();
+ mask = (unsigned long)0xfe0000000ull;
+ break;
+ case 6: /* CFG1 (512MB) */
+ segctl = read_gc0_segctl0() >> 16;
+ mask = (unsigned long)0xfe0000000ull;
+ break;
+ case 7: /* CFG0 (512MB) */
+ segctl = read_gc0_segctl0();
+ mask = (unsigned long)0xfe0000000ull;
+ break;
+ default:
+ /*
+ * GCC 4.9 isn't smart enough to figure out that
+ * segctl and mask are always initialised.
+ */
+ unreachable();
+ }
+
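+ /*
+ * Each 16-bit CFG field holds PA in bits 15..9, AM in bits 6..4 and
+ * EU in bit 3.
+ */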
+ if (is_eva_am_mapped(vcpu, (segctl >> 4) & 0x7,
+ segctl & 0x0008))
+ goto tlb_mapped;
+
+ /* Unmapped, find guest physical address */
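+ /* CFG.PA (bits 15..9) forms PA bits 35..29 after the shift */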
+ pa = (segctl << 20) & mask;
+ pa |= gva32 & ~mask;
+ *gpa = pa;
+ return 0;
+ } else if ((s32)gva32 < (s32)0xc0000000) {
+ /* legacy unmapped KSeg0 or KSeg1 */
+ *gpa = gva32 & 0x1fffffff;
+ return 0;
+ }
+#ifdef CONFIG_64BIT
+ } else if ((gva & 0xc000000000000000) == 0x8000000000000000) {
+ /* XKPHYS */
+ if (cpu_guest_has_segments) {
+ /*
+ * Each of the 8 regions can be overridden by SegCtl2.XR
+ * to use SegCtl1.XAM.
+ */
+ segctl = read_gc0_segctl2();
+ if (segctl & (1ull << (56 + ((gva >> 59) & 0x7)))) {
+ segctl = read_gc0_segctl1();
+ if (is_eva_am_mapped(vcpu, (segctl >> 59) & 0x7,
+ 0))
+ goto tlb_mapped;
+ }
+
+ }
+ /*
+ * Traditionally fully unmapped.
+ * Bits 61:59 specify the CCA, which we can just mask off here.
+ * Bits 58:PABITS should be zero, but we shouldn't have got here
+ * if it wasn't.
+ */
+ *gpa = gva & 0x07ffffffffffffff;
+ return 0;
+#endif
+ }
+
+tlb_mapped:
+ return kvm_vz_guest_tlb_lookup(vcpu, gva, gpa);
+}
+
+/**
+ * kvm_vz_badvaddr_to_gpa() - Convert GVA BadVAddr from root exception to GPA.
+ * @vcpu: KVM VCPU state.
+ * @badvaddr: Root BadVAddr.
+ * @gpa: Output guest physical address.
+ *
+ * VZ implementations are permitted to report guest virtual addresses (GVA) in
+ * BadVAddr on a root exception during guest execution, instead of the more
+ * convenient guest physical addresses (GPA). When we get a GVA, this function
+ * converts it to a GPA, taking into account guest segmentation and guest TLB
+ * state.
+ *
+ * Returns: 0 on success.
+ * -errno on failure.
+ */
+static int kvm_vz_badvaddr_to_gpa(struct kvm_vcpu *vcpu, unsigned long badvaddr,
+ unsigned long *gpa)
+{
+ unsigned int gexccode = (vcpu->arch.host_cp0_guestctl0 &
+ MIPS_GCTL0_GEXC) >> MIPS_GCTL0_GEXC_SHIFT;
+
+ /* If BadVAddr is GPA, then all is well in the world */
+ if (likely(gexccode == MIPS_GCTL0_GEXC_GPA)) {
+ *gpa = badvaddr;
+ return 0;
+ }
+
+ /* Otherwise we'd expect it to be GVA ... */
+ if (WARN(gexccode != MIPS_GCTL0_GEXC_GVA,
+ "Unexpected gexccode %#x\n", gexccode))
+ return -EINVAL;
+
+ /* ... and we need to perform the GVA->GPA translation in software */
+ return kvm_vz_gva_to_gpa(vcpu, badvaddr, gpa);
+}
+
+static int kvm_trap_vz_no_handler(struct kvm_vcpu *vcpu)
+{
+ u32 *opc = (u32 *) vcpu->arch.pc;
+ u32 cause = vcpu->arch.host_cp0_cause;
+ u32 exccode = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE;
+ unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
+ u32 inst = 0;
+
+ /*
+ * Fetch the instruction.
+ */
+ if (cause & CAUSEF_BD)
+ opc += 1;
+ kvm_get_badinstr(opc, vcpu, &inst);
+
+ kvm_err("Exception Code: %d not handled @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#x\n",
+ exccode, opc, inst, badvaddr,
+ read_gc0_status());
+ kvm_arch_vcpu_dump_regs(vcpu);
+ vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ return RESUME_HOST;
+}
+
+static unsigned long mips_process_maar(unsigned int op, unsigned long val)
+{
+ /* Mask off unused bits */
+ unsigned long mask = 0xfffff000 | MIPS_MAAR_S | MIPS_MAAR_VL;
+
+ if (read_gc0_pagegrain() & PG_ELPA)
+ mask |= 0x00ffffff00000000ull;
+ if (cpu_guest_has_mvh)
+ mask |= MIPS_MAAR_VH;
+
+ /* Set or clear VH */
+ if (op == mtc_op) {
+ /* clear VH */
+ val &= ~MIPS_MAAR_VH;
+ } else if (op == dmtc_op) {
+ /* set VH to match VL */
+ val &= ~MIPS_MAAR_VH;
+ if (val & MIPS_MAAR_VL)
+ val |= MIPS_MAAR_VH;
+ }
+
+ return val & mask;
+}
+
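+/*
+ * Emulate a guest write to MAARI: an all-ones index selects the highest
+ * implemented MAAR, other out-of-range values are ignored.
+ */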
+static void kvm_write_maari(struct kvm_vcpu *vcpu, unsigned long val)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+
+ val &= MIPS_MAARI_INDEX;
+ if (val == MIPS_MAARI_INDEX)
+ kvm_write_sw_gc0_maari(cop0, ARRAY_SIZE(vcpu->arch.maar) - 1);
+ else if (val < ARRAY_SIZE(vcpu->arch.maar))
+ kvm_write_sw_gc0_maari(cop0, val);
+}
+
+static enum emulation_result kvm_vz_gpsi_cop0(union mips_instruction inst,
+ u32 *opc, u32 cause,
+ struct kvm_run *run,
+ struct kvm_vcpu *vcpu)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ enum emulation_result er = EMULATE_DONE;
+ u32 rt, rd, sel;
+ unsigned long curr_pc;
+ unsigned long val;
+
+ /*
+ * Update PC and hold onto current PC in case there is
+ * an error and we want to roll back the PC
+ */
+ curr_pc = vcpu->arch.pc;
+ er = update_pc(vcpu, cause);
+ if (er == EMULATE_FAIL)
+ return er;
+
+ if (inst.co_format.co) {
+ switch (inst.co_format.func) {
+ case wait_op:
+ er = kvm_mips_emul_wait(vcpu);
+ break;
+ default:
+ er = EMULATE_FAIL;
+ }
+ } else {
+ rt = inst.c0r_format.rt;
+ rd = inst.c0r_format.rd;
+ sel = inst.c0r_format.sel;
+
+ switch (inst.c0r_format.rs) {
+ case dmfc_op:
+ case mfc_op:
+#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
+ cop0->stat[rd][sel]++;
+#endif
+ if (rd == MIPS_CP0_COUNT &&
+ sel == 0) { /* Count */
+ val = kvm_mips_read_count(vcpu);
+ } else if (rd == MIPS_CP0_COMPARE &&
+ sel == 0) { /* Compare */
+ val = read_gc0_compare();
+ } else if (rd == MIPS_CP0_LLADDR &&
+ sel == 0) { /* LLAddr */
+ if (cpu_guest_has_rw_llb)
+ val = read_gc0_lladdr() &
+ MIPS_LLADDR_LLB;
+ else
+ val = 0;
+ } else if (rd == MIPS_CP0_LLADDR &&
+ sel == 1 && /* MAAR */
+ cpu_guest_has_maar &&
+ !cpu_guest_has_dyn_maar) {
+ /* MAARI must be in range */
+ BUG_ON(kvm_read_sw_gc0_maari(cop0) >=
+ ARRAY_SIZE(vcpu->arch.maar));
+ val = vcpu->arch.maar[
+ kvm_read_sw_gc0_maari(cop0)];
+ } else if ((rd == MIPS_CP0_PRID &&
+ (sel == 0 || /* PRid */
+ sel == 2 || /* CDMMBase */
+ sel == 3)) || /* CMGCRBase */
+ (rd == MIPS_CP0_STATUS &&
+ (sel == 2 || /* SRSCtl */
+ sel == 3)) || /* SRSMap */
+ (rd == MIPS_CP0_CONFIG &&
+ (sel == 7)) || /* Config7 */
+ (rd == MIPS_CP0_LLADDR &&
+ (sel == 2) && /* MAARI */
+ cpu_guest_has_maar &&
+ !cpu_guest_has_dyn_maar) ||
+ (rd == MIPS_CP0_ERRCTL &&
+ (sel == 0))) { /* ErrCtl */
+ val = cop0->reg[rd][sel];
+ } else {
+ val = 0;
+ er = EMULATE_FAIL;
+ }
+
+ if (er != EMULATE_FAIL) {
+ /* Sign extend */
+ if (inst.c0r_format.rs == mfc_op)
+ val = (int)val;
+ vcpu->arch.gprs[rt] = val;
+ }
+
+ trace_kvm_hwr(vcpu, (inst.c0r_format.rs == mfc_op) ?
+ KVM_TRACE_MFC0 : KVM_TRACE_DMFC0,
+ KVM_TRACE_COP0(rd, sel), val);
+ break;
+
+ case dmtc_op:
+ case mtc_op:
+#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
+ cop0->stat[rd][sel]++;
+#endif
+ val = vcpu->arch.gprs[rt];
+ trace_kvm_hwr(vcpu, (inst.c0r_format.rs == mtc_op) ?
+ KVM_TRACE_MTC0 : KVM_TRACE_DMTC0,
+ KVM_TRACE_COP0(rd, sel), val);
+
+ if (rd == MIPS_CP0_COUNT &&
+ sel == 0) { /* Count */
+ kvm_vz_lose_htimer(vcpu);
+ kvm_mips_write_count(vcpu, vcpu->arch.gprs[rt]);
+ } else if (rd == MIPS_CP0_COMPARE &&
+ sel == 0) { /* Compare */
+ kvm_mips_write_compare(vcpu,
+ vcpu->arch.gprs[rt],
+ true);
+ } else if (rd == MIPS_CP0_LLADDR &&
+ sel == 0) { /* LLAddr */
+ /*
+ * P5600 generates GPSI on guest MTC0 LLAddr.
+ * Only allow the guest to clear LLB.
+ */
+ if (cpu_guest_has_rw_llb &&
+ !(val & MIPS_LLADDR_LLB))
+ write_gc0_lladdr(0);
+ } else if (rd == MIPS_CP0_LLADDR &&
+ sel == 1 && /* MAAR */
+ cpu_guest_has_maar &&
+ !cpu_guest_has_dyn_maar) {
+ val = mips_process_maar(inst.c0r_format.rs,
+ val);
+
+ /* MAARI must be in range */
+ BUG_ON(kvm_read_sw_gc0_maari(cop0) >=
+ ARRAY_SIZE(vcpu->arch.maar));
+ vcpu->arch.maar[kvm_read_sw_gc0_maari(cop0)] =
+ val;
+ } else if (rd == MIPS_CP0_LLADDR &&
+ (sel == 2) && /* MAARI */
+ cpu_guest_has_maar &&
+ !cpu_guest_has_dyn_maar) {
+ kvm_write_maari(vcpu, val);
+ } else if (rd == MIPS_CP0_ERRCTL &&
+ (sel == 0)) { /* ErrCtl */
+ /* ignore the written value */
+ } else {
+ er = EMULATE_FAIL;
+ }
+ break;
+
+ default:
+ er = EMULATE_FAIL;
+ break;
+ }
+ }
+ /* Rollback PC only if emulation was unsuccessful */
+ if (er == EMULATE_FAIL) {
+ kvm_err("[%#lx]%s: unsupported cop0 instruction 0x%08x\n",
+ curr_pc, __func__, inst.word);
+
+ vcpu->arch.pc = curr_pc;
+ }
+
+ return er;
+}
+
+static enum emulation_result kvm_vz_gpsi_cache(union mips_instruction inst,
+ u32 *opc, u32 cause,
+ struct kvm_run *run,
+ struct kvm_vcpu *vcpu)
+{
+ enum emulation_result er = EMULATE_DONE;
+ u32 cache, op_inst, op, base;
+ s16 offset;
+ struct kvm_vcpu_arch *arch = &vcpu->arch;
+ unsigned long va, curr_pc;
+
+ /*
+ * Update PC and hold onto current PC in case there is
+ * an error and we want to roll back the PC
+ */
+ curr_pc = vcpu->arch.pc;
+ er = update_pc(vcpu, cause);
+ if (er == EMULATE_FAIL)
+ return er;
+
+ base = inst.i_format.rs;
+ op_inst = inst.i_format.rt;
+ if (cpu_has_mips_r6)
+ offset = inst.spec3_format.simmediate;
+ else
+ offset = inst.i_format.simmediate;
+ cache = op_inst & CacheOp_Cache;
+ op = op_inst & CacheOp_Op;
+
+ va = arch->gprs[base] + offset;
+
+ kvm_debug("CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
+ cache, op, base, arch->gprs[base], offset);
+
+ /* Secondary or tertiary cache ops are ignored */
+ if (cache != Cache_I && cache != Cache_D)
+ return EMULATE_DONE;
+
+ switch (op_inst) {
+ case Index_Invalidate_I:
+ flush_icache_line_indexed(va);
+ return EMULATE_DONE;
+ case Index_Writeback_Inv_D:
+ flush_dcache_line_indexed(va);
+ return EMULATE_DONE;
+ case Hit_Invalidate_I:
+ case Hit_Invalidate_D:
+ case Hit_Writeback_Inv_D:
+ if (boot_cpu_type() == CPU_CAVIUM_OCTEON3) {
+ /* We can just flush entire icache */
+ local_flush_icache_range(0, 0);
+ return EMULATE_DONE;
+ }
+
+ /* Other platforms let the guest run hit cache ops directly, so fail */
+ break;
+ default:
+ break;
+ }
+
+ kvm_err("@ %#lx/%#lx CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
+ curr_pc, vcpu->arch.gprs[31], cache, op, base, arch->gprs[base],
+ offset);
+ /* Rollback PC */
+ vcpu->arch.pc = curr_pc;
+
+ return EMULATE_FAIL;
+}
+
+static enum emulation_result kvm_trap_vz_handle_gpsi(u32 cause, u32 *opc,
+ struct kvm_vcpu *vcpu)
+{
+ enum emulation_result er = EMULATE_DONE;
+ struct kvm_vcpu_arch *arch = &vcpu->arch;
+ struct kvm_run *run = vcpu->run;
+ union mips_instruction inst;
+ int rd, rt, sel;
+ int err;
+
+ /*
+ * Fetch the instruction.
+ */
+ if (cause & CAUSEF_BD)
+ opc += 1;
+ err = kvm_get_badinstr(opc, vcpu, &inst.word);
+ if (err)
+ return EMULATE_FAIL;
+
+ switch (inst.r_format.opcode) {
+ case cop0_op:
+ er = kvm_vz_gpsi_cop0(inst, opc, cause, run, vcpu);
+ break;
+#ifndef CONFIG_CPU_MIPSR6
+ case cache_op:
+ trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
+ er = kvm_vz_gpsi_cache(inst, opc, cause, run, vcpu);
+ break;
+#endif
+ case spec3_op:
+ switch (inst.spec3_format.func) {
+#ifdef CONFIG_CPU_MIPSR6
+ case cache6_op:
+ trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
+ er = kvm_vz_gpsi_cache(inst, opc, cause, run, vcpu);
+ break;
+#endif
+ case rdhwr_op:
+ if (inst.r_format.rs || (inst.r_format.re >> 3))
+ goto unknown;
+
+ rd = inst.r_format.rd;
+ rt = inst.r_format.rt;
+ sel = inst.r_format.re & 0x7;
+
+ switch (rd) {
+ case MIPS_HWR_CC: /* Read count register */
+ arch->gprs[rt] =
+ (long)(int)kvm_mips_read_count(vcpu);
+ break;
+ default:
+ trace_kvm_hwr(vcpu, KVM_TRACE_RDHWR,
+ KVM_TRACE_HWR(rd, sel), 0);
+ goto unknown;
+ }
+
+ trace_kvm_hwr(vcpu, KVM_TRACE_RDHWR,
+ KVM_TRACE_HWR(rd, sel), arch->gprs[rt]);
+
+ er = update_pc(vcpu, cause);
+ break;
+ default:
+ goto unknown;
+ }
+ break;
+unknown:
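+ /* fall through to the default (unsupported instruction) path */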
+
+ default:
+ kvm_err("GPSI exception not supported (%p/%#x)\n",
+ opc, inst.word);
+ kvm_arch_vcpu_dump_regs(vcpu);
+ er = EMULATE_FAIL;
+ break;
+ }
+
+ return er;
+}
+
+static enum emulation_result kvm_trap_vz_handle_gsfc(u32 cause, u32 *opc,
+ struct kvm_vcpu *vcpu)
+{
+ enum emulation_result er = EMULATE_DONE;
+ struct kvm_vcpu_arch *arch = &vcpu->arch;
+ union mips_instruction inst;
+ int err;
+
+ /*
+ * Fetch the instruction.
+ */
+ if (cause & CAUSEF_BD)
+ opc += 1;
+ err = kvm_get_badinstr(opc, vcpu, &inst.word);
+ if (err)
+ return EMULATE_FAIL;
+
+ /* complete MTC0 on behalf of guest and advance EPC */
+ if (inst.c0r_format.opcode == cop0_op &&
+ inst.c0r_format.rs == mtc_op &&
+ inst.c0r_format.z == 0) {
+ int rt = inst.c0r_format.rt;
+ int rd = inst.c0r_format.rd;
+ int sel = inst.c0r_format.sel;
+ unsigned int val = arch->gprs[rt];
+ unsigned int old_val, change;
+
+ trace_kvm_hwr(vcpu, KVM_TRACE_MTC0, KVM_TRACE_COP0(rd, sel),
+ val);
+
+ if ((rd == MIPS_CP0_STATUS) && (sel == 0)) {
+ /* FR bit should read as zero if no FPU */
+ if (!kvm_mips_guest_has_fpu(&vcpu->arch))
+ val &= ~(ST0_CU1 | ST0_FR);
+
+ /*
+ * Also don't allow FR to be set if host doesn't support
+ * it.
+ */
+ if (!(boot_cpu_data.fpu_id & MIPS_FPIR_F64))
+ val &= ~ST0_FR;
+
+ old_val = read_gc0_status();
+ change = val ^ old_val;
+
+ if (change & ST0_FR) {
+ /*
+ * FPU and Vector register state is made
+ * UNPREDICTABLE by a change of FR, so don't
+ * even bother saving it.
+ */
+ kvm_drop_fpu(vcpu);
+ }
+
+ /*
+ * If MSA state is already live, it is undefined how it
+ * interacts with FR=0 FPU state, and we don't want to
+ * hit reserved instruction exceptions trying to save
+ * the MSA state later when CU=1 && FR=1, so play it
+ * safe and save it first.
+ */
+ if (change & ST0_CU1 && !(val & ST0_FR) &&
+ vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA)
+ kvm_lose_fpu(vcpu);
+
+ write_gc0_status(val);
+ } else if ((rd == MIPS_CP0_CAUSE) && (sel == 0)) {
+ u32 old_cause = read_gc0_cause();
+ u32 change = old_cause ^ val;
+
+ /* DC bit enabling/disabling timer? */
+ if (change & CAUSEF_DC) {
+ if (val & CAUSEF_DC) {
+ kvm_vz_lose_htimer(vcpu);
+ kvm_mips_count_disable_cause(vcpu);
+ } else {
+ kvm_mips_count_enable_cause(vcpu);
+ }
+ }
+
+ /* Only certain bits are RW to the guest */
+ change &= (CAUSEF_DC | CAUSEF_IV | CAUSEF_WP |
+ CAUSEF_IP0 | CAUSEF_IP1);
+
+ /* WP can only be cleared */
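+ /* (the change bit is kept only if WP is currently set) */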
+ change &= ~CAUSEF_WP | old_cause;
+
+ write_gc0_cause(old_cause ^ change);
+ } else if ((rd == MIPS_CP0_STATUS) && (sel == 1)) { /* IntCtl */
+ write_gc0_intctl(val);
+ } else if ((rd == MIPS_CP0_CONFIG) && (sel == 5)) {
+ old_val = read_gc0_config5();
+ change = val ^ old_val;
+ /* Handle changes in FPU/MSA modes */
+ preempt_disable();
+
+ /*
+ * Propagate FRE changes immediately if the FPU
+ * context is already loaded.
+ */
+ if (change & MIPS_CONF5_FRE &&
+ vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)
+ change_c0_config5(MIPS_CONF5_FRE, val);
+
+ preempt_enable();
+
+ val = old_val ^
+ (change & kvm_vz_config5_guest_wrmask(vcpu));
+ write_gc0_config5(val);
+ } else {
+ kvm_err("Handle GSFC, unsupported field change @ %p: %#x\n",
+ opc, inst.word);
+ er = EMULATE_FAIL;
+ }
+
+ if (er != EMULATE_FAIL)
+ er = update_pc(vcpu, cause);
+ } else {
+ kvm_err("Handle GSFC, unrecognized instruction @ %p: %#x\n",
+ opc, inst.word);
+ er = EMULATE_FAIL;
+ }
+
+ return er;
+}
+
+static enum emulation_result kvm_trap_vz_handle_ghfc(u32 cause, u32 *opc,
+ struct kvm_vcpu *vcpu)
+{
+ /*
+ * Presumably this is due to MC (guest mode change), so let's trace some
+ * relevant info.
+ */
+ trace_kvm_guest_mode_change(vcpu);
+
+ return EMULATE_DONE;
+}
+
+static enum emulation_result kvm_trap_vz_handle_hc(u32 cause, u32 *opc,
+ struct kvm_vcpu *vcpu)
+{
+ enum emulation_result er;
+ union mips_instruction inst;
+ unsigned long curr_pc;
+ int err;
+
+ if (cause & CAUSEF_BD)
+ opc += 1;
+ err = kvm_get_badinstr(opc, vcpu, &inst.word);
+ if (err)
+ return EMULATE_FAIL;
+
+ /*
+ * Update PC and hold onto current PC in case there is
+ * an error and we want to roll back the PC
+ */
+ curr_pc = vcpu->arch.pc;
+ er = update_pc(vcpu, cause);
+ if (er == EMULATE_FAIL)
+ return er;
+
+ er = kvm_mips_emul_hypcall(vcpu, inst);
+ if (er == EMULATE_FAIL)
+ vcpu->arch.pc = curr_pc;
+
+ return er;
+}
+
+static enum emulation_result kvm_trap_vz_no_handler_guest_exit(u32 gexccode,
+ u32 cause,
+ u32 *opc,
+ struct kvm_vcpu *vcpu)
+{
+ u32 inst;
+
+ /*
+ * Fetch the instruction.
+ */
+ if (cause & CAUSEF_BD)
+ opc += 1;
+ kvm_get_badinstr(opc, vcpu, &inst);
+
+ kvm_err("Guest Exception Code: %d not yet handled @ PC: %p, inst: 0x%08x Status: %#x\n",
+ gexccode, opc, inst, read_gc0_status());
+
+ return EMULATE_FAIL;
+}
+
+static int kvm_trap_vz_handle_guest_exit(struct kvm_vcpu *vcpu)
+{
+ u32 *opc = (u32 *) vcpu->arch.pc;
+ u32 cause = vcpu->arch.host_cp0_cause;
+ enum emulation_result er = EMULATE_DONE;
+ u32 gexccode = (vcpu->arch.host_cp0_guestctl0 &
+ MIPS_GCTL0_GEXC) >> MIPS_GCTL0_GEXC_SHIFT;
+ int ret = RESUME_GUEST;
+
+ trace_kvm_exit(vcpu, KVM_TRACE_EXIT_GEXCCODE_BASE + gexccode);
+ switch (gexccode) {
+ case MIPS_GCTL0_GEXC_GPSI:
+ ++vcpu->stat.vz_gpsi_exits;
+ er = kvm_trap_vz_handle_gpsi(cause, opc, vcpu);
+ break;
+ case MIPS_GCTL0_GEXC_GSFC:
+ ++vcpu->stat.vz_gsfc_exits;
+ er = kvm_trap_vz_handle_gsfc(cause, opc, vcpu);
+ break;
+ case MIPS_GCTL0_GEXC_HC:
+ ++vcpu->stat.vz_hc_exits;
+ er = kvm_trap_vz_handle_hc(cause, opc, vcpu);
+ break;
+ case MIPS_GCTL0_GEXC_GRR:
+ ++vcpu->stat.vz_grr_exits;
+ er = kvm_trap_vz_no_handler_guest_exit(gexccode, cause, opc,
+ vcpu);
+ break;
+ case MIPS_GCTL0_GEXC_GVA:
+ ++vcpu->stat.vz_gva_exits;
+ er = kvm_trap_vz_no_handler_guest_exit(gexccode, cause, opc,
+ vcpu);
+ break;
+ case MIPS_GCTL0_GEXC_GHFC:
+ ++vcpu->stat.vz_ghfc_exits;
+ er = kvm_trap_vz_handle_ghfc(cause, opc, vcpu);
+ break;
+ case MIPS_GCTL0_GEXC_GPA:
+ ++vcpu->stat.vz_gpa_exits;
+ er = kvm_trap_vz_no_handler_guest_exit(gexccode, cause, opc,
+ vcpu);
+ break;
+ default:
+ ++vcpu->stat.vz_resvd_exits;
+ er = kvm_trap_vz_no_handler_guest_exit(gexccode, cause, opc,
+ vcpu);
+ break;
+
+ }
+
+ if (er == EMULATE_DONE) {
+ ret = RESUME_GUEST;
+ } else if (er == EMULATE_HYPERCALL) {
+ ret = kvm_mips_handle_hypcall(vcpu);
+ } else {
+ vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ ret = RESUME_HOST;
+ }
+ return ret;
+}
+
+/**
+ * kvm_trap_vz_handle_cop_unusable() - Guest used unusable coprocessor.
+ * @vcpu: Virtual CPU context.
+ *
+ * Handle when the guest attempts to use a coprocessor which hasn't been allowed
+ * by the root context.
+ */
+static int kvm_trap_vz_handle_cop_unusable(struct kvm_vcpu *vcpu)
+{
+ struct kvm_run *run = vcpu->run;
+ u32 cause = vcpu->arch.host_cp0_cause;
+ enum emulation_result er = EMULATE_FAIL;
+ int ret = RESUME_GUEST;
+
+ if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1) {
+ /*
+ * If guest FPU not present, the FPU operation should have been
+ * treated as a reserved instruction!
+ * If FPU already in use, we shouldn't get this at all.
+ */
+ if (WARN_ON(!kvm_mips_guest_has_fpu(&vcpu->arch) ||
+ vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)) {
+ preempt_enable();
+ return EMULATE_FAIL;
+ }
+
+ kvm_own_fpu(vcpu);
+ er = EMULATE_DONE;
+ }
+ /* other coprocessors not handled */
+
+ switch (er) {
+ case EMULATE_DONE:
+ ret = RESUME_GUEST;
+ break;
+
+ case EMULATE_FAIL:
+ run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ ret = RESUME_HOST;
+ break;
+
+ default:
+ BUG();
+ }
+ return ret;
+}
+
+/**
+ * kvm_trap_vz_handle_msa_disabled() - Guest used MSA while disabled in root.
+ * @vcpu: Virtual CPU context.
+ *
+ * Handle when the guest attempts to use MSA when it is disabled in the root
+ * context.
+ */
+static int kvm_trap_vz_handle_msa_disabled(struct kvm_vcpu *vcpu)
+{
+ struct kvm_run *run = vcpu->run;
+
+ /*
+ * If MSA is not present or not exposed to the guest, or if CU1=1 with
+ * FR=0, or if MSA is disabled in guest Config5, the MSA operation
+ * should have been treated as a reserved instruction!
+ * If MSA already in use, we shouldn't get this at all.
+ */
+ if (!kvm_mips_guest_has_msa(&vcpu->arch) ||
+ (read_gc0_status() & (ST0_CU1 | ST0_FR)) == ST0_CU1 ||
+ !(read_gc0_config5() & MIPS_CONF5_MSAEN) ||
+ vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) {
+ run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ return RESUME_HOST;
+ }
+
+ kvm_own_msa(vcpu);
+
+ return RESUME_GUEST;
+}
+
+static int kvm_trap_vz_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
+{
+ struct kvm_run *run = vcpu->run;
+ u32 *opc = (u32 *) vcpu->arch.pc;
+ u32 cause = vcpu->arch.host_cp0_cause;
+ ulong badvaddr = vcpu->arch.host_cp0_badvaddr;
+ union mips_instruction inst;
+ enum emulation_result er = EMULATE_DONE;
+ int err, ret = RESUME_GUEST;
+
+ if (kvm_mips_handle_vz_root_tlb_fault(badvaddr, vcpu, false)) {
+ /* A code fetch fault doesn't count as an MMIO */
+ if (kvm_is_ifetch_fault(&vcpu->arch)) {
+ run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ return RESUME_HOST;
+ }
+
+ /* Fetch the instruction */
+ if (cause & CAUSEF_BD)
+ opc += 1;
+ err = kvm_get_badinstr(opc, vcpu, &inst.word);
+ if (err) {
+ run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ return RESUME_HOST;
+ }
+
+ /* Treat as MMIO */
+ er = kvm_mips_emulate_load(inst, cause, run, vcpu);
+ if (er == EMULATE_FAIL) {
+ kvm_err("Guest Emulate Load from MMIO space failed: PC: %p, BadVaddr: %#lx\n",
+ opc, badvaddr);
+ run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ }
+ }
+
+ if (er == EMULATE_DONE) {
+ ret = RESUME_GUEST;
+ } else if (er == EMULATE_DO_MMIO) {
+ run->exit_reason = KVM_EXIT_MMIO;
+ ret = RESUME_HOST;
+ } else {
+ run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ ret = RESUME_HOST;
+ }
+ return ret;
+}
+
+static int kvm_trap_vz_handle_tlb_st_miss(struct kvm_vcpu *vcpu)
+{
+ struct kvm_run *run = vcpu->run;
+ u32 *opc = (u32 *) vcpu->arch.pc;
+ u32 cause = vcpu->arch.host_cp0_cause;
+ ulong badvaddr = vcpu->arch.host_cp0_badvaddr;
+ union mips_instruction inst;
+ enum emulation_result er = EMULATE_DONE;
+ int err;
+ int ret = RESUME_GUEST;
+
+ /* Just try the access again if we couldn't do the translation */
+ if (kvm_vz_badvaddr_to_gpa(vcpu, badvaddr, &badvaddr))
+ return RESUME_GUEST;
+ vcpu->arch.host_cp0_badvaddr = badvaddr;
+
+ if (kvm_mips_handle_vz_root_tlb_fault(badvaddr, vcpu, true)) {
+ /* Fetch the instruction */
+ if (cause & CAUSEF_BD)
+ opc += 1;
+ err = kvm_get_badinstr(opc, vcpu, &inst.word);
+ if (err) {
+ run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ return RESUME_HOST;
+ }
+
+ /* Treat as MMIO */
+ er = kvm_mips_emulate_store(inst, cause, run, vcpu);
+ if (er == EMULATE_FAIL) {
+ kvm_err("Guest Emulate Store to MMIO space failed: PC: %p, BadVaddr: %#lx\n",
+ opc, badvaddr);
+ run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ }
+ }
+
+ if (er == EMULATE_DONE) {
+ ret = RESUME_GUEST;
+ } else if (er == EMULATE_DO_MMIO) {
+ run->exit_reason = KVM_EXIT_MMIO;
+ ret = RESUME_HOST;
+ } else {
+ run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ ret = RESUME_HOST;
+ }
+ return ret;
+}
+
+static u64 kvm_vz_get_one_regs[] = {
+ KVM_REG_MIPS_CP0_INDEX,
+ KVM_REG_MIPS_CP0_ENTRYLO0,
+ KVM_REG_MIPS_CP0_ENTRYLO1,
+ KVM_REG_MIPS_CP0_CONTEXT,
+ KVM_REG_MIPS_CP0_PAGEMASK,
+ KVM_REG_MIPS_CP0_PAGEGRAIN,
+ KVM_REG_MIPS_CP0_WIRED,
+ KVM_REG_MIPS_CP0_HWRENA,
+ KVM_REG_MIPS_CP0_BADVADDR,
+ KVM_REG_MIPS_CP0_COUNT,
+ KVM_REG_MIPS_CP0_ENTRYHI,
+ KVM_REG_MIPS_CP0_COMPARE,
+ KVM_REG_MIPS_CP0_STATUS,
+ KVM_REG_MIPS_CP0_INTCTL,
+ KVM_REG_MIPS_CP0_CAUSE,
+ KVM_REG_MIPS_CP0_EPC,
+ KVM_REG_MIPS_CP0_PRID,
+ KVM_REG_MIPS_CP0_EBASE,
+ KVM_REG_MIPS_CP0_CONFIG,
+ KVM_REG_MIPS_CP0_CONFIG1,
+ KVM_REG_MIPS_CP0_CONFIG2,
+ KVM_REG_MIPS_CP0_CONFIG3,
+ KVM_REG_MIPS_CP0_CONFIG4,
+ KVM_REG_MIPS_CP0_CONFIG5,
+#ifdef CONFIG_64BIT
+ KVM_REG_MIPS_CP0_XCONTEXT,
+#endif
+ KVM_REG_MIPS_CP0_ERROREPC,
+
+ KVM_REG_MIPS_COUNT_CTL,
+ KVM_REG_MIPS_COUNT_RESUME,
+ KVM_REG_MIPS_COUNT_HZ,
+};
+
+static u64 kvm_vz_get_one_regs_contextconfig[] = {
+ KVM_REG_MIPS_CP0_CONTEXTCONFIG,
+#ifdef CONFIG_64BIT
+ KVM_REG_MIPS_CP0_XCONTEXTCONFIG,
+#endif
+};
+
+static u64 kvm_vz_get_one_regs_segments[] = {
+ KVM_REG_MIPS_CP0_SEGCTL0,
+ KVM_REG_MIPS_CP0_SEGCTL1,
+ KVM_REG_MIPS_CP0_SEGCTL2,
+};
+
+static u64 kvm_vz_get_one_regs_htw[] = {
+ KVM_REG_MIPS_CP0_PWBASE,
+ KVM_REG_MIPS_CP0_PWFIELD,
+ KVM_REG_MIPS_CP0_PWSIZE,
+ KVM_REG_MIPS_CP0_PWCTL,
+};
+
+static u64 kvm_vz_get_one_regs_kscratch[] = {
+ KVM_REG_MIPS_CP0_KSCRATCH1,
+ KVM_REG_MIPS_CP0_KSCRATCH2,
+ KVM_REG_MIPS_CP0_KSCRATCH3,
+ KVM_REG_MIPS_CP0_KSCRATCH4,
+ KVM_REG_MIPS_CP0_KSCRATCH5,
+ KVM_REG_MIPS_CP0_KSCRATCH6,
+};
+
+static unsigned long kvm_vz_num_regs(struct kvm_vcpu *vcpu)
+{
+ unsigned long ret;
+
+ ret = ARRAY_SIZE(kvm_vz_get_one_regs);
+ if (cpu_guest_has_userlocal)
+ ++ret;
+ if (cpu_guest_has_badinstr)
+ ++ret;
+ if (cpu_guest_has_badinstrp)
+ ++ret;
+ if (cpu_guest_has_contextconfig)
+ ret += ARRAY_SIZE(kvm_vz_get_one_regs_contextconfig);
+ if (cpu_guest_has_segments)
+ ret += ARRAY_SIZE(kvm_vz_get_one_regs_segments);
+ if (cpu_guest_has_htw)
+ ret += ARRAY_SIZE(kvm_vz_get_one_regs_htw);
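+ /* one register per implemented MAAR, plus MAARI */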
+ if (cpu_guest_has_maar && !cpu_guest_has_dyn_maar)
+ ret += 1 + ARRAY_SIZE(vcpu->arch.maar);
+ ret += __arch_hweight8(cpu_data[0].guest.kscratch_mask);
+
+ return ret;
+}
+
+static int kvm_vz_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices)
+{
+ u64 index;
+ unsigned int i;
+
+ if (copy_to_user(indices, kvm_vz_get_one_regs,
+ sizeof(kvm_vz_get_one_regs)))
+ return -EFAULT;
+ indices += ARRAY_SIZE(kvm_vz_get_one_regs);
+
+ if (cpu_guest_has_userlocal) {
+ index = KVM_REG_MIPS_CP0_USERLOCAL;
+ if (copy_to_user(indices, &index, sizeof(index)))
+ return -EFAULT;
+ ++indices;
+ }
+ if (cpu_guest_has_badinstr) {
+ index = KVM_REG_MIPS_CP0_BADINSTR;
+ if (copy_to_user(indices, &index, sizeof(index)))
+ return -EFAULT;
+ ++indices;
+ }
+ if (cpu_guest_has_badinstrp) {
+ index = KVM_REG_MIPS_CP0_BADINSTRP;
+ if (copy_to_user(indices, &index, sizeof(index)))
+ return -EFAULT;
+ ++indices;
+ }
+ if (cpu_guest_has_contextconfig) {
+ if (copy_to_user(indices, kvm_vz_get_one_regs_contextconfig,
+ sizeof(kvm_vz_get_one_regs_contextconfig)))
+ return -EFAULT;
+ indices += ARRAY_SIZE(kvm_vz_get_one_regs_contextconfig);
+ }
+ if (cpu_guest_has_segments) {
+ if (copy_to_user(indices, kvm_vz_get_one_regs_segments,
+ sizeof(kvm_vz_get_one_regs_segments)))
+ return -EFAULT;
+ indices += ARRAY_SIZE(kvm_vz_get_one_regs_segments);
+ }
+ if (cpu_guest_has_htw) {
+ if (copy_to_user(indices, kvm_vz_get_one_regs_htw,
+ sizeof(kvm_vz_get_one_regs_htw)))
+ return -EFAULT;
+ indices += ARRAY_SIZE(kvm_vz_get_one_regs_htw);
+ }
+ if (cpu_guest_has_maar && !cpu_guest_has_dyn_maar) {
+ for (i = 0; i < ARRAY_SIZE(vcpu->arch.maar); ++i) {
+ index = KVM_REG_MIPS_CP0_MAAR(i);
+ if (copy_to_user(indices, &index, sizeof(index)))
+ return -EFAULT;
+ ++indices;
+ }
+
+ index = KVM_REG_MIPS_CP0_MAARI;
+ if (copy_to_user(indices, &index, sizeof(index)))
+ return -EFAULT;
+ ++indices;
+ }
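+ /* KScratch1..KScratch6 are CP0 register 31, selects 2..7 */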
+ for (i = 0; i < 6; ++i) {
+ if (!cpu_guest_has_kscr(i + 2))
+ continue;
+
+ if (copy_to_user(indices, &kvm_vz_get_one_regs_kscratch[i],
+ sizeof(kvm_vz_get_one_regs_kscratch[i])))
+ return -EFAULT;
+ ++indices;
+ }
+
+ return 0;
+}
+
+static inline s64 entrylo_kvm_to_user(unsigned long v)
+{
+ s64 mask, ret = v;
+
+ if (BITS_PER_LONG == 32) {
+ /*
+ * KVM API exposes 64-bit version of the register, so move the
+ * RI/XI bits up into place.
+ */
+ mask = MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI;
+ ret &= ~mask;
+ ret |= ((s64)v & mask) << 32;
+ }
+ return ret;
+}
+
+static inline unsigned long entrylo_user_to_kvm(s64 v)
+{
+ unsigned long mask, ret = v;
+
+ if (BITS_PER_LONG == 32) {
+ /*
+ * KVM API exposes 64-bit version of the register, so move the
+ * RI/XI bits down into place.
+ */
+ mask = MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI;
+ ret &= ~mask;
+ ret |= (v >> 32) & mask;
+ }
+ return ret;
+}
+
+static int kvm_vz_get_one_reg(struct kvm_vcpu *vcpu,
+ const struct kvm_one_reg *reg,
+ s64 *v)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ unsigned int idx;
+
+ switch (reg->id) {
+ case KVM_REG_MIPS_CP0_INDEX:
+ *v = (long)read_gc0_index();
+ break;
+ case KVM_REG_MIPS_CP0_ENTRYLO0:
+ *v = entrylo_kvm_to_user(read_gc0_entrylo0());
+ break;
+ case KVM_REG_MIPS_CP0_ENTRYLO1:
+ *v = entrylo_kvm_to_user(read_gc0_entrylo1());
+ break;
+ case KVM_REG_MIPS_CP0_CONTEXT:
+ *v = (long)read_gc0_context();
+ break;
+ case KVM_REG_MIPS_CP0_CONTEXTCONFIG:
+ if (!cpu_guest_has_contextconfig)
+ return -EINVAL;
+ *v = read_gc0_contextconfig();
+ break;
+ case KVM_REG_MIPS_CP0_USERLOCAL:
+ if (!cpu_guest_has_userlocal)
+ return -EINVAL;
+ *v = read_gc0_userlocal();
+ break;
+#ifdef CONFIG_64BIT
+ case KVM_REG_MIPS_CP0_XCONTEXTCONFIG:
+ if (!cpu_guest_has_contextconfig)
+ return -EINVAL;
+ *v = read_gc0_xcontextconfig();
+ break;
+#endif
+ case KVM_REG_MIPS_CP0_PAGEMASK:
+ *v = (long)read_gc0_pagemask();
+ break;
+ case KVM_REG_MIPS_CP0_PAGEGRAIN:
+ *v = (long)read_gc0_pagegrain();
+ break;
+ case KVM_REG_MIPS_CP0_SEGCTL0:
+ if (!cpu_guest_has_segments)
+ return -EINVAL;
+ *v = read_gc0_segctl0();
+ break;
+ case KVM_REG_MIPS_CP0_SEGCTL1:
+ if (!cpu_guest_has_segments)
+ return -EINVAL;
+ *v = read_gc0_segctl1();
+ break;
+ case KVM_REG_MIPS_CP0_SEGCTL2:
+ if (!cpu_guest_has_segments)
+ return -EINVAL;
+ *v = read_gc0_segctl2();
+ break;
+ case KVM_REG_MIPS_CP0_PWBASE:
+ if (!cpu_guest_has_htw)
+ return -EINVAL;
+ *v = read_gc0_pwbase();
+ break;
+ case KVM_REG_MIPS_CP0_PWFIELD:
+ if (!cpu_guest_has_htw)
+ return -EINVAL;
+ *v = read_gc0_pwfield();
+ break;
+ case KVM_REG_MIPS_CP0_PWSIZE:
+ if (!cpu_guest_has_htw)
+ return -EINVAL;
+ *v = read_gc0_pwsize();
+ break;
+ case KVM_REG_MIPS_CP0_WIRED:
+ *v = (long)read_gc0_wired();
+ break;
+ case KVM_REG_MIPS_CP0_PWCTL:
+ if (!cpu_guest_has_htw)
+ return -EINVAL;
+ *v = read_gc0_pwctl();
+ break;
+ case KVM_REG_MIPS_CP0_HWRENA:
+ *v = (long)read_gc0_hwrena();
+ break;
+ case KVM_REG_MIPS_CP0_BADVADDR:
+ *v = (long)read_gc0_badvaddr();
+ break;
+ case KVM_REG_MIPS_CP0_BADINSTR:
+ if (!cpu_guest_has_badinstr)
+ return -EINVAL;
+ *v = read_gc0_badinstr();
+ break;
+ case KVM_REG_MIPS_CP0_BADINSTRP:
+ if (!cpu_guest_has_badinstrp)
+ return -EINVAL;
+ *v = read_gc0_badinstrp();
+ break;
+ case KVM_REG_MIPS_CP0_COUNT:
+ *v = kvm_mips_read_count(vcpu);
+ break;
+ case KVM_REG_MIPS_CP0_ENTRYHI:
+ *v = (long)read_gc0_entryhi();
+ break;
+ case KVM_REG_MIPS_CP0_COMPARE:
+ *v = (long)read_gc0_compare();
+ break;
+ case KVM_REG_MIPS_CP0_STATUS:
+ *v = (long)read_gc0_status();
+ break;
+ case KVM_REG_MIPS_CP0_INTCTL:
+ *v = read_gc0_intctl();
+ break;
+ case KVM_REG_MIPS_CP0_CAUSE:
+ *v = (long)read_gc0_cause();
+ break;
+ case KVM_REG_MIPS_CP0_EPC:
+ *v = (long)read_gc0_epc();
+ break;
+ case KVM_REG_MIPS_CP0_PRID:
+ switch (boot_cpu_type()) {
+ case CPU_CAVIUM_OCTEON3:
+ /* Octeon III has a read-only guest.PRid */
+ *v = read_gc0_prid();
+ break;
+ default:
+ *v = (long)kvm_read_c0_guest_prid(cop0);
+ break;
+ }
+ break;
+ case KVM_REG_MIPS_CP0_EBASE:
+ *v = kvm_vz_read_gc0_ebase();
+ break;
+ case KVM_REG_MIPS_CP0_CONFIG:
+ *v = read_gc0_config();
+ break;
+ case KVM_REG_MIPS_CP0_CONFIG1:
+ if (!cpu_guest_has_conf1)
+ return -EINVAL;
+ *v = read_gc0_config1();
+ break;
+ case KVM_REG_MIPS_CP0_CONFIG2:
+ if (!cpu_guest_has_conf2)
+ return -EINVAL;
+ *v = read_gc0_config2();
+ break;
+ case KVM_REG_MIPS_CP0_CONFIG3:
+ if (!cpu_guest_has_conf3)
+ return -EINVAL;
+ *v = read_gc0_config3();
+ break;
+ case KVM_REG_MIPS_CP0_CONFIG4:
+ if (!cpu_guest_has_conf4)
+ return -EINVAL;
+ *v = read_gc0_config4();
+ break;
+ case KVM_REG_MIPS_CP0_CONFIG5:
+ if (!cpu_guest_has_conf5)
+ return -EINVAL;
+ *v = read_gc0_config5();
+ break;
+ case KVM_REG_MIPS_CP0_MAAR(0) ... KVM_REG_MIPS_CP0_MAAR(0x3f):
+ if (!cpu_guest_has_maar || cpu_guest_has_dyn_maar)
+ return -EINVAL;
+ idx = reg->id - KVM_REG_MIPS_CP0_MAAR(0);
+ if (idx >= ARRAY_SIZE(vcpu->arch.maar))
+ return -EINVAL;
+ *v = vcpu->arch.maar[idx];
+ break;
+ case KVM_REG_MIPS_CP0_MAARI:
+ if (!cpu_guest_has_maar || cpu_guest_has_dyn_maar)
+ return -EINVAL;
+ *v = kvm_read_sw_gc0_maari(vcpu->arch.cop0);
+ break;
+#ifdef CONFIG_64BIT
+ case KVM_REG_MIPS_CP0_XCONTEXT:
+ *v = read_gc0_xcontext();
+ break;
+#endif
+ case KVM_REG_MIPS_CP0_ERROREPC:
+ *v = (long)read_gc0_errorepc();
+ break;
+ case KVM_REG_MIPS_CP0_KSCRATCH1 ... KVM_REG_MIPS_CP0_KSCRATCH6:
+ idx = reg->id - KVM_REG_MIPS_CP0_KSCRATCH1 + 2;
+ if (!cpu_guest_has_kscr(idx))
+ return -EINVAL;
+ switch (idx) {
+ case 2:
+ *v = (long)read_gc0_kscratch1();
+ break;
+ case 3:
+ *v = (long)read_gc0_kscratch2();
+ break;
+ case 4:
+ *v = (long)read_gc0_kscratch3();
+ break;
+ case 5:
+ *v = (long)read_gc0_kscratch4();
+ break;
+ case 6:
+ *v = (long)read_gc0_kscratch5();
+ break;
+ case 7:
+ *v = (long)read_gc0_kscratch6();
+ break;
+ }
+ break;
+ case KVM_REG_MIPS_COUNT_CTL:
+ *v = vcpu->arch.count_ctl;
+ break;
+ case KVM_REG_MIPS_COUNT_RESUME:
+ *v = ktime_to_ns(vcpu->arch.count_resume);
+ break;
+ case KVM_REG_MIPS_COUNT_HZ:
+ *v = vcpu->arch.count_hz;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int kvm_vz_set_one_reg(struct kvm_vcpu *vcpu,
+ const struct kvm_one_reg *reg,
+ s64 v)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ unsigned int idx;
+ int ret = 0;
+ unsigned int cur, change;
+
+ switch (reg->id) {
+ case KVM_REG_MIPS_CP0_INDEX:
+ write_gc0_index(v);
+ break;
+ case KVM_REG_MIPS_CP0_ENTRYLO0:
+ write_gc0_entrylo0(entrylo_user_to_kvm(v));
+ break;
+ case KVM_REG_MIPS_CP0_ENTRYLO1:
+ write_gc0_entrylo1(entrylo_user_to_kvm(v));
+ break;
+ case KVM_REG_MIPS_CP0_CONTEXT:
+ write_gc0_context(v);
+ break;
+ case KVM_REG_MIPS_CP0_CONTEXTCONFIG:
+ if (!cpu_guest_has_contextconfig)
+ return -EINVAL;
+ write_gc0_contextconfig(v);
+ break;
+ case KVM_REG_MIPS_CP0_USERLOCAL:
+ if (!cpu_guest_has_userlocal)
+ return -EINVAL;
+ write_gc0_userlocal(v);
+ break;
+#ifdef CONFIG_64BIT
+ case KVM_REG_MIPS_CP0_XCONTEXTCONFIG:
+ if (!cpu_guest_has_contextconfig)
+ return -EINVAL;
+ write_gc0_xcontextconfig(v);
+ break;
+#endif
+ case KVM_REG_MIPS_CP0_PAGEMASK:
+ write_gc0_pagemask(v);
+ break;
+ case KVM_REG_MIPS_CP0_PAGEGRAIN:
+ write_gc0_pagegrain(v);
+ break;
+ case KVM_REG_MIPS_CP0_SEGCTL0:
+ if (!cpu_guest_has_segments)
+ return -EINVAL;
+ write_gc0_segctl0(v);
+ break;
+ case KVM_REG_MIPS_CP0_SEGCTL1:
+ if (!cpu_guest_has_segments)
+ return -EINVAL;
+ write_gc0_segctl1(v);
+ break;
+ case KVM_REG_MIPS_CP0_SEGCTL2:
+ if (!cpu_guest_has_segments)
+ return -EINVAL;
+ write_gc0_segctl2(v);
+ break;
+ case KVM_REG_MIPS_CP0_PWBASE:
+ if (!cpu_guest_has_htw)
+ return -EINVAL;
+ write_gc0_pwbase(v);
+ break;
+ case KVM_REG_MIPS_CP0_PWFIELD:
+ if (!cpu_guest_has_htw)
+ return -EINVAL;
+ write_gc0_pwfield(v);
+ break;
+ case KVM_REG_MIPS_CP0_PWSIZE:
+ if (!cpu_guest_has_htw)
+ return -EINVAL;
+ write_gc0_pwsize(v);
+ break;
+ case KVM_REG_MIPS_CP0_WIRED:
+ change_gc0_wired(MIPSR6_WIRED_WIRED, v);
+ break;
+ case KVM_REG_MIPS_CP0_PWCTL:
+ if (!cpu_guest_has_htw)
+ return -EINVAL;
+ write_gc0_pwctl(v);
+ break;
+ case KVM_REG_MIPS_CP0_HWRENA:
+ write_gc0_hwrena(v);
+ break;
+ case KVM_REG_MIPS_CP0_BADVADDR:
+ write_gc0_badvaddr(v);
+ break;
+ case KVM_REG_MIPS_CP0_BADINSTR:
+ if (!cpu_guest_has_badinstr)
+ return -EINVAL;
+ write_gc0_badinstr(v);
+ break;
+ case KVM_REG_MIPS_CP0_BADINSTRP:
+ if (!cpu_guest_has_badinstrp)
+ return -EINVAL;
+ write_gc0_badinstrp(v);
+ break;
+ case KVM_REG_MIPS_CP0_COUNT:
+ kvm_mips_write_count(vcpu, v);
+ break;
+ case KVM_REG_MIPS_CP0_ENTRYHI:
+ write_gc0_entryhi(v);
+ break;
+ case KVM_REG_MIPS_CP0_COMPARE:
+ kvm_mips_write_compare(vcpu, v, false);
+ break;
+ case KVM_REG_MIPS_CP0_STATUS:
+ write_gc0_status(v);
+ break;
+ case KVM_REG_MIPS_CP0_INTCTL:
+ write_gc0_intctl(v);
+ break;
+ case KVM_REG_MIPS_CP0_CAUSE:
+ /*
+ * If the timer is stopped or started (DC bit) it must look
+ * atomic with changes to the timer interrupt pending bit (TI).
+ * A timer interrupt should not happen in between.
+ */
+ if ((read_gc0_cause() ^ v) & CAUSEF_DC) {
+ if (v & CAUSEF_DC) {
+ /* disable timer first */
+ kvm_mips_count_disable_cause(vcpu);
+ change_gc0_cause((u32)~CAUSEF_DC, v);
+ } else {
+ /* enable timer last */
+ change_gc0_cause((u32)~CAUSEF_DC, v);
+ kvm_mips_count_enable_cause(vcpu);
+ }
+ } else {
+ write_gc0_cause(v);
+ }
+ break;
+ case KVM_REG_MIPS_CP0_EPC:
+ write_gc0_epc(v);
+ break;
+ case KVM_REG_MIPS_CP0_PRID:
+ switch (boot_cpu_type()) {
+ case CPU_CAVIUM_OCTEON3:
+ /* Octeon III has a guest.PRid, but it's read-only */
+ break;
+ default:
+ kvm_write_c0_guest_prid(cop0, v);
+ break;
+ }
+ break;
+ case KVM_REG_MIPS_CP0_EBASE:
+ kvm_vz_write_gc0_ebase(v);
+ break;
+ case KVM_REG_MIPS_CP0_CONFIG:
+ cur = read_gc0_config();
+ change = (cur ^ v) & kvm_vz_config_user_wrmask(vcpu);
+ if (change) {
+ v = cur ^ change;
+ write_gc0_config(v);
+ }
+ break;
+ case KVM_REG_MIPS_CP0_CONFIG1:
+ if (!cpu_guest_has_conf1)
+ break;
+ cur = read_gc0_config1();
+ change = (cur ^ v) & kvm_vz_config1_user_wrmask(vcpu);
+ if (change) {
+ v = cur ^ change;
+ write_gc0_config1(v);
+ }
+ break;
+ case KVM_REG_MIPS_CP0_CONFIG2:
+ if (!cpu_guest_has_conf2)
+ break;
+ cur = read_gc0_config2();
+ change = (cur ^ v) & kvm_vz_config2_user_wrmask(vcpu);
+ if (change) {
+ v = cur ^ change;
+ write_gc0_config2(v);
+ }
+ break;
+ case KVM_REG_MIPS_CP0_CONFIG3:
+ if (!cpu_guest_has_conf3)
+ break;
+ cur = read_gc0_config3();
+ change = (cur ^ v) & kvm_vz_config3_user_wrmask(vcpu);
+ if (change) {
+ v = cur ^ change;
+ write_gc0_config3(v);
+ }
+ break;
+ case KVM_REG_MIPS_CP0_CONFIG4:
+ if (!cpu_guest_has_conf4)
+ break;
+ cur = read_gc0_config4();
+ change = (cur ^ v) & kvm_vz_config4_user_wrmask(vcpu);
+ if (change) {
+ v = cur ^ change;
+ write_gc0_config4(v);
+ }
+ break;
+ case KVM_REG_MIPS_CP0_CONFIG5:
+ if (!cpu_guest_has_conf5)
+ break;
+ cur = read_gc0_config5();
+ change = (cur ^ v) & kvm_vz_config5_user_wrmask(vcpu);
+ if (change) {
+ v = cur ^ change;
+ write_gc0_config5(v);
+ }
+ break;
+ case KVM_REG_MIPS_CP0_MAAR(0) ... KVM_REG_MIPS_CP0_MAAR(0x3f):
+ if (!cpu_guest_has_maar || cpu_guest_has_dyn_maar)
+ return -EINVAL;
+ idx = reg->id - KVM_REG_MIPS_CP0_MAAR(0);
+ if (idx >= ARRAY_SIZE(vcpu->arch.maar))
+ return -EINVAL;
+ vcpu->arch.maar[idx] = mips_process_maar(dmtc_op, v);
+ break;
+ case KVM_REG_MIPS_CP0_MAARI:
+ if (!cpu_guest_has_maar || cpu_guest_has_dyn_maar)
+ return -EINVAL;
+ kvm_write_maari(vcpu, v);
+ break;
+#ifdef CONFIG_64BIT
+ case KVM_REG_MIPS_CP0_XCONTEXT:
+ write_gc0_xcontext(v);
+ break;
+#endif
+ case KVM_REG_MIPS_CP0_ERROREPC:
+ write_gc0_errorepc(v);
+ break;
+ case KVM_REG_MIPS_CP0_KSCRATCH1 ... KVM_REG_MIPS_CP0_KSCRATCH6:
+ idx = reg->id - KVM_REG_MIPS_CP0_KSCRATCH1 + 2;
+ if (!cpu_guest_has_kscr(idx))
+ return -EINVAL;
+ switch (idx) {
+ case 2:
+ write_gc0_kscratch1(v);
+ break;
+ case 3:
+ write_gc0_kscratch2(v);
+ break;
+ case 4:
+ write_gc0_kscratch3(v);
+ break;
+ case 5:
+ write_gc0_kscratch4(v);
+ break;
+ case 6:
+ write_gc0_kscratch5(v);
+ break;
+ case 7:
+ write_gc0_kscratch6(v);
+ break;
+ }
+ break;
+ case KVM_REG_MIPS_COUNT_CTL:
+ ret = kvm_mips_set_count_ctl(vcpu, v);
+ break;
+ case KVM_REG_MIPS_COUNT_RESUME:
+ ret = kvm_mips_set_count_resume(vcpu, v);
+ break;
+ case KVM_REG_MIPS_COUNT_HZ:
+ ret = kvm_mips_set_count_hz(vcpu, v);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return ret;
+}
+
+#define guestid_cache(cpu) (cpu_data[cpu].guestid_cache)
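+/*
+ * Allocate a fresh GuestID on this CPU. When the GuestID space wraps, a new
+ * version cycle is started and guest TLB state (and the VTag icache, if any)
+ * is flushed so that stale translations cannot be reused.
+ */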
+static void kvm_vz_get_new_guestid(unsigned long cpu, struct kvm_vcpu *vcpu)
+{
+ unsigned long guestid = guestid_cache(cpu);
+
+ if (!(++guestid & GUESTID_MASK)) {
+ if (cpu_has_vtag_icache)
+ flush_icache_all();
+
+ if (!guestid) /* fix version if needed */
+ guestid = GUESTID_FIRST_VERSION;
+
+ ++guestid; /* guestid 0 reserved for root */
+
+ /* start new guestid cycle */
+ kvm_vz_local_flush_roottlb_all_guests();
+ kvm_vz_local_flush_guesttlb_all();
+ }
+
+ guestid_cache(cpu) = guestid;
+}
+
+/* Returns 1 if the guest TLB may be clobbered */
+static int kvm_vz_check_requests(struct kvm_vcpu *vcpu, int cpu)
+{
+ int ret = 0;
+ int i;
+
+ if (!vcpu->requests)
+ return 0;
+
+ if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
+ if (cpu_has_guestid) {
+ /* Drop all GuestIDs for this VCPU */
+ for_each_possible_cpu(i)
+ vcpu->arch.vzguestid[i] = 0;
+ /* This will clobber guest TLB contents too */
+ ret = 1;
+ }
+ /*
+ * For Root ASID Dealias (RAD) we don't do anything here, but we
+ * still need the request to ensure we recheck asid_flush_mask.
+ * We can still return 0 as only the root TLB will be affected
+ * by a root ASID flush.
+ */
+ }
+
+ return ret;
+}
+
+static void kvm_vz_vcpu_save_wired(struct kvm_vcpu *vcpu)
+{
+ unsigned int wired = read_gc0_wired();
+ struct kvm_mips_tlb *tlbs;
+ int i;
+
+ /* Expand the wired TLB array if necessary */
+ wired &= MIPSR6_WIRED_WIRED;
+ if (wired > vcpu->arch.wired_tlb_limit) {
+ tlbs = krealloc(vcpu->arch.wired_tlb, wired *
+ sizeof(*vcpu->arch.wired_tlb), GFP_ATOMIC);
+ if (WARN_ON(!tlbs)) {
+ /* Save whatever we can */
+ wired = vcpu->arch.wired_tlb_limit;
+ } else {
+ vcpu->arch.wired_tlb = tlbs;
+ vcpu->arch.wired_tlb_limit = wired;
+ }
+ }
+
+ if (wired)
+ /* Save wired entries from the guest TLB */
+ kvm_vz_save_guesttlb(vcpu->arch.wired_tlb, 0, wired);
+ /* Invalidate any dropped entries since last time */
+ for (i = wired; i < vcpu->arch.wired_tlb_used; ++i) {
+ vcpu->arch.wired_tlb[i].tlb_hi = UNIQUE_GUEST_ENTRYHI(i);
+ vcpu->arch.wired_tlb[i].tlb_lo[0] = 0;
+ vcpu->arch.wired_tlb[i].tlb_lo[1] = 0;
+ vcpu->arch.wired_tlb[i].tlb_mask = 0;
+ }
+ vcpu->arch.wired_tlb_used = wired;
+}
+
+static void kvm_vz_vcpu_load_wired(struct kvm_vcpu *vcpu)
+{
+ /* Load wired entries into the guest TLB */
+ if (vcpu->arch.wired_tlb)
+ kvm_vz_load_guesttlb(vcpu->arch.wired_tlb, 0,
+ vcpu->arch.wired_tlb_used);
+}
+
+static void kvm_vz_vcpu_load_tlb(struct kvm_vcpu *vcpu, int cpu)
+{
+ struct kvm *kvm = vcpu->kvm;
+ struct mm_struct *gpa_mm = &kvm->arch.gpa_mm;
+ bool migrated;
+
+ /*
+ * Are we entering guest context on a different CPU to last time?
+ * If so, the VCPU's guest TLB state on this CPU may be stale.
+ */
+ migrated = (vcpu->arch.last_exec_cpu != cpu);
+ vcpu->arch.last_exec_cpu = cpu;
+
+ /*
+ * A vcpu's GuestID is set in GuestCtl1.ID when the vcpu is loaded and
+ * remains set until another vcpu is loaded in. As a rule GuestRID
+ * remains zeroed when in root context unless the kernel is busy
+ * manipulating guest tlb entries.
+ */
+ if (cpu_has_guestid) {
+ /*
+ * Check if our GuestID is of an older version and thus invalid.
+ *
+ * We also discard the stored GuestID if we've executed on
+ * another CPU, as the guest mappings may have changed without
+ * hypervisor knowledge.
+ */
+ if (migrated ||
+ (vcpu->arch.vzguestid[cpu] ^ guestid_cache(cpu)) &
+ GUESTID_VERSION_MASK) {
+ kvm_vz_get_new_guestid(cpu, vcpu);
+ vcpu->arch.vzguestid[cpu] = guestid_cache(cpu);
+ trace_kvm_guestid_change(vcpu,
+ vcpu->arch.vzguestid[cpu]);
+ }
+
+ /* Restore GuestID */
+ change_c0_guestctl1(GUESTID_MASK, vcpu->arch.vzguestid[cpu]);
+ } else {
+ /*
+ * The Guest TLB only stores a single guest's TLB state, so
+ * flush it if another VCPU has executed on this CPU.
+ *
+ * We also flush if we've executed on another CPU, as the guest
+ * mappings may have changed without hypervisor knowledge.
+ */
+ if (migrated || last_exec_vcpu[cpu] != vcpu)
+ kvm_vz_local_flush_guesttlb_all();
+ last_exec_vcpu[cpu] = vcpu;
+
+ /*
+ * Root ASID dealiases guest GPA mappings in the root TLB.
+ * Allocate new root ASID if needed.
+ */
+ if (cpumask_test_and_clear_cpu(cpu, &kvm->arch.asid_flush_mask)
+ || (cpu_context(cpu, gpa_mm) ^ asid_cache(cpu)) &
+ asid_version_mask(cpu))
+ get_new_mmu_context(gpa_mm, cpu);
+ }
+}
+
+static int kvm_vz_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ bool migrated, all;
+
+ /*
+ * Have we migrated to a different CPU?
+ * If so, any old guest TLB state may be stale.
+ */
+ migrated = (vcpu->arch.last_sched_cpu != cpu);
+
+ /*
+ * Was this the last VCPU to run on this CPU?
+ * If not, any old guest state from this VCPU will have been clobbered.
+ */
+ all = migrated || (last_vcpu[cpu] != vcpu);
+ last_vcpu[cpu] = vcpu;
+
+ /*
+ * Restore CP0_Wired unconditionally as we clear it after use, and
+ * restore wired guest TLB entries (while in guest context).
+ */
+ kvm_restore_gc0_wired(cop0);
+ if (current->flags & PF_VCPU) {
+ tlbw_use_hazard();
+ kvm_vz_vcpu_load_tlb(vcpu, cpu);
+ kvm_vz_vcpu_load_wired(vcpu);
+ }
+
+ /*
+ * Restore timer state regardless, as e.g. Cause.TI can change over time
+ * if left unmaintained.
+ */
+ kvm_vz_restore_timer(vcpu);
+
+ /* Set MC bit if we want to trace guest mode changes */
+ if (kvm_trace_guest_mode_change)
+ set_c0_guestctl0(MIPS_GCTL0_MC);
+ else
+ clear_c0_guestctl0(MIPS_GCTL0_MC);
+
+ /* Don't bother restoring registers multiple times unless necessary */
+ if (!all)
+ return 0;
+
+ /*
+ * Restore config registers first, as some implementations restrict
+ * writes to other registers when the corresponding feature bits aren't
+ * set. For example Status.CU1 cannot be set unless Config1.FP is set.
+ */
+ kvm_restore_gc0_config(cop0);
+ if (cpu_guest_has_conf1)
+ kvm_restore_gc0_config1(cop0);
+ if (cpu_guest_has_conf2)
+ kvm_restore_gc0_config2(cop0);
+ if (cpu_guest_has_conf3)
+ kvm_restore_gc0_config3(cop0);
+ if (cpu_guest_has_conf4)
+ kvm_restore_gc0_config4(cop0);
+ if (cpu_guest_has_conf5)
+ kvm_restore_gc0_config5(cop0);
+ if (cpu_guest_has_conf6)
+ kvm_restore_gc0_config6(cop0);
+ if (cpu_guest_has_conf7)
+ kvm_restore_gc0_config7(cop0);
+
+ kvm_restore_gc0_index(cop0);
+ kvm_restore_gc0_entrylo0(cop0);
+ kvm_restore_gc0_entrylo1(cop0);
+ kvm_restore_gc0_context(cop0);
+ if (cpu_guest_has_contextconfig)
+ kvm_restore_gc0_contextconfig(cop0);
+#ifdef CONFIG_64BIT
+ kvm_restore_gc0_xcontext(cop0);
+ if (cpu_guest_has_contextconfig)
+ kvm_restore_gc0_xcontextconfig(cop0);
+#endif
+ kvm_restore_gc0_pagemask(cop0);
+ kvm_restore_gc0_pagegrain(cop0);
+ kvm_restore_gc0_hwrena(cop0);
+ kvm_restore_gc0_badvaddr(cop0);
+ kvm_restore_gc0_entryhi(cop0);
+ kvm_restore_gc0_status(cop0);
+ kvm_restore_gc0_intctl(cop0);
+ kvm_restore_gc0_epc(cop0);
+ kvm_vz_write_gc0_ebase(kvm_read_sw_gc0_ebase(cop0));
+ if (cpu_guest_has_userlocal)
+ kvm_restore_gc0_userlocal(cop0);
+
+ kvm_restore_gc0_errorepc(cop0);
+
+ /* restore KScratch registers if enabled in guest */
+ if (cpu_guest_has_conf4) {
+ if (cpu_guest_has_kscr(2))
+ kvm_restore_gc0_kscratch1(cop0);
+ if (cpu_guest_has_kscr(3))
+ kvm_restore_gc0_kscratch2(cop0);
+ if (cpu_guest_has_kscr(4))
+ kvm_restore_gc0_kscratch3(cop0);
+ if (cpu_guest_has_kscr(5))
+ kvm_restore_gc0_kscratch4(cop0);
+ if (cpu_guest_has_kscr(6))
+ kvm_restore_gc0_kscratch5(cop0);
+ if (cpu_guest_has_kscr(7))
+ kvm_restore_gc0_kscratch6(cop0);
+ }
+
+ if (cpu_guest_has_badinstr)
+ kvm_restore_gc0_badinstr(cop0);
+ if (cpu_guest_has_badinstrp)
+ kvm_restore_gc0_badinstrp(cop0);
+
+ if (cpu_guest_has_segments) {
+ kvm_restore_gc0_segctl0(cop0);
+ kvm_restore_gc0_segctl1(cop0);
+ kvm_restore_gc0_segctl2(cop0);
+ }
+
+ /* restore HTW registers */
+ if (cpu_guest_has_htw) {
+ kvm_restore_gc0_pwbase(cop0);
+ kvm_restore_gc0_pwfield(cop0);
+ kvm_restore_gc0_pwsize(cop0);
+ kvm_restore_gc0_pwctl(cop0);
+ }
+
+ /* restore Root.GuestCtl2 from unused Guest guestctl2 register */
+ if (cpu_has_guestctl2)
+ write_c0_guestctl2(
+ cop0->reg[MIPS_CP0_GUESTCTL2][MIPS_CP0_GUESTCTL2_SEL]);
+
+ /*
+ * Clear the linked-load bit to break interrupted atomics. This
+ * prevents an SC on the next VCPU from succeeding by matching an LL
+ * on the previous VCPU.
+ */
+ if (cpu_guest_has_rw_llb)
+ write_gc0_lladdr(0);
+
+ return 0;
+}
+
+static int kvm_vz_vcpu_put(struct kvm_vcpu *vcpu, int cpu)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+
+ if (current->flags & PF_VCPU)
+ kvm_vz_vcpu_save_wired(vcpu);
+
+ kvm_lose_fpu(vcpu);
+
+ kvm_save_gc0_index(cop0);
+ kvm_save_gc0_entrylo0(cop0);
+ kvm_save_gc0_entrylo1(cop0);
+ kvm_save_gc0_context(cop0);
+ if (cpu_guest_has_contextconfig)
+ kvm_save_gc0_contextconfig(cop0);
+#ifdef CONFIG_64BIT
+ kvm_save_gc0_xcontext(cop0);
+ if (cpu_guest_has_contextconfig)
+ kvm_save_gc0_xcontextconfig(cop0);
+#endif
+ kvm_save_gc0_pagemask(cop0);
+ kvm_save_gc0_pagegrain(cop0);
+ kvm_save_gc0_wired(cop0);
+ /* allow wired TLB entries to be overwritten */
+ clear_gc0_wired(MIPSR6_WIRED_WIRED);
+ kvm_save_gc0_hwrena(cop0);
+ kvm_save_gc0_badvaddr(cop0);
+ kvm_save_gc0_entryhi(cop0);
+ kvm_save_gc0_status(cop0);
+ kvm_save_gc0_intctl(cop0);
+ kvm_save_gc0_epc(cop0);
+ kvm_write_sw_gc0_ebase(cop0, kvm_vz_read_gc0_ebase());
+ if (cpu_guest_has_userlocal)
+ kvm_save_gc0_userlocal(cop0);
+
+ /* only save implemented config registers */
+ kvm_save_gc0_config(cop0);
+ if (cpu_guest_has_conf1)
+ kvm_save_gc0_config1(cop0);
+ if (cpu_guest_has_conf2)
+ kvm_save_gc0_config2(cop0);
+ if (cpu_guest_has_conf3)
+ kvm_save_gc0_config3(cop0);
+ if (cpu_guest_has_conf4)
+ kvm_save_gc0_config4(cop0);
+ if (cpu_guest_has_conf5)
+ kvm_save_gc0_config5(cop0);
+ if (cpu_guest_has_conf6)
+ kvm_save_gc0_config6(cop0);
+ if (cpu_guest_has_conf7)
+ kvm_save_gc0_config7(cop0);
+
+ kvm_save_gc0_errorepc(cop0);
+
+ /* save KScratch registers if enabled in guest */
+ if (cpu_guest_has_conf4) {
+ if (cpu_guest_has_kscr(2))
+ kvm_save_gc0_kscratch1(cop0);
+ if (cpu_guest_has_kscr(3))
+ kvm_save_gc0_kscratch2(cop0);
+ if (cpu_guest_has_kscr(4))
+ kvm_save_gc0_kscratch3(cop0);
+ if (cpu_guest_has_kscr(5))
+ kvm_save_gc0_kscratch4(cop0);
+ if (cpu_guest_has_kscr(6))
+ kvm_save_gc0_kscratch5(cop0);
+ if (cpu_guest_has_kscr(7))
+ kvm_save_gc0_kscratch6(cop0);
+ }
+
+ if (cpu_guest_has_badinstr)
+ kvm_save_gc0_badinstr(cop0);
+ if (cpu_guest_has_badinstrp)
+ kvm_save_gc0_badinstrp(cop0);
+
+ if (cpu_guest_has_segments) {
+ kvm_save_gc0_segctl0(cop0);
+ kvm_save_gc0_segctl1(cop0);
+ kvm_save_gc0_segctl2(cop0);
+ }
+
+ /* save HTW registers if enabled in guest */
+ if (cpu_guest_has_htw &&
+ kvm_read_sw_gc0_config3(cop0) & MIPS_CONF3_PW) {
+ kvm_save_gc0_pwbase(cop0);
+ kvm_save_gc0_pwfield(cop0);
+ kvm_save_gc0_pwsize(cop0);
+ kvm_save_gc0_pwctl(cop0);
+ }
+
+ kvm_vz_save_timer(vcpu);
+
+ /* save Root.GuestCtl2 in unused Guest guestctl2 register */
+ if (cpu_has_guestctl2)
+ cop0->reg[MIPS_CP0_GUESTCTL2][MIPS_CP0_GUESTCTL2_SEL] =
+ read_c0_guestctl2();
+
+ return 0;
+}
+
+/**
+ * kvm_vz_resize_guest_vtlb() - Attempt to resize guest VTLB.
+ * @size: Number of guest VTLB entries (0 < @size <= root VTLB entries).
+ *
+ * Attempt to resize the guest VTLB by writing guest Config registers. This is
+ * necessary for cores with a shared root/guest TLB to avoid overlap with wired
+ * entries in the root VTLB.
+ *
+ * Returns: The resulting guest VTLB size.
+ */
+static unsigned int kvm_vz_resize_guest_vtlb(unsigned int size)
+{
+ unsigned int config4 = 0, ret = 0, limit;
+
+ /* Write MMUSize - 1 into guest Config registers */
+ if (cpu_guest_has_conf1)
+ change_gc0_config1(MIPS_CONF1_TLBS,
+ (size - 1) << MIPS_CONF1_TLBS_SHIFT);
+ if (cpu_guest_has_conf4) {
+ config4 = read_gc0_config4();
+ if (cpu_has_mips_r6 || (config4 & MIPS_CONF4_MMUEXTDEF) ==
+ MIPS_CONF4_MMUEXTDEF_VTLBSIZEEXT) {
+ config4 &= ~MIPS_CONF4_VTLBSIZEEXT;
+ config4 |= ((size - 1) >> MIPS_CONF1_TLBS_SIZE) <<
+ MIPS_CONF4_VTLBSIZEEXT_SHIFT;
+ } else if ((config4 & MIPS_CONF4_MMUEXTDEF) ==
+ MIPS_CONF4_MMUEXTDEF_MMUSIZEEXT) {
+ config4 &= ~MIPS_CONF4_MMUSIZEEXT;
+ config4 |= ((size - 1) >> MIPS_CONF1_TLBS_SIZE) <<
+ MIPS_CONF4_MMUSIZEEXT_SHIFT;
+ }
+ write_gc0_config4(config4);
+ }
+
+ /*
+ * Set Guest.Wired.Limit = 0 (no limit up to Guest.MMUSize-1), unless it
+ * would exceed Root.Wired.Limit (clearing Guest.Wired.Wired so write
+ * not dropped)
+ */
+ if (cpu_has_mips_r6) {
+ limit = (read_c0_wired() & MIPSR6_WIRED_LIMIT) >>
+ MIPSR6_WIRED_LIMIT_SHIFT;
+ if (size - 1 <= limit)
+ limit = 0;
+ write_gc0_wired(limit << MIPSR6_WIRED_LIMIT_SHIFT);
+ }
+
+ /* Read back MMUSize - 1 */
+ back_to_back_c0_hazard();
+ if (cpu_guest_has_conf1)
+ ret = (read_gc0_config1() & MIPS_CONF1_TLBS) >>
+ MIPS_CONF1_TLBS_SHIFT;
+ if (config4) {
+ if (cpu_has_mips_r6 || (config4 & MIPS_CONF4_MMUEXTDEF) ==
+ MIPS_CONF4_MMUEXTDEF_VTLBSIZEEXT)
+ ret |= ((config4 & MIPS_CONF4_VTLBSIZEEXT) >>
+ MIPS_CONF4_VTLBSIZEEXT_SHIFT) <<
+ MIPS_CONF1_TLBS_SIZE;
+ else if ((config4 & MIPS_CONF4_MMUEXTDEF) ==
+ MIPS_CONF4_MMUEXTDEF_MMUSIZEEXT)
+ ret |= ((config4 & MIPS_CONF4_MMUSIZEEXT) >>
+ MIPS_CONF4_MMUSIZEEXT_SHIFT) <<
+ MIPS_CONF1_TLBS_SIZE;
+ }
+ return ret + 1;
+}
+
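The resize helper above splits "MMUSize - 1" between Config1.MMUSize and the Config4 size-extension field. Below is a minimal userspace sketch of that encode/decode split, assuming the architectural Config1.MMUSize field is 6 bits wide; the helper names and the sample size are illustrative, not kernel definitions.

#include <stdio.h>

/* Illustrative field width mirroring the 6-bit Config1.MMUSize field. */
#define CONF1_TLBS_BITS   6
#define CONF1_TLBS_MASK   ((1u << CONF1_TLBS_BITS) - 1)

static void encode_mmusize(unsigned int size,
                           unsigned int *conf1, unsigned int *conf4ext)
{
    unsigned int m1 = size - 1;            /* the architecture stores MMUSize - 1 */
    *conf1 = m1 & CONF1_TLBS_MASK;         /* low 6 bits -> Config1.MMUSize */
    *conf4ext = m1 >> CONF1_TLBS_BITS;     /* remainder -> Config4 extension */
}

static unsigned int decode_mmusize(unsigned int conf1, unsigned int conf4ext)
{
    return ((conf4ext << CONF1_TLBS_BITS) | conf1) + 1;
}

int main(void)
{
    unsigned int c1, c4;

    encode_mmusize(576, &c1, &c4);         /* made-up 576-entry guest VTLB */
    printf("Config1.MMUSize=%u ext=%u -> size=%u\n",
           c1, c4, decode_mmusize(c1, c4));
    return 0;
}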
+static int kvm_vz_hardware_enable(void)
+{
+ unsigned int mmu_size, guest_mmu_size, ftlb_size;
+ u64 guest_cvmctl, cvmvmconfig;
+
+ switch (current_cpu_type()) {
+ case CPU_CAVIUM_OCTEON3:
+ /* Set up guest timer/perfcount IRQ lines */
+ guest_cvmctl = read_gc0_cvmctl();
+ guest_cvmctl &= ~CVMCTL_IPTI;
+ guest_cvmctl |= 7ull << CVMCTL_IPTI_SHIFT;
+ guest_cvmctl &= ~CVMCTL_IPPCI;
+ guest_cvmctl |= 6ull << CVMCTL_IPPCI_SHIFT;
+ write_gc0_cvmctl(guest_cvmctl);
+
+ cvmvmconfig = read_c0_cvmvmconfig();
+ /* No I/O hole translation. */
+ cvmvmconfig |= CVMVMCONF_DGHT;
+ /* Halve the root MMU size */
+ mmu_size = ((cvmvmconfig & CVMVMCONF_MMUSIZEM1)
+ >> CVMVMCONF_MMUSIZEM1_S) + 1;
+ guest_mmu_size = mmu_size / 2;
+ mmu_size -= guest_mmu_size;
+ cvmvmconfig &= ~CVMVMCONF_RMMUSIZEM1;
+ cvmvmconfig |= mmu_size - 1;
+ write_c0_cvmvmconfig(cvmvmconfig);
+
+ /* Update our records */
+ current_cpu_data.tlbsize = mmu_size;
+ current_cpu_data.tlbsizevtlb = mmu_size;
+ current_cpu_data.guest.tlbsize = guest_mmu_size;
+
+ /* Flush moved entries in new (guest) context */
+ kvm_vz_local_flush_guesttlb_all();
+ break;
+ default:
+ /*
+ * ImgTec cores tend to use a shared root/guest TLB. To avoid
+ * overlap of root wired and guest entries, the guest TLB may
+ * need resizing.
+ */
+ mmu_size = current_cpu_data.tlbsizevtlb;
+ ftlb_size = current_cpu_data.tlbsize - mmu_size;
+
+ /* Try switching to maximum guest VTLB size for flush */
+ guest_mmu_size = kvm_vz_resize_guest_vtlb(mmu_size);
+ current_cpu_data.guest.tlbsize = guest_mmu_size + ftlb_size;
+ kvm_vz_local_flush_guesttlb_all();
+
+ /*
+ * Reduce to make space for root wired entries and at least 2
+ * root non-wired entries. This does assume that long-term wired
+ * entries won't be added later.
+ */
+ guest_mmu_size = mmu_size - num_wired_entries() - 2;
+ guest_mmu_size = kvm_vz_resize_guest_vtlb(guest_mmu_size);
+ current_cpu_data.guest.tlbsize = guest_mmu_size + ftlb_size;
+
+ /*
+ * Write the VTLB size, but if another CPU has already written,
+ * check it matches or we won't provide a consistent view to the
+ * guest. If this ever happens it suggests an asymmetric number
+ * of wired entries.
+ */
+ if (cmpxchg(&kvm_vz_guest_vtlb_size, 0, guest_mmu_size) &&
+ WARN(guest_mmu_size != kvm_vz_guest_vtlb_size,
+ "Available guest VTLB size mismatch"))
+ return -EINVAL;
+ break;
+ }
+
+ /*
+ * Enable virtualization features granting guest direct control of
+ * certain features:
+ * CP0=1: Guest coprocessor 0 context.
+ * AT=Guest: Guest MMU.
+ * CG=1: Hit (virtual address) CACHE operations (optional).
+ * CF=1: Guest Config registers.
+ * CGI=1: Indexed flush CACHE operations (optional).
+ */
+ write_c0_guestctl0(MIPS_GCTL0_CP0 |
+ (MIPS_GCTL0_AT_GUEST << MIPS_GCTL0_AT_SHIFT) |
+ MIPS_GCTL0_CG | MIPS_GCTL0_CF);
+ if (cpu_has_guestctl0ext)
+ set_c0_guestctl0ext(MIPS_GCTL0EXT_CGI);
+
+ if (cpu_has_guestid) {
+ write_c0_guestctl1(0);
+ kvm_vz_local_flush_roottlb_all_guests();
+
+ GUESTID_MASK = current_cpu_data.guestid_mask;
+ GUESTID_FIRST_VERSION = GUESTID_MASK + 1;
+ GUESTID_VERSION_MASK = ~GUESTID_MASK;
+
+ current_cpu_data.guestid_cache = GUESTID_FIRST_VERSION;
+ }
+
+ /* clear any pending injected virtual guest interrupts */
+ if (cpu_has_guestctl2)
+ clear_c0_guestctl2(0x3f << 10);
+
+ return 0;
+}
+
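On the shared root/guest TLB path above, the guest VTLB is first grown to the full root VTLB size for the flush and then shrunk to leave room for the root wired entries plus two spare non-wired entries. A throwaway sketch of that sizing, with made-up entry counts:

#include <stdio.h>

int main(void)
{
    unsigned int root_vtlb = 64;   /* assumed root VTLB entries */
    unsigned int ftlb = 512;       /* assumed FTLB entries */
    unsigned int wired = 3;        /* assumed root wired entries */

    /* Reduce to make space for root wired entries and 2 non-wired entries,
     * mirroring the calculation in kvm_vz_hardware_enable(). */
    unsigned int guest_vtlb = root_vtlb - wired - 2;

    printf("guest VTLB = %u, guest TLB total = %u\n",
           guest_vtlb, guest_vtlb + ftlb);
    return 0;
}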
+static void kvm_vz_hardware_disable(void)
+{
+ u64 cvmvmconfig;
+ unsigned int mmu_size;
+
+ /* Flush any remaining guest TLB entries */
+ kvm_vz_local_flush_guesttlb_all();
+
+ switch (current_cpu_type()) {
+ case CPU_CAVIUM_OCTEON3:
+ /*
+ * Allocate whole TLB for root. Existing guest TLB entries will
+ * change ownership to the root TLB. We should be safe though as
+ * they've already been flushed above while in guest TLB.
+ */
+ cvmvmconfig = read_c0_cvmvmconfig();
+ mmu_size = ((cvmvmconfig & CVMVMCONF_MMUSIZEM1)
+ >> CVMVMCONF_MMUSIZEM1_S) + 1;
+ cvmvmconfig &= ~CVMVMCONF_RMMUSIZEM1;
+ cvmvmconfig |= mmu_size - 1;
+ write_c0_cvmvmconfig(cvmvmconfig);
+
+ /* Update our records */
+ current_cpu_data.tlbsize = mmu_size;
+ current_cpu_data.tlbsizevtlb = mmu_size;
+ current_cpu_data.guest.tlbsize = 0;
+
+ /* Flush moved entries in new (root) context */
+ local_flush_tlb_all();
+ break;
+ }
+
+ if (cpu_has_guestid) {
+ write_c0_guestctl1(0);
+ kvm_vz_local_flush_roottlb_all_guests();
+ }
+}
+
+static int kvm_vz_check_extension(struct kvm *kvm, long ext)
+{
+ int r;
+
+ switch (ext) {
+ case KVM_CAP_MIPS_VZ:
+ /* we wouldn't be here unless cpu_has_vz */
+ r = 1;
+ break;
+#ifdef CONFIG_64BIT
+ case KVM_CAP_MIPS_64BIT:
+ /* We support 64-bit registers/operations and addresses */
+ r = 2;
+ break;
+#endif
+ default:
+ r = 0;
+ break;
+ }
+
+ return r;
+}
+
+static int kvm_vz_vcpu_init(struct kvm_vcpu *vcpu)
+{
+ int i;
+
+ for_each_possible_cpu(i)
+ vcpu->arch.vzguestid[i] = 0;
+
+ return 0;
+}
+
+static void kvm_vz_vcpu_uninit(struct kvm_vcpu *vcpu)
+{
+ int cpu;
+
+ /*
+ * If the VCPU is freed and reused as another VCPU, we don't want the
+ * matching pointer wrongly hanging around in last_vcpu[] or
+ * last_exec_vcpu[].
+ */
+ for_each_possible_cpu(cpu) {
+ if (last_vcpu[cpu] == vcpu)
+ last_vcpu[cpu] = NULL;
+ if (last_exec_vcpu[cpu] == vcpu)
+ last_exec_vcpu[cpu] = NULL;
+ }
+}
+
+static int kvm_vz_vcpu_setup(struct kvm_vcpu *vcpu)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ unsigned long count_hz = 100*1000*1000; /* default to 100 MHz */
+
+ /*
+ * Start off the timer at the same frequency as the host timer, but the
+ * soft timer doesn't handle frequencies greater than 1GHz yet.
+ */
+ if (mips_hpt_frequency && mips_hpt_frequency <= NSEC_PER_SEC)
+ count_hz = mips_hpt_frequency;
+ kvm_mips_init_count(vcpu, count_hz);
+
+ /*
+ * Initialize guest register state to valid architectural reset state.
+ */
+
+ /* PageGrain */
+ if (cpu_has_mips_r6)
+ kvm_write_sw_gc0_pagegrain(cop0, PG_RIE | PG_XIE | PG_IEC);
+ /* Wired */
+ if (cpu_has_mips_r6)
+ kvm_write_sw_gc0_wired(cop0,
+ read_gc0_wired() & MIPSR6_WIRED_LIMIT);
+ /* Status */
+ kvm_write_sw_gc0_status(cop0, ST0_BEV | ST0_ERL);
+ if (cpu_has_mips_r6)
+ kvm_change_sw_gc0_status(cop0, ST0_FR, read_gc0_status());
+ /* IntCtl */
+ kvm_write_sw_gc0_intctl(cop0, read_gc0_intctl() &
+ (INTCTLF_IPFDC | INTCTLF_IPPCI | INTCTLF_IPTI));
+ /* PRId */
+ kvm_write_sw_gc0_prid(cop0, boot_cpu_data.processor_id);
+ /* EBase */
+ kvm_write_sw_gc0_ebase(cop0, (s32)0x80000000 | vcpu->vcpu_id);
+ /* Config */
+ kvm_save_gc0_config(cop0);
+ /* architecturally writable (e.g. from guest) */
+ kvm_change_sw_gc0_config(cop0, CONF_CM_CMASK,
+ _page_cachable_default >> _CACHE_SHIFT);
+ /* architecturally read only, but maybe writable from root */
+ kvm_change_sw_gc0_config(cop0, MIPS_CONF_MT, read_c0_config());
+ if (cpu_guest_has_conf1) {
+ kvm_set_sw_gc0_config(cop0, MIPS_CONF_M);
+ /* Config1 */
+ kvm_save_gc0_config1(cop0);
+ /* architecturally read only, but maybe writable from root */
+ kvm_clear_sw_gc0_config1(cop0, MIPS_CONF1_C2 |
+ MIPS_CONF1_MD |
+ MIPS_CONF1_PC |
+ MIPS_CONF1_WR |
+ MIPS_CONF1_CA |
+ MIPS_CONF1_FP);
+ }
+ if (cpu_guest_has_conf2) {
+ kvm_set_sw_gc0_config1(cop0, MIPS_CONF_M);
+ /* Config2 */
+ kvm_save_gc0_config2(cop0);
+ }
+ if (cpu_guest_has_conf3) {
+ kvm_set_sw_gc0_config2(cop0, MIPS_CONF_M);
+ /* Config3 */
+ kvm_save_gc0_config3(cop0);
+ /* architecturally writable (e.g. from guest) */
+ kvm_clear_sw_gc0_config3(cop0, MIPS_CONF3_ISA_OE);
+ /* architecturally read only, but maybe writable from root */
+ kvm_clear_sw_gc0_config3(cop0, MIPS_CONF3_MSA |
+ MIPS_CONF3_BPG |
+ MIPS_CONF3_ULRI |
+ MIPS_CONF3_DSP |
+ MIPS_CONF3_CTXTC |
+ MIPS_CONF3_ITL |
+ MIPS_CONF3_LPA |
+ MIPS_CONF3_VEIC |
+ MIPS_CONF3_VINT |
+ MIPS_CONF3_SP |
+ MIPS_CONF3_CDMM |
+ MIPS_CONF3_MT |
+ MIPS_CONF3_SM |
+ MIPS_CONF3_TL);
+ }
+ if (cpu_guest_has_conf4) {
+ kvm_set_sw_gc0_config3(cop0, MIPS_CONF_M);
+ /* Config4 */
+ kvm_save_gc0_config4(cop0);
+ }
+ if (cpu_guest_has_conf5) {
+ kvm_set_sw_gc0_config4(cop0, MIPS_CONF_M);
+ /* Config5 */
+ kvm_save_gc0_config5(cop0);
+ /* architecturally writable (e.g. from guest) */
+ kvm_clear_sw_gc0_config5(cop0, MIPS_CONF5_K |
+ MIPS_CONF5_CV |
+ MIPS_CONF5_MSAEN |
+ MIPS_CONF5_UFE |
+ MIPS_CONF5_FRE |
+ MIPS_CONF5_SBRI |
+ MIPS_CONF5_UFR);
+ /* architecturally read only, but maybe writable from root */
+ kvm_clear_sw_gc0_config5(cop0, MIPS_CONF5_MRP);
+ }
+
+ if (cpu_guest_has_contextconfig) {
+ /* ContextConfig */
+ kvm_write_sw_gc0_contextconfig(cop0, 0x007ffff0);
+#ifdef CONFIG_64BIT
+ /* XContextConfig */
+ /* bits SEGBITS-13+3:4 set */
+ kvm_write_sw_gc0_xcontextconfig(cop0,
+ ((1ull << (cpu_vmbits - 13)) - 1) << 4);
+#endif
+ }
+
+ /* Implementation dependent, use the legacy layout */
+ if (cpu_guest_has_segments) {
+ /* SegCtl0, SegCtl1, SegCtl2 */
+ kvm_write_sw_gc0_segctl0(cop0, 0x00200010);
+ kvm_write_sw_gc0_segctl1(cop0, 0x00000002 |
+ (_page_cachable_default >> _CACHE_SHIFT) <<
+ (16 + MIPS_SEGCFG_C_SHIFT));
+ kvm_write_sw_gc0_segctl2(cop0, 0x00380438);
+ }
+
+ /* reset HTW registers */
+ if (cpu_guest_has_htw && cpu_has_mips_r6) {
+ /* PWField */
+ kvm_write_sw_gc0_pwfield(cop0, 0x0c30c302);
+ /* PWSize */
+ kvm_write_sw_gc0_pwsize(cop0, 1 << MIPS_PWSIZE_PTW_SHIFT);
+ }
+
+ /* start with no pending virtual guest interrupts */
+ if (cpu_has_guestctl2)
+ cop0->reg[MIPS_CP0_GUESTCTL2][MIPS_CP0_GUESTCTL2_SEL] = 0;
+
+ /* Put PC at reset vector */
+ vcpu->arch.pc = CKSEG1ADDR(0x1fc00000);
+
+ return 0;
+}
+
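The Config handling in kvm_vz_vcpu_setup() chains the registers through the M bit: whenever ConfigN is present, M is set in Config(N-1) so the guest can discover the next register. A small sketch of walking such a chain, with invented register values:

#include <stdio.h>

#define CONF_M (1u << 31)    /* "another Config register is implemented" */

int main(void)
{
    /* Invented Config0..Config5 values; only the M bit matters here. */
    unsigned int config[6] = {
        CONF_M | 0x00248483, CONF_M | 0x3e9b4d8a, CONF_M | 0x00000a40,
        CONF_M | 0x00002420, CONF_M | 0x00000000, 0x00000000,
    };
    int n = 0;

    /* Walk the chain the way a guest discovers its Config registers. */
    while (n < 5 && (config[n] & CONF_M))
        n++;
    printf("last implemented Config register: Config%d\n", n);
    return 0;
}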
+static void kvm_vz_flush_shadow_all(struct kvm *kvm)
+{
+ if (cpu_has_guestid) {
+ /* Flush GuestID for each VCPU individually */
+ kvm_flush_remote_tlbs(kvm);
+ } else {
+ /*
+ * For each CPU there is a single GPA ASID used by all VCPUs in
+ * the VM, so it doesn't make sense for the VCPUs to handle
+ * invalidation of these ASIDs individually.
+ *
+ * Instead mark all CPUs as needing ASID invalidation in
+ * asid_flush_mask, and just use kvm_flush_remote_tlbs(kvm) to
+ * kick any running VCPUs so they check asid_flush_mask.
+ */
+ cpumask_setall(&kvm->arch.asid_flush_mask);
+ kvm_flush_remote_tlbs(kvm);
+ }
+}
+
+static void kvm_vz_flush_shadow_memslot(struct kvm *kvm,
+ const struct kvm_memory_slot *slot)
+{
+ kvm_vz_flush_shadow_all(kvm);
+}
+
+static void kvm_vz_vcpu_reenter(struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+ int cpu = smp_processor_id();
+ int preserve_guest_tlb;
+
+ preserve_guest_tlb = kvm_vz_check_requests(vcpu, cpu);
+
+ if (preserve_guest_tlb)
+ kvm_vz_vcpu_save_wired(vcpu);
+
+ kvm_vz_vcpu_load_tlb(vcpu, cpu);
+
+ if (preserve_guest_tlb)
+ kvm_vz_vcpu_load_wired(vcpu);
+}
+
+static int kvm_vz_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+ int cpu = smp_processor_id();
+ int r;
+
+ kvm_vz_acquire_htimer(vcpu);
+ /* Check if we have any exceptions/interrupts pending */
+ kvm_mips_deliver_interrupts(vcpu, read_gc0_cause());
+
+ kvm_vz_check_requests(vcpu, cpu);
+ kvm_vz_vcpu_load_tlb(vcpu, cpu);
+ kvm_vz_vcpu_load_wired(vcpu);
+
+ r = vcpu->arch.vcpu_run(run, vcpu);
+
+ kvm_vz_vcpu_save_wired(vcpu);
+
+ return r;
+}
+
+static struct kvm_mips_callbacks kvm_vz_callbacks = {
+ .handle_cop_unusable = kvm_trap_vz_handle_cop_unusable,
+ .handle_tlb_mod = kvm_trap_vz_handle_tlb_st_miss,
+ .handle_tlb_ld_miss = kvm_trap_vz_handle_tlb_ld_miss,
+ .handle_tlb_st_miss = kvm_trap_vz_handle_tlb_st_miss,
+ .handle_addr_err_st = kvm_trap_vz_no_handler,
+ .handle_addr_err_ld = kvm_trap_vz_no_handler,
+ .handle_syscall = kvm_trap_vz_no_handler,
+ .handle_res_inst = kvm_trap_vz_no_handler,
+ .handle_break = kvm_trap_vz_no_handler,
+ .handle_msa_disabled = kvm_trap_vz_handle_msa_disabled,
+ .handle_guest_exit = kvm_trap_vz_handle_guest_exit,
+
+ .hardware_enable = kvm_vz_hardware_enable,
+ .hardware_disable = kvm_vz_hardware_disable,
+ .check_extension = kvm_vz_check_extension,
+ .vcpu_init = kvm_vz_vcpu_init,
+ .vcpu_uninit = kvm_vz_vcpu_uninit,
+ .vcpu_setup = kvm_vz_vcpu_setup,
+ .flush_shadow_all = kvm_vz_flush_shadow_all,
+ .flush_shadow_memslot = kvm_vz_flush_shadow_memslot,
+ .gva_to_gpa = kvm_vz_gva_to_gpa_cb,
+ .queue_timer_int = kvm_vz_queue_timer_int_cb,
+ .dequeue_timer_int = kvm_vz_dequeue_timer_int_cb,
+ .queue_io_int = kvm_vz_queue_io_int_cb,
+ .dequeue_io_int = kvm_vz_dequeue_io_int_cb,
+ .irq_deliver = kvm_vz_irq_deliver_cb,
+ .irq_clear = kvm_vz_irq_clear_cb,
+ .num_regs = kvm_vz_num_regs,
+ .copy_reg_indices = kvm_vz_copy_reg_indices,
+ .get_one_reg = kvm_vz_get_one_reg,
+ .set_one_reg = kvm_vz_set_one_reg,
+ .vcpu_load = kvm_vz_vcpu_load,
+ .vcpu_put = kvm_vz_vcpu_put,
+ .vcpu_run = kvm_vz_vcpu_run,
+ .vcpu_reenter = kvm_vz_vcpu_reenter,
+};
+
+int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks)
+{
+ if (!cpu_has_vz)
+ return -ENODEV;
+
+ /*
+ * VZ requires at least 2 KScratch registers, so it should have been
+ * possible to allocate pgd_reg.
+ */
+ if (WARN(pgd_reg == -1,
+ "pgd_reg not allocated even though cpu_has_vz\n"))
+ return -ENODEV;
+
+ pr_info("Starting KVM with MIPS VZ extensions\n");
+
+ *install_callbacks = &kvm_vz_callbacks;
+ return 0;
+}
diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c
index 6db341347202..899e46279902 100644
--- a/arch/mips/mm/cache.c
+++ b/arch/mips/mm/cache.c
@@ -24,6 +24,7 @@
/* Cache operations. */
void (*flush_cache_all)(void);
void (*__flush_cache_all)(void);
+EXPORT_SYMBOL_GPL(__flush_cache_all);
void (*flush_cache_mm)(struct mm_struct *mm);
void (*flush_cache_range)(struct vm_area_struct *vma, unsigned long start,
unsigned long end);
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index aa75849c36bc..3ca20283b31e 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -348,7 +348,7 @@ void maar_init(void)
upper = ((upper & MIPS_MAAR_ADDR) << 4) | 0xffff;
pr_info(" [%d]: ", i / 2);
- if (!(attr & MIPS_MAAR_V)) {
+ if (!(attr & MIPS_MAAR_VL)) {
pr_cont("disabled\n");
continue;
}
diff --git a/arch/mips/pci/pci.c b/arch/mips/pci/pci.c
index f6325fa657fb..bd67ac74fe2d 100644
--- a/arch/mips/pci/pci.c
+++ b/arch/mips/pci/pci.c
@@ -57,27 +57,3 @@ void pci_resource_to_user(const struct pci_dev *dev, int bar,
*start = fixup_bigphys_addr(rsrc->start, size);
*end = rsrc->start + size;
}
-
-int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
- enum pci_mmap_state mmap_state, int write_combine)
-{
- unsigned long prot;
-
- /*
- * I/O space can be accessed via normal processor loads and stores on
- * this platform but for now we elect not to do this and portable
- * drivers should not do this anyway.
- */
- if (mmap_state == pci_mmap_io)
- return -EINVAL;
-
- /*
- * Ignore write-combine; for now only return uncached mappings.
- */
- prot = pgprot_val(vma->vm_page_prot);
- prot = (prot & ~_CACHE_MASK) | _CACHE_UNCACHED;
- vma->vm_page_prot = __pgprot(prot);
-
- return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
- vma->vm_end - vma->vm_start, vma->vm_page_prot);
-}
diff --git a/arch/mn10300/include/asm/pci.h b/arch/mn10300/include/asm/pci.h
index 51159fff025a..d27654902f28 100644
--- a/arch/mn10300/include/asm/pci.h
+++ b/arch/mn10300/include/asm/pci.h
@@ -74,9 +74,7 @@ static inline int pci_controller_num(struct pci_dev *dev)
}
#define HAVE_PCI_MMAP
-extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
- enum pci_mmap_state mmap_state,
- int write_combine);
+#define ARCH_GENERIC_PCI_MMAP_RESOURCE
#endif /* __KERNEL__ */
diff --git a/arch/mn10300/unit-asb2305/pci-asb2305.c b/arch/mn10300/unit-asb2305/pci-asb2305.c
index b7ab8378964c..e0f4617c0c7a 100644
--- a/arch/mn10300/unit-asb2305/pci-asb2305.c
+++ b/arch/mn10300/unit-asb2305/pci-asb2305.c
@@ -210,26 +210,3 @@ void __init pcibios_resource_survey(void)
pcibios_allocate_resources(0);
pcibios_allocate_resources(1);
}
-
-int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
- enum pci_mmap_state mmap_state, int write_combine)
-{
- unsigned long prot;
-
- /* Leave vm_pgoff as-is, the PCI space address is the physical
- * address on this platform.
- */
- vma->vm_flags |= VM_LOCKED;
-
- prot = pgprot_val(vma->vm_page_prot);
- prot &= ~_PAGE_CACHE;
- vma->vm_page_prot = __pgprot(prot);
-
- /* Write-combine setting is ignored */
- if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
- vma->vm_end - vma->vm_start,
- vma->vm_page_prot))
- return -EAGAIN;
-
- return 0;
-}
diff --git a/arch/parisc/include/asm/pci.h b/arch/parisc/include/asm/pci.h
index defebd956585..1de1a3f412ec 100644
--- a/arch/parisc/include/asm/pci.h
+++ b/arch/parisc/include/asm/pci.h
@@ -200,8 +200,6 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
}
#define HAVE_PCI_MMAP
-
-extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
- enum pci_mmap_state mmap_state, int write_combine);
+#define ARCH_GENERIC_PCI_MMAP_RESOURCE
#endif /* __ASM_PARISC_PCI_H */
diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
index ad4cb1613c57..a4fd296c958e 100644
--- a/arch/parisc/kernel/entry.S
+++ b/arch/parisc/kernel/entry.S
@@ -1369,7 +1369,7 @@ nadtlb_nullify:
/*
When there is no translation for the probe address then we
- must nullify the insn and return zero in the target regsiter.
+ must nullify the insn and return zero in the target register.
This will indicate to the calling code that it does not have
write/read privileges to this address.
diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
index c66c943d9322..f1a76935a314 100644
--- a/arch/parisc/kernel/module.c
+++ b/arch/parisc/kernel/module.c
@@ -218,7 +218,7 @@ void *module_alloc(unsigned long size)
* easier than trying to map the text, data, init_text and
* init_data correctly */
return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
- GFP_KERNEL | __GFP_HIGHMEM,
+ GFP_KERNEL,
PAGE_KERNEL_RWX, 0, NUMA_NO_NODE,
__builtin_return_address(0));
}
diff --git a/arch/parisc/kernel/pci.c b/arch/parisc/kernel/pci.c
index 0903c6abd7a4..13ee3569959a 100644
--- a/arch/parisc/kernel/pci.c
+++ b/arch/parisc/kernel/pci.c
@@ -227,34 +227,6 @@ resource_size_t pcibios_align_resource(void *data, const struct resource *res,
return start;
}
-
-int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
- enum pci_mmap_state mmap_state, int write_combine)
-{
- unsigned long prot;
-
- /*
- * I/O space can be accessed via normal processor loads and stores on
- * this platform but for now we elect not to do this and portable
- * drivers should not do this anyway.
- */
- if (mmap_state == pci_mmap_io)
- return -EINVAL;
-
- if (write_combine)
- return -EINVAL;
-
- /*
- * Ignore write-combine; for now only return uncached mappings.
- */
- prot = pgprot_val(vma->vm_page_prot);
- prot |= _PAGE_NO_CACHE;
- vma->vm_page_prot = __pgprot(prot);
-
- return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
- vma->vm_end - vma->vm_start, vma->vm_page_prot);
-}
-
/*
* A driver is enabling the device. We make sure that all the appropriate
* bits are set to allow the device to operate as the driver is expecting.
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 053382616533..964da1891ea9 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -22,6 +22,48 @@ config MMU
bool
default y
+config ARCH_MMAP_RND_BITS_MAX
+ # On Book3S 64, the default virtual address space for 64-bit processes
+ # is 2^47 (128TB). As a maximum, allow randomisation to consume up to
+ # 32T of address space (2^45), which should ensure a reasonable gap
+ # between bottom-up and top-down allocations for applications that
+ # consume "normal" amounts of address space. Book3S 64 only supports 64K
+ # and 4K page sizes.
+ default 29 if PPC_BOOK3S_64 && PPC_64K_PAGES # 29 = 45 (32T) - 16 (64K)
+ default 33 if PPC_BOOK3S_64 # 33 = 45 (32T) - 12 (4K)
+ #
+ # On all other 64-bit platforms (currently only Book3E), the virtual
+ # address space is 2^46 (64TB). Allow randomisation to consume up to 16T
+ # of address space (2^44). Only 4K page sizes are supported.
+ default 32 if 64BIT # 32 = 44 (16T) - 12 (4K)
+ #
+ # For 32-bit, use the compat values, as they're the same.
+ default ARCH_MMAP_RND_COMPAT_BITS_MAX
+
+config ARCH_MMAP_RND_BITS_MIN
+ # Allow randomisation to consume up to 1GB of address space (2^30).
+ default 14 if 64BIT && PPC_64K_PAGES # 14 = 30 (1GB) - 16 (64K)
+ default 18 if 64BIT # 18 = 30 (1GB) - 12 (4K)
+ #
+ # For 32-bit, use the compat values, as they're the same.
+ default ARCH_MMAP_RND_COMPAT_BITS_MIN
+
+config ARCH_MMAP_RND_COMPAT_BITS_MAX
+ # Total virtual address space for 32-bit processes is 2^31 (2GB).
+ # Allow randomisation to consume up to 512MB of address space (2^29).
+ default 11 if PPC_256K_PAGES # 11 = 29 (512MB) - 18 (256K)
+ default 13 if PPC_64K_PAGES # 13 = 29 (512MB) - 16 (64K)
+ default 15 if PPC_16K_PAGES # 15 = 29 (512MB) - 14 (16K)
+ default 17 # 17 = 29 (512MB) - 12 (4K)
+
+config ARCH_MMAP_RND_COMPAT_BITS_MIN
+ # Total virtual address space for 32-bit processes is 2^31 (2GB).
+ # Allow randomisation to consume up to 8MB of address space (2^23).
+ default 5 if PPC_256K_PAGES # 5 = 23 (8MB) - 18 (256K)
+ default 7 if PPC_64K_PAGES # 7 = 23 (8MB) - 16 (64K)
+ default 9 if PPC_16K_PAGES # 9 = 23 (8MB) - 14 (16K)
+ default 11 # 11 = 23 (8MB) - 12 (4K)
+
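Each default above is simply log2 of the randomisation budget minus the page shift. A quick sketch reproducing that arithmetic, using only the budgets quoted in the comments:

#include <stdio.h>

static int rnd_bits(int budget_log2, int page_shift)
{
    return budget_log2 - page_shift;   /* e.g. 45 (32T) - 16 (64K) = 29 */
}

int main(void)
{
    printf("Book3S 64, 64K pages, max: %d\n", rnd_bits(45, 16)); /* 29 */
    printf("Book3S 64, 4K pages,  max: %d\n", rnd_bits(45, 12)); /* 33 */
    printf("32-bit,    4K pages,  max: %d\n", rnd_bits(29, 12)); /* 17 */
    return 0;
}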
config HAVE_SETUP_PER_CPU_AREA
def_bool PPC64
@@ -38,6 +80,11 @@ config NR_IRQS
/proc/interrupts. If you configure your system to have too few,
drivers will fail to load or worse - handle with care.
+config NMI_IPI
+ bool
+ depends on SMP && (DEBUGGER || KEXEC_CORE)
+ default y
+
config STACKTRACE_SUPPORT
bool
default y
@@ -99,6 +146,7 @@ config PPC
select ARCH_USE_BUILTIN_BSWAP
select ARCH_USE_CMPXCHG_LOCKREF if PPC64
select ARCH_WANT_IPC_PARSE_VERSION
+ select ARCH_WEAK_RELEASE_ACQUIRE
select BINFMT_ELF
select BUILDTIME_EXTABLE_SORT
select CLONE_BACKWARDS
@@ -119,6 +167,8 @@ config PPC
select HAVE_ARCH_AUDITSYSCALL
select HAVE_ARCH_JUMP_LABEL
select HAVE_ARCH_KGDB
+ select HAVE_ARCH_MMAP_RND_BITS
+ select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT
select HAVE_ARCH_SECCOMP_FILTER
select HAVE_ARCH_TRACEHOOK
select HAVE_CBPF_JIT if !PPC64
@@ -141,6 +191,7 @@ config PPC
select HAVE_IRQ_EXIT_ON_IRQ_STACK
select HAVE_KERNEL_GZIP
select HAVE_KPROBES
+ select HAVE_KPROBES_ON_FTRACE
select HAVE_KRETPROBES
select HAVE_LIVEPATCH if HAVE_DYNAMIC_FTRACE_WITH_REGS
select HAVE_MEMBLOCK
@@ -489,7 +540,7 @@ config KEXEC_FILE
config RELOCATABLE
bool "Build a relocatable kernel"
- depends on (PPC64 && !COMPILE_TEST) || (FLATMEM && (44x || FSL_BOOKE))
+ depends on PPC64 || (FLATMEM && (44x || FSL_BOOKE))
select NONSTATIC_KERNEL
select MODULE_REL_CRCS if MODVERSIONS
help
@@ -521,21 +572,23 @@ config RELOCATABLE_TEST
relocation code.
config CRASH_DUMP
- bool "Build a kdump crash kernel"
+ bool "Build a dump capture kernel"
depends on PPC64 || 6xx || FSL_BOOKE || (44x && !SMP)
- select RELOCATABLE if (PPC64 && !COMPILE_TEST) || 44x || FSL_BOOKE
+ select RELOCATABLE if PPC64 || 44x || FSL_BOOKE
help
- Build a kernel suitable for use as a kdump capture kernel.
+ Build a kernel suitable for use as a dump capture kernel.
The same kernel binary can be used as production kernel and dump
capture kernel.
config FA_DUMP
bool "Firmware-assisted dump"
- depends on PPC64 && PPC_RTAS && CRASH_DUMP && KEXEC_CORE
+ depends on PPC64 && PPC_RTAS
+ select CRASH_CORE
+ select CRASH_DUMP
help
A robust mechanism to get reliable kernel crash dump with
assistance from firmware. This approach does not use kexec,
- instead firmware assists in booting the kdump kernel
+ instead firmware assists in booting the capture kernel
while preserving memory contents. Firmware-assisted dump
is meant to be a kdump replacement offering robustness and
speed not possible without system firmware assistance.
@@ -585,7 +638,7 @@ config ARCH_SPARSEMEM_ENABLE
config ARCH_SPARSEMEM_DEFAULT
def_bool y
- depends on (SMP && PPC_PSERIES) || PPC_PS3
+ depends on PPC_BOOK3S_64
config SYS_SUPPORTS_HUGETLBFS
bool
@@ -677,6 +730,16 @@ config PPC_256K_PAGES
endchoice
+config THREAD_SHIFT
+ int "Thread shift" if EXPERT
+ range 13 15
+ default "15" if PPC_256K_PAGES
+ default "14" if PPC64
+ default "13"
+ help
+ Used to define the stack size. The default is almost always what you
+ want. Only change this if you know what you are doing.
+
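THREAD_SHIFT sizes the kernel stack as a power of two; assuming the usual THREAD_SIZE = 1 << THREAD_SHIFT relation the help text alludes to, the allowed range maps to stack sizes as follows:

#include <stdio.h>

int main(void)
{
    /* Assumed relation: THREAD_SIZE = 1 << THREAD_SHIFT (not shown in this hunk). */
    int shifts[] = { 13, 14, 15 };      /* the Kconfig range above */

    for (int i = 0; i < 3; i++)
        printf("THREAD_SHIFT=%d -> %lu KiB kernel stack\n",
               shifts[i], (1UL << shifts[i]) / 1024);
    return 0;
}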
config FORCE_MAX_ZONEORDER
int "Maximum zone order"
range 8 9 if PPC64 && PPC_64K_PAGES
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index 19b0d1a81959..3e0f0e1fadef 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -136,7 +136,7 @@ CFLAGS-$(CONFIG_GENERIC_CPU) += -mcpu=powerpc64
endif
ifdef CONFIG_MPROFILE_KERNEL
- ifeq ($(shell $(srctree)/arch/powerpc/scripts/gcc-check-mprofile-kernel.sh $(CC) -I$(srctree)/include -D__KERNEL__),OK)
+ ifeq ($(shell $(srctree)/arch/powerpc/tools/gcc-check-mprofile-kernel.sh $(CC) -I$(srctree)/include -D__KERNEL__),OK)
CC_FLAGS_FTRACE := -pg -mprofile-kernel
KBUILD_CPPFLAGS += -DCC_USING_MPROFILE_KERNEL
else
@@ -274,17 +274,6 @@ PHONY += $(BOOT_TARGETS1) $(BOOT_TARGETS2)
boot := arch/$(ARCH)/boot
-ifeq ($(CONFIG_RELOCATABLE),y)
-quiet_cmd_relocs_check = CALL $<
- cmd_relocs_check = $(CONFIG_SHELL) $< "$(OBJDUMP)" "$(obj)/vmlinux"
-
-PHONY += relocs_check
-relocs_check: arch/powerpc/relocs_check.sh vmlinux
- $(call cmd,relocs_check)
-
-zImage: relocs_check
-endif
-
$(BOOT_TARGETS1): vmlinux
$(Q)$(MAKE) $(build)=$(boot) $(patsubst %,$(boot)/%,$@)
$(BOOT_TARGETS2): vmlinux
diff --git a/arch/powerpc/Makefile.postlink b/arch/powerpc/Makefile.postlink
new file mode 100644
index 000000000000..3c22d64b2de9
--- /dev/null
+++ b/arch/powerpc/Makefile.postlink
@@ -0,0 +1,34 @@
+# ===========================================================================
+# Post-link powerpc pass
+# ===========================================================================
+#
+# 1. Check that vmlinux relocations look sane
+
+PHONY := __archpost
+__archpost:
+
+include include/config/auto.conf
+include scripts/Kbuild.include
+
+quiet_cmd_relocs_check = CHKREL $@
+ cmd_relocs_check = $(CONFIG_SHELL) $(srctree)/arch/powerpc/tools/relocs_check.sh "$(OBJDUMP)" "$@"
+
+# `@true` prevents complaint when there is nothing to be done
+
+vmlinux: FORCE
+ @true
+ifdef CONFIG_RELOCATABLE
+ $(call if_changed,relocs_check)
+endif
+
+%.ko: FORCE
+ @true
+
+clean:
+ @true
+
+PHONY += FORCE clean
+
+FORCE:
+
+.PHONY: $(PHONY)
diff --git a/arch/powerpc/configs/powernv_defconfig b/arch/powerpc/configs/powernv_defconfig
index ac8b8332ed82..0695ce047d56 100644
--- a/arch/powerpc/configs/powernv_defconfig
+++ b/arch/powerpc/configs/powernv_defconfig
@@ -33,7 +33,7 @@ CONFIG_BLK_DEV_INITRD=y
CONFIG_BPF_SYSCALL=y
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
-CONFIG_OPROFILE=y
+CONFIG_OPROFILE=m
CONFIG_KPROBES=y
CONFIG_JUMP_LABEL=y
CONFIG_MODULES=y
@@ -261,7 +261,7 @@ CONFIG_NILFS2_FS=m
CONFIG_AUTOFS4_FS=m
CONFIG_FUSE_FS=m
CONFIG_OVERLAY_FS=m
-CONFIG_ISO9660_FS=m
+CONFIG_ISO9660_FS=y
CONFIG_UDF_FS=m
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=m
@@ -306,7 +306,7 @@ CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CCM=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_HMAC=y
-CONFIG_CRYPT_CRC32C_VPMSUM=m
+CONFIG_CRYPTO_CRC32C_VPMSUM=m
CONFIG_CRYPTO_MD5_PPC=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_SHA256=y
diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig
index f2e03f032041..5175028c56ce 100644
--- a/arch/powerpc/configs/ppc64_defconfig
+++ b/arch/powerpc/configs/ppc64_defconfig
@@ -19,7 +19,7 @@ CONFIG_BLK_DEV_INITRD=y
CONFIG_BPF_SYSCALL=y
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
-CONFIG_OPROFILE=y
+CONFIG_OPROFILE=m
CONFIG_KPROBES=y
CONFIG_JUMP_LABEL=y
CONFIG_MODULES=y
@@ -290,7 +290,7 @@ CONFIG_NILFS2_FS=m
CONFIG_AUTOFS4_FS=m
CONFIG_FUSE_FS=m
CONFIG_OVERLAY_FS=m
-CONFIG_ISO9660_FS=m
+CONFIG_ISO9660_FS=y
CONFIG_UDF_FS=m
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=m
@@ -339,7 +339,7 @@ CONFIG_PPC_EARLY_DEBUG=y
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_HMAC=y
-CONFIG_CRYPT_CRC32C_VPMSUM=m
+CONFIG_CRYPTO_CRC32C_VPMSUM=m
CONFIG_CRYPTO_MD5_PPC=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_SHA256=y
diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig
index 4ff68b752618..1a61aa20dfba 100644
--- a/arch/powerpc/configs/pseries_defconfig
+++ b/arch/powerpc/configs/pseries_defconfig
@@ -34,7 +34,7 @@ CONFIG_BLK_DEV_INITRD=y
CONFIG_BPF_SYSCALL=y
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
-CONFIG_OPROFILE=y
+CONFIG_OPROFILE=m
CONFIG_KPROBES=y
CONFIG_JUMP_LABEL=y
CONFIG_MODULES=y
@@ -259,7 +259,7 @@ CONFIG_NILFS2_FS=m
CONFIG_AUTOFS4_FS=m
CONFIG_FUSE_FS=m
CONFIG_OVERLAY_FS=m
-CONFIG_ISO9660_FS=m
+CONFIG_ISO9660_FS=y
CONFIG_UDF_FS=m
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=m
@@ -303,7 +303,7 @@ CONFIG_XMON=y
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_HMAC=y
-CONFIG_CRYPT_CRC32C_VPMSUM=m
+CONFIG_CRYPTO_CRC32C_VPMSUM=m
CONFIG_CRYPTO_MD5_PPC=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_SHA256=y
diff --git a/arch/powerpc/include/asm/asm-prototypes.h b/arch/powerpc/include/asm/asm-prototypes.h
index f6c5264287e5..7330150bfe34 100644
--- a/arch/powerpc/include/asm/asm-prototypes.h
+++ b/arch/powerpc/include/asm/asm-prototypes.h
@@ -17,6 +17,8 @@
#include <asm/checksum.h>
#include <linux/uaccess.h>
#include <asm/epapr_hcalls.h>
+#include <asm/dcr.h>
+#include <asm/mmu_context.h>
#include <uapi/asm/ucontext.h>
@@ -120,6 +122,8 @@ extern s64 __ashrdi3(s64, int);
extern int __cmpdi2(s64, s64);
extern int __ucmpdi2(u64, u64);
+/* tracing */
void _mcount(void);
+unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip);
#endif /* _ASM_POWERPC_ASM_PROTOTYPES_H */
diff --git a/arch/powerpc/include/asm/bitops.h b/arch/powerpc/include/asm/bitops.h
index bc5fdfd22788..33a24fdd7958 100644
--- a/arch/powerpc/include/asm/bitops.h
+++ b/arch/powerpc/include/asm/bitops.h
@@ -55,6 +55,14 @@
#define PPC_BITEXTRACT(bits, ppc_bit, dst_bit) \
((((bits) >> PPC_BITLSHIFT(ppc_bit)) & 1) << (dst_bit))
+#define PPC_BITLSHIFT32(be) (32 - 1 - (be))
+#define PPC_BIT32(bit) (1UL << PPC_BITLSHIFT32(bit))
+#define PPC_BITMASK32(bs, be) ((PPC_BIT32(bs) - PPC_BIT32(be))|PPC_BIT32(bs))
+
+#define PPC_BITLSHIFT8(be) (8 - 1 - (be))
+#define PPC_BIT8(bit) (1UL << PPC_BITLSHIFT8(bit))
+#define PPC_BITMASK8(bs, be) ((PPC_BIT8(bs) - PPC_BIT8(be))|PPC_BIT8(bs))
+
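The new 32-bit and 8-bit variants follow the same IBM MSB-0 numbering as PPC_BIT()/PPC_BITMASK(): bit 0 is the most significant bit. A standalone check of what the 32-bit macros expand to (macro bodies copied from the hunk above):

#include <stdio.h>

/* Copied from the hunk above for a self-contained check. */
#define PPC_BITLSHIFT32(be)    (32 - 1 - (be))
#define PPC_BIT32(bit)         (1UL << PPC_BITLSHIFT32(bit))
#define PPC_BITMASK32(bs, be)  ((PPC_BIT32(bs) - PPC_BIT32(be)) | PPC_BIT32(bs))

int main(void)
{
    /* MSB-0: bit 0 is the most significant bit of the 32-bit word. */
    printf("PPC_BIT32(0)        = 0x%08lx\n", PPC_BIT32(0));        /* 0x80000000 */
    printf("PPC_BITMASK32(0, 7) = 0x%08lx\n", PPC_BITMASK32(0, 7)); /* 0xff000000 */
    return 0;
}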
#include <asm/barrier.h>
/* Macro for generating the ***_bits() functions */
diff --git a/arch/powerpc/include/asm/book3s/64/hash-4k.h b/arch/powerpc/include/asm/book3s/64/hash-4k.h
index 0c4e470571ca..b4b5e6b671ca 100644
--- a/arch/powerpc/include/asm/book3s/64/hash-4k.h
+++ b/arch/powerpc/include/asm/book3s/64/hash-4k.h
@@ -8,7 +8,7 @@
#define H_PTE_INDEX_SIZE 9
#define H_PMD_INDEX_SIZE 7
#define H_PUD_INDEX_SIZE 9
-#define H_PGD_INDEX_SIZE 9
+#define H_PGD_INDEX_SIZE 12
#ifndef __ASSEMBLY__
#define H_PTE_TABLE_SIZE (sizeof(pte_t) << H_PTE_INDEX_SIZE)
diff --git a/arch/powerpc/include/asm/book3s/64/hash-64k.h b/arch/powerpc/include/asm/book3s/64/hash-64k.h
index f3dd21efa2ea..214219dff87c 100644
--- a/arch/powerpc/include/asm/book3s/64/hash-64k.h
+++ b/arch/powerpc/include/asm/book3s/64/hash-64k.h
@@ -4,10 +4,14 @@
#define H_PTE_INDEX_SIZE 8
#define H_PMD_INDEX_SIZE 5
#define H_PUD_INDEX_SIZE 5
-#define H_PGD_INDEX_SIZE 12
+#define H_PGD_INDEX_SIZE 15
-#define H_PAGE_COMBO 0x00001000 /* this is a combo 4k page */
-#define H_PAGE_4K_PFN 0x00002000 /* PFN is for a single 4k page */
+/*
+ * A 64K-aligned address frees up a few of the lower bits of the RPN for us.
+ * We steal them here. For more details look at pte_pfn()/pfn_pte().
+ */
+#define H_PAGE_COMBO _RPAGE_RPN0 /* this is a combo 4k page */
+#define H_PAGE_4K_PFN _RPAGE_RPN1 /* PFN is for a single 4k page */
/*
* We need to differentiate between explicit huge page and THP huge
* page, since THP huge page also need to track real subpage details
diff --git a/arch/powerpc/include/asm/book3s/64/hash.h b/arch/powerpc/include/asm/book3s/64/hash.h
index f7b721bbf918..4e957b027fe0 100644
--- a/arch/powerpc/include/asm/book3s/64/hash.h
+++ b/arch/powerpc/include/asm/book3s/64/hash.h
@@ -6,19 +6,13 @@
* Common bits between 4K and 64K pages in a linux-style PTE.
* Additional bits may be defined in pgtable-hash64-*.h
*
- * Note: We only support user read/write permissions. Supervisor always
- * have full read/write to pages above PAGE_OFFSET (pages below that
- * always use the user access permissions).
- *
- * We could create separate kernel read-only if we used the 3 PP bits
- * combinations that newer processors provide but we currently don't.
*/
-#define H_PAGE_BUSY 0x00800 /* software: PTE & hash are busy */
#define H_PTE_NONE_MASK _PAGE_HPTEFLAGS
-#define H_PAGE_F_GIX_SHIFT 57
-#define H_PAGE_F_GIX (7ul << 57) /* HPTE index within HPTEG */
-#define H_PAGE_F_SECOND (1ul << 60) /* HPTE is in 2ndary HPTEG */
-#define H_PAGE_HASHPTE (1ul << 61) /* PTE has associated HPTE */
+#define H_PAGE_F_GIX_SHIFT 56
+#define H_PAGE_BUSY _RPAGE_RSV1 /* software: PTE & hash are busy */
+#define H_PAGE_F_SECOND _RPAGE_RSV2 /* HPTE is in 2ndary HPTEG */
+#define H_PAGE_F_GIX (_RPAGE_RSV3 | _RPAGE_RSV4 | _RPAGE_RPN44)
+#define H_PAGE_HASHPTE _RPAGE_RPN43 /* PTE has associated HPTE */
#ifdef CONFIG_PPC_64K_PAGES
#include <asm/book3s/64/hash-64k.h>
diff --git a/arch/powerpc/include/asm/book3s/64/hugetlb.h b/arch/powerpc/include/asm/book3s/64/hugetlb.h
index c62f14d0bec1..6666cd366596 100644
--- a/arch/powerpc/include/asm/book3s/64/hugetlb.h
+++ b/arch/powerpc/include/asm/book3s/64/hugetlb.h
@@ -46,7 +46,7 @@ static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
*/
VM_WARN_ON(page_shift == mmu_psize_defs[MMU_PAGE_1G].shift);
if (page_shift == mmu_psize_defs[MMU_PAGE_2M].shift)
- return __pte(pte_val(entry) | _PAGE_LARGE);
+ return __pte(pte_val(entry) | R_PAGE_LARGE);
else
return entry;
}
diff --git a/arch/powerpc/include/asm/book3s/64/mmu-hash.h b/arch/powerpc/include/asm/book3s/64/mmu-hash.h
index 52d8d1e4b772..6981a52b3887 100644
--- a/arch/powerpc/include/asm/book3s/64/mmu-hash.h
+++ b/arch/powerpc/include/asm/book3s/64/mmu-hash.h
@@ -39,6 +39,7 @@
/* Bits in the SLB VSID word */
#define SLB_VSID_SHIFT 12
+#define SLB_VSID_SHIFT_256M SLB_VSID_SHIFT
#define SLB_VSID_SHIFT_1T 24
#define SLB_VSID_SSIZE_SHIFT 62
#define SLB_VSID_B ASM_CONST(0xc000000000000000)
@@ -408,7 +409,7 @@ static inline unsigned long hpt_vpn(unsigned long ea,
static inline unsigned long hpt_hash(unsigned long vpn,
unsigned int shift, int ssize)
{
- int mask;
+ unsigned long mask;
unsigned long hash, vsid;
/* VPN_SHIFT can be atmost 12 */
@@ -491,13 +492,14 @@ extern void slb_set_size(u16 size);
* We first generate a 37-bit "proto-VSID". Proto-VSIDs are generated
* from mmu context id and effective segment id of the address.
*
- * For user processes max context id is limited to ((1ul << 19) - 5)
- * for kernel space, we use the top 4 context ids to map address as below
+ * For user processes max context id is limited to MAX_USER_CONTEXT.
+ *
+ * For kernel space, we use context ids 1-4 to map addresses as below:
* NOTE: each context only support 64TB now.
- * 0x7fffc - [ 0xc000000000000000 - 0xc0003fffffffffff ]
- * 0x7fffd - [ 0xd000000000000000 - 0xd0003fffffffffff ]
- * 0x7fffe - [ 0xe000000000000000 - 0xe0003fffffffffff ]
- * 0x7ffff - [ 0xf000000000000000 - 0xf0003fffffffffff ]
+ * 0x00001 - [ 0xc000000000000000 - 0xc0003fffffffffff ]
+ * 0x00002 - [ 0xd000000000000000 - 0xd0003fffffffffff ]
+ * 0x00003 - [ 0xe000000000000000 - 0xe0003fffffffffff ]
+ * 0x00004 - [ 0xf000000000000000 - 0xf0003fffffffffff ]
*
* The proto-VSIDs are then scrambled into real VSIDs with the
* multiplicative hash:
@@ -511,20 +513,28 @@ extern void slb_set_size(u16 size);
* robust scattering in the hash table (at least based on some initial
* results).
*
- * We also consider VSID 0 special. We use VSID 0 for slb entries mapping
- * bad address. This enables us to consolidate bad address handling in
- * hash_page.
+ * We use VSID 0 to indicate an invalid VSID. The means we can't use context id
+ * 0, because a context id of 0 and an EA of 0 gives a proto-VSID of 0, which
+ * will produce a VSID of 0.
*
* We also need to avoid the last segment of the last context, because that
* would give a protovsid of 0x1fffffffff. That will result in a VSID 0
- * because of the modulo operation in vsid scramble. But the vmemmap
- * (which is what uses region 0xf) will never be close to 64TB in size
- * (it's 56 bytes per page of system memory).
+ * because of the modulo operation in vsid scramble.
*/
+/*
+ * The maximum VA bits we support for now is 68. We want a 19-bit
+ * context ID.
+ * Restrictions:
+ * The GPU cannot access beyond 128TB (47-bit effective address). We also
+ * cannot do more than a 20-bit PID. For P4 and P5, which can only do a
+ * 65-bit VA, we restrict CONTEXT_BITS to 16 bits (i.e. we can only have
+ * 2^16 PIDs at the same time).
+ */
+#define VA_BITS 68
#define CONTEXT_BITS 19
-#define ESID_BITS 18
-#define ESID_BITS_1T 6
+#define ESID_BITS (VA_BITS - (SID_SHIFT + CONTEXT_BITS))
+#define ESID_BITS_1T (VA_BITS - (SID_SHIFT_1T + CONTEXT_BITS))
#define ESID_BITS_MASK ((1 << ESID_BITS) - 1)
#define ESID_BITS_1T_MASK ((1 << ESID_BITS_1T) - 1)
@@ -532,63 +542,70 @@ extern void slb_set_size(u16 size);
/*
* 256MB segment
* The proto-VSID space has 2^(CONTEX_BITS + ESID_BITS) - 1 segments
- * available for user + kernel mapping. The top 4 contexts are used for
- * kernel mapping. Each segment contains 2^28 bytes. Each
- * context maps 2^46 bytes (64TB) so we can support 2^19-1 contexts
- * (19 == 37 + 28 - 46).
+ * available for user + kernel mapping. VSID 0 is reserved as invalid, contexts
+ * 1-4 are used for kernel mapping. Each segment contains 2^28 bytes. Each
+ * context maps 2^49 bytes (512TB).
+ *
+ * We also need to avoid the last segment of the last context, because that
+ * would give a protovsid of 0x1fffffffff. That will result in a VSID 0
+ * because of the modulo operation in vsid scramble.
+ */
+#define MAX_USER_CONTEXT ((ASM_CONST(1) << CONTEXT_BITS) - 2)
+#define MIN_USER_CONTEXT (5)
+
+/* Would be nice to use KERNEL_REGION_ID here */
+#define KERNEL_REGION_CONTEXT_OFFSET (0xc - 1)
+
+/*
+ * For platforms that only support a 65-bit VA we limit the context bits.
*/
-#define MAX_USER_CONTEXT ((ASM_CONST(1) << CONTEXT_BITS) - 5)
+#define MAX_USER_CONTEXT_65BIT_VA ((ASM_CONST(1) << (65 - (SID_SHIFT + ESID_BITS))) - 2)
/*
* This should be computed such that protovosid * vsid_mulitplier
- * doesn't overflow 64 bits. It should also be co-prime to vsid_modulus
+ * doesn't overflow 64 bits. The vsid_multiplier should also be
+ * co-prime to vsid_modulus. We also need to make sure that the number
+ * of bits in the multiplied result (dividend) is less than twice the
+ * number of protovsid bits for our modulus optimization to work.
+ *
+ * The below table shows the current values used.
+ * |-------+------------+----------------------+------------+-------------------|
+ * | | Prime Bits | proto VSID_BITS_65VA | Total Bits | 2* prot VSID_BITS |
+ * |-------+------------+----------------------+------------+-------------------|
+ * | 1T | 24 | 25 | 49 | 50 |
+ * |-------+------------+----------------------+------------+-------------------|
+ * | 256MB | 24 | 37 | 61 | 74 |
+ * |-------+------------+----------------------+------------+-------------------|
+ *
+ * |-------+------------+----------------------+------------+--------------------|
+ * | | Prime Bits | proto VSID_BITS_68VA | Total Bits | 2* proto VSID_BITS |
+ * |-------+------------+----------------------+------------+--------------------|
+ * | 1T | 24 | 28 | 52 | 56 |
+ * |-------+------------+----------------------+------------+--------------------|
+ * | 256MB | 24 | 40 | 64 | 80 |
+ * |-------+------------+----------------------+------------+--------------------|
+ *
*/
#define VSID_MULTIPLIER_256M ASM_CONST(12538073) /* 24-bit prime */
-#define VSID_BITS_256M (CONTEXT_BITS + ESID_BITS)
-#define VSID_MODULUS_256M ((1UL<<VSID_BITS_256M)-1)
+#define VSID_BITS_256M (VA_BITS - SID_SHIFT)
+#define VSID_BITS_65_256M (65 - SID_SHIFT)
+/*
+ * Modular multiplicative inverse of VSID_MULTIPLIER under modulo VSID_MODULUS
+ */
+#define VSID_MULINV_256M ASM_CONST(665548017062)
#define VSID_MULTIPLIER_1T ASM_CONST(12538073) /* 24-bit prime */
-#define VSID_BITS_1T (CONTEXT_BITS + ESID_BITS_1T)
-#define VSID_MODULUS_1T ((1UL<<VSID_BITS_1T)-1)
-
+#define VSID_BITS_1T (VA_BITS - SID_SHIFT_1T)
+#define VSID_BITS_65_1T (65 - SID_SHIFT_1T)
+#define VSID_MULINV_1T ASM_CONST(209034062)
+/* 1TB VSID reserved for VRMA */
+#define VRMA_VSID 0x1ffffffUL
#define USER_VSID_RANGE (1UL << (ESID_BITS + SID_SHIFT))
-/*
- * This macro generates asm code to compute the VSID scramble
- * function. Used in slb_allocate() and do_stab_bolted. The function
- * computed is: (protovsid*VSID_MULTIPLIER) % VSID_MODULUS
- *
- * rt = register containing the proto-VSID and into which the
- * VSID will be stored
- * rx = scratch register (clobbered)
- *
- * - rt and rx must be different registers
- * - The answer will end up in the low VSID_BITS bits of rt. The higher
- * bits may contain other garbage, so you may need to mask the
- * result.
- */
-#define ASM_VSID_SCRAMBLE(rt, rx, size) \
- lis rx,VSID_MULTIPLIER_##size@h; \
- ori rx,rx,VSID_MULTIPLIER_##size@l; \
- mulld rt,rt,rx; /* rt = rt * MULTIPLIER */ \
- \
- srdi rx,rt,VSID_BITS_##size; \
- clrldi rt,rt,(64-VSID_BITS_##size); \
- add rt,rt,rx; /* add high and low bits */ \
- /* NOTE: explanation based on VSID_BITS_##size = 36 \
- * Now, r3 == VSID (mod 2^36-1), and lies between 0 and \
- * 2^36-1+2^28-1. That in particular means that if r3 >= \
- * 2^36-1, then r3+1 has the 2^36 bit set. So, if r3+1 has \
- * the bit clear, r3 already has the answer we want, if it \
- * doesn't, the answer is the low 36 bits of r3+1. So in all \
- * cases the answer is the low 36 bits of (r3 + ((r3+1) >> 36))*/\
- addi rx,rt,1; \
- srdi rx,rx,VSID_BITS_##size; /* extract 2^VSID_BITS bit */ \
- add rt,rt,rx
-
/* 4 bits per slice and we have one slice per 1TB */
-#define SLICE_ARRAY_SIZE (H_PGTABLE_RANGE >> 41)
+#define SLICE_ARRAY_SIZE (H_PGTABLE_RANGE >> 41)
+#define TASK_SLICE_ARRAY_SZ(x) ((x)->context.addr_limit >> 41)
#ifndef __ASSEMBLY__
@@ -634,7 +651,7 @@ static inline void subpage_prot_init_new_context(struct mm_struct *mm) { }
#define vsid_scramble(protovsid, size) \
((((protovsid) * VSID_MULTIPLIER_##size) % VSID_MODULUS_##size))
-#else /* 1 */
+/* simplified form avoiding mod operation */
#define vsid_scramble(protovsid, size) \
({ \
unsigned long x; \
@@ -642,6 +659,21 @@ static inline void subpage_prot_init_new_context(struct mm_struct *mm) { }
x = (x >> VSID_BITS_##size) + (x & VSID_MODULUS_##size); \
(x + ((x+1) >> VSID_BITS_##size)) & VSID_MODULUS_##size; \
})
+
+#else /* 1 */
+static inline unsigned long vsid_scramble(unsigned long protovsid,
+ unsigned long vsid_multiplier, int vsid_bits)
+{
+ unsigned long vsid;
+ unsigned long vsid_modulus = ((1UL << vsid_bits) - 1);
+ /*
+ * We have the same multiplier for both 256M and 1T segments now
+ */
+ vsid = protovsid * vsid_multiplier;
+ vsid = (vsid >> vsid_bits) + (vsid & vsid_modulus);
+ return (vsid + ((vsid + 1) >> vsid_bits)) & vsid_modulus;
+}
+
#endif /* 1 */
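The C version of vsid_scramble() above replaces the modulo with a shift-and-fold reduction, which works because vsid_modulus is 2^vsid_bits - 1 and the dividend has fewer than twice vsid_bits bits (see the table earlier in this header). A throwaway userspace comparison against the plain % operator; the sample protovsids and the 40-bit width are arbitrary:

#include <stdio.h>

#define VSID_MULTIPLIER_256M 12538073ULL   /* 24-bit prime, as above */

/* Userspace copy of the shift-and-fold reduction in vsid_scramble(). */
static unsigned long long scramble(unsigned long long protovsid, int vsid_bits)
{
    unsigned long long modulus = (1ULL << vsid_bits) - 1;
    unsigned long long vsid = protovsid * VSID_MULTIPLIER_256M;

    vsid = (vsid >> vsid_bits) + (vsid & modulus);
    return (vsid + ((vsid + 1) >> vsid_bits)) & modulus;
}

int main(void)
{
    int vsid_bits = 40;                 /* e.g. VA_BITS(68) - SID_SHIFT(28) */
    unsigned long long samples[] = { 1, 12345, 0x123456789ULL };

    for (int i = 0; i < 3; i++) {
        unsigned long long p = samples[i];
        unsigned long long ref =
            (p * VSID_MULTIPLIER_256M) % ((1ULL << vsid_bits) - 1);

        /* Both columns should agree for inputs of this size. */
        printf("protovsid=%#llx fold=%#llx mod=%#llx\n",
               p, scramble(p, vsid_bits), ref);
    }
    return 0;
}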
/* Returns the segment size indicator for a user address */
@@ -656,36 +688,56 @@ static inline int user_segment_size(unsigned long addr)
static inline unsigned long get_vsid(unsigned long context, unsigned long ea,
int ssize)
{
+ unsigned long va_bits = VA_BITS;
+ unsigned long vsid_bits;
+ unsigned long protovsid;
+
/*
* Bad address. We return VSID 0 for that
*/
if ((ea & ~REGION_MASK) >= H_PGTABLE_RANGE)
return 0;
- if (ssize == MMU_SEGSIZE_256M)
- return vsid_scramble((context << ESID_BITS)
- | ((ea >> SID_SHIFT) & ESID_BITS_MASK), 256M);
- return vsid_scramble((context << ESID_BITS_1T)
- | ((ea >> SID_SHIFT_1T) & ESID_BITS_1T_MASK), 1T);
+ if (!mmu_has_feature(MMU_FTR_68_BIT_VA))
+ va_bits = 65;
+
+ if (ssize == MMU_SEGSIZE_256M) {
+ vsid_bits = va_bits - SID_SHIFT;
+ protovsid = (context << ESID_BITS) |
+ ((ea >> SID_SHIFT) & ESID_BITS_MASK);
+ return vsid_scramble(protovsid, VSID_MULTIPLIER_256M, vsid_bits);
+ }
+ /* 1T segment */
+ vsid_bits = va_bits - SID_SHIFT_1T;
+ protovsid = (context << ESID_BITS_1T) |
+ ((ea >> SID_SHIFT_1T) & ESID_BITS_1T_MASK);
+ return vsid_scramble(protovsid, VSID_MULTIPLIER_1T, vsid_bits);
}
/*
* This is only valid for addresses >= PAGE_OFFSET
- *
- * For kernel space, we use the top 4 context ids to map address as below
- * 0x7fffc - [ 0xc000000000000000 - 0xc0003fffffffffff ]
- * 0x7fffd - [ 0xd000000000000000 - 0xd0003fffffffffff ]
- * 0x7fffe - [ 0xe000000000000000 - 0xe0003fffffffffff ]
- * 0x7ffff - [ 0xf000000000000000 - 0xf0003fffffffffff ]
*/
static inline unsigned long get_kernel_vsid(unsigned long ea, int ssize)
{
unsigned long context;
+ if (!is_kernel_addr(ea))
+ return 0;
+
/*
- * kernel take the top 4 context from the available range
+ * For kernel space, we use context ids 1-4 to map the address space as
+ * below:
+ *
+ * 0x00001 - [ 0xc000000000000000 - 0xc0003fffffffffff ]
+ * 0x00002 - [ 0xd000000000000000 - 0xd0003fffffffffff ]
+ * 0x00003 - [ 0xe000000000000000 - 0xe0003fffffffffff ]
+ * 0x00004 - [ 0xf000000000000000 - 0xf0003fffffffffff ]
+ *
+ * So we can compute the context from the region (top nibble) by
+ * subtracting 11, or 0xc - 1.
*/
- context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1;
+ context = (ea >> 60) - KERNEL_REGION_CONTEXT_OFFSET;
+
return get_vsid(context, ea, ssize);
}
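With kernel contexts 1-4, the context id in get_kernel_vsid() falls straight out of the region nibble: context = (ea >> 60) - (0xc - 1). A quick sketch of just that mapping, using the addresses quoted in the comment:

#include <stdio.h>

#define KERNEL_REGION_CONTEXT_OFFSET (0xc - 1)

int main(void)
{
    unsigned long long regions[] = {
        0xc000000000000000ULL, 0xd000000000000000ULL,
        0xe000000000000000ULL, 0xf000000000000000ULL,
    };

    for (int i = 0; i < 4; i++) {
        unsigned long long ea = regions[i];

        /* 0xc.. -> 1, 0xd.. -> 2, 0xe.. -> 3, 0xf.. -> 4 */
        printf("ea=0x%llx -> context %llu\n",
               ea, (ea >> 60) - KERNEL_REGION_CONTEXT_OFFSET);
    }
    return 0;
}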
diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h b/arch/powerpc/include/asm/book3s/64/mmu.h
index 805d4105e9bb..77529a3e3811 100644
--- a/arch/powerpc/include/asm/book3s/64/mmu.h
+++ b/arch/powerpc/include/asm/book3s/64/mmu.h
@@ -65,6 +65,8 @@ extern struct patb_entry *partition_tb;
* MAX_USER_CONTEXT * 16 bytes of space.
*/
#define PRTB_SIZE_SHIFT (CONTEXT_BITS + 4)
+#define PRTB_ENTRIES (1ul << CONTEXT_BITS)
+
/*
* Power9 currently only support 64K partition table size.
*/
@@ -73,13 +75,20 @@ extern struct patb_entry *partition_tb;
typedef unsigned long mm_context_id_t;
struct spinlock;
+/* Maximum possible number of NPUs in a system. */
+#define NV_MAX_NPUS 8
+
typedef struct {
mm_context_id_t id;
u16 user_psize; /* page size index */
+ /* NPU NMMU context */
+ struct npu_context *npu_context;
+
#ifdef CONFIG_PPC_MM_SLICES
u64 low_slices_psize; /* SLB page size encodings */
unsigned char high_slices_psize[SLICE_ARRAY_SIZE];
+ unsigned long addr_limit;
#else
u16 sllp; /* SLB page size encoding */
#endif
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index 8f4d41936e5a..85bc9875c3be 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -13,6 +13,7 @@
#define _PAGE_BIT_SWAP_TYPE 0
#define _PAGE_RO 0
+#define _PAGE_SHARED 0
#define _PAGE_EXEC 0x00001 /* execute permission */
#define _PAGE_WRITE 0x00002 /* write access allowed */
@@ -37,21 +38,47 @@
#define _RPAGE_RSV3 0x0400000000000000UL
#define _RPAGE_RSV4 0x0200000000000000UL
-#ifdef CONFIG_MEM_SOFT_DIRTY
-#define _PAGE_SOFT_DIRTY _RPAGE_SW3 /* software: software dirty tracking */
-#else
-#define _PAGE_SOFT_DIRTY 0x00000
-#endif
-#define _PAGE_SPECIAL _RPAGE_SW2 /* software: special page */
+#define _PAGE_PTE 0x4000000000000000UL /* distinguishes PTEs from pointers */
+#define _PAGE_PRESENT 0x8000000000000000UL /* pte contains a translation */
/*
- * For P9 DD1 only, we need to track whether the pte's huge.
+ * Top and bottom bits of RPN which can be used by hash
+ * translation mode, because we expect them to be zero
+ * otherwise.
*/
-#define _PAGE_LARGE _RPAGE_RSV1
+#define _RPAGE_RPN0 0x01000
+#define _RPAGE_RPN1 0x02000
+#define _RPAGE_RPN44 0x0100000000000000UL
+#define _RPAGE_RPN43 0x0080000000000000UL
+#define _RPAGE_RPN42 0x0040000000000000UL
+#define _RPAGE_RPN41 0x0020000000000000UL
+
+/* Max physical address bit as per radix table */
+#define _RPAGE_PA_MAX 57
+/*
+ * Max physical address bit we will use for now.
+ *
+ * This is mostly a hardware limitation and for now Power9 has
+ * a 51 bit limit.
+ *
+ * This is different from the number of physical bit required to address
+ * the last byte of memory. That is defined by MAX_PHYSMEM_BITS.
+ * MAX_PHYSMEM_BITS is a linux limitation imposed by the maximum
+ * number of sections we can support (SECTIONS_SHIFT).
+ *
+ * This is different from Radix page table limitation above and
+ * should always be less than that. The limit is done such that
+ * we can overload the bits between _RPAGE_PA_MAX and _PAGE_PA_MAX
+ * for hash linux page table specific bits.
+ *
+ * In order to be compatible with future hardware generations we keep
+ * some offsets and limit this for now to 53.
+ */
+#define _PAGE_PA_MAX 53
-#define _PAGE_PTE (1ul << 62) /* distinguishes PTEs from pointers */
-#define _PAGE_PRESENT (1ul << 63) /* pte contains a translation */
+#define _PAGE_SOFT_DIRTY _RPAGE_SW3 /* software: software dirty tracking */
+#define _PAGE_SPECIAL _RPAGE_SW2 /* software: special page */
/*
* Drivers request for cache inhibited pte mapping using _PAGE_NO_CACHE
* Instead of fixing all of them, add an alternate define which
@@ -59,10 +86,11 @@
*/
#define _PAGE_NO_CACHE _PAGE_TOLERANT
/*
- * We support 57 bit real address in pte. Clear everything above 57, and
- * every thing below PAGE_SHIFT;
+ * We support _RPAGE_PA_MAX bit real address in pte. On the linux side
+ * we are limited by _PAGE_PA_MAX. Clear everything above _PAGE_PA_MAX
+ * and everything below PAGE_SHIFT.
*/
-#define PTE_RPN_MASK (((1UL << 57) - 1) & (PAGE_MASK))
+#define PTE_RPN_MASK (((1UL << _PAGE_PA_MAX) - 1) & (PAGE_MASK))
/*
* set of bits not changed in pmd_modify. Even though we have hash specific bits
* in here, on radix we expect them to be zero.
@@ -205,10 +233,6 @@ extern unsigned long __pte_frag_nr;
extern unsigned long __pte_frag_size_shift;
#define PTE_FRAG_SIZE_SHIFT __pte_frag_size_shift
#define PTE_FRAG_SIZE (1UL << PTE_FRAG_SIZE_SHIFT)
-/*
- * Pgtable size used by swapper, init in asm code
- */
-#define MAX_PGD_TABLE_SIZE (sizeof(pgd_t) << RADIX_PGD_INDEX_SIZE)
#define PTRS_PER_PTE (1 << PTE_INDEX_SIZE)
#define PTRS_PER_PMD (1 << PMD_INDEX_SIZE)
diff --git a/arch/powerpc/include/asm/book3s/64/radix.h b/arch/powerpc/include/asm/book3s/64/radix.h
index 9e0bb7cd6e22..ac16d1943022 100644
--- a/arch/powerpc/include/asm/book3s/64/radix.h
+++ b/arch/powerpc/include/asm/book3s/64/radix.h
@@ -11,6 +11,12 @@
#include <asm/book3s/64/radix-4k.h>
#endif
+/*
+ * For P9 DD1 only, we need to track whether the pte's huge.
+ */
+#define R_PAGE_LARGE _RPAGE_RSV1
+
+
#ifndef __ASSEMBLY__
#include <asm/book3s/64/tlbflush-radix.h>
#include <asm/cpu_has_feature.h>
@@ -252,7 +258,7 @@ static inline int radix__pmd_trans_huge(pmd_t pmd)
static inline pmd_t radix__pmd_mkhuge(pmd_t pmd)
{
if (cpu_has_feature(CPU_FTR_POWER9_DD1))
- return __pmd(pmd_val(pmd) | _PAGE_PTE | _PAGE_LARGE);
+ return __pmd(pmd_val(pmd) | _PAGE_PTE | R_PAGE_LARGE);
return __pmd(pmd_val(pmd) | _PAGE_PTE);
}
static inline void radix__pmdp_huge_split_prepare(struct vm_area_struct *vma,
diff --git a/arch/powerpc/include/asm/code-patching.h b/arch/powerpc/include/asm/code-patching.h
index 8ab937771068..abef812de7f8 100644
--- a/arch/powerpc/include/asm/code-patching.h
+++ b/arch/powerpc/include/asm/code-patching.h
@@ -12,6 +12,8 @@
#include <asm/types.h>
#include <asm/ppc-opcode.h>
+#include <linux/string.h>
+#include <linux/kallsyms.h>
/* Flags for create_branch:
* "b" == create_branch(addr, target, 0);
@@ -99,6 +101,45 @@ static inline unsigned long ppc_global_function_entry(void *func)
#endif
}
+/*
+ * Wrapper around kallsyms_lookup() to return function entry address:
+ * - For ABIv1, we lookup the dot variant.
+ * - For ABIv2, we return the local entry point.
+ */
+static inline unsigned long ppc_kallsyms_lookup_name(const char *name)
+{
+ unsigned long addr;
+#ifdef PPC64_ELF_ABI_v1
+ /* check for dot variant */
+ char dot_name[1 + KSYM_NAME_LEN];
+ bool dot_appended = false;
+
+ if (strnlen(name, KSYM_NAME_LEN) >= KSYM_NAME_LEN)
+ return 0;
+
+ if (name[0] != '.') {
+ dot_name[0] = '.';
+ dot_name[1] = '\0';
+ strlcat(dot_name, name, sizeof(dot_name));
+ dot_appended = true;
+ } else {
+ dot_name[0] = '\0';
+ strlcat(dot_name, name, sizeof(dot_name));
+ }
+ addr = kallsyms_lookup_name(dot_name);
+ if (!addr && dot_appended)
+ /* Let's try the original non-dot symbol lookup */
+ addr = kallsyms_lookup_name(name);
+#elif defined(PPC64_ELF_ABI_v2)
+ addr = kallsyms_lookup_name(name);
+ if (addr)
+ addr = ppc_function_entry((void *)addr);
+#else
+ addr = kallsyms_lookup_name(name);
+#endif
+ return addr;
+}
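ppc_kallsyms_lookup_name() tries the dot-prefixed variant first on ELF ABIv1 and falls back to the plain name. A hedged userspace sketch of only that string handling, with kallsyms_lookup_name() replaced by a stub and an arbitrary symbol name:

#include <stdio.h>
#include <string.h>

#define KSYM_NAME_LEN 128

/* Stand-in for the kernel's kallsyms_lookup_name(); always "finds" here. */
static unsigned long fake_lookup(const char *name)
{
    printf("lookup: %s\n", name);
    return 1;
}

/* Mirrors only the ABIv1 dot-variant handling shown above. */
static unsigned long lookup_dot_variant(const char *name)
{
    char dot_name[1 + KSYM_NAME_LEN];

    if (strlen(name) >= KSYM_NAME_LEN)
        return 0;

    if (name[0] != '.')
        snprintf(dot_name, sizeof(dot_name), ".%s", name);
    else
        snprintf(dot_name, sizeof(dot_name), "%s", name);

    return fake_lookup(dot_name);
}

int main(void)
{
    lookup_dot_variant("some_kernel_function");  /* looks up ".some_kernel_function" */
    return 0;
}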
+
#ifdef CONFIG_PPC64
/*
* Some instruction encodings commonly used in dynamic ftracing
diff --git a/arch/powerpc/include/asm/cpuidle.h b/arch/powerpc/include/asm/cpuidle.h
index 155731557c9b..52586f9956bb 100644
--- a/arch/powerpc/include/asm/cpuidle.h
+++ b/arch/powerpc/include/asm/cpuidle.h
@@ -2,13 +2,39 @@
#define _ASM_POWERPC_CPUIDLE_H
#ifdef CONFIG_PPC_POWERNV
-/* Used in powernv idle state management */
+/* Thread state used in powernv idle state management */
#define PNV_THREAD_RUNNING 0
#define PNV_THREAD_NAP 1
#define PNV_THREAD_SLEEP 2
#define PNV_THREAD_WINKLE 3
-#define PNV_CORE_IDLE_LOCK_BIT 0x100
-#define PNV_CORE_IDLE_THREAD_BITS 0x0FF
+
+/*
+ * Core state used in powernv idle for POWER8.
+ *
+ * The lock bit synchronizes updates to the state, as well as parts of the
+ * sleep/wake code (see kernel/idle_book3s.S).
+ *
+ * Bottom 8 bits track the idle state of each thread. Bit is cleared before
+ * the thread executes an idle instruction (nap/sleep/winkle).
+ *
+ * Then there is winkle tracking. A core does not lose complete state
+ * until every thread is in winkle. So the winkle count field counts the
+ * number of threads in winkle (small window of false positives is okay
+ * around the sleep/wake, so long as there are no false negatives).
+ *
+ * When the winkle count reaches 8 (the COUNT_ALL_BIT becomes set), then
+ * the THREAD_WINKLE_BITS are set, which indicate which threads have not
+ * yet woken from the winkle state.
+ */
+#define PNV_CORE_IDLE_LOCK_BIT 0x10000000
+
+#define PNV_CORE_IDLE_WINKLE_COUNT 0x00010000
+#define PNV_CORE_IDLE_WINKLE_COUNT_ALL_BIT 0x00080000
+#define PNV_CORE_IDLE_WINKLE_COUNT_BITS 0x000F0000
+#define PNV_CORE_IDLE_THREAD_WINKLE_BITS_SHIFT 8
+#define PNV_CORE_IDLE_THREAD_WINKLE_BITS 0x0000FF00
+
+#define PNV_CORE_IDLE_THREAD_BITS 0x000000FF
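The core idle state word now packs the per-thread idle bits, the thread-winkle bits, the winkle count and the lock bit into one 32-bit value. A decode sketch using the masks from the hunk above; the example state is invented:

#include <stdio.h>

/* Mask values copied from the hunk above. */
#define PNV_CORE_IDLE_LOCK_BIT                  0x10000000
#define PNV_CORE_IDLE_WINKLE_COUNT              0x00010000
#define PNV_CORE_IDLE_WINKLE_COUNT_BITS         0x000F0000
#define PNV_CORE_IDLE_THREAD_WINKLE_BITS_SHIFT  8
#define PNV_CORE_IDLE_THREAD_WINKLE_BITS        0x0000FF00
#define PNV_CORE_IDLE_THREAD_BITS               0x000000FF

int main(void)
{
    /* Invented example: threads 0-2 running, threads 3-7 winkled, lock held. */
    unsigned int state = PNV_CORE_IDLE_LOCK_BIT |
                         (5 * PNV_CORE_IDLE_WINKLE_COUNT) |
                         (0xf8 << PNV_CORE_IDLE_THREAD_WINKLE_BITS_SHIFT) |
                         0x07;

    printf("lock held: %u\n", !!(state & PNV_CORE_IDLE_LOCK_BIT));
    printf("winkle count: %u\n",
           (state & PNV_CORE_IDLE_WINKLE_COUNT_BITS) / PNV_CORE_IDLE_WINKLE_COUNT);
    /* Bottom 8 bits are set while a thread is running (cleared before nap/sleep/winkle). */
    printf("threads running (mask): 0x%02x\n", state & PNV_CORE_IDLE_THREAD_BITS);
    return 0;
}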
/*
* ============================ NOTE =================================
@@ -46,6 +72,7 @@ extern u32 pnv_fastsleep_workaround_at_exit[];
extern u64 pnv_first_deep_stop_state;
+unsigned long pnv_cpu_offline(unsigned int cpu);
int validate_psscr_val_mask(u64 *psscr_val, u64 *psscr_mask, u32 flags);
static inline void report_invalid_psscr_val(u64 psscr_val, int err)
{
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index ab68d0ee7725..1f6847b107e4 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -471,10 +471,11 @@ enum {
CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \
CPU_FTR_DSCR | CPU_FTR_SAO | \
CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
- CPU_FTR_ICSWX | CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY | \
+ CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY | \
CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_DAWR | \
CPU_FTR_ARCH_207S | CPU_FTR_TM_COMP | CPU_FTR_ARCH_300)
-#define CPU_FTRS_POWER9_DD1 (CPU_FTRS_POWER9 | CPU_FTR_POWER9_DD1)
+#define CPU_FTRS_POWER9_DD1 ((CPU_FTRS_POWER9 | CPU_FTR_POWER9_DD1) & \
+ (~CPU_FTR_SAO))
#define CPU_FTRS_CELL (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \
diff --git a/arch/powerpc/include/asm/dbell.h b/arch/powerpc/include/asm/dbell.h
index 378167377065..f70cbfe0ec04 100644
--- a/arch/powerpc/include/asm/dbell.h
+++ b/arch/powerpc/include/asm/dbell.h
@@ -35,33 +35,53 @@ enum ppc_dbell {
#ifdef CONFIG_PPC_BOOK3S
#define PPC_DBELL_MSGTYPE PPC_DBELL_SERVER
-#define SPRN_DOORBELL_CPUTAG SPRN_TIR
-#define PPC_DBELL_TAG_MASK 0x7f
static inline void _ppc_msgsnd(u32 msg)
{
- if (cpu_has_feature(CPU_FTR_HVMODE))
- __asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
- else
- __asm__ __volatile__ (PPC_MSGSNDP(%0) : : "r" (msg));
+ __asm__ __volatile__ (ASM_FTR_IFSET(PPC_MSGSND(%1), PPC_MSGSNDP(%1), %0)
+ : : "i" (CPU_FTR_HVMODE), "r" (msg));
+}
+
+/* sync before sending message */
+static inline void ppc_msgsnd_sync(void)
+{
+ __asm__ __volatile__ ("sync" : : : "memory");
+}
+
+/* sync after taking message interrupt */
+static inline void ppc_msgsync(void)
+{
+ /* sync is not required when taking messages from the same core */
+ __asm__ __volatile__ (ASM_FTR_IFSET(PPC_MSGSYNC " ; lwsync", "", %0)
+ : : "i" (CPU_FTR_HVMODE|CPU_FTR_ARCH_300));
}
#else /* CONFIG_PPC_BOOK3S */
#define PPC_DBELL_MSGTYPE PPC_DBELL
-#define SPRN_DOORBELL_CPUTAG SPRN_PIR
-#define PPC_DBELL_TAG_MASK 0x3fff
static inline void _ppc_msgsnd(u32 msg)
{
__asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
}
+/* sync before sending message */
+static inline void ppc_msgsnd_sync(void)
+{
+ __asm__ __volatile__ ("sync" : : : "memory");
+}
+
+/* sync after taking message interrupt */
+static inline void ppc_msgsync(void)
+{
+}
+
#endif /* CONFIG_PPC_BOOK3S */
-extern void doorbell_cause_ipi(int cpu, unsigned long data);
+extern void doorbell_global_ipi(int cpu);
+extern void doorbell_core_ipi(int cpu);
+extern int doorbell_try_core_ipi(int cpu);
extern void doorbell_exception(struct pt_regs *regs);
-extern void doorbell_setup_this_cpu(void);
static inline void ppc_msgsnd(enum ppc_dbell type, u32 flags, u32 tag)
{
diff --git a/arch/powerpc/include/asm/debug.h b/arch/powerpc/include/asm/debug.h
index 86308f177f2d..5d5af3fddfd8 100644
--- a/arch/powerpc/include/asm/debug.h
+++ b/arch/powerpc/include/asm/debug.h
@@ -8,8 +8,6 @@
struct pt_regs;
-extern struct dentry *powerpc_debugfs_root;
-
#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC_CORE)
extern int (*__debugger)(struct pt_regs *regs);
diff --git a/arch/powerpc/include/asm/debugfs.h b/arch/powerpc/include/asm/debugfs.h
new file mode 100644
index 000000000000..4f3b39f3e3d2
--- /dev/null
+++ b/arch/powerpc/include/asm/debugfs.h
@@ -0,0 +1,17 @@
+#ifndef _ASM_POWERPC_DEBUGFS_H
+#define _ASM_POWERPC_DEBUGFS_H
+
+/*
+ * Copyright 2017, Michael Ellerman, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/debugfs.h>
+
+extern struct dentry *powerpc_debugfs_root;
+
+#endif /* _ASM_POWERPC_DEBUGFS_H */
diff --git a/arch/powerpc/include/asm/disassemble.h b/arch/powerpc/include/asm/disassemble.h
index 4852e849128b..c0a55050f70f 100644
--- a/arch/powerpc/include/asm/disassemble.h
+++ b/arch/powerpc/include/asm/disassemble.h
@@ -87,6 +87,11 @@ static inline unsigned int get_oc(u32 inst)
return (inst >> 11) & 0x7fff;
}
+static inline unsigned int get_tx_or_sx(u32 inst)
+{
+ return (inst) & 0x1;
+}
+
#define IS_XFORM(inst) (get_op(inst) == 31)
#define IS_DSFORM(inst) (get_op(inst) >= 56)
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
index ed3beadd2cc5..183d73b6ed99 100644
--- a/arch/powerpc/include/asm/exception-64s.h
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -167,17 +167,14 @@ BEGIN_FTR_SECTION_NESTED(943) \
std ra,offset(r13); \
END_FTR_SECTION_NESTED(ftr,ftr,943)
-#define EXCEPTION_PROLOG_0_PACA(area) \
+#define EXCEPTION_PROLOG_0(area) \
+ GET_PACA(r13); \
std r9,area+EX_R9(r13); /* save r9 */ \
OPT_GET_SPR(r9, SPRN_PPR, CPU_FTR_HAS_PPR); \
HMT_MEDIUM; \
std r10,area+EX_R10(r13); /* save r10 - r12 */ \
OPT_GET_SPR(r10, SPRN_CFAR, CPU_FTR_CFAR)
-#define EXCEPTION_PROLOG_0(area) \
- GET_PACA(r13); \
- EXCEPTION_PROLOG_0_PACA(area)
-
#define __EXCEPTION_PROLOG_1(area, extra, vec) \
OPT_SAVE_REG_TO_PACA(area+EX_PPR, r9, CPU_FTR_HAS_PPR); \
OPT_SAVE_REG_TO_PACA(area+EX_CFAR, r10, CPU_FTR_CFAR); \
@@ -203,17 +200,26 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
#define EXCEPTION_PROLOG_PSERIES_1(label, h) \
__EXCEPTION_PROLOG_PSERIES_1(label, h)
+/* _NORI variant keeps MSR_RI clear */
+#define __EXCEPTION_PROLOG_PSERIES_1_NORI(label, h) \
+ ld r10,PACAKMSR(r13); /* get MSR value for kernel */ \
+ xori r10,r10,MSR_RI; /* Clear MSR_RI */ \
+ mfspr r11,SPRN_##h##SRR0; /* save SRR0 */ \
+ LOAD_HANDLER(r12,label) \
+ mtspr SPRN_##h##SRR0,r12; \
+ mfspr r12,SPRN_##h##SRR1; /* and SRR1 */ \
+ mtspr SPRN_##h##SRR1,r10; \
+ h##rfid; \
+ b . /* prevent speculative execution */
+
+#define EXCEPTION_PROLOG_PSERIES_1_NORI(label, h) \
+ __EXCEPTION_PROLOG_PSERIES_1_NORI(label, h)
+
#define EXCEPTION_PROLOG_PSERIES(area, label, h, extra, vec) \
EXCEPTION_PROLOG_0(area); \
EXCEPTION_PROLOG_1(area, extra, vec); \
EXCEPTION_PROLOG_PSERIES_1(label, h);
-/* Have the PACA in r13 already */
-#define EXCEPTION_PROLOG_PSERIES_PACA(area, label, h, extra, vec) \
- EXCEPTION_PROLOG_0_PACA(area); \
- EXCEPTION_PROLOG_1(area, extra, vec); \
- EXCEPTION_PROLOG_PSERIES_1(label, h);
-
#define __KVMTEST(h, n) \
lbz r10,HSTATE_IN_GUEST(r13); \
cmpwi r10,0; \
@@ -256,11 +262,6 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
ld r9,area+EX_R9(r13); \
bctr
-#define BRANCH_TO_KVM(reg, label) \
- __LOAD_FAR_HANDLER(reg, label); \
- mtctr reg; \
- bctr
-
#else
#define BRANCH_TO_COMMON(reg, label) \
b label
@@ -268,15 +269,18 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
#define BRANCH_LINK_TO_FAR(label) \
bl label
-#define BRANCH_TO_KVM(reg, label) \
- b label
-
#define __BRANCH_TO_KVM_EXIT(area, label) \
ld r9,area+EX_R9(r13); \
b label
#endif
+/* Do not enable RI */
+#define EXCEPTION_PROLOG_PSERIES_NORI(area, label, h, extra, vec) \
+ EXCEPTION_PROLOG_0(area); \
+ EXCEPTION_PROLOG_1(area, extra, vec); \
+ EXCEPTION_PROLOG_PSERIES_1_NORI(label, h);
+
#define __KVM_HANDLER(area, h, n) \
BEGIN_FTR_SECTION_NESTED(947) \
@@ -325,6 +329,15 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
#define NOTEST(n)
+#define EXCEPTION_PROLOG_COMMON_1() \
+ std r9,_CCR(r1); /* save CR in stackframe */ \
+ std r11,_NIP(r1); /* save SRR0 in stackframe */ \
+ std r12,_MSR(r1); /* save SRR1 in stackframe */ \
+ std r10,0(r1); /* make stack chain pointer */ \
+ std r0,GPR0(r1); /* save r0 in stackframe */ \
+ std r10,GPR1(r1); /* save r1 in stackframe */ \
+
+
/*
* The common exception prolog is used for all except a few exceptions
* such as a segment miss on a kernel address. We have to be prepared
@@ -349,12 +362,7 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
addi r3,r13,area; /* r3 -> where regs are saved*/ \
RESTORE_CTR(r1, area); \
b bad_stack; \
-3: std r9,_CCR(r1); /* save CR in stackframe */ \
- std r11,_NIP(r1); /* save SRR0 in stackframe */ \
- std r12,_MSR(r1); /* save SRR1 in stackframe */ \
- std r10,0(r1); /* make stack chain pointer */ \
- std r0,GPR0(r1); /* save r0 in stackframe */ \
- std r10,GPR1(r1); /* save r1 in stackframe */ \
+3: EXCEPTION_PROLOG_COMMON_1(); \
beq 4f; /* if from kernel mode */ \
ACCOUNT_CPU_USER_ENTRY(r13, r9, r10); \
SAVE_PPR(area, r9, r10); \
@@ -522,7 +530,7 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
#define MASKABLE_RELON_EXCEPTION_HV_OOL(vec, label) \
EXCEPTION_PROLOG_1(PACA_EXGEN, SOFTEN_TEST_HV, vec); \
- EXCEPTION_PROLOG_PSERIES_1(label, EXC_HV)
+ EXCEPTION_RELON_PROLOG_PSERIES_1(label, EXC_HV)
/*
* Our exception common code can be passed various "additions"
@@ -547,26 +555,39 @@ BEGIN_FTR_SECTION \
beql ppc64_runlatch_on_trampoline; \
END_FTR_SECTION_IFSET(CPU_FTR_CTRL)
-#define EXCEPTION_COMMON(trap, label, hdlr, ret, additions) \
- EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN); \
+#define EXCEPTION_COMMON(area, trap, label, hdlr, ret, additions) \
+ EXCEPTION_PROLOG_COMMON(trap, area); \
/* Volatile regs are potentially clobbered here */ \
additions; \
addi r3,r1,STACK_FRAME_OVERHEAD; \
bl hdlr; \
b ret
+/*
+ * Exception where stack is already set in r1, r1 is saved in r10, and it
+ * continues rather than returns.
+ */
+#define EXCEPTION_COMMON_NORET_STACK(area, trap, label, hdlr, additions) \
+ EXCEPTION_PROLOG_COMMON_1(); \
+ EXCEPTION_PROLOG_COMMON_2(area); \
+ EXCEPTION_PROLOG_COMMON_3(trap); \
+ /* Volatile regs are potentially clobbered here */ \
+ additions; \
+ addi r3,r1,STACK_FRAME_OVERHEAD; \
+ bl hdlr
+
#define STD_EXCEPTION_COMMON(trap, label, hdlr) \
- EXCEPTION_COMMON(trap, label, hdlr, ret_from_except, \
- ADD_NVGPRS;ADD_RECONCILE)
+ EXCEPTION_COMMON(PACA_EXGEN, trap, label, hdlr, \
+ ret_from_except, ADD_NVGPRS;ADD_RECONCILE)
/*
* Like STD_EXCEPTION_COMMON, but for exceptions that can occur
* in the idle task and therefore need the special idle handling
* (finish nap and runlatch)
*/
-#define STD_EXCEPTION_COMMON_ASYNC(trap, label, hdlr) \
- EXCEPTION_COMMON(trap, label, hdlr, ret_from_except_lite, \
- FINISH_NAP;ADD_RECONCILE;RUNLATCH_ON)
+#define STD_EXCEPTION_COMMON_ASYNC(trap, label, hdlr) \
+ EXCEPTION_COMMON(PACA_EXGEN, trap, label, hdlr, \
+ ret_from_except_lite, FINISH_NAP;ADD_RECONCILE;RUNLATCH_ON)
/*
* When the idle code in power4_idle puts the CPU into NAP mode,
diff --git a/arch/powerpc/include/asm/fadump.h b/arch/powerpc/include/asm/fadump.h
index 0031806475f0..60b91084f33c 100644
--- a/arch/powerpc/include/asm/fadump.h
+++ b/arch/powerpc/include/asm/fadump.h
@@ -73,6 +73,8 @@
reg_entry++; \
})
+extern int crashing_cpu;
+
/* Kernel Dump section info */
struct fadump_section {
__be32 request_flag;
diff --git a/arch/powerpc/include/asm/feature-fixups.h b/arch/powerpc/include/asm/feature-fixups.h
index ddf54f5bbdd1..2de2319b99e2 100644
--- a/arch/powerpc/include/asm/feature-fixups.h
+++ b/arch/powerpc/include/asm/feature-fixups.h
@@ -66,6 +66,9 @@ label##5: \
#define END_FTR_SECTION(msk, val) \
END_FTR_SECTION_NESTED(msk, val, 97)
+#define END_FTR_SECTION_NESTED_IFSET(msk, label) \
+ END_FTR_SECTION_NESTED((msk), (msk), label)
+
#define END_FTR_SECTION_IFSET(msk) END_FTR_SECTION((msk), (msk))
#define END_FTR_SECTION_IFCLR(msk) END_FTR_SECTION((msk), 0)
diff --git a/arch/powerpc/include/asm/head-64.h b/arch/powerpc/include/asm/head-64.h
index 5067048daad4..86eb87382031 100644
--- a/arch/powerpc/include/asm/head-64.h
+++ b/arch/powerpc/include/asm/head-64.h
@@ -213,6 +213,7 @@ name:
USE_TEXT_SECTION(); \
.balign IFETCH_ALIGN_BYTES; \
.global name; \
+ _ASM_NOKPROBE_SYMBOL(name); \
DEFINE_FIXED_SYMBOL(name); \
name:
diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
index 3cc12a86ef5d..d73755fafbb0 100644
--- a/arch/powerpc/include/asm/hvcall.h
+++ b/arch/powerpc/include/asm/hvcall.h
@@ -377,16 +377,6 @@ long plpar_hcall_raw(unsigned long opcode, unsigned long *retbuf, ...);
long plpar_hcall9(unsigned long opcode, unsigned long *retbuf, ...);
long plpar_hcall9_raw(unsigned long opcode, unsigned long *retbuf, ...);
-/* For hcall instrumentation. One structure per-hcall, per-CPU */
-struct hcall_stats {
- unsigned long num_calls; /* number of calls (on this CPU) */
- unsigned long tb_total; /* total wall time (mftb) of calls. */
- unsigned long purr_total; /* total cpu time (PURR) of calls. */
- unsigned long tb_start;
- unsigned long purr_start;
-};
-#define HCALL_STAT_ARRAY_SIZE ((MAX_HCALL_OPCODE >> 2) + 1)
-
struct hvcall_mpp_data {
unsigned long entitled_mem;
unsigned long mapped_mem;
diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h
index 5ed292431b5b..422f99cf9924 100644
--- a/arch/powerpc/include/asm/io.h
+++ b/arch/powerpc/include/asm/io.h
@@ -25,8 +25,6 @@ extern struct pci_dev *isa_bridge_pcidev;
#endif
#include <linux/device.h>
-#include <linux/io.h>
-
#include <linux/compiler.h>
#include <asm/page.h>
#include <asm/byteorder.h>
@@ -192,24 +190,8 @@ DEF_MMIO_OUT_D(out_le32, 32, stw);
#endif /* __BIG_ENDIAN */
-/*
- * Cache inhibitied accessors for use in real mode, you don't want to use these
- * unless you know what you're doing.
- *
- * NB. These use the cpu byte ordering.
- */
-DEF_MMIO_OUT_X(out_rm8, 8, stbcix);
-DEF_MMIO_OUT_X(out_rm16, 16, sthcix);
-DEF_MMIO_OUT_X(out_rm32, 32, stwcix);
-DEF_MMIO_IN_X(in_rm8, 8, lbzcix);
-DEF_MMIO_IN_X(in_rm16, 16, lhzcix);
-DEF_MMIO_IN_X(in_rm32, 32, lwzcix);
-
#ifdef __powerpc64__
-DEF_MMIO_OUT_X(out_rm64, 64, stdcix);
-DEF_MMIO_IN_X(in_rm64, 64, ldcix);
-
#ifdef __BIG_ENDIAN__
DEF_MMIO_OUT_D(out_be64, 64, std);
DEF_MMIO_IN_D(in_be64, 64, ld);
@@ -242,35 +224,6 @@ static inline void out_be64(volatile u64 __iomem *addr, u64 val)
#endif
#endif /* __powerpc64__ */
-
-/*
- * Simple Cache inhibited accessors
- * Unlike the DEF_MMIO_* macros, these don't include any h/w memory
- * barriers, callers need to manage memory barriers on their own.
- * These can only be used in hypervisor real mode.
- */
-
-static inline u32 _lwzcix(unsigned long addr)
-{
- u32 ret;
-
- __asm__ __volatile__("lwzcix %0,0, %1"
- : "=r" (ret) : "r" (addr) : "memory");
- return ret;
-}
-
-static inline void _stbcix(u64 addr, u8 val)
-{
- __asm__ __volatile__("stbcix %0,0,%1"
- : : "r" (val), "r" (addr) : "memory");
-}
-
-static inline void _stwcix(u64 addr, u32 val)
-{
- __asm__ __volatile__("stwcix %0,0,%1"
- : : "r" (val), "r" (addr) : "memory");
-}
-
/*
* Low level IO stream instructions are defined out of line for now
*/
@@ -417,15 +370,64 @@ static inline void __raw_writeq(unsigned long v, volatile void __iomem *addr)
}
/*
- * Real mode version of the above. stdcix is only supposed to be used
- * in hypervisor real mode as per the architecture spec.
+ * Real mode versions of the above. Those instructions are only supposed
+ * to be used in hypervisor real mode as per the architecture spec.
*/
+static inline void __raw_rm_writeb(u8 val, volatile void __iomem *paddr)
+{
+ __asm__ __volatile__("stbcix %0,0,%1"
+ : : "r" (val), "r" (paddr) : "memory");
+}
+
+static inline void __raw_rm_writew(u16 val, volatile void __iomem *paddr)
+{
+ __asm__ __volatile__("sthcix %0,0,%1"
+ : : "r" (val), "r" (paddr) : "memory");
+}
+
+static inline void __raw_rm_writel(u32 val, volatile void __iomem *paddr)
+{
+ __asm__ __volatile__("stwcix %0,0,%1"
+ : : "r" (val), "r" (paddr) : "memory");
+}
+
static inline void __raw_rm_writeq(u64 val, volatile void __iomem *paddr)
{
__asm__ __volatile__("stdcix %0,0,%1"
: : "r" (val), "r" (paddr) : "memory");
}
+static inline u8 __raw_rm_readb(volatile void __iomem *paddr)
+{
+ u8 ret;
+ __asm__ __volatile__("lbzcix %0,0, %1"
+ : "=r" (ret) : "r" (paddr) : "memory");
+ return ret;
+}
+
+static inline u16 __raw_rm_readw(volatile void __iomem *paddr)
+{
+ u16 ret;
+ __asm__ __volatile__("lhzcix %0,0, %1"
+ : "=r" (ret) : "r" (paddr) : "memory");
+ return ret;
+}
+
+static inline u32 __raw_rm_readl(volatile void __iomem *paddr)
+{
+ u32 ret;
+ __asm__ __volatile__("lwzcix %0,0, %1"
+ : "=r" (ret) : "r" (paddr) : "memory");
+ return ret;
+}
+
+static inline u64 __raw_rm_readq(volatile void __iomem *paddr)
+{
+ u64 ret;
+ __asm__ __volatile__("ldcix %0,0, %1"
+ : "=r" (ret) : "r" (paddr) : "memory");
+ return ret;
+}
#endif /* __powerpc64__ */
/*
@@ -757,6 +759,8 @@ extern void __iomem *ioremap_prot(phys_addr_t address, unsigned long size,
extern void __iomem *ioremap_wc(phys_addr_t address, unsigned long size);
#define ioremap_nocache(addr, size) ioremap((addr), (size))
#define ioremap_uc(addr, size) ioremap((addr), (size))
+#define ioremap_cache(addr, size) \
+ ioremap_prot((addr), (size), pgprot_val(PAGE_KERNEL))
extern void iounmap(volatile void __iomem *addr);
diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h
index 2c1d50792944..8a8ce220d7d0 100644
--- a/arch/powerpc/include/asm/iommu.h
+++ b/arch/powerpc/include/asm/iommu.h
@@ -64,6 +64,11 @@ struct iommu_table_ops {
long index,
unsigned long *hpa,
enum dma_data_direction *direction);
+ /* Real mode */
+ int (*exchange_rm)(struct iommu_table *tbl,
+ long index,
+ unsigned long *hpa,
+ enum dma_data_direction *direction);
#endif
void (*clear)(struct iommu_table *tbl,
long index, long npages);
@@ -114,6 +119,7 @@ struct iommu_table {
struct list_head it_group_list;/* List of iommu_table_group_link */
unsigned long *it_userspace; /* userspace view of the table */
struct iommu_table_ops *it_ops;
+ struct kref it_kref;
};
#define IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry) \
@@ -146,8 +152,8 @@ static inline void *get_iommu_table_base(struct device *dev)
extern int dma_iommu_dma_supported(struct device *dev, u64 mask);
-/* Frees table for an individual device node */
-extern void iommu_free_table(struct iommu_table *tbl, const char *node_name);
+extern struct iommu_table *iommu_tce_table_get(struct iommu_table *tbl);
+extern int iommu_tce_table_put(struct iommu_table *tbl);
/* Initializes an iommu_table based in values set in the passed-in
* structure
@@ -208,6 +214,8 @@ extern void iommu_del_device(struct device *dev);
extern int __init tce_iommu_bus_notifier_init(void);
extern long iommu_tce_xchg(struct iommu_table *tbl, unsigned long entry,
unsigned long *hpa, enum dma_data_direction *direction);
+extern long iommu_tce_xchg_rm(struct iommu_table *tbl, unsigned long entry,
+ unsigned long *hpa, enum dma_data_direction *direction);
#else
static inline void iommu_register_group(struct iommu_table_group *table_group,
int pci_domain_number,
@@ -288,11 +296,21 @@ static inline void iommu_restore(void)
#endif
/* The API to support IOMMU operations for VFIO */
-extern int iommu_tce_clear_param_check(struct iommu_table *tbl,
- unsigned long ioba, unsigned long tce_value,
- unsigned long npages);
-extern int iommu_tce_put_param_check(struct iommu_table *tbl,
- unsigned long ioba, unsigned long tce);
+extern int iommu_tce_check_ioba(unsigned long page_shift,
+ unsigned long offset, unsigned long size,
+ unsigned long ioba, unsigned long npages);
+extern int iommu_tce_check_gpa(unsigned long page_shift,
+ unsigned long gpa);
+
+#define iommu_tce_clear_param_check(tbl, ioba, tce_value, npages) \
+ (iommu_tce_check_ioba((tbl)->it_page_shift, \
+ (tbl)->it_offset, (tbl)->it_size, \
+ (ioba), (npages)) || (tce_value))
+#define iommu_tce_put_param_check(tbl, ioba, gpa) \
+ (iommu_tce_check_ioba((tbl)->it_page_shift, \
+ (tbl)->it_offset, (tbl)->it_size, \
+ (ioba), 1) || \
+ iommu_tce_check_gpa((tbl)->it_page_shift, (gpa)))
extern void iommu_flush_tce(struct iommu_table *tbl);
extern int iommu_take_ownership(struct iommu_table *tbl);
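
The new iommu_tce_check_ioba()/iommu_tce_check_gpa() interface above turns the per-table parameter checks into pure functions of page_shift, offset and size. The sketch below shows one plausible shape for such an IOBA window check (aligned IOBA, page range inside [offset, offset + size)); it is a standalone illustration compiled in userspace, not the in-kernel implementation, and the error value is assumed.

#include <errno.h>
#include <stdio.h>

int example_tce_check_ioba(unsigned long page_shift,
			   unsigned long offset, unsigned long size,
			   unsigned long ioba, unsigned long npages)
{
	unsigned long mask = (1UL << page_shift) - 1;

	if (ioba & mask)		/* IOBA must be page aligned */
		return -EINVAL;		/* error value assumed for this sketch */

	ioba >>= page_shift;		/* bytes -> table pages */
	if (ioba < offset || ioba + npages > offset + size)
		return -EINVAL;		/* outside the table's window */

	return 0;
}

int main(void)
{
	/* 64K pages, window of 512 pages starting at page 0 */
	printf("in window : %d\n", example_tce_check_ioba(16, 0, 512, 0x30000, 1));
	printf("unaligned : %d\n", example_tce_check_ioba(16, 0, 512, 0x30001, 1));
	printf("too far   : %d\n", example_tce_check_ioba(16, 0, 512, 1UL << 32, 1));
	return 0;
}
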
diff --git a/arch/powerpc/include/asm/kprobes.h b/arch/powerpc/include/asm/kprobes.h
index 0503c98b2117..a83821f33ea3 100644
--- a/arch/powerpc/include/asm/kprobes.h
+++ b/arch/powerpc/include/asm/kprobes.h
@@ -61,59 +61,6 @@ extern kprobe_opcode_t optprobe_template_end[];
#define MAX_OPTINSN_SIZE (optprobe_template_end - optprobe_template_entry)
#define RELATIVEJUMP_SIZE sizeof(kprobe_opcode_t) /* 4 bytes */
-#ifdef PPC64_ELF_ABI_v2
-/* PPC64 ABIv2 needs local entry point */
-#define kprobe_lookup_name(name, addr) \
-{ \
- addr = (kprobe_opcode_t *)kallsyms_lookup_name(name); \
- if (addr) \
- addr = (kprobe_opcode_t *)ppc_function_entry(addr); \
-}
-#elif defined(PPC64_ELF_ABI_v1)
-/*
- * 64bit powerpc ABIv1 uses function descriptors:
- * - Check for the dot variant of the symbol first.
- * - If that fails, try looking up the symbol provided.
- *
- * This ensures we always get to the actual symbol and not the descriptor.
- * Also handle <module:symbol> format.
- */
-#define kprobe_lookup_name(name, addr) \
-{ \
- char dot_name[MODULE_NAME_LEN + 1 + KSYM_NAME_LEN]; \
- const char *modsym; \
- bool dot_appended = false; \
- if ((modsym = strchr(name, ':')) != NULL) { \
- modsym++; \
- if (*modsym != '\0' && *modsym != '.') { \
- /* Convert to <module:.symbol> */ \
- strncpy(dot_name, name, modsym - name); \
- dot_name[modsym - name] = '.'; \
- dot_name[modsym - name + 1] = '\0'; \
- strncat(dot_name, modsym, \
- sizeof(dot_name) - (modsym - name) - 2);\
- dot_appended = true; \
- } else { \
- dot_name[0] = '\0'; \
- strncat(dot_name, name, sizeof(dot_name) - 1); \
- } \
- } else if (name[0] != '.') { \
- dot_name[0] = '.'; \
- dot_name[1] = '\0'; \
- strncat(dot_name, name, KSYM_NAME_LEN - 2); \
- dot_appended = true; \
- } else { \
- dot_name[0] = '\0'; \
- strncat(dot_name, name, KSYM_NAME_LEN - 1); \
- } \
- addr = (kprobe_opcode_t *)kallsyms_lookup_name(dot_name); \
- if (!addr && dot_appended) { \
- /* Let's try the original non-dot symbol lookup */ \
- addr = (kprobe_opcode_t *)kallsyms_lookup_name(name); \
- } \
-}
-#endif
-
#define flush_insn_slot(p) do { } while (0)
#define kretprobe_blacklist_size 0
@@ -156,6 +103,16 @@ extern int kprobe_exceptions_notify(struct notifier_block *self,
extern int kprobe_fault_handler(struct pt_regs *regs, int trapnr);
extern int kprobe_handler(struct pt_regs *regs);
extern int kprobe_post_handler(struct pt_regs *regs);
+#ifdef CONFIG_KPROBES_ON_FTRACE
+extern int skip_singlestep(struct kprobe *p, struct pt_regs *regs,
+ struct kprobe_ctlblk *kcb);
+#else
+static inline int skip_singlestep(struct kprobe *p, struct pt_regs *regs,
+ struct kprobe_ctlblk *kcb)
+{
+ return 0;
+}
+#endif
#else
static inline int kprobe_handler(struct pt_regs *regs) { return 0; }
static inline int kprobe_post_handler(struct pt_regs *regs) { return 0; }
diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
index d9b48f5bb606..d55c7f881ce7 100644
--- a/arch/powerpc/include/asm/kvm_book3s_64.h
+++ b/arch/powerpc/include/asm/kvm_book3s_64.h
@@ -49,8 +49,6 @@ static inline bool kvm_is_radix(struct kvm *kvm)
#define KVM_DEFAULT_HPT_ORDER 24 /* 16MB HPT by default */
#endif
-#define VRMA_VSID 0x1ffffffUL /* 1TB VSID reserved for VRMA */
-
/*
* We use a lock bit in HPTE dword 0 to synchronize updates and
* accesses to each HPTE, and another bit to indicate non-present
diff --git a/arch/powerpc/include/asm/kvm_book3s_asm.h b/arch/powerpc/include/asm/kvm_book3s_asm.h
index d318d432caa9..0593d9479f74 100644
--- a/arch/powerpc/include/asm/kvm_book3s_asm.h
+++ b/arch/powerpc/include/asm/kvm_book3s_asm.h
@@ -110,7 +110,7 @@ struct kvmppc_host_state {
u8 ptid;
struct kvm_vcpu *kvm_vcpu;
struct kvmppc_vcore *kvm_vcore;
- unsigned long xics_phys;
+ void __iomem *xics_phys;
u32 saved_xirr;
u64 dabr;
u64 host_mmcr[7]; /* MMCR 0,1,A, SIAR, SDAR, MMCR2, SIER */
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 7bba8f415627..77c60826d145 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -45,9 +45,6 @@
#define __KVM_HAVE_ARCH_INTC_INITIALIZED
-#ifdef CONFIG_KVM_MMIO
-#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
-#endif
#define KVM_HALT_POLL_NS_DEFAULT 10000 /* 10 us */
/* These values are internal and can be increased later */
@@ -191,6 +188,13 @@ struct kvmppc_pginfo {
atomic_t refcnt;
};
+struct kvmppc_spapr_tce_iommu_table {
+ struct rcu_head rcu;
+ struct list_head next;
+ struct iommu_table *tbl;
+ struct kref kref;
+};
+
struct kvmppc_spapr_tce_table {
struct list_head list;
struct kvm *kvm;
@@ -199,6 +203,7 @@ struct kvmppc_spapr_tce_table {
u32 page_shift;
u64 offset; /* in pages */
u64 size; /* window size in pages */
+ struct list_head iommu_tables;
struct page *pages[0];
};
@@ -345,6 +350,7 @@ struct kvmppc_pte {
bool may_read : 1;
bool may_write : 1;
bool may_execute : 1;
+ unsigned long wimg;
u8 page_size; /* MMU_PAGE_xxx */
};
@@ -441,6 +447,11 @@ struct mmio_hpte_cache {
unsigned int index;
};
+#define KVMPPC_VSX_COPY_NONE 0
+#define KVMPPC_VSX_COPY_WORD 1
+#define KVMPPC_VSX_COPY_DWORD 2
+#define KVMPPC_VSX_COPY_DWORD_LOAD_DUMP 3
+
struct openpic;
struct kvm_vcpu_arch {
@@ -644,6 +655,21 @@ struct kvm_vcpu_arch {
u8 io_gpr; /* GPR used as IO source/target */
u8 mmio_host_swabbed;
u8 mmio_sign_extend;
+ /* conversion between single and double precision */
+ u8 mmio_sp64_extend;
+ /*
+ * Number of simulations for vsx.
+ * If we use 2*8bytes to simulate 1*16bytes,
+ * then the number should be 2 and
+ * mmio_vsx_copy_type=KVMPPC_VSX_COPY_DWORD.
+ * If we use 4*4bytes to simulate 1*16bytes,
+ * the number should be 4 and
+ * mmio_vsx_copy_type=KVMPPC_VSX_COPY_WORD.
+ */
+ u8 mmio_vsx_copy_nums;
+ u8 mmio_vsx_offset;
+ u8 mmio_vsx_copy_type;
+ u8 mmio_vsx_tx_sx_enabled;
u8 osi_needed;
u8 osi_enabled;
u8 papr_enabled;
@@ -732,6 +758,8 @@ struct kvm_vcpu_arch {
};
#define VCPU_FPR(vcpu, i) (vcpu)->arch.fp.fpr[i][TS_FPROFFSET]
+#define VCPU_VSX_FPR(vcpu, i, j) ((vcpu)->arch.fp.fpr[i][j])
+#define VCPU_VSX_VR(vcpu, i) ((vcpu)->arch.vr.vr[i])
/* Values for vcpu->arch.state */
#define KVMPPC_VCPU_NOTREADY 0
@@ -745,6 +773,7 @@ struct kvm_vcpu_arch {
#define KVM_MMIO_REG_FPR 0x0020
#define KVM_MMIO_REG_QPR 0x0040
#define KVM_MMIO_REG_FQPR 0x0060
+#define KVM_MMIO_REG_VSX 0x0080
#define __KVM_HAVE_ARCH_WQP
#define __KVM_HAVE_CREATE_DEVICE
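
The mmio_vsx_copy_nums comment above says a 16-byte VSX access is emulated either as 2 doubleword or 4 word MMIO operations, selected by mmio_vsx_copy_type. The standalone helper below (hypothetical, not kernel code) just maps the KVMPPC_VSX_COPY_* values from the hunk above to a chunk count and chunk size to make that relationship concrete.

#include <stdint.h>
#include <stdio.h>

#define KVMPPC_VSX_COPY_NONE	0
#define KVMPPC_VSX_COPY_WORD	1
#define KVMPPC_VSX_COPY_DWORD	2

/* How many MMIO chunks of what size emulate one 128-bit VSX access. */
static void example_vsx_split(uint8_t copy_type,
			      uint8_t *copy_nums, uint8_t *chunk_bytes)
{
	switch (copy_type) {
	case KVMPPC_VSX_COPY_DWORD:	/* 2 x 8 bytes = 16 bytes */
		*copy_nums = 2;
		*chunk_bytes = 8;
		break;
	case KVMPPC_VSX_COPY_WORD:	/* 4 x 4 bytes = 16 bytes */
		*copy_nums = 4;
		*chunk_bytes = 4;
		break;
	default:			/* NONE or other copy types: not split here */
		*copy_nums = 0;
		*chunk_bytes = 0;
		break;
	}
}

int main(void)
{
	uint8_t nums, bytes;

	example_vsx_split(KVMPPC_VSX_COPY_DWORD, &nums, &bytes);
	printf("DWORD copy: %u chunks of %u bytes\n", nums, bytes);
	example_vsx_split(KVMPPC_VSX_COPY_WORD, &nums, &bytes);
	printf("WORD  copy: %u chunks of %u bytes\n", nums, bytes);
	return 0;
}
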
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index dd11c4c8c56a..76e940a3c145 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -78,9 +78,15 @@ extern int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
extern int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
unsigned int rt, unsigned int bytes,
int is_default_endian);
+extern int kvmppc_handle_vsx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
+ unsigned int rt, unsigned int bytes,
+ int is_default_endian, int mmio_sign_extend);
extern int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
u64 val, unsigned int bytes,
int is_default_endian);
+extern int kvmppc_handle_vsx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
+ int rs, unsigned int bytes,
+ int is_default_endian);
extern int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
enum instruction_type type, u32 *inst);
@@ -132,6 +138,9 @@ extern void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu);
extern int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu);
extern int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags);
+extern void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu);
+extern void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu);
+extern void kvmppc_core_queue_vsx_unavail(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
@@ -164,13 +173,19 @@ extern long kvmppc_prepare_vrma(struct kvm *kvm,
extern void kvmppc_map_vrma(struct kvm_vcpu *vcpu,
struct kvm_memory_slot *memslot, unsigned long porder);
extern int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu);
+extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
+ struct iommu_group *grp);
+extern void kvm_spapr_tce_release_iommu_group(struct kvm *kvm,
+ struct iommu_group *grp);
extern long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
struct kvm_create_spapr_tce_64 *args);
extern struct kvmppc_spapr_tce_table *kvmppc_find_table(
- struct kvm_vcpu *vcpu, unsigned long liobn);
-extern long kvmppc_ioba_validate(struct kvmppc_spapr_tce_table *stt,
- unsigned long ioba, unsigned long npages);
+ struct kvm *kvm, unsigned long liobn);
+#define kvmppc_ioba_validate(stt, ioba, npages) \
+ (iommu_tce_check_ioba((stt)->page_shift, (stt)->offset, \
+ (stt)->size, (ioba), (npages)) ? \
+ H_PARAMETER : H_SUCCESS)
extern long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *tt,
unsigned long tce);
extern long kvmppc_gpa_to_ua(struct kvm *kvm, unsigned long gpa,
@@ -240,6 +255,7 @@ union kvmppc_one_reg {
u64 dval;
vector128 vval;
u64 vsxval[2];
+ u32 vsx32val[4];
struct {
u64 addr;
u64 length;
@@ -409,7 +425,7 @@ struct openpic;
extern void kvm_cma_reserve(void) __init;
static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
{
- paca[cpu].kvm_hstate.xics_phys = addr;
+ paca[cpu].kvm_hstate.xics_phys = (void __iomem *)addr;
}
static inline u32 kvmppc_get_xics_latch(void)
@@ -478,8 +494,6 @@ extern void kvmppc_free_host_rm_ops(void);
extern void kvmppc_free_pimap(struct kvm *kvm);
extern int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall);
extern void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu);
-extern int kvmppc_xics_create_icp(struct kvm_vcpu *vcpu, unsigned long server);
-extern int kvm_vm_ioctl_xics_irq(struct kvm *kvm, struct kvm_irq_level *args);
extern int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd);
extern u64 kvmppc_xics_get_icp(struct kvm_vcpu *vcpu);
extern int kvmppc_xics_set_icp(struct kvm_vcpu *vcpu, u64 icpval);
@@ -507,12 +521,6 @@ static inline int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall)
static inline int kvmppc_xics_enabled(struct kvm_vcpu *vcpu)
{ return 0; }
static inline void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu) { }
-static inline int kvmppc_xics_create_icp(struct kvm_vcpu *vcpu,
- unsigned long server)
- { return -EINVAL; }
-static inline int kvm_vm_ioctl_xics_irq(struct kvm *kvm,
- struct kvm_irq_level *args)
- { return -ENOTTY; }
static inline int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd)
{ return 0; }
#endif
diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h
index 5011b69107a7..f90b22c722e1 100644
--- a/arch/powerpc/include/asm/machdep.h
+++ b/arch/powerpc/include/asm/machdep.h
@@ -173,6 +173,8 @@ struct machdep_calls {
/* Called after scan and before resource survey */
void (*pcibios_fixup_phb)(struct pci_controller *hose);
+ resource_size_t (*pcibios_default_alignment)(void);
+
#ifdef CONFIG_PCI_IOV
void (*pcibios_fixup_sriov)(struct pci_dev *pdev);
resource_size_t (*pcibios_iov_resource_alignment)(struct pci_dev *, int resno);
diff --git a/arch/powerpc/include/asm/mce.h b/arch/powerpc/include/asm/mce.h
index ed62efe01e49..81eff8631434 100644
--- a/arch/powerpc/include/asm/mce.h
+++ b/arch/powerpc/include/asm/mce.h
@@ -24,97 +24,6 @@
#include <linux/bitops.h>
-/*
- * Machine Check bits on power7 and power8
- */
-#define P7_SRR1_MC_LOADSTORE(srr1) ((srr1) & PPC_BIT(42)) /* P8 too */
-
-/* SRR1 bits for machine check (On Power7 and Power8) */
-#define P7_SRR1_MC_IFETCH(srr1) ((srr1) & PPC_BITMASK(43, 45)) /* P8 too */
-
-#define P7_SRR1_MC_IFETCH_UE (0x1 << PPC_BITLSHIFT(45)) /* P8 too */
-#define P7_SRR1_MC_IFETCH_SLB_PARITY (0x2 << PPC_BITLSHIFT(45)) /* P8 too */
-#define P7_SRR1_MC_IFETCH_SLB_MULTIHIT (0x3 << PPC_BITLSHIFT(45)) /* P8 too */
-#define P7_SRR1_MC_IFETCH_SLB_BOTH (0x4 << PPC_BITLSHIFT(45))
-#define P7_SRR1_MC_IFETCH_TLB_MULTIHIT (0x5 << PPC_BITLSHIFT(45)) /* P8 too */
-#define P7_SRR1_MC_IFETCH_UE_TLB_RELOAD (0x6 << PPC_BITLSHIFT(45)) /* P8 too */
-#define P7_SRR1_MC_IFETCH_UE_IFU_INTERNAL (0x7 << PPC_BITLSHIFT(45))
-
-/* SRR1 bits for machine check (On Power8) */
-#define P8_SRR1_MC_IFETCH_ERAT_MULTIHIT (0x4 << PPC_BITLSHIFT(45))
-
-/* DSISR bits for machine check (On Power7 and Power8) */
-#define P7_DSISR_MC_UE (PPC_BIT(48)) /* P8 too */
-#define P7_DSISR_MC_UE_TABLEWALK (PPC_BIT(49)) /* P8 too */
-#define P7_DSISR_MC_ERAT_MULTIHIT (PPC_BIT(52)) /* P8 too */
-#define P7_DSISR_MC_TLB_MULTIHIT_MFTLB (PPC_BIT(53)) /* P8 too */
-#define P7_DSISR_MC_SLB_PARITY_MFSLB (PPC_BIT(55)) /* P8 too */
-#define P7_DSISR_MC_SLB_MULTIHIT (PPC_BIT(56)) /* P8 too */
-#define P7_DSISR_MC_SLB_MULTIHIT_PARITY (PPC_BIT(57)) /* P8 too */
-
-/*
- * DSISR bits for machine check (Power8) in addition to above.
- * Secondary DERAT Multihit
- */
-#define P8_DSISR_MC_ERAT_MULTIHIT_SEC (PPC_BIT(54))
-
-/* SLB error bits */
-#define P7_DSISR_MC_SLB_ERRORS (P7_DSISR_MC_ERAT_MULTIHIT | \
- P7_DSISR_MC_SLB_PARITY_MFSLB | \
- P7_DSISR_MC_SLB_MULTIHIT | \
- P7_DSISR_MC_SLB_MULTIHIT_PARITY)
-
-#define P8_DSISR_MC_SLB_ERRORS (P7_DSISR_MC_SLB_ERRORS | \
- P8_DSISR_MC_ERAT_MULTIHIT_SEC)
-
-/*
- * Machine Check bits on power9
- */
-#define P9_SRR1_MC_LOADSTORE(srr1) (((srr1) >> PPC_BITLSHIFT(42)) & 1)
-
-#define P9_SRR1_MC_IFETCH(srr1) ( \
- PPC_BITEXTRACT(srr1, 45, 0) | \
- PPC_BITEXTRACT(srr1, 44, 1) | \
- PPC_BITEXTRACT(srr1, 43, 2) | \
- PPC_BITEXTRACT(srr1, 36, 3) )
-
-/* 0 is reserved */
-#define P9_SRR1_MC_IFETCH_UE 1
-#define P9_SRR1_MC_IFETCH_SLB_PARITY 2
-#define P9_SRR1_MC_IFETCH_SLB_MULTIHIT 3
-#define P9_SRR1_MC_IFETCH_ERAT_MULTIHIT 4
-#define P9_SRR1_MC_IFETCH_TLB_MULTIHIT 5
-#define P9_SRR1_MC_IFETCH_UE_TLB_RELOAD 6
-/* 7 is reserved */
-#define P9_SRR1_MC_IFETCH_LINK_TIMEOUT 8
-#define P9_SRR1_MC_IFETCH_LINK_TABLEWALK_TIMEOUT 9
-/* 10 ? */
-#define P9_SRR1_MC_IFETCH_RA 11
-#define P9_SRR1_MC_IFETCH_RA_TABLEWALK 12
-#define P9_SRR1_MC_IFETCH_RA_ASYNC_STORE 13
-#define P9_SRR1_MC_IFETCH_LINK_ASYNC_STORE_TIMEOUT 14
-#define P9_SRR1_MC_IFETCH_RA_TABLEWALK_FOREIGN 15
-
-/* DSISR bits for machine check (On Power9) */
-#define P9_DSISR_MC_UE (PPC_BIT(48))
-#define P9_DSISR_MC_UE_TABLEWALK (PPC_BIT(49))
-#define P9_DSISR_MC_LINK_LOAD_TIMEOUT (PPC_BIT(50))
-#define P9_DSISR_MC_LINK_TABLEWALK_TIMEOUT (PPC_BIT(51))
-#define P9_DSISR_MC_ERAT_MULTIHIT (PPC_BIT(52))
-#define P9_DSISR_MC_TLB_MULTIHIT_MFTLB (PPC_BIT(53))
-#define P9_DSISR_MC_USER_TLBIE (PPC_BIT(54))
-#define P9_DSISR_MC_SLB_PARITY_MFSLB (PPC_BIT(55))
-#define P9_DSISR_MC_SLB_MULTIHIT_MFSLB (PPC_BIT(56))
-#define P9_DSISR_MC_RA_LOAD (PPC_BIT(57))
-#define P9_DSISR_MC_RA_TABLEWALK (PPC_BIT(58))
-#define P9_DSISR_MC_RA_TABLEWALK_FOREIGN (PPC_BIT(59))
-#define P9_DSISR_MC_RA_FOREIGN (PPC_BIT(60))
-
-/* SLB error bits */
-#define P9_DSISR_MC_SLB_ERRORS (P9_DSISR_MC_ERAT_MULTIHIT | \
- P9_DSISR_MC_SLB_PARITY_MFSLB | \
- P9_DSISR_MC_SLB_MULTIHIT_MFSLB)
-
enum MCE_Version {
MCE_V1 = 1,
};
@@ -298,7 +207,8 @@ extern void save_mce_event(struct pt_regs *regs, long handled,
extern int get_mce_event(struct machine_check_event *mce, bool release);
extern void release_mce_event(void);
extern void machine_check_queue_event(void);
-extern void machine_check_print_event_info(struct machine_check_event *evt);
+extern void machine_check_print_event_info(struct machine_check_event *evt,
+ bool user_mode);
extern uint64_t get_mce_fault_addr(struct machine_check_event *evt);
#endif /* __ASM_PPC64_MCE_H__ */
diff --git a/arch/powerpc/include/asm/mmu-book3e.h b/arch/powerpc/include/asm/mmu-book3e.h
index b62a8d43a06c..7ca8d8e80ffa 100644
--- a/arch/powerpc/include/asm/mmu-book3e.h
+++ b/arch/powerpc/include/asm/mmu-book3e.h
@@ -229,11 +229,6 @@ typedef struct {
unsigned int id;
unsigned int active;
unsigned long vdso_base;
-#ifdef CONFIG_PPC_MM_SLICES
- u64 low_slices_psize; /* SLB page size encodings */
- u64 high_slices_psize; /* 4 bits per slice for now */
- u16 user_psize; /* page size index */
-#endif
#ifdef CONFIG_PPC_64K_PAGES
/* for 4K PTE fragment support */
void *pte_frag;
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
index 065e762fae85..78260409dc9c 100644
--- a/arch/powerpc/include/asm/mmu.h
+++ b/arch/powerpc/include/asm/mmu.h
@@ -29,6 +29,10 @@
*/
/*
+ * Support for 68 bit VA space. We added that from ISA 2.05
+ */
+#define MMU_FTR_68_BIT_VA ASM_CONST(0x00002000)
+/*
* Kernel read only support.
* We added the ppp value 0b110 in ISA 2.04.
*/
@@ -109,10 +113,10 @@
#define MMU_FTRS_POWER4 MMU_FTRS_DEFAULT_HPTE_ARCH_V2
#define MMU_FTRS_PPC970 MMU_FTRS_POWER4 | MMU_FTR_TLBIE_CROP_VA
#define MMU_FTRS_POWER5 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE
-#define MMU_FTRS_POWER6 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE | MMU_FTR_KERNEL_RO
-#define MMU_FTRS_POWER7 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE | MMU_FTR_KERNEL_RO
-#define MMU_FTRS_POWER8 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE | MMU_FTR_KERNEL_RO
-#define MMU_FTRS_POWER9 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE | MMU_FTR_KERNEL_RO
+#define MMU_FTRS_POWER6 MMU_FTRS_POWER5 | MMU_FTR_KERNEL_RO | MMU_FTR_68_BIT_VA
+#define MMU_FTRS_POWER7 MMU_FTRS_POWER6
+#define MMU_FTRS_POWER8 MMU_FTRS_POWER6
+#define MMU_FTRS_POWER9 MMU_FTRS_POWER6
#define MMU_FTRS_CELL MMU_FTRS_DEFAULT_HPTE_ARCH_V2 | \
MMU_FTR_CI_LARGE_PAGE
#define MMU_FTRS_PA6T MMU_FTRS_DEFAULT_HPTE_ARCH_V2 | \
@@ -136,7 +140,7 @@ enum {
MMU_FTR_NO_SLBIE_B | MMU_FTR_16M_PAGE | MMU_FTR_TLBIEL |
MMU_FTR_LOCKLESS_TLBIE | MMU_FTR_CI_LARGE_PAGE |
MMU_FTR_1T_SEGMENT | MMU_FTR_TLBIE_CROP_VA |
- MMU_FTR_KERNEL_RO |
+ MMU_FTR_KERNEL_RO | MMU_FTR_68_BIT_VA |
#ifdef CONFIG_PPC_RADIX_MMU
MMU_FTR_TYPE_RADIX |
#endif
@@ -290,7 +294,10 @@ static inline bool early_radix_enabled(void)
#define MMU_PAGE_16G 14
#define MMU_PAGE_64G 15
-/* N.B. we need to change the type of hpte_page_sizes if this gets to be > 16 */
+/*
+ * N.B. we need to change the type of hpte_page_sizes if this gets to be > 16
+ * Also we need to change the type of mm_context.low/high_slices_psize.
+ */
#define MMU_PAGE_COUNT 16
#ifdef CONFIG_PPC_BOOK3S_64
diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
index ecf9885ab660..da7e9432fa8f 100644
--- a/arch/powerpc/include/asm/mmu_context.h
+++ b/arch/powerpc/include/asm/mmu_context.h
@@ -29,10 +29,14 @@ extern void mm_iommu_init(struct mm_struct *mm);
extern void mm_iommu_cleanup(struct mm_struct *mm);
extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm,
unsigned long ua, unsigned long size);
+extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup_rm(
+ struct mm_struct *mm, unsigned long ua, unsigned long size);
extern struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm,
unsigned long ua, unsigned long entries);
extern long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
unsigned long ua, unsigned long *hpa);
+extern long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem,
+ unsigned long ua, unsigned long *hpa);
extern long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem);
extern void mm_iommu_mapped_dec(struct mm_iommu_table_group_mem_t *mem);
#endif
@@ -51,7 +55,8 @@ static inline void switch_mmu_context(struct mm_struct *prev,
return switch_slb(tsk, next);
}
-extern int __init_new_context(void);
+extern int hash__alloc_context_id(void);
+extern void hash__reserve_context_id(int id);
extern void __destroy_context(int context_id);
static inline void mmu_context_init(void) { }
#else
@@ -70,8 +75,9 @@ extern void drop_cop(unsigned long acop, struct mm_struct *mm);
* switch_mm is the entry point called from the architecture independent
* code in kernel/sched/core.c
*/
-static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
- struct task_struct *tsk)
+static inline void switch_mm_irqs_off(struct mm_struct *prev,
+ struct mm_struct *next,
+ struct task_struct *tsk)
{
/* Mark this context has been used on the new CPU */
if (!cpumask_test_cpu(smp_processor_id(), mm_cpumask(next)))
@@ -110,6 +116,18 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
switch_mmu_context(prev, next, tsk);
}
+static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+ struct task_struct *tsk)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ switch_mm_irqs_off(prev, next, tsk);
+ local_irq_restore(flags);
+}
+#define switch_mm_irqs_off switch_mm_irqs_off
+
+
#define deactivate_mm(tsk,mm) do { } while (0)
/*
diff --git a/arch/powerpc/include/asm/nohash/64/pgtable.h b/arch/powerpc/include/asm/nohash/64/pgtable.h
index c7f927e67d14..f0ff384d4ca5 100644
--- a/arch/powerpc/include/asm/nohash/64/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/64/pgtable.h
@@ -88,11 +88,6 @@
#include <asm/nohash/pte-book3e.h>
#include <asm/pte-common.h>
-#ifdef CONFIG_PPC_MM_SLICES
-#define HAVE_ARCH_UNMAPPED_AREA
-#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
-#endif /* CONFIG_PPC_MM_SLICES */
-
#ifndef __ASSEMBLY__
/* pte_clear moved to later in this file */
diff --git a/arch/powerpc/include/asm/opal-api.h b/arch/powerpc/include/asm/opal-api.h
index a0aa285869b5..cb3e6242a78c 100644
--- a/arch/powerpc/include/asm/opal-api.h
+++ b/arch/powerpc/include/asm/opal-api.h
@@ -40,6 +40,8 @@
#define OPAL_I2C_ARBT_LOST -22
#define OPAL_I2C_NACK_RCVD -23
#define OPAL_I2C_STOP_ERR -24
+#define OPAL_XIVE_PROVISIONING -31
+#define OPAL_XIVE_FREE_ACTIVE -32
/* API Tokens (in r0) */
#define OPAL_INVALID_CALL -1
@@ -168,7 +170,27 @@
#define OPAL_INT_SET_MFRR 125
#define OPAL_PCI_TCE_KILL 126
#define OPAL_NMMU_SET_PTCR 127
-#define OPAL_LAST 127
+#define OPAL_XIVE_RESET 128
+#define OPAL_XIVE_GET_IRQ_INFO 129
+#define OPAL_XIVE_GET_IRQ_CONFIG 130
+#define OPAL_XIVE_SET_IRQ_CONFIG 131
+#define OPAL_XIVE_GET_QUEUE_INFO 132
+#define OPAL_XIVE_SET_QUEUE_INFO 133
+#define OPAL_XIVE_DONATE_PAGE 134
+#define OPAL_XIVE_ALLOCATE_VP_BLOCK 135
+#define OPAL_XIVE_FREE_VP_BLOCK 136
+#define OPAL_XIVE_GET_VP_INFO 137
+#define OPAL_XIVE_SET_VP_INFO 138
+#define OPAL_XIVE_ALLOCATE_IRQ 139
+#define OPAL_XIVE_FREE_IRQ 140
+#define OPAL_XIVE_SYNC 141
+#define OPAL_XIVE_DUMP 142
+#define OPAL_XIVE_RESERVED3 143
+#define OPAL_XIVE_RESERVED4 144
+#define OPAL_NPU_INIT_CONTEXT 146
+#define OPAL_NPU_DESTROY_CONTEXT 147
+#define OPAL_NPU_MAP_LPAR 148
+#define OPAL_LAST 148
/* Device tree flags */
@@ -928,6 +950,59 @@ enum {
OPAL_PCI_TCE_KILL_ALL,
};
+/* The xive operation mode indicates the active "API" and
+ * corresponds to the "mode" parameter of the opal_xive_reset()
+ * call
+ */
+enum {
+ OPAL_XIVE_MODE_EMU = 0,
+ OPAL_XIVE_MODE_EXPL = 1,
+};
+
+/* Flags for OPAL_XIVE_GET_IRQ_INFO */
+enum {
+ OPAL_XIVE_IRQ_TRIGGER_PAGE = 0x00000001,
+ OPAL_XIVE_IRQ_STORE_EOI = 0x00000002,
+ OPAL_XIVE_IRQ_LSI = 0x00000004,
+ OPAL_XIVE_IRQ_SHIFT_BUG = 0x00000008,
+ OPAL_XIVE_IRQ_MASK_VIA_FW = 0x00000010,
+ OPAL_XIVE_IRQ_EOI_VIA_FW = 0x00000020,
+};
+
+/* Flags for OPAL_XIVE_GET/SET_QUEUE_INFO */
+enum {
+ OPAL_XIVE_EQ_ENABLED = 0x00000001,
+ OPAL_XIVE_EQ_ALWAYS_NOTIFY = 0x00000002,
+ OPAL_XIVE_EQ_ESCALATE = 0x00000004,
+};
+
+/* Flags for OPAL_XIVE_GET/SET_VP_INFO */
+enum {
+ OPAL_XIVE_VP_ENABLED = 0x00000001,
+};
+
+/* "Any chip" replacement for chip ID for allocation functions */
+enum {
+ OPAL_XIVE_ANY_CHIP = 0xffffffff,
+};
+
+/* Xive sync options */
+enum {
+ /* These bits are cumulative, arg is a girq */
+ XIVE_SYNC_EAS = 0x00000001, /* Sync irq source */
+ XIVE_SYNC_QUEUE = 0x00000002, /* Sync irq target */
+};
+
+/* Dump options */
+enum {
+ XIVE_DUMP_TM_HYP = 0,
+ XIVE_DUMP_TM_POOL = 1,
+ XIVE_DUMP_TM_OS = 2,
+ XIVE_DUMP_TM_USER = 3,
+ XIVE_DUMP_VP = 4,
+ XIVE_DUMP_EMU_STATE = 5,
+};
+
#endif /* __ASSEMBLY__ */
#endif /* __OPAL_API_H */
diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h
index 1ff03a6da76e..588fb1c23af9 100644
--- a/arch/powerpc/include/asm/opal.h
+++ b/arch/powerpc/include/asm/opal.h
@@ -29,6 +29,11 @@ extern struct device_node *opal_node;
/* API functions */
int64_t opal_invalid_call(void);
+int64_t opal_npu_destroy_context(uint64_t phb_id, uint64_t pid, uint64_t bdf);
+int64_t opal_npu_init_context(uint64_t phb_id, int pasid, uint64_t msr,
+ uint64_t bdf);
+int64_t opal_npu_map_lpar(uint64_t phb_id, uint64_t bdf, uint64_t lparid,
+ uint64_t lpcr);
int64_t opal_console_write(int64_t term_number, __be64 *length,
const uint8_t *buffer);
int64_t opal_console_read(int64_t term_number, __be64 *length,
@@ -226,6 +231,42 @@ int64_t opal_pci_tce_kill(uint64_t phb_id, uint32_t kill_type,
uint32_t pe_num, uint32_t tce_size,
uint64_t dma_addr, uint32_t npages);
int64_t opal_nmmu_set_ptcr(uint64_t chip_id, uint64_t ptcr);
+int64_t opal_xive_reset(uint64_t version);
+int64_t opal_xive_get_irq_info(uint32_t girq,
+ __be64 *out_flags,
+ __be64 *out_eoi_page,
+ __be64 *out_trig_page,
+ __be32 *out_esb_shift,
+ __be32 *out_src_chip);
+int64_t opal_xive_get_irq_config(uint32_t girq, __be64 *out_vp,
+ uint8_t *out_prio, __be32 *out_lirq);
+int64_t opal_xive_set_irq_config(uint32_t girq, uint64_t vp, uint8_t prio,
+ uint32_t lirq);
+int64_t opal_xive_get_queue_info(uint64_t vp, uint32_t prio,
+ __be64 *out_qpage,
+ __be64 *out_qsize,
+ __be64 *out_qeoi_page,
+ __be32 *out_escalate_irq,
+ __be64 *out_qflags);
+int64_t opal_xive_set_queue_info(uint64_t vp, uint32_t prio,
+ uint64_t qpage,
+ uint64_t qsize,
+ uint64_t qflags);
+int64_t opal_xive_donate_page(uint32_t chip_id, uint64_t addr);
+int64_t opal_xive_alloc_vp_block(uint32_t alloc_order);
+int64_t opal_xive_free_vp_block(uint64_t vp);
+int64_t opal_xive_get_vp_info(uint64_t vp,
+ __be64 *out_flags,
+ __be64 *out_cam_value,
+ __be64 *out_report_cl_pair,
+ __be32 *out_chip_id);
+int64_t opal_xive_set_vp_info(uint64_t vp,
+ uint64_t flags,
+ uint64_t report_cl_pair);
+int64_t opal_xive_allocate_irq(uint32_t chip_id);
+int64_t opal_xive_free_irq(uint32_t girq);
+int64_t opal_xive_sync(uint32_t type, uint32_t id);
+int64_t opal_xive_dump(uint32_t type, uint32_t id);
/* Internal functions */
extern int early_init_dt_scan_opal(unsigned long node, const char *uname,
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index 708c3e592eeb..1c09f8fe2ee8 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -99,7 +99,6 @@ struct paca_struct {
*/
/* used for most interrupts/exceptions */
u64 exgen[13] __attribute__((aligned(0x80)));
- u64 exmc[13]; /* used for machine checks */
u64 exslb[13]; /* used for SLB/segment table misses
* on the linear mapping */
/* SLB related definitions */
@@ -139,6 +138,7 @@ struct paca_struct {
#ifdef CONFIG_PPC_MM_SLICES
u64 mm_ctx_low_slices_psize;
unsigned char mm_ctx_high_slices_psize[SLICE_ARRAY_SIZE];
+ unsigned long addr_limit;
#else
u16 mm_ctx_user_psize;
u16 mm_ctx_sllp;
@@ -172,17 +172,31 @@ struct paca_struct {
u8 thread_mask;
/* Mask to denote subcore sibling threads */
u8 subcore_sibling_mask;
+ /*
+ * Pointer to an array which contains pointers
+ * to the sibling threads' paca.
+ */
+ struct paca_struct **thread_sibling_pacas;
#endif
+#ifdef CONFIG_PPC_STD_MMU_64
+ /* Non-maskable exceptions that are not performance critical */
+ u64 exnmi[13]; /* used for system reset (nmi) */
+ u64 exmc[13]; /* used for machine checks */
+#endif
#ifdef CONFIG_PPC_BOOK3S_64
- /* Exclusive emergency stack pointer for machine check exception. */
+ /* Exclusive stacks for system reset and machine check exception. */
+ void *nmi_emergency_sp;
void *mc_emergency_sp;
+
+ u16 in_nmi; /* In nmi handler */
+
/*
* Flag to check whether we are in machine check early handler
* and already using emergency stack.
*/
u16 in_mce;
- u8 hmi_event_available; /* HMI event is available */
+ u8 hmi_event_available; /* HMI event is available */
#endif
/* Stuff for accurate time accounting */
@@ -206,23 +220,7 @@ struct paca_struct {
#endif
};
-#ifdef CONFIG_PPC_BOOK3S
-static inline void copy_mm_to_paca(mm_context_t *context)
-{
- get_paca()->mm_ctx_id = context->id;
-#ifdef CONFIG_PPC_MM_SLICES
- get_paca()->mm_ctx_low_slices_psize = context->low_slices_psize;
- memcpy(&get_paca()->mm_ctx_high_slices_psize,
- &context->high_slices_psize, SLICE_ARRAY_SIZE);
-#else
- get_paca()->mm_ctx_user_psize = context->user_psize;
- get_paca()->mm_ctx_sllp = context->sllp;
-#endif
-}
-#else
-static inline void copy_mm_to_paca(mm_context_t *context){}
-#endif
-
+extern void copy_mm_to_paca(struct mm_struct *mm);
extern struct paca_struct *paca;
extern void initialise_paca(struct paca_struct *new_paca, int cpu);
extern void setup_paca(struct paca_struct *new_paca);
diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
index 3e83d2a20b6f..c4d9654bd637 100644
--- a/arch/powerpc/include/asm/page_64.h
+++ b/arch/powerpc/include/asm/page_64.h
@@ -98,21 +98,7 @@ extern u64 ppc64_pft_size;
#define GET_LOW_SLICE_INDEX(addr) ((addr) >> SLICE_LOW_SHIFT)
#define GET_HIGH_SLICE_INDEX(addr) ((addr) >> SLICE_HIGH_SHIFT)
-/*
- * 1 bit per slice and we have one slice per 1TB
- * Right now we support only 64TB.
- * IF we change this we will have to change the type
- * of high_slices
- */
-#define SLICE_MASK_SIZE 8
-
#ifndef __ASSEMBLY__
-
-struct slice_mask {
- u16 low_slices;
- u64 high_slices;
-};
-
struct mm_struct;
extern unsigned long slice_get_unmapped_area(unsigned long addr,
diff --git a/arch/powerpc/include/asm/pci.h b/arch/powerpc/include/asm/pci.h
index 93eded8d3843..c8975dac535f 100644
--- a/arch/powerpc/include/asm/pci.h
+++ b/arch/powerpc/include/asm/pci.h
@@ -77,12 +77,11 @@ extern int pci_domain_nr(struct pci_bus *bus);
extern int pci_proc_domain(struct pci_bus *bus);
struct vm_area_struct;
-/* Map a range of PCI memory or I/O space for a device into user space */
-int pci_mmap_page_range(struct pci_dev *pdev, struct vm_area_struct *vma,
- enum pci_mmap_state mmap_state, int write_combine);
-/* Tell drivers/pci/proc.c that we have pci_mmap_page_range() */
-#define HAVE_PCI_MMAP 1
+/* Tell drivers/pci/proc.c that we have pci_mmap_page_range() and it does WC */
+#define HAVE_PCI_MMAP 1
+#define arch_can_pci_mmap_io() 1
+#define arch_can_pci_mmap_wc() 1
extern int pci_legacy_read(struct pci_bus *bus, loff_t port, u32 *val,
size_t count);
diff --git a/arch/powerpc/include/asm/perf_event_server.h b/arch/powerpc/include/asm/perf_event_server.h
index ae0a23091a9b..723bf48e7494 100644
--- a/arch/powerpc/include/asm/perf_event_server.h
+++ b/arch/powerpc/include/asm/perf_event_server.h
@@ -38,6 +38,9 @@ struct power_pmu {
unsigned long *valp);
int (*get_alternatives)(u64 event_id, unsigned int flags,
u64 alt[]);
+ void (*get_mem_data_src)(union perf_mem_data_src *dsrc,
+ u32 flags, struct pt_regs *regs);
+ void (*get_mem_weight)(u64 *weight);
u64 (*bhrb_filter_map)(u64 branch_sample_type);
void (*config_bhrb)(u64 pmu_bhrb_filter);
void (*disable_pmc)(unsigned int pmc, unsigned long mmcr[]);
diff --git a/arch/powerpc/include/asm/powernv.h b/arch/powerpc/include/asm/powernv.h
index 0e9c2402dd20..f62797702300 100644
--- a/arch/powerpc/include/asm/powernv.h
+++ b/arch/powerpc/include/asm/powernv.h
@@ -11,9 +11,31 @@
#define _ASM_POWERNV_H
#ifdef CONFIG_PPC_POWERNV
+#define NPU2_WRITE 1
extern void powernv_set_nmmu_ptcr(unsigned long ptcr);
+extern struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev,
+ unsigned long flags,
+ struct npu_context *(*cb)(struct npu_context *, void *),
+ void *priv);
+extern void pnv_npu2_destroy_context(struct npu_context *context,
+ struct pci_dev *gpdev);
+extern int pnv_npu2_handle_fault(struct npu_context *context, uintptr_t *ea,
+ unsigned long *flags, unsigned long *status,
+ int count);
#else
static inline void powernv_set_nmmu_ptcr(unsigned long ptcr) { }
+static inline struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev,
+ unsigned long flags,
+ struct npu_context *(*cb)(struct npu_context *, void *),
+ void *priv) { return ERR_PTR(-ENODEV); }
+static inline void pnv_npu2_destroy_context(struct npu_context *context,
+ struct pci_dev *gpdev) { }
+
+static inline int pnv_npu2_handle_fault(struct npu_context *context,
+ uintptr_t *ea, unsigned long *flags,
+ unsigned long *status, int count) {
+ return -ENODEV;
+}
#endif
#endif /* _ASM_POWERNV_H */
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index e7d6d86563ee..3a8d278e7421 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -86,32 +86,79 @@
#define OP_TRAP_64 2
#define OP_31_XOP_TRAP 4
+#define OP_31_XOP_LDX 21
#define OP_31_XOP_LWZX 23
+#define OP_31_XOP_LDUX 53
#define OP_31_XOP_DCBST 54
#define OP_31_XOP_LWZUX 55
#define OP_31_XOP_TRAP_64 68
#define OP_31_XOP_DCBF 86
#define OP_31_XOP_LBZX 87
+#define OP_31_XOP_STDX 149
#define OP_31_XOP_STWX 151
+#define OP_31_XOP_STDUX 181
+#define OP_31_XOP_STWUX 183
#define OP_31_XOP_STBX 215
#define OP_31_XOP_LBZUX 119
#define OP_31_XOP_STBUX 247
#define OP_31_XOP_LHZX 279
#define OP_31_XOP_LHZUX 311
#define OP_31_XOP_MFSPR 339
+#define OP_31_XOP_LWAX 341
#define OP_31_XOP_LHAX 343
+#define OP_31_XOP_LWAUX 373
#define OP_31_XOP_LHAUX 375
#define OP_31_XOP_STHX 407
#define OP_31_XOP_STHUX 439
#define OP_31_XOP_MTSPR 467
#define OP_31_XOP_DCBI 470
+#define OP_31_XOP_LDBRX 532
#define OP_31_XOP_LWBRX 534
#define OP_31_XOP_TLBSYNC 566
+#define OP_31_XOP_STDBRX 660
#define OP_31_XOP_STWBRX 662
+#define OP_31_XOP_STFSX 663
+#define OP_31_XOP_STFSUX 695
+#define OP_31_XOP_STFDX 727
+#define OP_31_XOP_STFDUX 759
#define OP_31_XOP_LHBRX 790
+#define OP_31_XOP_LFIWAX 855
+#define OP_31_XOP_LFIWZX 887
#define OP_31_XOP_STHBRX 918
+#define OP_31_XOP_STFIWX 983
+
+/* VSX Scalar Load Instructions */
+#define OP_31_XOP_LXSDX 588
+#define OP_31_XOP_LXSSPX 524
+#define OP_31_XOP_LXSIWAX 76
+#define OP_31_XOP_LXSIWZX 12
+
+/* VSX Scalar Store Instructions */
+#define OP_31_XOP_STXSDX 716
+#define OP_31_XOP_STXSSPX 652
+#define OP_31_XOP_STXSIWX 140
+
+/* VSX Vector Load Instructions */
+#define OP_31_XOP_LXVD2X 844
+#define OP_31_XOP_LXVW4X 780
+
+/* VSX Vector Load and Splat Instruction */
+#define OP_31_XOP_LXVDSX 332
+
+/* VSX Vector Store Instructions */
+#define OP_31_XOP_STXVD2X 972
+#define OP_31_XOP_STXVW4X 908
+
+#define OP_31_XOP_LFSX 535
+#define OP_31_XOP_LFSUX 567
+#define OP_31_XOP_LFDX 599
+#define OP_31_XOP_LFDUX 631
#define OP_LWZ 32
+#define OP_STFS 52
+#define OP_STFSU 53
+#define OP_STFD 54
+#define OP_STFDU 55
#define OP_LD 58
#define OP_LWZU 33
#define OP_LBZ 34
@@ -127,6 +174,17 @@
#define OP_LHAU 43
#define OP_STH 44
#define OP_STHU 45
+#define OP_LMW 46
+#define OP_STMW 47
+#define OP_LFS 48
+#define OP_LFSU 49
+#define OP_LFD 50
+#define OP_LFDU 51
+#define OP_STFS 52
+#define OP_STFSU 53
+#define OP_STFD 54
+#define OP_STFDU 55
+#define OP_LQ 56
/* sorted alphabetically */
#define PPC_INST_BHRBE 0x7c00025c
@@ -161,6 +219,7 @@
#define PPC_INST_MFTMR 0x7c0002dc
#define PPC_INST_MSGSND 0x7c00019c
#define PPC_INST_MSGCLR 0x7c0001dc
+#define PPC_INST_MSGSYNC 0x7c0006ec
#define PPC_INST_MSGSNDP 0x7c00011c
#define PPC_INST_MTTMR 0x7c0003dc
#define PPC_INST_NOP 0x60000000
@@ -345,6 +404,7 @@
___PPC_RB(b) | __PPC_EH(eh))
#define PPC_MSGSND(b) stringify_in_c(.long PPC_INST_MSGSND | \
___PPC_RB(b))
+#define PPC_MSGSYNC stringify_in_c(.long PPC_INST_MSGSYNC)
#define PPC_MSGCLR(b) stringify_in_c(.long PPC_INST_MSGCLR | \
___PPC_RB(b))
#define PPC_MSGSNDP(b) stringify_in_c(.long PPC_INST_MSGSNDP | \
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index e0fecbcea2a2..a4b1d8d6b793 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -102,11 +102,25 @@ void release_thread(struct task_struct *);
#endif
#ifdef CONFIG_PPC64
-/* 64-bit user address space is 46-bits (64TB user VM) */
-#define TASK_SIZE_USER64 (0x0000400000000000UL)
+/*
+ * 64-bit user address space can have multiple limits
+ * For now supported values are:
+ */
+#define TASK_SIZE_64TB (0x0000400000000000UL)
+#define TASK_SIZE_128TB (0x0000800000000000UL)
+#define TASK_SIZE_512TB (0x0002000000000000UL)
+
+#ifdef CONFIG_PPC_BOOK3S_64
+/*
+ * Max value currently used:
+ */
+#define TASK_SIZE_USER64 TASK_SIZE_512TB
+#else
+#define TASK_SIZE_USER64 TASK_SIZE_64TB
+#endif
-/*
- * 32-bit user address space is 4GB - 1 page
+/*
+ * 32-bit user address space is 4GB - 1 page
* (this 1 page is needed so referencing of 0xFFFFFFFF generates EFAULT
*/
#define TASK_SIZE_USER32 (0x0000000100000000UL - (1*PAGE_SIZE))
@@ -114,26 +128,37 @@ void release_thread(struct task_struct *);
#define TASK_SIZE_OF(tsk) (test_tsk_thread_flag(tsk, TIF_32BIT) ? \
TASK_SIZE_USER32 : TASK_SIZE_USER64)
#define TASK_SIZE TASK_SIZE_OF(current)
-
/* This decides where the kernel will search for a free chunk of vm
* space during mmap's.
*/
#define TASK_UNMAPPED_BASE_USER32 (PAGE_ALIGN(TASK_SIZE_USER32 / 4))
-#define TASK_UNMAPPED_BASE_USER64 (PAGE_ALIGN(TASK_SIZE_USER64 / 4))
+#define TASK_UNMAPPED_BASE_USER64 (PAGE_ALIGN(TASK_SIZE_128TB / 4))
#define TASK_UNMAPPED_BASE ((is_32bit_task()) ? \
TASK_UNMAPPED_BASE_USER32 : TASK_UNMAPPED_BASE_USER64 )
#endif
+/*
+ * Initial task size value for user applications. For book3s 64 we start
+ * with 128TB and conditionally enable up to 512TB
+ */
+#ifdef CONFIG_PPC_BOOK3S_64
+#define DEFAULT_MAP_WINDOW ((is_32bit_task()) ? \
+ TASK_SIZE_USER32 : TASK_SIZE_128TB)
+#else
+#define DEFAULT_MAP_WINDOW TASK_SIZE
+#endif
+
#ifdef __powerpc64__
-#define STACK_TOP_USER64 TASK_SIZE_USER64
+/* Limit stack to 128TB */
+#define STACK_TOP_USER64 TASK_SIZE_128TB
#define STACK_TOP_USER32 TASK_SIZE_USER32
#define STACK_TOP (is_32bit_task() ? \
STACK_TOP_USER32 : STACK_TOP_USER64)
-#define STACK_TOP_MAX STACK_TOP_USER64
+#define STACK_TOP_MAX TASK_SIZE_USER64
#else /* __powerpc64__ */
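
The new TASK_SIZE_64TB/128TB/512TB constants and the DEFAULT_MAP_WINDOW comment above depend on those hex values being exact powers of two. The small standalone program below only verifies that arithmetic; the constants are copied from the hunk and nothing else is taken from the kernel.

#include <stdio.h>

#define TiB (1024ULL * 1024 * 1024 * 1024)

int main(void)
{
	printf("TASK_SIZE_64TB  = 0x%016llx (%llu TiB)\n",
	       0x0000400000000000ULL, 0x0000400000000000ULL / TiB);
	printf("TASK_SIZE_128TB = 0x%016llx (%llu TiB)\n",
	       0x0000800000000000ULL, 0x0000800000000000ULL / TiB);
	printf("TASK_SIZE_512TB = 0x%016llx (%llu TiB)\n",
	       0x0002000000000000ULL, 0x0002000000000000ULL / TiB);
	return 0;
}
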
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index fc879fd6bdae..d4f653c9259a 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -310,6 +310,7 @@
#define SPRN_PMCR 0x374 /* Power Management Control Register */
/* HFSCR and FSCR bit numbers are the same */
+#define FSCR_SCV_LG 12 /* Enable System Call Vectored */
#define FSCR_MSGP_LG 10 /* Enable MSGP */
#define FSCR_TAR_LG 8 /* Enable Target Address Register */
#define FSCR_EBB_LG 7 /* Enable Event Based Branching */
@@ -320,6 +321,7 @@
#define FSCR_VECVSX_LG 1 /* Enable VMX/VSX */
#define FSCR_FP_LG 0 /* Enable Floating Point */
#define SPRN_FSCR 0x099 /* Facility Status & Control Register */
+#define FSCR_SCV __MASK(FSCR_SCV_LG)
#define FSCR_TAR __MASK(FSCR_TAR_LG)
#define FSCR_EBB __MASK(FSCR_EBB_LG)
#define FSCR_DSCR __MASK(FSCR_DSCR_LG)
@@ -365,6 +367,7 @@
#define LPCR_MER_SH 11
#define LPCR_GTSE ASM_CONST(0x0000000000000400) /* Guest Translation Shootdown Enable */
#define LPCR_TC ASM_CONST(0x0000000000000200) /* Translation control */
+#define LPCR_HEIC ASM_CONST(0x0000000000000010) /* Hypervisor External Interrupt Control */
#define LPCR_LPES 0x0000000c
#define LPCR_LPES0 ASM_CONST(0x0000000000000008) /* LPAR Env selector 0 */
#define LPCR_LPES1 ASM_CONST(0x0000000000000004) /* LPAR Env selector 1 */
@@ -656,6 +659,7 @@
#define SRR1_ISI_PROT 0x08000000 /* ISI: Other protection fault */
#define SRR1_WAKEMASK 0x00380000 /* reason for wakeup */
#define SRR1_WAKEMASK_P8 0x003c0000 /* reason for wakeup on POWER8 and 9 */
+#define SRR1_WAKEMCE_RESVD 0x003c0000 /* Unused/reserved value used by MCE wakeup to indicate cause to idle wakeup handler */
#define SRR1_WAKESYSERR 0x00300000 /* System error */
#define SRR1_WAKEEE 0x00200000 /* External interrupt */
#define SRR1_WAKEHVI 0x00240000 /* Hypervisor Virtualization Interrupt (P9) */
diff --git a/arch/powerpc/include/asm/sections.h b/arch/powerpc/include/asm/sections.h
index 7dc006b58369..7902d6358854 100644
--- a/arch/powerpc/include/asm/sections.h
+++ b/arch/powerpc/include/asm/sections.h
@@ -6,6 +6,8 @@
#include <linux/uaccess.h>
#include <asm-generic/sections.h>
+extern char __head_end[];
+
#ifdef __powerpc64__
extern char __start_interrupts[];
diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
index 32db16d2e7ad..ebddb2111d87 100644
--- a/arch/powerpc/include/asm/smp.h
+++ b/arch/powerpc/include/asm/smp.h
@@ -40,10 +40,12 @@ extern int cpu_to_chip_id(int cpu);
struct smp_ops_t {
void (*message_pass)(int cpu, int msg);
#ifdef CONFIG_PPC_SMP_MUXED_IPI
- void (*cause_ipi)(int cpu, unsigned long data);
+ void (*cause_ipi)(int cpu);
#endif
+ int (*cause_nmi_ipi)(int cpu);
void (*probe)(void);
int (*kick_cpu)(int nr);
+ int (*prepare_cpu)(int nr);
void (*setup_cpu)(int nr);
void (*bringup_done)(void);
void (*take_timebase)(void);
@@ -61,7 +63,6 @@ extern void smp_generic_take_timebase(void);
DECLARE_PER_CPU(unsigned int, cpu_pvr);
#ifdef CONFIG_HOTPLUG_CPU
-extern void migrate_irqs(void);
int generic_cpu_disable(void);
void generic_cpu_die(unsigned int cpu);
void generic_set_cpu_dead(unsigned int cpu);
@@ -112,23 +113,31 @@ extern int cpu_to_core_id(int cpu);
*
* Make sure this matches openpic_request_IPIs in open_pic.c, or what shows up
* in /proc/interrupts will be wrong!!! --Troy */
-#define PPC_MSG_CALL_FUNCTION 0
-#define PPC_MSG_RESCHEDULE 1
+#define PPC_MSG_CALL_FUNCTION 0
+#define PPC_MSG_RESCHEDULE 1
#define PPC_MSG_TICK_BROADCAST 2
-#define PPC_MSG_DEBUGGER_BREAK 3
+#define PPC_MSG_NMI_IPI 3
/* This is only used by the powernv kernel */
#define PPC_MSG_RM_HOST_ACTION 4
+#define NMI_IPI_ALL_OTHERS -2
+
+#ifdef CONFIG_NMI_IPI
+extern int smp_handle_nmi_ipi(struct pt_regs *regs);
+#else
+static inline int smp_handle_nmi_ipi(struct pt_regs *regs) { return 0; }
+#endif
+
/* for irq controllers that have dedicated ipis per message (4) */
extern int smp_request_message_ipi(int virq, int message);
extern const char *smp_ipi_name[];
/* for irq controllers with only a single ipi */
-extern void smp_muxed_ipi_set_data(int cpu, unsigned long data);
extern void smp_muxed_ipi_message_pass(int cpu, int msg);
extern void smp_muxed_ipi_set_message(int cpu, int msg);
extern irqreturn_t smp_ipi_demux(void);
+extern irqreturn_t smp_ipi_demux_relaxed(void);
void smp_init_pSeries(void);
void smp_init_cell(void);
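
The smp.h changes above replace PPC_MSG_DEBUGGER_BREAK with PPC_MSG_NMI_IPI and expose smp_handle_nmi_ipi() for CONFIG_NMI_IPI builds. A hypothetical sketch of how a muxed-IPI demux path could route the new message; the dispatch function and its name are illustrative only, not code from this series:

	#include <linux/smp.h>
	#include <linux/sched.h>
	#include <asm/ptrace.h>
	#include <asm/smp.h>
	#include <asm/time.h>

	/* Illustrative dispatch of one decoded IPI message. */
	static void demux_one_message(int msg, struct pt_regs *regs)
	{
		switch (msg) {
		case PPC_MSG_CALL_FUNCTION:
			generic_smp_call_function_interrupt();
			break;
		case PPC_MSG_RESCHEDULE:
			scheduler_ipi();
			break;
		case PPC_MSG_TICK_BROADCAST:
			tick_broadcast_ipi_handler();
			break;
		case PPC_MSG_NMI_IPI:
			smp_handle_nmi_ipi(regs);	/* new in this series */
			break;
		}
	}
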
diff --git a/arch/powerpc/include/asm/syscalls.h b/arch/powerpc/include/asm/syscalls.h
index 23be8f1e7e64..16fab6898240 100644
--- a/arch/powerpc/include/asm/syscalls.h
+++ b/arch/powerpc/include/asm/syscalls.h
@@ -8,10 +8,10 @@
struct rtas_args;
-asmlinkage unsigned long sys_mmap(unsigned long addr, size_t len,
+asmlinkage long sys_mmap(unsigned long addr, size_t len,
unsigned long prot, unsigned long flags,
unsigned long fd, off_t offset);
-asmlinkage unsigned long sys_mmap2(unsigned long addr, size_t len,
+asmlinkage long sys_mmap2(unsigned long addr, size_t len,
unsigned long prot, unsigned long flags,
unsigned long fd, unsigned long pgoff);
asmlinkage long ppc64_personality(unsigned long personality);
diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
index 6fc6464f7421..a941cc6fc3e9 100644
--- a/arch/powerpc/include/asm/thread_info.h
+++ b/arch/powerpc/include/asm/thread_info.h
@@ -10,15 +10,7 @@
#ifdef __KERNEL__
-/* We have 8k stacks on ppc32 and 16k on ppc64 */
-
-#if defined(CONFIG_PPC64)
-#define THREAD_SHIFT 14
-#elif defined(CONFIG_PPC_256K_PAGES)
-#define THREAD_SHIFT 15
-#else
-#define THREAD_SHIFT 13
-#endif
+#define THREAD_SHIFT CONFIG_THREAD_SHIFT
#define THREAD_SIZE (1 << THREAD_SHIFT)
diff --git a/arch/powerpc/include/asm/xics.h b/arch/powerpc/include/asm/xics.h
index e0b9e576905a..7ce2c3ac2964 100644
--- a/arch/powerpc/include/asm/xics.h
+++ b/arch/powerpc/include/asm/xics.h
@@ -57,7 +57,7 @@ struct icp_ops {
void (*teardown_cpu)(void);
void (*flush_ipi)(void);
#ifdef CONFIG_SMP
- void (*cause_ipi)(int cpu, unsigned long data);
+ void (*cause_ipi)(int cpu);
irq_handler_t ipi_action;
#endif
};
diff --git a/arch/powerpc/include/asm/xive-regs.h b/arch/powerpc/include/asm/xive-regs.h
new file mode 100644
index 000000000000..1d3f2be5ae39
--- /dev/null
+++ b/arch/powerpc/include/asm/xive-regs.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright 2016,2017 IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _ASM_POWERPC_XIVE_REGS_H
+#define _ASM_POWERPC_XIVE_REGS_H
+
+/*
+ * Thread Management (aka "TM") registers
+ */
+
+/* TM register offsets */
+#define TM_QW0_USER 0x000 /* All rings */
+#define TM_QW1_OS 0x010 /* Ring 0..2 */
+#define TM_QW2_HV_POOL 0x020 /* Ring 0..1 */
+#define TM_QW3_HV_PHYS 0x030 /* Ring 0..1 */
+
+/* Byte offsets inside a QW QW0 QW1 QW2 QW3 */
+#define TM_NSR 0x0 /* + + - + */
+#define TM_CPPR 0x1 /* - + - + */
+#define TM_IPB 0x2 /* - + + + */
+#define TM_LSMFB 0x3 /* - + + + */
+#define TM_ACK_CNT 0x4 /* - + - - */
+#define TM_INC 0x5 /* - + - + */
+#define TM_AGE 0x6 /* - + - + */
+#define TM_PIPR 0x7 /* - + - + */
+
+#define TM_WORD0 0x0
+#define TM_WORD1 0x4
+
+/*
+ * QW word 2 contains the valid bit at the top and other fields
+ * depending on the QW.
+ */
+#define TM_WORD2 0x8
+#define TM_QW0W2_VU PPC_BIT32(0)
+#define TM_QW0W2_LOGIC_SERV PPC_BITMASK32(1,31) // XX 2,31 ?
+#define TM_QW1W2_VO PPC_BIT32(0)
+#define TM_QW1W2_OS_CAM PPC_BITMASK32(8,31)
+#define TM_QW2W2_VP PPC_BIT32(0)
+#define TM_QW2W2_POOL_CAM PPC_BITMASK32(8,31)
+#define TM_QW3W2_VT PPC_BIT32(0)
+#define TM_QW3W2_LP PPC_BIT32(6)
+#define TM_QW3W2_LE PPC_BIT32(7)
+#define TM_QW3W2_T PPC_BIT32(31)
+
+/*
+ * In addition to normal loads to "peek" and writes (only when invalid)
+ * using 4 and 8 bytes accesses, the above registers support these
+ * "special" byte operations:
+ *
+ * - Byte load from QW0[NSR] - User level NSR (EBB)
+ * - Byte store to QW0[NSR] - User level NSR (EBB)
+ * - Byte load/store to QW1[CPPR] and QW3[CPPR] - CPPR access
+ * - Byte load from QW3[TM_WORD2] - Read VT||00000||LP||LE on thrd 0
+ * otherwise VT||0000000
+ * - Byte store to QW3[TM_WORD2] - Set VT bit (and LP/LE if present)
+ *
+ * Then we have all these "special" CI ops at these offset that trigger
+ * all sorts of side effects:
+ */
+#define TM_SPC_ACK_EBB 0x800 /* Load8 ack EBB to reg*/
+#define TM_SPC_ACK_OS_REG 0x810 /* Load16 ack OS irq to reg */
+#define TM_SPC_PUSH_USR_CTX 0x808 /* Store32 Push/Validate user context */
+#define TM_SPC_PULL_USR_CTX 0x808 /* Load32 Pull/Invalidate user context */
+#define TM_SPC_SET_OS_PENDING 0x812 /* Store8 Set OS irq pending bit */
+#define TM_SPC_PULL_OS_CTX 0x818 /* Load32/Load64 Pull/Invalidate OS context to reg */
+#define TM_SPC_PULL_POOL_CTX 0x828 /* Load32/Load64 Pull/Invalidate Pool context to reg*/
+#define TM_SPC_ACK_HV_REG 0x830 /* Load16 ack HV irq to reg */
+#define TM_SPC_PULL_USR_CTX_OL 0xc08 /* Store8 Pull/Inval usr ctx to odd line */
+#define TM_SPC_ACK_OS_EL 0xc10 /* Store8 ack OS irq to even line */
+#define TM_SPC_ACK_HV_POOL_EL 0xc20 /* Store8 ack HV evt pool to even line */
+#define TM_SPC_ACK_HV_EL 0xc30 /* Store8 ack HV irq to even line */
+/* XXX more... */
+
+/* NSR fields for the various QW ack types */
+#define TM_QW0_NSR_EB PPC_BIT8(0)
+#define TM_QW1_NSR_EO PPC_BIT8(0)
+#define TM_QW3_NSR_HE PPC_BITMASK8(0,1)
+#define TM_QW3_NSR_HE_NONE 0
+#define TM_QW3_NSR_HE_POOL 1
+#define TM_QW3_NSR_HE_PHYS 2
+#define TM_QW3_NSR_HE_LSI 3
+#define TM_QW3_NSR_I PPC_BIT8(2)
+#define TM_QW3_NSR_GRP_LVL PPC_BIT8(3,7)
+
+/* Utilities to manipulate these (originally from OPAL) */
+#define MASK_TO_LSH(m) (__builtin_ffsl(m) - 1)
+#define GETFIELD(m, v) (((v) & (m)) >> MASK_TO_LSH(m))
+#define SETFIELD(m, v, val) \
+ (((v) & ~(m)) | ((((typeof(v))(val)) << MASK_TO_LSH(m)) & (m)))
+
+#endif /* _ASM_POWERPC_XIVE_REGS_H */
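
GETFIELD()/SETFIELD() at the end of the new header are generic mask-based accessors: MASK_TO_LSH() finds the shift of a field's least significant bit, so callers never hard-code shifts. A self-contained illustration with an arbitrary mask (not one of the TM_* fields above):

	#include <stdio.h>
	#include <stdint.h>

	#define MASK_TO_LSH(m)		(__builtin_ffsl(m) - 1)
	#define GETFIELD(m, v)		(((v) & (m)) >> MASK_TO_LSH(m))
	#define SETFIELD(m, v, val)	\
		(((v) & ~(m)) | ((((typeof(v))(val)) << MASK_TO_LSH(m)) & (m)))

	int main(void)
	{
		uint64_t reg = 0;
		const uint64_t mask = 0x0000ff00;	/* arbitrary 8-bit field */

		reg = SETFIELD(mask, reg, 0x5aUL);	/* reg becomes 0x5a00 */
		printf("field = 0x%llx\n",
		       (unsigned long long)GETFIELD(mask, reg));	/* prints 0x5a */
		return 0;
	}
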
diff --git a/arch/powerpc/include/asm/xive.h b/arch/powerpc/include/asm/xive.h
new file mode 100644
index 000000000000..3cdbeaeac397
--- /dev/null
+++ b/arch/powerpc/include/asm/xive.h
@@ -0,0 +1,163 @@
+/*
+ * Copyright 2016,2017 IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _ASM_POWERPC_XIVE_H
+#define _ASM_POWERPC_XIVE_H
+
+#define XIVE_INVALID_VP 0xffffffff
+
+#ifdef CONFIG_PPC_XIVE
+
+/*
+ * Thread Interrupt Management Area (TIMA)
+ *
+ * This is a global MMIO region divided into 4 pages of varying access
+ * permissions, providing access to per-cpu interrupt management
+ * functions. It always identifies the CPU doing the access based
+ * on the PowerBus initiator ID, so we always access it via the
+ * same offset regardless of where the code is executing.
+ */
+extern void __iomem *xive_tima;
+
+/*
+ * Offset in the TM area of our current execution level (provided by
+ * the backend)
+ */
+extern u32 xive_tima_offset;
+
+/*
+ * Per-irq data (irq_get_handler_data for normal IRQs); IPIs
+ * have it stored in the xive_cpu structure. For normal interrupts
+ * we also cache the current target CPU.
+ *
+ * This structure is set up by the backend for each interrupt.
+ */
+struct xive_irq_data {
+ u64 flags;
+ u64 eoi_page;
+ void __iomem *eoi_mmio;
+ u64 trig_page;
+ void __iomem *trig_mmio;
+ u32 esb_shift;
+ int src_chip;
+
+ /* Setup/used by frontend */
+ int target;
+ bool saved_p;
+};
+#define XIVE_IRQ_FLAG_STORE_EOI 0x01
+#define XIVE_IRQ_FLAG_LSI 0x02
+#define XIVE_IRQ_FLAG_SHIFT_BUG 0x04
+#define XIVE_IRQ_FLAG_MASK_FW 0x08
+#define XIVE_IRQ_FLAG_EOI_FW 0x10
+
+#define XIVE_INVALID_CHIP_ID -1
+
+/* A queue tracking structure in a CPU */
+struct xive_q {
+ __be32 *qpage;
+ u32 msk;
+ u32 idx;
+ u32 toggle;
+ u64 eoi_phys;
+ u32 esc_irq;
+ atomic_t count;
+ atomic_t pending_count;
+};
+
+/*
+ * "magic" Event State Buffer (ESB) MMIO offsets.
+ *
+ * Each interrupt source has a 2-bit state machine called ESB
+ * which can be controlled by MMIO. It's made of 2 bits, P and
+ * Q. P indicates that an interrupt is pending (has been sent
+ * to a queue and is waiting for an EOI). Q indicates that the
+ * interrupt has been triggered while pending.
+ *
+ * This acts as a coalescing mechanism in order to guarantee
+ * that a given interrupt only occurs at most once in a queue.
+ *
+ * When doing an EOI, the Q bit will indicate if the interrupt
+ * needs to be re-triggered.
+ *
+ * The following offsets into the ESB MMIO allow reading or
+ * manipulating the PQ bits. They must be used with an 8-byte
+ * load instruction. They all return the previous state of the
+ * interrupt (atomically).
+ *
+ * Additionally, some ESB pages support doing an EOI via a
+ * store at 0 and some ESBs support doing a trigger via a
+ * separate trigger page.
+ */
+#define XIVE_ESB_GET 0x800
+#define XIVE_ESB_SET_PQ_00 0xc00
+#define XIVE_ESB_SET_PQ_01 0xd00
+#define XIVE_ESB_SET_PQ_10 0xe00
+#define XIVE_ESB_SET_PQ_11 0xf00
+#define XIVE_ESB_MASK XIVE_ESB_SET_PQ_01
+
+#define XIVE_ESB_VAL_P 0x2
+#define XIVE_ESB_VAL_Q 0x1
+
+/* Global enable flags for the XIVE support */
+extern bool __xive_enabled;
+
+static inline bool xive_enabled(void) { return __xive_enabled; }
+
+extern bool xive_native_init(void);
+extern void xive_smp_probe(void);
+extern int xive_smp_prepare_cpu(unsigned int cpu);
+extern void xive_smp_setup_cpu(void);
+extern void xive_smp_disable_cpu(void);
+extern void xive_kexec_teardown_cpu(int secondary);
+extern void xive_shutdown(void);
+extern void xive_flush_interrupt(void);
+
+/* xmon hook */
+extern void xmon_xive_do_dump(int cpu);
+
+/* APIs used by KVM */
+extern u32 xive_native_default_eq_shift(void);
+extern u32 xive_native_alloc_vp_block(u32 max_vcpus);
+extern void xive_native_free_vp_block(u32 vp_base);
+extern int xive_native_populate_irq_data(u32 hw_irq,
+ struct xive_irq_data *data);
+extern void xive_cleanup_irq_data(struct xive_irq_data *xd);
+extern u32 xive_native_alloc_irq(void);
+extern void xive_native_free_irq(u32 irq);
+extern int xive_native_configure_irq(u32 hw_irq, u32 target, u8 prio, u32 sw_irq);
+
+extern int xive_native_configure_queue(u32 vp_id, struct xive_q *q, u8 prio,
+ __be32 *qpage, u32 order, bool can_escalate);
+extern void xive_native_disable_queue(u32 vp_id, struct xive_q *q, u8 prio);
+
+extern bool __xive_irq_trigger(struct xive_irq_data *xd);
+extern bool __xive_irq_retrigger(struct xive_irq_data *xd);
+extern void xive_do_source_eoi(u32 hw_irq, struct xive_irq_data *xd);
+
+extern bool is_xive_irq(struct irq_chip *chip);
+
+#else
+
+static inline bool xive_enabled(void) { return false; }
+
+static inline bool xive_native_init(void) { return false; }
+static inline void xive_smp_probe(void) { }
+static inline int xive_smp_prepare_cpu(unsigned int cpu) { return -EINVAL; }
+static inline void xive_smp_setup_cpu(void) { }
+static inline void xive_smp_disable_cpu(void) { }
+static inline void xive_kexec_teardown_cpu(int secondary) { }
+static inline void xive_shutdown(void) { }
+static inline void xive_flush_interrupt(void) { }
+
+static inline u32 xive_native_alloc_vp_block(u32 max_vcpus) { return XIVE_INVALID_VP; }
+static inline void xive_native_free_vp_block(u32 vp_base) { }
+
+#endif
+
+#endif /* _ASM_POWERPC_XIVE_H */
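
The ESB comment block in this header describes the 2-bit PQ state machine and the "special" load offsets that atomically return the previous state. A sketch of how a source could be masked using those offsets is shown below; the in_be64() accessor and the assumption that xd->eoi_mmio maps the ESB page are illustrative, the real helpers live in the XIVE common code rather than in this header:

	#include <linux/types.h>
	#include <asm/io.h>
	#include <asm/xive.h>

	/*
	 * Illustrative only: set PQ=01 (masked) and report whether the
	 * source was pending (P set) before the write.
	 */
	static bool xive_mask_source(struct xive_irq_data *xd)
	{
		u64 prev = in_be64(xd->eoi_mmio + XIVE_ESB_SET_PQ_01);

		return (prev & XIVE_ESB_VAL_P) != 0;
	}
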
diff --git a/arch/powerpc/include/asm/xmon.h b/arch/powerpc/include/asm/xmon.h
index 5eb8e599e5cc..eb42a0c6e1d9 100644
--- a/arch/powerpc/include/asm/xmon.h
+++ b/arch/powerpc/include/asm/xmon.h
@@ -29,5 +29,7 @@ static inline void xmon_register_spus(struct list_head *list) { };
extern int cpus_are_in_xmon(void);
#endif
+extern void xmon_printf(const char *format, ...);
+
#endif /* __KERNEL __ */
#endif /* __ASM_POWERPC_XMON_H */
diff --git a/arch/powerpc/include/uapi/asm/kvm.h b/arch/powerpc/include/uapi/asm/kvm.h
index 4edbe4bb0e8b..07fbeb927834 100644
--- a/arch/powerpc/include/uapi/asm/kvm.h
+++ b/arch/powerpc/include/uapi/asm/kvm.h
@@ -29,6 +29,9 @@
#define __KVM_HAVE_IRQ_LINE
#define __KVM_HAVE_GUEST_DEBUG
+/* Not always available, but if it is, this is the correct offset. */
+#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
+
struct kvm_regs {
__u64 pc;
__u64 cr;
diff --git a/arch/powerpc/include/uapi/asm/mman.h b/arch/powerpc/include/uapi/asm/mman.h
index 03c06ba7464f..ab45cc2f3101 100644
--- a/arch/powerpc/include/uapi/asm/mman.h
+++ b/arch/powerpc/include/uapi/asm/mman.h
@@ -29,4 +29,20 @@
#define MAP_STACK 0x20000 /* give out an address that is best suited for process/thread stacks */
#define MAP_HUGETLB 0x40000 /* create a huge page mapping */
+/*
+ * When MAP_HUGETLB is set, bits [26:31] of the flags argument to mmap(2),
+ * encode the log2 of the huge page size. A value of zero indicates that the
+ * default huge page size should be used. To use a non-default huge page size,
+ * one of these defines can be used, or the size can be encoded by hand. Note
+ * that on most systems only a subset, or possibly none, of these sizes will be
+ * available.
+ */
+#define MAP_HUGE_512KB (19 << MAP_HUGE_SHIFT) /* 512KB HugeTLB Page */
+#define MAP_HUGE_1MB (20 << MAP_HUGE_SHIFT) /* 1MB HugeTLB Page */
+#define MAP_HUGE_2MB (21 << MAP_HUGE_SHIFT) /* 2MB HugeTLB Page */
+#define MAP_HUGE_8MB (23 << MAP_HUGE_SHIFT) /* 8MB HugeTLB Page */
+#define MAP_HUGE_16MB (24 << MAP_HUGE_SHIFT) /* 16MB HugeTLB Page */
+#define MAP_HUGE_1GB (30 << MAP_HUGE_SHIFT) /* 1GB HugeTLB Page */
+#define MAP_HUGE_16GB (34 << MAP_HUGE_SHIFT) /* 16GB HugeTLB Page */
+
#endif /* _UAPI_ASM_POWERPC_MMAN_H */
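
The new MAP_HUGE_* constants simply pre-encode log2(page size) in the field described by the comment (bits [26:31] of the mmap flags). A minimal userspace example requesting a 16MB huge page mapping; it assumes a 16MB hugetlb pool has actually been configured on the system, otherwise the mmap() fails:

	#include <stdio.h>
	#include <sys/mman.h>

	#ifndef MAP_HUGE_SHIFT
	#define MAP_HUGE_SHIFT	26
	#endif
	#ifndef MAP_HUGE_16MB
	#define MAP_HUGE_16MB	(24 << MAP_HUGE_SHIFT)	/* matches the new define */
	#endif

	int main(void)
	{
		size_t len = 16UL << 20;	/* one 16MB huge page */
		void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB | MAP_HUGE_16MB,
			       -1, 0);

		if (p == MAP_FAILED) {
			perror("mmap(MAP_HUGETLB | MAP_HUGE_16MB)");
			return 1;
		}
		munmap(p, len);
		return 0;
	}
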
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 811f441a125f..b9db46ae545b 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -25,8 +25,6 @@ CFLAGS_REMOVE_cputable.o = -mno-sched-epilog $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_prom_init.o = -mno-sched-epilog $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_btext.o = -mno-sched-epilog $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_prom.o = -mno-sched-epilog $(CC_FLAGS_FTRACE)
-# do not trace tracer code
-CFLAGS_REMOVE_ftrace.o = -mno-sched-epilog $(CC_FLAGS_FTRACE)
# timers used by tracing
CFLAGS_REMOVE_time.o = -mno-sched-epilog $(CC_FLAGS_FTRACE)
endif
@@ -97,6 +95,7 @@ obj-$(CONFIG_BOOTX_TEXT) += btext.o
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_KPROBES) += kprobes.o
obj-$(CONFIG_OPTPROBES) += optprobes.o optprobes_head.o
+obj-$(CONFIG_KPROBES_ON_FTRACE) += kprobes-ftrace.o
obj-$(CONFIG_UPROBES) += uprobes.o
obj-$(CONFIG_PPC_UDBG_16550) += legacy_serial.o udbg_16550.o
obj-$(CONFIG_STACKTRACE) += stacktrace.o
@@ -118,10 +117,7 @@ obj64-$(CONFIG_AUDIT) += compat_audit.o
obj-$(CONFIG_PPC_IO_WORKAROUNDS) += io-workarounds.o
-obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
-obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
-obj-$(CONFIG_FTRACE_SYSCALLS) += ftrace.o
-obj-$(CONFIG_TRACING) += trace_clock.o
+obj-y += trace/
ifneq ($(CONFIG_PPC_INDIRECT_PIO),y)
obj-y += iomap.o
@@ -142,14 +138,14 @@ obj-$(CONFIG_KVM_GUEST) += kvm.o kvm_emul.o
# Disable GCOV & sanitizers in odd or sensitive code
GCOV_PROFILE_prom_init.o := n
UBSAN_SANITIZE_prom_init.o := n
-GCOV_PROFILE_ftrace.o := n
-UBSAN_SANITIZE_ftrace.o := n
GCOV_PROFILE_machine_kexec_64.o := n
UBSAN_SANITIZE_machine_kexec_64.o := n
GCOV_PROFILE_machine_kexec_32.o := n
UBSAN_SANITIZE_machine_kexec_32.o := n
GCOV_PROFILE_kprobes.o := n
UBSAN_SANITIZE_kprobes.o := n
+GCOV_PROFILE_kprobes-ftrace.o := n
+UBSAN_SANITIZE_kprobes-ftrace.o := n
UBSAN_SANITIZE_vdso.o := n
extra-$(CONFIG_PPC_FPU) += fpu.o
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 4367e7df51a1..439c257dec4a 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -185,6 +185,7 @@ int main(void)
#ifdef CONFIG_PPC_MM_SLICES
OFFSET(PACALOWSLICESPSIZE, paca_struct, mm_ctx_low_slices_psize);
OFFSET(PACAHIGHSLICEPSIZE, paca_struct, mm_ctx_high_slices_psize);
+ DEFINE(PACA_ADDR_LIMIT, offsetof(struct paca_struct, addr_limit));
DEFINE(MMUPSIZEDEFSIZE, sizeof(struct mmu_psize_def));
#endif /* CONFIG_PPC_MM_SLICES */
#endif
@@ -219,6 +220,7 @@ int main(void)
OFFSET(PACA_EXGEN, paca_struct, exgen);
OFFSET(PACA_EXMC, paca_struct, exmc);
OFFSET(PACA_EXSLB, paca_struct, exslb);
+ OFFSET(PACA_EXNMI, paca_struct, exnmi);
OFFSET(PACALPPACAPTR, paca_struct, lppaca_ptr);
OFFSET(PACA_SLBSHADOWPTR, paca_struct, slb_shadow_ptr);
OFFSET(SLBSHADOW_STACKVSID, slb_shadow, save_area[SLB_NUM_BOLTED - 1].vsid);
@@ -232,7 +234,9 @@ int main(void)
OFFSET(PACAEMERGSP, paca_struct, emergency_sp);
#ifdef CONFIG_PPC_BOOK3S_64
OFFSET(PACAMCEMERGSP, paca_struct, mc_emergency_sp);
+ OFFSET(PACA_NMI_EMERG_SP, paca_struct, nmi_emergency_sp);
OFFSET(PACA_IN_MCE, paca_struct, in_mce);
+ OFFSET(PACA_IN_NMI, paca_struct, in_nmi);
#endif
OFFSET(PACAHWCPUID, paca_struct, hw_cpu_id);
OFFSET(PACAKEXECSTATE, paca_struct, kexec_state);
@@ -399,8 +403,8 @@ int main(void)
DEFINE(BUG_ENTRY_SIZE, sizeof(struct bug_entry));
#endif
-#ifdef MAX_PGD_TABLE_SIZE
- DEFINE(PGD_TABLE_SIZE, MAX_PGD_TABLE_SIZE);
+#ifdef CONFIG_PPC_BOOK3S_64
+ DEFINE(PGD_TABLE_SIZE, (sizeof(pgd_t) << max(RADIX_PGD_INDEX_SIZE, H_PGD_INDEX_SIZE)));
#else
DEFINE(PGD_TABLE_SIZE, PGD_TABLE_SIZE);
#endif
@@ -727,6 +731,7 @@ int main(void)
OFFSET(PACA_THREAD_IDLE_STATE, paca_struct, thread_idle_state);
OFFSET(PACA_THREAD_MASK, paca_struct, thread_mask);
OFFSET(PACA_SUBCORE_SIBLING_MASK, paca_struct, subcore_sibling_mask);
+ OFFSET(PACA_SIBLING_PACA_PTRS, paca_struct, thread_sibling_pacas);
#endif
DEFINE(PPC_DBELL_SERVER, PPC_DBELL_SERVER);
diff --git a/arch/powerpc/kernel/cpu_setup_power.S b/arch/powerpc/kernel/cpu_setup_power.S
index 7fe8c79e6937..10cb2896b2ae 100644
--- a/arch/powerpc/kernel/cpu_setup_power.S
+++ b/arch/powerpc/kernel/cpu_setup_power.S
@@ -29,7 +29,8 @@ _GLOBAL(__setup_cpu_power7)
li r0,0
mtspr SPRN_LPID,r0
mfspr r3,SPRN_LPCR
- bl __init_LPCR
+ li r4,(LPCR_LPES1 >> LPCR_LPES_SH)
+ bl __init_LPCR_ISA206
bl __init_tlb_power7
mtlr r11
blr
@@ -42,7 +43,8 @@ _GLOBAL(__restore_cpu_power7)
li r0,0
mtspr SPRN_LPID,r0
mfspr r3,SPRN_LPCR
- bl __init_LPCR
+ li r4,(LPCR_LPES1 >> LPCR_LPES_SH)
+ bl __init_LPCR_ISA206
bl __init_tlb_power7
mtlr r11
blr
@@ -59,7 +61,8 @@ _GLOBAL(__setup_cpu_power8)
mtspr SPRN_LPID,r0
mfspr r3,SPRN_LPCR
ori r3, r3, LPCR_PECEDH
- bl __init_LPCR
+ li r4,0 /* LPES = 0 */
+ bl __init_LPCR_ISA206
bl __init_HFSCR
bl __init_tlb_power8
bl __init_PMU_HV
@@ -80,7 +83,8 @@ _GLOBAL(__restore_cpu_power8)
mtspr SPRN_LPID,r0
mfspr r3,SPRN_LPCR
ori r3, r3, LPCR_PECEDH
- bl __init_LPCR
+ li r4,0 /* LPES = 0 */
+ bl __init_LPCR_ISA206
bl __init_HFSCR
bl __init_tlb_power8
bl __init_PMU_HV
@@ -99,11 +103,12 @@ _GLOBAL(__setup_cpu_power9)
mtspr SPRN_PSSCR,r0
mtspr SPRN_LPID,r0
mfspr r3,SPRN_LPCR
- LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE)
+ LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE | LPCR_HEIC)
or r3, r3, r4
LOAD_REG_IMMEDIATE(r4, LPCR_UPRT | LPCR_HR)
andc r3, r3, r4
- bl __init_LPCR
+ li r4,0 /* LPES = 0 */
+ bl __init_LPCR_ISA300
bl __init_HFSCR
bl __init_tlb_power9
bl __init_PMU_HV
@@ -122,11 +127,12 @@ _GLOBAL(__restore_cpu_power9)
mtspr SPRN_PSSCR,r0
mtspr SPRN_LPID,r0
mfspr r3,SPRN_LPCR
- LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE)
+ LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE | LPCR_HEIC)
or r3, r3, r4
LOAD_REG_IMMEDIATE(r4, LPCR_UPRT | LPCR_HR)
andc r3, r3, r4
- bl __init_LPCR
+ li r4,0 /* LPES = 0 */
+ bl __init_LPCR_ISA300
bl __init_HFSCR
bl __init_tlb_power9
bl __init_PMU_HV
@@ -144,9 +150,9 @@ __init_hvmode_206:
std r5,CPU_SPEC_FEATURES(r4)
blr
-__init_LPCR:
+__init_LPCR_ISA206:
/* Setup a sane LPCR:
- * Called with initial LPCR in R3
+ * Called with initial LPCR in R3 and desired LPES 2-bit value in R4
*
* LPES = 0b01 (HSRR0/1 used for 0x500)
* PECE = 0b111
@@ -157,16 +163,18 @@ __init_LPCR:
*
* Other bits untouched for now
*/
- li r5,1
- rldimi r3,r5, LPCR_LPES_SH, 64-LPCR_LPES_SH-2
+ li r5,0x10
+ rldimi r3,r5, LPCR_VRMASD_SH, 64-LPCR_VRMASD_SH-5
+
+ /* POWER9 has no VRMASD */
+__init_LPCR_ISA300:
+ rldimi r3,r4, LPCR_LPES_SH, 64-LPCR_LPES_SH-2
ori r3,r3,(LPCR_PECE0|LPCR_PECE1|LPCR_PECE2)
li r5,4
rldimi r3,r5, LPCR_DPFD_SH, 64-LPCR_DPFD_SH-3
clrrdi r3,r3,1 /* clear HDICE */
li r5,4
rldimi r3,r5, LPCR_VC_SH, 0
- li r5,0x10
- rldimi r3,r5, LPCR_VRMASD_SH, 64-LPCR_VRMASD_SH-5
mtspr SPRN_LPCR,r3
isync
blr
diff --git a/arch/powerpc/kernel/crash.c b/arch/powerpc/kernel/crash.c
index 47b63de81f9b..cbabb5adccd9 100644
--- a/arch/powerpc/kernel/crash.c
+++ b/arch/powerpc/kernel/crash.c
@@ -43,8 +43,6 @@
#define IPI_TIMEOUT 10000
#define REAL_MODE_TIMEOUT 10000
-/* This keeps a track of which one is the crashing cpu. */
-int crashing_cpu = -1;
static int time_to_dump;
#define CRASH_HANDLER_MAX 3
diff --git a/arch/powerpc/kernel/dbell.c b/arch/powerpc/kernel/dbell.c
index 2128f3a96c32..b6fe883b1016 100644
--- a/arch/powerpc/kernel/dbell.c
+++ b/arch/powerpc/kernel/dbell.c
@@ -20,18 +20,60 @@
#include <asm/kvm_ppc.h>
#ifdef CONFIG_SMP
-void doorbell_setup_this_cpu(void)
+
+/*
+ * Doorbells must only be used if CPU_FTR_DBELL is available.
+ * msgsnd is used in HV, and msgsndp is used in !HV.
+ *
+ * These should be used by platform code that is aware of restrictions.
+ * Other arch code should use ->cause_ipi.
+ *
+ * doorbell_global_ipi() sends a dbell to any target CPU.
+ * Must be used only by architectures that address msgsnd target
+ * by PIR/get_hard_smp_processor_id.
+ */
+void doorbell_global_ipi(int cpu)
{
- unsigned long tag = mfspr(SPRN_DOORBELL_CPUTAG) & PPC_DBELL_TAG_MASK;
+ u32 tag = get_hard_smp_processor_id(cpu);
- smp_muxed_ipi_set_data(smp_processor_id(), tag);
+ kvmppc_set_host_ipi(cpu, 1);
+ /* Order previous accesses vs. msgsnd, which is treated as a store */
+ ppc_msgsnd_sync();
+ ppc_msgsnd(PPC_DBELL_MSGTYPE, 0, tag);
}
-void doorbell_cause_ipi(int cpu, unsigned long data)
+/*
+ * doorbell_core_ipi() sends a dbell to a target CPU in the same core.
+ * Must be used only by architectures that address msgsnd target
+ * by TIR/cpu_thread_in_core.
+ */
+void doorbell_core_ipi(int cpu)
{
+ u32 tag = cpu_thread_in_core(cpu);
+
+ kvmppc_set_host_ipi(cpu, 1);
/* Order previous accesses vs. msgsnd, which is treated as a store */
- mb();
- ppc_msgsnd(PPC_DBELL_MSGTYPE, 0, data);
+ ppc_msgsnd_sync();
+ ppc_msgsnd(PPC_DBELL_MSGTYPE, 0, tag);
+}
+
+/*
+ * Attempt to cause a core doorbell if the destination is on the same core.
+ * Returns 1 on success, 0 on failure.
+ */
+int doorbell_try_core_ipi(int cpu)
+{
+ int this_cpu = get_cpu();
+ int ret = 0;
+
+ if (cpumask_test_cpu(cpu, cpu_sibling_mask(this_cpu))) {
+ doorbell_core_ipi(cpu);
+ ret = 1;
+ }
+
+ put_cpu();
+
+ return ret;
}
void doorbell_exception(struct pt_regs *regs)
@@ -40,12 +82,14 @@ void doorbell_exception(struct pt_regs *regs)
irq_enter();
+ ppc_msgsync();
+
may_hard_irq_enable();
kvmppc_set_host_ipi(smp_processor_id(), 0);
__this_cpu_inc(irq_stat.doorbell_irqs);
- smp_ipi_demux();
+ smp_ipi_demux_relaxed(); /* already performed the barrier */
irq_exit();
set_irq_regs(old_regs);
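
doorbell_try_core_ipi() above is written as an opportunistic fast path: use msgsndp when the target shares the core, otherwise report failure and let the caller fall back to the interrupt controller. A hypothetical ->cause_ipi implementation using it; the fallback function name is made up for illustration, and the declarations are assumed to be exported via asm/dbell.h:

	#include <asm/cpu_has_feature.h>
	#include <asm/cputable.h>
	#include <asm/dbell.h>

	static void example_ic_cause_ipi(int cpu);	/* stand-in for the real IC hook */

	/* Hypothetical platform ->cause_ipi: prefer a core doorbell. */
	static void example_cause_ipi(int cpu)
	{
		if (cpu_has_feature(CPU_FTR_DBELL) && doorbell_try_core_ipi(cpu))
			return;

		example_ic_cause_ipi(cpu);
	}
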
diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
index 9de7f79e702b..63992b2d8e15 100644
--- a/arch/powerpc/kernel/eeh.c
+++ b/arch/powerpc/kernel/eeh.c
@@ -22,7 +22,6 @@
*/
#include <linux/delay.h>
-#include <linux/debugfs.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/list.h>
@@ -37,7 +36,7 @@
#include <linux/of.h>
#include <linux/atomic.h>
-#include <asm/debug.h>
+#include <asm/debugfs.h>
#include <asm/eeh.h>
#include <asm/eeh_event.h>
#include <asm/io.h>
diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
index b94887165a10..c405c79e50cd 100644
--- a/arch/powerpc/kernel/eeh_driver.c
+++ b/arch/powerpc/kernel/eeh_driver.c
@@ -724,7 +724,16 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus,
*/
#define MAX_WAIT_FOR_RECOVERY 300
-static void eeh_handle_normal_event(struct eeh_pe *pe)
+/**
+ * eeh_handle_normal_event - Handle EEH events on a specific PE
+ * @pe: EEH PE
+ *
+ * Attempts to recover the given PE. If recovery fails or the PE has failed
+ * too many times, remove the PE.
+ *
+ * Returns true if @pe should no longer be used, else false.
+ */
+static bool eeh_handle_normal_event(struct eeh_pe *pe)
{
struct pci_bus *frozen_bus;
struct eeh_dev *edev, *tmp;
@@ -736,13 +745,18 @@ static void eeh_handle_normal_event(struct eeh_pe *pe)
if (!frozen_bus) {
pr_err("%s: Cannot find PCI bus for PHB#%x-PE#%x\n",
__func__, pe->phb->global_number, pe->addr);
- return;
+ return false;
}
eeh_pe_update_time_stamp(pe);
pe->freeze_count++;
- if (pe->freeze_count > eeh_max_freezes)
- goto excess_failures;
+ if (pe->freeze_count > eeh_max_freezes) {
+ pr_err("EEH: PHB#%x-PE#%x has failed %d times in the\n"
+ "last hour and has been permanently disabled.\n",
+ pe->phb->global_number, pe->addr,
+ pe->freeze_count);
+ goto hard_fail;
+ }
pr_warn("EEH: This PCI device has failed %d times in the last hour\n",
pe->freeze_count);
@@ -870,27 +884,18 @@ static void eeh_handle_normal_event(struct eeh_pe *pe)
pr_info("EEH: Notify device driver to resume\n");
eeh_pe_dev_traverse(pe, eeh_report_resume, NULL);
- return;
+ return false;
-excess_failures:
+hard_fail:
/*
* About 90% of all real-life EEH failures in the field
* are due to poorly seated PCI cards. Only 10% or so are
* due to actual, failed cards.
*/
- pr_err("EEH: PHB#%x-PE#%x has failed %d times in the\n"
- "last hour and has been permanently disabled.\n"
- "Please try reseating or replacing it.\n",
- pe->phb->global_number, pe->addr,
- pe->freeze_count);
- goto perm_error;
-
-hard_fail:
pr_err("EEH: Unable to recover from failure from PHB#%x-PE#%x.\n"
"Please try reseating or replacing it\n",
pe->phb->global_number, pe->addr);
-perm_error:
eeh_slot_error_detail(pe, EEH_LOG_PERM);
/* Notify all devices that they're about to go down. */
@@ -915,10 +920,21 @@ perm_error:
pci_lock_rescan_remove();
pci_hp_remove_devices(frozen_bus);
pci_unlock_rescan_remove();
+
+ /* The passed PE should no longer be used */
+ return true;
}
}
+ return false;
}
+/**
+ * eeh_handle_special_event - Handle EEH events without a specific failing PE
+ *
+ * Called when an EEH event is detected but can't be narrowed down to a
+ * specific PE. Iterates through possible failures and handles them as
+ * necessary.
+ */
static void eeh_handle_special_event(void)
{
struct eeh_pe *pe, *phb_pe;
@@ -982,7 +998,14 @@ static void eeh_handle_special_event(void)
*/
if (rc == EEH_NEXT_ERR_FROZEN_PE ||
rc == EEH_NEXT_ERR_FENCED_PHB) {
- eeh_handle_normal_event(pe);
+ /*
+ * eeh_handle_normal_event() can make the PE stale if it
+ * determines that the PE cannot possibly be recovered.
+ * Don't modify the PE state if that's the case.
+ */
+ if (eeh_handle_normal_event(pe))
+ continue;
+
eeh_pe_state_clear(pe, EEH_PE_RECOVERING);
} else {
pci_lock_rescan_remove();
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index a38600949f3a..8587059ad848 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -31,7 +31,6 @@
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
-#include <asm/ftrace.h>
#include <asm/ptrace.h>
#include <asm/export.h>
@@ -1315,109 +1314,3 @@ machine_check_in_rtas:
/* XXX load up BATs and panic */
#endif /* CONFIG_PPC_RTAS */
-
-#ifdef CONFIG_FUNCTION_TRACER
-#ifdef CONFIG_DYNAMIC_FTRACE
-_GLOBAL(mcount)
-_GLOBAL(_mcount)
- /*
- * It is required that _mcount on PPC32 must preserve the
- * link register. But we have r0 to play with. We use r0
- * to push the return address back to the caller of mcount
- * into the ctr register, restore the link register and
- * then jump back using the ctr register.
- */
- mflr r0
- mtctr r0
- lwz r0, 4(r1)
- mtlr r0
- bctr
-
-_GLOBAL(ftrace_caller)
- MCOUNT_SAVE_FRAME
- /* r3 ends up with link register */
- subi r3, r3, MCOUNT_INSN_SIZE
-.globl ftrace_call
-ftrace_call:
- bl ftrace_stub
- nop
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-.globl ftrace_graph_call
-ftrace_graph_call:
- b ftrace_graph_stub
-_GLOBAL(ftrace_graph_stub)
-#endif
- MCOUNT_RESTORE_FRAME
- /* old link register ends up in ctr reg */
- bctr
-#else
-_GLOBAL(mcount)
-_GLOBAL(_mcount)
-
- MCOUNT_SAVE_FRAME
-
- subi r3, r3, MCOUNT_INSN_SIZE
- LOAD_REG_ADDR(r5, ftrace_trace_function)
- lwz r5,0(r5)
-
- mtctr r5
- bctrl
- nop
-
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- b ftrace_graph_caller
-#endif
- MCOUNT_RESTORE_FRAME
- bctr
-#endif
-EXPORT_SYMBOL(_mcount)
-
-_GLOBAL(ftrace_stub)
- blr
-
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-_GLOBAL(ftrace_graph_caller)
- /* load r4 with local address */
- lwz r4, 44(r1)
- subi r4, r4, MCOUNT_INSN_SIZE
-
- /* Grab the LR out of the caller stack frame */
- lwz r3,52(r1)
-
- bl prepare_ftrace_return
- nop
-
- /*
- * prepare_ftrace_return gives us the address we divert to.
- * Change the LR in the callers stack frame to this.
- */
- stw r3,52(r1)
-
- MCOUNT_RESTORE_FRAME
- /* old link register ends up in ctr reg */
- bctr
-
-_GLOBAL(return_to_handler)
- /* need to save return values */
- stwu r1, -32(r1)
- stw r3, 20(r1)
- stw r4, 16(r1)
- stw r31, 12(r1)
- mr r31, r1
-
- bl ftrace_return_to_handler
- nop
-
- /* return value has real return address */
- mtlr r3
-
- lwz r3, 20(r1)
- lwz r4, 16(r1)
- lwz r31,12(r1)
- lwz r1, 0(r1)
-
- /* Jump back to real return address */
- blr
-#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
-
-#endif /* CONFIG_FUNCTION_TRACER */
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 767ef6d68c9e..bfbad08a1207 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -20,7 +20,6 @@
#include <linux/errno.h>
#include <linux/err.h>
-#include <linux/magic.h>
#include <asm/unistd.h>
#include <asm/processor.h>
#include <asm/page.h>
@@ -33,7 +32,6 @@
#include <asm/bug.h>
#include <asm/ptrace.h>
#include <asm/irqflags.h>
-#include <asm/ftrace.h>
#include <asm/hw_irq.h>
#include <asm/context_tracking.h>
#include <asm/tm.h>
@@ -1173,381 +1171,3 @@ _GLOBAL(enter_prom)
ld r0,16(r1)
mtlr r0
blr
-
-#ifdef CONFIG_FUNCTION_TRACER
-#ifdef CONFIG_DYNAMIC_FTRACE
-_GLOBAL(mcount)
-_GLOBAL(_mcount)
-EXPORT_SYMBOL(_mcount)
- mflr r12
- mtctr r12
- mtlr r0
- bctr
-
-#ifndef CC_USING_MPROFILE_KERNEL
-_GLOBAL_TOC(ftrace_caller)
- /* Taken from output of objdump from lib64/glibc */
- mflr r3
- ld r11, 0(r1)
- stdu r1, -112(r1)
- std r3, 128(r1)
- ld r4, 16(r11)
- subi r3, r3, MCOUNT_INSN_SIZE
-.globl ftrace_call
-ftrace_call:
- bl ftrace_stub
- nop
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-.globl ftrace_graph_call
-ftrace_graph_call:
- b ftrace_graph_stub
-_GLOBAL(ftrace_graph_stub)
-#endif
- ld r0, 128(r1)
- mtlr r0
- addi r1, r1, 112
-
-#else /* CC_USING_MPROFILE_KERNEL */
-/*
- *
- * ftrace_caller() is the function that replaces _mcount() when ftrace is
- * active.
- *
- * We arrive here after a function A calls function B, and we are the trace
- * function for B. When we enter r1 points to A's stack frame, B has not yet
- * had a chance to allocate one yet.
- *
- * Additionally r2 may point either to the TOC for A, or B, depending on
- * whether B did a TOC setup sequence before calling us.
- *
- * On entry the LR points back to the _mcount() call site, and r0 holds the
- * saved LR as it was on entry to B, ie. the original return address at the
- * call site in A.
- *
- * Our job is to save the register state into a struct pt_regs (on the stack)
- * and then arrange for the ftrace function to be called.
- */
-_GLOBAL(ftrace_caller)
- /* Save the original return address in A's stack frame */
- std r0,LRSAVE(r1)
-
- /* Create our stack frame + pt_regs */
- stdu r1,-SWITCH_FRAME_SIZE(r1)
-
- /* Save all gprs to pt_regs */
- SAVE_8GPRS(0,r1)
- SAVE_8GPRS(8,r1)
- SAVE_8GPRS(16,r1)
- SAVE_8GPRS(24,r1)
-
- /* Load special regs for save below */
- mfmsr r8
- mfctr r9
- mfxer r10
- mfcr r11
-
- /* Get the _mcount() call site out of LR */
- mflr r7
- /* Save it as pt_regs->nip & pt_regs->link */
- std r7, _NIP(r1)
- std r7, _LINK(r1)
-
- /* Save callee's TOC in the ABI compliant location */
- std r2, 24(r1)
- ld r2,PACATOC(r13) /* get kernel TOC in r2 */
-
- addis r3,r2,function_trace_op@toc@ha
- addi r3,r3,function_trace_op@toc@l
- ld r5,0(r3)
-
-#ifdef CONFIG_LIVEPATCH
- mr r14,r7 /* remember old NIP */
-#endif
- /* Calculate ip from nip-4 into r3 for call below */
- subi r3, r7, MCOUNT_INSN_SIZE
-
- /* Put the original return address in r4 as parent_ip */
- mr r4, r0
-
- /* Save special regs */
- std r8, _MSR(r1)
- std r9, _CTR(r1)
- std r10, _XER(r1)
- std r11, _CCR(r1)
-
- /* Load &pt_regs in r6 for call below */
- addi r6, r1 ,STACK_FRAME_OVERHEAD
-
- /* ftrace_call(r3, r4, r5, r6) */
-.globl ftrace_call
-ftrace_call:
- bl ftrace_stub
- nop
-
- /* Load ctr with the possibly modified NIP */
- ld r3, _NIP(r1)
- mtctr r3
-#ifdef CONFIG_LIVEPATCH
- cmpd r14,r3 /* has NIP been altered? */
-#endif
-
- /* Restore gprs */
- REST_8GPRS(0,r1)
- REST_8GPRS(8,r1)
- REST_8GPRS(16,r1)
- REST_8GPRS(24,r1)
-
- /* Restore callee's TOC */
- ld r2, 24(r1)
-
- /* Pop our stack frame */
- addi r1, r1, SWITCH_FRAME_SIZE
-
- /* Restore original LR for return to B */
- ld r0, LRSAVE(r1)
- mtlr r0
-
-#ifdef CONFIG_LIVEPATCH
- /* Based on the cmpd above, if the NIP was altered handle livepatch */
- bne- livepatch_handler
-#endif
-
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- stdu r1, -112(r1)
-.globl ftrace_graph_call
-ftrace_graph_call:
- b ftrace_graph_stub
-_GLOBAL(ftrace_graph_stub)
- addi r1, r1, 112
-#endif
-
- ld r0,LRSAVE(r1) /* restore callee's lr at _mcount site */
- mtlr r0
- bctr /* jump after _mcount site */
-#endif /* CC_USING_MPROFILE_KERNEL */
-
-_GLOBAL(ftrace_stub)
- blr
-
-#ifdef CONFIG_LIVEPATCH
- /*
- * This function runs in the mcount context, between two functions. As
- * such it can only clobber registers which are volatile and used in
- * function linkage.
- *
- * We get here when a function A, calls another function B, but B has
- * been live patched with a new function C.
- *
- * On entry:
- * - we have no stack frame and can not allocate one
- * - LR points back to the original caller (in A)
- * - CTR holds the new NIP in C
- * - r0 & r12 are free
- *
- * r0 can't be used as the base register for a DS-form load or store, so
- * we temporarily shuffle r1 (stack pointer) into r0 and then put it back.
- */
-livepatch_handler:
- CURRENT_THREAD_INFO(r12, r1)
-
- /* Save stack pointer into r0 */
- mr r0, r1
-
- /* Allocate 3 x 8 bytes */
- ld r1, TI_livepatch_sp(r12)
- addi r1, r1, 24
- std r1, TI_livepatch_sp(r12)
-
- /* Save toc & real LR on livepatch stack */
- std r2, -24(r1)
- mflr r12
- std r12, -16(r1)
-
- /* Store stack end marker */
- lis r12, STACK_END_MAGIC@h
- ori r12, r12, STACK_END_MAGIC@l
- std r12, -8(r1)
-
- /* Restore real stack pointer */
- mr r1, r0
-
- /* Put ctr in r12 for global entry and branch there */
- mfctr r12
- bctrl
-
- /*
- * Now we are returning from the patched function to the original
- * caller A. We are free to use r0 and r12, and we can use r2 until we
- * restore it.
- */
-
- CURRENT_THREAD_INFO(r12, r1)
-
- /* Save stack pointer into r0 */
- mr r0, r1
-
- ld r1, TI_livepatch_sp(r12)
-
- /* Check stack marker hasn't been trashed */
- lis r2, STACK_END_MAGIC@h
- ori r2, r2, STACK_END_MAGIC@l
- ld r12, -8(r1)
-1: tdne r12, r2
- EMIT_BUG_ENTRY 1b, __FILE__, __LINE__ - 1, 0
-
- /* Restore LR & toc from livepatch stack */
- ld r12, -16(r1)
- mtlr r12
- ld r2, -24(r1)
-
- /* Pop livepatch stack frame */
- CURRENT_THREAD_INFO(r12, r0)
- subi r1, r1, 24
- std r1, TI_livepatch_sp(r12)
-
- /* Restore real stack pointer */
- mr r1, r0
-
- /* Return to original caller of live patched function */
- blr
-#endif
-
-
-#else
-_GLOBAL_TOC(_mcount)
-EXPORT_SYMBOL(_mcount)
- /* Taken from output of objdump from lib64/glibc */
- mflr r3
- ld r11, 0(r1)
- stdu r1, -112(r1)
- std r3, 128(r1)
- ld r4, 16(r11)
-
- subi r3, r3, MCOUNT_INSN_SIZE
- LOAD_REG_ADDR(r5,ftrace_trace_function)
- ld r5,0(r5)
- ld r5,0(r5)
- mtctr r5
- bctrl
- nop
-
-
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- b ftrace_graph_caller
-#endif
- ld r0, 128(r1)
- mtlr r0
- addi r1, r1, 112
-_GLOBAL(ftrace_stub)
- blr
-
-#endif /* CONFIG_DYNAMIC_FTRACE */
-
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-#ifndef CC_USING_MPROFILE_KERNEL
-_GLOBAL(ftrace_graph_caller)
- /* load r4 with local address */
- ld r4, 128(r1)
- subi r4, r4, MCOUNT_INSN_SIZE
-
- /* Grab the LR out of the caller stack frame */
- ld r11, 112(r1)
- ld r3, 16(r11)
-
- bl prepare_ftrace_return
- nop
-
- /*
- * prepare_ftrace_return gives us the address we divert to.
- * Change the LR in the callers stack frame to this.
- */
- ld r11, 112(r1)
- std r3, 16(r11)
-
- ld r0, 128(r1)
- mtlr r0
- addi r1, r1, 112
- blr
-
-#else /* CC_USING_MPROFILE_KERNEL */
-_GLOBAL(ftrace_graph_caller)
- /* with -mprofile-kernel, parameter regs are still alive at _mcount */
- std r10, 104(r1)
- std r9, 96(r1)
- std r8, 88(r1)
- std r7, 80(r1)
- std r6, 72(r1)
- std r5, 64(r1)
- std r4, 56(r1)
- std r3, 48(r1)
-
- /* Save callee's TOC in the ABI compliant location */
- std r2, 24(r1)
- ld r2, PACATOC(r13) /* get kernel TOC in r2 */
-
- mfctr r4 /* ftrace_caller has moved local addr here */
- std r4, 40(r1)
- mflr r3 /* ftrace_caller has restored LR from stack */
- subi r4, r4, MCOUNT_INSN_SIZE
-
- bl prepare_ftrace_return
- nop
-
- /*
- * prepare_ftrace_return gives us the address we divert to.
- * Change the LR to this.
- */
- mtlr r3
-
- ld r0, 40(r1)
- mtctr r0
- ld r10, 104(r1)
- ld r9, 96(r1)
- ld r8, 88(r1)
- ld r7, 80(r1)
- ld r6, 72(r1)
- ld r5, 64(r1)
- ld r4, 56(r1)
- ld r3, 48(r1)
-
- /* Restore callee's TOC */
- ld r2, 24(r1)
-
- addi r1, r1, 112
- mflr r0
- std r0, LRSAVE(r1)
- bctr
-#endif /* CC_USING_MPROFILE_KERNEL */
-
-_GLOBAL(return_to_handler)
- /* need to save return values */
- std r4, -32(r1)
- std r3, -24(r1)
- /* save TOC */
- std r2, -16(r1)
- std r31, -8(r1)
- mr r31, r1
- stdu r1, -112(r1)
-
- /*
- * We might be called from a module.
- * Switch to our TOC to run inside the core kernel.
- */
- ld r2, PACATOC(r13)
-
- bl ftrace_return_to_handler
- nop
-
- /* return value has real return address */
- mtlr r3
-
- ld r1, 0(r1)
- ld r4, -32(r1)
- ld r3, -24(r1)
- ld r2, -16(r1)
- ld r31, -8(r1)
-
- /* Jump back to real return address */
- blr
-#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
-#endif /* CONFIG_FUNCTION_TRACER */
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 6353019966e6..a9312b52fe6f 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -116,9 +116,11 @@ EXC_VIRT_NONE(0x4000, 0x100)
EXC_REAL_BEGIN(system_reset, 0x100, 0x100)
SET_SCRATCH0(r13)
- GET_PACA(r13)
- clrrdi r13,r13,1 /* Last bit of HSPRG0 is set if waking from winkle */
- EXCEPTION_PROLOG_PSERIES_PACA(PACA_EXGEN, system_reset_common, EXC_STD,
+ /*
+	 * MSR_RI is not enabled, because the PACA_EXNMI area and NMI stack
+	 * are being used, so a nested NMI exception would corrupt them.
+ */
+ EXCEPTION_PROLOG_PSERIES_NORI(PACA_EXNMI, system_reset_common, EXC_STD,
IDLETEST, 0x100)
EXC_REAL_END(system_reset, 0x100, 0x100)
@@ -126,34 +128,37 @@ EXC_VIRT_NONE(0x4100, 0x100)
#ifdef CONFIG_PPC_P7_NAP
EXC_COMMON_BEGIN(system_reset_idle_common)
-BEGIN_FTR_SECTION
- GET_PACA(r13) /* Restore HSPRG0 to get the winkle bit in r13 */
-END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
- bl pnv_restore_hyp_resource
+ b pnv_powersave_wakeup
+#endif
- li r0,PNV_THREAD_RUNNING
- stb r0,PACA_THREAD_IDLE_STATE(r13) /* Clear thread state */
+EXC_COMMON_BEGIN(system_reset_common)
+ /*
+ * Increment paca->in_nmi then enable MSR_RI. SLB or MCE will be able
+ * to recover, but nested NMI will notice in_nmi and not recover
+ * because of the use of the NMI stack. in_nmi reentrancy is tested in
+ * system_reset_exception.
+ */
+ lhz r10,PACA_IN_NMI(r13)
+ addi r10,r10,1
+ sth r10,PACA_IN_NMI(r13)
+ li r10,MSR_RI
+ mtmsrd r10,1
-#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
- li r0,KVM_HWTHREAD_IN_KERNEL
- stb r0,HSTATE_HWTHREAD_STATE(r13)
- /* Order setting hwthread_state vs. testing hwthread_req */
- sync
- lbz r0,HSTATE_HWTHREAD_REQ(r13)
- cmpwi r0,0
- beq 1f
- BRANCH_TO_KVM(r10, kvm_start_guest)
-1:
-#endif
+ mr r10,r1
+ ld r1,PACA_NMI_EMERG_SP(r13)
+ subi r1,r1,INT_FRAME_SIZE
+ EXCEPTION_COMMON_NORET_STACK(PACA_EXNMI, 0x100,
+ system_reset, system_reset_exception,
+ ADD_NVGPRS;ADD_RECONCILE)
- /* Return SRR1 from power7_nap() */
- mfspr r3,SPRN_SRR1
- blt cr3,2f
- b pnv_wakeup_loss
-2: b pnv_wakeup_noloss
-#endif
+ /*
+ * The stack is no longer in use, decrement in_nmi.
+ */
+ lhz r10,PACA_IN_NMI(r13)
+ subi r10,r10,1
+ sth r10,PACA_IN_NMI(r13)
-EXC_COMMON(system_reset_common, 0x100, system_reset_exception)
+ b ret_from_except
#ifdef CONFIG_PPC_PSERIES
/*
@@ -161,8 +166,9 @@ EXC_COMMON(system_reset_common, 0x100, system_reset_exception)
*/
TRAMP_REAL_BEGIN(system_reset_fwnmi)
SET_SCRATCH0(r13) /* save r13 */
- EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD,
- NOTEST, 0x100)
+ /* See comment at system_reset exception */
+ EXCEPTION_PROLOG_PSERIES_NORI(PACA_EXNMI, system_reset_common,
+ EXC_STD, NOTEST, 0x100)
#endif /* CONFIG_PPC_PSERIES */
@@ -172,14 +178,6 @@ EXC_REAL_BEGIN(machine_check, 0x200, 0x100)
* vector
*/
SET_SCRATCH0(r13) /* save r13 */
- /*
- * Running native on arch 2.06 or later, we may wakeup from winkle
- * inside machine check. If yes, then last bit of HSPRG0 would be set
- * to 1. Hence clear it unconditionally.
- */
- GET_PACA(r13)
- clrrdi r13,r13,1
- SET_PACA(r13)
EXCEPTION_PROLOG_0(PACA_EXMC)
BEGIN_FTR_SECTION
b machine_check_powernv_early
@@ -212,6 +210,12 @@ BEGIN_FTR_SECTION
* NOTE: We are here with MSR_ME=0 (off), which means we risk a
* checkstop if we get another machine check exception before we do
* rfid with MSR_ME=1.
+ *
+ * This interrupt can wake directly from idle. If that is the case,
+	 * the machine check is handled, then the idle wakeup code is called
+ * to restore state. In that case, the POWER9 DD1 idle PACA workaround
+ * is not applied in the early machine check code, which will cause
+ * bugs.
*/
mr r11,r1 /* Save r1 */
lhz r10,PACA_IN_MCE(r13)
@@ -268,20 +272,11 @@ machine_check_fwnmi:
machine_check_pSeries_0:
EXCEPTION_PROLOG_1(PACA_EXMC, KVMTEST_PR, 0x200)
/*
- * The following is essentially EXCEPTION_PROLOG_PSERIES_1 with the
- * difference that MSR_RI is not enabled, because PACA_EXMC is being
- * used, so nested machine check corrupts it. machine_check_common
- * enables MSR_RI.
+ * MSR_RI is not enabled, because PACA_EXMC is being used, so a
+ * nested machine check corrupts it. machine_check_common enables
+ * MSR_RI.
*/
- ld r10,PACAKMSR(r13)
- xori r10,r10,MSR_RI
- mfspr r11,SPRN_SRR0
- LOAD_HANDLER(r12, machine_check_common)
- mtspr SPRN_SRR0,r12
- mfspr r12,SPRN_SRR1
- mtspr SPRN_SRR1,r10
- rfid
- b . /* prevent speculative execution */
+ EXCEPTION_PROLOG_PSERIES_1_NORI(machine_check_common, EXC_STD)
TRAMP_KVM_SKIP(PACA_EXMC, 0x200)
@@ -340,6 +335,37 @@ EXC_COMMON_BEGIN(machine_check_common)
/* restore original r1. */ \
ld r1,GPR1(r1)
+#ifdef CONFIG_PPC_P7_NAP
+/*
+ * This is an idle wakeup. Low level machine check has already been
+ * done. Queue the event then call the idle code to do the wake up.
+ */
+EXC_COMMON_BEGIN(machine_check_idle_common)
+ bl machine_check_queue_event
+
+ /*
+ * We have not used any non-volatile GPRs here, and as a rule
+ * most exception code including machine check does not.
+ * Therefore PACA_NAPSTATELOST does not need to be set. Idle
+ * wakeup will restore volatile registers.
+ *
+ * Load the original SRR1 into r3 for pnv_powersave_wakeup_mce.
+ *
+ * Then decrement MCE nesting after finishing with the stack.
+ */
+ ld r3,_MSR(r1)
+
+ lhz r11,PACA_IN_MCE(r13)
+ subi r11,r11,1
+ sth r11,PACA_IN_MCE(r13)
+
+ /* Turn off the RI bit because SRR1 is used by idle wakeup code. */
+ /* Recoverability could be improved by reducing the use of SRR1. */
+ li r11,0
+ mtmsrd r11,1
+
+ b pnv_powersave_wakeup_mce
+#endif
/*
* Handle machine check early in real mode. We come here with
* ME=1, MMU (IR=0 and DR=0) off and using MC emergency stack.
@@ -352,6 +378,7 @@ EXC_COMMON_BEGIN(machine_check_handle_early)
bl machine_check_early
std r3,RESULT(r1) /* Save result */
ld r12,_MSR(r1)
+
#ifdef CONFIG_PPC_P7_NAP
/*
* Check if thread was in power saving mode. We come here when any
@@ -362,48 +389,14 @@ EXC_COMMON_BEGIN(machine_check_handle_early)
*
* Go back to nap/sleep/winkle mode again if (b) is true.
*/
- rlwinm. r11,r12,47-31,30,31 /* Was it in power saving mode? */
- beq 4f /* No, it wasn;t */
- /* Thread was in power saving mode. Go back to nap again. */
- cmpwi r11,2
- blt 3f
- /* Supervisor/Hypervisor state loss */
- li r0,1
- stb r0,PACA_NAPSTATELOST(r13)
-3: bl machine_check_queue_event
- MACHINE_CHECK_HANDLER_WINDUP
- GET_PACA(r13)
- ld r1,PACAR1(r13)
- /*
- * Check what idle state this CPU was in and go back to same mode
- * again.
- */
- lbz r3,PACA_THREAD_IDLE_STATE(r13)
- cmpwi r3,PNV_THREAD_NAP
- bgt 10f
- IDLE_STATE_ENTER_SEQ_NORET(PPC_NAP)
- /* No return */
-10:
- cmpwi r3,PNV_THREAD_SLEEP
- bgt 2f
- IDLE_STATE_ENTER_SEQ_NORET(PPC_SLEEP)
- /* No return */
-
-2:
- /*
- * Go back to winkle. Please note that this thread was woken up in
- * machine check from winkle and have not restored the per-subcore
- * state. Hence before going back to winkle, set last bit of HSPRG0
- * to 1. This will make sure that if this thread gets woken up
- * again at reset vector 0x100 then it will get chance to restore
- * the subcore state.
- */
- ori r13,r13,1
- SET_PACA(r13)
- IDLE_STATE_ENTER_SEQ_NORET(PPC_WINKLE)
- /* No return */
+ BEGIN_FTR_SECTION
+ rlwinm. r11,r12,47-31,30,31
+ beq- 4f
+ BRANCH_TO_COMMON(r10, machine_check_idle_common)
4:
+ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
#endif
+
/*
* Check if we are coming from hypervisor userspace. If yes then we
* continue in host kernel in V mode to deliver the MC event.
@@ -968,17 +961,12 @@ EXC_VIRT_NONE(0x4e60, 0x20)
TRAMP_KVM_HV(PACA_EXGEN, 0xe60)
TRAMP_REAL_BEGIN(hmi_exception_early)
EXCEPTION_PROLOG_1(PACA_EXGEN, KVMTEST_HV, 0xe60)
- mr r10,r1 /* Save r1 */
- ld r1,PACAEMERGSP(r13) /* Use emergency stack */
+ mr r10,r1 /* Save r1 */
+ ld r1,PACAEMERGSP(r13) /* Use emergency stack for realmode */
subi r1,r1,INT_FRAME_SIZE /* alloc stack frame */
- std r9,_CCR(r1) /* save CR in stackframe */
mfspr r11,SPRN_HSRR0 /* Save HSRR0 */
- std r11,_NIP(r1) /* save HSRR0 in stackframe */
- mfspr r12,SPRN_HSRR1 /* Save SRR1 */
- std r12,_MSR(r1) /* save SRR1 in stackframe */
- std r10,0(r1) /* make stack chain pointer */
- std r0,GPR0(r1) /* save r0 in stackframe */
- std r10,GPR1(r1) /* save r1 in stackframe */
+ mfspr r12,SPRN_HSRR1 /* Save HSRR1 */
+ EXCEPTION_PROLOG_COMMON_1()
EXCEPTION_PROLOG_COMMON_2(PACA_EXGEN)
EXCEPTION_PROLOG_COMMON_3(0xe60)
addi r3,r1,STACK_FRAME_OVERHEAD
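
The system reset changes above move 0x100 onto PACA_EXNMI and a dedicated NMI stack, bracketing its use with a paca->in_nmi increment/decrement so the C handler can detect re-entrancy. A hypothetical sketch of the test the comment refers to ("in_nmi reentrancy is tested in system_reset_exception"); the body is illustrative, the actual handler is not part of this hunk:

	#include <linux/kernel.h>
	#include <asm/paca.h>
	#include <asm/ptrace.h>

	/* Illustrative shape only: in_nmi > 1 means we re-entered while
	 * already running on the dedicated NMI stack. */
	static void example_system_reset(struct pt_regs *regs)
	{
		if (get_paca()->in_nmi > 1)
			panic("Unrecoverable nested system reset");

		/* ... normal system reset handling ... */
	}
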
diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c
index 8ff0dd4e77a7..466569e26278 100644
--- a/arch/powerpc/kernel/fadump.c
+++ b/arch/powerpc/kernel/fadump.c
@@ -30,17 +30,16 @@
#include <linux/string.h>
#include <linux/memblock.h>
#include <linux/delay.h>
-#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/crash_dump.h>
#include <linux/kobject.h>
#include <linux/sysfs.h>
+#include <asm/debugfs.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/fadump.h>
-#include <asm/debug.h>
#include <asm/setup.h>
static struct fw_dump fw_dump;
@@ -210,14 +209,20 @@ static unsigned long init_fadump_mem_struct(struct fadump_mem_struct *fdm,
*/
static inline unsigned long fadump_calculate_reserve_size(void)
{
- unsigned long size;
+ int ret;
+ unsigned long long base, size;
/*
- * Check if the size is specified through fadump_reserve_mem= cmdline
- * option. If yes, then use that.
+ * Check if the size is specified through crashkernel= cmdline
+ * option. If yes, then use that but ignore base as fadump
+ * reserves memory at end of RAM.
*/
- if (fw_dump.reserve_bootvar)
+ ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
+ &size, &base);
+ if (ret == 0 && size > 0) {
+ fw_dump.reserve_bootvar = (unsigned long)size;
return fw_dump.reserve_bootvar;
+ }
/* divide by 20 to get 5% of value */
size = memblock_end_of_DRAM() / 20;
@@ -319,15 +324,34 @@ int __init fadump_reserve_mem(void)
pr_debug("fadumphdr_addr = %p\n",
(void *) fw_dump.fadumphdr_addr);
} else {
- /* Reserve the memory at the top of memory. */
size = get_fadump_area_size();
- base = memory_boundary - size;
- memblock_reserve(base, size);
- printk(KERN_INFO "Reserved %ldMB of memory at %ldMB "
- "for firmware-assisted dump\n",
- (unsigned long)(size >> 20),
- (unsigned long)(base >> 20));
+
+ /*
+		 * Reserve memory at an offset closer to the bottom of RAM to
+		 * minimize the impact of a memory hot-remove operation. We can't
+ * use memblock_find_in_range() here since it doesn't allocate
+ * from bottom to top.
+ */
+ for (base = fw_dump.boot_memory_size;
+ base <= (memory_boundary - size);
+ base += size) {
+ if (memblock_is_region_memory(base, size) &&
+ !memblock_is_region_reserved(base, size))
+ break;
+ }
+ if ((base > (memory_boundary - size)) ||
+ memblock_reserve(base, size)) {
+ pr_err("Failed to reserve memory\n");
+ return 0;
+ }
+
+ pr_info("Reserved %ldMB of memory at %ldMB for firmware-"
+ "assisted dump (System RAM: %ldMB)\n",
+ (unsigned long)(size >> 20),
+ (unsigned long)(base >> 20),
+ (unsigned long)(memblock_phys_mem_size() >> 20));
}
+
fw_dump.reserve_dump_area_start = base;
fw_dump.reserve_dump_area_size = size;
return 1;
@@ -353,15 +377,6 @@ static int __init early_fadump_param(char *p)
}
early_param("fadump", early_fadump_param);
-/* Look for fadump_reserve_mem= cmdline option */
-static int __init early_fadump_reserve_mem(char *p)
-{
- if (p)
- fw_dump.reserve_bootvar = memparse(p, &p);
- return 0;
-}
-early_param("fadump_reserve_mem", early_fadump_reserve_mem);
-
static void register_fw_dump(struct fadump_mem_struct *fdm)
{
int rc;
@@ -509,34 +524,6 @@ fadump_read_registers(struct fadump_reg_entry *reg_entry, struct pt_regs *regs)
return reg_entry;
}
-static u32 *fadump_append_elf_note(u32 *buf, char *name, unsigned type,
- void *data, size_t data_len)
-{
- struct elf_note note;
-
- note.n_namesz = strlen(name) + 1;
- note.n_descsz = data_len;
- note.n_type = type;
- memcpy(buf, &note, sizeof(note));
- buf += (sizeof(note) + 3)/4;
- memcpy(buf, name, note.n_namesz);
- buf += (note.n_namesz + 3)/4;
- memcpy(buf, data, note.n_descsz);
- buf += (note.n_descsz + 3)/4;
-
- return buf;
-}
-
-static void fadump_final_note(u32 *buf)
-{
- struct elf_note note;
-
- note.n_namesz = 0;
- note.n_descsz = 0;
- note.n_type = 0;
- memcpy(buf, &note, sizeof(note));
-}
-
static u32 *fadump_regs_to_elf_notes(u32 *buf, struct pt_regs *regs)
{
struct elf_prstatus prstatus;
@@ -547,8 +534,8 @@ static u32 *fadump_regs_to_elf_notes(u32 *buf, struct pt_regs *regs)
* prstatus.pr_pid = ????
*/
elf_core_copy_kernel_regs(&prstatus.pr_reg, regs);
- buf = fadump_append_elf_note(buf, KEXEC_CORE_NOTE_NAME, NT_PRSTATUS,
- &prstatus, sizeof(prstatus));
+ buf = append_elf_note(buf, CRASH_CORE_NOTE_NAME, NT_PRSTATUS,
+ &prstatus, sizeof(prstatus));
return buf;
}
@@ -689,7 +676,7 @@ static int __init fadump_build_cpu_notes(const struct fadump_mem_struct *fdm)
note_buf = fadump_regs_to_elf_notes(note_buf, &regs);
}
}
- fadump_final_note(note_buf);
+ final_note(note_buf);
if (fdh) {
pr_debug("Updating elfcore header (%llx) with cpu notes\n",
diff --git a/arch/powerpc/kernel/ftrace.c b/arch/powerpc/kernel/ftrace.c
deleted file mode 100644
index 5c9f50c1aa99..000000000000
--- a/arch/powerpc/kernel/ftrace.c
+++ /dev/null
@@ -1,619 +0,0 @@
-/*
- * Code for replacing ftrace calls with jumps.
- *
- * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
- *
- * Thanks goes out to P.A. Semi, Inc for supplying me with a PPC64 box.
- *
- * Added function graph tracer code, taken from x86 that was written
- * by Frederic Weisbecker, and ported to PPC by Steven Rostedt.
- *
- */
-
-#define pr_fmt(fmt) "ftrace-powerpc: " fmt
-
-#include <linux/spinlock.h>
-#include <linux/hardirq.h>
-#include <linux/uaccess.h>
-#include <linux/module.h>
-#include <linux/ftrace.h>
-#include <linux/percpu.h>
-#include <linux/init.h>
-#include <linux/list.h>
-
-#include <asm/cacheflush.h>
-#include <asm/code-patching.h>
-#include <asm/ftrace.h>
-#include <asm/syscall.h>
-
-
-#ifdef CONFIG_DYNAMIC_FTRACE
-static unsigned int
-ftrace_call_replace(unsigned long ip, unsigned long addr, int link)
-{
- unsigned int op;
-
- addr = ppc_function_entry((void *)addr);
-
- /* if (link) set op to 'bl' else 'b' */
- op = create_branch((unsigned int *)ip, addr, link ? 1 : 0);
-
- return op;
-}
-
-static int
-ftrace_modify_code(unsigned long ip, unsigned int old, unsigned int new)
-{
- unsigned int replaced;
-
- /*
- * Note:
- * We are paranoid about modifying text, as if a bug was to happen, it
- * could cause us to read or write to someplace that could cause harm.
- * Carefully read and modify the code with probe_kernel_*(), and make
- * sure what we read is what we expected it to be before modifying it.
- */
-
- /* read the text we want to modify */
- if (probe_kernel_read(&replaced, (void *)ip, MCOUNT_INSN_SIZE))
- return -EFAULT;
-
- /* Make sure it is what we expect it to be */
- if (replaced != old) {
- pr_err("%p: replaced (%#x) != old (%#x)",
- (void *)ip, replaced, old);
- return -EINVAL;
- }
-
- /* replace the text with the new text */
- if (patch_instruction((unsigned int *)ip, new))
- return -EPERM;
-
- return 0;
-}
-
-/*
- * Helper functions that are the same for both PPC64 and PPC32.
- */
-static int test_24bit_addr(unsigned long ip, unsigned long addr)
-{
- addr = ppc_function_entry((void *)addr);
-
- /* use the create_branch to verify that this offset can be branched */
- return create_branch((unsigned int *)ip, addr, 0);
-}
-
-#ifdef CONFIG_MODULES
-
-static int is_bl_op(unsigned int op)
-{
- return (op & 0xfc000003) == 0x48000001;
-}
-
-static unsigned long find_bl_target(unsigned long ip, unsigned int op)
-{
- static int offset;
-
- offset = (op & 0x03fffffc);
- /* make it signed */
- if (offset & 0x02000000)
- offset |= 0xfe000000;
-
- return ip + (long)offset;
-}
-
-#ifdef CONFIG_PPC64
-static int
-__ftrace_make_nop(struct module *mod,
- struct dyn_ftrace *rec, unsigned long addr)
-{
- unsigned long entry, ptr, tramp;
- unsigned long ip = rec->ip;
- unsigned int op, pop;
-
- /* read where this goes */
- if (probe_kernel_read(&op, (void *)ip, sizeof(int))) {
- pr_err("Fetching opcode failed.\n");
- return -EFAULT;
- }
-
- /* Make sure that that this is still a 24bit jump */
- if (!is_bl_op(op)) {
- pr_err("Not expected bl: opcode is %x\n", op);
- return -EINVAL;
- }
-
- /* lets find where the pointer goes */
- tramp = find_bl_target(ip, op);
-
- pr_devel("ip:%lx jumps to %lx", ip, tramp);
-
- if (module_trampoline_target(mod, tramp, &ptr)) {
- pr_err("Failed to get trampoline target\n");
- return -EFAULT;
- }
-
- pr_devel("trampoline target %lx", ptr);
-
- entry = ppc_global_function_entry((void *)addr);
- /* This should match what was called */
- if (ptr != entry) {
- pr_err("addr %lx does not match expected %lx\n", ptr, entry);
- return -EINVAL;
- }
-
-#ifdef CC_USING_MPROFILE_KERNEL
- /* When using -mkernel_profile there is no load to jump over */
- pop = PPC_INST_NOP;
-
- if (probe_kernel_read(&op, (void *)(ip - 4), 4)) {
- pr_err("Fetching instruction at %lx failed.\n", ip - 4);
- return -EFAULT;
- }
-
- /* We expect either a mflr r0, or a std r0, LRSAVE(r1) */
- if (op != PPC_INST_MFLR && op != PPC_INST_STD_LR) {
- pr_err("Unexpected instruction %08x around bl _mcount\n", op);
- return -EINVAL;
- }
-#else
- /*
- * Our original call site looks like:
- *
- * bl <tramp>
- * ld r2,XX(r1)
- *
- * Milton Miller pointed out that we can not simply nop the branch.
- * If a task was preempted when calling a trace function, the nops
- * will remove the way to restore the TOC in r2 and the r2 TOC will
- * get corrupted.
- *
- * Use a b +8 to jump over the load.
- */
-
- pop = PPC_INST_BRANCH | 8; /* b +8 */
-
- /*
- * Check what is in the next instruction. We can see ld r2,40(r1), but
- * on first pass after boot we will see mflr r0.
- */
- if (probe_kernel_read(&op, (void *)(ip+4), MCOUNT_INSN_SIZE)) {
- pr_err("Fetching op failed.\n");
- return -EFAULT;
- }
-
- if (op != PPC_INST_LD_TOC) {
- pr_err("Expected %08x found %08x\n", PPC_INST_LD_TOC, op);
- return -EINVAL;
- }
-#endif /* CC_USING_MPROFILE_KERNEL */
-
- if (patch_instruction((unsigned int *)ip, pop)) {
- pr_err("Patching NOP failed.\n");
- return -EPERM;
- }
-
- return 0;
-}
-
-#else /* !PPC64 */
-static int
-__ftrace_make_nop(struct module *mod,
- struct dyn_ftrace *rec, unsigned long addr)
-{
- unsigned int op;
- unsigned int jmp[4];
- unsigned long ip = rec->ip;
- unsigned long tramp;
-
- if (probe_kernel_read(&op, (void *)ip, MCOUNT_INSN_SIZE))
- return -EFAULT;
-
- /* Make sure that that this is still a 24bit jump */
- if (!is_bl_op(op)) {
- pr_err("Not expected bl: opcode is %x\n", op);
- return -EINVAL;
- }
-
- /* lets find where the pointer goes */
- tramp = find_bl_target(ip, op);
-
- /*
- * On PPC32 the trampoline looks like:
- * 0x3d, 0x80, 0x00, 0x00 lis r12,sym@ha
- * 0x39, 0x8c, 0x00, 0x00 addi r12,r12,sym@l
- * 0x7d, 0x89, 0x03, 0xa6 mtctr r12
- * 0x4e, 0x80, 0x04, 0x20 bctr
- */
-
- pr_devel("ip:%lx jumps to %lx", ip, tramp);
-
- /* Find where the trampoline jumps to */
- if (probe_kernel_read(jmp, (void *)tramp, sizeof(jmp))) {
- pr_err("Failed to read %lx\n", tramp);
- return -EFAULT;
- }
-
- pr_devel(" %08x %08x ", jmp[0], jmp[1]);
-
- /* verify that this is what we expect it to be */
- if (((jmp[0] & 0xffff0000) != 0x3d800000) ||
- ((jmp[1] & 0xffff0000) != 0x398c0000) ||
- (jmp[2] != 0x7d8903a6) ||
- (jmp[3] != 0x4e800420)) {
- pr_err("Not a trampoline\n");
- return -EINVAL;
- }
-
- tramp = (jmp[1] & 0xffff) |
- ((jmp[0] & 0xffff) << 16);
- if (tramp & 0x8000)
- tramp -= 0x10000;
-
- pr_devel(" %lx ", tramp);
-
- if (tramp != addr) {
- pr_err("Trampoline location %08lx does not match addr\n",
- tramp);
- return -EINVAL;
- }
-
- op = PPC_INST_NOP;
-
- if (patch_instruction((unsigned int *)ip, op))
- return -EPERM;
-
- return 0;
-}
-#endif /* PPC64 */
-#endif /* CONFIG_MODULES */
-
-int ftrace_make_nop(struct module *mod,
- struct dyn_ftrace *rec, unsigned long addr)
-{
- unsigned long ip = rec->ip;
- unsigned int old, new;
-
- /*
- * If the calling address is more that 24 bits away,
- * then we had to use a trampoline to make the call.
- * Otherwise just update the call site.
- */
- if (test_24bit_addr(ip, addr)) {
- /* within range */
- old = ftrace_call_replace(ip, addr, 1);
- new = PPC_INST_NOP;
- return ftrace_modify_code(ip, old, new);
- }
-
-#ifdef CONFIG_MODULES
- /*
- * Out of range jumps are called from modules.
- * We should either already have a pointer to the module
- * or it has been passed in.
- */
- if (!rec->arch.mod) {
- if (!mod) {
- pr_err("No module loaded addr=%lx\n", addr);
- return -EFAULT;
- }
- rec->arch.mod = mod;
- } else if (mod) {
- if (mod != rec->arch.mod) {
- pr_err("Record mod %p not equal to passed in mod %p\n",
- rec->arch.mod, mod);
- return -EINVAL;
- }
- /* nothing to do if mod == rec->arch.mod */
- } else
- mod = rec->arch.mod;
-
- return __ftrace_make_nop(mod, rec, addr);
-#else
- /* We should not get here without modules */
- return -EINVAL;
-#endif /* CONFIG_MODULES */
-}
-
-#ifdef CONFIG_MODULES
-#ifdef CONFIG_PPC64
-/*
- * Examine the existing instructions for __ftrace_make_call.
- * They should effectively be a NOP, and follow formal constraints,
- * depending on the ABI. Return false if they don't.
- */
-#ifndef CC_USING_MPROFILE_KERNEL
-static int
-expected_nop_sequence(void *ip, unsigned int op0, unsigned int op1)
-{
- /*
- * We expect to see:
- *
- * b +8
- * ld r2,XX(r1)
- *
- * The load offset is different depending on the ABI. For simplicity
- * just mask it out when doing the compare.
- */
- if ((op0 != 0x48000008) || ((op1 & 0xffff0000) != 0xe8410000))
- return 0;
- return 1;
-}
-#else
-static int
-expected_nop_sequence(void *ip, unsigned int op0, unsigned int op1)
-{
- /* look for patched "NOP" on ppc64 with -mprofile-kernel */
- if (op0 != PPC_INST_NOP)
- return 0;
- return 1;
-}
-#endif
-
-static int
-__ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
-{
- unsigned int op[2];
- void *ip = (void *)rec->ip;
-
- /* read where this goes */
- if (probe_kernel_read(op, ip, sizeof(op)))
- return -EFAULT;
-
- if (!expected_nop_sequence(ip, op[0], op[1])) {
- pr_err("Unexpected call sequence at %p: %x %x\n",
- ip, op[0], op[1]);
- return -EINVAL;
- }
-
- /* If we never set up a trampoline to ftrace_caller, then bail */
- if (!rec->arch.mod->arch.tramp) {
- pr_err("No ftrace trampoline\n");
- return -EINVAL;
- }
-
- /* Ensure branch is within 24 bits */
- if (!create_branch(ip, rec->arch.mod->arch.tramp, BRANCH_SET_LINK)) {
- pr_err("Branch out of range\n");
- return -EINVAL;
- }
-
- if (patch_branch(ip, rec->arch.mod->arch.tramp, BRANCH_SET_LINK)) {
- pr_err("REL24 out of range!\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
-int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
- unsigned long addr)
-{
- return ftrace_make_call(rec, addr);
-}
-#endif
-
-#else /* !CONFIG_PPC64: */
-static int
-__ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
-{
- unsigned int op;
- unsigned long ip = rec->ip;
-
- /* read where this goes */
- if (probe_kernel_read(&op, (void *)ip, MCOUNT_INSN_SIZE))
- return -EFAULT;
-
- /* It should be pointing to a nop */
- if (op != PPC_INST_NOP) {
- pr_err("Expected NOP but have %x\n", op);
- return -EINVAL;
- }
-
- /* If we never set up a trampoline to ftrace_caller, then bail */
- if (!rec->arch.mod->arch.tramp) {
- pr_err("No ftrace trampoline\n");
- return -EINVAL;
- }
-
- /* create the branch to the trampoline */
- op = create_branch((unsigned int *)ip,
- rec->arch.mod->arch.tramp, BRANCH_SET_LINK);
- if (!op) {
- pr_err("REL24 out of range!\n");
- return -EINVAL;
- }
-
- pr_devel("write to %lx\n", rec->ip);
-
- if (patch_instruction((unsigned int *)ip, op))
- return -EPERM;
-
- return 0;
-}
-#endif /* CONFIG_PPC64 */
-#endif /* CONFIG_MODULES */
-
-int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
-{
- unsigned long ip = rec->ip;
- unsigned int old, new;
-
- /*
- * If the calling address is more that 24 bits away,
- * then we had to use a trampoline to make the call.
- * Otherwise just update the call site.
- */
- if (test_24bit_addr(ip, addr)) {
- /* within range */
- old = PPC_INST_NOP;
- new = ftrace_call_replace(ip, addr, 1);
- return ftrace_modify_code(ip, old, new);
- }
-
-#ifdef CONFIG_MODULES
- /*
- * Out of range jumps are called from modules.
- * Being that we are converting from nop, it had better
- * already have a module defined.
- */
- if (!rec->arch.mod) {
- pr_err("No module loaded\n");
- return -EINVAL;
- }
-
- return __ftrace_make_call(rec, addr);
-#else
- /* We should not get here without modules */
- return -EINVAL;
-#endif /* CONFIG_MODULES */
-}
-
-int ftrace_update_ftrace_func(ftrace_func_t func)
-{
- unsigned long ip = (unsigned long)(&ftrace_call);
- unsigned int old, new;
- int ret;
-
- old = *(unsigned int *)&ftrace_call;
- new = ftrace_call_replace(ip, (unsigned long)func, 1);
- ret = ftrace_modify_code(ip, old, new);
-
- return ret;
-}
-
-static int __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
-{
- unsigned long ftrace_addr = (unsigned long)FTRACE_ADDR;
- int ret;
-
- ret = ftrace_update_record(rec, enable);
-
- switch (ret) {
- case FTRACE_UPDATE_IGNORE:
- return 0;
- case FTRACE_UPDATE_MAKE_CALL:
- return ftrace_make_call(rec, ftrace_addr);
- case FTRACE_UPDATE_MAKE_NOP:
- return ftrace_make_nop(NULL, rec, ftrace_addr);
- }
-
- return 0;
-}
-
-void ftrace_replace_code(int enable)
-{
- struct ftrace_rec_iter *iter;
- struct dyn_ftrace *rec;
- int ret;
-
- for (iter = ftrace_rec_iter_start(); iter;
- iter = ftrace_rec_iter_next(iter)) {
- rec = ftrace_rec_iter_record(iter);
- ret = __ftrace_replace_code(rec, enable);
- if (ret) {
- ftrace_bug(ret, rec);
- return;
- }
- }
-}
-
-/*
- * Use the default ftrace_modify_all_code, but without
- * stop_machine().
- */
-void arch_ftrace_update_code(int command)
-{
- ftrace_modify_all_code(command);
-}
-
-int __init ftrace_dyn_arch_init(void)
-{
- return 0;
-}
-#endif /* CONFIG_DYNAMIC_FTRACE */
-
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-
-#ifdef CONFIG_DYNAMIC_FTRACE
-extern void ftrace_graph_call(void);
-extern void ftrace_graph_stub(void);
-
-int ftrace_enable_ftrace_graph_caller(void)
-{
- unsigned long ip = (unsigned long)(&ftrace_graph_call);
- unsigned long addr = (unsigned long)(&ftrace_graph_caller);
- unsigned long stub = (unsigned long)(&ftrace_graph_stub);
- unsigned int old, new;
-
- old = ftrace_call_replace(ip, stub, 0);
- new = ftrace_call_replace(ip, addr, 0);
-
- return ftrace_modify_code(ip, old, new);
-}
-
-int ftrace_disable_ftrace_graph_caller(void)
-{
- unsigned long ip = (unsigned long)(&ftrace_graph_call);
- unsigned long addr = (unsigned long)(&ftrace_graph_caller);
- unsigned long stub = (unsigned long)(&ftrace_graph_stub);
- unsigned int old, new;
-
- old = ftrace_call_replace(ip, addr, 0);
- new = ftrace_call_replace(ip, stub, 0);
-
- return ftrace_modify_code(ip, old, new);
-}
-#endif /* CONFIG_DYNAMIC_FTRACE */
-
-/*
- * Hook the return address and push it in the stack of return addrs
- * in current thread info. Return the address we want to divert to.
- */
-unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip)
-{
- struct ftrace_graph_ent trace;
- unsigned long return_hooker;
-
- if (unlikely(ftrace_graph_is_dead()))
- goto out;
-
- if (unlikely(atomic_read(&current->tracing_graph_pause)))
- goto out;
-
- return_hooker = ppc_function_entry(return_to_handler);
-
- trace.func = ip;
- trace.depth = current->curr_ret_stack + 1;
-
- /* Only trace if the calling function expects to */
- if (!ftrace_graph_entry(&trace))
- goto out;
-
- if (ftrace_push_return_trace(parent, ip, &trace.depth, 0,
- NULL) == -EBUSY)
- goto out;
-
- parent = return_hooker;
-out:
- return parent;
-}
-#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
-
-#if defined(CONFIG_FTRACE_SYSCALLS) && defined(CONFIG_PPC64)
-unsigned long __init arch_syscall_addr(int nr)
-{
- return sys_call_table[nr*2];
-}
-#endif /* CONFIG_FTRACE_SYSCALLS && CONFIG_PPC64 */
-
-#ifdef PPC64_ELF_ABI_v1
-char *arch_ftrace_match_adjust(char *str, const char *search)
-{
- if (str[0] == '.' && search[0] != '.')
- return str + 1;
- else
- return str;
-}
-#endif /* PPC64_ELF_ABI_v1 */
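The deleted ftrace.c is presumably relocated into a trace/ subdirectory elsewhere in this diff rather than dropped outright, so its branch-decoding helpers survive. For reference, the is_bl_op()/find_bl_target() pair removed above boils down to the following stand-alone C (constants follow the ISA I-form branch encoding: primary opcode 18, a 24-bit LI field shifted left by 2, and the AA/LK bits):

#include <stdint.h>

/* A "bl" is an I-form branch with AA=0 and LK=1. */
int is_bl_op(uint32_t op)
{
	return (op & 0xfc000003) == 0x48000001;
}

/* Recover the absolute target of a relative "bl" located at address ip. */
uint64_t find_bl_target(uint64_t ip, uint32_t op)
{
	int32_t offset = op & 0x03fffffc;	/* LI field, already <<2 */

	if (offset & 0x02000000)		/* sign-extend the 26-bit offset */
		offset |= 0xfe000000;

	return ip + (int64_t)offset;
}
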
diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S
index 1607be7c0ef2..e22734278458 100644
--- a/arch/powerpc/kernel/head_32.S
+++ b/arch/powerpc/kernel/head_32.S
@@ -735,11 +735,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_NEED_DTLB_SW_LRU)
EXCEPTION(0x2c00, Trap_2c, unknown_exception, EXC_XFER_EE)
EXCEPTION(0x2d00, Trap_2d, unknown_exception, EXC_XFER_EE)
EXCEPTION(0x2e00, Trap_2e, unknown_exception, EXC_XFER_EE)
- EXCEPTION(0x2f00, MOLTrampoline, unknown_exception, EXC_XFER_EE_LITE)
-
- .globl mol_trampoline
- .set mol_trampoline, i0x2f00
- EXPORT_SYMBOL(mol_trampoline)
+ EXCEPTION(0x2f00, Trap_2f, unknown_exception, EXC_XFER_EE)
. = 0x3000
@@ -1278,16 +1274,6 @@ EXPORT_SYMBOL(empty_zero_page)
swapper_pg_dir:
.space PGD_TABLE_SIZE
- .globl intercept_table
-intercept_table:
- .long 0, 0, i0x200, i0x300, i0x400, 0, i0x600, i0x700
- .long i0x800, 0, 0, 0, 0, i0xd00, 0, 0
- .long 0, 0, 0, i0x1300, 0, 0, 0, 0
- .long 0, 0, 0, 0, 0, 0, 0, 0
- .long 0, 0, 0, 0, 0, 0, 0, 0
- .long 0, 0, 0, 0, 0, 0, 0, 0
-EXPORT_SYMBOL(intercept_table)
-
/* Room for two PTE pointers, usually the kernel and current user pointers
* to their respective root page table.
*/
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 1dc5eae2ced3..0ddc602b33a4 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -949,7 +949,8 @@ start_here_multiplatform:
LOAD_REG_ADDR(r3,init_thread_union)
/* set up a stack pointer */
- addi r1,r3,THREAD_SIZE
+ LOAD_REG_IMMEDIATE(r1,THREAD_SIZE)
+ add r1,r3,r1
li r0,0
stdu r0,-STACK_FRAME_OVERHEAD(r1)
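The switch from addi to LOAD_REG_IMMEDIATE above matters because addi encodes only a signed 16-bit immediate: a THREAD_SIZE of 16K still fits, but anything of 32K or larger does not, so the offset is now built in a scratch register first. A trivial user-space check of that encoding limit (the helper name is made up for illustration):

#include <stdbool.h>
#include <stdio.h>

static bool fits_si16(long value)
{
	/* addi's immediate field is a signed 16-bit quantity */
	return value >= -32768 && value <= 32767;
}

int main(void)
{
	long sizes[] = { 16 * 1024, 32 * 1024 };

	for (int i = 0; i < 2; i++)
		printf("THREAD_SIZE=%ldK fits an addi immediate: %s\n",
		       sizes[i] / 1024, fits_si16(sizes[i]) ? "yes" : "no");
	return 0;
}
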
diff --git a/arch/powerpc/kernel/idle_book3s.S b/arch/powerpc/kernel/idle_book3s.S
index 6fd08219248d..07d4e0ad60db 100644
--- a/arch/powerpc/kernel/idle_book3s.S
+++ b/arch/powerpc/kernel/idle_book3s.S
@@ -20,6 +20,7 @@
#include <asm/kvm_book3s_asm.h>
#include <asm/opal.h>
#include <asm/cpuidle.h>
+#include <asm/exception-64s.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/mmu.h>
@@ -94,12 +95,12 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_300)
core_idle_lock_held:
HMT_LOW
3: lwz r15,0(r14)
- andi. r15,r15,PNV_CORE_IDLE_LOCK_BIT
+ andis. r15,r15,PNV_CORE_IDLE_LOCK_BIT@h
bne 3b
HMT_MEDIUM
lwarx r15,0,r14
- andi. r9,r15,PNV_CORE_IDLE_LOCK_BIT
- bne core_idle_lock_held
+ andis. r9,r15,PNV_CORE_IDLE_LOCK_BIT@h
+ bne- core_idle_lock_held
blr
/*
@@ -113,7 +114,7 @@ core_idle_lock_held:
*
* Address to 'rfid' to in r5
*/
-_GLOBAL(pnv_powersave_common)
+pnv_powersave_common:
/* Use r3 to pass state nap/sleep/winkle */
/* NAP is a state loss, we create a regs frame on the
* stack, fill it up with the state we care about and
@@ -188,8 +189,8 @@ pnv_enter_arch207_idle_mode:
/* The following store to HSTATE_HWTHREAD_STATE(r13) */
/* MUST occur in real mode, i.e. with the MMU off, */
/* and the MMU must stay off until we clear this flag */
- /* and test HSTATE_HWTHREAD_REQ(r13) in the system */
- /* reset interrupt vector in exceptions-64s.S. */
+ /* and test HSTATE_HWTHREAD_REQ(r13) in */
+ /* pnv_powersave_wakeup in this file. */
/* The reason is that another thread can switch the */
/* MMU to a guest context whenever this flag is set */
/* to KVM_HWTHREAD_IN_IDLE, and if the MMU was on, */
@@ -209,15 +210,20 @@ pnv_enter_arch207_idle_mode:
/* Sleep or winkle */
lbz r7,PACA_THREAD_MASK(r13)
ld r14,PACA_CORE_IDLE_STATE_PTR(r13)
+ li r5,0
+ beq cr3,3f
+ lis r5,PNV_CORE_IDLE_WINKLE_COUNT@h
+3:
lwarx_loop1:
lwarx r15,0,r14
- andi. r9,r15,PNV_CORE_IDLE_LOCK_BIT
- bnel core_idle_lock_held
+ andis. r9,r15,PNV_CORE_IDLE_LOCK_BIT@h
+ bnel- core_idle_lock_held
+ add r15,r15,r5 /* Add if winkle */
andc r15,r15,r7 /* Clear thread bit */
- andi. r15,r15,PNV_CORE_IDLE_THREAD_BITS
+ andi. r9,r15,PNV_CORE_IDLE_THREAD_BITS
/*
* If cr0 = 0, then current thread is the last thread of the core entering
@@ -240,7 +246,7 @@ common_enter: /* common code for all the threads entering sleep or winkle */
IDLE_STATE_ENTER_SEQ_NORET(PPC_SLEEP)
fastsleep_workaround_at_entry:
- ori r15,r15,PNV_CORE_IDLE_LOCK_BIT
+ oris r15,r15,PNV_CORE_IDLE_LOCK_BIT@h
stwcx. r15,0,r14
bne- lwarx_loop1
isync
@@ -250,10 +256,10 @@ fastsleep_workaround_at_entry:
li r4,1
bl opal_config_cpu_idle_state
- /* Clear Lock bit */
- li r0,0
+ /* Unlock */
+ xoris r15,r15,PNV_CORE_IDLE_LOCK_BIT@h
lwsync
- stw r0,0(r14)
+ stw r15,0(r14)
b common_enter
enter_winkle:
@@ -301,8 +307,8 @@ power_enter_stop:
lwarx_loop_stop:
lwarx r15,0,r14
- andi. r9,r15,PNV_CORE_IDLE_LOCK_BIT
- bnel core_idle_lock_held
+ andis. r9,r15,PNV_CORE_IDLE_LOCK_BIT@h
+ bnel- core_idle_lock_held
andc r15,r15,r7 /* Clear thread bit */
stwcx. r15,0,r14
@@ -375,17 +381,113 @@ _GLOBAL(power9_idle_stop)
li r4,1
b pnv_powersave_common
/* No return */
+
/*
- * Called from reset vector. Check whether we have woken up with
- * hypervisor state loss. If yes, restore hypervisor state and return
- * back to reset vector.
+ * On waking up from stop 0,1,2 with ESL=1 on POWER9 DD1,
+ * HSPRG0 will be set to the HSPRG0 value of one of the
+ * threads in this core. Thus the value we have in r13
+ * may not be this thread's paca pointer.
+ *
+ * Fortunately, the TIR remains invariant. Since this thread's
+ * paca pointer is recorded in all its sibling's paca, we can
+ * correctly recover this thread's paca pointer if we
+ * know the index of this thread in the core.
+ *
+ * This index can be obtained from the TIR.
*
- * r13 - Contents of HSPRG0
+ * i.e., thread's position in the core = TIR.
+ * If this value is i, then this thread's paca is
+ * paca->thread_sibling_pacas[i].
+ */
+power9_dd1_recover_paca:
+ mfspr r4, SPRN_TIR
+ /*
+ * Since each entry in thread_sibling_pacas is 8 bytes
+ * we need to left-shift by 3 bits. Thus r4 = i * 8
+ */
+ sldi r4, r4, 3
+ /* Get &paca->thread_sibling_pacas[0] in r5 */
+ ld r5, PACA_SIBLING_PACA_PTRS(r13)
+ /* Load paca->thread_sibling_pacas[i] into r13 */
+ ldx r13, r4, r5
+ SET_PACA(r13)
+ /*
+ * Indicate that we have lost NVGPR state
+ * which needs to be restored from the stack.
+ */
+ li r3, 1
+ stb r0,PACA_NAPSTATELOST(r13)
+ blr
+
+/*
+ * Called from machine check handler for powersave wakeups.
+ * Low level machine check processing has already been done. Now just
+ * go through the wake up path to get everything in order.
+ *
+ * r3 - The original SRR1 value.
+ * Original SRR[01] have been clobbered.
+ * MSR_RI is clear.
+ */
+.global pnv_powersave_wakeup_mce
+pnv_powersave_wakeup_mce:
+ /* Set cr3 for pnv_powersave_wakeup */
+ rlwinm r11,r3,47-31,30,31
+ cmpwi cr3,r11,2
+
+ /*
+ * Now put the original SRR1 with SRR1_WAKEMCE_RESVD as the wake
+ * reason into SRR1, which allows reuse of the system reset wakeup
+ * code without being mistaken for another type of wakeup.
+ */
+ oris r3,r3,SRR1_WAKEMCE_RESVD@h
+ mtspr SPRN_SRR1,r3
+
+ b pnv_powersave_wakeup
+
+/*
+ * Called from reset vector for powersave wakeups.
* cr3 - set to gt if waking up with partial/complete hypervisor state loss
*/
-_GLOBAL(pnv_restore_hyp_resource)
+.global pnv_powersave_wakeup
+pnv_powersave_wakeup:
+ ld r2, PACATOC(r13)
+
BEGIN_FTR_SECTION
- ld r2,PACATOC(r13);
+BEGIN_FTR_SECTION_NESTED(70)
+ bl power9_dd1_recover_paca
+END_FTR_SECTION_NESTED_IFSET(CPU_FTR_POWER9_DD1, 70)
+ bl pnv_restore_hyp_resource_arch300
+FTR_SECTION_ELSE
+ bl pnv_restore_hyp_resource_arch207
+ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_300)
+
+ li r0,PNV_THREAD_RUNNING
+ stb r0,PACA_THREAD_IDLE_STATE(r13) /* Clear thread state */
+
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+ li r0,KVM_HWTHREAD_IN_KERNEL
+ stb r0,HSTATE_HWTHREAD_STATE(r13)
+ /* Order setting hwthread_state vs. testing hwthread_req */
+ sync
+ lbz r0,HSTATE_HWTHREAD_REQ(r13)
+ cmpwi r0,0
+ beq 1f
+ b kvm_start_guest
+1:
+#endif
+
+ /* Return SRR1 from power7_nap() */
+ mfspr r3,SPRN_SRR1
+ blt cr3,pnv_wakeup_noloss
+ b pnv_wakeup_loss
+
+/*
+ * Check whether we have woken up with hypervisor state loss.
+ * If yes, restore hypervisor state and return back to link.
+ *
+ * cr3 - set to gt if waking up with partial/complete hypervisor state loss
+ */
+pnv_restore_hyp_resource_arch300:
/*
* POWER ISA 3. Use PSSCR to determine if we
* are waking up from deep idle state
@@ -400,31 +502,19 @@ BEGIN_FTR_SECTION
*/
rldicl r5,r5,4,60
cmpd cr4,r5,r4
- bge cr4,pnv_wakeup_tb_loss
- /*
- * Waking up without hypervisor state loss. Return to
- * reset vector
- */
- blr
+ bge cr4,pnv_wakeup_tb_loss /* returns to caller */
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
+ blr /* Waking up without hypervisor state loss. */
+/* Same calling convention as arch300 */
+pnv_restore_hyp_resource_arch207:
/*
* POWER ISA 2.07 or less.
- * Check if last bit of HSPGR0 is set. This indicates whether we are
- * waking up from winkle.
+ * Check if we slept with sleep or winkle.
*/
- clrldi r5,r13,63
- clrrdi r13,r13,1
-
- /* Now that we are sure r13 is corrected, load TOC */
- ld r2,PACATOC(r13);
- cmpwi cr4,r5,1
- mtspr SPRN_HSPRG0,r13
-
- lbz r0,PACA_THREAD_IDLE_STATE(r13)
- cmpwi cr2,r0,PNV_THREAD_NAP
- bgt cr2,pnv_wakeup_tb_loss /* Either sleep or Winkle */
+ lbz r4,PACA_THREAD_IDLE_STATE(r13)
+ cmpwi cr2,r4,PNV_THREAD_NAP
+ bgt cr2,pnv_wakeup_tb_loss /* Either sleep or Winkle */
/*
* We fall through here if PACA_THREAD_IDLE_STATE shows we are waking
@@ -433,8 +523,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
*/
bgt cr3,.
- blr /* Return back to System Reset vector from where
- pnv_restore_hyp_resource was invoked */
+ blr /* Waking up without hypervisor state loss */
/*
* Called if waking up from idle state which can cause either partial or
@@ -444,9 +533,14 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
*
* r13 - PACA
* cr3 - gt if waking up with partial/complete hypervisor state loss
+ *
+ * If ISA300:
* cr4 - gt or eq if waking up from complete hypervisor state loss.
+ *
+ * If ISA207:
+ * r4 - PACA_THREAD_IDLE_STATE
*/
-_GLOBAL(pnv_wakeup_tb_loss)
+pnv_wakeup_tb_loss:
ld r1,PACAR1(r13)
/*
* Before entering any idle state, the NVGPRs are saved in the stack.
@@ -473,18 +567,19 @@ _GLOBAL(pnv_wakeup_tb_loss)
* is required to return back to reset vector after hypervisor state
* restore is complete.
*/
+ mr r18,r4
mflr r17
mfspr r16,SPRN_SRR1
BEGIN_FTR_SECTION
CHECK_HMI_INTERRUPT
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
- lbz r7,PACA_THREAD_MASK(r13)
ld r14,PACA_CORE_IDLE_STATE_PTR(r13)
-lwarx_loop2:
- lwarx r15,0,r14
- andi. r9,r15,PNV_CORE_IDLE_LOCK_BIT
+ lbz r7,PACA_THREAD_MASK(r13)
+
/*
+ * Take the core lock to synchronize against other threads.
+ *
* Lock bit is set in one of the 2 cases-
* a. In the sleep/winkle enter path, the last thread is executing
* fastsleep workaround code.
@@ -492,23 +587,93 @@ lwarx_loop2:
* workaround undo code or resyncing timebase or restoring context
* In either case loop until the lock bit is cleared.
*/
- bnel core_idle_lock_held
+1:
+ lwarx r15,0,r14
+ andis. r9,r15,PNV_CORE_IDLE_LOCK_BIT@h
+ bnel- core_idle_lock_held
+ oris r15,r15,PNV_CORE_IDLE_LOCK_BIT@h
+ stwcx. r15,0,r14
+ bne- 1b
+ isync
- cmpwi cr2,r15,0
+ andi. r9,r15,PNV_CORE_IDLE_THREAD_BITS
+ cmpwi cr2,r9,0
/*
* At this stage
* cr2 - eq if first thread to wakeup in core
* cr3- gt if waking up with partial/complete hypervisor state loss
+ * ISA300:
* cr4 - gt or eq if waking up from complete hypervisor state loss.
*/
- ori r15,r15,PNV_CORE_IDLE_LOCK_BIT
- stwcx. r15,0,r14
- bne- lwarx_loop2
- isync
-
BEGIN_FTR_SECTION
+ /*
+ * Were we in winkle?
+ * If yes, check if all threads were in winkle, decrement our
+ * winkle count, set all thread winkle bits if all were in winkle.
+ * Check if our thread has a winkle bit set, and set cr4 accordingly
+ * (to match ISA300, above). Pseudo-code for core idle state
+ * transitions for ISA207 is as follows (everything happens atomically
+ * due to store conditional and/or lock bit):
+ *
+ * nap_idle() { }
+ * nap_wake() { }
+ *
+ * sleep_idle()
+ * {
+ * core_idle_state &= ~thread_in_core
+ * }
+ *
+ * sleep_wake()
+ * {
+ * bool first_in_core, first_in_subcore;
+ *
+ * first_in_core = (core_idle_state & IDLE_THREAD_BITS) == 0;
+ * first_in_subcore = (core_idle_state & SUBCORE_SIBLING_MASK) == 0;
+ *
+ * core_idle_state |= thread_in_core;
+ * }
+ *
+ * winkle_idle()
+ * {
+ * core_idle_state &= ~thread_in_core;
+ * core_idle_state += 1 << WINKLE_COUNT_SHIFT;
+ * }
+ *
+ * winkle_wake()
+ * {
+ * bool first_in_core, first_in_subcore, winkle_state_lost;
+ *
+ * first_in_core = (core_idle_state & IDLE_THREAD_BITS) == 0;
+ * first_in_subcore = (core_idle_state & SUBCORE_SIBLING_MASK) == 0;
+ *
+ * core_idle_state |= thread_in_core;
+ *
+ * if ((core_idle_state & WINKLE_MASK) == (8 << WINKLE_COUNT_SHIFT))
+ * core_idle_state |= THREAD_WINKLE_BITS;
+ * core_idle_state -= 1 << WINKLE_COUNT_SHIFT;
+ *
+ * winkle_state_lost = core_idle_state &
+ * (thread_in_core << WINKLE_THREAD_SHIFT);
+ * core_idle_state &= ~(thread_in_core << WINKLE_THREAD_SHIFT);
+ * }
+ *
+ */
+ cmpwi r18,PNV_THREAD_WINKLE
+ bne 2f
+ andis. r9,r15,PNV_CORE_IDLE_WINKLE_COUNT_ALL_BIT@h
+ subis r15,r15,PNV_CORE_IDLE_WINKLE_COUNT@h
+ beq 2f
+ ori r15,r15,PNV_CORE_IDLE_THREAD_WINKLE_BITS /* all were winkle */
+2:
+ /* Shift thread bit to winkle mask, then test if this thread is set,
+ * and remove it from the winkle bits */
+ slwi r8,r7,8
+ and r8,r8,r15
+ andc r15,r15,r8
+ cmpwi cr4,r8,1 /* cr4 will be gt if our bit is set, lt if not */
+
lbz r4,PACA_SUBCORE_SIBLING_MASK(r13)
and r4,r4,r15
cmpwi r4,0 /* Check if first in subcore */
@@ -593,7 +758,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
mtspr SPRN_WORC,r4
clear_lock:
- andi. r15,r15,PNV_CORE_IDLE_THREAD_BITS
+ xoris r15,r15,PNV_CORE_IDLE_LOCK_BIT@h
lwsync
stw r15,0(r14)
@@ -651,8 +816,7 @@ hypervisor_state_restored:
mtspr SPRN_SRR1,r16
mtlr r17
- blr /* Return back to System Reset vector from where
- pnv_restore_hyp_resource was invoked */
+ blr /* return to pnv_powersave_wakeup */
fastsleep_workaround_at_exit:
li r3,1
@@ -664,7 +828,8 @@ fastsleep_workaround_at_exit:
* R3 here contains the value that will be returned to the caller
* of power7_nap.
*/
-_GLOBAL(pnv_wakeup_loss)
+.global pnv_wakeup_loss
+pnv_wakeup_loss:
ld r1,PACAR1(r13)
BEGIN_FTR_SECTION
CHECK_HMI_INTERRUPT
@@ -684,7 +849,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
* R3 here contains the value that will be returned to the caller
* of power7_nap.
*/
-_GLOBAL(pnv_wakeup_noloss)
+pnv_wakeup_noloss:
lbz r0,PACA_NAPSTATELOST(r13)
cmpwi r0,0
bne pnv_wakeup_loss
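The pseudo-code comment added above pnv_wakeup_tb_loss compresses a lot of state into one word. The same winkle accounting rendered in plain C, with illustrative mask values (the real PNV_CORE_IDLE_* layout lives in asm/cpuidle.h, and the real updates run under lwarx/stwcx. with the lock bit held):

#include <stdbool.h>
#include <stdint.h>

#define IDLE_THREAD_BITS	0x000000ffu	/* one bit per thread in the core */
#define THREAD_WINKLE_BITS	0x0000ff00u	/* "this thread was in winkle" */
#define WINKLE_COUNT_SHIFT	16
#define WINKLE_COUNT_UNIT	(1u << WINKLE_COUNT_SHIFT)
#define WINKLE_COUNT_MASK	0x00ff0000u

void winkle_idle(uint32_t *state, uint32_t thread_bit)
{
	*state &= ~thread_bit;			/* this thread leaves the core */
	*state += WINKLE_COUNT_UNIT;		/* one more winkled thread */
}

bool winkle_wake(uint32_t *state, uint32_t thread_bit)
{
	bool state_lost;

	*state |= thread_bit;			/* back in the core */

	/* If all 8 threads had winkled, remember that for each of them. */
	if ((*state & WINKLE_COUNT_MASK) == 8 * WINKLE_COUNT_UNIT)
		*state |= THREAD_WINKLE_BITS;
	*state -= WINKLE_COUNT_UNIT;

	/* Did this thread itself winkle (and so lose hypervisor state)? */
	state_lost = *state & (thread_bit << 8);
	*state &= ~(thread_bit << 8);

	return state_lost;
}
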
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index 5f202a566ec5..f2b724cd9e64 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -711,13 +711,16 @@ struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
return tbl;
}
-void iommu_free_table(struct iommu_table *tbl, const char *node_name)
+static void iommu_table_free(struct kref *kref)
{
unsigned long bitmap_sz;
unsigned int order;
+ struct iommu_table *tbl;
- if (!tbl)
- return;
+ tbl = container_of(kref, struct iommu_table, it_kref);
+
+ if (tbl->it_ops->free)
+ tbl->it_ops->free(tbl);
if (!tbl->it_map) {
kfree(tbl);
@@ -733,7 +736,7 @@ void iommu_free_table(struct iommu_table *tbl, const char *node_name)
/* verify that table contains no entries */
if (!bitmap_empty(tbl->it_map, tbl->it_size))
- pr_warn("%s: Unexpected TCEs for %s\n", __func__, node_name);
+ pr_warn("%s: Unexpected TCEs\n", __func__);
/* calculate bitmap size in bytes */
bitmap_sz = BITS_TO_LONGS(tbl->it_size) * sizeof(unsigned long);
@@ -746,6 +749,24 @@ void iommu_free_table(struct iommu_table *tbl, const char *node_name)
kfree(tbl);
}
+struct iommu_table *iommu_tce_table_get(struct iommu_table *tbl)
+{
+ if (kref_get_unless_zero(&tbl->it_kref))
+ return tbl;
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(iommu_tce_table_get);
+
+int iommu_tce_table_put(struct iommu_table *tbl)
+{
+ if (WARN_ON(!tbl))
+ return 0;
+
+ return kref_put(&tbl->it_kref, iommu_table_free);
+}
+EXPORT_SYMBOL_GPL(iommu_tce_table_put);
+
/* Creates TCEs for a user provided buffer. The user buffer must be
* contiguous real kernel storage (not vmalloc). The address passed here
* comprises a page address and offset into that page. The dma_addr_t
@@ -942,47 +963,36 @@ void iommu_flush_tce(struct iommu_table *tbl)
}
EXPORT_SYMBOL_GPL(iommu_flush_tce);
-int iommu_tce_clear_param_check(struct iommu_table *tbl,
- unsigned long ioba, unsigned long tce_value,
- unsigned long npages)
+int iommu_tce_check_ioba(unsigned long page_shift,
+ unsigned long offset, unsigned long size,
+ unsigned long ioba, unsigned long npages)
{
- /* tbl->it_ops->clear() does not support any value but 0 */
- if (tce_value)
- return -EINVAL;
+ unsigned long mask = (1UL << page_shift) - 1;
- if (ioba & ~IOMMU_PAGE_MASK(tbl))
+ if (ioba & mask)
return -EINVAL;
- ioba >>= tbl->it_page_shift;
- if (ioba < tbl->it_offset)
+ ioba >>= page_shift;
+ if (ioba < offset)
return -EINVAL;
- if ((ioba + npages) > (tbl->it_offset + tbl->it_size))
+ if ((ioba + 1) > (offset + size))
return -EINVAL;
return 0;
}
-EXPORT_SYMBOL_GPL(iommu_tce_clear_param_check);
+EXPORT_SYMBOL_GPL(iommu_tce_check_ioba);
-int iommu_tce_put_param_check(struct iommu_table *tbl,
- unsigned long ioba, unsigned long tce)
+int iommu_tce_check_gpa(unsigned long page_shift, unsigned long gpa)
{
- if (tce & ~IOMMU_PAGE_MASK(tbl))
- return -EINVAL;
-
- if (ioba & ~IOMMU_PAGE_MASK(tbl))
- return -EINVAL;
+ unsigned long mask = (1UL << page_shift) - 1;
- ioba >>= tbl->it_page_shift;
- if (ioba < tbl->it_offset)
- return -EINVAL;
-
- if ((ioba + 1) > (tbl->it_offset + tbl->it_size))
+ if (gpa & mask)
return -EINVAL;
return 0;
}
-EXPORT_SYMBOL_GPL(iommu_tce_put_param_check);
+EXPORT_SYMBOL_GPL(iommu_tce_check_gpa);
long iommu_tce_xchg(struct iommu_table *tbl, unsigned long entry,
unsigned long *hpa, enum dma_data_direction *direction)
@@ -1004,6 +1014,31 @@ long iommu_tce_xchg(struct iommu_table *tbl, unsigned long entry,
}
EXPORT_SYMBOL_GPL(iommu_tce_xchg);
+#ifdef CONFIG_PPC_BOOK3S_64
+long iommu_tce_xchg_rm(struct iommu_table *tbl, unsigned long entry,
+ unsigned long *hpa, enum dma_data_direction *direction)
+{
+ long ret;
+
+ ret = tbl->it_ops->exchange_rm(tbl, entry, hpa, direction);
+
+ if (!ret && ((*direction == DMA_FROM_DEVICE) ||
+ (*direction == DMA_BIDIRECTIONAL))) {
+ struct page *pg = realmode_pfn_to_page(*hpa >> PAGE_SHIFT);
+
+ if (likely(pg)) {
+ SetPageDirty(pg);
+ } else {
+ tbl->it_ops->exchange_rm(tbl, entry, hpa, direction);
+ ret = -EFAULT;
+ }
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(iommu_tce_xchg_rm);
+#endif
+
int iommu_take_ownership(struct iommu_table *tbl)
{
unsigned long flags, i, sz = (tbl->it_size + 7) >> 3;
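With iommu_free_table() turned into the static kref release above, table lifetime is now governed by the iommu_tce_table_get()/iommu_tce_table_put() pair. A sketch of the intended calling discipline; my_attach/my_detach and the slot pointer are invented for illustration, only the get/put semantics come from the patch:

#include <linux/errno.h>
#include <asm/iommu.h>

/* Illustrative consumer: keep a table pointer only while holding a reference. */
static int my_attach(struct iommu_table *tbl, struct iommu_table **slot)
{
	if (!iommu_tce_table_get(tbl))
		return -ENODEV;		/* table already being torn down */

	*slot = tbl;
	return 0;
}

static void my_detach(struct iommu_table **slot)
{
	struct iommu_table *tbl = *slot;

	*slot = NULL;
	iommu_tce_table_put(tbl);	/* the last put runs iommu_table_free() */
}
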
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index a018f5cae899..5c291df30fe3 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -65,7 +65,6 @@
#include <asm/machdep.h>
#include <asm/udbg.h>
#include <asm/smp.h>
-#include <asm/debug.h>
#include <asm/livepatch.h>
#include <asm/asm-prototypes.h>
@@ -442,46 +441,6 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
return sum;
}
-#ifdef CONFIG_HOTPLUG_CPU
-void migrate_irqs(void)
-{
- struct irq_desc *desc;
- unsigned int irq;
- static int warned;
- cpumask_var_t mask;
- const struct cpumask *map = cpu_online_mask;
-
- alloc_cpumask_var(&mask, GFP_KERNEL);
-
- for_each_irq_desc(irq, desc) {
- struct irq_data *data;
- struct irq_chip *chip;
-
- data = irq_desc_get_irq_data(desc);
- if (irqd_is_per_cpu(data))
- continue;
-
- chip = irq_data_get_irq_chip(data);
-
- cpumask_and(mask, irq_data_get_affinity_mask(data), map);
- if (cpumask_any(mask) >= nr_cpu_ids) {
- pr_warn("Breaking affinity for irq %i\n", irq);
- cpumask_copy(mask, map);
- }
- if (chip->irq_set_affinity)
- chip->irq_set_affinity(data, mask, true);
- else if (desc->action && !(warned++))
- pr_err("Cannot set affinity for irq %i\n", irq);
- }
-
- free_cpumask_var(mask);
-
- local_irq_enable();
- mdelay(1);
- local_irq_disable();
-}
-#endif
-
static inline void check_stack_overflow(void)
{
#ifdef CONFIG_DEBUG_STACKOVERFLOW
diff --git a/arch/powerpc/kernel/kprobes-ftrace.c b/arch/powerpc/kernel/kprobes-ftrace.c
new file mode 100644
index 000000000000..6c089d9757c9
--- /dev/null
+++ b/arch/powerpc/kernel/kprobes-ftrace.c
@@ -0,0 +1,104 @@
+/*
+ * Dynamic Ftrace based Kprobes Optimization
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) Hitachi Ltd., 2012
+ * Copyright 2016 Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
+ * IBM Corporation
+ */
+#include <linux/kprobes.h>
+#include <linux/ptrace.h>
+#include <linux/hardirq.h>
+#include <linux/preempt.h>
+#include <linux/ftrace.h>
+
+static nokprobe_inline
+int __skip_singlestep(struct kprobe *p, struct pt_regs *regs,
+ struct kprobe_ctlblk *kcb, unsigned long orig_nip)
+{
+ /*
+ * Emulate singlestep (and also recover regs->nip)
+ * as if there is a nop
+ */
+ regs->nip = (unsigned long)p->addr + MCOUNT_INSN_SIZE;
+ if (unlikely(p->post_handler)) {
+ kcb->kprobe_status = KPROBE_HIT_SSDONE;
+ p->post_handler(p, regs, 0);
+ }
+ __this_cpu_write(current_kprobe, NULL);
+ if (orig_nip)
+ regs->nip = orig_nip;
+ return 1;
+}
+
+int skip_singlestep(struct kprobe *p, struct pt_regs *regs,
+ struct kprobe_ctlblk *kcb)
+{
+ if (kprobe_ftrace(p))
+ return __skip_singlestep(p, regs, kcb, 0);
+ else
+ return 0;
+}
+NOKPROBE_SYMBOL(skip_singlestep);
+
+/* Ftrace callback handler for kprobes */
+void kprobe_ftrace_handler(unsigned long nip, unsigned long parent_nip,
+ struct ftrace_ops *ops, struct pt_regs *regs)
+{
+ struct kprobe *p;
+ struct kprobe_ctlblk *kcb;
+ unsigned long flags;
+
+ /* Disable irq for emulating a breakpoint and avoiding preempt */
+ local_irq_save(flags);
+ hard_irq_disable();
+
+ p = get_kprobe((kprobe_opcode_t *)nip);
+ if (unlikely(!p) || kprobe_disabled(p))
+ goto end;
+
+ kcb = get_kprobe_ctlblk();
+ if (kprobe_running()) {
+ kprobes_inc_nmissed_count(p);
+ } else {
+ unsigned long orig_nip = regs->nip;
+
+ /*
+ * On powerpc, NIP is *before* this instruction for the
+ * pre handler
+ */
+ regs->nip -= MCOUNT_INSN_SIZE;
+
+ __this_cpu_write(current_kprobe, p);
+ kcb->kprobe_status = KPROBE_HIT_ACTIVE;
+ if (!p->pre_handler || !p->pre_handler(p, regs))
+ __skip_singlestep(p, regs, kcb, orig_nip);
+ /*
+ * If pre_handler returns !0, it sets regs->nip and
+ * resets current kprobe.
+ */
+ }
+end:
+ local_irq_restore(flags);
+}
+NOKPROBE_SYMBOL(kprobe_ftrace_handler);
+
+int arch_prepare_kprobe_ftrace(struct kprobe *p)
+{
+ p->ainsn.insn = NULL;
+ p->ainsn.boostable = -1;
+ return 0;
+}
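Nothing in the new file is called by users directly; it kicks in whenever a kprobe lands on a function's ftrace site. A minimal module that would exercise this path (the probed symbol and the module boilerplate are illustrative, not part of the patch):

#include <linux/kprobes.h>
#include <linux/module.h>

static int my_pre(struct kprobe *p, struct pt_regs *regs)
{
	pr_info("hit %pS, nip=%lx\n", p->addr, regs->nip);
	return 0;	/* let the probed function run */
}

static struct kprobe kp = {
	.symbol_name	= "do_sys_open",	/* any ftrace-able function */
	.pre_handler	= my_pre,
};

static int __init kft_init(void)
{
	return register_kprobe(&kp);
}

static void __exit kft_exit(void)
{
	unregister_kprobe(&kp);
}

module_init(kft_init);
module_exit(kft_exit);
MODULE_LICENSE("GPL");
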
diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
index fce05a38851c..160ae0fa7d0d 100644
--- a/arch/powerpc/kernel/kprobes.c
+++ b/arch/powerpc/kernel/kprobes.c
@@ -35,6 +35,7 @@
#include <asm/code-patching.h>
#include <asm/cacheflush.h>
#include <asm/sstep.h>
+#include <asm/sections.h>
#include <linux/uaccess.h>
DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
@@ -42,7 +43,86 @@ DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
struct kretprobe_blackpoint kretprobe_blacklist[] = {{NULL, NULL}};
-int __kprobes arch_prepare_kprobe(struct kprobe *p)
+bool arch_within_kprobe_blacklist(unsigned long addr)
+{
+ return (addr >= (unsigned long)__kprobes_text_start &&
+ addr < (unsigned long)__kprobes_text_end) ||
+ (addr >= (unsigned long)_stext &&
+ addr < (unsigned long)__head_end);
+}
+
+kprobe_opcode_t *kprobe_lookup_name(const char *name, unsigned int offset)
+{
+ kprobe_opcode_t *addr;
+
+#ifdef PPC64_ELF_ABI_v2
+ /* PPC64 ABIv2 needs local entry point */
+ addr = (kprobe_opcode_t *)kallsyms_lookup_name(name);
+ if (addr && !offset) {
+#ifdef CONFIG_KPROBES_ON_FTRACE
+ unsigned long faddr;
+ /*
+ * Per livepatch.h, ftrace location is always within the first
+ * 16 bytes of a function on powerpc with -mprofile-kernel.
+ */
+ faddr = ftrace_location_range((unsigned long)addr,
+ (unsigned long)addr + 16);
+ if (faddr)
+ addr = (kprobe_opcode_t *)faddr;
+ else
+#endif
+ addr = (kprobe_opcode_t *)ppc_function_entry(addr);
+ }
+#elif defined(PPC64_ELF_ABI_v1)
+ /*
+ * 64bit powerpc ABIv1 uses function descriptors:
+ * - Check for the dot variant of the symbol first.
+ * - If that fails, try looking up the symbol provided.
+ *
+ * This ensures we always get to the actual symbol and not
+ * the descriptor.
+ *
+ * Also handle <module:symbol> format.
+ */
+ char dot_name[MODULE_NAME_LEN + 1 + KSYM_NAME_LEN];
+ const char *modsym;
+ bool dot_appended = false;
+ if ((modsym = strchr(name, ':')) != NULL) {
+ modsym++;
+ if (*modsym != '\0' && *modsym != '.') {
+ /* Convert to <module:.symbol> */
+ strncpy(dot_name, name, modsym - name);
+ dot_name[modsym - name] = '.';
+ dot_name[modsym - name + 1] = '\0';
+ strncat(dot_name, modsym,
+ sizeof(dot_name) - (modsym - name) - 2);
+ dot_appended = true;
+ } else {
+ dot_name[0] = '\0';
+ strncat(dot_name, name, sizeof(dot_name) - 1);
+ }
+ } else if (name[0] != '.') {
+ dot_name[0] = '.';
+ dot_name[1] = '\0';
+ strncat(dot_name, name, KSYM_NAME_LEN - 2);
+ dot_appended = true;
+ } else {
+ dot_name[0] = '\0';
+ strncat(dot_name, name, KSYM_NAME_LEN - 1);
+ }
+ addr = (kprobe_opcode_t *)kallsyms_lookup_name(dot_name);
+ if (!addr && dot_appended) {
+ /* Let's try the original non-dot symbol lookup */
+ addr = (kprobe_opcode_t *)kallsyms_lookup_name(name);
+ }
+#else
+ addr = (kprobe_opcode_t *)kallsyms_lookup_name(name);
+#endif
+
+ return addr;
+}
+
+int arch_prepare_kprobe(struct kprobe *p)
{
int ret = 0;
kprobe_opcode_t insn = *p->addr;
@@ -74,30 +154,34 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
p->ainsn.boostable = 0;
return ret;
}
+NOKPROBE_SYMBOL(arch_prepare_kprobe);
-void __kprobes arch_arm_kprobe(struct kprobe *p)
+void arch_arm_kprobe(struct kprobe *p)
{
*p->addr = BREAKPOINT_INSTRUCTION;
flush_icache_range((unsigned long) p->addr,
(unsigned long) p->addr + sizeof(kprobe_opcode_t));
}
+NOKPROBE_SYMBOL(arch_arm_kprobe);
-void __kprobes arch_disarm_kprobe(struct kprobe *p)
+void arch_disarm_kprobe(struct kprobe *p)
{
*p->addr = p->opcode;
flush_icache_range((unsigned long) p->addr,
(unsigned long) p->addr + sizeof(kprobe_opcode_t));
}
+NOKPROBE_SYMBOL(arch_disarm_kprobe);
-void __kprobes arch_remove_kprobe(struct kprobe *p)
+void arch_remove_kprobe(struct kprobe *p)
{
if (p->ainsn.insn) {
free_insn_slot(p->ainsn.insn, 0);
p->ainsn.insn = NULL;
}
}
+NOKPROBE_SYMBOL(arch_remove_kprobe);
-static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
+static nokprobe_inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
{
enable_single_step(regs);
@@ -110,37 +194,80 @@ static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
regs->nip = (unsigned long)p->ainsn.insn;
}
-static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
+static nokprobe_inline void save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
kcb->prev_kprobe.kp = kprobe_running();
kcb->prev_kprobe.status = kcb->kprobe_status;
kcb->prev_kprobe.saved_msr = kcb->kprobe_saved_msr;
}
-static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
+static nokprobe_inline void restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
kcb->kprobe_status = kcb->prev_kprobe.status;
kcb->kprobe_saved_msr = kcb->prev_kprobe.saved_msr;
}
-static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
+static nokprobe_inline void set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
struct kprobe_ctlblk *kcb)
{
__this_cpu_write(current_kprobe, p);
kcb->kprobe_saved_msr = regs->msr;
}
-void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
- struct pt_regs *regs)
+bool arch_function_offset_within_entry(unsigned long offset)
+{
+#ifdef PPC64_ELF_ABI_v2
+#ifdef CONFIG_KPROBES_ON_FTRACE
+ return offset <= 16;
+#else
+ return offset <= 8;
+#endif
+#else
+ return !offset;
+#endif
+}
+
+void arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs)
{
ri->ret_addr = (kprobe_opcode_t *)regs->link;
/* Replace the return addr with trampoline addr */
regs->link = (unsigned long)kretprobe_trampoline;
}
+NOKPROBE_SYMBOL(arch_prepare_kretprobe);
-int __kprobes kprobe_handler(struct pt_regs *regs)
+int try_to_emulate(struct kprobe *p, struct pt_regs *regs)
+{
+ int ret;
+ unsigned int insn = *p->ainsn.insn;
+
+ /* regs->nip is also adjusted if emulate_step returns 1 */
+ ret = emulate_step(regs, insn);
+ if (ret > 0) {
+ /*
+ * Once this instruction has been boosted
+ * successfully, set the boostable flag
+ */
+ if (unlikely(p->ainsn.boostable == 0))
+ p->ainsn.boostable = 1;
+ } else if (ret < 0) {
+ /*
+ * We don't allow kprobes on mtmsr(d)/rfi(d), etc.
+ * So, we should never get here... but, it's still
+ * good to catch them, just in case...
+ */
+ printk("Can't step on instruction %x\n", insn);
+ BUG();
+ } else if (ret == 0)
+ /* This instruction can't be boosted */
+ p->ainsn.boostable = -1;
+
+ return ret;
+}
+NOKPROBE_SYMBOL(try_to_emulate);
+
+int kprobe_handler(struct pt_regs *regs)
{
struct kprobe *p;
int ret = 0;
@@ -177,10 +304,17 @@ int __kprobes kprobe_handler(struct pt_regs *regs)
*/
save_previous_kprobe(kcb);
set_current_kprobe(p, regs, kcb);
- kcb->kprobe_saved_msr = regs->msr;
kprobes_inc_nmissed_count(p);
prepare_singlestep(p, regs);
kcb->kprobe_status = KPROBE_REENTER;
+ if (p->ainsn.boostable >= 0) {
+ ret = try_to_emulate(p, regs);
+
+ if (ret > 0) {
+ restore_previous_kprobe(kcb);
+ return 1;
+ }
+ }
return 1;
} else {
if (*addr != BREAKPOINT_INSTRUCTION) {
@@ -197,7 +331,9 @@ int __kprobes kprobe_handler(struct pt_regs *regs)
}
p = __this_cpu_read(current_kprobe);
if (p->break_handler && p->break_handler(p, regs)) {
- goto ss_probe;
+ if (!skip_singlestep(p, regs, kcb))
+ goto ss_probe;
+ ret = 1;
}
}
goto no_kprobe;
@@ -235,18 +371,9 @@ int __kprobes kprobe_handler(struct pt_regs *regs)
ss_probe:
if (p->ainsn.boostable >= 0) {
- unsigned int insn = *p->ainsn.insn;
+ ret = try_to_emulate(p, regs);
- /* regs->nip is also adjusted if emulate_step returns 1 */
- ret = emulate_step(regs, insn);
if (ret > 0) {
- /*
- * Once this instruction has been boosted
- * successfully, set the boostable flag
- */
- if (unlikely(p->ainsn.boostable == 0))
- p->ainsn.boostable = 1;
-
if (p->post_handler)
p->post_handler(p, regs, 0);
@@ -254,17 +381,7 @@ ss_probe:
reset_current_kprobe();
preempt_enable_no_resched();
return 1;
- } else if (ret < 0) {
- /*
- * We don't allow kprobes on mtmsr(d)/rfi(d), etc.
- * So, we should never get here... but, its still
- * good to catch them, just in case...
- */
- printk("Can't step on instruction %x\n", insn);
- BUG();
- } else if (ret == 0)
- /* This instruction can't be boosted */
- p->ainsn.boostable = -1;
+ }
}
prepare_singlestep(p, regs);
kcb->kprobe_status = KPROBE_HIT_SS;
@@ -274,6 +391,7 @@ no_kprobe:
preempt_enable_no_resched();
return ret;
}
+NOKPROBE_SYMBOL(kprobe_handler);
/*
* Function return probe trampoline:
@@ -291,8 +409,7 @@ asm(".global kretprobe_trampoline\n"
/*
* Called when the probe at kretprobe trampoline is hit
*/
-static int __kprobes trampoline_probe_handler(struct kprobe *p,
- struct pt_regs *regs)
+static int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
{
struct kretprobe_instance *ri = NULL;
struct hlist_head *head, empty_rp;
@@ -361,6 +478,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
*/
return 1;
}
+NOKPROBE_SYMBOL(trampoline_probe_handler);
/*
* Called after single-stepping. p->addr is the address of the
@@ -370,7 +488,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
* single-stepped a copy of the instruction. The address of this
* copy is p->ainsn.insn.
*/
-int __kprobes kprobe_post_handler(struct pt_regs *regs)
+int kprobe_post_handler(struct pt_regs *regs)
{
struct kprobe *cur = kprobe_running();
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
@@ -410,8 +528,9 @@ out:
return 1;
}
+NOKPROBE_SYMBOL(kprobe_post_handler);
-int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
+int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
struct kprobe *cur = kprobe_running();
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
@@ -474,13 +593,15 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
}
return 0;
}
+NOKPROBE_SYMBOL(kprobe_fault_handler);
unsigned long arch_deref_entry_point(void *entry)
{
return ppc_global_function_entry(entry);
}
+NOKPROBE_SYMBOL(arch_deref_entry_point);
-int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
+int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
struct jprobe *jp = container_of(p, struct jprobe, kp);
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
@@ -497,17 +618,20 @@ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
return 1;
}
+NOKPROBE_SYMBOL(setjmp_pre_handler);
-void __used __kprobes jprobe_return(void)
+void __used jprobe_return(void)
{
asm volatile("trap" ::: "memory");
}
+NOKPROBE_SYMBOL(jprobe_return);
-static void __used __kprobes jprobe_return_end(void)
+static void __used jprobe_return_end(void)
{
-};
+}
+NOKPROBE_SYMBOL(jprobe_return_end);
-int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
+int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
@@ -520,6 +644,7 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
preempt_enable_no_resched();
return 1;
}
+NOKPROBE_SYMBOL(longjmp_break_handler);
static struct kprobe trampoline_p = {
.addr = (kprobe_opcode_t *) &kretprobe_trampoline,
@@ -531,10 +656,11 @@ int __init arch_init_kprobes(void)
return register_kprobe(&trampoline_p);
}
-int __kprobes arch_trampoline_kprobe(struct kprobe *p)
+int arch_trampoline_kprobe(struct kprobe *p)
{
if (p->addr == (kprobe_opcode_t *)&kretprobe_trampoline)
return 1;
return 0;
}
+NOKPROBE_SYMBOL(arch_trampoline_kprobe);
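The ELFv1 branch of the new kprobe_lookup_name() above is mostly string surgery: turn "symbol" into ".symbol" (and "module:symbol" into "module:.symbol") so the lookup hits the function's text entry rather than its descriptor. The same transformation as a stand-alone program; the buffer size and helper name are made up:

#include <stdio.h>
#include <string.h>

#define NAME_MAX_LEN 128

static void make_dot_name(const char *name, char out[NAME_MAX_LEN])
{
	const char *colon = strchr(name, ':');

	if (colon && colon[1] != '\0' && colon[1] != '.') {
		/* "module:symbol" -> "module:.symbol" */
		snprintf(out, NAME_MAX_LEN, "%.*s.%s",
			 (int)(colon - name + 1), name, colon + 1);
	} else if (!colon && name[0] != '.') {
		/* "symbol" -> ".symbol" */
		snprintf(out, NAME_MAX_LEN, ".%s", name);
	} else {
		/* already a dot symbol (or "module:.symbol"): keep as-is */
		snprintf(out, NAME_MAX_LEN, "%s", name);
	}
}

int main(void)
{
	char buf[NAME_MAX_LEN];

	make_dot_name("vfs_read", buf);
	printf("%s\n", buf);			/* .vfs_read */
	make_dot_name("ext4:ext4_sync_fs", buf);
	printf("%s\n", buf);			/* ext4:.ext4_sync_fs */
	return 0;
}
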
diff --git a/arch/powerpc/kernel/mce.c b/arch/powerpc/kernel/mce.c
index a1475e6aef3a..5f9eada3519b 100644
--- a/arch/powerpc/kernel/mce.c
+++ b/arch/powerpc/kernel/mce.c
@@ -221,6 +221,8 @@ static void machine_check_process_queued_event(struct irq_work *work)
{
int index;
+ add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);
+
/*
* For now just print it to console.
* TODO: log this error event to FSP or nvram.
@@ -228,12 +230,13 @@ static void machine_check_process_queued_event(struct irq_work *work)
while (__this_cpu_read(mce_queue_count) > 0) {
index = __this_cpu_read(mce_queue_count) - 1;
machine_check_print_event_info(
- this_cpu_ptr(&mce_event_queue[index]));
+ this_cpu_ptr(&mce_event_queue[index]), false);
__this_cpu_dec(mce_queue_count);
}
}
-void machine_check_print_event_info(struct machine_check_event *evt)
+void machine_check_print_event_info(struct machine_check_event *evt,
+ bool user_mode)
{
const char *level, *sevstr, *subtype;
static const char *mc_ue_types[] = {
@@ -310,7 +313,16 @@ void machine_check_print_event_info(struct machine_check_event *evt)
printk("%s%s Machine check interrupt [%s]\n", level, sevstr,
evt->disposition == MCE_DISPOSITION_RECOVERED ?
- "Recovered" : "[Not recovered");
+ "Recovered" : "Not recovered");
+
+ if (user_mode) {
+ printk("%s NIP: [%016llx] PID: %d Comm: %s\n", level,
+ evt->srr0, current->pid, current->comm);
+ } else {
+ printk("%s NIP [%016llx]: %pS\n", level, evt->srr0,
+ (void *)evt->srr0);
+ }
+
printk("%s Initiator: %s\n", level,
evt->initiator == MCE_INITIATOR_CPU ? "CPU" : "Unknown");
switch (evt->error_type) {
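The new user_mode argument decides between the PID/comm line and the %pS-resolved kernel NIP. A sketch of how a report site is expected to drive it; report_mce() is invented for illustration, while user_mode(regs) and the updated machine_check_print_event_info() signature are as in the patch:

#include <linux/ptrace.h>
#include <asm/mce.h>

static void report_mce(struct pt_regs *regs, struct machine_check_event *evt)
{
	/* MSR_PR at interrupt time tells us whether userspace was running. */
	machine_check_print_event_info(evt, user_mode(regs));
}
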
diff --git a/arch/powerpc/kernel/mce_power.c b/arch/powerpc/kernel/mce_power.c
index 763d6f58caa8..f913139bb0c2 100644
--- a/arch/powerpc/kernel/mce_power.c
+++ b/arch/powerpc/kernel/mce_power.c
@@ -72,10 +72,14 @@ void __flush_tlb_power8(unsigned int action)
void __flush_tlb_power9(unsigned int action)
{
+ unsigned int num_sets;
+
if (radix_enabled())
- flush_tlb_206(POWER9_TLB_SETS_RADIX, action);
+ num_sets = POWER9_TLB_SETS_RADIX;
+ else
+ num_sets = POWER9_TLB_SETS_HASH;
- flush_tlb_206(POWER9_TLB_SETS_HASH, action);
+ flush_tlb_206(num_sets, action);
}
@@ -147,159 +151,365 @@ static int mce_flush(int what)
return 0;
}
-static int mce_handle_flush_derrors(uint64_t dsisr, uint64_t slb, uint64_t tlb, uint64_t erat)
-{
- if ((dsisr & slb) && mce_flush(MCE_FLUSH_SLB))
- dsisr &= ~slb;
- if ((dsisr & erat) && mce_flush(MCE_FLUSH_ERAT))
- dsisr &= ~erat;
- if ((dsisr & tlb) && mce_flush(MCE_FLUSH_TLB))
- dsisr &= ~tlb;
- /* Any other errors we don't understand? */
- if (dsisr)
- return 0;
- return 1;
-}
-
-static long mce_handle_derror(uint64_t dsisr, uint64_t slb_error_bits)
+#define SRR1_MC_LOADSTORE(srr1) ((srr1) & PPC_BIT(42))
+
+struct mce_ierror_table {
+ unsigned long srr1_mask;
+ unsigned long srr1_value;
+ bool nip_valid; /* nip is a valid indicator of faulting address */
+ unsigned int error_type;
+ unsigned int error_subtype;
+ unsigned int initiator;
+ unsigned int severity;
+};
+
+static const struct mce_ierror_table mce_p7_ierror_table[] = {
+{ 0x00000000001c0000, 0x0000000000040000, true,
+ MCE_ERROR_TYPE_UE, MCE_UE_ERROR_IFETCH,
+ MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
+{ 0x00000000001c0000, 0x0000000000080000, true,
+ MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_PARITY,
+ MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
+{ 0x00000000001c0000, 0x00000000000c0000, true,
+ MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_MULTIHIT,
+ MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
+{ 0x00000000001c0000, 0x0000000000100000, true,
+ MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_INDETERMINATE, /* BOTH */
+ MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
+{ 0x00000000001c0000, 0x0000000000140000, true,
+ MCE_ERROR_TYPE_TLB, MCE_TLB_ERROR_MULTIHIT,
+ MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
+{ 0x00000000001c0000, 0x0000000000180000, true,
+ MCE_ERROR_TYPE_UE, MCE_UE_ERROR_PAGE_TABLE_WALK_IFETCH,
+ MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
+{ 0x00000000001c0000, 0x00000000001c0000, true,
+ MCE_ERROR_TYPE_UE, MCE_UE_ERROR_IFETCH,
+ MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
+{ 0, 0, 0, 0, 0, 0 } };
+
+static const struct mce_ierror_table mce_p8_ierror_table[] = {
+{ 0x00000000081c0000, 0x0000000000040000, true,
+ MCE_ERROR_TYPE_UE, MCE_UE_ERROR_IFETCH,
+ MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
+{ 0x00000000081c0000, 0x0000000000080000, true,
+ MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_PARITY,
+ MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
+{ 0x00000000081c0000, 0x00000000000c0000, true,
+ MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_MULTIHIT,
+ MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
+{ 0x00000000081c0000, 0x0000000000100000, true,
+ MCE_ERROR_TYPE_ERAT,MCE_ERAT_ERROR_MULTIHIT,
+ MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
+{ 0x00000000081c0000, 0x0000000000140000, true,
+ MCE_ERROR_TYPE_TLB, MCE_TLB_ERROR_MULTIHIT,
+ MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
+{ 0x00000000081c0000, 0x0000000000180000, true,
+ MCE_ERROR_TYPE_UE, MCE_UE_ERROR_PAGE_TABLE_WALK_IFETCH,
+ MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
+{ 0x00000000081c0000, 0x00000000001c0000, true,
+ MCE_ERROR_TYPE_UE, MCE_UE_ERROR_IFETCH,
+ MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
+{ 0x00000000081c0000, 0x0000000008000000, true,
+ MCE_ERROR_TYPE_LINK,MCE_LINK_ERROR_IFETCH_TIMEOUT,
+ MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
+{ 0x00000000081c0000, 0x0000000008040000, true,
+ MCE_ERROR_TYPE_LINK,MCE_LINK_ERROR_PAGE_TABLE_WALK_IFETCH_TIMEOUT,
+ MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
+{ 0, 0, 0, 0, 0, 0 } };
+
+static const struct mce_ierror_table mce_p9_ierror_table[] = {
+{ 0x00000000081c0000, 0x0000000000040000, true,
+ MCE_ERROR_TYPE_UE, MCE_UE_ERROR_IFETCH,
+ MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
+{ 0x00000000081c0000, 0x0000000000080000, true,
+ MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_PARITY,
+ MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
+{ 0x00000000081c0000, 0x00000000000c0000, true,
+ MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_MULTIHIT,
+ MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
+{ 0x00000000081c0000, 0x0000000000100000, true,
+ MCE_ERROR_TYPE_ERAT,MCE_ERAT_ERROR_MULTIHIT,
+ MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
+{ 0x00000000081c0000, 0x0000000000140000, true,
+ MCE_ERROR_TYPE_TLB, MCE_TLB_ERROR_MULTIHIT,
+ MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
+{ 0x00000000081c0000, 0x0000000000180000, true,
+ MCE_ERROR_TYPE_UE, MCE_UE_ERROR_PAGE_TABLE_WALK_IFETCH,
+ MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
+{ 0x00000000081c0000, 0x0000000008000000, true,
+ MCE_ERROR_TYPE_LINK,MCE_LINK_ERROR_IFETCH_TIMEOUT,
+ MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
+{ 0x00000000081c0000, 0x0000000008040000, true,
+ MCE_ERROR_TYPE_LINK,MCE_LINK_ERROR_PAGE_TABLE_WALK_IFETCH_TIMEOUT,
+ MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
+{ 0x00000000081c0000, 0x00000000080c0000, true,
+ MCE_ERROR_TYPE_RA, MCE_RA_ERROR_IFETCH,
+ MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
+{ 0x00000000081c0000, 0x0000000008100000, true,
+ MCE_ERROR_TYPE_RA, MCE_RA_ERROR_PAGE_TABLE_WALK_IFETCH,
+ MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
+{ 0x00000000081c0000, 0x0000000008140000, false,
+ MCE_ERROR_TYPE_RA, MCE_RA_ERROR_STORE,
+ MCE_INITIATOR_CPU, MCE_SEV_FATAL, }, /* ASYNC is fatal */
+{ 0x00000000081c0000, 0x0000000008180000, false,
+ MCE_ERROR_TYPE_LINK,MCE_LINK_ERROR_STORE_TIMEOUT,
+ MCE_INITIATOR_CPU, MCE_SEV_FATAL, }, /* ASYNC is fatal */
+{ 0x00000000081c0000, 0x00000000081c0000, true,
+ MCE_ERROR_TYPE_RA, MCE_RA_ERROR_PAGE_TABLE_WALK_IFETCH_FOREIGN,
+ MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
+{ 0, 0, 0, 0, 0, 0 } };
+
+struct mce_derror_table {
+ unsigned long dsisr_value;
+ bool dar_valid; /* dar is a valid indicator of faulting address */
+ unsigned int error_type;
+ unsigned int error_subtype;
+ unsigned int initiator;
+ unsigned int severity;
+};
+
+static const struct mce_derror_table mce_p7_derror_table[] = {
+{ 0x00008000, false,
+ MCE_ERROR_TYPE_UE, MCE_UE_ERROR_LOAD_STORE,
+ MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
+{ 0x00004000, true,
+ MCE_ERROR_TYPE_UE, MCE_UE_ERROR_PAGE_TABLE_WALK_LOAD_STORE,
+ MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
+{ 0x00000800, true,
+ MCE_ERROR_TYPE_ERAT, MCE_ERAT_ERROR_MULTIHIT,
+ MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
+{ 0x00000400, true,
+ MCE_ERROR_TYPE_TLB, MCE_TLB_ERROR_MULTIHIT,
+ MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
+{ 0x00000100, true,
+ MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_PARITY,
+ MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
+{ 0x00000080, true,
+ MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_MULTIHIT,
+ MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
+{ 0x00000040, true,
+ MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_INDETERMINATE, /* BOTH */
+ MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
+{ 0, false, 0, 0, 0, 0 } };
+
+static const struct mce_derror_table mce_p8_derror_table[] = {
+{ 0x00008000, false,
+ MCE_ERROR_TYPE_UE, MCE_UE_ERROR_LOAD_STORE,
+ MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
+{ 0x00004000, true,
+ MCE_ERROR_TYPE_UE, MCE_UE_ERROR_PAGE_TABLE_WALK_LOAD_STORE,
+ MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
+{ 0x00002000, true,
+ MCE_ERROR_TYPE_LINK, MCE_LINK_ERROR_LOAD_TIMEOUT,
+ MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
+{ 0x00001000, true,
+ MCE_ERROR_TYPE_LINK, MCE_LINK_ERROR_PAGE_TABLE_WALK_LOAD_STORE_TIMEOUT,
+ MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
+{ 0x00000800, true,
+ MCE_ERROR_TYPE_ERAT, MCE_ERAT_ERROR_MULTIHIT,
+ MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
+{ 0x00000400, true,
+ MCE_ERROR_TYPE_TLB, MCE_TLB_ERROR_MULTIHIT,
+ MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
+{ 0x00000200, true,
+ MCE_ERROR_TYPE_ERAT, MCE_ERAT_ERROR_MULTIHIT, /* SECONDARY ERAT */
+ MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
+{ 0x00000100, true,
+ MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_PARITY,
+ MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
+{ 0x00000080, true,
+ MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_MULTIHIT,
+ MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
+{ 0, false, 0, 0, 0, 0 } };
+
+static const struct mce_derror_table mce_p9_derror_table[] = {
+{ 0x00008000, false,
+ MCE_ERROR_TYPE_UE, MCE_UE_ERROR_LOAD_STORE,
+ MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
+{ 0x00004000, true,
+ MCE_ERROR_TYPE_UE, MCE_UE_ERROR_PAGE_TABLE_WALK_LOAD_STORE,
+ MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
+{ 0x00002000, true,
+ MCE_ERROR_TYPE_LINK, MCE_LINK_ERROR_LOAD_TIMEOUT,
+ MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
+{ 0x00001000, true,
+ MCE_ERROR_TYPE_LINK, MCE_LINK_ERROR_PAGE_TABLE_WALK_LOAD_STORE_TIMEOUT,
+ MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
+{ 0x00000800, true,
+ MCE_ERROR_TYPE_ERAT, MCE_ERAT_ERROR_MULTIHIT,
+ MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
+{ 0x00000400, true,
+ MCE_ERROR_TYPE_TLB, MCE_TLB_ERROR_MULTIHIT,
+ MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
+{ 0x00000200, false,
+ MCE_ERROR_TYPE_USER, MCE_USER_ERROR_TLBIE,
+ MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
+{ 0x00000100, true,
+ MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_PARITY,
+ MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
+{ 0x00000080, true,
+ MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_MULTIHIT,
+ MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
+{ 0x00000040, true,
+ MCE_ERROR_TYPE_RA, MCE_RA_ERROR_LOAD,
+ MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
+{ 0x00000020, false,
+ MCE_ERROR_TYPE_RA, MCE_RA_ERROR_PAGE_TABLE_WALK_LOAD_STORE,
+ MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
+{ 0x00000010, false,
+ MCE_ERROR_TYPE_RA, MCE_RA_ERROR_PAGE_TABLE_WALK_LOAD_STORE_FOREIGN,
+ MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
+{ 0x00000008, false,
+ MCE_ERROR_TYPE_RA, MCE_RA_ERROR_LOAD_STORE_FOREIGN,
+ MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
+{ 0, false, 0, 0, 0, 0 } };
+
+static int mce_handle_ierror(struct pt_regs *regs,
+ const struct mce_ierror_table table[],
+ struct mce_error_info *mce_err, uint64_t *addr)
{
- long handled = 1;
+ uint64_t srr1 = regs->msr;
+ int handled = 0;
+ int i;
+
+ *addr = 0;
+
+ for (i = 0; table[i].srr1_mask; i++) {
+ if ((srr1 & table[i].srr1_mask) != table[i].srr1_value)
+ continue;
+
+ /* attempt to correct the error */
+ switch (table[i].error_type) {
+ case MCE_ERROR_TYPE_SLB:
+ handled = mce_flush(MCE_FLUSH_SLB);
+ break;
+ case MCE_ERROR_TYPE_ERAT:
+ handled = mce_flush(MCE_FLUSH_ERAT);
+ break;
+ case MCE_ERROR_TYPE_TLB:
+ handled = mce_flush(MCE_FLUSH_TLB);
+ break;
+ }
- /*
- * flush and reload SLBs for SLB errors and flush TLBs for TLB errors.
- * reset the error bits whenever we handle them so that at the end
- * we can check whether we handled all of them or not.
- * */
-#ifdef CONFIG_PPC_STD_MMU_64
- if (dsisr & slb_error_bits) {
- flush_and_reload_slb();
- /* reset error bits */
- dsisr &= ~(slb_error_bits);
- }
- if (dsisr & P7_DSISR_MC_TLB_MULTIHIT_MFTLB) {
- if (cur_cpu_spec && cur_cpu_spec->flush_tlb)
- cur_cpu_spec->flush_tlb(TLB_INVAL_SCOPE_GLOBAL);
- /* reset error bits */
- dsisr &= ~P7_DSISR_MC_TLB_MULTIHIT_MFTLB;
+ /* now fill in mce_error_info */
+ mce_err->error_type = table[i].error_type;
+ switch (table[i].error_type) {
+ case MCE_ERROR_TYPE_UE:
+ mce_err->u.ue_error_type = table[i].error_subtype;
+ break;
+ case MCE_ERROR_TYPE_SLB:
+ mce_err->u.slb_error_type = table[i].error_subtype;
+ break;
+ case MCE_ERROR_TYPE_ERAT:
+ mce_err->u.erat_error_type = table[i].error_subtype;
+ break;
+ case MCE_ERROR_TYPE_TLB:
+ mce_err->u.tlb_error_type = table[i].error_subtype;
+ break;
+ case MCE_ERROR_TYPE_USER:
+ mce_err->u.user_error_type = table[i].error_subtype;
+ break;
+ case MCE_ERROR_TYPE_RA:
+ mce_err->u.ra_error_type = table[i].error_subtype;
+ break;
+ case MCE_ERROR_TYPE_LINK:
+ mce_err->u.link_error_type = table[i].error_subtype;
+ break;
+ }
+ mce_err->severity = table[i].severity;
+ mce_err->initiator = table[i].initiator;
+ if (table[i].nip_valid)
+ *addr = regs->nip;
+ return handled;
}
-#endif
- /* Any other errors we don't understand? */
- if (dsisr & 0xffffffffUL)
- handled = 0;
- return handled;
-}
+ mce_err->error_type = MCE_ERROR_TYPE_UNKNOWN;
+ mce_err->severity = MCE_SEV_ERROR_SYNC;
+ mce_err->initiator = MCE_INITIATOR_CPU;
-static long mce_handle_derror_p7(uint64_t dsisr)
-{
- return mce_handle_derror(dsisr, P7_DSISR_MC_SLB_ERRORS);
+ return 0;
}
-static long mce_handle_common_ierror(uint64_t srr1)
+static int mce_handle_derror(struct pt_regs *regs,
+ const struct mce_derror_table table[],
+ struct mce_error_info *mce_err, uint64_t *addr)
{
- long handled = 0;
-
- switch (P7_SRR1_MC_IFETCH(srr1)) {
- case 0:
- break;
-#ifdef CONFIG_PPC_STD_MMU_64
- case P7_SRR1_MC_IFETCH_SLB_PARITY:
- case P7_SRR1_MC_IFETCH_SLB_MULTIHIT:
- /* flush and reload SLBs for SLB errors. */
- flush_and_reload_slb();
- handled = 1;
- break;
- case P7_SRR1_MC_IFETCH_TLB_MULTIHIT:
- if (cur_cpu_spec && cur_cpu_spec->flush_tlb) {
- cur_cpu_spec->flush_tlb(TLB_INVAL_SCOPE_GLOBAL);
- handled = 1;
+ uint64_t dsisr = regs->dsisr;
+ int handled = 0;
+ int found = 0;
+ int i;
+
+ *addr = 0;
+
+ for (i = 0; table[i].dsisr_value; i++) {
+ if (!(dsisr & table[i].dsisr_value))
+ continue;
+
+ /* attempt to correct the error */
+ switch (table[i].error_type) {
+ case MCE_ERROR_TYPE_SLB:
+ if (mce_flush(MCE_FLUSH_SLB))
+ handled = 1;
+ break;
+ case MCE_ERROR_TYPE_ERAT:
+ if (mce_flush(MCE_FLUSH_ERAT))
+ handled = 1;
+ break;
+ case MCE_ERROR_TYPE_TLB:
+ if (mce_flush(MCE_FLUSH_TLB))
+ handled = 1;
+ break;
}
- break;
-#endif
- default:
- break;
- }
- return handled;
-}
-
-static long mce_handle_ierror_p7(uint64_t srr1)
-{
- long handled = 0;
-
- handled = mce_handle_common_ierror(srr1);
+ /*
+ * Attempt to handle all conditions that are set, but report
+ * only the first match. Keep uncorrectable errors first in
+ * the table so that they take precedence.
+ */
+ if (found)
+ continue;
+
+ /* now fill in mce_error_info */
+ mce_err->error_type = table[i].error_type;
+ switch (table[i].error_type) {
+ case MCE_ERROR_TYPE_UE:
+ mce_err->u.ue_error_type = table[i].error_subtype;
+ break;
+ case MCE_ERROR_TYPE_SLB:
+ mce_err->u.slb_error_type = table[i].error_subtype;
+ break;
+ case MCE_ERROR_TYPE_ERAT:
+ mce_err->u.erat_error_type = table[i].error_subtype;
+ break;
+ case MCE_ERROR_TYPE_TLB:
+ mce_err->u.tlb_error_type = table[i].error_subtype;
+ break;
+ case MCE_ERROR_TYPE_USER:
+ mce_err->u.user_error_type = table[i].error_subtype;
+ break;
+ case MCE_ERROR_TYPE_RA:
+ mce_err->u.ra_error_type = table[i].error_subtype;
+ break;
+ case MCE_ERROR_TYPE_LINK:
+ mce_err->u.link_error_type = table[i].error_subtype;
+ break;
+ }
+ mce_err->severity = table[i].severity;
+ mce_err->initiator = table[i].initiator;
+ if (table[i].dar_valid)
+ *addr = regs->dar;
-#ifdef CONFIG_PPC_STD_MMU_64
- if (P7_SRR1_MC_IFETCH(srr1) == P7_SRR1_MC_IFETCH_SLB_BOTH) {
- flush_and_reload_slb();
- handled = 1;
+ found = 1;
}
-#endif
- return handled;
-}
-static void mce_get_common_ierror(struct mce_error_info *mce_err, uint64_t srr1)
-{
- switch (P7_SRR1_MC_IFETCH(srr1)) {
- case P7_SRR1_MC_IFETCH_SLB_PARITY:
- mce_err->error_type = MCE_ERROR_TYPE_SLB;
- mce_err->u.slb_error_type = MCE_SLB_ERROR_PARITY;
- break;
- case P7_SRR1_MC_IFETCH_SLB_MULTIHIT:
- mce_err->error_type = MCE_ERROR_TYPE_SLB;
- mce_err->u.slb_error_type = MCE_SLB_ERROR_MULTIHIT;
- break;
- case P7_SRR1_MC_IFETCH_TLB_MULTIHIT:
- mce_err->error_type = MCE_ERROR_TYPE_TLB;
- mce_err->u.tlb_error_type = MCE_TLB_ERROR_MULTIHIT;
- break;
- case P7_SRR1_MC_IFETCH_UE:
- case P7_SRR1_MC_IFETCH_UE_IFU_INTERNAL:
- mce_err->error_type = MCE_ERROR_TYPE_UE;
- mce_err->u.ue_error_type = MCE_UE_ERROR_IFETCH;
- break;
- case P7_SRR1_MC_IFETCH_UE_TLB_RELOAD:
- mce_err->error_type = MCE_ERROR_TYPE_UE;
- mce_err->u.ue_error_type =
- MCE_UE_ERROR_PAGE_TABLE_WALK_IFETCH;
- break;
- }
-}
+ if (found)
+ return handled;
-static void mce_get_ierror_p7(struct mce_error_info *mce_err, uint64_t srr1)
-{
- mce_get_common_ierror(mce_err, srr1);
- if (P7_SRR1_MC_IFETCH(srr1) == P7_SRR1_MC_IFETCH_SLB_BOTH) {
- mce_err->error_type = MCE_ERROR_TYPE_SLB;
- mce_err->u.slb_error_type = MCE_SLB_ERROR_INDETERMINATE;
- }
-}
+ mce_err->error_type = MCE_ERROR_TYPE_UNKNOWN;
+ mce_err->severity = MCE_SEV_ERROR_SYNC;
+ mce_err->initiator = MCE_INITIATOR_CPU;
-static void mce_get_derror_p7(struct mce_error_info *mce_err, uint64_t dsisr)
-{
- if (dsisr & P7_DSISR_MC_UE) {
- mce_err->error_type = MCE_ERROR_TYPE_UE;
- mce_err->u.ue_error_type = MCE_UE_ERROR_LOAD_STORE;
- } else if (dsisr & P7_DSISR_MC_UE_TABLEWALK) {
- mce_err->error_type = MCE_ERROR_TYPE_UE;
- mce_err->u.ue_error_type =
- MCE_UE_ERROR_PAGE_TABLE_WALK_LOAD_STORE;
- } else if (dsisr & P7_DSISR_MC_ERAT_MULTIHIT) {
- mce_err->error_type = MCE_ERROR_TYPE_ERAT;
- mce_err->u.erat_error_type = MCE_ERAT_ERROR_MULTIHIT;
- } else if (dsisr & P7_DSISR_MC_SLB_MULTIHIT) {
- mce_err->error_type = MCE_ERROR_TYPE_SLB;
- mce_err->u.slb_error_type = MCE_SLB_ERROR_MULTIHIT;
- } else if (dsisr & P7_DSISR_MC_SLB_PARITY_MFSLB) {
- mce_err->error_type = MCE_ERROR_TYPE_SLB;
- mce_err->u.slb_error_type = MCE_SLB_ERROR_PARITY;
- } else if (dsisr & P7_DSISR_MC_TLB_MULTIHIT_MFTLB) {
- mce_err->error_type = MCE_ERROR_TYPE_TLB;
- mce_err->u.tlb_error_type = MCE_TLB_ERROR_MULTIHIT;
- } else if (dsisr & P7_DSISR_MC_SLB_MULTIHIT_PARITY) {
- mce_err->error_type = MCE_ERROR_TYPE_SLB;
- mce_err->u.slb_error_type = MCE_SLB_ERROR_INDETERMINATE;
- }
+ return 0;
}
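
The two walkers above differ on purpose: mce_handle_ierror() matches SRR1 against mask/value pairs and reports the single entry that matches, while mce_handle_derror() tests individual DSISR bits, tries to correct every bit that is set, and reports only the first hit — hence the note about keeping uncorrectable errors first. A minimal user-space sketch of the derror walk; the two mask values are copied from mce_p9_derror_table above, everything else (names, the sample DSISR) is illustrative only.

#include <stdio.h>
#include <stdint.h>

struct derror_entry {
	uint32_t dsisr_bit;
	const char *report;		/* stand-in for error_type/subtype */
};

/* order matters: the first set bit found is the one reported */
static const struct derror_entry table[] = {
	{ 0x00000800, "ERAT multihit" },
	{ 0x00000080, "SLB multihit" },
	{ 0, NULL }
};

int main(void)
{
	uint32_t dsisr = 0x00000880;	/* both conditions flagged */
	const char *reported = NULL;

	for (int i = 0; table[i].dsisr_bit; i++) {
		if (!(dsisr & table[i].dsisr_bit))
			continue;
		/* the kernel attempts mce_flush() here for every hit */
		if (!reported)
			reported = table[i].report;
	}
	printf("reported: %s\n", reported);	/* prints "ERAT multihit" */
	return 0;
}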
static long mce_handle_ue_error(struct pt_regs *regs)
@@ -320,292 +530,42 @@ static long mce_handle_ue_error(struct pt_regs *regs)
return handled;
}
-long __machine_check_early_realmode_p7(struct pt_regs *regs)
+static long mce_handle_error(struct pt_regs *regs,
+ const struct mce_derror_table dtable[],
+ const struct mce_ierror_table itable[])
{
- uint64_t srr1, nip, addr;
- long handled = 1;
- struct mce_error_info mce_error_info = { 0 };
-
- mce_error_info.severity = MCE_SEV_ERROR_SYNC;
- mce_error_info.initiator = MCE_INITIATOR_CPU;
-
- srr1 = regs->msr;
- nip = regs->nip;
+ struct mce_error_info mce_err = { 0 };
+ uint64_t addr;
+ uint64_t srr1 = regs->msr;
+ long handled;
- /*
- * Handle memory errors depending whether this was a load/store or
- * ifetch exception. Also, populate the mce error_type and
- * type-specific error_type from either SRR1 or DSISR, depending
- * whether this was a load/store or ifetch exception
- */
- if (P7_SRR1_MC_LOADSTORE(srr1)) {
- handled = mce_handle_derror_p7(regs->dsisr);
- mce_get_derror_p7(&mce_error_info, regs->dsisr);
- addr = regs->dar;
- } else {
- handled = mce_handle_ierror_p7(srr1);
- mce_get_ierror_p7(&mce_error_info, srr1);
- addr = regs->nip;
- }
+ if (SRR1_MC_LOADSTORE(srr1))
+ handled = mce_handle_derror(regs, dtable, &mce_err, &addr);
+ else
+ handled = mce_handle_ierror(regs, itable, &mce_err, &addr);
- /* Handle UE error. */
- if (mce_error_info.error_type == MCE_ERROR_TYPE_UE)
+ if (!handled && mce_err.error_type == MCE_ERROR_TYPE_UE)
handled = mce_handle_ue_error(regs);
- save_mce_event(regs, handled, &mce_error_info, nip, addr);
- return handled;
-}
-
-static void mce_get_ierror_p8(struct mce_error_info *mce_err, uint64_t srr1)
-{
- mce_get_common_ierror(mce_err, srr1);
- if (P7_SRR1_MC_IFETCH(srr1) == P8_SRR1_MC_IFETCH_ERAT_MULTIHIT) {
- mce_err->error_type = MCE_ERROR_TYPE_ERAT;
- mce_err->u.erat_error_type = MCE_ERAT_ERROR_MULTIHIT;
- }
-}
-
-static void mce_get_derror_p8(struct mce_error_info *mce_err, uint64_t dsisr)
-{
- mce_get_derror_p7(mce_err, dsisr);
- if (dsisr & P8_DSISR_MC_ERAT_MULTIHIT_SEC) {
- mce_err->error_type = MCE_ERROR_TYPE_ERAT;
- mce_err->u.erat_error_type = MCE_ERAT_ERROR_MULTIHIT;
- }
-}
-
-static long mce_handle_ierror_p8(uint64_t srr1)
-{
- long handled = 0;
+ save_mce_event(regs, handled, &mce_err, regs->nip, addr);
- handled = mce_handle_common_ierror(srr1);
-
-#ifdef CONFIG_PPC_STD_MMU_64
- if (P7_SRR1_MC_IFETCH(srr1) == P8_SRR1_MC_IFETCH_ERAT_MULTIHIT) {
- flush_and_reload_slb();
- handled = 1;
- }
-#endif
return handled;
}
-static long mce_handle_derror_p8(uint64_t dsisr)
-{
- return mce_handle_derror(dsisr, P8_DSISR_MC_SLB_ERRORS);
-}
-
-long __machine_check_early_realmode_p8(struct pt_regs *regs)
-{
- uint64_t srr1, nip, addr;
- long handled = 1;
- struct mce_error_info mce_error_info = { 0 };
-
- mce_error_info.severity = MCE_SEV_ERROR_SYNC;
- mce_error_info.initiator = MCE_INITIATOR_CPU;
-
- srr1 = regs->msr;
- nip = regs->nip;
-
- if (P7_SRR1_MC_LOADSTORE(srr1)) {
- handled = mce_handle_derror_p8(regs->dsisr);
- mce_get_derror_p8(&mce_error_info, regs->dsisr);
- addr = regs->dar;
- } else {
- handled = mce_handle_ierror_p8(srr1);
- mce_get_ierror_p8(&mce_error_info, srr1);
- addr = regs->nip;
- }
-
- /* Handle UE error. */
- if (mce_error_info.error_type == MCE_ERROR_TYPE_UE)
- handled = mce_handle_ue_error(regs);
-
- save_mce_event(regs, handled, &mce_error_info, nip, addr);
- return handled;
-}
-
-static int mce_handle_derror_p9(struct pt_regs *regs)
-{
- uint64_t dsisr = regs->dsisr;
-
- return mce_handle_flush_derrors(dsisr,
- P9_DSISR_MC_SLB_PARITY_MFSLB |
- P9_DSISR_MC_SLB_MULTIHIT_MFSLB,
-
- P9_DSISR_MC_TLB_MULTIHIT_MFTLB,
-
- P9_DSISR_MC_ERAT_MULTIHIT);
-}
-
-static int mce_handle_ierror_p9(struct pt_regs *regs)
+long __machine_check_early_realmode_p7(struct pt_regs *regs)
{
- uint64_t srr1 = regs->msr;
+ /* P7 DD1 leaves top bits of DSISR undefined */
+ regs->dsisr &= 0x0000ffff;
- switch (P9_SRR1_MC_IFETCH(srr1)) {
- case P9_SRR1_MC_IFETCH_SLB_PARITY:
- case P9_SRR1_MC_IFETCH_SLB_MULTIHIT:
- return mce_flush(MCE_FLUSH_SLB);
- case P9_SRR1_MC_IFETCH_TLB_MULTIHIT:
- return mce_flush(MCE_FLUSH_TLB);
- case P9_SRR1_MC_IFETCH_ERAT_MULTIHIT:
- return mce_flush(MCE_FLUSH_ERAT);
- default:
- return 0;
- }
+ return mce_handle_error(regs, mce_p7_derror_table, mce_p7_ierror_table);
}
-static void mce_get_derror_p9(struct pt_regs *regs,
- struct mce_error_info *mce_err, uint64_t *addr)
-{
- uint64_t dsisr = regs->dsisr;
-
- mce_err->severity = MCE_SEV_ERROR_SYNC;
- mce_err->initiator = MCE_INITIATOR_CPU;
-
- if (dsisr & P9_DSISR_MC_USER_TLBIE)
- *addr = regs->nip;
- else
- *addr = regs->dar;
-
- if (dsisr & P9_DSISR_MC_UE) {
- mce_err->error_type = MCE_ERROR_TYPE_UE;
- mce_err->u.ue_error_type = MCE_UE_ERROR_LOAD_STORE;
- } else if (dsisr & P9_DSISR_MC_UE_TABLEWALK) {
- mce_err->error_type = MCE_ERROR_TYPE_UE;
- mce_err->u.ue_error_type = MCE_UE_ERROR_PAGE_TABLE_WALK_LOAD_STORE;
- } else if (dsisr & P9_DSISR_MC_LINK_LOAD_TIMEOUT) {
- mce_err->error_type = MCE_ERROR_TYPE_LINK;
- mce_err->u.link_error_type = MCE_LINK_ERROR_LOAD_TIMEOUT;
- } else if (dsisr & P9_DSISR_MC_LINK_TABLEWALK_TIMEOUT) {
- mce_err->error_type = MCE_ERROR_TYPE_LINK;
- mce_err->u.link_error_type = MCE_LINK_ERROR_PAGE_TABLE_WALK_LOAD_STORE_TIMEOUT;
- } else if (dsisr & P9_DSISR_MC_ERAT_MULTIHIT) {
- mce_err->error_type = MCE_ERROR_TYPE_ERAT;
- mce_err->u.erat_error_type = MCE_ERAT_ERROR_MULTIHIT;
- } else if (dsisr & P9_DSISR_MC_TLB_MULTIHIT_MFTLB) {
- mce_err->error_type = MCE_ERROR_TYPE_TLB;
- mce_err->u.tlb_error_type = MCE_TLB_ERROR_MULTIHIT;
- } else if (dsisr & P9_DSISR_MC_USER_TLBIE) {
- mce_err->error_type = MCE_ERROR_TYPE_USER;
- mce_err->u.user_error_type = MCE_USER_ERROR_TLBIE;
- } else if (dsisr & P9_DSISR_MC_SLB_PARITY_MFSLB) {
- mce_err->error_type = MCE_ERROR_TYPE_SLB;
- mce_err->u.slb_error_type = MCE_SLB_ERROR_PARITY;
- } else if (dsisr & P9_DSISR_MC_SLB_MULTIHIT_MFSLB) {
- mce_err->error_type = MCE_ERROR_TYPE_SLB;
- mce_err->u.slb_error_type = MCE_SLB_ERROR_MULTIHIT;
- } else if (dsisr & P9_DSISR_MC_RA_LOAD) {
- mce_err->error_type = MCE_ERROR_TYPE_RA;
- mce_err->u.ra_error_type = MCE_RA_ERROR_LOAD;
- } else if (dsisr & P9_DSISR_MC_RA_TABLEWALK) {
- mce_err->error_type = MCE_ERROR_TYPE_RA;
- mce_err->u.ra_error_type = MCE_RA_ERROR_PAGE_TABLE_WALK_LOAD_STORE;
- } else if (dsisr & P9_DSISR_MC_RA_TABLEWALK_FOREIGN) {
- mce_err->error_type = MCE_ERROR_TYPE_RA;
- mce_err->u.ra_error_type = MCE_RA_ERROR_PAGE_TABLE_WALK_LOAD_STORE_FOREIGN;
- } else if (dsisr & P9_DSISR_MC_RA_FOREIGN) {
- mce_err->error_type = MCE_ERROR_TYPE_RA;
- mce_err->u.ra_error_type = MCE_RA_ERROR_LOAD_STORE_FOREIGN;
- }
-}
-
-static void mce_get_ierror_p9(struct pt_regs *regs,
- struct mce_error_info *mce_err, uint64_t *addr)
+long __machine_check_early_realmode_p8(struct pt_regs *regs)
{
- uint64_t srr1 = regs->msr;
-
- switch (P9_SRR1_MC_IFETCH(srr1)) {
- case P9_SRR1_MC_IFETCH_RA_ASYNC_STORE:
- case P9_SRR1_MC_IFETCH_LINK_ASYNC_STORE_TIMEOUT:
- mce_err->severity = MCE_SEV_FATAL;
- break;
- default:
- mce_err->severity = MCE_SEV_ERROR_SYNC;
- break;
- }
-
- mce_err->initiator = MCE_INITIATOR_CPU;
-
- *addr = regs->nip;
-
- switch (P9_SRR1_MC_IFETCH(srr1)) {
- case P9_SRR1_MC_IFETCH_UE:
- mce_err->error_type = MCE_ERROR_TYPE_UE;
- mce_err->u.ue_error_type = MCE_UE_ERROR_IFETCH;
- break;
- case P9_SRR1_MC_IFETCH_SLB_PARITY:
- mce_err->error_type = MCE_ERROR_TYPE_SLB;
- mce_err->u.slb_error_type = MCE_SLB_ERROR_PARITY;
- break;
- case P9_SRR1_MC_IFETCH_SLB_MULTIHIT:
- mce_err->error_type = MCE_ERROR_TYPE_SLB;
- mce_err->u.slb_error_type = MCE_SLB_ERROR_MULTIHIT;
- break;
- case P9_SRR1_MC_IFETCH_ERAT_MULTIHIT:
- mce_err->error_type = MCE_ERROR_TYPE_ERAT;
- mce_err->u.erat_error_type = MCE_ERAT_ERROR_MULTIHIT;
- break;
- case P9_SRR1_MC_IFETCH_TLB_MULTIHIT:
- mce_err->error_type = MCE_ERROR_TYPE_TLB;
- mce_err->u.tlb_error_type = MCE_TLB_ERROR_MULTIHIT;
- break;
- case P9_SRR1_MC_IFETCH_UE_TLB_RELOAD:
- mce_err->error_type = MCE_ERROR_TYPE_UE;
- mce_err->u.ue_error_type = MCE_UE_ERROR_PAGE_TABLE_WALK_IFETCH;
- break;
- case P9_SRR1_MC_IFETCH_LINK_TIMEOUT:
- mce_err->error_type = MCE_ERROR_TYPE_LINK;
- mce_err->u.link_error_type = MCE_LINK_ERROR_IFETCH_TIMEOUT;
- break;
- case P9_SRR1_MC_IFETCH_LINK_TABLEWALK_TIMEOUT:
- mce_err->error_type = MCE_ERROR_TYPE_LINK;
- mce_err->u.link_error_type = MCE_LINK_ERROR_PAGE_TABLE_WALK_IFETCH_TIMEOUT;
- break;
- case P9_SRR1_MC_IFETCH_RA:
- mce_err->error_type = MCE_ERROR_TYPE_RA;
- mce_err->u.ra_error_type = MCE_RA_ERROR_IFETCH;
- break;
- case P9_SRR1_MC_IFETCH_RA_TABLEWALK:
- mce_err->error_type = MCE_ERROR_TYPE_RA;
- mce_err->u.ra_error_type = MCE_RA_ERROR_PAGE_TABLE_WALK_IFETCH;
- break;
- case P9_SRR1_MC_IFETCH_RA_ASYNC_STORE:
- mce_err->error_type = MCE_ERROR_TYPE_RA;
- mce_err->u.ra_error_type = MCE_RA_ERROR_STORE;
- break;
- case P9_SRR1_MC_IFETCH_LINK_ASYNC_STORE_TIMEOUT:
- mce_err->error_type = MCE_ERROR_TYPE_LINK;
- mce_err->u.link_error_type = MCE_LINK_ERROR_STORE_TIMEOUT;
- break;
- case P9_SRR1_MC_IFETCH_RA_TABLEWALK_FOREIGN:
- mce_err->error_type = MCE_ERROR_TYPE_RA;
- mce_err->u.ra_error_type = MCE_RA_ERROR_PAGE_TABLE_WALK_IFETCH_FOREIGN;
- break;
- default:
- break;
- }
+ return mce_handle_error(regs, mce_p8_derror_table, mce_p8_ierror_table);
}
long __machine_check_early_realmode_p9(struct pt_regs *regs)
{
- uint64_t nip, addr;
- long handled;
- struct mce_error_info mce_error_info = { 0 };
-
- nip = regs->nip;
-
- if (P9_SRR1_MC_LOADSTORE(regs->msr)) {
- handled = mce_handle_derror_p9(regs);
- mce_get_derror_p9(regs, &mce_error_info, &addr);
- } else {
- handled = mce_handle_ierror_p9(regs);
- mce_get_ierror_p9(regs, &mce_error_info, &addr);
- }
-
- /* Handle UE error. */
- if (mce_error_info.error_type == MCE_ERROR_TYPE_UE)
- handled = mce_handle_ue_error(regs);
-
- save_mce_event(regs, handled, &mce_error_info, nip, addr);
- return handled;
+ return mce_handle_error(regs, mce_p9_derror_table, mce_p9_ierror_table);
}
diff --git a/arch/powerpc/kernel/optprobes.c b/arch/powerpc/kernel/optprobes.c
index 2282bf4e63cd..ec60ed0d4aad 100644
--- a/arch/powerpc/kernel/optprobes.c
+++ b/arch/powerpc/kernel/optprobes.c
@@ -243,10 +243,10 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
/*
* 2. branch to optimized_callback() and emulate_step()
*/
- kprobe_lookup_name("optimized_callback", op_callback_addr);
- kprobe_lookup_name("emulate_step", emulate_step_addr);
+ op_callback_addr = (kprobe_opcode_t *)ppc_kallsyms_lookup_name("optimized_callback");
+ emulate_step_addr = (kprobe_opcode_t *)ppc_kallsyms_lookup_name("emulate_step");
if (!op_callback_addr || !emulate_step_addr) {
- WARN(1, "kprobe_lookup_name() failed\n");
+ WARN(1, "Unable to lookup optimized_callback()/emulate_step()\n");
goto error;
}
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c
index dfc479df9634..8d63627e067f 100644
--- a/arch/powerpc/kernel/paca.c
+++ b/arch/powerpc/kernel/paca.c
@@ -245,3 +245,24 @@ void __init free_unused_pacas(void)
free_lppacas();
}
+
+void copy_mm_to_paca(struct mm_struct *mm)
+{
+#ifdef CONFIG_PPC_BOOK3S
+ mm_context_t *context = &mm->context;
+
+ get_paca()->mm_ctx_id = context->id;
+#ifdef CONFIG_PPC_MM_SLICES
+ VM_BUG_ON(!mm->context.addr_limit);
+ get_paca()->addr_limit = mm->context.addr_limit;
+ get_paca()->mm_ctx_low_slices_psize = context->low_slices_psize;
+ memcpy(&get_paca()->mm_ctx_high_slices_psize,
+ &context->high_slices_psize, TASK_SLICE_ARRAY_SZ(mm));
+#else /* CONFIG_PPC_MM_SLICES */
+ get_paca()->mm_ctx_user_psize = context->user_psize;
+ get_paca()->mm_ctx_sllp = context->sllp;
+#endif
+#else /* CONFIG_PPC_BOOK3S */
+ return;
+#endif
+}
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index ffda24a38dda..341a7469cab8 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -233,6 +233,14 @@ void pcibios_reset_secondary_bus(struct pci_dev *dev)
pci_reset_secondary_bus(dev);
}
+resource_size_t pcibios_default_alignment(void)
+{
+ if (ppc_md.pcibios_default_alignment)
+ return ppc_md.pcibios_default_alignment();
+
+ return 0;
+}
+
#ifdef CONFIG_PCI_IOV
resource_size_t pcibios_iov_resource_alignment(struct pci_dev *pdev, int resno)
{
@@ -513,7 +521,8 @@ pgprot_t pci_phys_mem_access_prot(struct file *file,
*
* Returns a negative error code on failure, zero on success.
*/
-int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
+int pci_mmap_page_range(struct pci_dev *dev, int bar,
+ struct vm_area_struct *vma,
enum pci_mmap_state mmap_state, int write_combine)
{
resource_size_t offset =
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index f5d399e46193..d2f0afeae5a0 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -55,7 +55,6 @@
#include <asm/kexec.h>
#include <asm/opal.h>
#include <asm/fadump.h>
-#include <asm/debug.h>
#include <asm/epapr_hcalls.h>
#include <asm/firmware.h>
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 1c1b44ec7642..dd8a04f3053a 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -815,7 +815,7 @@ struct ibm_arch_vec __cacheline_aligned ibm_architecture_vec = {
.virt_base = cpu_to_be32(0xffffffff),
.virt_size = cpu_to_be32(0xffffffff),
.load_base = cpu_to_be32(0xffffffff),
- .min_rma = cpu_to_be32(256), /* 256MB min RMA */
+ .min_rma = cpu_to_be32(512), /* 512MB min RMA */
.min_load = cpu_to_be32(0xffffffff), /* full client load */
.min_rma_percent = 0, /* min RMA percentage of total RAM */
.max_pft_size = 48, /* max log_2(hash table size) */
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 4697da895133..69e077180db6 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -31,11 +31,11 @@
#include <linux/unistd.h>
#include <linux/serial.h>
#include <linux/serial_8250.h>
-#include <linux/debugfs.h>
#include <linux/percpu.h>
#include <linux/memblock.h>
#include <linux/of_platform.h>
#include <linux/hugetlb.h>
+#include <asm/debugfs.h>
#include <asm/io.h>
#include <asm/paca.h>
#include <asm/prom.h>
@@ -125,6 +125,11 @@ int ppc_do_canonicalize_irqs;
EXPORT_SYMBOL(ppc_do_canonicalize_irqs);
#endif
+#ifdef CONFIG_CRASH_CORE
+/* This keeps track of which cpu is the crashing one. */
+int crashing_cpu = -1;
+#endif
+
/* also used by kexec */
void machine_shutdown(void)
{
@@ -920,6 +925,15 @@ void __init setup_arch(char **cmdline_p)
init_mm.end_code = (unsigned long) _etext;
init_mm.end_data = (unsigned long) _edata;
init_mm.brk = klimit;
+
+#ifdef CONFIG_PPC_MM_SLICES
+#ifdef CONFIG_PPC64
+ init_mm.context.addr_limit = TASK_SIZE_128TB;
+#else
+#error "context.addr_limit not initialized."
+#endif
+#endif
+
#ifdef CONFIG_PPC_64K_PAGES
init_mm.context.pte_frag = NULL;
#endif
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index f997154dfc41..0d4dcaeaafcb 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -230,8 +230,8 @@ static void cpu_ready_for_interrupts(void)
* If we are not in hypervisor mode the job is done once for
* the whole partition in configure_exceptions().
*/
- if (early_cpu_has_feature(CPU_FTR_HVMODE) &&
- early_cpu_has_feature(CPU_FTR_ARCH_207S)) {
+ if (cpu_has_feature(CPU_FTR_HVMODE) &&
+ cpu_has_feature(CPU_FTR_ARCH_207S)) {
unsigned long lpcr = mfspr(SPRN_LPCR);
mtspr(SPRN_LPCR, lpcr | LPCR_AIL_3);
}
@@ -637,6 +637,11 @@ void __init emergency_stack_init(void)
paca[i].emergency_sp = (void *)ti + THREAD_SIZE;
#ifdef CONFIG_PPC_BOOK3S_64
+ /* emergency stack for NMI exception handling. */
+ ti = __va(memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit));
+ klp_init_thread_info(ti);
+ paca[i].nmi_emergency_sp = (void *)ti + THREAD_SIZE;
+
/* emergency stack for machine check exception handling. */
ti = __va(memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit));
klp_init_thread_info(ti);
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index d68ed1f004a3..df2a41647d8e 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -39,6 +39,7 @@
#include <asm/irq.h>
#include <asm/hw_irq.h>
#include <asm/kvm_ppc.h>
+#include <asm/dbell.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/prom.h>
@@ -86,8 +87,6 @@ volatile unsigned int cpu_callin_map[NR_CPUS];
int smt_enabled_at_boot = 1;
-static void (*crash_ipi_function_ptr)(struct pt_regs *) = NULL;
-
/*
* Returns 1 if the specified cpu should be brought up during boot.
* Used to inhibit booting threads if they've been disabled or
@@ -158,32 +157,33 @@ static irqreturn_t tick_broadcast_ipi_action(int irq, void *data)
return IRQ_HANDLED;
}
-static irqreturn_t debug_ipi_action(int irq, void *data)
+#ifdef CONFIG_NMI_IPI
+static irqreturn_t nmi_ipi_action(int irq, void *data)
{
- if (crash_ipi_function_ptr) {
- crash_ipi_function_ptr(get_irq_regs());
- return IRQ_HANDLED;
- }
-
-#ifdef CONFIG_DEBUGGER
- debugger_ipi(get_irq_regs());
-#endif /* CONFIG_DEBUGGER */
-
+ smp_handle_nmi_ipi(get_irq_regs());
return IRQ_HANDLED;
}
+#endif
static irq_handler_t smp_ipi_action[] = {
[PPC_MSG_CALL_FUNCTION] = call_function_action,
[PPC_MSG_RESCHEDULE] = reschedule_action,
[PPC_MSG_TICK_BROADCAST] = tick_broadcast_ipi_action,
- [PPC_MSG_DEBUGGER_BREAK] = debug_ipi_action,
+#ifdef CONFIG_NMI_IPI
+ [PPC_MSG_NMI_IPI] = nmi_ipi_action,
+#endif
};
+/*
+ * The NMI IPI is a fallback and not truly non-maskable. It is simpler
+ * than going through the call function infrastructure, and strongly
+ * serialized, so it is more appropriate for debugging.
+ */
const char *smp_ipi_name[] = {
[PPC_MSG_CALL_FUNCTION] = "ipi call function",
[PPC_MSG_RESCHEDULE] = "ipi reschedule",
[PPC_MSG_TICK_BROADCAST] = "ipi tick-broadcast",
- [PPC_MSG_DEBUGGER_BREAK] = "ipi debugger",
+ [PPC_MSG_NMI_IPI] = "nmi ipi",
};
/* optional function to request ipi, for controllers with >= 4 ipis */
@@ -191,14 +191,13 @@ int smp_request_message_ipi(int virq, int msg)
{
int err;
- if (msg < 0 || msg > PPC_MSG_DEBUGGER_BREAK) {
+ if (msg < 0 || msg > PPC_MSG_NMI_IPI)
return -EINVAL;
- }
-#if !defined(CONFIG_DEBUGGER) && !defined(CONFIG_KEXEC_CORE)
- if (msg == PPC_MSG_DEBUGGER_BREAK) {
+#ifndef CONFIG_NMI_IPI
+ if (msg == PPC_MSG_NMI_IPI)
return 1;
- }
#endif
+
err = request_irq(virq, smp_ipi_action[msg],
IRQF_PERCPU | IRQF_NO_THREAD | IRQF_NO_SUSPEND,
smp_ipi_name[msg], NULL);
@@ -211,17 +210,9 @@ int smp_request_message_ipi(int virq, int msg)
#ifdef CONFIG_PPC_SMP_MUXED_IPI
struct cpu_messages {
long messages; /* current messages */
- unsigned long data; /* data for cause ipi */
};
static DEFINE_PER_CPU_SHARED_ALIGNED(struct cpu_messages, ipi_message);
-void smp_muxed_ipi_set_data(int cpu, unsigned long data)
-{
- struct cpu_messages *info = &per_cpu(ipi_message, cpu);
-
- info->data = data;
-}
-
void smp_muxed_ipi_set_message(int cpu, int msg)
{
struct cpu_messages *info = &per_cpu(ipi_message, cpu);
@@ -236,14 +227,13 @@ void smp_muxed_ipi_set_message(int cpu, int msg)
void smp_muxed_ipi_message_pass(int cpu, int msg)
{
- struct cpu_messages *info = &per_cpu(ipi_message, cpu);
-
smp_muxed_ipi_set_message(cpu, msg);
+
/*
* cause_ipi functions are required to include a full barrier
* before doing whatever causes the IPI.
*/
- smp_ops->cause_ipi(cpu, info->data);
+ smp_ops->cause_ipi(cpu);
}
#ifdef __BIG_ENDIAN__
@@ -254,11 +244,18 @@ void smp_muxed_ipi_message_pass(int cpu, int msg)
irqreturn_t smp_ipi_demux(void)
{
- struct cpu_messages *info = this_cpu_ptr(&ipi_message);
- unsigned long all;
-
mb(); /* order any irq clear */
+ return smp_ipi_demux_relaxed();
+}
+
+/* sync-free variant. Callers should ensure synchronization */
+irqreturn_t smp_ipi_demux_relaxed(void)
+{
+ struct cpu_messages *info;
+ unsigned long all;
+
+ info = this_cpu_ptr(&ipi_message);
do {
all = xchg(&info->messages, 0);
#if defined(CONFIG_KVM_XICS) && defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE)
@@ -278,8 +275,10 @@ irqreturn_t smp_ipi_demux(void)
scheduler_ipi();
if (all & IPI_MESSAGE(PPC_MSG_TICK_BROADCAST))
tick_broadcast_ipi_handler();
- if (all & IPI_MESSAGE(PPC_MSG_DEBUGGER_BREAK))
- debug_ipi_action(0, NULL);
+#ifdef CONFIG_NMI_IPI
+ if (all & IPI_MESSAGE(PPC_MSG_NMI_IPI))
+ nmi_ipi_action(0, NULL);
+#endif
} while (info->messages);
return IRQ_HANDLED;
@@ -316,6 +315,187 @@ void arch_send_call_function_ipi_mask(const struct cpumask *mask)
do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
}
+#ifdef CONFIG_NMI_IPI
+
+/*
+ * "NMI IPI" system.
+ *
+ * NMI IPIs may not be recoverable, so they should not be used as an ongoing
+ * part of a running system. They can be used for crash, debug, halt/reboot, etc.
+ *
+ * NMI IPIs are globally single threaded. No more than one in progress at
+ * any time.
+ *
+ * The IPI call waits with interrupts disabled until all targets enter the
+ * NMI handler, then the call returns.
+ *
+ * No new NMI can be initiated until targets exit the handler.
+ *
+ * The IPI call may time out without all targets entering the NMI handler.
+ * In that case, there is some logic to recover (and ignore subsequent
+ * NMI interrupts that may eventually be raised), but the platform interrupt
+ * handler may not be able to distinguish this from other exception causes,
+ * which may cause a crash.
+ */
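
A sketch of how code in this file is expected to use the interface (smp_send_nmi_ipi() is static here); it mirrors the debugger and kdump conversions further down in this patch. The callback body, the function names and the one-second timeout are illustrative, not part of the patch.

static void dump_regs_callback(struct pt_regs *regs)
{
	/* runs on every target CPU, from within its NMI handler */
	show_regs(regs);
}

static void nmi_dump_all_other_cpus(void)
{
	/* returns 0 if some targets never entered the handler in time */
	if (!smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, dump_regs_callback, 1000000))
		pr_warn("Some CPUs did not respond to the NMI IPI\n");
}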
+
+static atomic_t __nmi_ipi_lock = ATOMIC_INIT(0);
+static struct cpumask nmi_ipi_pending_mask;
+static int nmi_ipi_busy_count = 0;
+static void (*nmi_ipi_function)(struct pt_regs *) = NULL;
+
+static void nmi_ipi_lock_start(unsigned long *flags)
+{
+ raw_local_irq_save(*flags);
+ hard_irq_disable();
+ while (atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1) {
+ raw_local_irq_restore(*flags);
+ cpu_relax();
+ raw_local_irq_save(*flags);
+ hard_irq_disable();
+ }
+}
+
+static void nmi_ipi_lock(void)
+{
+ while (atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1)
+ cpu_relax();
+}
+
+static void nmi_ipi_unlock(void)
+{
+ smp_mb();
+ WARN_ON(atomic_read(&__nmi_ipi_lock) != 1);
+ atomic_set(&__nmi_ipi_lock, 0);
+}
+
+static void nmi_ipi_unlock_end(unsigned long *flags)
+{
+ nmi_ipi_unlock();
+ raw_local_irq_restore(*flags);
+}
+
+/*
+ * The platform NMI handler calls this to ack an NMI IPI.
+ */
+int smp_handle_nmi_ipi(struct pt_regs *regs)
+{
+ void (*fn)(struct pt_regs *);
+ unsigned long flags;
+ int me = raw_smp_processor_id();
+ int ret = 0;
+
+ /*
+ * Unexpected NMIs are possible here because the platform interrupt
+ * handler may not be able to distinguish NMI IPIs from other types
+ * of NMIs, or because the caller may have timed out.
+ */
+ nmi_ipi_lock_start(&flags);
+ if (!nmi_ipi_busy_count)
+ goto out;
+ if (!cpumask_test_cpu(me, &nmi_ipi_pending_mask))
+ goto out;
+
+ fn = nmi_ipi_function;
+ if (!fn)
+ goto out;
+
+ cpumask_clear_cpu(me, &nmi_ipi_pending_mask);
+ nmi_ipi_busy_count++;
+ nmi_ipi_unlock();
+
+ ret = 1;
+
+ fn(regs);
+
+ nmi_ipi_lock();
+ nmi_ipi_busy_count--;
+out:
+ nmi_ipi_unlock_end(&flags);
+
+ return ret;
+}
+
+static void do_smp_send_nmi_ipi(int cpu)
+{
+ if (smp_ops->cause_nmi_ipi && smp_ops->cause_nmi_ipi(cpu))
+ return;
+
+ if (cpu >= 0) {
+ do_message_pass(cpu, PPC_MSG_NMI_IPI);
+ } else {
+ int c;
+
+ for_each_online_cpu(c) {
+ if (c == raw_smp_processor_id())
+ continue;
+ do_message_pass(c, PPC_MSG_NMI_IPI);
+ }
+ }
+}
+
+/*
+ * - cpu is the target CPU (must not be this CPU), or NMI_IPI_ALL_OTHERS.
+ * - fn is the target callback function.
+ * - delay_us > 0 is the maximum number of microseconds to wait for the
+ * targets to enter the handler; == 0 means wait indefinitely.
+ */
+static int smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us)
+{
+ unsigned long flags;
+ int me = raw_smp_processor_id();
+ int ret = 1;
+
+ BUG_ON(cpu == me);
+ BUG_ON(cpu < 0 && cpu != NMI_IPI_ALL_OTHERS);
+
+ if (unlikely(!smp_ops))
+ return 0;
+
+ /* Take the nmi_ipi_busy count/lock with interrupts hard disabled */
+ nmi_ipi_lock_start(&flags);
+ while (nmi_ipi_busy_count) {
+ nmi_ipi_unlock_end(&flags);
+ cpu_relax();
+ nmi_ipi_lock_start(&flags);
+ }
+
+ nmi_ipi_function = fn;
+
+ if (cpu < 0) {
+ /* ALL_OTHERS */
+ cpumask_copy(&nmi_ipi_pending_mask, cpu_online_mask);
+ cpumask_clear_cpu(me, &nmi_ipi_pending_mask);
+ } else {
+ /* cpumask starts clear */
+ cpumask_set_cpu(cpu, &nmi_ipi_pending_mask);
+ }
+ nmi_ipi_busy_count++;
+ nmi_ipi_unlock();
+
+ do_smp_send_nmi_ipi(cpu);
+
+ while (!cpumask_empty(&nmi_ipi_pending_mask)) {
+ udelay(1);
+ if (delay_us) {
+ delay_us--;
+ if (!delay_us)
+ break;
+ }
+ }
+
+ nmi_ipi_lock();
+ if (!cpumask_empty(&nmi_ipi_pending_mask)) {
+ /* Could not gather all CPUs */
+ ret = 0;
+ cpumask_clear(&nmi_ipi_pending_mask);
+ }
+ nmi_ipi_busy_count--;
+ nmi_ipi_unlock_end(&flags);
+
+ return ret;
+}
+#endif /* CONFIG_NMI_IPI */
+
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
{
@@ -326,29 +506,22 @@ void tick_broadcast(const struct cpumask *mask)
}
#endif
-#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC_CORE)
-void smp_send_debugger_break(void)
+#ifdef CONFIG_DEBUGGER
+void debugger_ipi_callback(struct pt_regs *regs)
{
- int cpu;
- int me = raw_smp_processor_id();
-
- if (unlikely(!smp_ops))
- return;
+ debugger_ipi(regs);
+}
- for_each_online_cpu(cpu)
- if (cpu != me)
- do_message_pass(cpu, PPC_MSG_DEBUGGER_BREAK);
+void smp_send_debugger_break(void)
+{
+ smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, debugger_ipi_callback, 1000000);
}
#endif
#ifdef CONFIG_KEXEC_CORE
void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
{
- crash_ipi_function_ptr = crash_ipi_callback;
- if (crash_ipi_callback) {
- mb();
- smp_send_debugger_break();
- }
+ smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, crash_ipi_callback, 1000000);
}
#endif
@@ -439,7 +612,21 @@ int generic_cpu_disable(void)
#ifdef CONFIG_PPC64
vdso_data->processorCount--;
#endif
- migrate_irqs();
+ /* Update affinity of all IRQs previously aimed at this CPU */
+ irq_migrate_all_off_this_cpu();
+
+ /*
+ * Depending on the details of the interrupt controller, it's possible
+ * that one of the interrupts we just migrated away from this CPU is
+ * actually already pending on this CPU. If we leave it in that state
+ * the interrupt will never be EOI'ed, and will never fire again. So
+ * temporarily enable interrupts here, to allow any pending interrupt to
+ * be received (and EOI'ed), before we take this CPU offline.
+ */
+ local_irq_enable();
+ mdelay(1);
+ local_irq_disable();
+
return 0;
}
@@ -521,6 +708,16 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle)
cpu_idle_thread_init(cpu, tidle);
+ /*
+ * The platform might need to allocate resources prior to bringing
+ * up the CPU
+ */
+ if (smp_ops->prepare_cpu) {
+ rc = smp_ops->prepare_cpu(cpu);
+ if (rc)
+ return rc;
+ }
+
/* Make sure callin-map entry is 0 (can be leftover a CPU
* hotplug
*/
diff --git a/arch/powerpc/kernel/stacktrace.c b/arch/powerpc/kernel/stacktrace.c
index 66711958493c..d534ed901538 100644
--- a/arch/powerpc/kernel/stacktrace.c
+++ b/arch/powerpc/kernel/stacktrace.c
@@ -59,7 +59,14 @@ EXPORT_SYMBOL_GPL(save_stack_trace);
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
- save_context_stack(trace, tsk->thread.ksp, tsk, 0);
+ unsigned long sp;
+
+ /* thread.ksp is only updated at context switch, so use the live SP for current */
+ if (tsk == current)
+ sp = current_stack_pointer();
+ else
+ sp = tsk->thread.ksp;
+
+ save_context_stack(trace, sp, tsk, 0);
}
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
diff --git a/arch/powerpc/kernel/swsusp.c b/arch/powerpc/kernel/swsusp.c
index 6ae9bd5086a4..0050b2d2ff7a 100644
--- a/arch/powerpc/kernel/swsusp.c
+++ b/arch/powerpc/kernel/swsusp.c
@@ -10,6 +10,7 @@
*/
#include <linux/sched.h>
+#include <linux/suspend.h>
#include <asm/current.h>
#include <asm/mmu_context.h>
#include <asm/switch_to.h>
diff --git a/arch/powerpc/kernel/syscalls.c b/arch/powerpc/kernel/syscalls.c
index de04c9fbb5cd..a877bf8269fe 100644
--- a/arch/powerpc/kernel/syscalls.c
+++ b/arch/powerpc/kernel/syscalls.c
@@ -42,11 +42,11 @@
#include <asm/unistd.h>
#include <asm/asm-prototypes.h>
-static inline unsigned long do_mmap2(unsigned long addr, size_t len,
+static inline long do_mmap2(unsigned long addr, size_t len,
unsigned long prot, unsigned long flags,
unsigned long fd, unsigned long off, int shift)
{
- unsigned long ret = -EINVAL;
+ long ret = -EINVAL;
if (!arch_validate_prot(prot))
goto out;
@@ -62,16 +62,16 @@ out:
return ret;
}
-unsigned long sys_mmap2(unsigned long addr, size_t len,
- unsigned long prot, unsigned long flags,
- unsigned long fd, unsigned long pgoff)
+SYSCALL_DEFINE6(mmap2, unsigned long, addr, size_t, len,
+ unsigned long, prot, unsigned long, flags,
+ unsigned long, fd, unsigned long, pgoff)
{
return do_mmap2(addr, len, prot, flags, fd, pgoff, PAGE_SHIFT-12);
}
-unsigned long sys_mmap(unsigned long addr, size_t len,
- unsigned long prot, unsigned long flags,
- unsigned long fd, off_t offset)
+SYSCALL_DEFINE6(mmap, unsigned long, addr, size_t, len,
+ unsigned long, prot, unsigned long, flags,
+ unsigned long, fd, off_t, offset)
{
return do_mmap2(addr, len, prot, flags, fd, offset, PAGE_SHIFT);
}
diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
index c1fb255a60d6..4437c70c7c2b 100644
--- a/arch/powerpc/kernel/sysfs.c
+++ b/arch/powerpc/kernel/sysfs.c
@@ -710,6 +710,10 @@ static int register_cpu_online(unsigned int cpu)
struct device_attribute *attrs, *pmc_attrs;
int i, nattrs;
+ /* For cpus present at boot a reference was already grabbed in register_cpu() */
+ if (!s->of_node)
+ s->of_node = of_get_cpu_node(cpu, NULL);
+
#ifdef CONFIG_PPC64
if (cpu_has_feature(CPU_FTR_SMT))
device_create_file(s, &dev_attr_smt_snooze_delay);
@@ -785,9 +789,9 @@ static int register_cpu_online(unsigned int cpu)
return 0;
}
+#ifdef CONFIG_HOTPLUG_CPU
static int unregister_cpu_online(unsigned int cpu)
{
-#ifdef CONFIG_HOTPLUG_CPU
struct cpu *c = &per_cpu(cpu_devices, cpu);
struct device *s = &c->dev;
struct device_attribute *attrs, *pmc_attrs;
@@ -864,9 +868,13 @@ static int unregister_cpu_online(unsigned int cpu)
}
#endif
cacheinfo_cpu_offline(cpu);
-#endif /* CONFIG_HOTPLUG_CPU */
+ of_node_put(s->of_node);
+ s->of_node = NULL;
return 0;
}
+#else /* !CONFIG_HOTPLUG_CPU */
+#define unregister_cpu_online NULL
+#endif
#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
ssize_t arch_cpu_probe(const char *buf, size_t count)
diff --git a/arch/powerpc/kernel/trace/Makefile b/arch/powerpc/kernel/trace/Makefile
new file mode 100644
index 000000000000..729dffc5f7bc
--- /dev/null
+++ b/arch/powerpc/kernel/trace/Makefile
@@ -0,0 +1,29 @@
+#
+# Makefile for the powerpc trace subsystem
+#
+
+subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror
+
+ifdef CONFIG_FUNCTION_TRACER
+# do not trace tracer code
+CFLAGS_REMOVE_ftrace.o = -mno-sched-epilog $(CC_FLAGS_FTRACE)
+endif
+
+obj32-$(CONFIG_FUNCTION_TRACER) += ftrace_32.o
+obj64-$(CONFIG_FUNCTION_TRACER) += ftrace_64.o
+ifdef CONFIG_MPROFILE_KERNEL
+obj64-$(CONFIG_FUNCTION_TRACER) += ftrace_64_mprofile.o
+else
+obj64-$(CONFIG_FUNCTION_TRACER) += ftrace_64_pg.o
+endif
+obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
+obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
+obj-$(CONFIG_FTRACE_SYSCALLS) += ftrace.o
+obj-$(CONFIG_TRACING) += trace_clock.o
+
+obj-$(CONFIG_PPC64) += $(obj64-y)
+obj-$(CONFIG_PPC32) += $(obj32-y)
+
+# Disable GCOV & sanitizers in odd or sensitive code
+GCOV_PROFILE_ftrace.o := n
+UBSAN_SANITIZE_ftrace.o := n
diff --git a/arch/powerpc/kernel/trace/ftrace.c b/arch/powerpc/kernel/trace/ftrace.c
new file mode 100644
index 000000000000..32509de6ce4c
--- /dev/null
+++ b/arch/powerpc/kernel/trace/ftrace.c
@@ -0,0 +1,620 @@
+/*
+ * Code for replacing ftrace calls with jumps.
+ *
+ * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
+ *
+ * Thanks go out to P.A. Semi, Inc. for supplying me with a PPC64 box.
+ *
+ * Added function graph tracer code, taken from x86 that was written
+ * by Frederic Weisbecker, and ported to PPC by Steven Rostedt.
+ *
+ */
+
+#define pr_fmt(fmt) "ftrace-powerpc: " fmt
+
+#include <linux/spinlock.h>
+#include <linux/hardirq.h>
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <linux/ftrace.h>
+#include <linux/percpu.h>
+#include <linux/init.h>
+#include <linux/list.h>
+
+#include <asm/asm-prototypes.h>
+#include <asm/cacheflush.h>
+#include <asm/code-patching.h>
+#include <asm/ftrace.h>
+#include <asm/syscall.h>
+
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+static unsigned int
+ftrace_call_replace(unsigned long ip, unsigned long addr, int link)
+{
+ unsigned int op;
+
+ addr = ppc_function_entry((void *)addr);
+
+ /* if (link) set op to 'bl' else 'b' */
+ op = create_branch((unsigned int *)ip, addr, link ? 1 : 0);
+
+ return op;
+}
+
+static int
+ftrace_modify_code(unsigned long ip, unsigned int old, unsigned int new)
+{
+ unsigned int replaced;
+
+ /*
+ * Note:
+ * We are paranoid about modifying text, as if a bug was to happen, it
+ * could cause us to read or write to someplace that could cause harm.
+ * Carefully read and modify the code with probe_kernel_*(), and make
+ * sure what we read is what we expected it to be before modifying it.
+ */
+
+ /* read the text we want to modify */
+ if (probe_kernel_read(&replaced, (void *)ip, MCOUNT_INSN_SIZE))
+ return -EFAULT;
+
+ /* Make sure it is what we expect it to be */
+ if (replaced != old) {
+ pr_err("%p: replaced (%#x) != old (%#x)\n",
+ (void *)ip, replaced, old);
+ return -EINVAL;
+ }
+
+ /* replace the text with the new text */
+ if (patch_instruction((unsigned int *)ip, new))
+ return -EPERM;
+
+ return 0;
+}
+
+/*
+ * Helper functions that are the same for both PPC64 and PPC32.
+ */
+static int test_24bit_addr(unsigned long ip, unsigned long addr)
+{
+ addr = ppc_function_entry((void *)addr);
+
+ /* use the create_branch to verify that this offset can be branched */
+ return create_branch((unsigned int *)ip, addr, 0);
+}
+
+#ifdef CONFIG_MODULES
+
+static int is_bl_op(unsigned int op)
+{
+ return (op & 0xfc000003) == 0x48000001;
+}
+
+static unsigned long find_bl_target(unsigned long ip, unsigned int op)
+{
+ int offset;
+
+ offset = (op & 0x03fffffc);
+ /* make it signed */
+ if (offset & 0x02000000)
+ offset |= 0xfe000000;
+
+ return ip + (long)offset;
+}
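
A worked example of the decode above, outside the kernel: a bl that branches 16 bytes backwards. The opcode is built from the Power ISA I-form branch encoding (primary opcode 18, signed 24-bit word displacement, AA=0, LK=1); the address is made up.

#include <stdio.h>

int main(void)
{
	unsigned long ip = 0xc000000000123458UL;	/* illustrative call site */
	unsigned int op = 0x4bfffff1;			/* bl .-16 */
	int offset;

	/* is_bl_op(): primary opcode 18 with AA=0 and LK=1 */
	printf("is bl: %d\n", (op & 0xfc000003) == 0x48000001);

	/* find_bl_target(): extract and sign-extend the branch offset */
	offset = op & 0x03fffffc;			/* 0x03fffff0 */
	if (offset & 0x02000000)
		offset |= 0xfe000000;			/* now -16 */
	printf("target: %lx\n", ip + (long)offset);	/* ip - 16 */
	return 0;
}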
+
+#ifdef CONFIG_PPC64
+static int
+__ftrace_make_nop(struct module *mod,
+ struct dyn_ftrace *rec, unsigned long addr)
+{
+ unsigned long entry, ptr, tramp;
+ unsigned long ip = rec->ip;
+ unsigned int op, pop;
+
+ /* read where this goes */
+ if (probe_kernel_read(&op, (void *)ip, sizeof(int))) {
+ pr_err("Fetching opcode failed.\n");
+ return -EFAULT;
+ }
+
+ /* Make sure that this is still a 24-bit jump */
+ if (!is_bl_op(op)) {
+ pr_err("Not expected bl: opcode is %x\n", op);
+ return -EINVAL;
+ }
+
+ /* let's find where the pointer goes */
+ tramp = find_bl_target(ip, op);
+
+ pr_devel("ip:%lx jumps to %lx", ip, tramp);
+
+ if (module_trampoline_target(mod, tramp, &ptr)) {
+ pr_err("Failed to get trampoline target\n");
+ return -EFAULT;
+ }
+
+ pr_devel("trampoline target %lx", ptr);
+
+ entry = ppc_global_function_entry((void *)addr);
+ /* This should match what was called */
+ if (ptr != entry) {
+ pr_err("addr %lx does not match expected %lx\n", ptr, entry);
+ return -EINVAL;
+ }
+
+#ifdef CC_USING_MPROFILE_KERNEL
+ /* When using -mprofile-kernel there is no load to jump over */
+ pop = PPC_INST_NOP;
+
+ if (probe_kernel_read(&op, (void *)(ip - 4), 4)) {
+ pr_err("Fetching instruction at %lx failed.\n", ip - 4);
+ return -EFAULT;
+ }
+
+ /* We expect either a mflr r0, or a std r0, LRSAVE(r1) */
+ if (op != PPC_INST_MFLR && op != PPC_INST_STD_LR) {
+ pr_err("Unexpected instruction %08x around bl _mcount\n", op);
+ return -EINVAL;
+ }
+#else
+ /*
+ * Our original call site looks like:
+ *
+ * bl <tramp>
+ * ld r2,XX(r1)
+ *
+ * Milton Miller pointed out that we cannot simply nop the branch.
+ * If a task was preempted when calling a trace function, the nops
+ * will remove the way to restore the TOC in r2 and the r2 TOC will
+ * get corrupted.
+ *
+ * Use a b +8 to jump over the load.
+ */
+
+ pop = PPC_INST_BRANCH | 8; /* b +8 */
+
+ /*
+ * Check what is in the next instruction. We can see ld r2,40(r1), but
+ * on first pass after boot we will see mflr r0.
+ */
+ if (probe_kernel_read(&op, (void *)(ip+4), MCOUNT_INSN_SIZE)) {
+ pr_err("Fetching op failed.\n");
+ return -EFAULT;
+ }
+
+ if (op != PPC_INST_LD_TOC) {
+ pr_err("Expected %08x found %08x\n", PPC_INST_LD_TOC, op);
+ return -EINVAL;
+ }
+#endif /* CC_USING_MPROFILE_KERNEL */
+
+ if (patch_instruction((unsigned int *)ip, pop)) {
+ pr_err("Patching NOP failed.\n");
+ return -EPERM;
+ }
+
+ return 0;
+}
+
+#else /* !PPC64 */
+static int
+__ftrace_make_nop(struct module *mod,
+ struct dyn_ftrace *rec, unsigned long addr)
+{
+ unsigned int op;
+ unsigned int jmp[4];
+ unsigned long ip = rec->ip;
+ unsigned long tramp;
+
+ if (probe_kernel_read(&op, (void *)ip, MCOUNT_INSN_SIZE))
+ return -EFAULT;
+
+ /* Make sure that this is still a 24-bit jump */
+ if (!is_bl_op(op)) {
+ pr_err("Not expected bl: opcode is %x\n", op);
+ return -EINVAL;
+ }
+
+ /* let's find where the pointer goes */
+ tramp = find_bl_target(ip, op);
+
+ /*
+ * On PPC32 the trampoline looks like:
+ * 0x3d, 0x80, 0x00, 0x00 lis r12,sym@ha
+ * 0x39, 0x8c, 0x00, 0x00 addi r12,r12,sym@l
+ * 0x7d, 0x89, 0x03, 0xa6 mtctr r12
+ * 0x4e, 0x80, 0x04, 0x20 bctr
+ */
+
+ pr_devel("ip:%lx jumps to %lx", ip, tramp);
+
+ /* Find where the trampoline jumps to */
+ if (probe_kernel_read(jmp, (void *)tramp, sizeof(jmp))) {
+ pr_err("Failed to read %lx\n", tramp);
+ return -EFAULT;
+ }
+
+ pr_devel(" %08x %08x ", jmp[0], jmp[1]);
+
+ /* verify that this is what we expect it to be */
+ if (((jmp[0] & 0xffff0000) != 0x3d800000) ||
+ ((jmp[1] & 0xffff0000) != 0x398c0000) ||
+ (jmp[2] != 0x7d8903a6) ||
+ (jmp[3] != 0x4e800420)) {
+ pr_err("Not a trampoline\n");
+ return -EINVAL;
+ }
+
+ tramp = (jmp[1] & 0xffff) |
+ ((jmp[0] & 0xffff) << 16);
+ if (tramp & 0x8000)
+ tramp -= 0x10000;
+
+ pr_devel(" %lx ", tramp);
+
+ if (tramp != addr) {
+ pr_err("Trampoline location %08lx does not match addr\n",
+ tramp);
+ return -EINVAL;
+ }
+
+ op = PPC_INST_NOP;
+
+ if (patch_instruction((unsigned int *)ip, op))
+ return -EPERM;
+
+ return 0;
+}
+#endif /* PPC64 */
+#endif /* CONFIG_MODULES */
+
+int ftrace_make_nop(struct module *mod,
+ struct dyn_ftrace *rec, unsigned long addr)
+{
+ unsigned long ip = rec->ip;
+ unsigned int old, new;
+
+ /*
+ * If the calling address is more than 24 bits away,
+ * then we had to use a trampoline to make the call.
+ * Otherwise just update the call site.
+ */
+ if (test_24bit_addr(ip, addr)) {
+ /* within range */
+ old = ftrace_call_replace(ip, addr, 1);
+ new = PPC_INST_NOP;
+ return ftrace_modify_code(ip, old, new);
+ }
+
+#ifdef CONFIG_MODULES
+ /*
+ * Out of range jumps are called from modules.
+ * We should either already have a pointer to the module
+ * or it has been passed in.
+ */
+ if (!rec->arch.mod) {
+ if (!mod) {
+ pr_err("No module loaded addr=%lx\n", addr);
+ return -EFAULT;
+ }
+ rec->arch.mod = mod;
+ } else if (mod) {
+ if (mod != rec->arch.mod) {
+ pr_err("Record mod %p not equal to passed in mod %p\n",
+ rec->arch.mod, mod);
+ return -EINVAL;
+ }
+ /* nothing to do if mod == rec->arch.mod */
+ } else
+ mod = rec->arch.mod;
+
+ return __ftrace_make_nop(mod, rec, addr);
+#else
+ /* We should not get here without modules */
+ return -EINVAL;
+#endif /* CONFIG_MODULES */
+}
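
For the range check referred to above: the relative branch field is a signed 24-bit word displacement, so a plain bl reaches only about ±32 MB around the call site, and anything further away (typically module text) has to go through a trampoline. A small sketch of that arithmetic under those assumptions; it is not the kernel's test_24bit_addr(), which goes through create_branch().

#include <stdio.h>

/* a relative b/bl can encode byte offsets from -0x2000000 to +0x1fffffc */
static int within_bl_range(unsigned long ip, unsigned long addr)
{
	long offset = (long)addr - (long)ip;

	return offset >= -0x2000000L && offset <= 0x1fffffcL &&
	       (offset & 3) == 0;
}

int main(void)
{
	unsigned long ip = 0xc000000000100000UL;	/* illustrative call site */

	printf("%d\n", within_bl_range(ip, ip + 0x1000000));	/* 1: 16 MB away */
	printf("%d\n", within_bl_range(ip, ip + 0x4000000));	/* 0: 64 MB away */
	return 0;
}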
+
+#ifdef CONFIG_MODULES
+#ifdef CONFIG_PPC64
+/*
+ * Examine the existing instructions for __ftrace_make_call.
+ * They should effectively be a NOP, and follow formal constraints,
+ * depending on the ABI. Return false if they don't.
+ */
+#ifndef CC_USING_MPROFILE_KERNEL
+static int
+expected_nop_sequence(void *ip, unsigned int op0, unsigned int op1)
+{
+ /*
+ * We expect to see:
+ *
+ * b +8
+ * ld r2,XX(r1)
+ *
+ * The load offset is different depending on the ABI. For simplicity
+ * just mask it out when doing the compare.
+ */
+ if ((op0 != 0x48000008) || ((op1 & 0xffff0000) != 0xe8410000))
+ return 0;
+ return 1;
+}
+#else
+static int
+expected_nop_sequence(void *ip, unsigned int op0, unsigned int op1)
+{
+ /* look for patched "NOP" on ppc64 with -mprofile-kernel */
+ if (op0 != PPC_INST_NOP)
+ return 0;
+ return 1;
+}
+#endif
+
+static int
+__ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
+{
+ unsigned int op[2];
+ void *ip = (void *)rec->ip;
+
+ /* read where this goes */
+ if (probe_kernel_read(op, ip, sizeof(op)))
+ return -EFAULT;
+
+ if (!expected_nop_sequence(ip, op[0], op[1])) {
+ pr_err("Unexpected call sequence at %p: %x %x\n",
+ ip, op[0], op[1]);
+ return -EINVAL;
+ }
+
+ /* If we never set up a trampoline to ftrace_caller, then bail */
+ if (!rec->arch.mod->arch.tramp) {
+ pr_err("No ftrace trampoline\n");
+ return -EINVAL;
+ }
+
+ /* Ensure branch is within 24 bits */
+ if (!create_branch(ip, rec->arch.mod->arch.tramp, BRANCH_SET_LINK)) {
+ pr_err("Branch out of range\n");
+ return -EINVAL;
+ }
+
+ if (patch_branch(ip, rec->arch.mod->arch.tramp, BRANCH_SET_LINK)) {
+ pr_err("REL24 out of range!\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
+ unsigned long addr)
+{
+ return ftrace_make_call(rec, addr);
+}
+#endif
+
+#else /* !CONFIG_PPC64: */
+static int
+__ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
+{
+ unsigned int op;
+ unsigned long ip = rec->ip;
+
+ /* read where this goes */
+ if (probe_kernel_read(&op, (void *)ip, MCOUNT_INSN_SIZE))
+ return -EFAULT;
+
+ /* It should be pointing to a nop */
+ if (op != PPC_INST_NOP) {
+ pr_err("Expected NOP but have %x\n", op);
+ return -EINVAL;
+ }
+
+ /* If we never set up a trampoline to ftrace_caller, then bail */
+ if (!rec->arch.mod->arch.tramp) {
+ pr_err("No ftrace trampoline\n");
+ return -EINVAL;
+ }
+
+ /* create the branch to the trampoline */
+ op = create_branch((unsigned int *)ip,
+ rec->arch.mod->arch.tramp, BRANCH_SET_LINK);
+ if (!op) {
+ pr_err("REL24 out of range!\n");
+ return -EINVAL;
+ }
+
+ pr_devel("write to %lx\n", rec->ip);
+
+ if (patch_instruction((unsigned int *)ip, op))
+ return -EPERM;
+
+ return 0;
+}
+#endif /* CONFIG_PPC64 */
+#endif /* CONFIG_MODULES */
+
+int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
+{
+ unsigned long ip = rec->ip;
+ unsigned int old, new;
+
+ /*
+ * If the calling address is more than 24 bits away,
+ * then we had to use a trampoline to make the call.
+ * Otherwise just update the call site.
+ */
+ if (test_24bit_addr(ip, addr)) {
+ /* within range */
+ old = PPC_INST_NOP;
+ new = ftrace_call_replace(ip, addr, 1);
+ return ftrace_modify_code(ip, old, new);
+ }
+
+#ifdef CONFIG_MODULES
+ /*
+ * Out of range jumps are called from modules.
+ * Since we are converting from a nop, the record had better
+ * already have a module defined.
+ */
+ if (!rec->arch.mod) {
+ pr_err("No module loaded\n");
+ return -EINVAL;
+ }
+
+ return __ftrace_make_call(rec, addr);
+#else
+ /* We should not get here without modules */
+ return -EINVAL;
+#endif /* CONFIG_MODULES */
+}
+
+int ftrace_update_ftrace_func(ftrace_func_t func)
+{
+ unsigned long ip = (unsigned long)(&ftrace_call);
+ unsigned int old, new;
+ int ret;
+
+ old = *(unsigned int *)&ftrace_call;
+ new = ftrace_call_replace(ip, (unsigned long)func, 1);
+ ret = ftrace_modify_code(ip, old, new);
+
+ return ret;
+}
+
+static int __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
+{
+ unsigned long ftrace_addr = (unsigned long)FTRACE_ADDR;
+ int ret;
+
+ ret = ftrace_update_record(rec, enable);
+
+ switch (ret) {
+ case FTRACE_UPDATE_IGNORE:
+ return 0;
+ case FTRACE_UPDATE_MAKE_CALL:
+ return ftrace_make_call(rec, ftrace_addr);
+ case FTRACE_UPDATE_MAKE_NOP:
+ return ftrace_make_nop(NULL, rec, ftrace_addr);
+ }
+
+ return 0;
+}
+
+void ftrace_replace_code(int enable)
+{
+ struct ftrace_rec_iter *iter;
+ struct dyn_ftrace *rec;
+ int ret;
+
+ for (iter = ftrace_rec_iter_start(); iter;
+ iter = ftrace_rec_iter_next(iter)) {
+ rec = ftrace_rec_iter_record(iter);
+ ret = __ftrace_replace_code(rec, enable);
+ if (ret) {
+ ftrace_bug(ret, rec);
+ return;
+ }
+ }
+}
+
+/*
+ * Use the default ftrace_modify_all_code, but without stop_machine():
+ * each call site is patched with a single aligned instruction, which
+ * can be replaced atomically under the running system.
+ */
+void arch_ftrace_update_code(int command)
+{
+ ftrace_modify_all_code(command);
+}
+
+int __init ftrace_dyn_arch_init(void)
+{
+ return 0;
+}
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+extern void ftrace_graph_call(void);
+extern void ftrace_graph_stub(void);
+
+int ftrace_enable_ftrace_graph_caller(void)
+{
+ unsigned long ip = (unsigned long)(&ftrace_graph_call);
+ unsigned long addr = (unsigned long)(&ftrace_graph_caller);
+ unsigned long stub = (unsigned long)(&ftrace_graph_stub);
+ unsigned int old, new;
+
+ old = ftrace_call_replace(ip, stub, 0);
+ new = ftrace_call_replace(ip, addr, 0);
+
+ return ftrace_modify_code(ip, old, new);
+}
+
+int ftrace_disable_ftrace_graph_caller(void)
+{
+ unsigned long ip = (unsigned long)(&ftrace_graph_call);
+ unsigned long addr = (unsigned long)(&ftrace_graph_caller);
+ unsigned long stub = (unsigned long)(&ftrace_graph_stub);
+ unsigned int old, new;
+
+ old = ftrace_call_replace(ip, addr, 0);
+ new = ftrace_call_replace(ip, stub, 0);
+
+ return ftrace_modify_code(ip, old, new);
+}
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
+/*
+ * Hook the return address and push it onto the stack of return
+ * addresses in the current thread info. Return the address we want
+ * to divert to.
+ */
+unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip)
+{
+ struct ftrace_graph_ent trace;
+ unsigned long return_hooker;
+
+ if (unlikely(ftrace_graph_is_dead()))
+ goto out;
+
+ if (unlikely(atomic_read(&current->tracing_graph_pause)))
+ goto out;
+
+ return_hooker = ppc_function_entry(return_to_handler);
+
+ trace.func = ip;
+ trace.depth = current->curr_ret_stack + 1;
+
+ /* Only trace if the calling function expects to */
+ if (!ftrace_graph_entry(&trace))
+ goto out;
+
+ if (ftrace_push_return_trace(parent, ip, &trace.depth, 0,
+ NULL) == -EBUSY)
+ goto out;
+
+ parent = return_hooker;
+out:
+ return parent;
+}
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
+#if defined(CONFIG_FTRACE_SYSCALLS) && defined(CONFIG_PPC64)
+unsigned long __init arch_syscall_addr(int nr)
+{
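+ /* sys_call_table holds two entries per syscall on ppc64: the 64-bit one, then the 32-bit compat one */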
+ return sys_call_table[nr*2];
+}
+#endif /* CONFIG_FTRACE_SYSCALLS && CONFIG_PPC64 */
+
+#ifdef PPC64_ELF_ABI_v1
+char *arch_ftrace_match_adjust(char *str, const char *search)
+{
+ if (str[0] == '.' && search[0] != '.')
+ return str + 1;
+ else
+ return str;
+}
+#endif /* PPC64_ELF_ABI_v1 */
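
The adjustment above exists because, under the ELFv1 ABI, the dot-prefixed symbol is the function's actual text entry while the plain name refers to the function descriptor. A standalone illustration of the behaviour; the symbol name is made up.

#include <stdio.h>

/* same logic as arch_ftrace_match_adjust() above, copied out of the kernel */
static char *adjust(char *str, const char *search)
{
	if (str[0] == '.' && search[0] != '.')
		return str + 1;
	return str;
}

int main(void)
{
	char sym[] = ".schedule";

	/* a filter string of "schedule" now matches the ".schedule" text symbol */
	printf("%s\n", adjust(sym, "schedule"));
	return 0;
}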
diff --git a/arch/powerpc/kernel/trace/ftrace_32.S b/arch/powerpc/kernel/trace/ftrace_32.S
new file mode 100644
index 000000000000..afef2c076282
--- /dev/null
+++ b/arch/powerpc/kernel/trace/ftrace_32.S
@@ -0,0 +1,118 @@
+/*
+ * Split from entry_32.S
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/magic.h>
+#include <asm/reg.h>
+#include <asm/ppc_asm.h>
+#include <asm/asm-offsets.h>
+#include <asm/ftrace.h>
+#include <asm/export.h>
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+_GLOBAL(mcount)
+_GLOBAL(_mcount)
+ /*
+ * _mcount on PPC32 is required to preserve the link register, but
+ * we have r0 to play with. We use r0 to move the address at which
+ * to resume in mcount's caller into the ctr register, restore the
+ * link register, and then jump back using the ctr register.
+ */
+ mflr r0
+ mtctr r0
+ lwz r0, 4(r1)
+ mtlr r0
+ bctr
+
+_GLOBAL(ftrace_caller)
+ MCOUNT_SAVE_FRAME
+ /* r3 ends up with link register */
+ subi r3, r3, MCOUNT_INSN_SIZE
+.globl ftrace_call
+ftrace_call:
+ bl ftrace_stub
+ nop
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+.globl ftrace_graph_call
+ftrace_graph_call:
+ b ftrace_graph_stub
+_GLOBAL(ftrace_graph_stub)
+#endif
+ MCOUNT_RESTORE_FRAME
+ /* old link register ends up in ctr reg */
+ bctr
+#else
+_GLOBAL(mcount)
+_GLOBAL(_mcount)
+
+ MCOUNT_SAVE_FRAME
+
+ subi r3, r3, MCOUNT_INSN_SIZE
+ LOAD_REG_ADDR(r5, ftrace_trace_function)
+ lwz r5,0(r5)
+
+ mtctr r5
+ bctrl
+ nop
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ b ftrace_graph_caller
+#endif
+ MCOUNT_RESTORE_FRAME
+ bctr
+#endif
+EXPORT_SYMBOL(_mcount)
+
+_GLOBAL(ftrace_stub)
+ blr
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+_GLOBAL(ftrace_graph_caller)
+ /* load r4 with local address */
+ lwz r4, 44(r1)
+ subi r4, r4, MCOUNT_INSN_SIZE
+
+ /* Grab the LR out of the caller stack frame */
+ lwz r3,52(r1)
+
+ bl prepare_ftrace_return
+ nop
+
+ /*
+ * prepare_ftrace_return gives us the address we divert to.
+ * Change the LR in the callers stack frame to this.
+ */
+ stw r3,52(r1)
+
+ MCOUNT_RESTORE_FRAME
+ /* old link register ends up in ctr reg */
+ bctr
+
+_GLOBAL(return_to_handler)
+ /* need to save return values */
+ stwu r1, -32(r1)
+ stw r3, 20(r1)
+ stw r4, 16(r1)
+ stw r31, 12(r1)
+ mr r31, r1
+
+ bl ftrace_return_to_handler
+ nop
+
+ /* return value has real return address */
+ mtlr r3
+
+ lwz r3, 20(r1)
+ lwz r4, 16(r1)
+ lwz r31,12(r1)
+ lwz r1, 0(r1)
+
+ /* Jump back to real return address */
+ blr
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
diff --git a/arch/powerpc/kernel/trace/ftrace_64.S b/arch/powerpc/kernel/trace/ftrace_64.S
new file mode 100644
index 000000000000..e5ccea19821e
--- /dev/null
+++ b/arch/powerpc/kernel/trace/ftrace_64.S
@@ -0,0 +1,85 @@
+/*
+ * Split from entry_64.S
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/magic.h>
+#include <asm/ppc_asm.h>
+#include <asm/asm-offsets.h>
+#include <asm/ftrace.h>
+#include <asm/ppc-opcode.h>
+#include <asm/export.h>
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+_GLOBAL(mcount)
+_GLOBAL(_mcount)
+EXPORT_SYMBOL(_mcount)
+ mflr r12
+ mtctr r12
+ mtlr r0
+ bctr
+
+#else /* CONFIG_DYNAMIC_FTRACE */
+_GLOBAL_TOC(_mcount)
+EXPORT_SYMBOL(_mcount)
+ /* Taken from output of objdump from lib64/glibc */
+ mflr r3
+ ld r11, 0(r1)
+ stdu r1, -112(r1)
+ std r3, 128(r1)
+ ld r4, 16(r11)
+
+ subi r3, r3, MCOUNT_INSN_SIZE
+ LOAD_REG_ADDR(r5,ftrace_trace_function)
+ ld r5,0(r5)
+ ld r5,0(r5)
+ mtctr r5
+ bctrl
+ nop
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ b ftrace_graph_caller
+#endif
+ ld r0, 128(r1)
+ mtlr r0
+ addi r1, r1, 112
+_GLOBAL(ftrace_stub)
+ blr
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+_GLOBAL(return_to_handler)
+ /* need to save return values */
+ std r4, -32(r1)
+ std r3, -24(r1)
+ /* save TOC */
+ std r2, -16(r1)
+ std r31, -8(r1)
+ mr r31, r1
+ stdu r1, -112(r1)
+
+ /*
+ * We might be called from a module.
+ * Switch to our TOC to run inside the core kernel.
+ */
+ ld r2, PACATOC(r13)
+
+ bl ftrace_return_to_handler
+ nop
+
+ /* return value has real return address */
+ mtlr r3
+
+ ld r1, 0(r1)
+ ld r4, -32(r1)
+ ld r3, -24(r1)
+ ld r2, -16(r1)
+ ld r31, -8(r1)
+
+ /* Jump back to real return address */
+ blr
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
diff --git a/arch/powerpc/kernel/trace/ftrace_64_mprofile.S b/arch/powerpc/kernel/trace/ftrace_64_mprofile.S
new file mode 100644
index 000000000000..7c933a99f5d5
--- /dev/null
+++ b/arch/powerpc/kernel/trace/ftrace_64_mprofile.S
@@ -0,0 +1,272 @@
+/*
+ * Split from ftrace_64.S
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/magic.h>
+#include <asm/ppc_asm.h>
+#include <asm/asm-offsets.h>
+#include <asm/ftrace.h>
+#include <asm/ppc-opcode.h>
+#include <asm/export.h>
+#include <asm/thread_info.h>
+#include <asm/bug.h>
+#include <asm/ptrace.h>
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+/*
+ *
+ * ftrace_caller() is the function that replaces _mcount() when ftrace is
+ * active.
+ *
+ * We arrive here after a function A calls function B, and we are the trace
+ * function for B. When we enter, r1 points to A's stack frame; B has not
+ * yet had a chance to allocate one.
+ *
+ * Additionally, r2 may point to the TOC for either A or B, depending on
+ * whether B did a TOC setup sequence before calling us.
+ *
+ * On entry, the LR points back to the _mcount() call site, and r0 holds the
+ * saved LR as it was on entry to B, i.e. the original return address at the
+ * call site in A.
+ *
+ * Our job is to save the register state into a struct pt_regs (on the stack)
+ * and then arrange for the ftrace function to be called.
+ */
+_GLOBAL(ftrace_caller)
+ /* Save the original return address in A's stack frame */
+ std r0,LRSAVE(r1)
+
+ /* Create our stack frame + pt_regs */
+ stdu r1,-SWITCH_FRAME_SIZE(r1)
+
+ /* Save all gprs to pt_regs */
+ SAVE_8GPRS(0,r1)
+ SAVE_8GPRS(8,r1)
+ SAVE_8GPRS(16,r1)
+ SAVE_8GPRS(24,r1)
+
+ /* Load special regs for save below */
+ mfmsr r8
+ mfctr r9
+ mfxer r10
+ mfcr r11
+
+ /* Get the _mcount() call site out of LR */
+ mflr r7
+ /* Save it as pt_regs->nip */
+ std r7, _NIP(r1)
+ /* Save the original return address (r0) in pt_regs->link */
+ std r0, _LINK(r1)
+
+ /* Save callee's TOC in the ABI compliant location */
+ std r2, 24(r1)
+ ld r2,PACATOC(r13) /* get kernel TOC in r2 */
+
+ addis r3,r2,function_trace_op@toc@ha
+ addi r3,r3,function_trace_op@toc@l
+ ld r5,0(r3)
+
+#ifdef CONFIG_LIVEPATCH
+ mr r14,r7 /* remember old NIP */
+#endif
+ /* Calculate ip from nip-4 into r3 for call below */
+ subi r3, r7, MCOUNT_INSN_SIZE
+
+ /* Put the original return address in r4 as parent_ip */
+ mr r4, r0
+
+ /* Save special regs */
+ std r8, _MSR(r1)
+ std r9, _CTR(r1)
+ std r10, _XER(r1)
+ std r11, _CCR(r1)
+
+ /* Load &pt_regs in r6 for call below */
+ addi r6, r1 ,STACK_FRAME_OVERHEAD
+
+ /* ftrace_call(r3, r4, r5, r6) */
+.globl ftrace_call
+ftrace_call:
+ bl ftrace_stub
+ nop
+
+ /* Load ctr with the possibly modified NIP */
+ ld r3, _NIP(r1)
+ mtctr r3
+#ifdef CONFIG_LIVEPATCH
+ cmpd r14,r3 /* has NIP been altered? */
+#endif
+
+ /* Restore gprs */
+ REST_8GPRS(0,r1)
+ REST_8GPRS(8,r1)
+ REST_8GPRS(16,r1)
+ REST_8GPRS(24,r1)
+
+ /* Restore possibly modified LR */
+ ld r0, _LINK(r1)
+ mtlr r0
+
+ /* Restore callee's TOC */
+ ld r2, 24(r1)
+
+ /* Pop our stack frame */
+ addi r1, r1, SWITCH_FRAME_SIZE
+
+#ifdef CONFIG_LIVEPATCH
+ /* Based on the cmpd above, if the NIP was altered handle livepatch */
+ bne- livepatch_handler
+#endif
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+.globl ftrace_graph_call
+ftrace_graph_call:
+ b ftrace_graph_stub
+_GLOBAL(ftrace_graph_stub)
+#endif
+
+ bctr /* jump after _mcount site */
+
+_GLOBAL(ftrace_stub)
+ blr
+
+#ifdef CONFIG_LIVEPATCH
+ /*
+ * This function runs in the mcount context, between two functions. As
+ * such it can only clobber registers which are volatile and used in
+ * function linkage.
+ *
+ * We get here when a function A calls another function B, but B has
+ * been live patched with a new function C.
+ *
+ * On entry:
+ * - we have no stack frame and cannot allocate one
+ * - LR points back to the original caller (in A)
+ * - CTR holds the new NIP in C
+ * - r0 & r12 are free
+ *
+ * r0 can't be used as the base register for a DS-form load or store, so
+ * we temporarily shuffle r1 (stack pointer) into r0 and then put it back.
+ */
+livepatch_handler:
+ CURRENT_THREAD_INFO(r12, r1)
+
+ /* Save stack pointer into r0 */
+ mr r0, r1
+
+ /* Allocate 3 x 8 bytes */
+ ld r1, TI_livepatch_sp(r12)
+ addi r1, r1, 24
+ std r1, TI_livepatch_sp(r12)
+
+ /* Save toc & real LR on livepatch stack */
+ std r2, -24(r1)
+ mflr r12
+ std r12, -16(r1)
+
+ /* Store stack end marker */
+ lis r12, STACK_END_MAGIC@h
+ ori r12, r12, STACK_END_MAGIC@l
+ std r12, -8(r1)
+
+ /* Restore real stack pointer */
+ mr r1, r0
+
+ /* Put ctr in r12 for global entry and branch there */
+ mfctr r12
+ bctrl
+
+ /*
+ * Now we are returning from the patched function to the original
+ * caller A. We are free to use r0 and r12, and we can use r2 until we
+ * restore it.
+ */
+
+ CURRENT_THREAD_INFO(r12, r1)
+
+ /* Save stack pointer into r0 */
+ mr r0, r1
+
+ ld r1, TI_livepatch_sp(r12)
+
+ /* Check stack marker hasn't been trashed */
+ lis r2, STACK_END_MAGIC@h
+ ori r2, r2, STACK_END_MAGIC@l
+ ld r12, -8(r1)
+1: tdne r12, r2
+ EMIT_BUG_ENTRY 1b, __FILE__, __LINE__ - 1, 0
+
+ /* Restore LR & toc from livepatch stack */
+ ld r12, -16(r1)
+ mtlr r12
+ ld r2, -24(r1)
+
+ /* Pop livepatch stack frame */
+ CURRENT_THREAD_INFO(r12, r0)
+ subi r1, r1, 24
+ std r1, TI_livepatch_sp(r12)
+
+ /* Restore real stack pointer */
+ mr r1, r0
+
+ /* Return to original caller of live patched function */
+ blr
+#endif /* CONFIG_LIVEPATCH */
+
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+_GLOBAL(ftrace_graph_caller)
+ stdu r1, -112(r1)
+ /* with -mprofile-kernel, parameter regs are still alive at _mcount */
+ std r10, 104(r1)
+ std r9, 96(r1)
+ std r8, 88(r1)
+ std r7, 80(r1)
+ std r6, 72(r1)
+ std r5, 64(r1)
+ std r4, 56(r1)
+ std r3, 48(r1)
+
+ /* Save callee's TOC in the ABI compliant location */
+ std r2, 24(r1)
+ ld r2, PACATOC(r13) /* get kernel TOC in r2 */
+
+ mfctr r4 /* ftrace_caller has moved local addr here */
+ std r4, 40(r1)
+ mflr r3 /* ftrace_caller has restored LR from stack */
+ subi r4, r4, MCOUNT_INSN_SIZE
+
+ bl prepare_ftrace_return
+ nop
+
+ /*
+ * prepare_ftrace_return gives us the address we divert to.
+ * Change the LR to this.
+ */
+ mtlr r3
+
+ ld r0, 40(r1)
+ mtctr r0
+ ld r10, 104(r1)
+ ld r9, 96(r1)
+ ld r8, 88(r1)
+ ld r7, 80(r1)
+ ld r6, 72(r1)
+ ld r5, 64(r1)
+ ld r4, 56(r1)
+ ld r3, 48(r1)
+
+ /* Restore callee's TOC */
+ ld r2, 24(r1)
+
+ addi r1, r1, 112
+ mflr r0
+ std r0, LRSAVE(r1)
+ bctr
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
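
livepatch_handler above has to preserve the caller's LR and TOC without a regular stack frame, so it parks them on a small per-thread livepatch stack and seals each entry with STACK_END_MAGIC, trapping if the marker has been overwritten by the time it pops. Below is a minimal userspace sketch of that push/verify/pop discipline; the struct, slot layout, and sentinel value are illustrative stand-ins, not the kernel's thread_info fields:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define SENTINEL 0x57AC6E9DUL	/* stands in for STACK_END_MAGIC */

/* A tiny per-thread side stack: each push stores saved state plus a
 * sentinel, and the pop verifies the sentinel before trusting the data. */
struct side_stack {
	uint64_t slots[64];
	unsigned int top;
};

static void lp_push(struct side_stack *s, uint64_t toc, uint64_t lr)
{
	s->slots[s->top++] = toc;
	s->slots[s->top++] = lr;
	s->slots[s->top++] = SENTINEL;
}

static uint64_t lp_pop(struct side_stack *s, uint64_t *toc)
{
	if (s->slots[--s->top] != SENTINEL) {	/* equivalent of the tdne trap */
		fprintf(stderr, "livepatch stack corrupted\n");
		abort();
	}
	uint64_t lr = s->slots[--s->top];
	*toc = s->slots[--s->top];
	return lr;
}

int main(void)
{
	struct side_stack s = { .top = 0 };
	uint64_t toc;

	lp_push(&s, 0x1000, 0x2000);	/* save caller state before entering the patched function */
	printf("restored lr=%#llx toc=%#llx\n",
	       (unsigned long long)lp_pop(&s, &toc), (unsigned long long)toc);
	return 0;
}
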
diff --git a/arch/powerpc/kernel/trace/ftrace_64_pg.S b/arch/powerpc/kernel/trace/ftrace_64_pg.S
new file mode 100644
index 000000000000..f095358da96e
--- /dev/null
+++ b/arch/powerpc/kernel/trace/ftrace_64_pg.S
@@ -0,0 +1,68 @@
+/*
+ * Split from ftrace_64.S
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/magic.h>
+#include <asm/ppc_asm.h>
+#include <asm/asm-offsets.h>
+#include <asm/ftrace.h>
+#include <asm/ppc-opcode.h>
+#include <asm/export.h>
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+_GLOBAL_TOC(ftrace_caller)
+ /* Taken from output of objdump from lib64/glibc */
+ mflr r3
+ ld r11, 0(r1)
+ stdu r1, -112(r1)
+ std r3, 128(r1)
+ ld r4, 16(r11)
+ subi r3, r3, MCOUNT_INSN_SIZE
+.globl ftrace_call
+ftrace_call:
+ bl ftrace_stub
+ nop
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+.globl ftrace_graph_call
+ftrace_graph_call:
+ b ftrace_graph_stub
+_GLOBAL(ftrace_graph_stub)
+#endif
+ ld r0, 128(r1)
+ mtlr r0
+ addi r1, r1, 112
+
+_GLOBAL(ftrace_stub)
+ blr
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+_GLOBAL(ftrace_graph_caller)
+ /* load r4 with local address */
+ ld r4, 128(r1)
+ subi r4, r4, MCOUNT_INSN_SIZE
+
+ /* Grab the LR out of the caller stack frame */
+ ld r11, 112(r1)
+ ld r3, 16(r11)
+
+ bl prepare_ftrace_return
+ nop
+
+ /*
+ * prepare_ftrace_return gives us the address we divert to.
+ * Change the LR in the callers stack frame to this.
+ */
+ ld r11, 112(r1)
+ std r3, 16(r11)
+
+ ld r0, 128(r1)
+ mtlr r0
+ addi r1, r1, 112
+ blr
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
diff --git a/arch/powerpc/kernel/trace_clock.c b/arch/powerpc/kernel/trace/trace_clock.c
index 49170690946d..49170690946d 100644
--- a/arch/powerpc/kernel/trace_clock.c
+++ b/arch/powerpc/kernel/trace/trace_clock.c
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index ff365f9de27a..d4e545d27ef9 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -35,13 +35,13 @@
#include <linux/backlight.h>
#include <linux/bug.h>
#include <linux/kdebug.h>
-#include <linux/debugfs.h>
#include <linux/ratelimit.h>
#include <linux/context_tracking.h>
#include <asm/emulated_ops.h>
#include <asm/pgtable.h>
#include <linux/uaccess.h>
+#include <asm/debugfs.h>
#include <asm/io.h>
#include <asm/machdep.h>
#include <asm/rtas.h>
@@ -279,18 +279,35 @@ void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
void system_reset_exception(struct pt_regs *regs)
{
+ /*
+ * Avoid crashes in case of nested NMI exceptions. Recoverability
+ * is determined by RI and in_nmi().
+ */
+ bool nested = in_nmi();
+ if (!nested)
+ nmi_enter();
+
/* See if any machine dependent calls */
if (ppc_md.system_reset_exception) {
if (ppc_md.system_reset_exception(regs))
- return;
+ goto out;
}
die("System Reset", regs, SIGABRT);
+out:
+#ifdef CONFIG_PPC_BOOK3S_64
+ BUG_ON(get_paca()->in_nmi == 0);
+ if (get_paca()->in_nmi > 1)
+ panic("Unrecoverable nested System Reset");
+#endif
/* Must die if the interrupt is not recoverable */
if (!(regs->msr & MSR_RI))
panic("Unrecoverable System Reset");
+ if (!nested)
+ nmi_exit();
+
/* What should we do here? We could issue a shutdown or hard reset. */
}
@@ -306,8 +323,6 @@ long machine_check_early(struct pt_regs *regs)
__this_cpu_inc(irq_stat.mce_exceptions);
- add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);
-
if (cur_cpu_spec && cur_cpu_spec->machine_check_early)
handled = cur_cpu_spec->machine_check_early(regs);
return handled;
@@ -741,6 +756,8 @@ void machine_check_exception(struct pt_regs *regs)
__this_cpu_inc(irq_stat.mce_exceptions);
+ add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);
+
/* See if any machine dependent calls. In theory, we would want
* to call the CPU first, and call the ppc_md. one if the CPU
* one returns a positive number. However there is existing code
@@ -1440,6 +1457,8 @@ void facility_unavailable_exception(struct pt_regs *regs)
[FSCR_TM_LG] = "TM",
[FSCR_EBB_LG] = "EBB",
[FSCR_TAR_LG] = "TAR",
+ [FSCR_MSGP_LG] = "MSGP",
+ [FSCR_SCV_LG] = "SCV",
};
char *facility = "unknown";
u64 value;
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index 1c24c894c908..2f793be3d2b1 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -77,6 +77,8 @@ SECTIONS
#endif
} :kernel
+ __head_end = .;
+
/*
* If the build dies here, it's likely code in head_64.S is referencing
* labels it can't reach, and the linker inserting stubs without the
diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
index 029be26b5a17..65a471de96de 100644
--- a/arch/powerpc/kvm/Kconfig
+++ b/arch/powerpc/kvm/Kconfig
@@ -67,6 +67,7 @@ config KVM_BOOK3S_64
select KVM_BOOK3S_64_HANDLER
select KVM
select KVM_BOOK3S_PR_POSSIBLE if !KVM_BOOK3S_HV_POSSIBLE
+ select SPAPR_TCE_IOMMU if IOMMU_SUPPORT
---help---
Support running unmodified book3s_64 and book3s_32 guest kernels
in virtual machines on book3s_64 host processors.
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index b6b5c185bd92..8c4d7e9d27d2 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -20,6 +20,10 @@
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/miscdevice.h>
+#include <linux/gfp.h>
+#include <linux/sched.h>
+#include <linux/vmalloc.h>
+#include <linux/highmem.h>
#include <asm/reg.h>
#include <asm/cputable.h>
@@ -31,10 +35,6 @@
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>
#include <asm/page.h>
-#include <linux/gfp.h>
-#include <linux/sched.h>
-#include <linux/vmalloc.h>
-#include <linux/highmem.h>
#include "book3s.h"
#include "trace.h"
@@ -197,6 +197,24 @@ void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags)
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_program);
+void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu)
+{
+ /* might as well deliver this straight away */
+ kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, 0);
+}
+
+void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu)
+{
+ /* might as well deliver this straight away */
+ kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_ALTIVEC, 0);
+}
+
+void kvmppc_core_queue_vsx_unavail(struct kvm_vcpu *vcpu)
+{
+ /* might as well deliver this straight away */
+ kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_VSX, 0);
+}
+
void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
{
kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
diff --git a/arch/powerpc/kvm/book3s_64_mmu.c b/arch/powerpc/kvm/book3s_64_mmu.c
index 70153578131a..29ebe2fd5867 100644
--- a/arch/powerpc/kvm/book3s_64_mmu.c
+++ b/arch/powerpc/kvm/book3s_64_mmu.c
@@ -319,6 +319,7 @@ do_second:
gpte->may_execute = true;
gpte->may_read = false;
gpte->may_write = false;
+ gpte->wimg = r & HPTE_R_WIMG;
switch (pp) {
case 0:
diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c
index a587e8f4fd26..9a4614cd0e53 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_host.c
@@ -145,6 +145,8 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte,
else
kvmppc_mmu_flush_icache(pfn);
+ rflags = (rflags & ~HPTE_R_WIMG) | orig_pte->wimg;
+
/*
* Use 64K pages if possible; otherwise, on 64K page kernels,
* we need to transfer 4 more bits from guest real to host real addr.
@@ -177,12 +179,15 @@ map_again:
ret = mmu_hash_ops.hpte_insert(hpteg, vpn, hpaddr, rflags, vflags,
hpsize, hpsize, MMU_SEGSIZE_256M);
- if (ret < 0) {
+ if (ret == -1) {
/* If we couldn't map a primary PTE, try a secondary */
hash = ~hash;
vflags ^= HPTE_V_SECONDARY;
attempt++;
goto map_again;
+ } else if (ret < 0) {
+ r = -EIO;
+ goto out_unlock;
} else {
trace_kvm_book3s_64_mmu_map(rflags, hpteg,
vpn, hpaddr, orig_pte);
@@ -229,6 +234,7 @@ void kvmppc_mmu_unmap_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
{
+ unsigned long vsid_bits = VSID_BITS_65_256M;
struct kvmppc_sid_map *map;
struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
u16 sid_map_mask;
@@ -257,7 +263,12 @@ static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
kvmppc_mmu_pte_flush(vcpu, 0, 0);
kvmppc_mmu_flush_segments(vcpu);
}
- map->host_vsid = vsid_scramble(vcpu_book3s->proto_vsid_next++, 256M);
+
+ if (mmu_has_feature(MMU_FTR_68_BIT_VA))
+ vsid_bits = VSID_BITS_256M;
+
+ map->host_vsid = vsid_scramble(vcpu_book3s->proto_vsid_next++,
+ VSID_MULTIPLIER_256M, vsid_bits);
map->guest_vsid = gvsid;
map->valid = true;
@@ -390,7 +401,7 @@ int kvmppc_mmu_init(struct kvm_vcpu *vcpu)
struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
int err;
- err = __init_new_context();
+ err = hash__alloc_context_id();
if (err < 0)
return -1;
vcpu3s->context_id[0] = err;
diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c
index 3e26cd4979f9..a160c14304eb 100644
--- a/arch/powerpc/kvm/book3s_64_vio.c
+++ b/arch/powerpc/kvm/book3s_64_vio.c
@@ -28,6 +28,8 @@
#include <linux/hugetlb.h>
#include <linux/list.h>
#include <linux/anon_inodes.h>
+#include <linux/iommu.h>
+#include <linux/file.h>
#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>
@@ -40,6 +42,7 @@
#include <asm/udbg.h>
#include <asm/iommu.h>
#include <asm/tce.h>
+#include <asm/mmu_context.h>
static unsigned long kvmppc_tce_pages(unsigned long iommu_pages)
{
@@ -91,6 +94,137 @@ static long kvmppc_account_memlimit(unsigned long stt_pages, bool inc)
return ret;
}
+static void kvm_spapr_tce_iommu_table_free(struct rcu_head *head)
+{
+ struct kvmppc_spapr_tce_iommu_table *stit = container_of(head,
+ struct kvmppc_spapr_tce_iommu_table, rcu);
+
+ iommu_tce_table_put(stit->tbl);
+
+ kfree(stit);
+}
+
+static void kvm_spapr_tce_liobn_put(struct kref *kref)
+{
+ struct kvmppc_spapr_tce_iommu_table *stit = container_of(kref,
+ struct kvmppc_spapr_tce_iommu_table, kref);
+
+ list_del_rcu(&stit->next);
+
+ call_rcu(&stit->rcu, kvm_spapr_tce_iommu_table_free);
+}
+
+extern void kvm_spapr_tce_release_iommu_group(struct kvm *kvm,
+ struct iommu_group *grp)
+{
+ int i;
+ struct kvmppc_spapr_tce_table *stt;
+ struct kvmppc_spapr_tce_iommu_table *stit, *tmp;
+ struct iommu_table_group *table_group = NULL;
+
+ list_for_each_entry_rcu(stt, &kvm->arch.spapr_tce_tables, list) {
+
+ table_group = iommu_group_get_iommudata(grp);
+ if (WARN_ON(!table_group))
+ continue;
+
+ list_for_each_entry_safe(stit, tmp, &stt->iommu_tables, next) {
+ for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
+ if (table_group->tables[i] != stit->tbl)
+ continue;
+
+ kref_put(&stit->kref, kvm_spapr_tce_liobn_put);
+ return;
+ }
+ }
+ }
+}
+
+extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
+ struct iommu_group *grp)
+{
+ struct kvmppc_spapr_tce_table *stt = NULL;
+ bool found = false;
+ struct iommu_table *tbl = NULL;
+ struct iommu_table_group *table_group;
+ long i;
+ struct kvmppc_spapr_tce_iommu_table *stit;
+ struct fd f;
+
+ f = fdget(tablefd);
+ if (!f.file)
+ return -EBADF;
+
+ list_for_each_entry_rcu(stt, &kvm->arch.spapr_tce_tables, list) {
+ if (stt == f.file->private_data) {
+ found = true;
+ break;
+ }
+ }
+
+ fdput(f);
+
+ if (!found)
+ return -EINVAL;
+
+ table_group = iommu_group_get_iommudata(grp);
+ if (WARN_ON(!table_group))
+ return -EFAULT;
+
+ for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
+ struct iommu_table *tbltmp = table_group->tables[i];
+
+ if (!tbltmp)
+ continue;
+ /*
+ * Make sure hardware table parameters are exactly the same;
+ * this is used in the TCE handlers where boundary checks
+ * use only the first attached table.
+ */
+ if ((tbltmp->it_page_shift == stt->page_shift) &&
+ (tbltmp->it_offset == stt->offset) &&
+ (tbltmp->it_size == stt->size)) {
+ /*
+ * Reference the table to avoid races with
+ * add/remove DMA windows.
+ */
+ tbl = iommu_tce_table_get(tbltmp);
+ break;
+ }
+ }
+ if (!tbl)
+ return -EINVAL;
+
+ list_for_each_entry_rcu(stit, &stt->iommu_tables, next) {
+ if (tbl != stit->tbl)
+ continue;
+
+ if (!kref_get_unless_zero(&stit->kref)) {
+ /* stit is being destroyed */
+ iommu_tce_table_put(tbl);
+ return -ENOTTY;
+ }
+ /*
+ * The table is already known to this KVM; we just increased
+ * its KVM reference counter and can return.
+ */
+ return 0;
+ }
+
+ stit = kzalloc(sizeof(*stit), GFP_KERNEL);
+ if (!stit) {
+ iommu_tce_table_put(tbl);
+ return -ENOMEM;
+ }
+
+ stit->tbl = tbl;
+ kref_init(&stit->kref);
+
+ list_add_rcu(&stit->next, &stt->iommu_tables);
+
+ return 0;
+}
+
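
The attach path above reuses an already-attached table only if kref_get_unless_zero() succeeds, because a concurrent detach may have dropped the last reference and queued the entry for RCU freeing. A self-contained sketch of why get-unless-zero (rather than a plain increment) is the right primitive; the atomics below are a userspace approximation, not the kernel's kref:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Minimal kref-like counter: get_unless_zero fails once a concurrent
 * release has dropped the count to zero, so a dying entry is never revived. */
struct ref {
	atomic_uint count;
};

static void ref_init(struct ref *r)
{
	atomic_store(&r->count, 1);
}

static bool ref_get_unless_zero(struct ref *r)
{
	unsigned int c = atomic_load(&r->count);

	while (c != 0) {
		if (atomic_compare_exchange_weak(&r->count, &c, c + 1))
			return true;	/* c is reloaded on failure */
	}
	return false;
}

static bool ref_put(struct ref *r)	/* returns true when the object should be freed */
{
	return atomic_fetch_sub(&r->count, 1) == 1;
}

int main(void)
{
	struct ref r;

	ref_init(&r);
	printf("got: %d\n", ref_get_unless_zero(&r));		/* 1: count 1 -> 2 */
	ref_put(&r);
	if (ref_put(&r))
		printf("last put, free the entry\n");
	printf("got after zero: %d\n", ref_get_unless_zero(&r));	/* 0 */
	return 0;
}
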
static void release_spapr_tce_table(struct rcu_head *head)
{
struct kvmppc_spapr_tce_table *stt = container_of(head,
@@ -130,9 +264,18 @@ static int kvm_spapr_tce_mmap(struct file *file, struct vm_area_struct *vma)
static int kvm_spapr_tce_release(struct inode *inode, struct file *filp)
{
struct kvmppc_spapr_tce_table *stt = filp->private_data;
+ struct kvmppc_spapr_tce_iommu_table *stit, *tmp;
list_del_rcu(&stt->list);
+ list_for_each_entry_safe(stit, tmp, &stt->iommu_tables, next) {
+ WARN_ON(!kref_read(&stit->kref));
+ while (1) {
+ if (kref_put(&stit->kref, kvm_spapr_tce_liobn_put))
+ break;
+ }
+ }
+
kvm_put_kvm(stt->kvm);
kvmppc_account_memlimit(
@@ -164,7 +307,7 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
return -EBUSY;
}
- size = args->size;
+ size = _ALIGN_UP(args->size, PAGE_SIZE >> 3);
npages = kvmppc_tce_pages(size);
ret = kvmppc_account_memlimit(kvmppc_stt_pages(npages), true);
if (ret) {
@@ -183,6 +326,7 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
stt->offset = args->offset;
stt->size = size;
stt->kvm = kvm;
+ INIT_LIST_HEAD_RCU(&stt->iommu_tables);
for (i = 0; i < npages; i++) {
stt->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO);
@@ -211,15 +355,106 @@ fail:
return ret;
}
+static void kvmppc_clear_tce(struct iommu_table *tbl, unsigned long entry)
+{
+ unsigned long hpa = 0;
+ enum dma_data_direction dir = DMA_NONE;
+
+ iommu_tce_xchg(tbl, entry, &hpa, &dir);
+}
+
+static long kvmppc_tce_iommu_mapped_dec(struct kvm *kvm,
+ struct iommu_table *tbl, unsigned long entry)
+{
+ struct mm_iommu_table_group_mem_t *mem = NULL;
+ const unsigned long pgsize = 1ULL << tbl->it_page_shift;
+ unsigned long *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);
+
+ if (!pua)
+ /* it_userspace allocation might be delayed */
+ return H_TOO_HARD;
+
+ mem = mm_iommu_lookup(kvm->mm, *pua, pgsize);
+ if (!mem)
+ return H_TOO_HARD;
+
+ mm_iommu_mapped_dec(mem);
+
+ *pua = 0;
+
+ return H_SUCCESS;
+}
+
+static long kvmppc_tce_iommu_unmap(struct kvm *kvm,
+ struct iommu_table *tbl, unsigned long entry)
+{
+ enum dma_data_direction dir = DMA_NONE;
+ unsigned long hpa = 0;
+ long ret;
+
+ if (WARN_ON_ONCE(iommu_tce_xchg(tbl, entry, &hpa, &dir)))
+ return H_HARDWARE;
+
+ if (dir == DMA_NONE)
+ return H_SUCCESS;
+
+ ret = kvmppc_tce_iommu_mapped_dec(kvm, tbl, entry);
+ if (ret != H_SUCCESS)
+ iommu_tce_xchg(tbl, entry, &hpa, &dir);
+
+ return ret;
+}
+
+long kvmppc_tce_iommu_map(struct kvm *kvm, struct iommu_table *tbl,
+ unsigned long entry, unsigned long ua,
+ enum dma_data_direction dir)
+{
+ long ret;
+ unsigned long hpa, *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);
+ struct mm_iommu_table_group_mem_t *mem;
+
+ if (!pua)
+ /* it_userspace allocation might be delayed */
+ return H_TOO_HARD;
+
+ mem = mm_iommu_lookup(kvm->mm, ua, 1ULL << tbl->it_page_shift);
+ if (!mem)
+ /* This only handles the v2 IOMMU type; v1 is handled via ioctl() */
+ return H_TOO_HARD;
+
+ if (WARN_ON_ONCE(mm_iommu_ua_to_hpa(mem, ua, &hpa)))
+ return H_HARDWARE;
+
+ if (mm_iommu_mapped_inc(mem))
+ return H_CLOSED;
+
+ ret = iommu_tce_xchg(tbl, entry, &hpa, &dir);
+ if (WARN_ON_ONCE(ret)) {
+ mm_iommu_mapped_dec(mem);
+ return H_HARDWARE;
+ }
+
+ if (dir != DMA_NONE)
+ kvmppc_tce_iommu_mapped_dec(kvm, tbl, entry);
+
+ *pua = ua;
+
+ return 0;
+}
+
long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
unsigned long ioba, unsigned long tce)
{
- struct kvmppc_spapr_tce_table *stt = kvmppc_find_table(vcpu, liobn);
- long ret;
+ struct kvmppc_spapr_tce_table *stt;
+ long ret, idx;
+ struct kvmppc_spapr_tce_iommu_table *stit;
+ unsigned long entry, ua = 0;
+ enum dma_data_direction dir;
/* udbg_printf("H_PUT_TCE(): liobn=0x%lx ioba=0x%lx, tce=0x%lx\n", */
/* liobn, ioba, tce); */
+ stt = kvmppc_find_table(vcpu->kvm, liobn);
if (!stt)
return H_TOO_HARD;
@@ -231,7 +466,35 @@ long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
if (ret != H_SUCCESS)
return ret;
- kvmppc_tce_put(stt, ioba >> stt->page_shift, tce);
+ dir = iommu_tce_direction(tce);
+ if ((dir != DMA_NONE) && kvmppc_gpa_to_ua(vcpu->kvm,
+ tce & ~(TCE_PCI_READ | TCE_PCI_WRITE), &ua, NULL))
+ return H_PARAMETER;
+
+ entry = ioba >> stt->page_shift;
+
+ list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
+ if (dir == DMA_NONE) {
+ ret = kvmppc_tce_iommu_unmap(vcpu->kvm,
+ stit->tbl, entry);
+ } else {
+ idx = srcu_read_lock(&vcpu->kvm->srcu);
+ ret = kvmppc_tce_iommu_map(vcpu->kvm, stit->tbl,
+ entry, ua, dir);
+ srcu_read_unlock(&vcpu->kvm->srcu, idx);
+ }
+
+ if (ret == H_SUCCESS)
+ continue;
+
+ if (ret == H_TOO_HARD)
+ return ret;
+
+ WARN_ON_ONCE(1);
+ kvmppc_clear_tce(stit->tbl, entry);
+ }
+
+ kvmppc_tce_put(stt, entry, tce);
return H_SUCCESS;
}
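
The per-table loop added to kvmppc_h_put_tce() above (and repeated in the indirect and stuff variants below) encodes a specific error policy: H_SUCCESS moves on to the next attached hardware table, H_TOO_HARD aborts so the hypercall can be retried in virtual mode or in userspace, and any other failure warns once and clears the hardware entry before the guest-visible shadow table is updated. A self-contained sketch of that dispatch pattern, with hypothetical table and helper names standing in for the kernel functions:

#include <stdio.h>

#define H_SUCCESS   0		/* return codes chosen for this sketch */
#define H_TOO_HARD  9999
#define H_HARDWARE  (-1)

struct table { int id; };

/* Hypothetical per-table operation standing in for the iommu map/unmap helpers. */
static long try_one(struct table *t)
{
	return t->id == 1 ? H_HARDWARE : H_SUCCESS;
}

static void clear_entry(struct table *t)
{
	printf("clearing stale entry in table %d\n", t->id);
}

/*
 * Dispatch policy for each attached hardware table:
 *  - H_SUCCESS: move on to the next table;
 *  - H_TOO_HARD: give up immediately so a slower path can retry;
 *  - anything else: clear the entry so hardware and shadow table
 *    do not disagree, then keep going.
 */
static long put_tce(struct table *tables, int n)
{
	for (int i = 0; i < n; i++) {
		long ret = try_one(&tables[i]);

		if (ret == H_SUCCESS)
			continue;
		if (ret == H_TOO_HARD)
			return ret;
		clear_entry(&tables[i]);
	}
	return H_SUCCESS;	/* only now update the guest-visible shadow table */
}

int main(void)
{
	struct table tables[] = { {0}, {1}, {2} };

	printf("result=%ld\n", put_tce(tables, 3));
	return 0;
}
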
@@ -246,8 +509,9 @@ long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
unsigned long entry, ua = 0;
u64 __user *tces;
u64 tce;
+ struct kvmppc_spapr_tce_iommu_table *stit;
- stt = kvmppc_find_table(vcpu, liobn);
+ stt = kvmppc_find_table(vcpu->kvm, liobn);
if (!stt)
return H_TOO_HARD;
@@ -284,6 +548,26 @@ long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
if (ret != H_SUCCESS)
goto unlock_exit;
+ if (kvmppc_gpa_to_ua(vcpu->kvm,
+ tce & ~(TCE_PCI_READ | TCE_PCI_WRITE),
+ &ua, NULL))
+ return H_PARAMETER;
+
+ list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
+ ret = kvmppc_tce_iommu_map(vcpu->kvm,
+ stit->tbl, entry + i, ua,
+ iommu_tce_direction(tce));
+
+ if (ret == H_SUCCESS)
+ continue;
+
+ if (ret == H_TOO_HARD)
+ goto unlock_exit;
+
+ WARN_ON_ONCE(1);
+ kvmppc_clear_tce(stit->tbl, entry);
+ }
+
kvmppc_tce_put(stt, entry + i, tce);
}
@@ -300,8 +584,9 @@ long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu,
{
struct kvmppc_spapr_tce_table *stt;
long i, ret;
+ struct kvmppc_spapr_tce_iommu_table *stit;
- stt = kvmppc_find_table(vcpu, liobn);
+ stt = kvmppc_find_table(vcpu->kvm, liobn);
if (!stt)
return H_TOO_HARD;
@@ -313,6 +598,24 @@ long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu,
if (tce_value & (TCE_PCI_WRITE | TCE_PCI_READ))
return H_PARAMETER;
+ list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
+ unsigned long entry = ioba >> stit->tbl->it_page_shift;
+
+ for (i = 0; i < npages; ++i) {
+ ret = kvmppc_tce_iommu_unmap(vcpu->kvm,
+ stit->tbl, entry + i);
+
+ if (ret == H_SUCCESS)
+ continue;
+
+ if (ret == H_TOO_HARD)
+ return ret;
+
+ WARN_ON_ONCE(1);
+ kvmppc_clear_tce(stit->tbl, entry);
+ }
+ }
+
for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift))
kvmppc_tce_put(stt, ioba >> stt->page_shift, tce_value);
diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c
index e4c4ea973e57..eda0a8f6fae8 100644
--- a/arch/powerpc/kvm/book3s_64_vio_hv.c
+++ b/arch/powerpc/kvm/book3s_64_vio_hv.c
@@ -40,6 +40,31 @@
#include <asm/iommu.h>
#include <asm/tce.h>
+#ifdef CONFIG_BUG
+
+#define WARN_ON_ONCE_RM(condition) ({ \
+ static bool __section(.data.unlikely) __warned; \
+ int __ret_warn_once = !!(condition); \
+ \
+ if (unlikely(__ret_warn_once && !__warned)) { \
+ __warned = true; \
+ pr_err("WARN_ON_ONCE_RM: (%s) at %s:%u\n", \
+ __stringify(condition), \
+ __func__, __LINE__); \
+ dump_stack(); \
+ } \
+ unlikely(__ret_warn_once); \
+})
+
+#else
+
+#define WARN_ON_ONCE_RM(condition) ({ \
+ int __ret_warn_on = !!(condition); \
+ unlikely(__ret_warn_on); \
+})
+
+#endif
+
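
WARN_ON_ONCE_RM above re-implements warn-once reporting for real-mode code by printing directly with pr_err() and dump_stack() instead of going through the regular WARN machinery, with a static flag so each call site fires at most once. A small userspace approximation of the same pattern (GNU statement expression, as in the kernel macro; the name and message format here are illustrative):

#include <stdbool.h>
#include <stdio.h>

/* Each textual expansion gets its own static flag, so every call site
 * reports at most once while still returning the condition's value. */
#define WARN_ONCE_SKETCH(cond) ({					\
	static bool __warned;						\
	bool __hit = !!(cond);						\
	if (__hit && !__warned) {					\
		__warned = true;					\
		fprintf(stderr, "warn-once: (%s) at %s:%d\n",		\
			#cond, __func__, __LINE__);			\
	}								\
	__hit;								\
})

int main(void)
{
	for (int i = 0; i < 3; i++)
		if (WARN_ONCE_SKETCH(i > 0))	/* prints only for i == 1 */
			continue;
	return 0;
}
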
#define TCES_PER_PAGE (PAGE_SIZE / sizeof(u64))
/*
@@ -48,10 +73,9 @@
* WARNING: This will be called in real or virtual mode on HV KVM and virtual
* mode on PR KVM
*/
-struct kvmppc_spapr_tce_table *kvmppc_find_table(struct kvm_vcpu *vcpu,
+struct kvmppc_spapr_tce_table *kvmppc_find_table(struct kvm *kvm,
unsigned long liobn)
{
- struct kvm *kvm = vcpu->kvm;
struct kvmppc_spapr_tce_table *stt;
list_for_each_entry_lockless(stt, &kvm->arch.spapr_tce_tables, list)
@@ -63,27 +87,6 @@ struct kvmppc_spapr_tce_table *kvmppc_find_table(struct kvm_vcpu *vcpu,
EXPORT_SYMBOL_GPL(kvmppc_find_table);
/*
- * Validates IO address.
- *
- * WARNING: This will be called in real-mode on HV KVM and virtual
- * mode on PR KVM
- */
-long kvmppc_ioba_validate(struct kvmppc_spapr_tce_table *stt,
- unsigned long ioba, unsigned long npages)
-{
- unsigned long mask = (1ULL << stt->page_shift) - 1;
- unsigned long idx = ioba >> stt->page_shift;
-
- if ((ioba & mask) || (idx < stt->offset) ||
- (idx - stt->offset + npages > stt->size) ||
- (idx + npages < idx))
- return H_PARAMETER;
-
- return H_SUCCESS;
-}
-EXPORT_SYMBOL_GPL(kvmppc_ioba_validate);
-
-/*
* Validates TCE address.
* At the moment flags and page mask are validated.
* As the host kernel does not access those addresses (just puts them
@@ -96,10 +99,14 @@ EXPORT_SYMBOL_GPL(kvmppc_ioba_validate);
*/
long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *stt, unsigned long tce)
{
- unsigned long page_mask = ~((1ULL << stt->page_shift) - 1);
- unsigned long mask = ~(page_mask | TCE_PCI_WRITE | TCE_PCI_READ);
+ unsigned long gpa = tce & ~(TCE_PCI_READ | TCE_PCI_WRITE);
+ enum dma_data_direction dir = iommu_tce_direction(tce);
+
+ /* Allow userspace to poison TCE table */
+ if (dir == DMA_NONE)
+ return H_SUCCESS;
- if (tce & mask)
+ if (iommu_tce_check_gpa(stt->page_shift, gpa))
return H_PARAMETER;
return H_SUCCESS;
@@ -179,15 +186,122 @@ long kvmppc_gpa_to_ua(struct kvm *kvm, unsigned long gpa,
EXPORT_SYMBOL_GPL(kvmppc_gpa_to_ua);
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+static void kvmppc_rm_clear_tce(struct iommu_table *tbl, unsigned long entry)
+{
+ unsigned long hpa = 0;
+ enum dma_data_direction dir = DMA_NONE;
+
+ iommu_tce_xchg_rm(tbl, entry, &hpa, &dir);
+}
+
+static long kvmppc_rm_tce_iommu_mapped_dec(struct kvm *kvm,
+ struct iommu_table *tbl, unsigned long entry)
+{
+ struct mm_iommu_table_group_mem_t *mem = NULL;
+ const unsigned long pgsize = 1ULL << tbl->it_page_shift;
+ unsigned long *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);
+
+ if (!pua)
+ /* it_userspace allocation might be delayed */
+ return H_TOO_HARD;
+
+ pua = (void *) vmalloc_to_phys(pua);
+ if (WARN_ON_ONCE_RM(!pua))
+ return H_HARDWARE;
+
+ mem = mm_iommu_lookup_rm(kvm->mm, *pua, pgsize);
+ if (!mem)
+ return H_TOO_HARD;
+
+ mm_iommu_mapped_dec(mem);
+
+ *pua = 0;
+
+ return H_SUCCESS;
+}
+
+static long kvmppc_rm_tce_iommu_unmap(struct kvm *kvm,
+ struct iommu_table *tbl, unsigned long entry)
+{
+ enum dma_data_direction dir = DMA_NONE;
+ unsigned long hpa = 0;
+ long ret;
+
+ if (iommu_tce_xchg_rm(tbl, entry, &hpa, &dir))
+ /*
+ * real mode xchg can fail if struct page crosses
+ * a page boundary
+ */
+ return H_TOO_HARD;
+
+ if (dir == DMA_NONE)
+ return H_SUCCESS;
+
+ ret = kvmppc_rm_tce_iommu_mapped_dec(kvm, tbl, entry);
+ if (ret)
+ iommu_tce_xchg_rm(tbl, entry, &hpa, &dir);
+
+ return ret;
+}
+
+static long kvmppc_rm_tce_iommu_map(struct kvm *kvm, struct iommu_table *tbl,
+ unsigned long entry, unsigned long ua,
+ enum dma_data_direction dir)
+{
+ long ret;
+ unsigned long hpa = 0;
+ unsigned long *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);
+ struct mm_iommu_table_group_mem_t *mem;
+
+ if (!pua)
+ /* it_userspace allocation might be delayed */
+ return H_TOO_HARD;
+
+ mem = mm_iommu_lookup_rm(kvm->mm, ua, 1ULL << tbl->it_page_shift);
+ if (!mem)
+ return H_TOO_HARD;
+
+ if (WARN_ON_ONCE_RM(mm_iommu_ua_to_hpa_rm(mem, ua, &hpa)))
+ return H_HARDWARE;
+
+ pua = (void *) vmalloc_to_phys(pua);
+ if (WARN_ON_ONCE_RM(!pua))
+ return H_HARDWARE;
+
+ if (WARN_ON_ONCE_RM(mm_iommu_mapped_inc(mem)))
+ return H_CLOSED;
+
+ ret = iommu_tce_xchg_rm(tbl, entry, &hpa, &dir);
+ if (ret) {
+ mm_iommu_mapped_dec(mem);
+ /*
+ * real mode xchg can fail if struct page crosses
+ * a page boundary
+ */
+ return H_TOO_HARD;
+ }
+
+ if (dir != DMA_NONE)
+ kvmppc_rm_tce_iommu_mapped_dec(kvm, tbl, entry);
+
+ *pua = ua;
+
+ return 0;
+}
+
long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
unsigned long ioba, unsigned long tce)
{
- struct kvmppc_spapr_tce_table *stt = kvmppc_find_table(vcpu, liobn);
+ struct kvmppc_spapr_tce_table *stt;
long ret;
+ struct kvmppc_spapr_tce_iommu_table *stit;
+ unsigned long entry, ua = 0;
+ enum dma_data_direction dir;
/* udbg_printf("H_PUT_TCE(): liobn=0x%lx ioba=0x%lx, tce=0x%lx\n", */
/* liobn, ioba, tce); */
+ stt = kvmppc_find_table(vcpu->kvm, liobn);
if (!stt)
return H_TOO_HARD;
@@ -199,7 +313,32 @@ long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
if (ret != H_SUCCESS)
return ret;
- kvmppc_tce_put(stt, ioba >> stt->page_shift, tce);
+ dir = iommu_tce_direction(tce);
+ if ((dir != DMA_NONE) && kvmppc_gpa_to_ua(vcpu->kvm,
+ tce & ~(TCE_PCI_READ | TCE_PCI_WRITE), &ua, NULL))
+ return H_PARAMETER;
+
+ entry = ioba >> stt->page_shift;
+
+ list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
+ if (dir == DMA_NONE)
+ ret = kvmppc_rm_tce_iommu_unmap(vcpu->kvm,
+ stit->tbl, entry);
+ else
+ ret = kvmppc_rm_tce_iommu_map(vcpu->kvm,
+ stit->tbl, entry, ua, dir);
+
+ if (ret == H_SUCCESS)
+ continue;
+
+ if (ret == H_TOO_HARD)
+ return ret;
+
+ WARN_ON_ONCE_RM(1);
+ kvmppc_rm_clear_tce(stit->tbl, entry);
+ }
+
+ kvmppc_tce_put(stt, entry, tce);
return H_SUCCESS;
}
@@ -239,8 +378,10 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
long i, ret = H_SUCCESS;
unsigned long tces, entry, ua = 0;
unsigned long *rmap = NULL;
+ bool prereg = false;
+ struct kvmppc_spapr_tce_iommu_table *stit;
- stt = kvmppc_find_table(vcpu, liobn);
+ stt = kvmppc_find_table(vcpu->kvm, liobn);
if (!stt)
return H_TOO_HARD;
@@ -259,23 +400,49 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
if (ret != H_SUCCESS)
return ret;
- if (kvmppc_gpa_to_ua(vcpu->kvm, tce_list, &ua, &rmap))
- return H_TOO_HARD;
+ if (mm_iommu_preregistered(vcpu->kvm->mm)) {
+ /*
+ * We get here if guest memory was pre-registered, which is
+ * normally the VFIO case, and the gpa->hpa translation does not
+ * depend on the HPT.
+ */
+ struct mm_iommu_table_group_mem_t *mem;
- rmap = (void *) vmalloc_to_phys(rmap);
+ if (kvmppc_gpa_to_ua(vcpu->kvm, tce_list, &ua, NULL))
+ return H_TOO_HARD;
- /*
- * Synchronize with the MMU notifier callbacks in
- * book3s_64_mmu_hv.c (kvm_unmap_hva_hv etc.).
- * While we have the rmap lock, code running on other CPUs
- * cannot finish unmapping the host real page that backs
- * this guest real page, so we are OK to access the host
- * real page.
- */
- lock_rmap(rmap);
- if (kvmppc_rm_ua_to_hpa(vcpu, ua, &tces)) {
- ret = H_TOO_HARD;
- goto unlock_exit;
+ mem = mm_iommu_lookup_rm(vcpu->kvm->mm, ua, IOMMU_PAGE_SIZE_4K);
+ if (mem)
+ prereg = mm_iommu_ua_to_hpa_rm(mem, ua, &tces) == 0;
+ }
+
+ if (!prereg) {
+ /*
+ * This is usually the case of a guest with emulated devices only,
+ * when the TCE list is not in preregistered memory.
+ * We do not require memory to be preregistered in this case,
+ * so lock the rmap and do __find_linux_pte_or_hugepte().
+ */
+ if (kvmppc_gpa_to_ua(vcpu->kvm, tce_list, &ua, &rmap))
+ return H_TOO_HARD;
+
+ rmap = (void *) vmalloc_to_phys(rmap);
+ if (WARN_ON_ONCE_RM(!rmap))
+ return H_HARDWARE;
+
+ /*
+ * Synchronize with the MMU notifier callbacks in
+ * book3s_64_mmu_hv.c (kvm_unmap_hva_hv etc.).
+ * While we have the rmap lock, code running on other CPUs
+ * cannot finish unmapping the host real page that backs
+ * this guest real page, so we are OK to access the host
+ * real page.
+ */
+ lock_rmap(rmap);
+ if (kvmppc_rm_ua_to_hpa(vcpu, ua, &tces)) {
+ ret = H_TOO_HARD;
+ goto unlock_exit;
+ }
}
for (i = 0; i < npages; ++i) {
@@ -285,11 +452,33 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
if (ret != H_SUCCESS)
goto unlock_exit;
+ ua = 0;
+ if (kvmppc_gpa_to_ua(vcpu->kvm,
+ tce & ~(TCE_PCI_READ | TCE_PCI_WRITE),
+ &ua, NULL))
+ return H_PARAMETER;
+
+ list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
+ ret = kvmppc_rm_tce_iommu_map(vcpu->kvm,
+ stit->tbl, entry + i, ua,
+ iommu_tce_direction(tce));
+
+ if (ret == H_SUCCESS)
+ continue;
+
+ if (ret == H_TOO_HARD)
+ goto unlock_exit;
+
+ WARN_ON_ONCE_RM(1);
+ kvmppc_rm_clear_tce(stit->tbl, entry);
+ }
+
kvmppc_tce_put(stt, entry + i, tce);
}
unlock_exit:
- unlock_rmap(rmap);
+ if (rmap)
+ unlock_rmap(rmap);
return ret;
}
@@ -300,8 +489,9 @@ long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
{
struct kvmppc_spapr_tce_table *stt;
long i, ret;
+ struct kvmppc_spapr_tce_iommu_table *stit;
- stt = kvmppc_find_table(vcpu, liobn);
+ stt = kvmppc_find_table(vcpu->kvm, liobn);
if (!stt)
return H_TOO_HARD;
@@ -313,6 +503,24 @@ long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
if (tce_value & (TCE_PCI_WRITE | TCE_PCI_READ))
return H_PARAMETER;
+ list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
+ unsigned long entry = ioba >> stit->tbl->it_page_shift;
+
+ for (i = 0; i < npages; ++i) {
+ ret = kvmppc_rm_tce_iommu_unmap(vcpu->kvm,
+ stit->tbl, entry + i);
+
+ if (ret == H_SUCCESS)
+ continue;
+
+ if (ret == H_TOO_HARD)
+ return ret;
+
+ WARN_ON_ONCE_RM(1);
+ kvmppc_rm_clear_tce(stit->tbl, entry);
+ }
+ }
+
for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift))
kvmppc_tce_put(stt, ioba >> stt->page_shift, tce_value);
@@ -322,12 +530,13 @@ long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
unsigned long ioba)
{
- struct kvmppc_spapr_tce_table *stt = kvmppc_find_table(vcpu, liobn);
+ struct kvmppc_spapr_tce_table *stt;
long ret;
unsigned long idx;
struct page *page;
u64 *tbl;
+ stt = kvmppc_find_table(vcpu->kvm, liobn);
if (!stt)
return H_TOO_HARD;
diff --git a/arch/powerpc/kvm/book3s_emulate.c b/arch/powerpc/kvm/book3s_emulate.c
index 8359752b3efc..68d68983948e 100644
--- a/arch/powerpc/kvm/book3s_emulate.c
+++ b/arch/powerpc/kvm/book3s_emulate.c
@@ -503,10 +503,18 @@ int kvmppc_core_emulate_mtspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
break;
unprivileged:
default:
- printk(KERN_INFO "KVM: invalid SPR write: %d\n", sprn);
-#ifndef DEBUG_SPR
- emulated = EMULATE_FAIL;
-#endif
+ pr_info_ratelimited("KVM: invalid SPR write: %d\n", sprn);
+ if (sprn & 0x10) {
+ if (kvmppc_get_msr(vcpu) & MSR_PR) {
+ kvmppc_core_queue_program(vcpu, SRR1_PROGPRIV);
+ emulated = EMULATE_AGAIN;
+ }
+ } else {
+ if ((kvmppc_get_msr(vcpu) & MSR_PR) || sprn == 0) {
+ kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
+ emulated = EMULATE_AGAIN;
+ }
+ }
break;
}
@@ -648,10 +656,20 @@ int kvmppc_core_emulate_mfspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val
break;
default:
unprivileged:
- printk(KERN_INFO "KVM: invalid SPR read: %d\n", sprn);
-#ifndef DEBUG_SPR
- emulated = EMULATE_FAIL;
-#endif
+ pr_info_ratelimited("KVM: invalid SPR read: %d\n", sprn);
+ if (sprn & 0x10) {
+ if (kvmppc_get_msr(vcpu) & MSR_PR) {
+ kvmppc_core_queue_program(vcpu, SRR1_PROGPRIV);
+ emulated = EMULATE_AGAIN;
+ }
+ } else {
+ if ((kvmppc_get_msr(vcpu) & MSR_PR) || sprn == 0 ||
+ sprn == 4 || sprn == 5 || sprn == 6) {
+ kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
+ emulated = EMULATE_AGAIN;
+ }
+ }
+
break;
}
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 1ec86d9e2a82..549dd6070dee 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -35,6 +35,15 @@
#include <linux/srcu.h>
#include <linux/miscdevice.h>
#include <linux/debugfs.h>
+#include <linux/gfp.h>
+#include <linux/vmalloc.h>
+#include <linux/highmem.h>
+#include <linux/hugetlb.h>
+#include <linux/kvm_irqfd.h>
+#include <linux/irqbypass.h>
+#include <linux/module.h>
+#include <linux/compiler.h>
+#include <linux/of.h>
#include <asm/reg.h>
#include <asm/cputable.h>
@@ -58,15 +67,6 @@
#include <asm/mmu.h>
#include <asm/opal.h>
#include <asm/xics.h>
-#include <linux/gfp.h>
-#include <linux/vmalloc.h>
-#include <linux/highmem.h>
-#include <linux/hugetlb.h>
-#include <linux/kvm_irqfd.h>
-#include <linux/irqbypass.h>
-#include <linux/module.h>
-#include <linux/compiler.h>
-#include <linux/of.h>
#include "book3s.h"
@@ -3624,11 +3624,9 @@ static int kvmppc_clr_passthru_irq(struct kvm *kvm, int host_irq, int guest_gsi)
return -EIO;
mutex_lock(&kvm->lock);
+ if (!kvm->arch.pimap)
+ goto unlock;
- if (kvm->arch.pimap == NULL) {
- mutex_unlock(&kvm->lock);
- return 0;
- }
pimap = kvm->arch.pimap;
for (i = 0; i < pimap->n_mapped; i++) {
@@ -3650,7 +3648,7 @@ static int kvmppc_clr_passthru_irq(struct kvm *kvm, int host_irq, int guest_gsi)
* We don't free this structure even when the count goes to
* zero. The structure is freed when we destroy the VM.
*/
-
+ unlock:
mutex_unlock(&kvm->lock);
return 0;
}
diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c
index 4d6c64b3041c..9c71c72e65ce 100644
--- a/arch/powerpc/kvm/book3s_hv_builtin.c
+++ b/arch/powerpc/kvm/book3s_hv_builtin.c
@@ -23,6 +23,7 @@
#include <asm/kvm_book3s.h>
#include <asm/archrandom.h>
#include <asm/xics.h>
+#include <asm/xive.h>
#include <asm/dbell.h>
#include <asm/cputhreads.h>
#include <asm/io.h>
@@ -100,7 +101,8 @@ void __init kvm_cma_reserve(void)
(unsigned long)selected_size / SZ_1M);
align_size = HPT_ALIGN_PAGES << PAGE_SHIFT;
cma_declare_contiguous(0, selected_size, 0, align_size,
- KVM_CMA_CHUNK_ORDER - PAGE_SHIFT, false, &kvm_cma);
+ KVM_CMA_CHUNK_ORDER - PAGE_SHIFT, false, "kvm_cma",
+ &kvm_cma);
}
}
@@ -193,12 +195,6 @@ long kvmppc_h_random(struct kvm_vcpu *vcpu)
return H_HARDWARE;
}
-static inline void rm_writeb(unsigned long paddr, u8 val)
-{
- __asm__ __volatile__("stbcix %0,0,%1"
- : : "r" (val), "r" (paddr) : "memory");
-}
-
/*
* Send an interrupt or message to another CPU.
* The caller needs to include any barrier needed to order writes
@@ -206,7 +202,7 @@ static inline void rm_writeb(unsigned long paddr, u8 val)
*/
void kvmhv_rm_send_ipi(int cpu)
{
- unsigned long xics_phys;
+ void __iomem *xics_phys;
unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);
/* On POWER9 we can use msgsnd for any destination cpu. */
@@ -224,10 +220,14 @@ void kvmhv_rm_send_ipi(int cpu)
return;
}
+ /* We should never reach this */
+ if (WARN_ON_ONCE(xive_enabled()))
+ return;
+
/* Else poke the target with an IPI */
xics_phys = paca[cpu].kvm_hstate.xics_phys;
if (xics_phys)
- rm_writeb(xics_phys + XICS_MFRR, IPI_PRIORITY);
+ __raw_rm_writeb(IPI_PRIORITY, xics_phys + XICS_MFRR);
else
opal_int_set_mfrr(get_hard_smp_processor_id(cpu), IPI_PRIORITY);
}
@@ -386,6 +386,9 @@ long kvmppc_read_intr(void)
long rc;
bool again;
+ if (xive_enabled())
+ return 1;
+
do {
again = false;
rc = kvmppc_read_one_intr(&again);
@@ -397,7 +400,7 @@ long kvmppc_read_intr(void)
static long kvmppc_read_one_intr(bool *again)
{
- unsigned long xics_phys;
+ void __iomem *xics_phys;
u32 h_xirr;
__be32 xirr;
u32 xisr;
@@ -415,7 +418,7 @@ static long kvmppc_read_one_intr(bool *again)
if (!xics_phys)
rc = opal_int_get_xirr(&xirr, false);
else
- xirr = _lwzcix(xics_phys + XICS_XIRR);
+ xirr = __raw_rm_readl(xics_phys + XICS_XIRR);
if (rc < 0)
return 1;
@@ -445,8 +448,8 @@ static long kvmppc_read_one_intr(bool *again)
if (xisr == XICS_IPI) {
rc = 0;
if (xics_phys) {
- _stbcix(xics_phys + XICS_MFRR, 0xff);
- _stwcix(xics_phys + XICS_XIRR, xirr);
+ __raw_rm_writeb(0xff, xics_phys + XICS_MFRR);
+ __raw_rm_writel(xirr, xics_phys + XICS_XIRR);
} else {
opal_int_set_mfrr(hard_smp_processor_id(), 0xff);
rc = opal_int_eoi(h_xirr);
@@ -471,7 +474,8 @@ static long kvmppc_read_one_intr(bool *again)
* we need to resend that IPI, bummer
*/
if (xics_phys)
- _stbcix(xics_phys + XICS_MFRR, IPI_PRIORITY);
+ __raw_rm_writeb(IPI_PRIORITY,
+ xics_phys + XICS_MFRR);
else
opal_int_set_mfrr(hard_smp_processor_id(),
IPI_PRIORITY);
diff --git a/arch/powerpc/kvm/book3s_hv_rm_xics.c b/arch/powerpc/kvm/book3s_hv_rm_xics.c
index e78542d99cd6..ffde4507ddfd 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_xics.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_xics.c
@@ -16,7 +16,6 @@
#include <asm/kvm_ppc.h>
#include <asm/hvcall.h>
#include <asm/xics.h>
-#include <asm/debug.h>
#include <asm/synch.h>
#include <asm/cputhreads.h>
#include <asm/pgtable.h>
@@ -766,7 +765,7 @@ unsigned long eoi_rc;
static void icp_eoi(struct irq_chip *c, u32 hwirq, __be32 xirr, bool *again)
{
- unsigned long xics_phys;
+ void __iomem *xics_phys;
int64_t rc;
rc = pnv_opal_pci_msi_eoi(c, hwirq);
@@ -779,7 +778,7 @@ static void icp_eoi(struct irq_chip *c, u32 hwirq, __be32 xirr, bool *again)
/* EOI it */
xics_phys = local_paca->kvm_hstate.xics_phys;
if (xics_phys) {
- _stwcix(xics_phys + XICS_XIRR, xirr);
+ __raw_rm_writel(xirr, xics_phys + XICS_XIRR);
} else {
rc = opal_int_eoi(be32_to_cpu(xirr));
*again = rc > 0;
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index d4dfc0ca2a44..69a09444d46e 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -349,7 +349,7 @@ static void kvmppc_set_msr_pr(struct kvm_vcpu *vcpu, u64 msr)
if (msr & MSR_POW) {
if (!vcpu->arch.pending_exceptions) {
kvm_vcpu_block(vcpu);
- clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
+ kvm_clear_request(KVM_REQ_UNHALT, vcpu);
vcpu->stat.halt_wakeup++;
/* Unset POW bit after we woke up */
@@ -537,8 +537,7 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
int r = RESUME_GUEST;
int relocated;
int page_found = 0;
- struct kvmppc_pte pte;
- bool is_mmio = false;
+ struct kvmppc_pte pte = { 0 };
bool dr = (kvmppc_get_msr(vcpu) & MSR_DR) ? true : false;
bool ir = (kvmppc_get_msr(vcpu) & MSR_IR) ? true : false;
u64 vsid;
@@ -616,8 +615,7 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
/* Page not found in guest SLB */
kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu));
kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80);
- } else if (!is_mmio &&
- kvmppc_visible_gpa(vcpu, pte.raddr)) {
+ } else if (kvmppc_visible_gpa(vcpu, pte.raddr)) {
if (data && !(vcpu->arch.fault_dsisr & DSISR_NOHPTE)) {
/*
* There is already a host HPTE there, presumably
@@ -627,7 +625,11 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
kvmppc_mmu_unmap_page(vcpu, &pte);
}
/* The guest's PTE is not mapped yet. Map on the host */
- kvmppc_mmu_map_page(vcpu, &pte, iswrite);
+ if (kvmppc_mmu_map_page(vcpu, &pte, iswrite) == -EIO) {
+ /* Exit KVM if mapping failed */
+ run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ return RESUME_HOST;
+ }
if (data)
vcpu->stat.sp_storage++;
else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
diff --git a/arch/powerpc/kvm/book3s_pr_papr.c b/arch/powerpc/kvm/book3s_pr_papr.c
index f102616febc7..bcbeeb62dd13 100644
--- a/arch/powerpc/kvm/book3s_pr_papr.c
+++ b/arch/powerpc/kvm/book3s_pr_papr.c
@@ -344,7 +344,7 @@ int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd)
case H_CEDE:
kvmppc_set_msr_fast(vcpu, kvmppc_get_msr(vcpu) | MSR_EE);
kvm_vcpu_block(vcpu);
- clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
+ kvm_clear_request(KVM_REQ_UNHALT, vcpu);
vcpu->stat.halt_wakeup++;
return EMULATE_DONE;
case H_LOGICAL_CI_LOAD:
diff --git a/arch/powerpc/kvm/book3s_xics.c b/arch/powerpc/kvm/book3s_xics.c
index e48803e2918d..459b72cb617a 100644
--- a/arch/powerpc/kvm/book3s_xics.c
+++ b/arch/powerpc/kvm/book3s_xics.c
@@ -19,10 +19,9 @@
#include <asm/kvm_ppc.h>
#include <asm/hvcall.h>
#include <asm/xics.h>
-#include <asm/debug.h>
+#include <asm/debugfs.h>
#include <asm/time.h>
-#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include "book3s_xics.h"
@@ -1084,7 +1083,7 @@ static struct kvmppc_ics *kvmppc_xics_create_ics(struct kvm *kvm,
return xics->ics[icsid];
}
-int kvmppc_xics_create_icp(struct kvm_vcpu *vcpu, unsigned long server_num)
+static int kvmppc_xics_create_icp(struct kvm_vcpu *vcpu, unsigned long server_num)
{
struct kvmppc_icp *icp;
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 0514cbd4e533..3eaac3809977 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -300,6 +300,11 @@ void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong esr_flags)
kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_PROGRAM);
}
+void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu)
+{
+ kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_FP_UNAVAIL);
+}
+
void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
{
kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DECREMENTER);
@@ -579,7 +584,7 @@ static void arm_next_watchdog(struct kvm_vcpu *vcpu)
* userspace, so clear the KVM_REQ_WATCHDOG request.
*/
if ((vcpu->arch.tsr & (TSR_ENW | TSR_WIS)) != (TSR_ENW | TSR_WIS))
- clear_bit(KVM_REQ_WATCHDOG, &vcpu->requests);
+ kvm_clear_request(KVM_REQ_WATCHDOG, vcpu);
spin_lock_irqsave(&vcpu->arch.wdt_lock, flags);
nr_jiffies = watchdog_next_timeout(vcpu);
@@ -690,7 +695,7 @@ int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
if (vcpu->arch.shared->msr & MSR_WE) {
local_irq_enable();
kvm_vcpu_block(vcpu);
- clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
+ kvm_clear_request(KVM_REQ_UNHALT, vcpu);
hard_irq_disable();
kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS);
diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c
index 0fda4230f6c0..77fd043b3ecc 100644
--- a/arch/powerpc/kvm/e500_mmu_host.c
+++ b/arch/powerpc/kvm/e500_mmu_host.c
@@ -797,9 +797,8 @@ int e500_mmu_host_init(struct kvmppc_vcpu_e500 *vcpu_e500)
host_tlb_params[0].sets =
host_tlb_params[0].entries / host_tlb_params[0].ways;
host_tlb_params[1].sets = 1;
-
- vcpu_e500->h2g_tlb1_rmap = kzalloc(sizeof(unsigned int) *
- host_tlb_params[1].entries,
+ vcpu_e500->h2g_tlb1_rmap = kcalloc(host_tlb_params[1].entries,
+ sizeof(*vcpu_e500->h2g_tlb1_rmap),
GFP_KERNEL);
if (!vcpu_e500->h2g_tlb1_rmap)
return -EINVAL;
diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c
index b379146de55b..c873ffe55362 100644
--- a/arch/powerpc/kvm/emulate.c
+++ b/arch/powerpc/kvm/emulate.c
@@ -259,10 +259,18 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
case OP_31_XOP_MFSPR:
emulated = kvmppc_emulate_mfspr(vcpu, sprn, rt);
+ if (emulated == EMULATE_AGAIN) {
+ emulated = EMULATE_DONE;
+ advance = 0;
+ }
break;
case OP_31_XOP_MTSPR:
emulated = kvmppc_emulate_mtspr(vcpu, sprn, rs);
+ if (emulated == EMULATE_AGAIN) {
+ emulated = EMULATE_DONE;
+ advance = 0;
+ }
break;
case OP_31_XOP_TLBSYNC:
diff --git a/arch/powerpc/kvm/emulate_loadstore.c b/arch/powerpc/kvm/emulate_loadstore.c
index 6d3c0ee1d744..af833531af31 100644
--- a/arch/powerpc/kvm/emulate_loadstore.c
+++ b/arch/powerpc/kvm/emulate_loadstore.c
@@ -34,18 +34,38 @@
#include "timing.h"
#include "trace.h"
-/* XXX to do:
- * lhax
- * lhaux
- * lswx
- * lswi
- * stswx
- * stswi
- * lha
- * lhau
- * lmw
- * stmw
+#ifdef CONFIG_PPC_FPU
+static bool kvmppc_check_fp_disabled(struct kvm_vcpu *vcpu)
+{
+ if (!(kvmppc_get_msr(vcpu) & MSR_FP)) {
+ kvmppc_core_queue_fpunavail(vcpu);
+ return true;
+ }
+
+ return false;
+}
+#endif /* CONFIG_PPC_FPU */
+
+#ifdef CONFIG_VSX
+static bool kvmppc_check_vsx_disabled(struct kvm_vcpu *vcpu)
+{
+ if (!(kvmppc_get_msr(vcpu) & MSR_VSX)) {
+ kvmppc_core_queue_vsx_unavail(vcpu);
+ return true;
+ }
+
+ return false;
+}
+#endif /* CONFIG_VSX */
+
+/*
+ * XXX to do:
+ * lfiwax, lfiwzx
+ * vector loads and stores
*
+ * Instructions that trap when used on cache-inhibited mappings
+ * are not emulated here: multiple and string instructions,
+ * lq/stq, and the load-reserve/store-conditional instructions.
*/
int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
{
@@ -66,6 +86,19 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
rs = get_rs(inst);
rt = get_rt(inst);
+ /*
+ * if mmio_vsx_tx_sx_enabled == 0, copy data between
+ * VSR[0..31] and memory
+ * if mmio_vsx_tx_sx_enabled == 1, copy data between
+ * VSR[32..63] and memory
+ */
+ vcpu->arch.mmio_vsx_tx_sx_enabled = get_tx_or_sx(inst);
+ vcpu->arch.mmio_vsx_copy_nums = 0;
+ vcpu->arch.mmio_vsx_offset = 0;
+ vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_NONE;
+ vcpu->arch.mmio_sp64_extend = 0;
+ vcpu->arch.mmio_sign_extend = 0;
+
switch (get_op(inst)) {
case 31:
switch (get_xop(inst)) {
@@ -73,6 +106,11 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
break;
+ case OP_31_XOP_LWZUX:
+ emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
+ kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
+ break;
+
case OP_31_XOP_LBZX:
emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
break;
@@ -82,22 +120,36 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
break;
+ case OP_31_XOP_STDX:
+ emulated = kvmppc_handle_store(run, vcpu,
+ kvmppc_get_gpr(vcpu, rs), 8, 1);
+ break;
+
+ case OP_31_XOP_STDUX:
+ emulated = kvmppc_handle_store(run, vcpu,
+ kvmppc_get_gpr(vcpu, rs), 8, 1);
+ kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
+ break;
+
case OP_31_XOP_STWX:
emulated = kvmppc_handle_store(run, vcpu,
- kvmppc_get_gpr(vcpu, rs),
- 4, 1);
+ kvmppc_get_gpr(vcpu, rs), 4, 1);
+ break;
+
+ case OP_31_XOP_STWUX:
+ emulated = kvmppc_handle_store(run, vcpu,
+ kvmppc_get_gpr(vcpu, rs), 4, 1);
+ kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
break;
case OP_31_XOP_STBX:
emulated = kvmppc_handle_store(run, vcpu,
- kvmppc_get_gpr(vcpu, rs),
- 1, 1);
+ kvmppc_get_gpr(vcpu, rs), 1, 1);
break;
case OP_31_XOP_STBUX:
emulated = kvmppc_handle_store(run, vcpu,
- kvmppc_get_gpr(vcpu, rs),
- 1, 1);
+ kvmppc_get_gpr(vcpu, rs), 1, 1);
kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
break;
@@ -105,6 +157,11 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
break;
+ case OP_31_XOP_LHAUX:
+ emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
+ kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
+ break;
+
case OP_31_XOP_LHZX:
emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
break;
@@ -116,14 +173,12 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
case OP_31_XOP_STHX:
emulated = kvmppc_handle_store(run, vcpu,
- kvmppc_get_gpr(vcpu, rs),
- 2, 1);
+ kvmppc_get_gpr(vcpu, rs), 2, 1);
break;
case OP_31_XOP_STHUX:
emulated = kvmppc_handle_store(run, vcpu,
- kvmppc_get_gpr(vcpu, rs),
- 2, 1);
+ kvmppc_get_gpr(vcpu, rs), 2, 1);
kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
break;
@@ -143,8 +198,7 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
case OP_31_XOP_STWBRX:
emulated = kvmppc_handle_store(run, vcpu,
- kvmppc_get_gpr(vcpu, rs),
- 4, 0);
+ kvmppc_get_gpr(vcpu, rs), 4, 0);
break;
case OP_31_XOP_LHBRX:
@@ -153,10 +207,258 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
case OP_31_XOP_STHBRX:
emulated = kvmppc_handle_store(run, vcpu,
- kvmppc_get_gpr(vcpu, rs),
- 2, 0);
+ kvmppc_get_gpr(vcpu, rs), 2, 0);
+ break;
+
+ case OP_31_XOP_LDBRX:
+ emulated = kvmppc_handle_load(run, vcpu, rt, 8, 0);
+ break;
+
+ case OP_31_XOP_STDBRX:
+ emulated = kvmppc_handle_store(run, vcpu,
+ kvmppc_get_gpr(vcpu, rs), 8, 0);
+ break;
+
+ case OP_31_XOP_LDX:
+ emulated = kvmppc_handle_load(run, vcpu, rt, 8, 1);
+ break;
+
+ case OP_31_XOP_LDUX:
+ emulated = kvmppc_handle_load(run, vcpu, rt, 8, 1);
+ kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
+ break;
+
+ case OP_31_XOP_LWAX:
+ emulated = kvmppc_handle_loads(run, vcpu, rt, 4, 1);
+ break;
+
+ case OP_31_XOP_LWAUX:
+ emulated = kvmppc_handle_loads(run, vcpu, rt, 4, 1);
+ kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
+ break;
+
+#ifdef CONFIG_PPC_FPU
+ case OP_31_XOP_LFSX:
+ if (kvmppc_check_fp_disabled(vcpu))
+ return EMULATE_DONE;
+ vcpu->arch.mmio_sp64_extend = 1;
+ emulated = kvmppc_handle_load(run, vcpu,
+ KVM_MMIO_REG_FPR|rt, 4, 1);
+ break;
+
+ case OP_31_XOP_LFSUX:
+ if (kvmppc_check_fp_disabled(vcpu))
+ return EMULATE_DONE;
+ vcpu->arch.mmio_sp64_extend = 1;
+ emulated = kvmppc_handle_load(run, vcpu,
+ KVM_MMIO_REG_FPR|rt, 4, 1);
+ kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
+ break;
+
+ case OP_31_XOP_LFDX:
+ if (kvmppc_check_fp_disabled(vcpu))
+ return EMULATE_DONE;
+ emulated = kvmppc_handle_load(run, vcpu,
+ KVM_MMIO_REG_FPR|rt, 8, 1);
+ break;
+
+ case OP_31_XOP_LFDUX:
+ if (kvmppc_check_fp_disabled(vcpu))
+ return EMULATE_DONE;
+ emulated = kvmppc_handle_load(run, vcpu,
+ KVM_MMIO_REG_FPR|rt, 8, 1);
+ kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
+ break;
+
+ case OP_31_XOP_LFIWAX:
+ if (kvmppc_check_fp_disabled(vcpu))
+ return EMULATE_DONE;
+ emulated = kvmppc_handle_loads(run, vcpu,
+ KVM_MMIO_REG_FPR|rt, 4, 1);
+ break;
+
+ case OP_31_XOP_LFIWZX:
+ if (kvmppc_check_fp_disabled(vcpu))
+ return EMULATE_DONE;
+ emulated = kvmppc_handle_load(run, vcpu,
+ KVM_MMIO_REG_FPR|rt, 4, 1);
+ break;
+
+ case OP_31_XOP_STFSX:
+ if (kvmppc_check_fp_disabled(vcpu))
+ return EMULATE_DONE;
+ vcpu->arch.mmio_sp64_extend = 1;
+ emulated = kvmppc_handle_store(run, vcpu,
+ VCPU_FPR(vcpu, rs), 4, 1);
+ break;
+
+ case OP_31_XOP_STFSUX:
+ if (kvmppc_check_fp_disabled(vcpu))
+ return EMULATE_DONE;
+ vcpu->arch.mmio_sp64_extend = 1;
+ emulated = kvmppc_handle_store(run, vcpu,
+ VCPU_FPR(vcpu, rs), 4, 1);
+ kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
+ break;
+
+ case OP_31_XOP_STFDX:
+ if (kvmppc_check_fp_disabled(vcpu))
+ return EMULATE_DONE;
+ emulated = kvmppc_handle_store(run, vcpu,
+ VCPU_FPR(vcpu, rs), 8, 1);
+ break;
+
+ case OP_31_XOP_STFDUX:
+ if (kvmppc_check_fp_disabled(vcpu))
+ return EMULATE_DONE;
+ emulated = kvmppc_handle_store(run, vcpu,
+ VCPU_FPR(vcpu, rs), 8, 1);
+ kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
+ break;
+
+ case OP_31_XOP_STFIWX:
+ if (kvmppc_check_fp_disabled(vcpu))
+ return EMULATE_DONE;
+ emulated = kvmppc_handle_store(run, vcpu,
+ VCPU_FPR(vcpu, rs), 4, 1);
+ break;
+#endif
+
+#ifdef CONFIG_VSX
+ case OP_31_XOP_LXSDX:
+ if (kvmppc_check_vsx_disabled(vcpu))
+ return EMULATE_DONE;
+ vcpu->arch.mmio_vsx_copy_nums = 1;
+ vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD;
+ emulated = kvmppc_handle_vsx_load(run, vcpu,
+ KVM_MMIO_REG_VSX|rt, 8, 1, 0);
+ break;
+
+ case OP_31_XOP_LXSSPX:
+ if (kvmppc_check_vsx_disabled(vcpu))
+ return EMULATE_DONE;
+ vcpu->arch.mmio_vsx_copy_nums = 1;
+ vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD;
+ vcpu->arch.mmio_sp64_extend = 1;
+ emulated = kvmppc_handle_vsx_load(run, vcpu,
+ KVM_MMIO_REG_VSX|rt, 4, 1, 0);
+ break;
+
+ case OP_31_XOP_LXSIWAX:
+ if (kvmppc_check_vsx_disabled(vcpu))
+ return EMULATE_DONE;
+ vcpu->arch.mmio_vsx_copy_nums = 1;
+ vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD;
+ emulated = kvmppc_handle_vsx_load(run, vcpu,
+ KVM_MMIO_REG_VSX|rt, 4, 1, 1);
+ break;
+
+ case OP_31_XOP_LXSIWZX:
+ if (kvmppc_check_vsx_disabled(vcpu))
+ return EMULATE_DONE;
+ vcpu->arch.mmio_vsx_copy_nums = 1;
+ vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD;
+ emulated = kvmppc_handle_vsx_load(run, vcpu,
+ KVM_MMIO_REG_VSX|rt, 4, 1, 0);
+ break;
+
+ case OP_31_XOP_LXVD2X:
+ /*
+ * In this case the load/store goes through these steps:
+ * Step 1: exit from the VM via the page fault ISR; KVM then
+ * saves the VSRs. See
+ * guest_exit_cont->store_fp_state->SAVE_32VSRS for reference.
+ *
+ * Step 2: copy the data between memory and the VCPU.
+ * Note: for LXVD2X/STXVD2X/LXVW4X/STXVW4X we use 2 copies of
+ * 8 bytes or 4 copies of 4 bytes to simulate one 16-byte copy.
+ * There is also an endianness issue: we must take the memory
+ * layout into account. See the LXVD2X_ROT/STXVD2X_ROT macros
+ * for reference. If the host is little-endian, KVM performs an
+ * XXSWAPD for LXVD2X_ROT/STXVD2X_ROT, so the positions in
+ * memory must be swapped.
+ *
+ * Step 3: return to the guest; KVM restores the registers.
+ * See kvmppc_hv_entry->load_fp_state->REST_32VSRS for
+ * reference.
+ */
+ if (kvmppc_check_vsx_disabled(vcpu))
+ return EMULATE_DONE;
+ vcpu->arch.mmio_vsx_copy_nums = 2;
+ vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD;
+ emulated = kvmppc_handle_vsx_load(run, vcpu,
+ KVM_MMIO_REG_VSX|rt, 8, 1, 0);
+ break;
+
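
The element-by-element splitting described in the comment above can be sketched in isolation: a 16-byte LXVD2X becomes two 8-byte MMIO reads (and LXVW4X four 4-byte reads), with the guest physical address advanced after each completed element, much as kvmppc_handle_vsx_load() does later in this patch. The callback and types below are illustrative assumptions, not kernel API:

/* Sketch only: split one 16-byte vector access into per-element MMIO
 * reads; read_mmio() is a hypothetical callback for one aligned access.
 */
static int emulate_vector_load(unsigned long gpa, void *dst,
			       int elem_size, int nelems,
			       int (*read_mmio)(unsigned long gpa,
						void *buf, int len))
{
	char *p = dst;
	int i, ret;

	for (i = 0; i < nelems; i++) {
		ret = read_mmio(gpa, p, elem_size);
		if (ret)
			return ret;
		gpa += elem_size;	/* like paddr_accessed += mmio.len */
		p += elem_size;
	}
	return 0;
}
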
+ case OP_31_XOP_LXVW4X:
+ if (kvmppc_check_vsx_disabled(vcpu))
+ return EMULATE_DONE;
+ vcpu->arch.mmio_vsx_copy_nums = 4;
+ vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_WORD;
+ emulated = kvmppc_handle_vsx_load(run, vcpu,
+ KVM_MMIO_REG_VSX|rt, 4, 1, 0);
+ break;
+
+ case OP_31_XOP_LXVDSX:
+ if (kvmppc_check_vsx_disabled(vcpu))
+ return EMULATE_DONE;
+ vcpu->arch.mmio_vsx_copy_nums = 1;
+ vcpu->arch.mmio_vsx_copy_type =
+ KVMPPC_VSX_COPY_DWORD_LOAD_DUMP;
+ emulated = kvmppc_handle_vsx_load(run, vcpu,
+ KVM_MMIO_REG_VSX|rt, 8, 1, 0);
+ break;
+
+ case OP_31_XOP_STXSDX:
+ if (kvmppc_check_vsx_disabled(vcpu))
+ return EMULATE_DONE;
+ vcpu->arch.mmio_vsx_copy_nums = 1;
+ vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD;
+ emulated = kvmppc_handle_vsx_store(run, vcpu,
+ rs, 8, 1);
break;
+ case OP_31_XOP_STXSSPX:
+ if (kvmppc_check_vsx_disabled(vcpu))
+ return EMULATE_DONE;
+ vcpu->arch.mmio_vsx_copy_nums = 1;
+ vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD;
+ vcpu->arch.mmio_sp64_extend = 1;
+ emulated = kvmppc_handle_vsx_store(run, vcpu,
+ rs, 4, 1);
+ break;
+
+ case OP_31_XOP_STXSIWX:
+ if (kvmppc_check_vsx_disabled(vcpu))
+ return EMULATE_DONE;
+ vcpu->arch.mmio_vsx_offset = 1;
+ vcpu->arch.mmio_vsx_copy_nums = 1;
+ vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_WORD;
+ emulated = kvmppc_handle_vsx_store(run, vcpu,
+ rs, 4, 1);
+ break;
+
+ case OP_31_XOP_STXVD2X:
+ if (kvmppc_check_vsx_disabled(vcpu))
+ return EMULATE_DONE;
+ vcpu->arch.mmio_vsx_copy_nums = 2;
+ vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD;
+ emulated = kvmppc_handle_vsx_store(run, vcpu,
+ rs, 8, 1);
+ break;
+
+ case OP_31_XOP_STXVW4X:
+ if (kvmppc_check_vsx_disabled(vcpu))
+ return EMULATE_DONE;
+ vcpu->arch.mmio_vsx_copy_nums = 4;
+ vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_WORD;
+ emulated = kvmppc_handle_vsx_store(run, vcpu,
+ rs, 4, 1);
+ break;
+#endif /* CONFIG_VSX */
default:
emulated = EMULATE_FAIL;
break;
@@ -167,10 +469,60 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
break;
- /* TBD: Add support for other 64 bit load variants like ldu, ldux, ldx etc. */
+#ifdef CONFIG_PPC_FPU
+ case OP_STFS:
+ if (kvmppc_check_fp_disabled(vcpu))
+ return EMULATE_DONE;
+ vcpu->arch.mmio_sp64_extend = 1;
+ emulated = kvmppc_handle_store(run, vcpu,
+ VCPU_FPR(vcpu, rs),
+ 4, 1);
+ break;
+
+ case OP_STFSU:
+ if (kvmppc_check_fp_disabled(vcpu))
+ return EMULATE_DONE;
+ vcpu->arch.mmio_sp64_extend = 1;
+ emulated = kvmppc_handle_store(run, vcpu,
+ VCPU_FPR(vcpu, rs),
+ 4, 1);
+ kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
+ break;
+
+ case OP_STFD:
+ if (kvmppc_check_fp_disabled(vcpu))
+ return EMULATE_DONE;
+ emulated = kvmppc_handle_store(run, vcpu,
+ VCPU_FPR(vcpu, rs),
+ 8, 1);
+ break;
+
+ case OP_STFDU:
+ if (kvmppc_check_fp_disabled(vcpu))
+ return EMULATE_DONE;
+ emulated = kvmppc_handle_store(run, vcpu,
+ VCPU_FPR(vcpu, rs),
+ 8, 1);
+ kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
+ break;
+#endif
+
case OP_LD:
rt = get_rt(inst);
- emulated = kvmppc_handle_load(run, vcpu, rt, 8, 1);
+ switch (inst & 3) {
+ case 0: /* ld */
+ emulated = kvmppc_handle_load(run, vcpu, rt, 8, 1);
+ break;
+ case 1: /* ldu */
+ emulated = kvmppc_handle_load(run, vcpu, rt, 8, 1);
+ kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
+ break;
+ case 2: /* lwa */
+ emulated = kvmppc_handle_loads(run, vcpu, rt, 4, 1);
+ break;
+ default:
+ emulated = EMULATE_FAIL;
+ }
break;
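
The (inst & 3) dispatch above is standard DS-form decoding: the two low bits select the ld/ldu/lwa variant and the displacement occupies the rest of the low halfword. A small stand-alone decode of an example encoding (the instruction word below is an assumption for illustration, not taken from the patch):

#include <stdint.h>
#include <stdio.h>

/* Decode 0xe8e10020, i.e. "ld r7,32(r1)": opcode 58 plus a DS-form
 * displacement whose low two bits select the ld/ldu/lwa variant.
 */
int main(void)
{
	uint32_t inst = 0xe8e10020;		/* example encoding */
	uint32_t op = inst >> 26;		/* 58 for the ld family */
	uint32_t rt = (inst >> 21) & 0x1f;
	uint32_t ra = (inst >> 16) & 0x1f;
	uint32_t variant = inst & 3;		/* 0=ld, 1=ldu, 2=lwa */
	int32_t ds = (int16_t)(inst & 0xfffc);	/* sign-extended */

	printf("op=%u rt=%u ra=%u variant=%u ds=%d\n",
	       op, rt, ra, variant, ds);
	return 0;
}
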
case OP_LWZU:
@@ -193,31 +545,37 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
4, 1);
break;
- /* TBD: Add support for other 64 bit store variants like stdu, stdux, stdx etc. */
case OP_STD:
rs = get_rs(inst);
- emulated = kvmppc_handle_store(run, vcpu,
- kvmppc_get_gpr(vcpu, rs),
- 8, 1);
+ switch (inst & 3) {
+ case 0: /* std */
+ emulated = kvmppc_handle_store(run, vcpu,
+ kvmppc_get_gpr(vcpu, rs), 8, 1);
+ break;
+ case 1: /* stdu */
+ emulated = kvmppc_handle_store(run, vcpu,
+ kvmppc_get_gpr(vcpu, rs), 8, 1);
+ kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
+ break;
+ default:
+ emulated = EMULATE_FAIL;
+ }
break;
case OP_STWU:
emulated = kvmppc_handle_store(run, vcpu,
- kvmppc_get_gpr(vcpu, rs),
- 4, 1);
+ kvmppc_get_gpr(vcpu, rs), 4, 1);
kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
break;
case OP_STB:
emulated = kvmppc_handle_store(run, vcpu,
- kvmppc_get_gpr(vcpu, rs),
- 1, 1);
+ kvmppc_get_gpr(vcpu, rs), 1, 1);
break;
case OP_STBU:
emulated = kvmppc_handle_store(run, vcpu,
- kvmppc_get_gpr(vcpu, rs),
- 1, 1);
+ kvmppc_get_gpr(vcpu, rs), 1, 1);
kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
break;
@@ -241,16 +599,48 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
case OP_STH:
emulated = kvmppc_handle_store(run, vcpu,
- kvmppc_get_gpr(vcpu, rs),
- 2, 1);
+ kvmppc_get_gpr(vcpu, rs), 2, 1);
break;
case OP_STHU:
emulated = kvmppc_handle_store(run, vcpu,
- kvmppc_get_gpr(vcpu, rs),
- 2, 1);
+ kvmppc_get_gpr(vcpu, rs), 2, 1);
+ kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
+ break;
+
+#ifdef CONFIG_PPC_FPU
+ case OP_LFS:
+ if (kvmppc_check_fp_disabled(vcpu))
+ return EMULATE_DONE;
+ vcpu->arch.mmio_sp64_extend = 1;
+ emulated = kvmppc_handle_load(run, vcpu,
+ KVM_MMIO_REG_FPR|rt, 4, 1);
+ break;
+
+ case OP_LFSU:
+ if (kvmppc_check_fp_disabled(vcpu))
+ return EMULATE_DONE;
+ vcpu->arch.mmio_sp64_extend = 1;
+ emulated = kvmppc_handle_load(run, vcpu,
+ KVM_MMIO_REG_FPR|rt, 4, 1);
+ kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
+ break;
+
+ case OP_LFD:
+ if (kvmppc_check_fp_disabled(vcpu))
+ return EMULATE_DONE;
+ emulated = kvmppc_handle_load(run, vcpu,
+ KVM_MMIO_REG_FPR|rt, 8, 1);
+ break;
+
+ case OP_LFDU:
+ if (kvmppc_check_fp_disabled(vcpu))
+ return EMULATE_DONE;
+ emulated = kvmppc_handle_load(run, vcpu,
+ KVM_MMIO_REG_FPR|rt, 8, 1);
kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
break;
+#endif
default:
emulated = EMULATE_FAIL;
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 95c91a9de351..1ee22a910074 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -37,6 +37,7 @@
#include <asm/cputhreads.h>
#include <asm/irqflags.h>
#include <asm/iommu.h>
+#include <asm/switch_to.h>
#include "timing.h"
#include "irq.h"
#include "../mm/mmu_decl.h"
@@ -232,7 +233,7 @@ int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
case EV_HCALL_TOKEN(EV_IDLE):
r = EV_SUCCESS;
kvm_vcpu_block(vcpu);
- clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
+ kvm_clear_request(KVM_REQ_UNHALT, vcpu);
break;
default:
r = EV_UNIMPLEMENTED;
@@ -524,11 +525,6 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
/* We support this only for PR */
r = !hv_enabled;
break;
-#ifdef CONFIG_KVM_MMIO
- case KVM_CAP_COALESCED_MMIO:
- r = KVM_COALESCED_MMIO_PAGE_OFFSET;
- break;
-#endif
#ifdef CONFIG_KVM_MPIC
case KVM_CAP_IRQ_MPIC:
r = 1;
@@ -538,6 +534,8 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
#ifdef CONFIG_PPC_BOOK3S_64
case KVM_CAP_SPAPR_TCE:
case KVM_CAP_SPAPR_TCE_64:
+ /* fallthrough */
+ case KVM_CAP_SPAPR_TCE_VFIO:
case KVM_CAP_PPC_RTAS:
case KVM_CAP_PPC_FIXUP_HCALL:
case KVM_CAP_PPC_ENABLE_HCALL:
@@ -806,6 +804,129 @@ void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
kvm->arch.kvm_ops->irq_bypass_del_producer(cons, prod);
}
+#ifdef CONFIG_VSX
+static inline int kvmppc_get_vsr_dword_offset(int index)
+{
+ int offset;
+
+ if ((index != 0) && (index != 1))
+ return -1;
+
+#ifdef __BIG_ENDIAN
+ offset = index;
+#else
+ offset = 1 - index;
+#endif
+
+ return offset;
+}
+
+static inline int kvmppc_get_vsr_word_offset(int index)
+{
+ int offset;
+
+ if ((index > 3) || (index < 0))
+ return -1;
+
+#ifdef __BIG_ENDIAN
+ offset = index;
+#else
+ offset = 3 - index;
+#endif
+ return offset;
+}
+
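
The two offset helpers above encode the endian-dependent element order inside a 128-bit VSR: on a big-endian host the index is used directly, on a little-endian host it is mirrored (1 - index for doublewords, 3 - index for words). A stand-alone userspace illustration of that mapping, which is only a sketch and not kernel code:

#include <stdio.h>

/* Print the dword/word offset mapping performed by
 * kvmppc_get_vsr_dword_offset()/kvmppc_get_vsr_word_offset() for the
 * endianness of the machine this runs on.
 */
int main(void)
{
	int one = 1;
	int big_endian = (*(char *)&one == 0);
	int i;

	for (i = 0; i < 2; i++)
		printf("dword %d -> offset %d\n", i, big_endian ? i : 1 - i);
	for (i = 0; i < 4; i++)
		printf("word  %d -> offset %d\n", i, big_endian ? i : 3 - i);
	return 0;
}
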
+static inline void kvmppc_set_vsr_dword(struct kvm_vcpu *vcpu,
+ u64 gpr)
+{
+ union kvmppc_one_reg val;
+ int offset = kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset);
+ int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
+
+ if (offset == -1)
+ return;
+
+ if (vcpu->arch.mmio_vsx_tx_sx_enabled) {
+ val.vval = VCPU_VSX_VR(vcpu, index);
+ val.vsxval[offset] = gpr;
+ VCPU_VSX_VR(vcpu, index) = val.vval;
+ } else {
+ VCPU_VSX_FPR(vcpu, index, offset) = gpr;
+ }
+}
+
+static inline void kvmppc_set_vsr_dword_dump(struct kvm_vcpu *vcpu,
+ u64 gpr)
+{
+ union kvmppc_one_reg val;
+ int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
+
+ if (vcpu->arch.mmio_vsx_tx_sx_enabled) {
+ val.vval = VCPU_VSX_VR(vcpu, index);
+ val.vsxval[0] = gpr;
+ val.vsxval[1] = gpr;
+ VCPU_VSX_VR(vcpu, index) = val.vval;
+ } else {
+ VCPU_VSX_FPR(vcpu, index, 0) = gpr;
+ VCPU_VSX_FPR(vcpu, index, 1) = gpr;
+ }
+}
+
+static inline void kvmppc_set_vsr_word(struct kvm_vcpu *vcpu,
+ u32 gpr32)
+{
+ union kvmppc_one_reg val;
+ int offset = kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset);
+ int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
+ int dword_offset, word_offset;
+
+ if (offset == -1)
+ return;
+
+ if (vcpu->arch.mmio_vsx_tx_sx_enabled) {
+ val.vval = VCPU_VSX_VR(vcpu, index);
+ val.vsx32val[offset] = gpr32;
+ VCPU_VSX_VR(vcpu, index) = val.vval;
+ } else {
+ dword_offset = offset / 2;
+ word_offset = offset % 2;
+ val.vsxval[0] = VCPU_VSX_FPR(vcpu, index, dword_offset);
+ val.vsx32val[word_offset] = gpr32;
+ VCPU_VSX_FPR(vcpu, index, dword_offset) = val.vsxval[0];
+ }
+}
+#endif /* CONFIG_VSX */
+
+#ifdef CONFIG_PPC_FPU
+static inline u64 sp_to_dp(u32 fprs)
+{
+ u64 fprd;
+
+ preempt_disable();
+ enable_kernel_fp();
+ asm ("lfs%U1%X1 0,%1; stfd%U0%X0 0,%0" : "=m" (fprd) : "m" (fprs)
+ : "fr0");
+ preempt_enable();
+ return fprd;
+}
+
+static inline u32 dp_to_sp(u64 fprd)
+{
+ u32 fprs;
+
+ preempt_disable();
+ enable_kernel_fp();
+ asm ("lfd%U1%X1 0,%1; stfs%U0%X0 0,%0" : "=m" (fprs) : "m" (fprd)
+ : "fr0");
+ preempt_enable();
+ return fprs;
+}
+
+#else
+#define sp_to_dp(x) (x)
+#define dp_to_sp(x) (x)
+#endif /* CONFIG_PPC_FPU */
+
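
sp_to_dp()/dp_to_sp() above use the FPU's lfs/stfd and lfd/stfs pairs, with kernel FP enabled and preemption disabled, to convert between single- and double-precision images. A portable userspace sketch of the same semantics, for illustration only and not a drop-in for the kernel helpers:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Reinterpret the bits as float/double and let the compiler emit the
 * precision conversion that lfs/stfd and lfd/stfs perform in the patch.
 */
static uint64_t sp_to_dp_portable(uint32_t sp)
{
	float f;
	double d;
	uint64_t out;

	memcpy(&f, &sp, sizeof(f));
	d = (double)f;
	memcpy(&out, &d, sizeof(out));
	return out;
}

static uint32_t dp_to_sp_portable(uint64_t dp)
{
	double d;
	float f;
	uint32_t out;

	memcpy(&d, &dp, sizeof(d));
	f = (float)d;
	memcpy(&out, &f, sizeof(out));
	return out;
}

int main(void)
{
	uint32_t one_sp = 0x3f800000;	/* 1.0f */

	printf("sp 0x%08x -> dp 0x%016llx\n", one_sp,
	       (unsigned long long)sp_to_dp_portable(one_sp));
	printf("round trip: 0x%08x\n",
	       dp_to_sp_portable(sp_to_dp_portable(one_sp)));
	return 0;
}
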
static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
struct kvm_run *run)
{
@@ -832,6 +953,10 @@ static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
}
}
+ /* conversion between single and double precision */
+ if ((vcpu->arch.mmio_sp64_extend) && (run->mmio.len == 4))
+ gpr = sp_to_dp(gpr);
+
if (vcpu->arch.mmio_sign_extend) {
switch (run->mmio.len) {
#ifdef CONFIG_PPC64
@@ -848,8 +973,6 @@ static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
}
}
- kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
-
switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) {
case KVM_MMIO_REG_GPR:
kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
@@ -866,6 +989,17 @@ static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
break;
#endif
+#ifdef CONFIG_VSX
+ case KVM_MMIO_REG_VSX:
+ if (vcpu->arch.mmio_vsx_copy_type == KVMPPC_VSX_COPY_DWORD)
+ kvmppc_set_vsr_dword(vcpu, gpr);
+ else if (vcpu->arch.mmio_vsx_copy_type == KVMPPC_VSX_COPY_WORD)
+ kvmppc_set_vsr_word(vcpu, gpr);
+ else if (vcpu->arch.mmio_vsx_copy_type ==
+ KVMPPC_VSX_COPY_DWORD_LOAD_DUMP)
+ kvmppc_set_vsr_dword_dump(vcpu, gpr);
+ break;
+#endif
default:
BUG();
}
@@ -932,6 +1066,35 @@ int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
return __kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian, 1);
}
+#ifdef CONFIG_VSX
+int kvmppc_handle_vsx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
+ unsigned int rt, unsigned int bytes,
+ int is_default_endian, int mmio_sign_extend)
+{
+ enum emulation_result emulated = EMULATE_DONE;
+
+ /* Currently, mmio_vsx_copy_nums is only allowed to be 4 or less */
+ if ( (vcpu->arch.mmio_vsx_copy_nums > 4) ||
+ (vcpu->arch.mmio_vsx_copy_nums < 0) ) {
+ return EMULATE_FAIL;
+ }
+
+ while (vcpu->arch.mmio_vsx_copy_nums) {
+ emulated = __kvmppc_handle_load(run, vcpu, rt, bytes,
+ is_default_endian, mmio_sign_extend);
+
+ if (emulated != EMULATE_DONE)
+ break;
+
+ vcpu->arch.paddr_accessed += run->mmio.len;
+
+ vcpu->arch.mmio_vsx_copy_nums--;
+ vcpu->arch.mmio_vsx_offset++;
+ }
+ return emulated;
+}
+#endif /* CONFIG_VSX */
+
int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
u64 val, unsigned int bytes, int is_default_endian)
{
@@ -957,6 +1120,9 @@ int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
vcpu->mmio_needed = 1;
vcpu->mmio_is_write = 1;
+ if ((vcpu->arch.mmio_sp64_extend) && (bytes == 4))
+ val = dp_to_sp(val);
+
/* Store the value at the lowest bytes in 'data'. */
if (!host_swabbed) {
switch (bytes) {
@@ -990,6 +1156,129 @@ int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
}
EXPORT_SYMBOL_GPL(kvmppc_handle_store);
+#ifdef CONFIG_VSX
+static inline int kvmppc_get_vsr_data(struct kvm_vcpu *vcpu, int rs, u64 *val)
+{
+ u32 dword_offset, word_offset;
+ union kvmppc_one_reg reg;
+ int vsx_offset = 0;
+ int copy_type = vcpu->arch.mmio_vsx_copy_type;
+ int result = 0;
+
+ switch (copy_type) {
+ case KVMPPC_VSX_COPY_DWORD:
+ vsx_offset =
+ kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset);
+
+ if (vsx_offset == -1) {
+ result = -1;
+ break;
+ }
+
+ if (!vcpu->arch.mmio_vsx_tx_sx_enabled) {
+ *val = VCPU_VSX_FPR(vcpu, rs, vsx_offset);
+ } else {
+ reg.vval = VCPU_VSX_VR(vcpu, rs);
+ *val = reg.vsxval[vsx_offset];
+ }
+ break;
+
+ case KVMPPC_VSX_COPY_WORD:
+ vsx_offset =
+ kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset);
+
+ if (vsx_offset == -1) {
+ result = -1;
+ break;
+ }
+
+ if (!vcpu->arch.mmio_vsx_tx_sx_enabled) {
+ dword_offset = vsx_offset / 2;
+ word_offset = vsx_offset % 2;
+ reg.vsxval[0] = VCPU_VSX_FPR(vcpu, rs, dword_offset);
+ *val = reg.vsx32val[word_offset];
+ } else {
+ reg.vval = VCPU_VSX_VR(vcpu, rs);
+ *val = reg.vsx32val[vsx_offset];
+ }
+ break;
+
+ default:
+ result = -1;
+ break;
+ }
+
+ return result;
+}
+
+int kvmppc_handle_vsx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
+ int rs, unsigned int bytes, int is_default_endian)
+{
+ u64 val;
+ enum emulation_result emulated = EMULATE_DONE;
+
+ vcpu->arch.io_gpr = rs;
+
+ /* Currently, mmio_vsx_copy_nums is only allowed to be 4 or less */
+ if ( (vcpu->arch.mmio_vsx_copy_nums > 4) ||
+ (vcpu->arch.mmio_vsx_copy_nums < 0) ) {
+ return EMULATE_FAIL;
+ }
+
+ while (vcpu->arch.mmio_vsx_copy_nums) {
+ if (kvmppc_get_vsr_data(vcpu, rs, &val) == -1)
+ return EMULATE_FAIL;
+
+ emulated = kvmppc_handle_store(run, vcpu,
+ val, bytes, is_default_endian);
+
+ if (emulated != EMULATE_DONE)
+ break;
+
+ vcpu->arch.paddr_accessed += run->mmio.len;
+
+ vcpu->arch.mmio_vsx_copy_nums--;
+ vcpu->arch.mmio_vsx_offset++;
+ }
+
+ return emulated;
+}
+
+static int kvmppc_emulate_mmio_vsx_loadstore(struct kvm_vcpu *vcpu,
+ struct kvm_run *run)
+{
+ enum emulation_result emulated = EMULATE_FAIL;
+ int r;
+
+ vcpu->arch.paddr_accessed += run->mmio.len;
+
+ if (!vcpu->mmio_is_write) {
+ emulated = kvmppc_handle_vsx_load(run, vcpu, vcpu->arch.io_gpr,
+ run->mmio.len, 1, vcpu->arch.mmio_sign_extend);
+ } else {
+ emulated = kvmppc_handle_vsx_store(run, vcpu,
+ vcpu->arch.io_gpr, run->mmio.len, 1);
+ }
+
+ switch (emulated) {
+ case EMULATE_DO_MMIO:
+ run->exit_reason = KVM_EXIT_MMIO;
+ r = RESUME_HOST;
+ break;
+ case EMULATE_FAIL:
+ pr_info("KVM: MMIO emulation failed (VSX repeat)\n");
+ run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
+ r = RESUME_HOST;
+ break;
+ default:
+ r = RESUME_GUEST;
+ break;
+ }
+ return r;
+}
+#endif /* CONFIG_VSX */
+
int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
int r = 0;
@@ -1092,13 +1381,24 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
int r;
sigset_t sigsaved;
- if (vcpu->sigset_active)
- sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
-
if (vcpu->mmio_needed) {
+ vcpu->mmio_needed = 0;
if (!vcpu->mmio_is_write)
kvmppc_complete_mmio_load(vcpu, run);
- vcpu->mmio_needed = 0;
+#ifdef CONFIG_VSX
+ if (vcpu->arch.mmio_vsx_copy_nums > 0) {
+ vcpu->arch.mmio_vsx_copy_nums--;
+ vcpu->arch.mmio_vsx_offset++;
+ }
+
+ if (vcpu->arch.mmio_vsx_copy_nums > 0) {
+ r = kvmppc_emulate_mmio_vsx_loadstore(vcpu, run);
+ if (r == RESUME_HOST) {
+ vcpu->mmio_needed = 1;
+ return r;
+ }
+ }
+#endif
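
The #ifdef CONFIG_VSX block above is the resume half of the multi-element VSX MMIO scheme: each element that reaches userspace costs one KVM_EXIT_MMIO, and on re-entry the copy count and offset are advanced before the next element is issued via kvmppc_emulate_mmio_vsx_loadstore(). A self-contained toy model of that bookkeeping (names and structure are illustrative, not kernel API):

#include <stdio.h>

struct mmio_state {
	int copy_nums;	/* elements still to transfer */
	int offset;	/* element index within the VSR */
};

int main(void)
{
	/* e.g. lxvw4x: 4 word-sized elements */
	struct mmio_state st = { .copy_nums = 4, .offset = 0 };

	while (st.copy_nums > 0) {
		printf("exit to userspace for element %d\n", st.offset);
		/* on re-entry, the element just serviced is consumed */
		st.copy_nums--;
		st.offset++;
	}
	printf("vector access complete after %d exits\n", st.offset);
	return 0;
}
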
} else if (vcpu->arch.osi_needed) {
u64 *gprs = run->osi.gprs;
int i;
@@ -1120,6 +1420,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
#endif
}
+ if (vcpu->sigset_active)
+ sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
+
if (run->immediate_exit)
r = -EINTR;
else
diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c
index 0d3002b7e2b4..500b0f6a0b64 100644
--- a/arch/powerpc/lib/code-patching.c
+++ b/arch/powerpc/lib/code-patching.c
@@ -8,6 +8,7 @@
*/
#include <linux/kernel.h>
+#include <linux/kprobes.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/mm.h>
@@ -59,7 +60,7 @@ bool is_offset_in_branch_range(long offset)
* Helper to check if a given instruction is a conditional branch
* Derived from the conditional checks in analyse_instr()
*/
-bool __kprobes is_conditional_branch(unsigned int instr)
+bool is_conditional_branch(unsigned int instr)
{
unsigned int opcode = instr >> 26;
@@ -75,6 +76,7 @@ bool __kprobes is_conditional_branch(unsigned int instr)
}
return false;
}
+NOKPROBE_SYMBOL(is_conditional_branch);
unsigned int create_branch(const unsigned int *addr,
unsigned long target, int flags)
diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c
index 9c542ec70c5b..33117f8a0882 100644
--- a/arch/powerpc/lib/sstep.c
+++ b/arch/powerpc/lib/sstep.c
@@ -49,7 +49,8 @@ extern int do_stxvd2x(int rn, unsigned long ea);
/*
* Emulate the truncation of 64 bit values in 32-bit mode.
*/
-static unsigned long truncate_if_32bit(unsigned long msr, unsigned long val)
+static nokprobe_inline unsigned long truncate_if_32bit(unsigned long msr,
+ unsigned long val)
{
#ifdef __powerpc64__
if ((msr & MSR_64BIT) == 0)
@@ -61,7 +62,7 @@ static unsigned long truncate_if_32bit(unsigned long msr, unsigned long val)
/*
* Determine whether a conditional branch instruction would branch.
*/
-static int __kprobes branch_taken(unsigned int instr, struct pt_regs *regs)
+static nokprobe_inline int branch_taken(unsigned int instr, struct pt_regs *regs)
{
unsigned int bo = (instr >> 21) & 0x1f;
unsigned int bi;
@@ -81,8 +82,7 @@ static int __kprobes branch_taken(unsigned int instr, struct pt_regs *regs)
return 1;
}
-
-static long __kprobes address_ok(struct pt_regs *regs, unsigned long ea, int nb)
+static nokprobe_inline long address_ok(struct pt_regs *regs, unsigned long ea, int nb)
{
if (!user_mode(regs))
return 1;
@@ -92,7 +92,7 @@ static long __kprobes address_ok(struct pt_regs *regs, unsigned long ea, int nb)
/*
* Calculate effective address for a D-form instruction
*/
-static unsigned long __kprobes dform_ea(unsigned int instr, struct pt_regs *regs)
+static nokprobe_inline unsigned long dform_ea(unsigned int instr, struct pt_regs *regs)
{
int ra;
unsigned long ea;
@@ -109,7 +109,7 @@ static unsigned long __kprobes dform_ea(unsigned int instr, struct pt_regs *regs
/*
* Calculate effective address for a DS-form instruction
*/
-static unsigned long __kprobes dsform_ea(unsigned int instr, struct pt_regs *regs)
+static nokprobe_inline unsigned long dsform_ea(unsigned int instr, struct pt_regs *regs)
{
int ra;
unsigned long ea;
@@ -126,8 +126,8 @@ static unsigned long __kprobes dsform_ea(unsigned int instr, struct pt_regs *reg
/*
* Calculate effective address for an X-form instruction
*/
-static unsigned long __kprobes xform_ea(unsigned int instr,
- struct pt_regs *regs)
+static nokprobe_inline unsigned long xform_ea(unsigned int instr,
+ struct pt_regs *regs)
{
int ra, rb;
unsigned long ea;
@@ -145,33 +145,33 @@ static unsigned long __kprobes xform_ea(unsigned int instr,
* Return the largest power of 2, not greater than sizeof(unsigned long),
* such that x is a multiple of it.
*/
-static inline unsigned long max_align(unsigned long x)
+static nokprobe_inline unsigned long max_align(unsigned long x)
{
x |= sizeof(unsigned long);
return x & -x; /* isolates rightmost bit */
}
-static inline unsigned long byterev_2(unsigned long x)
+static nokprobe_inline unsigned long byterev_2(unsigned long x)
{
return ((x >> 8) & 0xff) | ((x & 0xff) << 8);
}
-static inline unsigned long byterev_4(unsigned long x)
+static nokprobe_inline unsigned long byterev_4(unsigned long x)
{
return ((x >> 24) & 0xff) | ((x >> 8) & 0xff00) |
((x & 0xff00) << 8) | ((x & 0xff) << 24);
}
#ifdef __powerpc64__
-static inline unsigned long byterev_8(unsigned long x)
+static nokprobe_inline unsigned long byterev_8(unsigned long x)
{
return (byterev_4(x) << 32) | byterev_4(x >> 32);
}
#endif
-static int __kprobes read_mem_aligned(unsigned long *dest, unsigned long ea,
- int nb)
+static nokprobe_inline int read_mem_aligned(unsigned long *dest,
+ unsigned long ea, int nb)
{
int err = 0;
unsigned long x = 0;
@@ -197,8 +197,8 @@ static int __kprobes read_mem_aligned(unsigned long *dest, unsigned long ea,
return err;
}
-static int __kprobes read_mem_unaligned(unsigned long *dest, unsigned long ea,
- int nb, struct pt_regs *regs)
+static nokprobe_inline int read_mem_unaligned(unsigned long *dest,
+ unsigned long ea, int nb, struct pt_regs *regs)
{
int err;
unsigned long x, b, c;
@@ -248,7 +248,7 @@ static int __kprobes read_mem_unaligned(unsigned long *dest, unsigned long ea,
* Read memory at address ea for nb bytes, return 0 for success
* or -EFAULT if an error occurred.
*/
-static int __kprobes read_mem(unsigned long *dest, unsigned long ea, int nb,
+static int read_mem(unsigned long *dest, unsigned long ea, int nb,
struct pt_regs *regs)
{
if (!address_ok(regs, ea, nb))
@@ -257,9 +257,10 @@ static int __kprobes read_mem(unsigned long *dest, unsigned long ea, int nb,
return read_mem_aligned(dest, ea, nb);
return read_mem_unaligned(dest, ea, nb, regs);
}
+NOKPROBE_SYMBOL(read_mem);
-static int __kprobes write_mem_aligned(unsigned long val, unsigned long ea,
- int nb)
+static nokprobe_inline int write_mem_aligned(unsigned long val,
+ unsigned long ea, int nb)
{
int err = 0;
@@ -282,8 +283,8 @@ static int __kprobes write_mem_aligned(unsigned long val, unsigned long ea,
return err;
}
-static int __kprobes write_mem_unaligned(unsigned long val, unsigned long ea,
- int nb, struct pt_regs *regs)
+static nokprobe_inline int write_mem_unaligned(unsigned long val,
+ unsigned long ea, int nb, struct pt_regs *regs)
{
int err;
unsigned long c;
@@ -325,7 +326,7 @@ static int __kprobes write_mem_unaligned(unsigned long val, unsigned long ea,
* Write memory at address ea for nb bytes, return 0 for success
* or -EFAULT if an error occurred.
*/
-static int __kprobes write_mem(unsigned long val, unsigned long ea, int nb,
+static int write_mem(unsigned long val, unsigned long ea, int nb,
struct pt_regs *regs)
{
if (!address_ok(regs, ea, nb))
@@ -334,13 +335,14 @@ static int __kprobes write_mem(unsigned long val, unsigned long ea, int nb,
return write_mem_aligned(val, ea, nb);
return write_mem_unaligned(val, ea, nb, regs);
}
+NOKPROBE_SYMBOL(write_mem);
#ifdef CONFIG_PPC_FPU
/*
* Check the address and alignment, and call func to do the actual
* load or store.
*/
-static int __kprobes do_fp_load(int rn, int (*func)(int, unsigned long),
+static int do_fp_load(int rn, int (*func)(int, unsigned long),
unsigned long ea, int nb,
struct pt_regs *regs)
{
@@ -380,8 +382,9 @@ static int __kprobes do_fp_load(int rn, int (*func)(int, unsigned long),
return err;
return (*func)(rn, ptr);
}
+NOKPROBE_SYMBOL(do_fp_load);
-static int __kprobes do_fp_store(int rn, int (*func)(int, unsigned long),
+static int do_fp_store(int rn, int (*func)(int, unsigned long),
unsigned long ea, int nb,
struct pt_regs *regs)
{
@@ -425,11 +428,12 @@ static int __kprobes do_fp_store(int rn, int (*func)(int, unsigned long),
}
return err;
}
+NOKPROBE_SYMBOL(do_fp_store);
#endif
#ifdef CONFIG_ALTIVEC
/* For Altivec/VMX, no need to worry about alignment */
-static int __kprobes do_vec_load(int rn, int (*func)(int, unsigned long),
+static nokprobe_inline int do_vec_load(int rn, int (*func)(int, unsigned long),
unsigned long ea, struct pt_regs *regs)
{
if (!address_ok(regs, ea & ~0xfUL, 16))
@@ -437,7 +441,7 @@ static int __kprobes do_vec_load(int rn, int (*func)(int, unsigned long),
return (*func)(rn, ea);
}
-static int __kprobes do_vec_store(int rn, int (*func)(int, unsigned long),
+static nokprobe_inline int do_vec_store(int rn, int (*func)(int, unsigned long),
unsigned long ea, struct pt_regs *regs)
{
if (!address_ok(regs, ea & ~0xfUL, 16))
@@ -447,7 +451,7 @@ static int __kprobes do_vec_store(int rn, int (*func)(int, unsigned long),
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
-static int __kprobes do_vsx_load(int rn, int (*func)(int, unsigned long),
+static nokprobe_inline int do_vsx_load(int rn, int (*func)(int, unsigned long),
unsigned long ea, struct pt_regs *regs)
{
int err;
@@ -465,7 +469,7 @@ static int __kprobes do_vsx_load(int rn, int (*func)(int, unsigned long),
return err;
}
-static int __kprobes do_vsx_store(int rn, int (*func)(int, unsigned long),
+static nokprobe_inline int do_vsx_store(int rn, int (*func)(int, unsigned long),
unsigned long ea, struct pt_regs *regs)
{
int err;
@@ -522,7 +526,7 @@ static int __kprobes do_vsx_store(int rn, int (*func)(int, unsigned long),
: "=r" (err) \
: "r" (addr), "i" (-EFAULT), "0" (err))
-static void __kprobes set_cr0(struct pt_regs *regs, int rd)
+static nokprobe_inline void set_cr0(struct pt_regs *regs, int rd)
{
long val = regs->gpr[rd];
@@ -539,7 +543,7 @@ static void __kprobes set_cr0(struct pt_regs *regs, int rd)
regs->ccr |= 0x20000000;
}
-static void __kprobes add_with_carry(struct pt_regs *regs, int rd,
+static nokprobe_inline void add_with_carry(struct pt_regs *regs, int rd,
unsigned long val1, unsigned long val2,
unsigned long carry_in)
{
@@ -560,7 +564,7 @@ static void __kprobes add_with_carry(struct pt_regs *regs, int rd,
regs->xer &= ~XER_CA;
}
-static void __kprobes do_cmp_signed(struct pt_regs *regs, long v1, long v2,
+static nokprobe_inline void do_cmp_signed(struct pt_regs *regs, long v1, long v2,
int crfld)
{
unsigned int crval, shift;
@@ -576,7 +580,7 @@ static void __kprobes do_cmp_signed(struct pt_regs *regs, long v1, long v2,
regs->ccr = (regs->ccr & ~(0xf << shift)) | (crval << shift);
}
-static void __kprobes do_cmp_unsigned(struct pt_regs *regs, unsigned long v1,
+static nokprobe_inline void do_cmp_unsigned(struct pt_regs *regs, unsigned long v1,
unsigned long v2, int crfld)
{
unsigned int crval, shift;
@@ -592,7 +596,7 @@ static void __kprobes do_cmp_unsigned(struct pt_regs *regs, unsigned long v1,
regs->ccr = (regs->ccr & ~(0xf << shift)) | (crval << shift);
}
-static int __kprobes trap_compare(long v1, long v2)
+static nokprobe_inline int trap_compare(long v1, long v2)
{
int ret = 0;
@@ -631,7 +635,7 @@ static int __kprobes trap_compare(long v1, long v2)
* Returns 1 if the instruction has been executed, or 0 if not.
* Sets *op to indicate what the instruction does.
*/
-int __kprobes analyse_instr(struct instruction_op *op, struct pt_regs *regs,
+int analyse_instr(struct instruction_op *op, struct pt_regs *regs,
unsigned int instr)
{
unsigned int opcode, ra, rb, rd, spr, u;
@@ -1692,6 +1696,7 @@ int __kprobes analyse_instr(struct instruction_op *op, struct pt_regs *regs,
#endif
}
EXPORT_SYMBOL_GPL(analyse_instr);
+NOKPROBE_SYMBOL(analyse_instr);
/*
* For PPC32 we always use stwu with r1 to change the stack pointer.
@@ -1701,7 +1706,7 @@ EXPORT_SYMBOL_GPL(analyse_instr);
* don't emulate the real store operation. We will do real store
* operation safely in exception return code by checking this flag.
*/
-static __kprobes int handle_stack_update(unsigned long ea, struct pt_regs *regs)
+static nokprobe_inline int handle_stack_update(unsigned long ea, struct pt_regs *regs)
{
#ifdef CONFIG_PPC32
/*
@@ -1721,7 +1726,7 @@ static __kprobes int handle_stack_update(unsigned long ea, struct pt_regs *regs)
return 0;
}
-static __kprobes void do_signext(unsigned long *valp, int size)
+static nokprobe_inline void do_signext(unsigned long *valp, int size)
{
switch (size) {
case 2:
@@ -1733,7 +1738,7 @@ static __kprobes void do_signext(unsigned long *valp, int size)
}
}
-static __kprobes void do_byterev(unsigned long *valp, int size)
+static nokprobe_inline void do_byterev(unsigned long *valp, int size)
{
switch (size) {
case 2:
@@ -1757,7 +1762,7 @@ static __kprobes void do_byterev(unsigned long *valp, int size)
* or -1 if the instruction is one that should not be stepped,
* such as an rfid, or a mtmsrd that would clear MSR_RI.
*/
-int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
+int emulate_step(struct pt_regs *regs, unsigned int instr)
{
struct instruction_op op;
int r, err, size;
@@ -1988,3 +1993,4 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
regs->nip = truncate_if_32bit(regs->msr, regs->nip + 4);
return 1;
}
+NOKPROBE_SYMBOL(emulate_step);
diff --git a/arch/powerpc/mm/dump_hashpagetable.c b/arch/powerpc/mm/dump_hashpagetable.c
index d979709a0239..c6b900f54c07 100644
--- a/arch/powerpc/mm/dump_hashpagetable.c
+++ b/arch/powerpc/mm/dump_hashpagetable.c
@@ -468,7 +468,7 @@ static void walk_linearmapping(struct pg_state *st)
unsigned long psize = 1 << mmu_psize_defs[mmu_linear_psize].shift;
for (addr = PAGE_OFFSET; addr < PAGE_OFFSET +
- memblock_phys_mem_size(); addr += psize)
+ memblock_end_of_DRAM(); addr += psize)
hpte_find(st, addr, mmu_linear_psize);
}
diff --git a/arch/powerpc/mm/dump_linuxpagetables.c b/arch/powerpc/mm/dump_linuxpagetables.c
index 49abaf4dc8e3..d659345a98d6 100644
--- a/arch/powerpc/mm/dump_linuxpagetables.c
+++ b/arch/powerpc/mm/dump_linuxpagetables.c
@@ -26,6 +26,10 @@
#include <asm/page.h>
#include <asm/pgalloc.h>
+#ifdef CONFIG_PPC32
+#define KERN_VIRT_START 0
+#endif
+
/*
* To visualise what is happening,
*
@@ -56,6 +60,8 @@ struct pg_state {
struct seq_file *seq;
const struct addr_marker *marker;
unsigned long start_address;
+ unsigned long start_pa;
+ unsigned long last_pa;
unsigned int level;
u64 current_flags;
};
@@ -69,6 +75,7 @@ static struct addr_marker address_markers[] = {
{ 0, "Start of kernel VM" },
{ 0, "vmalloc() Area" },
{ 0, "vmalloc() End" },
+#ifdef CONFIG_PPC64
{ 0, "isa I/O start" },
{ 0, "isa I/O end" },
{ 0, "phb I/O start" },
@@ -76,6 +83,20 @@ static struct addr_marker address_markers[] = {
{ 0, "I/O remap start" },
{ 0, "I/O remap end" },
{ 0, "vmemmap start" },
+#else
+ { 0, "Early I/O remap start" },
+ { 0, "Early I/O remap end" },
+#ifdef CONFIG_NOT_COHERENT_CACHE
+ { 0, "Consistent mem start" },
+ { 0, "Consistent mem end" },
+#endif
+#ifdef CONFIG_HIGHMEM
+ { 0, "Highmem PTEs start" },
+ { 0, "Highmem PTEs end" },
+#endif
+ { 0, "Fixmap start" },
+ { 0, "Fixmap end" },
+#endif
{ -1, NULL },
};
@@ -100,8 +121,13 @@ static const struct flag_info flag_array[] = {
.set = "user",
.clear = " ",
}, {
+#if _PAGE_RO == 0
.mask = _PAGE_RW,
.val = _PAGE_RW,
+#else
+ .mask = _PAGE_RO,
+ .val = 0,
+#endif
.set = "rw",
.clear = "ro",
}, {
@@ -154,11 +180,24 @@ static const struct flag_info flag_array[] = {
.clear = " ",
}, {
#endif
+#ifndef CONFIG_PPC_BOOK3S_64
.mask = _PAGE_NO_CACHE,
.val = _PAGE_NO_CACHE,
.set = "no cache",
.clear = " ",
}, {
+#else
+ .mask = _PAGE_NON_IDEMPOTENT,
+ .val = _PAGE_NON_IDEMPOTENT,
+ .set = "non-idempotent",
+ .clear = " ",
+ }, {
+ .mask = _PAGE_TOLERANT,
+ .val = _PAGE_TOLERANT,
+ .set = "tolerant",
+ .clear = " ",
+ }, {
+#endif
#ifdef CONFIG_PPC_BOOK3S_64
.mask = H_PAGE_BUSY,
.val = H_PAGE_BUSY,
@@ -188,6 +227,10 @@ static const struct flag_info flag_array[] = {
.mask = _PAGE_SPECIAL,
.val = _PAGE_SPECIAL,
.set = "special",
+ }, {
+ .mask = _PAGE_SHARED,
+ .val = _PAGE_SHARED,
+ .set = "shared",
}
};
@@ -252,7 +295,14 @@ static void dump_addr(struct pg_state *st, unsigned long addr)
const char *unit = units;
unsigned long delta;
- seq_printf(st->seq, "0x%016lx-0x%016lx ", st->start_address, addr-1);
+#ifdef CONFIG_PPC64
+ seq_printf(st->seq, "0x%016lx-0x%016lx ", st->start_address, addr-1);
+ seq_printf(st->seq, "0x%016lx ", st->start_pa);
+#else
+ seq_printf(st->seq, "0x%08lx-0x%08lx ", st->start_address, addr - 1);
+ seq_printf(st->seq, "0x%08lx ", st->start_pa);
+#endif
+
delta = (addr - st->start_address) >> 10;
/* Work out what appropriate unit to use */
while (!(delta & 1023) && unit[1]) {
@@ -267,11 +317,15 @@ static void note_page(struct pg_state *st, unsigned long addr,
unsigned int level, u64 val)
{
u64 flag = val & pg_level[level].mask;
+ u64 pa = val & PTE_RPN_MASK;
+
/* At first no level is set */
if (!st->level) {
st->level = level;
st->current_flags = flag;
st->start_address = addr;
+ st->start_pa = pa;
+ st->last_pa = pa;
seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
/*
* Dump the section of virtual memory when:
@@ -279,9 +333,11 @@ static void note_page(struct pg_state *st, unsigned long addr,
* - we change levels in the tree.
* - the address is in a different section of memory and is thus
* used for a different purpose, regardless of the flags.
+ * - the pa of this page is not adjacent to the last inspected page
*/
} else if (flag != st->current_flags || level != st->level ||
- addr >= st->marker[1].start_address) {
+ addr >= st->marker[1].start_address ||
+ pa != st->last_pa + PAGE_SIZE) {
/* Check the PTE flags */
if (st->current_flags) {
@@ -305,8 +361,12 @@ static void note_page(struct pg_state *st, unsigned long addr,
seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
}
st->start_address = addr;
+ st->start_pa = pa;
+ st->last_pa = pa;
st->current_flags = flag;
st->level = level;
+ } else {
+ st->last_pa = pa;
}
}
@@ -377,20 +437,38 @@ static void walk_pagetables(struct pg_state *st)
static void populate_markers(void)
{
- address_markers[0].start_address = PAGE_OFFSET;
- address_markers[1].start_address = VMALLOC_START;
- address_markers[2].start_address = VMALLOC_END;
- address_markers[3].start_address = ISA_IO_BASE;
- address_markers[4].start_address = ISA_IO_END;
- address_markers[5].start_address = PHB_IO_BASE;
- address_markers[6].start_address = PHB_IO_END;
- address_markers[7].start_address = IOREMAP_BASE;
- address_markers[8].start_address = IOREMAP_END;
+ int i = 0;
+
+ address_markers[i++].start_address = PAGE_OFFSET;
+ address_markers[i++].start_address = VMALLOC_START;
+ address_markers[i++].start_address = VMALLOC_END;
+#ifdef CONFIG_PPC64
+ address_markers[i++].start_address = ISA_IO_BASE;
+ address_markers[i++].start_address = ISA_IO_END;
+ address_markers[i++].start_address = PHB_IO_BASE;
+ address_markers[i++].start_address = PHB_IO_END;
+ address_markers[i++].start_address = IOREMAP_BASE;
+ address_markers[i++].start_address = IOREMAP_END;
#ifdef CONFIG_PPC_STD_MMU_64
- address_markers[9].start_address = H_VMEMMAP_BASE;
+ address_markers[i++].start_address = H_VMEMMAP_BASE;
#else
- address_markers[9].start_address = VMEMMAP_BASE;
+ address_markers[i++].start_address = VMEMMAP_BASE;
+#endif
+#else /* !CONFIG_PPC64 */
+ address_markers[i++].start_address = ioremap_bot;
+ address_markers[i++].start_address = IOREMAP_TOP;
+#ifdef CONFIG_NOT_COHERENT_CACHE
+ address_markers[i++].start_address = IOREMAP_TOP;
+ address_markers[i++].start_address = IOREMAP_TOP +
+ CONFIG_CONSISTENT_SIZE;
+#endif
+#ifdef CONFIG_HIGHMEM
+ address_markers[i++].start_address = PKMAP_BASE;
+ address_markers[i++].start_address = PKMAP_ADDR(LAST_PKMAP);
#endif
+ address_markers[i++].start_address = FIXADDR_START;
+ address_markers[i++].start_address = FIXADDR_TOP;
+#endif /* CONFIG_PPC64 */
}
static int ptdump_show(struct seq_file *m, void *v)
@@ -435,7 +513,7 @@ static int ptdump_init(void)
populate_markers();
build_pgtable_complete_mask();
- debugfs_file = debugfs_create_file("kernel_pagetables", 0400, NULL,
+ debugfs_file = debugfs_create_file("kernel_page_tables", 0400, NULL,
NULL, &ptdump_fops);
return debugfs_file ? 0 : -ENOMEM;
}
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index 51def8a515be..3a7d580fdc59 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -120,8 +120,6 @@ static int do_sigbus(struct pt_regs *regs, unsigned long address,
siginfo_t info;
unsigned int lsb = 0;
- up_read(&current->mm->mmap_sem);
-
if (!user_mode(regs))
return MM_FAULT_ERR(SIGBUS);
@@ -154,13 +152,6 @@ static int mm_fault_error(struct pt_regs *regs, unsigned long addr, int fault)
* continue the pagefault.
*/
if (fatal_signal_pending(current)) {
- /*
- * If we have retry set, the mmap semaphore will have
- * alrady been released in __lock_page_or_retry(). Else
- * we release it now.
- */
- if (!(fault & VM_FAULT_RETRY))
- up_read(&current->mm->mmap_sem);
/* Coming from kernel, we need to deal with uaccess fixups */
if (user_mode(regs))
return MM_FAULT_RETURN;
@@ -173,8 +164,6 @@ static int mm_fault_error(struct pt_regs *regs, unsigned long addr, int fault)
/* Out of memory */
if (fault & VM_FAULT_OOM) {
- up_read(&current->mm->mmap_sem);
-
/*
* We ran out of memory, or some other thing happened to us that
* made us unable to handle the page fault gracefully.
@@ -298,7 +287,7 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
* can result in fault, which will cause a deadlock when called with
* mmap_sem held
*/
- if (user_mode(regs))
+ if (!is_exec && user_mode(regs))
store_update_sp = store_updates_sp(regs);
if (user_mode(regs))
@@ -458,9 +447,30 @@ good_area:
* the fault.
*/
fault = handle_mm_fault(vma, address, flags);
+
+ /*
+ * Handle the retry right now; in that case the mmap_sem has
+ * already been released.
+ */
+ if (unlikely(fault & VM_FAULT_RETRY)) {
+ /* We retry only once */
+ if (flags & FAULT_FLAG_ALLOW_RETRY) {
+ /*
+ * Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
+ * of starvation.
+ */
+ flags &= ~FAULT_FLAG_ALLOW_RETRY;
+ flags |= FAULT_FLAG_TRIED;
+ if (!fatal_signal_pending(current))
+ goto retry;
+ }
+ /* We will enter mm_fault_error() below */
+ } else
+ up_read(&current->mm->mmap_sem);
+
if (unlikely(fault & (VM_FAULT_RETRY|VM_FAULT_ERROR))) {
if (fault & VM_FAULT_SIGSEGV)
- goto bad_area;
+ goto bad_area_nosemaphore;
rc = mm_fault_error(regs, address, fault);
if (rc >= MM_FAULT_RETURN)
goto bail;
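
The hunk above switches do_page_fault() to the standard retry pattern: when handle_mm_fault() returns VM_FAULT_RETRY it has already dropped mmap_sem, and the fault is retried exactly once with FAULT_FLAG_ALLOW_RETRY cleared and FAULT_FLAG_TRIED set. A condensed stand-alone sketch of that control flow, with the kernel pieces replaced by stubs:

#include <stdio.h>

#define FLAG_ALLOW_RETRY 0x1
#define FLAG_TRIED       0x2
#define FAULT_RETRY      0x100

/* Toy stand-in for handle_mm_fault(): report RETRY the first time only. */
static int fake_handle_fault(int flags)
{
	static int first = 1;

	if (first && (flags & FLAG_ALLOW_RETRY)) {
		first = 0;
		return FAULT_RETRY;	/* mmap_sem already dropped here */
	}
	return 0;			/* fault handled */
}

int main(void)
{
	int flags = FLAG_ALLOW_RETRY;
	int fault;

retry:
	fault = fake_handle_fault(flags);
	if (fault & FAULT_RETRY) {
		if (flags & FLAG_ALLOW_RETRY) {
			/* retry only once, as in the hunk above */
			flags &= ~FLAG_ALLOW_RETRY;
			flags |= FLAG_TRIED;
			goto retry;
		}
	}
	printf("fault resolved, flags=0x%x\n", flags);
	return 0;
}
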
@@ -469,41 +479,29 @@ good_area:
}
/*
- * Major/minor page fault accounting is only done on the
- * initial attempt. If we go through a retry, it is extremely
- * likely that the page will be found in page cache at that point.
+ * Major/minor page fault accounting.
*/
- if (flags & FAULT_FLAG_ALLOW_RETRY) {
- if (fault & VM_FAULT_MAJOR) {
- current->maj_flt++;
- perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
- regs, address);
+ if (fault & VM_FAULT_MAJOR) {
+ current->maj_flt++;
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
+ regs, address);
#ifdef CONFIG_PPC_SMLPAR
- if (firmware_has_feature(FW_FEATURE_CMO)) {
- u32 page_ins;
-
- preempt_disable();
- page_ins = be32_to_cpu(get_lppaca()->page_ins);
- page_ins += 1 << PAGE_FACTOR;
- get_lppaca()->page_ins = cpu_to_be32(page_ins);
- preempt_enable();
- }
-#endif /* CONFIG_PPC_SMLPAR */
- } else {
- current->min_flt++;
- perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
- regs, address);
- }
- if (fault & VM_FAULT_RETRY) {
- /* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
- * of starvation. */
- flags &= ~FAULT_FLAG_ALLOW_RETRY;
- flags |= FAULT_FLAG_TRIED;
- goto retry;
+ if (firmware_has_feature(FW_FEATURE_CMO)) {
+ u32 page_ins;
+
+ preempt_disable();
+ page_ins = be32_to_cpu(get_lppaca()->page_ins);
+ page_ins += 1 << PAGE_FACTOR;
+ get_lppaca()->page_ins = cpu_to_be32(page_ins);
+ preempt_enable();
}
+#endif /* CONFIG_PPC_SMLPAR */
+ } else {
+ current->min_flt++;
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
+ regs, address);
}
- up_read(&mm->mmap_sem);
goto bail;
bad_area:
diff --git a/arch/powerpc/mm/hash_low_32.S b/arch/powerpc/mm/hash_low_32.S
index 09cc50c8dace..6f962e5cb5e1 100644
--- a/arch/powerpc/mm/hash_low_32.S
+++ b/arch/powerpc/mm/hash_low_32.S
@@ -31,10 +31,8 @@
#ifdef CONFIG_SMP
.section .bss
.align 2
- .globl mmu_hash_lock
mmu_hash_lock:
.space 4
-EXPORT_SYMBOL(mmu_hash_lock)
#endif /* CONFIG_SMP */
/*
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index c554768b1fa2..f2095ce9d4b0 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -35,9 +35,8 @@
#include <linux/memblock.h>
#include <linux/context_tracking.h>
#include <linux/libfdt.h>
-#include <linux/debugfs.h>
-#include <asm/debug.h>
+#include <asm/debugfs.h>
#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
@@ -927,11 +926,6 @@ static void __init htab_initialize(void)
}
#endif /* CONFIG_DEBUG_PAGEALLOC */
- /* On U3 based machines, we need to reserve the DART area and
- * _NOT_ map it to avoid cache paradoxes as it's remapped non
- * cacheable later on
- */
-
/* create bolted the linear mapping in the hash table */
for_each_memblock(memory, reg) {
base = (unsigned long)__va(reg->base);
@@ -981,6 +975,19 @@ void __init hash__early_init_devtree(void)
void __init hash__early_init_mmu(void)
{
+ /*
+ * We have code in __hash_page_64K() and elsewhere, which assumes it can
+ * do the following:
+ * new_pte |= (slot << H_PAGE_F_GIX_SHIFT) & (H_PAGE_F_SECOND | H_PAGE_F_GIX);
+ *
+ * Where the slot number is between 0-15, and values of 8-15 indicate
+ * the secondary bucket. For that code to work H_PAGE_F_SECOND and
+ * H_PAGE_F_GIX must occupy four contiguous bits in the PTE, and
+ * H_PAGE_F_SECOND must be placed above H_PAGE_F_GIX. Assert that here
+ * with a BUILD_BUG_ON().
+ */
+ BUILD_BUG_ON(H_PAGE_F_SECOND != (1ul << (H_PAGE_F_GIX_SHIFT + 3)));
+
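
The invariant asserted above is that H_PAGE_F_GIX occupies three contiguous bits with H_PAGE_F_SECOND immediately above them, so a 4-bit slot number 0-15 (8-15 meaning the secondary bucket) can be ORed into the PTE with a single shift and mask. A small stand-alone check of that property, using an example shift rather than the real H_PAGE_F_GIX_SHIFT:

#include <assert.h>
#include <stdio.h>

/* Illustration of the bit layout asserted above; the shift value is an
 * example, not the actual PTE layout.
 */
int main(void)
{
	unsigned long gix_shift = 12;			/* example only */
	unsigned long gix = 0x7UL << gix_shift;		/* 3 contiguous bits */
	unsigned long second = 1UL << (gix_shift + 3);	/* bit just above GIX */
	unsigned long slot;

	for (slot = 0; slot < 16; slot++) {
		unsigned long field = (slot << gix_shift) & (second | gix);

		assert(field >> gix_shift == slot);	/* nothing is lost */
	}
	printf("slots 0-15 all fit in SECOND|GIX\n");
	return 0;
}
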
htab_init_page_sizes();
/*
@@ -1120,7 +1127,7 @@ void demote_segment_4k(struct mm_struct *mm, unsigned long addr)
copro_flush_all_slbs(mm);
if ((get_paca_psize(addr) != MMU_PAGE_4K) && (current->mm == mm)) {
- copy_mm_to_paca(&mm->context);
+ copy_mm_to_paca(mm);
slb_flush_and_rebolt();
}
}
@@ -1192,7 +1199,7 @@ static void check_paca_psize(unsigned long ea, struct mm_struct *mm,
{
if (user_region) {
if (psize != get_paca_psize(ea)) {
- copy_mm_to_paca(&mm->context);
+ copy_mm_to_paca(mm);
slb_flush_and_rebolt();
}
} else if (get_paca()->vmalloc_sllp !=
@@ -1855,5 +1862,4 @@ static int __init hash64_debugfs(void)
return 0;
}
machine_device_initcall(pseries, hash64_debugfs);
-
#endif /* CONFIG_DEBUG_FS */
diff --git a/arch/powerpc/mm/hugetlbpage-book3e.c b/arch/powerpc/mm/hugetlbpage-book3e.c
index 83a8be791e06..bfe4e8526b2d 100644
--- a/arch/powerpc/mm/hugetlbpage-book3e.c
+++ b/arch/powerpc/mm/hugetlbpage-book3e.c
@@ -148,16 +148,9 @@ void book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea,
mm = vma->vm_mm;
-#ifdef CONFIG_PPC_MM_SLICES
- psize = get_slice_psize(mm, ea);
- tsize = mmu_get_tsize(psize);
- shift = mmu_psize_defs[psize].shift;
-#else
psize = vma_mmu_pagesize(vma);
shift = __ilog2(psize);
tsize = shift - 10;
-#endif
-
/*
* We can't be interrupted while we're setting up the MAS
* registers or after we've confirmed that no tlb exists.
diff --git a/arch/powerpc/mm/hugetlbpage-radix.c b/arch/powerpc/mm/hugetlbpage-radix.c
index 35254a678456..6575b9aabef4 100644
--- a/arch/powerpc/mm/hugetlbpage-radix.c
+++ b/arch/powerpc/mm/hugetlbpage-radix.c
@@ -50,9 +50,12 @@ radix__hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
struct hstate *h = hstate_file(file);
struct vm_unmapped_area_info info;
+ if (unlikely(addr > mm->context.addr_limit && addr < TASK_SIZE))
+ mm->context.addr_limit = TASK_SIZE;
+
if (len & ~huge_page_mask(h))
return -EINVAL;
- if (len > TASK_SIZE)
+ if (len > mm->task_size)
return -ENOMEM;
if (flags & MAP_FIXED) {
@@ -64,7 +67,7 @@ radix__hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
if (addr) {
addr = ALIGN(addr, huge_page_size(h));
vma = find_vma(mm, addr);
- if (TASK_SIZE - len >= addr &&
+ if (mm->task_size - len >= addr &&
(!vma || addr + len <= vma->vm_start))
return addr;
}
@@ -78,5 +81,9 @@ radix__hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
info.high_limit = current->mm->mmap_base;
info.align_mask = PAGE_MASK & ~huge_page_mask(h);
info.align_offset = 0;
+
+ if (addr > DEFAULT_MAP_WINDOW)
+ info.high_limit += mm->context.addr_limit - DEFAULT_MAP_WINDOW;
+
return vm_unmapped_area(&info);
}
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 8c3389cbcd12..a4f33de4008e 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -753,6 +753,24 @@ static int __init add_huge_page_size(unsigned long long size)
if ((mmu_psize = shift_to_mmu_psize(shift)) < 0)
return -EINVAL;
+#ifdef CONFIG_PPC_BOOK3S_64
+ /*
+ * Of the page sizes reported by firmware, only add hugetlb
+ * support for those that the Linux page table layout can
+ * handle.
+ * For now we have
+ * Radix: 2M
+ * Hash: 16M and 16G
+ */
+ if (radix_enabled()) {
+ if (mmu_psize != MMU_PAGE_2M)
+ return -EINVAL;
+ } else {
+ if (mmu_psize != MMU_PAGE_16M && mmu_psize != MMU_PAGE_16G)
+ return -EINVAL;
+ }
+#endif
+
BUG_ON(mmu_psize_defs[mmu_psize].shift != shift);
/* Return if huge page size has already been setup */
diff --git a/arch/powerpc/mm/icswx.c b/arch/powerpc/mm/icswx.c
index 915412e4d5ba..1fa794d7d59f 100644
--- a/arch/powerpc/mm/icswx.c
+++ b/arch/powerpc/mm/icswx.c
@@ -186,7 +186,7 @@ static u32 acop_get_inst(struct pt_regs *regs)
}
/**
- * @regs: regsiters at time of interrupt
+ * @regs: registers at time of interrupt
* @address: storage address
* @error_code: Fault code, usually the DSISR or ESR depending on
* processor type
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index c22f207aa656..ec84b31c6c86 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -71,10 +71,6 @@
#if H_PGTABLE_RANGE > USER_VSID_RANGE
#warning Limited user VSID range means pagetable space is wasted
#endif
-
-#if (TASK_SIZE_USER64 < H_PGTABLE_RANGE) && (TASK_SIZE_USER64 < USER_VSID_RANGE)
-#warning TASK_SIZE is smaller than it needs to be.
-#endif
#endif /* CONFIG_PPC_STD_MMU_64 */
phys_addr_t memstart_addr = ~0;
diff --git a/arch/powerpc/mm/mmap.c b/arch/powerpc/mm/mmap.c
index a5d9ef59debe..9dbd2a733d6b 100644
--- a/arch/powerpc/mm/mmap.c
+++ b/arch/powerpc/mm/mmap.c
@@ -59,13 +59,14 @@ static inline int mmap_is_legacy(void)
unsigned long arch_mmap_rnd(void)
{
- unsigned long rnd;
+ unsigned long shift, rnd;
- /* 8MB for 32bit, 1GB for 64bit */
+ shift = mmap_rnd_bits;
+#ifdef CONFIG_COMPAT
if (is_32bit_task())
- rnd = get_random_long() % (1<<(23-PAGE_SHIFT));
- else
- rnd = get_random_long() % (1UL<<(30-PAGE_SHIFT));
+ shift = mmap_rnd_compat_bits;
+#endif
+ rnd = get_random_long() % (1ul << shift);
return rnd << PAGE_SHIFT;
}
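
After this change the ASLR span comes from mmap_rnd_bits (or mmap_rnd_compat_bits for 32-bit tasks) rather than the old fixed 8MB/1GB windows. As a rough worked example, and with the shift values below chosen purely for illustration, 14 bits of randomness on 64K pages covers a 1GB window:

#include <stdio.h>

/* Illustration only: how many bytes of mmap randomization a given shift
 * provides, mirroring "rnd << PAGE_SHIFT" in the hunk above.
 */
int main(void)
{
	unsigned long page_shift = 16;	/* 64K pages, an example config */
	unsigned long shift = 14;	/* e.g. mmap_rnd_bits */
	unsigned long span = 1UL << (shift + page_shift);

	printf("randomization span: %lu bytes (%lu MB)\n", span, span >> 20);
	return 0;
}
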
@@ -79,7 +80,7 @@ static inline unsigned long mmap_base(unsigned long rnd)
else if (gap > MAX_GAP)
gap = MAX_GAP;
- return PAGE_ALIGN(TASK_SIZE - gap - rnd);
+ return PAGE_ALIGN(DEFAULT_MAP_WINDOW - gap - rnd);
}
#ifdef CONFIG_PPC_RADIX_MMU
@@ -97,7 +98,11 @@ radix__arch_get_unmapped_area(struct file *filp, unsigned long addr,
struct vm_area_struct *vma;
struct vm_unmapped_area_info info;
- if (len > TASK_SIZE - mmap_min_addr)
+ if (unlikely(addr > mm->context.addr_limit &&
+ mm->context.addr_limit != TASK_SIZE))
+ mm->context.addr_limit = TASK_SIZE;
+
+ if (len > mm->task_size - mmap_min_addr)
return -ENOMEM;
if (flags & MAP_FIXED)
@@ -106,7 +111,7 @@ radix__arch_get_unmapped_area(struct file *filp, unsigned long addr,
if (addr) {
addr = PAGE_ALIGN(addr);
vma = find_vma(mm, addr);
- if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
+ if (mm->task_size - len >= addr && addr >= mmap_min_addr &&
(!vma || addr + len <= vma->vm_start))
return addr;
}
@@ -114,8 +119,13 @@ radix__arch_get_unmapped_area(struct file *filp, unsigned long addr,
info.flags = 0;
info.length = len;
info.low_limit = mm->mmap_base;
- info.high_limit = TASK_SIZE;
info.align_mask = 0;
+
+ if (unlikely(addr > DEFAULT_MAP_WINDOW))
+ info.high_limit = mm->context.addr_limit;
+ else
+ info.high_limit = DEFAULT_MAP_WINDOW;
+
return vm_unmapped_area(&info);
}
@@ -131,8 +141,12 @@ radix__arch_get_unmapped_area_topdown(struct file *filp,
unsigned long addr = addr0;
struct vm_unmapped_area_info info;
+ if (unlikely(addr > mm->context.addr_limit &&
+ mm->context.addr_limit != TASK_SIZE))
+ mm->context.addr_limit = TASK_SIZE;
+
/* requested length too big for entire address space */
- if (len > TASK_SIZE - mmap_min_addr)
+ if (len > mm->task_size - mmap_min_addr)
return -ENOMEM;
if (flags & MAP_FIXED)
@@ -142,7 +156,7 @@ radix__arch_get_unmapped_area_topdown(struct file *filp,
if (addr) {
addr = PAGE_ALIGN(addr);
vma = find_vma(mm, addr);
- if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
+ if (mm->task_size - len >= addr && addr >= mmap_min_addr &&
(!vma || addr + len <= vma->vm_start))
return addr;
}
@@ -152,7 +166,14 @@ radix__arch_get_unmapped_area_topdown(struct file *filp,
info.low_limit = max(PAGE_SIZE, mmap_min_addr);
info.high_limit = mm->mmap_base;
info.align_mask = 0;
+
+ if (addr > DEFAULT_MAP_WINDOW)
+ info.high_limit += mm->context.addr_limit - DEFAULT_MAP_WINDOW;
+
addr = vm_unmapped_area(&info);
+ if (!(addr & ~PAGE_MASK))
+ return addr;
+ VM_BUG_ON(addr != -ENOMEM);
/*
* A failed mmap() very likely causes application failure,
@@ -160,15 +181,7 @@ radix__arch_get_unmapped_area_topdown(struct file *filp,
* can happen with large stack limits and large mmap()
* allocations.
*/
- if (addr & ~PAGE_MASK) {
- VM_BUG_ON(addr != -ENOMEM);
- info.flags = 0;
- info.low_limit = TASK_UNMAPPED_BASE;
- info.high_limit = TASK_SIZE;
- addr = vm_unmapped_area(&info);
- }
-
- return addr;
+ return radix__arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
}
static void radix__arch_pick_mmap_layout(struct mm_struct *mm,
diff --git a/arch/powerpc/mm/mmu_context_book3s64.c b/arch/powerpc/mm/mmu_context_book3s64.c
index 73bf6e14c3aa..c6dca2ae78ef 100644
--- a/arch/powerpc/mm/mmu_context_book3s64.c
+++ b/arch/powerpc/mm/mmu_context_book3s64.c
@@ -30,17 +30,16 @@
static DEFINE_SPINLOCK(mmu_context_lock);
static DEFINE_IDA(mmu_context_ida);
-int __init_new_context(void)
+static int alloc_context_id(int min_id, int max_id)
{
- int index;
- int err;
+ int index, err;
again:
if (!ida_pre_get(&mmu_context_ida, GFP_KERNEL))
return -ENOMEM;
spin_lock(&mmu_context_lock);
- err = ida_get_new_above(&mmu_context_ida, 1, &index);
+ err = ida_get_new_above(&mmu_context_ida, min_id, &index);
spin_unlock(&mmu_context_lock);
if (err == -EAGAIN)
@@ -48,7 +47,7 @@ again:
else if (err)
return err;
- if (index > MAX_USER_CONTEXT) {
+ if (index > max_id) {
spin_lock(&mmu_context_lock);
ida_remove(&mmu_context_ida, index);
spin_unlock(&mmu_context_lock);
@@ -57,48 +56,105 @@ again:
return index;
}
-EXPORT_SYMBOL_GPL(__init_new_context);
-static int radix__init_new_context(struct mm_struct *mm, int index)
+
+void hash__reserve_context_id(int id)
+{
+ int rc, result = 0;
+
+ do {
+ if (!ida_pre_get(&mmu_context_ida, GFP_KERNEL))
+ break;
+
+ spin_lock(&mmu_context_lock);
+ rc = ida_get_new_above(&mmu_context_ida, id, &result);
+ spin_unlock(&mmu_context_lock);
+ } while (rc == -EAGAIN);
+
+ WARN(result != id, "mmu: Failed to reserve context id %d (rc %d)\n", id, result);
+}
+
+int hash__alloc_context_id(void)
+{
+ unsigned long max;
+
+ if (mmu_has_feature(MMU_FTR_68_BIT_VA))
+ max = MAX_USER_CONTEXT;
+ else
+ max = MAX_USER_CONTEXT_65BIT_VA;
+
+ return alloc_context_id(MIN_USER_CONTEXT, max);
+}
+EXPORT_SYMBOL_GPL(hash__alloc_context_id);
+
+static int hash__init_new_context(struct mm_struct *mm)
+{
+ int index;
+
+ index = hash__alloc_context_id();
+ if (index < 0)
+ return index;
+
+ /*
+	 * We do switch_slb() early in fork, even before we set up
+	 * mm->context.addr_limit. Default to the max task size so that we copy
+	 * the default values to the paca, which helps us handle SLB misses early.
+ */
+ mm->context.addr_limit = TASK_SIZE_128TB;
+
+ /*
+	 * The old code would re-promote on fork; we don't do that when using
+	 * slices as it could cause problems promoting slices that have been
+ * forced down to 4K.
+ *
+ * For book3s we have MMU_NO_CONTEXT set to be ~0. Hence check
+ * explicitly against context.id == 0. This ensures that we properly
+ * initialize context slice details for newly allocated mm's (which will
+ * have id == 0) and don't alter context slice inherited via fork (which
+ * will have id != 0).
+ *
+ * We should not be calling init_new_context() on init_mm. Hence a
+ * check against 0 is OK.
+ */
+ if (mm->context.id == 0)
+ slice_set_user_psize(mm, mmu_virtual_psize);
+
+ subpage_prot_init_new_context(mm);
+
+ return index;
+}
+
+static int radix__init_new_context(struct mm_struct *mm)
{
unsigned long rts_field;
+ int index;
+
+ index = alloc_context_id(1, PRTB_ENTRIES - 1);
+ if (index < 0)
+ return index;
/*
* set the process table entry,
*/
rts_field = radix__get_tree_size();
process_tb[index].prtb0 = cpu_to_be64(rts_field | __pa(mm->pgd) | RADIX_PGD_INDEX_SIZE);
- return 0;
+
+ mm->context.npu_context = NULL;
+
+ return index;
}
int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
int index;
- index = __init_new_context();
+ if (radix_enabled())
+ index = radix__init_new_context(mm);
+ else
+ index = hash__init_new_context(mm);
+
if (index < 0)
return index;
- if (radix_enabled()) {
- radix__init_new_context(mm, index);
- } else {
-
- /* The old code would re-promote on fork, we don't do that
- * when using slices as it could cause problem promoting slices
- * that have been forced down to 4K
- *
- * For book3s we have MMU_NO_CONTEXT set to be ~0. Hence check
- * explicitly against context.id == 0. This ensures that we
- * properly initialize context slice details for newly allocated
- * mm's (which will have id == 0) and don't alter context slice
- * inherited via fork (which will have id != 0).
- *
- * We should not be calling init_new_context() on init_mm. Hence a
- * check against 0 is ok.
- */
- if (mm->context.id == 0)
- slice_set_user_psize(mm, mmu_virtual_psize);
- subpage_prot_init_new_context(mm);
- }
mm->context.id = index;
#ifdef CONFIG_PPC_ICSWX
mm->context.cop_lockp = kmalloc(sizeof(spinlock_t), GFP_KERNEL);
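alloc_context_id() above centralises the ida_pre_get()/ida_get_new_above() retry loop and bounds the result, so the hash and radix paths can request different id ranges. A rough user-space model of that allocate-then-bound pattern, with a toy allocator standing in for the kernel IDA (the helper names and range sizes here are illustrative):

#include <stdio.h>

#define MAX_ID 64

static unsigned char used[MAX_ID + 1];

/* Toy stand-in for ida_get_new_above(): return the smallest free id >= min. */
static int toy_get_new_above(int min, int *out)
{
	for (int i = min; i <= MAX_ID; i++) {
		if (!used[i]) {
			used[i] = 1;
			*out = i;
			return 0;
		}
	}
	return -1;
}

/* Mirror of the alloc_context_id() contract: allocate in [min_id, max_id],
 * releasing the id and failing (kernel: -ENOMEM) if it falls past the ceiling. */
static int alloc_context_id(int min_id, int max_id)
{
	int index;

	if (toy_get_new_above(min_id, &index))
		return -1;
	if (index > max_id) {
		used[index] = 0;	/* kernel: ida_remove() */
		return -1;
	}
	return index;
}

int main(void)
{
	printf("radix-style id: %d\n", alloc_context_id(1, 16));
	printf("hash-style id:  %d\n", alloc_context_id(2, MAX_ID));
	return 0;
}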
diff --git a/arch/powerpc/mm/mmu_context_iommu.c b/arch/powerpc/mm/mmu_context_iommu.c
index 497130c5c742..e0a2d8e806ed 100644
--- a/arch/powerpc/mm/mmu_context_iommu.c
+++ b/arch/powerpc/mm/mmu_context_iommu.c
@@ -81,7 +81,7 @@ struct page *new_iommu_non_cma_page(struct page *page, unsigned long private,
gfp_t gfp_mask = GFP_USER;
struct page *new_page;
- if (PageHuge(page) || PageTransHuge(page) || PageCompound(page))
+ if (PageCompound(page))
return NULL;
if (PageHighMem(page))
@@ -100,7 +100,7 @@ static int mm_iommu_move_page_from_cma(struct page *page)
LIST_HEAD(cma_migrate_pages);
/* Ignore huge pages for now */
- if (PageHuge(page) || PageTransHuge(page) || PageCompound(page))
+ if (PageCompound(page))
return -EBUSY;
lru_add_drain();
@@ -314,6 +314,25 @@ struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm,
}
EXPORT_SYMBOL_GPL(mm_iommu_lookup);
+struct mm_iommu_table_group_mem_t *mm_iommu_lookup_rm(struct mm_struct *mm,
+ unsigned long ua, unsigned long size)
+{
+ struct mm_iommu_table_group_mem_t *mem, *ret = NULL;
+
+ list_for_each_entry_lockless(mem, &mm->context.iommu_group_mem_list,
+ next) {
+ if ((mem->ua <= ua) &&
+ (ua + size <= mem->ua +
+ (mem->entries << PAGE_SHIFT))) {
+ ret = mem;
+ break;
+ }
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mm_iommu_lookup_rm);
+
struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm,
unsigned long ua, unsigned long entries)
{
@@ -345,6 +364,26 @@ long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
}
EXPORT_SYMBOL_GPL(mm_iommu_ua_to_hpa);
+long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem,
+ unsigned long ua, unsigned long *hpa)
+{
+ const long entry = (ua - mem->ua) >> PAGE_SHIFT;
+ void *va = &mem->hpas[entry];
+ unsigned long *pa;
+
+ if (entry >= mem->entries)
+ return -EFAULT;
+
+ pa = (void *) vmalloc_to_phys(va);
+ if (!pa)
+ return -EFAULT;
+
+ *hpa = *pa | (ua & ~PAGE_MASK);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mm_iommu_ua_to_hpa_rm);
+
long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem)
{
if (atomic64_inc_not_zero(&mem->mapped))
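mm_iommu_ua_to_hpa_rm() above is pure arithmetic: index the preregistered region's hpas[] table by page offset, then splice the in-page offset back in. A hedged user-space sketch of that translation (PAGE_SHIFT and the table contents are illustrative):

#include <stdio.h>

#define PAGE_SHIFT 16				/* 64K pages, as on book3s64 */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

/* Translate ua -> hpa given the region base and its per-page hpa table. */
static int ua_to_hpa(unsigned long ua, unsigned long region_ua,
		     const unsigned long *hpas, unsigned long entries,
		     unsigned long *hpa)
{
	unsigned long entry = (ua - region_ua) >> PAGE_SHIFT;

	if (entry >= entries)
		return -1;			/* kernel: -EFAULT */

	*hpa = hpas[entry] | (ua & ~PAGE_MASK);
	return 0;
}

int main(void)
{
	unsigned long hpas[2] = { 0x200000000UL, 0x200010000UL };
	unsigned long hpa;

	if (!ua_to_hpa(0x10000123UL, 0x10000000UL, hpas, 2, &hpa))
		printf("hpa = 0x%lx\n", hpa);	/* 0x200000123 */
	return 0;
}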
diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c
index c491f2c8f2b9..4554d6527682 100644
--- a/arch/powerpc/mm/mmu_context_nohash.c
+++ b/arch/powerpc/mm/mmu_context_nohash.c
@@ -333,11 +333,6 @@ int init_new_context(struct task_struct *t, struct mm_struct *mm)
mm->context.id = MMU_NO_CONTEXT;
mm->context.active = 0;
-
-#ifdef CONFIG_PPC_MM_SLICES
- slice_set_user_psize(mm, mmu_virtual_psize);
-#endif
-
return 0;
}
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 9befaee237d6..371792e4418f 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -875,13 +875,6 @@ static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn)
void *nd;
int tnid;
- if (spanned_pages)
- pr_info("Initmem setup node %d [mem %#010Lx-%#010Lx]\n",
- nid, start_pfn << PAGE_SHIFT,
- (end_pfn << PAGE_SHIFT) - 1);
- else
- pr_info("Initmem setup node %d\n", nid);
-
nd_pa = memblock_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid);
nd = __va(nd_pa);
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index 5e01b2ece1d0..654a0d7ba0e7 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -131,7 +131,7 @@ static void __slb_flush_and_rebolt(void)
"slbmte %2,%3\n"
"isync"
:: "r"(mk_vsid_data(VMALLOC_START, mmu_kernel_ssize, vflags)),
- "r"(mk_esid_data(VMALLOC_START, mmu_kernel_ssize, 1)),
+ "r"(mk_esid_data(VMALLOC_START, mmu_kernel_ssize, VMALLOC_INDEX)),
"r"(ksp_vsid_data),
"r"(ksp_esid_data)
: "memory");
@@ -229,7 +229,7 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
asm volatile("slbie %0" : : "r" (slbie_data));
get_paca()->slb_cache_ptr = 0;
- copy_mm_to_paca(&mm->context);
+ copy_mm_to_paca(mm);
/*
* preload some userspace segments into the SLB.
diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S
index a85e06ea6c20..1519617aab36 100644
--- a/arch/powerpc/mm/slb_low.S
+++ b/arch/powerpc/mm/slb_low.S
@@ -23,6 +23,48 @@
#include <asm/pgtable.h>
#include <asm/firmware.h>
+/*
+ * This macro generates asm code to compute the VSID scramble
+ * function. Used in slb_allocate() and do_stab_bolted. The function
+ * computed is: (protovsid*VSID_MULTIPLIER) % VSID_MODULUS
+ *
+ * rt = register containing the proto-VSID and into which the
+ * VSID will be stored
+ * rx = scratch register (clobbered)
+ * rf = flags
+ *
+ * - rt and rx must be different registers
+ * - The answer will end up in the low VSID_BITS bits of rt. The higher
+ * bits may contain other garbage, so you may need to mask the
+ * result.
+ */
+#define ASM_VSID_SCRAMBLE(rt, rx, rf, size) \
+ lis rx,VSID_MULTIPLIER_##size@h; \
+ ori rx,rx,VSID_MULTIPLIER_##size@l; \
+ mulld rt,rt,rx; /* rt = rt * MULTIPLIER */ \
+/* \
+ * powermac gets SLB faults before the feature fixup, so make the 65-bit \
+ * part the default part of the feature fixup \
+ */ \
+BEGIN_MMU_FTR_SECTION \
+ srdi rx,rt,VSID_BITS_65_##size; \
+ clrldi rt,rt,(64-VSID_BITS_65_##size); \
+ add rt,rt,rx; \
+ addi rx,rt,1; \
+ srdi rx,rx,VSID_BITS_65_##size; \
+ add rt,rt,rx; \
+ rldimi rf,rt,SLB_VSID_SHIFT_##size,(64 - (SLB_VSID_SHIFT_##size + VSID_BITS_65_##size)); \
+MMU_FTR_SECTION_ELSE \
+ srdi rx,rt,VSID_BITS_##size; \
+ clrldi rt,rt,(64-VSID_BITS_##size); \
+ add rt,rt,rx; /* add high and low bits */ \
+ addi rx,rt,1; \
+ srdi rx,rx,VSID_BITS_##size; /* extract 2^VSID_BITS bit */ \
+ add rt,rt,rx; \
+ rldimi rf,rt,SLB_VSID_SHIFT_##size,(64 - (SLB_VSID_SHIFT_##size + VSID_BITS_##size)); \
+ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_68_BIT_VA)
+
+
/* void slb_allocate_realmode(unsigned long ea);
*
* Create an SLB entry for the given EA (user or kernel).
@@ -45,13 +87,6 @@ _GLOBAL(slb_allocate_realmode)
/* r3 = address, r10 = esid, cr7 = <> PAGE_OFFSET */
blt cr7,0f /* user or kernel? */
- /* kernel address: proto-VSID = ESID */
- /* WARNING - MAGIC: we don't use the VSID 0xfffffffff, but
- * this code will generate the protoVSID 0xfffffffff for the
- * top segment. That's ok, the scramble below will translate
- * it to VSID 0, which is reserved as a bad VSID - one which
- * will never have any pages in it. */
-
/* Check if hitting the linear mapping or some other kernel space
*/
bne cr7,1f
@@ -63,12 +98,10 @@ _GLOBAL(slb_allocate_realmode)
slb_miss_kernel_load_linear:
li r11,0
/*
- * context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1
+ * context = (ea >> 60) - (0xc - 1)
* r9 = region id.
*/
- addis r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@ha
- addi r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@l
-
+ subi r9,r9,KERNEL_REGION_CONTEXT_OFFSET
BEGIN_FTR_SECTION
b .Lslb_finish_load
@@ -77,9 +110,9 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
1:
#ifdef CONFIG_SPARSEMEM_VMEMMAP
- /* Check virtual memmap region. To be patches at kernel boot */
cmpldi cr0,r9,0xf
bne 1f
+/* Check virtual memmap region. To be patched at kernel boot */
.globl slb_miss_kernel_load_vmemmap
slb_miss_kernel_load_vmemmap:
li r11,0
@@ -102,11 +135,10 @@ slb_miss_kernel_load_io:
li r11,0
6:
/*
- * context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1
+ * context = (ea >> 60) - (0xc - 1)
* r9 = region id.
*/
- addis r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@ha
- addi r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@l
+ subi r9,r9,KERNEL_REGION_CONTEXT_OFFSET
BEGIN_FTR_SECTION
b .Lslb_finish_load
@@ -117,7 +149,13 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
* For userspace addresses, make sure this is region 0.
*/
cmpdi r9, 0
- bne 8f
+ bne- 8f
+ /*
+	 * For user space addresses, make sure we are within the allowed limit
+ */
+ ld r11,PACA_ADDR_LIMIT(r13)
+ cmpld r3,r11
+ bge- 8f
/* when using slices, we extract the psize off the slice bitmaps
* and then we need to get the sllp encoding off the mmu_psize_defs
@@ -189,13 +227,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
*/
.Lslb_finish_load:
rldimi r10,r9,ESID_BITS,0
- ASM_VSID_SCRAMBLE(r10,r9,256M)
- /*
- * bits above VSID_BITS_256M need to be ignored from r10
- * also combine VSID and flags
- */
- rldimi r11,r10,SLB_VSID_SHIFT,(64 - (SLB_VSID_SHIFT + VSID_BITS_256M))
-
+ ASM_VSID_SCRAMBLE(r10,r9,r11,256M)
/* r3 = EA, r11 = VSID data */
/*
* Find a slot, round robin. Previously we tried to find a
@@ -259,12 +291,12 @@ slb_compare_rr_to_size:
.Lslb_finish_load_1T:
srdi r10,r10,(SID_SHIFT_1T - SID_SHIFT) /* get 1T ESID */
rldimi r10,r9,ESID_BITS_1T,0
- ASM_VSID_SCRAMBLE(r10,r9,1T)
+ ASM_VSID_SCRAMBLE(r10,r9,r11,1T)
/*
* bits above VSID_BITS_1T need to be ignored from r10
* also combine VSID and flags
*/
- rldimi r11,r10,SLB_VSID_SHIFT_1T,(64 - (SLB_VSID_SHIFT_1T + VSID_BITS_1T))
+
li r10,MMU_SEGSIZE_1T
rldimi r11,r10,SLB_VSID_SSIZE_SHIFT,0 /* insert segment size */
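ASM_VSID_SCRAMBLE computes (protovsid * VSID_MULTIPLIER) % VSID_MODULUS without a divide: because the modulus is 2^VSID_BITS - 1, the high bits can be folded into the low bits and a single carry correction finishes the reduction. A user-space sketch of that fold with illustrative parameters (the real VSID_BITS and multiplier depend on segment size and the 65/68-bit VA mode):

#include <stdio.h>

/* Illustrative parameters; the kernel uses a 24-bit prime multiplier and
 * per-segment-size bit widths. */
#define VSID_BITS	25
#define VSID_MULTIPLIER	12538073ULL
#define VSID_MODULUS	((1ULL << VSID_BITS) - 1)

/* Fold-based reduction modulo 2^VSID_BITS - 1, mirroring the asm macro:
 * add the high chunk into the low chunk, then fold the wrap-around carry. */
static unsigned long long vsid_scramble(unsigned long long protovsid)
{
	unsigned long long x = protovsid * VSID_MULTIPLIER;
	unsigned long long v = (x & VSID_MODULUS) + (x >> VSID_BITS);

	v += (v + 1) >> VSID_BITS;	/* carry correction */
	return v & VSID_MODULUS;
}

int main(void)
{
	unsigned long long proto = 0x12345;

	printf("scramble: 0x%llx, %% check: 0x%llx\n",
	       vsid_scramble(proto),
	       (proto * VSID_MULTIPLIER) % VSID_MODULUS);
	return 0;
}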
diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
index 2b27458902ee..966b9fccfa66 100644
--- a/arch/powerpc/mm/slice.c
+++ b/arch/powerpc/mm/slice.c
@@ -36,38 +36,29 @@
#include <asm/copro.h>
#include <asm/hugetlb.h>
-/* some sanity checks */
-#if (H_PGTABLE_RANGE >> 43) > SLICE_MASK_SIZE
-#error H_PGTABLE_RANGE exceeds slice_mask high_slices size
-#endif
-
static DEFINE_SPINLOCK(slice_convert_lock);
-
+/*
+ * One bit per slice. We have lower slices which cover 256MB segments
+ * up to the 4G range. That gets us 16 low slices. For the rest we track
+ * slices in 1TB increments.
+ */
+struct slice_mask {
+ u64 low_slices;
+ DECLARE_BITMAP(high_slices, SLICE_NUM_HIGH);
+};
#ifdef DEBUG
int _slice_debug = 1;
static void slice_print_mask(const char *label, struct slice_mask mask)
{
- char *p, buf[16 + 3 + 64 + 1];
- int i;
-
if (!_slice_debug)
return;
- p = buf;
- for (i = 0; i < SLICE_NUM_LOW; i++)
- *(p++) = (mask.low_slices & (1 << i)) ? '1' : '0';
- *(p++) = ' ';
- *(p++) = '-';
- *(p++) = ' ';
- for (i = 0; i < SLICE_NUM_HIGH; i++)
- *(p++) = (mask.high_slices & (1ul << i)) ? '1' : '0';
- *(p++) = 0;
-
- printk(KERN_DEBUG "%s:%s\n", label, buf);
+ pr_devel("%s low_slice: %*pbl\n", label, (int)SLICE_NUM_LOW, &mask.low_slices);
+ pr_devel("%s high_slice: %*pbl\n", label, (int)SLICE_NUM_HIGH, mask.high_slices);
}
-#define slice_dbg(fmt...) do { if (_slice_debug) pr_debug(fmt); } while(0)
+#define slice_dbg(fmt...) do { if (_slice_debug) pr_devel(fmt); } while (0)
#else
@@ -76,25 +67,28 @@ static void slice_print_mask(const char *label, struct slice_mask mask) {}
#endif
-static struct slice_mask slice_range_to_mask(unsigned long start,
- unsigned long len)
+static void slice_range_to_mask(unsigned long start, unsigned long len,
+ struct slice_mask *ret)
{
unsigned long end = start + len - 1;
- struct slice_mask ret = { 0, 0 };
+
+ ret->low_slices = 0;
+ bitmap_zero(ret->high_slices, SLICE_NUM_HIGH);
if (start < SLICE_LOW_TOP) {
- unsigned long mend = min(end, SLICE_LOW_TOP);
- unsigned long mstart = min(start, SLICE_LOW_TOP);
+ unsigned long mend = min(end, (SLICE_LOW_TOP - 1));
- ret.low_slices = (1u << (GET_LOW_SLICE_INDEX(mend) + 1))
- - (1u << GET_LOW_SLICE_INDEX(mstart));
+ ret->low_slices = (1u << (GET_LOW_SLICE_INDEX(mend) + 1))
+ - (1u << GET_LOW_SLICE_INDEX(start));
}
- if ((start + len) > SLICE_LOW_TOP)
- ret.high_slices = (1ul << (GET_HIGH_SLICE_INDEX(end) + 1))
- - (1ul << GET_HIGH_SLICE_INDEX(start));
+ if ((start + len) > SLICE_LOW_TOP) {
+ unsigned long start_index = GET_HIGH_SLICE_INDEX(start);
+ unsigned long align_end = ALIGN(end, (1UL << SLICE_HIGH_SHIFT));
+ unsigned long count = GET_HIGH_SLICE_INDEX(align_end) - start_index;
- return ret;
+ bitmap_set(ret->high_slices, start_index, count);
+ }
}
static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
@@ -128,53 +122,60 @@ static int slice_high_has_vma(struct mm_struct *mm, unsigned long slice)
return !slice_area_is_free(mm, start, end - start);
}
-static struct slice_mask slice_mask_for_free(struct mm_struct *mm)
+static void slice_mask_for_free(struct mm_struct *mm, struct slice_mask *ret)
{
- struct slice_mask ret = { 0, 0 };
unsigned long i;
+ ret->low_slices = 0;
+ bitmap_zero(ret->high_slices, SLICE_NUM_HIGH);
+
for (i = 0; i < SLICE_NUM_LOW; i++)
if (!slice_low_has_vma(mm, i))
- ret.low_slices |= 1u << i;
+ ret->low_slices |= 1u << i;
if (mm->task_size <= SLICE_LOW_TOP)
- return ret;
+ return;
- for (i = 0; i < SLICE_NUM_HIGH; i++)
+ for (i = 0; i < GET_HIGH_SLICE_INDEX(mm->context.addr_limit); i++)
if (!slice_high_has_vma(mm, i))
- ret.high_slices |= 1ul << i;
-
- return ret;
+ __set_bit(i, ret->high_slices);
}
-static struct slice_mask slice_mask_for_size(struct mm_struct *mm, int psize)
+static void slice_mask_for_size(struct mm_struct *mm, int psize, struct slice_mask *ret)
{
unsigned char *hpsizes;
int index, mask_index;
- struct slice_mask ret = { 0, 0 };
unsigned long i;
u64 lpsizes;
+ ret->low_slices = 0;
+ bitmap_zero(ret->high_slices, SLICE_NUM_HIGH);
+
lpsizes = mm->context.low_slices_psize;
for (i = 0; i < SLICE_NUM_LOW; i++)
if (((lpsizes >> (i * 4)) & 0xf) == psize)
- ret.low_slices |= 1u << i;
+ ret->low_slices |= 1u << i;
hpsizes = mm->context.high_slices_psize;
- for (i = 0; i < SLICE_NUM_HIGH; i++) {
+ for (i = 0; i < GET_HIGH_SLICE_INDEX(mm->context.addr_limit); i++) {
mask_index = i & 0x1;
index = i >> 1;
if (((hpsizes[index] >> (mask_index * 4)) & 0xf) == psize)
- ret.high_slices |= 1ul << i;
+ __set_bit(i, ret->high_slices);
}
-
- return ret;
}
-static int slice_check_fit(struct slice_mask mask, struct slice_mask available)
+static int slice_check_fit(struct mm_struct *mm,
+ struct slice_mask mask, struct slice_mask available)
{
+ DECLARE_BITMAP(result, SLICE_NUM_HIGH);
+ unsigned long slice_count = GET_HIGH_SLICE_INDEX(mm->context.addr_limit);
+
+ bitmap_and(result, mask.high_slices,
+ available.high_slices, slice_count);
+
return (mask.low_slices & available.low_slices) == mask.low_slices &&
- (mask.high_slices & available.high_slices) == mask.high_slices;
+ bitmap_equal(result, mask.high_slices, slice_count);
}
static void slice_flush_segments(void *parm)
@@ -185,7 +186,7 @@ static void slice_flush_segments(void *parm)
if (mm != current->active_mm)
return;
- copy_mm_to_paca(&current->active_mm->context);
+ copy_mm_to_paca(current->active_mm);
local_irq_save(flags);
slb_flush_and_rebolt();
@@ -218,18 +219,18 @@ static void slice_convert(struct mm_struct *mm, struct slice_mask mask, int psiz
mm->context.low_slices_psize = lpsizes;
hpsizes = mm->context.high_slices_psize;
- for (i = 0; i < SLICE_NUM_HIGH; i++) {
+ for (i = 0; i < GET_HIGH_SLICE_INDEX(mm->context.addr_limit); i++) {
mask_index = i & 0x1;
index = i >> 1;
- if (mask.high_slices & (1ul << i))
+ if (test_bit(i, mask.high_slices))
hpsizes[index] = (hpsizes[index] &
~(0xf << (mask_index * 4))) |
(((unsigned long)psize) << (mask_index * 4));
}
slice_dbg(" lsps=%lx, hsps=%lx\n",
- mm->context.low_slices_psize,
- mm->context.high_slices_psize);
+ (unsigned long)mm->context.low_slices_psize,
+ (unsigned long)mm->context.high_slices_psize);
spin_unlock_irqrestore(&slice_convert_lock, flags);
@@ -257,14 +258,14 @@ static bool slice_scan_available(unsigned long addr,
slice = GET_HIGH_SLICE_INDEX(addr);
*boundary_addr = (slice + end) ?
((slice + end) << SLICE_HIGH_SHIFT) : SLICE_LOW_TOP;
- return !!(available.high_slices & (1ul << slice));
+ return !!test_bit(slice, available.high_slices);
}
}
static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
unsigned long len,
struct slice_mask available,
- int psize)
+ int psize, unsigned long high_limit)
{
int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
unsigned long addr, found, next_end;
@@ -276,7 +277,10 @@ static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
info.align_offset = 0;
addr = TASK_UNMAPPED_BASE;
- while (addr < TASK_SIZE) {
+ /*
+	 * Check up to the allowed max value for this mmap request
+ */
+ while (addr < high_limit) {
info.low_limit = addr;
if (!slice_scan_available(addr, available, 1, &addr))
continue;
@@ -288,8 +292,8 @@ static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
* Check if we need to reduce the range, or if we can
* extend it to cover the next available slice.
*/
- if (addr >= TASK_SIZE)
- addr = TASK_SIZE;
+ if (addr >= high_limit)
+ addr = high_limit;
else if (slice_scan_available(addr, available, 1, &next_end)) {
addr = next_end;
goto next_slice;
@@ -307,7 +311,7 @@ static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
static unsigned long slice_find_area_topdown(struct mm_struct *mm,
unsigned long len,
struct slice_mask available,
- int psize)
+ int psize, unsigned long high_limit)
{
int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
unsigned long addr, found, prev;
@@ -319,6 +323,15 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
info.align_offset = 0;
addr = mm->mmap_base;
+ /*
+	 * If we are trying to allocate above DEFAULT_MAP_WINDOW,
+	 * add the difference to mmap_base. Apply this only for
+	 * requests whose high_limit is above DEFAULT_MAP_WINDOW.
+ */
+ if (high_limit > DEFAULT_MAP_WINDOW)
+ addr += mm->context.addr_limit - DEFAULT_MAP_WINDOW;
+
while (addr > PAGE_SIZE) {
info.high_limit = addr;
if (!slice_scan_available(addr - 1, available, 0, &addr))
@@ -350,29 +363,38 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
* can happen with large stack limits and large mmap()
* allocations.
*/
- return slice_find_area_bottomup(mm, len, available, psize);
+ return slice_find_area_bottomup(mm, len, available, psize, high_limit);
}
static unsigned long slice_find_area(struct mm_struct *mm, unsigned long len,
struct slice_mask mask, int psize,
- int topdown)
+ int topdown, unsigned long high_limit)
{
if (topdown)
- return slice_find_area_topdown(mm, len, mask, psize);
+ return slice_find_area_topdown(mm, len, mask, psize, high_limit);
else
- return slice_find_area_bottomup(mm, len, mask, psize);
+ return slice_find_area_bottomup(mm, len, mask, psize, high_limit);
}
-#define or_mask(dst, src) do { \
- (dst).low_slices |= (src).low_slices; \
- (dst).high_slices |= (src).high_slices; \
-} while (0)
+static inline void slice_or_mask(struct slice_mask *dst, struct slice_mask *src)
+{
+ DECLARE_BITMAP(result, SLICE_NUM_HIGH);
+
+ dst->low_slices |= src->low_slices;
+ bitmap_or(result, dst->high_slices, src->high_slices, SLICE_NUM_HIGH);
+ bitmap_copy(dst->high_slices, result, SLICE_NUM_HIGH);
+}
-#define andnot_mask(dst, src) do { \
- (dst).low_slices &= ~(src).low_slices; \
- (dst).high_slices &= ~(src).high_slices; \
-} while (0)
+static inline void slice_andnot_mask(struct slice_mask *dst, struct slice_mask *src)
+{
+ DECLARE_BITMAP(result, SLICE_NUM_HIGH);
+
+ dst->low_slices &= ~src->low_slices;
+
+ bitmap_andnot(result, dst->high_slices, src->high_slices, SLICE_NUM_HIGH);
+ bitmap_copy(dst->high_slices, result, SLICE_NUM_HIGH);
+}
#ifdef CONFIG_PPC_64K_PAGES
#define MMU_PAGE_BASE MMU_PAGE_64K
@@ -384,14 +406,43 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
unsigned long flags, unsigned int psize,
int topdown)
{
- struct slice_mask mask = {0, 0};
+ struct slice_mask mask;
struct slice_mask good_mask;
- struct slice_mask potential_mask = {0,0} /* silence stupid warning */;
- struct slice_mask compat_mask = {0, 0};
+ struct slice_mask potential_mask;
+ struct slice_mask compat_mask;
int fixed = (flags & MAP_FIXED);
int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
struct mm_struct *mm = current->mm;
unsigned long newaddr;
+ unsigned long high_limit;
+
+ /*
+	 * Check if we need to expand the slice area.
+ */
+ if (unlikely(addr > mm->context.addr_limit &&
+ mm->context.addr_limit != TASK_SIZE)) {
+ mm->context.addr_limit = TASK_SIZE;
+ on_each_cpu(slice_flush_segments, mm, 1);
+ }
+ /*
+	 * This mmap request can allocate up to 512TB
+ */
+ if (addr > DEFAULT_MAP_WINDOW)
+ high_limit = mm->context.addr_limit;
+ else
+ high_limit = DEFAULT_MAP_WINDOW;
+ /*
+	 * Initialize the different masks
+ */
+ mask.low_slices = 0;
+ bitmap_zero(mask.high_slices, SLICE_NUM_HIGH);
+
+ /* silence stupid warning */;
+ potential_mask.low_slices = 0;
+ bitmap_zero(potential_mask.high_slices, SLICE_NUM_HIGH);
+
+ compat_mask.low_slices = 0;
+ bitmap_zero(compat_mask.high_slices, SLICE_NUM_HIGH);
/* Sanity checks */
BUG_ON(mm->task_size == 0);
@@ -423,7 +474,7 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
/* First make up a "good" mask of slices that have the right size
* already
*/
- good_mask = slice_mask_for_size(mm, psize);
+ slice_mask_for_size(mm, psize, &good_mask);
slice_print_mask(" good_mask", good_mask);
/*
@@ -448,22 +499,22 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
#ifdef CONFIG_PPC_64K_PAGES
/* If we support combo pages, we can allow 64k pages in 4k slices */
if (psize == MMU_PAGE_64K) {
- compat_mask = slice_mask_for_size(mm, MMU_PAGE_4K);
+ slice_mask_for_size(mm, MMU_PAGE_4K, &compat_mask);
if (fixed)
- or_mask(good_mask, compat_mask);
+ slice_or_mask(&good_mask, &compat_mask);
}
#endif
/* First check hint if it's valid or if we have MAP_FIXED */
if (addr != 0 || fixed) {
/* Build a mask for the requested range */
- mask = slice_range_to_mask(addr, len);
+ slice_range_to_mask(addr, len, &mask);
slice_print_mask(" mask", mask);
/* Check if we fit in the good mask. If we do, we just return,
* nothing else to do
*/
- if (slice_check_fit(mask, good_mask)) {
+ if (slice_check_fit(mm, mask, good_mask)) {
slice_dbg(" fits good !\n");
return addr;
}
@@ -471,7 +522,8 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
/* Now let's see if we can find something in the existing
* slices for that size
*/
- newaddr = slice_find_area(mm, len, good_mask, psize, topdown);
+ newaddr = slice_find_area(mm, len, good_mask,
+ psize, topdown, high_limit);
if (newaddr != -ENOMEM) {
/* Found within the good mask, we don't have to setup,
* we thus return directly
@@ -484,11 +536,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
/* We don't fit in the good mask, check what other slices are
* empty and thus can be converted
*/
- potential_mask = slice_mask_for_free(mm);
- or_mask(potential_mask, good_mask);
+ slice_mask_for_free(mm, &potential_mask);
+ slice_or_mask(&potential_mask, &good_mask);
slice_print_mask(" potential", potential_mask);
- if ((addr != 0 || fixed) && slice_check_fit(mask, potential_mask)) {
+ if ((addr != 0 || fixed) && slice_check_fit(mm, mask, potential_mask)) {
slice_dbg(" fits potential !\n");
goto convert;
}
@@ -503,7 +555,8 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
* anywhere in the good area.
*/
if (addr) {
- addr = slice_find_area(mm, len, good_mask, psize, topdown);
+ addr = slice_find_area(mm, len, good_mask,
+ psize, topdown, high_limit);
if (addr != -ENOMEM) {
slice_dbg(" found area at 0x%lx\n", addr);
return addr;
@@ -513,28 +566,29 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
/* Now let's see if we can find something in the existing slices
* for that size plus free slices
*/
- addr = slice_find_area(mm, len, potential_mask, psize, topdown);
+ addr = slice_find_area(mm, len, potential_mask,
+ psize, topdown, high_limit);
#ifdef CONFIG_PPC_64K_PAGES
if (addr == -ENOMEM && psize == MMU_PAGE_64K) {
/* retry the search with 4k-page slices included */
- or_mask(potential_mask, compat_mask);
- addr = slice_find_area(mm, len, potential_mask, psize,
- topdown);
+ slice_or_mask(&potential_mask, &compat_mask);
+ addr = slice_find_area(mm, len, potential_mask,
+ psize, topdown, high_limit);
}
#endif
if (addr == -ENOMEM)
return -ENOMEM;
- mask = slice_range_to_mask(addr, len);
+ slice_range_to_mask(addr, len, &mask);
slice_dbg(" found potential area at 0x%lx\n", addr);
slice_print_mask(" mask", mask);
convert:
- andnot_mask(mask, good_mask);
- andnot_mask(mask, compat_mask);
- if (mask.low_slices || mask.high_slices) {
+ slice_andnot_mask(&mask, &good_mask);
+ slice_andnot_mask(&mask, &compat_mask);
+ if (mask.low_slices || !bitmap_empty(mask.high_slices, SLICE_NUM_HIGH)) {
slice_convert(mm, mask, psize);
if (psize > MMU_PAGE_BASE)
on_each_cpu(slice_flush_segments, mm, 1);
@@ -649,8 +703,8 @@ void slice_set_user_psize(struct mm_struct *mm, unsigned int psize)
slice_dbg(" lsps=%lx, hsps=%lx\n",
- mm->context.low_slices_psize,
- mm->context.high_slices_psize);
+ (unsigned long)mm->context.low_slices_psize,
+ (unsigned long)mm->context.high_slices_psize);
bail:
spin_unlock_irqrestore(&slice_convert_lock, flags);
@@ -659,9 +713,11 @@ void slice_set_user_psize(struct mm_struct *mm, unsigned int psize)
void slice_set_range_psize(struct mm_struct *mm, unsigned long start,
unsigned long len, unsigned int psize)
{
- struct slice_mask mask = slice_range_to_mask(start, len);
+ struct slice_mask mask;
VM_BUG_ON(radix_enabled());
+
+ slice_range_to_mask(start, len, &mask);
slice_convert(mm, mask, psize);
}
@@ -694,14 +750,14 @@ int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
if (radix_enabled())
return 0;
- mask = slice_range_to_mask(addr, len);
- available = slice_mask_for_size(mm, psize);
+ slice_range_to_mask(addr, len, &mask);
+ slice_mask_for_size(mm, psize, &available);
#ifdef CONFIG_PPC_64K_PAGES
/* We need to account for 4k slices too */
if (psize == MMU_PAGE_64K) {
struct slice_mask compat_mask;
- compat_mask = slice_mask_for_size(mm, MMU_PAGE_4K);
- or_mask(available, compat_mask);
+ slice_mask_for_size(mm, MMU_PAGE_4K, &compat_mask);
+ slice_or_mask(&available, &compat_mask);
}
#endif
@@ -711,6 +767,6 @@ int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
slice_print_mask(" mask", mask);
slice_print_mask(" available", available);
#endif
- return !slice_check_fit(mask, available);
+ return !slice_check_fit(mm, mask, available);
}
#endif
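slice_range_to_mask() now fills a caller-provided slice_mask whose high slices are a bitmap rather than a u64: one bit per 256MB slice below 4GB and per 1TB slice above it. A small user-space model of that low/high split, with the SLICE_* constants and bitmap width chosen for illustration only:

#include <stdio.h>
#include <string.h>

#define SLICE_LOW_SHIFT   28			/* 256MB low slices */
#define SLICE_HIGH_SHIFT  40			/* 1TB high slices */
#define SLICE_LOW_TOP     (1UL << 32)		/* low slices cover 0..4GB */
#define SLICE_NUM_HIGH    64			/* illustrative bitmap size */

struct slice_mask {
	unsigned long low_slices;
	unsigned char high_slices[SLICE_NUM_HIGH / 8];
};

static void set_high(struct slice_mask *m, unsigned long i)
{
	m->high_slices[i / 8] |= 1u << (i % 8);
}

/* Mark every slice touched by [start, start+len) in the mask. */
static void slice_range_to_mask(unsigned long start, unsigned long len,
				struct slice_mask *ret)
{
	unsigned long end = start + len - 1;

	memset(ret, 0, sizeof(*ret));

	if (start < SLICE_LOW_TOP) {
		unsigned long mend = end < SLICE_LOW_TOP - 1 ? end : SLICE_LOW_TOP - 1;

		ret->low_slices = (1ul << ((mend >> SLICE_LOW_SHIFT) + 1))
				  - (1ul << (start >> SLICE_LOW_SHIFT));
	}

	if (end >= SLICE_LOW_TOP)
		for (unsigned long i = start >> SLICE_HIGH_SHIFT;
		     i <= (end >> SLICE_HIGH_SHIFT); i++)
			set_high(ret, i);
}

int main(void)
{
	struct slice_mask m;

	slice_range_to_mask(0x30000000UL, 0x20000000UL, &m);	/* 768MB..1.25GB */
	printf("low_slices = 0x%lx\n", m.low_slices);		/* slices 3 and 4 */
	return 0;
}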
diff --git a/arch/powerpc/mm/subpage-prot.c b/arch/powerpc/mm/subpage-prot.c
index 94210940112f..e94fbd4c8845 100644
--- a/arch/powerpc/mm/subpage-prot.c
+++ b/arch/powerpc/mm/subpage-prot.c
@@ -197,7 +197,8 @@ long sys_subpage_prot(unsigned long addr, unsigned long len, u32 __user *map)
/* Check parameters */
if ((addr & ~PAGE_MASK) || (len & ~PAGE_MASK) ||
- addr >= TASK_SIZE || len >= TASK_SIZE || addr + len > TASK_SIZE)
+ addr >= mm->task_size || len >= mm->task_size ||
+ addr + len > mm->task_size)
return -EINVAL;
if (is_hugepage_only_range(mm, addr, len))
diff --git a/arch/powerpc/mm/tlb-radix.c b/arch/powerpc/mm/tlb-radix.c
index 952713d6cf04..02e71402fdd3 100644
--- a/arch/powerpc/mm/tlb-radix.c
+++ b/arch/powerpc/mm/tlb-radix.c
@@ -17,7 +17,6 @@
#include <asm/tlb.h>
#include <asm/tlbflush.h>
-static DEFINE_RAW_SPINLOCK(native_tlbie_lock);
#define RIC_FLUSH_TLB 0
#define RIC_FLUSH_PWC 1
@@ -34,10 +33,8 @@ static inline void __tlbiel_pid(unsigned long pid, int set,
prs = 1; /* process scoped */
r = 1; /* raidx format */
- asm volatile("ptesync": : :"memory");
asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
: : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
- asm volatile("ptesync": : :"memory");
}
/*
@@ -47,9 +44,33 @@ static inline void _tlbiel_pid(unsigned long pid, unsigned long ric)
{
int set;
- for (set = 0; set < POWER9_TLB_SETS_RADIX ; set++) {
+ asm volatile("ptesync": : :"memory");
+
+ /*
+ * Flush the first set of the TLB, and if we're doing a RIC_FLUSH_ALL,
+ * also flush the entire Page Walk Cache.
+ */
+ __tlbiel_pid(pid, 0, ric);
+
+ if (ric == RIC_FLUSH_ALL)
+ /* For the remaining sets, just flush the TLB */
+ ric = RIC_FLUSH_TLB;
+
+ for (set = 1; set < POWER9_TLB_SETS_RADIX ; set++)
__tlbiel_pid(pid, set, ric);
- }
+
+ asm volatile("ptesync": : :"memory");
+ asm volatile(PPC_INVALIDATE_ERAT "; isync" : : :"memory");
+}
+
+static inline void tlbiel_pwc(unsigned long pid)
+{
+ asm volatile("ptesync": : :"memory");
+
+ /* For PWC flush, we don't look at set number */
+ __tlbiel_pid(pid, 0, RIC_FLUSH_PWC);
+
+ asm volatile("ptesync": : :"memory");
asm volatile(PPC_INVALIDATE_ERAT "; isync" : : :"memory");
}
@@ -129,12 +150,18 @@ void radix__local_flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr)
{
unsigned long pid;
struct mm_struct *mm = tlb->mm;
+ /*
+ * If we are doing a full mm flush, we will do a tlb flush
+ * with RIC_FLUSH_ALL later.
+ */
+ if (tlb->fullmm)
+ return;
preempt_disable();
pid = mm->context.id;
if (pid != MMU_NO_CONTEXT)
- _tlbiel_pid(pid, RIC_FLUSH_PWC);
+ tlbiel_pwc(pid);
preempt_enable();
}
@@ -175,15 +202,9 @@ void radix__flush_tlb_mm(struct mm_struct *mm)
if (unlikely(pid == MMU_NO_CONTEXT))
goto no_context;
- if (!mm_is_thread_local(mm)) {
- int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
-
- if (lock_tlbie)
- raw_spin_lock(&native_tlbie_lock);
+ if (!mm_is_thread_local(mm))
_tlbie_pid(pid, RIC_FLUSH_ALL);
- if (lock_tlbie)
- raw_spin_unlock(&native_tlbie_lock);
- } else
+ else
_tlbiel_pid(pid, RIC_FLUSH_ALL);
no_context:
preempt_enable();
@@ -195,22 +216,22 @@ void radix__flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr)
unsigned long pid;
struct mm_struct *mm = tlb->mm;
+ /*
+ * If we are doing a full mm flush, we will do a tlb flush
+ * with RIC_FLUSH_ALL later.
+ */
+ if (tlb->fullmm)
+ return;
preempt_disable();
pid = mm->context.id;
if (unlikely(pid == MMU_NO_CONTEXT))
goto no_context;
- if (!mm_is_thread_local(mm)) {
- int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
-
- if (lock_tlbie)
- raw_spin_lock(&native_tlbie_lock);
+ if (!mm_is_thread_local(mm))
_tlbie_pid(pid, RIC_FLUSH_PWC);
- if (lock_tlbie)
- raw_spin_unlock(&native_tlbie_lock);
- } else
- _tlbiel_pid(pid, RIC_FLUSH_PWC);
+ else
+ tlbiel_pwc(pid);
no_context:
preempt_enable();
}
@@ -226,15 +247,9 @@ void radix__flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
pid = mm ? mm->context.id : 0;
if (unlikely(pid == MMU_NO_CONTEXT))
goto bail;
- if (!mm_is_thread_local(mm)) {
- int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
-
- if (lock_tlbie)
- raw_spin_lock(&native_tlbie_lock);
+ if (!mm_is_thread_local(mm))
_tlbie_va(vmaddr, pid, ap, RIC_FLUSH_TLB);
- if (lock_tlbie)
- raw_spin_unlock(&native_tlbie_lock);
- } else
+ else
_tlbiel_va(vmaddr, pid, ap, RIC_FLUSH_TLB);
bail:
preempt_enable();
@@ -255,13 +270,7 @@ EXPORT_SYMBOL(radix__flush_tlb_page);
void radix__flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
- int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
-
- if (lock_tlbie)
- raw_spin_lock(&native_tlbie_lock);
_tlbie_pid(0, RIC_FLUSH_ALL);
- if (lock_tlbie)
- raw_spin_unlock(&native_tlbie_lock);
}
EXPORT_SYMBOL(radix__flush_tlb_kernel_range);
@@ -323,7 +332,6 @@ void radix__flush_tlb_range_psize(struct mm_struct *mm, unsigned long start,
unsigned long addr;
int local = mm_is_thread_local(mm);
unsigned long ap = mmu_get_ap(psize);
- int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
unsigned long page_size = 1UL << mmu_psize_defs[psize].shift;
@@ -344,13 +352,8 @@ void radix__flush_tlb_range_psize(struct mm_struct *mm, unsigned long start,
if (local)
_tlbiel_va(addr, pid, ap, RIC_FLUSH_TLB);
- else {
- if (lock_tlbie)
- raw_spin_lock(&native_tlbie_lock);
+ else
_tlbie_va(addr, pid, ap, RIC_FLUSH_TLB);
- if (lock_tlbie)
- raw_spin_unlock(&native_tlbie_lock);
- }
}
err_out:
preempt_enable();
@@ -437,7 +440,7 @@ void radix__flush_tlb_pte_p9_dd1(unsigned long old_pte, struct mm_struct *mm,
return;
}
- if (old_pte & _PAGE_LARGE)
+ if (old_pte & R_PAGE_LARGE)
radix__flush_tlb_page_psize(mm, address, MMU_PAGE_2M);
else
radix__flush_tlb_page_psize(mm, address, mmu_virtual_psize);
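The reworked _tlbiel_pid() issues the requested RIC only for set 0 and downgrades the remaining sets to a plain TLB flush, since the page-walk cache is not per-set. A sketch of just that control flow, with the tlbiel instruction replaced by a counting stub (the set count matches POWER9_TLB_SETS_RADIX; everything else is illustrative):

#include <stdio.h>

#define RIC_FLUSH_TLB 0
#define RIC_FLUSH_PWC 1
#define RIC_FLUSH_ALL 2

#define POWER9_TLB_SETS_RADIX 128

static int all_count, tlb_count;

/* Stub standing in for the tlbiel instruction issued per (pid, set, ric). */
static void tlbiel_pid_set(unsigned long pid, int set, int ric)
{
	(void)pid; (void)set;
	if (ric == RIC_FLUSH_ALL)
		all_count++;
	else
		tlb_count++;
}

/* Flush the whole PID: the PWC only needs flushing once, so only set 0
 * carries RIC_FLUSH_ALL; the rest are plain TLB flushes. */
static void flush_pid(unsigned long pid, int ric)
{
	int set;

	tlbiel_pid_set(pid, 0, ric);

	if (ric == RIC_FLUSH_ALL)
		ric = RIC_FLUSH_TLB;

	for (set = 1; set < POWER9_TLB_SETS_RADIX; set++)
		tlbiel_pid_set(pid, set, ric);
}

int main(void)
{
	flush_pid(7, RIC_FLUSH_ALL);
	printf("RIC_FLUSH_ALL issued %d time(s), RIC_FLUSH_TLB %d time(s)\n",
	       all_count, tlb_count);
	return 0;
}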
diff --git a/arch/powerpc/mm/tlb_nohash.c b/arch/powerpc/mm/tlb_nohash.c
index ba28fcb98597..bfc4a0869609 100644
--- a/arch/powerpc/mm/tlb_nohash.c
+++ b/arch/powerpc/mm/tlb_nohash.c
@@ -770,7 +770,7 @@ void setup_initial_memory_limit(phys_addr_t first_memblock_base,
* avoid going over total available memory just in case...
*/
#ifdef CONFIG_PPC_FSL_BOOK3E
- if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
+ if (early_mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
unsigned long linear_sz;
unsigned int num_cams;
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index 2ff13249f87a..6c2d4168daec 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -2049,6 +2049,14 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
data.br_stack = &cpuhw->bhrb_stack;
}
+ if (event->attr.sample_type & PERF_SAMPLE_DATA_SRC &&
+ ppmu->get_mem_data_src)
+ ppmu->get_mem_data_src(&data.data_src, ppmu->flags, regs);
+
+ if (event->attr.sample_type & PERF_SAMPLE_WEIGHT &&
+ ppmu->get_mem_weight)
+ ppmu->get_mem_weight(&data.weight);
+
if (perf_event_overflow(event, &data, regs))
power_pmu_stop(event, 0);
}
diff --git a/arch/powerpc/perf/isa207-common.c b/arch/powerpc/perf/isa207-common.c
index cd951fd231c4..8125160be7bc 100644
--- a/arch/powerpc/perf/isa207-common.c
+++ b/arch/powerpc/perf/isa207-common.c
@@ -148,6 +148,88 @@ static bool is_thresh_cmp_valid(u64 event)
return true;
}
+static inline u64 isa207_find_source(u64 idx, u32 sub_idx)
+{
+ u64 ret = PERF_MEM_NA;
+
+ switch(idx) {
+ case 0:
+ /* Nothing to do */
+ break;
+ case 1:
+ ret = PH(LVL, L1);
+ break;
+ case 2:
+ ret = PH(LVL, L2);
+ break;
+ case 3:
+ ret = PH(LVL, L3);
+ break;
+ case 4:
+ if (sub_idx <= 1)
+ ret = PH(LVL, LOC_RAM);
+ else if (sub_idx > 1 && sub_idx <= 2)
+ ret = PH(LVL, REM_RAM1);
+ else
+ ret = PH(LVL, REM_RAM2);
+ ret |= P(SNOOP, HIT);
+ break;
+ case 5:
+ ret = PH(LVL, REM_CCE1);
+ if ((sub_idx == 0) || (sub_idx == 2) || (sub_idx == 4))
+ ret |= P(SNOOP, HIT);
+ else if ((sub_idx == 1) || (sub_idx == 3) || (sub_idx == 5))
+ ret |= P(SNOOP, HITM);
+ break;
+ case 6:
+ ret = PH(LVL, REM_CCE2);
+ if ((sub_idx == 0) || (sub_idx == 2))
+ ret |= P(SNOOP, HIT);
+ else if ((sub_idx == 1) || (sub_idx == 3))
+ ret |= P(SNOOP, HITM);
+ break;
+ case 7:
+ ret = PM(LVL, L1);
+ break;
+ }
+
+ return ret;
+}
+
+void isa207_get_mem_data_src(union perf_mem_data_src *dsrc, u32 flags,
+ struct pt_regs *regs)
+{
+ u64 idx;
+ u32 sub_idx;
+ u64 sier;
+ u64 val;
+
+ /* Skip if no SIER support */
+ if (!(flags & PPMU_HAS_SIER)) {
+ dsrc->val = 0;
+ return;
+ }
+
+ sier = mfspr(SPRN_SIER);
+ val = (sier & ISA207_SIER_TYPE_MASK) >> ISA207_SIER_TYPE_SHIFT;
+ if (val == 1 || val == 2) {
+ idx = (sier & ISA207_SIER_LDST_MASK) >> ISA207_SIER_LDST_SHIFT;
+ sub_idx = (sier & ISA207_SIER_DATA_SRC_MASK) >> ISA207_SIER_DATA_SRC_SHIFT;
+
+ dsrc->val = isa207_find_source(idx, sub_idx);
+ dsrc->val |= (val == 1) ? P(OP, LOAD) : P(OP, STORE);
+ }
+}
+
+void isa207_get_mem_weight(u64 *weight)
+{
+ u64 mmcra = mfspr(SPRN_MMCRA);
+ u64 exp = MMCRA_THR_CTR_EXP(mmcra);
+ u64 mantissa = MMCRA_THR_CTR_MANT(mmcra);
+
+ *weight = mantissa << (2 * exp);
+}
+
int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp)
{
unsigned int unit, pmc, cache, ebb;
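isa207_get_mem_data_src() extracts three SIER bit-fields (sampled type, load/store unit index, data-source sub-index) and maps them to perf_mem_data_src levels. A user-space sketch of the field extraction only, reusing the shift/mask values added in the isa207-common.h hunk below; the SIER value itself is fabricated for illustration:

#include <stdio.h>

#define ISA207_SIER_TYPE_SHIFT		15
#define ISA207_SIER_TYPE_MASK		(0x7ull << ISA207_SIER_TYPE_SHIFT)

#define ISA207_SIER_LDST_SHIFT		1
#define ISA207_SIER_LDST_MASK		(0x7ull << ISA207_SIER_LDST_SHIFT)

#define ISA207_SIER_DATA_SRC_SHIFT	53
#define ISA207_SIER_DATA_SRC_MASK	(0x7ull << ISA207_SIER_DATA_SRC_SHIFT)

int main(void)
{
	/* Fabricated SIER sample: type = 1 (load), ldst idx = 2, sub_idx = 0 */
	unsigned long long sier = (1ull << ISA207_SIER_TYPE_SHIFT) |
				  (2ull << ISA207_SIER_LDST_SHIFT);

	unsigned long long type = (sier & ISA207_SIER_TYPE_MASK) >> ISA207_SIER_TYPE_SHIFT;
	unsigned long long idx  = (sier & ISA207_SIER_LDST_MASK) >> ISA207_SIER_LDST_SHIFT;
	unsigned long long sub  = (sier & ISA207_SIER_DATA_SRC_MASK) >> ISA207_SIER_DATA_SRC_SHIFT;

	printf("type=%llu (%s) idx=%llu sub_idx=%llu\n",
	       type, type == 1 ? "load" : type == 2 ? "store" : "other",
	       idx, sub);
	return 0;
}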
diff --git a/arch/powerpc/perf/isa207-common.h b/arch/powerpc/perf/isa207-common.h
index 899210f14ee4..8acbe6e802c7 100644
--- a/arch/powerpc/perf/isa207-common.h
+++ b/arch/powerpc/perf/isa207-common.h
@@ -248,6 +248,15 @@
#define MMCRA_SDAR_MODE_TLB (1ull << MMCRA_SDAR_MODE_SHIFT)
#define MMCRA_SDAR_MODE_NO_UPDATES ~(0x3ull << MMCRA_SDAR_MODE_SHIFT)
#define MMCRA_IFM_SHIFT 30
+#define MMCRA_THR_CTR_MANT_SHIFT 19
+#define MMCRA_THR_CTR_MANT_MASK 0x7Ful
+#define MMCRA_THR_CTR_MANT(v) (((v) >> MMCRA_THR_CTR_MANT_SHIFT) &\
+ MMCRA_THR_CTR_MANT_MASK)
+
+#define MMCRA_THR_CTR_EXP_SHIFT 27
+#define MMCRA_THR_CTR_EXP_MASK 0x7ul
+#define MMCRA_THR_CTR_EXP(v) (((v) >> MMCRA_THR_CTR_EXP_SHIFT) &\
+ MMCRA_THR_CTR_EXP_MASK)
/* MMCR1 Threshold Compare bit constant for power9 */
#define p9_MMCRA_THR_CMP_SHIFT 45
@@ -260,6 +269,19 @@
#define MAX_ALT 2
#define MAX_PMU_COUNTERS 6
+#define ISA207_SIER_TYPE_SHIFT 15
+#define ISA207_SIER_TYPE_MASK (0x7ull << ISA207_SIER_TYPE_SHIFT)
+
+#define ISA207_SIER_LDST_SHIFT 1
+#define ISA207_SIER_LDST_MASK (0x7ull << ISA207_SIER_LDST_SHIFT)
+
+#define ISA207_SIER_DATA_SRC_SHIFT 53
+#define ISA207_SIER_DATA_SRC_MASK (0x7ull << ISA207_SIER_DATA_SRC_SHIFT)
+
+#define P(a, b) PERF_MEM_S(a, b)
+#define PH(a, b) (P(LVL, HIT) | P(a, b))
+#define PM(a, b) (P(LVL, MISS) | P(a, b))
+
int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp);
int isa207_compute_mmcr(u64 event[], int n_ev,
unsigned int hwc[], unsigned long mmcr[],
@@ -267,6 +289,8 @@ int isa207_compute_mmcr(u64 event[], int n_ev,
void isa207_disable_pmc(unsigned int pmc, unsigned long mmcr[]);
int isa207_get_alternatives(u64 event, u64 alt[],
const unsigned int ev_alt[][MAX_ALT], int size);
-
+void isa207_get_mem_data_src(union perf_mem_data_src *dsrc, u32 flags,
+ struct pt_regs *regs);
+void isa207_get_mem_weight(u64 *weight);
#endif
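isa207_get_mem_weight() reconstructs the threshold counter value as mantissa << (2 * exponent) from the MMCRA fields defined above. A sketch of that decode using the same shifts and masks, with a made-up MMCRA value:

#include <stdio.h>

#define MMCRA_THR_CTR_MANT_SHIFT	19
#define MMCRA_THR_CTR_MANT_MASK		0x7Ful
#define MMCRA_THR_CTR_MANT(v)		(((v) >> MMCRA_THR_CTR_MANT_SHIFT) & \
					 MMCRA_THR_CTR_MANT_MASK)

#define MMCRA_THR_CTR_EXP_SHIFT		27
#define MMCRA_THR_CTR_EXP_MASK		0x7ul
#define MMCRA_THR_CTR_EXP(v)		(((v) >> MMCRA_THR_CTR_EXP_SHIFT) & \
					 MMCRA_THR_CTR_EXP_MASK)

int main(void)
{
	/* Made-up MMCRA sample: mantissa = 0x28, exponent = 3 */
	unsigned long mmcra = (0x28ul << MMCRA_THR_CTR_MANT_SHIFT) |
			      (3ul << MMCRA_THR_CTR_EXP_SHIFT);

	unsigned long weight = MMCRA_THR_CTR_MANT(mmcra) << (2 * MMCRA_THR_CTR_EXP(mmcra));

	printf("weight = %lu\n", weight);	/* 0x28 << 6 = 2560 */
	return 0;
}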
diff --git a/arch/powerpc/perf/power8-events-list.h b/arch/powerpc/perf/power8-events-list.h
index 3a2e6e8ebb92..0f1d184627cc 100644
--- a/arch/powerpc/perf/power8-events-list.h
+++ b/arch/powerpc/perf/power8-events-list.h
@@ -89,3 +89,9 @@ EVENT(PM_MRK_FILT_MATCH, 0x2013c)
EVENT(PM_MRK_FILT_MATCH_ALT, 0x3012e)
/* Alternate event code for PM_LD_MISS_L1 */
EVENT(PM_LD_MISS_L1_ALT, 0x400f0)
+/*
+ * Memory Access Event -- mem_access
+ * Primary PMU event used here is PM_MRK_INST_CMPL, along with
+ * Random Load/Store Facility Sampling (RIS) in Random sampling mode (MMCRA[SM]).
+ */
+EVENT(MEM_ACCESS, 0x10401e0)
diff --git a/arch/powerpc/perf/power8-pmu.c b/arch/powerpc/perf/power8-pmu.c
index ce15b19a7962..5463516e369b 100644
--- a/arch/powerpc/perf/power8-pmu.c
+++ b/arch/powerpc/perf/power8-pmu.c
@@ -90,6 +90,7 @@ GENERIC_EVENT_ATTR(branch-instructions, PM_BRU_FIN);
GENERIC_EVENT_ATTR(branch-misses, PM_BR_MPRED_CMPL);
GENERIC_EVENT_ATTR(cache-references, PM_LD_REF_L1);
GENERIC_EVENT_ATTR(cache-misses, PM_LD_MISS_L1);
+GENERIC_EVENT_ATTR(mem_access, MEM_ACCESS);
CACHE_EVENT_ATTR(L1-dcache-load-misses, PM_LD_MISS_L1);
CACHE_EVENT_ATTR(L1-dcache-loads, PM_LD_REF_L1);
@@ -120,6 +121,7 @@ static struct attribute *power8_events_attr[] = {
GENERIC_EVENT_PTR(PM_BR_MPRED_CMPL),
GENERIC_EVENT_PTR(PM_LD_REF_L1),
GENERIC_EVENT_PTR(PM_LD_MISS_L1),
+ GENERIC_EVENT_PTR(MEM_ACCESS),
CACHE_EVENT_PTR(PM_LD_MISS_L1),
CACHE_EVENT_PTR(PM_LD_REF_L1),
@@ -325,6 +327,8 @@ static struct power_pmu power8_pmu = {
.bhrb_filter_map = power8_bhrb_filter_map,
.get_constraint = isa207_get_constraint,
.get_alternatives = power8_get_alternatives,
+ .get_mem_data_src = isa207_get_mem_data_src,
+ .get_mem_weight = isa207_get_mem_weight,
.disable_pmc = isa207_disable_pmc,
.flags = PPMU_HAS_SIER | PPMU_ARCH_207S,
.n_generic = ARRAY_SIZE(power8_generic_events),
diff --git a/arch/powerpc/perf/power9-pmu.c b/arch/powerpc/perf/power9-pmu.c
index 7f6582708e06..018f8e90ac35 100644
--- a/arch/powerpc/perf/power9-pmu.c
+++ b/arch/powerpc/perf/power9-pmu.c
@@ -427,6 +427,8 @@ static struct power_pmu power9_pmu = {
.bhrb_filter_map = power9_bhrb_filter_map,
.get_constraint = isa207_get_constraint,
.get_alternatives = power9_get_alternatives,
+ .get_mem_data_src = isa207_get_mem_data_src,
+ .get_mem_weight = isa207_get_mem_weight,
.disable_pmc = isa207_disable_pmc,
.flags = PPMU_HAS_SIER | PPMU_ARCH_207S,
.n_generic = ARRAY_SIZE(power9_generic_events),
diff --git a/arch/powerpc/platforms/44x/sam440ep.c b/arch/powerpc/platforms/44x/sam440ep.c
index 688ffeab0699..55fed5e4de14 100644
--- a/arch/powerpc/platforms/44x/sam440ep.c
+++ b/arch/powerpc/platforms/44x/sam440ep.c
@@ -70,7 +70,7 @@ static struct i2c_board_info sam440ep_rtc_info = {
.irq = -1,
};
-static int sam440ep_setup_rtc(void)
+static int __init sam440ep_setup_rtc(void)
{
return i2c_register_board_info(0, &sam440ep_rtc_info, 1);
}
diff --git a/arch/powerpc/platforms/52xx/Kconfig b/arch/powerpc/platforms/52xx/Kconfig
index b625a2c6f4f2..e4c745981912 100644
--- a/arch/powerpc/platforms/52xx/Kconfig
+++ b/arch/powerpc/platforms/52xx/Kconfig
@@ -33,7 +33,6 @@ config PPC_EFIKA
bool "bPlan Efika 5k2. MPC5200B based computer"
depends on PPC_MPC52xx
select PPC_RTAS
- select RTAS_PROC
select PPC_NATIVE
config PPC_LITE5200
diff --git a/arch/powerpc/platforms/85xx/smp.c b/arch/powerpc/platforms/85xx/smp.c
index 078097a0b09d..f51fd35f4618 100644
--- a/arch/powerpc/platforms/85xx/smp.c
+++ b/arch/powerpc/platforms/85xx/smp.c
@@ -344,6 +344,7 @@ done:
}
struct smp_ops_t smp_85xx_ops = {
+ .cause_nmi_ipi = NULL,
.kick_cpu = smp_85xx_kick_cpu,
.cpu_bootable = smp_generic_cpu_bootable,
#ifdef CONFIG_HOTPLUG_CPU
@@ -461,16 +462,9 @@ static void mpc85xx_smp_machine_kexec(struct kimage *image)
}
#endif /* CONFIG_KEXEC_CORE */
-static void smp_85xx_basic_setup(int cpu_nr)
-{
- if (cpu_has_feature(CPU_FTR_DBELL))
- doorbell_setup_this_cpu();
-}
-
static void smp_85xx_setup_cpu(int cpu_nr)
{
mpic_setup_this_cpu();
- smp_85xx_basic_setup(cpu_nr);
}
void __init mpc85xx_smp_init(void)
@@ -484,7 +478,7 @@ void __init mpc85xx_smp_init(void)
smp_85xx_ops.setup_cpu = smp_85xx_setup_cpu;
smp_85xx_ops.message_pass = smp_mpic_message_pass;
} else
- smp_85xx_ops.setup_cpu = smp_85xx_basic_setup;
+ smp_85xx_ops.setup_cpu = NULL;
if (cpu_has_feature(CPU_FTR_DBELL)) {
/*
@@ -492,7 +486,7 @@ void __init mpc85xx_smp_init(void)
* smp_muxed_ipi_message_pass
*/
smp_85xx_ops.message_pass = NULL;
- smp_85xx_ops.cause_ipi = doorbell_cause_ipi;
+ smp_85xx_ops.cause_ipi = doorbell_global_ipi;
smp_85xx_ops.probe = NULL;
}
diff --git a/arch/powerpc/platforms/86xx/mpc86xx_smp.c b/arch/powerpc/platforms/86xx/mpc86xx_smp.c
index af09baee22cb..020e84a47a32 100644
--- a/arch/powerpc/platforms/86xx/mpc86xx_smp.c
+++ b/arch/powerpc/platforms/86xx/mpc86xx_smp.c
@@ -105,6 +105,7 @@ smp_86xx_setup_cpu(int cpu_nr)
struct smp_ops_t smp_86xx_ops = {
+ .cause_nmi_ipi = NULL,
.message_pass = smp_mpic_message_pass,
.probe = smp_mpic_probe,
.kick_cpu = smp_86xx_kick_cpu,
diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig
index 7e3a2ebba29b..33244e3d9375 100644
--- a/arch/powerpc/platforms/Kconfig
+++ b/arch/powerpc/platforms/Kconfig
@@ -284,6 +284,7 @@ config CPM2
config AXON_RAM
tristate "Axon DDR2 memory device driver"
depends on PPC_IBM_CELL_BLADE && BLOCK
+ select DAX
default m
help
It registers one block device per Axon's DDR2 memory bank found
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index 99b0ae8acb78..684e886eaae4 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -279,7 +279,8 @@ config PPC_ICSWX
This option enables kernel support for the PowerPC Initiate
Coprocessor Store Word (icswx) coprocessor instruction on POWER7
- or newer processors.
+ and POWER8 processors. POWER9 uses new copy/paste instructions
+ to invoke the coprocessor.
This option is only useful if you have a processor that supports
the icswx coprocessor instruction. It does not have any effect
@@ -359,7 +360,7 @@ config PPC_BOOK3E_MMU
config PPC_MM_SLICES
bool
- default y if (!PPC_FSL_BOOK3E && PPC64 && HUGETLB_PAGE) || (PPC_STD_MMU_64 && PPC_64K_PAGES)
+ default y if PPC_STD_MMU_64
default n
config PPC_HAVE_PMU_SUPPORT
@@ -371,9 +372,16 @@ config PPC_PERF_CTRS
help
This enables the powerpc-specific perf_event back-end.
+config FORCE_SMP
+ # Allow platforms to force SMP=y by selecting this
+ bool
+ default n
+ select SMP
+
config SMP
depends on PPC_BOOK3S || PPC_BOOK3E || FSL_BOOKE || PPC_47x
- bool "Symmetric multi-processing support"
+ select GENERIC_IRQ_MIGRATION
+ bool "Symmetric multi-processing support" if !FORCE_SMP
---help---
This enables support for systems with more than one CPU. If you have
a system with only one CPU, say N. If you have a system with more
diff --git a/arch/powerpc/platforms/cell/axon_msi.c b/arch/powerpc/platforms/cell/axon_msi.c
index 8b55c5f19d4c..8d3ae2cc52bf 100644
--- a/arch/powerpc/platforms/cell/axon_msi.c
+++ b/arch/powerpc/platforms/cell/axon_msi.c
@@ -15,9 +15,9 @@
#include <linux/msi.h>
#include <linux/export.h>
#include <linux/of_platform.h>
-#include <linux/debugfs.h>
#include <linux/slab.h>
+#include <asm/debugfs.h>
#include <asm/dcr.h>
#include <asm/machdep.h>
#include <asm/prom.h>
diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c
index a6bbbaba14a3..871d38479a25 100644
--- a/arch/powerpc/platforms/cell/interrupt.c
+++ b/arch/powerpc/platforms/cell/interrupt.c
@@ -211,7 +211,7 @@ void iic_request_IPIs(void)
iic_request_ipi(PPC_MSG_CALL_FUNCTION);
iic_request_ipi(PPC_MSG_RESCHEDULE);
iic_request_ipi(PPC_MSG_TICK_BROADCAST);
- iic_request_ipi(PPC_MSG_DEBUGGER_BREAK);
+ iic_request_ipi(PPC_MSG_NMI_IPI);
}
#endif /* CONFIG_SMP */
diff --git a/arch/powerpc/platforms/cell/pervasive.c b/arch/powerpc/platforms/cell/pervasive.c
index e7d075077cb0..a88944db9fc3 100644
--- a/arch/powerpc/platforms/cell/pervasive.c
+++ b/arch/powerpc/platforms/cell/pervasive.c
@@ -88,11 +88,14 @@ static void cbe_power_save(void)
static int cbe_system_reset_exception(struct pt_regs *regs)
{
switch (regs->msr & SRR1_WAKEMASK) {
- case SRR1_WAKEEE:
- do_IRQ(regs);
- break;
case SRR1_WAKEDEC:
- timer_interrupt(regs);
+ set_dec(1);
+ case SRR1_WAKEEE:
+ /*
+ * Handle these when interrupts get re-enabled and we take
+ * them as regular exceptions. We are in an NMI context
+ * and can't handle these here.
+ */
break;
case SRR1_WAKEMT:
return cbe_sysreset_hack();
diff --git a/arch/powerpc/platforms/chrp/smp.c b/arch/powerpc/platforms/chrp/smp.c
index b6c9a0dcc924..14515040f7cd 100644
--- a/arch/powerpc/platforms/chrp/smp.c
+++ b/arch/powerpc/platforms/chrp/smp.c
@@ -44,6 +44,7 @@ static void smp_chrp_setup_cpu(int cpu_nr)
/* CHRP with openpic */
struct smp_ops_t chrp_smp_ops = {
+ .cause_nmi_ipi = NULL,
.message_pass = smp_mpic_message_pass,
.probe = smp_mpic_probe,
.kick_cpu = smp_chrp_kick_cpu,
diff --git a/arch/powerpc/platforms/pasemi/idle.c b/arch/powerpc/platforms/pasemi/idle.c
index 75b296bc51af..44e0d9226f0a 100644
--- a/arch/powerpc/platforms/pasemi/idle.c
+++ b/arch/powerpc/platforms/pasemi/idle.c
@@ -53,11 +53,14 @@ static int pasemi_system_reset_exception(struct pt_regs *regs)
regs->nip = regs->link;
switch (regs->msr & SRR1_WAKEMASK) {
- case SRR1_WAKEEE:
- do_IRQ(regs);
- break;
case SRR1_WAKEDEC:
- timer_interrupt(regs);
+ set_dec(1);
+ case SRR1_WAKEEE:
+ /*
+ * Handle these when interrupts get re-enabled and we take
+ * them as regular exceptions. We are in an NMI context
+ * and can't handle these here.
+ */
break;
default:
/* do system reset */
diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
index 746ca7321b03..2cd99eb30762 100644
--- a/arch/powerpc/platforms/powermac/smp.c
+++ b/arch/powerpc/platforms/powermac/smp.c
@@ -172,7 +172,7 @@ static irqreturn_t psurge_ipi_intr(int irq, void *d)
return IRQ_HANDLED;
}
-static void smp_psurge_cause_ipi(int cpu, unsigned long data)
+static void smp_psurge_cause_ipi(int cpu)
{
psurge_set_ipi(cpu);
}
@@ -447,6 +447,7 @@ void __init smp_psurge_give_timebase(void)
struct smp_ops_t psurge_smp_ops = {
.message_pass = NULL, /* Use smp_muxed_ipi_message_pass */
.cause_ipi = smp_psurge_cause_ipi,
+ .cause_nmi_ipi = NULL,
.probe = smp_psurge_probe,
.kick_cpu = smp_psurge_kick_cpu,
.setup_cpu = smp_psurge_setup_cpu,
diff --git a/arch/powerpc/platforms/powernv/Kconfig b/arch/powerpc/platforms/powernv/Kconfig
index 3a07e4dcf97c..6a6f4ef46b9e 100644
--- a/arch/powerpc/platforms/powernv/Kconfig
+++ b/arch/powerpc/platforms/powernv/Kconfig
@@ -4,6 +4,7 @@ config PPC_POWERNV
select PPC_NATIVE
select PPC_XICS
select PPC_ICP_NATIVE
+ select PPC_XIVE_NATIVE
select PPC_P7_NAP
select PCI
select PCI_MSI
@@ -19,6 +20,8 @@ config PPC_POWERNV
select CPU_FREQ_GOV_ONDEMAND
select CPU_FREQ_GOV_CONSERVATIVE
select PPC_DOORBELL
+ select MMU_NOTIFIER
+ select FORCE_SMP
default y
config OPAL_PRD
diff --git a/arch/powerpc/platforms/powernv/eeh-powernv.c b/arch/powerpc/platforms/powernv/eeh-powernv.c
index 6fb5522acd70..d2f19821d71d 100644
--- a/arch/powerpc/platforms/powernv/eeh-powernv.c
+++ b/arch/powerpc/platforms/powernv/eeh-powernv.c
@@ -1102,6 +1102,13 @@ static int pnv_eeh_reset(struct eeh_pe *pe, int option)
return -EIO;
}
+ /*
+ * If dealing with the root bus (or the bus underneath the
+ * root port), we reset the bus underneath the root port.
+ *
+ * The cxl driver depends on this behaviour for bi-modal card
+ * switching.
+ */
if (pci_is_root_bus(bus) ||
pci_is_root_bus(bus->parent))
return pnv_eeh_root_reset(hose, option);
diff --git a/arch/powerpc/platforms/powernv/idle.c b/arch/powerpc/platforms/powernv/idle.c
index 4ee837e6391a..445f30a2c5ef 100644
--- a/arch/powerpc/platforms/powernv/idle.c
+++ b/arch/powerpc/platforms/powernv/idle.c
@@ -53,19 +53,6 @@ static int pnv_save_sprs_for_deep_states(void)
uint64_t pir = get_hard_smp_processor_id(cpu);
uint64_t hsprg0_val = (uint64_t)&paca[cpu];
- if (!cpu_has_feature(CPU_FTR_ARCH_300)) {
- /*
- * HSPRG0 is used to store the cpu's pointer to paca.
- * Hence last 3 bits are guaranteed to be 0. Program
- * slw to restore HSPRG0 with 63rd bit set, so that
- * when a thread wakes up at 0x100 we can use this bit
- * to distinguish between fastsleep and deep winkle.
- * This is not necessary with stop/psscr since PLS
- * field of psscr indicates which state we are waking
- * up from.
- */
- hsprg0_val |= 1;
- }
rc = opal_slw_set_reg(pir, SPRN_HSPRG0, hsprg0_val);
if (rc != 0)
return rc;
@@ -122,9 +109,12 @@ static void pnv_alloc_idle_core_states(void)
for (i = 0; i < nr_cores; i++) {
int first_cpu = i * threads_per_core;
int node = cpu_to_node(first_cpu);
+ size_t paca_ptr_array_size;
core_idle_state = kmalloc_node(sizeof(u32), GFP_KERNEL, node);
*core_idle_state = PNV_CORE_IDLE_THREAD_BITS;
+ paca_ptr_array_size = (threads_per_core *
+ sizeof(struct paca_struct *));
for (j = 0; j < threads_per_core; j++) {
int cpu = first_cpu + j;
@@ -132,6 +122,11 @@ static void pnv_alloc_idle_core_states(void)
paca[cpu].core_idle_state_ptr = core_idle_state;
paca[cpu].thread_idle_state = PNV_THREAD_RUNNING;
paca[cpu].thread_mask = 1 << j;
+ if (!cpu_has_feature(CPU_FTR_POWER9_DD1))
+ continue;
+ paca[cpu].thread_sibling_pacas =
+ kmalloc_node(paca_ptr_array_size,
+ GFP_KERNEL, node);
}
}
@@ -147,7 +142,6 @@ u32 pnv_get_supported_cpuidle_states(void)
}
EXPORT_SYMBOL_GPL(pnv_get_supported_cpuidle_states);
-
static void pnv_fastsleep_workaround_apply(void *info)
{
@@ -241,8 +235,9 @@ static DEVICE_ATTR(fastsleep_workaround_applyonce, 0600,
* The default stop state that will be used by ppc_md.power_save
* function on platforms that support stop instruction.
*/
-u64 pnv_default_stop_val;
-u64 pnv_default_stop_mask;
+static u64 pnv_default_stop_val;
+static u64 pnv_default_stop_mask;
+static bool default_stop_found;
/*
* Used for ppc_md.power_save which needs a function with no parameters
@@ -262,8 +257,42 @@ u64 pnv_first_deep_stop_state = MAX_STOP_STATE;
* psscr value and mask of the deepest stop idle state.
* Used when a cpu is offlined.
*/
-u64 pnv_deepest_stop_psscr_val;
-u64 pnv_deepest_stop_psscr_mask;
+static u64 pnv_deepest_stop_psscr_val;
+static u64 pnv_deepest_stop_psscr_mask;
+static bool deepest_stop_found;
+
+/*
+ * pnv_cpu_offline: Put the CPU into the deepest available platform
+ * idle state when it is taken offline.
+ */
+unsigned long pnv_cpu_offline(unsigned int cpu)
+{
+ unsigned long srr1;
+
+ u32 idle_states = pnv_get_supported_cpuidle_states();
+
+ if (cpu_has_feature(CPU_FTR_ARCH_300) && deepest_stop_found) {
+ srr1 = power9_idle_stop(pnv_deepest_stop_psscr_val,
+ pnv_deepest_stop_psscr_mask);
+ } else if (idle_states & OPAL_PM_WINKLE_ENABLED) {
+ srr1 = power7_winkle();
+ } else if ((idle_states & OPAL_PM_SLEEP_ENABLED) ||
+ (idle_states & OPAL_PM_SLEEP_ENABLED_ER1)) {
+ srr1 = power7_sleep();
+ } else if (idle_states & OPAL_PM_NAP_ENABLED) {
+ srr1 = power7_nap(1);
+ } else {
+ /* This is the fallback method. We emulate snooze */
+ while (!generic_check_cpu_restart(cpu)) {
+ HMT_low();
+ HMT_very_low();
+ }
+ srr1 = 0;
+ HMT_medium();
+ }
+
+ return srr1;
+}
/*
* Power ISA 3.0 idle initialization.
@@ -352,7 +381,6 @@ static int __init pnv_power9_idle_init(struct device_node *np, u32 *flags,
u32 *residency_ns = NULL;
u64 max_residency_ns = 0;
int rc = 0, i;
- bool default_stop_found = false, deepest_stop_found = false;
psscr_val = kcalloc(dt_idle_states, sizeof(*psscr_val), GFP_KERNEL);
psscr_mask = kcalloc(dt_idle_states, sizeof(*psscr_mask), GFP_KERNEL);
@@ -432,21 +460,24 @@ static int __init pnv_power9_idle_init(struct device_node *np, u32 *flags,
}
}
- if (!default_stop_found) {
- pnv_default_stop_val = PSSCR_HV_DEFAULT_VAL;
- pnv_default_stop_mask = PSSCR_HV_DEFAULT_MASK;
- pr_warn("Setting default stop psscr val=0x%016llx,mask=0x%016llx\n",
+ if (unlikely(!default_stop_found)) {
+ pr_warn("cpuidle-powernv: No suitable default stop state found. Disabling platform idle.\n");
+ } else {
+ ppc_md.power_save = power9_idle;
+ pr_info("cpuidle-powernv: Default stop: psscr = 0x%016llx,mask=0x%016llx\n",
pnv_default_stop_val, pnv_default_stop_mask);
}
- if (!deepest_stop_found) {
- pnv_deepest_stop_psscr_val = PSSCR_HV_DEFAULT_VAL;
- pnv_deepest_stop_psscr_mask = PSSCR_HV_DEFAULT_MASK;
- pr_warn("Setting default stop psscr val=0x%016llx,mask=0x%016llx\n",
+ if (unlikely(!deepest_stop_found)) {
+ pr_warn("cpuidle-powernv: No suitable stop state for CPU-Hotplug. Offlined CPUs will busy wait");
+ } else {
+ pr_info("cpuidle-powernv: Deepest stop: psscr = 0x%016llx,mask=0x%016llx\n",
pnv_deepest_stop_psscr_val,
pnv_deepest_stop_psscr_mask);
}
+ pr_info("cpuidle-powernv: Requested Level (RL) value of first deep stop = 0x%llx\n",
+ pnv_first_deep_stop_state);
out:
kfree(psscr_val);
kfree(psscr_mask);
@@ -524,10 +555,30 @@ static int __init pnv_init_idle_states(void)
pnv_alloc_idle_core_states();
+ /*
+ * For each CPU, record its PACA address in each of its
+ * sibling thread's PACA at the slot corresponding to this
+ * CPU's index in the core.
+ */
+ if (cpu_has_feature(CPU_FTR_POWER9_DD1)) {
+ int cpu;
+
+ pr_info("powernv: idle: Saving PACA pointers of all CPUs in their thread sibling PACA\n");
+ for_each_possible_cpu(cpu) {
+ int base_cpu = cpu_first_thread_sibling(cpu);
+ int idx = cpu_thread_in_core(cpu);
+ int i;
+
+ for (i = 0; i < threads_per_core; i++) {
+ int j = base_cpu + i;
+
+ paca[j].thread_sibling_pacas[idx] = &paca[cpu];
+ }
+ }
+ }
+
if (supported_cpuidle_states & OPAL_PM_NAP_ENABLED)
ppc_md.power_save = power7_idle;
- else if (supported_cpuidle_states & OPAL_PM_STOP_INST_FAST)
- ppc_md.power_save = power9_idle;
out:
return 0;
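
The POWER9 DD1 hunk above cross-links every thread's PACA into all of its siblings' thread_sibling_pacas[] slots, indexed by the thread's position in the core. Below is a minimal userspace sketch of that indexing, assuming sibling threads have consecutive logical CPU numbers (which is what cpu_first_thread_sibling()/cpu_thread_in_core() encode); struct paca_like and THREADS_PER_CORE are hypothetical stand-ins for the real PACA and topology values.

  #include <stdio.h>

  #define THREADS_PER_CORE 4   /* hypothetical SMT4 core */
  #define NR_CPUS          8

  /* Stand-in for the per-CPU PACA and its sibling-pointer table. */
  struct paca_like {
          struct paca_like *thread_sibling_pacas[THREADS_PER_CORE];
  };

  static struct paca_like paca[NR_CPUS];

  int main(void)
  {
          /* Mirror of the kernel loop: every thread of a core ends up with
           * pointers to the PACAs of all of its siblings, indexed by the
           * sibling's position within the core. */
          for (int cpu = 0; cpu < NR_CPUS; cpu++) {
                  int base_cpu = cpu - (cpu % THREADS_PER_CORE); /* first sibling */
                  int idx = cpu % THREADS_PER_CORE;              /* slot in core  */

                  for (int i = 0; i < THREADS_PER_CORE; i++)
                          paca[base_cpu + i].thread_sibling_pacas[idx] = &paca[cpu];
          }

          /* CPU 5 (thread 1 of core 1) finds CPU 6's PACA in slot 2. */
          printf("%td\n", paca[5].thread_sibling_pacas[2] - paca);
          return 0;
  }

Running it prints 6: each thread can reach any sibling's PACA through the slot that matches that sibling's index in the core.
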
diff --git a/arch/powerpc/platforms/powernv/npu-dma.c b/arch/powerpc/platforms/powernv/npu-dma.c
index 1c383f38031d..067defeea691 100644
--- a/arch/powerpc/platforms/powernv/npu-dma.c
+++ b/arch/powerpc/platforms/powernv/npu-dma.c
@@ -9,11 +9,20 @@
* License as published by the Free Software Foundation.
*/
+#include <linux/slab.h>
+#include <linux/mmu_notifier.h>
+#include <linux/mmu_context.h>
+#include <linux/of.h>
#include <linux/export.h>
#include <linux/pci.h>
#include <linux/memblock.h>
#include <linux/iommu.h>
+#include <asm/tlb.h>
+#include <asm/powernv.h>
+#include <asm/reg.h>
+#include <asm/opal.h>
+#include <asm/io.h>
#include <asm/iommu.h>
#include <asm/pnv-pci.h>
#include <asm/msi_bitmap.h>
@@ -22,6 +31,8 @@
#include "powernv.h"
#include "pci.h"
+#define npu_to_phb(x) container_of(x, struct pnv_phb, npu)
+
/*
* Other types of TCE cache invalidation are not functional in the
* hardware.
@@ -37,6 +48,12 @@ struct pci_dev *pnv_pci_get_gpu_dev(struct pci_dev *npdev)
struct device_node *dn;
struct pci_dev *gpdev;
+ if (WARN_ON(!npdev))
+ return NULL;
+
+ if (WARN_ON(!npdev->dev.of_node))
+ return NULL;
+
/* Get assoicated PCI device */
dn = of_parse_phandle(npdev->dev.of_node, "ibm,gpu", 0);
if (!dn)
@@ -55,6 +72,12 @@ struct pci_dev *pnv_pci_get_npu_dev(struct pci_dev *gpdev, int index)
struct device_node *dn;
struct pci_dev *npdev;
+ if (WARN_ON(!gpdev))
+ return NULL;
+
+ if (WARN_ON(!gpdev->dev.of_node))
+ return NULL;
+
/* Get assoicated PCI device */
dn = of_parse_phandle(gpdev->dev.of_node, "ibm,npu", index);
if (!dn)
@@ -180,7 +203,7 @@ long pnv_npu_set_window(struct pnv_ioda_pe *npe, int num,
pe_err(npe, "Failed to configure TCE table, err %lld\n", rc);
return rc;
}
- pnv_pci_phb3_tce_invalidate_entire(phb, false);
+ pnv_pci_ioda2_tce_invalidate_entire(phb, false);
/* Add the table to the list so its TCE cache will get invalidated */
pnv_pci_link_table_and_group(phb->hose->node, num,
@@ -204,7 +227,7 @@ long pnv_npu_unset_window(struct pnv_ioda_pe *npe, int num)
pe_err(npe, "Unmapping failed, ret = %lld\n", rc);
return rc;
}
- pnv_pci_phb3_tce_invalidate_entire(phb, false);
+ pnv_pci_ioda2_tce_invalidate_entire(phb, false);
pnv_pci_unlink_table_and_group(npe->table_group.tables[num],
&npe->table_group);
@@ -270,7 +293,7 @@ static int pnv_npu_dma_set_bypass(struct pnv_ioda_pe *npe)
0 /* bypass base */, top);
if (rc == OPAL_SUCCESS)
- pnv_pci_phb3_tce_invalidate_entire(phb, false);
+ pnv_pci_ioda2_tce_invalidate_entire(phb, false);
return rc;
}
@@ -334,7 +357,7 @@ void pnv_npu_take_ownership(struct pnv_ioda_pe *npe)
pe_err(npe, "Failed to disable bypass, err %lld\n", rc);
return;
}
- pnv_pci_phb3_tce_invalidate_entire(npe->phb, false);
+ pnv_pci_ioda2_tce_invalidate_entire(npe->phb, false);
}
struct pnv_ioda_pe *pnv_pci_npu_setup_iommu(struct pnv_ioda_pe *npe)
@@ -359,3 +382,442 @@ struct pnv_ioda_pe *pnv_pci_npu_setup_iommu(struct pnv_ioda_pe *npe)
return gpe;
}
+
+/* Maximum number of nvlinks per npu */
+#define NV_MAX_LINKS 6
+
+/* Maximum index of npu2 hosts in the system. Always < NV_MAX_NPUS */
+static int max_npu2_index;
+
+struct npu_context {
+ struct mm_struct *mm;
+ struct pci_dev *npdev[NV_MAX_NPUS][NV_MAX_LINKS];
+ struct mmu_notifier mn;
+ struct kref kref;
+
+ /* Callback to stop translation requests on a given GPU */
+ struct npu_context *(*release_cb)(struct npu_context *, void *);
+
+ /*
+ * Private pointer passed to the above callback for usage by
+ * device drivers.
+ */
+ void *priv;
+};
+
+/*
+ * Find a free MMIO ATSD register and mark it in use. Return -ENOSPC
+ * if none are available.
+ */
+static int get_mmio_atsd_reg(struct npu *npu)
+{
+ int i;
+
+ for (i = 0; i < npu->mmio_atsd_count; i++) {
+ if (!test_and_set_bit(i, &npu->mmio_atsd_usage))
+ return i;
+ }
+
+ return -ENOSPC;
+}
+
+static void put_mmio_atsd_reg(struct npu *npu, int reg)
+{
+ clear_bit(reg, &npu->mmio_atsd_usage);
+}
+
+/* MMIO ATSD register offsets */
+#define XTS_ATSD_AVA 1
+#define XTS_ATSD_STAT 2
+
+static int mmio_launch_invalidate(struct npu *npu, unsigned long launch,
+ unsigned long va)
+{
+ int mmio_atsd_reg;
+
+ do {
+ mmio_atsd_reg = get_mmio_atsd_reg(npu);
+ cpu_relax();
+ } while (mmio_atsd_reg < 0);
+
+ __raw_writeq(cpu_to_be64(va),
+ npu->mmio_atsd_regs[mmio_atsd_reg] + XTS_ATSD_AVA);
+ eieio();
+ __raw_writeq(cpu_to_be64(launch), npu->mmio_atsd_regs[mmio_atsd_reg]);
+
+ return mmio_atsd_reg;
+}
+
+static int mmio_invalidate_pid(struct npu *npu, unsigned long pid)
+{
+ unsigned long launch;
+
+ /* IS set to invalidate matching PID */
+ launch = PPC_BIT(12);
+
+ /* PRS set to process-scoped */
+ launch |= PPC_BIT(13);
+
+ /* AP */
+ launch |= (u64) mmu_get_ap(mmu_virtual_psize) << PPC_BITLSHIFT(17);
+
+ /* PID */
+ launch |= pid << PPC_BITLSHIFT(38);
+
+ /* Invalidating the entire process doesn't use a va */
+ return mmio_launch_invalidate(npu, launch, 0);
+}
+
+static int mmio_invalidate_va(struct npu *npu, unsigned long va,
+ unsigned long pid)
+{
+ unsigned long launch;
+
+ /* IS set to invalidate target VA */
+ launch = 0;
+
+ /* PRS set to process scoped */
+ launch |= PPC_BIT(13);
+
+ /* AP */
+ launch |= (u64) mmu_get_ap(mmu_virtual_psize) << PPC_BITLSHIFT(17);
+
+ /* PID */
+ launch |= pid << PPC_BITLSHIFT(38);
+
+ return mmio_launch_invalidate(npu, launch, va);
+}
+
+#define mn_to_npu_context(x) container_of(x, struct npu_context, mn)
+
+/*
+ * Invalidate either a single address or an entire PID depending on
+ * the value of va.
+ */
+static void mmio_invalidate(struct npu_context *npu_context, int va,
+ unsigned long address)
+{
+ int i, j, reg;
+ struct npu *npu;
+ struct pnv_phb *nphb;
+ struct pci_dev *npdev;
+ struct {
+ struct npu *npu;
+ int reg;
+ } mmio_atsd_reg[NV_MAX_NPUS];
+ unsigned long pid = npu_context->mm->context.id;
+
+ /*
+ * Loop over all the NPUs this process is active on and launch
+ * an invalidate.
+ */
+ for (i = 0; i <= max_npu2_index; i++) {
+ mmio_atsd_reg[i].reg = -1;
+ for (j = 0; j < NV_MAX_LINKS; j++) {
+ npdev = npu_context->npdev[i][j];
+ if (!npdev)
+ continue;
+
+ nphb = pci_bus_to_host(npdev->bus)->private_data;
+ npu = &nphb->npu;
+ mmio_atsd_reg[i].npu = npu;
+
+ if (va)
+ mmio_atsd_reg[i].reg =
+ mmio_invalidate_va(npu, address, pid);
+ else
+ mmio_atsd_reg[i].reg =
+ mmio_invalidate_pid(npu, pid);
+
+ /*
+ * The NPU hardware forwards the shootdown to all GPUs
+ * so we only have to launch one shootdown per NPU.
+ */
+ break;
+ }
+ }
+
+ /*
+ * Unfortunately the nest mmu does not support flushing specific
+ * addresses so we have to flush the whole mm.
+ */
+ flush_tlb_mm(npu_context->mm);
+
+ /* Wait for all invalidations to complete */
+ for (i = 0; i <= max_npu2_index; i++) {
+ if (mmio_atsd_reg[i].reg < 0)
+ continue;
+
+ /* Wait for completion */
+ npu = mmio_atsd_reg[i].npu;
+ reg = mmio_atsd_reg[i].reg;
+ while (__raw_readq(npu->mmio_atsd_regs[reg] + XTS_ATSD_STAT))
+ cpu_relax();
+ put_mmio_atsd_reg(npu, reg);
+ }
+}
+
+static void pnv_npu2_mn_release(struct mmu_notifier *mn,
+ struct mm_struct *mm)
+{
+ struct npu_context *npu_context = mn_to_npu_context(mn);
+
+ /* Call into device driver to stop requests to the NMMU */
+ if (npu_context->release_cb)
+ npu_context->release_cb(npu_context, npu_context->priv);
+
+ /*
+ * There should be no more translation requests for this PID, but we
+ * need to ensure any entries for it are removed from the TLB.
+ */
+ mmio_invalidate(npu_context, 0, 0);
+}
+
+static void pnv_npu2_mn_change_pte(struct mmu_notifier *mn,
+ struct mm_struct *mm,
+ unsigned long address,
+ pte_t pte)
+{
+ struct npu_context *npu_context = mn_to_npu_context(mn);
+
+ mmio_invalidate(npu_context, 1, address);
+}
+
+static void pnv_npu2_mn_invalidate_page(struct mmu_notifier *mn,
+ struct mm_struct *mm,
+ unsigned long address)
+{
+ struct npu_context *npu_context = mn_to_npu_context(mn);
+
+ mmio_invalidate(npu_context, 1, address);
+}
+
+static void pnv_npu2_mn_invalidate_range(struct mmu_notifier *mn,
+ struct mm_struct *mm,
+ unsigned long start, unsigned long end)
+{
+ struct npu_context *npu_context = mn_to_npu_context(mn);
+ unsigned long address;
+
+ for (address = start; address <= end; address += PAGE_SIZE)
+ mmio_invalidate(npu_context, 1, address);
+}
+
+static const struct mmu_notifier_ops nv_nmmu_notifier_ops = {
+ .release = pnv_npu2_mn_release,
+ .change_pte = pnv_npu2_mn_change_pte,
+ .invalidate_page = pnv_npu2_mn_invalidate_page,
+ .invalidate_range = pnv_npu2_mn_invalidate_range,
+};
+
+/*
+ * Call into OPAL to setup the nmmu context for the current task in
+ * the NPU. This must be called to setup the context tables before the
+ * GPU issues ATRs. gpdev should be a pointer to the PCIe GPU device.
+ *
+ * A release callback should be registered to allow a device driver to
+ * be notified that it should not launch any new translation requests
+ * as the final TLB invalidate is about to occur.
+ *
+ * Returns an error if no contexts are currently available, or an
+ * npu_context which should be passed to pnv_npu2_handle_fault().
+ *
+ * mmap_sem must be held in write mode.
+ */
+struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev,
+ unsigned long flags,
+ struct npu_context *(*cb)(struct npu_context *, void *),
+ void *priv)
+{
+ int rc;
+ u32 nvlink_index;
+ struct device_node *nvlink_dn;
+ struct mm_struct *mm = current->mm;
+ struct pnv_phb *nphb;
+ struct npu *npu;
+ struct npu_context *npu_context;
+
+ /*
+ * At present we don't support GPUs connected to multiple NPUs and I'm
+ * not sure the hardware does either.
+ */
+ struct pci_dev *npdev = pnv_pci_get_npu_dev(gpdev, 0);
+
+ if (!firmware_has_feature(FW_FEATURE_OPAL))
+ return ERR_PTR(-ENODEV);
+
+ if (!npdev)
+ /* No nvlink associated with this GPU device */
+ return ERR_PTR(-ENODEV);
+
+ if (!mm) {
+ /* kernel thread contexts are not supported */
+ return ERR_PTR(-EINVAL);
+ }
+
+ nphb = pci_bus_to_host(npdev->bus)->private_data;
+ npu = &nphb->npu;
+
+ /*
+ * Setup the NPU context table for a particular GPU. These need to be
+ * per-GPU as we need the tables to filter ATSDs when there are no
+ * active contexts on a particular GPU.
+ */
+ rc = opal_npu_init_context(nphb->opal_id, mm->context.id, flags,
+ PCI_DEVID(gpdev->bus->number, gpdev->devfn));
+ if (rc < 0)
+ return ERR_PTR(-ENOSPC);
+
+ /*
+ * We store the npu pci device so we can more easily get at the
+ * associated npus.
+ */
+ npu_context = mm->context.npu_context;
+ if (!npu_context) {
+ npu_context = kzalloc(sizeof(struct npu_context), GFP_KERNEL);
+ if (!npu_context)
+ return ERR_PTR(-ENOMEM);
+
+ mm->context.npu_context = npu_context;
+ npu_context->mm = mm;
+ npu_context->mn.ops = &nv_nmmu_notifier_ops;
+ __mmu_notifier_register(&npu_context->mn, mm);
+ kref_init(&npu_context->kref);
+ } else {
+ kref_get(&npu_context->kref);
+ }
+
+ npu_context->release_cb = cb;
+ npu_context->priv = priv;
+ nvlink_dn = of_parse_phandle(npdev->dev.of_node, "ibm,nvlink", 0);
+ if (WARN_ON(of_property_read_u32(nvlink_dn, "ibm,npu-link-index",
+ &nvlink_index)))
+ return ERR_PTR(-ENODEV);
+ npu_context->npdev[npu->index][nvlink_index] = npdev;
+
+ return npu_context;
+}
+EXPORT_SYMBOL(pnv_npu2_init_context);
+
+static void pnv_npu2_release_context(struct kref *kref)
+{
+ struct npu_context *npu_context =
+ container_of(kref, struct npu_context, kref);
+
+ npu_context->mm->context.npu_context = NULL;
+ mmu_notifier_unregister(&npu_context->mn,
+ npu_context->mm);
+
+ kfree(npu_context);
+}
+
+void pnv_npu2_destroy_context(struct npu_context *npu_context,
+ struct pci_dev *gpdev)
+{
+ struct pnv_phb *nphb, *phb;
+ struct npu *npu;
+ struct pci_dev *npdev = pnv_pci_get_npu_dev(gpdev, 0);
+ struct device_node *nvlink_dn;
+ u32 nvlink_index;
+
+ if (WARN_ON(!npdev))
+ return;
+
+ if (!firmware_has_feature(FW_FEATURE_OPAL))
+ return;
+
+ nphb = pci_bus_to_host(npdev->bus)->private_data;
+ npu = &nphb->npu;
+ phb = pci_bus_to_host(gpdev->bus)->private_data;
+ nvlink_dn = of_parse_phandle(npdev->dev.of_node, "ibm,nvlink", 0);
+ if (WARN_ON(of_property_read_u32(nvlink_dn, "ibm,npu-link-index",
+ &nvlink_index)))
+ return;
+ npu_context->npdev[npu->index][nvlink_index] = NULL;
+ opal_npu_destroy_context(phb->opal_id, npu_context->mm->context.id,
+ PCI_DEVID(gpdev->bus->number, gpdev->devfn));
+ kref_put(&npu_context->kref, pnv_npu2_release_context);
+}
+EXPORT_SYMBOL(pnv_npu2_destroy_context);
+
+/*
+ * Assumes mmap_sem is held for the context's associated mm.
+ */
+int pnv_npu2_handle_fault(struct npu_context *context, uintptr_t *ea,
+ unsigned long *flags, unsigned long *status, int count)
+{
+ u64 rc = 0, result = 0;
+ int i, is_write;
+ struct page *page[1];
+
+ /* mmap_sem should be held so the mm_struct must be present */
+ struct mm_struct *mm = context->mm;
+
+ if (!firmware_has_feature(FW_FEATURE_OPAL))
+ return -ENODEV;
+
+ WARN_ON(!rwsem_is_locked(&mm->mmap_sem));
+
+ for (i = 0; i < count; i++) {
+ is_write = flags[i] & NPU2_WRITE;
+ rc = get_user_pages_remote(NULL, mm, ea[i], 1,
+ is_write ? FOLL_WRITE : 0,
+ page, NULL, NULL);
+
+ /*
+ * To support virtualised environments we will have to do an
+ * access to the page to ensure it gets faulted into the
+ * hypervisor. For the moment virtualisation is not supported in
+ * other areas so leave the access out.
+ */
+ if (rc != 1) {
+ status[i] = rc;
+ result = -EFAULT;
+ continue;
+ }
+
+ status[i] = 0;
+ put_page(page[0]);
+ }
+
+ return result;
+}
+EXPORT_SYMBOL(pnv_npu2_handle_fault);
+
+int pnv_npu2_init(struct pnv_phb *phb)
+{
+ unsigned int i;
+ u64 mmio_atsd;
+ struct device_node *dn;
+ struct pci_dev *gpdev;
+ static int npu_index;
+ uint64_t rc = 0;
+
+ for_each_child_of_node(phb->hose->dn, dn) {
+ gpdev = pnv_pci_get_gpu_dev(get_pci_dev(dn));
+ if (gpdev) {
+ rc = opal_npu_map_lpar(phb->opal_id,
+ PCI_DEVID(gpdev->bus->number, gpdev->devfn),
+ 0, 0);
+ if (rc)
+ dev_err(&gpdev->dev,
+ "Error %lld mapping device to LPAR\n",
+ rc);
+ }
+ }
+
+ for (i = 0; !of_property_read_u64_index(phb->hose->dn, "ibm,mmio-atsd",
+ i, &mmio_atsd); i++)
+ phb->npu.mmio_atsd_regs[i] = ioremap(mmio_atsd, 32);
+
+ pr_info("NPU%lld: Found %d MMIO ATSD registers", phb->opal_id, i);
+ phb->npu.mmio_atsd_count = i;
+ phb->npu.mmio_atsd_usage = 0;
+ npu_index++;
+ if (WARN_ON(npu_index >= NV_MAX_NPUS))
+ return -ENOSPC;
+ max_npu2_index = npu_index;
+ phb->npu.index = npu_index;
+
+ return 0;
+}
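
The new ATSD launch path above packs its fields using IBM (MSB-0) bit numbering, where PPC_BIT(n) is 1ULL << (63 - n). The sketch below reproduces the PID-scoped launch word built by mmio_invalidate_pid() as a standalone program; AP_4K is a placeholder for the value the kernel obtains from mmu_get_ap(mmu_virtual_psize), and the field positions are copied from the patch.

  #include <stdio.h>
  #include <stdint.h>

  /* IBM (MSB-0) bit numbering helpers, as used by PPC_BIT()/PPC_BITLSHIFT(). */
  #define PPC_BITLSHIFT(be)  (63 - (be))
  #define PPC_BIT(bit)       (1ULL << PPC_BITLSHIFT(bit))

  /* Hypothetical AP encoding for a 4K base page; the real value comes
   * from mmu_get_ap(mmu_virtual_psize) in the kernel. */
  #define AP_4K  0x0ULL

  static uint64_t atsd_launch_pid(uint64_t pid)
  {
          uint64_t launch = 0;

          launch |= PPC_BIT(12);                  /* IS: invalidate matching PID */
          launch |= PPC_BIT(13);                  /* PRS: process scoped         */
          launch |= AP_4K << PPC_BITLSHIFT(17);   /* AP: page size encoding      */
          launch |= pid << PPC_BITLSHIFT(38);     /* PID field                   */

          return launch;
  }

  int main(void)
  {
          printf("launch = 0x%016llx\n",
                 (unsigned long long)atsd_launch_pid(42));
          return 0;
  }

The VA-scoped variant differs only in leaving the IS bit clear and passing a target address to the launch register, exactly as mmio_invalidate_va() does above.
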
diff --git a/arch/powerpc/platforms/powernv/opal-lpc.c b/arch/powerpc/platforms/powernv/opal-lpc.c
index a91d7876fae2..6c7ad1d8b32e 100644
--- a/arch/powerpc/platforms/powernv/opal-lpc.c
+++ b/arch/powerpc/platforms/powernv/opal-lpc.c
@@ -12,7 +12,6 @@
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/bug.h>
-#include <linux/debugfs.h>
#include <linux/io.h>
#include <linux/slab.h>
@@ -21,7 +20,7 @@
#include <asm/opal.h>
#include <asm/prom.h>
#include <linux/uaccess.h>
-#include <asm/debug.h>
+#include <asm/debugfs.h>
#include <asm/isa-bridge.h>
static int opal_lpc_chip_id = -1;
diff --git a/arch/powerpc/platforms/powernv/opal-sensor.c b/arch/powerpc/platforms/powernv/opal-sensor.c
index 308efd170c27..aa267f120033 100644
--- a/arch/powerpc/platforms/powernv/opal-sensor.c
+++ b/arch/powerpc/platforms/powernv/opal-sensor.c
@@ -64,6 +64,10 @@ int opal_get_sensor_data(u32 sensor_hndl, u32 *sensor_data)
*sensor_data = be32_to_cpu(data);
break;
+ case OPAL_WRONG_STATE:
+ ret = -EIO;
+ break;
+
default:
ret = opal_error_code(ret);
break;
diff --git a/arch/powerpc/platforms/powernv/opal-wrappers.S b/arch/powerpc/platforms/powernv/opal-wrappers.S
index da8a0f7a035c..f620572f891f 100644
--- a/arch/powerpc/platforms/powernv/opal-wrappers.S
+++ b/arch/powerpc/platforms/powernv/opal-wrappers.S
@@ -50,21 +50,13 @@ END_FTR_SECTION(0, 1); \
#define OPAL_BRANCH(LABEL)
#endif
-/* TODO:
- *
- * - Trace irqs in/off (needs saving/restoring all args, argh...)
- * - Get r11 feed up by Dave so I can have better register usage
+/*
+ * DO_OPAL_CALL assumes:
+ * r0 = opal call token
+ * r12 = msr
+ * LR has been saved
*/
-
-#define OPAL_CALL(name, token) \
- _GLOBAL_TOC(name); \
- mfmsr r12; \
- mflr r0; \
- andi. r11,r12,MSR_IR|MSR_DR; \
- std r0,PPC_LR_STKOFF(r1); \
- li r0,token; \
- beq opal_real_call; \
- OPAL_BRANCH(opal_tracepoint_entry) \
+#define DO_OPAL_CALL() \
mfcr r11; \
stw r11,8(r1); \
li r11,0; \
@@ -83,6 +75,18 @@ END_FTR_SECTION(0, 1); \
mtspr SPRN_HSRR0,r12; \
hrfid
+#define OPAL_CALL(name, token) \
+ _GLOBAL_TOC(name); \
+ mfmsr r12; \
+ mflr r0; \
+ andi. r11,r12,MSR_IR|MSR_DR; \
+ std r0,PPC_LR_STKOFF(r1); \
+ li r0,token; \
+ beq opal_real_call; \
+ OPAL_BRANCH(opal_tracepoint_entry) \
+ DO_OPAL_CALL()
+
+
opal_return:
/*
* Fixup endian on OPAL return... we should be able to simplify
@@ -148,26 +152,13 @@ opal_tracepoint_entry:
ld r8,STK_REG(R29)(r1)
ld r9,STK_REG(R30)(r1)
ld r10,STK_REG(R31)(r1)
+
+ /* set up LR so we return via opal_tracepoint_return */
LOAD_REG_ADDR(r11,opal_tracepoint_return)
- mfcr r12
std r11,16(r1)
- stw r12,8(r1)
- li r11,0
+
mfmsr r12
- ori r11,r11,MSR_EE
- std r12,PACASAVEDMSR(r13)
- andc r12,r12,r11
- mtmsrd r12,1
- LOAD_REG_ADDR(r11,opal_return)
- mtlr r11
- li r11,MSR_DR|MSR_IR|MSR_LE
- andc r12,r12,r11
- mtspr SPRN_HSRR1,r12
- LOAD_REG_ADDR(r11,opal)
- ld r12,8(r11)
- ld r2,0(r11)
- mtspr SPRN_HSRR0,r12
- hrfid
+ DO_OPAL_CALL()
opal_tracepoint_return:
std r3,STK_REG(R31)(r1)
@@ -301,3 +292,21 @@ OPAL_CALL(opal_int_eoi, OPAL_INT_EOI);
OPAL_CALL(opal_int_set_mfrr, OPAL_INT_SET_MFRR);
OPAL_CALL(opal_pci_tce_kill, OPAL_PCI_TCE_KILL);
OPAL_CALL(opal_nmmu_set_ptcr, OPAL_NMMU_SET_PTCR);
+OPAL_CALL(opal_xive_reset, OPAL_XIVE_RESET);
+OPAL_CALL(opal_xive_get_irq_info, OPAL_XIVE_GET_IRQ_INFO);
+OPAL_CALL(opal_xive_get_irq_config, OPAL_XIVE_GET_IRQ_CONFIG);
+OPAL_CALL(opal_xive_set_irq_config, OPAL_XIVE_SET_IRQ_CONFIG);
+OPAL_CALL(opal_xive_get_queue_info, OPAL_XIVE_GET_QUEUE_INFO);
+OPAL_CALL(opal_xive_set_queue_info, OPAL_XIVE_SET_QUEUE_INFO);
+OPAL_CALL(opal_xive_donate_page, OPAL_XIVE_DONATE_PAGE);
+OPAL_CALL(opal_xive_alloc_vp_block, OPAL_XIVE_ALLOCATE_VP_BLOCK);
+OPAL_CALL(opal_xive_free_vp_block, OPAL_XIVE_FREE_VP_BLOCK);
+OPAL_CALL(opal_xive_allocate_irq, OPAL_XIVE_ALLOCATE_IRQ);
+OPAL_CALL(opal_xive_free_irq, OPAL_XIVE_FREE_IRQ);
+OPAL_CALL(opal_xive_get_vp_info, OPAL_XIVE_GET_VP_INFO);
+OPAL_CALL(opal_xive_set_vp_info, OPAL_XIVE_SET_VP_INFO);
+OPAL_CALL(opal_xive_sync, OPAL_XIVE_SYNC);
+OPAL_CALL(opal_xive_dump, OPAL_XIVE_DUMP);
+OPAL_CALL(opal_npu_init_context, OPAL_NPU_INIT_CONTEXT);
+OPAL_CALL(opal_npu_destroy_context, OPAL_NPU_DESTROY_CONTEXT);
+OPAL_CALL(opal_npu_map_lpar, OPAL_NPU_MAP_LPAR);
diff --git a/arch/powerpc/platforms/powernv/opal-xscom.c b/arch/powerpc/platforms/powernv/opal-xscom.c
index d0ac535cf5d7..28651fb25417 100644
--- a/arch/powerpc/platforms/powernv/opal-xscom.c
+++ b/arch/powerpc/platforms/powernv/opal-xscom.c
@@ -73,25 +73,32 @@ static int opal_xscom_err_xlate(int64_t rc)
static u64 opal_scom_unmangle(u64 addr)
{
+ u64 tmp;
+
/*
- * XSCOM indirect addresses have the top bit set. Additionally
- * the rest of the top 3 nibbles is always 0.
+ * XSCOM addresses use the top nibble to set indirect mode and
+ * its form. Bits 4-11 are always 0.
*
* Because the debugfs interface uses signed offsets and shifts
* the address left by 3, we basically cannot use the top 4 bits
* of the 64-bit address, and thus cannot use the indirect bit.
*
- * To deal with that, we support the indirect bit being in bit
- * 4 (IBM notation) instead of bit 0 in this API, we do the
- * conversion here. To leave room for further xscom address
- * expansion, we only clear out the top byte
+ * To deal with that, we support the indirect bits being in
+ * bits 4-7 (IBM notation) instead of bits 0-3 in this API; we
+ * do the conversion here.
*
- * For in-kernel use, we also support the real indirect bit, so
- * we test for any of the top 5 bits
+ * For in-kernel use, we don't need to do this mangling; in-kernel
+ * callers won't have bits 4-7 set.
*
+ * So:
+ * debugfs will always set 0-3 = 0 and clear 4-7
+ * kernel will always clear 0-3 = 0 and set 4-7
*/
- if (addr & (0x1full << 59))
- addr = (addr & ~(0xffull << 56)) | (1ull << 63);
+ tmp = addr;
+ tmp &= 0x0f00000000000000;
+ addr &= 0xf0ffffffffffffff;
+ addr |= tmp << 4;
+
return addr;
}
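
The replacement unmangling above is pure bit movement: the indirect nibble that debugfs supplies in bits 4-7 (IBM notation, the second-highest nibble) is hoisted into the top nibble before the address reaches firmware, while kernel-supplied addresses pass through untouched. A minimal userspace restatement of that shuffle, for illustration only:

  #include <stdio.h>
  #include <stdint.h>

  static uint64_t scom_unmangle(uint64_t addr)
  {
          uint64_t tmp = addr & 0x0f00000000000000ULL; /* debugfs indirect nibble */

          addr &= 0xf0ffffffffffffffULL;               /* clear it in place       */
          addr |= tmp << 4;                            /* re-insert as top nibble */
          return addr;
  }

  int main(void)
  {
          /* 0x08... in the second nibble becomes 0x80... in the top nibble. */
          printf("0x%016llx\n",
                 (unsigned long long)scom_unmangle(0x0800000012345678ULL));
          return 0;
  }

It prints 0x8000000012345678 for the debugfs-style input 0x0800000012345678.
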
diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c
index e0f856bfbfe8..7925a9d72cca 100644
--- a/arch/powerpc/platforms/powernv/opal.c
+++ b/arch/powerpc/platforms/powernv/opal.c
@@ -435,7 +435,7 @@ int opal_machine_check(struct pt_regs *regs)
evt.version);
return 0;
}
- machine_check_print_event_info(&evt);
+ machine_check_print_event_info(&evt, user_mode(regs));
if (opal_recover_mce(regs, &evt))
return 1;
@@ -595,6 +595,80 @@ static void opal_export_symmap(void)
pr_warn("Error %d creating OPAL symbols file\n", rc);
}
+static ssize_t export_attr_read(struct file *fp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf,
+ loff_t off, size_t count)
+{
+ return memory_read_from_buffer(buf, count, &off, bin_attr->private,
+ bin_attr->size);
+}
+
+/*
+ * opal_export_attrs: creates a sysfs node for each property listed in
+ * the device-tree under /ibm,opal/firmware/exports/.
+ * All new sysfs nodes are created under /sys/firmware/opal/exports/.
+ * This allows reserved memory regions (e.g. HDAT) to be read.
+ * The new sysfs nodes are only readable by root.
+ */
+static void opal_export_attrs(void)
+{
+ struct bin_attribute *attr;
+ struct device_node *np;
+ struct property *prop;
+ struct kobject *kobj;
+ u64 vals[2];
+ int rc;
+
+ np = of_find_node_by_path("/ibm,opal/firmware/exports");
+ if (!np)
+ return;
+
+ /* Create new 'exports' directory - /sys/firmware/opal/exports */
+ kobj = kobject_create_and_add("exports", opal_kobj);
+ if (!kobj) {
+ pr_warn("kobject_create_and_add() of exports failed\n");
+ return;
+ }
+
+ for_each_property_of_node(np, prop) {
+ if (!strcmp(prop->name, "name") || !strcmp(prop->name, "phandle"))
+ continue;
+
+ if (of_property_read_u64_array(np, prop->name, &vals[0], 2))
+ continue;
+
+ attr = kzalloc(sizeof(*attr), GFP_KERNEL);
+
+ if (attr == NULL) {
+ pr_warn("Failed kmalloc for bin_attribute!");
+ continue;
+ }
+
+ sysfs_bin_attr_init(attr);
+ attr->attr.name = kstrdup(prop->name, GFP_KERNEL);
+ attr->attr.mode = 0400;
+ attr->read = export_attr_read;
+ attr->private = __va(vals[0]);
+ attr->size = vals[1];
+
+ if (attr->attr.name == NULL) {
+ pr_warn("Failed kstrdup for bin_attribute attr.name");
+ kfree(attr);
+ continue;
+ }
+
+ rc = sysfs_create_bin_file(kobj, attr);
+ if (rc) {
+ pr_warn("Error %d creating OPAL sysfs exports/%s file\n",
+ rc, prop->name);
+ kfree(attr->attr.name);
+ kfree(attr);
+ }
+ }
+
+ of_node_put(np);
+}
+
static void __init opal_dump_region_init(void)
{
void *addr;
@@ -733,6 +807,9 @@ static int __init opal_init(void)
opal_msglog_sysfs_init();
}
+ /* Export all properties */
+ opal_export_attrs();
+
/* Initialize platform devices: IPMI backend, PRD & flash interface */
opal_pdev_init("ibm,opal-ipmi");
opal_pdev_init("ibm,opal-flash");
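
Once opal_export_attrs() has run, each exported region appears as a root-only (mode 0400) binary file under /sys/firmware/opal/exports/. A minimal sketch of consuming one from userspace; "HDAT" is a hypothetical node name here, the real names come from the property names found under /ibm,opal/firmware/exports in the device tree.

  #include <stdio.h>
  #include <stdlib.h>

  int main(void)
  {
          const char *path = "/sys/firmware/opal/exports/HDAT";
          unsigned char buf[16];
          size_t n;
          FILE *f = fopen(path, "rb");   /* must be run as root */

          if (!f) {
                  perror(path);
                  return EXIT_FAILURE;
          }

          /* Dump the first few bytes of the exported region. */
          n = fread(buf, 1, sizeof(buf), f);
          for (size_t i = 0; i < n; i++)
                  printf("%02x ", buf[i]);
          printf("\n");

          fclose(f);
          return EXIT_SUCCESS;
  }
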
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index e36738291c32..283caf1070c9 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -14,7 +14,6 @@
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/crash_dump.h>
-#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/init.h>
@@ -38,7 +37,7 @@
#include <asm/iommu.h>
#include <asm/tce.h>
#include <asm/xics.h>
-#include <asm/debug.h>
+#include <asm/debugfs.h>
#include <asm/firmware.h>
#include <asm/pnv-pci.h>
#include <asm/mmzone.h>
@@ -1262,6 +1261,8 @@ static void pnv_pci_ioda_setup_PEs(void)
/* PE#0 is needed for error reporting */
pnv_ioda_reserve_pe(phb, 0);
pnv_ioda_setup_npu_PEs(hose->bus);
+ if (phb->model == PNV_PHB_MODEL_NPU2)
+ pnv_npu2_init(phb);
}
}
}
@@ -1424,8 +1425,7 @@ static void pnv_pci_ioda2_release_dma_pe(struct pci_dev *dev, struct pnv_ioda_pe
iommu_group_put(pe->table_group.group);
BUG_ON(pe->table_group.group);
}
- pnv_pci_ioda2_table_free_pages(tbl);
- iommu_free_table(tbl, of_node_full_name(dev->dev.of_node));
+ iommu_tce_table_put(tbl);
}
static void pnv_ioda_release_vf_PE(struct pci_dev *pdev)
@@ -1860,6 +1860,17 @@ static int pnv_ioda1_tce_xchg(struct iommu_table *tbl, long index,
return ret;
}
+
+static int pnv_ioda1_tce_xchg_rm(struct iommu_table *tbl, long index,
+ unsigned long *hpa, enum dma_data_direction *direction)
+{
+ long ret = pnv_tce_xchg(tbl, index, hpa, direction);
+
+ if (!ret)
+ pnv_pci_p7ioc_tce_invalidate(tbl, index, 1, true);
+
+ return ret;
+}
#endif
static void pnv_ioda1_tce_free(struct iommu_table *tbl, long index,
@@ -1874,6 +1885,7 @@ static struct iommu_table_ops pnv_ioda1_iommu_ops = {
.set = pnv_ioda1_tce_build,
#ifdef CONFIG_IOMMU_API
.exchange = pnv_ioda1_tce_xchg,
+ .exchange_rm = pnv_ioda1_tce_xchg_rm,
#endif
.clear = pnv_ioda1_tce_free,
.get = pnv_tce_get,
@@ -1883,7 +1895,7 @@ static struct iommu_table_ops pnv_ioda1_iommu_ops = {
#define PHB3_TCE_KILL_INVAL_PE PPC_BIT(1)
#define PHB3_TCE_KILL_INVAL_ONE PPC_BIT(2)
-void pnv_pci_phb3_tce_invalidate_entire(struct pnv_phb *phb, bool rm)
+static void pnv_pci_phb3_tce_invalidate_entire(struct pnv_phb *phb, bool rm)
{
__be64 __iomem *invalidate = pnv_ioda_get_inval_reg(phb, rm);
const unsigned long val = PHB3_TCE_KILL_INVAL_ALL;
@@ -1948,7 +1960,7 @@ static void pnv_pci_ioda2_tce_invalidate(struct iommu_table *tbl,
{
struct iommu_table_group_link *tgl;
- list_for_each_entry_rcu(tgl, &tbl->it_group_list, next) {
+ list_for_each_entry_lockless(tgl, &tbl->it_group_list, next) {
struct pnv_ioda_pe *pe = container_of(tgl->table_group,
struct pnv_ioda_pe, table_group);
struct pnv_phb *phb = pe->phb;
@@ -1979,6 +1991,14 @@ static void pnv_pci_ioda2_tce_invalidate(struct iommu_table *tbl,
}
}
+void pnv_pci_ioda2_tce_invalidate_entire(struct pnv_phb *phb, bool rm)
+{
+ if (phb->model == PNV_PHB_MODEL_NPU || phb->model == PNV_PHB_MODEL_PHB3)
+ pnv_pci_phb3_tce_invalidate_entire(phb, rm);
+ else
+ opal_pci_tce_kill(phb->opal_id, OPAL_PCI_TCE_KILL, 0, 0, 0, 0);
+}
+
static int pnv_ioda2_tce_build(struct iommu_table *tbl, long index,
long npages, unsigned long uaddr,
enum dma_data_direction direction,
@@ -2004,6 +2024,17 @@ static int pnv_ioda2_tce_xchg(struct iommu_table *tbl, long index,
return ret;
}
+
+static int pnv_ioda2_tce_xchg_rm(struct iommu_table *tbl, long index,
+ unsigned long *hpa, enum dma_data_direction *direction)
+{
+ long ret = pnv_tce_xchg(tbl, index, hpa, direction);
+
+ if (!ret)
+ pnv_pci_ioda2_tce_invalidate(tbl, index, 1, true);
+
+ return ret;
+}
#endif
static void pnv_ioda2_tce_free(struct iommu_table *tbl, long index,
@@ -2017,13 +2048,13 @@ static void pnv_ioda2_tce_free(struct iommu_table *tbl, long index,
static void pnv_ioda2_table_free(struct iommu_table *tbl)
{
pnv_pci_ioda2_table_free_pages(tbl);
- iommu_free_table(tbl, "pnv");
}
static struct iommu_table_ops pnv_ioda2_iommu_ops = {
.set = pnv_ioda2_tce_build,
#ifdef CONFIG_IOMMU_API
.exchange = pnv_ioda2_tce_xchg,
+ .exchange_rm = pnv_ioda2_tce_xchg_rm,
#endif
.clear = pnv_ioda2_tce_free,
.get = pnv_tce_get,
@@ -2128,6 +2159,9 @@ static void pnv_pci_ioda1_setup_dma_pe(struct pnv_phb *phb,
found:
tbl = pnv_pci_table_alloc(phb->hose->node);
+ if (WARN_ON(!tbl))
+ return;
+
iommu_register_group(&pe->table_group, phb->hose->global_number,
pe->pe_number);
pnv_pci_link_table_and_group(phb->hose->node, 0, tbl, &pe->table_group);
@@ -2203,7 +2237,7 @@ found:
__free_pages(tce_mem, get_order(tce32_segsz * segs));
if (tbl) {
pnv_pci_unlink_table_and_group(tbl, &pe->table_group);
- iommu_free_table(tbl, "pnv");
+ iommu_tce_table_put(tbl);
}
}
@@ -2293,16 +2327,16 @@ static long pnv_pci_ioda2_create_table(struct iommu_table_group *table_group,
if (!tbl)
return -ENOMEM;
+ tbl->it_ops = &pnv_ioda2_iommu_ops;
+
ret = pnv_pci_ioda2_table_alloc_pages(nid,
bus_offset, page_shift, window_size,
levels, tbl);
if (ret) {
- iommu_free_table(tbl, "pnv");
+ iommu_tce_table_put(tbl);
return ret;
}
- tbl->it_ops = &pnv_ioda2_iommu_ops;
-
*ptbl = tbl;
return 0;
@@ -2343,7 +2377,7 @@ static long pnv_pci_ioda2_setup_default_config(struct pnv_ioda_pe *pe)
if (rc) {
pe_err(pe, "Failed to configure 32-bit TCE table, err %ld\n",
rc);
- pnv_ioda2_table_free(tbl);
+ iommu_tce_table_put(tbl);
return rc;
}
@@ -2414,7 +2448,8 @@ static unsigned long pnv_pci_ioda2_get_table_size(__u32 page_shift,
tce_table_size /= direct_table_size;
tce_table_size <<= 3;
- tce_table_size = _ALIGN_UP(tce_table_size, direct_table_size);
+ tce_table_size = max_t(unsigned long,
+ tce_table_size, direct_table_size);
}
return bytes;
@@ -2431,7 +2466,7 @@ static void pnv_ioda2_take_ownership(struct iommu_table_group *table_group)
pnv_pci_ioda2_unset_window(&pe->table_group, 0);
if (pe->pbus)
pnv_ioda_setup_bus_dma(pe, pe->pbus, false);
- pnv_ioda2_table_free(tbl);
+ iommu_tce_table_put(tbl);
}
static void pnv_ioda2_release_ownership(struct iommu_table_group *table_group)
@@ -2735,9 +2770,7 @@ static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
if (rc)
return;
- if (pe->flags & PNV_IODA_PE_DEV)
- iommu_add_device(&pe->pdev->dev);
- else if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL))
+ if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL))
pnv_ioda_setup_bus_dma(pe, pe->pbus, true);
}
@@ -3297,6 +3330,11 @@ static void pnv_pci_setup_bridge(struct pci_bus *bus, unsigned long type)
}
}
+static resource_size_t pnv_pci_default_alignment(void)
+{
+ return PAGE_SIZE;
+}
+
#ifdef CONFIG_PCI_IOV
static resource_size_t pnv_pci_iov_resource_alignment(struct pci_dev *pdev,
int resno)
@@ -3406,7 +3444,7 @@ static void pnv_pci_ioda1_release_pe_dma(struct pnv_ioda_pe *pe)
}
free_pages(tbl->it_base, get_order(tbl->it_size << 3));
- iommu_free_table(tbl, "pnv");
+ iommu_tce_table_put(tbl);
}
static void pnv_pci_ioda2_release_pe_dma(struct pnv_ioda_pe *pe)
@@ -3433,7 +3471,7 @@ static void pnv_pci_ioda2_release_pe_dma(struct pnv_ioda_pe *pe)
}
pnv_pci_ioda2_table_free_pages(tbl);
- iommu_free_table(tbl, "pnv");
+ iommu_tce_table_put(tbl);
}
static void pnv_ioda_free_pe_seg(struct pnv_ioda_pe *pe,
@@ -3830,6 +3868,8 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np,
hose->controller_ops = pnv_pci_ioda_controller_ops;
}
+ ppc_md.pcibios_default_alignment = pnv_pci_default_alignment;
+
#ifdef CONFIG_PCI_IOV
ppc_md.pcibios_fixup_sriov = pnv_pci_ioda_fixup_iov_resources;
ppc_md.pcibios_iov_resource_alignment = pnv_pci_iov_resource_alignment;
diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c
index eb835e977e33..935ccb249a8a 100644
--- a/arch/powerpc/platforms/powernv/pci.c
+++ b/arch/powerpc/platforms/powernv/pci.c
@@ -758,7 +758,7 @@ void pnv_tce_free(struct iommu_table *tbl, long index, long npages)
unsigned long pnv_tce_get(struct iommu_table *tbl, long index)
{
- return *(pnv_tce(tbl, index - tbl->it_offset));
+ return be64_to_cpu(*(pnv_tce(tbl, index - tbl->it_offset)));
}
struct iommu_table *pnv_pci_table_alloc(int nid)
@@ -766,7 +766,11 @@ struct iommu_table *pnv_pci_table_alloc(int nid)
struct iommu_table *tbl;
tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL, nid);
+ if (!tbl)
+ return NULL;
+
INIT_LIST_HEAD_RCU(&tbl->it_group_list);
+ kref_init(&tbl->it_kref);
return tbl;
}
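
The pnv_tce_get() change above makes explicit that the TCE table stores big-endian entries and converts them to CPU endianness on read. A tiny host-side illustration of that conversion, using the glibc <endian.h> helpers and a made-up table entry (the real low-order bits carry the read/write permission flags):

  #include <stdio.h>
  #include <stdint.h>
  #include <endian.h>

  int main(void)
  {
          /* One fake big-endian table entry, as it would sit in memory. */
          uint64_t table[1] = { htobe64(0x8000000012340003ULL) };

          /* What the fixed pnv_tce_get() effectively returns. */
          uint64_t tce = be64toh(table[0]);

          printf("raw=0x%016llx decoded=0x%016llx\n",
                 (unsigned long long)table[0],
                 (unsigned long long)tce);
          return 0;
  }
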
diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h
index e1d3e5526b54..18c8a2fa03b8 100644
--- a/arch/powerpc/platforms/powernv/pci.h
+++ b/arch/powerpc/platforms/powernv/pci.h
@@ -7,6 +7,9 @@
struct pci_dn;
+/* Maximum possible number of ATSD MMIO registers per NPU */
+#define NV_NMMU_ATSD_REGS 8
+
enum pnv_phb_type {
PNV_PHB_IODA1 = 0,
PNV_PHB_IODA2 = 1,
@@ -174,6 +177,16 @@ struct pnv_phb {
struct OpalIoP7IOCErrorData hub_diag;
} diag;
+ /* Nvlink2 data */
+ struct npu {
+ int index;
+ __be64 *mmio_atsd_regs[NV_NMMU_ATSD_REGS];
+ unsigned int mmio_atsd_count;
+
+ /* Bitmask for MMIO register usage */
+ unsigned long mmio_atsd_usage;
+ } npu;
+
#ifdef CONFIG_CXL_BASE
struct cxl_afu *cxl_afu;
#endif
@@ -229,14 +242,14 @@ extern void pe_level_printk(const struct pnv_ioda_pe *pe, const char *level,
/* Nvlink functions */
extern void pnv_npu_try_dma_set_bypass(struct pci_dev *gpdev, bool bypass);
-extern void pnv_pci_phb3_tce_invalidate_entire(struct pnv_phb *phb, bool rm);
+extern void pnv_pci_ioda2_tce_invalidate_entire(struct pnv_phb *phb, bool rm);
extern struct pnv_ioda_pe *pnv_pci_npu_setup_iommu(struct pnv_ioda_pe *npe);
extern long pnv_npu_set_window(struct pnv_ioda_pe *npe, int num,
struct iommu_table *tbl);
extern long pnv_npu_unset_window(struct pnv_ioda_pe *npe, int num);
extern void pnv_npu_take_ownership(struct pnv_ioda_pe *npe);
extern void pnv_npu_release_ownership(struct pnv_ioda_pe *npe);
-
+extern int pnv_npu2_init(struct pnv_phb *phb);
/* cxl functions */
extern bool pnv_cxl_enable_device_hook(struct pci_dev *dev);
diff --git a/arch/powerpc/platforms/powernv/powernv.h b/arch/powerpc/platforms/powernv/powernv.h
index 613052232475..6dbc0a1da1f6 100644
--- a/arch/powerpc/platforms/powernv/powernv.h
+++ b/arch/powerpc/platforms/powernv/powernv.h
@@ -18,8 +18,6 @@ static inline void pnv_pci_shutdown(void) { }
#endif
extern u32 pnv_get_supported_cpuidle_states(void);
-extern u64 pnv_deepest_stop_psscr_val;
-extern u64 pnv_deepest_stop_psscr_mask;
extern void pnv_lpc_init(void);
diff --git a/arch/powerpc/platforms/powernv/rng.c b/arch/powerpc/platforms/powernv/rng.c
index 5dcbdea1afac..1a9d84371a4d 100644
--- a/arch/powerpc/platforms/powernv/rng.c
+++ b/arch/powerpc/platforms/powernv/rng.c
@@ -62,7 +62,7 @@ int powernv_get_random_real_mode(unsigned long *v)
rng = raw_cpu_read(powernv_rng);
- *v = rng_whiten(rng, in_rm64(rng->regs_real));
+ *v = rng_whiten(rng, __raw_rm_readq(rng->regs_real));
return 1;
}
diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c
index d50c7d99baaf..2dc7e5fb86c3 100644
--- a/arch/powerpc/platforms/powernv/setup.c
+++ b/arch/powerpc/platforms/powernv/setup.c
@@ -32,6 +32,7 @@
#include <asm/machdep.h>
#include <asm/firmware.h>
#include <asm/xics.h>
+#include <asm/xive.h>
#include <asm/opal.h>
#include <asm/kexec.h>
#include <asm/smp.h>
@@ -76,7 +77,9 @@ static void __init pnv_init(void)
static void __init pnv_init_IRQ(void)
{
- xics_init();
+ /* Try using a XIVE if available, otherwise use a XICS */
+ if (!xive_native_init())
+ xics_init();
WARN_ON(!ppc_md.get_irq);
}
@@ -95,6 +98,10 @@ static void pnv_show_cpuinfo(struct seq_file *m)
else
seq_printf(m, "firmware\t: BML\n");
of_node_put(root);
+ if (radix_enabled())
+ seq_printf(m, "MMU\t\t: Radix\n");
+ else
+ seq_printf(m, "MMU\t\t: Hash\n");
}
static void pnv_prepare_going_down(void)
@@ -218,10 +225,12 @@ static void pnv_kexec_wait_secondaries_down(void)
static void pnv_kexec_cpu_down(int crash_shutdown, int secondary)
{
- xics_kexec_teardown_cpu(secondary);
+ if (xive_enabled())
+ xive_kexec_teardown_cpu(secondary);
+ else
+ xics_kexec_teardown_cpu(secondary);
/* On OPAL, we return all CPUs to firmware */
-
if (!firmware_has_feature(FW_FEATURE_OPAL))
return;
@@ -237,6 +246,10 @@ static void pnv_kexec_cpu_down(int crash_shutdown, int secondary)
/* Primary waits for the secondaries to have reached OPAL */
pnv_kexec_wait_secondaries_down();
+ /* Switch XIVE back to emulation mode */
+ if (xive_enabled())
+ xive_shutdown();
+
/*
* We might be running as little-endian - now that interrupts
* are disabled, reset the HILE bit to big-endian so we don't
diff --git a/arch/powerpc/platforms/powernv/smp.c b/arch/powerpc/platforms/powernv/smp.c
index 8b67e1eefb5c..4aff754b6f2c 100644
--- a/arch/powerpc/platforms/powernv/smp.c
+++ b/arch/powerpc/platforms/powernv/smp.c
@@ -29,12 +29,14 @@
#include <asm/vdso_datapage.h>
#include <asm/cputhreads.h>
#include <asm/xics.h>
+#include <asm/xive.h>
#include <asm/opal.h>
#include <asm/runlatch.h>
#include <asm/code-patching.h>
#include <asm/dbell.h>
#include <asm/kvm_ppc.h>
#include <asm/ppc-opcode.h>
+#include <asm/cpuidle.h>
#include "powernv.h"
@@ -47,13 +49,10 @@
static void pnv_smp_setup_cpu(int cpu)
{
- if (cpu != boot_cpuid)
+ if (xive_enabled())
+ xive_smp_setup_cpu();
+ else if (cpu != boot_cpuid)
xics_setup_cpu();
-
-#ifdef CONFIG_PPC_DOORBELL
- if (cpu_has_feature(CPU_FTR_DBELL))
- doorbell_setup_this_cpu();
-#endif
}
static int pnv_smp_kick_cpu(int nr)
@@ -132,7 +131,10 @@ static int pnv_smp_cpu_disable(void)
vdso_data->processorCount--;
if (cpu == boot_cpuid)
boot_cpuid = cpumask_any(cpu_online_mask);
- xics_migrate_irqs_away();
+ if (xive_enabled())
+ xive_smp_disable_cpu();
+ else
+ xics_migrate_irqs_away();
return 0;
}
@@ -140,7 +142,6 @@ static void pnv_smp_cpu_kill_self(void)
{
unsigned int cpu;
unsigned long srr1, wmask;
- u32 idle_states;
/* Standard hot unplug procedure */
local_irq_disable();
@@ -155,8 +156,6 @@ static void pnv_smp_cpu_kill_self(void)
if (cpu_has_feature(CPU_FTR_ARCH_207S))
wmask = SRR1_WAKEMASK_P8;
- idle_states = pnv_get_supported_cpuidle_states();
-
/* We don't want to take decrementer interrupts while we are offline,
* so clear LPCR:PECE1. We keep PECE2 (and LPCR_PECE_HVEE on P9)
* enabled as to let IPIs in.
@@ -184,19 +183,7 @@ static void pnv_smp_cpu_kill_self(void)
kvmppc_set_host_ipi(cpu, 0);
ppc64_runlatch_off();
-
- if (cpu_has_feature(CPU_FTR_ARCH_300)) {
- srr1 = power9_idle_stop(pnv_deepest_stop_psscr_val,
- pnv_deepest_stop_psscr_mask);
- } else if (idle_states & OPAL_PM_WINKLE_ENABLED) {
- srr1 = power7_winkle();
- } else if ((idle_states & OPAL_PM_SLEEP_ENABLED) ||
- (idle_states & OPAL_PM_SLEEP_ENABLED_ER1)) {
- srr1 = power7_sleep();
- } else {
- srr1 = power7_nap(1);
- }
-
+ srr1 = pnv_cpu_offline(cpu);
ppc64_runlatch_on();
/*
@@ -213,9 +200,12 @@ static void pnv_smp_cpu_kill_self(void)
if (((srr1 & wmask) == SRR1_WAKEEE) ||
((srr1 & wmask) == SRR1_WAKEHVI) ||
(local_paca->irq_happened & PACA_IRQ_EE)) {
- if (cpu_has_feature(CPU_FTR_ARCH_300))
- icp_opal_flush_interrupt();
- else
+ if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+ if (xive_enabled())
+ xive_flush_interrupt();
+ else
+ icp_opal_flush_interrupt();
+ } else
icp_native_flush_interrupt();
} else if ((srr1 & wmask) == SRR1_WAKEHDBELL) {
unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);
@@ -252,10 +242,69 @@ static int pnv_cpu_bootable(unsigned int nr)
return smp_generic_cpu_bootable(nr);
}
+static int pnv_smp_prepare_cpu(int cpu)
+{
+ if (xive_enabled())
+ return xive_smp_prepare_cpu(cpu);
+ return 0;
+}
+
+/* Cause IPI as set up by the interrupt controller (xics or xive) */
+static void (*ic_cause_ipi)(int cpu);
+
+static void pnv_cause_ipi(int cpu)
+{
+ if (doorbell_try_core_ipi(cpu))
+ return;
+
+ ic_cause_ipi(cpu);
+}
+
+static void pnv_p9_dd1_cause_ipi(int cpu)
+{
+ int this_cpu = get_cpu();
+
+ /*
+ * POWER9 DD1 has a globally addressed msgsnd, but for now we restrict
+ * IPIs to same core, because it requires additional synchronization
+ * for inter-core doorbells which we do not implement.
+ */
+ if (cpumask_test_cpu(cpu, cpu_sibling_mask(this_cpu)))
+ doorbell_global_ipi(cpu);
+ else
+ ic_cause_ipi(cpu);
+
+ put_cpu();
+}
+
+static void __init pnv_smp_probe(void)
+{
+ if (xive_enabled())
+ xive_smp_probe();
+ else
+ xics_smp_probe();
+
+ if (cpu_has_feature(CPU_FTR_DBELL)) {
+ ic_cause_ipi = smp_ops->cause_ipi;
+ WARN_ON(!ic_cause_ipi);
+
+ if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+ if (cpu_has_feature(CPU_FTR_POWER9_DD1))
+ smp_ops->cause_ipi = pnv_p9_dd1_cause_ipi;
+ else
+ smp_ops->cause_ipi = doorbell_global_ipi;
+ } else {
+ smp_ops->cause_ipi = pnv_cause_ipi;
+ }
+ }
+}
+
static struct smp_ops_t pnv_smp_ops = {
- .message_pass = smp_muxed_ipi_message_pass,
- .cause_ipi = NULL, /* Filled at runtime by xics_smp_probe() */
- .probe = xics_smp_probe,
+ .message_pass = NULL, /* Use smp_muxed_ipi_message_pass */
+ .cause_ipi = NULL, /* Filled at runtime by pnv_smp_probe() */
+ .cause_nmi_ipi = NULL,
+ .probe = pnv_smp_probe,
+ .prepare_cpu = pnv_smp_prepare_cpu,
.kick_cpu = pnv_smp_kick_cpu,
.setup_cpu = pnv_smp_setup_cpu,
.cpu_bootable = pnv_cpu_bootable,
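
pnv_smp_probe() above stashes the interrupt controller's cause_ipi and layers core-local doorbells on top of it. The sketch below shows that mux in isolation, assuming consecutive CPU numbering within a core and using print statements in place of msgsnd and the XICS/XIVE backends; it is a simplification, not the kernel's doorbell_try_core_ipi().

  #include <stdio.h>
  #include <stdbool.h>

  #define THREADS_PER_CORE 4

  static void ic_cause_ipi(int cpu) { printf("XICS/XIVE IPI -> %d\n", cpu); }
  static void doorbell_ipi(int cpu) { printf("doorbell IPI  -> %d\n", cpu); }

  /* Only same-core targets can be reached with a core-local doorbell. */
  static bool doorbell_try_core_ipi(int self, int cpu)
  {
          if (cpu / THREADS_PER_CORE != self / THREADS_PER_CORE)
                  return false;
          doorbell_ipi(cpu);
          return true;
  }

  static void pnv_cause_ipi(int self, int cpu)
  {
          if (doorbell_try_core_ipi(self, cpu))
                  return;
          ic_cause_ipi(cpu);     /* fall back to the interrupt controller */
  }

  int main(void)
  {
          pnv_cause_ipi(1, 2);   /* same core: doorbell        */
          pnv_cause_ipi(1, 6);   /* other core: XICS/XIVE path */
          return 0;
  }
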
diff --git a/arch/powerpc/platforms/ps3/smp.c b/arch/powerpc/platforms/ps3/smp.c
index 60154d08debf..1d1ad5df106f 100644
--- a/arch/powerpc/platforms/ps3/smp.c
+++ b/arch/powerpc/platforms/ps3/smp.c
@@ -77,7 +77,7 @@ static void __init ps3_smp_probe(void)
BUILD_BUG_ON(PPC_MSG_CALL_FUNCTION != 0);
BUILD_BUG_ON(PPC_MSG_RESCHEDULE != 1);
BUILD_BUG_ON(PPC_MSG_TICK_BROADCAST != 2);
- BUILD_BUG_ON(PPC_MSG_DEBUGGER_BREAK != 3);
+ BUILD_BUG_ON(PPC_MSG_NMI_IPI != 3);
for (i = 0; i < MSG_COUNT; i++) {
result = ps3_event_receive_port_setup(cpu, &virqs[i]);
@@ -96,7 +96,7 @@ static void __init ps3_smp_probe(void)
ps3_register_ipi_irq(cpu, virqs[i]);
}
- ps3_register_ipi_debug_brk(cpu, virqs[PPC_MSG_DEBUGGER_BREAK]);
+ ps3_register_ipi_debug_brk(cpu, virqs[PPC_MSG_NMI_IPI]);
DBG(" <- %s:%d: (%d)\n", __func__, __LINE__, cpu);
}
diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
index 30ec04f1c67c..913c54e23eea 100644
--- a/arch/powerpc/platforms/pseries/Kconfig
+++ b/arch/powerpc/platforms/pseries/Kconfig
@@ -17,9 +17,10 @@ config PPC_PSERIES
select PPC_UDBG_16550
select PPC_NATIVE
select PPC_DOORBELL
- select HOTPLUG_CPU if SMP
+ select HOTPLUG_CPU
select ARCH_RANDOM
select PPC_DOORBELL
+ select FORCE_SMP
default y
config PPC_SPLPAR
diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c
index 193e052fa0dd..bda18d8e1674 100644
--- a/arch/powerpc/platforms/pseries/dlpar.c
+++ b/arch/powerpc/platforms/pseries/dlpar.c
@@ -288,7 +288,6 @@ int dlpar_detach_node(struct device_node *dn)
if (rc)
return rc;
- of_node_put(dn); /* Must decrement the refcount */
return 0;
}
diff --git a/arch/powerpc/platforms/pseries/dtl.c b/arch/powerpc/platforms/pseries/dtl.c
index 6b04e3f0f982..18014cdeb590 100644
--- a/arch/powerpc/platforms/pseries/dtl.c
+++ b/arch/powerpc/platforms/pseries/dtl.c
@@ -21,13 +21,12 @@
*/
#include <linux/slab.h>
-#include <linux/debugfs.h>
#include <linux/spinlock.h>
#include <asm/smp.h>
#include <linux/uaccess.h>
#include <asm/firmware.h>
#include <asm/lppaca.h>
-#include <asm/debug.h>
+#include <asm/debugfs.h>
#include <asm/plpar_wrappers.h>
#include <asm/machdep.h>
diff --git a/arch/powerpc/platforms/pseries/hvCall_inst.c b/arch/powerpc/platforms/pseries/hvCall_inst.c
index f02ec3ab428c..957ae347b0b3 100644
--- a/arch/powerpc/platforms/pseries/hvCall_inst.c
+++ b/arch/powerpc/platforms/pseries/hvCall_inst.c
@@ -29,6 +29,16 @@
#include <asm/trace.h>
#include <asm/machdep.h>
+/* For hcall instrumentation. One structure per-hcall, per-CPU */
+struct hcall_stats {
+ unsigned long num_calls; /* number of calls (on this CPU) */
+ unsigned long tb_total; /* total wall time (mftb) of calls. */
+ unsigned long purr_total; /* total cpu time (PURR) of calls. */
+ unsigned long tb_start;
+ unsigned long purr_start;
+};
+#define HCALL_STAT_ARRAY_SIZE ((MAX_HCALL_OPCODE >> 2) + 1)
+
DEFINE_PER_CPU(struct hcall_stats[HCALL_STAT_ARRAY_SIZE], hcall_stats);
/*
diff --git a/arch/powerpc/platforms/pseries/ibmebus.c b/arch/powerpc/platforms/pseries/ibmebus.c
index 99a6bf7f3bcf..b363e439ddb9 100644
--- a/arch/powerpc/platforms/pseries/ibmebus.c
+++ b/arch/powerpc/platforms/pseries/ibmebus.c
@@ -410,10 +410,7 @@ static ssize_t name_show(struct device *dev,
static ssize_t modalias_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- ssize_t len = of_device_get_modalias(dev, buf, PAGE_SIZE - 2);
- buf[len] = '\n';
- buf[len+1] = 0;
- return len+1;
+ return of_device_modalias(dev, buf, PAGE_SIZE);
}
static struct device_attribute ibmebus_bus_device_attrs[] = {
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index 4d757eaa46bf..8374adee27e3 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -74,6 +74,7 @@ static struct iommu_table_group *iommu_pseries_alloc_group(int node)
goto fail_exit;
INIT_LIST_HEAD_RCU(&tbl->it_group_list);
+ kref_init(&tbl->it_kref);
tgl->table_group = table_group;
list_add_rcu(&tgl->next, &tbl->it_group_list);
@@ -115,7 +116,7 @@ static void iommu_pseries_free_group(struct iommu_table_group *table_group,
BUG_ON(table_group->group);
}
#endif
- iommu_free_table(tbl, node_name);
+ iommu_tce_table_put(tbl);
kfree(table_group);
}
@@ -550,6 +551,7 @@ static void iommu_table_setparms(struct pci_controller *phb,
static void iommu_table_setparms_lpar(struct pci_controller *phb,
struct device_node *dn,
struct iommu_table *tbl,
+ struct iommu_table_group *table_group,
const __be32 *dma_window)
{
unsigned long offset, size;
@@ -563,6 +565,9 @@ static void iommu_table_setparms_lpar(struct pci_controller *phb,
tbl->it_type = TCE_PCI;
tbl->it_offset = offset >> tbl->it_page_shift;
tbl->it_size = size >> tbl->it_page_shift;
+
+ table_group->tce32_start = offset;
+ table_group->tce32_size = size;
}
struct iommu_table_ops iommu_table_pseries_ops = {
@@ -651,8 +656,38 @@ static void pci_dma_bus_setup_pSeries(struct pci_bus *bus)
pr_debug("ISA/IDE, window size is 0x%llx\n", pci->phb->dma_window_size);
}
+#ifdef CONFIG_IOMMU_API
+static int tce_exchange_pseries(struct iommu_table *tbl, long index, unsigned
+ long *tce, enum dma_data_direction *direction)
+{
+ long rc;
+ unsigned long ioba = (unsigned long) index << tbl->it_page_shift;
+ unsigned long flags, oldtce = 0;
+ u64 proto_tce = iommu_direction_to_tce_perm(*direction);
+ unsigned long newtce = *tce | proto_tce;
+
+ spin_lock_irqsave(&tbl->large_pool.lock, flags);
+
+ rc = plpar_tce_get((u64)tbl->it_index, ioba, &oldtce);
+ if (!rc)
+ rc = plpar_tce_put((u64)tbl->it_index, ioba, newtce);
+
+ if (!rc) {
+ *direction = iommu_tce_direction(oldtce);
+ *tce = oldtce & ~(TCE_PCI_READ | TCE_PCI_WRITE);
+ }
+
+ spin_unlock_irqrestore(&tbl->large_pool.lock, flags);
+
+ return rc;
+}
+#endif
+
struct iommu_table_ops iommu_table_lpar_multi_ops = {
.set = tce_buildmulti_pSeriesLP,
+#ifdef CONFIG_IOMMU_API
+ .exchange = tce_exchange_pseries,
+#endif
.clear = tce_freemulti_pSeriesLP,
.get = tce_get_pSeriesLP
};
@@ -689,7 +724,8 @@ static void pci_dma_bus_setup_pSeriesLP(struct pci_bus *bus)
if (!ppci->table_group) {
ppci->table_group = iommu_pseries_alloc_group(ppci->phb->node);
tbl = ppci->table_group->tables[0];
- iommu_table_setparms_lpar(ppci->phb, pdn, tbl, dma_window);
+ iommu_table_setparms_lpar(ppci->phb, pdn, tbl,
+ ppci->table_group, dma_window);
tbl->it_ops = &iommu_table_lpar_multi_ops;
iommu_init_table(tbl, ppci->phb->node);
iommu_register_group(ppci->table_group,
@@ -1143,7 +1179,8 @@ static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev)
if (!pci->table_group) {
pci->table_group = iommu_pseries_alloc_group(pci->phb->node);
tbl = pci->table_group->tables[0];
- iommu_table_setparms_lpar(pci->phb, pdn, tbl, dma_window);
+ iommu_table_setparms_lpar(pci->phb, pdn, tbl,
+ pci->table_group, dma_window);
tbl->it_ops = &iommu_table_lpar_multi_ops;
iommu_init_table(tbl, pci->phb->node);
iommu_register_group(pci->table_group,
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index 8b1fe895daa3..6541d0b03e4c 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -958,3 +958,64 @@ int h_get_mpp_x(struct hvcall_mpp_x_data *mpp_x_data)
return rc;
}
+
+static unsigned long vsid_unscramble(unsigned long vsid, int ssize)
+{
+ unsigned long protovsid;
+ unsigned long va_bits = VA_BITS;
+ unsigned long modinv, vsid_modulus;
+ unsigned long max_mod_inv, tmp_modinv;
+
+ if (!mmu_has_feature(MMU_FTR_68_BIT_VA))
+ va_bits = 65;
+
+ if (ssize == MMU_SEGSIZE_256M) {
+ modinv = VSID_MULINV_256M;
+ vsid_modulus = ((1UL << (va_bits - SID_SHIFT)) - 1);
+ } else {
+ modinv = VSID_MULINV_1T;
+ vsid_modulus = ((1UL << (va_bits - SID_SHIFT_1T)) - 1);
+ }
+
+ /*
+ * vsid outside our range.
+ */
+ if (vsid >= vsid_modulus)
+ return 0;
+
+ /*
+ * If modinv is the modular multiplicative inverse of (x % vsid_modulus)
+ * and vsid = (protovsid * x) % vsid_modulus, then we say:
+ * protovsid = (vsid * modinv) % vsid_modulus
+ */
+
+ /* Check if (vsid * modinv) overflows (63 bits) */
+ max_mod_inv = 0x7fffffffffffffffull / vsid;
+ if (modinv < max_mod_inv)
+ return (vsid * modinv) % vsid_modulus;
+
+ tmp_modinv = modinv/max_mod_inv;
+ modinv %= max_mod_inv;
+
+ protovsid = (((vsid * max_mod_inv) % vsid_modulus) * tmp_modinv) % vsid_modulus;
+ protovsid = (protovsid + vsid * modinv) % vsid_modulus;
+
+ return protovsid;
+}
+
+static int __init reserve_vrma_context_id(void)
+{
+ unsigned long protovsid;
+
+ /*
+ * Reserve context ids which map to reserved virtual addresses. For now
+ * we only reserve the context id which maps to the VRMA VSID. We ignore
+ * the addresses in "ibm,adjunct-virtual-addresses" because we don't
+ * enable adjunct support via the "ibm,client-architecture-support"
+ * interface.
+ */
+ protovsid = vsid_unscramble(VRMA_VSID, MMU_SEGSIZE_1T);
+ hash__reserve_context_id(protovsid >> ESID_BITS_1T);
+ return 0;
+}
+machine_device_initcall(pseries, reserve_vrma_context_id);
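
vsid_unscramble() above inverts the VSID scramble by multiplying with the precomputed modular inverse; the max_mod_inv split exists only to keep the 64-bit product from overflowing. A toy-sized demonstration of the underlying identity, with hypothetical multiplier 7, modulus 11 and inverse 8 standing in for the VSID_MULTIPLIER/VSID_MULINV/modulus constants from the kernel headers (small enough that no overflow handling is needed):

  #include <stdio.h>
  #include <stdint.h>

  #define MULTIPLIER 7ULL
  #define MODULUS    11ULL
  #define MODINV     8ULL     /* 7 * 8 = 56 = 5*11 + 1, so 8 is 7^-1 mod 11 */

  int main(void)
  {
          for (uint64_t protovsid = 0; protovsid < MODULUS; protovsid++) {
                  uint64_t vsid = (protovsid * MULTIPLIER) % MODULUS;   /* scramble   */
                  uint64_t back = (vsid * MODINV) % MODULUS;            /* unscramble */

                  printf("proto=%llu vsid=%llu back=%llu\n",
                         (unsigned long long)protovsid,
                         (unsigned long long)vsid,
                         (unsigned long long)back);
          }
          return 0;
  }

Every row prints back == proto, which is the property reserve_vrma_context_id() relies on to recover the proto-VSID for VRMA_VSID and reserve its context id.
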
diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c
index 904a677208d1..bb70b26334f0 100644
--- a/arch/powerpc/platforms/pseries/ras.c
+++ b/arch/powerpc/platforms/pseries/ras.c
@@ -386,6 +386,10 @@ int pSeries_system_reset_exception(struct pt_regs *regs)
}
fwnmi_release_errinfo();
}
+
+ if (smp_handle_nmi_ipi(regs))
+ return 1;
+
return 0; /* need to perform reset */
}
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index b4d362ed03a1..b5d86426e97b 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -87,6 +87,10 @@ static void pSeries_show_cpuinfo(struct seq_file *m)
model = of_get_property(root, "model", NULL);
seq_printf(m, "machine\t\t: CHRP %s\n", model);
of_node_put(root);
+ if (radix_enabled())
+ seq_printf(m, "MMU\t\t: Radix\n");
+ else
+ seq_printf(m, "MMU\t\t: Hash\n");
}
/* Initialize firmware assisted non-maskable interrupts if
diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c
index f6f83aeccaaa..52ca6b311d44 100644
--- a/arch/powerpc/platforms/pseries/smp.c
+++ b/arch/powerpc/platforms/pseries/smp.c
@@ -55,11 +55,6 @@
*/
static cpumask_var_t of_spin_mask;
-/*
- * If we multiplex IPI mechanisms, store the appropriate XICS IPI mechanism here
- */
-static void (*xics_cause_ipi)(int cpu, unsigned long data);
-
/* Query where a cpu is now. Return codes #defined in plpar_wrappers.h */
int smp_query_cpu_stopped(unsigned int pcpu)
{
@@ -143,8 +138,6 @@ static void smp_setup_cpu(int cpu)
{
if (cpu != boot_cpuid)
xics_setup_cpu();
- if (cpu_has_feature(CPU_FTR_DBELL))
- doorbell_setup_this_cpu();
if (firmware_has_feature(FW_FEATURE_SPLPAR))
vpa_init(cpu);
@@ -187,28 +180,50 @@ static int smp_pSeries_kick_cpu(int nr)
return 0;
}
-/* Only used on systems that support multiple IPI mechanisms */
-static void pSeries_cause_ipi_mux(int cpu, unsigned long data)
+static void smp_pseries_cause_ipi(int cpu)
{
- if (cpumask_test_cpu(cpu, cpu_sibling_mask(smp_processor_id())))
- doorbell_cause_ipi(cpu, data);
- else
- xics_cause_ipi(cpu, data);
+ /* POWER9 should not use this handler */
+ if (doorbell_try_core_ipi(cpu))
+ return;
+
+ icp_ops->cause_ipi(cpu);
+}
+
+static int pseries_cause_nmi_ipi(int cpu)
+{
+ int hwcpu;
+
+ if (cpu == NMI_IPI_ALL_OTHERS) {
+ hwcpu = H_SIGNAL_SYS_RESET_ALL_OTHERS;
+ } else {
+ if (cpu < 0) {
+ WARN_ONCE(true, "incorrect cpu parameter %d", cpu);
+ return 0;
+ }
+
+ hwcpu = get_hard_smp_processor_id(cpu);
+ }
+
+ if (plapr_signal_sys_reset(hwcpu) == H_SUCCESS)
+ return 1;
+
+ return 0;
}
static __init void pSeries_smp_probe(void)
{
xics_smp_probe();
- if (cpu_has_feature(CPU_FTR_DBELL)) {
- xics_cause_ipi = smp_ops->cause_ipi;
- smp_ops->cause_ipi = pSeries_cause_ipi_mux;
- }
+ if (cpu_has_feature(CPU_FTR_DBELL))
+ smp_ops->cause_ipi = smp_pseries_cause_ipi;
+ else
+ smp_ops->cause_ipi = icp_ops->cause_ipi;
}
static struct smp_ops_t pseries_smp_ops = {
.message_pass = NULL, /* Use smp_muxed_ipi_message_pass */
.cause_ipi = NULL, /* Filled at runtime by pSeries_smp_probe() */
+ .cause_nmi_ipi = pseries_cause_nmi_ipi,
.probe = pSeries_smp_probe,
.kick_cpu = smp_pSeries_kick_cpu,
.setup_cpu = smp_setup_cpu,
diff --git a/arch/powerpc/platforms/pseries/vio.c b/arch/powerpc/platforms/pseries/vio.c
index 720493932486..28b09fd797ec 100644
--- a/arch/powerpc/platforms/pseries/vio.c
+++ b/arch/powerpc/platforms/pseries/vio.c
@@ -1318,7 +1318,7 @@ static void vio_dev_release(struct device *dev)
struct iommu_table *tbl = get_iommu_table_base(dev);
if (tbl)
- iommu_free_table(tbl, of_node_full_name(dev->of_node));
+ iommu_tce_table_put(tbl);
of_node_put(dev->of_node);
kfree(to_vio_dev(dev));
}
diff --git a/arch/powerpc/sysdev/Kconfig b/arch/powerpc/sysdev/Kconfig
index 52dc165c0efb..caf882e749dc 100644
--- a/arch/powerpc/sysdev/Kconfig
+++ b/arch/powerpc/sysdev/Kconfig
@@ -28,6 +28,7 @@ config PPC_MSI_BITMAP
default y if PPC_POWERNV
source "arch/powerpc/sysdev/xics/Kconfig"
+source "arch/powerpc/sysdev/xive/Kconfig"
config PPC_SCOM
bool
diff --git a/arch/powerpc/sysdev/Makefile b/arch/powerpc/sysdev/Makefile
index a254824719f1..c0ae11d4f62f 100644
--- a/arch/powerpc/sysdev/Makefile
+++ b/arch/powerpc/sysdev/Makefile
@@ -71,5 +71,6 @@ obj-$(CONFIG_PPC_EARLY_DEBUG_MEMCONS) += udbg_memcons.o
subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror
obj-$(CONFIG_PPC_XICS) += xics/
+obj-$(CONFIG_PPC_XIVE) += xive/
obj-$(CONFIG_GE_FPGA) += ge/
diff --git a/arch/powerpc/sysdev/axonram.c b/arch/powerpc/sysdev/axonram.c
index f523ac883150..a7fe5fee744f 100644
--- a/arch/powerpc/sysdev/axonram.c
+++ b/arch/powerpc/sysdev/axonram.c
@@ -25,6 +25,7 @@
#include <linux/bio.h>
#include <linux/blkdev.h>
+#include <linux/dax.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/fs.h>
@@ -62,6 +63,7 @@ static int azfs_major, azfs_minor;
struct axon_ram_bank {
struct platform_device *device;
struct gendisk *disk;
+ struct dax_device *dax_dev;
unsigned int irq_id;
unsigned long ph_addr;
unsigned long io_addr;
@@ -137,25 +139,32 @@ axon_ram_make_request(struct request_queue *queue, struct bio *bio)
return BLK_QC_T_NONE;
}
-/**
- * axon_ram_direct_access - direct_access() method for block device
- * @device, @sector, @data: see block_device_operations method
- */
+static const struct block_device_operations axon_ram_devops = {
+ .owner = THIS_MODULE,
+};
+
static long
-axon_ram_direct_access(struct block_device *device, sector_t sector,
- void **kaddr, pfn_t *pfn, long size)
+__axon_ram_direct_access(struct axon_ram_bank *bank, pgoff_t pgoff, long nr_pages,
+ void **kaddr, pfn_t *pfn)
{
- struct axon_ram_bank *bank = device->bd_disk->private_data;
- loff_t offset = (loff_t)sector << AXON_RAM_SECTOR_SHIFT;
+ resource_size_t offset = pgoff * PAGE_SIZE;
*kaddr = (void *) bank->io_addr + offset;
*pfn = phys_to_pfn_t(bank->ph_addr + offset, PFN_DEV);
- return bank->size - offset;
+ return (bank->size - offset) / PAGE_SIZE;
}
-static const struct block_device_operations axon_ram_devops = {
- .owner = THIS_MODULE,
- .direct_access = axon_ram_direct_access
+static long
+axon_ram_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
+ void **kaddr, pfn_t *pfn)
+{
+ struct axon_ram_bank *bank = dax_get_private(dax_dev);
+
+ return __axon_ram_direct_access(bank, pgoff, nr_pages, kaddr, pfn);
+}
+
+static const struct dax_operations axon_ram_dax_ops = {
+ .direct_access = axon_ram_dax_direct_access,
};
/**
@@ -219,6 +228,7 @@ static int axon_ram_probe(struct platform_device *device)
goto failed;
}
+
bank->disk->major = azfs_major;
bank->disk->first_minor = azfs_minor;
bank->disk->fops = &axon_ram_devops;
@@ -227,6 +237,13 @@ static int axon_ram_probe(struct platform_device *device)
sprintf(bank->disk->disk_name, "%s%d",
AXON_RAM_DEVICE_NAME, axon_ram_bank_id);
+ bank->dax_dev = alloc_dax(bank, bank->disk->disk_name,
+ &axon_ram_dax_ops);
+ if (!bank->dax_dev) {
+ rc = -ENOMEM;
+ goto failed;
+ }
+
bank->disk->queue = blk_alloc_queue(GFP_KERNEL);
if (bank->disk->queue == NULL) {
dev_err(&device->dev, "Cannot register disk queue\n");
@@ -278,6 +295,8 @@ failed:
del_gendisk(bank->disk);
put_disk(bank->disk);
}
+ kill_dax(bank->dax_dev);
+ put_dax(bank->dax_dev);
device->dev.platform_data = NULL;
if (bank->io_addr != 0)
iounmap((void __iomem *) bank->io_addr);
@@ -300,6 +319,8 @@ axon_ram_remove(struct platform_device *device)
device_remove_file(&device->dev, &dev_attr_ecc);
free_irq(bank->irq_id, device);
+ kill_dax(bank->dax_dev);
+ put_dax(bank->dax_dev);
del_gendisk(bank->disk);
put_disk(bank->disk);
iounmap((void __iomem *) bank->io_addr);
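
The DAX conversion above switches axonram's direct_access from sector-based to page-based addressing: the offset into the bank is pgoff * PAGE_SIZE and the return value counts remaining pages rather than bytes. A standalone sketch of that arithmetic; bank_io, bank_phys and bank_size are stand-ins for the axon_ram_bank fields, and the pfn is returned as a raw frame number rather than the kernel's pfn_t.

  #include <stdio.h>
  #include <stdint.h>

  #define PAGE_SIZE 4096UL

  static long direct_access(uintptr_t bank_io, uint64_t bank_phys,
                            uint64_t bank_size, uint64_t pgoff,
                            void **kaddr, uint64_t *pfn)
  {
          uint64_t offset = pgoff * PAGE_SIZE;

          *kaddr = (void *)(bank_io + offset);          /* mapped virtual address */
          *pfn = (bank_phys + offset) / PAGE_SIZE;      /* physical frame number  */
          return (long)((bank_size - offset) / PAGE_SIZE); /* pages still available */
  }

  int main(void)
  {
          void *kaddr;
          uint64_t pfn;
          long avail = direct_access(0x100000, 0x80000000ULL,
                                     16 * PAGE_SIZE, 3, &kaddr, &pfn);

          printf("kaddr=%p pfn=0x%llx pages_left=%ld\n",
                 kaddr, (unsigned long long)pfn, avail);
          return 0;
  }
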
diff --git a/arch/powerpc/sysdev/scom.c b/arch/powerpc/sysdev/scom.c
index d0e9f178a324..76ea32c1b664 100644
--- a/arch/powerpc/sysdev/scom.c
+++ b/arch/powerpc/sysdev/scom.c
@@ -19,10 +19,9 @@
*/
#include <linux/kernel.h>
-#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/export.h>
-#include <asm/debug.h>
+#include <asm/debugfs.h>
#include <asm/prom.h>
#include <asm/scom.h>
#include <linux/uaccess.h>
diff --git a/arch/powerpc/sysdev/xics/icp-hv.c b/arch/powerpc/sysdev/xics/icp-hv.c
index e7fa26c4ff73..bbc839a98c41 100644
--- a/arch/powerpc/sysdev/xics/icp-hv.c
+++ b/arch/powerpc/sysdev/xics/icp-hv.c
@@ -138,7 +138,7 @@ static void icp_hv_set_cpu_priority(unsigned char cppr)
#ifdef CONFIG_SMP
-static void icp_hv_cause_ipi(int cpu, unsigned long data)
+static void icp_hv_cause_ipi(int cpu)
{
icp_hv_set_qirr(cpu, IPI_PRIORITY);
}
diff --git a/arch/powerpc/sysdev/xics/icp-native.c b/arch/powerpc/sysdev/xics/icp-native.c
index 8a6a043e239b..2bfb9968d562 100644
--- a/arch/powerpc/sysdev/xics/icp-native.c
+++ b/arch/powerpc/sysdev/xics/icp-native.c
@@ -143,19 +143,9 @@ static unsigned int icp_native_get_irq(void)
#ifdef CONFIG_SMP
-static void icp_native_cause_ipi(int cpu, unsigned long data)
+static void icp_native_cause_ipi(int cpu)
{
kvmppc_set_host_ipi(cpu, 1);
-#ifdef CONFIG_PPC_DOORBELL
- if (cpu_has_feature(CPU_FTR_DBELL)) {
- if (cpumask_test_cpu(cpu, cpu_sibling_mask(get_cpu()))) {
- doorbell_cause_ipi(cpu, data);
- put_cpu();
- return;
- }
- put_cpu();
- }
-#endif
icp_native_set_qirr(cpu, IPI_PRIORITY);
}
@@ -168,15 +158,15 @@ void icp_native_cause_ipi_rm(int cpu)
* Need the physical address of the XICS to be
* previously saved in kvm_hstate in the paca.
*/
- unsigned long xics_phys;
+ void __iomem *xics_phys;
/*
* Just like the cause_ipi functions, it is required to
- * include a full barrier (out8 includes a sync) before
- * causing the IPI.
+ * include a full barrier before causing the IPI.
*/
xics_phys = paca[cpu].kvm_hstate.xics_phys;
- out_rm8((u8 *)(xics_phys + XICS_MFRR), IPI_PRIORITY);
+ mb();
+ __raw_rm_writeb(IPI_PRIORITY, xics_phys + XICS_MFRR);
}
#endif
diff --git a/arch/powerpc/sysdev/xics/icp-opal.c b/arch/powerpc/sysdev/xics/icp-opal.c
index b53f80f0b4d8..c71d2ea42627 100644
--- a/arch/powerpc/sysdev/xics/icp-opal.c
+++ b/arch/powerpc/sysdev/xics/icp-opal.c
@@ -126,7 +126,7 @@ static void icp_opal_eoi(struct irq_data *d)
#ifdef CONFIG_SMP
-static void icp_opal_cause_ipi(int cpu, unsigned long data)
+static void icp_opal_cause_ipi(int cpu)
{
int hw_cpu = get_hard_smp_processor_id(cpu);
diff --git a/arch/powerpc/sysdev/xics/xics-common.c b/arch/powerpc/sysdev/xics/xics-common.c
index 23efe4e42172..ffe138b8b9dc 100644
--- a/arch/powerpc/sysdev/xics/xics-common.c
+++ b/arch/powerpc/sysdev/xics/xics-common.c
@@ -143,11 +143,11 @@ static void xics_request_ipi(void)
void __init xics_smp_probe(void)
{
- /* Setup cause_ipi callback based on which ICP is used */
- smp_ops->cause_ipi = icp_ops->cause_ipi;
-
/* Register all the IPIs */
xics_request_ipi();
+
+ /* Setup cause_ipi callback based on which ICP is used */
+ smp_ops->cause_ipi = icp_ops->cause_ipi;
}
#endif /* CONFIG_SMP */
diff --git a/arch/powerpc/sysdev/xive/Kconfig b/arch/powerpc/sysdev/xive/Kconfig
new file mode 100644
index 000000000000..12ccd7373d2f
--- /dev/null
+++ b/arch/powerpc/sysdev/xive/Kconfig
@@ -0,0 +1,11 @@
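+# XIVE is the "eXternal Interrupt Virtualization Engine" interrupt
+# controller introduced with POWER9.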
+config PPC_XIVE
+ bool
+ default n
+ select PPC_SMP_MUXED_IPI
+ select HARDIRQS_SW_RESEND
+
+config PPC_XIVE_NATIVE
+ bool
+ default n
+ select PPC_XIVE
+ depends on PPC_POWERNV
diff --git a/arch/powerpc/sysdev/xive/Makefile b/arch/powerpc/sysdev/xive/Makefile
new file mode 100644
index 000000000000..3fab303fc169
--- /dev/null
+++ b/arch/powerpc/sysdev/xive/Makefile
@@ -0,0 +1,4 @@
+subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror
+
+obj-y += common.o
+obj-$(CONFIG_PPC_XIVE_NATIVE) += native.o
diff --git a/arch/powerpc/sysdev/xive/common.c b/arch/powerpc/sysdev/xive/common.c
new file mode 100644
index 000000000000..6a98efb14264
--- /dev/null
+++ b/arch/powerpc/sysdev/xive/common.c
@@ -0,0 +1,1302 @@
+/*
+ * Copyright 2016,2017 IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#define pr_fmt(fmt) "xive: " fmt
+
+#include <linux/types.h>
+#include <linux/threads.h>
+#include <linux/kernel.h>
+#include <linux/irq.h>
+#include <linux/debugfs.h>
+#include <linux/smp.h>
+#include <linux/interrupt.h>
+#include <linux/seq_file.h>
+#include <linux/init.h>
+#include <linux/cpu.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/msi.h>
+
+#include <asm/prom.h>
+#include <asm/io.h>
+#include <asm/smp.h>
+#include <asm/machdep.h>
+#include <asm/irq.h>
+#include <asm/errno.h>
+#include <asm/xive.h>
+#include <asm/xive-regs.h>
+#include <asm/xmon.h>
+
+#include "xive-internal.h"
+
+#undef DEBUG_FLUSH
+#undef DEBUG_ALL
+
+#ifdef DEBUG_ALL
+#define DBG_VERBOSE(fmt...) pr_devel(fmt)
+#else
+#define DBG_VERBOSE(fmt...) do { } while(0)
+#endif
+
+bool __xive_enabled;
+bool xive_cmdline_disabled;
+
+/* We use only one priority for now */
+static u8 xive_irq_priority;
+
+/* TIMA */
+void __iomem *xive_tima;
+u32 xive_tima_offset;
+
+/* Backend ops */
+static const struct xive_ops *xive_ops;
+
+/* Our global interrupt domain */
+static struct irq_domain *xive_irq_domain;
+
+#ifdef CONFIG_SMP
+/* The IPIs all use the same logical irq number */
+static u32 xive_ipi_irq;
+#endif
+
+/* Xive state for each CPU */
+static DEFINE_PER_CPU(struct xive_cpu *, xive_cpu);
+
+/*
+ * A "disabled" interrupt should never fire, to catch problems
+ * we set its logical number to this
+ */
+#define XIVE_BAD_IRQ 0x7fffffff
+#define XIVE_MAX_IRQ (XIVE_BAD_IRQ - 1)
+
+/* An invalid CPU target */
+#define XIVE_INVALID_TARGET (-1)
+
+/*
+ * Read the next entry in a queue, return its content if it's valid
+ * or 0 if there is no new entry.
+ *
+ * The queue pointer is moved forward unless "just_peek" is set
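+ *
+ * Bit 31 of each entry is a valid/generation bit that flips each
+ * time the queue wraps; comparing it against our software "toggle"
+ * tells us whether the entry is new.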
+ */
+static u32 xive_read_eq(struct xive_q *q, bool just_peek)
+{
+ u32 cur;
+
+ if (!q->qpage)
+ return 0;
+ cur = be32_to_cpup(q->qpage + q->idx);
+
+ /* Check valid bit (31) vs current toggle polarity */
+ if ((cur >> 31) == q->toggle)
+ return 0;
+
+ /* If consuming from the queue ... */
+ if (!just_peek) {
+ /* Next entry */
+ q->idx = (q->idx + 1) & q->msk;
+
+ /* Wrap around: flip valid toggle */
+ if (q->idx == 0)
+ q->toggle ^= 1;
+ }
+ /* Mask out the valid bit (31) */
+ return cur & 0x7fffffff;
+}
+
+/*
+ * Scans all the queues that may have interrupts in them
+ * (based on "pending_prio") in priority order until an
+ * interrupt is found or all the queues are empty.
+ *
+ * Then updates the CPPR (Current Processor Priority
+ * Register) based on the most favored interrupt found
+ * (0xff if none) and returns what was found (0 if none).
+ *
+ * If just_peek is set, return the most favored pending
+ * interrupt if any but don't update the queue pointers.
+ *
+ * Note: This function can operate generically on any number
+ * of queues (up to 8). The current implementation of the XIVE
+ * driver only uses a single queue however.
+ *
+ * Note2: This will also "flush" the pending_count of a queue
+ * into its "count" when that queue is observed to be empty.
+ * This is used to keep track of the number of interrupts
+ * targeting a queue. When an interrupt is moved away from
+ * a queue, we only decrement that queue's count once the queue
+ * has been observed to be empty, to avoid races.
+ */
+static u32 xive_scan_interrupts(struct xive_cpu *xc, bool just_peek)
+{
+ u32 irq = 0;
+ u8 prio;
+
+ /* Find highest pending priority */
+ while (xc->pending_prio != 0) {
+ struct xive_q *q;
+
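+		/* ffs() gives the lowest set bit, i.e. the most favored pending priority */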
+ prio = ffs(xc->pending_prio) - 1;
+ DBG_VERBOSE("scan_irq: trying prio %d\n", prio);
+
+ /* Try to fetch */
+ irq = xive_read_eq(&xc->queue[prio], just_peek);
+
+ /* Found something ? That's it */
+ if (irq)
+ break;
+
+ /* Clear pending bits */
+ xc->pending_prio &= ~(1 << prio);
+
+ /*
+ * Check if the queue count needs adjusting due to
+ * interrupts being moved away. See description of
+ * xive_dec_target_count()
+ */
+ q = &xc->queue[prio];
+ if (atomic_read(&q->pending_count)) {
+ int p = atomic_xchg(&q->pending_count, 0);
+ if (p) {
+ WARN_ON(p > atomic_read(&q->count));
+ atomic_sub(p, &q->count);
+ }
+ }
+ }
+
+ /* If nothing was found, set CPPR to 0xff */
+ if (irq == 0)
+ prio = 0xff;
+
+ /* Update HW CPPR to match if necessary */
+ if (prio != xc->cppr) {
+ DBG_VERBOSE("scan_irq: adjusting CPPR to %d\n", prio);
+ xc->cppr = prio;
+ out_8(xive_tima + xive_tima_offset + TM_CPPR, prio);
+ }
+
+ return irq;
+}
+
+/*
+ * This is used to perform the magic loads from an ESB
+ * described in xive.h
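+ *
+ * The load returns the previous state of the source's P and Q bits;
+ * the SET_PQ_* offsets additionally set those bits to the value
+ * encoded in the offset used for the access.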
+ */
+static u8 xive_poke_esb(struct xive_irq_data *xd, u32 offset)
+{
+ u64 val;
+
+ /* Handle HW errata */
+ if (xd->flags & XIVE_IRQ_FLAG_SHIFT_BUG)
+ offset |= offset << 4;
+
+ val = in_be64(xd->eoi_mmio + offset);
+
+ return (u8)val;
+}
+
+#ifdef CONFIG_XMON
+static void xive_dump_eq(const char *name, struct xive_q *q)
+{
+ u32 i0, i1, idx;
+
+ if (!q->qpage)
+ return;
+ idx = q->idx;
+ i0 = be32_to_cpup(q->qpage + idx);
+ idx = (idx + 1) & q->msk;
+ i1 = be32_to_cpup(q->qpage + idx);
+ xmon_printf(" %s Q T=%d %08x %08x ...\n", name,
+ q->toggle, i0, i1);
+}
+
+void xmon_xive_do_dump(int cpu)
+{
+ struct xive_cpu *xc = per_cpu(xive_cpu, cpu);
+
+ xmon_printf("XIVE state for CPU %d:\n", cpu);
+ xmon_printf(" pp=%02x cppr=%02x\n", xc->pending_prio, xc->cppr);
+ xive_dump_eq("IRQ", &xc->queue[xive_irq_priority]);
+#ifdef CONFIG_SMP
+ {
+ u64 val = xive_poke_esb(&xc->ipi_data, XIVE_ESB_GET);
+ xmon_printf(" IPI state: %x:%c%c\n", xc->hw_ipi,
+ val & XIVE_ESB_VAL_P ? 'P' : 'p',
+ val & XIVE_ESB_VAL_Q ? 'Q' : 'q');
+ }
+#endif
+}
+#endif /* CONFIG_XMON */
+
+static unsigned int xive_get_irq(void)
+{
+ struct xive_cpu *xc = __this_cpu_read(xive_cpu);
+ u32 irq;
+
+ /*
+ * This can be called either as a result of a HW interrupt or
+ * as a "replay" because EOI decided there was still something
+ * in one of the queues.
+ *
+ * First we perform an ACK cycle in order to update our mask
+ * of pending priorities. This will also have the effect of
+ * updating the CPPR to the most favored pending interrupts.
+ *
+ * In the future, if we have a way to differentiate a first
+ * entry (on HW interrupt) from a replay triggered by EOI,
+ * we could skip this on replays unless the soft-mask tells us
+ * that a new HW interrupt occurred.
+ */
+ xive_ops->update_pending(xc);
+
+ DBG_VERBOSE("get_irq: pending=%02x\n", xc->pending_prio);
+
+ /* Scan our queue(s) for interrupts */
+ irq = xive_scan_interrupts(xc, false);
+
+ DBG_VERBOSE("get_irq: got irq 0x%x, new pending=0x%02x\n",
+ irq, xc->pending_prio);
+
+ /* Return pending interrupt if any */
+ if (irq == XIVE_BAD_IRQ)
+ return 0;
+ return irq;
+}
+
+/*
+ * After EOI'ing an interrupt, we need to re-check the queue
+ * to see if another interrupt is pending since multiple
+ * interrupts can coalesce into a single notification to the
+ * CPU.
+ *
+ * If we find that there is indeed more in there, we call
+ * force_external_irq_replay() to make Linux synthesize an
+ * external interrupt on the next call to local_irq_restore().
+ */
+static void xive_do_queue_eoi(struct xive_cpu *xc)
+{
+ if (xive_scan_interrupts(xc, true) != 0) {
+ DBG_VERBOSE("eoi: pending=0x%02x\n", xc->pending_prio);
+ force_external_irq_replay();
+ }
+}
+
+/*
+ * EOI an interrupt at the source. There are several methods
+ * to do this depending on the HW version and source type
+ */
+void xive_do_source_eoi(u32 hw_irq, struct xive_irq_data *xd)
+{
+ /* If the XIVE supports the new "store EOI" facility, use it */
+ if (xd->flags & XIVE_IRQ_FLAG_STORE_EOI)
+ out_be64(xd->eoi_mmio, 0);
+ else if (hw_irq && xd->flags & XIVE_IRQ_FLAG_EOI_FW) {
+ /*
+ * The FW told us to call it. This happens for some
+ * interrupt sources that need additional HW whacking
+ * beyond the ESB manipulation. For example LPC interrupts
+ * on P9 DD1.0 need a latch to be cleared in the LPC bridge
+ * itself. The Firmware will take care of it.
+ */
+ if (WARN_ON_ONCE(!xive_ops->eoi))
+ return;
+ xive_ops->eoi(hw_irq);
+ } else {
+ u8 eoi_val;
+
+ /*
+ * Otherwise for EOI, we use the special MMIO that does
+ * a clear of both P and Q and returns the old Q,
+ * except for LSIs where we use the "EOI cycle" special
+ * load.
+ *
+ * This allows us to then do a re-trigger if Q was set
+ * rather than synthesizing an interrupt in software
+ *
+ * For LSIs, using the HW EOI cycle works around a problem
+ * on P9 DD1 PHBs where the other ESB accesses don't work
+ * properly.
+ */
+ if (xd->flags & XIVE_IRQ_FLAG_LSI)
+ in_be64(xd->eoi_mmio);
+ else {
+ eoi_val = xive_poke_esb(xd, XIVE_ESB_SET_PQ_00);
+ DBG_VERBOSE("eoi_val=%x\n", eoi_val);
+
+ /* Re-trigger if needed */
+ if ((eoi_val & XIVE_ESB_VAL_Q) && xd->trig_mmio)
+ out_be64(xd->trig_mmio, 0);
+ }
+ }
+}
+
+/* irq_chip eoi callback */
+static void xive_irq_eoi(struct irq_data *d)
+{
+ struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
+ struct xive_cpu *xc = __this_cpu_read(xive_cpu);
+
+ DBG_VERBOSE("eoi_irq: irq=%d [0x%lx] pending=%02x\n",
+ d->irq, irqd_to_hwirq(d), xc->pending_prio);
+
+ /* EOI the source if it hasn't been disabled */
+ if (!irqd_irq_disabled(d))
+ xive_do_source_eoi(irqd_to_hwirq(d), xd);
+
+ /*
+ * Clear saved_p to indicate that it's no longer occupying
+ * a queue slot on the target queue
+ */
+ xd->saved_p = false;
+
+ /* Check for more work in the queue */
+ xive_do_queue_eoi(xc);
+}
+
+/*
+ * Helper used to mask and unmask an interrupt source. This
+ * is only called for normal interrupts that do not require
+ * masking/unmasking via firmware.
+ */
+static void xive_do_source_set_mask(struct xive_irq_data *xd,
+ bool mask)
+{
+ u64 val;
+
+ /*
+ * If the interrupt had P set, it may be in a queue.
+ *
+ * We need to make sure we don't re-enable it until it
+ * has been fetched from that queue and EOId. We keep
+ * a copy of that P state and use it to restore the
+ * ESB accordingly on unmask.
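+ *
+ * PQ=01 masks the source, PQ=10 restores the "presented" state
+ * that was saved when masking, and PQ=00 fully re-enables it.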
+ */
+ if (mask) {
+ val = xive_poke_esb(xd, XIVE_ESB_SET_PQ_01);
+ xd->saved_p = !!(val & XIVE_ESB_VAL_P);
+ } else if (xd->saved_p)
+ xive_poke_esb(xd, XIVE_ESB_SET_PQ_10);
+ else
+ xive_poke_esb(xd, XIVE_ESB_SET_PQ_00);
+}
+
+/*
+ * Try to choose "cpu" as a new interrupt target. Increments
+ * the queue accounting for that target if it's not already
+ * full.
+ */
+static bool xive_try_pick_target(int cpu)
+{
+ struct xive_cpu *xc = per_cpu(xive_cpu, cpu);
+ struct xive_q *q = &xc->queue[xive_irq_priority];
+ int max;
+
+ /*
+ * Calculate max number of interrupts in that queue.
+ *
+ * We leave a gap of 1 just in case...
+ */
+ max = (q->msk + 1) - 1;
+ return !!atomic_add_unless(&q->count, 1, max);
+}
+
+/*
+ * Un-account an interrupt for a target CPU. We don't directly
+ * decrement q->count since the interrupt might still be present
+ * in the queue.
+ *
+ * Instead increment a separate counter "pending_count" which
+ * will be subtracted from "count" later when that CPU observes
+ * the queue to be empty.
+ */
+static void xive_dec_target_count(int cpu)
+{
+ struct xive_cpu *xc = per_cpu(xive_cpu, cpu);
+ struct xive_q *q = &xc->queue[xive_irq_priority];
+
+ if (unlikely(WARN_ON(cpu < 0 || !xc))) {
+ pr_err("%s: cpu=%d xc=%p\n", __func__, cpu, xc);
+ return;
+ }
+
+ /*
+ * We increment the "pending count" which will be used
+ * to decrement the target queue count whenever it's next
+ * processed and found empty. This ensures that we don't
+ * decrement while we still have the interrupt there
+ * occupying a slot.
+ */
+ atomic_inc(&q->pending_count);
+}
+
+/* Find a tentative CPU target in a CPU mask */
+static int xive_find_target_in_mask(const struct cpumask *mask,
+ unsigned int fuzz)
+{
+ int cpu, first, num, i;
+
+ /* Pick up a starting point CPU in the mask based on fuzz */
+ num = cpumask_weight(mask);
+ first = fuzz % num;
+
+ /* Locate it */
+ cpu = cpumask_first(mask);
+ for (i = 0; i < first && cpu < nr_cpu_ids; i++)
+ cpu = cpumask_next(cpu, mask);
+
+ /* Sanity check */
+ if (WARN_ON(cpu >= nr_cpu_ids))
+ cpu = cpumask_first(cpu_online_mask);
+
+ /* Remember first one to handle wrap-around */
+ first = cpu;
+
+ /*
+ * Now go through the entire mask until we find a valid
+ * target.
+ */
+ for (;;) {
+ /*
+ * We re-check online as the fallback case passes us
+ * an untested affinity mask
+ */
+ if (cpu_online(cpu) && xive_try_pick_target(cpu))
+ return cpu;
+ cpu = cpumask_next(cpu, mask);
+ if (cpu == first)
+ break;
+ /* Wrap around */
+ if (cpu >= nr_cpu_ids)
+ cpu = cpumask_first(mask);
+ }
+ return -1;
+}
+
+/*
+ * Pick a target CPU for an interrupt. This is done at
+ * startup or if the affinity is changed in a way that
+ * invalidates the current target.
+ */
+static int xive_pick_irq_target(struct irq_data *d,
+ const struct cpumask *affinity)
+{
+ static unsigned int fuzz;
+ struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
+ cpumask_var_t mask;
+ int cpu = -1;
+
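+ /*
+ * "fuzz" is bumped on each call so that successive interrupts are
+ * spread across the candidate CPUs instead of piling on one.
+ */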
+ /*
+ * If we have chip IDs, first we try to build a mask of
+ * CPUs on the source's chip and find a target in there
+ */
+ if (xd->src_chip != XIVE_INVALID_CHIP_ID &&
+ zalloc_cpumask_var(&mask, GFP_ATOMIC)) {
+ /* Build a mask of matching chip IDs */
+ for_each_cpu_and(cpu, affinity, cpu_online_mask) {
+ struct xive_cpu *xc = per_cpu(xive_cpu, cpu);
+ if (xc->chip_id == xd->src_chip)
+ cpumask_set_cpu(cpu, mask);
+ }
+ /* Try to find a target */
+ if (cpumask_empty(mask))
+ cpu = -1;
+ else
+ cpu = xive_find_target_in_mask(mask, fuzz++);
+ free_cpumask_var(mask);
+ if (cpu >= 0)
+ return cpu;
+ fuzz--;
+ }
+
+ /* No chip IDs, fallback to using the affinity mask */
+ return xive_find_target_in_mask(affinity, fuzz++);
+}
+
+static unsigned int xive_irq_startup(struct irq_data *d)
+{
+ struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
+ unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
+ int target, rc;
+
+ pr_devel("xive_irq_startup: irq %d [0x%x] data @%p\n",
+ d->irq, hw_irq, d);
+
+#ifdef CONFIG_PCI_MSI
+ /*
+ * The generic MSI code returns with the interrupt disabled on the
+ * card, using the MSI mask bits. Firmware doesn't appear to unmask
+ * at that level, so we do it here by hand.
+ */
+ if (irq_data_get_msi_desc(d))
+ pci_msi_unmask_irq(d);
+#endif
+
+ /* Pick a target */
+ target = xive_pick_irq_target(d, irq_data_get_affinity_mask(d));
+ if (target == XIVE_INVALID_TARGET) {
+ /* Try again breaking affinity */
+ target = xive_pick_irq_target(d, cpu_online_mask);
+ if (target == XIVE_INVALID_TARGET)
+ return -ENXIO;
+ pr_warn("irq %d started with broken affinity\n", d->irq);
+ }
+
+ /* Sanity check */
+ if (WARN_ON(target == XIVE_INVALID_TARGET ||
+ target >= nr_cpu_ids))
+ target = smp_processor_id();
+
+ xd->target = target;
+
+ /*
+ * Configure the logical number to be the Linux IRQ number
+ * and set the target queue
+ */
+ rc = xive_ops->configure_irq(hw_irq,
+ get_hard_smp_processor_id(target),
+ xive_irq_priority, d->irq);
+ if (rc)
+ return rc;
+
+ /* Unmask the ESB */
+ xive_do_source_set_mask(xd, false);
+
+ return 0;
+}
+
+static void xive_irq_shutdown(struct irq_data *d)
+{
+ struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
+ unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
+
+ pr_devel("xive_irq_shutdown: irq %d [0x%x] data @%p\n",
+ d->irq, hw_irq, d);
+
+ if (WARN_ON(xd->target == XIVE_INVALID_TARGET))
+ return;
+
+ /* Mask the interrupt at the source */
+ xive_do_source_set_mask(xd, true);
+
+ /*
+ * The above may have set saved_p. We clear it; otherwise it
+ * would prevent re-enabling later on. It is ok to forget the
+ * fact that the interrupt might be in a queue because we are
+ * already accounting for that in xive_dec_target_count() and will
+ * be re-routing it to a new queue with proper accounting when
+ * it's started up again.
+ */
+ xd->saved_p = false;
+
+ /*
+ * Mask the interrupt in HW in the IVT/EAS and set the number
+ * to be the "bad" IRQ number
+ */
+ xive_ops->configure_irq(hw_irq,
+ get_hard_smp_processor_id(xd->target),
+ 0xff, XIVE_BAD_IRQ);
+
+ xive_dec_target_count(xd->target);
+ xd->target = XIVE_INVALID_TARGET;
+}
+
+static void xive_irq_unmask(struct irq_data *d)
+{
+ struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
+
+ pr_devel("xive_irq_unmask: irq %d data @%p\n", d->irq, xd);
+
+ /*
+ * This is a workaround for PCI LSI problems on P9: for
+ * these, we call FW to set the mask. The problems might
+ * be fixed by P9 DD2.0, in which case firmware
+ * will no longer set that flag.
+ */
+ if (xd->flags & XIVE_IRQ_FLAG_MASK_FW) {
+ unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
+ xive_ops->configure_irq(hw_irq,
+ get_hard_smp_processor_id(xd->target),
+ xive_irq_priority, d->irq);
+ return;
+ }
+
+ xive_do_source_set_mask(xd, false);
+}
+
+static void xive_irq_mask(struct irq_data *d)
+{
+ struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
+
+ pr_devel("xive_irq_mask: irq %d data @%p\n", d->irq, xd);
+
+ /*
+ * This is a workaround for PCI LSI problems on P9: for
+ * these, we call OPAL to set the mask. The problems might
+ * be fixed by P9 DD2.0, in which case firmware
+ * will no longer set that flag.
+ */
+ if (xd->flags & XIVE_IRQ_FLAG_MASK_FW) {
+ unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
+ xive_ops->configure_irq(hw_irq,
+ get_hard_smp_processor_id(xd->target),
+ 0xff, d->irq);
+ return;
+ }
+
+ xive_do_source_set_mask(xd, true);
+}
+
+static int xive_irq_set_affinity(struct irq_data *d,
+ const struct cpumask *cpumask,
+ bool force)
+{
+ struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
+ unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
+ u32 target, old_target;
+ int rc = 0;
+
+ pr_devel("xive_irq_set_affinity: irq %d\n", d->irq);
+
+ /* Is this valid ? */
+ if (cpumask_any_and(cpumask, cpu_online_mask) >= nr_cpu_ids)
+ return -EINVAL;
+
+ /*
+ * If the existing target is already in the new mask and is
+ * online, then do nothing.
+ */
+ if (xd->target != XIVE_INVALID_TARGET &&
+ cpu_online(xd->target) &&
+ cpumask_test_cpu(xd->target, cpumask))
+ return IRQ_SET_MASK_OK;
+
+ /* Pick a new target */
+ target = xive_pick_irq_target(d, cpumask);
+
+ /* No target found */
+ if (target == XIVE_INVALID_TARGET)
+ return -ENXIO;
+
+ /* Sanity check */
+ if (WARN_ON(target >= nr_cpu_ids))
+ target = smp_processor_id();
+
+ old_target = xd->target;
+
+ rc = xive_ops->configure_irq(hw_irq,
+ get_hard_smp_processor_id(target),
+ xive_irq_priority, d->irq);
+ if (rc < 0) {
+ pr_err("Error %d reconfiguring irq %d\n", rc, d->irq);
+ return rc;
+ }
+
+ pr_devel(" target: 0x%x\n", target);
+ xd->target = target;
+
+ /* Give up previous target */
+ if (old_target != XIVE_INVALID_TARGET)
+ xive_dec_target_count(old_target);
+
+ return IRQ_SET_MASK_OK;
+}
+
+static int xive_irq_set_type(struct irq_data *d, unsigned int flow_type)
+{
+ struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
+
+ /*
+ * We only support these. This has really no effect other than setting
+ * the corresponding descriptor bits, mind you, but those will in turn
+ * affect the resend function when re-enabling an edge interrupt.
+ *
+ * We set the default to edge as explained in map().
+ */
+ if (flow_type == IRQ_TYPE_DEFAULT || flow_type == IRQ_TYPE_NONE)
+ flow_type = IRQ_TYPE_EDGE_RISING;
+
+ if (flow_type != IRQ_TYPE_EDGE_RISING &&
+ flow_type != IRQ_TYPE_LEVEL_LOW)
+ return -EINVAL;
+
+ irqd_set_trigger_type(d, flow_type);
+
+ /*
+ * Double check it matches what the FW thinks
+ *
+ * NOTE: We don't know yet if the PAPR interface will provide
+ * the LSI vs MSI information apart from the device-tree so
+ * this check might have to move into an optional backend call
+ * that is specific to the native backend
+ */
+ if ((flow_type == IRQ_TYPE_LEVEL_LOW) !=
+ !!(xd->flags & XIVE_IRQ_FLAG_LSI)) {
+ pr_warn("Interrupt %d (HW 0x%x) type mismatch, Linux says %s, FW says %s\n",
+ d->irq, (u32)irqd_to_hwirq(d),
+ (flow_type == IRQ_TYPE_LEVEL_LOW) ? "Level" : "Edge",
+ (xd->flags & XIVE_IRQ_FLAG_LSI) ? "Level" : "Edge");
+ }
+
+ return IRQ_SET_MASK_OK_NOCOPY;
+}
+
+static int xive_irq_retrigger(struct irq_data *d)
+{
+ struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
+
+ /* This should be only for MSIs */
+ if (WARN_ON(xd->flags & XIVE_IRQ_FLAG_LSI))
+ return 0;
+
+ /*
+ * To perform a retrigger, we first set the PQ bits to
+ * 11, then perform an EOI.
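+ * The EOI will then see that Q is set and re-trigger the
+ * interrupt rather than letting it be lost.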
+ */
+ xive_poke_esb(xd, XIVE_ESB_SET_PQ_11);
+
+ /*
+ * Note: We pass "0" to the hw_irq argument in order to
+ * avoid calling into the backend EOI code which we don't
+ * want to do in the case of a re-trigger. Backends typically
+ * only do EOI for LSIs anyway.
+ */
+ xive_do_source_eoi(0, xd);
+
+ return 1;
+}
+
+static struct irq_chip xive_irq_chip = {
+ .name = "XIVE-IRQ",
+ .irq_startup = xive_irq_startup,
+ .irq_shutdown = xive_irq_shutdown,
+ .irq_eoi = xive_irq_eoi,
+ .irq_mask = xive_irq_mask,
+ .irq_unmask = xive_irq_unmask,
+ .irq_set_affinity = xive_irq_set_affinity,
+ .irq_set_type = xive_irq_set_type,
+ .irq_retrigger = xive_irq_retrigger,
+};
+
+bool is_xive_irq(struct irq_chip *chip)
+{
+ return chip == &xive_irq_chip;
+}
+
+void xive_cleanup_irq_data(struct xive_irq_data *xd)
+{
+ if (xd->eoi_mmio) {
+ iounmap(xd->eoi_mmio);
+ if (xd->eoi_mmio == xd->trig_mmio)
+ xd->trig_mmio = NULL;
+ xd->eoi_mmio = NULL;
+ }
+ if (xd->trig_mmio) {
+ iounmap(xd->trig_mmio);
+ xd->trig_mmio = NULL;
+ }
+}
+
+static int xive_irq_alloc_data(unsigned int virq, irq_hw_number_t hw)
+{
+ struct xive_irq_data *xd;
+ int rc;
+
+ xd = kzalloc(sizeof(struct xive_irq_data), GFP_KERNEL);
+ if (!xd)
+ return -ENOMEM;
+ rc = xive_ops->populate_irq_data(hw, xd);
+ if (rc) {
+ kfree(xd);
+ return rc;
+ }
+ xd->target = XIVE_INVALID_TARGET;
+ irq_set_handler_data(virq, xd);
+
+ return 0;
+}
+
+static void xive_irq_free_data(unsigned int virq)
+{
+ struct xive_irq_data *xd = irq_get_handler_data(virq);
+
+ if (!xd)
+ return;
+ irq_set_handler_data(virq, NULL);
+ xive_cleanup_irq_data(xd);
+ kfree(xd);
+}
+
+#ifdef CONFIG_SMP
+
+static void xive_cause_ipi(int cpu)
+{
+ struct xive_cpu *xc;
+ struct xive_irq_data *xd;
+
+ xc = per_cpu(xive_cpu, cpu);
+
+ DBG_VERBOSE("IPI CPU %d -> %d (HW IRQ 0x%x)\n",
+ smp_processor_id(), cpu, xc->hw_ipi);
+
+ xd = &xc->ipi_data;
+ if (WARN_ON(!xd->trig_mmio))
+ return;
+ out_be64(xd->trig_mmio, 0);
+}
+
+static irqreturn_t xive_muxed_ipi_action(int irq, void *dev_id)
+{
+ return smp_ipi_demux();
+}
+
+static void xive_ipi_eoi(struct irq_data *d)
+{
+ struct xive_cpu *xc = __this_cpu_read(xive_cpu);
+
+ /* Handle possible race with unplug and drop stale IPIs */
+ if (!xc)
+ return;
+ xive_do_source_eoi(xc->hw_ipi, &xc->ipi_data);
+ xive_do_queue_eoi(xc);
+}
+
+static void xive_ipi_do_nothing(struct irq_data *d)
+{
+ /*
+ * Nothing to do, we never mask/unmask IPIs, but the callback
+ * has to exist for the struct irq_chip.
+ */
+}
+
+static struct irq_chip xive_ipi_chip = {
+ .name = "XIVE-IPI",
+ .irq_eoi = xive_ipi_eoi,
+ .irq_mask = xive_ipi_do_nothing,
+ .irq_unmask = xive_ipi_do_nothing,
+};
+
+static void __init xive_request_ipi(void)
+{
+ unsigned int virq;
+
+ /*
+ * Initialization failed; move on, we might manage to
+ * reach the point where we display our errors before
+ * the system falls apart.
+ */
+ if (!xive_irq_domain)
+ return;
+
+ /* Initialize it */
+ virq = irq_create_mapping(xive_irq_domain, 0);
+ xive_ipi_irq = virq;
+
+ WARN_ON(request_irq(virq, xive_muxed_ipi_action,
+ IRQF_PERCPU | IRQF_NO_THREAD, "IPI", NULL));
+}
+
+static int xive_setup_cpu_ipi(unsigned int cpu)
+{
+ struct xive_cpu *xc;
+ int rc;
+
+ pr_debug("Setting up IPI for CPU %d\n", cpu);
+
+ xc = per_cpu(xive_cpu, cpu);
+
+ /* Check if we are already set up */
+ if (xc->hw_ipi != 0)
+ return 0;
+
+ /* Grab an IPI from the backend, this will populate xc->hw_ipi */
+ if (xive_ops->get_ipi(cpu, xc))
+ return -EIO;
+
+ /*
+ * Populate the IRQ data in the xive_cpu structure and
+ * configure the HW / enable the IPIs.
+ */
+ rc = xive_ops->populate_irq_data(xc->hw_ipi, &xc->ipi_data);
+ if (rc) {
+ pr_err("Failed to populate IPI data on CPU %d\n", cpu);
+ return -EIO;
+ }
+ rc = xive_ops->configure_irq(xc->hw_ipi,
+ get_hard_smp_processor_id(cpu),
+ xive_irq_priority, xive_ipi_irq);
+ if (rc) {
+ pr_err("Failed to map IPI CPU %d\n", cpu);
+ return -EIO;
+ }
+ pr_devel("CPU %d HW IPI %x, virq %d, trig_mmio=%p\n", cpu,
+ xc->hw_ipi, xive_ipi_irq, xc->ipi_data.trig_mmio);
+
+ /* Unmask it */
+ xive_do_source_set_mask(&xc->ipi_data, false);
+
+ return 0;
+}
+
+static void xive_cleanup_cpu_ipi(unsigned int cpu, struct xive_cpu *xc)
+{
+ /* Disable the IPI and free the IRQ data */
+
+ /* Already cleaned up ? */
+ if (xc->hw_ipi == 0)
+ return;
+
+ /* Mask the IPI */
+ xive_do_source_set_mask(&xc->ipi_data, true);
+
+ /*
+ * Note: We don't call xive_cleanup_irq_data() to free
+ * the mappings as this is called from an IPI on kexec
+ * which is not a safe environment to call iounmap()
+ */
+
+ /* Deconfigure/mask in the backend */
+ xive_ops->configure_irq(xc->hw_ipi, hard_smp_processor_id(),
+ 0xff, xive_ipi_irq);
+
+ /* Free the IPIs in the backend */
+ xive_ops->put_ipi(cpu, xc);
+}
+
+void __init xive_smp_probe(void)
+{
+ smp_ops->cause_ipi = xive_cause_ipi;
+
+ /* Register the IPI */
+ xive_request_ipi();
+
+ /* Allocate and setup IPI for the boot CPU */
+ xive_setup_cpu_ipi(smp_processor_id());
+}
+
+#endif /* CONFIG_SMP */
+
+static int xive_irq_domain_map(struct irq_domain *h, unsigned int virq,
+ irq_hw_number_t hw)
+{
+ int rc;
+
+ /*
+ * Mark interrupts as edge sensitive by default so that resend
+ * actually works. Will fix that up below if needed.
+ */
+ irq_clear_status_flags(virq, IRQ_LEVEL);
+
+#ifdef CONFIG_SMP
+ /* IPIs are special and come up with HW number 0 */
+ if (hw == 0) {
+ /*
+ * IPIs are marked per-cpu. We use separate HW interrupts under
+ * the hood but associated with the same "linux" interrupt
+ */
+ irq_set_chip_and_handler(virq, &xive_ipi_chip,
+ handle_percpu_irq);
+ return 0;
+ }
+#endif
+
+ rc = xive_irq_alloc_data(virq, hw);
+ if (rc)
+ return rc;
+
+ irq_set_chip_and_handler(virq, &xive_irq_chip, handle_fasteoi_irq);
+
+ return 0;
+}
+
+static void xive_irq_domain_unmap(struct irq_domain *d, unsigned int virq)
+{
+ struct irq_data *data = irq_get_irq_data(virq);
+ unsigned int hw_irq;
+
+ /* XXX Assign BAD number */
+ if (!data)
+ return;
+ hw_irq = (unsigned int)irqd_to_hwirq(data);
+ if (hw_irq)
+ xive_irq_free_data(virq);
+}
+
+static int xive_irq_domain_xlate(struct irq_domain *h, struct device_node *ct,
+ const u32 *intspec, unsigned int intsize,
+ irq_hw_number_t *out_hwirq, unsigned int *out_flags)
+
+{
+ *out_hwirq = intspec[0];
+
+ /*
+ * If intsize is at least 2, we look for the type in the second cell,
+ * where we assume the LSB indicates a level interrupt.
+ */
+ if (intsize > 1) {
+ if (intspec[1] & 1)
+ *out_flags = IRQ_TYPE_LEVEL_LOW;
+ else
+ *out_flags = IRQ_TYPE_EDGE_RISING;
+ } else
+ *out_flags = IRQ_TYPE_LEVEL_LOW;
+
+ return 0;
+}
+
+static int xive_irq_domain_match(struct irq_domain *h, struct device_node *node,
+ enum irq_domain_bus_token bus_token)
+{
+ return xive_ops->match(node);
+}
+
+static const struct irq_domain_ops xive_irq_domain_ops = {
+ .match = xive_irq_domain_match,
+ .map = xive_irq_domain_map,
+ .unmap = xive_irq_domain_unmap,
+ .xlate = xive_irq_domain_xlate,
+};
+
+static void __init xive_init_host(void)
+{
+ xive_irq_domain = irq_domain_add_nomap(NULL, XIVE_MAX_IRQ,
+ &xive_irq_domain_ops, NULL);
+ if (WARN_ON(xive_irq_domain == NULL))
+ return;
+ irq_set_default_host(xive_irq_domain);
+}
+
+static void xive_cleanup_cpu_queues(unsigned int cpu, struct xive_cpu *xc)
+{
+ if (xc->queue[xive_irq_priority].qpage)
+ xive_ops->cleanup_queue(cpu, xc, xive_irq_priority);
+}
+
+static int xive_setup_cpu_queues(unsigned int cpu, struct xive_cpu *xc)
+{
+ int rc = 0;
+
+ /* We set up one queue for now, with a 64k page */
+ if (!xc->queue[xive_irq_priority].qpage)
+ rc = xive_ops->setup_queue(cpu, xc, xive_irq_priority);
+
+ return rc;
+}
+
+static int xive_prepare_cpu(unsigned int cpu)
+{
+ struct xive_cpu *xc;
+
+ xc = per_cpu(xive_cpu, cpu);
+ if (!xc) {
+ struct device_node *np;
+
+ xc = kzalloc_node(sizeof(struct xive_cpu),
+ GFP_KERNEL, cpu_to_node(cpu));
+ if (!xc)
+ return -ENOMEM;
+ np = of_get_cpu_node(cpu, NULL);
+ if (np)
+ xc->chip_id = of_get_ibm_chip_id(np);
+ of_node_put(np);
+
+ per_cpu(xive_cpu, cpu) = xc;
+ }
+
+ /* Set up EQs if not already done */
+ return xive_setup_cpu_queues(cpu, xc);
+}
+
+static void xive_setup_cpu(void)
+{
+ struct xive_cpu *xc = __this_cpu_read(xive_cpu);
+
+ /* Debug: Dump the TM state */
+ pr_devel("CPU %d [HW 0x%02x] VT=%02x\n",
+ smp_processor_id(), hard_smp_processor_id(),
+ in_8(xive_tima + xive_tima_offset + TM_WORD2));
+
+ /* The backend might have additional things to do */
+ if (xive_ops->setup_cpu)
+ xive_ops->setup_cpu(smp_processor_id(), xc);
+
+ /* Set CPPR to 0xff to enable flow of interrupts */
+ xc->cppr = 0xff;
+ out_8(xive_tima + xive_tima_offset + TM_CPPR, 0xff);
+}
+
+#ifdef CONFIG_SMP
+void xive_smp_setup_cpu(void)
+{
+ pr_devel("SMP setup CPU %d\n", smp_processor_id());
+
+ /* This will have already been done on the boot CPU */
+ if (smp_processor_id() != boot_cpuid)
+ xive_setup_cpu();
+
+}
+
+int xive_smp_prepare_cpu(unsigned int cpu)
+{
+ int rc;
+
+ /* Allocate per-CPU data and queues */
+ rc = xive_prepare_cpu(cpu);
+ if (rc)
+ return rc;
+
+ /* Allocate and setup IPI for the new CPU */
+ return xive_setup_cpu_ipi(cpu);
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+static void xive_flush_cpu_queue(unsigned int cpu, struct xive_cpu *xc)
+{
+ u32 irq;
+
+ /* We assume local irqs are disabled */
+ WARN_ON(!irqs_disabled());
+
+ /* Check what's already in the CPU queue */
+ while ((irq = xive_scan_interrupts(xc, false)) != 0) {
+ /*
+ * We need to re-route that interrupt to its new destination.
+ * First get and lock the descriptor
+ */
+ struct irq_desc *desc = irq_to_desc(irq);
+ struct irq_data *d = irq_desc_get_irq_data(desc);
+ struct xive_irq_data *xd;
+ unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
+
+ /*
+ * Ignore anything that isn't a XIVE irq, and ignore
+ * IPIs, which can just be dropped.
+ */
+ if (d->domain != xive_irq_domain || hw_irq == 0)
+ continue;
+
+ /*
+ * The IRQ should have already been re-routed; this is just a
+ * stale entry in the old queue, so re-trigger it in order to make
+ * it reach its new destination.
+ */
+#ifdef DEBUG_FLUSH
+ pr_info("CPU %d: Got irq %d while offline, re-sending...\n",
+ cpu, irq);
+#endif
+ raw_spin_lock(&desc->lock);
+ xd = irq_desc_get_handler_data(desc);
+
+ /*
+ * For LSIs, we EOI, this will cause a resend if it's
+ * still asserted. Otherwise do an MSI retrigger.
+ */
+ if (xd->flags & XIVE_IRQ_FLAG_LSI)
+ xive_do_source_eoi(irqd_to_hwirq(d), xd);
+ else
+ xive_irq_retrigger(d);
+
+ raw_spin_unlock(&desc->lock);
+ }
+}
+
+void xive_smp_disable_cpu(void)
+{
+ struct xive_cpu *xc = __this_cpu_read(xive_cpu);
+ unsigned int cpu = smp_processor_id();
+
+ /* Migrate interrupts away from the CPU */
+ irq_migrate_all_off_this_cpu();
+
+ /* Set CPPR to 0 to disable flow of interrupts */
+ xc->cppr = 0;
+ out_8(xive_tima + xive_tima_offset + TM_CPPR, 0);
+
+ /* Flush everything still in the queue */
+ xive_flush_cpu_queue(cpu, xc);
+
+ /* Re-enable CPPR */
+ xc->cppr = 0xff;
+ out_8(xive_tima + xive_tima_offset + TM_CPPR, 0xff);
+}
+
+void xive_flush_interrupt(void)
+{
+ struct xive_cpu *xc = __this_cpu_read(xive_cpu);
+ unsigned int cpu = smp_processor_id();
+
+ /* Called if an interrupt occurs while the CPU is hot unplugged */
+ xive_flush_cpu_queue(cpu, xc);
+}
+
+#endif /* CONFIG_HOTPLUG_CPU */
+
+#endif /* CONFIG_SMP */
+
+void xive_kexec_teardown_cpu(int secondary)
+{
+ struct xive_cpu *xc = __this_cpu_read(xive_cpu);
+ unsigned int cpu = smp_processor_id();
+
+ /* Set CPPR to 0 to disable flow of interrupts */
+ xc->cppr = 0;
+ out_8(xive_tima + xive_tima_offset + TM_CPPR, 0);
+
+ /* Backend cleanup if any */
+ if (xive_ops->teardown_cpu)
+ xive_ops->teardown_cpu(cpu, xc);
+
+#ifdef CONFIG_SMP
+ /* Get rid of IPI */
+ xive_cleanup_cpu_ipi(cpu, xc);
+#endif
+
+ /* Disable and free the queues */
+ xive_cleanup_cpu_queues(cpu, xc);
+}
+
+void xive_shutdown(void)
+{
+ xive_ops->shutdown();
+}
+
+bool xive_core_init(const struct xive_ops *ops, void __iomem *area, u32 offset,
+ u8 max_prio)
+{
+ xive_tima = area;
+ xive_tima_offset = offset;
+ xive_ops = ops;
+ xive_irq_priority = max_prio;
+
+ ppc_md.get_irq = xive_get_irq;
+ __xive_enabled = true;
+
+ pr_devel("Initializing host..\n");
+ xive_init_host();
+
+ pr_devel("Initializing boot CPU..\n");
+
+ /* Allocate per-CPU data and queues */
+ xive_prepare_cpu(smp_processor_id());
+
+ /* Get ready for interrupts */
+ xive_setup_cpu();
+
+ pr_info("Interrupt handling initialized with %s backend\n",
+ xive_ops->name);
+ pr_info("Using priority %d for all interrupts\n", max_prio);
+
+ return true;
+}
+
+static int __init xive_off(char *arg)
+{
+ xive_cmdline_disabled = true;
+ return 0;
+}
+__setup("xive=off", xive_off);
diff --git a/arch/powerpc/sysdev/xive/native.c b/arch/powerpc/sysdev/xive/native.c
new file mode 100644
index 000000000000..1a726229a427
--- /dev/null
+++ b/arch/powerpc/sysdev/xive/native.c
@@ -0,0 +1,640 @@
+/*
+ * Copyright 2016,2017 IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#define pr_fmt(fmt) "xive: " fmt
+
+#include <linux/types.h>
+#include <linux/irq.h>
+#include <linux/debugfs.h>
+#include <linux/smp.h>
+#include <linux/interrupt.h>
+#include <linux/seq_file.h>
+#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/cpumask.h>
+#include <linux/mm.h>
+
+#include <asm/prom.h>
+#include <asm/io.h>
+#include <asm/smp.h>
+#include <asm/irq.h>
+#include <asm/errno.h>
+#include <asm/xive.h>
+#include <asm/xive-regs.h>
+#include <asm/opal.h>
+
+#include "xive-internal.h"
+
+
+static u32 xive_provision_size;
+static u32 *xive_provision_chips;
+static u32 xive_provision_chip_count;
+static u32 xive_queue_shift;
+static u32 xive_pool_vps = XIVE_INVALID_VP;
+static struct kmem_cache *xive_provision_cache;
+
+int xive_native_populate_irq_data(u32 hw_irq, struct xive_irq_data *data)
+{
+ __be64 flags, eoi_page, trig_page;
+ __be32 esb_shift, src_chip;
+ u64 opal_flags;
+ s64 rc;
+
+ memset(data, 0, sizeof(*data));
+
+ rc = opal_xive_get_irq_info(hw_irq, &flags, &eoi_page, &trig_page,
+ &esb_shift, &src_chip);
+ if (rc) {
+ pr_err("opal_xive_get_irq_info(0x%x) returned %lld\n",
+ hw_irq, rc);
+ return -EINVAL;
+ }
+
+ opal_flags = be64_to_cpu(flags);
+ if (opal_flags & OPAL_XIVE_IRQ_STORE_EOI)
+ data->flags |= XIVE_IRQ_FLAG_STORE_EOI;
+ if (opal_flags & OPAL_XIVE_IRQ_LSI)
+ data->flags |= XIVE_IRQ_FLAG_LSI;
+ if (opal_flags & OPAL_XIVE_IRQ_SHIFT_BUG)
+ data->flags |= XIVE_IRQ_FLAG_SHIFT_BUG;
+ if (opal_flags & OPAL_XIVE_IRQ_MASK_VIA_FW)
+ data->flags |= XIVE_IRQ_FLAG_MASK_FW;
+ if (opal_flags & OPAL_XIVE_IRQ_EOI_VIA_FW)
+ data->flags |= XIVE_IRQ_FLAG_EOI_FW;
+ data->eoi_page = be64_to_cpu(eoi_page);
+ data->trig_page = be64_to_cpu(trig_page);
+ data->esb_shift = be32_to_cpu(esb_shift);
+ data->src_chip = be32_to_cpu(src_chip);
+
+ data->eoi_mmio = ioremap(data->eoi_page, 1u << data->esb_shift);
+ if (!data->eoi_mmio) {
+ pr_err("Failed to map EOI page for irq 0x%x\n", hw_irq);
+ return -ENOMEM;
+ }
+
+ if (!data->trig_page)
+ return 0;
+ if (data->trig_page == data->eoi_page) {
+ data->trig_mmio = data->eoi_mmio;
+ return 0;
+ }
+
+ data->trig_mmio = ioremap(data->trig_page, 1u << data->esb_shift);
+ if (!data->trig_mmio) {
+ pr_err("Failed to map trigger page for irq 0x%x\n", hw_irq);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+int xive_native_configure_irq(u32 hw_irq, u32 target, u8 prio, u32 sw_irq)
+{
+ s64 rc;
+
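+	/* OPAL may report OPAL_BUSY transiently; retry with a short sleep */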
+ for (;;) {
+ rc = opal_xive_set_irq_config(hw_irq, target, prio, sw_irq);
+ if (rc != OPAL_BUSY)
+ break;
+ msleep(1);
+ }
+ return rc == 0 ? 0 : -ENXIO;
+}
+
+/* This can be called multiple times to change a queue configuration */
+int xive_native_configure_queue(u32 vp_id, struct xive_q *q, u8 prio,
+ __be32 *qpage, u32 order, bool can_escalate)
+{
+ s64 rc = 0;
+ __be64 qeoi_page_be;
+ __be32 esc_irq_be;
+ u64 flags, qpage_phys;
+
+ /* If there's an actual queue page, get its physical address */
+ if (order) {
+ if (WARN_ON(!qpage))
+ return -EINVAL;
+ qpage_phys = __pa(qpage);
+ } else
+ qpage_phys = 0;
+
+ /* Initialize the rest of the fields */
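+	/* "order" is log2 of the queue size in bytes; each entry is 4 bytes */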
+ q->msk = order ? ((1u << (order - 2)) - 1) : 0;
+ q->idx = 0;
+ q->toggle = 0;
+
+ rc = opal_xive_get_queue_info(vp_id, prio, NULL, NULL,
+ &qeoi_page_be,
+ &esc_irq_be,
+ NULL);
+ if (rc) {
+ pr_err("Error %lld getting queue info prio %d\n", rc, prio);
+ rc = -EIO;
+ goto fail;
+ }
+ q->eoi_phys = be64_to_cpu(qeoi_page_be);
+
+ /* Default flags */
+ flags = OPAL_XIVE_EQ_ALWAYS_NOTIFY | OPAL_XIVE_EQ_ENABLED;
+
+ /* Escalation needed ? */
+ if (can_escalate) {
+ q->esc_irq = be32_to_cpu(esc_irq_be);
+ flags |= OPAL_XIVE_EQ_ESCALATE;
+ }
+
+ /* Configure and enable the queue in HW */
+ for (;;) {
+ rc = opal_xive_set_queue_info(vp_id, prio, qpage_phys, order, flags);
+ if (rc != OPAL_BUSY)
+ break;
+ msleep(1);
+ }
+ if (rc) {
+ pr_err("Error %lld setting queue for prio %d\n", rc, prio);
+ rc = -EIO;
+ } else {
+ /*
+ * KVM code requires all of the above to be visible before
+ * q->qpage is set due to how it manages IPI EOIs
+ */
+ wmb();
+ q->qpage = qpage;
+ }
+fail:
+ return rc;
+}
+
+static void __xive_native_disable_queue(u32 vp_id, struct xive_q *q, u8 prio)
+{
+ s64 rc;
+
+ /* Disable the queue in HW */
+ for (;;) {
+ rc = opal_xive_set_queue_info(vp_id, prio, 0, 0, 0);
+ if (rc != OPAL_BUSY)
+ break;
+ msleep(1);
+ }
+ if (rc)
+ pr_err("Error %lld disabling queue for prio %d\n", rc, prio);
+}
+
+void xive_native_disable_queue(u32 vp_id, struct xive_q *q, u8 prio)
+{
+ __xive_native_disable_queue(vp_id, q, prio);
+}
+
+static int xive_native_setup_queue(unsigned int cpu, struct xive_cpu *xc, u8 prio)
+{
+ struct xive_q *q = &xc->queue[prio];
+ unsigned int alloc_order;
+ struct page *pages;
+ __be32 *qpage;
+
+ alloc_order = (xive_queue_shift > PAGE_SHIFT) ?
+ (xive_queue_shift - PAGE_SHIFT) : 0;
+ pages = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, alloc_order);
+ if (!pages)
+ return -ENOMEM;
+ qpage = (__be32 *)page_address(pages);
+ memset(qpage, 0, 1 << xive_queue_shift);
+ return xive_native_configure_queue(get_hard_smp_processor_id(cpu),
+ q, prio, qpage, xive_queue_shift, false);
+}
+
+static void xive_native_cleanup_queue(unsigned int cpu, struct xive_cpu *xc, u8 prio)
+{
+ struct xive_q *q = &xc->queue[prio];
+ unsigned int alloc_order;
+
+ /*
+ * We use the variant with no iounmap as this is called on kexec
+ * from an IPI and iounmap isn't safe
+ */
+ __xive_native_disable_queue(get_hard_smp_processor_id(cpu), q, prio);
+ alloc_order = (xive_queue_shift > PAGE_SHIFT) ?
+ (xive_queue_shift - PAGE_SHIFT) : 0;
+ free_pages((unsigned long)q->qpage, alloc_order);
+ q->qpage = NULL;
+}
+
+static bool xive_native_match(struct device_node *node)
+{
+ return of_device_is_compatible(node, "ibm,opal-xive-vc");
+}
+
+#ifdef CONFIG_SMP
+static int xive_native_get_ipi(unsigned int cpu, struct xive_cpu *xc)
+{
+ struct device_node *np;
+ unsigned int chip_id = 0;
+ s64 irq;
+
+ /* Find the chip ID */
+ np = of_get_cpu_node(cpu, NULL);
+ if (np) {
+ if (of_property_read_u32(np, "ibm,chip-id", &chip_id) < 0)
+ chip_id = 0;
+ }
+
+ /* Allocate an IPI and populate info about it */
+ for (;;) {
+ irq = opal_xive_allocate_irq(chip_id);
+ if (irq == OPAL_BUSY) {
+ msleep(1);
+ continue;
+ }
+ if (irq < 0) {
+ pr_err("Failed to allocate IPI on CPU %d\n", cpu);
+ return -ENXIO;
+ }
+ xc->hw_ipi = irq;
+ break;
+ }
+ return 0;
+}
+
+u32 xive_native_alloc_irq(void)
+{
+ s64 rc;
+
+ for (;;) {
+ rc = opal_xive_allocate_irq(OPAL_XIVE_ANY_CHIP);
+ if (rc != OPAL_BUSY)
+ break;
+ msleep(1);
+ }
+ if (rc < 0)
+ return 0;
+ return rc;
+}
+
+void xive_native_free_irq(u32 irq)
+{
+ for (;;) {
+ s64 rc = opal_xive_free_irq(irq);
+ if (rc != OPAL_BUSY)
+ break;
+ msleep(1);
+ }
+}
+
+static void xive_native_put_ipi(unsigned int cpu, struct xive_cpu *xc)
+{
+ s64 rc;
+
+ /* Free the IPI */
+ if (!xc->hw_ipi)
+ return;
+ for (;;) {
+ rc = opal_xive_free_irq(xc->hw_ipi);
+ if (rc == OPAL_BUSY) {
+ msleep(1);
+ continue;
+ }
+ xc->hw_ipi = 0;
+ break;
+ }
+}
+#endif /* CONFIG_SMP */
+
+static void xive_native_shutdown(void)
+{
+ /* Switch the XIVE to emulation mode */
+ opal_xive_reset(OPAL_XIVE_MODE_EMU);
+}
+
+/*
+ * Perform an "ack" cycle on the current thread, thus
+ * grabbing the pending active priorities and updating
+ * the CPPR to the most favored one.
+ */
+static void xive_native_update_pending(struct xive_cpu *xc)
+{
+ u8 he, cppr;
+ u16 ack;
+
+ /* Perform the "acknowledge hypervisor interrupt" cycle on the ACK register */
+ ack = be16_to_cpu(__raw_readw(xive_tima + TM_SPC_ACK_HV_REG));
+
+ /* Synchronize subsequent queue accesses */
+ mb();
+
+ /*
+ * Grab the CPPR and the "HE" field which indicates the source
+ * of the hypervisor interrupt (if any)
+ */
+ cppr = ack & 0xff;
+ he = GETFIELD(TM_QW3_NSR_HE, (ack >> 8));
+ switch(he) {
+ case TM_QW3_NSR_HE_NONE: /* Nothing to see here */
+ break;
+ case TM_QW3_NSR_HE_PHYS: /* Physical thread interrupt */
+ if (cppr == 0xff)
+ return;
+ /* Mark the priority pending */
+ xc->pending_prio |= 1 << cppr;
+
+ /*
+ * A new interrupt should never have a CPPR less favored
+ * than our current one.
+ */
+ if (cppr >= xc->cppr)
+ pr_err("CPU %d odd ack CPPR, got %d at %d\n",
+ smp_processor_id(), cppr, xc->cppr);
+
+ /* Update our idea of what the CPPR is */
+ xc->cppr = cppr;
+ break;
+ case TM_QW3_NSR_HE_POOL: /* HV Pool interrupt (unused) */
+ case TM_QW3_NSR_HE_LSI: /* Legacy FW LSI (unused) */
+ pr_err("CPU %d got unexpected interrupt type HE=%d\n",
+ smp_processor_id(), he);
+ return;
+ }
+}
+
+static void xive_native_eoi(u32 hw_irq)
+{
+ /*
+ * Not normally used except if specific interrupts need
+ * a workaround on EOI.
+ */
+ opal_int_eoi(hw_irq);
+}
+
+static void xive_native_setup_cpu(unsigned int cpu, struct xive_cpu *xc)
+{
+ s64 rc;
+ u32 vp;
+ __be64 vp_cam_be;
+ u64 vp_cam;
+
+ if (xive_pool_vps == XIVE_INVALID_VP)
+ return;
+
+ /* Enable the pool VP */
+ vp = xive_pool_vps + get_hard_smp_processor_id(cpu);
+ pr_debug("CPU %d setting up pool VP 0x%x\n", cpu, vp);
+ for (;;) {
+ rc = opal_xive_set_vp_info(vp, OPAL_XIVE_VP_ENABLED, 0);
+ if (rc != OPAL_BUSY)
+ break;
+ msleep(1);
+ }
+ if (rc) {
+ pr_err("Failed to enable pool VP on CPU %d\n", cpu);
+ return;
+ }
+
+ /* Grab its CAM value */
+ rc = opal_xive_get_vp_info(vp, NULL, &vp_cam_be, NULL, NULL);
+ if (rc) {
+ pr_err("Failed to get pool VP info CPU %d\n", cpu);
+ return;
+ }
+ vp_cam = be64_to_cpu(vp_cam_be);
+
+ pr_debug("VP CAM = %llx\n", vp_cam);
+
+ /* Push it on the CPU (set LSMFB to 0xff to skip backlog scan) */
+ pr_debug("(Old HW value: %08x)\n",
+ in_be32(xive_tima + TM_QW2_HV_POOL + TM_WORD2));
+ out_be32(xive_tima + TM_QW2_HV_POOL + TM_WORD0, 0xff);
+ out_be32(xive_tima + TM_QW2_HV_POOL + TM_WORD2,
+ TM_QW2W2_VP | vp_cam);
+ pr_debug("(New HW value: %08x)\n",
+ in_be32(xive_tima + TM_QW2_HV_POOL + TM_WORD2));
+}
+
+static void xive_native_teardown_cpu(unsigned int cpu, struct xive_cpu *xc)
+{
+ s64 rc;
+ u32 vp;
+
+ if (xive_pool_vps == XIVE_INVALID_VP)
+ return;
+
+ /* Pull the pool VP from the CPU */
+ in_be64(xive_tima + TM_SPC_PULL_POOL_CTX);
+
+ /* Disable it */
+ vp = xive_pool_vps + get_hard_smp_processor_id(cpu);
+ for (;;) {
+ rc = opal_xive_set_vp_info(vp, 0, 0);
+ if (rc != OPAL_BUSY)
+ break;
+ msleep(1);
+ }
+}
+
+static void xive_native_sync_source(u32 hw_irq)
+{
+ opal_xive_sync(XIVE_SYNC_EAS, hw_irq);
+}
+
+static const struct xive_ops xive_native_ops = {
+ .populate_irq_data = xive_native_populate_irq_data,
+ .configure_irq = xive_native_configure_irq,
+ .setup_queue = xive_native_setup_queue,
+ .cleanup_queue = xive_native_cleanup_queue,
+ .match = xive_native_match,
+ .shutdown = xive_native_shutdown,
+ .update_pending = xive_native_update_pending,
+ .eoi = xive_native_eoi,
+ .setup_cpu = xive_native_setup_cpu,
+ .teardown_cpu = xive_native_teardown_cpu,
+ .sync_source = xive_native_sync_source,
+#ifdef CONFIG_SMP
+ .get_ipi = xive_native_get_ipi,
+ .put_ipi = xive_native_put_ipi,
+#endif /* CONFIG_SMP */
+ .name = "native",
+};
+
+static bool xive_parse_provisioning(struct device_node *np)
+{
+ int rc;
+
+ if (of_property_read_u32(np, "ibm,xive-provision-page-size",
+ &xive_provision_size) < 0)
+ return true;
+ rc = of_property_count_elems_of_size(np, "ibm,xive-provision-chips", 4);
+ if (rc < 0) {
+ pr_err("Error %d getting provision chips array\n", rc);
+ return false;
+ }
+ xive_provision_chip_count = rc;
+ if (rc == 0)
+ return true;
+
+ xive_provision_chips = kzalloc(4 * xive_provision_chip_count,
+ GFP_KERNEL);
+ if (WARN_ON(!xive_provision_chips))
+ return false;
+
+ rc = of_property_read_u32_array(np, "ibm,xive-provision-chips",
+ xive_provision_chips,
+ xive_provision_chip_count);
+ if (rc < 0) {
+ pr_err("Error %d reading provision chips array\n", rc);
+ return false;
+ }
+
+ xive_provision_cache = kmem_cache_create("xive-provision",
+ xive_provision_size,
+ xive_provision_size,
+ 0, NULL);
+ if (!xive_provision_cache) {
+ pr_err("Failed to allocate provision cache\n");
+ return false;
+ }
+ return true;
+}
+
+u32 xive_native_default_eq_shift(void)
+{
+ return xive_queue_shift;
+}
+
+bool xive_native_init(void)
+{
+ struct device_node *np;
+ struct resource r;
+ void __iomem *tima;
+ struct property *prop;
+ u8 max_prio = 7;
+ const __be32 *p;
+ u32 val;
+ s64 rc;
+
+ if (xive_cmdline_disabled)
+ return false;
+
+ pr_devel("xive_native_init()\n");
+ np = of_find_compatible_node(NULL, NULL, "ibm,opal-xive-pe");
+ if (!np) {
+ pr_devel("not found!\n");
+ return false;
+ }
+ pr_devel("Found %s\n", np->full_name);
+
+ /* Resource 1 is HV window */
+ if (of_address_to_resource(np, 1, &r)) {
+ pr_err("Failed to get thread mgmt area resource\n");
+ return false;
+ }
+ tima = ioremap(r.start, resource_size(&r));
+ if (!tima) {
+ pr_err("Failed to map thread mgmt area\n");
+ return false;
+ }
+
+ /* Read number of priorities */
+ if (of_property_read_u32(np, "ibm,xive-#priorities", &val) == 0)
+ max_prio = val - 1;
+
+ /* Iterate the EQ sizes and pick one */
+ of_property_for_each_u32(np, "ibm,xive-eq-sizes", prop, p, val) {
+ xive_queue_shift = val;
+ if (val == PAGE_SHIFT)
+ break;
+ }
+
+ /* Grab size of provisioning pages */
+ xive_parse_provisioning(np);
+
+ /* Switch the XIVE to exploitation mode */
+ rc = opal_xive_reset(OPAL_XIVE_MODE_EXPL);
+ if (rc) {
+ pr_err("Switch to exploitation mode failed with error %lld\n", rc);
+ return false;
+ }
+
+ /* Initialize XIVE core with our backend */
+ if (!xive_core_init(&xive_native_ops, tima, TM_QW3_HV_PHYS,
+ max_prio)) {
+ opal_xive_reset(OPAL_XIVE_MODE_EMU);
+ return false;
+ }
+ pr_info("Using %dkB queues\n", 1 << (xive_queue_shift - 10));
+ return true;
+}
+
+static bool xive_native_provision_pages(void)
+{
+ u32 i;
+ void *p;
+
+ for (i = 0; i < xive_provision_chip_count; i++) {
+ u32 chip = xive_provision_chips[i];
+
+ /*
+ * XXX TODO: Try to make the allocation local to the node where
+ * the chip resides.
+ */
+ p = kmem_cache_alloc(xive_provision_cache, GFP_KERNEL);
+ if (!p) {
+ pr_err("Failed to allocate provisioning page\n");
+ return false;
+ }
+ opal_xive_donate_page(chip, __pa(p));
+ }
+ return true;
+}
+
+u32 xive_native_alloc_vp_block(u32 max_vcpus)
+{
+ s64 rc;
+ u32 order;
+
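+	/* Round max_vcpus up to a power of two: order = ceil(log2(max_vcpus)) */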
+ order = fls(max_vcpus) - 1;
+ if (max_vcpus > (1 << order))
+ order++;
+
+ pr_info("VP block alloc, for max VCPUs %d use order %d\n",
+ max_vcpus, order);
+
+ for (;;) {
+ rc = opal_xive_alloc_vp_block(order);
+ switch (rc) {
+ case OPAL_BUSY:
+ msleep(1);
+ break;
+ case OPAL_XIVE_PROVISIONING:
+ if (!xive_native_provision_pages())
+ return XIVE_INVALID_VP;
+ break;
+ default:
+ if (rc < 0) {
+ pr_err("OPAL failed to allocate VCPUs order %d, err %lld\n",
+ order, rc);
+ return XIVE_INVALID_VP;
+ }
+ return rc;
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(xive_native_alloc_vp_block);
+
+void xive_native_free_vp_block(u32 vp_base)
+{
+ s64 rc;
+
+ if (vp_base == XIVE_INVALID_VP)
+ return;
+
+ rc = opal_xive_free_vp_block(vp_base);
+ if (rc < 0)
+ pr_warn("OPAL error %lld freeing VP block\n", rc);
+}
+EXPORT_SYMBOL_GPL(xive_native_free_vp_block);
diff --git a/arch/powerpc/sysdev/xive/xive-internal.h b/arch/powerpc/sysdev/xive/xive-internal.h
new file mode 100644
index 000000000000..d07ef2d29caf
--- /dev/null
+++ b/arch/powerpc/sysdev/xive/xive-internal.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2016,2017 IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef __XIVE_INTERNAL_H
+#define __XIVE_INTERNAL_H
+
+/* Each CPU carries one of these with various bits of per-CPU state */
+struct xive_cpu {
+#ifdef CONFIG_SMP
+ /* HW irq number and data of IPI */
+ u32 hw_ipi;
+ struct xive_irq_data ipi_data;
+#endif /* CONFIG_SMP */
+
+ int chip_id;
+
+ /* Queue data. Only one queue is populated for now */
+#define XIVE_MAX_QUEUES 8
+ struct xive_q queue[XIVE_MAX_QUEUES];
+
+ /*
+ * Pending mask. Each bit corresponds to a priority that
+ * potentially has pending interrupts.
+ */
+ u8 pending_prio;
+
+ /* Cache of HW CPPR */
+ u8 cppr;
+};
+
+/* Backend ops */
+struct xive_ops {
+ int (*populate_irq_data)(u32 hw_irq, struct xive_irq_data *data);
+ int (*configure_irq)(u32 hw_irq, u32 target, u8 prio, u32 sw_irq);
+ int (*setup_queue)(unsigned int cpu, struct xive_cpu *xc, u8 prio);
+ void (*cleanup_queue)(unsigned int cpu, struct xive_cpu *xc, u8 prio);
+ void (*setup_cpu)(unsigned int cpu, struct xive_cpu *xc);
+ void (*teardown_cpu)(unsigned int cpu, struct xive_cpu *xc);
+ bool (*match)(struct device_node *np);
+ void (*shutdown)(void);
+
+ void (*update_pending)(struct xive_cpu *xc);
+ void (*eoi)(u32 hw_irq);
+ void (*sync_source)(u32 hw_irq);
+#ifdef CONFIG_SMP
+ int (*get_ipi)(unsigned int cpu, struct xive_cpu *xc);
+ void (*put_ipi)(unsigned int cpu, struct xive_cpu *xc);
+#endif
+ const char *name;
+};
+
+bool xive_core_init(const struct xive_ops *ops, void __iomem *area, u32 offset,
+ u8 max_prio);
+
+extern bool xive_cmdline_disabled;
+
+#endif /* __XIVE_INTERNAL_H */
diff --git a/arch/powerpc/scripts/gcc-check-mprofile-kernel.sh b/arch/powerpc/tools/gcc-check-mprofile-kernel.sh
index c658d8cf760b..c658d8cf760b 100755
--- a/arch/powerpc/scripts/gcc-check-mprofile-kernel.sh
+++ b/arch/powerpc/tools/gcc-check-mprofile-kernel.sh
diff --git a/arch/powerpc/relocs_check.sh b/arch/powerpc/tools/relocs_check.sh
index ec2d5c835170..ec2d5c835170 100755
--- a/arch/powerpc/relocs_check.sh
+++ b/arch/powerpc/tools/relocs_check.sh
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index 16321ad9e70c..f11f65634aab 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -29,7 +29,9 @@
#include <linux/nmi.h>
#include <linux/ctype.h>
+#include <asm/debugfs.h>
#include <asm/ptrace.h>
+#include <asm/smp.h>
#include <asm/string.h>
#include <asm/prom.h>
#include <asm/machdep.h>
@@ -48,7 +50,7 @@
#include <asm/reg.h>
#include <asm/debug.h>
#include <asm/hw_breakpoint.h>
-
+#include <asm/xive.h>
#include <asm/opal.h>
#include <asm/firmware.h>
@@ -76,6 +78,7 @@ static int xmon_gate;
#endif /* CONFIG_SMP */
static unsigned long in_xmon __read_mostly = 0;
+static int xmon_on = IS_ENABLED(CONFIG_XMON_DEFAULT);
static unsigned long adrs;
static int size = 1;
@@ -184,8 +187,6 @@ static void dump_tlb_44x(void);
static void dump_tlb_book3e(void);
#endif
-static int xmon_no_auto_backtrace;
-
#ifdef CONFIG_PPC64
#define REG "%.16lx"
#else
@@ -232,7 +233,13 @@ Commands:\n\
"\
dr dump stream of raw bytes\n\
dt dump the tracing buffers (uses printk)\n\
- e print exception information\n\
+"
+#ifdef CONFIG_PPC_POWERNV
+" dx# dump xive on CPU #\n\
+ dxi# dump xive irq state #\n\
+ dxa dump xive on all CPUs\n"
+#endif
+" e print exception information\n\
f flush cache\n\
la lookup symbol+offset of specified address\n\
ls lookup address of specified symbol\n\
@@ -411,7 +418,22 @@ int cpus_are_in_xmon(void)
{
return !cpumask_empty(&cpus_in_xmon);
}
-#endif
+
+static bool wait_for_other_cpus(int ncpus)
+{
+ unsigned long timeout;
+
+ /* We wait for 2s, which is a metric "little while" */
+ for (timeout = 20000; timeout != 0; --timeout) {
+ if (cpumask_weight(&cpus_in_xmon) >= ncpus)
+ return true;
+ udelay(100);
+ barrier();
+ }
+
+ return false;
+}
+#endif /* CONFIG_SMP */
static inline int unrecoverable_excp(struct pt_regs *regs)
{
@@ -433,7 +455,6 @@ static int xmon_core(struct pt_regs *regs, int fromipi)
#ifdef CONFIG_SMP
int cpu;
int secondary;
- unsigned long timeout;
#endif
local_irq_save(flags);
@@ -520,13 +541,17 @@ static int xmon_core(struct pt_regs *regs, int fromipi)
xmon_owner = cpu;
mb();
if (ncpus > 1) {
- smp_send_debugger_break();
- /* wait for other cpus to come in */
- for (timeout = 100000000; timeout != 0; --timeout) {
- if (cpumask_weight(&cpus_in_xmon) >= ncpus)
- break;
- barrier();
- }
+ /*
+ * A system reset (trap == 0x100) can be triggered on
+ * all CPUs, so when we come in via 0x100 try waiting
+ * for the other CPUs to come in before we send the
+ * debugger break (IPI). This is similar to
+ * crash_kexec_secondary().
+ */
+ if (TRAP(regs) != 0x100 || !wait_for_other_cpus(ncpus))
+ smp_send_debugger_break();
+
+ wait_for_other_cpus(ncpus);
}
remove_bpts();
disable_surveillance();
@@ -884,10 +909,7 @@ cmds(struct pt_regs *excp)
last_cmd = NULL;
xmon_regs = excp;
- if (!xmon_no_auto_backtrace) {
- xmon_no_auto_backtrace = 1;
- xmon_show_stack(excp->gpr[1], excp->link, excp->nip);
- }
+ xmon_show_stack(excp->gpr[1], excp->link, excp->nip);
for(;;) {
#ifdef CONFIG_SMP
@@ -1347,9 +1369,19 @@ const char *getvecname(unsigned long vec)
case 0x100: ret = "(System Reset)"; break;
case 0x200: ret = "(Machine Check)"; break;
case 0x300: ret = "(Data Access)"; break;
- case 0x380: ret = "(Data SLB Access)"; break;
+ case 0x380:
+ if (radix_enabled())
+ ret = "(Data Access Out of Range)";
+ else
+ ret = "(Data SLB Access)";
+ break;
case 0x400: ret = "(Instruction Access)"; break;
- case 0x480: ret = "(Instruction SLB Access)"; break;
+ case 0x480:
+ if (radix_enabled())
+ ret = "(Instruction Access Out of Range)";
+ else
+ ret = "(Instruction SLB Access)";
+ break;
case 0x500: ret = "(Hardware Interrupt)"; break;
case 0x600: ret = "(Alignment)"; break;
case 0x700: ret = "(Program Check)"; break;
@@ -2231,7 +2263,9 @@ static void dump_one_paca(int cpu)
DUMP(p, kernel_msr, "lx");
DUMP(p, emergency_sp, "p");
#ifdef CONFIG_PPC_BOOK3S_64
+ DUMP(p, nmi_emergency_sp, "p");
DUMP(p, mc_emergency_sp, "p");
+ DUMP(p, in_nmi, "x");
DUMP(p, in_mce, "x");
DUMP(p, hmi_event_available, "x");
#endif
@@ -2338,6 +2372,81 @@ static void dump_pacas(void)
}
#endif
+#ifdef CONFIG_PPC_POWERNV
+static void dump_one_xive(int cpu)
+{
+ unsigned int hwid = get_hard_smp_processor_id(cpu);
+
+ opal_xive_dump(XIVE_DUMP_TM_HYP, hwid);
+ opal_xive_dump(XIVE_DUMP_TM_POOL, hwid);
+ opal_xive_dump(XIVE_DUMP_TM_OS, hwid);
+ opal_xive_dump(XIVE_DUMP_TM_USER, hwid);
+ opal_xive_dump(XIVE_DUMP_VP, hwid);
+ opal_xive_dump(XIVE_DUMP_EMU_STATE, hwid);
+
+ if (setjmp(bus_error_jmp) != 0) {
+ catch_memory_errors = 0;
+ printf("*** Error dumping xive on cpu %d\n", cpu);
+ return;
+ }
+
+ catch_memory_errors = 1;
+ sync();
+ xmon_xive_do_dump(cpu);
+ sync();
+ __delay(200);
+ catch_memory_errors = 0;
+}
+
+static void dump_all_xives(void)
+{
+ int cpu;
+
+ if (num_possible_cpus() == 0) {
+ printf("No possible cpus, use 'dx #' to dump individual cpus\n");
+ return;
+ }
+
+ for_each_possible_cpu(cpu)
+ dump_one_xive(cpu);
+}
+
+static void dump_one_xive_irq(u32 num)
+{
+ s64 rc;
+ __be64 vp;
+ u8 prio;
+ __be32 lirq;
+
+ rc = opal_xive_get_irq_config(num, &vp, &prio, &lirq);
+ xmon_printf("IRQ 0x%x config: vp=0x%llx prio=%d lirq=0x%x (rc=%lld)\n",
+ num, be64_to_cpu(vp), prio, be32_to_cpu(lirq), rc);
+}
+
+static void dump_xives(void)
+{
+ unsigned long num;
+ int c;
+
+ c = inchar();
+ if (c == 'a') {
+ dump_all_xives();
+ return;
+ } else if (c == 'i') {
+ if (scanhex(&num))
+ dump_one_xive_irq(num);
+ return;
+ }
+
+ termch = c; /* Put c back, it wasn't 'a' or 'i' */
+
+ if (scanhex(&num))
+ dump_one_xive(num);
+ else
+ dump_one_xive(xmon_owner);
+}
+#endif /* CONFIG_PPC_POWERNV */
+
static void dump_by_size(unsigned long addr, long count, int size)
{
unsigned char temp[16];
@@ -2386,6 +2495,14 @@ dump(void)
return;
}
#endif
+#ifdef CONFIG_PPC_POWERNV
+ if (c == 'x') {
+ xmon_start_pagination();
+ dump_xives();
+ xmon_end_pagination();
+ return;
+ }
+#endif
if (c == '\n')
termch = c;
@@ -3070,23 +3187,28 @@ void dump_segments(void)
for (i = 0; i < mmu_slb_size; i++) {
asm volatile("slbmfee %0,%1" : "=r" (esid) : "r" (i));
asm volatile("slbmfev %0,%1" : "=r" (vsid) : "r" (i));
- if (esid || vsid) {
- printf("%02d %016lx %016lx", i, esid, vsid);
- if (esid & SLB_ESID_V) {
- llp = vsid & SLB_VSID_LLP;
- if (vsid & SLB_VSID_B_1T) {
- printf(" 1T ESID=%9lx VSID=%13lx LLP:%3lx \n",
- GET_ESID_1T(esid),
- (vsid & ~SLB_VSID_B) >> SLB_VSID_SHIFT_1T,
- llp);
- } else {
- printf(" 256M ESID=%9lx VSID=%13lx LLP:%3lx \n",
- GET_ESID(esid),
- (vsid & ~SLB_VSID_B) >> SLB_VSID_SHIFT,
- llp);
- }
- } else
- printf("\n");
+
+ if (!esid && !vsid)
+ continue;
+
+ printf("%02d %016lx %016lx", i, esid, vsid);
+
+ if (!(esid & SLB_ESID_V)) {
+ printf("\n");
+ continue;
+ }
+
+ llp = vsid & SLB_VSID_LLP;
+ if (vsid & SLB_VSID_B_1T) {
+ printf(" 1T ESID=%9lx VSID=%13lx LLP:%3lx \n",
+ GET_ESID_1T(esid),
+ (vsid & ~SLB_VSID_B) >> SLB_VSID_SHIFT_1T,
+ llp);
+ } else {
+ printf(" 256M ESID=%9lx VSID=%13lx LLP:%3lx \n",
+ GET_ESID(esid),
+ (vsid & ~SLB_VSID_B) >> SLB_VSID_SHIFT,
+ llp);
}
}
}
@@ -3302,6 +3424,8 @@ static void sysrq_handle_xmon(int key)
/* ensure xmon is enabled */
xmon_init(1);
debugger(get_irq_regs());
+ if (!xmon_on)
+ xmon_init(0);
}
static struct sysrq_key_op sysrq_xmon_op = {
@@ -3315,10 +3439,37 @@ static int __init setup_xmon_sysrq(void)
register_sysrq_key('x', &sysrq_xmon_op);
return 0;
}
-__initcall(setup_xmon_sysrq);
+device_initcall(setup_xmon_sysrq);
#endif /* CONFIG_MAGIC_SYSRQ */
-static int __initdata xmon_early, xmon_off;
+#ifdef CONFIG_DEBUG_FS
+static int xmon_dbgfs_set(void *data, u64 val)
+{
+ xmon_on = !!val;
+ xmon_init(xmon_on);
+
+ return 0;
+}
+
+static int xmon_dbgfs_get(void *data, u64 *val)
+{
+ *val = xmon_on;
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(xmon_dbgfs_ops, xmon_dbgfs_get,
+ xmon_dbgfs_set, "%llu\n");
+
+static int __init setup_xmon_dbgfs(void)
+{
+ debugfs_create_file("xmon", 0600, powerpc_debugfs_root, NULL,
+ &xmon_dbgfs_ops);
+ return 0;
+}
+device_initcall(setup_xmon_dbgfs);
+#endif /* CONFIG_DEBUG_FS */
+
+static int xmon_early __initdata;
static int __init early_parse_xmon(char *p)
{
@@ -3326,12 +3477,12 @@ static int __init early_parse_xmon(char *p)
/* just "xmon" is equivalent to "xmon=early" */
xmon_init(1);
xmon_early = 1;
- } else if (strncmp(p, "on", 2) == 0)
+ xmon_on = 1;
+ } else if (strncmp(p, "on", 2) == 0) {
xmon_init(1);
- else if (strncmp(p, "off", 3) == 0)
- xmon_off = 1;
- else if (strncmp(p, "nobt", 4) == 0)
- xmon_no_auto_backtrace = 1;
+ xmon_on = 1;
+ } else if (strncmp(p, "off", 3) == 0)
+ xmon_on = 0;
else
return 1;
@@ -3341,10 +3492,8 @@ early_param("xmon", early_parse_xmon);
void __init xmon_setup(void)
{
-#ifdef CONFIG_XMON_DEFAULT
- if (!xmon_off)
+ if (xmon_on)
xmon_init(1);
-#endif
if (xmon_early)
debugger(NULL);
}
diff --git a/arch/s390/include/asm/Kbuild b/arch/s390/include/asm/Kbuild
index 7e3481eb2174..45092b12f54f 100644
--- a/arch/s390/include/asm/Kbuild
+++ b/arch/s390/include/asm/Kbuild
@@ -1,4 +1,5 @@
generic-y += asm-offsets.h
+generic-y += cacheflush.h
generic-y += clkdev.h
generic-y += dma-contiguous.h
generic-y += div64.h
diff --git a/arch/s390/include/asm/cacheflush.h b/arch/s390/include/asm/cacheflush.h
deleted file mode 100644
index 0499334f9473..000000000000
--- a/arch/s390/include/asm/cacheflush.h
+++ /dev/null
@@ -1,34 +0,0 @@
-#ifndef _S390_CACHEFLUSH_H
-#define _S390_CACHEFLUSH_H
-
-/* Caches aren't brain-dead on the s390. */
-#include <asm-generic/cacheflush.h>
-
-#define SET_MEMORY_RO 1UL
-#define SET_MEMORY_RW 2UL
-#define SET_MEMORY_NX 4UL
-#define SET_MEMORY_X 8UL
-
-int __set_memory(unsigned long addr, int numpages, unsigned long flags);
-
-static inline int set_memory_ro(unsigned long addr, int numpages)
-{
- return __set_memory(addr, numpages, SET_MEMORY_RO);
-}
-
-static inline int set_memory_rw(unsigned long addr, int numpages)
-{
- return __set_memory(addr, numpages, SET_MEMORY_RW);
-}
-
-static inline int set_memory_nx(unsigned long addr, int numpages)
-{
- return __set_memory(addr, numpages, SET_MEMORY_NX);
-}
-
-static inline int set_memory_x(unsigned long addr, int numpages)
-{
- return __set_memory(addr, numpages, SET_MEMORY_X);
-}
-
-#endif /* _S390_CACHEFLUSH_H */
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index a41faf34b034..426614a882a9 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -25,6 +25,7 @@
#include <asm/cpu.h>
#include <asm/fpu/api.h>
#include <asm/isc.h>
+#include <asm/guarded_storage.h>
#define KVM_S390_BSCA_CPU_SLOTS 64
#define KVM_S390_ESCA_CPU_SLOTS 248
@@ -121,6 +122,7 @@ struct esca_block {
#define CPUSTAT_SLSR 0x00002000
#define CPUSTAT_ZARCH 0x00000800
#define CPUSTAT_MCDS 0x00000100
+#define CPUSTAT_KSS 0x00000200
#define CPUSTAT_SM 0x00000080
#define CPUSTAT_IBS 0x00000040
#define CPUSTAT_GED2 0x00000010
@@ -164,16 +166,27 @@ struct kvm_s390_sie_block {
#define ICTL_RRBE 0x00001000
#define ICTL_TPROT 0x00000200
__u32 ictl; /* 0x0048 */
+#define ECA_CEI 0x80000000
+#define ECA_IB 0x40000000
+#define ECA_SIGPI 0x10000000
+#define ECA_MVPGI 0x01000000
+#define ECA_VX 0x00020000
+#define ECA_PROTEXCI 0x00002000
+#define ECA_SII 0x00000001
__u32 eca; /* 0x004c */
#define ICPT_INST 0x04
#define ICPT_PROGI 0x08
#define ICPT_INSTPROGI 0x0C
+#define ICPT_EXTREQ 0x10
#define ICPT_EXTINT 0x14
+#define ICPT_IOREQ 0x18
+#define ICPT_WAIT 0x1c
#define ICPT_VALIDITY 0x20
#define ICPT_STOP 0x28
#define ICPT_OPEREXC 0x2C
#define ICPT_PARTEXEC 0x38
#define ICPT_IOINST 0x40
+#define ICPT_KSS 0x5c
__u8 icptcode; /* 0x0050 */
__u8 icptstatus; /* 0x0051 */
__u16 ihcpu; /* 0x0052 */
@@ -182,10 +195,19 @@ struct kvm_s390_sie_block {
__u32 ipb; /* 0x0058 */
__u32 scaoh; /* 0x005c */
__u8 reserved60; /* 0x0060 */
+#define ECB_GS 0x40
+#define ECB_TE 0x10
+#define ECB_SRSI 0x04
+#define ECB_HOSTPROTINT 0x02
__u8 ecb; /* 0x0061 */
+#define ECB2_CMMA 0x80
+#define ECB2_IEP 0x20
+#define ECB2_PFMFI 0x08
+#define ECB2_ESCA 0x04
__u8 ecb2; /* 0x0062 */
-#define ECB3_AES 0x04
#define ECB3_DEA 0x08
+#define ECB3_AES 0x04
+#define ECB3_RI 0x01
__u8 ecb3; /* 0x0063 */
__u32 scaol; /* 0x0064 */
__u8 reserved68[4]; /* 0x0068 */
@@ -219,11 +241,14 @@ struct kvm_s390_sie_block {
__u32 crycbd; /* 0x00fc */
__u64 gcr[16]; /* 0x0100 */
__u64 gbea; /* 0x0180 */
- __u8 reserved188[24]; /* 0x0188 */
+ __u8 reserved188[8]; /* 0x0188 */
+ __u64 sdnxo; /* 0x0190 */
+ __u8 reserved198[8]; /* 0x0198 */
__u32 fac; /* 0x01a0 */
__u8 reserved1a4[20]; /* 0x01a4 */
__u64 cbrlo; /* 0x01b8 */
__u8 reserved1c0[8]; /* 0x01c0 */
+#define ECD_HOSTREGMGMT 0x20000000
__u32 ecd; /* 0x01c8 */
__u8 reserved1cc[18]; /* 0x01cc */
__u64 pp; /* 0x01de */
@@ -498,6 +523,12 @@ struct kvm_s390_local_interrupt {
#define FIRQ_CNTR_PFAULT 3
#define FIRQ_MAX_COUNT 4
+/* bit mask for the AIS mode of a given ISC */
+#define AIS_MODE_MASK(isc) (0x80 >> isc)
+
+#define KVM_S390_AIS_MODE_ALL 0
+#define KVM_S390_AIS_MODE_SINGLE 1
+
struct kvm_s390_float_interrupt {
unsigned long pending_irqs;
spinlock_t lock;
@@ -507,6 +538,10 @@ struct kvm_s390_float_interrupt {
struct kvm_s390_ext_info srv_signal;
int next_rr_cpu;
unsigned long idle_mask[BITS_TO_LONGS(KVM_MAX_VCPUS)];
+ struct mutex ais_lock;
+ u8 simm;
+ u8 nimm;
+ int ais_enabled;
};
struct kvm_hw_wp_info_arch {
@@ -554,6 +589,7 @@ struct kvm_vcpu_arch {
/* if vsie is active, currently executed shadow sie control block */
struct kvm_s390_sie_block *vsie_block;
unsigned int host_acrs[NUM_ACRS];
+ struct gs_cb *host_gscb;
struct fpu host_fpregs;
struct kvm_s390_local_interrupt local_int;
struct hrtimer ckc_timer;
@@ -574,6 +610,7 @@ struct kvm_vcpu_arch {
*/
seqcount_t cputm_seqcount;
__u64 cputm_start;
+ bool gs_enabled;
};
struct kvm_vm_stat {
@@ -596,6 +633,7 @@ struct s390_io_adapter {
bool maskable;
bool masked;
bool swap;
+ bool suppressible;
struct rw_semaphore maps_lock;
struct list_head maps;
atomic_t nr_maps;
diff --git a/arch/s390/include/asm/sclp.h b/arch/s390/include/asm/sclp.h
index ace3bd315438..6f5167bc1928 100644
--- a/arch/s390/include/asm/sclp.h
+++ b/arch/s390/include/asm/sclp.h
@@ -75,6 +75,7 @@ struct sclp_info {
unsigned char has_pfmfi : 1;
unsigned char has_ibs : 1;
unsigned char has_skey : 1;
+ unsigned char has_kss : 1;
unsigned int ibc;
unsigned int mtid;
unsigned int mtid_cp;
diff --git a/arch/s390/include/asm/set_memory.h b/arch/s390/include/asm/set_memory.h
new file mode 100644
index 000000000000..46a4db44c47a
--- /dev/null
+++ b/arch/s390/include/asm/set_memory.h
@@ -0,0 +1,31 @@
+#ifndef _ASMS390_SET_MEMORY_H
+#define _ASMS390_SET_MEMORY_H
+
+#define SET_MEMORY_RO 1UL
+#define SET_MEMORY_RW 2UL
+#define SET_MEMORY_NX 4UL
+#define SET_MEMORY_X 8UL
+
+int __set_memory(unsigned long addr, int numpages, unsigned long flags);
+
+static inline int set_memory_ro(unsigned long addr, int numpages)
+{
+ return __set_memory(addr, numpages, SET_MEMORY_RO);
+}
+
+static inline int set_memory_rw(unsigned long addr, int numpages)
+{
+ return __set_memory(addr, numpages, SET_MEMORY_RW);
+}
+
+static inline int set_memory_nx(unsigned long addr, int numpages)
+{
+ return __set_memory(addr, numpages, SET_MEMORY_NX);
+}
+
+static inline int set_memory_x(unsigned long addr, int numpages)
+{
+ return __set_memory(addr, numpages, SET_MEMORY_X);
+}
+
+#endif
diff --git a/arch/s390/include/uapi/asm/kvm.h b/arch/s390/include/uapi/asm/kvm.h
index a2ffec4139ad..3dd2a1d308dd 100644
--- a/arch/s390/include/uapi/asm/kvm.h
+++ b/arch/s390/include/uapi/asm/kvm.h
@@ -26,6 +26,8 @@
#define KVM_DEV_FLIC_ADAPTER_REGISTER 6
#define KVM_DEV_FLIC_ADAPTER_MODIFY 7
#define KVM_DEV_FLIC_CLEAR_IO_IRQ 8
+#define KVM_DEV_FLIC_AISM 9
+#define KVM_DEV_FLIC_AIRQ_INJECT 10
/*
* We can have up to 4*64k pending subchannels + 8 adapter interrupts,
* as well as up to ASYNC_PF_PER_VCPU*KVM_MAX_VCPUS pfault done interrupts.
@@ -41,7 +43,14 @@ struct kvm_s390_io_adapter {
__u8 isc;
__u8 maskable;
__u8 swap;
- __u8 pad;
+ __u8 flags;
+};
+
+#define KVM_S390_ADAPTER_SUPPRESSIBLE 0x01
+
+struct kvm_s390_ais_req {
+ __u8 isc;
+ __u16 mode;
};
#define KVM_S390_IO_ADAPTER_MASK 1
@@ -110,6 +119,7 @@ struct kvm_s390_vm_cpu_machine {
#define KVM_S390_VM_CPU_FEAT_CMMA 10
#define KVM_S390_VM_CPU_FEAT_PFMFI 11
#define KVM_S390_VM_CPU_FEAT_SIGPIF 12
+#define KVM_S390_VM_CPU_FEAT_KSS 13
struct kvm_s390_vm_cpu_feat {
__u64 feat[16];
};
@@ -131,7 +141,8 @@ struct kvm_s390_vm_cpu_subfunc {
__u8 kmo[16]; /* with MSA4 */
__u8 pcc[16]; /* with MSA4 */
__u8 ppno[16]; /* with MSA5 */
- __u8 reserved[1824];
+ __u8 kma[16]; /* with MSA8 */
+ __u8 reserved[1808];
};
/* kvm attributes for crypto */
@@ -197,6 +208,10 @@ struct kvm_guest_debug_arch {
#define KVM_SYNC_VRS (1UL << 6)
#define KVM_SYNC_RICCB (1UL << 7)
#define KVM_SYNC_FPRS (1UL << 8)
+#define KVM_SYNC_GSCB (1UL << 9)
+/* length and alignment of the sdnx as a power of two */
+#define SDNXC 8
+#define SDNXL (1UL << SDNXC)
/* definition of registers in kvm_run */
struct kvm_sync_regs {
__u64 prefix; /* prefix register */
@@ -217,8 +232,16 @@ struct kvm_sync_regs {
};
__u8 reserved[512]; /* for future vector expansion */
__u32 fpc; /* valid on KVM_SYNC_VRS or KVM_SYNC_FPRS */
- __u8 padding[52]; /* riccb needs to be 64byte aligned */
+ __u8 padding1[52]; /* riccb needs to be 64byte aligned */
__u8 riccb[64]; /* runtime instrumentation controls block */
+ __u8 padding2[192]; /* sdnx needs to be 256byte aligned */
+ union {
+ __u8 sdnx[SDNXL]; /* state description annex */
+ struct {
+ __u64 reserved1[2];
+ __u64 gscb[4];
+ };
+ };
};
#define KVM_REG_S390_TODPR (KVM_REG_S390 | KVM_REG_SIZE_U32 | 0x1)
diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c
index 60a8a4e207ed..27477f34cc0a 100644
--- a/arch/s390/kernel/ftrace.c
+++ b/arch/s390/kernel/ftrace.c
@@ -17,6 +17,7 @@
#include <trace/syscall.h>
#include <asm/asm-offsets.h>
#include <asm/cacheflush.h>
+#include <asm/set_memory.h>
#include "entry.h"
/*
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index 76f9eda1d7c0..3d6a99746454 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -31,7 +31,7 @@
#include <linux/slab.h>
#include <linux/hardirq.h>
#include <linux/ftrace.h>
-#include <asm/cacheflush.h>
+#include <asm/set_memory.h>
#include <asm/sections.h>
#include <linux/uaccess.h>
#include <asm/dis.h>
diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c
index db5658daf994..49a6bd45957b 100644
--- a/arch/s390/kernel/machine_kexec.c
+++ b/arch/s390/kernel/machine_kexec.c
@@ -26,6 +26,7 @@
#include <asm/asm-offsets.h>
#include <asm/cacheflush.h>
#include <asm/os_info.h>
+#include <asm/set_memory.h>
#include <asm/switch_to.h>
#include <asm/nmi.h>
diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
index 9a4f279d25ca..ca960d0370d5 100644
--- a/arch/s390/kernel/perf_cpum_sf.c
+++ b/arch/s390/kernel/perf_cpum_sf.c
@@ -823,7 +823,7 @@ static int cpumsf_pmu_event_init(struct perf_event *event)
}
/* Check online status of the CPU to which the event is pinned */
- if (event->cpu >= nr_cpumask_bits ||
+ if ((unsigned int)event->cpu >= nr_cpumask_bits ||
(event->cpu >= 0 && !cpu_online(event->cpu)))
return -ENODEV;
diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c
index ddbffb715b40..9da243d94cc3 100644
--- a/arch/s390/kvm/gaccess.c
+++ b/arch/s390/kvm/gaccess.c
@@ -261,7 +261,7 @@ struct aste {
int ipte_lock_held(struct kvm_vcpu *vcpu)
{
- if (vcpu->arch.sie_block->eca & 1) {
+ if (vcpu->arch.sie_block->eca & ECA_SII) {
int rc;
read_lock(&vcpu->kvm->arch.sca_lock);
@@ -360,7 +360,7 @@ static void ipte_unlock_siif(struct kvm_vcpu *vcpu)
void ipte_lock(struct kvm_vcpu *vcpu)
{
- if (vcpu->arch.sie_block->eca & 1)
+ if (vcpu->arch.sie_block->eca & ECA_SII)
ipte_lock_siif(vcpu);
else
ipte_lock_simple(vcpu);
@@ -368,7 +368,7 @@ void ipte_lock(struct kvm_vcpu *vcpu)
void ipte_unlock(struct kvm_vcpu *vcpu)
{
- if (vcpu->arch.sie_block->eca & 1)
+ if (vcpu->arch.sie_block->eca & ECA_SII)
ipte_unlock_siif(vcpu);
else
ipte_unlock_simple(vcpu);
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c
index 59920f96ebc0..a4752bf6b526 100644
--- a/arch/s390/kvm/intercept.c
+++ b/arch/s390/kvm/intercept.c
@@ -35,6 +35,7 @@ static const intercept_handler_t instruction_handlers[256] = {
[0xb6] = kvm_s390_handle_stctl,
[0xb7] = kvm_s390_handle_lctl,
[0xb9] = kvm_s390_handle_b9,
+ [0xe3] = kvm_s390_handle_e3,
[0xe5] = kvm_s390_handle_e5,
[0xeb] = kvm_s390_handle_eb,
};
@@ -368,8 +369,7 @@ static int handle_operexc(struct kvm_vcpu *vcpu)
trace_kvm_s390_handle_operexc(vcpu, vcpu->arch.sie_block->ipa,
vcpu->arch.sie_block->ipb);
- if (vcpu->arch.sie_block->ipa == 0xb256 &&
- test_kvm_facility(vcpu->kvm, 74))
+ if (vcpu->arch.sie_block->ipa == 0xb256)
return handle_sthyi(vcpu);
if (vcpu->arch.sie_block->ipa == 0 && vcpu->kvm->arch.user_instr0)
@@ -404,28 +404,31 @@ int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu)
return -EOPNOTSUPP;
switch (vcpu->arch.sie_block->icptcode) {
- case 0x10:
- case 0x18:
+ case ICPT_EXTREQ:
+ case ICPT_IOREQ:
return handle_noop(vcpu);
- case 0x04:
+ case ICPT_INST:
rc = handle_instruction(vcpu);
break;
- case 0x08:
+ case ICPT_PROGI:
return handle_prog(vcpu);
- case 0x14:
+ case ICPT_EXTINT:
return handle_external_interrupt(vcpu);
- case 0x1c:
+ case ICPT_WAIT:
return kvm_s390_handle_wait(vcpu);
- case 0x20:
+ case ICPT_VALIDITY:
return handle_validity(vcpu);
- case 0x28:
+ case ICPT_STOP:
return handle_stop(vcpu);
- case 0x2c:
+ case ICPT_OPEREXC:
rc = handle_operexc(vcpu);
break;
- case 0x38:
+ case ICPT_PARTEXEC:
rc = handle_partial_execution(vcpu);
break;
+ case ICPT_KSS:
+ rc = kvm_s390_skey_check_enable(vcpu);
+ break;
default:
return -EOPNOTSUPP;
}
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index 169558dc7daf..caf15c8a8948 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -410,6 +410,7 @@ static int __write_machine_check(struct kvm_vcpu *vcpu,
struct kvm_s390_mchk_info *mchk)
{
unsigned long ext_sa_addr;
+ unsigned long lc;
freg_t fprs[NUM_FPRS];
union mci mci;
int rc;
@@ -418,12 +419,34 @@ static int __write_machine_check(struct kvm_vcpu *vcpu,
/* take care of lazy register loading */
save_fpu_regs();
save_access_regs(vcpu->run->s.regs.acrs);
+ if (MACHINE_HAS_GS && vcpu->arch.gs_enabled)
+ save_gs_cb(current->thread.gs_cb);
/* Extended save area */
rc = read_guest_lc(vcpu, __LC_MCESAD, &ext_sa_addr,
sizeof(unsigned long));
- /* Only bits 0-53 are used for address formation */
- ext_sa_addr &= ~0x3ffUL;
+ /* Only bits 0 through 63-LC are used for address formation */
+ lc = ext_sa_addr & MCESA_LC_MASK;
+ if (test_kvm_facility(vcpu->kvm, 133)) {
+ switch (lc) {
+ case 0:
+ case 10:
+ ext_sa_addr &= ~0x3ffUL;
+ break;
+ case 11:
+ ext_sa_addr &= ~0x7ffUL;
+ break;
+ case 12:
+ ext_sa_addr &= ~0xfffUL;
+ break;
+ default:
+ ext_sa_addr = 0;
+ break;
+ }
+ } else {
+ ext_sa_addr &= ~0x3ffUL;
+ }
+
if (!rc && mci.vr && ext_sa_addr && test_kvm_facility(vcpu->kvm, 129)) {
if (write_guest_abs(vcpu, ext_sa_addr, vcpu->run->s.regs.vrs,
512))
@@ -431,6 +454,14 @@ static int __write_machine_check(struct kvm_vcpu *vcpu,
} else {
mci.vr = 0;
}
+ if (!rc && mci.gs && ext_sa_addr && test_kvm_facility(vcpu->kvm, 133)
+ && (lc == 11 || lc == 12)) {
+ if (write_guest_abs(vcpu, ext_sa_addr + 1024,
+ &vcpu->run->s.regs.gscb, 32))
+ mci.gs = 0;
+ } else {
+ mci.gs = 0;
+ }
/* General interruption information */
rc |= put_guest_lc(vcpu, 1, (u8 __user *) __LC_AR_MODE_ID);
@@ -1968,6 +1999,8 @@ static int register_io_adapter(struct kvm_device *dev,
adapter->maskable = adapter_info.maskable;
adapter->masked = false;
adapter->swap = adapter_info.swap;
+ adapter->suppressible = (adapter_info.flags) &
+ KVM_S390_ADAPTER_SUPPRESSIBLE;
dev->kvm->arch.adapters[adapter->id] = adapter;
return 0;
@@ -2121,6 +2154,87 @@ static int clear_io_irq(struct kvm *kvm, struct kvm_device_attr *attr)
return 0;
}
+static int modify_ais_mode(struct kvm *kvm, struct kvm_device_attr *attr)
+{
+ struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
+ struct kvm_s390_ais_req req;
+ int ret = 0;
+
+ if (!fi->ais_enabled)
+ return -ENOTSUPP;
+
+ if (copy_from_user(&req, (void __user *)attr->addr, sizeof(req)))
+ return -EFAULT;
+
+ if (req.isc > MAX_ISC)
+ return -EINVAL;
+
+ trace_kvm_s390_modify_ais_mode(req.isc,
+ (fi->simm & AIS_MODE_MASK(req.isc)) ?
+ (fi->nimm & AIS_MODE_MASK(req.isc)) ?
+ 2 : KVM_S390_AIS_MODE_SINGLE :
+ KVM_S390_AIS_MODE_ALL, req.mode);
+
+ mutex_lock(&fi->ais_lock);
+ switch (req.mode) {
+ case KVM_S390_AIS_MODE_ALL:
+ fi->simm &= ~AIS_MODE_MASK(req.isc);
+ fi->nimm &= ~AIS_MODE_MASK(req.isc);
+ break;
+ case KVM_S390_AIS_MODE_SINGLE:
+ fi->simm |= AIS_MODE_MASK(req.isc);
+ fi->nimm &= ~AIS_MODE_MASK(req.isc);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ mutex_unlock(&fi->ais_lock);
+
+ return ret;
+}
+
+static int kvm_s390_inject_airq(struct kvm *kvm,
+ struct s390_io_adapter *adapter)
+{
+ struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
+ struct kvm_s390_interrupt s390int = {
+ .type = KVM_S390_INT_IO(1, 0, 0, 0),
+ .parm = 0,
+ .parm64 = (adapter->isc << 27) | 0x80000000,
+ };
+ int ret = 0;
+
+ if (!fi->ais_enabled || !adapter->suppressible)
+ return kvm_s390_inject_vm(kvm, &s390int);
+
+ mutex_lock(&fi->ais_lock);
+ if (fi->nimm & AIS_MODE_MASK(adapter->isc)) {
+ trace_kvm_s390_airq_suppressed(adapter->id, adapter->isc);
+ goto out;
+ }
+
+ ret = kvm_s390_inject_vm(kvm, &s390int);
+ if (!ret && (fi->simm & AIS_MODE_MASK(adapter->isc))) {
+ fi->nimm |= AIS_MODE_MASK(adapter->isc);
+ trace_kvm_s390_modify_ais_mode(adapter->isc,
+ KVM_S390_AIS_MODE_SINGLE, 2);
+ }
+out:
+ mutex_unlock(&fi->ais_lock);
+ return ret;
+}
+
+static int flic_inject_airq(struct kvm *kvm, struct kvm_device_attr *attr)
+{
+ unsigned int id = attr->attr;
+ struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
+
+ if (!adapter)
+ return -EINVAL;
+
+ return kvm_s390_inject_airq(kvm, adapter);
+}
+
static int flic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
int r = 0;
@@ -2157,6 +2271,12 @@ static int flic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
case KVM_DEV_FLIC_CLEAR_IO_IRQ:
r = clear_io_irq(dev->kvm, attr);
break;
+ case KVM_DEV_FLIC_AISM:
+ r = modify_ais_mode(dev->kvm, attr);
+ break;
+ case KVM_DEV_FLIC_AIRQ_INJECT:
+ r = flic_inject_airq(dev->kvm, attr);
+ break;
default:
r = -EINVAL;
}
@@ -2176,6 +2296,8 @@ static int flic_has_attr(struct kvm_device *dev,
case KVM_DEV_FLIC_ADAPTER_REGISTER:
case KVM_DEV_FLIC_ADAPTER_MODIFY:
case KVM_DEV_FLIC_CLEAR_IO_IRQ:
+ case KVM_DEV_FLIC_AISM:
+ case KVM_DEV_FLIC_AIRQ_INJECT:
return 0;
}
return -ENXIO;
@@ -2286,12 +2408,7 @@ static int set_adapter_int(struct kvm_kernel_irq_routing_entry *e,
ret = adapter_indicators_set(kvm, adapter, &e->adapter);
up_read(&adapter->maps_lock);
if ((ret > 0) && !adapter->masked) {
- struct kvm_s390_interrupt s390int = {
- .type = KVM_S390_INT_IO(1, 0, 0, 0),
- .parm = 0,
- .parm64 = (adapter->isc << 27) | 0x80000000,
- };
- ret = kvm_s390_inject_vm(kvm, &s390int);
+ ret = kvm_s390_inject_airq(kvm, adapter);
if (ret == 0)
ret = 1;
}
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index d5c5c911821a..689ac48361c6 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -276,6 +276,10 @@ static void kvm_s390_cpu_feat_init(void)
__cpacf_query(CPACF_PRNO, (cpacf_mask_t *)
kvm_s390_available_subfunc.ppno);
+ if (test_facility(146)) /* MSA8 */
+ __cpacf_query(CPACF_KMA, (cpacf_mask_t *)
+ kvm_s390_available_subfunc.kma);
+
if (MACHINE_HAS_ESOP)
allow_cpu_feat(KVM_S390_VM_CPU_FEAT_ESOP);
/*
@@ -300,6 +304,8 @@ static void kvm_s390_cpu_feat_init(void)
allow_cpu_feat(KVM_S390_VM_CPU_FEAT_CEI);
if (sclp.has_ibs)
allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IBS);
+ if (sclp.has_kss)
+ allow_cpu_feat(KVM_S390_VM_CPU_FEAT_KSS);
/*
* KVM_S390_VM_CPU_FEAT_SKEY: Wrong shadow of PTE.I bits will make
* all skey handling functions read/set the skey from the PGSTE
@@ -380,6 +386,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_S390_SKEYS:
case KVM_CAP_S390_IRQ_STATE:
case KVM_CAP_S390_USER_INSTR0:
+ case KVM_CAP_S390_AIS:
r = 1;
break;
case KVM_CAP_S390_MEM_OP:
@@ -405,6 +412,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_S390_RI:
r = test_facility(64);
break;
+ case KVM_CAP_S390_GS:
+ r = test_facility(133);
+ break;
default:
r = 0;
}
@@ -541,6 +551,34 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
VM_EVENT(kvm, 3, "ENABLE: CAP_S390_RI %s",
r ? "(not available)" : "(success)");
break;
+ case KVM_CAP_S390_AIS:
+ mutex_lock(&kvm->lock);
+ if (kvm->created_vcpus) {
+ r = -EBUSY;
+ } else {
+ set_kvm_facility(kvm->arch.model.fac_mask, 72);
+ set_kvm_facility(kvm->arch.model.fac_list, 72);
+ kvm->arch.float_int.ais_enabled = 1;
+ r = 0;
+ }
+ mutex_unlock(&kvm->lock);
+ VM_EVENT(kvm, 3, "ENABLE: AIS %s",
+ r ? "(not available)" : "(success)");
+ break;
+ case KVM_CAP_S390_GS:
+ r = -EINVAL;
+ mutex_lock(&kvm->lock);
+ if (atomic_read(&kvm->online_vcpus)) {
+ r = -EBUSY;
+ } else if (test_facility(133)) {
+ set_kvm_facility(kvm->arch.model.fac_mask, 133);
+ set_kvm_facility(kvm->arch.model.fac_list, 133);
+ r = 0;
+ }
+ mutex_unlock(&kvm->lock);
+ VM_EVENT(kvm, 3, "ENABLE: CAP_S390_GS %s",
+ r ? "(not available)" : "(success)");
+ break;
case KVM_CAP_S390_USER_STSI:
VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI");
kvm->arch.user_stsi = 1;
@@ -1166,10 +1204,7 @@ static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
return -EINVAL;
- keys = kmalloc_array(args->count, sizeof(uint8_t),
- GFP_KERNEL | __GFP_NOWARN);
- if (!keys)
- keys = vmalloc(sizeof(uint8_t) * args->count);
+ keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL);
if (!keys)
return -ENOMEM;
@@ -1211,10 +1246,7 @@ static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
return -EINVAL;
- keys = kmalloc_array(args->count, sizeof(uint8_t),
- GFP_KERNEL | __GFP_NOWARN);
- if (!keys)
- keys = vmalloc(sizeof(uint8_t) * args->count);
+ keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL);
if (!keys)
return -ENOMEM;
@@ -1498,6 +1530,10 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
kvm_s390_crypto_init(kvm);
+ mutex_init(&kvm->arch.float_int.ais_lock);
+ kvm->arch.float_int.simm = 0;
+ kvm->arch.float_int.nimm = 0;
+ kvm->arch.float_int.ais_enabled = 0;
spin_lock_init(&kvm->arch.float_int.lock);
for (i = 0; i < FIRQ_LIST_COUNT; i++)
INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
@@ -1646,7 +1682,7 @@ static void sca_add_vcpu(struct kvm_vcpu *vcpu)
sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
vcpu->arch.sie_block->scaol = (__u32)(__u64)sca & ~0x3fU;
- vcpu->arch.sie_block->ecb2 |= 0x04U;
+ vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
set_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
} else {
struct bsca_block *sca = vcpu->kvm->arch.sca;
@@ -1700,7 +1736,7 @@ static int sca_switch_to_extended(struct kvm *kvm)
kvm_for_each_vcpu(vcpu_idx, vcpu, kvm) {
vcpu->arch.sie_block->scaoh = scaoh;
vcpu->arch.sie_block->scaol = scaol;
- vcpu->arch.sie_block->ecb2 |= 0x04U;
+ vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
}
kvm->arch.sca = new_sca;
kvm->arch.use_esca = 1;
@@ -1749,6 +1785,8 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
kvm_s390_set_prefix(vcpu, 0);
if (test_kvm_facility(vcpu->kvm, 64))
vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
+ if (test_kvm_facility(vcpu->kvm, 133))
+ vcpu->run->kvm_valid_regs |= KVM_SYNC_GSCB;
/* fprs can be synchronized via vrs, even if the guest has no vx. With
* MACHINE_HAS_VX, (load|store)_fpu_regs() will work with vrs format.
*/
@@ -1939,8 +1977,8 @@ int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
if (!vcpu->arch.sie_block->cbrlo)
return -ENOMEM;
- vcpu->arch.sie_block->ecb2 |= 0x80;
- vcpu->arch.sie_block->ecb2 &= ~0x08;
+ vcpu->arch.sie_block->ecb2 |= ECB2_CMMA;
+ vcpu->arch.sie_block->ecb2 &= ~ECB2_PFMFI;
return 0;
}
@@ -1970,31 +2008,37 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
/* pgste_set_pte has special handling for !MACHINE_HAS_ESOP */
if (MACHINE_HAS_ESOP)
- vcpu->arch.sie_block->ecb |= 0x02;
+ vcpu->arch.sie_block->ecb |= ECB_HOSTPROTINT;
if (test_kvm_facility(vcpu->kvm, 9))
- vcpu->arch.sie_block->ecb |= 0x04;
+ vcpu->arch.sie_block->ecb |= ECB_SRSI;
if (test_kvm_facility(vcpu->kvm, 73))
- vcpu->arch.sie_block->ecb |= 0x10;
+ vcpu->arch.sie_block->ecb |= ECB_TE;
if (test_kvm_facility(vcpu->kvm, 8) && sclp.has_pfmfi)
- vcpu->arch.sie_block->ecb2 |= 0x08;
+ vcpu->arch.sie_block->ecb2 |= ECB2_PFMFI;
if (test_kvm_facility(vcpu->kvm, 130))
- vcpu->arch.sie_block->ecb2 |= 0x20;
- vcpu->arch.sie_block->eca = 0x1002000U;
+ vcpu->arch.sie_block->ecb2 |= ECB2_IEP;
+ vcpu->arch.sie_block->eca = ECA_MVPGI | ECA_PROTEXCI;
if (sclp.has_cei)
- vcpu->arch.sie_block->eca |= 0x80000000U;
+ vcpu->arch.sie_block->eca |= ECA_CEI;
if (sclp.has_ib)
- vcpu->arch.sie_block->eca |= 0x40000000U;
+ vcpu->arch.sie_block->eca |= ECA_IB;
if (sclp.has_siif)
- vcpu->arch.sie_block->eca |= 1;
+ vcpu->arch.sie_block->eca |= ECA_SII;
if (sclp.has_sigpif)
- vcpu->arch.sie_block->eca |= 0x10000000U;
+ vcpu->arch.sie_block->eca |= ECA_SIGPI;
if (test_kvm_facility(vcpu->kvm, 129)) {
- vcpu->arch.sie_block->eca |= 0x00020000;
- vcpu->arch.sie_block->ecd |= 0x20000000;
+ vcpu->arch.sie_block->eca |= ECA_VX;
+ vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
}
+ vcpu->arch.sie_block->sdnxo = ((unsigned long) &vcpu->run->s.regs.sdnx)
+ | SDNXC;
vcpu->arch.sie_block->riccbd = (unsigned long) &vcpu->run->s.regs.riccb;
- vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
+
+ if (sclp.has_kss)
+ atomic_or(CPUSTAT_KSS, &vcpu->arch.sie_block->cpuflags);
+ else
+ vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
if (vcpu->kvm->arch.use_cmma) {
rc = kvm_s390_vcpu_setup_cmma(vcpu);
@@ -2446,7 +2490,7 @@ retry:
}
/* nothing to do, just clear the request */
- clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
+ kvm_clear_request(KVM_REQ_UNHALT, vcpu);
return 0;
}
@@ -2719,6 +2763,11 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
+ struct runtime_instr_cb *riccb;
+ struct gs_cb *gscb;
+
+ riccb = (struct runtime_instr_cb *) &kvm_run->s.regs.riccb;
+ gscb = (struct gs_cb *) &kvm_run->s.regs.gscb;
vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
@@ -2747,12 +2796,24 @@ static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
* we should enable RI here instead of doing the lazy enablement.
*/
if ((kvm_run->kvm_dirty_regs & KVM_SYNC_RICCB) &&
- test_kvm_facility(vcpu->kvm, 64)) {
- struct runtime_instr_cb *riccb =
- (struct runtime_instr_cb *) &kvm_run->s.regs.riccb;
-
- if (riccb->valid)
- vcpu->arch.sie_block->ecb3 |= 0x01;
+ test_kvm_facility(vcpu->kvm, 64) &&
+ riccb->valid &&
+ !(vcpu->arch.sie_block->ecb3 & ECB3_RI)) {
+ VCPU_EVENT(vcpu, 3, "%s", "ENABLE: RI (sync_regs)");
+ vcpu->arch.sie_block->ecb3 |= ECB3_RI;
+ }
+ /*
+ * If userspace sets the gscb (e.g. after migration) to non-zero,
+ * we should enable GS here instead of doing the lazy enablement.
+ */
+ if ((kvm_run->kvm_dirty_regs & KVM_SYNC_GSCB) &&
+ test_kvm_facility(vcpu->kvm, 133) &&
+ gscb->gssm &&
+ !vcpu->arch.gs_enabled) {
+ VCPU_EVENT(vcpu, 3, "%s", "ENABLE: GS (sync_regs)");
+ vcpu->arch.sie_block->ecb |= ECB_GS;
+ vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
+ vcpu->arch.gs_enabled = 1;
}
save_access_regs(vcpu->arch.host_acrs);
restore_access_regs(vcpu->run->s.regs.acrs);
@@ -2768,6 +2829,20 @@ static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
if (test_fp_ctl(current->thread.fpu.fpc))
/* User space provided an invalid FPC, let's clear it */
current->thread.fpu.fpc = 0;
+ if (MACHINE_HAS_GS) {
+ preempt_disable();
+ __ctl_set_bit(2, 4);
+ if (current->thread.gs_cb) {
+ vcpu->arch.host_gscb = current->thread.gs_cb;
+ save_gs_cb(vcpu->arch.host_gscb);
+ }
+ if (vcpu->arch.gs_enabled) {
+ current->thread.gs_cb = (struct gs_cb *)
+ &vcpu->run->s.regs.gscb;
+ restore_gs_cb(current->thread.gs_cb);
+ }
+ preempt_enable();
+ }
kvm_run->kvm_dirty_regs = 0;
}
@@ -2794,6 +2869,18 @@ static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
/* Restore will be done lazily at return */
current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;
+ if (MACHINE_HAS_GS) {
+ __ctl_set_bit(2, 4);
+ if (vcpu->arch.gs_enabled)
+ save_gs_cb(current->thread.gs_cb);
+ preempt_disable();
+ current->thread.gs_cb = vcpu->arch.host_gscb;
+ restore_gs_cb(vcpu->arch.host_gscb);
+ preempt_enable();
+ if (!vcpu->arch.host_gscb)
+ __ctl_clear_bit(2, 4);
+ vcpu->arch.host_gscb = NULL;
+ }
}
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index af9fa91a0c91..55f5c8457d6d 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -25,7 +25,7 @@
typedef int (*intercept_handler_t)(struct kvm_vcpu *vcpu);
/* Transactional Memory Execution related macros */
-#define IS_TE_ENABLED(vcpu) ((vcpu->arch.sie_block->ecb & 0x10))
+#define IS_TE_ENABLED(vcpu) ((vcpu->arch.sie_block->ecb & ECB_TE))
#define TDB_FORMAT1 1
#define IS_ITDB_VALID(vcpu) ((*(char *)vcpu->arch.sie_block->itdba == TDB_FORMAT1))
@@ -246,6 +246,7 @@ static inline void kvm_s390_retry_instr(struct kvm_vcpu *vcpu)
int is_valid_psw(psw_t *psw);
int kvm_s390_handle_aa(struct kvm_vcpu *vcpu);
int kvm_s390_handle_b2(struct kvm_vcpu *vcpu);
+int kvm_s390_handle_e3(struct kvm_vcpu *vcpu);
int kvm_s390_handle_e5(struct kvm_vcpu *vcpu);
int kvm_s390_handle_01(struct kvm_vcpu *vcpu);
int kvm_s390_handle_b9(struct kvm_vcpu *vcpu);
@@ -253,6 +254,7 @@ int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu);
int kvm_s390_handle_stctl(struct kvm_vcpu *vcpu);
int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu);
int kvm_s390_handle_eb(struct kvm_vcpu *vcpu);
+int kvm_s390_skey_check_enable(struct kvm_vcpu *vcpu);
/* implemented in vsie.c */
int kvm_s390_handle_vsie(struct kvm_vcpu *vcpu);
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index 64b6a309f2c4..c03106c428cf 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -37,7 +37,8 @@
static int handle_ri(struct kvm_vcpu *vcpu)
{
if (test_kvm_facility(vcpu->kvm, 64)) {
- vcpu->arch.sie_block->ecb3 |= 0x01;
+ VCPU_EVENT(vcpu, 3, "%s", "ENABLE: RI (lazy)");
+ vcpu->arch.sie_block->ecb3 |= ECB3_RI;
kvm_s390_retry_instr(vcpu);
return 0;
} else
@@ -52,6 +53,33 @@ int kvm_s390_handle_aa(struct kvm_vcpu *vcpu)
return -EOPNOTSUPP;
}
+static int handle_gs(struct kvm_vcpu *vcpu)
+{
+ if (test_kvm_facility(vcpu->kvm, 133)) {
+ VCPU_EVENT(vcpu, 3, "%s", "ENABLE: GS (lazy)");
+ preempt_disable();
+ __ctl_set_bit(2, 4);
+ current->thread.gs_cb = (struct gs_cb *)&vcpu->run->s.regs.gscb;
+ restore_gs_cb(current->thread.gs_cb);
+ preempt_enable();
+ vcpu->arch.sie_block->ecb |= ECB_GS;
+ vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
+ vcpu->arch.gs_enabled = 1;
+ kvm_s390_retry_instr(vcpu);
+ return 0;
+ } else
+ return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
+}
+
+int kvm_s390_handle_e3(struct kvm_vcpu *vcpu)
+{
+ int code = vcpu->arch.sie_block->ipb & 0xff;
+
+ if (code == 0x49 || code == 0x4d)
+ return handle_gs(vcpu);
+ else
+ return -EOPNOTSUPP;
+}
/* Handle SCK (SET CLOCK) interception */
static int handle_set_clock(struct kvm_vcpu *vcpu)
{
@@ -170,18 +198,25 @@ static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
return 0;
}
-static int __skey_check_enable(struct kvm_vcpu *vcpu)
+int kvm_s390_skey_check_enable(struct kvm_vcpu *vcpu)
{
int rc = 0;
+ struct kvm_s390_sie_block *sie_block = vcpu->arch.sie_block;
trace_kvm_s390_skey_related_inst(vcpu);
- if (!(vcpu->arch.sie_block->ictl & (ICTL_ISKE | ICTL_SSKE | ICTL_RRBE)))
+ if (!(sie_block->ictl & (ICTL_ISKE | ICTL_SSKE | ICTL_RRBE)) &&
+ !(atomic_read(&sie_block->cpuflags) & CPUSTAT_KSS))
return rc;
rc = s390_enable_skey();
VCPU_EVENT(vcpu, 3, "enabling storage keys for guest: %d", rc);
- if (!rc)
- vcpu->arch.sie_block->ictl &= ~(ICTL_ISKE | ICTL_SSKE | ICTL_RRBE);
+ if (!rc) {
+ if (atomic_read(&sie_block->cpuflags) & CPUSTAT_KSS)
+ atomic_andnot(CPUSTAT_KSS, &sie_block->cpuflags);
+ else
+ sie_block->ictl &= ~(ICTL_ISKE | ICTL_SSKE |
+ ICTL_RRBE);
+ }
return rc;
}
@@ -190,7 +225,7 @@ static int try_handle_skey(struct kvm_vcpu *vcpu)
int rc;
vcpu->stat.instruction_storage_key++;
- rc = __skey_check_enable(vcpu);
+ rc = kvm_s390_skey_check_enable(vcpu);
if (rc)
return rc;
if (sclp.has_skey) {
@@ -759,6 +794,7 @@ static const intercept_handler_t b2_handlers[256] = {
[0x3b] = handle_io_inst,
[0x3c] = handle_io_inst,
[0x50] = handle_ipte_interlock,
+ [0x56] = handle_sthyi,
[0x5f] = handle_io_inst,
[0x74] = handle_io_inst,
[0x76] = handle_io_inst,
@@ -887,7 +923,7 @@ static int handle_pfmf(struct kvm_vcpu *vcpu)
}
if (vcpu->run->s.regs.gprs[reg1] & PFMF_SK) {
- int rc = __skey_check_enable(vcpu);
+ int rc = kvm_s390_skey_check_enable(vcpu);
if (rc)
return rc;
diff --git a/arch/s390/kvm/sthyi.c b/arch/s390/kvm/sthyi.c
index 05c98bb853cf..926b5244263e 100644
--- a/arch/s390/kvm/sthyi.c
+++ b/arch/s390/kvm/sthyi.c
@@ -404,6 +404,9 @@ int handle_sthyi(struct kvm_vcpu *vcpu)
u64 code, addr, cc = 0;
struct sthyi_sctns *sctns = NULL;
+ if (!test_kvm_facility(vcpu->kvm, 74))
+ return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
+
/*
* STHYI requires extensive locking in the higher hypervisors
* and is very computational/memory expensive. Therefore we
diff --git a/arch/s390/kvm/trace-s390.h b/arch/s390/kvm/trace-s390.h
index 396485bca191..78b7e847984a 100644
--- a/arch/s390/kvm/trace-s390.h
+++ b/arch/s390/kvm/trace-s390.h
@@ -280,6 +280,58 @@ TRACE_EVENT(kvm_s390_enable_disable_ibs,
__entry->state ? "enabling" : "disabling", __entry->id)
);
+/*
+ * Trace point for modifying ais mode for a given isc.
+ */
+TRACE_EVENT(kvm_s390_modify_ais_mode,
+ TP_PROTO(__u8 isc, __u16 from, __u16 to),
+ TP_ARGS(isc, from, to),
+
+ TP_STRUCT__entry(
+ __field(__u8, isc)
+ __field(__u16, from)
+ __field(__u16, to)
+ ),
+
+ TP_fast_assign(
+ __entry->isc = isc;
+ __entry->from = from;
+ __entry->to = to;
+ ),
+
+ TP_printk("for isc %x, modifying interruption mode from %s to %s",
+ __entry->isc,
+ (__entry->from == KVM_S390_AIS_MODE_ALL) ?
+ "ALL-Interruptions Mode" :
+ (__entry->from == KVM_S390_AIS_MODE_SINGLE) ?
+ "Single-Interruption Mode" : "No-Interruptions Mode",
+ (__entry->to == KVM_S390_AIS_MODE_ALL) ?
+ "ALL-Interruptions Mode" :
+ (__entry->to == KVM_S390_AIS_MODE_SINGLE) ?
+ "Single-Interruption Mode" : "No-Interruptions Mode")
+ );
+
+/*
+ * Trace point for suppressed adapter I/O interrupt.
+ */
+TRACE_EVENT(kvm_s390_airq_suppressed,
+ TP_PROTO(__u32 id, __u8 isc),
+ TP_ARGS(id, isc),
+
+ TP_STRUCT__entry(
+ __field(__u32, id)
+ __field(__u8, isc)
+ ),
+
+ TP_fast_assign(
+ __entry->id = id;
+ __entry->isc = isc;
+ ),
+
+ TP_printk("adapter I/O interrupt suppressed (id:%x isc:%x)",
+ __entry->id, __entry->isc)
+ );
+
#endif /* _TRACE_KVMS390_H */
diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c
index 5491be39776b..4719ecb9ab42 100644
--- a/arch/s390/kvm/vsie.c
+++ b/arch/s390/kvm/vsie.c
@@ -117,6 +117,8 @@ static int prepare_cpuflags(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
newflags |= cpuflags & CPUSTAT_SM;
if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_IBS))
newflags |= cpuflags & CPUSTAT_IBS;
+ if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_KSS))
+ newflags |= cpuflags & CPUSTAT_KSS;
atomic_set(&scb_s->cpuflags, newflags);
return 0;
@@ -249,7 +251,7 @@ static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
- bool had_tx = scb_s->ecb & 0x10U;
+ bool had_tx = scb_s->ecb & ECB_TE;
unsigned long new_mso = 0;
int rc;
@@ -289,7 +291,9 @@ static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
* bits. Therefore we cannot provide interpretation and would later
* have to provide own emulation handlers.
*/
- scb_s->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
+ if (!(atomic_read(&scb_s->cpuflags) & CPUSTAT_KSS))
+ scb_s->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
+
scb_s->icpua = scb_o->icpua;
if (!(atomic_read(&scb_s->cpuflags) & CPUSTAT_SM))
@@ -307,34 +311,39 @@ static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
scb_s->ihcpu = scb_o->ihcpu;
/* MVPG and Protection Exception Interpretation are always available */
- scb_s->eca |= scb_o->eca & 0x01002000U;
+ scb_s->eca |= scb_o->eca & (ECA_MVPGI | ECA_PROTEXCI);
/* Host-protection-interruption introduced with ESOP */
if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_ESOP))
- scb_s->ecb |= scb_o->ecb & 0x02U;
+ scb_s->ecb |= scb_o->ecb & ECB_HOSTPROTINT;
/* transactional execution */
if (test_kvm_facility(vcpu->kvm, 73)) {
/* remap the prefix is tx is toggled on */
- if ((scb_o->ecb & 0x10U) && !had_tx)
+ if ((scb_o->ecb & ECB_TE) && !had_tx)
prefix_unmapped(vsie_page);
- scb_s->ecb |= scb_o->ecb & 0x10U;
+ scb_s->ecb |= scb_o->ecb & ECB_TE;
}
/* SIMD */
if (test_kvm_facility(vcpu->kvm, 129)) {
- scb_s->eca |= scb_o->eca & 0x00020000U;
- scb_s->ecd |= scb_o->ecd & 0x20000000U;
+ scb_s->eca |= scb_o->eca & ECA_VX;
+ scb_s->ecd |= scb_o->ecd & ECD_HOSTREGMGMT;
}
/* Run-time-Instrumentation */
if (test_kvm_facility(vcpu->kvm, 64))
- scb_s->ecb3 |= scb_o->ecb3 & 0x01U;
+ scb_s->ecb3 |= scb_o->ecb3 & ECB3_RI;
/* Instruction Execution Prevention */
if (test_kvm_facility(vcpu->kvm, 130))
- scb_s->ecb2 |= scb_o->ecb2 & 0x20U;
+ scb_s->ecb2 |= scb_o->ecb2 & ECB2_IEP;
+ /* Guarded Storage */
+ if (test_kvm_facility(vcpu->kvm, 133)) {
+ scb_s->ecb |= scb_o->ecb & ECB_GS;
+ scb_s->ecd |= scb_o->ecd & ECD_HOSTREGMGMT;
+ }
if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_SIIF))
- scb_s->eca |= scb_o->eca & 0x00000001U;
+ scb_s->eca |= scb_o->eca & ECA_SII;
if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_IB))
- scb_s->eca |= scb_o->eca & 0x40000000U;
+ scb_s->eca |= scb_o->eca & ECA_IB;
if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_CEI))
- scb_s->eca |= scb_o->eca & 0x80000000U;
+ scb_s->eca |= scb_o->eca & ECA_CEI;
prepare_ibc(vcpu, vsie_page);
rc = shadow_crycb(vcpu, vsie_page);
@@ -406,7 +415,7 @@ static int map_prefix(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
prefix += scb_s->mso;
rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap, prefix);
- if (!rc && (scb_s->ecb & 0x10U))
+ if (!rc && (scb_s->ecb & ECB_TE))
rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap,
prefix + PAGE_SIZE);
/*
@@ -496,6 +505,13 @@ static void unpin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
unpin_guest_page(vcpu->kvm, gpa, hpa);
scb_s->riccbd = 0;
}
+
+ hpa = scb_s->sdnxo;
+ if (hpa) {
+ gpa = scb_o->sdnxo;
+ unpin_guest_page(vcpu->kvm, gpa, hpa);
+ scb_s->sdnxo = 0;
+ }
}
/*
@@ -543,7 +559,7 @@ static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
}
gpa = scb_o->itdba & ~0xffUL;
- if (gpa && (scb_s->ecb & 0x10U)) {
+ if (gpa && (scb_s->ecb & ECB_TE)) {
if (!(gpa & ~0x1fffU)) {
rc = set_validity_icpt(scb_s, 0x0080U);
goto unpin;
@@ -558,8 +574,7 @@ static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
}
gpa = scb_o->gvrd & ~0x1ffUL;
- if (gpa && (scb_s->eca & 0x00020000U) &&
- !(scb_s->ecd & 0x20000000U)) {
+ if (gpa && (scb_s->eca & ECA_VX) && !(scb_s->ecd & ECD_HOSTREGMGMT)) {
if (!(gpa & ~0x1fffUL)) {
rc = set_validity_icpt(scb_s, 0x1310U);
goto unpin;
@@ -577,7 +592,7 @@ static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
}
gpa = scb_o->riccbd & ~0x3fUL;
- if (gpa && (scb_s->ecb3 & 0x01U)) {
+ if (gpa && (scb_s->ecb3 & ECB3_RI)) {
if (!(gpa & ~0x1fffUL)) {
rc = set_validity_icpt(scb_s, 0x0043U);
goto unpin;
@@ -591,6 +606,33 @@ static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
goto unpin;
scb_s->riccbd = hpa;
}
+ if ((scb_s->ecb & ECB_GS) && !(scb_s->ecd & ECD_HOSTREGMGMT)) {
+ unsigned long sdnxc;
+
+ gpa = scb_o->sdnxo & ~0xfUL;
+ sdnxc = scb_o->sdnxo & 0xfUL;
+ if (!gpa || !(gpa & ~0x1fffUL)) {
+ rc = set_validity_icpt(scb_s, 0x10b0U);
+ goto unpin;
+ }
+ if (sdnxc < 6 || sdnxc > 12) {
+ rc = set_validity_icpt(scb_s, 0x10b1U);
+ goto unpin;
+ }
+ if (gpa & ((1 << sdnxc) - 1)) {
+ rc = set_validity_icpt(scb_s, 0x10b2U);
+ goto unpin;
+ }
+ /* Due to alignment rules (checked above) this cannot
+ * cross page boundaries
+ */
+ rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
+ if (rc == -EINVAL)
+ rc = set_validity_icpt(scb_s, 0x10b0U);
+ if (rc)
+ goto unpin;
+ scb_s->sdnxo = hpa | sdnxc;
+ }
return 0;
unpin:
unpin_blocks(vcpu, vsie_page);
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index ee5066718b21..ee6a1d3d4983 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -39,6 +39,7 @@
#include <asm/sections.h>
#include <asm/ctl_reg.h>
#include <asm/sclp.h>
+#include <asm/set_memory.h>
pgd_t swapper_pg_dir[PTRS_PER_PGD] __section(.bss..swapper_pg_dir);
diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c
index fc321c5ec30e..49e721f3645e 100644
--- a/arch/s390/mm/pageattr.c
+++ b/arch/s390/mm/pageattr.c
@@ -8,6 +8,7 @@
#include <asm/facility.h>
#include <asm/pgtable.h>
#include <asm/page.h>
+#include <asm/set_memory.h>
static inline unsigned long sske_frame(unsigned long addr, unsigned char skey)
{
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index 60d38993f232..c33c94b4be60 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -17,6 +17,7 @@
#include <asm/setup.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
+#include <asm/set_memory.h>
static DEFINE_MUTEX(vmem_mutex);
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index 4ecf6d687509..6e97a2e3fd8d 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -24,6 +24,7 @@
#include <linux/bpf.h>
#include <asm/cacheflush.h>
#include <asm/dis.h>
+#include <asm/set_memory.h>
#include "bpf_jit.h"
int bpf_jit_enable __read_mostly;
diff --git a/arch/s390/tools/gen_facilities.c b/arch/s390/tools/gen_facilities.c
index 0cf802de52a1..be63fbd699fd 100644
--- a/arch/s390/tools/gen_facilities.c
+++ b/arch/s390/tools/gen_facilities.c
@@ -82,6 +82,7 @@ static struct facility_def facility_defs[] = {
78, /* enhanced-DAT 2 */
130, /* instruction-execution-protection */
131, /* enhanced-SOP 2 and side-effect */
+ 146, /* msa extension 8 */
-1 /* END */
}
},
diff --git a/arch/sh/drivers/pci/pci.c b/arch/sh/drivers/pci/pci.c
index 84563e39a5b8..c99ee286b69f 100644
--- a/arch/sh/drivers/pci/pci.c
+++ b/arch/sh/drivers/pci/pci.c
@@ -269,27 +269,6 @@ void __ref pcibios_report_status(unsigned int status_mask, int warn)
}
}
-int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
- enum pci_mmap_state mmap_state, int write_combine)
-{
- /*
- * I/O space can be accessed via normal processor loads and stores on
- * this platform but for now we elect not to do this and portable
- * drivers should not do this anyway.
- */
- if (mmap_state == pci_mmap_io)
- return -EINVAL;
-
- /*
- * Ignore write-combine; for now only return uncached mappings.
- */
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-
- return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
- vma->vm_end - vma->vm_start,
- vma->vm_page_prot);
-}
-
#ifndef CONFIG_GENERIC_IOMAP
void __iomem *__pci_ioport_map(struct pci_dev *dev,
diff --git a/arch/sh/include/asm/pci.h b/arch/sh/include/asm/pci.h
index 644314f2b1ef..17fa69bc814d 100644
--- a/arch/sh/include/asm/pci.h
+++ b/arch/sh/include/asm/pci.h
@@ -66,8 +66,8 @@ extern unsigned long PCIBIOS_MIN_IO, PCIBIOS_MIN_MEM;
struct pci_dev;
#define HAVE_PCI_MMAP
-extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
- enum pci_mmap_state mmap_state, int write_combine);
+#define ARCH_GENERIC_PCI_MMAP_RESOURCE
+
extern void pcibios_set_master(struct pci_dev *dev);
/* Dynamic DMA mapping stuff.
diff --git a/arch/sparc/include/asm/pci_64.h b/arch/sparc/include/asm/pci_64.h
index 2303635158f5..b957ca5527a3 100644
--- a/arch/sparc/include/asm/pci_64.h
+++ b/arch/sparc/include/asm/pci_64.h
@@ -42,13 +42,10 @@ static inline int pci_proc_domain(struct pci_bus *bus)
/* Platform support for /proc/bus/pci/X/Y mmap()s. */
#define HAVE_PCI_MMAP
+#define arch_can_pci_mmap_io() 1
#define HAVE_ARCH_PCI_GET_UNMAPPED_AREA
#define get_pci_unmapped_area get_fb_unmapped_area
-int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
- enum pci_mmap_state mmap_state,
- int write_combine);
-
static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
{
return PCI_IRQ_NONE;
diff --git a/arch/sparc/kernel/head_64.S b/arch/sparc/kernel/head_64.S
index 44101196d02b..41a407328667 100644
--- a/arch/sparc/kernel/head_64.S
+++ b/arch/sparc/kernel/head_64.S
@@ -939,3 +939,9 @@ ENTRY(__retl_o1)
retl
mov %o1, %o0
ENDPROC(__retl_o1)
+
+ENTRY(__retl_o1_asi)
+ wr %o5, 0x0, %asi
+ retl
+ mov %o1, %o0
+ENDPROC(__retl_o1_asi)
diff --git a/arch/sparc/kernel/led.c b/arch/sparc/kernel/led.c
index 44a3ed93c214..e278bf52963b 100644
--- a/arch/sparc/kernel/led.c
+++ b/arch/sparc/kernel/led.c
@@ -70,16 +70,9 @@ static ssize_t led_proc_write(struct file *file, const char __user *buffer,
if (count > LED_MAX_LENGTH)
count = LED_MAX_LENGTH;
- buf = kmalloc(sizeof(char) * (count + 1), GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- if (copy_from_user(buf, buffer, count)) {
- kfree(buf);
- return -EFAULT;
- }
-
- buf[count] = '\0';
+ buf = memdup_user_nul(buffer, count);
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
/* work around \n when echo'ing into proc */
if (buf[count - 1] == '\n')
diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c
index 015e55a7495d..7eceaa10836f 100644
--- a/arch/sparc/kernel/pci.c
+++ b/arch/sparc/kernel/pci.c
@@ -862,9 +862,9 @@ static void __pci_mmap_set_pgprot(struct pci_dev *dev, struct vm_area_struct *vm
*
* Returns a negative error code on failure, zero on success.
*/
-int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
- enum pci_mmap_state mmap_state,
- int write_combine)
+int pci_mmap_page_range(struct pci_dev *dev, int bar,
+ struct vm_area_struct *vma,
+ enum pci_mmap_state mmap_state, int write_combine)
{
int ret;
diff --git a/arch/sparc/kernel/setup_32.c b/arch/sparc/kernel/setup_32.c
index 6f06058c5ae7..6722308d1a98 100644
--- a/arch/sparc/kernel/setup_32.c
+++ b/arch/sparc/kernel/setup_32.c
@@ -148,7 +148,7 @@ static void __init boot_flags_init(char *commands)
{
while (*commands) {
/* Move to the start of the next "argument". */
- while (*commands && *commands == ' ')
+ while (*commands == ' ')
commands++;
/* Process any command switches, otherwise skip it. */
diff --git a/arch/sparc/kernel/setup_64.c b/arch/sparc/kernel/setup_64.c
index 6b7331d198e9..422b17880955 100644
--- a/arch/sparc/kernel/setup_64.c
+++ b/arch/sparc/kernel/setup_64.c
@@ -133,7 +133,7 @@ static void __init boot_flags_init(char *commands)
{
while (*commands) {
/* Move to the start of the next "argument". */
- while (*commands && *commands == ' ')
+ while (*commands == ' ')
commands++;
/* Process any command switches, otherwise skip it. */
diff --git a/arch/sparc/lib/GENbzero.S b/arch/sparc/lib/GENbzero.S
index 8e7a843ddd88..2fbf6297d57c 100644
--- a/arch/sparc/lib/GENbzero.S
+++ b/arch/sparc/lib/GENbzero.S
@@ -8,7 +8,7 @@
98: x,y; \
.section __ex_table,"a";\
.align 4; \
- .word 98b, __retl_o1; \
+ .word 98b, __retl_o1_asi;\
.text; \
.align 4;
diff --git a/arch/sparc/lib/NGbzero.S b/arch/sparc/lib/NGbzero.S
index beab29bf419b..33053bdf3766 100644
--- a/arch/sparc/lib/NGbzero.S
+++ b/arch/sparc/lib/NGbzero.S
@@ -8,7 +8,7 @@
98: x,y; \
.section __ex_table,"a";\
.align 4; \
- .word 98b, __retl_o1; \
+ .word 98b, __retl_o1_asi;\
.text; \
.align 4;
diff --git a/arch/unicore32/include/asm/pci.h b/arch/unicore32/include/asm/pci.h
index 37e55d018de5..ac5acdf4c4d0 100644
--- a/arch/unicore32/include/asm/pci.h
+++ b/arch/unicore32/include/asm/pci.h
@@ -17,8 +17,7 @@
#include <mach/hardware.h> /* for PCIBIOS_MIN_* */
#define HAVE_PCI_MMAP
-extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
- enum pci_mmap_state mmap_state, int write_combine);
+#define ARCH_GENERIC_PCI_MMAP_RESOURCE
#endif /* __KERNEL__ */
#endif
diff --git a/arch/unicore32/kernel/pci.c b/arch/unicore32/kernel/pci.c
index 62137d13c6f9..1053bca1f8aa 100644
--- a/arch/unicore32/kernel/pci.c
+++ b/arch/unicore32/kernel/pci.c
@@ -356,26 +356,3 @@ int pcibios_enable_device(struct pci_dev *dev, int mask)
}
return 0;
}
-
-int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
- enum pci_mmap_state mmap_state, int write_combine)
-{
- unsigned long phys;
-
- if (mmap_state == pci_mmap_io)
- return -EINVAL;
-
- phys = vma->vm_pgoff;
-
- /*
- * Mark this as IO
- */
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-
- if (remap_pfn_range(vma, vma->vm_start, phys,
- vma->vm_end - vma->vm_start,
- vma->vm_page_prot))
- return -EAGAIN;
-
- return 0;
-}
diff --git a/arch/x86/hyperv/hv_init.c b/arch/x86/hyperv/hv_init.c
index 2b01421f7d0f..5b882cc0c0e9 100644
--- a/arch/x86/hyperv/hv_init.c
+++ b/arch/x86/hyperv/hv_init.c
@@ -25,7 +25,7 @@
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/clockchips.h>
-
+#include <linux/hyperv.h>
#ifdef CONFIG_HYPERV_TSCPAGE
diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
index e7e1942edff7..8b4140f6724f 100644
--- a/arch/x86/include/asm/cacheflush.h
+++ b/arch/x86/include/asm/cacheflush.h
@@ -5,93 +5,8 @@
#include <asm-generic/cacheflush.h>
#include <asm/special_insns.h>
-/*
- * The set_memory_* API can be used to change various attributes of a virtual
- * address range. The attributes include:
- * Cachability : UnCached, WriteCombining, WriteThrough, WriteBack
- * Executability : eXeutable, NoteXecutable
- * Read/Write : ReadOnly, ReadWrite
- * Presence : NotPresent
- *
- * Within a category, the attributes are mutually exclusive.
- *
- * The implementation of this API will take care of various aspects that
- * are associated with changing such attributes, such as:
- * - Flushing TLBs
- * - Flushing CPU caches
- * - Making sure aliases of the memory behind the mapping don't violate
- * coherency rules as defined by the CPU in the system.
- *
- * What this API does not do:
- * - Provide exclusion between various callers - including callers that
- * operate on other mappings of the same physical page
- * - Restore default attributes when a page is freed
- * - Guarantee that mappings other than the requested one are
- * in any state, other than that these do not violate rules for
- * the CPU you have. Do not depend on any effects on other mappings,
- * CPUs other than the one you have may have more relaxed rules.
- * The caller is required to take care of these.
- */
-
-int _set_memory_uc(unsigned long addr, int numpages);
-int _set_memory_wc(unsigned long addr, int numpages);
-int _set_memory_wt(unsigned long addr, int numpages);
-int _set_memory_wb(unsigned long addr, int numpages);
-int set_memory_uc(unsigned long addr, int numpages);
-int set_memory_wc(unsigned long addr, int numpages);
-int set_memory_wt(unsigned long addr, int numpages);
-int set_memory_wb(unsigned long addr, int numpages);
-int set_memory_x(unsigned long addr, int numpages);
-int set_memory_nx(unsigned long addr, int numpages);
-int set_memory_ro(unsigned long addr, int numpages);
-int set_memory_rw(unsigned long addr, int numpages);
-int set_memory_np(unsigned long addr, int numpages);
-int set_memory_4k(unsigned long addr, int numpages);
-
-int set_memory_array_uc(unsigned long *addr, int addrinarray);
-int set_memory_array_wc(unsigned long *addr, int addrinarray);
-int set_memory_array_wt(unsigned long *addr, int addrinarray);
-int set_memory_array_wb(unsigned long *addr, int addrinarray);
-
-int set_pages_array_uc(struct page **pages, int addrinarray);
-int set_pages_array_wc(struct page **pages, int addrinarray);
-int set_pages_array_wt(struct page **pages, int addrinarray);
-int set_pages_array_wb(struct page **pages, int addrinarray);
-
-/*
- * For legacy compatibility with the old APIs, a few functions
- * are provided that work on a "struct page".
- * These functions operate ONLY on the 1:1 kernel mapping of the
- * memory that the struct page represents, and internally just
- * call the set_memory_* function. See the description of the
- * set_memory_* function for more details on conventions.
- *
- * These APIs should be considered *deprecated* and are likely going to
- * be removed in the future.
- * The reason for this is the implicit operation on the 1:1 mapping only,
- * making this not a generally useful API.
- *
- * Specifically, many users of the old APIs had a virtual address,
- * called virt_to_page() or vmalloc_to_page() on that address to
- * get a struct page* that the old API required.
- * To convert these cases, use set_memory_*() on the original
- * virtual address, do not use these functions.
- */
-
-int set_pages_uc(struct page *page, int numpages);
-int set_pages_wb(struct page *page, int numpages);
-int set_pages_x(struct page *page, int numpages);
-int set_pages_nx(struct page *page, int numpages);
-int set_pages_ro(struct page *page, int numpages);
-int set_pages_rw(struct page *page, int numpages);
-
-
void clflush_cache_range(void *addr, unsigned int size);
#define mmio_flush_range(addr, size) clflush_cache_range(addr, size)
-extern int kernel_set_to_readonly;
-void set_kernel_text_rw(void);
-void set_kernel_text_ro(void);
-
#endif /* _ASM_X86_CACHEFLUSH_H */
diff --git a/arch/x86/include/asm/hypervisor.h b/arch/x86/include/asm/hypervisor.h
index 67942b6ad4b7..21126155a739 100644
--- a/arch/x86/include/asm/hypervisor.h
+++ b/arch/x86/include/asm/hypervisor.h
@@ -35,9 +35,6 @@ struct hypervisor_x86 {
/* Detection routine */
uint32_t (*detect)(void);
- /* Adjust CPU feature bits (run once per CPU) */
- void (*set_cpu_features)(struct cpuinfo_x86 *);
-
/* Platform setup (run once per boot) */
void (*init_platform)(void);
@@ -53,15 +50,14 @@ extern const struct hypervisor_x86 *x86_hyper;
/* Recognized hypervisors */
extern const struct hypervisor_x86 x86_hyper_vmware;
extern const struct hypervisor_x86 x86_hyper_ms_hyperv;
-extern const struct hypervisor_x86 x86_hyper_xen;
+extern const struct hypervisor_x86 x86_hyper_xen_pv;
+extern const struct hypervisor_x86 x86_hyper_xen_hvm;
extern const struct hypervisor_x86 x86_hyper_kvm;
-extern void init_hypervisor(struct cpuinfo_x86 *c);
extern void init_hypervisor_platform(void);
extern bool hypervisor_x2apic_available(void);
extern void hypervisor_pin_vcpu(int cpu);
#else
-static inline void init_hypervisor(struct cpuinfo_x86 *c) { }
static inline void init_hypervisor_platform(void) { }
static inline bool hypervisor_x2apic_available(void) { return false; }
#endif /* CONFIG_HYPERVISOR_GUEST */
diff --git a/arch/x86/include/asm/intel_pmc_ipc.h b/arch/x86/include/asm/intel_pmc_ipc.h
index 4291b6a5ddf7..fac89eb78a6b 100644
--- a/arch/x86/include/asm/intel_pmc_ipc.h
+++ b/arch/x86/include/asm/intel_pmc_ipc.h
@@ -23,6 +23,11 @@
#define IPC_ERR_EMSECURITY 6
#define IPC_ERR_UNSIGNEDKERNEL 7
+/* GCR reg offsets from gcr base*/
+#define PMC_GCR_PMC_CFG_REG 0x08
+#define PMC_GCR_TELEM_DEEP_S0IX_REG 0x78
+#define PMC_GCR_TELEM_SHLW_S0IX_REG 0x80
+
#if IS_ENABLED(CONFIG_INTEL_PMC_IPC)
int intel_pmc_ipc_simple_command(int cmd, int sub);
@@ -31,6 +36,9 @@ int intel_pmc_ipc_raw_cmd(u32 cmd, u32 sub, u8 *in, u32 inlen,
int intel_pmc_ipc_command(u32 cmd, u32 sub, u8 *in, u32 inlen,
u32 *out, u32 outlen);
int intel_pmc_s0ix_counter_read(u64 *data);
+int intel_pmc_gcr_read(u32 offset, u32 *data);
+int intel_pmc_gcr_write(u32 offset, u32 data);
+int intel_pmc_gcr_update(u32 offset, u32 mask, u32 val);
#else
@@ -56,6 +64,21 @@ static inline int intel_pmc_s0ix_counter_read(u64 *data)
return -EINVAL;
}
+static inline int intel_pmc_gcr_read(u32 offset, u32 *data)
+{
+ return -EINVAL;
+}
+
+static inline int intel_pmc_gcr_write(u32 offset, u32 data)
+{
+ return -EINVAL;
+}
+
+static inline int intel_pmc_gcr_update(u32 offset, u32 mask, u32 val)
+{
+ return -EINVAL;
+}
+
#endif /*CONFIG_INTEL_PMC_IPC*/
#endif
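
The hunk above only declares the new GCR accessors; a hedged sketch of how a caller might use them follows. Only the intel_pmc_gcr_*() prototypes and the PMC_GCR_* offsets are taken from the header; the function and the bit it manipulates are made up for illustration:

#include <linux/types.h>
#include <linux/printk.h>
#include <asm/intel_pmc_ipc.h>

/* Purely illustrative bit in PMC_CFG; not a documented field. */
#define EXAMPLE_PMC_CFG_BIT	(1u << 0)

static int example_pmc_cfg_tweak(void)
{
	u32 cfg;
	int ret;

	/* Read the PMC configuration register through the GCR window. */
	ret = intel_pmc_gcr_read(PMC_GCR_PMC_CFG_REG, &cfg);
	if (ret)
		return ret;
	pr_info("PMC_CFG = %#x\n", cfg);

	/* Read-modify-write a single bit; the mask limits the update. */
	return intel_pmc_gcr_update(PMC_GCR_PMC_CFG_REG,
				    EXAMPLE_PMC_CFG_BIT, EXAMPLE_PMC_CFG_BIT);
}
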
diff --git a/arch/x86/include/asm/intel_scu_ipc.h b/arch/x86/include/asm/intel_scu_ipc.h
index 4fb1d0abef95..81d3d8776fd9 100644
--- a/arch/x86/include/asm/intel_scu_ipc.h
+++ b/arch/x86/include/asm/intel_scu_ipc.h
@@ -3,6 +3,9 @@
#include <linux/notifier.h>
+#define IPCMSG_INDIRECT_READ 0x02
+#define IPCMSG_INDIRECT_WRITE 0x05
+
#define IPCMSG_COLD_OFF 0x80 /* Only for Tangier */
#define IPCMSG_WARM_RESET 0xF0
@@ -45,7 +48,10 @@ int intel_scu_ipc_update_register(u16 addr, u8 data, u8 mask);
/* Issue commands to the SCU with or without data */
int intel_scu_ipc_simple_command(int cmd, int sub);
int intel_scu_ipc_command(int cmd, int sub, u32 *in, int inlen,
- u32 *out, int outlen);
+ u32 *out, int outlen);
+int intel_scu_ipc_raw_command(int cmd, int sub, u8 *in, int inlen,
+ u32 *out, int outlen, u32 dptr, u32 sptr);
+
/* I2C control api */
int intel_scu_ipc_i2c_cntrl(u32 addr, u32 *data);
diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
index 3e8c287090e4..055962615779 100644
--- a/arch/x86/include/asm/kvm_emulate.h
+++ b/arch/x86/include/asm/kvm_emulate.h
@@ -221,6 +221,9 @@ struct x86_emulate_ops {
void (*get_cpuid)(struct x86_emulate_ctxt *ctxt,
u32 *eax, u32 *ebx, u32 *ecx, u32 *edx);
void (*set_nmi_mask)(struct x86_emulate_ctxt *ctxt, bool masked);
+
+ unsigned (*get_hflags)(struct x86_emulate_ctxt *ctxt);
+ void (*set_hflags)(struct x86_emulate_ctxt *ctxt, unsigned hflags);
};
typedef u32 __attribute__((vector_size(16))) sse128_t;
@@ -290,7 +293,6 @@ struct x86_emulate_ctxt {
/* interruptibility state, as a result of execution of STI or MOV SS */
int interruptibility;
- int emul_flags;
bool perm_ok; /* do not check permissions if true */
bool ud; /* inject an #UD if host doesn't support insn */
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 74ef58c8ff53..f5bddf92faba 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -43,8 +43,6 @@
#define KVM_PRIVATE_MEM_SLOTS 3
#define KVM_MEM_SLOTS_NUM (KVM_USER_MEM_SLOTS + KVM_PRIVATE_MEM_SLOTS)
-#define KVM_PIO_PAGE_OFFSET 1
-#define KVM_COALESCED_MMIO_PAGE_OFFSET 2
#define KVM_HALT_POLL_NS_DEFAULT 400000
#define KVM_IRQCHIP_NUM_PINS KVM_IOAPIC_NUM_PINS
@@ -63,10 +61,10 @@
#define KVM_REQ_PMI 19
#define KVM_REQ_SMI 20
#define KVM_REQ_MASTERCLOCK_UPDATE 21
-#define KVM_REQ_MCLOCK_INPROGRESS 22
-#define KVM_REQ_SCAN_IOAPIC 23
+#define KVM_REQ_MCLOCK_INPROGRESS (22 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
+#define KVM_REQ_SCAN_IOAPIC (23 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_GLOBAL_CLOCK_UPDATE 24
-#define KVM_REQ_APIC_PAGE_RELOAD 25
+#define KVM_REQ_APIC_PAGE_RELOAD (25 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_HV_CRASH 26
#define KVM_REQ_IOAPIC_EOI_EXIT 27
#define KVM_REQ_HV_RESET 28
@@ -343,9 +341,10 @@ struct kvm_mmu {
void (*update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
u64 *spte, const void *pte);
hpa_t root_hpa;
- int root_level;
- int shadow_root_level;
union kvm_mmu_page_role base_role;
+ u8 root_level;
+ u8 shadow_root_level;
+ u8 ept_ad;
bool direct_map;
/*
@@ -612,6 +611,8 @@ struct kvm_vcpu_arch {
unsigned long dr7;
unsigned long eff_db[KVM_NR_DB_REGS];
unsigned long guest_debug_dr7;
+ u64 msr_platform_info;
+ u64 msr_misc_features_enables;
u64 mcg_cap;
u64 mcg_status;
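
The redefined KVM_REQ_* constants pack modifier flags into the bits above the request number. The sketch below illustrates the packing with stand-in flag values; the real KVM_REQUEST_MASK, KVM_REQUEST_WAIT and KVM_REQUEST_NO_WAKEUP definitions live in the generic include/linux/kvm_host.h and may differ:

#include <stdint.h>
#include <stdio.h>

/* Stand-in layout: low byte = request number, higher bits = modifier flags. */
#define REQUEST_MASK		0xffu
#define REQUEST_NO_WAKEUP	(1u << 8)
#define REQUEST_WAIT		(1u << 9)

#define REQ_SCAN_IOAPIC		(23 | REQUEST_WAIT | REQUEST_NO_WAKEUP)

int main(void)
{
	unsigned int req = REQ_SCAN_IOAPIC;

	/* The bit set in the vCPU's request word is the low request number... */
	printf("request number: %u\n", req & REQUEST_MASK);
	/* ...while the sender consults the flags to decide whether to wake
	 * the vCPU and whether to wait for it to see the request. */
	printf("wait: %d no_wakeup: %d\n",
	       !!(req & REQUEST_WAIT), !!(req & REQUEST_NO_WAKEUP));
	return 0;
}
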
diff --git a/arch/x86/include/asm/pci.h b/arch/x86/include/asm/pci.h
index 1411dbed5e5e..f513cc231151 100644
--- a/arch/x86/include/asm/pci.h
+++ b/arch/x86/include/asm/pci.h
@@ -7,6 +7,7 @@
#include <linux/string.h>
#include <linux/scatterlist.h>
#include <asm/io.h>
+#include <asm/pat.h>
#include <asm/x86_init.h>
#ifdef __KERNEL__
@@ -102,10 +103,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq);
#define HAVE_PCI_MMAP
-extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
- enum pci_mmap_state mmap_state,
- int write_combine);
-
+#define arch_can_pci_mmap_wc() pat_enabled()
+#define ARCH_GENERIC_PCI_MMAP_RESOURCE
#ifdef CONFIG_PCI
extern void early_quirks(void);
diff --git a/arch/x86/include/asm/pmem.h b/arch/x86/include/asm/pmem.h
index 529bb4a6487a..d5a22bac9988 100644
--- a/arch/x86/include/asm/pmem.h
+++ b/arch/x86/include/asm/pmem.h
@@ -44,11 +44,6 @@ static inline void arch_memcpy_to_pmem(void *dst, const void *src, size_t n)
BUG();
}
-static inline int arch_memcpy_from_pmem(void *dst, const void *src, size_t n)
-{
- return memcpy_mcsafe(dst, src, n);
-}
-
/**
* arch_wb_cache_pmem - write back a cache range with CLWB
* @vaddr: virtual start address
diff --git a/arch/x86/include/asm/set_memory.h b/arch/x86/include/asm/set_memory.h
new file mode 100644
index 000000000000..eaec6c364e42
--- /dev/null
+++ b/arch/x86/include/asm/set_memory.h
@@ -0,0 +1,87 @@
+#ifndef _ASM_X86_SET_MEMORY_H
+#define _ASM_X86_SET_MEMORY_H
+
+#include <asm/page.h>
+#include <asm-generic/set_memory.h>
+
+/*
+ * The set_memory_* API can be used to change various attributes of a virtual
+ * address range. The attributes include:
+ * Cachability : UnCached, WriteCombining, WriteThrough, WriteBack
+ * Executability : eXecutable, NoteXecutable
+ * Read/Write : ReadOnly, ReadWrite
+ * Presence : NotPresent
+ *
+ * Within a category, the attributes are mutually exclusive.
+ *
+ * The implementation of this API will take care of various aspects that
+ * are associated with changing such attributes, such as:
+ * - Flushing TLBs
+ * - Flushing CPU caches
+ * - Making sure aliases of the memory behind the mapping don't violate
+ * coherency rules as defined by the CPU in the system.
+ *
+ * What this API does not do:
+ * - Provide exclusion between various callers - including callers that
+ * operate on other mappings of the same physical page
+ * - Restore default attributes when a page is freed
+ * - Guarantee that mappings other than the requested one are
+ * in any state, other than that these do not violate rules for
+ * the CPU you have. Do not depend on any effects on other mappings,
+ * CPUs other than the one you have may have more relaxed rules.
+ * The caller is required to take care of these.
+ */
+
+int _set_memory_uc(unsigned long addr, int numpages);
+int _set_memory_wc(unsigned long addr, int numpages);
+int _set_memory_wt(unsigned long addr, int numpages);
+int _set_memory_wb(unsigned long addr, int numpages);
+int set_memory_uc(unsigned long addr, int numpages);
+int set_memory_wc(unsigned long addr, int numpages);
+int set_memory_wt(unsigned long addr, int numpages);
+int set_memory_wb(unsigned long addr, int numpages);
+int set_memory_np(unsigned long addr, int numpages);
+int set_memory_4k(unsigned long addr, int numpages);
+
+int set_memory_array_uc(unsigned long *addr, int addrinarray);
+int set_memory_array_wc(unsigned long *addr, int addrinarray);
+int set_memory_array_wt(unsigned long *addr, int addrinarray);
+int set_memory_array_wb(unsigned long *addr, int addrinarray);
+
+int set_pages_array_uc(struct page **pages, int addrinarray);
+int set_pages_array_wc(struct page **pages, int addrinarray);
+int set_pages_array_wt(struct page **pages, int addrinarray);
+int set_pages_array_wb(struct page **pages, int addrinarray);
+
+/*
+ * For legacy compatibility with the old APIs, a few functions
+ * are provided that work on a "struct page".
+ * These functions operate ONLY on the 1:1 kernel mapping of the
+ * memory that the struct page represents, and internally just
+ * call the set_memory_* function. See the description of the
+ * set_memory_* function for more details on conventions.
+ *
+ * These APIs should be considered *deprecated* and are likely going to
+ * be removed in the future.
+ * The reason for this is the implicit operation on the 1:1 mapping only,
+ * making this not a generally useful API.
+ *
+ * Specifically, many users of the old APIs had a virtual address,
+ * called virt_to_page() or vmalloc_to_page() on that address to
+ * get a struct page* that the old API required.
+ * To convert these cases, use set_memory_*() on the original
+ * virtual address, do not use these functions.
+ */
+
+int set_pages_uc(struct page *page, int numpages);
+int set_pages_wb(struct page *page, int numpages);
+int set_pages_x(struct page *page, int numpages);
+int set_pages_nx(struct page *page, int numpages);
+int set_pages_ro(struct page *page, int numpages);
+int set_pages_rw(struct page *page, int numpages);
+
+extern int kernel_set_to_readonly;
+void set_kernel_text_rw(void);
+void set_kernel_text_ro(void);
+
+#endif /* _ASM_X86_SET_MEMORY_H */
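
A minimal usage sketch for the relocated API, assuming a page-aligned allocation from the kernel allocator; only set_memory_uc() and set_memory_wb() come from the header above, the rest is illustrative scaffolding:

#include <linux/errno.h>
#include <linux/gfp.h>
#include <asm/set_memory.h>

/* Mark one freshly allocated page uncached, then restore write-back before
 * freeing it. Error handling is kept minimal for brevity. */
static int example_uncached_page(void)
{
	unsigned long addr = __get_free_page(GFP_KERNEL);
	int ret;

	if (!addr)
		return -ENOMEM;

	ret = set_memory_uc(addr, 1);		/* one page starting at addr */
	if (ret)
		goto out_free;

	/* ... use the uncached mapping ... */

	set_memory_wb(addr, 1);			/* restore the default attribute */
out_free:
	free_page(addr);
	return ret;
}
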
diff --git a/arch/x86/include/asm/string_64.h b/arch/x86/include/asm/string_64.h
index a164862d77e3..733bae07fb29 100644
--- a/arch/x86/include/asm/string_64.h
+++ b/arch/x86/include/asm/string_64.h
@@ -79,6 +79,7 @@ int strcmp(const char *cs, const char *ct);
#define memset(s, c, n) __memset(s, c, n)
#endif
+#define __HAVE_ARCH_MEMCPY_MCSAFE 1
__must_check int memcpy_mcsafe_unrolled(void *dst, const void *src, size_t cnt);
DECLARE_STATIC_KEY_FALSE(mcsafe_key);
diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
index cc54b7026567..35cd06f636ab 100644
--- a/arch/x86/include/asm/vmx.h
+++ b/arch/x86/include/asm/vmx.h
@@ -70,8 +70,10 @@
#define SECONDARY_EXEC_APIC_REGISTER_VIRT 0x00000100
#define SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY 0x00000200
#define SECONDARY_EXEC_PAUSE_LOOP_EXITING 0x00000400
+#define SECONDARY_EXEC_RDRAND 0x00000800
#define SECONDARY_EXEC_ENABLE_INVPCID 0x00001000
#define SECONDARY_EXEC_SHADOW_VMCS 0x00004000
+#define SECONDARY_EXEC_RDSEED 0x00010000
#define SECONDARY_EXEC_ENABLE_PML 0x00020000
#define SECONDARY_EXEC_XSAVES 0x00100000
#define SECONDARY_EXEC_TSC_SCALING 0x02000000
@@ -516,12 +518,14 @@ struct vmx_msr_entry {
#define EPT_VIOLATION_READABLE_BIT 3
#define EPT_VIOLATION_WRITABLE_BIT 4
#define EPT_VIOLATION_EXECUTABLE_BIT 5
+#define EPT_VIOLATION_GVA_TRANSLATED_BIT 8
#define EPT_VIOLATION_ACC_READ (1 << EPT_VIOLATION_ACC_READ_BIT)
#define EPT_VIOLATION_ACC_WRITE (1 << EPT_VIOLATION_ACC_WRITE_BIT)
#define EPT_VIOLATION_ACC_INSTR (1 << EPT_VIOLATION_ACC_INSTR_BIT)
#define EPT_VIOLATION_READABLE (1 << EPT_VIOLATION_READABLE_BIT)
#define EPT_VIOLATION_WRITABLE (1 << EPT_VIOLATION_WRITABLE_BIT)
#define EPT_VIOLATION_EXECUTABLE (1 << EPT_VIOLATION_EXECUTABLE_BIT)
+#define EPT_VIOLATION_GVA_TRANSLATED (1 << EPT_VIOLATION_GVA_TRANSLATED_BIT)
/*
* VM-instruction error numbers
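
With the new GVA_TRANSLATED bit, an EPT-violation exit qualification can be decoded field by field. A self-contained decoder using the bit values shown above, copied locally so the snippet compiles on its own (reading bit 8 as "fault on the translated address rather than during the guest page-table walk" is stated here as an assumption):

#include <stdint.h>
#include <stdio.h>

/* Bit values as defined in arch/x86/include/asm/vmx.h. */
#define EPT_VIOLATION_ACC_READ		(1u << 0)
#define EPT_VIOLATION_ACC_WRITE		(1u << 1)
#define EPT_VIOLATION_ACC_INSTR		(1u << 2)
#define EPT_VIOLATION_READABLE		(1u << 3)
#define EPT_VIOLATION_WRITABLE		(1u << 4)
#define EPT_VIOLATION_EXECUTABLE	(1u << 5)
#define EPT_VIOLATION_GVA_TRANSLATED	(1u << 8)

static void decode_ept_violation(uint64_t qual)
{
	printf("access:       %s%s%s\n",
	       qual & EPT_VIOLATION_ACC_READ  ? "R" : "-",
	       qual & EPT_VIOLATION_ACC_WRITE ? "W" : "-",
	       qual & EPT_VIOLATION_ACC_INSTR ? "X" : "-");
	printf("page permits: %s%s%s\n",
	       qual & EPT_VIOLATION_READABLE   ? "r" : "-",
	       qual & EPT_VIOLATION_WRITABLE   ? "w" : "-",
	       qual & EPT_VIOLATION_EXECUTABLE ? "x" : "-");
	/* Bit 8: the fault hit the translated linear address itself rather
	 * than an access made while walking the guest page tables. */
	printf("GVA translated: %s\n",
	       qual & EPT_VIOLATION_GVA_TRANSLATED ? "yes" : "no");
}

int main(void)
{
	/* Example: a write to a page the EPT maps read-only, after translation. */
	decode_ept_violation(EPT_VIOLATION_ACC_WRITE | EPT_VIOLATION_READABLE |
			     EPT_VIOLATION_GVA_TRANSLATED);
	return 0;
}
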
diff --git a/arch/x86/include/asm/xen/events.h b/arch/x86/include/asm/xen/events.h
index 608a79d5a466..e6911caf5bbf 100644
--- a/arch/x86/include/asm/xen/events.h
+++ b/arch/x86/include/asm/xen/events.h
@@ -20,4 +20,15 @@ static inline int xen_irqs_disabled(struct pt_regs *regs)
/* No need for a barrier -- XCHG is a barrier on x86. */
#define xchg_xen_ulong(ptr, val) xchg((ptr), (val))
+extern int xen_have_vector_callback;
+
+/*
+ * Events delivered via platform PCI interrupts are always
+ * routed to vcpu 0 and hence cannot be rebound.
+ */
+static inline bool xen_support_evtchn_rebind(void)
+{
+ return (!xen_hvm_domain() || xen_have_vector_callback);
+}
+
#endif /* _ASM_X86_XEN_EVENTS_H */
diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
index 8a5a02b1dfba..8417ef7c3885 100644
--- a/arch/x86/include/asm/xen/page.h
+++ b/arch/x86/include/asm/xen/page.h
@@ -52,12 +52,30 @@ extern bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn);
extern unsigned long __init set_phys_range_identity(unsigned long pfn_s,
unsigned long pfn_e);
+#ifdef CONFIG_XEN_PV
extern int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
struct gnttab_map_grant_ref *kmap_ops,
struct page **pages, unsigned int count);
extern int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
struct gnttab_unmap_grant_ref *kunmap_ops,
struct page **pages, unsigned int count);
+#else
+static inline int
+set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
+ struct gnttab_map_grant_ref *kmap_ops,
+ struct page **pages, unsigned int count)
+{
+ return 0;
+}
+
+static inline int
+clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
+ struct gnttab_unmap_grant_ref *kunmap_ops,
+ struct page **pages, unsigned int count)
+{
+ return 0;
+}
+#endif
/*
* Helper functions to write or read unsigned long values to/from
@@ -73,6 +91,7 @@ static inline int xen_safe_read_ulong(unsigned long *addr, unsigned long *val)
return __get_user(*val, (unsigned long __user *)addr);
}
+#ifdef CONFIG_XEN_PV
/*
* When to use pfn_to_mfn(), __pfn_to_mfn() or get_phys_to_machine():
* - pfn_to_mfn() returns either INVALID_P2M_ENTRY or the mfn. No indicator
@@ -99,6 +118,12 @@ static inline unsigned long __pfn_to_mfn(unsigned long pfn)
return mfn;
}
+#else
+static inline unsigned long __pfn_to_mfn(unsigned long pfn)
+{
+ return pfn;
+}
+#endif
static inline unsigned long pfn_to_mfn(unsigned long pfn)
{
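
The header above provides real prototypes under CONFIG_XEN_PV and trivial static inline fall-backs otherwise, so callers never need their own #ifdefs. A generic sketch of that idiom with a made-up option name (not Xen code):

/* foo.h -- illustrative header; CONFIG_HAVE_FOO is a made-up option. */
#ifdef CONFIG_HAVE_FOO
/* Real implementation, built only when the option is enabled. */
int foo_map(unsigned long pfn);
#else
/* Compiled-out configuration: callers still build and link, and the
 * no-op behaviour matches "feature absent". */
static inline int foo_map(unsigned long pfn)
{
	return 0;
}
#endif

Callers can then invoke foo_map() unconditionally, exactly as the p2m helpers above are used from common Xen code.
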
diff --git a/arch/x86/include/uapi/asm/hyperv.h b/arch/x86/include/uapi/asm/hyperv.h
index 3a20ccf787b8..432df4b1baec 100644
--- a/arch/x86/include/uapi/asm/hyperv.h
+++ b/arch/x86/include/uapi/asm/hyperv.h
@@ -124,7 +124,7 @@
* Recommend using hypercall for address space switches rather
* than MOV to CR3 instruction
*/
-#define HV_X64_MWAIT_RECOMMENDED (1 << 0)
+#define HV_X64_AS_SWITCH_RECOMMENDED (1 << 0)
/* Recommend using hypercall for local TLB flushes rather
* than INVLPG or MOV to CR3 instructions */
#define HV_X64_LOCAL_TLB_FLUSH_RECOMMENDED (1 << 1)
@@ -148,6 +148,11 @@
#define HV_X64_RELAXED_TIMING_RECOMMENDED (1 << 5)
/*
+ * Virtual APIC support
+ */
+#define HV_X64_DEPRECATING_AEOI_RECOMMENDED (1 << 9)
+
+/*
* Crash notification flag.
*/
#define HV_CRASH_CTL_CRASH_NOTIFY (1ULL << 63)
diff --git a/arch/x86/include/uapi/asm/kvm.h b/arch/x86/include/uapi/asm/kvm.h
index 739c0c594022..c2824d02ba37 100644
--- a/arch/x86/include/uapi/asm/kvm.h
+++ b/arch/x86/include/uapi/asm/kvm.h
@@ -9,6 +9,9 @@
#include <linux/types.h>
#include <linux/ioctl.h>
+#define KVM_PIO_PAGE_OFFSET 1
+#define KVM_COALESCED_MMIO_PAGE_OFFSET 2
+
#define DE_VECTOR 0
#define DB_VECTOR 1
#define BP_VECTOR 3
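
Exposing KVM_PIO_PAGE_OFFSET and KVM_COALESCED_MMIO_PAGE_OFFSET in the uapi header lets userspace locate those pages inside the vCPU mmap region. A hedged userspace sketch follows; the ioctl and mmap calls are the standard KVM ones, while the VM/vCPU setup and error handling are assumed to exist elsewhere:

#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/kvm.h>

/* kvm_fd comes from open("/dev/kvm"), vcpu_fd from KVM_CREATE_VCPU. */
static void *map_vcpu_region(int kvm_fd, int vcpu_fd, long *size_out)
{
	long size = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0);

	if (size < 0)
		return MAP_FAILED;
	*size_out = size;
	/* struct kvm_run sits at offset 0; the PIO data page and the
	 * coalesced MMIO ring follow at the page offsets from the uapi
	 * headers. */
	return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
		    vcpu_fd, 0);
}

static struct kvm_coalesced_mmio_ring *coalesced_ring(void *vcpu_region)
{
	return (struct kvm_coalesced_mmio_ring *)
		((char *)vcpu_region +
		 KVM_COALESCED_MMIO_PAGE_OFFSET * getpagesize());
}
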
diff --git a/arch/x86/include/uapi/asm/vmx.h b/arch/x86/include/uapi/asm/vmx.h
index 14458658e988..690a2dcf4078 100644
--- a/arch/x86/include/uapi/asm/vmx.h
+++ b/arch/x86/include/uapi/asm/vmx.h
@@ -76,7 +76,11 @@
#define EXIT_REASON_WBINVD 54
#define EXIT_REASON_XSETBV 55
#define EXIT_REASON_APIC_WRITE 56
+#define EXIT_REASON_RDRAND 57
#define EXIT_REASON_INVPCID 58
+#define EXIT_REASON_VMFUNC 59
+#define EXIT_REASON_ENCLS 60
+#define EXIT_REASON_RDSEED 61
#define EXIT_REASON_PML_FULL 62
#define EXIT_REASON_XSAVES 63
#define EXIT_REASON_XRSTORS 64
@@ -90,6 +94,7 @@
{ EXIT_REASON_TASK_SWITCH, "TASK_SWITCH" }, \
{ EXIT_REASON_CPUID, "CPUID" }, \
{ EXIT_REASON_HLT, "HLT" }, \
+ { EXIT_REASON_INVD, "INVD" }, \
{ EXIT_REASON_INVLPG, "INVLPG" }, \
{ EXIT_REASON_RDPMC, "RDPMC" }, \
{ EXIT_REASON_RDTSC, "RDTSC" }, \
@@ -108,6 +113,8 @@
{ EXIT_REASON_IO_INSTRUCTION, "IO_INSTRUCTION" }, \
{ EXIT_REASON_MSR_READ, "MSR_READ" }, \
{ EXIT_REASON_MSR_WRITE, "MSR_WRITE" }, \
+ { EXIT_REASON_INVALID_STATE, "INVALID_STATE" }, \
+ { EXIT_REASON_MSR_LOAD_FAIL, "MSR_LOAD_FAIL" }, \
{ EXIT_REASON_MWAIT_INSTRUCTION, "MWAIT_INSTRUCTION" }, \
{ EXIT_REASON_MONITOR_TRAP_FLAG, "MONITOR_TRAP_FLAG" }, \
{ EXIT_REASON_MONITOR_INSTRUCTION, "MONITOR_INSTRUCTION" }, \
@@ -115,20 +122,24 @@
{ EXIT_REASON_MCE_DURING_VMENTRY, "MCE_DURING_VMENTRY" }, \
{ EXIT_REASON_TPR_BELOW_THRESHOLD, "TPR_BELOW_THRESHOLD" }, \
{ EXIT_REASON_APIC_ACCESS, "APIC_ACCESS" }, \
- { EXIT_REASON_GDTR_IDTR, "GDTR_IDTR" }, \
- { EXIT_REASON_LDTR_TR, "LDTR_TR" }, \
+ { EXIT_REASON_EOI_INDUCED, "EOI_INDUCED" }, \
+ { EXIT_REASON_GDTR_IDTR, "GDTR_IDTR" }, \
+ { EXIT_REASON_LDTR_TR, "LDTR_TR" }, \
{ EXIT_REASON_EPT_VIOLATION, "EPT_VIOLATION" }, \
{ EXIT_REASON_EPT_MISCONFIG, "EPT_MISCONFIG" }, \
{ EXIT_REASON_INVEPT, "INVEPT" }, \
+ { EXIT_REASON_RDTSCP, "RDTSCP" }, \
{ EXIT_REASON_PREEMPTION_TIMER, "PREEMPTION_TIMER" }, \
+ { EXIT_REASON_INVVPID, "INVVPID" }, \
{ EXIT_REASON_WBINVD, "WBINVD" }, \
+ { EXIT_REASON_XSETBV, "XSETBV" }, \
{ EXIT_REASON_APIC_WRITE, "APIC_WRITE" }, \
- { EXIT_REASON_EOI_INDUCED, "EOI_INDUCED" }, \
- { EXIT_REASON_INVALID_STATE, "INVALID_STATE" }, \
- { EXIT_REASON_MSR_LOAD_FAIL, "MSR_LOAD_FAIL" }, \
- { EXIT_REASON_INVD, "INVD" }, \
- { EXIT_REASON_INVVPID, "INVVPID" }, \
+ { EXIT_REASON_RDRAND, "RDRAND" }, \
{ EXIT_REASON_INVPCID, "INVPCID" }, \
+ { EXIT_REASON_VMFUNC, "VMFUNC" }, \
+ { EXIT_REASON_ENCLS, "ENCLS" }, \
+ { EXIT_REASON_RDSEED, "RDSEED" }, \
+ { EXIT_REASON_PML_FULL, "PML_FULL" }, \
{ EXIT_REASON_XSAVES, "XSAVES" }, \
{ EXIT_REASON_XRSTORS, "XRSTORS" }
diff --git a/arch/x86/kernel/amd_gart_64.c b/arch/x86/kernel/amd_gart_64.c
index df083efe6ee0..815dd63f49d0 100644
--- a/arch/x86/kernel/amd_gart_64.c
+++ b/arch/x86/kernel/amd_gart_64.c
@@ -36,7 +36,7 @@
#include <asm/proto.h>
#include <asm/iommu.h>
#include <asm/gart.h>
-#include <asm/cacheflush.h>
+#include <asm/set_memory.h>
#include <asm/swiotlb.h>
#include <asm/dma.h>
#include <asm/amd_nb.h>
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index c36140d788fe..ee8f11800295 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -16,7 +16,7 @@
#ifdef CONFIG_X86_64
# include <asm/mmconfig.h>
-# include <asm/cacheflush.h>
+# include <asm/set_memory.h>
#endif
#include "cpu.h"
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index a44ef52184df..0af86d9242da 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -17,7 +17,7 @@
#include <asm/paravirt.h>
#include <asm/alternative.h>
#include <asm/pgtable.h>
-#include <asm/cacheflush.h>
+#include <asm/set_memory.h>
void __init check_bugs(void)
{
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 8ee32119144d..c8b39870f33e 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1149,7 +1149,6 @@ static void identify_cpu(struct cpuinfo_x86 *c)
detect_ht(c);
#endif
- init_hypervisor(c);
x86_init_rdrand(c);
x86_init_cache_qos(c);
setup_pku(c);
diff --git a/arch/x86/kernel/cpu/hypervisor.c b/arch/x86/kernel/cpu/hypervisor.c
index 35691a6b0d32..4fa90006ac68 100644
--- a/arch/x86/kernel/cpu/hypervisor.c
+++ b/arch/x86/kernel/cpu/hypervisor.c
@@ -28,8 +28,11 @@
static const __initconst struct hypervisor_x86 * const hypervisors[] =
{
-#ifdef CONFIG_XEN
- &x86_hyper_xen,
+#ifdef CONFIG_XEN_PV
+ &x86_hyper_xen_pv,
+#endif
+#ifdef CONFIG_XEN_PVHVM
+ &x86_hyper_xen_hvm,
#endif
&x86_hyper_vmware,
&x86_hyper_ms_hyperv,
@@ -60,12 +63,6 @@ detect_hypervisor_vendor(void)
pr_info("Hypervisor detected: %s\n", x86_hyper->name);
}
-void init_hypervisor(struct cpuinfo_x86 *c)
-{
- if (x86_hyper && x86_hyper->set_cpu_features)
- x86_hyper->set_cpu_features(c);
-}
-
void __init init_hypervisor_platform(void)
{
@@ -74,8 +71,6 @@ void __init init_hypervisor_platform(void)
if (!x86_hyper)
return;
- init_hypervisor(&boot_cpu_data);
-
if (x86_hyper->init_platform)
x86_hyper->init_platform();
}
diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
index b5375b9497b3..04cb8d34ccb8 100644
--- a/arch/x86/kernel/cpu/mshyperv.c
+++ b/arch/x86/kernel/cpu/mshyperv.c
@@ -49,6 +49,9 @@ void hyperv_vector_handler(struct pt_regs *regs)
if (vmbus_handler)
vmbus_handler();
+ if (ms_hyperv.hints & HV_X64_DEPRECATING_AEOI_RECOMMENDED)
+ ack_APIC_irq();
+
exiting_irq();
set_irq_regs(old_regs);
}
diff --git a/arch/x86/kernel/cpu/vmware.c b/arch/x86/kernel/cpu/vmware.c
index 22403a28caf5..40ed26852ebd 100644
--- a/arch/x86/kernel/cpu/vmware.c
+++ b/arch/x86/kernel/cpu/vmware.c
@@ -113,6 +113,24 @@ static void __init vmware_paravirt_ops_setup(void)
#define vmware_paravirt_ops_setup() do {} while (0)
#endif
+/*
+ * VMware hypervisor takes care of exporting a reliable TSC to the guest.
+ * Still, due to timing difference when running on virtual cpus, the TSC can
+ * be marked as unstable in some cases. For example, the TSC sync check at
+ * bootup can fail due to a marginal offset between vcpus' TSCs (though the
+ * TSCs do not drift from each other). Also, the ACPI PM timer clocksource
+ * is not suitable as a watchdog when running on a hypervisor because the
+ * kernel may miss a wrap of the counter if the vcpu is descheduled for a
+ * long time. To skip these checks at runtime we set these capability bits,
+ * so that the kernel could just trust the hypervisor with providing a
+ * reliable virtual TSC that is suitable for timekeeping.
+ */
+static void __init vmware_set_capabilities(void)
+{
+ setup_force_cpu_cap(X86_FEATURE_CONSTANT_TSC);
+ setup_force_cpu_cap(X86_FEATURE_TSC_RELIABLE);
+}
+
static void __init vmware_platform_setup(void)
{
uint32_t eax, ebx, ecx, edx;
@@ -152,6 +170,8 @@ static void __init vmware_platform_setup(void)
#ifdef CONFIG_X86_IO_APIC
no_timer_check = 1;
#endif
+
+ vmware_set_capabilities();
}
/*
@@ -176,24 +196,6 @@ static uint32_t __init vmware_platform(void)
return 0;
}
-/*
- * VMware hypervisor takes care of exporting a reliable TSC to the guest.
- * Still, due to timing difference when running on virtual cpus, the TSC can
- * be marked as unstable in some cases. For example, the TSC sync check at
- * bootup can fail due to a marginal offset between vcpus' TSCs (though the
- * TSCs do not drift from each other). Also, the ACPI PM timer clocksource
- * is not suitable as a watchdog when running on a hypervisor because the
- * kernel may miss a wrap of the counter if the vcpu is descheduled for a
- * long time. To skip these checks at runtime we set these capability bits,
- * so that the kernel could just trust the hypervisor with providing a
- * reliable virtual TSC that is suitable for timekeeping.
- */
-static void vmware_set_cpu_features(struct cpuinfo_x86 *c)
-{
- set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
- set_cpu_cap(c, X86_FEATURE_TSC_RELIABLE);
-}
-
/* Checks if hypervisor supports x2apic without VT-D interrupt remapping. */
static bool __init vmware_legacy_x2apic_available(void)
{
@@ -206,7 +208,6 @@ static bool __init vmware_legacy_x2apic_available(void)
const __refconst struct hypervisor_x86 x86_hyper_vmware = {
.name = "VMware",
.detect = vmware_platform,
- .set_cpu_features = vmware_set_cpu_features,
.init_platform = vmware_platform_setup,
.x2apic_available = vmware_legacy_x2apic_available,
};
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 8ee76dce9140..0651e974dcb3 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -24,7 +24,7 @@
#include <trace/syscall.h>
-#include <asm/cacheflush.h>
+#include <asm/set_memory.h>
#include <asm/kprobes.h>
#include <asm/ftrace.h>
#include <asm/nops.h>
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index 19e1f2a6d7b0..5b2bbfbb3712 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -61,6 +61,7 @@
#include <asm/alternative.h>
#include <asm/insn.h>
#include <asm/debugreg.h>
+#include <asm/set_memory.h>
#include "common.h"
diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
index 9aadff3d0902..901c640d152f 100644
--- a/arch/x86/kernel/kprobes/opt.c
+++ b/arch/x86/kernel/kprobes/opt.c
@@ -37,6 +37,7 @@
#include <asm/alternative.h>
#include <asm/insn.h>
#include <asm/debugreg.h>
+#include <asm/set_memory.h>
#include "common.h"
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 14f65a5f938e..da5c09789984 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -396,9 +396,9 @@ static u64 kvm_steal_clock(int cpu)
src = &per_cpu(steal_time, cpu);
do {
version = src->version;
- rmb();
+ virt_rmb();
steal = src->steal;
- rmb();
+ virt_rmb();
} while ((version & 1) || (version != src->version));
return steal;
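
The loop above is the read side of a version-counter (seqcount-style) protocol: the host makes the version odd while it updates steal time, and virt_rmb() orders the guest's loads. A user-space analogue with C11 acquire fences standing in for virt_rmb() (illustrative only, not the kernel primitives):

#include <stdatomic.h>
#include <stdint.h>

struct steal_sample {
	_Atomic uint32_t version;	/* odd while the writer is mid-update */
	_Atomic uint64_t steal;
};

static uint64_t read_steal(struct steal_sample *src)
{
	uint32_t version;
	uint64_t steal;

	do {
		version = atomic_load_explicit(&src->version,
					       memory_order_relaxed);
		atomic_thread_fence(memory_order_acquire);	/* ~virt_rmb() */
		steal = atomic_load_explicit(&src->steal,
					     memory_order_relaxed);
		atomic_thread_fence(memory_order_acquire);	/* ~virt_rmb() */
		/* Retry if the writer was active (odd) or updated meanwhile. */
	} while ((version & 1) ||
		 version != atomic_load_explicit(&src->version,
						 memory_order_relaxed));

	return steal;
}
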
diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
index 5f43cec296c5..8c53c5d7a1bc 100644
--- a/arch/x86/kernel/machine_kexec_32.c
+++ b/arch/x86/kernel/machine_kexec_32.c
@@ -23,7 +23,7 @@
#include <asm/io_apic.h>
#include <asm/cpufeature.h>
#include <asm/desc.h>
-#include <asm/cacheflush.h>
+#include <asm/set_memory.h>
#include <asm/debugreg.h>
static void set_idt(void *newidt, __u16 limit)
diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c
index 085c3b300d32..ce640428d6fe 100644
--- a/arch/x86/kernel/machine_kexec_64.c
+++ b/arch/x86/kernel/machine_kexec_64.c
@@ -27,6 +27,7 @@
#include <asm/debugreg.h>
#include <asm/kexec-bzimage64.h>
#include <asm/setup.h>
+#include <asm/set_memory.h>
#ifdef CONFIG_KEXEC_FILE
static struct kexec_file_ops *kexec_file_loaders[] = {
diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
index 477ae806c2fa..f67bd3205df7 100644
--- a/arch/x86/kernel/module.c
+++ b/arch/x86/kernel/module.c
@@ -85,7 +85,7 @@ void *module_alloc(unsigned long size)
p = __vmalloc_node_range(size, MODULE_ALIGN,
MODULES_VADDR + get_module_load_offset(),
- MODULES_END, GFP_KERNEL | __GFP_HIGHMEM,
+ MODULES_END, GFP_KERNEL,
PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
__builtin_return_address(0));
if (p && (kasan_module_alloc(p, size) < 0)) {
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 825a1e47cf3e..b6840bf3940b 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -446,7 +446,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV))
__switch_to_xtra(prev_p, next_p, tss);
-#ifdef CONFIG_XEN
+#ifdef CONFIG_XEN_PV
/*
* On Xen PV, IOPL bits in pt_regs->flags have no effect, and
* current_pt_regs()->flags may not match the current task's
diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
index d4c8011a2293..4b1724059909 100644
--- a/arch/x86/kernel/tboot.c
+++ b/arch/x86/kernel/tboot.c
@@ -514,6 +514,9 @@ int tboot_force_iommu(void)
if (!tboot_enabled())
return 0;
+ if (!intel_iommu_tboot_noforce)
+ return 1;
+
if (no_iommu || swiotlb || dmar_disabled)
pr_warning("Forcing Intel-IOMMU to enabled\n");
diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
index ab8e32f7b9a8..760433b2574a 100644
--- a/arch/x86/kvm/Kconfig
+++ b/arch/x86/kvm/Kconfig
@@ -86,18 +86,6 @@ config KVM_MMU_AUDIT
This option adds a R/W kVM module parameter 'mmu_audit', which allows
auditing of KVM MMU events at runtime.
-config KVM_DEVICE_ASSIGNMENT
- bool "KVM legacy PCI device assignment support (DEPRECATED)"
- depends on KVM && PCI && IOMMU_API
- default n
- ---help---
- Provide support for legacy PCI device assignment through KVM. The
- kernel now also supports a full featured userspace device driver
- framework through VFIO, which supersedes this support and provides
- better security.
-
- If unsure, say N.
-
# OK, it's a little counter-intuitive to do this, but it puts it neatly under
# the virtualization menu.
source drivers/vhost/Kconfig
diff --git a/arch/x86/kvm/Makefile b/arch/x86/kvm/Makefile
index 3bff20710471..09d4b17be022 100644
--- a/arch/x86/kvm/Makefile
+++ b/arch/x86/kvm/Makefile
@@ -15,8 +15,6 @@ kvm-y += x86.o mmu.o emulate.o i8259.o irq.o lapic.o \
i8254.o ioapic.o irq_comm.o cpuid.o pmu.o mtrr.o \
hyperv.o page_track.o debugfs.o
-kvm-$(CONFIG_KVM_DEVICE_ASSIGNMENT) += assigned-dev.o iommu.o
-
kvm-intel-y += vmx.o pmu_intel.o
kvm-amd-y += svm.o pmu_amd.o
diff --git a/arch/x86/kvm/assigned-dev.c b/arch/x86/kvm/assigned-dev.c
deleted file mode 100644
index 308b8597c691..000000000000
--- a/arch/x86/kvm/assigned-dev.c
+++ /dev/null
@@ -1,1058 +0,0 @@
-/*
- * Kernel-based Virtual Machine - device assignment support
- *
- * Copyright (C) 2010 Red Hat, Inc. and/or its affiliates.
- *
- * This work is licensed under the terms of the GNU GPL, version 2. See
- * the COPYING file in the top-level directory.
- *
- */
-
-#include <linux/kvm_host.h>
-#include <linux/kvm.h>
-#include <linux/uaccess.h>
-#include <linux/vmalloc.h>
-#include <linux/errno.h>
-#include <linux/spinlock.h>
-#include <linux/pci.h>
-#include <linux/interrupt.h>
-#include <linux/slab.h>
-#include <linux/namei.h>
-#include <linux/fs.h>
-#include "irq.h"
-#include "assigned-dev.h"
-#include "trace/events/kvm.h"
-
-struct kvm_assigned_dev_kernel {
- struct kvm_irq_ack_notifier ack_notifier;
- struct list_head list;
- int assigned_dev_id;
- int host_segnr;
- int host_busnr;
- int host_devfn;
- unsigned int entries_nr;
- int host_irq;
- bool host_irq_disabled;
- bool pci_2_3;
- struct msix_entry *host_msix_entries;
- int guest_irq;
- struct msix_entry *guest_msix_entries;
- unsigned long irq_requested_type;
- int irq_source_id;
- int flags;
- struct pci_dev *dev;
- struct kvm *kvm;
- spinlock_t intx_lock;
- spinlock_t intx_mask_lock;
- char irq_name[32];
- struct pci_saved_state *pci_saved_state;
-};
-
-static struct kvm_assigned_dev_kernel *kvm_find_assigned_dev(struct list_head *head,
- int assigned_dev_id)
-{
- struct kvm_assigned_dev_kernel *match;
-
- list_for_each_entry(match, head, list) {
- if (match->assigned_dev_id == assigned_dev_id)
- return match;
- }
- return NULL;
-}
-
-static int find_index_from_host_irq(struct kvm_assigned_dev_kernel
- *assigned_dev, int irq)
-{
- int i, index;
- struct msix_entry *host_msix_entries;
-
- host_msix_entries = assigned_dev->host_msix_entries;
-
- index = -1;
- for (i = 0; i < assigned_dev->entries_nr; i++)
- if (irq == host_msix_entries[i].vector) {
- index = i;
- break;
- }
- if (index < 0)
- printk(KERN_WARNING "Fail to find correlated MSI-X entry!\n");
-
- return index;
-}
-
-static irqreturn_t kvm_assigned_dev_intx(int irq, void *dev_id)
-{
- struct kvm_assigned_dev_kernel *assigned_dev = dev_id;
- int ret;
-
- spin_lock(&assigned_dev->intx_lock);
- if (pci_check_and_mask_intx(assigned_dev->dev)) {
- assigned_dev->host_irq_disabled = true;
- ret = IRQ_WAKE_THREAD;
- } else
- ret = IRQ_NONE;
- spin_unlock(&assigned_dev->intx_lock);
-
- return ret;
-}
-
-static void
-kvm_assigned_dev_raise_guest_irq(struct kvm_assigned_dev_kernel *assigned_dev,
- int vector)
-{
- if (unlikely(assigned_dev->irq_requested_type &
- KVM_DEV_IRQ_GUEST_INTX)) {
- spin_lock(&assigned_dev->intx_mask_lock);
- if (!(assigned_dev->flags & KVM_DEV_ASSIGN_MASK_INTX))
- kvm_set_irq(assigned_dev->kvm,
- assigned_dev->irq_source_id, vector, 1,
- false);
- spin_unlock(&assigned_dev->intx_mask_lock);
- } else
- kvm_set_irq(assigned_dev->kvm, assigned_dev->irq_source_id,
- vector, 1, false);
-}
-
-static irqreturn_t kvm_assigned_dev_thread_intx(int irq, void *dev_id)
-{
- struct kvm_assigned_dev_kernel *assigned_dev = dev_id;
-
- if (!(assigned_dev->flags & KVM_DEV_ASSIGN_PCI_2_3)) {
- spin_lock_irq(&assigned_dev->intx_lock);
- disable_irq_nosync(irq);
- assigned_dev->host_irq_disabled = true;
- spin_unlock_irq(&assigned_dev->intx_lock);
- }
-
- kvm_assigned_dev_raise_guest_irq(assigned_dev,
- assigned_dev->guest_irq);
-
- return IRQ_HANDLED;
-}
-
-/*
- * Deliver an IRQ in an atomic context if we can, or return a failure,
- * user can retry in a process context.
- * Return value:
- * -EWOULDBLOCK - Can't deliver in atomic context: retry in a process context.
- * Other values - No need to retry.
- */
-static int kvm_set_irq_inatomic(struct kvm *kvm, int irq_source_id, u32 irq,
- int level)
-{
- struct kvm_kernel_irq_routing_entry entries[KVM_NR_IRQCHIPS];
- struct kvm_kernel_irq_routing_entry *e;
- int ret = -EINVAL;
- int idx;
-
- trace_kvm_set_irq(irq, level, irq_source_id);
-
- /*
- * Injection into either PIC or IOAPIC might need to scan all CPUs,
- * which would need to be retried from thread context; when same GSI
- * is connected to both PIC and IOAPIC, we'd have to report a
- * partial failure here.
- * Since there's no easy way to do this, we only support injecting MSI
- * which is limited to 1:1 GSI mapping.
- */
- idx = srcu_read_lock(&kvm->irq_srcu);
- if (kvm_irq_map_gsi(kvm, entries, irq) > 0) {
- e = &entries[0];
- ret = kvm_arch_set_irq_inatomic(e, kvm, irq_source_id,
- irq, level);
- }
- srcu_read_unlock(&kvm->irq_srcu, idx);
- return ret;
-}
-
-
-static irqreturn_t kvm_assigned_dev_msi(int irq, void *dev_id)
-{
- struct kvm_assigned_dev_kernel *assigned_dev = dev_id;
- int ret = kvm_set_irq_inatomic(assigned_dev->kvm,
- assigned_dev->irq_source_id,
- assigned_dev->guest_irq, 1);
- return unlikely(ret == -EWOULDBLOCK) ? IRQ_WAKE_THREAD : IRQ_HANDLED;
-}
-
-static irqreturn_t kvm_assigned_dev_thread_msi(int irq, void *dev_id)
-{
- struct kvm_assigned_dev_kernel *assigned_dev = dev_id;
-
- kvm_assigned_dev_raise_guest_irq(assigned_dev,
- assigned_dev->guest_irq);
-
- return IRQ_HANDLED;
-}
-
-static irqreturn_t kvm_assigned_dev_msix(int irq, void *dev_id)
-{
- struct kvm_assigned_dev_kernel *assigned_dev = dev_id;
- int index = find_index_from_host_irq(assigned_dev, irq);
- u32 vector;
- int ret = 0;
-
- if (index >= 0) {
- vector = assigned_dev->guest_msix_entries[index].vector;
- ret = kvm_set_irq_inatomic(assigned_dev->kvm,
- assigned_dev->irq_source_id,
- vector, 1);
- }
-
- return unlikely(ret == -EWOULDBLOCK) ? IRQ_WAKE_THREAD : IRQ_HANDLED;
-}
-
-static irqreturn_t kvm_assigned_dev_thread_msix(int irq, void *dev_id)
-{
- struct kvm_assigned_dev_kernel *assigned_dev = dev_id;
- int index = find_index_from_host_irq(assigned_dev, irq);
- u32 vector;
-
- if (index >= 0) {
- vector = assigned_dev->guest_msix_entries[index].vector;
- kvm_assigned_dev_raise_guest_irq(assigned_dev, vector);
- }
-
- return IRQ_HANDLED;
-}
-
-/* Ack the irq line for an assigned device */
-static void kvm_assigned_dev_ack_irq(struct kvm_irq_ack_notifier *kian)
-{
- struct kvm_assigned_dev_kernel *dev =
- container_of(kian, struct kvm_assigned_dev_kernel,
- ack_notifier);
-
- kvm_set_irq(dev->kvm, dev->irq_source_id, dev->guest_irq, 0, false);
-
- spin_lock(&dev->intx_mask_lock);
-
- if (!(dev->flags & KVM_DEV_ASSIGN_MASK_INTX)) {
- bool reassert = false;
-
- spin_lock_irq(&dev->intx_lock);
- /*
- * The guest IRQ may be shared so this ack can come from an
- * IRQ for another guest device.
- */
- if (dev->host_irq_disabled) {
- if (!(dev->flags & KVM_DEV_ASSIGN_PCI_2_3))
- enable_irq(dev->host_irq);
- else if (!pci_check_and_unmask_intx(dev->dev))
- reassert = true;
- dev->host_irq_disabled = reassert;
- }
- spin_unlock_irq(&dev->intx_lock);
-
- if (reassert)
- kvm_set_irq(dev->kvm, dev->irq_source_id,
- dev->guest_irq, 1, false);
- }
-
- spin_unlock(&dev->intx_mask_lock);
-}
-
-static void deassign_guest_irq(struct kvm *kvm,
- struct kvm_assigned_dev_kernel *assigned_dev)
-{
- if (assigned_dev->ack_notifier.gsi != -1)
- kvm_unregister_irq_ack_notifier(kvm,
- &assigned_dev->ack_notifier);
-
- kvm_set_irq(assigned_dev->kvm, assigned_dev->irq_source_id,
- assigned_dev->guest_irq, 0, false);
-
- if (assigned_dev->irq_source_id != -1)
- kvm_free_irq_source_id(kvm, assigned_dev->irq_source_id);
- assigned_dev->irq_source_id = -1;
- assigned_dev->irq_requested_type &= ~(KVM_DEV_IRQ_GUEST_MASK);
-}
-
-/* The function implicit hold kvm->lock mutex due to cancel_work_sync() */
-static void deassign_host_irq(struct kvm *kvm,
- struct kvm_assigned_dev_kernel *assigned_dev)
-{
- /*
- * We disable irq here to prevent further events.
- *
- * Notice this maybe result in nested disable if the interrupt type is
- * INTx, but it's OK for we are going to free it.
- *
- * If this function is a part of VM destroy, please ensure that till
- * now, the kvm state is still legal for probably we also have to wait
- * on a currently running IRQ handler.
- */
- if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSIX) {
- int i;
- for (i = 0; i < assigned_dev->entries_nr; i++)
- disable_irq(assigned_dev->host_msix_entries[i].vector);
-
- for (i = 0; i < assigned_dev->entries_nr; i++)
- free_irq(assigned_dev->host_msix_entries[i].vector,
- assigned_dev);
-
- assigned_dev->entries_nr = 0;
- kfree(assigned_dev->host_msix_entries);
- kfree(assigned_dev->guest_msix_entries);
- pci_disable_msix(assigned_dev->dev);
- } else {
- /* Deal with MSI and INTx */
- if ((assigned_dev->irq_requested_type &
- KVM_DEV_IRQ_HOST_INTX) &&
- (assigned_dev->flags & KVM_DEV_ASSIGN_PCI_2_3)) {
- spin_lock_irq(&assigned_dev->intx_lock);
- pci_intx(assigned_dev->dev, false);
- spin_unlock_irq(&assigned_dev->intx_lock);
- synchronize_irq(assigned_dev->host_irq);
- } else
- disable_irq(assigned_dev->host_irq);
-
- free_irq(assigned_dev->host_irq, assigned_dev);
-
- if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSI)
- pci_disable_msi(assigned_dev->dev);
- }
-
- assigned_dev->irq_requested_type &= ~(KVM_DEV_IRQ_HOST_MASK);
-}
-
-static int kvm_deassign_irq(struct kvm *kvm,
- struct kvm_assigned_dev_kernel *assigned_dev,
- unsigned long irq_requested_type)
-{
- unsigned long guest_irq_type, host_irq_type;
-
- if (!irqchip_in_kernel(kvm))
- return -EINVAL;
- /* no irq assignment to deassign */
- if (!assigned_dev->irq_requested_type)
- return -ENXIO;
-
- host_irq_type = irq_requested_type & KVM_DEV_IRQ_HOST_MASK;
- guest_irq_type = irq_requested_type & KVM_DEV_IRQ_GUEST_MASK;
-
- if (host_irq_type)
- deassign_host_irq(kvm, assigned_dev);
- if (guest_irq_type)
- deassign_guest_irq(kvm, assigned_dev);
-
- return 0;
-}
-
-static void kvm_free_assigned_irq(struct kvm *kvm,
- struct kvm_assigned_dev_kernel *assigned_dev)
-{
- kvm_deassign_irq(kvm, assigned_dev, assigned_dev->irq_requested_type);
-}
-
-static void kvm_free_assigned_device(struct kvm *kvm,
- struct kvm_assigned_dev_kernel
- *assigned_dev)
-{
- kvm_free_assigned_irq(kvm, assigned_dev);
-
- pci_reset_function(assigned_dev->dev);
- if (pci_load_and_free_saved_state(assigned_dev->dev,
- &assigned_dev->pci_saved_state))
- printk(KERN_INFO "%s: Couldn't reload %s saved state\n",
- __func__, dev_name(&assigned_dev->dev->dev));
- else
- pci_restore_state(assigned_dev->dev);
-
- pci_clear_dev_assigned(assigned_dev->dev);
-
- pci_release_regions(assigned_dev->dev);
- pci_disable_device(assigned_dev->dev);
- pci_dev_put(assigned_dev->dev);
-
- list_del(&assigned_dev->list);
- kfree(assigned_dev);
-}
-
-void kvm_free_all_assigned_devices(struct kvm *kvm)
-{
- struct kvm_assigned_dev_kernel *assigned_dev, *tmp;
-
- list_for_each_entry_safe(assigned_dev, tmp,
- &kvm->arch.assigned_dev_head, list) {
- kvm_free_assigned_device(kvm, assigned_dev);
- }
-}
-
-static int assigned_device_enable_host_intx(struct kvm *kvm,
- struct kvm_assigned_dev_kernel *dev)
-{
- irq_handler_t irq_handler;
- unsigned long flags;
-
- dev->host_irq = dev->dev->irq;
-
- /*
- * We can only share the IRQ line with other host devices if we are
- * able to disable the IRQ source at device-level - independently of
- * the guest driver. Otherwise host devices may suffer from unbounded
- * IRQ latencies when the guest keeps the line asserted.
- */
- if (dev->flags & KVM_DEV_ASSIGN_PCI_2_3) {
- irq_handler = kvm_assigned_dev_intx;
- flags = IRQF_SHARED;
- } else {
- irq_handler = NULL;
- flags = IRQF_ONESHOT;
- }
- if (request_threaded_irq(dev->host_irq, irq_handler,
- kvm_assigned_dev_thread_intx, flags,
- dev->irq_name, dev))
- return -EIO;
-
- if (dev->flags & KVM_DEV_ASSIGN_PCI_2_3) {
- spin_lock_irq(&dev->intx_lock);
- pci_intx(dev->dev, true);
- spin_unlock_irq(&dev->intx_lock);
- }
- return 0;
-}
-
-static int assigned_device_enable_host_msi(struct kvm *kvm,
- struct kvm_assigned_dev_kernel *dev)
-{
- int r;
-
- if (!dev->dev->msi_enabled) {
- r = pci_enable_msi(dev->dev);
- if (r)
- return r;
- }
-
- dev->host_irq = dev->dev->irq;
- if (request_threaded_irq(dev->host_irq, kvm_assigned_dev_msi,
- kvm_assigned_dev_thread_msi, 0,
- dev->irq_name, dev)) {
- pci_disable_msi(dev->dev);
- return -EIO;
- }
-
- return 0;
-}
-
-static int assigned_device_enable_host_msix(struct kvm *kvm,
- struct kvm_assigned_dev_kernel *dev)
-{
- int i, r = -EINVAL;
-
- /* host_msix_entries and guest_msix_entries should have been
- * initialized */
- if (dev->entries_nr == 0)
- return r;
-
- r = pci_enable_msix_exact(dev->dev,
- dev->host_msix_entries, dev->entries_nr);
- if (r)
- return r;
-
- for (i = 0; i < dev->entries_nr; i++) {
- r = request_threaded_irq(dev->host_msix_entries[i].vector,
- kvm_assigned_dev_msix,
- kvm_assigned_dev_thread_msix,
- 0, dev->irq_name, dev);
- if (r)
- goto err;
- }
-
- return 0;
-err:
- for (i -= 1; i >= 0; i--)
- free_irq(dev->host_msix_entries[i].vector, dev);
- pci_disable_msix(dev->dev);
- return r;
-}
-
-static int assigned_device_enable_guest_intx(struct kvm *kvm,
- struct kvm_assigned_dev_kernel *dev,
- struct kvm_assigned_irq *irq)
-{
- dev->guest_irq = irq->guest_irq;
- dev->ack_notifier.gsi = irq->guest_irq;
- return 0;
-}
-
-static int assigned_device_enable_guest_msi(struct kvm *kvm,
- struct kvm_assigned_dev_kernel *dev,
- struct kvm_assigned_irq *irq)
-{
- dev->guest_irq = irq->guest_irq;
- dev->ack_notifier.gsi = -1;
- return 0;
-}
-
-static int assigned_device_enable_guest_msix(struct kvm *kvm,
- struct kvm_assigned_dev_kernel *dev,
- struct kvm_assigned_irq *irq)
-{
- dev->guest_irq = irq->guest_irq;
- dev->ack_notifier.gsi = -1;
- return 0;
-}
-
-static int assign_host_irq(struct kvm *kvm,
- struct kvm_assigned_dev_kernel *dev,
- __u32 host_irq_type)
-{
- int r = -EEXIST;
-
- if (dev->irq_requested_type & KVM_DEV_IRQ_HOST_MASK)
- return r;
-
- snprintf(dev->irq_name, sizeof(dev->irq_name), "kvm:%s",
- pci_name(dev->dev));
-
- switch (host_irq_type) {
- case KVM_DEV_IRQ_HOST_INTX:
- r = assigned_device_enable_host_intx(kvm, dev);
- break;
- case KVM_DEV_IRQ_HOST_MSI:
- r = assigned_device_enable_host_msi(kvm, dev);
- break;
- case KVM_DEV_IRQ_HOST_MSIX:
- r = assigned_device_enable_host_msix(kvm, dev);
- break;
- default:
- r = -EINVAL;
- }
- dev->host_irq_disabled = false;
-
- if (!r)
- dev->irq_requested_type |= host_irq_type;
-
- return r;
-}
-
-static int assign_guest_irq(struct kvm *kvm,
- struct kvm_assigned_dev_kernel *dev,
- struct kvm_assigned_irq *irq,
- unsigned long guest_irq_type)
-{
- int id;
- int r = -EEXIST;
-
- if (dev->irq_requested_type & KVM_DEV_IRQ_GUEST_MASK)
- return r;
-
- id = kvm_request_irq_source_id(kvm);
- if (id < 0)
- return id;
-
- dev->irq_source_id = id;
-
- switch (guest_irq_type) {
- case KVM_DEV_IRQ_GUEST_INTX:
- r = assigned_device_enable_guest_intx(kvm, dev, irq);
- break;
- case KVM_DEV_IRQ_GUEST_MSI:
- r = assigned_device_enable_guest_msi(kvm, dev, irq);
- break;
- case KVM_DEV_IRQ_GUEST_MSIX:
- r = assigned_device_enable_guest_msix(kvm, dev, irq);
- break;
- default:
- r = -EINVAL;
- }
-
- if (!r) {
- dev->irq_requested_type |= guest_irq_type;
- if (dev->ack_notifier.gsi != -1)
- kvm_register_irq_ack_notifier(kvm, &dev->ack_notifier);
- } else {
- kvm_free_irq_source_id(kvm, dev->irq_source_id);
- dev->irq_source_id = -1;
- }
-
- return r;
-}
-
-/* TODO Deal with KVM_DEV_IRQ_ASSIGNED_MASK_MSIX */
-static int kvm_vm_ioctl_assign_irq(struct kvm *kvm,
- struct kvm_assigned_irq *assigned_irq)
-{
- int r = -EINVAL;
- struct kvm_assigned_dev_kernel *match;
- unsigned long host_irq_type, guest_irq_type;
-
- if (!irqchip_in_kernel(kvm))
- return r;
-
- mutex_lock(&kvm->lock);
- r = -ENODEV;
- match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
- assigned_irq->assigned_dev_id);
- if (!match)
- goto out;
-
- host_irq_type = (assigned_irq->flags & KVM_DEV_IRQ_HOST_MASK);
- guest_irq_type = (assigned_irq->flags & KVM_DEV_IRQ_GUEST_MASK);
-
- r = -EINVAL;
- /* can only assign one type at a time */
- if (hweight_long(host_irq_type) > 1)
- goto out;
- if (hweight_long(guest_irq_type) > 1)
- goto out;
- if (host_irq_type == 0 && guest_irq_type == 0)
- goto out;
-
- r = 0;
- if (host_irq_type)
- r = assign_host_irq(kvm, match, host_irq_type);
- if (r)
- goto out;
-
- if (guest_irq_type)
- r = assign_guest_irq(kvm, match, assigned_irq, guest_irq_type);
-out:
- mutex_unlock(&kvm->lock);
- return r;
-}
-
-static int kvm_vm_ioctl_deassign_dev_irq(struct kvm *kvm,
- struct kvm_assigned_irq
- *assigned_irq)
-{
- int r = -ENODEV;
- struct kvm_assigned_dev_kernel *match;
- unsigned long irq_type;
-
- mutex_lock(&kvm->lock);
-
- match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
- assigned_irq->assigned_dev_id);
- if (!match)
- goto out;
-
- irq_type = assigned_irq->flags & (KVM_DEV_IRQ_HOST_MASK |
- KVM_DEV_IRQ_GUEST_MASK);
- r = kvm_deassign_irq(kvm, match, irq_type);
-out:
- mutex_unlock(&kvm->lock);
- return r;
-}
-
-/*
- * We want to test whether the caller has been granted permissions to
- * use this device. To be able to configure and control the device,
- * the user needs access to PCI configuration space and BAR resources.
- * These are accessed through PCI sysfs. PCI config space is often
- * passed to the process calling this ioctl via file descriptor, so we
- * can't rely on access to that file. We can check for permissions
- * on each of the BAR resource files, which is a pretty clear
- * indicator that the user has been granted access to the device.
- */
-static int probe_sysfs_permissions(struct pci_dev *dev)
-{
-#ifdef CONFIG_SYSFS
- int i;
- bool bar_found = false;
-
- for (i = PCI_STD_RESOURCES; i <= PCI_STD_RESOURCE_END; i++) {
- char *kpath, *syspath;
- struct path path;
- struct inode *inode;
- int r;
-
- if (!pci_resource_len(dev, i))
- continue;
-
- kpath = kobject_get_path(&dev->dev.kobj, GFP_KERNEL);
- if (!kpath)
- return -ENOMEM;
-
- /* Per sysfs-rules, sysfs is always at /sys */
- syspath = kasprintf(GFP_KERNEL, "/sys%s/resource%d", kpath, i);
- kfree(kpath);
- if (!syspath)
- return -ENOMEM;
-
- r = kern_path(syspath, LOOKUP_FOLLOW, &path);
- kfree(syspath);
- if (r)
- return r;
-
- inode = d_backing_inode(path.dentry);
-
- r = inode_permission(inode, MAY_READ | MAY_WRITE | MAY_ACCESS);
- path_put(&path);
- if (r)
- return r;
-
- bar_found = true;
- }
-
- /* If no resources, probably something special */
- if (!bar_found)
- return -EPERM;
-
- return 0;
-#else
- return -EINVAL; /* No way to control the device without sysfs */
-#endif
-}
-
-static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
- struct kvm_assigned_pci_dev *assigned_dev)
-{
- int r = 0, idx;
- struct kvm_assigned_dev_kernel *match;
- struct pci_dev *dev;
-
- if (!(assigned_dev->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU))
- return -EINVAL;
-
- mutex_lock(&kvm->lock);
- idx = srcu_read_lock(&kvm->srcu);
-
- match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
- assigned_dev->assigned_dev_id);
- if (match) {
- /* device already assigned */
- r = -EEXIST;
- goto out;
- }
-
- match = kzalloc(sizeof(struct kvm_assigned_dev_kernel), GFP_KERNEL);
- if (match == NULL) {
- printk(KERN_INFO "%s: Couldn't allocate memory\n",
- __func__);
- r = -ENOMEM;
- goto out;
- }
- dev = pci_get_domain_bus_and_slot(assigned_dev->segnr,
- assigned_dev->busnr,
- assigned_dev->devfn);
- if (!dev) {
- printk(KERN_INFO "%s: host device not found\n", __func__);
- r = -EINVAL;
- goto out_free;
- }
-
- /* Don't allow bridges to be assigned */
- if (dev->hdr_type != PCI_HEADER_TYPE_NORMAL) {
- r = -EPERM;
- goto out_put;
- }
-
- r = probe_sysfs_permissions(dev);
- if (r)
- goto out_put;
-
- if (pci_enable_device(dev)) {
- printk(KERN_INFO "%s: Could not enable PCI device\n", __func__);
- r = -EBUSY;
- goto out_put;
- }
- r = pci_request_regions(dev, "kvm_assigned_device");
- if (r) {
- printk(KERN_INFO "%s: Could not get access to device regions\n",
- __func__);
- goto out_disable;
- }
-
- pci_reset_function(dev);
- pci_save_state(dev);
- match->pci_saved_state = pci_store_saved_state(dev);
- if (!match->pci_saved_state)
- printk(KERN_DEBUG "%s: Couldn't store %s saved state\n",
- __func__, dev_name(&dev->dev));
-
- if (!pci_intx_mask_supported(dev))
- assigned_dev->flags &= ~KVM_DEV_ASSIGN_PCI_2_3;
-
- match->assigned_dev_id = assigned_dev->assigned_dev_id;
- match->host_segnr = assigned_dev->segnr;
- match->host_busnr = assigned_dev->busnr;
- match->host_devfn = assigned_dev->devfn;
- match->flags = assigned_dev->flags;
- match->dev = dev;
- spin_lock_init(&match->intx_lock);
- spin_lock_init(&match->intx_mask_lock);
- match->irq_source_id = -1;
- match->kvm = kvm;
- match->ack_notifier.irq_acked = kvm_assigned_dev_ack_irq;
-
- list_add(&match->list, &kvm->arch.assigned_dev_head);
-
- if (!kvm->arch.iommu_domain) {
- r = kvm_iommu_map_guest(kvm);
- if (r)
- goto out_list_del;
- }
- r = kvm_assign_device(kvm, match->dev);
- if (r)
- goto out_list_del;
-
-out:
- srcu_read_unlock(&kvm->srcu, idx);
- mutex_unlock(&kvm->lock);
- return r;
-out_list_del:
- if (pci_load_and_free_saved_state(dev, &match->pci_saved_state))
- printk(KERN_INFO "%s: Couldn't reload %s saved state\n",
- __func__, dev_name(&dev->dev));
- list_del(&match->list);
- pci_release_regions(dev);
-out_disable:
- pci_disable_device(dev);
-out_put:
- pci_dev_put(dev);
-out_free:
- kfree(match);
- srcu_read_unlock(&kvm->srcu, idx);
- mutex_unlock(&kvm->lock);
- return r;
-}
-
-static int kvm_vm_ioctl_deassign_device(struct kvm *kvm,
- struct kvm_assigned_pci_dev *assigned_dev)
-{
- int r = 0;
- struct kvm_assigned_dev_kernel *match;
-
- mutex_lock(&kvm->lock);
-
- match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
- assigned_dev->assigned_dev_id);
- if (!match) {
- printk(KERN_INFO "%s: device hasn't been assigned before, "
- "so cannot be deassigned\n", __func__);
- r = -EINVAL;
- goto out;
- }
-
- kvm_deassign_device(kvm, match->dev);
-
- kvm_free_assigned_device(kvm, match);
-
-out:
- mutex_unlock(&kvm->lock);
- return r;
-}
-
-
-static int kvm_vm_ioctl_set_msix_nr(struct kvm *kvm,
- struct kvm_assigned_msix_nr *entry_nr)
-{
- int r = 0;
- struct kvm_assigned_dev_kernel *adev;
-
- mutex_lock(&kvm->lock);
-
- adev = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
- entry_nr->assigned_dev_id);
- if (!adev) {
- r = -EINVAL;
- goto msix_nr_out;
- }
-
- if (adev->entries_nr == 0) {
- adev->entries_nr = entry_nr->entry_nr;
- if (adev->entries_nr == 0 ||
- adev->entries_nr > KVM_MAX_MSIX_PER_DEV) {
- r = -EINVAL;
- goto msix_nr_out;
- }
-
- adev->host_msix_entries = kzalloc(sizeof(struct msix_entry) *
- entry_nr->entry_nr,
- GFP_KERNEL);
- if (!adev->host_msix_entries) {
- r = -ENOMEM;
- goto msix_nr_out;
- }
- adev->guest_msix_entries =
- kzalloc(sizeof(struct msix_entry) * entry_nr->entry_nr,
- GFP_KERNEL);
- if (!adev->guest_msix_entries) {
- kfree(adev->host_msix_entries);
- r = -ENOMEM;
- goto msix_nr_out;
- }
- } else /* Not allowed set MSI-X number twice */
- r = -EINVAL;
-msix_nr_out:
- mutex_unlock(&kvm->lock);
- return r;
-}
-
-static int kvm_vm_ioctl_set_msix_entry(struct kvm *kvm,
- struct kvm_assigned_msix_entry *entry)
-{
- int r = 0, i;
- struct kvm_assigned_dev_kernel *adev;
-
- mutex_lock(&kvm->lock);
-
- adev = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
- entry->assigned_dev_id);
-
- if (!adev) {
- r = -EINVAL;
- goto msix_entry_out;
- }
-
- for (i = 0; i < adev->entries_nr; i++)
- if (adev->guest_msix_entries[i].vector == 0 ||
- adev->guest_msix_entries[i].entry == entry->entry) {
- adev->guest_msix_entries[i].entry = entry->entry;
- adev->guest_msix_entries[i].vector = entry->gsi;
- adev->host_msix_entries[i].entry = entry->entry;
- break;
- }
- if (i == adev->entries_nr) {
- r = -ENOSPC;
- goto msix_entry_out;
- }
-
-msix_entry_out:
- mutex_unlock(&kvm->lock);
-
- return r;
-}
-
-static int kvm_vm_ioctl_set_pci_irq_mask(struct kvm *kvm,
- struct kvm_assigned_pci_dev *assigned_dev)
-{
- int r = 0;
- struct kvm_assigned_dev_kernel *match;
-
- mutex_lock(&kvm->lock);
-
- match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
- assigned_dev->assigned_dev_id);
- if (!match) {
- r = -ENODEV;
- goto out;
- }
-
- spin_lock(&match->intx_mask_lock);
-
- match->flags &= ~KVM_DEV_ASSIGN_MASK_INTX;
- match->flags |= assigned_dev->flags & KVM_DEV_ASSIGN_MASK_INTX;
-
- if (match->irq_requested_type & KVM_DEV_IRQ_GUEST_INTX) {
- if (assigned_dev->flags & KVM_DEV_ASSIGN_MASK_INTX) {
- kvm_set_irq(match->kvm, match->irq_source_id,
- match->guest_irq, 0, false);
- /*
- * Masking at hardware-level is performed on demand,
- * i.e. when an IRQ actually arrives at the host.
- */
- } else if (!(assigned_dev->flags & KVM_DEV_ASSIGN_PCI_2_3)) {
- /*
- * Unmask the IRQ line if required. Unmasking at
- * device level will be performed by user space.
- */
- spin_lock_irq(&match->intx_lock);
- if (match->host_irq_disabled) {
- enable_irq(match->host_irq);
- match->host_irq_disabled = false;
- }
- spin_unlock_irq(&match->intx_lock);
- }
- }
-
- spin_unlock(&match->intx_mask_lock);
-
-out:
- mutex_unlock(&kvm->lock);
- return r;
-}
-
-long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl,
- unsigned long arg)
-{
- void __user *argp = (void __user *)arg;
- int r;
-
- switch (ioctl) {
- case KVM_ASSIGN_PCI_DEVICE: {
- struct kvm_assigned_pci_dev assigned_dev;
-
- r = -EFAULT;
- if (copy_from_user(&assigned_dev, argp, sizeof assigned_dev))
- goto out;
- r = kvm_vm_ioctl_assign_device(kvm, &assigned_dev);
- if (r)
- goto out;
- break;
- }
- case KVM_ASSIGN_IRQ: {
- r = -EOPNOTSUPP;
- break;
- }
- case KVM_ASSIGN_DEV_IRQ: {
- struct kvm_assigned_irq assigned_irq;
-
- r = -EFAULT;
- if (copy_from_user(&assigned_irq, argp, sizeof assigned_irq))
- goto out;
- r = kvm_vm_ioctl_assign_irq(kvm, &assigned_irq);
- if (r)
- goto out;
- break;
- }
- case KVM_DEASSIGN_DEV_IRQ: {
- struct kvm_assigned_irq assigned_irq;
-
- r = -EFAULT;
- if (copy_from_user(&assigned_irq, argp, sizeof assigned_irq))
- goto out;
- r = kvm_vm_ioctl_deassign_dev_irq(kvm, &assigned_irq);
- if (r)
- goto out;
- break;
- }
- case KVM_DEASSIGN_PCI_DEVICE: {
- struct kvm_assigned_pci_dev assigned_dev;
-
- r = -EFAULT;
- if (copy_from_user(&assigned_dev, argp, sizeof assigned_dev))
- goto out;
- r = kvm_vm_ioctl_deassign_device(kvm, &assigned_dev);
- if (r)
- goto out;
- break;
- }
- case KVM_ASSIGN_SET_MSIX_NR: {
- struct kvm_assigned_msix_nr entry_nr;
- r = -EFAULT;
- if (copy_from_user(&entry_nr, argp, sizeof entry_nr))
- goto out;
- r = kvm_vm_ioctl_set_msix_nr(kvm, &entry_nr);
- if (r)
- goto out;
- break;
- }
- case KVM_ASSIGN_SET_MSIX_ENTRY: {
- struct kvm_assigned_msix_entry entry;
- r = -EFAULT;
- if (copy_from_user(&entry, argp, sizeof entry))
- goto out;
- r = kvm_vm_ioctl_set_msix_entry(kvm, &entry);
- if (r)
- goto out;
- break;
- }
- case KVM_ASSIGN_SET_INTX_MASK: {
- struct kvm_assigned_pci_dev assigned_dev;
-
- r = -EFAULT;
- if (copy_from_user(&assigned_dev, argp, sizeof assigned_dev))
- goto out;
- r = kvm_vm_ioctl_set_pci_irq_mask(kvm, &assigned_dev);
- break;
- }
- default:
- r = -ENOTTY;
- break;
- }
-out:
- return r;
-}
diff --git a/arch/x86/kvm/assigned-dev.h b/arch/x86/kvm/assigned-dev.h
deleted file mode 100644
index a428c1a211b2..000000000000
--- a/arch/x86/kvm/assigned-dev.h
+++ /dev/null
@@ -1,32 +0,0 @@
-#ifndef ARCH_X86_KVM_ASSIGNED_DEV_H
-#define ARCH_X86_KVM_ASSIGNED_DEV_H
-
-#include <linux/kvm_host.h>
-
-#ifdef CONFIG_KVM_DEVICE_ASSIGNMENT
-int kvm_assign_device(struct kvm *kvm, struct pci_dev *pdev);
-int kvm_deassign_device(struct kvm *kvm, struct pci_dev *pdev);
-
-int kvm_iommu_map_guest(struct kvm *kvm);
-int kvm_iommu_unmap_guest(struct kvm *kvm);
-
-long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl,
- unsigned long arg);
-
-void kvm_free_all_assigned_devices(struct kvm *kvm);
-#else
-static inline int kvm_iommu_unmap_guest(struct kvm *kvm)
-{
- return 0;
-}
-
-static inline long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl,
- unsigned long arg)
-{
- return -ENOTTY;
-}
-
-static inline void kvm_free_all_assigned_devices(struct kvm *kvm) {}
-#endif /* CONFIG_KVM_DEVICE_ASSIGNMENT */
-
-#endif /* ARCH_X86_KVM_ASSIGNED_DEV_H */
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index efde6cc50875..a181ae76c71c 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -876,6 +876,9 @@ int kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
{
u32 eax, ebx, ecx, edx;
+ if (cpuid_fault_enabled(vcpu) && !kvm_require_cpl(vcpu, 0))
+ return 1;
+
eax = kvm_register_read(vcpu, VCPU_REGS_RAX);
ecx = kvm_register_read(vcpu, VCPU_REGS_RCX);
kvm_cpuid(vcpu, &eax, &ebx, &ecx, &edx);
diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
index 35058c2c0eea..a6fd40aade7c 100644
--- a/arch/x86/kvm/cpuid.h
+++ b/arch/x86/kvm/cpuid.h
@@ -205,4 +205,15 @@ static inline int guest_cpuid_stepping(struct kvm_vcpu *vcpu)
return x86_stepping(best->eax);
}
+static inline bool supports_cpuid_fault(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.msr_platform_info & MSR_PLATFORM_INFO_CPUID_FAULT;
+}
+
+static inline bool cpuid_fault_enabled(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.msr_misc_features_enables &
+ MSR_MISC_FEATURES_ENABLES_CPUID_FAULT;
+}
+
#endif
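The cpuid.c/cpuid.h hunks above wire up CPUID faulting: once the guest sets the CPUID-fault bit in MSR_MISC_FEATURES_ENABLES, a CPUID executed at CPL > 0 is answered with #GP(0) instead of being emulated. The standalone sketch below models only that predicate; the struct, the helper and the assumption that the fault flag is bit 0 are illustrative and not taken from this patch.

/* Userspace model of the CPUID-faulting check, not kernel code. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MSR_MISC_FEATURES_ENABLES_CPUID_FAULT (1ULL << 0)   /* assumed bit 0 */

struct vcpu_model {
        uint64_t msr_misc_features_enables;
        int cpl;                        /* current privilege level */
};

/* CPUID is turned into #GP(0) when faulting is enabled and CPL > 0. */
static bool cpuid_should_fault(const struct vcpu_model *v)
{
        return (v->msr_misc_features_enables &
                MSR_MISC_FEATURES_ENABLES_CPUID_FAULT) && v->cpl > 0;
}

int main(void)
{
        struct vcpu_model v = { MSR_MISC_FEATURES_ENABLES_CPUID_FAULT, 3 };

        printf("CPL3 CPUID faults: %d\n", cpuid_should_fault(&v));
        v.cpl = 0;
        printf("CPL0 CPUID faults: %d\n", cpuid_should_fault(&v));
        return 0;
}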
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 45c7306c8780..c25cfaf584e7 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -2547,7 +2547,7 @@ static int em_rsm(struct x86_emulate_ctxt *ctxt)
u64 smbase;
int ret;
- if ((ctxt->emul_flags & X86EMUL_SMM_MASK) == 0)
+ if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_MASK) == 0)
return emulate_ud(ctxt);
/*
@@ -2596,11 +2596,11 @@ static int em_rsm(struct x86_emulate_ctxt *ctxt)
return X86EMUL_UNHANDLEABLE;
}
- if ((ctxt->emul_flags & X86EMUL_SMM_INSIDE_NMI_MASK) == 0)
+ if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_INSIDE_NMI_MASK) == 0)
ctxt->ops->set_nmi_mask(ctxt, false);
- ctxt->emul_flags &= ~X86EMUL_SMM_INSIDE_NMI_MASK;
- ctxt->emul_flags &= ~X86EMUL_SMM_MASK;
+ ctxt->ops->set_hflags(ctxt, ctxt->ops->get_hflags(ctxt) &
+ ~(X86EMUL_SMM_INSIDE_NMI_MASK | X86EMUL_SMM_MASK));
return X86EMUL_CONTINUE;
}
@@ -3854,6 +3854,13 @@ static int em_sti(struct x86_emulate_ctxt *ctxt)
static int em_cpuid(struct x86_emulate_ctxt *ctxt)
{
u32 eax, ebx, ecx, edx;
+ u64 msr = 0;
+
+ ctxt->ops->get_msr(ctxt, MSR_MISC_FEATURES_ENABLES, &msr);
+ if (msr & MSR_MISC_FEATURES_ENABLES_CPUID_FAULT &&
+ ctxt->ops->cpl(ctxt)) {
+ return emulate_gp(ctxt, 0);
+ }
eax = reg_read(ctxt, VCPU_REGS_RAX);
ecx = reg_read(ctxt, VCPU_REGS_RCX);
@@ -5316,6 +5323,7 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
const struct x86_emulate_ops *ops = ctxt->ops;
int rc = X86EMUL_CONTINUE;
int saved_dst_type = ctxt->dst.type;
+ unsigned emul_flags;
ctxt->mem_read.pos = 0;
@@ -5330,6 +5338,7 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
goto done;
}
+ emul_flags = ctxt->ops->get_hflags(ctxt);
if (unlikely(ctxt->d &
(No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) {
if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) ||
@@ -5363,7 +5372,7 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
fetch_possible_mmx_operand(ctxt, &ctxt->dst);
}
- if (unlikely(ctxt->emul_flags & X86EMUL_GUEST_MASK) && ctxt->intercept) {
+ if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && ctxt->intercept) {
rc = emulator_check_intercept(ctxt, ctxt->intercept,
X86_ICPT_PRE_EXCEPT);
if (rc != X86EMUL_CONTINUE)
@@ -5392,7 +5401,7 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
goto done;
}
- if (unlikely(ctxt->emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
+ if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
rc = emulator_check_intercept(ctxt, ctxt->intercept,
X86_ICPT_POST_EXCEPT);
if (rc != X86EMUL_CONTINUE)
@@ -5446,7 +5455,7 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
special_insn:
- if (unlikely(ctxt->emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
+ if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
rc = emulator_check_intercept(ctxt, ctxt->intercept,
X86_ICPT_POST_MEMACCESS);
if (rc != X86EMUL_CONTINUE)
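The emulate.c hunks above drop the cached ctxt->emul_flags field in favour of get_hflags()/set_hflags() callbacks, so the emulator always reads the vcpu's live SMM state and writes changes straight back through the ops table. A minimal userspace model of that accessor shape follows; the type and flag names are stand-ins, only the read/modify/write-through-ops pattern mirrors the patch.

/* Illustrative only: flags are read and cleared through ops callbacks. */
#include <stdio.h>

#define SMM_MASK            (1u << 0)
#define SMM_INSIDE_NMI_MASK (1u << 1)

struct emu_ctxt;

struct emu_ops {
        unsigned (*get_hflags)(struct emu_ctxt *ctxt);
        void (*set_hflags)(struct emu_ctxt *ctxt, unsigned flags);
};

struct emu_ctxt {
        const struct emu_ops *ops;
        unsigned backing_hflags;        /* stands in for vcpu->arch.hflags */
};

static unsigned get_hflags(struct emu_ctxt *ctxt)
{
        return ctxt->backing_hflags;
}

static void set_hflags(struct emu_ctxt *ctxt, unsigned flags)
{
        ctxt->backing_hflags = flags;
}

static const struct emu_ops ops = { get_hflags, set_hflags };

int main(void)
{
        struct emu_ctxt ctxt = { &ops, SMM_MASK | SMM_INSIDE_NMI_MASK };

        /* RSM-style exit from SMM: clear both bits through the callbacks. */
        ctxt.ops->set_hflags(&ctxt, ctxt.ops->get_hflags(&ctxt) &
                             ~(SMM_MASK | SMM_INSIDE_NMI_MASK));
        printf("hflags after RSM: %#x\n", ctxt.ops->get_hflags(&ctxt));
        return 0;
}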
diff --git a/arch/x86/kvm/i8259.c b/arch/x86/kvm/i8259.c
index 047b17a26269..bdcd4139eca9 100644
--- a/arch/x86/kvm/i8259.c
+++ b/arch/x86/kvm/i8259.c
@@ -49,7 +49,7 @@ static void pic_unlock(struct kvm_pic *s)
__releases(&s->lock)
{
bool wakeup = s->wakeup_needed;
- struct kvm_vcpu *vcpu, *found = NULL;
+ struct kvm_vcpu *vcpu;
int i;
s->wakeup_needed = false;
@@ -59,16 +59,11 @@ static void pic_unlock(struct kvm_pic *s)
if (wakeup) {
kvm_for_each_vcpu(i, vcpu, s->kvm) {
if (kvm_apic_accept_pic_intr(vcpu)) {
- found = vcpu;
- break;
+ kvm_make_request(KVM_REQ_EVENT, vcpu);
+ kvm_vcpu_kick(vcpu);
+ return;
}
}
-
- if (!found)
- return;
-
- kvm_make_request(KVM_REQ_EVENT, found);
- kvm_vcpu_kick(found);
}
}
@@ -239,7 +234,7 @@ static inline void pic_intack(struct kvm_kpic_state *s, int irq)
int kvm_pic_read_irq(struct kvm *kvm)
{
int irq, irq2, intno;
- struct kvm_pic *s = pic_irqchip(kvm);
+ struct kvm_pic *s = kvm->arch.vpic;
s->output = 0;
@@ -273,7 +268,7 @@ int kvm_pic_read_irq(struct kvm *kvm)
return intno;
}
-void kvm_pic_reset(struct kvm_kpic_state *s)
+static void kvm_pic_reset(struct kvm_kpic_state *s)
{
int irq, i;
struct kvm_vcpu *vcpu;
@@ -422,19 +417,16 @@ static u32 pic_poll_read(struct kvm_kpic_state *s, u32 addr1)
return ret;
}
-static u32 pic_ioport_read(void *opaque, u32 addr1)
+static u32 pic_ioport_read(void *opaque, u32 addr)
{
struct kvm_kpic_state *s = opaque;
- unsigned int addr;
int ret;
- addr = addr1;
- addr &= 1;
if (s->poll) {
- ret = pic_poll_read(s, addr1);
+ ret = pic_poll_read(s, addr);
s->poll = 0;
} else
- if (addr == 0)
+ if ((addr & 1) == 0)
if (s->read_reg_select)
ret = s->isr;
else
@@ -456,76 +448,64 @@ static u32 elcr_ioport_read(void *opaque, u32 addr1)
return s->elcr;
}
-static int picdev_in_range(gpa_t addr)
-{
- switch (addr) {
- case 0x20:
- case 0x21:
- case 0xa0:
- case 0xa1:
- case 0x4d0:
- case 0x4d1:
- return 1;
- default:
- return 0;
- }
-}
-
static int picdev_write(struct kvm_pic *s,
gpa_t addr, int len, const void *val)
{
unsigned char data = *(unsigned char *)val;
- if (!picdev_in_range(addr))
- return -EOPNOTSUPP;
if (len != 1) {
pr_pic_unimpl("non byte write\n");
return 0;
}
- pic_lock(s);
switch (addr) {
case 0x20:
case 0x21:
case 0xa0:
case 0xa1:
+ pic_lock(s);
pic_ioport_write(&s->pics[addr >> 7], addr, data);
+ pic_unlock(s);
break;
case 0x4d0:
case 0x4d1:
+ pic_lock(s);
elcr_ioport_write(&s->pics[addr & 1], addr, data);
+ pic_unlock(s);
break;
+ default:
+ return -EOPNOTSUPP;
}
- pic_unlock(s);
return 0;
}
static int picdev_read(struct kvm_pic *s,
gpa_t addr, int len, void *val)
{
- unsigned char data = 0;
- if (!picdev_in_range(addr))
- return -EOPNOTSUPP;
+ unsigned char *data = (unsigned char *)val;
if (len != 1) {
memset(val, 0, len);
pr_pic_unimpl("non byte read\n");
return 0;
}
- pic_lock(s);
switch (addr) {
case 0x20:
case 0x21:
case 0xa0:
case 0xa1:
- data = pic_ioport_read(&s->pics[addr >> 7], addr);
+ pic_lock(s);
+ *data = pic_ioport_read(&s->pics[addr >> 7], addr);
+ pic_unlock(s);
break;
case 0x4d0:
case 0x4d1:
- data = elcr_ioport_read(&s->pics[addr & 1], addr);
+ pic_lock(s);
+ *data = elcr_ioport_read(&s->pics[addr & 1], addr);
+ pic_unlock(s);
break;
+ default:
+ return -EOPNOTSUPP;
}
- *(unsigned char *)val = data;
- pic_unlock(s);
return 0;
}
@@ -576,7 +556,7 @@ static int picdev_eclr_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
*/
static void pic_irq_request(struct kvm *kvm, int level)
{
- struct kvm_pic *s = pic_irqchip(kvm);
+ struct kvm_pic *s = kvm->arch.vpic;
if (!s->output)
s->wakeup_needed = true;
@@ -660,9 +640,11 @@ void kvm_pic_destroy(struct kvm *kvm)
if (!vpic)
return;
+ mutex_lock(&kvm->slots_lock);
kvm_io_bus_unregister_dev(vpic->kvm, KVM_PIO_BUS, &vpic->dev_master);
kvm_io_bus_unregister_dev(vpic->kvm, KVM_PIO_BUS, &vpic->dev_slave);
kvm_io_bus_unregister_dev(vpic->kvm, KVM_PIO_BUS, &vpic->dev_eclr);
+ mutex_unlock(&kvm->slots_lock);
kvm->arch.vpic = NULL;
kfree(vpic);
diff --git a/arch/x86/kvm/ioapic.c b/arch/x86/kvm/ioapic.c
index 289270a6aecb..bdff437acbcb 100644
--- a/arch/x86/kvm/ioapic.c
+++ b/arch/x86/kvm/ioapic.c
@@ -266,11 +266,9 @@ void kvm_ioapic_scan_entry(struct kvm_vcpu *vcpu, ulong *ioapic_handled_vectors)
spin_unlock(&ioapic->lock);
}
-void kvm_vcpu_request_scan_ioapic(struct kvm *kvm)
+void kvm_arch_post_irq_ack_notifier_list_update(struct kvm *kvm)
{
- struct kvm_ioapic *ioapic = kvm->arch.vioapic;
-
- if (!ioapic)
+ if (!ioapic_in_kernel(kvm))
return;
kvm_make_scan_ioapic_request(kvm);
}
@@ -315,7 +313,7 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
if (e->fields.trig_mode == IOAPIC_LEVEL_TRIG
&& ioapic->irr & (1 << index))
ioapic_service(ioapic, index, false);
- kvm_vcpu_request_scan_ioapic(ioapic->kvm);
+ kvm_make_scan_ioapic_request(ioapic->kvm);
break;
}
}
@@ -624,10 +622,8 @@ int kvm_ioapic_init(struct kvm *kvm)
if (ret < 0) {
kvm->arch.vioapic = NULL;
kfree(ioapic);
- return ret;
}
- kvm_vcpu_request_scan_ioapic(kvm);
return ret;
}
@@ -639,36 +635,32 @@ void kvm_ioapic_destroy(struct kvm *kvm)
return;
cancel_delayed_work_sync(&ioapic->eoi_inject);
+ mutex_lock(&kvm->slots_lock);
kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &ioapic->dev);
+ mutex_unlock(&kvm->slots_lock);
kvm->arch.vioapic = NULL;
kfree(ioapic);
}
-int kvm_get_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state)
+void kvm_get_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state)
{
- struct kvm_ioapic *ioapic = ioapic_irqchip(kvm);
- if (!ioapic)
- return -EINVAL;
+ struct kvm_ioapic *ioapic = kvm->arch.vioapic;
spin_lock(&ioapic->lock);
memcpy(state, ioapic, sizeof(struct kvm_ioapic_state));
state->irr &= ~ioapic->irr_delivered;
spin_unlock(&ioapic->lock);
- return 0;
}
-int kvm_set_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state)
+void kvm_set_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state)
{
- struct kvm_ioapic *ioapic = ioapic_irqchip(kvm);
- if (!ioapic)
- return -EINVAL;
+ struct kvm_ioapic *ioapic = kvm->arch.vioapic;
spin_lock(&ioapic->lock);
memcpy(ioapic, state, sizeof(struct kvm_ioapic_state));
ioapic->irr = 0;
ioapic->irr_delivered = 0;
- kvm_vcpu_request_scan_ioapic(kvm);
+ kvm_make_scan_ioapic_request(kvm);
kvm_ioapic_inject_all(ioapic, state->irr);
spin_unlock(&ioapic->lock);
- return 0;
}
diff --git a/arch/x86/kvm/ioapic.h b/arch/x86/kvm/ioapic.h
index 1cc6e54436db..29ce19732ccf 100644
--- a/arch/x86/kvm/ioapic.h
+++ b/arch/x86/kvm/ioapic.h
@@ -105,17 +105,13 @@ do { \
#define ASSERT(x) do { } while (0)
#endif
-static inline struct kvm_ioapic *ioapic_irqchip(struct kvm *kvm)
-{
- return kvm->arch.vioapic;
-}
-
static inline int ioapic_in_kernel(struct kvm *kvm)
{
- int ret;
+ int mode = kvm->arch.irqchip_mode;
- ret = (ioapic_irqchip(kvm) != NULL);
- return ret;
+ /* Matches smp_wmb() when setting irqchip_mode */
+ smp_rmb();
+ return mode == KVM_IRQCHIP_KERNEL;
}
void kvm_rtc_eoi_tracking_restore_one(struct kvm_vcpu *vcpu);
@@ -132,8 +128,8 @@ void kvm_ioapic_clear_all(struct kvm_ioapic *ioapic, int irq_source_id);
int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src,
struct kvm_lapic_irq *irq,
struct dest_map *dest_map);
-int kvm_get_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state);
-int kvm_set_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state);
+void kvm_get_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state);
+void kvm_set_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state);
void kvm_ioapic_scan_entry(struct kvm_vcpu *vcpu,
ulong *ioapic_handled_vectors);
void kvm_scan_ioapic_routes(struct kvm_vcpu *vcpu,
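ioapic_in_kernel() above, and the matching irq.h helpers further down, now read kvm->arch.irqchip_mode and issue an smp_rmb() that pairs with the smp_wmb() performed after the irqchip data has been initialized, so a reader that observes KVM_IRQCHIP_KERNEL also observes the initialized vpic/vioapic state. A userspace model of that publish/observe ordering using C11 fences (all names here are illustrative):

#include <stdatomic.h>
#include <stdio.h>

enum irqchip_mode { IRQCHIP_NONE, IRQCHIP_KERNEL };

static int vpic_ready;                          /* stands in for kvm->arch.vpic */
static _Atomic enum irqchip_mode irqchip_mode;  /* stands in for arch.irqchip_mode */

static void create_irqchip(void)
{
        vpic_ready = 1;                                 /* initialize irqchip data ... */
        atomic_thread_fence(memory_order_release);      /* ... then smp_wmb()          */
        atomic_store_explicit(&irqchip_mode, IRQCHIP_KERNEL, memory_order_relaxed);
}

static int pic_in_kernel(void)
{
        enum irqchip_mode mode =
                atomic_load_explicit(&irqchip_mode, memory_order_relaxed);

        atomic_thread_fence(memory_order_acquire);      /* smp_rmb() */
        return mode == IRQCHIP_KERNEL;
}

int main(void)
{
        create_irqchip();
        if (pic_in_kernel())
                printf("irqchip published, vpic_ready=%d\n", vpic_ready);
        return 0;
}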
diff --git a/arch/x86/kvm/iommu.c b/arch/x86/kvm/iommu.c
deleted file mode 100644
index b181426f67b4..000000000000
--- a/arch/x86/kvm/iommu.c
+++ /dev/null
@@ -1,356 +0,0 @@
-/*
- * Copyright (c) 2006, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- *
- * Copyright (C) 2006-2008 Intel Corporation
- * Copyright IBM Corporation, 2008
- * Copyright 2010 Red Hat, Inc. and/or its affiliates.
- *
- * Author: Allen M. Kay <allen.m.kay@intel.com>
- * Author: Weidong Han <weidong.han@intel.com>
- * Author: Ben-Ami Yassour <benami@il.ibm.com>
- */
-
-#include <linux/list.h>
-#include <linux/kvm_host.h>
-#include <linux/moduleparam.h>
-#include <linux/pci.h>
-#include <linux/stat.h>
-#include <linux/iommu.h>
-#include "assigned-dev.h"
-
-static bool allow_unsafe_assigned_interrupts;
-module_param_named(allow_unsafe_assigned_interrupts,
- allow_unsafe_assigned_interrupts, bool, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(allow_unsafe_assigned_interrupts,
- "Enable device assignment on platforms without interrupt remapping support.");
-
-static int kvm_iommu_unmap_memslots(struct kvm *kvm);
-static void kvm_iommu_put_pages(struct kvm *kvm,
- gfn_t base_gfn, unsigned long npages);
-
-static kvm_pfn_t kvm_pin_pages(struct kvm_memory_slot *slot, gfn_t gfn,
- unsigned long npages)
-{
- gfn_t end_gfn;
- kvm_pfn_t pfn;
-
- pfn = gfn_to_pfn_memslot(slot, gfn);
- end_gfn = gfn + npages;
- gfn += 1;
-
- if (is_error_noslot_pfn(pfn))
- return pfn;
-
- while (gfn < end_gfn)
- gfn_to_pfn_memslot(slot, gfn++);
-
- return pfn;
-}
-
-static void kvm_unpin_pages(struct kvm *kvm, kvm_pfn_t pfn,
- unsigned long npages)
-{
- unsigned long i;
-
- for (i = 0; i < npages; ++i)
- kvm_release_pfn_clean(pfn + i);
-}
-
-int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
-{
- gfn_t gfn, end_gfn;
- kvm_pfn_t pfn;
- int r = 0;
- struct iommu_domain *domain = kvm->arch.iommu_domain;
- int flags;
-
- /* check if iommu exists and in use */
- if (!domain)
- return 0;
-
- gfn = slot->base_gfn;
- end_gfn = gfn + slot->npages;
-
- flags = IOMMU_READ;
- if (!(slot->flags & KVM_MEM_READONLY))
- flags |= IOMMU_WRITE;
- if (!kvm->arch.iommu_noncoherent)
- flags |= IOMMU_CACHE;
-
-
- while (gfn < end_gfn) {
- unsigned long page_size;
-
- /* Check if already mapped */
- if (iommu_iova_to_phys(domain, gfn_to_gpa(gfn))) {
- gfn += 1;
- continue;
- }
-
- /* Get the page size we could use to map */
- page_size = kvm_host_page_size(kvm, gfn);
-
- /* Make sure the page_size does not exceed the memslot */
- while ((gfn + (page_size >> PAGE_SHIFT)) > end_gfn)
- page_size >>= 1;
-
- /* Make sure gfn is aligned to the page size we want to map */
- while ((gfn << PAGE_SHIFT) & (page_size - 1))
- page_size >>= 1;
-
- /* Make sure hva is aligned to the page size we want to map */
- while (__gfn_to_hva_memslot(slot, gfn) & (page_size - 1))
- page_size >>= 1;
-
- /*
- * Pin all pages we are about to map in memory. This is
- * important because we unmap and unpin in 4kb steps later.
- */
- pfn = kvm_pin_pages(slot, gfn, page_size >> PAGE_SHIFT);
- if (is_error_noslot_pfn(pfn)) {
- gfn += 1;
- continue;
- }
-
- /* Map into IO address space */
- r = iommu_map(domain, gfn_to_gpa(gfn), pfn_to_hpa(pfn),
- page_size, flags);
- if (r) {
- printk(KERN_ERR "kvm_iommu_map_address:"
- "iommu failed to map pfn=%llx\n", pfn);
- kvm_unpin_pages(kvm, pfn, page_size >> PAGE_SHIFT);
- goto unmap_pages;
- }
-
- gfn += page_size >> PAGE_SHIFT;
-
- cond_resched();
- }
-
- return 0;
-
-unmap_pages:
- kvm_iommu_put_pages(kvm, slot->base_gfn, gfn - slot->base_gfn);
- return r;
-}
-
-static int kvm_iommu_map_memslots(struct kvm *kvm)
-{
- int idx, r = 0;
- struct kvm_memslots *slots;
- struct kvm_memory_slot *memslot;
-
- if (kvm->arch.iommu_noncoherent)
- kvm_arch_register_noncoherent_dma(kvm);
-
- idx = srcu_read_lock(&kvm->srcu);
- slots = kvm_memslots(kvm);
-
- kvm_for_each_memslot(memslot, slots) {
- r = kvm_iommu_map_pages(kvm, memslot);
- if (r)
- break;
- }
- srcu_read_unlock(&kvm->srcu, idx);
-
- return r;
-}
-
-int kvm_assign_device(struct kvm *kvm, struct pci_dev *pdev)
-{
- struct iommu_domain *domain = kvm->arch.iommu_domain;
- int r;
- bool noncoherent;
-
- /* check if iommu exists and in use */
- if (!domain)
- return 0;
-
- if (pdev == NULL)
- return -ENODEV;
-
- r = iommu_attach_device(domain, &pdev->dev);
- if (r) {
- dev_err(&pdev->dev, "kvm assign device failed ret %d", r);
- return r;
- }
-
- noncoherent = !iommu_capable(&pci_bus_type, IOMMU_CAP_CACHE_COHERENCY);
-
- /* Check if need to update IOMMU page table for guest memory */
- if (noncoherent != kvm->arch.iommu_noncoherent) {
- kvm_iommu_unmap_memslots(kvm);
- kvm->arch.iommu_noncoherent = noncoherent;
- r = kvm_iommu_map_memslots(kvm);
- if (r)
- goto out_unmap;
- }
-
- kvm_arch_start_assignment(kvm);
- pci_set_dev_assigned(pdev);
-
- dev_info(&pdev->dev, "kvm assign device\n");
-
- return 0;
-out_unmap:
- kvm_iommu_unmap_memslots(kvm);
- return r;
-}
-
-int kvm_deassign_device(struct kvm *kvm, struct pci_dev *pdev)
-{
- struct iommu_domain *domain = kvm->arch.iommu_domain;
-
- /* check if iommu exists and in use */
- if (!domain)
- return 0;
-
- if (pdev == NULL)
- return -ENODEV;
-
- iommu_detach_device(domain, &pdev->dev);
-
- pci_clear_dev_assigned(pdev);
- kvm_arch_end_assignment(kvm);
-
- dev_info(&pdev->dev, "kvm deassign device\n");
-
- return 0;
-}
-
-int kvm_iommu_map_guest(struct kvm *kvm)
-{
- int r;
-
- if (!iommu_present(&pci_bus_type)) {
- printk(KERN_ERR "%s: iommu not found\n", __func__);
- return -ENODEV;
- }
-
- mutex_lock(&kvm->slots_lock);
-
- kvm->arch.iommu_domain = iommu_domain_alloc(&pci_bus_type);
- if (!kvm->arch.iommu_domain) {
- r = -ENOMEM;
- goto out_unlock;
- }
-
- if (!allow_unsafe_assigned_interrupts &&
- !iommu_capable(&pci_bus_type, IOMMU_CAP_INTR_REMAP)) {
- printk(KERN_WARNING "%s: No interrupt remapping support,"
- " disallowing device assignment."
- " Re-enable with \"allow_unsafe_assigned_interrupts=1\""
- " module option.\n", __func__);
- iommu_domain_free(kvm->arch.iommu_domain);
- kvm->arch.iommu_domain = NULL;
- r = -EPERM;
- goto out_unlock;
- }
-
- r = kvm_iommu_map_memslots(kvm);
- if (r)
- kvm_iommu_unmap_memslots(kvm);
-
-out_unlock:
- mutex_unlock(&kvm->slots_lock);
- return r;
-}
-
-static void kvm_iommu_put_pages(struct kvm *kvm,
- gfn_t base_gfn, unsigned long npages)
-{
- struct iommu_domain *domain;
- gfn_t end_gfn, gfn;
- kvm_pfn_t pfn;
- u64 phys;
-
- domain = kvm->arch.iommu_domain;
- end_gfn = base_gfn + npages;
- gfn = base_gfn;
-
- /* check if iommu exists and in use */
- if (!domain)
- return;
-
- while (gfn < end_gfn) {
- unsigned long unmap_pages;
- size_t size;
-
- /* Get physical address */
- phys = iommu_iova_to_phys(domain, gfn_to_gpa(gfn));
-
- if (!phys) {
- gfn++;
- continue;
- }
-
- pfn = phys >> PAGE_SHIFT;
-
- /* Unmap address from IO address space */
- size = iommu_unmap(domain, gfn_to_gpa(gfn), PAGE_SIZE);
- unmap_pages = 1ULL << get_order(size);
-
- /* Unpin all pages we just unmapped to not leak any memory */
- kvm_unpin_pages(kvm, pfn, unmap_pages);
-
- gfn += unmap_pages;
-
- cond_resched();
- }
-}
-
-void kvm_iommu_unmap_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
-{
- kvm_iommu_put_pages(kvm, slot->base_gfn, slot->npages);
-}
-
-static int kvm_iommu_unmap_memslots(struct kvm *kvm)
-{
- int idx;
- struct kvm_memslots *slots;
- struct kvm_memory_slot *memslot;
-
- idx = srcu_read_lock(&kvm->srcu);
- slots = kvm_memslots(kvm);
-
- kvm_for_each_memslot(memslot, slots)
- kvm_iommu_unmap_pages(kvm, memslot);
-
- srcu_read_unlock(&kvm->srcu, idx);
-
- if (kvm->arch.iommu_noncoherent)
- kvm_arch_unregister_noncoherent_dma(kvm);
-
- return 0;
-}
-
-int kvm_iommu_unmap_guest(struct kvm *kvm)
-{
- struct iommu_domain *domain = kvm->arch.iommu_domain;
-
- /* check if iommu exists and in use */
- if (!domain)
- return 0;
-
- mutex_lock(&kvm->slots_lock);
- kvm_iommu_unmap_memslots(kvm);
- kvm->arch.iommu_domain = NULL;
- kvm->arch.iommu_noncoherent = false;
- mutex_unlock(&kvm->slots_lock);
-
- iommu_domain_free(domain);
- return 0;
-}
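For reference, the deleted kvm_iommu_map_pages() above picked, for each gfn, the largest mapping size that still fit inside the memslot while keeping both gfn and hva aligned, halving the candidate until all three constraints held. A standalone sketch of just that size-selection step, with made-up example values:

#include <stdio.h>

#define PAGE_SHIFT 12

static unsigned long pick_map_size(unsigned long gfn, unsigned long end_gfn,
                                   unsigned long hva, unsigned long host_page_size)
{
        unsigned long page_size = host_page_size;

        /* Must not spill past the end of the memslot. */
        while (gfn + (page_size >> PAGE_SHIFT) > end_gfn)
                page_size >>= 1;
        /* gfn and hva must both be aligned to the chosen size. */
        while ((gfn << PAGE_SHIFT) & (page_size - 1))
                page_size >>= 1;
        while (hva & (page_size - 1))
                page_size >>= 1;

        return page_size;
}

int main(void)
{
        /* A 2 MiB host page, but the slot ends 16 pages away: fall back to 64 KiB. */
        printf("%lu KiB\n",
               pick_map_size(0x1000, 0x1010, 0x70000000UL, 2UL << 20) >> 10);
        return 0;
}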
diff --git a/arch/x86/kvm/irq.c b/arch/x86/kvm/irq.c
index 60d91c9d160c..5c24811e8b0b 100644
--- a/arch/x86/kvm/irq.c
+++ b/arch/x86/kvm/irq.c
@@ -60,7 +60,7 @@ static int kvm_cpu_has_extint(struct kvm_vcpu *v)
if (irqchip_split(v->kvm))
return pending_userspace_extint(v);
else
- return pic_irqchip(v->kvm)->output;
+ return v->kvm->arch.vpic->output;
} else
return 0;
}
diff --git a/arch/x86/kvm/irq.h b/arch/x86/kvm/irq.h
index 40d5b2cf6061..d5005cc26521 100644
--- a/arch/x86/kvm/irq.h
+++ b/arch/x86/kvm/irq.h
@@ -78,40 +78,42 @@ void kvm_pic_destroy(struct kvm *kvm);
int kvm_pic_read_irq(struct kvm *kvm);
void kvm_pic_update_irq(struct kvm_pic *s);
-static inline struct kvm_pic *pic_irqchip(struct kvm *kvm)
-{
- return kvm->arch.vpic;
-}
-
static inline int pic_in_kernel(struct kvm *kvm)
{
- int ret;
+ int mode = kvm->arch.irqchip_mode;
- ret = (pic_irqchip(kvm) != NULL);
- return ret;
+ /* Matches smp_wmb() when setting irqchip_mode */
+ smp_rmb();
+ return mode == KVM_IRQCHIP_KERNEL;
}
static inline int irqchip_split(struct kvm *kvm)
{
- return kvm->arch.irqchip_mode == KVM_IRQCHIP_SPLIT;
+ int mode = kvm->arch.irqchip_mode;
+
+ /* Matches smp_wmb() when setting irqchip_mode */
+ smp_rmb();
+ return mode == KVM_IRQCHIP_SPLIT;
}
static inline int irqchip_kernel(struct kvm *kvm)
{
- return kvm->arch.irqchip_mode == KVM_IRQCHIP_KERNEL;
+ int mode = kvm->arch.irqchip_mode;
+
+ /* Matches smp_wmb() when setting irqchip_mode */
+ smp_rmb();
+ return mode == KVM_IRQCHIP_KERNEL;
}
static inline int irqchip_in_kernel(struct kvm *kvm)
{
- bool ret = kvm->arch.irqchip_mode != KVM_IRQCHIP_NONE;
+ int mode = kvm->arch.irqchip_mode;
- /* Matches with wmb after initializing kvm->irq_routing. */
+ /* Matches smp_wmb() when setting irqchip_mode */
smp_rmb();
- return ret;
+ return mode != KVM_IRQCHIP_NONE;
}
-void kvm_pic_reset(struct kvm_kpic_state *s);
-
void kvm_inject_pending_timer_irqs(struct kvm_vcpu *vcpu);
void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu);
void kvm_apic_nmi_wd_deliver(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/kvm/irq_comm.c b/arch/x86/kvm/irq_comm.c
index 6825cd36d13b..3cc3b2d130a0 100644
--- a/arch/x86/kvm/irq_comm.c
+++ b/arch/x86/kvm/irq_comm.c
@@ -42,7 +42,7 @@ static int kvm_set_pic_irq(struct kvm_kernel_irq_routing_entry *e,
struct kvm *kvm, int irq_source_id, int level,
bool line_status)
{
- struct kvm_pic *pic = pic_irqchip(kvm);
+ struct kvm_pic *pic = kvm->arch.vpic;
return kvm_pic_set_irq(pic, e->irqchip.pin, irq_source_id, level);
}
@@ -232,11 +232,11 @@ void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id)
goto unlock;
}
clear_bit(irq_source_id, &kvm->arch.irq_sources_bitmap);
- if (!ioapic_in_kernel(kvm))
+ if (!irqchip_kernel(kvm))
goto unlock;
kvm_ioapic_clear_all(kvm->arch.vioapic, irq_source_id);
- kvm_pic_clear_all(pic_irqchip(kvm), irq_source_id);
+ kvm_pic_clear_all(kvm->arch.vpic, irq_source_id);
unlock:
mutex_unlock(&kvm->irq_lock);
}
@@ -274,42 +274,42 @@ void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
srcu_read_unlock(&kvm->irq_srcu, idx);
}
+bool kvm_arch_can_set_irq_routing(struct kvm *kvm)
+{
+ return irqchip_in_kernel(kvm);
+}
+
int kvm_set_routing_entry(struct kvm *kvm,
struct kvm_kernel_irq_routing_entry *e,
const struct kvm_irq_routing_entry *ue)
{
- int r = -EINVAL;
- int delta;
- unsigned max_pin;
-
+ /* We can't check irqchip_in_kernel() here as some callers are
+ * currently initializing the irqchip. Other callers should therefore
+ * check kvm_arch_can_set_irq_routing() before calling this function.
+ */
switch (ue->type) {
case KVM_IRQ_ROUTING_IRQCHIP:
- delta = 0;
+ if (irqchip_split(kvm))
+ return -EINVAL;
+ e->irqchip.pin = ue->u.irqchip.pin;
switch (ue->u.irqchip.irqchip) {
case KVM_IRQCHIP_PIC_SLAVE:
- delta = 8;
+ e->irqchip.pin += PIC_NUM_PINS / 2;
/* fall through */
case KVM_IRQCHIP_PIC_MASTER:
- if (!pic_in_kernel(kvm))
- goto out;
-
+ if (ue->u.irqchip.pin >= PIC_NUM_PINS / 2)
+ return -EINVAL;
e->set = kvm_set_pic_irq;
- max_pin = PIC_NUM_PINS;
break;
case KVM_IRQCHIP_IOAPIC:
- if (!ioapic_in_kernel(kvm))
- goto out;
-
- max_pin = KVM_IOAPIC_NUM_PINS;
+ if (ue->u.irqchip.pin >= KVM_IOAPIC_NUM_PINS)
+ return -EINVAL;
e->set = kvm_set_ioapic_irq;
break;
default:
- goto out;
+ return -EINVAL;
}
e->irqchip.irqchip = ue->u.irqchip.irqchip;
- e->irqchip.pin = ue->u.irqchip.pin + delta;
- if (e->irqchip.pin >= max_pin)
- goto out;
break;
case KVM_IRQ_ROUTING_MSI:
e->set = kvm_set_msi;
@@ -318,7 +318,7 @@ int kvm_set_routing_entry(struct kvm *kvm,
e->msi.data = ue->u.msi.data;
if (kvm_msi_route_invalid(kvm, e))
- goto out;
+ return -EINVAL;
break;
case KVM_IRQ_ROUTING_HV_SINT:
e->set = kvm_hv_set_sint;
@@ -326,12 +326,10 @@ int kvm_set_routing_entry(struct kvm *kvm,
e->hv_sint.sint = ue->u.hv_sint.sint;
break;
default:
- goto out;
+ return -EINVAL;
}
- r = 0;
-out:
- return r;
+ return 0;
}
bool kvm_intr_is_single_vcpu(struct kvm *kvm, struct kvm_lapic_irq *irq,
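The rewritten kvm_set_routing_entry() above bounds-checks the requested pin against each 8259's half of PIC_NUM_PINS and folds the slave offset straight into e->irqchip.pin, rather than carrying a separate delta/max_pin pair. A compact model of that mapping (PIC_NUM_PINS taken as 16 here):

#include <stdio.h>

#define PIC_NUM_PINS 16

enum pic_chip { PIC_MASTER, PIC_SLAVE };

/* Returns the routing pin, or -1 for an out-of-range request. */
static int map_pic_pin(enum pic_chip chip, unsigned int pin)
{
        if (pin >= PIC_NUM_PINS / 2)
                return -1;
        return chip == PIC_SLAVE ? (int)(pin + PIC_NUM_PINS / 2) : (int)pin;
}

int main(void)
{
        printf("master pin 3 -> %d\n", map_pic_pin(PIC_MASTER, 3));  /*  3 */
        printf("slave pin 3  -> %d\n", map_pic_pin(PIC_SLAVE, 3));   /* 11 */
        printf("slave pin 9  -> %d\n", map_pic_pin(PIC_SLAVE, 9));   /* -1 */
        return 0;
}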
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index bad6a25067bc..c329d2894905 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -177,8 +177,8 @@ static void recalculate_apic_map(struct kvm *kvm)
if (kvm_apic_present(vcpu))
max_id = max(max_id, kvm_x2apic_id(vcpu->arch.apic));
- new = kvm_kvzalloc(sizeof(struct kvm_apic_map) +
- sizeof(struct kvm_lapic *) * ((u64)max_id + 1));
+ new = kvzalloc(sizeof(struct kvm_apic_map) +
+ sizeof(struct kvm_lapic *) * ((u64)max_id + 1), GFP_KERNEL);
if (!new)
goto out;
@@ -529,14 +529,16 @@ int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq,
static int pv_eoi_put_user(struct kvm_vcpu *vcpu, u8 val)
{
- return kvm_vcpu_write_guest_cached(vcpu, &vcpu->arch.pv_eoi.data, &val,
- sizeof(val));
+
+ return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, &val,
+ sizeof(val));
}
static int pv_eoi_get_user(struct kvm_vcpu *vcpu, u8 *val)
{
- return kvm_vcpu_read_guest_cached(vcpu, &vcpu->arch.pv_eoi.data, val,
- sizeof(*val));
+
+ return kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, val,
+ sizeof(*val));
}
static inline bool pv_eoi_enabled(struct kvm_vcpu *vcpu)
@@ -2285,8 +2287,8 @@ void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu)
if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention))
return;
- if (kvm_vcpu_read_guest_cached(vcpu, &vcpu->arch.apic->vapic_cache, &data,
- sizeof(u32)))
+ if (kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
+ sizeof(u32)))
return;
apic_set_tpr(vcpu->arch.apic, data & 0xff);
@@ -2338,14 +2340,14 @@ void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu)
max_isr = 0;
data = (tpr & 0xff) | ((max_isr & 0xf0) << 8) | (max_irr << 24);
- kvm_vcpu_write_guest_cached(vcpu, &vcpu->arch.apic->vapic_cache, &data,
- sizeof(u32));
+ kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
+ sizeof(u32));
}
int kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr)
{
if (vapic_addr) {
- if (kvm_vcpu_gfn_to_hva_cache_init(vcpu,
+ if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
&vcpu->arch.apic->vapic_cache,
vapic_addr, sizeof(u32)))
return -EINVAL;
@@ -2439,7 +2441,7 @@ int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data)
vcpu->arch.pv_eoi.msr_val = data;
if (!pv_eoi_enabled(vcpu))
return 0;
- return kvm_vcpu_gfn_to_hva_cache_init(vcpu, &vcpu->arch.pv_eoi.data,
+ return kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.pv_eoi.data,
addr, sizeof(u8));
}
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index ac7810513d0e..558676538fca 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -4340,7 +4340,8 @@ void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu)
}
EXPORT_SYMBOL_GPL(kvm_init_shadow_mmu);
-void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly)
+void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
+ bool accessed_dirty)
{
struct kvm_mmu *context = &vcpu->arch.mmu;
@@ -4349,6 +4350,7 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly)
context->shadow_root_level = kvm_x86_ops->get_tdp_level();
context->nx = true;
+ context->ept_ad = accessed_dirty;
context->page_fault = ept_page_fault;
context->gva_to_gpa = ept_gva_to_gpa;
context->sync_page = ept_sync_page;
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index ddc56e91f2e4..d8ccb32f7308 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -74,7 +74,8 @@ enum {
int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct);
void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu);
-void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly);
+void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
+ bool accessed_dirty);
static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm)
{
diff --git a/arch/x86/kvm/page_track.c b/arch/x86/kvm/page_track.c
index 60168cdd0546..ea67dc876316 100644
--- a/arch/x86/kvm/page_track.c
+++ b/arch/x86/kvm/page_track.c
@@ -40,8 +40,8 @@ int kvm_page_track_create_memslot(struct kvm_memory_slot *slot,
int i;
for (i = 0; i < KVM_PAGE_TRACK_MAX; i++) {
- slot->arch.gfn_track[i] = kvm_kvzalloc(npages *
- sizeof(*slot->arch.gfn_track[i]));
+ slot->arch.gfn_track[i] = kvzalloc(npages *
+ sizeof(*slot->arch.gfn_track[i]), GFP_KERNEL);
if (!slot->arch.gfn_track[i])
goto track_free;
}
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index a01105485315..314d2071b337 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -23,13 +23,6 @@
* so the code in this file is compiled twice, once per pte size.
*/
-/*
- * This is used to catch non optimized PT_GUEST_(DIRTY|ACCESS)_SHIFT macro
- * uses for EPT without A/D paging type.
- */
-extern u64 __pure __using_nonexistent_pte_bit(void)
- __compiletime_error("wrong use of PT_GUEST_(DIRTY|ACCESS)_SHIFT");
-
#if PTTYPE == 64
#define pt_element_t u64
#define guest_walker guest_walker64
@@ -39,10 +32,9 @@ extern u64 __pure __using_nonexistent_pte_bit(void)
#define PT_LVL_OFFSET_MASK(lvl) PT64_LVL_OFFSET_MASK(lvl)
#define PT_INDEX(addr, level) PT64_INDEX(addr, level)
#define PT_LEVEL_BITS PT64_LEVEL_BITS
- #define PT_GUEST_ACCESSED_MASK PT_ACCESSED_MASK
- #define PT_GUEST_DIRTY_MASK PT_DIRTY_MASK
#define PT_GUEST_DIRTY_SHIFT PT_DIRTY_SHIFT
#define PT_GUEST_ACCESSED_SHIFT PT_ACCESSED_SHIFT
+ #define PT_HAVE_ACCESSED_DIRTY(mmu) true
#ifdef CONFIG_X86_64
#define PT_MAX_FULL_LEVELS 4
#define CMPXCHG cmpxchg
@@ -60,10 +52,9 @@ extern u64 __pure __using_nonexistent_pte_bit(void)
#define PT_INDEX(addr, level) PT32_INDEX(addr, level)
#define PT_LEVEL_BITS PT32_LEVEL_BITS
#define PT_MAX_FULL_LEVELS 2
- #define PT_GUEST_ACCESSED_MASK PT_ACCESSED_MASK
- #define PT_GUEST_DIRTY_MASK PT_DIRTY_MASK
#define PT_GUEST_DIRTY_SHIFT PT_DIRTY_SHIFT
#define PT_GUEST_ACCESSED_SHIFT PT_ACCESSED_SHIFT
+ #define PT_HAVE_ACCESSED_DIRTY(mmu) true
#define CMPXCHG cmpxchg
#elif PTTYPE == PTTYPE_EPT
#define pt_element_t u64
@@ -74,16 +65,18 @@ extern u64 __pure __using_nonexistent_pte_bit(void)
#define PT_LVL_OFFSET_MASK(lvl) PT64_LVL_OFFSET_MASK(lvl)
#define PT_INDEX(addr, level) PT64_INDEX(addr, level)
#define PT_LEVEL_BITS PT64_LEVEL_BITS
- #define PT_GUEST_ACCESSED_MASK 0
- #define PT_GUEST_DIRTY_MASK 0
- #define PT_GUEST_DIRTY_SHIFT __using_nonexistent_pte_bit()
- #define PT_GUEST_ACCESSED_SHIFT __using_nonexistent_pte_bit()
+ #define PT_GUEST_DIRTY_SHIFT 9
+ #define PT_GUEST_ACCESSED_SHIFT 8
+ #define PT_HAVE_ACCESSED_DIRTY(mmu) ((mmu)->ept_ad)
#define CMPXCHG cmpxchg64
#define PT_MAX_FULL_LEVELS 4
#else
#error Invalid PTTYPE value
#endif
+#define PT_GUEST_DIRTY_MASK (1 << PT_GUEST_DIRTY_SHIFT)
+#define PT_GUEST_ACCESSED_MASK (1 << PT_GUEST_ACCESSED_SHIFT)
+
#define gpte_to_gfn_lvl FNAME(gpte_to_gfn_lvl)
#define gpte_to_gfn(pte) gpte_to_gfn_lvl((pte), PT_PAGE_TABLE_LEVEL)
@@ -111,12 +104,13 @@ static gfn_t gpte_to_gfn_lvl(pt_element_t gpte, int lvl)
return (gpte & PT_LVL_ADDR_MASK(lvl)) >> PAGE_SHIFT;
}
-static inline void FNAME(protect_clean_gpte)(unsigned *access, unsigned gpte)
+static inline void FNAME(protect_clean_gpte)(struct kvm_mmu *mmu, unsigned *access,
+ unsigned gpte)
{
unsigned mask;
/* dirty bit is not supported, so no need to track it */
- if (!PT_GUEST_DIRTY_MASK)
+ if (!PT_HAVE_ACCESSED_DIRTY(mmu))
return;
BUILD_BUG_ON(PT_WRITABLE_MASK != ACC_WRITE_MASK);
@@ -171,7 +165,7 @@ static bool FNAME(prefetch_invalid_gpte)(struct kvm_vcpu *vcpu,
goto no_present;
/* if accessed bit is not supported prefetch non accessed gpte */
- if (PT_GUEST_ACCESSED_MASK && !(gpte & PT_GUEST_ACCESSED_MASK))
+ if (PT_HAVE_ACCESSED_DIRTY(&vcpu->arch.mmu) && !(gpte & PT_GUEST_ACCESSED_MASK))
goto no_present;
return false;
@@ -217,7 +211,7 @@ static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu,
int ret;
/* dirty/accessed bits are not supported, so no need to update them */
- if (!PT_GUEST_DIRTY_MASK)
+ if (!PT_HAVE_ACCESSED_DIRTY(mmu))
return 0;
for (level = walker->max_level; level >= walker->level; --level) {
@@ -286,7 +280,9 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
pt_element_t __user *uninitialized_var(ptep_user);
gfn_t table_gfn;
unsigned index, pt_access, pte_access, accessed_dirty, pte_pkey;
+ unsigned nested_access;
gpa_t pte_gpa;
+ bool have_ad;
int offset;
const int write_fault = access & PFERR_WRITE_MASK;
const int user_fault = access & PFERR_USER_MASK;
@@ -299,6 +295,7 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
retry_walk:
walker->level = mmu->root_level;
pte = mmu->get_cr3(vcpu);
+ have_ad = PT_HAVE_ACCESSED_DIRTY(mmu);
#if PTTYPE == 64
if (walker->level == PT32E_ROOT_LEVEL) {
@@ -312,7 +309,15 @@ retry_walk:
walker->max_level = walker->level;
ASSERT(!(is_long_mode(vcpu) && !is_pae(vcpu)));
- accessed_dirty = PT_GUEST_ACCESSED_MASK;
+ accessed_dirty = have_ad ? PT_GUEST_ACCESSED_MASK : 0;
+
+ /*
+ * FIXME: on Intel processors, loads of the PDPTE registers for PAE paging
+ * by the MOV to CR instruction are treated as reads and do not cause the
+ * processor to set the dirty flag in any EPT paging-structure entry.
+ */
+ nested_access = (have_ad ? PFERR_WRITE_MASK : 0) | PFERR_USER_MASK;
+
pt_access = pte_access = ACC_ALL;
++walker->level;
@@ -332,7 +337,7 @@ retry_walk:
walker->pte_gpa[walker->level - 1] = pte_gpa;
real_gfn = mmu->translate_gpa(vcpu, gfn_to_gpa(table_gfn),
- PFERR_USER_MASK|PFERR_WRITE_MASK,
+ nested_access,
&walker->fault);
/*
@@ -394,7 +399,7 @@ retry_walk:
walker->gfn = real_gpa >> PAGE_SHIFT;
if (!write_fault)
- FNAME(protect_clean_gpte)(&pte_access, pte);
+ FNAME(protect_clean_gpte)(mmu, &pte_access, pte);
else
/*
* On a write fault, fold the dirty bit into accessed_dirty.
@@ -485,7 +490,7 @@ FNAME(prefetch_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
gfn = gpte_to_gfn(gpte);
pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte);
- FNAME(protect_clean_gpte)(&pte_access, gpte);
+ FNAME(protect_clean_gpte)(&vcpu->arch.mmu, &pte_access, gpte);
pfn = pte_prefetch_gfn_to_pfn(vcpu, gfn,
no_dirty_log && (pte_access & ACC_WRITE_MASK));
if (is_error_pfn(pfn))
@@ -979,7 +984,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
gfn = gpte_to_gfn(gpte);
pte_access = sp->role.access;
pte_access &= FNAME(gpte_access)(vcpu, gpte);
- FNAME(protect_clean_gpte)(&pte_access, gpte);
+ FNAME(protect_clean_gpte)(&vcpu->arch.mmu, &pte_access, gpte);
if (sync_mmio_spte(vcpu, &sp->spt[i], gfn, pte_access,
&nr_present))
@@ -1025,3 +1030,4 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
#undef PT_GUEST_DIRTY_MASK
#undef PT_GUEST_DIRTY_SHIFT
#undef PT_GUEST_ACCESSED_SHIFT
+#undef PT_HAVE_ACCESSED_DIRTY
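With nested EPT A/D support, the EPT flavour of paging_tmpl.h now places the guest accessed and dirty bits at positions 8 and 9, derives the masks from those shifts, and gates their use at runtime on mmu->ept_ad via PT_HAVE_ACCESSED_DIRTY() instead of compiling them out. A trivial standalone check of the derived masks and of the walker's initial accessed_dirty value:

#include <stdio.h>

#define PT_GUEST_ACCESSED_SHIFT 8
#define PT_GUEST_DIRTY_SHIFT    9
#define PT_GUEST_ACCESSED_MASK  (1 << PT_GUEST_ACCESSED_SHIFT)
#define PT_GUEST_DIRTY_MASK     (1 << PT_GUEST_DIRTY_SHIFT)

/* The walker seeds accessed_dirty with the accessed mask only when A/D is on. */
static unsigned int walker_accessed_dirty(int have_ad)
{
        return have_ad ? PT_GUEST_ACCESSED_MASK : 0;
}

int main(void)
{
        printf("accessed mask = %#x, dirty mask = %#x\n",
               PT_GUEST_ACCESSED_MASK, PT_GUEST_DIRTY_MASK);
        printf("accessed_dirty with A/D off = %#x\n", walker_accessed_dirty(0));
        return 0;
}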
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 5f48f62b8dc2..c27ac6923a18 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1196,10 +1196,13 @@ static void init_vmcb(struct vcpu_svm *svm)
set_intercept(svm, INTERCEPT_CLGI);
set_intercept(svm, INTERCEPT_SKINIT);
set_intercept(svm, INTERCEPT_WBINVD);
- set_intercept(svm, INTERCEPT_MONITOR);
- set_intercept(svm, INTERCEPT_MWAIT);
set_intercept(svm, INTERCEPT_XSETBV);
+ if (!kvm_mwait_in_guest()) {
+ set_intercept(svm, INTERCEPT_MONITOR);
+ set_intercept(svm, INTERCEPT_MWAIT);
+ }
+
control->iopm_base_pa = iopm_base;
control->msrpm_base_pa = __pa(svm->msrpm);
control->int_ctl = V_INTR_MASKING_MASK;
@@ -5254,6 +5257,12 @@ static inline void avic_post_state_restore(struct kvm_vcpu *vcpu)
avic_handle_ldr_update(vcpu);
}
+static void svm_setup_mce(struct kvm_vcpu *vcpu)
+{
+ /* [63:9] are reserved. */
+ vcpu->arch.mcg_cap &= 0x1ff;
+}
+
static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
.cpu_has_kvm_support = has_svm,
.disabled_by_bios = is_disabled,
@@ -5365,6 +5374,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
.pmu_ops = &amd_pmu_ops,
.deliver_posted_interrupt = svm_deliver_avic_intr,
.update_pi_irte = svm_update_pi_irte,
+ .setup_mce = svm_setup_mce,
};
static int __init svm_init(void)
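The new svm_setup_mce() above clamps the guest's MCG_CAP to its low nine bits, treating bits 63:9 as reserved, so the bank count and the low capability flags survive while everything above is cleared. A one-line illustration on an arbitrary example value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t mcg_cap = 0xdead000000000106ULL;   /* arbitrary example value */

        mcg_cap &= 0x1ff;                           /* [63:9] are reserved */
        printf("mcg_cap after setup_mce: %#llx\n", (unsigned long long)mcg_cap);
        return 0;
}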
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 1a471e5f963f..c5fd459c4043 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -84,9 +84,6 @@ module_param_named(eptad, enable_ept_ad_bits, bool, S_IRUGO);
static bool __read_mostly emulate_invalid_guest_state = true;
module_param(emulate_invalid_guest_state, bool, S_IRUGO);
-static bool __read_mostly vmm_exclusive = 1;
-module_param(vmm_exclusive, bool, S_IRUGO);
-
static bool __read_mostly fasteoi = 1;
module_param(fasteoi, bool, S_IRUGO);
@@ -615,10 +612,6 @@ struct vcpu_vmx {
int vpid;
bool emulation_required;
- /* Support for vnmi-less CPUs */
- int soft_vnmi_blocked;
- ktime_t entry_time;
- s64 vnmi_blocked_time;
u32 exit_reason;
/* Posted interrupt descriptor */
@@ -914,8 +907,6 @@ static void nested_release_page_clean(struct page *page)
static unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu);
static u64 construct_eptp(unsigned long root_hpa);
-static void kvm_cpu_vmxon(u64 addr);
-static void kvm_cpu_vmxoff(void);
static bool vmx_xsaves_supported(void);
static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr);
static void vmx_set_segment(struct kvm_vcpu *vcpu,
@@ -1289,11 +1280,6 @@ static inline bool cpu_has_vmx_invpcid(void)
SECONDARY_EXEC_ENABLE_INVPCID;
}
-static inline bool cpu_has_virtual_nmis(void)
-{
- return vmcs_config.pin_based_exec_ctrl & PIN_BASED_VIRTUAL_NMIS;
-}
-
static inline bool cpu_has_vmx_wbinvd_exit(void)
{
return vmcs_config.cpu_based_2nd_exec_ctrl &
@@ -2238,15 +2224,10 @@ static void decache_tsc_multiplier(struct vcpu_vmx *vmx)
static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
- u64 phys_addr = __pa(per_cpu(vmxarea, cpu));
bool already_loaded = vmx->loaded_vmcs->cpu == cpu;
- if (!vmm_exclusive)
- kvm_cpu_vmxon(phys_addr);
- else if (!already_loaded)
- loaded_vmcs_clear(vmx->loaded_vmcs);
-
if (!already_loaded) {
+ loaded_vmcs_clear(vmx->loaded_vmcs);
local_irq_disable();
crash_disable_local_vmclear(cpu);
@@ -2324,11 +2305,6 @@ static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
vmx_vcpu_pi_put(vcpu);
__vmx_load_host_state(to_vmx(vcpu));
- if (!vmm_exclusive) {
- __loaded_vmcs_clear(to_vmx(vcpu)->loaded_vmcs);
- vcpu->cpu = -1;
- kvm_cpu_vmxoff();
- }
}
static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu);
@@ -2752,6 +2728,7 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
vmx->nested.nested_vmx_secondary_ctls_high);
vmx->nested.nested_vmx_secondary_ctls_low = 0;
vmx->nested.nested_vmx_secondary_ctls_high &=
+ SECONDARY_EXEC_RDRAND | SECONDARY_EXEC_RDSEED |
SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
SECONDARY_EXEC_RDTSCP |
SECONDARY_EXEC_DESC |
@@ -2766,14 +2743,16 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
vmx->nested.nested_vmx_secondary_ctls_high |=
SECONDARY_EXEC_ENABLE_EPT;
vmx->nested.nested_vmx_ept_caps = VMX_EPT_PAGE_WALK_4_BIT |
- VMX_EPTP_WB_BIT | VMX_EPT_2MB_PAGE_BIT |
- VMX_EPT_INVEPT_BIT;
+ VMX_EPTP_WB_BIT | VMX_EPT_INVEPT_BIT;
if (cpu_has_vmx_ept_execute_only())
vmx->nested.nested_vmx_ept_caps |=
VMX_EPT_EXECUTE_ONLY_BIT;
vmx->nested.nested_vmx_ept_caps &= vmx_capability.ept;
vmx->nested.nested_vmx_ept_caps |= VMX_EPT_EXTENT_GLOBAL_BIT |
- VMX_EPT_EXTENT_CONTEXT_BIT;
+ VMX_EPT_EXTENT_CONTEXT_BIT | VMX_EPT_2MB_PAGE_BIT |
+ VMX_EPT_1GB_PAGE_BIT;
+ if (enable_ept_ad_bits)
+ vmx->nested.nested_vmx_ept_caps |= VMX_EPT_AD_BIT;
} else
vmx->nested.nested_vmx_ept_caps = 0;
@@ -3420,6 +3399,7 @@ static __init int vmx_disabled_by_bios(void)
static void kvm_cpu_vmxon(u64 addr)
{
+ cr4_set_bits(X86_CR4_VMXE);
intel_pt_handle_vmx(1);
asm volatile (ASM_VMX_VMXON_RAX
@@ -3462,12 +3442,8 @@ static int hardware_enable(void)
/* enable and lock */
wrmsrl(MSR_IA32_FEATURE_CONTROL, old | test_bits);
}
- cr4_set_bits(X86_CR4_VMXE);
-
- if (vmm_exclusive) {
- kvm_cpu_vmxon(phys_addr);
- ept_sync_global();
- }
+ kvm_cpu_vmxon(phys_addr);
+ ept_sync_global();
return 0;
}
@@ -3491,15 +3467,13 @@ static void kvm_cpu_vmxoff(void)
asm volatile (__ex(ASM_VMX_VMXOFF) : : : "cc");
intel_pt_handle_vmx(0);
+ cr4_clear_bits(X86_CR4_VMXE);
}
static void hardware_disable(void)
{
- if (vmm_exclusive) {
- vmclear_local_loaded_vmcss();
- kvm_cpu_vmxoff();
- }
- cr4_clear_bits(X86_CR4_VMXE);
+ vmclear_local_loaded_vmcss();
+ kvm_cpu_vmxoff();
}
static __init int adjust_vmx_controls(u32 ctl_min, u32 ctl_opt,
@@ -3549,11 +3523,13 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
CPU_BASED_USE_IO_BITMAPS |
CPU_BASED_MOV_DR_EXITING |
CPU_BASED_USE_TSC_OFFSETING |
- CPU_BASED_MWAIT_EXITING |
- CPU_BASED_MONITOR_EXITING |
CPU_BASED_INVLPG_EXITING |
CPU_BASED_RDPMC_EXITING;
+ if (!kvm_mwait_in_guest())
+ min |= CPU_BASED_MWAIT_EXITING |
+ CPU_BASED_MONITOR_EXITING;
+
opt = CPU_BASED_TPR_SHADOW |
CPU_BASED_USE_MSR_BITMAPS |
CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
@@ -3619,9 +3595,9 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
&_vmexit_control) < 0)
return -EIO;
- min = PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING;
- opt = PIN_BASED_VIRTUAL_NMIS | PIN_BASED_POSTED_INTR |
- PIN_BASED_VMX_PREEMPTION_TIMER;
+ min = PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING |
+ PIN_BASED_VIRTUAL_NMIS;
+ opt = PIN_BASED_POSTED_INTR | PIN_BASED_VMX_PREEMPTION_TIMER;
if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PINBASED_CTLS,
&_pin_based_exec_control) < 0)
return -EIO;
@@ -4013,11 +3989,12 @@ static void exit_lmode(struct kvm_vcpu *vcpu)
static inline void __vmx_flush_tlb(struct kvm_vcpu *vcpu, int vpid)
{
- vpid_sync_context(vpid);
if (enable_ept) {
if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
return;
ept_sync_context(construct_eptp(vcpu->arch.mmu.root_hpa));
+ } else {
+ vpid_sync_context(vpid);
}
}
@@ -5293,8 +5270,6 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
vmx->rmode.vm86_active = 0;
- vmx->soft_vnmi_blocked = 0;
-
vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val();
kvm_set_cr8(vcpu, 0);
@@ -5414,8 +5389,7 @@ static void enable_irq_window(struct kvm_vcpu *vcpu)
static void enable_nmi_window(struct kvm_vcpu *vcpu)
{
- if (!cpu_has_virtual_nmis() ||
- vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_STI) {
+ if (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_STI) {
enable_irq_window(vcpu);
return;
}
@@ -5456,19 +5430,6 @@ static void vmx_inject_nmi(struct kvm_vcpu *vcpu)
struct vcpu_vmx *vmx = to_vmx(vcpu);
if (!is_guest_mode(vcpu)) {
- if (!cpu_has_virtual_nmis()) {
- /*
- * Tracking the NMI-blocked state in software is built upon
- * finding the next open IRQ window. This, in turn, depends on
- * well-behaving guests: They have to keep IRQs disabled at
- * least as long as the NMI handler runs. Otherwise we may
- * cause NMI nesting, maybe breaking the guest. But as this is
- * highly unlikely, we can live with the residual risk.
- */
- vmx->soft_vnmi_blocked = 1;
- vmx->vnmi_blocked_time = 0;
- }
-
++vcpu->stat.nmi_injections;
vmx->nmi_known_unmasked = false;
}
@@ -5485,8 +5446,6 @@ static void vmx_inject_nmi(struct kvm_vcpu *vcpu)
static bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu)
{
- if (!cpu_has_virtual_nmis())
- return to_vmx(vcpu)->soft_vnmi_blocked;
if (to_vmx(vcpu)->nmi_known_unmasked)
return false;
return vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_NMI;
@@ -5496,20 +5455,13 @@ static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
- if (!cpu_has_virtual_nmis()) {
- if (vmx->soft_vnmi_blocked != masked) {
- vmx->soft_vnmi_blocked = masked;
- vmx->vnmi_blocked_time = 0;
- }
- } else {
- vmx->nmi_known_unmasked = !masked;
- if (masked)
- vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
- GUEST_INTR_STATE_NMI);
- else
- vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO,
- GUEST_INTR_STATE_NMI);
- }
+ vmx->nmi_known_unmasked = !masked;
+ if (masked)
+ vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
+ GUEST_INTR_STATE_NMI);
+ else
+ vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO,
+ GUEST_INTR_STATE_NMI);
}
static int vmx_nmi_allowed(struct kvm_vcpu *vcpu)
@@ -5517,9 +5469,6 @@ static int vmx_nmi_allowed(struct kvm_vcpu *vcpu)
if (to_vmx(vcpu)->nested.nested_run_pending)
return 0;
- if (!cpu_has_virtual_nmis() && to_vmx(vcpu)->soft_vnmi_blocked)
- return 0;
-
return !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
(GUEST_INTR_STATE_MOV_SS | GUEST_INTR_STATE_STI
| GUEST_INTR_STATE_NMI));
@@ -6240,21 +6189,18 @@ static int handle_ept_violation(struct kvm_vcpu *vcpu)
unsigned long exit_qualification;
gpa_t gpa;
u32 error_code;
- int gla_validity;
exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
- gla_validity = (exit_qualification >> 7) & 0x3;
- if (gla_validity == 0x2) {
- printk(KERN_ERR "EPT: Handling EPT violation failed!\n");
- printk(KERN_ERR "EPT: GPA: 0x%lx, GVA: 0x%lx\n",
- (long unsigned int)vmcs_read64(GUEST_PHYSICAL_ADDRESS),
- vmcs_readl(GUEST_LINEAR_ADDRESS));
- printk(KERN_ERR "EPT: Exit qualification is 0x%lx\n",
- (long unsigned int)exit_qualification);
- vcpu->run->exit_reason = KVM_EXIT_UNKNOWN;
- vcpu->run->hw.hardware_exit_reason = EXIT_REASON_EPT_VIOLATION;
- return 0;
+ if (is_guest_mode(vcpu)
+ && !(exit_qualification & EPT_VIOLATION_GVA_TRANSLATED)) {
+ /*
+ * Fix up exit_qualification according to whether guest
+ * page table accesses are reads or writes.
+ */
+ u64 eptp = nested_ept_get_cr3(vcpu);
+ if (!(eptp & VMX_EPT_AD_ENABLE_BIT))
+ exit_qualification &= ~EPT_VIOLATION_ACC_WRITE;
}
/*
@@ -6264,7 +6210,6 @@ static int handle_ept_violation(struct kvm_vcpu *vcpu)
* AAK134, BY25.
*/
if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) &&
- cpu_has_virtual_nmis() &&
(exit_qualification & INTR_INFO_UNBLOCK_NMI))
vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, GUEST_INTR_STATE_NMI);
@@ -6350,7 +6295,7 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
if (intr_window_requested && vmx_interrupt_allowed(vcpu))
return handle_interrupt_window(&vmx->vcpu);
- if (test_bit(KVM_REQ_EVENT, &vcpu->requests))
+ if (kvm_test_request(KVM_REQ_EVENT, vcpu))
return 1;
err = emulate_instruction(vcpu, EMULTYPE_NO_REEXECUTE);
@@ -7103,34 +7048,24 @@ out_msr_bitmap:
static int handle_vmon(struct kvm_vcpu *vcpu)
{
int ret;
- struct kvm_segment cs;
struct vcpu_vmx *vmx = to_vmx(vcpu);
const u64 VMXON_NEEDED_FEATURES = FEATURE_CONTROL_LOCKED
| FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
- /* The Intel VMX Instruction Reference lists a bunch of bits that
- * are prerequisite to running VMXON, most notably cr4.VMXE must be
- * set to 1 (see vmx_set_cr4() for when we allow the guest to set this).
- * Otherwise, we should fail with #UD. We test these now:
+ /*
+ * The Intel VMX Instruction Reference lists a bunch of bits that are
+ * prerequisite to running VMXON, most notably cr4.VMXE must be set to
+ * 1 (see vmx_set_cr4() for when we allow the guest to set this).
+ * Otherwise, we should fail with #UD. But most faulting conditions
+ * have already been checked by hardware, prior to the VM-exit for
+ * VMXON. We do test guest cr4.VMXE because processor CR4 always has
+ * that bit set to 1 in non-root mode.
*/
- if (!kvm_read_cr4_bits(vcpu, X86_CR4_VMXE) ||
- !kvm_read_cr0_bits(vcpu, X86_CR0_PE) ||
- (vmx_get_rflags(vcpu) & X86_EFLAGS_VM)) {
+ if (!kvm_read_cr4_bits(vcpu, X86_CR4_VMXE)) {
kvm_queue_exception(vcpu, UD_VECTOR);
return 1;
}
- vmx_get_segment(vcpu, &cs, VCPU_SREG_CS);
- if (is_long_mode(vcpu) && !cs.l) {
- kvm_queue_exception(vcpu, UD_VECTOR);
- return 1;
- }
-
- if (vmx_get_cpl(vcpu)) {
- kvm_inject_gp(vcpu, 0);
- return 1;
- }
-
if (vmx->nested.vmxon) {
nested_vmx_failValid(vcpu, VMXERR_VMXON_IN_VMX_ROOT_OPERATION);
return kvm_skip_emulated_instruction(vcpu);
@@ -7157,29 +7092,15 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
* Intel's VMX Instruction Reference specifies a common set of prerequisites
* for running VMX instructions (except VMXON, whose prerequisites are
* slightly different). It also specifies what exception to inject otherwise.
+ * Note that many of these exceptions have priority over VM exits, so they
+ * don't have to be checked again here.
*/
static int nested_vmx_check_permission(struct kvm_vcpu *vcpu)
{
- struct kvm_segment cs;
- struct vcpu_vmx *vmx = to_vmx(vcpu);
-
- if (!vmx->nested.vmxon) {
+ if (!to_vmx(vcpu)->nested.vmxon) {
kvm_queue_exception(vcpu, UD_VECTOR);
return 0;
}
-
- vmx_get_segment(vcpu, &cs, VCPU_SREG_CS);
- if ((vmx_get_rflags(vcpu) & X86_EFLAGS_VM) ||
- (is_long_mode(vcpu) && !cs.l)) {
- kvm_queue_exception(vcpu, UD_VECTOR);
- return 0;
- }
-
- if (vmx_get_cpl(vcpu)) {
- kvm_inject_gp(vcpu, 0);
- return 0;
- }
-
return 1;
}
@@ -7523,7 +7444,7 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
if (get_vmx_mem_address(vcpu, exit_qualification,
vmx_instruction_info, true, &gva))
return 1;
- /* _system ok, as nested_vmx_check_permission verified cpl=0 */
+ /* _system ok, as hardware has verified cpl=0 */
kvm_write_guest_virt_system(&vcpu->arch.emulate_ctxt, gva,
&field_value, (is_long_mode(vcpu) ? 8 : 4), NULL);
}
@@ -7656,7 +7577,7 @@ static int handle_vmptrst(struct kvm_vcpu *vcpu)
if (get_vmx_mem_address(vcpu, exit_qualification,
vmx_instruction_info, true, &vmcs_gva))
return 1;
- /* ok to use *_system, as nested_vmx_check_permission verified cpl=0 */
+ /* ok to use *_system, as hardware has verified cpl=0 */
if (kvm_write_guest_virt_system(&vcpu->arch.emulate_ctxt, vmcs_gva,
(void *)&to_vmx(vcpu)->nested.current_vmptr,
sizeof(u64), &e)) {
@@ -7689,11 +7610,6 @@ static int handle_invept(struct kvm_vcpu *vcpu)
if (!nested_vmx_check_permission(vcpu))
return 1;
- if (!kvm_read_cr0_bits(vcpu, X86_CR0_PE)) {
- kvm_queue_exception(vcpu, UD_VECTOR);
- return 1;
- }
-
vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf);
@@ -7815,7 +7731,6 @@ static int handle_pml_full(struct kvm_vcpu *vcpu)
* "blocked by NMI" bit has to be set before next VM entry.
*/
if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) &&
- cpu_has_virtual_nmis() &&
(exit_qualification & INTR_INFO_UNBLOCK_NMI))
vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
GUEST_INTR_STATE_NMI);
@@ -8117,6 +8032,10 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
return nested_cpu_has(vmcs12, CPU_BASED_INVLPG_EXITING);
case EXIT_REASON_RDPMC:
return nested_cpu_has(vmcs12, CPU_BASED_RDPMC_EXITING);
+ case EXIT_REASON_RDRAND:
+ return nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDRAND);
+ case EXIT_REASON_RDSEED:
+ return nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDSEED);
case EXIT_REASON_RDTSC: case EXIT_REASON_RDTSCP:
return nested_cpu_has(vmcs12, CPU_BASED_RDTSC_EXITING);
case EXIT_REASON_VMCALL: case EXIT_REASON_VMCLEAR:
@@ -8490,26 +8409,6 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu)
return 0;
}
- if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked &&
- !(is_guest_mode(vcpu) && nested_cpu_has_virtual_nmis(
- get_vmcs12(vcpu))))) {
- if (vmx_interrupt_allowed(vcpu)) {
- vmx->soft_vnmi_blocked = 0;
- } else if (vmx->vnmi_blocked_time > 1000000000LL &&
- vcpu->arch.nmi_pending) {
- /*
- * This CPU don't support us in finding the end of an
- * NMI-blocked window if the guest runs with IRQs
- * disabled. So we pull the trigger after 1 s of
- * futile waiting, but inform the user about this.
- */
- printk(KERN_WARNING "%s: Breaking out of NMI-blocked "
- "state on VCPU %d after 1 s timeout\n",
- __func__, vcpu->vcpu_id);
- vmx->soft_vnmi_blocked = 0;
- }
- }
-
if (exit_reason < kvm_vmx_max_exit_handlers
&& kvm_vmx_exit_handlers[exit_reason])
return kvm_vmx_exit_handlers[exit_reason](vcpu);
@@ -8785,37 +8684,33 @@ static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx)
idtv_info_valid = vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK;
- if (cpu_has_virtual_nmis()) {
- if (vmx->nmi_known_unmasked)
- return;
- /*
- * Can't use vmx->exit_intr_info since we're not sure what
- * the exit reason is.
- */
- exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
- unblock_nmi = (exit_intr_info & INTR_INFO_UNBLOCK_NMI) != 0;
- vector = exit_intr_info & INTR_INFO_VECTOR_MASK;
- /*
- * SDM 3: 27.7.1.2 (September 2008)
- * Re-set bit "block by NMI" before VM entry if vmexit caused by
- * a guest IRET fault.
- * SDM 3: 23.2.2 (September 2008)
- * Bit 12 is undefined in any of the following cases:
- * If the VM exit sets the valid bit in the IDT-vectoring
- * information field.
- * If the VM exit is due to a double fault.
- */
- if ((exit_intr_info & INTR_INFO_VALID_MASK) && unblock_nmi &&
- vector != DF_VECTOR && !idtv_info_valid)
- vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
- GUEST_INTR_STATE_NMI);
- else
- vmx->nmi_known_unmasked =
- !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO)
- & GUEST_INTR_STATE_NMI);
- } else if (unlikely(vmx->soft_vnmi_blocked))
- vmx->vnmi_blocked_time +=
- ktime_to_ns(ktime_sub(ktime_get(), vmx->entry_time));
+ if (vmx->nmi_known_unmasked)
+ return;
+ /*
+ * Can't use vmx->exit_intr_info since we're not sure what
+ * the exit reason is.
+ */
+ exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
+ unblock_nmi = (exit_intr_info & INTR_INFO_UNBLOCK_NMI) != 0;
+ vector = exit_intr_info & INTR_INFO_VECTOR_MASK;
+ /*
+ * SDM 3: 27.7.1.2 (September 2008)
+ * Re-set bit "block by NMI" before VM entry if vmexit caused by
+ * a guest IRET fault.
+ * SDM 3: 23.2.2 (September 2008)
+ * Bit 12 is undefined in any of the following cases:
+ * If the VM exit sets the valid bit in the IDT-vectoring
+ * information field.
+ * If the VM exit is due to a double fault.
+ */
+ if ((exit_intr_info & INTR_INFO_VALID_MASK) && unblock_nmi &&
+ vector != DF_VECTOR && !idtv_info_valid)
+ vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
+ GUEST_INTR_STATE_NMI);
+ else
+ vmx->nmi_known_unmasked =
+ !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO)
+ & GUEST_INTR_STATE_NMI);
}
static void __vmx_complete_interrupts(struct kvm_vcpu *vcpu,
@@ -8932,10 +8827,6 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
struct vcpu_vmx *vmx = to_vmx(vcpu);
unsigned long debugctlmsr, cr4;
- /* Record the guest's net vcpu time for enforced NMI injections. */
- if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked))
- vmx->entry_time = ktime_get();
-
/* Don't enter VMX if guest state is invalid, let the exit handler
start emulation until we arrive back to a valid state */
if (vmx->emulation_required)
@@ -9143,16 +9034,16 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
vmx_complete_interrupts(vmx);
}
-static void vmx_load_vmcs01(struct kvm_vcpu *vcpu)
+static void vmx_switch_vmcs(struct kvm_vcpu *vcpu, struct loaded_vmcs *vmcs)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
int cpu;
- if (vmx->loaded_vmcs == &vmx->vmcs01)
+ if (vmx->loaded_vmcs == vmcs)
return;
cpu = get_cpu();
- vmx->loaded_vmcs = &vmx->vmcs01;
+ vmx->loaded_vmcs = vmcs;
vmx_vcpu_put(vcpu);
vmx_vcpu_load(vcpu, cpu);
vcpu->cpu = cpu;
@@ -9170,7 +9061,7 @@ static void vmx_free_vcpu_nested(struct kvm_vcpu *vcpu)
r = vcpu_load(vcpu);
BUG_ON(r);
- vmx_load_vmcs01(vcpu);
+ vmx_switch_vmcs(vcpu, &vmx->vmcs01);
free_nested(vmx);
vcpu_put(vcpu);
}
@@ -9231,11 +9122,7 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
vmx->loaded_vmcs->shadow_vmcs = NULL;
if (!vmx->loaded_vmcs->vmcs)
goto free_msrs;
- if (!vmm_exclusive)
- kvm_cpu_vmxon(__pa(per_cpu(vmxarea, raw_smp_processor_id())));
loaded_vmcs_init(vmx->loaded_vmcs);
- if (!vmm_exclusive)
- kvm_cpu_vmxoff();
cpu = get_cpu();
vmx_vcpu_load(&vmx->vcpu, cpu);
@@ -9495,17 +9382,26 @@ static unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu)
return get_vmcs12(vcpu)->ept_pointer;
}
-static void nested_ept_init_mmu_context(struct kvm_vcpu *vcpu)
+static int nested_ept_init_mmu_context(struct kvm_vcpu *vcpu)
{
+ u64 eptp;
+
WARN_ON(mmu_is_nested(vcpu));
+ eptp = nested_ept_get_cr3(vcpu);
+ if ((eptp & VMX_EPT_AD_ENABLE_BIT) && !enable_ept_ad_bits)
+ return 1;
+
+ kvm_mmu_unload(vcpu);
kvm_init_shadow_ept_mmu(vcpu,
to_vmx(vcpu)->nested.nested_vmx_ept_caps &
- VMX_EPT_EXECUTE_ONLY_BIT);
+ VMX_EPT_EXECUTE_ONLY_BIT,
+ eptp & VMX_EPT_AD_ENABLE_BIT);
vcpu->arch.mmu.set_cr3 = vmx_set_cr3;
vcpu->arch.mmu.get_cr3 = nested_ept_get_cr3;
vcpu->arch.mmu.inject_page_fault = nested_ept_inject_page_fault;
vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;
+ return 0;
}
static void nested_ept_uninit_mmu_context(struct kvm_vcpu *vcpu)
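
Editor's aside: the VMX_EPT_AD_ENABLE_BIT test in the hunk above inspects bit 6 of the guest's EPT pointer, which requests that the CPU maintain accessed/dirty flags in EPT entries. A minimal standalone sketch of decoding that field; the macro names and the sample value are illustrative, not kernel definitions:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative EPTP field layout (Intel SDM Vol. 3, "Extended-Page-Table Pointer"). */
#define EPTP_MEMTYPE_WB		6ULL		/* bits 2:0: write-back memory type */
#define EPTP_WALK_LEN_4		(3ULL << 3)	/* bits 5:3: page-walk length - 1 */
#define EPTP_AD_ENABLE		(1ULL << 6)	/* bit 6: accessed/dirty flags enabled */

static bool eptp_ad_enabled(uint64_t eptp)
{
	return eptp & EPTP_AD_ENABLE;
}

int main(void)
{
	uint64_t eptp = 0x1234000ULL | EPTP_MEMTYPE_WB | EPTP_WALK_LEN_4 | EPTP_AD_ENABLE;

	printf("EPT A/D bits %s\n", eptp_ad_enabled(eptp) ? "enabled" : "disabled");
	return 0;
}

As in the hunk above, a hypervisor that does not itself support EPT A/D bits has to refuse such an EPTP rather than silently ignore the flag.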
@@ -10279,8 +10175,10 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
}
if (nested_cpu_has_ept(vmcs12)) {
- kvm_mmu_unload(vcpu);
- nested_ept_init_mmu_context(vcpu);
+ if (nested_ept_init_mmu_context(vcpu)) {
+ *entry_failure_code = ENTRY_FAIL_DEFAULT;
+ return 1;
+ }
} else if (nested_cpu_has2(vmcs12,
SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
vmx_flush_tlb_ept_only(vcpu);
@@ -10353,9 +10251,10 @@ static int check_vmentry_prereqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
if (!vmx_control_verify(vmcs12->cpu_based_vm_exec_control,
vmx->nested.nested_vmx_procbased_ctls_low,
vmx->nested.nested_vmx_procbased_ctls_high) ||
- !vmx_control_verify(vmcs12->secondary_vm_exec_control,
- vmx->nested.nested_vmx_secondary_ctls_low,
- vmx->nested.nested_vmx_secondary_ctls_high) ||
+ (nested_cpu_has(vmcs12, CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) &&
+ !vmx_control_verify(vmcs12->secondary_vm_exec_control,
+ vmx->nested.nested_vmx_secondary_ctls_low,
+ vmx->nested.nested_vmx_secondary_ctls_high)) ||
!vmx_control_verify(vmcs12->pin_based_vm_exec_control,
vmx->nested.nested_vmx_pinbased_ctls_low,
vmx->nested.nested_vmx_pinbased_ctls_high) ||
@@ -10434,7 +10333,6 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu, bool from_vmentry)
struct vcpu_vmx *vmx = to_vmx(vcpu);
struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
struct loaded_vmcs *vmcs02;
- int cpu;
u32 msr_entry_idx;
u32 exit_qual;
@@ -10447,18 +10345,12 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu, bool from_vmentry)
if (!(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS))
vmx->nested.vmcs01_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL);
- cpu = get_cpu();
- vmx->loaded_vmcs = vmcs02;
- vmx_vcpu_put(vcpu);
- vmx_vcpu_load(vcpu, cpu);
- vcpu->cpu = cpu;
- put_cpu();
-
+ vmx_switch_vmcs(vcpu, vmcs02);
vmx_segment_cache_clear(vmx);
if (prepare_vmcs02(vcpu, vmcs12, from_vmentry, &exit_qual)) {
leave_guest_mode(vcpu);
- vmx_load_vmcs01(vcpu);
+ vmx_switch_vmcs(vcpu, &vmx->vmcs01);
nested_vmx_entry_failure(vcpu, vmcs12,
EXIT_REASON_INVALID_STATE, exit_qual);
return 1;
@@ -10471,7 +10363,7 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu, bool from_vmentry)
vmcs12->vm_entry_msr_load_count);
if (msr_entry_idx) {
leave_guest_mode(vcpu);
- vmx_load_vmcs01(vcpu);
+ vmx_switch_vmcs(vcpu, &vmx->vmcs01);
nested_vmx_entry_failure(vcpu, vmcs12,
EXIT_REASON_MSR_LOAD_FAIL, msr_entry_idx);
return 1;
@@ -11039,7 +10931,7 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
if (unlikely(vmx->fail))
vm_inst_error = vmcs_read32(VM_INSTRUCTION_ERROR);
- vmx_load_vmcs01(vcpu);
+ vmx_switch_vmcs(vcpu, &vmx->vmcs01);
if ((exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT)
&& nested_exit_intr_ack_set(vcpu)) {
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index ccbd45ecd41a..464da936c53d 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -27,7 +27,6 @@
#include "kvm_cache_regs.h"
#include "x86.h"
#include "cpuid.h"
-#include "assigned-dev.h"
#include "pmu.h"
#include "hyperv.h"
@@ -1008,6 +1007,8 @@ static u32 emulated_msrs[] = {
MSR_IA32_MCG_CTL,
MSR_IA32_MCG_EXT_CTL,
MSR_IA32_SMBASE,
+ MSR_PLATFORM_INFO,
+ MSR_MISC_FEATURES_ENABLES,
};
static unsigned num_emulated_msrs;
@@ -1444,10 +1445,10 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr)
struct kvm *kvm = vcpu->kvm;
u64 offset, ns, elapsed;
unsigned long flags;
- s64 usdiff;
bool matched;
bool already_matched;
u64 data = msr->data;
+ bool synchronizing = false;
raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
offset = kvm_compute_tsc_offset(vcpu, data);
@@ -1455,51 +1456,34 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr)
elapsed = ns - kvm->arch.last_tsc_nsec;
if (vcpu->arch.virtual_tsc_khz) {
- int faulted = 0;
-
- /* n.b - signed multiplication and division required */
- usdiff = data - kvm->arch.last_tsc_write;
-#ifdef CONFIG_X86_64
- usdiff = (usdiff * 1000) / vcpu->arch.virtual_tsc_khz;
-#else
- /* do_div() only does unsigned */
- asm("1: idivl %[divisor]\n"
- "2: xor %%edx, %%edx\n"
- " movl $0, %[faulted]\n"
- "3:\n"
- ".section .fixup,\"ax\"\n"
- "4: movl $1, %[faulted]\n"
- " jmp 3b\n"
- ".previous\n"
-
- _ASM_EXTABLE(1b, 4b)
-
- : "=A"(usdiff), [faulted] "=r" (faulted)
- : "A"(usdiff * 1000), [divisor] "rm"(vcpu->arch.virtual_tsc_khz));
-
-#endif
- do_div(elapsed, 1000);
- usdiff -= elapsed;
- if (usdiff < 0)
- usdiff = -usdiff;
-
- /* idivl overflow => difference is larger than USEC_PER_SEC */
- if (faulted)
- usdiff = USEC_PER_SEC;
- } else
- usdiff = USEC_PER_SEC; /* disable TSC match window below */
+ if (data == 0 && msr->host_initiated) {
+ /*
+ * Detection of vCPU initialization -- we need to sync
+ * with the other vCPUs. This particularly helps to keep
+ * kvm_clock stable after CPU hotplug.
+ */
+ synchronizing = true;
+ } else {
+ u64 tsc_exp = kvm->arch.last_tsc_write +
+ nsec_to_cycles(vcpu, elapsed);
+ u64 tsc_hz = vcpu->arch.virtual_tsc_khz * 1000LL;
+ /*
+ * Special case: TSC write with a small delta (1 second)
+ * of virtual cycle time against real time is
+ * interpreted as an attempt to synchronize the CPU.
+ */
+ synchronizing = data < tsc_exp + tsc_hz &&
+ data + tsc_hz > tsc_exp;
+ }
+ }
/*
- * Special case: TSC write with a small delta (1 second) of virtual
- * cycle time against real time is interpreted as an attempt to
- * synchronize the CPU.
- *
* For a reliable TSC, we can match TSC offsets, and for an unstable
* TSC, we add elapsed time in this computation. We could let the
* compensation code attempt to catch up if we fall behind, but
* it's better to try to match offsets from the beginning.
*/
- if (usdiff < USEC_PER_SEC &&
+ if (synchronizing &&
vcpu->arch.virtual_tsc_khz == kvm->arch.last_tsc_khz) {
if (!check_tsc_unstable()) {
offset = kvm->arch.cur_tsc_offset;
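
Editor's aside: the replacement logic above boils down to treating a guest TSC write as a synchronization attempt when it lands within one second's worth of virtual TSC cycles of the value the previously written TSC should have reached by now. A standalone sketch of that predicate; the function and variable names are illustrative, not the kernel's:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * data    - the TSC value the guest is writing
 * tsc_exp - where the last written TSC is expected to be by now
 * tsc_khz - the vCPU's virtual TSC frequency in kHz
 */
static bool tsc_write_is_sync(uint64_t data, uint64_t tsc_exp, uint64_t tsc_khz)
{
	uint64_t tsc_hz = tsc_khz * 1000ULL;	/* one second worth of cycles */

	return data < tsc_exp + tsc_hz && data + tsc_hz > tsc_exp;
}

int main(void)
{
	/* 2.5 GHz vCPU: a write 0.4 s of cycles ahead of the expected value still counts as a sync. */
	uint64_t tsc_khz = 2500000;
	uint64_t tsc_exp = 1000000000ULL;

	printf("%d\n", tsc_write_is_sync(tsc_exp + 1000000000ULL, tsc_exp, tsc_khz));
	return 0;
}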
@@ -1769,13 +1753,13 @@ static void kvm_gen_update_masterclock(struct kvm *kvm)
/* guest entries allowed */
kvm_for_each_vcpu(i, vcpu, kvm)
- clear_bit(KVM_REQ_MCLOCK_INPROGRESS, &vcpu->requests);
+ kvm_clear_request(KVM_REQ_MCLOCK_INPROGRESS, vcpu);
spin_unlock(&ka->pvclock_gtod_sync_lock);
#endif
}
-static u64 __get_kvmclock_ns(struct kvm *kvm)
+u64 get_kvmclock_ns(struct kvm *kvm)
{
struct kvm_arch *ka = &kvm->arch;
struct pvclock_vcpu_time_info hv_clock;
@@ -1796,24 +1780,12 @@ static u64 __get_kvmclock_ns(struct kvm *kvm)
return __pvclock_read_cycles(&hv_clock, rdtsc());
}
-u64 get_kvmclock_ns(struct kvm *kvm)
-{
- unsigned long flags;
- s64 ns;
-
- local_irq_save(flags);
- ns = __get_kvmclock_ns(kvm);
- local_irq_restore(flags);
-
- return ns;
-}
-
static void kvm_setup_pvclock_page(struct kvm_vcpu *v)
{
struct kvm_vcpu_arch *vcpu = &v->arch;
struct pvclock_vcpu_time_info guest_hv_clock;
- if (unlikely(kvm_vcpu_read_guest_cached(v, &vcpu->pv_time,
+ if (unlikely(kvm_read_guest_cached(v->kvm, &vcpu->pv_time,
&guest_hv_clock, sizeof(guest_hv_clock))))
return;
@@ -1834,9 +1806,9 @@ static void kvm_setup_pvclock_page(struct kvm_vcpu *v)
BUILD_BUG_ON(offsetof(struct pvclock_vcpu_time_info, version) != 0);
vcpu->hv_clock.version = guest_hv_clock.version + 1;
- kvm_vcpu_write_guest_cached(v, &vcpu->pv_time,
- &vcpu->hv_clock,
- sizeof(vcpu->hv_clock.version));
+ kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
+ &vcpu->hv_clock,
+ sizeof(vcpu->hv_clock.version));
smp_wmb();
@@ -1850,16 +1822,16 @@ static void kvm_setup_pvclock_page(struct kvm_vcpu *v)
trace_kvm_pvclock_update(v->vcpu_id, &vcpu->hv_clock);
- kvm_vcpu_write_guest_cached(v, &vcpu->pv_time,
- &vcpu->hv_clock,
- sizeof(vcpu->hv_clock));
+ kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
+ &vcpu->hv_clock,
+ sizeof(vcpu->hv_clock));
smp_wmb();
vcpu->hv_clock.version++;
- kvm_vcpu_write_guest_cached(v, &vcpu->pv_time,
- &vcpu->hv_clock,
- sizeof(vcpu->hv_clock.version));
+ kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
+ &vcpu->hv_clock,
+ sizeof(vcpu->hv_clock.version));
}
static int kvm_guest_time_update(struct kvm_vcpu *v)
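
Editor's aside: the three cached writes above implement the usual pvclock version protocol: bump the version to an odd value, publish the new time info, then bump it back to an even value, with write barriers in between. A hedged sketch of the matching guest-side read loop, using a simplified structure (the real pvclock_vcpu_time_info has more fields):

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for struct pvclock_vcpu_time_info. */
struct pvclock_sample {
	uint32_t version;
	uint64_t tsc_timestamp;
	uint64_t system_time;
};

static uint64_t read_system_time(const volatile struct pvclock_sample *p)
{
	uint32_t version;
	uint64_t time;

	do {
		version = p->version;
		__atomic_thread_fence(__ATOMIC_ACQUIRE);	/* pairs with the host's smp_wmb() */
		time = p->system_time;
		__atomic_thread_fence(__ATOMIC_ACQUIRE);
	} while ((version & 1) || version != p->version);	/* retry on in-progress or torn update */

	return time;
}

int main(void)
{
	static struct pvclock_sample sample = { .version = 2, .system_time = 123456789 };

	printf("%llu\n", (unsigned long long)read_system_time(&sample));
	return 0;
}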
@@ -2092,7 +2064,7 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
return 0;
}
- if (kvm_vcpu_gfn_to_hva_cache_init(vcpu, &vcpu->arch.apf.data, gpa,
+ if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa,
sizeof(u32)))
return 1;
@@ -2111,7 +2083,7 @@ static void record_steal_time(struct kvm_vcpu *vcpu)
if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
return;
- if (unlikely(kvm_vcpu_read_guest_cached(vcpu, &vcpu->arch.st.stime,
+ if (unlikely(kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
&vcpu->arch.st.steal, sizeof(struct kvm_steal_time))))
return;
@@ -2122,7 +2094,7 @@ static void record_steal_time(struct kvm_vcpu *vcpu)
vcpu->arch.st.steal.version += 1;
- kvm_vcpu_write_guest_cached(vcpu, &vcpu->arch.st.stime,
+ kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
&vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
smp_wmb();
@@ -2131,14 +2103,14 @@ static void record_steal_time(struct kvm_vcpu *vcpu)
vcpu->arch.st.last_steal;
vcpu->arch.st.last_steal = current->sched_info.run_delay;
- kvm_vcpu_write_guest_cached(vcpu, &vcpu->arch.st.stime,
+ kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
&vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
smp_wmb();
vcpu->arch.st.steal.version += 1;
- kvm_vcpu_write_guest_cached(vcpu, &vcpu->arch.st.stime,
+ kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
&vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
}
@@ -2155,6 +2127,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
case MSR_VM_HSAVE_PA:
case MSR_AMD64_PATCH_LOADER:
case MSR_AMD64_BU_CFG2:
+ case MSR_AMD64_DC_CFG:
break;
case MSR_EFER:
@@ -2230,8 +2203,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
bool tmp = (msr == MSR_KVM_SYSTEM_TIME);
if (ka->boot_vcpu_runs_old_kvmclock != tmp)
- set_bit(KVM_REQ_MASTERCLOCK_UPDATE,
- &vcpu->requests);
+ kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
ka->boot_vcpu_runs_old_kvmclock = tmp;
}
@@ -2243,7 +2215,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
if (!(data & 1))
break;
- if (kvm_vcpu_gfn_to_hva_cache_init(vcpu,
+ if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
&vcpu->arch.pv_time, data & ~1ULL,
sizeof(struct pvclock_vcpu_time_info)))
vcpu->arch.pv_time_enabled = false;
@@ -2264,7 +2236,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
if (data & KVM_STEAL_RESERVED_MASK)
return 1;
- if (kvm_vcpu_gfn_to_hva_cache_init(vcpu, &vcpu->arch.st.stime,
+ if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.st.stime,
data & KVM_STEAL_VALID_BITS,
sizeof(struct kvm_steal_time)))
return 1;
@@ -2331,6 +2303,21 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
return 1;
vcpu->arch.osvw.status = data;
break;
+ case MSR_PLATFORM_INFO:
+ if (!msr_info->host_initiated ||
+ data & ~MSR_PLATFORM_INFO_CPUID_FAULT ||
+ (!(data & MSR_PLATFORM_INFO_CPUID_FAULT) &&
+ cpuid_fault_enabled(vcpu)))
+ return 1;
+ vcpu->arch.msr_platform_info = data;
+ break;
+ case MSR_MISC_FEATURES_ENABLES:
+ if (data & ~MSR_MISC_FEATURES_ENABLES_CPUID_FAULT ||
+ (data & MSR_MISC_FEATURES_ENABLES_CPUID_FAULT &&
+ !supports_cpuid_fault(vcpu)))
+ return 1;
+ vcpu->arch.msr_misc_features_enables = data;
+ break;
default:
if (msr && (msr == vcpu->kvm->arch.xen_hvm_config.msr))
return xen_hvm_config(vcpu, data);
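
Editor's aside: the two new MSR cases above expose CPUID faulting to guests. A hedged user-space sketch of turning it on through KVM_SET_MSRS; the MSR index and bit value are local assumptions following the Intel/Linux definitions, not taken from this diff:

#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Local definitions for illustration only. */
#define MSR_MISC_FEATURES_ENABLES		0x00000140
#define MSR_MISC_FEATURES_ENABLES_CPUID_FAULT	(1ULL << 0)

/* Assumes vcpu_fd came from the usual KVM_CREATE_VM / KVM_CREATE_VCPU sequence. */
static int enable_cpuid_faulting(int vcpu_fd)
{
	struct {
		struct kvm_msrs hdr;
		struct kvm_msr_entry entry;
	} msrs = {
		.hdr.nmsrs   = 1,
		.entry.index = MSR_MISC_FEATURES_ENABLES,
		.entry.data  = MSR_MISC_FEATURES_ENABLES_CPUID_FAULT,
	};

	/* KVM_SET_MSRS returns the number of MSRs successfully written. */
	return ioctl(vcpu_fd, KVM_SET_MSRS, &msrs) == 1 ? 0 : -1;
}

Per the checks in the hunk above, such a write is only accepted when the guest's MSR_PLATFORM_INFO advertises CPUID-fault support, which the reset code later in this patch enables by default.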
@@ -2417,6 +2404,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
case MSR_FAM10H_MMIO_CONF_BASE:
case MSR_AMD64_BU_CFG2:
case MSR_IA32_PERF_CTL:
+ case MSR_AMD64_DC_CFG:
msr_info->data = 0;
break;
case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3:
@@ -2545,6 +2533,12 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
return 1;
msr_info->data = vcpu->arch.osvw.status;
break;
+ case MSR_PLATFORM_INFO:
+ msr_info->data = vcpu->arch.msr_platform_info;
+ break;
+ case MSR_MISC_FEATURES_ENABLES:
+ msr_info->data = vcpu->arch.msr_misc_features_enables;
+ break;
default:
if (kvm_pmu_is_valid_msr(vcpu, msr_info->index))
return kvm_pmu_get_msr(vcpu, msr_info->index, &msr_info->data);
@@ -2675,15 +2669,14 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_SET_BOOT_CPU_ID:
case KVM_CAP_SPLIT_IRQCHIP:
case KVM_CAP_IMMEDIATE_EXIT:
-#ifdef CONFIG_KVM_DEVICE_ASSIGNMENT
- case KVM_CAP_ASSIGN_DEV_IRQ:
- case KVM_CAP_PCI_2_3:
-#endif
r = 1;
break;
case KVM_CAP_ADJUST_CLOCK:
r = KVM_CLOCK_TSC_STABLE;
break;
+ case KVM_CAP_X86_GUEST_MWAIT:
+ r = kvm_mwait_in_guest();
+ break;
case KVM_CAP_X86_SMM:
/* SMBASE is usually relocated above 1M on modern chipsets,
* and SMM handlers might indeed rely on 4G segment limits,
@@ -2695,9 +2688,6 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
*/
r = kvm_x86_ops->cpu_has_high_real_mode_segbase();
break;
- case KVM_CAP_COALESCED_MMIO:
- r = KVM_COALESCED_MMIO_PAGE_OFFSET;
- break;
case KVM_CAP_VAPIC:
r = !kvm_x86_ops->cpu_has_accelerated_tpr();
break;
@@ -2713,11 +2703,6 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_PV_MMU: /* obsolete */
r = 0;
break;
-#ifdef CONFIG_KVM_DEVICE_ASSIGNMENT
- case KVM_CAP_IOMMU:
- r = iommu_present(&pci_bus_type);
- break;
-#endif
case KVM_CAP_MCE:
r = KVM_MAX_MCE_BANKS;
break;
@@ -2816,11 +2801,6 @@ static bool need_emulate_wbinvd(struct kvm_vcpu *vcpu)
return kvm_arch_has_noncoherent_dma(vcpu->kvm);
}
-static inline void kvm_migrate_timers(struct kvm_vcpu *vcpu)
-{
- set_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests);
-}
-
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
/* Address WBINVD may be executed by guest */
@@ -2864,7 +2844,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
if (!vcpu->kvm->arch.use_master_clock || vcpu->cpu == -1)
kvm_make_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu);
if (vcpu->cpu != cpu)
- kvm_migrate_timers(vcpu);
+ kvm_make_request(KVM_REQ_MIGRATE_TIMER, vcpu);
vcpu->cpu = cpu;
}
@@ -2878,7 +2858,7 @@ static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
vcpu->arch.st.steal.preempted = 1;
- kvm_vcpu_write_guest_offset_cached(vcpu, &vcpu->arch.st.stime,
+ kvm_write_guest_offset_cached(vcpu->kvm, &vcpu->arch.st.stime,
&vcpu->arch.st.steal.preempted,
offsetof(struct kvm_steal_time, preempted),
sizeof(vcpu->arch.st.steal.preempted));
@@ -3124,7 +3104,14 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
return -EINVAL;
if (events->exception.injected &&
- (events->exception.nr > 31 || events->exception.nr == NMI_VECTOR))
+ (events->exception.nr > 31 || events->exception.nr == NMI_VECTOR ||
+ is_guest_mode(vcpu)))
+ return -EINVAL;
+
+ /* INITs are latched while in SMM */
+ if (events->flags & KVM_VCPUEVENT_VALID_SMM &&
+ (events->smi.smm || events->smi.pending) &&
+ vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED)
return -EINVAL;
process_nmi(vcpu);
@@ -3721,22 +3708,21 @@ static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
{
+ struct kvm_pic *pic = kvm->arch.vpic;
int r;
r = 0;
switch (chip->chip_id) {
case KVM_IRQCHIP_PIC_MASTER:
- memcpy(&chip->chip.pic,
- &pic_irqchip(kvm)->pics[0],
+ memcpy(&chip->chip.pic, &pic->pics[0],
sizeof(struct kvm_pic_state));
break;
case KVM_IRQCHIP_PIC_SLAVE:
- memcpy(&chip->chip.pic,
- &pic_irqchip(kvm)->pics[1],
+ memcpy(&chip->chip.pic, &pic->pics[1],
sizeof(struct kvm_pic_state));
break;
case KVM_IRQCHIP_IOAPIC:
- r = kvm_get_ioapic(kvm, &chip->chip.ioapic);
+ kvm_get_ioapic(kvm, &chip->chip.ioapic);
break;
default:
r = -EINVAL;
@@ -3747,32 +3733,31 @@ static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
{
+ struct kvm_pic *pic = kvm->arch.vpic;
int r;
r = 0;
switch (chip->chip_id) {
case KVM_IRQCHIP_PIC_MASTER:
- spin_lock(&pic_irqchip(kvm)->lock);
- memcpy(&pic_irqchip(kvm)->pics[0],
- &chip->chip.pic,
+ spin_lock(&pic->lock);
+ memcpy(&pic->pics[0], &chip->chip.pic,
sizeof(struct kvm_pic_state));
- spin_unlock(&pic_irqchip(kvm)->lock);
+ spin_unlock(&pic->lock);
break;
case KVM_IRQCHIP_PIC_SLAVE:
- spin_lock(&pic_irqchip(kvm)->lock);
- memcpy(&pic_irqchip(kvm)->pics[1],
- &chip->chip.pic,
+ spin_lock(&pic->lock);
+ memcpy(&pic->pics[1], &chip->chip.pic,
sizeof(struct kvm_pic_state));
- spin_unlock(&pic_irqchip(kvm)->lock);
+ spin_unlock(&pic->lock);
break;
case KVM_IRQCHIP_IOAPIC:
- r = kvm_set_ioapic(kvm, &chip->chip.ioapic);
+ kvm_set_ioapic(kvm, &chip->chip.ioapic);
break;
default:
r = -EINVAL;
break;
}
- kvm_pic_update_irq(pic_irqchip(kvm));
+ kvm_pic_update_irq(pic);
return r;
}
@@ -4018,20 +4003,14 @@ long kvm_arch_vm_ioctl(struct file *filp,
r = kvm_ioapic_init(kvm);
if (r) {
- mutex_lock(&kvm->slots_lock);
kvm_pic_destroy(kvm);
- mutex_unlock(&kvm->slots_lock);
goto create_irqchip_unlock;
}
r = kvm_setup_default_irq_routing(kvm);
if (r) {
- mutex_lock(&kvm->slots_lock);
- mutex_lock(&kvm->irq_lock);
kvm_ioapic_destroy(kvm);
kvm_pic_destroy(kvm);
- mutex_unlock(&kvm->irq_lock);
- mutex_unlock(&kvm->slots_lock);
goto create_irqchip_unlock;
}
/* Write kvm->irq_routing before enabling irqchip_in_kernel. */
@@ -4196,10 +4175,8 @@ long kvm_arch_vm_ioctl(struct file *filp,
goto out;
r = 0;
- local_irq_disable();
- now_ns = __get_kvmclock_ns(kvm);
+ now_ns = get_kvmclock_ns(kvm);
kvm->arch.kvmclock_offset += user_ns.clock - now_ns;
- local_irq_enable();
kvm_gen_update_masterclock(kvm);
break;
}
@@ -4207,11 +4184,9 @@ long kvm_arch_vm_ioctl(struct file *filp,
struct kvm_clock_data user_ns;
u64 now_ns;
- local_irq_disable();
- now_ns = __get_kvmclock_ns(kvm);
+ now_ns = get_kvmclock_ns(kvm);
user_ns.clock = now_ns;
user_ns.flags = kvm->arch.use_master_clock ? KVM_CLOCK_TSC_STABLE : 0;
- local_irq_enable();
memset(&user_ns.pad, 0, sizeof(user_ns.pad));
r = -EFAULT;
@@ -4230,7 +4205,7 @@ long kvm_arch_vm_ioctl(struct file *filp,
break;
}
default:
- r = kvm_vm_ioctl_assigned_device(kvm, ioctl, arg);
+ r = -ENOTTY;
}
out:
return r;
@@ -5223,6 +5198,16 @@ static void emulator_set_nmi_mask(struct x86_emulate_ctxt *ctxt, bool masked)
kvm_x86_ops->set_nmi_mask(emul_to_vcpu(ctxt), masked);
}
+static unsigned emulator_get_hflags(struct x86_emulate_ctxt *ctxt)
+{
+ return emul_to_vcpu(ctxt)->arch.hflags;
+}
+
+static void emulator_set_hflags(struct x86_emulate_ctxt *ctxt, unsigned emul_flags)
+{
+ kvm_set_hflags(emul_to_vcpu(ctxt), emul_flags);
+}
+
static const struct x86_emulate_ops emulate_ops = {
.read_gpr = emulator_read_gpr,
.write_gpr = emulator_write_gpr,
@@ -5262,6 +5247,8 @@ static const struct x86_emulate_ops emulate_ops = {
.intercept = emulator_intercept,
.get_cpuid = emulator_get_cpuid,
.set_nmi_mask = emulator_set_nmi_mask,
+ .get_hflags = emulator_get_hflags,
+ .set_hflags = emulator_set_hflags,
};
static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask)
@@ -5314,7 +5301,6 @@ static void init_emulate_ctxt(struct kvm_vcpu *vcpu)
BUILD_BUG_ON(HF_GUEST_MASK != X86EMUL_GUEST_MASK);
BUILD_BUG_ON(HF_SMM_MASK != X86EMUL_SMM_MASK);
BUILD_BUG_ON(HF_SMM_INSIDE_NMI_MASK != X86EMUL_SMM_INSIDE_NMI_MASK);
- ctxt->emul_flags = vcpu->arch.hflags;
init_decode_cache(ctxt);
vcpu->arch.emulate_regs_need_sync_from_vcpu = false;
@@ -5718,8 +5704,6 @@ restart:
unsigned long rflags = kvm_x86_ops->get_rflags(vcpu);
toggle_interruptibility(vcpu, ctxt->interruptibility);
vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
- if (vcpu->arch.hflags != ctxt->emul_flags)
- kvm_set_hflags(vcpu, ctxt->emul_flags);
kvm_rip_write(vcpu, ctxt->eip);
if (r == EMULATE_DONE)
kvm_vcpu_check_singlestep(vcpu, rflags, &r);
@@ -6869,7 +6853,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
/*
* 1) We should set ->mode before checking ->requests. Please see
- * the comment in kvm_make_all_cpus_request.
+ * the comment in kvm_vcpu_exiting_guest_mode().
*
* 2) For APICv, we should set ->mode before checking PIR.ON. This
* pairs with the memory barrier implicit in pi_test_and_set_on
@@ -7051,7 +7035,7 @@ static int vcpu_run(struct kvm_vcpu *vcpu)
if (r <= 0)
break;
- clear_bit(KVM_REQ_PENDING_TIMER, &vcpu->requests);
+ kvm_clear_request(KVM_REQ_PENDING_TIMER, vcpu);
if (kvm_cpu_has_pending_timer(vcpu))
kvm_inject_pending_timer_irqs(vcpu);
@@ -7179,7 +7163,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
kvm_vcpu_block(vcpu);
kvm_apic_accept_events(vcpu);
- clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
+ kvm_clear_request(KVM_REQ_UNHALT, vcpu);
r = -EAGAIN;
goto out;
}
@@ -7355,6 +7339,12 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
mp_state->mp_state != KVM_MP_STATE_RUNNABLE)
return -EINVAL;
+ /* INITs are latched while in SMM */
+ if ((is_smm(vcpu) || vcpu->arch.smi_pending) &&
+ (mp_state->mp_state == KVM_MP_STATE_SIPI_RECEIVED ||
+ mp_state->mp_state == KVM_MP_STATE_INIT_RECEIVED))
+ return -EINVAL;
+
if (mp_state->mp_state == KVM_MP_STATE_SIPI_RECEIVED) {
vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
set_bit(KVM_APIC_SIPI, &vcpu->arch.apic->pending_events);
@@ -7724,6 +7714,9 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
if (!init_event) {
kvm_pmu_reset(vcpu);
vcpu->arch.smbase = 0x30000;
+
+ vcpu->arch.msr_platform_info = MSR_PLATFORM_INFO_CPUID_FAULT;
+ vcpu->arch.msr_misc_features_enables = 0;
}
memset(vcpu->arch.regs, 0, sizeof(vcpu->arch.regs));
@@ -8068,7 +8061,6 @@ void kvm_arch_sync_events(struct kvm *kvm)
{
cancel_delayed_work_sync(&kvm->arch.kvmclock_sync_work);
cancel_delayed_work_sync(&kvm->arch.kvmclock_update_work);
- kvm_free_all_assigned_devices(kvm);
kvm_free_pit(kvm);
}
@@ -8152,7 +8144,6 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
}
if (kvm_x86_ops->vm_destroy)
kvm_x86_ops->vm_destroy(kvm);
- kvm_iommu_unmap_guest(kvm);
kvm_pic_destroy(kvm);
kvm_ioapic_destroy(kvm);
kvm_free_vcpus(kvm);
@@ -8199,13 +8190,13 @@ int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
slot->base_gfn, level) + 1;
slot->arch.rmap[i] =
- kvm_kvzalloc(lpages * sizeof(*slot->arch.rmap[i]));
+ kvzalloc(lpages * sizeof(*slot->arch.rmap[i]), GFP_KERNEL);
if (!slot->arch.rmap[i])
goto out_free;
if (i == 0)
continue;
- linfo = kvm_kvzalloc(lpages * sizeof(*linfo));
+ linfo = kvzalloc(lpages * sizeof(*linfo), GFP_KERNEL);
if (!linfo)
goto out_free;
@@ -8385,7 +8376,7 @@ static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
if (atomic_read(&vcpu->arch.nmi_queued))
return true;
- if (test_bit(KVM_REQ_SMI, &vcpu->requests))
+ if (kvm_test_request(KVM_REQ_SMI, vcpu))
return true;
if (kvm_arch_interrupt_allowed(vcpu) &&
@@ -8536,8 +8527,9 @@ static void kvm_del_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
static int apf_put_user(struct kvm_vcpu *vcpu, u32 val)
{
- return kvm_vcpu_write_guest_cached(vcpu, &vcpu->arch.apf.data, &val,
- sizeof(val));
+
+ return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apf.data, &val,
+ sizeof(val));
}
void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index e8ff3e4ce38a..612067074905 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -1,6 +1,8 @@
#ifndef ARCH_X86_KVM_X86_H
#define ARCH_X86_KVM_X86_H
+#include <asm/processor.h>
+#include <asm/mwait.h>
#include <linux/kvm_host.h>
#include <asm/pvclock.h>
#include "kvm_cache_regs.h"
@@ -212,4 +214,38 @@ static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec)
__rem; \
})
+static inline bool kvm_mwait_in_guest(void)
+{
+ unsigned int eax, ebx, ecx, edx;
+
+ if (!cpu_has(&boot_cpu_data, X86_FEATURE_MWAIT))
+ return false;
+
+ switch (boot_cpu_data.x86_vendor) {
+ case X86_VENDOR_AMD:
+ /* All AMD CPUs have a working MWAIT implementation */
+ return true;
+ case X86_VENDOR_INTEL:
+ /* Handle Intel below */
+ break;
+ default:
+ return false;
+ }
+
+ /*
+ * Intel CPUs without CPUID5_ECX_INTERRUPT_BREAK are problematic:
+ * they would allow the guest to stop the CPU completely by
+ * disabling interrupts and then invoking MWAIT.
+ */
+ if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF)
+ return false;
+
+ cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx);
+
+ if (!(ecx & CPUID5_ECX_INTERRUPT_BREAK))
+ return false;
+
+ return true;
+}
+
#endif
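
Editor's aside: kvm_mwait_in_guest() above feeds the new KVM_CAP_X86_GUEST_MWAIT capability reported from kvm_vm_ioctl_check_extension() earlier in this patch. A small user-space probe for it might look like this; a sketch that assumes a linux/kvm.h new enough to define the capability:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
	int kvm, r;

	kvm = open("/dev/kvm", O_RDWR | O_CLOEXEC);
	if (kvm < 0) {
		perror("/dev/kvm");
		return 1;
	}

	/* Non-zero means MWAIT/MONITOR can be left to execute natively in guests. */
	r = ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_X86_GUEST_MWAIT);
	printf("KVM_CAP_X86_GUEST_MWAIT: %d\n", r);
	return 0;
}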
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 138bad2fb6bc..cbc87ea98751 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -5,7 +5,7 @@
#include <linux/memblock.h>
#include <linux/bootmem.h> /* for max_low_pfn */
-#include <asm/cacheflush.h>
+#include <asm/set_memory.h>
#include <asm/e820/api.h>
#include <asm/init.h>
#include <asm/page.h>
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index f34d275ee201..99fb83819a5f 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -48,7 +48,7 @@
#include <asm/sections.h>
#include <asm/paravirt.h>
#include <asm/setup.h>
-#include <asm/cacheflush.h>
+#include <asm/set_memory.h>
#include <asm/page_types.h>
#include <asm/init.h>
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 745e5e183169..41270b96403d 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -50,7 +50,7 @@
#include <asm/sections.h>
#include <asm/kdebug.h>
#include <asm/numa.h>
-#include <asm/cacheflush.h>
+#include <asm/set_memory.h>
#include <asm/init.h>
#include <asm/uv/uv.h>
#include <asm/setup.h>
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index e4f7b25df18e..bbc558b88a88 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -14,7 +14,7 @@
#include <linux/vmalloc.h>
#include <linux/mmiotrace.h>
-#include <asm/cacheflush.h>
+#include <asm/set_memory.h>
#include <asm/e820/api.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 56b22fa504df..1dcd2be4cce4 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -24,6 +24,7 @@
#include <asm/pgalloc.h>
#include <asm/proto.h>
#include <asm/pat.h>
+#include <asm/set_memory.h>
/*
* The current flushing context - we pass it instead of 5 arguments:
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 14f840df1d95..f58939393eef 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -12,6 +12,7 @@
#include <linux/filter.h>
#include <linux/if_vlan.h>
#include <asm/cacheflush.h>
+#include <asm/set_memory.h>
#include <linux/bpf.h>
int bpf_jit_enable __read_mostly;
diff --git a/arch/x86/pci/i386.c b/arch/x86/pci/i386.c
index 6fa84d531f4f..7b4307163eac 100644
--- a/arch/x86/pci/i386.c
+++ b/arch/x86/pci/i386.c
@@ -406,50 +406,3 @@ void __init pcibios_resource_survey(void)
*/
ioapic_insert_resources();
}
-
-static const struct vm_operations_struct pci_mmap_ops = {
- .access = generic_access_phys,
-};
-
-int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
- enum pci_mmap_state mmap_state, int write_combine)
-{
- unsigned long prot;
-
- /* I/O space cannot be accessed via normal processor loads and
- * stores on this platform.
- */
- if (mmap_state == pci_mmap_io)
- return -EINVAL;
-
- prot = pgprot_val(vma->vm_page_prot);
-
- /*
- * Return error if pat is not enabled and write_combine is requested.
- * Caller can followup with UC MINUS request and add a WC mtrr if there
- * is a free mtrr slot.
- */
- if (!pat_enabled() && write_combine)
- return -EINVAL;
-
- if (pat_enabled() && write_combine)
- prot |= cachemode2protval(_PAGE_CACHE_MODE_WC);
- else if (pat_enabled() || boot_cpu_data.x86 > 3)
- /*
- * ioremap() and ioremap_nocache() defaults to UC MINUS for now.
- * To avoid attribute conflicts, request UC MINUS here
- * as well.
- */
- prot |= cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS);
-
- vma->vm_page_prot = __pgprot(prot);
-
- if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
- vma->vm_end - vma->vm_start,
- vma->vm_page_prot))
- return -EAGAIN;
-
- vma->vm_ops = &pci_mmap_ops;
-
- return 0;
-}
diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
index 29e9ba6ace9d..c1bdb9edcae7 100644
--- a/arch/x86/pci/pcbios.c
+++ b/arch/x86/pci/pcbios.c
@@ -11,7 +11,7 @@
#include <asm/pci_x86.h>
#include <asm/e820/types.h>
#include <asm/pci-functions.h>
-#include <asm/cacheflush.h>
+#include <asm/set_memory.h>
/* BIOS32 signature: "_32_" */
#define BIOS32_SIGNATURE (('_' << 0) + ('3' << 8) + ('2' << 16) + ('_' << 24))
diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
index 292ab0364a89..c4b3646bd04c 100644
--- a/arch/x86/pci/xen.c
+++ b/arch/x86/pci/xen.c
@@ -447,7 +447,7 @@ void __init xen_msi_init(void)
int __init pci_xen_hvm_init(void)
{
- if (!xen_feature(XENFEAT_hvm_pirqs))
+ if (!xen_have_vector_callback || !xen_feature(XENFEAT_hvm_pirqs))
return 0;
#ifdef CONFIG_ACPI
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index a15cf815ac4e..7e76a4d8304b 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -49,7 +49,7 @@
#include <asm/efi.h>
#include <asm/e820/api.h>
#include <asm/time.h>
-#include <asm/cacheflush.h>
+#include <asm/set_memory.h>
#include <asm/tlbflush.h>
#include <asm/x86_init.h>
#include <asm/uv/uv.h>
diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c
index 5db706f14111..a163a90af4aa 100644
--- a/arch/x86/realmode/init.c
+++ b/arch/x86/realmode/init.c
@@ -2,7 +2,7 @@
#include <linux/slab.h>
#include <linux/memblock.h>
-#include <asm/cacheflush.h>
+#include <asm/set_memory.h>
#include <asm/pgtable.h>
#include <asm/realmode.h>
#include <asm/tlbflush.h>
diff --git a/arch/x86/xen/Kconfig b/arch/x86/xen/Kconfig
index 76b6dbd627df..027987638e98 100644
--- a/arch/x86/xen/Kconfig
+++ b/arch/x86/xen/Kconfig
@@ -6,8 +6,6 @@ config XEN
bool "Xen guest support"
depends on PARAVIRT
select PARAVIRT_CLOCK
- select XEN_HAVE_PVMMU
- select XEN_HAVE_VPMU
depends on X86_64 || (X86_32 && X86_PAE)
depends on X86_LOCAL_APIC && X86_TSC
help
@@ -15,18 +13,41 @@ config XEN
kernel to boot in a paravirtualized environment under the
Xen hypervisor.
-config XEN_DOM0
+config XEN_PV
+ bool "Xen PV guest support"
+ default y
+ depends on XEN
+ select XEN_HAVE_PVMMU
+ select XEN_HAVE_VPMU
+ help
+ Support running as a Xen PV guest.
+
+config XEN_PV_SMP
def_bool y
- depends on XEN && PCI_XEN && SWIOTLB_XEN
+ depends on XEN_PV && SMP
+
+config XEN_DOM0
+ bool "Xen PV Dom0 support"
+ default y
+ depends on XEN_PV && PCI_XEN && SWIOTLB_XEN
depends on X86_IO_APIC && ACPI && PCI
+ help
+ Support running as a Xen PV Dom0 guest.
config XEN_PVHVM
- def_bool y
+ bool "Xen PVHVM guest support"
+ default y
depends on XEN && PCI && X86_LOCAL_APIC
+ help
+ Support running as a Xen PVHVM guest.
+
+config XEN_PVHVM_SMP
+ def_bool y
+ depends on XEN_PVHVM && SMP
config XEN_512GB
bool "Limit Xen pv-domain memory to 512GB"
- depends on XEN && X86_64
+ depends on XEN_PV && X86_64
default y
help
Limit paravirtualized user domains to 512GB of RAM.
diff --git a/arch/x86/xen/Makefile b/arch/x86/xen/Makefile
index cb0164aee156..fffb0a16f9e3 100644
--- a/arch/x86/xen/Makefile
+++ b/arch/x86/xen/Makefile
@@ -7,17 +7,23 @@ endif
# Make sure early boot has no stackprotector
nostackp := $(call cc-option, -fno-stack-protector)
-CFLAGS_enlighten.o := $(nostackp)
-CFLAGS_mmu.o := $(nostackp)
+CFLAGS_enlighten_pv.o := $(nostackp)
+CFLAGS_mmu_pv.o := $(nostackp)
-obj-y := enlighten.o setup.o multicalls.o mmu.o irq.o \
+obj-y := enlighten.o multicalls.o mmu.o irq.o \
time.o xen-asm.o xen-asm_$(BITS).o \
- grant-table.o suspend.o platform-pci-unplug.o \
- p2m.o apic.o pmu.o
+ grant-table.o suspend.o platform-pci-unplug.o
+
+obj-$(CONFIG_XEN_PVHVM) += enlighten_hvm.o mmu_hvm.o suspend_hvm.o
+obj-$(CONFIG_XEN_PV) += setup.o apic.o pmu.o suspend_pv.o \
+ p2m.o enlighten_pv.o mmu_pv.o
+obj-$(CONFIG_XEN_PVH) += enlighten_pvh.o
obj-$(CONFIG_EVENT_TRACING) += trace.o
obj-$(CONFIG_SMP) += smp.o
+obj-$(CONFIG_XEN_PV_SMP) += smp_pv.o
+obj-$(CONFIG_XEN_PVHVM_SMP) += smp_hvm.o
obj-$(CONFIG_PARAVIRT_SPINLOCKS)+= spinlock.o
obj-$(CONFIG_XEN_DEBUG_FS) += debugfs.o
obj-$(CONFIG_XEN_DOM0) += vga.o
diff --git a/arch/x86/xen/efi.c b/arch/x86/xen/efi.c
index 3be012115853..30bb2e80cfe7 100644
--- a/arch/x86/xen/efi.c
+++ b/arch/x86/xen/efi.c
@@ -81,7 +81,7 @@ static const struct efi efi_xen __initconst = {
.update_capsule = xen_efi_update_capsule,
.query_capsule_caps = xen_efi_query_capsule_caps,
.get_next_high_mono_count = xen_efi_get_next_high_mono_count,
- .reset_system = NULL, /* Functionality provided by Xen. */
+ .reset_system = xen_efi_reset_system,
.set_virtual_address_map = NULL, /* Not used under Xen. */
.flags = 0 /* Initialized later. */
};
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 30822e8e64ac..a5ffcbb20cc0 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -1,95 +1,16 @@
-/*
- * Core of Xen paravirt_ops implementation.
- *
- * This file contains the xen_paravirt_ops structure itself, and the
- * implementations for:
- * - privileged instructions
- * - interrupt flags
- * - segment operations
- * - booting and setup
- *
- * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
- */
-
#include <linux/cpu.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/smp.h>
-#include <linux/preempt.h>
-#include <linux/hardirq.h>
-#include <linux/percpu.h>
-#include <linux/delay.h>
-#include <linux/start_kernel.h>
-#include <linux/sched.h>
-#include <linux/kprobes.h>
-#include <linux/bootmem.h>
-#include <linux/export.h>
-#include <linux/mm.h>
-#include <linux/page-flags.h>
-#include <linux/highmem.h>
-#include <linux/console.h>
-#include <linux/pci.h>
-#include <linux/gfp.h>
-#include <linux/memblock.h>
-#include <linux/edd.h>
-#include <linux/frame.h>
-
#include <linux/kexec.h>
-#include <xen/xen.h>
-#include <xen/events.h>
-#include <xen/interface/xen.h>
-#include <xen/interface/version.h>
-#include <xen/interface/physdev.h>
-#include <xen/interface/vcpu.h>
-#include <xen/interface/memory.h>
-#include <xen/interface/nmi.h>
-#include <xen/interface/xen-mca.h>
-#include <xen/interface/hvm/start_info.h>
#include <xen/features.h>
#include <xen/page.h>
-#include <xen/hvm.h>
-#include <xen/hvc-console.h>
-#include <xen/acpi.h>
-#include <asm/paravirt.h>
-#include <asm/apic.h>
-#include <asm/page.h>
-#include <asm/xen/pci.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>
-#include <asm/xen/cpuid.h>
-#include <asm/fixmap.h>
-#include <asm/processor.h>
-#include <asm/proto.h>
-#include <asm/msr-index.h>
-#include <asm/traps.h>
-#include <asm/setup.h>
-#include <asm/desc.h>
-#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
-#include <asm/tlbflush.h>
-#include <asm/reboot.h>
-#include <asm/stackprotector.h>
-#include <asm/hypervisor.h>
-#include <asm/mach_traps.h>
-#include <asm/mwait.h>
-#include <asm/pci_x86.h>
#include <asm/cpu.h>
#include <asm/e820/api.h>
-#ifdef CONFIG_ACPI
-#include <linux/acpi.h>
-#include <asm/acpi.h>
-#include <acpi/pdc_intel.h>
-#include <acpi/processor.h>
-#include <xen/interface/platform.h>
-#endif
-
#include "xen-ops.h"
-#include "mmu.h"
#include "smp.h"
-#include "multicalls.h"
#include "pmu.h"
EXPORT_SYMBOL_GPL(hypercall_page);
@@ -136,13 +57,8 @@ EXPORT_SYMBOL_GPL(xen_start_info);
struct shared_info xen_dummy_shared_info;
-void *xen_initial_gdt;
-
-RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
-
-static int xen_cpu_up_prepare(unsigned int cpu);
-static int xen_cpu_up_online(unsigned int cpu);
-static int xen_cpu_dead(unsigned int cpu);
+__read_mostly int xen_have_vector_callback;
+EXPORT_SYMBOL_GPL(xen_have_vector_callback);
/*
* Point at some empty memory to start with. We map the real shared_info
@@ -163,34 +79,32 @@ struct shared_info *HYPERVISOR_shared_info = &xen_dummy_shared_info;
*
* 0: not available, 1: available
*/
-static int have_vcpu_info_placement = 1;
+int xen_have_vcpu_info_placement = 1;
-struct tls_descs {
- struct desc_struct desc[3];
-};
+static int xen_cpu_up_online(unsigned int cpu)
+{
+ xen_init_lock_cpu(cpu);
+ return 0;
+}
-/*
- * Updating the 3 TLS descriptors in the GDT on every task switch is
- * surprisingly expensive so we avoid updating them if they haven't
- * changed. Since Xen writes different descriptors than the one
- * passed in the update_descriptor hypercall we keep shadow copies to
- * compare against.
- */
-static DEFINE_PER_CPU(struct tls_descs, shadow_tls_desc);
+int xen_cpuhp_setup(int (*cpu_up_prepare_cb)(unsigned int),
+ int (*cpu_dead_cb)(unsigned int))
+{
+ int rc;
-#ifdef CONFIG_XEN_PVH
-/*
- * PVH variables.
- *
- * xen_pvh and pvh_bootparams need to live in data segment since they
- * are used after startup_{32|64}, which clear .bss, are invoked.
- */
-bool xen_pvh __attribute__((section(".data"))) = 0;
-struct boot_params pvh_bootparams __attribute__((section(".data")));
+ rc = cpuhp_setup_state_nocalls(CPUHP_XEN_PREPARE,
+ "x86/xen/hvm_guest:prepare",
+ cpu_up_prepare_cb, cpu_dead_cb);
+ if (rc >= 0) {
+ rc = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
+ "x86/xen/hvm_guest:online",
+ xen_cpu_up_online, NULL);
+ if (rc < 0)
+ cpuhp_remove_state_nocalls(CPUHP_XEN_PREPARE);
+ }
-struct hvm_start_info pvh_start_info;
-unsigned int pvh_start_info_sz = sizeof(pvh_start_info);
-#endif
+ return rc >= 0 ? 0 : rc;
+}
static void clamp_max_cpus(void)
{
@@ -227,7 +141,7 @@ void xen_vcpu_setup(int cpu)
per_cpu(xen_vcpu, cpu) =
&HYPERVISOR_shared_info->vcpu_info[xen_vcpu_nr(cpu)];
- if (!have_vcpu_info_placement) {
+ if (!xen_have_vcpu_info_placement) {
if (cpu >= MAX_VIRT_CPUS)
clamp_max_cpus();
return;
@@ -250,7 +164,7 @@ void xen_vcpu_setup(int cpu)
if (err) {
printk(KERN_DEBUG "register_vcpu_info failed: err=%d\n", err);
- have_vcpu_info_placement = 0;
+ xen_have_vcpu_info_placement = 0;
clamp_max_cpus();
} else {
/* This cpu is using the registered vcpu info, even if
@@ -259,1043 +173,7 @@ void xen_vcpu_setup(int cpu)
}
}
-/*
- * On restore, set the vcpu placement up again.
- * If it fails, then we're in a bad state, since
- * we can't back out from using it...
- */
-void xen_vcpu_restore(void)
-{
- int cpu;
-
- for_each_possible_cpu(cpu) {
- bool other_cpu = (cpu != smp_processor_id());
- bool is_up = HYPERVISOR_vcpu_op(VCPUOP_is_up, xen_vcpu_nr(cpu),
- NULL);
-
- if (other_cpu && is_up &&
- HYPERVISOR_vcpu_op(VCPUOP_down, xen_vcpu_nr(cpu), NULL))
- BUG();
-
- xen_setup_runstate_info(cpu);
-
- if (have_vcpu_info_placement)
- xen_vcpu_setup(cpu);
-
- if (other_cpu && is_up &&
- HYPERVISOR_vcpu_op(VCPUOP_up, xen_vcpu_nr(cpu), NULL))
- BUG();
- }
-}
-
-static void __init xen_banner(void)
-{
- unsigned version = HYPERVISOR_xen_version(XENVER_version, NULL);
- struct xen_extraversion extra;
- HYPERVISOR_xen_version(XENVER_extraversion, &extra);
-
- pr_info("Booting paravirtualized kernel %son %s\n",
- xen_feature(XENFEAT_auto_translated_physmap) ?
- "with PVH extensions " : "", pv_info.name);
- printk(KERN_INFO "Xen version: %d.%d%s%s\n",
- version >> 16, version & 0xffff, extra.extraversion,
- xen_feature(XENFEAT_mmu_pt_update_preserve_ad) ? " (preserve-AD)" : "");
-}
-/* Check if running on Xen version (major, minor) or later */
-bool
-xen_running_on_version_or_later(unsigned int major, unsigned int minor)
-{
- unsigned int version;
-
- if (!xen_domain())
- return false;
-
- version = HYPERVISOR_xen_version(XENVER_version, NULL);
- if ((((version >> 16) == major) && ((version & 0xffff) >= minor)) ||
- ((version >> 16) > major))
- return true;
- return false;
-}
-
-#define CPUID_THERM_POWER_LEAF 6
-#define APERFMPERF_PRESENT 0
-
-static __read_mostly unsigned int cpuid_leaf1_edx_mask = ~0;
-static __read_mostly unsigned int cpuid_leaf1_ecx_mask = ~0;
-
-static __read_mostly unsigned int cpuid_leaf1_ecx_set_mask;
-static __read_mostly unsigned int cpuid_leaf5_ecx_val;
-static __read_mostly unsigned int cpuid_leaf5_edx_val;
-
-static void xen_cpuid(unsigned int *ax, unsigned int *bx,
- unsigned int *cx, unsigned int *dx)
-{
- unsigned maskebx = ~0;
- unsigned maskecx = ~0;
- unsigned maskedx = ~0;
- unsigned setecx = 0;
- /*
- * Mask out inconvenient features, to try and disable as many
- * unsupported kernel subsystems as possible.
- */
- switch (*ax) {
- case 1:
- maskecx = cpuid_leaf1_ecx_mask;
- setecx = cpuid_leaf1_ecx_set_mask;
- maskedx = cpuid_leaf1_edx_mask;
- break;
-
- case CPUID_MWAIT_LEAF:
- /* Synthesize the values.. */
- *ax = 0;
- *bx = 0;
- *cx = cpuid_leaf5_ecx_val;
- *dx = cpuid_leaf5_edx_val;
- return;
-
- case CPUID_THERM_POWER_LEAF:
- /* Disabling APERFMPERF for kernel usage */
- maskecx = ~(1 << APERFMPERF_PRESENT);
- break;
-
- case 0xb:
- /* Suppress extended topology stuff */
- maskebx = 0;
- break;
- }
-
- asm(XEN_EMULATE_PREFIX "cpuid"
- : "=a" (*ax),
- "=b" (*bx),
- "=c" (*cx),
- "=d" (*dx)
- : "0" (*ax), "2" (*cx));
-
- *bx &= maskebx;
- *cx &= maskecx;
- *cx |= setecx;
- *dx &= maskedx;
-}
-STACK_FRAME_NON_STANDARD(xen_cpuid); /* XEN_EMULATE_PREFIX */
-
-static bool __init xen_check_mwait(void)
-{
-#ifdef CONFIG_ACPI
- struct xen_platform_op op = {
- .cmd = XENPF_set_processor_pminfo,
- .u.set_pminfo.id = -1,
- .u.set_pminfo.type = XEN_PM_PDC,
- };
- uint32_t buf[3];
- unsigned int ax, bx, cx, dx;
- unsigned int mwait_mask;
-
- /* We need to determine whether it is OK to expose the MWAIT
- * capability to the kernel to harvest deeper than C3 states from ACPI
- * _CST using the processor_harvest_xen.c module. For this to work, we
- * need to gather the MWAIT_LEAF values (which the cstate.c code
- * checks against). The hypervisor won't expose the MWAIT flag because
- * it would break backwards compatibility; so we will find out directly
- * from the hardware and hypercall.
- */
- if (!xen_initial_domain())
- return false;
-
- /*
- * When running under platform earlier than Xen4.2, do not expose
- * mwait, to avoid the risk of loading native acpi pad driver
- */
- if (!xen_running_on_version_or_later(4, 2))
- return false;
-
- ax = 1;
- cx = 0;
-
- native_cpuid(&ax, &bx, &cx, &dx);
-
- mwait_mask = (1 << (X86_FEATURE_EST % 32)) |
- (1 << (X86_FEATURE_MWAIT % 32));
-
- if ((cx & mwait_mask) != mwait_mask)
- return false;
-
- /* We need to emulate the MWAIT_LEAF and for that we need both
- * ecx and edx. The hypercall provides only partial information.
- */
-
- ax = CPUID_MWAIT_LEAF;
- bx = 0;
- cx = 0;
- dx = 0;
-
- native_cpuid(&ax, &bx, &cx, &dx);
-
- /* Ask the Hypervisor whether to clear ACPI_PDC_C_C2C3_FFH. If so,
- * don't expose MWAIT_LEAF and let ACPI pick the IOPORT version of C3.
- */
- buf[0] = ACPI_PDC_REVISION_ID;
- buf[1] = 1;
- buf[2] = (ACPI_PDC_C_CAPABILITY_SMP | ACPI_PDC_EST_CAPABILITY_SWSMP);
-
- set_xen_guest_handle(op.u.set_pminfo.pdc, buf);
-
- if ((HYPERVISOR_platform_op(&op) == 0) &&
- (buf[2] & (ACPI_PDC_C_C1_FFH | ACPI_PDC_C_C2C3_FFH))) {
- cpuid_leaf5_ecx_val = cx;
- cpuid_leaf5_edx_val = dx;
- }
- return true;
-#else
- return false;
-#endif
-}
-static void __init xen_init_cpuid_mask(void)
-{
- unsigned int ax, bx, cx, dx;
- unsigned int xsave_mask;
-
- cpuid_leaf1_edx_mask =
- ~((1 << X86_FEATURE_MTRR) | /* disable MTRR */
- (1 << X86_FEATURE_ACC)); /* thermal monitoring */
-
- if (!xen_initial_domain())
- cpuid_leaf1_edx_mask &=
- ~((1 << X86_FEATURE_ACPI)); /* disable ACPI */
-
- cpuid_leaf1_ecx_mask &= ~(1 << (X86_FEATURE_X2APIC % 32));
-
- ax = 1;
- cx = 0;
- cpuid(1, &ax, &bx, &cx, &dx);
-
- xsave_mask =
- (1 << (X86_FEATURE_XSAVE % 32)) |
- (1 << (X86_FEATURE_OSXSAVE % 32));
-
- /* Xen will set CR4.OSXSAVE if supported and not disabled by force */
- if ((cx & xsave_mask) != xsave_mask)
- cpuid_leaf1_ecx_mask &= ~xsave_mask; /* disable XSAVE & OSXSAVE */
- if (xen_check_mwait())
- cpuid_leaf1_ecx_set_mask = (1 << (X86_FEATURE_MWAIT % 32));
-}
-
-static void xen_set_debugreg(int reg, unsigned long val)
-{
- HYPERVISOR_set_debugreg(reg, val);
-}
-
-static unsigned long xen_get_debugreg(int reg)
-{
- return HYPERVISOR_get_debugreg(reg);
-}
-
-static void xen_end_context_switch(struct task_struct *next)
-{
- xen_mc_flush();
- paravirt_end_context_switch(next);
-}
-
-static unsigned long xen_store_tr(void)
-{
- return 0;
-}
-
-/*
- * Set the page permissions for a particular virtual address. If the
- * address is a vmalloc mapping (or other non-linear mapping), then
- * find the linear mapping of the page and also set its protections to
- * match.
- */
-static void set_aliased_prot(void *v, pgprot_t prot)
-{
- int level;
- pte_t *ptep;
- pte_t pte;
- unsigned long pfn;
- struct page *page;
- unsigned char dummy;
-
- ptep = lookup_address((unsigned long)v, &level);
- BUG_ON(ptep == NULL);
-
- pfn = pte_pfn(*ptep);
- page = pfn_to_page(pfn);
-
- pte = pfn_pte(pfn, prot);
-
- /*
- * Careful: update_va_mapping() will fail if the virtual address
- * we're poking isn't populated in the page tables. We don't
- * need to worry about the direct map (that's always in the page
- * tables), but we need to be careful about vmap space. In
- * particular, the top level page table can lazily propagate
- * entries between processes, so if we've switched mms since we
- * vmapped the target in the first place, we might not have the
- * top-level page table entry populated.
- *
- * We disable preemption because we want the same mm active when
- * we probe the target and when we issue the hypercall. We'll
- * have the same nominal mm, but if we're a kernel thread, lazy
- * mm dropping could change our pgd.
- *
- * Out of an abundance of caution, this uses __get_user() to fault
- * in the target address just in case there's some obscure case
- * in which the target address isn't readable.
- */
-
- preempt_disable();
-
- probe_kernel_read(&dummy, v, 1);
-
- if (HYPERVISOR_update_va_mapping((unsigned long)v, pte, 0))
- BUG();
-
- if (!PageHighMem(page)) {
- void *av = __va(PFN_PHYS(pfn));
-
- if (av != v)
- if (HYPERVISOR_update_va_mapping((unsigned long)av, pte, 0))
- BUG();
- } else
- kmap_flush_unused();
-
- preempt_enable();
-}
-
-static void xen_alloc_ldt(struct desc_struct *ldt, unsigned entries)
-{
- const unsigned entries_per_page = PAGE_SIZE / LDT_ENTRY_SIZE;
- int i;
-
- /*
- * We need to mark the all aliases of the LDT pages RO. We
- * don't need to call vm_flush_aliases(), though, since that's
- * only responsible for flushing aliases out the TLBs, not the
- * page tables, and Xen will flush the TLB for us if needed.
- *
- * To avoid confusing future readers: none of this is necessary
- * to load the LDT. The hypervisor only checks this when the
- * LDT is faulted in due to subsequent descriptor access.
- */
-
- for(i = 0; i < entries; i += entries_per_page)
- set_aliased_prot(ldt + i, PAGE_KERNEL_RO);
-}
-
-static void xen_free_ldt(struct desc_struct *ldt, unsigned entries)
-{
- const unsigned entries_per_page = PAGE_SIZE / LDT_ENTRY_SIZE;
- int i;
-
- for(i = 0; i < entries; i += entries_per_page)
- set_aliased_prot(ldt + i, PAGE_KERNEL);
-}
-
-static void xen_set_ldt(const void *addr, unsigned entries)
-{
- struct mmuext_op *op;
- struct multicall_space mcs = xen_mc_entry(sizeof(*op));
-
- trace_xen_cpu_set_ldt(addr, entries);
-
- op = mcs.args;
- op->cmd = MMUEXT_SET_LDT;
- op->arg1.linear_addr = (unsigned long)addr;
- op->arg2.nr_ents = entries;
-
- MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
-
- xen_mc_issue(PARAVIRT_LAZY_CPU);
-}
-
-static void xen_load_gdt(const struct desc_ptr *dtr)
-{
- unsigned long va = dtr->address;
- unsigned int size = dtr->size + 1;
- unsigned pages = DIV_ROUND_UP(size, PAGE_SIZE);
- unsigned long frames[pages];
- int f;
-
- /*
- * A GDT can be up to 64k in size, which corresponds to 8192
- * 8-byte entries, or 16 4k pages..
- */
-
- BUG_ON(size > 65536);
- BUG_ON(va & ~PAGE_MASK);
-
- for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) {
- int level;
- pte_t *ptep;
- unsigned long pfn, mfn;
- void *virt;
-
- /*
- * The GDT is per-cpu and is in the percpu data area.
- * That can be virtually mapped, so we need to do a
- * page-walk to get the underlying MFN for the
- * hypercall. The page can also be in the kernel's
- * linear range, so we need to RO that mapping too.
- */
- ptep = lookup_address(va, &level);
- BUG_ON(ptep == NULL);
-
- pfn = pte_pfn(*ptep);
- mfn = pfn_to_mfn(pfn);
- virt = __va(PFN_PHYS(pfn));
-
- frames[f] = mfn;
-
- make_lowmem_page_readonly((void *)va);
- make_lowmem_page_readonly(virt);
- }
-
- if (HYPERVISOR_set_gdt(frames, size / sizeof(struct desc_struct)))
- BUG();
-}
-
-/*
- * load_gdt for early boot, when the gdt is only mapped once
- */
-static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
-{
- unsigned long va = dtr->address;
- unsigned int size = dtr->size + 1;
- unsigned pages = DIV_ROUND_UP(size, PAGE_SIZE);
- unsigned long frames[pages];
- int f;
-
- /*
- * A GDT can be up to 64k in size, which corresponds to 8192
- * 8-byte entries, or 16 4k pages..
- */
-
- BUG_ON(size > 65536);
- BUG_ON(va & ~PAGE_MASK);
-
- for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) {
- pte_t pte;
- unsigned long pfn, mfn;
-
- pfn = virt_to_pfn(va);
- mfn = pfn_to_mfn(pfn);
-
- pte = pfn_pte(pfn, PAGE_KERNEL_RO);
-
- if (HYPERVISOR_update_va_mapping((unsigned long)va, pte, 0))
- BUG();
-
- frames[f] = mfn;
- }
-
- if (HYPERVISOR_set_gdt(frames, size / sizeof(struct desc_struct)))
- BUG();
-}
-
-static inline bool desc_equal(const struct desc_struct *d1,
- const struct desc_struct *d2)
-{
- return d1->a == d2->a && d1->b == d2->b;
-}
-
-static void load_TLS_descriptor(struct thread_struct *t,
- unsigned int cpu, unsigned int i)
-{
- struct desc_struct *shadow = &per_cpu(shadow_tls_desc, cpu).desc[i];
- struct desc_struct *gdt;
- xmaddr_t maddr;
- struct multicall_space mc;
-
- if (desc_equal(shadow, &t->tls_array[i]))
- return;
-
- *shadow = t->tls_array[i];
-
- gdt = get_cpu_gdt_rw(cpu);
- maddr = arbitrary_virt_to_machine(&gdt[GDT_ENTRY_TLS_MIN+i]);
- mc = __xen_mc_entry(0);
-
- MULTI_update_descriptor(mc.mc, maddr.maddr, t->tls_array[i]);
-}
-
-static void xen_load_tls(struct thread_struct *t, unsigned int cpu)
-{
- /*
- * XXX sleazy hack: If we're being called in a lazy-cpu zone
- * and lazy gs handling is enabled, it means we're in a
- * context switch, and %gs has just been saved. This means we
- * can zero it out to prevent faults on exit from the
- * hypervisor if the next process has no %gs. Either way, it
- * has been saved, and the new value will get loaded properly.
- * This will go away as soon as Xen has been modified to not
- * save/restore %gs for normal hypercalls.
- *
- * On x86_64, this hack is not used for %gs, because gs points
- * to KERNEL_GS_BASE (and uses it for PDA references), so we
- * must not zero %gs on x86_64
- *
- * For x86_64, we need to zero %fs, otherwise we may get an
- * exception between the new %fs descriptor being loaded and
- * %fs being effectively cleared at __switch_to().
- */
- if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_CPU) {
-#ifdef CONFIG_X86_32
- lazy_load_gs(0);
-#else
- loadsegment(fs, 0);
-#endif
- }
-
- xen_mc_batch();
-
- load_TLS_descriptor(t, cpu, 0);
- load_TLS_descriptor(t, cpu, 1);
- load_TLS_descriptor(t, cpu, 2);
-
- xen_mc_issue(PARAVIRT_LAZY_CPU);
-}
-
-#ifdef CONFIG_X86_64
-static void xen_load_gs_index(unsigned int idx)
-{
- if (HYPERVISOR_set_segment_base(SEGBASE_GS_USER_SEL, idx))
- BUG();
-}
-#endif
-
-static void xen_write_ldt_entry(struct desc_struct *dt, int entrynum,
- const void *ptr)
-{
- xmaddr_t mach_lp = arbitrary_virt_to_machine(&dt[entrynum]);
- u64 entry = *(u64 *)ptr;
-
- trace_xen_cpu_write_ldt_entry(dt, entrynum, entry);
-
- preempt_disable();
-
- xen_mc_flush();
- if (HYPERVISOR_update_descriptor(mach_lp.maddr, entry))
- BUG();
-
- preempt_enable();
-}
-
-static int cvt_gate_to_trap(int vector, const gate_desc *val,
- struct trap_info *info)
-{
- unsigned long addr;
-
- if (val->type != GATE_TRAP && val->type != GATE_INTERRUPT)
- return 0;
-
- info->vector = vector;
-
- addr = gate_offset(*val);
-#ifdef CONFIG_X86_64
- /*
- * Look for known traps using IST, and substitute them
- * appropriately. The debugger ones are the only ones we care
- * about. Xen will handle faults like double_fault,
- * so we should never see them. Warn if
- * there's an unexpected IST-using fault handler.
- */
- if (addr == (unsigned long)debug)
- addr = (unsigned long)xen_debug;
- else if (addr == (unsigned long)int3)
- addr = (unsigned long)xen_int3;
- else if (addr == (unsigned long)stack_segment)
- addr = (unsigned long)xen_stack_segment;
- else if (addr == (unsigned long)double_fault) {
- /* Don't need to handle these */
- return 0;
-#ifdef CONFIG_X86_MCE
- } else if (addr == (unsigned long)machine_check) {
- /*
- * When the Xen hypervisor injects a vMCE into the guest,
- * use the native MCE handler to handle it.
- */
- ;
-#endif
- } else if (addr == (unsigned long)nmi)
- /*
- * Use the native version as well.
- */
- ;
- else {
- /* Some other trap using IST? */
- if (WARN_ON(val->ist != 0))
- return 0;
- }
-#endif /* CONFIG_X86_64 */
- info->address = addr;
-
- info->cs = gate_segment(*val);
- info->flags = val->dpl;
- /* interrupt gates clear IF */
- if (val->type == GATE_INTERRUPT)
- info->flags |= 1 << 2;
-
- return 1;
-}
-
-/* Locations of each CPU's IDT */
-static DEFINE_PER_CPU(struct desc_ptr, idt_desc);
-
-/* Set an IDT entry. If the entry is part of the current IDT, then
- also update Xen. */
-static void xen_write_idt_entry(gate_desc *dt, int entrynum, const gate_desc *g)
-{
- unsigned long p = (unsigned long)&dt[entrynum];
- unsigned long start, end;
-
- trace_xen_cpu_write_idt_entry(dt, entrynum, g);
-
- preempt_disable();
-
- start = __this_cpu_read(idt_desc.address);
- end = start + __this_cpu_read(idt_desc.size) + 1;
-
- xen_mc_flush();
-
- native_write_idt_entry(dt, entrynum, g);
-
- if (p >= start && (p + 8) <= end) {
- struct trap_info info[2];
-
- info[1].address = 0;
-
- if (cvt_gate_to_trap(entrynum, g, &info[0]))
- if (HYPERVISOR_set_trap_table(info))
- BUG();
- }
-
- preempt_enable();
-}
-
-static void xen_convert_trap_info(const struct desc_ptr *desc,
- struct trap_info *traps)
-{
- unsigned in, out, count;
-
- count = (desc->size+1) / sizeof(gate_desc);
- BUG_ON(count > 256);
-
- for (in = out = 0; in < count; in++) {
- gate_desc *entry = (gate_desc*)(desc->address) + in;
-
- if (cvt_gate_to_trap(in, entry, &traps[out]))
- out++;
- }
- traps[out].address = 0;
-}
-
-void xen_copy_trap_info(struct trap_info *traps)
-{
- const struct desc_ptr *desc = this_cpu_ptr(&idt_desc);
-
- xen_convert_trap_info(desc, traps);
-}
-
-/* Load a new IDT into Xen. In principle this can be per-CPU, so we
- hold a spinlock to protect the static traps[] array (static because
- it avoids allocation, and saves stack space). */
-static void xen_load_idt(const struct desc_ptr *desc)
-{
- static DEFINE_SPINLOCK(lock);
- static struct trap_info traps[257];
-
- trace_xen_cpu_load_idt(desc);
-
- spin_lock(&lock);
-
- memcpy(this_cpu_ptr(&idt_desc), desc, sizeof(idt_desc));
-
- xen_convert_trap_info(desc, traps);
-
- xen_mc_flush();
- if (HYPERVISOR_set_trap_table(traps))
- BUG();
-
- spin_unlock(&lock);
-}
-
-/* Write a GDT descriptor entry. Ignore LDT descriptors, since
- they're handled differently. */
-static void xen_write_gdt_entry(struct desc_struct *dt, int entry,
- const void *desc, int type)
-{
- trace_xen_cpu_write_gdt_entry(dt, entry, desc, type);
-
- preempt_disable();
-
- switch (type) {
- case DESC_LDT:
- case DESC_TSS:
- /* ignore */
- break;
-
- default: {
- xmaddr_t maddr = arbitrary_virt_to_machine(&dt[entry]);
-
- xen_mc_flush();
- if (HYPERVISOR_update_descriptor(maddr.maddr, *(u64 *)desc))
- BUG();
- }
-
- }
-
- preempt_enable();
-}
-
-/*
- * Version of write_gdt_entry for use at early boot-time needed to
- * update an entry as simply as possible.
- */
-static void __init xen_write_gdt_entry_boot(struct desc_struct *dt, int entry,
- const void *desc, int type)
-{
- trace_xen_cpu_write_gdt_entry(dt, entry, desc, type);
-
- switch (type) {
- case DESC_LDT:
- case DESC_TSS:
- /* ignore */
- break;
-
- default: {
- xmaddr_t maddr = virt_to_machine(&dt[entry]);
-
- if (HYPERVISOR_update_descriptor(maddr.maddr, *(u64 *)desc))
- dt[entry] = *(struct desc_struct *)desc;
- }
-
- }
-}
-
-static void xen_load_sp0(struct tss_struct *tss,
- struct thread_struct *thread)
-{
- struct multicall_space mcs;
-
- mcs = xen_mc_entry(0);
- MULTI_stack_switch(mcs.mc, __KERNEL_DS, thread->sp0);
- xen_mc_issue(PARAVIRT_LAZY_CPU);
- tss->x86_tss.sp0 = thread->sp0;
-}
-
-void xen_set_iopl_mask(unsigned mask)
-{
- struct physdev_set_iopl set_iopl;
-
- /* Force the change at ring 0. */
- set_iopl.iopl = (mask == 0) ? 1 : (mask >> 12) & 3;
- HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl);
-}
-
-static void xen_io_delay(void)
-{
-}
-
-static DEFINE_PER_CPU(unsigned long, xen_cr0_value);
-
-static unsigned long xen_read_cr0(void)
-{
- unsigned long cr0 = this_cpu_read(xen_cr0_value);
-
- if (unlikely(cr0 == 0)) {
- cr0 = native_read_cr0();
- this_cpu_write(xen_cr0_value, cr0);
- }
-
- return cr0;
-}
-
-static void xen_write_cr0(unsigned long cr0)
-{
- struct multicall_space mcs;
-
- this_cpu_write(xen_cr0_value, cr0);
-
- /* Only pay attention to cr0.TS; everything else is
- ignored. */
- mcs = xen_mc_entry(0);
-
- MULTI_fpu_taskswitch(mcs.mc, (cr0 & X86_CR0_TS) != 0);
-
- xen_mc_issue(PARAVIRT_LAZY_CPU);
-}
-
-static void xen_write_cr4(unsigned long cr4)
-{
- cr4 &= ~(X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PCE);
-
- native_write_cr4(cr4);
-}
-#ifdef CONFIG_X86_64
-static inline unsigned long xen_read_cr8(void)
-{
- return 0;
-}
-static inline void xen_write_cr8(unsigned long val)
-{
- BUG_ON(val);
-}
-#endif
-
-static u64 xen_read_msr_safe(unsigned int msr, int *err)
-{
- u64 val;
-
- if (pmu_msr_read(msr, &val, err))
- return val;
-
- val = native_read_msr_safe(msr, err);
- switch (msr) {
- case MSR_IA32_APICBASE:
-#ifdef CONFIG_X86_X2APIC
- if (!(cpuid_ecx(1) & (1 << (X86_FEATURE_X2APIC & 31))))
-#endif
- val &= ~X2APIC_ENABLE;
- break;
- }
- return val;
-}
-
-static int xen_write_msr_safe(unsigned int msr, unsigned low, unsigned high)
-{
- int ret;
-
- ret = 0;
-
- switch (msr) {
-#ifdef CONFIG_X86_64
- unsigned which;
- u64 base;
-
- case MSR_FS_BASE: which = SEGBASE_FS; goto set;
- case MSR_KERNEL_GS_BASE: which = SEGBASE_GS_USER; goto set;
- case MSR_GS_BASE: which = SEGBASE_GS_KERNEL; goto set;
-
- set:
- base = ((u64)high << 32) | low;
- if (HYPERVISOR_set_segment_base(which, base) != 0)
- ret = -EIO;
- break;
-#endif
-
- case MSR_STAR:
- case MSR_CSTAR:
- case MSR_LSTAR:
- case MSR_SYSCALL_MASK:
- case MSR_IA32_SYSENTER_CS:
- case MSR_IA32_SYSENTER_ESP:
- case MSR_IA32_SYSENTER_EIP:
- /* Fast syscall setup is all done in hypercalls, so
- these are all ignored. Stub them out here to stop
- Xen console noise. */
- break;
-
- default:
- if (!pmu_msr_write(msr, low, high, &ret))
- ret = native_write_msr_safe(msr, low, high);
- }
-
- return ret;
-}
-
-static u64 xen_read_msr(unsigned int msr)
-{
- /*
- * This will silently swallow a #GP from RDMSR. It may be worth
- * changing that.
- */
- int err;
-
- return xen_read_msr_safe(msr, &err);
-}
-
-static void xen_write_msr(unsigned int msr, unsigned low, unsigned high)
-{
- /*
- * This will silently swallow a #GP from WRMSR. It may be worth
- * changing that.
- */
- xen_write_msr_safe(msr, low, high);
-}
-
-void xen_setup_shared_info(void)
-{
- if (!xen_feature(XENFEAT_auto_translated_physmap)) {
- set_fixmap(FIX_PARAVIRT_BOOTMAP,
- xen_start_info->shared_info);
-
- HYPERVISOR_shared_info =
- (struct shared_info *)fix_to_virt(FIX_PARAVIRT_BOOTMAP);
- } else
- HYPERVISOR_shared_info =
- (struct shared_info *)__va(xen_start_info->shared_info);
-
-#ifndef CONFIG_SMP
- /* In UP this is as good a place as any to set up shared info */
- xen_setup_vcpu_info_placement();
-#endif
-
- xen_setup_mfn_list_list();
-}
-
-/* This is called once we have the cpu_possible_mask */
-void xen_setup_vcpu_info_placement(void)
-{
- int cpu;
-
- for_each_possible_cpu(cpu) {
- /* Set up direct vCPU id mapping for PV guests. */
- per_cpu(xen_vcpu_id, cpu) = cpu;
- xen_vcpu_setup(cpu);
- }
-
- /*
- * xen_vcpu_setup managed to place the vcpu_info within the
- * percpu area for all cpus, so make use of it.
- */
- if (have_vcpu_info_placement) {
- pv_irq_ops.save_fl = __PV_IS_CALLEE_SAVE(xen_save_fl_direct);
- pv_irq_ops.restore_fl = __PV_IS_CALLEE_SAVE(xen_restore_fl_direct);
- pv_irq_ops.irq_disable = __PV_IS_CALLEE_SAVE(xen_irq_disable_direct);
- pv_irq_ops.irq_enable = __PV_IS_CALLEE_SAVE(xen_irq_enable_direct);
- pv_mmu_ops.read_cr2 = xen_read_cr2_direct;
- }
-}
-
-static unsigned xen_patch(u8 type, u16 clobbers, void *insnbuf,
- unsigned long addr, unsigned len)
-{
- char *start, *end, *reloc;
- unsigned ret;
-
- start = end = reloc = NULL;
-
-#define SITE(op, x) \
- case PARAVIRT_PATCH(op.x): \
- if (have_vcpu_info_placement) { \
- start = (char *)xen_##x##_direct; \
- end = xen_##x##_direct_end; \
- reloc = xen_##x##_direct_reloc; \
- } \
- goto patch_site
-
- switch (type) {
- SITE(pv_irq_ops, irq_enable);
- SITE(pv_irq_ops, irq_disable);
- SITE(pv_irq_ops, save_fl);
- SITE(pv_irq_ops, restore_fl);
-#undef SITE
-
- patch_site:
- if (start == NULL || (end-start) > len)
- goto default_patch;
-
- ret = paravirt_patch_insns(insnbuf, len, start, end);
-
- /* Note: because reloc is assigned from something that
- appears to be an array, gcc assumes it's non-null,
- but doesn't know its relationship with start and
- end. */
- if (reloc > start && reloc < end) {
- int reloc_off = reloc - start;
- long *relocp = (long *)(insnbuf + reloc_off);
- long delta = start - (char *)addr;
-
- *relocp += delta;
- }
- break;
-
- default_patch:
- default:
- ret = paravirt_patch_default(type, clobbers, insnbuf,
- addr, len);
- break;
- }
-
- return ret;
-}
-
-static const struct pv_info xen_info __initconst = {
- .shared_kernel_pmd = 0,
-
-#ifdef CONFIG_X86_64
- .extra_user_64bit_cs = FLAT_USER_CS64,
-#endif
- .name = "Xen",
-};
-
-static const struct pv_init_ops xen_init_ops __initconst = {
- .patch = xen_patch,
-};
-
-static const struct pv_cpu_ops xen_cpu_ops __initconst = {
- .cpuid = xen_cpuid,
-
- .set_debugreg = xen_set_debugreg,
- .get_debugreg = xen_get_debugreg,
-
- .read_cr0 = xen_read_cr0,
- .write_cr0 = xen_write_cr0,
-
- .read_cr4 = native_read_cr4,
- .write_cr4 = xen_write_cr4,
-
-#ifdef CONFIG_X86_64
- .read_cr8 = xen_read_cr8,
- .write_cr8 = xen_write_cr8,
-#endif
-
- .wbinvd = native_wbinvd,
-
- .read_msr = xen_read_msr,
- .write_msr = xen_write_msr,
-
- .read_msr_safe = xen_read_msr_safe,
- .write_msr_safe = xen_write_msr_safe,
-
- .read_pmc = xen_read_pmc,
-
- .iret = xen_iret,
-#ifdef CONFIG_X86_64
- .usergs_sysret64 = xen_sysret64,
-#endif
-
- .load_tr_desc = paravirt_nop,
- .set_ldt = xen_set_ldt,
- .load_gdt = xen_load_gdt,
- .load_idt = xen_load_idt,
- .load_tls = xen_load_tls,
-#ifdef CONFIG_X86_64
- .load_gs_index = xen_load_gs_index,
-#endif
-
- .alloc_ldt = xen_alloc_ldt,
- .free_ldt = xen_free_ldt,
-
- .store_idt = native_store_idt,
- .store_tr = xen_store_tr,
-
- .write_ldt_entry = xen_write_ldt_entry,
- .write_gdt_entry = xen_write_gdt_entry,
- .write_idt_entry = xen_write_idt_entry,
- .load_sp0 = xen_load_sp0,
-
- .set_iopl_mask = xen_set_iopl_mask,
- .io_delay = xen_io_delay,
-
- /* Xen takes care of %gs when switching to usermode for us */
- .swapgs = paravirt_nop,
-
- .start_context_switch = paravirt_start_context_switch,
- .end_context_switch = xen_end_context_switch,
-};
-
-static void xen_reboot(int reason)
+void xen_reboot(int reason)
{
struct sched_shutdown r = { .reason = reason };
int cpu;
@@ -1307,33 +185,11 @@ static void xen_reboot(int reason)
BUG();
}
-static void xen_restart(char *msg)
+void xen_emergency_restart(void)
{
xen_reboot(SHUTDOWN_reboot);
}
-static void xen_emergency_restart(void)
-{
- xen_reboot(SHUTDOWN_reboot);
-}
-
-static void xen_machine_halt(void)
-{
- xen_reboot(SHUTDOWN_poweroff);
-}
-
-static void xen_machine_power_off(void)
-{
- if (pm_power_off)
- pm_power_off();
- xen_reboot(SHUTDOWN_poweroff);
-}
-
-static void xen_crash_shutdown(struct pt_regs *regs)
-{
- xen_reboot(SHUTDOWN_crash);
-}
-
static int
xen_panic_event(struct notifier_block *this, unsigned long event, void *ptr)
{
@@ -1343,7 +199,7 @@ xen_panic_event(struct notifier_block *this, unsigned long event, void *ptr)
}
static struct notifier_block xen_panic_block = {
- .notifier_call= xen_panic_event,
+ .notifier_call = xen_panic_event,
.priority = INT_MIN
};
@@ -1353,624 +209,7 @@ int xen_panic_handler_init(void)
return 0;
}
-static const struct machine_ops xen_machine_ops __initconst = {
- .restart = xen_restart,
- .halt = xen_machine_halt,
- .power_off = xen_machine_power_off,
- .shutdown = xen_machine_halt,
- .crash_shutdown = xen_crash_shutdown,
- .emergency_restart = xen_emergency_restart,
-};
-
-static unsigned char xen_get_nmi_reason(void)
-{
- unsigned char reason = 0;
-
- /* Construct a value which looks like it came from port 0x61. */
- if (test_bit(_XEN_NMIREASON_io_error,
- &HYPERVISOR_shared_info->arch.nmi_reason))
- reason |= NMI_REASON_IOCHK;
- if (test_bit(_XEN_NMIREASON_pci_serr,
- &HYPERVISOR_shared_info->arch.nmi_reason))
- reason |= NMI_REASON_SERR;
-
- return reason;
-}
-
-static void __init xen_boot_params_init_edd(void)
-{
-#if IS_ENABLED(CONFIG_EDD)
- struct xen_platform_op op;
- struct edd_info *edd_info;
- u32 *mbr_signature;
- unsigned nr;
- int ret;
-
- edd_info = boot_params.eddbuf;
- mbr_signature = boot_params.edd_mbr_sig_buffer;
-
- op.cmd = XENPF_firmware_info;
-
- op.u.firmware_info.type = XEN_FW_DISK_INFO;
- for (nr = 0; nr < EDDMAXNR; nr++) {
- struct edd_info *info = edd_info + nr;
-
- op.u.firmware_info.index = nr;
- info->params.length = sizeof(info->params);
- set_xen_guest_handle(op.u.firmware_info.u.disk_info.edd_params,
- &info->params);
- ret = HYPERVISOR_platform_op(&op);
- if (ret)
- break;
-
-#define C(x) info->x = op.u.firmware_info.u.disk_info.x
- C(device);
- C(version);
- C(interface_support);
- C(legacy_max_cylinder);
- C(legacy_max_head);
- C(legacy_sectors_per_track);
-#undef C
- }
- boot_params.eddbuf_entries = nr;
-
- op.u.firmware_info.type = XEN_FW_DISK_MBR_SIGNATURE;
- for (nr = 0; nr < EDD_MBR_SIG_MAX; nr++) {
- op.u.firmware_info.index = nr;
- ret = HYPERVISOR_platform_op(&op);
- if (ret)
- break;
- mbr_signature[nr] = op.u.firmware_info.u.disk_mbr_signature.mbr_signature;
- }
- boot_params.edd_mbr_sig_buf_entries = nr;
-#endif
-}
-
-/*
- * Set up the GDT and segment registers for -fstack-protector. Until
- * we do this, we have to be careful not to call any stack-protected
- * function, which is most of the kernel.
- */
-static void xen_setup_gdt(int cpu)
-{
- pv_cpu_ops.write_gdt_entry = xen_write_gdt_entry_boot;
- pv_cpu_ops.load_gdt = xen_load_gdt_boot;
-
- setup_stack_canary_segment(0);
- switch_to_new_gdt(0);
-
- pv_cpu_ops.write_gdt_entry = xen_write_gdt_entry;
- pv_cpu_ops.load_gdt = xen_load_gdt;
-}
-
-static void __init xen_dom0_set_legacy_features(void)
-{
- x86_platform.legacy.rtc = 1;
-}
-
-static int xen_cpuhp_setup(void)
-{
- int rc;
-
- rc = cpuhp_setup_state_nocalls(CPUHP_XEN_PREPARE,
- "x86/xen/hvm_guest:prepare",
- xen_cpu_up_prepare, xen_cpu_dead);
- if (rc >= 0) {
- rc = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
- "x86/xen/hvm_guest:online",
- xen_cpu_up_online, NULL);
- if (rc < 0)
- cpuhp_remove_state_nocalls(CPUHP_XEN_PREPARE);
- }
-
- return rc >= 0 ? 0 : rc;
-}
-
-/* First C function to be called on Xen boot */
-asmlinkage __visible void __init xen_start_kernel(void)
-{
- struct physdev_set_iopl set_iopl;
- unsigned long initrd_start = 0;
- int rc;
-
- if (!xen_start_info)
- return;
-
- xen_domain_type = XEN_PV_DOMAIN;
-
- xen_setup_features();
-
- xen_setup_machphys_mapping();
-
- /* Install Xen paravirt ops */
- pv_info = xen_info;
- pv_init_ops = xen_init_ops;
- pv_cpu_ops = xen_cpu_ops;
-
- x86_platform.get_nmi_reason = xen_get_nmi_reason;
-
- x86_init.resources.memory_setup = xen_memory_setup;
- x86_init.oem.arch_setup = xen_arch_setup;
- x86_init.oem.banner = xen_banner;
-
- xen_init_time_ops();
-
- /*
- * Set up some pagetable state before starting to set any ptes.
- */
-
- xen_init_mmu_ops();
-
- /* Prevent unwanted bits from being set in PTEs. */
- __supported_pte_mask &= ~_PAGE_GLOBAL;
-
- /*
- * Prevent page tables from being allocated in highmem, even
- * if CONFIG_HIGHPTE is enabled.
- */
- __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
-
- /* Work out if we support NX */
- x86_configure_nx();
-
- /* Get mfn list */
- xen_build_dynamic_phys_to_machine();
-
- /*
- * Set up kernel GDT and segment registers, mainly so that
- * -fstack-protector code can be executed.
- */
- xen_setup_gdt(0);
-
- xen_init_irq_ops();
- xen_init_cpuid_mask();
-
-#ifdef CONFIG_X86_LOCAL_APIC
- /*
- * set up the basic apic ops.
- */
- xen_init_apic();
-#endif
-
- if (xen_feature(XENFEAT_mmu_pt_update_preserve_ad)) {
- pv_mmu_ops.ptep_modify_prot_start = xen_ptep_modify_prot_start;
- pv_mmu_ops.ptep_modify_prot_commit = xen_ptep_modify_prot_commit;
- }
-
- machine_ops = xen_machine_ops;
-
- /*
- * The only reliable way to retain the initial address of the
- * percpu gdt_page is to remember it here, so we can go and
- * mark it RW later, when the initial percpu area is freed.
- */
- xen_initial_gdt = &per_cpu(gdt_page, 0);
-
- xen_smp_init();
-
-#ifdef CONFIG_ACPI_NUMA
- /*
- * The pages we get from Xen are not related to machine pages, so
- * any NUMA information the kernel tries to get from ACPI will
- * be meaningless. Prevent it from trying.
- */
- acpi_numa = -1;
-#endif
- /* Don't do the full vcpu_info placement stuff until we have a
- possible map and a non-dummy shared_info. */
- per_cpu(xen_vcpu, 0) = &HYPERVISOR_shared_info->vcpu_info[0];
-
- WARN_ON(xen_cpuhp_setup());
-
- local_irq_disable();
- early_boot_irqs_disabled = true;
-
- xen_raw_console_write("mapping kernel into physical memory\n");
- xen_setup_kernel_pagetable((pgd_t *)xen_start_info->pt_base,
- xen_start_info->nr_pages);
- xen_reserve_special_pages();
-
- /* keep using Xen gdt for now; no urgent need to change it */
-
-#ifdef CONFIG_X86_32
- pv_info.kernel_rpl = 1;
- if (xen_feature(XENFEAT_supervisor_mode_kernel))
- pv_info.kernel_rpl = 0;
-#else
- pv_info.kernel_rpl = 0;
-#endif
- /* set the limit of our address space */
- xen_reserve_top();
-
- /*
- * We used to do this in xen_arch_setup, but that is too late
- * on AMD, where early_cpu_init (run before ->arch_setup()) calls
- * early_amd_init, which pokes the 0xcf8 port.
- */
- set_iopl.iopl = 1;
- rc = HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl);
- if (rc != 0)
- xen_raw_printk("physdev_op failed %d\n", rc);
-
-#ifdef CONFIG_X86_32
- /* set up basic CPUID stuff */
- cpu_detect(&new_cpu_data);
- set_cpu_cap(&new_cpu_data, X86_FEATURE_FPU);
- new_cpu_data.x86_capability[CPUID_1_EDX] = cpuid_edx(1);
-#endif
-
- if (xen_start_info->mod_start) {
- if (xen_start_info->flags & SIF_MOD_START_PFN)
- initrd_start = PFN_PHYS(xen_start_info->mod_start);
- else
- initrd_start = __pa(xen_start_info->mod_start);
- }
-
- /* Poke various useful things into boot_params */
- boot_params.hdr.type_of_loader = (9 << 4) | 0;
- boot_params.hdr.ramdisk_image = initrd_start;
- boot_params.hdr.ramdisk_size = xen_start_info->mod_len;
- boot_params.hdr.cmd_line_ptr = __pa(xen_start_info->cmd_line);
- boot_params.hdr.hardware_subarch = X86_SUBARCH_XEN;
-
- if (!xen_initial_domain()) {
- add_preferred_console("xenboot", 0, NULL);
- add_preferred_console("tty", 0, NULL);
- add_preferred_console("hvc", 0, NULL);
- if (pci_xen)
- x86_init.pci.arch_init = pci_xen_init;
- } else {
- const struct dom0_vga_console_info *info =
- (void *)((char *)xen_start_info +
- xen_start_info->console.dom0.info_off);
- struct xen_platform_op op = {
- .cmd = XENPF_firmware_info,
- .interface_version = XENPF_INTERFACE_VERSION,
- .u.firmware_info.type = XEN_FW_KBD_SHIFT_FLAGS,
- };
-
- x86_platform.set_legacy_features =
- xen_dom0_set_legacy_features;
- xen_init_vga(info, xen_start_info->console.dom0.info_size);
- xen_start_info->console.domU.mfn = 0;
- xen_start_info->console.domU.evtchn = 0;
-
- if (HYPERVISOR_platform_op(&op) == 0)
- boot_params.kbd_status = op.u.firmware_info.u.kbd_shift_flags;
-
- /* Make sure ACS will be enabled */
- pci_request_acs();
-
- xen_acpi_sleep_register();
-
- /* Avoid searching for BIOS MP tables */
- x86_init.mpparse.find_smp_config = x86_init_noop;
- x86_init.mpparse.get_smp_config = x86_init_uint_noop;
-
- xen_boot_params_init_edd();
- }
-#ifdef CONFIG_PCI
- /* PCI BIOS service won't work from a PV guest. */
- pci_probe &= ~PCI_PROBE_BIOS;
-#endif
- xen_raw_console_write("about to get started...\n");
-
- /* Let's presume PV guests always boot on vCPU with id 0. */
- per_cpu(xen_vcpu_id, 0) = 0;
-
- xen_setup_runstate_info(0);
-
- xen_efi_init();
-
- /* Start the world */
-#ifdef CONFIG_X86_32
- i386_start_kernel();
-#else
- cr4_init_shadow(); /* 32b kernel does this in i386_start_kernel() */
- x86_64_start_reservations((char *)__pa_symbol(&boot_params));
-#endif
-}
-
-#ifdef CONFIG_XEN_PVH
-
-static void xen_pvh_arch_setup(void)
-{
-#ifdef CONFIG_ACPI
- /* Make sure we don't fall back to (default) ACPI_IRQ_MODEL_PIC. */
- if (nr_ioapics == 0)
- acpi_irq_model = ACPI_IRQ_MODEL_PLATFORM;
-#endif
-}
-
-static void __init init_pvh_bootparams(void)
-{
- struct xen_memory_map memmap;
- unsigned int i;
- int rc;
-
- memset(&pvh_bootparams, 0, sizeof(pvh_bootparams));
-
- memmap.nr_entries = ARRAY_SIZE(pvh_bootparams.e820_table);
- set_xen_guest_handle(memmap.buffer, pvh_bootparams.e820_table);
- rc = HYPERVISOR_memory_op(XENMEM_memory_map, &memmap);
- if (rc) {
- xen_raw_printk("XENMEM_memory_map failed (%d)\n", rc);
- BUG();
- }
-
- if (memmap.nr_entries < E820_MAX_ENTRIES_ZEROPAGE - 1) {
- pvh_bootparams.e820_table[memmap.nr_entries].addr =
- ISA_START_ADDRESS;
- pvh_bootparams.e820_table[memmap.nr_entries].size =
- ISA_END_ADDRESS - ISA_START_ADDRESS;
- pvh_bootparams.e820_table[memmap.nr_entries].type =
- E820_TYPE_RESERVED;
- memmap.nr_entries++;
- } else
- xen_raw_printk("Warning: Can fit ISA range into e820\n");
-
- pvh_bootparams.e820_entries = memmap.nr_entries;
- for (i = 0; i < pvh_bootparams.e820_entries; i++)
- e820__range_add(pvh_bootparams.e820_table[i].addr,
- pvh_bootparams.e820_table[i].size,
- pvh_bootparams.e820_table[i].type);
-
- e820__update_table(e820_table);
-
- pvh_bootparams.hdr.cmd_line_ptr =
- pvh_start_info.cmdline_paddr;
-
- /* The first module is always ramdisk. */
- if (pvh_start_info.nr_modules) {
- struct hvm_modlist_entry *modaddr =
- __va(pvh_start_info.modlist_paddr);
- pvh_bootparams.hdr.ramdisk_image = modaddr->paddr;
- pvh_bootparams.hdr.ramdisk_size = modaddr->size;
- }
-
- /*
- * See Documentation/x86/boot.txt.
- *
- * Version 2.12 supports Xen entry point but we will use default x86/PC
- * environment (i.e. hardware_subarch 0).
- */
- pvh_bootparams.hdr.version = 0x212;
- pvh_bootparams.hdr.type_of_loader = (9 << 4) | 0; /* Xen loader */
-}
-
-/*
- * This routine (and those that it might call) should not use
- * anything that lives in .bss since that segment will be cleared later.
- */
-void __init xen_prepare_pvh(void)
-{
- u32 msr;
- u64 pfn;
-
- if (pvh_start_info.magic != XEN_HVM_START_MAGIC_VALUE) {
- xen_raw_printk("Error: Unexpected magic value (0x%08x)\n",
- pvh_start_info.magic);
- BUG();
- }
-
- xen_pvh = 1;
-
- msr = cpuid_ebx(xen_cpuid_base() + 2);
- pfn = __pa(hypercall_page);
- wrmsr_safe(msr, (u32)pfn, (u32)(pfn >> 32));
-
- init_pvh_bootparams();
-
- x86_init.oem.arch_setup = xen_pvh_arch_setup;
-}
-#endif
-
-void __ref xen_hvm_init_shared_info(void)
-{
- int cpu;
- struct xen_add_to_physmap xatp;
- static struct shared_info *shared_info_page = 0;
-
- if (!shared_info_page)
- shared_info_page = (struct shared_info *)
- extend_brk(PAGE_SIZE, PAGE_SIZE);
- xatp.domid = DOMID_SELF;
- xatp.idx = 0;
- xatp.space = XENMAPSPACE_shared_info;
- xatp.gpfn = __pa(shared_info_page) >> PAGE_SHIFT;
- if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp))
- BUG();
-
- HYPERVISOR_shared_info = (struct shared_info *)shared_info_page;
-
- /* xen_vcpu is a pointer to the vcpu_info struct in the shared_info
- * page, we use it in the event channel upcall and in some pvclock
- * related functions. We don't need the vcpu_info placement
- * optimizations because we don't use any pv_mmu or pv_irq op on
- * HVM.
- * When xen_hvm_init_shared_info is run at boot time only vcpu 0 is
- * online but xen_hvm_init_shared_info is run at resume time too and
- * in that case multiple vcpus might be online. */
- for_each_online_cpu(cpu) {
- /* Leave it to be NULL. */
- if (xen_vcpu_nr(cpu) >= MAX_VIRT_CPUS)
- continue;
- per_cpu(xen_vcpu, cpu) =
- &HYPERVISOR_shared_info->vcpu_info[xen_vcpu_nr(cpu)];
- }
-}
-
-#ifdef CONFIG_XEN_PVHVM
-static void __init init_hvm_pv_info(void)
-{
- int major, minor;
- uint32_t eax, ebx, ecx, edx, base;
-
- base = xen_cpuid_base();
- eax = cpuid_eax(base + 1);
-
- major = eax >> 16;
- minor = eax & 0xffff;
- printk(KERN_INFO "Xen version %d.%d.\n", major, minor);
-
- xen_domain_type = XEN_HVM_DOMAIN;
-
- /* PVH sets up the hypercall page in xen_prepare_pvh(). */
- if (xen_pvh_domain())
- pv_info.name = "Xen PVH";
- else {
- u64 pfn;
- uint32_t msr;
-
- pv_info.name = "Xen HVM";
- msr = cpuid_ebx(base + 2);
- pfn = __pa(hypercall_page);
- wrmsr_safe(msr, (u32)pfn, (u32)(pfn >> 32));
- }
-
- xen_setup_features();
-
- cpuid(base + 4, &eax, &ebx, &ecx, &edx);
- if (eax & XEN_HVM_CPUID_VCPU_ID_PRESENT)
- this_cpu_write(xen_vcpu_id, ebx);
- else
- this_cpu_write(xen_vcpu_id, smp_processor_id());
-}
-#endif
-
-static int xen_cpu_up_prepare(unsigned int cpu)
-{
- int rc;
-
- if (xen_hvm_domain()) {
- /*
- * This can happen if CPU was offlined earlier and
- * offlining timed out in common_cpu_die().
- */
- if (cpu_report_state(cpu) == CPU_DEAD_FROZEN) {
- xen_smp_intr_free(cpu);
- xen_uninit_lock_cpu(cpu);
- }
-
- if (cpu_acpi_id(cpu) != U32_MAX)
- per_cpu(xen_vcpu_id, cpu) = cpu_acpi_id(cpu);
- else
- per_cpu(xen_vcpu_id, cpu) = cpu;
- xen_vcpu_setup(cpu);
- }
-
- if (xen_pv_domain() || xen_feature(XENFEAT_hvm_safe_pvclock))
- xen_setup_timer(cpu);
-
- rc = xen_smp_intr_init(cpu);
- if (rc) {
- WARN(1, "xen_smp_intr_init() for CPU %d failed: %d\n",
- cpu, rc);
- return rc;
- }
- return 0;
-}
-
-static int xen_cpu_dead(unsigned int cpu)
-{
- xen_smp_intr_free(cpu);
-
- if (xen_pv_domain() || xen_feature(XENFEAT_hvm_safe_pvclock))
- xen_teardown_timer(cpu);
-
- return 0;
-}
-
-static int xen_cpu_up_online(unsigned int cpu)
-{
- xen_init_lock_cpu(cpu);
- return 0;
-}
-
-#ifdef CONFIG_XEN_PVHVM
-#ifdef CONFIG_KEXEC_CORE
-static void xen_hvm_shutdown(void)
-{
- native_machine_shutdown();
- if (kexec_in_progress)
- xen_reboot(SHUTDOWN_soft_reset);
-}
-
-static void xen_hvm_crash_shutdown(struct pt_regs *regs)
-{
- native_machine_crash_shutdown(regs);
- xen_reboot(SHUTDOWN_soft_reset);
-}
-#endif
-
-static void __init xen_hvm_guest_init(void)
-{
- if (xen_pv_domain())
- return;
-
- init_hvm_pv_info();
-
- xen_hvm_init_shared_info();
-
- xen_panic_handler_init();
-
- BUG_ON(!xen_feature(XENFEAT_hvm_callback_vector));
-
- xen_hvm_smp_init();
- WARN_ON(xen_cpuhp_setup());
- xen_unplug_emulated_devices();
- x86_init.irqs.intr_init = xen_init_IRQ;
- xen_hvm_init_time_ops();
- xen_hvm_init_mmu_ops();
-
- if (xen_pvh_domain())
- machine_ops.emergency_restart = xen_emergency_restart;
-#ifdef CONFIG_KEXEC_CORE
- machine_ops.shutdown = xen_hvm_shutdown;
- machine_ops.crash_shutdown = xen_hvm_crash_shutdown;
-#endif
-}
-#endif
-
-static bool xen_nopv = false;
-static __init int xen_parse_nopv(char *arg)
-{
- xen_nopv = true;
- return 0;
-}
-early_param("xen_nopv", xen_parse_nopv);
-
-static uint32_t __init xen_platform(void)
-{
- if (xen_nopv)
- return 0;
-
- return xen_cpuid_base();
-}
-
-bool xen_hvm_need_lapic(void)
-{
- if (xen_nopv)
- return false;
- if (xen_pv_domain())
- return false;
- if (!xen_hvm_domain())
- return false;
- if (xen_feature(XENFEAT_hvm_pirqs))
- return false;
- return true;
-}
-EXPORT_SYMBOL_GPL(xen_hvm_need_lapic);
-
-static void xen_set_cpu_features(struct cpuinfo_x86 *c)
-{
- if (xen_pv_domain()) {
- clear_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
- set_cpu_cap(c, X86_FEATURE_XENPV);
- }
-}
-
-static void xen_pin_vcpu(int cpu)
+void xen_pin_vcpu(int cpu)
{
static bool disable_pinning;
struct sched_pin_override pin_override;
@@ -2009,18 +248,6 @@ static void xen_pin_vcpu(int cpu)
}
}
-const struct hypervisor_x86 x86_hyper_xen = {
- .name = "Xen",
- .detect = xen_platform,
-#ifdef CONFIG_XEN_PVHVM
- .init_platform = xen_hvm_guest_init,
-#endif
- .x2apic_available = xen_x2apic_para_available,
- .set_cpu_features = xen_set_cpu_features,
- .pin_vcpu = xen_pin_vcpu,
-};
-EXPORT_SYMBOL(x86_hyper_xen);
-
#ifdef CONFIG_HOTPLUG_CPU
void xen_arch_register_cpu(int num)
{
diff --git a/arch/x86/xen/enlighten_hvm.c b/arch/x86/xen/enlighten_hvm.c
new file mode 100644
index 000000000000..a6d014f47e52
--- /dev/null
+++ b/arch/x86/xen/enlighten_hvm.c
@@ -0,0 +1,214 @@
+#include <linux/cpu.h>
+#include <linux/kexec.h>
+
+#include <xen/features.h>
+#include <xen/events.h>
+#include <xen/interface/memory.h>
+
+#include <asm/cpu.h>
+#include <asm/smp.h>
+#include <asm/reboot.h>
+#include <asm/setup.h>
+#include <asm/hypervisor.h>
+
+#include <asm/xen/cpuid.h>
+#include <asm/xen/hypervisor.h>
+
+#include "xen-ops.h"
+#include "mmu.h"
+#include "smp.h"
+
+void __ref xen_hvm_init_shared_info(void)
+{
+ int cpu;
+ struct xen_add_to_physmap xatp;
+ static struct shared_info *shared_info_page;
+
+ if (!shared_info_page)
+ shared_info_page = (struct shared_info *)
+ extend_brk(PAGE_SIZE, PAGE_SIZE);
+ xatp.domid = DOMID_SELF;
+ xatp.idx = 0;
+ xatp.space = XENMAPSPACE_shared_info;
+ xatp.gpfn = __pa(shared_info_page) >> PAGE_SHIFT;
+ if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp))
+ BUG();
+
+ HYPERVISOR_shared_info = (struct shared_info *)shared_info_page;
+
+ /* xen_vcpu is a pointer to the vcpu_info struct in the shared_info
+ * page, we use it in the event channel upcall and in some pvclock
+ * related functions. We don't need the vcpu_info placement
+ * optimizations because we don't use any pv_mmu or pv_irq op on
+ * HVM.
+ * When xen_hvm_init_shared_info is run at boot time only vcpu 0 is
+ * online but xen_hvm_init_shared_info is run at resume time too and
+ * in that case multiple vcpus might be online. */
+ for_each_online_cpu(cpu) {
+ /* Leave it to be NULL. */
+ if (xen_vcpu_nr(cpu) >= MAX_VIRT_CPUS)
+ continue;
+ per_cpu(xen_vcpu, cpu) =
+ &HYPERVISOR_shared_info->vcpu_info[xen_vcpu_nr(cpu)];
+ }
+}
+
+static void __init init_hvm_pv_info(void)
+{
+ int major, minor;
+ uint32_t eax, ebx, ecx, edx, base;
+
+ base = xen_cpuid_base();
+ eax = cpuid_eax(base + 1);
+
+ major = eax >> 16;
+ minor = eax & 0xffff;
+ printk(KERN_INFO "Xen version %d.%d.\n", major, minor);
+
+ xen_domain_type = XEN_HVM_DOMAIN;
+
+ /* PVH sets up the hypercall page in xen_prepare_pvh(). */
+ if (xen_pvh_domain())
+ pv_info.name = "Xen PVH";
+ else {
+ u64 pfn;
+ uint32_t msr;
+
+ pv_info.name = "Xen HVM";
+ msr = cpuid_ebx(base + 2);
+ pfn = __pa(hypercall_page);
+ wrmsr_safe(msr, (u32)pfn, (u32)(pfn >> 32));
+ }
+
+ xen_setup_features();
+
+ cpuid(base + 4, &eax, &ebx, &ecx, &edx);
+ if (eax & XEN_HVM_CPUID_VCPU_ID_PRESENT)
+ this_cpu_write(xen_vcpu_id, ebx);
+ else
+ this_cpu_write(xen_vcpu_id, smp_processor_id());
+}
+
+#ifdef CONFIG_KEXEC_CORE
+static void xen_hvm_shutdown(void)
+{
+ native_machine_shutdown();
+ if (kexec_in_progress)
+ xen_reboot(SHUTDOWN_soft_reset);
+}
+
+static void xen_hvm_crash_shutdown(struct pt_regs *regs)
+{
+ native_machine_crash_shutdown(regs);
+ xen_reboot(SHUTDOWN_soft_reset);
+}
+#endif
+
+static int xen_cpu_up_prepare_hvm(unsigned int cpu)
+{
+ int rc;
+
+ /*
+ * This can happen if the CPU was offlined earlier and
+ * offlining timed out in common_cpu_die().
+ */
+ if (cpu_report_state(cpu) == CPU_DEAD_FROZEN) {
+ xen_smp_intr_free(cpu);
+ xen_uninit_lock_cpu(cpu);
+ }
+
+ if (cpu_acpi_id(cpu) != U32_MAX)
+ per_cpu(xen_vcpu_id, cpu) = cpu_acpi_id(cpu);
+ else
+ per_cpu(xen_vcpu_id, cpu) = cpu;
+ xen_vcpu_setup(cpu);
+
+ if (xen_have_vector_callback && xen_feature(XENFEAT_hvm_safe_pvclock))
+ xen_setup_timer(cpu);
+
+ rc = xen_smp_intr_init(cpu);
+ if (rc) {
+ WARN(1, "xen_smp_intr_init() for CPU %d failed: %d\n",
+ cpu, rc);
+ return rc;
+ }
+ return 0;
+}
+
+static int xen_cpu_dead_hvm(unsigned int cpu)
+{
+ xen_smp_intr_free(cpu);
+
+ if (xen_have_vector_callback && xen_feature(XENFEAT_hvm_safe_pvclock))
+ xen_teardown_timer(cpu);
+
+ return 0;
+}
+
+static void __init xen_hvm_guest_init(void)
+{
+ if (xen_pv_domain())
+ return;
+
+ init_hvm_pv_info();
+
+ xen_hvm_init_shared_info();
+
+ xen_panic_handler_init();
+
+ if (xen_feature(XENFEAT_hvm_callback_vector))
+ xen_have_vector_callback = 1;
+
+ xen_hvm_smp_init();
+ WARN_ON(xen_cpuhp_setup(xen_cpu_up_prepare_hvm, xen_cpu_dead_hvm));
+ xen_unplug_emulated_devices();
+ x86_init.irqs.intr_init = xen_init_IRQ;
+ xen_hvm_init_time_ops();
+ xen_hvm_init_mmu_ops();
+
+ if (xen_pvh_domain())
+ machine_ops.emergency_restart = xen_emergency_restart;
+#ifdef CONFIG_KEXEC_CORE
+ machine_ops.shutdown = xen_hvm_shutdown;
+ machine_ops.crash_shutdown = xen_hvm_crash_shutdown;
+#endif
+}
+
+static bool xen_nopv;
+static __init int xen_parse_nopv(char *arg)
+{
+ xen_nopv = true;
+ return 0;
+}
+early_param("xen_nopv", xen_parse_nopv);
+
+bool xen_hvm_need_lapic(void)
+{
+ if (xen_nopv)
+ return false;
+ if (xen_pv_domain())
+ return false;
+ if (!xen_hvm_domain())
+ return false;
+ if (xen_feature(XENFEAT_hvm_pirqs) && xen_have_vector_callback)
+ return false;
+ return true;
+}
+EXPORT_SYMBOL_GPL(xen_hvm_need_lapic);
+
+static uint32_t __init xen_platform_hvm(void)
+{
+ if (xen_pv_domain() || xen_nopv)
+ return 0;
+
+ return xen_cpuid_base();
+}
+
+const struct hypervisor_x86 x86_hyper_xen_hvm = {
+ .name = "Xen HVM",
+ .detect = xen_platform_hvm,
+ .init_platform = xen_hvm_guest_init,
+ .pin_vcpu = xen_pin_vcpu,
+ .x2apic_available = xen_x2apic_para_available,
+};
+EXPORT_SYMBOL(x86_hyper_xen_hvm);
diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
new file mode 100644
index 000000000000..a732bc2b9dfc
--- /dev/null
+++ b/arch/x86/xen/enlighten_pv.c
@@ -0,0 +1,1513 @@
+/*
+ * Core of Xen paravirt_ops implementation.
+ *
+ * This file contains the xen_paravirt_ops structure itself, and the
+ * implementations for:
+ * - privileged instructions
+ * - interrupt flags
+ * - segment operations
+ * - booting and setup
+ *
+ * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
+ */
+
+#include <linux/cpu.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/smp.h>
+#include <linux/preempt.h>
+#include <linux/hardirq.h>
+#include <linux/percpu.h>
+#include <linux/delay.h>
+#include <linux/start_kernel.h>
+#include <linux/sched.h>
+#include <linux/kprobes.h>
+#include <linux/bootmem.h>
+#include <linux/export.h>
+#include <linux/mm.h>
+#include <linux/page-flags.h>
+#include <linux/highmem.h>
+#include <linux/console.h>
+#include <linux/pci.h>
+#include <linux/gfp.h>
+#include <linux/memblock.h>
+#include <linux/edd.h>
+#include <linux/frame.h>
+
+#include <xen/xen.h>
+#include <xen/events.h>
+#include <xen/interface/xen.h>
+#include <xen/interface/version.h>
+#include <xen/interface/physdev.h>
+#include <xen/interface/vcpu.h>
+#include <xen/interface/memory.h>
+#include <xen/interface/nmi.h>
+#include <xen/interface/xen-mca.h>
+#include <xen/features.h>
+#include <xen/page.h>
+#include <xen/hvc-console.h>
+#include <xen/acpi.h>
+
+#include <asm/paravirt.h>
+#include <asm/apic.h>
+#include <asm/page.h>
+#include <asm/xen/pci.h>
+#include <asm/xen/hypercall.h>
+#include <asm/xen/hypervisor.h>
+#include <asm/xen/cpuid.h>
+#include <asm/fixmap.h>
+#include <asm/processor.h>
+#include <asm/proto.h>
+#include <asm/msr-index.h>
+#include <asm/traps.h>
+#include <asm/setup.h>
+#include <asm/desc.h>
+#include <asm/pgalloc.h>
+#include <asm/pgtable.h>
+#include <asm/tlbflush.h>
+#include <asm/reboot.h>
+#include <asm/stackprotector.h>
+#include <asm/hypervisor.h>
+#include <asm/mach_traps.h>
+#include <asm/mwait.h>
+#include <asm/pci_x86.h>
+#include <asm/cpu.h>
+
+#ifdef CONFIG_ACPI
+#include <linux/acpi.h>
+#include <asm/acpi.h>
+#include <acpi/pdc_intel.h>
+#include <acpi/processor.h>
+#include <xen/interface/platform.h>
+#endif
+
+#include "xen-ops.h"
+#include "mmu.h"
+#include "smp.h"
+#include "multicalls.h"
+#include "pmu.h"
+
+void *xen_initial_gdt;
+
+RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
+
+static int xen_cpu_up_prepare_pv(unsigned int cpu);
+static int xen_cpu_dead_pv(unsigned int cpu);
+
+struct tls_descs {
+ struct desc_struct desc[3];
+};
+
+/*
+ * Updating the 3 TLS descriptors in the GDT on every task switch is
+ * surprisingly expensive so we avoid updating them if they haven't
+ * changed. Since Xen writes different descriptors than the one
+ * passed in the update_descriptor hypercall we keep shadow copies to
+ * compare against.
+ */
+static DEFINE_PER_CPU(struct tls_descs, shadow_tls_desc);
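To make the shadow-copy caching described above concrete, here is a minimal stand-alone sketch of the same pattern in plain C. The names fake_desc and expensive_descriptor_update() are hypothetical stand-ins (for struct desc_struct and the update_descriptor hypercall) used only for illustration.

#include <stdio.h>

struct fake_desc {
	unsigned int a, b;		/* stand-in for struct desc_struct */
};

static struct fake_desc shadow[3];	/* models the per-CPU shadow_tls_desc */

static void expensive_descriptor_update(int i, const struct fake_desc *d)
{
	/* models the costly MULTI_update_descriptor hypercall */
	printf("updating TLS descriptor %d\n", i);
	shadow[i] = *d;
}

static void load_tls_entry(int i, const struct fake_desc *nd)
{
	/* skip the expensive update when nothing changed since the last switch */
	if (shadow[i].a == nd->a && shadow[i].b == nd->b)
		return;
	expensive_descriptor_update(i, nd);
}

int main(void)
{
	struct fake_desc d = { .a = 1, .b = 2 };

	load_tls_entry(0, &d);	/* issues the update */
	load_tls_entry(0, &d);	/* unchanged, skipped */
	return 0;
}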
+
+/*
+ * On restore, set the vcpu placement up again.
+ * If it fails, then we're in a bad state, since
+ * we can't back out from using it...
+ */
+void xen_vcpu_restore(void)
+{
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ bool other_cpu = (cpu != smp_processor_id());
+ bool is_up = HYPERVISOR_vcpu_op(VCPUOP_is_up, xen_vcpu_nr(cpu),
+ NULL);
+
+ if (other_cpu && is_up &&
+ HYPERVISOR_vcpu_op(VCPUOP_down, xen_vcpu_nr(cpu), NULL))
+ BUG();
+
+ xen_setup_runstate_info(cpu);
+
+ if (xen_have_vcpu_info_placement)
+ xen_vcpu_setup(cpu);
+
+ if (other_cpu && is_up &&
+ HYPERVISOR_vcpu_op(VCPUOP_up, xen_vcpu_nr(cpu), NULL))
+ BUG();
+ }
+}
+
+static void __init xen_banner(void)
+{
+ unsigned version = HYPERVISOR_xen_version(XENVER_version, NULL);
+ struct xen_extraversion extra;
+ HYPERVISOR_xen_version(XENVER_extraversion, &extra);
+
+ pr_info("Booting paravirtualized kernel %son %s\n",
+ xen_feature(XENFEAT_auto_translated_physmap) ?
+ "with PVH extensions " : "", pv_info.name);
+ printk(KERN_INFO "Xen version: %d.%d%s%s\n",
+ version >> 16, version & 0xffff, extra.extraversion,
+ xen_feature(XENFEAT_mmu_pt_update_preserve_ad) ? " (preserve-AD)" : "");
+}
+/* Check if running on Xen version (major, minor) or later */
+bool
+xen_running_on_version_or_later(unsigned int major, unsigned int minor)
+{
+ unsigned int version;
+
+ if (!xen_domain())
+ return false;
+
+ version = HYPERVISOR_xen_version(XENVER_version, NULL);
+ if ((((version >> 16) == major) && ((version & 0xffff) >= minor)) ||
+ ((version >> 16) > major))
+ return true;
+ return false;
+}
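XENVER_version packs the major version in the upper 16 bits and the minor in the lower 16 bits, so the check above accepts any strictly newer major, or the same major with an equal or newer minor. A small self-contained sketch of that comparison, using a made-up version word rather than a real hypercall result:

#include <stdbool.h>
#include <stdio.h>

static bool version_or_later(unsigned int version,
			     unsigned int major, unsigned int minor)
{
	unsigned int maj = version >> 16;
	unsigned int min = version & 0xffff;

	return maj > major || (maj == major && min >= minor);
}

int main(void)
{
	unsigned int version = (4 << 16) | 7;	/* pretend the hypervisor reported 4.7 */

	printf("%d\n", version_or_later(version, 4, 2));	/* prints 1 */
	printf("%d\n", version_or_later(version, 4, 9));	/* prints 0 */
	return 0;
}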
+
+static __read_mostly unsigned int cpuid_leaf5_ecx_val;
+static __read_mostly unsigned int cpuid_leaf5_edx_val;
+
+static void xen_cpuid(unsigned int *ax, unsigned int *bx,
+ unsigned int *cx, unsigned int *dx)
+{
+ unsigned maskebx = ~0;
+
+ /*
+ * Mask out inconvenient features, to try and disable as many
+ * unsupported kernel subsystems as possible.
+ */
+ switch (*ax) {
+ case CPUID_MWAIT_LEAF:
+ /* Synthesize the values.. */
+ *ax = 0;
+ *bx = 0;
+ *cx = cpuid_leaf5_ecx_val;
+ *dx = cpuid_leaf5_edx_val;
+ return;
+
+ case 0xb:
+ /* Suppress extended topology stuff */
+ maskebx = 0;
+ break;
+ }
+
+ asm(XEN_EMULATE_PREFIX "cpuid"
+ : "=a" (*ax),
+ "=b" (*bx),
+ "=c" (*cx),
+ "=d" (*dx)
+ : "0" (*ax), "2" (*cx));
+
+ *bx &= maskebx;
+}
+STACK_FRAME_NON_STANDARD(xen_cpuid); /* XEN_EMULATE_PREFIX */
+
+static bool __init xen_check_mwait(void)
+{
+#ifdef CONFIG_ACPI
+ struct xen_platform_op op = {
+ .cmd = XENPF_set_processor_pminfo,
+ .u.set_pminfo.id = -1,
+ .u.set_pminfo.type = XEN_PM_PDC,
+ };
+ uint32_t buf[3];
+ unsigned int ax, bx, cx, dx;
+ unsigned int mwait_mask;
+
+ /* We need to determine whether it is OK to expose the MWAIT
+ * capability to the kernel to harvest deeper than C3 states from ACPI
+ * _CST using the processor_harvest_xen.c module. For this to work, we
+ * need to gather the MWAIT_LEAF values (which the cstate.c code
+ * checks against). The hypervisor won't expose the MWAIT flag because
+ * it would break backwards compatibility; so we will find out directly
+ * from the hardware and hypercall.
+ */
+ if (!xen_initial_domain())
+ return false;
+
+ /*
+ * When running on a platform earlier than Xen 4.2, do not expose
+ * MWAIT, to avoid the risk of loading the native ACPI PAD driver.
+ */
+ if (!xen_running_on_version_or_later(4, 2))
+ return false;
+
+ ax = 1;
+ cx = 0;
+
+ native_cpuid(&ax, &bx, &cx, &dx);
+
+ mwait_mask = (1 << (X86_FEATURE_EST % 32)) |
+ (1 << (X86_FEATURE_MWAIT % 32));
+
+ if ((cx & mwait_mask) != mwait_mask)
+ return false;
+
+ /* We need to emulate the MWAIT_LEAF and for that we need both
+ * ecx and edx. The hypercall provides only partial information.
+ */
+
+ ax = CPUID_MWAIT_LEAF;
+ bx = 0;
+ cx = 0;
+ dx = 0;
+
+ native_cpuid(&ax, &bx, &cx, &dx);
+
+ /* Ask the Hypervisor whether to clear ACPI_PDC_C_C2C3_FFH. If so,
+ * don't expose MWAIT_LEAF and let ACPI pick the IOPORT version of C3.
+ */
+ buf[0] = ACPI_PDC_REVISION_ID;
+ buf[1] = 1;
+ buf[2] = (ACPI_PDC_C_CAPABILITY_SMP | ACPI_PDC_EST_CAPABILITY_SWSMP);
+
+ set_xen_guest_handle(op.u.set_pminfo.pdc, buf);
+
+ if ((HYPERVISOR_platform_op(&op) == 0) &&
+ (buf[2] & (ACPI_PDC_C_C1_FFH | ACPI_PDC_C_C2C3_FFH))) {
+ cpuid_leaf5_ecx_val = cx;
+ cpuid_leaf5_edx_val = dx;
+ }
+ return true;
+#else
+ return false;
+#endif
+}
+
+static bool __init xen_check_xsave(void)
+{
+ unsigned int err, eax, edx;
+
+ /*
+ * Xen 4.0 and older accidentally leaked the host XSAVE flag into guest
+ * view, despite not being able to support guests using the
+ * functionality. Probe for the actual availability of XSAVE by seeing
+ * whether xgetbv executes successfully or raises #UD.
+ */
+ asm volatile("1: .byte 0x0f,0x01,0xd0\n\t" /* xgetbv */
+ "xor %[err], %[err]\n"
+ "2:\n\t"
+ ".pushsection .fixup,\"ax\"\n\t"
+ "3: movl $1,%[err]\n\t"
+ "jmp 2b\n\t"
+ ".popsection\n\t"
+ _ASM_EXTABLE(1b, 3b)
+ : [err] "=r" (err), "=a" (eax), "=d" (edx)
+ : "c" (0));
+
+ return err == 0;
+}
+
+static void __init xen_init_capabilities(void)
+{
+ setup_clear_cpu_cap(X86_BUG_SYSRET_SS_ATTRS);
+ setup_force_cpu_cap(X86_FEATURE_XENPV);
+ setup_clear_cpu_cap(X86_FEATURE_DCA);
+ setup_clear_cpu_cap(X86_FEATURE_APERFMPERF);
+ setup_clear_cpu_cap(X86_FEATURE_MTRR);
+ setup_clear_cpu_cap(X86_FEATURE_ACC);
+ setup_clear_cpu_cap(X86_FEATURE_X2APIC);
+
+ if (!xen_initial_domain())
+ setup_clear_cpu_cap(X86_FEATURE_ACPI);
+
+ if (xen_check_mwait())
+ setup_force_cpu_cap(X86_FEATURE_MWAIT);
+ else
+ setup_clear_cpu_cap(X86_FEATURE_MWAIT);
+
+ if (xen_check_xsave()) {
+ setup_force_cpu_cap(X86_FEATURE_XSAVE);
+ setup_force_cpu_cap(X86_FEATURE_OSXSAVE);
+ } else {
+ setup_clear_cpu_cap(X86_FEATURE_XSAVE);
+ setup_clear_cpu_cap(X86_FEATURE_OSXSAVE);
+ }
+}
+
+static void xen_set_debugreg(int reg, unsigned long val)
+{
+ HYPERVISOR_set_debugreg(reg, val);
+}
+
+static unsigned long xen_get_debugreg(int reg)
+{
+ return HYPERVISOR_get_debugreg(reg);
+}
+
+static void xen_end_context_switch(struct task_struct *next)
+{
+ xen_mc_flush();
+ paravirt_end_context_switch(next);
+}
+
+static unsigned long xen_store_tr(void)
+{
+ return 0;
+}
+
+/*
+ * Set the page permissions for a particular virtual address. If the
+ * address is a vmalloc mapping (or other non-linear mapping), then
+ * find the linear mapping of the page and also set its protections to
+ * match.
+ */
+static void set_aliased_prot(void *v, pgprot_t prot)
+{
+ int level;
+ pte_t *ptep;
+ pte_t pte;
+ unsigned long pfn;
+ struct page *page;
+ unsigned char dummy;
+
+ ptep = lookup_address((unsigned long)v, &level);
+ BUG_ON(ptep == NULL);
+
+ pfn = pte_pfn(*ptep);
+ page = pfn_to_page(pfn);
+
+ pte = pfn_pte(pfn, prot);
+
+ /*
+ * Careful: update_va_mapping() will fail if the virtual address
+ * we're poking isn't populated in the page tables. We don't
+ * need to worry about the direct map (that's always in the page
+ * tables), but we need to be careful about vmap space. In
+ * particular, the top level page table can lazily propagate
+ * entries between processes, so if we've switched mms since we
+ * vmapped the target in the first place, we might not have the
+ * top-level page table entry populated.
+ *
+ * We disable preemption because we want the same mm active when
+ * we probe the target and when we issue the hypercall. We'll
+ * have the same nominal mm, but if we're a kernel thread, lazy
+ * mm dropping could change our pgd.
+ *
+ * Out of an abundance of caution, this uses probe_kernel_read() to fault
+ * in the target address just in case there's some obscure case
+ * in which the target address isn't readable.
+ */
+
+ preempt_disable();
+
+ probe_kernel_read(&dummy, v, 1);
+
+ if (HYPERVISOR_update_va_mapping((unsigned long)v, pte, 0))
+ BUG();
+
+ if (!PageHighMem(page)) {
+ void *av = __va(PFN_PHYS(pfn));
+
+ if (av != v)
+ if (HYPERVISOR_update_va_mapping((unsigned long)av, pte, 0))
+ BUG();
+ } else
+ kmap_flush_unused();
+
+ preempt_enable();
+}
+
+static void xen_alloc_ldt(struct desc_struct *ldt, unsigned entries)
+{
+ const unsigned entries_per_page = PAGE_SIZE / LDT_ENTRY_SIZE;
+ int i;
+
+ /*
+ * We need to mark all aliases of the LDT pages RO. We
+ * don't need to call vm_flush_aliases(), though, since that's
+ * only responsible for flushing aliases out the TLBs, not the
+ * page tables, and Xen will flush the TLB for us if needed.
+ *
+ * To avoid confusing future readers: none of this is necessary
+ * to load the LDT. The hypervisor only checks this when the
+ * LDT is faulted in due to subsequent descriptor access.
+ */
+
+ for (i = 0; i < entries; i += entries_per_page)
+ set_aliased_prot(ldt + i, PAGE_KERNEL_RO);
+}
+
+static void xen_free_ldt(struct desc_struct *ldt, unsigned entries)
+{
+ const unsigned entries_per_page = PAGE_SIZE / LDT_ENTRY_SIZE;
+ int i;
+
+ for (i = 0; i < entries; i += entries_per_page)
+ set_aliased_prot(ldt + i, PAGE_KERNEL);
+}
+
+static void xen_set_ldt(const void *addr, unsigned entries)
+{
+ struct mmuext_op *op;
+ struct multicall_space mcs = xen_mc_entry(sizeof(*op));
+
+ trace_xen_cpu_set_ldt(addr, entries);
+
+ op = mcs.args;
+ op->cmd = MMUEXT_SET_LDT;
+ op->arg1.linear_addr = (unsigned long)addr;
+ op->arg2.nr_ents = entries;
+
+ MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
+
+ xen_mc_issue(PARAVIRT_LAZY_CPU);
+}
+
+static void xen_load_gdt(const struct desc_ptr *dtr)
+{
+ unsigned long va = dtr->address;
+ unsigned int size = dtr->size + 1;
+ unsigned pages = DIV_ROUND_UP(size, PAGE_SIZE);
+ unsigned long frames[pages];
+ int f;
+
+ /*
+ * A GDT can be up to 64k in size, which corresponds to 8192
+ * 8-byte entries, or 16 4k pages..
+ */
+
+ BUG_ON(size > 65536);
+ BUG_ON(va & ~PAGE_MASK);
+
+ for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) {
+ int level;
+ pte_t *ptep;
+ unsigned long pfn, mfn;
+ void *virt;
+
+ /*
+ * The GDT is per-cpu and is in the percpu data area.
+ * That can be virtually mapped, so we need to do a
+ * page-walk to get the underlying MFN for the
+ * hypercall. The page can also be in the kernel's
+ * linear range, so we need to make that mapping RO too.
+ */
+ ptep = lookup_address(va, &level);
+ BUG_ON(ptep == NULL);
+
+ pfn = pte_pfn(*ptep);
+ mfn = pfn_to_mfn(pfn);
+ virt = __va(PFN_PHYS(pfn));
+
+ frames[f] = mfn;
+
+ make_lowmem_page_readonly((void *)va);
+ make_lowmem_page_readonly(virt);
+ }
+
+ if (HYPERVISOR_set_gdt(frames, size / sizeof(struct desc_struct)))
+ BUG();
+}
+
+/*
+ * load_gdt for early boot, when the gdt is only mapped once
+ */
+static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
+{
+ unsigned long va = dtr->address;
+ unsigned int size = dtr->size + 1;
+ unsigned pages = DIV_ROUND_UP(size, PAGE_SIZE);
+ unsigned long frames[pages];
+ int f;
+
+ /*
+ * A GDT can be up to 64k in size, which corresponds to 8192
+ * 8-byte entries, or 16 4k pages..
+ */
+
+ BUG_ON(size > 65536);
+ BUG_ON(va & ~PAGE_MASK);
+
+ for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) {
+ pte_t pte;
+ unsigned long pfn, mfn;
+
+ pfn = virt_to_pfn(va);
+ mfn = pfn_to_mfn(pfn);
+
+ pte = pfn_pte(pfn, PAGE_KERNEL_RO);
+
+ if (HYPERVISOR_update_va_mapping((unsigned long)va, pte, 0))
+ BUG();
+
+ frames[f] = mfn;
+ }
+
+ if (HYPERVISOR_set_gdt(frames, size / sizeof(struct desc_struct)))
+ BUG();
+}
+
+static inline bool desc_equal(const struct desc_struct *d1,
+ const struct desc_struct *d2)
+{
+ return d1->a == d2->a && d1->b == d2->b;
+}
+
+static void load_TLS_descriptor(struct thread_struct *t,
+ unsigned int cpu, unsigned int i)
+{
+ struct desc_struct *shadow = &per_cpu(shadow_tls_desc, cpu).desc[i];
+ struct desc_struct *gdt;
+ xmaddr_t maddr;
+ struct multicall_space mc;
+
+ if (desc_equal(shadow, &t->tls_array[i]))
+ return;
+
+ *shadow = t->tls_array[i];
+
+ gdt = get_cpu_gdt_rw(cpu);
+ maddr = arbitrary_virt_to_machine(&gdt[GDT_ENTRY_TLS_MIN+i]);
+ mc = __xen_mc_entry(0);
+
+ MULTI_update_descriptor(mc.mc, maddr.maddr, t->tls_array[i]);
+}
+
+static void xen_load_tls(struct thread_struct *t, unsigned int cpu)
+{
+ /*
+ * XXX sleazy hack: If we're being called in a lazy-cpu zone
+ * and lazy gs handling is enabled, it means we're in a
+ * context switch, and %gs has just been saved. This means we
+ * can zero it out to prevent faults on exit from the
+ * hypervisor if the next process has no %gs. Either way, it
+ * has been saved, and the new value will get loaded properly.
+ * This will go away as soon as Xen has been modified to not
+ * save/restore %gs for normal hypercalls.
+ *
+ * On x86_64, this hack is not used for %gs, because gs points
+ * to KERNEL_GS_BASE (and uses it for PDA references), so we
+ * must not zero %gs on x86_64
+ *
+ * For x86_64, we need to zero %fs, otherwise we may get an
+ * exception between the new %fs descriptor being loaded and
+ * %fs being effectively cleared at __switch_to().
+ */
+ if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_CPU) {
+#ifdef CONFIG_X86_32
+ lazy_load_gs(0);
+#else
+ loadsegment(fs, 0);
+#endif
+ }
+
+ xen_mc_batch();
+
+ load_TLS_descriptor(t, cpu, 0);
+ load_TLS_descriptor(t, cpu, 1);
+ load_TLS_descriptor(t, cpu, 2);
+
+ xen_mc_issue(PARAVIRT_LAZY_CPU);
+}
+
+#ifdef CONFIG_X86_64
+static void xen_load_gs_index(unsigned int idx)
+{
+ if (HYPERVISOR_set_segment_base(SEGBASE_GS_USER_SEL, idx))
+ BUG();
+}
+#endif
+
+static void xen_write_ldt_entry(struct desc_struct *dt, int entrynum,
+ const void *ptr)
+{
+ xmaddr_t mach_lp = arbitrary_virt_to_machine(&dt[entrynum]);
+ u64 entry = *(u64 *)ptr;
+
+ trace_xen_cpu_write_ldt_entry(dt, entrynum, entry);
+
+ preempt_disable();
+
+ xen_mc_flush();
+ if (HYPERVISOR_update_descriptor(mach_lp.maddr, entry))
+ BUG();
+
+ preempt_enable();
+}
+
+static int cvt_gate_to_trap(int vector, const gate_desc *val,
+ struct trap_info *info)
+{
+ unsigned long addr;
+
+ if (val->type != GATE_TRAP && val->type != GATE_INTERRUPT)
+ return 0;
+
+ info->vector = vector;
+
+ addr = gate_offset(*val);
+#ifdef CONFIG_X86_64
+ /*
+ * Look for known traps using IST, and substitute them
+ * appropriately. The debugger ones are the only ones we care
+ * about. Xen will handle faults like double_fault,
+ * so we should never see them. Warn if
+ * there's an unexpected IST-using fault handler.
+ */
+ if (addr == (unsigned long)debug)
+ addr = (unsigned long)xen_debug;
+ else if (addr == (unsigned long)int3)
+ addr = (unsigned long)xen_int3;
+ else if (addr == (unsigned long)stack_segment)
+ addr = (unsigned long)xen_stack_segment;
+ else if (addr == (unsigned long)double_fault) {
+ /* Don't need to handle these */
+ return 0;
+#ifdef CONFIG_X86_MCE
+ } else if (addr == (unsigned long)machine_check) {
+ /*
+ * When the Xen hypervisor injects a vMCE into the guest,
+ * use the native MCE handler to handle it.
+ */
+ ;
+#endif
+ } else if (addr == (unsigned long)nmi)
+ /*
+ * Use the native version as well.
+ */
+ ;
+ else {
+ /* Some other trap using IST? */
+ if (WARN_ON(val->ist != 0))
+ return 0;
+ }
+#endif /* CONFIG_X86_64 */
+ info->address = addr;
+
+ info->cs = gate_segment(*val);
+ info->flags = val->dpl;
+ /* interrupt gates clear IF */
+ if (val->type == GATE_INTERRUPT)
+ info->flags |= 1 << 2;
+
+ return 1;
+}
+
+/* Locations of each CPU's IDT */
+static DEFINE_PER_CPU(struct desc_ptr, idt_desc);
+
+/* Set an IDT entry. If the entry is part of the current IDT, then
+ also update Xen. */
+static void xen_write_idt_entry(gate_desc *dt, int entrynum, const gate_desc *g)
+{
+ unsigned long p = (unsigned long)&dt[entrynum];
+ unsigned long start, end;
+
+ trace_xen_cpu_write_idt_entry(dt, entrynum, g);
+
+ preempt_disable();
+
+ start = __this_cpu_read(idt_desc.address);
+ end = start + __this_cpu_read(idt_desc.size) + 1;
+
+ xen_mc_flush();
+
+ native_write_idt_entry(dt, entrynum, g);
+
+ if (p >= start && (p + 8) <= end) {
+ struct trap_info info[2];
+
+ info[1].address = 0;
+
+ if (cvt_gate_to_trap(entrynum, g, &info[0]))
+ if (HYPERVISOR_set_trap_table(info))
+ BUG();
+ }
+
+ preempt_enable();
+}
+
+static void xen_convert_trap_info(const struct desc_ptr *desc,
+ struct trap_info *traps)
+{
+ unsigned in, out, count;
+
+ count = (desc->size+1) / sizeof(gate_desc);
+ BUG_ON(count > 256);
+
+ for (in = out = 0; in < count; in++) {
+ gate_desc *entry = (gate_desc *)(desc->address) + in;
+
+ if (cvt_gate_to_trap(in, entry, &traps[out]))
+ out++;
+ }
+ traps[out].address = 0;
+}
+
+void xen_copy_trap_info(struct trap_info *traps)
+{
+ const struct desc_ptr *desc = this_cpu_ptr(&idt_desc);
+
+ xen_convert_trap_info(desc, traps);
+}
+
+/* Load a new IDT into Xen. In principle this can be per-CPU, so we
+ hold a spinlock to protect the static traps[] array (static because
+ it avoids allocation, and saves stack space). */
+static void xen_load_idt(const struct desc_ptr *desc)
+{
+ static DEFINE_SPINLOCK(lock);
+ static struct trap_info traps[257];
+
+ trace_xen_cpu_load_idt(desc);
+
+ spin_lock(&lock);
+
+ memcpy(this_cpu_ptr(&idt_desc), desc, sizeof(idt_desc));
+
+ xen_convert_trap_info(desc, traps);
+
+ xen_mc_flush();
+ if (HYPERVISOR_set_trap_table(traps))
+ BUG();
+
+ spin_unlock(&lock);
+}
+
+/* Write a GDT descriptor entry. Ignore LDT descriptors, since
+ they're handled differently. */
+static void xen_write_gdt_entry(struct desc_struct *dt, int entry,
+ const void *desc, int type)
+{
+ trace_xen_cpu_write_gdt_entry(dt, entry, desc, type);
+
+ preempt_disable();
+
+ switch (type) {
+ case DESC_LDT:
+ case DESC_TSS:
+ /* ignore */
+ break;
+
+ default: {
+ xmaddr_t maddr = arbitrary_virt_to_machine(&dt[entry]);
+
+ xen_mc_flush();
+ if (HYPERVISOR_update_descriptor(maddr.maddr, *(u64 *)desc))
+ BUG();
+ }
+
+ }
+
+ preempt_enable();
+}
+
+/*
+ * Version of write_gdt_entry for use at early boot-time needed to
+ * update an entry as simply as possible.
+ */
+static void __init xen_write_gdt_entry_boot(struct desc_struct *dt, int entry,
+ const void *desc, int type)
+{
+ trace_xen_cpu_write_gdt_entry(dt, entry, desc, type);
+
+ switch (type) {
+ case DESC_LDT:
+ case DESC_TSS:
+ /* ignore */
+ break;
+
+ default: {
+ xmaddr_t maddr = virt_to_machine(&dt[entry]);
+
+ if (HYPERVISOR_update_descriptor(maddr.maddr, *(u64 *)desc))
+ dt[entry] = *(struct desc_struct *)desc;
+ }
+
+ }
+}
+
+static void xen_load_sp0(struct tss_struct *tss,
+ struct thread_struct *thread)
+{
+ struct multicall_space mcs;
+
+ mcs = xen_mc_entry(0);
+ MULTI_stack_switch(mcs.mc, __KERNEL_DS, thread->sp0);
+ xen_mc_issue(PARAVIRT_LAZY_CPU);
+ tss->x86_tss.sp0 = thread->sp0;
+}
+
+void xen_set_iopl_mask(unsigned mask)
+{
+ struct physdev_set_iopl set_iopl;
+
+ /* Force the change at ring 0. */
+ set_iopl.iopl = (mask == 0) ? 1 : (mask >> 12) & 3;
+ HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl);
+}
+
+static void xen_io_delay(void)
+{
+}
+
+static DEFINE_PER_CPU(unsigned long, xen_cr0_value);
+
+static unsigned long xen_read_cr0(void)
+{
+ unsigned long cr0 = this_cpu_read(xen_cr0_value);
+
+ if (unlikely(cr0 == 0)) {
+ cr0 = native_read_cr0();
+ this_cpu_write(xen_cr0_value, cr0);
+ }
+
+ return cr0;
+}
+
+static void xen_write_cr0(unsigned long cr0)
+{
+ struct multicall_space mcs;
+
+ this_cpu_write(xen_cr0_value, cr0);
+
+ /* Only pay attention to cr0.TS; everything else is
+ ignored. */
+ mcs = xen_mc_entry(0);
+
+ MULTI_fpu_taskswitch(mcs.mc, (cr0 & X86_CR0_TS) != 0);
+
+ xen_mc_issue(PARAVIRT_LAZY_CPU);
+}
+
+static void xen_write_cr4(unsigned long cr4)
+{
+ cr4 &= ~(X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PCE);
+
+ native_write_cr4(cr4);
+}
+#ifdef CONFIG_X86_64
+static inline unsigned long xen_read_cr8(void)
+{
+ return 0;
+}
+static inline void xen_write_cr8(unsigned long val)
+{
+ BUG_ON(val);
+}
+#endif
+
+static u64 xen_read_msr_safe(unsigned int msr, int *err)
+{
+ u64 val;
+
+ if (pmu_msr_read(msr, &val, err))
+ return val;
+
+ val = native_read_msr_safe(msr, err);
+ switch (msr) {
+ case MSR_IA32_APICBASE:
+#ifdef CONFIG_X86_X2APIC
+ if (!(cpuid_ecx(1) & (1 << (X86_FEATURE_X2APIC & 31))))
+#endif
+ val &= ~X2APIC_ENABLE;
+ break;
+ }
+ return val;
+}
+
+static int xen_write_msr_safe(unsigned int msr, unsigned low, unsigned high)
+{
+ int ret;
+
+ ret = 0;
+
+ switch (msr) {
+#ifdef CONFIG_X86_64
+ unsigned which;
+ u64 base;
+
+ case MSR_FS_BASE: which = SEGBASE_FS; goto set;
+ case MSR_KERNEL_GS_BASE: which = SEGBASE_GS_USER; goto set;
+ case MSR_GS_BASE: which = SEGBASE_GS_KERNEL; goto set;
+
+ set:
+ base = ((u64)high << 32) | low;
+ if (HYPERVISOR_set_segment_base(which, base) != 0)
+ ret = -EIO;
+ break;
+#endif
+
+ case MSR_STAR:
+ case MSR_CSTAR:
+ case MSR_LSTAR:
+ case MSR_SYSCALL_MASK:
+ case MSR_IA32_SYSENTER_CS:
+ case MSR_IA32_SYSENTER_ESP:
+ case MSR_IA32_SYSENTER_EIP:
+ /* Fast syscall setup is all done in hypercalls, so
+ these are all ignored. Stub them out here to stop
+ Xen console noise. */
+ break;
+
+ default:
+ if (!pmu_msr_write(msr, low, high, &ret))
+ ret = native_write_msr_safe(msr, low, high);
+ }
+
+ return ret;
+}
+
+static u64 xen_read_msr(unsigned int msr)
+{
+ /*
+ * This will silently swallow a #GP from RDMSR. It may be worth
+ * changing that.
+ */
+ int err;
+
+ return xen_read_msr_safe(msr, &err);
+}
+
+static void xen_write_msr(unsigned int msr, unsigned low, unsigned high)
+{
+ /*
+ * This will silently swallow a #GP from WRMSR. It may be worth
+ * changing that.
+ */
+ xen_write_msr_safe(msr, low, high);
+}
+
+void xen_setup_shared_info(void)
+{
+ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
+ set_fixmap(FIX_PARAVIRT_BOOTMAP,
+ xen_start_info->shared_info);
+
+ HYPERVISOR_shared_info =
+ (struct shared_info *)fix_to_virt(FIX_PARAVIRT_BOOTMAP);
+ } else
+ HYPERVISOR_shared_info =
+ (struct shared_info *)__va(xen_start_info->shared_info);
+
+#ifndef CONFIG_SMP
+ /* In UP this is as good a place as any to set up shared info */
+ xen_setup_vcpu_info_placement();
+#endif
+
+ xen_setup_mfn_list_list();
+}
+
+/* This is called once we have the cpu_possible_mask */
+void xen_setup_vcpu_info_placement(void)
+{
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ /* Set up direct vCPU id mapping for PV guests. */
+ per_cpu(xen_vcpu_id, cpu) = cpu;
+ xen_vcpu_setup(cpu);
+ }
+
+ /*
+ * xen_vcpu_setup managed to place the vcpu_info within the
+ * percpu area for all cpus, so make use of it.
+ */
+ if (xen_have_vcpu_info_placement) {
+ pv_irq_ops.save_fl = __PV_IS_CALLEE_SAVE(xen_save_fl_direct);
+ pv_irq_ops.restore_fl = __PV_IS_CALLEE_SAVE(xen_restore_fl_direct);
+ pv_irq_ops.irq_disable = __PV_IS_CALLEE_SAVE(xen_irq_disable_direct);
+ pv_irq_ops.irq_enable = __PV_IS_CALLEE_SAVE(xen_irq_enable_direct);
+ pv_mmu_ops.read_cr2 = xen_read_cr2_direct;
+ }
+}
+
+static unsigned xen_patch(u8 type, u16 clobbers, void *insnbuf,
+ unsigned long addr, unsigned len)
+{
+ char *start, *end, *reloc;
+ unsigned ret;
+
+ start = end = reloc = NULL;
+
+#define SITE(op, x) \
+ case PARAVIRT_PATCH(op.x): \
+ if (xen_have_vcpu_info_placement) { \
+ start = (char *)xen_##x##_direct; \
+ end = xen_##x##_direct_end; \
+ reloc = xen_##x##_direct_reloc; \
+ } \
+ goto patch_site
+
+ switch (type) {
+ SITE(pv_irq_ops, irq_enable);
+ SITE(pv_irq_ops, irq_disable);
+ SITE(pv_irq_ops, save_fl);
+ SITE(pv_irq_ops, restore_fl);
+#undef SITE
+
+ patch_site:
+ if (start == NULL || (end-start) > len)
+ goto default_patch;
+
+ ret = paravirt_patch_insns(insnbuf, len, start, end);
+
+ /* Note: because reloc is assigned from something that
+ appears to be an array, gcc assumes it's non-null,
+ but doesn't know its relationship with start and
+ end. */
+ if (reloc > start && reloc < end) {
+ int reloc_off = reloc - start;
+ long *relocp = (long *)(insnbuf + reloc_off);
+ long delta = start - (char *)addr;
+
+ *relocp += delta;
+ }
+ break;
+
+ default_patch:
+ default:
+ ret = paravirt_patch_default(type, clobbers, insnbuf,
+ addr, len);
+ break;
+ }
+
+ return ret;
+}
+
+static const struct pv_info xen_info __initconst = {
+ .shared_kernel_pmd = 0,
+
+#ifdef CONFIG_X86_64
+ .extra_user_64bit_cs = FLAT_USER_CS64,
+#endif
+ .name = "Xen",
+};
+
+static const struct pv_init_ops xen_init_ops __initconst = {
+ .patch = xen_patch,
+};
+
+static const struct pv_cpu_ops xen_cpu_ops __initconst = {
+ .cpuid = xen_cpuid,
+
+ .set_debugreg = xen_set_debugreg,
+ .get_debugreg = xen_get_debugreg,
+
+ .read_cr0 = xen_read_cr0,
+ .write_cr0 = xen_write_cr0,
+
+ .read_cr4 = native_read_cr4,
+ .write_cr4 = xen_write_cr4,
+
+#ifdef CONFIG_X86_64
+ .read_cr8 = xen_read_cr8,
+ .write_cr8 = xen_write_cr8,
+#endif
+
+ .wbinvd = native_wbinvd,
+
+ .read_msr = xen_read_msr,
+ .write_msr = xen_write_msr,
+
+ .read_msr_safe = xen_read_msr_safe,
+ .write_msr_safe = xen_write_msr_safe,
+
+ .read_pmc = xen_read_pmc,
+
+ .iret = xen_iret,
+#ifdef CONFIG_X86_64
+ .usergs_sysret64 = xen_sysret64,
+#endif
+
+ .load_tr_desc = paravirt_nop,
+ .set_ldt = xen_set_ldt,
+ .load_gdt = xen_load_gdt,
+ .load_idt = xen_load_idt,
+ .load_tls = xen_load_tls,
+#ifdef CONFIG_X86_64
+ .load_gs_index = xen_load_gs_index,
+#endif
+
+ .alloc_ldt = xen_alloc_ldt,
+ .free_ldt = xen_free_ldt,
+
+ .store_idt = native_store_idt,
+ .store_tr = xen_store_tr,
+
+ .write_ldt_entry = xen_write_ldt_entry,
+ .write_gdt_entry = xen_write_gdt_entry,
+ .write_idt_entry = xen_write_idt_entry,
+ .load_sp0 = xen_load_sp0,
+
+ .set_iopl_mask = xen_set_iopl_mask,
+ .io_delay = xen_io_delay,
+
+ /* Xen takes care of %gs when switching to usermode for us */
+ .swapgs = paravirt_nop,
+
+ .start_context_switch = paravirt_start_context_switch,
+ .end_context_switch = xen_end_context_switch,
+};
+
+static void xen_restart(char *msg)
+{
+ xen_reboot(SHUTDOWN_reboot);
+}
+
+static void xen_machine_halt(void)
+{
+ xen_reboot(SHUTDOWN_poweroff);
+}
+
+static void xen_machine_power_off(void)
+{
+ if (pm_power_off)
+ pm_power_off();
+ xen_reboot(SHUTDOWN_poweroff);
+}
+
+static void xen_crash_shutdown(struct pt_regs *regs)
+{
+ xen_reboot(SHUTDOWN_crash);
+}
+
+static const struct machine_ops xen_machine_ops __initconst = {
+ .restart = xen_restart,
+ .halt = xen_machine_halt,
+ .power_off = xen_machine_power_off,
+ .shutdown = xen_machine_halt,
+ .crash_shutdown = xen_crash_shutdown,
+ .emergency_restart = xen_emergency_restart,
+};
+
+static unsigned char xen_get_nmi_reason(void)
+{
+ unsigned char reason = 0;
+
+ /* Construct a value which looks like it came from port 0x61. */
+ if (test_bit(_XEN_NMIREASON_io_error,
+ &HYPERVISOR_shared_info->arch.nmi_reason))
+ reason |= NMI_REASON_IOCHK;
+ if (test_bit(_XEN_NMIREASON_pci_serr,
+ &HYPERVISOR_shared_info->arch.nmi_reason))
+ reason |= NMI_REASON_SERR;
+
+ return reason;
+}
+
+static void __init xen_boot_params_init_edd(void)
+{
+#if IS_ENABLED(CONFIG_EDD)
+ struct xen_platform_op op;
+ struct edd_info *edd_info;
+ u32 *mbr_signature;
+ unsigned nr;
+ int ret;
+
+ edd_info = boot_params.eddbuf;
+ mbr_signature = boot_params.edd_mbr_sig_buffer;
+
+ op.cmd = XENPF_firmware_info;
+
+ op.u.firmware_info.type = XEN_FW_DISK_INFO;
+ for (nr = 0; nr < EDDMAXNR; nr++) {
+ struct edd_info *info = edd_info + nr;
+
+ op.u.firmware_info.index = nr;
+ info->params.length = sizeof(info->params);
+ set_xen_guest_handle(op.u.firmware_info.u.disk_info.edd_params,
+ &info->params);
+ ret = HYPERVISOR_platform_op(&op);
+ if (ret)
+ break;
+
+#define C(x) info->x = op.u.firmware_info.u.disk_info.x
+ C(device);
+ C(version);
+ C(interface_support);
+ C(legacy_max_cylinder);
+ C(legacy_max_head);
+ C(legacy_sectors_per_track);
+#undef C
+ }
+ boot_params.eddbuf_entries = nr;
+
+ op.u.firmware_info.type = XEN_FW_DISK_MBR_SIGNATURE;
+ for (nr = 0; nr < EDD_MBR_SIG_MAX; nr++) {
+ op.u.firmware_info.index = nr;
+ ret = HYPERVISOR_platform_op(&op);
+ if (ret)
+ break;
+ mbr_signature[nr] = op.u.firmware_info.u.disk_mbr_signature.mbr_signature;
+ }
+ boot_params.edd_mbr_sig_buf_entries = nr;
+#endif
+}
+
+/*
+ * Set up the GDT and segment registers for -fstack-protector. Until
+ * we do this, we have to be careful not to call any stack-protected
+ * function, which is most of the kernel.
+ */
+static void xen_setup_gdt(int cpu)
+{
+ pv_cpu_ops.write_gdt_entry = xen_write_gdt_entry_boot;
+ pv_cpu_ops.load_gdt = xen_load_gdt_boot;
+
+ setup_stack_canary_segment(0);
+ switch_to_new_gdt(0);
+
+ pv_cpu_ops.write_gdt_entry = xen_write_gdt_entry;
+ pv_cpu_ops.load_gdt = xen_load_gdt;
+}
+
+static void __init xen_dom0_set_legacy_features(void)
+{
+ x86_platform.legacy.rtc = 1;
+}
+
+/* First C function to be called on Xen boot */
+asmlinkage __visible void __init xen_start_kernel(void)
+{
+ struct physdev_set_iopl set_iopl;
+ unsigned long initrd_start = 0;
+ int rc;
+
+ if (!xen_start_info)
+ return;
+
+ xen_domain_type = XEN_PV_DOMAIN;
+
+ xen_setup_features();
+
+ xen_setup_machphys_mapping();
+
+ /* Install Xen paravirt ops */
+ pv_info = xen_info;
+ pv_init_ops = xen_init_ops;
+ pv_cpu_ops = xen_cpu_ops;
+
+ x86_platform.get_nmi_reason = xen_get_nmi_reason;
+
+ x86_init.resources.memory_setup = xen_memory_setup;
+ x86_init.oem.arch_setup = xen_arch_setup;
+ x86_init.oem.banner = xen_banner;
+
+ xen_init_time_ops();
+
+ /*
+ * Set up some pagetable state before starting to set any ptes.
+ */
+
+ xen_init_mmu_ops();
+
+ /* Prevent unwanted bits from being set in PTEs. */
+ __supported_pte_mask &= ~_PAGE_GLOBAL;
+
+ /*
+ * Prevent page tables from being allocated in highmem, even
+ * if CONFIG_HIGHPTE is enabled.
+ */
+ __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
+
+ /* Work out if we support NX */
+ x86_configure_nx();
+
+ /* Get mfn list */
+ xen_build_dynamic_phys_to_machine();
+
+ /*
+ * Set up kernel GDT and segment registers, mainly so that
+ * -fstack-protector code can be executed.
+ */
+ xen_setup_gdt(0);
+
+ xen_init_irq_ops();
+ xen_init_capabilities();
+
+#ifdef CONFIG_X86_LOCAL_APIC
+ /*
+ * set up the basic apic ops.
+ */
+ xen_init_apic();
+#endif
+
+ if (xen_feature(XENFEAT_mmu_pt_update_preserve_ad)) {
+ pv_mmu_ops.ptep_modify_prot_start = xen_ptep_modify_prot_start;
+ pv_mmu_ops.ptep_modify_prot_commit = xen_ptep_modify_prot_commit;
+ }
+
+ machine_ops = xen_machine_ops;
+
+ /*
+ * The only reliable way to retain the initial address of the
+ * percpu gdt_page is to remember it here, so we can go and
+ * mark it RW later, when the initial percpu area is freed.
+ */
+ xen_initial_gdt = &per_cpu(gdt_page, 0);
+
+ xen_smp_init();
+
+#ifdef CONFIG_ACPI_NUMA
+ /*
+ * The pages we get from Xen are not related to machine pages, so
+ * any NUMA information the kernel tries to get from ACPI will
+ * be meaningless. Prevent it from trying.
+ */
+ acpi_numa = -1;
+#endif
+ /* Don't do the full vcpu_info placement stuff until we have a
+ possible map and a non-dummy shared_info. */
+ per_cpu(xen_vcpu, 0) = &HYPERVISOR_shared_info->vcpu_info[0];
+
+ WARN_ON(xen_cpuhp_setup(xen_cpu_up_prepare_pv, xen_cpu_dead_pv));
+
+ local_irq_disable();
+ early_boot_irqs_disabled = true;
+
+ xen_raw_console_write("mapping kernel into physical memory\n");
+ xen_setup_kernel_pagetable((pgd_t *)xen_start_info->pt_base,
+ xen_start_info->nr_pages);
+ xen_reserve_special_pages();
+
+ /* keep using Xen gdt for now; no urgent need to change it */
+
+#ifdef CONFIG_X86_32
+ pv_info.kernel_rpl = 1;
+ if (xen_feature(XENFEAT_supervisor_mode_kernel))
+ pv_info.kernel_rpl = 0;
+#else
+ pv_info.kernel_rpl = 0;
+#endif
+ /* set the limit of our address space */
+ xen_reserve_top();
+
+ /*
+ * We used to do this in xen_arch_setup, but that is too late
+ * on AMD where early_cpu_init (run before ->arch_setup()) calls
+ * early_amd_init which pokes 0xcf8 port.
+ */
+ set_iopl.iopl = 1;
+ rc = HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl);
+ if (rc != 0)
+ xen_raw_printk("physdev_op failed %d\n", rc);
+
+#ifdef CONFIG_X86_32
+ /* set up basic CPUID stuff */
+ cpu_detect(&new_cpu_data);
+ set_cpu_cap(&new_cpu_data, X86_FEATURE_FPU);
+ new_cpu_data.x86_capability[CPUID_1_EDX] = cpuid_edx(1);
+#endif
+
+ if (xen_start_info->mod_start) {
+ if (xen_start_info->flags & SIF_MOD_START_PFN)
+ initrd_start = PFN_PHYS(xen_start_info->mod_start);
+ else
+ initrd_start = __pa(xen_start_info->mod_start);
+ }
+
+ /* Poke various useful things into boot_params */
+ boot_params.hdr.type_of_loader = (9 << 4) | 0;
+ boot_params.hdr.ramdisk_image = initrd_start;
+ boot_params.hdr.ramdisk_size = xen_start_info->mod_len;
+ boot_params.hdr.cmd_line_ptr = __pa(xen_start_info->cmd_line);
+ boot_params.hdr.hardware_subarch = X86_SUBARCH_XEN;
+
+ if (!xen_initial_domain()) {
+ add_preferred_console("xenboot", 0, NULL);
+ add_preferred_console("tty", 0, NULL);
+ add_preferred_console("hvc", 0, NULL);
+ if (pci_xen)
+ x86_init.pci.arch_init = pci_xen_init;
+ } else {
+ const struct dom0_vga_console_info *info =
+ (void *)((char *)xen_start_info +
+ xen_start_info->console.dom0.info_off);
+ struct xen_platform_op op = {
+ .cmd = XENPF_firmware_info,
+ .interface_version = XENPF_INTERFACE_VERSION,
+ .u.firmware_info.type = XEN_FW_KBD_SHIFT_FLAGS,
+ };
+
+ x86_platform.set_legacy_features =
+ xen_dom0_set_legacy_features;
+ xen_init_vga(info, xen_start_info->console.dom0.info_size);
+ xen_start_info->console.domU.mfn = 0;
+ xen_start_info->console.domU.evtchn = 0;
+
+ if (HYPERVISOR_platform_op(&op) == 0)
+ boot_params.kbd_status = op.u.firmware_info.u.kbd_shift_flags;
+
+ /* Make sure ACS will be enabled */
+ pci_request_acs();
+
+ xen_acpi_sleep_register();
+
+ /* Avoid searching for BIOS MP tables */
+ x86_init.mpparse.find_smp_config = x86_init_noop;
+ x86_init.mpparse.get_smp_config = x86_init_uint_noop;
+
+ xen_boot_params_init_edd();
+ }
+#ifdef CONFIG_PCI
+ /* PCI BIOS service won't work from a PV guest. */
+ pci_probe &= ~PCI_PROBE_BIOS;
+#endif
+ xen_raw_console_write("about to get started...\n");
+
+ /* Let's presume PV guests always boot on vCPU with id 0. */
+ per_cpu(xen_vcpu_id, 0) = 0;
+
+ xen_setup_runstate_info(0);
+
+ xen_efi_init();
+
+ /* Start the world */
+#ifdef CONFIG_X86_32
+ i386_start_kernel();
+#else
+ cr4_init_shadow(); /* 32b kernel does this in i386_start_kernel() */
+ x86_64_start_reservations((char *)__pa_symbol(&boot_params));
+#endif
+}
+
+static int xen_cpu_up_prepare_pv(unsigned int cpu)
+{
+ int rc;
+
+ xen_setup_timer(cpu);
+
+ rc = xen_smp_intr_init(cpu);
+ if (rc) {
+ WARN(1, "xen_smp_intr_init() for CPU %d failed: %d\n",
+ cpu, rc);
+ return rc;
+ }
+
+ rc = xen_smp_intr_init_pv(cpu);
+ if (rc) {
+ WARN(1, "xen_smp_intr_init_pv() for CPU %d failed: %d\n",
+ cpu, rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+static int xen_cpu_dead_pv(unsigned int cpu)
+{
+ xen_smp_intr_free(cpu);
+ xen_smp_intr_free_pv(cpu);
+
+ xen_teardown_timer(cpu);
+
+ return 0;
+}
+
+static uint32_t __init xen_platform_pv(void)
+{
+ if (xen_pv_domain())
+ return xen_cpuid_base();
+
+ return 0;
+}
+
+const struct hypervisor_x86 x86_hyper_xen_pv = {
+ .name = "Xen PV",
+ .detect = xen_platform_pv,
+ .pin_vcpu = xen_pin_vcpu,
+};
+EXPORT_SYMBOL(x86_hyper_xen_pv);
diff --git a/arch/x86/xen/enlighten_pvh.c b/arch/x86/xen/enlighten_pvh.c
new file mode 100644
index 000000000000..98ab17673454
--- /dev/null
+++ b/arch/x86/xen/enlighten_pvh.c
@@ -0,0 +1,106 @@
+#include <linux/acpi.h>
+
+#include <xen/hvc-console.h>
+
+#include <asm/io_apic.h>
+#include <asm/hypervisor.h>
+#include <asm/e820/api.h>
+
+#include <asm/xen/interface.h>
+#include <asm/xen/hypercall.h>
+
+#include <xen/interface/memory.h>
+#include <xen/interface/hvm/start_info.h>
+
+/*
+ * PVH variables.
+ *
+ * xen_pvh and pvh_bootparams need to live in the data segment since
+ * they are used after startup_{32|64}, which clears .bss, has run.
+ */
+bool xen_pvh __attribute__((section(".data"))) = 0;
+struct boot_params pvh_bootparams __attribute__((section(".data")));
+
+struct hvm_start_info pvh_start_info;
+unsigned int pvh_start_info_sz = sizeof(pvh_start_info);
+
+static void xen_pvh_arch_setup(void)
+{
+ /* Make sure we don't fall back to (default) ACPI_IRQ_MODEL_PIC. */
+ if (nr_ioapics == 0)
+ acpi_irq_model = ACPI_IRQ_MODEL_PLATFORM;
+}
+
+static void __init init_pvh_bootparams(void)
+{
+ struct xen_memory_map memmap;
+ int rc;
+
+ memset(&pvh_bootparams, 0, sizeof(pvh_bootparams));
+
+ memmap.nr_entries = ARRAY_SIZE(pvh_bootparams.e820_table);
+ set_xen_guest_handle(memmap.buffer, pvh_bootparams.e820_table);
+ rc = HYPERVISOR_memory_op(XENMEM_memory_map, &memmap);
+ if (rc) {
+ xen_raw_printk("XENMEM_memory_map failed (%d)\n", rc);
+ BUG();
+ }
+ pvh_bootparams.e820_entries = memmap.nr_entries;
+
+ if (pvh_bootparams.e820_entries < E820_MAX_ENTRIES_ZEROPAGE - 1) {
+ pvh_bootparams.e820_table[pvh_bootparams.e820_entries].addr =
+ ISA_START_ADDRESS;
+ pvh_bootparams.e820_table[pvh_bootparams.e820_entries].size =
+ ISA_END_ADDRESS - ISA_START_ADDRESS;
+ pvh_bootparams.e820_table[pvh_bootparams.e820_entries].type =
+ E820_TYPE_RESERVED;
+ pvh_bootparams.e820_entries++;
+ } else
+ xen_raw_printk("Warning: Cannot fit ISA range into e820\n");
+
+ pvh_bootparams.hdr.cmd_line_ptr =
+ pvh_start_info.cmdline_paddr;
+
+ /* The first module is always ramdisk. */
+ if (pvh_start_info.nr_modules) {
+ struct hvm_modlist_entry *modaddr =
+ __va(pvh_start_info.modlist_paddr);
+ pvh_bootparams.hdr.ramdisk_image = modaddr->paddr;
+ pvh_bootparams.hdr.ramdisk_size = modaddr->size;
+ }
+
+ /*
+ * See Documentation/x86/boot.txt.
+ *
+ * Version 2.12 supports Xen entry point but we will use default x86/PC
+ * environment (i.e. hardware_subarch 0).
+ */
+ pvh_bootparams.hdr.version = 0x212;
+ pvh_bootparams.hdr.type_of_loader = (9 << 4) | 0; /* Xen loader */
+}
+
+/*
+ * This routine (and those that it might call) should not use
+ * anything that lives in .bss since that segment will be cleared later.
+ */
+void __init xen_prepare_pvh(void)
+{
+ u32 msr;
+ u64 pfn;
+
+ if (pvh_start_info.magic != XEN_HVM_START_MAGIC_VALUE) {
+ xen_raw_printk("Error: Unexpected magic value (0x%08x)\n",
+ pvh_start_info.magic);
+ BUG();
+ }
+
+ xen_pvh = 1;
+
+ msr = cpuid_ebx(xen_cpuid_base() + 2);
+ pfn = __pa(hypercall_page);
+ wrmsr_safe(msr, (u32)pfn, (u32)(pfn >> 32));
+
+ init_pvh_bootparams();
+
+ x86_init.oem.arch_setup = xen_pvh_arch_setup;
+}
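
As a side note: both xen_platform_pv() earlier in this patch and xen_prepare_pvh() just above rely on xen_cpuid_base() to locate the hypervisor's CPUID leaves, and the PVH path then reads the hypercall-page MSR index from cpuid_ebx(base + 2). Below is a minimal userspace sketch of that discovery step, not part of the patch itself; it assumes GCC/clang's <cpuid.h>, and the helper name find_xen_cpuid_base() is illustrative only.

/*
 * Hypothetical userspace sketch, NOT part of the patch above: locate the
 * Xen CPUID leaf base roughly the way xen_cpuid_base() does (scan
 * 0x40000000..0x40010000 in 0x100 steps for the "XenVMMXenVMM"
 * signature, requiring at least two extra leaves), then read leaf
 * base + 2, whose EBX holds the hypercall-page MSR index used by
 * xen_prepare_pvh(). Prints nothing useful outside a Xen guest.
 */
#include <cpuid.h>
#include <stdio.h>
#include <string.h>

static unsigned int find_xen_cpuid_base(void)
{
	unsigned int base, eax, ebx, ecx, edx;
	char sig[13];

	for (base = 0x40000000; base < 0x40010000; base += 0x100) {
		__cpuid(base, eax, ebx, ecx, edx);
		memcpy(sig + 0, &ebx, 4);
		memcpy(sig + 4, &ecx, 4);
		memcpy(sig + 8, &edx, 4);
		sig[12] = '\0';
		/* Need the Xen signature and at least two extra leaves. */
		if (!strcmp(sig, "XenVMMXenVMM") && eax >= base + 2)
			return base;
	}
	return 0;
}

int main(void)
{
	unsigned int base = find_xen_cpuid_base();
	unsigned int eax, ebx, ecx, edx;

	if (!base) {
		puts("no Xen CPUID signature found");
		return 1;
	}
	/* Leaf base + 2: EBX is the MSR used to map the hypercall page. */
	__cpuid(base + 2, eax, ebx, ecx, edx);
	printf("Xen leaves at 0x%x, hypercall page MSR index 0x%x\n",
	       base, ebx);
	return 0;
}
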
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index f226038a39ca..5e375a5e815f 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1,84 +1,10 @@
-/*
- * Xen mmu operations
- *
- * This file contains the various mmu fetch and update operations.
- * The most important job they must perform is the mapping between the
- * domain's pfn and the overall machine mfns.
- *
- * Xen allows guests to directly update the pagetable, in a controlled
- * fashion. In other words, the guest modifies the same pagetable
- * that the CPU actually uses, which eliminates the overhead of having
- * a separate shadow pagetable.
- *
- * In order to allow this, it falls on the guest domain to map its
- * notion of a "physical" pfn - which is just a domain-local linear
- * address - into a real "machine address" which the CPU's MMU can
- * use.
- *
- * A pgd_t/pmd_t/pte_t will typically contain an mfn, and so can be
- * inserted directly into the pagetable. When creating a new
- * pte/pmd/pgd, it converts the passed pfn into an mfn. Conversely,
- * when reading the content back with __(pgd|pmd|pte)_val, it converts
- * the mfn back into a pfn.
- *
- * The other constraint is that all pages which make up a pagetable
- * must be mapped read-only in the guest. This prevents uncontrolled
- * guest updates to the pagetable. Xen strictly enforces this, and
- * will disallow any pagetable update which will end up mapping a
- * pagetable page RW, and will disallow using any writable page as a
- * pagetable.
- *
- * Naively, when loading %cr3 with the base of a new pagetable, Xen
- * would need to validate the whole pagetable before going on.
- * Naturally, this is quite slow. The solution is to "pin" a
- * pagetable, which enforces all the constraints on the pagetable even
- * when it is not actively in use. This means that Xen can be assured
- * that it is still valid when you do load it into %cr3, and doesn't
- * need to revalidate it.
- *
- * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
- */
-#include <linux/sched/mm.h>
-#include <linux/highmem.h>
-#include <linux/debugfs.h>
-#include <linux/bug.h>
-#include <linux/vmalloc.h>
-#include <linux/export.h>
-#include <linux/init.h>
-#include <linux/gfp.h>
-#include <linux/memblock.h>
-#include <linux/seq_file.h>
-#include <linux/crash_dump.h>
-
-#include <trace/events/xen.h>
-
-#include <asm/pgtable.h>
-#include <asm/tlbflush.h>
-#include <asm/fixmap.h>
-#include <asm/mmu_context.h>
-#include <asm/setup.h>
-#include <asm/paravirt.h>
-#include <asm/e820/api.h>
-#include <asm/linkage.h>
-#include <asm/page.h>
-#include <asm/init.h>
-#include <asm/pat.h>
-#include <asm/smp.h>
-
+#include <linux/pfn.h>
+#include <asm/xen/page.h>
#include <asm/xen/hypercall.h>
-#include <asm/xen/hypervisor.h>
-
-#include <xen/xen.h>
-#include <xen/page.h>
-#include <xen/interface/xen.h>
-#include <xen/interface/hvm/hvm_op.h>
-#include <xen/interface/version.h>
#include <xen/interface/memory.h>
-#include <xen/hvc-console.h>
#include "multicalls.h"
#include "mmu.h"
-#include "debugfs.h"
/*
* Protects atomic reservation decrease/increase against concurrent increases.
@@ -86,45 +12,6 @@
*/
DEFINE_SPINLOCK(xen_reservation_lock);
-#ifdef CONFIG_X86_32
-/*
- * Identity map, in addition to plain kernel map. This needs to be
- * large enough to allocate page table pages to allocate the rest.
- * Each page can map 2MB.
- */
-#define LEVEL1_IDENT_ENTRIES (PTRS_PER_PTE * 4)
-static RESERVE_BRK_ARRAY(pte_t, level1_ident_pgt, LEVEL1_IDENT_ENTRIES);
-#endif
-#ifdef CONFIG_X86_64
-/* l3 pud for userspace vsyscall mapping */
-static pud_t level3_user_vsyscall[PTRS_PER_PUD] __page_aligned_bss;
-#endif /* CONFIG_X86_64 */
-
-/*
- * Note about cr3 (pagetable base) values:
- *
- * xen_cr3 contains the current logical cr3 value; it contains the
- * last set cr3. This may not be the current effective cr3, because
- * its update may be being lazily deferred. However, a vcpu looking
- * at its own cr3 can use this value knowing that everything will
- * be self-consistent.
- *
- * xen_current_cr3 contains the actual vcpu cr3; it is set once the
- * hypercall to set the vcpu cr3 is complete (so it may be a little
- * out of date, but it will never be set early). If one vcpu is
- * looking at another vcpu's cr3 value, it should use this variable.
- */
-DEFINE_PER_CPU(unsigned long, xen_cr3); /* cr3 stored as physaddr */
-DEFINE_PER_CPU(unsigned long, xen_current_cr3); /* actual vcpu cr3 */
-
-static phys_addr_t xen_pt_base, xen_pt_size __initdata;
-
-/*
- * Just beyond the highest usermode address. STACK_TOP_MAX has a
- * redzone above it, so round it up to a PGD boundary.
- */
-#define USER_LIMIT ((STACK_TOP_MAX + PGDIR_SIZE - 1) & PGDIR_MASK)
-
unsigned long arbitrary_virt_to_mfn(void *vaddr)
{
xmaddr_t maddr = arbitrary_virt_to_machine(vaddr);
@@ -155,1218 +42,6 @@ xmaddr_t arbitrary_virt_to_machine(void *vaddr)
}
EXPORT_SYMBOL_GPL(arbitrary_virt_to_machine);
-void make_lowmem_page_readonly(void *vaddr)
-{
- pte_t *pte, ptev;
- unsigned long address = (unsigned long)vaddr;
- unsigned int level;
-
- pte = lookup_address(address, &level);
- if (pte == NULL)
- return; /* vaddr missing */
-
- ptev = pte_wrprotect(*pte);
-
- if (HYPERVISOR_update_va_mapping(address, ptev, 0))
- BUG();
-}
-
-void make_lowmem_page_readwrite(void *vaddr)
-{
- pte_t *pte, ptev;
- unsigned long address = (unsigned long)vaddr;
- unsigned int level;
-
- pte = lookup_address(address, &level);
- if (pte == NULL)
- return; /* vaddr missing */
-
- ptev = pte_mkwrite(*pte);
-
- if (HYPERVISOR_update_va_mapping(address, ptev, 0))
- BUG();
-}
-
-
-static bool xen_page_pinned(void *ptr)
-{
- struct page *page = virt_to_page(ptr);
-
- return PagePinned(page);
-}
-
-void xen_set_domain_pte(pte_t *ptep, pte_t pteval, unsigned domid)
-{
- struct multicall_space mcs;
- struct mmu_update *u;
-
- trace_xen_mmu_set_domain_pte(ptep, pteval, domid);
-
- mcs = xen_mc_entry(sizeof(*u));
- u = mcs.args;
-
- /* ptep might be kmapped when using 32-bit HIGHPTE */
- u->ptr = virt_to_machine(ptep).maddr;
- u->val = pte_val_ma(pteval);
-
- MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, domid);
-
- xen_mc_issue(PARAVIRT_LAZY_MMU);
-}
-EXPORT_SYMBOL_GPL(xen_set_domain_pte);
-
-static void xen_extend_mmu_update(const struct mmu_update *update)
-{
- struct multicall_space mcs;
- struct mmu_update *u;
-
- mcs = xen_mc_extend_args(__HYPERVISOR_mmu_update, sizeof(*u));
-
- if (mcs.mc != NULL) {
- mcs.mc->args[1]++;
- } else {
- mcs = __xen_mc_entry(sizeof(*u));
- MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
- }
-
- u = mcs.args;
- *u = *update;
-}
-
-static void xen_extend_mmuext_op(const struct mmuext_op *op)
-{
- struct multicall_space mcs;
- struct mmuext_op *u;
-
- mcs = xen_mc_extend_args(__HYPERVISOR_mmuext_op, sizeof(*u));
-
- if (mcs.mc != NULL) {
- mcs.mc->args[1]++;
- } else {
- mcs = __xen_mc_entry(sizeof(*u));
- MULTI_mmuext_op(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
- }
-
- u = mcs.args;
- *u = *op;
-}
-
-static void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
-{
- struct mmu_update u;
-
- preempt_disable();
-
- xen_mc_batch();
-
- /* ptr may be ioremapped for 64-bit pagetable setup */
- u.ptr = arbitrary_virt_to_machine(ptr).maddr;
- u.val = pmd_val_ma(val);
- xen_extend_mmu_update(&u);
-
- xen_mc_issue(PARAVIRT_LAZY_MMU);
-
- preempt_enable();
-}
-
-static void xen_set_pmd(pmd_t *ptr, pmd_t val)
-{
- trace_xen_mmu_set_pmd(ptr, val);
-
- /* If page is not pinned, we can just update the entry
- directly */
- if (!xen_page_pinned(ptr)) {
- *ptr = val;
- return;
- }
-
- xen_set_pmd_hyper(ptr, val);
-}
-
-/*
- * Associate a virtual page frame with a given physical page frame
- * and protection flags for that frame.
- */
-void set_pte_mfn(unsigned long vaddr, unsigned long mfn, pgprot_t flags)
-{
- set_pte_vaddr(vaddr, mfn_pte(mfn, flags));
-}
-
-static bool xen_batched_set_pte(pte_t *ptep, pte_t pteval)
-{
- struct mmu_update u;
-
- if (paravirt_get_lazy_mode() != PARAVIRT_LAZY_MMU)
- return false;
-
- xen_mc_batch();
-
- u.ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE;
- u.val = pte_val_ma(pteval);
- xen_extend_mmu_update(&u);
-
- xen_mc_issue(PARAVIRT_LAZY_MMU);
-
- return true;
-}
-
-static inline void __xen_set_pte(pte_t *ptep, pte_t pteval)
-{
- if (!xen_batched_set_pte(ptep, pteval)) {
- /*
- * Could call native_set_pte() here and trap and
- * emulate the PTE write but with 32-bit guests this
- * needs two traps (one for each of the two 32-bit
- * words in the PTE) so do one hypercall directly
- * instead.
- */
- struct mmu_update u;
-
- u.ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE;
- u.val = pte_val_ma(pteval);
- HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF);
- }
-}
-
-static void xen_set_pte(pte_t *ptep, pte_t pteval)
-{
- trace_xen_mmu_set_pte(ptep, pteval);
- __xen_set_pte(ptep, pteval);
-}
-
-static void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep, pte_t pteval)
-{
- trace_xen_mmu_set_pte_at(mm, addr, ptep, pteval);
- __xen_set_pte(ptep, pteval);
-}
-
-pte_t xen_ptep_modify_prot_start(struct mm_struct *mm,
- unsigned long addr, pte_t *ptep)
-{
- /* Just return the pte as-is. We preserve the bits on commit */
- trace_xen_mmu_ptep_modify_prot_start(mm, addr, ptep, *ptep);
- return *ptep;
-}
-
-void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep, pte_t pte)
-{
- struct mmu_update u;
-
- trace_xen_mmu_ptep_modify_prot_commit(mm, addr, ptep, pte);
- xen_mc_batch();
-
- u.ptr = virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD;
- u.val = pte_val_ma(pte);
- xen_extend_mmu_update(&u);
-
- xen_mc_issue(PARAVIRT_LAZY_MMU);
-}
-
-/* Assume pteval_t is equivalent to all the other *val_t types. */
-static pteval_t pte_mfn_to_pfn(pteval_t val)
-{
- if (val & _PAGE_PRESENT) {
- unsigned long mfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
- unsigned long pfn = mfn_to_pfn(mfn);
-
- pteval_t flags = val & PTE_FLAGS_MASK;
- if (unlikely(pfn == ~0))
- val = flags & ~_PAGE_PRESENT;
- else
- val = ((pteval_t)pfn << PAGE_SHIFT) | flags;
- }
-
- return val;
-}
-
-static pteval_t pte_pfn_to_mfn(pteval_t val)
-{
- if (val & _PAGE_PRESENT) {
- unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
- pteval_t flags = val & PTE_FLAGS_MASK;
- unsigned long mfn;
-
- if (!xen_feature(XENFEAT_auto_translated_physmap))
- mfn = __pfn_to_mfn(pfn);
- else
- mfn = pfn;
- /*
- * If there's no mfn for the pfn, then just create an
- * empty non-present pte. Unfortunately this loses
- * information about the original pfn, so
- * pte_mfn_to_pfn is asymmetric.
- */
- if (unlikely(mfn == INVALID_P2M_ENTRY)) {
- mfn = 0;
- flags = 0;
- } else
- mfn &= ~(FOREIGN_FRAME_BIT | IDENTITY_FRAME_BIT);
- val = ((pteval_t)mfn << PAGE_SHIFT) | flags;
- }
-
- return val;
-}
-
-__visible pteval_t xen_pte_val(pte_t pte)
-{
- pteval_t pteval = pte.pte;
-
- return pte_mfn_to_pfn(pteval);
-}
-PV_CALLEE_SAVE_REGS_THUNK(xen_pte_val);
-
-__visible pgdval_t xen_pgd_val(pgd_t pgd)
-{
- return pte_mfn_to_pfn(pgd.pgd);
-}
-PV_CALLEE_SAVE_REGS_THUNK(xen_pgd_val);
-
-__visible pte_t xen_make_pte(pteval_t pte)
-{
- pte = pte_pfn_to_mfn(pte);
-
- return native_make_pte(pte);
-}
-PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte);
-
-__visible pgd_t xen_make_pgd(pgdval_t pgd)
-{
- pgd = pte_pfn_to_mfn(pgd);
- return native_make_pgd(pgd);
-}
-PV_CALLEE_SAVE_REGS_THUNK(xen_make_pgd);
-
-__visible pmdval_t xen_pmd_val(pmd_t pmd)
-{
- return pte_mfn_to_pfn(pmd.pmd);
-}
-PV_CALLEE_SAVE_REGS_THUNK(xen_pmd_val);
-
-static void xen_set_pud_hyper(pud_t *ptr, pud_t val)
-{
- struct mmu_update u;
-
- preempt_disable();
-
- xen_mc_batch();
-
- /* ptr may be ioremapped for 64-bit pagetable setup */
- u.ptr = arbitrary_virt_to_machine(ptr).maddr;
- u.val = pud_val_ma(val);
- xen_extend_mmu_update(&u);
-
- xen_mc_issue(PARAVIRT_LAZY_MMU);
-
- preempt_enable();
-}
-
-static void xen_set_pud(pud_t *ptr, pud_t val)
-{
- trace_xen_mmu_set_pud(ptr, val);
-
- /* If page is not pinned, we can just update the entry
- directly */
- if (!xen_page_pinned(ptr)) {
- *ptr = val;
- return;
- }
-
- xen_set_pud_hyper(ptr, val);
-}
-
-#ifdef CONFIG_X86_PAE
-static void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
-{
- trace_xen_mmu_set_pte_atomic(ptep, pte);
- set_64bit((u64 *)ptep, native_pte_val(pte));
-}
-
-static void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
-{
- trace_xen_mmu_pte_clear(mm, addr, ptep);
- if (!xen_batched_set_pte(ptep, native_make_pte(0)))
- native_pte_clear(mm, addr, ptep);
-}
-
-static void xen_pmd_clear(pmd_t *pmdp)
-{
- trace_xen_mmu_pmd_clear(pmdp);
- set_pmd(pmdp, __pmd(0));
-}
-#endif /* CONFIG_X86_PAE */
-
-__visible pmd_t xen_make_pmd(pmdval_t pmd)
-{
- pmd = pte_pfn_to_mfn(pmd);
- return native_make_pmd(pmd);
-}
-PV_CALLEE_SAVE_REGS_THUNK(xen_make_pmd);
-
-#if CONFIG_PGTABLE_LEVELS == 4
-__visible pudval_t xen_pud_val(pud_t pud)
-{
- return pte_mfn_to_pfn(pud.pud);
-}
-PV_CALLEE_SAVE_REGS_THUNK(xen_pud_val);
-
-__visible pud_t xen_make_pud(pudval_t pud)
-{
- pud = pte_pfn_to_mfn(pud);
-
- return native_make_pud(pud);
-}
-PV_CALLEE_SAVE_REGS_THUNK(xen_make_pud);
-
-static pgd_t *xen_get_user_pgd(pgd_t *pgd)
-{
- pgd_t *pgd_page = (pgd_t *)(((unsigned long)pgd) & PAGE_MASK);
- unsigned offset = pgd - pgd_page;
- pgd_t *user_ptr = NULL;
-
- if (offset < pgd_index(USER_LIMIT)) {
- struct page *page = virt_to_page(pgd_page);
- user_ptr = (pgd_t *)page->private;
- if (user_ptr)
- user_ptr += offset;
- }
-
- return user_ptr;
-}
-
-static void __xen_set_p4d_hyper(p4d_t *ptr, p4d_t val)
-{
- struct mmu_update u;
-
- u.ptr = virt_to_machine(ptr).maddr;
- u.val = p4d_val_ma(val);
- xen_extend_mmu_update(&u);
-}
-
-/*
- * Raw hypercall-based set_p4d, intended for use in early boot before
- * there's a page structure. This implies:
- * 1. The only existing pagetable is the kernel's
- * 2. It is always pinned
- * 3. It has no user pagetable attached to it
- */
-static void __init xen_set_p4d_hyper(p4d_t *ptr, p4d_t val)
-{
- preempt_disable();
-
- xen_mc_batch();
-
- __xen_set_p4d_hyper(ptr, val);
-
- xen_mc_issue(PARAVIRT_LAZY_MMU);
-
- preempt_enable();
-}
-
-static void xen_set_p4d(p4d_t *ptr, p4d_t val)
-{
- pgd_t *user_ptr = xen_get_user_pgd((pgd_t *)ptr);
- pgd_t pgd_val;
-
- trace_xen_mmu_set_p4d(ptr, (p4d_t *)user_ptr, val);
-
- /* If page is not pinned, we can just update the entry
- directly */
- if (!xen_page_pinned(ptr)) {
- *ptr = val;
- if (user_ptr) {
- WARN_ON(xen_page_pinned(user_ptr));
- pgd_val.pgd = p4d_val_ma(val);
- *user_ptr = pgd_val;
- }
- return;
- }
-
- /* If it's pinned, then we can at least batch the kernel and
- user updates together. */
- xen_mc_batch();
-
- __xen_set_p4d_hyper(ptr, val);
- if (user_ptr)
- __xen_set_p4d_hyper((p4d_t *)user_ptr, val);
-
- xen_mc_issue(PARAVIRT_LAZY_MMU);
-}
-#endif /* CONFIG_PGTABLE_LEVELS == 4 */
-
-static int xen_pmd_walk(struct mm_struct *mm, pmd_t *pmd,
- int (*func)(struct mm_struct *mm, struct page *, enum pt_level),
- bool last, unsigned long limit)
-{
- int i, nr, flush = 0;
-
- nr = last ? pmd_index(limit) + 1 : PTRS_PER_PMD;
- for (i = 0; i < nr; i++) {
- if (!pmd_none(pmd[i]))
- flush |= (*func)(mm, pmd_page(pmd[i]), PT_PTE);
- }
- return flush;
-}
-
-static int xen_pud_walk(struct mm_struct *mm, pud_t *pud,
- int (*func)(struct mm_struct *mm, struct page *, enum pt_level),
- bool last, unsigned long limit)
-{
- int i, nr, flush = 0;
-
- nr = last ? pud_index(limit) + 1 : PTRS_PER_PUD;
- for (i = 0; i < nr; i++) {
- pmd_t *pmd;
-
- if (pud_none(pud[i]))
- continue;
-
- pmd = pmd_offset(&pud[i], 0);
- if (PTRS_PER_PMD > 1)
- flush |= (*func)(mm, virt_to_page(pmd), PT_PMD);
- flush |= xen_pmd_walk(mm, pmd, func,
- last && i == nr - 1, limit);
- }
- return flush;
-}
-
-static int xen_p4d_walk(struct mm_struct *mm, p4d_t *p4d,
- int (*func)(struct mm_struct *mm, struct page *, enum pt_level),
- bool last, unsigned long limit)
-{
- int i, nr, flush = 0;
-
- nr = last ? p4d_index(limit) + 1 : PTRS_PER_P4D;
- for (i = 0; i < nr; i++) {
- pud_t *pud;
-
- if (p4d_none(p4d[i]))
- continue;
-
- pud = pud_offset(&p4d[i], 0);
- if (PTRS_PER_PUD > 1)
- flush |= (*func)(mm, virt_to_page(pud), PT_PUD);
- flush |= xen_pud_walk(mm, pud, func,
- last && i == nr - 1, limit);
- }
- return flush;
-}
-
-/*
- * (Yet another) pagetable walker. This one is intended for pinning a
- * pagetable. This means that it walks a pagetable and calls the
- * callback function on each page it finds making up the page table,
- * at every level. It walks the entire pagetable, but it only bothers
- * pinning pte pages which are below limit. In the normal case this
- * will be STACK_TOP_MAX, but at boot we need to pin up to
- * FIXADDR_TOP.
- *
- * For 32-bit the important bit is that we don't pin beyond there,
- * because then we start getting into Xen's ptes.
- *
- * For 64-bit, we must skip the Xen hole in the middle of the address
- * space, just after the big x86-64 virtual hole.
- */
-static int __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd,
- int (*func)(struct mm_struct *mm, struct page *,
- enum pt_level),
- unsigned long limit)
-{
- int i, nr, flush = 0;
- unsigned hole_low, hole_high;
-
- /* The limit is the last byte to be touched */
- limit--;
- BUG_ON(limit >= FIXADDR_TOP);
-
- if (xen_feature(XENFEAT_auto_translated_physmap))
- return 0;
-
- /*
- * 64-bit has a great big hole in the middle of the address
- * space, which contains the Xen mappings. On 32-bit these
- * will end up making a zero-sized hole and so is a no-op.
- */
- hole_low = pgd_index(USER_LIMIT);
- hole_high = pgd_index(PAGE_OFFSET);
-
- nr = pgd_index(limit) + 1;
- for (i = 0; i < nr; i++) {
- p4d_t *p4d;
-
- if (i >= hole_low && i < hole_high)
- continue;
-
- if (pgd_none(pgd[i]))
- continue;
-
- p4d = p4d_offset(&pgd[i], 0);
- if (PTRS_PER_P4D > 1)
- flush |= (*func)(mm, virt_to_page(p4d), PT_P4D);
- flush |= xen_p4d_walk(mm, p4d, func, i == nr - 1, limit);
- }
-
- /* Do the top level last, so that the callbacks can use it as
- a cue to do final things like tlb flushes. */
- flush |= (*func)(mm, virt_to_page(pgd), PT_PGD);
-
- return flush;
-}
-
-static int xen_pgd_walk(struct mm_struct *mm,
- int (*func)(struct mm_struct *mm, struct page *,
- enum pt_level),
- unsigned long limit)
-{
- return __xen_pgd_walk(mm, mm->pgd, func, limit);
-}
-
-/* If we're using split pte locks, then take the page's lock and
- return a pointer to it. Otherwise return NULL. */
-static spinlock_t *xen_pte_lock(struct page *page, struct mm_struct *mm)
-{
- spinlock_t *ptl = NULL;
-
-#if USE_SPLIT_PTE_PTLOCKS
- ptl = ptlock_ptr(page);
- spin_lock_nest_lock(ptl, &mm->page_table_lock);
-#endif
-
- return ptl;
-}
-
-static void xen_pte_unlock(void *v)
-{
- spinlock_t *ptl = v;
- spin_unlock(ptl);
-}
-
-static void xen_do_pin(unsigned level, unsigned long pfn)
-{
- struct mmuext_op op;
-
- op.cmd = level;
- op.arg1.mfn = pfn_to_mfn(pfn);
-
- xen_extend_mmuext_op(&op);
-}
-
-static int xen_pin_page(struct mm_struct *mm, struct page *page,
- enum pt_level level)
-{
- unsigned pgfl = TestSetPagePinned(page);
- int flush;
-
- if (pgfl)
- flush = 0; /* already pinned */
- else if (PageHighMem(page))
- /* kmaps need flushing if we found an unpinned
- highpage */
- flush = 1;
- else {
- void *pt = lowmem_page_address(page);
- unsigned long pfn = page_to_pfn(page);
- struct multicall_space mcs = __xen_mc_entry(0);
- spinlock_t *ptl;
-
- flush = 0;
-
- /*
- * We need to hold the pagetable lock between the time
- * we make the pagetable RO and when we actually pin
- * it. If we don't, then other users may come in and
- * attempt to update the pagetable by writing it,
- * which will fail because the memory is RO but not
- * pinned, so Xen won't do the trap'n'emulate.
- *
- * If we're using split pte locks, we can't hold the
- * entire pagetable's worth of locks during the
- * traverse, because we may wrap the preempt count (8
- * bits). The solution is to mark RO and pin each PTE
- * page while holding the lock. This means the number
- * of locks we end up holding is never more than a
- * batch size (~32 entries, at present).
- *
- * If we're not using split pte locks, we needn't pin
- * the PTE pages independently, because we're
- * protected by the overall pagetable lock.
- */
- ptl = NULL;
- if (level == PT_PTE)
- ptl = xen_pte_lock(page, mm);
-
- MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
- pfn_pte(pfn, PAGE_KERNEL_RO),
- level == PT_PGD ? UVMF_TLB_FLUSH : 0);
-
- if (ptl) {
- xen_do_pin(MMUEXT_PIN_L1_TABLE, pfn);
-
- /* Queue a deferred unlock for when this batch
- is completed. */
- xen_mc_callback(xen_pte_unlock, ptl);
- }
- }
-
- return flush;
-}
-
-/* This is called just after a mm has been created, but it has not
- been used yet. We need to make sure that its pagetable is all
- read-only, and can be pinned. */
-static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd)
-{
- trace_xen_mmu_pgd_pin(mm, pgd);
-
- xen_mc_batch();
-
- if (__xen_pgd_walk(mm, pgd, xen_pin_page, USER_LIMIT)) {
- /* re-enable interrupts for flushing */
- xen_mc_issue(0);
-
- kmap_flush_unused();
-
- xen_mc_batch();
- }
-
-#ifdef CONFIG_X86_64
- {
- pgd_t *user_pgd = xen_get_user_pgd(pgd);
-
- xen_do_pin(MMUEXT_PIN_L4_TABLE, PFN_DOWN(__pa(pgd)));
-
- if (user_pgd) {
- xen_pin_page(mm, virt_to_page(user_pgd), PT_PGD);
- xen_do_pin(MMUEXT_PIN_L4_TABLE,
- PFN_DOWN(__pa(user_pgd)));
- }
- }
-#else /* CONFIG_X86_32 */
-#ifdef CONFIG_X86_PAE
- /* Need to make sure unshared kernel PMD is pinnable */
- xen_pin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]),
- PT_PMD);
-#endif
- xen_do_pin(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(pgd)));
-#endif /* CONFIG_X86_64 */
- xen_mc_issue(0);
-}
-
-static void xen_pgd_pin(struct mm_struct *mm)
-{
- __xen_pgd_pin(mm, mm->pgd);
-}
-
-/*
- * On save, we need to pin all pagetables to make sure they get their
- * mfns turned into pfns. Search the list for any unpinned pgds and pin
- * them (unpinned pgds are not currently in use, probably because the
- * process is under construction or destruction).
- *
- * Expected to be called in stop_machine() ("equivalent to taking
- * every spinlock in the system"), so the locking doesn't really
- * matter all that much.
- */
-void xen_mm_pin_all(void)
-{
- struct page *page;
-
- spin_lock(&pgd_lock);
-
- list_for_each_entry(page, &pgd_list, lru) {
- if (!PagePinned(page)) {
- __xen_pgd_pin(&init_mm, (pgd_t *)page_address(page));
- SetPageSavePinned(page);
- }
- }
-
- spin_unlock(&pgd_lock);
-}
-
-/*
- * The init_mm pagetable is really pinned as soon as it's created, but
- * that's before we have page structures to store the bits. So do all
- * the book-keeping now.
- */
-static int __init xen_mark_pinned(struct mm_struct *mm, struct page *page,
- enum pt_level level)
-{
- SetPagePinned(page);
- return 0;
-}
-
-static void __init xen_mark_init_mm_pinned(void)
-{
- xen_pgd_walk(&init_mm, xen_mark_pinned, FIXADDR_TOP);
-}
-
-static int xen_unpin_page(struct mm_struct *mm, struct page *page,
- enum pt_level level)
-{
- unsigned pgfl = TestClearPagePinned(page);
-
- if (pgfl && !PageHighMem(page)) {
- void *pt = lowmem_page_address(page);
- unsigned long pfn = page_to_pfn(page);
- spinlock_t *ptl = NULL;
- struct multicall_space mcs;
-
- /*
- * Do the converse to pin_page. If we're using split
- * pte locks, we must be holding the lock while
- * the pte page is unpinned but still RO to prevent
- * concurrent updates from seeing it in this
- * partially-pinned state.
- */
- if (level == PT_PTE) {
- ptl = xen_pte_lock(page, mm);
-
- if (ptl)
- xen_do_pin(MMUEXT_UNPIN_TABLE, pfn);
- }
-
- mcs = __xen_mc_entry(0);
-
- MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
- pfn_pte(pfn, PAGE_KERNEL),
- level == PT_PGD ? UVMF_TLB_FLUSH : 0);
-
- if (ptl) {
- /* unlock when batch completed */
- xen_mc_callback(xen_pte_unlock, ptl);
- }
- }
-
- return 0; /* never need to flush on unpin */
-}
-
-/* Release a pagetable's pages back as normal RW */
-static void __xen_pgd_unpin(struct mm_struct *mm, pgd_t *pgd)
-{
- trace_xen_mmu_pgd_unpin(mm, pgd);
-
- xen_mc_batch();
-
- xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
-
-#ifdef CONFIG_X86_64
- {
- pgd_t *user_pgd = xen_get_user_pgd(pgd);
-
- if (user_pgd) {
- xen_do_pin(MMUEXT_UNPIN_TABLE,
- PFN_DOWN(__pa(user_pgd)));
- xen_unpin_page(mm, virt_to_page(user_pgd), PT_PGD);
- }
- }
-#endif
-
-#ifdef CONFIG_X86_PAE
- /* Need to make sure unshared kernel PMD is unpinned */
- xen_unpin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]),
- PT_PMD);
-#endif
-
- __xen_pgd_walk(mm, pgd, xen_unpin_page, USER_LIMIT);
-
- xen_mc_issue(0);
-}
-
-static void xen_pgd_unpin(struct mm_struct *mm)
-{
- __xen_pgd_unpin(mm, mm->pgd);
-}
-
-/*
- * On resume, undo any pinning done at save, so that the rest of the
- * kernel doesn't see any unexpected pinned pagetables.
- */
-void xen_mm_unpin_all(void)
-{
- struct page *page;
-
- spin_lock(&pgd_lock);
-
- list_for_each_entry(page, &pgd_list, lru) {
- if (PageSavePinned(page)) {
- BUG_ON(!PagePinned(page));
- __xen_pgd_unpin(&init_mm, (pgd_t *)page_address(page));
- ClearPageSavePinned(page);
- }
- }
-
- spin_unlock(&pgd_lock);
-}
-
-static void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next)
-{
- spin_lock(&next->page_table_lock);
- xen_pgd_pin(next);
- spin_unlock(&next->page_table_lock);
-}
-
-static void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
-{
- spin_lock(&mm->page_table_lock);
- xen_pgd_pin(mm);
- spin_unlock(&mm->page_table_lock);
-}
-
-
-#ifdef CONFIG_SMP
-/* Another cpu may still have its %cr3 pointing at the pagetable, so
- we need to repoint it somewhere else before we can unpin it. */
-static void drop_other_mm_ref(void *info)
-{
- struct mm_struct *mm = info;
- struct mm_struct *active_mm;
-
- active_mm = this_cpu_read(cpu_tlbstate.active_mm);
-
- if (active_mm == mm && this_cpu_read(cpu_tlbstate.state) != TLBSTATE_OK)
- leave_mm(smp_processor_id());
-
- /* If this cpu still has a stale cr3 reference, then make sure
- it has been flushed. */
- if (this_cpu_read(xen_current_cr3) == __pa(mm->pgd))
- load_cr3(swapper_pg_dir);
-}
-
-static void xen_drop_mm_ref(struct mm_struct *mm)
-{
- cpumask_var_t mask;
- unsigned cpu;
-
- if (current->active_mm == mm) {
- if (current->mm == mm)
- load_cr3(swapper_pg_dir);
- else
- leave_mm(smp_processor_id());
- }
-
- /* Get the "official" set of cpus referring to our pagetable. */
- if (!alloc_cpumask_var(&mask, GFP_ATOMIC)) {
- for_each_online_cpu(cpu) {
- if (!cpumask_test_cpu(cpu, mm_cpumask(mm))
- && per_cpu(xen_current_cr3, cpu) != __pa(mm->pgd))
- continue;
- smp_call_function_single(cpu, drop_other_mm_ref, mm, 1);
- }
- return;
- }
- cpumask_copy(mask, mm_cpumask(mm));
-
- /* It's possible that a vcpu may have a stale reference to our
- cr3, because it's in lazy mode and hasn't yet flushed
- its set of pending hypercalls. In this case, we can
- look at its actual current cr3 value, and force it to flush
- if needed. */
- for_each_online_cpu(cpu) {
- if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd))
- cpumask_set_cpu(cpu, mask);
- }
-
- if (!cpumask_empty(mask))
- smp_call_function_many(mask, drop_other_mm_ref, mm, 1);
- free_cpumask_var(mask);
-}
-#else
-static void xen_drop_mm_ref(struct mm_struct *mm)
-{
- if (current->active_mm == mm)
- load_cr3(swapper_pg_dir);
-}
-#endif
-
-/*
- * While a process runs, Xen pins its pagetables, which means that the
- * hypervisor forces them to be read-only and controls all updates
- * to them. This means that all pagetable updates have to go via the
- * hypervisor, which is moderately expensive.
- *
- * Since we're pulling the pagetable down, we switch to use init_mm,
- * unpin old process pagetable and mark it all read-write, which
- * allows further operations on it to be simple memory accesses.
- *
- * The only subtle point is that another CPU may still be using the
- * pagetable because of lazy tlb flushing. This means we need to
- * switch all CPUs off this pagetable before we can unpin it.
- */
-static void xen_exit_mmap(struct mm_struct *mm)
-{
- get_cpu(); /* make sure we don't move around */
- xen_drop_mm_ref(mm);
- put_cpu();
-
- spin_lock(&mm->page_table_lock);
-
- /* pgd may not be pinned in the error exit path of execve */
- if (xen_page_pinned(mm->pgd))
- xen_pgd_unpin(mm);
-
- spin_unlock(&mm->page_table_lock);
-}
-
-static void xen_post_allocator_init(void);
-
-static void __init pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
-{
- struct mmuext_op op;
-
- op.cmd = cmd;
- op.arg1.mfn = pfn_to_mfn(pfn);
- if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
- BUG();
-}
-
-#ifdef CONFIG_X86_64
-static void __init xen_cleanhighmap(unsigned long vaddr,
- unsigned long vaddr_end)
-{
- unsigned long kernel_end = roundup((unsigned long)_brk_end, PMD_SIZE) - 1;
- pmd_t *pmd = level2_kernel_pgt + pmd_index(vaddr);
-
- /* NOTE: The loop is more greedy than the cleanup_highmap variant.
- * We include the PMD passed in on _both_ boundaries. */
- for (; vaddr <= vaddr_end && (pmd < (level2_kernel_pgt + PTRS_PER_PMD));
- pmd++, vaddr += PMD_SIZE) {
- if (pmd_none(*pmd))
- continue;
- if (vaddr < (unsigned long) _text || vaddr > kernel_end)
- set_pmd(pmd, __pmd(0));
- }
- /* In case we did something silly, we should crash in this function
- * instead of somewhere later and be confusing. */
- xen_mc_flush();
-}
-
-/*
- * Make a page range writeable and free it.
- */
-static void __init xen_free_ro_pages(unsigned long paddr, unsigned long size)
-{
- void *vaddr = __va(paddr);
- void *vaddr_end = vaddr + size;
-
- for (; vaddr < vaddr_end; vaddr += PAGE_SIZE)
- make_lowmem_page_readwrite(vaddr);
-
- memblock_free(paddr, size);
-}
-
-static void __init xen_cleanmfnmap_free_pgtbl(void *pgtbl, bool unpin)
-{
- unsigned long pa = __pa(pgtbl) & PHYSICAL_PAGE_MASK;
-
- if (unpin)
- pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(pa));
- ClearPagePinned(virt_to_page(__va(pa)));
- xen_free_ro_pages(pa, PAGE_SIZE);
-}
-
-static void __init xen_cleanmfnmap_pmd(pmd_t *pmd, bool unpin)
-{
- unsigned long pa;
- pte_t *pte_tbl;
- int i;
-
- if (pmd_large(*pmd)) {
- pa = pmd_val(*pmd) & PHYSICAL_PAGE_MASK;
- xen_free_ro_pages(pa, PMD_SIZE);
- return;
- }
-
- pte_tbl = pte_offset_kernel(pmd, 0);
- for (i = 0; i < PTRS_PER_PTE; i++) {
- if (pte_none(pte_tbl[i]))
- continue;
- pa = pte_pfn(pte_tbl[i]) << PAGE_SHIFT;
- xen_free_ro_pages(pa, PAGE_SIZE);
- }
- set_pmd(pmd, __pmd(0));
- xen_cleanmfnmap_free_pgtbl(pte_tbl, unpin);
-}
-
-static void __init xen_cleanmfnmap_pud(pud_t *pud, bool unpin)
-{
- unsigned long pa;
- pmd_t *pmd_tbl;
- int i;
-
- if (pud_large(*pud)) {
- pa = pud_val(*pud) & PHYSICAL_PAGE_MASK;
- xen_free_ro_pages(pa, PUD_SIZE);
- return;
- }
-
- pmd_tbl = pmd_offset(pud, 0);
- for (i = 0; i < PTRS_PER_PMD; i++) {
- if (pmd_none(pmd_tbl[i]))
- continue;
- xen_cleanmfnmap_pmd(pmd_tbl + i, unpin);
- }
- set_pud(pud, __pud(0));
- xen_cleanmfnmap_free_pgtbl(pmd_tbl, unpin);
-}
-
-static void __init xen_cleanmfnmap_p4d(p4d_t *p4d, bool unpin)
-{
- unsigned long pa;
- pud_t *pud_tbl;
- int i;
-
- if (p4d_large(*p4d)) {
- pa = p4d_val(*p4d) & PHYSICAL_PAGE_MASK;
- xen_free_ro_pages(pa, P4D_SIZE);
- return;
- }
-
- pud_tbl = pud_offset(p4d, 0);
- for (i = 0; i < PTRS_PER_PUD; i++) {
- if (pud_none(pud_tbl[i]))
- continue;
- xen_cleanmfnmap_pud(pud_tbl + i, unpin);
- }
- set_p4d(p4d, __p4d(0));
- xen_cleanmfnmap_free_pgtbl(pud_tbl, unpin);
-}
-
-/*
- * Since it is well isolated we can (and since it is perhaps large we should)
- * also free the page tables mapping the initial P->M table.
- */
-static void __init xen_cleanmfnmap(unsigned long vaddr)
-{
- pgd_t *pgd;
- p4d_t *p4d;
- unsigned int i;
- bool unpin;
-
- unpin = (vaddr == 2 * PGDIR_SIZE);
- vaddr &= PMD_MASK;
- pgd = pgd_offset_k(vaddr);
- p4d = p4d_offset(pgd, 0);
- for (i = 0; i < PTRS_PER_P4D; i++) {
- if (p4d_none(p4d[i]))
- continue;
- xen_cleanmfnmap_p4d(p4d + i, unpin);
- }
- if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
- set_pgd(pgd, __pgd(0));
- xen_cleanmfnmap_free_pgtbl(p4d, unpin);
- }
-}
-
-static void __init xen_pagetable_p2m_free(void)
-{
- unsigned long size;
- unsigned long addr;
-
- size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long));
-
- /* No memory or already called. */
- if ((unsigned long)xen_p2m_addr == xen_start_info->mfn_list)
- return;
-
- /* using __ka address and sticking INVALID_P2M_ENTRY! */
- memset((void *)xen_start_info->mfn_list, 0xff, size);
-
- addr = xen_start_info->mfn_list;
- /*
- * We could be in __ka space.
- * We round up to the PMD, which means that if anybody at this stage is
- * using the __ka address of xen_start_info or
- * xen_start_info->shared_info they are going to crash. Fortunately
- * we have already revectored in xen_setup_kernel_pagetable and in
- * xen_setup_shared_info.
- */
- size = roundup(size, PMD_SIZE);
-
- if (addr >= __START_KERNEL_map) {
- xen_cleanhighmap(addr, addr + size);
- size = PAGE_ALIGN(xen_start_info->nr_pages *
- sizeof(unsigned long));
- memblock_free(__pa(addr), size);
- } else {
- xen_cleanmfnmap(addr);
- }
-}
-
-static void __init xen_pagetable_cleanhighmap(void)
-{
- unsigned long size;
- unsigned long addr;
-
- /* At this stage, cleanup_highmap has already cleaned __ka space
- * from _brk_limit way up to the max_pfn_mapped (which is the end of
- * the ramdisk). We continue on, erasing PMD entries that point to page
- * tables - do note that they are accessible at this stage via __va.
- * For good measure we also round up to the PMD - which means that if
- * anybody is using the __ka address of the initial boot-stack - and
- * tries to use it - they are going to crash. The xen_start_info has been
- * taken care of already in xen_setup_kernel_pagetable. */
- addr = xen_start_info->pt_base;
- size = roundup(xen_start_info->nr_pt_frames * PAGE_SIZE, PMD_SIZE);
-
- xen_cleanhighmap(addr, addr + size);
- xen_start_info->pt_base = (unsigned long)__va(__pa(xen_start_info->pt_base));
-#ifdef DEBUG
- /* This is not strictly necessary, but you know what,
- * let's do it. The MODULES_VADDR -> MODULES_END should be clear of
- * anything at this stage. */
- xen_cleanhighmap(MODULES_VADDR, roundup(MODULES_VADDR, PUD_SIZE) - 1);
-#endif
-}
-#endif
-
-static void __init xen_pagetable_p2m_setup(void)
-{
- if (xen_feature(XENFEAT_auto_translated_physmap))
- return;
-
- xen_vmalloc_p2m_tree();
-
-#ifdef CONFIG_X86_64
- xen_pagetable_p2m_free();
-
- xen_pagetable_cleanhighmap();
-#endif
- /* And revector! Bye bye old array */
- xen_start_info->mfn_list = (unsigned long)xen_p2m_addr;
-}
-
-static void __init xen_pagetable_init(void)
-{
- paging_init();
- xen_post_allocator_init();
-
- xen_pagetable_p2m_setup();
-
- /* Allocate and initialize top and mid mfn levels for p2m structure */
- xen_build_mfn_list_list();
-
- /* Remap memory freed due to conflicts with E820 map */
- if (!xen_feature(XENFEAT_auto_translated_physmap))
- xen_remap_memory();
-
- xen_setup_shared_info();
-}
-static void xen_write_cr2(unsigned long cr2)
-{
- this_cpu_read(xen_vcpu)->arch.cr2 = cr2;
-}
-
-static unsigned long xen_read_cr2(void)
-{
- return this_cpu_read(xen_vcpu)->arch.cr2;
-}
-
-unsigned long xen_read_cr2_direct(void)
-{
- return this_cpu_read(xen_vcpu_info.arch.cr2);
-}
-
void xen_flush_tlb_all(void)
{
struct mmuext_op *op;
@@ -1386,1464 +61,6 @@ void xen_flush_tlb_all(void)
preempt_enable();
}
-static void xen_flush_tlb(void)
-{
- struct mmuext_op *op;
- struct multicall_space mcs;
-
- trace_xen_mmu_flush_tlb(0);
-
- preempt_disable();
-
- mcs = xen_mc_entry(sizeof(*op));
-
- op = mcs.args;
- op->cmd = MMUEXT_TLB_FLUSH_LOCAL;
- MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
-
- xen_mc_issue(PARAVIRT_LAZY_MMU);
-
- preempt_enable();
-}
-
-static void xen_flush_tlb_single(unsigned long addr)
-{
- struct mmuext_op *op;
- struct multicall_space mcs;
-
- trace_xen_mmu_flush_tlb_single(addr);
-
- preempt_disable();
-
- mcs = xen_mc_entry(sizeof(*op));
- op = mcs.args;
- op->cmd = MMUEXT_INVLPG_LOCAL;
- op->arg1.linear_addr = addr & PAGE_MASK;
- MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
-
- xen_mc_issue(PARAVIRT_LAZY_MMU);
-
- preempt_enable();
-}
-
-static void xen_flush_tlb_others(const struct cpumask *cpus,
- struct mm_struct *mm, unsigned long start,
- unsigned long end)
-{
- struct {
- struct mmuext_op op;
-#ifdef CONFIG_SMP
- DECLARE_BITMAP(mask, num_processors);
-#else
- DECLARE_BITMAP(mask, NR_CPUS);
-#endif
- } *args;
- struct multicall_space mcs;
-
- trace_xen_mmu_flush_tlb_others(cpus, mm, start, end);
-
- if (cpumask_empty(cpus))
- return; /* nothing to do */
-
- mcs = xen_mc_entry(sizeof(*args));
- args = mcs.args;
- args->op.arg2.vcpumask = to_cpumask(args->mask);
-
- /* Remove us, and any offline CPUS. */
- cpumask_and(to_cpumask(args->mask), cpus, cpu_online_mask);
- cpumask_clear_cpu(smp_processor_id(), to_cpumask(args->mask));
-
- args->op.cmd = MMUEXT_TLB_FLUSH_MULTI;
- if (end != TLB_FLUSH_ALL && (end - start) <= PAGE_SIZE) {
- args->op.cmd = MMUEXT_INVLPG_MULTI;
- args->op.arg1.linear_addr = start;
- }
-
- MULTI_mmuext_op(mcs.mc, &args->op, 1, NULL, DOMID_SELF);
-
- xen_mc_issue(PARAVIRT_LAZY_MMU);
-}
-
-static unsigned long xen_read_cr3(void)
-{
- return this_cpu_read(xen_cr3);
-}
-
-static void set_current_cr3(void *v)
-{
- this_cpu_write(xen_current_cr3, (unsigned long)v);
-}
-
-static void __xen_write_cr3(bool kernel, unsigned long cr3)
-{
- struct mmuext_op op;
- unsigned long mfn;
-
- trace_xen_mmu_write_cr3(kernel, cr3);
-
- if (cr3)
- mfn = pfn_to_mfn(PFN_DOWN(cr3));
- else
- mfn = 0;
-
- WARN_ON(mfn == 0 && kernel);
-
- op.cmd = kernel ? MMUEXT_NEW_BASEPTR : MMUEXT_NEW_USER_BASEPTR;
- op.arg1.mfn = mfn;
-
- xen_extend_mmuext_op(&op);
-
- if (kernel) {
- this_cpu_write(xen_cr3, cr3);
-
- /* Update xen_current_cr3 once the batch has actually
- been submitted. */
- xen_mc_callback(set_current_cr3, (void *)cr3);
- }
-}
-static void xen_write_cr3(unsigned long cr3)
-{
- BUG_ON(preemptible());
-
- xen_mc_batch(); /* disables interrupts */
-
- /* Update while interrupts are disabled, so it's atomic with
- respect to ipis */
- this_cpu_write(xen_cr3, cr3);
-
- __xen_write_cr3(true, cr3);
-
-#ifdef CONFIG_X86_64
- {
- pgd_t *user_pgd = xen_get_user_pgd(__va(cr3));
- if (user_pgd)
- __xen_write_cr3(false, __pa(user_pgd));
- else
- __xen_write_cr3(false, 0);
- }
-#endif
-
- xen_mc_issue(PARAVIRT_LAZY_CPU); /* interrupts restored */
-}
-
-#ifdef CONFIG_X86_64
-/*
- * At the start of the day - when Xen launches a guest, it has already
- * built pagetables for the guest. We diligently look over them
- * in xen_setup_kernel_pagetable and graft them as appropriate into the
- * init_level4_pgt and its friends. Then when we are happy we load
- * the new init_level4_pgt - and continue on.
- *
- * The generic code starts (start_kernel) and 'init_mem_mapping' sets
- * up the rest of the pagetables. When it has completed it loads the cr3.
- * N.B. that baremetal would start at 'start_kernel' (and the early
- * #PF handler would create bootstrap pagetables) - so we are running
- * with the same assumptions as what to do when write_cr3 is executed
- * at this point.
- *
- * Since there are no user-page tables at all, we have two variants
- * of xen_write_cr3 - the early bootup (this one), and the late one
- * (xen_write_cr3). The reason we have to do that is that in 64-bit
- * the Linux kernel and user-space are both in ring 3 while the
- * hypervisor is in ring 0.
- */
-static void __init xen_write_cr3_init(unsigned long cr3)
-{
- BUG_ON(preemptible());
-
- xen_mc_batch(); /* disables interrupts */
-
-	/* Update while interrupts are disabled, so it's atomic with
-	   respect to IPIs */
- this_cpu_write(xen_cr3, cr3);
-
- __xen_write_cr3(true, cr3);
-
- xen_mc_issue(PARAVIRT_LAZY_CPU); /* interrupts restored */
-}
-#endif
-
-static int xen_pgd_alloc(struct mm_struct *mm)
-{
- pgd_t *pgd = mm->pgd;
- int ret = 0;
-
- BUG_ON(PagePinned(virt_to_page(pgd)));
-
-#ifdef CONFIG_X86_64
- {
- struct page *page = virt_to_page(pgd);
- pgd_t *user_pgd;
-
- BUG_ON(page->private != 0);
-
- ret = -ENOMEM;
-
- user_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
- page->private = (unsigned long)user_pgd;
-
- if (user_pgd != NULL) {
-#ifdef CONFIG_X86_VSYSCALL_EMULATION
- user_pgd[pgd_index(VSYSCALL_ADDR)] =
- __pgd(__pa(level3_user_vsyscall) | _PAGE_TABLE);
-#endif
- ret = 0;
- }
-
- BUG_ON(PagePinned(virt_to_page(xen_get_user_pgd(pgd))));
- }
-#endif
- return ret;
-}
-
-static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
-{
-#ifdef CONFIG_X86_64
- pgd_t *user_pgd = xen_get_user_pgd(pgd);
-
- if (user_pgd)
- free_page((unsigned long)user_pgd);
-#endif
-}
-
-/*
- * Init-time set_pte while constructing initial pagetables, which
- * doesn't allow RO page table pages to be remapped RW.
- *
- * If there is no MFN for this PFN then this page is initially
- * ballooned out so clear the PTE (as in decrease_reservation() in
- * drivers/xen/balloon.c).
- *
- * Many of these PTE updates are done on unpinned and writable pages
- * and doing a hypercall for these is unnecessary and expensive. At
- * this point it is not possible to tell if a page is pinned or not,
- * so always write the PTE directly and rely on Xen trapping and
- * emulating any updates as necessary.
- */
-__visible pte_t xen_make_pte_init(pteval_t pte)
-{
-#ifdef CONFIG_X86_64
- unsigned long pfn;
-
- /*
- * Pages belonging to the initial p2m list mapped outside the default
- * address range must be mapped read-only. This region contains the
- * page tables for mapping the p2m list, too, and page tables MUST be
- * mapped read-only.
- */
- pfn = (pte & PTE_PFN_MASK) >> PAGE_SHIFT;
- if (xen_start_info->mfn_list < __START_KERNEL_map &&
- pfn >= xen_start_info->first_p2m_pfn &&
- pfn < xen_start_info->first_p2m_pfn + xen_start_info->nr_p2m_frames)
- pte &= ~_PAGE_RW;
-#endif
- pte = pte_pfn_to_mfn(pte);
- return native_make_pte(pte);
-}
-PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte_init);
-
-static void __init xen_set_pte_init(pte_t *ptep, pte_t pte)
-{
-#ifdef CONFIG_X86_32
- /* If there's an existing pte, then don't allow _PAGE_RW to be set */
- if (pte_mfn(pte) != INVALID_P2M_ENTRY
- && pte_val_ma(*ptep) & _PAGE_PRESENT)
- pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) &
- pte_val_ma(pte));
-#endif
- native_set_pte(ptep, pte);
-}
-
-/* Early in boot, while setting up the initial pagetable, assume
- everything is pinned. */
-static void __init xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn)
-{
-#ifdef CONFIG_FLATMEM
- BUG_ON(mem_map); /* should only be used early */
-#endif
- make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
- pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
-}
-
-/* Used for pmd and pud */
-static void __init xen_alloc_pmd_init(struct mm_struct *mm, unsigned long pfn)
-{
-#ifdef CONFIG_FLATMEM
- BUG_ON(mem_map); /* should only be used early */
-#endif
- make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
-}
-
-/* Early release_pte assumes that all pts are pinned, since there's
- only init_mm and anything attached to that is pinned. */
-static void __init xen_release_pte_init(unsigned long pfn)
-{
- pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
- make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
-}
-
-static void __init xen_release_pmd_init(unsigned long pfn)
-{
- make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
-}
-
-static inline void __pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
-{
- struct multicall_space mcs;
- struct mmuext_op *op;
-
- mcs = __xen_mc_entry(sizeof(*op));
- op = mcs.args;
- op->cmd = cmd;
- op->arg1.mfn = pfn_to_mfn(pfn);
-
- MULTI_mmuext_op(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
-}
-
-static inline void __set_pfn_prot(unsigned long pfn, pgprot_t prot)
-{
- struct multicall_space mcs;
- unsigned long addr = (unsigned long)__va(pfn << PAGE_SHIFT);
-
- mcs = __xen_mc_entry(0);
- MULTI_update_va_mapping(mcs.mc, (unsigned long)addr,
- pfn_pte(pfn, prot), 0);
-}
-
-/* This needs to make sure the new pte page is pinned iff it's being
-   attached to a pinned pagetable. */
-static inline void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn,
- unsigned level)
-{
- bool pinned = PagePinned(virt_to_page(mm->pgd));
-
- trace_xen_mmu_alloc_ptpage(mm, pfn, level, pinned);
-
- if (pinned) {
- struct page *page = pfn_to_page(pfn);
-
- SetPagePinned(page);
-
- if (!PageHighMem(page)) {
- xen_mc_batch();
-
- __set_pfn_prot(pfn, PAGE_KERNEL_RO);
-
- if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS)
- __pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
-
- xen_mc_issue(PARAVIRT_LAZY_MMU);
- } else {
- /* make sure there are no stray mappings of
- this page */
- kmap_flush_unused();
- }
- }
-}
-
-static void xen_alloc_pte(struct mm_struct *mm, unsigned long pfn)
-{
- xen_alloc_ptpage(mm, pfn, PT_PTE);
-}
-
-static void xen_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
-{
- xen_alloc_ptpage(mm, pfn, PT_PMD);
-}
-
-/* This should never happen until we're OK to use struct page */
-static inline void xen_release_ptpage(unsigned long pfn, unsigned level)
-{
- struct page *page = pfn_to_page(pfn);
- bool pinned = PagePinned(page);
-
- trace_xen_mmu_release_ptpage(pfn, level, pinned);
-
- if (pinned) {
- if (!PageHighMem(page)) {
- xen_mc_batch();
-
- if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS)
- __pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
-
- __set_pfn_prot(pfn, PAGE_KERNEL);
-
- xen_mc_issue(PARAVIRT_LAZY_MMU);
- }
- ClearPagePinned(page);
- }
-}
-
-static void xen_release_pte(unsigned long pfn)
-{
- xen_release_ptpage(pfn, PT_PTE);
-}
-
-static void xen_release_pmd(unsigned long pfn)
-{
- xen_release_ptpage(pfn, PT_PMD);
-}
-
-#if CONFIG_PGTABLE_LEVELS >= 4
-static void xen_alloc_pud(struct mm_struct *mm, unsigned long pfn)
-{
- xen_alloc_ptpage(mm, pfn, PT_PUD);
-}
-
-static void xen_release_pud(unsigned long pfn)
-{
- xen_release_ptpage(pfn, PT_PUD);
-}
-#endif
-
-void __init xen_reserve_top(void)
-{
-#ifdef CONFIG_X86_32
- unsigned long top = HYPERVISOR_VIRT_START;
- struct xen_platform_parameters pp;
-
- if (HYPERVISOR_xen_version(XENVER_platform_parameters, &pp) == 0)
- top = pp.virt_start;
-
- reserve_top_address(-top);
-#endif /* CONFIG_X86_32 */
-}
-
-/*
- * Like __va(), but returns address in the kernel mapping (which is
- * all we have until the physical memory mapping has been set up).
- */
-static void * __init __ka(phys_addr_t paddr)
-{
-#ifdef CONFIG_X86_64
- return (void *)(paddr + __START_KERNEL_map);
-#else
- return __va(paddr);
-#endif
-}
-
-/* Convert a machine address to physical address */
-static unsigned long __init m2p(phys_addr_t maddr)
-{
- phys_addr_t paddr;
-
- maddr &= PTE_PFN_MASK;
- paddr = mfn_to_pfn(maddr >> PAGE_SHIFT) << PAGE_SHIFT;
-
- return paddr;
-}
-
-/* Convert a machine address to kernel virtual */
-static void * __init m2v(phys_addr_t maddr)
-{
- return __ka(m2p(maddr));
-}
-
-/* Set the page permissions on identity-mapped pages */
-static void __init set_page_prot_flags(void *addr, pgprot_t prot,
- unsigned long flags)
-{
- unsigned long pfn = __pa(addr) >> PAGE_SHIFT;
- pte_t pte = pfn_pte(pfn, prot);
-
- if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, flags))
- BUG();
-}
-static void __init set_page_prot(void *addr, pgprot_t prot)
-{
- return set_page_prot_flags(addr, prot, UVMF_NONE);
-}
-#ifdef CONFIG_X86_32
-static void __init xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
-{
- unsigned pmdidx, pteidx;
- unsigned ident_pte;
- unsigned long pfn;
-
- level1_ident_pgt = extend_brk(sizeof(pte_t) * LEVEL1_IDENT_ENTRIES,
- PAGE_SIZE);
-
- ident_pte = 0;
- pfn = 0;
- for (pmdidx = 0; pmdidx < PTRS_PER_PMD && pfn < max_pfn; pmdidx++) {
- pte_t *pte_page;
-
- /* Reuse or allocate a page of ptes */
- if (pmd_present(pmd[pmdidx]))
- pte_page = m2v(pmd[pmdidx].pmd);
- else {
- /* Check for free pte pages */
- if (ident_pte == LEVEL1_IDENT_ENTRIES)
- break;
-
- pte_page = &level1_ident_pgt[ident_pte];
- ident_pte += PTRS_PER_PTE;
-
- pmd[pmdidx] = __pmd(__pa(pte_page) | _PAGE_TABLE);
- }
-
- /* Install mappings */
- for (pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) {
- pte_t pte;
-
- if (pfn > max_pfn_mapped)
- max_pfn_mapped = pfn;
-
- if (!pte_none(pte_page[pteidx]))
- continue;
-
- pte = pfn_pte(pfn, PAGE_KERNEL_EXEC);
- pte_page[pteidx] = pte;
- }
- }
-
- for (pteidx = 0; pteidx < ident_pte; pteidx += PTRS_PER_PTE)
- set_page_prot(&level1_ident_pgt[pteidx], PAGE_KERNEL_RO);
-
- set_page_prot(pmd, PAGE_KERNEL_RO);
-}
-#endif
-void __init xen_setup_machphys_mapping(void)
-{
- struct xen_machphys_mapping mapping;
-
- if (HYPERVISOR_memory_op(XENMEM_machphys_mapping, &mapping) == 0) {
- machine_to_phys_mapping = (unsigned long *)mapping.v_start;
- machine_to_phys_nr = mapping.max_mfn + 1;
- } else {
- machine_to_phys_nr = MACH2PHYS_NR_ENTRIES;
- }
-#ifdef CONFIG_X86_32
- WARN_ON((machine_to_phys_mapping + (machine_to_phys_nr - 1))
- < machine_to_phys_mapping);
-#endif
-}
-
-#ifdef CONFIG_X86_64
-static void __init convert_pfn_mfn(void *v)
-{
- pte_t *pte = v;
- int i;
-
- /* All levels are converted the same way, so just treat them
- as ptes. */
- for (i = 0; i < PTRS_PER_PTE; i++)
- pte[i] = xen_make_pte(pte[i].pte);
-}
-static void __init check_pt_base(unsigned long *pt_base, unsigned long *pt_end,
- unsigned long addr)
-{
- if (*pt_base == PFN_DOWN(__pa(addr))) {
- set_page_prot_flags((void *)addr, PAGE_KERNEL, UVMF_INVLPG);
- clear_page((void *)addr);
- (*pt_base)++;
- }
- if (*pt_end == PFN_DOWN(__pa(addr))) {
- set_page_prot_flags((void *)addr, PAGE_KERNEL, UVMF_INVLPG);
- clear_page((void *)addr);
- (*pt_end)--;
- }
-}
-/*
- * Set up the initial kernel pagetable.
- *
- * We can construct this by grafting the Xen provided pagetable into
- * head_64.S's preconstructed pagetables. We copy the Xen L2s into
- * level2_ident_pgt and level2_kernel_pgt. This means that only the
- * kernel has a physical mapping to start with - but that's enough to
- * get __va working. We need to fill in the rest of the physical
- * mapping once some sort of allocator has been set up.
- */
-void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
-{
- pud_t *l3;
- pmd_t *l2;
- unsigned long addr[3];
- unsigned long pt_base, pt_end;
- unsigned i;
-
- /* max_pfn_mapped is the last pfn mapped in the initial memory
- * mappings. Considering that on Xen after the kernel mappings we
- * have the mappings of some pages that don't exist in pfn space, we
- * set max_pfn_mapped to the last real pfn mapped. */
- if (xen_start_info->mfn_list < __START_KERNEL_map)
- max_pfn_mapped = xen_start_info->first_p2m_pfn;
- else
- max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->mfn_list));
-
- pt_base = PFN_DOWN(__pa(xen_start_info->pt_base));
- pt_end = pt_base + xen_start_info->nr_pt_frames;
-
- /* Zap identity mapping */
- init_level4_pgt[0] = __pgd(0);
-
- if (!xen_feature(XENFEAT_auto_translated_physmap)) {
- /* Pre-constructed entries are in pfn, so convert to mfn */
- /* L4[272] -> level3_ident_pgt
- * L4[511] -> level3_kernel_pgt */
- convert_pfn_mfn(init_level4_pgt);
-
- /* L3_i[0] -> level2_ident_pgt */
- convert_pfn_mfn(level3_ident_pgt);
- /* L3_k[510] -> level2_kernel_pgt
- * L3_k[511] -> level2_fixmap_pgt */
- convert_pfn_mfn(level3_kernel_pgt);
-
- /* L3_k[511][506] -> level1_fixmap_pgt */
- convert_pfn_mfn(level2_fixmap_pgt);
- }
- /* We get [511][511] and have Xen's version of level2_kernel_pgt */
- l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
- l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
-
- addr[0] = (unsigned long)pgd;
- addr[1] = (unsigned long)l3;
- addr[2] = (unsigned long)l2;
-	/* Graft it onto L4[272][0]. Note that we are creating an aliasing
-	 * problem: both L4[272][0] and L4[511][510] have entries that point
-	 * to the same L2 (PMD) tables, meaning that if you modify it in
-	 * __va space it will also be modified in the __ka space! (But if
-	 * you just modify the PMD table to point to other PTEs or none,
-	 * then you are OK - which is what cleanup_highmap does.) */
- copy_page(level2_ident_pgt, l2);
- /* Graft it onto L4[511][510] */
- copy_page(level2_kernel_pgt, l2);
-
- /* Copy the initial P->M table mappings if necessary. */
- i = pgd_index(xen_start_info->mfn_list);
- if (i && i < pgd_index(__START_KERNEL_map))
- init_level4_pgt[i] = ((pgd_t *)xen_start_info->pt_base)[i];
-
- if (!xen_feature(XENFEAT_auto_translated_physmap)) {
- /* Make pagetable pieces RO */
- set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
- set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
- set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
- set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
- set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
- set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
- set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
- set_page_prot(level1_fixmap_pgt, PAGE_KERNEL_RO);
-
- /* Pin down new L4 */
- pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
- PFN_DOWN(__pa_symbol(init_level4_pgt)));
-
- /* Unpin Xen-provided one */
- pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
-
- /*
- * At this stage there can be no user pgd, and no page
-		 * structure to attach it to, so make sure we just set the kernel
- * pgd.
- */
- xen_mc_batch();
- __xen_write_cr3(true, __pa(init_level4_pgt));
- xen_mc_issue(PARAVIRT_LAZY_CPU);
- } else
- native_write_cr3(__pa(init_level4_pgt));
-
-	/* We can't easily rip out the L3 and L2, as the Xen pagetables are
-	 * laid out this way for the initial domain: [L4], [L1], [L2], [L3],
-	 * [L1], [L1] ... For guests using the toolstack they are in
-	 * [L4], [L3], [L2], [L1], [L1] order. So for dom0 we can only
-	 * rip out the [L4] (pgd), but for guests we shave off three pages.
-	 */
- for (i = 0; i < ARRAY_SIZE(addr); i++)
- check_pt_base(&pt_base, &pt_end, addr[i]);
-
-	/* The Xen-provided pagetable we are still using, now up to
-	 * three pages smaller. */
- xen_pt_base = PFN_PHYS(pt_base);
- xen_pt_size = (pt_end - pt_base) * PAGE_SIZE;
- memblock_reserve(xen_pt_base, xen_pt_size);
-
- /* Revector the xen_start_info */
- xen_start_info = (struct start_info *)__va(__pa(xen_start_info));
-}
-
-/*
- * Read a value from a physical address.
- */
-static unsigned long __init xen_read_phys_ulong(phys_addr_t addr)
-{
- unsigned long *vaddr;
- unsigned long val;
-
- vaddr = early_memremap_ro(addr, sizeof(val));
- val = *vaddr;
- early_memunmap(vaddr, sizeof(val));
- return val;
-}
-
-/*
- * Translate a virtual address to a physical one without relying on mapped
- * page tables.
- */
-static phys_addr_t __init xen_early_virt_to_phys(unsigned long vaddr)
-{
- phys_addr_t pa;
- pgd_t pgd;
- pud_t pud;
- pmd_t pmd;
- pte_t pte;
-
- pa = read_cr3();
- pgd = native_make_pgd(xen_read_phys_ulong(pa + pgd_index(vaddr) *
- sizeof(pgd)));
- if (!pgd_present(pgd))
- return 0;
-
- pa = pgd_val(pgd) & PTE_PFN_MASK;
- pud = native_make_pud(xen_read_phys_ulong(pa + pud_index(vaddr) *
- sizeof(pud)));
- if (!pud_present(pud))
- return 0;
- pa = pud_pfn(pud) << PAGE_SHIFT;
- if (pud_large(pud))
- return pa + (vaddr & ~PUD_MASK);
-
- pmd = native_make_pmd(xen_read_phys_ulong(pa + pmd_index(vaddr) *
- sizeof(pmd)));
- if (!pmd_present(pmd))
- return 0;
- pa = pmd_pfn(pmd) << PAGE_SHIFT;
- if (pmd_large(pmd))
- return pa + (vaddr & ~PMD_MASK);
-
- pte = native_make_pte(xen_read_phys_ulong(pa + pte_index(vaddr) *
- sizeof(pte)));
- if (!pte_present(pte))
- return 0;
- pa = pte_pfn(pte) << PAGE_SHIFT;
-
- return pa | (vaddr & ~PAGE_MASK);
-}
-
-/*
- * Find a new area for the hypervisor-supplied p2m list and relocate the p2m to
- * this area.
- */
-void __init xen_relocate_p2m(void)
-{
- phys_addr_t size, new_area, pt_phys, pmd_phys, pud_phys, p4d_phys;
- unsigned long p2m_pfn, p2m_pfn_end, n_frames, pfn, pfn_end;
- int n_pte, n_pt, n_pmd, n_pud, n_p4d, idx_pte, idx_pt, idx_pmd, idx_pud, idx_p4d;
- pte_t *pt;
- pmd_t *pmd;
- pud_t *pud;
- p4d_t *p4d = NULL;
- pgd_t *pgd;
- unsigned long *new_p2m;
- int save_pud;
-
- size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long));
- n_pte = roundup(size, PAGE_SIZE) >> PAGE_SHIFT;
- n_pt = roundup(size, PMD_SIZE) >> PMD_SHIFT;
- n_pmd = roundup(size, PUD_SIZE) >> PUD_SHIFT;
- n_pud = roundup(size, P4D_SIZE) >> P4D_SHIFT;
- if (PTRS_PER_P4D > 1)
- n_p4d = roundup(size, PGDIR_SIZE) >> PGDIR_SHIFT;
- else
- n_p4d = 0;
- n_frames = n_pte + n_pt + n_pmd + n_pud + n_p4d;
-
- new_area = xen_find_free_area(PFN_PHYS(n_frames));
- if (!new_area) {
- xen_raw_console_write("Can't find new memory area for p2m needed due to E820 map conflict\n");
- BUG();
- }
-
- /*
-	 * Set up the page tables for addressing the new p2m list.
- * We have asked the hypervisor to map the p2m list at the user address
- * PUD_SIZE. It may have done so, or it may have used a kernel space
- * address depending on the Xen version.
- * To avoid any possible virtual address collision, just use
- * 2 * PUD_SIZE for the new area.
- */
- p4d_phys = new_area;
- pud_phys = p4d_phys + PFN_PHYS(n_p4d);
- pmd_phys = pud_phys + PFN_PHYS(n_pud);
- pt_phys = pmd_phys + PFN_PHYS(n_pmd);
- p2m_pfn = PFN_DOWN(pt_phys) + n_pt;
-
- pgd = __va(read_cr3());
- new_p2m = (unsigned long *)(2 * PGDIR_SIZE);
- idx_p4d = 0;
- save_pud = n_pud;
- do {
- if (n_p4d > 0) {
- p4d = early_memremap(p4d_phys, PAGE_SIZE);
- clear_page(p4d);
- n_pud = min(save_pud, PTRS_PER_P4D);
- }
- for (idx_pud = 0; idx_pud < n_pud; idx_pud++) {
- pud = early_memremap(pud_phys, PAGE_SIZE);
- clear_page(pud);
- for (idx_pmd = 0; idx_pmd < min(n_pmd, PTRS_PER_PUD);
- idx_pmd++) {
- pmd = early_memremap(pmd_phys, PAGE_SIZE);
- clear_page(pmd);
- for (idx_pt = 0; idx_pt < min(n_pt, PTRS_PER_PMD);
- idx_pt++) {
- pt = early_memremap(pt_phys, PAGE_SIZE);
- clear_page(pt);
- for (idx_pte = 0;
- idx_pte < min(n_pte, PTRS_PER_PTE);
- idx_pte++) {
- set_pte(pt + idx_pte,
- pfn_pte(p2m_pfn, PAGE_KERNEL));
- p2m_pfn++;
- }
- n_pte -= PTRS_PER_PTE;
- early_memunmap(pt, PAGE_SIZE);
- make_lowmem_page_readonly(__va(pt_phys));
- pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE,
- PFN_DOWN(pt_phys));
- set_pmd(pmd + idx_pt,
- __pmd(_PAGE_TABLE | pt_phys));
- pt_phys += PAGE_SIZE;
- }
- n_pt -= PTRS_PER_PMD;
- early_memunmap(pmd, PAGE_SIZE);
- make_lowmem_page_readonly(__va(pmd_phys));
- pin_pagetable_pfn(MMUEXT_PIN_L2_TABLE,
- PFN_DOWN(pmd_phys));
- set_pud(pud + idx_pmd, __pud(_PAGE_TABLE | pmd_phys));
- pmd_phys += PAGE_SIZE;
- }
- n_pmd -= PTRS_PER_PUD;
- early_memunmap(pud, PAGE_SIZE);
- make_lowmem_page_readonly(__va(pud_phys));
- pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, PFN_DOWN(pud_phys));
- if (n_p4d > 0)
- set_p4d(p4d + idx_pud, __p4d(_PAGE_TABLE | pud_phys));
- else
- set_pgd(pgd + 2 + idx_pud, __pgd(_PAGE_TABLE | pud_phys));
- pud_phys += PAGE_SIZE;
- }
- if (n_p4d > 0) {
- save_pud -= PTRS_PER_P4D;
- early_memunmap(p4d, PAGE_SIZE);
- make_lowmem_page_readonly(__va(p4d_phys));
- pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE, PFN_DOWN(p4d_phys));
- set_pgd(pgd + 2 + idx_p4d, __pgd(_PAGE_TABLE | p4d_phys));
- p4d_phys += PAGE_SIZE;
- }
- } while (++idx_p4d < n_p4d);
-
- /* Now copy the old p2m info to the new area. */
- memcpy(new_p2m, xen_p2m_addr, size);
- xen_p2m_addr = new_p2m;
-
- /* Release the old p2m list and set new list info. */
- p2m_pfn = PFN_DOWN(xen_early_virt_to_phys(xen_start_info->mfn_list));
- BUG_ON(!p2m_pfn);
- p2m_pfn_end = p2m_pfn + PFN_DOWN(size);
-
- if (xen_start_info->mfn_list < __START_KERNEL_map) {
- pfn = xen_start_info->first_p2m_pfn;
- pfn_end = xen_start_info->first_p2m_pfn +
- xen_start_info->nr_p2m_frames;
- set_pgd(pgd + 1, __pgd(0));
- } else {
- pfn = p2m_pfn;
- pfn_end = p2m_pfn_end;
- }
-
- memblock_free(PFN_PHYS(pfn), PAGE_SIZE * (pfn_end - pfn));
- while (pfn < pfn_end) {
- if (pfn == p2m_pfn) {
- pfn = p2m_pfn_end;
- continue;
- }
- make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
- pfn++;
- }
-
- xen_start_info->mfn_list = (unsigned long)xen_p2m_addr;
- xen_start_info->first_p2m_pfn = PFN_DOWN(new_area);
- xen_start_info->nr_p2m_frames = n_frames;
-}
-
-#else /* !CONFIG_X86_64 */
-static RESERVE_BRK_ARRAY(pmd_t, initial_kernel_pmd, PTRS_PER_PMD);
-static RESERVE_BRK_ARRAY(pmd_t, swapper_kernel_pmd, PTRS_PER_PMD);
-
-static void __init xen_write_cr3_init(unsigned long cr3)
-{
- unsigned long pfn = PFN_DOWN(__pa(swapper_pg_dir));
-
- BUG_ON(read_cr3() != __pa(initial_page_table));
- BUG_ON(cr3 != __pa(swapper_pg_dir));
-
- /*
- * We are switching to swapper_pg_dir for the first time (from
- * initial_page_table) and therefore need to mark that page
- * read-only and then pin it.
- *
- * Xen disallows sharing of kernel PMDs for PAE
- * guests. Therefore we must copy the kernel PMD from
- * initial_page_table into a new kernel PMD to be used in
- * swapper_pg_dir.
- */
- swapper_kernel_pmd =
- extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);
- copy_page(swapper_kernel_pmd, initial_kernel_pmd);
- swapper_pg_dir[KERNEL_PGD_BOUNDARY] =
- __pgd(__pa(swapper_kernel_pmd) | _PAGE_PRESENT);
- set_page_prot(swapper_kernel_pmd, PAGE_KERNEL_RO);
-
- set_page_prot(swapper_pg_dir, PAGE_KERNEL_RO);
- xen_write_cr3(cr3);
- pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, pfn);
-
- pin_pagetable_pfn(MMUEXT_UNPIN_TABLE,
- PFN_DOWN(__pa(initial_page_table)));
- set_page_prot(initial_page_table, PAGE_KERNEL);
- set_page_prot(initial_kernel_pmd, PAGE_KERNEL);
-
- pv_mmu_ops.write_cr3 = &xen_write_cr3;
-}
-
-/*
- * For 32-bit domains xen_start_info->pt_base is the pgd address, which might
- * not be the first page table in the page table pool.
- * Iterate through the initial page tables to find the real page table base.
- */
-static phys_addr_t xen_find_pt_base(pmd_t *pmd)
-{
- phys_addr_t pt_base, paddr;
- unsigned pmdidx;
-
- pt_base = min(__pa(xen_start_info->pt_base), __pa(pmd));
-
- for (pmdidx = 0; pmdidx < PTRS_PER_PMD; pmdidx++)
- if (pmd_present(pmd[pmdidx]) && !pmd_large(pmd[pmdidx])) {
- paddr = m2p(pmd[pmdidx].pmd);
- pt_base = min(pt_base, paddr);
- }
-
- return pt_base;
-}
-
-void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
-{
- pmd_t *kernel_pmd;
-
- kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd);
-
- xen_pt_base = xen_find_pt_base(kernel_pmd);
- xen_pt_size = xen_start_info->nr_pt_frames * PAGE_SIZE;
-
- initial_kernel_pmd =
- extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);
-
- max_pfn_mapped = PFN_DOWN(xen_pt_base + xen_pt_size + 512 * 1024);
-
- copy_page(initial_kernel_pmd, kernel_pmd);
-
- xen_map_identity_early(initial_kernel_pmd, max_pfn);
-
- copy_page(initial_page_table, pgd);
- initial_page_table[KERNEL_PGD_BOUNDARY] =
- __pgd(__pa(initial_kernel_pmd) | _PAGE_PRESENT);
-
- set_page_prot(initial_kernel_pmd, PAGE_KERNEL_RO);
- set_page_prot(initial_page_table, PAGE_KERNEL_RO);
- set_page_prot(empty_zero_page, PAGE_KERNEL_RO);
-
- pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
-
- pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE,
- PFN_DOWN(__pa(initial_page_table)));
- xen_write_cr3(__pa(initial_page_table));
-
- memblock_reserve(xen_pt_base, xen_pt_size);
-}
-#endif /* CONFIG_X86_64 */
-
-void __init xen_reserve_special_pages(void)
-{
- phys_addr_t paddr;
-
- memblock_reserve(__pa(xen_start_info), PAGE_SIZE);
- if (xen_start_info->store_mfn) {
- paddr = PFN_PHYS(mfn_to_pfn(xen_start_info->store_mfn));
- memblock_reserve(paddr, PAGE_SIZE);
- }
- if (!xen_initial_domain()) {
- paddr = PFN_PHYS(mfn_to_pfn(xen_start_info->console.domU.mfn));
- memblock_reserve(paddr, PAGE_SIZE);
- }
-}
-
-void __init xen_pt_check_e820(void)
-{
- if (xen_is_e820_reserved(xen_pt_base, xen_pt_size)) {
- xen_raw_console_write("Xen hypervisor allocated page table memory conflicts with E820 map\n");
- BUG();
- }
-}
-
-static unsigned char dummy_mapping[PAGE_SIZE] __page_aligned_bss;
-
-static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
-{
- pte_t pte;
-
- phys >>= PAGE_SHIFT;
-
- switch (idx) {
- case FIX_BTMAP_END ... FIX_BTMAP_BEGIN:
- case FIX_RO_IDT:
-#ifdef CONFIG_X86_32
- case FIX_WP_TEST:
-# ifdef CONFIG_HIGHMEM
- case FIX_KMAP_BEGIN ... FIX_KMAP_END:
-# endif
-#elif defined(CONFIG_X86_VSYSCALL_EMULATION)
- case VSYSCALL_PAGE:
-#endif
- case FIX_TEXT_POKE0:
- case FIX_TEXT_POKE1:
- case FIX_GDT_REMAP_BEGIN ... FIX_GDT_REMAP_END:
- /* All local page mappings */
- pte = pfn_pte(phys, prot);
- break;
-
-#ifdef CONFIG_X86_LOCAL_APIC
- case FIX_APIC_BASE: /* maps dummy local APIC */
- pte = pfn_pte(PFN_DOWN(__pa(dummy_mapping)), PAGE_KERNEL);
- break;
-#endif
-
-#ifdef CONFIG_X86_IO_APIC
- case FIX_IO_APIC_BASE_0 ... FIX_IO_APIC_BASE_END:
- /*
- * We just don't map the IO APIC - all access is via
- * hypercalls. Keep the address in the pte for reference.
- */
- pte = pfn_pte(PFN_DOWN(__pa(dummy_mapping)), PAGE_KERNEL);
- break;
-#endif
-
- case FIX_PARAVIRT_BOOTMAP:
- /* This is an MFN, but it isn't an IO mapping from the
- IO domain */
- pte = mfn_pte(phys, prot);
- break;
-
- default:
- /* By default, set_fixmap is used for hardware mappings */
- pte = mfn_pte(phys, prot);
- break;
- }
-
- __native_set_fixmap(idx, pte);
-
-#ifdef CONFIG_X86_VSYSCALL_EMULATION
- /* Replicate changes to map the vsyscall page into the user
- pagetable vsyscall mapping. */
- if (idx == VSYSCALL_PAGE) {
- unsigned long vaddr = __fix_to_virt(idx);
- set_pte_vaddr_pud(level3_user_vsyscall, vaddr, pte);
- }
-#endif
-}
-
-static void __init xen_post_allocator_init(void)
-{
- if (xen_feature(XENFEAT_auto_translated_physmap))
- return;
-
- pv_mmu_ops.set_pte = xen_set_pte;
- pv_mmu_ops.set_pmd = xen_set_pmd;
- pv_mmu_ops.set_pud = xen_set_pud;
-#if CONFIG_PGTABLE_LEVELS >= 4
- pv_mmu_ops.set_p4d = xen_set_p4d;
-#endif
-
- /* This will work as long as patching hasn't happened yet
- (which it hasn't) */
- pv_mmu_ops.alloc_pte = xen_alloc_pte;
- pv_mmu_ops.alloc_pmd = xen_alloc_pmd;
- pv_mmu_ops.release_pte = xen_release_pte;
- pv_mmu_ops.release_pmd = xen_release_pmd;
-#if CONFIG_PGTABLE_LEVELS >= 4
- pv_mmu_ops.alloc_pud = xen_alloc_pud;
- pv_mmu_ops.release_pud = xen_release_pud;
-#endif
- pv_mmu_ops.make_pte = PV_CALLEE_SAVE(xen_make_pte);
-
-#ifdef CONFIG_X86_64
- pv_mmu_ops.write_cr3 = &xen_write_cr3;
- SetPagePinned(virt_to_page(level3_user_vsyscall));
-#endif
- xen_mark_init_mm_pinned();
-}
-
-static void xen_leave_lazy_mmu(void)
-{
- preempt_disable();
- xen_mc_flush();
- paravirt_leave_lazy_mmu();
- preempt_enable();
-}
-
-static const struct pv_mmu_ops xen_mmu_ops __initconst = {
- .read_cr2 = xen_read_cr2,
- .write_cr2 = xen_write_cr2,
-
- .read_cr3 = xen_read_cr3,
- .write_cr3 = xen_write_cr3_init,
-
- .flush_tlb_user = xen_flush_tlb,
- .flush_tlb_kernel = xen_flush_tlb,
- .flush_tlb_single = xen_flush_tlb_single,
- .flush_tlb_others = xen_flush_tlb_others,
-
- .pte_update = paravirt_nop,
-
- .pgd_alloc = xen_pgd_alloc,
- .pgd_free = xen_pgd_free,
-
- .alloc_pte = xen_alloc_pte_init,
- .release_pte = xen_release_pte_init,
- .alloc_pmd = xen_alloc_pmd_init,
- .release_pmd = xen_release_pmd_init,
-
- .set_pte = xen_set_pte_init,
- .set_pte_at = xen_set_pte_at,
- .set_pmd = xen_set_pmd_hyper,
-
- .ptep_modify_prot_start = __ptep_modify_prot_start,
- .ptep_modify_prot_commit = __ptep_modify_prot_commit,
-
- .pte_val = PV_CALLEE_SAVE(xen_pte_val),
- .pgd_val = PV_CALLEE_SAVE(xen_pgd_val),
-
- .make_pte = PV_CALLEE_SAVE(xen_make_pte_init),
- .make_pgd = PV_CALLEE_SAVE(xen_make_pgd),
-
-#ifdef CONFIG_X86_PAE
- .set_pte_atomic = xen_set_pte_atomic,
- .pte_clear = xen_pte_clear,
- .pmd_clear = xen_pmd_clear,
-#endif /* CONFIG_X86_PAE */
- .set_pud = xen_set_pud_hyper,
-
- .make_pmd = PV_CALLEE_SAVE(xen_make_pmd),
- .pmd_val = PV_CALLEE_SAVE(xen_pmd_val),
-
-#if CONFIG_PGTABLE_LEVELS >= 4
- .pud_val = PV_CALLEE_SAVE(xen_pud_val),
- .make_pud = PV_CALLEE_SAVE(xen_make_pud),
- .set_p4d = xen_set_p4d_hyper,
-
- .alloc_pud = xen_alloc_pmd_init,
- .release_pud = xen_release_pmd_init,
-#endif /* CONFIG_PGTABLE_LEVELS >= 4 */
-
- .activate_mm = xen_activate_mm,
- .dup_mmap = xen_dup_mmap,
- .exit_mmap = xen_exit_mmap,
-
- .lazy_mode = {
- .enter = paravirt_enter_lazy_mmu,
- .leave = xen_leave_lazy_mmu,
- .flush = paravirt_flush_lazy_mmu,
- },
-
- .set_fixmap = xen_set_fixmap,
-};
-
-void __init xen_init_mmu_ops(void)
-{
- x86_init.paging.pagetable_init = xen_pagetable_init;
-
- if (xen_feature(XENFEAT_auto_translated_physmap))
- return;
-
- pv_mmu_ops = xen_mmu_ops;
-
- memset(dummy_mapping, 0xff, PAGE_SIZE);
-}
-
-/* Protected by xen_reservation_lock. */
-#define MAX_CONTIG_ORDER 9 /* 2MB */
-static unsigned long discontig_frames[1<<MAX_CONTIG_ORDER];
-
-#define VOID_PTE (mfn_pte(0, __pgprot(0)))
-static void xen_zap_pfn_range(unsigned long vaddr, unsigned int order,
- unsigned long *in_frames,
- unsigned long *out_frames)
-{
- int i;
- struct multicall_space mcs;
-
- xen_mc_batch();
- for (i = 0; i < (1UL<<order); i++, vaddr += PAGE_SIZE) {
- mcs = __xen_mc_entry(0);
-
- if (in_frames)
- in_frames[i] = virt_to_mfn(vaddr);
-
- MULTI_update_va_mapping(mcs.mc, vaddr, VOID_PTE, 0);
- __set_phys_to_machine(virt_to_pfn(vaddr), INVALID_P2M_ENTRY);
-
- if (out_frames)
- out_frames[i] = virt_to_pfn(vaddr);
- }
- xen_mc_issue(0);
-}
-
-/*
- * Update the pfn-to-mfn mappings for a virtual address range, either to
- * point to an array of mfns, or contiguously from a single starting
- * mfn.
- */
-static void xen_remap_exchanged_ptes(unsigned long vaddr, int order,
- unsigned long *mfns,
- unsigned long first_mfn)
-{
- unsigned i, limit;
- unsigned long mfn;
-
- xen_mc_batch();
-
- limit = 1u << order;
- for (i = 0; i < limit; i++, vaddr += PAGE_SIZE) {
- struct multicall_space mcs;
- unsigned flags;
-
- mcs = __xen_mc_entry(0);
- if (mfns)
- mfn = mfns[i];
- else
- mfn = first_mfn + i;
-
- if (i < (limit - 1))
- flags = 0;
- else {
- if (order == 0)
- flags = UVMF_INVLPG | UVMF_ALL;
- else
- flags = UVMF_TLB_FLUSH | UVMF_ALL;
- }
-
- MULTI_update_va_mapping(mcs.mc, vaddr,
- mfn_pte(mfn, PAGE_KERNEL), flags);
-
- set_phys_to_machine(virt_to_pfn(vaddr), mfn);
- }
-
- xen_mc_issue(0);
-}
-
-/*
- * Perform the hypercall to exchange a region of our pfns to point to
- * memory with the required contiguous alignment. Takes the pfns as
- * input, and populates mfns as output.
- *
- * Returns a success code indicating whether the hypervisor was able to
- * satisfy the request or not.
- */
-static int xen_exchange_memory(unsigned long extents_in, unsigned int order_in,
- unsigned long *pfns_in,
- unsigned long extents_out,
- unsigned int order_out,
- unsigned long *mfns_out,
- unsigned int address_bits)
-{
- long rc;
- int success;
-
- struct xen_memory_exchange exchange = {
- .in = {
- .nr_extents = extents_in,
- .extent_order = order_in,
- .extent_start = pfns_in,
- .domid = DOMID_SELF
- },
- .out = {
- .nr_extents = extents_out,
- .extent_order = order_out,
- .extent_start = mfns_out,
- .address_bits = address_bits,
- .domid = DOMID_SELF
- }
- };
-
- BUG_ON(extents_in << order_in != extents_out << order_out);
-
- rc = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);
- success = (exchange.nr_exchanged == extents_in);
-
- BUG_ON(!success && ((exchange.nr_exchanged != 0) || (rc == 0)));
- BUG_ON(success && (rc != 0));
-
- return success;
-}
-
-int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
- unsigned int address_bits,
- dma_addr_t *dma_handle)
-{
- unsigned long *in_frames = discontig_frames, out_frame;
- unsigned long flags;
- int success;
- unsigned long vstart = (unsigned long)phys_to_virt(pstart);
-
- /*
- * Currently an auto-translated guest will not perform I/O, nor will
- * it require PAE page directories below 4GB. Therefore any calls to
- * this function are redundant and can be ignored.
- */
-
- if (xen_feature(XENFEAT_auto_translated_physmap))
- return 0;
-
- if (unlikely(order > MAX_CONTIG_ORDER))
- return -ENOMEM;
-
- memset((void *) vstart, 0, PAGE_SIZE << order);
-
- spin_lock_irqsave(&xen_reservation_lock, flags);
-
- /* 1. Zap current PTEs, remembering MFNs. */
- xen_zap_pfn_range(vstart, order, in_frames, NULL);
-
- /* 2. Get a new contiguous memory extent. */
- out_frame = virt_to_pfn(vstart);
- success = xen_exchange_memory(1UL << order, 0, in_frames,
- 1, order, &out_frame,
- address_bits);
-
- /* 3. Map the new extent in place of old pages. */
- if (success)
- xen_remap_exchanged_ptes(vstart, order, NULL, out_frame);
- else
- xen_remap_exchanged_ptes(vstart, order, in_frames, 0);
-
- spin_unlock_irqrestore(&xen_reservation_lock, flags);
-
- *dma_handle = virt_to_machine(vstart).maddr;
- return success ? 0 : -ENOMEM;
-}
-EXPORT_SYMBOL_GPL(xen_create_contiguous_region);
-
-void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order)
-{
- unsigned long *out_frames = discontig_frames, in_frame;
- unsigned long flags;
- int success;
- unsigned long vstart;
-
- if (xen_feature(XENFEAT_auto_translated_physmap))
- return;
-
- if (unlikely(order > MAX_CONTIG_ORDER))
- return;
-
- vstart = (unsigned long)phys_to_virt(pstart);
- memset((void *) vstart, 0, PAGE_SIZE << order);
-
- spin_lock_irqsave(&xen_reservation_lock, flags);
-
- /* 1. Find start MFN of contiguous extent. */
- in_frame = virt_to_mfn(vstart);
-
- /* 2. Zap current PTEs. */
- xen_zap_pfn_range(vstart, order, NULL, out_frames);
-
- /* 3. Do the exchange for non-contiguous MFNs. */
- success = xen_exchange_memory(1, order, &in_frame, 1UL << order,
- 0, out_frames, 0);
-
- /* 4. Map new pages in place of old pages. */
- if (success)
- xen_remap_exchanged_ptes(vstart, order, out_frames, 0);
- else
- xen_remap_exchanged_ptes(vstart, order, NULL, in_frame);
-
- spin_unlock_irqrestore(&xen_reservation_lock, flags);
-}
-EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);
-
-#ifdef CONFIG_XEN_PVHVM
-#ifdef CONFIG_PROC_VMCORE
-/*
- * This function is used in two contexts:
- * - the kdump kernel has to check whether a pfn of the crashed kernel
- *   was a ballooned page. vmcore uses this function to decide
- * whether to access a pfn of the crashed kernel.
- * - the kexec kernel has to check whether a pfn was ballooned by the
- * previous kernel. If the pfn is ballooned, handle it properly.
- * Returns 0 if the pfn is not backed by a RAM page; the caller may
- * handle the pfn specially in this case.
- */
-static int xen_oldmem_pfn_is_ram(unsigned long pfn)
-{
- struct xen_hvm_get_mem_type a = {
- .domid = DOMID_SELF,
- .pfn = pfn,
- };
- int ram;
-
- if (HYPERVISOR_hvm_op(HVMOP_get_mem_type, &a))
- return -ENXIO;
-
- switch (a.mem_type) {
- case HVMMEM_mmio_dm:
- ram = 0;
- break;
- case HVMMEM_ram_rw:
- case HVMMEM_ram_ro:
- default:
- ram = 1;
- break;
- }
-
- return ram;
-}
-#endif
-
-static void xen_hvm_exit_mmap(struct mm_struct *mm)
-{
- struct xen_hvm_pagetable_dying a;
- int rc;
-
- a.domid = DOMID_SELF;
- a.gpa = __pa(mm->pgd);
- rc = HYPERVISOR_hvm_op(HVMOP_pagetable_dying, &a);
- WARN_ON_ONCE(rc < 0);
-}
-
-static int is_pagetable_dying_supported(void)
-{
- struct xen_hvm_pagetable_dying a;
- int rc = 0;
-
- a.domid = DOMID_SELF;
- a.gpa = 0x00;
- rc = HYPERVISOR_hvm_op(HVMOP_pagetable_dying, &a);
- if (rc < 0) {
- printk(KERN_DEBUG "HVMOP_pagetable_dying not supported\n");
- return 0;
- }
- return 1;
-}
-
-void __init xen_hvm_init_mmu_ops(void)
-{
- if (is_pagetable_dying_supported())
- pv_mmu_ops.exit_mmap = xen_hvm_exit_mmap;
-#ifdef CONFIG_PROC_VMCORE
- register_oldmem_pfn_is_ram(&xen_oldmem_pfn_is_ram);
-#endif
-}
-#endif
#define REMAP_BATCH_SIZE 16
@@ -2974,7 +191,6 @@ int xen_remap_domain_gfn_array(struct vm_area_struct *vma,
}
EXPORT_SYMBOL_GPL(xen_remap_domain_gfn_array);
-
/* Returns: 0 success */
int xen_unmap_domain_gfn_range(struct vm_area_struct *vma,
int numpgs, struct page **pages)
diff --git a/arch/x86/xen/mmu_hvm.c b/arch/x86/xen/mmu_hvm.c
new file mode 100644
index 000000000000..1c57f1cd545c
--- /dev/null
+++ b/arch/x86/xen/mmu_hvm.c
@@ -0,0 +1,79 @@
+#include <linux/types.h>
+#include <linux/crash_dump.h>
+
+#include <xen/interface/xen.h>
+#include <xen/hvm.h>
+
+#include "mmu.h"
+
+#ifdef CONFIG_PROC_VMCORE
+/*
+ * This function is used in two contexts:
+ * - the kdump kernel has to check whether a pfn of the crashed kernel
+ *   was a ballooned page. vmcore uses this function to decide
+ * whether to access a pfn of the crashed kernel.
+ * - the kexec kernel has to check whether a pfn was ballooned by the
+ * previous kernel. If the pfn is ballooned, handle it properly.
+ * Returns 0 if the pfn is not backed by a RAM page; the caller may
+ * handle the pfn specially in this case.
+ */
+static int xen_oldmem_pfn_is_ram(unsigned long pfn)
+{
+ struct xen_hvm_get_mem_type a = {
+ .domid = DOMID_SELF,
+ .pfn = pfn,
+ };
+ int ram;
+
+ if (HYPERVISOR_hvm_op(HVMOP_get_mem_type, &a))
+ return -ENXIO;
+
+ switch (a.mem_type) {
+ case HVMMEM_mmio_dm:
+ ram = 0;
+ break;
+ case HVMMEM_ram_rw:
+ case HVMMEM_ram_ro:
+ default:
+ ram = 1;
+ break;
+ }
+
+ return ram;
+}
+#endif
+
+static void xen_hvm_exit_mmap(struct mm_struct *mm)
+{
+ struct xen_hvm_pagetable_dying a;
+ int rc;
+
+ a.domid = DOMID_SELF;
+ a.gpa = __pa(mm->pgd);
+ rc = HYPERVISOR_hvm_op(HVMOP_pagetable_dying, &a);
+ WARN_ON_ONCE(rc < 0);
+}
+
+static int is_pagetable_dying_supported(void)
+{
+ struct xen_hvm_pagetable_dying a;
+ int rc = 0;
+
+ a.domid = DOMID_SELF;
+ a.gpa = 0x00;
+ rc = HYPERVISOR_hvm_op(HVMOP_pagetable_dying, &a);
+ if (rc < 0) {
+ printk(KERN_DEBUG "HVMOP_pagetable_dying not supported\n");
+ return 0;
+ }
+ return 1;
+}
+
+void __init xen_hvm_init_mmu_ops(void)
+{
+ if (is_pagetable_dying_supported())
+ pv_mmu_ops.exit_mmap = xen_hvm_exit_mmap;
+#ifdef CONFIG_PROC_VMCORE
+ register_oldmem_pfn_is_ram(&xen_oldmem_pfn_is_ram);
+#endif
+}
diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
new file mode 100644
index 000000000000..9d9ae6650aa1
--- /dev/null
+++ b/arch/x86/xen/mmu_pv.c
@@ -0,0 +1,2730 @@
+/*
+ * Xen mmu operations
+ *
+ * This file contains the various mmu fetch and update operations.
+ * The most important job they must perform is the mapping between the
+ * domain's pfn and the overall machine mfns.
+ *
+ * Xen allows guests to directly update the pagetable, in a controlled
+ * fashion. In other words, the guest modifies the same pagetable
+ * that the CPU actually uses, which eliminates the overhead of having
+ * a separate shadow pagetable.
+ *
+ * In order to allow this, it falls on the guest domain to map its
+ * notion of a "physical" pfn - which is just a domain-local linear
+ * address - into a real "machine address" which the CPU's MMU can
+ * use.
+ *
+ * A pgd_t/pmd_t/pte_t will typically contain an mfn, and so can be
+ * inserted directly into the pagetable. When creating a new
+ * pte/pmd/pgd, it converts the passed pfn into an mfn. Conversely,
+ * when reading the content back with __(pgd|pmd|pte)_val, it converts
+ * the mfn back into a pfn.
+ *
+ * The other constraint is that all pages which make up a pagetable
+ * must be mapped read-only in the guest. This prevents uncontrolled
+ * guest updates to the pagetable. Xen strictly enforces this, and
+ * will disallow any pagetable update which will end up mapping a
+ * pagetable page RW, and will disallow using any writable page as a
+ * pagetable.
+ *
+ * Naively, when loading %cr3 with the base of a new pagetable, Xen
+ * would need to validate the whole pagetable before going on.
+ * Naturally, this is quite slow. The solution is to "pin" a
+ * pagetable, which enforces all the constraints on the pagetable even
+ * when it is not actively in use.  This means that Xen can be assured
+ * that it is still valid when you do load it into %cr3, and doesn't
+ * need to revalidate it.
+ *
+ * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
+ */
+#include <linux/sched/mm.h>
+#include <linux/highmem.h>
+#include <linux/debugfs.h>
+#include <linux/bug.h>
+#include <linux/vmalloc.h>
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/gfp.h>
+#include <linux/memblock.h>
+#include <linux/seq_file.h>
+#include <linux/crash_dump.h>
+#ifdef CONFIG_KEXEC_CORE
+#include <linux/kexec.h>
+#endif
+
+#include <trace/events/xen.h>
+
+#include <asm/pgtable.h>
+#include <asm/tlbflush.h>
+#include <asm/fixmap.h>
+#include <asm/mmu_context.h>
+#include <asm/setup.h>
+#include <asm/paravirt.h>
+#include <asm/e820/api.h>
+#include <asm/linkage.h>
+#include <asm/page.h>
+#include <asm/init.h>
+#include <asm/pat.h>
+#include <asm/smp.h>
+
+#include <asm/xen/hypercall.h>
+#include <asm/xen/hypervisor.h>
+
+#include <xen/xen.h>
+#include <xen/page.h>
+#include <xen/interface/xen.h>
+#include <xen/interface/hvm/hvm_op.h>
+#include <xen/interface/version.h>
+#include <xen/interface/memory.h>
+#include <xen/hvc-console.h>
+
+#include "multicalls.h"
+#include "mmu.h"
+#include "debugfs.h"
+
+#ifdef CONFIG_X86_32
+/*
+ * Identity map, in addition to plain kernel map. This needs to be
+ * large enough to allocate page table pages to allocate the rest.
+ * Each page can map 2MB.
+ */
+#define LEVEL1_IDENT_ENTRIES (PTRS_PER_PTE * 4)
+static RESERVE_BRK_ARRAY(pte_t, level1_ident_pgt, LEVEL1_IDENT_ENTRIES);
+#endif
+#ifdef CONFIG_X86_64
+/* l3 pud for userspace vsyscall mapping */
+static pud_t level3_user_vsyscall[PTRS_PER_PUD] __page_aligned_bss;
+#endif /* CONFIG_X86_64 */
+
+/*
+ * Note about cr3 (pagetable base) values:
+ *
+ * xen_cr3 contains the current logical cr3 value; it contains the
+ * last set cr3. This may not be the current effective cr3, because
+ * its update may still be lazily deferred.  However, a vcpu looking
+ * at its own cr3 can use this value knowing that everything will
+ * be self-consistent.
+ *
+ * xen_current_cr3 contains the actual vcpu cr3; it is set once the
+ * hypercall to set the vcpu cr3 is complete (so it may be a little
+ * out of date, but it will never be set early). If one vcpu is
+ * looking at another vcpu's cr3 value, it should use this variable.
+ */
+DEFINE_PER_CPU(unsigned long, xen_cr3); /* cr3 stored as physaddr */
+DEFINE_PER_CPU(unsigned long, xen_current_cr3); /* actual vcpu cr3 */
+
+static phys_addr_t xen_pt_base, xen_pt_size __initdata;
+
+/*
+ * Just beyond the highest usermode address. STACK_TOP_MAX has a
+ * redzone above it, so round it up to a PGD boundary.
+ */
+#define USER_LIMIT ((STACK_TOP_MAX + PGDIR_SIZE - 1) & PGDIR_MASK)
+
+void make_lowmem_page_readonly(void *vaddr)
+{
+ pte_t *pte, ptev;
+ unsigned long address = (unsigned long)vaddr;
+ unsigned int level;
+
+ pte = lookup_address(address, &level);
+ if (pte == NULL)
+ return; /* vaddr missing */
+
+ ptev = pte_wrprotect(*pte);
+
+ if (HYPERVISOR_update_va_mapping(address, ptev, 0))
+ BUG();
+}
+
+void make_lowmem_page_readwrite(void *vaddr)
+{
+ pte_t *pte, ptev;
+ unsigned long address = (unsigned long)vaddr;
+ unsigned int level;
+
+ pte = lookup_address(address, &level);
+ if (pte == NULL)
+ return; /* vaddr missing */
+
+ ptev = pte_mkwrite(*pte);
+
+ if (HYPERVISOR_update_va_mapping(address, ptev, 0))
+ BUG();
+}
+
+
+static bool xen_page_pinned(void *ptr)
+{
+ struct page *page = virt_to_page(ptr);
+
+ return PagePinned(page);
+}
+
+void xen_set_domain_pte(pte_t *ptep, pte_t pteval, unsigned domid)
+{
+ struct multicall_space mcs;
+ struct mmu_update *u;
+
+ trace_xen_mmu_set_domain_pte(ptep, pteval, domid);
+
+ mcs = xen_mc_entry(sizeof(*u));
+ u = mcs.args;
+
+ /* ptep might be kmapped when using 32-bit HIGHPTE */
+ u->ptr = virt_to_machine(ptep).maddr;
+ u->val = pte_val_ma(pteval);
+
+ MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, domid);
+
+ xen_mc_issue(PARAVIRT_LAZY_MMU);
+}
+EXPORT_SYMBOL_GPL(xen_set_domain_pte);
+
+static void xen_extend_mmu_update(const struct mmu_update *update)
+{
+ struct multicall_space mcs;
+ struct mmu_update *u;
+
+ mcs = xen_mc_extend_args(__HYPERVISOR_mmu_update, sizeof(*u));
+
+ if (mcs.mc != NULL) {
+ mcs.mc->args[1]++;
+ } else {
+ mcs = __xen_mc_entry(sizeof(*u));
+ MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
+ }
+
+ u = mcs.args;
+ *u = *update;
+}
+
+static void xen_extend_mmuext_op(const struct mmuext_op *op)
+{
+ struct multicall_space mcs;
+ struct mmuext_op *u;
+
+ mcs = xen_mc_extend_args(__HYPERVISOR_mmuext_op, sizeof(*u));
+
+ if (mcs.mc != NULL) {
+ mcs.mc->args[1]++;
+ } else {
+ mcs = __xen_mc_entry(sizeof(*u));
+ MULTI_mmuext_op(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
+ }
+
+ u = mcs.args;
+ *u = *op;
+}
+
+static void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
+{
+ struct mmu_update u;
+
+ preempt_disable();
+
+ xen_mc_batch();
+
+ /* ptr may be ioremapped for 64-bit pagetable setup */
+ u.ptr = arbitrary_virt_to_machine(ptr).maddr;
+ u.val = pmd_val_ma(val);
+ xen_extend_mmu_update(&u);
+
+ xen_mc_issue(PARAVIRT_LAZY_MMU);
+
+ preempt_enable();
+}
+
+static void xen_set_pmd(pmd_t *ptr, pmd_t val)
+{
+ trace_xen_mmu_set_pmd(ptr, val);
+
+ /* If page is not pinned, we can just update the entry
+ directly */
+ if (!xen_page_pinned(ptr)) {
+ *ptr = val;
+ return;
+ }
+
+ xen_set_pmd_hyper(ptr, val);
+}
+
+/*
+ * Associate a virtual page frame with a given physical page frame
+ * and protection flags for that frame.
+ */
+void set_pte_mfn(unsigned long vaddr, unsigned long mfn, pgprot_t flags)
+{
+ set_pte_vaddr(vaddr, mfn_pte(mfn, flags));
+}
+
+static bool xen_batched_set_pte(pte_t *ptep, pte_t pteval)
+{
+ struct mmu_update u;
+
+ if (paravirt_get_lazy_mode() != PARAVIRT_LAZY_MMU)
+ return false;
+
+ xen_mc_batch();
+
+ u.ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE;
+ u.val = pte_val_ma(pteval);
+ xen_extend_mmu_update(&u);
+
+ xen_mc_issue(PARAVIRT_LAZY_MMU);
+
+ return true;
+}
+
+static inline void __xen_set_pte(pte_t *ptep, pte_t pteval)
+{
+ if (!xen_batched_set_pte(ptep, pteval)) {
+ /*
+ * Could call native_set_pte() here and trap and
+ * emulate the PTE write but with 32-bit guests this
+ * needs two traps (one for each of the two 32-bit
+ * words in the PTE) so do one hypercall directly
+ * instead.
+ */
+ struct mmu_update u;
+
+ u.ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE;
+ u.val = pte_val_ma(pteval);
+ HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF);
+ }
+}
+
+static void xen_set_pte(pte_t *ptep, pte_t pteval)
+{
+ trace_xen_mmu_set_pte(ptep, pteval);
+ __xen_set_pte(ptep, pteval);
+}
+
+static void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pteval)
+{
+ trace_xen_mmu_set_pte_at(mm, addr, ptep, pteval);
+ __xen_set_pte(ptep, pteval);
+}
+
+pte_t xen_ptep_modify_prot_start(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep)
+{
+ /* Just return the pte as-is. We preserve the bits on commit */
+ trace_xen_mmu_ptep_modify_prot_start(mm, addr, ptep, *ptep);
+ return *ptep;
+}
+
+void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pte)
+{
+ struct mmu_update u;
+
+ trace_xen_mmu_ptep_modify_prot_commit(mm, addr, ptep, pte);
+ xen_mc_batch();
+
+ u.ptr = virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD;
+ u.val = pte_val_ma(pte);
+ xen_extend_mmu_update(&u);
+
+ xen_mc_issue(PARAVIRT_LAZY_MMU);
+}
+
+/* Assume pteval_t is equivalent to all the other *val_t types. */
+static pteval_t pte_mfn_to_pfn(pteval_t val)
+{
+ if (val & _PAGE_PRESENT) {
+ unsigned long mfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
+ unsigned long pfn = mfn_to_pfn(mfn);
+
+ pteval_t flags = val & PTE_FLAGS_MASK;
+ if (unlikely(pfn == ~0))
+ val = flags & ~_PAGE_PRESENT;
+ else
+ val = ((pteval_t)pfn << PAGE_SHIFT) | flags;
+ }
+
+ return val;
+}
+
+static pteval_t pte_pfn_to_mfn(pteval_t val)
+{
+ if (val & _PAGE_PRESENT) {
+ unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
+ pteval_t flags = val & PTE_FLAGS_MASK;
+ unsigned long mfn;
+
+ if (!xen_feature(XENFEAT_auto_translated_physmap))
+ mfn = __pfn_to_mfn(pfn);
+ else
+ mfn = pfn;
+ /*
+ * If there's no mfn for the pfn, then just create an
+ * empty non-present pte. Unfortunately this loses
+ * information about the original pfn, so
+ * pte_mfn_to_pfn is asymmetric.
+ */
+ if (unlikely(mfn == INVALID_P2M_ENTRY)) {
+ mfn = 0;
+ flags = 0;
+ } else
+ mfn &= ~(FOREIGN_FRAME_BIT | IDENTITY_FRAME_BIT);
+ val = ((pteval_t)mfn << PAGE_SHIFT) | flags;
+ }
+
+ return val;
+}
+
+__visible pteval_t xen_pte_val(pte_t pte)
+{
+ pteval_t pteval = pte.pte;
+
+ return pte_mfn_to_pfn(pteval);
+}
+PV_CALLEE_SAVE_REGS_THUNK(xen_pte_val);
+
+__visible pgdval_t xen_pgd_val(pgd_t pgd)
+{
+ return pte_mfn_to_pfn(pgd.pgd);
+}
+PV_CALLEE_SAVE_REGS_THUNK(xen_pgd_val);
+
+__visible pte_t xen_make_pte(pteval_t pte)
+{
+ pte = pte_pfn_to_mfn(pte);
+
+ return native_make_pte(pte);
+}
+PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte);
+
+__visible pgd_t xen_make_pgd(pgdval_t pgd)
+{
+ pgd = pte_pfn_to_mfn(pgd);
+ return native_make_pgd(pgd);
+}
+PV_CALLEE_SAVE_REGS_THUNK(xen_make_pgd);
+
+__visible pmdval_t xen_pmd_val(pmd_t pmd)
+{
+ return pte_mfn_to_pfn(pmd.pmd);
+}
+PV_CALLEE_SAVE_REGS_THUNK(xen_pmd_val);
+
+static void xen_set_pud_hyper(pud_t *ptr, pud_t val)
+{
+ struct mmu_update u;
+
+ preempt_disable();
+
+ xen_mc_batch();
+
+ /* ptr may be ioremapped for 64-bit pagetable setup */
+ u.ptr = arbitrary_virt_to_machine(ptr).maddr;
+ u.val = pud_val_ma(val);
+ xen_extend_mmu_update(&u);
+
+ xen_mc_issue(PARAVIRT_LAZY_MMU);
+
+ preempt_enable();
+}
+
+static void xen_set_pud(pud_t *ptr, pud_t val)
+{
+ trace_xen_mmu_set_pud(ptr, val);
+
+ /* If page is not pinned, we can just update the entry
+ directly */
+ if (!xen_page_pinned(ptr)) {
+ *ptr = val;
+ return;
+ }
+
+ xen_set_pud_hyper(ptr, val);
+}
+
+#ifdef CONFIG_X86_PAE
+static void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
+{
+ trace_xen_mmu_set_pte_atomic(ptep, pte);
+ set_64bit((u64 *)ptep, native_pte_val(pte));
+}
+
+static void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+{
+ trace_xen_mmu_pte_clear(mm, addr, ptep);
+ if (!xen_batched_set_pte(ptep, native_make_pte(0)))
+ native_pte_clear(mm, addr, ptep);
+}
+
+static void xen_pmd_clear(pmd_t *pmdp)
+{
+ trace_xen_mmu_pmd_clear(pmdp);
+ set_pmd(pmdp, __pmd(0));
+}
+#endif /* CONFIG_X86_PAE */
+
+__visible pmd_t xen_make_pmd(pmdval_t pmd)
+{
+ pmd = pte_pfn_to_mfn(pmd);
+ return native_make_pmd(pmd);
+}
+PV_CALLEE_SAVE_REGS_THUNK(xen_make_pmd);
+
+#if CONFIG_PGTABLE_LEVELS == 4
+__visible pudval_t xen_pud_val(pud_t pud)
+{
+ return pte_mfn_to_pfn(pud.pud);
+}
+PV_CALLEE_SAVE_REGS_THUNK(xen_pud_val);
+
+__visible pud_t xen_make_pud(pudval_t pud)
+{
+ pud = pte_pfn_to_mfn(pud);
+
+ return native_make_pud(pud);
+}
+PV_CALLEE_SAVE_REGS_THUNK(xen_make_pud);
+
+static pgd_t *xen_get_user_pgd(pgd_t *pgd)
+{
+ pgd_t *pgd_page = (pgd_t *)(((unsigned long)pgd) & PAGE_MASK);
+ unsigned offset = pgd - pgd_page;
+ pgd_t *user_ptr = NULL;
+
+ if (offset < pgd_index(USER_LIMIT)) {
+ struct page *page = virt_to_page(pgd_page);
+ user_ptr = (pgd_t *)page->private;
+ if (user_ptr)
+ user_ptr += offset;
+ }
+
+ return user_ptr;
+}
+
+static void __xen_set_p4d_hyper(p4d_t *ptr, p4d_t val)
+{
+ struct mmu_update u;
+
+ u.ptr = virt_to_machine(ptr).maddr;
+ u.val = p4d_val_ma(val);
+ xen_extend_mmu_update(&u);
+}
+
+/*
+ * Raw hypercall-based set_p4d, intended for use in early boot before
+ * there's a page structure. This implies:
+ * 1. The only existing pagetable is the kernel's
+ * 2. It is always pinned
+ * 3. It has no user pagetable attached to it
+ */
+static void __init xen_set_p4d_hyper(p4d_t *ptr, p4d_t val)
+{
+ preempt_disable();
+
+ xen_mc_batch();
+
+ __xen_set_p4d_hyper(ptr, val);
+
+ xen_mc_issue(PARAVIRT_LAZY_MMU);
+
+ preempt_enable();
+}
+
+static void xen_set_p4d(p4d_t *ptr, p4d_t val)
+{
+ pgd_t *user_ptr = xen_get_user_pgd((pgd_t *)ptr);
+ pgd_t pgd_val;
+
+ trace_xen_mmu_set_p4d(ptr, (p4d_t *)user_ptr, val);
+
+ /* If page is not pinned, we can just update the entry
+ directly */
+ if (!xen_page_pinned(ptr)) {
+ *ptr = val;
+ if (user_ptr) {
+ WARN_ON(xen_page_pinned(user_ptr));
+ pgd_val.pgd = p4d_val_ma(val);
+ *user_ptr = pgd_val;
+ }
+ return;
+ }
+
+ /* If it's pinned, then we can at least batch the kernel and
+ user updates together. */
+ xen_mc_batch();
+
+ __xen_set_p4d_hyper(ptr, val);
+ if (user_ptr)
+ __xen_set_p4d_hyper((p4d_t *)user_ptr, val);
+
+ xen_mc_issue(PARAVIRT_LAZY_MMU);
+}
+#endif /* CONFIG_PGTABLE_LEVELS == 4 */
+
+static int xen_pmd_walk(struct mm_struct *mm, pmd_t *pmd,
+ int (*func)(struct mm_struct *mm, struct page *, enum pt_level),
+ bool last, unsigned long limit)
+{
+ int i, nr, flush = 0;
+
+ nr = last ? pmd_index(limit) + 1 : PTRS_PER_PMD;
+ for (i = 0; i < nr; i++) {
+ if (!pmd_none(pmd[i]))
+ flush |= (*func)(mm, pmd_page(pmd[i]), PT_PTE);
+ }
+ return flush;
+}
+
+static int xen_pud_walk(struct mm_struct *mm, pud_t *pud,
+ int (*func)(struct mm_struct *mm, struct page *, enum pt_level),
+ bool last, unsigned long limit)
+{
+ int i, nr, flush = 0;
+
+ nr = last ? pud_index(limit) + 1 : PTRS_PER_PUD;
+ for (i = 0; i < nr; i++) {
+ pmd_t *pmd;
+
+ if (pud_none(pud[i]))
+ continue;
+
+ pmd = pmd_offset(&pud[i], 0);
+ if (PTRS_PER_PMD > 1)
+ flush |= (*func)(mm, virt_to_page(pmd), PT_PMD);
+ flush |= xen_pmd_walk(mm, pmd, func,
+ last && i == nr - 1, limit);
+ }
+ return flush;
+}
+
+static int xen_p4d_walk(struct mm_struct *mm, p4d_t *p4d,
+ int (*func)(struct mm_struct *mm, struct page *, enum pt_level),
+ bool last, unsigned long limit)
+{
+ int i, nr, flush = 0;
+
+ nr = last ? p4d_index(limit) + 1 : PTRS_PER_P4D;
+ for (i = 0; i < nr; i++) {
+ pud_t *pud;
+
+ if (p4d_none(p4d[i]))
+ continue;
+
+ pud = pud_offset(&p4d[i], 0);
+ if (PTRS_PER_PUD > 1)
+ flush |= (*func)(mm, virt_to_page(pud), PT_PUD);
+ flush |= xen_pud_walk(mm, pud, func,
+ last && i == nr - 1, limit);
+ }
+ return flush;
+}
+
+/*
+ * (Yet another) pagetable walker. This one is intended for pinning a
+ * pagetable. This means that it walks a pagetable and calls the
+ * callback function on each page it finds making up the page table,
+ * at every level. It walks the entire pagetable, but it only bothers
+ * pinning pte pages which are below limit. In the normal case this
+ * will be STACK_TOP_MAX, but at boot we need to pin up to
+ * FIXADDR_TOP.
+ *
+ * For 32-bit the important bit is that we don't pin beyond there,
+ * because then we start getting into Xen's ptes.
+ *
+ * For 64-bit, we must skip the Xen hole in the middle of the address
+ * space, just after the big x86-64 virtual hole.
+ */
+static int __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd,
+ int (*func)(struct mm_struct *mm, struct page *,
+ enum pt_level),
+ unsigned long limit)
+{
+ int i, nr, flush = 0;
+ unsigned hole_low, hole_high;
+
+ /* The limit is the last byte to be touched */
+ limit--;
+ BUG_ON(limit >= FIXADDR_TOP);
+
+ if (xen_feature(XENFEAT_auto_translated_physmap))
+ return 0;
+
+ /*
+ * 64-bit has a great big hole in the middle of the address
+ * space, which contains the Xen mappings. On 32-bit these
+ * will end up making a zero-sized hole and so is a no-op.
+ */
+ hole_low = pgd_index(USER_LIMIT);
+ hole_high = pgd_index(PAGE_OFFSET);
+
+ nr = pgd_index(limit) + 1;
+ for (i = 0; i < nr; i++) {
+ p4d_t *p4d;
+
+ if (i >= hole_low && i < hole_high)
+ continue;
+
+ if (pgd_none(pgd[i]))
+ continue;
+
+ p4d = p4d_offset(&pgd[i], 0);
+ if (PTRS_PER_P4D > 1)
+ flush |= (*func)(mm, virt_to_page(p4d), PT_P4D);
+ flush |= xen_p4d_walk(mm, p4d, func, i == nr - 1, limit);
+ }
+
+ /* Do the top level last, so that the callbacks can use it as
+ a cue to do final things like tlb flushes. */
+ flush |= (*func)(mm, virt_to_page(pgd), PT_PGD);
+
+ return flush;
+}
+
+static int xen_pgd_walk(struct mm_struct *mm,
+ int (*func)(struct mm_struct *mm, struct page *,
+ enum pt_level),
+ unsigned long limit)
+{
+ return __xen_pgd_walk(mm, mm->pgd, func, limit);
+}
+
+/* If we're using split pte locks, then take the page's lock and
+ return a pointer to it. Otherwise return NULL. */
+static spinlock_t *xen_pte_lock(struct page *page, struct mm_struct *mm)
+{
+ spinlock_t *ptl = NULL;
+
+#if USE_SPLIT_PTE_PTLOCKS
+ ptl = ptlock_ptr(page);
+ spin_lock_nest_lock(ptl, &mm->page_table_lock);
+#endif
+
+ return ptl;
+}
+
+static void xen_pte_unlock(void *v)
+{
+ spinlock_t *ptl = v;
+ spin_unlock(ptl);
+}
+
+static void xen_do_pin(unsigned level, unsigned long pfn)
+{
+ struct mmuext_op op;
+
+ op.cmd = level;
+ op.arg1.mfn = pfn_to_mfn(pfn);
+
+ xen_extend_mmuext_op(&op);
+}
+
+static int xen_pin_page(struct mm_struct *mm, struct page *page,
+ enum pt_level level)
+{
+ unsigned pgfl = TestSetPagePinned(page);
+ int flush;
+
+ if (pgfl)
+ flush = 0; /* already pinned */
+ else if (PageHighMem(page))
+ /* kmaps need flushing if we found an unpinned
+ highpage */
+ flush = 1;
+ else {
+ void *pt = lowmem_page_address(page);
+ unsigned long pfn = page_to_pfn(page);
+ struct multicall_space mcs = __xen_mc_entry(0);
+ spinlock_t *ptl;
+
+ flush = 0;
+
+ /*
+ * We need to hold the pagetable lock between the time
+ * we make the pagetable RO and when we actually pin
+ * it. If we don't, then other users may come in and
+ * attempt to update the pagetable by writing it,
+ * which will fail because the memory is RO but not
+ * pinned, so Xen won't do the trap'n'emulate.
+ *
+ * If we're using split pte locks, we can't hold the
+ * entire pagetable's worth of locks during the
+ * traverse, because we may wrap the preempt count (8
+ * bits). The solution is to mark RO and pin each PTE
+ * page while holding the lock. This means the number
+ * of locks we end up holding is never more than a
+ * batch size (~32 entries, at present).
+ *
+ * If we're not using split pte locks, we needn't pin
+ * the PTE pages independently, because we're
+ * protected by the overall pagetable lock.
+ */
+ ptl = NULL;
+ if (level == PT_PTE)
+ ptl = xen_pte_lock(page, mm);
+
+ MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
+ pfn_pte(pfn, PAGE_KERNEL_RO),
+ level == PT_PGD ? UVMF_TLB_FLUSH : 0);
+
+ if (ptl) {
+ xen_do_pin(MMUEXT_PIN_L1_TABLE, pfn);
+
+ /* Queue a deferred unlock for when this batch
+ is completed. */
+ xen_mc_callback(xen_pte_unlock, ptl);
+ }
+ }
+
+ return flush;
+}
+
+/* This is called just after a mm has been created, but it has not
+ been used yet. We need to make sure that its pagetable is all
+ read-only, and can be pinned. */
+static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd)
+{
+ trace_xen_mmu_pgd_pin(mm, pgd);
+
+ xen_mc_batch();
+
+ if (__xen_pgd_walk(mm, pgd, xen_pin_page, USER_LIMIT)) {
+ /* re-enable interrupts for flushing */
+ xen_mc_issue(0);
+
+ kmap_flush_unused();
+
+ xen_mc_batch();
+ }
+
+#ifdef CONFIG_X86_64
+ {
+ pgd_t *user_pgd = xen_get_user_pgd(pgd);
+
+ xen_do_pin(MMUEXT_PIN_L4_TABLE, PFN_DOWN(__pa(pgd)));
+
+ if (user_pgd) {
+ xen_pin_page(mm, virt_to_page(user_pgd), PT_PGD);
+ xen_do_pin(MMUEXT_PIN_L4_TABLE,
+ PFN_DOWN(__pa(user_pgd)));
+ }
+ }
+#else /* CONFIG_X86_32 */
+#ifdef CONFIG_X86_PAE
+ /* Need to make sure unshared kernel PMD is pinnable */
+ xen_pin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]),
+ PT_PMD);
+#endif
+ xen_do_pin(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(pgd)));
+#endif /* CONFIG_X86_64 */
+ xen_mc_issue(0);
+}
+
+static void xen_pgd_pin(struct mm_struct *mm)
+{
+ __xen_pgd_pin(mm, mm->pgd);
+}
+
+/*
+ * On save, we need to pin all pagetables to make sure they get their
+ * mfns turned into pfns. Search the list for any unpinned pgds and pin
+ * them (unpinned pgds are not currently in use, probably because the
+ * process is under construction or destruction).
+ *
+ * Expected to be called in stop_machine() ("equivalent to taking
+ * every spinlock in the system"), so the locking doesn't really
+ * matter all that much.
+ */
+void xen_mm_pin_all(void)
+{
+ struct page *page;
+
+ spin_lock(&pgd_lock);
+
+ list_for_each_entry(page, &pgd_list, lru) {
+ if (!PagePinned(page)) {
+ __xen_pgd_pin(&init_mm, (pgd_t *)page_address(page));
+ SetPageSavePinned(page);
+ }
+ }
+
+ spin_unlock(&pgd_lock);
+}
+
+/*
+ * The init_mm pagetable is really pinned as soon as it's created, but
+ * that's before we have page structures to store the bits. So do all
+ * the book-keeping now.
+ */
+static int __init xen_mark_pinned(struct mm_struct *mm, struct page *page,
+ enum pt_level level)
+{
+ SetPagePinned(page);
+ return 0;
+}
+
+static void __init xen_mark_init_mm_pinned(void)
+{
+ xen_pgd_walk(&init_mm, xen_mark_pinned, FIXADDR_TOP);
+}
+
+static int xen_unpin_page(struct mm_struct *mm, struct page *page,
+ enum pt_level level)
+{
+ unsigned pgfl = TestClearPagePinned(page);
+
+ if (pgfl && !PageHighMem(page)) {
+ void *pt = lowmem_page_address(page);
+ unsigned long pfn = page_to_pfn(page);
+ spinlock_t *ptl = NULL;
+ struct multicall_space mcs;
+
+ /*
+ * Do the converse to pin_page. If we're using split
+ * pte locks, we must be holding the lock while
+ * the pte page is unpinned but still RO to prevent
+ * concurrent updates from seeing it in this
+ * partially-pinned state.
+ */
+ if (level == PT_PTE) {
+ ptl = xen_pte_lock(page, mm);
+
+ if (ptl)
+ xen_do_pin(MMUEXT_UNPIN_TABLE, pfn);
+ }
+
+ mcs = __xen_mc_entry(0);
+
+ MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
+ pfn_pte(pfn, PAGE_KERNEL),
+ level == PT_PGD ? UVMF_TLB_FLUSH : 0);
+
+ if (ptl) {
+ /* unlock when batch completed */
+ xen_mc_callback(xen_pte_unlock, ptl);
+ }
+ }
+
+ return 0; /* never need to flush on unpin */
+}
+
+/* Release a pagetable's pages back as normal RW */
+static void __xen_pgd_unpin(struct mm_struct *mm, pgd_t *pgd)
+{
+ trace_xen_mmu_pgd_unpin(mm, pgd);
+
+ xen_mc_batch();
+
+ xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
+
+#ifdef CONFIG_X86_64
+ {
+ pgd_t *user_pgd = xen_get_user_pgd(pgd);
+
+ if (user_pgd) {
+ xen_do_pin(MMUEXT_UNPIN_TABLE,
+ PFN_DOWN(__pa(user_pgd)));
+ xen_unpin_page(mm, virt_to_page(user_pgd), PT_PGD);
+ }
+ }
+#endif
+
+#ifdef CONFIG_X86_PAE
+ /* Need to make sure unshared kernel PMD is unpinned */
+ xen_unpin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]),
+ PT_PMD);
+#endif
+
+ __xen_pgd_walk(mm, pgd, xen_unpin_page, USER_LIMIT);
+
+ xen_mc_issue(0);
+}
+
+static void xen_pgd_unpin(struct mm_struct *mm)
+{
+ __xen_pgd_unpin(mm, mm->pgd);
+}
+
+/*
+ * On resume, undo any pinning done at save, so that the rest of the
+ * kernel doesn't see any unexpected pinned pagetables.
+ */
+void xen_mm_unpin_all(void)
+{
+ struct page *page;
+
+ spin_lock(&pgd_lock);
+
+ list_for_each_entry(page, &pgd_list, lru) {
+ if (PageSavePinned(page)) {
+ BUG_ON(!PagePinned(page));
+ __xen_pgd_unpin(&init_mm, (pgd_t *)page_address(page));
+ ClearPageSavePinned(page);
+ }
+ }
+
+ spin_unlock(&pgd_lock);
+}
+
+static void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next)
+{
+ spin_lock(&next->page_table_lock);
+ xen_pgd_pin(next);
+ spin_unlock(&next->page_table_lock);
+}
+
+static void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
+{
+ spin_lock(&mm->page_table_lock);
+ xen_pgd_pin(mm);
+ spin_unlock(&mm->page_table_lock);
+}
+
+
+#ifdef CONFIG_SMP
+/* Another cpu may still have its %cr3 pointing at the pagetable, so
+ we need to repoint it somewhere else before we can unpin it. */
+static void drop_other_mm_ref(void *info)
+{
+ struct mm_struct *mm = info;
+ struct mm_struct *active_mm;
+
+ active_mm = this_cpu_read(cpu_tlbstate.active_mm);
+
+ if (active_mm == mm && this_cpu_read(cpu_tlbstate.state) != TLBSTATE_OK)
+ leave_mm(smp_processor_id());
+
+ /* If this cpu still has a stale cr3 reference, then make sure
+ it has been flushed. */
+ if (this_cpu_read(xen_current_cr3) == __pa(mm->pgd))
+ load_cr3(swapper_pg_dir);
+}
+
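+/*
+ * Make sure no CPU still has mm's pagetable loaded in %cr3, possibly
+ * only lazily, before it is unpinned: switch the local CPU away and
+ * IPI any remote CPU with a live or stale reference.
+ */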
+static void xen_drop_mm_ref(struct mm_struct *mm)
+{
+ cpumask_var_t mask;
+ unsigned cpu;
+
+ if (current->active_mm == mm) {
+ if (current->mm == mm)
+ load_cr3(swapper_pg_dir);
+ else
+ leave_mm(smp_processor_id());
+ }
+
+ /* Get the "official" set of cpus referring to our pagetable. */
+ if (!alloc_cpumask_var(&mask, GFP_ATOMIC)) {
+ for_each_online_cpu(cpu) {
+ if (!cpumask_test_cpu(cpu, mm_cpumask(mm))
+ && per_cpu(xen_current_cr3, cpu) != __pa(mm->pgd))
+ continue;
+ smp_call_function_single(cpu, drop_other_mm_ref, mm, 1);
+ }
+ return;
+ }
+ cpumask_copy(mask, mm_cpumask(mm));
+
+ /* It's possible that a vcpu may have a stale reference to our
+ cr3, because it's in lazy mode and it hasn't yet flushed
+ its set of pending hypercalls. In this case, we can
+ look at its actual current cr3 value, and force it to flush
+ if needed. */
+ for_each_online_cpu(cpu) {
+ if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd))
+ cpumask_set_cpu(cpu, mask);
+ }
+
+ if (!cpumask_empty(mask))
+ smp_call_function_many(mask, drop_other_mm_ref, mm, 1);
+ free_cpumask_var(mask);
+}
+#else
+static void xen_drop_mm_ref(struct mm_struct *mm)
+{
+ if (current->active_mm == mm)
+ load_cr3(swapper_pg_dir);
+}
+#endif
+
+/*
+ * While a process runs, Xen pins its pagetables, which means that the
+ * hypervisor forces them to be read-only, and it controls all updates
+ * to them. This means that all pagetable updates have to go via the
+ * hypervisor, which is moderately expensive.
+ *
+ * Since we're pulling the pagetable down, we switch to init_mm,
+ * unpin the old process's pagetable and mark it all read-write, which
+ * allows further operations on it to be simple memory accesses.
+ *
+ * The only subtle point is that another CPU may be still using the
+ * pagetable because of lazy tlb flushing. This means we need to
+ * switch all CPUs off this pagetable before we can unpin it.
+ */
+static void xen_exit_mmap(struct mm_struct *mm)
+{
+ get_cpu(); /* make sure we don't move around */
+ xen_drop_mm_ref(mm);
+ put_cpu();
+
+ spin_lock(&mm->page_table_lock);
+
+ /* pgd may not be pinned in the error exit path of execve */
+ if (xen_page_pinned(mm->pgd))
+ xen_pgd_unpin(mm);
+
+ spin_unlock(&mm->page_table_lock);
+}
+
+static void xen_post_allocator_init(void);
+
+static void __init pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
+{
+ struct mmuext_op op;
+
+ op.cmd = cmd;
+ op.arg1.mfn = pfn_to_mfn(pfn);
+ if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
+ BUG();
+}
+
+#ifdef CONFIG_X86_64
+static void __init xen_cleanhighmap(unsigned long vaddr,
+ unsigned long vaddr_end)
+{
+ unsigned long kernel_end = roundup((unsigned long)_brk_end, PMD_SIZE) - 1;
+ pmd_t *pmd = level2_kernel_pgt + pmd_index(vaddr);
+
+ /* NOTE: The loop is more greedy than the cleanup_highmap variant.
+ * We include the PMD passed in on _both_ boundaries. */
+ for (; vaddr <= vaddr_end && (pmd < (level2_kernel_pgt + PTRS_PER_PMD));
+ pmd++, vaddr += PMD_SIZE) {
+ if (pmd_none(*pmd))
+ continue;
+ if (vaddr < (unsigned long) _text || vaddr > kernel_end)
+ set_pmd(pmd, __pmd(0));
+ }
+ /* In case we did something silly, we should crash in this function
+ * instead of somewhere later where it would be confusing. */
+ xen_mc_flush();
+}
+
+/*
+ * Make a page range writeable and free it.
+ */
+static void __init xen_free_ro_pages(unsigned long paddr, unsigned long size)
+{
+ void *vaddr = __va(paddr);
+ void *vaddr_end = vaddr + size;
+
+ for (; vaddr < vaddr_end; vaddr += PAGE_SIZE)
+ make_lowmem_page_readwrite(vaddr);
+
+ memblock_free(paddr, size);
+}
+
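+/*
+ * Return a single pagetable frame to the allocator: unpin it if
+ * requested, clear its pinned flag and free it as a normal RW page.
+ */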
+static void __init xen_cleanmfnmap_free_pgtbl(void *pgtbl, bool unpin)
+{
+ unsigned long pa = __pa(pgtbl) & PHYSICAL_PAGE_MASK;
+
+ if (unpin)
+ pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(pa));
+ ClearPagePinned(virt_to_page(__va(pa)));
+ xen_free_ro_pages(pa, PAGE_SIZE);
+}
+
+static void __init xen_cleanmfnmap_pmd(pmd_t *pmd, bool unpin)
+{
+ unsigned long pa;
+ pte_t *pte_tbl;
+ int i;
+
+ if (pmd_large(*pmd)) {
+ pa = pmd_val(*pmd) & PHYSICAL_PAGE_MASK;
+ xen_free_ro_pages(pa, PMD_SIZE);
+ return;
+ }
+
+ pte_tbl = pte_offset_kernel(pmd, 0);
+ for (i = 0; i < PTRS_PER_PTE; i++) {
+ if (pte_none(pte_tbl[i]))
+ continue;
+ pa = pte_pfn(pte_tbl[i]) << PAGE_SHIFT;
+ xen_free_ro_pages(pa, PAGE_SIZE);
+ }
+ set_pmd(pmd, __pmd(0));
+ xen_cleanmfnmap_free_pgtbl(pte_tbl, unpin);
+}
+
+static void __init xen_cleanmfnmap_pud(pud_t *pud, bool unpin)
+{
+ unsigned long pa;
+ pmd_t *pmd_tbl;
+ int i;
+
+ if (pud_large(*pud)) {
+ pa = pud_val(*pud) & PHYSICAL_PAGE_MASK;
+ xen_free_ro_pages(pa, PUD_SIZE);
+ return;
+ }
+
+ pmd_tbl = pmd_offset(pud, 0);
+ for (i = 0; i < PTRS_PER_PMD; i++) {
+ if (pmd_none(pmd_tbl[i]))
+ continue;
+ xen_cleanmfnmap_pmd(pmd_tbl + i, unpin);
+ }
+ set_pud(pud, __pud(0));
+ xen_cleanmfnmap_free_pgtbl(pmd_tbl, unpin);
+}
+
+static void __init xen_cleanmfnmap_p4d(p4d_t *p4d, bool unpin)
+{
+ unsigned long pa;
+ pud_t *pud_tbl;
+ int i;
+
+ if (p4d_large(*p4d)) {
+ pa = p4d_val(*p4d) & PHYSICAL_PAGE_MASK;
+ xen_free_ro_pages(pa, P4D_SIZE);
+ return;
+ }
+
+ pud_tbl = pud_offset(p4d, 0);
+ for (i = 0; i < PTRS_PER_PUD; i++) {
+ if (pud_none(pud_tbl[i]))
+ continue;
+ xen_cleanmfnmap_pud(pud_tbl + i, unpin);
+ }
+ set_p4d(p4d, __p4d(0));
+ xen_cleanmfnmap_free_pgtbl(pud_tbl, unpin);
+}
+
+/*
+ * Since the initial P->M table is well isolated we can (and, since it may
+ * be large, should) also free the page tables mapping it.
+ */
+static void __init xen_cleanmfnmap(unsigned long vaddr)
+{
+ pgd_t *pgd;
+ p4d_t *p4d;
+ unsigned int i;
+ bool unpin;
+
+ unpin = (vaddr == 2 * PGDIR_SIZE);
+ vaddr &= PMD_MASK;
+ pgd = pgd_offset_k(vaddr);
+ p4d = p4d_offset(pgd, 0);
+ for (i = 0; i < PTRS_PER_P4D; i++) {
+ if (p4d_none(p4d[i]))
+ continue;
+ xen_cleanmfnmap_p4d(p4d + i, unpin);
+ }
+ if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
+ set_pgd(pgd, __pgd(0));
+ xen_cleanmfnmap_free_pgtbl(p4d, unpin);
+ }
+}
+
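+/*
+ * Free the memory holding the initial p2m list supplied by Xen once the
+ * kernel's own p2m tree has taken over.
+ */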
+static void __init xen_pagetable_p2m_free(void)
+{
+ unsigned long size;
+ unsigned long addr;
+
+ size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long));
+
+ /* No memory or already called. */
+ if ((unsigned long)xen_p2m_addr == xen_start_info->mfn_list)
+ return;
+
+ /* Using the __ka address and filling it with INVALID_P2M_ENTRY. */
+ memset((void *)xen_start_info->mfn_list, 0xff, size);
+
+ addr = xen_start_info->mfn_list;
+ /*
+ * We could be in __ka space.
+ * We round up to the PMD, which means that if anybody at this stage is
+ * using the __ka address of xen_start_info or
+ * xen_start_info->shared_info they are going to crash. Fortunately
+ * we have already revectored in xen_setup_kernel_pagetable and in
+ * xen_setup_shared_info.
+ */
+ size = roundup(size, PMD_SIZE);
+
+ if (addr >= __START_KERNEL_map) {
+ xen_cleanhighmap(addr, addr + size);
+ size = PAGE_ALIGN(xen_start_info->nr_pages *
+ sizeof(unsigned long));
+ memblock_free(__pa(addr), size);
+ } else {
+ xen_cleanmfnmap(addr);
+ }
+}
+
+static void __init xen_pagetable_cleanhighmap(void)
+{
+ unsigned long size;
+ unsigned long addr;
+
+ /* At this stage, cleanup_highmap has already cleaned __ka space
+ * from _brk_limit way up to the max_pfn_mapped (which is the end of
+ * the ramdisk). We continue on, erasing PMD entries that point to page
+ * tables - do note that they are accessible at this stage via __va.
+ * For good measure we also round up to the PMD - which means that if
+ * anybody is using a __ka address for the initial boot stack and tries
+ * to use it, they are going to crash. The xen_start_info has been
+ * taken care of already in xen_setup_kernel_pagetable. */
+ addr = xen_start_info->pt_base;
+ size = roundup(xen_start_info->nr_pt_frames * PAGE_SIZE, PMD_SIZE);
+
+ xen_cleanhighmap(addr, addr + size);
+ xen_start_info->pt_base = (unsigned long)__va(__pa(xen_start_info->pt_base));
+#ifdef DEBUG
+ /* This is superfluous, but let's do it anyway. The
+ * MODULES_VADDR -> MODULES_END range should be clear of
+ * anything at this stage. */
+ xen_cleanhighmap(MODULES_VADDR, roundup(MODULES_VADDR, PUD_SIZE) - 1);
+#endif
+}
+#endif
+
+static void __init xen_pagetable_p2m_setup(void)
+{
+ if (xen_feature(XENFEAT_auto_translated_physmap))
+ return;
+
+ xen_vmalloc_p2m_tree();
+
+#ifdef CONFIG_X86_64
+ xen_pagetable_p2m_free();
+
+ xen_pagetable_cleanhighmap();
+#endif
+ /* And revector! Bye bye old array */
+ xen_start_info->mfn_list = (unsigned long)xen_p2m_addr;
+}
+
+static void __init xen_pagetable_init(void)
+{
+ paging_init();
+ xen_post_allocator_init();
+
+ xen_pagetable_p2m_setup();
+
+ /* Allocate and initialize top and mid mfn levels for p2m structure */
+ xen_build_mfn_list_list();
+
+ /* Remap memory freed due to conflicts with E820 map */
+ if (!xen_feature(XENFEAT_auto_translated_physmap))
+ xen_remap_memory();
+
+ xen_setup_shared_info();
+}
+static void xen_write_cr2(unsigned long cr2)
+{
+ this_cpu_read(xen_vcpu)->arch.cr2 = cr2;
+}
+
+static unsigned long xen_read_cr2(void)
+{
+ return this_cpu_read(xen_vcpu)->arch.cr2;
+}
+
+unsigned long xen_read_cr2_direct(void)
+{
+ return this_cpu_read(xen_vcpu_info.arch.cr2);
+}
+
+static void xen_flush_tlb(void)
+{
+ struct mmuext_op *op;
+ struct multicall_space mcs;
+
+ trace_xen_mmu_flush_tlb(0);
+
+ preempt_disable();
+
+ mcs = xen_mc_entry(sizeof(*op));
+
+ op = mcs.args;
+ op->cmd = MMUEXT_TLB_FLUSH_LOCAL;
+ MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
+
+ xen_mc_issue(PARAVIRT_LAZY_MMU);
+
+ preempt_enable();
+}
+
+static void xen_flush_tlb_single(unsigned long addr)
+{
+ struct mmuext_op *op;
+ struct multicall_space mcs;
+
+ trace_xen_mmu_flush_tlb_single(addr);
+
+ preempt_disable();
+
+ mcs = xen_mc_entry(sizeof(*op));
+ op = mcs.args;
+ op->cmd = MMUEXT_INVLPG_LOCAL;
+ op->arg1.linear_addr = addr & PAGE_MASK;
+ MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
+
+ xen_mc_issue(PARAVIRT_LAZY_MMU);
+
+ preempt_enable();
+}
+
+static void xen_flush_tlb_others(const struct cpumask *cpus,
+ struct mm_struct *mm, unsigned long start,
+ unsigned long end)
+{
+ struct {
+ struct mmuext_op op;
+#ifdef CONFIG_SMP
+ DECLARE_BITMAP(mask, num_processors);
+#else
+ DECLARE_BITMAP(mask, NR_CPUS);
+#endif
+ } *args;
+ struct multicall_space mcs;
+
+ trace_xen_mmu_flush_tlb_others(cpus, mm, start, end);
+
+ if (cpumask_empty(cpus))
+ return; /* nothing to do */
+
+ mcs = xen_mc_entry(sizeof(*args));
+ args = mcs.args;
+ args->op.arg2.vcpumask = to_cpumask(args->mask);
+
+ /* Remove us, and any offline CPUS. */
+ cpumask_and(to_cpumask(args->mask), cpus, cpu_online_mask);
+ cpumask_clear_cpu(smp_processor_id(), to_cpumask(args->mask));
+
+ args->op.cmd = MMUEXT_TLB_FLUSH_MULTI;
+ if (end != TLB_FLUSH_ALL && (end - start) <= PAGE_SIZE) {
+ args->op.cmd = MMUEXT_INVLPG_MULTI;
+ args->op.arg1.linear_addr = start;
+ }
+
+ MULTI_mmuext_op(mcs.mc, &args->op, 1, NULL, DOMID_SELF);
+
+ xen_mc_issue(PARAVIRT_LAZY_MMU);
+}
+
+static unsigned long xen_read_cr3(void)
+{
+ return this_cpu_read(xen_cr3);
+}
+
+static void set_current_cr3(void *v)
+{
+ this_cpu_write(xen_current_cr3, (unsigned long)v);
+}
+
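+/*
+ * Queue an MMUEXT_NEW_BASEPTR (kernel) or MMUEXT_NEW_USER_BASEPTR (user)
+ * operation to switch the pagetable base. For the kernel case, xen_cr3
+ * is updated immediately and xen_current_cr3 once the batch has been
+ * submitted to the hypervisor.
+ */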
+static void __xen_write_cr3(bool kernel, unsigned long cr3)
+{
+ struct mmuext_op op;
+ unsigned long mfn;
+
+ trace_xen_mmu_write_cr3(kernel, cr3);
+
+ if (cr3)
+ mfn = pfn_to_mfn(PFN_DOWN(cr3));
+ else
+ mfn = 0;
+
+ WARN_ON(mfn == 0 && kernel);
+
+ op.cmd = kernel ? MMUEXT_NEW_BASEPTR : MMUEXT_NEW_USER_BASEPTR;
+ op.arg1.mfn = mfn;
+
+ xen_extend_mmuext_op(&op);
+
+ if (kernel) {
+ this_cpu_write(xen_cr3, cr3);
+
+ /* Update xen_current_cr3 once the batch has actually
+ been submitted. */
+ xen_mc_callback(set_current_cr3, (void *)cr3);
+ }
+}
+static void xen_write_cr3(unsigned long cr3)
+{
+ BUG_ON(preemptible());
+
+ xen_mc_batch(); /* disables interrupts */
+
+ /* Update while interrupts are disabled, so it's atomic with
+ respect to ipis */
+ this_cpu_write(xen_cr3, cr3);
+
+ __xen_write_cr3(true, cr3);
+
+#ifdef CONFIG_X86_64
+ {
+ pgd_t *user_pgd = xen_get_user_pgd(__va(cr3));
+ if (user_pgd)
+ __xen_write_cr3(false, __pa(user_pgd));
+ else
+ __xen_write_cr3(false, 0);
+ }
+#endif
+
+ xen_mc_issue(PARAVIRT_LAZY_CPU); /* interrupts restored */
+}
+
+#ifdef CONFIG_X86_64
+/*
+ * At the start of the day - when Xen launches a guest, it has already
+ * built pagetables for the guest. We diligently look over them
+ * in xen_setup_kernel_pagetable and graft them as appropriate into the
+ * init_level4_pgt and its friends. Then when we are happy we load
+ * the new init_level4_pgt - and continue on.
+ *
+ * The generic code starts (start_kernel) and 'init_mem_mapping' sets
+ * up the rest of the pagetables. When it has completed it loads the cr3.
+ * N.B. that bare metal would start at 'start_kernel' (and the early
+ * #PF handler would create bootstrap pagetables) - so we are running
+ * with the same assumptions about what to do when write_cr3 is executed
+ * at this point.
+ *
+ * Since there are no user page tables at all, we have two variants
+ * of xen_write_cr3 - the early bootup (this one), and the late one
+ * (xen_write_cr3). The reason we have to do that is that in 64-bit
+ * the Linux kernel and user-space are both in ring 3 while the
+ * hypervisor is in ring 0.
+ */
+static void __init xen_write_cr3_init(unsigned long cr3)
+{
+ BUG_ON(preemptible());
+
+ xen_mc_batch(); /* disables interrupts */
+
+ /* Update while interrupts are disabled, so it's atomic with
+ respect to ipis */
+ this_cpu_write(xen_cr3, cr3);
+
+ __xen_write_cr3(true, cr3);
+
+ xen_mc_issue(PARAVIRT_LAZY_CPU); /* interrupts restored */
+}
+#endif
+
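+/*
+ * On 64-bit, each pgd gets a separate user pgd (stored in the pgd page's
+ * ->private); the vsyscall mapping, if enabled, is the only entry
+ * installed in it here.
+ */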
+static int xen_pgd_alloc(struct mm_struct *mm)
+{
+ pgd_t *pgd = mm->pgd;
+ int ret = 0;
+
+ BUG_ON(PagePinned(virt_to_page(pgd)));
+
+#ifdef CONFIG_X86_64
+ {
+ struct page *page = virt_to_page(pgd);
+ pgd_t *user_pgd;
+
+ BUG_ON(page->private != 0);
+
+ ret = -ENOMEM;
+
+ user_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
+ page->private = (unsigned long)user_pgd;
+
+ if (user_pgd != NULL) {
+#ifdef CONFIG_X86_VSYSCALL_EMULATION
+ user_pgd[pgd_index(VSYSCALL_ADDR)] =
+ __pgd(__pa(level3_user_vsyscall) | _PAGE_TABLE);
+#endif
+ ret = 0;
+ }
+
+ BUG_ON(PagePinned(virt_to_page(xen_get_user_pgd(pgd))));
+ }
+#endif
+ return ret;
+}
+
+static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
+{
+#ifdef CONFIG_X86_64
+ pgd_t *user_pgd = xen_get_user_pgd(pgd);
+
+ if (user_pgd)
+ free_page((unsigned long)user_pgd);
+#endif
+}
+
+/*
+ * Init-time set_pte while constructing initial pagetables, which
+ * doesn't allow RO page table pages to be remapped RW.
+ *
+ * If there is no MFN for this PFN then this page is initially
+ * ballooned out so clear the PTE (as in decrease_reservation() in
+ * drivers/xen/balloon.c).
+ *
+ * Many of these PTE updates are done on unpinned and writable pages
+ * and doing a hypercall for these is unnecessary and expensive. At
+ * this point it is not possible to tell if a page is pinned or not,
+ * so always write the PTE directly and rely on Xen trapping and
+ * emulating any updates as necessary.
+ */
+__visible pte_t xen_make_pte_init(pteval_t pte)
+{
+#ifdef CONFIG_X86_64
+ unsigned long pfn;
+
+ /*
+ * Pages belonging to the initial p2m list mapped outside the default
+ * address range must be mapped read-only. This region contains the
+ * page tables for mapping the p2m list, too, and page tables MUST be
+ * mapped read-only.
+ */
+ pfn = (pte & PTE_PFN_MASK) >> PAGE_SHIFT;
+ if (xen_start_info->mfn_list < __START_KERNEL_map &&
+ pfn >= xen_start_info->first_p2m_pfn &&
+ pfn < xen_start_info->first_p2m_pfn + xen_start_info->nr_p2m_frames)
+ pte &= ~_PAGE_RW;
+#endif
+ pte = pte_pfn_to_mfn(pte);
+ return native_make_pte(pte);
+}
+PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte_init);
+
+static void __init xen_set_pte_init(pte_t *ptep, pte_t pte)
+{
+#ifdef CONFIG_X86_32
+ /* If there's an existing pte, then don't allow _PAGE_RW to be set */
+ if (pte_mfn(pte) != INVALID_P2M_ENTRY
+ && pte_val_ma(*ptep) & _PAGE_PRESENT)
+ pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) &
+ pte_val_ma(pte));
+#endif
+ native_set_pte(ptep, pte);
+}
+
+/* Early in boot, while setting up the initial pagetable, assume
+ everything is pinned. */
+static void __init xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn)
+{
+#ifdef CONFIG_FLATMEM
+ BUG_ON(mem_map); /* should only be used early */
+#endif
+ make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
+ pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
+}
+
+/* Used for pmd and pud */
+static void __init xen_alloc_pmd_init(struct mm_struct *mm, unsigned long pfn)
+{
+#ifdef CONFIG_FLATMEM
+ BUG_ON(mem_map); /* should only be used early */
+#endif
+ make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
+}
+
+/* Early release_pte assumes that all pts are pinned, since there's
+ only init_mm and anything attached to that is pinned. */
+static void __init xen_release_pte_init(unsigned long pfn)
+{
+ pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
+ make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
+}
+
+static void __init xen_release_pmd_init(unsigned long pfn)
+{
+ make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
+}
+
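+/* Add a pin/unpin mmuext op for @pfn to the multicall batch in progress. */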
+static inline void __pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
+{
+ struct multicall_space mcs;
+ struct mmuext_op *op;
+
+ mcs = __xen_mc_entry(sizeof(*op));
+ op = mcs.args;
+ op->cmd = cmd;
+ op->arg1.mfn = pfn_to_mfn(pfn);
+
+ MULTI_mmuext_op(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
+}
+
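+/* Queue an update of the kernel mapping of @pfn to @prot in the current batch. */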
+static inline void __set_pfn_prot(unsigned long pfn, pgprot_t prot)
+{
+ struct multicall_space mcs;
+ unsigned long addr = (unsigned long)__va(pfn << PAGE_SHIFT);
+
+ mcs = __xen_mc_entry(0);
+ MULTI_update_va_mapping(mcs.mc, (unsigned long)addr,
+ pfn_pte(pfn, prot), 0);
+}
+
+/* This needs to make sure the new pte page is pinned iff it's being
+ attached to a pinned pagetable. */
+static inline void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn,
+ unsigned level)
+{
+ bool pinned = PagePinned(virt_to_page(mm->pgd));
+
+ trace_xen_mmu_alloc_ptpage(mm, pfn, level, pinned);
+
+ if (pinned) {
+ struct page *page = pfn_to_page(pfn);
+
+ SetPagePinned(page);
+
+ if (!PageHighMem(page)) {
+ xen_mc_batch();
+
+ __set_pfn_prot(pfn, PAGE_KERNEL_RO);
+
+ if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS)
+ __pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
+
+ xen_mc_issue(PARAVIRT_LAZY_MMU);
+ } else {
+ /* make sure there are no stray mappings of
+ this page */
+ kmap_flush_unused();
+ }
+ }
+}
+
+static void xen_alloc_pte(struct mm_struct *mm, unsigned long pfn)
+{
+ xen_alloc_ptpage(mm, pfn, PT_PTE);
+}
+
+static void xen_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
+{
+ xen_alloc_ptpage(mm, pfn, PT_PMD);
+}
+
+/* This should never happen until we're OK to use struct page */
+static inline void xen_release_ptpage(unsigned long pfn, unsigned level)
+{
+ struct page *page = pfn_to_page(pfn);
+ bool pinned = PagePinned(page);
+
+ trace_xen_mmu_release_ptpage(pfn, level, pinned);
+
+ if (pinned) {
+ if (!PageHighMem(page)) {
+ xen_mc_batch();
+
+ if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS)
+ __pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
+
+ __set_pfn_prot(pfn, PAGE_KERNEL);
+
+ xen_mc_issue(PARAVIRT_LAZY_MMU);
+ }
+ ClearPagePinned(page);
+ }
+}
+
+static void xen_release_pte(unsigned long pfn)
+{
+ xen_release_ptpage(pfn, PT_PTE);
+}
+
+static void xen_release_pmd(unsigned long pfn)
+{
+ xen_release_ptpage(pfn, PT_PMD);
+}
+
+#if CONFIG_PGTABLE_LEVELS >= 4
+static void xen_alloc_pud(struct mm_struct *mm, unsigned long pfn)
+{
+ xen_alloc_ptpage(mm, pfn, PT_PUD);
+}
+
+static void xen_release_pud(unsigned long pfn)
+{
+ xen_release_ptpage(pfn, PT_PUD);
+}
+#endif
+
+void __init xen_reserve_top(void)
+{
+#ifdef CONFIG_X86_32
+ unsigned long top = HYPERVISOR_VIRT_START;
+ struct xen_platform_parameters pp;
+
+ if (HYPERVISOR_xen_version(XENVER_platform_parameters, &pp) == 0)
+ top = pp.virt_start;
+
+ reserve_top_address(-top);
+#endif /* CONFIG_X86_32 */
+}
+
+/*
+ * Like __va(), but returns the address in the kernel mapping (which is
+ * all we have until the physical memory mapping has been set up).
+ */
+static void * __init __ka(phys_addr_t paddr)
+{
+#ifdef CONFIG_X86_64
+ return (void *)(paddr + __START_KERNEL_map);
+#else
+ return __va(paddr);
+#endif
+}
+
+/* Convert a machine address to physical address */
+static unsigned long __init m2p(phys_addr_t maddr)
+{
+ phys_addr_t paddr;
+
+ maddr &= PTE_PFN_MASK;
+ paddr = mfn_to_pfn(maddr >> PAGE_SHIFT) << PAGE_SHIFT;
+
+ return paddr;
+}
+
+/* Convert a machine address to kernel virtual */
+static void * __init m2v(phys_addr_t maddr)
+{
+ return __ka(m2p(maddr));
+}
+
+/* Set the page permissions on identity-mapped pages */
+static void __init set_page_prot_flags(void *addr, pgprot_t prot,
+ unsigned long flags)
+{
+ unsigned long pfn = __pa(addr) >> PAGE_SHIFT;
+ pte_t pte = pfn_pte(pfn, prot);
+
+ if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, flags))
+ BUG();
+}
+static void __init set_page_prot(void *addr, pgprot_t prot)
+{
+ return set_page_prot_flags(addr, prot, UVMF_NONE);
+}
+#ifdef CONFIG_X86_32
+static void __init xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
+{
+ unsigned pmdidx, pteidx;
+ unsigned ident_pte;
+ unsigned long pfn;
+
+ level1_ident_pgt = extend_brk(sizeof(pte_t) * LEVEL1_IDENT_ENTRIES,
+ PAGE_SIZE);
+
+ ident_pte = 0;
+ pfn = 0;
+ for (pmdidx = 0; pmdidx < PTRS_PER_PMD && pfn < max_pfn; pmdidx++) {
+ pte_t *pte_page;
+
+ /* Reuse or allocate a page of ptes */
+ if (pmd_present(pmd[pmdidx]))
+ pte_page = m2v(pmd[pmdidx].pmd);
+ else {
+ /* Check for free pte pages */
+ if (ident_pte == LEVEL1_IDENT_ENTRIES)
+ break;
+
+ pte_page = &level1_ident_pgt[ident_pte];
+ ident_pte += PTRS_PER_PTE;
+
+ pmd[pmdidx] = __pmd(__pa(pte_page) | _PAGE_TABLE);
+ }
+
+ /* Install mappings */
+ for (pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) {
+ pte_t pte;
+
+ if (pfn > max_pfn_mapped)
+ max_pfn_mapped = pfn;
+
+ if (!pte_none(pte_page[pteidx]))
+ continue;
+
+ pte = pfn_pte(pfn, PAGE_KERNEL_EXEC);
+ pte_page[pteidx] = pte;
+ }
+ }
+
+ for (pteidx = 0; pteidx < ident_pte; pteidx += PTRS_PER_PTE)
+ set_page_prot(&level1_ident_pgt[pteidx], PAGE_KERNEL_RO);
+
+ set_page_prot(pmd, PAGE_KERNEL_RO);
+}
+#endif
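+
+/*
+ * Read the location and size of the machine-to-physical translation
+ * table from the hypervisor, falling back to the compile-time default
+ * size if the memory op is not supported.
+ */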
+void __init xen_setup_machphys_mapping(void)
+{
+ struct xen_machphys_mapping mapping;
+
+ if (HYPERVISOR_memory_op(XENMEM_machphys_mapping, &mapping) == 0) {
+ machine_to_phys_mapping = (unsigned long *)mapping.v_start;
+ machine_to_phys_nr = mapping.max_mfn + 1;
+ } else {
+ machine_to_phys_nr = MACH2PHYS_NR_ENTRIES;
+ }
+#ifdef CONFIG_X86_32
+ WARN_ON((machine_to_phys_mapping + (machine_to_phys_nr - 1))
+ < machine_to_phys_mapping);
+#endif
+}
+
+#ifdef CONFIG_X86_64
+static void __init convert_pfn_mfn(void *v)
+{
+ pte_t *pte = v;
+ int i;
+
+ /* All levels are converted the same way, so just treat them
+ as ptes. */
+ for (i = 0; i < PTRS_PER_PTE; i++)
+ pte[i] = xen_make_pte(pte[i].pte);
+}
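+
+/*
+ * If @addr is the first or last remaining frame of the Xen-provided
+ * pagetable area, make it writable again, clear it and shrink
+ * [*pt_base, *pt_end) so the frame is not reserved later.
+ */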
+static void __init check_pt_base(unsigned long *pt_base, unsigned long *pt_end,
+ unsigned long addr)
+{
+ if (*pt_base == PFN_DOWN(__pa(addr))) {
+ set_page_prot_flags((void *)addr, PAGE_KERNEL, UVMF_INVLPG);
+ clear_page((void *)addr);
+ (*pt_base)++;
+ }
+ if (*pt_end == PFN_DOWN(__pa(addr))) {
+ set_page_prot_flags((void *)addr, PAGE_KERNEL, UVMF_INVLPG);
+ clear_page((void *)addr);
+ (*pt_end)--;
+ }
+}
+/*
+ * Set up the initial kernel pagetable.
+ *
+ * We can construct this by grafting the Xen-provided pagetable into
+ * head_64.S's preconstructed pagetables. We copy the Xen L2s into
+ * level2_ident_pgt and level2_kernel_pgt. This means that only the
+ * kernel has a physical mapping to start with - but that's enough to
+ * get __va working. We need to fill in the rest of the physical
+ * mapping once some sort of allocator has been set up.
+ */
+void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
+{
+ pud_t *l3;
+ pmd_t *l2;
+ unsigned long addr[3];
+ unsigned long pt_base, pt_end;
+ unsigned i;
+
+ /* max_pfn_mapped is the last pfn mapped in the initial memory
+ * mappings. Considering that on Xen after the kernel mappings we
+ * have the mappings of some pages that don't exist in pfn space, we
+ * set max_pfn_mapped to the last real pfn mapped. */
+ if (xen_start_info->mfn_list < __START_KERNEL_map)
+ max_pfn_mapped = xen_start_info->first_p2m_pfn;
+ else
+ max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->mfn_list));
+
+ pt_base = PFN_DOWN(__pa(xen_start_info->pt_base));
+ pt_end = pt_base + xen_start_info->nr_pt_frames;
+
+ /* Zap identity mapping */
+ init_level4_pgt[0] = __pgd(0);
+
+ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
+ /* Pre-constructed entries are in pfn, so convert to mfn */
+ /* L4[272] -> level3_ident_pgt
+ * L4[511] -> level3_kernel_pgt */
+ convert_pfn_mfn(init_level4_pgt);
+
+ /* L3_i[0] -> level2_ident_pgt */
+ convert_pfn_mfn(level3_ident_pgt);
+ /* L3_k[510] -> level2_kernel_pgt
+ * L3_k[511] -> level2_fixmap_pgt */
+ convert_pfn_mfn(level3_kernel_pgt);
+
+ /* L3_k[511][506] -> level1_fixmap_pgt */
+ convert_pfn_mfn(level2_fixmap_pgt);
+ }
+ /* We get [511][511] and have Xen's version of level2_kernel_pgt */
+ l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
+ l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
+
+ addr[0] = (unsigned long)pgd;
+ addr[1] = (unsigned long)l3;
+ addr[2] = (unsigned long)l2;
+ /* Graft it onto L4[272][0]. Note that we are creating an aliasing problem:
+ * both L4[272][0] and L4[511][510] have entries that point to the same
+ * L2 (PMD) tables. This means that if you modify it in __va space
+ * it will also be modified in the __ka space! (But if you just
+ * modify the PMD table to point to other PTEs or none, then you
+ * are OK - which is what cleanup_highmap does.) */
+ copy_page(level2_ident_pgt, l2);
+ /* Graft it onto L4[511][510] */
+ copy_page(level2_kernel_pgt, l2);
+
+ /* Copy the initial P->M table mappings if necessary. */
+ i = pgd_index(xen_start_info->mfn_list);
+ if (i && i < pgd_index(__START_KERNEL_map))
+ init_level4_pgt[i] = ((pgd_t *)xen_start_info->pt_base)[i];
+
+ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
+ /* Make pagetable pieces RO */
+ set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
+ set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
+ set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
+ set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
+ set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
+ set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
+ set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
+ set_page_prot(level1_fixmap_pgt, PAGE_KERNEL_RO);
+
+ /* Pin down new L4 */
+ pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
+ PFN_DOWN(__pa_symbol(init_level4_pgt)));
+
+ /* Unpin Xen-provided one */
+ pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
+
+ /*
+ * At this stage there can be no user pgd, and no page
+ * structure to attach it to, so make sure we just set kernel
+ * pgd.
+ */
+ xen_mc_batch();
+ __xen_write_cr3(true, __pa(init_level4_pgt));
+ xen_mc_issue(PARAVIRT_LAZY_CPU);
+ } else
+ native_write_cr3(__pa(init_level4_pgt));
+
+ /* We can't easily rip out L3 and L2, as the Xen pagetables are
+ * set out this way: [L4], [L1], [L2], [L3], [L1], [L1] ... for
+ * the initial domain. For guests using the toolstack, they are in:
+ * [L4], [L3], [L2], [L1], [L1] order. So for dom0 we can only
+ * rip out the [L4] (pgd), but for guests we shave off three pages.
+ */
+ for (i = 0; i < ARRAY_SIZE(addr); i++)
+ check_pt_base(&pt_base, &pt_end, addr[i]);
+
+ /* The Xen pagetable area we are still using, now up to three pages smaller */
+ xen_pt_base = PFN_PHYS(pt_base);
+ xen_pt_size = (pt_end - pt_base) * PAGE_SIZE;
+ memblock_reserve(xen_pt_base, xen_pt_size);
+
+ /* Revector the xen_start_info */
+ xen_start_info = (struct start_info *)__va(__pa(xen_start_info));
+}
+
+/*
+ * Read a value from a physical address.
+ */
+static unsigned long __init xen_read_phys_ulong(phys_addr_t addr)
+{
+ unsigned long *vaddr;
+ unsigned long val;
+
+ vaddr = early_memremap_ro(addr, sizeof(val));
+ val = *vaddr;
+ early_memunmap(vaddr, sizeof(val));
+ return val;
+}
+
+/*
+ * Translate a virtual address to a physical one without relying on mapped
+ * page tables.
+ */
+static phys_addr_t __init xen_early_virt_to_phys(unsigned long vaddr)
+{
+ phys_addr_t pa;
+ pgd_t pgd;
+ pud_t pud;
+ pmd_t pmd;
+ pte_t pte;
+
+ pa = read_cr3();
+ pgd = native_make_pgd(xen_read_phys_ulong(pa + pgd_index(vaddr) *
+ sizeof(pgd)));
+ if (!pgd_present(pgd))
+ return 0;
+
+ pa = pgd_val(pgd) & PTE_PFN_MASK;
+ pud = native_make_pud(xen_read_phys_ulong(pa + pud_index(vaddr) *
+ sizeof(pud)));
+ if (!pud_present(pud))
+ return 0;
+ pa = pud_pfn(pud) << PAGE_SHIFT;
+ if (pud_large(pud))
+ return pa + (vaddr & ~PUD_MASK);
+
+ pmd = native_make_pmd(xen_read_phys_ulong(pa + pmd_index(vaddr) *
+ sizeof(pmd)));
+ if (!pmd_present(pmd))
+ return 0;
+ pa = pmd_pfn(pmd) << PAGE_SHIFT;
+ if (pmd_large(pmd))
+ return pa + (vaddr & ~PMD_MASK);
+
+ pte = native_make_pte(xen_read_phys_ulong(pa + pte_index(vaddr) *
+ sizeof(pte)));
+ if (!pte_present(pte))
+ return 0;
+ pa = pte_pfn(pte) << PAGE_SHIFT;
+
+ return pa | (vaddr & ~PAGE_MASK);
+}
+
+/*
+ * Find a new area for the hypervisor-supplied p2m list and relocate the p2m to
+ * this area.
+ */
+void __init xen_relocate_p2m(void)
+{
+ phys_addr_t size, new_area, pt_phys, pmd_phys, pud_phys, p4d_phys;
+ unsigned long p2m_pfn, p2m_pfn_end, n_frames, pfn, pfn_end;
+ int n_pte, n_pt, n_pmd, n_pud, n_p4d, idx_pte, idx_pt, idx_pmd, idx_pud, idx_p4d;
+ pte_t *pt;
+ pmd_t *pmd;
+ pud_t *pud;
+ p4d_t *p4d = NULL;
+ pgd_t *pgd;
+ unsigned long *new_p2m;
+ int save_pud;
+
+ size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long));
+ n_pte = roundup(size, PAGE_SIZE) >> PAGE_SHIFT;
+ n_pt = roundup(size, PMD_SIZE) >> PMD_SHIFT;
+ n_pmd = roundup(size, PUD_SIZE) >> PUD_SHIFT;
+ n_pud = roundup(size, P4D_SIZE) >> P4D_SHIFT;
+ if (PTRS_PER_P4D > 1)
+ n_p4d = roundup(size, PGDIR_SIZE) >> PGDIR_SHIFT;
+ else
+ n_p4d = 0;
+ n_frames = n_pte + n_pt + n_pmd + n_pud + n_p4d;
+
+ new_area = xen_find_free_area(PFN_PHYS(n_frames));
+ if (!new_area) {
+ xen_raw_console_write("Can't find new memory area for p2m needed due to E820 map conflict\n");
+ BUG();
+ }
+
+ /*
+ * Set up the page tables for addressing the new p2m list.
+ * We have asked the hypervisor to map the p2m list at the user address
+ * PUD_SIZE. It may have done so, or it may have used a kernel space
+ * address depending on the Xen version.
+ * To avoid any possible virtual address collision, just use
+ * 2 * PUD_SIZE for the new area.
+ */
+ p4d_phys = new_area;
+ pud_phys = p4d_phys + PFN_PHYS(n_p4d);
+ pmd_phys = pud_phys + PFN_PHYS(n_pud);
+ pt_phys = pmd_phys + PFN_PHYS(n_pmd);
+ p2m_pfn = PFN_DOWN(pt_phys) + n_pt;
+
+ pgd = __va(read_cr3());
+ new_p2m = (unsigned long *)(2 * PGDIR_SIZE);
+ idx_p4d = 0;
+ save_pud = n_pud;
+ do {
+ if (n_p4d > 0) {
+ p4d = early_memremap(p4d_phys, PAGE_SIZE);
+ clear_page(p4d);
+ n_pud = min(save_pud, PTRS_PER_P4D);
+ }
+ for (idx_pud = 0; idx_pud < n_pud; idx_pud++) {
+ pud = early_memremap(pud_phys, PAGE_SIZE);
+ clear_page(pud);
+ for (idx_pmd = 0; idx_pmd < min(n_pmd, PTRS_PER_PUD);
+ idx_pmd++) {
+ pmd = early_memremap(pmd_phys, PAGE_SIZE);
+ clear_page(pmd);
+ for (idx_pt = 0; idx_pt < min(n_pt, PTRS_PER_PMD);
+ idx_pt++) {
+ pt = early_memremap(pt_phys, PAGE_SIZE);
+ clear_page(pt);
+ for (idx_pte = 0;
+ idx_pte < min(n_pte, PTRS_PER_PTE);
+ idx_pte++) {
+ set_pte(pt + idx_pte,
+ pfn_pte(p2m_pfn, PAGE_KERNEL));
+ p2m_pfn++;
+ }
+ n_pte -= PTRS_PER_PTE;
+ early_memunmap(pt, PAGE_SIZE);
+ make_lowmem_page_readonly(__va(pt_phys));
+ pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE,
+ PFN_DOWN(pt_phys));
+ set_pmd(pmd + idx_pt,
+ __pmd(_PAGE_TABLE | pt_phys));
+ pt_phys += PAGE_SIZE;
+ }
+ n_pt -= PTRS_PER_PMD;
+ early_memunmap(pmd, PAGE_SIZE);
+ make_lowmem_page_readonly(__va(pmd_phys));
+ pin_pagetable_pfn(MMUEXT_PIN_L2_TABLE,
+ PFN_DOWN(pmd_phys));
+ set_pud(pud + idx_pmd, __pud(_PAGE_TABLE | pmd_phys));
+ pmd_phys += PAGE_SIZE;
+ }
+ n_pmd -= PTRS_PER_PUD;
+ early_memunmap(pud, PAGE_SIZE);
+ make_lowmem_page_readonly(__va(pud_phys));
+ pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, PFN_DOWN(pud_phys));
+ if (n_p4d > 0)
+ set_p4d(p4d + idx_pud, __p4d(_PAGE_TABLE | pud_phys));
+ else
+ set_pgd(pgd + 2 + idx_pud, __pgd(_PAGE_TABLE | pud_phys));
+ pud_phys += PAGE_SIZE;
+ }
+ if (n_p4d > 0) {
+ save_pud -= PTRS_PER_P4D;
+ early_memunmap(p4d, PAGE_SIZE);
+ make_lowmem_page_readonly(__va(p4d_phys));
+ pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE, PFN_DOWN(p4d_phys));
+ set_pgd(pgd + 2 + idx_p4d, __pgd(_PAGE_TABLE | p4d_phys));
+ p4d_phys += PAGE_SIZE;
+ }
+ } while (++idx_p4d < n_p4d);
+
+ /* Now copy the old p2m info to the new area. */
+ memcpy(new_p2m, xen_p2m_addr, size);
+ xen_p2m_addr = new_p2m;
+
+ /* Release the old p2m list and set new list info. */
+ p2m_pfn = PFN_DOWN(xen_early_virt_to_phys(xen_start_info->mfn_list));
+ BUG_ON(!p2m_pfn);
+ p2m_pfn_end = p2m_pfn + PFN_DOWN(size);
+
+ if (xen_start_info->mfn_list < __START_KERNEL_map) {
+ pfn = xen_start_info->first_p2m_pfn;
+ pfn_end = xen_start_info->first_p2m_pfn +
+ xen_start_info->nr_p2m_frames;
+ set_pgd(pgd + 1, __pgd(0));
+ } else {
+ pfn = p2m_pfn;
+ pfn_end = p2m_pfn_end;
+ }
+
+ memblock_free(PFN_PHYS(pfn), PAGE_SIZE * (pfn_end - pfn));
+ while (pfn < pfn_end) {
+ if (pfn == p2m_pfn) {
+ pfn = p2m_pfn_end;
+ continue;
+ }
+ make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
+ pfn++;
+ }
+
+ xen_start_info->mfn_list = (unsigned long)xen_p2m_addr;
+ xen_start_info->first_p2m_pfn = PFN_DOWN(new_area);
+ xen_start_info->nr_p2m_frames = n_frames;
+}
+
+#else /* !CONFIG_X86_64 */
+static RESERVE_BRK_ARRAY(pmd_t, initial_kernel_pmd, PTRS_PER_PMD);
+static RESERVE_BRK_ARRAY(pmd_t, swapper_kernel_pmd, PTRS_PER_PMD);
+
+static void __init xen_write_cr3_init(unsigned long cr3)
+{
+ unsigned long pfn = PFN_DOWN(__pa(swapper_pg_dir));
+
+ BUG_ON(read_cr3() != __pa(initial_page_table));
+ BUG_ON(cr3 != __pa(swapper_pg_dir));
+
+ /*
+ * We are switching to swapper_pg_dir for the first time (from
+ * initial_page_table) and therefore need to mark that page
+ * read-only and then pin it.
+ *
+ * Xen disallows sharing of kernel PMDs for PAE
+ * guests. Therefore we must copy the kernel PMD from
+ * initial_page_table into a new kernel PMD to be used in
+ * swapper_pg_dir.
+ */
+ swapper_kernel_pmd =
+ extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);
+ copy_page(swapper_kernel_pmd, initial_kernel_pmd);
+ swapper_pg_dir[KERNEL_PGD_BOUNDARY] =
+ __pgd(__pa(swapper_kernel_pmd) | _PAGE_PRESENT);
+ set_page_prot(swapper_kernel_pmd, PAGE_KERNEL_RO);
+
+ set_page_prot(swapper_pg_dir, PAGE_KERNEL_RO);
+ xen_write_cr3(cr3);
+ pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, pfn);
+
+ pin_pagetable_pfn(MMUEXT_UNPIN_TABLE,
+ PFN_DOWN(__pa(initial_page_table)));
+ set_page_prot(initial_page_table, PAGE_KERNEL);
+ set_page_prot(initial_kernel_pmd, PAGE_KERNEL);
+
+ pv_mmu_ops.write_cr3 = &xen_write_cr3;
+}
+
+/*
+ * For 32-bit domains xen_start_info->pt_base is the pgd address, which might
+ * not be the first page table in the page table pool.
+ * Iterate through the initial page tables to find the real page table base.
+ */
+static phys_addr_t xen_find_pt_base(pmd_t *pmd)
+{
+ phys_addr_t pt_base, paddr;
+ unsigned pmdidx;
+
+ pt_base = min(__pa(xen_start_info->pt_base), __pa(pmd));
+
+ for (pmdidx = 0; pmdidx < PTRS_PER_PMD; pmdidx++)
+ if (pmd_present(pmd[pmdidx]) && !pmd_large(pmd[pmdidx])) {
+ paddr = m2p(pmd[pmdidx].pmd);
+ pt_base = min(pt_base, paddr);
+ }
+
+ return pt_base;
+}
+
+void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
+{
+ pmd_t *kernel_pmd;
+
+ kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd);
+
+ xen_pt_base = xen_find_pt_base(kernel_pmd);
+ xen_pt_size = xen_start_info->nr_pt_frames * PAGE_SIZE;
+
+ initial_kernel_pmd =
+ extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);
+
+ max_pfn_mapped = PFN_DOWN(xen_pt_base + xen_pt_size + 512 * 1024);
+
+ copy_page(initial_kernel_pmd, kernel_pmd);
+
+ xen_map_identity_early(initial_kernel_pmd, max_pfn);
+
+ copy_page(initial_page_table, pgd);
+ initial_page_table[KERNEL_PGD_BOUNDARY] =
+ __pgd(__pa(initial_kernel_pmd) | _PAGE_PRESENT);
+
+ set_page_prot(initial_kernel_pmd, PAGE_KERNEL_RO);
+ set_page_prot(initial_page_table, PAGE_KERNEL_RO);
+ set_page_prot(empty_zero_page, PAGE_KERNEL_RO);
+
+ pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
+
+ pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE,
+ PFN_DOWN(__pa(initial_page_table)));
+ xen_write_cr3(__pa(initial_page_table));
+
+ memblock_reserve(xen_pt_base, xen_pt_size);
+}
+#endif /* CONFIG_X86_64 */
+
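+/*
+ * Reserve the pages backing xen_start_info, the Xenstore ring and (for
+ * domU) the console ring so they are not released to the page allocator.
+ */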
+void __init xen_reserve_special_pages(void)
+{
+ phys_addr_t paddr;
+
+ memblock_reserve(__pa(xen_start_info), PAGE_SIZE);
+ if (xen_start_info->store_mfn) {
+ paddr = PFN_PHYS(mfn_to_pfn(xen_start_info->store_mfn));
+ memblock_reserve(paddr, PAGE_SIZE);
+ }
+ if (!xen_initial_domain()) {
+ paddr = PFN_PHYS(mfn_to_pfn(xen_start_info->console.domU.mfn));
+ memblock_reserve(paddr, PAGE_SIZE);
+ }
+}
+
+void __init xen_pt_check_e820(void)
+{
+ if (xen_is_e820_reserved(xen_pt_base, xen_pt_size)) {
+ xen_raw_console_write("Xen hypervisor allocated page table memory conflicts with E820 map\n");
+ BUG();
+ }
+}
+
+static unsigned char dummy_mapping[PAGE_SIZE] __page_aligned_bss;
+
+static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
+{
+ pte_t pte;
+
+ phys >>= PAGE_SHIFT;
+
+ switch (idx) {
+ case FIX_BTMAP_END ... FIX_BTMAP_BEGIN:
+ case FIX_RO_IDT:
+#ifdef CONFIG_X86_32
+ case FIX_WP_TEST:
+# ifdef CONFIG_HIGHMEM
+ case FIX_KMAP_BEGIN ... FIX_KMAP_END:
+# endif
+#elif defined(CONFIG_X86_VSYSCALL_EMULATION)
+ case VSYSCALL_PAGE:
+#endif
+ case FIX_TEXT_POKE0:
+ case FIX_TEXT_POKE1:
+ case FIX_GDT_REMAP_BEGIN ... FIX_GDT_REMAP_END:
+ /* All local page mappings */
+ pte = pfn_pte(phys, prot);
+ break;
+
+#ifdef CONFIG_X86_LOCAL_APIC
+ case FIX_APIC_BASE: /* maps dummy local APIC */
+ pte = pfn_pte(PFN_DOWN(__pa(dummy_mapping)), PAGE_KERNEL);
+ break;
+#endif
+
+#ifdef CONFIG_X86_IO_APIC
+ case FIX_IO_APIC_BASE_0 ... FIX_IO_APIC_BASE_END:
+ /*
+ * We just don't map the IO APIC - all access is via
+ * hypercalls. Keep the address in the pte for reference.
+ */
+ pte = pfn_pte(PFN_DOWN(__pa(dummy_mapping)), PAGE_KERNEL);
+ break;
+#endif
+
+ case FIX_PARAVIRT_BOOTMAP:
+ /* This is an MFN, but it isn't an IO mapping from the
+ IO domain */
+ pte = mfn_pte(phys, prot);
+ break;
+
+ default:
+ /* By default, set_fixmap is used for hardware mappings */
+ pte = mfn_pte(phys, prot);
+ break;
+ }
+
+ __native_set_fixmap(idx, pte);
+
+#ifdef CONFIG_X86_VSYSCALL_EMULATION
+ /* Replicate changes to map the vsyscall page into the user
+ pagetable vsyscall mapping. */
+ if (idx == VSYSCALL_PAGE) {
+ unsigned long vaddr = __fix_to_virt(idx);
+ set_pte_vaddr_pud(level3_user_vsyscall, vaddr, pte);
+ }
+#endif
+}
+
+static void __init xen_post_allocator_init(void)
+{
+ if (xen_feature(XENFEAT_auto_translated_physmap))
+ return;
+
+ pv_mmu_ops.set_pte = xen_set_pte;
+ pv_mmu_ops.set_pmd = xen_set_pmd;
+ pv_mmu_ops.set_pud = xen_set_pud;
+#if CONFIG_PGTABLE_LEVELS >= 4
+ pv_mmu_ops.set_p4d = xen_set_p4d;
+#endif
+
+ /* This will work as long as patching hasn't happened yet
+ (which it hasn't) */
+ pv_mmu_ops.alloc_pte = xen_alloc_pte;
+ pv_mmu_ops.alloc_pmd = xen_alloc_pmd;
+ pv_mmu_ops.release_pte = xen_release_pte;
+ pv_mmu_ops.release_pmd = xen_release_pmd;
+#if CONFIG_PGTABLE_LEVELS >= 4
+ pv_mmu_ops.alloc_pud = xen_alloc_pud;
+ pv_mmu_ops.release_pud = xen_release_pud;
+#endif
+ pv_mmu_ops.make_pte = PV_CALLEE_SAVE(xen_make_pte);
+
+#ifdef CONFIG_X86_64
+ pv_mmu_ops.write_cr3 = &xen_write_cr3;
+ SetPagePinned(virt_to_page(level3_user_vsyscall));
+#endif
+ xen_mark_init_mm_pinned();
+}
+
+static void xen_leave_lazy_mmu(void)
+{
+ preempt_disable();
+ xen_mc_flush();
+ paravirt_leave_lazy_mmu();
+ preempt_enable();
+}
+
+static const struct pv_mmu_ops xen_mmu_ops __initconst = {
+ .read_cr2 = xen_read_cr2,
+ .write_cr2 = xen_write_cr2,
+
+ .read_cr3 = xen_read_cr3,
+ .write_cr3 = xen_write_cr3_init,
+
+ .flush_tlb_user = xen_flush_tlb,
+ .flush_tlb_kernel = xen_flush_tlb,
+ .flush_tlb_single = xen_flush_tlb_single,
+ .flush_tlb_others = xen_flush_tlb_others,
+
+ .pte_update = paravirt_nop,
+
+ .pgd_alloc = xen_pgd_alloc,
+ .pgd_free = xen_pgd_free,
+
+ .alloc_pte = xen_alloc_pte_init,
+ .release_pte = xen_release_pte_init,
+ .alloc_pmd = xen_alloc_pmd_init,
+ .release_pmd = xen_release_pmd_init,
+
+ .set_pte = xen_set_pte_init,
+ .set_pte_at = xen_set_pte_at,
+ .set_pmd = xen_set_pmd_hyper,
+
+ .ptep_modify_prot_start = __ptep_modify_prot_start,
+ .ptep_modify_prot_commit = __ptep_modify_prot_commit,
+
+ .pte_val = PV_CALLEE_SAVE(xen_pte_val),
+ .pgd_val = PV_CALLEE_SAVE(xen_pgd_val),
+
+ .make_pte = PV_CALLEE_SAVE(xen_make_pte_init),
+ .make_pgd = PV_CALLEE_SAVE(xen_make_pgd),
+
+#ifdef CONFIG_X86_PAE
+ .set_pte_atomic = xen_set_pte_atomic,
+ .pte_clear = xen_pte_clear,
+ .pmd_clear = xen_pmd_clear,
+#endif /* CONFIG_X86_PAE */
+ .set_pud = xen_set_pud_hyper,
+
+ .make_pmd = PV_CALLEE_SAVE(xen_make_pmd),
+ .pmd_val = PV_CALLEE_SAVE(xen_pmd_val),
+
+#if CONFIG_PGTABLE_LEVELS >= 4
+ .pud_val = PV_CALLEE_SAVE(xen_pud_val),
+ .make_pud = PV_CALLEE_SAVE(xen_make_pud),
+ .set_p4d = xen_set_p4d_hyper,
+
+ .alloc_pud = xen_alloc_pmd_init,
+ .release_pud = xen_release_pmd_init,
+#endif /* CONFIG_PGTABLE_LEVELS >= 4 */
+
+ .activate_mm = xen_activate_mm,
+ .dup_mmap = xen_dup_mmap,
+ .exit_mmap = xen_exit_mmap,
+
+ .lazy_mode = {
+ .enter = paravirt_enter_lazy_mmu,
+ .leave = xen_leave_lazy_mmu,
+ .flush = paravirt_flush_lazy_mmu,
+ },
+
+ .set_fixmap = xen_set_fixmap,
+};
+
+void __init xen_init_mmu_ops(void)
+{
+ x86_init.paging.pagetable_init = xen_pagetable_init;
+
+ if (xen_feature(XENFEAT_auto_translated_physmap))
+ return;
+
+ pv_mmu_ops = xen_mmu_ops;
+
+ memset(dummy_mapping, 0xff, PAGE_SIZE);
+}
+
+/* Protected by xen_reservation_lock. */
+#define MAX_CONTIG_ORDER 9 /* 2MB */
+static unsigned long discontig_frames[1<<MAX_CONTIG_ORDER];
+
+#define VOID_PTE (mfn_pte(0, __pgprot(0)))
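+
+/*
+ * Unmap a 2^order page range starting at @vaddr: optionally record the
+ * backing MFNs (@in_frames) and/or PFNs (@out_frames), replace the PTEs
+ * with VOID_PTE and mark the PFNs invalid in the p2m.
+ */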
+static void xen_zap_pfn_range(unsigned long vaddr, unsigned int order,
+ unsigned long *in_frames,
+ unsigned long *out_frames)
+{
+ int i;
+ struct multicall_space mcs;
+
+ xen_mc_batch();
+ for (i = 0; i < (1UL<<order); i++, vaddr += PAGE_SIZE) {
+ mcs = __xen_mc_entry(0);
+
+ if (in_frames)
+ in_frames[i] = virt_to_mfn(vaddr);
+
+ MULTI_update_va_mapping(mcs.mc, vaddr, VOID_PTE, 0);
+ __set_phys_to_machine(virt_to_pfn(vaddr), INVALID_P2M_ENTRY);
+
+ if (out_frames)
+ out_frames[i] = virt_to_pfn(vaddr);
+ }
+ xen_mc_issue(0);
+}
+
+/*
+ * Update the pfn-to-mfn mappings for a virtual address range, either to
+ * point to an array of mfns, or contiguously from a single starting
+ * mfn.
+ */
+static void xen_remap_exchanged_ptes(unsigned long vaddr, int order,
+ unsigned long *mfns,
+ unsigned long first_mfn)
+{
+ unsigned i, limit;
+ unsigned long mfn;
+
+ xen_mc_batch();
+
+ limit = 1u << order;
+ for (i = 0; i < limit; i++, vaddr += PAGE_SIZE) {
+ struct multicall_space mcs;
+ unsigned flags;
+
+ mcs = __xen_mc_entry(0);
+ if (mfns)
+ mfn = mfns[i];
+ else
+ mfn = first_mfn + i;
+
+ if (i < (limit - 1))
+ flags = 0;
+ else {
+ if (order == 0)
+ flags = UVMF_INVLPG | UVMF_ALL;
+ else
+ flags = UVMF_TLB_FLUSH | UVMF_ALL;
+ }
+
+ MULTI_update_va_mapping(mcs.mc, vaddr,
+ mfn_pte(mfn, PAGE_KERNEL), flags);
+
+ set_phys_to_machine(virt_to_pfn(vaddr), mfn);
+ }
+
+ xen_mc_issue(0);
+}
+
+/*
+ * Perform the hypercall to exchange a region of our pfns to point to
+ * memory with the required contiguous alignment. Takes the pfns as
+ * input, and populates mfns as output.
+ *
+ * Returns a success code indicating whether the hypervisor was able to
+ * satisfy the request or not.
+ */
+static int xen_exchange_memory(unsigned long extents_in, unsigned int order_in,
+ unsigned long *pfns_in,
+ unsigned long extents_out,
+ unsigned int order_out,
+ unsigned long *mfns_out,
+ unsigned int address_bits)
+{
+ long rc;
+ int success;
+
+ struct xen_memory_exchange exchange = {
+ .in = {
+ .nr_extents = extents_in,
+ .extent_order = order_in,
+ .extent_start = pfns_in,
+ .domid = DOMID_SELF
+ },
+ .out = {
+ .nr_extents = extents_out,
+ .extent_order = order_out,
+ .extent_start = mfns_out,
+ .address_bits = address_bits,
+ .domid = DOMID_SELF
+ }
+ };
+
+ BUG_ON(extents_in << order_in != extents_out << order_out);
+
+ rc = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);
+ success = (exchange.nr_exchanged == extents_in);
+
+ BUG_ON(!success && ((exchange.nr_exchanged != 0) || (rc == 0)));
+ BUG_ON(success && (rc != 0));
+
+ return success;
+}
+
+int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
+ unsigned int address_bits,
+ dma_addr_t *dma_handle)
+{
+ unsigned long *in_frames = discontig_frames, out_frame;
+ unsigned long flags;
+ int success;
+ unsigned long vstart = (unsigned long)phys_to_virt(pstart);
+
+ /*
+ * Currently an auto-translated guest will not perform I/O, nor will
+ * it require PAE page directories below 4GB. Therefore any calls to
+ * this function are redundant and can be ignored.
+ */
+
+ if (xen_feature(XENFEAT_auto_translated_physmap))
+ return 0;
+
+ if (unlikely(order > MAX_CONTIG_ORDER))
+ return -ENOMEM;
+
+ memset((void *) vstart, 0, PAGE_SIZE << order);
+
+ spin_lock_irqsave(&xen_reservation_lock, flags);
+
+ /* 1. Zap current PTEs, remembering MFNs. */
+ xen_zap_pfn_range(vstart, order, in_frames, NULL);
+
+ /* 2. Get a new contiguous memory extent. */
+ out_frame = virt_to_pfn(vstart);
+ success = xen_exchange_memory(1UL << order, 0, in_frames,
+ 1, order, &out_frame,
+ address_bits);
+
+ /* 3. Map the new extent in place of old pages. */
+ if (success)
+ xen_remap_exchanged_ptes(vstart, order, NULL, out_frame);
+ else
+ xen_remap_exchanged_ptes(vstart, order, in_frames, 0);
+
+ spin_unlock_irqrestore(&xen_reservation_lock, flags);
+
+ *dma_handle = virt_to_machine(vstart).maddr;
+ return success ? 0 : -ENOMEM;
+}
+EXPORT_SYMBOL_GPL(xen_create_contiguous_region);
+
+void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order)
+{
+ unsigned long *out_frames = discontig_frames, in_frame;
+ unsigned long flags;
+ int success;
+ unsigned long vstart;
+
+ if (xen_feature(XENFEAT_auto_translated_physmap))
+ return;
+
+ if (unlikely(order > MAX_CONTIG_ORDER))
+ return;
+
+ vstart = (unsigned long)phys_to_virt(pstart);
+ memset((void *) vstart, 0, PAGE_SIZE << order);
+
+ spin_lock_irqsave(&xen_reservation_lock, flags);
+
+ /* 1. Find start MFN of contiguous extent. */
+ in_frame = virt_to_mfn(vstart);
+
+ /* 2. Zap current PTEs. */
+ xen_zap_pfn_range(vstart, order, NULL, out_frames);
+
+ /* 3. Do the exchange for non-contiguous MFNs. */
+ success = xen_exchange_memory(1, order, &in_frame, 1UL << order,
+ 0, out_frames, 0);
+
+ /* 4. Map new pages in place of old pages. */
+ if (success)
+ xen_remap_exchanged_ptes(vstart, order, out_frames, 0);
+ else
+ xen_remap_exchanged_ptes(vstart, order, NULL, in_frame);
+
+ spin_unlock_irqrestore(&xen_reservation_lock, flags);
+}
+EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);
+
+#ifdef CONFIG_KEXEC_CORE
+phys_addr_t paddr_vmcoreinfo_note(void)
+{
+ if (xen_pv_domain())
+ return virt_to_machine(&vmcoreinfo_note).maddr;
+ else
+ return __pa_symbol(&vmcoreinfo_note);
+}
+#endif /* CONFIG_KEXEC_CORE */
diff --git a/arch/x86/xen/pmu.h b/arch/x86/xen/pmu.h
index af5f0ad94078..4be5355b56f7 100644
--- a/arch/x86/xen/pmu.h
+++ b/arch/x86/xen/pmu.h
@@ -4,8 +4,13 @@
#include <xen/interface/xenpmu.h>
irqreturn_t xen_pmu_irq_handler(int irq, void *dev_id);
+#ifdef CONFIG_XEN_HAVE_VPMU
void xen_pmu_init(int cpu);
void xen_pmu_finish(int cpu);
+#else
+static inline void xen_pmu_init(int cpu) {}
+static inline void xen_pmu_finish(int cpu) {}
+#endif
bool is_xen_pmu(int cpu);
bool pmu_msr_read(unsigned int msr, uint64_t *val, int *err);
bool pmu_msr_write(unsigned int msr, uint32_t low, uint32_t high, int *err);
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index eaa36162ed4a..82ac611f2fc1 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -1,63 +1,21 @@
-/*
- * Xen SMP support
- *
- * This file implements the Xen versions of smp_ops. SMP under Xen is
- * very straightforward. Bringing a CPU up is simply a matter of
- * loading its initial context and setting it running.
- *
- * IPIs are handled through the Xen event mechanism.
- *
- * Because virtual CPUs can be scheduled onto any real CPU, there's no
- * useful topology information for the kernel to make use of. As a
- * result, all CPUs are treated as if they're single-core and
- * single-threaded.
- */
-#include <linux/sched.h>
-#include <linux/err.h>
-#include <linux/slab.h>
#include <linux/smp.h>
-#include <linux/irq_work.h>
-#include <linux/tick.h>
-#include <linux/nmi.h>
-
-#include <asm/paravirt.h>
-#include <asm/desc.h>
-#include <asm/pgtable.h>
-#include <asm/cpu.h>
-
-#include <xen/interface/xen.h>
-#include <xen/interface/vcpu.h>
-#include <xen/interface/xenpmu.h>
-
-#include <asm/xen/interface.h>
-#include <asm/xen/hypercall.h>
+#include <linux/slab.h>
+#include <linux/cpumask.h>
+#include <linux/percpu.h>
-#include <xen/xen.h>
-#include <xen/page.h>
#include <xen/events.h>
#include <xen/hvc-console.h>
#include "xen-ops.h"
-#include "mmu.h"
#include "smp.h"
-#include "pmu.h"
-
-cpumask_var_t xen_cpu_initialized_map;
-struct xen_common_irq {
- int irq;
- char *name;
-};
static DEFINE_PER_CPU(struct xen_common_irq, xen_resched_irq) = { .irq = -1 };
static DEFINE_PER_CPU(struct xen_common_irq, xen_callfunc_irq) = { .irq = -1 };
static DEFINE_PER_CPU(struct xen_common_irq, xen_callfuncsingle_irq) = { .irq = -1 };
-static DEFINE_PER_CPU(struct xen_common_irq, xen_irq_work) = { .irq = -1 };
static DEFINE_PER_CPU(struct xen_common_irq, xen_debug_irq) = { .irq = -1 };
-static DEFINE_PER_CPU(struct xen_common_irq, xen_pmu_irq) = { .irq = -1 };
static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id);
static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id);
-static irqreturn_t xen_irq_work_interrupt(int irq, void *dev_id);
/*
* Reschedule call back.
@@ -70,42 +28,6 @@ static irqreturn_t xen_reschedule_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static void cpu_bringup(void)
-{
- int cpu;
-
- cpu_init();
- touch_softlockup_watchdog();
- preempt_disable();
-
- /* PVH runs in ring 0 and allows us to do native syscalls. Yay! */
- if (!xen_feature(XENFEAT_supervisor_mode_kernel)) {
- xen_enable_sysenter();
- xen_enable_syscall();
- }
- cpu = smp_processor_id();
- smp_store_cpu_info(cpu);
- cpu_data(cpu).x86_max_cores = 1;
- set_cpu_sibling_map(cpu);
-
- xen_setup_cpu_clockevents();
-
- notify_cpu_starting(cpu);
-
- set_cpu_online(cpu, true);
-
- cpu_set_state_online(cpu); /* Implies full memory barrier. */
-
- /* We can take interrupts now: we're officially "up". */
- local_irq_enable();
-}
-
-asmlinkage __visible void cpu_bringup_and_idle(void)
-{
- cpu_bringup();
- cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
-}
-
void xen_smp_intr_free(unsigned int cpu)
{
if (per_cpu(xen_resched_irq, cpu).irq >= 0) {
@@ -133,27 +55,12 @@ void xen_smp_intr_free(unsigned int cpu)
kfree(per_cpu(xen_callfuncsingle_irq, cpu).name);
per_cpu(xen_callfuncsingle_irq, cpu).name = NULL;
}
- if (xen_hvm_domain())
- return;
-
- if (per_cpu(xen_irq_work, cpu).irq >= 0) {
- unbind_from_irqhandler(per_cpu(xen_irq_work, cpu).irq, NULL);
- per_cpu(xen_irq_work, cpu).irq = -1;
- kfree(per_cpu(xen_irq_work, cpu).name);
- per_cpu(xen_irq_work, cpu).name = NULL;
- }
+}
- if (per_cpu(xen_pmu_irq, cpu).irq >= 0) {
- unbind_from_irqhandler(per_cpu(xen_pmu_irq, cpu).irq, NULL);
- per_cpu(xen_pmu_irq, cpu).irq = -1;
- kfree(per_cpu(xen_pmu_irq, cpu).name);
- per_cpu(xen_pmu_irq, cpu).name = NULL;
- }
-};
int xen_smp_intr_init(unsigned int cpu)
{
int rc;
- char *resched_name, *callfunc_name, *debug_name, *pmu_name;
+ char *resched_name, *callfunc_name, *debug_name;
resched_name = kasprintf(GFP_KERNEL, "resched%d", cpu);
rc = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR,
@@ -200,37 +107,6 @@ int xen_smp_intr_init(unsigned int cpu)
per_cpu(xen_callfuncsingle_irq, cpu).irq = rc;
per_cpu(xen_callfuncsingle_irq, cpu).name = callfunc_name;
- /*
- * The IRQ worker on PVHVM goes through the native path and uses the
- * IPI mechanism.
- */
- if (xen_hvm_domain())
- return 0;
-
- callfunc_name = kasprintf(GFP_KERNEL, "irqwork%d", cpu);
- rc = bind_ipi_to_irqhandler(XEN_IRQ_WORK_VECTOR,
- cpu,
- xen_irq_work_interrupt,
- IRQF_PERCPU|IRQF_NOBALANCING,
- callfunc_name,
- NULL);
- if (rc < 0)
- goto fail;
- per_cpu(xen_irq_work, cpu).irq = rc;
- per_cpu(xen_irq_work, cpu).name = callfunc_name;
-
- if (is_xen_pmu(cpu)) {
- pmu_name = kasprintf(GFP_KERNEL, "pmu%d", cpu);
- rc = bind_virq_to_irqhandler(VIRQ_XENPMU, cpu,
- xen_pmu_irq_handler,
- IRQF_PERCPU|IRQF_NOBALANCING,
- pmu_name, NULL);
- if (rc < 0)
- goto fail;
- per_cpu(xen_pmu_irq, cpu).irq = rc;
- per_cpu(xen_pmu_irq, cpu).name = pmu_name;
- }
-
return 0;
fail:
@@ -238,333 +114,7 @@ int xen_smp_intr_init(unsigned int cpu)
return rc;
}
-static void __init xen_fill_possible_map(void)
-{
- int i, rc;
-
- if (xen_initial_domain())
- return;
-
- for (i = 0; i < nr_cpu_ids; i++) {
- rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
- if (rc >= 0) {
- num_processors++;
- set_cpu_possible(i, true);
- }
- }
-}
-
-static void __init xen_filter_cpu_maps(void)
-{
- int i, rc;
- unsigned int subtract = 0;
-
- if (!xen_initial_domain())
- return;
-
- num_processors = 0;
- disabled_cpus = 0;
- for (i = 0; i < nr_cpu_ids; i++) {
- rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
- if (rc >= 0) {
- num_processors++;
- set_cpu_possible(i, true);
- } else {
- set_cpu_possible(i, false);
- set_cpu_present(i, false);
- subtract++;
- }
- }
-#ifdef CONFIG_HOTPLUG_CPU
- /* This is akin to using 'nr_cpus' on the Linux command line.
- * Which is OK as when we use 'dom0_max_vcpus=X' we can only
- * have up to X, while nr_cpu_ids is greater than X. This
- * normally is not a problem, except when CPU hotplugging
- * is involved and then there might be more than X CPUs
- * in the guest - which will not work as there is no
- * hypercall to expand the max number of VCPUs an already
- * running guest has. So cap it up to X. */
- if (subtract)
- nr_cpu_ids = nr_cpu_ids - subtract;
-#endif
-
-}
-
-static void __init xen_smp_prepare_boot_cpu(void)
-{
- BUG_ON(smp_processor_id() != 0);
- native_smp_prepare_boot_cpu();
-
- if (xen_pv_domain()) {
- if (!xen_feature(XENFEAT_writable_page_tables))
- /* We've switched to the "real" per-cpu gdt, so make
- * sure the old memory can be recycled. */
- make_lowmem_page_readwrite(xen_initial_gdt);
-
-#ifdef CONFIG_X86_32
- /*
- * Xen starts us with XEN_FLAT_RING1_DS, but linux code
- * expects __USER_DS
- */
- loadsegment(ds, __USER_DS);
- loadsegment(es, __USER_DS);
-#endif
-
- xen_filter_cpu_maps();
- xen_setup_vcpu_info_placement();
- }
-
- /*
- * Setup vcpu_info for boot CPU.
- */
- if (xen_hvm_domain())
- xen_vcpu_setup(0);
-
- /*
- * The alternative logic (which patches the unlock/lock) runs before
- * the smp bootup up code is activated. Hence we need to set this up
- * the core kernel is being patched. Otherwise we will have only
- * modules patched but not core code.
- */
- xen_init_spinlocks();
-}
-
-static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
-{
- unsigned cpu;
- unsigned int i;
-
- if (skip_ioapic_setup) {
- char *m = (max_cpus == 0) ?
- "The nosmp parameter is incompatible with Xen; " \
- "use Xen dom0_max_vcpus=1 parameter" :
- "The noapic parameter is incompatible with Xen";
-
- xen_raw_printk(m);
- panic(m);
- }
- xen_init_lock_cpu(0);
-
- smp_store_boot_cpu_info();
- cpu_data(0).x86_max_cores = 1;
-
- for_each_possible_cpu(i) {
- zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
- zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
- zalloc_cpumask_var(&per_cpu(cpu_llc_shared_map, i), GFP_KERNEL);
- }
- set_cpu_sibling_map(0);
-
- xen_pmu_init(0);
-
- if (xen_smp_intr_init(0))
- BUG();
-
- if (!alloc_cpumask_var(&xen_cpu_initialized_map, GFP_KERNEL))
- panic("could not allocate xen_cpu_initialized_map\n");
-
- cpumask_copy(xen_cpu_initialized_map, cpumask_of(0));
-
- /* Restrict the possible_map according to max_cpus. */
- while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) {
- for (cpu = nr_cpu_ids - 1; !cpu_possible(cpu); cpu--)
- continue;
- set_cpu_possible(cpu, false);
- }
-
- for_each_possible_cpu(cpu)
- set_cpu_present(cpu, true);
-}
-
-static int
-cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
-{
- struct vcpu_guest_context *ctxt;
- struct desc_struct *gdt;
- unsigned long gdt_mfn;
-
- /* used to tell cpu_init() that it can proceed with initialization */
- cpumask_set_cpu(cpu, cpu_callout_mask);
- if (cpumask_test_and_set_cpu(cpu, xen_cpu_initialized_map))
- return 0;
-
- ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
- if (ctxt == NULL)
- return -ENOMEM;
-
- gdt = get_cpu_gdt_rw(cpu);
-
-#ifdef CONFIG_X86_32
- ctxt->user_regs.fs = __KERNEL_PERCPU;
- ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
-#endif
- memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));
-
- ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle;
- ctxt->flags = VGCF_IN_KERNEL;
- ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */
- ctxt->user_regs.ds = __USER_DS;
- ctxt->user_regs.es = __USER_DS;
- ctxt->user_regs.ss = __KERNEL_DS;
-
- xen_copy_trap_info(ctxt->trap_ctxt);
-
- ctxt->ldt_ents = 0;
-
- BUG_ON((unsigned long)gdt & ~PAGE_MASK);
-
- gdt_mfn = arbitrary_virt_to_mfn(gdt);
- make_lowmem_page_readonly(gdt);
- make_lowmem_page_readonly(mfn_to_virt(gdt_mfn));
-
- ctxt->gdt_frames[0] = gdt_mfn;
- ctxt->gdt_ents = GDT_ENTRIES;
-
- ctxt->kernel_ss = __KERNEL_DS;
- ctxt->kernel_sp = idle->thread.sp0;
-
-#ifdef CONFIG_X86_32
- ctxt->event_callback_cs = __KERNEL_CS;
- ctxt->failsafe_callback_cs = __KERNEL_CS;
-#else
- ctxt->gs_base_kernel = per_cpu_offset(cpu);
-#endif
- ctxt->event_callback_eip =
- (unsigned long)xen_hypervisor_callback;
- ctxt->failsafe_callback_eip =
- (unsigned long)xen_failsafe_callback;
- ctxt->user_regs.cs = __KERNEL_CS;
- per_cpu(xen_cr3, cpu) = __pa(swapper_pg_dir);
-
- ctxt->user_regs.esp = idle->thread.sp0 - sizeof(struct pt_regs);
- ctxt->ctrlreg[3] = xen_pfn_to_cr3(virt_to_gfn(swapper_pg_dir));
- if (HYPERVISOR_vcpu_op(VCPUOP_initialise, xen_vcpu_nr(cpu), ctxt))
- BUG();
-
- kfree(ctxt);
- return 0;
-}
-
-static int xen_cpu_up(unsigned int cpu, struct task_struct *idle)
-{
- int rc;
-
- common_cpu_up(cpu, idle);
-
- xen_setup_runstate_info(cpu);
-
- /*
- * PV VCPUs are always successfully taken down (see 'while' loop
- * in xen_cpu_die()), so -EBUSY is an error.
- */
- rc = cpu_check_up_prepare(cpu);
- if (rc)
- return rc;
-
- /* make sure interrupts start blocked */
- per_cpu(xen_vcpu, cpu)->evtchn_upcall_mask = 1;
-
- rc = cpu_initialize_context(cpu, idle);
- if (rc)
- return rc;
-
- xen_pmu_init(cpu);
-
- rc = HYPERVISOR_vcpu_op(VCPUOP_up, xen_vcpu_nr(cpu), NULL);
- BUG_ON(rc);
-
- while (cpu_report_state(cpu) != CPU_ONLINE)
- HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
-
- return 0;
-}
-
-static void xen_smp_cpus_done(unsigned int max_cpus)
-{
-}
-
-#ifdef CONFIG_HOTPLUG_CPU
-static int xen_cpu_disable(void)
-{
- unsigned int cpu = smp_processor_id();
- if (cpu == 0)
- return -EBUSY;
-
- cpu_disable_common();
-
- load_cr3(swapper_pg_dir);
- return 0;
-}
-
-static void xen_cpu_die(unsigned int cpu)
-{
- while (xen_pv_domain() && HYPERVISOR_vcpu_op(VCPUOP_is_up,
- xen_vcpu_nr(cpu), NULL)) {
- __set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(HZ/10);
- }
-
- if (common_cpu_die(cpu) == 0) {
- xen_smp_intr_free(cpu);
- xen_uninit_lock_cpu(cpu);
- xen_teardown_timer(cpu);
- xen_pmu_finish(cpu);
- }
-}
-
-static void xen_play_dead(void) /* used only with HOTPLUG_CPU */
-{
- play_dead_common();
- HYPERVISOR_vcpu_op(VCPUOP_down, xen_vcpu_nr(smp_processor_id()), NULL);
- cpu_bringup();
- /*
- * commit 4b0c0f294 (tick: Cleanup NOHZ per cpu data on cpu down)
- * clears certain data that the cpu_idle loop (which called us
- * and that we return from) expects. The only way to get that
- * data back is to call:
- */
- tick_nohz_idle_enter();
-
- cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
-}
-
-#else /* !CONFIG_HOTPLUG_CPU */
-static int xen_cpu_disable(void)
-{
- return -ENOSYS;
-}
-
-static void xen_cpu_die(unsigned int cpu)
-{
- BUG();
-}
-
-static void xen_play_dead(void)
-{
- BUG();
-}
-
-#endif
-static void stop_self(void *v)
-{
- int cpu = smp_processor_id();
-
- /* make sure we're not pinning something down */
- load_cr3(swapper_pg_dir);
- /* should set up a minimal gdt */
-
- set_cpu_online(cpu, false);
-
- HYPERVISOR_vcpu_op(VCPUOP_down, xen_vcpu_nr(cpu), NULL);
- BUG();
-}
-
-static void xen_stop_other_cpus(int wait)
-{
- smp_call_function(stop_self, NULL, wait);
-}
-
-static void xen_smp_send_reschedule(int cpu)
+void xen_smp_send_reschedule(int cpu)
{
xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR);
}
@@ -578,7 +128,7 @@ static void __xen_send_IPI_mask(const struct cpumask *mask,
xen_send_IPI_one(cpu, vector);
}
-static void xen_smp_send_call_function_ipi(const struct cpumask *mask)
+void xen_smp_send_call_function_ipi(const struct cpumask *mask)
{
int cpu;
@@ -593,7 +143,7 @@ static void xen_smp_send_call_function_ipi(const struct cpumask *mask)
}
}
-static void xen_smp_send_call_function_single_ipi(int cpu)
+void xen_smp_send_call_function_single_ipi(int cpu)
{
__xen_send_IPI_mask(cpumask_of(cpu),
XEN_CALL_FUNCTION_SINGLE_VECTOR);
@@ -698,54 +248,3 @@ static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-
-static irqreturn_t xen_irq_work_interrupt(int irq, void *dev_id)
-{
- irq_enter();
- irq_work_run();
- inc_irq_stat(apic_irq_work_irqs);
- irq_exit();
-
- return IRQ_HANDLED;
-}
-
-static const struct smp_ops xen_smp_ops __initconst = {
- .smp_prepare_boot_cpu = xen_smp_prepare_boot_cpu,
- .smp_prepare_cpus = xen_smp_prepare_cpus,
- .smp_cpus_done = xen_smp_cpus_done,
-
- .cpu_up = xen_cpu_up,
- .cpu_die = xen_cpu_die,
- .cpu_disable = xen_cpu_disable,
- .play_dead = xen_play_dead,
-
- .stop_other_cpus = xen_stop_other_cpus,
- .smp_send_reschedule = xen_smp_send_reschedule,
-
- .send_call_func_ipi = xen_smp_send_call_function_ipi,
- .send_call_func_single_ipi = xen_smp_send_call_function_single_ipi,
-};
-
-void __init xen_smp_init(void)
-{
- smp_ops = xen_smp_ops;
- xen_fill_possible_map();
-}
-
-static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus)
-{
- native_smp_prepare_cpus(max_cpus);
- WARN_ON(xen_smp_intr_init(0));
-
- xen_init_lock_cpu(0);
-}
-
-void __init xen_hvm_smp_init(void)
-{
- smp_ops.smp_prepare_cpus = xen_hvm_smp_prepare_cpus;
- smp_ops.smp_send_reschedule = xen_smp_send_reschedule;
- smp_ops.cpu_die = xen_cpu_die;
- smp_ops.send_call_func_ipi = xen_smp_send_call_function_ipi;
- smp_ops.send_call_func_single_ipi = xen_smp_send_call_function_single_ipi;
- smp_ops.smp_prepare_boot_cpu = xen_smp_prepare_boot_cpu;
-}
diff --git a/arch/x86/xen/smp.h b/arch/x86/xen/smp.h
index 9beef333584a..8ebb6acca64a 100644
--- a/arch/x86/xen/smp.h
+++ b/arch/x86/xen/smp.h
@@ -11,7 +11,17 @@ extern void xen_send_IPI_self(int vector);
extern int xen_smp_intr_init(unsigned int cpu);
extern void xen_smp_intr_free(unsigned int cpu);
+int xen_smp_intr_init_pv(unsigned int cpu);
+void xen_smp_intr_free_pv(unsigned int cpu);
+void xen_smp_send_reschedule(int cpu);
+void xen_smp_send_call_function_ipi(const struct cpumask *mask);
+void xen_smp_send_call_function_single_ipi(int cpu);
+
+struct xen_common_irq {
+ int irq;
+ char *name;
+};
#else /* CONFIG_SMP */
static inline int xen_smp_intr_init(unsigned int cpu)
@@ -19,6 +29,12 @@ static inline int xen_smp_intr_init(unsigned int cpu)
return 0;
}
static inline void xen_smp_intr_free(unsigned int cpu) {}
+
+static inline int xen_smp_intr_init_pv(unsigned int cpu)
+{
+ return 0;
+}
+static inline void xen_smp_intr_free_pv(unsigned int cpu) {}
#endif /* CONFIG_SMP */
#endif
diff --git a/arch/x86/xen/smp_hvm.c b/arch/x86/xen/smp_hvm.c
new file mode 100644
index 000000000000..f18561bbf5c9
--- /dev/null
+++ b/arch/x86/xen/smp_hvm.c
@@ -0,0 +1,63 @@
+#include <asm/smp.h>
+
+#include <xen/events.h>
+
+#include "xen-ops.h"
+#include "smp.h"
+
+
+static void __init xen_hvm_smp_prepare_boot_cpu(void)
+{
+ BUG_ON(smp_processor_id() != 0);
+ native_smp_prepare_boot_cpu();
+
+ /*
+ * Set up vcpu_info for the boot CPU.
+ */
+ xen_vcpu_setup(0);
+
+ /*
+ * The alternative logic (which patches the unlock/lock) runs before
+ * the SMP bootup code is activated. Hence we need to set this up
+ * before the core kernel is patched. Otherwise we will have only
+ * modules patched but not core code.
+ */
+ xen_init_spinlocks();
+}
+
+static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus)
+{
+ native_smp_prepare_cpus(max_cpus);
+ WARN_ON(xen_smp_intr_init(0));
+
+ xen_init_lock_cpu(0);
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+static void xen_hvm_cpu_die(unsigned int cpu)
+{
+ if (common_cpu_die(cpu) == 0) {
+ xen_smp_intr_free(cpu);
+ xen_uninit_lock_cpu(cpu);
+ xen_teardown_timer(cpu);
+ }
+}
+#else
+static void xen_hvm_cpu_die(unsigned int cpu)
+{
+ BUG();
+}
+#endif
+
+void __init xen_hvm_smp_init(void)
+{
+ if (!xen_have_vector_callback)
+ return;
+
+ smp_ops.smp_prepare_cpus = xen_hvm_smp_prepare_cpus;
+ smp_ops.smp_send_reschedule = xen_smp_send_reschedule;
+ smp_ops.cpu_die = xen_hvm_cpu_die;
+ smp_ops.send_call_func_ipi = xen_smp_send_call_function_ipi;
+ smp_ops.send_call_func_single_ipi = xen_smp_send_call_function_single_ipi;
+ smp_ops.smp_prepare_boot_cpu = xen_hvm_smp_prepare_boot_cpu;
+}
diff --git a/arch/x86/xen/smp_pv.c b/arch/x86/xen/smp_pv.c
new file mode 100644
index 000000000000..aae32535f4ec
--- /dev/null
+++ b/arch/x86/xen/smp_pv.c
@@ -0,0 +1,490 @@
+/*
+ * Xen SMP support
+ *
+ * This file implements the Xen versions of smp_ops. SMP under Xen is
+ * very straightforward. Bringing a CPU up is simply a matter of
+ * loading its initial context and setting it running.
+ *
+ * IPIs are handled through the Xen event mechanism.
+ *
+ * Because virtual CPUs can be scheduled onto any real CPU, there's no
+ * useful topology information for the kernel to make use of. As a
+ * result, all CPUs are treated as if they're single-core and
+ * single-threaded.
+ */
+#include <linux/sched.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/smp.h>
+#include <linux/irq_work.h>
+#include <linux/tick.h>
+#include <linux/nmi.h>
+
+#include <asm/paravirt.h>
+#include <asm/desc.h>
+#include <asm/pgtable.h>
+#include <asm/cpu.h>
+
+#include <xen/interface/xen.h>
+#include <xen/interface/vcpu.h>
+#include <xen/interface/xenpmu.h>
+
+#include <asm/xen/interface.h>
+#include <asm/xen/hypercall.h>
+
+#include <xen/xen.h>
+#include <xen/page.h>
+#include <xen/events.h>
+
+#include <xen/hvc-console.h>
+#include "xen-ops.h"
+#include "mmu.h"
+#include "smp.h"
+#include "pmu.h"
+
+cpumask_var_t xen_cpu_initialized_map;
+
+static DEFINE_PER_CPU(struct xen_common_irq, xen_irq_work) = { .irq = -1 };
+static DEFINE_PER_CPU(struct xen_common_irq, xen_pmu_irq) = { .irq = -1 };
+
+static irqreturn_t xen_irq_work_interrupt(int irq, void *dev_id);
+
+static void cpu_bringup(void)
+{
+ int cpu;
+
+ cpu_init();
+ touch_softlockup_watchdog();
+ preempt_disable();
+
+ /* PVH runs in ring 0 and allows us to do native syscalls. Yay! */
+ if (!xen_feature(XENFEAT_supervisor_mode_kernel)) {
+ xen_enable_sysenter();
+ xen_enable_syscall();
+ }
+ cpu = smp_processor_id();
+ smp_store_cpu_info(cpu);
+ cpu_data(cpu).x86_max_cores = 1;
+ set_cpu_sibling_map(cpu);
+
+ xen_setup_cpu_clockevents();
+
+ notify_cpu_starting(cpu);
+
+ set_cpu_online(cpu, true);
+
+ cpu_set_state_online(cpu); /* Implies full memory barrier. */
+
+ /* We can take interrupts now: we're officially "up". */
+ local_irq_enable();
+}
+
+asmlinkage __visible void cpu_bringup_and_idle(void)
+{
+ cpu_bringup();
+ cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
+}
+
+void xen_smp_intr_free_pv(unsigned int cpu)
+{
+ if (per_cpu(xen_irq_work, cpu).irq >= 0) {
+ unbind_from_irqhandler(per_cpu(xen_irq_work, cpu).irq, NULL);
+ per_cpu(xen_irq_work, cpu).irq = -1;
+ kfree(per_cpu(xen_irq_work, cpu).name);
+ per_cpu(xen_irq_work, cpu).name = NULL;
+ }
+
+ if (per_cpu(xen_pmu_irq, cpu).irq >= 0) {
+ unbind_from_irqhandler(per_cpu(xen_pmu_irq, cpu).irq, NULL);
+ per_cpu(xen_pmu_irq, cpu).irq = -1;
+ kfree(per_cpu(xen_pmu_irq, cpu).name);
+ per_cpu(xen_pmu_irq, cpu).name = NULL;
+ }
+}
+
+int xen_smp_intr_init_pv(unsigned int cpu)
+{
+ int rc;
+ char *callfunc_name, *pmu_name;
+
+ callfunc_name = kasprintf(GFP_KERNEL, "irqwork%d", cpu);
+ rc = bind_ipi_to_irqhandler(XEN_IRQ_WORK_VECTOR,
+ cpu,
+ xen_irq_work_interrupt,
+ IRQF_PERCPU|IRQF_NOBALANCING,
+ callfunc_name,
+ NULL);
+ if (rc < 0)
+ goto fail;
+ per_cpu(xen_irq_work, cpu).irq = rc;
+ per_cpu(xen_irq_work, cpu).name = callfunc_name;
+
+ if (is_xen_pmu(cpu)) {
+ pmu_name = kasprintf(GFP_KERNEL, "pmu%d", cpu);
+ rc = bind_virq_to_irqhandler(VIRQ_XENPMU, cpu,
+ xen_pmu_irq_handler,
+ IRQF_PERCPU|IRQF_NOBALANCING,
+ pmu_name, NULL);
+ if (rc < 0)
+ goto fail;
+ per_cpu(xen_pmu_irq, cpu).irq = rc;
+ per_cpu(xen_pmu_irq, cpu).name = pmu_name;
+ }
+
+ return 0;
+
+ fail:
+ xen_smp_intr_free_pv(cpu);
+ return rc;
+}
+
+static void __init xen_fill_possible_map(void)
+{
+ int i, rc;
+
+ if (xen_initial_domain())
+ return;
+
+ for (i = 0; i < nr_cpu_ids; i++) {
+ rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
+ if (rc >= 0) {
+ num_processors++;
+ set_cpu_possible(i, true);
+ }
+ }
+}
+
+static void __init xen_filter_cpu_maps(void)
+{
+ int i, rc;
+ unsigned int subtract = 0;
+
+ if (!xen_initial_domain())
+ return;
+
+ num_processors = 0;
+ disabled_cpus = 0;
+ for (i = 0; i < nr_cpu_ids; i++) {
+ rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
+ if (rc >= 0) {
+ num_processors++;
+ set_cpu_possible(i, true);
+ } else {
+ set_cpu_possible(i, false);
+ set_cpu_present(i, false);
+ subtract++;
+ }
+ }
+#ifdef CONFIG_HOTPLUG_CPU
+ /* This is akin to using 'nr_cpus' on the Linux command line.
+ * Which is OK as when we use 'dom0_max_vcpus=X' we can only
+ * have up to X, while nr_cpu_ids is greater than X. This
+ * normally is not a problem, except when CPU hotplugging
+ * is involved and then there might be more than X CPUs
+ * in the guest - which will not work as there is no
+ * hypercall to expand the max number of VCPUs an already
+ * running guest has. So cap it up to X. */
+ if (subtract)
+ nr_cpu_ids = nr_cpu_ids - subtract;
+#endif
+
+}
+
+static void __init xen_pv_smp_prepare_boot_cpu(void)
+{
+ BUG_ON(smp_processor_id() != 0);
+ native_smp_prepare_boot_cpu();
+
+ if (!xen_feature(XENFEAT_writable_page_tables))
+ /* We've switched to the "real" per-cpu gdt, so make
+ * sure the old memory can be recycled. */
+ make_lowmem_page_readwrite(xen_initial_gdt);
+
+#ifdef CONFIG_X86_32
+ /*
+ * Xen starts us with XEN_FLAT_RING1_DS, but Linux code
+ * expects __USER_DS.
+ */
+ loadsegment(ds, __USER_DS);
+ loadsegment(es, __USER_DS);
+#endif
+
+ xen_filter_cpu_maps();
+ xen_setup_vcpu_info_placement();
+
+ /*
+ * The alternative logic (which patches the unlock/lock) runs before
+ * the SMP bootup code is activated. Hence we need to set this up
+ * before the core kernel is patched. Otherwise we will have only
+ * modules patched but not core code.
+ */
+ xen_init_spinlocks();
+}
+
+static void __init xen_pv_smp_prepare_cpus(unsigned int max_cpus)
+{
+ unsigned cpu;
+ unsigned int i;
+
+ if (skip_ioapic_setup) {
+ char *m = (max_cpus == 0) ?
+ "The nosmp parameter is incompatible with Xen; " \
+ "use Xen dom0_max_vcpus=1 parameter" :
+ "The noapic parameter is incompatible with Xen";
+
+ xen_raw_printk(m);
+ panic(m);
+ }
+ xen_init_lock_cpu(0);
+
+ smp_store_boot_cpu_info();
+ cpu_data(0).x86_max_cores = 1;
+
+ for_each_possible_cpu(i) {
+ zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
+ zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
+ zalloc_cpumask_var(&per_cpu(cpu_llc_shared_map, i), GFP_KERNEL);
+ }
+ set_cpu_sibling_map(0);
+
+ xen_pmu_init(0);
+
+ if (xen_smp_intr_init(0) || xen_smp_intr_init_pv(0))
+ BUG();
+
+ if (!alloc_cpumask_var(&xen_cpu_initialized_map, GFP_KERNEL))
+ panic("could not allocate xen_cpu_initialized_map\n");
+
+ cpumask_copy(xen_cpu_initialized_map, cpumask_of(0));
+
+ /* Restrict the possible_map according to max_cpus. */
+ while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) {
+ for (cpu = nr_cpu_ids - 1; !cpu_possible(cpu); cpu--)
+ continue;
+ set_cpu_possible(cpu, false);
+ }
+
+ for_each_possible_cpu(cpu)
+ set_cpu_present(cpu, true);
+}
+
+static int
+cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
+{
+ struct vcpu_guest_context *ctxt;
+ struct desc_struct *gdt;
+ unsigned long gdt_mfn;
+
+ /* used to tell cpu_init() that it can proceed with initialization */
+ cpumask_set_cpu(cpu, cpu_callout_mask);
+ if (cpumask_test_and_set_cpu(cpu, xen_cpu_initialized_map))
+ return 0;
+
+ ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
+ if (ctxt == NULL)
+ return -ENOMEM;
+
+ gdt = get_cpu_gdt_rw(cpu);
+
+#ifdef CONFIG_X86_32
+ ctxt->user_regs.fs = __KERNEL_PERCPU;
+ ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
+#endif
+ memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));
+
+ ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle;
+ ctxt->flags = VGCF_IN_KERNEL;
+ ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */
+ ctxt->user_regs.ds = __USER_DS;
+ ctxt->user_regs.es = __USER_DS;
+ ctxt->user_regs.ss = __KERNEL_DS;
+
+ xen_copy_trap_info(ctxt->trap_ctxt);
+
+ ctxt->ldt_ents = 0;
+
+ BUG_ON((unsigned long)gdt & ~PAGE_MASK);
+
+ gdt_mfn = arbitrary_virt_to_mfn(gdt);
+ make_lowmem_page_readonly(gdt);
+ make_lowmem_page_readonly(mfn_to_virt(gdt_mfn));
+
+ ctxt->gdt_frames[0] = gdt_mfn;
+ ctxt->gdt_ents = GDT_ENTRIES;
+
+ ctxt->kernel_ss = __KERNEL_DS;
+ ctxt->kernel_sp = idle->thread.sp0;
+
+#ifdef CONFIG_X86_32
+ ctxt->event_callback_cs = __KERNEL_CS;
+ ctxt->failsafe_callback_cs = __KERNEL_CS;
+#else
+ ctxt->gs_base_kernel = per_cpu_offset(cpu);
+#endif
+ ctxt->event_callback_eip =
+ (unsigned long)xen_hypervisor_callback;
+ ctxt->failsafe_callback_eip =
+ (unsigned long)xen_failsafe_callback;
+ ctxt->user_regs.cs = __KERNEL_CS;
+ per_cpu(xen_cr3, cpu) = __pa(swapper_pg_dir);
+
+ ctxt->user_regs.esp = idle->thread.sp0 - sizeof(struct pt_regs);
+ ctxt->ctrlreg[3] = xen_pfn_to_cr3(virt_to_gfn(swapper_pg_dir));
+ if (HYPERVISOR_vcpu_op(VCPUOP_initialise, xen_vcpu_nr(cpu), ctxt))
+ BUG();
+
+ kfree(ctxt);
+ return 0;
+}
+
+static int xen_pv_cpu_up(unsigned int cpu, struct task_struct *idle)
+{
+ int rc;
+
+ common_cpu_up(cpu, idle);
+
+ xen_setup_runstate_info(cpu);
+
+ /*
+ * PV VCPUs are always successfully taken down (see 'while' loop
+ * in xen_pv_cpu_die()), so -EBUSY is an error.
+ */
+ rc = cpu_check_up_prepare(cpu);
+ if (rc)
+ return rc;
+
+ /* make sure interrupts start blocked */
+ per_cpu(xen_vcpu, cpu)->evtchn_upcall_mask = 1;
+
+ rc = cpu_initialize_context(cpu, idle);
+ if (rc)
+ return rc;
+
+ xen_pmu_init(cpu);
+
+ rc = HYPERVISOR_vcpu_op(VCPUOP_up, xen_vcpu_nr(cpu), NULL);
+ BUG_ON(rc);
+
+ while (cpu_report_state(cpu) != CPU_ONLINE)
+ HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
+
+ return 0;
+}
+
+static void xen_pv_smp_cpus_done(unsigned int max_cpus)
+{
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+static int xen_pv_cpu_disable(void)
+{
+ unsigned int cpu = smp_processor_id();
+ if (cpu == 0)
+ return -EBUSY;
+
+ cpu_disable_common();
+
+ load_cr3(swapper_pg_dir);
+ return 0;
+}
+
+static void xen_pv_cpu_die(unsigned int cpu)
+{
+ while (HYPERVISOR_vcpu_op(VCPUOP_is_up,
+ xen_vcpu_nr(cpu), NULL)) {
+ __set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(HZ/10);
+ }
+
+ if (common_cpu_die(cpu) == 0) {
+ xen_smp_intr_free(cpu);
+ xen_uninit_lock_cpu(cpu);
+ xen_teardown_timer(cpu);
+ xen_pmu_finish(cpu);
+ }
+}
+
+static void xen_pv_play_dead(void) /* used only with HOTPLUG_CPU */
+{
+ play_dead_common();
+ HYPERVISOR_vcpu_op(VCPUOP_down, xen_vcpu_nr(smp_processor_id()), NULL);
+ cpu_bringup();
+ /*
+ * commit 4b0c0f294 (tick: Cleanup NOHZ per cpu data on cpu down)
+ * clears certain data that the cpu_idle loop (which called us
+ * and that we return from) expects. The only way to get that
+ * data back is to call:
+ */
+ tick_nohz_idle_enter();
+
+ cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
+}
+
+#else /* !CONFIG_HOTPLUG_CPU */
+static int xen_pv_cpu_disable(void)
+{
+ return -ENOSYS;
+}
+
+static void xen_pv_cpu_die(unsigned int cpu)
+{
+ BUG();
+}
+
+static void xen_pv_play_dead(void)
+{
+ BUG();
+}
+
+#endif
+static void stop_self(void *v)
+{
+ int cpu = smp_processor_id();
+
+ /* make sure we're not pinning something down */
+ load_cr3(swapper_pg_dir);
+ /* should set up a minimal gdt */
+
+ set_cpu_online(cpu, false);
+
+ HYPERVISOR_vcpu_op(VCPUOP_down, xen_vcpu_nr(cpu), NULL);
+ BUG();
+}
+
+static void xen_pv_stop_other_cpus(int wait)
+{
+ smp_call_function(stop_self, NULL, wait);
+}
+
+static irqreturn_t xen_irq_work_interrupt(int irq, void *dev_id)
+{
+ irq_enter();
+ irq_work_run();
+ inc_irq_stat(apic_irq_work_irqs);
+ irq_exit();
+
+ return IRQ_HANDLED;
+}
+
+static const struct smp_ops xen_smp_ops __initconst = {
+ .smp_prepare_boot_cpu = xen_pv_smp_prepare_boot_cpu,
+ .smp_prepare_cpus = xen_pv_smp_prepare_cpus,
+ .smp_cpus_done = xen_pv_smp_cpus_done,
+
+ .cpu_up = xen_pv_cpu_up,
+ .cpu_die = xen_pv_cpu_die,
+ .cpu_disable = xen_pv_cpu_disable,
+ .play_dead = xen_pv_play_dead,
+
+ .stop_other_cpus = xen_pv_stop_other_cpus,
+ .smp_send_reschedule = xen_smp_send_reschedule,
+
+ .send_call_func_ipi = xen_smp_send_call_function_ipi,
+ .send_call_func_single_ipi = xen_smp_send_call_function_single_ipi,
+};
+
+void __init xen_smp_init(void)
+{
+ smp_ops = xen_smp_ops;
+ xen_fill_possible_map();
+}
diff --git a/arch/x86/xen/suspend.c b/arch/x86/xen/suspend.c
index 7f664c416faf..d6b1680693a9 100644
--- a/arch/x86/xen/suspend.c
+++ b/arch/x86/xen/suspend.c
@@ -14,60 +14,6 @@
#include "mmu.h"
#include "pmu.h"
-static void xen_pv_pre_suspend(void)
-{
- xen_mm_pin_all();
-
- xen_start_info->store_mfn = mfn_to_pfn(xen_start_info->store_mfn);
- xen_start_info->console.domU.mfn =
- mfn_to_pfn(xen_start_info->console.domU.mfn);
-
- BUG_ON(!irqs_disabled());
-
- HYPERVISOR_shared_info = &xen_dummy_shared_info;
- if (HYPERVISOR_update_va_mapping(fix_to_virt(FIX_PARAVIRT_BOOTMAP),
- __pte_ma(0), 0))
- BUG();
-}
-
-static void xen_hvm_post_suspend(int suspend_cancelled)
-{
-#ifdef CONFIG_XEN_PVHVM
- int cpu;
- if (!suspend_cancelled)
- xen_hvm_init_shared_info();
- xen_callback_vector();
- xen_unplug_emulated_devices();
- if (xen_feature(XENFEAT_hvm_safe_pvclock)) {
- for_each_online_cpu(cpu) {
- xen_setup_runstate_info(cpu);
- }
- }
-#endif
-}
-
-static void xen_pv_post_suspend(int suspend_cancelled)
-{
- xen_build_mfn_list_list();
-
- xen_setup_shared_info();
-
- if (suspend_cancelled) {
- xen_start_info->store_mfn =
- pfn_to_mfn(xen_start_info->store_mfn);
- xen_start_info->console.domU.mfn =
- pfn_to_mfn(xen_start_info->console.domU.mfn);
- } else {
-#ifdef CONFIG_SMP
- BUG_ON(xen_cpu_initialized_map == NULL);
- cpumask_copy(xen_cpu_initialized_map, cpu_online_mask);
-#endif
- xen_vcpu_restore();
- }
-
- xen_mm_unpin_all();
-}
-
void xen_arch_pre_suspend(void)
{
if (xen_pv_domain())
diff --git a/arch/x86/xen/suspend_hvm.c b/arch/x86/xen/suspend_hvm.c
new file mode 100644
index 000000000000..01afcadde50a
--- /dev/null
+++ b/arch/x86/xen/suspend_hvm.c
@@ -0,0 +1,22 @@
+#include <linux/types.h>
+
+#include <xen/xen.h>
+#include <xen/features.h>
+#include <xen/interface/features.h>
+
+#include "xen-ops.h"
+
+void xen_hvm_post_suspend(int suspend_cancelled)
+{
+ int cpu;
+
+ if (!suspend_cancelled)
+ xen_hvm_init_shared_info();
+ xen_callback_vector();
+ xen_unplug_emulated_devices();
+ if (xen_feature(XENFEAT_hvm_safe_pvclock)) {
+ for_each_online_cpu(cpu) {
+ xen_setup_runstate_info(cpu);
+ }
+ }
+}
diff --git a/arch/x86/xen/suspend_pv.c b/arch/x86/xen/suspend_pv.c
new file mode 100644
index 000000000000..3abe4f07f34a
--- /dev/null
+++ b/arch/x86/xen/suspend_pv.c
@@ -0,0 +1,46 @@
+#include <linux/types.h>
+
+#include <asm/fixmap.h>
+
+#include <asm/xen/hypercall.h>
+#include <asm/xen/page.h>
+
+#include "xen-ops.h"
+
+void xen_pv_pre_suspend(void)
+{
+ xen_mm_pin_all();
+
+ xen_start_info->store_mfn = mfn_to_pfn(xen_start_info->store_mfn);
+ xen_start_info->console.domU.mfn =
+ mfn_to_pfn(xen_start_info->console.domU.mfn);
+
+ BUG_ON(!irqs_disabled());
+
+ HYPERVISOR_shared_info = &xen_dummy_shared_info;
+ if (HYPERVISOR_update_va_mapping(fix_to_virt(FIX_PARAVIRT_BOOTMAP),
+ __pte_ma(0), 0))
+ BUG();
+}
+
+void xen_pv_post_suspend(int suspend_cancelled)
+{
+ xen_build_mfn_list_list();
+
+ xen_setup_shared_info();
+
+ if (suspend_cancelled) {
+ xen_start_info->store_mfn =
+ pfn_to_mfn(xen_start_info->store_mfn);
+ xen_start_info->console.domU.mfn =
+ pfn_to_mfn(xen_start_info->console.domU.mfn);
+ } else {
+#ifdef CONFIG_SMP
+ BUG_ON(xen_cpu_initialized_map == NULL);
+ cpumask_copy(xen_cpu_initialized_map, cpu_online_mask);
+#endif
+ xen_vcpu_restore();
+ }
+
+ xen_mm_unpin_all();
+}
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
index 7a3089285c59..090c7eb4dca9 100644
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@ -436,6 +436,14 @@ static void xen_hvm_setup_cpu_clockevents(void)
void __init xen_hvm_init_time_ops(void)
{
+ /*
+ * The vector callback is needed; otherwise we cannot receive
+ * interrupts on CPUs other than CPU 0, and at this point we don't
+ * know how many CPUs are available.
+ */
+ if (!xen_have_vector_callback)
+ return;
+
if (!xen_feature(XENFEAT_hvm_safe_pvclock)) {
printk(KERN_INFO "Xen doesn't support pvclock on HVM,"
"disable pv timer\n");
diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
index 37794e42b67d..72a8e6adebe6 100644
--- a/arch/x86/xen/xen-head.S
+++ b/arch/x86/xen/xen-head.S
@@ -16,6 +16,7 @@
#include <xen/interface/xen-mca.h>
#include <asm/xen/interface.h>
+#ifdef CONFIG_XEN_PV
__INIT
ENTRY(startup_xen)
cld
@@ -34,6 +35,7 @@ ENTRY(startup_xen)
jmp xen_start_kernel
__FINIT
+#endif
.pushsection .text
.balign PAGE_SIZE
@@ -58,7 +60,9 @@ ENTRY(hypercall_page)
/* Map the p2m table to a 512GB-aligned user address. */
ELFNOTE(Xen, XEN_ELFNOTE_INIT_P2M, .quad PGDIR_SIZE)
#endif
+#ifdef CONFIG_XEN_PV
ELFNOTE(Xen, XEN_ELFNOTE_ENTRY, _ASM_PTR startup_xen)
+#endif
ELFNOTE(Xen, XEN_ELFNOTE_HYPERCALL_PAGE, _ASM_PTR hypercall_page)
ELFNOTE(Xen, XEN_ELFNOTE_FEATURES,
.ascii "!writable_page_tables|pae_pgdir_above_4gb")
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index f6a41c41ebc7..9a440a42c618 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -76,6 +76,8 @@ irqreturn_t xen_debug_interrupt(int irq, void *dev_id);
bool xen_vcpu_stolen(int vcpu);
+extern int xen_have_vcpu_info_placement;
+
void xen_vcpu_setup(int cpu);
void xen_setup_vcpu_info_placement(void);
@@ -146,4 +148,24 @@ __visible void xen_adjust_exception_frame(void);
extern int xen_panic_handler_init(void);
+int xen_cpuhp_setup(int (*cpu_up_prepare_cb)(unsigned int),
+ int (*cpu_dead_cb)(unsigned int));
+
+void xen_pin_vcpu(int cpu);
+
+void xen_emergency_restart(void);
+#ifdef CONFIG_XEN_PV
+void xen_pv_pre_suspend(void);
+void xen_pv_post_suspend(int suspend_cancelled);
+#else
+static inline void xen_pv_pre_suspend(void) {}
+static inline void xen_pv_post_suspend(int suspend_cancelled) {}
+#endif
+
+#ifdef CONFIG_XEN_PVHVM
+void xen_hvm_post_suspend(int suspend_cancelled);
+#else
+static inline void xen_hvm_post_suspend(int suspend_cancelled) {}
+#endif
+
#endif /* XEN_OPS_H */
diff --git a/arch/xtensa/include/asm/pci.h b/arch/xtensa/include/asm/pci.h
index 5d6bd932ba4e..e4f366a488d3 100644
--- a/arch/xtensa/include/asm/pci.h
+++ b/arch/xtensa/include/asm/pci.h
@@ -46,12 +46,9 @@ struct pci_dev;
#define PCI_DMA_BUS_IS_PHYS (1)
-/* Map a range of PCI memory or I/O space for a device into user space */
-int pci_mmap_page_range(struct pci_dev *pdev, struct vm_area_struct *vma,
- enum pci_mmap_state mmap_state, int write_combine);
-
/* Tell drivers/pci/proc.c that we have pci_mmap_page_range() */
-#define HAVE_PCI_MMAP 1
+#define HAVE_PCI_MMAP 1
+#define arch_can_pci_mmap_io() 1
#endif /* __KERNEL__ */
diff --git a/arch/xtensa/include/asm/processor.h b/arch/xtensa/include/asm/processor.h
index 86ffcd68e496..003eeee3fbc6 100644
--- a/arch/xtensa/include/asm/processor.h
+++ b/arch/xtensa/include/asm/processor.h
@@ -113,6 +113,21 @@
*/
#define MAKE_PC_FROM_RA(ra,sp) (((ra) & 0x3fffffff) | ((sp) & 0xc0000000))
+/* Spill slot location for the register reg in the spill area under the stack
+ * pointer sp. reg must be in the range [0..4).
+ */
+#define SPILL_SLOT(sp, reg) (*(((unsigned long *)(sp)) - 4 + (reg)))
+
+/* Spill slot location for the register reg in the spill area under the stack
+ * pointer sp for the call8. reg must be in the range [4..8).
+ */
+#define SPILL_SLOT_CALL8(sp, reg) (*(((unsigned long *)(sp)) - 12 + (reg)))
+
+/* Spill slot location for the register reg in the spill area under the stack
+ * pointer sp for the call12. reg must be in the range [4..12).
+ */
+#define SPILL_SLOT_CALL12(sp, reg) (*(((unsigned long *)(sp)) - 16 + (reg)))
+
typedef struct {
unsigned long seg;
} mm_segment_t;
diff --git a/arch/xtensa/include/asm/ptrace.h b/arch/xtensa/include/asm/ptrace.h
index 598e752dcbcd..e2d9c5eb10bd 100644
--- a/arch/xtensa/include/asm/ptrace.h
+++ b/arch/xtensa/include/asm/ptrace.h
@@ -12,6 +12,45 @@
#include <uapi/asm/ptrace.h>
+/*
+ * Kernel stack
+ *
+ * +-----------------------+ -------- STACK_SIZE
+ * | register file | |
+ * +-----------------------+ |
+ * | struct pt_regs | |
+ * +-----------------------+ | ------ PT_REGS_OFFSET
+ * double : 16 bytes spill area : | ^
+ * exception :- - - - - - - - - - - -: | |
+ * frame : struct pt_regs : | |
+ * :- - - - - - - - - - - -: | |
+ * | | | |
+ * | memory stack | | |
+ * | | | |
+ * ~ ~ ~ ~
+ * ~ ~ ~ ~
+ * | | | |
+ * | | | |
+ * +-----------------------+ | | --- STACK_BIAS
+ * | struct task_struct | | | ^
+ * current --> +-----------------------+ | | |
+ * | struct thread_info | | | |
+ * +-----------------------+ --------
+ */
+
+#define KERNEL_STACK_SIZE (2 * PAGE_SIZE)
+
+/* Offsets for exception_handlers[] (3 x 64-entries x 4-byte tables). */
+
+#define EXC_TABLE_KSTK 0x004 /* Kernel Stack */
+#define EXC_TABLE_DOUBLE_SAVE 0x008 /* Double exception save area for a0 */
+#define EXC_TABLE_FIXUP 0x00c /* Fixup handler */
+#define EXC_TABLE_PARAM 0x010 /* For passing a parameter to fixup */
+#define EXC_TABLE_SYSCALL_SAVE 0x014 /* For fast syscall handler */
+#define EXC_TABLE_FAST_USER 0x100 /* Fast user exception handler */
+#define EXC_TABLE_FAST_KERNEL 0x200 /* Fast kernel exception handler */
+#define EXC_TABLE_DEFAULT 0x300 /* Default C-Handler */
+#define EXC_TABLE_SIZE 0x400
#ifndef __ASSEMBLY__
diff --git a/arch/xtensa/include/uapi/asm/ptrace.h b/arch/xtensa/include/uapi/asm/ptrace.h
index 6ccbd9e38e35..8853a0d544c8 100644
--- a/arch/xtensa/include/uapi/asm/ptrace.h
+++ b/arch/xtensa/include/uapi/asm/ptrace.h
@@ -11,46 +11,6 @@
#ifndef _UAPI_XTENSA_PTRACE_H
#define _UAPI_XTENSA_PTRACE_H
-/*
- * Kernel stack
- *
- * +-----------------------+ -------- STACK_SIZE
- * | register file | |
- * +-----------------------+ |
- * | struct pt_regs | |
- * +-----------------------+ | ------ PT_REGS_OFFSET
- * double : 16 bytes spill area : | ^
- * excetion :- - - - - - - - - - - -: | |
- * frame : struct pt_regs : | |
- * :- - - - - - - - - - - -: | |
- * | | | |
- * | memory stack | | |
- * | | | |
- * ~ ~ ~ ~
- * ~ ~ ~ ~
- * | | | |
- * | | | |
- * +-----------------------+ | | --- STACK_BIAS
- * | struct task_struct | | | ^
- * current --> +-----------------------+ | | |
- * | struct thread_info | | | |
- * +-----------------------+ --------
- */
-
-#define KERNEL_STACK_SIZE (2 * PAGE_SIZE)
-
-/* Offsets for exception_handlers[] (3 x 64-entries x 4-byte tables). */
-
-#define EXC_TABLE_KSTK 0x004 /* Kernel Stack */
-#define EXC_TABLE_DOUBLE_SAVE 0x008 /* Double exception save area for a0 */
-#define EXC_TABLE_FIXUP 0x00c /* Fixup handler */
-#define EXC_TABLE_PARAM 0x010 /* For passing a parameter to fixup */
-#define EXC_TABLE_SYSCALL_SAVE 0x014 /* For fast syscall handler */
-#define EXC_TABLE_FAST_USER 0x100 /* Fast user exception handler */
-#define EXC_TABLE_FAST_KERNEL 0x200 /* Fast kernel exception handler */
-#define EXC_TABLE_DEFAULT 0x300 /* Default C-Handler */
-#define EXC_TABLE_SIZE 0x400
-
/* Registers used by strace */
#define REG_A_BASE 0x0000
diff --git a/arch/xtensa/kernel/coprocessor.S b/arch/xtensa/kernel/coprocessor.S
index 6911e384f608..3a98503ad11a 100644
--- a/arch/xtensa/kernel/coprocessor.S
+++ b/arch/xtensa/kernel/coprocessor.S
@@ -26,30 +26,6 @@
#include <asm/signal.h>
#include <asm/tlbflush.h>
-/*
- * Entry condition:
- *
- * a0: trashed, original value saved on stack (PT_AREG0)
- * a1: a1
- * a2: new stack pointer, original in DEPC
- * a3: a3
- * depc: a2, original value saved on stack (PT_DEPC)
- * excsave_1: dispatch table
- *
- * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
- * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
- */
-
-/* IO protection is currently unsupported. */
-
-ENTRY(fast_io_protect)
-
- wsr a0, excsave1
- movi a0, unrecoverable_exception
- callx0 a0
-
-ENDPROC(fast_io_protect)
-
#if XTENSA_HAVE_COPROCESSORS
/*
diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S
index f5ef3cc0497c..37a239556889 100644
--- a/arch/xtensa/kernel/entry.S
+++ b/arch/xtensa/kernel/entry.S
@@ -1899,10 +1899,11 @@ ENTRY(system_call)
movi a4, do_syscall_trace_enter
s32i a3, a2, PT_SYSCALL
callx4 a4
+ mov a3, a6
/* syscall = sys_call_table[syscall_nr] */
- movi a4, sys_call_table;
+ movi a4, sys_call_table
movi a5, __NR_syscall_count
movi a6, -ENOSYS
bgeu a3, a5, 1f
diff --git a/arch/xtensa/kernel/pci.c b/arch/xtensa/kernel/pci.c
index b848cc3dc913..903963ee495d 100644
--- a/arch/xtensa/kernel/pci.c
+++ b/arch/xtensa/kernel/pci.c
@@ -334,25 +334,6 @@ __pci_mmap_make_offset(struct pci_dev *dev, struct vm_area_struct *vma,
}
/*
- * Set vm_page_prot of VMA, as appropriate for this architecture, for a pci
- * device mapping.
- */
-static __inline__ void
-__pci_mmap_set_pgprot(struct pci_dev *dev, struct vm_area_struct *vma,
- enum pci_mmap_state mmap_state, int write_combine)
-{
- int prot = pgprot_val(vma->vm_page_prot);
-
- /* Set to write-through */
- prot = (prot & _PAGE_CA_MASK) | _PAGE_CA_WT;
-#if 0
- if (!write_combine)
- prot |= _PAGE_WRITETHRU;
-#endif
- vma->vm_page_prot = __pgprot(prot);
-}
-
-/*
* Perform the actual remap of the pages for a PCI device mapping, as
* appropriate for this architecture. The region in the process to map
* is described by vm_start and vm_end members of VMA, the base physical
@@ -362,7 +343,8 @@ __pci_mmap_set_pgprot(struct pci_dev *dev, struct vm_area_struct *vma,
*
* Returns a negative error code on failure, zero on success.
*/
-int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
+int pci_mmap_page_range(struct pci_dev *dev, int bar,
+ struct vm_area_struct *vma,
enum pci_mmap_state mmap_state,
int write_combine)
{
@@ -372,7 +354,7 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
if (ret < 0)
return ret;
- __pci_mmap_set_pgprot(dev, vma, mmap_state, write_combine);
+ vma->vm_page_prot = pgprot_device(vma->vm_page_prot);
ret = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
vma->vm_end - vma->vm_start,vma->vm_page_prot);
diff --git a/arch/xtensa/kernel/process.c b/arch/xtensa/kernel/process.c
index 58f96d1230d4..ff4f0ecb03dd 100644
--- a/arch/xtensa/kernel/process.c
+++ b/arch/xtensa/kernel/process.c
@@ -204,8 +204,8 @@ int copy_thread(unsigned long clone_flags, unsigned long usp_thread_fn,
#endif
/* Create a call4 dummy-frame: a0 = 0, a1 = childregs. */
- *((int*)childregs - 3) = (unsigned long)childregs;
- *((int*)childregs - 4) = 0;
+ SPILL_SLOT(childregs, 1) = (unsigned long)childregs;
+ SPILL_SLOT(childregs, 0) = 0;
p->thread.sp = (unsigned long)childregs;
@@ -266,8 +266,8 @@ int copy_thread(unsigned long clone_flags, unsigned long usp_thread_fn,
/* pass parameters to ret_from_kernel_thread:
* a2 = thread_fn, a3 = thread_fn arg
*/
- *((int *)childregs - 1) = thread_fn_arg;
- *((int *)childregs - 2) = usp_thread_fn;
+ SPILL_SLOT(childregs, 3) = thread_fn_arg;
+ SPILL_SLOT(childregs, 2) = usp_thread_fn;
/* Childregs are only used when we're going to userspace
* in which case start_thread will set them up.
diff --git a/arch/xtensa/kernel/ptrace.c b/arch/xtensa/kernel/ptrace.c
index e0f583fed06a..e2461968efb2 100644
--- a/arch/xtensa/kernel/ptrace.c
+++ b/arch/xtensa/kernel/ptrace.c
@@ -1,4 +1,3 @@
-// TODO some minor issues
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
@@ -24,13 +23,14 @@
#include <linux/security.h>
#include <linux/signal.h>
#include <linux/smp.h>
+#include <linux/tracehook.h>
+#include <linux/uaccess.h>
#include <asm/coprocessor.h>
#include <asm/elf.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/ptrace.h>
-#include <linux/uaccess.h>
void user_enable_single_step(struct task_struct *child)
@@ -52,7 +52,7 @@ void ptrace_disable(struct task_struct *child)
/* Nothing to do.. */
}
-int ptrace_getregs(struct task_struct *child, void __user *uregs)
+static int ptrace_getregs(struct task_struct *child, void __user *uregs)
{
struct pt_regs *regs = task_pt_regs(child);
xtensa_gregset_t __user *gregset = uregs;
@@ -73,12 +73,12 @@ int ptrace_getregs(struct task_struct *child, void __user *uregs)
for (i = 0; i < XCHAL_NUM_AREGS; i++)
__put_user(regs->areg[i],
- gregset->a + ((wb * 4 + i) % XCHAL_NUM_AREGS));
+ gregset->a + ((wb * 4 + i) % XCHAL_NUM_AREGS));
return 0;
}
-int ptrace_setregs(struct task_struct *child, void __user *uregs)
+static int ptrace_setregs(struct task_struct *child, void __user *uregs)
{
struct pt_regs *regs = task_pt_regs(child);
xtensa_gregset_t *gregset = uregs;
@@ -107,7 +107,7 @@ int ptrace_setregs(struct task_struct *child, void __user *uregs)
unsigned long rotws, wmask;
rotws = (((ws | (ws << WSBITS)) >> wb) &
- ((1 << WSBITS) - 1)) & ~1;
+ ((1 << WSBITS) - 1)) & ~1;
wmask = ((rotws ? WSBITS + 1 - ffs(rotws) : 0) << 4) |
(rotws & 0xF) | 1;
regs->windowbase = wb;
@@ -115,19 +115,19 @@ int ptrace_setregs(struct task_struct *child, void __user *uregs)
regs->wmask = wmask;
}
- if (wb != 0 && __copy_from_user(regs->areg + XCHAL_NUM_AREGS - wb * 4,
- gregset->a, wb * 16))
+ if (wb != 0 && __copy_from_user(regs->areg + XCHAL_NUM_AREGS - wb * 4,
+ gregset->a, wb * 16))
return -EFAULT;
if (__copy_from_user(regs->areg, gregset->a + wb * 4,
- (WSBITS - wb) * 16))
+ (WSBITS - wb) * 16))
return -EFAULT;
return 0;
}
-int ptrace_getxregs(struct task_struct *child, void __user *uregs)
+static int ptrace_getxregs(struct task_struct *child, void __user *uregs)
{
struct pt_regs *regs = task_pt_regs(child);
struct thread_info *ti = task_thread_info(child);
@@ -151,7 +151,7 @@ int ptrace_getxregs(struct task_struct *child, void __user *uregs)
return ret ? -EFAULT : 0;
}
-int ptrace_setxregs(struct task_struct *child, void __user *uregs)
+static int ptrace_setxregs(struct task_struct *child, void __user *uregs)
{
struct thread_info *ti = task_thread_info(child);
struct pt_regs *regs = task_pt_regs(child);
@@ -177,7 +177,8 @@ int ptrace_setxregs(struct task_struct *child, void __user *uregs)
return ret ? -EFAULT : 0;
}
-int ptrace_peekusr(struct task_struct *child, long regno, long __user *ret)
+static int ptrace_peekusr(struct task_struct *child, long regno,
+ long __user *ret)
{
struct pt_regs *regs;
unsigned long tmp;
@@ -186,86 +187,87 @@ int ptrace_peekusr(struct task_struct *child, long regno, long __user *ret)
tmp = 0; /* Default return value. */
switch(regno) {
+ case REG_AR_BASE ... REG_AR_BASE + XCHAL_NUM_AREGS - 1:
+ tmp = regs->areg[regno - REG_AR_BASE];
+ break;
- case REG_AR_BASE ... REG_AR_BASE + XCHAL_NUM_AREGS - 1:
- tmp = regs->areg[regno - REG_AR_BASE];
- break;
-
- case REG_A_BASE ... REG_A_BASE + 15:
- tmp = regs->areg[regno - REG_A_BASE];
- break;
+ case REG_A_BASE ... REG_A_BASE + 15:
+ tmp = regs->areg[regno - REG_A_BASE];
+ break;
- case REG_PC:
- tmp = regs->pc;
- break;
+ case REG_PC:
+ tmp = regs->pc;
+ break;
- case REG_PS:
- /* Note: PS.EXCM is not set while user task is running;
- * its being set in regs is for exception handling
- * convenience. */
- tmp = (regs->ps & ~(1 << PS_EXCM_BIT));
- break;
+ case REG_PS:
+ /* Note: PS.EXCM is not set while user task is running;
+ * its being set in regs is for exception handling
+ * convenience.
+ */
+ tmp = (regs->ps & ~(1 << PS_EXCM_BIT));
+ break;
- case REG_WB:
- break; /* tmp = 0 */
+ case REG_WB:
+ break; /* tmp = 0 */
- case REG_WS:
+ case REG_WS:
{
unsigned long wb = regs->windowbase;
unsigned long ws = regs->windowstart;
- tmp = ((ws>>wb) | (ws<<(WSBITS-wb))) & ((1<<WSBITS)-1);
+ tmp = ((ws >> wb) | (ws << (WSBITS - wb))) &
+ ((1 << WSBITS) - 1);
break;
}
- case REG_LBEG:
- tmp = regs->lbeg;
- break;
+ case REG_LBEG:
+ tmp = regs->lbeg;
+ break;
- case REG_LEND:
- tmp = regs->lend;
- break;
+ case REG_LEND:
+ tmp = regs->lend;
+ break;
- case REG_LCOUNT:
- tmp = regs->lcount;
- break;
+ case REG_LCOUNT:
+ tmp = regs->lcount;
+ break;
- case REG_SAR:
- tmp = regs->sar;
- break;
+ case REG_SAR:
+ tmp = regs->sar;
+ break;
- case SYSCALL_NR:
- tmp = regs->syscall;
- break;
+ case SYSCALL_NR:
+ tmp = regs->syscall;
+ break;
- default:
- return -EIO;
+ default:
+ return -EIO;
}
return put_user(tmp, ret);
}
-int ptrace_pokeusr(struct task_struct *child, long regno, long val)
+static int ptrace_pokeusr(struct task_struct *child, long regno, long val)
{
struct pt_regs *regs;
regs = task_pt_regs(child);
switch (regno) {
- case REG_AR_BASE ... REG_AR_BASE + XCHAL_NUM_AREGS - 1:
- regs->areg[regno - REG_AR_BASE] = val;
- break;
+ case REG_AR_BASE ... REG_AR_BASE + XCHAL_NUM_AREGS - 1:
+ regs->areg[regno - REG_AR_BASE] = val;
+ break;
- case REG_A_BASE ... REG_A_BASE + 15:
- regs->areg[regno - REG_A_BASE] = val;
- break;
+ case REG_A_BASE ... REG_A_BASE + 15:
+ regs->areg[regno - REG_A_BASE] = val;
+ break;
- case REG_PC:
- regs->pc = val;
- break;
+ case REG_PC:
+ regs->pc = val;
+ break;
- case SYSCALL_NR:
- regs->syscall = val;
- break;
+ case SYSCALL_NR:
+ regs->syscall = val;
+ break;
- default:
- return -EIO;
+ default:
+ return -EIO;
}
return 0;
}
@@ -467,39 +469,21 @@ long arch_ptrace(struct task_struct *child, long request,
return ret;
}
-void do_syscall_trace(void)
-{
- /*
- * The 0x80 provides a way for the tracing parent to distinguish
- * between a syscall stop and SIGTRAP delivery
- */
- ptrace_notify(SIGTRAP|((current->ptrace & PT_TRACESYSGOOD) ? 0x80 : 0));
-
- /*
- * this isn't the same as continuing with a signal, but it will do
- * for normal use. strace only continues with a signal if the
- * stopping signal is not SIGTRAP. -brl
- */
- if (current->exit_code) {
- send_sig(current->exit_code, current, 1);
- current->exit_code = 0;
- }
-}
-
-void do_syscall_trace_enter(struct pt_regs *regs)
+unsigned long do_syscall_trace_enter(struct pt_regs *regs)
{
- if (test_thread_flag(TIF_SYSCALL_TRACE)
- && (current->ptrace & PT_PTRACED))
- do_syscall_trace();
+ if (test_thread_flag(TIF_SYSCALL_TRACE) &&
+ tracehook_report_syscall_entry(regs))
+ return -1;
-#if 0
- audit_syscall_entry(...);
-#endif
+ return regs->areg[2];
}
void do_syscall_trace_leave(struct pt_regs *regs)
{
- if ((test_thread_flag(TIF_SYSCALL_TRACE))
- && (current->ptrace & PT_PTRACED))
- do_syscall_trace();
+ int step;
+
+ step = test_thread_flag(TIF_SINGLESTEP);
+
+ if (step || test_thread_flag(TIF_SYSCALL_TRACE))
+ tracehook_report_syscall_exit(regs, step);
}
diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c
index 197e75b400b1..394ef08300b6 100644
--- a/arch/xtensa/kernel/setup.c
+++ b/arch/xtensa/kernel/setup.c
@@ -317,8 +317,9 @@ static inline int mem_reserve(unsigned long start, unsigned long end)
void __init setup_arch(char **cmdline_p)
{
- strlcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
*cmdline_p = command_line;
+ platform_setup(cmdline_p);
+ strlcpy(boot_command_line, *cmdline_p, COMMAND_LINE_SIZE);
/* Reserve some memory regions */
@@ -382,8 +383,6 @@ void __init setup_arch(char **cmdline_p)
unflatten_and_copy_device_tree();
- platform_setup(cmdline_p);
-
#ifdef CONFIG_SMP
smp_init_cpus();
#endif
@@ -453,9 +452,9 @@ void cpu_reset(void)
tmpaddr += SZ_512M;
/* Invalidate mapping in the selected temporary area */
- if (itlb_probe(tmpaddr) & 0x8)
+ if (itlb_probe(tmpaddr) & BIT(ITLB_HIT_BIT))
invalidate_itlb_entry(itlb_probe(tmpaddr));
- if (itlb_probe(tmpaddr + PAGE_SIZE) & 0x8)
+ if (itlb_probe(tmpaddr + PAGE_SIZE) & BIT(ITLB_HIT_BIT))
invalidate_itlb_entry(itlb_probe(tmpaddr + PAGE_SIZE));
/*
diff --git a/arch/xtensa/kernel/signal.c b/arch/xtensa/kernel/signal.c
index 70a131945443..d427e784ab44 100644
--- a/arch/xtensa/kernel/signal.c
+++ b/arch/xtensa/kernel/signal.c
@@ -91,14 +91,14 @@ flush_window_regs_user(struct pt_regs *regs)
inc = 1;
} else if (m & 4) { /* call8 */
- if (copy_to_user((void*)(sp - 32),
- &regs->areg[(base + 1) * 4], 16))
+ if (copy_to_user(&SPILL_SLOT_CALL8(sp, 4),
+ &regs->areg[(base + 1) * 4], 16))
goto errout;
inc = 2;
} else if (m & 8) { /* call12 */
- if (copy_to_user((void*)(sp - 48),
- &regs->areg[(base + 1) * 4], 32))
+ if (copy_to_user(&SPILL_SLOT_CALL12(sp, 4),
+ &regs->areg[(base + 1) * 4], 32))
goto errout;
inc = 3;
}
@@ -106,7 +106,7 @@ flush_window_regs_user(struct pt_regs *regs)
/* Save current frame a0..a3 under next SP */
sp = regs->areg[((base + inc) * 4 + 1) % XCHAL_NUM_AREGS];
- if (copy_to_user((void*)(sp - 16), &regs->areg[base * 4], 16))
+ if (copy_to_user(&SPILL_SLOT(sp, 0), &regs->areg[base * 4], 16))
goto errout;
/* Get current stack pointer for next loop iteration. */
diff --git a/arch/xtensa/kernel/stacktrace.c b/arch/xtensa/kernel/stacktrace.c
index e7d30ee0fd48..0df4080fa20f 100644
--- a/arch/xtensa/kernel/stacktrace.c
+++ b/arch/xtensa/kernel/stacktrace.c
@@ -23,14 +23,6 @@
*/
extern int common_exception_return;
-/* A struct that maps to the part of the frame containing the a0 and
- * a1 registers.
- */
-struct frame_start {
- unsigned long a0;
- unsigned long a1;
-};
-
void xtensa_backtrace_user(struct pt_regs *regs, unsigned int depth,
int (*ufn)(struct stackframe *frame, void *data),
void *data)
@@ -96,26 +88,16 @@ void xtensa_backtrace_user(struct pt_regs *regs, unsigned int depth,
/* Start from the a1 register. */
/* a1 = regs->areg[1]; */
while (a0 != 0 && depth--) {
- struct frame_start frame_start;
- /* Get the location for a1, a0 for the
- * previous frame from the current a1.
- */
- unsigned long *psp = (unsigned long *)a1;
-
- psp -= 4;
+ pc = MAKE_PC_FROM_RA(a0, pc);
/* Check if the region is OK to access. */
- if (!access_ok(VERIFY_READ, psp, sizeof(frame_start)))
+ if (!access_ok(VERIFY_READ, &SPILL_SLOT(a1, 0), 8))
return;
/* Copy a1, a0 from user space stack frame. */
- if (__copy_from_user_inatomic(&frame_start, psp,
- sizeof(frame_start)))
+ if (__get_user(a0, &SPILL_SLOT(a1, 0)) ||
+ __get_user(a1, &SPILL_SLOT(a1, 1)))
return;
- pc = MAKE_PC_FROM_RA(a0, pc);
- a0 = frame_start.a0;
- a1 = frame_start.a1;
-
frame.pc = pc;
frame.sp = a1;
@@ -147,7 +129,6 @@ void xtensa_backtrace_kernel(struct pt_regs *regs, unsigned int depth,
*/
while (a1 > sp_start && a1 < sp_end && depth--) {
struct stackframe frame;
- unsigned long *psp = (unsigned long *)a1;
frame.pc = pc;
frame.sp = a1;
@@ -171,8 +152,8 @@ void xtensa_backtrace_kernel(struct pt_regs *regs, unsigned int depth,
sp_start = a1;
pc = MAKE_PC_FROM_RA(a0, pc);
- a0 = *(psp - 4);
- a1 = *(psp - 3);
+ a0 = SPILL_SLOT(a1, 0);
+ a1 = SPILL_SLOT(a1, 1);
}
}
EXPORT_SYMBOL(xtensa_backtrace_kernel);
@@ -196,8 +177,8 @@ void walk_stackframe(unsigned long *sp,
sp = (unsigned long *)a1;
- a0 = *(sp - 4);
- a1 = *(sp - 3);
+ a0 = SPILL_SLOT(a1, 0);
+ a1 = SPILL_SLOT(a1, 1);
if (a1 <= (unsigned long)sp)
break;
diff --git a/arch/xtensa/platforms/iss/include/platform/simcall.h b/arch/xtensa/platforms/iss/include/platform/simcall.h
index 27d7a528b41a..2ba45858e50a 100644
--- a/arch/xtensa/platforms/iss/include/platform/simcall.h
+++ b/arch/xtensa/platforms/iss/include/platform/simcall.h
@@ -6,6 +6,7 @@
* for more details.
*
* Copyright (C) 2001 Tensilica Inc.
+ * Copyright (C) 2017 Cadence Design Systems Inc.
*/
#ifndef _XTENSA_PLATFORM_ISS_SIMCALL_H
@@ -49,6 +50,10 @@
#define SYS_bind 30
#define SYS_ioctl 31
+#define SYS_iss_argc 1000 /* returns value of argc */
+#define SYS_iss_argv_size 1001 /* bytes needed for argv & arg strings */
+#define SYS_iss_set_argv 1002 /* saves argv & arg strings at given addr */
+
/*
* SYS_select_one specifiers
*/
@@ -118,5 +123,20 @@ static inline int simc_lseek(int fd, uint32_t off, int whence)
return __simc(SYS_lseek, fd, off, whence);
}
+static inline int simc_argc(void)
+{
+ return __simc(SYS_iss_argc, 0, 0, 0);
+}
+
+static inline int simc_argv_size(void)
+{
+ return __simc(SYS_iss_argv_size, 0, 0, 0);
+}
+
+static inline void simc_argv(void *buf)
+{
+ __simc(SYS_iss_set_argv, (int)buf, 0, 0);
+}
+
#endif /* _XTENSA_PLATFORM_ISS_SIMCALL_H */
diff --git a/arch/xtensa/platforms/iss/setup.c b/arch/xtensa/platforms/iss/setup.c
index 379aeddcc638..f4bbb28026f8 100644
--- a/arch/xtensa/platforms/iss/setup.c
+++ b/arch/xtensa/platforms/iss/setup.c
@@ -8,6 +8,7 @@
* Joe Taylor <joe@tensilica.com>
*
* Copyright 2001 - 2005 Tensilica Inc.
+ * Copyright 2017 Cadence Design Systems Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -15,6 +16,7 @@
* option) any later version.
*
*/
+#include <linux/bootmem.h>
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -31,13 +33,13 @@
#include <asm/platform.h>
#include <asm/bootparam.h>
+#include <asm/setup.h>
#include <platform/simcall.h>
void __init platform_init(bp_tag_t* bootparam)
{
-
}
void platform_halt(void)
@@ -59,26 +61,10 @@ void platform_restart(void)
/* control never gets here */
}
-extern void iss_net_poll(void);
-
-const char twirl[]="|/-\\|/-\\";
-
void platform_heartbeat(void)
{
-#if 0
- static int i = 0, j = 0;
-
- if (--i < 0) {
- i = 99;
- printk("\r%c\r", twirl[j++]);
- if (j == 8)
- j = 0;
- }
-#endif
}
-
-
static int
iss_panic_event(struct notifier_block *this, unsigned long event, void *ptr)
{
@@ -87,12 +73,29 @@ iss_panic_event(struct notifier_block *this, unsigned long event, void *ptr)
}
static struct notifier_block iss_panic_block = {
- iss_panic_event,
- NULL,
- 0
+ .notifier_call = iss_panic_event,
};
void __init platform_setup(char **p_cmdline)
{
+ int argc = simc_argc();
+ int argv_size = simc_argv_size();
+
+ if (argc > 1) {
+ void **argv = alloc_bootmem(argv_size);
+ char *cmdline = alloc_bootmem(argv_size);
+ int i;
+
+ cmdline[0] = 0;
+ simc_argv((void *)argv);
+
+ for (i = 1; i < argc; ++i) {
+ if (i > 1)
+ strcat(cmdline, " ");
+ strcat(cmdline, argv[i]);
+ }
+ *p_cmdline = cmdline;
+ }
+
atomic_notifier_chain_register(&panic_notifier_list, &iss_panic_block);
}
diff --git a/block/Kconfig b/block/Kconfig
index 89cd28f8d051..a8ad7e77db28 100644
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -6,6 +6,7 @@ menuconfig BLOCK
default y
select SBITMAP
select SRCU
+ select DAX
help
Provide block layer support for the kernel.
diff --git a/block/blk-core.c b/block/blk-core.c
index 24886b69690f..c580b0138a7f 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -561,13 +561,9 @@ void blk_cleanup_queue(struct request_queue *q)
* prevent that q->request_fn() gets invoked after draining finished.
*/
blk_freeze_queue(q);
- if (!q->mq_ops) {
- spin_lock_irq(lock);
+ spin_lock_irq(lock);
+ if (!q->mq_ops)
__blk_drain_queue(q, true);
- } else {
- blk_mq_debugfs_unregister_mq(q);
- spin_lock_irq(lock);
- }
queue_flag_set(QUEUE_FLAG_DEAD, q);
spin_unlock_irq(lock);
diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c
index bcd2a7d4a3a5..803aed4d7221 100644
--- a/block/blk-mq-debugfs.c
+++ b/block/blk-mq-debugfs.c
@@ -21,28 +21,9 @@
#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
+#include "blk-mq-debugfs.h"
#include "blk-mq-tag.h"
-struct blk_mq_debugfs_attr {
- const char *name;
- umode_t mode;
- const struct file_operations *fops;
-};
-
-static int blk_mq_debugfs_seq_open(struct inode *inode, struct file *file,
- const struct seq_operations *ops)
-{
- struct seq_file *m;
- int ret;
-
- ret = seq_open(file, ops);
- if (!ret) {
- m = file->private_data;
- m->private = inode->i_private;
- }
- return ret;
-}
-
static int blk_flags_show(struct seq_file *m, const unsigned long flags,
const char *const *flag_name, int flag_name_count)
{
@@ -53,7 +34,7 @@ static int blk_flags_show(struct seq_file *m, const unsigned long flags,
if (!(flags & BIT(i)))
continue;
if (sep)
- seq_puts(m, " ");
+ seq_puts(m, "|");
sep = true;
if (i < flag_name_count && flag_name[i])
seq_puts(m, flag_name[i]);
@@ -63,41 +44,43 @@ static int blk_flags_show(struct seq_file *m, const unsigned long flags,
return 0;
}
+#define QUEUE_FLAG_NAME(name) [QUEUE_FLAG_##name] = #name
static const char *const blk_queue_flag_name[] = {
- [QUEUE_FLAG_QUEUED] = "QUEUED",
- [QUEUE_FLAG_STOPPED] = "STOPPED",
- [QUEUE_FLAG_SYNCFULL] = "SYNCFULL",
- [QUEUE_FLAG_ASYNCFULL] = "ASYNCFULL",
- [QUEUE_FLAG_DYING] = "DYING",
- [QUEUE_FLAG_BYPASS] = "BYPASS",
- [QUEUE_FLAG_BIDI] = "BIDI",
- [QUEUE_FLAG_NOMERGES] = "NOMERGES",
- [QUEUE_FLAG_SAME_COMP] = "SAME_COMP",
- [QUEUE_FLAG_FAIL_IO] = "FAIL_IO",
- [QUEUE_FLAG_STACKABLE] = "STACKABLE",
- [QUEUE_FLAG_NONROT] = "NONROT",
- [QUEUE_FLAG_IO_STAT] = "IO_STAT",
- [QUEUE_FLAG_DISCARD] = "DISCARD",
- [QUEUE_FLAG_NOXMERGES] = "NOXMERGES",
- [QUEUE_FLAG_ADD_RANDOM] = "ADD_RANDOM",
- [QUEUE_FLAG_SECERASE] = "SECERASE",
- [QUEUE_FLAG_SAME_FORCE] = "SAME_FORCE",
- [QUEUE_FLAG_DEAD] = "DEAD",
- [QUEUE_FLAG_INIT_DONE] = "INIT_DONE",
- [QUEUE_FLAG_NO_SG_MERGE] = "NO_SG_MERGE",
- [QUEUE_FLAG_POLL] = "POLL",
- [QUEUE_FLAG_WC] = "WC",
- [QUEUE_FLAG_FUA] = "FUA",
- [QUEUE_FLAG_FLUSH_NQ] = "FLUSH_NQ",
- [QUEUE_FLAG_DAX] = "DAX",
- [QUEUE_FLAG_STATS] = "STATS",
- [QUEUE_FLAG_POLL_STATS] = "POLL_STATS",
- [QUEUE_FLAG_REGISTERED] = "REGISTERED",
-};
-
-static int blk_queue_flags_show(struct seq_file *m, void *v)
-{
- struct request_queue *q = m->private;
+ QUEUE_FLAG_NAME(QUEUED),
+ QUEUE_FLAG_NAME(STOPPED),
+ QUEUE_FLAG_NAME(SYNCFULL),
+ QUEUE_FLAG_NAME(ASYNCFULL),
+ QUEUE_FLAG_NAME(DYING),
+ QUEUE_FLAG_NAME(BYPASS),
+ QUEUE_FLAG_NAME(BIDI),
+ QUEUE_FLAG_NAME(NOMERGES),
+ QUEUE_FLAG_NAME(SAME_COMP),
+ QUEUE_FLAG_NAME(FAIL_IO),
+ QUEUE_FLAG_NAME(STACKABLE),
+ QUEUE_FLAG_NAME(NONROT),
+ QUEUE_FLAG_NAME(IO_STAT),
+ QUEUE_FLAG_NAME(DISCARD),
+ QUEUE_FLAG_NAME(NOXMERGES),
+ QUEUE_FLAG_NAME(ADD_RANDOM),
+ QUEUE_FLAG_NAME(SECERASE),
+ QUEUE_FLAG_NAME(SAME_FORCE),
+ QUEUE_FLAG_NAME(DEAD),
+ QUEUE_FLAG_NAME(INIT_DONE),
+ QUEUE_FLAG_NAME(NO_SG_MERGE),
+ QUEUE_FLAG_NAME(POLL),
+ QUEUE_FLAG_NAME(WC),
+ QUEUE_FLAG_NAME(FUA),
+ QUEUE_FLAG_NAME(FLUSH_NQ),
+ QUEUE_FLAG_NAME(DAX),
+ QUEUE_FLAG_NAME(STATS),
+ QUEUE_FLAG_NAME(POLL_STATS),
+ QUEUE_FLAG_NAME(REGISTERED),
+};
+#undef QUEUE_FLAG_NAME
+
+static int queue_state_show(void *data, struct seq_file *m)
+{
+ struct request_queue *q = data;
blk_flags_show(m, q->queue_flags, blk_queue_flag_name,
ARRAY_SIZE(blk_queue_flag_name));
@@ -105,42 +88,41 @@ static int blk_queue_flags_show(struct seq_file *m, void *v)
return 0;
}
-static ssize_t blk_queue_flags_store(struct file *file, const char __user *ubuf,
- size_t len, loff_t *offp)
+static ssize_t queue_state_write(void *data, const char __user *buf,
+ size_t count, loff_t *ppos)
{
- struct request_queue *q = file_inode(file)->i_private;
- char op[16] = { }, *s;
+ struct request_queue *q = data;
+ char opbuf[16] = { }, *op;
+
+ /*
+ * The "state" attribute is removed after blk_cleanup_queue() has called
+ * blk_mq_free_queue(). Return if QUEUE_FLAG_DEAD has been set to avoid
+ * triggering a use-after-free.
+ */
+ if (blk_queue_dead(q))
+ return -ENOENT;
- len = min(len, sizeof(op) - 1);
- if (copy_from_user(op, ubuf, len))
+ if (count >= sizeof(opbuf)) {
+ pr_err("%s: operation too long\n", __func__);
+ goto inval;
+ }
+
+ if (copy_from_user(opbuf, buf, count))
return -EFAULT;
- s = op;
- strsep(&s, " \t\n"); /* strip trailing whitespace */
+ op = strstrip(opbuf);
if (strcmp(op, "run") == 0) {
blk_mq_run_hw_queues(q, true);
} else if (strcmp(op, "start") == 0) {
blk_mq_start_stopped_hw_queues(q, true);
} else {
- pr_err("%s: unsupported operation %s. Use either 'run' or 'start'\n",
- __func__, op);
+ pr_err("%s: unsupported operation '%s'\n", __func__, op);
+inval:
+ pr_err("%s: use either 'run' or 'start'\n", __func__);
return -EINVAL;
}
- return len;
-}
-
-static int blk_queue_flags_open(struct inode *inode, struct file *file)
-{
- return single_open(file, blk_queue_flags_show, inode->i_private);
+ return count;
}
-static const struct file_operations blk_queue_flags_fops = {
- .open = blk_queue_flags_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .write = blk_queue_flags_store,
-};
-
static void print_stat(struct seq_file *m, struct blk_rq_stat *stat)
{
if (stat->nr_samples) {
@@ -151,9 +133,9 @@ static void print_stat(struct seq_file *m, struct blk_rq_stat *stat)
}
}
-static int queue_poll_stat_show(struct seq_file *m, void *v)
+static int queue_poll_stat_show(void *data, struct seq_file *m)
{
- struct request_queue *q = m->private;
+ struct request_queue *q = data;
int bucket;
for (bucket = 0; bucket < BLK_MQ_POLL_STATS_BKTS/2; bucket++) {
@@ -168,28 +150,19 @@ static int queue_poll_stat_show(struct seq_file *m, void *v)
return 0;
}
-static int queue_poll_stat_open(struct inode *inode, struct file *file)
-{
- return single_open(file, queue_poll_stat_show, inode->i_private);
-}
-
-static const struct file_operations queue_poll_stat_fops = {
- .open = queue_poll_stat_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
+#define HCTX_STATE_NAME(name) [BLK_MQ_S_##name] = #name
static const char *const hctx_state_name[] = {
- [BLK_MQ_S_STOPPED] = "STOPPED",
- [BLK_MQ_S_TAG_ACTIVE] = "TAG_ACTIVE",
- [BLK_MQ_S_SCHED_RESTART] = "SCHED_RESTART",
- [BLK_MQ_S_TAG_WAITING] = "TAG_WAITING",
-
+ HCTX_STATE_NAME(STOPPED),
+ HCTX_STATE_NAME(TAG_ACTIVE),
+ HCTX_STATE_NAME(SCHED_RESTART),
+ HCTX_STATE_NAME(TAG_WAITING),
+ HCTX_STATE_NAME(START_ON_RUN),
};
-static int hctx_state_show(struct seq_file *m, void *v)
+#undef HCTX_STATE_NAME
+
+static int hctx_state_show(void *data, struct seq_file *m)
{
- struct blk_mq_hw_ctx *hctx = m->private;
+ struct blk_mq_hw_ctx *hctx = data;
blk_flags_show(m, hctx->state, hctx_state_name,
ARRAY_SIZE(hctx_state_name));
@@ -197,34 +170,26 @@ static int hctx_state_show(struct seq_file *m, void *v)
return 0;
}
-static int hctx_state_open(struct inode *inode, struct file *file)
-{
- return single_open(file, hctx_state_show, inode->i_private);
-}
-
-static const struct file_operations hctx_state_fops = {
- .open = hctx_state_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
+#define BLK_TAG_ALLOC_NAME(name) [BLK_TAG_ALLOC_##name] = #name
static const char *const alloc_policy_name[] = {
- [BLK_TAG_ALLOC_FIFO] = "fifo",
- [BLK_TAG_ALLOC_RR] = "rr",
+ BLK_TAG_ALLOC_NAME(FIFO),
+ BLK_TAG_ALLOC_NAME(RR),
};
+#undef BLK_TAG_ALLOC_NAME
+#define HCTX_FLAG_NAME(name) [ilog2(BLK_MQ_F_##name)] = #name
static const char *const hctx_flag_name[] = {
- [ilog2(BLK_MQ_F_SHOULD_MERGE)] = "SHOULD_MERGE",
- [ilog2(BLK_MQ_F_TAG_SHARED)] = "TAG_SHARED",
- [ilog2(BLK_MQ_F_SG_MERGE)] = "SG_MERGE",
- [ilog2(BLK_MQ_F_BLOCKING)] = "BLOCKING",
- [ilog2(BLK_MQ_F_NO_SCHED)] = "NO_SCHED",
+ HCTX_FLAG_NAME(SHOULD_MERGE),
+ HCTX_FLAG_NAME(TAG_SHARED),
+ HCTX_FLAG_NAME(SG_MERGE),
+ HCTX_FLAG_NAME(BLOCKING),
+ HCTX_FLAG_NAME(NO_SCHED),
};
+#undef HCTX_FLAG_NAME
-static int hctx_flags_show(struct seq_file *m, void *v)
+static int hctx_flags_show(void *data, struct seq_file *m)
{
- struct blk_mq_hw_ctx *hctx = m->private;
+ struct blk_mq_hw_ctx *hctx = data;
const int alloc_policy = BLK_MQ_FLAG_TO_ALLOC_POLICY(hctx->flags);
seq_puts(m, "alloc_policy=");
@@ -241,76 +206,69 @@ static int hctx_flags_show(struct seq_file *m, void *v)
return 0;
}
-static int hctx_flags_open(struct inode *inode, struct file *file)
-{
- return single_open(file, hctx_flags_show, inode->i_private);
-}
-
-static const struct file_operations hctx_flags_fops = {
- .open = hctx_flags_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
+#define REQ_OP_NAME(name) [REQ_OP_##name] = #name
static const char *const op_name[] = {
- [REQ_OP_READ] = "READ",
- [REQ_OP_WRITE] = "WRITE",
- [REQ_OP_FLUSH] = "FLUSH",
- [REQ_OP_DISCARD] = "DISCARD",
- [REQ_OP_ZONE_REPORT] = "ZONE_REPORT",
- [REQ_OP_SECURE_ERASE] = "SECURE_ERASE",
- [REQ_OP_ZONE_RESET] = "ZONE_RESET",
- [REQ_OP_WRITE_SAME] = "WRITE_SAME",
- [REQ_OP_WRITE_ZEROES] = "WRITE_ZEROES",
- [REQ_OP_SCSI_IN] = "SCSI_IN",
- [REQ_OP_SCSI_OUT] = "SCSI_OUT",
- [REQ_OP_DRV_IN] = "DRV_IN",
- [REQ_OP_DRV_OUT] = "DRV_OUT",
-};
-
+ REQ_OP_NAME(READ),
+ REQ_OP_NAME(WRITE),
+ REQ_OP_NAME(FLUSH),
+ REQ_OP_NAME(DISCARD),
+ REQ_OP_NAME(ZONE_REPORT),
+ REQ_OP_NAME(SECURE_ERASE),
+ REQ_OP_NAME(ZONE_RESET),
+ REQ_OP_NAME(WRITE_SAME),
+ REQ_OP_NAME(WRITE_ZEROES),
+ REQ_OP_NAME(SCSI_IN),
+ REQ_OP_NAME(SCSI_OUT),
+ REQ_OP_NAME(DRV_IN),
+ REQ_OP_NAME(DRV_OUT),
+};
+#undef REQ_OP_NAME
+
+#define CMD_FLAG_NAME(name) [__REQ_##name] = #name
static const char *const cmd_flag_name[] = {
- [__REQ_FAILFAST_DEV] = "FAILFAST_DEV",
- [__REQ_FAILFAST_TRANSPORT] = "FAILFAST_TRANSPORT",
- [__REQ_FAILFAST_DRIVER] = "FAILFAST_DRIVER",
- [__REQ_SYNC] = "SYNC",
- [__REQ_META] = "META",
- [__REQ_PRIO] = "PRIO",
- [__REQ_NOMERGE] = "NOMERGE",
- [__REQ_IDLE] = "IDLE",
- [__REQ_INTEGRITY] = "INTEGRITY",
- [__REQ_FUA] = "FUA",
- [__REQ_PREFLUSH] = "PREFLUSH",
- [__REQ_RAHEAD] = "RAHEAD",
- [__REQ_BACKGROUND] = "BACKGROUND",
- [__REQ_NR_BITS] = "NR_BITS",
-};
-
+ CMD_FLAG_NAME(FAILFAST_DEV),
+ CMD_FLAG_NAME(FAILFAST_TRANSPORT),
+ CMD_FLAG_NAME(FAILFAST_DRIVER),
+ CMD_FLAG_NAME(SYNC),
+ CMD_FLAG_NAME(META),
+ CMD_FLAG_NAME(PRIO),
+ CMD_FLAG_NAME(NOMERGE),
+ CMD_FLAG_NAME(IDLE),
+ CMD_FLAG_NAME(INTEGRITY),
+ CMD_FLAG_NAME(FUA),
+ CMD_FLAG_NAME(PREFLUSH),
+ CMD_FLAG_NAME(RAHEAD),
+ CMD_FLAG_NAME(BACKGROUND),
+ CMD_FLAG_NAME(NOUNMAP),
+};
+#undef CMD_FLAG_NAME
+
+#define RQF_NAME(name) [ilog2((__force u32)RQF_##name)] = #name
static const char *const rqf_name[] = {
- [ilog2((__force u32)RQF_SORTED)] = "SORTED",
- [ilog2((__force u32)RQF_STARTED)] = "STARTED",
- [ilog2((__force u32)RQF_QUEUED)] = "QUEUED",
- [ilog2((__force u32)RQF_SOFTBARRIER)] = "SOFTBARRIER",
- [ilog2((__force u32)RQF_FLUSH_SEQ)] = "FLUSH_SEQ",
- [ilog2((__force u32)RQF_MIXED_MERGE)] = "MIXED_MERGE",
- [ilog2((__force u32)RQF_MQ_INFLIGHT)] = "MQ_INFLIGHT",
- [ilog2((__force u32)RQF_DONTPREP)] = "DONTPREP",
- [ilog2((__force u32)RQF_PREEMPT)] = "PREEMPT",
- [ilog2((__force u32)RQF_COPY_USER)] = "COPY_USER",
- [ilog2((__force u32)RQF_FAILED)] = "FAILED",
- [ilog2((__force u32)RQF_QUIET)] = "QUIET",
- [ilog2((__force u32)RQF_ELVPRIV)] = "ELVPRIV",
- [ilog2((__force u32)RQF_IO_STAT)] = "IO_STAT",
- [ilog2((__force u32)RQF_ALLOCED)] = "ALLOCED",
- [ilog2((__force u32)RQF_PM)] = "PM",
- [ilog2((__force u32)RQF_HASHED)] = "HASHED",
- [ilog2((__force u32)RQF_STATS)] = "STATS",
- [ilog2((__force u32)RQF_SPECIAL_PAYLOAD)] = "SPECIAL_PAYLOAD",
-};
-
-static int blk_mq_debugfs_rq_show(struct seq_file *m, void *v)
+ RQF_NAME(SORTED),
+ RQF_NAME(STARTED),
+ RQF_NAME(QUEUED),
+ RQF_NAME(SOFTBARRIER),
+ RQF_NAME(FLUSH_SEQ),
+ RQF_NAME(MIXED_MERGE),
+ RQF_NAME(MQ_INFLIGHT),
+ RQF_NAME(DONTPREP),
+ RQF_NAME(PREEMPT),
+ RQF_NAME(COPY_USER),
+ RQF_NAME(FAILED),
+ RQF_NAME(QUIET),
+ RQF_NAME(ELVPRIV),
+ RQF_NAME(IO_STAT),
+ RQF_NAME(ALLOCED),
+ RQF_NAME(PM),
+ RQF_NAME(HASHED),
+ RQF_NAME(STATS),
+ RQF_NAME(SPECIAL_PAYLOAD),
+};
+#undef RQF_NAME
+
+int __blk_mq_debugfs_rq_show(struct seq_file *m, struct request *rq)
{
- struct request *rq = list_entry_rq(v);
const struct blk_mq_ops *const mq_ops = rq->q->mq_ops;
const unsigned int op = rq->cmd_flags & REQ_OP_MASK;
@@ -332,6 +290,13 @@ static int blk_mq_debugfs_rq_show(struct seq_file *m, void *v)
seq_puts(m, "}\n");
return 0;
}
+EXPORT_SYMBOL_GPL(__blk_mq_debugfs_rq_show);
+
+int blk_mq_debugfs_rq_show(struct seq_file *m, void *v)
+{
+ return __blk_mq_debugfs_rq_show(m, list_entry_rq(v));
+}
+EXPORT_SYMBOL_GPL(blk_mq_debugfs_rq_show);
static void *hctx_dispatch_start(struct seq_file *m, loff_t *pos)
__acquires(&hctx->lock)
@@ -364,38 +329,14 @@ static const struct seq_operations hctx_dispatch_seq_ops = {
.show = blk_mq_debugfs_rq_show,
};
-static int hctx_dispatch_open(struct inode *inode, struct file *file)
-{
- return blk_mq_debugfs_seq_open(inode, file, &hctx_dispatch_seq_ops);
-}
-
-static const struct file_operations hctx_dispatch_fops = {
- .open = hctx_dispatch_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release,
-};
-
-static int hctx_ctx_map_show(struct seq_file *m, void *v)
+static int hctx_ctx_map_show(void *data, struct seq_file *m)
{
- struct blk_mq_hw_ctx *hctx = m->private;
+ struct blk_mq_hw_ctx *hctx = data;
sbitmap_bitmap_show(&hctx->ctx_map, m);
return 0;
}
-static int hctx_ctx_map_open(struct inode *inode, struct file *file)
-{
- return single_open(file, hctx_ctx_map_show, inode->i_private);
-}
-
-static const struct file_operations hctx_ctx_map_fops = {
- .open = hctx_ctx_map_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
static void blk_mq_debugfs_tags_show(struct seq_file *m,
struct blk_mq_tags *tags)
{
@@ -413,9 +354,9 @@ static void blk_mq_debugfs_tags_show(struct seq_file *m,
}
}
-static int hctx_tags_show(struct seq_file *m, void *v)
+static int hctx_tags_show(void *data, struct seq_file *m)
{
- struct blk_mq_hw_ctx *hctx = m->private;
+ struct blk_mq_hw_ctx *hctx = data;
struct request_queue *q = hctx->queue;
int res;
@@ -430,21 +371,9 @@ out:
return res;
}
-static int hctx_tags_open(struct inode *inode, struct file *file)
-{
- return single_open(file, hctx_tags_show, inode->i_private);
-}
-
-static const struct file_operations hctx_tags_fops = {
- .open = hctx_tags_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static int hctx_tags_bitmap_show(struct seq_file *m, void *v)
+static int hctx_tags_bitmap_show(void *data, struct seq_file *m)
{
- struct blk_mq_hw_ctx *hctx = m->private;
+ struct blk_mq_hw_ctx *hctx = data;
struct request_queue *q = hctx->queue;
int res;
@@ -459,21 +388,9 @@ out:
return res;
}
-static int hctx_tags_bitmap_open(struct inode *inode, struct file *file)
+static int hctx_sched_tags_show(void *data, struct seq_file *m)
{
- return single_open(file, hctx_tags_bitmap_show, inode->i_private);
-}
-
-static const struct file_operations hctx_tags_bitmap_fops = {
- .open = hctx_tags_bitmap_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static int hctx_sched_tags_show(struct seq_file *m, void *v)
-{
- struct blk_mq_hw_ctx *hctx = m->private;
+ struct blk_mq_hw_ctx *hctx = data;
struct request_queue *q = hctx->queue;
int res;
@@ -488,21 +405,9 @@ out:
return res;
}
-static int hctx_sched_tags_open(struct inode *inode, struct file *file)
+static int hctx_sched_tags_bitmap_show(void *data, struct seq_file *m)
{
- return single_open(file, hctx_sched_tags_show, inode->i_private);
-}
-
-static const struct file_operations hctx_sched_tags_fops = {
- .open = hctx_sched_tags_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static int hctx_sched_tags_bitmap_show(struct seq_file *m, void *v)
-{
- struct blk_mq_hw_ctx *hctx = m->private;
+ struct blk_mq_hw_ctx *hctx = data;
struct request_queue *q = hctx->queue;
int res;
@@ -517,21 +422,9 @@ out:
return res;
}
-static int hctx_sched_tags_bitmap_open(struct inode *inode, struct file *file)
-{
- return single_open(file, hctx_sched_tags_bitmap_show, inode->i_private);
-}
-
-static const struct file_operations hctx_sched_tags_bitmap_fops = {
- .open = hctx_sched_tags_bitmap_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static int hctx_io_poll_show(struct seq_file *m, void *v)
+static int hctx_io_poll_show(void *data, struct seq_file *m)
{
- struct blk_mq_hw_ctx *hctx = m->private;
+ struct blk_mq_hw_ctx *hctx = data;
seq_printf(m, "considered=%lu\n", hctx->poll_considered);
seq_printf(m, "invoked=%lu\n", hctx->poll_invoked);
@@ -539,32 +432,18 @@ static int hctx_io_poll_show(struct seq_file *m, void *v)
return 0;
}
-static int hctx_io_poll_open(struct inode *inode, struct file *file)
-{
- return single_open(file, hctx_io_poll_show, inode->i_private);
-}
-
-static ssize_t hctx_io_poll_write(struct file *file, const char __user *buf,
+static ssize_t hctx_io_poll_write(void *data, const char __user *buf,
size_t count, loff_t *ppos)
{
- struct seq_file *m = file->private_data;
- struct blk_mq_hw_ctx *hctx = m->private;
+ struct blk_mq_hw_ctx *hctx = data;
hctx->poll_considered = hctx->poll_invoked = hctx->poll_success = 0;
return count;
}
-static const struct file_operations hctx_io_poll_fops = {
- .open = hctx_io_poll_open,
- .read = seq_read,
- .write = hctx_io_poll_write,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static int hctx_dispatched_show(struct seq_file *m, void *v)
+static int hctx_dispatched_show(void *data, struct seq_file *m)
{
- struct blk_mq_hw_ctx *hctx = m->private;
+ struct blk_mq_hw_ctx *hctx = data;
int i;
seq_printf(m, "%8u\t%lu\n", 0U, hctx->dispatched[0]);
@@ -579,16 +458,10 @@ static int hctx_dispatched_show(struct seq_file *m, void *v)
return 0;
}
-static int hctx_dispatched_open(struct inode *inode, struct file *file)
-{
- return single_open(file, hctx_dispatched_show, inode->i_private);
-}
-
-static ssize_t hctx_dispatched_write(struct file *file, const char __user *buf,
+static ssize_t hctx_dispatched_write(void *data, const char __user *buf,
size_t count, loff_t *ppos)
{
- struct seq_file *m = file->private_data;
- struct blk_mq_hw_ctx *hctx = m->private;
+ struct blk_mq_hw_ctx *hctx = data;
int i;
for (i = 0; i < BLK_MQ_MAX_DISPATCH_ORDER; i++)
@@ -596,96 +469,48 @@ static ssize_t hctx_dispatched_write(struct file *file, const char __user *buf,
return count;
}
-static const struct file_operations hctx_dispatched_fops = {
- .open = hctx_dispatched_open,
- .read = seq_read,
- .write = hctx_dispatched_write,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static int hctx_queued_show(struct seq_file *m, void *v)
+static int hctx_queued_show(void *data, struct seq_file *m)
{
- struct blk_mq_hw_ctx *hctx = m->private;
+ struct blk_mq_hw_ctx *hctx = data;
seq_printf(m, "%lu\n", hctx->queued);
return 0;
}
-static int hctx_queued_open(struct inode *inode, struct file *file)
-{
- return single_open(file, hctx_queued_show, inode->i_private);
-}
-
-static ssize_t hctx_queued_write(struct file *file, const char __user *buf,
+static ssize_t hctx_queued_write(void *data, const char __user *buf,
size_t count, loff_t *ppos)
{
- struct seq_file *m = file->private_data;
- struct blk_mq_hw_ctx *hctx = m->private;
+ struct blk_mq_hw_ctx *hctx = data;
hctx->queued = 0;
return count;
}
-static const struct file_operations hctx_queued_fops = {
- .open = hctx_queued_open,
- .read = seq_read,
- .write = hctx_queued_write,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static int hctx_run_show(struct seq_file *m, void *v)
+static int hctx_run_show(void *data, struct seq_file *m)
{
- struct blk_mq_hw_ctx *hctx = m->private;
+ struct blk_mq_hw_ctx *hctx = data;
seq_printf(m, "%lu\n", hctx->run);
return 0;
}
-static int hctx_run_open(struct inode *inode, struct file *file)
+static ssize_t hctx_run_write(void *data, const char __user *buf, size_t count,
+ loff_t *ppos)
{
- return single_open(file, hctx_run_show, inode->i_private);
-}
-
-static ssize_t hctx_run_write(struct file *file, const char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct seq_file *m = file->private_data;
- struct blk_mq_hw_ctx *hctx = m->private;
+ struct blk_mq_hw_ctx *hctx = data;
hctx->run = 0;
return count;
}
-static const struct file_operations hctx_run_fops = {
- .open = hctx_run_open,
- .read = seq_read,
- .write = hctx_run_write,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static int hctx_active_show(struct seq_file *m, void *v)
+static int hctx_active_show(void *data, struct seq_file *m)
{
- struct blk_mq_hw_ctx *hctx = m->private;
+ struct blk_mq_hw_ctx *hctx = data;
seq_printf(m, "%d\n", atomic_read(&hctx->nr_active));
return 0;
}
-static int hctx_active_open(struct inode *inode, struct file *file)
-{
- return single_open(file, hctx_active_show, inode->i_private);
-}
-
-static const struct file_operations hctx_active_fops = {
- .open = hctx_active_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
static void *ctx_rq_list_start(struct seq_file *m, loff_t *pos)
__acquires(&ctx->lock)
{
@@ -716,156 +541,192 @@ static const struct seq_operations ctx_rq_list_seq_ops = {
.stop = ctx_rq_list_stop,
.show = blk_mq_debugfs_rq_show,
};
-
-static int ctx_rq_list_open(struct inode *inode, struct file *file)
+static int ctx_dispatched_show(void *data, struct seq_file *m)
{
- return blk_mq_debugfs_seq_open(inode, file, &ctx_rq_list_seq_ops);
-}
-
-static const struct file_operations ctx_rq_list_fops = {
- .open = ctx_rq_list_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release,
-};
-
-static int ctx_dispatched_show(struct seq_file *m, void *v)
-{
- struct blk_mq_ctx *ctx = m->private;
+ struct blk_mq_ctx *ctx = data;
seq_printf(m, "%lu %lu\n", ctx->rq_dispatched[1], ctx->rq_dispatched[0]);
return 0;
}
-static int ctx_dispatched_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ctx_dispatched_show, inode->i_private);
-}
-
-static ssize_t ctx_dispatched_write(struct file *file, const char __user *buf,
+static ssize_t ctx_dispatched_write(void *data, const char __user *buf,
size_t count, loff_t *ppos)
{
- struct seq_file *m = file->private_data;
- struct blk_mq_ctx *ctx = m->private;
+ struct blk_mq_ctx *ctx = data;
ctx->rq_dispatched[0] = ctx->rq_dispatched[1] = 0;
return count;
}
-static const struct file_operations ctx_dispatched_fops = {
- .open = ctx_dispatched_open,
- .read = seq_read,
- .write = ctx_dispatched_write,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static int ctx_merged_show(struct seq_file *m, void *v)
+static int ctx_merged_show(void *data, struct seq_file *m)
{
- struct blk_mq_ctx *ctx = m->private;
+ struct blk_mq_ctx *ctx = data;
seq_printf(m, "%lu\n", ctx->rq_merged);
return 0;
}
-static int ctx_merged_open(struct inode *inode, struct file *file)
+static ssize_t ctx_merged_write(void *data, const char __user *buf,
+ size_t count, loff_t *ppos)
{
- return single_open(file, ctx_merged_show, inode->i_private);
-}
-
-static ssize_t ctx_merged_write(struct file *file, const char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct seq_file *m = file->private_data;
- struct blk_mq_ctx *ctx = m->private;
+ struct blk_mq_ctx *ctx = data;
ctx->rq_merged = 0;
return count;
}
-static const struct file_operations ctx_merged_fops = {
- .open = ctx_merged_open,
- .read = seq_read,
- .write = ctx_merged_write,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static int ctx_completed_show(struct seq_file *m, void *v)
+static int ctx_completed_show(void *data, struct seq_file *m)
{
- struct blk_mq_ctx *ctx = m->private;
+ struct blk_mq_ctx *ctx = data;
seq_printf(m, "%lu %lu\n", ctx->rq_completed[1], ctx->rq_completed[0]);
return 0;
}
-static int ctx_completed_open(struct inode *inode, struct file *file)
+static ssize_t ctx_completed_write(void *data, const char __user *buf,
+ size_t count, loff_t *ppos)
{
- return single_open(file, ctx_completed_show, inode->i_private);
+ struct blk_mq_ctx *ctx = data;
+
+ ctx->rq_completed[0] = ctx->rq_completed[1] = 0;
+ return count;
}
-static ssize_t ctx_completed_write(struct file *file, const char __user *buf,
- size_t count, loff_t *ppos)
+static int blk_mq_debugfs_show(struct seq_file *m, void *v)
+{
+ const struct blk_mq_debugfs_attr *attr = m->private;
+ void *data = d_inode(m->file->f_path.dentry->d_parent)->i_private;
+
+ return attr->show(data, m);
+}
+
+static ssize_t blk_mq_debugfs_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
{
struct seq_file *m = file->private_data;
- struct blk_mq_ctx *ctx = m->private;
+ const struct blk_mq_debugfs_attr *attr = m->private;
+ void *data = d_inode(file->f_path.dentry->d_parent)->i_private;
- ctx->rq_completed[0] = ctx->rq_completed[1] = 0;
- return count;
+ if (!attr->write)
+ return -EPERM;
+
+ return attr->write(data, buf, count, ppos);
+}
+
+static int blk_mq_debugfs_open(struct inode *inode, struct file *file)
+{
+ const struct blk_mq_debugfs_attr *attr = inode->i_private;
+ void *data = d_inode(file->f_path.dentry->d_parent)->i_private;
+ struct seq_file *m;
+ int ret;
+
+ if (attr->seq_ops) {
+ ret = seq_open(file, attr->seq_ops);
+ if (!ret) {
+ m = file->private_data;
+ m->private = data;
+ }
+ return ret;
+ }
+
+ if (WARN_ON_ONCE(!attr->show))
+ return -EPERM;
+
+ return single_open(file, blk_mq_debugfs_show, inode->i_private);
+}
+
+static int blk_mq_debugfs_release(struct inode *inode, struct file *file)
+{
+ const struct blk_mq_debugfs_attr *attr = inode->i_private;
+
+ if (attr->show)
+ return single_release(inode, file);
+ else
+ return seq_release(inode, file);
}
-static const struct file_operations ctx_completed_fops = {
- .open = ctx_completed_open,
+const struct file_operations blk_mq_debugfs_fops = {
+ .open = blk_mq_debugfs_open,
.read = seq_read,
- .write = ctx_completed_write,
+ .write = blk_mq_debugfs_write,
.llseek = seq_lseek,
- .release = single_release,
+ .release = blk_mq_debugfs_release,
};
static const struct blk_mq_debugfs_attr blk_mq_debugfs_queue_attrs[] = {
- {"poll_stat", 0400, &queue_poll_stat_fops},
- {"state", 0600, &blk_queue_flags_fops},
+ {"poll_stat", 0400, queue_poll_stat_show},
+ {"state", 0600, queue_state_show, queue_state_write},
{},
};
static const struct blk_mq_debugfs_attr blk_mq_debugfs_hctx_attrs[] = {
- {"state", 0400, &hctx_state_fops},
- {"flags", 0400, &hctx_flags_fops},
- {"dispatch", 0400, &hctx_dispatch_fops},
- {"ctx_map", 0400, &hctx_ctx_map_fops},
- {"tags", 0400, &hctx_tags_fops},
- {"tags_bitmap", 0400, &hctx_tags_bitmap_fops},
- {"sched_tags", 0400, &hctx_sched_tags_fops},
- {"sched_tags_bitmap", 0400, &hctx_sched_tags_bitmap_fops},
- {"io_poll", 0600, &hctx_io_poll_fops},
- {"dispatched", 0600, &hctx_dispatched_fops},
- {"queued", 0600, &hctx_queued_fops},
- {"run", 0600, &hctx_run_fops},
- {"active", 0400, &hctx_active_fops},
+ {"state", 0400, hctx_state_show},
+ {"flags", 0400, hctx_flags_show},
+ {"dispatch", 0400, .seq_ops = &hctx_dispatch_seq_ops},
+ {"ctx_map", 0400, hctx_ctx_map_show},
+ {"tags", 0400, hctx_tags_show},
+ {"tags_bitmap", 0400, hctx_tags_bitmap_show},
+ {"sched_tags", 0400, hctx_sched_tags_show},
+ {"sched_tags_bitmap", 0400, hctx_sched_tags_bitmap_show},
+ {"io_poll", 0600, hctx_io_poll_show, hctx_io_poll_write},
+ {"dispatched", 0600, hctx_dispatched_show, hctx_dispatched_write},
+ {"queued", 0600, hctx_queued_show, hctx_queued_write},
+ {"run", 0600, hctx_run_show, hctx_run_write},
+ {"active", 0400, hctx_active_show},
{},
};
static const struct blk_mq_debugfs_attr blk_mq_debugfs_ctx_attrs[] = {
- {"rq_list", 0400, &ctx_rq_list_fops},
- {"dispatched", 0600, &ctx_dispatched_fops},
- {"merged", 0600, &ctx_merged_fops},
- {"completed", 0600, &ctx_completed_fops},
+ {"rq_list", 0400, .seq_ops = &ctx_rq_list_seq_ops},
+ {"dispatched", 0600, ctx_dispatched_show, ctx_dispatched_write},
+ {"merged", 0600, ctx_merged_show, ctx_merged_write},
+ {"completed", 0600, ctx_completed_show, ctx_completed_write},
{},
};
+static bool debugfs_create_files(struct dentry *parent, void *data,
+ const struct blk_mq_debugfs_attr *attr)
+{
+ d_inode(parent)->i_private = data;
+
+ for (; attr->name; attr++) {
+ if (!debugfs_create_file(attr->name, attr->mode, parent,
+ (void *)attr, &blk_mq_debugfs_fops))
+ return false;
+ }
+ return true;
+}
+
int blk_mq_debugfs_register(struct request_queue *q)
{
+ struct blk_mq_hw_ctx *hctx;
+ int i;
+
if (!blk_debugfs_root)
return -ENOENT;
q->debugfs_dir = debugfs_create_dir(kobject_name(q->kobj.parent),
blk_debugfs_root);
if (!q->debugfs_dir)
- goto err;
+ return -ENOMEM;
- if (blk_mq_debugfs_register_mq(q))
+ if (!debugfs_create_files(q->debugfs_dir, q,
+ blk_mq_debugfs_queue_attrs))
goto err;
+ /*
+ * blk_mq_init_hctx() attempted to do this already, but q->debugfs_dir
+ * didn't exist yet (because we don't know what to name the directory
+ * until the queue is registered to a gendisk).
+ */
+ queue_for_each_hw_ctx(q, hctx, i) {
+ if (!hctx->debugfs_dir && blk_mq_debugfs_register_hctx(q, hctx))
+ goto err;
+ if (q->elevator && !hctx->sched_debugfs_dir &&
+ blk_mq_debugfs_register_sched_hctx(q, hctx))
+ goto err;
+ }
+
return 0;
err:
@@ -876,30 +737,18 @@ err:
void blk_mq_debugfs_unregister(struct request_queue *q)
{
debugfs_remove_recursive(q->debugfs_dir);
- q->mq_debugfs_dir = NULL;
+ q->sched_debugfs_dir = NULL;
q->debugfs_dir = NULL;
}
-static bool debugfs_create_files(struct dentry *parent, void *data,
- const struct blk_mq_debugfs_attr *attr)
-{
- for (; attr->name; attr++) {
- if (!debugfs_create_file(attr->name, attr->mode, parent,
- data, attr->fops))
- return false;
- }
- return true;
-}
-
-static int blk_mq_debugfs_register_ctx(struct request_queue *q,
- struct blk_mq_ctx *ctx,
- struct dentry *hctx_dir)
+static int blk_mq_debugfs_register_ctx(struct blk_mq_hw_ctx *hctx,
+ struct blk_mq_ctx *ctx)
{
struct dentry *ctx_dir;
char name[20];
snprintf(name, sizeof(name), "cpu%u", ctx->cpu);
- ctx_dir = debugfs_create_dir(name, hctx_dir);
+ ctx_dir = debugfs_create_dir(name, hctx->debugfs_dir);
if (!ctx_dir)
return -ENOMEM;
@@ -909,59 +758,122 @@ static int blk_mq_debugfs_register_ctx(struct request_queue *q,
return 0;
}
-static int blk_mq_debugfs_register_hctx(struct request_queue *q,
- struct blk_mq_hw_ctx *hctx)
+int blk_mq_debugfs_register_hctx(struct request_queue *q,
+ struct blk_mq_hw_ctx *hctx)
{
struct blk_mq_ctx *ctx;
- struct dentry *hctx_dir;
char name[20];
int i;
- snprintf(name, sizeof(name), "%u", hctx->queue_num);
- hctx_dir = debugfs_create_dir(name, q->mq_debugfs_dir);
- if (!hctx_dir)
- return -ENOMEM;
+ if (!q->debugfs_dir)
+ return -ENOENT;
- if (!debugfs_create_files(hctx_dir, hctx, blk_mq_debugfs_hctx_attrs))
+ snprintf(name, sizeof(name), "hctx%u", hctx->queue_num);
+ hctx->debugfs_dir = debugfs_create_dir(name, q->debugfs_dir);
+ if (!hctx->debugfs_dir)
return -ENOMEM;
+ if (!debugfs_create_files(hctx->debugfs_dir, hctx,
+ blk_mq_debugfs_hctx_attrs))
+ goto err;
+
hctx_for_each_ctx(hctx, ctx, i) {
- if (blk_mq_debugfs_register_ctx(q, ctx, hctx_dir))
+ if (blk_mq_debugfs_register_ctx(hctx, ctx))
+ goto err;
+ }
+
+ return 0;
+
+err:
+ blk_mq_debugfs_unregister_hctx(hctx);
+ return -ENOMEM;
+}
+
+void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx)
+{
+ debugfs_remove_recursive(hctx->debugfs_dir);
+ hctx->sched_debugfs_dir = NULL;
+ hctx->debugfs_dir = NULL;
+}
+
+int blk_mq_debugfs_register_hctxs(struct request_queue *q)
+{
+ struct blk_mq_hw_ctx *hctx;
+ int i;
+
+ queue_for_each_hw_ctx(q, hctx, i) {
+ if (blk_mq_debugfs_register_hctx(q, hctx))
return -ENOMEM;
}
return 0;
}
-int blk_mq_debugfs_register_mq(struct request_queue *q)
+void blk_mq_debugfs_unregister_hctxs(struct request_queue *q)
{
struct blk_mq_hw_ctx *hctx;
int i;
+ queue_for_each_hw_ctx(q, hctx, i)
+ blk_mq_debugfs_unregister_hctx(hctx);
+}
+
+int blk_mq_debugfs_register_sched(struct request_queue *q)
+{
+ struct elevator_type *e = q->elevator->type;
+
if (!q->debugfs_dir)
return -ENOENT;
- q->mq_debugfs_dir = debugfs_create_dir("mq", q->debugfs_dir);
- if (!q->mq_debugfs_dir)
- goto err;
+ if (!e->queue_debugfs_attrs)
+ return 0;
- if (!debugfs_create_files(q->mq_debugfs_dir, q, blk_mq_debugfs_queue_attrs))
- goto err;
+ q->sched_debugfs_dir = debugfs_create_dir("sched", q->debugfs_dir);
+ if (!q->sched_debugfs_dir)
+ return -ENOMEM;
- queue_for_each_hw_ctx(q, hctx, i) {
- if (blk_mq_debugfs_register_hctx(q, hctx))
- goto err;
- }
+ if (!debugfs_create_files(q->sched_debugfs_dir, q,
+ e->queue_debugfs_attrs))
+ goto err;
return 0;
err:
- blk_mq_debugfs_unregister_mq(q);
+ blk_mq_debugfs_unregister_sched(q);
return -ENOMEM;
}
-void blk_mq_debugfs_unregister_mq(struct request_queue *q)
+void blk_mq_debugfs_unregister_sched(struct request_queue *q)
+{
+ debugfs_remove_recursive(q->sched_debugfs_dir);
+ q->sched_debugfs_dir = NULL;
+}
+
+int blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
+ struct blk_mq_hw_ctx *hctx)
+{
+ struct elevator_type *e = q->elevator->type;
+
+ if (!hctx->debugfs_dir)
+ return -ENOENT;
+
+ if (!e->hctx_debugfs_attrs)
+ return 0;
+
+ hctx->sched_debugfs_dir = debugfs_create_dir("sched",
+ hctx->debugfs_dir);
+ if (!hctx->sched_debugfs_dir)
+ return -ENOMEM;
+
+ if (!debugfs_create_files(hctx->sched_debugfs_dir, hctx,
+ e->hctx_debugfs_attrs))
+ return -ENOMEM;
+
+ return 0;
+}
+
+void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx)
{
- debugfs_remove_recursive(q->mq_debugfs_dir);
- q->mq_debugfs_dir = NULL;
+ debugfs_remove_recursive(hctx->sched_debugfs_dir);
+ hctx->sched_debugfs_dir = NULL;
}
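
For readers less familiar with the stringification idiom this file switches to above (QUEUE_FLAG_NAME, HCTX_STATE_NAME, REQ_OP_NAME, and so on), here is a small self-contained sketch of the [PREFIX_##name] = #name designated-initializer pattern; the enum and names are illustrative, not taken from the kernel:

#include <stdio.h>

enum { DEMO_FLAG_QUEUED, DEMO_FLAG_STOPPED, DEMO_FLAG_DYING, DEMO_FLAG_NR };

/* Token-paste the enum for the index, stringify it for the value. */
#define DEMO_FLAG_NAME(name) [DEMO_FLAG_##name] = #name
static const char *const demo_flag_name[] = {
	DEMO_FLAG_NAME(QUEUED),
	DEMO_FLAG_NAME(STOPPED),
	DEMO_FLAG_NAME(DYING),
};
#undef DEMO_FLAG_NAME

int main(void)
{
	for (int i = 0; i < DEMO_FLAG_NR; i++)
		printf("%d -> %s\n", i, demo_flag_name[i]);
	return 0;
}

Writing the table this way means a typo in a flag name fails to compile instead of silently mislabelling a bit, which is the point of the conversion.
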
diff --git a/block/blk-mq-debugfs.h b/block/blk-mq-debugfs.h
new file mode 100644
index 000000000000..a182e6f97565
--- /dev/null
+++ b/block/blk-mq-debugfs.h
@@ -0,0 +1,82 @@
+#ifndef INT_BLK_MQ_DEBUGFS_H
+#define INT_BLK_MQ_DEBUGFS_H
+
+#ifdef CONFIG_BLK_DEBUG_FS
+
+#include <linux/seq_file.h>
+
+struct blk_mq_debugfs_attr {
+ const char *name;
+ umode_t mode;
+ int (*show)(void *, struct seq_file *);
+ ssize_t (*write)(void *, const char __user *, size_t, loff_t *);
+ /* Set either .show or .seq_ops. */
+ const struct seq_operations *seq_ops;
+};
+
+int __blk_mq_debugfs_rq_show(struct seq_file *m, struct request *rq);
+int blk_mq_debugfs_rq_show(struct seq_file *m, void *v);
+
+int blk_mq_debugfs_register(struct request_queue *q);
+void blk_mq_debugfs_unregister(struct request_queue *q);
+int blk_mq_debugfs_register_hctx(struct request_queue *q,
+ struct blk_mq_hw_ctx *hctx);
+void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx);
+int blk_mq_debugfs_register_hctxs(struct request_queue *q);
+void blk_mq_debugfs_unregister_hctxs(struct request_queue *q);
+
+int blk_mq_debugfs_register_sched(struct request_queue *q);
+void blk_mq_debugfs_unregister_sched(struct request_queue *q);
+int blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
+ struct blk_mq_hw_ctx *hctx);
+void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx);
+#else
+static inline int blk_mq_debugfs_register(struct request_queue *q)
+{
+ return 0;
+}
+
+static inline void blk_mq_debugfs_unregister(struct request_queue *q)
+{
+}
+
+static inline int blk_mq_debugfs_register_hctx(struct request_queue *q,
+ struct blk_mq_hw_ctx *hctx)
+{
+ return 0;
+}
+
+static inline void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx)
+{
+}
+
+static inline int blk_mq_debugfs_register_hctxs(struct request_queue *q)
+{
+ return 0;
+}
+
+static inline void blk_mq_debugfs_unregister_hctxs(struct request_queue *q)
+{
+}
+
+static inline int blk_mq_debugfs_register_sched(struct request_queue *q)
+{
+ return 0;
+}
+
+static inline void blk_mq_debugfs_unregister_sched(struct request_queue *q)
+{
+}
+
+static inline int blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
+ struct blk_mq_hw_ctx *hctx)
+{
+ return 0;
+}
+
+static inline void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx)
+{
+}
+#endif
+
+#endif
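
The header above is what elevators hook into through the new queue_debugfs_attrs/hctx_debugfs_attrs pointers (see the kyber and mq-deadline hunks further down). A minimal sketch of a scheduler-side attribute table, with a hypothetical demo_data/nr_foo used purely for illustration:

#ifdef CONFIG_BLK_DEBUG_FS
/* .show receives the object stored in the parent directory's i_private. */
static int demo_nr_foo_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	struct demo_data *dd = q->elevator->elevator_data;

	seq_printf(m, "%u\n", dd->nr_foo);
	return 0;
}

static const struct blk_mq_debugfs_attr demo_queue_debugfs_attrs[] = {
	{"nr_foo", 0400, demo_nr_foo_show},
	{},	/* debugfs_create_files() stops at the NULL-name sentinel */
};
#endif

A read-write file would additionally set the .write callback, and a list-style file would set .seq_ops instead of .show, as the "dispatch" and "rq_list" entries do.
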
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index 8b361e192e8a..1f5b692526ae 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -11,6 +11,7 @@
#include "blk.h"
#include "blk-mq.h"
+#include "blk-mq-debugfs.h"
#include "blk-mq-sched.h"
#include "blk-mq-tag.h"
#include "blk-wbt.h"
@@ -82,11 +83,7 @@ struct request *blk_mq_sched_get_request(struct request_queue *q,
if (likely(!data->hctx))
data->hctx = blk_mq_map_queue(q, data->ctx->cpu);
- /*
- * For a reserved tag, allocate a normal request since we might
- * have driver dependencies on the value of the internal tag.
- */
- if (e && !(data->flags & BLK_MQ_REQ_RESERVED)) {
+ if (e) {
data->flags |= BLK_MQ_REQ_INTERNAL;
/*
@@ -476,6 +473,8 @@ int blk_mq_sched_init_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
}
}
+ blk_mq_debugfs_register_sched_hctx(q, hctx);
+
return 0;
}
@@ -487,6 +486,8 @@ void blk_mq_sched_exit_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
if (!e)
return;
+ blk_mq_debugfs_unregister_sched_hctx(hctx);
+
if (e->type->ops.mq.exit_hctx && hctx->sched_data) {
e->type->ops.mq.exit_hctx(hctx, hctx_idx);
hctx->sched_data = NULL;
@@ -523,8 +524,10 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
if (ret)
goto err;
- if (e->ops.mq.init_hctx) {
- queue_for_each_hw_ctx(q, hctx, i) {
+ blk_mq_debugfs_register_sched(q);
+
+ queue_for_each_hw_ctx(q, hctx, i) {
+ if (e->ops.mq.init_hctx) {
ret = e->ops.mq.init_hctx(hctx, i);
if (ret) {
eq = q->elevator;
@@ -533,6 +536,7 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
return ret;
}
}
+ blk_mq_debugfs_register_sched_hctx(q, hctx);
}
return 0;
@@ -548,14 +552,14 @@ void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e)
struct blk_mq_hw_ctx *hctx;
unsigned int i;
- if (e->type->ops.mq.exit_hctx) {
- queue_for_each_hw_ctx(q, hctx, i) {
- if (hctx->sched_data) {
- e->type->ops.mq.exit_hctx(hctx, i);
- hctx->sched_data = NULL;
- }
+ queue_for_each_hw_ctx(q, hctx, i) {
+ blk_mq_debugfs_unregister_sched_hctx(hctx);
+ if (e->type->ops.mq.exit_hctx && hctx->sched_data) {
+ e->type->ops.mq.exit_hctx(hctx, i);
+ hctx->sched_data = NULL;
}
}
+ blk_mq_debugfs_unregister_sched(q);
if (e->type->ops.mq.exit_sched)
e->type->ops.mq.exit_sched(e);
blk_mq_sched_tags_teardown(q);
diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c
index ec0afdf765e3..79969c3c234f 100644
--- a/block/blk-mq-sysfs.c
+++ b/block/blk-mq-sysfs.c
@@ -258,8 +258,6 @@ static void __blk_mq_unregister_dev(struct device *dev, struct request_queue *q)
queue_for_each_hw_ctx(q, hctx, i)
blk_mq_unregister_hctx(hctx);
- blk_mq_debugfs_unregister_mq(q);
-
kobject_uevent(&q->mq_kobj, KOBJ_REMOVE);
kobject_del(&q->mq_kobj);
kobject_put(&dev->kobj);
@@ -318,8 +316,6 @@ int __blk_mq_register_dev(struct device *dev, struct request_queue *q)
kobject_uevent(&q->mq_kobj, KOBJ_ADD);
- blk_mq_debugfs_register(q);
-
queue_for_each_hw_ctx(q, hctx, i) {
ret = blk_mq_register_hctx(hctx);
if (ret)
@@ -335,8 +331,6 @@ unreg:
while (--i >= 0)
blk_mq_unregister_hctx(q->queue_hw_ctx[i]);
- blk_mq_debugfs_unregister_mq(q);
-
kobject_uevent(&q->mq_kobj, KOBJ_REMOVE);
kobject_del(&q->mq_kobj);
kobject_put(&dev->kobj);
@@ -364,8 +358,6 @@ void blk_mq_sysfs_unregister(struct request_queue *q)
if (!q->mq_sysfs_init_done)
goto unlock;
- blk_mq_debugfs_unregister_mq(q);
-
queue_for_each_hw_ctx(q, hctx, i)
blk_mq_unregister_hctx(hctx);
@@ -382,8 +374,6 @@ int blk_mq_sysfs_register(struct request_queue *q)
if (!q->mq_sysfs_init_done)
goto unlock;
- blk_mq_debugfs_register_mq(q);
-
queue_for_each_hw_ctx(q, hctx, i) {
ret = blk_mq_register_hctx(hctx);
if (ret)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index bf90684a007a..5d4ce7eb8dbf 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -31,6 +31,7 @@
#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
+#include "blk-mq-debugfs.h"
#include "blk-mq-tag.h"
#include "blk-stat.h"
#include "blk-wbt.h"
@@ -41,6 +42,7 @@ static LIST_HEAD(all_q_list);
static void blk_mq_poll_stats_start(struct request_queue *q);
static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb);
+static void __blk_mq_stop_hw_queues(struct request_queue *q, bool sync);
static int blk_mq_poll_stats_bkt(const struct request *rq)
{
@@ -166,7 +168,7 @@ void blk_mq_quiesce_queue(struct request_queue *q)
unsigned int i;
bool rcu = false;
- blk_mq_stop_hw_queues(q);
+ __blk_mq_stop_hw_queues(q, true);
queue_for_each_hw_ctx(q, hctx, i) {
if (hctx->flags & BLK_MQ_F_BLOCKING)
@@ -1218,20 +1220,34 @@ bool blk_mq_queue_stopped(struct request_queue *q)
}
EXPORT_SYMBOL(blk_mq_queue_stopped);
-void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
+static void __blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx, bool sync)
{
- cancel_delayed_work_sync(&hctx->run_work);
+ if (sync)
+ cancel_delayed_work_sync(&hctx->run_work);
+ else
+ cancel_delayed_work(&hctx->run_work);
+
set_bit(BLK_MQ_S_STOPPED, &hctx->state);
}
+
+void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
+{
+ __blk_mq_stop_hw_queue(hctx, false);
+}
EXPORT_SYMBOL(blk_mq_stop_hw_queue);
-void blk_mq_stop_hw_queues(struct request_queue *q)
+void __blk_mq_stop_hw_queues(struct request_queue *q, bool sync)
{
struct blk_mq_hw_ctx *hctx;
int i;
queue_for_each_hw_ctx(q, hctx, i)
- blk_mq_stop_hw_queue(hctx);
+ __blk_mq_stop_hw_queue(hctx, sync);
+}
+
+void blk_mq_stop_hw_queues(struct request_queue *q)
+{
+ __blk_mq_stop_hw_queues(q, false);
}
EXPORT_SYMBOL(blk_mq_stop_hw_queues);
@@ -1655,8 +1671,7 @@ void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
if (!rq)
continue;
- set->ops->exit_request(set->driver_data, rq,
- hctx_idx, i);
+ set->ops->exit_request(set, rq, hctx_idx);
tags->static_rqs[i] = NULL;
}
}
@@ -1787,8 +1802,7 @@ int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
tags->static_rqs[i] = rq;
if (set->ops->init_request) {
- if (set->ops->init_request(set->driver_data,
- rq, hctx_idx, i,
+ if (set->ops->init_request(set, rq, hctx_idx,
node)) {
tags->static_rqs[i] = NULL;
goto fail;
@@ -1849,14 +1863,12 @@ static void blk_mq_exit_hctx(struct request_queue *q,
struct blk_mq_tag_set *set,
struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
{
- unsigned flush_start_tag = set->queue_depth;
+ blk_mq_debugfs_unregister_hctx(hctx);
blk_mq_tag_idle(hctx);
if (set->ops->exit_request)
- set->ops->exit_request(set->driver_data,
- hctx->fq->flush_rq, hctx_idx,
- flush_start_tag + hctx_idx);
+ set->ops->exit_request(set, hctx->fq->flush_rq, hctx_idx);
blk_mq_sched_exit_hctx(q, hctx, hctx_idx);
@@ -1889,7 +1901,6 @@ static int blk_mq_init_hctx(struct request_queue *q,
struct blk_mq_hw_ctx *hctx, unsigned hctx_idx)
{
int node;
- unsigned flush_start_tag = set->queue_depth;
node = hctx->numa_node;
if (node == NUMA_NO_NODE)
@@ -1933,14 +1944,15 @@ static int blk_mq_init_hctx(struct request_queue *q,
goto sched_exit_hctx;
if (set->ops->init_request &&
- set->ops->init_request(set->driver_data,
- hctx->fq->flush_rq, hctx_idx,
- flush_start_tag + hctx_idx, node))
+ set->ops->init_request(set, hctx->fq->flush_rq, hctx_idx,
+ node))
goto free_fq;
if (hctx->flags & BLK_MQ_F_BLOCKING)
init_srcu_struct(&hctx->queue_rq_srcu);
+ blk_mq_debugfs_register_hctx(q, hctx);
+
return 0;
free_fq:
@@ -2329,15 +2341,15 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
blk_mq_init_cpu_queues(q, set->nr_hw_queues);
- get_online_cpus();
mutex_lock(&all_q_mutex);
+ get_online_cpus();
list_add_tail(&q->all_q_node, &all_q_list);
blk_mq_add_queue_tag_set(set, q);
blk_mq_map_swqueue(q, cpu_online_mask);
- mutex_unlock(&all_q_mutex);
put_online_cpus();
+ mutex_unlock(&all_q_mutex);
if (!(set->flags & BLK_MQ_F_NO_SCHED)) {
int ret;
@@ -2378,6 +2390,7 @@ static void blk_mq_queue_reinit(struct request_queue *q,
{
WARN_ON_ONCE(!atomic_read(&q->mq_freeze_depth));
+ blk_mq_debugfs_unregister_hctxs(q);
blk_mq_sysfs_unregister(q);
/*
@@ -2389,6 +2402,7 @@ static void blk_mq_queue_reinit(struct request_queue *q,
blk_mq_map_swqueue(q, online_mask);
blk_mq_sysfs_register(q);
+ blk_mq_debugfs_register_hctxs(q);
}
/*
@@ -2617,7 +2631,6 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
return -EINVAL;
blk_mq_freeze_queue(q);
- blk_mq_quiesce_queue(q);
ret = 0;
queue_for_each_hw_ctx(q, hctx, i) {
@@ -2643,7 +2656,6 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
q->nr_requests = nr;
blk_mq_unfreeze_queue(q);
- blk_mq_start_stopped_hw_queues(q, true);
return ret;
}
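
The __blk_mq_stop_hw_queue() split earlier in this file is the usual workqueue idiom for choosing between asynchronous and synchronous cancellation. A minimal illustrative sketch (the driver-private names are hypothetical):

static void demo_stop_polling(struct demo_dev *dev, bool sync)
{
	/*
	 * cancel_delayed_work() only dequeues a pending item and may return
	 * while the callback is still running; cancel_delayed_work_sync()
	 * also waits for a running callback to finish, so it may sleep and
	 * must not be called from the work function itself.
	 */
	if (sync)
		cancel_delayed_work_sync(&dev->poll_work);
	else
		cancel_delayed_work(&dev->poll_work);

	set_bit(DEMO_STOPPED, &dev->state);
}

That is why blk_mq_quiesce_queue() asks for the synchronous variant while the exported blk_mq_stop_hw_queue()/blk_mq_stop_hw_queues() keep the cheaper asynchronous cancel.
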
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 2814a14e529c..cc67b48e3551 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -83,34 +83,6 @@ extern int blk_mq_sysfs_register(struct request_queue *q);
extern void blk_mq_sysfs_unregister(struct request_queue *q);
extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);
-/*
- * debugfs helpers
- */
-#ifdef CONFIG_BLK_DEBUG_FS
-int blk_mq_debugfs_register(struct request_queue *q);
-void blk_mq_debugfs_unregister(struct request_queue *q);
-int blk_mq_debugfs_register_mq(struct request_queue *q);
-void blk_mq_debugfs_unregister_mq(struct request_queue *q);
-#else
-static inline int blk_mq_debugfs_register(struct request_queue *q)
-{
- return 0;
-}
-
-static inline void blk_mq_debugfs_unregister(struct request_queue *q)
-{
-}
-
-static inline int blk_mq_debugfs_register_mq(struct request_queue *q)
-{
- return 0;
-}
-
-static inline void blk_mq_debugfs_unregister_mq(struct request_queue *q)
-{
-}
-#endif
-
extern void blk_mq_rq_timed_out(struct request *req, bool reserved);
void blk_mq_release(struct request_queue *q);
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 3f37813ccbaf..504fee940052 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -13,6 +13,7 @@
#include "blk.h"
#include "blk-mq.h"
+#include "blk-mq-debugfs.h"
#include "blk-wbt.h"
struct queue_sysfs_entry {
@@ -889,6 +890,8 @@ int blk_register_queue(struct gendisk *disk)
if (q->mq_ops)
__blk_mq_register_dev(dev, q);
+ blk_mq_debugfs_register(q);
+
kobject_uevent(&q->kobj, KOBJ_ADD);
wbt_enable_default(q);
diff --git a/block/elevator.c b/block/elevator.c
index bf11e70f008b..ab726a5c0bf6 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -950,7 +950,6 @@ static int elevator_switch_mq(struct request_queue *q,
int ret;
blk_mq_freeze_queue(q);
- blk_mq_quiesce_queue(q);
if (q->elevator) {
if (q->elevator->registered)
@@ -978,9 +977,7 @@ static int elevator_switch_mq(struct request_queue *q,
out:
blk_mq_unfreeze_queue(q);
- blk_mq_start_stopped_hw_queues(q, true);
return ret;
-
}
/*
@@ -1088,19 +1085,6 @@ static int __elevator_change(struct request_queue *q, const char *name)
return elevator_switch(q, e);
}
-int elevator_change(struct request_queue *q, const char *name)
-{
- int ret;
-
- /* Protect q->elevator from elevator_init() */
- mutex_lock(&q->sysfs_lock);
- ret = __elevator_change(q, name);
- mutex_unlock(&q->sysfs_lock);
-
- return ret;
-}
-EXPORT_SYMBOL(elevator_change);
-
static inline bool elv_support_iosched(struct request_queue *q)
{
if (q->mq_ops && q->tag_set && (q->tag_set->flags &
diff --git a/block/kyber-iosched.c b/block/kyber-iosched.c
index 3b0090bc5dd1..b9faabc75fdb 100644
--- a/block/kyber-iosched.c
+++ b/block/kyber-iosched.c
@@ -26,6 +26,7 @@
#include "blk.h"
#include "blk-mq.h"
+#include "blk-mq-debugfs.h"
#include "blk-mq-sched.h"
#include "blk-mq-tag.h"
#include "blk-stat.h"
@@ -683,6 +684,131 @@ static struct elv_fs_entry kyber_sched_attrs[] = {
};
#undef KYBER_LAT_ATTR
+#ifdef CONFIG_BLK_DEBUG_FS
+#define KYBER_DEBUGFS_DOMAIN_ATTRS(domain, name) \
+static int kyber_##name##_tokens_show(void *data, struct seq_file *m) \
+{ \
+ struct request_queue *q = data; \
+ struct kyber_queue_data *kqd = q->elevator->elevator_data; \
+ \
+ sbitmap_queue_show(&kqd->domain_tokens[domain], m); \
+ return 0; \
+} \
+ \
+static void *kyber_##name##_rqs_start(struct seq_file *m, loff_t *pos) \
+ __acquires(&khd->lock) \
+{ \
+ struct blk_mq_hw_ctx *hctx = m->private; \
+ struct kyber_hctx_data *khd = hctx->sched_data; \
+ \
+ spin_lock(&khd->lock); \
+ return seq_list_start(&khd->rqs[domain], *pos); \
+} \
+ \
+static void *kyber_##name##_rqs_next(struct seq_file *m, void *v, \
+ loff_t *pos) \
+{ \
+ struct blk_mq_hw_ctx *hctx = m->private; \
+ struct kyber_hctx_data *khd = hctx->sched_data; \
+ \
+ return seq_list_next(v, &khd->rqs[domain], pos); \
+} \
+ \
+static void kyber_##name##_rqs_stop(struct seq_file *m, void *v) \
+ __releases(&khd->lock) \
+{ \
+ struct blk_mq_hw_ctx *hctx = m->private; \
+ struct kyber_hctx_data *khd = hctx->sched_data; \
+ \
+ spin_unlock(&khd->lock); \
+} \
+ \
+static const struct seq_operations kyber_##name##_rqs_seq_ops = { \
+ .start = kyber_##name##_rqs_start, \
+ .next = kyber_##name##_rqs_next, \
+ .stop = kyber_##name##_rqs_stop, \
+ .show = blk_mq_debugfs_rq_show, \
+}; \
+ \
+static int kyber_##name##_waiting_show(void *data, struct seq_file *m) \
+{ \
+ struct blk_mq_hw_ctx *hctx = data; \
+ struct kyber_hctx_data *khd = hctx->sched_data; \
+ wait_queue_t *wait = &khd->domain_wait[domain]; \
+ \
+ seq_printf(m, "%d\n", !list_empty_careful(&wait->task_list)); \
+ return 0; \
+}
+KYBER_DEBUGFS_DOMAIN_ATTRS(KYBER_READ, read)
+KYBER_DEBUGFS_DOMAIN_ATTRS(KYBER_SYNC_WRITE, sync_write)
+KYBER_DEBUGFS_DOMAIN_ATTRS(KYBER_OTHER, other)
+#undef KYBER_DEBUGFS_DOMAIN_ATTRS
+
+static int kyber_async_depth_show(void *data, struct seq_file *m)
+{
+ struct request_queue *q = data;
+ struct kyber_queue_data *kqd = q->elevator->elevator_data;
+
+ seq_printf(m, "%u\n", kqd->async_depth);
+ return 0;
+}
+
+static int kyber_cur_domain_show(void *data, struct seq_file *m)
+{
+ struct blk_mq_hw_ctx *hctx = data;
+ struct kyber_hctx_data *khd = hctx->sched_data;
+
+ switch (khd->cur_domain) {
+ case KYBER_READ:
+ seq_puts(m, "READ\n");
+ break;
+ case KYBER_SYNC_WRITE:
+ seq_puts(m, "SYNC_WRITE\n");
+ break;
+ case KYBER_OTHER:
+ seq_puts(m, "OTHER\n");
+ break;
+ default:
+ seq_printf(m, "%u\n", khd->cur_domain);
+ break;
+ }
+ return 0;
+}
+
+static int kyber_batching_show(void *data, struct seq_file *m)
+{
+ struct blk_mq_hw_ctx *hctx = data;
+ struct kyber_hctx_data *khd = hctx->sched_data;
+
+ seq_printf(m, "%u\n", khd->batching);
+ return 0;
+}
+
+#define KYBER_QUEUE_DOMAIN_ATTRS(name) \
+ {#name "_tokens", 0400, kyber_##name##_tokens_show}
+static const struct blk_mq_debugfs_attr kyber_queue_debugfs_attrs[] = {
+ KYBER_QUEUE_DOMAIN_ATTRS(read),
+ KYBER_QUEUE_DOMAIN_ATTRS(sync_write),
+ KYBER_QUEUE_DOMAIN_ATTRS(other),
+ {"async_depth", 0400, kyber_async_depth_show},
+ {},
+};
+#undef KYBER_QUEUE_DOMAIN_ATTRS
+
+#define KYBER_HCTX_DOMAIN_ATTRS(name) \
+ {#name "_rqs", 0400, .seq_ops = &kyber_##name##_rqs_seq_ops}, \
+ {#name "_waiting", 0400, kyber_##name##_waiting_show}
+static const struct blk_mq_debugfs_attr kyber_hctx_debugfs_attrs[] = {
+ KYBER_HCTX_DOMAIN_ATTRS(read),
+ KYBER_HCTX_DOMAIN_ATTRS(sync_write),
+ KYBER_HCTX_DOMAIN_ATTRS(other),
+ {"cur_domain", 0400, kyber_cur_domain_show},
+ {"batching", 0400, kyber_batching_show},
+ {},
+};
+#undef KYBER_HCTX_DOMAIN_ATTRS
+#endif
+
static struct elevator_type kyber_sched = {
.ops.mq = {
.init_sched = kyber_init_sched,
@@ -696,6 +822,10 @@ static struct elevator_type kyber_sched = {
.has_work = kyber_has_work,
},
.uses_mq = true,
+#ifdef CONFIG_BLK_DEBUG_FS
+ .queue_debugfs_attrs = kyber_queue_debugfs_attrs,
+ .hctx_debugfs_attrs = kyber_hctx_debugfs_attrs,
+#endif
.elevator_attrs = kyber_sched_attrs,
.elevator_name = "kyber",
.elevator_owner = THIS_MODULE,
diff --git a/block/mq-deadline.c b/block/mq-deadline.c
index 236121633ca0..1b964a387afe 100644
--- a/block/mq-deadline.c
+++ b/block/mq-deadline.c
@@ -19,6 +19,7 @@
#include "blk.h"
#include "blk-mq.h"
+#include "blk-mq-debugfs.h"
#include "blk-mq-tag.h"
#include "blk-mq-sched.h"
@@ -517,6 +518,125 @@ static struct elv_fs_entry deadline_attrs[] = {
__ATTR_NULL
};
+#ifdef CONFIG_BLK_DEBUG_FS
+#define DEADLINE_DEBUGFS_DDIR_ATTRS(ddir, name) \
+static void *deadline_##name##_fifo_start(struct seq_file *m, \
+ loff_t *pos) \
+ __acquires(&dd->lock) \
+{ \
+ struct request_queue *q = m->private; \
+ struct deadline_data *dd = q->elevator->elevator_data; \
+ \
+ spin_lock(&dd->lock); \
+ return seq_list_start(&dd->fifo_list[ddir], *pos); \
+} \
+ \
+static void *deadline_##name##_fifo_next(struct seq_file *m, void *v, \
+ loff_t *pos) \
+{ \
+ struct request_queue *q = m->private; \
+ struct deadline_data *dd = q->elevator->elevator_data; \
+ \
+ return seq_list_next(v, &dd->fifo_list[ddir], pos); \
+} \
+ \
+static void deadline_##name##_fifo_stop(struct seq_file *m, void *v) \
+ __releases(&dd->lock) \
+{ \
+ struct request_queue *q = m->private; \
+ struct deadline_data *dd = q->elevator->elevator_data; \
+ \
+ spin_unlock(&dd->lock); \
+} \
+ \
+static const struct seq_operations deadline_##name##_fifo_seq_ops = { \
+ .start = deadline_##name##_fifo_start, \
+ .next = deadline_##name##_fifo_next, \
+ .stop = deadline_##name##_fifo_stop, \
+ .show = blk_mq_debugfs_rq_show, \
+}; \
+ \
+static int deadline_##name##_next_rq_show(void *data, \
+ struct seq_file *m) \
+{ \
+ struct request_queue *q = data; \
+ struct deadline_data *dd = q->elevator->elevator_data; \
+ struct request *rq = dd->next_rq[ddir]; \
+ \
+ if (rq) \
+ __blk_mq_debugfs_rq_show(m, rq); \
+ return 0; \
+}
+DEADLINE_DEBUGFS_DDIR_ATTRS(READ, read)
+DEADLINE_DEBUGFS_DDIR_ATTRS(WRITE, write)
+#undef DEADLINE_DEBUGFS_DDIR_ATTRS
+
+static int deadline_batching_show(void *data, struct seq_file *m)
+{
+ struct request_queue *q = data;
+ struct deadline_data *dd = q->elevator->elevator_data;
+
+ seq_printf(m, "%u\n", dd->batching);
+ return 0;
+}
+
+static int deadline_starved_show(void *data, struct seq_file *m)
+{
+ struct request_queue *q = data;
+ struct deadline_data *dd = q->elevator->elevator_data;
+
+ seq_printf(m, "%u\n", dd->starved);
+ return 0;
+}
+
+static void *deadline_dispatch_start(struct seq_file *m, loff_t *pos)
+ __acquires(&dd->lock)
+{
+ struct request_queue *q = m->private;
+ struct deadline_data *dd = q->elevator->elevator_data;
+
+ spin_lock(&dd->lock);
+ return seq_list_start(&dd->dispatch, *pos);
+}
+
+static void *deadline_dispatch_next(struct seq_file *m, void *v, loff_t *pos)
+{
+ struct request_queue *q = m->private;
+ struct deadline_data *dd = q->elevator->elevator_data;
+
+ return seq_list_next(v, &dd->dispatch, pos);
+}
+
+static void deadline_dispatch_stop(struct seq_file *m, void *v)
+ __releases(&dd->lock)
+{
+ struct request_queue *q = m->private;
+ struct deadline_data *dd = q->elevator->elevator_data;
+
+ spin_unlock(&dd->lock);
+}
+
+static const struct seq_operations deadline_dispatch_seq_ops = {
+ .start = deadline_dispatch_start,
+ .next = deadline_dispatch_next,
+ .stop = deadline_dispatch_stop,
+ .show = blk_mq_debugfs_rq_show,
+};
+
+#define DEADLINE_QUEUE_DDIR_ATTRS(name) \
+ {#name "_fifo_list", 0400, .seq_ops = &deadline_##name##_fifo_seq_ops}, \
+ {#name "_next_rq", 0400, deadline_##name##_next_rq_show}
+static const struct blk_mq_debugfs_attr deadline_queue_debugfs_attrs[] = {
+ DEADLINE_QUEUE_DDIR_ATTRS(read),
+ DEADLINE_QUEUE_DDIR_ATTRS(write),
+ {"batching", 0400, deadline_batching_show},
+ {"starved", 0400, deadline_starved_show},
+ {"dispatch", 0400, .seq_ops = &deadline_dispatch_seq_ops},
+ {},
+};
+#undef DEADLINE_QUEUE_DDIR_ATTRS
+#endif
+
static struct elevator_type mq_deadline = {
.ops.mq = {
.insert_requests = dd_insert_requests,
@@ -533,6 +653,9 @@ static struct elevator_type mq_deadline = {
},
.uses_mq = true,
+#ifdef CONFIG_BLK_DEBUG_FS
+ .queue_debugfs_attrs = deadline_queue_debugfs_attrs,
+#endif
.elevator_attrs = deadline_attrs,
.elevator_name = "mq-deadline",
.elevator_owner = THIS_MODULE,
diff --git a/block/partition-generic.c b/block/partition-generic.c
index 0171a2faad68..ff07b9143ca4 100644
--- a/block/partition-generic.c
+++ b/block/partition-generic.c
@@ -16,7 +16,6 @@
#include <linux/kmod.h>
#include <linux/ctype.h>
#include <linux/genhd.h>
-#include <linux/dax.h>
#include <linux/blktrace_api.h>
#include "partitions/check.h"
@@ -630,24 +629,12 @@ int invalidate_partitions(struct gendisk *disk, struct block_device *bdev)
return 0;
}
-static struct page *read_pagecache_sector(struct block_device *bdev, sector_t n)
-{
- struct address_space *mapping = bdev->bd_inode->i_mapping;
-
- return read_mapping_page(mapping, (pgoff_t)(n >> (PAGE_SHIFT-9)),
- NULL);
-}
-
unsigned char *read_dev_sector(struct block_device *bdev, sector_t n, Sector *p)
{
+ struct address_space *mapping = bdev->bd_inode->i_mapping;
struct page *page;
- /* don't populate page cache for dax capable devices */
- if (IS_DAX(bdev->bd_inode))
- page = read_dax_sector(bdev, n);
- else
- page = read_pagecache_sector(bdev, n);
-
+ page = read_mapping_page(mapping, (pgoff_t)(n >> (PAGE_SHIFT-9)), NULL);
if (!IS_ERR(page)) {
if (PageError(page))
goto fail;
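
For reference, the read_mapping_page() call above converts a 512-byte sector number into a page-cache index with n >> (PAGE_SHIFT - 9). A tiny userspace check of that arithmetic, assuming 4 KiB pages (PAGE_SHIFT = 12) purely for illustration:

#include <stdio.h>

#define PAGE_SHIFT 12                           /* assumed 4 KiB pages */
#define SECTORS_PER_PAGE (1UL << (PAGE_SHIFT - 9))

int main(void)
{
        unsigned long long sectors[] = { 0, 7, 8, 4096 };

        for (int i = 0; i < 4; i++) {
                unsigned long long n = sectors[i];

                printf("sector %llu -> page index %llu, byte offset %llu\n",
                       n,
                       n >> (PAGE_SHIFT - 9),
                       (n & (SECTORS_PER_PAGE - 1)) * 512);
        }
        return 0;
}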
diff --git a/certs/blacklist.c b/certs/blacklist.c
index 3eddce0e307a..3a507b9e2568 100644
--- a/certs/blacklist.c
+++ b/certs/blacklist.c
@@ -140,7 +140,7 @@ int is_hash_blacklisted(const u8 *hash, size_t hash_len, const char *type)
EXPORT_SYMBOL_GPL(is_hash_blacklisted);
/*
- * Intialise the blacklist
+ * Initialise the blacklist
*/
static int __init blacklist_init(void)
{
diff --git a/crypto/lzo.c b/crypto/lzo.c
index 168df784da84..218567d717d6 100644
--- a/crypto/lzo.c
+++ b/crypto/lzo.c
@@ -32,9 +32,7 @@ static void *lzo_alloc_ctx(struct crypto_scomp *tfm)
{
void *ctx;
- ctx = kmalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL | __GFP_NOWARN);
- if (!ctx)
- ctx = vmalloc(LZO1X_MEM_COMPRESS);
+ ctx = kvmalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
if (!ctx)
return ERR_PTR(-ENOMEM);
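
The lzo_alloc_ctx() change above relies on kvmalloc() doing the kmalloc-then-vmalloc fallback internally. As a hedged, kernel-style sketch (not from the patch), the matching free side uses kvfree(), which handles memory from either allocator:

#include <linux/mm.h>
#include <linux/err.h>

static void *example_alloc_ctx(size_t size)
{
        /* kvmalloc() tries kmalloc() first and falls back to vmalloc(). */
        void *ctx = kvmalloc(size, GFP_KERNEL);

        return ctx ? ctx : ERR_PTR(-ENOMEM);
}

static void example_free_ctx(void *ctx)
{
        kvfree(ctx);    /* valid for both kmalloc- and vmalloc-backed memory */
}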
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 117ca14ccf85..ba2901e76769 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -204,4 +204,6 @@ source "drivers/fpga/Kconfig"
source "drivers/fsi/Kconfig"
+source "drivers/tee/Kconfig"
+
endmenu
diff --git a/drivers/Makefile b/drivers/Makefile
index 2eced9afba53..cfabd141dba2 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -14,7 +14,9 @@ obj-$(CONFIG_GENERIC_PHY) += phy/
obj-$(CONFIG_PINCTRL) += pinctrl/
obj-$(CONFIG_GPIOLIB) += gpio/
obj-y += pwm/
+
obj-$(CONFIG_PCI) += pci/
+obj-$(CONFIG_PCI_ENDPOINT) += pci/endpoint/
# PCI dwc controller drivers
obj-y += pci/dwc/
@@ -71,7 +73,7 @@ obj-$(CONFIG_PARPORT) += parport/
obj-$(CONFIG_NVM) += lightnvm/
obj-y += base/ block/ misc/ mfd/ nfc/
obj-$(CONFIG_LIBNVDIMM) += nvdimm/
-obj-$(CONFIG_DEV_DAX) += dax/
+obj-$(CONFIG_DAX) += dax/
obj-$(CONFIG_DMA_SHARED_BUFFER) += dma-buf/
obj-$(CONFIG_NUBUS) += nubus/
obj-y += macintosh/
@@ -104,6 +106,7 @@ obj-$(CONFIG_USB_PHY) += usb/
obj-$(CONFIG_USB) += usb/
obj-$(CONFIG_PCI) += usb/
obj-$(CONFIG_USB_GADGET) += usb/
+obj-$(CONFIG_OF) += usb/
obj-$(CONFIG_SERIO) += input/serio/
obj-$(CONFIG_GAMEPORT) += input/gameport/
obj-$(CONFIG_INPUT) += input/
@@ -177,3 +180,4 @@ obj-$(CONFIG_ANDROID) += android/
obj-$(CONFIG_NVMEM) += nvmem/
obj-$(CONFIG_FPGA) += fpga/
obj-$(CONFIG_FSI) += fsi/
+obj-$(CONFIG_TEE) += tee/
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index d78065cc9324..b1aacfc62b1f 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -50,6 +50,7 @@ acpi-$(CONFIG_ACPI_REDUCED_HARDWARE_ONLY) += evged.o
acpi-y += sysfs.o
acpi-y += property.o
acpi-$(CONFIG_X86) += acpi_cmos_rtc.o
+acpi-$(CONFIG_X86) += x86/utils.o
acpi-$(CONFIG_DEBUG_FS) += debugfs.o
acpi-$(CONFIG_ACPI_NUMA) += numa.o
acpi-$(CONFIG_ACPI_PROCFS_POWER) += cm_sbs.o
diff --git a/drivers/acpi/acpi_apd.c b/drivers/acpi/acpi_apd.c
index 17a1eb14847a..fc6c416f8724 100644
--- a/drivers/acpi/acpi_apd.c
+++ b/drivers/acpi/acpi_apd.c
@@ -106,6 +106,16 @@ static const struct apd_device_desc vulcan_spi_desc = {
.setup = acpi_apd_setup,
.fixed_clk_rate = 133000000,
};
+
+static const struct apd_device_desc hip07_i2c_desc = {
+ .setup = acpi_apd_setup,
+ .fixed_clk_rate = 200000000,
+};
+
+static const struct apd_device_desc hip08_i2c_desc = {
+ .setup = acpi_apd_setup,
+ .fixed_clk_rate = 250000000,
+};
#endif
#else
@@ -170,6 +180,8 @@ static const struct acpi_device_id acpi_apd_device_ids[] = {
{ "APMC0D0F", APD_ADDR(xgene_i2c_desc) },
{ "BRCM900D", APD_ADDR(vulcan_spi_desc) },
{ "CAV900D", APD_ADDR(vulcan_spi_desc) },
+ { "HISI0A21", APD_ADDR(hip07_i2c_desc) },
+ { "HISI0A22", APD_ADDR(hip08_i2c_desc) },
#endif
{ }
};
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index 5edfd9c49044..10347e3d73ad 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -143,6 +143,22 @@ static void lpss_deassert_reset(struct lpss_private_data *pdata)
writel(val, pdata->mmio_base + offset);
}
+/*
+ * BYT PWM used for backlight control by the i915 driver on systems without
+ * the Crystal Cove PMIC.
+ */
+static struct pwm_lookup byt_pwm_lookup[] = {
+ PWM_LOOKUP_WITH_MODULE("80860F09:00", 0, "0000:00:02.0",
+ "pwm_backlight", 0, PWM_POLARITY_NORMAL,
+ "pwm-lpss-platform"),
+};
+
+static void byt_pwm_setup(struct lpss_private_data *pdata)
+{
+ if (!acpi_dev_present("INT33FD", NULL, -1))
+ pwm_add_table(byt_pwm_lookup, ARRAY_SIZE(byt_pwm_lookup));
+}
+
#define LPSS_I2C_ENABLE 0x6c
static void byt_i2c_setup(struct lpss_private_data *pdata)
@@ -200,6 +216,7 @@ static const struct lpss_device_desc lpt_sdio_dev_desc = {
static const struct lpss_device_desc byt_pwm_dev_desc = {
.flags = LPSS_SAVE_CTX,
+ .setup = byt_pwm_setup,
};
static const struct lpss_device_desc bsw_pwm_dev_desc = {
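
The byt_pwm_setup() hook above registers the lookup only when the Crystal Cove PMIC (ACPI id "INT33FD") is absent, so a device matching the registered dev_id ("0000:00:02.0") can then obtain the LPSS PWM through the standard lookup mechanism. A rough, hedged sketch of the consumer side (error handling elided; only pwm_get() and the "pwm_backlight" connection id registered above are taken from real usage, the helper name is invented):

#include <linux/pwm.h>

/* Illustrative only: a device whose name matches the registered dev_id
 * can request the PWM by the "pwm_backlight" connection id. */
static struct pwm_device *example_get_backlight_pwm(struct device *dev)
{
        return pwm_get(dev, "pwm_backlight");
}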
diff --git a/drivers/acpi/acpica/Makefile b/drivers/acpi/acpica/Makefile
index 32d93edbc479..dea65306b687 100644
--- a/drivers/acpi/acpica/Makefile
+++ b/drivers/acpi/acpica/Makefile
@@ -2,7 +2,7 @@
# Makefile for ACPICA Core interpreter
#
-ccflags-y := -Os -DBUILDING_ACPICA
+ccflags-y := -Os -D_LINUX -DBUILDING_ACPICA
ccflags-$(CONFIG_ACPI_DEBUG) += -DACPI_DEBUG_OUTPUT
# use acpi.o to put all files here into acpi.o modparam namespace
diff --git a/drivers/acpi/acpica/acconvert.h b/drivers/acpi/acpica/acconvert.h
new file mode 100644
index 000000000000..c84223b60b35
--- /dev/null
+++ b/drivers/acpi/acpica/acconvert.h
@@ -0,0 +1,144 @@
+/******************************************************************************
+ *
+ * Module Name: acapps - common include for ACPI applications/tools
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2017, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#ifndef _ACCONVERT
+#define _ACCONVERT
+
+/* Definitions for comment state */
+
+#define ASL_COMMENT_STANDARD 1
+#define ASLCOMMENT_INLINE 2
+#define ASL_COMMENT_OPEN_PAREN 3
+#define ASL_COMMENT_CLOSE_PAREN 4
+#define ASL_COMMENT_CLOSE_BRACE 5
+
+/* Definitions for comment print function */
+
+#define AML_COMMENT_STANDARD 1
+#define AMLCOMMENT_INLINE 2
+#define AML_COMMENT_END_NODE 3
+#define AML_NAMECOMMENT 4
+#define AML_COMMENT_CLOSE_BRACE 5
+#define AML_COMMENT_ENDBLK 6
+#define AML_COMMENT_INCLUDE 7
+
+#ifdef ACPI_ASL_COMPILER
+/*
+ * cvcompiler
+ */
+void
+cv_process_comment(struct asl_comment_state current_state,
+ char *string_buffer, int c1);
+
+void
+cv_process_comment_type2(struct asl_comment_state current_state,
+ char *string_buffer);
+
+u32 cv_calculate_comment_lengths(union acpi_parse_object *op);
+
+void cv_process_comment_state(char input);
+
+char *cv_append_inline_comment(char *inline_comment, char *to_add);
+
+void cv_add_to_comment_list(char *to_add);
+
+void cv_place_comment(u8 type, char *comment_string);
+
+u32 cv_parse_op_block_type(union acpi_parse_object *op);
+
+struct acpi_comment_node *cv_comment_node_calloc(void);
+
+void cg_write_aml_def_block_comment(union acpi_parse_object *op);
+
+void
+cg_write_one_aml_comment(union acpi_parse_object *op,
+ char *comment_to_print, u8 input_option);
+
+void cg_write_aml_comment(union acpi_parse_object *op);
+
+/*
+ * cvparser
+ */
+void
+cv_init_file_tree(struct acpi_table_header *table,
+ u8 *aml_start, u32 aml_length);
+
+void cv_clear_op_comments(union acpi_parse_object *op);
+
+struct acpi_file_node *cv_filename_exists(char *filename,
+ struct acpi_file_node *head);
+
+void cv_label_file_node(union acpi_parse_object *op);
+
+void
+cv_capture_list_comments(struct acpi_parse_state *parser_state,
+ struct acpi_comment_node *list_head,
+ struct acpi_comment_node *list_tail);
+
+void cv_capture_comments_only(struct acpi_parse_state *parser_state);
+
+void cv_capture_comments(struct acpi_walk_state *walk_state);
+
+void cv_transfer_comments(union acpi_parse_object *op);
+
+/*
+ * cvdisasm
+ */
+void cv_switch_files(u32 level, union acpi_parse_object *op);
+
+u8 cv_file_has_switched(union acpi_parse_object *op);
+
+void cv_close_paren_write_comment(union acpi_parse_object *op, u32 level);
+
+void cv_close_brace_write_comment(union acpi_parse_object *op, u32 level);
+
+void
+cv_print_one_comment_list(struct acpi_comment_node *comment_list, u32 level);
+
+void
+cv_print_one_comment_type(union acpi_parse_object *op,
+ u8 comment_type, char *end_str, u32 level);
+
+#endif
+
+#endif /* _ACCONVERT */
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index 1d955fe216c4..abe8c316908c 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -370,6 +370,59 @@ ACPI_GLOBAL(const char, *acpi_gbl_pld_shape_list[]);
#endif
+/*
+ * Meant for the -ca option.
+ */
+ACPI_INIT_GLOBAL(char *, acpi_gbl_current_inline_comment, NULL);
+ACPI_INIT_GLOBAL(char *, acpi_gbl_current_end_node_comment, NULL);
+ACPI_INIT_GLOBAL(char *, acpi_gbl_current_open_brace_comment, NULL);
+ACPI_INIT_GLOBAL(char *, acpi_gbl_current_close_brace_comment, NULL);
+
+ACPI_INIT_GLOBAL(char *, acpi_gbl_root_filename, NULL);
+ACPI_INIT_GLOBAL(char *, acpi_gbl_current_filename, NULL);
+ACPI_INIT_GLOBAL(char *, acpi_gbl_current_parent_filename, NULL);
+ACPI_INIT_GLOBAL(char *, acpi_gbl_current_include_filename, NULL);
+
+ACPI_INIT_GLOBAL(struct acpi_comment_node, *acpi_gbl_last_list_head, NULL);
+
+ACPI_INIT_GLOBAL(struct acpi_comment_node, *acpi_gbl_def_blk_comment_list_head,
+ NULL);
+ACPI_INIT_GLOBAL(struct acpi_comment_node, *acpi_gbl_def_blk_comment_list_tail,
+ NULL);
+
+ACPI_INIT_GLOBAL(struct acpi_comment_node, *acpi_gbl_reg_comment_list_head,
+ NULL);
+ACPI_INIT_GLOBAL(struct acpi_comment_node, *acpi_gbl_reg_comment_list_tail,
+ NULL);
+
+ACPI_INIT_GLOBAL(struct acpi_comment_node, *acpi_gbl_inc_comment_list_head,
+ NULL);
+ACPI_INIT_GLOBAL(struct acpi_comment_node, *acpi_gbl_inc_comment_list_tail,
+ NULL);
+
+ACPI_INIT_GLOBAL(struct acpi_comment_node, *acpi_gbl_end_blk_comment_list_head,
+ NULL);
+ACPI_INIT_GLOBAL(struct acpi_comment_node, *acpi_gbl_end_blk_comment_list_tail,
+ NULL);
+
+ACPI_INIT_GLOBAL(struct acpi_comment_addr_node,
+ *acpi_gbl_comment_addr_list_head, NULL);
+
+ACPI_INIT_GLOBAL(union acpi_parse_object, *acpi_gbl_current_scope, NULL);
+
+ACPI_INIT_GLOBAL(struct acpi_file_node, *acpi_gbl_file_tree_root, NULL);
+
+ACPI_GLOBAL(acpi_cache_t *, acpi_gbl_reg_comment_cache);
+ACPI_GLOBAL(acpi_cache_t *, acpi_gbl_comment_addr_cache);
+ACPI_GLOBAL(acpi_cache_t *, acpi_gbl_file_cache);
+
+ACPI_INIT_GLOBAL(u8, gbl_capture_comments, FALSE);
+
+ACPI_INIT_GLOBAL(u8, acpi_gbl_debug_asl_conversion, FALSE);
+ACPI_INIT_GLOBAL(ACPI_FILE, acpi_gbl_conv_debug_file, NULL);
+
+ACPI_GLOBAL(char, acpi_gbl_table_sig[4]);
+
/*****************************************************************************
*
* Application globals
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index 8fd495e8fdce..f9b3f7fef462 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -53,7 +53,7 @@ typedef u32 acpi_mutex_handle;
/* Total number of aml opcodes defined */
-#define AML_NUM_OPCODES 0x82
+#define AML_NUM_OPCODES 0x83
/* Forward declarations */
@@ -754,21 +754,52 @@ union acpi_parse_value {
#define ACPI_DISASM_ONLY_MEMBERS(a)
#endif
+#if defined(ACPI_ASL_COMPILER)
+#define ACPI_CONVERTER_ONLY_MEMBERS(a) a;
+#else
+#define ACPI_CONVERTER_ONLY_MEMBERS(a)
+#endif
+
#define ACPI_PARSE_COMMON \
- union acpi_parse_object *parent; /* Parent op */\
- u8 descriptor_type; /* To differentiate various internal objs */\
- u8 flags; /* Type of Op */\
- u16 aml_opcode; /* AML opcode */\
- u8 *aml; /* Address of declaration in AML */\
- union acpi_parse_object *next; /* Next op */\
- struct acpi_namespace_node *node; /* For use by interpreter */\
- union acpi_parse_value value; /* Value or args associated with the opcode */\
- u8 arg_list_length; /* Number of elements in the arg list */\
- ACPI_DISASM_ONLY_MEMBERS (\
- u16 disasm_flags; /* Used during AML disassembly */\
- u8 disasm_opcode; /* Subtype used for disassembly */\
- char *operator_symbol;/* Used for C-style operator name strings */\
- char aml_op_name[16]) /* Op name (debug only) */
+ union acpi_parse_object *parent; /* Parent op */\
+ u8 descriptor_type; /* To differentiate various internal objs */\
+ u8 flags; /* Type of Op */\
+ u16 aml_opcode; /* AML opcode */\
+ u8 *aml; /* Address of declaration in AML */\
+ union acpi_parse_object *next; /* Next op */\
+ struct acpi_namespace_node *node; /* For use by interpreter */\
+ union acpi_parse_value value; /* Value or args associated with the opcode */\
+ u8 arg_list_length; /* Number of elements in the arg list */\
+ ACPI_DISASM_ONLY_MEMBERS (\
+ u16 disasm_flags; /* Used during AML disassembly */\
+ u8 disasm_opcode; /* Subtype used for disassembly */\
+ char *operator_symbol; /* Used for C-style operator name strings */\
+ char aml_op_name[16]) /* Op name (debug only) */\
+ ACPI_CONVERTER_ONLY_MEMBERS (\
+ char *inline_comment; /* Inline comment */\
+ char *end_node_comment; /* End of node comment */\
+ char *name_comment; /* Comment associated with the first parameter of the name node */\
+ char *close_brace_comment; /* Comments that come after } on the same line as } */\
+ struct acpi_comment_node *comment_list; /* comments that appear before this node */\
+ struct acpi_comment_node *end_blk_comment; /* comments at the end of a block but before ) or } */\
+ char *cv_filename; /* Filename associated with this node. Used for ASL/ASL+ converter */\
+ char *cv_parent_filename) /* Parent filename associated with this node. Used for ASL/ASL+ converter */
+
+/* categories of comments */
+
+typedef enum {
+ STANDARD_COMMENT = 1,
+ INLINE_COMMENT,
+ ENDNODE_COMMENT,
+ OPENBRACE_COMMENT,
+ CLOSE_BRACE_COMMENT,
+ STD_DEFBLK_COMMENT,
+ END_DEFBLK_COMMENT,
+ FILENAME_COMMENT,
+ PARENTFILENAME_COMMENT,
+ ENDBLK_COMMENT,
+ INCLUDE_COMMENT
+} asl_comment_types;
/* Internal opcodes for disasm_opcode field above */
@@ -784,9 +815,38 @@ union acpi_parse_value {
#define ACPI_DASM_LNOT_SUFFIX 0x09 /* End of a Lnot_equal (etc.) pair of opcodes */
#define ACPI_DASM_HID_STRING 0x0A /* String is a _HID or _CID */
#define ACPI_DASM_IGNORE_SINGLE 0x0B /* Ignore the opcode but not its children */
-#define ACPI_DASM_SWITCH_PREDICATE 0x0C /* Object is a predicate for a Switch or Case block */
-#define ACPI_DASM_CASE 0x0D /* If/Else is a Case in a Switch/Case block */
-#define ACPI_DASM_DEFAULT 0x0E /* Else is a Default in a Switch/Case block */
+#define ACPI_DASM_SWITCH 0x0C /* While is a Switch */
+#define ACPI_DASM_SWITCH_PREDICATE 0x0D /* Object is a predicate for a Switch or Case block */
+#define ACPI_DASM_CASE 0x0E /* If/Else is a Case in a Switch/Case block */
+#define ACPI_DASM_DEFAULT 0x0F /* Else is a Default in a Switch/Case block */
+
+/*
+ * List struct used in the -ca option
+ */
+struct acpi_comment_node {
+ char *comment;
+ struct acpi_comment_node *next;
+};
+
+struct acpi_comment_addr_node {
+ u8 *addr;
+ struct acpi_comment_addr_node *next;
+};
+
+/*
+ * File node - used for "Include" operator file stack and
+ * dependency tree for the -ca option
+ */
+struct acpi_file_node {
+ void *file;
+ char *filename;
+ char *file_start; /* Points to AML and indicates where the AML for this particular file starts. */
+ char *file_end; /* Points to AML and indicates where the AML for this particular file ends. */
+ struct acpi_file_node *next;
+ struct acpi_file_node *parent;
+ u8 include_written;
+ struct acpi_comment_node *include_comment;
+};
/*
* Generic operation (for example: If, While, Store)
@@ -813,6 +873,8 @@ struct acpi_parse_obj_asl {
ACPI_PARSE_COMMON union acpi_parse_object *child;
union acpi_parse_object *parent_method;
char *filename;
+ u8 file_changed;
+ char *parent_filename;
char *external_name;
char *namepath;
char name_seg[4];
@@ -842,6 +904,14 @@ union acpi_parse_object {
struct acpi_parse_obj_asl asl;
};
+struct asl_comment_state {
+ u8 comment_type;
+ u32 spaces_before;
+ union acpi_parse_object *latest_parse_node;
+ union acpi_parse_object *parsing_paren_brace_node;
+ u8 capture_comments;
+};
+
/*
* Parse state - one state per parser invocation and each control
* method.
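
ACPI_CONVERTER_ONLY_MEMBERS above mirrors the existing ACPI_DISASM_ONLY_MEMBERS trick: the extra parse-node members exist only when ACPI_ASL_COMPILER is defined and cost nothing otherwise. A small, self-contained illustration of that conditional-members pattern (names invented for the example):

#include <stdio.h>

/* #define BUILD_CONVERTER */

#ifdef BUILD_CONVERTER
#define CONVERTER_ONLY_MEMBERS(a)       a;
#else
#define CONVERTER_ONLY_MEMBERS(a)
#endif

struct parse_node {
        int aml_opcode;
        CONVERTER_ONLY_MEMBERS(char *inline_comment)
};

int main(void)
{
        printf("sizeof(struct parse_node) = %zu bytes\n",
               sizeof(struct parse_node));
        return 0;
}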
diff --git a/drivers/acpi/acpica/acmacros.h b/drivers/acpi/acpica/acmacros.h
index c3337514e0ed..c7f0c96cc00f 100644
--- a/drivers/acpi/acpica/acmacros.h
+++ b/drivers/acpi/acpica/acmacros.h
@@ -493,4 +493,39 @@
#define ACPI_IS_OCTAL_DIGIT(d) (((char)(d) >= '0') && ((char)(d) <= '7'))
+/*
+ * Macros used for the ASL/ASL+ converter utility
+ */
+#ifdef ACPI_ASL_COMPILER
+
+#define ASL_CV_LABEL_FILENODE(a) cv_label_file_node(a);
+#define ASL_CV_CAPTURE_COMMENTS_ONLY(a) cv_capture_comments_only (a);
+#define ASL_CV_CAPTURE_COMMENTS(a) cv_capture_comments (a);
+#define ASL_CV_TRANSFER_COMMENTS(a) cv_transfer_comments (a);
+#define ASL_CV_CLOSE_PAREN(a,b) cv_close_paren_write_comment(a,b);
+#define ASL_CV_CLOSE_BRACE(a,b) cv_close_brace_write_comment(a,b);
+#define ASL_CV_SWITCH_FILES(a,b) cv_switch_files(a,b);
+#define ASL_CV_CLEAR_OP_COMMENTS(a) cv_clear_op_comments(a);
+#define ASL_CV_PRINT_ONE_COMMENT(a,b,c,d) cv_print_one_comment_type (a,b,c,d);
+#define ASL_CV_PRINT_ONE_COMMENT_LIST(a,b) cv_print_one_comment_list (a,b);
+#define ASL_CV_FILE_HAS_SWITCHED(a) cv_file_has_switched(a)
+#define ASL_CV_INIT_FILETREE(a,b,c) cv_init_file_tree(a,b,c);
+
+#else
+
+#define ASL_CV_LABEL_FILENODE(a)
+#define ASL_CV_CAPTURE_COMMENTS_ONLY(a)
+#define ASL_CV_CAPTURE_COMMENTS(a)
+#define ASL_CV_TRANSFER_COMMENTS(a)
+#define ASL_CV_CLOSE_PAREN(a,b) acpi_os_printf (")");
+#define ASL_CV_CLOSE_BRACE(a,b) acpi_os_printf ("}");
+#define ASL_CV_SWITCH_FILES(a,b)
+#define ASL_CV_CLEAR_OP_COMMENTS(a)
+#define ASL_CV_PRINT_ONE_COMMENT(a,b,c,d)
+#define ASL_CV_PRINT_ONE_COMMENT_LIST(a,b)
+#define ASL_CV_FILE_HAS_SWITCHED(a) 0
+#define ASL_CV_INIT_FILETREE(a,b,c)
+
+#endif
+
#endif /* ACMACROS_H */
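
The ASL_CV_* macros above follow the common "hook that compiles away" pattern: in converter builds they call into the cv*/cg* helpers (note the semicolon lives inside the macro), while in normal builds most expand to nothing and the close-paren/close-brace variants fall back to a plain acpi_os_printf(). A generic, self-contained sketch of the pattern:

#include <stdio.h>

/* #define WITH_HOOKS */

#ifdef WITH_HOOKS
#define CLOSE_BRACE_HOOK(level)   close_brace_hook(level);
#else
#define CLOSE_BRACE_HOOK(level)   printf("}");
#endif

#ifdef WITH_HOOKS
static void close_brace_hook(int level)
{
        printf("} /* level %d, with captured comments */", level);
}
#endif

int main(void)
{
        printf("Scope (\\_SB) {");
        CLOSE_BRACE_HOOK(0)             /* no trailing ';' at call sites */
        printf("\n");
        return 0;
}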
diff --git a/drivers/acpi/acpica/acopcode.h b/drivers/acpi/acpica/acopcode.h
index e758f098ff4b..a5d9af758c52 100644
--- a/drivers/acpi/acpica/acopcode.h
+++ b/drivers/acpi/acpica/acopcode.h
@@ -90,6 +90,7 @@
#define ARGP_BUFFER_OP ARGP_LIST3 (ARGP_PKGLENGTH, ARGP_TERMARG, ARGP_BYTELIST)
#define ARGP_BYTE_OP ARGP_LIST1 (ARGP_BYTEDATA)
#define ARGP_BYTELIST_OP ARGP_LIST1 (ARGP_NAMESTRING)
+#define ARGP_COMMENT_OP ARGP_LIST2 (ARGP_BYTEDATA, ARGP_COMMENT)
#define ARGP_CONCAT_OP ARGP_LIST3 (ARGP_TERMARG, ARGP_TERMARG, ARGP_TARGET)
#define ARGP_CONCAT_RES_OP ARGP_LIST3 (ARGP_TERMARG, ARGP_TERMARG, ARGP_TARGET)
#define ARGP_COND_REF_OF_OP ARGP_LIST2 (ARGP_SIMPLENAME, ARGP_TARGET)
@@ -223,6 +224,7 @@
#define ARGI_BUFFER_OP ARGI_LIST1 (ARGI_INTEGER)
#define ARGI_BYTE_OP ARGI_INVALID_OPCODE
#define ARGI_BYTELIST_OP ARGI_INVALID_OPCODE
+#define ARGI_COMMENT_OP ARGI_INVALID_OPCODE
#define ARGI_CONCAT_OP ARGI_LIST3 (ARGI_ANYTYPE, ARGI_ANYTYPE, ARGI_TARGETREF)
#define ARGI_CONCAT_RES_OP ARGI_LIST3 (ARGI_BUFFER, ARGI_BUFFER, ARGI_TARGETREF)
#define ARGI_COND_REF_OF_OP ARGI_LIST2 (ARGI_OBJECT_REF, ARGI_TARGETREF)
diff --git a/drivers/acpi/acpica/amlcode.h b/drivers/acpi/acpica/amlcode.h
index b536fd471292..176f7e9b4d0e 100644
--- a/drivers/acpi/acpica/amlcode.h
+++ b/drivers/acpi/acpica/amlcode.h
@@ -48,11 +48,8 @@
/* primary opcodes */
-#define AML_NULL_CHAR (u16) 0x00
-
#define AML_ZERO_OP (u16) 0x00
#define AML_ONE_OP (u16) 0x01
-#define AML_UNASSIGNED (u16) 0x02
#define AML_ALIAS_OP (u16) 0x06
#define AML_NAME_OP (u16) 0x08
#define AML_BYTE_OP (u16) 0x0a
@@ -63,17 +60,15 @@
#define AML_SCOPE_OP (u16) 0x10
#define AML_BUFFER_OP (u16) 0x11
#define AML_PACKAGE_OP (u16) 0x12
-#define AML_VAR_PACKAGE_OP (u16) 0x13 /* ACPI 2.0 */
+#define AML_VARIABLE_PACKAGE_OP (u16) 0x13 /* ACPI 2.0 */
#define AML_METHOD_OP (u16) 0x14
#define AML_EXTERNAL_OP (u16) 0x15 /* ACPI 6.0 */
#define AML_DUAL_NAME_PREFIX (u16) 0x2e
-#define AML_MULTI_NAME_PREFIX_OP (u16) 0x2f
-#define AML_NAME_CHAR_SUBSEQ (u16) 0x30
-#define AML_NAME_CHAR_FIRST (u16) 0x41
-#define AML_EXTENDED_OP_PREFIX (u16) 0x5b
+#define AML_MULTI_NAME_PREFIX (u16) 0x2f
+#define AML_EXTENDED_PREFIX (u16) 0x5b
#define AML_ROOT_PREFIX (u16) 0x5c
#define AML_PARENT_PREFIX (u16) 0x5e
-#define AML_LOCAL_OP (u16) 0x60
+#define AML_FIRST_LOCAL_OP (u16) 0x60 /* Used for Local op # calculations */
#define AML_LOCAL0 (u16) 0x60
#define AML_LOCAL1 (u16) 0x61
#define AML_LOCAL2 (u16) 0x62
@@ -82,7 +77,7 @@
#define AML_LOCAL5 (u16) 0x65
#define AML_LOCAL6 (u16) 0x66
#define AML_LOCAL7 (u16) 0x67
-#define AML_ARG_OP (u16) 0x68
+#define AML_FIRST_ARG_OP (u16) 0x68 /* Used for Arg op # calculations */
#define AML_ARG0 (u16) 0x68
#define AML_ARG1 (u16) 0x69
#define AML_ARG2 (u16) 0x6a
@@ -93,7 +88,7 @@
#define AML_STORE_OP (u16) 0x70
#define AML_REF_OF_OP (u16) 0x71
#define AML_ADD_OP (u16) 0x72
-#define AML_CONCAT_OP (u16) 0x73
+#define AML_CONCATENATE_OP (u16) 0x73
#define AML_SUBTRACT_OP (u16) 0x74
#define AML_INCREMENT_OP (u16) 0x75
#define AML_DECREMENT_OP (u16) 0x76
@@ -110,7 +105,7 @@
#define AML_FIND_SET_LEFT_BIT_OP (u16) 0x81
#define AML_FIND_SET_RIGHT_BIT_OP (u16) 0x82
#define AML_DEREF_OF_OP (u16) 0x83
-#define AML_CONCAT_RES_OP (u16) 0x84 /* ACPI 2.0 */
+#define AML_CONCATENATE_TEMPLATE_OP (u16) 0x84 /* ACPI 2.0 */
#define AML_MOD_OP (u16) 0x85 /* ACPI 2.0 */
#define AML_NOTIFY_OP (u16) 0x86
#define AML_SIZE_OF_OP (u16) 0x87
@@ -122,18 +117,18 @@
#define AML_CREATE_BIT_FIELD_OP (u16) 0x8d
#define AML_OBJECT_TYPE_OP (u16) 0x8e
#define AML_CREATE_QWORD_FIELD_OP (u16) 0x8f /* ACPI 2.0 */
-#define AML_LAND_OP (u16) 0x90
-#define AML_LOR_OP (u16) 0x91
-#define AML_LNOT_OP (u16) 0x92
-#define AML_LEQUAL_OP (u16) 0x93
-#define AML_LGREATER_OP (u16) 0x94
-#define AML_LLESS_OP (u16) 0x95
+#define AML_LOGICAL_AND_OP (u16) 0x90
+#define AML_LOGICAL_OR_OP (u16) 0x91
+#define AML_LOGICAL_NOT_OP (u16) 0x92
+#define AML_LOGICAL_EQUAL_OP (u16) 0x93
+#define AML_LOGICAL_GREATER_OP (u16) 0x94
+#define AML_LOGICAL_LESS_OP (u16) 0x95
#define AML_TO_BUFFER_OP (u16) 0x96 /* ACPI 2.0 */
-#define AML_TO_DECSTRING_OP (u16) 0x97 /* ACPI 2.0 */
-#define AML_TO_HEXSTRING_OP (u16) 0x98 /* ACPI 2.0 */
+#define AML_TO_DECIMAL_STRING_OP (u16) 0x97 /* ACPI 2.0 */
+#define AML_TO_HEX_STRING_OP (u16) 0x98 /* ACPI 2.0 */
#define AML_TO_INTEGER_OP (u16) 0x99 /* ACPI 2.0 */
#define AML_TO_STRING_OP (u16) 0x9c /* ACPI 2.0 */
-#define AML_COPY_OP (u16) 0x9d /* ACPI 2.0 */
+#define AML_COPY_OBJECT_OP (u16) 0x9d /* ACPI 2.0 */
#define AML_MID_OP (u16) 0x9e /* ACPI 2.0 */
#define AML_CONTINUE_OP (u16) 0x9f /* ACPI 2.0 */
#define AML_IF_OP (u16) 0xa0
@@ -142,18 +137,27 @@
#define AML_NOOP_OP (u16) 0xa3
#define AML_RETURN_OP (u16) 0xa4
#define AML_BREAK_OP (u16) 0xa5
-#define AML_BREAK_POINT_OP (u16) 0xcc
+#define AML_COMMENT_OP (u16) 0xa9
+#define AML_BREAKPOINT_OP (u16) 0xcc
#define AML_ONES_OP (u16) 0xff
-/* prefixed opcodes */
+/*
+ * Combination opcodes (actually two one-byte opcodes)
+ * Used by the disassembler and iASL compiler
+ */
+#define AML_LOGICAL_GREATER_EQUAL_OP (u16) 0x9295 /* LNot (LLess) */
+#define AML_LOGICAL_LESS_EQUAL_OP (u16) 0x9294 /* LNot (LGreater) */
+#define AML_LOGICAL_NOT_EQUAL_OP (u16) 0x9293 /* LNot (LEqual) */
+
+/* Prefixed (2-byte) opcodes (with AML_EXTENDED_PREFIX) */
-#define AML_EXTENDED_OPCODE (u16) 0x5b00 /* prefix for 2-byte opcodes */
+#define AML_EXTENDED_OPCODE (u16) 0x5b00 /* Prefix for 2-byte opcodes */
#define AML_MUTEX_OP (u16) 0x5b01
#define AML_EVENT_OP (u16) 0x5b02
-#define AML_SHIFT_RIGHT_BIT_OP (u16) 0x5b10
-#define AML_SHIFT_LEFT_BIT_OP (u16) 0x5b11
-#define AML_COND_REF_OF_OP (u16) 0x5b12
+#define AML_SHIFT_RIGHT_BIT_OP (u16) 0x5b10 /* Obsolete, not in ACPI spec */
+#define AML_SHIFT_LEFT_BIT_OP (u16) 0x5b11 /* Obsolete, not in ACPI spec */
+#define AML_CONDITIONAL_REF_OF_OP (u16) 0x5b12
#define AML_CREATE_FIELD_OP (u16) 0x5b13
#define AML_LOAD_TABLE_OP (u16) 0x5b1f /* ACPI 2.0 */
#define AML_LOAD_OP (u16) 0x5b20
@@ -175,21 +179,13 @@
#define AML_FIELD_OP (u16) 0x5b81
#define AML_DEVICE_OP (u16) 0x5b82
#define AML_PROCESSOR_OP (u16) 0x5b83
-#define AML_POWER_RES_OP (u16) 0x5b84
+#define AML_POWER_RESOURCE_OP (u16) 0x5b84
#define AML_THERMAL_ZONE_OP (u16) 0x5b85
#define AML_INDEX_FIELD_OP (u16) 0x5b86
#define AML_BANK_FIELD_OP (u16) 0x5b87
#define AML_DATA_REGION_OP (u16) 0x5b88 /* ACPI 2.0 */
/*
- * Combination opcodes (actually two one-byte opcodes)
- * Used by the disassembler and iASL compiler
- */
-#define AML_LGREATEREQUAL_OP (u16) 0x9295
-#define AML_LLESSEQUAL_OP (u16) 0x9294
-#define AML_LNOTEQUAL_OP (u16) 0x9293
-
-/*
* Opcodes for "Field" operators
*/
#define AML_FIELD_OFFSET_OP (u8) 0x00
@@ -241,6 +237,7 @@
#define ARGP_SIMPLENAME 0x12 /* name_string | local_term | arg_term */
#define ARGP_NAME_OR_REF 0x13 /* For object_type only */
#define ARGP_MAX 0x13
+#define ARGP_COMMENT 0x14
/*
* Resolved argument types for the AML Interpreter
@@ -308,24 +305,19 @@
#define ARGI_INVALID_OPCODE 0xFFFFFFFF
/*
- * hash offsets
- */
-#define AML_EXTOP_HASH_OFFSET 22
-#define AML_LNOT_HASH_OFFSET 19
-
-/*
- * opcode groups and types
+ * Some of the flags and types below are of the form:
+ *
+ * AML_FLAGS_EXEC_#A_#T,#R, or
+ * AML_TYPE_EXEC_#A_#T,#R where:
+ *
+ * #A is the number of required arguments
+ * #T is the number of target operands
+ * #R indicates whether there is a return value
*/
-#define OPGRP_NAMED 0x01
-#define OPGRP_FIELD 0x02
-#define OPGRP_BYTELIST 0x04
/*
- * Opcode information
+ * Opcode information flags
*/
-
-/* Opcode flags */
-
#define AML_LOGICAL 0x0001
#define AML_LOGICAL_NUMERIC 0x0002
#define AML_MATH 0x0004
@@ -342,7 +334,7 @@
#define AML_CONSTANT 0x2000
#define AML_NO_OPERAND_RESOLVE 0x4000
-/* Convenient flag groupings */
+/* Convenient flag groupings of the flags above */
#define AML_FLAGS_EXEC_0A_0T_1R AML_HAS_RETVAL
#define AML_FLAGS_EXEC_1A_0T_0R AML_HAS_ARGS /* Monadic1 */
@@ -359,7 +351,7 @@
/*
* The opcode Type is used in a dispatch table, do not change
- * without updating the table.
+ * or add anything new without updating the table.
*/
#define AML_TYPE_EXEC_0A_0T_1R 0x00
#define AML_TYPE_EXEC_1A_0T_0R 0x01 /* Monadic1 */
@@ -385,7 +377,7 @@
#define AML_TYPE_METHOD_CALL 0x10
-/* Misc */
+/* Miscellaneous types */
#define AML_TYPE_CREATE_FIELD 0x11
#define AML_TYPE_CREATE_OBJECT 0x12
@@ -395,7 +387,6 @@
#define AML_TYPE_NAMED_SIMPLE 0x16
#define AML_TYPE_NAMED_COMPLEX 0x17
#define AML_TYPE_RETURN 0x18
-
#define AML_TYPE_UNDEFINED 0x19
#define AML_TYPE_BOGUS 0x1A
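
The "combination opcodes" relocated above pack two one-byte opcodes into a single u16: AML_LOGICAL_GREATER_EQUAL_OP (0x9295) is LNot (0x92) followed by LLess (0x95), and similarly for the other two. A quick check of the packing:

#include <stdio.h>

int main(void)
{
        unsigned int lnot = 0x92, lless = 0x95;

        /* LNot(LLess(a, b)) is represented as the packed opcode 0x9295 */
        printf("0x%04x\n", (lnot << 8) | lless);
        return 0;
}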
diff --git a/drivers/acpi/acpica/dbmethod.c b/drivers/acpi/acpica/dbmethod.c
index 15c8237b8a80..df62c9245efc 100644
--- a/drivers/acpi/acpica/dbmethod.c
+++ b/drivers/acpi/acpica/dbmethod.c
@@ -422,6 +422,7 @@ acpi_db_walk_for_execute(acpi_handle obj_handle,
status = acpi_get_object_info(obj_handle, &obj_info);
if (ACPI_FAILURE(status)) {
+ ACPI_FREE(pathname);
return (status);
}
diff --git a/drivers/acpi/acpica/dbxface.c b/drivers/acpi/acpica/dbxface.c
index 205b8e0eded5..8f665d94b8b5 100644
--- a/drivers/acpi/acpica/dbxface.c
+++ b/drivers/acpi/acpica/dbxface.c
@@ -45,6 +45,7 @@
#include "accommon.h"
#include "amlcode.h"
#include "acdebug.h"
+#include "acinterp.h"
#define _COMPONENT ACPI_CA_DEBUGGER
ACPI_MODULE_NAME("dbxface")
@@ -125,7 +126,7 @@ error_exit:
*
* RETURN: Status
*
- * DESCRIPTION: Called for AML_BREAK_POINT_OP
+ * DESCRIPTION: Called for AML_BREAKPOINT_OP
*
******************************************************************************/
@@ -368,7 +369,9 @@ acpi_db_single_step(struct acpi_walk_state *walk_state,
walk_state->method_breakpoint = 1; /* Must be non-zero! */
}
+ acpi_ex_exit_interpreter();
status = acpi_db_start_command(walk_state, op);
+ acpi_ex_enter_interpreter();
/* User commands complete, continue execution of the interrupted method */
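
The acpi_ex_exit_interpreter()/acpi_ex_enter_interpreter() pair added above drops the AML interpreter mutex while acpi_db_start_command() blocks waiting for debugger input, then re-acquires it before execution resumes. The general "release the lock around a blocking call" shape, sketched with a pthread mutex for illustration (not the ACPICA locking code):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t interp_lock = PTHREAD_MUTEX_INITIALIZER;

static void blocking_prompt(void)
{
        printf("(debugger) waiting for user input...\n");       /* may block */
}

static void single_step(void)
{
        pthread_mutex_lock(&interp_lock);
        /* ... interpreter work under the lock ... */

        pthread_mutex_unlock(&interp_lock);     /* let other threads run */
        blocking_prompt();
        pthread_mutex_lock(&interp_lock);       /* resume under the lock */

        /* ... continue the interrupted method ... */
        pthread_mutex_unlock(&interp_lock);
}

int main(void)
{
        single_step();
        return 0;
}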
diff --git a/drivers/acpi/acpica/dscontrol.c b/drivers/acpi/acpica/dscontrol.c
index d31b49feaa79..f470e81b0499 100644
--- a/drivers/acpi/acpica/dscontrol.c
+++ b/drivers/acpi/acpica/dscontrol.c
@@ -347,7 +347,7 @@ acpi_ds_exec_end_control_op(struct acpi_walk_state *walk_state,
break;
- case AML_BREAK_POINT_OP:
+ case AML_BREAKPOINT_OP:
acpi_db_signal_break_point(walk_state);
diff --git a/drivers/acpi/acpica/dsmthdat.c b/drivers/acpi/acpica/dsmthdat.c
index adcc72cd53a7..27a7de95f7b0 100644
--- a/drivers/acpi/acpica/dsmthdat.c
+++ b/drivers/acpi/acpica/dsmthdat.c
@@ -672,7 +672,8 @@ acpi_ds_store_object_to_local(u8 type,
*
* FUNCTION: acpi_ds_method_data_get_type
*
- * PARAMETERS: opcode - Either AML_LOCAL_OP or AML_ARG_OP
+ * PARAMETERS: opcode - Either AML_FIRST_LOCAL_OP or
+ * AML_FIRST_ARG_OP
* index - Which Local or Arg whose type to get
* walk_state - Current walk state object
*
diff --git a/drivers/acpi/acpica/dsobject.c b/drivers/acpi/acpica/dsobject.c
index 8deaa16493a0..7df3152ed856 100644
--- a/drivers/acpi/acpica/dsobject.c
+++ b/drivers/acpi/acpica/dsobject.c
@@ -114,7 +114,7 @@ acpi_ds_build_internal_object(struct acpi_walk_state *walk_state,
((op->common.parent->common.aml_opcode ==
AML_PACKAGE_OP)
|| (op->common.parent->common.aml_opcode ==
- AML_VAR_PACKAGE_OP))) {
+ AML_VARIABLE_PACKAGE_OP))) {
/*
* We didn't find the target and we are populating elements
* of a package - ignore if slack enabled. Some ASL code
@@ -144,7 +144,7 @@ acpi_ds_build_internal_object(struct acpi_walk_state *walk_state,
if ((op->common.parent->common.aml_opcode == AML_PACKAGE_OP) ||
(op->common.parent->common.aml_opcode ==
- AML_VAR_PACKAGE_OP)) {
+ AML_VARIABLE_PACKAGE_OP)) {
/*
* Attempt to resolve the node to a value before we insert it into
* the package. If this is a reference to a common data type,
@@ -398,7 +398,7 @@ acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state,
parent = op->common.parent;
while ((parent->common.aml_opcode == AML_PACKAGE_OP) ||
- (parent->common.aml_opcode == AML_VAR_PACKAGE_OP)) {
+ (parent->common.aml_opcode == AML_VARIABLE_PACKAGE_OP)) {
parent = parent->common.parent;
}
@@ -769,10 +769,10 @@ acpi_ds_init_object_from_op(struct acpi_walk_state *walk_state,
switch (op_info->type) {
case AML_TYPE_LOCAL_VARIABLE:
- /* Local ID (0-7) is (AML opcode - base AML_LOCAL_OP) */
+ /* Local ID (0-7) is (AML opcode - base AML_FIRST_LOCAL_OP) */
obj_desc->reference.value =
- ((u32)opcode) - AML_LOCAL_OP;
+ ((u32)opcode) - AML_FIRST_LOCAL_OP;
obj_desc->reference.class = ACPI_REFCLASS_LOCAL;
#ifndef ACPI_NO_METHOD_EXECUTION
@@ -790,9 +790,10 @@ acpi_ds_init_object_from_op(struct acpi_walk_state *walk_state,
case AML_TYPE_METHOD_ARGUMENT:
- /* Arg ID (0-6) is (AML opcode - base AML_ARG_OP) */
+ /* Arg ID (0-6) is (AML opcode - base AML_FIRST_ARG_OP) */
- obj_desc->reference.value = ((u32)opcode) - AML_ARG_OP;
+ obj_desc->reference.value =
+ ((u32)opcode) - AML_FIRST_ARG_OP;
obj_desc->reference.class = ACPI_REFCLASS_ARG;
#ifndef ACPI_NO_METHOD_EXECUTION
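
The renamed bases above keep the arithmetic unchanged: a Local or Arg reference index is the opcode minus the first opcode of its group, e.g. AML_LOCAL5 (0x65) - AML_FIRST_LOCAL_OP (0x60) = 5 and AML_ARG2 (0x6a) - AML_FIRST_ARG_OP (0x68) = 2. A two-line check:

#include <stdio.h>

#define AML_FIRST_LOCAL_OP 0x60
#define AML_FIRST_ARG_OP   0x68

int main(void)
{
        printf("Local index for opcode 0x65: %d\n", 0x65 - AML_FIRST_LOCAL_OP);
        printf("Arg index for opcode 0x6a:  %d\n", 0x6a - AML_FIRST_ARG_OP);
        return 0;
}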
diff --git a/drivers/acpi/acpica/dsopcode.c b/drivers/acpi/acpica/dsopcode.c
index 148523205d41..9a8f8a992b3e 100644
--- a/drivers/acpi/acpica/dsopcode.c
+++ b/drivers/acpi/acpica/dsopcode.c
@@ -639,7 +639,7 @@ acpi_ds_eval_data_object_operands(struct acpi_walk_state *walk_state,
break;
case AML_PACKAGE_OP:
- case AML_VAR_PACKAGE_OP:
+ case AML_VARIABLE_PACKAGE_OP:
status =
acpi_ds_build_internal_package_obj(walk_state, op, length,
@@ -660,7 +660,7 @@ acpi_ds_eval_data_object_operands(struct acpi_walk_state *walk_state,
if ((!op->common.parent) ||
((op->common.parent->common.aml_opcode != AML_PACKAGE_OP) &&
(op->common.parent->common.aml_opcode !=
- AML_VAR_PACKAGE_OP)
+ AML_VARIABLE_PACKAGE_OP)
&& (op->common.parent->common.aml_opcode !=
AML_NAME_OP))) {
walk_state->result_obj = obj_desc;
diff --git a/drivers/acpi/acpica/dsutils.c b/drivers/acpi/acpica/dsutils.c
index 049fbab4e5a6..406edec20de7 100644
--- a/drivers/acpi/acpica/dsutils.c
+++ b/drivers/acpi/acpica/dsutils.c
@@ -275,10 +275,10 @@ acpi_ds_is_result_used(union acpi_parse_object * op,
if ((op->common.parent->common.aml_opcode == AML_REGION_OP) ||
(op->common.parent->common.aml_opcode == AML_DATA_REGION_OP)
|| (op->common.parent->common.aml_opcode == AML_PACKAGE_OP)
- || (op->common.parent->common.aml_opcode ==
- AML_VAR_PACKAGE_OP)
|| (op->common.parent->common.aml_opcode == AML_BUFFER_OP)
|| (op->common.parent->common.aml_opcode ==
+ AML_VARIABLE_PACKAGE_OP)
+ || (op->common.parent->common.aml_opcode ==
AML_INT_EVAL_SUBTREE_OP)
|| (op->common.parent->common.aml_opcode ==
AML_BANK_FIELD_OP)) {
@@ -551,7 +551,7 @@ acpi_ds_create_operand(struct acpi_walk_state *walk_state,
*/
if (status == AE_NOT_FOUND) {
if (parent_op->common.aml_opcode ==
- AML_COND_REF_OF_OP) {
+ AML_CONDITIONAL_REF_OF_OP) {
/*
* For the Conditional Reference op, it's OK if
* the name is not found; We just need a way to
@@ -806,7 +806,7 @@ acpi_status acpi_ds_evaluate_name_path(struct acpi_walk_state *walk_state)
}
if ((op->common.parent->common.aml_opcode == AML_PACKAGE_OP) ||
- (op->common.parent->common.aml_opcode == AML_VAR_PACKAGE_OP) ||
+ (op->common.parent->common.aml_opcode == AML_VARIABLE_PACKAGE_OP) ||
(op->common.parent->common.aml_opcode == AML_REF_OF_OP)) {
/* TBD: Should we specify this feature as a bit of op_info->Flags of these opcodes? */
diff --git a/drivers/acpi/acpica/dswexec.c b/drivers/acpi/acpica/dswexec.c
index 78f8e6a4f72f..a2ff8ad70d58 100644
--- a/drivers/acpi/acpica/dswexec.c
+++ b/drivers/acpi/acpica/dswexec.c
@@ -497,7 +497,7 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state)
if ((op->asl.parent) &&
((op->asl.parent->asl.aml_opcode == AML_PACKAGE_OP)
|| (op->asl.parent->asl.aml_opcode ==
- AML_VAR_PACKAGE_OP))) {
+ AML_VARIABLE_PACKAGE_OP))) {
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
"Method Reference in a Package, Op=%p\n",
op));
diff --git a/drivers/acpi/acpica/dswload2.c b/drivers/acpi/acpica/dswload2.c
index 44d4553dfbdd..8d510c7e20c8 100644
--- a/drivers/acpi/acpica/dswload2.c
+++ b/drivers/acpi/acpica/dswload2.c
@@ -528,7 +528,7 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state)
status = acpi_ex_create_processor(walk_state);
break;
- case AML_POWER_RES_OP:
+ case AML_POWER_RESOURCE_OP:
status = acpi_ex_create_power_resource(walk_state);
break;
diff --git a/drivers/acpi/acpica/exmisc.c b/drivers/acpi/acpica/exmisc.c
index 1a6f59079ea5..f222a80ca38e 100644
--- a/drivers/acpi/acpica/exmisc.c
+++ b/drivers/acpi/acpica/exmisc.c
@@ -249,14 +249,14 @@ acpi_ex_do_logical_numeric_op(u16 opcode,
ACPI_FUNCTION_TRACE(ex_do_logical_numeric_op);
switch (opcode) {
- case AML_LAND_OP: /* LAnd (Integer0, Integer1) */
+ case AML_LOGICAL_AND_OP: /* LAnd (Integer0, Integer1) */
if (integer0 && integer1) {
local_result = TRUE;
}
break;
- case AML_LOR_OP: /* LOr (Integer0, Integer1) */
+ case AML_LOGICAL_OR_OP: /* LOr (Integer0, Integer1) */
if (integer0 || integer1) {
local_result = TRUE;
@@ -365,21 +365,21 @@ acpi_ex_do_logical_op(u16 opcode,
integer1 = local_operand1->integer.value;
switch (opcode) {
- case AML_LEQUAL_OP: /* LEqual (Operand0, Operand1) */
+ case AML_LOGICAL_EQUAL_OP: /* LEqual (Operand0, Operand1) */
if (integer0 == integer1) {
local_result = TRUE;
}
break;
- case AML_LGREATER_OP: /* LGreater (Operand0, Operand1) */
+ case AML_LOGICAL_GREATER_OP: /* LGreater (Operand0, Operand1) */
if (integer0 > integer1) {
local_result = TRUE;
}
break;
- case AML_LLESS_OP: /* LLess (Operand0, Operand1) */
+ case AML_LOGICAL_LESS_OP: /* LLess (Operand0, Operand1) */
if (integer0 < integer1) {
local_result = TRUE;
@@ -408,7 +408,7 @@ acpi_ex_do_logical_op(u16 opcode,
(length0 > length1) ? length1 : length0);
switch (opcode) {
- case AML_LEQUAL_OP: /* LEqual (Operand0, Operand1) */
+ case AML_LOGICAL_EQUAL_OP: /* LEqual (Operand0, Operand1) */
/* Length and all bytes must be equal */
@@ -420,7 +420,7 @@ acpi_ex_do_logical_op(u16 opcode,
}
break;
- case AML_LGREATER_OP: /* LGreater (Operand0, Operand1) */
+ case AML_LOGICAL_GREATER_OP: /* LGreater (Operand0, Operand1) */
if (compare > 0) {
local_result = TRUE;
@@ -437,7 +437,7 @@ acpi_ex_do_logical_op(u16 opcode,
}
break;
- case AML_LLESS_OP: /* LLess (Operand0, Operand1) */
+ case AML_LOGICAL_LESS_OP: /* LLess (Operand0, Operand1) */
if (compare > 0) {
goto cleanup; /* FALSE */
diff --git a/drivers/acpi/acpica/exnames.c b/drivers/acpi/acpica/exnames.c
index ee7b62a86661..caa5ed1f65ec 100644
--- a/drivers/acpi/acpica/exnames.c
+++ b/drivers/acpi/acpica/exnames.c
@@ -122,7 +122,7 @@ static char *acpi_ex_allocate_name_string(u32 prefix_count, u32 num_name_segs)
/* Set up multi prefixes */
- *temp_ptr++ = AML_MULTI_NAME_PREFIX_OP;
+ *temp_ptr++ = AML_MULTI_NAME_PREFIX;
*temp_ptr++ = (char)num_name_segs;
} else if (2 == num_name_segs) {
@@ -342,7 +342,7 @@ acpi_ex_get_name_string(acpi_object_type data_type,
}
break;
- case AML_MULTI_NAME_PREFIX_OP:
+ case AML_MULTI_NAME_PREFIX:
ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
"MultiNamePrefix at %p\n",
diff --git a/drivers/acpi/acpica/exoparg1.c b/drivers/acpi/acpica/exoparg1.c
index af73fcde7e5c..e327349675cd 100644
--- a/drivers/acpi/acpica/exoparg1.c
+++ b/drivers/acpi/acpica/exoparg1.c
@@ -274,7 +274,7 @@ acpi_status acpi_ex_opcode_1A_1T_1R(struct acpi_walk_state *walk_state)
case AML_FIND_SET_RIGHT_BIT_OP:
case AML_FROM_BCD_OP:
case AML_TO_BCD_OP:
- case AML_COND_REF_OF_OP:
+ case AML_CONDITIONAL_REF_OF_OP:
/* Create a return object of type Integer for these opcodes */
@@ -405,7 +405,7 @@ acpi_status acpi_ex_opcode_1A_1T_1R(struct acpi_walk_state *walk_state)
}
break;
- case AML_COND_REF_OF_OP: /* cond_ref_of (source_object, Result) */
+ case AML_CONDITIONAL_REF_OF_OP: /* cond_ref_of (source_object, Result) */
/*
* This op is a little strange because the internal return value is
* different than the return value stored in the result descriptor
@@ -475,14 +475,14 @@ acpi_status acpi_ex_opcode_1A_1T_1R(struct acpi_walk_state *walk_state)
/*
* ACPI 2.0 Opcodes
*/
- case AML_COPY_OP: /* Copy (Source, Target) */
+ case AML_COPY_OBJECT_OP: /* copy_object (Source, Target) */
status =
acpi_ut_copy_iobject_to_iobject(operand[0], &return_desc,
walk_state);
break;
- case AML_TO_DECSTRING_OP: /* to_decimal_string (Data, Result) */
+ case AML_TO_DECIMAL_STRING_OP: /* to_decimal_string (Data, Result) */
status =
acpi_ex_convert_to_string(operand[0], &return_desc,
@@ -495,7 +495,7 @@ acpi_status acpi_ex_opcode_1A_1T_1R(struct acpi_walk_state *walk_state)
}
break;
- case AML_TO_HEXSTRING_OP: /* to_hex_string (Data, Result) */
+ case AML_TO_HEX_STRING_OP: /* to_hex_string (Data, Result) */
status =
acpi_ex_convert_to_string(operand[0], &return_desc,
@@ -603,7 +603,7 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state)
/* Examine the AML opcode */
switch (walk_state->opcode) {
- case AML_LNOT_OP: /* LNot (Operand) */
+ case AML_LOGICAL_NOT_OP: /* LNot (Operand) */
return_desc = acpi_ut_create_integer_object((u64) 0);
if (!return_desc) {
@@ -652,9 +652,8 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state)
* NOTE: We use LNOT_OP here in order to force resolution of the
* reference operand to an actual integer.
*/
- status =
- acpi_ex_resolve_operands(AML_LNOT_OP, &temp_desc,
- walk_state);
+ status = acpi_ex_resolve_operands(AML_LOGICAL_NOT_OP,
+ &temp_desc, walk_state);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
"While resolving operands for [%s]",
diff --git a/drivers/acpi/acpica/exoparg2.c b/drivers/acpi/acpica/exoparg2.c
index 44ecba50c0da..eecb3bff7fd7 100644
--- a/drivers/acpi/acpica/exoparg2.c
+++ b/drivers/acpi/acpica/exoparg2.c
@@ -298,7 +298,7 @@ acpi_status acpi_ex_opcode_2A_1T_1R(struct acpi_walk_state *walk_state)
NULL, &return_desc->integer.value);
break;
- case AML_CONCAT_OP: /* Concatenate (Data1, Data2, Result) */
+ case AML_CONCATENATE_OP: /* Concatenate (Data1, Data2, Result) */
status =
acpi_ex_do_concatenate(operand[0], operand[1], &return_desc,
@@ -343,7 +343,7 @@ acpi_status acpi_ex_opcode_2A_1T_1R(struct acpi_walk_state *walk_state)
operand[0]->buffer.pointer, length);
break;
- case AML_CONCAT_RES_OP:
+ case AML_CONCATENATE_TEMPLATE_OP:
/* concatenate_res_template (Buffer, Buffer, Result) (ACPI 2.0) */
diff --git a/drivers/acpi/acpica/exoparg6.c b/drivers/acpi/acpica/exoparg6.c
index 31e4df97cbe1..688032b58a21 100644
--- a/drivers/acpi/acpica/exoparg6.c
+++ b/drivers/acpi/acpica/exoparg6.c
@@ -124,8 +124,8 @@ acpi_ex_do_match(u32 match_op,
* Change to: (M == P[i])
*/
status =
- acpi_ex_do_logical_op(AML_LEQUAL_OP, match_obj, package_obj,
- &logical_result);
+ acpi_ex_do_logical_op(AML_LOGICAL_EQUAL_OP, match_obj,
+ package_obj, &logical_result);
if (ACPI_FAILURE(status)) {
return (FALSE);
}
@@ -137,8 +137,8 @@ acpi_ex_do_match(u32 match_op,
* Change to: (M >= P[i]) (M not_less than P[i])
*/
status =
- acpi_ex_do_logical_op(AML_LLESS_OP, match_obj, package_obj,
- &logical_result);
+ acpi_ex_do_logical_op(AML_LOGICAL_LESS_OP, match_obj,
+ package_obj, &logical_result);
if (ACPI_FAILURE(status)) {
return (FALSE);
}
@@ -151,7 +151,7 @@ acpi_ex_do_match(u32 match_op,
* Change to: (M > P[i])
*/
status =
- acpi_ex_do_logical_op(AML_LGREATER_OP, match_obj,
+ acpi_ex_do_logical_op(AML_LOGICAL_GREATER_OP, match_obj,
package_obj, &logical_result);
if (ACPI_FAILURE(status)) {
return (FALSE);
@@ -164,7 +164,7 @@ acpi_ex_do_match(u32 match_op,
* Change to: (M <= P[i]) (M not_greater than P[i])
*/
status =
- acpi_ex_do_logical_op(AML_LGREATER_OP, match_obj,
+ acpi_ex_do_logical_op(AML_LOGICAL_GREATER_OP, match_obj,
package_obj, &logical_result);
if (ACPI_FAILURE(status)) {
return (FALSE);
@@ -178,8 +178,8 @@ acpi_ex_do_match(u32 match_op,
* Change to: (M < P[i])
*/
status =
- acpi_ex_do_logical_op(AML_LLESS_OP, match_obj, package_obj,
- &logical_result);
+ acpi_ex_do_logical_op(AML_LOGICAL_LESS_OP, match_obj,
+ package_obj, &logical_result);
if (ACPI_FAILURE(status)) {
return (FALSE);
}
diff --git a/drivers/acpi/acpica/exresolv.c b/drivers/acpi/acpica/exresolv.c
index 7fecefc2e1b4..aa8c6fd74cc3 100644
--- a/drivers/acpi/acpica/exresolv.c
+++ b/drivers/acpi/acpica/exresolv.c
@@ -196,7 +196,8 @@ acpi_ex_resolve_object_to_value(union acpi_operand_object **stack_ptr,
if ((walk_state->opcode ==
AML_INT_METHODCALL_OP)
- || (walk_state->opcode == AML_COPY_OP)) {
+ || (walk_state->opcode ==
+ AML_COPY_OBJECT_OP)) {
break;
}
diff --git a/drivers/acpi/acpica/exstore.c b/drivers/acpi/acpica/exstore.c
index a2f8001aeb86..bdd43cde8f36 100644
--- a/drivers/acpi/acpica/exstore.c
+++ b/drivers/acpi/acpica/exstore.c
@@ -416,7 +416,7 @@ acpi_ex_store_object_to_node(union acpi_operand_object *source_desc,
/* Only limited target types possible for everything except copy_object */
- if (walk_state->opcode != AML_COPY_OP) {
+ if (walk_state->opcode != AML_COPY_OBJECT_OP) {
/*
* Only copy_object allows all object types to be overwritten. For
* target_ref(s), there are restrictions on the object types that
@@ -499,7 +499,8 @@ acpi_ex_store_object_to_node(union acpi_operand_object *source_desc,
case ACPI_TYPE_STRING:
case ACPI_TYPE_BUFFER:
- if ((walk_state->opcode == AML_COPY_OP) || !implicit_conversion) {
+ if ((walk_state->opcode == AML_COPY_OBJECT_OP) ||
+ !implicit_conversion) {
/*
* However, copy_object and Stores to arg_x do not perform
* an implicit conversion, as per the ACPI specification.
diff --git a/drivers/acpi/acpica/exstoren.c b/drivers/acpi/acpica/exstoren.c
index 85db4716a043..56f59cf5da29 100644
--- a/drivers/acpi/acpica/exstoren.c
+++ b/drivers/acpi/acpica/exstoren.c
@@ -107,7 +107,7 @@ acpi_ex_resolve_object(union acpi_operand_object **source_desc_ptr,
/* For copy_object, no further validation necessary */
- if (walk_state->opcode == AML_COPY_OP) {
+ if (walk_state->opcode == AML_COPY_OBJECT_OP) {
break;
}
diff --git a/drivers/acpi/acpica/hwvalid.c b/drivers/acpi/acpica/hwvalid.c
index 531620abed80..3094cec4eab4 100644
--- a/drivers/acpi/acpica/hwvalid.c
+++ b/drivers/acpi/acpica/hwvalid.c
@@ -102,7 +102,7 @@ static const struct acpi_port_info acpi_protected_ports[] = {
{"PCI", 0x0CF8, 0x0CFF, ACPI_OSI_WIN_XP}
};
-#define ACPI_PORT_INFO_ENTRIES ACPI_ARRAY_LENGTH (acpi_protected_ports)
+#define ACPI_PORT_INFO_ENTRIES ACPI_ARRAY_LENGTH (acpi_protected_ports)
/******************************************************************************
*
@@ -128,7 +128,7 @@ acpi_hw_validate_io_request(acpi_io_address address, u32 bit_width)
acpi_io_address last_address;
const struct acpi_port_info *port_info;
- ACPI_FUNCTION_TRACE(hw_validate_io_request);
+ ACPI_FUNCTION_NAME(hw_validate_io_request);
/* Supported widths are 8/16/32 */
@@ -153,13 +153,13 @@ acpi_hw_validate_io_request(acpi_io_address address, u32 bit_width)
ACPI_ERROR((AE_INFO,
"Illegal I/O port address/length above 64K: %8.8X%8.8X/0x%X",
ACPI_FORMAT_UINT64(address), byte_width));
- return_ACPI_STATUS(AE_LIMIT);
+ return (AE_LIMIT);
}
/* Exit if requested address is not within the protected port table */
if (address > acpi_protected_ports[ACPI_PORT_INFO_ENTRIES - 1].end) {
- return_ACPI_STATUS(AE_OK);
+ return (AE_OK);
}
/* Check request against the list of protected I/O ports */
@@ -167,7 +167,7 @@ acpi_hw_validate_io_request(acpi_io_address address, u32 bit_width)
for (i = 0; i < ACPI_PORT_INFO_ENTRIES; i++, port_info++) {
/*
* Check if the requested address range will write to a reserved
- * port. Four cases to consider:
+ * port. There are four cases to consider:
*
* 1) Address range is contained completely in the port address range
* 2) Address range overlaps port range at the port range start
@@ -198,7 +198,7 @@ acpi_hw_validate_io_request(acpi_io_address address, u32 bit_width)
}
}
- return_ACPI_STATUS(AE_OK);
+ return (AE_OK);
}
/******************************************************************************
@@ -206,7 +206,7 @@ acpi_hw_validate_io_request(acpi_io_address address, u32 bit_width)
* FUNCTION: acpi_hw_read_port
*
* PARAMETERS: Address Address of I/O port/register to read
- * Value Where value is placed
+ * Value Where value (data) is returned
* Width Number of bits
*
* RETURN: Status and value read from port
@@ -244,7 +244,7 @@ acpi_status acpi_hw_read_port(acpi_io_address address, u32 *value, u32 width)
/*
* There has been a protection violation within the request. Fall
* back to byte granularity port I/O and ignore the failing bytes.
- * This provides Windows compatibility.
+ * This provides compatibility with other ACPI implementations.
*/
for (i = 0, *value = 0; i < width; i += 8) {
@@ -307,7 +307,7 @@ acpi_status acpi_hw_write_port(acpi_io_address address, u32 value, u32 width)
/*
* There has been a protection violation within the request. Fall
* back to byte granularity port I/O and ignore the failing bytes.
- * This provides Windows compatibility.
+ * This provides compatibility with other ACPI implementations.
*/
for (i = 0; i < width; i += 8) {
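
The byte-granularity fallback above splits a wider port access into 8-bit chunks and simply skips the bytes that hit a protected port. A userspace illustration of that splitting (the protected-port check and inb() are stubs, not the ACPICA functions):

#include <stdio.h>
#include <stdint.h>

static int byte_port_allowed(uint16_t port)
{
        return port != 0x71;            /* pretend port 0x71 is protected */
}

static uint8_t inb_stub(uint16_t port)
{
        return (uint8_t)(port & 0xff);  /* fake data for the example */
}

int main(void)
{
        uint16_t address = 0x70;
        uint32_t value = 0;

        /* Read 32 bits one byte at a time, ignoring the failing bytes. */
        for (int i = 0; i < 32; i += 8) {
                uint16_t port = address + i / 8;

                if (byte_port_allowed(port))
                        value |= (uint32_t)inb_stub(port) << i;
        }
        printf("value = 0x%08x\n", value);
        return 0;
}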
diff --git a/drivers/acpi/acpica/nsaccess.c b/drivers/acpi/acpica/nsaccess.c
index 498bb8f70e6b..fb265b5737de 100644
--- a/drivers/acpi/acpica/nsaccess.c
+++ b/drivers/acpi/acpica/nsaccess.c
@@ -485,7 +485,7 @@ acpi_ns_lookup(union acpi_generic_state *scope_info,
flags));
break;
- case AML_MULTI_NAME_PREFIX_OP:
+ case AML_MULTI_NAME_PREFIX:
/* More than one name_seg, search rules do not apply */
diff --git a/drivers/acpi/acpica/nsrepair.c b/drivers/acpi/acpica/nsrepair.c
index 38316266521e..418ef2ac82ab 100644
--- a/drivers/acpi/acpica/nsrepair.c
+++ b/drivers/acpi/acpica/nsrepair.c
@@ -290,22 +290,12 @@ object_repaired:
/* Object was successfully repaired */
if (package_index != ACPI_NOT_PACKAGE_ELEMENT) {
- /*
- * The original object is a package element. We need to
- * decrement the reference count of the original object,
- * for removing it from the package.
- *
- * However, if the original object was just wrapped with a
- * package object as part of the repair, we don't need to
- * change the reference count.
- */
+
+ /* Update reference count of new object */
+
if (!(info->return_flags & ACPI_OBJECT_WRAPPED)) {
new_object->common.reference_count =
return_object->common.reference_count;
-
- if (return_object->common.reference_count > 1) {
- return_object->common.reference_count--;
- }
}
ACPI_DEBUG_PRINT((ACPI_DB_REPAIR,
diff --git a/drivers/acpi/acpica/nsrepair2.c b/drivers/acpi/acpica/nsrepair2.c
index 352265498e90..06037e044694 100644
--- a/drivers/acpi/acpica/nsrepair2.c
+++ b/drivers/acpi/acpica/nsrepair2.c
@@ -403,16 +403,12 @@ acpi_ns_repair_CID(struct acpi_evaluate_info *info,
return (status);
}
- /* Take care with reference counts */
-
if (original_element != *element_ptr) {
- /* Element was replaced */
+ /* Update reference count of new object */
(*element_ptr)->common.reference_count =
original_ref_count;
-
- acpi_ut_remove_reference(original_element);
}
element_ptr++;
diff --git a/drivers/acpi/acpica/nsutils.c b/drivers/acpi/acpica/nsutils.c
index 661676714f7b..2fe87d0dd9d5 100644
--- a/drivers/acpi/acpica/nsutils.c
+++ b/drivers/acpi/acpica/nsutils.c
@@ -252,7 +252,7 @@ acpi_status acpi_ns_build_internal_name(struct acpi_namestring_info *info)
internal_name[1] = AML_DUAL_NAME_PREFIX;
result = &internal_name[2];
} else {
- internal_name[1] = AML_MULTI_NAME_PREFIX_OP;
+ internal_name[1] = AML_MULTI_NAME_PREFIX;
internal_name[2] = (char)num_segments;
result = &internal_name[3];
}
@@ -274,7 +274,7 @@ acpi_status acpi_ns_build_internal_name(struct acpi_namestring_info *info)
internal_name[i] = AML_DUAL_NAME_PREFIX;
result = &internal_name[(acpi_size)i + 1];
} else {
- internal_name[i] = AML_MULTI_NAME_PREFIX_OP;
+ internal_name[i] = AML_MULTI_NAME_PREFIX;
internal_name[(acpi_size)i + 1] = (char)num_segments;
result = &internal_name[(acpi_size)i + 2];
}
@@ -450,7 +450,7 @@ acpi_ns_externalize_name(u32 internal_name_length,
*/
if (prefix_length < internal_name_length) {
switch (internal_name[prefix_length]) {
- case AML_MULTI_NAME_PREFIX_OP:
+ case AML_MULTI_NAME_PREFIX:
/* <count> 4-byte names */
@@ -594,25 +594,20 @@ struct acpi_namespace_node *acpi_ns_validate_handle(acpi_handle handle)
void acpi_ns_terminate(void)
{
acpi_status status;
+ union acpi_operand_object *prev;
+ union acpi_operand_object *next;
ACPI_FUNCTION_TRACE(ns_terminate);
-#ifdef ACPI_EXEC_APP
- {
- union acpi_operand_object *prev;
- union acpi_operand_object *next;
+ /* Delete any module-level code blocks */
- /* Delete any module-level code blocks */
-
- next = acpi_gbl_module_code_list;
- while (next) {
- prev = next;
- next = next->method.mutex;
- prev->method.mutex = NULL; /* Clear the Mutex (cheated) field */
- acpi_ut_remove_reference(prev);
- }
+ next = acpi_gbl_module_code_list;
+ while (next) {
+ prev = next;
+ next = next->method.mutex;
+ prev->method.mutex = NULL; /* Clear the Mutex (cheated) field */
+ acpi_ut_remove_reference(prev);
}
-#endif
/*
* Free the entire namespace -- all nodes and all objects
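
For the AML_MULTI_NAME_PREFIX handling above: internal name strings with more than two segments are encoded as the multi-name prefix byte (0x2f) followed by a segment count and the packed 4-character segments, as acpi_ns_build_internal_name() does. A small example that prints the encoding of a hypothetical path \_SB.PCI0.LPCB (the path itself is just an illustration):

#include <stdio.h>

#define AML_ROOT_PREFIX         0x5c    /* '\' */
#define AML_MULTI_NAME_PREFIX   0x2f

int main(void)
{
        /* "\_SB.PCI0.LPCB": root prefix, multi-name prefix, segment count,
         * then three fixed-width 4-character name segments. */
        const unsigned char internal[] = {
                AML_ROOT_PREFIX, AML_MULTI_NAME_PREFIX, 3,
                '_', 'S', 'B', '_',
                'P', 'C', 'I', '0',
                'L', 'P', 'C', 'B',
        };

        for (unsigned int i = 0; i < sizeof(internal); i++)
                printf("%02x ", internal[i]);
        printf("\n");
        return 0;
}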
diff --git a/drivers/acpi/acpica/psargs.c b/drivers/acpi/acpica/psargs.c
index 05b62ad44c3e..eb9dfaca555f 100644
--- a/drivers/acpi/acpica/psargs.c
+++ b/drivers/acpi/acpica/psargs.c
@@ -47,6 +47,7 @@
#include "amlcode.h"
#include "acnamesp.h"
#include "acdispat.h"
+#include "acconvert.h"
#define _COMPONENT ACPI_PARSER
ACPI_MODULE_NAME("psargs")
@@ -186,7 +187,7 @@ char *acpi_ps_get_next_namestring(struct acpi_parse_state *parser_state)
end += 1 + (2 * ACPI_NAME_SIZE);
break;
- case AML_MULTI_NAME_PREFIX_OP:
+ case AML_MULTI_NAME_PREFIX:
/* Multiple name segments, 4 chars each, count in next byte */
@@ -339,7 +340,7 @@ acpi_ps_get_next_namepath(struct acpi_walk_state *walk_state,
/* 2) not_found during a cond_ref_of(x) is ok by definition */
else if (walk_state->op->common.aml_opcode ==
- AML_COND_REF_OF_OP) {
+ AML_CONDITIONAL_REF_OF_OP) {
status = AE_OK;
}
@@ -352,7 +353,7 @@ acpi_ps_get_next_namepath(struct acpi_walk_state *walk_state,
((arg->common.parent->common.aml_opcode ==
AML_PACKAGE_OP)
|| (arg->common.parent->common.aml_opcode ==
- AML_VAR_PACKAGE_OP))) {
+ AML_VARIABLE_PACKAGE_OP))) {
status = AE_OK;
}
}
@@ -502,6 +503,7 @@ static union acpi_parse_object *acpi_ps_get_next_field(struct acpi_parse_state
ACPI_FUNCTION_TRACE(ps_get_next_field);
+ ASL_CV_CAPTURE_COMMENTS_ONLY(parser_state);
aml = parser_state->aml;
/* Determine field type */
@@ -546,6 +548,7 @@ static union acpi_parse_object *acpi_ps_get_next_field(struct acpi_parse_state
/* Decode the field type */
+ ASL_CV_CAPTURE_COMMENTS_ONLY(parser_state);
switch (opcode) {
case AML_INT_NAMEDFIELD_OP:
@@ -555,6 +558,22 @@ static union acpi_parse_object *acpi_ps_get_next_field(struct acpi_parse_state
acpi_ps_set_name(field, name);
parser_state->aml += ACPI_NAME_SIZE;
+ ASL_CV_CAPTURE_COMMENTS_ONLY(parser_state);
+
+#ifdef ACPI_ASL_COMPILER
+ /*
+ * Because the package length isn't represented as a parse tree object,
+ * take the comments surrounding it and attach them to the previously
+ * created parse node.
+ */
+ if (field->common.inline_comment) {
+ field->common.name_comment =
+ field->common.inline_comment;
+ }
+ field->common.inline_comment = acpi_gbl_current_inline_comment;
+ acpi_gbl_current_inline_comment = NULL;
+#endif
+
/* Get the length which is encoded as a package length */
field->common.value.size =
@@ -609,11 +628,13 @@ static union acpi_parse_object *acpi_ps_get_next_field(struct acpi_parse_state
if (ACPI_GET8(parser_state->aml) == AML_BUFFER_OP) {
parser_state->aml++;
+ ASL_CV_CAPTURE_COMMENTS_ONLY(parser_state);
pkg_end = parser_state->aml;
pkg_length =
acpi_ps_get_next_package_length(parser_state);
pkg_end += pkg_length;
+ ASL_CV_CAPTURE_COMMENTS_ONLY(parser_state);
if (parser_state->aml < pkg_end) {
/* Non-empty list */
@@ -630,6 +651,7 @@ static union acpi_parse_object *acpi_ps_get_next_field(struct acpi_parse_state
opcode = ACPI_GET8(parser_state->aml);
parser_state->aml++;
+ ASL_CV_CAPTURE_COMMENTS_ONLY(parser_state);
switch (opcode) {
case AML_BYTE_OP: /* AML_BYTEDATA_ARG */
@@ -660,6 +682,7 @@ static union acpi_parse_object *acpi_ps_get_next_field(struct acpi_parse_state
/* Fill in bytelist data */
+ ASL_CV_CAPTURE_COMMENTS_ONLY(parser_state);
arg->named.value.size = buffer_length;
arg->named.data = parser_state->aml;
}
diff --git a/drivers/acpi/acpica/psloop.c b/drivers/acpi/acpica/psloop.c
index 14d689606d2f..b4224005783c 100644
--- a/drivers/acpi/acpica/psloop.c
+++ b/drivers/acpi/acpica/psloop.c
@@ -55,6 +55,7 @@
#include "acparser.h"
#include "acdispat.h"
#include "amlcode.h"
+#include "acconvert.h"
#define _COMPONENT ACPI_PARSER
ACPI_MODULE_NAME("psloop")
@@ -132,6 +133,21 @@ acpi_ps_get_arguments(struct acpi_walk_state *walk_state,
!walk_state->arg_count) {
walk_state->aml = walk_state->parser_state.aml;
+ switch (op->common.aml_opcode) {
+ case AML_METHOD_OP:
+ case AML_BUFFER_OP:
+ case AML_PACKAGE_OP:
+ case AML_VARIABLE_PACKAGE_OP:
+ case AML_WHILE_OP:
+
+ break;
+
+ default:
+
+ ASL_CV_CAPTURE_COMMENTS(walk_state);
+ break;
+ }
+
status =
acpi_ps_get_next_arg(walk_state,
&(walk_state->parser_state),
@@ -254,7 +270,7 @@ acpi_ps_get_arguments(struct acpi_walk_state *walk_state,
case AML_BUFFER_OP:
case AML_PACKAGE_OP:
- case AML_VAR_PACKAGE_OP:
+ case AML_VARIABLE_PACKAGE_OP:
if ((op->common.parent) &&
(op->common.parent->common.aml_opcode ==
@@ -480,6 +496,8 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
/* Iterative parsing loop, while there is more AML to process: */
while ((parser_state->aml < parser_state->aml_end) || (op)) {
+ ASL_CV_CAPTURE_COMMENTS(walk_state);
+
aml_op_start = parser_state->aml;
if (!op) {
status =
@@ -516,6 +534,20 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
*/
walk_state->arg_count = 0;
+ switch (op->common.aml_opcode) {
+ case AML_BYTE_OP:
+ case AML_WORD_OP:
+ case AML_DWORD_OP:
+ case AML_QWORD_OP:
+
+ break;
+
+ default:
+
+ ASL_CV_CAPTURE_COMMENTS(walk_state);
+ break;
+ }
+
/* Are there any arguments that must be processed? */
if (walk_state->arg_types) {
diff --git a/drivers/acpi/acpica/psobject.c b/drivers/acpi/acpica/psobject.c
index 5c4aff0f4f26..5bcb61831706 100644
--- a/drivers/acpi/acpica/psobject.c
+++ b/drivers/acpi/acpica/psobject.c
@@ -45,6 +45,7 @@
#include "accommon.h"
#include "acparser.h"
#include "amlcode.h"
+#include "acconvert.h"
#define _COMPONENT ACPI_PARSER
ACPI_MODULE_NAME("psobject")
@@ -190,6 +191,7 @@ acpi_ps_build_named_op(struct acpi_walk_state *walk_state,
*/
while (GET_CURRENT_ARG_TYPE(walk_state->arg_types) &&
(GET_CURRENT_ARG_TYPE(walk_state->arg_types) != ARGP_NAME)) {
+ ASL_CV_CAPTURE_COMMENTS(walk_state);
status =
acpi_ps_get_next_arg(walk_state,
&(walk_state->parser_state),
@@ -203,6 +205,18 @@ acpi_ps_build_named_op(struct acpi_walk_state *walk_state,
INCREMENT_ARG_LIST(walk_state->arg_types);
}
+ /* Are there any inline comments associated with the name_seg? If so, save them. */
+
+ ASL_CV_CAPTURE_COMMENTS(walk_state);
+
+#ifdef ACPI_ASL_COMPILER
+ if (acpi_gbl_current_inline_comment != NULL) {
+ unnamed_op->common.name_comment =
+ acpi_gbl_current_inline_comment;
+ acpi_gbl_current_inline_comment = NULL;
+ }
+#endif
+
/*
* Make sure that we found a NAME and didn't run out of arguments
*/
@@ -243,6 +257,30 @@ acpi_ps_build_named_op(struct acpi_walk_state *walk_state,
acpi_ps_append_arg(*op, unnamed_op->common.value.arg);
+#ifdef ACPI_ASL_COMPILER
+
+ /* save any comments that might be associated with unnamed_op. */
+
+ (*op)->common.inline_comment = unnamed_op->common.inline_comment;
+ (*op)->common.end_node_comment = unnamed_op->common.end_node_comment;
+ (*op)->common.close_brace_comment =
+ unnamed_op->common.close_brace_comment;
+ (*op)->common.name_comment = unnamed_op->common.name_comment;
+ (*op)->common.comment_list = unnamed_op->common.comment_list;
+ (*op)->common.end_blk_comment = unnamed_op->common.end_blk_comment;
+ (*op)->common.cv_filename = unnamed_op->common.cv_filename;
+ (*op)->common.cv_parent_filename =
+ unnamed_op->common.cv_parent_filename;
+ (*op)->named.aml = unnamed_op->common.aml;
+
+ unnamed_op->common.inline_comment = NULL;
+ unnamed_op->common.end_node_comment = NULL;
+ unnamed_op->common.close_brace_comment = NULL;
+ unnamed_op->common.name_comment = NULL;
+ unnamed_op->common.comment_list = NULL;
+ unnamed_op->common.end_blk_comment = NULL;
+#endif
+
if ((*op)->common.aml_opcode == AML_REGION_OP ||
(*op)->common.aml_opcode == AML_DATA_REGION_OP) {
/*
diff --git a/drivers/acpi/acpica/psopcode.c b/drivers/acpi/acpica/psopcode.c
index 451b672915f1..c343a0d5a3d2 100644
--- a/drivers/acpi/acpica/psopcode.c
+++ b/drivers/acpi/acpica/psopcode.c
@@ -69,7 +69,7 @@ ACPI_MODULE_NAME("psopcode")
AML_DEVICE_OP
AML_THERMAL_ZONE_OP
AML_METHOD_OP
- AML_POWER_RES_OP
+ AML_POWER_RESOURCE_OP
AML_PROCESSOR_OP
AML_FIELD_OP
AML_INDEX_FIELD_OP
@@ -95,7 +95,7 @@ ACPI_MODULE_NAME("psopcode")
AML_DEVICE_OP
AML_THERMAL_ZONE_OP
AML_METHOD_OP
- AML_POWER_RES_OP
+ AML_POWER_RESOURCE_OP
AML_PROCESSOR_OP
AML_FIELD_OP
AML_INDEX_FIELD_OP
@@ -113,7 +113,7 @@ ACPI_MODULE_NAME("psopcode")
AML_DEVICE_OP
AML_THERMAL_ZONE_OP
AML_METHOD_OP
- AML_POWER_RES_OP
+ AML_POWER_RESOURCE_OP
AML_PROCESSOR_OP
AML_NAME_OP
AML_ALIAS_OP
@@ -136,7 +136,7 @@ ACPI_MODULE_NAME("psopcode")
AML_DEVICE_OP
AML_THERMAL_ZONE_OP
AML_METHOD_OP
- AML_POWER_RES_OP
+ AML_POWER_RESOURCE_OP
AML_PROCESSOR_OP
AML_NAME_OP
AML_ALIAS_OP
@@ -149,7 +149,7 @@ ACPI_MODULE_NAME("psopcode")
must be deferred until needed
AML_METHOD_OP
- AML_VAR_PACKAGE_OP
+ AML_VARIABLE_PACKAGE_OP
AML_CREATE_FIELD_OP
AML_CREATE_BIT_FIELD_OP
AML_CREATE_BYTE_FIELD_OP
@@ -652,7 +652,10 @@ const struct acpi_opcode_info acpi_gbl_aml_op_info[AML_NUM_OPCODES] = {
/* 81 */ ACPI_OP("External", ARGP_EXTERNAL_OP, ARGI_EXTERNAL_OP,
ACPI_TYPE_ANY, AML_CLASS_EXECUTE, /* ? */
- AML_TYPE_EXEC_3A_0T_0R, AML_FLAGS_EXEC_3A_0T_0R)
+ AML_TYPE_EXEC_3A_0T_0R, AML_FLAGS_EXEC_3A_0T_0R),
+/* 82 */ ACPI_OP("Comment", ARGP_COMMENT_OP, ARGI_COMMENT_OP,
+ ACPI_TYPE_STRING, AML_CLASS_ARGUMENT,
+ AML_TYPE_LITERAL, AML_CONSTANT)
/*! [End] no source code translation !*/
};
diff --git a/drivers/acpi/acpica/psopinfo.c b/drivers/acpi/acpica/psopinfo.c
index 89f95b7f26e9..eff22950232b 100644
--- a/drivers/acpi/acpica/psopinfo.c
+++ b/drivers/acpi/acpica/psopinfo.c
@@ -226,7 +226,7 @@ const u8 acpi_gbl_short_op_index[256] = {
/* 0x90 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x73, 0x74,
/* 0x98 */ 0x75, 0x76, _UNK, _UNK, 0x77, 0x78, 0x79, 0x7A,
/* 0xA0 */ 0x3e, 0x3f, 0x40, 0x41, 0x42, 0x43, 0x60, 0x61,
-/* 0xA8 */ 0x62, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
+/* 0xA8 */ 0x62, 0x82, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
/* 0xB0 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
/* 0xB8 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
/* 0xC0 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
diff --git a/drivers/acpi/acpica/psparse.c b/drivers/acpi/acpica/psparse.c
index a813bbbd5a8b..8116a670de39 100644
--- a/drivers/acpi/acpica/psparse.c
+++ b/drivers/acpi/acpica/psparse.c
@@ -105,7 +105,7 @@ u16 acpi_ps_peek_opcode(struct acpi_parse_state * parser_state)
aml = parser_state->aml;
opcode = (u16) ACPI_GET8(aml);
- if (opcode == AML_EXTENDED_OP_PREFIX) {
+ if (opcode == AML_EXTENDED_PREFIX) {
/* Extended opcode, get the second opcode byte */
@@ -210,7 +210,7 @@ acpi_ps_complete_this_op(struct acpi_walk_state *walk_state,
|| (op->common.parent->common.aml_opcode ==
AML_BANK_FIELD_OP)
|| (op->common.parent->common.aml_opcode ==
- AML_VAR_PACKAGE_OP)) {
+ AML_VARIABLE_PACKAGE_OP)) {
replacement_op =
acpi_ps_alloc_op(AML_INT_RETURN_VALUE_OP,
op->common.aml);
@@ -225,7 +225,7 @@ acpi_ps_complete_this_op(struct acpi_walk_state *walk_state,
if ((op->common.aml_opcode == AML_BUFFER_OP)
|| (op->common.aml_opcode == AML_PACKAGE_OP)
|| (op->common.aml_opcode ==
- AML_VAR_PACKAGE_OP)) {
+ AML_VARIABLE_PACKAGE_OP)) {
replacement_op =
acpi_ps_alloc_op(op->common.
aml_opcode,
diff --git a/drivers/acpi/acpica/pstree.c b/drivers/acpi/acpica/pstree.c
index 9677fff8fd47..c06d6e2fc7a5 100644
--- a/drivers/acpi/acpica/pstree.c
+++ b/drivers/acpi/acpica/pstree.c
@@ -45,6 +45,7 @@
#include "accommon.h"
#include "acparser.h"
#include "amlcode.h"
+#include "acconvert.h"
#define _COMPONENT ACPI_PARSER
ACPI_MODULE_NAME("pstree")
@@ -216,6 +217,7 @@ union acpi_parse_object *acpi_ps_get_depth_next(union acpi_parse_object *origin,
next = acpi_ps_get_arg(op, 0);
if (next) {
+ ASL_CV_LABEL_FILENODE(next);
return (next);
}
@@ -223,6 +225,7 @@ union acpi_parse_object *acpi_ps_get_depth_next(union acpi_parse_object *origin,
next = op->common.next;
if (next) {
+ ASL_CV_LABEL_FILENODE(next);
return (next);
}
@@ -233,6 +236,8 @@ union acpi_parse_object *acpi_ps_get_depth_next(union acpi_parse_object *origin,
while (parent) {
arg = acpi_ps_get_arg(parent, 0);
while (arg && (arg != origin) && (arg != op)) {
+
+ ASL_CV_LABEL_FILENODE(arg);
arg = arg->common.next;
}
@@ -247,6 +252,7 @@ union acpi_parse_object *acpi_ps_get_depth_next(union acpi_parse_object *origin,
/* Found sibling of parent */
+ ASL_CV_LABEL_FILENODE(parent->common.next);
return (parent->common.next);
}
@@ -254,6 +260,7 @@ union acpi_parse_object *acpi_ps_get_depth_next(union acpi_parse_object *origin,
parent = parent->common.parent;
}
+ ASL_CV_LABEL_FILENODE(next);
return (next);
}
@@ -296,7 +303,7 @@ union acpi_parse_object *acpi_ps_get_child(union acpi_parse_object *op)
child = acpi_ps_get_arg(op, 1);
break;
- case AML_POWER_RES_OP:
+ case AML_POWER_RESOURCE_OP:
case AML_INDEX_FIELD_OP:
child = acpi_ps_get_arg(op, 2);
diff --git a/drivers/acpi/acpica/psutils.c b/drivers/acpi/acpica/psutils.c
index 2fa38bb76a55..02642760cb93 100644
--- a/drivers/acpi/acpica/psutils.c
+++ b/drivers/acpi/acpica/psutils.c
@@ -45,6 +45,7 @@
#include "accommon.h"
#include "acparser.h"
#include "amlcode.h"
+#include "acconvert.h"
#define _COMPONENT ACPI_PARSER
ACPI_MODULE_NAME("psutils")
@@ -152,6 +153,15 @@ union acpi_parse_object *acpi_ps_alloc_op(u16 opcode, u8 *aml)
acpi_ps_init_op(op, opcode);
op->common.aml = aml;
op->common.flags = flags;
+ ASL_CV_CLEAR_OP_COMMENTS(op);
+
+ if (opcode == AML_SCOPE_OP) {
+ acpi_gbl_current_scope = op;
+ }
+ }
+
+ if (gbl_capture_comments) {
+ ASL_CV_TRANSFER_COMMENTS(op);
}
return (op);
@@ -174,6 +184,7 @@ void acpi_ps_free_op(union acpi_parse_object *op)
{
ACPI_FUNCTION_NAME(ps_free_op);
+ ASL_CV_CLEAR_OP_COMMENTS(op);
if (op->common.aml_opcode == AML_INT_RETURN_VALUE_OP) {
ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
"Free retval op: %p\n", op));
diff --git a/drivers/acpi/acpica/utalloc.c b/drivers/acpi/acpica/utalloc.c
index a3401bd29413..5594a359dbf1 100644
--- a/drivers/acpi/acpica/utalloc.c
+++ b/drivers/acpi/acpica/utalloc.c
@@ -142,6 +142,45 @@ acpi_status acpi_ut_create_caches(void)
if (ACPI_FAILURE(status)) {
return (status);
}
+#ifdef ACPI_ASL_COMPILER
+ /*
+ * For use with the ASL-/ASL+ option. This cache keeps track of regular
+ * 0xA9 0x01 comments.
+ */
+ status =
+ acpi_os_create_cache("Acpi-Comment",
+ sizeof(struct acpi_comment_node),
+ ACPI_MAX_COMMENT_CACHE_DEPTH,
+ &acpi_gbl_reg_comment_cache);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+
+ /*
+ * This cache keeps track of the starting addresses where the comments
+ * lie, which helps prevent duplication of comments.
+ */
+ status =
+ acpi_os_create_cache("Acpi-Comment-Addr",
+ sizeof(struct acpi_comment_addr_node),
+ ACPI_MAX_COMMENT_CACHE_DEPTH,
+ &acpi_gbl_comment_addr_cache);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+
+ /*
+ * This cache will be used for nodes that represent files.
+ */
+ status =
+ acpi_os_create_cache("Acpi-File", sizeof(struct acpi_file_node),
+ ACPI_MAX_COMMENT_CACHE_DEPTH,
+ &acpi_gbl_file_cache);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+#endif
+
#ifdef ACPI_DBG_TRACK_ALLOCATIONS
/* Memory allocation lists */
@@ -201,6 +240,17 @@ acpi_status acpi_ut_delete_caches(void)
(void)acpi_os_delete_cache(acpi_gbl_ps_node_ext_cache);
acpi_gbl_ps_node_ext_cache = NULL;
+#ifdef ACPI_ASL_COMPILER
+ (void)acpi_os_delete_cache(acpi_gbl_reg_comment_cache);
+ acpi_gbl_reg_comment_cache = NULL;
+
+ (void)acpi_os_delete_cache(acpi_gbl_comment_addr_cache);
+ acpi_gbl_comment_addr_cache = NULL;
+
+ (void)acpi_os_delete_cache(acpi_gbl_file_cache);
+ acpi_gbl_file_cache = NULL;
+#endif
+
#ifdef ACPI_DBG_TRACK_ALLOCATIONS
/* Debug only - display leftover memory allocation, if any */
diff --git a/drivers/acpi/acpica/utcache.c b/drivers/acpi/acpica/utcache.c
index 11c7f72f2d56..531493306dee 100644
--- a/drivers/acpi/acpica/utcache.c
+++ b/drivers/acpi/acpica/utcache.c
@@ -71,7 +71,7 @@ acpi_os_create_cache(char *cache_name,
ACPI_FUNCTION_ENTRY();
- if (!cache_name || !return_cache || (object_size < 16)) {
+ if (!cache_name || !return_cache || !object_size) {
return (AE_BAD_PARAMETER);
}
diff --git a/drivers/acpi/acpica/utdebug.c b/drivers/acpi/acpica/utdebug.c
index bd5ea3101eb7..615a885e2ca3 100644
--- a/drivers/acpi/acpica/utdebug.c
+++ b/drivers/acpi/acpica/utdebug.c
@@ -627,4 +627,5 @@ acpi_trace_point(acpi_trace_event_type type, u8 begin, u8 *aml, char *pathname)
}
ACPI_EXPORT_SYMBOL(acpi_trace_point)
+
#endif
diff --git a/drivers/acpi/acpica/utresrc.c b/drivers/acpi/acpica/utresrc.c
index ff096d9755b9..e0587c85bafd 100644
--- a/drivers/acpi/acpica/utresrc.c
+++ b/drivers/acpi/acpica/utresrc.c
@@ -474,6 +474,15 @@ acpi_ut_walk_aml_resources(struct acpi_walk_state *walk_state,
return_ACPI_STATUS(AE_AML_NO_RESOURCE_END_TAG);
}
+ /*
+ * The end_tag opcode must be followed by a zero byte.
+ * Although this byte is technically defined to be a checksum,
+ * in practice, all ASL compilers set this byte to zero.
+ */
+ if (*(aml + 1) != 0) {
+ return_ACPI_STATUS(AE_AML_NO_RESOURCE_END_TAG);
+ }
+
/* Return the pointer to the end_tag if requested */
if (!user_function) {
diff --git a/drivers/acpi/acpica/utxferror.c b/drivers/acpi/acpica/utxferror.c
index a16bd9eac653..950a1e500bfa 100644
--- a/drivers/acpi/acpica/utxferror.c
+++ b/drivers/acpi/acpica/utxferror.c
@@ -91,7 +91,7 @@ ACPI_EXPORT_SYMBOL(acpi_error)
*
* PARAMETERS: module_name - Caller's module name (for error output)
* line_number - Caller's line number (for error output)
- * status - Status to be formatted
+ * status - Status value to be decoded/formatted
* format - Printf format string + additional args
*
* RETURN: None
@@ -132,8 +132,8 @@ ACPI_EXPORT_SYMBOL(acpi_exception)
*
* FUNCTION: acpi_warning
*
- * PARAMETERS: module_name - Caller's module name (for error output)
- * line_number - Caller's line number (for error output)
+ * PARAMETERS: module_name - Caller's module name (for warning output)
+ * line_number - Caller's line number (for warning output)
* format - Printf format string + additional args
*
* RETURN: None
@@ -163,17 +163,13 @@ ACPI_EXPORT_SYMBOL(acpi_warning)
*
* FUNCTION: acpi_info
*
- * PARAMETERS: module_name - Caller's module name (for error output)
- * line_number - Caller's line number (for error output)
- * format - Printf format string + additional args
+ * PARAMETERS: format - Printf format string + additional args
*
* RETURN: None
*
* DESCRIPTION: Print generic "ACPI:" information message. There is no
* module/line/version info in order to keep the message simple.
*
- * TBD: module_name and line_number args are not needed, should be removed.
- *
******************************************************************************/
void ACPI_INTERNAL_VAR_XFACE acpi_info(const char *format, ...)
{
@@ -229,8 +225,8 @@ ACPI_EXPORT_SYMBOL(acpi_bios_error)
*
* FUNCTION: acpi_bios_warning
*
- * PARAMETERS: module_name - Caller's module name (for error output)
- * line_number - Caller's line number (for error output)
+ * PARAMETERS: module_name - Caller's module name (for warning output)
+ * line_number - Caller's line number (for warning output)
* format - Printf format string + additional args
*
* RETURN: None
diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c
index 7207e5fc9d3d..2c462beee551 100644
--- a/drivers/acpi/apei/erst.c
+++ b/drivers/acpi/apei/erst.c
@@ -513,7 +513,7 @@ retry:
if (i < erst_record_id_cache.len)
goto retry;
if (erst_record_id_cache.len >= erst_record_id_cache.size) {
- int new_size, alloc_size;
+ int new_size;
u64 *new_entries;
new_size = erst_record_id_cache.size * 2;
@@ -524,11 +524,7 @@ retry:
pr_warn(FW_WARN "too many record IDs!\n");
return 0;
}
- alloc_size = new_size * sizeof(entries[0]);
- if (alloc_size < PAGE_SIZE)
- new_entries = kmalloc(alloc_size, GFP_KERNEL);
- else
- new_entries = vmalloc(alloc_size);
+ new_entries = kvmalloc(new_size * sizeof(entries[0]), GFP_KERNEL);
if (!new_entries)
return -ENOMEM;
memcpy(new_entries, entries,
diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
index 4a5bb967250b..c5fecf97ee2f 100644
--- a/drivers/acpi/arm64/iort.c
+++ b/drivers/acpi/arm64/iort.c
@@ -225,7 +225,7 @@ static struct acpi_iort_node *iort_scan_node(enum acpi_iort_node_type type,
if (iort_node->type == type &&
ACPI_SUCCESS(callback(iort_node, context)))
- return iort_node;
+ return iort_node;
iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort_node,
iort_node->length);
@@ -253,17 +253,15 @@ static acpi_status iort_match_node_callback(struct acpi_iort_node *node,
void *context)
{
struct device *dev = context;
- acpi_status status;
+ acpi_status status = AE_NOT_FOUND;
if (node->type == ACPI_IORT_NODE_NAMED_COMPONENT) {
struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
struct acpi_device *adev = to_acpi_device_node(dev->fwnode);
struct acpi_iort_named_component *ncomp;
- if (!adev) {
- status = AE_NOT_FOUND;
+ if (!adev)
goto out;
- }
status = acpi_get_name(adev->handle, ACPI_FULL_PATHNAME, &buf);
if (ACPI_FAILURE(status)) {
@@ -289,8 +287,6 @@ static acpi_status iort_match_node_callback(struct acpi_iort_node *node,
*/
status = pci_rc->pci_segment_number == pci_domain_nr(bus) ?
AE_OK : AE_NOT_FOUND;
- } else {
- status = AE_NOT_FOUND;
}
out:
return status;
@@ -322,8 +318,7 @@ static int iort_id_map(struct acpi_iort_id_mapping *map, u8 type, u32 rid_in,
static
struct acpi_iort_node *iort_node_get_id(struct acpi_iort_node *node,
- u32 *id_out, u8 type_mask,
- int index)
+ u32 *id_out, int index)
{
struct acpi_iort_node *parent;
struct acpi_iort_id_mapping *map;
@@ -345,9 +340,6 @@ struct acpi_iort_node *iort_node_get_id(struct acpi_iort_node *node,
parent = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
map->output_reference);
- if (!(IORT_TYPE_MASK(parent->type) & type_mask))
- return NULL;
-
if (map->flags & ACPI_IORT_ID_SINGLE_MAPPING) {
if (node->type == ACPI_IORT_NODE_NAMED_COMPONENT ||
node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) {
@@ -359,11 +351,11 @@ struct acpi_iort_node *iort_node_get_id(struct acpi_iort_node *node,
return NULL;
}
-static struct acpi_iort_node *iort_node_map_rid(struct acpi_iort_node *node,
- u32 rid_in, u32 *rid_out,
- u8 type_mask)
+static struct acpi_iort_node *iort_node_map_id(struct acpi_iort_node *node,
+ u32 id_in, u32 *id_out,
+ u8 type_mask)
{
- u32 rid = rid_in;
+ u32 id = id_in;
/* Parse the ID mapping tree to find specified node type */
while (node) {
@@ -371,8 +363,8 @@ static struct acpi_iort_node *iort_node_map_rid(struct acpi_iort_node *node,
int i;
if (IORT_TYPE_MASK(node->type) & type_mask) {
- if (rid_out)
- *rid_out = rid;
+ if (id_out)
+ *id_out = id;
return node;
}
@@ -389,9 +381,9 @@ static struct acpi_iort_node *iort_node_map_rid(struct acpi_iort_node *node,
goto fail_map;
}
- /* Do the RID translation */
+ /* Do the ID translation */
for (i = 0; i < node->mapping_count; i++, map++) {
- if (!iort_id_map(map, node->type, rid, &rid))
+ if (!iort_id_map(map, node->type, id, &id))
break;
}
@@ -403,13 +395,41 @@ static struct acpi_iort_node *iort_node_map_rid(struct acpi_iort_node *node,
}
fail_map:
- /* Map input RID to output RID unchanged on mapping failure*/
- if (rid_out)
- *rid_out = rid_in;
+ /* Map input ID to output ID unchanged on mapping failure */
+ if (id_out)
+ *id_out = id_in;
return NULL;
}
+static
+struct acpi_iort_node *iort_node_map_platform_id(struct acpi_iort_node *node,
+ u32 *id_out, u8 type_mask,
+ int index)
+{
+ struct acpi_iort_node *parent;
+ u32 id;
+
+ /* step 1: retrieve the initial dev id */
+ parent = iort_node_get_id(node, &id, index);
+ if (!parent)
+ return NULL;
+
+ /*
+ * Optional step 2: if the parent of the initial dev id is not of the
+ * target type we want, map the id again to handle chains such
+ * as NC (named component) -> SMMU -> ITS. If the type already matches,
+ * return the initial dev id and its parent pointer directly.
+ */
+ if (!(IORT_TYPE_MASK(parent->type) & type_mask))
+ parent = iort_node_map_id(parent, id, id_out, type_mask);
+ else
+ if (id_out)
+ *id_out = id;
+
+ return parent;
+}
+
static struct acpi_iort_node *iort_find_dev_node(struct device *dev)
{
struct pci_bus *pbus;
@@ -443,13 +463,38 @@ u32 iort_msi_map_rid(struct device *dev, u32 req_id)
if (!node)
return req_id;
- iort_node_map_rid(node, req_id, &dev_id, IORT_MSI_TYPE);
+ iort_node_map_id(node, req_id, &dev_id, IORT_MSI_TYPE);
return dev_id;
}
/**
+ * iort_pmsi_get_dev_id() - Get the device id for a device
+ * @dev: The device for which the mapping is to be done.
+ * @dev_id: The device ID found.
+ *
+ * Returns: 0 if a device id was successfully found, -ENODEV on error
+ */
+int iort_pmsi_get_dev_id(struct device *dev, u32 *dev_id)
+{
+ int i;
+ struct acpi_iort_node *node;
+
+ node = iort_find_dev_node(dev);
+ if (!node)
+ return -ENODEV;
+
+ for (i = 0; i < node->mapping_count; i++) {
+ if (iort_node_map_platform_id(node, dev_id, IORT_MSI_TYPE, i))
+ return 0;
+ }
+
+ return -ENODEV;
+}
+
+/**
* iort_dev_find_its_id() - Find the ITS identifier for a device
* @dev: The device.
+ * @req_id: Device's requester ID
* @idx: Index of the ITS identifier list.
* @its_id: ITS identifier.
*
@@ -465,7 +510,7 @@ static int iort_dev_find_its_id(struct device *dev, u32 req_id,
if (!node)
return -ENXIO;
- node = iort_node_map_rid(node, req_id, NULL, IORT_MSI_TYPE);
+ node = iort_node_map_id(node, req_id, NULL, IORT_MSI_TYPE);
if (!node)
return -ENXIO;
@@ -503,6 +548,56 @@ struct irq_domain *iort_get_device_domain(struct device *dev, u32 req_id)
return irq_find_matching_fwnode(handle, DOMAIN_BUS_PCI_MSI);
}
+/**
+ * iort_get_platform_device_domain() - Find MSI domain related to a
+ * platform device
+ * @dev: the dev pointer associated with the platform device
+ *
+ * Returns: the MSI domain for this device, NULL otherwise
+ */
+static struct irq_domain *iort_get_platform_device_domain(struct device *dev)
+{
+ struct acpi_iort_node *node, *msi_parent;
+ struct fwnode_handle *iort_fwnode;
+ struct acpi_iort_its_group *its;
+ int i;
+
+ /* find its associated iort node */
+ node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
+ iort_match_node_callback, dev);
+ if (!node)
+ return NULL;
+
+ /* then find its msi parent node */
+ for (i = 0; i < node->mapping_count; i++) {
+ msi_parent = iort_node_map_platform_id(node, NULL,
+ IORT_MSI_TYPE, i);
+ if (msi_parent)
+ break;
+ }
+
+ if (!msi_parent)
+ return NULL;
+
+ /* Move to ITS specific data */
+ its = (struct acpi_iort_its_group *)msi_parent->node_data;
+
+ iort_fwnode = iort_find_domain_token(its->identifiers[0]);
+ if (!iort_fwnode)
+ return NULL;
+
+ return irq_find_matching_fwnode(iort_fwnode, DOMAIN_BUS_PLATFORM_MSI);
+}
+
+void acpi_configure_pmsi_domain(struct device *dev)
+{
+ struct irq_domain *msi_domain;
+
+ msi_domain = iort_get_platform_device_domain(dev);
+ if (msi_domain)
+ dev_set_msi_domain(dev, msi_domain);
+}
+
static int __get_pci_rid(struct pci_dev *pdev, u16 alias, void *data)
{
u32 *rid = data;
@@ -523,6 +618,46 @@ static int arm_smmu_iort_xlate(struct device *dev, u32 streamid,
return ret;
}
+static inline bool iort_iommu_driver_enabled(u8 type)
+{
+ switch (type) {
+ case ACPI_IORT_NODE_SMMU_V3:
+ return IS_BUILTIN(CONFIG_ARM_SMMU_V3);
+ case ACPI_IORT_NODE_SMMU:
+ return IS_BUILTIN(CONFIG_ARM_SMMU);
+ default:
+ pr_warn("IORT node type %u does not describe an SMMU\n", type);
+ return false;
+ }
+}
+
+#ifdef CONFIG_IOMMU_API
+static inline
+const struct iommu_ops *iort_fwspec_iommu_ops(struct iommu_fwspec *fwspec)
+{
+ return (fwspec && fwspec->ops) ? fwspec->ops : NULL;
+}
+
+static inline
+int iort_add_device_replay(const struct iommu_ops *ops, struct device *dev)
+{
+ int err = 0;
+
+ if (!IS_ERR_OR_NULL(ops) && ops->add_device && dev->bus &&
+ !dev->iommu_group)
+ err = ops->add_device(dev);
+
+ return err;
+}
+#else
+static inline
+const struct iommu_ops *iort_fwspec_iommu_ops(struct iommu_fwspec *fwspec)
+{ return NULL; }
+static inline
+int iort_add_device_replay(const struct iommu_ops *ops, struct device *dev)
+{ return 0; }
+#endif
+
static const struct iommu_ops *iort_iommu_xlate(struct device *dev,
struct acpi_iort_node *node,
u32 streamid)
@@ -531,14 +666,31 @@ static const struct iommu_ops *iort_iommu_xlate(struct device *dev,
int ret = -ENODEV;
struct fwnode_handle *iort_fwnode;
+ /*
+ * If we have already translated the fwspec, there
+ * is nothing left to do; return the iommu_ops.
+ */
+ ops = iort_fwspec_iommu_ops(dev->iommu_fwspec);
+ if (ops)
+ return ops;
+
if (node) {
iort_fwnode = iort_get_fwnode(node);
if (!iort_fwnode)
return NULL;
ops = iommu_ops_from_fwnode(iort_fwnode);
+ /*
+ * If the ops look-up fails, this means that either
+ * the SMMU drivers have not been probed yet or that
+ * the SMMU drivers are not built into the kernel.
+ * Depending on whether the SMMU drivers are built
+ * into the kernel or not, defer the IOMMU configuration
+ * or just abort it.
+ */
if (!ops)
- return NULL;
+ return iort_iommu_driver_enabled(node->type) ?
+ ERR_PTR(-EPROBE_DEFER) : NULL;
ret = arm_smmu_iort_xlate(dev, streamid, iort_fwnode, ops);
}
@@ -581,6 +733,7 @@ const struct iommu_ops *iort_iommu_configure(struct device *dev)
struct acpi_iort_node *node, *parent;
const struct iommu_ops *ops = NULL;
u32 streamid = 0;
+ int err;
if (dev_is_pci(dev)) {
struct pci_bus *bus = to_pci_dev(dev)->bus;
@@ -594,8 +747,8 @@ const struct iommu_ops *iort_iommu_configure(struct device *dev)
if (!node)
return NULL;
- parent = iort_node_map_rid(node, rid, &streamid,
- IORT_IOMMU_TYPE);
+ parent = iort_node_map_id(node, rid, &streamid,
+ IORT_IOMMU_TYPE);
ops = iort_iommu_xlate(dev, parent, streamid);
@@ -607,17 +760,28 @@ const struct iommu_ops *iort_iommu_configure(struct device *dev)
if (!node)
return NULL;
- parent = iort_node_get_id(node, &streamid,
- IORT_IOMMU_TYPE, i++);
+ parent = iort_node_map_platform_id(node, &streamid,
+ IORT_IOMMU_TYPE, i++);
while (parent) {
ops = iort_iommu_xlate(dev, parent, streamid);
+ if (IS_ERR_OR_NULL(ops))
+ return ops;
- parent = iort_node_get_id(node, &streamid,
- IORT_IOMMU_TYPE, i++);
+ parent = iort_node_map_platform_id(node, &streamid,
+ IORT_IOMMU_TYPE,
+ i++);
}
}
+ /*
+ * If we have reason to believe the IOMMU driver missed the initial
+ * add_device callback for dev, replay it to get things in order.
+ */
+ err = iort_add_device_replay(ops, dev);
+ if (err)
+ ops = ERR_PTR(err);
+
return ops;
}
@@ -956,6 +1120,4 @@ void __init acpi_iort_init(void)
}
iort_init_platform_devices();
-
- acpi_probe_device_table(iort);
}
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index d42eeef9d928..a9a9ab3399d4 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -782,7 +782,7 @@ static int acpi_battery_update(struct acpi_battery *battery, bool resume)
if ((battery->state & ACPI_BATTERY_STATE_CRITICAL) ||
(test_bit(ACPI_BATTERY_ALARM_PRESENT, &battery->flags) &&
(battery->capacity_now <= battery->alarm)))
- pm_wakeup_event(&battery->device->dev, 0);
+ pm_wakeup_hard_event(&battery->device->dev);
return result;
}
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 34fbe027e73a..784bda663d16 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -114,6 +114,11 @@ int acpi_bus_get_status(struct acpi_device *device)
acpi_status status;
unsigned long long sta;
+ if (acpi_device_always_present(device)) {
+ acpi_set_device_status(device, ACPI_STA_DEFAULT);
+ return 0;
+ }
+
status = acpi_bus_get_status_handle(device->handle, &sta);
if (ACPI_FAILURE(status))
return -ENODEV;
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index 668137e4a069..b7c2a06963d6 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -216,7 +216,7 @@ static int acpi_lid_notify_state(struct acpi_device *device, int state)
}
if (state)
- pm_wakeup_event(&device->dev, 0);
+ pm_wakeup_hard_event(&device->dev);
ret = blocking_notifier_call_chain(&acpi_lid_notifier, state, device);
if (ret == NOTIFY_DONE)
@@ -398,7 +398,7 @@ static void acpi_button_notify(struct acpi_device *device, u32 event)
} else {
int keycode;
- pm_wakeup_event(&device->dev, 0);
+ pm_wakeup_hard_event(&device->dev);
if (button->suspended)
break;
@@ -530,6 +530,7 @@ static int acpi_button_add(struct acpi_device *device)
lid_device = device;
}
+ device_init_wakeup(&device->dev, true);
printk(KERN_INFO PREFIX "%s [%s]\n", name, acpi_device_bid(device));
return 0;
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index 6cbe6036da99..e5b47f032d9a 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -95,7 +95,7 @@ static DEFINE_PER_CPU(struct cpc_desc *, cpc_desc_ptr);
/* pcc mapped address + header size + offset within PCC subspace */
#define GET_PCC_VADDR(offs) (pcc_data.pcc_comm_addr + 0x8 + (offs))
-/* Check if a CPC regsiter is in PCC */
+/* Check if a CPC register is in PCC */
#define CPC_IN_PCC(cpc) ((cpc)->type == ACPI_TYPE_BUFFER && \
(cpc)->cpc_entry.reg.space_id == \
ACPI_ADR_SPACE_PLATFORM_COMM)
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index 993fd31394c8..798d5003a039 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -24,6 +24,7 @@
#include <linux/pm_qos.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
+#include <linux/suspend.h>
#include "internal.h"
@@ -399,7 +400,7 @@ static void acpi_pm_notify_handler(acpi_handle handle, u32 val, void *not_used)
mutex_lock(&acpi_pm_notifier_lock);
if (adev->wakeup.flags.notifier_present) {
- __pm_wakeup_event(adev->wakeup.ws, 0);
+ pm_wakeup_ws_event(adev->wakeup.ws, 0, true);
if (adev->wakeup.context.work.func)
queue_pm_work(&adev->wakeup.context.work);
}
diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c
index edc8663b5db3..3be1433853bf 100644
--- a/drivers/acpi/glue.c
+++ b/drivers/acpi/glue.c
@@ -6,6 +6,8 @@
*
* This file is released under the GPLv2.
*/
+
+#include <linux/acpi_iort.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/list.h>
@@ -14,6 +16,7 @@
#include <linux/rwsem.h>
#include <linux/acpi.h>
#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
#include "internal.h"
@@ -176,7 +179,6 @@ int acpi_bind_one(struct device *dev, struct acpi_device *acpi_dev)
struct list_head *physnode_list;
unsigned int node_id;
int retval = -EINVAL;
- enum dev_dma_attr attr;
if (has_acpi_companion(dev)) {
if (acpi_dev) {
@@ -233,10 +235,6 @@ int acpi_bind_one(struct device *dev, struct acpi_device *acpi_dev)
if (!has_acpi_companion(dev))
ACPI_COMPANION_SET(dev, acpi_dev);
- attr = acpi_get_dma_attr(acpi_dev);
- if (attr != DEV_DMA_NOT_SUPPORTED)
- acpi_dma_configure(dev, attr);
-
acpi_physnode_link_name(physical_node_name, node_id);
retval = sysfs_create_link(&acpi_dev->dev.kobj, &dev->kobj,
physical_node_name);
@@ -322,6 +320,9 @@ static int acpi_platform_notify(struct device *dev)
if (!adev)
goto out;
+ if (dev->bus == &platform_bus_type)
+ acpi_configure_pmsi_domain(dev);
+
if (type && type->setup)
type->setup(dev);
else if (adev->handler && adev->handler->bind)
diff --git a/drivers/acpi/nfit/Kconfig b/drivers/acpi/nfit/Kconfig
index dd0d53c52552..6d3351452ea2 100644
--- a/drivers/acpi/nfit/Kconfig
+++ b/drivers/acpi/nfit/Kconfig
@@ -12,15 +12,3 @@ config ACPI_NFIT
To compile this driver as a module, choose M here:
the module will be called nfit.
-
-config ACPI_NFIT_DEBUG
- bool "NFIT DSM debug"
- depends on ACPI_NFIT
- depends on DYNAMIC_DEBUG
- default n
- help
- Enabling this option causes the nfit driver to dump the
- input and output buffers of _DSM operations on the ACPI0012
- device and its children. This can be very verbose, so leave
- it disabled unless you are debugging a hardware / firmware
- issue.
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index c8ea9d698cd0..656acb5d7166 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -49,7 +49,16 @@ MODULE_PARM_DESC(scrub_overflow_abort,
static bool disable_vendor_specific;
module_param(disable_vendor_specific, bool, S_IRUGO);
MODULE_PARM_DESC(disable_vendor_specific,
- "Limit commands to the publicly specified set\n");
+ "Limit commands to the publicly specified set");
+
+static unsigned long override_dsm_mask;
+module_param(override_dsm_mask, ulong, S_IRUGO);
+MODULE_PARM_DESC(override_dsm_mask, "Bitmask of allowed NVDIMM DSM functions");
+
+static int default_dsm_family = -1;
+module_param(default_dsm_family, int, S_IRUGO);
+MODULE_PARM_DESC(default_dsm_family,
+ "Try this DSM type first when identifying NVDIMM family");
LIST_HEAD(acpi_descs);
DEFINE_MUTEX(acpi_desc_lock);
@@ -175,14 +184,29 @@ static int xlat_bus_status(void *buf, unsigned int cmd, u32 status)
return 0;
}
+static int xlat_nvdimm_status(void *buf, unsigned int cmd, u32 status)
+{
+ switch (cmd) {
+ case ND_CMD_GET_CONFIG_SIZE:
+ if (status >> 16 & ND_CONFIG_LOCKED)
+ return -EACCES;
+ break;
+ default:
+ break;
+ }
+
+ /* all other non-zero status results in an error */
+ if (status)
+ return -EIO;
+ return 0;
+}
+
static int xlat_status(struct nvdimm *nvdimm, void *buf, unsigned int cmd,
u32 status)
{
if (!nvdimm)
return xlat_bus_status(buf, cmd, status);
- if (status)
- return -EIO;
- return 0;
+ return xlat_nvdimm_status(buf, cmd, status);
}
int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
@@ -259,14 +283,11 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
in_buf.buffer.length = call_pkg->nd_size_in;
}
- if (IS_ENABLED(CONFIG_ACPI_NFIT_DEBUG)) {
- dev_dbg(dev, "%s:%s cmd: %d: func: %d input length: %d\n",
- __func__, dimm_name, cmd, func,
- in_buf.buffer.length);
- print_hex_dump_debug("nvdimm in ", DUMP_PREFIX_OFFSET, 4, 4,
+ dev_dbg(dev, "%s:%s cmd: %d: func: %d input length: %d\n",
+ __func__, dimm_name, cmd, func, in_buf.buffer.length);
+ print_hex_dump_debug("nvdimm in ", DUMP_PREFIX_OFFSET, 4, 4,
in_buf.buffer.pointer,
min_t(u32, 256, in_buf.buffer.length), true);
- }
out_obj = acpi_evaluate_dsm(handle, uuid, 1, func, &in_obj);
if (!out_obj) {
@@ -298,13 +319,11 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
goto out;
}
- if (IS_ENABLED(CONFIG_ACPI_NFIT_DEBUG)) {
- dev_dbg(dev, "%s:%s cmd: %s output length: %d\n", __func__,
- dimm_name, cmd_name, out_obj->buffer.length);
- print_hex_dump_debug(cmd_name, DUMP_PREFIX_OFFSET, 4,
- 4, out_obj->buffer.pointer, min_t(u32, 128,
- out_obj->buffer.length), true);
- }
+ dev_dbg(dev, "%s:%s cmd: %s output length: %d\n", __func__, dimm_name,
+ cmd_name, out_obj->buffer.length);
+ print_hex_dump_debug(cmd_name, DUMP_PREFIX_OFFSET, 4, 4,
+ out_obj->buffer.pointer,
+ min_t(u32, 128, out_obj->buffer.length), true);
for (i = 0, offset = 0; i < desc->out_num; i++) {
u32 out_size = nd_cmd_out_size(nvdimm, cmd, desc, i, buf,
@@ -448,9 +467,9 @@ static bool add_memdev(struct acpi_nfit_desc *acpi_desc,
INIT_LIST_HEAD(&nfit_memdev->list);
memcpy(nfit_memdev->memdev, memdev, sizeof(*memdev));
list_add_tail(&nfit_memdev->list, &acpi_desc->memdevs);
- dev_dbg(dev, "%s: memdev handle: %#x spa: %d dcr: %d\n",
+ dev_dbg(dev, "%s: memdev handle: %#x spa: %d dcr: %d flags: %#x\n",
__func__, memdev->device_handle, memdev->range_index,
- memdev->region_index);
+ memdev->region_index, memdev->flags);
return true;
}
@@ -729,28 +748,38 @@ static void nfit_mem_init_bdw(struct acpi_nfit_desc *acpi_desc,
}
}
-static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc,
+static int __nfit_mem_init(struct acpi_nfit_desc *acpi_desc,
struct acpi_nfit_system_address *spa)
{
struct nfit_mem *nfit_mem, *found;
struct nfit_memdev *nfit_memdev;
- int type = nfit_spa_type(spa);
+ int type = spa ? nfit_spa_type(spa) : 0;
switch (type) {
case NFIT_SPA_DCR:
case NFIT_SPA_PM:
break;
default:
- return 0;
+ if (spa)
+ return 0;
}
+ /*
+ * This loop runs in two modes: when a dimm is mapped, the loop
+ * adds memdev associations to an existing dimm or creates a new
+ * dimm. In the unmapped-dimm case, the loop sweeps for memdev
+ * instances with an invalid / zero range_index and adds those
+ * dimms without spa associations.
+ */
list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
struct nfit_flush *nfit_flush;
struct nfit_dcr *nfit_dcr;
u32 device_handle;
u16 dcr;
- if (nfit_memdev->memdev->range_index != spa->range_index)
+ if (spa && nfit_memdev->memdev->range_index != spa->range_index)
+ continue;
+ if (!spa && nfit_memdev->memdev->range_index)
continue;
found = NULL;
dcr = nfit_memdev->memdev->region_index;
@@ -835,14 +864,15 @@ static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc,
break;
}
nfit_mem_init_bdw(acpi_desc, nfit_mem, spa);
- } else {
+ } else if (type == NFIT_SPA_PM) {
/*
* A single dimm may belong to multiple SPA-PM
* ranges, record at least one in addition to
* any SPA-DCR range.
*/
nfit_mem->memdev_pmem = nfit_memdev->memdev;
- }
+ } else
+ nfit_mem->memdev_dcr = nfit_memdev->memdev;
}
return 0;
@@ -866,6 +896,8 @@ static int nfit_mem_cmp(void *priv, struct list_head *_a, struct list_head *_b)
static int nfit_mem_init(struct acpi_nfit_desc *acpi_desc)
{
struct nfit_spa *nfit_spa;
+ int rc;
+
/*
* For each SPA-DCR or SPA-PMEM address range find its
@@ -876,13 +908,20 @@ static int nfit_mem_init(struct acpi_nfit_desc *acpi_desc)
* BDWs are optional.
*/
list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
- int rc;
-
- rc = nfit_mem_dcr_init(acpi_desc, nfit_spa->spa);
+ rc = __nfit_mem_init(acpi_desc, nfit_spa->spa);
if (rc)
return rc;
}
+ /*
+ * If a DIMM has failed to be mapped into SPA there will be no
+ * SPA entries above. Find and register all the unmapped DIMMs
+ * for reporting and recovery purposes.
+ */
+ rc = __nfit_mem_init(acpi_desc, NULL);
+ if (rc)
+ return rc;
+
list_sort(NULL, &acpi_desc->dimms, nfit_mem_cmp);
return 0;
@@ -1237,12 +1276,14 @@ static ssize_t flags_show(struct device *dev,
{
u16 flags = to_nfit_memdev(dev)->flags;
- return sprintf(buf, "%s%s%s%s%s\n",
+ return sprintf(buf, "%s%s%s%s%s%s%s\n",
flags & ACPI_NFIT_MEM_SAVE_FAILED ? "save_fail " : "",
flags & ACPI_NFIT_MEM_RESTORE_FAILED ? "restore_fail " : "",
flags & ACPI_NFIT_MEM_FLUSH_FAILED ? "flush_fail " : "",
flags & ACPI_NFIT_MEM_NOT_ARMED ? "not_armed " : "",
- flags & ACPI_NFIT_MEM_HEALTH_OBSERVED ? "smart_event " : "");
+ flags & ACPI_NFIT_MEM_HEALTH_OBSERVED ? "smart_event " : "",
+ flags & ACPI_NFIT_MEM_MAP_FAILED ? "map_fail " : "",
+ flags & ACPI_NFIT_MEM_HEALTH_ENABLED ? "smart_notify " : "");
}
static DEVICE_ATTR_RO(flags);
@@ -1290,8 +1331,16 @@ static umode_t acpi_nfit_dimm_attr_visible(struct kobject *kobj,
struct device *dev = container_of(kobj, struct device, kobj);
struct nvdimm *nvdimm = to_nvdimm(dev);
- if (!to_nfit_dcr(dev))
+ if (!to_nfit_dcr(dev)) {
+ /* Without a dcr only the memdev attributes can be surfaced */
+ if (a == &dev_attr_handle.attr || a == &dev_attr_phys_id.attr
+ || a == &dev_attr_flags.attr
+ || a == &dev_attr_family.attr
+ || a == &dev_attr_dsm_mask.attr)
+ return a->mode;
return 0;
+ }
+
if (a == &dev_attr_format1.attr && num_nvdimm_formats(nvdimm) <= 1)
return 0;
return a->mode;
@@ -1368,6 +1417,7 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
unsigned long dsm_mask;
const u8 *uuid;
int i;
+ int family = -1;
/* nfit test assumes 1:1 relationship between commands and dsms */
nfit_mem->dsm_mask = acpi_desc->dimm_cmd_force_en;
@@ -1398,11 +1448,14 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
*/
for (i = NVDIMM_FAMILY_INTEL; i <= NVDIMM_FAMILY_MSFT; i++)
if (acpi_check_dsm(adev_dimm->handle, to_nfit_uuid(i), 1, 1))
- break;
+ if (family < 0 || i == default_dsm_family)
+ family = i;
/* limit the supported commands to those that are publicly documented */
- nfit_mem->family = i;
- if (nfit_mem->family == NVDIMM_FAMILY_INTEL) {
+ nfit_mem->family = family;
+ if (override_dsm_mask && !disable_vendor_specific)
+ dsm_mask = override_dsm_mask;
+ else if (nfit_mem->family == NVDIMM_FAMILY_INTEL) {
dsm_mask = 0x3fe;
if (disable_vendor_specific)
dsm_mask &= ~(1 << ND_CMD_VENDOR);
@@ -1462,6 +1515,7 @@ static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc)
list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
struct acpi_nfit_flush_address *flush;
unsigned long flags = 0, cmd_mask;
+ struct nfit_memdev *nfit_memdev;
u32 device_handle;
u16 mem_flags;
@@ -1473,11 +1527,22 @@ static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc)
}
if (nfit_mem->bdw && nfit_mem->memdev_pmem)
- flags |= NDD_ALIASING;
+ set_bit(NDD_ALIASING, &flags);
+
+ /* collate flags across all memdevs for this dimm */
+ list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
+ struct acpi_nfit_memory_map *dimm_memdev;
+
+ dimm_memdev = __to_nfit_memdev(nfit_mem);
+ if (dimm_memdev->device_handle
+ != nfit_memdev->memdev->device_handle)
+ continue;
+ dimm_memdev->flags |= nfit_memdev->memdev->flags;
+ }
mem_flags = __to_nfit_memdev(nfit_mem)->flags;
if (mem_flags & ACPI_NFIT_MEM_NOT_ARMED)
- flags |= NDD_UNARMED;
+ set_bit(NDD_UNARMED, &flags);
rc = acpi_nfit_add_dimm(acpi_desc, nfit_mem, device_handle);
if (rc)
@@ -1507,12 +1572,13 @@ static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc)
if ((mem_flags & ACPI_NFIT_MEM_FAILED_MASK) == 0)
continue;
- dev_info(acpi_desc->dev, "%s flags:%s%s%s%s\n",
+ dev_info(acpi_desc->dev, "%s flags:%s%s%s%s%s\n",
nvdimm_name(nvdimm),
mem_flags & ACPI_NFIT_MEM_SAVE_FAILED ? " save_fail" : "",
mem_flags & ACPI_NFIT_MEM_RESTORE_FAILED ? " restore_fail":"",
mem_flags & ACPI_NFIT_MEM_FLUSH_FAILED ? " flush_fail" : "",
- mem_flags & ACPI_NFIT_MEM_NOT_ARMED ? " not_armed" : "");
+ mem_flags & ACPI_NFIT_MEM_NOT_ARMED ? " not_armed" : "",
+ mem_flags & ACPI_NFIT_MEM_MAP_FAILED ? " map_fail" : "");
}
@@ -1783,8 +1849,7 @@ static int acpi_nfit_blk_single_io(struct nfit_blk *nfit_blk,
mmio_flush_range((void __force *)
mmio->addr.aperture + offset, c);
- memcpy_from_pmem(iobuf + copied,
- mmio->addr.aperture + offset, c);
+ memcpy(iobuf + copied, mmio->addr.aperture + offset, c);
}
copied += c;
@@ -2525,6 +2590,7 @@ static void acpi_nfit_scrub(struct work_struct *work)
acpi_nfit_register_region(acpi_desc, nfit_spa);
}
}
+ acpi_desc->init_complete = 1;
list_for_each_entry(nfit_spa, &acpi_desc->spas, list)
acpi_nfit_async_scrub(acpi_desc, nfit_spa);
@@ -2547,7 +2613,8 @@ static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc)
return rc;
}
- queue_work(nfit_wq, &acpi_desc->work);
+ if (!acpi_desc->cancel)
+ queue_work(nfit_wq, &acpi_desc->work);
return 0;
}
@@ -2593,32 +2660,11 @@ static int acpi_nfit_desc_init_scrub_attr(struct acpi_nfit_desc *acpi_desc)
return 0;
}
-static void acpi_nfit_destruct(void *data)
+static void acpi_nfit_unregister(void *data)
{
struct acpi_nfit_desc *acpi_desc = data;
- struct device *bus_dev = to_nvdimm_bus_dev(acpi_desc->nvdimm_bus);
-
- /*
- * Destruct under acpi_desc_lock so that nfit_handle_mce does not
- * race teardown
- */
- mutex_lock(&acpi_desc_lock);
- acpi_desc->cancel = 1;
- /*
- * Bounce the nvdimm bus lock to make sure any in-flight
- * acpi_nfit_ars_rescan() submissions have had a chance to
- * either submit or see ->cancel set.
- */
- device_lock(bus_dev);
- device_unlock(bus_dev);
- flush_workqueue(nfit_wq);
- if (acpi_desc->scrub_count_state)
- sysfs_put(acpi_desc->scrub_count_state);
nvdimm_bus_unregister(acpi_desc->nvdimm_bus);
- acpi_desc->nvdimm_bus = NULL;
- list_del(&acpi_desc->list);
- mutex_unlock(&acpi_desc_lock);
}
int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, void *data, acpi_size sz)
@@ -2636,7 +2682,7 @@ int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, void *data, acpi_size sz)
if (!acpi_desc->nvdimm_bus)
return -ENOMEM;
- rc = devm_add_action_or_reset(dev, acpi_nfit_destruct,
+ rc = devm_add_action_or_reset(dev, acpi_nfit_unregister,
acpi_desc);
if (rc)
return rc;
@@ -2728,6 +2774,13 @@ static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc)
device_lock(dev);
device_unlock(dev);
+ /* bounce the init_mutex to make init_complete valid */
+ mutex_lock(&acpi_desc->init_mutex);
+ if (acpi_desc->cancel || acpi_desc->init_complete) {
+ mutex_unlock(&acpi_desc->init_mutex);
+ return 0;
+ }
+
/*
* Scrub work could take 10s of seconds, userspace may give up so we
* need to be interruptible while waiting.
@@ -2735,6 +2788,7 @@ static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc)
INIT_WORK_ONSTACK(&flush.work, flush_probe);
COMPLETION_INITIALIZER_ONSTACK(flush.cmp);
queue_work(nfit_wq, &flush.work);
+ mutex_unlock(&acpi_desc->init_mutex);
rc = wait_for_completion_interruptible(&flush.cmp);
cancel_work_sync(&flush.work);
@@ -2771,10 +2825,12 @@ int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc)
if (work_busy(&acpi_desc->work))
return -EBUSY;
- if (acpi_desc->cancel)
+ mutex_lock(&acpi_desc->init_mutex);
+ if (acpi_desc->cancel) {
+ mutex_unlock(&acpi_desc->init_mutex);
return 0;
+ }
- mutex_lock(&acpi_desc->init_mutex);
list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
struct acpi_nfit_system_address *spa = nfit_spa->spa;
@@ -2818,6 +2874,40 @@ void acpi_nfit_desc_init(struct acpi_nfit_desc *acpi_desc, struct device *dev)
}
EXPORT_SYMBOL_GPL(acpi_nfit_desc_init);
+static void acpi_nfit_put_table(void *table)
+{
+ acpi_put_table(table);
+}
+
+void acpi_nfit_shutdown(void *data)
+{
+ struct acpi_nfit_desc *acpi_desc = data;
+ struct device *bus_dev = to_nvdimm_bus_dev(acpi_desc->nvdimm_bus);
+
+ /*
+ * Destruct under acpi_desc_lock so that nfit_handle_mce does not
+ * race teardown
+ */
+ mutex_lock(&acpi_desc_lock);
+ list_del(&acpi_desc->list);
+ mutex_unlock(&acpi_desc_lock);
+
+ mutex_lock(&acpi_desc->init_mutex);
+ acpi_desc->cancel = 1;
+ mutex_unlock(&acpi_desc->init_mutex);
+
+ /*
+ * Bounce the nvdimm bus lock to make sure any in-flight
+ * acpi_nfit_ars_rescan() submissions have had a chance to
+ * either submit or see ->cancel set.
+ */
+ device_lock(bus_dev);
+ device_unlock(bus_dev);
+
+ flush_workqueue(nfit_wq);
+}
+EXPORT_SYMBOL_GPL(acpi_nfit_shutdown);
+
static int acpi_nfit_add(struct acpi_device *adev)
{
struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
@@ -2834,6 +2924,10 @@ static int acpi_nfit_add(struct acpi_device *adev)
dev_dbg(dev, "failed to find NFIT at startup\n");
return 0;
}
+
+ rc = devm_add_action_or_reset(dev, acpi_nfit_put_table, tbl);
+ if (rc)
+ return rc;
sz = tbl->length;
acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL);
@@ -2861,12 +2955,15 @@ static int acpi_nfit_add(struct acpi_device *adev)
rc = acpi_nfit_init(acpi_desc, (void *) tbl
+ sizeof(struct acpi_table_nfit),
sz - sizeof(struct acpi_table_nfit));
- return rc;
+
+ if (rc)
+ return rc;
+ return devm_add_action_or_reset(dev, acpi_nfit_shutdown, acpi_desc);
}
static int acpi_nfit_remove(struct acpi_device *adev)
{
- /* see acpi_nfit_destruct */
+ /* see acpi_nfit_unregister */
return 0;
}
diff --git a/drivers/acpi/nfit/nfit.h b/drivers/acpi/nfit/nfit.h
index fc29c2e9832e..58fb7d68e04a 100644
--- a/drivers/acpi/nfit/nfit.h
+++ b/drivers/acpi/nfit/nfit.h
@@ -37,7 +37,7 @@
#define ACPI_NFIT_MEM_FAILED_MASK (ACPI_NFIT_MEM_SAVE_FAILED \
| ACPI_NFIT_MEM_RESTORE_FAILED | ACPI_NFIT_MEM_FLUSH_FAILED \
- | ACPI_NFIT_MEM_NOT_ARMED)
+ | ACPI_NFIT_MEM_NOT_ARMED | ACPI_NFIT_MEM_MAP_FAILED)
enum nfit_uuids {
/* for simplicity alias the uuid index with the family id */
@@ -163,6 +163,7 @@ struct acpi_nfit_desc {
unsigned int scrub_count;
unsigned int scrub_mode;
unsigned int cancel:1;
+ unsigned int init_complete:1;
unsigned long dimm_cmd_force_en;
unsigned long bus_cmd_force_en;
int (*blk_do_io)(struct nd_blk_region *ndbr, resource_size_t dpa,
@@ -238,6 +239,7 @@ static inline struct acpi_nfit_desc *to_acpi_desc(
const u8 *to_nfit_uuid(enum nfit_uuids id);
int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, void *nfit, acpi_size sz);
+void acpi_nfit_shutdown(void *data);
void __acpi_nfit_notify(struct device *dev, acpi_handle handle, u32 event);
void __acpi_nvdimm_notify(struct device *dev, u32 event);
int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
diff --git a/drivers/acpi/pci_mcfg.c b/drivers/acpi/pci_mcfg.c
index 2944353253ed..a4e8432fc2fb 100644
--- a/drivers/acpi/pci_mcfg.c
+++ b/drivers/acpi/pci_mcfg.c
@@ -54,6 +54,7 @@ static struct mcfg_fixup mcfg_quirks[] = {
#define QCOM_ECAM32(seg) \
{ "QCOM ", "QDF2432 ", 1, seg, MCFG_BUS_ANY, &pci_32b_ops }
+
QCOM_ECAM32(0),
QCOM_ECAM32(1),
QCOM_ECAM32(2),
@@ -68,6 +69,7 @@ static struct mcfg_fixup mcfg_quirks[] = {
{ "HISI ", table_id, 0, (seg) + 1, MCFG_BUS_ANY, ops }, \
{ "HISI ", table_id, 0, (seg) + 2, MCFG_BUS_ANY, ops }, \
{ "HISI ", table_id, 0, (seg) + 3, MCFG_BUS_ANY, ops }
+
HISI_QUAD_DOM("HIP05 ", 0, &hisi_pcie_ops),
HISI_QUAD_DOM("HIP06 ", 0, &hisi_pcie_ops),
HISI_QUAD_DOM("HIP07 ", 0, &hisi_pcie_ops),
@@ -77,6 +79,7 @@ static struct mcfg_fixup mcfg_quirks[] = {
#define THUNDER_PEM_RES(addr, node) \
DEFINE_RES_MEM((addr) + ((u64) (node) << 44), 0x39 * SZ_16M)
+
#define THUNDER_PEM_QUIRK(rev, node) \
{ "CAVIUM", "THUNDERX", rev, 4 + (10 * (node)), MCFG_BUS_ANY, \
&thunder_pem_ecam_ops, THUNDER_PEM_RES(0x88001f000000UL, node) }, \
@@ -90,13 +93,16 @@ static struct mcfg_fixup mcfg_quirks[] = {
&thunder_pem_ecam_ops, THUNDER_PEM_RES(0x894057000000UL, node) }, \
{ "CAVIUM", "THUNDERX", rev, 9 + (10 * (node)), MCFG_BUS_ANY, \
&thunder_pem_ecam_ops, THUNDER_PEM_RES(0x89808f000000UL, node) }
- /* SoC pass2.x */
- THUNDER_PEM_QUIRK(1, 0),
- THUNDER_PEM_QUIRK(1, 1),
#define THUNDER_ECAM_QUIRK(rev, seg) \
{ "CAVIUM", "THUNDERX", rev, seg, MCFG_BUS_ANY, \
&pci_thunder_ecam_ops }
+
+ /* SoC pass2.x */
+ THUNDER_PEM_QUIRK(1, 0),
+ THUNDER_PEM_QUIRK(1, 1),
+ THUNDER_ECAM_QUIRK(1, 10),
+
/* SoC pass1.x */
THUNDER_PEM_QUIRK(2, 0), /* off-chip devices */
THUNDER_PEM_QUIRK(2, 1), /* off-chip devices */
@@ -112,9 +118,11 @@ static struct mcfg_fixup mcfg_quirks[] = {
#define XGENE_V1_ECAM_MCFG(rev, seg) \
{"APM ", "XGENE ", rev, seg, MCFG_BUS_ANY, \
&xgene_v1_pcie_ecam_ops }
+
#define XGENE_V2_ECAM_MCFG(rev, seg) \
{"APM ", "XGENE ", rev, seg, MCFG_BUS_ANY, \
&xgene_v2_pcie_ecam_ops }
+
/* X-Gene SoC with v1 PCIe controller */
XGENE_V1_ECAM_MCFG(1, 0),
XGENE_V1_ECAM_MCFG(1, 1),
diff --git a/drivers/acpi/pmic/intel_pmic_xpower.c b/drivers/acpi/pmic/intel_pmic_xpower.c
index 55f51115f016..1a76c784cd4c 100644
--- a/drivers/acpi/pmic/intel_pmic_xpower.c
+++ b/drivers/acpi/pmic/intel_pmic_xpower.c
@@ -27,97 +27,97 @@ static struct pmic_table power_table[] = {
.address = 0x00,
.reg = 0x13,
.bit = 0x05,
- },
+ }, /* ALD1 */
{
.address = 0x04,
.reg = 0x13,
.bit = 0x06,
- },
+ }, /* ALD2 */
{
.address = 0x08,
.reg = 0x13,
.bit = 0x07,
- },
+ }, /* ALD3 */
{
.address = 0x0c,
.reg = 0x12,
.bit = 0x03,
- },
+ }, /* DLD1 */
{
.address = 0x10,
.reg = 0x12,
.bit = 0x04,
- },
+ }, /* DLD2 */
{
.address = 0x14,
.reg = 0x12,
.bit = 0x05,
- },
+ }, /* DLD3 */
{
.address = 0x18,
.reg = 0x12,
.bit = 0x06,
- },
+ }, /* DLD4 */
{
.address = 0x1c,
.reg = 0x12,
.bit = 0x00,
- },
+ }, /* ELD1 */
{
.address = 0x20,
.reg = 0x12,
.bit = 0x01,
- },
+ }, /* ELD2 */
{
.address = 0x24,
.reg = 0x12,
.bit = 0x02,
- },
+ }, /* ELD3 */
{
.address = 0x28,
.reg = 0x13,
.bit = 0x02,
- },
+ }, /* FLD1 */
{
.address = 0x2c,
.reg = 0x13,
.bit = 0x03,
- },
+ }, /* FLD2 */
{
.address = 0x30,
.reg = 0x13,
.bit = 0x04,
- },
+ }, /* FLD3 */
{
- .address = 0x38,
+ .address = 0x34,
.reg = 0x10,
.bit = 0x03,
- },
+ }, /* BUC1 */
{
- .address = 0x3c,
+ .address = 0x38,
.reg = 0x10,
.bit = 0x06,
- },
+ }, /* BUC2 */
{
- .address = 0x40,
+ .address = 0x3c,
.reg = 0x10,
.bit = 0x05,
- },
+ }, /* BUC3 */
{
- .address = 0x44,
+ .address = 0x40,
.reg = 0x10,
.bit = 0x04,
- },
+ }, /* BUC4 */
{
- .address = 0x48,
+ .address = 0x44,
.reg = 0x10,
.bit = 0x01,
- },
+ }, /* BUC5 */
{
- .address = 0x4c,
+ .address = 0x48,
.reg = 0x10,
.bit = 0x00
- },
+ }, /* BUC6 */
};
/* TMP0 - TMP5 are the same, all from GPADC */
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index 1c2b846c5776..3a6c9b741b23 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -864,6 +864,16 @@ void acpi_resume_power_resources(void)
mutex_unlock(&resource->resource_lock);
}
+
+ mutex_unlock(&power_resource_list_lock);
+}
+
+void acpi_turn_off_unused_power_resources(void)
+{
+ struct acpi_power_resource *resource;
+
+ mutex_lock(&power_resource_list_lock);
+
list_for_each_entry_reverse(resource, &acpi_power_resource_list, list_node) {
int result, state;
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index c26931067415..e39ec7b7cb67 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -1363,20 +1363,25 @@ enum dev_dma_attr acpi_get_dma_attr(struct acpi_device *adev)
* @dev: The pointer to the device
* @attr: device dma attributes
*/
-void acpi_dma_configure(struct device *dev, enum dev_dma_attr attr)
+int acpi_dma_configure(struct device *dev, enum dev_dma_attr attr)
{
const struct iommu_ops *iommu;
+ u64 size;
iort_set_dma_mask(dev);
iommu = iort_iommu_configure(dev);
+ if (IS_ERR(iommu))
+ return PTR_ERR(iommu);
+ size = max(dev->coherent_dma_mask, dev->coherent_dma_mask + 1);
/*
* Assume dma valid range starts at 0 and covers the whole
* coherent_dma_mask.
*/
- arch_setup_dma_ops(dev, 0, dev->coherent_dma_mask + 1, iommu,
- attr == DEV_DMA_COHERENT);
+ arch_setup_dma_ops(dev, 0, size, iommu, attr == DEV_DMA_COHERENT);
+
+ return 0;
}
EXPORT_SYMBOL_GPL(acpi_dma_configure);
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index a4327af676fe..a6574d626340 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -474,6 +474,7 @@ static void acpi_pm_start(u32 acpi_state)
*/
static void acpi_pm_end(void)
{
+ acpi_turn_off_unused_power_resources();
acpi_scan_lock_release();
/*
* This is necessary in case acpi_pm_finish() is not called during a
@@ -662,14 +663,40 @@ static int acpi_freeze_prepare(void)
acpi_os_wait_events_complete();
if (acpi_sci_irq_valid())
enable_irq_wake(acpi_sci_irq);
+
return 0;
}
+static void acpi_freeze_wake(void)
+{
+ /*
+ * If IRQD_WAKEUP_ARMED is not set for the SCI at this point, it means
+ * that the SCI has triggered while suspended, so cancel the wakeup in
+ * case it has not been a wakeup event (the GPEs will be checked later).
+ */
+ if (acpi_sci_irq_valid() &&
+ !irqd_is_wakeup_armed(irq_get_irq_data(acpi_sci_irq)))
+ pm_system_cancel_wakeup();
+}
+
+static void acpi_freeze_sync(void)
+{
+ /*
+ * Process all pending events in case there are any wakeup ones.
+ *
+ * The EC driver uses the system workqueue, so that one needs to be
+ * flushed too.
+ */
+ acpi_os_wait_events_complete();
+ flush_scheduled_work();
+}
+
static void acpi_freeze_restore(void)
{
acpi_disable_wakeup_devices(ACPI_STATE_S0);
if (acpi_sci_irq_valid())
disable_irq_wake(acpi_sci_irq);
+
acpi_enable_all_runtime_gpes();
}
@@ -681,6 +708,8 @@ static void acpi_freeze_end(void)
static const struct platform_freeze_ops acpi_freeze_ops = {
.begin = acpi_freeze_begin,
.prepare = acpi_freeze_prepare,
+ .wake = acpi_freeze_wake,
+ .sync = acpi_freeze_sync,
.restore = acpi_freeze_restore,
.end = acpi_freeze_end,
};
diff --git a/drivers/acpi/sleep.h b/drivers/acpi/sleep.h
index a9cc34e663f9..a82ff74faf7a 100644
--- a/drivers/acpi/sleep.h
+++ b/drivers/acpi/sleep.h
@@ -6,6 +6,7 @@ extern struct list_head acpi_wakeup_device_list;
extern struct mutex acpi_device_lock;
extern void acpi_resume_power_resources(void);
+extern void acpi_turn_off_unused_power_resources(void);
static inline acpi_status acpi_set_waking_vector(u32 wakeup_address)
{
diff --git a/drivers/acpi/x86/utils.c b/drivers/acpi/x86/utils.c
new file mode 100644
index 000000000000..bd86b809c848
--- /dev/null
+++ b/drivers/acpi/x86/utils.c
@@ -0,0 +1,90 @@
+/*
+ * X86 ACPI Utility Functions
+ *
+ * Copyright (C) 2017 Hans de Goede <hdegoede@redhat.com>
+ *
+ * Based on various non upstream patches to support the CHT Whiskey Cove PMIC:
+ * Copyright (C) 2013-2015 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/acpi.h>
+#include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
+#include "../internal.h"
+
+/*
+ * Some ACPI devices are hidden (status == 0x0) in recent BIOSes because
+ * some recent Windows drivers bind to one device but poke at multiple
+ * devices at the same time, so the others get hidden.
+ * We work around this by always reporting ACPI_STA_DEFAULT for these
+ * devices. Note this MUST only be done for devices where this is safe.
+ *
+ * This forcing of devices to be present is limited to specific CPU (SoC)
+ * models both to avoid potentially causing trouble on other models and
+ * because some HIDs are re-used on different SoCs for completely
+ * different devices.
+ */
+struct always_present_id {
+ struct acpi_device_id hid[2];
+ struct x86_cpu_id cpu_ids[2];
+ const char *uid;
+};
+
+#define ICPU(model) { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, }
+
+#define ENTRY(hid, uid, cpu_models) { \
+ { { hid, }, {} }, \
+ { cpu_models, {} }, \
+ uid, \
+}
+
+static const struct always_present_id always_present_ids[] = {
+ /*
+ * Bay / Cherry Trail PWM directly poked by GPU driver in win10,
+ * but Linux uses a separate PWM driver, harmless if not used.
+ */
+ ENTRY("80860F09", "1", ICPU(INTEL_FAM6_ATOM_SILVERMONT1)),
+ ENTRY("80862288", "1", ICPU(INTEL_FAM6_ATOM_AIRMONT)),
+ /*
+ * The INT0002 device is necessary to clear wakeup interrupt sources
+ * on Cherry Trail devices; without it we get "nobody cared" IRQ messages.
+ */
+ ENTRY("INT0002", "1", ICPU(INTEL_FAM6_ATOM_AIRMONT)),
+};
+
+bool acpi_device_always_present(struct acpi_device *adev)
+{
+ u32 *status = (u32 *)&adev->status;
+ u32 old_status = *status;
+ bool ret = false;
+ unsigned int i;
+
+ /* acpi_match_device_ids checks status, so set it to default */
+ *status = ACPI_STA_DEFAULT;
+ for (i = 0; i < ARRAY_SIZE(always_present_ids); i++) {
+ if (acpi_match_device_ids(adev, always_present_ids[i].hid))
+ continue;
+
+ if (!adev->pnp.unique_id ||
+ strcmp(adev->pnp.unique_id, always_present_ids[i].uid))
+ continue;
+
+ if (!x86_match_cpu(always_present_ids[i].cpu_ids))
+ continue;
+
+ if (old_status != ACPI_STA_DEFAULT) /* Log only once */
+ dev_info(&adev->dev,
+ "Device [%s] is in always present list\n",
+ adev->pnp.bus_id);
+
+ ret = true;
+ break;
+ }
+ *status = old_status;
+
+ return ret;
+}
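For reference, a minimal sketch of what one entry of the table above expands to once the ENTRY() and ICPU() macros are applied; the designated-initializer form below is illustrative only and is not part of the file:

	/* Roughly equivalent to ENTRY("80860F09", "1", ICPU(INTEL_FAM6_ATOM_SILVERMONT1)) */
	static const struct always_present_id example_entry = {
		.hid = {
			{ "80860F09", },	/* matched with acpi_match_device_ids() */
			{ }			/* terminating entry */
		},
		.cpu_ids = {
			{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT1,
			  X86_FEATURE_ANY, },
			{ }			/* terminating entry */
		},
		.uid = "1",			/* compared against adev->pnp.unique_id */
	};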
diff --git a/drivers/android/Kconfig b/drivers/android/Kconfig
index a82fc022d34b..832e885349b1 100644
--- a/drivers/android/Kconfig
+++ b/drivers/android/Kconfig
@@ -22,7 +22,7 @@ config ANDROID_BINDER_IPC
config ANDROID_BINDER_DEVICES
string "Android Binder devices"
depends on ANDROID_BINDER_IPC
- default "binder"
+ default "binder,hwbinder"
---help---
Default value for the binder.devices parameter.
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index ff6cb9e4c381..de3eaf051697 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -518,6 +518,15 @@ config PATA_BF54X
If unsure, say N.
+config PATA_BK3710
+ tristate "Palmchip BK3710 PATA support"
+ depends on ARCH_DAVINCI
+ help
+ This option enables support for the integrated IDE controller on
+ the TI DaVinci SoC.
+
+ If unsure, say N.
+
config PATA_CMD64X
tristate "CMD64x PATA support"
depends on PCI
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile
index 3048cc100a46..cd931a5eba92 100644
--- a/drivers/ata/Makefile
+++ b/drivers/ata/Makefile
@@ -51,6 +51,7 @@ obj-$(CONFIG_PATA_ARTOP) += pata_artop.o
obj-$(CONFIG_PATA_ATIIXP) += pata_atiixp.o
obj-$(CONFIG_PATA_ATP867X) += pata_atp867x.o
obj-$(CONFIG_PATA_BF54X) += pata_bf54x.o
+obj-$(CONFIG_PATA_BK3710) += pata_bk3710.o
obj-$(CONFIG_PATA_CMD64X) += pata_cmd64x.o
obj-$(CONFIG_PATA_CS5520) += pata_cs5520.o
obj-$(CONFIG_PATA_CS5530) += pata_cs5530.o
diff --git a/drivers/ata/pata_bk3710.c b/drivers/ata/pata_bk3710.c
new file mode 100644
index 000000000000..6c3bd5fae3e4
--- /dev/null
+++ b/drivers/ata/pata_bk3710.c
@@ -0,0 +1,382 @@
+/*
+ * Palmchip BK3710 PATA controller driver
+ *
+ * Copyright (c) 2017 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Based on palm_bk3710.c:
+ *
+ * Copyright (C) 2006 Texas Instruments.
+ * Copyright (C) 2007 MontaVista Software, Inc., <source@mvista.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/ata.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/libata.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+
+#define DRV_NAME "pata_bk3710"
+
+#define BK3710_TF_OFFSET 0x1F0
+#define BK3710_CTL_OFFSET 0x3F6
+
+#define BK3710_BMISP 0x02
+#define BK3710_IDETIMP 0x40
+#define BK3710_UDMACTL 0x48
+#define BK3710_MISCCTL 0x50
+#define BK3710_REGSTB 0x54
+#define BK3710_REGRCVR 0x58
+#define BK3710_DATSTB 0x5C
+#define BK3710_DATRCVR 0x60
+#define BK3710_DMASTB 0x64
+#define BK3710_DMARCVR 0x68
+#define BK3710_UDMASTB 0x6C
+#define BK3710_UDMATRP 0x70
+#define BK3710_UDMAENV 0x74
+#define BK3710_IORDYTMP 0x78
+
+static struct scsi_host_template pata_bk3710_sht = {
+ ATA_BMDMA_SHT(DRV_NAME),
+};
+
+static unsigned int ideclk_period; /* in nanoseconds */
+
+struct pata_bk3710_udmatiming {
+ unsigned int rptime; /* tRP -- Ready to pause time (nsec) */
+ unsigned int cycletime; /* tCYCTYP2/2 -- avg Cycle Time (nsec) */
+ /* tENV is always a minimum of 20 nsec */
+};
+
+static const struct pata_bk3710_udmatiming pata_bk3710_udmatimings[6] = {
+ { 160, 240 / 2 }, /* UDMA Mode 0 */
+ { 125, 160 / 2 }, /* UDMA Mode 1 */
+ { 100, 120 / 2 }, /* UDMA Mode 2 */
+ { 100, 90 / 2 }, /* UDMA Mode 3 */
+ { 100, 60 / 2 }, /* UDMA Mode 4 */
+ { 85, 40 / 2 }, /* UDMA Mode 5 */
+};
+
+static void pata_bk3710_setudmamode(void __iomem *base, unsigned int dev,
+ unsigned int mode)
+{
+ u32 val32;
+ u16 val16;
+ u8 tenv, trp, t0;
+
+ /* DMA Data Setup */
+ t0 = DIV_ROUND_UP(pata_bk3710_udmatimings[mode].cycletime,
+ ideclk_period) - 1;
+ tenv = DIV_ROUND_UP(20, ideclk_period) - 1;
+ trp = DIV_ROUND_UP(pata_bk3710_udmatimings[mode].rptime,
+ ideclk_period) - 1;
+
+ /* udmastb Ultra DMA Access Strobe Width */
+ val32 = ioread32(base + BK3710_UDMASTB) & (0xFF << (dev ? 0 : 8));
+ val32 |= t0 << (dev ? 8 : 0);
+ iowrite32(val32, base + BK3710_UDMASTB);
+
+ /* udmatrp Ultra DMA Ready to Pause Time */
+ val32 = ioread32(base + BK3710_UDMATRP) & (0xFF << (dev ? 0 : 8));
+ val32 |= trp << (dev ? 8 : 0);
+ iowrite32(val32, base + BK3710_UDMATRP);
+
+ /* udmaenv Ultra DMA envelope Time */
+ val32 = ioread32(base + BK3710_UDMAENV) & (0xFF << (dev ? 0 : 8));
+ val32 |= tenv << (dev ? 8 : 0);
+ iowrite32(val32, base + BK3710_UDMAENV);
+
+ /* Enable UDMA for Device */
+ val16 = ioread16(base + BK3710_UDMACTL) | (1 << dev);
+ iowrite16(val16, base + BK3710_UDMACTL);
+}
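To make the arithmetic above concrete, here is a standalone sketch computing the three UDMA mode 2 fields for a hypothetical 100 MHz IDE clock (so ideclk_period = 10 ns); the program is illustrative only and not part of the driver:

	#include <stdio.h>

	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

	int main(void)
	{
		unsigned int ideclk_period = 10;		/* ns, assuming a 100 MHz clock */
		unsigned int rptime = 100, cycletime = 120 / 2;	/* UDMA mode 2 table entry */

		unsigned int t0   = DIV_ROUND_UP(cycletime, ideclk_period) - 1;	/* 5 */
		unsigned int tenv = DIV_ROUND_UP(20, ideclk_period) - 1;		/* 1 */
		unsigned int trp  = DIV_ROUND_UP(rptime, ideclk_period) - 1;		/* 9 */

		/* These are the values programmed into UDMASTB, UDMAENV and UDMATRP */
		printf("t0=%u tenv=%u trp=%u\n", t0, tenv, trp);
		return 0;
	}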
+
+static void pata_bk3710_setmwdmamode(void __iomem *base, unsigned int dev,
+ unsigned short min_cycle,
+ unsigned int mode)
+{
+ const struct ata_timing *t;
+ int cycletime;
+ u32 val32;
+ u16 val16;
+ u8 td, tkw, t0;
+
+ t = ata_timing_find_mode(mode);
+ cycletime = max_t(int, t->cycle, min_cycle);
+
+ /* DMA Data Setup */
+ t0 = DIV_ROUND_UP(cycletime, ideclk_period);
+ td = DIV_ROUND_UP(t->active, ideclk_period);
+ tkw = t0 - td - 1;
+ td--;
+
+ val32 = ioread32(base + BK3710_DMASTB) & (0xFF << (dev ? 0 : 8));
+ val32 |= td << (dev ? 8 : 0);
+ iowrite32(val32, base + BK3710_DMASTB);
+
+ val32 = ioread32(base + BK3710_DMARCVR) & (0xFF << (dev ? 0 : 8));
+ val32 |= tkw << (dev ? 8 : 0);
+ iowrite32(val32, base + BK3710_DMARCVR);
+
+ /* Disable UDMA for Device */
+ val16 = ioread16(base + BK3710_UDMACTL) & ~(1 << dev);
+ iowrite16(val16, base + BK3710_UDMACTL);
+}
+
+static void pata_bk3710_set_dmamode(struct ata_port *ap,
+ struct ata_device *adev)
+{
+ void __iomem *base = (void __iomem *)ap->ioaddr.bmdma_addr;
+ int is_slave = adev->devno;
+ const u8 xferspeed = adev->dma_mode;
+
+ if (xferspeed >= XFER_UDMA_0)
+ pata_bk3710_setudmamode(base, is_slave,
+ xferspeed - XFER_UDMA_0);
+ else
+ pata_bk3710_setmwdmamode(base, is_slave,
+ adev->id[ATA_ID_EIDE_DMA_MIN],
+ xferspeed);
+}
+
+static void pata_bk3710_setpiomode(void __iomem *base, struct ata_device *pair,
+ unsigned int dev, unsigned int cycletime,
+ unsigned int mode)
+{
+ const struct ata_timing *t;
+ u32 val32;
+ u8 t2, t2i, t0;
+
+ t = ata_timing_find_mode(XFER_PIO_0 + mode);
+
+ /* PIO Data Setup */
+ t0 = DIV_ROUND_UP(cycletime, ideclk_period);
+ t2 = DIV_ROUND_UP(t->active, ideclk_period);
+
+ t2i = t0 - t2 - 1;
+ t2--;
+
+ val32 = ioread32(base + BK3710_DATSTB) & (0xFF << (dev ? 0 : 8));
+ val32 |= t2 << (dev ? 8 : 0);
+ iowrite32(val32, base + BK3710_DATSTB);
+
+ val32 = ioread32(base + BK3710_DATRCVR) & (0xFF << (dev ? 0 : 8));
+ val32 |= t2i << (dev ? 8 : 0);
+ iowrite32(val32, base + BK3710_DATRCVR);
+
+ /* FIXME: this is broken also in the old driver */
+ if (pair) {
+ u8 mode2 = pair->pio_mode - XFER_PIO_0;
+
+ if (mode2 < mode)
+ mode = mode2;
+ }
+
+ /* TASKFILE Setup */
+ t0 = DIV_ROUND_UP(t->cyc8b, ideclk_period);
+ t2 = DIV_ROUND_UP(t->act8b, ideclk_period);
+
+ t2i = t0 - t2 - 1;
+ t2--;
+
+ val32 = ioread32(base + BK3710_REGSTB) & (0xFF << (dev ? 0 : 8));
+ val32 |= t2 << (dev ? 8 : 0);
+ iowrite32(val32, base + BK3710_REGSTB);
+
+ val32 = ioread32(base + BK3710_REGRCVR) & (0xFF << (dev ? 0 : 8));
+ val32 |= t2i << (dev ? 8 : 0);
+ iowrite32(val32, base + BK3710_REGRCVR);
+}
+
+static void pata_bk3710_set_piomode(struct ata_port *ap,
+ struct ata_device *adev)
+{
+ void __iomem *base = (void __iomem *)ap->ioaddr.bmdma_addr;
+ struct ata_device *pair = ata_dev_pair(adev);
+ const struct ata_timing *t = ata_timing_find_mode(adev->pio_mode);
+ const u16 *id = adev->id;
+ unsigned int cycle_time = 0;
+ int is_slave = adev->devno;
+ const u8 pio = adev->pio_mode - XFER_PIO_0;
+
+ if (id[ATA_ID_FIELD_VALID] & 2) {
+ if (ata_id_has_iordy(id))
+ cycle_time = id[ATA_ID_EIDE_PIO_IORDY];
+ else
+ cycle_time = id[ATA_ID_EIDE_PIO];
+
+ /* conservative "downgrade" for all pre-ATA2 drives */
+ if (pio < 3 && cycle_time < t->cycle)
+ cycle_time = 0; /* use standard timing */
+ }
+
+ if (!cycle_time)
+ cycle_time = t->cycle;
+
+ pata_bk3710_setpiomode(base, pair, is_slave, cycle_time, pio);
+}
+
+static void pata_bk3710_chipinit(void __iomem *base)
+{
+ /*
+ * REVISIT: the ATA reset signal needs to be managed through a
+ * GPIO, which means it should come from platform_data. Until
+ * we get and use such information, we have to trust that things
+ * have been reset before we get here.
+ */
+
+ /*
+ * Program the IDETIMP Register Value based on the following assumptions
+ *
+ * (ATA_IDETIMP_IDEEN , ENABLE ) |
+ * (ATA_IDETIMP_PREPOST1 , DISABLE) |
+ * (ATA_IDETIMP_PREPOST0 , DISABLE) |
+ *
+ * DM6446 silicon rev 2.1 and earlier have no observed net benefit
+ * from enabling prefetch/postwrite.
+ */
+ iowrite16(BIT(15), base + BK3710_IDETIMP);
+
+ /*
+ * UDMACTL Ultra-ATA DMA Control
+ * (ATA_UDMACTL_UDMAP1 , 0 ) |
+ * (ATA_UDMACTL_UDMAP0 , 0 )
+ *
+ */
+ iowrite16(0, base + BK3710_UDMACTL);
+
+ /*
+ * MISCCTL Miscellaneous Control Register
+ * (ATA_MISCCTL_HWNHLD1P , 1 cycle)
+ * (ATA_MISCCTL_HWNHLD0P , 1 cycle)
+ * (ATA_MISCCTL_TIMORIDE , 1)
+ */
+ iowrite32(0x001, base + BK3710_MISCCTL);
+
+ /*
+ * IORDYTMP IORDY Timer for Primary Register
+ * (ATA_IORDYTMP_IORDYTMP , DISABLE)
+ */
+ iowrite32(0, base + BK3710_IORDYTMP);
+
+ /*
+ * Configure BMISP Register
+ * (ATA_BMISP_DMAEN1 , DISABLE ) |
+ * (ATA_BMISP_DMAEN0 , DISABLE ) |
+ * (ATA_BMISP_IORDYINT , CLEAR) |
+ * (ATA_BMISP_INTRSTAT , CLEAR) |
+ * (ATA_BMISP_DMAERROR , CLEAR)
+ */
+ iowrite16(0xE, base + BK3710_BMISP);
+
+ pata_bk3710_setpiomode(base, NULL, 0, 600, 0);
+ pata_bk3710_setpiomode(base, NULL, 1, 600, 0);
+}
+
+static struct ata_port_operations pata_bk3710_ports_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ .cable_detect = ata_cable_80wire,
+
+ .set_piomode = pata_bk3710_set_piomode,
+ .set_dmamode = pata_bk3710_set_dmamode,
+};
+
+static int __init pata_bk3710_probe(struct platform_device *pdev)
+{
+ struct clk *clk;
+ struct resource *mem;
+ struct ata_host *host;
+ struct ata_port *ap;
+ void __iomem *base;
+ unsigned long rate;
+ int irq;
+
+ clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(clk))
+ return -ENODEV;
+
+ clk_enable(clk);
+ rate = clk_get_rate(clk);
+ if (!rate)
+ return -EINVAL;
+
+ /* NOTE: round *down* to meet minimum timings; we count in clocks */
+ ideclk_period = 1000000000UL / rate;
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ pr_err(DRV_NAME ": failed to get IRQ resource\n");
+ return irq;
+ }
+
+ base = devm_ioremap_resource(&pdev->dev, mem);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ /* configure the Palmchip controller */
+ pata_bk3710_chipinit(base);
+
+ /* allocate host */
+ host = ata_host_alloc(&pdev->dev, 1);
+ if (!host)
+ return -ENOMEM;
+ ap = host->ports[0];
+
+ ap->ops = &pata_bk3710_ports_ops;
+ ap->pio_mask = ATA_PIO4;
+ ap->mwdma_mask = ATA_MWDMA2;
+ ap->udma_mask = rate < 100000000 ? ATA_UDMA4 : ATA_UDMA5;
+ ap->flags |= ATA_FLAG_SLAVE_POSS;
+
+ ap->ioaddr.data_addr = base + BK3710_TF_OFFSET;
+ ap->ioaddr.error_addr = base + BK3710_TF_OFFSET + 1;
+ ap->ioaddr.feature_addr = base + BK3710_TF_OFFSET + 1;
+ ap->ioaddr.nsect_addr = base + BK3710_TF_OFFSET + 2;
+ ap->ioaddr.lbal_addr = base + BK3710_TF_OFFSET + 3;
+ ap->ioaddr.lbam_addr = base + BK3710_TF_OFFSET + 4;
+ ap->ioaddr.lbah_addr = base + BK3710_TF_OFFSET + 5;
+ ap->ioaddr.device_addr = base + BK3710_TF_OFFSET + 6;
+ ap->ioaddr.status_addr = base + BK3710_TF_OFFSET + 7;
+ ap->ioaddr.command_addr = base + BK3710_TF_OFFSET + 7;
+
+ ap->ioaddr.altstatus_addr = base + BK3710_CTL_OFFSET;
+ ap->ioaddr.ctl_addr = base + BK3710_CTL_OFFSET;
+
+ ap->ioaddr.bmdma_addr = base;
+
+ ata_port_desc(ap, "cmd 0x%lx ctl 0x%lx",
+ (unsigned long)base + BK3710_TF_OFFSET,
+ (unsigned long)base + BK3710_CTL_OFFSET);
+
+ /* activate */
+ return ata_host_activate(host, irq, ata_sff_interrupt, 0,
+ &pata_bk3710_sht);
+}
+
+/* work with hotplug and coldplug */
+MODULE_ALIAS("platform:palm_bk3710");
+
+static struct platform_driver pata_bk3710_driver = {
+ .driver = {
+ .name = "palm_bk3710",
+ },
+};
+
+static int __init pata_bk3710_init(void)
+{
+ return platform_driver_probe(&pata_bk3710_driver, pata_bk3710_probe);
+}
+
+module_init(pata_bk3710_init);
+MODULE_LICENSE("GPL");
diff --git a/drivers/auxdisplay/Kconfig b/drivers/auxdisplay/Kconfig
index 8a8e403644d6..9ae6681c90ad 100644
--- a/drivers/auxdisplay/Kconfig
+++ b/drivers/auxdisplay/Kconfig
@@ -13,8 +13,22 @@ menuconfig AUXDISPLAY
If you say N, all options in this submenu will be skipped and disabled.
+config CHARLCD
+ tristate "Character LCD core support" if COMPILE_TEST
+
if AUXDISPLAY
+config HD44780
+ tristate "HD44780 Character LCD support"
+ depends on GPIOLIB || COMPILE_TEST
+ select CHARLCD
+ ---help---
+ Enable support for Character LCDs using a HD44780 controller.
+ The LCD is accessible through the /dev/lcd char device (10, 156).
+ This code can either be compiled as a module, or linked into the
+ kernel and started at boot.
+ If you don't understand what all this is about, say N.
+
config KS0108
tristate "KS0108 LCD Controller"
depends on PARPORT_PC
@@ -142,3 +156,293 @@ config HT16K33
LED controller driver with keyscan.
endif # AUXDISPLAY
+
+config ARM_CHARLCD
+ bool "ARM Ltd. Character LCD Driver"
+ depends on PLAT_VERSATILE
+ help
+ This is a driver for the character LCD found on the ARM Ltd.
+ Versatile and RealView Platform Baseboards. It doesn't do
+ very much more than display the text "ARM Linux" on the first
+ line and the Linux version on the second line, but that's
+ still useful.
+
+config PANEL
+ tristate "Parallel port LCD/Keypad Panel support"
+ depends on PARPORT
+ select CHARLCD
+ ---help---
+ Say Y here if you have an HD44780 or KS-0074 LCD connected to your
+ parallel port. This driver also features 4 and 6-key keypads. The LCD
+ is accessible through the /dev/lcd char device (10, 156), and the
+ keypad through /dev/keypad (10, 185). This code can either be
+ compiled as a module, or linked into the kernel and started at boot.
+ If you don't understand what all this is about, say N.
+
+if PANEL
+
+config PANEL_PARPORT
+ int "Default parallel port number (0=LPT1)"
+ range 0 255
+ default "0"
+ ---help---
+ This is the index of the parallel port the panel is connected to. One
+ driver instance only supports one parallel port, so if your keypad
+ and LCD are connected to two separate ports, you have to start two
+ modules with different arguments. Numbering starts with '0' for LPT1,
+ and so on.
+
+config PANEL_PROFILE
+ int "Default panel profile (0-5, 0=custom)"
+ range 0 5
+ default "5"
+ ---help---
+ To ease configuration, the driver supports different configuration
+ profiles for past and recent wirings. These profiles can also be
+ used to define an approximate configuration, completed by a few
+ other options. Here are the profiles :
+
+ 0 = custom (see further)
+ 1 = 2x16 parallel LCD, old keypad
+ 2 = 2x16 serial LCD (KS-0074), new keypad
+ 3 = 2x16 parallel LCD (Hantronix), no keypad
+ 4 = 2x16 parallel LCD (Nexcom NSA1045) with Nexcom's keypad
+ 5 = 2x40 parallel LCD (old one), with old keypad
+
+ Custom configurations allow you to define how your display is
+ wired to the parallel port, and how it works. This is only intended
+ for experts.
+
+config PANEL_KEYPAD
+ depends on PANEL_PROFILE="0"
+ int "Keypad type (0=none, 1=old 6 keys, 2=new 6 keys, 3=Nexcom 4 keys)"
+ range 0 3
+ default 0
+ ---help---
+ This enables and configures a keypad connected to the parallel port.
+ The keys will be read from character device 10,185. Valid values are :
+
+ 0 : do not enable this driver
+ 1 : old 6 keys keypad
+ 2 : new 6 keys keypad, as used on the server at www.ant-computing.com
+ 3 : Nexcom NSA1045's 4 keys keypad
+
+ New profiles can be described in the driver source. The driver also
+ supports simultaneous keys pressed when the keypad supports them.
+
+config PANEL_LCD
+ depends on PANEL_PROFILE="0"
+ int "LCD type (0=none, 1=custom, 2=old //, 3=ks0074, 4=hantronix, 5=Nexcom)"
+ range 0 5
+ default 0
+ ---help---
+ This enables and configures an LCD connected to the parallel port.
+ The driver includes an interpreter for escape codes starting with
+ '\e[L' which are specific to the LCD, and a few ANSI codes. The
+ driver will be registered as character device 10,156, usually
+ under the name '/dev/lcd'. There are a total of 6 supported types :
+
+ 0 : do not enable the driver
+ 1 : custom configuration and wiring (see further)
+ 2 : 2x16 & 2x40 parallel LCD (old wiring)
+ 3 : 2x16 serial LCD (KS-0074 based)
+ 4 : 2x16 parallel LCD (Hantronix wiring)
+ 5 : 2x16 parallel LCD (Nexcom wiring)
+
+ When type '1' is specified, other options will appear to configure
+ more precise aspects (wiring, dimensions, protocol, ...). Please note
+ that those values changed from the 2.4 driver for better consistency.
+
+config PANEL_LCD_HEIGHT
+ depends on PANEL_PROFILE="0" && PANEL_LCD="1"
+ int "Number of lines on the LCD (1-2)"
+ range 1 2
+ default 2
+ ---help---
+ This is the number of visible character lines on the LCD in custom profile.
+ It can either be 1 or 2.
+
+config PANEL_LCD_WIDTH
+ depends on PANEL_PROFILE="0" && PANEL_LCD="1"
+ int "Number of characters per line on the LCD (1-40)"
+ range 1 40
+ default 40
+ ---help---
+ This is the number of characters per line on the LCD in custom profile.
+ Common values are 16,20,24,40.
+
+config PANEL_LCD_BWIDTH
+ depends on PANEL_PROFILE="0" && PANEL_LCD="1"
+ int "Internal LCD line width (1-40, 40 by default)"
+ range 1 40
+ default 40
+ ---help---
+ Most LCDs use a standard controller which supports hardware lines of 40
+ characters, although sometimes only 16, 20 or 24 of them are really wired
+ to the terminal. This results in some non-visible but addressable characters,
+ and is the case for most parallel LCDs. Other LCDs, and some serial ones,
+ however, use the same line width internally as what is visible. The KS0074
+ for example, uses 16 characters per line for 16 visible characters per line.
+
+ This option lets you configure the value used by your LCD in 'custom' profile.
+ If you don't know, put '40' here.
+
+config PANEL_LCD_HWIDTH
+ depends on PANEL_PROFILE="0" && PANEL_LCD="1"
+ int "Hardware LCD line width (1-64, 64 by default)"
+ range 1 64
+ default 64
+ ---help---
+ Most LCDs use a single address bit to differentiate line 0 and line 1. Since
+ some of them need to be able to address 40 chars with the lower bits, they
+ often use the next power of two, which is 64, to address the
+ next line.
+
+ If you don't know what your LCD uses, leave 16 here for a 2x16, and
+ 64 here for a 2x40.
+
+config PANEL_LCD_CHARSET
+ depends on PANEL_PROFILE="0" && PANEL_LCD="1"
+ int "LCD character set (0=normal, 1=KS0074)"
+ range 0 1
+ default 0
+ ---help---
+ Some controllers such as the KS0074 use a somewhat strange character set
+ where many symbols are in unusual places. The driver knows how to map
+ 'standard' ASCII characters to the character sets used by these controllers.
+ Valid values are :
+
+ 0 : normal (untranslated) character set
+ 1 : KS0074 character set
+
+ If you don't know, use the normal one (0).
+
+config PANEL_LCD_PROTO
+ depends on PANEL_PROFILE="0" && PANEL_LCD="1"
+ int "LCD communication mode (0=parallel 8 bits, 1=serial)"
+ range 0 1
+ default 0
+ ---help---
+ This driver now supports any serial or parallel LCD wired to a parallel
+ port. But before assigning signals, the driver needs to know if it will
+ be driving a serial LCD or a parallel one. Serial LCDs only use 2 wires
+ (SDA/SCL), while parallel ones use 2 or 3 wires for the control signals
+ (E, RS, sometimes RW), and 4 or 8 for the data. Use 0 here for an 8-bit
+ parallel LCD, and 1 for a serial LCD.
+
+config PANEL_LCD_PIN_E
+ depends on PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO="0"
+ int "Parallel port pin number & polarity connected to the LCD E signal (-17...17) "
+ range -17 17
+ default 14
+ ---help---
+ This describes the number of the parallel port pin to which the LCD 'E'
+ signal has been connected. It can be :
+
+ 0 : no connection (eg: connected to ground)
+ 1..17 : directly connected to any of these pins on the DB25 plug
+ -1..-17 : connected to the same pin through an inverter (eg: transistor).
+
+ Default for the 'E' pin in custom profile is '14' (AUTOFEED).
+
+config PANEL_LCD_PIN_RS
+ depends on PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO="0"
+ int "Parallel port pin number & polarity connected to the LCD RS signal (-17...17) "
+ range -17 17
+ default 17
+ ---help---
+ This describes the number of the parallel port pin to which the LCD 'RS'
+ signal has been connected. It can be :
+
+ 0 : no connection (eg: connected to ground)
+ 1..17 : directly connected to any of these pins on the DB25 plug
+ -1..-17 : connected to the same pin through an inverter (eg: transistor).
+
+ Default for the 'RS' pin in custom profile is '17' (SELECT IN).
+
+config PANEL_LCD_PIN_RW
+ depends on PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO="0"
+ int "Parallel port pin number & polarity connected to the LCD RW signal (-17...17) "
+ range -17 17
+ default 16
+ ---help---
+ This describes the number of the parallel port pin to which the LCD 'RW'
+ signal has been connected. It can be :
+
+ 0 : no connection (eg: connected to ground)
+ 1..17 : directly connected to any of these pins on the DB25 plug
+ -1..-17 : connected to the same pin through an inverter (eg: transistor).
+
+ Default for the 'RW' pin in custom profile is '16' (INIT).
+
+config PANEL_LCD_PIN_SCL
+ depends on PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO!="0"
+ int "Parallel port pin number & polarity connected to the LCD SCL signal (-17...17) "
+ range -17 17
+ default 1
+ ---help---
+ This describes the number of the parallel port pin to which the serial
+ LCD 'SCL' signal has been connected. It can be :
+
+ 0 : no connection (eg: connected to ground)
+ 1..17 : directly connected to any of these pins on the DB25 plug
+ -1..-17 : connected to the same pin through an inverter (eg: transistor).
+
+ Default for the 'SCL' pin in custom profile is '1' (STROBE).
+
+config PANEL_LCD_PIN_SDA
+ depends on PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO!="0"
+ int "Parallel port pin number & polarity connected to the LCD SDA signal (-17...17) "
+ range -17 17
+ default 2
+ ---help---
+ This describes the number of the parallel port pin to which the serial
+ LCD 'SDA' signal has been connected. It can be :
+
+ 0 : no connection (eg: connected to ground)
+ 1..17 : directly connected to any of these pins on the DB25 plug
+ -1..-17 : connected to the same pin through an inverter (eg: transistor).
+
+ Default for the 'SDA' pin in custom profile is '2' (D0).
+
+config PANEL_LCD_PIN_BL
+ depends on PANEL_PROFILE="0" && PANEL_LCD="1"
+ int "Parallel port pin number & polarity connected to the LCD backlight signal (-17...17) "
+ range -17 17
+ default 0
+ ---help---
+ This describes the number of the parallel port pin to which the LCD 'BL' signal
+ has been connected. It can be :
+
+ 0 : no connection (eg: connected to ground)
+ 1..17 : directly connected to any of these pins on the DB25 plug
+ -1..-17 : connected to the same pin through an inverter (eg: transistor).
+
+ Default for the 'BL' pin in custom profile is '0' (uncontrolled).
+
+config PANEL_CHANGE_MESSAGE
+ bool "Change LCD initialization message ?"
+ default "n"
+ ---help---
+ This allows you to replace the boot message indicating the kernel version
+ and the driver version with a custom message. This is useful on appliances
+ where a simple 'Starting system' message can be enough to stop a customer
+ from worrying.
+
+ If you say 'Y' here, you'll be able to choose a message yourself. Otherwise,
+ say 'N' and keep the default message with the version.
+
+config PANEL_BOOT_MESSAGE
+ depends on PANEL_CHANGE_MESSAGE="y"
+ string "New initialization message"
+ default ""
+ ---help---
+ This allows you to replace the boot message indicating the kernel version
+ and the driver version with a custom message. This is useful on appliances
+ where a simple 'Starting system' message can be enough to stop a customer
+ from worrying.
+
+ An empty message will only clear the display at driver init time. Any other
+ printf()-formatted message is valid with newline and escape codes.
+
+endif # PANEL
diff --git a/drivers/auxdisplay/Makefile b/drivers/auxdisplay/Makefile
index cb3dd847713b..2b8af3dc5e42 100644
--- a/drivers/auxdisplay/Makefile
+++ b/drivers/auxdisplay/Makefile
@@ -2,7 +2,11 @@
# Makefile for the kernel auxiliary displays device drivers.
#
+obj-$(CONFIG_CHARLCD) += charlcd.o
+obj-$(CONFIG_ARM_CHARLCD) += arm-charlcd.o
obj-$(CONFIG_KS0108) += ks0108.o
obj-$(CONFIG_CFAG12864B) += cfag12864b.o cfag12864bfb.o
obj-$(CONFIG_IMG_ASCII_LCD) += img-ascii-lcd.o
+obj-$(CONFIG_HD44780) += hd44780.o
obj-$(CONFIG_HT16K33) += ht16k33.o
+obj-$(CONFIG_PANEL) += panel.o
diff --git a/drivers/misc/arm-charlcd.c b/drivers/auxdisplay/arm-charlcd.c
index b3176ee92b90..b3176ee92b90 100644
--- a/drivers/misc/arm-charlcd.c
+++ b/drivers/auxdisplay/arm-charlcd.c
diff --git a/drivers/auxdisplay/charlcd.c b/drivers/auxdisplay/charlcd.c
new file mode 100644
index 000000000000..cfeb049a01ef
--- /dev/null
+++ b/drivers/auxdisplay/charlcd.c
@@ -0,0 +1,818 @@
+/*
+ * Character LCD driver for Linux
+ *
+ * Copyright (C) 2000-2008, Willy Tarreau <w@1wt.eu>
+ * Copyright (C) 2016-2017 Glider bvba
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/atomic.h>
+#include <linux/delay.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <linux/reboot.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/workqueue.h>
+
+#include <generated/utsrelease.h>
+
+#include <misc/charlcd.h>
+
+#define LCD_MINOR 156
+
+#define DEFAULT_LCD_BWIDTH 40
+#define DEFAULT_LCD_HWIDTH 64
+
+/* Keep the backlight on this many seconds for each flash */
+#define LCD_BL_TEMPO_PERIOD 4
+
+#define LCD_FLAG_B 0x0004 /* Blink on */
+#define LCD_FLAG_C 0x0008 /* Cursor on */
+#define LCD_FLAG_D 0x0010 /* Display on */
+#define LCD_FLAG_F 0x0020 /* Large font mode */
+#define LCD_FLAG_N 0x0040 /* 2-rows mode */
+#define LCD_FLAG_L 0x0080 /* Backlight enabled */
+
+/* LCD commands */
+#define LCD_CMD_DISPLAY_CLEAR 0x01 /* Clear entire display */
+
+#define LCD_CMD_ENTRY_MODE 0x04 /* Set entry mode */
+#define LCD_CMD_CURSOR_INC 0x02 /* Increment cursor */
+
+#define LCD_CMD_DISPLAY_CTRL 0x08 /* Display control */
+#define LCD_CMD_DISPLAY_ON 0x04 /* Set display on */
+#define LCD_CMD_CURSOR_ON 0x02 /* Set cursor on */
+#define LCD_CMD_BLINK_ON 0x01 /* Set blink on */
+
+#define LCD_CMD_SHIFT 0x10 /* Shift cursor/display */
+#define LCD_CMD_DISPLAY_SHIFT 0x08 /* Shift display instead of cursor */
+#define LCD_CMD_SHIFT_RIGHT 0x04 /* Shift display/cursor to the right */
+
+#define LCD_CMD_FUNCTION_SET 0x20 /* Set function */
+#define LCD_CMD_DATA_LEN_8BITS 0x10 /* Set data length to 8 bits */
+#define LCD_CMD_TWO_LINES 0x08 /* Set to two display lines */
+#define LCD_CMD_FONT_5X10_DOTS 0x04 /* Set char font to 5x10 dots */
+
+#define LCD_CMD_SET_CGRAM_ADDR 0x40 /* Set char generator RAM address */
+
+#define LCD_CMD_SET_DDRAM_ADDR 0x80 /* Set display data RAM address */
+
+#define LCD_ESCAPE_LEN 24 /* Max chars for LCD escape command */
+#define LCD_ESCAPE_CHAR 27 /* Use char 27 for escape command */
+
+struct charlcd_priv {
+ struct charlcd lcd;
+
+ struct delayed_work bl_work;
+ struct mutex bl_tempo_lock; /* Protects access to bl_tempo */
+ bool bl_tempo;
+
+ bool must_clear;
+
+ /* contains the LCD config state */
+ unsigned long int flags;
+
+ /* Contains the LCD X and Y offset */
+ struct {
+ unsigned long int x;
+ unsigned long int y;
+ } addr;
+
+ /* Current escape sequence and its length, or -1 if outside a sequence */
+ struct {
+ char buf[LCD_ESCAPE_LEN + 1];
+ int len;
+ } esc_seq;
+
+ unsigned long long drvdata[0];
+};
+
+#define to_priv(p) container_of(p, struct charlcd_priv, lcd)
+
+/* Device single-open policy control */
+static atomic_t charlcd_available = ATOMIC_INIT(1);
+
+/* sleep that many milliseconds, rescheduling unless called from interrupt context */
+static void long_sleep(int ms)
+{
+ if (in_interrupt())
+ mdelay(ms);
+ else
+ schedule_timeout_interruptible(msecs_to_jiffies(ms));
+}
+
+/* turn the backlight on or off */
+static void charlcd_backlight(struct charlcd *lcd, int on)
+{
+ struct charlcd_priv *priv = to_priv(lcd);
+
+ if (!lcd->ops->backlight)
+ return;
+
+ mutex_lock(&priv->bl_tempo_lock);
+ if (!priv->bl_tempo)
+ lcd->ops->backlight(lcd, on);
+ mutex_unlock(&priv->bl_tempo_lock);
+}
+
+static void charlcd_bl_off(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct charlcd_priv *priv =
+ container_of(dwork, struct charlcd_priv, bl_work);
+
+ mutex_lock(&priv->bl_tempo_lock);
+ if (priv->bl_tempo) {
+ priv->bl_tempo = false;
+ if (!(priv->flags & LCD_FLAG_L))
+ priv->lcd.ops->backlight(&priv->lcd, 0);
+ }
+ mutex_unlock(&priv->bl_tempo_lock);
+}
+
+/* turn the backlight on for a little while */
+void charlcd_poke(struct charlcd *lcd)
+{
+ struct charlcd_priv *priv = to_priv(lcd);
+
+ if (!lcd->ops->backlight)
+ return;
+
+ cancel_delayed_work_sync(&priv->bl_work);
+
+ mutex_lock(&priv->bl_tempo_lock);
+ if (!priv->bl_tempo && !(priv->flags & LCD_FLAG_L))
+ lcd->ops->backlight(lcd, 1);
+ priv->bl_tempo = true;
+ schedule_delayed_work(&priv->bl_work, LCD_BL_TEMPO_PERIOD * HZ);
+ mutex_unlock(&priv->bl_tempo_lock);
+}
+EXPORT_SYMBOL_GPL(charlcd_poke);
+
+static void charlcd_gotoxy(struct charlcd *lcd)
+{
+ struct charlcd_priv *priv = to_priv(lcd);
+ unsigned int addr;
+
+ /*
+ * we force the cursor to stay at the end of the
+ * line if it wants to go farther
+ */
+ addr = priv->addr.x < lcd->bwidth ? priv->addr.x & (lcd->hwidth - 1)
+ : lcd->bwidth - 1;
+ if (priv->addr.y & 1)
+ addr += lcd->hwidth;
+ if (priv->addr.y & 2)
+ addr += lcd->bwidth;
+ lcd->ops->write_cmd(lcd, LCD_CMD_SET_DDRAM_ADDR | addr);
+}
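As a worked example of the address computation above, assume a hypothetical 20x4 panel (bwidth = 20, default hwidth = 64); the standalone program below mirrors the logic and is not part of the driver:

	#include <stdio.h>

	int main(void)
	{
		unsigned int bwidth = 20, hwidth = 64;	/* assumed 20x4 panel */
		unsigned int x = 0, y;

		for (y = 0; y < 4; y++) {
			unsigned int addr = x < bwidth ? (x & (hwidth - 1)) : bwidth - 1;

			if (y & 1)
				addr += hwidth;
			if (y & 2)
				addr += bwidth;
			/* prints 0x00, 0x40, 0x14, 0x54 for lines 0..3 */
			printf("line %u starts at DDRAM 0x%02x\n", y, addr);
		}
		return 0;
	}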
+
+static void charlcd_home(struct charlcd *lcd)
+{
+ struct charlcd_priv *priv = to_priv(lcd);
+
+ priv->addr.x = 0;
+ priv->addr.y = 0;
+ charlcd_gotoxy(lcd);
+}
+
+static void charlcd_print(struct charlcd *lcd, char c)
+{
+ struct charlcd_priv *priv = to_priv(lcd);
+
+ if (priv->addr.x < lcd->bwidth) {
+ if (lcd->char_conv)
+ c = lcd->char_conv[(unsigned char)c];
+ lcd->ops->write_data(lcd, c);
+ priv->addr.x++;
+ }
+ /* prevents the cursor from wrapping onto the next line */
+ if (priv->addr.x == lcd->bwidth)
+ charlcd_gotoxy(lcd);
+}
+
+static void charlcd_clear_fast(struct charlcd *lcd)
+{
+ int pos;
+
+ charlcd_home(lcd);
+
+ if (lcd->ops->clear_fast)
+ lcd->ops->clear_fast(lcd);
+ else
+ for (pos = 0; pos < min(2, lcd->height) * lcd->hwidth; pos++)
+ lcd->ops->write_data(lcd, ' ');
+
+ charlcd_home(lcd);
+}
+
+/* clears the display and resets X/Y */
+static void charlcd_clear_display(struct charlcd *lcd)
+{
+ struct charlcd_priv *priv = to_priv(lcd);
+
+ lcd->ops->write_cmd(lcd, LCD_CMD_DISPLAY_CLEAR);
+ priv->addr.x = 0;
+ priv->addr.y = 0;
+ /* we must wait a few milliseconds (15) */
+ long_sleep(15);
+}
+
+static int charlcd_init_display(struct charlcd *lcd)
+{
+ void (*write_cmd_raw)(struct charlcd *lcd, int cmd);
+ struct charlcd_priv *priv = to_priv(lcd);
+ u8 init;
+
+ if (lcd->ifwidth != 4 && lcd->ifwidth != 8)
+ return -EINVAL;
+
+ priv->flags = ((lcd->height > 1) ? LCD_FLAG_N : 0) | LCD_FLAG_D |
+ LCD_FLAG_C | LCD_FLAG_B;
+
+ long_sleep(20); /* wait 20 ms after power-up for the paranoid */
+
+ /*
+ * 8-bit mode, 1 line, small fonts; let's do it 3 times, to make sure
+ * the LCD is in 8-bit mode afterwards
+ */
+ init = LCD_CMD_FUNCTION_SET | LCD_CMD_DATA_LEN_8BITS;
+ if (lcd->ifwidth == 4) {
+ init >>= 4;
+ write_cmd_raw = lcd->ops->write_cmd_raw4;
+ } else {
+ write_cmd_raw = lcd->ops->write_cmd;
+ }
+ write_cmd_raw(lcd, init);
+ long_sleep(10);
+ write_cmd_raw(lcd, init);
+ long_sleep(10);
+ write_cmd_raw(lcd, init);
+ long_sleep(10);
+
+ if (lcd->ifwidth == 4) {
+ /* Switch to 4-bit mode, 1 line, small fonts */
+ lcd->ops->write_cmd_raw4(lcd, LCD_CMD_FUNCTION_SET >> 4);
+ long_sleep(10);
+ }
+
+ /* set font height and lines number */
+ lcd->ops->write_cmd(lcd,
+ LCD_CMD_FUNCTION_SET |
+ ((lcd->ifwidth == 8) ? LCD_CMD_DATA_LEN_8BITS : 0) |
+ ((priv->flags & LCD_FLAG_F) ? LCD_CMD_FONT_5X10_DOTS : 0) |
+ ((priv->flags & LCD_FLAG_N) ? LCD_CMD_TWO_LINES : 0));
+ long_sleep(10);
+
+ /* display off, cursor off, blink off */
+ lcd->ops->write_cmd(lcd, LCD_CMD_DISPLAY_CTRL);
+ long_sleep(10);
+
+ lcd->ops->write_cmd(lcd,
+ LCD_CMD_DISPLAY_CTRL | /* set display mode */
+ ((priv->flags & LCD_FLAG_D) ? LCD_CMD_DISPLAY_ON : 0) |
+ ((priv->flags & LCD_FLAG_C) ? LCD_CMD_CURSOR_ON : 0) |
+ ((priv->flags & LCD_FLAG_B) ? LCD_CMD_BLINK_ON : 0));
+
+ charlcd_backlight(lcd, (priv->flags & LCD_FLAG_L) ? 1 : 0);
+
+ long_sleep(10);
+
+ /* entry mode set : increment, cursor shifting */
+ lcd->ops->write_cmd(lcd, LCD_CMD_ENTRY_MODE | LCD_CMD_CURSOR_INC);
+
+ charlcd_clear_display(lcd);
+ return 0;
+}
+
+/*
+ * These are the file operation functions for user access to /dev/lcd.
+ * They can also be called from inside the kernel by setting file and
+ * ppos to NULL.
+ *
+ */
+
+static inline int handle_lcd_special_code(struct charlcd *lcd)
+{
+ struct charlcd_priv *priv = to_priv(lcd);
+
+ /* LCD special codes */
+
+ int processed = 0;
+
+ char *esc = priv->esc_seq.buf + 2;
+ int oldflags = priv->flags;
+
+ /* check for display mode flags */
+ switch (*esc) {
+ case 'D': /* Display ON */
+ priv->flags |= LCD_FLAG_D;
+ processed = 1;
+ break;
+ case 'd': /* Display OFF */
+ priv->flags &= ~LCD_FLAG_D;
+ processed = 1;
+ break;
+ case 'C': /* Cursor ON */
+ priv->flags |= LCD_FLAG_C;
+ processed = 1;
+ break;
+ case 'c': /* Cursor OFF */
+ priv->flags &= ~LCD_FLAG_C;
+ processed = 1;
+ break;
+ case 'B': /* Blink ON */
+ priv->flags |= LCD_FLAG_B;
+ processed = 1;
+ break;
+ case 'b': /* Blink OFF */
+ priv->flags &= ~LCD_FLAG_B;
+ processed = 1;
+ break;
+ case '+': /* Back light ON */
+ priv->flags |= LCD_FLAG_L;
+ processed = 1;
+ break;
+ case '-': /* Back light OFF */
+ priv->flags &= ~LCD_FLAG_L;
+ processed = 1;
+ break;
+ case '*': /* Flash back light */
+ charlcd_poke(lcd);
+ processed = 1;
+ break;
+ case 'f': /* Small Font */
+ priv->flags &= ~LCD_FLAG_F;
+ processed = 1;
+ break;
+ case 'F': /* Large Font */
+ priv->flags |= LCD_FLAG_F;
+ processed = 1;
+ break;
+ case 'n': /* One Line */
+ priv->flags &= ~LCD_FLAG_N;
+ processed = 1;
+ break;
+ case 'N': /* Two Lines */
+ priv->flags |= LCD_FLAG_N;
+ break;
+ case 'l': /* Shift Cursor Left */
+ if (priv->addr.x > 0) {
+ /* back one char if not at end of line */
+ if (priv->addr.x < lcd->bwidth)
+ lcd->ops->write_cmd(lcd, LCD_CMD_SHIFT);
+ priv->addr.x--;
+ }
+ processed = 1;
+ break;
+ case 'r': /* shift cursor right */
+ if (priv->addr.x < lcd->width) {
+ /* allow the cursor to pass the end of the line */
+ if (priv->addr.x < (lcd->bwidth - 1))
+ lcd->ops->write_cmd(lcd,
+ LCD_CMD_SHIFT | LCD_CMD_SHIFT_RIGHT);
+ priv->addr.x++;
+ }
+ processed = 1;
+ break;
+ case 'L': /* shift display left */
+ lcd->ops->write_cmd(lcd, LCD_CMD_SHIFT | LCD_CMD_DISPLAY_SHIFT);
+ processed = 1;
+ break;
+ case 'R': /* shift display right */
+ lcd->ops->write_cmd(lcd,
+ LCD_CMD_SHIFT | LCD_CMD_DISPLAY_SHIFT |
+ LCD_CMD_SHIFT_RIGHT);
+ processed = 1;
+ break;
+ case 'k': { /* kill end of line */
+ int x;
+
+ for (x = priv->addr.x; x < lcd->bwidth; x++)
+ lcd->ops->write_data(lcd, ' ');
+
+ /* restore cursor position */
+ charlcd_gotoxy(lcd);
+ processed = 1;
+ break;
+ }
+ case 'I': /* reinitialize display */
+ charlcd_init_display(lcd);
+ processed = 1;
+ break;
+ case 'G': {
+ /* Generator : LGcxxxxx...xx; must have <c> between '0'
+ * and '7', representing the numerical ASCII code of the
+ * redefined character, and <xx...xx> a sequence of 16
+ * hex digits representing 8 bytes for each character.
+ * Most LCDs will only use the lower 5 bits of the first
+ * 7 bytes.
+ */
+
+ unsigned char cgbytes[8];
+ unsigned char cgaddr;
+ int cgoffset;
+ int shift;
+ char value;
+ int addr;
+
+ if (!strchr(esc, ';'))
+ break;
+
+ esc++;
+
+ cgaddr = *(esc++) - '0';
+ if (cgaddr > 7) {
+ processed = 1;
+ break;
+ }
+
+ cgoffset = 0;
+ shift = 0;
+ value = 0;
+ while (*esc && cgoffset < 8) {
+ shift ^= 4;
+ if (*esc >= '0' && *esc <= '9') {
+ value |= (*esc - '0') << shift;
+ } else if (*esc >= 'A' && *esc <= 'Z') {
+ value |= (*esc - 'A' + 10) << shift;
+ } else if (*esc >= 'a' && *esc <= 'z') {
+ value |= (*esc - 'a' + 10) << shift;
+ } else {
+ esc++;
+ continue;
+ }
+
+ if (shift == 0) {
+ cgbytes[cgoffset++] = value;
+ value = 0;
+ }
+
+ esc++;
+ }
+
+ lcd->ops->write_cmd(lcd, LCD_CMD_SET_CGRAM_ADDR | (cgaddr * 8));
+ for (addr = 0; addr < cgoffset; addr++)
+ lcd->ops->write_data(lcd, cgbytes[addr]);
+
+ /* ensures that we stop writing to CGRAM */
+ charlcd_gotoxy(lcd);
+ processed = 1;
+ break;
+ }
+ case 'x': /* gotoxy : LxXXX[yYYY]; */
+ case 'y': /* gotoxy : LyYYY[xXXX]; */
+ if (!strchr(esc, ';'))
+ break;
+
+ while (*esc) {
+ if (*esc == 'x') {
+ esc++;
+ if (kstrtoul(esc, 10, &priv->addr.x) < 0)
+ break;
+ } else if (*esc == 'y') {
+ esc++;
+ if (kstrtoul(esc, 10, &priv->addr.y) < 0)
+ break;
+ } else {
+ break;
+ }
+ }
+
+ charlcd_gotoxy(lcd);
+ processed = 1;
+ break;
+ }
+
+ /* TODO: This indent party here got ugly, clean it! */
+ /* Check whether one flag was changed */
+ if (oldflags == priv->flags)
+ return processed;
+
+ /* check whether one of B,C,D flags were changed */
+ if ((oldflags ^ priv->flags) &
+ (LCD_FLAG_B | LCD_FLAG_C | LCD_FLAG_D))
+ /* set display mode */
+ lcd->ops->write_cmd(lcd,
+ LCD_CMD_DISPLAY_CTRL |
+ ((priv->flags & LCD_FLAG_D) ? LCD_CMD_DISPLAY_ON : 0) |
+ ((priv->flags & LCD_FLAG_C) ? LCD_CMD_CURSOR_ON : 0) |
+ ((priv->flags & LCD_FLAG_B) ? LCD_CMD_BLINK_ON : 0));
+ /* check whether one of F,N flags was changed */
+ else if ((oldflags ^ priv->flags) & (LCD_FLAG_F | LCD_FLAG_N))
+ lcd->ops->write_cmd(lcd,
+ LCD_CMD_FUNCTION_SET |
+ ((lcd->ifwidth == 8) ? LCD_CMD_DATA_LEN_8BITS : 0) |
+ ((priv->flags & LCD_FLAG_F) ? LCD_CMD_FONT_5X10_DOTS : 0) |
+ ((priv->flags & LCD_FLAG_N) ? LCD_CMD_TWO_LINES : 0));
+ /* check whether L flag was changed */
+ else if ((oldflags ^ priv->flags) & LCD_FLAG_L)
+ charlcd_backlight(lcd, !!(priv->flags & LCD_FLAG_L));
+
+ return processed;
+}
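As a usage sketch for the escape handling above, here is a minimal userspace program driving /dev/lcd with a few of the codes accepted by this function and by charlcd_write_char(); the program is an assumed example, not part of the patch:

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	int main(void)
	{
		/* "\x1b[Lc" cursor off, "\x1b[Lb" blink off, "\x1b[L*" flash backlight */
		const char *setup = "\x1b[Lc\x1b[Lb\x1b[L*";
		int fd = open("/dev/lcd", O_WRONLY);

		if (fd < 0) {
			perror("open /dev/lcd");
			return 1;
		}

		write(fd, "\x1b[2J", 4);		/* clear the display */
		write(fd, setup, strlen(setup));
		write(fd, "Hello\n", 6);		/* '\n' pads the line and moves to the next one */
		write(fd, "World", 5);

		close(fd);
		return 0;
	}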
+
+static void charlcd_write_char(struct charlcd *lcd, char c)
+{
+ struct charlcd_priv *priv = to_priv(lcd);
+
+ /* first, we'll test if we're in escape mode */
+ if ((c != '\n') && priv->esc_seq.len >= 0) {
+ /* yes, let's add this char to the buffer */
+ priv->esc_seq.buf[priv->esc_seq.len++] = c;
+ priv->esc_seq.buf[priv->esc_seq.len] = 0;
+ } else {
+ /* aborts any previous escape sequence */
+ priv->esc_seq.len = -1;
+
+ switch (c) {
+ case LCD_ESCAPE_CHAR:
+ /* start of an escape sequence */
+ priv->esc_seq.len = 0;
+ priv->esc_seq.buf[priv->esc_seq.len] = 0;
+ break;
+ case '\b':
+ /* go back one char and clear it */
+ if (priv->addr.x > 0) {
+ /*
+ * check if we're not at the
+ * end of the line
+ */
+ if (priv->addr.x < lcd->bwidth)
+ /* back one char */
+ lcd->ops->write_cmd(lcd, LCD_CMD_SHIFT);
+ priv->addr.x--;
+ }
+ /* replace with a space */
+ lcd->ops->write_data(lcd, ' ');
+ /* back one char again */
+ lcd->ops->write_cmd(lcd, LCD_CMD_SHIFT);
+ break;
+ case '\014':
+ /* quickly clear the display */
+ charlcd_clear_fast(lcd);
+ break;
+ case '\n':
+ /*
+ * flush the remainder of the current line and
+ * go to the beginning of the next line
+ */
+ for (; priv->addr.x < lcd->bwidth; priv->addr.x++)
+ lcd->ops->write_data(lcd, ' ');
+ priv->addr.x = 0;
+ priv->addr.y = (priv->addr.y + 1) % lcd->height;
+ charlcd_gotoxy(lcd);
+ break;
+ case '\r':
+ /* go to the beginning of the same line */
+ priv->addr.x = 0;
+ charlcd_gotoxy(lcd);
+ break;
+ case '\t':
+ /* print a space instead of the tab */
+ charlcd_print(lcd, ' ');
+ break;
+ default:
+ /* simply print this char */
+ charlcd_print(lcd, c);
+ break;
+ }
+ }
+
+ /*
+ * now we'll see if we're in an escape mode and if the current
+ * escape sequence can be understood.
+ */
+ if (priv->esc_seq.len >= 2) {
+ int processed = 0;
+
+ if (!strcmp(priv->esc_seq.buf, "[2J")) {
+ /* clear the display */
+ charlcd_clear_fast(lcd);
+ processed = 1;
+ } else if (!strcmp(priv->esc_seq.buf, "[H")) {
+ /* cursor to home */
+ charlcd_home(lcd);
+ processed = 1;
+ }
+ /* codes starting with ^[[L */
+ else if ((priv->esc_seq.len >= 3) &&
+ (priv->esc_seq.buf[0] == '[') &&
+ (priv->esc_seq.buf[1] == 'L')) {
+ processed = handle_lcd_special_code(lcd);
+ }
+
+ /* LCD special escape codes */
+ /*
+ * flush the escape sequence if it's been processed
+ * or if it is getting too long.
+ */
+ if (processed || (priv->esc_seq.len >= LCD_ESCAPE_LEN))
+ priv->esc_seq.len = -1;
+ } /* escape codes */
+}
+
+static struct charlcd *the_charlcd;
+
+static ssize_t charlcd_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ const char __user *tmp = buf;
+ char c;
+
+ for (; count-- > 0; (*ppos)++, tmp++) {
+ if (!in_interrupt() && (((count + 1) & 0x1f) == 0))
+ /*
+ * let's be a little nice with other processes
+ * that need some CPU
+ */
+ schedule();
+
+ if (get_user(c, tmp))
+ return -EFAULT;
+
+ charlcd_write_char(the_charlcd, c);
+ }
+
+ return tmp - buf;
+}
+
+static int charlcd_open(struct inode *inode, struct file *file)
+{
+ struct charlcd_priv *priv = to_priv(the_charlcd);
+
+ if (!atomic_dec_and_test(&charlcd_available))
+ return -EBUSY; /* open only once at a time */
+
+ if (file->f_mode & FMODE_READ) /* device is write-only */
+ return -EPERM;
+
+ if (priv->must_clear) {
+ charlcd_clear_display(&priv->lcd);
+ priv->must_clear = false;
+ }
+ return nonseekable_open(inode, file);
+}
+
+static int charlcd_release(struct inode *inode, struct file *file)
+{
+ atomic_inc(&charlcd_available);
+ return 0;
+}
+
+static const struct file_operations charlcd_fops = {
+ .write = charlcd_write,
+ .open = charlcd_open,
+ .release = charlcd_release,
+ .llseek = no_llseek,
+};
+
+static struct miscdevice charlcd_dev = {
+ .minor = LCD_MINOR,
+ .name = "lcd",
+ .fops = &charlcd_fops,
+};
+
+static void charlcd_puts(struct charlcd *lcd, const char *s)
+{
+ const char *tmp = s;
+ int count = strlen(s);
+
+ for (; count-- > 0; tmp++) {
+ if (!in_interrupt() && (((count + 1) & 0x1f) == 0))
+ /*
+ * let's be a little nice with other processes
+ * that need some CPU
+ */
+ schedule();
+
+ charlcd_write_char(lcd, *tmp);
+ }
+}
+
+/* initialize the LCD driver */
+static int charlcd_init(struct charlcd *lcd)
+{
+ struct charlcd_priv *priv = to_priv(lcd);
+ int ret;
+
+ if (lcd->ops->backlight) {
+ mutex_init(&priv->bl_tempo_lock);
+ INIT_DELAYED_WORK(&priv->bl_work, charlcd_bl_off);
+ }
+
+ /*
+ * before this line, we must NOT send anything to the display.
+ * Since charlcd_init_display() needs to write data, we have to
+ * mark the LCD initialized just before.
+ */
+ ret = charlcd_init_display(lcd);
+ if (ret)
+ return ret;
+
+ /* display a short message */
+#ifdef CONFIG_PANEL_CHANGE_MESSAGE
+#ifdef CONFIG_PANEL_BOOT_MESSAGE
+ charlcd_puts(lcd, "\x1b[Lc\x1b[Lb\x1b[L*" CONFIG_PANEL_BOOT_MESSAGE);
+#endif
+#else
+ charlcd_puts(lcd, "\x1b[Lc\x1b[Lb\x1b[L*Linux-" UTS_RELEASE "\n");
+#endif
+ /* clear the display on the next device opening */
+ priv->must_clear = true;
+ charlcd_home(lcd);
+ return 0;
+}
+
+struct charlcd *charlcd_alloc(unsigned int drvdata_size)
+{
+ struct charlcd_priv *priv;
+ struct charlcd *lcd;
+
+ priv = kzalloc(sizeof(*priv) + drvdata_size, GFP_KERNEL);
+ if (!priv)
+ return NULL;
+
+ priv->esc_seq.len = -1;
+
+ lcd = &priv->lcd;
+ lcd->ifwidth = 8;
+ lcd->bwidth = DEFAULT_LCD_BWIDTH;
+ lcd->hwidth = DEFAULT_LCD_HWIDTH;
+ lcd->drvdata = priv->drvdata;
+
+ return lcd;
+}
+EXPORT_SYMBOL_GPL(charlcd_alloc);
+
+static int panel_notify_sys(struct notifier_block *this, unsigned long code,
+ void *unused)
+{
+ struct charlcd *lcd = the_charlcd;
+
+ switch (code) {
+ case SYS_DOWN:
+ charlcd_puts(lcd,
+ "\x0cReloading\nSystem...\x1b[Lc\x1b[Lb\x1b[L+");
+ break;
+ case SYS_HALT:
+ charlcd_puts(lcd, "\x0cSystem Halted.\x1b[Lc\x1b[Lb\x1b[L+");
+ break;
+ case SYS_POWER_OFF:
+ charlcd_puts(lcd, "\x0cPower off.\x1b[Lc\x1b[Lb\x1b[L+");
+ break;
+ default:
+ break;
+ }
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block panel_notifier = {
+ panel_notify_sys,
+ NULL,
+ 0
+};
+
+int charlcd_register(struct charlcd *lcd)
+{
+ int ret;
+
+ ret = charlcd_init(lcd);
+ if (ret)
+ return ret;
+
+ ret = misc_register(&charlcd_dev);
+ if (ret)
+ return ret;
+
+ the_charlcd = lcd;
+ register_reboot_notifier(&panel_notifier);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(charlcd_register);
+
+int charlcd_unregister(struct charlcd *lcd)
+{
+ struct charlcd_priv *priv = to_priv(lcd);
+
+ unregister_reboot_notifier(&panel_notifier);
+ charlcd_puts(lcd, "\x0cLCD driver unloaded.\x1b[Lc\x1b[Lb\x1b[L-");
+ misc_deregister(&charlcd_dev);
+ the_charlcd = NULL;
+ if (lcd->ops->backlight) {
+ cancel_delayed_work_sync(&priv->bl_work);
+ priv->lcd.ops->backlight(&priv->lcd, 0);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(charlcd_unregister);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/auxdisplay/hd44780.c b/drivers/auxdisplay/hd44780.c
new file mode 100644
index 000000000000..036eec404289
--- /dev/null
+++ b/drivers/auxdisplay/hd44780.c
@@ -0,0 +1,326 @@
+/*
+ * HD44780 Character LCD driver for Linux
+ *
+ * Copyright (C) 2000-2008, Willy Tarreau <w@1wt.eu>
+ * Copyright (C) 2016-2017 Glider bvba
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/slab.h>
+
+#include <misc/charlcd.h>
+
+
+enum hd44780_pin {
+ /* Order does matter due to writing to GPIO array subsets! */
+ PIN_DATA0, /* Optional */
+ PIN_DATA1, /* Optional */
+ PIN_DATA2, /* Optional */
+ PIN_DATA3, /* Optional */
+ PIN_DATA4,
+ PIN_DATA5,
+ PIN_DATA6,
+ PIN_DATA7,
+ PIN_CTRL_RS,
+ PIN_CTRL_RW, /* Optional */
+ PIN_CTRL_E,
+ PIN_CTRL_BL, /* Optional */
+ PIN_NUM
+};
+
+struct hd44780 {
+ struct gpio_desc *pins[PIN_NUM];
+};
+
+static void hd44780_backlight(struct charlcd *lcd, int on)
+{
+ struct hd44780 *hd = lcd->drvdata;
+
+ if (hd->pins[PIN_CTRL_BL])
+ gpiod_set_value_cansleep(hd->pins[PIN_CTRL_BL], on);
+}
+
+static void hd44780_strobe_gpio(struct hd44780 *hd)
+{
+ /* Hold the data for 20 us before the strobe */
+ udelay(20);
+
+ gpiod_set_value_cansleep(hd->pins[PIN_CTRL_E], 1);
+
+ /* Hold the strobe for 40 us */
+ udelay(40);
+
+ gpiod_set_value_cansleep(hd->pins[PIN_CTRL_E], 0);
+}
+
+/* write to an LCD panel register in 8 bit GPIO mode */
+static void hd44780_write_gpio8(struct hd44780 *hd, u8 val, unsigned int rs)
+{
+ int values[10]; /* for DATA[0-7], RS, RW */
+ unsigned int i, n;
+
+ for (i = 0; i < 8; i++)
+ values[PIN_DATA0 + i] = !!(val & BIT(i));
+ values[PIN_CTRL_RS] = rs;
+ n = 9;
+ if (hd->pins[PIN_CTRL_RW]) {
+ values[PIN_CTRL_RW] = 0;
+ n++;
+ }
+
+ /* Present the data to the port */
+ gpiod_set_array_value_cansleep(n, &hd->pins[PIN_DATA0], values);
+
+ hd44780_strobe_gpio(hd);
+}
+
+/* write to an LCD panel register in 4 bit GPIO mode */
+static void hd44780_write_gpio4(struct hd44780 *hd, u8 val, unsigned int rs)
+{
+ int values[10]; /* for DATA[0-7], RS, RW, but DATA[0-3] is unused */
+ unsigned int i, n;
+
+ /* High nibble + RS, RW */
+ for (i = 4; i < 8; i++)
+ values[PIN_DATA0 + i] = !!(val & BIT(i));
+ values[PIN_CTRL_RS] = rs;
+ n = 5;
+ if (hd->pins[PIN_CTRL_RW]) {
+ values[PIN_CTRL_RW] = 0;
+ n++;
+ }
+
+ /* Present the data to the port */
+ gpiod_set_array_value_cansleep(n, &hd->pins[PIN_DATA4],
+ &values[PIN_DATA4]);
+
+ hd44780_strobe_gpio(hd);
+
+ /* Low nibble */
+ for (i = 0; i < 4; i++)
+ values[PIN_DATA4 + i] = !!(val & BIT(i));
+
+ /* Present the data to the port */
+ gpiod_set_array_value_cansleep(n, &hd->pins[PIN_DATA4],
+ &values[PIN_DATA4]);
+
+ hd44780_strobe_gpio(hd);
+}
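To illustrate the nibble ordering used above, a standalone sketch for an example byte 0xA5: the high nibble goes out on DATA4..DATA7 first, then the low nibble, each followed by a strobe (illustrative only, not driver code):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint8_t val = 0xA5;	/* example byte sent in 4-bit mode */
		unsigned int i;

		/* High nibble first: DATA4..DATA7 carry bits 4..7, then a strobe */
		printf("high nibble: ");
		for (i = 4; i < 8; i++)
			printf("D%u=%u ", i, !!(val & (1u << i)));
		/* Low nibble second: DATA4..DATA7 carry bits 0..3, then a strobe */
		printf("\nlow nibble:  ");
		for (i = 0; i < 4; i++)
			printf("D%u=%u ", i + 4, !!(val & (1u << i)));
		printf("\n");
		return 0;
	}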
+
+/* Send a command to the LCD panel in 8 bit GPIO mode */
+static void hd44780_write_cmd_gpio8(struct charlcd *lcd, int cmd)
+{
+ struct hd44780 *hd = lcd->drvdata;
+
+ hd44780_write_gpio8(hd, cmd, 0);
+
+ /* The shortest command takes at least 120 us */
+ udelay(120);
+}
+
+/* Send data to the LCD panel in 8 bit GPIO mode */
+static void hd44780_write_data_gpio8(struct charlcd *lcd, int data)
+{
+ struct hd44780 *hd = lcd->drvdata;
+
+ hd44780_write_gpio8(hd, data, 1);
+
+ /* The shortest data takes at least 45 us */
+ udelay(45);
+}
+
+static const struct charlcd_ops hd44780_ops_gpio8 = {
+ .write_cmd = hd44780_write_cmd_gpio8,
+ .write_data = hd44780_write_data_gpio8,
+ .backlight = hd44780_backlight,
+};
+
+/* Send a command to the LCD panel in 4 bit GPIO mode */
+static void hd44780_write_cmd_gpio4(struct charlcd *lcd, int cmd)
+{
+ struct hd44780 *hd = lcd->drvdata;
+
+ hd44780_write_gpio4(hd, cmd, 0);
+
+ /* The shortest command takes at least 120 us */
+ udelay(120);
+}
+
+/* Send 4 bits of a command to the LCD panel in raw 4-bit GPIO mode */
+static void hd44780_write_cmd_raw_gpio4(struct charlcd *lcd, int cmd)
+{
+ int values[10]; /* for DATA[0-7], RS, RW, but DATA[0-3] is unused */
+ struct hd44780 *hd = lcd->drvdata;
+ unsigned int i, n;
+
+ /* Command nibble + RS, RW */
+ for (i = 0; i < 4; i++)
+ values[PIN_DATA4 + i] = !!(cmd & BIT(i));
+ values[PIN_CTRL_RS] = 0;
+ n = 5;
+ if (hd->pins[PIN_CTRL_RW]) {
+ values[PIN_CTRL_RW] = 0;
+ n++;
+ }
+
+ /* Present the data to the port */
+ gpiod_set_array_value_cansleep(n, &hd->pins[PIN_DATA4],
+ &values[PIN_DATA4]);
+
+ hd44780_strobe_gpio(hd);
+}
+
+/* Send data to the LCD panel in 4 bit GPIO mode */
+static void hd44780_write_data_gpio4(struct charlcd *lcd, int data)
+{
+ struct hd44780 *hd = lcd->drvdata;
+
+ hd44780_write_gpio4(hd, data, 1);
+
+ /* The shortest data takes at least 45 us */
+ udelay(45);
+}
+
+static const struct charlcd_ops hd44780_ops_gpio4 = {
+ .write_cmd = hd44780_write_cmd_gpio4,
+ .write_cmd_raw4 = hd44780_write_cmd_raw_gpio4,
+ .write_data = hd44780_write_data_gpio4,
+ .backlight = hd44780_backlight,
+};
+
+static int hd44780_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ unsigned int i, base;
+ struct charlcd *lcd;
+ struct hd44780 *hd;
+ int ifwidth, ret;
+
+ /* Required pins */
+ ifwidth = gpiod_count(dev, "data");
+ if (ifwidth < 0)
+ return ifwidth;
+
+ switch (ifwidth) {
+ case 4:
+ base = PIN_DATA4;
+ break;
+ case 8:
+ base = PIN_DATA0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ lcd = charlcd_alloc(sizeof(struct hd44780));
+ if (!lcd)
+ return -ENOMEM;
+
+ hd = lcd->drvdata;
+
+ for (i = 0; i < ifwidth; i++) {
+ hd->pins[base + i] = devm_gpiod_get_index(dev, "data", i,
+ GPIOD_OUT_LOW);
+ if (IS_ERR(hd->pins[base + i])) {
+ ret = PTR_ERR(hd->pins[base + i]);
+ goto fail;
+ }
+ }
+
+ hd->pins[PIN_CTRL_E] = devm_gpiod_get(dev, "enable", GPIOD_OUT_LOW);
+ if (IS_ERR(hd->pins[PIN_CTRL_E])) {
+ ret = PTR_ERR(hd->pins[PIN_CTRL_E]);
+ goto fail;
+ }
+
+ hd->pins[PIN_CTRL_RS] = devm_gpiod_get(dev, "rs", GPIOD_OUT_HIGH);
+ if (IS_ERR(hd->pins[PIN_CTRL_RS])) {
+ ret = PTR_ERR(hd->pins[PIN_CTRL_RS]);
+ goto fail;
+ }
+
+ /* Optional pins */
+ hd->pins[PIN_CTRL_RW] = devm_gpiod_get_optional(dev, "rw",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(hd->pins[PIN_CTRL_RW])) {
+ ret = PTR_ERR(hd->pins[PIN_CTRL_RW]);
+ goto fail;
+ }
+
+ hd->pins[PIN_CTRL_BL] = devm_gpiod_get_optional(dev, "backlight",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(hd->pins[PIN_CTRL_BL])) {
+ ret = PTR_ERR(hd->pins[PIN_CTRL_BL]);
+ goto fail;
+ }
+
+ /* Required properties */
+ ret = device_property_read_u32(dev, "display-height-chars",
+ &lcd->height);
+ if (ret)
+ goto fail;
+ ret = device_property_read_u32(dev, "display-width-chars", &lcd->width);
+ if (ret)
+ goto fail;
+
+ /*
+ * On displays with more than two rows, the internal buffer width is
+ * usually equal to the display width.
+ */
+ if (lcd->height > 2)
+ lcd->bwidth = lcd->width;
+
+ /* Optional properties */
+ device_property_read_u32(dev, "internal-buffer-width", &lcd->bwidth);
+
+ lcd->ifwidth = ifwidth;
+ lcd->ops = ifwidth == 8 ? &hd44780_ops_gpio8 : &hd44780_ops_gpio4;
+
+ ret = charlcd_register(lcd);
+ if (ret)
+ goto fail;
+
+ platform_set_drvdata(pdev, lcd);
+ return 0;
+
+fail:
+ kfree(lcd);
+ return ret;
+}
+
+static int hd44780_remove(struct platform_device *pdev)
+{
+ struct charlcd *lcd = platform_get_drvdata(pdev);
+
+ charlcd_unregister(lcd);
+ return 0;
+}
+
+static const struct of_device_id hd44780_of_match[] = {
+ { .compatible = "hit,hd44780" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, hd44780_of_match);
+
+static struct platform_driver hd44780_driver = {
+ .probe = hd44780_probe,
+ .remove = hd44780_remove,
+ .driver = {
+ .name = "hd44780",
+ .of_match_table = hd44780_of_match,
+ },
+};
+
+module_platform_driver(hd44780_driver);
+MODULE_DESCRIPTION("HD44780 Character LCD driver");
+MODULE_AUTHOR("Geert Uytterhoeven <geert@linux-m68k.org>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/auxdisplay/ht16k33.c b/drivers/auxdisplay/ht16k33.c
index f66b45b235b0..fbfa5b4cc567 100644
--- a/drivers/auxdisplay/ht16k33.c
+++ b/drivers/auxdisplay/ht16k33.c
@@ -254,18 +254,22 @@ static bool ht16k33_keypad_scan(struct ht16k33_keypad *keypad)
{
const unsigned short *keycodes = keypad->dev->keycode;
u16 new_state[HT16K33_MATRIX_KEYPAD_MAX_COLS];
- u8 data[HT16K33_MATRIX_KEYPAD_MAX_COLS * 2];
+ __le16 data[HT16K33_MATRIX_KEYPAD_MAX_COLS];
unsigned long bits_changed;
int row, col, code;
+ int rc;
bool pressed = false;
- if (i2c_smbus_read_i2c_block_data(keypad->client, 0x40, 6, data) != 6) {
- dev_err(&keypad->client->dev, "Failed to read key data\n");
+ rc = i2c_smbus_read_i2c_block_data(keypad->client, 0x40,
+ sizeof(data), (u8 *)data);
+ if (rc != sizeof(data)) {
+ dev_err(&keypad->client->dev,
+ "Failed to read key data, rc=%d\n", rc);
return false;
}
for (col = 0; col < keypad->cols; col++) {
- new_state[col] = (data[col * 2 + 1] << 8) | data[col * 2];
+ new_state[col] = le16_to_cpu(data[col]);
if (new_state[col])
pressed = true;
bits_changed = keypad->last_key_state[col] ^ new_state[col];
@@ -278,7 +282,7 @@ static bool ht16k33_keypad_scan(struct ht16k33_keypad *keypad)
}
}
input_sync(keypad->dev);
- memcpy(keypad->last_key_state, new_state, sizeof(new_state));
+ memcpy(keypad->last_key_state, new_state, sizeof(u16) * keypad->cols);
return pressed;
}
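A small standalone sketch of the equivalence this hunk relies on: the chip returns little-endian 16-bit words, so le16_to_cpu() on the new __le16 array yields the same value as the manual byte assembly it replaces (byte values below are arbitrary examples):

	#include <stdint.h>
	#include <stdio.h>

	/* Illustrative stand-in for the kernel's le16_to_cpu() on any host */
	static uint16_t example_le16_to_cpu(const uint8_t *p)
	{
		return (uint16_t)p[0] | ((uint16_t)p[1] << 8);
	}

	int main(void)
	{
		/* Two raw bytes as read from the chip for one column (example values) */
		uint8_t data[2] = { 0x34, 0x12 };

		uint16_t old_way = (data[1] << 8) | data[0];	/* previous manual assembly */
		uint16_t new_way = example_le16_to_cpu(data);	/* what le16_to_cpu() yields */

		printf("old=0x%04x new=0x%04x\n", old_way, new_way);	/* both 0x1234 */
		return 0;
	}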
@@ -353,6 +357,12 @@ static int ht16k33_keypad_probe(struct i2c_client *client,
err = matrix_keypad_parse_of_params(&client->dev, &rows, &cols);
if (err)
return err;
+ if (rows > HT16K33_MATRIX_KEYPAD_MAX_ROWS ||
+ cols > HT16K33_MATRIX_KEYPAD_MAX_COLS) {
+ dev_err(&client->dev, "%u rows or %u cols out of range in DT\n",
+ rows, cols);
+ return -ERANGE;
+ }
keypad->rows = rows;
keypad->cols = cols;
diff --git a/drivers/auxdisplay/img-ascii-lcd.c b/drivers/auxdisplay/img-ascii-lcd.c
index 83f1439e57fd..25306fa27251 100644
--- a/drivers/auxdisplay/img-ascii-lcd.c
+++ b/drivers/auxdisplay/img-ascii-lcd.c
@@ -220,6 +220,7 @@ static const struct of_device_id img_ascii_lcd_matches[] = {
{ .compatible = "mti,sead3-lcd", .data = &sead3_config },
{ /* sentinel */ }
};
+MODULE_DEVICE_TABLE(of, img_ascii_lcd_matches);
/**
* img_ascii_lcd_scroll() - scroll the display by a character
diff --git a/drivers/auxdisplay/panel.c b/drivers/auxdisplay/panel.c
new file mode 100644
index 000000000000..e0c014c2356f
--- /dev/null
+++ b/drivers/auxdisplay/panel.c
@@ -0,0 +1,1796 @@
+/*
+ * Front panel driver for Linux
+ * Copyright (C) 2000-2008, Willy Tarreau <w@1wt.eu>
+ * Copyright (C) 2016-2017 Glider bvba
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * This code drives an LCD module (/dev/lcd), and a keypad (/dev/keypad)
+ * connected to a parallel printer port.
+ *
+ * The LCD module may either be an HD44780-like 8-bit parallel LCD, or a 1-bit
+ * serial module compatible with Samsung's KS0074. The pins may be connected in
+ * any combination, everything is programmable.
+ *
+ * The keypad consists of a matrix of push buttons connecting input pins to
+ * data output pins or to ground. The combinations have to be hard-coded in
+ * the driver, though several profiles exist and adding new ones is easy.
+ *
+ * Several profiles are provided for commonly found LCD+keypad modules on the
+ * market, such as those found in Nexcom's appliances.
+ *
+ * FIXME:
+ * - the initialization/deinitialization process is very dirty and should
+ * be rewritten. It may even be buggy.
+ *
+ * TODO:
+ * - document the 24-key keyboard (3 rows of 8 cols, 32 diodes + 2 inputs)
+ * - make the LCD a part of a virtual screen of Vx*Vy
+ * - make the inputs list smp-safe
+ * - change the keyboard to a double mapping: signals -> key_id -> values
+ * so that applications can change values without knowing signals
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/miscdevice.h>
+#include <linux/slab.h>
+#include <linux/ioport.h>
+#include <linux/fcntl.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/ctype.h>
+#include <linux/parport.h>
+#include <linux/list.h>
+
+#include <linux/io.h>
+#include <linux/uaccess.h>
+
+#include <misc/charlcd.h>
+
+#define KEYPAD_MINOR 185
+
+#define LCD_MAXBYTES 256 /* max burst write */
+
+#define KEYPAD_BUFFER 64
+
+/* keyboard poll interval, in jiffies (50 polls per second) */
+#define INPUT_POLL_TIME (HZ / 50)
+/* a key starts repeating after this many INPUT_POLL_TIME intervals */
+#define KEYPAD_REP_START (10)
+/* a held key then repeats every this many INPUT_POLL_TIME intervals */
+#define KEYPAD_REP_DELAY (2)
+
+/* converts an r_str() input to an active-high bit string: 000BAOSE */
+#define PNL_PINPUT(a) ((((unsigned char)(a)) ^ 0x7F) >> 3)
+
+#define PNL_PBUSY 0x80 /* inverted input, active low */
+#define PNL_PACK 0x40 /* direct input, active low */
+#define PNL_POUTPA 0x20 /* direct input, active high */
+#define PNL_PSELECD 0x10 /* direct input, active high */
+#define PNL_PERRORP 0x08 /* direct input, active low */
+
+#define PNL_PBIDIR 0x20 /* bi-directional ports */
+/* high to read data in or-ed with data out */
+#define PNL_PINTEN 0x10
+#define PNL_PSELECP 0x08 /* inverted output, active low */
+#define PNL_PINITP 0x04 /* direct output, active low */
+#define PNL_PAUTOLF 0x02 /* inverted output, active low */
+#define PNL_PSTROBE 0x01 /* inverted output */
+
+#define PNL_PD0 0x01
+#define PNL_PD1 0x02
+#define PNL_PD2 0x04
+#define PNL_PD3 0x08
+#define PNL_PD4 0x10
+#define PNL_PD5 0x20
+#define PNL_PD6 0x40
+#define PNL_PD7 0x80
+
+#define PIN_NONE 0
+#define PIN_STROBE 1
+#define PIN_D0 2
+#define PIN_D1 3
+#define PIN_D2 4
+#define PIN_D3 5
+#define PIN_D4 6
+#define PIN_D5 7
+#define PIN_D6 8
+#define PIN_D7 9
+#define PIN_AUTOLF 14
+#define PIN_INITP 16
+#define PIN_SELECP 17
+#define PIN_NOT_SET 127
+
+#define NOT_SET -1
+
+/* macros to simplify use of the parallel port */
+#define r_ctr(x) (parport_read_control((x)->port))
+#define r_dtr(x) (parport_read_data((x)->port))
+#define r_str(x) (parport_read_status((x)->port))
+#define w_ctr(x, y) (parport_write_control((x)->port, (y)))
+#define w_dtr(x, y) (parport_write_data((x)->port, (y)))
+
+/* this defines which bits are to be used and which ones to be ignored */
+/* logical or of the output bits involved in the scan matrix */
+static __u8 scan_mask_o;
+/* logical or of the input bits involved in the scan matrix */
+static __u8 scan_mask_i;
+
+enum input_type {
+ INPUT_TYPE_STD,
+ INPUT_TYPE_KBD,
+};
+
+enum input_state {
+ INPUT_ST_LOW,
+ INPUT_ST_RISING,
+ INPUT_ST_HIGH,
+ INPUT_ST_FALLING,
+};
+
+struct logical_input {
+ struct list_head list;
+ __u64 mask;
+ __u64 value;
+ enum input_type type;
+ enum input_state state;
+ __u8 rise_time, fall_time;
+ __u8 rise_timer, fall_timer, high_timer;
+
+ union {
+ struct { /* valid when type == INPUT_TYPE_STD */
+ void (*press_fct)(int);
+ void (*release_fct)(int);
+ int press_data;
+ int release_data;
+ } std;
+ struct { /* valid when type == INPUT_TYPE_KBD */
+ /* strings are not necessarily null-terminated */
+ char press_str[sizeof(void *) + sizeof(int)];
+ char repeat_str[sizeof(void *) + sizeof(int)];
+ char release_str[sizeof(void *) + sizeof(int)];
+ } kbd;
+ } u;
+};
+
+static LIST_HEAD(logical_inputs); /* list of all defined logical inputs */
+
+/* physical contacts history
+ * Physical contacts are a 45-bit string made of 9 groups of 5 bits each.
+ * The 8 lower groups correspond to output bits 0 to 7, and the 9th group
+ * corresponds to the ground.
+ * Within each group, bits are stored in the same order as read on the port:
+ * BAPSE (busy=4, ack=3, paper empty=2, select=1, error=0).
+ * So, each __u64 is represented like this:
+ * 0000000000000000000BAPSEBAPSEBAPSEBAPSEBAPSEBAPSEBAPSEBAPSEBAPSE
+ * <-----unused------><gnd><d07><d06><d05><d04><d03><d02><d01><d00>
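+ *
+ * For example, a contact wired from data output D3 to the ACK input lands on
+ * bit 3 * 5 + 3 = 18, and contacts tied directly to ground use the 9th group
+ * (bits 40 to 44); see input_name2mask().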
+ */
+
+/* what has just been read from the I/O ports */
+static __u64 phys_read;
+/* previous phys_read */
+static __u64 phys_read_prev;
+/* stabilized phys_read (phys_read|phys_read_prev) */
+static __u64 phys_curr;
+/* previous phys_curr */
+static __u64 phys_prev;
+/* 0 means that at least one logical signal needs to be computed */
+static char inputs_stable;
+
+/* these variables are specific to the keypad */
+static struct {
+ bool enabled;
+} keypad;
+
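+/*
+ * Circular buffer of pending keypad characters: keypad_start is the read
+ * index and keypad_buflen the number of buffered bytes; it is filled by
+ * keypad_send_key() and drained by keypad_read().
+ */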
+static char keypad_buffer[KEYPAD_BUFFER];
+static int keypad_buflen;
+static int keypad_start;
+static char keypressed;
+static wait_queue_head_t keypad_read_wait;
+
+/* lcd-specific variables */
+static struct {
+ bool enabled;
+ bool initialized;
+
+ int charset;
+ int proto;
+
+ /* TODO: use union here? */
+ struct {
+ int e;
+ int rs;
+ int rw;
+ int cl;
+ int da;
+ int bl;
+ } pins;
+
+ struct charlcd *charlcd;
+} lcd;
+
+/* Needed only for init */
+static int selected_lcd_type = NOT_SET;
+
+/*
+ * Bit masks to convert LCD signals to parallel port outputs.
+ * _d_ are values for data port, _c_ are for control port.
+ * [0] = signal OFF, [1] = signal ON, [2] = mask
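+ *
+ * lcd_get_bits() below applies, for each signal:
+ *   val = (val & lcd_bits[port][bit][BIT_MSK]) | lcd_bits[port][bit][state]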
+ */
+#define BIT_CLR 0
+#define BIT_SET 1
+#define BIT_MSK 2
+#define BIT_STATES 3
+/*
+ * one entry for each bit on the LCD
+ */
+#define LCD_BIT_E 0
+#define LCD_BIT_RS 1
+#define LCD_BIT_RW 2
+#define LCD_BIT_BL 3
+#define LCD_BIT_CL 4
+#define LCD_BIT_DA 5
+#define LCD_BITS 6
+
+/*
+ * each bit can be either connected to a DATA or CTRL port
+ */
+#define LCD_PORT_C 0
+#define LCD_PORT_D 1
+#define LCD_PORTS 2
+
+static unsigned char lcd_bits[LCD_PORTS][LCD_BITS][BIT_STATES];
+
+/*
+ * LCD protocols
+ */
+#define LCD_PROTO_PARALLEL 0
+#define LCD_PROTO_SERIAL 1
+#define LCD_PROTO_TI_DA8XX_LCD 2
+
+/*
+ * LCD character sets
+ */
+#define LCD_CHARSET_NORMAL 0
+#define LCD_CHARSET_KS0074 1
+
+/*
+ * LCD types
+ */
+#define LCD_TYPE_NONE 0
+#define LCD_TYPE_CUSTOM 1
+#define LCD_TYPE_OLD 2
+#define LCD_TYPE_KS0074 3
+#define LCD_TYPE_HANTRONIX 4
+#define LCD_TYPE_NEXCOM 5
+
+/*
+ * keypad types
+ */
+#define KEYPAD_TYPE_NONE 0
+#define KEYPAD_TYPE_OLD 1
+#define KEYPAD_TYPE_NEW 2
+#define KEYPAD_TYPE_NEXCOM 3
+
+/*
+ * panel profiles
+ */
+#define PANEL_PROFILE_CUSTOM 0
+#define PANEL_PROFILE_OLD 1
+#define PANEL_PROFILE_NEW 2
+#define PANEL_PROFILE_HANTRONIX 3
+#define PANEL_PROFILE_NEXCOM 4
+#define PANEL_PROFILE_LARGE 5
+
+/*
+ * Construct custom config from the kernel's configuration
+ */
+#define DEFAULT_PARPORT 0
+#define DEFAULT_PROFILE PANEL_PROFILE_LARGE
+#define DEFAULT_KEYPAD_TYPE KEYPAD_TYPE_OLD
+#define DEFAULT_LCD_TYPE LCD_TYPE_OLD
+#define DEFAULT_LCD_HEIGHT 2
+#define DEFAULT_LCD_WIDTH 40
+#define DEFAULT_LCD_BWIDTH 40
+#define DEFAULT_LCD_HWIDTH 64
+#define DEFAULT_LCD_CHARSET LCD_CHARSET_NORMAL
+#define DEFAULT_LCD_PROTO LCD_PROTO_PARALLEL
+
+#define DEFAULT_LCD_PIN_E PIN_AUTOLF
+#define DEFAULT_LCD_PIN_RS PIN_SELECP
+#define DEFAULT_LCD_PIN_RW PIN_INITP
+#define DEFAULT_LCD_PIN_SCL PIN_STROBE
+#define DEFAULT_LCD_PIN_SDA PIN_D0
+#define DEFAULT_LCD_PIN_BL PIN_NOT_SET
+
+#ifdef CONFIG_PANEL_PARPORT
+#undef DEFAULT_PARPORT
+#define DEFAULT_PARPORT CONFIG_PANEL_PARPORT
+#endif
+
+#ifdef CONFIG_PANEL_PROFILE
+#undef DEFAULT_PROFILE
+#define DEFAULT_PROFILE CONFIG_PANEL_PROFILE
+#endif
+
+#if DEFAULT_PROFILE == 0 /* custom */
+#ifdef CONFIG_PANEL_KEYPAD
+#undef DEFAULT_KEYPAD_TYPE
+#define DEFAULT_KEYPAD_TYPE CONFIG_PANEL_KEYPAD
+#endif
+
+#ifdef CONFIG_PANEL_LCD
+#undef DEFAULT_LCD_TYPE
+#define DEFAULT_LCD_TYPE CONFIG_PANEL_LCD
+#endif
+
+#ifdef CONFIG_PANEL_LCD_HEIGHT
+#undef DEFAULT_LCD_HEIGHT
+#define DEFAULT_LCD_HEIGHT CONFIG_PANEL_LCD_HEIGHT
+#endif
+
+#ifdef CONFIG_PANEL_LCD_WIDTH
+#undef DEFAULT_LCD_WIDTH
+#define DEFAULT_LCD_WIDTH CONFIG_PANEL_LCD_WIDTH
+#endif
+
+#ifdef CONFIG_PANEL_LCD_BWIDTH
+#undef DEFAULT_LCD_BWIDTH
+#define DEFAULT_LCD_BWIDTH CONFIG_PANEL_LCD_BWIDTH
+#endif
+
+#ifdef CONFIG_PANEL_LCD_HWIDTH
+#undef DEFAULT_LCD_HWIDTH
+#define DEFAULT_LCD_HWIDTH CONFIG_PANEL_LCD_HWIDTH
+#endif
+
+#ifdef CONFIG_PANEL_LCD_CHARSET
+#undef DEFAULT_LCD_CHARSET
+#define DEFAULT_LCD_CHARSET CONFIG_PANEL_LCD_CHARSET
+#endif
+
+#ifdef CONFIG_PANEL_LCD_PROTO
+#undef DEFAULT_LCD_PROTO
+#define DEFAULT_LCD_PROTO CONFIG_PANEL_LCD_PROTO
+#endif
+
+#ifdef CONFIG_PANEL_LCD_PIN_E
+#undef DEFAULT_LCD_PIN_E
+#define DEFAULT_LCD_PIN_E CONFIG_PANEL_LCD_PIN_E
+#endif
+
+#ifdef CONFIG_PANEL_LCD_PIN_RS
+#undef DEFAULT_LCD_PIN_RS
+#define DEFAULT_LCD_PIN_RS CONFIG_PANEL_LCD_PIN_RS
+#endif
+
+#ifdef CONFIG_PANEL_LCD_PIN_RW
+#undef DEFAULT_LCD_PIN_RW
+#define DEFAULT_LCD_PIN_RW CONFIG_PANEL_LCD_PIN_RW
+#endif
+
+#ifdef CONFIG_PANEL_LCD_PIN_SCL
+#undef DEFAULT_LCD_PIN_SCL
+#define DEFAULT_LCD_PIN_SCL CONFIG_PANEL_LCD_PIN_SCL
+#endif
+
+#ifdef CONFIG_PANEL_LCD_PIN_SDA
+#undef DEFAULT_LCD_PIN_SDA
+#define DEFAULT_LCD_PIN_SDA CONFIG_PANEL_LCD_PIN_SDA
+#endif
+
+#ifdef CONFIG_PANEL_LCD_PIN_BL
+#undef DEFAULT_LCD_PIN_BL
+#define DEFAULT_LCD_PIN_BL CONFIG_PANEL_LCD_PIN_BL
+#endif
+
+#endif /* DEFAULT_PROFILE == 0 */
+
+/* global variables */
+
+/* Device single-open policy control */
+static atomic_t keypad_available = ATOMIC_INIT(1);
+
+static struct pardevice *pprt;
+
+static int keypad_initialized;
+
+static DEFINE_SPINLOCK(pprt_lock);
+static struct timer_list scan_timer;
+
+MODULE_DESCRIPTION("Generic parallel port LCD/Keypad driver");
+
+static int parport = DEFAULT_PARPORT;
+module_param(parport, int, 0000);
+MODULE_PARM_DESC(parport, "Parallel port index (0=lpt1, 1=lpt2, ...)");
+
+static int profile = DEFAULT_PROFILE;
+module_param(profile, int, 0000);
+MODULE_PARM_DESC(profile,
+ "1=16x2 old kp; 2=serial 16x2, new kp; 3=16x2 hantronix; "
+ "4=16x2 nexcom; default=40x2, old kp");
+
+static int keypad_type = NOT_SET;
+module_param(keypad_type, int, 0000);
+MODULE_PARM_DESC(keypad_type,
+ "Keypad type: 0=none, 1=old 6 keys, 2=new 6+1 keys, 3=nexcom 4 keys");
+
+static int lcd_type = NOT_SET;
+module_param(lcd_type, int, 0000);
+MODULE_PARM_DESC(lcd_type,
+ "LCD type: 0=none, 1=compiled-in, 2=old, 3=serial ks0074, 4=hantronix, 5=nexcom");
+
+static int lcd_height = NOT_SET;
+module_param(lcd_height, int, 0000);
+MODULE_PARM_DESC(lcd_height, "Number of lines on the LCD");
+
+static int lcd_width = NOT_SET;
+module_param(lcd_width, int, 0000);
+MODULE_PARM_DESC(lcd_width, "Number of columns on the LCD");
+
+static int lcd_bwidth = NOT_SET; /* internal buffer width (usually 40) */
+module_param(lcd_bwidth, int, 0000);
+MODULE_PARM_DESC(lcd_bwidth, "Internal LCD line width (40)");
+
+static int lcd_hwidth = NOT_SET; /* hardware buffer width (usually 64) */
+module_param(lcd_hwidth, int, 0000);
+MODULE_PARM_DESC(lcd_hwidth, "LCD line hardware address (64)");
+
+static int lcd_charset = NOT_SET;
+module_param(lcd_charset, int, 0000);
+MODULE_PARM_DESC(lcd_charset, "LCD character set: 0=standard, 1=KS0074");
+
+static int lcd_proto = NOT_SET;
+module_param(lcd_proto, int, 0000);
+MODULE_PARM_DESC(lcd_proto,
+ "LCD communication: 0=parallel (//), 1=serial, 2=TI LCD Interface");
+
+/*
+ * These are the parallel port pins the LCD control signals are connected to.
+ * Set a pin to 0 if the signal is not used, and to a negative value if the
+ * signal is negated. PIN_NOT_SET is used to indicate that the pin has not
+ * been explicitly specified.
+ *
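+ * For example, lcd_rs_pin=17 routes RS to parallel port pin 17 (SELECT IN),
+ * while lcd_rs_pin=-17 does the same with inverted polarity.
+ *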
+ * WARNING! No check is performed for collisions with the keypad!
+ */
+
+static int lcd_e_pin = PIN_NOT_SET;
+module_param(lcd_e_pin, int, 0000);
+MODULE_PARM_DESC(lcd_e_pin,
+ "# of the // port pin connected to LCD 'E' signal, with polarity (-17..17)");
+
+static int lcd_rs_pin = PIN_NOT_SET;
+module_param(lcd_rs_pin, int, 0000);
+MODULE_PARM_DESC(lcd_rs_pin,
+ "# of the // port pin connected to LCD 'RS' signal, with polarity (-17..17)");
+
+static int lcd_rw_pin = PIN_NOT_SET;
+module_param(lcd_rw_pin, int, 0000);
+MODULE_PARM_DESC(lcd_rw_pin,
+ "# of the // port pin connected to LCD 'RW' signal, with polarity (-17..17)");
+
+static int lcd_cl_pin = PIN_NOT_SET;
+module_param(lcd_cl_pin, int, 0000);
+MODULE_PARM_DESC(lcd_cl_pin,
+ "# of the // port pin connected to serial LCD 'SCL' signal, with polarity (-17..17)");
+
+static int lcd_da_pin = PIN_NOT_SET;
+module_param(lcd_da_pin, int, 0000);
+MODULE_PARM_DESC(lcd_da_pin,
+ "# of the // port pin connected to serial LCD 'SDA' signal, with polarity (-17..17)");
+
+static int lcd_bl_pin = PIN_NOT_SET;
+module_param(lcd_bl_pin, int, 0000);
+MODULE_PARM_DESC(lcd_bl_pin,
+ "# of the // port pin connected to LCD backlight, with polarity (-17..17)");
+
+/* Deprecated module parameters - consider not using them anymore */
+
+static int lcd_enabled = NOT_SET;
+module_param(lcd_enabled, int, 0000);
+MODULE_PARM_DESC(lcd_enabled, "Deprecated option, use lcd_type instead");
+
+static int keypad_enabled = NOT_SET;
+module_param(keypad_enabled, int, 0000);
+MODULE_PARM_DESC(keypad_enabled, "Deprecated option, use keypad_type instead");
+
+/* for some LCD drivers (ks0074) we need a charset conversion table. */
+static const unsigned char lcd_char_conv_ks0074[256] = {
+ /* 0|8 1|9 2|A 3|B 4|C 5|D 6|E 7|F */
+ /* 0x00 */ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ /* 0x08 */ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ /* 0x10 */ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ /* 0x18 */ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+ /* 0x20 */ 0x20, 0x21, 0x22, 0x23, 0xa2, 0x25, 0x26, 0x27,
+ /* 0x28 */ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
+ /* 0x30 */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+ /* 0x38 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
+ /* 0x40 */ 0xa0, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
+ /* 0x48 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
+ /* 0x50 */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57,
+ /* 0x58 */ 0x58, 0x59, 0x5a, 0xfa, 0xfb, 0xfc, 0x1d, 0xc4,
+ /* 0x60 */ 0x96, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
+ /* 0x68 */ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
+ /* 0x70 */ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
+ /* 0x78 */ 0x78, 0x79, 0x7a, 0xfd, 0xfe, 0xff, 0xce, 0x20,
+ /* 0x80 */ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ /* 0x88 */ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ /* 0x90 */ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ /* 0x98 */ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f,
+ /* 0xA0 */ 0x20, 0x40, 0xb1, 0xa1, 0x24, 0xa3, 0xfe, 0x5f,
+ /* 0xA8 */ 0x22, 0xc8, 0x61, 0x14, 0x97, 0x2d, 0xad, 0x96,
+ /* 0xB0 */ 0x80, 0x8c, 0x82, 0x83, 0x27, 0x8f, 0x86, 0xdd,
+ /* 0xB8 */ 0x2c, 0x81, 0x6f, 0x15, 0x8b, 0x8a, 0x84, 0x60,
+ /* 0xC0 */ 0xe2, 0xe2, 0xe2, 0x5b, 0x5b, 0xae, 0xbc, 0xa9,
+ /* 0xC8 */ 0xc5, 0xbf, 0xc6, 0xf1, 0xe3, 0xe3, 0xe3, 0xe3,
+ /* 0xD0 */ 0x44, 0x5d, 0xa8, 0xe4, 0xec, 0xec, 0x5c, 0x78,
+ /* 0xD8 */ 0xab, 0xa6, 0xe5, 0x5e, 0x5e, 0xe6, 0xaa, 0xbe,
+ /* 0xE0 */ 0x7f, 0xe7, 0xaf, 0x7b, 0x7b, 0xaf, 0xbd, 0xc8,
+ /* 0xE8 */ 0xa4, 0xa5, 0xc7, 0xf6, 0xa7, 0xe8, 0x69, 0x69,
+ /* 0xF0 */ 0xed, 0x7d, 0xa8, 0xe4, 0xec, 0x5c, 0x5c, 0x25,
+ /* 0xF8 */ 0xac, 0xa6, 0xea, 0xef, 0x7e, 0xeb, 0xb2, 0x79,
+};
+
+static const char old_keypad_profile[][4][9] = {
+ {"S0", "Left\n", "Left\n", ""},
+ {"S1", "Down\n", "Down\n", ""},
+ {"S2", "Up\n", "Up\n", ""},
+ {"S3", "Right\n", "Right\n", ""},
+ {"S4", "Esc\n", "Esc\n", ""},
+ {"S5", "Ret\n", "Ret\n", ""},
+ {"", "", "", ""}
+};
+
+/* signals, press, repeat, release */
+static const char new_keypad_profile[][4][9] = {
+ {"S0", "Left\n", "Left\n", ""},
+ {"S1", "Down\n", "Down\n", ""},
+ {"S2", "Up\n", "Up\n", ""},
+ {"S3", "Right\n", "Right\n", ""},
+ {"S4s5", "", "Esc\n", "Esc\n"},
+ {"s4S5", "", "Ret\n", "Ret\n"},
+ {"S4S5", "Help\n", "", ""},
+ /* add new signals above this line */
+ {"", "", "", ""}
+};
+
+/* signals, press, repeat, release */
+static const char nexcom_keypad_profile[][4][9] = {
+ {"a-p-e-", "Down\n", "Down\n", ""},
+ {"a-p-E-", "Ret\n", "Ret\n", ""},
+ {"a-P-E-", "Esc\n", "Esc\n", ""},
+ {"a-P-e-", "Up\n", "Up\n", ""},
+ /* add new signals above this line */
+ {"", "", "", ""}
+};
+
+static const char (*keypad_profile)[4][9] = old_keypad_profile;
+
+static DECLARE_BITMAP(bits, LCD_BITS);
+
+static void lcd_get_bits(unsigned int port, int *val)
+{
+ unsigned int bit, state;
+
+ for (bit = 0; bit < LCD_BITS; bit++) {
+ state = test_bit(bit, bits) ? BIT_SET : BIT_CLR;
+ *val &= lcd_bits[port][bit][BIT_MSK];
+ *val |= lcd_bits[port][bit][state];
+ }
+}
+
+/* sets data port bits according to current signal values */
+static int set_data_bits(void)
+{
+ int val;
+
+ val = r_dtr(pprt);
+ lcd_get_bits(LCD_PORT_D, &val);
+ w_dtr(pprt, val);
+ return val;
+}
+
+/* sets ctrl port bits according to current signal values */
+static int set_ctrl_bits(void)
+{
+ int val;
+
+ val = r_ctr(pprt);
+ lcd_get_bits(LCD_PORT_C, &val);
+ w_ctr(pprt, val);
+ return val;
+}
+
+/* sets ctrl & data port bits according to current signal values */
+static void panel_set_bits(void)
+{
+ set_data_bits();
+ set_ctrl_bits();
+}
+
+/*
+ * Converts a parallel port pin (from -25 to 25) to data and control port
+ * masks and bits. The signal will be considered unconnected if it's on pin 0
+ * or an invalid pin (< -25 or > 25).
+ *
+ * The result will be used this way:
+ * out(dport, in(dport) & d_val[2] | d_val[signal_state])
+ * out(cport, in(cport) & c_val[2] | c_val[signal_state])
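+ *
+ * For example, pin 3 (D1) gives d_val = { 0x00, 0x02, 0xFD } and leaves c_val
+ * at the neutral { 0x00, 0x00, 0xFF }, while the negated pin -3 gives
+ * d_val = { 0x02, 0x00, 0xFD }.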
+ */
+static void pin_to_bits(int pin, unsigned char *d_val, unsigned char *c_val)
+{
+ int d_bit, c_bit, inv;
+
+ d_val[0] = 0;
+ c_val[0] = 0;
+ d_val[1] = 0;
+ c_val[1] = 0;
+ d_val[2] = 0xFF;
+ c_val[2] = 0xFF;
+
+ if (pin == 0)
+ return;
+
+ inv = (pin < 0);
+ if (inv)
+ pin = -pin;
+
+ d_bit = 0;
+ c_bit = 0;
+
+ switch (pin) {
+ case PIN_STROBE: /* strobe, inverted */
+ c_bit = PNL_PSTROBE;
+ inv = !inv;
+ break;
+ case PIN_D0...PIN_D7: /* D0 - D7 = 2 - 9 */
+ d_bit = 1 << (pin - 2);
+ break;
+ case PIN_AUTOLF: /* autofeed, inverted */
+ c_bit = PNL_PAUTOLF;
+ inv = !inv;
+ break;
+ case PIN_INITP: /* init, direct */
+ c_bit = PNL_PINITP;
+ break;
+ case PIN_SELECP: /* select_in, inverted */
+ c_bit = PNL_PSELECP;
+ inv = !inv;
+ break;
+ default: /* unknown pin, ignore */
+ break;
+ }
+
+ if (c_bit) {
+ c_val[2] &= ~c_bit;
+ c_val[!inv] = c_bit;
+ } else if (d_bit) {
+ d_val[2] &= ~d_bit;
+ d_val[!inv] = d_bit;
+ }
+}
+
+/*
+ * send a serial byte to the LCD panel. The caller is responsible for locking
+ * if needed.
+ */
+static void lcd_send_serial(int byte)
+{
+ int bit;
+
+ /*
+ * the data bit is set on D0, and the clock on STROBE.
+ * LCD reads D0 on STROBE's rising edge.
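+ * Bytes are shifted out LSB first (the callers send the low nibble of
+ * each command/data byte first).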
+ */
+ for (bit = 0; bit < 8; bit++) {
+ clear_bit(LCD_BIT_CL, bits); /* CLK low */
+ panel_set_bits();
+ if (byte & 1) {
+ set_bit(LCD_BIT_DA, bits);
+ } else {
+ clear_bit(LCD_BIT_DA, bits);
+ }
+
+ panel_set_bits();
+ udelay(2); /* maintain the data during 2 us before CLK up */
+ set_bit(LCD_BIT_CL, bits); /* CLK high */
+ panel_set_bits();
+ udelay(1); /* maintain the strobe during 1 us */
+ byte >>= 1;
+ }
+}
+
+/* turn the backlight on or off */
+static void lcd_backlight(struct charlcd *charlcd, int on)
+{
+ if (lcd.pins.bl == PIN_NONE)
+ return;
+
+ /* The backlight is activated by driving the configured BL pin high */
+ spin_lock_irq(&pprt_lock);
+ if (on)
+ set_bit(LCD_BIT_BL, bits);
+ else
+ clear_bit(LCD_BIT_BL, bits);
+ panel_set_bits();
+ spin_unlock_irq(&pprt_lock);
+}
+
+/* send a command to the LCD panel in serial mode */
+static void lcd_write_cmd_s(struct charlcd *charlcd, int cmd)
+{
+ spin_lock_irq(&pprt_lock);
+ lcd_send_serial(0x1F); /* R/W=W, RS=0 */
+ lcd_send_serial(cmd & 0x0F);
+ lcd_send_serial((cmd >> 4) & 0x0F);
+ udelay(40); /* the shortest command takes at least 40 us */
+ spin_unlock_irq(&pprt_lock);
+}
+
+/* send data to the LCD panel in serial mode */
+static void lcd_write_data_s(struct charlcd *charlcd, int data)
+{
+ spin_lock_irq(&pprt_lock);
+ lcd_send_serial(0x5F); /* R/W=W, RS=1 */
+ lcd_send_serial(data & 0x0F);
+ lcd_send_serial((data >> 4) & 0x0F);
+ udelay(40); /* the shortest data takes at least 40 us */
+ spin_unlock_irq(&pprt_lock);
+}
+
+/* send a command to the LCD panel in 8 bits parallel mode */
+static void lcd_write_cmd_p8(struct charlcd *charlcd, int cmd)
+{
+ spin_lock_irq(&pprt_lock);
+ /* present the data to the data port */
+ w_dtr(pprt, cmd);
+ udelay(20); /* maintain the data during 20 us before the strobe */
+
+ set_bit(LCD_BIT_E, bits);
+ clear_bit(LCD_BIT_RS, bits);
+ clear_bit(LCD_BIT_RW, bits);
+ set_ctrl_bits();
+
+ udelay(40); /* maintain the strobe during 40 us */
+
+ clear_bit(LCD_BIT_E, bits);
+ set_ctrl_bits();
+
+ udelay(120); /* the shortest command takes at least 120 us */
+ spin_unlock_irq(&pprt_lock);
+}
+
+/* send data to the LCD panel in 8 bits parallel mode */
+static void lcd_write_data_p8(struct charlcd *charlcd, int data)
+{
+ spin_lock_irq(&pprt_lock);
+ /* present the data to the data port */
+ w_dtr(pprt, data);
+ udelay(20); /* maintain the data during 20 us before the strobe */
+
+ set_bit(LCD_BIT_E, bits);
+ set_bit(LCD_BIT_RS, bits);
+ clear_bit(LCD_BIT_RW, bits);
+ set_ctrl_bits();
+
+ udelay(40); /* maintain the strobe during 40 us */
+
+ clear_bit(LCD_BIT_E, bits);
+ set_ctrl_bits();
+
+ udelay(45); /* the shortest data takes at least 45 us */
+ spin_unlock_irq(&pprt_lock);
+}
+
+/* send a command to the TI LCD panel */
+static void lcd_write_cmd_tilcd(struct charlcd *charlcd, int cmd)
+{
+ spin_lock_irq(&pprt_lock);
+ /* present the data to the control port */
+ w_ctr(pprt, cmd);
+ udelay(60);
+ spin_unlock_irq(&pprt_lock);
+}
+
+/* send data to the TI LCD panel */
+static void lcd_write_data_tilcd(struct charlcd *charlcd, int data)
+{
+ spin_lock_irq(&pprt_lock);
+ /* present the data to the data port */
+ w_dtr(pprt, data);
+ udelay(60);
+ spin_unlock_irq(&pprt_lock);
+}
+
+/* fills the display with spaces and resets X/Y */
+static void lcd_clear_fast_s(struct charlcd *charlcd)
+{
+ int pos;
+
+ spin_lock_irq(&pprt_lock);
+ for (pos = 0; pos < charlcd->height * charlcd->hwidth; pos++) {
+ lcd_send_serial(0x5F); /* R/W=W, RS=1 */
+ lcd_send_serial(' ' & 0x0F);
+ lcd_send_serial((' ' >> 4) & 0x0F);
+ /* the shortest data takes at least 40 us */
+ udelay(40);
+ }
+ spin_unlock_irq(&pprt_lock);
+}
+
+/* fills the display with spaces and resets X/Y */
+static void lcd_clear_fast_p8(struct charlcd *charlcd)
+{
+ int pos;
+
+ spin_lock_irq(&pprt_lock);
+ for (pos = 0; pos < charlcd->height * charlcd->hwidth; pos++) {
+ /* present the data to the data port */
+ w_dtr(pprt, ' ');
+
+ /* maintain the data during 20 us before the strobe */
+ udelay(20);
+
+ set_bit(LCD_BIT_E, bits);
+ set_bit(LCD_BIT_RS, bits);
+ clear_bit(LCD_BIT_RW, bits);
+ set_ctrl_bits();
+
+ /* maintain the strobe during 40 us */
+ udelay(40);
+
+ clear_bit(LCD_BIT_E, bits);
+ set_ctrl_bits();
+
+ /* the shortest data takes at least 45 us */
+ udelay(45);
+ }
+ spin_unlock_irq(&pprt_lock);
+}
+
+/* fills the display with spaces and resets X/Y */
+static void lcd_clear_fast_tilcd(struct charlcd *charlcd)
+{
+ int pos;
+
+ spin_lock_irq(&pprt_lock);
+ for (pos = 0; pos < charlcd->height * charlcd->hwidth; pos++) {
+ /* present the data to the data port */
+ w_dtr(pprt, ' ');
+ udelay(60);
+ }
+
+ spin_unlock_irq(&pprt_lock);
+}
+
+static struct charlcd_ops charlcd_serial_ops = {
+ .write_cmd = lcd_write_cmd_s,
+ .write_data = lcd_write_data_s,
+ .clear_fast = lcd_clear_fast_s,
+ .backlight = lcd_backlight,
+};
+
+static struct charlcd_ops charlcd_parallel_ops = {
+ .write_cmd = lcd_write_cmd_p8,
+ .write_data = lcd_write_data_p8,
+ .clear_fast = lcd_clear_fast_p8,
+ .backlight = lcd_backlight,
+};
+
+static struct charlcd_ops charlcd_tilcd_ops = {
+ .write_cmd = lcd_write_cmd_tilcd,
+ .write_data = lcd_write_data_tilcd,
+ .clear_fast = lcd_clear_fast_tilcd,
+ .backlight = lcd_backlight,
+};
+
+/* initialize the LCD driver */
+static void lcd_init(void)
+{
+ struct charlcd *charlcd;
+
+ charlcd = charlcd_alloc(0);
+ if (!charlcd)
+ return;
+
+ /*
+ * Init lcd struct with load-time values to preserve exact
+ * current functionality (at least for now).
+ */
+ charlcd->height = lcd_height;
+ charlcd->width = lcd_width;
+ charlcd->bwidth = lcd_bwidth;
+ charlcd->hwidth = lcd_hwidth;
+
+ switch (selected_lcd_type) {
+ case LCD_TYPE_OLD:
+ /* parallel mode, 8 bits */
+ lcd.proto = LCD_PROTO_PARALLEL;
+ lcd.charset = LCD_CHARSET_NORMAL;
+ lcd.pins.e = PIN_STROBE;
+ lcd.pins.rs = PIN_AUTOLF;
+
+ charlcd->width = 40;
+ charlcd->bwidth = 40;
+ charlcd->hwidth = 64;
+ charlcd->height = 2;
+ break;
+ case LCD_TYPE_KS0074:
+ /* serial mode, ks0074 */
+ lcd.proto = LCD_PROTO_SERIAL;
+ lcd.charset = LCD_CHARSET_KS0074;
+ lcd.pins.bl = PIN_AUTOLF;
+ lcd.pins.cl = PIN_STROBE;
+ lcd.pins.da = PIN_D0;
+
+ charlcd->width = 16;
+ charlcd->bwidth = 40;
+ charlcd->hwidth = 16;
+ charlcd->height = 2;
+ break;
+ case LCD_TYPE_NEXCOM:
+ /* parallel mode, 8 bits, generic */
+ lcd.proto = LCD_PROTO_PARALLEL;
+ lcd.charset = LCD_CHARSET_NORMAL;
+ lcd.pins.e = PIN_AUTOLF;
+ lcd.pins.rs = PIN_SELECP;
+ lcd.pins.rw = PIN_INITP;
+
+ charlcd->width = 16;
+ charlcd->bwidth = 40;
+ charlcd->hwidth = 64;
+ charlcd->height = 2;
+ break;
+ case LCD_TYPE_CUSTOM:
+ /* customer-defined */
+ lcd.proto = DEFAULT_LCD_PROTO;
+ lcd.charset = DEFAULT_LCD_CHARSET;
+ /* default geometry will be set later */
+ break;
+ case LCD_TYPE_HANTRONIX:
+ /* parallel mode, 8 bits, hantronix-like */
+ default:
+ lcd.proto = LCD_PROTO_PARALLEL;
+ lcd.charset = LCD_CHARSET_NORMAL;
+ lcd.pins.e = PIN_STROBE;
+ lcd.pins.rs = PIN_SELECP;
+
+ charlcd->width = 16;
+ charlcd->bwidth = 40;
+ charlcd->hwidth = 64;
+ charlcd->height = 2;
+ break;
+ }
+
+ /* Overwrite with module params set on loading */
+ if (lcd_height != NOT_SET)
+ charlcd->height = lcd_height;
+ if (lcd_width != NOT_SET)
+ charlcd->width = lcd_width;
+ if (lcd_bwidth != NOT_SET)
+ charlcd->bwidth = lcd_bwidth;
+ if (lcd_hwidth != NOT_SET)
+ charlcd->hwidth = lcd_hwidth;
+ if (lcd_charset != NOT_SET)
+ lcd.charset = lcd_charset;
+ if (lcd_proto != NOT_SET)
+ lcd.proto = lcd_proto;
+ if (lcd_e_pin != PIN_NOT_SET)
+ lcd.pins.e = lcd_e_pin;
+ if (lcd_rs_pin != PIN_NOT_SET)
+ lcd.pins.rs = lcd_rs_pin;
+ if (lcd_rw_pin != PIN_NOT_SET)
+ lcd.pins.rw = lcd_rw_pin;
+ if (lcd_cl_pin != PIN_NOT_SET)
+ lcd.pins.cl = lcd_cl_pin;
+ if (lcd_da_pin != PIN_NOT_SET)
+ lcd.pins.da = lcd_da_pin;
+ if (lcd_bl_pin != PIN_NOT_SET)
+ lcd.pins.bl = lcd_bl_pin;
+
+ /* catch wrong or unset values and fall back to the defaults */
+ if (charlcd->width <= 0)
+ charlcd->width = DEFAULT_LCD_WIDTH;
+ if (charlcd->bwidth <= 0)
+ charlcd->bwidth = DEFAULT_LCD_BWIDTH;
+ if (charlcd->hwidth <= 0)
+ charlcd->hwidth = DEFAULT_LCD_HWIDTH;
+ if (charlcd->height <= 0)
+ charlcd->height = DEFAULT_LCD_HEIGHT;
+
+ if (lcd.proto == LCD_PROTO_SERIAL) { /* SERIAL */
+ charlcd->ops = &charlcd_serial_ops;
+
+ if (lcd.pins.cl == PIN_NOT_SET)
+ lcd.pins.cl = DEFAULT_LCD_PIN_SCL;
+ if (lcd.pins.da == PIN_NOT_SET)
+ lcd.pins.da = DEFAULT_LCD_PIN_SDA;
+
+ } else if (lcd.proto == LCD_PROTO_PARALLEL) { /* PARALLEL */
+ charlcd->ops = &charlcd_parallel_ops;
+
+ if (lcd.pins.e == PIN_NOT_SET)
+ lcd.pins.e = DEFAULT_LCD_PIN_E;
+ if (lcd.pins.rs == PIN_NOT_SET)
+ lcd.pins.rs = DEFAULT_LCD_PIN_RS;
+ if (lcd.pins.rw == PIN_NOT_SET)
+ lcd.pins.rw = DEFAULT_LCD_PIN_RW;
+ } else {
+ charlcd->ops = &charlcd_tilcd_ops;
+ }
+
+ if (lcd.pins.bl == PIN_NOT_SET)
+ lcd.pins.bl = DEFAULT_LCD_PIN_BL;
+
+ if (lcd.pins.e == PIN_NOT_SET)
+ lcd.pins.e = PIN_NONE;
+ if (lcd.pins.rs == PIN_NOT_SET)
+ lcd.pins.rs = PIN_NONE;
+ if (lcd.pins.rw == PIN_NOT_SET)
+ lcd.pins.rw = PIN_NONE;
+ if (lcd.pins.bl == PIN_NOT_SET)
+ lcd.pins.bl = PIN_NONE;
+ if (lcd.pins.cl == PIN_NOT_SET)
+ lcd.pins.cl = PIN_NONE;
+ if (lcd.pins.da == PIN_NOT_SET)
+ lcd.pins.da = PIN_NONE;
+
+ if (lcd.charset == NOT_SET)
+ lcd.charset = DEFAULT_LCD_CHARSET;
+
+ if (lcd.charset == LCD_CHARSET_KS0074)
+ charlcd->char_conv = lcd_char_conv_ks0074;
+ else
+ charlcd->char_conv = NULL;
+
+ pin_to_bits(lcd.pins.e, lcd_bits[LCD_PORT_D][LCD_BIT_E],
+ lcd_bits[LCD_PORT_C][LCD_BIT_E]);
+ pin_to_bits(lcd.pins.rs, lcd_bits[LCD_PORT_D][LCD_BIT_RS],
+ lcd_bits[LCD_PORT_C][LCD_BIT_RS]);
+ pin_to_bits(lcd.pins.rw, lcd_bits[LCD_PORT_D][LCD_BIT_RW],
+ lcd_bits[LCD_PORT_C][LCD_BIT_RW]);
+ pin_to_bits(lcd.pins.bl, lcd_bits[LCD_PORT_D][LCD_BIT_BL],
+ lcd_bits[LCD_PORT_C][LCD_BIT_BL]);
+ pin_to_bits(lcd.pins.cl, lcd_bits[LCD_PORT_D][LCD_BIT_CL],
+ lcd_bits[LCD_PORT_C][LCD_BIT_CL]);
+ pin_to_bits(lcd.pins.da, lcd_bits[LCD_PORT_D][LCD_BIT_DA],
+ lcd_bits[LCD_PORT_C][LCD_BIT_DA]);
+
+ lcd.charlcd = charlcd;
+ lcd.initialized = true;
+}
+
+/*
+ * These are the file operation functions for user access to /dev/keypad
+ */
+
+static ssize_t keypad_read(struct file *file,
+ char __user *buf, size_t count, loff_t *ppos)
+{
+ unsigned i = *ppos;
+ char __user *tmp = buf;
+
+ if (keypad_buflen == 0) {
+ if (file->f_flags & O_NONBLOCK)
+ return -EAGAIN;
+
+ if (wait_event_interruptible(keypad_read_wait,
+ keypad_buflen != 0))
+ return -EINTR;
+ }
+
+ for (; count-- > 0 && (keypad_buflen > 0);
+ ++i, ++tmp, --keypad_buflen) {
+ put_user(keypad_buffer[keypad_start], tmp);
+ keypad_start = (keypad_start + 1) % KEYPAD_BUFFER;
+ }
+ *ppos = i;
+
+ return tmp - buf;
+}
+
+static int keypad_open(struct inode *inode, struct file *file)
+{
+ if (!atomic_dec_and_test(&keypad_available))
+ return -EBUSY; /* open only once at a time */
+
+ if (file->f_mode & FMODE_WRITE) /* device is read-only */
+ return -EPERM;
+
+ keypad_buflen = 0; /* flush the buffer on opening */
+ return 0;
+}
+
+static int keypad_release(struct inode *inode, struct file *file)
+{
+ atomic_inc(&keypad_available);
+ return 0;
+}
+
+static const struct file_operations keypad_fops = {
+ .read = keypad_read, /* read */
+ .open = keypad_open, /* open */
+ .release = keypad_release, /* close */
+ .llseek = default_llseek,
+};
+
+static struct miscdevice keypad_dev = {
+ .minor = KEYPAD_MINOR,
+ .name = "keypad",
+ .fops = &keypad_fops,
+};
+
+static void keypad_send_key(const char *string, int max_len)
+{
+ /* send the key to the device only if a process is attached to it. */
+ if (!atomic_read(&keypad_available)) {
+ while (max_len-- && keypad_buflen < KEYPAD_BUFFER && *string) {
+ keypad_buffer[(keypad_start + keypad_buflen++) %
+ KEYPAD_BUFFER] = *string++;
+ }
+ wake_up_interruptible(&keypad_read_wait);
+ }
+}
+
+/* this function scans all the bits involving at least one logical signal,
+ * and puts the results in the bitfield "phys_read" (one bit per established
+ * contact), and sets "phys_read_prev" to "phys_read".
+ *
+ * Note: to debounce input signals, a signal is only considered switched when
+ * its new value is stable across two consecutive reads. Signals which differ
+ * between two reads keep their previous logical value (phys_prev).
+ * A signal which has just switched will have a 1 in
+ * (phys_read ^ phys_read_prev).
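+ *
+ * For example, a bit that reads 0 then 1 on two consecutive scans is treated
+ * as flapping and keeps its previous value in phys_curr; only a value that is
+ * identical on two consecutive scans is propagated.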
+ */
+static void phys_scan_contacts(void)
+{
+ int bit, bitval;
+ char oldval;
+ char bitmask;
+ char gndmask;
+
+ phys_prev = phys_curr;
+ phys_read_prev = phys_read;
+ phys_read = 0; /* flush all signals */
+
+ /* keep track of old value, with all outputs disabled */
+ oldval = r_dtr(pprt) | scan_mask_o;
+ /* activate all keyboard outputs (active low) */
+ w_dtr(pprt, oldval & ~scan_mask_o);
+
+ /* will have a 1 for each bit set to gnd */
+ bitmask = PNL_PINPUT(r_str(pprt)) & scan_mask_i;
+ /* disable all matrix signals */
+ w_dtr(pprt, oldval);
+
+ /* now that all outputs are cleared, the only active input bits are
+ * directly connected to the ground
+ */
+
+ /* 1 for each grounded input */
+ gndmask = PNL_PINPUT(r_str(pprt)) & scan_mask_i;
+
+ /* grounded inputs are signals 40-44 */
+ phys_read |= (__u64)gndmask << 40;
+
+ if (bitmask != gndmask) {
+ /*
+ * since clearing the outputs changed some inputs, we know
+ * that some input signals are currently tied to some outputs.
+ * So we'll scan them.
+ */
+ for (bit = 0; bit < 8; bit++) {
+ bitval = BIT(bit);
+
+ if (!(scan_mask_o & bitval)) /* skip unused bits */
+ continue;
+
+ w_dtr(pprt, oldval & ~bitval); /* enable this output */
+ bitmask = PNL_PINPUT(r_str(pprt)) & ~gndmask;
+ phys_read |= (__u64)bitmask << (5 * bit);
+ }
+ w_dtr(pprt, oldval); /* disable all outputs */
+ }
+ /*
+ * this is easy: use old bits when they are flapping,
+ * use new ones when stable
+ */
+ phys_curr = (phys_prev & (phys_read ^ phys_read_prev)) |
+ (phys_read & ~(phys_read ^ phys_read_prev));
+}
+
+static inline int input_state_high(struct logical_input *input)
+{
+#if 0
+ /* FIXME:
+ * this is an invalid test. It tries to catch
+ * transitions from single-key to multiple-key, but
+ * doesn't take into account the contacts polarity.
+ * The only solution to the problem is to parse keys
+ * from the most complex to the simplest combinations,
+ * and mark them as 'caught' once a combination
+ * matches, then unmatch it for all other ones.
+ */
+
+ /* try to catch dangerous transition cases:
+ * someone adds a bit, so this signal was a false
+ * positive resulting from a transition. We should
+ * invalidate the signal immediately and not call the
+ * release function.
+ * eg: 0 -(press A)-> A -(press B)-> AB : don't match A's release.
+ */
+ if (((phys_prev & input->mask) == input->value) &&
+ ((phys_curr & input->mask) > input->value)) {
+ input->state = INPUT_ST_LOW; /* invalidate */
+ return 1;
+ }
+#endif
+
+ if ((phys_curr & input->mask) == input->value) {
+ if ((input->type == INPUT_TYPE_STD) &&
+ (input->high_timer == 0)) {
+ input->high_timer++;
+ if (input->u.std.press_fct)
+ input->u.std.press_fct(input->u.std.press_data);
+ } else if (input->type == INPUT_TYPE_KBD) {
+ /* will turn on the light */
+ keypressed = 1;
+
+ if (input->high_timer == 0) {
+ char *press_str = input->u.kbd.press_str;
+
+ if (press_str[0]) {
+ int s = sizeof(input->u.kbd.press_str);
+
+ keypad_send_key(press_str, s);
+ }
+ }
+
+ if (input->u.kbd.repeat_str[0]) {
+ char *repeat_str = input->u.kbd.repeat_str;
+
+ if (input->high_timer >= KEYPAD_REP_START) {
+ int s = sizeof(input->u.kbd.repeat_str);
+
+ input->high_timer -= KEYPAD_REP_DELAY;
+ keypad_send_key(repeat_str, s);
+ }
+ /* we will need to come back here soon */
+ inputs_stable = 0;
+ }
+
+ if (input->high_timer < 255)
+ input->high_timer++;
+ }
+ return 1;
+ }
+
+ /* else signal falling down. Let's fall through. */
+ input->state = INPUT_ST_FALLING;
+ input->fall_timer = 0;
+
+ return 0;
+}
+
+static inline void input_state_falling(struct logical_input *input)
+{
+#if 0
+ /* FIXME !!! same comment as in input_state_high */
+ if (((phys_prev & input->mask) == input->value) &&
+ ((phys_curr & input->mask) > input->value)) {
+ input->state = INPUT_ST_LOW; /* invalidate */
+ return;
+ }
+#endif
+
+ if ((phys_curr & input->mask) == input->value) {
+ if (input->type == INPUT_TYPE_KBD) {
+ /* will turn on the light */
+ keypressed = 1;
+
+ if (input->u.kbd.repeat_str[0]) {
+ char *repeat_str = input->u.kbd.repeat_str;
+
+ if (input->high_timer >= KEYPAD_REP_START) {
+ int s = sizeof(input->u.kbd.repeat_str);
+
+ input->high_timer -= KEYPAD_REP_DELAY;
+ keypad_send_key(repeat_str, s);
+ }
+ /* we will need to come back here soon */
+ inputs_stable = 0;
+ }
+
+ if (input->high_timer < 255)
+ input->high_timer++;
+ }
+ input->state = INPUT_ST_HIGH;
+ } else if (input->fall_timer >= input->fall_time) {
+ /* call release event */
+ if (input->type == INPUT_TYPE_STD) {
+ void (*release_fct)(int) = input->u.std.release_fct;
+
+ if (release_fct)
+ release_fct(input->u.std.release_data);
+ } else if (input->type == INPUT_TYPE_KBD) {
+ char *release_str = input->u.kbd.release_str;
+
+ if (release_str[0]) {
+ int s = sizeof(input->u.kbd.release_str);
+
+ keypad_send_key(release_str, s);
+ }
+ }
+
+ input->state = INPUT_ST_LOW;
+ } else {
+ input->fall_timer++;
+ inputs_stable = 0;
+ }
+}
+
+static void panel_process_inputs(void)
+{
+ struct list_head *item;
+ struct logical_input *input;
+
+ keypressed = 0;
+ inputs_stable = 1;
+ list_for_each(item, &logical_inputs) {
+ input = list_entry(item, struct logical_input, list);
+
+ switch (input->state) {
+ case INPUT_ST_LOW:
+ if ((phys_curr & input->mask) != input->value)
+ break;
+ /* if all needed ones were already set previously,
+ * this means that this logical signal has been
+ * activated by the releasing of another combined
+ * signal, so we don't want to match.
+ * eg: AB -(release B)-> A -(release A)-> 0 :
+ * don't match A.
+ */
+ if ((phys_prev & input->mask) == input->value)
+ break;
+ input->rise_timer = 0;
+ input->state = INPUT_ST_RISING;
+ /* no break here, fall through */
+ case INPUT_ST_RISING:
+ if ((phys_curr & input->mask) != input->value) {
+ input->state = INPUT_ST_LOW;
+ break;
+ }
+ if (input->rise_timer < input->rise_time) {
+ inputs_stable = 0;
+ input->rise_timer++;
+ break;
+ }
+ input->high_timer = 0;
+ input->state = INPUT_ST_HIGH;
+ /* no break here, fall through */
+ case INPUT_ST_HIGH:
+ if (input_state_high(input))
+ break;
+ /* no break here, fall through */
+ case INPUT_ST_FALLING:
+ input_state_falling(input);
+ }
+ }
+}
+
+static void panel_scan_timer(void)
+{
+ if (keypad.enabled && keypad_initialized) {
+ if (spin_trylock_irq(&pprt_lock)) {
+ phys_scan_contacts();
+
+ /* no need for the parport anymore */
+ spin_unlock_irq(&pprt_lock);
+ }
+
+ if (!inputs_stable || phys_curr != phys_prev)
+ panel_process_inputs();
+ }
+
+ if (keypressed && lcd.enabled && lcd.initialized)
+ charlcd_poke(lcd.charlcd);
+
+ mod_timer(&scan_timer, jiffies + INPUT_POLL_TIME);
+}
+
+static void init_scan_timer(void)
+{
+ if (scan_timer.function)
+ return; /* already started */
+
+ setup_timer(&scan_timer, (void *)&panel_scan_timer, 0);
+ scan_timer.expires = jiffies + INPUT_POLL_TIME;
+ add_timer(&scan_timer);
+}
+
+/* converts a name of the form "({BbAaPpSsEe}{01234567-})*" to a series of bits.
+ * if <omask> or <imask> are non-null, they will be or'ed with the bits
+ * corresponding to out and in bits respectively.
+ * returns 1 if ok, 0 if error (in which case, nothing is written).
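+ *
+ * For example, "S0" describes the SELECT input tied to output D0 (bit 0*5+1),
+ * while a lower-case name such as "p-" requires the PAPER-EMPTY bit of the
+ * ground group (bit 8*5+2) to be inactive.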
+ */
+static u8 input_name2mask(const char *name, __u64 *mask, __u64 *value,
+ u8 *imask, u8 *omask)
+{
+ const char sigtab[] = "EeSsPpAaBb";
+ u8 im, om;
+ __u64 m, v;
+
+ om = 0;
+ im = 0;
+ m = 0ULL;
+ v = 0ULL;
+ while (*name) {
+ int in, out, bit, neg;
+ const char *idx;
+
+ idx = strchr(sigtab, *name);
+ if (!idx)
+ return 0; /* input name not found */
+
+ in = idx - sigtab;
+ neg = (in & 1); /* odd (lower) names are negated */
+ in >>= 1;
+ im |= BIT(in);
+
+ name++;
+ if (*name >= '0' && *name <= '7') {
+ out = *name - '0';
+ om |= BIT(out);
+ } else if (*name == '-') {
+ out = 8;
+ } else {
+ return 0; /* unknown bit name */
+ }
+
+ bit = (out * 5) + in;
+
+ m |= 1ULL << bit;
+ if (!neg)
+ v |= 1ULL << bit;
+ name++;
+ }
+ *mask = m;
+ *value = v;
+ if (imask)
+ *imask |= im;
+ if (omask)
+ *omask |= om;
+ return 1;
+}
+
+/* tries to bind a key to the signal name <name>. The key will send the
+ * strings <press>, <repeat>, <release> for these respective events.
+ * Returns the pointer to the new key if ok, NULL if the key could not be bound.
+ */
+static struct logical_input *panel_bind_key(const char *name, const char *press,
+ const char *repeat,
+ const char *release)
+{
+ struct logical_input *key;
+
+ key = kzalloc(sizeof(*key), GFP_KERNEL);
+ if (!key)
+ return NULL;
+
+ if (!input_name2mask(name, &key->mask, &key->value, &scan_mask_i,
+ &scan_mask_o)) {
+ kfree(key);
+ return NULL;
+ }
+
+ key->type = INPUT_TYPE_KBD;
+ key->state = INPUT_ST_LOW;
+ key->rise_time = 1;
+ key->fall_time = 1;
+
+ strncpy(key->u.kbd.press_str, press, sizeof(key->u.kbd.press_str));
+ strncpy(key->u.kbd.repeat_str, repeat, sizeof(key->u.kbd.repeat_str));
+ strncpy(key->u.kbd.release_str, release,
+ sizeof(key->u.kbd.release_str));
+ list_add(&key->list, &logical_inputs);
+ return key;
+}
+
+#if 0
+/* tries to bind a callback function to the signal name <name>. The function
+ * <press_fct> will be called with the <press_data> arg when the signal is
+ * activated, and so on for <release_fct>/<release_data>
+ * Returns the pointer to the new signal if ok, NULL if the signal could not
+ * be bound.
+ */
+static struct logical_input *panel_bind_callback(char *name,
+ void (*press_fct)(int),
+ int press_data,
+ void (*release_fct)(int),
+ int release_data)
+{
+ struct logical_input *callback;
+
+ callback = kmalloc(sizeof(*callback), GFP_KERNEL);
+ if (!callback)
+ return NULL;
+
+ memset(callback, 0, sizeof(struct logical_input));
+ if (!input_name2mask(name, &callback->mask, &callback->value,
+ &scan_mask_i, &scan_mask_o))
+ return NULL;
+
+ callback->type = INPUT_TYPE_STD;
+ callback->state = INPUT_ST_LOW;
+ callback->rise_time = 1;
+ callback->fall_time = 1;
+ callback->u.std.press_fct = press_fct;
+ callback->u.std.press_data = press_data;
+ callback->u.std.release_fct = release_fct;
+ callback->u.std.release_data = release_data;
+ list_add(&callback->list, &logical_inputs);
+ return callback;
+}
+#endif
+
+static void keypad_init(void)
+{
+ int keynum;
+
+ init_waitqueue_head(&keypad_read_wait);
+ keypad_buflen = 0; /* flush any spurious keystrokes */
+
+ /* Let's create all known keys */
+
+ for (keynum = 0; keypad_profile[keynum][0][0]; keynum++) {
+ panel_bind_key(keypad_profile[keynum][0],
+ keypad_profile[keynum][1],
+ keypad_profile[keynum][2],
+ keypad_profile[keynum][3]);
+ }
+
+ init_scan_timer();
+ keypad_initialized = 1;
+}
+
+/**************************************************/
+/* device initialization */
+/**************************************************/
+
+static void panel_attach(struct parport *port)
+{
+ struct pardev_cb panel_cb;
+
+ if (port->number != parport)
+ return;
+
+ if (pprt) {
+ pr_err("%s: port->number=%d parport=%d, already registered!\n",
+ __func__, port->number, parport);
+ return;
+ }
+
+ memset(&panel_cb, 0, sizeof(panel_cb));
+ panel_cb.private = &pprt;
+ /* panel_cb.flags = 0 should be PARPORT_DEV_EXCL? */
+
+ pprt = parport_register_dev_model(port, "panel", &panel_cb, 0);
+ if (!pprt) {
+ pr_err("%s: port->number=%d parport=%d, parport_register_device() failed\n",
+ __func__, port->number, parport);
+ return;
+ }
+
+ if (parport_claim(pprt)) {
+ pr_err("could not claim access to parport%d. Aborting.\n",
+ parport);
+ goto err_unreg_device;
+ }
+
+ /* must init LCD first, just in case an IRQ from the keypad is
+ * generated at keypad init
+ */
+ if (lcd.enabled) {
+ lcd_init();
+ if (!lcd.charlcd || charlcd_register(lcd.charlcd))
+ goto err_unreg_device;
+ }
+
+ if (keypad.enabled) {
+ keypad_init();
+ if (misc_register(&keypad_dev))
+ goto err_lcd_unreg;
+ }
+ return;
+
+err_lcd_unreg:
+ if (lcd.enabled)
+ charlcd_unregister(lcd.charlcd);
+err_unreg_device:
+ kfree(lcd.charlcd);
+ lcd.charlcd = NULL;
+ parport_unregister_device(pprt);
+ pprt = NULL;
+}
+
+static void panel_detach(struct parport *port)
+{
+ if (port->number != parport)
+ return;
+
+ if (!pprt) {
+ pr_err("%s: port->number=%d parport=%d, nothing to unregister.\n",
+ __func__, port->number, parport);
+ return;
+ }
+ if (scan_timer.function)
+ del_timer_sync(&scan_timer);
+
+ if (keypad.enabled) {
+ misc_deregister(&keypad_dev);
+ keypad_initialized = 0;
+ }
+
+ if (lcd.enabled) {
+ charlcd_unregister(lcd.charlcd);
+ lcd.initialized = false;
+ kfree(lcd.charlcd);
+ lcd.charlcd = NULL;
+ }
+
+ /* TODO: free all input signals */
+ parport_release(pprt);
+ parport_unregister_device(pprt);
+ pprt = NULL;
+}
+
+static struct parport_driver panel_driver = {
+ .name = "panel",
+ .match_port = panel_attach,
+ .detach = panel_detach,
+ .devmodel = true,
+};
+
+/* init function */
+static int __init panel_init_module(void)
+{
+ int selected_keypad_type = NOT_SET, err;
+
+ /* apply the selected profile, if any */
+ switch (profile) {
+ case PANEL_PROFILE_CUSTOM:
+ /* custom profile */
+ selected_keypad_type = DEFAULT_KEYPAD_TYPE;
+ selected_lcd_type = DEFAULT_LCD_TYPE;
+ break;
+ case PANEL_PROFILE_OLD:
+ /* 8 bits, 2*16, old keypad */
+ selected_keypad_type = KEYPAD_TYPE_OLD;
+ selected_lcd_type = LCD_TYPE_OLD;
+
+ /* TODO: These two are a little hacky, sort it out later */
+ if (lcd_width == NOT_SET)
+ lcd_width = 16;
+ if (lcd_hwidth == NOT_SET)
+ lcd_hwidth = 16;
+ break;
+ case PANEL_PROFILE_NEW:
+ /* serial, 2*16, new keypad */
+ selected_keypad_type = KEYPAD_TYPE_NEW;
+ selected_lcd_type = LCD_TYPE_KS0074;
+ break;
+ case PANEL_PROFILE_HANTRONIX:
+ /* 8 bits, 2*16 hantronix-like, no keypad */
+ selected_keypad_type = KEYPAD_TYPE_NONE;
+ selected_lcd_type = LCD_TYPE_HANTRONIX;
+ break;
+ case PANEL_PROFILE_NEXCOM:
+ /* generic 8 bits, 2*16, nexcom keypad, e.g. Nexcom. */
+ selected_keypad_type = KEYPAD_TYPE_NEXCOM;
+ selected_lcd_type = LCD_TYPE_NEXCOM;
+ break;
+ case PANEL_PROFILE_LARGE:
+ /* 8 bits, 2*40, old keypad */
+ selected_keypad_type = KEYPAD_TYPE_OLD;
+ selected_lcd_type = LCD_TYPE_OLD;
+ break;
+ }
+
+ /*
+ * Overwrite selection with module param values (both keypad and lcd),
+ * where the deprecated params have lower priority.
+ */
+ if (keypad_enabled != NOT_SET)
+ selected_keypad_type = keypad_enabled;
+ if (keypad_type != NOT_SET)
+ selected_keypad_type = keypad_type;
+
+ keypad.enabled = (selected_keypad_type > 0);
+
+ if (lcd_enabled != NOT_SET)
+ selected_lcd_type = lcd_enabled;
+ if (lcd_type != NOT_SET)
+ selected_lcd_type = lcd_type;
+
+ lcd.enabled = (selected_lcd_type > 0);
+
+ if (lcd.enabled) {
+ /*
+ * Init lcd struct with load-time values to preserve exact
+ * current functionality (at least for now).
+ */
+ lcd.charset = lcd_charset;
+ lcd.proto = lcd_proto;
+ lcd.pins.e = lcd_e_pin;
+ lcd.pins.rs = lcd_rs_pin;
+ lcd.pins.rw = lcd_rw_pin;
+ lcd.pins.cl = lcd_cl_pin;
+ lcd.pins.da = lcd_da_pin;
+ lcd.pins.bl = lcd_bl_pin;
+ }
+
+ switch (selected_keypad_type) {
+ case KEYPAD_TYPE_OLD:
+ keypad_profile = old_keypad_profile;
+ break;
+ case KEYPAD_TYPE_NEW:
+ keypad_profile = new_keypad_profile;
+ break;
+ case KEYPAD_TYPE_NEXCOM:
+ keypad_profile = nexcom_keypad_profile;
+ break;
+ default:
+ keypad_profile = NULL;
+ break;
+ }
+
+ if (!lcd.enabled && !keypad.enabled) {
+ /* no device enabled, let's exit */
+ pr_err("panel driver disabled.\n");
+ return -ENODEV;
+ }
+
+ err = parport_register_driver(&panel_driver);
+ if (err) {
+ pr_err("could not register with parport. Aborting.\n");
+ return err;
+ }
+
+ if (pprt)
+ pr_info("panel driver registered on parport%d (io=0x%lx).\n",
+ parport, pprt->port->base);
+ else
+ pr_info("panel driver not yet registered\n");
+ return 0;
+}
+
+static void __exit panel_cleanup_module(void)
+{
+ parport_unregister_driver(&panel_driver);
+}
+
+module_init(panel_init_module);
+module_exit(panel_cleanup_module);
+MODULE_AUTHOR("Willy Tarreau");
+MODULE_LICENSE("GPL");
+
+/*
+ * Local variables:
+ * c-indent-level: 4
+ * tab-width: 8
+ * End:
+ */
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 6bb60fb6a30b..bbecaf9293be 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -1607,7 +1607,7 @@ int device_private_init(struct device *dev)
*/
int device_add(struct device *dev)
{
- struct device *parent = NULL;
+ struct device *parent;
struct kobject *kobj;
struct class_interface *class_intf;
int error = -EINVAL;
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index a1fbf55c4d3a..4882f06d12df 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -19,6 +19,7 @@
#include <linux/device.h>
#include <linux/delay.h>
+#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/wait.h>
@@ -356,6 +357,10 @@ re_probe:
if (ret)
goto pinctrl_bind_failed;
+ ret = dma_configure(dev);
+ if (ret)
+ goto dma_failed;
+
if (driver_sysfs_add(dev)) {
printk(KERN_ERR "%s: driver_sysfs_add(%s) failed\n",
__func__, dev_name(dev));
@@ -417,6 +422,8 @@ re_probe:
goto done;
probe_failed:
+ dma_deconfigure(dev);
+dma_failed:
if (dev->bus)
blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
BUS_NOTIFY_DRIVER_NOT_BOUND, dev);
@@ -826,6 +833,8 @@ static void __device_release_driver(struct device *dev, struct device *parent)
drv->remove(dev);
device_links_driver_cleanup(dev);
+ dma_deconfigure(dev);
+
devres_release_all(dev);
dev->driver = NULL;
dev_set_drvdata(dev, NULL);
diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c
index b55804cac4c4..ea9726e71468 100644
--- a/drivers/base/dma-contiguous.c
+++ b/drivers/base/dma-contiguous.c
@@ -165,7 +165,8 @@ int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
{
int ret;
- ret = cma_declare_contiguous(base, size, limit, 0, 0, fixed, res_cma);
+ ret = cma_declare_contiguous(base, size, limit, 0, 0, fixed,
+ "reserved", res_cma);
if (ret)
return ret;
@@ -258,7 +259,7 @@ static int __init rmem_cma_setup(struct reserved_mem *rmem)
return -EINVAL;
}
- err = cma_init_reserved_mem(rmem->base, rmem->size, 0, &cma);
+ err = cma_init_reserved_mem(rmem->base, rmem->size, 0, rmem->name, &cma);
if (err) {
pr_err("Reserved memory: unable to setup CMA region\n");
return err;
diff --git a/drivers/base/dma-mapping.c b/drivers/base/dma-mapping.c
index efd71cf4fdea..f3deb6af42ad 100644
--- a/drivers/base/dma-mapping.c
+++ b/drivers/base/dma-mapping.c
@@ -7,9 +7,11 @@
* This file is released under the GPLv2.
*/
+#include <linux/acpi.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <linux/gfp.h>
+#include <linux/of_device.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
@@ -309,14 +311,13 @@ void *dma_common_contiguous_remap(struct page *page, size_t size,
int i;
struct page **pages;
void *ptr;
- unsigned long pfn;
pages = kmalloc(sizeof(struct page *) << get_order(size), GFP_KERNEL);
if (!pages)
return NULL;
- for (i = 0, pfn = page_to_pfn(page); i < (size >> PAGE_SHIFT); i++)
- pages[i] = pfn_to_page(pfn + i);
+ for (i = 0; i < (size >> PAGE_SHIFT); i++)
+ pages[i] = nth_page(page, i);
ptr = dma_common_pages_remap(pages, size, vm_flags, prot, caller);
@@ -341,3 +342,42 @@ void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags)
vunmap(cpu_addr);
}
#endif
+
+/*
+ * Common configuration to enable DMA API use for a device
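+ *
+ * For PCI devices, the DMA configuration is taken from the host bridge's
+ * firmware node; an OF node takes precedence over an ACPI companion.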
+ */
+#include <linux/pci.h>
+
+int dma_configure(struct device *dev)
+{
+ struct device *bridge = NULL, *dma_dev = dev;
+ enum dev_dma_attr attr;
+ int ret = 0;
+
+ if (dev_is_pci(dev)) {
+ bridge = pci_get_host_bridge_device(to_pci_dev(dev));
+ dma_dev = bridge;
+ if (IS_ENABLED(CONFIG_OF) && dma_dev->parent &&
+ dma_dev->parent->of_node)
+ dma_dev = dma_dev->parent;
+ }
+
+ if (dma_dev->of_node) {
+ ret = of_dma_configure(dev, dma_dev->of_node);
+ } else if (has_acpi_companion(dma_dev)) {
+ attr = acpi_get_dma_attr(to_acpi_device_node(dma_dev->fwnode));
+ if (attr != DEV_DMA_NOT_SUPPORTED)
+ ret = acpi_dma_configure(dev, attr);
+ }
+
+ if (bridge)
+ pci_put_host_bridge_device(bridge);
+
+ return ret;
+}
+
+void dma_deconfigure(struct device *dev)
+{
+ of_dma_deconfigure(dev);
+ acpi_dma_deconfigure(dev);
+}
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index c2456839214a..a102152301c8 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -847,7 +847,7 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *a,
struct platform_device *pdev = to_platform_device(dev);
int len;
- len = of_device_get_modalias(dev, buf, PAGE_SIZE -1);
+ len = of_device_modalias(dev, buf, PAGE_SIZE);
if (len != -ENODEV)
return len;
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index ad196427b4f2..da49a8383dc3 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -1636,8 +1636,6 @@ static struct generic_pm_domain *genpd_xlate_simple(
struct of_phandle_args *genpdspec,
void *data)
{
- if (genpdspec->args_count != 0)
- return ERR_PTR(-EINVAL);
return data;
}
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 9faee1c893e5..e987a6f55d36 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -1091,11 +1091,6 @@ static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool a
if (async_error)
goto Complete;
- if (pm_wakeup_pending()) {
- async_error = -EBUSY;
- goto Complete;
- }
-
if (dev->power.syscore || dev->power.direct_complete)
goto Complete;
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index 136854970489..f62082fdd670 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -28,8 +28,8 @@ bool events_check_enabled __read_mostly;
/* First wakeup IRQ seen by the kernel in the last cycle. */
unsigned int pm_wakeup_irq __read_mostly;
-/* If set and the system is suspending, terminate the suspend. */
-static bool pm_abort_suspend __read_mostly;
+/* If greater than 0 and the system is suspending, terminate the suspend. */
+static atomic_t pm_abort_suspend __read_mostly;
/*
* Combined counters of registered wakeup events and wakeup events in progress.
@@ -512,12 +512,13 @@ static bool wakeup_source_not_registered(struct wakeup_source *ws)
/**
 * wakeup_source_activate - Mark given wakeup source as active.
* @ws: Wakeup source to handle.
+ * @hard: If set, abort suspends in progress and wake up from suspend-to-idle.
*
* Update the @ws' statistics and, if @ws has just been activated, notify the PM
 * core of the event by incrementing the counter of wakeup events being
* processed.
*/
-static void wakeup_source_activate(struct wakeup_source *ws)
+static void wakeup_source_activate(struct wakeup_source *ws, bool hard)
{
unsigned int cec;
@@ -525,11 +526,8 @@ static void wakeup_source_activate(struct wakeup_source *ws)
"unregistered wakeup source\n"))
return;
- /*
- * active wakeup source should bring the system
- * out of PM_SUSPEND_FREEZE state
- */
- freeze_wake();
+ if (hard)
+ pm_system_wakeup();
ws->active = true;
ws->active_count++;
@@ -546,8 +544,9 @@ static void wakeup_source_activate(struct wakeup_source *ws)
/**
* wakeup_source_report_event - Report wakeup event using the given source.
* @ws: Wakeup source to report the event for.
+ * @hard: If set, abort suspends in progress and wake up from suspend-to-idle.
*/
-static void wakeup_source_report_event(struct wakeup_source *ws)
+static void wakeup_source_report_event(struct wakeup_source *ws, bool hard)
{
ws->event_count++;
/* This is racy, but the counter is approximate anyway. */
@@ -555,7 +554,7 @@ static void wakeup_source_report_event(struct wakeup_source *ws)
ws->wakeup_count++;
if (!ws->active)
- wakeup_source_activate(ws);
+ wakeup_source_activate(ws, hard);
}
/**
@@ -573,7 +572,7 @@ void __pm_stay_awake(struct wakeup_source *ws)
spin_lock_irqsave(&ws->lock, flags);
- wakeup_source_report_event(ws);
+ wakeup_source_report_event(ws, false);
del_timer(&ws->timer);
ws->timer_expires = 0;
@@ -739,9 +738,10 @@ static void pm_wakeup_timer_fn(unsigned long data)
}
/**
- * __pm_wakeup_event - Notify the PM core of a wakeup event.
+ * pm_wakeup_ws_event - Notify the PM core of a wakeup event.
* @ws: Wakeup source object associated with the event source.
* @msec: Anticipated event processing time (in milliseconds).
+ * @hard: If set, abort suspends in progress and wake up from suspend-to-idle.
*
* Notify the PM core of a wakeup event whose source is @ws that will take
* approximately @msec milliseconds to be processed by the kernel. If @ws is
@@ -750,7 +750,7 @@ static void pm_wakeup_timer_fn(unsigned long data)
*
* It is safe to call this function from interrupt context.
*/
-void __pm_wakeup_event(struct wakeup_source *ws, unsigned int msec)
+void pm_wakeup_ws_event(struct wakeup_source *ws, unsigned int msec, bool hard)
{
unsigned long flags;
unsigned long expires;
@@ -760,7 +760,7 @@ void __pm_wakeup_event(struct wakeup_source *ws, unsigned int msec)
spin_lock_irqsave(&ws->lock, flags);
- wakeup_source_report_event(ws);
+ wakeup_source_report_event(ws, hard);
if (!msec) {
wakeup_source_deactivate(ws);
@@ -779,17 +779,17 @@ void __pm_wakeup_event(struct wakeup_source *ws, unsigned int msec)
unlock:
spin_unlock_irqrestore(&ws->lock, flags);
}
-EXPORT_SYMBOL_GPL(__pm_wakeup_event);
-
+EXPORT_SYMBOL_GPL(pm_wakeup_ws_event);
/**
* pm_wakeup_event - Notify the PM core of a wakeup event.
* @dev: Device the wakeup event is related to.
* @msec: Anticipated event processing time (in milliseconds).
+ * @hard: If set, abort suspends in progress and wake up from suspend-to-idle.
*
- * Call __pm_wakeup_event() for the @dev's wakeup source object.
+ * Call pm_wakeup_ws_event() for the @dev's wakeup source object.
*/
-void pm_wakeup_event(struct device *dev, unsigned int msec)
+void pm_wakeup_dev_event(struct device *dev, unsigned int msec, bool hard)
{
unsigned long flags;
@@ -797,10 +797,10 @@ void pm_wakeup_event(struct device *dev, unsigned int msec)
return;
spin_lock_irqsave(&dev->power.lock, flags);
- __pm_wakeup_event(dev->power.wakeup, msec);
+ pm_wakeup_ws_event(dev->power.wakeup, msec, hard);
spin_unlock_irqrestore(&dev->power.lock, flags);
}
-EXPORT_SYMBOL_GPL(pm_wakeup_event);
+EXPORT_SYMBOL_GPL(pm_wakeup_dev_event);
void pm_print_active_wakeup_sources(void)
{
@@ -856,20 +856,26 @@ bool pm_wakeup_pending(void)
pm_print_active_wakeup_sources();
}
- return ret || pm_abort_suspend;
+ return ret || atomic_read(&pm_abort_suspend) > 0;
}
void pm_system_wakeup(void)
{
- pm_abort_suspend = true;
+ atomic_inc(&pm_abort_suspend);
freeze_wake();
}
EXPORT_SYMBOL_GPL(pm_system_wakeup);
-void pm_wakeup_clear(void)
+void pm_system_cancel_wakeup(void)
+{
+ atomic_dec(&pm_abort_suspend);
+}
+
+void pm_wakeup_clear(bool reset)
{
- pm_abort_suspend = false;
pm_wakeup_irq = 0;
+ if (reset)
+ atomic_set(&pm_abort_suspend, 0);
}
void pm_system_irq_wakeup(unsigned int irq_number)
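For reference, a minimal sketch (not part of the patch) of how a driver-side caller might use the pm_wakeup_ws_event() helper introduced above, passing hard == true so a suspend in progress is aborted and suspend-to-idle is woken; the IRQ handler and wakeup source below are hypothetical:

	#include <linux/interrupt.h>
	#include <linux/pm_wakeup.h>

	/* Hypothetical wakeup IRQ handler: report a "hard" wakeup event. */
	static irqreturn_t example_wake_irq(int irq, void *data)
	{
		struct wakeup_source *ws = data;

		pm_wakeup_ws_event(ws, 0, true);	/* hard == true */
		return IRQ_HANDLED;
	}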
diff --git a/drivers/base/soc.c b/drivers/base/soc.c
index dc26e5949a32..909dedae4c4e 100644
--- a/drivers/base/soc.c
+++ b/drivers/base/soc.c
@@ -109,15 +109,18 @@ static void soc_release(struct device *dev)
kfree(soc_dev);
}
+static struct soc_device_attribute *early_soc_dev_attr;
+
struct soc_device *soc_device_register(struct soc_device_attribute *soc_dev_attr)
{
struct soc_device *soc_dev;
int ret;
if (!soc_bus_type.p) {
- ret = bus_register(&soc_bus_type);
- if (ret)
- goto out1;
+ if (early_soc_dev_attr)
+ return ERR_PTR(-EBUSY);
+ early_soc_dev_attr = soc_dev_attr;
+ return NULL;
}
soc_dev = kzalloc(sizeof(*soc_dev), GFP_KERNEL);
@@ -159,45 +162,53 @@ void soc_device_unregister(struct soc_device *soc_dev)
ida_simple_remove(&soc_ida, soc_dev->soc_dev_num);
device_unregister(&soc_dev->dev);
+ early_soc_dev_attr = NULL;
}
static int __init soc_bus_register(void)
{
- if (soc_bus_type.p)
- return 0;
+ int ret;
- return bus_register(&soc_bus_type);
+ ret = bus_register(&soc_bus_type);
+ if (ret)
+ return ret;
+
+ if (early_soc_dev_attr)
+ return PTR_ERR(soc_device_register(early_soc_dev_attr));
+
+ return 0;
}
core_initcall(soc_bus_register);
-static int soc_device_match_one(struct device *dev, void *arg)
+static int soc_device_match_attr(const struct soc_device_attribute *attr,
+ const struct soc_device_attribute *match)
{
- struct soc_device *soc_dev = container_of(dev, struct soc_device, dev);
- const struct soc_device_attribute *match = arg;
-
if (match->machine &&
- (!soc_dev->attr->machine ||
- !glob_match(match->machine, soc_dev->attr->machine)))
+ (!attr->machine || !glob_match(match->machine, attr->machine)))
return 0;
if (match->family &&
- (!soc_dev->attr->family ||
- !glob_match(match->family, soc_dev->attr->family)))
+ (!attr->family || !glob_match(match->family, attr->family)))
return 0;
if (match->revision &&
- (!soc_dev->attr->revision ||
- !glob_match(match->revision, soc_dev->attr->revision)))
+ (!attr->revision || !glob_match(match->revision, attr->revision)))
return 0;
if (match->soc_id &&
- (!soc_dev->attr->soc_id ||
- !glob_match(match->soc_id, soc_dev->attr->soc_id)))
+ (!attr->soc_id || !glob_match(match->soc_id, attr->soc_id)))
return 0;
return 1;
}
+static int soc_device_match_one(struct device *dev, void *arg)
+{
+ struct soc_device *soc_dev = container_of(dev, struct soc_device, dev);
+
+ return soc_device_match_attr(soc_dev->attr, arg);
+}
+
/*
* soc_device_match - identify the SoC in the machine
* @matches: zero-terminated array of possible matches
@@ -230,6 +241,11 @@ const struct soc_device_attribute *soc_device_match(
break;
ret = bus_for_each_dev(&soc_bus_type, NULL, (void *)matches,
soc_device_match_one);
+ if (ret < 0 && early_soc_dev_attr)
+ ret = soc_device_match_attr(early_soc_dev_attr,
+ matches);
+ if (ret < 0)
+ return NULL;
if (!ret)
matches++;
else
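For reference, a small sketch (not part of the patch) of the consumer side of soc_device_match(), whose glob matching is factored into soc_device_match_attr() above; the SoC id and revision strings are placeholders:

	#include <linux/sys_soc.h>

	/* Hypothetical quirk table; globs are honoured by the matcher. */
	static const struct soc_device_attribute example_quirk_socs[] = {
		{ .soc_id = "example-soc", .revision = "1.*" },
		{ /* sentinel */ }
	};

	static bool example_needs_quirk(void)
	{
		return soc_device_match(example_quirk_socs) != NULL;
	}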
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index d545abbd5378..8ddc98279c8f 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -323,6 +323,7 @@ config BLK_DEV_SX8
config BLK_DEV_RAM
tristate "RAM block device support"
+ select DAX if BLK_DEV_RAM_DAX
---help---
Saying Y here will allow you to use a portion of your RAM memory as
a block device, so that you can make file systems on it, read and
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index 4ec84d504780..57b574f2f66a 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -21,6 +21,7 @@
#include <linux/slab.h>
#ifdef CONFIG_BLK_DEV_RAM_DAX
#include <linux/pfn_t.h>
+#include <linux/dax.h>
#endif
#include <linux/uaccess.h>
@@ -41,6 +42,9 @@ struct brd_device {
struct request_queue *brd_queue;
struct gendisk *brd_disk;
+#ifdef CONFIG_BLK_DEV_RAM_DAX
+ struct dax_device *dax_dev;
+#endif
struct list_head brd_list;
/*
@@ -326,30 +330,38 @@ static int brd_rw_page(struct block_device *bdev, sector_t sector,
}
#ifdef CONFIG_BLK_DEV_RAM_DAX
-static long brd_direct_access(struct block_device *bdev, sector_t sector,
- void **kaddr, pfn_t *pfn, long size)
+static long __brd_direct_access(struct brd_device *brd, pgoff_t pgoff,
+ long nr_pages, void **kaddr, pfn_t *pfn)
{
- struct brd_device *brd = bdev->bd_disk->private_data;
struct page *page;
if (!brd)
return -ENODEV;
- page = brd_insert_page(brd, sector);
+ page = brd_insert_page(brd, PFN_PHYS(pgoff) / 512);
if (!page)
return -ENOSPC;
*kaddr = page_address(page);
*pfn = page_to_pfn_t(page);
- return PAGE_SIZE;
+ return 1;
}
-#else
-#define brd_direct_access NULL
+
+static long brd_dax_direct_access(struct dax_device *dax_dev,
+ pgoff_t pgoff, long nr_pages, void **kaddr, pfn_t *pfn)
+{
+ struct brd_device *brd = dax_get_private(dax_dev);
+
+ return __brd_direct_access(brd, pgoff, nr_pages, kaddr, pfn);
+}
+
+static const struct dax_operations brd_dax_ops = {
+ .direct_access = brd_dax_direct_access,
+};
#endif
static const struct block_device_operations brd_fops = {
.owner = THIS_MODULE,
.rw_page = brd_rw_page,
- .direct_access = brd_direct_access,
};
/*
@@ -415,9 +427,6 @@ static struct brd_device *brd_alloc(int i)
* is harmless)
*/
blk_queue_physical_block_size(brd->brd_queue, PAGE_SIZE);
-#ifdef CONFIG_BLK_DEV_RAM_DAX
- queue_flag_set_unlocked(QUEUE_FLAG_DAX, brd->brd_queue);
-#endif
disk = brd->brd_disk = alloc_disk(max_part);
if (!disk)
goto out_free_queue;
@@ -430,8 +439,21 @@ static struct brd_device *brd_alloc(int i)
sprintf(disk->disk_name, "ram%d", i);
set_capacity(disk, rd_size * 2);
+#ifdef CONFIG_BLK_DEV_RAM_DAX
+ queue_flag_set_unlocked(QUEUE_FLAG_DAX, brd->brd_queue);
+ brd->dax_dev = alloc_dax(brd, disk->disk_name, &brd_dax_ops);
+ if (!brd->dax_dev)
+ goto out_free_inode;
+#endif
+
+
return brd;
+#ifdef CONFIG_BLK_DEV_RAM_DAX
+out_free_inode:
+ kill_dax(brd->dax_dev);
+ put_dax(brd->dax_dev);
+#endif
out_free_queue:
blk_cleanup_queue(brd->brd_queue);
out_free_dev:
@@ -471,6 +493,10 @@ out:
static void brd_del_one(struct brd_device *brd)
{
list_del(&brd->brd_list);
+#ifdef CONFIG_BLK_DEV_RAM_DAX
+ kill_dax(brd->dax_dev);
+ put_dax(brd->dax_dev);
+#endif
del_gendisk(brd->brd_disk);
brd_free(brd);
}
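For reference, a condensed sketch (not part of the patch) of the dax_device lifecycle the brd conversion above follows: register a dax_operations table with alloc_dax(), and tear it down with kill_dax() plus put_dax(). The "example" structures and names are hypothetical:

	#include <linux/dax.h>
	#include <linux/pfn_t.h>

	struct example_dev {
		struct dax_device *dax_dev;
	};

	/* Stubbed ->direct_access(); a real driver resolves pgoff to its
	 * own memory, as brd_dax_direct_access() does above. */
	static long example_dax_direct_access(struct dax_device *dax_dev,
			pgoff_t pgoff, long nr_pages, void **kaddr, pfn_t *pfn)
	{
		struct example_dev *edev = dax_get_private(dax_dev);

		(void)edev;
		return -ENXIO;
	}

	static const struct dax_operations example_dax_ops = {
		.direct_access = example_dax_direct_access,
	};

	static int example_attach_dax(struct example_dev *edev, const char *name)
	{
		edev->dax_dev = alloc_dax(edev, name, &example_dax_ops);
		return edev->dax_dev ? 0 : -ENOMEM;
	}

	static void example_detach_dax(struct example_dev *edev)
	{
		kill_dax(edev->dax_dev);	/* invalidate outstanding mappings */
		put_dax(edev->dax_dev);		/* drop the alloc_dax() reference */
	}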
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
index dece26f119d4..a804a4107fbc 100644
--- a/drivers/block/drbd/drbd_bitmap.c
+++ b/drivers/block/drbd/drbd_bitmap.c
@@ -409,7 +409,7 @@ static struct page **bm_realloc_pages(struct drbd_bitmap *b, unsigned long want)
new_pages = kzalloc(bytes, GFP_NOIO | __GFP_NOWARN);
if (!new_pages) {
new_pages = __vmalloc(bytes,
- GFP_NOIO | __GFP_HIGHMEM | __GFP_ZERO,
+ GFP_NOIO | __GFP_ZERO,
PAGE_KERNEL);
if (!new_pages)
return NULL;
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 994403efee19..28d932906f24 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -1697,9 +1697,8 @@ static void loop_queue_work(struct kthread_work *work)
loop_handle_cmd(cmd);
}
-static int loop_init_request(void *data, struct request *rq,
- unsigned int hctx_idx, unsigned int request_idx,
- unsigned int numa_node)
+static int loop_init_request(struct blk_mq_tag_set *set, struct request *rq,
+ unsigned int hctx_idx, unsigned int numa_node)
{
struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);
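For reference, the reworked ->init_request() shape used by loop above (and by the other drivers converted in this patch) boils down to the sketch below; the "example" names are placeholders, and the point is that driver data now comes from set->driver_data instead of a separate void *data argument:

	#include <linux/blk-mq.h>

	struct example_cmd {
		void *drv;
	};

	static int example_init_request(struct blk_mq_tag_set *set,
			struct request *rq, unsigned int hctx_idx,
			unsigned int numa_node)
	{
		struct example_cmd *cmd = blk_mq_rq_to_pdu(rq);

		cmd->drv = set->driver_data;	/* was the old void *data */
		return 0;
	}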
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 02804cc79d82..3a779a4f5653 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -195,7 +195,7 @@ static struct mtip_cmd *mtip_get_int_command(struct driver_data *dd)
if (mtip_check_surprise_removal(dd->pdev))
return NULL;
- rq = blk_mq_alloc_request(dd->queue, 0, BLK_MQ_REQ_RESERVED);
+ rq = blk_mq_alloc_request(dd->queue, REQ_OP_DRV_IN, BLK_MQ_REQ_RESERVED);
if (IS_ERR(rq))
return NULL;
@@ -205,66 +205,12 @@ static struct mtip_cmd *mtip_get_int_command(struct driver_data *dd)
return blk_mq_rq_to_pdu(rq);
}
-static void mtip_put_int_command(struct driver_data *dd, struct mtip_cmd *cmd)
-{
- blk_put_request(blk_mq_rq_from_pdu(cmd));
-}
-
-/*
- * Once we add support for one hctx per mtip group, this will change a bit
- */
-static struct request *mtip_rq_from_tag(struct driver_data *dd,
- unsigned int tag)
-{
- struct blk_mq_hw_ctx *hctx = dd->queue->queue_hw_ctx[0];
-
- return blk_mq_tag_to_rq(hctx->tags, tag);
-}
-
static struct mtip_cmd *mtip_cmd_from_tag(struct driver_data *dd,
unsigned int tag)
{
- struct request *rq = mtip_rq_from_tag(dd, tag);
-
- return blk_mq_rq_to_pdu(rq);
-}
-
-/*
- * IO completion function.
- *
- * This completion function is called by the driver ISR when a
- * command that was issued by the kernel completes. It first calls the
- * asynchronous completion function which normally calls back into the block
- * layer passing the asynchronous callback data, then unmaps the
- * scatter list associated with the completed command, and finally
- * clears the allocated bit associated with the completed command.
- *
- * @port Pointer to the port data structure.
- * @tag Tag of the command.
- * @data Pointer to driver_data.
- * @status Completion status.
- *
- * return value
- * None
- */
-static void mtip_async_complete(struct mtip_port *port,
- int tag, struct mtip_cmd *cmd, int status)
-{
- struct driver_data *dd = port->dd;
- struct request *rq;
-
- if (unlikely(!dd) || unlikely(!port))
- return;
-
- if (unlikely(status == PORT_IRQ_TF_ERR)) {
- dev_warn(&port->dd->pdev->dev,
- "Command tag %d failed due to TFE\n", tag);
- }
-
- rq = mtip_rq_from_tag(dd, tag);
+ struct blk_mq_hw_ctx *hctx = dd->queue->queue_hw_ctx[0];
- cmd->status = status;
- blk_mq_complete_request(rq);
+ return blk_mq_rq_to_pdu(blk_mq_tag_to_rq(hctx->tags, tag));
}
/*
@@ -581,43 +527,19 @@ static void print_tags(struct driver_data *dd,
"%d command(s) %s: tagmap [%s]", cnt, msg, tagmap);
}
-/*
- * Internal command completion callback function.
- *
- * This function is normally called by the driver ISR when an internal
- * command completed. This function signals the command completion by
- * calling complete().
- *
- * @port Pointer to the port data structure.
- * @tag Tag of the command that has completed.
- * @data Pointer to a completion structure.
- * @status Completion status.
- *
- * return value
- * None
- */
-static void mtip_completion(struct mtip_port *port,
- int tag, struct mtip_cmd *command, int status)
-{
- struct completion *waiting = command->comp_data;
- if (unlikely(status == PORT_IRQ_TF_ERR))
- dev_warn(&port->dd->pdev->dev,
- "Internal command %d completed with TFE\n", tag);
-
- command->comp_func = NULL;
- command->comp_data = NULL;
- complete(waiting);
-}
-
-static void mtip_null_completion(struct mtip_port *port,
- int tag, struct mtip_cmd *command, int status)
-{
-}
-
static int mtip_read_log_page(struct mtip_port *port, u8 page, u16 *buffer,
dma_addr_t buffer_dma, unsigned int sectors);
static int mtip_get_smart_attr(struct mtip_port *port, unsigned int id,
struct smart_attr *attrib);
+
+static void mtip_complete_command(struct mtip_cmd *cmd, int status)
+{
+ struct request *req = blk_mq_rq_from_pdu(cmd);
+
+ cmd->status = status;
+ blk_mq_complete_request(req);
+}
+
/*
* Handle an error.
*
@@ -646,11 +568,7 @@ static void mtip_handle_tfe(struct driver_data *dd)
if (test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags)) {
cmd = mtip_cmd_from_tag(dd, MTIP_TAG_INTERNAL);
dbg_printk(MTIP_DRV_NAME " TFE for the internal command\n");
-
- if (cmd->comp_data && cmd->comp_func) {
- cmd->comp_func(port, MTIP_TAG_INTERNAL,
- cmd, PORT_IRQ_TF_ERR);
- }
+ mtip_complete_command(cmd, -EIO);
return;
}
@@ -677,19 +595,9 @@ static void mtip_handle_tfe(struct driver_data *dd)
continue;
cmd = mtip_cmd_from_tag(dd, tag);
- if (likely(cmd->comp_func)) {
- set_bit(tag, tagaccum);
- cmd_cnt++;
- cmd->comp_func(port, tag, cmd, 0);
- } else {
- dev_err(&port->dd->pdev->dev,
- "Missing completion func for tag %d",
- tag);
- if (mtip_check_surprise_removal(dd->pdev)) {
- /* don't proceed further */
- return;
- }
- }
+ mtip_complete_command(cmd, 0);
+ set_bit(tag, tagaccum);
+ cmd_cnt++;
}
}
@@ -759,10 +667,7 @@ static void mtip_handle_tfe(struct driver_data *dd)
tag,
fail_reason != NULL ?
fail_reason : "unknown");
- if (cmd->comp_func) {
- cmd->comp_func(port, tag,
- cmd, -ENODATA);
- }
+ mtip_complete_command(cmd, -ENODATA);
continue;
}
}
@@ -785,12 +690,7 @@ static void mtip_handle_tfe(struct driver_data *dd)
dev_warn(&port->dd->pdev->dev,
"retiring tag %d\n", tag);
- if (cmd->comp_func)
- cmd->comp_func(port, tag, cmd, PORT_IRQ_TF_ERR);
- else
- dev_warn(&port->dd->pdev->dev,
- "Bad completion for tag %d\n",
- tag);
+ mtip_complete_command(cmd, -EIO);
}
}
print_tags(dd, "reissued (TFE)", tagaccum, cmd_cnt);
@@ -823,18 +723,7 @@ static inline void mtip_workq_sdbfx(struct mtip_port *port, int group,
continue;
command = mtip_cmd_from_tag(dd, tag);
- if (likely(command->comp_func))
- command->comp_func(port, tag, command, 0);
- else {
- dev_dbg(&dd->pdev->dev,
- "Null completion for tag %d",
- tag);
-
- if (mtip_check_surprise_removal(
- dd->pdev)) {
- return;
- }
- }
+ mtip_complete_command(command, 0);
}
completed >>= 1;
}
@@ -852,16 +741,13 @@ static inline void mtip_process_legacy(struct driver_data *dd, u32 port_stat)
struct mtip_port *port = dd->port;
struct mtip_cmd *cmd = mtip_cmd_from_tag(dd, MTIP_TAG_INTERNAL);
- if (test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags) &&
- (cmd != NULL) && !(readl(port->cmd_issue[MTIP_TAG_INTERNAL])
- & (1 << MTIP_TAG_INTERNAL))) {
- if (cmd->comp_func) {
- cmd->comp_func(port, MTIP_TAG_INTERNAL, cmd, 0);
- return;
- }
- }
+ if (test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags) && cmd) {
+ int group = MTIP_TAG_INDEX(MTIP_TAG_INTERNAL);
+ int status = readl(port->cmd_issue[group]);
- return;
+ if (!(status & (1 << MTIP_TAG_BIT(MTIP_TAG_INTERNAL))))
+ mtip_complete_command(cmd, 0);
+ }
}
/*
@@ -869,7 +755,6 @@ static inline void mtip_process_legacy(struct driver_data *dd, u32 port_stat)
*/
static inline void mtip_process_errors(struct driver_data *dd, u32 port_stat)
{
-
if (unlikely(port_stat & PORT_IRQ_CONNECT)) {
dev_warn(&dd->pdev->dev,
"Clearing PxSERR.DIAG.x\n");
@@ -996,8 +881,7 @@ static irqreturn_t mtip_irq_handler(int irq, void *instance)
static void mtip_issue_non_ncq_command(struct mtip_port *port, int tag)
{
- writel(1 << MTIP_TAG_BIT(tag),
- port->cmd_issue[MTIP_TAG_INDEX(tag)]);
+ writel(1 << MTIP_TAG_BIT(tag), port->cmd_issue[MTIP_TAG_INDEX(tag)]);
}
static bool mtip_pause_ncq(struct mtip_port *port,
@@ -1035,53 +919,53 @@ static bool mtip_pause_ncq(struct mtip_port *port,
return false;
}
+static bool mtip_commands_active(struct mtip_port *port)
+{
+ unsigned int active;
+ unsigned int n;
+
+ /*
+ * Ignore s_active bit 0 of array element 0.
+ * This bit will always be set
+ */
+ active = readl(port->s_active[0]) & 0xFFFFFFFE;
+ for (n = 1; n < port->dd->slot_groups; n++)
+ active |= readl(port->s_active[n]);
+
+ return active != 0;
+}
+
/*
* Wait for port to quiesce
*
* @port Pointer to port data structure
* @timeout Max duration to wait (ms)
- * @atomic gfp_t flag to indicate blockable context or not
*
* return value
* 0 Success
* -EBUSY Commands still active
*/
-static int mtip_quiesce_io(struct mtip_port *port, unsigned long timeout,
- gfp_t atomic)
+static int mtip_quiesce_io(struct mtip_port *port, unsigned long timeout)
{
unsigned long to;
- unsigned int n;
- unsigned int active = 1;
+ bool active = true;
blk_mq_stop_hw_queues(port->dd->queue);
to = jiffies + msecs_to_jiffies(timeout);
do {
if (test_bit(MTIP_PF_SVC_THD_ACTIVE_BIT, &port->flags) &&
- test_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags) &&
- atomic == GFP_KERNEL) {
+ test_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags)) {
msleep(20);
continue; /* svc thd is actively issuing commands */
}
- if (atomic == GFP_KERNEL)
- msleep(100);
- else {
- cpu_relax();
- udelay(100);
- }
+ msleep(100);
if (mtip_check_surprise_removal(port->dd->pdev))
goto err_fault;
- /*
- * Ignore s_active bit 0 of array element 0.
- * This bit will always be set
- */
- active = readl(port->s_active[0]) & 0xFFFFFFFE;
- for (n = 1; n < port->dd->slot_groups; n++)
- active |= readl(port->s_active[n]);
-
+ active = mtip_commands_active(port);
if (!active)
break;
} while (time_before(jiffies, to));
@@ -1093,6 +977,13 @@ err_fault:
return -EFAULT;
}
+struct mtip_int_cmd {
+ int fis_len;
+ dma_addr_t buffer;
+ int buf_len;
+ u32 opts;
+};
+
/*
* Execute an internal command and wait for the completion.
*
@@ -1117,13 +1008,17 @@ static int mtip_exec_internal_command(struct mtip_port *port,
dma_addr_t buffer,
int buf_len,
u32 opts,
- gfp_t atomic,
unsigned long timeout)
{
- struct mtip_cmd_sg *command_sg;
- DECLARE_COMPLETION_ONSTACK(wait);
struct mtip_cmd *int_cmd;
struct driver_data *dd = port->dd;
+ struct request *rq;
+ struct mtip_int_cmd icmd = {
+ .fis_len = fis_len,
+ .buffer = buffer,
+ .buf_len = buf_len,
+ .opts = opts
+ };
int rv = 0;
unsigned long start;
@@ -1138,6 +1033,8 @@ static int mtip_exec_internal_command(struct mtip_port *port,
dbg_printk(MTIP_DRV_NAME "Unable to allocate tag for PIO cmd\n");
return -EFAULT;
}
+ rq = blk_mq_rq_from_pdu(int_cmd);
+ rq->special = &icmd;
set_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags);
@@ -1146,135 +1043,60 @@ static int mtip_exec_internal_command(struct mtip_port *port,
clear_bit(MTIP_PF_DM_ACTIVE_BIT, &port->flags);
- if (atomic == GFP_KERNEL) {
- if (fis->command != ATA_CMD_STANDBYNOW1) {
- /* wait for io to complete if non atomic */
- if (mtip_quiesce_io(port,
- MTIP_QUIESCE_IO_TIMEOUT_MS, atomic) < 0) {
- dev_warn(&dd->pdev->dev,
- "Failed to quiesce IO\n");
- mtip_put_int_command(dd, int_cmd);
- clear_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags);
- wake_up_interruptible(&port->svc_wait);
- return -EBUSY;
- }
+ if (fis->command != ATA_CMD_STANDBYNOW1) {
+ /* wait for io to complete if non atomic */
+ if (mtip_quiesce_io(port, MTIP_QUIESCE_IO_TIMEOUT_MS) < 0) {
+ dev_warn(&dd->pdev->dev, "Failed to quiesce IO\n");
+ blk_mq_free_request(rq);
+ clear_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags);
+ wake_up_interruptible(&port->svc_wait);
+ return -EBUSY;
}
-
- /* Set the completion function and data for the command. */
- int_cmd->comp_data = &wait;
- int_cmd->comp_func = mtip_completion;
-
- } else {
- /* Clear completion - we're going to poll */
- int_cmd->comp_data = NULL;
- int_cmd->comp_func = mtip_null_completion;
}
/* Copy the command to the command table */
memcpy(int_cmd->command, fis, fis_len*4);
- /* Populate the SG list */
- int_cmd->command_header->opts =
- __force_bit2int cpu_to_le32(opts | fis_len);
- if (buf_len) {
- command_sg = int_cmd->command + AHCI_CMD_TBL_HDR_SZ;
-
- command_sg->info =
- __force_bit2int cpu_to_le32((buf_len-1) & 0x3FFFFF);
- command_sg->dba =
- __force_bit2int cpu_to_le32(buffer & 0xFFFFFFFF);
- command_sg->dba_upper =
- __force_bit2int cpu_to_le32((buffer >> 16) >> 16);
-
- int_cmd->command_header->opts |=
- __force_bit2int cpu_to_le32((1 << 16));
- }
-
- /* Populate the command header */
- int_cmd->command_header->byte_count = 0;
-
start = jiffies;
+ rq->timeout = timeout;
- /* Issue the command to the hardware */
- mtip_issue_non_ncq_command(port, MTIP_TAG_INTERNAL);
-
- if (atomic == GFP_KERNEL) {
- /* Wait for the command to complete or timeout. */
- if ((rv = wait_for_completion_interruptible_timeout(
- &wait,
- msecs_to_jiffies(timeout))) <= 0) {
-
- if (rv == -ERESTARTSYS) { /* interrupted */
- dev_err(&dd->pdev->dev,
- "Internal command [%02X] was interrupted after %u ms\n",
- fis->command,
- jiffies_to_msecs(jiffies - start));
- rv = -EINTR;
- goto exec_ic_exit;
- } else if (rv == 0) /* timeout */
- dev_err(&dd->pdev->dev,
- "Internal command did not complete [%02X] within timeout of %lu ms\n",
- fis->command, timeout);
- else
- dev_err(&dd->pdev->dev,
- "Internal command [%02X] wait returned code [%d] after %lu ms - unhandled\n",
- fis->command, rv, timeout);
-
- if (mtip_check_surprise_removal(dd->pdev) ||
- test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
- &dd->dd_flag)) {
- dev_err(&dd->pdev->dev,
- "Internal command [%02X] wait returned due to SR\n",
- fis->command);
- rv = -ENXIO;
- goto exec_ic_exit;
- }
- mtip_device_reset(dd); /* recover from timeout issue */
- rv = -EAGAIN;
+ /* insert request and run queue */
+ blk_execute_rq(rq->q, NULL, rq, true);
+
+ rv = int_cmd->status;
+ if (rv < 0) {
+ if (rv == -ERESTARTSYS) { /* interrupted */
+ dev_err(&dd->pdev->dev,
+ "Internal command [%02X] was interrupted after %u ms\n",
+ fis->command,
+ jiffies_to_msecs(jiffies - start));
+ rv = -EINTR;
goto exec_ic_exit;
- }
- } else {
- u32 hba_stat, port_stat;
-
- /* Spin for <timeout> checking if command still outstanding */
- timeout = jiffies + msecs_to_jiffies(timeout);
- while ((readl(port->cmd_issue[MTIP_TAG_INTERNAL])
- & (1 << MTIP_TAG_INTERNAL))
- && time_before(jiffies, timeout)) {
- if (mtip_check_surprise_removal(dd->pdev)) {
- rv = -ENXIO;
- goto exec_ic_exit;
- }
- if ((fis->command != ATA_CMD_STANDBYNOW1) &&
- test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
- &dd->dd_flag)) {
- rv = -ENXIO;
- goto exec_ic_exit;
- }
- port_stat = readl(port->mmio + PORT_IRQ_STAT);
- if (!port_stat)
- continue;
+ } else if (rv == 0) /* timeout */
+ dev_err(&dd->pdev->dev,
+ "Internal command did not complete [%02X] within timeout of %lu ms\n",
+ fis->command, timeout);
+ else
+ dev_err(&dd->pdev->dev,
+ "Internal command [%02X] wait returned code [%d] after %lu ms - unhandled\n",
+ fis->command, rv, timeout);
- if (port_stat & PORT_IRQ_ERR) {
- dev_err(&dd->pdev->dev,
- "Internal command [%02X] failed\n",
- fis->command);
- mtip_device_reset(dd);
- rv = -EIO;
- goto exec_ic_exit;
- } else {
- writel(port_stat, port->mmio + PORT_IRQ_STAT);
- hba_stat = readl(dd->mmio + HOST_IRQ_STAT);
- if (hba_stat)
- writel(hba_stat,
- dd->mmio + HOST_IRQ_STAT);
- }
- break;
+ if (mtip_check_surprise_removal(dd->pdev) ||
+ test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
+ &dd->dd_flag)) {
+ dev_err(&dd->pdev->dev,
+ "Internal command [%02X] wait returned due to SR\n",
+ fis->command);
+ rv = -ENXIO;
+ goto exec_ic_exit;
}
+ mtip_device_reset(dd); /* recover from timeout issue */
+ rv = -EAGAIN;
+ goto exec_ic_exit;
}
- if (readl(port->cmd_issue[MTIP_TAG_INTERNAL])
- & (1 << MTIP_TAG_INTERNAL)) {
+ if (readl(port->cmd_issue[MTIP_TAG_INDEX(MTIP_TAG_INTERNAL)])
+ & (1 << MTIP_TAG_BIT(MTIP_TAG_INTERNAL))) {
rv = -ENXIO;
if (!test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag)) {
mtip_device_reset(dd);
@@ -1283,7 +1105,7 @@ static int mtip_exec_internal_command(struct mtip_port *port,
}
exec_ic_exit:
/* Clear the allocated and active bits for the internal command. */
- mtip_put_int_command(dd, int_cmd);
+ blk_mq_free_request(rq);
clear_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags);
if (rv >= 0 && mtip_pause_ncq(port, fis)) {
/* NCQ paused */
@@ -1391,7 +1213,6 @@ static int mtip_get_identify(struct mtip_port *port, void __user *user_buffer)
port->identify_dma,
sizeof(u16) * ATA_ID_WORDS,
0,
- GFP_KERNEL,
MTIP_INT_CMD_TIMEOUT_MS)
< 0) {
rv = -1;
@@ -1477,7 +1298,6 @@ static int mtip_standby_immediate(struct mtip_port *port)
0,
0,
0,
- GFP_ATOMIC,
timeout);
dbg_printk(MTIP_DRV_NAME "Time taken to complete standby cmd: %d ms\n",
jiffies_to_msecs(jiffies - start));
@@ -1523,7 +1343,6 @@ static int mtip_read_log_page(struct mtip_port *port, u8 page, u16 *buffer,
buffer_dma,
sectors * ATA_SECT_SIZE,
0,
- GFP_ATOMIC,
MTIP_INT_CMD_TIMEOUT_MS);
}
@@ -1558,7 +1377,6 @@ static int mtip_get_smart_data(struct mtip_port *port, u8 *buffer,
buffer_dma,
ATA_SECT_SIZE,
0,
- GFP_ATOMIC,
15000);
}
@@ -1686,7 +1504,6 @@ static int mtip_send_trim(struct driver_data *dd, unsigned int lba,
dma_addr,
ATA_SECT_SIZE,
0,
- GFP_KERNEL,
MTIP_TRIM_TIMEOUT_MS) < 0)
rv = -EIO;
@@ -1850,7 +1667,6 @@ static int exec_drive_task(struct mtip_port *port, u8 *command)
0,
0,
0,
- GFP_KERNEL,
to) < 0) {
return -1;
}
@@ -1946,7 +1762,6 @@ static int exec_drive_command(struct mtip_port *port, u8 *command,
(xfer_sz ? dma_addr : 0),
(xfer_sz ? ATA_SECT_SIZE * xfer_sz : 0),
0,
- GFP_KERNEL,
to)
< 0) {
rv = -EFAULT;
@@ -2189,7 +2004,6 @@ static int exec_drive_taskfile(struct driver_data *dd,
dma_buffer,
transfer_size,
0,
- GFP_KERNEL,
timeout) < 0) {
err = -EIO;
goto abort;
@@ -2446,12 +2260,6 @@ static void mtip_hw_submit_io(struct driver_data *dd, struct request *rq,
(nents << 16) | 5 | AHCI_CMD_PREFETCH);
command->command_header->byte_count = 0;
- /*
- * Set the completion function and data for the command
- * within this layer.
- */
- command->comp_data = dd;
- command->comp_func = mtip_async_complete;
command->direction = dma_dir;
/*
@@ -3825,6 +3633,42 @@ static bool mtip_check_unal_depth(struct blk_mq_hw_ctx *hctx,
return false;
}
+static int mtip_issue_reserved_cmd(struct blk_mq_hw_ctx *hctx,
+ struct request *rq)
+{
+ struct driver_data *dd = hctx->queue->queuedata;
+ struct mtip_int_cmd *icmd = rq->special;
+ struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
+ struct mtip_cmd_sg *command_sg;
+
+ if (mtip_commands_active(dd->port))
+ return BLK_MQ_RQ_QUEUE_BUSY;
+
+ /* Populate the SG list */
+ cmd->command_header->opts =
+ __force_bit2int cpu_to_le32(icmd->opts | icmd->fis_len);
+ if (icmd->buf_len) {
+ command_sg = cmd->command + AHCI_CMD_TBL_HDR_SZ;
+
+ command_sg->info =
+ __force_bit2int cpu_to_le32((icmd->buf_len-1) & 0x3FFFFF);
+ command_sg->dba =
+ __force_bit2int cpu_to_le32(icmd->buffer & 0xFFFFFFFF);
+ command_sg->dba_upper =
+ __force_bit2int cpu_to_le32((icmd->buffer >> 16) >> 16);
+
+ cmd->command_header->opts |=
+ __force_bit2int cpu_to_le32((1 << 16));
+ }
+
+ /* Populate the command header */
+ cmd->command_header->byte_count = 0;
+
+ blk_mq_start_request(rq);
+ mtip_issue_non_ncq_command(dd->port, rq->tag);
+ return BLK_MQ_RQ_QUEUE_OK;
+}
+
static int mtip_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
@@ -3833,6 +3677,9 @@ static int mtip_queue_rq(struct blk_mq_hw_ctx *hctx,
mtip_init_cmd_header(rq);
+ if (blk_rq_is_passthrough(rq))
+ return mtip_issue_reserved_cmd(hctx, rq);
+
if (unlikely(mtip_check_unal_depth(hctx, rq)))
return BLK_MQ_RQ_QUEUE_BUSY;
@@ -3845,10 +3692,10 @@ static int mtip_queue_rq(struct blk_mq_hw_ctx *hctx,
return BLK_MQ_RQ_QUEUE_ERROR;
}
-static void mtip_free_cmd(void *data, struct request *rq,
- unsigned int hctx_idx, unsigned int request_idx)
+static void mtip_free_cmd(struct blk_mq_tag_set *set, struct request *rq,
+ unsigned int hctx_idx)
{
- struct driver_data *dd = data;
+ struct driver_data *dd = set->driver_data;
struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
if (!cmd->command)
@@ -3858,20 +3705,12 @@ static void mtip_free_cmd(void *data, struct request *rq,
cmd->command, cmd->command_dma);
}
-static int mtip_init_cmd(void *data, struct request *rq, unsigned int hctx_idx,
- unsigned int request_idx, unsigned int numa_node)
+static int mtip_init_cmd(struct blk_mq_tag_set *set, struct request *rq,
+ unsigned int hctx_idx, unsigned int numa_node)
{
- struct driver_data *dd = data;
+ struct driver_data *dd = set->driver_data;
struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
- /*
- * For flush requests, request_idx starts at the end of the
- * tag space. Since we don't support FLUSH/FUA, simply return
- * 0 as there's nothing to be done.
- */
- if (request_idx >= MTIP_MAX_COMMAND_SLOTS)
- return 0;
-
cmd->command = dmam_alloc_coherent(&dd->pdev->dev, CMD_DMA_ALLOC_SZ,
&cmd->command_dma, GFP_KERNEL);
if (!cmd->command)
@@ -3888,8 +3727,12 @@ static enum blk_eh_timer_return mtip_cmd_timeout(struct request *req,
{
struct driver_data *dd = req->q->queuedata;
- if (reserved)
- goto exit_handler;
+ if (reserved) {
+ struct mtip_cmd *cmd = blk_mq_rq_to_pdu(req);
+
+ cmd->status = -ETIME;
+ return BLK_EH_HANDLED;
+ }
if (test_bit(req->tag, dd->port->cmds_to_issue))
goto exit_handler;
@@ -3982,7 +3825,7 @@ static int mtip_block_initialize(struct driver_data *dd)
dd->tags.reserved_tags = 1;
dd->tags.cmd_size = sizeof(struct mtip_cmd);
dd->tags.numa_node = dd->numa_node;
- dd->tags.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_NO_SCHED;
+ dd->tags.flags = BLK_MQ_F_SHOULD_MERGE;
dd->tags.driver_data = dd;
dd->tags.timeout = MTIP_NCQ_CMD_TIMEOUT_MS;
@@ -4116,20 +3959,10 @@ protocol_init_error:
static void mtip_no_dev_cleanup(struct request *rq, void *data, bool reserv)
{
- struct driver_data *dd = (struct driver_data *)data;
- struct mtip_cmd *cmd;
-
- if (likely(!reserv)) {
- cmd = blk_mq_rq_to_pdu(rq);
- cmd->status = -ENODEV;
- blk_mq_complete_request(rq);
- } else if (test_bit(MTIP_PF_IC_ACTIVE_BIT, &dd->port->flags)) {
+ struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
- cmd = mtip_cmd_from_tag(dd, MTIP_TAG_INTERNAL);
- if (cmd->comp_func)
- cmd->comp_func(dd->port, MTIP_TAG_INTERNAL,
- cmd, -ENODEV);
- }
+ cmd->status = -ENODEV;
+ blk_mq_complete_request(rq);
}
/*
@@ -4168,8 +4001,7 @@ static int mtip_block_remove(struct driver_data *dd)
* Explicitly wait here for IOs to quiesce,
* as mtip_standby_drive usually won't wait for IOs.
*/
- if (!mtip_quiesce_io(dd->port, MTIP_QUIESCE_IO_TIMEOUT_MS,
- GFP_KERNEL))
+ if (!mtip_quiesce_io(dd->port, MTIP_QUIESCE_IO_TIMEOUT_MS))
mtip_standby_drive(dd);
}
else
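For reference, a condensed sketch (helper name hypothetical, types are the driver's own, not part of the patch) of the internal-command flow mtip_exec_internal_command() switches to above: a reserved request carries its parameters in rq->special and is pushed through the normal blk-mq path with blk_execute_rq() instead of a private completion callback:

	static int example_issue_internal(struct driver_data *dd,
			struct mtip_int_cmd *icmd, unsigned int timeout_ms)
	{
		struct request *rq;
		struct mtip_cmd *cmd;
		int ret;

		rq = blk_mq_alloc_request(dd->queue, REQ_OP_DRV_IN,
					  BLK_MQ_REQ_RESERVED);
		if (IS_ERR(rq))
			return PTR_ERR(rq);

		cmd = blk_mq_rq_to_pdu(rq);
		rq->special = icmd;			/* read back in ->queue_rq() */
		rq->timeout = msecs_to_jiffies(timeout_ms);

		blk_execute_rq(rq->q, NULL, rq, true);	/* waits for completion */

		ret = cmd->status;			/* set by mtip_complete_command() */
		blk_mq_free_request(rq);
		return ret;
	}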
diff --git a/drivers/block/mtip32xx/mtip32xx.h b/drivers/block/mtip32xx/mtip32xx.h
index 57b41528a824..37b8e3e0bb78 100644
--- a/drivers/block/mtip32xx/mtip32xx.h
+++ b/drivers/block/mtip32xx/mtip32xx.h
@@ -333,16 +333,6 @@ struct mtip_cmd {
dma_addr_t command_dma; /* corresponding physical address */
- void *comp_data; /* data passed to completion function comp_func() */
- /*
- * Completion function called by the ISR upon completion of
- * a command.
- */
- void (*comp_func)(struct mtip_port *port,
- int tag,
- struct mtip_cmd *cmd,
- int status);
-
int scatter_ents; /* Number of scatter list entries used */
int unaligned; /* command is unaligned on 4k boundary */
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 9b482baa869e..9a7bb2c29447 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -18,6 +18,7 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
+#include <linux/sched/mm.h>
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/stat.h>
@@ -347,7 +348,7 @@ static int sock_xmit(struct nbd_device *nbd, int index, int send,
struct socket *sock = config->socks[index]->sock;
int result;
struct msghdr msg;
- unsigned long pflags = current->flags;
+ unsigned int noreclaim_flag;
if (unlikely(!sock)) {
dev_err_ratelimited(disk_to_dev(nbd->disk),
@@ -358,7 +359,7 @@ static int sock_xmit(struct nbd_device *nbd, int index, int send,
msg.msg_iter = *iter;
- current->flags |= PF_MEMALLOC;
+ noreclaim_flag = memalloc_noreclaim_save();
do {
sock->sk->sk_allocation = GFP_NOIO | __GFP_MEMALLOC;
msg.msg_name = NULL;
@@ -381,7 +382,7 @@ static int sock_xmit(struct nbd_device *nbd, int index, int send,
*sent += result;
} while (msg_data_left(&msg));
- current_restore_flags(pflags, PF_MEMALLOC);
+ memalloc_noreclaim_restore(noreclaim_flag);
return result;
}
@@ -1396,12 +1397,11 @@ static void nbd_dbg_close(void)
#endif
-static int nbd_init_request(void *data, struct request *rq,
- unsigned int hctx_idx, unsigned int request_idx,
- unsigned int numa_node)
+static int nbd_init_request(struct blk_mq_tag_set *set, struct request *rq,
+ unsigned int hctx_idx, unsigned int numa_node)
{
struct nbd_cmd *cmd = blk_mq_rq_to_pdu(rq);
- cmd->nbd = data;
+ cmd->nbd = set->driver_data;
return 0;
}
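For reference, the memalloc_noreclaim_save()/memalloc_noreclaim_restore() pairing nbd adopts above replaces open-coded PF_MEMALLOC juggling; a minimal sketch with a hypothetical transmit callback:

	#include <linux/sched/mm.h>

	/* Allocations inside the critical section may dip into reserves
	 * but will not recurse into reclaim. */
	static int example_send(int (*xmit)(void *), void *arg)
	{
		unsigned int noreclaim_flag;
		int ret;

		noreclaim_flag = memalloc_noreclaim_save();
		ret = xmit(arg);
		memalloc_noreclaim_restore(noreclaim_flag);

		return ret;
	}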
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 089ac4179919..454bf9c34882 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -387,6 +387,7 @@ struct rbd_device {
struct rw_semaphore lock_rwsem;
enum rbd_lock_state lock_state;
+ char lock_cookie[32];
struct rbd_client_id owner_cid;
struct work_struct acquired_lock_work;
struct work_struct released_lock_work;
@@ -477,13 +478,6 @@ static int minor_to_rbd_dev_id(int minor)
return minor >> RBD_SINGLE_MAJOR_PART_SHIFT;
}
-static bool rbd_is_lock_supported(struct rbd_device *rbd_dev)
-{
- return (rbd_dev->header.features & RBD_FEATURE_EXCLUSIVE_LOCK) &&
- rbd_dev->spec->snap_id == CEPH_NOSNAP &&
- !rbd_dev->mapping.read_only;
-}
-
static bool __rbd_is_lock_owner(struct rbd_device *rbd_dev)
{
return rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED ||
@@ -731,7 +725,7 @@ static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
kref_init(&rbdc->kref);
INIT_LIST_HEAD(&rbdc->node);
- rbdc->client = ceph_create_client(ceph_opts, rbdc, 0, 0);
+ rbdc->client = ceph_create_client(ceph_opts, rbdc);
if (IS_ERR(rbdc->client))
goto out_rbdc;
ceph_opts = NULL; /* Now rbdc->client is responsible for ceph_opts */
@@ -804,6 +798,7 @@ enum {
Opt_read_only,
Opt_read_write,
Opt_lock_on_read,
+ Opt_exclusive,
Opt_err
};
@@ -816,6 +811,7 @@ static match_table_t rbd_opts_tokens = {
{Opt_read_write, "read_write"},
{Opt_read_write, "rw"}, /* Alternate spelling */
{Opt_lock_on_read, "lock_on_read"},
+ {Opt_exclusive, "exclusive"},
{Opt_err, NULL}
};
@@ -823,11 +819,13 @@ struct rbd_options {
int queue_depth;
bool read_only;
bool lock_on_read;
+ bool exclusive;
};
#define RBD_QUEUE_DEPTH_DEFAULT BLKDEV_MAX_RQ
#define RBD_READ_ONLY_DEFAULT false
#define RBD_LOCK_ON_READ_DEFAULT false
+#define RBD_EXCLUSIVE_DEFAULT false
static int parse_rbd_opts_token(char *c, void *private)
{
@@ -866,6 +864,9 @@ static int parse_rbd_opts_token(char *c, void *private)
case Opt_lock_on_read:
rbd_opts->lock_on_read = true;
break;
+ case Opt_exclusive:
+ rbd_opts->exclusive = true;
+ break;
default:
/* libceph prints "bad option" msg */
return -EINVAL;
@@ -1922,7 +1923,7 @@ static void rbd_osd_req_format_write(struct rbd_obj_request *obj_request)
{
struct ceph_osd_request *osd_req = obj_request->osd_req;
- osd_req->r_mtime = CURRENT_TIME;
+ ktime_get_real_ts(&osd_req->r_mtime);
osd_req->r_data_offset = obj_request->offset;
}
@@ -3079,7 +3080,8 @@ static int rbd_lock(struct rbd_device *rbd_dev)
char cookie[32];
int ret;
- WARN_ON(__rbd_is_lock_owner(rbd_dev));
+ WARN_ON(__rbd_is_lock_owner(rbd_dev) ||
+ rbd_dev->lock_cookie[0] != '\0');
format_lock_cookie(rbd_dev, cookie);
ret = ceph_cls_lock(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
@@ -3089,6 +3091,7 @@ static int rbd_lock(struct rbd_device *rbd_dev)
return ret;
rbd_dev->lock_state = RBD_LOCK_STATE_LOCKED;
+ strcpy(rbd_dev->lock_cookie, cookie);
rbd_set_owner_cid(rbd_dev, &cid);
queue_work(rbd_dev->task_wq, &rbd_dev->acquired_lock_work);
return 0;
@@ -3097,27 +3100,24 @@ static int rbd_lock(struct rbd_device *rbd_dev)
/*
* lock_rwsem must be held for write
*/
-static int rbd_unlock(struct rbd_device *rbd_dev)
+static void rbd_unlock(struct rbd_device *rbd_dev)
{
struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
- char cookie[32];
int ret;
- WARN_ON(!__rbd_is_lock_owner(rbd_dev));
-
- rbd_dev->lock_state = RBD_LOCK_STATE_UNLOCKED;
+ WARN_ON(!__rbd_is_lock_owner(rbd_dev) ||
+ rbd_dev->lock_cookie[0] == '\0');
- format_lock_cookie(rbd_dev, cookie);
ret = ceph_cls_unlock(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
- RBD_LOCK_NAME, cookie);
- if (ret && ret != -ENOENT) {
- rbd_warn(rbd_dev, "cls_unlock failed: %d", ret);
- return ret;
- }
+ RBD_LOCK_NAME, rbd_dev->lock_cookie);
+ if (ret && ret != -ENOENT)
+ rbd_warn(rbd_dev, "failed to unlock: %d", ret);
+ /* treat errors as the image is unlocked */
+ rbd_dev->lock_state = RBD_LOCK_STATE_UNLOCKED;
+ rbd_dev->lock_cookie[0] = '\0';
rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
queue_work(rbd_dev->task_wq, &rbd_dev->released_lock_work);
- return 0;
}
static int __rbd_notify_op_lock(struct rbd_device *rbd_dev,
@@ -3447,6 +3447,18 @@ again:
ret = rbd_request_lock(rbd_dev);
if (ret == -ETIMEDOUT) {
goto again; /* treat this as a dead client */
+ } else if (ret == -EROFS) {
+ rbd_warn(rbd_dev, "peer will not release lock");
+ /*
+ * If this is rbd_add_acquire_lock(), we want to fail
+ * immediately -- reuse BLACKLISTED flag. Otherwise we
+ * want to block.
+ */
+ if (!(rbd_dev->disk->flags & GENHD_FL_UP)) {
+ set_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags);
+ /* wake "rbd map --exclusive" process */
+ wake_requests(rbd_dev, false);
+ }
} else if (ret < 0) {
rbd_warn(rbd_dev, "error requesting lock: %d", ret);
mod_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork,
@@ -3490,16 +3502,15 @@ static bool rbd_release_lock(struct rbd_device *rbd_dev)
if (rbd_dev->lock_state != RBD_LOCK_STATE_RELEASING)
return false;
- if (!rbd_unlock(rbd_dev))
- /*
- * Give others a chance to grab the lock - we would re-acquire
- * almost immediately if we got new IO during ceph_osdc_sync()
- * otherwise. We need to ack our own notifications, so this
- * lock_dwork will be requeued from rbd_wait_state_locked()
- * after wake_requests() in rbd_handle_released_lock().
- */
- cancel_delayed_work(&rbd_dev->lock_dwork);
-
+ rbd_unlock(rbd_dev);
+ /*
+ * Give others a chance to grab the lock - we would re-acquire
+ * almost immediately if we got new IO during ceph_osdc_sync()
+ * otherwise. We need to ack our own notifications, so this
+ * lock_dwork will be requeued from rbd_wait_state_locked()
+ * after wake_requests() in rbd_handle_released_lock().
+ */
+ cancel_delayed_work(&rbd_dev->lock_dwork);
return true;
}
@@ -3580,12 +3591,16 @@ static void rbd_handle_released_lock(struct rbd_device *rbd_dev, u8 struct_v,
up_read(&rbd_dev->lock_rwsem);
}
-static bool rbd_handle_request_lock(struct rbd_device *rbd_dev, u8 struct_v,
- void **p)
+/*
+ * Returns result for ResponseMessage to be encoded (<= 0), or 1 if no
+ * ResponseMessage is needed.
+ */
+static int rbd_handle_request_lock(struct rbd_device *rbd_dev, u8 struct_v,
+ void **p)
{
struct rbd_client_id my_cid = rbd_get_cid(rbd_dev);
struct rbd_client_id cid = { 0 };
- bool need_to_send;
+ int result = 1;
if (struct_v >= 2) {
cid.gid = ceph_decode_64(p);
@@ -3595,19 +3610,36 @@ static bool rbd_handle_request_lock(struct rbd_device *rbd_dev, u8 struct_v,
dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid,
cid.handle);
if (rbd_cid_equal(&cid, &my_cid))
- return false;
+ return result;
down_read(&rbd_dev->lock_rwsem);
- need_to_send = __rbd_is_lock_owner(rbd_dev);
- if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED) {
- if (!rbd_cid_equal(&rbd_dev->owner_cid, &rbd_empty_cid)) {
- dout("%s rbd_dev %p queueing unlock_work\n", __func__,
- rbd_dev);
- queue_work(rbd_dev->task_wq, &rbd_dev->unlock_work);
+ if (__rbd_is_lock_owner(rbd_dev)) {
+ if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED &&
+ rbd_cid_equal(&rbd_dev->owner_cid, &rbd_empty_cid))
+ goto out_unlock;
+
+ /*
+ * encode ResponseMessage(0) so the peer can detect
+ * a missing owner
+ */
+ result = 0;
+
+ if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED) {
+ if (!rbd_dev->opts->exclusive) {
+ dout("%s rbd_dev %p queueing unlock_work\n",
+ __func__, rbd_dev);
+ queue_work(rbd_dev->task_wq,
+ &rbd_dev->unlock_work);
+ } else {
+ /* refuse to release the lock */
+ result = -EROFS;
+ }
}
}
+
+out_unlock:
up_read(&rbd_dev->lock_rwsem);
- return need_to_send;
+ return result;
}
static void __rbd_acknowledge_notify(struct rbd_device *rbd_dev,
@@ -3690,13 +3722,10 @@ static void rbd_watch_cb(void *arg, u64 notify_id, u64 cookie,
rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
break;
case RBD_NOTIFY_OP_REQUEST_LOCK:
- if (rbd_handle_request_lock(rbd_dev, struct_v, &p))
- /*
- * send ResponseMessage(0) back so the client
- * can detect a missing owner
- */
+ ret = rbd_handle_request_lock(rbd_dev, struct_v, &p);
+ if (ret <= 0)
rbd_acknowledge_notify_result(rbd_dev, notify_id,
- cookie, 0);
+ cookie, ret);
else
rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
break;
@@ -3821,24 +3850,51 @@ static void rbd_unregister_watch(struct rbd_device *rbd_dev)
ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc);
}
+/*
+ * lock_rwsem must be held for write
+ */
+static void rbd_reacquire_lock(struct rbd_device *rbd_dev)
+{
+ struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
+ char cookie[32];
+ int ret;
+
+ WARN_ON(rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED);
+
+ format_lock_cookie(rbd_dev, cookie);
+ ret = ceph_cls_set_cookie(osdc, &rbd_dev->header_oid,
+ &rbd_dev->header_oloc, RBD_LOCK_NAME,
+ CEPH_CLS_LOCK_EXCLUSIVE, rbd_dev->lock_cookie,
+ RBD_LOCK_TAG, cookie);
+ if (ret) {
+ if (ret != -EOPNOTSUPP)
+ rbd_warn(rbd_dev, "failed to update lock cookie: %d",
+ ret);
+
+ /*
+ * Lock cookie cannot be updated on older OSDs, so do
+ * a manual release and queue an acquire.
+ */
+ if (rbd_release_lock(rbd_dev))
+ queue_delayed_work(rbd_dev->task_wq,
+ &rbd_dev->lock_dwork, 0);
+ } else {
+ strcpy(rbd_dev->lock_cookie, cookie);
+ }
+}
+
static void rbd_reregister_watch(struct work_struct *work)
{
struct rbd_device *rbd_dev = container_of(to_delayed_work(work),
struct rbd_device, watch_dwork);
- bool was_lock_owner = false;
- bool need_to_wake = false;
int ret;
dout("%s rbd_dev %p\n", __func__, rbd_dev);
- down_write(&rbd_dev->lock_rwsem);
- if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED)
- was_lock_owner = rbd_release_lock(rbd_dev);
-
mutex_lock(&rbd_dev->watch_mutex);
if (rbd_dev->watch_state != RBD_WATCH_STATE_ERROR) {
mutex_unlock(&rbd_dev->watch_mutex);
- goto out;
+ return;
}
ret = __rbd_register_watch(rbd_dev);
@@ -3846,36 +3902,28 @@ static void rbd_reregister_watch(struct work_struct *work)
rbd_warn(rbd_dev, "failed to reregister watch: %d", ret);
if (ret == -EBLACKLISTED || ret == -ENOENT) {
set_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags);
- need_to_wake = true;
+ wake_requests(rbd_dev, true);
} else {
queue_delayed_work(rbd_dev->task_wq,
&rbd_dev->watch_dwork,
RBD_RETRY_DELAY);
}
mutex_unlock(&rbd_dev->watch_mutex);
- goto out;
+ return;
}
- need_to_wake = true;
rbd_dev->watch_state = RBD_WATCH_STATE_REGISTERED;
rbd_dev->watch_cookie = rbd_dev->watch_handle->linger_id;
mutex_unlock(&rbd_dev->watch_mutex);
+ down_write(&rbd_dev->lock_rwsem);
+ if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED)
+ rbd_reacquire_lock(rbd_dev);
+ up_write(&rbd_dev->lock_rwsem);
+
ret = rbd_dev_refresh(rbd_dev);
if (ret)
rbd_warn(rbd_dev, "reregistration refresh failed: %d", ret);
-
- if (was_lock_owner) {
- ret = rbd_try_lock(rbd_dev);
- if (ret)
- rbd_warn(rbd_dev, "reregisteration lock failed: %d",
- ret);
- }
-
-out:
- up_write(&rbd_dev->lock_rwsem);
- if (need_to_wake)
- wake_requests(rbd_dev, true);
}
/*
@@ -4034,10 +4082,6 @@ static void rbd_queue_workfn(struct work_struct *work)
if (op_type != OBJ_OP_READ) {
snapc = rbd_dev->header.snapc;
ceph_get_snap_context(snapc);
- must_be_locked = rbd_is_lock_supported(rbd_dev);
- } else {
- must_be_locked = rbd_dev->opts->lock_on_read &&
- rbd_is_lock_supported(rbd_dev);
}
up_read(&rbd_dev->header_rwsem);
@@ -4048,14 +4092,20 @@ static void rbd_queue_workfn(struct work_struct *work)
goto err_rq;
}
+ must_be_locked =
+ (rbd_dev->header.features & RBD_FEATURE_EXCLUSIVE_LOCK) &&
+ (op_type != OBJ_OP_READ || rbd_dev->opts->lock_on_read);
if (must_be_locked) {
down_read(&rbd_dev->lock_rwsem);
if (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED &&
- !test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags))
+ !test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags)) {
+ if (rbd_dev->opts->exclusive) {
+ rbd_warn(rbd_dev, "exclusive lock required");
+ result = -EROFS;
+ goto err_unlock;
+ }
rbd_wait_state_locked(rbd_dev);
-
- WARN_ON((rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED) ^
- !test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags));
+ }
if (test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags)) {
result = -EBLACKLISTED;
goto err_unlock;
@@ -4114,19 +4164,10 @@ static int rbd_queue_rq(struct blk_mq_hw_ctx *hctx,
static void rbd_free_disk(struct rbd_device *rbd_dev)
{
- struct gendisk *disk = rbd_dev->disk;
-
- if (!disk)
- return;
-
+ blk_cleanup_queue(rbd_dev->disk->queue);
+ blk_mq_free_tag_set(&rbd_dev->tag_set);
+ put_disk(rbd_dev->disk);
rbd_dev->disk = NULL;
- if (disk->flags & GENHD_FL_UP) {
- del_gendisk(disk);
- if (disk->queue)
- blk_cleanup_queue(disk->queue);
- blk_mq_free_tag_set(&rbd_dev->tag_set);
- }
- put_disk(disk);
}
static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
@@ -4307,9 +4348,8 @@ out:
return ret;
}
-static int rbd_init_request(void *data, struct request *rq,
- unsigned int hctx_idx, unsigned int request_idx,
- unsigned int numa_node)
+static int rbd_init_request(struct blk_mq_tag_set *set, struct request *rq,
+ unsigned int hctx_idx, unsigned int numa_node)
{
struct work_struct *work = blk_mq_rq_to_pdu(rq);
@@ -4384,8 +4424,12 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC))
q->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES;
+ /*
+ * disk_release() expects a queue ref from add_disk() and will
+ * put it. Hold an extra ref until add_disk() is called.
+ */
+ WARN_ON(!blk_get_queue(q));
disk->queue = q;
-
q->queuedata = rbd_dev;
rbd_dev->disk = disk;
@@ -5625,6 +5669,7 @@ static int rbd_add_parse_args(const char *buf,
rbd_opts->read_only = RBD_READ_ONLY_DEFAULT;
rbd_opts->queue_depth = RBD_QUEUE_DEPTH_DEFAULT;
rbd_opts->lock_on_read = RBD_LOCK_ON_READ_DEFAULT;
+ rbd_opts->exclusive = RBD_EXCLUSIVE_DEFAULT;
copts = ceph_parse_options(options, mon_addrs,
mon_addrs + mon_addrs_size - 1,
@@ -5683,6 +5728,33 @@ again:
return ret;
}
+static void rbd_dev_image_unlock(struct rbd_device *rbd_dev)
+{
+ down_write(&rbd_dev->lock_rwsem);
+ if (__rbd_is_lock_owner(rbd_dev))
+ rbd_unlock(rbd_dev);
+ up_write(&rbd_dev->lock_rwsem);
+}
+
+static int rbd_add_acquire_lock(struct rbd_device *rbd_dev)
+{
+ if (!(rbd_dev->header.features & RBD_FEATURE_EXCLUSIVE_LOCK)) {
+ rbd_warn(rbd_dev, "exclusive-lock feature is not enabled");
+ return -EINVAL;
+ }
+
+	/* FIXME: "rbd map --exclusive" should be interruptible */
+ down_read(&rbd_dev->lock_rwsem);
+ rbd_wait_state_locked(rbd_dev);
+ up_read(&rbd_dev->lock_rwsem);
+ if (test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags)) {
+ rbd_warn(rbd_dev, "failed to acquire exclusive lock");
+ return -EROFS;
+ }
+
+ return 0;
+}
+
/*
* An rbd format 2 image has a unique identifier, distinct from the
* name given to it by the user. Internally, that identifier is
@@ -5874,6 +5946,15 @@ out_err:
return ret;
}
+static void rbd_dev_device_release(struct rbd_device *rbd_dev)
+{
+ clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
+ rbd_dev_mapping_clear(rbd_dev);
+ rbd_free_disk(rbd_dev);
+ if (!single_major)
+ unregister_blkdev(rbd_dev->major, rbd_dev->name);
+}
+
/*
* rbd_dev->header_rwsem must be locked for write and will be unlocked
* upon return.
@@ -5909,26 +5990,13 @@ static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
set_disk_ro(rbd_dev->disk, rbd_dev->mapping.read_only);
- dev_set_name(&rbd_dev->dev, "%d", rbd_dev->dev_id);
- ret = device_add(&rbd_dev->dev);
+ ret = dev_set_name(&rbd_dev->dev, "%d", rbd_dev->dev_id);
if (ret)
goto err_out_mapping;
- /* Everything's ready. Announce the disk to the world. */
-
set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
up_write(&rbd_dev->header_rwsem);
-
- spin_lock(&rbd_dev_list_lock);
- list_add_tail(&rbd_dev->node, &rbd_dev_list);
- spin_unlock(&rbd_dev_list_lock);
-
- add_disk(rbd_dev->disk);
- pr_info("%s: capacity %llu features 0x%llx\n", rbd_dev->disk->disk_name,
- (unsigned long long)get_capacity(rbd_dev->disk) << SECTOR_SHIFT,
- rbd_dev->header.features);
-
- return ret;
+ return 0;
err_out_mapping:
rbd_dev_mapping_clear(rbd_dev);
@@ -5963,11 +6031,11 @@ static int rbd_dev_header_name(struct rbd_device *rbd_dev)
static void rbd_dev_image_release(struct rbd_device *rbd_dev)
{
rbd_dev_unprobe(rbd_dev);
+ if (rbd_dev->opts)
+ rbd_unregister_watch(rbd_dev);
rbd_dev->image_format = 0;
kfree(rbd_dev->spec->image_id);
rbd_dev->spec->image_id = NULL;
-
- rbd_dev_destroy(rbd_dev);
}
/*
@@ -6127,22 +6195,43 @@ static ssize_t do_rbd_add(struct bus_type *bus,
rbd_dev->mapping.read_only = read_only;
rc = rbd_dev_device_setup(rbd_dev);
- if (rc) {
- /*
- * rbd_unregister_watch() can't be moved into
- * rbd_dev_image_release() without refactoring, see
- * commit 1f3ef78861ac.
- */
- rbd_unregister_watch(rbd_dev);
- rbd_dev_image_release(rbd_dev);
- goto out;
+ if (rc)
+ goto err_out_image_probe;
+
+ if (rbd_dev->opts->exclusive) {
+ rc = rbd_add_acquire_lock(rbd_dev);
+ if (rc)
+ goto err_out_device_setup;
}
+ /* Everything's ready. Announce the disk to the world. */
+
+ rc = device_add(&rbd_dev->dev);
+ if (rc)
+ goto err_out_image_lock;
+
+ add_disk(rbd_dev->disk);
+ /* see rbd_init_disk() */
+ blk_put_queue(rbd_dev->disk->queue);
+
+ spin_lock(&rbd_dev_list_lock);
+ list_add_tail(&rbd_dev->node, &rbd_dev_list);
+ spin_unlock(&rbd_dev_list_lock);
+
+ pr_info("%s: capacity %llu features 0x%llx\n", rbd_dev->disk->disk_name,
+ (unsigned long long)get_capacity(rbd_dev->disk) << SECTOR_SHIFT,
+ rbd_dev->header.features);
rc = count;
out:
module_put(THIS_MODULE);
return rc;
+err_out_image_lock:
+ rbd_dev_image_unlock(rbd_dev);
+err_out_device_setup:
+ rbd_dev_device_release(rbd_dev);
+err_out_image_probe:
+ rbd_dev_image_release(rbd_dev);
err_out_rbd_dev:
rbd_dev_destroy(rbd_dev);
err_out_client:
@@ -6170,21 +6259,6 @@ static ssize_t rbd_add_single_major(struct bus_type *bus,
return do_rbd_add(bus, buf, count);
}
-static void rbd_dev_device_release(struct rbd_device *rbd_dev)
-{
- rbd_free_disk(rbd_dev);
-
- spin_lock(&rbd_dev_list_lock);
- list_del_init(&rbd_dev->node);
- spin_unlock(&rbd_dev_list_lock);
-
- clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
- device_del(&rbd_dev->dev);
- rbd_dev_mapping_clear(rbd_dev);
- if (!single_major)
- unregister_blkdev(rbd_dev->major, rbd_dev->name);
-}
-
static void rbd_dev_remove_parent(struct rbd_device *rbd_dev)
{
while (rbd_dev->parent) {
@@ -6202,6 +6276,7 @@ static void rbd_dev_remove_parent(struct rbd_device *rbd_dev)
}
rbd_assert(second);
rbd_dev_image_release(second);
+ rbd_dev_destroy(second);
first->parent = NULL;
first->parent_overlap = 0;
@@ -6270,21 +6345,16 @@ static ssize_t do_rbd_remove(struct bus_type *bus,
blk_set_queue_dying(rbd_dev->disk->queue);
}
- down_write(&rbd_dev->lock_rwsem);
- if (__rbd_is_lock_owner(rbd_dev))
- rbd_unlock(rbd_dev);
- up_write(&rbd_dev->lock_rwsem);
- rbd_unregister_watch(rbd_dev);
+ del_gendisk(rbd_dev->disk);
+ spin_lock(&rbd_dev_list_lock);
+ list_del_init(&rbd_dev->node);
+ spin_unlock(&rbd_dev_list_lock);
+ device_del(&rbd_dev->dev);
- /*
- * Don't free anything from rbd_dev->disk until after all
- * notifies are completely processed. Otherwise
- * rbd_bus_del_dev() will race with rbd_watch_cb(), resulting
- * in a potential use after free of rbd_dev->disk or rbd_dev.
- */
+ rbd_dev_image_unlock(rbd_dev);
rbd_dev_device_release(rbd_dev);
rbd_dev_image_release(rbd_dev);
-
+ rbd_dev_destroy(rbd_dev);
return count;
}
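For reference, the new "exclusive" map option above plugs into the standard match_table_t parser; a tiny hypothetical example of that pattern (option name and helper are placeholders, not rbd code):

	#include <linux/parser.h>

	enum { Opt_example, Opt_bad };

	static const match_table_t example_tokens = {
		{ Opt_example, "example" },
		{ Opt_bad, NULL }
	};

	static bool example_parse(char *opt)
	{
		substring_t args[MAX_OPT_ARGS];

		return match_token(opt, example_tokens, args) == Opt_example;
	}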
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index f94614257462..94173de1efaa 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -573,11 +573,10 @@ static const struct device_attribute dev_attr_cache_type_rw =
__ATTR(cache_type, S_IRUGO|S_IWUSR,
virtblk_cache_type_show, virtblk_cache_type_store);
-static int virtblk_init_request(void *data, struct request *rq,
- unsigned int hctx_idx, unsigned int request_idx,
- unsigned int numa_node)
+static int virtblk_init_request(struct blk_mq_tag_set *set, struct request *rq,
+ unsigned int hctx_idx, unsigned int numa_node)
{
- struct virtio_blk *vblk = data;
+ struct virtio_blk *vblk = set->driver_data;
struct virtblk_req *vbr = blk_mq_rq_to_pdu(rq);
#ifdef CONFIG_VIRTIO_BLK_SCSI
diff --git a/drivers/char/agp/amd-k7-agp.c b/drivers/char/agp/amd-k7-agp.c
index 3661a51e93e2..5fbd333e4c6d 100644
--- a/drivers/char/agp/amd-k7-agp.c
+++ b/drivers/char/agp/amd-k7-agp.c
@@ -9,6 +9,7 @@
#include <linux/page-flags.h>
#include <linux/mm.h>
#include <linux/slab.h>
+#include <asm/set_memory.h>
#include "agp.h"
#define AMD_MMBASE_BAR 1
diff --git a/drivers/char/agp/ati-agp.c b/drivers/char/agp/ati-agp.c
index 75a9786a77e6..0b5ec7af2414 100644
--- a/drivers/char/agp/ati-agp.c
+++ b/drivers/char/agp/ati-agp.c
@@ -10,6 +10,7 @@
#include <linux/slab.h>
#include <linux/agp_backend.h>
#include <asm/agp.h>
+#include <asm/set_memory.h>
#include "agp.h"
#define ATI_GART_MMBASE_BAR 1
diff --git a/drivers/char/agp/generic.c b/drivers/char/agp/generic.c
index f002fa5d1887..658664a5a5aa 100644
--- a/drivers/char/agp/generic.c
+++ b/drivers/char/agp/generic.c
@@ -39,7 +39,9 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <asm/io.h>
-#include <asm/cacheflush.h>
+#ifdef CONFIG_X86
+#include <asm/set_memory.h>
+#endif
#include <asm/pgtable.h>
#include "agp.h"
@@ -88,13 +90,7 @@ static int agp_get_key(void)
void agp_alloc_page_array(size_t size, struct agp_memory *mem)
{
- mem->pages = NULL;
-
- if (size <= 2*PAGE_SIZE)
- mem->pages = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);
- if (mem->pages == NULL) {
- mem->pages = vmalloc(size);
- }
+ mem->pages = kvmalloc(size, GFP_KERNEL);
}
EXPORT_SYMBOL(agp_alloc_page_array);
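For reference, the kvmalloc() conversion above collapses the kmalloc-then-vmalloc fallback into one call; kvfree() releases either kind of allocation. A minimal sketch with placeholder helpers:

	#include <linux/mm.h>
	#include <linux/slab.h>

	static void *example_alloc_array(size_t size)
	{
		/* tries kmalloc(), transparently falls back to vmalloc() */
		return kvmalloc(size, GFP_KERNEL);
	}

	static void example_free_array(void *p)
	{
		kvfree(p);
	}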
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index 7fcc2a9d1d5a..9b6b6023193b 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -25,6 +25,7 @@
#include "agp.h"
#include "intel-agp.h"
#include <drm/intel-gtt.h>
+#include <asm/set_memory.h>
/*
* If we have Intel graphics, we're not going to have anything other than
diff --git a/drivers/char/agp/sworks-agp.c b/drivers/char/agp/sworks-agp.c
index 9b163b49d976..03be4ac79b0d 100644
--- a/drivers/char/agp/sworks-agp.c
+++ b/drivers/char/agp/sworks-agp.c
@@ -9,6 +9,7 @@
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/agp_backend.h>
+#include <asm/set_memory.h>
#include "agp.h"
#define SVWRKS_COMMAND 0x04
diff --git a/drivers/char/dsp56k.c b/drivers/char/dsp56k.c
index 50aa9ba91f25..0d7b577e0ff0 100644
--- a/drivers/char/dsp56k.c
+++ b/drivers/char/dsp56k.c
@@ -489,7 +489,7 @@ static const struct file_operations dsp56k_fops = {
/****** Init and module functions ******/
-static char banner[] __initdata = KERN_INFO "DSP56k driver installed\n";
+static const char banner[] __initconst = KERN_INFO "DSP56k driver installed\n";
static int __init dsp56k_init_driver(void)
{
diff --git a/drivers/char/hangcheck-timer.c b/drivers/char/hangcheck-timer.c
index 4f337375252e..5406b90bf626 100644
--- a/drivers/char/hangcheck-timer.c
+++ b/drivers/char/hangcheck-timer.c
@@ -32,7 +32,7 @@
* timer and 180 seconds for the margin of error. IOW, a timer is set
* for 60 seconds. When the timer fires, the callback checks the
* actual duration that the timer waited. If the duration exceeds the
- * alloted time and margin (here 60 + 180, or 240 seconds), the machine
+ * allotted time and margin (here 60 + 180, or 240 seconds), the machine
* is restarted. A healthy machine will have the duration match the
* expected timeout very closely.
*/
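[editor's note] The corrected comment spells out the arithmetic: with a 60 second tick and a 180 second margin, a restart is triggered only when the measured wait exceeds 240 seconds. A tiny hedged illustration of that rule (not the driver's code):

/* Hedged illustration of the hangcheck rule described in the comment above. */
#define TICK_SEC	60	/* hangcheck_tick */
#define MARGIN_SEC	180	/* hangcheck_margin */

/* Nonzero when the measured timer duration is past tick + margin (240 s). */
static inline int hangcheck_fired_late(unsigned long waited_sec)
{
	return waited_sec > (TICK_SEC + MARGIN_SEC);
}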
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index 8bdc38d81adf..b941e6d59fd6 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -575,7 +575,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
}
static int
-hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
+hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
struct hpet_info *info)
{
struct hpet_timer __iomem *timer;
diff --git a/drivers/char/misc.c b/drivers/char/misc.c
index 8069b361b8dd..c9cd1ea6844a 100644
--- a/drivers/char/misc.c
+++ b/drivers/char/misc.c
@@ -109,7 +109,7 @@ static const struct file_operations misc_proc_fops = {
};
#endif
-static int misc_open(struct inode * inode, struct file * file)
+static int misc_open(struct inode *inode, struct file *file)
{
int minor = iminor(inode);
struct miscdevice *c;
@@ -150,7 +150,7 @@ static int misc_open(struct inode * inode, struct file * file)
err = 0;
replace_fops(file, new_fops);
if (file->f_op->open)
- err = file->f_op->open(inode,file);
+ err = file->f_op->open(inode, file);
fail:
mutex_unlock(&misc_mtx);
return err;
@@ -182,7 +182,7 @@ static const struct file_operations misc_fops = {
* failure.
*/
-int misc_register(struct miscdevice * misc)
+int misc_register(struct miscdevice *misc)
{
dev_t dev;
int err = 0;
@@ -194,6 +194,7 @@ int misc_register(struct miscdevice * misc)
if (is_dynamic) {
int i = find_first_zero_bit(misc_minors, DYNAMIC_MINORS);
+
if (i >= DYNAMIC_MINORS) {
err = -EBUSY;
goto out;
@@ -287,13 +288,13 @@ static int __init misc_init(void)
goto fail_remove;
err = -EIO;
- if (register_chrdev(MISC_MAJOR,"misc",&misc_fops))
+ if (register_chrdev(MISC_MAJOR, "misc", &misc_fops))
goto fail_printk;
misc_class->devnode = misc_devnode;
return 0;
fail_printk:
- printk("unable to get major %d for misc devices\n", MISC_MAJOR);
+ pr_err("unable to get major %d for misc devices\n", MISC_MAJOR);
class_destroy(misc_class);
fail_remove:
if (ret)
diff --git a/drivers/char/mspec.c b/drivers/char/mspec.c
index a9c2fa3c81e5..7b75669d3670 100644
--- a/drivers/char/mspec.c
+++ b/drivers/char/mspec.c
@@ -43,6 +43,7 @@
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/numa.h>
+#include <linux/refcount.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <linux/atomic.h>
@@ -89,7 +90,7 @@ static int is_sn2;
* protect in fork case where multiple tasks share the vma_data.
*/
struct vma_data {
- atomic_t refcnt; /* Number of vmas sharing the data. */
+ refcount_t refcnt; /* Number of vmas sharing the data. */
spinlock_t lock; /* Serialize access to this structure. */
int count; /* Number of pages allocated. */
enum mspec_page_type type; /* Type of pages allocated. */
@@ -144,7 +145,7 @@ mspec_open(struct vm_area_struct *vma)
struct vma_data *vdata;
vdata = vma->vm_private_data;
- atomic_inc(&vdata->refcnt);
+ refcount_inc(&vdata->refcnt);
}
/*
@@ -162,7 +163,7 @@ mspec_close(struct vm_area_struct *vma)
vdata = vma->vm_private_data;
- if (!atomic_dec_and_test(&vdata->refcnt))
+ if (!refcount_dec_and_test(&vdata->refcnt))
return;
last_index = (vdata->vm_end - vdata->vm_start) >> PAGE_SHIFT;
@@ -274,7 +275,7 @@ mspec_mmap(struct file *file, struct vm_area_struct *vma,
vdata->vm_end = vma->vm_end;
vdata->type = type;
spin_lock_init(&vdata->lock);
- atomic_set(&vdata->refcnt, 1);
+ refcount_set(&vdata->refcnt, 1);
vma->vm_private_data = vdata;
vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
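[editor's note] mspec.c moves vma_data's reference count from atomic_t to refcount_t, which saturates and warns on misuse such as incrementing from zero. A minimal sketch of the same open/close lifetime pattern, assuming a simplified private structure; the names are illustrative, not the driver's.

#include <linux/refcount.h>
#include <linux/slab.h>

struct shared_priv {
	refcount_t refcnt;	/* number of users sharing this data */
};

static struct shared_priv *shared_priv_create(void)
{
	struct shared_priv *p = kzalloc(sizeof(*p), GFP_KERNEL);

	if (p)
		refcount_set(&p->refcnt, 1);	/* first user */
	return p;
}

static void shared_priv_get(struct shared_priv *p)
{
	refcount_inc(&p->refcnt);		/* e.g. on vm_ops->open */
}

static void shared_priv_put(struct shared_priv *p)
{
	if (refcount_dec_and_test(&p->refcnt))	/* last user gone */
		kfree(p);
}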
diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
index 9dec9f551b83..322b8a51ffc6 100644
--- a/drivers/char/tpm/tpm-chip.c
+++ b/drivers/char/tpm/tpm-chip.c
@@ -218,8 +218,6 @@ struct tpm_chip *tpm_chip_alloc(struct device *pdev,
cdev_init(&chip->cdevs, &tpmrm_fops);
chip->cdev.owner = THIS_MODULE;
chip->cdevs.owner = THIS_MODULE;
- chip->cdev.kobj.parent = &chip->dev.kobj;
- chip->cdevs.kobj.parent = &chip->devs.kobj;
chip->work_space.context_buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
if (!chip->work_space.context_buf) {
@@ -275,45 +273,24 @@ static int tpm_add_char_device(struct tpm_chip *chip)
{
int rc;
- rc = cdev_add(&chip->cdev, chip->dev.devt, 1);
+ rc = cdev_device_add(&chip->cdev, &chip->dev);
if (rc) {
dev_err(&chip->dev,
- "unable to cdev_add() %s, major %d, minor %d, err=%d\n",
+ "unable to cdev_device_add() %s, major %d, minor %d, err=%d\n",
dev_name(&chip->dev), MAJOR(chip->dev.devt),
MINOR(chip->dev.devt), rc);
return rc;
}
- rc = device_add(&chip->dev);
- if (rc) {
- dev_err(&chip->dev,
- "unable to device_register() %s, major %d, minor %d, err=%d\n",
- dev_name(&chip->dev), MAJOR(chip->dev.devt),
- MINOR(chip->dev.devt), rc);
-
- cdev_del(&chip->cdev);
- return rc;
- }
-
- if (chip->flags & TPM_CHIP_FLAG_TPM2)
- rc = cdev_add(&chip->cdevs, chip->devs.devt, 1);
- if (rc) {
- dev_err(&chip->dev,
- "unable to cdev_add() %s, major %d, minor %d, err=%d\n",
- dev_name(&chip->devs), MAJOR(chip->devs.devt),
- MINOR(chip->devs.devt), rc);
- return rc;
- }
-
- if (chip->flags & TPM_CHIP_FLAG_TPM2)
- rc = device_add(&chip->devs);
- if (rc) {
- dev_err(&chip->dev,
- "unable to device_register() %s, major %d, minor %d, err=%d\n",
- dev_name(&chip->devs), MAJOR(chip->devs.devt),
- MINOR(chip->devs.devt), rc);
- cdev_del(&chip->cdevs);
- return rc;
+ if (chip->flags & TPM_CHIP_FLAG_TPM2) {
+ rc = cdev_device_add(&chip->cdevs, &chip->devs);
+ if (rc) {
+ dev_err(&chip->devs,
+ "unable to cdev_device_add() %s, major %d, minor %d, err=%d\n",
+ dev_name(&chip->devs), MAJOR(chip->devs.devt),
+ MINOR(chip->devs.devt), rc);
+ return rc;
+ }
}
/* Make the chip available. */
@@ -326,8 +303,7 @@ static int tpm_add_char_device(struct tpm_chip *chip)
static void tpm_del_char_device(struct tpm_chip *chip)
{
- cdev_del(&chip->cdev);
- device_del(&chip->dev);
+ cdev_device_del(&chip->cdev, &chip->dev);
/* Make the chip unavailable. */
mutex_lock(&idr_lock);
@@ -449,10 +425,8 @@ void tpm_chip_unregister(struct tpm_chip *chip)
{
tpm_del_legacy_sysfs(chip);
tpm_bios_log_teardown(chip);
- if (chip->flags & TPM_CHIP_FLAG_TPM2) {
- cdev_del(&chip->cdevs);
- device_del(&chip->devs);
- }
+ if (chip->flags & TPM_CHIP_FLAG_TPM2)
+ cdev_device_del(&chip->cdevs, &chip->devs);
tpm_del_char_device(chip);
}
EXPORT_SYMBOL_GPL(tpm_chip_unregister);
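[editor's note] The TPM change collapses the cdev_add() + device_add() pair, and its error unwinding, into cdev_device_add(), which also makes the cdev's kobject parenting implicit (hence the removed chip->cdev.kobj.parent assignments). A hedged sketch of the pairing, assuming an already-initialized struct device and struct cdev embedded in one object:

#include <linux/cdev.h>
#include <linux/device.h>

/*
 * Register a char device together with its struct device.  Assumes the
 * caller already did device_initialize(), dev_set_name(), set dev->devt
 * and called cdev_init(), as tpm_chip_alloc() does above.
 */
static int my_chardev_add(struct device *dev, struct cdev *cdev)
{
	int rc;

	/* One call replaces cdev_add() + device_add() and their unwinding. */
	rc = cdev_device_add(cdev, dev);
	if (rc)
		dev_err(dev, "cdev_device_add() failed: %d\n", rc);
	return rc;
}

static void my_chardev_del(struct device *dev, struct cdev *cdev)
{
	/* Mirrors cdev_del() + device_del() in one call. */
	cdev_device_del(cdev, dev);
}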
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 87fe111d0be6..7d041d026680 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -2304,7 +2304,7 @@ static int __init init(void)
pdrvdata.debugfs_dir = debugfs_create_dir("virtio-ports", NULL);
if (!pdrvdata.debugfs_dir)
- pr_warning("Error creating debugfs dir for virtio-ports\n");
+ pr_warn("Error creating debugfs dir for virtio-ports\n");
INIT_LIST_HEAD(&pdrvdata.consoles);
INIT_LIST_HEAD(&pdrvdata.portdevs);
diff --git a/drivers/clk/meson/gxbb.h b/drivers/clk/meson/gxbb.h
index cbd62e46bb5b..945aefa4d251 100644
--- a/drivers/clk/meson/gxbb.h
+++ b/drivers/clk/meson/gxbb.h
@@ -177,7 +177,7 @@
/* CLKID_FCLK_DIV4 */
#define CLKID_FCLK_DIV5 7
#define CLKID_FCLK_DIV7 8
-#define CLKID_GP0_PLL 9
+/* CLKID_GP0_PLL */
#define CLKID_MPEG_SEL 10
#define CLKID_MPEG_DIV 11
/* CLKID_CLK81 */
@@ -206,16 +206,16 @@
#define CLKID_I2S_SPDIF 35
/* CLKID_ETH */
#define CLKID_DEMUX 37
-#define CLKID_AIU_GLUE 38
+/* CLKID_AIU_GLUE */
#define CLKID_IEC958 39
-#define CLKID_I2S_OUT 40
+/* CLKID_I2S_OUT */
#define CLKID_AMCLK 41
#define CLKID_AIFIFO2 42
#define CLKID_MIXER 43
-#define CLKID_MIXER_IFACE 44
+/* CLKID_MIXER_IFACE */
#define CLKID_ADC 45
#define CLKID_BLKMV 46
-#define CLKID_AIU 47
+/* CLKID_AIU */
#define CLKID_UART1 48
#define CLKID_G2D 49
/* CLKID_USB0 */
@@ -248,7 +248,7 @@
/* CLKID_GCLK_VENCI_INT0 */
#define CLKID_GCLK_VENCI_INT 78
#define CLKID_DAC_CLK 79
-#define CLKID_AOCLK_GATE 80
+/* CLKID_AOCLK_GATE */
#define CLKID_IEC958_GATE 81
#define CLKID_ENC480P 82
#define CLKID_RNG1 83
@@ -268,8 +268,15 @@
/* CLKID_SAR_ADC_CLK */
/* CLKID_SAR_ADC_SEL */
#define CLKID_SAR_ADC_DIV 99
+/* CLKID_MALI_0_SEL */
+#define CLKID_MALI_0_DIV 101
+/* CLKID_MALI_0 */
+/* CLKID_MALI_1_SEL */
+#define CLKID_MALI_1_DIV 104
+/* CLKID_MALI_1 */
+/* CLKID_MALI */
-#define NR_CLKS 100
+#define NR_CLKS 107
/* include the CLKIDs that have been made part of the stable DT binding */
#include <dt-bindings/clock/gxbb-clkc.h>
diff --git a/drivers/clk/qcom/common.c b/drivers/clk/qcom/common.c
index 03f9d316f969..d523991c945f 100644
--- a/drivers/clk/qcom/common.c
+++ b/drivers/clk/qcom/common.c
@@ -128,7 +128,7 @@ static void qcom_cc_gdsc_unregister(void *data)
/*
* Backwards compatibility with old DTs. Register a pass-through factor 1/1
- * clock to translate 'path' clk into 'name' clk and regsiter the 'path'
+ * clock to translate 'path' clk into 'name' clk and register the 'path'
* clk as a fixed rate clock if it isn't present.
*/
static int _qcom_cc_register_board_clk(struct device *dev, const char *path,
diff --git a/drivers/clk/versatile/Kconfig b/drivers/clk/versatile/Kconfig
index a6da2aa09f83..8aa875f25239 100644
--- a/drivers/clk/versatile/Kconfig
+++ b/drivers/clk/versatile/Kconfig
@@ -1,3 +1,6 @@
+config ICST
+ bool
+
config COMMON_CLK_VERSATILE
bool "Clock driver for ARM Reference designs"
depends on ARCH_INTEGRATOR || ARCH_REALVIEW || \
diff --git a/drivers/clk/versatile/Makefile b/drivers/clk/versatile/Makefile
index 8ff03744fe98..794130402c8d 100644
--- a/drivers/clk/versatile/Makefile
+++ b/drivers/clk/versatile/Makefile
@@ -1,5 +1,5 @@
# Makefile for Versatile-specific clocks
-obj-$(CONFIG_ICST) += clk-icst.o clk-versatile.o
+obj-$(CONFIG_ICST) += icst.o clk-icst.o clk-versatile.o
obj-$(CONFIG_INTEGRATOR_IMPD1) += clk-impd1.o
obj-$(CONFIG_ARCH_REALVIEW) += clk-realview.o
obj-$(CONFIG_CLK_SP810) += clk-sp810.o
diff --git a/drivers/clk/versatile/clk-icst.c b/drivers/clk/versatile/clk-icst.c
index 4faa94440779..09fbe66f1f11 100644
--- a/drivers/clk/versatile/clk-icst.c
+++ b/drivers/clk/versatile/clk-icst.c
@@ -22,6 +22,7 @@
#include <linux/regmap.h>
#include <linux/mfd/syscon.h>
+#include "icst.h"
#include "clk-icst.h"
/* Magic unlocking token used on all Versatile boards */
diff --git a/drivers/clk/versatile/clk-icst.h b/drivers/clk/versatile/clk-icst.h
index 04e6f0aef588..5add02ebec5d 100644
--- a/drivers/clk/versatile/clk-icst.h
+++ b/drivers/clk/versatile/clk-icst.h
@@ -1,5 +1,3 @@
-#include <asm/hardware/icst.h>
-
/**
* struct clk_icst_desc - descriptor for the ICST VCO
* @params: ICST parameters
diff --git a/drivers/clk/versatile/clk-impd1.c b/drivers/clk/versatile/clk-impd1.c
index 74c3216dbb00..401558bfc409 100644
--- a/drivers/clk/versatile/clk-impd1.c
+++ b/drivers/clk/versatile/clk-impd1.c
@@ -12,6 +12,7 @@
#include <linux/io.h>
#include <linux/platform_data/clk-integrator.h>
+#include "icst.h"
#include "clk-icst.h"
#define IMPD1_OSC1 0x00
diff --git a/drivers/clk/versatile/clk-realview.c b/drivers/clk/versatile/clk-realview.c
index c56efc70ac16..6fdfee3232f4 100644
--- a/drivers/clk/versatile/clk-realview.c
+++ b/drivers/clk/versatile/clk-realview.c
@@ -11,6 +11,7 @@
#include <linux/io.h>
#include <linux/clk-provider.h>
+#include "icst.h"
#include "clk-icst.h"
#define REALVIEW_SYS_OSC0_OFFSET 0x0C
diff --git a/drivers/clk/versatile/clk-versatile.c b/drivers/clk/versatile/clk-versatile.c
index a89a927567e0..d6960de64d4a 100644
--- a/drivers/clk/versatile/clk-versatile.c
+++ b/drivers/clk/versatile/clk-versatile.c
@@ -12,6 +12,7 @@
#include <linux/of.h>
#include <linux/of_address.h>
+#include "icst.h"
#include "clk-icst.h"
#define INTEGRATOR_HDR_LOCK_OFFSET 0x14
diff --git a/drivers/clk/versatile/icst.c b/drivers/clk/versatile/icst.c
new file mode 100644
index 000000000000..de2af63a3aad
--- /dev/null
+++ b/drivers/clk/versatile/icst.c
@@ -0,0 +1,105 @@
+/*
+ * linux/arch/arm/common/icst307.c
+ *
+ * Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Support functions for calculating clocks/divisors for the ICST307
+ * clock generators. See http://www.idt.com/ for more information
+ * on these devices.
+ *
+ * This is an almost identical implementation to the ICST525 clock generator.
+ * The s2div and idx2s files are different
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <asm/div64.h>
+#include "icst.h"
+
+/*
+ * Divisors for each OD setting.
+ */
+const unsigned char icst307_s2div[8] = { 10, 2, 8, 4, 5, 7, 3, 6 };
+const unsigned char icst525_s2div[8] = { 10, 2, 8, 4, 5, 7, 9, 6 };
+EXPORT_SYMBOL(icst307_s2div);
+EXPORT_SYMBOL(icst525_s2div);
+
+unsigned long icst_hz(const struct icst_params *p, struct icst_vco vco)
+{
+ u64 dividend = p->ref * 2 * (u64)(vco.v + 8);
+ u32 divisor = (vco.r + 2) * p->s2div[vco.s];
+
+ do_div(dividend, divisor);
+ return (unsigned long)dividend;
+}
+
+EXPORT_SYMBOL(icst_hz);
+
+/*
+ * Ascending divisor S values.
+ */
+const unsigned char icst307_idx2s[8] = { 1, 6, 3, 4, 7, 5, 2, 0 };
+const unsigned char icst525_idx2s[8] = { 1, 3, 4, 7, 5, 2, 6, 0 };
+EXPORT_SYMBOL(icst307_idx2s);
+EXPORT_SYMBOL(icst525_idx2s);
+
+struct icst_vco
+icst_hz_to_vco(const struct icst_params *p, unsigned long freq)
+{
+ struct icst_vco vco = { .s = 1, .v = p->vd_max, .r = p->rd_max };
+ unsigned long f;
+ unsigned int i = 0, rd, best = (unsigned int)-1;
+
+ /*
+ * First, find the PLL output divisor such
+ * that the PLL output is within spec.
+ */
+ do {
+ f = freq * p->s2div[p->idx2s[i]];
+
+ if (f > p->vco_min && f <= p->vco_max)
+ break;
+ i++;
+ } while (i < 8);
+
+ if (i >= 8)
+ return vco;
+
+ vco.s = p->idx2s[i];
+
+ /*
+ * Now find the closest divisor combination
+ * which gives a PLL output of 'f'.
+ */
+ for (rd = p->rd_min; rd <= p->rd_max; rd++) {
+ unsigned long fref_div, f_pll;
+ unsigned int vd;
+ int f_diff;
+
+ fref_div = (2 * p->ref) / rd;
+
+ vd = (f + fref_div / 2) / fref_div;
+ if (vd < p->vd_min || vd > p->vd_max)
+ continue;
+
+ f_pll = fref_div * vd;
+ f_diff = f_pll - f;
+ if (f_diff < 0)
+ f_diff = -f_diff;
+
+ if ((unsigned)f_diff < best) {
+ vco.v = vd - 8;
+ vco.r = rd - 2;
+ if (f_diff == 0)
+ break;
+ best = f_diff;
+ }
+ }
+
+ return vco;
+}
+
+EXPORT_SYMBOL(icst_hz_to_vco);
diff --git a/drivers/clk/versatile/icst.h b/drivers/clk/versatile/icst.h
new file mode 100644
index 000000000000..7519bba03b04
--- /dev/null
+++ b/drivers/clk/versatile/icst.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Support functions for calculating clocks/divisors for the ICST
+ * clock generators. See http://www.idt.com/ for more information
+ * on these devices.
+ */
+#ifndef ICST_H
+#define ICST_H
+
+struct icst_params {
+ unsigned long ref;
+ unsigned long vco_max; /* inclusive */
+ unsigned long vco_min; /* exclusive */
+ unsigned short vd_min; /* inclusive */
+ unsigned short vd_max; /* inclusive */
+ unsigned char rd_min; /* inclusive */
+ unsigned char rd_max; /* inclusive */
+ const unsigned char *s2div; /* chip specific s2div array */
+ const unsigned char *idx2s; /* chip specific idx2s array */
+};
+
+struct icst_vco {
+ unsigned short v;
+ unsigned char r;
+ unsigned char s;
+};
+
+unsigned long icst_hz(const struct icst_params *p, struct icst_vco vco);
+struct icst_vco icst_hz_to_vco(const struct icst_params *p, unsigned long freq);
+
+/*
+ * ICST307 VCO frequency must be between 6MHz and 200MHz (3.3 or 5V).
+ * This frequency is pre-output divider.
+ */
+#define ICST307_VCO_MIN 6000000
+#define ICST307_VCO_MAX 200000000
+
+extern const unsigned char icst307_s2div[];
+extern const unsigned char icst307_idx2s[];
+
+/*
+ * ICST525 VCO frequency must be between 10MHz and 200MHz (3V) or 320MHz (5V).
+ * This frequency is pre-output divider.
+ */
+#define ICST525_VCO_MIN 10000000
+#define ICST525_VCO_MAX_3V 200000000
+#define ICST525_VCO_MAX_5V 320000000
+
+extern const unsigned char icst525_s2div[];
+extern const unsigned char icst525_idx2s[];
+
+#endif
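[editor's note] icst_hz() in the new icst.c computes f = ref * 2 * (v + 8) / ((r + 2) * s2div[s]). A worked example under the ICST307 table shown above: with ref = 24 MHz, v = 17, r = 22 and s = 1 (s2div[1] = 2), the output is 24e6 * 2 * 25 / (24 * 2) = 25 MHz. The self-contained userspace check below reimplements that formula rather than calling the kernel helper.

/* Standalone check of the icst_hz() formula (userspace C). */
#include <stdio.h>
#include <stdint.h>

static const unsigned char icst307_s2div[8] = { 10, 2, 8, 4, 5, 7, 3, 6 };

static unsigned long icst_hz_example(unsigned long ref,
				     unsigned short v, unsigned char r,
				     unsigned char s)
{
	uint64_t dividend = (uint64_t)ref * 2 * (v + 8);
	uint32_t divisor = (r + 2) * icst307_s2div[s];

	return (unsigned long)(dividend / divisor);
}

int main(void)
{
	/* 24 MHz reference, v=17, r=22, s=1 -> 25 MHz per the formula. */
	printf("%lu Hz\n", icst_hz_example(24000000UL, 17, 22, 1));
	return 0;
}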
diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c
index 0b5bf135b090..062d71434e47 100644
--- a/drivers/cpufreq/powernow-k8.c
+++ b/drivers/cpufreq/powernow-k8.c
@@ -1171,7 +1171,8 @@ static struct cpufreq_driver cpufreq_amd64_driver = {
static void __request_acpi_cpufreq(void)
{
- const char *cur_drv, *drv = "acpi-cpufreq";
+ const char drv[] = "acpi-cpufreq";
+ const char *cur_drv;
cur_drv = cpufreq_get_current_driver();
if (!cur_drv)
diff --git a/drivers/cpufreq/sti-cpufreq.c b/drivers/cpufreq/sti-cpufreq.c
index a7db9011d5fe..d2d0430d09d4 100644
--- a/drivers/cpufreq/sti-cpufreq.c
+++ b/drivers/cpufreq/sti-cpufreq.c
@@ -236,7 +236,7 @@ use_defaults:
return 0;
}
-static int sti_cpufreq_fetch_syscon_regsiters(void)
+static int sti_cpufreq_fetch_syscon_registers(void)
{
struct device *dev = ddata.cpu;
struct device_node *np = dev->of_node;
@@ -275,7 +275,7 @@ static int sti_cpufreq_init(void)
goto skip_voltage_scaling;
}
- ret = sti_cpufreq_fetch_syscon_regsiters();
+ ret = sti_cpufreq_fetch_syscon_registers();
if (ret)
goto skip_voltage_scaling;
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 548b90be7685..2706be7ed334 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -111,7 +111,8 @@ void cpuidle_use_deepest_state(bool enable)
preempt_disable();
dev = cpuidle_get_device();
- dev->use_deepest_state = enable;
+ if (dev)
+ dev->use_deepest_state = enable;
preempt_enable();
}
diff --git a/drivers/dax/Kconfig b/drivers/dax/Kconfig
index 9e95bf94eb13..b7053eafd88e 100644
--- a/drivers/dax/Kconfig
+++ b/drivers/dax/Kconfig
@@ -1,8 +1,13 @@
-menuconfig DEV_DAX
+menuconfig DAX
tristate "DAX: direct access to differentiated memory"
+ select SRCU
default m if NVDIMM_DAX
+
+if DAX
+
+config DEV_DAX
+ tristate "Device DAX: direct access mapping device"
depends on TRANSPARENT_HUGEPAGE
- select SRCU
help
Support raw access to differentiated (persistence, bandwidth,
latency...) memory via an mmap(2) capable character
@@ -11,7 +16,6 @@ menuconfig DEV_DAX
baseline memory pool. Mappings of a /dev/daxX.Y device impose
restrictions that make the mapping behavior deterministic.
-if DEV_DAX
config DEV_DAX_PMEM
tristate "PMEM DAX: direct access to persistent memory"
diff --git a/drivers/dax/Makefile b/drivers/dax/Makefile
index 27c54e38478a..dc7422530462 100644
--- a/drivers/dax/Makefile
+++ b/drivers/dax/Makefile
@@ -1,4 +1,7 @@
-obj-$(CONFIG_DEV_DAX) += dax.o
+obj-$(CONFIG_DAX) += dax.o
+obj-$(CONFIG_DEV_DAX) += device_dax.o
obj-$(CONFIG_DEV_DAX_PMEM) += dax_pmem.o
+dax-y := super.o
dax_pmem-y := pmem.o
+device_dax-y := device.o
diff --git a/drivers/dax/dax-private.h b/drivers/dax/dax-private.h
new file mode 100644
index 000000000000..b6fc4f04636d
--- /dev/null
+++ b/drivers/dax/dax-private.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+#ifndef __DAX_PRIVATE_H__
+#define __DAX_PRIVATE_H__
+
+#include <linux/device.h>
+#include <linux/cdev.h>
+
+/**
+ * struct dax_region - mapping infrastructure for dax devices
+ * @id: kernel-wide unique region for a memory range
+ * @base: linear address corresponding to @res
+ * @kref: to pin while other agents have a need to do lookups
+ * @dev: parent device backing this region
+ * @align: allocation and mapping alignment for child dax devices
+ * @res: physical address range of the region
+ * @pfn_flags: identify whether the pfns are paged back or not
+ */
+struct dax_region {
+ int id;
+ struct ida ida;
+ void *base;
+ struct kref kref;
+ struct device *dev;
+ unsigned int align;
+ struct resource res;
+ unsigned long pfn_flags;
+};
+
+/**
+ * struct dev_dax - instance data for a subdivision of a dax region
+ * @region - parent region
+ * @dax_dev - core dax functionality
+ * @dev - device core
+ * @id - child id in the region
+ * @num_resources - number of physical address extents in this device
+ * @res - array of physical address ranges
+ */
+struct dev_dax {
+ struct dax_region *region;
+ struct dax_device *dax_dev;
+ struct device dev;
+ int id;
+ int num_resources;
+ struct resource res[0];
+};
+#endif
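[editor's note] struct dev_dax ends in a zero-length res[] array, so a single allocation holds the header plus a caller-chosen number of struct resource entries; devm_create_dev_dax() later in this diff sizes it as sizeof(*dev_dax) + sizeof(*res) * count. A hedged, simplified userspace illustration of that trailing-array allocation pattern; the types here are stand-ins, not the kernel structs.

/* Simplified illustration of the trailing-array allocation used by dev_dax. */
#include <stdlib.h>
#include <string.h>

struct range {
	unsigned long start;
	unsigned long end;
};

struct example_dev {
	int num_resources;
	struct range res[];	/* flexible array member, like dev_dax->res */
};

struct example_dev *example_dev_alloc(const struct range *res, int count)
{
	struct example_dev *d;

	/* One allocation covers the header plus 'count' trailing entries. */
	d = calloc(1, sizeof(*d) + sizeof(*res) * count);
	if (!d)
		return NULL;

	d->num_resources = count;
	memcpy(d->res, res, sizeof(*res) * count);
	return d;
}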
diff --git a/drivers/dax/dax.c b/drivers/dax/dax.c
deleted file mode 100644
index 806f180c80d8..000000000000
--- a/drivers/dax/dax.c
+++ /dev/null
@@ -1,862 +0,0 @@
-/*
- * Copyright(c) 2016 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- */
-#include <linux/pagemap.h>
-#include <linux/module.h>
-#include <linux/device.h>
-#include <linux/magic.h>
-#include <linux/mount.h>
-#include <linux/pfn_t.h>
-#include <linux/hash.h>
-#include <linux/cdev.h>
-#include <linux/slab.h>
-#include <linux/dax.h>
-#include <linux/fs.h>
-#include <linux/mm.h>
-#include "dax.h"
-
-static dev_t dax_devt;
-DEFINE_STATIC_SRCU(dax_srcu);
-static struct class *dax_class;
-static DEFINE_IDA(dax_minor_ida);
-static int nr_dax = CONFIG_NR_DEV_DAX;
-module_param(nr_dax, int, S_IRUGO);
-static struct vfsmount *dax_mnt;
-static struct kmem_cache *dax_cache __read_mostly;
-static struct super_block *dax_superblock __read_mostly;
-MODULE_PARM_DESC(nr_dax, "max number of device-dax instances");
-
-/**
- * struct dax_region - mapping infrastructure for dax devices
- * @id: kernel-wide unique region for a memory range
- * @base: linear address corresponding to @res
- * @kref: to pin while other agents have a need to do lookups
- * @dev: parent device backing this region
- * @align: allocation and mapping alignment for child dax devices
- * @res: physical address range of the region
- * @pfn_flags: identify whether the pfns are paged back or not
- */
-struct dax_region {
- int id;
- struct ida ida;
- void *base;
- struct kref kref;
- struct device *dev;
- unsigned int align;
- struct resource res;
- unsigned long pfn_flags;
-};
-
-/**
- * struct dax_dev - subdivision of a dax region
- * @region - parent region
- * @dev - device backing the character device
- * @cdev - core chardev data
- * @alive - !alive + srcu grace period == no new mappings can be established
- * @id - child id in the region
- * @num_resources - number of physical address extents in this device
- * @res - array of physical address ranges
- */
-struct dax_dev {
- struct dax_region *region;
- struct inode *inode;
- struct device dev;
- struct cdev cdev;
- bool alive;
- int id;
- int num_resources;
- struct resource res[0];
-};
-
-static ssize_t id_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct dax_region *dax_region;
- ssize_t rc = -ENXIO;
-
- device_lock(dev);
- dax_region = dev_get_drvdata(dev);
- if (dax_region)
- rc = sprintf(buf, "%d\n", dax_region->id);
- device_unlock(dev);
-
- return rc;
-}
-static DEVICE_ATTR_RO(id);
-
-static ssize_t region_size_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct dax_region *dax_region;
- ssize_t rc = -ENXIO;
-
- device_lock(dev);
- dax_region = dev_get_drvdata(dev);
- if (dax_region)
- rc = sprintf(buf, "%llu\n", (unsigned long long)
- resource_size(&dax_region->res));
- device_unlock(dev);
-
- return rc;
-}
-static struct device_attribute dev_attr_region_size = __ATTR(size, 0444,
- region_size_show, NULL);
-
-static ssize_t align_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct dax_region *dax_region;
- ssize_t rc = -ENXIO;
-
- device_lock(dev);
- dax_region = dev_get_drvdata(dev);
- if (dax_region)
- rc = sprintf(buf, "%u\n", dax_region->align);
- device_unlock(dev);
-
- return rc;
-}
-static DEVICE_ATTR_RO(align);
-
-static struct attribute *dax_region_attributes[] = {
- &dev_attr_region_size.attr,
- &dev_attr_align.attr,
- &dev_attr_id.attr,
- NULL,
-};
-
-static const struct attribute_group dax_region_attribute_group = {
- .name = "dax_region",
- .attrs = dax_region_attributes,
-};
-
-static const struct attribute_group *dax_region_attribute_groups[] = {
- &dax_region_attribute_group,
- NULL,
-};
-
-static struct inode *dax_alloc_inode(struct super_block *sb)
-{
- return kmem_cache_alloc(dax_cache, GFP_KERNEL);
-}
-
-static void dax_i_callback(struct rcu_head *head)
-{
- struct inode *inode = container_of(head, struct inode, i_rcu);
-
- kmem_cache_free(dax_cache, inode);
-}
-
-static void dax_destroy_inode(struct inode *inode)
-{
- call_rcu(&inode->i_rcu, dax_i_callback);
-}
-
-static const struct super_operations dax_sops = {
- .statfs = simple_statfs,
- .alloc_inode = dax_alloc_inode,
- .destroy_inode = dax_destroy_inode,
- .drop_inode = generic_delete_inode,
-};
-
-static struct dentry *dax_mount(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
-{
- return mount_pseudo(fs_type, "dax:", &dax_sops, NULL, DAXFS_MAGIC);
-}
-
-static struct file_system_type dax_type = {
- .name = "dax",
- .mount = dax_mount,
- .kill_sb = kill_anon_super,
-};
-
-static int dax_test(struct inode *inode, void *data)
-{
- return inode->i_cdev == data;
-}
-
-static int dax_set(struct inode *inode, void *data)
-{
- inode->i_cdev = data;
- return 0;
-}
-
-static struct inode *dax_inode_get(struct cdev *cdev, dev_t devt)
-{
- struct inode *inode;
-
- inode = iget5_locked(dax_superblock, hash_32(devt + DAXFS_MAGIC, 31),
- dax_test, dax_set, cdev);
-
- if (!inode)
- return NULL;
-
- if (inode->i_state & I_NEW) {
- inode->i_mode = S_IFCHR;
- inode->i_flags = S_DAX;
- inode->i_rdev = devt;
- mapping_set_gfp_mask(&inode->i_data, GFP_USER);
- unlock_new_inode(inode);
- }
- return inode;
-}
-
-static void init_once(void *inode)
-{
- inode_init_once(inode);
-}
-
-static int dax_inode_init(void)
-{
- int rc;
-
- dax_cache = kmem_cache_create("dax_cache", sizeof(struct inode), 0,
- (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
- SLAB_MEM_SPREAD|SLAB_ACCOUNT),
- init_once);
- if (!dax_cache)
- return -ENOMEM;
-
- rc = register_filesystem(&dax_type);
- if (rc)
- goto err_register_fs;
-
- dax_mnt = kern_mount(&dax_type);
- if (IS_ERR(dax_mnt)) {
- rc = PTR_ERR(dax_mnt);
- goto err_mount;
- }
- dax_superblock = dax_mnt->mnt_sb;
-
- return 0;
-
- err_mount:
- unregister_filesystem(&dax_type);
- err_register_fs:
- kmem_cache_destroy(dax_cache);
-
- return rc;
-}
-
-static void dax_inode_exit(void)
-{
- kern_unmount(dax_mnt);
- unregister_filesystem(&dax_type);
- kmem_cache_destroy(dax_cache);
-}
-
-static void dax_region_free(struct kref *kref)
-{
- struct dax_region *dax_region;
-
- dax_region = container_of(kref, struct dax_region, kref);
- kfree(dax_region);
-}
-
-void dax_region_put(struct dax_region *dax_region)
-{
- kref_put(&dax_region->kref, dax_region_free);
-}
-EXPORT_SYMBOL_GPL(dax_region_put);
-
-static void dax_region_unregister(void *region)
-{
- struct dax_region *dax_region = region;
-
- sysfs_remove_groups(&dax_region->dev->kobj,
- dax_region_attribute_groups);
- dax_region_put(dax_region);
-}
-
-struct dax_region *alloc_dax_region(struct device *parent, int region_id,
- struct resource *res, unsigned int align, void *addr,
- unsigned long pfn_flags)
-{
- struct dax_region *dax_region;
-
- /*
- * The DAX core assumes that it can store its private data in
- * parent->driver_data. This WARN is a reminder / safeguard for
- * developers of device-dax drivers.
- */
- if (dev_get_drvdata(parent)) {
- dev_WARN(parent, "dax core failed to setup private data\n");
- return NULL;
- }
-
- if (!IS_ALIGNED(res->start, align)
- || !IS_ALIGNED(resource_size(res), align))
- return NULL;
-
- dax_region = kzalloc(sizeof(*dax_region), GFP_KERNEL);
- if (!dax_region)
- return NULL;
-
- dev_set_drvdata(parent, dax_region);
- memcpy(&dax_region->res, res, sizeof(*res));
- dax_region->pfn_flags = pfn_flags;
- kref_init(&dax_region->kref);
- dax_region->id = region_id;
- ida_init(&dax_region->ida);
- dax_region->align = align;
- dax_region->dev = parent;
- dax_region->base = addr;
- if (sysfs_create_groups(&parent->kobj, dax_region_attribute_groups)) {
- kfree(dax_region);
- return NULL;;
- }
-
- kref_get(&dax_region->kref);
- if (devm_add_action_or_reset(parent, dax_region_unregister, dax_region))
- return NULL;
- return dax_region;
-}
-EXPORT_SYMBOL_GPL(alloc_dax_region);
-
-static struct dax_dev *to_dax_dev(struct device *dev)
-{
- return container_of(dev, struct dax_dev, dev);
-}
-
-static ssize_t size_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct dax_dev *dax_dev = to_dax_dev(dev);
- unsigned long long size = 0;
- int i;
-
- for (i = 0; i < dax_dev->num_resources; i++)
- size += resource_size(&dax_dev->res[i]);
-
- return sprintf(buf, "%llu\n", size);
-}
-static DEVICE_ATTR_RO(size);
-
-static struct attribute *dax_device_attributes[] = {
- &dev_attr_size.attr,
- NULL,
-};
-
-static const struct attribute_group dax_device_attribute_group = {
- .attrs = dax_device_attributes,
-};
-
-static const struct attribute_group *dax_attribute_groups[] = {
- &dax_device_attribute_group,
- NULL,
-};
-
-static int check_vma(struct dax_dev *dax_dev, struct vm_area_struct *vma,
- const char *func)
-{
- struct dax_region *dax_region = dax_dev->region;
- struct device *dev = &dax_dev->dev;
- unsigned long mask;
-
- if (!dax_dev->alive)
- return -ENXIO;
-
- /* prevent private mappings from being established */
- if ((vma->vm_flags & VM_MAYSHARE) != VM_MAYSHARE) {
- dev_info(dev, "%s: %s: fail, attempted private mapping\n",
- current->comm, func);
- return -EINVAL;
- }
-
- mask = dax_region->align - 1;
- if (vma->vm_start & mask || vma->vm_end & mask) {
- dev_info(dev, "%s: %s: fail, unaligned vma (%#lx - %#lx, %#lx)\n",
- current->comm, func, vma->vm_start, vma->vm_end,
- mask);
- return -EINVAL;
- }
-
- if ((dax_region->pfn_flags & (PFN_DEV|PFN_MAP)) == PFN_DEV
- && (vma->vm_flags & VM_DONTCOPY) == 0) {
- dev_info(dev, "%s: %s: fail, dax range requires MADV_DONTFORK\n",
- current->comm, func);
- return -EINVAL;
- }
-
- if (!vma_is_dax(vma)) {
- dev_info(dev, "%s: %s: fail, vma is not DAX capable\n",
- current->comm, func);
- return -EINVAL;
- }
-
- return 0;
-}
-
-static phys_addr_t pgoff_to_phys(struct dax_dev *dax_dev, pgoff_t pgoff,
- unsigned long size)
-{
- struct resource *res;
- phys_addr_t phys;
- int i;
-
- for (i = 0; i < dax_dev->num_resources; i++) {
- res = &dax_dev->res[i];
- phys = pgoff * PAGE_SIZE + res->start;
- if (phys >= res->start && phys <= res->end)
- break;
- pgoff -= PHYS_PFN(resource_size(res));
- }
-
- if (i < dax_dev->num_resources) {
- res = &dax_dev->res[i];
- if (phys + size - 1 <= res->end)
- return phys;
- }
-
- return -1;
-}
-
-static int __dax_dev_pte_fault(struct dax_dev *dax_dev, struct vm_fault *vmf)
-{
- struct device *dev = &dax_dev->dev;
- struct dax_region *dax_region;
- int rc = VM_FAULT_SIGBUS;
- phys_addr_t phys;
- pfn_t pfn;
- unsigned int fault_size = PAGE_SIZE;
-
- if (check_vma(dax_dev, vmf->vma, __func__))
- return VM_FAULT_SIGBUS;
-
- dax_region = dax_dev->region;
- if (dax_region->align > PAGE_SIZE) {
- dev_dbg(dev, "%s: alignment > fault size\n", __func__);
- return VM_FAULT_SIGBUS;
- }
-
- if (fault_size != dax_region->align)
- return VM_FAULT_SIGBUS;
-
- phys = pgoff_to_phys(dax_dev, vmf->pgoff, PAGE_SIZE);
- if (phys == -1) {
- dev_dbg(dev, "%s: pgoff_to_phys(%#lx) failed\n", __func__,
- vmf->pgoff);
- return VM_FAULT_SIGBUS;
- }
-
- pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);
-
- rc = vm_insert_mixed(vmf->vma, vmf->address, pfn);
-
- if (rc == -ENOMEM)
- return VM_FAULT_OOM;
- if (rc < 0 && rc != -EBUSY)
- return VM_FAULT_SIGBUS;
-
- return VM_FAULT_NOPAGE;
-}
-
-static int __dax_dev_pmd_fault(struct dax_dev *dax_dev, struct vm_fault *vmf)
-{
- unsigned long pmd_addr = vmf->address & PMD_MASK;
- struct device *dev = &dax_dev->dev;
- struct dax_region *dax_region;
- phys_addr_t phys;
- pgoff_t pgoff;
- pfn_t pfn;
- unsigned int fault_size = PMD_SIZE;
-
- if (check_vma(dax_dev, vmf->vma, __func__))
- return VM_FAULT_SIGBUS;
-
- dax_region = dax_dev->region;
- if (dax_region->align > PMD_SIZE) {
- dev_dbg(dev, "%s: alignment > fault size\n", __func__);
- return VM_FAULT_SIGBUS;
- }
-
- /* dax pmd mappings require pfn_t_devmap() */
- if ((dax_region->pfn_flags & (PFN_DEV|PFN_MAP)) != (PFN_DEV|PFN_MAP)) {
- dev_dbg(dev, "%s: alignment > fault size\n", __func__);
- return VM_FAULT_SIGBUS;
- }
-
- if (fault_size < dax_region->align)
- return VM_FAULT_SIGBUS;
- else if (fault_size > dax_region->align)
- return VM_FAULT_FALLBACK;
-
- /* if we are outside of the VMA */
- if (pmd_addr < vmf->vma->vm_start ||
- (pmd_addr + PMD_SIZE) > vmf->vma->vm_end)
- return VM_FAULT_SIGBUS;
-
- pgoff = linear_page_index(vmf->vma, pmd_addr);
- phys = pgoff_to_phys(dax_dev, pgoff, PMD_SIZE);
- if (phys == -1) {
- dev_dbg(dev, "%s: pgoff_to_phys(%#lx) failed\n", __func__,
- pgoff);
- return VM_FAULT_SIGBUS;
- }
-
- pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);
-
- return vmf_insert_pfn_pmd(vmf->vma, vmf->address, vmf->pmd, pfn,
- vmf->flags & FAULT_FLAG_WRITE);
-}
-
-#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
-static int __dax_dev_pud_fault(struct dax_dev *dax_dev, struct vm_fault *vmf)
-{
- unsigned long pud_addr = vmf->address & PUD_MASK;
- struct device *dev = &dax_dev->dev;
- struct dax_region *dax_region;
- phys_addr_t phys;
- pgoff_t pgoff;
- pfn_t pfn;
- unsigned int fault_size = PUD_SIZE;
-
-
- if (check_vma(dax_dev, vmf->vma, __func__))
- return VM_FAULT_SIGBUS;
-
- dax_region = dax_dev->region;
- if (dax_region->align > PUD_SIZE) {
- dev_dbg(dev, "%s: alignment > fault size\n", __func__);
- return VM_FAULT_SIGBUS;
- }
-
- /* dax pud mappings require pfn_t_devmap() */
- if ((dax_region->pfn_flags & (PFN_DEV|PFN_MAP)) != (PFN_DEV|PFN_MAP)) {
- dev_dbg(dev, "%s: alignment > fault size\n", __func__);
- return VM_FAULT_SIGBUS;
- }
-
- if (fault_size < dax_region->align)
- return VM_FAULT_SIGBUS;
- else if (fault_size > dax_region->align)
- return VM_FAULT_FALLBACK;
-
- /* if we are outside of the VMA */
- if (pud_addr < vmf->vma->vm_start ||
- (pud_addr + PUD_SIZE) > vmf->vma->vm_end)
- return VM_FAULT_SIGBUS;
-
- pgoff = linear_page_index(vmf->vma, pud_addr);
- phys = pgoff_to_phys(dax_dev, pgoff, PUD_SIZE);
- if (phys == -1) {
- dev_dbg(dev, "%s: pgoff_to_phys(%#lx) failed\n", __func__,
- pgoff);
- return VM_FAULT_SIGBUS;
- }
-
- pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);
-
- return vmf_insert_pfn_pud(vmf->vma, vmf->address, vmf->pud, pfn,
- vmf->flags & FAULT_FLAG_WRITE);
-}
-#else
-static int __dax_dev_pud_fault(struct dax_dev *dax_dev, struct vm_fault *vmf)
-{
- return VM_FAULT_FALLBACK;
-}
-#endif /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
-
-static int dax_dev_huge_fault(struct vm_fault *vmf,
- enum page_entry_size pe_size)
-{
- int rc, id;
- struct file *filp = vmf->vma->vm_file;
- struct dax_dev *dax_dev = filp->private_data;
-
- dev_dbg(&dax_dev->dev, "%s: %s: %s (%#lx - %#lx)\n", __func__,
- current->comm, (vmf->flags & FAULT_FLAG_WRITE)
- ? "write" : "read",
- vmf->vma->vm_start, vmf->vma->vm_end);
-
- id = srcu_read_lock(&dax_srcu);
- switch (pe_size) {
- case PE_SIZE_PTE:
- rc = __dax_dev_pte_fault(dax_dev, vmf);
- break;
- case PE_SIZE_PMD:
- rc = __dax_dev_pmd_fault(dax_dev, vmf);
- break;
- case PE_SIZE_PUD:
- rc = __dax_dev_pud_fault(dax_dev, vmf);
- break;
- default:
- return VM_FAULT_FALLBACK;
- }
- srcu_read_unlock(&dax_srcu, id);
-
- return rc;
-}
-
-static int dax_dev_fault(struct vm_fault *vmf)
-{
- return dax_dev_huge_fault(vmf, PE_SIZE_PTE);
-}
-
-static const struct vm_operations_struct dax_dev_vm_ops = {
- .fault = dax_dev_fault,
- .huge_fault = dax_dev_huge_fault,
-};
-
-static int dax_mmap(struct file *filp, struct vm_area_struct *vma)
-{
- struct dax_dev *dax_dev = filp->private_data;
- int rc;
-
- dev_dbg(&dax_dev->dev, "%s\n", __func__);
-
- rc = check_vma(dax_dev, vma, __func__);
- if (rc)
- return rc;
-
- vma->vm_ops = &dax_dev_vm_ops;
- vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE;
- return 0;
-}
-
-/* return an unmapped area aligned to the dax region specified alignment */
-static unsigned long dax_get_unmapped_area(struct file *filp,
- unsigned long addr, unsigned long len, unsigned long pgoff,
- unsigned long flags)
-{
- unsigned long off, off_end, off_align, len_align, addr_align, align;
- struct dax_dev *dax_dev = filp ? filp->private_data : NULL;
- struct dax_region *dax_region;
-
- if (!dax_dev || addr)
- goto out;
-
- dax_region = dax_dev->region;
- align = dax_region->align;
- off = pgoff << PAGE_SHIFT;
- off_end = off + len;
- off_align = round_up(off, align);
-
- if ((off_end <= off_align) || ((off_end - off_align) < align))
- goto out;
-
- len_align = len + align;
- if ((off + len_align) < off)
- goto out;
-
- addr_align = current->mm->get_unmapped_area(filp, addr, len_align,
- pgoff, flags);
- if (!IS_ERR_VALUE(addr_align)) {
- addr_align += (off - addr_align) & (align - 1);
- return addr_align;
- }
- out:
- return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);
-}
-
-static int dax_open(struct inode *inode, struct file *filp)
-{
- struct dax_dev *dax_dev;
-
- dax_dev = container_of(inode->i_cdev, struct dax_dev, cdev);
- dev_dbg(&dax_dev->dev, "%s\n", __func__);
- inode->i_mapping = dax_dev->inode->i_mapping;
- inode->i_mapping->host = dax_dev->inode;
- filp->f_mapping = inode->i_mapping;
- filp->private_data = dax_dev;
- inode->i_flags = S_DAX;
-
- return 0;
-}
-
-static int dax_release(struct inode *inode, struct file *filp)
-{
- struct dax_dev *dax_dev = filp->private_data;
-
- dev_dbg(&dax_dev->dev, "%s\n", __func__);
- return 0;
-}
-
-static const struct file_operations dax_fops = {
- .llseek = noop_llseek,
- .owner = THIS_MODULE,
- .open = dax_open,
- .release = dax_release,
- .get_unmapped_area = dax_get_unmapped_area,
- .mmap = dax_mmap,
-};
-
-static void dax_dev_release(struct device *dev)
-{
- struct dax_dev *dax_dev = to_dax_dev(dev);
- struct dax_region *dax_region = dax_dev->region;
-
- ida_simple_remove(&dax_region->ida, dax_dev->id);
- ida_simple_remove(&dax_minor_ida, MINOR(dev->devt));
- dax_region_put(dax_region);
- iput(dax_dev->inode);
- kfree(dax_dev);
-}
-
-static void unregister_dax_dev(void *dev)
-{
- struct dax_dev *dax_dev = to_dax_dev(dev);
- struct cdev *cdev = &dax_dev->cdev;
-
- dev_dbg(dev, "%s\n", __func__);
-
- /*
- * Note, rcu is not protecting the liveness of dax_dev, rcu is
- * ensuring that any fault handlers that might have seen
- * dax_dev->alive == true, have completed. Any fault handlers
- * that start after synchronize_srcu() has started will abort
- * upon seeing dax_dev->alive == false.
- */
- dax_dev->alive = false;
- synchronize_srcu(&dax_srcu);
- unmap_mapping_range(dax_dev->inode->i_mapping, 0, 0, 1);
- cdev_del(cdev);
- device_unregister(dev);
-}
-
-struct dax_dev *devm_create_dax_dev(struct dax_region *dax_region,
- struct resource *res, int count)
-{
- struct device *parent = dax_region->dev;
- struct dax_dev *dax_dev;
- int rc = 0, minor, i;
- struct device *dev;
- struct cdev *cdev;
- dev_t dev_t;
-
- dax_dev = kzalloc(sizeof(*dax_dev) + sizeof(*res) * count, GFP_KERNEL);
- if (!dax_dev)
- return ERR_PTR(-ENOMEM);
-
- for (i = 0; i < count; i++) {
- if (!IS_ALIGNED(res[i].start, dax_region->align)
- || !IS_ALIGNED(resource_size(&res[i]),
- dax_region->align)) {
- rc = -EINVAL;
- break;
- }
- dax_dev->res[i].start = res[i].start;
- dax_dev->res[i].end = res[i].end;
- }
-
- if (i < count)
- goto err_id;
-
- dax_dev->id = ida_simple_get(&dax_region->ida, 0, 0, GFP_KERNEL);
- if (dax_dev->id < 0) {
- rc = dax_dev->id;
- goto err_id;
- }
-
- minor = ida_simple_get(&dax_minor_ida, 0, 0, GFP_KERNEL);
- if (minor < 0) {
- rc = minor;
- goto err_minor;
- }
-
- dev_t = MKDEV(MAJOR(dax_devt), minor);
- dev = &dax_dev->dev;
- dax_dev->inode = dax_inode_get(&dax_dev->cdev, dev_t);
- if (!dax_dev->inode) {
- rc = -ENOMEM;
- goto err_inode;
- }
-
- /* device_initialize() so cdev can reference kobj parent */
- device_initialize(dev);
-
- cdev = &dax_dev->cdev;
- cdev_init(cdev, &dax_fops);
- cdev->owner = parent->driver->owner;
- cdev->kobj.parent = &dev->kobj;
- rc = cdev_add(&dax_dev->cdev, dev_t, 1);
- if (rc)
- goto err_cdev;
-
- /* from here on we're committed to teardown via dax_dev_release() */
- dax_dev->num_resources = count;
- dax_dev->alive = true;
- dax_dev->region = dax_region;
- kref_get(&dax_region->kref);
-
- dev->devt = dev_t;
- dev->class = dax_class;
- dev->parent = parent;
- dev->groups = dax_attribute_groups;
- dev->release = dax_dev_release;
- dev_set_name(dev, "dax%d.%d", dax_region->id, dax_dev->id);
- rc = device_add(dev);
- if (rc) {
- put_device(dev);
- return ERR_PTR(rc);
- }
-
- rc = devm_add_action_or_reset(dax_region->dev, unregister_dax_dev, dev);
- if (rc)
- return ERR_PTR(rc);
-
- return dax_dev;
-
- err_cdev:
- iput(dax_dev->inode);
- err_inode:
- ida_simple_remove(&dax_minor_ida, minor);
- err_minor:
- ida_simple_remove(&dax_region->ida, dax_dev->id);
- err_id:
- kfree(dax_dev);
-
- return ERR_PTR(rc);
-}
-EXPORT_SYMBOL_GPL(devm_create_dax_dev);
-
-static int __init dax_init(void)
-{
- int rc;
-
- rc = dax_inode_init();
- if (rc)
- return rc;
-
- nr_dax = max(nr_dax, 256);
- rc = alloc_chrdev_region(&dax_devt, 0, nr_dax, "dax");
- if (rc)
- goto err_chrdev;
-
- dax_class = class_create(THIS_MODULE, "dax");
- if (IS_ERR(dax_class)) {
- rc = PTR_ERR(dax_class);
- goto err_class;
- }
-
- return 0;
-
- err_class:
- unregister_chrdev_region(dax_devt, nr_dax);
- err_chrdev:
- dax_inode_exit();
- return rc;
-}
-
-static void __exit dax_exit(void)
-{
- class_destroy(dax_class);
- unregister_chrdev_region(dax_devt, nr_dax);
- ida_destroy(&dax_minor_ida);
- dax_inode_exit();
-}
-
-MODULE_AUTHOR("Intel Corporation");
-MODULE_LICENSE("GPL v2");
-subsys_initcall(dax_init);
-module_exit(dax_exit);
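[editor's note] The deleted dax.c guarded its fault handlers with a file-local SRCU domain: teardown set dax_dev->alive = false and then synchronize_srcu() waited for in-flight readers, while faults ran under srcu_read_lock(). The replacement device.c delegates this to dax_read_lock()/dax_alive() in the new dax core, but the underlying pattern is the same. A hedged kernel-style sketch of that read-mostly liveness check; example_obj and friends are illustrative names.

#include <linux/srcu.h>
#include <linux/types.h>
#include <linux/errno.h>

/* Illustrative SRCU-protected "alive" flag, as the deleted dax.c used. */
DEFINE_STATIC_SRCU(example_srcu);

struct example_obj {
	bool alive;
};

/* Reader side: fail fast if the object has been torn down. */
static int example_do_work(struct example_obj *obj)
{
	int idx, rc = -ENXIO;

	idx = srcu_read_lock(&example_srcu);
	if (obj->alive)
		rc = 0;		/* safe to use obj while inside the read lock */
	srcu_read_unlock(&example_srcu, idx);
	return rc;
}

/* Teardown: flip the flag, then wait out every reader that saw it true. */
static void example_kill(struct example_obj *obj)
{
	obj->alive = false;
	synchronize_srcu(&example_srcu);
}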
diff --git a/drivers/dax/dax.h b/drivers/dax/dax.h
index ddd829ab58c0..f9e5feea742c 100644
--- a/drivers/dax/dax.h
+++ b/drivers/dax/dax.h
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ * Copyright(c) 2016 - 2017 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -12,14 +12,7 @@
*/
#ifndef __DAX_H__
#define __DAX_H__
-struct device;
-struct dax_dev;
-struct resource;
-struct dax_region;
-void dax_region_put(struct dax_region *dax_region);
-struct dax_region *alloc_dax_region(struct device *parent,
- int region_id, struct resource *res, unsigned int align,
- void *addr, unsigned long flags);
-struct dax_dev *devm_create_dax_dev(struct dax_region *dax_region,
- struct resource *res, int count);
+struct dax_device;
+struct dax_device *inode_dax(struct inode *inode);
+struct inode *dax_inode(struct dax_device *dax_dev);
#endif /* __DAX_H__ */
diff --git a/drivers/dax/device-dax.h b/drivers/dax/device-dax.h
new file mode 100644
index 000000000000..fdcd9769ffde
--- /dev/null
+++ b/drivers/dax/device-dax.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+#ifndef __DEVICE_DAX_H__
+#define __DEVICE_DAX_H__
+struct device;
+struct dev_dax;
+struct resource;
+struct dax_region;
+void dax_region_put(struct dax_region *dax_region);
+struct dax_region *alloc_dax_region(struct device *parent,
+ int region_id, struct resource *res, unsigned int align,
+ void *addr, unsigned long flags);
+struct dev_dax *devm_create_dev_dax(struct dax_region *dax_region,
+ struct resource *res, int count);
+#endif /* __DEVICE_DAX_H__ */
diff --git a/drivers/dax/device.c b/drivers/dax/device.c
new file mode 100644
index 000000000000..006e657dfcb9
--- /dev/null
+++ b/drivers/dax/device.c
@@ -0,0 +1,660 @@
+/*
+ * Copyright(c) 2016 - 2017 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+#include <linux/pagemap.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/pfn_t.h>
+#include <linux/cdev.h>
+#include <linux/slab.h>
+#include <linux/dax.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include "dax-private.h"
+#include "dax.h"
+
+static struct class *dax_class;
+
+/*
+ * Rely on the fact that drvdata is set before the attributes are
+ * registered, and that the attributes are unregistered before drvdata
+ * is cleared to assume that drvdata is always valid.
+ */
+static ssize_t id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dax_region *dax_region = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d\n", dax_region->id);
+}
+static DEVICE_ATTR_RO(id);
+
+static ssize_t region_size_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dax_region *dax_region = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%llu\n", (unsigned long long)
+ resource_size(&dax_region->res));
+}
+static struct device_attribute dev_attr_region_size = __ATTR(size, 0444,
+ region_size_show, NULL);
+
+static ssize_t align_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dax_region *dax_region = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%u\n", dax_region->align);
+}
+static DEVICE_ATTR_RO(align);
+
+static struct attribute *dax_region_attributes[] = {
+ &dev_attr_region_size.attr,
+ &dev_attr_align.attr,
+ &dev_attr_id.attr,
+ NULL,
+};
+
+static const struct attribute_group dax_region_attribute_group = {
+ .name = "dax_region",
+ .attrs = dax_region_attributes,
+};
+
+static const struct attribute_group *dax_region_attribute_groups[] = {
+ &dax_region_attribute_group,
+ NULL,
+};
+
+static void dax_region_free(struct kref *kref)
+{
+ struct dax_region *dax_region;
+
+ dax_region = container_of(kref, struct dax_region, kref);
+ kfree(dax_region);
+}
+
+void dax_region_put(struct dax_region *dax_region)
+{
+ kref_put(&dax_region->kref, dax_region_free);
+}
+EXPORT_SYMBOL_GPL(dax_region_put);
+
+static void dax_region_unregister(void *region)
+{
+ struct dax_region *dax_region = region;
+
+ sysfs_remove_groups(&dax_region->dev->kobj,
+ dax_region_attribute_groups);
+ dax_region_put(dax_region);
+}
+
+struct dax_region *alloc_dax_region(struct device *parent, int region_id,
+ struct resource *res, unsigned int align, void *addr,
+ unsigned long pfn_flags)
+{
+ struct dax_region *dax_region;
+
+ /*
+ * The DAX core assumes that it can store its private data in
+ * parent->driver_data. This WARN is a reminder / safeguard for
+ * developers of device-dax drivers.
+ */
+ if (dev_get_drvdata(parent)) {
+ dev_WARN(parent, "dax core failed to setup private data\n");
+ return NULL;
+ }
+
+ if (!IS_ALIGNED(res->start, align)
+ || !IS_ALIGNED(resource_size(res), align))
+ return NULL;
+
+ dax_region = kzalloc(sizeof(*dax_region), GFP_KERNEL);
+ if (!dax_region)
+ return NULL;
+
+ dev_set_drvdata(parent, dax_region);
+ memcpy(&dax_region->res, res, sizeof(*res));
+ dax_region->pfn_flags = pfn_flags;
+ kref_init(&dax_region->kref);
+ dax_region->id = region_id;
+ ida_init(&dax_region->ida);
+ dax_region->align = align;
+ dax_region->dev = parent;
+ dax_region->base = addr;
+ if (sysfs_create_groups(&parent->kobj, dax_region_attribute_groups)) {
+ kfree(dax_region);
+ return NULL;
+ }
+
+ kref_get(&dax_region->kref);
+ if (devm_add_action_or_reset(parent, dax_region_unregister, dax_region))
+ return NULL;
+ return dax_region;
+}
+EXPORT_SYMBOL_GPL(alloc_dax_region);
+
+static struct dev_dax *to_dev_dax(struct device *dev)
+{
+ return container_of(dev, struct dev_dax, dev);
+}
+
+static ssize_t size_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dev_dax *dev_dax = to_dev_dax(dev);
+ unsigned long long size = 0;
+ int i;
+
+ for (i = 0; i < dev_dax->num_resources; i++)
+ size += resource_size(&dev_dax->res[i]);
+
+ return sprintf(buf, "%llu\n", size);
+}
+static DEVICE_ATTR_RO(size);
+
+static struct attribute *dev_dax_attributes[] = {
+ &dev_attr_size.attr,
+ NULL,
+};
+
+static const struct attribute_group dev_dax_attribute_group = {
+ .attrs = dev_dax_attributes,
+};
+
+static const struct attribute_group *dax_attribute_groups[] = {
+ &dev_dax_attribute_group,
+ NULL,
+};
+
+static int check_vma(struct dev_dax *dev_dax, struct vm_area_struct *vma,
+ const char *func)
+{
+ struct dax_region *dax_region = dev_dax->region;
+ struct device *dev = &dev_dax->dev;
+ unsigned long mask;
+
+ if (!dax_alive(dev_dax->dax_dev))
+ return -ENXIO;
+
+ /* prevent private mappings from being established */
+ if ((vma->vm_flags & VM_MAYSHARE) != VM_MAYSHARE) {
+ dev_info(dev, "%s: %s: fail, attempted private mapping\n",
+ current->comm, func);
+ return -EINVAL;
+ }
+
+ mask = dax_region->align - 1;
+ if (vma->vm_start & mask || vma->vm_end & mask) {
+ dev_info(dev, "%s: %s: fail, unaligned vma (%#lx - %#lx, %#lx)\n",
+ current->comm, func, vma->vm_start, vma->vm_end,
+ mask);
+ return -EINVAL;
+ }
+
+ if ((dax_region->pfn_flags & (PFN_DEV|PFN_MAP)) == PFN_DEV
+ && (vma->vm_flags & VM_DONTCOPY) == 0) {
+ dev_info(dev, "%s: %s: fail, dax range requires MADV_DONTFORK\n",
+ current->comm, func);
+ return -EINVAL;
+ }
+
+ if (!vma_is_dax(vma)) {
+ dev_info(dev, "%s: %s: fail, vma is not DAX capable\n",
+ current->comm, func);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* see "strong" declaration in tools/testing/nvdimm/dax-dev.c */
+__weak phys_addr_t dax_pgoff_to_phys(struct dev_dax *dev_dax, pgoff_t pgoff,
+ unsigned long size)
+{
+ struct resource *res;
+ phys_addr_t phys;
+ int i;
+
+ for (i = 0; i < dev_dax->num_resources; i++) {
+ res = &dev_dax->res[i];
+ phys = pgoff * PAGE_SIZE + res->start;
+ if (phys >= res->start && phys <= res->end)
+ break;
+ pgoff -= PHYS_PFN(resource_size(res));
+ }
+
+ if (i < dev_dax->num_resources) {
+ res = &dev_dax->res[i];
+ if (phys + size - 1 <= res->end)
+ return phys;
+ }
+
+ return -1;
+}
+
+static int __dev_dax_pte_fault(struct dev_dax *dev_dax, struct vm_fault *vmf)
+{
+ struct device *dev = &dev_dax->dev;
+ struct dax_region *dax_region;
+ int rc = VM_FAULT_SIGBUS;
+ phys_addr_t phys;
+ pfn_t pfn;
+ unsigned int fault_size = PAGE_SIZE;
+
+ if (check_vma(dev_dax, vmf->vma, __func__))
+ return VM_FAULT_SIGBUS;
+
+ dax_region = dev_dax->region;
+ if (dax_region->align > PAGE_SIZE) {
+ dev_dbg(dev, "%s: alignment (%#x) > fault size (%#x)\n",
+ __func__, dax_region->align, fault_size);
+ return VM_FAULT_SIGBUS;
+ }
+
+ if (fault_size != dax_region->align)
+ return VM_FAULT_SIGBUS;
+
+ phys = dax_pgoff_to_phys(dev_dax, vmf->pgoff, PAGE_SIZE);
+ if (phys == -1) {
+ dev_dbg(dev, "%s: pgoff_to_phys(%#lx) failed\n", __func__,
+ vmf->pgoff);
+ return VM_FAULT_SIGBUS;
+ }
+
+ pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);
+
+ rc = vm_insert_mixed(vmf->vma, vmf->address, pfn);
+
+ if (rc == -ENOMEM)
+ return VM_FAULT_OOM;
+ if (rc < 0 && rc != -EBUSY)
+ return VM_FAULT_SIGBUS;
+
+ return VM_FAULT_NOPAGE;
+}
+
+static int __dev_dax_pmd_fault(struct dev_dax *dev_dax, struct vm_fault *vmf)
+{
+ unsigned long pmd_addr = vmf->address & PMD_MASK;
+ struct device *dev = &dev_dax->dev;
+ struct dax_region *dax_region;
+ phys_addr_t phys;
+ pgoff_t pgoff;
+ pfn_t pfn;
+ unsigned int fault_size = PMD_SIZE;
+
+ if (check_vma(dev_dax, vmf->vma, __func__))
+ return VM_FAULT_SIGBUS;
+
+ dax_region = dev_dax->region;
+ if (dax_region->align > PMD_SIZE) {
+ dev_dbg(dev, "%s: alignment (%#x) > fault size (%#x)\n",
+ __func__, dax_region->align, fault_size);
+ return VM_FAULT_SIGBUS;
+ }
+
+ /* dax pmd mappings require pfn_t_devmap() */
+ if ((dax_region->pfn_flags & (PFN_DEV|PFN_MAP)) != (PFN_DEV|PFN_MAP)) {
+ dev_dbg(dev, "%s: region lacks devmap flags\n", __func__);
+ return VM_FAULT_SIGBUS;
+ }
+
+ if (fault_size < dax_region->align)
+ return VM_FAULT_SIGBUS;
+ else if (fault_size > dax_region->align)
+ return VM_FAULT_FALLBACK;
+
+ /* if we are outside of the VMA */
+ if (pmd_addr < vmf->vma->vm_start ||
+ (pmd_addr + PMD_SIZE) > vmf->vma->vm_end)
+ return VM_FAULT_SIGBUS;
+
+ pgoff = linear_page_index(vmf->vma, pmd_addr);
+ phys = dax_pgoff_to_phys(dev_dax, pgoff, PMD_SIZE);
+ if (phys == -1) {
+ dev_dbg(dev, "%s: pgoff_to_phys(%#lx) failed\n", __func__,
+ pgoff);
+ return VM_FAULT_SIGBUS;
+ }
+
+ pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);
+
+ return vmf_insert_pfn_pmd(vmf->vma, vmf->address, vmf->pmd, pfn,
+ vmf->flags & FAULT_FLAG_WRITE);
+}
+
+#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
+static int __dev_dax_pud_fault(struct dev_dax *dev_dax, struct vm_fault *vmf)
+{
+ unsigned long pud_addr = vmf->address & PUD_MASK;
+ struct device *dev = &dev_dax->dev;
+ struct dax_region *dax_region;
+ phys_addr_t phys;
+ pgoff_t pgoff;
+ pfn_t pfn;
+ unsigned int fault_size = PUD_SIZE;
+
+
+ if (check_vma(dev_dax, vmf->vma, __func__))
+ return VM_FAULT_SIGBUS;
+
+ dax_region = dev_dax->region;
+ if (dax_region->align > PUD_SIZE) {
+ dev_dbg(dev, "%s: alignment (%#x) > fault size (%#x)\n",
+ __func__, dax_region->align, fault_size);
+ return VM_FAULT_SIGBUS;
+ }
+
+ /* dax pud mappings require pfn_t_devmap() */
+ if ((dax_region->pfn_flags & (PFN_DEV|PFN_MAP)) != (PFN_DEV|PFN_MAP)) {
+ dev_dbg(dev, "%s: region lacks devmap flags\n", __func__);
+ return VM_FAULT_SIGBUS;
+ }
+
+ if (fault_size < dax_region->align)
+ return VM_FAULT_SIGBUS;
+ else if (fault_size > dax_region->align)
+ return VM_FAULT_FALLBACK;
+
+ /* if we are outside of the VMA */
+ if (pud_addr < vmf->vma->vm_start ||
+ (pud_addr + PUD_SIZE) > vmf->vma->vm_end)
+ return VM_FAULT_SIGBUS;
+
+ pgoff = linear_page_index(vmf->vma, pud_addr);
+ phys = dax_pgoff_to_phys(dev_dax, pgoff, PUD_SIZE);
+ if (phys == -1) {
+ dev_dbg(dev, "%s: pgoff_to_phys(%#lx) failed\n", __func__,
+ pgoff);
+ return VM_FAULT_SIGBUS;
+ }
+
+ pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);
+
+ return vmf_insert_pfn_pud(vmf->vma, vmf->address, vmf->pud, pfn,
+ vmf->flags & FAULT_FLAG_WRITE);
+}
+#else
+static int __dev_dax_pud_fault(struct dev_dax *dev_dax, struct vm_fault *vmf)
+{
+ return VM_FAULT_FALLBACK;
+}
+#endif /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
+
+static int dev_dax_huge_fault(struct vm_fault *vmf,
+ enum page_entry_size pe_size)
+{
+ int rc, id;
+ struct file *filp = vmf->vma->vm_file;
+ struct dev_dax *dev_dax = filp->private_data;
+
+ dev_dbg(&dev_dax->dev, "%s: %s: %s (%#lx - %#lx) size = %d\n", __func__,
+ current->comm, (vmf->flags & FAULT_FLAG_WRITE)
+ ? "write" : "read",
+ vmf->vma->vm_start, vmf->vma->vm_end, pe_size);
+
+ id = dax_read_lock();
+ switch (pe_size) {
+ case PE_SIZE_PTE:
+ rc = __dev_dax_pte_fault(dev_dax, vmf);
+ break;
+ case PE_SIZE_PMD:
+ rc = __dev_dax_pmd_fault(dev_dax, vmf);
+ break;
+ case PE_SIZE_PUD:
+ rc = __dev_dax_pud_fault(dev_dax, vmf);
+ break;
+ default:
+ rc = VM_FAULT_SIGBUS;
+ }
+ dax_read_unlock(id);
+
+ return rc;
+}
+
+static int dev_dax_fault(struct vm_fault *vmf)
+{
+ return dev_dax_huge_fault(vmf, PE_SIZE_PTE);
+}
+
+static const struct vm_operations_struct dax_vm_ops = {
+ .fault = dev_dax_fault,
+ .huge_fault = dev_dax_huge_fault,
+};
+
+static int dax_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct dev_dax *dev_dax = filp->private_data;
+ int rc, id;
+
+ dev_dbg(&dev_dax->dev, "%s\n", __func__);
+
+ /*
+ * We lock to check dax_dev liveness and will re-check at
+ * fault time.
+ */
+ id = dax_read_lock();
+ rc = check_vma(dev_dax, vma, __func__);
+ dax_read_unlock(id);
+ if (rc)
+ return rc;
+
+ vma->vm_ops = &dax_vm_ops;
+ vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE;
+ return 0;
+}
+
+/* return an unmapped area aligned to the dax region specified alignment */
+static unsigned long dax_get_unmapped_area(struct file *filp,
+ unsigned long addr, unsigned long len, unsigned long pgoff,
+ unsigned long flags)
+{
+ unsigned long off, off_end, off_align, len_align, addr_align, align;
+ struct dev_dax *dev_dax = filp ? filp->private_data : NULL;
+ struct dax_region *dax_region;
+
+ if (!dev_dax || addr)
+ goto out;
+
+ dax_region = dev_dax->region;
+ align = dax_region->align;
+ off = pgoff << PAGE_SHIFT;
+ off_end = off + len;
+ off_align = round_up(off, align);
+
+ if ((off_end <= off_align) || ((off_end - off_align) < align))
+ goto out;
+
+ len_align = len + align;
+ if ((off + len_align) < off)
+ goto out;
+
+ addr_align = current->mm->get_unmapped_area(filp, addr, len_align,
+ pgoff, flags);
+ if (!IS_ERR_VALUE(addr_align)) {
+ addr_align += (off - addr_align) & (align - 1);
+ return addr_align;
+ }
+ out:
+ return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);
+}
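
The alignment dance above over-allocates by one extra alignment unit and then bumps the address returned by the mm so that it is congruent to the file offset modulo the region alignment. A worked sketch with hypothetical numbers (2MiB alignment) may help:

/*
 * Hypothetical values, align = 2MiB (0x200000):
 *   off        = pgoff << PAGE_SHIFT           = 0x200000
 *   len        = 4MiB, so len_align            = 0x600000
 *   mm returns addr_align                      = 0x7f1234500000
 *   addr_align += (off - addr_align) & (align - 1)
 *              += (0x200000 - 0x7f1234500000) & 0x1fffff = 0x100000
 *   result     = 0x7f1234600000, a 2MiB-aligned address, so PMD faults
 *   over this mapping can be satisfied with huge page mappings.
 */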
+
+static int dax_open(struct inode *inode, struct file *filp)
+{
+ struct dax_device *dax_dev = inode_dax(inode);
+ struct inode *__dax_inode = dax_inode(dax_dev);
+ struct dev_dax *dev_dax = dax_get_private(dax_dev);
+
+ dev_dbg(&dev_dax->dev, "%s\n", __func__);
+ inode->i_mapping = __dax_inode->i_mapping;
+ inode->i_mapping->host = __dax_inode;
+ filp->f_mapping = inode->i_mapping;
+ filp->private_data = dev_dax;
+ inode->i_flags = S_DAX;
+
+ return 0;
+}
+
+static int dax_release(struct inode *inode, struct file *filp)
+{
+ struct dev_dax *dev_dax = filp->private_data;
+
+ dev_dbg(&dev_dax->dev, "%s\n", __func__);
+ return 0;
+}
+
+static const struct file_operations dax_fops = {
+ .llseek = noop_llseek,
+ .owner = THIS_MODULE,
+ .open = dax_open,
+ .release = dax_release,
+ .get_unmapped_area = dax_get_unmapped_area,
+ .mmap = dax_mmap,
+};
+
+static void dev_dax_release(struct device *dev)
+{
+ struct dev_dax *dev_dax = to_dev_dax(dev);
+ struct dax_region *dax_region = dev_dax->region;
+ struct dax_device *dax_dev = dev_dax->dax_dev;
+
+ ida_simple_remove(&dax_region->ida, dev_dax->id);
+ dax_region_put(dax_region);
+ put_dax(dax_dev);
+ kfree(dev_dax);
+}
+
+static void kill_dev_dax(struct dev_dax *dev_dax)
+{
+ struct dax_device *dax_dev = dev_dax->dax_dev;
+ struct inode *inode = dax_inode(dax_dev);
+
+ kill_dax(dax_dev);
+ unmap_mapping_range(inode->i_mapping, 0, 0, 1);
+}
+
+static void unregister_dev_dax(void *dev)
+{
+ struct dev_dax *dev_dax = to_dev_dax(dev);
+ struct dax_device *dax_dev = dev_dax->dax_dev;
+ struct inode *inode = dax_inode(dax_dev);
+ struct cdev *cdev = inode->i_cdev;
+
+ dev_dbg(dev, "%s\n", __func__);
+
+ kill_dev_dax(dev_dax);
+ cdev_device_del(cdev, dev);
+ put_device(dev);
+}
+
+struct dev_dax *devm_create_dev_dax(struct dax_region *dax_region,
+ struct resource *res, int count)
+{
+ struct device *parent = dax_region->dev;
+ struct dax_device *dax_dev;
+ struct dev_dax *dev_dax;
+ struct inode *inode;
+ struct device *dev;
+ struct cdev *cdev;
+ int rc = 0, i;
+
+ dev_dax = kzalloc(sizeof(*dev_dax) + sizeof(*res) * count, GFP_KERNEL);
+ if (!dev_dax)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = 0; i < count; i++) {
+ if (!IS_ALIGNED(res[i].start, dax_region->align)
+ || !IS_ALIGNED(resource_size(&res[i]),
+ dax_region->align)) {
+ rc = -EINVAL;
+ break;
+ }
+ dev_dax->res[i].start = res[i].start;
+ dev_dax->res[i].end = res[i].end;
+ }
+
+ if (i < count)
+ goto err_id;
+
+ dev_dax->id = ida_simple_get(&dax_region->ida, 0, 0, GFP_KERNEL);
+ if (dev_dax->id < 0) {
+ rc = dev_dax->id;
+ goto err_id;
+ }
+
+ /*
+ * No 'host' or dax_operations since there is no access to this
+ * device outside of mmap of the resulting character device.
+ */
+ dax_dev = alloc_dax(dev_dax, NULL, NULL);
+ if (!dax_dev) {
+ rc = -ENOMEM;
+ goto err_dax;
+ }
+
+ /* from here on we're committed to teardown via dev_dax_release() */
+ dev = &dev_dax->dev;
+ device_initialize(dev);
+
+ inode = dax_inode(dax_dev);
+ cdev = inode->i_cdev;
+ cdev_init(cdev, &dax_fops);
+ cdev->owner = parent->driver->owner;
+
+ dev_dax->num_resources = count;
+ dev_dax->dax_dev = dax_dev;
+ dev_dax->region = dax_region;
+ kref_get(&dax_region->kref);
+
+ dev->devt = inode->i_rdev;
+ dev->class = dax_class;
+ dev->parent = parent;
+ dev->groups = dax_attribute_groups;
+ dev->release = dev_dax_release;
+ dev_set_name(dev, "dax%d.%d", dax_region->id, dev_dax->id);
+
+ rc = cdev_device_add(cdev, dev);
+ if (rc) {
+ kill_dev_dax(dev_dax);
+ put_device(dev);
+ return ERR_PTR(rc);
+ }
+
+ rc = devm_add_action_or_reset(dax_region->dev, unregister_dev_dax, dev);
+ if (rc)
+ return ERR_PTR(rc);
+
+ return dev_dax;
+
+ err_dax:
+ ida_simple_remove(&dax_region->ida, dev_dax->id);
+ err_id:
+ kfree(dev_dax);
+
+ return ERR_PTR(rc);
+}
+EXPORT_SYMBOL_GPL(devm_create_dev_dax);
+
+static int __init dax_init(void)
+{
+ dax_class = class_create(THIS_MODULE, "dax");
+ return PTR_ERR_OR_ZERO(dax_class);
+}
+
+static void __exit dax_exit(void)
+{
+ class_destroy(dax_class);
+}
+
+MODULE_AUTHOR("Intel Corporation");
+MODULE_LICENSE("GPL v2");
+subsys_initcall(dax_init);
+module_exit(dax_exit);
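
For context only, a minimal userspace sketch of how the resulting character device is consumed; the device name, mapping size and lack of error reporting are illustrative, not part of this patch. Faults taken on the mapping are serviced by dev_dax_huge_fault() above:

#include <fcntl.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	size_t len = 2UL << 20;			/* one 2MiB extent, assuming a 2MiB-aligned region */
	int fd = open("/dev/dax0.0", O_RDWR);	/* hypothetical device node */
	void *p;

	if (fd < 0)
		return 1;
	p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED) {
		close(fd);
		return 1;
	}
	memset(p, 0, len);			/* faults resolve directly to device pfns */
	munmap(p, len);
	close(fd);
	return 0;
}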
diff --git a/drivers/dax/pmem.c b/drivers/dax/pmem.c
index cb0d742fa23f..9f2a0b4fd801 100644
--- a/drivers/dax/pmem.c
+++ b/drivers/dax/pmem.c
@@ -16,7 +16,7 @@
#include <linux/pfn_t.h>
#include "../nvdimm/pfn.h"
#include "../nvdimm/nd.h"
-#include "dax.h"
+#include "device-dax.h"
struct dax_pmem {
struct device *dev;
@@ -61,8 +61,8 @@ static int dax_pmem_probe(struct device *dev)
int rc;
void *addr;
struct resource res;
- struct dax_dev *dax_dev;
struct nd_pfn_sb *pfn_sb;
+ struct dev_dax *dev_dax;
struct dax_pmem *dax_pmem;
struct nd_region *nd_region;
struct nd_namespace_io *nsio;
@@ -130,12 +130,12 @@ static int dax_pmem_probe(struct device *dev)
return -ENOMEM;
/* TODO: support for subdividing a dax region... */
- dax_dev = devm_create_dax_dev(dax_region, &res, 1);
+ dev_dax = devm_create_dev_dax(dax_region, &res, 1);
- /* child dax_dev instances now own the lifetime of the dax_region */
+ /* child dev_dax instances now own the lifetime of the dax_region */
dax_region_put(dax_region);
- return PTR_ERR_OR_ZERO(dax_dev);
+ return PTR_ERR_OR_ZERO(dev_dax);
}
static struct nd_device_driver dax_pmem_driver = {
diff --git a/drivers/dax/super.c b/drivers/dax/super.c
new file mode 100644
index 000000000000..465dcd7317d5
--- /dev/null
+++ b/drivers/dax/super.c
@@ -0,0 +1,425 @@
+/*
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+#include <linux/pagemap.h>
+#include <linux/module.h>
+#include <linux/mount.h>
+#include <linux/magic.h>
+#include <linux/cdev.h>
+#include <linux/hash.h>
+#include <linux/slab.h>
+#include <linux/dax.h>
+#include <linux/fs.h>
+
+static int nr_dax = CONFIG_NR_DEV_DAX;
+module_param(nr_dax, int, S_IRUGO);
+MODULE_PARM_DESC(nr_dax, "max number of dax device instances");
+
+static dev_t dax_devt;
+DEFINE_STATIC_SRCU(dax_srcu);
+static struct vfsmount *dax_mnt;
+static DEFINE_IDA(dax_minor_ida);
+static struct kmem_cache *dax_cache __read_mostly;
+static struct super_block *dax_superblock __read_mostly;
+
+#define DAX_HASH_SIZE (PAGE_SIZE / sizeof(struct hlist_head))
+static struct hlist_head dax_host_list[DAX_HASH_SIZE];
+static DEFINE_SPINLOCK(dax_host_lock);
+
+int dax_read_lock(void)
+{
+ return srcu_read_lock(&dax_srcu);
+}
+EXPORT_SYMBOL_GPL(dax_read_lock);
+
+void dax_read_unlock(int id)
+{
+ srcu_read_unlock(&dax_srcu, id);
+}
+EXPORT_SYMBOL_GPL(dax_read_unlock);
+
+/**
+ * struct dax_device - anchor object for dax services
+ * @inode: core vfs
+ * @cdev: optional character interface for "device dax"
+ * @host: optional name for lookups where the device path is not available
+ * @private: dax driver private data
+ * @alive: !alive + rcu grace period == no new operations / mappings
+ */
+struct dax_device {
+ struct hlist_node list;
+ struct inode inode;
+ struct cdev cdev;
+ const char *host;
+ void *private;
+ bool alive;
+ const struct dax_operations *ops;
+};
+
+/**
+ * dax_direct_access() - translate a device pgoff to an absolute pfn
+ * @dax_dev: a dax_device instance representing the logical memory range
+ * @pgoff: offset in pages from the start of the device to translate
+ * @nr_pages: number of consecutive pages caller can handle relative to @pfn
+ * @kaddr: output parameter that returns a virtual address mapping of pfn
+ * @pfn: output parameter that returns an absolute pfn translation of @pgoff
+ *
+ * Return: negative errno if an error occurs, otherwise the number of
+ * pages accessible at the device relative to @pgoff.
+ */
+long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
+ void **kaddr, pfn_t *pfn)
+{
+ long avail;
+
+ /*
+ * The device driver is allowed to sleep, in order to make the
+ * memory directly accessible.
+ */
+ might_sleep();
+
+ if (!dax_dev)
+ return -EOPNOTSUPP;
+
+ if (!dax_alive(dax_dev))
+ return -ENXIO;
+
+ if (nr_pages < 0)
+ return nr_pages;
+
+ avail = dax_dev->ops->direct_access(dax_dev, pgoff, nr_pages,
+ kaddr, pfn);
+ if (!avail)
+ return -ERANGE;
+ return min(avail, nr_pages);
+}
+EXPORT_SYMBOL_GPL(dax_direct_access);
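
A hedged caller sketch (the helper name and single-page copy are illustrative): dax_direct_access() is intended to be bracketed by dax_read_lock()/dax_read_unlock() so the translation cannot be invalidated by kill_dax() mid-access:

static int example_copy_to_dax(struct dax_device *dax_dev, pgoff_t pgoff,
			       const void *src, size_t len)
{
	void *kaddr;
	pfn_t pfn;
	long nr;
	int id;

	id = dax_read_lock();
	nr = dax_direct_access(dax_dev, pgoff, 1, &kaddr, &pfn);
	if (nr < 1) {
		dax_read_unlock(id);
		return nr < 0 ? nr : -EIO;
	}
	memcpy(kaddr, src, min_t(size_t, len, PAGE_SIZE));
	dax_read_unlock(id);
	return 0;
}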
+
+bool dax_alive(struct dax_device *dax_dev)
+{
+ lockdep_assert_held(&dax_srcu);
+ return dax_dev->alive;
+}
+EXPORT_SYMBOL_GPL(dax_alive);
+
+static int dax_host_hash(const char *host)
+{
+ return hashlen_hash(hashlen_string("DAX", host)) % DAX_HASH_SIZE;
+}
+
+/*
+ * Note: rcu is not protecting the liveness of dax_dev; rcu is ensuring
+ * that any fault handlers or operations that might have seen
+ * dax_alive() have completed. Any operations that start after
+ * synchronize_srcu() has run will abort upon seeing !dax_alive().
+ */
+void kill_dax(struct dax_device *dax_dev)
+{
+ if (!dax_dev)
+ return;
+
+ dax_dev->alive = false;
+
+ synchronize_srcu(&dax_srcu);
+
+ spin_lock(&dax_host_lock);
+ hlist_del_init(&dax_dev->list);
+ spin_unlock(&dax_host_lock);
+
+ dax_dev->private = NULL;
+}
+EXPORT_SYMBOL_GPL(kill_dax);
+
+static struct inode *dax_alloc_inode(struct super_block *sb)
+{
+ struct dax_device *dax_dev;
+
+ dax_dev = kmem_cache_alloc(dax_cache, GFP_KERNEL);
+ return &dax_dev->inode;
+}
+
+static struct dax_device *to_dax_dev(struct inode *inode)
+{
+ return container_of(inode, struct dax_device, inode);
+}
+
+static void dax_i_callback(struct rcu_head *head)
+{
+ struct inode *inode = container_of(head, struct inode, i_rcu);
+ struct dax_device *dax_dev = to_dax_dev(inode);
+
+ kfree(dax_dev->host);
+ dax_dev->host = NULL;
+ ida_simple_remove(&dax_minor_ida, MINOR(inode->i_rdev));
+ kmem_cache_free(dax_cache, dax_dev);
+}
+
+static void dax_destroy_inode(struct inode *inode)
+{
+ struct dax_device *dax_dev = to_dax_dev(inode);
+
+ WARN_ONCE(dax_dev->alive,
+ "kill_dax() must be called before final iput()\n");
+ call_rcu(&inode->i_rcu, dax_i_callback);
+}
+
+static const struct super_operations dax_sops = {
+ .statfs = simple_statfs,
+ .alloc_inode = dax_alloc_inode,
+ .destroy_inode = dax_destroy_inode,
+ .drop_inode = generic_delete_inode,
+};
+
+static struct dentry *dax_mount(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data)
+{
+ return mount_pseudo(fs_type, "dax:", &dax_sops, NULL, DAXFS_MAGIC);
+}
+
+static struct file_system_type dax_fs_type = {
+ .name = "dax",
+ .mount = dax_mount,
+ .kill_sb = kill_anon_super,
+};
+
+static int dax_test(struct inode *inode, void *data)
+{
+ dev_t devt = *(dev_t *) data;
+
+ return inode->i_rdev == devt;
+}
+
+static int dax_set(struct inode *inode, void *data)
+{
+ dev_t devt = *(dev_t *) data;
+
+ inode->i_rdev = devt;
+ return 0;
+}
+
+static struct dax_device *dax_dev_get(dev_t devt)
+{
+ struct dax_device *dax_dev;
+ struct inode *inode;
+
+ inode = iget5_locked(dax_superblock, hash_32(devt + DAXFS_MAGIC, 31),
+ dax_test, dax_set, &devt);
+
+ if (!inode)
+ return NULL;
+
+ dax_dev = to_dax_dev(inode);
+ if (inode->i_state & I_NEW) {
+ dax_dev->alive = true;
+ inode->i_cdev = &dax_dev->cdev;
+ inode->i_mode = S_IFCHR;
+ inode->i_flags = S_DAX;
+ mapping_set_gfp_mask(&inode->i_data, GFP_USER);
+ unlock_new_inode(inode);
+ }
+
+ return dax_dev;
+}
+
+static void dax_add_host(struct dax_device *dax_dev, const char *host)
+{
+ int hash;
+
+ /*
+ * Unconditionally init dax_dev since it's coming from a
+ * non-zeroed slab cache
+ */
+ INIT_HLIST_NODE(&dax_dev->list);
+ dax_dev->host = host;
+ if (!host)
+ return;
+
+ hash = dax_host_hash(host);
+ spin_lock(&dax_host_lock);
+ hlist_add_head(&dax_dev->list, &dax_host_list[hash]);
+ spin_unlock(&dax_host_lock);
+}
+
+struct dax_device *alloc_dax(void *private, const char *__host,
+ const struct dax_operations *ops)
+{
+ struct dax_device *dax_dev;
+ const char *host;
+ dev_t devt;
+ int minor;
+
+ host = kstrdup(__host, GFP_KERNEL);
+ if (__host && !host)
+ return NULL;
+
+ minor = ida_simple_get(&dax_minor_ida, 0, nr_dax, GFP_KERNEL);
+ if (minor < 0)
+ goto err_minor;
+
+ devt = MKDEV(MAJOR(dax_devt), minor);
+ dax_dev = dax_dev_get(devt);
+ if (!dax_dev)
+ goto err_dev;
+
+ dax_add_host(dax_dev, host);
+ dax_dev->ops = ops;
+ dax_dev->private = private;
+ return dax_dev;
+
+ err_dev:
+ ida_simple_remove(&dax_minor_ida, minor);
+ err_minor:
+ kfree(host);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(alloc_dax);
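
A provider-side sketch (illustrative names) of the lifecycle this file expects: alloc_dax() is paired with kill_dax() plus put_dax() (just below) on teardown, mirroring what devm_create_dev_dax()/kill_dev_dax() do in device.c:

/* Illustrative only: minimal registration/teardown pairing. */
static struct dax_device *example_dax_register(void *drvdata, const char *host,
					       const struct dax_operations *ops)
{
	return alloc_dax(drvdata, host, ops);	/* NULL on failure */
}

static void example_dax_unregister(struct dax_device *dax_dev)
{
	kill_dax(dax_dev);	/* mark dead and wait out in-flight srcu readers */
	put_dax(dax_dev);	/* drop the backing inode reference */
}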
+
+void put_dax(struct dax_device *dax_dev)
+{
+ if (!dax_dev)
+ return;
+ iput(&dax_dev->inode);
+}
+EXPORT_SYMBOL_GPL(put_dax);
+
+/**
+ * dax_get_by_host() - temporary lookup mechanism for filesystem-dax
+ * @host: alternate name for the device registered by a dax driver
+ */
+struct dax_device *dax_get_by_host(const char *host)
+{
+ struct dax_device *dax_dev, *found = NULL;
+ int hash, id;
+
+ if (!host)
+ return NULL;
+
+ hash = dax_host_hash(host);
+
+ id = dax_read_lock();
+ spin_lock(&dax_host_lock);
+ hlist_for_each_entry(dax_dev, &dax_host_list[hash], list) {
+ if (!dax_alive(dax_dev)
+ || strcmp(host, dax_dev->host) != 0)
+ continue;
+
+ if (igrab(&dax_dev->inode))
+ found = dax_dev;
+ break;
+ }
+ spin_unlock(&dax_host_lock);
+ dax_read_unlock(id);
+
+ return found;
+}
+EXPORT_SYMBOL_GPL(dax_get_by_host);
+
+/**
+ * inode_dax: convert a public inode into its dax_dev
+ * @inode: An inode with i_cdev pointing to a dax_dev
+ *
+ * Note this is not equivalent to to_dax_dev() which is for private
+ * internal use where we know the inode filesystem type == dax_fs_type.
+ */
+struct dax_device *inode_dax(struct inode *inode)
+{
+ struct cdev *cdev = inode->i_cdev;
+
+ return container_of(cdev, struct dax_device, cdev);
+}
+EXPORT_SYMBOL_GPL(inode_dax);
+
+struct inode *dax_inode(struct dax_device *dax_dev)
+{
+ return &dax_dev->inode;
+}
+EXPORT_SYMBOL_GPL(dax_inode);
+
+void *dax_get_private(struct dax_device *dax_dev)
+{
+ return dax_dev->private;
+}
+EXPORT_SYMBOL_GPL(dax_get_private);
+
+static void init_once(void *_dax_dev)
+{
+ struct dax_device *dax_dev = _dax_dev;
+ struct inode *inode = &dax_dev->inode;
+
+ inode_init_once(inode);
+}
+
+static int __dax_fs_init(void)
+{
+ int rc;
+
+ dax_cache = kmem_cache_create("dax_cache", sizeof(struct dax_device), 0,
+ (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
+ SLAB_MEM_SPREAD|SLAB_ACCOUNT),
+ init_once);
+ if (!dax_cache)
+ return -ENOMEM;
+
+ rc = register_filesystem(&dax_fs_type);
+ if (rc)
+ goto err_register_fs;
+
+ dax_mnt = kern_mount(&dax_fs_type);
+ if (IS_ERR(dax_mnt)) {
+ rc = PTR_ERR(dax_mnt);
+ goto err_mount;
+ }
+ dax_superblock = dax_mnt->mnt_sb;
+
+ return 0;
+
+ err_mount:
+ unregister_filesystem(&dax_fs_type);
+ err_register_fs:
+ kmem_cache_destroy(dax_cache);
+
+ return rc;
+}
+
+static void __dax_fs_exit(void)
+{
+ kern_unmount(dax_mnt);
+ unregister_filesystem(&dax_fs_type);
+ kmem_cache_destroy(dax_cache);
+}
+
+static int __init dax_fs_init(void)
+{
+ int rc;
+
+ rc = __dax_fs_init();
+ if (rc)
+ return rc;
+
+ nr_dax = max(nr_dax, 256);
+ rc = alloc_chrdev_region(&dax_devt, 0, nr_dax, "dax");
+ if (rc)
+ __dax_fs_exit();
+ return rc;
+}
+
+static void __exit dax_fs_exit(void)
+{
+ unregister_chrdev_region(dax_devt, nr_dax);
+ ida_destroy(&dax_minor_ida);
+ __dax_fs_exit();
+}
+
+MODULE_AUTHOR("Intel Corporation");
+MODULE_LICENSE("GPL v2");
+subsys_initcall(dax_fs_init);
+module_exit(dax_fs_exit);
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index d01d59812cf3..24e8597b2c3e 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -514,12 +514,12 @@ config TIMB_DMA
Enable support for the Timberdale FPGA DMA engine.
config TI_CPPI41
- tristate "AM33xx CPPI41 DMA support"
- depends on ARCH_OMAP
+ tristate "CPPI 4.1 DMA support"
+ depends on (ARCH_OMAP || ARCH_DAVINCI_DA8XX)
select DMA_ENGINE
help
The Communications Port Programming Interface (CPPI) 4.1 DMA engine
- is currently used by the USB driver on AM335x platforms.
+ is currently used by the USB driver on AM335x and DA8xx platforms.
config TI_DMA_CROSSBAR
bool
@@ -608,6 +608,7 @@ config ASYNC_TX_DMA
config DMATEST
tristate "DMA Test client"
depends on DMA_ENGINE
+ select DMA_ENGINE_RAID
help
Simple DMA test client. Say N unless you're debugging a
DMA Device driver.
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 0b7c6ce629a6..6bb8813ca275 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -106,6 +106,7 @@ struct pl08x_driver_data;
/**
* struct vendor_data - vendor-specific config parameters for PL08x derivatives
+ * @config_offset: offset to the configuration register
* @channels: the number of channels available in this variant
* @signals: the number of request signals available from the hardware
* @dualmaster: whether this version supports dual AHB masters or not.
@@ -145,6 +146,8 @@ struct pl08x_bus_data {
/**
* struct pl08x_phy_chan - holder for the physical channels
* @id: physical index to this channel
+ * @base: memory base address for this physical channel
+ * @reg_config: configuration address for this physical channel
* @lock: a lock to use when altering an instance of this struct
* @serving: the virtual channel currently being served by this physical
* channel
@@ -203,7 +206,7 @@ struct pl08x_txd {
};
/**
- * struct pl08x_dma_chan_state - holds the PL08x specific virtual channel
+ * enum pl08x_dma_chan_state - holds the PL08x specific virtual channel
* states
* @PL08X_CHAN_IDLE: the channel is idle
* @PL08X_CHAN_RUNNING: the channel has allocated a physical transport
@@ -226,9 +229,8 @@ enum pl08x_dma_chan_state {
* @phychan: the physical channel utilized by this channel, if there is one
* @name: name of channel
* @cd: channel platform data
- * @runtime_addr: address for RX/TX according to the runtime config
+ * @cfg: slave configuration
* @at: active transaction on this channel
- * @lock: a lock for this channel data
* @host: a pointer to the host (internal use)
* @state: whether the channel is idle, paused, running etc
* @slave: whether this channel is a device (slave) or for memcpy
@@ -262,7 +264,7 @@ struct pl08x_dma_chan {
* @lli_buses: bitmask to or in to LLI pointer selecting AHB port for LLI
* fetches
* @mem_buses: set to indicate memory transfers on AHB2.
- * @lock: a spinlock for this struct
+ * @lli_words: how many words are used in each LLI item for this variant
*/
struct pl08x_driver_data {
struct dma_device slave;
@@ -417,7 +419,7 @@ static void pl08x_start_next_txd(struct pl08x_dma_chan *plchan)
/* Enable the DMA channel */
/* Do not access config register until channel shows as disabled */
- while (readl(pl08x->base + PL080_EN_CHAN) & (1 << phychan->id))
+ while (readl(pl08x->base + PL080_EN_CHAN) & BIT(phychan->id))
cpu_relax();
/* Do not access config register until channel shows as inactive */
@@ -484,8 +486,8 @@ static void pl08x_terminate_phy_chan(struct pl08x_driver_data *pl08x,
writel(val, ch->reg_config);
- writel(1 << ch->id, pl08x->base + PL080_ERR_CLEAR);
- writel(1 << ch->id, pl08x->base + PL080_TC_CLEAR);
+ writel(BIT(ch->id), pl08x->base + PL080_ERR_CLEAR);
+ writel(BIT(ch->id), pl08x->base + PL080_TC_CLEAR);
}
static inline u32 get_bytes_in_cctl(u32 cctl)
@@ -1834,7 +1836,7 @@ static irqreturn_t pl08x_irq(int irq, void *dev)
return IRQ_NONE;
for (i = 0; i < pl08x->vd->channels; i++) {
- if (((1 << i) & err) || ((1 << i) & tc)) {
+ if ((BIT(i) & err) || (BIT(i) & tc)) {
/* Locate physical channel */
struct pl08x_phy_chan *phychan = &pl08x->phy_chans[i];
struct pl08x_dma_chan *plchan = phychan->serving;
@@ -1872,7 +1874,7 @@ static irqreturn_t pl08x_irq(int irq, void *dev)
}
spin_unlock(&plchan->vc.lock);
- mask |= (1 << i);
+ mask |= BIT(i);
}
}
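
The BIT() conversions in this file are behaviour-preserving: BIT(n), from <linux/bitops.h>, expands to (1UL << (n)), so for channel indices below 32 the generated mask is the same as the open-coded shift:

/* Equivalent for ch->id < 32; BIT() just makes the intent explicit: */
writel(1 << ch->id, pl08x->base + PL080_TC_CLEAR);	/* old, signed int shift */
writel(BIT(ch->id), pl08x->base + PL080_TC_CLEAR);	/* new, unsigned long shift */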
diff --git a/drivers/dma/cppi41.c b/drivers/dma/cppi41.c
index d74cee077842..f7e965f63274 100644
--- a/drivers/dma/cppi41.c
+++ b/drivers/dma/cppi41.c
@@ -68,7 +68,6 @@
#define QMGR_MEMCTRL_IDX_SH 16
#define QMGR_MEMCTRL_DESC_SH 8
-#define QMGR_NUM_PEND 5
#define QMGR_PEND(x) (0x90 + (x) * 4)
#define QMGR_PENDING_SLOT_Q(x) (x / 32)
@@ -131,7 +130,6 @@ struct cppi41_dd {
u32 first_td_desc;
struct cppi41_channel *chan_busy[ALLOC_DECS_NUM];
- void __iomem *usbss_mem;
void __iomem *ctrl_mem;
void __iomem *sched_mem;
void __iomem *qmgr_mem;
@@ -139,6 +137,10 @@ struct cppi41_dd {
const struct chan_queues *queues_rx;
const struct chan_queues *queues_tx;
struct chan_queues td_queue;
+ u16 first_completion_queue;
+ u16 qmgr_num_pend;
+ u32 n_chans;
+ u8 platform;
struct list_head pending; /* Pending queued transfers */
spinlock_t lock; /* Lock for pending list */
@@ -149,8 +151,7 @@ struct cppi41_dd {
bool is_suspended;
};
-#define FIST_COMPLETION_QUEUE 93
-static struct chan_queues usb_queues_tx[] = {
+static struct chan_queues am335x_usb_queues_tx[] = {
/* USB0 ENDP 1 */
[ 0] = { .submit = 32, .complete = 93},
[ 1] = { .submit = 34, .complete = 94},
@@ -186,7 +187,7 @@ static struct chan_queues usb_queues_tx[] = {
[29] = { .submit = 90, .complete = 139},
};
-static const struct chan_queues usb_queues_rx[] = {
+static const struct chan_queues am335x_usb_queues_rx[] = {
/* USB0 ENDP 1 */
[ 0] = { .submit = 1, .complete = 109},
[ 1] = { .submit = 2, .complete = 110},
@@ -222,11 +223,26 @@ static const struct chan_queues usb_queues_rx[] = {
[29] = { .submit = 30, .complete = 155},
};
+static const struct chan_queues da8xx_usb_queues_tx[] = {
+ [0] = { .submit = 16, .complete = 24},
+ [1] = { .submit = 18, .complete = 24},
+ [2] = { .submit = 20, .complete = 24},
+ [3] = { .submit = 22, .complete = 24},
+};
+
+static const struct chan_queues da8xx_usb_queues_rx[] = {
+ [0] = { .submit = 1, .complete = 26},
+ [1] = { .submit = 3, .complete = 26},
+ [2] = { .submit = 5, .complete = 26},
+ [3] = { .submit = 7, .complete = 26},
+};
+
struct cppi_glue_infos {
- irqreturn_t (*isr)(int irq, void *data);
const struct chan_queues *queues_rx;
const struct chan_queues *queues_tx;
struct chan_queues td_queue;
+ u16 first_completion_queue;
+ u16 qmgr_num_pend;
};
static struct cppi41_channel *to_cpp41_chan(struct dma_chan *c)
@@ -285,19 +301,21 @@ static u32 cppi41_pop_desc(struct cppi41_dd *cdd, unsigned queue_num)
static irqreturn_t cppi41_irq(int irq, void *data)
{
struct cppi41_dd *cdd = data;
+ u16 first_completion_queue = cdd->first_completion_queue;
+ u16 qmgr_num_pend = cdd->qmgr_num_pend;
struct cppi41_channel *c;
int i;
- for (i = QMGR_PENDING_SLOT_Q(FIST_COMPLETION_QUEUE); i < QMGR_NUM_PEND;
+ for (i = QMGR_PENDING_SLOT_Q(first_completion_queue); i < qmgr_num_pend;
i++) {
u32 val;
u32 q_num;
val = cppi_readl(cdd->qmgr_mem + QMGR_PEND(i));
- if (i == QMGR_PENDING_SLOT_Q(FIST_COMPLETION_QUEUE) && val) {
+ if (i == QMGR_PENDING_SLOT_Q(first_completion_queue) && val) {
u32 mask;
/* set corresponding bit for completion Q 93 */
- mask = 1 << QMGR_PENDING_BIT_Q(FIST_COMPLETION_QUEUE);
+ mask = 1 << QMGR_PENDING_BIT_Q(first_completion_queue);
/* now set all bits for queues less than Q 93 */
mask--;
/* now invert and keep only Q 93+ set */
@@ -402,11 +420,9 @@ static enum dma_status cppi41_dma_tx_status(struct dma_chan *chan,
struct cppi41_channel *c = to_cpp41_chan(chan);
enum dma_status ret;
- /* lock */
ret = dma_cookie_status(chan, cookie, txstate);
- if (txstate && ret == DMA_COMPLETE)
- txstate->residue = c->residue;
- /* unlock */
+
+ dma_set_residue(txstate, c->residue);
return ret;
}
@@ -630,7 +646,7 @@ static int cppi41_tear_down_chan(struct cppi41_channel *c)
if (!c->is_tx) {
reg |= GCR_STARV_RETRY;
reg |= GCR_DESC_TYPE_HOST;
- reg |= c->q_comp_num;
+ reg |= cdd->td_queue.complete;
}
reg |= GCR_TEARDOWN;
cppi_writel(reg, c->gcr_reg);
@@ -641,7 +657,7 @@ static int cppi41_tear_down_chan(struct cppi41_channel *c)
if (!c->td_seen || !c->td_desc_seen) {
desc_phys = cppi41_pop_desc(cdd, cdd->td_queue.complete);
- if (!desc_phys)
+ if (!desc_phys && c->is_tx)
desc_phys = cppi41_pop_desc(cdd, c->q_comp_num);
if (desc_phys == c->desc_phys) {
@@ -723,39 +739,24 @@ static int cppi41_stop_chan(struct dma_chan *chan)
return 0;
}
-static void cleanup_chans(struct cppi41_dd *cdd)
-{
- while (!list_empty(&cdd->ddev.channels)) {
- struct cppi41_channel *cchan;
-
- cchan = list_first_entry(&cdd->ddev.channels,
- struct cppi41_channel, chan.device_node);
- list_del(&cchan->chan.device_node);
- kfree(cchan);
- }
-}
-
static int cppi41_add_chans(struct device *dev, struct cppi41_dd *cdd)
{
- struct cppi41_channel *cchan;
+ struct cppi41_channel *cchan, *chans;
int i;
- int ret;
- u32 n_chans;
+ u32 n_chans = cdd->n_chans;
- ret = of_property_read_u32(dev->of_node, "#dma-channels",
- &n_chans);
- if (ret)
- return ret;
/*
* The channels can only be used as TX or as RX. So we add twice
* that much dma channels because USB can only do RX or TX.
*/
n_chans *= 2;
+ chans = devm_kcalloc(dev, n_chans, sizeof(*chans), GFP_KERNEL);
+ if (!chans)
+ return -ENOMEM;
+
for (i = 0; i < n_chans; i++) {
- cchan = kzalloc(sizeof(*cchan), GFP_KERNEL);
- if (!cchan)
- goto err;
+ cchan = &chans[i];
cchan->cdd = cdd;
if (i & 1) {
@@ -775,9 +776,6 @@ static int cppi41_add_chans(struct device *dev, struct cppi41_dd *cdd)
cdd->first_td_desc = n_chans;
return 0;
-err:
- cleanup_chans(cdd);
- return -ENOMEM;
}
static void purge_descs(struct device *dev, struct cppi41_dd *cdd)
@@ -859,7 +857,7 @@ static void init_sched(struct cppi41_dd *cdd)
word = 0;
cppi_writel(0, cdd->sched_mem + DMA_SCHED_CTRL);
- for (ch = 0; ch < 15 * 2; ch += 2) {
+ for (ch = 0; ch < cdd->n_chans; ch += 2) {
reg = SCHED_ENTRY0_CHAN(ch);
reg |= SCHED_ENTRY1_CHAN(ch) | SCHED_ENTRY1_IS_RX;
@@ -869,7 +867,7 @@ static void init_sched(struct cppi41_dd *cdd)
cppi_writel(reg, cdd->sched_mem + DMA_SCHED_WORD(word));
word++;
}
- reg = 15 * 2 * 2 - 1;
+ reg = cdd->n_chans * 2 - 1;
reg |= DMA_SCHED_CTRL_EN;
cppi_writel(reg, cdd->sched_mem + DMA_SCHED_CTRL);
}
@@ -885,7 +883,7 @@ static int init_cppi41(struct device *dev, struct cppi41_dd *cdd)
return -ENOMEM;
cppi_writel(cdd->scratch_phys, cdd->qmgr_mem + QMGR_LRAM0_BASE);
- cppi_writel(QMGR_SCRATCH_SIZE, cdd->qmgr_mem + QMGR_LRAM_SIZE);
+ cppi_writel(TOTAL_DESCS_NUM, cdd->qmgr_mem + QMGR_LRAM_SIZE);
cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM1_BASE);
ret = init_descs(dev, cdd);
@@ -894,6 +892,7 @@ static int init_cppi41(struct device *dev, struct cppi41_dd *cdd)
cppi_writel(cdd->td_queue.submit, cdd->ctrl_mem + DMA_TDFDQ);
init_sched(cdd);
+
return 0;
err_td:
deinit_cppi41(dev, cdd);
@@ -933,8 +932,9 @@ static bool cpp41_dma_filter_fn(struct dma_chan *chan, void *param)
else
queues = cdd->queues_rx;
- BUILD_BUG_ON(ARRAY_SIZE(usb_queues_rx) != ARRAY_SIZE(usb_queues_tx));
- if (WARN_ON(cchan->port_num > ARRAY_SIZE(usb_queues_rx)))
+ BUILD_BUG_ON(ARRAY_SIZE(am335x_usb_queues_rx) !=
+ ARRAY_SIZE(am335x_usb_queues_tx));
+ if (WARN_ON(cchan->port_num > ARRAY_SIZE(am335x_usb_queues_rx)))
return false;
cchan->q_num = queues[cchan->port_num].submit;
@@ -962,15 +962,25 @@ static struct dma_chan *cppi41_dma_xlate(struct of_phandle_args *dma_spec,
&dma_spec->args[0]);
}
-static const struct cppi_glue_infos usb_infos = {
- .isr = cppi41_irq,
- .queues_rx = usb_queues_rx,
- .queues_tx = usb_queues_tx,
+static const struct cppi_glue_infos am335x_usb_infos = {
+ .queues_rx = am335x_usb_queues_rx,
+ .queues_tx = am335x_usb_queues_tx,
.td_queue = { .submit = 31, .complete = 0 },
+ .first_completion_queue = 93,
+ .qmgr_num_pend = 5,
+};
+
+static const struct cppi_glue_infos da8xx_usb_infos = {
+ .queues_rx = da8xx_usb_queues_rx,
+ .queues_tx = da8xx_usb_queues_tx,
+ .td_queue = { .submit = 31, .complete = 0 },
+ .first_completion_queue = 24,
+ .qmgr_num_pend = 2,
};
static const struct of_device_id cppi41_dma_ids[] = {
- { .compatible = "ti,am3359-cppi41", .data = &usb_infos},
+ { .compatible = "ti,am3359-cppi41", .data = &am335x_usb_infos},
+ { .compatible = "ti,da830-cppi41", .data = &da8xx_usb_infos},
{},
};
MODULE_DEVICE_TABLE(of, cppi41_dma_ids);
@@ -995,6 +1005,8 @@ static int cppi41_dma_probe(struct platform_device *pdev)
struct cppi41_dd *cdd;
struct device *dev = &pdev->dev;
const struct cppi_glue_infos *glue_info;
+ struct resource *mem;
+ int index;
int irq;
int ret;
@@ -1021,19 +1033,31 @@ static int cppi41_dma_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&cdd->ddev.channels);
cpp41_dma_info.dma_cap = cdd->ddev.cap_mask;
- cdd->usbss_mem = of_iomap(dev->of_node, 0);
- cdd->ctrl_mem = of_iomap(dev->of_node, 1);
- cdd->sched_mem = of_iomap(dev->of_node, 2);
- cdd->qmgr_mem = of_iomap(dev->of_node, 3);
+ index = of_property_match_string(dev->of_node,
+ "reg-names", "controller");
+ if (index < 0)
+ return index;
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, index);
+ cdd->ctrl_mem = devm_ioremap_resource(dev, mem);
+ if (IS_ERR(cdd->ctrl_mem))
+ return PTR_ERR(cdd->ctrl_mem);
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, index + 1);
+ cdd->sched_mem = devm_ioremap_resource(dev, mem);
+ if (IS_ERR(cdd->sched_mem))
+ return PTR_ERR(cdd->sched_mem);
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, index + 2);
+ cdd->qmgr_mem = devm_ioremap_resource(dev, mem);
+ if (IS_ERR(cdd->qmgr_mem))
+ return PTR_ERR(cdd->qmgr_mem);
+
spin_lock_init(&cdd->lock);
INIT_LIST_HEAD(&cdd->pending);
platform_set_drvdata(pdev, cdd);
- if (!cdd->usbss_mem || !cdd->ctrl_mem || !cdd->sched_mem ||
- !cdd->qmgr_mem)
- return -ENXIO;
-
pm_runtime_enable(dev);
pm_runtime_set_autosuspend_delay(dev, 100);
pm_runtime_use_autosuspend(dev);
@@ -1044,6 +1068,13 @@ static int cppi41_dma_probe(struct platform_device *pdev)
cdd->queues_rx = glue_info->queues_rx;
cdd->queues_tx = glue_info->queues_tx;
cdd->td_queue = glue_info->td_queue;
+ cdd->qmgr_num_pend = glue_info->qmgr_num_pend;
+ cdd->first_completion_queue = glue_info->first_completion_queue;
+
+ ret = of_property_read_u32(dev->of_node,
+ "#dma-channels", &cdd->n_chans);
+ if (ret)
+ goto err_get_n_chans;
ret = init_cppi41(dev, cdd);
if (ret)
@@ -1056,18 +1087,18 @@ static int cppi41_dma_probe(struct platform_device *pdev)
irq = irq_of_parse_and_map(dev->of_node, 0);
if (!irq) {
ret = -EINVAL;
- goto err_irq;
+ goto err_chans;
}
- ret = devm_request_irq(&pdev->dev, irq, glue_info->isr, IRQF_SHARED,
+ ret = devm_request_irq(&pdev->dev, irq, cppi41_irq, IRQF_SHARED,
dev_name(dev), cdd);
if (ret)
- goto err_irq;
+ goto err_chans;
cdd->irq = irq;
ret = dma_async_device_register(&cdd->ddev);
if (ret)
- goto err_dma_reg;
+ goto err_chans;
ret = of_dma_controller_register(dev->of_node,
cppi41_dma_xlate, &cpp41_dma_info);
@@ -1080,20 +1111,14 @@ static int cppi41_dma_probe(struct platform_device *pdev)
return 0;
err_of:
dma_async_device_unregister(&cdd->ddev);
-err_dma_reg:
-err_irq:
- cleanup_chans(cdd);
err_chans:
deinit_cppi41(dev, cdd);
err_init_cppi:
pm_runtime_dont_use_autosuspend(dev);
+err_get_n_chans:
err_get_sync:
pm_runtime_put_sync(dev);
pm_runtime_disable(dev);
- iounmap(cdd->usbss_mem);
- iounmap(cdd->ctrl_mem);
- iounmap(cdd->sched_mem);
- iounmap(cdd->qmgr_mem);
return ret;
}
@@ -1110,12 +1135,7 @@ static int cppi41_dma_remove(struct platform_device *pdev)
dma_async_device_unregister(&cdd->ddev);
devm_free_irq(&pdev->dev, cdd->irq, cdd);
- cleanup_chans(cdd);
deinit_cppi41(&pdev->dev, cdd);
- iounmap(cdd->usbss_mem);
- iounmap(cdd->ctrl_mem);
- iounmap(cdd->sched_mem);
- iounmap(cdd->qmgr_mem);
pm_runtime_dont_use_autosuspend(&pdev->dev);
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index 54d581d407aa..a07ef3d6b3ec 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -535,6 +535,13 @@ static int dmatest_func(void *data)
total_tests++;
+ /* Check if buffer count fits into map count variable (u8) */
+ if ((src_cnt + dst_cnt) >= 255) {
+ pr_err("too many buffers (%d of 255 supported)\n",
+ src_cnt + dst_cnt);
+ break;
+ }
+
if (1 << align > params->buf_size) {
pr_err("%u-byte buffer too small for %d-byte alignment\n",
params->buf_size, 1 << align);
@@ -585,7 +592,7 @@ static int dmatest_func(void *data)
for (i = 0; i < src_cnt; i++) {
void *buf = thread->srcs[i];
struct page *pg = virt_to_page(buf);
- unsigned pg_off = (unsigned long) buf & ~PAGE_MASK;
+ unsigned long pg_off = offset_in_page(buf);
um->addr[i] = dma_map_page(dev->dev, pg, pg_off,
um->len, DMA_TO_DEVICE);
@@ -605,7 +612,7 @@ static int dmatest_func(void *data)
for (i = 0; i < dst_cnt; i++) {
void *buf = thread->dsts[i];
struct page *pg = virt_to_page(buf);
- unsigned pg_off = (unsigned long) buf & ~PAGE_MASK;
+ unsigned long pg_off = offset_in_page(buf);
dsts[i] = dma_map_page(dev->dev, pg, pg_off, um->len,
DMA_BIDIRECTIONAL);
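
The offset_in_page() conversions are likewise equivalent to the open-coded mask they replace; offset_in_page(p) is defined in <linux/mm.h> as ((unsigned long)(p) & ~PAGE_MASK). A tiny demonstration (hypothetical helper):

#include <linux/mm.h>

static void offset_in_page_demo(void *buf)
{
	unsigned long a = (unsigned long)buf & ~PAGE_MASK;	/* open-coded form being replaced */
	unsigned long b = offset_in_page(buf);			/* helper from <linux/mm.h> */

	WARN_ON(a != b);					/* always equal */
}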
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index d1651a50c349..085993cb2ccc 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -937,6 +937,21 @@ static int sdma_disable_channel(struct dma_chan *chan)
return 0;
}
+static int sdma_disable_channel_with_delay(struct dma_chan *chan)
+{
+ sdma_disable_channel(chan);
+
+ /*
+ * According to the NXP R&D team, a delay of one BD SDMA cost time
+ * (1ms at most) should be added after clearing the channel enable
+ * bit, to ensure the SDMA core has really stopped when SDMA
+ * clients call .device_terminate_all.
+ */
+ mdelay(1);
+
+ return 0;
+}
+
static void sdma_set_watermarklevel_for_p2p(struct sdma_channel *sdmac)
{
struct sdma_engine *sdma = sdmac->sdma;
@@ -1828,11 +1843,11 @@ static int sdma_probe(struct platform_device *pdev)
sdma->dma_device.device_prep_slave_sg = sdma_prep_slave_sg;
sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic;
sdma->dma_device.device_config = sdma_config;
- sdma->dma_device.device_terminate_all = sdma_disable_channel;
+ sdma->dma_device.device_terminate_all = sdma_disable_channel_with_delay;
sdma->dma_device.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
sdma->dma_device.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
sdma->dma_device.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
- sdma->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+ sdma->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
sdma->dma_device.device_issue_pending = sdma_issue_pending;
sdma->dma_device.dev->dma_parms = &sdma->dma_parms;
dma_set_max_seg_size(sdma->dma_device.dev, 65535);
diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c
index cc5259b881d4..6ad4384b3fa8 100644
--- a/drivers/dma/ioat/init.c
+++ b/drivers/dma/ioat/init.c
@@ -760,9 +760,7 @@ ioat_init_channel(struct ioatdma_device *ioat_dma,
dma_cookie_init(&ioat_chan->dma_chan);
list_add_tail(&ioat_chan->dma_chan.device_node, &dma->channels);
ioat_dma->idx[idx] = ioat_chan;
- init_timer(&ioat_chan->timer);
- ioat_chan->timer.function = ioat_timer_event;
- ioat_chan->timer.data = data;
+ setup_timer(&ioat_chan->timer, ioat_timer_event, data);
tasklet_init(&ioat_chan->cleanup_task, ioat_cleanup_event, data);
}
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 0cb951b743a6..25bc5b103aa2 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -960,7 +960,7 @@ static int mv_chan_memcpy_self_test(struct mv_xor_chan *mv_chan)
}
src_dma = dma_map_page(dma_chan->device->dev, virt_to_page(src),
- (size_t)src & ~PAGE_MASK, PAGE_SIZE,
+ offset_in_page(src), PAGE_SIZE,
DMA_TO_DEVICE);
unmap->addr[0] = src_dma;
@@ -972,7 +972,7 @@ static int mv_chan_memcpy_self_test(struct mv_xor_chan *mv_chan)
unmap->to_cnt = 1;
dest_dma = dma_map_page(dma_chan->device->dev, virt_to_page(dest),
- (size_t)dest & ~PAGE_MASK, PAGE_SIZE,
+ offset_in_page(dest), PAGE_SIZE,
DMA_FROM_DEVICE);
unmap->addr[1] = dest_dma;
@@ -1580,11 +1580,6 @@ static int mv_xor_probe(struct platform_device *pdev)
int irq;
cd = &pdata->channels[i];
- if (!cd) {
- ret = -ENODEV;
- goto err_channel_add;
- }
-
irq = platform_get_irq(pdev, i);
if (irq < 0) {
ret = irq;
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index f37f4978dabb..8b0da7fa520d 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -22,7 +22,6 @@
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/amba/bus.h>
-#include <linux/amba/pl330.h>
#include <linux/scatterlist.h>
#include <linux/of.h>
#include <linux/of_dma.h>
@@ -2077,18 +2076,6 @@ static void pl330_tasklet(unsigned long data)
}
}
-bool pl330_filter(struct dma_chan *chan, void *param)
-{
- u8 *peri_id;
-
- if (chan->device->dev->driver != &pl330_driver.drv)
- return false;
-
- peri_id = chan->private;
- return *peri_id == (unsigned long)param;
-}
-EXPORT_SYMBOL(pl330_filter);
-
static struct dma_chan *of_dma_pl330_xlate(struct of_phandle_args *dma_spec,
struct of_dma *ofdma)
{
@@ -2833,7 +2820,6 @@ static SIMPLE_DEV_PM_OPS(pl330_pm, pl330_suspend, pl330_resume);
static int
pl330_probe(struct amba_device *adev, const struct amba_id *id)
{
- struct dma_pl330_platdata *pdat;
struct pl330_config *pcfg;
struct pl330_dmac *pl330;
struct dma_pl330_chan *pch, *_p;
@@ -2843,8 +2829,6 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
int num_chan;
struct device_node *np = adev->dev.of_node;
- pdat = dev_get_platdata(&adev->dev);
-
ret = dma_set_mask_and_coherent(&adev->dev, DMA_BIT_MASK(32));
if (ret)
return ret;
@@ -2857,7 +2841,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
pd = &pl330->ddma;
pd->dev = &adev->dev;
- pl330->mcbufsz = pdat ? pdat->mcbuf_sz : 0;
+ pl330->mcbufsz = 0;
/* get quirk */
for (i = 0; i < ARRAY_SIZE(of_quirks); i++)
@@ -2901,10 +2885,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
INIT_LIST_HEAD(&pd->channels);
/* Initialize channel parameters */
- if (pdat)
- num_chan = max_t(int, pdat->nr_valid_peri, pcfg->num_chan);
- else
- num_chan = max_t(int, pcfg->num_peri, pcfg->num_chan);
+ num_chan = max_t(int, pcfg->num_peri, pcfg->num_chan);
pl330->num_peripherals = num_chan;
@@ -2916,11 +2897,8 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
for (i = 0; i < num_chan; i++) {
pch = &pl330->peripherals[i];
- if (!adev->dev.of_node)
- pch->chan.private = pdat ? &pdat->peri_id[i] : NULL;
- else
- pch->chan.private = adev->dev.of_node;
+ pch->chan.private = adev->dev.of_node;
INIT_LIST_HEAD(&pch->submitted_list);
INIT_LIST_HEAD(&pch->work_list);
INIT_LIST_HEAD(&pch->completed_list);
@@ -2933,15 +2911,11 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
list_add_tail(&pch->chan.device_node, &pd->channels);
}
- if (pdat) {
- pd->cap_mask = pdat->cap_mask;
- } else {
- dma_cap_set(DMA_MEMCPY, pd->cap_mask);
- if (pcfg->num_peri) {
- dma_cap_set(DMA_SLAVE, pd->cap_mask);
- dma_cap_set(DMA_CYCLIC, pd->cap_mask);
- dma_cap_set(DMA_PRIVATE, pd->cap_mask);
- }
+ dma_cap_set(DMA_MEMCPY, pd->cap_mask);
+ if (pcfg->num_peri) {
+ dma_cap_set(DMA_SLAVE, pd->cap_mask);
+ dma_cap_set(DMA_CYCLIC, pd->cap_mask);
+ dma_cap_set(DMA_PRIVATE, pd->cap_mask);
}
pd->device_alloc_chan_resources = pl330_alloc_chan_resources;
diff --git a/drivers/dma/qcom/hidma.c b/drivers/dma/qcom/hidma.c
index 3c982c96b4b7..5072a7d306d4 100644
--- a/drivers/dma/qcom/hidma.c
+++ b/drivers/dma/qcom/hidma.c
@@ -865,6 +865,20 @@ bailout:
return rc;
}
+static void hidma_shutdown(struct platform_device *pdev)
+{
+ struct hidma_dev *dmadev = platform_get_drvdata(pdev);
+
+ dev_info(dmadev->ddev.dev, "HI-DMA engine shutdown\n");
+
+ pm_runtime_get_sync(dmadev->ddev.dev);
+ if (hidma_ll_disable(dmadev->lldev))
+ dev_warn(dmadev->ddev.dev, "channel did not stop\n");
+ pm_runtime_mark_last_busy(dmadev->ddev.dev);
+ pm_runtime_put_autosuspend(dmadev->ddev.dev);
+}
+
static int hidma_remove(struct platform_device *pdev)
{
struct hidma_dev *dmadev = platform_get_drvdata(pdev);
@@ -908,6 +922,7 @@ MODULE_DEVICE_TABLE(of, hidma_match);
static struct platform_driver hidma_driver = {
.probe = hidma_probe,
.remove = hidma_remove,
+ .shutdown = hidma_shutdown,
.driver = {
.name = "hidma",
.of_match_table = hidma_match,
diff --git a/drivers/dma/qcom/hidma_ll.c b/drivers/dma/qcom/hidma_ll.c
index 6645bdf0d151..1530a661518d 100644
--- a/drivers/dma/qcom/hidma_ll.c
+++ b/drivers/dma/qcom/hidma_ll.c
@@ -499,6 +499,9 @@ int hidma_ll_enable(struct hidma_lldev *lldev)
lldev->trch_state = HIDMA_CH_ENABLED;
lldev->evch_state = HIDMA_CH_ENABLED;
+ /* enable irqs */
+ writel(ENABLE_IRQS, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
+
return 0;
}
@@ -596,6 +599,9 @@ int hidma_ll_disable(struct hidma_lldev *lldev)
lldev->trch_state = HIDMA_CH_SUSPENDED;
lldev->evch_state = HIDMA_CH_SUSPENDED;
+
+ /* disable interrupts */
+ writel(0, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
return 0;
}
diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
index 48b22d5c8602..db41795fe42a 100644
--- a/drivers/dma/sh/rcar-dmac.c
+++ b/drivers/dma/sh/rcar-dmac.c
@@ -344,13 +344,19 @@ static void rcar_dmac_chan_start_xfer(struct rcar_dmac_chan *chan)
rcar_dmac_chan_write(chan, RCAR_DMARS, chan->mid_rid);
if (desc->hwdescs.use) {
- struct rcar_dmac_xfer_chunk *chunk;
+ struct rcar_dmac_xfer_chunk *chunk =
+ list_first_entry(&desc->chunks,
+ struct rcar_dmac_xfer_chunk, node);
dev_dbg(chan->chan.device->dev,
"chan%u: queue desc %p: %u@%pad\n",
chan->index, desc, desc->nchunks, &desc->hwdescs.dma);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ rcar_dmac_chan_write(chan, RCAR_DMAFIXSAR,
+ chunk->src_addr >> 32);
+ rcar_dmac_chan_write(chan, RCAR_DMAFIXDAR,
+ chunk->dst_addr >> 32);
rcar_dmac_chan_write(chan, RCAR_DMAFIXDPBASE,
desc->hwdescs.dma >> 32);
#endif
@@ -368,8 +374,6 @@ static void rcar_dmac_chan_start_xfer(struct rcar_dmac_chan *chan)
* should. Initialize it manually with the destination address
* of the first chunk.
*/
- chunk = list_first_entry(&desc->chunks,
- struct rcar_dmac_xfer_chunk, node);
rcar_dmac_chan_write(chan, RCAR_DMADAR,
chunk->dst_addr & 0xffffffff);
@@ -855,8 +859,12 @@ rcar_dmac_chan_prep_sg(struct rcar_dmac_chan *chan, struct scatterlist *sgl,
unsigned int nchunks = 0;
unsigned int max_chunk_size;
unsigned int full_size = 0;
- bool highmem = false;
+ bool cross_boundary = false;
unsigned int i;
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ u32 high_dev_addr;
+ u32 high_mem_addr;
+#endif
desc = rcar_dmac_desc_get(chan);
if (!desc)
@@ -882,6 +890,16 @@ rcar_dmac_chan_prep_sg(struct rcar_dmac_chan *chan, struct scatterlist *sgl,
full_size += len;
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ if (i == 0) {
+ high_dev_addr = dev_addr >> 32;
+ high_mem_addr = mem_addr >> 32;
+ }
+
+ if ((dev_addr >> 32 != high_dev_addr) ||
+ (mem_addr >> 32 != high_mem_addr))
+ cross_boundary = true;
+#endif
while (len) {
unsigned int size = min(len, max_chunk_size);
@@ -890,18 +908,14 @@ rcar_dmac_chan_prep_sg(struct rcar_dmac_chan *chan, struct scatterlist *sgl,
* Prevent individual transfers from crossing 4GB
* boundaries.
*/
- if (dev_addr >> 32 != (dev_addr + size - 1) >> 32)
+ if (dev_addr >> 32 != (dev_addr + size - 1) >> 32) {
size = ALIGN(dev_addr, 1ULL << 32) - dev_addr;
- if (mem_addr >> 32 != (mem_addr + size - 1) >> 32)
+ cross_boundary = true;
+ }
+ if (mem_addr >> 32 != (mem_addr + size - 1) >> 32) {
size = ALIGN(mem_addr, 1ULL << 32) - mem_addr;
-
- /*
- * Check if either of the source or destination address
- * can't be expressed in 32 bits. If so we can't use
- * hardware descriptor lists.
- */
- if (dev_addr >> 32 || mem_addr >> 32)
- highmem = true;
+ cross_boundary = true;
+ }
#endif
chunk = rcar_dmac_xfer_chunk_get(chan);
@@ -943,13 +957,11 @@ rcar_dmac_chan_prep_sg(struct rcar_dmac_chan *chan, struct scatterlist *sgl,
* Use hardware descriptor lists if possible when more than one chunk
* needs to be transferred (otherwise they don't make much sense).
*
- * The highmem check currently covers the whole transfer. As an
- * optimization we could use descriptor lists for consecutive lowmem
- * chunks and direct manual mode for highmem chunks. Whether the
- * performance improvement would be significant enough compared to the
- * additional complexity remains to be investigated.
+ * Source/destination addresses must lie in the same 4GiB region of
+ * the 40-bit address space when hardware descriptors are used;
+ * cross_boundary records whether that constraint would be violated.
*/
- desc->hwdescs.use = !highmem && nchunks > 1;
+ desc->hwdescs.use = !cross_boundary && nchunks > 1;
if (desc->hwdescs.use) {
if (rcar_dmac_fill_hwdesc(chan, desc) < 0)
desc->hwdescs.use = false;
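
To make the cross_boundary logic concrete, a worked sketch with hypothetical addresses: a chunk that would straddle a 4GiB boundary is clipped to end at the boundary, and once cross_boundary is set the whole transfer falls back to manual (non-hardware-descriptor) mode:

/*
 * Hypothetical chunk: mem_addr = 0x1fffff000, size = 0x2000 would cross
 * the 4GiB boundary at 0x200000000, so
 *   size = ALIGN(mem_addr, 1ULL << 32) - mem_addr = 0x1000
 * and cross_boundary is set; desc->hwdescs.use then stays false and the
 * chunks are programmed one by one by the driver.
 */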
diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c
index 49f86cabcfec..786fc8fcc38e 100644
--- a/drivers/dma/stm32-dma.c
+++ b/drivers/dma/stm32-dma.c
@@ -1008,7 +1008,7 @@ static struct dma_chan *stm32_dma_of_xlate(struct of_phandle_args *dma_spec,
c = dma_get_slave_channel(&chan->vchan.chan);
if (!c) {
- dev_err(dev, "No more channel avalaible\n");
+ dev_err(dev, "No more channels available\n");
return NULL;
}
diff --git a/drivers/dma/sun4i-dma.c b/drivers/dma/sun4i-dma.c
index 57aa227bfadb..f4ed3f17607c 100644
--- a/drivers/dma/sun4i-dma.c
+++ b/drivers/dma/sun4i-dma.c
@@ -238,7 +238,7 @@ static struct sun4i_dma_pchan *find_and_use_pchan(struct sun4i_dma_dev *priv,
}
spin_lock_irqsave(&priv->lock, flags);
- for_each_clear_bit_from(i, &priv->pchans_used, max) {
+ for_each_clear_bit_from(i, priv->pchans_used, max) {
pchan = &pchans[i];
pchan->vchan = vchan;
set_bit(i, priv->pchans_used);
diff --git a/drivers/dma/virt-dma.c b/drivers/dma/virt-dma.c
index e47fc9b0944f..545e97279083 100644
--- a/drivers/dma/virt-dma.c
+++ b/drivers/dma/virt-dma.c
@@ -86,7 +86,7 @@ EXPORT_SYMBOL_GPL(vchan_find_desc);
static void vchan_complete(unsigned long arg)
{
struct virt_dma_chan *vc = (struct virt_dma_chan *)arg;
- struct virt_dma_desc *vd;
+ struct virt_dma_desc *vd, *_vd;
struct dmaengine_desc_callback cb;
LIST_HEAD(head);
@@ -103,8 +103,7 @@ static void vchan_complete(unsigned long arg)
dmaengine_desc_callback_invoke(&cb, NULL);
- while (!list_empty(&head)) {
- vd = list_first_entry(&head, struct virt_dma_desc, node);
+ list_for_each_entry_safe(vd, _vd, &head, node) {
dmaengine_desc_get_callback(&vd->tx, &cb);
list_del(&vd->node);
@@ -119,9 +118,9 @@ static void vchan_complete(unsigned long arg)
void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head)
{
- while (!list_empty(head)) {
- struct virt_dma_desc *vd = list_first_entry(head,
- struct virt_dma_desc, node);
+ struct virt_dma_desc *vd, *_vd;
+
+ list_for_each_entry_safe(vd, _vd, head, node) {
if (dmaengine_desc_test_reuse(&vd->tx)) {
list_move_tail(&vd->node, &vc->desc_allocated);
} else {
diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
index 8288fe4d17c3..8cf87b1a284b 100644
--- a/drivers/dma/xilinx/xilinx_dma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -331,6 +331,7 @@ struct xilinx_dma_tx_descriptor {
* @seg_v: Statically allocated segments base
* @cyclic_seg_v: Statically allocated segment base for cyclic transfers
* @start_transfer: Differentiate b/w DMA IP's transfer
+ * @stop_transfer: Differentiate b/w DMA IP's quiesce
*/
struct xilinx_dma_chan {
struct xilinx_dma_device *xdev;
@@ -361,6 +362,7 @@ struct xilinx_dma_chan {
struct xilinx_axidma_tx_segment *seg_v;
struct xilinx_axidma_tx_segment *cyclic_seg_v;
void (*start_transfer)(struct xilinx_dma_chan *chan);
+ int (*stop_transfer)(struct xilinx_dma_chan *chan);
u16 tdest;
};
@@ -946,26 +948,32 @@ static bool xilinx_dma_is_idle(struct xilinx_dma_chan *chan)
}
/**
- * xilinx_dma_halt - Halt DMA channel
+ * xilinx_dma_stop_transfer - Halt DMA channel
* @chan: Driver specific DMA channel
*/
-static void xilinx_dma_halt(struct xilinx_dma_chan *chan)
+static int xilinx_dma_stop_transfer(struct xilinx_dma_chan *chan)
{
- int err;
u32 val;
dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP);
/* Wait for the hardware to halt */
- err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
- (val & XILINX_DMA_DMASR_HALTED), 0,
- XILINX_DMA_LOOP_COUNT);
+ return xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
+ val & XILINX_DMA_DMASR_HALTED, 0,
+ XILINX_DMA_LOOP_COUNT);
+}
- if (err) {
- dev_err(chan->dev, "Cannot stop channel %p: %x\n",
- chan, dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
- chan->err = true;
- }
+/**
+ * xilinx_cdma_stop_transfer - Wait for the current transfer to complete
+ * @chan: Driver specific DMA channel
+ */
+static int xilinx_cdma_stop_transfer(struct xilinx_dma_chan *chan)
+{
+ u32 val;
+
+ return xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
+ val & XILINX_DMA_DMASR_IDLE, 0,
+ XILINX_DMA_LOOP_COUNT);
}
/**
@@ -1653,7 +1661,7 @@ xilinx_cdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst,
{
struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
struct xilinx_dma_tx_descriptor *desc;
- struct xilinx_cdma_tx_segment *segment, *prev;
+ struct xilinx_cdma_tx_segment *segment;
struct xilinx_cdma_desc_hw *hw;
if (!len || len > XILINX_DMA_MAX_TRANS_LEN)
@@ -1680,21 +1688,11 @@ xilinx_cdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst,
hw->dest_addr_msb = upper_32_bits(dma_dst);
}
- /* Fill the previous next descriptor with current */
- prev = list_last_entry(&desc->segments,
- struct xilinx_cdma_tx_segment, node);
- prev->hw.next_desc = segment->phys;
-
/* Insert the segment into the descriptor segments list. */
list_add_tail(&segment->node, &desc->segments);
- prev = segment;
-
- /* Link the last hardware descriptor with the first. */
- segment = list_first_entry(&desc->segments,
- struct xilinx_cdma_tx_segment, node);
desc->async_tx.phys = segment->phys;
- prev->hw.next_desc = segment->phys;
+ hw->next_desc = segment->phys;
return &desc->async_tx;
@@ -2003,12 +2001,17 @@ static int xilinx_dma_terminate_all(struct dma_chan *dchan)
{
struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
u32 reg;
+ int err;
if (chan->cyclic)
xilinx_dma_chan_reset(chan);
- /* Halt the DMA engine */
- xilinx_dma_halt(chan);
+ err = chan->stop_transfer(chan);
+ if (err) {
+ dev_err(chan->dev, "Cannot stop channel %p: %x\n",
+ chan, dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
+ chan->err = true;
+ }
/* Remove and free all of the descriptors in the lists */
xilinx_dma_free_descriptors(chan);
@@ -2397,12 +2400,16 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
return err;
}
- if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA)
+ if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
chan->start_transfer = xilinx_dma_start_transfer;
- else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA)
+ chan->stop_transfer = xilinx_dma_stop_transfer;
+ } else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
chan->start_transfer = xilinx_cdma_start_transfer;
- else
+ chan->stop_transfer = xilinx_cdma_stop_transfer;
+ } else {
chan->start_transfer = xilinx_vdma_start_transfer;
+ chan->stop_transfer = xilinx_dma_stop_transfer;
+ }
/* Initialize the tasklet */
tasklet_init(&chan->tasklet, xilinx_dma_do_tasklet,
diff --git a/drivers/extcon/Kconfig b/drivers/extcon/Kconfig
index fc09c76248b4..32f2dc8e4702 100644
--- a/drivers/extcon/Kconfig
+++ b/drivers/extcon/Kconfig
@@ -52,6 +52,13 @@ config EXTCON_INTEL_INT3496
This ACPI device is typically found on Intel Baytrail or Cherrytrail
based tablets, or other Baytrail / Cherrytrail devices.
+config EXTCON_INTEL_CHT_WC
+ tristate "Intel Cherrytrail Whiskey Cove PMIC extcon driver"
+ depends on INTEL_SOC_PMIC_CHTWC
+ help
+ Say Y here to enable extcon support for charger detection / control
+ on the Intel Cherrytrail Whiskey Cove PMIC.
+
config EXTCON_MAX14577
tristate "Maxim MAX14577/77836 EXTCON Support"
depends on MFD_MAX14577
diff --git a/drivers/extcon/Makefile b/drivers/extcon/Makefile
index 237ac3f953c2..ecfa95804427 100644
--- a/drivers/extcon/Makefile
+++ b/drivers/extcon/Makefile
@@ -9,6 +9,7 @@ obj-$(CONFIG_EXTCON_ARIZONA) += extcon-arizona.o
obj-$(CONFIG_EXTCON_AXP288) += extcon-axp288.o
obj-$(CONFIG_EXTCON_GPIO) += extcon-gpio.o
obj-$(CONFIG_EXTCON_INTEL_INT3496) += extcon-intel-int3496.o
+obj-$(CONFIG_EXTCON_INTEL_CHT_WC) += extcon-intel-cht-wc.o
obj-$(CONFIG_EXTCON_MAX14577) += extcon-max14577.o
obj-$(CONFIG_EXTCON_MAX3355) += extcon-max3355.o
obj-$(CONFIG_EXTCON_MAX77693) += extcon-max77693.o
diff --git a/drivers/extcon/extcon-arizona.c b/drivers/extcon/extcon-arizona.c
index ed78b7c26627..e2d78cd7030d 100644
--- a/drivers/extcon/extcon-arizona.c
+++ b/drivers/extcon/extcon-arizona.c
@@ -51,6 +51,9 @@
#define HPDET_DEBOUNCE 500
#define DEFAULT_MICD_TIMEOUT 2000
+#define ARIZONA_HPDET_WAIT_COUNT 15
+#define ARIZONA_HPDET_WAIT_DELAY_MS 20
+
#define QUICK_HEADPHONE_MAX_OHM 3
#define MICROPHONE_MIN_OHM 1257
#define MICROPHONE_MAX_OHM 30000
@@ -1049,6 +1052,40 @@ static void arizona_hpdet_work(struct work_struct *work)
mutex_unlock(&info->lock);
}
+static int arizona_hpdet_wait(struct arizona_extcon_info *info)
+{
+ struct arizona *arizona = info->arizona;
+ unsigned int val;
+ int i, ret;
+
+ for (i = 0; i < ARIZONA_HPDET_WAIT_COUNT; i++) {
+ ret = regmap_read(arizona->regmap, ARIZONA_HEADPHONE_DETECT_2,
+ &val);
+ if (ret) {
+ dev_err(arizona->dev,
+ "Failed to read HPDET state: %d\n", ret);
+ return ret;
+ }
+
+ switch (info->hpdet_ip_version) {
+ case 0:
+ if (val & ARIZONA_HP_DONE)
+ return 0;
+ break;
+ default:
+ if (val & ARIZONA_HP_DONE_B)
+ return 0;
+ break;
+ }
+
+ msleep(ARIZONA_HPDET_WAIT_DELAY_MS);
+ }
+
+ dev_warn(arizona->dev, "HPDET did not appear to complete\n");
+
+ return -ETIMEDOUT;
+}
+
static irqreturn_t arizona_jackdet(int irq, void *data)
{
struct arizona_extcon_info *info = data;
@@ -1155,6 +1192,15 @@ static irqreturn_t arizona_jackdet(int irq, void *data)
"Removal report failed: %d\n", ret);
}
+ /*
+ * If the jack was removed during a headphone detection we
+ * need to wait for the headphone detection to finish, as
+ * it cannot be aborted. We must not start a new headphone
+ * detection from a fresh insert until this one is finished.
+ */
+ arizona_hpdet_wait(info);
+
regmap_update_bits(arizona->regmap,
ARIZONA_JACK_DETECT_DEBOUNCE,
ARIZONA_MICD_CLAMP_DB | ARIZONA_JD1_DB,
diff --git a/drivers/extcon/extcon-intel-cht-wc.c b/drivers/extcon/extcon-intel-cht-wc.c
new file mode 100644
index 000000000000..91a0023074af
--- /dev/null
+++ b/drivers/extcon/extcon-intel-cht-wc.c
@@ -0,0 +1,395 @@
+/*
+ * Extcon charger detection driver for Intel Cherrytrail Whiskey Cove PMIC
+ * Copyright (C) 2017 Hans de Goede <hdegoede@redhat.com>
+ *
+ * Based on various non-upstream patches to support the CHT Whiskey Cove PMIC:
+ * Copyright (C) 2013-2015 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/extcon.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mfd/intel_soc_pmic.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#define CHT_WC_PHYCTRL 0x5e07
+
+#define CHT_WC_CHGRCTRL0 0x5e16
+#define CHT_WC_CHGRCTRL0_CHGRRESET BIT(0)
+#define CHT_WC_CHGRCTRL0_EMRGCHREN BIT(1)
+#define CHT_WC_CHGRCTRL0_EXTCHRDIS BIT(2)
+#define CHT_WC_CHGRCTRL0_SWCONTROL BIT(3)
+#define CHT_WC_CHGRCTRL0_TTLCK_MASK BIT(4)
+#define CHT_WC_CHGRCTRL0_CCSM_OFF_MASK BIT(5)
+#define CHT_WC_CHGRCTRL0_DBPOFF_MASK BIT(6)
+#define CHT_WC_CHGRCTRL0_WDT_NOKICK BIT(7)
+
+#define CHT_WC_CHGRCTRL1 0x5e17
+
+#define CHT_WC_USBSRC 0x5e29
+#define CHT_WC_USBSRC_STS_MASK GENMASK(1, 0)
+#define CHT_WC_USBSRC_STS_SUCCESS 2
+#define CHT_WC_USBSRC_STS_FAIL 3
+#define CHT_WC_USBSRC_TYPE_SHIFT 2
+#define CHT_WC_USBSRC_TYPE_MASK GENMASK(5, 2)
+#define CHT_WC_USBSRC_TYPE_NONE 0
+#define CHT_WC_USBSRC_TYPE_SDP 1
+#define CHT_WC_USBSRC_TYPE_DCP 2
+#define CHT_WC_USBSRC_TYPE_CDP 3
+#define CHT_WC_USBSRC_TYPE_ACA 4
+#define CHT_WC_USBSRC_TYPE_SE1 5
+#define CHT_WC_USBSRC_TYPE_MHL 6
+#define CHT_WC_USBSRC_TYPE_FLOAT_DP_DN 7
+#define CHT_WC_USBSRC_TYPE_OTHER 8
+#define CHT_WC_USBSRC_TYPE_DCP_EXTPHY 9
+
+#define CHT_WC_PWRSRC_IRQ 0x6e03
+#define CHT_WC_PWRSRC_IRQ_MASK 0x6e0f
+#define CHT_WC_PWRSRC_STS 0x6e1e
+#define CHT_WC_PWRSRC_VBUS BIT(0)
+#define CHT_WC_PWRSRC_DC BIT(1)
+#define CHT_WC_PWRSRC_BAT BIT(2)
+#define CHT_WC_PWRSRC_ID_GND BIT(3)
+#define CHT_WC_PWRSRC_ID_FLOAT BIT(4)
+
+#define CHT_WC_VBUS_GPIO_CTLO 0x6e2d
+#define CHT_WC_VBUS_GPIO_CTLO_OUTPUT BIT(0)
+
+enum cht_wc_usb_id {
+ USB_ID_OTG,
+ USB_ID_GND,
+ USB_ID_FLOAT,
+ USB_RID_A,
+ USB_RID_B,
+ USB_RID_C,
+};
+
+enum cht_wc_mux_select {
+ MUX_SEL_PMIC = 0,
+ MUX_SEL_SOC,
+};
+
+static const unsigned int cht_wc_extcon_cables[] = {
+ EXTCON_USB,
+ EXTCON_USB_HOST,
+ EXTCON_CHG_USB_SDP,
+ EXTCON_CHG_USB_CDP,
+ EXTCON_CHG_USB_DCP,
+ EXTCON_CHG_USB_ACA,
+ EXTCON_NONE,
+};
+
+struct cht_wc_extcon_data {
+ struct device *dev;
+ struct regmap *regmap;
+ struct extcon_dev *edev;
+ unsigned int previous_cable;
+ bool usb_host;
+};
+
+static int cht_wc_extcon_get_id(struct cht_wc_extcon_data *ext, int pwrsrc_sts)
+{
+ if (pwrsrc_sts & CHT_WC_PWRSRC_ID_GND)
+ return USB_ID_GND;
+ if (pwrsrc_sts & CHT_WC_PWRSRC_ID_FLOAT)
+ return USB_ID_FLOAT;
+
+ /*
+ * Once we have iio support for the gpadc we should read the USBID
+ * gpadc channel here and determine the ACA role based on that.
+ */
+ return USB_ID_FLOAT;
+}
+
+static int cht_wc_extcon_get_charger(struct cht_wc_extcon_data *ext,
+ bool ignore_errors)
+{
+ int ret, usbsrc, status;
+ unsigned long timeout;
+
+ /* Charger detection can take up to 600ms, wait 800ms max. */
+ timeout = jiffies + msecs_to_jiffies(800);
+ do {
+ ret = regmap_read(ext->regmap, CHT_WC_USBSRC, &usbsrc);
+ if (ret) {
+ dev_err(ext->dev, "Error reading usbsrc: %d\n", ret);
+ return ret;
+ }
+
+ status = usbsrc & CHT_WC_USBSRC_STS_MASK;
+ if (status == CHT_WC_USBSRC_STS_SUCCESS ||
+ status == CHT_WC_USBSRC_STS_FAIL)
+ break;
+
+ msleep(50); /* Wait a bit before retrying */
+ } while (time_before(jiffies, timeout));
+
+ if (status != CHT_WC_USBSRC_STS_SUCCESS) {
+ if (ignore_errors)
+ return EXTCON_CHG_USB_SDP; /* Safe fallback */
+
+ if (status == CHT_WC_USBSRC_STS_FAIL)
+ dev_warn(ext->dev, "Could not detect charger type\n");
+ else
+ dev_warn(ext->dev, "Timeout detecting charger type\n");
+ return EXTCON_CHG_USB_SDP; /* Safe fallback */
+ }
+
+ usbsrc = (usbsrc & CHT_WC_USBSRC_TYPE_MASK) >> CHT_WC_USBSRC_TYPE_SHIFT;
+ switch (usbsrc) {
+ default:
+ dev_warn(ext->dev,
+ "Unhandled charger type %d, defaulting to SDP\n",
+ usbsrc);
+ /* Fall through, treat as SDP */
+ case CHT_WC_USBSRC_TYPE_SDP:
+ case CHT_WC_USBSRC_TYPE_FLOAT_DP_DN:
+ case CHT_WC_USBSRC_TYPE_OTHER:
+ return EXTCON_CHG_USB_SDP;
+ case CHT_WC_USBSRC_TYPE_CDP:
+ return EXTCON_CHG_USB_CDP;
+ case CHT_WC_USBSRC_TYPE_DCP:
+ case CHT_WC_USBSRC_TYPE_DCP_EXTPHY:
+ case CHT_WC_USBSRC_TYPE_MHL: /* MHL2+ delivers up to 2A, treat as DCP */
+ return EXTCON_CHG_USB_DCP;
+ case CHT_WC_USBSRC_TYPE_ACA:
+ return EXTCON_CHG_USB_ACA;
+ }
+}
+
+static void cht_wc_extcon_set_phymux(struct cht_wc_extcon_data *ext, u8 state)
+{
+ int ret;
+
+ ret = regmap_write(ext->regmap, CHT_WC_PHYCTRL, state);
+ if (ret)
+ dev_err(ext->dev, "Error writing phyctrl: %d\n", ret);
+}
+
+static void cht_wc_extcon_set_5v_boost(struct cht_wc_extcon_data *ext,
+ bool enable)
+{
+ int ret, val;
+
+ val = enable ? CHT_WC_VBUS_GPIO_CTLO_OUTPUT : 0;
+
+ /*
+ * The 5V boost converter is enabled through a gpio on the PMIC. Since
+ * there is currently no gpio driver we access the gpio reg directly.
+ */
+ ret = regmap_update_bits(ext->regmap, CHT_WC_VBUS_GPIO_CTLO,
+ CHT_WC_VBUS_GPIO_CTLO_OUTPUT, val);
+ if (ret)
+ dev_err(ext->dev, "Error writing Vbus GPIO CTLO: %d\n", ret);
+}
+
+/* Small helper to sync EXTCON_CHG_USB_SDP and EXTCON_USB state */
+static void cht_wc_extcon_set_state(struct cht_wc_extcon_data *ext,
+ unsigned int cable, bool state)
+{
+ extcon_set_state_sync(ext->edev, cable, state);
+ if (cable == EXTCON_CHG_USB_SDP)
+ extcon_set_state_sync(ext->edev, EXTCON_USB, state);
+}
+
+static void cht_wc_extcon_pwrsrc_event(struct cht_wc_extcon_data *ext)
+{
+ int ret, pwrsrc_sts, id;
+ unsigned int cable = EXTCON_NONE;
+ /* Ignore errors in host mode, as the 5v boost converter is on then */
+ bool ignore_get_charger_errors = ext->usb_host;
+
+ ret = regmap_read(ext->regmap, CHT_WC_PWRSRC_STS, &pwrsrc_sts);
+ if (ret) {
+ dev_err(ext->dev, "Error reading pwrsrc status: %d\n", ret);
+ return;
+ }
+
+ id = cht_wc_extcon_get_id(ext, pwrsrc_sts);
+ if (id == USB_ID_GND) {
+ /* The 5v boost causes a false VBUS / SDP detect, skip */
+ goto charger_det_done;
+ }
+
+ /* Plugged into a host/charger or not connected? */
+ if (!(pwrsrc_sts & CHT_WC_PWRSRC_VBUS)) {
+ /* Route D+ and D- to PMIC for future charger detection */
+ cht_wc_extcon_set_phymux(ext, MUX_SEL_PMIC);
+ goto set_state;
+ }
+
+ ret = cht_wc_extcon_get_charger(ext, ignore_get_charger_errors);
+ if (ret >= 0)
+ cable = ret;
+
+charger_det_done:
+ /* Route D+ and D- to SoC for the host or gadget controller */
+ cht_wc_extcon_set_phymux(ext, MUX_SEL_SOC);
+
+set_state:
+ if (cable != ext->previous_cable) {
+ cht_wc_extcon_set_state(ext, cable, true);
+ cht_wc_extcon_set_state(ext, ext->previous_cable, false);
+ ext->previous_cable = cable;
+ }
+
+ ext->usb_host = ((id == USB_ID_GND) || (id == USB_RID_A));
+ extcon_set_state_sync(ext->edev, EXTCON_USB_HOST, ext->usb_host);
+}
+
+static irqreturn_t cht_wc_extcon_isr(int irq, void *data)
+{
+ struct cht_wc_extcon_data *ext = data;
+ int ret, irqs;
+
+ ret = regmap_read(ext->regmap, CHT_WC_PWRSRC_IRQ, &irqs);
+ if (ret) {
+ dev_err(ext->dev, "Error reading irqs: %d\n", ret);
+ return IRQ_NONE;
+ }
+
+ cht_wc_extcon_pwrsrc_event(ext);
+
+ ret = regmap_write(ext->regmap, CHT_WC_PWRSRC_IRQ, irqs);
+ if (ret) {
+ dev_err(ext->dev, "Error writing irqs: %d\n", ret);
+ return IRQ_NONE;
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int cht_wc_extcon_sw_control(struct cht_wc_extcon_data *ext, bool enable)
+{
+ int ret, mask, val;
+
+ mask = CHT_WC_CHGRCTRL0_SWCONTROL | CHT_WC_CHGRCTRL0_CCSM_OFF_MASK;
+ val = enable ? mask : 0;
+ ret = regmap_update_bits(ext->regmap, CHT_WC_CHGRCTRL0, mask, val);
+ if (ret)
+ dev_err(ext->dev, "Error setting sw control: %d\n", ret);
+
+ return ret;
+}
+
+static int cht_wc_extcon_probe(struct platform_device *pdev)
+{
+ struct intel_soc_pmic *pmic = dev_get_drvdata(pdev->dev.parent);
+ struct cht_wc_extcon_data *ext;
+ int irq, ret;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ ext = devm_kzalloc(&pdev->dev, sizeof(*ext), GFP_KERNEL);
+ if (!ext)
+ return -ENOMEM;
+
+ ext->dev = &pdev->dev;
+ ext->regmap = pmic->regmap;
+ ext->previous_cable = EXTCON_NONE;
+
+ /* Initialize extcon device */
+ ext->edev = devm_extcon_dev_allocate(ext->dev, cht_wc_extcon_cables);
+ if (IS_ERR(ext->edev))
+ return PTR_ERR(ext->edev);
+
+ /*
+ * When a host-cable is detected the BIOS enables an external 5v boost
+ * converter to power connected devices. There are 2 problems with this:
+ * 1) It gets seen by the external battery charger as a valid Vbus
+ *    supply, which then tries to feed Vsys from it, creating a
+ *    feedback loop that causes approx. 300 mA extra battery drain
+ *    (and unless we drive the external-charger-disable pin high it
+ *    also tries to charge the battery, causing even more feedback).
+ * 2) It gets seen by the pwrsrc block as an SDP USB Vbus supply.
+ * Since the external battery charger has its own 5v boost converter
+ * which does not have these issues, we simply turn the separate
+ * external 5v boost converter off and leave it off entirely.
+ */
+ cht_wc_extcon_set_5v_boost(ext, false);
+
+ /* Enable sw control */
+ ret = cht_wc_extcon_sw_control(ext, true);
+ if (ret)
+ return ret;
+
+ /* Register extcon device */
+ ret = devm_extcon_dev_register(ext->dev, ext->edev);
+ if (ret) {
+ dev_err(ext->dev, "Error registering extcon device: %d\n", ret);
+ goto disable_sw_control;
+ }
+
+ /* Route D+ and D- to PMIC for initial charger detection */
+ cht_wc_extcon_set_phymux(ext, MUX_SEL_PMIC);
+
+ /* Get initial state */
+ cht_wc_extcon_pwrsrc_event(ext);
+
+ ret = devm_request_threaded_irq(ext->dev, irq, NULL, cht_wc_extcon_isr,
+ IRQF_ONESHOT, pdev->name, ext);
+ if (ret) {
+ dev_err(ext->dev, "Error requesting interrupt: %d\n", ret);
+ goto disable_sw_control;
+ }
+
+ /* Unmask irqs */
+ ret = regmap_write(ext->regmap, CHT_WC_PWRSRC_IRQ_MASK,
+ (int)~(CHT_WC_PWRSRC_VBUS | CHT_WC_PWRSRC_ID_GND |
+ CHT_WC_PWRSRC_ID_FLOAT));
+ if (ret) {
+ dev_err(ext->dev, "Error writing irq-mask: %d\n", ret);
+ goto disable_sw_control;
+ }
+
+ platform_set_drvdata(pdev, ext);
+
+ return 0;
+
+disable_sw_control:
+ cht_wc_extcon_sw_control(ext, false);
+ return ret;
+}
+
+static int cht_wc_extcon_remove(struct platform_device *pdev)
+{
+ struct cht_wc_extcon_data *ext = platform_get_drvdata(pdev);
+
+ cht_wc_extcon_sw_control(ext, false);
+
+ return 0;
+}
+
+static const struct platform_device_id cht_wc_extcon_table[] = {
+ { .name = "cht_wcove_pwrsrc" },
+ {},
+};
+MODULE_DEVICE_TABLE(platform, cht_wc_extcon_table);
+
+static struct platform_driver cht_wc_extcon_driver = {
+ .probe = cht_wc_extcon_probe,
+ .remove = cht_wc_extcon_remove,
+ .id_table = cht_wc_extcon_table,
+ .driver = {
+ .name = "cht_wcove_pwrsrc",
+ },
+};
+module_platform_driver(cht_wc_extcon_driver);
+
+MODULE_DESCRIPTION("Intel Cherrytrail Whiskey Cove PMIC extcon driver");
+MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/extcon/extcon-palmas.c b/drivers/extcon/extcon-palmas.c
index ca904e8b3235..2af4369a2d73 100644
--- a/drivers/extcon/extcon-palmas.c
+++ b/drivers/extcon/extcon-palmas.c
@@ -413,6 +413,12 @@ static int palmas_usb_resume(struct device *dev)
if (palmas_usb->enable_gpio_id_detection)
disable_irq_wake(palmas_usb->gpio_id_irq);
}
+
+ /* Check if the GPIO states changed during suspend/resume */
+ if (palmas_usb->enable_gpio_vbus_detection)
+ palmas_vbus_irq_handler(palmas_usb->gpio_vbus_irq, palmas_usb);
+ palmas_gpio_id_detect(&palmas_usb->wq_detectid.work);
+
return 0;
};
#endif
diff --git a/drivers/extcon/extcon-usb-gpio.c b/drivers/extcon/extcon-usb-gpio.c
index a5e1882b4ca6..9c925b05b7aa 100644
--- a/drivers/extcon/extcon-usb-gpio.c
+++ b/drivers/extcon/extcon-usb-gpio.c
@@ -26,7 +26,6 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
-#include <linux/acpi.h>
#include <linux/pinctrl/consumer.h>
#define USB_GPIO_DEBOUNCE_MS 20 /* ms */
@@ -111,7 +110,7 @@ static int usb_extcon_probe(struct platform_device *pdev)
struct usb_extcon_info *info;
int ret;
- if (!np && !ACPI_HANDLE(dev))
+ if (!np)
return -EINVAL;
info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
@@ -195,7 +194,7 @@ static int usb_extcon_probe(struct platform_device *pdev)
}
platform_set_drvdata(pdev, info);
- device_init_wakeup(dev, true);
+ device_set_wakeup_capable(&pdev->dev, true);
/* Perform initial detection */
usb_extcon_detect_cable(&info->wq_detcable.work);
@@ -282,9 +281,8 @@ static int usb_extcon_resume(struct device *dev)
if (info->vbus_gpiod)
enable_irq(info->vbus_irq);
- if (!device_may_wakeup(dev))
- queue_delayed_work(system_power_efficient_wq,
- &info->wq_detcable, 0);
+ queue_delayed_work(system_power_efficient_wq,
+ &info->wq_detcable, 0);
return ret;
}
diff --git a/drivers/extcon/extcon.c b/drivers/extcon/extcon.c
index e7750545469f..f422a78ba342 100644
--- a/drivers/extcon/extcon.c
+++ b/drivers/extcon/extcon.c
@@ -230,9 +230,6 @@ struct extcon_cable {
};
static struct class *extcon_class;
-#if defined(CONFIG_ANDROID)
-static struct class_compat *switch_class;
-#endif /* CONFIG_ANDROID */
static LIST_HEAD(extcon_dev_list);
static DEFINE_MUTEX(extcon_dev_list_lock);
@@ -380,7 +377,7 @@ static ssize_t state_show(struct device *dev, struct device_attribute *attr,
for (i = 0; i < edev->max_supported; i++) {
count += sprintf(buf + count, "%s=%d\n",
extcon_info[edev->supported_cable[i]].name,
- !!(edev->state & (1 << i)));
+ !!(edev->state & BIT(i)));
}
return count;
@@ -1032,12 +1029,6 @@ static int create_extcon_class(void)
if (IS_ERR(extcon_class))
return PTR_ERR(extcon_class);
extcon_class->dev_groups = extcon_groups;
-
-#if defined(CONFIG_ANDROID)
- switch_class = class_compat_register("switch");
- if (WARN(!switch_class, "cannot allocate"))
- return -ENOMEM;
-#endif /* CONFIG_ANDROID */
}
return 0;
@@ -1259,10 +1250,6 @@ int extcon_dev_register(struct extcon_dev *edev)
put_device(&edev->dev);
goto err_dev;
}
-#if defined(CONFIG_ANDROID)
- if (switch_class)
- ret = class_compat_create_link(switch_class, &edev->dev, NULL);
-#endif /* CONFIG_ANDROID */
spin_lock_init(&edev->lock);
@@ -1350,10 +1337,6 @@ void extcon_dev_unregister(struct extcon_dev *edev)
kfree(edev->cables);
}
-#if defined(CONFIG_ANDROID)
- if (switch_class)
- class_compat_remove_link(switch_class, &edev->dev, NULL);
-#endif
put_device(&edev->dev);
}
EXPORT_SYMBOL_GPL(extcon_dev_unregister);
@@ -1424,9 +1407,6 @@ module_init(extcon_class_init);
static void __exit extcon_class_exit(void)
{
-#if defined(CONFIG_ANDROID)
- class_compat_unregister(switch_class);
-#endif
class_destroy(extcon_class);
}
module_exit(extcon_class_exit);
diff --git a/drivers/firewire/core-topology.c b/drivers/firewire/core-topology.c
index 0de83508f321..939d259ddf19 100644
--- a/drivers/firewire/core-topology.c
+++ b/drivers/firewire/core-topology.c
@@ -124,7 +124,7 @@ static struct fw_node *fw_node_create(u32 sid, int port_count, int color)
node->initiated_reset = SELF_ID_PHY_INITIATOR(sid);
node->port_count = port_count;
- atomic_set(&node->ref_count, 1);
+ refcount_set(&node->ref_count, 1);
INIT_LIST_HEAD(&node->link);
return node;
diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
index e1480ff683d2..c07962ead5e4 100644
--- a/drivers/firewire/core.h
+++ b/drivers/firewire/core.h
@@ -12,7 +12,7 @@
#include <linux/slab.h>
#include <linux/types.h>
-#include <linux/atomic.h>
+#include <linux/refcount.h>
struct device;
struct fw_card;
@@ -184,7 +184,7 @@ struct fw_node {
* local node to this node. */
u8 max_depth:4; /* Maximum depth to any leaf node */
u8 max_hops:4; /* Max hops in this sub tree */
- atomic_t ref_count;
+ refcount_t ref_count;
/* For serializing node topology into a list. */
struct list_head link;
@@ -197,14 +197,14 @@ struct fw_node {
static inline struct fw_node *fw_node_get(struct fw_node *node)
{
- atomic_inc(&node->ref_count);
+ refcount_inc(&node->ref_count);
return node;
}
static inline void fw_node_put(struct fw_node *node)
{
- if (atomic_dec_and_test(&node->ref_count))
+ if (refcount_dec_and_test(&node->ref_count))
kfree(node);
}
diff --git a/drivers/firmware/arm_scpi.c b/drivers/firmware/arm_scpi.c
index 9ad0b1934be9..f6cfc31d34c7 100644
--- a/drivers/firmware/arm_scpi.c
+++ b/drivers/firmware/arm_scpi.c
@@ -538,7 +538,7 @@ static int scpi_send_message(u8 idx, void *tx_buf, unsigned int tx_len,
msg->tx_len = tx_len;
msg->rx_buf = rx_buf;
msg->rx_len = rx_len;
- init_completion(&msg->done);
+ reinit_completion(&msg->done);
ret = mbox_send_message(scpi_chan->chan, msg);
if (ret < 0 || !rx_buf)
@@ -872,8 +872,11 @@ static int scpi_alloc_xfer_list(struct device *dev, struct scpi_chan *ch)
return -ENOMEM;
ch->xfers = xfers;
- for (i = 0; i < MAX_SCPI_XFERS; i++, xfers++)
+ for (i = 0; i < MAX_SCPI_XFERS; i++, xfers++) {
+ init_completion(&xfers->done);
list_add_tail(&xfers->node, &ch->xfers_list);
+ }
+
return 0;
}
diff --git a/drivers/firmware/efi/libstub/fdt.c b/drivers/firmware/efi/libstub/fdt.c
index 41f457be64e8..8830fa601e45 100644
--- a/drivers/firmware/efi/libstub/fdt.c
+++ b/drivers/firmware/efi/libstub/fdt.c
@@ -16,6 +16,22 @@
#include "efistub.h"
+#define EFI_DT_ADDR_CELLS_DEFAULT 2
+#define EFI_DT_SIZE_CELLS_DEFAULT 2
+
+static void fdt_update_cell_size(efi_system_table_t *sys_table, void *fdt)
+{
+ int offset;
+
+ offset = fdt_path_offset(fdt, "/");
+ /* Set the #address-cells and #size-cells values for an empty tree */
+
+ fdt_setprop_u32(fdt, offset, "#address-cells",
+ EFI_DT_ADDR_CELLS_DEFAULT);
+
+ fdt_setprop_u32(fdt, offset, "#size-cells", EFI_DT_SIZE_CELLS_DEFAULT);
+}
+
static efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
unsigned long orig_fdt_size,
void *fdt, int new_fdt_size, char *cmdline_ptr,
@@ -42,10 +58,18 @@ static efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
}
}
- if (orig_fdt)
+ if (orig_fdt) {
status = fdt_open_into(orig_fdt, fdt, new_fdt_size);
- else
+ } else {
status = fdt_create_empty_tree(fdt, new_fdt_size);
+ if (status == 0) {
+ /*
+ * Any failure from the following function is
+ * non-critical.
+ */
+ fdt_update_cell_size(sys_table, fdt);
+ }
+ }
if (status != 0)
goto fdt_set_fail;
diff --git a/drivers/firmware/google/Kconfig b/drivers/firmware/google/Kconfig
index 29c8cdda82a1..f16b381a569c 100644
--- a/drivers/firmware/google/Kconfig
+++ b/drivers/firmware/google/Kconfig
@@ -1,18 +1,16 @@
-config GOOGLE_FIRMWARE
+menuconfig GOOGLE_FIRMWARE
bool "Google Firmware Drivers"
- depends on X86
default n
help
These firmware drivers are used by Google's servers. They are
only useful if you are working directly on one of their
proprietary servers. If in doubt, say "N".
-menu "Google Firmware Drivers"
- depends on GOOGLE_FIRMWARE
+if GOOGLE_FIRMWARE
config GOOGLE_SMI
tristate "SMI interface for Google platforms"
- depends on ACPI && DMI && EFI
+ depends on X86 && ACPI && DMI && EFI
select EFI_VARS
help
Say Y here if you want to enable SMI callbacks for Google
@@ -20,12 +18,57 @@ config GOOGLE_SMI
clearing the EFI event log and reading and writing NVRAM
variables.
+config GOOGLE_COREBOOT_TABLE
+ tristate
+ depends on GOOGLE_COREBOOT_TABLE_ACPI || GOOGLE_COREBOOT_TABLE_OF
+
+config GOOGLE_COREBOOT_TABLE_ACPI
+ tristate "Coreboot Table Access - ACPI"
+ depends on ACPI
+ select GOOGLE_COREBOOT_TABLE
+ help
+ This option enables the coreboot_table module, which provides other
+ firmware modules with access to the coreboot table. The coreboot table
+ pointer is accessed through the ACPI "GOOGCB00" object.
+ If unsure say N.
+
+config GOOGLE_COREBOOT_TABLE_OF
+ tristate "Coreboot Table Access - Device Tree"
+ depends on OF
+ select GOOGLE_COREBOOT_TABLE
+ help
+ This option enables the coreboot_table module, which provides other
+ firmware modules with access to the coreboot table. The coreboot table pointer
+ is accessed through the device tree node /firmware/coreboot.
+ If unsure say N.
+
config GOOGLE_MEMCONSOLE
- tristate "Firmware Memory Console"
- depends on DMI
+ tristate
+ depends on GOOGLE_MEMCONSOLE_X86_LEGACY || GOOGLE_MEMCONSOLE_COREBOOT
+
+config GOOGLE_MEMCONSOLE_X86_LEGACY
+ tristate "Firmware Memory Console - X86 Legacy support"
+ depends on X86 && ACPI && DMI
+ select GOOGLE_MEMCONSOLE
help
This option enables the kernel to search for a firmware log in
the EBDA on Google servers. If found, this log is exported to
userland in the file /sys/firmware/log.
-endmenu
+config GOOGLE_MEMCONSOLE_COREBOOT
+ tristate "Firmware Memory Console"
+ depends on GOOGLE_COREBOOT_TABLE
+ select GOOGLE_MEMCONSOLE
+ help
+ This option enables the kernel to search for a firmware log in
+ the coreboot table. If found, this log is exported to userland
+ in the file /sys/firmware/log.
+
+config GOOGLE_VPD
+ tristate "Vital Product Data"
+ depends on GOOGLE_COREBOOT_TABLE
+ help
+ This option enables the kernel to expose the content of Google VPD
+ under /sys/firmware/vpd.
+
+endif # GOOGLE_FIRMWARE
diff --git a/drivers/firmware/google/Makefile b/drivers/firmware/google/Makefile
index 54a294e3cb61..bc4de02202ad 100644
--- a/drivers/firmware/google/Makefile
+++ b/drivers/firmware/google/Makefile
@@ -1,3 +1,11 @@
obj-$(CONFIG_GOOGLE_SMI) += gsmi.o
-obj-$(CONFIG_GOOGLE_MEMCONSOLE) += memconsole.o
+obj-$(CONFIG_GOOGLE_COREBOOT_TABLE) += coreboot_table.o
+obj-$(CONFIG_GOOGLE_COREBOOT_TABLE_ACPI) += coreboot_table-acpi.o
+obj-$(CONFIG_GOOGLE_COREBOOT_TABLE_OF) += coreboot_table-of.o
+obj-$(CONFIG_GOOGLE_MEMCONSOLE) += memconsole.o
+obj-$(CONFIG_GOOGLE_MEMCONSOLE_COREBOOT) += memconsole-coreboot.o
+obj-$(CONFIG_GOOGLE_MEMCONSOLE_X86_LEGACY) += memconsole-x86-legacy.o
+
+vpd-sysfs-y := vpd.o vpd_decode.o
+obj-$(CONFIG_GOOGLE_VPD) += vpd-sysfs.o
diff --git a/drivers/firmware/google/coreboot_table-acpi.c b/drivers/firmware/google/coreboot_table-acpi.c
new file mode 100644
index 000000000000..fb98db2d20e2
--- /dev/null
+++ b/drivers/firmware/google/coreboot_table-acpi.c
@@ -0,0 +1,88 @@
+/*
+ * coreboot_table-acpi.c
+ *
+ * Using ACPI to locate Coreboot table and provide coreboot table access.
+ *
+ * Copyright 2017 Google Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License v2.0 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/acpi.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include "coreboot_table.h"
+
+static int coreboot_table_acpi_probe(struct platform_device *pdev)
+{
+ phys_addr_t phyaddr;
+ resource_size_t len;
+ struct coreboot_table_header __iomem *header = NULL;
+ struct resource *res;
+ void __iomem *ptr = NULL;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -EINVAL;
+
+ len = resource_size(res);
+ if (!res->start || !len)
+ return -EINVAL;
+
+ phyaddr = res->start;
+ header = ioremap_cache(phyaddr, sizeof(*header));
+ if (header == NULL)
+ return -ENOMEM;
+
+ ptr = ioremap_cache(phyaddr,
+ header->header_bytes + header->table_bytes);
+ iounmap(header);
+ if (!ptr)
+ return -ENOMEM;
+
+ return coreboot_table_init(ptr);
+}
+
+static int coreboot_table_acpi_remove(struct platform_device *pdev)
+{
+ return coreboot_table_exit();
+}
+
+static const struct acpi_device_id cros_coreboot_acpi_match[] = {
+ { "GOOGCB00", 0 },
+ { "BOOT0000", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, cros_coreboot_acpi_match);
+
+static struct platform_driver coreboot_table_acpi_driver = {
+ .probe = coreboot_table_acpi_probe,
+ .remove = coreboot_table_acpi_remove,
+ .driver = {
+ .name = "coreboot_table_acpi",
+ .acpi_match_table = ACPI_PTR(cros_coreboot_acpi_match),
+ },
+};
+
+static int __init coreboot_table_acpi_init(void)
+{
+ return platform_driver_register(&coreboot_table_acpi_driver);
+}
+
+module_init(coreboot_table_acpi_init);
+
+MODULE_AUTHOR("Google, Inc.");
+MODULE_LICENSE("GPL");
diff --git a/drivers/firmware/google/coreboot_table-of.c b/drivers/firmware/google/coreboot_table-of.c
new file mode 100644
index 000000000000..727acdc83e83
--- /dev/null
+++ b/drivers/firmware/google/coreboot_table-of.c
@@ -0,0 +1,82 @@
+/*
+ * coreboot_table-of.c
+ *
+ * Coreboot table access through open firmware.
+ *
+ * Copyright 2017 Google Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License v2.0 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+
+#include "coreboot_table.h"
+
+static int coreboot_table_of_probe(struct platform_device *pdev)
+{
+ struct device_node *fw_dn = pdev->dev.of_node;
+ void __iomem *ptr;
+
+ ptr = of_iomap(fw_dn, 0);
+ of_node_put(fw_dn);
+ if (!ptr)
+ return -ENOMEM;
+
+ return coreboot_table_init(ptr);
+}
+
+static int coreboot_table_of_remove(struct platform_device *pdev)
+{
+ return coreboot_table_exit();
+}
+
+static const struct of_device_id coreboot_of_match[] = {
+ { .compatible = "coreboot" },
+ {},
+};
+
+static struct platform_driver coreboot_table_of_driver = {
+ .probe = coreboot_table_of_probe,
+ .remove = coreboot_table_of_remove,
+ .driver = {
+ .name = "coreboot_table_of",
+ .of_match_table = coreboot_of_match,
+ },
+};
+
+static int __init platform_coreboot_table_of_init(void)
+{
+ struct platform_device *pdev;
+ struct device_node *of_node;
+
+ /* Limit device creation to the presence of the /firmware/coreboot node */
+ of_node = of_find_node_by_path("/firmware/coreboot");
+ if (!of_node)
+ return -ENODEV;
+
+ if (!of_match_node(coreboot_of_match, of_node))
+ return -ENODEV;
+
+ pdev = of_platform_device_create(of_node, "coreboot_table_of", NULL);
+ if (!pdev)
+ return -ENODEV;
+
+ return platform_driver_register(&coreboot_table_of_driver);
+}
+
+module_init(platform_coreboot_table_of_init);
+
+MODULE_AUTHOR("Google, Inc.");
+MODULE_LICENSE("GPL");
diff --git a/drivers/firmware/google/coreboot_table.c b/drivers/firmware/google/coreboot_table.c
new file mode 100644
index 000000000000..0019d3ec18dd
--- /dev/null
+++ b/drivers/firmware/google/coreboot_table.c
@@ -0,0 +1,94 @@
+/*
+ * coreboot_table.c
+ *
+ * Module providing coreboot table access.
+ *
+ * Copyright 2017 Google Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License v2.0 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include "coreboot_table.h"
+
+struct coreboot_table_entry {
+ u32 tag;
+ u32 size;
+};
+
+static struct coreboot_table_header __iomem *ptr_header;
+
+/*
+ * This function searches the coreboot table for an entry that matches the
+ * given tag and copies it to the caller's buffer. The coreboot table consists
+ * of a header directly followed by a number of small, variable-sized entries,
+ * each of which starts with an identifying tag and its total length.
+ */
+int coreboot_table_find(int tag, void *data, size_t data_size)
+{
+ struct coreboot_table_header header;
+ struct coreboot_table_entry entry;
+ void *ptr_entry;
+ int i;
+
+ if (!ptr_header)
+ return -EPROBE_DEFER;
+
+ memcpy_fromio(&header, ptr_header, sizeof(header));
+
+ if (strncmp(header.signature, "LBIO", sizeof(header.signature))) {
+ pr_warn("coreboot_table: coreboot table missing or corrupt!\n");
+ return -ENODEV;
+ }
+
+ ptr_entry = (void *)ptr_header + header.header_bytes;
+
+ for (i = 0; i < header.table_entries; i++) {
+ memcpy_fromio(&entry, ptr_entry, sizeof(entry));
+ if (entry.tag == tag) {
+ if (data_size < entry.size)
+ return -EINVAL;
+
+ memcpy_fromio(data, ptr_entry, entry.size);
+
+ return 0;
+ }
+
+ ptr_entry += entry.size;
+ }
+
+ return -ENOENT;
+}
+EXPORT_SYMBOL(coreboot_table_find);
+
+int coreboot_table_init(void __iomem *ptr)
+{
+ ptr_header = ptr;
+
+ return 0;
+}
+EXPORT_SYMBOL(coreboot_table_init);
+
+int coreboot_table_exit(void)
+{
+ if (ptr_header)
+ iounmap(ptr_header);
+
+ return 0;
+}
+EXPORT_SYMBOL(coreboot_table_exit);
+
+MODULE_AUTHOR("Google, Inc.");
+MODULE_LICENSE("GPL");
diff --git a/drivers/firmware/google/coreboot_table.h b/drivers/firmware/google/coreboot_table.h
new file mode 100644
index 000000000000..6eff1ae0c5d3
--- /dev/null
+++ b/drivers/firmware/google/coreboot_table.h
@@ -0,0 +1,50 @@
+/*
+ * coreboot_table.h
+ *
+ * Internal header for coreboot table access.
+ *
+ * Copyright 2017 Google Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License v2.0 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __COREBOOT_TABLE_H
+#define __COREBOOT_TABLE_H
+
+#include <linux/io.h>
+
+/* Coreboot table entry structures that are used */
+struct lb_cbmem_ref {
+ uint32_t tag;
+ uint32_t size;
+
+ uint64_t cbmem_addr;
+};
+
+/* Coreboot table header structure */
+struct coreboot_table_header {
+ char signature[4];
+ u32 header_bytes;
+ u32 header_checksum;
+ u32 table_bytes;
+ u32 table_checksum;
+ u32 table_entries;
+};
+
+/* Retrieve coreboot table entry with tag *tag* and copy it to data */
+int coreboot_table_find(int tag, void *data, size_t data_size);
+
+/* Initialize coreboot table module given a pointer to iomem */
+int coreboot_table_init(void __iomem *ptr);
+
+/* Cleanup coreboot table module */
+int coreboot_table_exit(void);
+
+#endif /* __COREBOOT_TABLE_H */
diff --git a/drivers/firmware/google/memconsole-coreboot.c b/drivers/firmware/google/memconsole-coreboot.c
new file mode 100644
index 000000000000..02711114dece
--- /dev/null
+++ b/drivers/firmware/google/memconsole-coreboot.c
@@ -0,0 +1,109 @@
+/*
+ * memconsole-coreboot.c
+ *
+ * Memory based BIOS console accessed through coreboot table.
+ *
+ * Copyright 2017 Google Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License v2.0 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include "memconsole.h"
+#include "coreboot_table.h"
+
+#define CB_TAG_CBMEM_CONSOLE 0x17
+
+/* CBMEM firmware console log descriptor. */
+struct cbmem_cons {
+ u32 buffer_size;
+ u32 buffer_cursor;
+ u8 buffer_body[0];
+} __packed;
+
+static struct cbmem_cons __iomem *cbmem_console;
+
+static int memconsole_coreboot_init(phys_addr_t physaddr)
+{
+ struct cbmem_cons __iomem *tmp_cbmc;
+
+ tmp_cbmc = memremap(physaddr, sizeof(*tmp_cbmc), MEMREMAP_WB);
+
+ if (!tmp_cbmc)
+ return -ENOMEM;
+
+ cbmem_console = memremap(physaddr,
+ tmp_cbmc->buffer_size + sizeof(*cbmem_console),
+ MEMREMAP_WB);
+ memunmap(tmp_cbmc);
+
+ if (!cbmem_console)
+ return -ENOMEM;
+
+ memconsole_setup(cbmem_console->buffer_body,
+ min(cbmem_console->buffer_cursor, cbmem_console->buffer_size));
+
+ return 0;
+}
+
+static int memconsole_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct lb_cbmem_ref entry;
+
+ ret = coreboot_table_find(CB_TAG_CBMEM_CONSOLE, &entry, sizeof(entry));
+ if (ret)
+ return ret;
+
+ ret = memconsole_coreboot_init(entry.cbmem_addr);
+ if (ret)
+ return ret;
+
+ return memconsole_sysfs_init();
+}
+
+static int memconsole_remove(struct platform_device *pdev)
+{
+ memconsole_exit();
+
+ if (cbmem_console)
+ memunmap(cbmem_console);
+
+ return 0;
+}
+
+static struct platform_driver memconsole_driver = {
+ .probe = memconsole_probe,
+ .remove = memconsole_remove,
+ .driver = {
+ .name = "memconsole",
+ },
+};
+
+static int __init platform_memconsole_init(void)
+{
+ struct platform_device *pdev;
+
+ pdev = platform_device_register_simple("memconsole", -1, NULL, 0);
+ if (IS_ERR(pdev))
+ return PTR_ERR(pdev);
+
+ platform_driver_register(&memconsole_driver);
+
+ return 0;
+}
+
+module_init(platform_memconsole_init);
+
+MODULE_AUTHOR("Google, Inc.");
+MODULE_LICENSE("GPL");
diff --git a/drivers/firmware/google/memconsole-x86-legacy.c b/drivers/firmware/google/memconsole-x86-legacy.c
new file mode 100644
index 000000000000..1f279ee883b9
--- /dev/null
+++ b/drivers/firmware/google/memconsole-x86-legacy.c
@@ -0,0 +1,153 @@
+/*
+ * memconsole-x86-legacy.c
+ *
+ * EBDA specific parts of the memory based BIOS console.
+ *
+ * Copyright 2017 Google Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License v2.0 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/dmi.h>
+#include <linux/mm.h>
+#include <asm/bios_ebda.h>
+#include <linux/acpi.h>
+
+#include "memconsole.h"
+
+#define BIOS_MEMCONSOLE_V1_MAGIC 0xDEADBABE
+#define BIOS_MEMCONSOLE_V2_MAGIC (('M')|('C'<<8)|('O'<<16)|('N'<<24))
+
+struct biosmemcon_ebda {
+ u32 signature;
+ union {
+ struct {
+ u8 enabled;
+ u32 buffer_addr;
+ u16 start;
+ u16 end;
+ u16 num_chars;
+ u8 wrapped;
+ } __packed v1;
+ struct {
+ u32 buffer_addr;
+ /* Misdocumented as number of pages! */
+ u16 num_bytes;
+ u16 start;
+ u16 end;
+ } __packed v2;
+ };
+} __packed;
+
+static void found_v1_header(struct biosmemcon_ebda *hdr)
+{
+ pr_info("memconsole: BIOS console v1 EBDA structure found at %p\n",
+ hdr);
+ pr_info("memconsole: BIOS console buffer at 0x%.8x, start = %d, end = %d, num = %d\n",
+ hdr->v1.buffer_addr, hdr->v1.start,
+ hdr->v1.end, hdr->v1.num_chars);
+
+ memconsole_setup(phys_to_virt(hdr->v1.buffer_addr), hdr->v1.num_chars);
+}
+
+static void found_v2_header(struct biosmemcon_ebda *hdr)
+{
+ pr_info("memconsole: BIOS console v2 EBDA structure found at %p\n",
+ hdr);
+ pr_info("memconsole: BIOS console buffer at 0x%.8x, start = %d, end = %d, num_bytes = %d\n",
+ hdr->v2.buffer_addr, hdr->v2.start,
+ hdr->v2.end, hdr->v2.num_bytes);
+
+ memconsole_setup(phys_to_virt(hdr->v2.buffer_addr + hdr->v2.start),
+ hdr->v2.end - hdr->v2.start);
+}
+
+/*
+ * Search through the EBDA for the BIOS Memory Console, and
+ * set the global variables to point to it. Return true if found.
+ */
+static bool memconsole_ebda_init(void)
+{
+ unsigned int address;
+ size_t length, cur;
+
+ address = get_bios_ebda();
+ if (!address) {
+ pr_info("memconsole: BIOS EBDA non-existent.\n");
+ return false;
+ }
+
+ /* EBDA length is byte 0 of EBDA (in KB) */
+ length = *(u8 *)phys_to_virt(address);
+ length <<= 10; /* convert to bytes */
+
+ /*
+ * Search through EBDA for BIOS memory console structure
+ * note: signature is not necessarily dword-aligned
+ */
+ for (cur = 0; cur < length; cur++) {
+ struct biosmemcon_ebda *hdr = phys_to_virt(address + cur);
+
+ /* memconsole v1 */
+ if (hdr->signature == BIOS_MEMCONSOLE_V1_MAGIC) {
+ found_v1_header(hdr);
+ return true;
+ }
+
+ /* memconsole v2 */
+ if (hdr->signature == BIOS_MEMCONSOLE_V2_MAGIC) {
+ found_v2_header(hdr);
+ return true;
+ }
+ }
+
+ pr_info("memconsole: BIOS console EBDA structure not found!\n");
+ return false;
+}
+
+static struct dmi_system_id memconsole_dmi_table[] __initdata = {
+ {
+ .ident = "Google Board",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Google, Inc."),
+ },
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(dmi, memconsole_dmi_table);
+
+static bool __init memconsole_find(void)
+{
+ if (!dmi_check_system(memconsole_dmi_table))
+ return false;
+
+ return memconsole_ebda_init();
+}
+
+static int __init memconsole_x86_init(void)
+{
+ if (!memconsole_find())
+ return -ENODEV;
+
+ return memconsole_sysfs_init();
+}
+
+static void __exit memconsole_x86_exit(void)
+{
+ memconsole_exit();
+}
+
+module_init(memconsole_x86_init);
+module_exit(memconsole_x86_exit);
+
+MODULE_AUTHOR("Google, Inc.");
+MODULE_LICENSE("GPL");
diff --git a/drivers/firmware/google/memconsole.c b/drivers/firmware/google/memconsole.c
index 2f569aaed4c7..94e200ddb4fa 100644
--- a/drivers/firmware/google/memconsole.c
+++ b/drivers/firmware/google/memconsole.c
@@ -1,66 +1,36 @@
/*
* memconsole.c
*
- * Infrastructure for importing the BIOS memory based console
- * into the kernel log ringbuffer.
+ * Architecture-independent parts of the memory based BIOS console.
*
- * Copyright 2010 Google Inc. All rights reserved.
+ * Copyright 2017 Google Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License v2.0 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
*/
-#include <linux/ctype.h>
#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
#include <linux/sysfs.h>
#include <linux/kobject.h>
#include <linux/module.h>
-#include <linux/dmi.h>
-#include <linux/io.h>
-#include <asm/bios_ebda.h>
-#define BIOS_MEMCONSOLE_V1_MAGIC 0xDEADBABE
-#define BIOS_MEMCONSOLE_V2_MAGIC (('M')|('C'<<8)|('O'<<16)|('N'<<24))
+#include "memconsole.h"
-struct biosmemcon_ebda {
- u32 signature;
- union {
- struct {
- u8 enabled;
- u32 buffer_addr;
- u16 start;
- u16 end;
- u16 num_chars;
- u8 wrapped;
- } __packed v1;
- struct {
- u32 buffer_addr;
- /* Misdocumented as number of pages! */
- u16 num_bytes;
- u16 start;
- u16 end;
- } __packed v2;
- };
-} __packed;
-
-static u32 memconsole_baseaddr;
+static char *memconsole_baseaddr;
static size_t memconsole_length;
static ssize_t memconsole_read(struct file *filp, struct kobject *kobp,
struct bin_attribute *bin_attr, char *buf,
loff_t pos, size_t count)
{
- char *memconsole;
- ssize_t ret;
-
- memconsole = ioremap_cache(memconsole_baseaddr, memconsole_length);
- if (!memconsole) {
- pr_err("memconsole: ioremap_cache failed\n");
- return -ENOMEM;
- }
- ret = memory_read_from_buffer(buf, count, &pos, memconsole,
- memconsole_length);
- iounmap(memconsole);
- return ret;
+ return memory_read_from_buffer(buf, count, &pos, memconsole_baseaddr,
+ memconsole_length);
}
static struct bin_attribute memconsole_bin_attr = {
@@ -68,104 +38,25 @@ static struct bin_attribute memconsole_bin_attr = {
.read = memconsole_read,
};
-
-static void __init found_v1_header(struct biosmemcon_ebda *hdr)
-{
- pr_info("BIOS console v1 EBDA structure found at %p\n", hdr);
- pr_info("BIOS console buffer at 0x%.8x, "
- "start = %d, end = %d, num = %d\n",
- hdr->v1.buffer_addr, hdr->v1.start,
- hdr->v1.end, hdr->v1.num_chars);
-
- memconsole_length = hdr->v1.num_chars;
- memconsole_baseaddr = hdr->v1.buffer_addr;
-}
-
-static void __init found_v2_header(struct biosmemcon_ebda *hdr)
-{
- pr_info("BIOS console v2 EBDA structure found at %p\n", hdr);
- pr_info("BIOS console buffer at 0x%.8x, "
- "start = %d, end = %d, num_bytes = %d\n",
- hdr->v2.buffer_addr, hdr->v2.start,
- hdr->v2.end, hdr->v2.num_bytes);
-
- memconsole_length = hdr->v2.end - hdr->v2.start;
- memconsole_baseaddr = hdr->v2.buffer_addr + hdr->v2.start;
-}
-
-/*
- * Search through the EBDA for the BIOS Memory Console, and
- * set the global variables to point to it. Return true if found.
- */
-static bool __init found_memconsole(void)
+void memconsole_setup(void *baseaddr, size_t length)
{
- unsigned int address;
- size_t length, cur;
-
- address = get_bios_ebda();
- if (!address) {
- pr_info("BIOS EBDA non-existent.\n");
- return false;
- }
-
- /* EBDA length is byte 0 of EBDA (in KB) */
- length = *(u8 *)phys_to_virt(address);
- length <<= 10; /* convert to bytes */
-
- /*
- * Search through EBDA for BIOS memory console structure
- * note: signature is not necessarily dword-aligned
- */
- for (cur = 0; cur < length; cur++) {
- struct biosmemcon_ebda *hdr = phys_to_virt(address + cur);
-
- /* memconsole v1 */
- if (hdr->signature == BIOS_MEMCONSOLE_V1_MAGIC) {
- found_v1_header(hdr);
- return true;
- }
-
- /* memconsole v2 */
- if (hdr->signature == BIOS_MEMCONSOLE_V2_MAGIC) {
- found_v2_header(hdr);
- return true;
- }
- }
-
- pr_info("BIOS console EBDA structure not found!\n");
- return false;
+ memconsole_baseaddr = baseaddr;
+ memconsole_length = length;
}
+EXPORT_SYMBOL(memconsole_setup);
-static struct dmi_system_id memconsole_dmi_table[] __initdata = {
- {
- .ident = "Google Board",
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "Google, Inc."),
- },
- },
- {}
-};
-MODULE_DEVICE_TABLE(dmi, memconsole_dmi_table);
-
-static int __init memconsole_init(void)
+int memconsole_sysfs_init(void)
{
- if (!dmi_check_system(memconsole_dmi_table))
- return -ENODEV;
-
- if (!found_memconsole())
- return -ENODEV;
-
memconsole_bin_attr.size = memconsole_length;
return sysfs_create_bin_file(firmware_kobj, &memconsole_bin_attr);
}
+EXPORT_SYMBOL(memconsole_sysfs_init);
-static void __exit memconsole_exit(void)
+void memconsole_exit(void)
{
sysfs_remove_bin_file(firmware_kobj, &memconsole_bin_attr);
}
-
-module_init(memconsole_init);
-module_exit(memconsole_exit);
+EXPORT_SYMBOL(memconsole_exit);
MODULE_AUTHOR("Google, Inc.");
MODULE_LICENSE("GPL");
diff --git a/drivers/firmware/google/memconsole.h b/drivers/firmware/google/memconsole.h
new file mode 100644
index 000000000000..190fc03a51ae
--- /dev/null
+++ b/drivers/firmware/google/memconsole.h
@@ -0,0 +1,43 @@
+/*
+ * memconsole.h
+ *
+ * Internal headers of the memory based BIOS console.
+ *
+ * Copyright 2017 Google Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License v2.0 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __FIRMWARE_GOOGLE_MEMCONSOLE_H
+#define __FIRMWARE_GOOGLE_MEMCONSOLE_H
+
+/*
+ * memconsole_setup
+ *
+ * Initialize the memory console from raw (virtual) base
+ * address and length.
+ */
+void memconsole_setup(void *baseaddr, size_t length);
+
+/*
+ * memconsole_sysfs_init
+ *
+ * Update memory console length and create binary file
+ * for firmware object.
+ */
+int memconsole_sysfs_init(void);
+
+/* memconsole_exit
+ *
+ * Remove the binary file from the firmware sysfs object.
+ */
+void memconsole_exit(void);
+
+#endif /* __FIRMWARE_GOOGLE_MEMCONSOLE_H */
diff --git a/drivers/firmware/google/vpd.c b/drivers/firmware/google/vpd.c
new file mode 100644
index 000000000000..3ce813110d5e
--- /dev/null
+++ b/drivers/firmware/google/vpd.c
@@ -0,0 +1,332 @@
+/*
+ * vpd.c
+ *
+ * Driver for exporting VPD content to sysfs.
+ *
+ * Copyright 2017 Google Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License v2.0 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/ctype.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/kobject.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+
+#include "coreboot_table.h"
+#include "vpd_decode.h"
+
+#define CB_TAG_VPD 0x2c
+#define VPD_CBMEM_MAGIC 0x43524f53
+
+static struct kobject *vpd_kobj;
+
+struct vpd_cbmem {
+ u32 magic;
+ u32 version;
+ u32 ro_size;
+ u32 rw_size;
+ u8 blob[0];
+};
+
+struct vpd_section {
+ bool enabled;
+ const char *name;
+ char *raw_name; /* the string name_raw */
+ struct kobject *kobj; /* vpd/name directory */
+ char *baseaddr;
+ struct bin_attribute bin_attr; /* vpd/name_raw bin_attribute */
+ struct list_head attribs; /* key/value in vpd_attrib_info list */
+};
+
+struct vpd_attrib_info {
+ char *key;
+ const char *value;
+ struct bin_attribute bin_attr;
+ struct list_head list;
+};
+
+static struct vpd_section ro_vpd;
+static struct vpd_section rw_vpd;
+
+static ssize_t vpd_attrib_read(struct file *filp, struct kobject *kobp,
+ struct bin_attribute *bin_attr, char *buf,
+ loff_t pos, size_t count)
+{
+ struct vpd_attrib_info *info = bin_attr->private;
+
+ return memory_read_from_buffer(buf, count, &pos, info->value,
+ info->bin_attr.size);
+}
+
+/*
+ * vpd_section_check_key_name()
+ *
+ * The VPD specification supports only [a-zA-Z0-9_]+ characters in key names,
+ * but old firmware versions may have entries like "S/N" which are problematic
+ * when exported as sysfs attributes. Such keys from old firmware are ignored.
+ *
+ * Returns VPD_OK for a valid key name, VPD_FAIL otherwise.
+ *
+ * @key: The key name to check
+ * @key_len: key name length
+ */
+static int vpd_section_check_key_name(const u8 *key, s32 key_len)
+{
+ int c;
+
+ while (key_len-- > 0) {
+ c = *key++;
+
+ if (!isalnum(c) && c != '_')
+ return VPD_FAIL;
+ }
+
+ return VPD_OK;
+}
+
+static int vpd_section_attrib_add(const u8 *key, s32 key_len,
+ const u8 *value, s32 value_len,
+ void *arg)
+{
+ int ret;
+ struct vpd_section *sec = arg;
+ struct vpd_attrib_info *info;
+
+ /*
+ * Return VPD_OK immediately to decode next entry if the current key
+ * name contains invalid characters.
+ */
+ if (vpd_section_check_key_name(key, key_len) != VPD_OK)
+ return VPD_OK;
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+ info->key = kzalloc(key_len + 1, GFP_KERNEL);
+ if (!info->key) {
+ kfree(info);
+ return -ENOMEM;
+ }
+
+ memcpy(info->key, key, key_len);
+
+ sysfs_bin_attr_init(&info->bin_attr);
+ info->bin_attr.attr.name = info->key;
+ info->bin_attr.attr.mode = 0444;
+ info->bin_attr.size = value_len;
+ info->bin_attr.read = vpd_attrib_read;
+ info->bin_attr.private = info;
+
+ info->value = value;
+
+ INIT_LIST_HEAD(&info->list);
+ list_add_tail(&info->list, &sec->attribs);
+
+ ret = sysfs_create_bin_file(sec->kobj, &info->bin_attr);
+ if (ret) {
+ list_del(&info->list);
+ kfree(info->key);
+ kfree(info);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void vpd_section_attrib_destroy(struct vpd_section *sec)
+{
+ struct vpd_attrib_info *info;
+ struct vpd_attrib_info *temp;
+
+ list_for_each_entry_safe(info, temp, &sec->attribs, list) {
+ kfree(info->key);
+ sysfs_remove_bin_file(sec->kobj, &info->bin_attr);
+ kfree(info);
+ }
+}
+
+static ssize_t vpd_section_read(struct file *filp, struct kobject *kobp,
+ struct bin_attribute *bin_attr, char *buf,
+ loff_t pos, size_t count)
+{
+ struct vpd_section *sec = bin_attr->private;
+
+ return memory_read_from_buffer(buf, count, &pos, sec->baseaddr,
+ sec->bin_attr.size);
+}
+
+static int vpd_section_create_attribs(struct vpd_section *sec)
+{
+ s32 consumed;
+ int ret;
+
+ consumed = 0;
+ do {
+ ret = vpd_decode_string(sec->bin_attr.size, sec->baseaddr,
+ &consumed, vpd_section_attrib_add, sec);
+ } while (ret == VPD_OK);
+
+ return 0;
+}
+
+static int vpd_section_init(const char *name, struct vpd_section *sec,
+ phys_addr_t physaddr, size_t size)
+{
+ int ret;
+ int raw_len;
+
+ sec->baseaddr = memremap(physaddr, size, MEMREMAP_WB);
+ if (!sec->baseaddr)
+ return -ENOMEM;
+
+ sec->name = name;
+
+ /* We want to export the raw partition with the name ${name}_raw */
+ raw_len = strlen(name) + 5;
+ sec->raw_name = kzalloc(raw_len, GFP_KERNEL);
+ if (!sec->raw_name) {
+ ret = -ENOMEM;
+ goto free_sec;
+ }
+ strncpy(sec->raw_name, name, raw_len);
+ strncat(sec->raw_name, "_raw", raw_len);
+
+ sysfs_bin_attr_init(&sec->bin_attr);
+ sec->bin_attr.attr.name = sec->raw_name;
+ sec->bin_attr.attr.mode = 0444;
+ sec->bin_attr.size = size;
+ sec->bin_attr.read = vpd_section_read;
+ sec->bin_attr.private = sec;
+
+ ret = sysfs_create_bin_file(vpd_kobj, &sec->bin_attr);
+ if (ret)
+ goto free_sec;
+
+ sec->kobj = kobject_create_and_add(name, vpd_kobj);
+ if (!sec->kobj) {
+ ret = -EINVAL;
+ goto sysfs_remove;
+ }
+
+ INIT_LIST_HEAD(&sec->attribs);
+ vpd_section_create_attribs(sec);
+
+ sec->enabled = true;
+
+ return 0;
+
+sysfs_remove:
+ sysfs_remove_bin_file(vpd_kobj, &sec->bin_attr);
+
+free_sec:
+ kfree(sec->raw_name);
+ memunmap(sec->baseaddr);
+
+ return ret;
+}
+
+static int vpd_section_destroy(struct vpd_section *sec)
+{
+ if (sec->enabled) {
+ vpd_section_attrib_destroy(sec);
+ kobject_del(sec->kobj);
+ sysfs_remove_bin_file(vpd_kobj, &sec->bin_attr);
+ kfree(sec->raw_name);
+ memunmap(sec->baseaddr);
+ }
+
+ return 0;
+}
+
+static int vpd_sections_init(phys_addr_t physaddr)
+{
+ struct vpd_cbmem __iomem *temp;
+ struct vpd_cbmem header;
+ int ret = 0;
+
+ temp = memremap(physaddr, sizeof(struct vpd_cbmem), MEMREMAP_WB);
+ if (!temp)
+ return -ENOMEM;
+
+ memcpy_fromio(&header, temp, sizeof(struct vpd_cbmem));
+ memunmap(temp);
+
+ if (header.magic != VPD_CBMEM_MAGIC)
+ return -ENODEV;
+
+ if (header.ro_size) {
+ ret = vpd_section_init("ro", &ro_vpd,
+ physaddr + sizeof(struct vpd_cbmem),
+ header.ro_size);
+ if (ret)
+ return ret;
+ }
+
+ if (header.rw_size) {
+ ret = vpd_section_init("rw", &rw_vpd,
+ physaddr + sizeof(struct vpd_cbmem) +
+ header.ro_size, header.rw_size);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int vpd_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct lb_cbmem_ref entry;
+
+ ret = coreboot_table_find(CB_TAG_VPD, &entry, sizeof(entry));
+ if (ret)
+ return ret;
+
+ return vpd_sections_init(entry.cbmem_addr);
+}
+
+static struct platform_driver vpd_driver = {
+ .probe = vpd_probe,
+ .driver = {
+ .name = "vpd",
+ },
+};
+
+static int __init vpd_platform_init(void)
+{
+ struct platform_device *pdev;
+
+ pdev = platform_device_register_simple("vpd", -1, NULL, 0);
+ if (IS_ERR(pdev))
+ return PTR_ERR(pdev);
+
+ vpd_kobj = kobject_create_and_add("vpd", firmware_kobj);
+ if (!vpd_kobj)
+ return -ENOMEM;
+
+ memset(&ro_vpd, 0, sizeof(ro_vpd));
+ memset(&rw_vpd, 0, sizeof(rw_vpd));
+
+ platform_driver_register(&vpd_driver);
+
+ return 0;
+}
+
+static void __exit vpd_platform_exit(void)
+{
+ vpd_section_destroy(&ro_vpd);
+ vpd_section_destroy(&rw_vpd);
+ kobject_del(vpd_kobj);
+}
+
+module_init(vpd_platform_init);
+module_exit(vpd_platform_exit);
+
+MODULE_AUTHOR("Google, Inc.");
+MODULE_LICENSE("GPL");
diff --git a/drivers/firmware/google/vpd_decode.c b/drivers/firmware/google/vpd_decode.c
new file mode 100644
index 000000000000..943acaa8aa76
--- /dev/null
+++ b/drivers/firmware/google/vpd_decode.c
@@ -0,0 +1,99 @@
+/*
+ * vpd_decode.c
+ *
+ * Google VPD decoding routines.
+ *
+ * Copyright 2017 Google Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License v2.0 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/export.h>
+
+#include "vpd_decode.h"
+
+static int vpd_decode_len(const s32 max_len, const u8 *in,
+ s32 *length, s32 *decoded_len)
+{
+ u8 more;
+ int i = 0;
+
+ if (!length || !decoded_len)
+ return VPD_FAIL;
+
+ *length = 0;
+ do {
+ if (i >= max_len)
+ return VPD_FAIL;
+
+ more = in[i] & 0x80;
+ *length <<= 7;
+ *length |= in[i] & 0x7f;
+ ++i;
+ } while (more);
+
+ *decoded_len = i;
+
+ return VPD_OK;
+}
+
+int vpd_decode_string(const s32 max_len, const u8 *input_buf, s32 *consumed,
+ vpd_decode_callback callback, void *callback_arg)
+{
+ int type;
+ int res;
+ s32 key_len;
+ s32 value_len;
+ s32 decoded_len;
+ const u8 *key;
+ const u8 *value;
+
+ /* type */
+ if (*consumed >= max_len)
+ return VPD_FAIL;
+
+ type = input_buf[*consumed];
+
+ switch (type) {
+ case VPD_TYPE_INFO:
+ case VPD_TYPE_STRING:
+ (*consumed)++;
+
+ /* key */
+ res = vpd_decode_len(max_len - *consumed, &input_buf[*consumed],
+ &key_len, &decoded_len);
+ if (res != VPD_OK || *consumed + decoded_len >= max_len)
+ return VPD_FAIL;
+
+ *consumed += decoded_len;
+ key = &input_buf[*consumed];
+ *consumed += key_len;
+
+ /* value */
+ res = vpd_decode_len(max_len - *consumed, &input_buf[*consumed],
+ &value_len, &decoded_len);
+ if (res != VPD_OK || *consumed + decoded_len > max_len)
+ return VPD_FAIL;
+
+ *consumed += decoded_len;
+ value = &input_buf[*consumed];
+ *consumed += value_len;
+
+ if (type == VPD_TYPE_STRING)
+ return callback(key, key_len, value, value_len,
+ callback_arg);
+ break;
+
+ default:
+ return VPD_FAIL;
+ }
+
+ return VPD_OK;
+}
diff --git a/drivers/firmware/google/vpd_decode.h b/drivers/firmware/google/vpd_decode.h
new file mode 100644
index 000000000000..be3d62c5ca2f
--- /dev/null
+++ b/drivers/firmware/google/vpd_decode.h
@@ -0,0 +1,58 @@
+/*
+ * vpd_decode.h
+ *
+ * Google VPD decoding routines.
+ *
+ * Copyright 2017 Google Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License v2.0 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __VPD_DECODE_H
+#define __VPD_DECODE_H
+
+#include <linux/types.h>
+
+enum {
+ VPD_OK = 0,
+ VPD_FAIL,
+};
+
+enum {
+ VPD_TYPE_TERMINATOR = 0,
+ VPD_TYPE_STRING,
+ VPD_TYPE_INFO = 0xfe,
+ VPD_TYPE_IMPLICIT_TERMINATOR = 0xff,
+};
+
+/* Callback for vpd_decode_string to invoke. */
+typedef int vpd_decode_callback(const u8 *key, s32 key_len,
+ const u8 *value, s32 value_len,
+ void *arg);
+
+/*
+ * vpd_decode_string
+ *
+ * Given the encoded string, this function invokes the callback with the
+ * extracted (key, value) pair. *consumed is incremented by the number of
+ * bytes consumed by this function.
+ *
+ * The input_buf points to the first byte of the input buffer.
+ *
+ * The *consumed starts at 0, i.e. the index of the next byte to be decoded,
+ * and may be non-zero when the function is called repeatedly on the same
+ * buffer.
+ *
+ * If one entry is successfully decoded, it is passed to the callback and the
+ * callback's result is returned.
+ */
+int vpd_decode_string(const s32 max_len, const u8 *input_buf, s32 *consumed,
+ vpd_decode_callback callback, void *callback_arg);
+
+#endif /* __VPD_DECODE_H */
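A small usage sketch for the decoder above; the blob and the dump_entry() callback are made up for illustration. Length fields in the VPD stream use a 7-bit continuation encoding: bit 7 set means another length byte follows, and each byte contributes its low 7 bits, most significant group first, so the sequence 0x81 0x05 decodes to (0x01 << 7) | 0x05 = 133 and consumes two bytes.

#include <linux/kernel.h>

#include "vpd_decode.h"

/* Hypothetical callback: print each decoded key/value pair */
static int dump_entry(const u8 *key, s32 key_len,
                      const u8 *value, s32 value_len, void *arg)
{
        pr_info("VPD %.*s = %.*s\n", key_len, (const char *)key,
                value_len, (const char *)value);
        return VPD_OK;          /* keep decoding */
}

static void dump_blob(const u8 *blob, s32 blob_len)
{
        s32 consumed = 0;

        /* Stops at the terminator entry or on malformed input */
        while (vpd_decode_string(blob_len, blob, &consumed,
                                 dump_entry, NULL) == VPD_OK)
                ;
}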
diff --git a/drivers/firmware/meson/meson_sm.c b/drivers/firmware/meson/meson_sm.c
index b0d254930ed3..ff204421117b 100644
--- a/drivers/firmware/meson/meson_sm.c
+++ b/drivers/firmware/meson/meson_sm.c
@@ -127,6 +127,7 @@ EXPORT_SYMBOL(meson_sm_call);
* meson_sm_call_read - retrieve data from secure-monitor
*
* @buffer: Buffer to store the retrieved data
+ * @bsize: Size of the buffer
* @cmd_index: Index of the SMC32 function ID
* @arg0: SMC32 Argument 0
* @arg1: SMC32 Argument 1
@@ -135,11 +136,14 @@ EXPORT_SYMBOL(meson_sm_call);
* @arg4: SMC32 Argument 4
*
* Return: size of read data on success, a negative value on error
+ * When 0 is returned there is no guarantee about the amount of
+ * data read; bsize bytes are copied into the buffer.
*/
-int meson_sm_call_read(void *buffer, unsigned int cmd_index, u32 arg0,
- u32 arg1, u32 arg2, u32 arg3, u32 arg4)
+int meson_sm_call_read(void *buffer, unsigned int bsize, unsigned int cmd_index,
+ u32 arg0, u32 arg1, u32 arg2, u32 arg3, u32 arg4)
{
u32 size;
+ int ret;
if (!fw.chip)
return -ENOENT;
@@ -147,16 +151,24 @@ int meson_sm_call_read(void *buffer, unsigned int cmd_index, u32 arg0,
if (!fw.chip->cmd_shmem_out_base)
return -EINVAL;
+ if (bsize > fw.chip->shmem_size)
+ return -EINVAL;
+
if (meson_sm_call(cmd_index, &size, arg0, arg1, arg2, arg3, arg4) < 0)
return -EINVAL;
- if (!size || size > fw.chip->shmem_size)
+ if (size > bsize)
return -EINVAL;
+ ret = size;
+
+ if (!size)
+ size = bsize;
+
if (buffer)
memcpy(buffer, fw.sm_shmem_out_base, size);
- return size;
+ return ret;
}
EXPORT_SYMBOL(meson_sm_call_read);
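A minimal caller sketch for the updated read contract; SM_EXAMPLE_READ is a placeholder, as real command indexes come from the per-chip command table. A return value greater than zero is the size reported by the firmware, zero means the firmware did not report a size and bsize bytes were copied, and a negative value is an error.

#include <linux/firmware/meson/meson_sm.h>

#define SM_EXAMPLE_READ 0       /* hypothetical command index */

static int example_read(void *dst, unsigned int len)
{
        int ret;

        ret = meson_sm_call_read(dst, len, SM_EXAMPLE_READ, 0, 0, 0, 0, 0);
        if (ret < 0)
                return ret;     /* call failed or len exceeds shmem size */

        /* 0 means "size unknown": len bytes were copied into dst */
        return ret ? ret : len;
}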
diff --git a/drivers/firmware/qcom_scm-32.c b/drivers/firmware/qcom_scm-32.c
index 8ad226c60374..93e3b96b6dfa 100644
--- a/drivers/firmware/qcom_scm-32.c
+++ b/drivers/firmware/qcom_scm-32.c
@@ -578,3 +578,21 @@ int __qcom_scm_set_remote_state(struct device *dev, u32 state, u32 id)
return ret ? : le32_to_cpu(scm_ret);
}
+
+int __qcom_scm_restore_sec_cfg(struct device *dev, u32 device_id,
+ u32 spare)
+{
+ return -ENODEV;
+}
+
+int __qcom_scm_iommu_secure_ptbl_size(struct device *dev, u32 spare,
+ size_t *size)
+{
+ return -ENODEV;
+}
+
+int __qcom_scm_iommu_secure_ptbl_init(struct device *dev, u64 addr, u32 size,
+ u32 spare)
+{
+ return -ENODEV;
+}
diff --git a/drivers/firmware/qcom_scm-64.c b/drivers/firmware/qcom_scm-64.c
index c9332590e8c6..6e6d561708e2 100644
--- a/drivers/firmware/qcom_scm-64.c
+++ b/drivers/firmware/qcom_scm-64.c
@@ -381,3 +381,61 @@ int __qcom_scm_set_remote_state(struct device *dev, u32 state, u32 id)
return ret ? : res.a1;
}
+
+int __qcom_scm_restore_sec_cfg(struct device *dev, u32 device_id, u32 spare)
+{
+ struct qcom_scm_desc desc = {0};
+ struct arm_smccc_res res;
+ int ret;
+
+ desc.args[0] = device_id;
+ desc.args[1] = spare;
+ desc.arginfo = QCOM_SCM_ARGS(2);
+
+ ret = qcom_scm_call(dev, QCOM_SCM_SVC_MP, QCOM_SCM_RESTORE_SEC_CFG,
+ &desc, &res);
+
+ return ret ? : res.a1;
+}
+
+int __qcom_scm_iommu_secure_ptbl_size(struct device *dev, u32 spare,
+ size_t *size)
+{
+ struct qcom_scm_desc desc = {0};
+ struct arm_smccc_res res;
+ int ret;
+
+ desc.args[0] = spare;
+ desc.arginfo = QCOM_SCM_ARGS(1);
+
+ ret = qcom_scm_call(dev, QCOM_SCM_SVC_MP,
+ QCOM_SCM_IOMMU_SECURE_PTBL_SIZE, &desc, &res);
+
+ if (size)
+ *size = res.a1;
+
+ return ret ? : res.a2;
+}
+
+int __qcom_scm_iommu_secure_ptbl_init(struct device *dev, u64 addr, u32 size,
+ u32 spare)
+{
+ struct qcom_scm_desc desc = {0};
+ struct arm_smccc_res res;
+ int ret;
+
+ desc.args[0] = addr;
+ desc.args[1] = size;
+ desc.args[2] = spare;
+ desc.arginfo = QCOM_SCM_ARGS(3, QCOM_SCM_RW, QCOM_SCM_VAL,
+ QCOM_SCM_VAL);
+
+ ret = qcom_scm_call(dev, QCOM_SCM_SVC_MP,
+ QCOM_SCM_IOMMU_SECURE_PTBL_INIT, &desc, &res);
+
+ /* the pg table has been initialized already, ignore the error */
+ if (ret == -EPERM)
+ ret = 0;
+
+ return ret;
+}
diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c
index d987bcc7489d..bb16510d75ba 100644
--- a/drivers/firmware/qcom_scm.c
+++ b/drivers/firmware/qcom_scm.c
@@ -315,6 +315,24 @@ static const struct reset_control_ops qcom_scm_pas_reset_ops = {
.deassert = qcom_scm_pas_reset_deassert,
};
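+/*
+ * Wrappers around the SCM memory-protection (MP) service, exported for use
+ * by other drivers (typically an IOMMU driver). An illustrative sequence,
+ * where "addr" is a hypothetical physical address of preallocated memory:
+ *
+ *	size_t psize;
+ *
+ *	qcom_scm_iommu_secure_ptbl_size(0, &psize);
+ *	qcom_scm_iommu_secure_ptbl_init(addr, psize, 0);
+ */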
+int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare)
+{
+ return __qcom_scm_restore_sec_cfg(__scm->dev, device_id, spare);
+}
+EXPORT_SYMBOL(qcom_scm_restore_sec_cfg);
+
+int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size)
+{
+ return __qcom_scm_iommu_secure_ptbl_size(__scm->dev, spare, size);
+}
+EXPORT_SYMBOL(qcom_scm_iommu_secure_ptbl_size);
+
+int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare)
+{
+ return __qcom_scm_iommu_secure_ptbl_init(__scm->dev, addr, size, spare);
+}
+EXPORT_SYMBOL(qcom_scm_iommu_secure_ptbl_init);
+
/**
* qcom_scm_is_available() - Checks if SCM is available
*/
diff --git a/drivers/firmware/qcom_scm.h b/drivers/firmware/qcom_scm.h
index 6a0f15469344..9bea691f30fb 100644
--- a/drivers/firmware/qcom_scm.h
+++ b/drivers/firmware/qcom_scm.h
@@ -85,4 +85,15 @@ static inline int qcom_scm_remap_error(int err)
return -EINVAL;
}
+#define QCOM_SCM_SVC_MP 0xc
+#define QCOM_SCM_RESTORE_SEC_CFG 2
+extern int __qcom_scm_restore_sec_cfg(struct device *dev, u32 device_id,
+ u32 spare);
+#define QCOM_SCM_IOMMU_SECURE_PTBL_SIZE 3
+#define QCOM_SCM_IOMMU_SECURE_PTBL_INIT 4
+extern int __qcom_scm_iommu_secure_ptbl_size(struct device *dev, u32 spare,
+ size_t *size);
+extern int __qcom_scm_iommu_secure_ptbl_init(struct device *dev, u64 addr,
+ u32 size, u32 spare);
+
#endif
diff --git a/drivers/fpga/Kconfig b/drivers/fpga/Kconfig
index ce861a2853a4..161ba9dccede 100644
--- a/drivers/fpga/Kconfig
+++ b/drivers/fpga/Kconfig
@@ -20,6 +20,12 @@ config FPGA_REGION
FPGA Regions allow loading FPGA images under control of
the Device Tree.
+config FPGA_MGR_ICE40_SPI
+ tristate "Lattice iCE40 SPI"
+ depends on OF && SPI
+ help
+ FPGA manager driver support for Lattice iCE40 FPGAs over SPI.
+
config FPGA_MGR_SOCFPGA
tristate "Altera SOCFPGA FPGA Manager"
depends on ARCH_SOCFPGA || COMPILE_TEST
@@ -33,6 +39,20 @@ config FPGA_MGR_SOCFPGA_A10
help
FPGA manager driver support for Altera Arria10 SoCFPGA.
+config FPGA_MGR_TS73XX
+ tristate "Technologic Systems TS-73xx SBC FPGA Manager"
+ depends on ARCH_EP93XX && MACH_TS72XX
+ help
+ FPGA manager driver support for the Altera Cyclone II FPGA
+ present on the TS-73xx SBC boards.
+
+config FPGA_MGR_XILINX_SPI
+ tristate "Xilinx Configuration over Slave Serial (SPI)"
+ depends on SPI
+ help
+ FPGA manager driver support for Xilinx FPGA configuration
+ over slave serial interface.
+
config FPGA_MGR_ZYNQ_FPGA
tristate "Xilinx Zynq FPGA"
depends on ARCH_ZYNQ || COMPILE_TEST
@@ -63,6 +83,28 @@ config ALTERA_FREEZE_BRIDGE
isolate one region of the FPGA from the busses while that
region is being reprogrammed.
+config ALTERA_PR_IP_CORE
+ tristate "Altera Partial Reconfiguration IP Core"
+ help
+ Core driver support for Altera Partial Reconfiguration IP component
+
+config ALTERA_PR_IP_CORE_PLAT
+ tristate "Platform support of Altera Partial Reconfiguration IP Core"
+ depends on ALTERA_PR_IP_CORE && OF && HAS_IOMEM
+ help
+ Platform driver support for Altera Partial Reconfiguration IP
+ component
+
+config XILINX_PR_DECOUPLER
+ tristate "Xilinx LogiCORE PR Decoupler"
+ depends on FPGA_BRIDGE
+ depends on HAS_IOMEM
+ help
+ Say Y to enable drivers for Xilinx LogiCORE PR Decoupler.
+ The PR Decoupler exists in the FPGA fabric to isolate one
+ region of the FPGA from the busses while that region is
+ being reprogrammed during partial reconfig.
+
endif # FPGA
endmenu
diff --git a/drivers/fpga/Makefile b/drivers/fpga/Makefile
index 8df07bcf42a6..2a4f0218145c 100644
--- a/drivers/fpga/Makefile
+++ b/drivers/fpga/Makefile
@@ -6,14 +6,20 @@
obj-$(CONFIG_FPGA) += fpga-mgr.o
# FPGA Manager Drivers
+obj-$(CONFIG_FPGA_MGR_ICE40_SPI) += ice40-spi.o
obj-$(CONFIG_FPGA_MGR_SOCFPGA) += socfpga.o
obj-$(CONFIG_FPGA_MGR_SOCFPGA_A10) += socfpga-a10.o
+obj-$(CONFIG_FPGA_MGR_TS73XX) += ts73xx-fpga.o
+obj-$(CONFIG_FPGA_MGR_XILINX_SPI) += xilinx-spi.o
obj-$(CONFIG_FPGA_MGR_ZYNQ_FPGA) += zynq-fpga.o
+obj-$(CONFIG_ALTERA_PR_IP_CORE) += altera-pr-ip-core.o
+obj-$(CONFIG_ALTERA_PR_IP_CORE_PLAT) += altera-pr-ip-core-plat.o
# FPGA Bridge Drivers
obj-$(CONFIG_FPGA_BRIDGE) += fpga-bridge.o
obj-$(CONFIG_SOCFPGA_FPGA_BRIDGE) += altera-hps2fpga.o altera-fpga2sdram.o
obj-$(CONFIG_ALTERA_FREEZE_BRIDGE) += altera-freeze-bridge.o
+obj-$(CONFIG_XILINX_PR_DECOUPLER) += xilinx-pr-decoupler.o
# High Level Interfaces
obj-$(CONFIG_FPGA_REGION) += fpga-region.o
diff --git a/drivers/fpga/altera-freeze-bridge.c b/drivers/fpga/altera-freeze-bridge.c
index 8dcd9fb22cb9..6159cfcf78a2 100644
--- a/drivers/fpga/altera-freeze-bridge.c
+++ b/drivers/fpga/altera-freeze-bridge.c
@@ -28,6 +28,7 @@
#define FREEZE_CSR_REG_VERSION 12
#define FREEZE_CSR_SUPPORTED_VERSION 2
+#define FREEZE_CSR_OFFICIAL_VERSION 0xad000003
#define FREEZE_CSR_STATUS_FREEZE_REQ_DONE BIT(0)
#define FREEZE_CSR_STATUS_UNFREEZE_REQ_DONE BIT(1)
@@ -203,7 +204,7 @@ static int altera_freeze_br_enable_show(struct fpga_bridge *bridge)
return priv->enable;
}
-static struct fpga_bridge_ops altera_freeze_br_br_ops = {
+static const struct fpga_bridge_ops altera_freeze_br_br_ops = {
.enable_set = altera_freeze_br_enable_set,
.enable_show = altera_freeze_br_enable_show,
};
@@ -218,6 +219,7 @@ static int altera_freeze_br_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np = pdev->dev.of_node;
+ void __iomem *base_addr;
struct altera_freeze_br_data *priv;
struct resource *res;
u32 status, revision;
@@ -225,26 +227,32 @@ static int altera_freeze_br_probe(struct platform_device *pdev)
if (!np)
return -ENODEV;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base_addr = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base_addr))
+ return PTR_ERR(base_addr);
+
+ revision = readl(base_addr + FREEZE_CSR_REG_VERSION);
+ if ((revision != FREEZE_CSR_SUPPORTED_VERSION) &&
+ (revision != FREEZE_CSR_OFFICIAL_VERSION)) {
+ dev_err(dev,
+ "%s unexpected revision 0x%x != 0x%x != 0x%x\n",
+ __func__, revision, FREEZE_CSR_SUPPORTED_VERSION,
+ FREEZE_CSR_OFFICIAL_VERSION);
+ return -EINVAL;
+ }
+
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->dev = dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->base_addr = devm_ioremap_resource(dev, res);
- if (IS_ERR(priv->base_addr))
- return PTR_ERR(priv->base_addr);
-
- status = readl(priv->base_addr + FREEZE_CSR_STATUS_OFFSET);
+ status = readl(base_addr + FREEZE_CSR_STATUS_OFFSET);
if (status & FREEZE_CSR_STATUS_UNFREEZE_REQ_DONE)
priv->enable = 1;
- revision = readl(priv->base_addr + FREEZE_CSR_REG_VERSION);
- if (revision != FREEZE_CSR_SUPPORTED_VERSION)
- dev_warn(dev,
- "%s Freeze Controller unexpected revision %d != %d\n",
- __func__, revision, FREEZE_CSR_SUPPORTED_VERSION);
+ priv->base_addr = base_addr;
return fpga_bridge_register(dev, FREEZE_BRIDGE_NAME,
&altera_freeze_br_br_ops, priv);
diff --git a/drivers/fpga/altera-hps2fpga.c b/drivers/fpga/altera-hps2fpga.c
index 4b354c79be31..3066b805f2d0 100644
--- a/drivers/fpga/altera-hps2fpga.c
+++ b/drivers/fpga/altera-hps2fpga.c
@@ -181,15 +181,18 @@ static int alt_fpga_bridge_probe(struct platform_device *pdev)
(enable ? "enabling" : "disabling"));
ret = _alt_hps2fpga_enable_set(priv, enable);
- if (ret) {
- fpga_bridge_unregister(&pdev->dev);
- return ret;
- }
+ if (ret)
+ goto err;
}
}
- return fpga_bridge_register(dev, priv->name, &altera_hps2fpga_br_ops,
- priv);
+ ret = fpga_bridge_register(dev, priv->name, &altera_hps2fpga_br_ops,
+ priv);
+err:
+ if (ret)
+ clk_disable_unprepare(priv->clk);
+
+ return ret;
}
static int alt_fpga_bridge_remove(struct platform_device *pdev)
diff --git a/drivers/fpga/altera-pr-ip-core-plat.c b/drivers/fpga/altera-pr-ip-core-plat.c
new file mode 100644
index 000000000000..8fb36b8b4648
--- /dev/null
+++ b/drivers/fpga/altera-pr-ip-core-plat.c
@@ -0,0 +1,68 @@
+/*
+ * Driver for Altera Partial Reconfiguration IP Core
+ *
+ * Copyright (C) 2016-2017 Intel Corporation
+ *
+ * Based on socfpga-a10.c Copyright (C) 2015-2016 Altera Corporation
+ * by Alan Tull <atull@opensource.altera.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/fpga/altera-pr-ip-core.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+
+static int alt_pr_platform_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ void __iomem *reg_base;
+ struct resource *res;
+
+ /* First mmio base is for register access */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ reg_base = devm_ioremap_resource(dev, res);
+
+ if (IS_ERR(reg_base))
+ return PTR_ERR(reg_base);
+
+ return alt_pr_register(dev, reg_base);
+}
+
+static int alt_pr_platform_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+
+ return alt_pr_unregister(dev);
+}
+
+static const struct of_device_id alt_pr_of_match[] = {
+ { .compatible = "altr,a10-pr-ip", },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, alt_pr_of_match);
+
+static struct platform_driver alt_pr_platform_driver = {
+ .probe = alt_pr_platform_probe,
+ .remove = alt_pr_platform_remove,
+ .driver = {
+ .name = "alt_a10_pr_ip",
+ .of_match_table = alt_pr_of_match,
+ },
+};
+
+module_platform_driver(alt_pr_platform_driver);
+MODULE_AUTHOR("Matthew Gerlach <matthew.gerlach@linux.intel.com>");
+MODULE_DESCRIPTION("Altera Partial Reconfiguration IP Platform Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/fpga/altera-pr-ip-core.c b/drivers/fpga/altera-pr-ip-core.c
new file mode 100644
index 000000000000..a7b31f9797ce
--- /dev/null
+++ b/drivers/fpga/altera-pr-ip-core.c
@@ -0,0 +1,220 @@
+/*
+ * Driver for Altera Partial Reconfiguration IP Core
+ *
+ * Copyright (C) 2016-2017 Intel Corporation
+ *
+ * Based on socfpga-a10.c Copyright (C) 2015-2016 Altera Corporation
+ * by Alan Tull <atull@opensource.altera.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/delay.h>
+#include <linux/fpga/altera-pr-ip-core.h>
+#include <linux/fpga/fpga-mgr.h>
+#include <linux/module.h>
+
+#define ALT_PR_DATA_OFST 0x00
+#define ALT_PR_CSR_OFST 0x04
+
+#define ALT_PR_CSR_PR_START BIT(0)
+#define ALT_PR_CSR_STATUS_SFT 2
+#define ALT_PR_CSR_STATUS_MSK (7 << ALT_PR_CSR_STATUS_SFT)
+#define ALT_PR_CSR_STATUS_NRESET (0 << ALT_PR_CSR_STATUS_SFT)
+#define ALT_PR_CSR_STATUS_PR_ERR (1 << ALT_PR_CSR_STATUS_SFT)
+#define ALT_PR_CSR_STATUS_CRC_ERR (2 << ALT_PR_CSR_STATUS_SFT)
+#define ALT_PR_CSR_STATUS_BAD_BITS (3 << ALT_PR_CSR_STATUS_SFT)
+#define ALT_PR_CSR_STATUS_PR_IN_PROG (4 << ALT_PR_CSR_STATUS_SFT)
+#define ALT_PR_CSR_STATUS_PR_SUCCESS (5 << ALT_PR_CSR_STATUS_SFT)
+
+struct alt_pr_priv {
+ void __iomem *reg_base;
+};
+
+static enum fpga_mgr_states alt_pr_fpga_state(struct fpga_manager *mgr)
+{
+ struct alt_pr_priv *priv = mgr->priv;
+ const char *err = "unknown";
+ enum fpga_mgr_states ret = FPGA_MGR_STATE_UNKNOWN;
+ u32 val;
+
+ val = readl(priv->reg_base + ALT_PR_CSR_OFST);
+
+ val &= ALT_PR_CSR_STATUS_MSK;
+
+ switch (val) {
+ case ALT_PR_CSR_STATUS_NRESET:
+ return FPGA_MGR_STATE_RESET;
+
+ case ALT_PR_CSR_STATUS_PR_ERR:
+ err = "pr error";
+ ret = FPGA_MGR_STATE_WRITE_ERR;
+ break;
+
+ case ALT_PR_CSR_STATUS_CRC_ERR:
+ err = "crc error";
+ ret = FPGA_MGR_STATE_WRITE_ERR;
+ break;
+
+ case ALT_PR_CSR_STATUS_BAD_BITS:
+ err = "bad bits";
+ ret = FPGA_MGR_STATE_WRITE_ERR;
+ break;
+
+ case ALT_PR_CSR_STATUS_PR_IN_PROG:
+ return FPGA_MGR_STATE_WRITE;
+
+ case ALT_PR_CSR_STATUS_PR_SUCCESS:
+ return FPGA_MGR_STATE_OPERATING;
+
+ default:
+ break;
+ }
+
+ dev_err(&mgr->dev, "encountered error code %d (%s) in %s()\n",
+ val, err, __func__);
+ return ret;
+}
+
+static int alt_pr_fpga_write_init(struct fpga_manager *mgr,
+ struct fpga_image_info *info,
+ const char *buf, size_t count)
+{
+ struct alt_pr_priv *priv = mgr->priv;
+ u32 val;
+
+ if (!(info->flags & FPGA_MGR_PARTIAL_RECONFIG)) {
+ dev_err(&mgr->dev, "%s Partial Reconfiguration flag not set\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ val = readl(priv->reg_base + ALT_PR_CSR_OFST);
+
+ if (val & ALT_PR_CSR_PR_START) {
+ dev_err(&mgr->dev,
+ "%s Partial Reconfiguration already started\n",
+ __func__);
+ return -EINVAL;
+ }
+
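+ /* Setting the start bit kicks off the partial reconfiguration */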
+ writel(val | ALT_PR_CSR_PR_START, priv->reg_base + ALT_PR_CSR_OFST);
+
+ return 0;
+}
+
+static int alt_pr_fpga_write(struct fpga_manager *mgr, const char *buf,
+ size_t count)
+{
+ struct alt_pr_priv *priv = mgr->priv;
+ u32 *buffer_32 = (u32 *)buf;
+ size_t i = 0;
+
+ if (count <= 0)
+ return -EINVAL;
+
+ /* Write out the complete 32-bit chunks */
+ while (count >= sizeof(u32)) {
+ writel(buffer_32[i++], priv->reg_base);
+ count -= sizeof(u32);
+ }
+
+ /* Write out remaining non 32-bit chunks */
+ switch (count) {
+ case 3:
+ writel(buffer_32[i++] & 0x00ffffff, priv->reg_base);
+ break;
+ case 2:
+ writel(buffer_32[i++] & 0x0000ffff, priv->reg_base);
+ break;
+ case 1:
+ writel(buffer_32[i++] & 0x000000ff, priv->reg_base);
+ break;
+ case 0:
+ break;
+ default:
+ /* This will never happen */
+ return -EFAULT;
+ }
+
+ if (alt_pr_fpga_state(mgr) == FPGA_MGR_STATE_WRITE_ERR)
+ return -EIO;
+
+ return 0;
+}
+
+static int alt_pr_fpga_write_complete(struct fpga_manager *mgr,
+ struct fpga_image_info *info)
+{
+ u32 i = 0;
+
+ do {
+ switch (alt_pr_fpga_state(mgr)) {
+ case FPGA_MGR_STATE_WRITE_ERR:
+ return -EIO;
+
+ case FPGA_MGR_STATE_OPERATING:
+ dev_info(&mgr->dev,
+ "successful partial reconfiguration\n");
+ return 0;
+
+ default:
+ break;
+ }
+ udelay(1);
+ } while (info->config_complete_timeout_us > i++);
+
+ dev_err(&mgr->dev, "timed out waiting for write to complete\n");
+ return -ETIMEDOUT;
+}
+
+static const struct fpga_manager_ops alt_pr_ops = {
+ .state = alt_pr_fpga_state,
+ .write_init = alt_pr_fpga_write_init,
+ .write = alt_pr_fpga_write,
+ .write_complete = alt_pr_fpga_write_complete,
+};
+
+int alt_pr_register(struct device *dev, void __iomem *reg_base)
+{
+ struct alt_pr_priv *priv;
+ u32 val;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->reg_base = reg_base;
+
+ val = readl(priv->reg_base + ALT_PR_CSR_OFST);
+
+ dev_dbg(dev, "%s status=%d start=%d\n", __func__,
+ (val & ALT_PR_CSR_STATUS_MSK) >> ALT_PR_CSR_STATUS_SFT,
+ (int)(val & ALT_PR_CSR_PR_START));
+
+ return fpga_mgr_register(dev, dev_name(dev), &alt_pr_ops, priv);
+}
+EXPORT_SYMBOL_GPL(alt_pr_register);
+
+int alt_pr_unregister(struct device *dev)
+{
+ dev_dbg(dev, "%s\n", __func__);
+
+ fpga_mgr_unregister(dev);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(alt_pr_unregister);
+
+MODULE_AUTHOR("Matthew Gerlach <matthew.gerlach@linux.intel.com>");
+MODULE_DESCRIPTION("Altera Partial Reconfiguration IP Core");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/fpga/fpga-bridge.c b/drivers/fpga/fpga-bridge.c
index 33ee83e6373c..9651aa56244a 100644
--- a/drivers/fpga/fpga-bridge.c
+++ b/drivers/fpga/fpga-bridge.c
@@ -27,7 +27,7 @@ static DEFINE_IDA(fpga_bridge_ida);
static struct class *fpga_bridge_class;
/* Lock for adding/removing bridges to linked lists*/
-spinlock_t bridge_list_lock;
+static spinlock_t bridge_list_lock;
static int fpga_bridge_of_node_match(struct device *dev, const void *data)
{
@@ -146,11 +146,9 @@ EXPORT_SYMBOL_GPL(fpga_bridge_put);
int fpga_bridges_enable(struct list_head *bridge_list)
{
struct fpga_bridge *bridge;
- struct list_head *node;
int ret;
- list_for_each(node, bridge_list) {
- bridge = list_entry(node, struct fpga_bridge, node);
+ list_for_each_entry(bridge, bridge_list, node) {
ret = fpga_bridge_enable(bridge);
if (ret)
return ret;
@@ -172,11 +170,9 @@ EXPORT_SYMBOL_GPL(fpga_bridges_enable);
int fpga_bridges_disable(struct list_head *bridge_list)
{
struct fpga_bridge *bridge;
- struct list_head *node;
int ret;
- list_for_each(node, bridge_list) {
- bridge = list_entry(node, struct fpga_bridge, node);
+ list_for_each_entry(bridge, bridge_list, node) {
ret = fpga_bridge_disable(bridge);
if (ret)
return ret;
@@ -196,13 +192,10 @@ EXPORT_SYMBOL_GPL(fpga_bridges_disable);
*/
void fpga_bridges_put(struct list_head *bridge_list)
{
- struct fpga_bridge *bridge;
- struct list_head *node, *next;
+ struct fpga_bridge *bridge, *next;
unsigned long flags;
- list_for_each_safe(node, next, bridge_list) {
- bridge = list_entry(node, struct fpga_bridge, node);
-
+ list_for_each_entry_safe(bridge, next, bridge_list, node) {
fpga_bridge_put(bridge);
spin_lock_irqsave(&bridge_list_lock, flags);
diff --git a/drivers/fpga/fpga-mgr.c b/drivers/fpga/fpga-mgr.c
index 86d2cb203533..188ffefa3cc3 100644
--- a/drivers/fpga/fpga-mgr.c
+++ b/drivers/fpga/fpga-mgr.c
@@ -361,7 +361,7 @@ static struct attribute *fpga_mgr_attrs[] = {
};
ATTRIBUTE_GROUPS(fpga_mgr);
-struct fpga_manager *__fpga_mgr_get(struct device *dev)
+static struct fpga_manager *__fpga_mgr_get(struct device *dev)
{
struct fpga_manager *mgr;
int ret = -ENODEV;
diff --git a/drivers/fpga/fpga-region.c b/drivers/fpga/fpga-region.c
index 3222fdbad75a..3b6b2f4182a1 100644
--- a/drivers/fpga/fpga-region.c
+++ b/drivers/fpga/fpga-region.c
@@ -245,7 +245,8 @@ static int fpga_region_program_fpga(struct fpga_region *region,
mgr = fpga_region_get_manager(region);
if (IS_ERR(mgr)) {
pr_err("failed to get fpga region manager\n");
- return PTR_ERR(mgr);
+ ret = PTR_ERR(mgr);
+ goto err_put_region;
}
ret = fpga_region_get_bridges(region, overlay);
@@ -281,6 +282,7 @@ err_put_br:
fpga_bridges_put(&region->bridge_list);
err_put_mgr:
fpga_mgr_put(mgr);
+err_put_region:
fpga_region_put(region);
return ret;
@@ -337,8 +339,9 @@ static int child_regions_with_firmware(struct device_node *overlay)
* The overlay must add either firmware-name or external-fpga-config property
* to the FPGA Region.
*
- * firmware-name : program the FPGA
- * external-fpga-config : FPGA is already programmed
+ * firmware-name : program the FPGA
+ * external-fpga-config : FPGA is already programmed
+ * encrypted-fpga-config : FPGA bitstream is encrypted
*
* The overlay can add other FPGA regions, but child FPGA regions cannot have a
* firmware-name property since those regions don't exist yet.
@@ -373,6 +376,9 @@ static int fpga_region_notify_pre_apply(struct fpga_region *region,
if (of_property_read_bool(nd->overlay, "external-fpga-config"))
info->flags |= FPGA_MGR_EXTERNAL_CONFIG;
+ if (of_property_read_bool(nd->overlay, "encrypted-fpga-config"))
+ info->flags |= FPGA_MGR_ENCRYPTED_BITSTREAM;
+
of_property_read_string(nd->overlay, "firmware-name", &firmware_name);
of_property_read_u32(nd->overlay, "region-unfreeze-timeout-us",
@@ -381,6 +387,9 @@ static int fpga_region_notify_pre_apply(struct fpga_region *region,
of_property_read_u32(nd->overlay, "region-freeze-timeout-us",
&info->disable_timeout_us);
+ of_property_read_u32(nd->overlay, "config-complete-timeout-us",
+ &info->config_complete_timeout_us);
+
/* If FPGA was externally programmed, don't specify firmware */
if ((info->flags & FPGA_MGR_EXTERNAL_CONFIG) && firmware_name) {
pr_err("error: specified firmware and external-fpga-config");
diff --git a/drivers/fpga/ice40-spi.c b/drivers/fpga/ice40-spi.c
new file mode 100644
index 000000000000..7fca82023062
--- /dev/null
+++ b/drivers/fpga/ice40-spi.c
@@ -0,0 +1,207 @@
+/*
+ * FPGA Manager Driver for Lattice iCE40.
+ *
+ * Copyright (c) 2016 Joel Holdsworth
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This driver adds support to the FPGA manager for configuring the SRAM of
+ * Lattice iCE40 FPGAs through slave SPI.
+ */
+
+#include <linux/fpga/fpga-mgr.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of_gpio.h>
+#include <linux/spi/spi.h>
+#include <linux/stringify.h>
+
+#define ICE40_SPI_MAX_SPEED 25000000 /* Hz */
+#define ICE40_SPI_MIN_SPEED 1000000 /* Hz */
+
+#define ICE40_SPI_RESET_DELAY 1 /* us (>200ns) */
+#define ICE40_SPI_HOUSEKEEPING_DELAY 1200 /* us */
+
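+/* At least 49 further SPI clock cycles are required after CDONE goes high */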
+#define ICE40_SPI_NUM_ACTIVATION_BYTES DIV_ROUND_UP(49, 8)
+
+struct ice40_fpga_priv {
+ struct spi_device *dev;
+ struct gpio_desc *reset;
+ struct gpio_desc *cdone;
+};
+
+static enum fpga_mgr_states ice40_fpga_ops_state(struct fpga_manager *mgr)
+{
+ struct ice40_fpga_priv *priv = mgr->priv;
+
+ return gpiod_get_value(priv->cdone) ? FPGA_MGR_STATE_OPERATING :
+ FPGA_MGR_STATE_UNKNOWN;
+}
+
+static int ice40_fpga_ops_write_init(struct fpga_manager *mgr,
+ struct fpga_image_info *info,
+ const char *buf, size_t count)
+{
+ struct ice40_fpga_priv *priv = mgr->priv;
+ struct spi_device *dev = priv->dev;
+ struct spi_message message;
+ struct spi_transfer assert_cs_then_reset_delay = {
+ .cs_change = 1,
+ .delay_usecs = ICE40_SPI_RESET_DELAY
+ };
+ struct spi_transfer housekeeping_delay_then_release_cs = {
+ .delay_usecs = ICE40_SPI_HOUSEKEEPING_DELAY
+ };
+ int ret;
+
+ if ((info->flags & FPGA_MGR_PARTIAL_RECONFIG)) {
+ dev_err(&dev->dev,
+ "Partial reconfiguration is not supported\n");
+ return -ENOTSUPP;
+ }
+
+ /* Lock the bus, assert CRESET_B and SS_B and delay >200ns */
+ spi_bus_lock(dev->master);
+
+ gpiod_set_value(priv->reset, 1);
+
+ spi_message_init(&message);
+ spi_message_add_tail(&assert_cs_then_reset_delay, &message);
+ ret = spi_sync_locked(dev, &message);
+
+ /* Come out of reset */
+ gpiod_set_value(priv->reset, 0);
+
+ /* Abort if the chip-select failed */
+ if (ret)
+ goto fail;
+
+ /* Check CDONE is de-asserted i.e. the FPGA is reset */
+ if (gpiod_get_value(priv->cdone)) {
+ dev_err(&dev->dev, "Device reset failed, CDONE is asserted\n");
+ ret = -EIO;
+ goto fail;
+ }
+
+ /* Wait for the housekeeping to complete, and release SS_B */
+ spi_message_init(&message);
+ spi_message_add_tail(&housekeeping_delay_then_release_cs, &message);
+ ret = spi_sync_locked(dev, &message);
+
+fail:
+ spi_bus_unlock(dev->master);
+
+ return ret;
+}
+
+static int ice40_fpga_ops_write(struct fpga_manager *mgr,
+ const char *buf, size_t count)
+{
+ struct ice40_fpga_priv *priv = mgr->priv;
+
+ return spi_write(priv->dev, buf, count);
+}
+
+static int ice40_fpga_ops_write_complete(struct fpga_manager *mgr,
+ struct fpga_image_info *info)
+{
+ struct ice40_fpga_priv *priv = mgr->priv;
+ struct spi_device *dev = priv->dev;
+ const u8 padding[ICE40_SPI_NUM_ACTIVATION_BYTES] = {0};
+
+ /* Check CDONE is asserted */
+ if (!gpiod_get_value(priv->cdone)) {
+ dev_err(&dev->dev,
+ "CDONE was not asserted after firmware transfer\n");
+ return -EIO;
+ }
+
+ /* Send zero padding to activate the firmware */
+ return spi_write(dev, padding, sizeof(padding));
+}
+
+static const struct fpga_manager_ops ice40_fpga_ops = {
+ .state = ice40_fpga_ops_state,
+ .write_init = ice40_fpga_ops_write_init,
+ .write = ice40_fpga_ops_write,
+ .write_complete = ice40_fpga_ops_write_complete,
+};
+
+static int ice40_fpga_probe(struct spi_device *spi)
+{
+ struct device *dev = &spi->dev;
+ struct ice40_fpga_priv *priv;
+ int ret;
+
+ priv = devm_kzalloc(&spi->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->dev = spi;
+
+ /* Check board setup data. */
+ if (spi->max_speed_hz > ICE40_SPI_MAX_SPEED) {
+ dev_err(dev, "SPI speed is too high, maximum speed is "
+ __stringify(ICE40_SPI_MAX_SPEED) "\n");
+ return -EINVAL;
+ }
+
+ if (spi->max_speed_hz < ICE40_SPI_MIN_SPEED) {
+ dev_err(dev, "SPI speed is too low, minimum speed is "
+ __stringify(ICE40_SPI_MIN_SPEED) "\n");
+ return -EINVAL;
+ }
+
+ if (spi->mode & SPI_CPHA) {
+ dev_err(dev, "Bad SPI mode, CPHA not supported\n");
+ return -EINVAL;
+ }
+
+ /* Set up the GPIOs */
+ priv->cdone = devm_gpiod_get(dev, "cdone", GPIOD_IN);
+ if (IS_ERR(priv->cdone)) {
+ ret = PTR_ERR(priv->cdone);
+ dev_err(dev, "Failed to get CDONE GPIO: %d\n", ret);
+ return ret;
+ }
+
+ priv->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(priv->reset)) {
+ ret = PTR_ERR(priv->reset);
+ dev_err(dev, "Failed to get CRESET_B GPIO: %d\n", ret);
+ return ret;
+ }
+
+ /* Register with the FPGA manager */
+ return fpga_mgr_register(dev, "Lattice iCE40 FPGA Manager",
+ &ice40_fpga_ops, priv);
+}
+
+static int ice40_fpga_remove(struct spi_device *spi)
+{
+ fpga_mgr_unregister(&spi->dev);
+ return 0;
+}
+
+static const struct of_device_id ice40_fpga_of_match[] = {
+ { .compatible = "lattice,ice40-fpga-mgr", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ice40_fpga_of_match);
+
+static struct spi_driver ice40_fpga_driver = {
+ .probe = ice40_fpga_probe,
+ .remove = ice40_fpga_remove,
+ .driver = {
+ .name = "ice40spi",
+ .of_match_table = of_match_ptr(ice40_fpga_of_match),
+ },
+};
+
+module_spi_driver(ice40_fpga_driver);
+
+MODULE_AUTHOR("Joel Holdsworth <joel@airwebreathe.org.uk>");
+MODULE_DESCRIPTION("Lattice iCE40 FPGA Manager");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/fpga/ts73xx-fpga.c b/drivers/fpga/ts73xx-fpga.c
new file mode 100644
index 000000000000..f6a96b42e2ca
--- /dev/null
+++ b/drivers/fpga/ts73xx-fpga.c
@@ -0,0 +1,156 @@
+/*
+ * Technologic Systems TS-73xx SBC FPGA loader
+ *
+ * Copyright (C) 2016 Florian Fainelli <f.fainelli@gmail.com>
+ *
+ * FPGA Manager Driver for the on-board Altera Cyclone II FPGA found on
+ * TS-7300, heavily based on load_fpga.c in their vendor tree.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/string.h>
+#include <linux/iopoll.h>
+#include <linux/fpga/fpga-mgr.h>
+
+#define TS73XX_FPGA_DATA_REG 0
+#define TS73XX_FPGA_CONFIG_REG 1
+
+#define TS73XX_FPGA_WRITE_DONE 0x1
+#define TS73XX_FPGA_WRITE_DONE_TIMEOUT 1000 /* us */
+#define TS73XX_FPGA_RESET 0x2
+#define TS73XX_FPGA_RESET_LOW_DELAY 30 /* us */
+#define TS73XX_FPGA_RESET_HIGH_DELAY 80 /* us */
+#define TS73XX_FPGA_LOAD_OK 0x4
+#define TS73XX_FPGA_CONFIG_LOAD 0x8
+
+struct ts73xx_fpga_priv {
+ void __iomem *io_base;
+ struct device *dev;
+};
+
+static enum fpga_mgr_states ts73xx_fpga_state(struct fpga_manager *mgr)
+{
+ return FPGA_MGR_STATE_UNKNOWN;
+}
+
+static int ts73xx_fpga_write_init(struct fpga_manager *mgr,
+ struct fpga_image_info *info,
+ const char *buf, size_t count)
+{
+ struct ts73xx_fpga_priv *priv = mgr->priv;
+
+ /* Reset the FPGA */
+ writeb(0, priv->io_base + TS73XX_FPGA_CONFIG_REG);
+ udelay(TS73XX_FPGA_RESET_LOW_DELAY);
+ writeb(TS73XX_FPGA_RESET, priv->io_base + TS73XX_FPGA_CONFIG_REG);
+ udelay(TS73XX_FPGA_RESET_HIGH_DELAY);
+
+ return 0;
+}
+
+static int ts73xx_fpga_write(struct fpga_manager *mgr, const char *buf,
+ size_t count)
+{
+ struct ts73xx_fpga_priv *priv = mgr->priv;
+ size_t i = 0;
+ int ret;
+ u8 reg;
+
+ while (count--) {
+ ret = readb_poll_timeout(priv->io_base + TS73XX_FPGA_CONFIG_REG,
+ reg, !(reg & TS73XX_FPGA_WRITE_DONE),
+ 1, TS73XX_FPGA_WRITE_DONE_TIMEOUT);
+ if (ret < 0)
+ return ret;
+
+ writeb(buf[i], priv->io_base + TS73XX_FPGA_DATA_REG);
+ i++;
+ }
+
+ return 0;
+}
+
+static int ts73xx_fpga_write_complete(struct fpga_manager *mgr,
+ struct fpga_image_info *info)
+{
+ struct ts73xx_fpga_priv *priv = mgr->priv;
+ u8 reg;
+
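+ /*
+  * Pulse the CONFIG_LOAD bit to latch the bitstream, then check that the
+  * FPGA reports a successful load via the LOAD_OK bit.
+  */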
+ usleep_range(1000, 2000);
+ reg = readb(priv->io_base + TS73XX_FPGA_CONFIG_REG);
+ reg |= TS73XX_FPGA_CONFIG_LOAD;
+ writeb(reg, priv->io_base + TS73XX_FPGA_CONFIG_REG);
+
+ usleep_range(1000, 2000);
+ reg = readb(priv->io_base + TS73XX_FPGA_CONFIG_REG);
+ reg &= ~TS73XX_FPGA_CONFIG_LOAD;
+ writeb(reg, priv->io_base + TS73XX_FPGA_CONFIG_REG);
+
+ reg = readb(priv->io_base + TS73XX_FPGA_CONFIG_REG);
+ if ((reg & TS73XX_FPGA_LOAD_OK) != TS73XX_FPGA_LOAD_OK)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static const struct fpga_manager_ops ts73xx_fpga_ops = {
+ .state = ts73xx_fpga_state,
+ .write_init = ts73xx_fpga_write_init,
+ .write = ts73xx_fpga_write,
+ .write_complete = ts73xx_fpga_write_complete,
+};
+
+static int ts73xx_fpga_probe(struct platform_device *pdev)
+{
+ struct device *kdev = &pdev->dev;
+ struct ts73xx_fpga_priv *priv;
+ struct resource *res;
+
+ priv = devm_kzalloc(kdev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->dev = kdev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->io_base = devm_ioremap_resource(kdev, res);
+ if (IS_ERR(priv->io_base)) {
+ dev_err(kdev, "unable to remap registers\n");
+ return PTR_ERR(priv->io_base);
+ }
+
+ return fpga_mgr_register(kdev, "TS-73xx FPGA Manager",
+ &ts73xx_fpga_ops, priv);
+}
+
+static int ts73xx_fpga_remove(struct platform_device *pdev)
+{
+ fpga_mgr_unregister(&pdev->dev);
+
+ return 0;
+}
+
+static struct platform_driver ts73xx_fpga_driver = {
+ .driver = {
+ .name = "ts73xx-fpga-mgr",
+ },
+ .probe = ts73xx_fpga_probe,
+ .remove = ts73xx_fpga_remove,
+};
+module_platform_driver(ts73xx_fpga_driver);
+
+MODULE_AUTHOR("Florian Fainelli <f.fainelli@gmail.com>");
+MODULE_DESCRIPTION("TS-73xx FPGA Manager driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/fpga/xilinx-pr-decoupler.c b/drivers/fpga/xilinx-pr-decoupler.c
new file mode 100644
index 000000000000..e359930bebc8
--- /dev/null
+++ b/drivers/fpga/xilinx-pr-decoupler.c
@@ -0,0 +1,161 @@
+/*
+ * Copyright (c) 2017, National Instruments Corp.
+ * Copyright (c) 2017, Xilinx Inc
+ *
+ * FPGA Bridge Driver for the Xilinx LogiCORE Partial Reconfiguration
+ * Decoupler IP Core.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/of_device.h>
+#include <linux/module.h>
+#include <linux/fpga/fpga-bridge.h>
+
+#define CTRL_CMD_DECOUPLE BIT(0)
+#define CTRL_CMD_COUPLE 0
+#define CTRL_OFFSET 0
+
+struct xlnx_pr_decoupler_data {
+ void __iomem *io_base;
+ struct clk *clk;
+};
+
+static inline void xlnx_pr_decoupler_write(struct xlnx_pr_decoupler_data *d,
+ u32 offset, u32 val)
+{
+ writel(val, d->io_base + offset);
+}
+
+static inline u32 xlnx_pr_decouple_read(const struct xlnx_pr_decoupler_data *d,
+ u32 offset)
+{
+ return readl(d->io_base + offset);
+}
+
+static int xlnx_pr_decoupler_enable_set(struct fpga_bridge *bridge, bool enable)
+{
+ int err;
+ struct xlnx_pr_decoupler_data *priv = bridge->priv;
+
+ err = clk_enable(priv->clk);
+ if (err)
+ return err;
+
+ if (enable)
+ xlnx_pr_decoupler_write(priv, CTRL_OFFSET, CTRL_CMD_COUPLE);
+ else
+ xlnx_pr_decoupler_write(priv, CTRL_OFFSET, CTRL_CMD_DECOUPLE);
+
+ clk_disable(priv->clk);
+
+ return 0;
+}
+
+static int xlnx_pr_decoupler_enable_show(struct fpga_bridge *bridge)
+{
+ const struct xlnx_pr_decoupler_data *priv = bridge->priv;
+ u32 status;
+ int err;
+
+ err = clk_enable(priv->clk);
+ if (err)
+ return err;
+
+ status = readl(priv->io_base);
+
+ clk_disable(priv->clk);
+
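+ /* The register reads non-zero while decoupled: enabled means zero */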
+ return !status;
+}
+
+static struct fpga_bridge_ops xlnx_pr_decoupler_br_ops = {
+ .enable_set = xlnx_pr_decoupler_enable_set,
+ .enable_show = xlnx_pr_decoupler_enable_show,
+};
+
+static const struct of_device_id xlnx_pr_decoupler_of_match[] = {
+ { .compatible = "xlnx,pr-decoupler-1.00", },
+ { .compatible = "xlnx,pr-decoupler", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, xlnx_pr_decoupler_of_match);
+
+static int xlnx_pr_decoupler_probe(struct platform_device *pdev)
+{
+ struct xlnx_pr_decoupler_data *priv;
+ int err;
+ struct resource *res;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->io_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(priv->io_base))
+ return PTR_ERR(priv->io_base);
+
+ priv->clk = devm_clk_get(&pdev->dev, "aclk");
+ if (IS_ERR(priv->clk)) {
+ dev_err(&pdev->dev, "input clock not found\n");
+ return PTR_ERR(priv->clk);
+ }
+
+ err = clk_prepare_enable(priv->clk);
+ if (err) {
+ dev_err(&pdev->dev, "unable to enable clock\n");
+ return err;
+ }
+
+ clk_disable(priv->clk);
+
+ err = fpga_bridge_register(&pdev->dev, "Xilinx PR Decoupler",
+ &xlnx_pr_decoupler_br_ops, priv);
+
+ if (err) {
+ dev_err(&pdev->dev, "unable to register Xilinx PR Decoupler");
+ clk_unprepare(priv->clk);
+ return err;
+ }
+
+ return 0;
+}
+
+static int xlnx_pr_decoupler_remove(struct platform_device *pdev)
+{
+ struct fpga_bridge *bridge = platform_get_drvdata(pdev);
+ struct xlnx_pr_decoupler_data *p = bridge->priv;
+
+ fpga_bridge_unregister(&pdev->dev);
+
+ clk_unprepare(p->clk);
+
+ return 0;
+}
+
+static struct platform_driver xlnx_pr_decoupler_driver = {
+ .probe = xlnx_pr_decoupler_probe,
+ .remove = xlnx_pr_decoupler_remove,
+ .driver = {
+ .name = "xlnx_pr_decoupler",
+ .of_match_table = of_match_ptr(xlnx_pr_decoupler_of_match),
+ },
+};
+
+module_platform_driver(xlnx_pr_decoupler_driver);
+
+MODULE_DESCRIPTION("Xilinx Partial Reconfiguration Decoupler");
+MODULE_AUTHOR("Moritz Fischer <mdf@kernel.org>");
+MODULE_AUTHOR("Michal Simek <michal.simek@xilinx.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/fpga/xilinx-spi.c b/drivers/fpga/xilinx-spi.c
new file mode 100644
index 000000000000..9b62a4c2a3df
--- /dev/null
+++ b/drivers/fpga/xilinx-spi.c
@@ -0,0 +1,198 @@
+/*
+ * Xilinx Spartan6 Slave Serial SPI Driver
+ *
+ * Copyright (C) 2017 DENX Software Engineering
+ *
+ * Anatolij Gustschin <agust@denx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * Manage Xilinx FPGA firmware that is loaded over SPI using
+ * the slave serial configuration interface.
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/fpga/fpga-mgr.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/of.h>
+#include <linux/spi/spi.h>
+#include <linux/sizes.h>
+
+struct xilinx_spi_conf {
+ struct spi_device *spi;
+ struct gpio_desc *prog_b;
+ struct gpio_desc *done;
+};
+
+static enum fpga_mgr_states xilinx_spi_state(struct fpga_manager *mgr)
+{
+ struct xilinx_spi_conf *conf = mgr->priv;
+
+ if (!gpiod_get_value(conf->done))
+ return FPGA_MGR_STATE_RESET;
+
+ return FPGA_MGR_STATE_UNKNOWN;
+}
+
+static int xilinx_spi_write_init(struct fpga_manager *mgr,
+ struct fpga_image_info *info,
+ const char *buf, size_t count)
+{
+ struct xilinx_spi_conf *conf = mgr->priv;
+ const size_t prog_latency_7500us = 7500;
+ const size_t prog_pulse_1us = 1;
+
+ if (info->flags & FPGA_MGR_PARTIAL_RECONFIG) {
+ dev_err(&mgr->dev, "Partial reconfiguration not supported.\n");
+ return -EINVAL;
+ }
+
+ gpiod_set_value(conf->prog_b, 1);
+
+ udelay(prog_pulse_1us); /* min is 500 ns */
+
+ gpiod_set_value(conf->prog_b, 0);
+
+ if (gpiod_get_value(conf->done)) {
+ dev_err(&mgr->dev, "Unexpected DONE pin state...\n");
+ return -EIO;
+ }
+
+ /* program latency */
+ usleep_range(prog_latency_7500us, prog_latency_7500us + 100);
+ return 0;
+}
+
+static int xilinx_spi_write(struct fpga_manager *mgr, const char *buf,
+ size_t count)
+{
+ struct xilinx_spi_conf *conf = mgr->priv;
+ const char *fw_data = buf;
+ const char *fw_data_end = fw_data + count;
+
+ while (fw_data < fw_data_end) {
+ size_t remaining, stride;
+ int ret;
+
+ remaining = fw_data_end - fw_data;
+ stride = min_t(size_t, remaining, SZ_4K);
+
+ ret = spi_write(conf->spi, fw_data, stride);
+ if (ret) {
+ dev_err(&mgr->dev, "SPI error in firmware write: %d\n",
+ ret);
+ return ret;
+ }
+ fw_data += stride;
+ }
+
+ return 0;
+}
+
+static int xilinx_spi_apply_cclk_cycles(struct xilinx_spi_conf *conf)
+{
+ struct spi_device *spi = conf->spi;
+ const u8 din_data[1] = { 0xff };
+ int ret;
+
+ ret = spi_write(conf->spi, din_data, sizeof(din_data));
+ if (ret)
+ dev_err(&spi->dev, "applying CCLK cycles failed: %d\n", ret);
+
+ return ret;
+}
+
+static int xilinx_spi_write_complete(struct fpga_manager *mgr,
+ struct fpga_image_info *info)
+{
+ struct xilinx_spi_conf *conf = mgr->priv;
+ unsigned long timeout;
+ int ret;
+
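+ /*
+  * Keep applying CCLK cycles (dummy SPI writes) until the DONE pin goes
+  * high or the configured timeout expires.
+  */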
+ if (gpiod_get_value(conf->done))
+ return xilinx_spi_apply_cclk_cycles(conf);
+
+ timeout = jiffies + usecs_to_jiffies(info->config_complete_timeout_us);
+
+ while (time_before(jiffies, timeout)) {
+
+ ret = xilinx_spi_apply_cclk_cycles(conf);
+ if (ret)
+ return ret;
+
+ if (gpiod_get_value(conf->done))
+ return xilinx_spi_apply_cclk_cycles(conf);
+ }
+
+ dev_err(&mgr->dev, "Timeout after config data transfer.\n");
+ return -ETIMEDOUT;
+}
+
+static const struct fpga_manager_ops xilinx_spi_ops = {
+ .state = xilinx_spi_state,
+ .write_init = xilinx_spi_write_init,
+ .write = xilinx_spi_write,
+ .write_complete = xilinx_spi_write_complete,
+};
+
+static int xilinx_spi_probe(struct spi_device *spi)
+{
+ struct xilinx_spi_conf *conf;
+
+ conf = devm_kzalloc(&spi->dev, sizeof(*conf), GFP_KERNEL);
+ if (!conf)
+ return -ENOMEM;
+
+ conf->spi = spi;
+
+ /* PROGRAM_B is active low */
+ conf->prog_b = devm_gpiod_get(&spi->dev, "prog_b", GPIOD_OUT_LOW);
+ if (IS_ERR(conf->prog_b)) {
+ dev_err(&spi->dev, "Failed to get PROGRAM_B gpio: %ld\n",
+ PTR_ERR(conf->prog_b));
+ return PTR_ERR(conf->prog_b);
+ }
+
+ conf->done = devm_gpiod_get(&spi->dev, "done", GPIOD_IN);
+ if (IS_ERR(conf->done)) {
+ dev_err(&spi->dev, "Failed to get DONE gpio: %ld\n",
+ PTR_ERR(conf->done));
+ return PTR_ERR(conf->done);
+ }
+
+ return fpga_mgr_register(&spi->dev, "Xilinx Slave Serial FPGA Manager",
+ &xilinx_spi_ops, conf);
+}
+
+static int xilinx_spi_remove(struct spi_device *spi)
+{
+ fpga_mgr_unregister(&spi->dev);
+
+ return 0;
+}
+
+static const struct of_device_id xlnx_spi_of_match[] = {
+ { .compatible = "xlnx,fpga-slave-serial", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, xlnx_spi_of_match);
+
+static struct spi_driver xilinx_slave_spi_driver = {
+ .driver = {
+ .name = "xlnx-slave-spi",
+ .of_match_table = of_match_ptr(xlnx_spi_of_match),
+ },
+ .probe = xilinx_spi_probe,
+ .remove = xilinx_spi_remove,
+};
+
+module_spi_driver(xilinx_slave_spi_driver)
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Anatolij Gustschin <agust@denx.de>");
+MODULE_DESCRIPTION("Load Xilinx FPGA firmware over SPI");
diff --git a/drivers/fpga/zynq-fpga.c b/drivers/fpga/zynq-fpga.c
index 34cb98139442..70b15b303471 100644
--- a/drivers/fpga/zynq-fpga.c
+++ b/drivers/fpga/zynq-fpga.c
@@ -72,6 +72,10 @@
#define CTRL_PCAP_PR_MASK BIT(27)
/* Enable PCAP */
#define CTRL_PCAP_MODE_MASK BIT(26)
+/* Lower rate to allow decrypt on the fly */
+#define CTRL_PCAP_RATE_EN_MASK BIT(25)
+/* System booted in secure mode */
+#define CTRL_SEC_EN_MASK BIT(7)
/* Miscellaneous Control Register bit definitions */
/* Internal PCAP loopback */
@@ -266,6 +270,17 @@ static int zynq_fpga_ops_write_init(struct fpga_manager *mgr,
if (err)
return err;
+ /* check if the bitstream is encrypted and the system is still secure */
+ if (info->flags & FPGA_MGR_ENCRYPTED_BITSTREAM) {
+ ctrl = zynq_fpga_read(priv, CTRL_OFFSET);
+ if (!(ctrl & CTRL_SEC_EN_MASK)) {
+ dev_err(&mgr->dev,
+ "System not secure, can't use crypted bitstreams\n");
+ err = -EINVAL;
+ goto out_err;
+ }
+ }
+
/* don't globally reset PL if we're doing partial reconfig */
if (!(info->flags & FPGA_MGR_PARTIAL_RECONFIG)) {
if (!zynq_fpga_has_sync(buf, count)) {
@@ -337,12 +352,19 @@ static int zynq_fpga_ops_write_init(struct fpga_manager *mgr,
/* set configuration register with following options:
* - enable PCAP interface
- * - set throughput for maximum speed
+ * - set throughput for maximum speed (if the bitstream is not encrypted)
* - set CPU in user mode
*/
ctrl = zynq_fpga_read(priv, CTRL_OFFSET);
- zynq_fpga_write(priv, CTRL_OFFSET,
- (CTRL_PCAP_PR_MASK | CTRL_PCAP_MODE_MASK | ctrl));
+ if (info->flags & FPGA_MGR_ENCRYPTED_BITSTREAM)
+ zynq_fpga_write(priv, CTRL_OFFSET,
+ (CTRL_PCAP_PR_MASK | CTRL_PCAP_MODE_MASK
+ | CTRL_PCAP_RATE_EN_MASK | ctrl));
+ else
+ zynq_fpga_write(priv, CTRL_OFFSET,
+ (CTRL_PCAP_PR_MASK | CTRL_PCAP_MODE_MASK
+ | ctrl));
+
/* We expect that the command queue is empty right now. */
status = zynq_fpga_read(priv, STATUS_OFFSET);
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 9b1bcb4d0df7..23ca51ee6b28 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -204,14 +204,15 @@ config GPIO_GE_FPGA
and write pin state) for GPIO implemented in a number of GE single
board computers.
-config GPIO_GEMINI
- bool "Gemini GPIO"
- depends on ARCH_GEMINI
+config GPIO_FTGPIO010
+ bool "Faraday FTGPIO010 GPIO"
depends on OF_GPIO
select GPIO_GENERIC
select GPIOLIB_IRQCHIP
+ default (ARCH_GEMINI || ARCH_MOXART)
help
- Support for common GPIOs found in Cortina systems Gemini platforms.
+ Support for common GPIOs from the Faraday FTGPIO010 IP core, found in
+ Cortina systems Gemini platforms, Moxa ART and others.
config GPIO_GENERIC_PLATFORM
tristate "Generic memory-mapped GPIO controller support (MMIO platform device)"
@@ -308,14 +309,6 @@ config GPIO_MOCKUP
tools/testing/selftests/gpio/gpio-mockup.sh. Reference the usage in
it.
-config GPIO_MOXART
- bool "MOXART GPIO support"
- depends on ARCH_MOXART || COMPILE_TEST
- select GPIO_GENERIC
- help
- Select this option to enable GPIO driver for
- MOXA ART SoC devices.
-
config GPIO_MPC5200
def_bool y
depends on PPC_MPC52xx
@@ -387,6 +380,12 @@ config GPIO_RCAR
help
Say yes here to support GPIO on Renesas R-Car SoCs.
+config GPIO_REG
+ bool
+ help
+ A 32-bit single register GPIO fixed in/out implementation. This
+ can be used to represent any register as a set of GPIO signals.
+
config GPIO_SPEAR_SPICS
bool "ST SPEAr13xx SPI Chip Select as GPIO support"
depends on PLAT_SPEAR
@@ -505,7 +504,7 @@ config GPIO_XILINX
config GPIO_XLP
tristate "Netlogic XLP GPIO support"
- depends on OF_GPIO && (CPU_XLP || ARCH_VULCAN || COMPILE_TEST)
+ depends on OF_GPIO && (CPU_XLP || ARCH_VULCAN || ARCH_THUNDER2 || COMPILE_TEST)
select GPIOLIB_IRQCHIP
help
This driver provides support for GPIO interface on Netlogic XLP MIPS64
@@ -557,7 +556,7 @@ menu "Port-mapped I/O GPIO drivers"
config GPIO_104_DIO_48E
tristate "ACCES 104-DIO-48E GPIO support"
- depends on ISA_BUS_API
+ depends on PC104 && ISA_BUS_API
select GPIOLIB_IRQCHIP
help
Enables GPIO support for the ACCES 104-DIO-48E series (104-DIO-48E,
@@ -567,7 +566,7 @@ config GPIO_104_DIO_48E
config GPIO_104_IDIO_16
tristate "ACCES 104-IDIO-16 GPIO support"
- depends on ISA_BUS_API
+ depends on PC104 && ISA_BUS_API
select GPIOLIB_IRQCHIP
help
Enables GPIO support for the ACCES 104-IDIO-16 family (104-IDIO-16,
@@ -578,7 +577,7 @@ config GPIO_104_IDIO_16
config GPIO_104_IDI_48
tristate "ACCES 104-IDI-48 GPIO support"
- depends on ISA_BUS_API
+ depends on PC104 && ISA_BUS_API
select GPIOLIB_IRQCHIP
help
Enables GPIO support for the ACCES 104-IDI-48 family (104-IDI-48A,
@@ -598,7 +597,7 @@ config GPIO_F7188X
config GPIO_GPIO_MM
tristate "Diamond Systems GPIO-MM GPIO support"
- depends on ISA_BUS_API
+ depends on PC104 && ISA_BUS_API
help
Enables GPIO support for the Diamond Systems GPIO-MM and GPIO-MM-12.
@@ -753,7 +752,7 @@ config GPIO_PCA953X
4 bits: pca9536, pca9537
8 bits: max7310, max7315, pca6107, pca9534, pca9538, pca9554,
- pca9556, pca9557, pca9574, tca6408, xra1202
+ pca9556, pca9557, pca9574, tca6408, tca9554, xra1202
16 bits: max7312, max7313, pca9535, pca9539, pca9555, pca9575,
tca6416
@@ -845,6 +844,17 @@ config GPIO_ARIZONA
help
Support for GPIOs on Wolfson Arizona class devices.
+config GPIO_BD9571MWV
+ tristate "ROHM BD9571 GPIO support"
+ depends on MFD_BD9571MWV
+ help
+ Support for GPIOs on ROHM BD9571 PMIC. There are two GPIOs
+ available on the ROHM PMIC in total, both of which can also
+ generate interrupts.
+
+ This driver can also be built as a module. If so, the module
+ will be called gpio-bd9571mwv.
+
config GPIO_CRYSTAL_COVE
tristate "GPIO support for Crystal Cove PMIC"
depends on (X86 || COMPILE_TEST) && INTEL_SOC_PMIC
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index becb96c724fe..68b96277d9fa 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -33,6 +33,7 @@ obj-$(CONFIG_GPIO_ATH79) += gpio-ath79.o
obj-$(CONFIG_GPIO_ASPEED) += gpio-aspeed.o
obj-$(CONFIG_GPIO_AXP209) += gpio-axp209.o
obj-$(CONFIG_GPIO_BCM_KONA) += gpio-bcm-kona.o
+obj-$(CONFIG_GPIO_BD9571MWV) += gpio-bd9571mwv.o
obj-$(CONFIG_GPIO_BRCMSTB) += gpio-brcmstb.o
obj-$(CONFIG_GPIO_BT8XX) += gpio-bt8xx.o
obj-$(CONFIG_GPIO_CLPS711X) += gpio-clps711x.o
@@ -48,8 +49,8 @@ obj-$(CONFIG_GPIO_EP93XX) += gpio-ep93xx.o
obj-$(CONFIG_GPIO_ETRAXFS) += gpio-etraxfs.o
obj-$(CONFIG_GPIO_EXAR) += gpio-exar.o
obj-$(CONFIG_GPIO_F7188X) += gpio-f7188x.o
+obj-$(CONFIG_GPIO_FTGPIO010) += gpio-ftgpio010.o
obj-$(CONFIG_GPIO_GE_FPGA) += gpio-ge.o
-obj-$(CONFIG_GPIO_GEMINI) += gpio-gemini.o
obj-$(CONFIG_GPIO_GPIO_MM) += gpio-gpio-mm.o
obj-$(CONFIG_GPIO_GRGPIO) += gpio-grgpio.o
obj-$(CONFIG_HTC_EGPIO) += gpio-htc-egpio.o
@@ -80,7 +81,6 @@ obj-$(CONFIG_GPIO_MCP23S08) += gpio-mcp23s08.o
obj-$(CONFIG_GPIO_ML_IOH) += gpio-ml-ioh.o
obj-$(CONFIG_GPIO_MM_LANTIQ) += gpio-mm-lantiq.o
obj-$(CONFIG_GPIO_MOCKUP) += gpio-mockup.o
-obj-$(CONFIG_GPIO_MOXART) += gpio-moxart.o
obj-$(CONFIG_GPIO_MPC5200) += gpio-mpc5200.o
obj-$(CONFIG_GPIO_MPC8XXX) += gpio-mpc8xxx.o
obj-$(CONFIG_GPIO_MSIC) += gpio-msic.o
@@ -99,6 +99,7 @@ obj-$(CONFIG_GPIO_PXA) += gpio-pxa.o
obj-$(CONFIG_GPIO_RC5T583) += gpio-rc5t583.o
obj-$(CONFIG_GPIO_RDC321X) += gpio-rdc321x.o
obj-$(CONFIG_GPIO_RCAR) += gpio-rcar.o
+obj-$(CONFIG_GPIO_REG) += gpio-reg.o
obj-$(CONFIG_ARCH_SA1100) += gpio-sa1100.o
obj-$(CONFIG_GPIO_SCH) += gpio-sch.o
obj-$(CONFIG_GPIO_SCH311X) += gpio-sch311x.o
diff --git a/drivers/gpio/devres.c b/drivers/gpio/devres.c
index 7031eea165c9..a75511d1ea5d 100644
--- a/drivers/gpio/devres.c
+++ b/drivers/gpio/devres.c
@@ -136,7 +136,7 @@ EXPORT_SYMBOL(devm_gpiod_get_index);
* GPIO descriptors returned from this function are automatically disposed on
* driver detach.
*
- * On successfull request the GPIO pin is configured in accordance with
+ * On successful request the GPIO pin is configured in accordance with
* provided @flags.
*/
struct gpio_desc *devm_fwnode_get_index_gpiod_from_child(struct device *dev,
diff --git a/drivers/gpio/gpio-104-dio-48e.c b/drivers/gpio/gpio-104-dio-48e.c
index 17bd2ab4ebe2..61b50c40b87b 100644
--- a/drivers/gpio/gpio-104-dio-48e.c
+++ b/drivers/gpio/gpio-104-dio-48e.c
@@ -55,7 +55,7 @@ struct dio48e_gpio {
unsigned char io_state[6];
unsigned char out_state[6];
unsigned char control[2];
- spinlock_t lock;
+ raw_spinlock_t lock;
unsigned base;
unsigned char irq_mask;
};
@@ -78,7 +78,7 @@ static int dio48e_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
unsigned long flags;
unsigned control;
- spin_lock_irqsave(&dio48egpio->lock, flags);
+ raw_spin_lock_irqsave(&dio48egpio->lock, flags);
/* Check if configuring Port C */
if (io_port == 2 || io_port == 5) {
@@ -103,7 +103,7 @@ static int dio48e_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
control &= ~BIT(7);
outb(control, control_addr);
- spin_unlock_irqrestore(&dio48egpio->lock, flags);
+ raw_spin_unlock_irqrestore(&dio48egpio->lock, flags);
return 0;
}
@@ -120,7 +120,7 @@ static int dio48e_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
unsigned long flags;
unsigned control;
- spin_lock_irqsave(&dio48egpio->lock, flags);
+ raw_spin_lock_irqsave(&dio48egpio->lock, flags);
/* Check if configuring Port C */
if (io_port == 2 || io_port == 5) {
@@ -153,7 +153,7 @@ static int dio48e_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
control &= ~BIT(7);
outb(control, control_addr);
- spin_unlock_irqrestore(&dio48egpio->lock, flags);
+ raw_spin_unlock_irqrestore(&dio48egpio->lock, flags);
return 0;
}
@@ -167,17 +167,17 @@ static int dio48e_gpio_get(struct gpio_chip *chip, unsigned offset)
unsigned long flags;
unsigned port_state;
- spin_lock_irqsave(&dio48egpio->lock, flags);
+ raw_spin_lock_irqsave(&dio48egpio->lock, flags);
/* ensure that GPIO is set for input */
if (!(dio48egpio->io_state[port] & mask)) {
- spin_unlock_irqrestore(&dio48egpio->lock, flags);
+ raw_spin_unlock_irqrestore(&dio48egpio->lock, flags);
return -EINVAL;
}
port_state = inb(dio48egpio->base + in_port);
- spin_unlock_irqrestore(&dio48egpio->lock, flags);
+ raw_spin_unlock_irqrestore(&dio48egpio->lock, flags);
return !!(port_state & mask);
}
@@ -190,7 +190,7 @@ static void dio48e_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
const unsigned out_port = (port > 2) ? port + 1 : port;
unsigned long flags;
- spin_lock_irqsave(&dio48egpio->lock, flags);
+ raw_spin_lock_irqsave(&dio48egpio->lock, flags);
if (value)
dio48egpio->out_state[port] |= mask;
@@ -199,7 +199,7 @@ static void dio48e_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
outb(dio48egpio->out_state[port], dio48egpio->base + out_port);
- spin_unlock_irqrestore(&dio48egpio->lock, flags);
+ raw_spin_unlock_irqrestore(&dio48egpio->lock, flags);
}
static void dio48e_gpio_set_multiple(struct gpio_chip *chip,
@@ -225,14 +225,14 @@ static void dio48e_gpio_set_multiple(struct gpio_chip *chip,
out_port = (port > 2) ? port + 1 : port;
bitmask = mask[BIT_WORD(i)] & bits[BIT_WORD(i)];
- spin_lock_irqsave(&dio48egpio->lock, flags);
+ raw_spin_lock_irqsave(&dio48egpio->lock, flags);
/* update output state data and set device gpio register */
dio48egpio->out_state[port] &= ~mask[BIT_WORD(i)];
dio48egpio->out_state[port] |= bitmask;
outb(dio48egpio->out_state[port], dio48egpio->base + out_port);
- spin_unlock_irqrestore(&dio48egpio->lock, flags);
+ raw_spin_unlock_irqrestore(&dio48egpio->lock, flags);
/* prepare for next gpio register set */
mask[BIT_WORD(i)] >>= gpio_reg_size;
@@ -255,7 +255,7 @@ static void dio48e_irq_mask(struct irq_data *data)
if (offset != 19 && offset != 43)
return;
- spin_lock_irqsave(&dio48egpio->lock, flags);
+ raw_spin_lock_irqsave(&dio48egpio->lock, flags);
if (offset == 19)
dio48egpio->irq_mask &= ~BIT(0);
@@ -266,7 +266,7 @@ static void dio48e_irq_mask(struct irq_data *data)
/* disable interrupts */
inb(dio48egpio->base + 0xB);
- spin_unlock_irqrestore(&dio48egpio->lock, flags);
+ raw_spin_unlock_irqrestore(&dio48egpio->lock, flags);
}
static void dio48e_irq_unmask(struct irq_data *data)
@@ -280,7 +280,7 @@ static void dio48e_irq_unmask(struct irq_data *data)
if (offset != 19 && offset != 43)
return;
- spin_lock_irqsave(&dio48egpio->lock, flags);
+ raw_spin_lock_irqsave(&dio48egpio->lock, flags);
if (!dio48egpio->irq_mask) {
/* enable interrupts */
@@ -293,7 +293,7 @@ static void dio48e_irq_unmask(struct irq_data *data)
else
dio48egpio->irq_mask |= BIT(1);
- spin_unlock_irqrestore(&dio48egpio->lock, flags);
+ raw_spin_unlock_irqrestore(&dio48egpio->lock, flags);
}
static int dio48e_irq_set_type(struct irq_data *data, unsigned flow_type)
@@ -329,11 +329,11 @@ static irqreturn_t dio48e_irq_handler(int irq, void *dev_id)
generic_handle_irq(irq_find_mapping(chip->irqdomain,
19 + gpio*24));
- spin_lock(&dio48egpio->lock);
+ raw_spin_lock(&dio48egpio->lock);
outb(0x00, dio48egpio->base + 0xF);
- spin_unlock(&dio48egpio->lock);
+ raw_spin_unlock(&dio48egpio->lock);
return IRQ_HANDLED;
}
@@ -388,7 +388,7 @@ static int dio48e_probe(struct device *dev, unsigned int id)
dio48egpio->chip.set_multiple = dio48e_gpio_set_multiple;
dio48egpio->base = base[id];
- spin_lock_init(&dio48egpio->lock);
+ raw_spin_lock_init(&dio48egpio->lock);
err = devm_gpiochip_add_data(dev, &dio48egpio->chip, dio48egpio);
if (err) {
diff --git a/drivers/gpio/gpio-104-idi-48.c b/drivers/gpio/gpio-104-idi-48.c
index 568375a7ebc2..337c048168d8 100644
--- a/drivers/gpio/gpio-104-idi-48.c
+++ b/drivers/gpio/gpio-104-idi-48.c
@@ -51,7 +51,7 @@ MODULE_PARM_DESC(irq, "ACCES 104-IDI-48 interrupt line numbers");
*/
struct idi_48_gpio {
struct gpio_chip chip;
- spinlock_t lock;
+ raw_spinlock_t lock;
spinlock_t ack_lock;
unsigned char irq_mask[6];
unsigned base;
@@ -112,11 +112,12 @@ static void idi_48_irq_mask(struct irq_data *data)
if (!idi48gpio->irq_mask[boundary]) {
idi48gpio->cos_enb &= ~BIT(boundary);
- spin_lock_irqsave(&idi48gpio->lock, flags);
+ raw_spin_lock_irqsave(&idi48gpio->lock, flags);
outb(idi48gpio->cos_enb, idi48gpio->base + 7);
- spin_unlock_irqrestore(&idi48gpio->lock, flags);
+ raw_spin_unlock_irqrestore(&idi48gpio->lock,
+ flags);
}
return;
@@ -145,11 +146,12 @@ static void idi_48_irq_unmask(struct irq_data *data)
if (!prev_irq_mask) {
idi48gpio->cos_enb |= BIT(boundary);
- spin_lock_irqsave(&idi48gpio->lock, flags);
+ raw_spin_lock_irqsave(&idi48gpio->lock, flags);
outb(idi48gpio->cos_enb, idi48gpio->base + 7);
- spin_unlock_irqrestore(&idi48gpio->lock, flags);
+ raw_spin_unlock_irqrestore(&idi48gpio->lock,
+ flags);
}
return;
@@ -186,11 +188,11 @@ static irqreturn_t idi_48_irq_handler(int irq, void *dev_id)
spin_lock(&idi48gpio->ack_lock);
- spin_lock(&idi48gpio->lock);
+ raw_spin_lock(&idi48gpio->lock);
cos_status = inb(idi48gpio->base + 7);
- spin_unlock(&idi48gpio->lock);
+ raw_spin_unlock(&idi48gpio->lock);
/* IRQ Status (bit 6) is active low (0 = IRQ generated by device) */
if (cos_status & BIT(6)) {
@@ -256,7 +258,7 @@ static int idi_48_probe(struct device *dev, unsigned int id)
idi48gpio->chip.get = idi_48_gpio_get;
idi48gpio->base = base[id];
- spin_lock_init(&idi48gpio->lock);
+ raw_spin_lock_init(&idi48gpio->lock);
spin_lock_init(&idi48gpio->ack_lock);
err = devm_gpiochip_add_data(dev, &idi48gpio->chip, idi48gpio);
diff --git a/drivers/gpio/gpio-104-idio-16.c b/drivers/gpio/gpio-104-idio-16.c
index 7053cf736648..5281e1cedb01 100644
--- a/drivers/gpio/gpio-104-idio-16.c
+++ b/drivers/gpio/gpio-104-idio-16.c
@@ -50,7 +50,7 @@ MODULE_PARM_DESC(irq, "ACCES 104-IDIO-16 interrupt line numbers");
*/
struct idio_16_gpio {
struct gpio_chip chip;
- spinlock_t lock;
+ raw_spinlock_t lock;
unsigned long irq_mask;
unsigned base;
unsigned out_state;
@@ -99,7 +99,7 @@ static void idio_16_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
if (offset > 15)
return;
- spin_lock_irqsave(&idio16gpio->lock, flags);
+ raw_spin_lock_irqsave(&idio16gpio->lock, flags);
if (value)
idio16gpio->out_state |= mask;
@@ -111,7 +111,7 @@ static void idio_16_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
else
outb(idio16gpio->out_state, idio16gpio->base);
- spin_unlock_irqrestore(&idio16gpio->lock, flags);
+ raw_spin_unlock_irqrestore(&idio16gpio->lock, flags);
}
static void idio_16_gpio_set_multiple(struct gpio_chip *chip,
@@ -120,7 +120,7 @@ static void idio_16_gpio_set_multiple(struct gpio_chip *chip,
struct idio_16_gpio *const idio16gpio = gpiochip_get_data(chip);
unsigned long flags;
- spin_lock_irqsave(&idio16gpio->lock, flags);
+ raw_spin_lock_irqsave(&idio16gpio->lock, flags);
idio16gpio->out_state &= ~*mask;
idio16gpio->out_state |= *mask & *bits;
@@ -130,7 +130,7 @@ static void idio_16_gpio_set_multiple(struct gpio_chip *chip,
if ((*mask >> 8) & 0xFF)
outb(idio16gpio->out_state >> 8, idio16gpio->base + 4);
- spin_unlock_irqrestore(&idio16gpio->lock, flags);
+ raw_spin_unlock_irqrestore(&idio16gpio->lock, flags);
}
static void idio_16_irq_ack(struct irq_data *data)
@@ -147,11 +147,11 @@ static void idio_16_irq_mask(struct irq_data *data)
idio16gpio->irq_mask &= ~mask;
if (!idio16gpio->irq_mask) {
- spin_lock_irqsave(&idio16gpio->lock, flags);
+ raw_spin_lock_irqsave(&idio16gpio->lock, flags);
outb(0, idio16gpio->base + 2);
- spin_unlock_irqrestore(&idio16gpio->lock, flags);
+ raw_spin_unlock_irqrestore(&idio16gpio->lock, flags);
}
}
@@ -166,11 +166,11 @@ static void idio_16_irq_unmask(struct irq_data *data)
idio16gpio->irq_mask |= mask;
if (!prev_irq_mask) {
- spin_lock_irqsave(&idio16gpio->lock, flags);
+ raw_spin_lock_irqsave(&idio16gpio->lock, flags);
inb(idio16gpio->base + 2);
- spin_unlock_irqrestore(&idio16gpio->lock, flags);
+ raw_spin_unlock_irqrestore(&idio16gpio->lock, flags);
}
}
@@ -201,11 +201,11 @@ static irqreturn_t idio_16_irq_handler(int irq, void *dev_id)
for_each_set_bit(gpio, &idio16gpio->irq_mask, chip->ngpio)
generic_handle_irq(irq_find_mapping(chip->irqdomain, gpio));
- spin_lock(&idio16gpio->lock);
+ raw_spin_lock(&idio16gpio->lock);
outb(0, idio16gpio->base + 1);
- spin_unlock(&idio16gpio->lock);
+ raw_spin_unlock(&idio16gpio->lock);
return IRQ_HANDLED;
}
@@ -249,7 +249,7 @@ static int idio_16_probe(struct device *dev, unsigned int id)
idio16gpio->base = base[id];
idio16gpio->out_state = 0xFFFF;
- spin_lock_init(&idio16gpio->lock);
+ raw_spin_lock_init(&idio16gpio->lock);
err = devm_gpiochip_add_data(dev, &idio16gpio->chip, idio16gpio);
if (err) {
diff --git a/drivers/gpio/gpio-altera.c b/drivers/gpio/gpio-altera.c
index 3fe6a21e05a5..17485dc20384 100644
--- a/drivers/gpio/gpio-altera.c
+++ b/drivers/gpio/gpio-altera.c
@@ -38,7 +38,7 @@
*/
struct altera_gpio_chip {
struct of_mm_gpio_chip mmchip;
- spinlock_t gpio_lock;
+ raw_spinlock_t gpio_lock;
int interrupt_trigger;
int mapped_irq;
};
@@ -53,12 +53,12 @@ static void altera_gpio_irq_unmask(struct irq_data *d)
altera_gc = gpiochip_get_data(irq_data_get_irq_chip_data(d));
mm_gc = &altera_gc->mmchip;
- spin_lock_irqsave(&altera_gc->gpio_lock, flags);
+ raw_spin_lock_irqsave(&altera_gc->gpio_lock, flags);
intmask = readl(mm_gc->regs + ALTERA_GPIO_IRQ_MASK);
/* Set ALTERA_GPIO_IRQ_MASK bit to unmask */
intmask |= BIT(irqd_to_hwirq(d));
writel(intmask, mm_gc->regs + ALTERA_GPIO_IRQ_MASK);
- spin_unlock_irqrestore(&altera_gc->gpio_lock, flags);
+ raw_spin_unlock_irqrestore(&altera_gc->gpio_lock, flags);
}
static void altera_gpio_irq_mask(struct irq_data *d)
@@ -71,12 +71,12 @@ static void altera_gpio_irq_mask(struct irq_data *d)
altera_gc = gpiochip_get_data(irq_data_get_irq_chip_data(d));
mm_gc = &altera_gc->mmchip;
- spin_lock_irqsave(&altera_gc->gpio_lock, flags);
+ raw_spin_lock_irqsave(&altera_gc->gpio_lock, flags);
intmask = readl(mm_gc->regs + ALTERA_GPIO_IRQ_MASK);
/* Clear ALTERA_GPIO_IRQ_MASK bit to mask */
intmask &= ~BIT(irqd_to_hwirq(d));
writel(intmask, mm_gc->regs + ALTERA_GPIO_IRQ_MASK);
- spin_unlock_irqrestore(&altera_gc->gpio_lock, flags);
+ raw_spin_unlock_irqrestore(&altera_gc->gpio_lock, flags);
}
/**
@@ -140,14 +140,14 @@ static void altera_gpio_set(struct gpio_chip *gc, unsigned offset, int value)
mm_gc = to_of_mm_gpio_chip(gc);
chip = gpiochip_get_data(gc);
- spin_lock_irqsave(&chip->gpio_lock, flags);
+ raw_spin_lock_irqsave(&chip->gpio_lock, flags);
data_reg = readl(mm_gc->regs + ALTERA_GPIO_DATA);
if (value)
data_reg |= BIT(offset);
else
data_reg &= ~BIT(offset);
writel(data_reg, mm_gc->regs + ALTERA_GPIO_DATA);
- spin_unlock_irqrestore(&chip->gpio_lock, flags);
+ raw_spin_unlock_irqrestore(&chip->gpio_lock, flags);
}
static int altera_gpio_direction_input(struct gpio_chip *gc, unsigned offset)
@@ -160,12 +160,12 @@ static int altera_gpio_direction_input(struct gpio_chip *gc, unsigned offset)
mm_gc = to_of_mm_gpio_chip(gc);
chip = gpiochip_get_data(gc);
- spin_lock_irqsave(&chip->gpio_lock, flags);
+ raw_spin_lock_irqsave(&chip->gpio_lock, flags);
/* Set pin as input, assumes software controlled IP */
gpio_ddr = readl(mm_gc->regs + ALTERA_GPIO_DIR);
gpio_ddr &= ~BIT(offset);
writel(gpio_ddr, mm_gc->regs + ALTERA_GPIO_DIR);
- spin_unlock_irqrestore(&chip->gpio_lock, flags);
+ raw_spin_unlock_irqrestore(&chip->gpio_lock, flags);
return 0;
}
@@ -181,7 +181,7 @@ static int altera_gpio_direction_output(struct gpio_chip *gc,
mm_gc = to_of_mm_gpio_chip(gc);
chip = gpiochip_get_data(gc);
- spin_lock_irqsave(&chip->gpio_lock, flags);
+ raw_spin_lock_irqsave(&chip->gpio_lock, flags);
/* Sets the GPIO value */
data_reg = readl(mm_gc->regs + ALTERA_GPIO_DATA);
if (value)
@@ -194,7 +194,7 @@ static int altera_gpio_direction_output(struct gpio_chip *gc,
gpio_ddr = readl(mm_gc->regs + ALTERA_GPIO_DIR);
gpio_ddr |= BIT(offset);
writel(gpio_ddr, mm_gc->regs + ALTERA_GPIO_DIR);
- spin_unlock_irqrestore(&chip->gpio_lock, flags);
+ raw_spin_unlock_irqrestore(&chip->gpio_lock, flags);
return 0;
}
@@ -262,7 +262,7 @@ static int altera_gpio_probe(struct platform_device *pdev)
if (!altera_gc)
return -ENOMEM;
- spin_lock_init(&altera_gc->gpio_lock);
+ raw_spin_lock_init(&altera_gc->gpio_lock);
if (of_property_read_u32(node, "altr,ngpio", &reg))
/* By default assume maximum ngpio */
diff --git a/drivers/gpio/gpio-arizona.c b/drivers/gpio/gpio-arizona.c
index 1f91557717a6..cd23fd727f95 100644
--- a/drivers/gpio/gpio-arizona.c
+++ b/drivers/gpio/gpio-arizona.c
@@ -17,6 +17,7 @@
#include <linux/module.h>
#include <linux/gpio.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/seq_file.h>
#include <linux/mfd/arizona/core.h>
@@ -41,13 +42,38 @@ static int arizona_gpio_get(struct gpio_chip *chip, unsigned offset)
{
struct arizona_gpio *arizona_gpio = gpiochip_get_data(chip);
struct arizona *arizona = arizona_gpio->arizona;
- unsigned int val;
+ unsigned int reg, val;
int ret;
- ret = regmap_read(arizona->regmap, ARIZONA_GPIO1_CTRL + offset, &val);
+ reg = ARIZONA_GPIO1_CTRL + offset;
+ ret = regmap_read(arizona->regmap, reg, &val);
if (ret < 0)
return ret;
+ /* Resume to read actual registers for input pins */
+ if (val & ARIZONA_GPN_DIR) {
+ ret = pm_runtime_get_sync(chip->parent);
+ if (ret < 0) {
+ dev_err(chip->parent, "Failed to resume: %d\n", ret);
+ return ret;
+ }
+
+ /* Register is cached, drop it to ensure a physical read */
+ ret = regcache_drop_region(arizona->regmap, reg, reg);
+ if (ret < 0) {
+ dev_err(chip->parent, "Failed to drop cache: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = regmap_read(arizona->regmap, reg, &val);
+ if (ret < 0)
+ return ret;
+
+ pm_runtime_mark_last_busy(chip->parent);
+ pm_runtime_put_autosuspend(chip->parent);
+ }
+
if (val & ARIZONA_GPN_LVL)
return 1;
else
diff --git a/drivers/gpio/gpio-aspeed.c b/drivers/gpio/gpio-aspeed.c
index fb16cc771c0d..ccea609676ee 100644
--- a/drivers/gpio/gpio-aspeed.c
+++ b/drivers/gpio/gpio-aspeed.c
@@ -9,14 +9,18 @@
* 2 of the License, or (at your option) any later version.
*/
-#include <linux/module.h>
-#include <linux/kernel.h>
+#include <asm/div64.h>
+#include <linux/clk.h>
+#include <linux/gpio/driver.h>
+#include <linux/hashtable.h>
#include <linux/init.h>
#include <linux/io.h>
-#include <linux/spinlock.h>
-#include <linux/platform_device.h>
-#include <linux/gpio/driver.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
struct aspeed_bank_props {
unsigned int bank;
@@ -29,59 +33,85 @@ struct aspeed_gpio_config {
const struct aspeed_bank_props *props;
};
+/*
+ * @offset_timer: Maps an offset to a @timer_users index, or zero if disabled
+ * @timer_users: Tracks the number of users of each timer
+ *
+ * The @timer_users array has four elements, but the first element is unused.
+ * This simplifies accounting and indexing: a zero value in @offset_timer
+ * means debouncing is disabled for that GPIO, while any other value is used
+ * as an index into @timer_users. The zero encoding also matches the hardware,
+ * where a timer selection of zero built from the configuration registers
+ * likewise disables debouncing.
+ */
struct aspeed_gpio {
struct gpio_chip chip;
spinlock_t lock;
void __iomem *base;
int irq;
const struct aspeed_gpio_config *config;
+
+ u8 *offset_timer;
+ unsigned int timer_users[4];
+ struct clk *clk;
};
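To make the accounting described above concrete, here is a minimal standalone sketch (the array sizes and the offsets 5, 7 and 9 are hypothetical, not taken from the driver): two lines sharing one debounce timer record the same index in offset_timer, timer_users counts how many lines reference that timer, and element zero stays unused so that "0" can mean "no debounce".

#include <assert.h>

int main(void)
{
	unsigned char offset_timer[232] = { 0 };	/* 0 == debounce disabled */
	unsigned int timer_users[4] = { 0 };		/* element [0] intentionally unused */

	/* GPIO offsets 5 and 9 both use debounce timer index 2 */
	offset_timer[5] = 2; timer_users[2]++;
	offset_timer[9] = 2; timer_users[2]++;

	assert(timer_users[2] == 2);
	assert(offset_timer[7] == 0);	/* offset 7: debouncing disabled */
	return 0;
}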
struct aspeed_gpio_bank {
uint16_t val_regs;
uint16_t irq_regs;
+ uint16_t debounce_regs;
const char names[4][3];
};
+static const int debounce_timers[4] = { 0x00, 0x50, 0x54, 0x58 };
+
static const struct aspeed_gpio_bank aspeed_gpio_banks[] = {
{
.val_regs = 0x0000,
.irq_regs = 0x0008,
+ .debounce_regs = 0x0040,
.names = { "A", "B", "C", "D" },
},
{
.val_regs = 0x0020,
.irq_regs = 0x0028,
+ .debounce_regs = 0x0048,
.names = { "E", "F", "G", "H" },
},
{
.val_regs = 0x0070,
.irq_regs = 0x0098,
+ .debounce_regs = 0x00b0,
.names = { "I", "J", "K", "L" },
},
{
.val_regs = 0x0078,
.irq_regs = 0x00e8,
+ .debounce_regs = 0x0100,
.names = { "M", "N", "O", "P" },
},
{
.val_regs = 0x0080,
.irq_regs = 0x0118,
+ .debounce_regs = 0x0130,
.names = { "Q", "R", "S", "T" },
},
{
.val_regs = 0x0088,
.irq_regs = 0x0148,
+ .debounce_regs = 0x0160,
.names = { "U", "V", "W", "X" },
},
{
.val_regs = 0x01E0,
.irq_regs = 0x0178,
+ .debounce_regs = 0x0190,
.names = { "Y", "Z", "AA", "AB" },
},
{
.val_regs = 0x01E8,
.irq_regs = 0x01A8,
+ .debounce_regs = 0x01c0,
.names = { "AC", "", "", "" },
},
};
@@ -99,6 +129,13 @@ static const struct aspeed_gpio_bank aspeed_gpio_banks[] = {
#define GPIO_IRQ_TYPE2 0x0c
#define GPIO_IRQ_STATUS 0x10
+#define GPIO_DEBOUNCE_SEL1 0x00
+#define GPIO_DEBOUNCE_SEL2 0x04
+
+#define _GPIO_SET_DEBOUNCE(t, o, i) ((!!((t) & BIT(i))) << GPIO_OFFSET(o))
+#define GPIO_SET_DEBOUNCE1(t, o) _GPIO_SET_DEBOUNCE(t, o, 1)
+#define GPIO_SET_DEBOUNCE2(t, o) _GPIO_SET_DEBOUNCE(t, o, 0)
+
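As a reading aid for the two macros above: the two-bit timer index is split across the two selection registers, one bit per register, at the GPIO's bit position. A minimal sketch, assuming GPIO_OFFSET(o) reduces the offset to a bit position within its bank (that helper is defined elsewhere in the driver and not shown in this hunk):

/* Illustrative only: timer index 2 (binary 10) sets the line's bit in
 * DEBOUNCE_SEL1 and clears it in DEBOUNCE_SEL2; timer index 0 clears both,
 * which is the hardware encoding for "debouncing disabled". */
static void debounce_sel_bits(unsigned int timer, unsigned int pos,
			      unsigned int *sel1_bit, unsigned int *sel2_bit)
{
	*sel1_bit = !!(timer & 0x2) << pos;	/* mirrors GPIO_SET_DEBOUNCE1() */
	*sel2_bit = !!(timer & 0x1) << pos;	/* mirrors GPIO_SET_DEBOUNCE2() */
}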
static const struct aspeed_gpio_bank *to_bank(unsigned int offset)
{
unsigned int bank = GPIO_BANK(offset);
@@ -144,6 +181,7 @@ static inline bool have_input(struct aspeed_gpio *gpio, unsigned int offset)
}
#define have_irq(g, o) have_input((g), (o))
+#define have_debounce(g, o) have_input((g), (o))
static inline bool have_output(struct aspeed_gpio *gpio, unsigned int offset)
{
@@ -506,6 +544,231 @@ static void aspeed_gpio_free(struct gpio_chip *chip, unsigned int offset)
pinctrl_free_gpio(chip->base + offset);
}
+static inline void __iomem *bank_debounce_reg(struct aspeed_gpio *gpio,
+ const struct aspeed_gpio_bank *bank,
+ unsigned int reg)
+{
+ return gpio->base + bank->debounce_regs + reg;
+}
+
+static int usecs_to_cycles(struct aspeed_gpio *gpio, unsigned long usecs,
+ u32 *cycles)
+{
+ u64 rate;
+ u64 n;
+ u32 r;
+
+ rate = clk_get_rate(gpio->clk);
+ if (!rate)
+ return -ENOTSUPP;
+
+ n = rate * usecs;
+ r = do_div(n, 1000000);
+
+ if (n >= U32_MAX)
+ return -ERANGE;
+
+ /* At least as long as the requested time */
+ *cycles = n + (!!r);
+
+ return 0;
+}
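The rounding here is effectively cycles = ceil(rate * usecs / 1000000), so the programmed period is never shorter than the requested one. A standalone restatement with a worked value (the 32.768 kHz rate is purely hypothetical):

#include <stdint.h>

static uint32_t debounce_cycles(uint64_t rate_hz, uint64_t usecs)
{
	uint64_t n = rate_hz * usecs;

	/* round up whenever the division leaves a remainder */
	return (uint32_t)(n / 1000000) + !!(n % 1000000);
}

/* debounce_cycles(32768, 100) == 4: 3.2768 cycles, rounded up. */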
+
+/* Call under gpio->lock */
+static int register_allocated_timer(struct aspeed_gpio *gpio,
+ unsigned int offset, unsigned int timer)
+{
+ if (WARN(gpio->offset_timer[offset] != 0,
+ "Offset %d already allocated timer %d\n",
+ offset, gpio->offset_timer[offset]))
+ return -EINVAL;
+
+ if (WARN(gpio->timer_users[timer] == UINT_MAX,
+ "Timer user count would overflow\n"))
+ return -EPERM;
+
+ gpio->offset_timer[offset] = timer;
+ gpio->timer_users[timer]++;
+
+ return 0;
+}
+
+/* Call under gpio->lock */
+static int unregister_allocated_timer(struct aspeed_gpio *gpio,
+ unsigned int offset)
+{
+ if (WARN(gpio->offset_timer[offset] == 0,
+ "No timer allocated to offset %d\n", offset))
+ return -EINVAL;
+
+ if (WARN(gpio->timer_users[gpio->offset_timer[offset]] == 0,
+ "No users recorded for timer %d\n",
+ gpio->offset_timer[offset]))
+ return -EINVAL;
+
+ gpio->timer_users[gpio->offset_timer[offset]]--;
+ gpio->offset_timer[offset] = 0;
+
+ return 0;
+}
+
+/* Call under gpio->lock */
+static inline bool timer_allocation_registered(struct aspeed_gpio *gpio,
+ unsigned int offset)
+{
+ return gpio->offset_timer[offset] > 0;
+}
+
+/* Call under gpio->lock */
+static void configure_timer(struct aspeed_gpio *gpio, unsigned int offset,
+ unsigned int timer)
+{
+ const struct aspeed_gpio_bank *bank = to_bank(offset);
+ const u32 mask = GPIO_BIT(offset);
+ void __iomem *addr;
+ u32 val;
+
+ addr = bank_debounce_reg(gpio, bank, GPIO_DEBOUNCE_SEL1);
+ val = ioread32(addr);
+ iowrite32((val & ~mask) | GPIO_SET_DEBOUNCE1(timer, offset), addr);
+
+ addr = bank_debounce_reg(gpio, bank, GPIO_DEBOUNCE_SEL2);
+ val = ioread32(addr);
+ iowrite32((val & ~mask) | GPIO_SET_DEBOUNCE2(timer, offset), addr);
+}
+
+static int enable_debounce(struct gpio_chip *chip, unsigned int offset,
+ unsigned long usecs)
+{
+ struct aspeed_gpio *gpio = gpiochip_get_data(chip);
+ u32 requested_cycles;
+ unsigned long flags;
+ int rc;
+ int i;
+
+ rc = usecs_to_cycles(gpio, usecs, &requested_cycles);
+ if (rc < 0) {
+ dev_warn(chip->parent, "Failed to convert %luus to cycles at %luHz: %d\n",
+ usecs, clk_get_rate(gpio->clk), rc);
+ return rc;
+ }
+
+ spin_lock_irqsave(&gpio->lock, flags);
+
+ if (timer_allocation_registered(gpio, offset)) {
+ rc = unregister_allocated_timer(gpio, offset);
+ if (rc < 0)
+ goto out;
+ }
+
+ /* Try to find a timer already configured for the debounce period */
+ for (i = 1; i < ARRAY_SIZE(debounce_timers); i++) {
+ u32 cycles;
+
+ cycles = ioread32(gpio->base + debounce_timers[i]);
+ if (requested_cycles == cycles)
+ break;
+ }
+
+ if (i == ARRAY_SIZE(debounce_timers)) {
+ int j;
+
+ /*
+ * As there are no timers configured for the requested debounce
+ * period, find an unused timer instead
+ */
+ for (j = 1; j < ARRAY_SIZE(gpio->timer_users); j++) {
+ if (gpio->timer_users[j] == 0)
+ break;
+ }
+
+ if (j == ARRAY_SIZE(gpio->timer_users)) {
+ dev_warn(chip->parent,
+ "Debounce timers exhausted, cannot debounce for period %luus\n",
+ usecs);
+
+ rc = -EPERM;
+
+ /*
+ * We already adjusted the accounting to remove @offset
+ * as a user of its previous timer, so also configure
+ * the hardware so @offset has timers disabled for
+ * consistency.
+ */
+ configure_timer(gpio, offset, 0);
+ goto out;
+ }
+
+ i = j;
+
+ iowrite32(requested_cycles, gpio->base + debounce_timers[i]);
+ }
+
+ if (WARN(i == 0, "Cannot register index of disabled timer\n")) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ register_allocated_timer(gpio, offset, i);
+ configure_timer(gpio, offset, i);
+
+out:
+ spin_unlock_irqrestore(&gpio->lock, flags);
+
+ return rc;
+}
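The selection logic above boils down to a three-step policy: reuse a timer already programmed with the identical cycle count, otherwise claim an idle timer and program it, and if all three usable timers are busy with other periods, give up and leave the line undebounced. A condensed sketch of just that policy (not driver code):

/* programmed[] holds the cycle count in each timer register, users[] the
 * reference counts; index 0 is the "disabled" slot and is never handed out. */
static int pick_debounce_timer(const unsigned int programmed[4],
			       const unsigned int users[4],
			       unsigned int want_cycles)
{
	int i;

	for (i = 1; i < 4; i++)			/* 1. reuse an identical period */
		if (programmed[i] == want_cycles)
			return i;

	for (i = 1; i < 4; i++)			/* 2. otherwise claim an idle timer */
		if (users[i] == 0)
			return i;		/* caller programs want_cycles into it */

	return -1;				/* 3. all timers busy: no debounce */
}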
+
+static int disable_debounce(struct gpio_chip *chip, unsigned int offset)
+{
+ struct aspeed_gpio *gpio = gpiochip_get_data(chip);
+ unsigned long flags;
+ int rc;
+
+ spin_lock_irqsave(&gpio->lock, flags);
+
+ rc = unregister_allocated_timer(gpio, offset);
+ if (!rc)
+ configure_timer(gpio, offset, 0);
+
+ spin_unlock_irqrestore(&gpio->lock, flags);
+
+ return rc;
+}
+
+static int set_debounce(struct gpio_chip *chip, unsigned int offset,
+ unsigned long usecs)
+{
+ struct aspeed_gpio *gpio = gpiochip_get_data(chip);
+
+ if (!have_debounce(gpio, offset))
+ return -ENOTSUPP;
+
+ if (usecs)
+ return enable_debounce(chip, offset, usecs);
+
+ return disable_debounce(chip, offset);
+}
+
+static int aspeed_gpio_set_config(struct gpio_chip *chip, unsigned int offset,
+ unsigned long config)
+{
+ unsigned long param = pinconf_to_config_param(config);
+ u32 arg = pinconf_to_config_argument(config);
+
+ if (param == PIN_CONFIG_INPUT_DEBOUNCE)
+ return set_debounce(chip, offset, arg);
+ else if (param == PIN_CONFIG_BIAS_DISABLE ||
+ param == PIN_CONFIG_BIAS_PULL_DOWN ||
+ param == PIN_CONFIG_DRIVE_STRENGTH)
+ return pinctrl_gpio_set_config(offset, config);
+ else if (param == PIN_CONFIG_DRIVE_OPEN_DRAIN ||
+ param == PIN_CONFIG_DRIVE_OPEN_SOURCE)
+ /* Return -ENOTSUPP to trigger emulation, as per datasheet */
+ return -ENOTSUPP;
+
+ return -ENOTSUPP;
+}
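For context, this path is normally reached through the gpiod consumer API rather than by calling set_config directly. A minimal consumer-side sketch (the "button" name, the device argument and the 5 ms period are illustrative assumptions):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>

static int example_debounced_input(struct device *dev)
{
	struct gpio_desc *button;
	int ret;

	button = devm_gpiod_get(dev, "button", GPIOD_IN);
	if (IS_ERR(button))
		return PTR_ERR(button);

	/* Packs PIN_CONFIG_INPUT_DEBOUNCE and lands in ->set_config() above */
	ret = gpiod_set_debounce(button, 5000);		/* microseconds */
	if (ret)
		dev_warn(dev, "debounce unsupported: %d\n", ret);

	return 0;
}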
+
/*
* Any banks not specified in a struct aspeed_bank_props array are assumed to
* have the properties:
@@ -565,8 +828,16 @@ static int __init aspeed_gpio_probe(struct platform_device *pdev)
if (!gpio_id)
return -EINVAL;
+ gpio->clk = of_clk_get(pdev->dev.of_node, 0);
+ if (IS_ERR(gpio->clk)) {
+ dev_warn(&pdev->dev,
+ "No HPLL clock phandle provided, debouncing disabled\n");
+ gpio->clk = NULL;
+ }
+
gpio->config = gpio_id->data;
+ gpio->chip.parent = &pdev->dev;
gpio->chip.ngpio = gpio->config->nr_gpios;
gpio->chip.parent = &pdev->dev;
gpio->chip.direction_input = aspeed_gpio_dir_in;
@@ -576,6 +847,7 @@ static int __init aspeed_gpio_probe(struct platform_device *pdev)
gpio->chip.free = aspeed_gpio_free;
gpio->chip.get = aspeed_gpio_get;
gpio->chip.set = aspeed_gpio_set;
+ gpio->chip.set_config = aspeed_gpio_set_config;
gpio->chip.label = dev_name(&pdev->dev);
gpio->chip.base = -1;
gpio->chip.irq_need_valid_mask = true;
@@ -584,6 +856,9 @@ static int __init aspeed_gpio_probe(struct platform_device *pdev)
if (rc < 0)
return rc;
+	gpio->offset_timer =
+		devm_kzalloc(&pdev->dev, gpio->chip.ngpio, GFP_KERNEL);
+	if (!gpio->offset_timer)
+		return -ENOMEM;
+

return aspeed_gpio_setup_irqs(gpio, pdev);
}
diff --git a/drivers/gpio/gpio-ath79.c b/drivers/gpio/gpio-ath79.c
index dc37dbe4b46d..f33d4a5fe671 100644
--- a/drivers/gpio/gpio-ath79.c
+++ b/drivers/gpio/gpio-ath79.c
@@ -32,7 +32,7 @@
struct ath79_gpio_ctrl {
struct gpio_chip gc;
void __iomem *base;
- spinlock_t lock;
+ raw_spinlock_t lock;
unsigned long both_edges;
};
@@ -74,9 +74,9 @@ static void ath79_gpio_irq_unmask(struct irq_data *data)
u32 mask = BIT(irqd_to_hwirq(data));
unsigned long flags;
- spin_lock_irqsave(&ctrl->lock, flags);
+ raw_spin_lock_irqsave(&ctrl->lock, flags);
ath79_gpio_update_bits(ctrl, AR71XX_GPIO_REG_INT_MASK, mask, mask);
- spin_unlock_irqrestore(&ctrl->lock, flags);
+ raw_spin_unlock_irqrestore(&ctrl->lock, flags);
}
static void ath79_gpio_irq_mask(struct irq_data *data)
@@ -85,9 +85,9 @@ static void ath79_gpio_irq_mask(struct irq_data *data)
u32 mask = BIT(irqd_to_hwirq(data));
unsigned long flags;
- spin_lock_irqsave(&ctrl->lock, flags);
+ raw_spin_lock_irqsave(&ctrl->lock, flags);
ath79_gpio_update_bits(ctrl, AR71XX_GPIO_REG_INT_MASK, mask, 0);
- spin_unlock_irqrestore(&ctrl->lock, flags);
+ raw_spin_unlock_irqrestore(&ctrl->lock, flags);
}
static void ath79_gpio_irq_enable(struct irq_data *data)
@@ -96,10 +96,10 @@ static void ath79_gpio_irq_enable(struct irq_data *data)
u32 mask = BIT(irqd_to_hwirq(data));
unsigned long flags;
- spin_lock_irqsave(&ctrl->lock, flags);
+ raw_spin_lock_irqsave(&ctrl->lock, flags);
ath79_gpio_update_bits(ctrl, AR71XX_GPIO_REG_INT_ENABLE, mask, mask);
ath79_gpio_update_bits(ctrl, AR71XX_GPIO_REG_INT_MASK, mask, mask);
- spin_unlock_irqrestore(&ctrl->lock, flags);
+ raw_spin_unlock_irqrestore(&ctrl->lock, flags);
}
static void ath79_gpio_irq_disable(struct irq_data *data)
@@ -108,10 +108,10 @@ static void ath79_gpio_irq_disable(struct irq_data *data)
u32 mask = BIT(irqd_to_hwirq(data));
unsigned long flags;
- spin_lock_irqsave(&ctrl->lock, flags);
+ raw_spin_lock_irqsave(&ctrl->lock, flags);
ath79_gpio_update_bits(ctrl, AR71XX_GPIO_REG_INT_MASK, mask, 0);
ath79_gpio_update_bits(ctrl, AR71XX_GPIO_REG_INT_ENABLE, mask, 0);
- spin_unlock_irqrestore(&ctrl->lock, flags);
+ raw_spin_unlock_irqrestore(&ctrl->lock, flags);
}
static int ath79_gpio_irq_set_type(struct irq_data *data,
@@ -140,7 +140,7 @@ static int ath79_gpio_irq_set_type(struct irq_data *data,
return -EINVAL;
}
- spin_lock_irqsave(&ctrl->lock, flags);
+ raw_spin_lock_irqsave(&ctrl->lock, flags);
if (flow_type == IRQ_TYPE_EDGE_BOTH) {
ctrl->both_edges |= mask;
@@ -165,7 +165,7 @@ static int ath79_gpio_irq_set_type(struct irq_data *data,
ath79_gpio_update_bits(
ctrl, AR71XX_GPIO_REG_INT_ENABLE, mask, mask);
- spin_unlock_irqrestore(&ctrl->lock, flags);
+ raw_spin_unlock_irqrestore(&ctrl->lock, flags);
return 0;
}
@@ -191,7 +191,7 @@ static void ath79_gpio_irq_handler(struct irq_desc *desc)
chained_irq_enter(irqchip, desc);
- spin_lock_irqsave(&ctrl->lock, flags);
+ raw_spin_lock_irqsave(&ctrl->lock, flags);
pending = ath79_gpio_read(ctrl, AR71XX_GPIO_REG_INT_PENDING);
@@ -203,7 +203,7 @@ static void ath79_gpio_irq_handler(struct irq_desc *desc)
both_edges, ~state);
}
- spin_unlock_irqrestore(&ctrl->lock, flags);
+ raw_spin_unlock_irqrestore(&ctrl->lock, flags);
if (pending) {
for_each_set_bit(irq, &pending, gc->ngpio)
@@ -262,7 +262,7 @@ static int ath79_gpio_probe(struct platform_device *pdev)
if (!ctrl->base)
return -ENOMEM;
- spin_lock_init(&ctrl->lock);
+ raw_spin_lock_init(&ctrl->lock);
err = bgpio_init(&ctrl->gc, &pdev->dev, 4,
ctrl->base + AR71XX_GPIO_REG_IN,
ctrl->base + AR71XX_GPIO_REG_SET,
diff --git a/drivers/gpio/gpio-bcm-kona.c b/drivers/gpio/gpio-bcm-kona.c
index 41d0ac142580..dfcf56ee3c61 100644
--- a/drivers/gpio/gpio-bcm-kona.c
+++ b/drivers/gpio/gpio-bcm-kona.c
@@ -67,7 +67,7 @@
struct bcm_kona_gpio {
void __iomem *reg_base;
int num_bank;
- spinlock_t lock;
+ raw_spinlock_t lock;
struct gpio_chip gpio_chip;
struct irq_domain *irq_domain;
struct bcm_kona_gpio_bank *banks;
@@ -95,13 +95,13 @@ static void bcm_kona_gpio_lock_gpio(struct bcm_kona_gpio *kona_gpio,
unsigned long flags;
int bank_id = GPIO_BANK(gpio);
- spin_lock_irqsave(&kona_gpio->lock, flags);
+ raw_spin_lock_irqsave(&kona_gpio->lock, flags);
val = readl(kona_gpio->reg_base + GPIO_PWD_STATUS(bank_id));
val |= BIT(gpio);
bcm_kona_gpio_write_lock_regs(kona_gpio->reg_base, bank_id, val);
- spin_unlock_irqrestore(&kona_gpio->lock, flags);
+ raw_spin_unlock_irqrestore(&kona_gpio->lock, flags);
}
static void bcm_kona_gpio_unlock_gpio(struct bcm_kona_gpio *kona_gpio,
@@ -111,13 +111,13 @@ static void bcm_kona_gpio_unlock_gpio(struct bcm_kona_gpio *kona_gpio,
unsigned long flags;
int bank_id = GPIO_BANK(gpio);
- spin_lock_irqsave(&kona_gpio->lock, flags);
+ raw_spin_lock_irqsave(&kona_gpio->lock, flags);
val = readl(kona_gpio->reg_base + GPIO_PWD_STATUS(bank_id));
val &= ~BIT(gpio);
bcm_kona_gpio_write_lock_regs(kona_gpio->reg_base, bank_id, val);
- spin_unlock_irqrestore(&kona_gpio->lock, flags);
+ raw_spin_unlock_irqrestore(&kona_gpio->lock, flags);
}
static int bcm_kona_gpio_get_dir(struct gpio_chip *chip, unsigned gpio)
@@ -141,7 +141,7 @@ static void bcm_kona_gpio_set(struct gpio_chip *chip, unsigned gpio, int value)
kona_gpio = gpiochip_get_data(chip);
reg_base = kona_gpio->reg_base;
- spin_lock_irqsave(&kona_gpio->lock, flags);
+ raw_spin_lock_irqsave(&kona_gpio->lock, flags);
/* this function only applies to output pin */
if (bcm_kona_gpio_get_dir(chip, gpio) == GPIOF_DIR_IN)
@@ -154,7 +154,7 @@ static void bcm_kona_gpio_set(struct gpio_chip *chip, unsigned gpio, int value)
writel(val, reg_base + reg_offset);
out:
- spin_unlock_irqrestore(&kona_gpio->lock, flags);
+ raw_spin_unlock_irqrestore(&kona_gpio->lock, flags);
}
static int bcm_kona_gpio_get(struct gpio_chip *chip, unsigned gpio)
@@ -168,7 +168,7 @@ static int bcm_kona_gpio_get(struct gpio_chip *chip, unsigned gpio)
kona_gpio = gpiochip_get_data(chip);
reg_base = kona_gpio->reg_base;
- spin_lock_irqsave(&kona_gpio->lock, flags);
+ raw_spin_lock_irqsave(&kona_gpio->lock, flags);
if (bcm_kona_gpio_get_dir(chip, gpio) == GPIOF_DIR_IN)
reg_offset = GPIO_IN_STATUS(bank_id);
@@ -178,7 +178,7 @@ static int bcm_kona_gpio_get(struct gpio_chip *chip, unsigned gpio)
/* read the GPIO bank status */
val = readl(reg_base + reg_offset);
- spin_unlock_irqrestore(&kona_gpio->lock, flags);
+ raw_spin_unlock_irqrestore(&kona_gpio->lock, flags);
/* return the specified bit status */
return !!(val & BIT(bit));
@@ -208,14 +208,14 @@ static int bcm_kona_gpio_direction_input(struct gpio_chip *chip, unsigned gpio)
kona_gpio = gpiochip_get_data(chip);
reg_base = kona_gpio->reg_base;
- spin_lock_irqsave(&kona_gpio->lock, flags);
+ raw_spin_lock_irqsave(&kona_gpio->lock, flags);
val = readl(reg_base + GPIO_CONTROL(gpio));
val &= ~GPIO_GPCTR0_IOTR_MASK;
val |= GPIO_GPCTR0_IOTR_CMD_INPUT;
writel(val, reg_base + GPIO_CONTROL(gpio));
- spin_unlock_irqrestore(&kona_gpio->lock, flags);
+ raw_spin_unlock_irqrestore(&kona_gpio->lock, flags);
return 0;
}
@@ -232,7 +232,7 @@ static int bcm_kona_gpio_direction_output(struct gpio_chip *chip,
kona_gpio = gpiochip_get_data(chip);
reg_base = kona_gpio->reg_base;
- spin_lock_irqsave(&kona_gpio->lock, flags);
+ raw_spin_lock_irqsave(&kona_gpio->lock, flags);
val = readl(reg_base + GPIO_CONTROL(gpio));
val &= ~GPIO_GPCTR0_IOTR_MASK;
@@ -244,7 +244,7 @@ static int bcm_kona_gpio_direction_output(struct gpio_chip *chip,
val |= BIT(bit);
writel(val, reg_base + reg_offset);
- spin_unlock_irqrestore(&kona_gpio->lock, flags);
+ raw_spin_unlock_irqrestore(&kona_gpio->lock, flags);
return 0;
}
@@ -288,7 +288,7 @@ static int bcm_kona_gpio_set_debounce(struct gpio_chip *chip, unsigned gpio,
}
/* spin lock for read-modify-write of the GPIO register */
- spin_lock_irqsave(&kona_gpio->lock, flags);
+ raw_spin_lock_irqsave(&kona_gpio->lock, flags);
val = readl(reg_base + GPIO_CONTROL(gpio));
val &= ~GPIO_GPCTR0_DBR_MASK;
@@ -303,7 +303,7 @@ static int bcm_kona_gpio_set_debounce(struct gpio_chip *chip, unsigned gpio,
writel(val, reg_base + GPIO_CONTROL(gpio));
- spin_unlock_irqrestore(&kona_gpio->lock, flags);
+ raw_spin_unlock_irqrestore(&kona_gpio->lock, flags);
return 0;
}
@@ -347,13 +347,13 @@ static void bcm_kona_gpio_irq_ack(struct irq_data *d)
kona_gpio = irq_data_get_irq_chip_data(d);
reg_base = kona_gpio->reg_base;
- spin_lock_irqsave(&kona_gpio->lock, flags);
+ raw_spin_lock_irqsave(&kona_gpio->lock, flags);
val = readl(reg_base + GPIO_INT_STATUS(bank_id));
val |= BIT(bit);
writel(val, reg_base + GPIO_INT_STATUS(bank_id));
- spin_unlock_irqrestore(&kona_gpio->lock, flags);
+ raw_spin_unlock_irqrestore(&kona_gpio->lock, flags);
}
static void bcm_kona_gpio_irq_mask(struct irq_data *d)
@@ -368,13 +368,13 @@ static void bcm_kona_gpio_irq_mask(struct irq_data *d)
kona_gpio = irq_data_get_irq_chip_data(d);
reg_base = kona_gpio->reg_base;
- spin_lock_irqsave(&kona_gpio->lock, flags);
+ raw_spin_lock_irqsave(&kona_gpio->lock, flags);
val = readl(reg_base + GPIO_INT_MASK(bank_id));
val |= BIT(bit);
writel(val, reg_base + GPIO_INT_MASK(bank_id));
- spin_unlock_irqrestore(&kona_gpio->lock, flags);
+ raw_spin_unlock_irqrestore(&kona_gpio->lock, flags);
}
static void bcm_kona_gpio_irq_unmask(struct irq_data *d)
@@ -389,13 +389,13 @@ static void bcm_kona_gpio_irq_unmask(struct irq_data *d)
kona_gpio = irq_data_get_irq_chip_data(d);
reg_base = kona_gpio->reg_base;
- spin_lock_irqsave(&kona_gpio->lock, flags);
+ raw_spin_lock_irqsave(&kona_gpio->lock, flags);
val = readl(reg_base + GPIO_INT_MSKCLR(bank_id));
val |= BIT(bit);
writel(val, reg_base + GPIO_INT_MSKCLR(bank_id));
- spin_unlock_irqrestore(&kona_gpio->lock, flags);
+ raw_spin_unlock_irqrestore(&kona_gpio->lock, flags);
}
static int bcm_kona_gpio_irq_set_type(struct irq_data *d, unsigned int type)
@@ -431,14 +431,14 @@ static int bcm_kona_gpio_irq_set_type(struct irq_data *d, unsigned int type)
return -EINVAL;
}
- spin_lock_irqsave(&kona_gpio->lock, flags);
+ raw_spin_lock_irqsave(&kona_gpio->lock, flags);
val = readl(reg_base + GPIO_CONTROL(gpio));
val &= ~GPIO_GPCTR0_ITR_MASK;
val |= lvl_type << GPIO_GPCTR0_ITR_SHIFT;
writel(val, reg_base + GPIO_CONTROL(gpio));
- spin_unlock_irqrestore(&kona_gpio->lock, flags);
+ raw_spin_unlock_irqrestore(&kona_gpio->lock, flags);
return 0;
}
@@ -655,7 +655,7 @@ static int bcm_kona_gpio_probe(struct platform_device *pdev)
bank);
}
- spin_lock_init(&kona_gpio->lock);
+ raw_spin_lock_init(&kona_gpio->lock);
return 0;
diff --git a/drivers/gpio/gpio-bd9571mwv.c b/drivers/gpio/gpio-bd9571mwv.c
new file mode 100644
index 000000000000..5224a946e8ab
--- /dev/null
+++ b/drivers/gpio/gpio-bd9571mwv.c
@@ -0,0 +1,144 @@
+/*
+ * ROHM BD9571MWV-M GPIO driver
+ *
+ * Copyright (C) 2017 Marek Vasut <marek.vasut+renesas@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether expressed or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License version 2 for more details.
+ *
+ * Based on the TPS65086 driver
+ *
+ * NOTE: Interrupts are not supported yet.
+ */
+
+#include <linux/gpio/driver.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <linux/mfd/bd9571mwv.h>
+
+struct bd9571mwv_gpio {
+ struct gpio_chip chip;
+ struct bd9571mwv *bd;
+};
+
+static int bd9571mwv_gpio_get_direction(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ struct bd9571mwv_gpio *gpio = gpiochip_get_data(chip);
+ int ret, val;
+
+ ret = regmap_read(gpio->bd->regmap, BD9571MWV_GPIO_DIR, &val);
+ if (ret < 0)
+ return ret;
+
+ return val & BIT(offset);
+}
+
+static int bd9571mwv_gpio_direction_input(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ struct bd9571mwv_gpio *gpio = gpiochip_get_data(chip);
+
+ regmap_update_bits(gpio->bd->regmap, BD9571MWV_GPIO_DIR,
+ BIT(offset), 0);
+
+ return 0;
+}
+
+static int bd9571mwv_gpio_direction_output(struct gpio_chip *chip,
+ unsigned int offset, int value)
+{
+ struct bd9571mwv_gpio *gpio = gpiochip_get_data(chip);
+
+ /* Set the initial value */
+ regmap_update_bits(gpio->bd->regmap, BD9571MWV_GPIO_OUT,
+ BIT(offset), value ? BIT(offset) : 0);
+ regmap_update_bits(gpio->bd->regmap, BD9571MWV_GPIO_DIR,
+ BIT(offset), BIT(offset));
+
+ return 0;
+}
+
+static int bd9571mwv_gpio_get(struct gpio_chip *chip, unsigned int offset)
+{
+ struct bd9571mwv_gpio *gpio = gpiochip_get_data(chip);
+ int ret, val;
+
+ ret = regmap_read(gpio->bd->regmap, BD9571MWV_GPIO_IN, &val);
+ if (ret < 0)
+ return ret;
+
+ return val & BIT(offset);
+}
+
+static void bd9571mwv_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
+{
+ struct bd9571mwv_gpio *gpio = gpiochip_get_data(chip);
+
+ regmap_update_bits(gpio->bd->regmap, BD9571MWV_GPIO_OUT,
+ BIT(offset), value ? BIT(offset) : 0);
+}
+
+static const struct gpio_chip template_chip = {
+ .label = "bd9571mwv-gpio",
+ .owner = THIS_MODULE,
+ .get_direction = bd9571mwv_gpio_get_direction,
+ .direction_input = bd9571mwv_gpio_direction_input,
+ .direction_output = bd9571mwv_gpio_direction_output,
+ .get = bd9571mwv_gpio_get,
+ .set = bd9571mwv_gpio_set,
+ .base = -1,
+ .ngpio = 2,
+ .can_sleep = true,
+};
+
+static int bd9571mwv_gpio_probe(struct platform_device *pdev)
+{
+ struct bd9571mwv_gpio *gpio;
+ int ret;
+
+ gpio = devm_kzalloc(&pdev->dev, sizeof(*gpio), GFP_KERNEL);
+ if (!gpio)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, gpio);
+
+ gpio->bd = dev_get_drvdata(pdev->dev.parent);
+ gpio->chip = template_chip;
+ gpio->chip.parent = gpio->bd->dev;
+
+ ret = devm_gpiochip_add_data(&pdev->dev, &gpio->chip, gpio);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Could not register gpiochip, %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct platform_device_id bd9571mwv_gpio_id_table[] = {
+ { "bd9571mwv-gpio", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, bd9571mwv_gpio_id_table);
+
+static struct platform_driver bd9571mwv_gpio_driver = {
+ .driver = {
+ .name = "bd9571mwv-gpio",
+ },
+ .probe = bd9571mwv_gpio_probe,
+ .id_table = bd9571mwv_gpio_id_table,
+};
+module_platform_driver(bd9571mwv_gpio_driver);
+
+MODULE_AUTHOR("Marek Vasut <marek.vasut+renesas@gmail.com>");
+MODULE_DESCRIPTION("BD9571MWV GPIO driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c
index 72f49d1e110d..ac173575d3f6 100644
--- a/drivers/gpio/gpio-davinci.c
+++ b/drivers/gpio/gpio-davinci.c
@@ -483,7 +483,7 @@ static int davinci_gpio_irq_setup(struct platform_device *pdev)
clk_prepare_enable(clk);
if (!pdata->gpio_unbanked) {
- irq = irq_alloc_descs(-1, 0, ngpio, 0);
+ irq = devm_irq_alloc_descs(dev, -1, 0, ngpio, 0);
if (irq < 0) {
dev_err(dev, "Couldn't allocate IRQ numbers\n");
return irq;
diff --git a/drivers/gpio/gpio-dwapb.c b/drivers/gpio/gpio-dwapb.c
index 9c15ee4ef4e9..f051c4552af5 100644
--- a/drivers/gpio/gpio-dwapb.c
+++ b/drivers/gpio/gpio-dwapb.c
@@ -21,6 +21,7 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/property.h>
@@ -55,6 +56,14 @@
#define GPIO_SWPORT_DR_SIZE (GPIO_SWPORTB_DR - GPIO_SWPORTA_DR)
#define GPIO_SWPORT_DDR_SIZE (GPIO_SWPORTB_DDR - GPIO_SWPORTA_DDR)
+#define GPIO_REG_OFFSET_V2 1
+
+#define GPIO_INTMASK_V2 0x44
+#define GPIO_INTTYPE_LEVEL_V2 0x34
+#define GPIO_INT_POLARITY_V2 0x38
+#define GPIO_INTSTATUS_V2 0x3c
+#define GPIO_PORTA_EOI_V2 0x40
+
struct dwapb_gpio;
#ifdef CONFIG_PM_SLEEP
@@ -87,14 +96,41 @@ struct dwapb_gpio {
struct dwapb_gpio_port *ports;
unsigned int nr_ports;
struct irq_domain *domain;
+ unsigned int flags;
};
+static inline u32 gpio_reg_v2_convert(unsigned int offset)
+{
+ switch (offset) {
+ case GPIO_INTMASK:
+ return GPIO_INTMASK_V2;
+ case GPIO_INTTYPE_LEVEL:
+ return GPIO_INTTYPE_LEVEL_V2;
+ case GPIO_INT_POLARITY:
+ return GPIO_INT_POLARITY_V2;
+ case GPIO_INTSTATUS:
+ return GPIO_INTSTATUS_V2;
+ case GPIO_PORTA_EOI:
+ return GPIO_PORTA_EOI_V2;
+ }
+
+ return offset;
+}
+
+static inline u32 gpio_reg_convert(struct dwapb_gpio *gpio, unsigned int offset)
+{
+ if (gpio->flags & GPIO_REG_OFFSET_V2)
+ return gpio_reg_v2_convert(offset);
+
+ return offset;
+}
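In other words, every register access funnels through this translation: on a controller whose match data carries GPIO_REG_OFFSET_V2 the v2 offsets defined above are used, while on the original layout the offset passes through unchanged. A small illustration (not driver code; GPIO_INTSTATUS is the pre-existing v1 define elsewhere in this file):

/* Illustrative only: the interrupt-status access resolves to 0x3c
 * (GPIO_INTSTATUS_V2) when the v2 flag is set, and to the original
 * GPIO_INTSTATUS offset otherwise. */
static u32 intstatus_offset(unsigned int flags)
{
	return (flags & GPIO_REG_OFFSET_V2) ? GPIO_INTSTATUS_V2 : GPIO_INTSTATUS;
}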
+
static inline u32 dwapb_read(struct dwapb_gpio *gpio, unsigned int offset)
{
struct gpio_chip *gc = &gpio->ports[0].gc;
void __iomem *reg_base = gpio->regs;
- return gc->read_reg(reg_base + offset);
+ return gc->read_reg(reg_base + gpio_reg_convert(gpio, offset));
}
static inline void dwapb_write(struct dwapb_gpio *gpio, unsigned int offset,
@@ -103,7 +139,7 @@ static inline void dwapb_write(struct dwapb_gpio *gpio, unsigned int offset,
struct gpio_chip *gc = &gpio->ports[0].gc;
void __iomem *reg_base = gpio->regs;
- gc->write_reg(reg_base + offset, val);
+ gc->write_reg(reg_base + gpio_reg_convert(gpio, offset), val);
}
static int dwapb_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
@@ -128,7 +164,7 @@ static void dwapb_toggle_trigger(struct dwapb_gpio *gpio, unsigned int offs)
static u32 dwapb_do_irq(struct dwapb_gpio *gpio)
{
- u32 irq_status = readl_relaxed(gpio->regs + GPIO_INTSTATUS);
+ u32 irq_status = dwapb_read(gpio, GPIO_INTSTATUS);
u32 ret = irq_status;
while (irq_status) {
@@ -348,8 +384,8 @@ static void dwapb_configure_irqs(struct dwapb_gpio *gpio,
ct->chip.irq_disable = dwapb_irq_disable;
ct->chip.irq_request_resources = dwapb_irq_reqres;
ct->chip.irq_release_resources = dwapb_irq_relres;
- ct->regs.ack = GPIO_PORTA_EOI;
- ct->regs.mask = GPIO_INTMASK;
+ ct->regs.ack = gpio_reg_convert(gpio, GPIO_PORTA_EOI);
+ ct->regs.mask = gpio_reg_convert(gpio, GPIO_INTMASK);
ct->type = IRQ_TYPE_LEVEL_MASK;
}
@@ -532,6 +568,21 @@ dwapb_gpio_get_pdata(struct device *dev)
return pdata;
}
+static const struct of_device_id dwapb_of_match[] = {
+ { .compatible = "snps,dw-apb-gpio", .data = (void *)0},
+ { .compatible = "apm,xgene-gpio-v2", .data = (void *)GPIO_REG_OFFSET_V2},
+ { /* Sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, dwapb_of_match);
+
+static const struct acpi_device_id dwapb_acpi_match[] = {
+ {"HISI0181", 0},
+ {"APMC0D07", 0},
+ {"APMC0D81", GPIO_REG_OFFSET_V2},
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, dwapb_acpi_match);
+
static int dwapb_gpio_probe(struct platform_device *pdev)
{
unsigned int i;
@@ -567,6 +618,25 @@ static int dwapb_gpio_probe(struct platform_device *pdev)
if (IS_ERR(gpio->regs))
return PTR_ERR(gpio->regs);
+ gpio->flags = 0;
+ if (dev->of_node) {
+ const struct of_device_id *of_devid;
+
+ of_devid = of_match_device(dwapb_of_match, dev);
+ if (of_devid) {
+ if (of_devid->data)
+ gpio->flags = (uintptr_t)of_devid->data;
+ }
+ } else if (has_acpi_companion(dev)) {
+ const struct acpi_device_id *acpi_id;
+
+ acpi_id = acpi_match_device(dwapb_acpi_match, dev);
+ if (acpi_id) {
+ if (acpi_id->driver_data)
+ gpio->flags = acpi_id->driver_data;
+ }
+ }
+
for (i = 0; i < gpio->nr_ports; i++) {
err = dwapb_gpio_add_port(gpio, &pdata->properties[i], i);
if (err)
@@ -593,19 +663,6 @@ static int dwapb_gpio_remove(struct platform_device *pdev)
return 0;
}
-static const struct of_device_id dwapb_of_match[] = {
- { .compatible = "snps,dw-apb-gpio" },
- { /* Sentinel */ }
-};
-MODULE_DEVICE_TABLE(of, dwapb_of_match);
-
-static const struct acpi_device_id dwapb_acpi_match[] = {
- {"HISI0181", 0},
- {"APMC0D07", 0},
- { }
-};
-MODULE_DEVICE_TABLE(acpi, dwapb_acpi_match);
-
#ifdef CONFIG_PM_SLEEP
static int dwapb_gpio_suspend(struct device *dev)
{
diff --git a/drivers/gpio/gpio-etraxfs.c b/drivers/gpio/gpio-etraxfs.c
index a254d5b07b94..14c6aac26780 100644
--- a/drivers/gpio/gpio-etraxfs.c
+++ b/drivers/gpio/gpio-etraxfs.c
@@ -54,7 +54,7 @@
struct etraxfs_gpio_info;
struct etraxfs_gpio_block {
- spinlock_t lock;
+ raw_spinlock_t lock;
u32 mask;
u32 cfg;
u32 pins;
@@ -233,10 +233,10 @@ static void etraxfs_gpio_irq_mask(struct irq_data *d)
struct etraxfs_gpio_block *block = chip->block;
unsigned int grpirq = etraxfs_gpio_to_group_irq(d->hwirq);
- spin_lock(&block->lock);
+ raw_spin_lock(&block->lock);
block->mask &= ~BIT(grpirq);
writel(block->mask, block->regs + block->info->rw_intr_mask);
- spin_unlock(&block->lock);
+ raw_spin_unlock(&block->lock);
}
static void etraxfs_gpio_irq_unmask(struct irq_data *d)
@@ -246,10 +246,10 @@ static void etraxfs_gpio_irq_unmask(struct irq_data *d)
struct etraxfs_gpio_block *block = chip->block;
unsigned int grpirq = etraxfs_gpio_to_group_irq(d->hwirq);
- spin_lock(&block->lock);
+ raw_spin_lock(&block->lock);
block->mask |= BIT(grpirq);
writel(block->mask, block->regs + block->info->rw_intr_mask);
- spin_unlock(&block->lock);
+ raw_spin_unlock(&block->lock);
}
static int etraxfs_gpio_irq_set_type(struct irq_data *d, u32 type)
@@ -280,11 +280,11 @@ static int etraxfs_gpio_irq_set_type(struct irq_data *d, u32 type)
return -EINVAL;
}
- spin_lock(&block->lock);
+ raw_spin_lock(&block->lock);
block->cfg &= ~(0x7 << (grpirq * 3));
block->cfg |= (cfg << (grpirq * 3));
writel(block->cfg, block->regs + block->info->rw_intr_cfg);
- spin_unlock(&block->lock);
+ raw_spin_unlock(&block->lock);
return 0;
}
@@ -297,7 +297,7 @@ static int etraxfs_gpio_irq_request_resources(struct irq_data *d)
unsigned int grpirq = etraxfs_gpio_to_group_irq(d->hwirq);
int ret = -EBUSY;
- spin_lock(&block->lock);
+ raw_spin_lock(&block->lock);
if (block->group[grpirq])
goto out;
@@ -316,7 +316,7 @@ static int etraxfs_gpio_irq_request_resources(struct irq_data *d)
}
out:
- spin_unlock(&block->lock);
+ raw_spin_unlock(&block->lock);
return ret;
}
@@ -327,10 +327,10 @@ static void etraxfs_gpio_irq_release_resources(struct irq_data *d)
struct etraxfs_gpio_block *block = chip->block;
unsigned int grpirq = etraxfs_gpio_to_group_irq(d->hwirq);
- spin_lock(&block->lock);
+ raw_spin_lock(&block->lock);
block->group[grpirq] = 0;
gpiochip_unlock_as_irq(&chip->gc, d->hwirq);
- spin_unlock(&block->lock);
+ raw_spin_unlock(&block->lock);
}
static struct irq_chip etraxfs_gpio_irq_chip = {
@@ -391,7 +391,7 @@ static int etraxfs_gpio_probe(struct platform_device *pdev)
if (!block)
return -ENOMEM;
- spin_lock_init(&block->lock);
+ raw_spin_lock_init(&block->lock);
block->regs = regs;
block->info = info;
diff --git a/drivers/gpio/gpio-exar.c b/drivers/gpio/gpio-exar.c
index 05c8946d6446..081076771217 100644
--- a/drivers/gpio/gpio-exar.c
+++ b/drivers/gpio/gpio-exar.c
@@ -59,17 +59,6 @@ static int exar_set_direction(struct gpio_chip *chip, int direction,
return 0;
}
-static int exar_direction_output(struct gpio_chip *chip, unsigned int offset,
- int value)
-{
- return exar_set_direction(chip, 0, offset);
-}
-
-static int exar_direction_input(struct gpio_chip *chip, unsigned int offset)
-{
- return exar_set_direction(chip, 1, offset);
-}
-
static int exar_get(struct gpio_chip *chip, unsigned int reg)
{
struct exar_gpio_chip *exar_gpio = gpiochip_get_data(chip);
@@ -116,6 +105,18 @@ static void exar_set_value(struct gpio_chip *chip, unsigned int offset,
exar_update(chip, addr, value, offset % 8);
}
+static int exar_direction_output(struct gpio_chip *chip, unsigned int offset,
+ int value)
+{
+ exar_set_value(chip, offset, value);
+ return exar_set_direction(chip, 0, offset);
+}
+
+static int exar_direction_input(struct gpio_chip *chip, unsigned int offset)
+{
+ return exar_set_direction(chip, 1, offset);
+}
+
static int gpio_exar_probe(struct platform_device *pdev)
{
struct pci_dev *pcidev = platform_get_drvdata(pdev);
diff --git a/drivers/gpio/gpio-f7188x.c b/drivers/gpio/gpio-f7188x.c
index 56bd76c33767..13350c9d7f5e 100644
--- a/drivers/gpio/gpio-f7188x.c
+++ b/drivers/gpio/gpio-f7188x.c
@@ -37,14 +37,16 @@
#define SIO_F71869A_ID 0x1007 /* F71869A chipset ID */
#define SIO_F71882_ID 0x0541 /* F71882 chipset ID */
#define SIO_F71889_ID 0x0909 /* F71889 chipset ID */
+#define SIO_F71889A_ID 0x1005 /* F71889A chipset ID */
#define SIO_F81866_ID 0x1010 /* F81866 chipset ID */
-enum chips { f71869, f71869a, f71882fg, f71889f, f81866 };
+enum chips { f71869, f71869a, f71882fg, f71889a, f71889f, f81866 };
static const char * const f7188x_names[] = {
"f71869",
"f71869a",
"f71882fg",
+ "f71889a",
"f71889f",
"f81866",
};
@@ -187,6 +189,17 @@ static struct f7188x_gpio_bank f71882_gpio_bank[] = {
F7188X_GPIO_BANK(40, 4, 0xB0),
};
+static struct f7188x_gpio_bank f71889a_gpio_bank[] = {
+ F7188X_GPIO_BANK(0, 7, 0xF0),
+ F7188X_GPIO_BANK(10, 7, 0xE0),
+ F7188X_GPIO_BANK(20, 8, 0xD0),
+ F7188X_GPIO_BANK(30, 8, 0xC0),
+ F7188X_GPIO_BANK(40, 8, 0xB0),
+ F7188X_GPIO_BANK(50, 5, 0xA0),
+ F7188X_GPIO_BANK(60, 8, 0x90),
+ F7188X_GPIO_BANK(70, 8, 0x80),
+};
+
static struct f7188x_gpio_bank f71889_gpio_bank[] = {
F7188X_GPIO_BANK(0, 7, 0xF0),
F7188X_GPIO_BANK(10, 7, 0xE0),
@@ -382,6 +395,10 @@ static int f7188x_gpio_probe(struct platform_device *pdev)
data->nr_bank = ARRAY_SIZE(f71882_gpio_bank);
data->bank = f71882_gpio_bank;
break;
+ case f71889a:
+ data->nr_bank = ARRAY_SIZE(f71889a_gpio_bank);
+ data->bank = f71889a_gpio_bank;
+ break;
case f71889f:
data->nr_bank = ARRAY_SIZE(f71889_gpio_bank);
data->bank = f71889_gpio_bank;
@@ -443,6 +460,9 @@ static int __init f7188x_find(int addr, struct f7188x_sio *sio)
case SIO_F71882_ID:
sio->type = f71882fg;
break;
+ case SIO_F71889A_ID:
+ sio->type = f71889a;
+ break;
case SIO_F71889_ID:
sio->type = f71889f;
break;
@@ -538,6 +558,6 @@ static void __exit f7188x_gpio_exit(void)
}
module_exit(f7188x_gpio_exit);
-MODULE_DESCRIPTION("GPIO driver for Super-I/O chips F71869, F71869A, F71882FG, F71889F and F81866");
+MODULE_DESCRIPTION("GPIO driver for Super-I/O chips F71869, F71869A, F71882FG, F71889A, F71889F and F81866");
MODULE_AUTHOR("Simon Guinot <simon.guinot@sequanux.org>");
MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpio-ftgpio010.c b/drivers/gpio/gpio-ftgpio010.c
new file mode 100644
index 000000000000..e9386f8b67f5
--- /dev/null
+++ b/drivers/gpio/gpio-ftgpio010.c
@@ -0,0 +1,242 @@
+/*
+ * Faraday Technology FTGPIO010 gpiochip and interrupt routines
+ * Copyright (C) 2017 Linus Walleij <linus.walleij@linaro.org>
+ *
+ * Based on arch/arm/mach-gemini/gpio.c:
+ * Copyright (C) 2008-2009 Paulius Zaleckas <paulius.zaleckas@teltonika.lt>
+ *
+ * Based on plat-mxc/gpio.c:
+ * MXC GPIO support. (c) 2008 Daniel Mack <daniel@caiaq.de>
+ * Copyright 2008 Juergen Beisert, kernel@pengutronix.de
+ */
+#include <linux/gpio/driver.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/of_gpio.h>
+#include <linux/bitops.h>
+
+/* GPIO registers definition */
+#define GPIO_DATA_OUT 0x00
+#define GPIO_DATA_IN 0x04
+#define GPIO_DIR 0x08
+#define GPIO_DATA_SET 0x10
+#define GPIO_DATA_CLR 0x14
+#define GPIO_PULL_EN 0x18
+#define GPIO_PULL_TYPE 0x1C
+#define GPIO_INT_EN 0x20
+#define GPIO_INT_STAT 0x24
+#define GPIO_INT_MASK 0x2C
+#define GPIO_INT_CLR 0x30
+#define GPIO_INT_TYPE 0x34
+#define GPIO_INT_BOTH_EDGE 0x38
+#define GPIO_INT_LEVEL 0x3C
+#define GPIO_DEBOUNCE_EN 0x40
+#define GPIO_DEBOUNCE_PRESCALE 0x44
+
+/**
+ * struct ftgpio_gpio - Gemini GPIO state container
+ * @dev: containing device for this instance
+ * @gc: gpiochip for this instance
+ */
+struct ftgpio_gpio {
+ struct device *dev;
+ struct gpio_chip gc;
+ void __iomem *base;
+};
+
+static void ftgpio_gpio_ack_irq(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct ftgpio_gpio *g = gpiochip_get_data(gc);
+
+ writel(BIT(irqd_to_hwirq(d)), g->base + GPIO_INT_CLR);
+}
+
+static void ftgpio_gpio_mask_irq(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct ftgpio_gpio *g = gpiochip_get_data(gc);
+ u32 val;
+
+ val = readl(g->base + GPIO_INT_EN);
+ val &= ~BIT(irqd_to_hwirq(d));
+ writel(val, g->base + GPIO_INT_EN);
+}
+
+static void ftgpio_gpio_unmask_irq(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct ftgpio_gpio *g = gpiochip_get_data(gc);
+ u32 val;
+
+ val = readl(g->base + GPIO_INT_EN);
+ val |= BIT(irqd_to_hwirq(d));
+ writel(val, g->base + GPIO_INT_EN);
+}
+
+static int ftgpio_gpio_set_irq_type(struct irq_data *d, unsigned int type)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct ftgpio_gpio *g = gpiochip_get_data(gc);
+ u32 mask = BIT(irqd_to_hwirq(d));
+ u32 reg_both, reg_level, reg_type;
+
+ reg_type = readl(g->base + GPIO_INT_TYPE);
+ reg_level = readl(g->base + GPIO_INT_LEVEL);
+ reg_both = readl(g->base + GPIO_INT_BOTH_EDGE);
+
+ switch (type) {
+ case IRQ_TYPE_EDGE_BOTH:
+ irq_set_handler_locked(d, handle_edge_irq);
+ reg_type &= ~mask;
+ reg_both |= mask;
+ break;
+ case IRQ_TYPE_EDGE_RISING:
+ irq_set_handler_locked(d, handle_edge_irq);
+ reg_type &= ~mask;
+ reg_both &= ~mask;
+ reg_level &= ~mask;
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ irq_set_handler_locked(d, handle_edge_irq);
+ reg_type &= ~mask;
+ reg_both &= ~mask;
+ reg_level |= mask;
+ break;
+ case IRQ_TYPE_LEVEL_HIGH:
+ irq_set_handler_locked(d, handle_level_irq);
+ reg_type |= mask;
+ reg_level &= ~mask;
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ irq_set_handler_locked(d, handle_level_irq);
+ reg_type |= mask;
+ reg_level |= mask;
+ break;
+ default:
+ irq_set_handler_locked(d, handle_bad_irq);
+ return -EINVAL;
+ }
+
+ writel(reg_type, g->base + GPIO_INT_TYPE);
+ writel(reg_level, g->base + GPIO_INT_LEVEL);
+ writel(reg_both, g->base + GPIO_INT_BOTH_EDGE);
+
+ ftgpio_gpio_ack_irq(d);
+
+ return 0;
+}
+
+static struct irq_chip ftgpio_gpio_irqchip = {
+ .name = "FTGPIO010",
+ .irq_ack = ftgpio_gpio_ack_irq,
+ .irq_mask = ftgpio_gpio_mask_irq,
+ .irq_unmask = ftgpio_gpio_unmask_irq,
+ .irq_set_type = ftgpio_gpio_set_irq_type,
+};
+
+static void ftgpio_gpio_irq_handler(struct irq_desc *desc)
+{
+ struct gpio_chip *gc = irq_desc_get_handler_data(desc);
+ struct ftgpio_gpio *g = gpiochip_get_data(gc);
+ struct irq_chip *irqchip = irq_desc_get_chip(desc);
+ int offset;
+ unsigned long stat;
+
+ chained_irq_enter(irqchip, desc);
+
+ stat = readl(g->base + GPIO_INT_STAT);
+ if (stat)
+ for_each_set_bit(offset, &stat, gc->ngpio)
+ generic_handle_irq(irq_find_mapping(gc->irqdomain,
+ offset));
+
+ chained_irq_exit(irqchip, desc);
+}
+
+static int ftgpio_gpio_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ struct ftgpio_gpio *g;
+ int irq;
+ int ret;
+
+ g = devm_kzalloc(dev, sizeof(*g), GFP_KERNEL);
+ if (!g)
+ return -ENOMEM;
+
+ g->dev = dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ g->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(g->base))
+ return PTR_ERR(g->base);
+
+ irq = platform_get_irq(pdev, 0);
+ if (!irq)
+ return -EINVAL;
+
+ ret = bgpio_init(&g->gc, dev, 4,
+ g->base + GPIO_DATA_IN,
+ g->base + GPIO_DATA_SET,
+ g->base + GPIO_DATA_CLR,
+ g->base + GPIO_DIR,
+ NULL,
+ 0);
+ if (ret) {
+ dev_err(dev, "unable to init generic GPIO\n");
+ return ret;
+ }
+ g->gc.label = "FTGPIO010";
+ g->gc.base = -1;
+ g->gc.parent = dev;
+ g->gc.owner = THIS_MODULE;
+ /* ngpio is set by bgpio_init() */
+
+ ret = devm_gpiochip_add_data(dev, &g->gc, g);
+ if (ret)
+ return ret;
+
+ /* Disable, unmask and clear all interrupts */
+ writel(0x0, g->base + GPIO_INT_EN);
+ writel(0x0, g->base + GPIO_INT_MASK);
+ writel(~0x0, g->base + GPIO_INT_CLR);
+
+ ret = gpiochip_irqchip_add(&g->gc, &ftgpio_gpio_irqchip,
+ 0, handle_bad_irq,
+ IRQ_TYPE_NONE);
+ if (ret) {
+ dev_info(dev, "could not add irqchip\n");
+ return ret;
+ }
+ gpiochip_set_chained_irqchip(&g->gc, &ftgpio_gpio_irqchip,
+ irq, ftgpio_gpio_irq_handler);
+
+ dev_info(dev, "FTGPIO010 @%p registered\n", g->base);
+
+ return 0;
+}
+
+static const struct of_device_id ftgpio_gpio_of_match[] = {
+ {
+ .compatible = "cortina,gemini-gpio",
+ },
+ {
+ .compatible = "moxa,moxart-gpio",
+ },
+ {
+ .compatible = "faraday,ftgpio010",
+ },
+ {},
+};
+
+static struct platform_driver ftgpio_gpio_driver = {
+ .driver = {
+ .name = "ftgpio010-gpio",
+ .of_match_table = of_match_ptr(ftgpio_gpio_of_match),
+ },
+ .probe = ftgpio_gpio_probe,
+};
+builtin_platform_driver(ftgpio_gpio_driver);
diff --git a/drivers/gpio/gpio-gemini.c b/drivers/gpio/gpio-gemini.c
deleted file mode 100644
index 962485163b7f..000000000000
--- a/drivers/gpio/gpio-gemini.c
+++ /dev/null
@@ -1,236 +0,0 @@
-/*
- * Gemini gpiochip and interrupt routines
- * Copyright (C) 2017 Linus Walleij <linus.walleij@linaro.org>
- *
- * Based on arch/arm/mach-gemini/gpio.c:
- * Copyright (C) 2008-2009 Paulius Zaleckas <paulius.zaleckas@teltonika.lt>
- *
- * Based on plat-mxc/gpio.c:
- * MXC GPIO support. (c) 2008 Daniel Mack <daniel@caiaq.de>
- * Copyright 2008 Juergen Beisert, kernel@pengutronix.de
- */
-#include <linux/gpio/driver.h>
-#include <linux/io.h>
-#include <linux/interrupt.h>
-#include <linux/platform_device.h>
-#include <linux/of_gpio.h>
-#include <linux/bitops.h>
-
-/* GPIO registers definition */
-#define GPIO_DATA_OUT 0x00
-#define GPIO_DATA_IN 0x04
-#define GPIO_DIR 0x08
-#define GPIO_DATA_SET 0x10
-#define GPIO_DATA_CLR 0x14
-#define GPIO_PULL_EN 0x18
-#define GPIO_PULL_TYPE 0x1C
-#define GPIO_INT_EN 0x20
-#define GPIO_INT_STAT 0x24
-#define GPIO_INT_MASK 0x2C
-#define GPIO_INT_CLR 0x30
-#define GPIO_INT_TYPE 0x34
-#define GPIO_INT_BOTH_EDGE 0x38
-#define GPIO_INT_LEVEL 0x3C
-#define GPIO_DEBOUNCE_EN 0x40
-#define GPIO_DEBOUNCE_PRESCALE 0x44
-
-/**
- * struct gemini_gpio - Gemini GPIO state container
- * @dev: containing device for this instance
- * @gc: gpiochip for this instance
- */
-struct gemini_gpio {
- struct device *dev;
- struct gpio_chip gc;
- void __iomem *base;
-};
-
-static void gemini_gpio_ack_irq(struct irq_data *d)
-{
- struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct gemini_gpio *g = gpiochip_get_data(gc);
-
- writel(BIT(irqd_to_hwirq(d)), g->base + GPIO_INT_CLR);
-}
-
-static void gemini_gpio_mask_irq(struct irq_data *d)
-{
- struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct gemini_gpio *g = gpiochip_get_data(gc);
- u32 val;
-
- val = readl(g->base + GPIO_INT_EN);
- val &= ~BIT(irqd_to_hwirq(d));
- writel(val, g->base + GPIO_INT_EN);
-}
-
-static void gemini_gpio_unmask_irq(struct irq_data *d)
-{
- struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct gemini_gpio *g = gpiochip_get_data(gc);
- u32 val;
-
- val = readl(g->base + GPIO_INT_EN);
- val |= BIT(irqd_to_hwirq(d));
- writel(val, g->base + GPIO_INT_EN);
-}
-
-static int gemini_gpio_set_irq_type(struct irq_data *d, unsigned int type)
-{
- struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct gemini_gpio *g = gpiochip_get_data(gc);
- u32 mask = BIT(irqd_to_hwirq(d));
- u32 reg_both, reg_level, reg_type;
-
- reg_type = readl(g->base + GPIO_INT_TYPE);
- reg_level = readl(g->base + GPIO_INT_LEVEL);
- reg_both = readl(g->base + GPIO_INT_BOTH_EDGE);
-
- switch (type) {
- case IRQ_TYPE_EDGE_BOTH:
- irq_set_handler_locked(d, handle_edge_irq);
- reg_type &= ~mask;
- reg_both |= mask;
- break;
- case IRQ_TYPE_EDGE_RISING:
- irq_set_handler_locked(d, handle_edge_irq);
- reg_type &= ~mask;
- reg_both &= ~mask;
- reg_level &= ~mask;
- break;
- case IRQ_TYPE_EDGE_FALLING:
- irq_set_handler_locked(d, handle_edge_irq);
- reg_type &= ~mask;
- reg_both &= ~mask;
- reg_level |= mask;
- break;
- case IRQ_TYPE_LEVEL_HIGH:
- irq_set_handler_locked(d, handle_level_irq);
- reg_type |= mask;
- reg_level &= ~mask;
- break;
- case IRQ_TYPE_LEVEL_LOW:
- irq_set_handler_locked(d, handle_level_irq);
- reg_type |= mask;
- reg_level |= mask;
- break;
- default:
- irq_set_handler_locked(d, handle_bad_irq);
- return -EINVAL;
- }
-
- writel(reg_type, g->base + GPIO_INT_TYPE);
- writel(reg_level, g->base + GPIO_INT_LEVEL);
- writel(reg_both, g->base + GPIO_INT_BOTH_EDGE);
-
- gemini_gpio_ack_irq(d);
-
- return 0;
-}
-
-static struct irq_chip gemini_gpio_irqchip = {
- .name = "GPIO",
- .irq_ack = gemini_gpio_ack_irq,
- .irq_mask = gemini_gpio_mask_irq,
- .irq_unmask = gemini_gpio_unmask_irq,
- .irq_set_type = gemini_gpio_set_irq_type,
-};
-
-static void gemini_gpio_irq_handler(struct irq_desc *desc)
-{
- struct gpio_chip *gc = irq_desc_get_handler_data(desc);
- struct gemini_gpio *g = gpiochip_get_data(gc);
- struct irq_chip *irqchip = irq_desc_get_chip(desc);
- int offset;
- unsigned long stat;
-
- chained_irq_enter(irqchip, desc);
-
- stat = readl(g->base + GPIO_INT_STAT);
- if (stat)
- for_each_set_bit(offset, &stat, gc->ngpio)
- generic_handle_irq(irq_find_mapping(gc->irqdomain,
- offset));
-
- chained_irq_exit(irqchip, desc);
-}
-
-static int gemini_gpio_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct resource *res;
- struct gemini_gpio *g;
- int irq;
- int ret;
-
- g = devm_kzalloc(dev, sizeof(*g), GFP_KERNEL);
- if (!g)
- return -ENOMEM;
-
- g->dev = dev;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- g->base = devm_ioremap_resource(dev, res);
- if (IS_ERR(g->base))
- return PTR_ERR(g->base);
-
- irq = platform_get_irq(pdev, 0);
- if (!irq)
- return -EINVAL;
-
- ret = bgpio_init(&g->gc, dev, 4,
- g->base + GPIO_DATA_IN,
- g->base + GPIO_DATA_SET,
- g->base + GPIO_DATA_CLR,
- g->base + GPIO_DIR,
- NULL,
- 0);
- if (ret) {
- dev_err(dev, "unable to init generic GPIO\n");
- return ret;
- }
- g->gc.label = "Gemini";
- g->gc.base = -1;
- g->gc.parent = dev;
- g->gc.owner = THIS_MODULE;
- /* ngpio is set by bgpio_init() */
-
- ret = devm_gpiochip_add_data(dev, &g->gc, g);
- if (ret)
- return ret;
-
- /* Disable, unmask and clear all interrupts */
- writel(0x0, g->base + GPIO_INT_EN);
- writel(0x0, g->base + GPIO_INT_MASK);
- writel(~0x0, g->base + GPIO_INT_CLR);
-
- ret = gpiochip_irqchip_add(&g->gc, &gemini_gpio_irqchip,
- 0, handle_bad_irq,
- IRQ_TYPE_NONE);
- if (ret) {
- dev_info(dev, "could not add irqchip\n");
- return ret;
- }
- gpiochip_set_chained_irqchip(&g->gc, &gemini_gpio_irqchip,
- irq, gemini_gpio_irq_handler);
-
- dev_info(dev, "Gemini GPIO @%p registered\n", g->base);
-
- return 0;
-}
-
-static const struct of_device_id gemini_gpio_of_match[] = {
- {
- .compatible = "cortina,gemini-gpio",
- },
- {},
-};
-
-static struct platform_driver gemini_gpio_driver = {
- .driver = {
- .name = "gemini-gpio",
- .of_match_table = of_match_ptr(gemini_gpio_of_match),
- },
- .probe = gemini_gpio_probe,
-};
-builtin_platform_driver(gemini_gpio_driver);
diff --git a/drivers/gpio/gpio-merrifield.c b/drivers/gpio/gpio-merrifield.c
index f40088d268c1..9dbdc3672f5e 100644
--- a/drivers/gpio/gpio-merrifield.c
+++ b/drivers/gpio/gpio-merrifield.c
@@ -166,7 +166,7 @@ static int mrfld_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
{
void __iomem *gpdr = gpio_reg(chip, offset, GPDR);
- return (readl(gpdr) & BIT(offset % 32)) ? GPIOF_DIR_OUT : GPIOF_DIR_IN;
+ return !(readl(gpdr) & BIT(offset % 32));
}
static int mrfld_gpio_set_debounce(struct gpio_chip *chip, unsigned int offset,
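The simplified mrfld_gpio_get_direction() above relies on the gpio_chip convention that .get_direction() returns 0 (GPIOF_DIR_OUT) for outputs and 1 (GPIOF_DIR_IN) for inputs, so negating the "direction bit set means output" test is equivalent to the old ternary. A minimal sketch of that convention, using a hypothetical register layout rather than Merrifield's:

#include <linux/bitops.h>
#include <linux/gpio/driver.h>
#include <linux/io.h>

/* Hypothetical chip: one 32-bit direction register, bit set == output. */
struct example_gpio {
	struct gpio_chip chip;
	void __iomem *dir_reg;
};

static int example_get_direction(struct gpio_chip *chip, unsigned int offset)
{
	struct example_gpio *eg = gpiochip_get_data(chip);

	/* 1 (in) when the direction bit is clear, 0 (out) when it is set */
	return !(readl(eg->dir_reg) & BIT(offset % 32));
}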
diff --git a/drivers/gpio/gpio-ml-ioh.c b/drivers/gpio/gpio-ml-ioh.c
index 796a5a4bc4f5..78896a869fd9 100644
--- a/drivers/gpio/gpio-ml-ioh.c
+++ b/drivers/gpio/gpio-ml-ioh.c
@@ -459,41 +459,31 @@ static int ioh_gpio_probe(struct pci_dev *pdev,
chip = chip_save;
for (j = 0; j < 8; j++, chip++) {
- irq_base = irq_alloc_descs(-1, IOH_IRQ_BASE, num_ports[j],
- NUMA_NO_NODE);
+ irq_base = devm_irq_alloc_descs(&pdev->dev, -1, IOH_IRQ_BASE,
+ num_ports[j], NUMA_NO_NODE);
if (irq_base < 0) {
dev_warn(&pdev->dev,
"ml_ioh_gpio: Failed to get IRQ base num\n");
- chip->irq_base = -1;
ret = irq_base;
- goto err_irq_alloc_descs;
+ goto err_gpiochip_add;
}
chip->irq_base = irq_base;
ioh_gpio_alloc_generic_chip(chip, irq_base, num_ports[j]);
}
chip = chip_save;
- ret = request_irq(pdev->irq, ioh_gpio_handler,
- IRQF_SHARED, KBUILD_MODNAME, chip);
+ ret = devm_request_irq(&pdev->dev, pdev->irq, ioh_gpio_handler,
+ IRQF_SHARED, KBUILD_MODNAME, chip);
if (ret != 0) {
dev_err(&pdev->dev,
"%s request_irq failed\n", __func__);
- goto err_request_irq;
+ goto err_gpiochip_add;
}
pci_set_drvdata(pdev, chip);
return 0;
-err_request_irq:
- chip = chip_save;
-err_irq_alloc_descs:
- while (--j >= 0) {
- chip--;
- irq_free_descs(chip->irq_base, num_ports[j]);
- }
-
- chip = chip_save;
err_gpiochip_add:
while (--i >= 0) {
chip--;
@@ -524,12 +514,8 @@ static void ioh_gpio_remove(struct pci_dev *pdev)
chip_save = chip;
- free_irq(pdev->irq, chip);
-
- for (i = 0; i < 8; i++, chip++) {
- irq_free_descs(chip->irq_base, num_ports[i]);
+ for (i = 0; i < 8; i++, chip++)
gpiochip_remove(&chip->gpio);
- }
chip = chip_save;
pci_iounmap(pdev, chip->base);
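This is the first of several conversions in the series (gpio-mockup, gpio-mxc, gpio-mxs, gpio-omap, gpio-pch, gpio-pxa, gpio-sodaville, gpio-sta2x11 and gpio-twl4030 follow) that switch to device-managed IRQ descriptors and handlers so the unwind labels and remove() cleanup can go away. A minimal sketch of the pattern, with purely illustrative names:

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/numa.h>
#include <linux/pci.h>

static irqreturn_t example_handler(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int example_setup_irqs(struct pci_dev *pdev, void *chip)
{
	int irq_base, ret;

	/* Both resources are released automatically when the device unbinds. */
	irq_base = devm_irq_alloc_descs(&pdev->dev, -1, 0, 8, NUMA_NO_NODE);
	if (irq_base < 0)
		return irq_base;

	ret = devm_request_irq(&pdev->dev, pdev->irq, example_handler,
			       IRQF_SHARED, KBUILD_MODNAME, chip);
	if (ret)
		return ret;	/* no irq_free_descs() needed on this path */

	return irq_base;
}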
diff --git a/drivers/gpio/gpio-mmio.c b/drivers/gpio/gpio-mmio.c
index d7d03ad052d0..f7da40e46c55 100644
--- a/drivers/gpio/gpio-mmio.c
+++ b/drivers/gpio/gpio-mmio.c
@@ -575,6 +575,7 @@ static void __iomem *bgpio_map(struct platform_device *pdev,
static const struct of_device_id bgpio_of_match[] = {
{ .compatible = "brcm,bcm6345-gpio" },
{ .compatible = "wd,mbl-gpio" },
+ { .compatible = "ni,169445-nand-gpio" },
{ }
};
MODULE_DEVICE_TABLE(of, bgpio_of_match);
diff --git a/drivers/gpio/gpio-mockup.c b/drivers/gpio/gpio-mockup.c
index d99338689213..c6dadac70593 100644
--- a/drivers/gpio/gpio-mockup.c
+++ b/drivers/gpio/gpio-mockup.c
@@ -169,7 +169,7 @@ static int gpio_mockup_irqchip_setup(struct device *dev,
struct gpio_chip *gc = &chip->gc;
int irq_base, i;
- irq_base = irq_alloc_descs(-1, 0, gc->ngpio, 0);
+ irq_base = devm_irq_alloc_descs(dev, -1, 0, gc->ngpio, 0);
if (irq_base < 0)
return irq_base;
@@ -372,25 +372,11 @@ static int gpio_mockup_probe(struct platform_device *pdev)
return 0;
}
-static int gpio_mockup_remove(struct platform_device *pdev)
-{
- struct gpio_mockup_chip *chips;
- int i;
-
- chips = platform_get_drvdata(pdev);
-
- for (i = 0; i < gpio_mockup_params_nr >> 1; i++)
- irq_free_descs(chips[i].gc.irq_base, chips[i].gc.ngpio);
-
- return 0;
-}
-
static struct platform_driver gpio_mockup_driver = {
.driver = {
.name = GPIO_MOCKUP_NAME,
},
.probe = gpio_mockup_probe,
- .remove = gpio_mockup_remove,
};
static struct platform_device *pdev;
diff --git a/drivers/gpio/gpio-moxart.c b/drivers/gpio/gpio-moxart.c
deleted file mode 100644
index d58d38906ba6..000000000000
--- a/drivers/gpio/gpio-moxart.c
+++ /dev/null
@@ -1,84 +0,0 @@
-/*
- * MOXA ART SoCs GPIO driver.
- *
- * Copyright (C) 2013 Jonas Jensen
- *
- * Jonas Jensen <jonas.jensen@gmail.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#include <linux/err.h>
-#include <linux/init.h>
-#include <linux/irq.h>
-#include <linux/io.h>
-#include <linux/platform_device.h>
-#include <linux/of_address.h>
-#include <linux/of_gpio.h>
-#include <linux/pinctrl/consumer.h>
-#include <linux/delay.h>
-#include <linux/timer.h>
-#include <linux/bitops.h>
-#include <linux/gpio/driver.h>
-
-#define GPIO_DATA_OUT 0x00
-#define GPIO_DATA_IN 0x04
-#define GPIO_PIN_DIRECTION 0x08
-
-static int moxart_gpio_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct resource *res;
- struct gpio_chip *gc;
- void __iomem *base;
- int ret;
-
- gc = devm_kzalloc(dev, sizeof(*gc), GFP_KERNEL);
- if (!gc)
- return -ENOMEM;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(dev, res);
- if (IS_ERR(base))
- return PTR_ERR(base);
-
- ret = bgpio_init(gc, dev, 4, base + GPIO_DATA_IN,
- base + GPIO_DATA_OUT, NULL,
- base + GPIO_PIN_DIRECTION, NULL,
- BGPIOF_READ_OUTPUT_REG_SET);
- if (ret) {
- dev_err(&pdev->dev, "bgpio_init failed\n");
- return ret;
- }
-
- gc->label = "moxart-gpio";
- gc->request = gpiochip_generic_request;
- gc->free = gpiochip_generic_free;
- gc->base = 0;
- gc->owner = THIS_MODULE;
-
- ret = devm_gpiochip_add_data(dev, gc, NULL);
- if (ret) {
- dev_err(dev, "%s: gpiochip_add failed\n",
- dev->of_node->full_name);
- return ret;
- }
-
- return ret;
-}
-
-static const struct of_device_id moxart_gpio_match[] = {
- { .compatible = "moxa,moxart-gpio" },
- { }
-};
-
-static struct platform_driver moxart_gpio_driver = {
- .driver = {
- .name = "moxart-gpio",
- .of_match_table = moxart_gpio_match,
- },
- .probe = moxart_gpio_probe,
-};
-builtin_platform_driver(moxart_gpio_driver);
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
index a649556ac3ca..19a92efabbef 100644
--- a/drivers/gpio/gpio-mvebu.c
+++ b/drivers/gpio/gpio-mvebu.c
@@ -42,29 +42,44 @@
#include <linux/io.h>
#include <linux/of_irq.h>
#include <linux/of_device.h>
+#include <linux/pwm.h>
#include <linux/clk.h>
#include <linux/pinctrl/consumer.h>
#include <linux/irqchip/chained_irq.h>
+#include <linux/platform_device.h>
+#include <linux/bitops.h>
+
+#include "gpiolib.h"
/*
* GPIO unit register offsets.
*/
-#define GPIO_OUT_OFF 0x0000
-#define GPIO_IO_CONF_OFF 0x0004
-#define GPIO_BLINK_EN_OFF 0x0008
-#define GPIO_IN_POL_OFF 0x000c
-#define GPIO_DATA_IN_OFF 0x0010
-#define GPIO_EDGE_CAUSE_OFF 0x0014
-#define GPIO_EDGE_MASK_OFF 0x0018
-#define GPIO_LEVEL_MASK_OFF 0x001c
+#define GPIO_OUT_OFF 0x0000
+#define GPIO_IO_CONF_OFF 0x0004
+#define GPIO_BLINK_EN_OFF 0x0008
+#define GPIO_IN_POL_OFF 0x000c
+#define GPIO_DATA_IN_OFF 0x0010
+#define GPIO_EDGE_CAUSE_OFF 0x0014
+#define GPIO_EDGE_MASK_OFF 0x0018
+#define GPIO_LEVEL_MASK_OFF 0x001c
+#define GPIO_BLINK_CNT_SELECT_OFF 0x0020
+
+/*
+ * PWM register offsets.
+ */
+#define PWM_BLINK_ON_DURATION_OFF 0x0
+#define PWM_BLINK_OFF_DURATION_OFF 0x4
+
/* The MV78200 has per-CPU registers for edge mask and level mask */
#define GPIO_EDGE_MASK_MV78200_OFF(cpu) ((cpu) ? 0x30 : 0x18)
#define GPIO_LEVEL_MASK_MV78200_OFF(cpu) ((cpu) ? 0x34 : 0x1C)
-/* The Armada XP has per-CPU registers for interrupt cause, interrupt
+/*
+ * The Armada XP has per-CPU registers for interrupt cause, interrupt
* mask and interrupt level mask. Those are relative to the
- * percpu_membase. */
+ * percpu_membase.
+ */
#define GPIO_EDGE_CAUSE_ARMADAXP_OFF(cpu) ((cpu) * 0x4)
#define GPIO_EDGE_MASK_ARMADAXP_OFF(cpu) (0x10 + (cpu) * 0x4)
#define GPIO_LEVEL_MASK_ARMADAXP_OFF(cpu) (0x20 + (cpu) * 0x4)
@@ -75,6 +90,20 @@
#define MVEBU_MAX_GPIO_PER_BANK 32
+struct mvebu_pwm {
+ void __iomem *membase;
+ unsigned long clk_rate;
+ struct gpio_desc *gpiod;
+ struct pwm_chip chip;
+ spinlock_t lock;
+ struct mvebu_gpio_chip *mvchip;
+
+ /* Used to preserve GPIO/PWM registers across suspend/resume */
+ u32 blink_select;
+ u32 blink_on_duration;
+ u32 blink_off_duration;
+};
+
struct mvebu_gpio_chip {
struct gpio_chip chip;
spinlock_t lock;
@@ -84,48 +113,55 @@ struct mvebu_gpio_chip {
struct irq_domain *domain;
int soc_variant;
+ /* Used for PWM support */
+ struct clk *clk;
+ struct mvebu_pwm *mvpwm;
+
/* Used to preserve GPIO registers across suspend/resume */
- u32 out_reg;
- u32 io_conf_reg;
- u32 blink_en_reg;
- u32 in_pol_reg;
- u32 edge_mask_regs[4];
- u32 level_mask_regs[4];
+ u32 out_reg;
+ u32 io_conf_reg;
+ u32 blink_en_reg;
+ u32 in_pol_reg;
+ u32 edge_mask_regs[4];
+ u32 level_mask_regs[4];
};
/*
* Functions returning addresses of individual registers for a given
* GPIO controller.
*/
-static inline void __iomem *mvebu_gpioreg_out(struct mvebu_gpio_chip *mvchip)
+static void __iomem *mvebu_gpioreg_out(struct mvebu_gpio_chip *mvchip)
{
return mvchip->membase + GPIO_OUT_OFF;
}
-static inline void __iomem *mvebu_gpioreg_blink(struct mvebu_gpio_chip *mvchip)
+static void __iomem *mvebu_gpioreg_blink(struct mvebu_gpio_chip *mvchip)
{
return mvchip->membase + GPIO_BLINK_EN_OFF;
}
-static inline void __iomem *
-mvebu_gpioreg_io_conf(struct mvebu_gpio_chip *mvchip)
+static void __iomem *mvebu_gpioreg_blink_counter_select(struct mvebu_gpio_chip
+ *mvchip)
+{
+ return mvchip->membase + GPIO_BLINK_CNT_SELECT_OFF;
+}
+
+static void __iomem *mvebu_gpioreg_io_conf(struct mvebu_gpio_chip *mvchip)
{
return mvchip->membase + GPIO_IO_CONF_OFF;
}
-static inline void __iomem *mvebu_gpioreg_in_pol(struct mvebu_gpio_chip *mvchip)
+static void __iomem *mvebu_gpioreg_in_pol(struct mvebu_gpio_chip *mvchip)
{
return mvchip->membase + GPIO_IN_POL_OFF;
}
-static inline void __iomem *
-mvebu_gpioreg_data_in(struct mvebu_gpio_chip *mvchip)
+static void __iomem *mvebu_gpioreg_data_in(struct mvebu_gpio_chip *mvchip)
{
return mvchip->membase + GPIO_DATA_IN_OFF;
}
-static inline void __iomem *
-mvebu_gpioreg_edge_cause(struct mvebu_gpio_chip *mvchip)
+static void __iomem *mvebu_gpioreg_edge_cause(struct mvebu_gpio_chip *mvchip)
{
int cpu;
@@ -142,8 +178,7 @@ mvebu_gpioreg_edge_cause(struct mvebu_gpio_chip *mvchip)
}
}
-static inline void __iomem *
-mvebu_gpioreg_edge_mask(struct mvebu_gpio_chip *mvchip)
+static void __iomem *mvebu_gpioreg_edge_mask(struct mvebu_gpio_chip *mvchip)
{
int cpu;
@@ -182,10 +217,23 @@ static void __iomem *mvebu_gpioreg_level_mask(struct mvebu_gpio_chip *mvchip)
}
/*
- * Functions implementing the gpio_chip methods
+ * Functions returning addresses of individual registers for a given
+ * PWM controller.
*/
+static void __iomem *mvebu_pwmreg_blink_on_duration(struct mvebu_pwm *mvpwm)
+{
+ return mvpwm->membase + PWM_BLINK_ON_DURATION_OFF;
+}
+
+static void __iomem *mvebu_pwmreg_blink_off_duration(struct mvebu_pwm *mvpwm)
+{
+ return mvpwm->membase + PWM_BLINK_OFF_DURATION_OFF;
+}
-static void mvebu_gpio_set(struct gpio_chip *chip, unsigned pin, int value)
+/*
+ * Functions implementing the gpio_chip methods
+ */
+static void mvebu_gpio_set(struct gpio_chip *chip, unsigned int pin, int value)
{
struct mvebu_gpio_chip *mvchip = gpiochip_get_data(chip);
unsigned long flags;
@@ -194,19 +242,19 @@ static void mvebu_gpio_set(struct gpio_chip *chip, unsigned pin, int value)
spin_lock_irqsave(&mvchip->lock, flags);
u = readl_relaxed(mvebu_gpioreg_out(mvchip));
if (value)
- u |= 1 << pin;
+ u |= BIT(pin);
else
- u &= ~(1 << pin);
+ u &= ~BIT(pin);
writel_relaxed(u, mvebu_gpioreg_out(mvchip));
spin_unlock_irqrestore(&mvchip->lock, flags);
}
-static int mvebu_gpio_get(struct gpio_chip *chip, unsigned pin)
+static int mvebu_gpio_get(struct gpio_chip *chip, unsigned int pin)
{
struct mvebu_gpio_chip *mvchip = gpiochip_get_data(chip);
u32 u;
- if (readl_relaxed(mvebu_gpioreg_io_conf(mvchip)) & (1 << pin)) {
+ if (readl_relaxed(mvebu_gpioreg_io_conf(mvchip)) & BIT(pin)) {
u = readl_relaxed(mvebu_gpioreg_data_in(mvchip)) ^
readl_relaxed(mvebu_gpioreg_in_pol(mvchip));
} else {
@@ -216,7 +264,8 @@ static int mvebu_gpio_get(struct gpio_chip *chip, unsigned pin)
return (u >> pin) & 1;
}
-static void mvebu_gpio_blink(struct gpio_chip *chip, unsigned pin, int value)
+static void mvebu_gpio_blink(struct gpio_chip *chip, unsigned int pin,
+ int value)
{
struct mvebu_gpio_chip *mvchip = gpiochip_get_data(chip);
unsigned long flags;
@@ -225,36 +274,38 @@ static void mvebu_gpio_blink(struct gpio_chip *chip, unsigned pin, int value)
spin_lock_irqsave(&mvchip->lock, flags);
u = readl_relaxed(mvebu_gpioreg_blink(mvchip));
if (value)
- u |= 1 << pin;
+ u |= BIT(pin);
else
- u &= ~(1 << pin);
+ u &= ~BIT(pin);
writel_relaxed(u, mvebu_gpioreg_blink(mvchip));
spin_unlock_irqrestore(&mvchip->lock, flags);
}
-static int mvebu_gpio_direction_input(struct gpio_chip *chip, unsigned pin)
+static int mvebu_gpio_direction_input(struct gpio_chip *chip, unsigned int pin)
{
struct mvebu_gpio_chip *mvchip = gpiochip_get_data(chip);
unsigned long flags;
int ret;
u32 u;
- /* Check with the pinctrl driver whether this pin is usable as
- * an input GPIO */
+ /*
+ * Check with the pinctrl driver whether this pin is usable as
+ * an input GPIO
+ */
ret = pinctrl_gpio_direction_input(chip->base + pin);
if (ret)
return ret;
spin_lock_irqsave(&mvchip->lock, flags);
u = readl_relaxed(mvebu_gpioreg_io_conf(mvchip));
- u |= 1 << pin;
+ u |= BIT(pin);
writel_relaxed(u, mvebu_gpioreg_io_conf(mvchip));
spin_unlock_irqrestore(&mvchip->lock, flags);
return 0;
}
-static int mvebu_gpio_direction_output(struct gpio_chip *chip, unsigned pin,
+static int mvebu_gpio_direction_output(struct gpio_chip *chip, unsigned int pin,
int value)
{
struct mvebu_gpio_chip *mvchip = gpiochip_get_data(chip);
@@ -262,8 +313,10 @@ static int mvebu_gpio_direction_output(struct gpio_chip *chip, unsigned pin,
int ret;
u32 u;
- /* Check with the pinctrl driver whether this pin is usable as
- * an output GPIO */
+ /*
+ * Check with the pinctrl driver whether this pin is usable as
+ * an output GPIO
+ */
ret = pinctrl_gpio_direction_output(chip->base + pin);
if (ret)
return ret;
@@ -273,16 +326,17 @@ static int mvebu_gpio_direction_output(struct gpio_chip *chip, unsigned pin,
spin_lock_irqsave(&mvchip->lock, flags);
u = readl_relaxed(mvebu_gpioreg_io_conf(mvchip));
- u &= ~(1 << pin);
+ u &= ~BIT(pin);
writel_relaxed(u, mvebu_gpioreg_io_conf(mvchip));
spin_unlock_irqrestore(&mvchip->lock, flags);
return 0;
}
-static int mvebu_gpio_to_irq(struct gpio_chip *chip, unsigned pin)
+static int mvebu_gpio_to_irq(struct gpio_chip *chip, unsigned int pin)
{
struct mvebu_gpio_chip *mvchip = gpiochip_get_data(chip);
+
return irq_create_mapping(mvchip->domain, pin);
}
@@ -389,7 +443,7 @@ static int mvebu_gpio_irq_set_type(struct irq_data *d, unsigned int type)
pin = d->hwirq;
- u = readl_relaxed(mvebu_gpioreg_io_conf(mvchip)) & (1 << pin);
+ u = readl_relaxed(mvebu_gpioreg_io_conf(mvchip)) & BIT(pin);
if (!u)
return -EINVAL;
@@ -409,13 +463,13 @@ static int mvebu_gpio_irq_set_type(struct irq_data *d, unsigned int type)
case IRQ_TYPE_EDGE_RISING:
case IRQ_TYPE_LEVEL_HIGH:
u = readl_relaxed(mvebu_gpioreg_in_pol(mvchip));
- u &= ~(1 << pin);
+ u &= ~BIT(pin);
writel_relaxed(u, mvebu_gpioreg_in_pol(mvchip));
break;
case IRQ_TYPE_EDGE_FALLING:
case IRQ_TYPE_LEVEL_LOW:
u = readl_relaxed(mvebu_gpioreg_in_pol(mvchip));
- u |= 1 << pin;
+ u |= BIT(pin);
writel_relaxed(u, mvebu_gpioreg_in_pol(mvchip));
break;
case IRQ_TYPE_EDGE_BOTH: {
@@ -428,10 +482,10 @@ static int mvebu_gpio_irq_set_type(struct irq_data *d, unsigned int type)
* set initial polarity based on current input level
*/
u = readl_relaxed(mvebu_gpioreg_in_pol(mvchip));
- if (v & (1 << pin))
- u |= 1 << pin; /* falling */
+ if (v & BIT(pin))
+ u |= BIT(pin); /* falling */
else
- u &= ~(1 << pin); /* rising */
+ u &= ~BIT(pin); /* rising */
writel_relaxed(u, mvebu_gpioreg_in_pol(mvchip));
break;
}
@@ -461,7 +515,7 @@ static void mvebu_gpio_irq_handler(struct irq_desc *desc)
irq = irq_find_mapping(mvchip->domain, i);
- if (!(cause & (1 << i)))
+ if (!(cause & BIT(i)))
continue;
type = irq_get_trigger_type(irq);
@@ -470,7 +524,7 @@ static void mvebu_gpio_irq_handler(struct irq_desc *desc)
u32 polarity;
polarity = readl_relaxed(mvebu_gpioreg_in_pol(mvchip));
- polarity ^= 1 << i;
+ polarity ^= BIT(i);
writel_relaxed(polarity, mvebu_gpioreg_in_pol(mvchip));
}
@@ -480,6 +534,246 @@ static void mvebu_gpio_irq_handler(struct irq_desc *desc)
chained_irq_exit(chip, desc);
}
+/*
+ * Functions implementing the pwm_chip methods
+ */
+static struct mvebu_pwm *to_mvebu_pwm(struct pwm_chip *chip)
+{
+ return container_of(chip, struct mvebu_pwm, chip);
+}
+
+static int mvebu_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct mvebu_pwm *mvpwm = to_mvebu_pwm(chip);
+ struct mvebu_gpio_chip *mvchip = mvpwm->mvchip;
+ struct gpio_desc *desc;
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&mvpwm->lock, flags);
+
+ if (mvpwm->gpiod) {
+ ret = -EBUSY;
+ } else {
+ desc = gpio_to_desc(mvchip->chip.base + pwm->hwpwm);
+ if (!desc) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ ret = gpiod_request(desc, "mvebu-pwm");
+ if (ret)
+ goto out;
+
+ ret = gpiod_direction_output(desc, 0);
+ if (ret) {
+ gpiod_free(desc);
+ goto out;
+ }
+
+ mvpwm->gpiod = desc;
+ }
+out:
+ spin_unlock_irqrestore(&mvpwm->lock, flags);
+ return ret;
+}
+
+static void mvebu_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct mvebu_pwm *mvpwm = to_mvebu_pwm(chip);
+ unsigned long flags;
+
+ spin_lock_irqsave(&mvpwm->lock, flags);
+ gpiod_free(mvpwm->gpiod);
+ mvpwm->gpiod = NULL;
+ spin_unlock_irqrestore(&mvpwm->lock, flags);
+}
+
+static void mvebu_pwm_get_state(struct pwm_chip *chip,
+ struct pwm_device *pwm,
+				struct pwm_state *state)
+{
+	struct mvebu_pwm *mvpwm = to_mvebu_pwm(chip);
+ struct mvebu_gpio_chip *mvchip = mvpwm->mvchip;
+ unsigned long long val;
+ unsigned long flags;
+ u32 u;
+
+ spin_lock_irqsave(&mvpwm->lock, flags);
+
+ val = (unsigned long long)
+ readl_relaxed(mvebu_pwmreg_blink_on_duration(mvpwm));
+ val *= NSEC_PER_SEC;
+ do_div(val, mvpwm->clk_rate);
+ if (val > UINT_MAX)
+ state->duty_cycle = UINT_MAX;
+ else if (val)
+ state->duty_cycle = val;
+ else
+ state->duty_cycle = 1;
+
+ val = (unsigned long long)
+ readl_relaxed(mvebu_pwmreg_blink_off_duration(mvpwm));
+ val *= NSEC_PER_SEC;
+ do_div(val, mvpwm->clk_rate);
+ if (val < state->duty_cycle) {
+ state->period = 1;
+ } else {
+ val -= state->duty_cycle;
+ if (val > UINT_MAX)
+ state->period = UINT_MAX;
+ else if (val)
+ state->period = val;
+ else
+ state->period = 1;
+ }
+
+ u = readl_relaxed(mvebu_gpioreg_blink(mvchip));
+ if (u)
+ state->enabled = true;
+ else
+ state->enabled = false;
+
+ spin_unlock_irqrestore(&mvpwm->lock, flags);
+}
+
+static int mvebu_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ struct pwm_state *state)
+{
+ struct mvebu_pwm *mvpwm = to_mvebu_pwm(chip);
+ struct mvebu_gpio_chip *mvchip = mvpwm->mvchip;
+ unsigned long long val;
+ unsigned long flags;
+ unsigned int on, off;
+
+ val = (unsigned long long) mvpwm->clk_rate * state->duty_cycle;
+ do_div(val, NSEC_PER_SEC);
+ if (val > UINT_MAX)
+ return -EINVAL;
+ if (val)
+ on = val;
+ else
+ on = 1;
+
+ val = (unsigned long long) mvpwm->clk_rate *
+ (state->period - state->duty_cycle);
+ do_div(val, NSEC_PER_SEC);
+ if (val > UINT_MAX)
+ return -EINVAL;
+ if (val)
+ off = val;
+ else
+ off = 1;
+
+ spin_lock_irqsave(&mvpwm->lock, flags);
+
+ writel_relaxed(on, mvebu_pwmreg_blink_on_duration(mvpwm));
+ writel_relaxed(off, mvebu_pwmreg_blink_off_duration(mvpwm));
+ if (state->enabled)
+ mvebu_gpio_blink(&mvchip->chip, pwm->hwpwm, 1);
+ else
+ mvebu_gpio_blink(&mvchip->chip, pwm->hwpwm, 0);
+
+ spin_unlock_irqrestore(&mvpwm->lock, flags);
+
+ return 0;
+}
+
+static const struct pwm_ops mvebu_pwm_ops = {
+ .request = mvebu_pwm_request,
+ .free = mvebu_pwm_free,
+ .get_state = mvebu_pwm_get_state,
+ .apply = mvebu_pwm_apply,
+ .owner = THIS_MODULE,
+};
+
+static void __maybe_unused mvebu_pwm_suspend(struct mvebu_gpio_chip *mvchip)
+{
+ struct mvebu_pwm *mvpwm = mvchip->mvpwm;
+
+ mvpwm->blink_select =
+ readl_relaxed(mvebu_gpioreg_blink_counter_select(mvchip));
+ mvpwm->blink_on_duration =
+ readl_relaxed(mvebu_pwmreg_blink_on_duration(mvpwm));
+ mvpwm->blink_off_duration =
+ readl_relaxed(mvebu_pwmreg_blink_off_duration(mvpwm));
+}
+
+static void __maybe_unused mvebu_pwm_resume(struct mvebu_gpio_chip *mvchip)
+{
+ struct mvebu_pwm *mvpwm = mvchip->mvpwm;
+
+ writel_relaxed(mvpwm->blink_select,
+ mvebu_gpioreg_blink_counter_select(mvchip));
+ writel_relaxed(mvpwm->blink_on_duration,
+ mvebu_pwmreg_blink_on_duration(mvpwm));
+ writel_relaxed(mvpwm->blink_off_duration,
+ mvebu_pwmreg_blink_off_duration(mvpwm));
+}
+
+static int mvebu_pwm_probe(struct platform_device *pdev,
+ struct mvebu_gpio_chip *mvchip,
+ int id)
+{
+ struct device *dev = &pdev->dev;
+ struct mvebu_pwm *mvpwm;
+ struct resource *res;
+ u32 set;
+
+ if (!of_device_is_compatible(mvchip->chip.of_node,
+ "marvell,armada-370-xp-gpio"))
+ return 0;
+
+ if (IS_ERR(mvchip->clk))
+ return PTR_ERR(mvchip->clk);
+
+ /*
+	 * There are only two sets of PWM configuration registers for
+	 * all the GPIO lines on these SoCs; this driver reserves them
+	 * for the first two GPIO chips. So a missing resource cannot
+	 * be treated as an error.
+ */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pwm");
+ if (!res)
+ return 0;
+
+ /*
+ * Use set A for lines of GPIO chip with id 0, B for GPIO chip
+ * with id 1. Don't allow further GPIO chips to be used for PWM.
+ */
+ if (id == 0)
+ set = 0;
+ else if (id == 1)
+ set = U32_MAX;
+ else
+ return -EINVAL;
+	writel_relaxed(set, mvebu_gpioreg_blink_counter_select(mvchip));
+
+ mvpwm = devm_kzalloc(dev, sizeof(struct mvebu_pwm), GFP_KERNEL);
+ if (!mvpwm)
+ return -ENOMEM;
+ mvchip->mvpwm = mvpwm;
+ mvpwm->mvchip = mvchip;
+
+ mvpwm->membase = devm_ioremap_resource(dev, res);
+ if (IS_ERR(mvpwm->membase))
+ return PTR_ERR(mvpwm->membase);
+
+ mvpwm->clk_rate = clk_get_rate(mvchip->clk);
+ if (!mvpwm->clk_rate) {
+ dev_err(dev, "failed to get clock rate\n");
+ return -EINVAL;
+ }
+
+ mvpwm->chip.dev = dev;
+ mvpwm->chip.ops = &mvebu_pwm_ops;
+ mvpwm->chip.npwm = mvchip->chip.ngpio;
+
+ spin_lock_init(&mvpwm->lock);
+
+ return pwmchip_add(&mvpwm->chip);
+}
+
#ifdef CONFIG_DEBUG_FS
#include <linux/seq_file.h>
@@ -507,7 +801,7 @@ static void mvebu_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
if (!label)
continue;
- msk = 1 << i;
+ msk = BIT(i);
is_out = !(io_conf & msk);
seq_printf(s, " gpio-%-3d (%-20.20s)", chip->base + i, label);
@@ -551,6 +845,10 @@ static const struct of_device_id mvebu_gpio_of_match[] = {
.data = (void *) MVEBU_GPIO_SOC_VARIANT_ARMADAXP,
},
{
+ .compatible = "marvell,armada-370-xp-gpio",
+ .data = (void *) MVEBU_GPIO_SOC_VARIANT_ORION,
+ },
+ {
/* sentinel */
},
};
@@ -596,6 +894,9 @@ static int mvebu_gpio_suspend(struct platform_device *pdev, pm_message_t state)
BUG();
}
+ if (IS_ENABLED(CONFIG_PWM))
+ mvebu_pwm_suspend(mvchip);
+
return 0;
}
@@ -639,6 +940,9 @@ static int mvebu_gpio_resume(struct platform_device *pdev)
BUG();
}
+ if (IS_ENABLED(CONFIG_PWM))
+ mvebu_pwm_resume(mvchip);
+
return 0;
}
@@ -650,7 +954,6 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
struct resource *res;
struct irq_chip_generic *gc;
struct irq_chip_type *ct;
- struct clk *clk;
unsigned int ngpios;
bool have_irqs;
int soc_variant;
@@ -684,10 +987,10 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
return id;
}
- clk = devm_clk_get(&pdev->dev, NULL);
+ mvchip->clk = devm_clk_get(&pdev->dev, NULL);
 	/* Not all SoCs require a clock. */
- if (!IS_ERR(clk))
- clk_prepare_enable(clk);
+ if (!IS_ERR(mvchip->clk))
+ clk_prepare_enable(mvchip->clk);
mvchip->soc_variant = soc_variant;
mvchip->chip.label = dev_name(&pdev->dev);
@@ -712,8 +1015,10 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
if (IS_ERR(mvchip->membase))
return PTR_ERR(mvchip->membase);
- /* The Armada XP has a second range of registers for the
- * per-CPU registers */
+ /*
+ * The Armada XP has a second range of registers for the
+ * per-CPU registers
+ */
if (soc_variant == MVEBU_GPIO_SOC_VARIANT_ARMADAXP) {
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
mvchip->percpu_membase = devm_ioremap_resource(&pdev->dev,
@@ -780,7 +1085,8 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
goto err_domain;
}
- /* NOTE: The common accessors cannot be used because of the percpu
+ /*
+ * NOTE: The common accessors cannot be used because of the percpu
* access to the mask registers
*/
gc = irq_get_domain_generic_chip(mvchip->domain, 0);
@@ -801,7 +1107,8 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
ct->handler = handle_edge_irq;
ct->chip.name = mvchip->chip.label;
- /* Setup the interrupt handlers. Each chip can have up to 4
+ /*
+	 * Set up the interrupt handlers. Each chip can have up to 4
* interrupt handlers, with each handler dealing with 8 GPIO
* pins.
*/
@@ -814,6 +1121,10 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
mvchip);
}
+ /* Armada 370/XP has simple PWM support for GPIO lines */
+ if (IS_ENABLED(CONFIG_PWM))
+ return mvebu_pwm_probe(pdev, mvchip, id);
+
return 0;
err_domain:
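The blink counters that mvebu_pwm_apply() programs count clock ticks, so both durations are converted from nanoseconds with a 64-bit multiply and do_div(), then clamped to at least one tick because the hardware cannot encode zero. A standalone sketch of that arithmetic (the helper name is invented):

#include <asm/div64.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/time64.h>

static int example_ns_to_ticks(unsigned long clk_rate, u64 duty_ns,
			       u64 period_ns, u32 *on, u32 *off)
{
	u64 val;

	val = (u64)clk_rate * duty_ns;
	do_div(val, NSEC_PER_SEC);
	if (val > UINT_MAX)
		return -EINVAL;
	*on = val ? val : 1;		/* the counter cannot encode zero */

	val = (u64)clk_rate * (period_ns - duty_ns);
	do_div(val, NSEC_PER_SEC);
	if (val > UINT_MAX)
		return -EINVAL;
	*off = val ? val : 1;

	return 0;
}

With a 25 MHz clock, for example, a 1 ms duty cycle in a 2 ms period programs on = off = 25000 ticks.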
diff --git a/drivers/gpio/gpio-mxc.c b/drivers/gpio/gpio-mxc.c
index c1a1e00b8cb0..3abea3f0b307 100644
--- a/drivers/gpio/gpio-mxc.c
+++ b/drivers/gpio/gpio-mxc.c
@@ -471,7 +471,7 @@ static int mxc_gpio_probe(struct platform_device *pdev)
if (err)
goto out_bgio;
- irq_base = irq_alloc_descs(-1, 0, 32, numa_node_id());
+ irq_base = devm_irq_alloc_descs(&pdev->dev, -1, 0, 32, numa_node_id());
if (irq_base < 0) {
err = irq_base;
goto out_bgio;
@@ -481,7 +481,7 @@ static int mxc_gpio_probe(struct platform_device *pdev)
&irq_domain_simple_ops, NULL);
if (!port->domain) {
err = -ENODEV;
- goto out_irqdesc_free;
+ goto out_bgio;
}
/* gpio-mxc can be a generic irq chip */
@@ -495,8 +495,6 @@ static int mxc_gpio_probe(struct platform_device *pdev)
out_irqdomain_remove:
irq_domain_remove(port->domain);
-out_irqdesc_free:
- irq_free_descs(irq_base, 32);
out_bgio:
dev_info(&pdev->dev, "%s failed with errno %d\n", __func__, err);
return err;
diff --git a/drivers/gpio/gpio-mxs.c b/drivers/gpio/gpio-mxs.c
index 2292742eac8f..6ae583f36733 100644
--- a/drivers/gpio/gpio-mxs.c
+++ b/drivers/gpio/gpio-mxs.c
@@ -328,7 +328,7 @@ static int mxs_gpio_probe(struct platform_device *pdev)
/* clear address has to be used to clear IRQSTAT bits */
writel(~0U, port->base + PINCTRL_IRQSTAT(port) + MXS_CLR);
- irq_base = irq_alloc_descs(-1, 0, 32, numa_node_id());
+ irq_base = devm_irq_alloc_descs(&pdev->dev, -1, 0, 32, numa_node_id());
if (irq_base < 0) {
err = irq_base;
goto out_iounmap;
@@ -338,7 +338,7 @@ static int mxs_gpio_probe(struct platform_device *pdev)
&irq_domain_simple_ops, NULL);
if (!port->domain) {
err = -ENODEV;
- goto out_irqdesc_free;
+ goto out_iounmap;
}
/* gpio-mxs can be a generic irq chip */
@@ -370,8 +370,6 @@ static int mxs_gpio_probe(struct platform_device *pdev)
out_irqdomain_remove:
irq_domain_remove(port->domain);
-out_irqdesc_free:
- irq_free_descs(irq_base, 32);
out_iounmap:
iounmap(port->base);
return err;
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index efc85a279d54..f8c550de6c72 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -208,9 +208,11 @@ static inline void omap_gpio_dbck_disable(struct gpio_bank *bank)
* OMAP's debounce time is in 31us steps
* <debounce time> = (GPIO_DEBOUNCINGTIME[7:0].DEBOUNCETIME + 1) x 31
* so we need to convert and round up to the closest unit.
+ *
+ * Return: 0 on success, negative error otherwise.
*/
-static void omap2_set_gpio_debounce(struct gpio_bank *bank, unsigned offset,
- unsigned debounce)
+static int omap2_set_gpio_debounce(struct gpio_bank *bank, unsigned offset,
+ unsigned debounce)
{
void __iomem *reg;
u32 val;
@@ -218,11 +220,12 @@ static void omap2_set_gpio_debounce(struct gpio_bank *bank, unsigned offset,
bool enable = !!debounce;
if (!bank->dbck_flag)
- return;
+ return -ENOTSUPP;
if (enable) {
debounce = DIV_ROUND_UP(debounce, 31) - 1;
- debounce &= OMAP4_GPIO_DEBOUNCINGTIME_MASK;
+ if ((debounce & OMAP4_GPIO_DEBOUNCINGTIME_MASK) != debounce)
+ return -EINVAL;
}
l = BIT(offset);
@@ -255,6 +258,8 @@ static void omap2_set_gpio_debounce(struct gpio_bank *bank, unsigned offset,
bank->context.debounce = debounce;
bank->context.debounce_en = val;
}
+
+ return 0;
}
/**
@@ -964,14 +969,20 @@ static int omap_gpio_debounce(struct gpio_chip *chip, unsigned offset,
{
struct gpio_bank *bank;
unsigned long flags;
+ int ret;
bank = gpiochip_get_data(chip);
raw_spin_lock_irqsave(&bank->lock, flags);
- omap2_set_gpio_debounce(bank, offset, debounce);
+ ret = omap2_set_gpio_debounce(bank, offset, debounce);
raw_spin_unlock_irqrestore(&bank->lock, flags);
- return 0;
+ if (ret)
+ dev_info(chip->parent,
+			 "Could not set line %u debounce to %u microseconds (%d)\n",
+ offset, debounce, ret);
+
+ return ret;
}
static int omap_gpio_set_config(struct gpio_chip *chip, unsigned offset,
@@ -1085,7 +1096,8 @@ static int omap_gpio_chip_init(struct gpio_bank *bank, struct irq_chip *irqc)
* REVISIT: Once we have OMAP1 supporting SPARSE_IRQ, we can drop
* irq_alloc_descs() since a base IRQ offset will no longer be needed.
*/
- irq_base = irq_alloc_descs(-1, 0, bank->width, 0);
+ irq_base = devm_irq_alloc_descs(bank->chip.parent,
+ -1, 0, bank->width, 0);
if (irq_base < 0) {
dev_err(bank->chip.parent, "Couldn't allocate IRQ numbers\n");
return -ENODEV;
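omap2_set_gpio_debounce() above maps a microsecond request onto the 8-bit DEBOUNCETIME field in 31 us steps and now returns an error for values that do not fit instead of silently truncating them. A self-contained sketch of the same conversion (helper and mask names are illustrative):

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/types.h>

#define EXAMPLE_DEBOUNCE_MASK	0xff	/* 8-bit DEBOUNCETIME field */

static int example_debounce_to_reg(unsigned int debounce_us, u32 *field)
{
	u32 val;

	if (!debounce_us) {
		*field = 0;		/* debounce disabled */
		return 0;
	}

	val = DIV_ROUND_UP(debounce_us, 31) - 1;
	if ((val & EXAMPLE_DEBOUNCE_MASK) != val)
		return -EINVAL;		/* longer than the field can hold */

	*field = val;
	return 0;
}

For example, 100 us rounds up to a field value of 3, i.e. an effective (3 + 1) * 31 = 124 us, while anything above (0xff + 1) * 31 = 7936 us now fails with -EINVAL.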
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index d44232aadb6c..4c9e21300a26 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -11,18 +11,19 @@
* the Free Software Foundation; version 2 of the License.
*/
-#include <linux/module.h>
-#include <linux/init.h>
+#include <linux/acpi.h>
#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
-#include <linux/interrupt.h>
#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
#include <linux/platform_data/pca953x.h>
+#include <linux/regulator/consumer.h>
#include <linux/slab.h>
+
#include <asm/unaligned.h>
-#include <linux/of_platform.h>
-#include <linux/acpi.h>
-#include <linux/regulator/consumer.h>
#define PCA953X_INPUT 0
#define PCA953X_OUTPUT 1
@@ -81,6 +82,7 @@ static const struct i2c_device_id pca953x_id[] = {
{ "tca6416", 16 | PCA953X_TYPE | PCA_INT, },
{ "tca6424", 24 | PCA953X_TYPE | PCA_INT, },
{ "tca9539", 16 | PCA953X_TYPE | PCA_INT, },
+ { "tca9554", 8 | PCA953X_TYPE | PCA_INT, },
{ "xra1202", 8 | PCA953X_TYPE },
{ }
};
@@ -363,6 +365,21 @@ exit:
mutex_unlock(&chip->i2c_lock);
}
+static int pca953x_gpio_get_direction(struct gpio_chip *gc, unsigned off)
+{
+ struct pca953x_chip *chip = gpiochip_get_data(gc);
+ u32 reg_val;
+ int ret;
+
+ mutex_lock(&chip->i2c_lock);
+ ret = pca953x_read_single(chip, chip->regs->direction, &reg_val, off);
+ mutex_unlock(&chip->i2c_lock);
+ if (ret < 0)
+ return ret;
+
+ return !!(reg_val & (1u << (off % BANK_SZ)));
+}
+
static void pca953x_gpio_set_multiple(struct gpio_chip *gc,
unsigned long *mask, unsigned long *bits)
{
@@ -408,6 +425,7 @@ static void pca953x_setup_gpio(struct pca953x_chip *chip, int gpios)
gc->direction_output = pca953x_gpio_direction_output;
gc->get = pca953x_gpio_get_value;
gc->set = pca953x_gpio_set_value;
+ gc->get_direction = pca953x_gpio_get_direction;
gc->set_multiple = pca953x_gpio_set_multiple;
gc->can_sleep = true;
@@ -760,7 +778,13 @@ static int pca953x_probe(struct i2c_client *client,
chip->gpio_start = -1;
irq_base = 0;
- /* See if we need to de-assert a reset pin */
+ /*
+ * See if we need to de-assert a reset pin.
+ *
+	 * There are no known ACPI-enabled platforms that use a
+	 * "reset" GPIO; any such platform would have to describe it
+	 * via a _DSD method with a corresponding property.
+ */
reset_gpio = devm_gpiod_get_optional(&client->dev, "reset",
GPIOD_OUT_LOW);
if (IS_ERR(reset_gpio))
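The new pca953x_gpio_get_direction() depends on the expanders grouping lines into 8-bit banks, where a set direction bit means input (matching GPIOF_DIR_IN == 1). A small sketch of the bank/bit split, with invented names:

#include <linux/bitops.h>
#include <linux/types.h>

#define EXAMPLE_BANK_SZ	8	/* lines per register bank */

static inline unsigned int example_bank(unsigned int off)
{
	return off / EXAMPLE_BANK_SZ;		/* which register */
}

static inline u8 example_bank_bit(unsigned int off)
{
	return BIT(off % EXAMPLE_BANK_SZ);	/* which bit within it */
}

Line 13 on a 24-bit expander, for instance, lands in bank 1, bit 5.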
diff --git a/drivers/gpio/gpio-pcf857x.c b/drivers/gpio/gpio-pcf857x.c
index 895af42a4513..8ddf9302ce3b 100644
--- a/drivers/gpio/gpio-pcf857x.c
+++ b/drivers/gpio/gpio-pcf857x.c
@@ -46,7 +46,6 @@ static const struct i2c_device_id pcf857x_id[] = {
{ "pca9675", 16 },
{ "max7328", 8 },
{ "max7329", 8 },
- { "tca9554", 8 },
{ }
};
MODULE_DEVICE_TABLE(i2c, pcf857x_id);
@@ -66,7 +65,6 @@ static const struct of_device_id pcf857x_of_table[] = {
{ .compatible = "nxp,pca9675" },
{ .compatible = "maxim,max7328" },
{ .compatible = "maxim,max7329" },
- { .compatible = "ti,tca9554" },
{ }
};
MODULE_DEVICE_TABLE(of, pcf857x_of_table);
diff --git a/drivers/gpio/gpio-pch.c b/drivers/gpio/gpio-pch.c
index 7c7135da5d4a..71bc6da11337 100644
--- a/drivers/gpio/gpio-pch.c
+++ b/drivers/gpio/gpio-pch.c
@@ -403,7 +403,8 @@ static int pch_gpio_probe(struct pci_dev *pdev,
goto err_gpiochip_add;
}
- irq_base = irq_alloc_descs(-1, 0, gpio_pins[chip->ioh], NUMA_NO_NODE);
+ irq_base = devm_irq_alloc_descs(&pdev->dev, -1, 0,
+ gpio_pins[chip->ioh], NUMA_NO_NODE);
if (irq_base < 0) {
dev_warn(&pdev->dev, "PCH gpio: Failed to get IRQ base num\n");
chip->irq_base = -1;
@@ -416,8 +417,8 @@ static int pch_gpio_probe(struct pci_dev *pdev,
iowrite32(msk, &chip->reg->imask);
iowrite32(msk, &chip->reg->ien);
- ret = request_irq(pdev->irq, pch_gpio_handler,
- IRQF_SHARED, KBUILD_MODNAME, chip);
+ ret = devm_request_irq(&pdev->dev, pdev->irq, pch_gpio_handler,
+ IRQF_SHARED, KBUILD_MODNAME, chip);
if (ret != 0) {
dev_err(&pdev->dev,
"%s request_irq failed\n", __func__);
@@ -430,7 +431,6 @@ end:
return 0;
err_request_irq:
- irq_free_descs(irq_base, gpio_pins[chip->ioh]);
gpiochip_remove(&chip->gpio);
err_gpiochip_add:
@@ -452,12 +452,6 @@ static void pch_gpio_remove(struct pci_dev *pdev)
{
struct pch_gpio *chip = pci_get_drvdata(pdev);
- if (chip->irq_base != -1) {
- free_irq(pdev->irq, chip);
-
- irq_free_descs(chip->irq_base, gpio_pins[chip->ioh]);
- }
-
gpiochip_remove(&chip->gpio);
pci_iounmap(pdev, chip->base);
pci_release_regions(pdev);
diff --git a/drivers/gpio/gpio-pci-idio-16.c b/drivers/gpio/gpio-pci-idio-16.c
index 269ab628634b..7de4f6a2cb49 100644
--- a/drivers/gpio/gpio-pci-idio-16.c
+++ b/drivers/gpio/gpio-pci-idio-16.c
@@ -59,7 +59,7 @@ struct idio_16_gpio_reg {
*/
struct idio_16_gpio {
struct gpio_chip chip;
- spinlock_t lock;
+ raw_spinlock_t lock;
struct idio_16_gpio_reg __iomem *reg;
unsigned long irq_mask;
};
@@ -121,7 +121,7 @@ static void idio_16_gpio_set(struct gpio_chip *chip, unsigned int offset,
} else
base = &idio16gpio->reg->out0_7;
- spin_lock_irqsave(&idio16gpio->lock, flags);
+ raw_spin_lock_irqsave(&idio16gpio->lock, flags);
if (value)
out_state = ioread8(base) | mask;
@@ -130,7 +130,7 @@ static void idio_16_gpio_set(struct gpio_chip *chip, unsigned int offset,
iowrite8(out_state, base);
- spin_unlock_irqrestore(&idio16gpio->lock, flags);
+ raw_spin_unlock_irqrestore(&idio16gpio->lock, flags);
}
static void idio_16_gpio_set_multiple(struct gpio_chip *chip,
@@ -140,7 +140,7 @@ static void idio_16_gpio_set_multiple(struct gpio_chip *chip,
unsigned long flags;
unsigned int out_state;
- spin_lock_irqsave(&idio16gpio->lock, flags);
+ raw_spin_lock_irqsave(&idio16gpio->lock, flags);
/* process output lines 0-7 */
if (*mask & 0xFF) {
@@ -160,7 +160,7 @@ static void idio_16_gpio_set_multiple(struct gpio_chip *chip,
iowrite8(out_state, &idio16gpio->reg->out8_15);
}
- spin_unlock_irqrestore(&idio16gpio->lock, flags);
+ raw_spin_unlock_irqrestore(&idio16gpio->lock, flags);
}
static void idio_16_irq_ack(struct irq_data *data)
@@ -177,11 +177,11 @@ static void idio_16_irq_mask(struct irq_data *data)
idio16gpio->irq_mask &= ~mask;
if (!idio16gpio->irq_mask) {
- spin_lock_irqsave(&idio16gpio->lock, flags);
+ raw_spin_lock_irqsave(&idio16gpio->lock, flags);
iowrite8(0, &idio16gpio->reg->irq_ctl);
- spin_unlock_irqrestore(&idio16gpio->lock, flags);
+ raw_spin_unlock_irqrestore(&idio16gpio->lock, flags);
}
}
@@ -196,11 +196,11 @@ static void idio_16_irq_unmask(struct irq_data *data)
idio16gpio->irq_mask |= mask;
if (!prev_irq_mask) {
- spin_lock_irqsave(&idio16gpio->lock, flags);
+ raw_spin_lock_irqsave(&idio16gpio->lock, flags);
ioread8(&idio16gpio->reg->irq_ctl);
- spin_unlock_irqrestore(&idio16gpio->lock, flags);
+ raw_spin_unlock_irqrestore(&idio16gpio->lock, flags);
}
}
@@ -229,11 +229,11 @@ static irqreturn_t idio_16_irq_handler(int irq, void *dev_id)
struct gpio_chip *const chip = &idio16gpio->chip;
int gpio;
- spin_lock(&idio16gpio->lock);
+ raw_spin_lock(&idio16gpio->lock);
irq_status = ioread8(&idio16gpio->reg->irq_status);
- spin_unlock(&idio16gpio->lock);
+ raw_spin_unlock(&idio16gpio->lock);
/* Make sure our device generated IRQ */
if (!(irq_status & 0x3) || !(irq_status & 0x4))
@@ -242,12 +242,12 @@ static irqreturn_t idio_16_irq_handler(int irq, void *dev_id)
for_each_set_bit(gpio, &idio16gpio->irq_mask, chip->ngpio)
generic_handle_irq(irq_find_mapping(chip->irqdomain, gpio));
- spin_lock(&idio16gpio->lock);
+ raw_spin_lock(&idio16gpio->lock);
/* Clear interrupt */
iowrite8(0, &idio16gpio->reg->in0_7);
- spin_unlock(&idio16gpio->lock);
+ raw_spin_unlock(&idio16gpio->lock);
return IRQ_HANDLED;
}
@@ -302,7 +302,7 @@ static int idio_16_probe(struct pci_dev *pdev, const struct pci_device_id *id)
idio16gpio->chip.set = idio_16_gpio_set;
idio16gpio->chip.set_multiple = idio_16_gpio_set_multiple;
- spin_lock_init(&idio16gpio->lock);
+ raw_spin_lock_init(&idio16gpio->lock);
err = devm_gpiochip_add_data(dev, &idio16gpio->chip, idio16gpio);
if (err) {
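This driver and gpio-pl061 below switch their register locks from spinlock_t to raw_spinlock_t because the irqchip callbacks can run in hard interrupt context, where a sleeping spinlock_t is not allowed once PREEMPT_RT turns ordinary spinlocks into mutexes. A minimal sketch of the resulting pattern (structure and register are hypothetical):

#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct example_chip {
	raw_spinlock_t lock;
	void __iomem *ctl;
};

static void example_set_bits(struct example_chip *c, u8 bits)
{
	unsigned long flags;

	/* Safe from both thread and hard-irq context, even on PREEMPT_RT. */
	raw_spin_lock_irqsave(&c->lock, flags);
	iowrite8(ioread8(c->ctl) | bits, c->ctl);
	raw_spin_unlock_irqrestore(&c->lock, flags);
}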
diff --git a/drivers/gpio/gpio-pl061.c b/drivers/gpio/gpio-pl061.c
index 0a6bfd2b06e5..3d3d6b6645a7 100644
--- a/drivers/gpio/gpio-pl061.c
+++ b/drivers/gpio/gpio-pl061.c
@@ -50,7 +50,7 @@ struct pl061_context_save_regs {
#endif
struct pl061 {
- spinlock_t lock;
+ raw_spinlock_t lock;
void __iomem *base;
struct gpio_chip gc;
@@ -74,11 +74,11 @@ static int pl061_direction_input(struct gpio_chip *gc, unsigned offset)
unsigned long flags;
unsigned char gpiodir;
- spin_lock_irqsave(&pl061->lock, flags);
+ raw_spin_lock_irqsave(&pl061->lock, flags);
gpiodir = readb(pl061->base + GPIODIR);
gpiodir &= ~(BIT(offset));
writeb(gpiodir, pl061->base + GPIODIR);
- spin_unlock_irqrestore(&pl061->lock, flags);
+ raw_spin_unlock_irqrestore(&pl061->lock, flags);
return 0;
}
@@ -90,7 +90,7 @@ static int pl061_direction_output(struct gpio_chip *gc, unsigned offset,
unsigned long flags;
unsigned char gpiodir;
- spin_lock_irqsave(&pl061->lock, flags);
+ raw_spin_lock_irqsave(&pl061->lock, flags);
writeb(!!value << offset, pl061->base + (BIT(offset + 2)));
gpiodir = readb(pl061->base + GPIODIR);
gpiodir |= BIT(offset);
@@ -101,7 +101,7 @@ static int pl061_direction_output(struct gpio_chip *gc, unsigned offset,
* a gpio pin before configuring it in OUT mode.
*/
writeb(!!value << offset, pl061->base + (BIT(offset + 2)));
- spin_unlock_irqrestore(&pl061->lock, flags);
+ raw_spin_unlock_irqrestore(&pl061->lock, flags);
return 0;
}
@@ -143,7 +143,7 @@ static int pl061_irq_type(struct irq_data *d, unsigned trigger)
}
- spin_lock_irqsave(&pl061->lock, flags);
+ raw_spin_lock_irqsave(&pl061->lock, flags);
gpioiev = readb(pl061->base + GPIOIEV);
gpiois = readb(pl061->base + GPIOIS);
@@ -203,7 +203,7 @@ static int pl061_irq_type(struct irq_data *d, unsigned trigger)
writeb(gpioibe, pl061->base + GPIOIBE);
writeb(gpioiev, pl061->base + GPIOIEV);
- spin_unlock_irqrestore(&pl061->lock, flags);
+ raw_spin_unlock_irqrestore(&pl061->lock, flags);
return 0;
}
@@ -235,10 +235,10 @@ static void pl061_irq_mask(struct irq_data *d)
u8 mask = BIT(irqd_to_hwirq(d) % PL061_GPIO_NR);
u8 gpioie;
- spin_lock(&pl061->lock);
+ raw_spin_lock(&pl061->lock);
gpioie = readb(pl061->base + GPIOIE) & ~mask;
writeb(gpioie, pl061->base + GPIOIE);
- spin_unlock(&pl061->lock);
+ raw_spin_unlock(&pl061->lock);
}
static void pl061_irq_unmask(struct irq_data *d)
@@ -248,10 +248,10 @@ static void pl061_irq_unmask(struct irq_data *d)
u8 mask = BIT(irqd_to_hwirq(d) % PL061_GPIO_NR);
u8 gpioie;
- spin_lock(&pl061->lock);
+ raw_spin_lock(&pl061->lock);
gpioie = readb(pl061->base + GPIOIE) | mask;
writeb(gpioie, pl061->base + GPIOIE);
- spin_unlock(&pl061->lock);
+ raw_spin_unlock(&pl061->lock);
}
/**
@@ -268,9 +268,9 @@ static void pl061_irq_ack(struct irq_data *d)
struct pl061 *pl061 = gpiochip_get_data(gc);
u8 mask = BIT(irqd_to_hwirq(d) % PL061_GPIO_NR);
- spin_lock(&pl061->lock);
+ raw_spin_lock(&pl061->lock);
writeb(mask, pl061->base + GPIOIC);
- spin_unlock(&pl061->lock);
+ raw_spin_unlock(&pl061->lock);
}
static int pl061_irq_set_wake(struct irq_data *d, unsigned int state)
@@ -304,7 +304,7 @@ static int pl061_probe(struct amba_device *adev, const struct amba_id *id)
if (IS_ERR(pl061->base))
return PTR_ERR(pl061->base);
- spin_lock_init(&pl061->lock);
+ raw_spin_lock_init(&pl061->lock);
if (of_property_read_bool(dev->of_node, "gpio-ranges")) {
pl061->gc.request = gpiochip_generic_request;
pl061->gc.free = gpiochip_generic_free;
diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c
index 76ac906b4d78..832f3e46ba9f 100644
--- a/drivers/gpio/gpio-pxa.c
+++ b/drivers/gpio/gpio-pxa.c
@@ -601,7 +601,7 @@ static int pxa_gpio_probe_dt(struct platform_device *pdev,
nr_gpios = gpio_id->gpio_nums;
pxa_last_gpio = nr_gpios - 1;
- irq_base = irq_alloc_descs(-1, 0, nr_gpios, 0);
+ irq_base = devm_irq_alloc_descs(&pdev->dev, -1, 0, nr_gpios, 0);
if (irq_base < 0) {
dev_err(&pdev->dev, "Failed to allocate IRQ numbers\n");
return irq_base;
diff --git a/drivers/gpio/gpio-reg.c b/drivers/gpio/gpio-reg.c
new file mode 100644
index 000000000000..e85903eddc68
--- /dev/null
+++ b/drivers/gpio/gpio-reg.c
@@ -0,0 +1,185 @@
+/*
+ * gpio-reg: single register individually fixed-direction GPIOs
+ *
+ * Copyright (C) 2016 Russell King
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ */
+#include <linux/gpio/driver.h>
+#include <linux/gpio/gpio-reg.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+struct gpio_reg {
+ struct gpio_chip gc;
+ spinlock_t lock;
+ u32 direction;
+ u32 out;
+ void __iomem *reg;
+ struct irq_domain *irqdomain;
+ const int *irqs;
+};
+
+#define to_gpio_reg(x) container_of(x, struct gpio_reg, gc)
+
+static int gpio_reg_get_direction(struct gpio_chip *gc, unsigned offset)
+{
+ struct gpio_reg *r = to_gpio_reg(gc);
+
+ return r->direction & BIT(offset) ? 1 : 0;
+}
+
+static int gpio_reg_direction_output(struct gpio_chip *gc, unsigned offset,
+ int value)
+{
+ struct gpio_reg *r = to_gpio_reg(gc);
+
+ if (r->direction & BIT(offset))
+ return -ENOTSUPP;
+
+ gc->set(gc, offset, value);
+ return 0;
+}
+
+static int gpio_reg_direction_input(struct gpio_chip *gc, unsigned offset)
+{
+ struct gpio_reg *r = to_gpio_reg(gc);
+
+ return r->direction & BIT(offset) ? 0 : -ENOTSUPP;
+}
+
+static void gpio_reg_set(struct gpio_chip *gc, unsigned offset, int value)
+{
+ struct gpio_reg *r = to_gpio_reg(gc);
+ unsigned long flags;
+ u32 val, mask = BIT(offset);
+
+ spin_lock_irqsave(&r->lock, flags);
+ val = r->out;
+ if (value)
+ val |= mask;
+ else
+ val &= ~mask;
+ r->out = val;
+ writel_relaxed(val, r->reg);
+ spin_unlock_irqrestore(&r->lock, flags);
+}
+
+static int gpio_reg_get(struct gpio_chip *gc, unsigned offset)
+{
+ struct gpio_reg *r = to_gpio_reg(gc);
+ u32 val, mask = BIT(offset);
+
+ if (r->direction & mask) {
+ /*
+		 * Double-read the value; some registers latch after the
+		 * first read.
+ */
+ readl_relaxed(r->reg);
+ val = readl_relaxed(r->reg);
+ } else {
+ val = r->out;
+ }
+ return !!(val & mask);
+}
+
+static void gpio_reg_set_multiple(struct gpio_chip *gc, unsigned long *mask,
+ unsigned long *bits)
+{
+ struct gpio_reg *r = to_gpio_reg(gc);
+ unsigned long flags;
+
+ spin_lock_irqsave(&r->lock, flags);
+ r->out = (r->out & ~*mask) | (*bits & *mask);
+ writel_relaxed(r->out, r->reg);
+ spin_unlock_irqrestore(&r->lock, flags);
+}
+
+static int gpio_reg_to_irq(struct gpio_chip *gc, unsigned offset)
+{
+ struct gpio_reg *r = to_gpio_reg(gc);
+ int irq = r->irqs[offset];
+
+ if (irq >= 0 && r->irqdomain)
+ irq = irq_find_mapping(r->irqdomain, irq);
+
+ return irq;
+}
+
+/**
+ * gpio_reg_init - add a fixed in/out register as gpio
+ * @dev: optional struct device associated with this register
+ * @base: start gpio number, or -1 to allocate
+ * @num: number of GPIOs, maximum 32
+ * @label: GPIO chip label
+ * @direction: bitmask of fixed direction, one per GPIO signal, 1 = in
+ * @def_out: initial GPIO output value
+ * @names: array of %num strings describing each GPIO signal or %NULL
+ * @irqdom: irq domain or %NULL
+ * @irqs: array of %num ints describing the interrupt mapping for each
+ * GPIO signal, or %NULL. If @irqdom is %NULL, then this
+ * describes the Linux interrupt number, otherwise it describes
+ * the hardware interrupt number in the specified irq domain.
+ *
+ * Add a single-register GPIO device containing up to 32 GPIO signals,
+ * where each GPIO has a fixed input or output configuration. Only
+ * input GPIOs are assumed to be readable from the register, and only
+ * then after a double-read. Output values are assumed not to be
+ * readable.
+ */
+struct gpio_chip *gpio_reg_init(struct device *dev, void __iomem *reg,
+ int base, int num, const char *label, u32 direction, u32 def_out,
+ const char *const *names, struct irq_domain *irqdom, const int *irqs)
+{
+ struct gpio_reg *r;
+ int ret;
+
+ if (dev)
+ r = devm_kzalloc(dev, sizeof(*r), GFP_KERNEL);
+ else
+ r = kzalloc(sizeof(*r), GFP_KERNEL);
+
+ if (!r)
+ return ERR_PTR(-ENOMEM);
+
+ spin_lock_init(&r->lock);
+
+ r->gc.label = label;
+ r->gc.get_direction = gpio_reg_get_direction;
+ r->gc.direction_input = gpio_reg_direction_input;
+ r->gc.direction_output = gpio_reg_direction_output;
+ r->gc.set = gpio_reg_set;
+ r->gc.get = gpio_reg_get;
+ r->gc.set_multiple = gpio_reg_set_multiple;
+ if (irqs)
+ r->gc.to_irq = gpio_reg_to_irq;
+ r->gc.base = base;
+ r->gc.ngpio = num;
+ r->gc.names = names;
+ r->direction = direction;
+ r->out = def_out;
+ r->reg = reg;
+ r->irqs = irqs;
+
+	if (dev)
+		ret = devm_gpiochip_add_data(dev, &r->gc, r);
+	else
+		ret = gpiochip_add_data(&r->gc, r);
+
+	if (ret) {
+		if (!dev)
+			kfree(r);
+		return ERR_PTR(ret);
+	}
+
+	return &r->gc;
+}
+
+int gpio_reg_resume(struct gpio_chip *gc)
+{
+ struct gpio_reg *r = to_gpio_reg(gc);
+ unsigned long flags;
+
+ spin_lock_irqsave(&r->lock, flags);
+ writel_relaxed(r->out, r->reg);
+ spin_unlock_irqrestore(&r->lock, flags);
+
+ return 0;
+}
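A minimal usage sketch for the new gpio_reg_init() helper, as a board file might call it; the register, label and masks below are invented for illustration:

#include <linux/device.h>
#include <linux/gpio/gpio-reg.h>
#include <linux/io.h>

static struct gpio_chip *example_add_reg_gpios(struct device *dev,
					       void __iomem *reg)
{
	/* bits 0-3 are fixed inputs, bits 4-7 fixed outputs driven high */
	return gpio_reg_init(dev, reg, -1, 8, "board-misc",
			     0x0f, 0xf0, NULL, NULL, NULL);
}

Passing a non-NULL dev makes the allocation and chip registration device-managed, as the helper above shows; with dev == NULL the caller owns the lifetime.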
diff --git a/drivers/gpio/gpio-sa1100.c b/drivers/gpio/gpio-sa1100.c
index 8d8ee0ebf14c..249f433aa62d 100644
--- a/drivers/gpio/gpio-sa1100.c
+++ b/drivers/gpio/gpio-sa1100.c
@@ -12,57 +12,97 @@
#include <linux/module.h>
#include <linux/io.h>
#include <linux/syscore_ops.h>
+#include <soc/sa1100/pwer.h>
#include <mach/hardware.h>
#include <mach/irqs.h>
+struct sa1100_gpio_chip {
+ struct gpio_chip chip;
+ void __iomem *membase;
+ int irqbase;
+ u32 irqmask;
+ u32 irqrising;
+ u32 irqfalling;
+ u32 irqwake;
+};
+
+#define sa1100_gpio_chip(x) container_of(x, struct sa1100_gpio_chip, chip)
+
+enum {
+ R_GPLR = 0x00,
+ R_GPDR = 0x04,
+ R_GPSR = 0x08,
+ R_GPCR = 0x0c,
+ R_GRER = 0x10,
+ R_GFER = 0x14,
+ R_GEDR = 0x18,
+ R_GAFR = 0x1c,
+};
+
static int sa1100_gpio_get(struct gpio_chip *chip, unsigned offset)
{
- return !!(GPLR & GPIO_GPIO(offset));
+ return readl_relaxed(sa1100_gpio_chip(chip)->membase + R_GPLR) &
+ BIT(offset);
}
static void sa1100_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
{
- if (value)
- GPSR = GPIO_GPIO(offset);
- else
- GPCR = GPIO_GPIO(offset);
+ int reg = value ? R_GPSR : R_GPCR;
+
+ writel_relaxed(BIT(offset), sa1100_gpio_chip(chip)->membase + reg);
+}
+
+static int sa1100_get_direction(struct gpio_chip *chip, unsigned offset)
+{
+ void __iomem *gpdr = sa1100_gpio_chip(chip)->membase + R_GPDR;
+
+ return !(readl_relaxed(gpdr) & BIT(offset));
}
static int sa1100_direction_input(struct gpio_chip *chip, unsigned offset)
{
+ void __iomem *gpdr = sa1100_gpio_chip(chip)->membase + R_GPDR;
unsigned long flags;
local_irq_save(flags);
- GPDR &= ~GPIO_GPIO(offset);
+ writel_relaxed(readl_relaxed(gpdr) & ~BIT(offset), gpdr);
local_irq_restore(flags);
+
return 0;
}
static int sa1100_direction_output(struct gpio_chip *chip, unsigned offset, int value)
{
+ void __iomem *gpdr = sa1100_gpio_chip(chip)->membase + R_GPDR;
unsigned long flags;
local_irq_save(flags);
sa1100_gpio_set(chip, offset, value);
- GPDR |= GPIO_GPIO(offset);
+ writel_relaxed(readl_relaxed(gpdr) | BIT(offset), gpdr);
local_irq_restore(flags);
+
return 0;
}
static int sa1100_to_irq(struct gpio_chip *chip, unsigned offset)
{
- return IRQ_GPIO0 + offset;
+ return sa1100_gpio_chip(chip)->irqbase + offset;
}
-static struct gpio_chip sa1100_gpio_chip = {
- .label = "gpio",
- .direction_input = sa1100_direction_input,
- .direction_output = sa1100_direction_output,
- .set = sa1100_gpio_set,
- .get = sa1100_gpio_get,
- .to_irq = sa1100_to_irq,
- .base = 0,
- .ngpio = GPIO_MAX + 1,
+static struct sa1100_gpio_chip sa1100_gpio_chip = {
+ .chip = {
+ .label = "gpio",
+ .get_direction = sa1100_get_direction,
+ .direction_input = sa1100_direction_input,
+ .direction_output = sa1100_direction_output,
+ .set = sa1100_gpio_set,
+ .get = sa1100_gpio_get,
+ .to_irq = sa1100_to_irq,
+ .base = 0,
+ .ngpio = GPIO_MAX + 1,
+ },
+ .membase = (void *)&GPLR,
+ .irqbase = IRQ_GPIO0,
};
/*
@@ -70,33 +110,39 @@ static struct gpio_chip sa1100_gpio_chip = {
* IRQs are generated on Falling-Edge, Rising-Edge, or both.
* Use this instead of directly setting GRER/GFER.
*/
-static int GPIO_IRQ_rising_edge;
-static int GPIO_IRQ_falling_edge;
-static int GPIO_IRQ_mask;
+static void sa1100_update_edge_regs(struct sa1100_gpio_chip *sgc)
+{
+	void __iomem *base = sgc->membase;
+ u32 grer, gfer;
+
+ grer = sgc->irqrising & sgc->irqmask;
+ gfer = sgc->irqfalling & sgc->irqmask;
+
+ writel_relaxed(grer, base + R_GRER);
+ writel_relaxed(gfer, base + R_GFER);
+}
static int sa1100_gpio_type(struct irq_data *d, unsigned int type)
{
- unsigned int mask;
-
- mask = BIT(d->hwirq);
+ struct sa1100_gpio_chip *sgc = irq_data_get_irq_chip_data(d);
+ unsigned int mask = BIT(d->hwirq);
if (type == IRQ_TYPE_PROBE) {
- if ((GPIO_IRQ_rising_edge | GPIO_IRQ_falling_edge) & mask)
+ if ((sgc->irqrising | sgc->irqfalling) & mask)
return 0;
type = IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING;
}
if (type & IRQ_TYPE_EDGE_RISING)
- GPIO_IRQ_rising_edge |= mask;
+ sgc->irqrising |= mask;
else
- GPIO_IRQ_rising_edge &= ~mask;
+ sgc->irqrising &= ~mask;
if (type & IRQ_TYPE_EDGE_FALLING)
- GPIO_IRQ_falling_edge |= mask;
+ sgc->irqfalling |= mask;
else
- GPIO_IRQ_falling_edge &= ~mask;
+ sgc->irqfalling &= ~mask;
- GRER = GPIO_IRQ_rising_edge & GPIO_IRQ_mask;
- GFER = GPIO_IRQ_falling_edge & GPIO_IRQ_mask;
+ sa1100_update_edge_regs(sgc);
return 0;
}
@@ -106,36 +152,42 @@ static int sa1100_gpio_type(struct irq_data *d, unsigned int type)
*/
static void sa1100_gpio_ack(struct irq_data *d)
{
- GEDR = BIT(d->hwirq);
+ struct sa1100_gpio_chip *sgc = irq_data_get_irq_chip_data(d);
+
+ writel_relaxed(BIT(d->hwirq), sgc->membase + R_GEDR);
}
static void sa1100_gpio_mask(struct irq_data *d)
{
+ struct sa1100_gpio_chip *sgc = irq_data_get_irq_chip_data(d);
unsigned int mask = BIT(d->hwirq);
- GPIO_IRQ_mask &= ~mask;
+ sgc->irqmask &= ~mask;
- GRER &= ~mask;
- GFER &= ~mask;
+ sa1100_update_edge_regs(sgc);
}
static void sa1100_gpio_unmask(struct irq_data *d)
{
+ struct sa1100_gpio_chip *sgc = irq_data_get_irq_chip_data(d);
unsigned int mask = BIT(d->hwirq);
- GPIO_IRQ_mask |= mask;
+ sgc->irqmask |= mask;
- GRER = GPIO_IRQ_rising_edge & GPIO_IRQ_mask;
- GFER = GPIO_IRQ_falling_edge & GPIO_IRQ_mask;
+ sa1100_update_edge_regs(sgc);
}
static int sa1100_gpio_wake(struct irq_data *d, unsigned int on)
{
- if (on)
- PWER |= BIT(d->hwirq);
- else
- PWER &= ~BIT(d->hwirq);
- return 0;
+ struct sa1100_gpio_chip *sgc = irq_data_get_irq_chip_data(d);
+ int ret = sa11x0_gpio_set_wake(d->hwirq, on);
+ if (!ret) {
+ if (on)
+ sgc->irqwake |= BIT(d->hwirq);
+ else
+ sgc->irqwake &= ~BIT(d->hwirq);
+ }
+ return ret;
}
/*
@@ -153,8 +205,10 @@ static struct irq_chip sa1100_gpio_irq_chip = {
static int sa1100_gpio_irqdomain_map(struct irq_domain *d,
unsigned int irq, irq_hw_number_t hwirq)
{
- irq_set_chip_and_handler(irq, &sa1100_gpio_irq_chip,
- handle_edge_irq);
+ struct sa1100_gpio_chip *sgc = d->host_data;
+
+ irq_set_chip_data(irq, sgc);
+ irq_set_chip_and_handler(irq, &sa1100_gpio_irq_chip, handle_edge_irq);
irq_set_probe(irq);
return 0;
@@ -174,17 +228,19 @@ static struct irq_domain *sa1100_gpio_irqdomain;
*/
static void sa1100_gpio_handler(struct irq_desc *desc)
{
+ struct sa1100_gpio_chip *sgc = irq_desc_get_handler_data(desc);
unsigned int irq, mask;
+ void __iomem *gedr = sgc->membase + R_GEDR;
- mask = GEDR;
+ mask = readl_relaxed(gedr);
do {
/*
* clear down all currently active IRQ sources.
* We will be processing them all.
*/
- GEDR = mask;
+ writel_relaxed(mask, gedr);
- irq = IRQ_GPIO0;
+ irq = sgc->irqbase;
do {
if (mask & 1)
generic_handle_irq(irq);
@@ -192,30 +248,32 @@ static void sa1100_gpio_handler(struct irq_desc *desc)
irq++;
} while (mask);
- mask = GEDR;
+ mask = readl_relaxed(gedr);
} while (mask);
}
static int sa1100_gpio_suspend(void)
{
+ struct sa1100_gpio_chip *sgc = &sa1100_gpio_chip;
+
/*
* Set the appropriate edges for wakeup.
*/
- GRER = PWER & GPIO_IRQ_rising_edge;
- GFER = PWER & GPIO_IRQ_falling_edge;
+ writel_relaxed(sgc->irqwake & sgc->irqrising, sgc->membase + R_GRER);
+ writel_relaxed(sgc->irqwake & sgc->irqfalling, sgc->membase + R_GFER);
/*
* Clear any pending GPIO interrupts.
*/
- GEDR = GEDR;
+ writel_relaxed(readl_relaxed(sgc->membase + R_GEDR),
+ sgc->membase + R_GEDR);
return 0;
}
static void sa1100_gpio_resume(void)
{
- GRER = GPIO_IRQ_rising_edge & GPIO_IRQ_mask;
- GFER = GPIO_IRQ_falling_edge & GPIO_IRQ_mask;
+ sa1100_update_edge_regs(&sa1100_gpio_chip);
}
static struct syscore_ops sa1100_gpio_syscore_ops = {
@@ -231,36 +289,40 @@ static int __init sa1100_gpio_init_devicefs(void)
device_initcall(sa1100_gpio_init_devicefs);
+static const int sa1100_gpio_irqs[] __initconst = {
+ /* Install handlers for GPIO 0-10 edge detect interrupts */
+ IRQ_GPIO0_SC,
+ IRQ_GPIO1_SC,
+ IRQ_GPIO2_SC,
+ IRQ_GPIO3_SC,
+ IRQ_GPIO4_SC,
+ IRQ_GPIO5_SC,
+ IRQ_GPIO6_SC,
+ IRQ_GPIO7_SC,
+ IRQ_GPIO8_SC,
+ IRQ_GPIO9_SC,
+ IRQ_GPIO10_SC,
+ /* Install handler for GPIO 11-27 edge detect interrupts */
+ IRQ_GPIO11_27,
+};
+
void __init sa1100_init_gpio(void)
{
+ struct sa1100_gpio_chip *sgc = &sa1100_gpio_chip;
+ int i;
+
/* clear all GPIO edge detects */
- GFER = 0;
- GRER = 0;
- GEDR = -1;
+ writel_relaxed(0, sgc->membase + R_GFER);
+ writel_relaxed(0, sgc->membase + R_GRER);
+ writel_relaxed(-1, sgc->membase + R_GEDR);
- gpiochip_add_data(&sa1100_gpio_chip, NULL);
+ gpiochip_add_data(&sa1100_gpio_chip.chip, NULL);
sa1100_gpio_irqdomain = irq_domain_add_simple(NULL,
28, IRQ_GPIO0,
- &sa1100_gpio_irqdomain_ops, NULL);
-
- /*
- * Install handlers for GPIO 0-10 edge detect interrupts
- */
- irq_set_chained_handler(IRQ_GPIO0_SC, sa1100_gpio_handler);
- irq_set_chained_handler(IRQ_GPIO1_SC, sa1100_gpio_handler);
- irq_set_chained_handler(IRQ_GPIO2_SC, sa1100_gpio_handler);
- irq_set_chained_handler(IRQ_GPIO3_SC, sa1100_gpio_handler);
- irq_set_chained_handler(IRQ_GPIO4_SC, sa1100_gpio_handler);
- irq_set_chained_handler(IRQ_GPIO5_SC, sa1100_gpio_handler);
- irq_set_chained_handler(IRQ_GPIO6_SC, sa1100_gpio_handler);
- irq_set_chained_handler(IRQ_GPIO7_SC, sa1100_gpio_handler);
- irq_set_chained_handler(IRQ_GPIO8_SC, sa1100_gpio_handler);
- irq_set_chained_handler(IRQ_GPIO9_SC, sa1100_gpio_handler);
- irq_set_chained_handler(IRQ_GPIO10_SC, sa1100_gpio_handler);
- /*
- * Install handler for GPIO 11-27 edge detect interrupts
- */
- irq_set_chained_handler(IRQ_GPIO11_27, sa1100_gpio_handler);
+ &sa1100_gpio_irqdomain_ops, sgc);
+ for (i = 0; i < ARRAY_SIZE(sa1100_gpio_irqs); i++)
+ irq_set_chained_handler_and_data(sa1100_gpio_irqs[i],
+ sa1100_gpio_handler, sgc);
}
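The sa1100 rework above threads its per-chip state through the irq domain's host_data and the per-descriptor chip data instead of file-scope globals. A stripped-down sketch of that plumbing (the struct, chip and field names are invented):

#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/types.h>

struct example_state {
	u32 mask;
};

static struct irq_chip example_irq_chip;	/* .irq_mask etc. elided */

static int example_irqdomain_map(struct irq_domain *d, unsigned int irq,
				 irq_hw_number_t hwirq)
{
	/* host_data was handed to irq_domain_add_simple() at init time */
	irq_set_chip_data(irq, d->host_data);
	irq_set_chip_and_handler(irq, &example_irq_chip, handle_edge_irq);
	return 0;
}

static void example_irq_mask(struct irq_data *d)
{
	struct example_state *s = irq_data_get_irq_chip_data(d);

	s->mask &= ~BIT(d->hwirq);	/* then write the combined registers */
}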
diff --git a/drivers/gpio/gpio-sodaville.c b/drivers/gpio/gpio-sodaville.c
index 7da9e6c4546a..f60da83349ef 100644
--- a/drivers/gpio/gpio-sodaville.c
+++ b/drivers/gpio/gpio-sodaville.c
@@ -135,7 +135,8 @@ static int sdv_register_irqsupport(struct sdv_gpio_chip_data *sd,
struct irq_chip_type *ct;
int ret;
- sd->irq_base = irq_alloc_descs(-1, 0, SDV_NUM_PUB_GPIOS, -1);
+ sd->irq_base = devm_irq_alloc_descs(&pdev->dev, -1, 0,
+ SDV_NUM_PUB_GPIOS, -1);
if (sd->irq_base < 0)
return sd->irq_base;
@@ -143,10 +144,11 @@ static int sdv_register_irqsupport(struct sdv_gpio_chip_data *sd,
writel(0, sd->gpio_pub_base + GPIO_INT);
writel((1 << 11) - 1, sd->gpio_pub_base + GPSTR);
- ret = request_irq(pdev->irq, sdv_gpio_pub_irq_handler, IRQF_SHARED,
- "sdv_gpio", sd);
+ ret = devm_request_irq(&pdev->dev, pdev->irq,
+ sdv_gpio_pub_irq_handler, IRQF_SHARED,
+ "sdv_gpio", sd);
if (ret)
- goto out_free_desc;
+ return ret;
/*
* This gpio irq controller latches level irqs. Testing shows that if
@@ -155,10 +157,8 @@ static int sdv_register_irqsupport(struct sdv_gpio_chip_data *sd,
*/
sd->gc = irq_alloc_generic_chip("sdv-gpio", 1, sd->irq_base,
sd->gpio_pub_base, handle_fasteoi_irq);
- if (!sd->gc) {
- ret = -ENOMEM;
- goto out_free_irq;
- }
+ if (!sd->gc)
+ return -ENOMEM;
sd->gc->private = sd;
ct = sd->gc->chip_types;
@@ -176,16 +176,10 @@ static int sdv_register_irqsupport(struct sdv_gpio_chip_data *sd,
sd->id = irq_domain_add_legacy(pdev->dev.of_node, SDV_NUM_PUB_GPIOS,
sd->irq_base, 0, &irq_domain_sdv_ops, sd);
- if (!sd->id) {
- ret = -ENODEV;
- goto out_free_irq;
- }
+ if (!sd->id)
+ return -ENODEV;
+
return 0;
-out_free_irq:
- free_irq(pdev->irq, sd);
-out_free_desc:
- irq_free_descs(sd->irq_base, SDV_NUM_PUB_GPIOS);
- return ret;
}
static int sdv_gpio_probe(struct pci_dev *pdev,
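
[Editor's aside, not part of the patch] The sodaville hunks above (and the sta2x11, twl4030 and xlp ones that follow) are all the same conversion: once IRQ descriptors and the IRQ line are device-managed, every failure path becomes a plain return and the goto-based unwind labels disappear. A hedged probe sketch of that shape, with hypothetical demo_* names and an arbitrary descriptor count:

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/numa.h>
#include <linux/platform_device.h>

static irqreturn_t demo_irq_handler(int irq, void *dev_id)
{
	/* a real handler would read and clear the device status here */
	return IRQ_HANDLED;
}

static int demo_probe(struct platform_device *pdev)
{
	int irq_base, irq, ret;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	irq_base = devm_irq_alloc_descs(&pdev->dev, -1, 0, 16, NUMA_NO_NODE);
	if (irq_base < 0)
		return irq_base;	/* nothing allocated yet, plain return */

	ret = devm_request_irq(&pdev->dev, irq, demo_irq_handler,
			       IRQF_SHARED, dev_name(&pdev->dev), pdev);
	if (ret)
		return ret;		/* descriptors are released by devres */

	return 0;
}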
diff --git a/drivers/gpio/gpio-sta2x11.c b/drivers/gpio/gpio-sta2x11.c
index 853ca23cad88..39df0620fa38 100644
--- a/drivers/gpio/gpio-sta2x11.c
+++ b/drivers/gpio/gpio-sta2x11.c
@@ -392,7 +392,8 @@ static int gsta_probe(struct platform_device *dev)
gsta_set_config(chip, i, gpio_pdata->pinconfig[i]);
/* 384 was used in previous code: be compatible for other drivers */
- err = irq_alloc_descs(-1, 384, GSTA_NR_GPIO, NUMA_NO_NODE);
+ err = devm_irq_alloc_descs(&dev->dev, -1, 384,
+ GSTA_NR_GPIO, NUMA_NO_NODE);
if (err < 0) {
dev_warn(&dev->dev, "sta2x11 gpio: Can't get irq base (%i)\n",
-err);
@@ -401,29 +402,23 @@ static int gsta_probe(struct platform_device *dev)
chip->irq_base = err;
gsta_alloc_irq_chip(chip);
- err = request_irq(pdev->irq, gsta_gpio_handler,
- IRQF_SHARED, KBUILD_MODNAME, chip);
+ err = devm_request_irq(&dev->dev, pdev->irq, gsta_gpio_handler,
+ IRQF_SHARED, KBUILD_MODNAME, chip);
if (err < 0) {
dev_err(&dev->dev, "sta2x11 gpio: Can't request irq (%i)\n",
-err);
- goto err_free_descs;
+ return err;
}
err = devm_gpiochip_add_data(&dev->dev, &chip->gpio, chip);
if (err < 0) {
dev_err(&dev->dev, "sta2x11 gpio: Can't register (%i)\n",
-err);
- goto err_free_irq;
+ return err;
}
platform_set_drvdata(dev, chip);
return 0;
-
-err_free_irq:
- free_irq(pdev->irq, chip);
-err_free_descs:
- irq_free_descs(chip->irq_base, GSTA_NR_GPIO);
- return err;
}
static struct platform_driver sta2x11_gpio_platform_driver = {
diff --git a/drivers/gpio/gpio-twl4030.c b/drivers/gpio/gpio-twl4030.c
index dfcfbba74416..24f388ed46d4 100644
--- a/drivers/gpio/gpio-twl4030.c
+++ b/drivers/gpio/gpio-twl4030.c
@@ -485,7 +485,8 @@ static int gpio_twl4030_probe(struct platform_device *pdev)
goto no_irqs;
}
- irq_base = irq_alloc_descs(-1, 0, TWL4030_GPIO_MAX, 0);
+ irq_base = devm_irq_alloc_descs(&pdev->dev, -1,
+ 0, TWL4030_GPIO_MAX, 0);
if (irq_base < 0) {
dev_err(&pdev->dev, "Failed to alloc irq_descs\n");
return irq_base;
diff --git a/drivers/gpio/gpio-wcove.c b/drivers/gpio/gpio-wcove.c
index 97613de5304e..7b1bc20be209 100644
--- a/drivers/gpio/gpio-wcove.c
+++ b/drivers/gpio/gpio-wcove.c
@@ -51,6 +51,8 @@
#define GROUP1_NR_IRQS 6
#define IRQ_MASK_BASE 0x4e19
#define IRQ_STATUS_BASE 0x4e0b
+#define GPIO_IRQ0_MASK GENMASK(6, 0)
+#define GPIO_IRQ1_MASK GENMASK(5, 0)
#define UPDATE_IRQ_TYPE BIT(0)
#define UPDATE_IRQ_MASK BIT(1)
@@ -309,7 +311,7 @@ static irqreturn_t wcove_gpio_irq_handler(int irq, void *data)
return IRQ_NONE;
}
- pending = p[0] | (p[1] << 8);
+ pending = (p[0] & GPIO_IRQ0_MASK) | ((p[1] & GPIO_IRQ1_MASK) << 7);
if (!pending)
return IRQ_NONE;
@@ -317,7 +319,7 @@ static irqreturn_t wcove_gpio_irq_handler(int irq, void *data)
while (pending) {
/* One iteration is for all pending bits */
for_each_set_bit(gpio, (const unsigned long *)&pending,
- GROUP0_NR_IRQS) {
+ WCOVE_GPIO_NUM) {
offset = (gpio > GROUP0_NR_IRQS) ? 1 : 0;
mask = (offset == 1) ? BIT(gpio - GROUP0_NR_IRQS) :
BIT(gpio);
@@ -333,7 +335,7 @@ static irqreturn_t wcove_gpio_irq_handler(int irq, void *data)
break;
}
- pending = p[0] | (p[1] << 8);
+ pending = (p[0] & GPIO_IRQ0_MASK) | ((p[1] & GPIO_IRQ1_MASK) << 7);
}
return IRQ_HANDLED;
diff --git a/drivers/gpio/gpio-ws16c48.c b/drivers/gpio/gpio-ws16c48.c
index 901b5ccb032d..87d63695dfcf 100644
--- a/drivers/gpio/gpio-ws16c48.c
+++ b/drivers/gpio/gpio-ws16c48.c
@@ -51,7 +51,7 @@ struct ws16c48_gpio {
struct gpio_chip chip;
unsigned char io_state[6];
unsigned char out_state[6];
- spinlock_t lock;
+ raw_spinlock_t lock;
unsigned long irq_mask;
unsigned long flow_mask;
unsigned base;
@@ -73,13 +73,13 @@ static int ws16c48_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
const unsigned mask = BIT(offset % 8);
unsigned long flags;
- spin_lock_irqsave(&ws16c48gpio->lock, flags);
+ raw_spin_lock_irqsave(&ws16c48gpio->lock, flags);
ws16c48gpio->io_state[port] |= mask;
ws16c48gpio->out_state[port] &= ~mask;
outb(ws16c48gpio->out_state[port], ws16c48gpio->base + port);
- spin_unlock_irqrestore(&ws16c48gpio->lock, flags);
+ raw_spin_unlock_irqrestore(&ws16c48gpio->lock, flags);
return 0;
}
@@ -92,7 +92,7 @@ static int ws16c48_gpio_direction_output(struct gpio_chip *chip,
const unsigned mask = BIT(offset % 8);
unsigned long flags;
- spin_lock_irqsave(&ws16c48gpio->lock, flags);
+ raw_spin_lock_irqsave(&ws16c48gpio->lock, flags);
ws16c48gpio->io_state[port] &= ~mask;
if (value)
@@ -101,7 +101,7 @@ static int ws16c48_gpio_direction_output(struct gpio_chip *chip,
ws16c48gpio->out_state[port] &= ~mask;
outb(ws16c48gpio->out_state[port], ws16c48gpio->base + port);
- spin_unlock_irqrestore(&ws16c48gpio->lock, flags);
+ raw_spin_unlock_irqrestore(&ws16c48gpio->lock, flags);
return 0;
}
@@ -114,17 +114,17 @@ static int ws16c48_gpio_get(struct gpio_chip *chip, unsigned offset)
unsigned long flags;
unsigned port_state;
- spin_lock_irqsave(&ws16c48gpio->lock, flags);
+ raw_spin_lock_irqsave(&ws16c48gpio->lock, flags);
/* ensure that GPIO is set for input */
if (!(ws16c48gpio->io_state[port] & mask)) {
- spin_unlock_irqrestore(&ws16c48gpio->lock, flags);
+ raw_spin_unlock_irqrestore(&ws16c48gpio->lock, flags);
return -EINVAL;
}
port_state = inb(ws16c48gpio->base + port);
- spin_unlock_irqrestore(&ws16c48gpio->lock, flags);
+ raw_spin_unlock_irqrestore(&ws16c48gpio->lock, flags);
return !!(port_state & mask);
}
@@ -136,11 +136,11 @@ static void ws16c48_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
const unsigned mask = BIT(offset % 8);
unsigned long flags;
- spin_lock_irqsave(&ws16c48gpio->lock, flags);
+ raw_spin_lock_irqsave(&ws16c48gpio->lock, flags);
/* ensure that GPIO is set for output */
if (ws16c48gpio->io_state[port] & mask) {
- spin_unlock_irqrestore(&ws16c48gpio->lock, flags);
+ raw_spin_unlock_irqrestore(&ws16c48gpio->lock, flags);
return;
}
@@ -150,7 +150,7 @@ static void ws16c48_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
ws16c48gpio->out_state[port] &= ~mask;
outb(ws16c48gpio->out_state[port], ws16c48gpio->base + port);
- spin_unlock_irqrestore(&ws16c48gpio->lock, flags);
+ raw_spin_unlock_irqrestore(&ws16c48gpio->lock, flags);
}
static void ws16c48_gpio_set_multiple(struct gpio_chip *chip,
@@ -178,14 +178,14 @@ static void ws16c48_gpio_set_multiple(struct gpio_chip *chip,
iomask = mask[BIT_WORD(i)] & ~ws16c48gpio->io_state[port];
bitmask = iomask & bits[BIT_WORD(i)];
- spin_lock_irqsave(&ws16c48gpio->lock, flags);
+ raw_spin_lock_irqsave(&ws16c48gpio->lock, flags);
/* update output state data and set device gpio register */
ws16c48gpio->out_state[port] &= ~iomask;
ws16c48gpio->out_state[port] |= bitmask;
outb(ws16c48gpio->out_state[port], ws16c48gpio->base + port);
- spin_unlock_irqrestore(&ws16c48gpio->lock, flags);
+ raw_spin_unlock_irqrestore(&ws16c48gpio->lock, flags);
/* prepare for next gpio register set */
mask[BIT_WORD(i)] >>= gpio_reg_size;
@@ -207,7 +207,7 @@ static void ws16c48_irq_ack(struct irq_data *data)
if (port > 2)
return;
- spin_lock_irqsave(&ws16c48gpio->lock, flags);
+ raw_spin_lock_irqsave(&ws16c48gpio->lock, flags);
port_state = ws16c48gpio->irq_mask >> (8*port);
@@ -216,7 +216,7 @@ static void ws16c48_irq_ack(struct irq_data *data)
outb(port_state | mask, ws16c48gpio->base + 8 + port);
outb(0xC0, ws16c48gpio->base + 7);
- spin_unlock_irqrestore(&ws16c48gpio->lock, flags);
+ raw_spin_unlock_irqrestore(&ws16c48gpio->lock, flags);
}
static void ws16c48_irq_mask(struct irq_data *data)
@@ -232,7 +232,7 @@ static void ws16c48_irq_mask(struct irq_data *data)
if (port > 2)
return;
- spin_lock_irqsave(&ws16c48gpio->lock, flags);
+ raw_spin_lock_irqsave(&ws16c48gpio->lock, flags);
ws16c48gpio->irq_mask &= ~mask;
@@ -240,7 +240,7 @@ static void ws16c48_irq_mask(struct irq_data *data)
outb(ws16c48gpio->irq_mask >> (8*port), ws16c48gpio->base + 8 + port);
outb(0xC0, ws16c48gpio->base + 7);
- spin_unlock_irqrestore(&ws16c48gpio->lock, flags);
+ raw_spin_unlock_irqrestore(&ws16c48gpio->lock, flags);
}
static void ws16c48_irq_unmask(struct irq_data *data)
@@ -256,7 +256,7 @@ static void ws16c48_irq_unmask(struct irq_data *data)
if (port > 2)
return;
- spin_lock_irqsave(&ws16c48gpio->lock, flags);
+ raw_spin_lock_irqsave(&ws16c48gpio->lock, flags);
ws16c48gpio->irq_mask |= mask;
@@ -264,7 +264,7 @@ static void ws16c48_irq_unmask(struct irq_data *data)
outb(ws16c48gpio->irq_mask >> (8*port), ws16c48gpio->base + 8 + port);
outb(0xC0, ws16c48gpio->base + 7);
- spin_unlock_irqrestore(&ws16c48gpio->lock, flags);
+ raw_spin_unlock_irqrestore(&ws16c48gpio->lock, flags);
}
static int ws16c48_irq_set_type(struct irq_data *data, unsigned flow_type)
@@ -280,7 +280,7 @@ static int ws16c48_irq_set_type(struct irq_data *data, unsigned flow_type)
if (port > 2)
return -EINVAL;
- spin_lock_irqsave(&ws16c48gpio->lock, flags);
+ raw_spin_lock_irqsave(&ws16c48gpio->lock, flags);
switch (flow_type) {
case IRQ_TYPE_NONE:
@@ -292,7 +292,7 @@ static int ws16c48_irq_set_type(struct irq_data *data, unsigned flow_type)
ws16c48gpio->flow_mask &= ~mask;
break;
default:
- spin_unlock_irqrestore(&ws16c48gpio->lock, flags);
+ raw_spin_unlock_irqrestore(&ws16c48gpio->lock, flags);
return -EINVAL;
}
@@ -300,7 +300,7 @@ static int ws16c48_irq_set_type(struct irq_data *data, unsigned flow_type)
outb(ws16c48gpio->flow_mask >> (8*port), ws16c48gpio->base + 8 + port);
outb(0xC0, ws16c48gpio->base + 7);
- spin_unlock_irqrestore(&ws16c48gpio->lock, flags);
+ raw_spin_unlock_irqrestore(&ws16c48gpio->lock, flags);
return 0;
}
@@ -387,7 +387,7 @@ static int ws16c48_probe(struct device *dev, unsigned int id)
ws16c48gpio->chip.set_multiple = ws16c48_gpio_set_multiple;
ws16c48gpio->base = base[id];
- spin_lock_init(&ws16c48gpio->lock);
+ raw_spin_lock_init(&ws16c48gpio->lock);
err = devm_gpiochip_add_data(dev, &ws16c48gpio->chip, ws16c48gpio);
if (err) {
diff --git a/drivers/gpio/gpio-xlp.c b/drivers/gpio/gpio-xlp.c
index 4620d050e5a8..d857e1d8e731 100644
--- a/drivers/gpio/gpio-xlp.c
+++ b/drivers/gpio/gpio-xlp.c
@@ -404,7 +404,9 @@ static int xlp_gpio_probe(struct platform_device *pdev)
/* XLP(MIPS) has fixed range for GPIO IRQs, Vulcan(ARM64) does not */
if (soc_type != GPIO_VARIANT_VULCAN) {
- irq_base = irq_alloc_descs(-1, XLP_GPIO_IRQ_BASE, gc->ngpio, 0);
+ irq_base = devm_irq_alloc_descs(&pdev->dev, -1,
+ XLP_GPIO_IRQ_BASE,
+ gc->ngpio, 0);
if (irq_base < 0) {
dev_err(&pdev->dev, "Failed to allocate IRQ numbers\n");
return irq_base;
@@ -415,7 +417,7 @@ static int xlp_gpio_probe(struct platform_device *pdev)
err = gpiochip_add_data(gc, priv);
if (err < 0)
- goto out_free_desc;
+ return err;
err = gpiochip_irqchip_add(gc, &xlp_gpio_irq_chip, irq_base,
handle_level_irq, IRQ_TYPE_NONE);
@@ -433,14 +435,13 @@ static int xlp_gpio_probe(struct platform_device *pdev)
out_gpio_remove:
gpiochip_remove(gc);
-out_free_desc:
- irq_free_descs(irq_base, gc->ngpio);
return err;
}
#ifdef CONFIG_ACPI
static const struct acpi_device_id xlp_gpio_acpi_match[] = {
{ "BRCM9006", GPIO_VARIANT_VULCAN },
+ { "CAV9006", GPIO_VARIANT_VULCAN },
{},
};
MODULE_DEVICE_TABLE(acpi, xlp_gpio_acpi_match);
diff --git a/drivers/gpio/gpio-zx.c b/drivers/gpio/gpio-zx.c
index 93de8be0d885..be3a87da8438 100644
--- a/drivers/gpio/gpio-zx.c
+++ b/drivers/gpio/gpio-zx.c
@@ -41,7 +41,7 @@
#define ZX_GPIO_NR 16
struct zx_gpio {
- spinlock_t lock;
+ raw_spinlock_t lock;
void __iomem *base;
struct gpio_chip gc;
@@ -56,11 +56,11 @@ static int zx_direction_input(struct gpio_chip *gc, unsigned offset)
if (offset >= gc->ngpio)
return -EINVAL;
- spin_lock_irqsave(&chip->lock, flags);
+ raw_spin_lock_irqsave(&chip->lock, flags);
gpiodir = readw_relaxed(chip->base + ZX_GPIO_DIR);
gpiodir &= ~BIT(offset);
writew_relaxed(gpiodir, chip->base + ZX_GPIO_DIR);
- spin_unlock_irqrestore(&chip->lock, flags);
+ raw_spin_unlock_irqrestore(&chip->lock, flags);
return 0;
}
@@ -75,7 +75,7 @@ static int zx_direction_output(struct gpio_chip *gc, unsigned offset,
if (offset >= gc->ngpio)
return -EINVAL;
- spin_lock_irqsave(&chip->lock, flags);
+ raw_spin_lock_irqsave(&chip->lock, flags);
gpiodir = readw_relaxed(chip->base + ZX_GPIO_DIR);
gpiodir |= BIT(offset);
writew_relaxed(gpiodir, chip->base + ZX_GPIO_DIR);
@@ -84,7 +84,7 @@ static int zx_direction_output(struct gpio_chip *gc, unsigned offset,
writew_relaxed(BIT(offset), chip->base + ZX_GPIO_DO1);
else
writew_relaxed(BIT(offset), chip->base + ZX_GPIO_DO0);
- spin_unlock_irqrestore(&chip->lock, flags);
+ raw_spin_unlock_irqrestore(&chip->lock, flags);
return 0;
}
@@ -118,7 +118,7 @@ static int zx_irq_type(struct irq_data *d, unsigned trigger)
if (offset < 0 || offset >= ZX_GPIO_NR)
return -EINVAL;
- spin_lock_irqsave(&chip->lock, flags);
+ raw_spin_lock_irqsave(&chip->lock, flags);
gpioiev = readw_relaxed(chip->base + ZX_GPIO_IV);
gpiois = readw_relaxed(chip->base + ZX_GPIO_IVE);
@@ -151,7 +151,7 @@ static int zx_irq_type(struct irq_data *d, unsigned trigger)
writew_relaxed(gpioi_epos, chip->base + ZX_GPIO_IEP);
writew_relaxed(gpioi_eneg, chip->base + ZX_GPIO_IEN);
writew_relaxed(gpioiev, chip->base + ZX_GPIO_IV);
- spin_unlock_irqrestore(&chip->lock, flags);
+ raw_spin_unlock_irqrestore(&chip->lock, flags);
return 0;
}
@@ -184,12 +184,12 @@ static void zx_irq_mask(struct irq_data *d)
u16 mask = BIT(irqd_to_hwirq(d) % ZX_GPIO_NR);
u16 gpioie;
- spin_lock(&chip->lock);
+ raw_spin_lock(&chip->lock);
gpioie = readw_relaxed(chip->base + ZX_GPIO_IM) | mask;
writew_relaxed(gpioie, chip->base + ZX_GPIO_IM);
gpioie = readw_relaxed(chip->base + ZX_GPIO_IE) & ~mask;
writew_relaxed(gpioie, chip->base + ZX_GPIO_IE);
- spin_unlock(&chip->lock);
+ raw_spin_unlock(&chip->lock);
}
static void zx_irq_unmask(struct irq_data *d)
@@ -199,12 +199,12 @@ static void zx_irq_unmask(struct irq_data *d)
u16 mask = BIT(irqd_to_hwirq(d) % ZX_GPIO_NR);
u16 gpioie;
- spin_lock(&chip->lock);
+ raw_spin_lock(&chip->lock);
gpioie = readw_relaxed(chip->base + ZX_GPIO_IM) & ~mask;
writew_relaxed(gpioie, chip->base + ZX_GPIO_IM);
gpioie = readw_relaxed(chip->base + ZX_GPIO_IE) | mask;
writew_relaxed(gpioie, chip->base + ZX_GPIO_IE);
- spin_unlock(&chip->lock);
+ raw_spin_unlock(&chip->lock);
}
static struct irq_chip zx_irqchip = {
@@ -230,7 +230,7 @@ static int zx_gpio_probe(struct platform_device *pdev)
if (IS_ERR(chip->base))
return PTR_ERR(chip->base);
- spin_lock_init(&chip->lock);
+ raw_spin_lock_init(&chip->lock);
if (of_property_read_bool(dev->of_node, "gpio-ranges")) {
chip->gc.request = gpiochip_generic_request;
chip->gc.free = gpiochip_generic_free;
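
[Editor's aside, not part of the patch] The gpio-ws16c48 and gpio-zx conversions above share one rationale: these locks are taken from irq_chip callbacks that can run in hard interrupt context, and on PREEMPT_RT a spinlock_t becomes a sleeping lock, so only raw_spinlock_t is safe there. A minimal sketch of the resulting shape, with hypothetical demo_* names and a single 16-bit mask register:

#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/spinlock.h>

struct demo_chip {
	raw_spinlock_t lock;	/* protects the interrupt-mask register */
	void __iomem *base;
};

static void demo_irq_mask(struct irq_data *d)
{
	struct demo_chip *chip = irq_data_get_irq_chip_data(d);
	unsigned long flags;
	u16 val;

	raw_spin_lock_irqsave(&chip->lock, flags);
	val = readw_relaxed(chip->base) | BIT(irqd_to_hwirq(d) % 16);
	writew_relaxed(val, chip->base);
	raw_spin_unlock_irqrestore(&chip->lock, flags);
}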
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index 2bd683e2be02..2185232da823 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -368,6 +368,37 @@ int acpi_dev_add_driver_gpios(struct acpi_device *adev,
}
EXPORT_SYMBOL_GPL(acpi_dev_add_driver_gpios);
+static void devm_acpi_dev_release_driver_gpios(struct device *dev, void *res)
+{
+ acpi_dev_remove_driver_gpios(ACPI_COMPANION(dev));
+}
+
+int devm_acpi_dev_add_driver_gpios(struct device *dev,
+ const struct acpi_gpio_mapping *gpios)
+{
+ void *res;
+ int ret;
+
+ res = devres_alloc(devm_acpi_dev_release_driver_gpios, 0, GFP_KERNEL);
+ if (!res)
+ return -ENOMEM;
+
+ ret = acpi_dev_add_driver_gpios(ACPI_COMPANION(dev), gpios);
+ if (ret) {
+ devres_free(res);
+ return ret;
+ }
+ devres_add(dev, res);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(devm_acpi_dev_add_driver_gpios);
+
+void devm_acpi_dev_remove_driver_gpios(struct device *dev)
+{
+ WARN_ON(devres_release(dev, devm_acpi_dev_release_driver_gpios, NULL, NULL));
+}
+EXPORT_SYMBOL_GPL(devm_acpi_dev_remove_driver_gpios);
+
static bool acpi_get_driver_gpio_data(struct acpi_device *adev,
const char *name, int index,
struct acpi_reference_args *args)
@@ -661,20 +692,24 @@ int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index)
{
int idx, i;
unsigned int irq_flags;
- int ret = -ENOENT;
for (i = 0, idx = 0; idx <= index; i++) {
struct acpi_gpio_info info;
struct gpio_desc *desc;
desc = acpi_get_gpiod_by_index(adev, NULL, i, &info);
- if (IS_ERR(desc)) {
- ret = PTR_ERR(desc);
- break;
- }
+
+ /* Ignore -EPROBE_DEFER, it only matters if idx matches */
+ if (IS_ERR(desc) && PTR_ERR(desc) != -EPROBE_DEFER)
+ return PTR_ERR(desc);
+
if (info.gpioint && idx++ == index) {
- int irq = gpiod_to_irq(desc);
+ int irq;
+ if (IS_ERR(desc))
+ return PTR_ERR(desc);
+
+ irq = gpiod_to_irq(desc);
if (irq < 0)
return irq;
@@ -690,7 +725,7 @@ int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index)
}
}
- return ret;
+ return -ENOENT;
}
EXPORT_SYMBOL_GPL(acpi_dev_gpio_irq_get);
@@ -1075,7 +1110,7 @@ int acpi_gpio_count(struct device *dev, const char *con_id)
break;
}
}
- if (count >= 0)
+ if (count > 0)
break;
}
@@ -1091,7 +1126,7 @@ int acpi_gpio_count(struct device *dev, const char *con_id)
if (crs_count > 0)
count = crs_count;
}
- return count;
+ return count ? count : -ENOENT;
}
struct acpi_crs_lookup {
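
[Editor's aside, not part of the patch] The new devm_acpi_dev_add_driver_gpios() above follows the standard devres recipe for wrapping an existing add/remove pair. A hypothetical minimal version of that recipe, where demo_add()/demo_remove() stand in for whatever pair is being wrapped:

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/gfp.h>

/* hypothetical add/remove pair being wrapped; not a real kernel API */
int demo_add(struct device *dev);
void demo_remove(struct device *dev);

static void devm_demo_release(struct device *dev, void *res)
{
	demo_remove(dev);		/* undo what demo_add() set up */
}

int devm_demo_add(struct device *dev)
{
	void *res;
	int ret;

	res = devres_alloc(devm_demo_release, 0, GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	ret = demo_add(dev);
	if (ret) {
		devres_free(res);	/* never registered, free it by hand */
		return ret;
	}

	devres_add(dev, res);		/* release runs on driver detach */
	return 0;
}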
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index 975b9f6cf408..b13b7c7c335f 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -147,7 +147,7 @@ struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id,
*flags |= GPIO_ACTIVE_LOW;
if (of_flags & OF_GPIO_SINGLE_ENDED) {
- if (of_flags & OF_GPIO_ACTIVE_LOW)
+ if (of_flags & OF_GPIO_OPEN_DRAIN)
*flags |= GPIO_OPEN_DRAIN;
else
*flags |= GPIO_OPEN_SOURCE;
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 8b4d721d6d63..5db44139cef8 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -1035,18 +1035,14 @@ static int gpiochip_setup_dev(struct gpio_device *gdev)
cdev_init(&gdev->chrdev, &gpio_fileops);
gdev->chrdev.owner = THIS_MODULE;
- gdev->chrdev.kobj.parent = &gdev->dev.kobj;
gdev->dev.devt = MKDEV(MAJOR(gpio_devt), gdev->id);
- status = cdev_add(&gdev->chrdev, gdev->dev.devt, 1);
- if (status < 0)
- chip_warn(gdev->chip, "failed to add char device %d:%d\n",
- MAJOR(gpio_devt), gdev->id);
- else
- chip_dbg(gdev->chip, "added GPIO chardev (%d:%d)\n",
- MAJOR(gpio_devt), gdev->id);
- status = device_add(&gdev->dev);
+
+ status = cdev_device_add(&gdev->chrdev, &gdev->dev);
if (status)
- goto err_remove_chardev;
+ return status;
+
+ chip_dbg(gdev->chip, "added GPIO chardev (%d:%d)\n",
+ MAJOR(gpio_devt), gdev->id);
status = gpiochip_sysfs_register(gdev);
if (status)
@@ -1061,9 +1057,7 @@ static int gpiochip_setup_dev(struct gpio_device *gdev)
return 0;
err_remove_device:
- device_del(&gdev->dev);
-err_remove_chardev:
- cdev_del(&gdev->chrdev);
+ cdev_device_del(&gdev->chrdev, &gdev->dev);
return status;
}
@@ -1347,8 +1341,7 @@ void gpiochip_remove(struct gpio_chip *chip)
* be removed, else it will be dangling until the last user is
* gone.
*/
- cdev_del(&gdev->chrdev);
- device_del(&gdev->dev);
+ cdev_device_del(&gdev->chrdev, &gdev->dev);
put_device(&gdev->dev);
}
EXPORT_SYMBOL_GPL(gpiochip_remove);
@@ -1522,7 +1515,7 @@ static bool gpiochip_irqchip_irq_valid(const struct gpio_chip *gpiochip,
*/
static void gpiochip_set_cascaded_irqchip(struct gpio_chip *gpiochip,
struct irq_chip *irqchip,
- int parent_irq,
+ unsigned int parent_irq,
irq_flow_handler_t parent_handler)
{
unsigned int offset;
@@ -1571,7 +1564,7 @@ static void gpiochip_set_cascaded_irqchip(struct gpio_chip *gpiochip,
*/
void gpiochip_set_chained_irqchip(struct gpio_chip *gpiochip,
struct irq_chip *irqchip,
- int parent_irq,
+ unsigned int parent_irq,
irq_flow_handler_t parent_handler)
{
gpiochip_set_cascaded_irqchip(gpiochip, irqchip, parent_irq,
@@ -1588,7 +1581,7 @@ EXPORT_SYMBOL_GPL(gpiochip_set_chained_irqchip);
*/
void gpiochip_set_nested_irqchip(struct gpio_chip *gpiochip,
struct irq_chip *irqchip,
- int parent_irq)
+ unsigned int parent_irq)
{
if (!gpiochip->irq_nested) {
chip_err(gpiochip, "tried to nest a chained gpiochip\n");
@@ -3122,10 +3115,10 @@ static int dt_gpio_count(struct device *dev, const char *con_id)
gpio_suffixes[i]);
ret = of_gpio_named_count(dev->of_node, propname);
- if (ret >= 0)
+ if (ret > 0)
break;
}
- return ret;
+ return ret ? ret : -ENOENT;
}
static int platform_gpio_count(struct device *dev, const char *con_id)
@@ -3326,7 +3319,7 @@ EXPORT_SYMBOL_GPL(gpiod_get_index);
* underlying firmware interface and then makes sure that the GPIO
* descriptor is requested before it is returned to the caller.
*
- * On successfull request the GPIO pin is configured in accordance with
+ * On successful request the GPIO pin is configured in accordance with
* provided @dflags.
*
* In case of error an ERR_PTR() is returned.
@@ -3340,6 +3333,7 @@ struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode,
unsigned long lflags = 0;
bool active_low = false;
bool single_ended = false;
+ bool open_drain = false;
int ret;
if (!fwnode)
@@ -3353,6 +3347,7 @@ struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode,
if (!IS_ERR(desc)) {
active_low = flags & OF_GPIO_ACTIVE_LOW;
single_ended = flags & OF_GPIO_SINGLE_ENDED;
+ open_drain = flags & OF_GPIO_OPEN_DRAIN;
}
} else if (is_acpi_node(fwnode)) {
struct acpi_gpio_info info;
@@ -3373,7 +3368,7 @@ struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode,
lflags |= GPIO_ACTIVE_LOW;
if (single_ended) {
- if (active_low)
+ if (open_drain)
lflags |= GPIO_OPEN_DRAIN;
else
lflags |= GPIO_OPEN_SOURCE;
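
[Editor's aside, not part of the patch] The gpiolib chardev hunks above fold the separate cdev_add()/device_add() calls (and the manual kobj.parent assignment) into the cdev_device_add()/cdev_device_del() pair, which registers the char device and the struct device together and keeps their lifetimes tied. A rough, hypothetical sketch of that pairing with demo_* placeholders; details such as device_initialize() and dev_t allocation are omitted:

#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/module.h>

struct demo_dev {
	struct cdev chrdev;
	struct device dev;
	int id;
};

static const struct file_operations demo_fops;	/* placeholder fops */
static dev_t demo_devt;				/* placeholder base dev_t */

static int demo_register(struct demo_dev *ddev)
{
	cdev_init(&ddev->chrdev, &demo_fops);
	ddev->chrdev.owner = THIS_MODULE;
	ddev->dev.devt = MKDEV(MAJOR(demo_devt), ddev->id);

	/* registers the cdev and the device together, or neither on error */
	return cdev_device_add(&ddev->chrdev, &ddev->dev);
}

static void demo_unregister(struct demo_dev *ddev)
{
	cdev_device_del(&ddev->chrdev, &ddev->dev);
	put_device(&ddev->dev);
}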
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
index 6d691abe889c..2ee327d69775 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
@@ -27,6 +27,9 @@
*/
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
+#ifdef CONFIG_X86
+#include <asm/set_memory.h>
+#endif
#include "amdgpu.h"
/*
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index a0ea3241c651..1f178b878e42 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -2446,7 +2446,7 @@ EXPORT_SYMBOL(drm_fb_helper_hotplug_event);
int __init drm_fb_helper_modinit(void)
{
#if defined(CONFIG_FRAMEBUFFER_CONSOLE_MODULE) && !defined(CONFIG_EXPERT)
- const char *name = "fbcon";
+ const char name[] = "fbcon";
struct module *fbcon;
mutex_lock(&module_mutex);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_dump.c b/drivers/gpu/drm/etnaviv/etnaviv_dump.c
index d019b5e311cc..2d955d7d7b6d 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_dump.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_dump.c
@@ -161,8 +161,8 @@ void etnaviv_core_dump(struct etnaviv_gpu *gpu)
file_size += sizeof(*iter.hdr) * n_obj;
/* Allocate the file in vmalloc memory, it's likely to be big */
- iter.start = __vmalloc(file_size, GFP_KERNEL | __GFP_HIGHMEM |
- __GFP_NOWARN | __GFP_NORETRY, PAGE_KERNEL);
+ iter.start = __vmalloc(file_size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY,
+ PAGE_KERNEL);
if (!iter.start) {
dev_warn(gpu->dev, "failed to allocate devcoredump file\n");
return;
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 52438404c8c9..1ff6ab6371e8 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -43,6 +43,8 @@
#include <drm/exynos_drm.h>
+#include <media/cec-notifier.h>
+
#include "exynos_drm_crtc.h"
#define HOTPLUG_DEBOUNCE_MS 1100
@@ -118,6 +120,7 @@ struct hdmi_context {
bool dvi_mode;
struct delayed_work hotplug_work;
struct drm_display_mode current_mode;
+ struct cec_notifier *notifier;
const struct hdmi_driver_data *drv_data;
void __iomem *regs;
@@ -821,6 +824,7 @@ static enum drm_connector_status hdmi_detect(struct drm_connector *connector,
if (gpiod_get_value(hdata->hpd_gpio))
return connector_status_connected;
+ cec_notifier_set_phys_addr(hdata->notifier, CEC_PHYS_ADDR_INVALID);
return connector_status_disconnected;
}
@@ -859,6 +863,7 @@ static int hdmi_get_modes(struct drm_connector *connector)
edid->width_cm, edid->height_cm);
drm_mode_connector_update_edid_property(connector, edid);
+ cec_notifier_set_phys_addr_from_edid(hdata->notifier, edid);
ret = drm_add_edid_modes(connector, edid);
@@ -1501,6 +1506,7 @@ static void hdmi_disable(struct drm_encoder *encoder)
if (funcs && funcs->disable)
(*funcs->disable)(crtc);
+ cec_notifier_set_phys_addr(hdata->notifier, CEC_PHYS_ADDR_INVALID);
cancel_delayed_work(&hdata->hotplug_work);
hdmiphy_disable(hdata);
@@ -1880,15 +1886,22 @@ static int hdmi_probe(struct platform_device *pdev)
}
}
+ hdata->notifier = cec_notifier_get(&pdev->dev);
+ if (hdata->notifier == NULL) {
+ ret = -ENOMEM;
+ goto err_hdmiphy;
+ }
+
pm_runtime_enable(dev);
ret = component_add(&pdev->dev, &hdmi_component_ops);
if (ret)
- goto err_disable_pm_runtime;
+ goto err_notifier_put;
return ret;
-err_disable_pm_runtime:
+err_notifier_put:
+ cec_notifier_put(hdata->notifier);
pm_runtime_disable(dev);
err_hdmiphy:
@@ -1907,9 +1920,11 @@ static int hdmi_remove(struct platform_device *pdev)
struct hdmi_context *hdata = platform_get_drvdata(pdev);
cancel_delayed_work_sync(&hdata->hotplug_work);
+ cec_notifier_set_phys_addr(hdata->notifier, CEC_PHYS_ADDR_INVALID);
component_del(&pdev->dev, &hdmi_component_ops);
+ cec_notifier_put(hdata->notifier);
pm_runtime_disable(&pdev->dev);
if (!IS_ERR(hdata->reg_hdmi_en))
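
[Editor's aside, not part of the patch] The exynos_hdmi hunks above (and the sti_hdmi hunks further below) wire up the same CEC notifier lifecycle: get the notifier at bind time, publish the physical address parsed from the EDID on hotplug, invalidate it on disconnect or disable, and put the notifier on unbind. A hedged sketch of that lifecycle with hypothetical demo_* names:

#include <linux/errno.h>
#include <media/cec-notifier.h>

struct demo_hdmi {
	struct cec_notifier *notifier;
};

static int demo_hdmi_bind(struct device *dev, struct demo_hdmi *hdmi)
{
	hdmi->notifier = cec_notifier_get(dev);
	if (!hdmi->notifier)
		return -ENOMEM;
	return 0;
}

static void demo_hdmi_edid(struct demo_hdmi *hdmi, const struct edid *edid)
{
	/* hotplug with a valid EDID: publish the CEC physical address */
	cec_notifier_set_phys_addr_from_edid(hdmi->notifier, edid);
}

static void demo_hdmi_disconnect(struct demo_hdmi *hdmi)
{
	/* cable gone: invalidate so the CEC adapter unconfigures itself */
	cec_notifier_set_phys_addr(hdmi->notifier, CEC_PHYS_ADDR_INVALID);
}

static void demo_hdmi_unbind(struct demo_hdmi *hdmi)
{
	cec_notifier_put(hdmi->notifier);
}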
diff --git a/drivers/gpu/drm/gma500/gtt.c b/drivers/gpu/drm/gma500/gtt.c
index 3f4f424196b2..3949b0990916 100644
--- a/drivers/gpu/drm/gma500/gtt.c
+++ b/drivers/gpu/drm/gma500/gtt.c
@@ -21,6 +21,7 @@
#include <drm/drmP.h>
#include <linux/shmem_fs.h>
+#include <asm/set_memory.h>
#include "psb_drv.h"
#include "blitter.h"
diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
index 5ee93ff55608..1f9b35afefee 100644
--- a/drivers/gpu/drm/gma500/psb_drv.c
+++ b/drivers/gpu/drm/gma500/psb_drv.c
@@ -35,6 +35,7 @@
#include <linux/pm_runtime.h>
#include <acpi/video.h>
#include <linux/module.h>
+#include <asm/set_memory.h>
static struct drm_driver driver;
static int psb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 532a577ff7a1..b6ac3df18b58 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -4789,7 +4789,7 @@ i915_gem_load_init(struct drm_i915_private *dev_priv)
dev_priv->requests = KMEM_CACHE(drm_i915_gem_request,
SLAB_HWCACHE_ALIGN |
SLAB_RECLAIM_ACCOUNT |
- SLAB_DESTROY_BY_RCU);
+ SLAB_TYPESAFE_BY_RCU);
if (!dev_priv->requests)
goto err_vmas;
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 8bab4aea63e6..2aa6b97fd22f 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -31,6 +31,8 @@
#include <linux/seq_file.h>
#include <linux/stop_machine.h>
+#include <asm/set_memory.h>
+
#include <drm/drmP.h>
#include <drm/i915_drm.h>
diff --git a/drivers/gpu/drm/i915/i915_gem_request.h b/drivers/gpu/drm/i915/i915_gem_request.h
index a211c53c813f..129c58bb4805 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.h
+++ b/drivers/gpu/drm/i915/i915_gem_request.h
@@ -521,7 +521,7 @@ static inline struct drm_i915_gem_request *
__i915_gem_active_get_rcu(const struct i915_gem_active *active)
{
/* Performing a lockless retrieval of the active request is super
- * tricky. SLAB_DESTROY_BY_RCU merely guarantees that the backing
+ * tricky. SLAB_TYPESAFE_BY_RCU merely guarantees that the backing
* slab of request objects will not be freed whilst we hold the
* RCU read lock. It does not guarantee that the request itself
* will not be freed and then *reused*. Viz,
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index 6a8258eacdcb..9f24c5da3f8d 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -174,7 +174,7 @@ struct drm_i915_private *mock_gem_device(void)
i915->requests = KMEM_CACHE(mock_request,
SLAB_HWCACHE_ALIGN |
SLAB_RECLAIM_ACCOUNT |
- SLAB_DESTROY_BY_RCU);
+ SLAB_TYPESAFE_BY_RCU);
if (!i915->requests)
goto err_vmas;
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index ca5397beb357..2170534101ca 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -568,9 +568,7 @@ u_memcpya(uint64_t user, unsigned nmemb, unsigned size)
size *= nmemb;
- mem = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);
- if (!mem)
- mem = vmalloc(size);
+ mem = kvmalloc(size, GFP_KERNEL);
if (!mem)
return ERR_PTR(-ENOMEM);
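
[Editor's aside, not part of the patch] The nouveau hunk above is the canonical kvmalloc() conversion: the open-coded "try kmalloc, fall back to vmalloc" pair collapses into one call whose result is freed with kvfree() either way. A hypothetical helper showing the same idea, here using kvmalloc_array() so the element-count multiplication is overflow-checked as well:

#include <linux/mm.h>
#include <linux/slab.h>

static void *demo_alloc_table(size_t nmemb, size_t size)
{
	/* kmalloc first, vmalloc fallback, overflow check on nmemb * size */
	return kvmalloc_array(nmemb, size, GFP_KERNEL | __GFP_ZERO);
}

static void demo_free_table(void *table)
{
	kvfree(table);	/* correct for both kmalloc and vmalloc memory */
}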
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index c4777c8d0312..0b3ec35515f3 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -27,6 +27,9 @@
*/
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
+#ifdef CONFIG_X86
+#include <asm/set_memory.h>
+#endif
#include "radeon.h"
/*
diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c
index ce2dcba679d5..243905b6ae59 100644
--- a/drivers/gpu/drm/sti/sti_hdmi.c
+++ b/drivers/gpu/drm/sti/sti_hdmi.c
@@ -771,6 +771,8 @@ static void sti_hdmi_disable(struct drm_bridge *bridge)
clk_disable_unprepare(hdmi->clk_pix);
hdmi->enabled = false;
+
+ cec_notifier_set_phys_addr(hdmi->notifier, CEC_PHYS_ADDR_INVALID);
}
/**
@@ -973,6 +975,7 @@ static int sti_hdmi_connector_get_modes(struct drm_connector *connector)
DRM_DEBUG_KMS("%s : %dx%d cm\n",
(hdmi->hdmi_monitor ? "hdmi monitor" : "dvi monitor"),
edid->width_cm, edid->height_cm);
+ cec_notifier_set_phys_addr_from_edid(hdmi->notifier, edid);
count = drm_add_edid_modes(connector, edid);
drm_mode_connector_update_edid_property(connector, edid);
@@ -1035,6 +1038,7 @@ sti_hdmi_connector_detect(struct drm_connector *connector, bool force)
}
DRM_DEBUG_DRIVER("hdmi cable disconnected\n");
+ cec_notifier_set_phys_addr(hdmi->notifier, CEC_PHYS_ADDR_INVALID);
return connector_status_disconnected;
}
@@ -1423,6 +1427,10 @@ static int sti_hdmi_probe(struct platform_device *pdev)
goto release_adapter;
}
+ hdmi->notifier = cec_notifier_get(&pdev->dev);
+ if (!hdmi->notifier)
+ goto release_adapter;
+
hdmi->reset = devm_reset_control_get(dev, "hdmi");
/* Take hdmi out of reset */
if (!IS_ERR(hdmi->reset))
@@ -1442,11 +1450,14 @@ static int sti_hdmi_remove(struct platform_device *pdev)
{
struct sti_hdmi *hdmi = dev_get_drvdata(&pdev->dev);
+ cec_notifier_set_phys_addr(hdmi->notifier, CEC_PHYS_ADDR_INVALID);
+
i2c_put_adapter(hdmi->ddc_adapt);
if (hdmi->audio_pdev)
platform_device_unregister(hdmi->audio_pdev);
component_del(&pdev->dev, &sti_hdmi_ops);
+ cec_notifier_put(hdmi->notifier);
return 0;
}
diff --git a/drivers/gpu/drm/sti/sti_hdmi.h b/drivers/gpu/drm/sti/sti_hdmi.h
index 407012350f1a..c6469b56ce7e 100644
--- a/drivers/gpu/drm/sti/sti_hdmi.h
+++ b/drivers/gpu/drm/sti/sti_hdmi.h
@@ -11,6 +11,7 @@
#include <linux/platform_device.h>
#include <drm/drmP.h>
+#include <media/cec-notifier.h>
#define HDMI_STA 0x0010
#define HDMI_STA_DLL_LCK BIT(5)
@@ -64,6 +65,7 @@ static const struct drm_prop_enum_list colorspace_mode_names[] = {
* @audio_pdev: ASoC hdmi-codec platform device
* @audio: hdmi audio parameters.
* @drm_connector: hdmi connector
+ * @notifier: hotplug detect notifier
*/
struct sti_hdmi {
struct device dev;
@@ -89,6 +91,7 @@ struct sti_hdmi {
struct platform_device *audio_pdev;
struct hdmi_audio_params audio;
struct drm_connector *drm_connector;
+ struct cec_notifier *notifier;
};
u32 hdmi_read(struct sti_hdmi *hdmi, int offset);
diff --git a/drivers/gpu/drm/tegra/Kconfig b/drivers/gpu/drm/tegra/Kconfig
index bbf5a4b7e0b6..2db29d67193d 100644
--- a/drivers/gpu/drm/tegra/Kconfig
+++ b/drivers/gpu/drm/tegra/Kconfig
@@ -7,6 +7,7 @@ config DRM_TEGRA
select DRM_MIPI_DSI
select DRM_PANEL
select TEGRA_HOST1X
+ select IOMMU_IOVA if IOMMU_SUPPORT
help
Choose this option if you have an NVIDIA Tegra SoC.
diff --git a/drivers/gpu/drm/tegra/Makefile b/drivers/gpu/drm/tegra/Makefile
index 2c66a8db9da4..6af3a9ad6565 100644
--- a/drivers/gpu/drm/tegra/Makefile
+++ b/drivers/gpu/drm/tegra/Makefile
@@ -13,6 +13,8 @@ tegra-drm-y := \
sor.o \
dpaux.o \
gr2d.o \
- gr3d.o
+ gr3d.o \
+ falcon.o \
+ vic.o
obj-$(CONFIG_DRM_TEGRA) += tegra-drm.o
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index dba4e090d3df..9a1e34e48f64 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -1,13 +1,15 @@
/*
* Copyright (C) 2012 Avionic Design GmbH
- * Copyright (C) 2012-2013 NVIDIA CORPORATION. All rights reserved.
+ * Copyright (C) 2012-2016 NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
+#include <linux/bitops.h>
#include <linux/host1x.h>
+#include <linux/idr.h>
#include <linux/iommu.h>
#include <drm/drm_atomic.h>
@@ -23,8 +25,11 @@
#define DRIVER_MINOR 0
#define DRIVER_PATCHLEVEL 0
+#define CARVEOUT_SZ SZ_64M
+
struct tegra_drm_file {
- struct list_head contexts;
+ struct idr contexts;
+ struct mutex lock;
};
static void tegra_atomic_schedule(struct tegra_drm *tegra,
@@ -126,8 +131,9 @@ static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
return -ENOMEM;
if (iommu_present(&platform_bus_type)) {
+ u64 carveout_start, carveout_end, gem_start, gem_end;
struct iommu_domain_geometry *geometry;
- u64 start, end;
+ unsigned long order;
tegra->domain = iommu_domain_alloc(&platform_bus_type);
if (!tegra->domain) {
@@ -136,12 +142,26 @@ static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
}
geometry = &tegra->domain->geometry;
- start = geometry->aperture_start;
- end = geometry->aperture_end;
-
- DRM_DEBUG_DRIVER("IOMMU aperture initialized (%#llx-%#llx)\n",
- start, end);
- drm_mm_init(&tegra->mm, start, end - start + 1);
+ gem_start = geometry->aperture_start;
+ gem_end = geometry->aperture_end - CARVEOUT_SZ;
+ carveout_start = gem_end + 1;
+ carveout_end = geometry->aperture_end;
+
+ order = __ffs(tegra->domain->pgsize_bitmap);
+ init_iova_domain(&tegra->carveout.domain, 1UL << order,
+ carveout_start >> order,
+ carveout_end >> order);
+
+ tegra->carveout.shift = iova_shift(&tegra->carveout.domain);
+ tegra->carveout.limit = carveout_end >> tegra->carveout.shift;
+
+ drm_mm_init(&tegra->mm, gem_start, gem_end - gem_start + 1);
+ mutex_init(&tegra->mm_lock);
+
+ DRM_DEBUG("IOMMU apertures:\n");
+ DRM_DEBUG(" GEM: %#llx-%#llx\n", gem_start, gem_end);
+ DRM_DEBUG(" Carveout: %#llx-%#llx\n", carveout_start,
+ carveout_end);
}
mutex_init(&tegra->clients_lock);
@@ -161,6 +181,8 @@ static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
drm->mode_config.max_width = 4096;
drm->mode_config.max_height = 4096;
+ drm->mode_config.allow_fb_modifiers = true;
+
drm->mode_config.funcs = &tegra_drm_mode_funcs;
err = tegra_drm_fb_prepare(drm);
@@ -208,6 +230,8 @@ config:
if (tegra->domain) {
iommu_domain_free(tegra->domain);
drm_mm_takedown(&tegra->mm);
+ mutex_destroy(&tegra->mm_lock);
+ put_iova_domain(&tegra->carveout.domain);
}
free:
kfree(tegra);
@@ -232,6 +256,8 @@ static void tegra_drm_unload(struct drm_device *drm)
if (tegra->domain) {
iommu_domain_free(tegra->domain);
drm_mm_takedown(&tegra->mm);
+ mutex_destroy(&tegra->mm_lock);
+ put_iova_domain(&tegra->carveout.domain);
}
kfree(tegra);
@@ -245,7 +271,8 @@ static int tegra_drm_open(struct drm_device *drm, struct drm_file *filp)
if (!fpriv)
return -ENOMEM;
- INIT_LIST_HEAD(&fpriv->contexts);
+ idr_init(&fpriv->contexts);
+ mutex_init(&fpriv->lock);
filp->driver_priv = fpriv;
return 0;
@@ -424,21 +451,16 @@ fail:
#ifdef CONFIG_DRM_TEGRA_STAGING
-static struct tegra_drm_context *tegra_drm_get_context(__u64 context)
-{
- return (struct tegra_drm_context *)(uintptr_t)context;
-}
-
-static bool tegra_drm_file_owns_context(struct tegra_drm_file *file,
- struct tegra_drm_context *context)
+static struct tegra_drm_context *
+tegra_drm_file_get_context(struct tegra_drm_file *file, u32 id)
{
- struct tegra_drm_context *ctx;
+ struct tegra_drm_context *context;
- list_for_each_entry(ctx, &file->contexts, list)
- if (ctx == context)
- return true;
+ mutex_lock(&file->lock);
+ context = idr_find(&file->contexts, id);
+ mutex_unlock(&file->lock);
- return false;
+ return context;
}
static int tegra_gem_create(struct drm_device *drm, void *data,
@@ -519,6 +541,28 @@ static int tegra_syncpt_wait(struct drm_device *drm, void *data,
&args->value);
}
+static int tegra_client_open(struct tegra_drm_file *fpriv,
+ struct tegra_drm_client *client,
+ struct tegra_drm_context *context)
+{
+ int err;
+
+ err = client->ops->open_channel(client, context);
+ if (err < 0)
+ return err;
+
+ err = idr_alloc(&fpriv->contexts, context, 0, 0, GFP_KERNEL);
+ if (err < 0) {
+ client->ops->close_channel(context);
+ return err;
+ }
+
+ context->client = client;
+ context->id = err;
+
+ return 0;
+}
+
static int tegra_open_channel(struct drm_device *drm, void *data,
struct drm_file *file)
{
@@ -533,19 +577,22 @@ static int tegra_open_channel(struct drm_device *drm, void *data,
if (!context)
return -ENOMEM;
+ mutex_lock(&fpriv->lock);
+
list_for_each_entry(client, &tegra->clients, list)
if (client->base.class == args->client) {
- err = client->ops->open_channel(client, context);
- if (err)
+ err = tegra_client_open(fpriv, client, context);
+ if (err < 0)
break;
- list_add(&context->list, &fpriv->contexts);
- args->context = (uintptr_t)context;
- context->client = client;
- return 0;
+ args->context = context->id;
+ break;
}
- kfree(context);
+ if (err < 0)
+ kfree(context);
+
+ mutex_unlock(&fpriv->lock);
return err;
}
@@ -555,16 +602,22 @@ static int tegra_close_channel(struct drm_device *drm, void *data,
struct tegra_drm_file *fpriv = file->driver_priv;
struct drm_tegra_close_channel *args = data;
struct tegra_drm_context *context;
+ int err = 0;
- context = tegra_drm_get_context(args->context);
+ mutex_lock(&fpriv->lock);
- if (!tegra_drm_file_owns_context(fpriv, context))
- return -EINVAL;
+ context = tegra_drm_file_get_context(fpriv, args->context);
+ if (!context) {
+ err = -EINVAL;
+ goto unlock;
+ }
- list_del(&context->list);
+ idr_remove(&fpriv->contexts, context->id);
tegra_drm_context_free(context);
- return 0;
+unlock:
+ mutex_unlock(&fpriv->lock);
+ return err;
}
static int tegra_get_syncpt(struct drm_device *drm, void *data,
@@ -574,19 +627,27 @@ static int tegra_get_syncpt(struct drm_device *drm, void *data,
struct drm_tegra_get_syncpt *args = data;
struct tegra_drm_context *context;
struct host1x_syncpt *syncpt;
+ int err = 0;
- context = tegra_drm_get_context(args->context);
+ mutex_lock(&fpriv->lock);
- if (!tegra_drm_file_owns_context(fpriv, context))
- return -ENODEV;
+ context = tegra_drm_file_get_context(fpriv, args->context);
+ if (!context) {
+ err = -ENODEV;
+ goto unlock;
+ }
- if (args->index >= context->client->base.num_syncpts)
- return -EINVAL;
+ if (args->index >= context->client->base.num_syncpts) {
+ err = -EINVAL;
+ goto unlock;
+ }
syncpt = context->client->base.syncpts[args->index];
args->id = host1x_syncpt_id(syncpt);
- return 0;
+unlock:
+ mutex_unlock(&fpriv->lock);
+ return err;
}
static int tegra_submit(struct drm_device *drm, void *data,
@@ -595,13 +656,21 @@ static int tegra_submit(struct drm_device *drm, void *data,
struct tegra_drm_file *fpriv = file->driver_priv;
struct drm_tegra_submit *args = data;
struct tegra_drm_context *context;
+ int err;
- context = tegra_drm_get_context(args->context);
+ mutex_lock(&fpriv->lock);
+
+ context = tegra_drm_file_get_context(fpriv, args->context);
+ if (!context) {
+ err = -ENODEV;
+ goto unlock;
+ }
- if (!tegra_drm_file_owns_context(fpriv, context))
- return -ENODEV;
+ err = context->client->ops->submit(context, args, drm, file);
- return context->client->ops->submit(context, args, drm, file);
+unlock:
+ mutex_unlock(&fpriv->lock);
+ return err;
}
static int tegra_get_syncpt_base(struct drm_device *drm, void *data,
@@ -612,24 +681,34 @@ static int tegra_get_syncpt_base(struct drm_device *drm, void *data,
struct tegra_drm_context *context;
struct host1x_syncpt_base *base;
struct host1x_syncpt *syncpt;
+ int err = 0;
- context = tegra_drm_get_context(args->context);
+ mutex_lock(&fpriv->lock);
- if (!tegra_drm_file_owns_context(fpriv, context))
- return -ENODEV;
+ context = tegra_drm_file_get_context(fpriv, args->context);
+ if (!context) {
+ err = -ENODEV;
+ goto unlock;
+ }
- if (args->syncpt >= context->client->base.num_syncpts)
- return -EINVAL;
+ if (args->syncpt >= context->client->base.num_syncpts) {
+ err = -EINVAL;
+ goto unlock;
+ }
syncpt = context->client->base.syncpts[args->syncpt];
base = host1x_syncpt_get_base(syncpt);
- if (!base)
- return -ENXIO;
+ if (!base) {
+ err = -ENXIO;
+ goto unlock;
+ }
args->id = host1x_syncpt_base_id(base);
- return 0;
+unlock:
+ mutex_unlock(&fpriv->lock);
+ return err;
}
static int tegra_gem_set_tiling(struct drm_device *drm, void *data,
@@ -804,14 +883,25 @@ static const struct file_operations tegra_drm_fops = {
.llseek = noop_llseek,
};
+static int tegra_drm_context_cleanup(int id, void *p, void *data)
+{
+ struct tegra_drm_context *context = p;
+
+ tegra_drm_context_free(context);
+
+ return 0;
+}
+
static void tegra_drm_preclose(struct drm_device *drm, struct drm_file *file)
{
struct tegra_drm_file *fpriv = file->driver_priv;
- struct tegra_drm_context *context, *tmp;
- list_for_each_entry_safe(context, tmp, &fpriv->contexts, list)
- tegra_drm_context_free(context);
+ mutex_lock(&fpriv->lock);
+ idr_for_each(&fpriv->contexts, tegra_drm_context_cleanup, NULL);
+ mutex_unlock(&fpriv->lock);
+ idr_destroy(&fpriv->contexts);
+ mutex_destroy(&fpriv->lock);
kfree(fpriv);
}
@@ -844,7 +934,9 @@ static int tegra_debugfs_iova(struct seq_file *s, void *data)
struct tegra_drm *tegra = drm->dev_private;
struct drm_printer p = drm_seq_file_printer(s);
+ mutex_lock(&tegra->mm_lock);
drm_mm_print(&tegra->mm, &p);
+ mutex_unlock(&tegra->mm_lock);
return 0;
}
@@ -919,6 +1011,84 @@ int tegra_drm_unregister_client(struct tegra_drm *tegra,
return 0;
}
+void *tegra_drm_alloc(struct tegra_drm *tegra, size_t size,
+ dma_addr_t *dma)
+{
+ struct iova *alloc;
+ void *virt;
+ gfp_t gfp;
+ int err;
+
+ if (tegra->domain)
+ size = iova_align(&tegra->carveout.domain, size);
+ else
+ size = PAGE_ALIGN(size);
+
+ gfp = GFP_KERNEL | __GFP_ZERO;
+ if (!tegra->domain) {
+ /*
+ * Many units only support 32-bit addresses, even on 64-bit
+ * SoCs. If there is no IOMMU to translate into a 32-bit IO
+ * virtual address space, force allocations to be in the
+ * lower 32-bit range.
+ */
+ gfp |= GFP_DMA;
+ }
+
+ virt = (void *)__get_free_pages(gfp, get_order(size));
+ if (!virt)
+ return ERR_PTR(-ENOMEM);
+
+ if (!tegra->domain) {
+ /*
+ * If IOMMU is disabled, devices address physical memory
+ * directly.
+ */
+ *dma = virt_to_phys(virt);
+ return virt;
+ }
+
+ alloc = alloc_iova(&tegra->carveout.domain,
+ size >> tegra->carveout.shift,
+ tegra->carveout.limit, true);
+ if (!alloc) {
+ err = -EBUSY;
+ goto free_pages;
+ }
+
+ *dma = iova_dma_addr(&tegra->carveout.domain, alloc);
+ err = iommu_map(tegra->domain, *dma, virt_to_phys(virt),
+ size, IOMMU_READ | IOMMU_WRITE);
+ if (err < 0)
+ goto free_iova;
+
+ return virt;
+
+free_iova:
+ __free_iova(&tegra->carveout.domain, alloc);
+free_pages:
+ free_pages((unsigned long)virt, get_order(size));
+
+ return ERR_PTR(err);
+}
+
+void tegra_drm_free(struct tegra_drm *tegra, size_t size, void *virt,
+ dma_addr_t dma)
+{
+ if (tegra->domain)
+ size = iova_align(&tegra->carveout.domain, size);
+ else
+ size = PAGE_ALIGN(size);
+
+ if (tegra->domain) {
+ iommu_unmap(tegra->domain, dma, size);
+ free_iova(&tegra->carveout.domain,
+ iova_pfn(&tegra->carveout.domain, dma));
+ }
+
+ free_pages((unsigned long)virt, get_order(size));
+}
+
static int host1x_drm_probe(struct host1x_device *dev)
{
struct drm_driver *driver = &tegra_drm_driver;
@@ -1003,11 +1173,13 @@ static const struct of_device_id host1x_drm_subdevs[] = {
{ .compatible = "nvidia,tegra124-sor", },
{ .compatible = "nvidia,tegra124-hdmi", },
{ .compatible = "nvidia,tegra124-dsi", },
+ { .compatible = "nvidia,tegra124-vic", },
{ .compatible = "nvidia,tegra132-dsi", },
{ .compatible = "nvidia,tegra210-dc", },
{ .compatible = "nvidia,tegra210-dsi", },
{ .compatible = "nvidia,tegra210-sor", },
{ .compatible = "nvidia,tegra210-sor1", },
+ { .compatible = "nvidia,tegra210-vic", },
{ /* sentinel */ }
};
@@ -1029,6 +1201,7 @@ static struct platform_driver * const drivers[] = {
&tegra_sor_driver,
&tegra_gr2d_driver,
&tegra_gr3d_driver,
+ &tegra_vic_driver,
};
static int __init host1x_drm_init(void)
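
[Editor's aside, not part of the patch] The drm.c rework above stops handing raw kernel pointers to userspace as context handles and replaces them with small IDR-allocated integers looked up under a per-file mutex. A hypothetical minimal version of that handle pattern, with demo_* placeholders:

#include <linux/gfp.h>
#include <linux/idr.h>
#include <linux/mutex.h>
#include <linux/types.h>

struct demo_file {
	struct idr contexts;
	struct mutex lock;
};

static int demo_context_create(struct demo_file *fpriv, void *ctx, u32 *id)
{
	int err;

	mutex_lock(&fpriv->lock);
	err = idr_alloc(&fpriv->contexts, ctx, 0, 0, GFP_KERNEL);
	mutex_unlock(&fpriv->lock);
	if (err < 0)
		return err;

	*id = err;		/* opaque handle handed to userspace */
	return 0;
}

static void *demo_context_lookup(struct demo_file *fpriv, u32 id)
{
	void *ctx;

	mutex_lock(&fpriv->lock);
	ctx = idr_find(&fpriv->contexts, id);
	mutex_unlock(&fpriv->lock);

	return ctx;		/* NULL if userspace passed a bogus handle */
}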
diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h
index 5747accb2271..85aa2e3d9d4e 100644
--- a/drivers/gpu/drm/tegra/drm.h
+++ b/drivers/gpu/drm/tegra/drm.h
@@ -12,6 +12,7 @@
#include <uapi/drm/tegra_drm.h>
#include <linux/host1x.h>
+#include <linux/iova.h>
#include <linux/of_gpio.h>
#include <drm/drmP.h>
@@ -42,8 +43,15 @@ struct tegra_drm {
struct drm_device *drm;
struct iommu_domain *domain;
+ struct mutex mm_lock;
struct drm_mm mm;
+ struct {
+ struct iova_domain domain;
+ unsigned long shift;
+ unsigned long limit;
+ } carveout;
+
struct mutex clients_lock;
struct list_head clients;
@@ -67,7 +75,7 @@ struct tegra_drm_client;
struct tegra_drm_context {
struct tegra_drm_client *client;
struct host1x_channel *channel;
- struct list_head list;
+ unsigned int id;
};
struct tegra_drm_client_ops {
@@ -105,6 +113,10 @@ int tegra_drm_unregister_client(struct tegra_drm *tegra,
int tegra_drm_init(struct tegra_drm *tegra, struct drm_device *drm);
int tegra_drm_exit(struct tegra_drm *tegra);
+void *tegra_drm_alloc(struct tegra_drm *tegra, size_t size, dma_addr_t *iova);
+void tegra_drm_free(struct tegra_drm *tegra, size_t size, void *virt,
+ dma_addr_t iova);
+
struct tegra_dc_soc_info;
struct tegra_output;
@@ -283,5 +295,6 @@ extern struct platform_driver tegra_dpaux_driver;
extern struct platform_driver tegra_sor_driver;
extern struct platform_driver tegra_gr2d_driver;
extern struct platform_driver tegra_gr3d_driver;
+extern struct platform_driver tegra_vic_driver;
#endif /* HOST1X_DRM_H */
diff --git a/drivers/gpu/drm/tegra/falcon.c b/drivers/gpu/drm/tegra/falcon.c
new file mode 100644
index 000000000000..f685e72949d1
--- /dev/null
+++ b/drivers/gpu/drm/tegra/falcon.c
@@ -0,0 +1,259 @@
+/*
+ * Copyright (c) 2015, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/firmware.h>
+#include <linux/pci_ids.h>
+#include <linux/iopoll.h>
+
+#include "falcon.h"
+#include "drm.h"
+
+enum falcon_memory {
+ FALCON_MEMORY_IMEM,
+ FALCON_MEMORY_DATA,
+};
+
+static void falcon_writel(struct falcon *falcon, u32 value, u32 offset)
+{
+ writel(value, falcon->regs + offset);
+}
+
+int falcon_wait_idle(struct falcon *falcon)
+{
+ u32 value;
+
+ return readl_poll_timeout(falcon->regs + FALCON_IDLESTATE, value,
+ (value == 0), 10, 100000);
+}
+
+static int falcon_dma_wait_idle(struct falcon *falcon)
+{
+ u32 value;
+
+ return readl_poll_timeout(falcon->regs + FALCON_DMATRFCMD, value,
+ (value & FALCON_DMATRFCMD_IDLE), 10, 100000);
+}
+
+static int falcon_copy_chunk(struct falcon *falcon,
+ phys_addr_t base,
+ unsigned long offset,
+ enum falcon_memory target)
+{
+ u32 cmd = FALCON_DMATRFCMD_SIZE_256B;
+
+ if (target == FALCON_MEMORY_IMEM)
+ cmd |= FALCON_DMATRFCMD_IMEM;
+
+ falcon_writel(falcon, offset, FALCON_DMATRFMOFFS);
+ falcon_writel(falcon, base, FALCON_DMATRFFBOFFS);
+ falcon_writel(falcon, cmd, FALCON_DMATRFCMD);
+
+ return falcon_dma_wait_idle(falcon);
+}
+
+static void falcon_copy_firmware_image(struct falcon *falcon,
+ const struct firmware *firmware)
+{
+ u32 *firmware_vaddr = falcon->firmware.vaddr;
+ dma_addr_t daddr;
+ size_t i;
+ int err;
+
+ /* copy the whole thing taking into account endianness */
+ for (i = 0; i < firmware->size / sizeof(u32); i++)
+ firmware_vaddr[i] = le32_to_cpu(((u32 *)firmware->data)[i]);
+
+ /* ensure that caches are flushed and falcon can see the firmware */
+ daddr = dma_map_single(falcon->dev, firmware_vaddr,
+ falcon->firmware.size, DMA_TO_DEVICE);
+ err = dma_mapping_error(falcon->dev, daddr);
+ if (err) {
+ dev_err(falcon->dev, "failed to map firmware: %d\n", err);
+ return;
+ }
+ dma_sync_single_for_device(falcon->dev, daddr,
+ falcon->firmware.size, DMA_TO_DEVICE);
+ dma_unmap_single(falcon->dev, daddr, falcon->firmware.size,
+ DMA_TO_DEVICE);
+}
+
+static int falcon_parse_firmware_image(struct falcon *falcon)
+{
+ struct falcon_fw_bin_header_v1 *bin = (void *)falcon->firmware.vaddr;
+ struct falcon_fw_os_header_v1 *os;
+
+ /* endian problems would show up right here */
+ if (bin->magic != PCI_VENDOR_ID_NVIDIA) {
+ dev_err(falcon->dev, "incorrect firmware magic\n");
+ return -EINVAL;
+ }
+
+ /* currently only version 1 is supported */
+ if (bin->version != 1) {
+ dev_err(falcon->dev, "unsupported firmware version\n");
+ return -EINVAL;
+ }
+
+ /* check that the firmware size is consistent */
+ if (bin->size > falcon->firmware.size) {
+ dev_err(falcon->dev, "firmware image size inconsistency\n");
+ return -EINVAL;
+ }
+
+ os = falcon->firmware.vaddr + bin->os_header_offset;
+
+ falcon->firmware.bin_data.size = bin->os_size;
+ falcon->firmware.bin_data.offset = bin->os_data_offset;
+ falcon->firmware.code.offset = os->code_offset;
+ falcon->firmware.code.size = os->code_size;
+ falcon->firmware.data.offset = os->data_offset;
+ falcon->firmware.data.size = os->data_size;
+
+ return 0;
+}
+
+int falcon_read_firmware(struct falcon *falcon, const char *name)
+{
+ int err;
+
+ /* request_firmware prints error if it fails */
+ err = request_firmware(&falcon->firmware.firmware, name, falcon->dev);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+int falcon_load_firmware(struct falcon *falcon)
+{
+ const struct firmware *firmware = falcon->firmware.firmware;
+ int err;
+
+ falcon->firmware.size = firmware->size;
+
+ /* allocate iova space for the firmware */
+ falcon->firmware.vaddr = falcon->ops->alloc(falcon, firmware->size,
+ &falcon->firmware.paddr);
+ if (!falcon->firmware.vaddr) {
+ dev_err(falcon->dev, "dma memory mapping failed\n");
+ return -ENOMEM;
+ }
+
+ /* copy firmware image into local area. this also ensures endianness */
+ falcon_copy_firmware_image(falcon, firmware);
+
+ /* parse the image data */
+ err = falcon_parse_firmware_image(falcon);
+ if (err < 0) {
+ dev_err(falcon->dev, "failed to parse firmware image\n");
+ goto err_setup_firmware_image;
+ }
+
+ release_firmware(firmware);
+ falcon->firmware.firmware = NULL;
+
+ return 0;
+
+err_setup_firmware_image:
+ falcon->ops->free(falcon, falcon->firmware.size,
+ falcon->firmware.paddr, falcon->firmware.vaddr);
+
+ return err;
+}
+
+int falcon_init(struct falcon *falcon)
+{
+ /* check mandatory ops */
+ if (!falcon->ops || !falcon->ops->alloc || !falcon->ops->free)
+ return -EINVAL;
+
+ falcon->firmware.vaddr = NULL;
+
+ return 0;
+}
+
+void falcon_exit(struct falcon *falcon)
+{
+ if (falcon->firmware.firmware) {
+ release_firmware(falcon->firmware.firmware);
+ falcon->firmware.firmware = NULL;
+ }
+
+ if (falcon->firmware.vaddr) {
+ falcon->ops->free(falcon, falcon->firmware.size,
+ falcon->firmware.paddr,
+ falcon->firmware.vaddr);
+ falcon->firmware.vaddr = NULL;
+ }
+}
+
+int falcon_boot(struct falcon *falcon)
+{
+ unsigned long offset;
+ int err;
+
+ if (!falcon->firmware.vaddr)
+ return -EINVAL;
+
+ falcon_writel(falcon, 0, FALCON_DMACTL);
+
+ /* setup the address of the binary data so Falcon can access it later */
+ falcon_writel(falcon, (falcon->firmware.paddr +
+ falcon->firmware.bin_data.offset) >> 8,
+ FALCON_DMATRFBASE);
+
+ /* copy the data segment into Falcon internal memory */
+ for (offset = 0; offset < falcon->firmware.data.size; offset += 256)
+ falcon_copy_chunk(falcon,
+ falcon->firmware.data.offset + offset,
+ offset, FALCON_MEMORY_DATA);
+
+ /* copy the first code segment into Falcon internal memory */
+ falcon_copy_chunk(falcon, falcon->firmware.code.offset,
+ 0, FALCON_MEMORY_IMEM);
+
+ /* setup falcon interrupts */
+ falcon_writel(falcon, FALCON_IRQMSET_EXT(0xff) |
+ FALCON_IRQMSET_SWGEN1 |
+ FALCON_IRQMSET_SWGEN0 |
+ FALCON_IRQMSET_EXTERR |
+ FALCON_IRQMSET_HALT |
+ FALCON_IRQMSET_WDTMR,
+ FALCON_IRQMSET);
+ falcon_writel(falcon, FALCON_IRQDEST_EXT(0xff) |
+ FALCON_IRQDEST_SWGEN1 |
+ FALCON_IRQDEST_SWGEN0 |
+ FALCON_IRQDEST_EXTERR |
+ FALCON_IRQDEST_HALT,
+ FALCON_IRQDEST);
+
+ /* enable interface */
+ falcon_writel(falcon, FALCON_ITFEN_MTHDEN |
+ FALCON_ITFEN_CTXEN,
+ FALCON_ITFEN);
+
+ /* boot falcon */
+ falcon_writel(falcon, 0x00000000, FALCON_BOOTVEC);
+ falcon_writel(falcon, FALCON_CPUCTL_STARTCPU, FALCON_CPUCTL);
+
+ err = falcon_wait_idle(falcon);
+ if (err < 0) {
+ dev_err(falcon->dev, "Falcon boot failed due to timeout\n");
+ return err;
+ }
+
+ return 0;
+}
+
+void falcon_execute_method(struct falcon *falcon, u32 method, u32 data)
+{
+ falcon_writel(falcon, method >> 2, FALCON_UCLASS_METHOD_OFFSET);
+ falcon_writel(falcon, data, FALCON_UCLASS_METHOD_DATA);
+}
diff --git a/drivers/gpu/drm/tegra/falcon.h b/drivers/gpu/drm/tegra/falcon.h
new file mode 100644
index 000000000000..4504ed5a199e
--- /dev/null
+++ b/drivers/gpu/drm/tegra/falcon.h
@@ -0,0 +1,127 @@
+/*
+ * Copyright (c) 2015, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _FALCON_H_
+#define _FALCON_H_
+
+#include <linux/types.h>
+
+#define FALCON_UCLASS_METHOD_OFFSET 0x00000040
+
+#define FALCON_UCLASS_METHOD_DATA 0x00000044
+
+#define FALCON_IRQMSET 0x00001010
+#define FALCON_IRQMSET_WDTMR (1 << 1)
+#define FALCON_IRQMSET_HALT (1 << 4)
+#define FALCON_IRQMSET_EXTERR (1 << 5)
+#define FALCON_IRQMSET_SWGEN0 (1 << 6)
+#define FALCON_IRQMSET_SWGEN1 (1 << 7)
+#define FALCON_IRQMSET_EXT(v) (((v) & 0xff) << 8)
+
+#define FALCON_IRQDEST 0x0000101c
+#define FALCON_IRQDEST_HALT (1 << 4)
+#define FALCON_IRQDEST_EXTERR (1 << 5)
+#define FALCON_IRQDEST_SWGEN0 (1 << 6)
+#define FALCON_IRQDEST_SWGEN1 (1 << 7)
+#define FALCON_IRQDEST_EXT(v) (((v) & 0xff) << 8)
+
+#define FALCON_ITFEN 0x00001048
+#define FALCON_ITFEN_CTXEN (1 << 0)
+#define FALCON_ITFEN_MTHDEN (1 << 1)
+
+#define FALCON_IDLESTATE 0x0000104c
+
+#define FALCON_CPUCTL 0x00001100
+#define FALCON_CPUCTL_STARTCPU (1 << 1)
+
+#define FALCON_BOOTVEC 0x00001104
+
+#define FALCON_DMACTL 0x0000110c
+#define FALCON_DMACTL_DMEM_SCRUBBING (1 << 1)
+#define FALCON_DMACTL_IMEM_SCRUBBING (1 << 2)
+
+#define FALCON_DMATRFBASE 0x00001110
+
+#define FALCON_DMATRFMOFFS 0x00001114
+
+#define FALCON_DMATRFCMD 0x00001118
+#define FALCON_DMATRFCMD_IDLE (1 << 1)
+#define FALCON_DMATRFCMD_IMEM (1 << 4)
+#define FALCON_DMATRFCMD_SIZE_256B (6 << 8)
+
+#define FALCON_DMATRFFBOFFS 0x0000111c
+
+struct falcon_fw_bin_header_v1 {
+ u32 magic; /* 0x10de */
+ u32 version; /* version of bin format (1) */
+ u32 size; /* entire image size including this header */
+ u32 os_header_offset;
+ u32 os_data_offset;
+ u32 os_size;
+};
+
+struct falcon_fw_os_app_v1 {
+ u32 offset;
+ u32 size;
+};
+
+struct falcon_fw_os_header_v1 {
+ u32 code_offset;
+ u32 code_size;
+ u32 data_offset;
+ u32 data_size;
+};
+
+struct falcon;
+
+struct falcon_ops {
+ void *(*alloc)(struct falcon *falcon, size_t size,
+ dma_addr_t *paddr);
+ void (*free)(struct falcon *falcon, size_t size,
+ dma_addr_t paddr, void *vaddr);
+};
+
+struct falcon_firmware_section {
+ unsigned long offset;
+ size_t size;
+};
+
+struct falcon_firmware {
+ /* Firmware after it is read but not loaded */
+ const struct firmware *firmware;
+
+ /* Raw firmware data */
+ dma_addr_t paddr;
+ void *vaddr;
+ size_t size;
+
+ /* Parsed firmware information */
+ struct falcon_firmware_section bin_data;
+ struct falcon_firmware_section data;
+ struct falcon_firmware_section code;
+};
+
+struct falcon {
+ /* Set by falcon client */
+ struct device *dev;
+ void __iomem *regs;
+ const struct falcon_ops *ops;
+ void *data;
+
+ struct falcon_firmware firmware;
+};
+
+int falcon_init(struct falcon *falcon);
+void falcon_exit(struct falcon *falcon);
+int falcon_read_firmware(struct falcon *falcon, const char *firmware_name);
+int falcon_load_firmware(struct falcon *falcon);
+int falcon_boot(struct falcon *falcon);
+void falcon_execute_method(struct falcon *falcon, u32 method, u32 data);
+int falcon_wait_idle(struct falcon *falcon);
+
+#endif /* _FALCON_H_ */
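Given the v1 layout above, a firmware blob starts with falcon_fw_bin_header_v1 and points at an OS header describing code and data sections. A rough, illustrative sanity check over such a blob, assuming all offsets and sizes are relative to the start of the image (that relationship is not spelled out in this header):

#include <linux/errno.h>
#include <linux/types.h>

#include "falcon.h"

/* Illustrative only: reject obviously malformed images before parsing. */
static int falcon_check_image(const u8 *blob, size_t len)
{
	const struct falcon_fw_bin_header_v1 *bin = (const void *)blob;

	if (len < sizeof(*bin))
		return -EINVAL;

	if (bin->magic != 0x10de || bin->version != 1)
		return -EINVAL;

	/* the binary header claims to describe the whole image */
	if (bin->size > len)
		return -EINVAL;

	/* assumed: offsets are relative to the start of the image */
	if (bin->os_header_offset + sizeof(struct falcon_fw_os_header_v1) > len)
		return -EINVAL;

	if (bin->os_data_offset + bin->os_size > len)
		return -EINVAL;

	return 0;
}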
diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c
index c61d67d16ce3..25acb73ee728 100644
--- a/drivers/gpu/drm/tegra/fb.c
+++ b/drivers/gpu/drm/tegra/fb.c
@@ -52,9 +52,26 @@ int tegra_fb_get_tiling(struct drm_framebuffer *framebuffer,
struct tegra_bo_tiling *tiling)
{
struct tegra_fb *fb = to_tegra_fb(framebuffer);
-
- /* TODO: handle YUV formats? */
- *tiling = fb->planes[0]->tiling;
+ uint64_t modifier = fb->base.modifier;
+
+ switch (fourcc_mod_tegra_mod(modifier)) {
+ case NV_FORMAT_MOD_TEGRA_TILED:
+ tiling->mode = TEGRA_BO_TILING_MODE_TILED;
+ tiling->value = 0;
+ break;
+
+ case NV_FORMAT_MOD_TEGRA_16BX2_BLOCK(0):
+ tiling->mode = TEGRA_BO_TILING_MODE_BLOCK;
+ tiling->value = fourcc_mod_tegra_param(modifier);
+ if (tiling->value > 5)
+ return -EINVAL;
+ break;
+
+ default:
+ /* TODO: handle YUV formats? */
+ *tiling = fb->planes[0]->tiling;
+ break;
+ }
return 0;
}
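The switch above separates the legacy tiled layout from the parameterized 16Bx2 block-linear layout, whose low modifier bits carry a block-height parameter that must stay in the range 0..5. A condensed restatement of the same decode, reusing the driver's tegra_bo_tiling type and the macros from this hunk (their definitions live in the DRM fourcc UAPI header and are not shown here):

#include <linux/errno.h>

#include <drm/drm_fourcc.h>

#include "gem.h"

/* Condensed restatement of the modifier decode above. */
static int tegra_tiling_from_modifier(u64 modifier, struct tegra_bo_tiling *t)
{
	switch (fourcc_mod_tegra_mod(modifier)) {
	case NV_FORMAT_MOD_TEGRA_TILED:
		t->mode = TEGRA_BO_TILING_MODE_TILED;
		t->value = 0;
		return 0;

	case NV_FORMAT_MOD_TEGRA_16BX2_BLOCK(0):
		t->mode = TEGRA_BO_TILING_MODE_BLOCK;
		t->value = fourcc_mod_tegra_param(modifier);
		return t->value > 5 ? -EINVAL : 0;

	default:
		/* unknown modifier: fall back to the per-BO tiling */
		return -ENOTSUPP;
	}
}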
diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c
index 8672f5d2f237..424569b53e57 100644
--- a/drivers/gpu/drm/tegra/gem.c
+++ b/drivers/gpu/drm/tegra/gem.c
@@ -128,12 +128,14 @@ static int tegra_bo_iommu_map(struct tegra_drm *tegra, struct tegra_bo *bo)
if (!bo->mm)
return -ENOMEM;
+ mutex_lock(&tegra->mm_lock);
+
err = drm_mm_insert_node_generic(&tegra->mm,
bo->mm, bo->gem.size, PAGE_SIZE, 0, 0);
if (err < 0) {
dev_err(tegra->drm->dev, "out of I/O virtual memory: %zd\n",
err);
- goto free;
+ goto unlock;
}
bo->paddr = bo->mm->start;
@@ -147,11 +149,14 @@ static int tegra_bo_iommu_map(struct tegra_drm *tegra, struct tegra_bo *bo)
bo->size = err;
+ mutex_unlock(&tegra->mm_lock);
+
return 0;
remove:
drm_mm_remove_node(bo->mm);
-free:
+unlock:
+ mutex_unlock(&tegra->mm_lock);
kfree(bo->mm);
return err;
}
@@ -161,8 +166,11 @@ static int tegra_bo_iommu_unmap(struct tegra_drm *tegra, struct tegra_bo *bo)
if (!bo->mm)
return 0;
+ mutex_lock(&tegra->mm_lock);
iommu_unmap(tegra->domain, bo->paddr, bo->size);
drm_mm_remove_node(bo->mm);
+ mutex_unlock(&tegra->mm_lock);
+
kfree(bo->mm);
return 0;
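The change above makes every drm_mm allocation and release for the shared IOVA space run under tegra->mm_lock, so concurrent BO maps and unmaps cannot race on the allocator. The locking discipline, reduced to its essentials (fragments mirroring the two paths above, error handling elided):

/* map path: allocation in the shared drm_mm and the IOMMU update
 * both happen with mm_lock held */
mutex_lock(&tegra->mm_lock);
err = drm_mm_insert_node_generic(&tegra->mm, bo->mm, bo->gem.size,
				 PAGE_SIZE, 0, 0);
/* ... map the BO into the domain at bo->paddr = bo->mm->start ... */
mutex_unlock(&tegra->mm_lock);

/* unmap path: exact inverse, under the same lock */
mutex_lock(&tegra->mm_lock);
iommu_unmap(tegra->domain, bo->paddr, bo->size);
drm_mm_remove_node(bo->mm);
mutex_unlock(&tegra->mm_lock);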
diff --git a/drivers/gpu/drm/tegra/vic.c b/drivers/gpu/drm/tegra/vic.c
new file mode 100644
index 000000000000..cd804e404a11
--- /dev/null
+++ b/drivers/gpu/drm/tegra/vic.c
@@ -0,0 +1,396 @@
+/*
+ * Copyright (c) 2015, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/host1x.h>
+#include <linux/iommu.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+
+#include <soc/tegra/pmc.h>
+
+#include "drm.h"
+#include "falcon.h"
+#include "vic.h"
+
+struct vic_config {
+ const char *firmware;
+};
+
+struct vic {
+ struct falcon falcon;
+ bool booted;
+
+ void __iomem *regs;
+ struct tegra_drm_client client;
+ struct host1x_channel *channel;
+ struct iommu_domain *domain;
+ struct device *dev;
+ struct clk *clk;
+
+ /* Platform configuration */
+ const struct vic_config *config;
+};
+
+static inline struct vic *to_vic(struct tegra_drm_client *client)
+{
+ return container_of(client, struct vic, client);
+}
+
+static void vic_writel(struct vic *vic, u32 value, unsigned int offset)
+{
+ writel(value, vic->regs + offset);
+}
+
+static int vic_runtime_resume(struct device *dev)
+{
+ struct vic *vic = dev_get_drvdata(dev);
+
+ return clk_prepare_enable(vic->clk);
+}
+
+static int vic_runtime_suspend(struct device *dev)
+{
+ struct vic *vic = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(vic->clk);
+
+ vic->booted = false;
+
+ return 0;
+}
+
+static int vic_boot(struct vic *vic)
+{
+ u32 fce_ucode_size, fce_bin_data_offset;
+ void *hdr;
+ int err = 0;
+
+ if (vic->booted)
+ return 0;
+
+ /* setup clockgating registers */
+ vic_writel(vic, CG_IDLE_CG_DLY_CNT(4) |
+ CG_IDLE_CG_EN |
+ CG_WAKEUP_DLY_CNT(4),
+ NV_PVIC_MISC_PRI_VIC_CG);
+
+ err = falcon_boot(&vic->falcon);
+ if (err < 0)
+ return err;
+
+ hdr = vic->falcon.firmware.vaddr;
+ fce_bin_data_offset = *(u32 *)(hdr + VIC_UCODE_FCE_DATA_OFFSET);
+ hdr = vic->falcon.firmware.vaddr +
+ *(u32 *)(hdr + VIC_UCODE_FCE_HEADER_OFFSET);
+ fce_ucode_size = *(u32 *)(hdr + FCE_UCODE_SIZE_OFFSET);
+
+ falcon_execute_method(&vic->falcon, VIC_SET_APPLICATION_ID, 1);
+ falcon_execute_method(&vic->falcon, VIC_SET_FCE_UCODE_SIZE,
+ fce_ucode_size);
+ falcon_execute_method(&vic->falcon, VIC_SET_FCE_UCODE_OFFSET,
+ (vic->falcon.firmware.paddr + fce_bin_data_offset)
+ >> 8);
+
+ err = falcon_wait_idle(&vic->falcon);
+ if (err < 0) {
+ dev_err(vic->dev,
+ "failed to set application ID and FCE base\n");
+ return err;
+ }
+
+ vic->booted = true;
+
+ return 0;
+}
+
+static void *vic_falcon_alloc(struct falcon *falcon, size_t size,
+ dma_addr_t *iova)
+{
+ struct tegra_drm *tegra = falcon->data;
+
+ return tegra_drm_alloc(tegra, size, iova);
+}
+
+static void vic_falcon_free(struct falcon *falcon, size_t size,
+ dma_addr_t iova, void *va)
+{
+ struct tegra_drm *tegra = falcon->data;
+
+ return tegra_drm_free(tegra, size, va, iova);
+}
+
+static const struct falcon_ops vic_falcon_ops = {
+ .alloc = vic_falcon_alloc,
+ .free = vic_falcon_free
+};
+
+static int vic_init(struct host1x_client *client)
+{
+ struct tegra_drm_client *drm = host1x_to_drm_client(client);
+ struct drm_device *dev = dev_get_drvdata(client->parent);
+ struct tegra_drm *tegra = dev->dev_private;
+ struct vic *vic = to_vic(drm);
+ int err;
+
+ if (tegra->domain) {
+ err = iommu_attach_device(tegra->domain, vic->dev);
+ if (err < 0) {
+ dev_err(vic->dev, "failed to attach to domain: %d\n",
+ err);
+ return err;
+ }
+
+ vic->domain = tegra->domain;
+ }
+
+ if (!vic->falcon.data) {
+ vic->falcon.data = tegra;
+ err = falcon_load_firmware(&vic->falcon);
+ if (err < 0)
+ goto detach_device;
+ }
+
+ vic->channel = host1x_channel_request(client->dev);
+ if (!vic->channel) {
+ err = -ENOMEM;
+ goto detach_device;
+ }
+
+ client->syncpts[0] = host1x_syncpt_request(client->dev, 0);
+ if (!client->syncpts[0]) {
+ err = -ENOMEM;
+ goto free_channel;
+ }
+
+ err = tegra_drm_register_client(tegra, drm);
+ if (err < 0)
+ goto free_syncpt;
+
+ return 0;
+
+free_syncpt:
+ host1x_syncpt_free(client->syncpts[0]);
+free_channel:
+ host1x_channel_free(vic->channel);
+detach_device:
+ if (tegra->domain)
+ iommu_detach_device(tegra->domain, vic->dev);
+
+ return err;
+}
+
+static int vic_exit(struct host1x_client *client)
+{
+ struct tegra_drm_client *drm = host1x_to_drm_client(client);
+ struct drm_device *dev = dev_get_drvdata(client->parent);
+ struct tegra_drm *tegra = dev->dev_private;
+ struct vic *vic = to_vic(drm);
+ int err;
+
+ err = tegra_drm_unregister_client(tegra, drm);
+ if (err < 0)
+ return err;
+
+ host1x_syncpt_free(client->syncpts[0]);
+ host1x_channel_free(vic->channel);
+
+ if (vic->domain) {
+ iommu_detach_device(vic->domain, vic->dev);
+ vic->domain = NULL;
+ }
+
+ return 0;
+}
+
+static const struct host1x_client_ops vic_client_ops = {
+ .init = vic_init,
+ .exit = vic_exit,
+};
+
+static int vic_open_channel(struct tegra_drm_client *client,
+ struct tegra_drm_context *context)
+{
+ struct vic *vic = to_vic(client);
+ int err;
+
+ err = pm_runtime_get_sync(vic->dev);
+ if (err < 0)
+ return err;
+
+ err = vic_boot(vic);
+ if (err < 0) {
+ pm_runtime_put(vic->dev);
+ return err;
+ }
+
+ context->channel = host1x_channel_get(vic->channel);
+ if (!context->channel) {
+ pm_runtime_put(vic->dev);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void vic_close_channel(struct tegra_drm_context *context)
+{
+ struct vic *vic = to_vic(context->client);
+
+ host1x_channel_put(context->channel);
+
+ pm_runtime_put(vic->dev);
+}
+
+static const struct tegra_drm_client_ops vic_ops = {
+ .open_channel = vic_open_channel,
+ .close_channel = vic_close_channel,
+ .submit = tegra_drm_submit,
+};
+
+static const struct vic_config vic_t124_config = {
+ .firmware = "nvidia/tegra124/vic03_ucode.bin",
+};
+
+static const struct vic_config vic_t210_config = {
+ .firmware = "nvidia/tegra210/vic04_ucode.bin",
+};
+
+static const struct of_device_id vic_match[] = {
+ { .compatible = "nvidia,tegra124-vic", .data = &vic_t124_config },
+ { .compatible = "nvidia,tegra210-vic", .data = &vic_t210_config },
+ { },
+};
+
+static int vic_probe(struct platform_device *pdev)
+{
+ struct vic_config *vic_config = NULL;
+ struct device *dev = &pdev->dev;
+ struct host1x_syncpt **syncpts;
+ struct resource *regs;
+ const struct of_device_id *match;
+ struct vic *vic;
+ int err;
+
+ match = of_match_device(vic_match, dev);
+ vic_config = (struct vic_config *)match->data;
+
+ vic = devm_kzalloc(dev, sizeof(*vic), GFP_KERNEL);
+ if (!vic)
+ return -ENOMEM;
+
+ syncpts = devm_kzalloc(dev, sizeof(*syncpts), GFP_KERNEL);
+ if (!syncpts)
+ return -ENOMEM;
+
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!regs) {
+ dev_err(&pdev->dev, "failed to get registers\n");
+ return -ENXIO;
+ }
+
+ vic->regs = devm_ioremap_resource(dev, regs);
+ if (IS_ERR(vic->regs))
+ return PTR_ERR(vic->regs);
+
+ vic->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(vic->clk)) {
+ dev_err(&pdev->dev, "failed to get clock\n");
+ return PTR_ERR(vic->clk);
+ }
+
+ vic->falcon.dev = dev;
+ vic->falcon.regs = vic->regs;
+ vic->falcon.ops = &vic_falcon_ops;
+
+ err = falcon_init(&vic->falcon);
+ if (err < 0)
+ return err;
+
+ err = falcon_read_firmware(&vic->falcon, vic_config->firmware);
+ if (err < 0)
+ goto exit_falcon;
+
+ platform_set_drvdata(pdev, vic);
+
+ INIT_LIST_HEAD(&vic->client.base.list);
+ vic->client.base.ops = &vic_client_ops;
+ vic->client.base.dev = dev;
+ vic->client.base.class = HOST1X_CLASS_VIC;
+ vic->client.base.syncpts = syncpts;
+ vic->client.base.num_syncpts = 1;
+ vic->dev = dev;
+ vic->config = vic_config;
+
+ INIT_LIST_HEAD(&vic->client.list);
+ vic->client.ops = &vic_ops;
+
+ err = host1x_client_register(&vic->client.base);
+ if (err < 0) {
+ dev_err(dev, "failed to register host1x client: %d\n", err);
+ platform_set_drvdata(pdev, NULL);
+ goto exit_falcon;
+ }
+
+ pm_runtime_enable(&pdev->dev);
+ if (!pm_runtime_enabled(&pdev->dev)) {
+ err = vic_runtime_resume(&pdev->dev);
+ if (err < 0)
+ goto unregister_client;
+ }
+
+ return 0;
+
+unregister_client:
+ host1x_client_unregister(&vic->client.base);
+exit_falcon:
+ falcon_exit(&vic->falcon);
+
+ return err;
+}
+
+static int vic_remove(struct platform_device *pdev)
+{
+ struct vic *vic = platform_get_drvdata(pdev);
+ int err;
+
+ err = host1x_client_unregister(&vic->client.base);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to unregister host1x client: %d\n",
+ err);
+ return err;
+ }
+
+ if (pm_runtime_enabled(&pdev->dev))
+ pm_runtime_disable(&pdev->dev);
+ else
+ vic_runtime_suspend(&pdev->dev);
+
+ falcon_exit(&vic->falcon);
+
+ return 0;
+}
+
+static const struct dev_pm_ops vic_pm_ops = {
+ SET_RUNTIME_PM_OPS(vic_runtime_suspend, vic_runtime_resume, NULL)
+};
+
+struct platform_driver tegra_vic_driver = {
+ .driver = {
+ .name = "tegra-vic",
+ .of_match_table = vic_match,
+ .pm = &vic_pm_ops
+ },
+ .probe = vic_probe,
+ .remove = vic_remove,
+};
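vic_boot() locates the FCE microcode by reading two 32-bit words at fixed offsets in the firmware image, then passes the falcon an address shifted right by 8, i.e. apparently in 256-byte units. The same walk, restated as a commented fragment (vic and the offsets are as defined in this file and vic.h below):

/* Fragment restating the FCE lookup in vic_boot(). */
void *image = vic->falcon.firmware.vaddr;
u32 fce_data_offset = *(u32 *)(image + VIC_UCODE_FCE_DATA_OFFSET);	/* word 7 */
u32 fce_header = *(u32 *)(image + VIC_UCODE_FCE_HEADER_OFFSET);		/* word 6 */
u32 fce_size = *(u32 *)(image + fce_header + FCE_UCODE_SIZE_OFFSET);

falcon_execute_method(&vic->falcon, VIC_SET_FCE_UCODE_SIZE, fce_size);
/* the base address is handed over in 256-byte units, hence the >> 8 */
falcon_execute_method(&vic->falcon, VIC_SET_FCE_UCODE_OFFSET,
		      (vic->falcon.firmware.paddr + fce_data_offset) >> 8);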
diff --git a/drivers/gpu/drm/tegra/vic.h b/drivers/gpu/drm/tegra/vic.h
new file mode 100644
index 000000000000..21844817a7e1
--- /dev/null
+++ b/drivers/gpu/drm/tegra/vic.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2015, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef TEGRA_VIC_H
+#define TEGRA_VIC_H
+
+/* VIC methods */
+
+#define VIC_SET_APPLICATION_ID 0x00000200
+#define VIC_SET_FCE_UCODE_SIZE 0x0000071C
+#define VIC_SET_FCE_UCODE_OFFSET 0x0000072C
+
+/* VIC registers */
+
+#define NV_PVIC_MISC_PRI_VIC_CG 0x000016d0
+#define CG_IDLE_CG_DLY_CNT(val) (((val) & 0x3f) << 0)
+#define CG_IDLE_CG_EN (1 << 6)
+#define CG_WAKEUP_DLY_CNT(val) (((val) & 0xf) << 16)
+
+/* Firmware offsets */
+
+#define VIC_UCODE_FCE_HEADER_OFFSET (6*4)
+#define VIC_UCODE_FCE_DATA_OFFSET (7*4)
+#define FCE_UCODE_SIZE_OFFSET (2*4)
+
+#endif /* TEGRA_VIC_H */
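For reference, the clock-gating value that vic_boot() writes to NV_PVIC_MISC_PRI_VIC_CG follows directly from these field macros:

/* CG_IDLE_CG_DLY_CNT(4) = (4 & 0x3f) << 0  = 0x00000004
 * CG_IDLE_CG_EN         = 1 << 6           = 0x00000040
 * CG_WAKEUP_DLY_CNT(4)  = (4 & 0xf) << 16  = 0x00040000
 * ------------------------------------------------------
 * value written to NV_PVIC_MISC_PRI_VIC_CG = 0x00040044
 */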
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index a37de5db5731..eeddc1e48409 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -51,6 +51,9 @@
#if IS_ENABLED(CONFIG_AGP)
#include <asm/agp.h>
#endif
+#ifdef CONFIG_X86
+#include <asm/set_memory.h>
+#endif
#define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(struct page *))
#define SMALL_ALLOCATION 16
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
index cec4b4baa179..90ddbdca93bd 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
@@ -53,6 +53,9 @@
#if IS_ENABLED(CONFIG_AGP)
#include <asm/agp.h>
#endif
+#ifdef CONFIG_X86
+#include <asm/set_memory.h>
+#endif
#define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(struct page *))
#define SMALL_ALLOCATION 4
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index aee3c00f836e..5260179d788a 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -44,6 +44,9 @@
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_page_alloc.h>
+#ifdef CONFIG_X86
+#include <asm/set_memory.h>
+#endif
/**
* Allocates storage for pointers to the pages that back the ttm.
diff --git a/drivers/gpu/host1x/bus.c b/drivers/gpu/host1x/bus.c
index eeb021fe6410..561831e1ae2c 100644
--- a/drivers/gpu/host1x/bus.c
+++ b/drivers/gpu/host1x/bus.c
@@ -267,37 +267,6 @@ static int host1x_device_match(struct device *dev, struct device_driver *drv)
return strcmp(dev_name(dev), drv->name) == 0;
}
-static int host1x_device_probe(struct device *dev)
-{
- struct host1x_driver *driver = to_host1x_driver(dev->driver);
- struct host1x_device *device = to_host1x_device(dev);
-
- if (driver->probe)
- return driver->probe(device);
-
- return 0;
-}
-
-static int host1x_device_remove(struct device *dev)
-{
- struct host1x_driver *driver = to_host1x_driver(dev->driver);
- struct host1x_device *device = to_host1x_device(dev);
-
- if (driver->remove)
- return driver->remove(device);
-
- return 0;
-}
-
-static void host1x_device_shutdown(struct device *dev)
-{
- struct host1x_driver *driver = to_host1x_driver(dev->driver);
- struct host1x_device *device = to_host1x_device(dev);
-
- if (driver->shutdown)
- driver->shutdown(device);
-}
-
static const struct dev_pm_ops host1x_device_pm_ops = {
.suspend = pm_generic_suspend,
.resume = pm_generic_resume,
@@ -310,9 +279,6 @@ static const struct dev_pm_ops host1x_device_pm_ops = {
struct bus_type host1x_bus_type = {
.name = "host1x",
.match = host1x_device_match,
- .probe = host1x_device_probe,
- .remove = host1x_device_remove,
- .shutdown = host1x_device_shutdown,
.pm = &host1x_device_pm_ops,
};
@@ -516,6 +482,37 @@ int host1x_unregister(struct host1x *host1x)
return 0;
}
+static int host1x_device_probe(struct device *dev)
+{
+ struct host1x_driver *driver = to_host1x_driver(dev->driver);
+ struct host1x_device *device = to_host1x_device(dev);
+
+ if (driver->probe)
+ return driver->probe(device);
+
+ return 0;
+}
+
+static int host1x_device_remove(struct device *dev)
+{
+ struct host1x_driver *driver = to_host1x_driver(dev->driver);
+ struct host1x_device *device = to_host1x_device(dev);
+
+ if (driver->remove)
+ return driver->remove(device);
+
+ return 0;
+}
+
+static void host1x_device_shutdown(struct device *dev)
+{
+ struct host1x_driver *driver = to_host1x_driver(dev->driver);
+ struct host1x_device *device = to_host1x_device(dev);
+
+ if (driver->shutdown)
+ driver->shutdown(device);
+}
+
int host1x_driver_register_full(struct host1x_driver *driver,
struct module *owner)
{
@@ -536,6 +533,9 @@ int host1x_driver_register_full(struct host1x_driver *driver,
driver->driver.bus = &host1x_bus_type;
driver->driver.owner = owner;
+ driver->driver.probe = host1x_device_probe;
+ driver->driver.remove = host1x_device_remove;
+ driver->driver.shutdown = host1x_device_shutdown;
return driver_register(&driver->driver);
}
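Moving the probe/remove/shutdown trampolines from the bus type to the driver means they are now installed per driver at registration time. A client driver still only provides host1x-level callbacks, roughly as sketched below for a hypothetical driver (host1x_driver_register() is assumed to wrap host1x_driver_register_full(driver, THIS_MODULE)):

#include <linux/host1x.h>

static int foo_probe(struct host1x_device *device)
{
	/* bind against the subdevices described by the device tree */
	return 0;
}

static int foo_remove(struct host1x_device *device)
{
	return 0;
}

static struct host1x_driver foo_driver = {
	.driver = {
		.name = "foo",
	},
	.probe = foo_probe,
	.remove = foo_remove,
};

/* host1x_driver_register(&foo_driver) is assumed to expand to
 * host1x_driver_register_full(&foo_driver, THIS_MODULE), which now also
 * wires up the generic probe/remove/shutdown trampolines shown above. */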
diff --git a/drivers/gpu/host1x/cdma.c b/drivers/gpu/host1x/cdma.c
index c5d82a8a2ec9..28541b280739 100644
--- a/drivers/gpu/host1x/cdma.c
+++ b/drivers/gpu/host1x/cdma.c
@@ -51,9 +51,15 @@ static void host1x_pushbuffer_destroy(struct push_buffer *pb)
struct host1x_cdma *cdma = pb_to_cdma(pb);
struct host1x *host1x = cdma_to_host1x(cdma);
- if (pb->phys != 0)
- dma_free_wc(host1x->dev, pb->size_bytes + 4, pb->mapped,
- pb->phys);
+ if (!pb->phys)
+ return;
+
+ if (host1x->domain) {
+ iommu_unmap(host1x->domain, pb->dma, pb->alloc_size);
+ free_iova(&host1x->iova, iova_pfn(&host1x->iova, pb->dma));
+ }
+
+ dma_free_wc(host1x->dev, pb->alloc_size, pb->mapped, pb->phys);
pb->mapped = NULL;
pb->phys = 0;
@@ -66,28 +72,64 @@ static int host1x_pushbuffer_init(struct push_buffer *pb)
{
struct host1x_cdma *cdma = pb_to_cdma(pb);
struct host1x *host1x = cdma_to_host1x(cdma);
+ struct iova *alloc;
+ u32 size;
+ int err;
pb->mapped = NULL;
pb->phys = 0;
- pb->size_bytes = HOST1X_PUSHBUFFER_SLOTS * 8;
+ pb->size = HOST1X_PUSHBUFFER_SLOTS * 8;
+
+ size = pb->size + 4;
/* initialize buffer pointers */
- pb->fence = pb->size_bytes - 8;
+ pb->fence = pb->size - 8;
pb->pos = 0;
- /* allocate and map pushbuffer memory */
- pb->mapped = dma_alloc_wc(host1x->dev, pb->size_bytes + 4, &pb->phys,
- GFP_KERNEL);
- if (!pb->mapped)
- goto fail;
+ if (host1x->domain) {
+ unsigned long shift;
+
+ size = iova_align(&host1x->iova, size);
+
+ pb->mapped = dma_alloc_wc(host1x->dev, size, &pb->phys,
+ GFP_KERNEL);
+ if (!pb->mapped)
+ return -ENOMEM;
+
+ shift = iova_shift(&host1x->iova);
+ alloc = alloc_iova(&host1x->iova, size >> shift,
+ host1x->iova_end >> shift, true);
+ if (!alloc) {
+ err = -ENOMEM;
+ goto iommu_free_mem;
+ }
+
+ pb->dma = iova_dma_addr(&host1x->iova, alloc);
+ err = iommu_map(host1x->domain, pb->dma, pb->phys, size,
+ IOMMU_READ);
+ if (err)
+ goto iommu_free_iova;
+ } else {
+ pb->mapped = dma_alloc_wc(host1x->dev, size, &pb->phys,
+ GFP_KERNEL);
+ if (!pb->mapped)
+ return -ENOMEM;
+
+ pb->dma = pb->phys;
+ }
+
+ pb->alloc_size = size;
host1x_hw_pushbuffer_init(host1x, pb);
return 0;
-fail:
- host1x_pushbuffer_destroy(pb);
- return -ENOMEM;
+iommu_free_iova:
+ __free_iova(&host1x->iova, alloc);
+iommu_free_mem:
+ dma_free_wc(host1x->dev, pb->alloc_size, pb->mapped, pb->phys);
+
+ return err;
}
/*
@@ -101,7 +143,7 @@ static void host1x_pushbuffer_push(struct push_buffer *pb, u32 op1, u32 op2)
WARN_ON(pb->pos == pb->fence);
*(p++) = op1;
*(p++) = op2;
- pb->pos = (pb->pos + 8) & (pb->size_bytes - 1);
+ pb->pos = (pb->pos + 8) & (pb->size - 1);
}
/*
@@ -111,7 +153,7 @@ static void host1x_pushbuffer_push(struct push_buffer *pb, u32 op1, u32 op2)
static void host1x_pushbuffer_pop(struct push_buffer *pb, unsigned int slots)
{
/* Advance the next write position */
- pb->fence = (pb->fence + slots * 8) & (pb->size_bytes - 1);
+ pb->fence = (pb->fence + slots * 8) & (pb->size - 1);
}
/*
@@ -119,7 +161,7 @@ static void host1x_pushbuffer_pop(struct push_buffer *pb, unsigned int slots)
*/
static u32 host1x_pushbuffer_space(struct push_buffer *pb)
{
- return ((pb->fence - pb->pos) & (pb->size_bytes - 1)) / 8;
+ return ((pb->fence - pb->pos) & (pb->size - 1)) / 8;
}
/*
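The size_bytes to size rename leaves the ring arithmetic untouched: the buffer length is a power of two, so positions wrap with a mask and the free-space calculation does not care whether fence has wrapped past pos. A worked example, assuming HOST1X_PUSHBUFFER_SLOTS were 512 (size = 4096 bytes):

/* Worked example of the masked ring arithmetic used above.
 *
 *   pos   = 4088, push one 8-byte opcode pair:
 *   pos   = (4088 + 8) & (4096 - 1) = 0             -> wraps to the start
 *
 *   fence = 4080, pos = 0:
 *   space = ((4080 - 0) & 4095) / 8 = 510 slots
 *
 *   fence = 16, pos = 4088 (fence has wrapped, pos has not):
 *   space = ((16 - 4088) & 4095) / 8 = 24 / 8 = 3 slots
 */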
diff --git a/drivers/gpu/host1x/cdma.h b/drivers/gpu/host1x/cdma.h
index 470087af8fe5..ec170a78f4e1 100644
--- a/drivers/gpu/host1x/cdma.h
+++ b/drivers/gpu/host1x/cdma.h
@@ -43,10 +43,12 @@ struct host1x_job;
struct push_buffer {
void *mapped; /* mapped pushbuffer memory */
- dma_addr_t phys; /* physical address of pushbuffer */
+ dma_addr_t dma; /* device address of pushbuffer */
+ phys_addr_t phys; /* physical address of pushbuffer */
u32 fence; /* index we've written */
u32 pos; /* index to write to */
- u32 size_bytes;
+ u32 size;
+ u32 alloc_size;
};
struct buffer_timeout {
diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c
index a62317af76ad..f05ebb14fa63 100644
--- a/drivers/gpu/host1x/dev.c
+++ b/drivers/gpu/host1x/dev.c
@@ -16,23 +16,25 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#include <linux/module.h>
-#include <linux/list.h>
-#include <linux/slab.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/clk.h>
-#include <linux/io.h>
#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/slab.h>
#define CREATE_TRACE_POINTS
#include <trace/events/host1x.h>
+#undef CREATE_TRACE_POINTS
#include "bus.h"
-#include "dev.h"
-#include "intr.h"
#include "channel.h"
#include "debug.h"
+#include "dev.h"
+#include "intr.h"
+
#include "hw/host1x01.h"
#include "hw/host1x02.h"
#include "hw/host1x04.h"
@@ -168,22 +170,56 @@ static int host1x_probe(struct platform_device *pdev)
return err;
}
+ host->rst = devm_reset_control_get(&pdev->dev, "host1x");
+ if (IS_ERR(host->rst)) {
+ err = PTR_ERR(host->rst);
+ dev_err(&pdev->dev, "failed to get reset: %d\n", err);
+ return err;
+ }
+
+ if (iommu_present(&platform_bus_type)) {
+ struct iommu_domain_geometry *geometry;
+ unsigned long order;
+
+ host->domain = iommu_domain_alloc(&platform_bus_type);
+ if (!host->domain)
+ return -ENOMEM;
+
+ err = iommu_attach_device(host->domain, &pdev->dev);
+ if (err)
+ goto fail_free_domain;
+
+ geometry = &host->domain->geometry;
+
+ order = __ffs(host->domain->pgsize_bitmap);
+ init_iova_domain(&host->iova, 1UL << order,
+ geometry->aperture_start >> order,
+ geometry->aperture_end >> order);
+ host->iova_end = geometry->aperture_end;
+ }
+
err = host1x_channel_list_init(host);
if (err) {
dev_err(&pdev->dev, "failed to initialize channel list\n");
- return err;
+ goto fail_detach_device;
}
err = clk_prepare_enable(host->clk);
if (err < 0) {
dev_err(&pdev->dev, "failed to enable clock\n");
- return err;
+ goto fail_detach_device;
+ }
+
+ err = reset_control_deassert(host->rst);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to deassert reset: %d\n", err);
+ goto fail_unprepare_disable;
}
err = host1x_syncpt_init(host);
if (err) {
dev_err(&pdev->dev, "failed to initialize syncpts\n");
- goto fail_unprepare_disable;
+ goto fail_reset_assert;
}
err = host1x_intr_init(host, syncpt_irq);
@@ -204,8 +240,19 @@ fail_deinit_intr:
host1x_intr_deinit(host);
fail_deinit_syncpt:
host1x_syncpt_deinit(host);
+fail_reset_assert:
+ reset_control_assert(host->rst);
fail_unprepare_disable:
clk_disable_unprepare(host->clk);
+fail_detach_device:
+ if (host->domain) {
+ put_iova_domain(&host->iova);
+ iommu_detach_device(host->domain, &pdev->dev);
+ }
+fail_free_domain:
+ if (host->domain)
+ iommu_domain_free(host->domain);
+
return err;
}
@@ -216,8 +263,15 @@ static int host1x_remove(struct platform_device *pdev)
host1x_unregister(host);
host1x_intr_deinit(host);
host1x_syncpt_deinit(host);
+ reset_control_assert(host->rst);
clk_disable_unprepare(host->clk);
+ if (host->domain) {
+ put_iova_domain(&host->iova);
+ iommu_detach_device(host->domain, &pdev->dev);
+ iommu_domain_free(host->domain);
+ }
+
return 0;
}
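The IOVA domain is sized from the IOMMU's reported geometry: the smallest supported page size becomes the allocation granule and the aperture bounds are converted to page frames in that granule. A concrete illustration, assuming a 4 KiB minimum page size and an example aperture (numbers are illustrative only):

/* Fragment with example values: 4 KiB pages, 0x80000000..0xffffffff aperture. */
unsigned long order = __ffs(host->domain->pgsize_bitmap);	/* 12 */
unsigned long granule = 1UL << order;				/* 4096 */

init_iova_domain(&host->iova, granule,
		 geometry->aperture_start >> order,		/* 0x80000 */
		 geometry->aperture_end >> order);		/* 0xfffff */
host->iova_end = geometry->aperture_end;			/* 0xffffffff */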
diff --git a/drivers/gpu/host1x/dev.h b/drivers/gpu/host1x/dev.h
index 06dd4f85125f..229d08b6a45e 100644
--- a/drivers/gpu/host1x/dev.h
+++ b/drivers/gpu/host1x/dev.h
@@ -17,14 +17,17 @@
#ifndef HOST1X_DEV_H
#define HOST1X_DEV_H
-#include <linux/platform_device.h>
#include <linux/device.h>
+#include <linux/iommu.h>
+#include <linux/iova.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+#include "cdma.h"
#include "channel.h"
-#include "syncpt.h"
#include "intr.h"
-#include "cdma.h"
#include "job.h"
+#include "syncpt.h"
struct host1x_syncpt;
struct host1x_syncpt_base;
@@ -107,6 +110,11 @@ struct host1x {
struct host1x_syncpt_base *bases;
struct device *dev;
struct clk *clk;
+ struct reset_control *rst;
+
+ struct iommu_domain *domain;
+ struct iova_domain iova;
+ dma_addr_t iova_end;
struct mutex intr_mutex;
int intr_syncpt_irq;
diff --git a/drivers/gpu/host1x/hw/cdma_hw.c b/drivers/gpu/host1x/hw/cdma_hw.c
index 659c1bbfeeba..6b231119193e 100644
--- a/drivers/gpu/host1x/hw/cdma_hw.c
+++ b/drivers/gpu/host1x/hw/cdma_hw.c
@@ -30,7 +30,7 @@
*/
static void push_buffer_init(struct push_buffer *pb)
{
- *(u32 *)(pb->mapped + pb->size_bytes) = host1x_opcode_restart(0);
+ *(u32 *)(pb->mapped + pb->size) = host1x_opcode_restart(0);
}
/*
@@ -55,8 +55,8 @@ static void cdma_timeout_cpu_incr(struct host1x_cdma *cdma, u32 getptr,
*(p++) = HOST1X_OPCODE_NOP;
*(p++) = HOST1X_OPCODE_NOP;
dev_dbg(host1x->dev, "%s: NOP at %pad+%#x\n", __func__,
- &pb->phys, getptr);
- getptr = (getptr + 8) & (pb->size_bytes - 1);
+ &pb->dma, getptr);
+ getptr = (getptr + 8) & (pb->size - 1);
}
wmb();
@@ -78,10 +78,9 @@ static void cdma_start(struct host1x_cdma *cdma)
HOST1X_CHANNEL_DMACTRL);
/* set base, put and end pointer */
- host1x_ch_writel(ch, cdma->push_buffer.phys, HOST1X_CHANNEL_DMASTART);
+ host1x_ch_writel(ch, cdma->push_buffer.dma, HOST1X_CHANNEL_DMASTART);
host1x_ch_writel(ch, cdma->push_buffer.pos, HOST1X_CHANNEL_DMAPUT);
- host1x_ch_writel(ch, cdma->push_buffer.phys +
- cdma->push_buffer.size_bytes + 4,
+ host1x_ch_writel(ch, cdma->push_buffer.dma + cdma->push_buffer.size + 4,
HOST1X_CHANNEL_DMAEND);
/* reset GET */
@@ -115,9 +114,8 @@ static void cdma_timeout_restart(struct host1x_cdma *cdma, u32 getptr)
HOST1X_CHANNEL_DMACTRL);
/* set base, end pointer (all of memory) */
- host1x_ch_writel(ch, cdma->push_buffer.phys, HOST1X_CHANNEL_DMASTART);
- host1x_ch_writel(ch, cdma->push_buffer.phys +
- cdma->push_buffer.size_bytes,
+ host1x_ch_writel(ch, cdma->push_buffer.dma, HOST1X_CHANNEL_DMASTART);
+ host1x_ch_writel(ch, cdma->push_buffer.dma + cdma->push_buffer.size,
HOST1X_CHANNEL_DMAEND);
/* set GET, by loading the value in PUT (then reset GET) */
diff --git a/drivers/gpu/host1x/job.c b/drivers/gpu/host1x/job.c
index 92c3df933303..5f5f8ee6143d 100644
--- a/drivers/gpu/host1x/job.c
+++ b/drivers/gpu/host1x/job.c
@@ -174,9 +174,10 @@ static int do_waitchks(struct host1x_job *job, struct host1x *host,
return 0;
}
-static unsigned int pin_job(struct host1x_job *job)
+static int pin_job(struct host1x *host, struct host1x_job *job)
{
unsigned int i;
+ int err;
job->num_unpins = 0;
@@ -186,12 +187,16 @@ static unsigned int pin_job(struct host1x_job *job)
dma_addr_t phys_addr;
reloc->target.bo = host1x_bo_get(reloc->target.bo);
- if (!reloc->target.bo)
+ if (!reloc->target.bo) {
+ err = -EINVAL;
goto unpin;
+ }
phys_addr = host1x_bo_pin(reloc->target.bo, &sgt);
- if (!phys_addr)
+ if (!phys_addr) {
+ err = -EINVAL;
goto unpin;
+ }
job->addr_phys[job->num_unpins] = phys_addr;
job->unpins[job->num_unpins].bo = reloc->target.bo;
@@ -201,28 +206,67 @@ static unsigned int pin_job(struct host1x_job *job)
for (i = 0; i < job->num_gathers; i++) {
struct host1x_job_gather *g = &job->gathers[i];
+ size_t gather_size = 0;
+ struct scatterlist *sg;
struct sg_table *sgt;
dma_addr_t phys_addr;
+ unsigned long shift;
+ struct iova *alloc;
+ unsigned int j;
g->bo = host1x_bo_get(g->bo);
- if (!g->bo)
+ if (!g->bo) {
+ err = -EINVAL;
goto unpin;
+ }
phys_addr = host1x_bo_pin(g->bo, &sgt);
- if (!phys_addr)
+ if (!phys_addr) {
+ err = -EINVAL;
goto unpin;
+ }
+
+ if (!IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL) && host->domain) {
+ for_each_sg(sgt->sgl, sg, sgt->nents, j)
+ gather_size += sg->length;
+ gather_size = iova_align(&host->iova, gather_size);
+
+ shift = iova_shift(&host->iova);
+ alloc = alloc_iova(&host->iova, gather_size >> shift,
+ host->iova_end >> shift, true);
+ if (!alloc) {
+ err = -ENOMEM;
+ goto unpin;
+ }
+
+ err = iommu_map_sg(host->domain,
+ iova_dma_addr(&host->iova, alloc),
+ sgt->sgl, sgt->nents, IOMMU_READ);
+ if (err == 0) {
+ __free_iova(&host->iova, alloc);
+ err = -EINVAL;
+ goto unpin;
+ }
+
+ job->addr_phys[job->num_unpins] =
+ iova_dma_addr(&host->iova, alloc);
+ job->unpins[job->num_unpins].size = gather_size;
+ } else {
+ job->addr_phys[job->num_unpins] = phys_addr;
+ }
+
+ job->gather_addr_phys[i] = job->addr_phys[job->num_unpins];
- job->addr_phys[job->num_unpins] = phys_addr;
job->unpins[job->num_unpins].bo = g->bo;
job->unpins[job->num_unpins].sgt = sgt;
job->num_unpins++;
}
- return job->num_unpins;
+ return 0;
unpin:
host1x_job_unpin(job);
- return 0;
+ return err;
}
static int do_relocs(struct host1x_job *job, struct host1x_bo *cmdbuf)
@@ -525,8 +569,8 @@ int host1x_job_pin(struct host1x_job *job, struct device *dev)
host1x_syncpt_load(host->syncpt + i);
/* pin memory */
- err = pin_job(job);
- if (!err)
+ err = pin_job(host, job);
+ if (err)
goto out;
/* patch gathers */
@@ -572,11 +616,19 @@ EXPORT_SYMBOL(host1x_job_pin);
void host1x_job_unpin(struct host1x_job *job)
{
+ struct host1x *host = dev_get_drvdata(job->channel->dev->parent);
unsigned int i;
for (i = 0; i < job->num_unpins; i++) {
struct host1x_job_unpin_data *unpin = &job->unpins[i];
+ if (!IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL) && host->domain) {
+ iommu_unmap(host->domain, job->addr_phys[i],
+ unpin->size);
+ free_iova(&host->iova,
+ iova_pfn(&host->iova, job->addr_phys[i]));
+ }
+
host1x_bo_unpin(unpin->bo, unpin->sgt);
host1x_bo_put(unpin->bo);
}
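With an IOMMU present and the job firewall disabled, each gather now gets a contiguous IOVA window sized to its scatterlist and mapped read-only; the unpin path above must undo exactly that using the size recorded at pin time. The pairing, stripped of the surrounding bookkeeping (fragments taken from the two paths above):

/* pin: reserve an IOVA window for the whole gather and map it read-only */
alloc = alloc_iova(&host->iova, gather_size >> shift,
		   host->iova_end >> shift, true);
mapped = iommu_map_sg(host->domain, iova_dma_addr(&host->iova, alloc),
		      sgt->sgl, sgt->nents, IOMMU_READ);

/* unpin: undo exactly that, using the size remembered at pin time */
iommu_unmap(host->domain, job->addr_phys[i], unpin->size);
free_iova(&host->iova, iova_pfn(&host->iova, job->addr_phys[i]));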
diff --git a/drivers/gpu/host1x/job.h b/drivers/gpu/host1x/job.h
index 8b3c15df0660..878239c476d2 100644
--- a/drivers/gpu/host1x/job.h
+++ b/drivers/gpu/host1x/job.h
@@ -44,6 +44,7 @@ struct host1x_waitchk {
struct host1x_job_unpin_data {
struct host1x_bo *bo;
struct sg_table *sgt;
+ size_t size;
};
/*
diff --git a/drivers/gpu/host1x/syncpt.c b/drivers/gpu/host1x/syncpt.c
index 25c11a85050b..0ac026cdc30c 100644
--- a/drivers/gpu/host1x/syncpt.c
+++ b/drivers/gpu/host1x/syncpt.c
@@ -484,7 +484,7 @@ unsigned int host1x_syncpt_nb_mlocks(struct host1x *host)
struct host1x_syncpt *host1x_syncpt_get(struct host1x *host, unsigned int id)
{
- if (host->info->nb_pts < id)
+ if (id >= host->info->nb_pts)
return NULL;
return host->syncpt + id;
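The old bounds check was off by one: with nb_pts syncpoints the valid identifiers are 0 .. nb_pts - 1, yet `nb_pts < id` still accepted id == nb_pts and indexed one element past the array. For example:

/* nb_pts = 192:
 *   old check: (192 < 192) is false  -> id = 192 passes and
 *              host->syncpt + 192 points one past the last element
 *   new check: (192 >= 192) is true  -> id = 192 is rejected
 */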
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index 321b8833fa6f..736ac76d2a6a 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -333,7 +333,7 @@ static int create_gpadl_header(void *kbuffer, u32 size,
* Gpadl is u32 and we are using a pointer which could
* be 64-bit
* This is governed by the guest/host protocol and
- * so the hypervisor gurantees that this is ok.
+ * so the hypervisor guarantees that this is ok.
*/
for (i = 0; i < pfncurr; i++)
gpadl_body->pfn[i] = slow_virt_to_phys(
@@ -380,7 +380,7 @@ nomem:
}
/*
- * vmbus_establish_gpadl - Estabish a GPADL for the specified buffer
+ * vmbus_establish_gpadl - Establish a GPADL for the specified buffer
*
* @channel: a channel
* @kbuffer: from kmalloc or vmalloc
@@ -731,7 +731,7 @@ int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
/* Setup the descriptor */
desc.type = VM_PKT_DATA_USING_GPA_DIRECT;
desc.flags = flags;
- desc.dataoffset8 = descsize >> 3; /* in 8-bytes grandularity */
+ desc.dataoffset8 = descsize >> 3; /* in 8-bytes granularity */
desc.length8 = (u16)(packetlen_aligned >> 3);
desc.transactionid = requestid;
desc.rangecount = pagecount;
@@ -792,7 +792,7 @@ int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
/* Setup the descriptor */
desc->type = VM_PKT_DATA_USING_GPA_DIRECT;
desc->flags = VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED;
- desc->dataoffset8 = desc_size >> 3; /* in 8-bytes grandularity */
+ desc->dataoffset8 = desc_size >> 3; /* in 8-bytes granularity */
desc->length8 = (u16)(packetlen_aligned >> 3);
desc->transactionid = requestid;
desc->rangecount = 1;
@@ -842,7 +842,7 @@ int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
/* Setup the descriptor */
desc.type = VM_PKT_DATA_USING_GPA_DIRECT;
desc.flags = VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED;
- desc.dataoffset8 = descsize >> 3; /* in 8-bytes grandularity */
+ desc.dataoffset8 = descsize >> 3; /* in 8-bytes granularity */
desc.length8 = (u16)(packetlen_aligned >> 3);
desc.transactionid = requestid;
desc.rangecount = 1;
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index fbcb06352308..735f9363f2e4 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -1080,30 +1080,30 @@ static void vmbus_onversion_response(
}
/* Channel message dispatch table */
-struct vmbus_channel_message_table_entry
- channel_message_table[CHANNELMSG_COUNT] = {
- {CHANNELMSG_INVALID, 0, NULL},
- {CHANNELMSG_OFFERCHANNEL, 0, vmbus_onoffer},
- {CHANNELMSG_RESCIND_CHANNELOFFER, 0, vmbus_onoffer_rescind},
- {CHANNELMSG_REQUESTOFFERS, 0, NULL},
- {CHANNELMSG_ALLOFFERS_DELIVERED, 1, vmbus_onoffers_delivered},
- {CHANNELMSG_OPENCHANNEL, 0, NULL},
- {CHANNELMSG_OPENCHANNEL_RESULT, 1, vmbus_onopen_result},
- {CHANNELMSG_CLOSECHANNEL, 0, NULL},
- {CHANNELMSG_GPADL_HEADER, 0, NULL},
- {CHANNELMSG_GPADL_BODY, 0, NULL},
- {CHANNELMSG_GPADL_CREATED, 1, vmbus_ongpadl_created},
- {CHANNELMSG_GPADL_TEARDOWN, 0, NULL},
- {CHANNELMSG_GPADL_TORNDOWN, 1, vmbus_ongpadl_torndown},
- {CHANNELMSG_RELID_RELEASED, 0, NULL},
- {CHANNELMSG_INITIATE_CONTACT, 0, NULL},
- {CHANNELMSG_VERSION_RESPONSE, 1, vmbus_onversion_response},
- {CHANNELMSG_UNLOAD, 0, NULL},
- {CHANNELMSG_UNLOAD_RESPONSE, 1, vmbus_unload_response},
- {CHANNELMSG_18, 0, NULL},
- {CHANNELMSG_19, 0, NULL},
- {CHANNELMSG_20, 0, NULL},
- {CHANNELMSG_TL_CONNECT_REQUEST, 0, NULL},
+const struct vmbus_channel_message_table_entry
+channel_message_table[CHANNELMSG_COUNT] = {
+ { CHANNELMSG_INVALID, 0, NULL },
+ { CHANNELMSG_OFFERCHANNEL, 0, vmbus_onoffer },
+ { CHANNELMSG_RESCIND_CHANNELOFFER, 0, vmbus_onoffer_rescind },
+ { CHANNELMSG_REQUESTOFFERS, 0, NULL },
+ { CHANNELMSG_ALLOFFERS_DELIVERED, 1, vmbus_onoffers_delivered },
+ { CHANNELMSG_OPENCHANNEL, 0, NULL },
+ { CHANNELMSG_OPENCHANNEL_RESULT, 1, vmbus_onopen_result },
+ { CHANNELMSG_CLOSECHANNEL, 0, NULL },
+ { CHANNELMSG_GPADL_HEADER, 0, NULL },
+ { CHANNELMSG_GPADL_BODY, 0, NULL },
+ { CHANNELMSG_GPADL_CREATED, 1, vmbus_ongpadl_created },
+ { CHANNELMSG_GPADL_TEARDOWN, 0, NULL },
+ { CHANNELMSG_GPADL_TORNDOWN, 1, vmbus_ongpadl_torndown },
+ { CHANNELMSG_RELID_RELEASED, 0, NULL },
+ { CHANNELMSG_INITIATE_CONTACT, 0, NULL },
+ { CHANNELMSG_VERSION_RESPONSE, 1, vmbus_onversion_response },
+ { CHANNELMSG_UNLOAD, 0, NULL },
+ { CHANNELMSG_UNLOAD_RESPONSE, 1, vmbus_unload_response },
+ { CHANNELMSG_18, 0, NULL },
+ { CHANNELMSG_19, 0, NULL },
+ { CHANNELMSG_20, 0, NULL },
+ { CHANNELMSG_TL_CONNECT_REQUEST, 0, NULL },
};
/*
diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
index a8366fec1458..fce27fb141cc 100644
--- a/drivers/hv/connection.c
+++ b/drivers/hv/connection.c
@@ -296,44 +296,47 @@ struct vmbus_channel *relid2channel(u32 relid)
/*
* vmbus_on_event - Process a channel event notification
+ *
+ * For batched channels (the default), optimize host-to-guest signaling
+ * by ensuring:
+ * 1. While reading the channel, we disable interrupts from host.
+ * 2. Ensure that we process all posted messages from the host
+ * before returning from this callback.
+ * 3. Once we return, enable signaling from the host. Once this
+ * state is set we check to see if additional packets are
+ * available to read. In this case we repeat the process.
+ * If this tasklet has been running for a long time
+ * then reschedule ourselves.
*/
void vmbus_on_event(unsigned long data)
{
struct vmbus_channel *channel = (void *) data;
- void (*callback_fn)(void *);
+ unsigned long time_limit = jiffies + 2;
- /*
- * A channel once created is persistent even when there
- * is no driver handling the device. An unloading driver
- * sets the onchannel_callback to NULL on the same CPU
- * as where this interrupt is handled (in an interrupt context).
- * Thus, checking and invoking the driver specific callback takes
- * care of orderly unloading of the driver.
- */
- callback_fn = READ_ONCE(channel->onchannel_callback);
- if (unlikely(callback_fn == NULL))
- return;
-
- (*callback_fn)(channel->channel_callback_context);
-
- if (channel->callback_mode == HV_CALL_BATCHED) {
- /*
- * This callback reads the messages sent by the host.
- * We can optimize host to guest signaling by ensuring:
- * 1. While reading the channel, we disable interrupts from
- * host.
- * 2. Ensure that we process all posted messages from the host
- * before returning from this callback.
- * 3. Once we return, enable signaling from the host. Once this
- * state is set we check to see if additional packets are
- * available to read. In this case we repeat the process.
+ do {
+ void (*callback_fn)(void *);
+
+ /* A channel once created is persistent even when
+ * there is no driver handling the device. An
+ * unloading driver sets the onchannel_callback to NULL.
*/
- if (hv_end_read(&channel->inbound) != 0) {
- hv_begin_read(&channel->inbound);
+ callback_fn = READ_ONCE(channel->onchannel_callback);
+ if (unlikely(callback_fn == NULL))
+ return;
- tasklet_schedule(&channel->callback_event);
- }
- }
+ (*callback_fn)(channel->channel_callback_context);
+
+ if (channel->callback_mode != HV_CALL_BATCHED)
+ return;
+
+ if (likely(hv_end_read(&channel->inbound) == 0))
+ return;
+
+ hv_begin_read(&channel->inbound);
+ } while (likely(time_before(jiffies, time_limit)));
+
+ /* The time limit (2 jiffies) has been reached */
+ tasklet_schedule(&channel->callback_event);
}
/*
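The rewritten vmbus_on_event() caps the work done per tasklet invocation at roughly two jiffies and reschedules itself when the budget runs out rather than monopolizing the CPU. The general shape of that pattern, independent of the VMBus specifics (process_one_batch and my_tasklet are hypothetical placeholders):

#include <linux/interrupt.h>
#include <linux/jiffies.h>

static struct tasklet_struct my_tasklet;		/* hypothetical */
static bool process_one_batch(unsigned long data);	/* hypothetical helper */

static void bounded_tasklet(unsigned long data)
{
	unsigned long time_limit = jiffies + 2;	/* ~2 jiffies of budget */

	do {
		if (!process_one_batch(data))
			return;		/* nothing left to read */
	} while (time_before(jiffies, time_limit));

	/* Budget exhausted: yield and let the tasklet run again later. */
	tasklet_schedule(&my_tasklet);
}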
diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
index 665a64f1611e..12e7baecb84e 100644
--- a/drivers/hv/hv.c
+++ b/drivers/hv/hv.c
@@ -254,7 +254,10 @@ int hv_synic_init(unsigned int cpu)
shared_sint.as_uint64 = 0;
shared_sint.vector = HYPERVISOR_CALLBACK_VECTOR;
shared_sint.masked = false;
- shared_sint.auto_eoi = true;
+ if (ms_hyperv.hints & HV_X64_DEPRECATING_AEOI_RECOMMENDED)
+ shared_sint.auto_eoi = false;
+ else
+ shared_sint.auto_eoi = true;
hv_set_synint_state(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT,
shared_sint.as_uint64);
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
index 5fd03e59cee5..f5728deff893 100644
--- a/drivers/hv/hv_balloon.c
+++ b/drivers/hv/hv_balloon.c
@@ -722,8 +722,6 @@ static void hv_mem_hot_add(unsigned long start, unsigned long size,
5*HZ);
post_status(&dm_device);
}
-
- return;
}
static void hv_online_page(struct page *pg)
diff --git a/drivers/hv/hv_fcopy.c b/drivers/hv/hv_fcopy.c
index a5596a642ed0..daa75bd41f86 100644
--- a/drivers/hv/hv_fcopy.c
+++ b/drivers/hv/hv_fcopy.c
@@ -186,8 +186,6 @@ static void fcopy_send_data(struct work_struct *dummy)
}
}
kfree(smsg_out);
-
- return;
}
/*
diff --git a/drivers/hv/hv_kvp.c b/drivers/hv/hv_kvp.c
index a1adfe2cfb34..e99ff2ddad40 100644
--- a/drivers/hv/hv_kvp.c
+++ b/drivers/hv/hv_kvp.c
@@ -69,7 +69,7 @@ static const int fw_versions[] = {
*
* While the request/response protocol is guaranteed by the host, we further
* ensure this by serializing packet processing in this driver - we do not
- * read additional packets from the VMBUs until the current packet is fully
+ * read additional packets from the VMBUS until the current packet is fully
* handled.
*/
@@ -397,7 +397,7 @@ kvp_send_key(struct work_struct *dummy)
* the max lengths specified. We will however, reserve room
* for the string terminating character - in the utf16s_utf8s()
* function we limit the size of the buffer where the converted
- * string is placed to HV_KVP_EXCHANGE_MAX_*_SIZE -1 to gaurantee
+ * string is placed to HV_KVP_EXCHANGE_MAX_*_SIZE -1 to guarantee
* that the strings can be properly terminated!
*/
@@ -483,8 +483,6 @@ kvp_send_key(struct work_struct *dummy)
}
kfree(message);
-
- return;
}
/*
@@ -533,7 +531,7 @@ kvp_respond_to_host(struct hv_kvp_msg *msg_to_host, int error)
*/
if (error) {
/*
- * Something failed or we have timedout;
+ * Something failed or we have timed out;
* terminate the current host-side iteration.
*/
goto response_done;
@@ -607,8 +605,8 @@ response_done:
* This callback is invoked when we get a KVP message from the host.
* The host ensures that only one KVP transaction can be active at a time.
* KVP implementation in Linux needs to forward the key to a user-mde
- * component to retrive the corresponding value. Consequently, we cannot
- * respond to the host in the conext of this callback. Since the host
+ * component to retrieve the corresponding value. Consequently, we cannot
+ * respond to the host in the context of this callback. Since the host
* guarantees that at most only one transaction can be active at a time,
* we stash away the transaction state in a set of global variables.
*/
diff --git a/drivers/hv/hv_snapshot.c b/drivers/hv/hv_snapshot.c
index e659d1b94a57..6831efd73394 100644
--- a/drivers/hv/hv_snapshot.c
+++ b/drivers/hv/hv_snapshot.c
@@ -212,8 +212,6 @@ static void vss_send_op(void)
}
kfree(vss_msg);
-
- return;
}
static void vss_handle_request(struct work_struct *dummy)
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index 884f83bba1ab..6113e915c50e 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -218,8 +218,8 @@ struct hv_per_cpu_context {
struct hv_context {
/* We only support running on top of Hyper-V
- * So at this point this really can only contain the Hyper-V ID
- */
+ * So at this point this really can only contain the Hyper-V ID
+ */
u64 guestid;
void *tsc_page;
@@ -248,14 +248,6 @@ struct hv_context {
extern struct hv_context hv_context;
-struct hv_ring_buffer_debug_info {
- u32 current_interrupt_mask;
- u32 current_read_index;
- u32 current_write_index;
- u32 bytes_avail_toread;
- u32 bytes_avail_towrite;
-};
-
/* Hv Interface */
extern int hv_init(void);
@@ -289,9 +281,6 @@ int hv_ringbuffer_read(struct vmbus_channel *channel,
void *buffer, u32 buflen, u32 *buffer_actual_len,
u64 *requestid, bool raw);
-void hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info,
- struct hv_ring_buffer_debug_info *debug_info);
-
/*
* Maximum channels is determined by the size of the interrupt page
* which is PAGE_SIZE. 1/2 of PAGE_SIZE is for send endpoint interrupt
@@ -376,7 +365,7 @@ struct vmbus_channel_message_table_entry {
void (*message_handler)(struct vmbus_channel_message_header *msg);
};
-extern struct vmbus_channel_message_table_entry
+extern const struct vmbus_channel_message_table_entry
channel_message_table[CHANNELMSG_COUNT];
@@ -403,17 +392,17 @@ int vmbus_post_msg(void *buffer, size_t buflen, bool can_sleep);
void vmbus_on_event(unsigned long data);
void vmbus_on_msg_dpc(unsigned long data);
-int hv_kvp_init(struct hv_util_service *);
+int hv_kvp_init(struct hv_util_service *srv);
void hv_kvp_deinit(void);
-void hv_kvp_onchannelcallback(void *);
+void hv_kvp_onchannelcallback(void *context);
-int hv_vss_init(struct hv_util_service *);
+int hv_vss_init(struct hv_util_service *srv);
void hv_vss_deinit(void);
-void hv_vss_onchannelcallback(void *);
+void hv_vss_onchannelcallback(void *context);
-int hv_fcopy_init(struct hv_util_service *);
+int hv_fcopy_init(struct hv_util_service *srv);
void hv_fcopy_deinit(void);
-void hv_fcopy_onchannelcallback(void *);
+void hv_fcopy_onchannelcallback(void *context);
void vmbus_initiate_unload(bool crash);
static inline void hv_poll_channel(struct vmbus_channel *channel,
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
index c3f1a9e33cef..1f450c39a9b0 100644
--- a/drivers/hv/ring_buffer.c
+++ b/drivers/hv/ring_buffer.c
@@ -75,8 +75,6 @@ static void hv_signal_on_write(u32 old_write, struct vmbus_channel *channel)
*/
if (old_write == READ_ONCE(rbi->ring_buffer->read_index))
vmbus_setevent(channel);
-
- return;
}
/* Get the next write location for the specified ring buffer. */
@@ -210,6 +208,7 @@ void hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info,
ring_info->ring_buffer->interrupt_mask;
}
}
+EXPORT_SYMBOL_GPL(hv_ringbuffer_get_debuginfo);
/* Initialize the ring buffer. */
int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
@@ -269,14 +268,13 @@ void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
int hv_ringbuffer_write(struct vmbus_channel *channel,
const struct kvec *kv_list, u32 kv_count)
{
- int i = 0;
+ int i;
u32 bytes_avail_towrite;
- u32 totalbytes_towrite = 0;
-
+ u32 totalbytes_towrite = sizeof(u64);
u32 next_write_location;
u32 old_write;
- u64 prev_indices = 0;
- unsigned long flags = 0;
+ u64 prev_indices;
+ unsigned long flags;
struct hv_ring_buffer_info *outring_info = &channel->outbound;
if (channel->rescind)
@@ -285,8 +283,6 @@ int hv_ringbuffer_write(struct vmbus_channel *channel,
for (i = 0; i < kv_count; i++)
totalbytes_towrite += kv_list[i].iov_len;
- totalbytes_towrite += sizeof(u64);
-
spin_lock_irqsave(&outring_info->ring_lock, flags);
bytes_avail_towrite = hv_get_bytes_to_write(outring_info);
@@ -349,18 +345,16 @@ int hv_ringbuffer_read(struct vmbus_channel *channel,
u64 *requestid, bool raw)
{
u32 bytes_avail_toread;
- u32 next_read_location = 0;
+ u32 next_read_location;
u64 prev_indices = 0;
struct vmpacket_descriptor desc;
u32 offset;
u32 packetlen;
- int ret = 0;
struct hv_ring_buffer_info *inring_info = &channel->inbound;
if (buflen <= 0)
return -EINVAL;
-
*buffer_actual_len = 0;
*requestid = 0;
@@ -371,7 +365,7 @@ int hv_ringbuffer_read(struct vmbus_channel *channel,
* No error is set when there is even no header, drivers are
* supposed to analyze buffer_actual_len.
*/
- return ret;
+ return 0;
}
init_cached_read_index(inring_info);
@@ -417,7 +411,7 @@ int hv_ringbuffer_read(struct vmbus_channel *channel,
hv_signal_on_read(channel);
- return ret;
+ return 0;
}
/*
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 8370b9dc6037..0087b49095eb 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -787,8 +787,6 @@ static void vmbus_shutdown(struct device *child_device)
if (drv->shutdown)
drv->shutdown(dev);
-
- return;
}
@@ -855,7 +853,7 @@ void vmbus_on_msg_dpc(unsigned long data)
struct hv_message *msg = (struct hv_message *)page_addr +
VMBUS_MESSAGE_SINT;
struct vmbus_channel_message_header *hdr;
- struct vmbus_channel_message_table_entry *entry;
+ const struct vmbus_channel_message_table_entry *entry;
struct onmessage_work_context *ctx;
u32 message_type = msg->header.message_type;
diff --git a/drivers/hwtracing/coresight/of_coresight.c b/drivers/hwtracing/coresight/of_coresight.c
index 629e031b7456..09142e99e915 100644
--- a/drivers/hwtracing/coresight/of_coresight.c
+++ b/drivers/hwtracing/coresight/of_coresight.c
@@ -149,7 +149,7 @@ struct coresight_platform_data *of_get_coresight_platform_data(
continue;
/* The local out port number */
- pdata->outports[i] = endpoint.id;
+ pdata->outports[i] = endpoint.port;
/*
* Get a handle on the remote port and parent
diff --git a/drivers/hwtracing/intel_th/msu.c b/drivers/hwtracing/intel_th/msu.c
index e88afe1a435c..dbbe31df74df 100644
--- a/drivers/hwtracing/intel_th/msu.c
+++ b/drivers/hwtracing/intel_th/msu.c
@@ -27,7 +27,9 @@
#include <linux/io.h>
#include <linux/dma-mapping.h>
-#include <asm/cacheflush.h>
+#ifdef CONFIG_X86
+#include <asm/set_memory.h>
+#endif
#include "intel_th.h"
#include "msu.h"
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
index 5a4eb6b6bd92..f2acd4b6bf01 100644
--- a/drivers/i2c/busses/i2c-designware-platdrv.c
+++ b/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -157,6 +157,8 @@ static const struct acpi_device_id dw_i2c_acpi_match[] = {
{ "AMDI0010", ACCESS_INTR_MASK },
{ "AMDI0510", 0 },
{ "APMC0D0F", 0 },
+ { "HISI02A1", 0 },
+ { "HISI02A2", 0 },
{ }
};
MODULE_DEVICE_TABLE(acpi, dw_i2c_acpi_match);
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index 45b3f41a43d4..323af721f8cb 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -107,7 +107,7 @@ void ide_complete_cmd(ide_drive_t *drive, struct ide_cmd *cmd, u8 stat, u8 err)
if (cmd->tf_flags & IDE_TFLAG_DYN)
kfree(orig_cmd);
- else
+ else if (cmd != orig_cmd)
memcpy(orig_cmd, cmd, sizeof(*cmd));
}
}
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index a74ae8df4bb8..023562565d11 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -1183,9 +1183,7 @@ static void ide_init_port_data(ide_hwif_t *hwif, unsigned int index)
spin_lock_init(&hwif->lock);
- init_timer(&hwif->timer);
- hwif->timer.function = &ide_timer_expiry;
- hwif->timer.data = (unsigned long)hwif;
+ setup_timer(&hwif->timer, &ide_timer_expiry, (unsigned long)hwif);
init_completion(&hwif->gendev_rel_comp);
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 5805b041dd0f..216d7ec88c0c 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -1097,6 +1097,7 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = {
ICPU(INTEL_FAM6_XEON_PHI_KNL, idle_cpu_knl),
ICPU(INTEL_FAM6_XEON_PHI_KNM, idle_cpu_knl),
ICPU(INTEL_FAM6_ATOM_GOLDMONT, idle_cpu_bxt),
+ ICPU(INTEL_FAM6_ATOM_GEMINI_LAKE, idle_cpu_bxt),
ICPU(INTEL_FAM6_ATOM_DENVERTON, idle_cpu_dnv),
{}
};
@@ -1309,6 +1310,7 @@ static void intel_idle_state_table_update(void)
ivt_idle_state_table_update();
break;
case INTEL_FAM6_ATOM_GOLDMONT:
+ case INTEL_FAM6_ATOM_GEMINI_LAKE:
bxt_idle_state_table_update();
break;
case INTEL_FAM6_SKYLAKE_DESKTOP:
diff --git a/drivers/iio/accel/Kconfig b/drivers/iio/accel/Kconfig
index ef8401ac1141..15de262015df 100644
--- a/drivers/iio/accel/Kconfig
+++ b/drivers/iio/accel/Kconfig
@@ -5,6 +5,37 @@
menu "Accelerometers"
+config ADXL345
+ tristate
+
+config ADXL345_I2C
+ tristate "Analog Devices ADXL345 3-Axis Digital Accelerometer I2C Driver"
+ depends on INPUT_ADXL34X=n
+ depends on I2C
+ select ADXL345
+ select REGMAP_I2C
+ help
+ Say Y here if you want to build support for the Analog Devices
+ ADXL345 3-axis digital accelerometer.
+
+ To compile this driver as a module, choose M here: the module
+ will be called adxl345_i2c and you will also get adxl345_core
+ for the core module.
+
+config ADXL345_SPI
+ tristate "Analog Devices ADXL345 3-Axis Digital Accelerometer SPI Driver"
+ depends on INPUT_ADXL34X=n
+ depends on SPI
+ select ADXL345
+ select REGMAP_SPI
+ help
+ Say Y here if you want to build support for the Analog Devices
+ ADXL345 3-axis digital accelerometer.
+
+ To compile this driver as a module, choose M here: the module
+ will be called adxl345_spi and you will also get adxl345_core
+ for the core module.
+
config BMA180
tristate "Bosch BMA180/BMA250 3-Axis Accelerometer Driver"
depends on I2C
diff --git a/drivers/iio/accel/Makefile b/drivers/iio/accel/Makefile
index 69fe8edc57a2..31fba1974e95 100644
--- a/drivers/iio/accel/Makefile
+++ b/drivers/iio/accel/Makefile
@@ -3,6 +3,9 @@
#
# When adding new entries keep the list in alphabetical order
+obj-$(CONFIG_ADXL345) += adxl345_core.o
+obj-$(CONFIG_ADXL345_I2C) += adxl345_i2c.o
+obj-$(CONFIG_ADXL345_SPI) += adxl345_spi.o
obj-$(CONFIG_BMA180) += bma180.o
obj-$(CONFIG_BMA220) += bma220_spi.o
obj-$(CONFIG_BMC150_ACCEL) += bmc150-accel-core.o
diff --git a/drivers/iio/accel/adxl345.h b/drivers/iio/accel/adxl345.h
new file mode 100644
index 000000000000..c1ddf3927c47
--- /dev/null
+++ b/drivers/iio/accel/adxl345.h
@@ -0,0 +1,18 @@
+/*
+ * ADXL345 3-Axis Digital Accelerometer
+ *
+ * Copyright (c) 2017 Eva Rachel Retuya <eraretuya@gmail.com>
+ *
+ * This file is subject to the terms and conditions of version 2 of
+ * the GNU General Public License. See the file COPYING in the main
+ * directory of this archive for more details.
+ */
+
+#ifndef _ADXL345_H_
+#define _ADXL345_H_
+
+int adxl345_core_probe(struct device *dev, struct regmap *regmap,
+ const char *name);
+int adxl345_core_remove(struct device *dev);
+
+#endif /* _ADXL345_H_ */
diff --git a/drivers/iio/accel/adxl345_core.c b/drivers/iio/accel/adxl345_core.c
new file mode 100644
index 000000000000..9ccb5828db98
--- /dev/null
+++ b/drivers/iio/accel/adxl345_core.c
@@ -0,0 +1,179 @@
+/*
+ * ADXL345 3-Axis Digital Accelerometer IIO core driver
+ *
+ * Copyright (c) 2017 Eva Rachel Retuya <eraretuya@gmail.com>
+ *
+ * This file is subject to the terms and conditions of version 2 of
+ * the GNU General Public License. See the file COPYING in the main
+ * directory of this archive for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/regmap.h>
+
+#include <linux/iio/iio.h>
+
+#include "adxl345.h"
+
+#define ADXL345_REG_DEVID 0x00
+#define ADXL345_REG_POWER_CTL 0x2D
+#define ADXL345_REG_DATA_FORMAT 0x31
+#define ADXL345_REG_DATAX0 0x32
+#define ADXL345_REG_DATAY0 0x34
+#define ADXL345_REG_DATAZ0 0x36
+
+#define ADXL345_POWER_CTL_MEASURE BIT(3)
+#define ADXL345_POWER_CTL_STANDBY 0x00
+
+#define ADXL345_DATA_FORMAT_FULL_RES BIT(3) /* Up to 13-bit resolution */
+#define ADXL345_DATA_FORMAT_2G 0
+#define ADXL345_DATA_FORMAT_4G 1
+#define ADXL345_DATA_FORMAT_8G 2
+#define ADXL345_DATA_FORMAT_16G 3
+
+#define ADXL345_DEVID 0xE5
+
+/*
+ * In full-resolution mode, scale factor is maintained at ~4 mg/LSB
+ * in all g ranges.
+ *
+ * At +/- 16g with 13-bit resolution, scale is computed as:
+ * (16 + 16) * 9.81 / (2^13 - 1) = 0.0383
+ */
+static const int adxl345_uscale = 38300;
+
+struct adxl345_data {
+ struct regmap *regmap;
+ u8 data_range;
+};
+
+#define ADXL345_CHANNEL(reg, axis) { \
+ .type = IIO_ACCEL, \
+ .modified = 1, \
+ .channel2 = IIO_MOD_##axis, \
+ .address = reg, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+}
+
+static const struct iio_chan_spec adxl345_channels[] = {
+ ADXL345_CHANNEL(ADXL345_REG_DATAX0, X),
+ ADXL345_CHANNEL(ADXL345_REG_DATAY0, Y),
+ ADXL345_CHANNEL(ADXL345_REG_DATAZ0, Z),
+};
+
+static int adxl345_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct adxl345_data *data = iio_priv(indio_dev);
+ __le16 regval;
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ /*
+ * Data is stored in adjacent registers:
+ * ADXL345_REG_DATA(X0/Y0/Z0) contain the least significant byte
+ * and ADXL345_REG_DATA(X0/Y0/Z0) + 1 the most significant byte
+ */
+ ret = regmap_bulk_read(data->regmap, chan->address, &regval,
+ sizeof(regval));
+ if (ret < 0)
+ return ret;
+
+ *val = sign_extend32(le16_to_cpu(regval), 12);
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ *val = 0;
+ *val2 = adxl345_uscale;
+
+ return IIO_VAL_INT_PLUS_MICRO;
+ }
+
+ return -EINVAL;
+}
+
+static const struct iio_info adxl345_info = {
+ .driver_module = THIS_MODULE,
+ .read_raw = adxl345_read_raw,
+};
+
+int adxl345_core_probe(struct device *dev, struct regmap *regmap,
+ const char *name)
+{
+ struct adxl345_data *data;
+ struct iio_dev *indio_dev;
+ u32 regval;
+ int ret;
+
+ ret = regmap_read(regmap, ADXL345_REG_DEVID, &regval);
+ if (ret < 0) {
+ dev_err(dev, "Error reading device ID: %d\n", ret);
+ return ret;
+ }
+
+ if (regval != ADXL345_DEVID) {
+ dev_err(dev, "Invalid device ID: %x, expected %x\n",
+ regval, ADXL345_DEVID);
+ return -ENODEV;
+ }
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ data = iio_priv(indio_dev);
+ dev_set_drvdata(dev, indio_dev);
+ data->regmap = regmap;
+ /* Enable full-resolution mode */
+ data->data_range = ADXL345_DATA_FORMAT_FULL_RES;
+
+ ret = regmap_write(data->regmap, ADXL345_REG_DATA_FORMAT,
+ data->data_range);
+ if (ret < 0) {
+ dev_err(dev, "Failed to set data range: %d\n", ret);
+ return ret;
+ }
+
+ indio_dev->dev.parent = dev;
+ indio_dev->name = name;
+ indio_dev->info = &adxl345_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = adxl345_channels;
+ indio_dev->num_channels = ARRAY_SIZE(adxl345_channels);
+
+ /* Enable measurement mode */
+ ret = regmap_write(data->regmap, ADXL345_REG_POWER_CTL,
+ ADXL345_POWER_CTL_MEASURE);
+ if (ret < 0) {
+ dev_err(dev, "Failed to enable measurement mode: %d\n", ret);
+ return ret;
+ }
+
+ ret = iio_device_register(indio_dev);
+ if (ret < 0) {
+ dev_err(dev, "iio_device_register failed: %d\n", ret);
+ regmap_write(data->regmap, ADXL345_REG_POWER_CTL,
+ ADXL345_POWER_CTL_STANDBY);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(adxl345_core_probe);
+
+int adxl345_core_remove(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct adxl345_data *data = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+
+ return regmap_write(data->regmap, ADXL345_REG_POWER_CTL,
+ ADXL345_POWER_CTL_STANDBY);
+}
+EXPORT_SYMBOL_GPL(adxl345_core_remove);
+
+MODULE_AUTHOR("Eva Rachel Retuya <eraretuya@gmail.com>");
+MODULE_DESCRIPTION("ADXL345 3-Axis Digital Accelerometer core driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/accel/adxl345_i2c.c b/drivers/iio/accel/adxl345_i2c.c
new file mode 100644
index 000000000000..05e1ec49700c
--- /dev/null
+++ b/drivers/iio/accel/adxl345_i2c.c
@@ -0,0 +1,73 @@
+/*
+ * ADXL345 3-Axis Digital Accelerometer I2C driver
+ *
+ * Copyright (c) 2017 Eva Rachel Retuya <eraretuya@gmail.com>
+ *
+ * This file is subject to the terms and conditions of version 2 of
+ * the GNU General Public License. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * 7-bit I2C slave address: 0x1D (ALT ADDRESS pin tied to VDDIO) or
+ * 0x53 (ALT ADDRESS pin grounded)
+ */
+
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+
+#include "adxl345.h"
+
+static const struct regmap_config adxl345_i2c_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+};
+
+static int adxl345_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct regmap *regmap;
+
+ regmap = devm_regmap_init_i2c(client, &adxl345_i2c_regmap_config);
+ if (IS_ERR(regmap)) {
+ dev_err(&client->dev, "Error initializing i2c regmap: %ld\n",
+ PTR_ERR(regmap));
+ return PTR_ERR(regmap);
+ }
+
+ return adxl345_core_probe(&client->dev, regmap, id ? id->name : NULL);
+}
+
+static int adxl345_i2c_remove(struct i2c_client *client)
+{
+ return adxl345_core_remove(&client->dev);
+}
+
+static const struct i2c_device_id adxl345_i2c_id[] = {
+ { "adxl345", 0 },
+ { }
+};
+
+MODULE_DEVICE_TABLE(i2c, adxl345_i2c_id);
+
+static const struct of_device_id adxl345_of_match[] = {
+ { .compatible = "adi,adxl345" },
+ { },
+};
+
+MODULE_DEVICE_TABLE(of, adxl345_of_match);
+
+static struct i2c_driver adxl345_i2c_driver = {
+ .driver = {
+ .name = "adxl345_i2c",
+ .of_match_table = adxl345_of_match,
+ },
+ .probe = adxl345_i2c_probe,
+ .remove = adxl345_i2c_remove,
+ .id_table = adxl345_i2c_id,
+};
+
+module_i2c_driver(adxl345_i2c_driver);
+
+MODULE_AUTHOR("Eva Rachel Retuya <eraretuya@gmail.com>");
+MODULE_DESCRIPTION("ADXL345 3-Axis Digital Accelerometer I2C driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/accel/adxl345_spi.c b/drivers/iio/accel/adxl345_spi.c
new file mode 100644
index 000000000000..6d658196f81c
--- /dev/null
+++ b/drivers/iio/accel/adxl345_spi.c
@@ -0,0 +1,81 @@
+/*
+ * ADXL345 3-Axis Digital Accelerometer SPI driver
+ *
+ * Copyright (c) 2017 Eva Rachel Retuya <eraretuya@gmail.com>
+ *
+ * This file is subject to the terms and conditions of version 2 of
+ * the GNU General Public License. See the file COPYING in the main
+ * directory of this archive for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/spi/spi.h>
+
+#include "adxl345.h"
+
+#define ADXL345_MAX_SPI_FREQ_HZ 5000000
+
+static const struct regmap_config adxl345_spi_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ /* Bit 7 flags a read, bit 6 enables multiple-byte (auto-increment) access */
+ .read_flag_mask = BIT(7) | BIT(6),
+};
+
+static int adxl345_spi_probe(struct spi_device *spi)
+{
+ const struct spi_device_id *id = spi_get_device_id(spi);
+ struct regmap *regmap;
+
+ /* Bail out if max_speed_hz exceeds 5 MHz */
+ if (spi->max_speed_hz > ADXL345_MAX_SPI_FREQ_HZ) {
+ dev_err(&spi->dev, "SPI CLK, %d Hz exceeds 5 MHz\n",
+ spi->max_speed_hz);
+ return -EINVAL;
+ }
+
+ regmap = devm_regmap_init_spi(spi, &adxl345_spi_regmap_config);
+ if (IS_ERR(regmap)) {
+ dev_err(&spi->dev, "Error initializing spi regmap: %ld\n",
+ PTR_ERR(regmap));
+ return PTR_ERR(regmap);
+ }
+
+ return adxl345_core_probe(&spi->dev, regmap, id->name);
+}
+
+static int adxl345_spi_remove(struct spi_device *spi)
+{
+ return adxl345_core_remove(&spi->dev);
+}
+
+static const struct spi_device_id adxl345_spi_id[] = {
+ { "adxl345", 0 },
+ { }
+};
+
+MODULE_DEVICE_TABLE(spi, adxl345_spi_id);
+
+static const struct of_device_id adxl345_of_match[] = {
+ { .compatible = "adi,adxl345" },
+ { },
+};
+
+MODULE_DEVICE_TABLE(of, adxl345_of_match);
+
+static struct spi_driver adxl345_spi_driver = {
+ .driver = {
+ .name = "adxl345_spi",
+ .of_match_table = adxl345_of_match,
+ },
+ .probe = adxl345_spi_probe,
+ .remove = adxl345_spi_remove,
+ .id_table = adxl345_spi_id,
+};
+
+module_spi_driver(adxl345_spi_driver);
+
+MODULE_AUTHOR("Eva Rachel Retuya <eraretuya@gmail.com>");
+MODULE_DESCRIPTION("ADXL345 3-Axis Digital Accelerometer SPI driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/accel/bma180.c b/drivers/iio/accel/bma180.c
index 0890934ef66f..efc67739c28f 100644
--- a/drivers/iio/accel/bma180.c
+++ b/drivers/iio/accel/bma180.c
@@ -18,6 +18,7 @@
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
+#include <linux/of_device.h>
#include <linux/of.h>
#include <linux/bitops.h>
#include <linux/slab.h>
@@ -32,7 +33,7 @@
#define BMA180_DRV_NAME "bma180"
#define BMA180_IRQ_NAME "bma180_event"
-enum {
+enum chip_ids {
BMA180,
BMA250,
};
@@ -41,11 +42,11 @@ struct bma180_data;
struct bma180_part_info {
const struct iio_chan_spec *channels;
- unsigned num_channels;
+ unsigned int num_channels;
const int *scale_table;
- unsigned num_scales;
+ unsigned int num_scales;
const int *bw_table;
- unsigned num_bw;
+ unsigned int num_bw;
u8 int_reset_reg, int_reset_mask;
u8 sleep_reg, sleep_mask;
@@ -408,7 +409,7 @@ err:
dev_err(&data->client->dev, "failed to disable the chip\n");
}
-static ssize_t bma180_show_avail(char *buf, const int *vals, unsigned n,
+static ssize_t bma180_show_avail(char *buf, const int *vals, unsigned int n,
bool micros)
{
size_t len = 0;
@@ -707,6 +708,7 @@ static int bma180_probe(struct i2c_client *client,
{
struct bma180_data *data;
struct iio_dev *indio_dev;
+ enum chip_ids chip;
int ret;
indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
@@ -716,7 +718,11 @@ static int bma180_probe(struct i2c_client *client,
data = iio_priv(indio_dev);
i2c_set_clientdata(client, indio_dev);
data->client = client;
- data->part_info = &bma180_part_info[id->driver_data];
+ if (client->dev.of_node)
+ chip = (enum chip_ids)of_device_get_match_data(&client->dev);
+ else
+ chip = id->driver_data;
+ data->part_info = &bma180_part_info[chip];
ret = data->part_info->chip_config(data);
if (ret < 0)
@@ -844,10 +850,24 @@ static struct i2c_device_id bma180_ids[] = {
MODULE_DEVICE_TABLE(i2c, bma180_ids);
+static const struct of_device_id bma180_of_match[] = {
+ {
+ .compatible = "bosch,bma180",
+ .data = (void *)BMA180
+ },
+ {
+ .compatible = "bosch,bma250",
+ .data = (void *)BMA250
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(of, bma180_of_match);
+
static struct i2c_driver bma180_driver = {
.driver = {
.name = "bma180",
.pm = BMA180_PM_OPS,
+ .of_match_table = bma180_of_match,
},
.probe = bma180_probe,
.remove = bma180_remove,
diff --git a/drivers/iio/accel/mma7455_i2c.c b/drivers/iio/accel/mma7455_i2c.c
index 3cab5fb4a3c4..73bf81a8ab14 100644
--- a/drivers/iio/accel/mma7455_i2c.c
+++ b/drivers/iio/accel/mma7455_i2c.c
@@ -41,12 +41,20 @@ static const struct i2c_device_id mma7455_i2c_ids[] = {
};
MODULE_DEVICE_TABLE(i2c, mma7455_i2c_ids);
+static const struct of_device_id mma7455_of_match[] = {
+ { .compatible = "fsl,mma7455" },
+ { .compatible = "fsl,mma7456" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, mma7455_of_match);
+
static struct i2c_driver mma7455_i2c_driver = {
.probe = mma7455_i2c_probe,
.remove = mma7455_i2c_remove,
.id_table = mma7455_i2c_ids,
.driver = {
.name = "mma7455-i2c",
+ .of_match_table = mma7455_of_match,
},
};
module_i2c_driver(mma7455_i2c_driver);
diff --git a/drivers/iio/accel/mma7660.c b/drivers/iio/accel/mma7660.c
index 3a40774cca74..42fa57e41bdd 100644
--- a/drivers/iio/accel/mma7660.c
+++ b/drivers/iio/accel/mma7660.c
@@ -253,6 +253,12 @@ static const struct i2c_device_id mma7660_i2c_id[] = {
};
MODULE_DEVICE_TABLE(i2c, mma7660_i2c_id);
+static const struct of_device_id mma7660_of_match[] = {
+ { .compatible = "fsl,mma7660" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, mma7660_of_match);
+
static const struct acpi_device_id mma7660_acpi_id[] = {
{"MMA7660", 0},
{}
@@ -264,6 +270,7 @@ static struct i2c_driver mma7660_driver = {
.driver = {
.name = "mma7660",
.pm = MMA7660_PM_OPS,
+ .of_match_table = mma7660_of_match,
.acpi_match_table = ACPI_PTR(mma7660_acpi_id),
},
.probe = mma7660_probe,
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index ff5ad3be55b4..401f47b51d83 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -130,6 +130,17 @@ config AD799X
To compile this driver as a module, choose M here: the module will be
called ad799x.
+config ASPEED_ADC
+ tristate "Aspeed ADC"
+ depends on ARCH_ASPEED || COMPILE_TEST
+ depends on COMMON_CLK
+ help
+ If you say yes here you get support for the ADC included in Aspeed
+ BMC SoCs.
+
+ To compile this driver as a module, choose M here: the module will be
+ called aspeed_adc.
+
config AT91_ADC
tristate "Atmel AT91 ADC"
depends on ARCH_AT91
@@ -205,6 +216,17 @@ config CC10001_ADC
This driver can also be built as a module. If so, the module will be
called cc10001_adc.
+config CPCAP_ADC
+ tristate "Motorola CPCAP PMIC ADC driver"
+ depends on MFD_CPCAP
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ help
+ Say yes here to build support for Motorola CPCAP PMIC ADC.
+
+ This driver can also be built as a module. If so, the module will be
+ called cpcap-adc.
+
config DA9150_GPADC
tristate "Dialog DA9150 GPADC driver support"
depends on MFD_DA9150
@@ -328,6 +350,18 @@ config LPC18XX_ADC
To compile this driver as a module, choose M here: the module will be
called lpc18xx_adc.
+config LPC32XX_ADC
+ tristate "NXP LPC32XX ADC"
+ depends on ARCH_LPC32XX || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+ Say yes here to build support for the integrated ADC inside the
+ LPC32XX SoC. Note that this feature uses the same hardware as the
+ touchscreen driver, so you should either select only one of the two
+ drivers (lpc32xx_adc or lpc32xx_ts) or, in the OpenFirmware case,
+ activate only one via device tree selection. Provides direct access
+ via sysfs.
+
config LTC2485
tristate "Linear Technology LTC2485 ADC driver"
depends on I2C
@@ -337,6 +371,16 @@ config LTC2485
To compile this driver as a module, choose M here: the module will be
called ltc2485.
+config LTC2497
+ tristate "Linear Technology LTC2497 ADC driver"
+ depends on I2C
+ help
+ Say yes here to build support for Linear Technology LTC2497
+ 16-Bit 8-/16-Channel Delta Sigma ADC.
+
+ To compile this driver as a module, choose M here: the module will be
+ called ltc2497.
+
config MAX1027
tristate "Maxim max1027 ADC driver"
depends on SPI
@@ -358,6 +402,18 @@ config MAX11100
To compile this driver as a module, choose M here: the module will be
called max11100.
+config MAX1118
+ tristate "Maxim max1117/max1118/max1119 ADCs driver"
+ depends on SPI
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ help
+ Say yes here to build support for Maxim max1117/max1118/max1119
+ 8-bit, dual-channel ADCs.
+
+ To compile this driver as a module, choose M here: the module will be
+ called max1118.
+
config MAX1363
tristate "Maxim max1363 ADC driver"
depends on I2C
@@ -377,6 +433,16 @@ config MAX1363
To compile this driver as a module, choose M here: the module will be
called max1363.
+config MAX9611
+ tristate "Maxim max9611/max9612 ADC driver"
+ depends on I2C
+ help
+ Say yes here to build support for the Maxim max9611/max9612 current
+ sense amplifiers with a 12-bit ADC interface.
+
+ To compile this driver as a module, choose M here: the module will be
+ called max9611.
+
config MCP320X
tristate "Microchip Technology MCP3x01/02/04/08"
depends on SPI
@@ -451,6 +517,20 @@ config PALMAS_GPADC
is used in smartphones and tablets and supports a 16 channel
general purpose ADC.
+config QCOM_VADC_COMMON
+ tristate
+
+config QCOM_PM8XXX_XOADC
+ tristate "Qualcomm SSBI PM8xxx PMIC XOADCs"
+ depends on MFD_PM8XXX
+ select QCOM_VADC_COMMON
+ help
+ ADC driver for the XOADC portions of the Qualcomm PM8xxx PMICs
+ using SSBI transport: PM8018, PM8038, PM8058, PM8921.
+
+ To compile this driver as a module, choose M here: the module
+ will be called qcom-pm8xxx-xoadc.
+
config QCOM_SPMI_IADC
tristate "Qualcomm SPMI PMIC current ADC"
depends on SPMI
@@ -469,6 +549,7 @@ config QCOM_SPMI_VADC
tristate "Qualcomm SPMI PMIC voltage ADC"
depends on SPMI
select REGMAP_SPMI
+ select QCOM_VADC_COMMON
help
This is the IIO Voltage ADC driver for Qualcomm QPNP VADC Chip.
@@ -503,6 +584,17 @@ config ROCKCHIP_SARADC
To compile this driver as a module, choose M here: the
module will be called rockchip_saradc.
+config SPEAR_ADC
+ tristate "ST SPEAr ADC"
+ depends on PLAT_SPEAR || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+ Say yes here to build support for the integrated ADC inside the
+ ST SPEAr SoC. Provides direct access via sysfs.
+
+ To compile this driver as a module, choose M here: the
+ module will be called spear_adc.
+
config STM32_ADC_CORE
tristate "STMicroelectronics STM32 adc core"
depends on ARCH_STM32 || COMPILE_TEST
@@ -532,7 +624,7 @@ config STM32_ADC
config STX104
tristate "Apex Embedded Systems STX104 driver"
- depends on X86 && ISA_BUS_API
+ depends on PC104 && X86 && ISA_BUS_API
select GPIOLIB
help
Say yes here to build support for the Apex Embedded Systems STX104
@@ -545,6 +637,24 @@ config STX104
The base port addresses for the devices may be configured via the base
array module parameter.
+config SUN4I_GPADC
+ tristate "Support for the Allwinner SoCs GPADC"
+ depends on IIO
+ depends on MFD_SUN4I_GPADC || MACH_SUN8I
+ depends on THERMAL || !THERMAL_OF
+ help
+ Say yes here to build support for the GPADC found in Allwinner A10,
+ A13 and A31 SoCs. This ADC provides 4 channels which can be used as
+ an ADC or as a touchscreen input, plus one channel for the thermal
+ sensor.
+
+ The thermal sensor slows down ADC readings and can be disabled by
+ disabling CONFIG_THERMAL_OF. However, the thermal sensor should be
+ enabled by default since the SoC temperature is usually more critical
+ than ADC readings.
+
+ To compile this driver as a module, choose M here: the module will be
+ called sun4i-gpadc-iio.
+
config TI_ADC081C
tristate "Texas Instruments ADC081C/ADC101C/ADC121C family"
depends on I2C
diff --git a/drivers/iio/adc/Makefile b/drivers/iio/adc/Makefile
index a01de757f42c..9339bec4babe 100644
--- a/drivers/iio/adc/Makefile
+++ b/drivers/iio/adc/Makefile
@@ -14,6 +14,7 @@ obj-$(CONFIG_AD7791) += ad7791.o
obj-$(CONFIG_AD7793) += ad7793.o
obj-$(CONFIG_AD7887) += ad7887.o
obj-$(CONFIG_AD799X) += ad799x.o
+obj-$(CONFIG_ASPEED_ADC) += aspeed_adc.o
obj-$(CONFIG_AT91_ADC) += at91_adc.o
obj-$(CONFIG_AT91_SAMA5D2_ADC) += at91-sama5d2_adc.o
obj-$(CONFIG_AXP20X_ADC) += axp20x_adc.o
@@ -21,6 +22,7 @@ obj-$(CONFIG_AXP288_ADC) += axp288_adc.o
obj-$(CONFIG_BCM_IPROC_ADC) += bcm_iproc_adc.o
obj-$(CONFIG_BERLIN2_ADC) += berlin2-adc.o
obj-$(CONFIG_CC10001_ADC) += cc10001_adc.o
+obj-$(CONFIG_CPCAP_ADC) += cpcap-adc.o
obj-$(CONFIG_DA9150_GPADC) += da9150-gpadc.o
obj-$(CONFIG_ENVELOPE_DETECTOR) += envelope-detector.o
obj-$(CONFIG_EXYNOS_ADC) += exynos_adc.o
@@ -31,10 +33,14 @@ obj-$(CONFIG_IMX7D_ADC) += imx7d_adc.o
obj-$(CONFIG_INA2XX_ADC) += ina2xx-adc.o
obj-$(CONFIG_LP8788_ADC) += lp8788_adc.o
obj-$(CONFIG_LPC18XX_ADC) += lpc18xx_adc.o
+obj-$(CONFIG_LPC32XX_ADC) += lpc32xx_adc.o
obj-$(CONFIG_LTC2485) += ltc2485.o
+obj-$(CONFIG_LTC2497) += ltc2497.o
obj-$(CONFIG_MAX1027) += max1027.o
obj-$(CONFIG_MAX11100) += max11100.o
+obj-$(CONFIG_MAX1118) += max1118.o
obj-$(CONFIG_MAX1363) += max1363.o
+obj-$(CONFIG_MAX9611) += max9611.o
obj-$(CONFIG_MCP320X) += mcp320x.o
obj-$(CONFIG_MCP3422) += mcp3422.o
obj-$(CONFIG_MEDIATEK_MT6577_AUXADC) += mt6577_auxadc.o
@@ -44,10 +50,14 @@ obj-$(CONFIG_MXS_LRADC_ADC) += mxs-lradc-adc.o
obj-$(CONFIG_NAU7802) += nau7802.o
obj-$(CONFIG_PALMAS_GPADC) += palmas_gpadc.o
obj-$(CONFIG_QCOM_SPMI_IADC) += qcom-spmi-iadc.o
+obj-$(CONFIG_QCOM_VADC_COMMON) += qcom-vadc-common.o
obj-$(CONFIG_QCOM_SPMI_VADC) += qcom-spmi-vadc.o
+obj-$(CONFIG_QCOM_PM8XXX_XOADC) += qcom-pm8xxx-xoadc.o
obj-$(CONFIG_RCAR_GYRO_ADC) += rcar-gyroadc.o
obj-$(CONFIG_ROCKCHIP_SARADC) += rockchip_saradc.o
+obj-$(CONFIG_SPEAR_ADC) += spear_adc.o
obj-$(CONFIG_STX104) += stx104.o
+obj-$(CONFIG_SUN4I_GPADC) += sun4i-gpadc-iio.o
obj-$(CONFIG_STM32_ADC_CORE) += stm32-adc-core.o
obj-$(CONFIG_STM32_ADC) += stm32-adc.o
obj-$(CONFIG_TI_ADC081C) += ti-adc081c.o
diff --git a/drivers/iio/adc/ad799x.c b/drivers/iio/adc/ad799x.c
index 9704090b7908..22426ae4af97 100644
--- a/drivers/iio/adc/ad799x.c
+++ b/drivers/iio/adc/ad799x.c
@@ -520,7 +520,7 @@ static struct attribute *ad799x_event_attributes[] = {
NULL,
};
-static struct attribute_group ad799x_event_attrs_group = {
+static const struct attribute_group ad799x_event_attrs_group = {
.attrs = ad799x_event_attributes,
};
diff --git a/drivers/iio/adc/aspeed_adc.c b/drivers/iio/adc/aspeed_adc.c
new file mode 100644
index 000000000000..62670cbfa2bb
--- /dev/null
+++ b/drivers/iio/adc/aspeed_adc.c
@@ -0,0 +1,295 @@
+/*
+ * Aspeed AST2400/2500 ADC
+ *
+ * Copyright (C) 2017 Google, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/driver.h>
+
+#define ASPEED_RESOLUTION_BITS 10
+#define ASPEED_CLOCKS_PER_SAMPLE 12
+
+#define ASPEED_REG_ENGINE_CONTROL 0x00
+#define ASPEED_REG_INTERRUPT_CONTROL 0x04
+#define ASPEED_REG_VGA_DETECT_CONTROL 0x08
+#define ASPEED_REG_CLOCK_CONTROL 0x0C
+#define ASPEED_REG_MAX 0xC0
+
+#define ASPEED_OPERATION_MODE_POWER_DOWN (0x0 << 1)
+#define ASPEED_OPERATION_MODE_STANDBY (0x1 << 1)
+#define ASPEED_OPERATION_MODE_NORMAL (0x7 << 1)
+
+#define ASPEED_ENGINE_ENABLE BIT(0)
+
+struct aspeed_adc_model_data {
+ const char *model_name;
+ unsigned int min_sampling_rate; // Hz
+ unsigned int max_sampling_rate; // Hz
+ unsigned int vref_voltage; // mV
+};
+
+struct aspeed_adc_data {
+ struct device *dev;
+ void __iomem *base;
+ spinlock_t clk_lock;
+ struct clk_hw *clk_prescaler;
+ struct clk_hw *clk_scaler;
+};
+
+#define ASPEED_CHAN(_idx, _data_reg_addr) { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .channel = (_idx), \
+ .address = (_data_reg_addr), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+}
+
+static const struct iio_chan_spec aspeed_adc_iio_channels[] = {
+ ASPEED_CHAN(0, 0x10),
+ ASPEED_CHAN(1, 0x12),
+ ASPEED_CHAN(2, 0x14),
+ ASPEED_CHAN(3, 0x16),
+ ASPEED_CHAN(4, 0x18),
+ ASPEED_CHAN(5, 0x1A),
+ ASPEED_CHAN(6, 0x1C),
+ ASPEED_CHAN(7, 0x1E),
+ ASPEED_CHAN(8, 0x20),
+ ASPEED_CHAN(9, 0x22),
+ ASPEED_CHAN(10, 0x24),
+ ASPEED_CHAN(11, 0x26),
+ ASPEED_CHAN(12, 0x28),
+ ASPEED_CHAN(13, 0x2A),
+ ASPEED_CHAN(14, 0x2C),
+ ASPEED_CHAN(15, 0x2E),
+};
+
+static int aspeed_adc_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct aspeed_adc_data *data = iio_priv(indio_dev);
+ const struct aspeed_adc_model_data *model_data =
+ of_device_get_match_data(data->dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ *val = readw(data->base + chan->address);
+ return IIO_VAL_INT;
+
+ case IIO_CHAN_INFO_SCALE:
+ *val = model_data->vref_voltage;
+ *val2 = ASPEED_RESOLUTION_BITS;
+ return IIO_VAL_FRACTIONAL_LOG2;
+
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ *val = clk_get_rate(data->clk_scaler->clk) /
+ ASPEED_CLOCKS_PER_SAMPLE;
+ return IIO_VAL_INT;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int aspeed_adc_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct aspeed_adc_data *data = iio_priv(indio_dev);
+ const struct aspeed_adc_model_data *model_data =
+ of_device_get_match_data(data->dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ if (val < model_data->min_sampling_rate ||
+ val > model_data->max_sampling_rate)
+ return -EINVAL;
+
+ clk_set_rate(data->clk_scaler->clk,
+ val * ASPEED_CLOCKS_PER_SAMPLE);
+ return 0;
+
+ case IIO_CHAN_INFO_SCALE:
+ case IIO_CHAN_INFO_RAW:
+ /*
+ * Technically, these could be written but the only reasons
+ * for doing so seem better handled in userspace. EPERM is
+ * returned to signal this is a policy choice rather than a
+ * hardware limitation.
+ */
+ return -EPERM;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int aspeed_adc_reg_access(struct iio_dev *indio_dev,
+ unsigned int reg, unsigned int writeval,
+ unsigned int *readval)
+{
+ struct aspeed_adc_data *data = iio_priv(indio_dev);
+
+ if (!readval || reg % 4 || reg > ASPEED_REG_MAX)
+ return -EINVAL;
+
+ *readval = readl(data->base + reg);
+
+ return 0;
+}
+
+static const struct iio_info aspeed_adc_iio_info = {
+ .driver_module = THIS_MODULE,
+ .read_raw = aspeed_adc_read_raw,
+ .write_raw = aspeed_adc_write_raw,
+ .debugfs_reg_access = aspeed_adc_reg_access,
+};
+
+static int aspeed_adc_probe(struct platform_device *pdev)
+{
+ struct iio_dev *indio_dev;
+ struct aspeed_adc_data *data;
+ const struct aspeed_adc_model_data *model_data;
+ struct resource *res;
+ const char *clk_parent_name;
+ int ret;
+ u32 adc_engine_control_reg_val;
+
+ indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ data = iio_priv(indio_dev);
+ data->dev = &pdev->dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ data->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(data->base))
+ return PTR_ERR(data->base);
+
+ /* Register ADC clock prescaler with source specified by device tree. */
+ spin_lock_init(&data->clk_lock);
+ clk_parent_name = of_clk_get_parent_name(pdev->dev.of_node, 0);
+
+ data->clk_prescaler = clk_hw_register_divider(
+ &pdev->dev, "prescaler", clk_parent_name, 0,
+ data->base + ASPEED_REG_CLOCK_CONTROL,
+ 17, 15, 0, &data->clk_lock);
+ if (IS_ERR(data->clk_prescaler))
+ return PTR_ERR(data->clk_prescaler);
+
+ /*
+ * Register ADC clock scaler downstream from the prescaler. Allow rate
+ * setting to adjust the prescaler as well.
+ */
+ data->clk_scaler = clk_hw_register_divider(
+ &pdev->dev, "scaler", "prescaler",
+ CLK_SET_RATE_PARENT,
+ data->base + ASPEED_REG_CLOCK_CONTROL,
+ 0, 10, 0, &data->clk_lock);
+ if (IS_ERR(data->clk_scaler)) {
+ ret = PTR_ERR(data->clk_scaler);
+ goto scaler_error;
+ }
+
+ /* Start all channels in normal mode. */
+ clk_prepare_enable(data->clk_scaler->clk);
+ adc_engine_control_reg_val = GENMASK(31, 16) |
+ ASPEED_OPERATION_MODE_NORMAL | ASPEED_ENGINE_ENABLE;
+ writel(adc_engine_control_reg_val,
+ data->base + ASPEED_REG_ENGINE_CONTROL);
+
+ model_data = of_device_get_match_data(&pdev->dev);
+ indio_dev->name = model_data->model_name;
+ indio_dev->dev.parent = &pdev->dev;
+ indio_dev->info = &aspeed_adc_iio_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = aspeed_adc_iio_channels;
+ indio_dev->num_channels = ARRAY_SIZE(aspeed_adc_iio_channels);
+
+ ret = iio_device_register(indio_dev);
+ if (ret)
+ goto iio_register_error;
+
+ return 0;
+
+iio_register_error:
+ writel(ASPEED_OPERATION_MODE_POWER_DOWN,
+ data->base + ASPEED_REG_ENGINE_CONTROL);
+ clk_disable_unprepare(data->clk_scaler->clk);
+ clk_hw_unregister_divider(data->clk_scaler);
+
+scaler_error:
+ clk_hw_unregister_divider(data->clk_prescaler);
+ return ret;
+}
+
+static int aspeed_adc_remove(struct platform_device *pdev)
+{
+ struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+ struct aspeed_adc_data *data = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+ writel(ASPEED_OPERATION_MODE_POWER_DOWN,
+ data->base + ASPEED_REG_ENGINE_CONTROL);
+ clk_disable_unprepare(data->clk_scaler->clk);
+ clk_hw_unregister_divider(data->clk_scaler);
+ clk_hw_unregister_divider(data->clk_prescaler);
+
+ return 0;
+}
+
+static const struct aspeed_adc_model_data ast2400_model_data = {
+ .model_name = "ast2400-adc",
+ .vref_voltage = 2500, // mV
+ .min_sampling_rate = 10000,
+ .max_sampling_rate = 500000,
+};
+
+static const struct aspeed_adc_model_data ast2500_model_data = {
+ .model_name = "ast2500-adc",
+ .vref_voltage = 1800, // mV
+ .min_sampling_rate = 1,
+ .max_sampling_rate = 1000000,
+};
+
+static const struct of_device_id aspeed_adc_matches[] = {
+ { .compatible = "aspeed,ast2400-adc", .data = &ast2400_model_data },
+ { .compatible = "aspeed,ast2500-adc", .data = &ast2500_model_data },
+ {},
+};
+MODULE_DEVICE_TABLE(of, aspeed_adc_matches);
+
+static struct platform_driver aspeed_adc_driver = {
+ .probe = aspeed_adc_probe,
+ .remove = aspeed_adc_remove,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = aspeed_adc_matches,
+ }
+};
+
+module_platform_driver(aspeed_adc_driver);
+
+MODULE_AUTHOR("Rick Altherr <raltherr@google.com>");
+MODULE_DESCRIPTION("Aspeed AST2400/2500 ADC Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/adc/cpcap-adc.c b/drivers/iio/adc/cpcap-adc.c
new file mode 100644
index 000000000000..62d37f8725b8
--- /dev/null
+++ b/drivers/iio/adc/cpcap-adc.c
@@ -0,0 +1,1007 @@
+/*
+ * Copyright (C) 2017 Tony Lindgren <tony@atomide.com>
+ *
+ * Rewritten for Linux IIO framework with some code based on
+ * earlier driver found in the Motorola Linux kernel:
+ *
+ * Copyright (C) 2009-2010 Motorola, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <linux/iio/buffer.h>
+#include <linux/iio/driver.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/kfifo_buf.h>
+#include <linux/mfd/motorola-cpcap.h>
+
+/* Register CPCAP_REG_ADCC1 bits */
+#define CPCAP_BIT_ADEN_AUTO_CLR BIT(15) /* Currently unused */
+#define CPCAP_BIT_CAL_MODE BIT(14) /* Set with BIT_RAND0 */
+#define CPCAP_BIT_ADC_CLK_SEL1 BIT(13) /* Currently unused */
+#define CPCAP_BIT_ADC_CLK_SEL0 BIT(12) /* Currently unused */
+#define CPCAP_BIT_ATOX BIT(11)
+#define CPCAP_BIT_ATO3 BIT(10)
+#define CPCAP_BIT_ATO2 BIT(9)
+#define CPCAP_BIT_ATO1 BIT(8)
+#define CPCAP_BIT_ATO0 BIT(7)
+#define CPCAP_BIT_ADA2 BIT(6)
+#define CPCAP_BIT_ADA1 BIT(5)
+#define CPCAP_BIT_ADA0 BIT(4)
+#define CPCAP_BIT_AD_SEL1 BIT(3) /* Set for bank1 */
+#define CPCAP_BIT_RAND1 BIT(2) /* Set for channel 16 & 17 */
+#define CPCAP_BIT_RAND0 BIT(1) /* Set with CAL_MODE */
+#define CPCAP_BIT_ADEN BIT(0) /* Currently unused */
+
+/* Register CPCAP_REG_ADCC2 bits */
+#define CPCAP_BIT_CAL_FACTOR_ENABLE BIT(15) /* Currently unused */
+#define CPCAP_BIT_BATDETB_EN BIT(14) /* Currently unused */
+#define CPCAP_BIT_ADTRIG_ONESHOT BIT(13) /* Set for !TIMING_IMM */
+#define CPCAP_BIT_ASC BIT(12) /* Set for TIMING_IMM */
+#define CPCAP_BIT_ATOX_PS_FACTOR BIT(11)
+#define CPCAP_BIT_ADC_PS_FACTOR1 BIT(10)
+#define CPCAP_BIT_ADC_PS_FACTOR0 BIT(9)
+#define CPCAP_BIT_AD4_SELECT BIT(8) /* Currently unused */
+#define CPCAP_BIT_ADC_BUSY BIT(7) /* Currently unused */
+#define CPCAP_BIT_THERMBIAS_EN BIT(6) /* Currently unused */
+#define CPCAP_BIT_ADTRIG_DIS BIT(5) /* Disable interrupt */
+#define CPCAP_BIT_LIADC BIT(4) /* Currently unused */
+#define CPCAP_BIT_TS_REFEN BIT(3) /* Currently unused */
+#define CPCAP_BIT_TS_M2 BIT(2) /* Currently unused */
+#define CPCAP_BIT_TS_M1 BIT(1) /* Currently unused */
+#define CPCAP_BIT_TS_M0 BIT(0) /* Currently unused */
+
+#define CPCAP_MAX_TEMP_LVL 27
+#define CPCAP_FOUR_POINT_TWO_ADC 801
+#define ST_ADC_CAL_CHRGI_HIGH_THRESHOLD 530
+#define ST_ADC_CAL_CHRGI_LOW_THRESHOLD 494
+#define ST_ADC_CAL_BATTI_HIGH_THRESHOLD 530
+#define ST_ADC_CAL_BATTI_LOW_THRESHOLD 494
+#define ST_ADC_CALIBRATE_DIFF_THRESHOLD 3
+
+#define CPCAP_ADC_MAX_RETRIES 5 /* Calibration and quirk */
+
+/**
+ * struct cpcap_adc_ato - timing settings for cpcap adc
+ *
+ * Unfortunately, no CPCAP documentation is available; please document
+ * these settings when using them.
+ */
+struct cpcap_adc_ato {
+ unsigned short ato_in;
+ unsigned short atox_in;
+ unsigned short adc_ps_factor_in;
+ unsigned short atox_ps_factor_in;
+ unsigned short ato_out;
+ unsigned short atox_out;
+ unsigned short adc_ps_factor_out;
+ unsigned short atox_ps_factor_out;
+};
+
+/**
+ * struct cpcap_adc - CPCAP ADC device driver data
+ * @reg: cpcap regmap
+ * @dev: struct device
+ * @vendor: cpcap vendor
+ * @irq: interrupt
+ * @lock: mutex
+ * @ato: request timings
+ * @wq_data_avail: wait queue for conversion completion
+ * @done: conversion done flag
+ */
+struct cpcap_adc {
+ struct regmap *reg;
+ struct device *dev;
+ u16 vendor;
+ int irq;
+ struct mutex lock; /* ADC register access lock */
+ const struct cpcap_adc_ato *ato;
+ wait_queue_head_t wq_data_avail;
+ bool done;
+};
+
+/**
+ * enum cpcap_adc_channel - cpcap adc channels
+ */
+enum cpcap_adc_channel {
+ /* Bank0 channels */
+ CPCAP_ADC_AD0_BATTDETB, /* Battery detection */
+ CPCAP_ADC_BATTP, /* Battery voltage */
+ CPCAP_ADC_VBUS, /* USB VBUS voltage */
+ CPCAP_ADC_AD3, /* Battery temperature when charging */
+ CPCAP_ADC_BPLUS_AD4, /* Another battery or system voltage */
+ CPCAP_ADC_CHG_ISENSE, /* Calibrated charge current */
+ CPCAP_ADC_BATTI, /* Calibrated system current */
+ CPCAP_ADC_USB_ID, /* USB OTG ID, unused on droid 4? */
+
+ /* Bank1 channels */
+ CPCAP_ADC_AD8, /* Seems unused */
+ CPCAP_ADC_AD9, /* Seems unused */
+ CPCAP_ADC_LICELL, /* Maybe system voltage? Always 3V */
+ CPCAP_ADC_HV_BATTP, /* Another battery detection? */
+ CPCAP_ADC_TSX1_AD12, /* Seems unused, for touchscreen? */
+ CPCAP_ADC_TSX2_AD13, /* Seems unused, for touchscreen? */
+ CPCAP_ADC_TSY1_AD14, /* Seems unused, for touchscreen? */
+ CPCAP_ADC_TSY2_AD15, /* Seems unused, for touchscreen? */
+
+ /* Remuxed channels using bank0 entries */
+ CPCAP_ADC_BATTP_PI16, /* Alternative mux mode for BATTP */
+ CPCAP_ADC_BATTI_PI17, /* Alternative mux mode for BATTI */
+
+ CPCAP_ADC_CHANNEL_NUM,
+};
+
+/**
+ * enum cpcap_adc_timing - cpcap adc timing options
+ *
+ * CPCAP_ADC_TIMING_IMM seems to be immediate with no timings.
+ * Please document when using.
+ */
+enum cpcap_adc_timing {
+ CPCAP_ADC_TIMING_IMM,
+ CPCAP_ADC_TIMING_IN,
+ CPCAP_ADC_TIMING_OUT,
+};
+
+/**
+ * struct cpcap_adc_phasing_tbl - cpcap phasing table
+ * @offset: offset in the phasing table
+ * @multiplier: multiplier in the phasing table
+ * @divider: divider in the phasing table
+ * @min: minimum value
+ * @max: maximum value
+ */
+struct cpcap_adc_phasing_tbl {
+ short offset;
+ unsigned short multiplier;
+ unsigned short divider;
+ short min;
+ short max;
+};
+
+/**
+ * struct cpcap_adc_conversion_tbl - cpcap conversion table
+ * @conv_type: conversion type
+ * @align_offset: align offset
+ * @conv_offset: conversion offset
+ * @cal_offset: calibration offset
+ * @multiplier: conversion multiplier
+ * @divider: conversion divider
+ */
+struct cpcap_adc_conversion_tbl {
+ enum iio_chan_info_enum conv_type;
+ int align_offset;
+ int conv_offset;
+ int cal_offset;
+ int multiplier;
+ int divider;
+};
+
+/**
+ * struct cpcap_adc_request - cpcap adc request
+ * @channel: request channel
+ * @phase_tbl: channel phasing table
+ * @conv_tbl: channel conversion table
+ * @bank_index: channel index within the bank
+ * @timing: timing settings
+ * @result: result
+ */
+struct cpcap_adc_request {
+ int channel;
+ const struct cpcap_adc_phasing_tbl *phase_tbl;
+ const struct cpcap_adc_conversion_tbl *conv_tbl;
+ int bank_index;
+ enum cpcap_adc_timing timing;
+ int result;
+};
+
+/* Phasing table for channels. Note that channels 16 & 17 use BATTP and BATTI */
+static const struct cpcap_adc_phasing_tbl bank_phasing[] = {
+ /* Bank0 */
+ [CPCAP_ADC_AD0_BATTDETB] = {0, 0x80, 0x80, 0, 1023},
+ [CPCAP_ADC_BATTP] = {0, 0x80, 0x80, 0, 1023},
+ [CPCAP_ADC_VBUS] = {0, 0x80, 0x80, 0, 1023},
+ [CPCAP_ADC_AD3] = {0, 0x80, 0x80, 0, 1023},
+ [CPCAP_ADC_BPLUS_AD4] = {0, 0x80, 0x80, 0, 1023},
+ [CPCAP_ADC_CHG_ISENSE] = {0, 0x80, 0x80, -512, 511},
+ [CPCAP_ADC_BATTI] = {0, 0x80, 0x80, -512, 511},
+ [CPCAP_ADC_USB_ID] = {0, 0x80, 0x80, 0, 1023},
+
+ /* Bank1 */
+ [CPCAP_ADC_AD8] = {0, 0x80, 0x80, 0, 1023},
+ [CPCAP_ADC_AD9] = {0, 0x80, 0x80, 0, 1023},
+ [CPCAP_ADC_LICELL] = {0, 0x80, 0x80, 0, 1023},
+ [CPCAP_ADC_HV_BATTP] = {0, 0x80, 0x80, 0, 1023},
+ [CPCAP_ADC_TSX1_AD12] = {0, 0x80, 0x80, 0, 1023},
+ [CPCAP_ADC_TSX2_AD13] = {0, 0x80, 0x80, 0, 1023},
+ [CPCAP_ADC_TSY1_AD14] = {0, 0x80, 0x80, 0, 1023},
+ [CPCAP_ADC_TSY2_AD15] = {0, 0x80, 0x80, 0, 1023},
+};
+
+/*
+ * Conversion table for channels. Updated during init based on calibration.
+ * Here too channels 16 & 17 use BATTP and BATTI.
+ */
+static struct cpcap_adc_conversion_tbl bank_conversion[] = {
+ /* Bank0 */
+ [CPCAP_ADC_AD0_BATTDETB] = {
+ IIO_CHAN_INFO_PROCESSED, 0, 0, 0, 1, 1,
+ },
+ [CPCAP_ADC_BATTP] = {
+ IIO_CHAN_INFO_PROCESSED, 0, 2400, 0, 2300, 1023,
+ },
+ [CPCAP_ADC_VBUS] = {
+ IIO_CHAN_INFO_PROCESSED, 0, 0, 0, 10000, 1023,
+ },
+ [CPCAP_ADC_AD3] = {
+ IIO_CHAN_INFO_PROCESSED, 0, 0, 0, 1, 1,
+ },
+ [CPCAP_ADC_BPLUS_AD4] = {
+ IIO_CHAN_INFO_PROCESSED, 0, 2400, 0, 2300, 1023,
+ },
+ [CPCAP_ADC_CHG_ISENSE] = {
+ IIO_CHAN_INFO_PROCESSED, -512, 2, 0, 5000, 1023,
+ },
+ [CPCAP_ADC_BATTI] = {
+ IIO_CHAN_INFO_PROCESSED, -512, 2, 0, 5000, 1023,
+ },
+ [CPCAP_ADC_USB_ID] = {
+ IIO_CHAN_INFO_RAW, 0, 0, 0, 1, 1,
+ },
+
+ /* Bank1 */
+ [CPCAP_ADC_AD8] = {
+ IIO_CHAN_INFO_RAW, 0, 0, 0, 1, 1,
+ },
+ [CPCAP_ADC_AD9] = {
+ IIO_CHAN_INFO_RAW, 0, 0, 0, 1, 1,
+ },
+ [CPCAP_ADC_LICELL] = {
+ IIO_CHAN_INFO_PROCESSED, 0, 0, 0, 3400, 1023,
+ },
+ [CPCAP_ADC_HV_BATTP] = {
+ IIO_CHAN_INFO_RAW, 0, 0, 0, 1, 1,
+ },
+ [CPCAP_ADC_TSX1_AD12] = {
+ IIO_CHAN_INFO_RAW, 0, 0, 0, 1, 1,
+ },
+ [CPCAP_ADC_TSX2_AD13] = {
+ IIO_CHAN_INFO_RAW, 0, 0, 0, 1, 1,
+ },
+ [CPCAP_ADC_TSY1_AD14] = {
+ IIO_CHAN_INFO_RAW, 0, 0, 0, 1, 1,
+ },
+ [CPCAP_ADC_TSY2_AD15] = {
+ IIO_CHAN_INFO_RAW, 0, 0, 0, 1, 1,
+ },
+};
+
+/*
+ * Temperature lookup table of register values to millidegrees Celsius.
+ * REVISIT: Check the duplicate 0x3ff entry in a freezer
+ */
+static const int temp_map[CPCAP_MAX_TEMP_LVL][2] = {
+ { 0x03ff, -40000 },
+ { 0x03ff, -35000 },
+ { 0x03ef, -30000 },
+ { 0x03b2, -25000 },
+ { 0x036c, -20000 },
+ { 0x0320, -15000 },
+ { 0x02d0, -10000 },
+ { 0x027f, -5000 },
+ { 0x022f, 0 },
+ { 0x01e4, 5000 },
+ { 0x019f, 10000 },
+ { 0x0161, 15000 },
+ { 0x012b, 20000 },
+ { 0x00fc, 25000 },
+ { 0x00d4, 30000 },
+ { 0x00b2, 35000 },
+ { 0x0095, 40000 },
+ { 0x007d, 45000 },
+ { 0x0069, 50000 },
+ { 0x0059, 55000 },
+ { 0x004b, 60000 },
+ { 0x003f, 65000 },
+ { 0x0036, 70000 },
+ { 0x002e, 75000 },
+ { 0x0027, 80000 },
+ { 0x0022, 85000 },
+ { 0x001d, 90000 },
+};
+
+#define CPCAP_CHAN(_type, _index, _address, _datasheet_name) { \
+ .type = (_type), \
+ .address = (_address), \
+ .indexed = 1, \
+ .channel = (_index), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_PROCESSED), \
+ .scan_index = (_index), \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = 10, \
+ .storagebits = 16, \
+ .endianness = IIO_CPU, \
+ }, \
+ .datasheet_name = (_datasheet_name), \
+}
+
+/*
+ * The datasheet names are from the Motorola mapphone Linux kernel,
+ * except for the last two, which might be uncalibrated charge voltage
+ * and current.
+ */
+static const struct iio_chan_spec cpcap_adc_channels[] = {
+ /* Bank0 */
+ CPCAP_CHAN(IIO_TEMP, 0, CPCAP_REG_ADCD0, "battdetb"),
+ CPCAP_CHAN(IIO_VOLTAGE, 1, CPCAP_REG_ADCD1, "battp"),
+ CPCAP_CHAN(IIO_VOLTAGE, 2, CPCAP_REG_ADCD2, "vbus"),
+ CPCAP_CHAN(IIO_TEMP, 3, CPCAP_REG_ADCD3, "ad3"),
+ CPCAP_CHAN(IIO_VOLTAGE, 4, CPCAP_REG_ADCD4, "ad4"),
+ CPCAP_CHAN(IIO_CURRENT, 5, CPCAP_REG_ADCD5, "chg_isense"),
+ CPCAP_CHAN(IIO_CURRENT, 6, CPCAP_REG_ADCD6, "batti"),
+ CPCAP_CHAN(IIO_VOLTAGE, 7, CPCAP_REG_ADCD7, "usb_id"),
+
+ /* Bank1 */
+ CPCAP_CHAN(IIO_CURRENT, 8, CPCAP_REG_ADCD0, "ad8"),
+ CPCAP_CHAN(IIO_VOLTAGE, 9, CPCAP_REG_ADCD1, "ad9"),
+ CPCAP_CHAN(IIO_VOLTAGE, 10, CPCAP_REG_ADCD2, "licell"),
+ CPCAP_CHAN(IIO_VOLTAGE, 11, CPCAP_REG_ADCD3, "hv_battp"),
+ CPCAP_CHAN(IIO_VOLTAGE, 12, CPCAP_REG_ADCD4, "tsx1_ad12"),
+ CPCAP_CHAN(IIO_VOLTAGE, 13, CPCAP_REG_ADCD5, "tsx2_ad13"),
+ CPCAP_CHAN(IIO_VOLTAGE, 14, CPCAP_REG_ADCD6, "tsy1_ad14"),
+ CPCAP_CHAN(IIO_VOLTAGE, 15, CPCAP_REG_ADCD7, "tsy2_ad15"),
+
+ /* There are two registers with multiplexed functionality */
+ CPCAP_CHAN(IIO_VOLTAGE, 16, CPCAP_REG_ADCD0, "chg_vsense"),
+ CPCAP_CHAN(IIO_CURRENT, 17, CPCAP_REG_ADCD1, "batti2"),
+};
+
+static irqreturn_t cpcap_adc_irq_thread(int irq, void *data)
+{
+ struct iio_dev *indio_dev = data;
+ struct cpcap_adc *ddata = iio_priv(indio_dev);
+ int error;
+
+ error = regmap_update_bits(ddata->reg, CPCAP_REG_ADCC2,
+ CPCAP_BIT_ADTRIG_DIS,
+ CPCAP_BIT_ADTRIG_DIS);
+ if (error)
+ return IRQ_NONE;
+
+ ddata->done = true;
+ wake_up_interruptible(&ddata->wq_data_avail);
+
+ return IRQ_HANDLED;
+}
+
+/* ADC calibration functions */
+static void cpcap_adc_setup_calibrate(struct cpcap_adc *ddata,
+ enum cpcap_adc_channel chan)
+{
+ unsigned int value = 0;
+ unsigned long timeout = jiffies + msecs_to_jiffies(3000);
+ int error;
+
+ if ((chan != CPCAP_ADC_CHG_ISENSE) &&
+ (chan != CPCAP_ADC_BATTI))
+ return;
+
+ value |= CPCAP_BIT_CAL_MODE | CPCAP_BIT_RAND0;
+ value |= ((chan << 4) &
+ (CPCAP_BIT_ADA2 | CPCAP_BIT_ADA1 | CPCAP_BIT_ADA0));
+
+ error = regmap_update_bits(ddata->reg, CPCAP_REG_ADCC1,
+ CPCAP_BIT_CAL_MODE | CPCAP_BIT_ATOX |
+ CPCAP_BIT_ATO3 | CPCAP_BIT_ATO2 |
+ CPCAP_BIT_ATO1 | CPCAP_BIT_ATO0 |
+ CPCAP_BIT_ADA2 | CPCAP_BIT_ADA1 |
+ CPCAP_BIT_ADA0 | CPCAP_BIT_AD_SEL1 |
+ CPCAP_BIT_RAND1 | CPCAP_BIT_RAND0,
+ value);
+ if (error)
+ return;
+
+ error = regmap_update_bits(ddata->reg, CPCAP_REG_ADCC2,
+ CPCAP_BIT_ATOX_PS_FACTOR |
+ CPCAP_BIT_ADC_PS_FACTOR1 |
+ CPCAP_BIT_ADC_PS_FACTOR0,
+ 0);
+ if (error)
+ return;
+
+ error = regmap_update_bits(ddata->reg, CPCAP_REG_ADCC2,
+ CPCAP_BIT_ADTRIG_DIS,
+ CPCAP_BIT_ADTRIG_DIS);
+ if (error)
+ return;
+
+ error = regmap_update_bits(ddata->reg, CPCAP_REG_ADCC2,
+ CPCAP_BIT_ASC,
+ CPCAP_BIT_ASC);
+ if (error)
+ return;
+
+ do {
+ schedule_timeout_uninterruptible(1);
+ error = regmap_read(ddata->reg, CPCAP_REG_ADCC2, &value);
+ if (error)
+ return;
+ } while ((value & CPCAP_BIT_ASC) && time_before(jiffies, timeout));
+
+ if (value & CPCAP_BIT_ASC)
+ dev_err(ddata->dev,
+ "Timeout waiting for calibration to complete\n");
+
+ error = regmap_update_bits(ddata->reg, CPCAP_REG_ADCC1,
+ CPCAP_BIT_CAL_MODE, 0);
+ if (error)
+ return;
+}
+
+static int cpcap_adc_calibrate_one(struct cpcap_adc *ddata,
+ int channel,
+ u16 calibration_register,
+ int lower_threshold,
+ int upper_threshold)
+{
+ unsigned int calibration_data[2];
+ unsigned short cal_data_diff;
+ int i, error;
+
+ for (i = 0; i < CPCAP_ADC_MAX_RETRIES; i++) {
+ calibration_data[0] = 0;
+ calibration_data[1] = 0;
+ cal_data_diff = 0;
+ cpcap_adc_setup_calibrate(ddata, channel);
+ error = regmap_read(ddata->reg, calibration_register,
+ &calibration_data[0]);
+ if (error)
+ return error;
+ cpcap_adc_setup_calibrate(ddata, channel);
+ error = regmap_read(ddata->reg, calibration_register,
+ &calibration_data[1]);
+ if (error)
+ return error;
+
+ if (calibration_data[0] > calibration_data[1])
+ cal_data_diff =
+ calibration_data[0] - calibration_data[1];
+ else
+ cal_data_diff =
+ calibration_data[1] - calibration_data[0];
+
+ if (((calibration_data[1] >= lower_threshold) &&
+ (calibration_data[1] <= upper_threshold) &&
+ (cal_data_diff <= ST_ADC_CALIBRATE_DIFF_THRESHOLD)) ||
+ (ddata->vendor == CPCAP_VENDOR_TI)) {
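+ /* Record how far the calibration reading sits from mid-scale (512) */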
+ bank_conversion[channel].cal_offset =
+ ((short)calibration_data[1] * -1) + 512;
+ dev_dbg(ddata->dev, "ch%i calibration complete: %i\n",
+ channel, bank_conversion[channel].cal_offset);
+ break;
+ }
+ usleep_range(5000, 10000);
+ }
+
+ return 0;
+}
+
+static int cpcap_adc_calibrate(struct cpcap_adc *ddata)
+{
+ int error;
+
+ error = cpcap_adc_calibrate_one(ddata, CPCAP_ADC_CHG_ISENSE,
+ CPCAP_REG_ADCAL1,
+ ST_ADC_CAL_CHRGI_LOW_THRESHOLD,
+ ST_ADC_CAL_CHRGI_HIGH_THRESHOLD);
+ if (error)
+ return error;
+
+ error = cpcap_adc_calibrate_one(ddata, CPCAP_ADC_BATTI,
+ CPCAP_REG_ADCAL2,
+ ST_ADC_CAL_BATTI_LOW_THRESHOLD,
+ ST_ADC_CAL_BATTI_HIGH_THRESHOLD);
+ if (error)
+ return error;
+
+ return 0;
+}
+
+/* ADC setup, read and scale functions */
+static void cpcap_adc_setup_bank(struct cpcap_adc *ddata,
+ struct cpcap_adc_request *req)
+{
+ const struct cpcap_adc_ato *ato = ddata->ato;
+ unsigned short value1 = 0;
+ unsigned short value2 = 0;
+ int error;
+
+ if (!ato)
+ return;
+
+ switch (req->channel) {
+ case CPCAP_ADC_AD8 ... CPCAP_ADC_TSY2_AD15:
+ value1 |= CPCAP_BIT_AD_SEL1;
+ break;
+ case CPCAP_ADC_BATTP_PI16 ... CPCAP_ADC_BATTI_PI17:
+ value1 |= CPCAP_BIT_RAND1;
+ /* fallthrough */
+ default:
+ break;
+ }
+
+ switch (req->timing) {
+ case CPCAP_ADC_TIMING_IN:
+ value1 |= ato->ato_in;
+ value1 |= ato->atox_in;
+ value2 |= ato->adc_ps_factor_in;
+ value2 |= ato->atox_ps_factor_in;
+ break;
+ case CPCAP_ADC_TIMING_OUT:
+ value1 |= ato->ato_out;
+ value1 |= ato->atox_out;
+ value2 |= ato->adc_ps_factor_out;
+ value2 |= ato->atox_ps_factor_out;
+ break;
+
+ case CPCAP_ADC_TIMING_IMM:
+ default:
+ break;
+ }
+
+ error = regmap_update_bits(ddata->reg, CPCAP_REG_ADCC1,
+ CPCAP_BIT_CAL_MODE | CPCAP_BIT_ATOX |
+ CPCAP_BIT_ATO3 | CPCAP_BIT_ATO2 |
+ CPCAP_BIT_ATO1 | CPCAP_BIT_ATO0 |
+ CPCAP_BIT_ADA2 | CPCAP_BIT_ADA1 |
+ CPCAP_BIT_ADA0 | CPCAP_BIT_AD_SEL1 |
+ CPCAP_BIT_RAND1 | CPCAP_BIT_RAND0,
+ value1);
+ if (error)
+ return;
+
+ error = regmap_update_bits(ddata->reg, CPCAP_REG_ADCC2,
+ CPCAP_BIT_ATOX_PS_FACTOR |
+ CPCAP_BIT_ADC_PS_FACTOR1 |
+ CPCAP_BIT_ADC_PS_FACTOR0,
+ value2);
+ if (error)
+ return;
+
+ if (req->timing == CPCAP_ADC_TIMING_IMM) {
+ error = regmap_update_bits(ddata->reg, CPCAP_REG_ADCC2,
+ CPCAP_BIT_ADTRIG_DIS,
+ CPCAP_BIT_ADTRIG_DIS);
+ if (error)
+ return;
+
+ error = regmap_update_bits(ddata->reg, CPCAP_REG_ADCC2,
+ CPCAP_BIT_ASC,
+ CPCAP_BIT_ASC);
+ if (error)
+ return;
+ } else {
+ error = regmap_update_bits(ddata->reg, CPCAP_REG_ADCC2,
+ CPCAP_BIT_ADTRIG_ONESHOT,
+ CPCAP_BIT_ADTRIG_ONESHOT);
+ if (error)
+ return;
+
+ error = regmap_update_bits(ddata->reg, CPCAP_REG_ADCC2,
+ CPCAP_BIT_ADTRIG_DIS, 0);
+ if (error)
+ return;
+ }
+}
+
+/*
+ * Occasionally the ADC does not seem to start and there will be no
+ * interrupt. Let's re-init interrupt to prevent the ADC from hanging
+ * for the next request. It is unclear why this happens, but the next
+ * request will usually work after doing this.
+ */
+static void cpcap_adc_quirk_reset_lost_irq(struct cpcap_adc *ddata)
+{
+ int error;
+
+ dev_info(ddata->dev, "lost ADC irq, attempting to reinit\n");
+ disable_irq(ddata->irq);
+ error = regmap_update_bits(ddata->reg, CPCAP_REG_ADCC2,
+ CPCAP_BIT_ADTRIG_DIS,
+ CPCAP_BIT_ADTRIG_DIS);
+ if (error)
+ dev_warn(ddata->dev, "%s reset failed: %i\n",
+ __func__, error);
+ enable_irq(ddata->irq);
+}
+
+static int cpcap_adc_start_bank(struct cpcap_adc *ddata,
+ struct cpcap_adc_request *req)
+{
+ int i, error;
+
+ req->timing = CPCAP_ADC_TIMING_IMM;
+ ddata->done = false;
+
+ for (i = 0; i < CPCAP_ADC_MAX_RETRIES; i++) {
+ cpcap_adc_setup_bank(ddata, req);
+ error = wait_event_interruptible_timeout(ddata->wq_data_avail,
+ ddata->done,
+ msecs_to_jiffies(50));
+ if (error > 0)
+ return 0;
+
+ if (error == 0) {
+ cpcap_adc_quirk_reset_lost_irq(ddata);
+ error = -ETIMEDOUT;
+ continue;
+ }
+
+ if (error < 0)
+ return error;
+ }
+
+ return error;
+}
+
+static void cpcap_adc_phase(struct cpcap_adc_request *req)
+{
+ const struct cpcap_adc_conversion_tbl *conv_tbl = req->conv_tbl;
+ const struct cpcap_adc_phasing_tbl *phase_tbl = req->phase_tbl;
+ int index = req->channel;
+
+ /* Remuxed channels 16 and 17 use BATTP and BATTI entries */
+ switch (req->channel) {
+ case CPCAP_ADC_BATTP:
+ case CPCAP_ADC_BATTP_PI16:
+ index = req->bank_index;
+ req->result -= phase_tbl[index].offset;
+ req->result -= CPCAP_FOUR_POINT_TWO_ADC;
+ req->result *= phase_tbl[index].multiplier;
+ if (phase_tbl[index].divider == 0)
+ return;
+ req->result /= phase_tbl[index].divider;
+ req->result += CPCAP_FOUR_POINT_TWO_ADC;
+ break;
+ case CPCAP_ADC_BATTI_PI17:
+ index = req->bank_index;
+ /* fallthrough */
+ default:
+ req->result += conv_tbl[index].cal_offset;
+ req->result += conv_tbl[index].align_offset;
+ req->result *= phase_tbl[index].multiplier;
+ if (phase_tbl[index].divider == 0)
+ return;
+ req->result /= phase_tbl[index].divider;
+ req->result += phase_tbl[index].offset;
+ break;
+ }
+
+ if (req->result < phase_tbl[index].min)
+ req->result = phase_tbl[index].min;
+ else if (req->result > phase_tbl[index].max)
+ req->result = phase_tbl[index].max;
+}
+
+/* Looks up temperatures in a table and interpolates between entries */
+static int cpcap_adc_table_to_millicelcius(unsigned short value)
+{
+ int i, result = 0, alpha;
+
+ if (value <= temp_map[CPCAP_MAX_TEMP_LVL - 1][0])
+ return temp_map[CPCAP_MAX_TEMP_LVL - 1][1];
+
+ if (value >= temp_map[0][0])
+ return temp_map[0][1];
+
+ for (i = 0; i < CPCAP_MAX_TEMP_LVL - 1; i++) {
+ if ((value <= temp_map[i][0]) &&
+ (value >= temp_map[i + 1][0])) {
+ if (value == temp_map[i][0]) {
+ result = temp_map[i][1];
+ } else if (value == temp_map[i + 1][0]) {
+ result = temp_map[i + 1][1];
+ } else {
+ alpha = ((value - temp_map[i][0]) * 1000) /
+ (temp_map[i + 1][0] - temp_map[i][0]);
+
+ result = temp_map[i][1] +
+ ((alpha * (temp_map[i + 1][1] -
+ temp_map[i][1])) / 1000);
+ }
+ break;
+ }
+ }
+
+ return result;
+}
+
+static void cpcap_adc_convert(struct cpcap_adc_request *req)
+{
+ const struct cpcap_adc_conversion_tbl *conv_tbl = req->conv_tbl;
+ int index = req->channel;
+
+ /* Remuxed channels 16 and 17 use BATTP and BATTI entries */
+ switch (req->channel) {
+ case CPCAP_ADC_BATTP_PI16:
+ index = CPCAP_ADC_BATTP;
+ break;
+ case CPCAP_ADC_BATTI_PI17:
+ index = CPCAP_ADC_BATTI;
+ break;
+ default:
+ break;
+ }
+
+ /* No conversion for raw channels */
+ if (conv_tbl[index].conv_type == IIO_CHAN_INFO_RAW)
+ return;
+
+ /* Temperatures use a lookup table instead of conversion table */
+ if ((req->channel == CPCAP_ADC_AD0_BATTDETB) ||
+ (req->channel == CPCAP_ADC_AD3)) {
+ req->result =
+ cpcap_adc_table_to_millicelcius(req->result);
+
+ return;
+ }
+
+ /* All processed channels use a conversion table */
+ req->result *= conv_tbl[index].multiplier;
+ if (conv_tbl[index].divider == 0)
+ return;
+ req->result /= conv_tbl[index].divider;
+ req->result += conv_tbl[index].conv_offset;
+}
+
+/*
+ * REVISIT: Check if timed sampling can use multiple channels at the
+ * same time. If not, replace channel_mask with just channel.
+ */
+static int cpcap_adc_read_bank_scaled(struct cpcap_adc *ddata,
+ struct cpcap_adc_request *req)
+{
+ int calibration_data, error, addr;
+
+ if (ddata->vendor == CPCAP_VENDOR_TI) {
+ error = regmap_read(ddata->reg, CPCAP_REG_ADCAL1,
+ &calibration_data);
+ if (error)
+ return error;
+ bank_conversion[CPCAP_ADC_CHG_ISENSE].cal_offset =
+ ((short)calibration_data * -1) + 512;
+
+ error = regmap_read(ddata->reg, CPCAP_REG_ADCAL2,
+ &calibration_data);
+ if (error)
+ return error;
+ bank_conversion[CPCAP_ADC_BATTI].cal_offset =
+ ((short)calibration_data * -1) + 512;
+ }
+
+ addr = CPCAP_REG_ADCD0 + req->bank_index * 4;
+
+ error = regmap_read(ddata->reg, addr, &req->result);
+ if (error)
+ return error;
+
+ req->result &= 0x3ff;
+ cpcap_adc_phase(req);
+ cpcap_adc_convert(req);
+
+ return 0;
+}
+
+static int cpcap_adc_init_request(struct cpcap_adc_request *req,
+ int channel)
+{
+ req->channel = channel;
+ req->phase_tbl = bank_phasing;
+ req->conv_tbl = bank_conversion;
+
+ switch (channel) {
+ case CPCAP_ADC_AD0_BATTDETB ... CPCAP_ADC_USB_ID:
+ req->bank_index = channel;
+ break;
+ case CPCAP_ADC_AD8 ... CPCAP_ADC_TSY2_AD15:
+ req->bank_index = channel - 8;
+ break;
+ case CPCAP_ADC_BATTP_PI16:
+ req->bank_index = CPCAP_ADC_BATTP;
+ break;
+ case CPCAP_ADC_BATTI_PI17:
+ req->bank_index = CPCAP_ADC_BATTI;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int cpcap_adc_read(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct cpcap_adc *ddata = iio_priv(indio_dev);
+ struct cpcap_adc_request req;
+ int error;
+
+ error = cpcap_adc_init_request(&req, chan->channel);
+ if (error)
+ return error;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ mutex_lock(&ddata->lock);
+ error = cpcap_adc_start_bank(ddata, &req);
+ if (error)
+ goto err_unlock;
+ error = regmap_read(ddata->reg, chan->address, val);
+ if (error)
+ goto err_unlock;
+ mutex_unlock(&ddata->lock);
+ break;
+ case IIO_CHAN_INFO_PROCESSED:
+ mutex_lock(&ddata->lock);
+ error = cpcap_adc_start_bank(ddata, &req);
+ if (error)
+ goto err_unlock;
+ error = cpcap_adc_read_bank_scaled(ddata, &req);
+ if (error)
+ goto err_unlock;
+ mutex_unlock(&ddata->lock);
+ *val = req.result;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return IIO_VAL_INT;
+
+err_unlock:
+ mutex_unlock(&ddata->lock);
+ dev_err(ddata->dev, "error reading ADC: %i\n", error);
+
+ return error;
+}
+
+static const struct iio_info cpcap_adc_info = {
+ .read_raw = &cpcap_adc_read,
+ .driver_module = THIS_MODULE,
+};
+
+/*
+ * Configuration for Motorola mapphone series such as droid 4.
+ * Copied from the Motorola mapphone kernel tree.
+ */
+static const struct cpcap_adc_ato mapphone_adc = {
+ .ato_in = 0x0480,
+ .atox_in = 0,
+ .adc_ps_factor_in = 0x0200,
+ .atox_ps_factor_in = 0,
+ .ato_out = 0,
+ .atox_out = 0,
+ .adc_ps_factor_out = 0,
+ .atox_ps_factor_out = 0,
+};
+
+static const struct of_device_id cpcap_adc_id_table[] = {
+ {
+ .compatible = "motorola,cpcap-adc",
+ },
+ {
+ .compatible = "motorola,mapphone-cpcap-adc",
+ .data = &mapphone_adc,
+ },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, cpcap_adc_id_table);
+
+static int cpcap_adc_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *match;
+ struct cpcap_adc *ddata;
+ struct iio_dev *indio_dev;
+ int error;
+
+ match = of_match_device(of_match_ptr(cpcap_adc_id_table),
+ &pdev->dev);
+ if (!match)
+ return -EINVAL;
+
+ if (!match->data) {
+ dev_err(&pdev->dev, "no configuration data found\n");
+
+ return -ENODEV;
+ }
+
+ indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*ddata));
+ if (!indio_dev) {
+ dev_err(&pdev->dev, "failed to allocate iio device\n");
+
+ return -ENOMEM;
+ }
+ ddata = iio_priv(indio_dev);
+ ddata->ato = match->data;
+ ddata->dev = &pdev->dev;
+
+ mutex_init(&ddata->lock);
+ init_waitqueue_head(&ddata->wq_data_avail);
+
+ indio_dev->modes = INDIO_DIRECT_MODE | INDIO_BUFFER_SOFTWARE;
+ indio_dev->dev.parent = &pdev->dev;
+ indio_dev->dev.of_node = pdev->dev.of_node;
+ indio_dev->channels = cpcap_adc_channels;
+ indio_dev->num_channels = ARRAY_SIZE(cpcap_adc_channels);
+ indio_dev->name = dev_name(&pdev->dev);
+ indio_dev->info = &cpcap_adc_info;
+
+ ddata->reg = dev_get_regmap(pdev->dev.parent, NULL);
+ if (!ddata->reg)
+ return -ENODEV;
+
+ error = cpcap_get_vendor(ddata->dev, ddata->reg, &ddata->vendor);
+ if (error)
+ return error;
+
+ platform_set_drvdata(pdev, indio_dev);
+
+ ddata->irq = platform_get_irq_byname(pdev, "adcdone");
+ if (ddata->irq < 0)
+ return -ENODEV;
+
+ error = devm_request_threaded_irq(&pdev->dev, ddata->irq, NULL,
+ cpcap_adc_irq_thread,
+ IRQF_TRIGGER_NONE,
+ "cpcap-adc", indio_dev);
+ if (error) {
+ dev_err(&pdev->dev, "could not get irq: %i\n",
+ error);
+
+ return error;
+ }
+
+ error = cpcap_adc_calibrate(ddata);
+ if (error)
+ return error;
+
+ dev_info(&pdev->dev, "CPCAP ADC device probed\n");
+
+ return devm_iio_device_register(&pdev->dev, indio_dev);
+}
+
+static struct platform_driver cpcap_adc_driver = {
+ .driver = {
+ .name = "cpcap_adc",
+ .of_match_table = of_match_ptr(cpcap_adc_id_table),
+ },
+ .probe = cpcap_adc_probe,
+};
+
+module_platform_driver(cpcap_adc_driver);
+
+MODULE_ALIAS("platform:cpcap_adc");
+MODULE_DESCRIPTION("CPCAP ADC driver");
+MODULE_AUTHOR("Tony Lindgren <tony@atomide.com");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/adc/exynos_adc.c b/drivers/iio/adc/exynos_adc.c
index ad1775b5f83c..6c5a7be9f8c1 100644
--- a/drivers/iio/adc/exynos_adc.c
+++ b/drivers/iio/adc/exynos_adc.c
@@ -579,7 +579,7 @@ static int exynos_read_s3c64xx_ts(struct iio_dev *indio_dev, int *x, int *y)
static irqreturn_t exynos_adc_isr(int irq, void *dev_id)
{
- struct exynos_adc *info = (struct exynos_adc *)dev_id;
+ struct exynos_adc *info = dev_id;
u32 mask = info->data->mask;
/* Read value */
diff --git a/drivers/iio/adc/hx711.c b/drivers/iio/adc/hx711.c
index 139639f73769..27005d84ed73 100644
--- a/drivers/iio/adc/hx711.c
+++ b/drivers/iio/adc/hx711.c
@@ -369,7 +369,7 @@ static struct attribute *hx711_attributes[] = {
NULL,
};
-static struct attribute_group hx711_attribute_group = {
+static const struct attribute_group hx711_attribute_group = {
.attrs = hx711_attributes,
};
diff --git a/drivers/iio/adc/imx7d_adc.c b/drivers/iio/adc/imx7d_adc.c
index e2241ee94783..254b29a68b9d 100644
--- a/drivers/iio/adc/imx7d_adc.c
+++ b/drivers/iio/adc/imx7d_adc.c
@@ -365,7 +365,7 @@ static int imx7d_adc_read_data(struct imx7d_adc *info)
static irqreturn_t imx7d_adc_isr(int irq, void *dev_id)
{
- struct imx7d_adc *info = (struct imx7d_adc *)dev_id;
+ struct imx7d_adc *info = dev_id;
int status;
status = readl(info->regs + IMX7D_REG_ADC_INT_STATUS);
diff --git a/drivers/iio/adc/ina2xx-adc.c b/drivers/iio/adc/ina2xx-adc.c
index 3263231276ca..db9838230257 100644
--- a/drivers/iio/adc/ina2xx-adc.c
+++ b/drivers/iio/adc/ina2xx-adc.c
@@ -28,6 +28,7 @@
#include <linux/iio/sysfs.h>
#include <linux/kthread.h>
#include <linux/module.h>
+#include <linux/of_device.h>
#include <linux/regmap.h>
#include <linux/util_macros.h>
@@ -635,6 +636,7 @@ static int ina2xx_probe(struct i2c_client *client,
struct iio_dev *indio_dev;
struct iio_buffer *buffer;
unsigned int val;
+ enum ina2xx_ids type;
int ret;
indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*chip));
@@ -652,7 +654,11 @@ static int ina2xx_probe(struct i2c_client *client,
return PTR_ERR(chip->regmap);
}
- chip->config = &ina2xx_config[id->driver_data];
+ if (client->dev.of_node)
+ type = (enum ina2xx_ids)of_device_get_match_data(&client->dev);
+ else
+ type = id->driver_data;
+ chip->config = &ina2xx_config[type];
mutex_init(&chip->state_lock);
@@ -726,9 +732,35 @@ static const struct i2c_device_id ina2xx_id[] = {
};
MODULE_DEVICE_TABLE(i2c, ina2xx_id);
+static const struct of_device_id ina2xx_of_match[] = {
+ {
+ .compatible = "ti,ina219",
+ .data = (void *)ina219
+ },
+ {
+ .compatible = "ti,ina220",
+ .data = (void *)ina219
+ },
+ {
+ .compatible = "ti,ina226",
+ .data = (void *)ina226
+ },
+ {
+ .compatible = "ti,ina230",
+ .data = (void *)ina226
+ },
+ {
+ .compatible = "ti,ina231",
+ .data = (void *)ina226
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ina2xx_of_match);
+
static struct i2c_driver ina2xx_driver = {
.driver = {
.name = KBUILD_MODNAME,
+ .of_match_table = ina2xx_of_match,
},
.probe = ina2xx_probe,
.remove = ina2xx_remove,
diff --git a/drivers/iio/adc/lpc32xx_adc.c b/drivers/iio/adc/lpc32xx_adc.c
new file mode 100644
index 000000000000..0de709b4288b
--- /dev/null
+++ b/drivers/iio/adc/lpc32xx_adc.c
@@ -0,0 +1,219 @@
+/*
+ * lpc32xx_adc.c - Support for ADC in LPC32XX
+ *
+ * 3-channel, 10-bit ADC
+ *
+ * Copyright (C) 2011, 2012 Roland Stigge <stigge@antcom.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/completion.h>
+#include <linux/of.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+
+/*
+ * LPC32XX registers definitions
+ */
+#define LPC32XXAD_SELECT(x) ((x) + 0x04)
+#define LPC32XXAD_CTRL(x) ((x) + 0x08)
+#define LPC32XXAD_VALUE(x) ((x) + 0x48)
+
+/* Bit definitions for LPC32XXAD_SELECT: */
+/* constant, always write this value! */
+#define LPC32XXAD_REFm 0x00000200
+/* constant, always write this value! */
+#define LPC32XXAD_REFp 0x00000080
+ /* multiple of this is the channel number: 0, 1, 2 */
+#define LPC32XXAD_IN 0x00000010
+/* constant, always write this value! */
+#define LPC32XXAD_INTERNAL 0x00000004
+
+/* Bit definitions for LPC32XXAD_CTRL: */
+#define LPC32XXAD_STROBE 0x00000002
+#define LPC32XXAD_PDN_CTRL 0x00000004
+
+/* Bit definitions for LPC32XXAD_VALUE: */
+#define LPC32XXAD_VALUE_MASK 0x000003FF
+
+#define LPC32XXAD_NAME "lpc32xx-adc"
+
+struct lpc32xx_adc_state {
+ void __iomem *adc_base;
+ struct clk *clk;
+ struct completion completion;
+
+ u32 value;
+};
+
+static int lpc32xx_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val,
+ int *val2,
+ long mask)
+{
+ struct lpc32xx_adc_state *st = iio_priv(indio_dev);
+
+ if (mask == IIO_CHAN_INFO_RAW) {
+ mutex_lock(&indio_dev->mlock);
+ clk_prepare_enable(st->clk);
+ /* Measurement setup */
+ __raw_writel(LPC32XXAD_INTERNAL | (chan->address) |
+ LPC32XXAD_REFp | LPC32XXAD_REFm,
+ LPC32XXAD_SELECT(st->adc_base));
+ /* Trigger conversion */
+ __raw_writel(LPC32XXAD_PDN_CTRL | LPC32XXAD_STROBE,
+ LPC32XXAD_CTRL(st->adc_base));
+ wait_for_completion(&st->completion); /* set by ISR */
+ clk_disable_unprepare(st->clk);
+ *val = st->value;
+ mutex_unlock(&indio_dev->mlock);
+
+ return IIO_VAL_INT;
+ }
+
+ return -EINVAL;
+}
+
+static const struct iio_info lpc32xx_adc_iio_info = {
+ .read_raw = &lpc32xx_read_raw,
+ .driver_module = THIS_MODULE,
+};
+
+#define LPC32XX_ADC_CHANNEL(_index) { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .channel = _index, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .address = LPC32XXAD_IN * _index, \
+ .scan_index = _index, \
+}
+
+static const struct iio_chan_spec lpc32xx_adc_iio_channels[] = {
+ LPC32XX_ADC_CHANNEL(0),
+ LPC32XX_ADC_CHANNEL(1),
+ LPC32XX_ADC_CHANNEL(2),
+};
+
+static irqreturn_t lpc32xx_adc_isr(int irq, void *dev_id)
+{
+ struct lpc32xx_adc_state *st = dev_id;
+
+ /* Read value and clear irq */
+ st->value = __raw_readl(LPC32XXAD_VALUE(st->adc_base)) &
+ LPC32XXAD_VALUE_MASK;
+ complete(&st->completion);
+
+ return IRQ_HANDLED;
+}
+
+static int lpc32xx_adc_probe(struct platform_device *pdev)
+{
+ struct lpc32xx_adc_state *st = NULL;
+ struct resource *res;
+ int retval = -ENODEV;
+ struct iio_dev *iodev = NULL;
+ int irq;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "failed to get platform I/O memory\n");
+ return -ENXIO;
+ }
+
+ iodev = devm_iio_device_alloc(&pdev->dev, sizeof(*st));
+ if (!iodev)
+ return -ENOMEM;
+
+ st = iio_priv(iodev);
+
+ st->adc_base = devm_ioremap(&pdev->dev, res->start,
+ resource_size(res));
+ if (!st->adc_base) {
+ dev_err(&pdev->dev, "failed mapping memory\n");
+ return -EBUSY;
+ }
+
+ st->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(st->clk)) {
+ dev_err(&pdev->dev, "failed getting clock\n");
+ return PTR_ERR(st->clk);
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0) {
+ dev_err(&pdev->dev, "failed getting interrupt resource\n");
+ return -ENXIO;
+ }
+
+ retval = devm_request_irq(&pdev->dev, irq, lpc32xx_adc_isr, 0,
+ LPC32XXAD_NAME, st);
+ if (retval < 0) {
+ dev_err(&pdev->dev, "failed requesting interrupt\n");
+ return retval;
+ }
+
+ platform_set_drvdata(pdev, iodev);
+
+ init_completion(&st->completion);
+
+ iodev->name = LPC32XXAD_NAME;
+ iodev->dev.parent = &pdev->dev;
+ iodev->info = &lpc32xx_adc_iio_info;
+ iodev->modes = INDIO_DIRECT_MODE;
+ iodev->channels = lpc32xx_adc_iio_channels;
+ iodev->num_channels = ARRAY_SIZE(lpc32xx_adc_iio_channels);
+
+ retval = devm_iio_device_register(&pdev->dev, iodev);
+ if (retval)
+ return retval;
+
+ dev_info(&pdev->dev, "LPC32XX ADC driver loaded, IRQ %d\n", irq);
+
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id lpc32xx_adc_match[] = {
+ { .compatible = "nxp,lpc3220-adc" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, lpc32xx_adc_match);
+#endif
+
+static struct platform_driver lpc32xx_adc_driver = {
+ .probe = lpc32xx_adc_probe,
+ .driver = {
+ .name = LPC32XXAD_NAME,
+ .of_match_table = of_match_ptr(lpc32xx_adc_match),
+ },
+};
+
+module_platform_driver(lpc32xx_adc_driver);
+
+MODULE_AUTHOR("Roland Stigge <stigge@antcom.de>");
+MODULE_DESCRIPTION("LPC32XX ADC driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/adc/ltc2497.c b/drivers/iio/adc/ltc2497.c
new file mode 100644
index 000000000000..2691b10023f5
--- /dev/null
+++ b/drivers/iio/adc/ltc2497.c
@@ -0,0 +1,279 @@
+/*
+ * ltc2497.c - Driver for Analog Devices/Linear Technology LTC2497 ADC
+ *
+ * Copyright (C) 2017 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2.
+ *
+ * Datasheet: http://cds.linear.com/docs/en/datasheet/2497fd.pdf
+ */
+
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+
+#define LTC2497_ENABLE 0xA0
+#define LTC2497_SGL BIT(4)
+#define LTC2497_DIFF 0
+#define LTC2497_SIGN BIT(3)
+#define LTC2497_CONFIG_DEFAULT LTC2497_ENABLE
+#define LTC2497_CONVERSION_TIME_MS 150ULL
+
+struct ltc2497_st {
+ struct i2c_client *client;
+ struct regulator *ref;
+ ktime_t time_prev;
+ u8 addr_prev;
+ /*
+ * DMA (thus cache coherency maintenance) requires the
+ * transfer buffers to live in their own cache lines.
+ */
+ __be32 buf ____cacheline_aligned;
+};
+
+static int ltc2497_wait_conv(struct ltc2497_st *st)
+{
+ s64 time_elapsed;
+
+ time_elapsed = ktime_ms_delta(ktime_get(), st->time_prev);
+
+ if (time_elapsed < LTC2497_CONVERSION_TIME_MS) {
+ /* delay if conversion time not passed
+ * since last read or write
+ */
+ if (msleep_interruptible(
+ LTC2497_CONVERSION_TIME_MS - time_elapsed))
+ return -ERESTARTSYS;
+
+ return 0;
+ }
+
+ if (time_elapsed - LTC2497_CONVERSION_TIME_MS <= 0) {
+ /* We're in automatic mode -
+		 * so the last reading is still not outdated
+ */
+ return 0;
+ }
+
+ return 1;
+}
+
+static int ltc2497_read(struct ltc2497_st *st, u8 address, int *val)
+{
+ struct i2c_client *client = st->client;
+ int ret;
+
+ ret = ltc2497_wait_conv(st);
+ if (ret < 0)
+ return ret;
+
+ if (ret || st->addr_prev != address) {
+ ret = i2c_smbus_write_byte(st->client,
+ LTC2497_ENABLE | address);
+ if (ret < 0)
+ return ret;
+ st->addr_prev = address;
+ if (msleep_interruptible(LTC2497_CONVERSION_TIME_MS))
+ return -ERESTARTSYS;
+ }
+ ret = i2c_master_recv(client, (char *)&st->buf, 3);
+ if (ret < 0) {
+ dev_err(&client->dev, "i2c_master_recv failed\n");
+ return ret;
+ }
+ st->time_prev = ktime_get();
+
+ /* convert and shift the result,
+ * and finally convert from offset binary to signed integer
+ */
+ *val = (be32_to_cpu(st->buf) >> 14) - (1 << 17);
+
+ return ret;
+}
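Illustrative only (not part of the patch): the offset-binary conversion done above, reproduced as a stand-alone snippet with three invented data bytes in the order i2c_master_recv() would return them:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t rx[3] = { 0xA0, 0x12, 0x34 };	/* hypothetical bytes, MSB first */
	uint32_t be = ((uint32_t)rx[0] << 24) |
		      ((uint32_t)rx[1] << 16) |
		      ((uint32_t)rx[2] << 8);
	int32_t val = (int32_t)(be >> 14) - (1 << 17);	/* same math as ltc2497_read() */

	/* 0xA01234 >> 6 = 0x28048; 0x28048 - 0x20000 = 0x8048 = 32840 */
	printf("signed result: %ld\n", (long)val);
	return 0;
}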
+
+static int ltc2497_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct ltc2497_st *st = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ mutex_lock(&indio_dev->mlock);
+ ret = ltc2497_read(st, chan->address, val);
+ mutex_unlock(&indio_dev->mlock);
+ if (ret < 0)
+ return ret;
+
+ return IIO_VAL_INT;
+
+ case IIO_CHAN_INFO_SCALE:
+ ret = regulator_get_voltage(st->ref);
+ if (ret < 0)
+ return ret;
+
+ *val = ret / 1000;
+ *val2 = 17;
+
+ return IIO_VAL_FRACTIONAL_LOG2;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+#define LTC2497_CHAN(_chan, _addr) { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .channel = (_chan), \
+ .address = (_addr | (_chan / 2) | ((_chan & 1) ? LTC2497_SIGN : 0)), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+}
+
+#define LTC2497_CHAN_DIFF(_chan, _addr) { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .channel = (_chan) * 2 + ((_addr) & LTC2497_SIGN ? 1 : 0), \
+ .channel2 = (_chan) * 2 + ((_addr) & LTC2497_SIGN ? 0 : 1),\
+ .address = (_addr | _chan), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+ .differential = 1, \
+}
+
+static const struct iio_chan_spec ltc2497_channel[] = {
+ LTC2497_CHAN(0, LTC2497_SGL),
+ LTC2497_CHAN(1, LTC2497_SGL),
+ LTC2497_CHAN(2, LTC2497_SGL),
+ LTC2497_CHAN(3, LTC2497_SGL),
+ LTC2497_CHAN(4, LTC2497_SGL),
+ LTC2497_CHAN(5, LTC2497_SGL),
+ LTC2497_CHAN(6, LTC2497_SGL),
+ LTC2497_CHAN(7, LTC2497_SGL),
+ LTC2497_CHAN(8, LTC2497_SGL),
+ LTC2497_CHAN(9, LTC2497_SGL),
+ LTC2497_CHAN(10, LTC2497_SGL),
+ LTC2497_CHAN(11, LTC2497_SGL),
+ LTC2497_CHAN(12, LTC2497_SGL),
+ LTC2497_CHAN(13, LTC2497_SGL),
+ LTC2497_CHAN(14, LTC2497_SGL),
+ LTC2497_CHAN(15, LTC2497_SGL),
+ LTC2497_CHAN_DIFF(0, LTC2497_DIFF),
+ LTC2497_CHAN_DIFF(1, LTC2497_DIFF),
+ LTC2497_CHAN_DIFF(2, LTC2497_DIFF),
+ LTC2497_CHAN_DIFF(3, LTC2497_DIFF),
+ LTC2497_CHAN_DIFF(4, LTC2497_DIFF),
+ LTC2497_CHAN_DIFF(5, LTC2497_DIFF),
+ LTC2497_CHAN_DIFF(6, LTC2497_DIFF),
+ LTC2497_CHAN_DIFF(7, LTC2497_DIFF),
+ LTC2497_CHAN_DIFF(0, LTC2497_DIFF | LTC2497_SIGN),
+ LTC2497_CHAN_DIFF(1, LTC2497_DIFF | LTC2497_SIGN),
+ LTC2497_CHAN_DIFF(2, LTC2497_DIFF | LTC2497_SIGN),
+ LTC2497_CHAN_DIFF(3, LTC2497_DIFF | LTC2497_SIGN),
+ LTC2497_CHAN_DIFF(4, LTC2497_DIFF | LTC2497_SIGN),
+ LTC2497_CHAN_DIFF(5, LTC2497_DIFF | LTC2497_SIGN),
+ LTC2497_CHAN_DIFF(6, LTC2497_DIFF | LTC2497_SIGN),
+ LTC2497_CHAN_DIFF(7, LTC2497_DIFF | LTC2497_SIGN),
+};
+
+static const struct iio_info ltc2497_info = {
+ .read_raw = ltc2497_read_raw,
+ .driver_module = THIS_MODULE,
+};
+
+static int ltc2497_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct iio_dev *indio_dev;
+ struct ltc2497_st *st;
+ int ret;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C |
+ I2C_FUNC_SMBUS_WRITE_BYTE))
+ return -EOPNOTSUPP;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*st));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ st = iio_priv(indio_dev);
+ i2c_set_clientdata(client, indio_dev);
+ st->client = client;
+
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->name = id->name;
+ indio_dev->info = &ltc2497_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = ltc2497_channel;
+ indio_dev->num_channels = ARRAY_SIZE(ltc2497_channel);
+
+ st->ref = devm_regulator_get(&client->dev, "vref");
+ if (IS_ERR(st->ref))
+ return PTR_ERR(st->ref);
+
+ ret = regulator_enable(st->ref);
+ if (ret < 0)
+ return ret;
+
+ ret = i2c_smbus_write_byte(st->client, LTC2497_CONFIG_DEFAULT);
+ if (ret < 0)
+ goto err_regulator_disable;
+
+ st->addr_prev = LTC2497_CONFIG_DEFAULT;
+ st->time_prev = ktime_get();
+
+ ret = iio_device_register(indio_dev);
+ if (ret < 0)
+ goto err_regulator_disable;
+
+ return 0;
+
+err_regulator_disable:
+ regulator_disable(st->ref);
+
+ return ret;
+}
+
+static int ltc2497_remove(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct ltc2497_st *st = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+ regulator_disable(st->ref);
+
+ return 0;
+}
+
+static const struct i2c_device_id ltc2497_id[] = {
+ { "ltc2497", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, ltc2497_id);
+
+static const struct of_device_id ltc2497_of_match[] = {
+ { .compatible = "lltc,ltc2497", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ltc2497_of_match);
+
+static struct i2c_driver ltc2497_driver = {
+ .driver = {
+ .name = "ltc2497",
+ .of_match_table = of_match_ptr(ltc2497_of_match),
+ },
+ .probe = ltc2497_probe,
+ .remove = ltc2497_remove,
+ .id_table = ltc2497_id,
+};
+module_i2c_driver(ltc2497_driver);
+
+MODULE_AUTHOR("Michael Hennerich <michael.hennerich@analog.com>");
+MODULE_DESCRIPTION("Linear Technology LTC2497 ADC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/adc/max1027.c b/drivers/iio/adc/max1027.c
index 3b7c4f78f37a..ebc715927e63 100644
--- a/drivers/iio/adc/max1027.c
+++ b/drivers/iio/adc/max1027.c
@@ -364,7 +364,7 @@ static int max1027_set_trigger_state(struct iio_trigger *trig, bool state)
static irqreturn_t max1027_trigger_handler(int irq, void *private)
{
- struct iio_poll_func *pf = (struct iio_poll_func *)private;
+ struct iio_poll_func *pf = private;
struct iio_dev *indio_dev = pf->indio_dev;
struct max1027_state *st = iio_priv(indio_dev);
diff --git a/drivers/iio/adc/max11100.c b/drivers/iio/adc/max11100.c
index a088cf99bfe1..1180bcc22ff1 100644
--- a/drivers/iio/adc/max11100.c
+++ b/drivers/iio/adc/max11100.c
@@ -124,8 +124,8 @@ static int max11100_probe(struct spi_device *spi)
indio_dev->name = "max11100";
indio_dev->info = &max11100_info;
indio_dev->modes = INDIO_DIRECT_MODE;
- indio_dev->channels = max11100_channels,
- indio_dev->num_channels = ARRAY_SIZE(max11100_channels),
+ indio_dev->channels = max11100_channels;
+ indio_dev->num_channels = ARRAY_SIZE(max11100_channels);
state->vref_reg = devm_regulator_get(&spi->dev, "vref");
if (IS_ERR(state->vref_reg))
@@ -167,7 +167,6 @@ MODULE_DEVICE_TABLE(of, max11100_ids);
static struct spi_driver max11100_driver = {
.driver = {
.name = "max11100",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(max11100_ids),
},
.probe = max11100_probe,
diff --git a/drivers/iio/adc/max1118.c b/drivers/iio/adc/max1118.c
new file mode 100644
index 000000000000..2e9648a078c4
--- /dev/null
+++ b/drivers/iio/adc/max1118.c
@@ -0,0 +1,307 @@
+/*
+ * MAX1117/MAX1118/MAX1119 8-bit, dual-channel ADCs driver
+ *
+ * Copyright (c) 2017 Akinobu Mita <akinobu.mita@gmail.com>
+ *
+ * This file is subject to the terms and conditions of version 2 of
+ * the GNU General Public License. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * Datasheet: https://datasheets.maximintegrated.com/en/ds/MAX1117-MAX1119.pdf
+ *
+ * SPI interface connections
+ *
+ * SPI MAXIM
+ * Master Direction MAX1117/8/9
+ * ------ --------- -----------
+ * nCS --> CNVST
+ * SCK --> SCLK
+ * MISO <-- DOUT
+ * ------ --------- -----------
+ */
+
+#include <linux/module.h>
+#include <linux/spi/spi.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/regulator/consumer.h>
+
+enum max1118_id {
+ max1117,
+ max1118,
+ max1119,
+};
+
+struct max1118 {
+ struct spi_device *spi;
+ struct mutex lock;
+ struct regulator *reg;
+
+ u8 data ____cacheline_aligned;
+};
+
+#define MAX1118_CHANNEL(ch) \
+ { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .channel = (ch), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+ .scan_index = ch, \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = 8, \
+ .storagebits = 8, \
+ }, \
+ }
+
+static const struct iio_chan_spec max1118_channels[] = {
+ MAX1118_CHANNEL(0),
+ MAX1118_CHANNEL(1),
+ IIO_CHAN_SOFT_TIMESTAMP(2),
+};
+
+static int max1118_read(struct spi_device *spi, int channel)
+{
+ struct iio_dev *indio_dev = spi_get_drvdata(spi);
+ struct max1118 *adc = iio_priv(indio_dev);
+ struct spi_transfer xfers[] = {
+ /*
+ * To select CH1 for conversion, CNVST pin must be brought high
+ * and low for a second time.
+ */
+ {
+ .len = 0,
+ .delay_usecs = 1, /* > CNVST Low Time 100 ns */
+ .cs_change = 1,
+ },
+ /*
+ * The acquisition interval begins with the falling edge of
+ * CNVST. The total acquisition and conversion process takes
+ * <7.5us.
+ */
+ {
+ .len = 0,
+ .delay_usecs = 8,
+ },
+ {
+ .rx_buf = &adc->data,
+ .len = 1,
+ },
+ };
+ int ret;
+
+ if (channel == 0)
+ ret = spi_sync_transfer(spi, xfers + 1, 2);
+ else
+ ret = spi_sync_transfer(spi, xfers, 3);
+
+ if (ret)
+ return ret;
+
+ return adc->data;
+}
+
+static int max1118_get_vref_mV(struct spi_device *spi)
+{
+ struct iio_dev *indio_dev = spi_get_drvdata(spi);
+ struct max1118 *adc = iio_priv(indio_dev);
+ const struct spi_device_id *id = spi_get_device_id(spi);
+ int vref_uV;
+
+ switch (id->driver_data) {
+ case max1117:
+ return 2048;
+ case max1119:
+ return 4096;
+ case max1118:
+ vref_uV = regulator_get_voltage(adc->reg);
+ if (vref_uV < 0)
+ return vref_uV;
+ return vref_uV / 1000;
+ }
+
+ return -ENODEV;
+}
+
+static int max1118_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct max1118 *adc = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ mutex_lock(&adc->lock);
+ *val = max1118_read(adc->spi, chan->channel);
+ mutex_unlock(&adc->lock);
+ if (*val < 0)
+ return *val;
+
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ *val = max1118_get_vref_mV(adc->spi);
+ if (*val < 0)
+ return *val;
+ *val2 = 8;
+
+ return IIO_VAL_FRACTIONAL_LOG2;
+ }
+
+ return -EINVAL;
+}
+
+static const struct iio_info max1118_info = {
+ .read_raw = max1118_read_raw,
+ .driver_module = THIS_MODULE,
+};
+
+static irqreturn_t max1118_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct max1118 *adc = iio_priv(indio_dev);
+ u8 data[16] = { }; /* 2x 8-bit ADC data + padding + 8 bytes timestamp */
+ int scan_index;
+ int i = 0;
+
+ mutex_lock(&adc->lock);
+
+ for_each_set_bit(scan_index, indio_dev->active_scan_mask,
+ indio_dev->masklength) {
+ const struct iio_chan_spec *scan_chan =
+ &indio_dev->channels[scan_index];
+ int ret = max1118_read(adc->spi, scan_chan->channel);
+
+ if (ret < 0) {
+ dev_warn(&adc->spi->dev,
+ "failed to get conversion data\n");
+ goto out;
+ }
+
+ data[i] = ret;
+ i++;
+ }
+ iio_push_to_buffers_with_timestamp(indio_dev, data,
+ iio_get_time_ns(indio_dev));
+out:
+ mutex_unlock(&adc->lock);
+
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
+static int max1118_probe(struct spi_device *spi)
+{
+ struct iio_dev *indio_dev;
+ struct max1118 *adc;
+ const struct spi_device_id *id = spi_get_device_id(spi);
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*adc));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ adc = iio_priv(indio_dev);
+ adc->spi = spi;
+ mutex_init(&adc->lock);
+
+ if (id->driver_data == max1118) {
+ adc->reg = devm_regulator_get(&spi->dev, "vref");
+ if (IS_ERR(adc->reg)) {
+ dev_err(&spi->dev, "failed to get vref regulator\n");
+ return PTR_ERR(adc->reg);
+ }
+ ret = regulator_enable(adc->reg);
+ if (ret)
+ return ret;
+ }
+
+ spi_set_drvdata(spi, indio_dev);
+
+ indio_dev->name = spi_get_device_id(spi)->name;
+ indio_dev->dev.parent = &spi->dev;
+ indio_dev->info = &max1118_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = max1118_channels;
+ indio_dev->num_channels = ARRAY_SIZE(max1118_channels);
+
+ /*
+ * To reinitiate a conversion on CH0, it is necessary to allow for a
+ * conversion to be complete and all of the data to be read out. Once
+ * a conversion has been completed, the MAX1117/MAX1118/MAX1119 will go
+ * into AutoShutdown mode until the next conversion is initiated.
+ */
+ max1118_read(spi, 0);
+
+ ret = iio_triggered_buffer_setup(indio_dev, NULL,
+ max1118_trigger_handler, NULL);
+ if (ret)
+ goto err_reg_disable;
+
+ ret = iio_device_register(indio_dev);
+ if (ret)
+ goto err_buffer_cleanup;
+
+ return 0;
+
+err_buffer_cleanup:
+ iio_triggered_buffer_cleanup(indio_dev);
+err_reg_disable:
+ if (id->driver_data == max1118)
+ regulator_disable(adc->reg);
+
+ return ret;
+}
+
+static int max1118_remove(struct spi_device *spi)
+{
+ struct iio_dev *indio_dev = spi_get_drvdata(spi);
+ struct max1118 *adc = iio_priv(indio_dev);
+ const struct spi_device_id *id = spi_get_device_id(spi);
+
+ iio_device_unregister(indio_dev);
+ iio_triggered_buffer_cleanup(indio_dev);
+ if (id->driver_data == max1118)
+ return regulator_disable(adc->reg);
+
+ return 0;
+}
+
+static const struct spi_device_id max1118_id[] = {
+ { "max1117", max1117 },
+ { "max1118", max1118 },
+ { "max1119", max1119 },
+ {}
+};
+MODULE_DEVICE_TABLE(spi, max1118_id);
+
+#ifdef CONFIG_OF
+
+static const struct of_device_id max1118_dt_ids[] = {
+ { .compatible = "maxim,max1117" },
+ { .compatible = "maxim,max1118" },
+ { .compatible = "maxim,max1119" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, max1118_dt_ids);
+
+#endif
+
+static struct spi_driver max1118_spi_driver = {
+ .driver = {
+ .name = "max1118",
+ .of_match_table = of_match_ptr(max1118_dt_ids),
+ },
+ .probe = max1118_probe,
+ .remove = max1118_remove,
+ .id_table = max1118_id,
+};
+module_spi_driver(max1118_spi_driver);
+
+MODULE_AUTHOR("Akinobu Mita <akinobu.mita@gmail.com>");
+MODULE_DESCRIPTION("MAXIM MAX1117/MAX1118/MAX1119 ADCs driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/adc/max1363.c b/drivers/iio/adc/max1363.c
index c6c12feb4a08..80eada4886b3 100644
--- a/drivers/iio/adc/max1363.c
+++ b/drivers/iio/adc/max1363.c
@@ -1007,7 +1007,7 @@ static struct attribute *max1363_event_attributes[] = {
NULL,
};
-static struct attribute_group max1363_event_attribute_group = {
+static const struct attribute_group max1363_event_attribute_group = {
.attrs = max1363_event_attributes,
};
diff --git a/drivers/iio/adc/max9611.c b/drivers/iio/adc/max9611.c
new file mode 100644
index 000000000000..ec82106480e1
--- /dev/null
+++ b/drivers/iio/adc/max9611.c
@@ -0,0 +1,585 @@
+/*
+ * iio/adc/max9611.c
+ *
+ * Maxim max9611/max9612 high side current sense amplifier with
+ * 12-bit ADC interface.
+ *
+ * Copyright (C) 2017 Jacopo Mondi
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * This driver supports input common-mode voltage, current-sense
+ * amplifier with programmable gains and die temperature reading from
+ * Maxim max9611/max9612.
+ *
+ * Op-amp, analog comparator, and watchdog functionalities are not
+ * supported by this driver.
+ */
+
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+
+#define DRIVER_NAME "max9611"
+
+/* max9611 register addresses */
+#define MAX9611_REG_CSA_DATA 0x00
+#define MAX9611_REG_RS_DATA 0x02
+#define MAX9611_REG_TEMP_DATA 0x08
+#define MAX9611_REG_CTRL1 0x0a
+#define MAX9611_REG_CTRL2 0x0b
+
+/* max9611 REG1 mux configuration options */
+#define MAX9611_MUX_MASK GENMASK(3, 0)
+#define MAX9611_MUX_SENSE_1x 0x00
+#define MAX9611_MUX_SENSE_4x 0x01
+#define MAX9611_MUX_SENSE_8x 0x02
+#define MAX9611_INPUT_VOLT 0x03
+#define MAX9611_MUX_TEMP 0x06
+
+/* max9611 voltage (both csa and input) helper macros */
+#define MAX9611_VOLTAGE_SHIFT 0x04
+#define MAX9611_VOLTAGE_RAW(_r) ((_r) >> MAX9611_VOLTAGE_SHIFT)
+
+/*
+ * max9611 current sense amplifier voltage output:
+ * LSB and offset values depend on the selected gain (1x, 4x, 8x)
+ *
+ * GAIN LSB (nV) OFFSET (LSB steps)
+ * 1x 107500 1
+ * 4x 26880 1
+ * 8x 13440 3
+ *
+ * The complete formula to calculate current sense voltage is:
+ * (((adc_read >> 4) - offset) / ((1 / LSB) * 10^-3))
+ */
+#define MAX9611_CSA_1X_LSB_nV 107500
+#define MAX9611_CSA_4X_LSB_nV 26880
+#define MAX9611_CSA_8X_LSB_nV 13440
+
+#define MAX9611_CSA_1X_OFFS_RAW 1
+#define MAX9611_CSA_4X_OFFS_RAW 1
+#define MAX9611_CSA_8X_OFFS_RAW 3
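For reference, a stand-alone sketch (not part of the patch, values invented) of how a raw register value, the per-gain offset and the per-gain LSB from the table above combine into a current-sense voltage:

#include <stdio.h>

/* LSB (nV) and offset (raw steps) per gain, mirroring the table above */
static const unsigned int lsb_nv[]   = { 107500, 26880, 13440 };
static const unsigned int offs_raw[] = { 1, 1, 3 };

int main(void)
{
	unsigned int gain = 1;			/* hypothetical: 4x gain selected */
	unsigned int adc_read = 0x1230;		/* hypothetical raw data register value */
	unsigned int raw = adc_read >> 4;	/* same shift as MAX9611_VOLTAGE_RAW() */
	unsigned long long csa_nv =
		(unsigned long long)(raw - offs_raw[gain]) * lsb_nv[gain];

	/* (0x123 - 1) * 26880 nV = 290 * 26880 = 7795200 nV, i.e. ~7.8 mV */
	printf("csa voltage: %llu nV\n", csa_nv);
	return 0;
}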
+
+/*
+ * max9611 common input mode (CIM): LSB is 14mV, with 14mV offset at 25 C
+ *
+ * The complete formula to calculate input common voltage is:
+ * (((adc_read >> 4) * 1000) - offset) / (1 / 14 * 1000)
+ */
+#define MAX9611_CIM_LSB_mV 14
+#define MAX9611_CIM_OFFSET_RAW 1
+
+/*
+ * max9611 temperature reading: LSB is 480 milli degrees Celsius
+ *
+ * The complete formula to calculate temperature is:
+ * ((adc_read >> 7) * 1000) / (1 / 480 * 1000)
+ */
+#define MAX9611_TEMP_MAX_POS 0x7f80
+#define MAX9611_TEMP_MAX_NEG 0xff80
+#define MAX9611_TEMP_MIN_NEG 0xd980
+#define MAX9611_TEMP_MASK GENMASK(15, 7)
+#define MAX9611_TEMP_SHIFT 0x07
+#define MAX9611_TEMP_RAW(_r) ((_r) >> MAX9611_TEMP_SHIFT)
+#define MAX9611_TEMP_SCALE_NUM 1000000
+#define MAX9611_TEMP_SCALE_DIV 2083
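Likewise, a small illustration (not part of the patch, register values made up) of the input common-mode voltage and die temperature conversions described in the two comments above:

#include <stdio.h>

int main(void)
{
	unsigned int vin_reg  = 0x0A50;	/* hypothetical RS data register value */
	unsigned int temp_reg = 0x1A80;	/* hypothetical TEMP data register value */

	/* input voltage: (raw - offset) * 14 mV per LSB */
	unsigned int vin_mv = ((vin_reg >> 4) - 1) * 14;

	/* temperature: raw * 1000000 / 2083, i.e. ~480 millidegrees C per LSB */
	long temp_mc = (long)(temp_reg >> 7) * 1000000 / 2083;

	/* 0xA5 - 1 = 164 -> 2296 mV; 0x35 = 53 -> ~25444 millidegrees C */
	printf("vin=%u mV, die temp=%ld mC\n", vin_mv, temp_mc);
	return 0;
}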
+
+struct max9611_dev {
+ struct device *dev;
+ struct i2c_client *i2c_client;
+ struct mutex lock;
+ unsigned int shunt_resistor_uohm;
+};
+
+enum max9611_conf_ids {
+ CONF_SENSE_1x,
+ CONF_SENSE_4x,
+ CONF_SENSE_8x,
+ CONF_IN_VOLT,
+ CONF_TEMP,
+};
+
+/**
+ * max9611_mux_conf - associate ADC mux configuration with register address
+ * where data shall be read from
+ */
+static const unsigned int max9611_mux_conf[][2] = {
+ /* CONF_SENSE_1x */
+ { MAX9611_MUX_SENSE_1x, MAX9611_REG_CSA_DATA },
+ /* CONF_SENSE_4x */
+ { MAX9611_MUX_SENSE_4x, MAX9611_REG_CSA_DATA },
+ /* CONF_SENSE_8x */
+ { MAX9611_MUX_SENSE_8x, MAX9611_REG_CSA_DATA },
+ /* CONF_IN_VOLT */
+ { MAX9611_INPUT_VOLT, MAX9611_REG_RS_DATA },
+ /* CONF_TEMP */
+ { MAX9611_MUX_TEMP, MAX9611_REG_TEMP_DATA },
+};
+
+enum max9611_csa_gain {
+ CSA_GAIN_1x,
+ CSA_GAIN_4x,
+ CSA_GAIN_8x,
+};
+
+enum max9611_csa_gain_params {
+ CSA_GAIN_LSB_nV,
+ CSA_GAIN_OFFS_RAW,
+};
+
+/**
+ * max9611_csa_gain_conf - associate gain multiplier with LSB and
+ * offset values.
+ *
+ * Group together parameters associated with configurable gain
+ * on current sense amplifier path to ADC interface.
+ * Current sense read routine adjusts gain until it gets a meaningful
+ * value; use this structure to retrieve the correct LSB and offset values.
+ */
+static const unsigned int max9611_gain_conf[][2] = {
+ { /* [0] CSA_GAIN_1x */
+ MAX9611_CSA_1X_LSB_nV,
+ MAX9611_CSA_1X_OFFS_RAW,
+ },
+ { /* [1] CSA_GAIN_4x */
+ MAX9611_CSA_4X_LSB_nV,
+ MAX9611_CSA_4X_OFFS_RAW,
+ },
+ { /* [2] CSA_GAIN_8x */
+ MAX9611_CSA_8X_LSB_nV,
+ MAX9611_CSA_8X_OFFS_RAW,
+ },
+};
+
+enum max9611_chan_addrs {
+ MAX9611_CHAN_VOLTAGE_INPUT,
+ MAX9611_CHAN_VOLTAGE_SENSE,
+ MAX9611_CHAN_TEMPERATURE,
+ MAX9611_CHAN_CURRENT_LOAD,
+ MAX9611_CHAN_POWER_LOAD,
+};
+
+static const struct iio_chan_spec max9611_channels[] = {
+ {
+ .type = IIO_TEMP,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ .address = MAX9611_CHAN_TEMPERATURE,
+ },
+ {
+ .type = IIO_VOLTAGE,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
+ .address = MAX9611_CHAN_VOLTAGE_SENSE,
+ .indexed = 1,
+ .channel = 0,
+ },
+ {
+ .type = IIO_VOLTAGE,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_OFFSET),
+ .address = MAX9611_CHAN_VOLTAGE_INPUT,
+ .indexed = 1,
+ .channel = 1,
+ },
+ {
+ .type = IIO_CURRENT,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
+ .address = MAX9611_CHAN_CURRENT_LOAD,
+ },
+ {
+ .type = IIO_POWER,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
+ .address = MAX9611_CHAN_POWER_LOAD
+ },
+};
+
+/**
+ * max9611_read_single() - read a single value from ADC interface
+ *
+ * Data registers are 16 bit long, spread between two 8 bit registers
+ * with consecutive addresses.
+ * Configure ADC mux first, then read register at address "reg_addr".
+ * The smbus_read_word routine asks for 16 bits and the ADC is kind enough
+ * to return values from "reg_addr" and "reg_addr + 1" consecutively.
+ * Data are transmitted with big-endian ordering: MSB arrives first.
+ *
+ * @max9611: max9611 device
+ * @selector: index for mux and register configuration
+ * @raw_val: the value returned from ADC
+ */
+static int max9611_read_single(struct max9611_dev *max9611,
+ enum max9611_conf_ids selector,
+ u16 *raw_val)
+{
+ int ret;
+
+ u8 mux_conf = max9611_mux_conf[selector][0] & MAX9611_MUX_MASK;
+ u8 reg_addr = max9611_mux_conf[selector][1];
+
+ /*
+ * Keep mutex lock held during read-write to avoid mux register
+ * (CTRL1) re-configuration.
+ */
+ mutex_lock(&max9611->lock);
+ ret = i2c_smbus_write_byte_data(max9611->i2c_client,
+ MAX9611_REG_CTRL1, mux_conf);
+ if (ret) {
+ dev_err(max9611->dev, "i2c write byte failed: 0x%2x - 0x%2x\n",
+ MAX9611_REG_CTRL1, mux_conf);
+ mutex_unlock(&max9611->lock);
+ return ret;
+ }
+
+ /*
+ * need a delay here to make register configuration
+ * stabilize. 1 msec at least, from empirical testing.
+ */
+ usleep_range(1000, 2000);
+
+ ret = i2c_smbus_read_word_swapped(max9611->i2c_client, reg_addr);
+ if (ret < 0) {
+ dev_err(max9611->dev, "i2c read word from 0x%2x failed\n",
+ reg_addr);
+ mutex_unlock(&max9611->lock);
+ return ret;
+ }
+
+ *raw_val = ret;
+ mutex_unlock(&max9611->lock);
+
+ return 0;
+}
+
+/**
+ * max9611_read_csa_voltage() - read current sense amplifier output voltage
+ *
+ * Current sense amplifier output voltage is read through a configurable
+ * 1x, 4x or 8x gain.
+ * Start with plain 1x gain, and adjust gain control properly until a
+ * meaningful value is read from ADC output.
+ *
+ * @max9611: max9611 device
+ * @adc_raw: raw value read from ADC output
+ * @csa_gain: gain configuration option selector
+ */
+static int max9611_read_csa_voltage(struct max9611_dev *max9611,
+ u16 *adc_raw,
+ enum max9611_csa_gain *csa_gain)
+{
+ enum max9611_conf_ids gain_selectors[] = {
+ CONF_SENSE_1x,
+ CONF_SENSE_4x,
+ CONF_SENSE_8x
+ };
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < ARRAY_SIZE(gain_selectors); ++i) {
+ ret = max9611_read_single(max9611, gain_selectors[i], adc_raw);
+ if (ret)
+ return ret;
+
+ if (*adc_raw > 0) {
+ *csa_gain = gain_selectors[i];
+ return 0;
+ }
+ }
+
+ return -EIO;
+}
+
+static int max9611_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct max9611_dev *dev = iio_priv(indio_dev);
+ enum max9611_csa_gain gain_selector;
+ const unsigned int *csa_gain;
+ u16 adc_data;
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+
+ switch (chan->address) {
+ case MAX9611_CHAN_TEMPERATURE:
+ ret = max9611_read_single(dev, CONF_TEMP,
+ &adc_data);
+ if (ret)
+ return -EINVAL;
+
+ *val = MAX9611_TEMP_RAW(adc_data);
+ return IIO_VAL_INT;
+
+ case MAX9611_CHAN_VOLTAGE_INPUT:
+ ret = max9611_read_single(dev, CONF_IN_VOLT,
+ &adc_data);
+ if (ret)
+ return -EINVAL;
+
+ *val = MAX9611_VOLTAGE_RAW(adc_data);
+ return IIO_VAL_INT;
+ }
+
+ break;
+
+ case IIO_CHAN_INFO_OFFSET:
+ /* MAX9611_CHAN_VOLTAGE_INPUT */
+ *val = MAX9611_CIM_OFFSET_RAW;
+
+ return IIO_VAL_INT;
+
+ case IIO_CHAN_INFO_SCALE:
+
+ switch (chan->address) {
+ case MAX9611_CHAN_TEMPERATURE:
+ *val = MAX9611_TEMP_SCALE_NUM;
+ *val2 = MAX9611_TEMP_SCALE_DIV;
+
+ return IIO_VAL_FRACTIONAL;
+
+ case MAX9611_CHAN_VOLTAGE_INPUT:
+ *val = MAX9611_CIM_LSB_mV;
+
+ return IIO_VAL_INT;
+ }
+
+ break;
+
+ case IIO_CHAN_INFO_PROCESSED:
+
+ switch (chan->address) {
+ case MAX9611_CHAN_VOLTAGE_SENSE:
+ /*
+ * processed (mV): (raw - offset) * LSB (nV) / 10^6
+ *
+ * Even if max9611 can output raw csa voltage readings,
+			 * use a processed value, as the scale depends on the selected gain.
+ */
+ ret = max9611_read_csa_voltage(dev, &adc_data,
+ &gain_selector);
+ if (ret)
+ return -EINVAL;
+
+ csa_gain = max9611_gain_conf[gain_selector];
+
+ adc_data -= csa_gain[CSA_GAIN_OFFS_RAW];
+ *val = MAX9611_VOLTAGE_RAW(adc_data) *
+ csa_gain[CSA_GAIN_LSB_nV];
+ *val2 = 1000000;
+
+ return IIO_VAL_FRACTIONAL;
+
+ case MAX9611_CHAN_CURRENT_LOAD:
+ /* processed (mA): Vcsa (nV) / Rshunt (uOhm) */
+ ret = max9611_read_csa_voltage(dev, &adc_data,
+ &gain_selector);
+ if (ret)
+ return -EINVAL;
+
+ csa_gain = max9611_gain_conf[gain_selector];
+
+ adc_data -= csa_gain[CSA_GAIN_OFFS_RAW];
+ *val = MAX9611_VOLTAGE_RAW(adc_data) *
+ csa_gain[CSA_GAIN_LSB_nV];
+ *val2 = dev->shunt_resistor_uohm;
+
+ return IIO_VAL_FRACTIONAL;
+
+ case MAX9611_CHAN_POWER_LOAD:
+ /*
+ * processed (mW): Vin (mV) * Vcsa (uV) /
+ * Rshunt (uOhm)
+ */
+ ret = max9611_read_single(dev, CONF_IN_VOLT,
+ &adc_data);
+ if (ret)
+ return -EINVAL;
+
+ adc_data -= MAX9611_CIM_OFFSET_RAW;
+ *val = MAX9611_VOLTAGE_RAW(adc_data) *
+ MAX9611_CIM_LSB_mV;
+
+ ret = max9611_read_csa_voltage(dev, &adc_data,
+ &gain_selector);
+ if (ret)
+ return -EINVAL;
+
+ csa_gain = max9611_gain_conf[gain_selector];
+
+ /* divide by 10^3 here to avoid 32bit overflow */
+ adc_data -= csa_gain[CSA_GAIN_OFFS_RAW];
+ *val *= MAX9611_VOLTAGE_RAW(adc_data) *
+ csa_gain[CSA_GAIN_LSB_nV] / 1000;
+ *val2 = dev->shunt_resistor_uohm;
+
+ return IIO_VAL_FRACTIONAL;
+ }
+
+ break;
+ }
+
+ return -EINVAL;
+}
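To make the unit juggling in the power branch above concrete, a throwaway sketch (not part of the patch) with invented register values and a hypothetical 5 mOhm shunt:

#include <stdio.h>

int main(void)
{
	unsigned int vin_raw  = 0x0A50 >> 4;	/* input voltage register, shifted */
	unsigned int csa_raw  = 0x1230 >> 4;	/* sense voltage register, 4x gain */
	unsigned int shunt_uohm = 5000;		/* hypothetical shunt value from DT */

	unsigned int vin_mv = (vin_raw - 1) * 14;	/* CIM: 14 mV/LSB, offset 1 */
	unsigned long long vcsa_nv = (unsigned long long)(csa_raw - 1) * 26880;

	/* mW = Vin(mV) * Vcsa(uV) / Rshunt(uOhm); dividing by 1000 turns nV into uV */
	unsigned long long power_mw =
		(unsigned long long)vin_mv * (vcsa_nv / 1000) / shunt_uohm;

	/* 2296 mV * 7795 uV / 5000 uOhm ~ 3579 mW */
	printf("load power: ~%llu mW\n", power_mw);
	return 0;
}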
+
+static ssize_t max9611_shunt_resistor_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct max9611_dev *max9611 = iio_priv(dev_to_iio_dev(dev));
+ unsigned int i, r;
+
+ i = max9611->shunt_resistor_uohm / 1000;
+ r = max9611->shunt_resistor_uohm % 1000;
+
+ return sprintf(buf, "%u.%03u\n", i, r);
+}
+
+static IIO_DEVICE_ATTR(in_power_shunt_resistor, 0444,
+ max9611_shunt_resistor_show, NULL, 0);
+static IIO_DEVICE_ATTR(in_current_shunt_resistor, 0444,
+ max9611_shunt_resistor_show, NULL, 0);
+
+static struct attribute *max9611_attributes[] = {
+ &iio_dev_attr_in_power_shunt_resistor.dev_attr.attr,
+ &iio_dev_attr_in_current_shunt_resistor.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group max9611_attribute_group = {
+ .attrs = max9611_attributes,
+};
+
+static const struct iio_info indio_info = {
+ .driver_module = THIS_MODULE,
+ .read_raw = max9611_read_raw,
+ .attrs = &max9611_attribute_group,
+};
+
+static int max9611_init(struct max9611_dev *max9611)
+{
+ struct i2c_client *client = max9611->i2c_client;
+ u16 regval;
+ int ret;
+
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_WRITE_BYTE |
+ I2C_FUNC_SMBUS_READ_WORD_DATA)) {
+ dev_err(max9611->dev,
+			"I2C adapter does not support SMBus write_byte or read_word functionalities: aborting probe.\n");
+ return -EINVAL;
+ }
+
+ /* Make sure die temperature is in range to test communications. */
+ ret = max9611_read_single(max9611, CONF_TEMP, &regval);
+ if (ret)
+ return ret;
+
+	regval &= MAX9611_TEMP_MASK;
+
+ if ((regval > MAX9611_TEMP_MAX_POS &&
+ regval < MAX9611_TEMP_MIN_NEG) ||
+ regval > MAX9611_TEMP_MAX_NEG) {
+ dev_err(max9611->dev,
+ "Invalid value received from ADC 0x%4x: aborting\n",
+ regval);
+ return -EIO;
+ }
+
+ /* Mux shall be zeroed back before applying other configurations */
+ ret = i2c_smbus_write_byte_data(max9611->i2c_client,
+ MAX9611_REG_CTRL1, 0);
+ if (ret) {
+ dev_err(max9611->dev, "i2c write byte failed: 0x%2x - 0x%2x\n",
+ MAX9611_REG_CTRL1, 0);
+ return ret;
+ }
+
+ ret = i2c_smbus_write_byte_data(max9611->i2c_client,
+ MAX9611_REG_CTRL2, 0);
+ if (ret) {
+ dev_err(max9611->dev, "i2c write byte failed: 0x%2x - 0x%2x\n",
+ MAX9611_REG_CTRL2, 0);
+ return ret;
+ }
+ usleep_range(1000, 2000);
+
+ return 0;
+}
+
+static const struct of_device_id max9611_of_table[] = {
+ {.compatible = "maxim,max9611", .data = "max9611"},
+ {.compatible = "maxim,max9612", .data = "max9612"},
+ { },
+};
+
+MODULE_DEVICE_TABLE(of, max9611_of_table);
+static int max9611_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ const char * const shunt_res_prop = "shunt-resistor-micro-ohms";
+ const struct device_node *of_node = client->dev.of_node;
+ const struct of_device_id *of_id =
+ of_match_device(max9611_of_table, &client->dev);
+ struct max9611_dev *max9611;
+ struct iio_dev *indio_dev;
+ unsigned int of_shunt;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*max9611));
+	if (!indio_dev)
+		return -ENOMEM;
+
+ i2c_set_clientdata(client, indio_dev);
+
+ max9611 = iio_priv(indio_dev);
+ max9611->dev = &client->dev;
+ max9611->i2c_client = client;
+ mutex_init(&max9611->lock);
+
+ ret = of_property_read_u32(of_node, shunt_res_prop, &of_shunt);
+ if (ret) {
+ dev_err(&client->dev,
+ "Missing %s property for %s node\n",
+ shunt_res_prop, of_node->full_name);
+ return ret;
+ }
+ max9611->shunt_resistor_uohm = of_shunt;
+
+ ret = max9611_init(max9611);
+ if (ret)
+ return ret;
+
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->dev.of_node = client->dev.of_node;
+ indio_dev->name = of_id->data;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->info = &indio_info;
+ indio_dev->channels = max9611_channels;
+ indio_dev->num_channels = ARRAY_SIZE(max9611_channels);
+
+ return devm_iio_device_register(&client->dev, indio_dev);
+}
+
+static struct i2c_driver max9611_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = max9611_of_table,
+ },
+ .probe = max9611_probe,
+};
+module_i2c_driver(max9611_driver);
+
+MODULE_AUTHOR("Jacopo Mondi <jacopo+renesas@jmondi.org>");
+MODULE_DESCRIPTION("Maxim max9611/12 current sense amplifier with 12bit ADC");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/adc/meson_saradc.c b/drivers/iio/adc/meson_saradc.c
index 89def6034f40..dd4190b50df6 100644
--- a/drivers/iio/adc/meson_saradc.c
+++ b/drivers/iio/adc/meson_saradc.c
@@ -18,7 +18,9 @@
#include <linux/io.h>
#include <linux/iio/iio.h>
#include <linux/module.h>
+#include <linux/interrupt.h>
#include <linux/of.h>
+#include <linux/of_irq.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
@@ -163,6 +165,9 @@
#define MESON_SAR_ADC_REG13_12BIT_CALIBRATION_MASK GENMASK(13, 8)
#define MESON_SAR_ADC_MAX_FIFO_SIZE 32
+#define MESON_SAR_ADC_TIMEOUT 100 /* ms */
+/* for use with IIO_VAL_INT_PLUS_MICRO */
+#define MILLION 1000000
#define MESON_SAR_ADC_CHAN(_chan) { \
.type = IIO_VOLTAGE, \
@@ -170,7 +175,9 @@
.channel = _chan, \
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
BIT(IIO_CHAN_INFO_AVERAGE_RAW), \
- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_CALIBBIAS) | \
+ BIT(IIO_CHAN_INFO_CALIBSCALE), \
.datasheet_name = "SAR_ADC_CH"#_chan, \
}
@@ -229,6 +236,9 @@ struct meson_sar_adc_priv {
struct clk_gate clk_gate;
struct clk *adc_div_clk;
struct clk_divider clk_div;
+ struct completion done;
+ int calibbias;
+ int calibscale;
};
static const struct regmap_config meson_sar_adc_regmap_config = {
@@ -248,6 +258,17 @@ static unsigned int meson_sar_adc_get_fifo_count(struct iio_dev *indio_dev)
return FIELD_GET(MESON_SAR_ADC_REG0_FIFO_COUNT_MASK, regval);
}
+static int meson_sar_adc_calib_val(struct iio_dev *indio_dev, int val)
+{
+ struct meson_sar_adc_priv *priv = iio_priv(indio_dev);
+ int tmp;
+
+ /* use val_calib = scale * val_raw + offset calibration function */
+ tmp = div_s64((s64)val * priv->calibscale, MILLION) + priv->calibbias;
+
+ return clamp(tmp, 0, (1 << priv->data->resolution) - 1);
+}
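As a quick illustration (not part of the patch, numbers invented, 10-bit resolution assumed), applying the val_calib = scale * val_raw + offset correction and the clamp from above:

#include <stdio.h>

#define MILLION 1000000

int main(void)
{
	int resolution = 10;		/* hypothetical ADC resolution */
	int calibscale = 980000;	/* scale stored in millionths, i.e. 0.98 */
	int calibbias = 6;
	int raw = 700;

	long long tmp = (long long)raw * calibscale / MILLION + calibbias;
	int max = (1 << resolution) - 1;
	int val = tmp < 0 ? 0 : (tmp > max ? max : (int)tmp);

	/* 700 * 0.98 + 6 = 692, well inside 0..1023 */
	printf("calibrated sample: %d\n", val);
	return 0;
}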
+
static int meson_sar_adc_wait_busy_clear(struct iio_dev *indio_dev)
{
struct meson_sar_adc_priv *priv = iio_priv(indio_dev);
@@ -274,33 +295,31 @@ static int meson_sar_adc_read_raw_sample(struct iio_dev *indio_dev,
int *val)
{
struct meson_sar_adc_priv *priv = iio_priv(indio_dev);
- int ret, regval, fifo_chan, fifo_val, sum = 0, count = 0;
+ int regval, fifo_chan, fifo_val, count;
- ret = meson_sar_adc_wait_busy_clear(indio_dev);
- if (ret)
- return ret;
-
- while (meson_sar_adc_get_fifo_count(indio_dev) > 0 &&
- count < MESON_SAR_ADC_MAX_FIFO_SIZE) {
- regmap_read(priv->regmap, MESON_SAR_ADC_FIFO_RD, &regval);
-
- fifo_chan = FIELD_GET(MESON_SAR_ADC_FIFO_RD_CHAN_ID_MASK,
- regval);
- if (fifo_chan != chan->channel)
- continue;
-
- fifo_val = FIELD_GET(MESON_SAR_ADC_FIFO_RD_SAMPLE_VALUE_MASK,
- regval);
- fifo_val &= (BIT(priv->data->resolution) - 1);
+	if (!wait_for_completion_timeout(&priv->done,
+ msecs_to_jiffies(MESON_SAR_ADC_TIMEOUT)))
+ return -ETIMEDOUT;
- sum += fifo_val;
- count++;
+ count = meson_sar_adc_get_fifo_count(indio_dev);
+ if (count != 1) {
+ dev_err(&indio_dev->dev,
+ "ADC FIFO has %d element(s) instead of one\n", count);
+ return -EINVAL;
}
- if (!count)
- return -ENOENT;
+ regmap_read(priv->regmap, MESON_SAR_ADC_FIFO_RD, &regval);
+ fifo_chan = FIELD_GET(MESON_SAR_ADC_FIFO_RD_CHAN_ID_MASK, regval);
+ if (fifo_chan != chan->channel) {
+ dev_err(&indio_dev->dev,
+ "ADC FIFO entry belongs to channel %d instead of %d\n",
+ fifo_chan, chan->channel);
+ return -EINVAL;
+ }
- *val = sum / count;
+ fifo_val = FIELD_GET(MESON_SAR_ADC_FIFO_RD_SAMPLE_VALUE_MASK, regval);
+ fifo_val &= GENMASK(priv->data->resolution - 1, 0);
+ *val = meson_sar_adc_calib_val(indio_dev, fifo_val);
return 0;
}
@@ -378,6 +397,12 @@ static void meson_sar_adc_start_sample_engine(struct iio_dev *indio_dev)
{
struct meson_sar_adc_priv *priv = iio_priv(indio_dev);
+ reinit_completion(&priv->done);
+
+ regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG0,
+ MESON_SAR_ADC_REG0_FIFO_IRQ_EN,
+ MESON_SAR_ADC_REG0_FIFO_IRQ_EN);
+
regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG0,
MESON_SAR_ADC_REG0_SAMPLE_ENGINE_ENABLE,
MESON_SAR_ADC_REG0_SAMPLE_ENGINE_ENABLE);
@@ -392,6 +417,9 @@ static void meson_sar_adc_stop_sample_engine(struct iio_dev *indio_dev)
struct meson_sar_adc_priv *priv = iio_priv(indio_dev);
regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG0,
+ MESON_SAR_ADC_REG0_FIFO_IRQ_EN, 0);
+
+ regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG0,
MESON_SAR_ADC_REG0_SAMPLING_STOP,
MESON_SAR_ADC_REG0_SAMPLING_STOP);
@@ -516,6 +544,15 @@ static int meson_sar_adc_iio_info_read_raw(struct iio_dev *indio_dev,
*val2 = priv->data->resolution;
return IIO_VAL_FRACTIONAL_LOG2;
+ case IIO_CHAN_INFO_CALIBBIAS:
+ *val = priv->calibbias;
+ return IIO_VAL_INT;
+
+ case IIO_CHAN_INFO_CALIBSCALE:
+ *val = priv->calibscale / MILLION;
+ *val2 = priv->calibscale % MILLION;
+ return IIO_VAL_INT_PLUS_MICRO;
+
default:
return -EINVAL;
}
@@ -643,6 +680,7 @@ static int meson_sar_adc_hw_enable(struct iio_dev *indio_dev)
{
struct meson_sar_adc_priv *priv = iio_priv(indio_dev);
int ret;
+ u32 regval;
ret = meson_sar_adc_lock(indio_dev);
if (ret)
@@ -667,6 +705,9 @@ static int meson_sar_adc_hw_enable(struct iio_dev *indio_dev)
goto err_sana_clk;
}
+ regval = FIELD_PREP(MESON_SAR_ADC_REG0_FIFO_CNT_IRQ_MASK, 1);
+ regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG0,
+ MESON_SAR_ADC_REG0_FIFO_CNT_IRQ_MASK, regval);
regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG11,
MESON_SAR_ADC_REG11_BANDGAP_EN,
MESON_SAR_ADC_REG11_BANDGAP_EN);
@@ -728,6 +769,66 @@ static int meson_sar_adc_hw_disable(struct iio_dev *indio_dev)
return 0;
}
+static irqreturn_t meson_sar_adc_irq(int irq, void *data)
+{
+ struct iio_dev *indio_dev = data;
+ struct meson_sar_adc_priv *priv = iio_priv(indio_dev);
+ unsigned int cnt, threshold;
+ u32 regval;
+
+ regmap_read(priv->regmap, MESON_SAR_ADC_REG0, &regval);
+ cnt = FIELD_GET(MESON_SAR_ADC_REG0_FIFO_COUNT_MASK, regval);
+ threshold = FIELD_GET(MESON_SAR_ADC_REG0_FIFO_CNT_IRQ_MASK, regval);
+
+ if (cnt < threshold)
+ return IRQ_NONE;
+
+ complete(&priv->done);
+
+ return IRQ_HANDLED;
+}
+
+static int meson_sar_adc_calib(struct iio_dev *indio_dev)
+{
+ struct meson_sar_adc_priv *priv = iio_priv(indio_dev);
+ int ret, nominal0, nominal1, value0, value1;
+
+ /* use points 25% and 75% for calibration */
+ nominal0 = (1 << priv->data->resolution) / 4;
+ nominal1 = (1 << priv->data->resolution) * 3 / 4;
+
+ meson_sar_adc_set_chan7_mux(indio_dev, CHAN7_MUX_VDD_DIV4);
+ usleep_range(10, 20);
+ ret = meson_sar_adc_get_sample(indio_dev,
+ &meson_sar_adc_iio_channels[7],
+ MEAN_AVERAGING, EIGHT_SAMPLES, &value0);
+ if (ret < 0)
+ goto out;
+
+ meson_sar_adc_set_chan7_mux(indio_dev, CHAN7_MUX_VDD_MUL3_DIV4);
+ usleep_range(10, 20);
+ ret = meson_sar_adc_get_sample(indio_dev,
+ &meson_sar_adc_iio_channels[7],
+ MEAN_AVERAGING, EIGHT_SAMPLES, &value1);
+ if (ret < 0)
+ goto out;
+
+ if (value1 <= value0) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ priv->calibscale = div_s64((nominal1 - nominal0) * (s64)MILLION,
+ value1 - value0);
+ priv->calibbias = nominal0 - div_s64((s64)value0 * priv->calibscale,
+ MILLION);
+ ret = 0;
+out:
+ meson_sar_adc_set_chan7_mux(indio_dev, CHAN7_MUX_CH7_INPUT);
+
+ return ret;
+}
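And a matching check of the two-point calibration derivation above, again with invented samples and a 10-bit resolution assumed:

#include <stdio.h>

#define MILLION 1000000

int main(void)
{
	int resolution = 10;
	int nominal0 = (1 << resolution) / 4;		/* 256 */
	int nominal1 = (1 << resolution) * 3 / 4;	/* 768 */
	int value0 = 250, value1 = 760;			/* hypothetical VDD/4 and VDD*3/4 readings */

	long long calibscale = (long long)(nominal1 - nominal0) * MILLION /
			       (value1 - value0);
	long long calibbias = nominal0 - value0 * calibscale / MILLION;

	/* scale = 512 * 10^6 / 510 = 1003921 (~1.004), bias = 256 - 250 = 6 */
	printf("calibscale=%lld calibbias=%lld\n", calibscale, calibbias);
	return 0;
}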
+
static const struct iio_info meson_sar_adc_iio_info = {
.read_raw = meson_sar_adc_iio_info_read_raw,
.driver_module = THIS_MODULE,
@@ -770,7 +871,7 @@ static int meson_sar_adc_probe(struct platform_device *pdev)
struct resource *res;
void __iomem *base;
const struct of_device_id *match;
- int ret;
+ int irq, ret;
indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*priv));
if (!indio_dev) {
@@ -779,6 +880,7 @@ static int meson_sar_adc_probe(struct platform_device *pdev)
}
priv = iio_priv(indio_dev);
+ init_completion(&priv->done);
match = of_match_device(meson_sar_adc_of_match, &pdev->dev);
priv->data = match->data;
@@ -797,6 +899,15 @@ static int meson_sar_adc_probe(struct platform_device *pdev)
if (IS_ERR(base))
return PTR_ERR(base);
+ irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
+ if (!irq)
+ return -EINVAL;
+
+ ret = devm_request_irq(&pdev->dev, irq, meson_sar_adc_irq, IRQF_SHARED,
+ dev_name(&pdev->dev), indio_dev);
+ if (ret)
+ return ret;
+
priv->regmap = devm_regmap_init_mmio(&pdev->dev, base,
&meson_sar_adc_regmap_config);
if (IS_ERR(priv->regmap))
@@ -857,6 +968,8 @@ static int meson_sar_adc_probe(struct platform_device *pdev)
return PTR_ERR(priv->vref);
}
+ priv->calibscale = MILLION;
+
ret = meson_sar_adc_init(indio_dev);
if (ret)
goto err;
@@ -865,6 +978,10 @@ static int meson_sar_adc_probe(struct platform_device *pdev)
if (ret)
goto err;
+ ret = meson_sar_adc_calib(indio_dev);
+ if (ret)
+ dev_warn(&pdev->dev, "calibration failed\n");
+
platform_set_drvdata(pdev, indio_dev);
ret = iio_device_register(indio_dev);
diff --git a/drivers/iio/adc/qcom-pm8xxx-xoadc.c b/drivers/iio/adc/qcom-pm8xxx-xoadc.c
new file mode 100644
index 000000000000..cea8f1fb444a
--- /dev/null
+++ b/drivers/iio/adc/qcom-pm8xxx-xoadc.c
@@ -0,0 +1,1036 @@
+/*
+ * Qualcomm PM8xxx PMIC XOADC driver
+ *
+ * These ADCs are known as HK/XO (house keeping / crystal oscillator)
+ * "XO" in "XOADC" means Crystal Oscillator. It's a bunch of
+ * specific-purpose and general purpose ADC converters and channels.
+ *
+ * Copyright (C) 2017 Linaro Ltd.
+ * Author: Linus Walleij <linus.walleij@linaro.org>
+ */
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/regulator/consumer.h>
+
+#include "qcom-vadc-common.h"
+
+/*
+ * Definitions for the "user processor" registers lifted from the v3.4
+ * Qualcomm tree. Their kernel has two out-of-tree drivers for the ADC:
+ * drivers/misc/pmic8058-xoadc.c
+ * drivers/hwmon/pm8xxx-adc.c
+ * None of them contain any complete register specification, so this is
+ * a best effort of combining the information.
+ */
+
+/* These appear to be "battery monitor" registers */
+#define ADC_ARB_BTM_CNTRL1 0x17e
+#define ADC_ARB_BTM_CNTRL1_EN_BTM BIT(0)
+#define ADC_ARB_BTM_CNTRL1_SEL_OP_MODE BIT(1)
+#define ADC_ARB_BTM_CNTRL1_MEAS_INTERVAL1 BIT(2)
+#define ADC_ARB_BTM_CNTRL1_MEAS_INTERVAL2 BIT(3)
+#define ADC_ARB_BTM_CNTRL1_MEAS_INTERVAL3 BIT(4)
+#define ADC_ARB_BTM_CNTRL1_MEAS_INTERVAL4 BIT(5)
+#define ADC_ARB_BTM_CNTRL1_EOC BIT(6)
+#define ADC_ARB_BTM_CNTRL1_REQ BIT(7)
+
+#define ADC_ARB_BTM_AMUX_CNTRL 0x17f
+#define ADC_ARB_BTM_ANA_PARAM 0x180
+#define ADC_ARB_BTM_DIG_PARAM 0x181
+#define ADC_ARB_BTM_RSV 0x182
+#define ADC_ARB_BTM_DATA1 0x183
+#define ADC_ARB_BTM_DATA0 0x184
+#define ADC_ARB_BTM_BAT_COOL_THR1 0x185
+#define ADC_ARB_BTM_BAT_COOL_THR0 0x186
+#define ADC_ARB_BTM_BAT_WARM_THR1 0x187
+#define ADC_ARB_BTM_BAT_WARM_THR0 0x188
+#define ADC_ARB_BTM_CNTRL2 0x18c
+
+/* Proper ADC registers */
+
+#define ADC_ARB_USRP_CNTRL 0x197
+#define ADC_ARB_USRP_CNTRL_EN_ARB BIT(0)
+#define ADC_ARB_USRP_CNTRL_RSV1 BIT(1)
+#define ADC_ARB_USRP_CNTRL_RSV2 BIT(2)
+#define ADC_ARB_USRP_CNTRL_RSV3 BIT(3)
+#define ADC_ARB_USRP_CNTRL_RSV4 BIT(4)
+#define ADC_ARB_USRP_CNTRL_RSV5 BIT(5)
+#define ADC_ARB_USRP_CNTRL_EOC BIT(6)
+#define ADC_ARB_USRP_CNTRL_REQ BIT(7)
+
+#define ADC_ARB_USRP_AMUX_CNTRL 0x198
+/*
+ * The channel mask includes the bits selecting channel mux and prescaler
+ * on PM8058, or channel mux and premux on PM8921.
+ */
+#define ADC_ARB_USRP_AMUX_CNTRL_CHAN_MASK 0xfc
+#define ADC_ARB_USRP_AMUX_CNTRL_RSV0 BIT(0)
+#define ADC_ARB_USRP_AMUX_CNTRL_RSV1 BIT(1)
+/* On PM8058 this is prescaling, on PM8921 this is premux */
+#define ADC_ARB_USRP_AMUX_CNTRL_PRESCALEMUX0 BIT(2)
+#define ADC_ARB_USRP_AMUX_CNTRL_PRESCALEMUX1 BIT(3)
+#define ADC_ARB_USRP_AMUX_CNTRL_SEL0 BIT(4)
+#define ADC_ARB_USRP_AMUX_CNTRL_SEL1 BIT(5)
+#define ADC_ARB_USRP_AMUX_CNTRL_SEL2 BIT(6)
+#define ADC_ARB_USRP_AMUX_CNTRL_SEL3 BIT(7)
+#define ADC_AMUX_PREMUX_SHIFT 2
+#define ADC_AMUX_SEL_SHIFT 4
+
+/* We know very little about the bits in this register */
+#define ADC_ARB_USRP_ANA_PARAM 0x199
+#define ADC_ARB_USRP_ANA_PARAM_DIS 0xFE
+#define ADC_ARB_USRP_ANA_PARAM_EN 0xFF
+
+#define ADC_ARB_USRP_DIG_PARAM 0x19A
+#define ADC_ARB_USRP_DIG_PARAM_SEL_SHIFT0 BIT(0)
+#define ADC_ARB_USRP_DIG_PARAM_SEL_SHIFT1 BIT(1)
+#define ADC_ARB_USRP_DIG_PARAM_CLK_RATE0 BIT(2)
+#define ADC_ARB_USRP_DIG_PARAM_CLK_RATE1 BIT(3)
+#define ADC_ARB_USRP_DIG_PARAM_EOC BIT(4)
+/*
+ * On a later ADC the decimation factors are defined as
+ * 00 = 512, 01 = 1024, 10 = 2048, 11 = 4096 so assume this
+ * holds also for this older XOADC.
+ */
+#define ADC_ARB_USRP_DIG_PARAM_DEC_RATE0 BIT(5)
+#define ADC_ARB_USRP_DIG_PARAM_DEC_RATE1 BIT(6)
+#define ADC_ARB_USRP_DIG_PARAM_EN BIT(7)
+#define ADC_DIG_PARAM_DEC_SHIFT 5
+
+#define ADC_ARB_USRP_RSV 0x19B
+#define ADC_ARB_USRP_RSV_RST BIT(0)
+#define ADC_ARB_USRP_RSV_DTEST0 BIT(1)
+#define ADC_ARB_USRP_RSV_DTEST1 BIT(2)
+#define ADC_ARB_USRP_RSV_OP BIT(3)
+#define ADC_ARB_USRP_RSV_IP_SEL0 BIT(4)
+#define ADC_ARB_USRP_RSV_IP_SEL1 BIT(5)
+#define ADC_ARB_USRP_RSV_IP_SEL2 BIT(6)
+#define ADC_ARB_USRP_RSV_TRM BIT(7)
+#define ADC_RSV_IP_SEL_SHIFT 4
+
+#define ADC_ARB_USRP_DATA0 0x19D
+#define ADC_ARB_USRP_DATA1 0x19C
+
+/**
+ * Physical channels which MUST exist on all PM variants in order to provide
+ * proper reference points for calibration.
+ *
+ * @PM8XXX_CHANNEL_INTERNAL: 625mV reference channel
+ * @PM8XXX_CHANNEL_125V: 1250mV reference channel
+ * @PM8XXX_CHANNEL_INTERNAL_2: 325mV reference channel
+ * @PM8XXX_CHANNEL_MUXOFF: channel to reduce input load on mux, apparently also
+ * measures XO temperature
+ */
+#define PM8XXX_CHANNEL_INTERNAL 0x0c
+#define PM8XXX_CHANNEL_125V 0x0d
+#define PM8XXX_CHANNEL_INTERNAL_2 0x0e
+#define PM8XXX_CHANNEL_MUXOFF 0x0f
+
+/*
+ * PM8058 AMUX premux scaling, two bits. This is done on the channel before
+ * reaching the AMUX.
+ */
+#define PM8058_AMUX_PRESCALE_0 0x0 /* No scaling on the signal */
+#define PM8058_AMUX_PRESCALE_1 0x1 /* Unity scaling selected by the user */
+#define PM8058_AMUX_PRESCALE_1_DIV3 0x2 /* 1/3 prescaler on the input */
+
+/* Defines reference voltage for the XOADC */
+#define AMUX_RSV0 0x0 /* XO_IN/XOADC_GND, special selection to read XO temp */
+#define AMUX_RSV1 0x1 /* PMIC_IN/XOADC_GND */
+#define AMUX_RSV2 0x2 /* PMIC_IN/BMS_CSP */
+#define AMUX_RSV3 0x3 /* not used */
+#define AMUX_RSV4 0x4 /* XOADC_GND/XOADC_GND */
+#define AMUX_RSV5 0x5 /* XOADC_VREF/XOADC_GND */
+#define XOADC_RSV_MAX 5 /* 3 bits 0..7, 3 and 6,7 are invalid */
+
+/**
+ * struct xoadc_channel - encodes channel properties and defaults
+ * @datasheet_name: the hardwarename of this channel
+ * @pre_scale_mux: prescale (PM8058) or premux (PM8921) for selecting
+ * this channel. Both this and the amux channel is needed to uniquely
+ * identify a channel. Values 0..3.
+ * @amux_channel: value of the ADC_ARB_USRP_AMUX_CNTRL register for this
+ * channel, bits 4..7, selects the amux, values 0..f
+ * @prescale: the channels have hard-coded prescale ratios defined
+ * by the hardware, this tells us what it is
+ * @type: corresponding IIO channel type, usually IIO_VOLTAGE or
+ * IIO_TEMP
+ * @scale_fn_type: the linear interpolation etc. used to convert the
+ * ADC code to the value that IIO expects, in uV or millicelsius
+ * etc. This scale function can be pretty elaborate if different
+ * thermistors are connected or other hardware characteristics are
+ * deployed.
+ * @amux_ip_rsv: ratiometric scale value used by the analog muxer: this
+ * selects the reference voltage for ratiometric scaling
+ */
+struct xoadc_channel {
+ const char *datasheet_name;
+ u8 pre_scale_mux:2;
+ u8 amux_channel:4;
+ const struct vadc_prescale_ratio prescale;
+ enum iio_chan_type type;
+ enum vadc_scale_fn_type scale_fn_type;
+ u8 amux_ip_rsv:3;
+};
+
+/**
+ * struct xoadc_variant - encodes the XOADC variant characteristics
+ * @name: name of this PMIC variant
+ * @channels: the hardware channels and respective settings and defaults
+ * @broken_ratiometric: if the PMIC has broken ratiometric scaling (this
+ * is a known problem on PM8058)
+ * @prescaling: this variant uses AMUX bits 2 & 3 for prescaling (PM8058)
+ * @second_level_mux: this variant uses AMUX bits 2 & 3 for a second level
+ * mux
+ */
+struct xoadc_variant {
+ const char name[16];
+ const struct xoadc_channel *channels;
+ bool broken_ratiometric;
+ bool prescaling;
+ bool second_level_mux;
+};
+
+/*
+ * XOADC_CHAN macro parameters:
+ * _dname: the name of the channel
+ * _presmux: prescaler (PM8058) or premux (PM8921) setting for this channel
+ * _amux: the value in bits 2..7 of the ADC_ARB_USRP_AMUX_CNTRL register
+ * for this channel. On some PMICs some of the bits select a prescaler, and
+ * on some PMICs some of the bits select various complex multiplex settings.
+ * _type: IIO channel type
+ * _prenum: prescaler numerator (dividend)
+ * _preden: prescaler denominator (divisor)
+ * _scale: scaling function type, this selects how the raw value is mangled
+ * to output the actual processed measurement
+ * _amip: analog mux input parent when using ratiometric measurements
+ */
+#define XOADC_CHAN(_dname, _presmux, _amux, _type, _prenum, _preden, _scale, _amip) \
+ { \
+ .datasheet_name = __stringify(_dname), \
+ .pre_scale_mux = _presmux, \
+ .amux_channel = _amux, \
+ .prescale = { .num = _prenum, .den = _preden }, \
+ .type = _type, \
+ .scale_fn_type = _scale, \
+ .amux_ip_rsv = _amip, \
+ }
+
+/*
+ * Taken from arch/arm/mach-msm/board-9615.c in the vendor tree:
+ * TODO: incomplete, needs testing.
+ */
+static const struct xoadc_channel pm8018_xoadc_channels[] = {
+ XOADC_CHAN(VCOIN, 0x00, 0x00, IIO_VOLTAGE, 1, 3, SCALE_DEFAULT, AMUX_RSV1),
+ XOADC_CHAN(VBAT, 0x00, 0x01, IIO_VOLTAGE, 1, 3, SCALE_DEFAULT, AMUX_RSV1),
+ XOADC_CHAN(VPH_PWR, 0x00, 0x02, IIO_VOLTAGE, 1, 3, SCALE_DEFAULT, AMUX_RSV1),
+ XOADC_CHAN(DIE_TEMP, 0x00, 0x0b, IIO_TEMP, 1, 1, SCALE_PMIC_THERM, AMUX_RSV1),
+ /* Used for battery ID or battery temperature */
+ XOADC_CHAN(AMUX8, 0x00, 0x08, IIO_VOLTAGE, 1, 1, SCALE_DEFAULT, AMUX_RSV2),
+ XOADC_CHAN(INTERNAL, 0x00, 0x0c, IIO_VOLTAGE, 1, 1, SCALE_DEFAULT, AMUX_RSV1),
+ XOADC_CHAN(125V, 0x00, 0x0d, IIO_VOLTAGE, 1, 1, SCALE_DEFAULT, AMUX_RSV1),
+ XOADC_CHAN(MUXOFF, 0x00, 0x0f, IIO_TEMP, 1, 1, SCALE_XOTHERM, AMUX_RSV0),
+ { }, /* Sentinel */
+};
+
+/*
+ * Taken from arch/arm/mach-msm/board-8930-pmic.c in the vendor tree:
+ * TODO: needs testing.
+ */
+static const struct xoadc_channel pm8038_xoadc_channels[] = {
+ XOADC_CHAN(VCOIN, 0x00, 0x00, IIO_VOLTAGE, 1, 3, SCALE_DEFAULT, AMUX_RSV1),
+ XOADC_CHAN(VBAT, 0x00, 0x01, IIO_VOLTAGE, 1, 3, SCALE_DEFAULT, AMUX_RSV1),
+ XOADC_CHAN(DCIN, 0x00, 0x02, IIO_VOLTAGE, 1, 6, SCALE_DEFAULT, AMUX_RSV1),
+ XOADC_CHAN(ICHG, 0x00, 0x03, IIO_VOLTAGE, 1, 1, SCALE_DEFAULT, AMUX_RSV1),
+ XOADC_CHAN(VPH_PWR, 0x00, 0x04, IIO_VOLTAGE, 1, 3, SCALE_DEFAULT, AMUX_RSV1),
+ XOADC_CHAN(AMUX5, 0x00, 0x05, IIO_VOLTAGE, 1, 1, SCALE_DEFAULT, AMUX_RSV1),
+ XOADC_CHAN(AMUX6, 0x00, 0x06, IIO_VOLTAGE, 1, 1, SCALE_DEFAULT, AMUX_RSV1),
+ XOADC_CHAN(AMUX7, 0x00, 0x07, IIO_VOLTAGE, 1, 1, SCALE_DEFAULT, AMUX_RSV1),
+ /* AMUX8 used for battery temperature in most cases */
+ XOADC_CHAN(AMUX8, 0x00, 0x08, IIO_TEMP, 1, 1, SCALE_THERM_100K_PULLUP, AMUX_RSV2),
+ XOADC_CHAN(AMUX9, 0x00, 0x09, IIO_VOLTAGE, 1, 1, SCALE_DEFAULT, AMUX_RSV1),
+ XOADC_CHAN(USB_VBUS, 0x00, 0x0a, IIO_VOLTAGE, 1, 4, SCALE_DEFAULT, AMUX_RSV1),
+ XOADC_CHAN(DIE_TEMP, 0x00, 0x0b, IIO_TEMP, 1, 1, SCALE_PMIC_THERM, AMUX_RSV1),
+ XOADC_CHAN(INTERNAL, 0x00, 0x0c, IIO_VOLTAGE, 1, 1, SCALE_DEFAULT, AMUX_RSV1),
+ XOADC_CHAN(125V, 0x00, 0x0d, IIO_VOLTAGE, 1, 1, SCALE_DEFAULT, AMUX_RSV1),
+ XOADC_CHAN(INTERNAL_2, 0x00, 0x0e, IIO_VOLTAGE, 1, 1, SCALE_DEFAULT, AMUX_RSV1),
+ XOADC_CHAN(MUXOFF, 0x00, 0x0f, IIO_TEMP, 1, 1, SCALE_XOTHERM, AMUX_RSV0),
+ { }, /* Sentinel */
+};
+
+/*
+ * This was created by cross-referencing the vendor tree
+ * arch/arm/mach-msm/board-msm8x60.c msm_adc_channels_data[]
+ * with the "channel types" (first field) to find the right
+ * configuration for these channels on an MSM8x60 i.e. PM8058
+ * setup.
+ */
+static const struct xoadc_channel pm8058_xoadc_channels[] = {
+ XOADC_CHAN(VCOIN, 0x00, 0x00, IIO_VOLTAGE, 1, 2, SCALE_DEFAULT, AMUX_RSV1),
+ XOADC_CHAN(VBAT, 0x00, 0x01, IIO_VOLTAGE, 1, 3, SCALE_DEFAULT, AMUX_RSV1),
+ XOADC_CHAN(DCIN, 0x00, 0x02, IIO_VOLTAGE, 1, 10, SCALE_DEFAULT, AMUX_RSV1),
+ XOADC_CHAN(ICHG, 0x00, 0x03, IIO_VOLTAGE, 1, 1, SCALE_DEFAULT, AMUX_RSV1),
+ XOADC_CHAN(VPH_PWR, 0x00, 0x04, IIO_VOLTAGE, 1, 3, SCALE_DEFAULT, AMUX_RSV1),
+ /*
+ * AMUX channels 5 thru 9 are referred to as MPP5 thru MPP9 in
+ * some code and documentation. But they are really just five
+ * channels like any other. They are connected to a switching
+ * matrix where they can be routed to any of the MPPs, not just
+ * 1-to-1 onto MPP5 thru 9, so naming them MPP5 thru MPP9 is
+ * very confusing.
+ */
+ XOADC_CHAN(AMUX5, 0x00, 0x05, IIO_VOLTAGE, 1, 1, SCALE_DEFAULT, AMUX_RSV1),
+ XOADC_CHAN(AMUX6, 0x00, 0x06, IIO_VOLTAGE, 1, 1, SCALE_DEFAULT, AMUX_RSV1),
+ XOADC_CHAN(AMUX7, 0x00, 0x07, IIO_VOLTAGE, 1, 2, SCALE_DEFAULT, AMUX_RSV1),
+ XOADC_CHAN(AMUX8, 0x00, 0x08, IIO_VOLTAGE, 1, 2, SCALE_DEFAULT, AMUX_RSV1),
+ XOADC_CHAN(AMUX9, 0x00, 0x09, IIO_VOLTAGE, 1, 3, SCALE_DEFAULT, AMUX_RSV1),
+ XOADC_CHAN(USB_VBUS, 0x00, 0x0a, IIO_VOLTAGE, 1, 3, SCALE_DEFAULT, AMUX_RSV1),
+ XOADC_CHAN(DIE_TEMP, 0x00, 0x0b, IIO_TEMP, 1, 1, SCALE_PMIC_THERM, AMUX_RSV1),
+ XOADC_CHAN(INTERNAL, 0x00, 0x0c, IIO_VOLTAGE, 1, 1, SCALE_DEFAULT, AMUX_RSV1),
+ XOADC_CHAN(125V, 0x00, 0x0d, IIO_VOLTAGE, 1, 1, SCALE_DEFAULT, AMUX_RSV1),
+ XOADC_CHAN(INTERNAL_2, 0x00, 0x0e, IIO_VOLTAGE, 1, 1, SCALE_DEFAULT, AMUX_RSV1),
+ XOADC_CHAN(MUXOFF, 0x00, 0x0f, IIO_TEMP, 1, 1, SCALE_XOTHERM, AMUX_RSV0),
+ /* There are also "unity" and divided-by-3 (prescaler) channels, but no one is using them */
+ { }, /* Sentinel */
+};
+
+/*
+ * The PM8921 has some pre-muxing on its channels, this comes from the vendor tree
+ * include/linux/mfd/pm8xxx/pm8xxx-adc.h
+ * board-flo-pmic.c (Nexus 7) and board-8064-pmic.c
+ */
+static const struct xoadc_channel pm8921_xoadc_channels[] = {
+ XOADC_CHAN(VCOIN, 0x00, 0x00, IIO_VOLTAGE, 1, 3, SCALE_DEFAULT, AMUX_RSV1),
+ XOADC_CHAN(VBAT, 0x00, 0x01, IIO_VOLTAGE, 1, 3, SCALE_DEFAULT, AMUX_RSV1),
+ XOADC_CHAN(DCIN, 0x00, 0x02, IIO_VOLTAGE, 1, 6, SCALE_DEFAULT, AMUX_RSV1),
+ /* channel "ICHG" is reserved and not used on PM8921 */
+ XOADC_CHAN(VPH_PWR, 0x00, 0x04, IIO_VOLTAGE, 1, 3, SCALE_DEFAULT, AMUX_RSV1),
+ XOADC_CHAN(IBAT, 0x00, 0x05, IIO_VOLTAGE, 1, 1, SCALE_DEFAULT, AMUX_RSV1),
+ /* CHAN 6 & 7 (MPP1 & MPP2) are reserved for MPP channels on PM8921 */
+ XOADC_CHAN(BATT_THERM, 0x00, 0x08, IIO_TEMP, 1, 1, SCALE_THERM_100K_PULLUP, AMUX_RSV1),
+ XOADC_CHAN(BATT_ID, 0x00, 0x09, IIO_VOLTAGE, 1, 1, SCALE_DEFAULT, AMUX_RSV1),
+ XOADC_CHAN(USB_VBUS, 0x00, 0x0a, IIO_VOLTAGE, 1, 4, SCALE_DEFAULT, AMUX_RSV1),
+ XOADC_CHAN(DIE_TEMP, 0x00, 0x0b, IIO_TEMP, 1, 1, SCALE_PMIC_THERM, AMUX_RSV1),
+ XOADC_CHAN(INTERNAL, 0x00, 0x0c, IIO_VOLTAGE, 1, 1, SCALE_DEFAULT, AMUX_RSV1),
+ XOADC_CHAN(125V, 0x00, 0x0d, IIO_VOLTAGE, 1, 1, SCALE_DEFAULT, AMUX_RSV1),
+ /* FIXME: look into the scaling of this temperature */
+ XOADC_CHAN(CHG_TEMP, 0x00, 0x0e, IIO_TEMP, 1, 1, SCALE_DEFAULT, AMUX_RSV1),
+ XOADC_CHAN(MUXOFF, 0x00, 0x0f, IIO_TEMP, 1, 1, SCALE_XOTHERM, AMUX_RSV0),
+ /* The following channels have premux bit 0 set to 1 (all end in 4) */
+ XOADC_CHAN(ATEST_8, 0x01, 0x00, IIO_VOLTAGE, 1, 1, SCALE_DEFAULT, AMUX_RSV1),
+ /* Set scaling to 1/2 based on the name for these two */
+ XOADC_CHAN(USB_SNS_DIV20, 0x01, 0x01, IIO_VOLTAGE, 1, 2, SCALE_DEFAULT, AMUX_RSV1),
+ XOADC_CHAN(DCIN_SNS_DIV20, 0x01, 0x02, IIO_VOLTAGE, 1, 2, SCALE_DEFAULT, AMUX_RSV1),
+ XOADC_CHAN(AMUX3, 0x01, 0x03, IIO_VOLTAGE, 1, 1, SCALE_DEFAULT, AMUX_RSV1),
+ XOADC_CHAN(AMUX4, 0x01, 0x04, IIO_VOLTAGE, 1, 1, SCALE_DEFAULT, AMUX_RSV1),
+ XOADC_CHAN(AMUX5, 0x01, 0x05, IIO_VOLTAGE, 1, 1, SCALE_DEFAULT, AMUX_RSV1),
+ XOADC_CHAN(AMUX6, 0x01, 0x06, IIO_VOLTAGE, 1, 1, SCALE_DEFAULT, AMUX_RSV1),
+ XOADC_CHAN(AMUX7, 0x01, 0x07, IIO_VOLTAGE, 1, 1, SCALE_DEFAULT, AMUX_RSV1),
+ XOADC_CHAN(AMUX8, 0x01, 0x08, IIO_VOLTAGE, 1, 1, SCALE_DEFAULT, AMUX_RSV1),
+ /* Internal test signals, I think */
+ XOADC_CHAN(ATEST_1, 0x01, 0x09, IIO_VOLTAGE, 1, 1, SCALE_DEFAULT, AMUX_RSV1),
+ XOADC_CHAN(ATEST_2, 0x01, 0x0a, IIO_VOLTAGE, 1, 1, SCALE_DEFAULT, AMUX_RSV1),
+ XOADC_CHAN(ATEST_3, 0x01, 0x0b, IIO_VOLTAGE, 1, 1, SCALE_DEFAULT, AMUX_RSV1),
+ XOADC_CHAN(ATEST_4, 0x01, 0x0c, IIO_VOLTAGE, 1, 1, SCALE_DEFAULT, AMUX_RSV1),
+ XOADC_CHAN(ATEST_5, 0x01, 0x0d, IIO_VOLTAGE, 1, 1, SCALE_DEFAULT, AMUX_RSV1),
+ XOADC_CHAN(ATEST_6, 0x01, 0x0e, IIO_VOLTAGE, 1, 1, SCALE_DEFAULT, AMUX_RSV1),
+ XOADC_CHAN(ATEST_7, 0x01, 0x0f, IIO_VOLTAGE, 1, 1, SCALE_DEFAULT, AMUX_RSV1),
+ /* The following channels have premux bit 1 set to 1 (all end in 8) */
+ /* I guess even ATEST8 will be divided by 3 here */
+ XOADC_CHAN(ATEST_8, 0x02, 0x00, IIO_VOLTAGE, 1, 3, SCALE_DEFAULT, AMUX_RSV1),
+ /* I guess div 2 div 3 becomes div 6 */
+ XOADC_CHAN(USB_SNS_DIV20_DIV3, 0x02, 0x01, IIO_VOLTAGE, 1, 6, SCALE_DEFAULT, AMUX_RSV1),
+ XOADC_CHAN(DCIN_SNS_DIV20_DIV3, 0x02, 0x02, IIO_VOLTAGE, 1, 6, SCALE_DEFAULT, AMUX_RSV1),
+ XOADC_CHAN(AMUX3_DIV3, 0x02, 0x03, IIO_VOLTAGE, 1, 3, SCALE_DEFAULT, AMUX_RSV1),
+ XOADC_CHAN(AMUX4_DIV3, 0x02, 0x04, IIO_VOLTAGE, 1, 3, SCALE_DEFAULT, AMUX_RSV1),
+ XOADC_CHAN(AMUX5_DIV3, 0x02, 0x05, IIO_VOLTAGE, 1, 3, SCALE_DEFAULT, AMUX_RSV1),
+ XOADC_CHAN(AMUX6_DIV3, 0x02, 0x06, IIO_VOLTAGE, 1, 3, SCALE_DEFAULT, AMUX_RSV1),
+ XOADC_CHAN(AMUX7_DIV3, 0x02, 0x07, IIO_VOLTAGE, 1, 3, SCALE_DEFAULT, AMUX_RSV1),
+ XOADC_CHAN(AMUX8_DIV3, 0x02, 0x08, IIO_VOLTAGE, 1, 3, SCALE_DEFAULT, AMUX_RSV1),
+ XOADC_CHAN(ATEST_1_DIV3, 0x02, 0x09, IIO_VOLTAGE, 1, 3, SCALE_DEFAULT, AMUX_RSV1),
+ XOADC_CHAN(ATEST_2_DIV3, 0x02, 0x0a, IIO_VOLTAGE, 1, 3, SCALE_DEFAULT, AMUX_RSV1),
+ XOADC_CHAN(ATEST_3_DIV3, 0x02, 0x0b, IIO_VOLTAGE, 1, 3, SCALE_DEFAULT, AMUX_RSV1),
+ XOADC_CHAN(ATEST_4_DIV3, 0x02, 0x0c, IIO_VOLTAGE, 1, 3, SCALE_DEFAULT, AMUX_RSV1),
+ XOADC_CHAN(ATEST_5_DIV3, 0x02, 0x0d, IIO_VOLTAGE, 1, 3, SCALE_DEFAULT, AMUX_RSV1),
+ XOADC_CHAN(ATEST_6_DIV3, 0x02, 0x0e, IIO_VOLTAGE, 1, 3, SCALE_DEFAULT, AMUX_RSV1),
+ XOADC_CHAN(ATEST_7_DIV3, 0x02, 0x0f, IIO_VOLTAGE, 1, 3, SCALE_DEFAULT, AMUX_RSV1),
+ { }, /* Sentinel */
+};
+
+/**
+ * struct pm8xxx_chan_info - ADC channel information
+ * @name: name of this channel
+ * @hwchan: pointer to hardware channel information (muxing & scaling settings)
+ * @calibration: whether to use absolute or ratiometric calibration
+ * @decimation: 0, 1, 2, 3
+ * @amux_ip_rsv: ratiometric scale value if using ratiometric
+ * calibration: 0, 1, 2, 4, 5.
+ */
+struct pm8xxx_chan_info {
+ const char *name;
+ const struct xoadc_channel *hwchan;
+ enum vadc_calibration calibration;
+ u8 decimation:2;
+ u8 amux_ip_rsv:3;
+};
+
+/**
+ * struct pm8xxx_xoadc - state container for the XOADC
+ * @dev: pointer to device
+ * @map: regmap to access registers
+ * @vref: reference voltage regulator
+ * @variant: XOADC variant-specific data such as the set and
+ * characteristics of the channels, and sensible default settings
+ * @nchans: number of channels, configured by the device tree
+ * @chans: the channel information per-channel, configured by the device tree
+ * @iio_chans: IIO channel specifiers
+ * @graph: linear calibration parameters for absolute and
+ * ratiometric measurements
+ * @complete: completion to indicate end of conversion
+ * @lock: lock to restrict access to the hardware to one client at a time
+ */
+struct pm8xxx_xoadc {
+ struct device *dev;
+ struct regmap *map;
+ const struct xoadc_variant *variant;
+ struct regulator *vref;
+ unsigned int nchans;
+ struct pm8xxx_chan_info *chans;
+ struct iio_chan_spec *iio_chans;
+ struct vadc_linear_graph graph[2];
+ struct completion complete;
+ struct mutex lock;
+};
+
+static irqreturn_t pm8xxx_eoc_irq(int irq, void *d)
+{
+ struct iio_dev *indio_dev = d;
+ struct pm8xxx_xoadc *adc = iio_priv(indio_dev);
+
+ complete(&adc->complete);
+
+ return IRQ_HANDLED;
+}
+
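+/*
+ * Look up the per-channel software state by hardware AMUX channel number;
+ * returns NULL if that channel was not described in the device tree.
+ */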
+static struct pm8xxx_chan_info *
+pm8xxx_get_channel(struct pm8xxx_xoadc *adc, u8 chan)
+{
+ struct pm8xxx_chan_info *ch;
+ int i;
+
+ for (i = 0; i < adc->nchans; i++) {
+ ch = &adc->chans[i];
+ if (ch->hwchan->amux_channel == chan)
+ break;
+ }
+ if (i == adc->nchans)
+ return NULL;
+
+ return ch;
+}
+
+static int pm8xxx_read_channel_rsv(struct pm8xxx_xoadc *adc,
+ const struct pm8xxx_chan_info *ch,
+ u8 rsv, u16 *adc_code,
+ bool force_ratiometric)
+{
+ int ret;
+ unsigned int val;
+ u8 rsvmask, rsvval;
+ u8 lsb, msb;
+
+ dev_dbg(adc->dev, "read channel \"%s\", amux %d, prescale/mux: %d, rsv %d\n",
+ ch->name, ch->hwchan->amux_channel, ch->hwchan->pre_scale_mux, rsv);
+
+ mutex_lock(&adc->lock);
+
+ /* Mux in this channel */
+ val = ch->hwchan->amux_channel << ADC_AMUX_SEL_SHIFT;
+ val |= ch->hwchan->pre_scale_mux << ADC_AMUX_PREMUX_SHIFT;
+ ret = regmap_write(adc->map, ADC_ARB_USRP_AMUX_CNTRL, val);
+ if (ret)
+ goto unlock;
+
+ /* Set up ratiometric scale value, mask off all bits except these */
+ rsvmask = (ADC_ARB_USRP_RSV_RST | ADC_ARB_USRP_RSV_DTEST0 |
+ ADC_ARB_USRP_RSV_DTEST1 | ADC_ARB_USRP_RSV_OP);
+ if (adc->variant->broken_ratiometric && !force_ratiometric) {
+ /*
+ * Apparently the PM8058 has some kind of bug which is
+ * reflected in the vendor tree drivers/misc/pmic8058-xoadc.c
+ * which just hardcodes the RSV selector to SEL1 (0x20) for
+ * most cases and SEL0 (0x10) for the MUXOFF channel only.
+ * If we force ratiometric (currently only done when attempting
+ * to do ratiometric calibration) this doesn't seem to work
+ * very well and I suspect ratiometric conversion is simply
+ * broken or not supported on the PM8058.
+ *
+ * Maybe IO_SEL2 doesn't exist on PM8058 and bits 4 & 5 select
+ * the mode alone.
+ *
+ * Some PM8058 register documentation would be nice to get
+ * this right.
+ */
+ if (ch->hwchan->amux_channel == PM8XXX_CHANNEL_MUXOFF)
+ rsvval = ADC_ARB_USRP_RSV_IP_SEL0;
+ else
+ rsvval = ADC_ARB_USRP_RSV_IP_SEL1;
+ } else {
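+ /* rsv 0xff means: use the RSV predefined for this channel */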
+ if (rsv == 0xff)
+ rsvval = (ch->amux_ip_rsv << ADC_RSV_IP_SEL_SHIFT) |
+ ADC_ARB_USRP_RSV_TRM;
+ else
+ rsvval = (rsv << ADC_RSV_IP_SEL_SHIFT) |
+ ADC_ARB_USRP_RSV_TRM;
+ }
+
+ ret = regmap_update_bits(adc->map,
+ ADC_ARB_USRP_RSV,
+ ~rsvmask,
+ rsvval);
+ if (ret)
+ goto unlock;
+
+ ret = regmap_write(adc->map, ADC_ARB_USRP_ANA_PARAM,
+ ADC_ARB_USRP_ANA_PARAM_DIS);
+ if (ret)
+ goto unlock;
+
+ /* Decimation factor */
+ ret = regmap_write(adc->map, ADC_ARB_USRP_DIG_PARAM,
+ ADC_ARB_USRP_DIG_PARAM_SEL_SHIFT0 |
+ ADC_ARB_USRP_DIG_PARAM_SEL_SHIFT1 |
+ ch->decimation << ADC_DIG_PARAM_DEC_SHIFT);
+ if (ret)
+ goto unlock;
+
+ ret = regmap_write(adc->map, ADC_ARB_USRP_ANA_PARAM,
+ ADC_ARB_USRP_ANA_PARAM_EN);
+ if (ret)
+ goto unlock;
+
+ /* Enable the arbiter, the Qualcomm code does it twice like this */
+ ret = regmap_write(adc->map, ADC_ARB_USRP_CNTRL,
+ ADC_ARB_USRP_CNTRL_EN_ARB);
+ if (ret)
+ goto unlock;
+ ret = regmap_write(adc->map, ADC_ARB_USRP_CNTRL,
+ ADC_ARB_USRP_CNTRL_EN_ARB);
+ if (ret)
+ goto unlock;
+
+ /* Fire a request! */
+ reinit_completion(&adc->complete);
+ ret = regmap_write(adc->map, ADC_ARB_USRP_CNTRL,
+ ADC_ARB_USRP_CNTRL_EN_ARB |
+ ADC_ARB_USRP_CNTRL_REQ);
+ if (ret)
+ goto unlock;
+
+ /* Next the interrupt occurs */
+ ret = wait_for_completion_timeout(&adc->complete,
+ VADC_CONV_TIME_MAX_US);
+ if (!ret) {
+ dev_err(adc->dev, "conversion timed out\n");
+ ret = -ETIMEDOUT;
+ goto unlock;
+ }
+
+ ret = regmap_read(adc->map, ADC_ARB_USRP_DATA0, &val);
+ if (ret)
+ goto unlock;
+ lsb = val;
+ ret = regmap_read(adc->map, ADC_ARB_USRP_DATA1, &val);
+ if (ret)
+ goto unlock;
+ msb = val;
+ *adc_code = (msb << 8) | lsb;
+
+ /* Turn off the ADC by setting the arbiter to 0 twice */
+ ret = regmap_write(adc->map, ADC_ARB_USRP_CNTRL, 0);
+ if (ret)
+ goto unlock;
+ ret = regmap_write(adc->map, ADC_ARB_USRP_CNTRL, 0);
+ if (ret)
+ goto unlock;
+
+unlock:
+ mutex_unlock(&adc->lock);
+ return ret;
+}
+
+static int pm8xxx_read_channel(struct pm8xxx_xoadc *adc,
+ const struct pm8xxx_chan_info *ch,
+ u16 *adc_code)
+{
+ /*
+ * Normally we just use the ratiometric scale value (RSV) predefined
+ * for the channel, but during calibration we need to modify this
+ * so this wrapper is a helper hiding the more complex version.
+ */
+ return pm8xxx_read_channel_rsv(adc, ch, 0xff, adc_code, false);
+}
+
+static int pm8xxx_calibrate_device(struct pm8xxx_xoadc *adc)
+{
+ const struct pm8xxx_chan_info *ch;
+ u16 read_1250v;
+ u16 read_0625v;
+ u16 read_nomux_rsv5;
+ u16 read_nomux_rsv4;
+ int ret;
+
+ adc->graph[VADC_CALIB_ABSOLUTE].dx = VADC_ABSOLUTE_RANGE_UV;
+ adc->graph[VADC_CALIB_RATIOMETRIC].dx = VADC_RATIOMETRIC_RANGE;
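+
+ /*
+ * These graphs are consumed by qcom_vadc_scale(): dy ADC codes span
+ * dx physical units (625000 uV for absolute calibration, 1800 units
+ * for ratiometric), measured relative to the ground code gnd.
+ */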
+
+ /* Common reference channel calibration */
+ ch = pm8xxx_get_channel(adc, PM8XXX_CHANNEL_125V);
+ if (!ch)
+ return -ENODEV;
+ ret = pm8xxx_read_channel(adc, ch, &read_1250v);
+ if (ret) {
+ dev_err(adc->dev, "could not read 1.25V reference channel\n");
+ return -ENODEV;
+ }
+ ch = pm8xxx_get_channel(adc, PM8XXX_CHANNEL_INTERNAL);
+ if (!ch)
+ return -ENODEV;
+ ret = pm8xxx_read_channel(adc, ch, &read_0625v);
+ if (ret) {
+ dev_err(adc->dev, "could not read 0.625V reference channel\n");
+ return -ENODEV;
+ }
+ if (read_1250v == read_0625v) {
+ dev_err(adc->dev, "read same ADC code for 1.25V and 0.625V\n");
+ return -ENODEV;
+ }
+
+ adc->graph[VADC_CALIB_ABSOLUTE].dy = read_1250v - read_0625v;
+ adc->graph[VADC_CALIB_ABSOLUTE].gnd = read_0625v;
+
+ dev_info(adc->dev, "absolute calibration dx = %d uV, dy = %d units\n",
+ VADC_ABSOLUTE_RANGE_UV, adc->graph[VADC_CALIB_ABSOLUTE].dy);
+
+ /* Ratiometric calibration */
+ ch = pm8xxx_get_channel(adc, PM8XXX_CHANNEL_MUXOFF);
+ if (!ch)
+ return -ENODEV;
+ ret = pm8xxx_read_channel_rsv(adc, ch, AMUX_RSV5,
+ &read_nomux_rsv5, true);
+ if (ret) {
+ dev_err(adc->dev, "could not read MUXOFF reference channel\n");
+ return -ENODEV;
+ }
+ ret = pm8xxx_read_channel_rsv(adc, ch, AMUX_RSV4,
+ &read_nomux_rsv4, true);
+ if (ret) {
+ dev_err(adc->dev, "could not read MUXOFF reference channel\n");
+ return -ENODEV;
+ }
+ adc->graph[VADC_CALIB_RATIOMETRIC].dy =
+ read_nomux_rsv5 - read_nomux_rsv4;
+ adc->graph[VADC_CALIB_RATIOMETRIC].gnd = read_nomux_rsv4;
+
+ dev_info(adc->dev, "ratiometric calibration dx = %d, dy = %d units\n",
+ VADC_RATIOMETRIC_RANGE,
+ adc->graph[VADC_CALIB_RATIOMETRIC].dy);
+
+ return 0;
+}
+
+static int pm8xxx_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct pm8xxx_xoadc *adc = iio_priv(indio_dev);
+ const struct pm8xxx_chan_info *ch;
+ u16 adc_code;
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_PROCESSED:
+ ch = pm8xxx_get_channel(adc, chan->address);
+ if (!ch) {
+ dev_err(adc->dev, "no such channel %lu\n",
+ chan->address);
+ return -EINVAL;
+ }
+ ret = pm8xxx_read_channel(adc, ch, &adc_code);
+ if (ret)
+ return ret;
+
+ ret = qcom_vadc_scale(ch->hwchan->scale_fn_type,
+ &adc->graph[ch->calibration],
+ &ch->hwchan->prescale,
+ (ch->calibration == VADC_CALIB_ABSOLUTE),
+ adc_code, val);
+ if (ret)
+ return ret;
+
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_RAW:
+ ch = pm8xxx_get_channel(adc, chan->address);
+ if (!ch) {
+ dev_err(adc->dev, "no such channel %lu\n",
+ chan->address);
+ return -EINVAL;
+ }
+ ret = pm8xxx_read_channel(adc, ch, &adc_code);
+ if (ret)
+ return ret;
+
+ *val = (int)adc_code;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int pm8xxx_of_xlate(struct iio_dev *indio_dev,
+ const struct of_phandle_args *iiospec)
+{
+ struct pm8xxx_xoadc *adc = iio_priv(indio_dev);
+ u8 pre_scale_mux;
+ u8 amux_channel;
+ unsigned int i;
+
+ /*
+ * First cell is prescaler or premux, second cell is analog
+ * mux.
+ */
+ if (iiospec->args_count != 2) {
+ dev_err(&indio_dev->dev, "wrong number of arguments for %s need 2 got %d\n",
+ iiospec->np->name,
+ iiospec->args_count);
+ return -EINVAL;
+ }
+ pre_scale_mux = (u8)iiospec->args[0];
+ amux_channel = (u8)iiospec->args[1];
+ dev_dbg(&indio_dev->dev, "pre scale/mux: %02x, amux: %02x\n",
+ pre_scale_mux, amux_channel);
+
+ /* We need to match exactly on the prescale/premux and channel */
+ for (i = 0; i < adc->nchans; i++)
+ if (adc->chans[i].hwchan->pre_scale_mux == pre_scale_mux &&
+ adc->chans[i].hwchan->amux_channel == amux_channel)
+ return i;
+
+ return -EINVAL;
+}
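+
+/*
+ * An IIO consumer selects a channel with two cells, premux then AMUX, for
+ * example (hypothetical provider label, for illustration only):
+ *
+ *   io-channels = <&xoadc 0x00 0x01>;   (premux 0, AMUX 1: VBAT)
+ */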
+
+static const struct iio_info pm8xxx_xoadc_info = {
+ .driver_module = THIS_MODULE,
+ .of_xlate = pm8xxx_of_xlate,
+ .read_raw = pm8xxx_read_raw,
+};
+
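+/*
+ * Each channel is a child node with a two-cell "reg" (premux, AMUX channel)
+ * and optional "qcom,ratiometric" (RSV number) and "qcom,decimation"
+ * properties, along the lines of (node and label names made up here):
+ *
+ *   vcoin: adc-channel@0 {
+ *       reg = <0x00 0x00>;
+ *   };
+ */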
+static int pm8xxx_xoadc_parse_channel(struct device *dev,
+ struct device_node *np,
+ const struct xoadc_channel *hw_channels,
+ struct iio_chan_spec *iio_chan,
+ struct pm8xxx_chan_info *ch)
+{
+ const char *name = np->name;
+ const struct xoadc_channel *hwchan;
+ u32 pre_scale_mux, amux_channel;
+ u32 rsv, dec;
+ int ret;
+ int chid;
+
+ ret = of_property_read_u32_index(np, "reg", 0, &pre_scale_mux);
+ if (ret) {
+ dev_err(dev, "invalid pre scale/mux number %s\n", name);
+ return ret;
+ }
+ ret = of_property_read_u32_index(np, "reg", 1, &amux_channel);
+ if (ret) {
+ dev_err(dev, "invalid amux channel number %s\n", name);
+ return ret;
+ }
+
+ /* Find the right channel setting */
+ chid = 0;
+ hwchan = &hw_channels[0];
+ while (hwchan && hwchan->datasheet_name) {
+ if (hwchan->pre_scale_mux == pre_scale_mux &&
+ hwchan->amux_channel == amux_channel)
+ break;
+ hwchan++;
+ chid++;
+ }
+ /* The sentinel does not have a name assigned */
+ if (!hwchan->datasheet_name) {
+ dev_err(dev, "could not locate channel %02x/%02x\n",
+ pre_scale_mux, amux_channel);
+ return -EINVAL;
+ }
+ ch->name = name;
+ ch->hwchan = hwchan;
+ /* Everyone seems to use absolute calibration except in special cases */
+ ch->calibration = VADC_CALIB_ABSOLUTE;
+ /* Everyone seems to use default ("type 2") decimation */
+ ch->decimation = VADC_DEF_DECIMATION;
+
+ if (!of_property_read_u32(np, "qcom,ratiometric", &rsv)) {
+ ch->calibration = VADC_CALIB_RATIOMETRIC;
+ if (rsv > XOADC_RSV_MAX) {
+ dev_err(dev, "%s too large RSV value %d\n", name, rsv);
+ return -EINVAL;
+ }
+ if (rsv == AMUX_RSV3) {
+ dev_err(dev, "%s invalid RSV value %d\n", name, rsv);
+ return -EINVAL;
+ }
+ }
+
+ /* Optional decimation, if omitted we use the default */
+ ret = of_property_read_u32(np, "qcom,decimation", &dec);
+ if (!ret) {
+ ret = qcom_vadc_decimation_from_dt(dec);
+ if (ret < 0) {
+ dev_err(dev, "%s invalid decimation %d\n",
+ name, dec);
+ return ret;
+ }
+ ch->decimation = ret;
+ }
+
+ iio_chan->channel = chid;
+ iio_chan->address = hwchan->amux_channel;
+ iio_chan->datasheet_name = hwchan->datasheet_name;
+ iio_chan->type = hwchan->type;
+ /* All channels are raw or processed */
+ iio_chan->info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_PROCESSED);
+ iio_chan->indexed = 1;
+
+ dev_dbg(dev, "channel [PRESCALE/MUX: %02x AMUX: %02x] \"%s\" "
+ "ref voltage: %d, decimation %d "
+ "prescale %d/%d, scale function %d\n",
+ hwchan->pre_scale_mux, hwchan->amux_channel, ch->name,
+ ch->amux_ip_rsv, ch->decimation, hwchan->prescale.num,
+ hwchan->prescale.den, hwchan->scale_fn_type);
+
+ return 0;
+}
+
+static int pm8xxx_xoadc_parse_channels(struct pm8xxx_xoadc *adc,
+ struct device_node *np)
+{
+ struct device_node *child;
+ struct pm8xxx_chan_info *ch;
+ int ret;
+ int i;
+
+ adc->nchans = of_get_available_child_count(np);
+ if (!adc->nchans) {
+ dev_err(adc->dev, "no channel children\n");
+ return -ENODEV;
+ }
+ dev_dbg(adc->dev, "found %d ADC channels\n", adc->nchans);
+
+ adc->iio_chans = devm_kcalloc(adc->dev, adc->nchans,
+ sizeof(*adc->iio_chans), GFP_KERNEL);
+ if (!adc->iio_chans)
+ return -ENOMEM;
+
+ adc->chans = devm_kcalloc(adc->dev, adc->nchans,
+ sizeof(*adc->chans), GFP_KERNEL);
+ if (!adc->chans)
+ return -ENOMEM;
+
+ i = 0;
+ for_each_available_child_of_node(np, child) {
+ ch = &adc->chans[i];
+ ret = pm8xxx_xoadc_parse_channel(adc->dev, child,
+ adc->variant->channels,
+ &adc->iio_chans[i],
+ ch);
+ if (ret) {
+ of_node_put(child);
+ return ret;
+ }
+ i++;
+ }
+
+ /* Check for required channels */
+ ch = pm8xxx_get_channel(adc, PM8XXX_CHANNEL_125V);
+ if (!ch) {
+ dev_err(adc->dev, "missing 1.25V reference channel\n");
+ return -ENODEV;
+ }
+ ch = pm8xxx_get_channel(adc, PM8XXX_CHANNEL_INTERNAL);
+ if (!ch) {
+ dev_err(adc->dev, "missing 0.625V reference channel\n");
+ return -ENODEV;
+ }
+ ch = pm8xxx_get_channel(adc, PM8XXX_CHANNEL_MUXOFF);
+ if (!ch) {
+ dev_err(adc->dev, "missing MUXOFF reference channel\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int pm8xxx_xoadc_probe(struct platform_device *pdev)
+{
+ const struct xoadc_variant *variant;
+ struct pm8xxx_xoadc *adc;
+ struct iio_dev *indio_dev;
+ struct device_node *np = pdev->dev.of_node;
+ struct regmap *map;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ variant = of_device_get_match_data(dev);
+ if (!variant)
+ return -ENODEV;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*adc));
+ if (!indio_dev)
+ return -ENOMEM;
+ platform_set_drvdata(pdev, indio_dev);
+
+ adc = iio_priv(indio_dev);
+ adc->dev = dev;
+ adc->variant = variant;
+ init_completion(&adc->complete);
+ mutex_init(&adc->lock);
+
+ ret = pm8xxx_xoadc_parse_channels(adc, np);
+ if (ret)
+ return ret;
+
+ map = dev_get_regmap(dev->parent, NULL);
+ if (!map) {
+ dev_err(dev, "parent regmap unavailable.\n");
+ return -ENXIO;
+ }
+ adc->map = map;
+
+ /* Bring up regulator */
+ adc->vref = devm_regulator_get(dev, "xoadc-ref");
+ if (IS_ERR(adc->vref)) {
+ dev_err(dev, "failed to get XOADC VREF regulator\n");
+ return PTR_ERR(adc->vref);
+ }
+ ret = regulator_enable(adc->vref);
+ if (ret) {
+ dev_err(dev, "failed to enable XOADC VREF regulator\n");
+ return ret;
+ }
+
+ ret = devm_request_threaded_irq(dev, platform_get_irq(pdev, 0),
+ pm8xxx_eoc_irq, NULL, 0, variant->name, indio_dev);
+ if (ret) {
+ dev_err(dev, "unable to request IRQ\n");
+ goto out_disable_vref;
+ }
+
+ indio_dev->dev.parent = dev;
+ indio_dev->dev.of_node = np;
+ indio_dev->name = variant->name;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->info = &pm8xxx_xoadc_info;
+ indio_dev->channels = adc->iio_chans;
+ indio_dev->num_channels = adc->nchans;
+
+ ret = iio_device_register(indio_dev);
+ if (ret)
+ goto out_disable_vref;
+
+ ret = pm8xxx_calibrate_device(adc);
+ if (ret)
+ goto out_unreg_device;
+
+ dev_info(dev, "%s XOADC driver enabled\n", variant->name);
+
+ return 0;
+
+out_unreg_device:
+ iio_device_unregister(indio_dev);
+out_disable_vref:
+ regulator_disable(adc->vref);
+
+ return ret;
+}
+
+static int pm8xxx_xoadc_remove(struct platform_device *pdev)
+{
+ struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+ struct pm8xxx_xoadc *adc = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+
+ regulator_disable(adc->vref);
+
+ return 0;
+}
+
+static const struct xoadc_variant pm8018_variant = {
+ .name = "PM8018-XOADC",
+ .channels = pm8018_xoadc_channels,
+};
+
+static const struct xoadc_variant pm8038_variant = {
+ .name = "PM8038-XOADC",
+ .channels = pm8038_xoadc_channels,
+};
+
+static const struct xoadc_variant pm8058_variant = {
+ .name = "PM8058-XOADC",
+ .channels = pm8058_xoadc_channels,
+ .broken_ratiometric = true,
+ .prescaling = true,
+};
+
+static const struct xoadc_variant pm8921_variant = {
+ .name = "PM8921-XOADC",
+ .channels = pm8921_xoadc_channels,
+ .second_level_mux = true,
+};
+
+static const struct of_device_id pm8xxx_xoadc_id_table[] = {
+ {
+ .compatible = "qcom,pm8018-adc",
+ .data = &pm8018_variant,
+ },
+ {
+ .compatible = "qcom,pm8038-adc",
+ .data = &pm8038_variant,
+ },
+ {
+ .compatible = "qcom,pm8058-adc",
+ .data = &pm8058_variant,
+ },
+ {
+ .compatible = "qcom,pm8921-adc",
+ .data = &pm8921_variant,
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, pm8xxx_xoadc_id_table);
+
+static struct platform_driver pm8xxx_xoadc_driver = {
+ .driver = {
+ .name = "pm8xxx-adc",
+ .of_match_table = pm8xxx_xoadc_id_table,
+ },
+ .probe = pm8xxx_xoadc_probe,
+ .remove = pm8xxx_xoadc_remove,
+};
+module_platform_driver(pm8xxx_xoadc_driver);
+
+MODULE_DESCRIPTION("PM8xxx XOADC driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:pm8xxx-xoadc");
diff --git a/drivers/iio/adc/qcom-spmi-vadc.c b/drivers/iio/adc/qcom-spmi-vadc.c
index 0a19761d656c..9e600bfd1765 100644
--- a/drivers/iio/adc/qcom-spmi-vadc.c
+++ b/drivers/iio/adc/qcom-spmi-vadc.c
@@ -28,6 +28,8 @@
#include <dt-bindings/iio/qcom,spmi-vadc.h>
+#include "qcom-vadc-common.h"
+
/* VADC register and bit definitions */
#define VADC_REVISION2 0x1
#define VADC_REVISION2_SUPPORTED_VADC 1
@@ -75,84 +77,10 @@
#define VADC_DATA 0x60 /* 16 bits */
-#define VADC_CONV_TIME_MIN_US 2000
-#define VADC_CONV_TIME_MAX_US 2100
-
-/* Min ADC code represents 0V */
-#define VADC_MIN_ADC_CODE 0x6000
-/* Max ADC code represents full-scale range of 1.8V */
-#define VADC_MAX_ADC_CODE 0xa800
-
-#define VADC_ABSOLUTE_RANGE_UV 625000
-#define VADC_RATIOMETRIC_RANGE 1800
-
-#define VADC_DEF_PRESCALING 0 /* 1:1 */
-#define VADC_DEF_DECIMATION 0 /* 512 */
-#define VADC_DEF_HW_SETTLE_TIME 0 /* 0 us */
-#define VADC_DEF_AVG_SAMPLES 0 /* 1 sample */
-#define VADC_DEF_CALIB_TYPE VADC_CALIB_ABSOLUTE
-
-#define VADC_DECIMATION_MIN 512
-#define VADC_DECIMATION_MAX 4096
-
-#define VADC_HW_SETTLE_DELAY_MAX 10000
-#define VADC_AVG_SAMPLES_MAX 512
-
-#define KELVINMIL_CELSIUSMIL 273150
-
-#define PMI_CHG_SCALE_1 -138890
-#define PMI_CHG_SCALE_2 391750000000LL
-
#define VADC_CHAN_MIN VADC_USBIN
#define VADC_CHAN_MAX VADC_LR_MUX3_BUF_PU1_PU2_XO_THERM
/**
- * struct vadc_map_pt - Map the graph representation for ADC channel
- * @x: Represent the ADC digitized code.
- * @y: Represent the physical data which can be temperature, voltage,
- * resistance.
- */
-struct vadc_map_pt {
- s32 x;
- s32 y;
-};
-
-/*
- * VADC_CALIB_ABSOLUTE: uses the 625mV and 1.25V as reference channels.
- * VADC_CALIB_RATIOMETRIC: uses the reference voltage (1.8V) and GND for
- * calibration.
- */
-enum vadc_calibration {
- VADC_CALIB_ABSOLUTE = 0,
- VADC_CALIB_RATIOMETRIC
-};
-
-/**
- * struct vadc_linear_graph - Represent ADC characteristics.
- * @dy: numerator slope to calculate the gain.
- * @dx: denominator slope to calculate the gain.
- * @gnd: A/D word of the ground reference used for the channel.
- *
- * Each ADC device has different offset and gain parameters which are
- * computed to calibrate the device.
- */
-struct vadc_linear_graph {
- s32 dy;
- s32 dx;
- s32 gnd;
-};
-
-/**
- * struct vadc_prescale_ratio - Represent scaling ratio for ADC input.
- * @num: the inverse numerator of the gain applied to the input channel.
- * @den: the inverse denominator of the gain applied to the input channel.
- */
-struct vadc_prescale_ratio {
- u32 num;
- u32 den;
-};
-
-/**
* struct vadc_channel_prop - VADC channel property.
* @channel: channel number, refer to the channel list.
* @calibration: calibration type.
@@ -162,9 +90,8 @@ struct vadc_prescale_ratio {
* start of conversion.
* @avg_samples: ability to provide single result from the ADC
* that is an average of multiple measurements.
- * @scale_fn: Represents the scaling function to convert voltage
+ * @scale_fn_type: Represents the scaling function used to convert voltage to
* physical units desired by the client for the channel.
- * Referenced from enum vadc_scale_fn_type.
*/
struct vadc_channel_prop {
unsigned int channel;
@@ -173,7 +100,7 @@ struct vadc_channel_prop {
unsigned int prescale;
unsigned int hw_settle_time;
unsigned int avg_samples;
- unsigned int scale_fn;
+ enum vadc_scale_fn_type scale_fn_type;
};
/**
@@ -204,35 +131,6 @@ struct vadc_priv {
struct mutex lock;
};
-/**
- * struct vadc_scale_fn - Scaling function prototype
- * @scale: Function pointer to one of the scaling functions
- * which takes the adc properties, channel properties,
- * and returns the physical result.
- */
-struct vadc_scale_fn {
- int (*scale)(struct vadc_priv *, const struct vadc_channel_prop *,
- u16, int *);
-};
-
-/**
- * enum vadc_scale_fn_type - Scaling function to convert ADC code to
- * physical scaled units for the channel.
- * SCALE_DEFAULT: Default scaling to convert raw adc code to voltage (uV).
- * SCALE_THERM_100K_PULLUP: Returns temperature in millidegC.
- * Uses a mapping table with 100K pullup.
- * SCALE_PMIC_THERM: Returns result in milli degree's Centigrade.
- * SCALE_XOTHERM: Returns XO thermistor voltage in millidegC.
- * SCALE_PMI_CHG_TEMP: Conversion for PMI CHG temp
- */
-enum vadc_scale_fn_type {
- SCALE_DEFAULT = 0,
- SCALE_THERM_100K_PULLUP,
- SCALE_PMIC_THERM,
- SCALE_XOTHERM,
- SCALE_PMI_CHG_TEMP,
-};
-
static const struct vadc_prescale_ratio vadc_prescale_ratios[] = {
{.num = 1, .den = 1},
{.num = 1, .den = 3},
@@ -244,44 +142,6 @@ static const struct vadc_prescale_ratio vadc_prescale_ratios[] = {
{.num = 1, .den = 10}
};
-/* Voltage to temperature */
-static const struct vadc_map_pt adcmap_100k_104ef_104fb[] = {
- {1758, -40},
- {1742, -35},
- {1719, -30},
- {1691, -25},
- {1654, -20},
- {1608, -15},
- {1551, -10},
- {1483, -5},
- {1404, 0},
- {1315, 5},
- {1218, 10},
- {1114, 15},
- {1007, 20},
- {900, 25},
- {795, 30},
- {696, 35},
- {605, 40},
- {522, 45},
- {448, 50},
- {383, 55},
- {327, 60},
- {278, 65},
- {237, 70},
- {202, 75},
- {172, 80},
- {146, 85},
- {125, 90},
- {107, 95},
- {92, 100},
- {79, 105},
- {68, 110},
- {59, 115},
- {51, 120},
- {44, 125}
-};
-
static int vadc_read(struct vadc_priv *vadc, u16 offset, u8 *data)
{
return regmap_bulk_read(vadc->regmap, vadc->base + offset, data, 1);
@@ -553,159 +413,6 @@ err:
return ret;
}
-static int vadc_map_voltage_temp(const struct vadc_map_pt *pts,
- u32 tablesize, s32 input, s64 *output)
-{
- bool descending = 1;
- u32 i = 0;
-
- if (!pts)
- return -EINVAL;
-
- /* Check if table is descending or ascending */
- if (tablesize > 1) {
- if (pts[0].x < pts[1].x)
- descending = 0;
- }
-
- while (i < tablesize) {
- if ((descending) && (pts[i].x < input)) {
- /* table entry is less than measured*/
- /* value and table is descending, stop */
- break;
- } else if ((!descending) &&
- (pts[i].x > input)) {
- /* table entry is greater than measured*/
- /*value and table is ascending, stop */
- break;
- }
- i++;
- }
-
- if (i == 0) {
- *output = pts[0].y;
- } else if (i == tablesize) {
- *output = pts[tablesize - 1].y;
- } else {
- /* result is between search_index and search_index-1 */
- /* interpolate linearly */
- *output = (((s32)((pts[i].y - pts[i - 1].y) *
- (input - pts[i - 1].x)) /
- (pts[i].x - pts[i - 1].x)) +
- pts[i - 1].y);
- }
-
- return 0;
-}
-
-static void vadc_scale_calib(struct vadc_priv *vadc, u16 adc_code,
- const struct vadc_channel_prop *prop,
- s64 *scale_voltage)
-{
- *scale_voltage = (adc_code -
- vadc->graph[prop->calibration].gnd);
- *scale_voltage *= vadc->graph[prop->calibration].dx;
- *scale_voltage = div64_s64(*scale_voltage,
- vadc->graph[prop->calibration].dy);
- if (prop->calibration == VADC_CALIB_ABSOLUTE)
- *scale_voltage +=
- vadc->graph[prop->calibration].dx;
-
- if (*scale_voltage < 0)
- *scale_voltage = 0;
-}
-
-static int vadc_scale_volt(struct vadc_priv *vadc,
- const struct vadc_channel_prop *prop, u16 adc_code,
- int *result_uv)
-{
- const struct vadc_prescale_ratio *prescale;
- s64 voltage = 0, result = 0;
-
- vadc_scale_calib(vadc, adc_code, prop, &voltage);
-
- prescale = &vadc_prescale_ratios[prop->prescale];
- voltage = voltage * prescale->den;
- result = div64_s64(voltage, prescale->num);
- *result_uv = result;
-
- return 0;
-}
-
-static int vadc_scale_therm(struct vadc_priv *vadc,
- const struct vadc_channel_prop *prop, u16 adc_code,
- int *result_mdec)
-{
- s64 voltage = 0, result = 0;
-
- vadc_scale_calib(vadc, adc_code, prop, &voltage);
-
- if (prop->calibration == VADC_CALIB_ABSOLUTE)
- voltage = div64_s64(voltage, 1000);
-
- vadc_map_voltage_temp(adcmap_100k_104ef_104fb,
- ARRAY_SIZE(adcmap_100k_104ef_104fb),
- voltage, &result);
- result *= 1000;
- *result_mdec = result;
-
- return 0;
-}
-
-static int vadc_scale_die_temp(struct vadc_priv *vadc,
- const struct vadc_channel_prop *prop,
- u16 adc_code, int *result_mdec)
-{
- const struct vadc_prescale_ratio *prescale;
- s64 voltage = 0;
- u64 temp; /* Temporary variable for do_div */
-
- vadc_scale_calib(vadc, adc_code, prop, &voltage);
-
- if (voltage > 0) {
- prescale = &vadc_prescale_ratios[prop->prescale];
- temp = voltage * prescale->den;
- do_div(temp, prescale->num * 2);
- voltage = temp;
- } else {
- voltage = 0;
- }
-
- voltage -= KELVINMIL_CELSIUSMIL;
- *result_mdec = voltage;
-
- return 0;
-}
-
-static int vadc_scale_chg_temp(struct vadc_priv *vadc,
- const struct vadc_channel_prop *prop,
- u16 adc_code, int *result_mdec)
-{
- const struct vadc_prescale_ratio *prescale;
- s64 voltage = 0, result = 0;
-
- vadc_scale_calib(vadc, adc_code, prop, &voltage);
-
- prescale = &vadc_prescale_ratios[prop->prescale];
- voltage = voltage * prescale->den;
- voltage = div64_s64(voltage, prescale->num);
- voltage = ((PMI_CHG_SCALE_1) * (voltage * 2));
- voltage = (voltage + PMI_CHG_SCALE_2);
- result = div64_s64(voltage, 1000000);
- *result_mdec = result;
-
- return 0;
-}
-
-static int vadc_decimation_from_dt(u32 value)
-{
- if (!is_power_of_2(value) || value < VADC_DECIMATION_MIN ||
- value > VADC_DECIMATION_MAX)
- return -EINVAL;
-
- return __ffs64(value / VADC_DECIMATION_MIN);
-}
-
static int vadc_prescaling_from_dt(u32 num, u32 den)
{
unsigned int pre;
@@ -742,14 +449,6 @@ static int vadc_avg_samples_from_dt(u32 value)
return __ffs64(value);
}
-static struct vadc_scale_fn scale_fn[] = {
- [SCALE_DEFAULT] = {vadc_scale_volt},
- [SCALE_THERM_100K_PULLUP] = {vadc_scale_therm},
- [SCALE_PMIC_THERM] = {vadc_scale_die_temp},
- [SCALE_XOTHERM] = {vadc_scale_therm},
- [SCALE_PMI_CHG_TEMP] = {vadc_scale_chg_temp},
-};
-
static int vadc_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan, int *val, int *val2,
long mask)
@@ -766,7 +465,13 @@ static int vadc_read_raw(struct iio_dev *indio_dev,
if (ret)
break;
- scale_fn[prop->scale_fn].scale(vadc, prop, adc_code, val);
+ ret = qcom_vadc_scale(prop->scale_fn_type,
+ &vadc->graph[prop->calibration],
+ &vadc_prescale_ratios[prop->prescale],
+ (prop->calibration == VADC_CALIB_ABSOLUTE),
+ adc_code, val);
+ if (ret)
+ break;
return IIO_VAL_INT;
case IIO_CHAN_INFO_RAW:
@@ -809,7 +514,7 @@ struct vadc_channels {
unsigned int prescale_index;
enum iio_chan_type type;
long info_mask;
- unsigned int scale_fn;
+ enum vadc_scale_fn_type scale_fn_type;
};
#define VADC_CHAN(_dname, _type, _mask, _pre, _scale) \
@@ -818,7 +523,7 @@ struct vadc_channels {
.prescale_index = _pre, \
.type = _type, \
.info_mask = _mask, \
- .scale_fn = _scale \
+ .scale_fn_type = _scale \
}, \
#define VADC_NO_CHAN(_dname, _type, _mask, _pre) \
@@ -976,7 +681,7 @@ static int vadc_get_dt_channel_data(struct device *dev,
ret = of_property_read_u32(node, "qcom,decimation", &value);
if (!ret) {
- ret = vadc_decimation_from_dt(value);
+ ret = qcom_vadc_decimation_from_dt(value);
if (ret < 0) {
dev_err(dev, "%02x invalid decimation %d\n",
chan, value);
@@ -1068,7 +773,7 @@ static int vadc_get_dt_data(struct vadc_priv *vadc, struct device_node *node)
return ret;
}
- prop.scale_fn = vadc_chans[prop.channel].scale_fn;
+ prop.scale_fn_type = vadc_chans[prop.channel].scale_fn_type;
vadc->chan_props[index] = prop;
vadc_chan = &vadc_chans[prop.channel];
diff --git a/drivers/iio/adc/qcom-vadc-common.c b/drivers/iio/adc/qcom-vadc-common.c
new file mode 100644
index 000000000000..102fc51b10aa
--- /dev/null
+++ b/drivers/iio/adc/qcom-vadc-common.c
@@ -0,0 +1,230 @@
+#include <linux/bug.h>
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/math64.h>
+#include <linux/log2.h>
+#include <linux/err.h>
+
+#include "qcom-vadc-common.h"
+
+/* Voltage to temperature */
+static const struct vadc_map_pt adcmap_100k_104ef_104fb[] = {
+ {1758, -40},
+ {1742, -35},
+ {1719, -30},
+ {1691, -25},
+ {1654, -20},
+ {1608, -15},
+ {1551, -10},
+ {1483, -5},
+ {1404, 0},
+ {1315, 5},
+ {1218, 10},
+ {1114, 15},
+ {1007, 20},
+ {900, 25},
+ {795, 30},
+ {696, 35},
+ {605, 40},
+ {522, 45},
+ {448, 50},
+ {383, 55},
+ {327, 60},
+ {278, 65},
+ {237, 70},
+ {202, 75},
+ {172, 80},
+ {146, 85},
+ {125, 90},
+ {107, 95},
+ {92, 100},
+ {79, 105},
+ {68, 110},
+ {59, 115},
+ {51, 120},
+ {44, 125}
+};
+
+static int qcom_vadc_map_voltage_temp(const struct vadc_map_pt *pts,
+ u32 tablesize, s32 input, s64 *output)
+{
+ bool descending = 1;
+ u32 i = 0;
+
+ if (!pts)
+ return -EINVAL;
+
+ /* Check if table is descending or ascending */
+ if (tablesize > 1) {
+ if (pts[0].x < pts[1].x)
+ descending = 0;
+ }
+
+ while (i < tablesize) {
+ if ((descending) && (pts[i].x < input)) {
+ /*
+ * Table entry is less than the measured value and
+ * the table is descending, stop.
+ */
+ break;
+ } else if ((!descending) &&
+ (pts[i].x > input)) {
+ /*
+ * Table entry is greater than the measured value and
+ * the table is ascending, stop.
+ */
+ break;
+ }
+ i++;
+ }
+
+ if (i == 0) {
+ *output = pts[0].y;
+ } else if (i == tablesize) {
+ *output = pts[tablesize - 1].y;
+ } else {
+ /* result is between search_index and search_index-1 */
+ /* interpolate linearly */
+ *output = (((s32)((pts[i].y - pts[i - 1].y) *
+ (input - pts[i - 1].x)) /
+ (pts[i].x - pts[i - 1].x)) +
+ pts[i - 1].y);
+ }
+
+ return 0;
+}
+
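+/*
+ * Apply the calibration graph to a raw code: uV = (code - gnd) * dx / dy.
+ * In absolute mode dx (625000 uV) is added back afterwards, since gnd there
+ * is the ADC code of the 0.625 V reference rather than of actual ground.
+ */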
+static void qcom_vadc_scale_calib(const struct vadc_linear_graph *calib_graph,
+ u16 adc_code,
+ bool absolute,
+ s64 *scale_voltage)
+{
+ *scale_voltage = (adc_code - calib_graph->gnd);
+ *scale_voltage *= calib_graph->dx;
+ *scale_voltage = div64_s64(*scale_voltage, calib_graph->dy);
+ if (absolute)
+ *scale_voltage += calib_graph->dx;
+
+ if (*scale_voltage < 0)
+ *scale_voltage = 0;
+}
+
+static int qcom_vadc_scale_volt(const struct vadc_linear_graph *calib_graph,
+ const struct vadc_prescale_ratio *prescale,
+ bool absolute, u16 adc_code,
+ int *result_uv)
+{
+ s64 voltage = 0, result = 0;
+
+ qcom_vadc_scale_calib(calib_graph, adc_code, absolute, &voltage);
+
+ voltage = voltage * prescale->den;
+ result = div64_s64(voltage, prescale->num);
+ *result_uv = result;
+
+ return 0;
+}
+
+static int qcom_vadc_scale_therm(const struct vadc_linear_graph *calib_graph,
+ const struct vadc_prescale_ratio *prescale,
+ bool absolute, u16 adc_code,
+ int *result_mdec)
+{
+ s64 voltage = 0, result = 0;
+ int ret;
+
+ qcom_vadc_scale_calib(calib_graph, adc_code, absolute, &voltage);
+
+ if (absolute)
+ voltage = div64_s64(voltage, 1000);
+
+ ret = qcom_vadc_map_voltage_temp(adcmap_100k_104ef_104fb,
+ ARRAY_SIZE(adcmap_100k_104ef_104fb),
+ voltage, &result);
+ if (ret)
+ return ret;
+
+ result *= 1000;
+ *result_mdec = result;
+
+ return 0;
+}
+
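+/*
+ * PMIC die temperature: the sensor output is presumably 2 mV/K (hence the
+ * division by 2), so the calibrated voltage in uV divided by two gives
+ * millikelvin, which is then converted to millidegrees Celsius.
+ */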
+static int qcom_vadc_scale_die_temp(const struct vadc_linear_graph *calib_graph,
+ const struct vadc_prescale_ratio *prescale,
+ bool absolute,
+ u16 adc_code, int *result_mdec)
+{
+ s64 voltage = 0;
+ u64 temp; /* Temporary variable for do_div */
+
+ qcom_vadc_scale_calib(calib_graph, adc_code, absolute, &voltage);
+
+ if (voltage > 0) {
+ temp = voltage * prescale->den;
+ do_div(temp, prescale->num * 2);
+ voltage = temp;
+ } else {
+ voltage = 0;
+ }
+
+ voltage -= KELVINMIL_CELSIUSMIL;
+ *result_mdec = voltage;
+
+ return 0;
+}
+
+static int qcom_vadc_scale_chg_temp(const struct vadc_linear_graph *calib_graph,
+ const struct vadc_prescale_ratio *prescale,
+ bool absolute,
+ u16 adc_code, int *result_mdec)
+{
+ s64 voltage = 0, result = 0;
+
+ qcom_vadc_scale_calib(calib_graph, adc_code, absolute, &voltage);
+
+ voltage = voltage * prescale->den;
+ voltage = div64_s64(voltage, prescale->num);
+ voltage = ((PMI_CHG_SCALE_1) * (voltage * 2));
+ voltage = (voltage + PMI_CHG_SCALE_2);
+ result = div64_s64(voltage, 1000000);
+ *result_mdec = result;
+
+ return 0;
+}
+
+int qcom_vadc_scale(enum vadc_scale_fn_type scaletype,
+ const struct vadc_linear_graph *calib_graph,
+ const struct vadc_prescale_ratio *prescale,
+ bool absolute,
+ u16 adc_code, int *result)
+{
+ switch (scaletype) {
+ case SCALE_DEFAULT:
+ return qcom_vadc_scale_volt(calib_graph, prescale,
+ absolute, adc_code,
+ result);
+ case SCALE_THERM_100K_PULLUP:
+ case SCALE_XOTHERM:
+ return qcom_vadc_scale_therm(calib_graph, prescale,
+ absolute, adc_code,
+ result);
+ case SCALE_PMIC_THERM:
+ return qcom_vadc_scale_die_temp(calib_graph, prescale,
+ absolute, adc_code,
+ result);
+ case SCALE_PMI_CHG_TEMP:
+ return qcom_vadc_scale_chg_temp(calib_graph, prescale,
+ absolute, adc_code,
+ result);
+ default:
+ return -EINVAL;
+ }
+}
+EXPORT_SYMBOL(qcom_vadc_scale);
+
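+/*
+ * Translate a decimation value from the device tree (512, 1024, 2048 or
+ * 4096) into its register encoding: 512 -> 0, 1024 -> 1, 2048 -> 2,
+ * 4096 -> 3; anything else is rejected.
+ */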
+int qcom_vadc_decimation_from_dt(u32 value)
+{
+ if (!is_power_of_2(value) || value < VADC_DECIMATION_MIN ||
+ value > VADC_DECIMATION_MAX)
+ return -EINVAL;
+
+ return __ffs64(value / VADC_DECIMATION_MIN);
+}
+EXPORT_SYMBOL(qcom_vadc_decimation_from_dt);
diff --git a/drivers/iio/adc/qcom-vadc-common.h b/drivers/iio/adc/qcom-vadc-common.h
new file mode 100644
index 000000000000..63c872a70adc
--- /dev/null
+++ b/drivers/iio/adc/qcom-vadc-common.h
@@ -0,0 +1,108 @@
+/*
+ * Code shared between the different Qualcomm PMIC voltage ADCs
+ */
+
+#ifndef QCOM_VADC_COMMON_H
+#define QCOM_VADC_COMMON_H
+
+#define VADC_CONV_TIME_MIN_US 2000
+#define VADC_CONV_TIME_MAX_US 2100
+
+/* Min ADC code represents 0V */
+#define VADC_MIN_ADC_CODE 0x6000
+/* Max ADC code represents full-scale range of 1.8V */
+#define VADC_MAX_ADC_CODE 0xa800
+
+#define VADC_ABSOLUTE_RANGE_UV 625000
+#define VADC_RATIOMETRIC_RANGE 1800
+
+#define VADC_DEF_PRESCALING 0 /* 1:1 */
+#define VADC_DEF_DECIMATION 0 /* 512 */
+#define VADC_DEF_HW_SETTLE_TIME 0 /* 0 us */
+#define VADC_DEF_AVG_SAMPLES 0 /* 1 sample */
+#define VADC_DEF_CALIB_TYPE VADC_CALIB_ABSOLUTE
+
+#define VADC_DECIMATION_MIN 512
+#define VADC_DECIMATION_MAX 4096
+
+#define VADC_HW_SETTLE_DELAY_MAX 10000
+#define VADC_AVG_SAMPLES_MAX 512
+
+#define KELVINMIL_CELSIUSMIL 273150
+
+#define PMI_CHG_SCALE_1 -138890
+#define PMI_CHG_SCALE_2 391750000000LL
+
+/**
+ * struct vadc_map_pt - Map the graph representation for ADC channel
+ * @x: Represent the ADC digitized code.
+ * @y: Represent the physical data which can be temperature, voltage,
+ * resistance.
+ */
+struct vadc_map_pt {
+ s32 x;
+ s32 y;
+};
+
+/*
+ * VADC_CALIB_ABSOLUTE: uses the 625mV and 1.25V as reference channels.
+ * VADC_CALIB_RATIOMETRIC: uses the reference voltage (1.8V) and GND for
+ * calibration.
+ */
+enum vadc_calibration {
+ VADC_CALIB_ABSOLUTE = 0,
+ VADC_CALIB_RATIOMETRIC
+};
+
+/**
+ * struct vadc_linear_graph - Represent ADC characteristics.
+ * @dy: numerator slope to calculate the gain.
+ * @dx: denominator slope to calculate the gain.
+ * @gnd: A/D word of the ground reference used for the channel.
+ *
+ * Each ADC device has different offset and gain parameters which are
+ * computed to calibrate the device.
+ */
+struct vadc_linear_graph {
+ s32 dy;
+ s32 dx;
+ s32 gnd;
+};
+
+/**
+ * struct vadc_prescale_ratio - Represent scaling ratio for ADC input.
+ * @num: the inverse numerator of the gain applied to the input channel.
+ * @den: the inverse denominator of the gain applied to the input channel.
+ */
+struct vadc_prescale_ratio {
+ u32 num;
+ u32 den;
+};
+
+/**
+ * enum vadc_scale_fn_type - Scaling function to convert ADC code to
+ * physical scaled units for the channel.
+ * SCALE_DEFAULT: Default scaling to convert raw adc code to voltage (uV).
+ * SCALE_THERM_100K_PULLUP: Returns temperature in millidegC.
+ * Uses a mapping table with 100K pullup.
+ * SCALE_PMIC_THERM: Returns result in millidegrees Centigrade.
+ * SCALE_XOTHERM: Returns XO thermistor voltage in millidegC.
+ * SCALE_PMI_CHG_TEMP: Conversion for PMI CHG temp
+ */
+enum vadc_scale_fn_type {
+ SCALE_DEFAULT = 0,
+ SCALE_THERM_100K_PULLUP,
+ SCALE_PMIC_THERM,
+ SCALE_XOTHERM,
+ SCALE_PMI_CHG_TEMP,
+};
+
+int qcom_vadc_scale(enum vadc_scale_fn_type scaletype,
+ const struct vadc_linear_graph *calib_graph,
+ const struct vadc_prescale_ratio *prescale,
+ bool absolute,
+ u16 adc_code, int *result_mdec);
+
+int qcom_vadc_decimation_from_dt(u32 value);
+
+#endif /* QCOM_VADC_COMMON_H */
diff --git a/drivers/iio/adc/rockchip_saradc.c b/drivers/iio/adc/rockchip_saradc.c
index 85d701291654..ae6d3324f518 100644
--- a/drivers/iio/adc/rockchip_saradc.c
+++ b/drivers/iio/adc/rockchip_saradc.c
@@ -109,7 +109,7 @@ static int rockchip_saradc_read_raw(struct iio_dev *indio_dev,
static irqreturn_t rockchip_saradc_isr(int irq, void *dev_id)
{
- struct rockchip_saradc *info = (struct rockchip_saradc *)dev_id;
+ struct rockchip_saradc *info = dev_id;
/* Read value */
info->last_val = readl_relaxed(info->regs + SARADC_DATA);
diff --git a/drivers/staging/iio/adc/spear_adc.c b/drivers/iio/adc/spear_adc.c
index 5dd61f6a57b9..5dd61f6a57b9 100644
--- a/drivers/staging/iio/adc/spear_adc.c
+++ b/drivers/iio/adc/spear_adc.c
diff --git a/drivers/iio/adc/stm32-adc.c b/drivers/iio/adc/stm32-adc.c
index 9b49a6addc2a..c28e7ff80e11 100644
--- a/drivers/iio/adc/stm32-adc.c
+++ b/drivers/iio/adc/stm32-adc.c
@@ -60,6 +60,8 @@
#define STM32F4_EOC BIT(1)
/* STM32F4_ADC_CR1 - bit fields */
+#define STM32F4_RES_SHIFT 24
+#define STM32F4_RES_MASK GENMASK(25, 24)
#define STM32F4_SCAN BIT(8)
#define STM32F4_EOCIE BIT(5)
@@ -141,6 +143,7 @@ struct stm32_adc_regs {
* @lock: spinlock
* @bufi: data buffer index
* @num_conv: expected number of scan conversions
+ * @res: data resolution (e.g. RES bitfield value)
* @trigger_polarity: external trigger polarity (e.g. exten)
* @dma_chan: dma channel
* @rx_buf: dma rx buffer cpu address
@@ -157,6 +160,7 @@ struct stm32_adc {
spinlock_t lock; /* interrupt lock */
unsigned int bufi;
unsigned int num_conv;
+ u32 res;
u32 trigger_polarity;
struct dma_chan *dma_chan;
u8 *rx_buf;
@@ -196,6 +200,11 @@ static const struct stm32_adc_chan_spec stm32f4_adc123_channels[] = {
{ IIO_VOLTAGE, 15, "in15" },
};
+static const unsigned int stm32f4_adc_resolutions[] = {
+ /* sorted values so the index matches RES[1:0] in STM32F4_ADC_CR1 */
+ 12, 10, 8, 6,
+};
+
/**
* stm32f4_sq - describe regular sequence registers
* - L: sequence len (register & bit field)
@@ -302,6 +311,14 @@ static void stm32_adc_conv_irq_disable(struct stm32_adc *adc)
stm32_adc_clr_bits(adc, STM32F4_ADC_CR1, STM32F4_EOCIE);
}
+static void stm32_adc_set_res(struct stm32_adc *adc)
+{
+ u32 val = stm32_adc_readl(adc, STM32F4_ADC_CR1);
+
+ val = (val & ~STM32F4_RES_MASK) | (adc->res << STM32F4_RES_SHIFT);
+ stm32_adc_writel(adc, STM32F4_ADC_CR1, val);
+}
+
/**
* stm32_adc_start_conv() - Start conversions for regular channels.
* @adc: stm32 adc instance
@@ -870,11 +887,37 @@ static const struct iio_chan_spec_ext_info stm32_adc_ext_info[] = {
{},
};
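+
+/*
+ * Pick the conversion resolution from the optional DT property
+ * "assigned-resolution-bits" (6, 8, 10 or 12 bits), defaulting to 12 bits
+ * when the property is absent, e.g.:
+ *
+ *   assigned-resolution-bits = <8>;
+ */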
+static int stm32_adc_of_get_resolution(struct iio_dev *indio_dev)
+{
+ struct device_node *node = indio_dev->dev.of_node;
+ struct stm32_adc *adc = iio_priv(indio_dev);
+ unsigned int i;
+ u32 res;
+
+ if (of_property_read_u32(node, "assigned-resolution-bits", &res))
+ res = stm32f4_adc_resolutions[0];
+
+ for (i = 0; i < ARRAY_SIZE(stm32f4_adc_resolutions); i++)
+ if (res == stm32f4_adc_resolutions[i])
+ break;
+ if (i >= ARRAY_SIZE(stm32f4_adc_resolutions)) {
+ dev_err(&indio_dev->dev, "Bad resolution: %u bits\n", res);
+ return -EINVAL;
+ }
+
+ dev_dbg(&indio_dev->dev, "Using %u bits resolution\n", res);
+ adc->res = i;
+
+ return 0;
+}
+
static void stm32_adc_chan_init_one(struct iio_dev *indio_dev,
struct iio_chan_spec *chan,
const struct stm32_adc_chan_spec *channel,
int scan_index)
{
+ struct stm32_adc *adc = iio_priv(indio_dev);
+
chan->type = channel->type;
chan->channel = channel->channel;
chan->datasheet_name = channel->name;
@@ -883,7 +926,7 @@ static void stm32_adc_chan_init_one(struct iio_dev *indio_dev,
chan->info_mask_separate = BIT(IIO_CHAN_INFO_RAW);
chan->info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE);
chan->scan_type.sign = 'u';
- chan->scan_type.realbits = 12;
+ chan->scan_type.realbits = stm32f4_adc_resolutions[adc->res];
chan->scan_type.storagebits = 16;
chan->ext_info = stm32_adc_ext_info;
}
@@ -1022,6 +1065,11 @@ static int stm32_adc_probe(struct platform_device *pdev)
return ret;
}
+ ret = stm32_adc_of_get_resolution(indio_dev);
+ if (ret < 0)
+ goto err_clk_disable;
+ stm32_adc_set_res(adc);
+
ret = stm32_adc_chan_of_init(indio_dev);
if (ret < 0)
goto err_clk_disable;
diff --git a/drivers/iio/adc/stx104.c b/drivers/iio/adc/stx104.c
index be2de48844bc..2df84fa5e3fc 100644
--- a/drivers/iio/adc/stx104.c
+++ b/drivers/iio/adc/stx104.c
@@ -318,6 +318,7 @@ static int stx104_probe(struct device *dev, unsigned int id)
}
indio_dev->name = dev_name(dev);
+ indio_dev->dev.parent = dev;
priv = iio_priv(indio_dev);
priv->base = base[id];
diff --git a/drivers/iio/adc/sun4i-gpadc-iio.c b/drivers/iio/adc/sun4i-gpadc-iio.c
new file mode 100644
index 000000000000..b23527309088
--- /dev/null
+++ b/drivers/iio/adc/sun4i-gpadc-iio.c
@@ -0,0 +1,719 @@
+/* ADC driver for sunxi platforms' (A10, A13 and A31) GPADC
+ *
+ * Copyright (c) 2016 Quentin Schulz <quentin.schulz@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation.
+ *
+ * The Allwinner SoCs all have an ADC that can also act as a touchscreen
+ * controller and a thermal sensor.
+ * The thermal sensor works only when the ADC acts as a touchscreen controller
+ * and is configured to raise an interrupt at a fixed period (let's say every
+ * X seconds).
+ * One would be tempted to disable the IP on the hardware side rather than
+ * disabling interrupts to save some power, but that resets the internal clock
+ * of the IP, resulting in having to wait X seconds every time we want to read
+ * the value of the thermal sensor.
+ * This is also the reason for using autosuspend in pm_runtime. Without
+ * autosuspend, the thermal sensor would need X seconds after every
+ * pm_runtime_get_sync to get a value from the ADC. Autosuspend allows the
+ * thermal sensor to be requested again within a certain time span before it
+ * gets shut down for not being used.
+ */
+
+#include <linux/completion.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/thermal.h>
+#include <linux/delay.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/driver.h>
+#include <linux/iio/machine.h>
+#include <linux/mfd/sun4i-gpadc.h>
+
+static unsigned int sun4i_gpadc_chan_select(unsigned int chan)
+{
+ return SUN4I_GPADC_CTRL1_ADC_CHAN_SELECT(chan);
+}
+
+static unsigned int sun6i_gpadc_chan_select(unsigned int chan)
+{
+ return SUN6I_GPADC_CTRL1_ADC_CHAN_SELECT(chan);
+}
+
+struct gpadc_data {
+ int temp_offset;
+ int temp_scale;
+ unsigned int tp_mode_en;
+ unsigned int tp_adc_select;
+ unsigned int (*adc_chan_select)(unsigned int chan);
+ unsigned int adc_chan_mask;
+};
+
+static const struct gpadc_data sun4i_gpadc_data = {
+ .temp_offset = -1932,
+ .temp_scale = 133,
+ .tp_mode_en = SUN4I_GPADC_CTRL1_TP_MODE_EN,
+ .tp_adc_select = SUN4I_GPADC_CTRL1_TP_ADC_SELECT,
+ .adc_chan_select = &sun4i_gpadc_chan_select,
+ .adc_chan_mask = SUN4I_GPADC_CTRL1_ADC_CHAN_MASK,
+};
+
+static const struct gpadc_data sun5i_gpadc_data = {
+ .temp_offset = -1447,
+ .temp_scale = 100,
+ .tp_mode_en = SUN4I_GPADC_CTRL1_TP_MODE_EN,
+ .tp_adc_select = SUN4I_GPADC_CTRL1_TP_ADC_SELECT,
+ .adc_chan_select = &sun4i_gpadc_chan_select,
+ .adc_chan_mask = SUN4I_GPADC_CTRL1_ADC_CHAN_MASK,
+};
+
+static const struct gpadc_data sun6i_gpadc_data = {
+ .temp_offset = -1623,
+ .temp_scale = 167,
+ .tp_mode_en = SUN6I_GPADC_CTRL1_TP_MODE_EN,
+ .tp_adc_select = SUN6I_GPADC_CTRL1_TP_ADC_SELECT,
+ .adc_chan_select = &sun6i_gpadc_chan_select,
+ .adc_chan_mask = SUN6I_GPADC_CTRL1_ADC_CHAN_MASK,
+};
+
+static const struct gpadc_data sun8i_a33_gpadc_data = {
+ .temp_offset = -1662,
+ .temp_scale = 162,
+ .tp_mode_en = SUN8I_GPADC_CTRL1_CHOP_TEMP_EN,
+};
+
+struct sun4i_gpadc_iio {
+ struct iio_dev *indio_dev;
+ struct completion completion;
+ int temp_data;
+ u32 adc_data;
+ struct regmap *regmap;
+ unsigned int fifo_data_irq;
+ atomic_t ignore_fifo_data_irq;
+ unsigned int temp_data_irq;
+ atomic_t ignore_temp_data_irq;
+ const struct gpadc_data *data;
+ bool no_irq;
+ /* prevents concurrent reads of temperature and ADC */
+ struct mutex mutex;
+};
+
+#define SUN4I_GPADC_ADC_CHANNEL(_channel, _name) { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .channel = _channel, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+ .datasheet_name = _name, \
+}
+
+static struct iio_map sun4i_gpadc_hwmon_maps[] = {
+ {
+ .adc_channel_label = "temp_adc",
+ .consumer_dev_name = "iio_hwmon.0",
+ },
+ { /* sentinel */ },
+};
+
+static const struct iio_chan_spec sun4i_gpadc_channels[] = {
+ SUN4I_GPADC_ADC_CHANNEL(0, "adc_chan0"),
+ SUN4I_GPADC_ADC_CHANNEL(1, "adc_chan1"),
+ SUN4I_GPADC_ADC_CHANNEL(2, "adc_chan2"),
+ SUN4I_GPADC_ADC_CHANNEL(3, "adc_chan3"),
+ {
+ .type = IIO_TEMP,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_OFFSET),
+ .datasheet_name = "temp_adc",
+ },
+};
+
+static const struct iio_chan_spec sun4i_gpadc_channels_no_temp[] = {
+ SUN4I_GPADC_ADC_CHANNEL(0, "adc_chan0"),
+ SUN4I_GPADC_ADC_CHANNEL(1, "adc_chan1"),
+ SUN4I_GPADC_ADC_CHANNEL(2, "adc_chan2"),
+ SUN4I_GPADC_ADC_CHANNEL(3, "adc_chan3"),
+};
+
+static const struct iio_chan_spec sun8i_a33_gpadc_channels[] = {
+ {
+ .type = IIO_TEMP,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_OFFSET),
+ .datasheet_name = "temp_adc",
+ },
+};
+
+static const struct regmap_config sun4i_gpadc_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .fast_io = true,
+};
+
+static int sun4i_prepare_for_irq(struct iio_dev *indio_dev, int channel,
+ unsigned int irq)
+{
+ struct sun4i_gpadc_iio *info = iio_priv(indio_dev);
+ int ret;
+ u32 reg;
+
+ pm_runtime_get_sync(indio_dev->dev.parent);
+
+ reinit_completion(&info->completion);
+
+ ret = regmap_write(info->regmap, SUN4I_GPADC_INT_FIFOC,
+ SUN4I_GPADC_INT_FIFOC_TP_FIFO_TRIG_LEVEL(1) |
+ SUN4I_GPADC_INT_FIFOC_TP_FIFO_FLUSH);
+ if (ret)
+ return ret;
+
+ ret = regmap_read(info->regmap, SUN4I_GPADC_CTRL1, &reg);
+ if (ret)
+ return ret;
+
+ if (irq == info->fifo_data_irq) {
+ ret = regmap_write(info->regmap, SUN4I_GPADC_CTRL1,
+ info->data->tp_mode_en |
+ info->data->tp_adc_select |
+ info->data->adc_chan_select(channel));
+ /*
+ * When the IP changes channel, it needs a bit of time to get
+ * correct values.
+ */
+ if ((reg & info->data->adc_chan_mask) !=
+ info->data->adc_chan_select(channel))
+ mdelay(10);
+
+ } else {
+ /*
+ * The temperature sensor returns valid data only when the ADC
+ * operates in touchscreen mode.
+ */
+ ret = regmap_write(info->regmap, SUN4I_GPADC_CTRL1,
+ info->data->tp_mode_en);
+ }
+
+ if (ret)
+ return ret;
+
+ /*
+ * When the IP changes mode between ADC or touchscreen, it
+ * needs a bit of time to get correct values.
+ */
+ if ((reg & info->data->tp_adc_select) != info->data->tp_adc_select)
+ mdelay(100);
+
+ return 0;
+}
+
+static int sun4i_gpadc_read(struct iio_dev *indio_dev, int channel, int *val,
+ unsigned int irq)
+{
+ struct sun4i_gpadc_iio *info = iio_priv(indio_dev);
+ int ret;
+
+ mutex_lock(&info->mutex);
+
+ ret = sun4i_prepare_for_irq(indio_dev, channel, irq);
+ if (ret)
+ goto err;
+
+ enable_irq(irq);
+
+ /*
+ * The temperature sensor raises an interrupt periodically (currently
+ * every ~0.6 s, as set up in sun4i_gpadc_runtime_resume). A 1 s timeout
+ * makes sure an interrupt occurs under normal conditions; if it does
+ * not, the read times out.
+ */
+ if (!wait_for_completion_timeout(&info->completion,
+ msecs_to_jiffies(1000))) {
+ ret = -ETIMEDOUT;
+ goto err;
+ }
+
+ if (irq == info->fifo_data_irq)
+ *val = info->adc_data;
+ else
+ *val = info->temp_data;
+
+ ret = 0;
+ pm_runtime_mark_last_busy(indio_dev->dev.parent);
+
+err:
+ pm_runtime_put_autosuspend(indio_dev->dev.parent);
+ mutex_unlock(&info->mutex);
+
+ return ret;
+}
+
+static int sun4i_gpadc_adc_read(struct iio_dev *indio_dev, int channel,
+ int *val)
+{
+ struct sun4i_gpadc_iio *info = iio_priv(indio_dev);
+
+ return sun4i_gpadc_read(indio_dev, channel, val, info->fifo_data_irq);
+}
+
+static int sun4i_gpadc_temp_read(struct iio_dev *indio_dev, int *val)
+{
+ struct sun4i_gpadc_iio *info = iio_priv(indio_dev);
+
+ if (info->no_irq) {
+ pm_runtime_get_sync(indio_dev->dev.parent);
+
+ regmap_read(info->regmap, SUN4I_GPADC_TEMP_DATA, val);
+
+ pm_runtime_mark_last_busy(indio_dev->dev.parent);
+ pm_runtime_put_autosuspend(indio_dev->dev.parent);
+
+ return 0;
+ }
+
+ return sun4i_gpadc_read(indio_dev, 0, val, info->temp_data_irq);
+}
+
+static int sun4i_gpadc_temp_offset(struct iio_dev *indio_dev, int *val)
+{
+ struct sun4i_gpadc_iio *info = iio_priv(indio_dev);
+
+ *val = info->data->temp_offset;
+
+ return 0;
+}
+
+static int sun4i_gpadc_temp_scale(struct iio_dev *indio_dev, int *val)
+{
+ struct sun4i_gpadc_iio *info = iio_priv(indio_dev);
+
+ *val = info->data->temp_scale;
+
+ return 0;
+}
+
+static int sun4i_gpadc_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val,
+ int *val2, long mask)
+{
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_OFFSET:
+ ret = sun4i_gpadc_temp_offset(indio_dev, val);
+ if (ret)
+ return ret;
+
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_RAW:
+ if (chan->type == IIO_VOLTAGE)
+ ret = sun4i_gpadc_adc_read(indio_dev, chan->channel,
+ val);
+ else
+ ret = sun4i_gpadc_temp_read(indio_dev, val);
+
+ if (ret)
+ return ret;
+
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ if (chan->type == IIO_VOLTAGE) {
+ /* 3000mV / 4096 * raw */
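+ /*
+ * 3000 mV / 4096 = 0.732421875 mV per LSB, i.e. an integer part of
+ * 0 plus 732421875 nano-units, hence IIO_VAL_INT_PLUS_NANO below.
+ */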
+ *val = 0;
+ *val2 = 732421875;
+ return IIO_VAL_INT_PLUS_NANO;
+ }
+
+ ret = sun4i_gpadc_temp_scale(indio_dev, val);
+ if (ret)
+ return ret;
+
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+
+ return -EINVAL;
+}
+
+static const struct iio_info sun4i_gpadc_iio_info = {
+ .read_raw = sun4i_gpadc_read_raw,
+ .driver_module = THIS_MODULE,
+};
+
+static irqreturn_t sun4i_gpadc_temp_data_irq_handler(int irq, void *dev_id)
+{
+ struct sun4i_gpadc_iio *info = dev_id;
+
+ if (atomic_read(&info->ignore_temp_data_irq))
+ goto out;
+
+ if (!regmap_read(info->regmap, SUN4I_GPADC_TEMP_DATA, &info->temp_data))
+ complete(&info->completion);
+
+out:
+ disable_irq_nosync(info->temp_data_irq);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t sun4i_gpadc_fifo_data_irq_handler(int irq, void *dev_id)
+{
+ struct sun4i_gpadc_iio *info = dev_id;
+
+ if (atomic_read(&info->ignore_fifo_data_irq))
+ goto out;
+
+ if (!regmap_read(info->regmap, SUN4I_GPADC_DATA, &info->adc_data))
+ complete(&info->completion);
+
+out:
+ disable_irq_nosync(info->fifo_data_irq);
+ return IRQ_HANDLED;
+}
+
+static int sun4i_gpadc_runtime_suspend(struct device *dev)
+{
+ struct sun4i_gpadc_iio *info = iio_priv(dev_get_drvdata(dev));
+
+ /* Disable the ADC on IP */
+ regmap_write(info->regmap, SUN4I_GPADC_CTRL1, 0);
+ /* Disable temperature sensor on IP */
+ regmap_write(info->regmap, SUN4I_GPADC_TPR, 0);
+
+ return 0;
+}
+
+static int sun4i_gpadc_runtime_resume(struct device *dev)
+{
+ struct sun4i_gpadc_iio *info = iio_priv(dev_get_drvdata(dev));
+
+ /* clkin = 6MHz */
+ regmap_write(info->regmap, SUN4I_GPADC_CTRL0,
+ SUN4I_GPADC_CTRL0_ADC_CLK_DIVIDER(2) |
+ SUN4I_GPADC_CTRL0_FS_DIV(7) |
+ SUN4I_GPADC_CTRL0_T_ACQ(63));
+ regmap_write(info->regmap, SUN4I_GPADC_CTRL1, info->data->tp_mode_en);
+ regmap_write(info->regmap, SUN4I_GPADC_CTRL3,
+ SUN4I_GPADC_CTRL3_FILTER_EN |
+ SUN4I_GPADC_CTRL3_FILTER_TYPE(1));
+ /* period = SUN4I_GPADC_TPR_TEMP_PERIOD * 256 * 16 / clkin; ~0.6s */
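+ /*
+ * With TEMP_PERIOD(800) and clkin = 6 MHz this gives
+ * 800 * 256 * 16 / 6000000 ~= 0.55 s between temperature interrupts.
+ */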
+ regmap_write(info->regmap, SUN4I_GPADC_TPR,
+ SUN4I_GPADC_TPR_TEMP_ENABLE |
+ SUN4I_GPADC_TPR_TEMP_PERIOD(800));
+
+ return 0;
+}
+
+static int sun4i_gpadc_get_temp(void *data, int *temp)
+{
+ struct sun4i_gpadc_iio *info = data;
+ int val, scale, offset;
+
+ if (sun4i_gpadc_temp_read(info->indio_dev, &val))
+ return -ETIMEDOUT;
+
+ sun4i_gpadc_temp_scale(info->indio_dev, &scale);
+ sun4i_gpadc_temp_offset(info->indio_dev, &offset);
+
+ *temp = (val + offset) * scale;
+
+ return 0;
+}
+
+static const struct thermal_zone_of_device_ops sun4i_ts_tz_ops = {
+ .get_temp = &sun4i_gpadc_get_temp,
+};
+
+static const struct dev_pm_ops sun4i_gpadc_pm_ops = {
+ .runtime_suspend = &sun4i_gpadc_runtime_suspend,
+ .runtime_resume = &sun4i_gpadc_runtime_resume,
+};
+
+static int sun4i_irq_init(struct platform_device *pdev, const char *name,
+ irq_handler_t handler, const char *devname,
+ unsigned int *irq, atomic_t *atomic)
+{
+ int ret;
+ struct sun4i_gpadc_dev *mfd_dev = dev_get_drvdata(pdev->dev.parent);
+ struct sun4i_gpadc_iio *info = iio_priv(dev_get_drvdata(&pdev->dev));
+
+ /*
+ * Once the interrupt is enabled, the IP continuously performs
+ * conversions and thus keeps raising interrupts. The interrupt is
+ * enabled as soon as it is requested, but we want to control when
+ * these interrupts occur, so we disable it right after requesting it.
+ * However, an interrupt might fire between those two instructions,
+ * and we guard against that with an atomic flag: the flag is set
+ * before requesting the interrupt and cleared right after disabling
+ * it. If an interrupt does occur in that window, the handler reads
+ * the flag and ignores the interrupt.
+ */
+ atomic_set(atomic, 1);
+
+ ret = platform_get_irq_byname(pdev, name);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "no %s interrupt registered\n", name);
+ return ret;
+ }
+
+ ret = regmap_irq_get_virq(mfd_dev->regmap_irqc, ret);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to get virq for irq %s\n", name);
+ return ret;
+ }
+
+ *irq = ret;
+ ret = devm_request_any_context_irq(&pdev->dev, *irq, handler, 0,
+ devname, info);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "could not request %s interrupt: %d\n",
+ name, ret);
+ return ret;
+ }
+
+ disable_irq(*irq);
+ atomic_set(atomic, 0);
+
+ return 0;
+}
+
+static const struct of_device_id sun4i_gpadc_of_id[] = {
+ {
+ .compatible = "allwinner,sun8i-a33-ths",
+ .data = &sun8i_a33_gpadc_data,
+ },
+ { /* sentinel */ }
+};
+
+static int sun4i_gpadc_probe_dt(struct platform_device *pdev,
+ struct iio_dev *indio_dev)
+{
+ struct sun4i_gpadc_iio *info = iio_priv(indio_dev);
+ const struct of_device_id *of_dev;
+ struct thermal_zone_device *tzd;
+ struct resource *mem;
+ void __iomem *base;
+ int ret;
+
+ of_dev = of_match_device(sun4i_gpadc_of_id, &pdev->dev);
+ if (!of_dev)
+ return -ENODEV;
+
+ info->no_irq = true;
+ info->data = (struct gpadc_data *)of_dev->data;
+ indio_dev->num_channels = ARRAY_SIZE(sun8i_a33_gpadc_channels);
+ indio_dev->channels = sun8i_a33_gpadc_channels;
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(&pdev->dev, mem);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ info->regmap = devm_regmap_init_mmio(&pdev->dev, base,
+ &sun4i_gpadc_regmap_config);
+ if (IS_ERR(info->regmap)) {
+ ret = PTR_ERR(info->regmap);
+ dev_err(&pdev->dev, "failed to init regmap: %d\n", ret);
+ return ret;
+ }
+
+ if (!IS_ENABLED(CONFIG_THERMAL_OF))
+ return 0;
+
+ tzd = devm_thermal_zone_of_sensor_register(&pdev->dev, 0, info,
+ &sun4i_ts_tz_ops);
+ if (IS_ERR(tzd))
+ dev_err(&pdev->dev, "could not register thermal sensor: %ld\n",
+ PTR_ERR(tzd));
+
+ return PTR_ERR_OR_ZERO(tzd);
+}
+
+static int sun4i_gpadc_probe_mfd(struct platform_device *pdev,
+ struct iio_dev *indio_dev)
+{
+ struct sun4i_gpadc_iio *info = iio_priv(indio_dev);
+ struct sun4i_gpadc_dev *sun4i_gpadc_dev =
+ dev_get_drvdata(pdev->dev.parent);
+ int ret;
+
+ info->no_irq = false;
+ info->regmap = sun4i_gpadc_dev->regmap;
+
+ indio_dev->num_channels = ARRAY_SIZE(sun4i_gpadc_channels);
+ indio_dev->channels = sun4i_gpadc_channels;
+
+ info->data = (struct gpadc_data *)platform_get_device_id(pdev)->driver_data;
+
+ /*
+ * Since the controller needs to be in touchscreen mode for its thermal
+ * sensor to operate properly, and switching between the two modes
+ * requires a delay, always registering with the thermal framework
+ * would significantly slow down the conversion rate of the ADCs.
+ *
+ * Therefore, instead of depending on THERMAL_OF in Kconfig, we only
+ * register the sensor when that option is enabled, effectively leaving
+ * the choice to the user.
+ */
+
+ if (IS_ENABLED(CONFIG_THERMAL_OF)) {
+ /*
+ * This driver is the child of an MFD which has a node in the DT but
+ * whose children do not, for DT backward compatibility with the A10,
+ * A13 and A31 SoCs. The devices created by this driver therefore have
+ * no of_node.
+ * However, the parent MFD device does have one, and
+ * devm_thermal_zone_of_sensor_register matches the phandle in the
+ * thermal zone node against the of_node of the device passed as its
+ * first argument, while the data passed as its third argument is
+ * handed back to the thermal_zone_of_device_ops callbacks.
+ * The solution is therefore to pass the parent device as the first
+ * argument, so the phandle matches its of_node, and this driver's
+ * private data as the third argument, so the callbacks can report
+ * the temperature.
+ */
+ struct thermal_zone_device *tzd;
+ tzd = devm_thermal_zone_of_sensor_register(pdev->dev.parent, 0,
+ info,
+ &sun4i_ts_tz_ops);
+ if (IS_ERR(tzd)) {
+ dev_err(&pdev->dev,
+ "could not register thermal sensor: %ld\n",
+ PTR_ERR(tzd));
+ return PTR_ERR(tzd);
+ }
+ } else {
+ indio_dev->num_channels =
+ ARRAY_SIZE(sun4i_gpadc_channels_no_temp);
+ indio_dev->channels = sun4i_gpadc_channels_no_temp;
+ }
+
+ if (IS_ENABLED(CONFIG_THERMAL_OF)) {
+ ret = sun4i_irq_init(pdev, "TEMP_DATA_PENDING",
+ sun4i_gpadc_temp_data_irq_handler,
+ "temp_data", &info->temp_data_irq,
+ &info->ignore_temp_data_irq);
+ if (ret < 0)
+ return ret;
+ }
+
+ ret = sun4i_irq_init(pdev, "FIFO_DATA_PENDING",
+ sun4i_gpadc_fifo_data_irq_handler, "fifo_data",
+ &info->fifo_data_irq, &info->ignore_fifo_data_irq);
+ if (ret < 0)
+ return ret;
+
+ if (IS_ENABLED(CONFIG_THERMAL_OF)) {
+ ret = iio_map_array_register(indio_dev, sun4i_gpadc_hwmon_maps);
+ if (ret < 0) {
+ dev_err(&pdev->dev,
+ "failed to register iio map array\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int sun4i_gpadc_probe(struct platform_device *pdev)
+{
+ struct sun4i_gpadc_iio *info;
+ struct iio_dev *indio_dev;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*info));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ info = iio_priv(indio_dev);
+ platform_set_drvdata(pdev, indio_dev);
+
+ mutex_init(&info->mutex);
+ info->indio_dev = indio_dev;
+ init_completion(&info->completion);
+ indio_dev->name = dev_name(&pdev->dev);
+ indio_dev->dev.parent = &pdev->dev;
+ indio_dev->dev.of_node = pdev->dev.of_node;
+ indio_dev->info = &sun4i_gpadc_iio_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ if (pdev->dev.of_node)
+ ret = sun4i_gpadc_probe_dt(pdev, indio_dev);
+ else
+ ret = sun4i_gpadc_probe_mfd(pdev, indio_dev);
+
+ if (ret)
+ return ret;
+
+ pm_runtime_set_autosuspend_delay(&pdev->dev,
+ SUN4I_GPADC_AUTOSUSPEND_DELAY);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+
+ ret = devm_iio_device_register(&pdev->dev, indio_dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "could not register the device\n");
+ goto err_map;
+ }
+
+ return 0;
+
+err_map:
+ if (!info->no_irq && IS_ENABLED(CONFIG_THERMAL_OF))
+ iio_map_array_unregister(indio_dev);
+
+ pm_runtime_put(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
+ return ret;
+}
+
+static int sun4i_gpadc_remove(struct platform_device *pdev)
+{
+ struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+ struct sun4i_gpadc_iio *info = iio_priv(indio_dev);
+
+ pm_runtime_put(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ if (!info->no_irq && IS_ENABLED(CONFIG_THERMAL_OF))
+ iio_map_array_unregister(indio_dev);
+
+ return 0;
+}
+
+static const struct platform_device_id sun4i_gpadc_id[] = {
+ { "sun4i-a10-gpadc-iio", (kernel_ulong_t)&sun4i_gpadc_data },
+ { "sun5i-a13-gpadc-iio", (kernel_ulong_t)&sun5i_gpadc_data },
+ { "sun6i-a31-gpadc-iio", (kernel_ulong_t)&sun6i_gpadc_data },
+ { /* sentinel */ },
+};
+
+static struct platform_driver sun4i_gpadc_driver = {
+ .driver = {
+ .name = "sun4i-gpadc-iio",
+ .of_match_table = sun4i_gpadc_of_id,
+ .pm = &sun4i_gpadc_pm_ops,
+ },
+ .id_table = sun4i_gpadc_id,
+ .probe = sun4i_gpadc_probe,
+ .remove = sun4i_gpadc_remove,
+};
+
+module_platform_driver(sun4i_gpadc_driver);
+
+MODULE_DESCRIPTION("ADC driver for sunxi platforms");
+MODULE_AUTHOR("Quentin Schulz <quentin.schulz@free-electrons.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/adc/ti-ads1015.c b/drivers/iio/adc/ti-ads1015.c
index 422b314f5a3f..f76d979fb7e8 100644
--- a/drivers/iio/adc/ti-ads1015.c
+++ b/drivers/iio/adc/ti-ads1015.c
@@ -15,6 +15,7 @@
*/
#include <linux/module.h>
+#include <linux/of_device.h>
#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/regmap.h>
@@ -55,7 +56,7 @@
#define ADS1015_DEFAULT_DATA_RATE 4
#define ADS1015_DEFAULT_CHAN 0
-enum {
+enum chip_ids {
ADS1015,
ADS1115,
};
@@ -578,6 +579,7 @@ static int ads1015_probe(struct i2c_client *client,
struct iio_dev *indio_dev;
struct ads1015_data *data;
int ret;
+ enum chip_ids chip;
indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
if (!indio_dev)
@@ -593,7 +595,11 @@ static int ads1015_probe(struct i2c_client *client,
indio_dev->name = ADS1015_DRV_NAME;
indio_dev->modes = INDIO_DIRECT_MODE;
- switch (id->driver_data) {
+ if (client->dev.of_node)
+ chip = (enum chip_ids)of_device_get_match_data(&client->dev);
+ else
+ chip = id->driver_data;
+ switch (chip) {
case ADS1015:
indio_dev->channels = ads1015_channels;
indio_dev->num_channels = ARRAY_SIZE(ads1015_channels);
@@ -698,9 +704,23 @@ static const struct i2c_device_id ads1015_id[] = {
};
MODULE_DEVICE_TABLE(i2c, ads1015_id);
+static const struct of_device_id ads1015_of_match[] = {
+ {
+ .compatible = "ti,ads1015",
+ .data = (void *)ADS1015
+ },
+ {
+ .compatible = "ti,ads1115",
+ .data = (void *)ADS1115
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, ads1015_of_match);
+
static struct i2c_driver ads1015_driver = {
.driver = {
.name = ADS1015_DRV_NAME,
+ .of_match_table = ads1015_of_match,
.pm = &ads1015_pm_ops,
},
.probe = ads1015_probe,
diff --git a/drivers/iio/adc/vf610_adc.c b/drivers/iio/adc/vf610_adc.c
index 228a003adeed..01fc76f7d660 100644
--- a/drivers/iio/adc/vf610_adc.c
+++ b/drivers/iio/adc/vf610_adc.c
@@ -584,7 +584,7 @@ static int vf610_adc_read_data(struct vf610_adc *info)
static irqreturn_t vf610_adc_isr(int irq, void *dev_id)
{
- struct iio_dev *indio_dev = (struct iio_dev *)dev_id;
+ struct iio_dev *indio_dev = dev_id;
struct vf610_adc *info = iio_priv(indio_dev);
int coco;
diff --git a/drivers/iio/chemical/ams-iaq-core.c b/drivers/iio/chemical/ams-iaq-core.c
index 41a8e6f2e31d..c948ad2ee9ad 100644
--- a/drivers/iio/chemical/ams-iaq-core.c
+++ b/drivers/iio/chemical/ams-iaq-core.c
@@ -163,7 +163,7 @@ static int ams_iaqcore_probe(struct i2c_client *client,
mutex_init(&data->lock);
indio_dev->dev.parent = &client->dev;
- indio_dev->info = &ams_iaqcore_info,
+ indio_dev->info = &ams_iaqcore_info;
indio_dev->name = dev_name(&client->dev);
indio_dev->modes = INDIO_DIRECT_MODE;
diff --git a/drivers/iio/chemical/vz89x.c b/drivers/iio/chemical/vz89x.c
index 8e0e4415c161..f75eea6822f2 100644
--- a/drivers/iio/chemical/vz89x.c
+++ b/drivers/iio/chemical/vz89x.c
@@ -393,7 +393,7 @@ static int vz89x_probe(struct i2c_client *client,
mutex_init(&data->lock);
indio_dev->dev.parent = &client->dev;
- indio_dev->info = &vz89x_info,
+ indio_dev->info = &vz89x_info;
indio_dev->name = dev_name(&client->dev);
indio_dev->modes = INDIO_DIRECT_MODE;
diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c
index c17596f7ed2c..38e8783e4b05 100644
--- a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c
+++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c
@@ -267,31 +267,12 @@ static int cros_ec_sensors_probe(struct platform_device *pdev)
else
state->core.read_ec_sensors_data = cros_ec_sensors_read_cmd;
- ret = iio_triggered_buffer_setup(indio_dev, NULL,
- cros_ec_sensors_capture, NULL);
+ ret = devm_iio_triggered_buffer_setup(dev, indio_dev, NULL,
+ cros_ec_sensors_capture, NULL);
if (ret)
return ret;
- ret = iio_device_register(indio_dev);
- if (ret)
- goto error_uninit_buffer;
-
- return 0;
-
-error_uninit_buffer:
- iio_triggered_buffer_cleanup(indio_dev);
-
- return ret;
-}
-
-static int cros_ec_sensors_remove(struct platform_device *pdev)
-{
- struct iio_dev *indio_dev = platform_get_drvdata(pdev);
-
- iio_device_unregister(indio_dev);
- iio_triggered_buffer_cleanup(indio_dev);
-
- return 0;
+ return devm_iio_device_register(dev, indio_dev);
}
static const struct platform_device_id cros_ec_sensors_ids[] = {
@@ -313,7 +294,6 @@ static struct platform_driver cros_ec_sensors_platform_driver = {
.name = "cros-ec-sensors",
},
.probe = cros_ec_sensors_probe,
- .remove = cros_ec_sensors_remove,
.id_table = cros_ec_sensors_ids,
};
module_platform_driver(cros_ec_sensors_platform_driver);
diff --git a/drivers/iio/common/hid-sensors/hid-sensor-attributes.c b/drivers/iio/common/hid-sensors/hid-sensor-attributes.c
index 01e02b9926d4..1c0874cdf665 100644
--- a/drivers/iio/common/hid-sensors/hid-sensor-attributes.c
+++ b/drivers/iio/common/hid-sensors/hid-sensor-attributes.c
@@ -38,6 +38,12 @@ static struct {
{HID_USAGE_SENSOR_ACCEL_3D,
HID_USAGE_SENSOR_UNITS_G, 9, 806650000},
+ {HID_USAGE_SENSOR_GRAVITY_VECTOR, 0, 9, 806650000},
+ {HID_USAGE_SENSOR_GRAVITY_VECTOR,
+ HID_USAGE_SENSOR_UNITS_METERS_PER_SEC_SQRD, 1, 0},
+ {HID_USAGE_SENSOR_GRAVITY_VECTOR,
+ HID_USAGE_SENSOR_UNITS_G, 9, 806650000},
+
{HID_USAGE_SENSOR_GYRO_3D, 0, 0, 17453293},
{HID_USAGE_SENSOR_GYRO_3D,
HID_USAGE_SENSOR_UNITS_RADIANS_PER_SECOND, 1, 0},
@@ -62,6 +68,11 @@ static struct {
{HID_USAGE_SENSOR_TIME_TIMESTAMP, 0, 1000000000, 0},
{HID_USAGE_SENSOR_TIME_TIMESTAMP, HID_USAGE_SENSOR_UNITS_MILLISECOND,
1000000, 0},
+
+ {HID_USAGE_SENSOR_TEMPERATURE, 0, 1000, 0},
+ {HID_USAGE_SENSOR_TEMPERATURE, HID_USAGE_SENSOR_UNITS_DEGREES, 1000, 0},
+
+ {HID_USAGE_SENSOR_HUMIDITY, 0, 1000, 0},
};
static int pow_10(unsigned power)
@@ -221,7 +232,15 @@ int hid_sensor_write_samp_freq_value(struct hid_sensor_common *st,
if (ret < 0 || value < 0)
ret = -EINVAL;
- return ret;
+ ret = sensor_hub_get_feature(st->hsdev,
+ st->poll.report_id,
+ st->poll.index, sizeof(value), &value);
+ if (ret < 0 || value < 0)
+ return -EINVAL;
+
+ st->poll_interval = value;
+
+ return 0;
}
EXPORT_SYMBOL(hid_sensor_write_samp_freq_value);
@@ -266,7 +285,16 @@ int hid_sensor_write_raw_hyst_value(struct hid_sensor_common *st,
if (ret < 0 || value < 0)
ret = -EINVAL;
- return ret;
+ ret = sensor_hub_get_feature(st->hsdev,
+ st->sensitivity.report_id,
+ st->sensitivity.index, sizeof(value),
+ &value);
+ if (ret < 0 || value < 0)
+ return -EINVAL;
+
+ st->raw_hystersis = value;
+
+ return 0;
}
EXPORT_SYMBOL(hid_sensor_write_raw_hyst_value);
@@ -369,6 +397,9 @@ int hid_sensor_get_reporting_interval(struct hid_sensor_hub_device *hsdev,
/* Default unit of measure is milliseconds */
if (st->poll.units == 0)
st->poll.units = HID_USAGE_SENSOR_UNITS_MILLISECOND;
+
+ st->poll_interval = -1;
+
return 0;
}
@@ -399,6 +430,8 @@ int hid_sensor_parse_common_attributes(struct hid_sensor_hub_device *hsdev,
HID_USAGE_SENSOR_PROP_SENSITIVITY_ABS,
&st->sensitivity);
+ st->raw_hystersis = -1;
+
sensor_hub_input_get_attribute_info(hsdev,
HID_INPUT_REPORT, usage_id,
HID_USAGE_SENSOR_TIME_TIMESTAMP,
diff --git a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
index ecf592d69043..0b5dea050239 100644
--- a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
+++ b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
@@ -51,6 +51,8 @@ static int _hid_sensor_power_state(struct hid_sensor_common *st, bool state)
st->report_state.report_id,
st->report_state.index,
HID_USAGE_SENSOR_PROP_REPORTING_STATE_ALL_EVENTS_ENUM);
+
+ poll_value = hid_sensor_read_poll_value(st);
} else {
int val;
@@ -87,9 +89,7 @@ static int _hid_sensor_power_state(struct hid_sensor_common *st, bool state)
sensor_hub_get_feature(st->hsdev, st->power_state.report_id,
st->power_state.index,
sizeof(state_val), &state_val);
- if (state)
- poll_value = hid_sensor_read_poll_value(st);
- if (poll_value > 0)
+ if (state && poll_value)
msleep_interruptible(poll_value * 2);
return 0;
@@ -127,6 +127,20 @@ static void hid_sensor_set_power_work(struct work_struct *work)
struct hid_sensor_common *attrb = container_of(work,
struct hid_sensor_common,
work);
+
+ if (attrb->poll_interval >= 0)
+ sensor_hub_set_feature(attrb->hsdev, attrb->poll.report_id,
+ attrb->poll.index,
+ sizeof(attrb->poll_interval),
+ &attrb->poll_interval);
+
+ if (attrb->raw_hystersis >= 0)
+ sensor_hub_set_feature(attrb->hsdev,
+ attrb->sensitivity.report_id,
+ attrb->sensitivity.index,
+ sizeof(attrb->raw_hystersis),
+ &attrb->raw_hystersis);
+
_hid_sensor_power_state(attrb, true);
}
@@ -138,6 +152,10 @@ static int hid_sensor_data_rdy_trigger_set_state(struct iio_trigger *trig,
void hid_sensor_remove_trigger(struct hid_sensor_common *attrb)
{
+ pm_runtime_disable(&attrb->pdev->dev);
+ pm_runtime_set_suspended(&attrb->pdev->dev);
+ pm_runtime_put_noidle(&attrb->pdev->dev);
+
cancel_work_sync(&attrb->work);
iio_trigger_unregister(attrb->trigger);
iio_trigger_free(attrb->trigger);
diff --git a/drivers/iio/common/ms_sensors/ms_sensors_i2c.c b/drivers/iio/common/ms_sensors/ms_sensors_i2c.c
index ecf7721ecaf4..125b5ff61d19 100644
--- a/drivers/iio/common/ms_sensors/ms_sensors_i2c.c
+++ b/drivers/iio/common/ms_sensors/ms_sensors_i2c.c
@@ -74,7 +74,7 @@ EXPORT_SYMBOL(ms_sensors_reset);
int ms_sensors_read_prom_word(void *cli, int cmd, u16 *word)
{
int ret;
- struct i2c_client *client = (struct i2c_client *)cli;
+ struct i2c_client *client = cli;
ret = i2c_smbus_read_word_swapped(client, cmd);
if (ret < 0) {
@@ -107,7 +107,7 @@ int ms_sensors_convert_and_read(void *cli, u8 conv, u8 rd,
{
int ret;
__be32 buf = 0;
- struct i2c_client *client = (struct i2c_client *)cli;
+ struct i2c_client *client = cli;
/* Trigger conversion */
ret = i2c_smbus_write_byte(client, conv);
diff --git a/drivers/iio/counter/104-quad-8.c b/drivers/iio/counter/104-quad-8.c
index f9b8fc9ae13f..ba3d9030cd51 100644
--- a/drivers/iio/counter/104-quad-8.c
+++ b/drivers/iio/counter/104-quad-8.c
@@ -551,6 +551,7 @@ static int quad8_probe(struct device *dev, unsigned int id)
indio_dev->num_channels = ARRAY_SIZE(quad8_channels);
indio_dev->channels = quad8_channels;
indio_dev->name = dev_name(dev);
+ indio_dev->dev.parent = dev;
priv = iio_priv(indio_dev);
priv->base = base[id];
diff --git a/drivers/iio/counter/Kconfig b/drivers/iio/counter/Kconfig
index 44627f6e4861..b37e5fc03149 100644
--- a/drivers/iio/counter/Kconfig
+++ b/drivers/iio/counter/Kconfig
@@ -7,7 +7,7 @@ menu "Counters"
config 104_QUAD_8
tristate "ACCES 104-QUAD-8 driver"
- depends on X86 && ISA_BUS_API
+ depends on PC104 && X86 && ISA_BUS_API
help
Say yes here to build support for the ACCES 104-QUAD-8 quadrature
encoder counter/interface device family (104-QUAD-8, 104-QUAD-4).
diff --git a/drivers/iio/dac/Kconfig b/drivers/iio/dac/Kconfig
index d3084028905b..df5abc46cd3f 100644
--- a/drivers/iio/dac/Kconfig
+++ b/drivers/iio/dac/Kconfig
@@ -118,6 +118,16 @@ config AD5624R_SPI
Say yes here to build support for Analog Devices AD5624R, AD5644R and
AD5664R converters (DAC). This driver uses the common SPI interface.
+config LTC2632
+ tristate "Linear Technology LTC2632-12/10/8 DAC spi driver"
+ depends on SPI
+ help
+ Say yes here to build support for Linear Technology
+ LTC2632-12, LTC2632-10, LTC2632-8 converters (DAC).
+
+ To compile this driver as a module, choose M here: the
+ module will be called ltc2632.
+
config AD5686
tristate "Analog Devices AD5686R/AD5685R/AD5684R DAC SPI driver"
depends on SPI
@@ -274,6 +284,21 @@ config MCP4922
To compile this driver as a module, choose M here: the module
will be called mcp4922.
+config STM32_DAC
+ tristate "STMicroelectronics STM32 DAC"
+ depends on (ARCH_STM32 && OF) || COMPILE_TEST
+ depends on REGULATOR
+ select STM32_DAC_CORE
+ help
+ Say yes here to build support for STMicroelectronics STM32 Digital
+ to Analog Converter (DAC).
+
+ This driver can also be built as a module. If so, the module
+ will be called stm32-dac.
+
+config STM32_DAC_CORE
+ tristate
+
config VF610_DAC
tristate "Vybrid vf610 DAC driver"
depends on OF
diff --git a/drivers/iio/dac/Makefile b/drivers/iio/dac/Makefile
index f01bf4a99867..603587cc2f07 100644
--- a/drivers/iio/dac/Makefile
+++ b/drivers/iio/dac/Makefile
@@ -24,9 +24,12 @@ obj-$(CONFIG_AD8801) += ad8801.o
obj-$(CONFIG_CIO_DAC) += cio-dac.o
obj-$(CONFIG_DPOT_DAC) += dpot-dac.o
obj-$(CONFIG_LPC18XX_DAC) += lpc18xx_dac.o
+obj-$(CONFIG_LTC2632) += ltc2632.o
obj-$(CONFIG_M62332) += m62332.o
obj-$(CONFIG_MAX517) += max517.o
obj-$(CONFIG_MAX5821) += max5821.o
obj-$(CONFIG_MCP4725) += mcp4725.o
obj-$(CONFIG_MCP4922) += mcp4922.o
+obj-$(CONFIG_STM32_DAC_CORE) += stm32-dac-core.o
+obj-$(CONFIG_STM32_DAC) += stm32-dac.o
obj-$(CONFIG_VF610_DAC) += vf610_dac.o
diff --git a/drivers/iio/dac/ad5504.c b/drivers/iio/dac/ad5504.c
index 788b3d6fd1cc..712d86b4be09 100644
--- a/drivers/iio/dac/ad5504.c
+++ b/drivers/iio/dac/ad5504.c
@@ -212,7 +212,7 @@ static struct attribute *ad5504_ev_attributes[] = {
NULL,
};
-static struct attribute_group ad5504_ev_attribute_group = {
+static const struct attribute_group ad5504_ev_attribute_group = {
.attrs = ad5504_ev_attributes,
};
@@ -223,7 +223,7 @@ static irqreturn_t ad5504_event_handler(int irq, void *private)
0,
IIO_EV_TYPE_THRESH,
IIO_EV_DIR_RISING),
- iio_get_time_ns((struct iio_dev *)private));
+ iio_get_time_ns(private));
return IRQ_HANDLED;
}
diff --git a/drivers/iio/dac/ad7303.c b/drivers/iio/dac/ad7303.c
index e690dd11e99f..4b0f942b8914 100644
--- a/drivers/iio/dac/ad7303.c
+++ b/drivers/iio/dac/ad7303.c
@@ -184,9 +184,9 @@ static const struct iio_chan_spec_ext_info ad7303_ext_info[] = {
.address = (chan), \
.scan_type = { \
.sign = 'u', \
- .realbits = '8', \
- .storagebits = '8', \
- .shift = '0', \
+ .realbits = 8, \
+ .storagebits = 8, \
+ .shift = 0, \
}, \
.ext_info = ad7303_ext_info, \
}
diff --git a/drivers/iio/dac/cio-dac.c b/drivers/iio/dac/cio-dac.c
index 5a743e2a779d..a0464227a3a0 100644
--- a/drivers/iio/dac/cio-dac.c
+++ b/drivers/iio/dac/cio-dac.c
@@ -119,6 +119,7 @@ static int cio_dac_probe(struct device *dev, unsigned int id)
indio_dev->channels = cio_dac_channels;
indio_dev->num_channels = CIO_DAC_NUM_CHAN;
indio_dev->name = dev_name(dev);
+ indio_dev->dev.parent = dev;
priv = iio_priv(indio_dev);
priv->base = base[id];
diff --git a/drivers/iio/dac/ltc2632.c b/drivers/iio/dac/ltc2632.c
new file mode 100644
index 000000000000..ac5e05f6eb8b
--- /dev/null
+++ b/drivers/iio/dac/ltc2632.c
@@ -0,0 +1,314 @@
+/*
+ * LTC2632 digital-to-analog converter SPI driver
+ *
+ * Copyright 2017 Maxime Roussin-Bélanger
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/device.h>
+#include <linux/spi/spi.h>
+#include <linux/module.h>
+#include <linux/iio/iio.h>
+
+#define LTC2632_DAC_CHANNELS 2
+
+#define LTC2632_ADDR_DAC0 0x0
+#define LTC2632_ADDR_DAC1 0x1
+
+#define LTC2632_CMD_WRITE_INPUT_N 0x0
+#define LTC2632_CMD_UPDATE_DAC_N 0x1
+#define LTC2632_CMD_WRITE_INPUT_N_UPDATE_ALL 0x2
+#define LTC2632_CMD_WRITE_INPUT_N_UPDATE_N 0x3
+#define LTC2632_CMD_POWERDOWN_DAC_N 0x4
+#define LTC2632_CMD_POWERDOWN_CHIP 0x5
+#define LTC2632_CMD_INTERNAL_REFER 0x6
+#define LTC2632_CMD_EXTERNAL_REFER 0x7
+
+/**
+ * struct ltc2632_chip_info - chip specific information
+ * @channels: channel spec for the DAC
+ * @vref_mv: reference voltage
+ */
+struct ltc2632_chip_info {
+ const struct iio_chan_spec *channels;
+ const int vref_mv;
+};
+
+/**
+ * struct ltc2632_state - driver instance specific data
+ * @spi_dev: pointer to the spi_device struct
+ * @powerdown_cache_mask: used to show current channel powerdown state
+ */
+struct ltc2632_state {
+ struct spi_device *spi_dev;
+ unsigned int powerdown_cache_mask;
+};
+
+enum ltc2632_supported_device_ids {
+ ID_LTC2632L12,
+ ID_LTC2632L10,
+ ID_LTC2632L8,
+ ID_LTC2632H12,
+ ID_LTC2632H10,
+ ID_LTC2632H8,
+};
+
+static int ltc2632_spi_write(struct spi_device *spi,
+ u8 cmd, u8 addr, u16 val, u8 shift)
+{
+ u32 data;
+ u8 msg[3];
+
+ /*
+ * The input shift register is 24 bits wide: the four command bits,
+ * C3 to C0, come first, followed by the 4-bit DAC address, A3 to A0,
+ * and then the 12-, 10- or 8-bit data word. The data word comprises
+ * the 12-, 10- or 8-bit input code followed by 4, 6 or 8 don't-care
+ * bits.
+ */
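+ /*
+ * Illustrative example for a 12-bit part (shift = 4): cmd = 0x3
+ * (write input n, update n), addr = 0 and val = 0xabc give
+ * data = 0x30abc0, transmitted MSB first as 0x30 0xab 0xc0.
+ */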
+ data = (cmd << 20) | (addr << 16) | (val << shift);
+ msg[0] = data >> 16;
+ msg[1] = data >> 8;
+ msg[2] = data;
+
+ return spi_write(spi, msg, sizeof(msg));
+}
+
+static int ltc2632_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val,
+ int *val2,
+ long m)
+{
+ struct ltc2632_chip_info *chip_info;
+
+ const struct ltc2632_state *st = iio_priv(indio_dev);
+ const struct spi_device_id *spi_dev_id = spi_get_device_id(st->spi_dev);
+
+ chip_info = (struct ltc2632_chip_info *)spi_dev_id->driver_data;
+
+ switch (m) {
+ case IIO_CHAN_INFO_SCALE:
+ *val = chip_info->vref_mv;
+ *val2 = chan->scan_type.realbits;
+ return IIO_VAL_FRACTIONAL_LOG2;
+ }
+ return -EINVAL;
+}
+
+static int ltc2632_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val,
+ int val2,
+ long mask)
+{
+ struct ltc2632_state *st = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ if (val >= (1 << chan->scan_type.realbits) || val < 0)
+ return -EINVAL;
+
+ return ltc2632_spi_write(st->spi_dev,
+ LTC2632_CMD_WRITE_INPUT_N_UPDATE_N,
+ chan->address, val,
+ chan->scan_type.shift);
+ default:
+ return -EINVAL;
+ }
+}
+
+static ssize_t ltc2632_read_dac_powerdown(struct iio_dev *indio_dev,
+ uintptr_t private,
+ const struct iio_chan_spec *chan,
+ char *buf)
+{
+ struct ltc2632_state *st = iio_priv(indio_dev);
+
+ return sprintf(buf, "%d\n",
+ !!(st->powerdown_cache_mask & (1 << chan->channel)));
+}
+
+static ssize_t ltc2632_write_dac_powerdown(struct iio_dev *indio_dev,
+ uintptr_t private,
+ const struct iio_chan_spec *chan,
+ const char *buf,
+ size_t len)
+{
+ bool pwr_down;
+ int ret;
+ struct ltc2632_state *st = iio_priv(indio_dev);
+
+ ret = strtobool(buf, &pwr_down);
+ if (ret)
+ return ret;
+
+ if (pwr_down)
+ st->powerdown_cache_mask |= (1 << chan->channel);
+ else
+ st->powerdown_cache_mask &= ~(1 << chan->channel);
+
+ ret = ltc2632_spi_write(st->spi_dev,
+ LTC2632_CMD_POWERDOWN_DAC_N,
+ chan->channel, 0, 0);
+
+ return ret ? ret : len;
+}
+
+static const struct iio_info ltc2632_info = {
+ .write_raw = ltc2632_write_raw,
+ .read_raw = ltc2632_read_raw,
+ .driver_module = THIS_MODULE,
+};
+
+static const struct iio_chan_spec_ext_info ltc2632_ext_info[] = {
+ {
+ .name = "powerdown",
+ .read = ltc2632_read_dac_powerdown,
+ .write = ltc2632_write_dac_powerdown,
+ .shared = IIO_SEPARATE,
+ },
+ { },
+};
+
+#define LTC2632_CHANNEL(_chan, _bits) { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .output = 1, \
+ .channel = (_chan), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+ .address = (_chan), \
+ .scan_type = { \
+ .realbits = (_bits), \
+ .shift = 16 - (_bits), \
+ }, \
+ .ext_info = ltc2632_ext_info, \
+}
+
+#define DECLARE_LTC2632_CHANNELS(_name, _bits) \
+ const struct iio_chan_spec _name ## _channels[] = { \
+ LTC2632_CHANNEL(0, _bits), \
+ LTC2632_CHANNEL(1, _bits), \
+ }
+
+static DECLARE_LTC2632_CHANNELS(ltc2632l12, 12);
+static DECLARE_LTC2632_CHANNELS(ltc2632l10, 10);
+static DECLARE_LTC2632_CHANNELS(ltc2632l8, 8);
+
+static DECLARE_LTC2632_CHANNELS(ltc2632h12, 12);
+static DECLARE_LTC2632_CHANNELS(ltc2632h10, 10);
+static DECLARE_LTC2632_CHANNELS(ltc2632h8, 8);
+
+static const struct ltc2632_chip_info ltc2632_chip_info_tbl[] = {
+ [ID_LTC2632L12] = {
+ .channels = ltc2632l12_channels,
+ .vref_mv = 2500,
+ },
+ [ID_LTC2632L10] = {
+ .channels = ltc2632l10_channels,
+ .vref_mv = 2500,
+ },
+ [ID_LTC2632L8] = {
+ .channels = ltc2632l8_channels,
+ .vref_mv = 2500,
+ },
+ [ID_LTC2632H12] = {
+ .channels = ltc2632h12_channels,
+ .vref_mv = 4096,
+ },
+ [ID_LTC2632H10] = {
+ .channels = ltc2632h10_channels,
+ .vref_mv = 4096,
+ },
+ [ID_LTC2632H8] = {
+ .channels = ltc2632h8_channels,
+ .vref_mv = 4096,
+ },
+};
+
+static int ltc2632_probe(struct spi_device *spi)
+{
+ struct ltc2632_state *st;
+ struct iio_dev *indio_dev;
+ struct ltc2632_chip_info *chip_info;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ st = iio_priv(indio_dev);
+
+ spi_set_drvdata(spi, indio_dev);
+ st->spi_dev = spi;
+
+ chip_info = (struct ltc2632_chip_info *)
+ spi_get_device_id(spi)->driver_data;
+
+ indio_dev->dev.parent = &spi->dev;
+ indio_dev->name = dev_of_node(&spi->dev) ? dev_of_node(&spi->dev)->name
+ : spi_get_device_id(spi)->name;
+ indio_dev->info = &ltc2632_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = chip_info->channels;
+ indio_dev->num_channels = LTC2632_DAC_CHANNELS;
+
+ ret = ltc2632_spi_write(spi, LTC2632_CMD_INTERNAL_REFER, 0, 0, 0);
+ if (ret) {
+ dev_err(&spi->dev,
+ "Set internal reference command failed, %d\n", ret);
+ return ret;
+ }
+
+ return devm_iio_device_register(&spi->dev, indio_dev);
+}
+
+static const struct spi_device_id ltc2632_id[] = {
+ { "ltc2632-l12", (kernel_ulong_t)&ltc2632_chip_info_tbl[ID_LTC2632L12] },
+ { "ltc2632-l10", (kernel_ulong_t)&ltc2632_chip_info_tbl[ID_LTC2632L10] },
+ { "ltc2632-l8", (kernel_ulong_t)&ltc2632_chip_info_tbl[ID_LTC2632L8] },
+ { "ltc2632-h12", (kernel_ulong_t)&ltc2632_chip_info_tbl[ID_LTC2632H12] },
+ { "ltc2632-h10", (kernel_ulong_t)&ltc2632_chip_info_tbl[ID_LTC2632H10] },
+ { "ltc2632-h8", (kernel_ulong_t)&ltc2632_chip_info_tbl[ID_LTC2632H8] },
+ {}
+};
+MODULE_DEVICE_TABLE(spi, ltc2632_id);
+
+static struct spi_driver ltc2632_driver = {
+ .driver = {
+ .name = "ltc2632",
+ },
+ .probe = ltc2632_probe,
+ .id_table = ltc2632_id,
+};
+module_spi_driver(ltc2632_driver);
+
+static const struct of_device_id ltc2632_of_match[] = {
+ {
+ .compatible = "lltc,ltc2632-l12",
+ .data = &ltc2632_chip_info_tbl[ID_LTC2632L12]
+ }, {
+ .compatible = "lltc,ltc2632-l10",
+ .data = &ltc2632_chip_info_tbl[ID_LTC2632L10]
+ }, {
+ .compatible = "lltc,ltc2632-l8",
+ .data = &ltc2632_chip_info_tbl[ID_LTC2632L8]
+ }, {
+ .compatible = "lltc,ltc2632-h12",
+ .data = &ltc2632_chip_info_tbl[ID_LTC2632H12]
+ }, {
+ .compatible = "lltc,ltc2632-h10",
+ .data = &ltc2632_chip_info_tbl[ID_LTC2632H10]
+ }, {
+ .compatible = "lltc,ltc2632-h8",
+ .data = &ltc2632_chip_info_tbl[ID_LTC2632H8]
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, ltc2632_of_match);
+
+MODULE_AUTHOR("Maxime Roussin-Belanger <maxime.roussinbelanger@gmail.com>");
+MODULE_DESCRIPTION("LTC2632 DAC SPI driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/dac/max5821.c b/drivers/iio/dac/max5821.c
index 86e9e112f554..193fac3059a3 100644
--- a/drivers/iio/dac/max5821.c
+++ b/drivers/iio/dac/max5821.c
@@ -392,6 +392,7 @@ MODULE_DEVICE_TABLE(of, max5821_of_match);
static struct i2c_driver max5821_driver = {
.driver = {
.name = "max5821",
+ .of_match_table = max5821_of_match,
.pm = MAX5821_PM_OPS,
},
.probe = max5821_probe,
diff --git a/drivers/iio/dac/mcp4725.c b/drivers/iio/dac/mcp4725.c
index db109f0cdd8c..6ab1f23e5a79 100644
--- a/drivers/iio/dac/mcp4725.c
+++ b/drivers/iio/dac/mcp4725.c
@@ -19,6 +19,7 @@
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/regulator/consumer.h>
+#include <linux/of_device.h>
#include <linux/of.h>
#include <linux/iio/iio.h>
@@ -199,7 +200,7 @@ static ssize_t mcp4725_write_powerdown(struct iio_dev *indio_dev,
return len;
}
-enum {
+enum chip_id {
MCP4725,
MCP4726,
};
@@ -406,7 +407,10 @@ static int mcp4725_probe(struct i2c_client *client,
data = iio_priv(indio_dev);
i2c_set_clientdata(client, indio_dev);
data->client = client;
- data->id = id->driver_data;
+ if (client->dev.of_node)
+ data->id = (enum chip_id)of_device_get_match_data(&client->dev);
+ else
+ data->id = id->driver_data;
pdata = dev_get_platdata(&client->dev);
if (!pdata) {
@@ -525,9 +529,25 @@ static const struct i2c_device_id mcp4725_id[] = {
};
MODULE_DEVICE_TABLE(i2c, mcp4725_id);
+#ifdef CONFIG_OF
+static const struct of_device_id mcp4725_of_match[] = {
+ {
+ .compatible = "microchip,mcp4725",
+ .data = (void *)MCP4725
+ },
+ {
+ .compatible = "microchip,mcp4726",
+ .data = (void *)MCP4726
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(of, mcp4725_of_match);
+#endif
+
static struct i2c_driver mcp4725_driver = {
.driver = {
.name = MCP4725_DRV_NAME,
+ .of_match_table = of_match_ptr(mcp4725_of_match),
.pm = MCP4725_PM_OPS,
},
.probe = mcp4725_probe,
diff --git a/drivers/iio/dac/stm32-dac-core.c b/drivers/iio/dac/stm32-dac-core.c
new file mode 100644
index 000000000000..75e48788c7ea
--- /dev/null
+++ b/drivers/iio/dac/stm32-dac-core.c
@@ -0,0 +1,180 @@
+/*
+ * This file is part of STM32 DAC driver
+ *
+ * Copyright (C) 2017, STMicroelectronics - All Rights Reserved
+ * Author: Fabrice Gasnier <fabrice.gasnier@st.com>.
+ *
+ * License type: GPLv2
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
+
+#include "stm32-dac-core.h"
+
+/**
+ * struct stm32_dac_priv - stm32 DAC core private data
+ * @pclk: peripheral clock common for all DACs
+ * @rst: peripheral reset control
+ * @vref: regulator reference
+ * @common: Common data for all DAC instances
+ */
+struct stm32_dac_priv {
+ struct clk *pclk;
+ struct reset_control *rst;
+ struct regulator *vref;
+ struct stm32_dac_common common;
+};
+
+static struct stm32_dac_priv *to_stm32_dac_priv(struct stm32_dac_common *com)
+{
+ return container_of(com, struct stm32_dac_priv, common);
+}
+
+static const struct regmap_config stm32_dac_regmap_cfg = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = sizeof(u32),
+ .max_register = 0x3fc,
+};
+
+static int stm32_dac_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct stm32_dac_priv *priv;
+ struct regmap *regmap;
+ struct resource *res;
+ void __iomem *mmio;
+ int ret;
+
+ if (!dev->of_node)
+ return -ENODEV;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ mmio = devm_ioremap_resource(dev, res);
+ if (IS_ERR(mmio))
+ return PTR_ERR(mmio);
+
+ regmap = devm_regmap_init_mmio(dev, mmio, &stm32_dac_regmap_cfg);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+ priv->common.regmap = regmap;
+
+ priv->vref = devm_regulator_get(dev, "vref");
+ if (IS_ERR(priv->vref)) {
+ ret = PTR_ERR(priv->vref);
+ dev_err(dev, "vref get failed, %d\n", ret);
+ return ret;
+ }
+
+ ret = regulator_enable(priv->vref);
+ if (ret < 0) {
+ dev_err(dev, "vref enable failed\n");
+ return ret;
+ }
+
+ ret = regulator_get_voltage(priv->vref);
+ if (ret < 0) {
+ dev_err(dev, "vref get voltage failed, %d\n", ret);
+ goto err_vref;
+ }
+ priv->common.vref_mv = ret / 1000;
+ dev_dbg(dev, "vref+=%dmV\n", priv->common.vref_mv);
+
+ priv->pclk = devm_clk_get(dev, "pclk");
+ if (IS_ERR(priv->pclk)) {
+ ret = PTR_ERR(priv->pclk);
+ dev_err(dev, "pclk get failed\n");
+ goto err_vref;
+ }
+
+ ret = clk_prepare_enable(priv->pclk);
+ if (ret < 0) {
+ dev_err(dev, "pclk enable failed\n");
+ goto err_vref;
+ }
+
+ priv->rst = devm_reset_control_get(dev, NULL);
+ if (!IS_ERR(priv->rst)) {
+ reset_control_assert(priv->rst);
+ udelay(2);
+ reset_control_deassert(priv->rst);
+ }
+
+ /* When clock speed is higher than 80MHz, set HFSEL */
+ priv->common.hfsel = (clk_get_rate(priv->pclk) > 80000000UL);
+ ret = regmap_update_bits(regmap, STM32_DAC_CR, STM32H7_DAC_CR_HFSEL,
+ priv->common.hfsel ? STM32H7_DAC_CR_HFSEL : 0);
+ if (ret)
+ goto err_pclk;
+
+ platform_set_drvdata(pdev, &priv->common);
+
+ ret = of_platform_populate(pdev->dev.of_node, NULL, NULL, dev);
+ if (ret < 0) {
+ dev_err(dev, "failed to populate DT children\n");
+ goto err_pclk;
+ }
+
+ return 0;
+
+err_pclk:
+ clk_disable_unprepare(priv->pclk);
+err_vref:
+ regulator_disable(priv->vref);
+
+ return ret;
+}
+
+static int stm32_dac_remove(struct platform_device *pdev)
+{
+ struct stm32_dac_common *common = platform_get_drvdata(pdev);
+ struct stm32_dac_priv *priv = to_stm32_dac_priv(common);
+
+ of_platform_depopulate(&pdev->dev);
+ clk_disable_unprepare(priv->pclk);
+ regulator_disable(priv->vref);
+
+ return 0;
+}
+
+static const struct of_device_id stm32_dac_of_match[] = {
+ { .compatible = "st,stm32h7-dac-core", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, stm32_dac_of_match);
+
+static struct platform_driver stm32_dac_driver = {
+ .probe = stm32_dac_probe,
+ .remove = stm32_dac_remove,
+ .driver = {
+ .name = "stm32-dac-core",
+ .of_match_table = stm32_dac_of_match,
+ },
+};
+module_platform_driver(stm32_dac_driver);
+
+MODULE_AUTHOR("Fabrice Gasnier <fabrice.gasnier@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics STM32 DAC core driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:stm32-dac-core");
diff --git a/drivers/iio/dac/stm32-dac-core.h b/drivers/iio/dac/stm32-dac-core.h
new file mode 100644
index 000000000000..daf09931857c
--- /dev/null
+++ b/drivers/iio/dac/stm32-dac-core.h
@@ -0,0 +1,51 @@
+/*
+ * This file is part of STM32 DAC driver
+ *
+ * Copyright (C) 2017, STMicroelectronics - All Rights Reserved
+ * Author: Fabrice Gasnier <fabrice.gasnier@st.com>.
+ *
+ * License type: GPLv2
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __STM32_DAC_CORE_H
+#define __STM32_DAC_CORE_H
+
+#include <linux/regmap.h>
+
+/* STM32 DAC registers */
+#define STM32_DAC_CR 0x00
+#define STM32_DAC_DHR12R1 0x08
+#define STM32_DAC_DHR12R2 0x14
+#define STM32_DAC_DOR1 0x2C
+#define STM32_DAC_DOR2 0x30
+
+/* STM32_DAC_CR bit fields */
+#define STM32_DAC_CR_EN1 BIT(0)
+#define STM32H7_DAC_CR_HFSEL BIT(15)
+#define STM32_DAC_CR_EN2 BIT(16)
+
+/**
+ * struct stm32_dac_common - stm32 DAC driver common data (for all instances)
+ * @regmap: DAC registers shared via regmap
+ * @vref_mv: reference voltage (mv)
+ * @hfsel: high speed bus clock selected
+ */
+struct stm32_dac_common {
+ struct regmap *regmap;
+ int vref_mv;
+ bool hfsel;
+};
+
+#endif
diff --git a/drivers/iio/dac/stm32-dac.c b/drivers/iio/dac/stm32-dac.c
new file mode 100644
index 000000000000..50f8ec091058
--- /dev/null
+++ b/drivers/iio/dac/stm32-dac.c
@@ -0,0 +1,334 @@
+/*
+ * This file is part of STM32 DAC driver
+ *
+ * Copyright (C) 2017, STMicroelectronics - All Rights Reserved
+ * Authors: Amelie Delaunay <amelie.delaunay@st.com>
+ * Fabrice Gasnier <fabrice.gasnier@st.com>
+ *
+ * License type: GPLv2
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/iio/iio.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include "stm32-dac-core.h"
+
+#define STM32_DAC_CHANNEL_1 1
+#define STM32_DAC_CHANNEL_2 2
+#define STM32_DAC_IS_CHAN_1(ch) ((ch) & STM32_DAC_CHANNEL_1)
+
+/**
+ * struct stm32_dac - private data of DAC driver
+ * @common: reference to DAC common data
+ */
+struct stm32_dac {
+ struct stm32_dac_common *common;
+};
+
+static int stm32_dac_is_enabled(struct iio_dev *indio_dev, int channel)
+{
+ struct stm32_dac *dac = iio_priv(indio_dev);
+ u32 en, val;
+ int ret;
+
+ ret = regmap_read(dac->common->regmap, STM32_DAC_CR, &val);
+ if (ret < 0)
+ return ret;
+ if (STM32_DAC_IS_CHAN_1(channel))
+ en = FIELD_GET(STM32_DAC_CR_EN1, val);
+ else
+ en = FIELD_GET(STM32_DAC_CR_EN2, val);
+
+ return !!en;
+}
+
+static int stm32_dac_set_enable_state(struct iio_dev *indio_dev, int ch,
+ bool enable)
+{
+ struct stm32_dac *dac = iio_priv(indio_dev);
+ u32 msk = STM32_DAC_IS_CHAN_1(ch) ? STM32_DAC_CR_EN1 : STM32_DAC_CR_EN2;
+ u32 en = enable ? msk : 0;
+ int ret;
+
+ ret = regmap_update_bits(dac->common->regmap, STM32_DAC_CR, msk, en);
+ if (ret < 0) {
+ dev_err(&indio_dev->dev, "%s failed\n", en ?
+ "Enable" : "Disable");
+ return ret;
+ }
+
+ /*
+ * When HFSEL is set, the DHRx register must not be written for 8
+ * clock cycles after the ENx bit is set, and no software or hardware
+ * trigger may be issued during that period either.
+ */
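+ /*
+ * HFSEL is only set when the bus clock exceeds 80 MHz, so 8 clock
+ * cycles amount to less than 100 ns and the 1 us delay below is ample.
+ */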
+ if (en && dac->common->hfsel)
+ udelay(1);
+
+ return 0;
+}
+
+static int stm32_dac_get_value(struct stm32_dac *dac, int channel, int *val)
+{
+ int ret;
+
+ if (STM32_DAC_IS_CHAN_1(channel))
+ ret = regmap_read(dac->common->regmap, STM32_DAC_DOR1, val);
+ else
+ ret = regmap_read(dac->common->regmap, STM32_DAC_DOR2, val);
+
+ return ret ? ret : IIO_VAL_INT;
+}
+
+static int stm32_dac_set_value(struct stm32_dac *dac, int channel, int val)
+{
+ int ret;
+
+ if (STM32_DAC_IS_CHAN_1(channel))
+ ret = regmap_write(dac->common->regmap, STM32_DAC_DHR12R1, val);
+ else
+ ret = regmap_write(dac->common->regmap, STM32_DAC_DHR12R2, val);
+
+ return ret;
+}
+
+static int stm32_dac_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct stm32_dac *dac = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ return stm32_dac_get_value(dac, chan->channel, val);
+ case IIO_CHAN_INFO_SCALE:
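+ /*
+ * Scale is vref_mv / 2^realbits; for example, with an assumed 3.3 V
+ * reference this is 3300 / 4096 ~= 0.806 mV per LSB.
+ */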
+ *val = dac->common->vref_mv;
+ *val2 = chan->scan_type.realbits;
+ return IIO_VAL_FRACTIONAL_LOG2;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int stm32_dac_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct stm32_dac *dac = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ return stm32_dac_set_value(dac, chan->channel, val);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int stm32_dac_debugfs_reg_access(struct iio_dev *indio_dev,
+ unsigned reg, unsigned writeval,
+ unsigned *readval)
+{
+ struct stm32_dac *dac = iio_priv(indio_dev);
+
+ if (!readval)
+ return regmap_write(dac->common->regmap, reg, writeval);
+ else
+ return regmap_read(dac->common->regmap, reg, readval);
+}
+
+static const struct iio_info stm32_dac_iio_info = {
+ .read_raw = stm32_dac_read_raw,
+ .write_raw = stm32_dac_write_raw,
+ .debugfs_reg_access = stm32_dac_debugfs_reg_access,
+ .driver_module = THIS_MODULE,
+};
+
+static const char * const stm32_dac_powerdown_modes[] = {
+ "three_state",
+};
+
+static int stm32_dac_get_powerdown_mode(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
+{
+ return 0;
+}
+
+static int stm32_dac_set_powerdown_mode(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ unsigned int type)
+{
+ return 0;
+}
+
+static ssize_t stm32_dac_read_powerdown(struct iio_dev *indio_dev,
+ uintptr_t private,
+ const struct iio_chan_spec *chan,
+ char *buf)
+{
+ int ret = stm32_dac_is_enabled(indio_dev, chan->channel);
+
+ if (ret < 0)
+ return ret;
+
+ return sprintf(buf, "%d\n", ret ? 0 : 1);
+}
+
+static ssize_t stm32_dac_write_powerdown(struct iio_dev *indio_dev,
+ uintptr_t private,
+ const struct iio_chan_spec *chan,
+ const char *buf, size_t len)
+{
+ bool powerdown;
+ int ret;
+
+ ret = strtobool(buf, &powerdown);
+ if (ret)
+ return ret;
+
+ ret = stm32_dac_set_enable_state(indio_dev, chan->channel, !powerdown);
+ if (ret)
+ return ret;
+
+ return len;
+}
+
+static const struct iio_enum stm32_dac_powerdown_mode_en = {
+ .items = stm32_dac_powerdown_modes,
+ .num_items = ARRAY_SIZE(stm32_dac_powerdown_modes),
+ .get = stm32_dac_get_powerdown_mode,
+ .set = stm32_dac_set_powerdown_mode,
+};
+
+static const struct iio_chan_spec_ext_info stm32_dac_ext_info[] = {
+ {
+ .name = "powerdown",
+ .read = stm32_dac_read_powerdown,
+ .write = stm32_dac_write_powerdown,
+ .shared = IIO_SEPARATE,
+ },
+ IIO_ENUM("powerdown_mode", IIO_SEPARATE, &stm32_dac_powerdown_mode_en),
+ IIO_ENUM_AVAILABLE("powerdown_mode", &stm32_dac_powerdown_mode_en),
+ {},
+};
+
+#define STM32_DAC_CHANNEL(chan, name) { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .output = 1, \
+ .channel = chan, \
+ .info_mask_separate = \
+ BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE), \
+ /* scan_index is always 0 as num_channels is 1 */ \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = 12, \
+ .storagebits = 16, \
+ }, \
+ .datasheet_name = name, \
+ .ext_info = stm32_dac_ext_info \
+}
+
+static const struct iio_chan_spec stm32_dac_channels[] = {
+ STM32_DAC_CHANNEL(STM32_DAC_CHANNEL_1, "out1"),
+ STM32_DAC_CHANNEL(STM32_DAC_CHANNEL_2, "out2"),
+};
+
+static int stm32_dac_chan_of_init(struct iio_dev *indio_dev)
+{
+ struct device_node *np = indio_dev->dev.of_node;
+ unsigned int i;
+ u32 channel;
+ int ret;
+
+ ret = of_property_read_u32(np, "reg", &channel);
+ if (ret) {
+ dev_err(&indio_dev->dev, "Failed to read reg property\n");
+ return ret;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(stm32_dac_channels); i++) {
+ if (stm32_dac_channels[i].channel == channel)
+ break;
+ }
+ if (i >= ARRAY_SIZE(stm32_dac_channels)) {
+ dev_err(&indio_dev->dev, "Invalid st,dac-channel\n");
+ return -EINVAL;
+ }
+
+ indio_dev->channels = &stm32_dac_channels[i];
+ /*
+ * Expose only one channel here, as the two channels can be used
+ * independently, each with its own trigger; a separate IIO device
+ * is instantiated for each of them.
+ */
+ indio_dev->num_channels = 1;
+
+ return 0;
+};
+
+static int stm32_dac_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct iio_dev *indio_dev;
+ struct stm32_dac *dac;
+ int ret;
+
+ if (!np)
+ return -ENODEV;
+
+ indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*dac));
+ if (!indio_dev)
+ return -ENOMEM;
+ platform_set_drvdata(pdev, indio_dev);
+
+ dac = iio_priv(indio_dev);
+ dac->common = dev_get_drvdata(pdev->dev.parent);
+ indio_dev->name = dev_name(&pdev->dev);
+ indio_dev->dev.parent = &pdev->dev;
+ indio_dev->dev.of_node = pdev->dev.of_node;
+ indio_dev->info = &stm32_dac_iio_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ ret = stm32_dac_chan_of_init(indio_dev);
+ if (ret < 0)
+ return ret;
+
+ return devm_iio_device_register(&pdev->dev, indio_dev);
+}
+
+static const struct of_device_id stm32_dac_of_match[] = {
+ { .compatible = "st,stm32-dac", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, stm32_dac_of_match);
+
+static struct platform_driver stm32_dac_driver = {
+ .probe = stm32_dac_probe,
+ .driver = {
+ .name = "stm32-dac",
+ .of_match_table = stm32_dac_of_match,
+ },
+};
+module_platform_driver(stm32_dac_driver);
+
+MODULE_ALIAS("platform:stm32-dac");
+MODULE_AUTHOR("Amelie Delaunay <amelie.delaunay@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics STM32 DAC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/gyro/itg3200_core.c b/drivers/iio/gyro/itg3200_core.c
index c102a6325bb0..cfa2db04a8ab 100644
--- a/drivers/iio/gyro/itg3200_core.c
+++ b/drivers/iio/gyro/itg3200_core.c
@@ -377,9 +377,16 @@ static const struct i2c_device_id itg3200_id[] = {
};
MODULE_DEVICE_TABLE(i2c, itg3200_id);
+static const struct of_device_id itg3200_of_match[] = {
+ { .compatible = "invensense,itg3200" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, itg3200_of_match);
+
static struct i2c_driver itg3200_driver = {
.driver = {
.name = "itg3200",
+ .of_match_table = itg3200_of_match,
.pm = &itg3200_pm_ops,
},
.id_table = itg3200_id,
diff --git a/drivers/iio/gyro/mpu3050-i2c.c b/drivers/iio/gyro/mpu3050-i2c.c
index 06007200bf49..93f08b304a63 100644
--- a/drivers/iio/gyro/mpu3050-i2c.c
+++ b/drivers/iio/gyro/mpu3050-i2c.c
@@ -70,9 +70,8 @@ static int mpu3050_i2c_probe(struct i2c_client *client,
dev_err(&client->dev, "failed to allocate I2C mux\n");
else {
mpu3050->i2cmux->priv = mpu3050;
- ret = i2c_mux_add_adapter(mpu3050->i2cmux, 0, 0, 0);
- if (ret)
- dev_err(&client->dev, "failed to add I2C mux\n");
+ /* Ignore failure, not critical */
+ i2c_mux_add_adapter(mpu3050->i2cmux, 0, 0, 0);
}
return 0;
diff --git a/drivers/iio/health/Kconfig b/drivers/iio/health/Kconfig
index c5f004a8e447..a2ecb4c94c2a 100644
--- a/drivers/iio/health/Kconfig
+++ b/drivers/iio/health/Kconfig
@@ -46,6 +46,19 @@ config MAX30100
To compile this driver as a module, choose M here: the
module will be called max30100.
+config MAX30102
+ tristate "MAX30102 heart rate and pulse oximeter sensor"
+ depends on I2C
+ select REGMAP_I2C
+ select IIO_BUFFER
+ select IIO_KFIFO_BUF
+ help
+ Say Y here to build I2C interface support for the Maxim
+ MAX30102 heart rate and pulse oximeter sensor.
+
+ To compile this driver as a module, choose M here: the
+ module will be called max30102.
+
endmenu
endmenu
diff --git a/drivers/iio/health/Makefile b/drivers/iio/health/Makefile
index 9955a2ae8df1..3558f9d64954 100644
--- a/drivers/iio/health/Makefile
+++ b/drivers/iio/health/Makefile
@@ -7,3 +7,4 @@
obj-$(CONFIG_AFE4403) += afe4403.o
obj-$(CONFIG_AFE4404) += afe4404.o
obj-$(CONFIG_MAX30100) += max30100.o
+obj-$(CONFIG_MAX30102) += max30102.o
diff --git a/drivers/iio/health/max30100.c b/drivers/iio/health/max30100.c
index f6e283c4d686..849d71747f9f 100644
--- a/drivers/iio/health/max30100.c
+++ b/drivers/iio/health/max30100.c
@@ -449,6 +449,7 @@ static int max30100_probe(struct i2c_client *client,
indio_dev->available_scan_masks = max30100_scan_masks;
indio_dev->modes = (INDIO_BUFFER_SOFTWARE | INDIO_DIRECT_MODE);
indio_dev->setup_ops = &max30100_buffer_setup_ops;
+ indio_dev->dev.parent = &client->dev;
data = iio_priv(indio_dev);
data->indio_dev = indio_dev;
diff --git a/drivers/iio/health/max30102.c b/drivers/iio/health/max30102.c
new file mode 100644
index 000000000000..839b875c29b9
--- /dev/null
+++ b/drivers/iio/health/max30102.c
@@ -0,0 +1,486 @@
+/*
+ * max30102.c - Support for MAX30102 heart rate and pulse oximeter sensor
+ *
+ * Copyright (C) 2017 Matt Ranostay <matt@ranostay.consulting>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * TODO: proximity power saving feature
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/irq.h>
+#include <linux/i2c.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/kfifo_buf.h>
+
+#define MAX30102_REGMAP_NAME "max30102_regmap"
+#define MAX30102_DRV_NAME "max30102"
+
+#define MAX30102_REG_INT_STATUS 0x00
+#define MAX30102_REG_INT_STATUS_PWR_RDY BIT(0)
+#define MAX30102_REG_INT_STATUS_PROX_INT BIT(4)
+#define MAX30102_REG_INT_STATUS_ALC_OVF BIT(5)
+#define MAX30102_REG_INT_STATUS_PPG_RDY BIT(6)
+#define MAX30102_REG_INT_STATUS_FIFO_RDY BIT(7)
+
+#define MAX30102_REG_INT_ENABLE 0x02
+#define MAX30102_REG_INT_ENABLE_PROX_INT_EN BIT(4)
+#define MAX30102_REG_INT_ENABLE_ALC_OVF_EN BIT(5)
+#define MAX30102_REG_INT_ENABLE_PPG_EN BIT(6)
+#define MAX30102_REG_INT_ENABLE_FIFO_EN BIT(7)
+#define MAX30102_REG_INT_ENABLE_MASK 0xf0
+#define MAX30102_REG_INT_ENABLE_MASK_SHIFT 4
+
+#define MAX30102_REG_FIFO_WR_PTR 0x04
+#define MAX30102_REG_FIFO_OVR_CTR 0x05
+#define MAX30102_REG_FIFO_RD_PTR 0x06
+#define MAX30102_REG_FIFO_DATA 0x07
+#define MAX30102_REG_FIFO_DATA_ENTRY_LEN 6
+
+#define MAX30102_REG_FIFO_CONFIG 0x08
+#define MAX30102_REG_FIFO_CONFIG_AVG_4SAMPLES BIT(1)
+#define MAX30102_REG_FIFO_CONFIG_AVG_SHIFT 5
+#define MAX30102_REG_FIFO_CONFIG_AFULL BIT(0)
+
+#define MAX30102_REG_MODE_CONFIG 0x09
+#define MAX30102_REG_MODE_CONFIG_MODE_SPO2_EN BIT(0)
+#define MAX30102_REG_MODE_CONFIG_MODE_HR_EN BIT(1)
+#define MAX30102_REG_MODE_CONFIG_MODE_MASK 0x03
+#define MAX30102_REG_MODE_CONFIG_PWR BIT(7)
+
+#define MAX30102_REG_SPO2_CONFIG 0x0a
+#define MAX30102_REG_SPO2_CONFIG_PULSE_411_US 0x03
+#define MAX30102_REG_SPO2_CONFIG_SR_400HZ 0x03
+#define MAX30102_REG_SPO2_CONFIG_SR_MASK 0x07
+#define MAX30102_REG_SPO2_CONFIG_SR_MASK_SHIFT 2
+#define MAX30102_REG_SPO2_CONFIG_ADC_4096_STEPS BIT(0)
+#define MAX30102_REG_SPO2_CONFIG_ADC_MASK_SHIFT 5
+
+#define MAX30102_REG_RED_LED_CONFIG 0x0c
+#define MAX30102_REG_IR_LED_CONFIG 0x0d
+
+#define MAX30102_REG_TEMP_CONFIG 0x21
+#define MAX30102_REG_TEMP_CONFIG_TEMP_EN BIT(0)
+
+#define MAX30102_REG_TEMP_INTEGER 0x1f
+#define MAX30102_REG_TEMP_FRACTION 0x20
+
+struct max30102_data {
+ struct i2c_client *client;
+ struct iio_dev *indio_dev;
+ struct mutex lock;
+ struct regmap *regmap;
+
+ u8 buffer[8];
+ __be32 processed_buffer[2]; /* 2 x 18-bit (padded to 32-bits) */
+};
+
+static const struct regmap_config max30102_regmap_config = {
+ .name = MAX30102_REGMAP_NAME,
+
+ .reg_bits = 8,
+ .val_bits = 8,
+};
+
+static const unsigned long max30102_scan_masks[] = {0x3, 0};
+
+static const struct iio_chan_spec max30102_channels[] = {
+ {
+ .type = IIO_INTENSITY,
+ .channel2 = IIO_MOD_LIGHT_RED,
+ .modified = 1,
+
+ .scan_index = 0,
+ .scan_type = {
+ .sign = 'u',
+ .shift = 8,
+ .realbits = 18,
+ .storagebits = 32,
+ .endianness = IIO_BE,
+ },
+ },
+ {
+ .type = IIO_INTENSITY,
+ .channel2 = IIO_MOD_LIGHT_IR,
+ .modified = 1,
+
+ .scan_index = 1,
+ .scan_type = {
+ .sign = 'u',
+ .shift = 8,
+ .realbits = 18,
+ .storagebits = 32,
+ .endianness = IIO_BE,
+ },
+ },
+ {
+ .type = IIO_TEMP,
+ .info_mask_separate =
+ BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
+ .scan_index = -1,
+ },
+};
+
+static int max30102_set_powermode(struct max30102_data *data, bool state)
+{
+ return regmap_update_bits(data->regmap, MAX30102_REG_MODE_CONFIG,
+ MAX30102_REG_MODE_CONFIG_PWR,
+ state ? 0 : MAX30102_REG_MODE_CONFIG_PWR);
+}
+
+static int max30102_buffer_postenable(struct iio_dev *indio_dev)
+{
+ struct max30102_data *data = iio_priv(indio_dev);
+
+ return max30102_set_powermode(data, true);
+}
+
+static int max30102_buffer_predisable(struct iio_dev *indio_dev)
+{
+ struct max30102_data *data = iio_priv(indio_dev);
+
+ return max30102_set_powermode(data, false);
+}
+
+static const struct iio_buffer_setup_ops max30102_buffer_setup_ops = {
+ .postenable = max30102_buffer_postenable,
+ .predisable = max30102_buffer_predisable,
+};
+
+static inline int max30102_fifo_count(struct max30102_data *data)
+{
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(data->regmap, MAX30102_REG_INT_STATUS, &val);
+ if (ret)
+ return ret;
+
+ /* FIFO has one sample slot left */
+ if (val & MAX30102_REG_INT_STATUS_FIFO_RDY)
+ return 1;
+
+ return 0;
+}
+
+static int max30102_read_measurement(struct max30102_data *data)
+{
+ int ret;
+ u8 *buffer = (u8 *) &data->buffer;
+
+ ret = i2c_smbus_read_i2c_block_data(data->client,
+ MAX30102_REG_FIFO_DATA,
+ MAX30102_REG_FIFO_DATA_ENTRY_LEN,
+ buffer);
+
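+	/*
+	 * Each FIFO entry holds two 3-byte big-endian samples (RED first,
+	 * then IR); copy them into the top bytes of the 32-bit scan words
+	 * to match the 18-bit, shift-8, IIO_BE scan_type description.
+	 */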
+ memcpy(&data->processed_buffer[0], &buffer[0], 3);
+ memcpy(&data->processed_buffer[1], &buffer[3], 3);
+
+ return (ret == MAX30102_REG_FIFO_DATA_ENTRY_LEN) ? 0 : -EINVAL;
+}
+
+static irqreturn_t max30102_interrupt_handler(int irq, void *private)
+{
+ struct iio_dev *indio_dev = private;
+ struct max30102_data *data = iio_priv(indio_dev);
+ int ret, cnt = 0;
+
+ mutex_lock(&data->lock);
+
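+	/*
+	 * Drain the FIFO: keep reading sample pairs while the almost-full
+	 * flag is set, pushing each one to the IIO buffer.
+	 */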
+ while (cnt || (cnt = max30102_fifo_count(data)) > 0) {
+ ret = max30102_read_measurement(data);
+ if (ret)
+ break;
+
+ iio_push_to_buffers(data->indio_dev, data->processed_buffer);
+ cnt--;
+ }
+
+ mutex_unlock(&data->lock);
+
+ return IRQ_HANDLED;
+}
+
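+/*
+ * LED current is programmed in 0.2 mA steps, so the 7 mA default maps to
+ * register value 35; requests above 51 mA (0xff) are rejected.
+ */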
+static int max30102_get_current_idx(unsigned int val, int *reg)
+{
+ /* each step is 0.200 mA */
+ *reg = val / 200;
+
+ return *reg > 0xff ? -EINVAL : 0;
+}
+
+static int max30102_led_init(struct max30102_data *data)
+{
+ struct device *dev = &data->client->dev;
+ struct device_node *np = dev->of_node;
+ unsigned int val;
+ int reg, ret;
+
+ ret = of_property_read_u32(np, "maxim,red-led-current-microamp", &val);
+ if (ret) {
+ dev_info(dev, "no red-led-current-microamp set\n");
+
+ /* Default to 7 mA RED LED */
+ val = 7000;
+ }
+
+ ret = max30102_get_current_idx(val, &reg);
+ if (ret) {
+ dev_err(dev, "invalid RED LED current setting %d\n", val);
+ return ret;
+ }
+
+ ret = regmap_write(data->regmap, MAX30102_REG_RED_LED_CONFIG, reg);
+ if (ret)
+ return ret;
+
+ ret = of_property_read_u32(np, "maxim,ir-led-current-microamp", &val);
+ if (ret) {
+		dev_info(dev, "no ir-led-current-microamp set\n");
+
+ /* Default to 7 mA IR LED */
+ val = 7000;
+ }
+
+ ret = max30102_get_current_idx(val, &reg);
+ if (ret) {
+		dev_err(dev, "invalid IR LED current setting %d\n", val);
+ return ret;
+ }
+
+ return regmap_write(data->regmap, MAX30102_REG_IR_LED_CONFIG, reg);
+}
+
+static int max30102_chip_init(struct max30102_data *data)
+{
+ int ret;
+
+ /* setup LED current settings */
+ ret = max30102_led_init(data);
+ if (ret)
+ return ret;
+
+ /* enable 18-bit HR + SPO2 readings at 400Hz */
+ ret = regmap_write(data->regmap, MAX30102_REG_SPO2_CONFIG,
+ (MAX30102_REG_SPO2_CONFIG_ADC_4096_STEPS
+ << MAX30102_REG_SPO2_CONFIG_ADC_MASK_SHIFT) |
+ (MAX30102_REG_SPO2_CONFIG_SR_400HZ
+ << MAX30102_REG_SPO2_CONFIG_SR_MASK_SHIFT) |
+ MAX30102_REG_SPO2_CONFIG_PULSE_411_US);
+ if (ret)
+ return ret;
+
+ /* enable SPO2 mode */
+ ret = regmap_update_bits(data->regmap, MAX30102_REG_MODE_CONFIG,
+ MAX30102_REG_MODE_CONFIG_MODE_MASK,
+ MAX30102_REG_MODE_CONFIG_MODE_HR_EN |
+ MAX30102_REG_MODE_CONFIG_MODE_SPO2_EN);
+ if (ret)
+ return ret;
+
+ /* average 4 samples + generate FIFO interrupt */
+ ret = regmap_write(data->regmap, MAX30102_REG_FIFO_CONFIG,
+ (MAX30102_REG_FIFO_CONFIG_AVG_4SAMPLES
+ << MAX30102_REG_FIFO_CONFIG_AVG_SHIFT) |
+ MAX30102_REG_FIFO_CONFIG_AFULL);
+ if (ret)
+ return ret;
+
+ /* enable FIFO interrupt */
+ return regmap_update_bits(data->regmap, MAX30102_REG_INT_ENABLE,
+ MAX30102_REG_INT_ENABLE_MASK,
+ MAX30102_REG_INT_ENABLE_FIFO_EN);
+}
+
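+/*
+ * Die temperature is a 12-bit two's complement value: TINT holds the
+ * signed integer part (upper 8 bits) and TFRAC a 4-bit fraction in
+ * 1/16 degC steps, hence the 1/16 scale reported for the IIO_TEMP channel.
+ */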
+static int max30102_read_temp(struct max30102_data *data, int *val)
+{
+ int ret;
+ unsigned int reg;
+
+ ret = regmap_read(data->regmap, MAX30102_REG_TEMP_INTEGER, &reg);
+ if (ret < 0)
+ return ret;
+ *val = reg << 4;
+
+ ret = regmap_read(data->regmap, MAX30102_REG_TEMP_FRACTION, &reg);
+ if (ret < 0)
+ return ret;
+
+ *val |= reg & 0xf;
+ *val = sign_extend32(*val, 11);
+
+ return 0;
+}
+
+static int max30102_get_temp(struct max30102_data *data, int *val)
+{
+ int ret;
+
+ /* start acquisition */
+ ret = regmap_update_bits(data->regmap, MAX30102_REG_TEMP_CONFIG,
+ MAX30102_REG_TEMP_CONFIG_TEMP_EN,
+ MAX30102_REG_TEMP_CONFIG_TEMP_EN);
+ if (ret)
+ return ret;
+
+ msleep(35);
+
+ return max30102_read_temp(data, val);
+}
+
+static int max30102_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct max30102_data *data = iio_priv(indio_dev);
+ int ret = -EINVAL;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ /*
+		 * Temperature readings can only be acquired while the
+		 * measurement engine is running (buffer enabled)
+ */
+ mutex_lock(&indio_dev->mlock);
+
+ if (!iio_buffer_enabled(indio_dev))
+ ret = -EBUSY;
+ else {
+ ret = max30102_get_temp(data, val);
+ if (!ret)
+ ret = IIO_VAL_INT;
+ }
+
+ mutex_unlock(&indio_dev->mlock);
+ break;
+ case IIO_CHAN_INFO_SCALE:
+ *val = 1; /* 0.0625 */
+ *val2 = 16;
+ ret = IIO_VAL_FRACTIONAL;
+ break;
+ }
+
+ return ret;
+}
+
+static const struct iio_info max30102_info = {
+ .driver_module = THIS_MODULE,
+ .read_raw = max30102_read_raw,
+};
+
+static int max30102_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct max30102_data *data;
+ struct iio_buffer *buffer;
+ struct iio_dev *indio_dev;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ buffer = devm_iio_kfifo_allocate(&client->dev);
+ if (!buffer)
+ return -ENOMEM;
+
+ iio_device_attach_buffer(indio_dev, buffer);
+
+ indio_dev->name = MAX30102_DRV_NAME;
+ indio_dev->channels = max30102_channels;
+ indio_dev->info = &max30102_info;
+ indio_dev->num_channels = ARRAY_SIZE(max30102_channels);
+ indio_dev->available_scan_masks = max30102_scan_masks;
+ indio_dev->modes = (INDIO_BUFFER_SOFTWARE | INDIO_DIRECT_MODE);
+ indio_dev->setup_ops = &max30102_buffer_setup_ops;
+ indio_dev->dev.parent = &client->dev;
+
+ data = iio_priv(indio_dev);
+ data->indio_dev = indio_dev;
+ data->client = client;
+
+ mutex_init(&data->lock);
+ i2c_set_clientdata(client, indio_dev);
+
+ data->regmap = devm_regmap_init_i2c(client, &max30102_regmap_config);
+ if (IS_ERR(data->regmap)) {
+ dev_err(&client->dev, "regmap initialization failed.\n");
+ return PTR_ERR(data->regmap);
+ }
+ max30102_set_powermode(data, false);
+
+ ret = max30102_chip_init(data);
+ if (ret)
+ return ret;
+
+ if (client->irq <= 0) {
+ dev_err(&client->dev, "no valid irq defined\n");
+ return -EINVAL;
+ }
+
+ ret = devm_request_threaded_irq(&client->dev, client->irq,
+ NULL, max30102_interrupt_handler,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ "max30102_irq", indio_dev);
+ if (ret) {
+ dev_err(&client->dev, "request irq (%d) failed\n", client->irq);
+ return ret;
+ }
+
+ return iio_device_register(indio_dev);
+}
+
+static int max30102_remove(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct max30102_data *data = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+ max30102_set_powermode(data, false);
+
+ return 0;
+}
+
+static const struct i2c_device_id max30102_id[] = {
+ { "max30102", 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, max30102_id);
+
+static const struct of_device_id max30102_dt_ids[] = {
+ { .compatible = "maxim,max30102" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, max30102_dt_ids);
+
+static struct i2c_driver max30102_driver = {
+ .driver = {
+ .name = MAX30102_DRV_NAME,
+ .of_match_table = of_match_ptr(max30102_dt_ids),
+ },
+ .probe = max30102_probe,
+ .remove = max30102_remove,
+ .id_table = max30102_id,
+};
+module_i2c_driver(max30102_driver);
+
+MODULE_AUTHOR("Matt Ranostay <matt@ranostay.consulting>");
+MODULE_DESCRIPTION("MAX30102 heart rate and pulse oximeter sensor");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/humidity/Kconfig b/drivers/iio/humidity/Kconfig
index 912477d54be2..14b9ce453d9d 100644
--- a/drivers/iio/humidity/Kconfig
+++ b/drivers/iio/humidity/Kconfig
@@ -36,6 +36,20 @@ config HDC100X
To compile this driver as a module, choose M here: the module
will be called hdc100x.
+config HID_SENSOR_HUMIDITY
+ tristate "HID Environmental humidity sensor"
+ depends on HID_SENSOR_HUB
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ select HID_SENSOR_IIO_COMMON
+ select HID_SENSOR_IIO_TRIGGER
+ help
+ Say yes here to build support for the HID SENSOR
+	  humidity driver.
+
+ To compile this driver as a module, choose M here: the module
+ will be called hid-sensor-humidity.
+
config HTS221
tristate "STMicroelectronics HTS221 sensor Driver"
depends on (I2C || SPI)
diff --git a/drivers/iio/humidity/Makefile b/drivers/iio/humidity/Makefile
index a6850e47c100..be0dedeb8f3c 100644
--- a/drivers/iio/humidity/Makefile
+++ b/drivers/iio/humidity/Makefile
@@ -5,6 +5,7 @@
obj-$(CONFIG_AM2315) += am2315.o
obj-$(CONFIG_DHT11) += dht11.o
obj-$(CONFIG_HDC100X) += hdc100x.o
+obj-$(CONFIG_HID_SENSOR_HUMIDITY) += hid-sensor-humidity.o
hts221-y := hts221_core.o \
hts221_buffer.o
@@ -15,3 +16,5 @@ obj-$(CONFIG_HTS221_SPI) += hts221_spi.o
obj-$(CONFIG_HTU21) += htu21.o
obj-$(CONFIG_SI7005) += si7005.o
obj-$(CONFIG_SI7020) += si7020.o
+
+ccflags-y += -I$(srctree)/drivers/iio/common/hid-sensors
diff --git a/drivers/iio/humidity/hdc100x.c b/drivers/iio/humidity/hdc100x.c
index 265c34da52d1..aa17115f54c9 100644
--- a/drivers/iio/humidity/hdc100x.c
+++ b/drivers/iio/humidity/hdc100x.c
@@ -79,7 +79,7 @@ static struct attribute *hdc100x_attributes[] = {
NULL
};
-static struct attribute_group hdc100x_attribute_group = {
+static const struct attribute_group hdc100x_attribute_group = {
.attrs = hdc100x_attributes,
};
diff --git a/drivers/iio/humidity/hid-sensor-humidity.c b/drivers/iio/humidity/hid-sensor-humidity.c
new file mode 100644
index 000000000000..6e09c1acfe51
--- /dev/null
+++ b/drivers/iio/humidity/hid-sensor-humidity.c
@@ -0,0 +1,315 @@
+/*
+ * HID Sensors Driver
+ * Copyright (c) 2017, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.
+ */
+#include <linux/device.h>
+#include <linux/hid-sensor-hub.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include "hid-sensor-trigger.h"
+
+struct hid_humidity_state {
+ struct hid_sensor_common common_attributes;
+ struct hid_sensor_hub_attribute_info humidity_attr;
+ s32 humidity_data;
+ int scale_pre_decml;
+ int scale_post_decml;
+ int scale_precision;
+ int value_offset;
+};
+
+/* Channel definitions */
+static const struct iio_chan_spec humidity_channels[] = {
+ {
+ .type = IIO_HUMIDITYRELATIVE,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_OFFSET) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_SAMP_FREQ) |
+ BIT(IIO_CHAN_INFO_HYSTERESIS),
+ },
+ IIO_CHAN_SOFT_TIMESTAMP(1)
+};
+
+/* Adjust channel real bits based on report descriptor */
+static void humidity_adjust_channel_bit_mask(struct iio_chan_spec *channels,
+ int channel, int size)
+{
+ channels[channel].scan_type.sign = 's';
+	/* Sample real bits change based on the report descriptor */
+ channels[channel].scan_type.realbits = size * 8;
+ /* Maximum size of a sample to capture is s32 */
+ channels[channel].scan_type.storagebits = sizeof(s32) * 8;
+}
+
+static int humidity_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct hid_humidity_state *humid_st = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ if (chan->type != IIO_HUMIDITYRELATIVE)
+ return -EINVAL;
+ hid_sensor_power_state(&humid_st->common_attributes, true);
+ *val = sensor_hub_input_attr_get_raw_value(
+ humid_st->common_attributes.hsdev,
+ HID_USAGE_SENSOR_HUMIDITY,
+ HID_USAGE_SENSOR_ATMOSPHERIC_HUMIDITY,
+ humid_st->humidity_attr.report_id,
+ SENSOR_HUB_SYNC);
+ hid_sensor_power_state(&humid_st->common_attributes, false);
+
+ return IIO_VAL_INT;
+
+ case IIO_CHAN_INFO_SCALE:
+ *val = humid_st->scale_pre_decml;
+ *val2 = humid_st->scale_post_decml;
+
+ return humid_st->scale_precision;
+
+ case IIO_CHAN_INFO_OFFSET:
+ *val = humid_st->value_offset;
+
+ return IIO_VAL_INT;
+
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ return hid_sensor_read_samp_freq_value(
+ &humid_st->common_attributes, val, val2);
+
+ case IIO_CHAN_INFO_HYSTERESIS:
+ return hid_sensor_read_raw_hyst_value(
+ &humid_st->common_attributes, val, val2);
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int humidity_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct hid_humidity_state *humid_st = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ return hid_sensor_write_samp_freq_value(
+ &humid_st->common_attributes, val, val2);
+
+ case IIO_CHAN_INFO_HYSTERESIS:
+ return hid_sensor_write_raw_hyst_value(
+ &humid_st->common_attributes, val, val2);
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct iio_info humidity_info = {
+ .driver_module = THIS_MODULE,
+ .read_raw = &humidity_read_raw,
+ .write_raw = &humidity_write_raw,
+};
+
+/* Callback handler to send event after all samples are received and captured */
+static int humidity_proc_event(struct hid_sensor_hub_device *hsdev,
+ unsigned int usage_id, void *pdev)
+{
+ struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+ struct hid_humidity_state *humid_st = iio_priv(indio_dev);
+
+ if (atomic_read(&humid_st->common_attributes.data_ready))
+ iio_push_to_buffers_with_timestamp(indio_dev,
+ &humid_st->humidity_data,
+ iio_get_time_ns(indio_dev));
+
+ return 0;
+}
+
+/* Capture samples in local storage */
+static int humidity_capture_sample(struct hid_sensor_hub_device *hsdev,
+ unsigned int usage_id, size_t raw_len,
+ char *raw_data, void *pdev)
+{
+ struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+ struct hid_humidity_state *humid_st = iio_priv(indio_dev);
+
+ switch (usage_id) {
+ case HID_USAGE_SENSOR_ATMOSPHERIC_HUMIDITY:
+ humid_st->humidity_data = *(s32 *)raw_data;
+
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+/* Parse report which is specific to a usage id */
+static int humidity_parse_report(struct platform_device *pdev,
+ struct hid_sensor_hub_device *hsdev,
+ struct iio_chan_spec *channels,
+ unsigned int usage_id,
+ struct hid_humidity_state *st)
+{
+ int ret;
+
+ ret = sensor_hub_input_get_attribute_info(hsdev, HID_INPUT_REPORT,
+ usage_id,
+ HID_USAGE_SENSOR_ATMOSPHERIC_HUMIDITY,
+ &st->humidity_attr);
+ if (ret < 0)
+ return ret;
+
+ humidity_adjust_channel_bit_mask(channels, 0, st->humidity_attr.size);
+
+ st->scale_precision = hid_sensor_format_scale(
+ HID_USAGE_SENSOR_HUMIDITY,
+ &st->humidity_attr,
+ &st->scale_pre_decml,
+ &st->scale_post_decml);
+
+ /* Set Sensitivity field ids, when there is no individual modifier */
+ if (st->common_attributes.sensitivity.index < 0)
+ sensor_hub_input_get_attribute_info(hsdev,
+ HID_FEATURE_REPORT, usage_id,
+ HID_USAGE_SENSOR_DATA_MOD_CHANGE_SENSITIVITY_ABS |
+ HID_USAGE_SENSOR_ATMOSPHERIC_HUMIDITY,
+ &st->common_attributes.sensitivity);
+
+ return ret;
+}
+
+static struct hid_sensor_hub_callbacks humidity_callbacks = {
+ .send_event = &humidity_proc_event,
+ .capture_sample = &humidity_capture_sample,
+};
+
+/* Function to initialize the processing for usage id */
+static int hid_humidity_probe(struct platform_device *pdev)
+{
+ static const char *name = "humidity";
+ struct iio_dev *indio_dev;
+ struct hid_humidity_state *humid_st;
+ struct iio_chan_spec *humid_chans;
+ struct hid_sensor_hub_device *hsdev = dev_get_platdata(&pdev->dev);
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*humid_st));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ humid_st = iio_priv(indio_dev);
+ humid_st->common_attributes.hsdev = hsdev;
+ humid_st->common_attributes.pdev = pdev;
+
+ ret = hid_sensor_parse_common_attributes(hsdev,
+ HID_USAGE_SENSOR_HUMIDITY,
+ &humid_st->common_attributes);
+ if (ret)
+ return ret;
+
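+	/*
+	 * Duplicate the channel template so humidity_parse_report() can
+	 * adjust the sample size reported by the sensor hub.
+	 */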
+ humid_chans = devm_kmemdup(&indio_dev->dev, humidity_channels,
+ sizeof(humidity_channels), GFP_KERNEL);
+ if (!humid_chans)
+ return -ENOMEM;
+
+ ret = humidity_parse_report(pdev, hsdev, humid_chans,
+ HID_USAGE_SENSOR_HUMIDITY, humid_st);
+ if (ret)
+ return ret;
+
+ indio_dev->channels = humid_chans;
+ indio_dev->num_channels = ARRAY_SIZE(humidity_channels);
+ indio_dev->dev.parent = &pdev->dev;
+ indio_dev->info = &humidity_info;
+ indio_dev->name = name;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ ret = devm_iio_triggered_buffer_setup(&pdev->dev, indio_dev,
+ &iio_pollfunc_store_time, NULL, NULL);
+ if (ret)
+ return ret;
+
+ atomic_set(&humid_st->common_attributes.data_ready, 0);
+ ret = hid_sensor_setup_trigger(indio_dev, name,
+ &humid_st->common_attributes);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, indio_dev);
+
+ humidity_callbacks.pdev = pdev;
+ ret = sensor_hub_register_callback(hsdev, HID_USAGE_SENSOR_HUMIDITY,
+ &humidity_callbacks);
+ if (ret)
+ goto error_remove_trigger;
+
+ ret = iio_device_register(indio_dev);
+ if (ret)
+ goto error_remove_callback;
+
+ return ret;
+
+error_remove_callback:
+ sensor_hub_remove_callback(hsdev, HID_USAGE_SENSOR_HUMIDITY);
+error_remove_trigger:
+ hid_sensor_remove_trigger(&humid_st->common_attributes);
+ return ret;
+}
+
+/* Function to deinitialize the processing for usage id */
+static int hid_humidity_remove(struct platform_device *pdev)
+{
+ struct hid_sensor_hub_device *hsdev = dev_get_platdata(&pdev->dev);
+ struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+ struct hid_humidity_state *humid_st = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+ sensor_hub_remove_callback(hsdev, HID_USAGE_SENSOR_HUMIDITY);
+ hid_sensor_remove_trigger(&humid_st->common_attributes);
+
+ return 0;
+}
+
+static const struct platform_device_id hid_humidity_ids[] = {
+ {
+ /* Format: HID-SENSOR-usage_id_in_hex_lowercase */
+ .name = "HID-SENSOR-200032",
+ },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, hid_humidity_ids);
+
+static struct platform_driver hid_humidity_platform_driver = {
+ .id_table = hid_humidity_ids,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .pm = &hid_sensor_pm_ops,
+ },
+ .probe = hid_humidity_probe,
+ .remove = hid_humidity_remove,
+};
+module_platform_driver(hid_humidity_platform_driver);
+
+MODULE_DESCRIPTION("HID Environmental humidity sensor");
+MODULE_AUTHOR("Song Hongyan <hongyan.song@intel.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/humidity/hts221_buffer.c b/drivers/iio/humidity/hts221_buffer.c
index 72ddcdac21a2..7d19a3da7ab7 100644
--- a/drivers/iio/humidity/hts221_buffer.c
+++ b/drivers/iio/humidity/hts221_buffer.c
@@ -41,7 +41,7 @@ static const struct iio_trigger_ops hts221_trigger_ops = {
static irqreturn_t hts221_trigger_handler_thread(int irq, void *private)
{
- struct hts221_hw *hw = (struct hts221_hw *)private;
+ struct hts221_hw *hw = private;
u8 status;
int err;
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
index b9fcbf18aa99..96dabbd2f004 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
@@ -114,6 +114,12 @@ static const struct inv_mpu6050_hw hw_info[] = {
.config = &chip_config_6050,
},
{
+ .whoami = INV_MPU9250_WHOAMI_VALUE,
+ .name = "MPU9250",
+ .reg = &reg_set_6500,
+ .config = &chip_config_6050,
+ },
+ {
.whoami = INV_ICM20608_WHOAMI_VALUE,
.name = "ICM20608",
.reg = &reg_set_6500,
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
index 2c3f8964a3ea..64b5f5b92200 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
@@ -17,6 +17,7 @@
#include <linux/i2c.h>
#include <linux/iio/iio.h>
#include <linux/module.h>
+#include <linux/of_device.h>
#include "inv_mpu_iio.h"
static const struct regmap_config inv_mpu_regmap_config = {
@@ -69,7 +70,8 @@ static int inv_mpu6050_deselect_bypass(struct i2c_mux_core *muxc, u32 chan_id)
return 0;
}
-static const char *inv_mpu_match_acpi_device(struct device *dev, int *chip_id)
+static const char *inv_mpu_match_acpi_device(struct device *dev,
+ enum inv_devices *chip_id)
{
const struct acpi_device_id *id;
@@ -93,7 +95,8 @@ static int inv_mpu_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct inv_mpu6050_state *st;
- int result, chip_type;
+ int result;
+ enum inv_devices chip_type;
struct regmap *regmap;
const char *name;
@@ -101,8 +104,13 @@ static int inv_mpu_probe(struct i2c_client *client,
I2C_FUNC_SMBUS_I2C_BLOCK))
return -EOPNOTSUPP;
- if (id) {
- chip_type = (int)id->driver_data;
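+	/* The chip type comes from DT match data, the I2C id table or ACPI */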
+ if (client->dev.of_node) {
+ chip_type = (enum inv_devices)
+ of_device_get_match_data(&client->dev);
+ name = client->name;
+ } else if (id) {
+ chip_type = (enum inv_devices)
+ id->driver_data;
name = id->name;
} else if (ACPI_HANDLE(&client->dev)) {
name = inv_mpu_match_acpi_device(&client->dev, &chip_type);
@@ -170,12 +178,38 @@ static const struct i2c_device_id inv_mpu_id[] = {
{"mpu6050", INV_MPU6050},
{"mpu6500", INV_MPU6500},
{"mpu9150", INV_MPU9150},
+ {"mpu9250", INV_MPU9250},
{"icm20608", INV_ICM20608},
{}
};
MODULE_DEVICE_TABLE(i2c, inv_mpu_id);
+static const struct of_device_id inv_of_match[] = {
+ {
+ .compatible = "invensense,mpu6050",
+ .data = (void *)INV_MPU6050
+ },
+ {
+ .compatible = "invensense,mpu6500",
+ .data = (void *)INV_MPU6500
+ },
+ {
+ .compatible = "invensense,mpu9150",
+ .data = (void *)INV_MPU9150
+ },
+ {
+ .compatible = "invensense,mpu9250",
+ .data = (void *)INV_MPU9250
+ },
+ {
+ .compatible = "invensense,icm20608",
+ .data = (void *)INV_ICM20608
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(of, inv_of_match);
+
static const struct acpi_device_id inv_acpi_match[] = {
{"INVN6500", INV_MPU6500},
{ },
@@ -188,6 +222,7 @@ static struct i2c_driver inv_mpu_driver = {
.remove = inv_mpu_remove,
.id_table = inv_mpu_id,
.driver = {
+ .of_match_table = inv_of_match,
.acpi_match_table = ACPI_PTR(inv_acpi_match),
.name = "inv-mpu6050-i2c",
.pm = &inv_mpu_pmops,
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
index f0e8c5dd9fae..ef13de7a2c20 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
@@ -70,6 +70,7 @@ enum inv_devices {
INV_MPU6500,
INV_MPU6000,
INV_MPU9150,
+ INV_MPU9250,
INV_ICM20608,
INV_NUM_PARTS
};
@@ -226,6 +227,7 @@ struct inv_mpu6050_state {
#define INV_MPU6050_WHOAMI_VALUE 0x68
#define INV_MPU6500_WHOAMI_VALUE 0x70
#define INV_MPU9150_WHOAMI_VALUE 0x68
+#define INV_MPU9250_WHOAMI_VALUE 0x71
#define INV_ICM20608_WHOAMI_VALUE 0xAF
/* scan element definition */
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c
index 6e6476dfa188..74506e5ac0db 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c
@@ -82,6 +82,7 @@ static const struct spi_device_id inv_mpu_id[] = {
{"mpu6000", INV_MPU6000},
{"mpu6500", INV_MPU6500},
{"mpu9150", INV_MPU9150},
+ {"mpu9250", INV_MPU9250},
{"icm20608", INV_ICM20608},
{}
};
diff --git a/drivers/iio/imu/st_lsm6dsx/Kconfig b/drivers/iio/imu/st_lsm6dsx/Kconfig
index 935d4cd071a3..e57337159b57 100644
--- a/drivers/iio/imu/st_lsm6dsx/Kconfig
+++ b/drivers/iio/imu/st_lsm6dsx/Kconfig
@@ -8,7 +8,7 @@ config IIO_ST_LSM6DSX
select IIO_ST_LSM6DSX_SPI if (SPI_MASTER)
help
Say yes here to build support for STMicroelectronics LSM6DSx imu
- sensor. Supported devices: lsm6ds3, lsm6dsm
+ sensor. Supported devices: lsm6ds3, lsm6ds3h, lsm6dsl, lsm6dsm
To compile this driver as a module, choose M here: the module
will be called st_lsm6dsx.
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h
index 69deafe1c10d..4839db7b9690 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h
@@ -15,11 +15,16 @@
#include <linux/device.h>
#define ST_LSM6DS3_DEV_NAME "lsm6ds3"
+#define ST_LSM6DS3H_DEV_NAME "lsm6ds3h"
+#define ST_LSM6DSL_DEV_NAME "lsm6dsl"
#define ST_LSM6DSM_DEV_NAME "lsm6dsm"
enum st_lsm6dsx_hw_id {
ST_LSM6DS3_ID,
+ ST_LSM6DS3H_ID,
+ ST_LSM6DSL_ID,
ST_LSM6DSM_ID,
+ ST_LSM6DSX_MAX_ID,
};
#define ST_LSM6DSX_CHAN_SIZE 2
@@ -50,7 +55,7 @@ struct st_lsm6dsx_reg {
struct st_lsm6dsx_settings {
u8 wai;
u16 max_fifo_size;
- enum st_lsm6dsx_hw_id id;
+ enum st_lsm6dsx_hw_id id[ST_LSM6DSX_MAX_ID];
};
enum st_lsm6dsx_sensor_id {
@@ -66,6 +71,7 @@ enum st_lsm6dsx_fifo_mode {
/**
* struct st_lsm6dsx_sensor - ST IMU sensor instance
+ * @name: Sensor name.
* @id: Sensor identifier.
* @hw: Pointer to instance of struct st_lsm6dsx_hw.
* @gain: Configured sensor sensitivity.
@@ -78,6 +84,7 @@ enum st_lsm6dsx_fifo_mode {
* @ts: Latest timestamp from the interrupt handler.
*/
struct st_lsm6dsx_sensor {
+ char name[32];
enum st_lsm6dsx_sensor_id id;
struct st_lsm6dsx_hw *hw;
@@ -128,7 +135,7 @@ struct st_lsm6dsx_hw {
#endif /* CONFIG_SPI_MASTER */
};
-int st_lsm6dsx_probe(struct device *dev, int irq, int hw_id,
+int st_lsm6dsx_probe(struct device *dev, int irq, int hw_id, const char *name,
const struct st_lsm6dsx_transfer_function *tf_ops);
int st_lsm6dsx_sensor_enable(struct st_lsm6dsx_sensor *sensor);
int st_lsm6dsx_sensor_disable(struct st_lsm6dsx_sensor *sensor);
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
index 81b572d7699a..c8e5cfd0ef0b 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
@@ -1,9 +1,10 @@
/*
* STMicroelectronics st_lsm6dsx FIFO buffer library driver
*
- * LSM6DS3/LSM6DSM: The FIFO buffer can be configured to store data
- * from gyroscope and accelerometer. Samples are queued without any tag
- * according to a specific pattern based on 'FIFO data sets' (6 bytes each):
+ * LSM6DS3/LSM6DS3H/LSM6DSL/LSM6DSM: The FIFO buffer can be configured
+ * to store data from gyroscope and accelerometer. Samples are queued
+ * without any tag according to a specific pattern based on 'FIFO data sets'
+ * (6 bytes each):
* - 1st data set is reserved for gyroscope data
* - 2nd data set is reserved for accelerometer data
* The FIFO pattern changes depending on the ODRs and decimation factors
@@ -206,7 +207,7 @@ out:
}
/**
- * st_lsm6dsx_read_fifo() - LSM6DS3-LSM6DSM read FIFO routine
+ * st_lsm6dsx_read_fifo() - LSM6DS3-LSM6DS3H-LSM6DSL-LSM6DSM read FIFO routine
* @hw: Pointer to instance of struct st_lsm6dsx_hw.
*
* Read samples from the hw FIFO and push them to IIO buffers.
@@ -363,7 +364,7 @@ static int st_lsm6dsx_update_fifo(struct iio_dev *iio_dev, bool enable)
static irqreturn_t st_lsm6dsx_handler_irq(int irq, void *private)
{
- struct st_lsm6dsx_hw *hw = (struct st_lsm6dsx_hw *)private;
+ struct st_lsm6dsx_hw *hw = private;
struct st_lsm6dsx_sensor *sensor;
int i;
@@ -387,7 +388,7 @@ static irqreturn_t st_lsm6dsx_handler_irq(int irq, void *private)
static irqreturn_t st_lsm6dsx_handler_thread(int irq, void *private)
{
- struct st_lsm6dsx_hw *hw = (struct st_lsm6dsx_hw *)private;
+ struct st_lsm6dsx_hw *hw = private;
int count;
mutex_lock(&hw->fifo_lock);
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
index c92ddcc190e2..462a27b70453 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
@@ -17,7 +17,7 @@
* - Gyroscope supported full-scale [dps]: +-125/+-245/+-500/+-1000/+-2000
* - FIFO size: 8KB
*
- * - LSM6DSM:
+ * - LSM6DS3H/LSM6DSL/LSM6DSM:
* - Accelerometer/Gyroscope supported ODR [Hz]: 13, 26, 52, 104, 208, 416
* - Accelerometer supported full-scale [g]: +-2/+-4/+-8/+-16
* - Gyroscope supported full-scale [dps]: +-125/+-245/+-500/+-1000/+-2000
@@ -74,12 +74,6 @@
#define ST_LSM6DSX_REG_GYRO_OUT_Y_L_ADDR 0x24
#define ST_LSM6DSX_REG_GYRO_OUT_Z_L_ADDR 0x26
-#define ST_LSM6DS3_WHOAMI 0x69
-#define ST_LSM6DSM_WHOAMI 0x6a
-
-#define ST_LSM6DS3_MAX_FIFO_SIZE 8192
-#define ST_LSM6DSM_MAX_FIFO_SIZE 4096
-
#define ST_LSM6DSX_ACC_FS_2G_GAIN IIO_G_TO_M_S_2(61)
#define ST_LSM6DSX_ACC_FS_4G_GAIN IIO_G_TO_M_S_2(122)
#define ST_LSM6DSX_ACC_FS_8G_GAIN IIO_G_TO_M_S_2(244)
@@ -164,14 +158,26 @@ static const struct st_lsm6dsx_fs_table_entry st_lsm6dsx_fs_table[] = {
static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
{
- .wai = ST_LSM6DS3_WHOAMI,
- .max_fifo_size = ST_LSM6DS3_MAX_FIFO_SIZE,
- .id = ST_LSM6DS3_ID,
+ .wai = 0x69,
+ .max_fifo_size = 8192,
+ .id = {
+ [0] = ST_LSM6DS3_ID,
+ },
},
{
- .wai = ST_LSM6DSM_WHOAMI,
- .max_fifo_size = ST_LSM6DSM_MAX_FIFO_SIZE,
- .id = ST_LSM6DSM_ID,
+ .wai = 0x69,
+ .max_fifo_size = 4096,
+ .id = {
+ [0] = ST_LSM6DS3H_ID,
+ },
+ },
+ {
+ .wai = 0x6a,
+ .max_fifo_size = 4096,
+ .id = {
+ [0] = ST_LSM6DSL_ID,
+ [1] = ST_LSM6DSM_ID,
+ },
},
};
@@ -241,11 +247,15 @@ out:
static int st_lsm6dsx_check_whoami(struct st_lsm6dsx_hw *hw, int id)
{
- int err, i;
+ int err, i, j;
u8 data;
for (i = 0; i < ARRAY_SIZE(st_lsm6dsx_sensor_settings); i++) {
- if (id == st_lsm6dsx_sensor_settings[i].id)
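+		/*
+		 * A settings entry may list several hw ids, e.g. LSM6DSL
+		 * and LSM6DSM share the same WHO_AM_I value.
+		 */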
+ for (j = 0; j < ST_LSM6DSX_MAX_ID; j++) {
+ if (id == st_lsm6dsx_sensor_settings[i].id[j])
+ break;
+ }
+ if (j < ST_LSM6DSX_MAX_ID)
break;
}
@@ -298,32 +308,40 @@ static int st_lsm6dsx_set_full_scale(struct st_lsm6dsx_sensor *sensor,
return 0;
}
-static int st_lsm6dsx_set_odr(struct st_lsm6dsx_sensor *sensor, u16 odr)
+static int st_lsm6dsx_check_odr(struct st_lsm6dsx_sensor *sensor, u16 odr,
+ u8 *val)
{
- enum st_lsm6dsx_sensor_id id = sensor->id;
- int i, err;
- u8 val;
+ int i;
for (i = 0; i < ST_LSM6DSX_ODR_LIST_SIZE; i++)
- if (st_lsm6dsx_odr_table[id].odr_avl[i].hz == odr)
+ if (st_lsm6dsx_odr_table[sensor->id].odr_avl[i].hz == odr)
break;
if (i == ST_LSM6DSX_ODR_LIST_SIZE)
return -EINVAL;
- val = st_lsm6dsx_odr_table[id].odr_avl[i].val;
- err = st_lsm6dsx_write_with_mask(sensor->hw,
- st_lsm6dsx_odr_table[id].reg.addr,
- st_lsm6dsx_odr_table[id].reg.mask,
- val);
- if (err < 0)
- return err;
-
+ *val = st_lsm6dsx_odr_table[sensor->id].odr_avl[i].val;
sensor->odr = odr;
return 0;
}
+static int st_lsm6dsx_set_odr(struct st_lsm6dsx_sensor *sensor, u16 odr)
+{
+ enum st_lsm6dsx_sensor_id id = sensor->id;
+ int err;
+ u8 val;
+
+ err = st_lsm6dsx_check_odr(sensor, odr, &val);
+ if (err < 0)
+ return err;
+
+ return st_lsm6dsx_write_with_mask(sensor->hw,
+ st_lsm6dsx_odr_table[id].reg.addr,
+ st_lsm6dsx_odr_table[id].reg.mask,
+ val);
+}
+
int st_lsm6dsx_sensor_enable(struct st_lsm6dsx_sensor *sensor)
{
int err;
@@ -426,9 +444,12 @@ static int st_lsm6dsx_write_raw(struct iio_dev *iio_dev,
case IIO_CHAN_INFO_SCALE:
err = st_lsm6dsx_set_full_scale(sensor, val2);
break;
- case IIO_CHAN_INFO_SAMP_FREQ:
- err = st_lsm6dsx_set_odr(sensor, val);
+ case IIO_CHAN_INFO_SAMP_FREQ: {
+ u8 data;
+
+ err = st_lsm6dsx_check_odr(sensor, val, &data);
break;
+ }
default:
err = -EINVAL;
break;
@@ -538,19 +559,11 @@ static const unsigned long st_lsm6dsx_available_scan_masks[] = {0x7, 0x0};
static int st_lsm6dsx_of_get_drdy_pin(struct st_lsm6dsx_hw *hw, int *drdy_pin)
{
struct device_node *np = hw->dev->of_node;
- int err;
if (!np)
return -EINVAL;
- err = of_property_read_u32(np, "st,drdy-int-pin", drdy_pin);
- if (err == -ENODATA) {
- /* if the property has not been specified use default value */
- *drdy_pin = 1;
- err = 0;
- }
-
- return err;
+ return of_property_read_u32(np, "st,drdy-int-pin", drdy_pin);
}
static int st_lsm6dsx_get_drdy_reg(struct st_lsm6dsx_hw *hw, u8 *drdy_reg)
@@ -621,7 +634,8 @@ static int st_lsm6dsx_init_device(struct st_lsm6dsx_hw *hw)
}
static struct iio_dev *st_lsm6dsx_alloc_iiodev(struct st_lsm6dsx_hw *hw,
- enum st_lsm6dsx_sensor_id id)
+ enum st_lsm6dsx_sensor_id id,
+ const char *name)
{
struct st_lsm6dsx_sensor *sensor;
struct iio_dev *iio_dev;
@@ -645,27 +659,30 @@ static struct iio_dev *st_lsm6dsx_alloc_iiodev(struct st_lsm6dsx_hw *hw,
case ST_LSM6DSX_ID_ACC:
iio_dev->channels = st_lsm6dsx_acc_channels;
iio_dev->num_channels = ARRAY_SIZE(st_lsm6dsx_acc_channels);
- iio_dev->name = "lsm6dsx_accel";
iio_dev->info = &st_lsm6dsx_acc_info;
sensor->decimator_mask = ST_LSM6DSX_REG_ACC_DEC_MASK;
+ scnprintf(sensor->name, sizeof(sensor->name), "%s_accel",
+ name);
break;
case ST_LSM6DSX_ID_GYRO:
iio_dev->channels = st_lsm6dsx_gyro_channels;
iio_dev->num_channels = ARRAY_SIZE(st_lsm6dsx_gyro_channels);
- iio_dev->name = "lsm6dsx_gyro";
iio_dev->info = &st_lsm6dsx_gyro_info;
sensor->decimator_mask = ST_LSM6DSX_REG_GYRO_DEC_MASK;
+ scnprintf(sensor->name, sizeof(sensor->name), "%s_gyro",
+ name);
break;
default:
return NULL;
}
+ iio_dev->name = sensor->name;
return iio_dev;
}
-int st_lsm6dsx_probe(struct device *dev, int irq, int hw_id,
+int st_lsm6dsx_probe(struct device *dev, int irq, int hw_id, const char *name,
const struct st_lsm6dsx_transfer_function *tf_ops)
{
struct st_lsm6dsx_hw *hw;
@@ -689,7 +706,7 @@ int st_lsm6dsx_probe(struct device *dev, int irq, int hw_id,
return err;
for (i = 0; i < ST_LSM6DSX_ID_MAX; i++) {
- hw->iio_devs[i] = st_lsm6dsx_alloc_iiodev(hw, i);
+ hw->iio_devs[i] = st_lsm6dsx_alloc_iiodev(hw, i, name);
if (!hw->iio_devs[i])
return -ENOMEM;
}
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i2c.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i2c.c
index ea3041186e1e..09a51cfb9b5e 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i2c.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i2c.c
@@ -61,7 +61,7 @@ static int st_lsm6dsx_i2c_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
return st_lsm6dsx_probe(&client->dev, client->irq,
- (int)id->driver_data,
+ (int)id->driver_data, id->name,
&st_lsm6dsx_transfer_fn);
}
@@ -71,6 +71,14 @@ static const struct of_device_id st_lsm6dsx_i2c_of_match[] = {
.data = (void *)ST_LSM6DS3_ID,
},
{
+ .compatible = "st,lsm6ds3h",
+ .data = (void *)ST_LSM6DS3H_ID,
+ },
+ {
+ .compatible = "st,lsm6dsl",
+ .data = (void *)ST_LSM6DSL_ID,
+ },
+ {
.compatible = "st,lsm6dsm",
.data = (void *)ST_LSM6DSM_ID,
},
@@ -80,6 +88,8 @@ MODULE_DEVICE_TABLE(of, st_lsm6dsx_i2c_of_match);
static const struct i2c_device_id st_lsm6dsx_i2c_id_table[] = {
{ ST_LSM6DS3_DEV_NAME, ST_LSM6DS3_ID },
+ { ST_LSM6DS3H_DEV_NAME, ST_LSM6DS3H_ID },
+ { ST_LSM6DSL_DEV_NAME, ST_LSM6DSL_ID },
{ ST_LSM6DSM_DEV_NAME, ST_LSM6DSM_ID },
{},
};
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_spi.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_spi.c
index fbe72470ed1e..f765a5058488 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_spi.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_spi.c
@@ -78,7 +78,7 @@ static int st_lsm6dsx_spi_probe(struct spi_device *spi)
const struct spi_device_id *id = spi_get_device_id(spi);
return st_lsm6dsx_probe(&spi->dev, spi->irq,
- (int)id->driver_data,
+ (int)id->driver_data, id->name,
&st_lsm6dsx_transfer_fn);
}
@@ -88,6 +88,14 @@ static const struct of_device_id st_lsm6dsx_spi_of_match[] = {
.data = (void *)ST_LSM6DS3_ID,
},
{
+ .compatible = "st,lsm6ds3h",
+ .data = (void *)ST_LSM6DS3H_ID,
+ },
+ {
+ .compatible = "st,lsm6dsl",
+ .data = (void *)ST_LSM6DSL_ID,
+ },
+ {
.compatible = "st,lsm6dsm",
.data = (void *)ST_LSM6DSM_ID,
},
@@ -97,6 +105,8 @@ MODULE_DEVICE_TABLE(of, st_lsm6dsx_spi_of_match);
static const struct spi_device_id st_lsm6dsx_spi_id_table[] = {
{ ST_LSM6DS3_DEV_NAME, ST_LSM6DS3_ID },
+ { ST_LSM6DS3H_DEV_NAME, ST_LSM6DS3H_ID },
+ { ST_LSM6DSL_DEV_NAME, ST_LSM6DSL_ID },
{ ST_LSM6DSM_DEV_NAME, ST_LSM6DSM_ID },
{},
};
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index 3ff91e02fee3..57c14da5708f 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -1719,18 +1719,13 @@ int iio_device_register(struct iio_dev *indio_dev)
cdev_init(&indio_dev->chrdev, &iio_buffer_fileops);
indio_dev->chrdev.owner = indio_dev->info->driver_module;
- indio_dev->chrdev.kobj.parent = &indio_dev->dev.kobj;
- ret = cdev_add(&indio_dev->chrdev, indio_dev->dev.devt, 1);
- if (ret < 0)
- goto error_unreg_eventset;
- ret = device_add(&indio_dev->dev);
+ ret = cdev_device_add(&indio_dev->chrdev, &indio_dev->dev);
if (ret < 0)
- goto error_cdev_del;
+ goto error_unreg_eventset;
return 0;
-error_cdev_del:
- cdev_del(&indio_dev->chrdev);
+
error_unreg_eventset:
iio_device_unregister_eventset(indio_dev);
error_free_sysfs:
@@ -1751,10 +1746,8 @@ void iio_device_unregister(struct iio_dev *indio_dev)
{
mutex_lock(&indio_dev->info_exist_lock);
- device_del(&indio_dev->dev);
+ cdev_device_del(&indio_dev->chrdev, &indio_dev->dev);
- if (indio_dev->chrdev.dev)
- cdev_del(&indio_dev->chrdev);
iio_device_unregister_debugfs(indio_dev);
iio_disable_all_buffers(indio_dev);
diff --git a/drivers/iio/light/Kconfig b/drivers/iio/light/Kconfig
index 5f731ead9d46..33e755d8d825 100644
--- a/drivers/iio/light/Kconfig
+++ b/drivers/iio/light/Kconfig
@@ -136,6 +136,16 @@ config CM36651
To compile this driver as a module, choose M here:
the module will be called cm36651.
+config IIO_CROS_EC_LIGHT_PROX
+ tristate "ChromeOS EC Light and Proximity Sensors"
+ depends on IIO_CROS_EC_SENSORS_CORE
+ help
+ Say Y here if you use the light and proximity sensors
+ presented by the ChromeOS EC Sensor hub.
+
+ To compile this driver as a module, choose M here:
+ the module will be called cros_ec_light_prox.
+
config GP2AP020A00F
tristate "Sharp GP2AP020A00F Proximity/ALS sensor"
depends on I2C
@@ -395,4 +405,14 @@ config VEML6070
To compile this driver as a module, choose M here: the
module will be called veml6070.
+config VL6180
+ tristate "VL6180 ALS, range and proximity sensor"
+ depends on I2C
+ help
+ Say Y here if you want to build a driver for the STMicroelectronics
+ VL6180 combined ambient light, range and proximity sensor.
+
+ To compile this driver as a module, choose M here: the
+ module will be called vl6180.
+
endmenu
diff --git a/drivers/iio/light/Makefile b/drivers/iio/light/Makefile
index c13a2399e6df..681363c2b298 100644
--- a/drivers/iio/light/Makefile
+++ b/drivers/iio/light/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_CM3232) += cm3232.o
obj-$(CONFIG_CM3323) += cm3323.o
obj-$(CONFIG_CM3605) += cm3605.o
obj-$(CONFIG_CM36651) += cm36651.o
+obj-$(CONFIG_IIO_CROS_EC_LIGHT_PROX) += cros_ec_light_prox.o
obj-$(CONFIG_GP2AP020A00F) += gp2ap020a00f.o
obj-$(CONFIG_HID_SENSOR_ALS) += hid-sensor-als.o
obj-$(CONFIG_HID_SENSOR_PROX) += hid-sensor-prox.o
@@ -37,3 +38,4 @@ obj-$(CONFIG_TSL4531) += tsl4531.o
obj-$(CONFIG_US5182D) += us5182d.o
obj-$(CONFIG_VCNL4000) += vcnl4000.o
obj-$(CONFIG_VEML6070) += veml6070.o
+obj-$(CONFIG_VL6180) += vl6180.o
diff --git a/drivers/iio/light/apds9960.c b/drivers/iio/light/apds9960.c
index a4304edc3e0f..518a47e9377b 100644
--- a/drivers/iio/light/apds9960.c
+++ b/drivers/iio/light/apds9960.c
@@ -343,7 +343,7 @@ static struct attribute *apds9960_attributes[] = {
NULL,
};
-static struct attribute_group apds9960_attribute_group = {
+static const struct attribute_group apds9960_attribute_group = {
.attrs = apds9960_attributes,
};
@@ -1112,6 +1112,8 @@ static int apds9960_runtime_resume(struct device *dev)
#endif
static const struct dev_pm_ops apds9960_pm_ops = {
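+	/* System sleep reuses the runtime-PM suspend/resume handlers */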
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
SET_RUNTIME_PM_OPS(apds9960_runtime_suspend,
apds9960_runtime_resume, NULL)
};
@@ -1122,9 +1124,16 @@ static const struct i2c_device_id apds9960_id[] = {
};
MODULE_DEVICE_TABLE(i2c, apds9960_id);
+static const struct of_device_id apds9960_of_match[] = {
+ { .compatible = "avago,apds9960" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, apds9960_of_match);
+
static struct i2c_driver apds9960_driver = {
.driver = {
.name = APDS9960_DRV_NAME,
+ .of_match_table = apds9960_of_match,
.pm = &apds9960_pm_ops,
},
.probe = apds9960_probe,
diff --git a/drivers/iio/light/bh1750.c b/drivers/iio/light/bh1750.c
index b05946604f80..6c61187e630f 100644
--- a/drivers/iio/light/bh1750.c
+++ b/drivers/iio/light/bh1750.c
@@ -212,7 +212,7 @@ static struct attribute *bh1750_attributes[] = {
NULL,
};
-static struct attribute_group bh1750_attribute_group = {
+static const struct attribute_group bh1750_attribute_group = {
.attrs = bh1750_attributes,
};
diff --git a/drivers/iio/light/cros_ec_light_prox.c b/drivers/iio/light/cros_ec_light_prox.c
new file mode 100644
index 000000000000..721722376fd0
--- /dev/null
+++ b/drivers/iio/light/cros_ec_light_prox.c
@@ -0,0 +1,289 @@
+/*
+ * cros_ec_light_prox - Driver for light and prox sensors behind CrosEC.
+ *
+ * Copyright (C) 2017 Google, Inc
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/kfifo_buf.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/kernel.h>
+#include <linux/mfd/cros_ec.h>
+#include <linux/mfd/cros_ec_commands.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+
+#include "../common/cros_ec_sensors/cros_ec_sensors_core.h"
+
+/*
+ * We only represent one entry for light or proximity. EC is merging different
+ * light sensors to return what the eye would see. For proximity, we
+ * currently support only one light source.
+ */
+#define CROS_EC_LIGHT_PROX_MAX_CHANNELS (1 + 1)
+
+/* State data for ec_sensors iio driver. */
+struct cros_ec_light_prox_state {
+ /* Shared by all sensors */
+ struct cros_ec_sensors_core_state core;
+
+ struct iio_chan_spec channels[CROS_EC_LIGHT_PROX_MAX_CHANNELS];
+};
+
+static int cros_ec_light_prox_read(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct cros_ec_light_prox_state *st = iio_priv(indio_dev);
+ u16 data = 0;
+ s64 val64;
+ int ret = IIO_VAL_INT;
+ int idx = chan->scan_index;
+
+ mutex_lock(&st->core.cmd_lock);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ if (chan->type == IIO_PROXIMITY) {
+ if (cros_ec_sensors_read_cmd(indio_dev, 1 << idx,
+ (s16 *)&data) < 0) {
+ ret = -EIO;
+ break;
+ }
+ *val = data;
+ } else {
+ ret = -EINVAL;
+ }
+ break;
+ case IIO_CHAN_INFO_PROCESSED:
+ if (chan->type == IIO_LIGHT) {
+ if (cros_ec_sensors_read_cmd(indio_dev, 1 << idx,
+ (s16 *)&data) < 0) {
+ ret = -EIO;
+ break;
+ }
+ /*
+ * The data coming from the light sensor is
+ * pre-processed and represents the ambient light
+ * illuminance reading expressed in lux.
+ */
+ *val = data;
+ ret = IIO_VAL_INT;
+ } else {
+ ret = -EINVAL;
+ }
+ break;
+ case IIO_CHAN_INFO_CALIBBIAS:
+ st->core.param.cmd = MOTIONSENSE_CMD_SENSOR_OFFSET;
+ st->core.param.sensor_offset.flags = 0;
+
+ if (cros_ec_motion_send_host_cmd(&st->core, 0)) {
+ ret = -EIO;
+ break;
+ }
+
+ /* Save values */
+ st->core.calib[0] = st->core.resp->sensor_offset.offset[0];
+
+ *val = st->core.calib[idx];
+ break;
+ case IIO_CHAN_INFO_CALIBSCALE:
+ /*
+		 * The RANGE command doubles as the calibration scale:
+		 * the scale is a fixed-point number x.y, with x in the
+		 * upper 16 bits and y (0 to 9999) in the lower 16 bits.
+ */
+ st->core.param.cmd = MOTIONSENSE_CMD_SENSOR_RANGE;
+ st->core.param.sensor_range.data = EC_MOTION_SENSE_NO_VALUE;
+
+ if (cros_ec_motion_send_host_cmd(&st->core, 0)) {
+ ret = -EIO;
+ break;
+ }
+
+ val64 = st->core.resp->sensor_range.ret;
+ *val = val64 >> 16;
+ *val2 = (val64 & 0xffff) * 100;
+ ret = IIO_VAL_INT_PLUS_MICRO;
+ break;
+ default:
+ ret = cros_ec_sensors_core_read(&st->core, chan, val, val2,
+ mask);
+ break;
+ }
+
+ mutex_unlock(&st->core.cmd_lock);
+
+ return ret;
+}
+
+static int cros_ec_light_prox_write(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct cros_ec_light_prox_state *st = iio_priv(indio_dev);
+ int ret = 0;
+ int idx = chan->scan_index;
+
+ mutex_lock(&st->core.cmd_lock);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_CALIBBIAS:
+ st->core.calib[idx] = val;
+ /* Send to EC for each axis, even if not complete */
+ st->core.param.cmd = MOTIONSENSE_CMD_SENSOR_OFFSET;
+ st->core.param.sensor_offset.flags = MOTION_SENSE_SET_OFFSET;
+ st->core.param.sensor_offset.offset[0] = st->core.calib[0];
+ st->core.param.sensor_offset.temp =
+ EC_MOTION_SENSE_INVALID_CALIB_TEMP;
+ if (cros_ec_motion_send_host_cmd(&st->core, 0))
+ ret = -EIO;
+ break;
+ case IIO_CHAN_INFO_CALIBSCALE:
+ st->core.param.cmd = MOTIONSENSE_CMD_SENSOR_RANGE;
+ st->core.param.sensor_range.data = (val << 16) | (val2 / 100);
+ if (cros_ec_motion_send_host_cmd(&st->core, 0))
+ ret = -EIO;
+ break;
+ default:
+ ret = cros_ec_sensors_core_write(&st->core, chan, val, val2,
+ mask);
+ break;
+ }
+
+ mutex_unlock(&st->core.cmd_lock);
+
+ return ret;
+}
+
+static const struct iio_info cros_ec_light_prox_info = {
+ .read_raw = &cros_ec_light_prox_read,
+ .write_raw = &cros_ec_light_prox_write,
+ .driver_module = THIS_MODULE,
+};
+
+static int cros_ec_light_prox_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct cros_ec_dev *ec_dev = dev_get_drvdata(dev->parent);
+ struct cros_ec_device *ec_device;
+ struct iio_dev *indio_dev;
+ struct cros_ec_light_prox_state *state;
+ struct iio_chan_spec *channel;
+ int ret;
+
+ if (!ec_dev || !ec_dev->ec_dev) {
+ dev_warn(dev, "No CROS EC device found.\n");
+ return -EINVAL;
+ }
+ ec_device = ec_dev->ec_dev;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*state));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ ret = cros_ec_sensors_core_init(pdev, indio_dev, true);
+ if (ret)
+ return ret;
+
+ indio_dev->info = &cros_ec_light_prox_info;
+ state = iio_priv(indio_dev);
+ state->core.type = state->core.resp->info.type;
+ state->core.loc = state->core.resp->info.location;
+ channel = state->channels;
+
+ /* Common part */
+ channel->info_mask_shared_by_all =
+ BIT(IIO_CHAN_INFO_SAMP_FREQ) |
+ BIT(IIO_CHAN_INFO_FREQUENCY);
+ channel->scan_type.realbits = CROS_EC_SENSOR_BITS;
+ channel->scan_type.storagebits = CROS_EC_SENSOR_BITS;
+ channel->scan_type.shift = 0;
+ channel->scan_index = 0;
+ channel->ext_info = cros_ec_sensors_ext_info;
+ channel->scan_type.sign = 'u';
+
+ state->core.calib[0] = 0;
+
+ /* Sensor specific */
+ switch (state->core.type) {
+ case MOTIONSENSE_TYPE_LIGHT:
+ channel->type = IIO_LIGHT;
+ channel->info_mask_separate =
+ BIT(IIO_CHAN_INFO_PROCESSED) |
+ BIT(IIO_CHAN_INFO_CALIBBIAS) |
+ BIT(IIO_CHAN_INFO_CALIBSCALE);
+ break;
+ case MOTIONSENSE_TYPE_PROX:
+ channel->type = IIO_PROXIMITY;
+ channel->info_mask_separate =
+ BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_CALIBBIAS) |
+ BIT(IIO_CHAN_INFO_CALIBSCALE);
+ break;
+ default:
+ dev_warn(dev, "Unknown motion sensor\n");
+ return -EINVAL;
+ }
+
+ /* Timestamp */
+ channel++;
+ channel->type = IIO_TIMESTAMP;
+ channel->channel = -1;
+ channel->scan_index = 1;
+ channel->scan_type.sign = 's';
+ channel->scan_type.realbits = 64;
+ channel->scan_type.storagebits = 64;
+
+ indio_dev->channels = state->channels;
+
+ indio_dev->num_channels = CROS_EC_LIGHT_PROX_MAX_CHANNELS;
+
+ state->core.read_ec_sensors_data = cros_ec_sensors_read_cmd;
+
+ ret = devm_iio_triggered_buffer_setup(dev, indio_dev, NULL,
+ cros_ec_sensors_capture, NULL);
+ if (ret)
+ return ret;
+
+ return devm_iio_device_register(dev, indio_dev);
+}
+
+static const struct platform_device_id cros_ec_light_prox_ids[] = {
+ {
+ .name = "cros-ec-prox",
+ },
+ {
+ .name = "cros-ec-light",
+ },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, cros_ec_light_prox_ids);
+
+static struct platform_driver cros_ec_light_prox_platform_driver = {
+ .driver = {
+ .name = "cros-ec-light-prox",
+ },
+ .probe = cros_ec_light_prox_probe,
+ .id_table = cros_ec_light_prox_ids,
+};
+module_platform_driver(cros_ec_light_prox_platform_driver);
+
+MODULE_DESCRIPTION("ChromeOS EC light/proximity sensors driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/light/hid-sensor-prox.c b/drivers/iio/light/hid-sensor-prox.c
index 45ca056f019e..73fced8a63b7 100644
--- a/drivers/iio/light/hid-sensor-prox.c
+++ b/drivers/iio/light/hid-sensor-prox.c
@@ -240,6 +240,13 @@ static int prox_parse_report(struct platform_device *pdev,
st->common_attributes.sensitivity.index,
st->common_attributes.sensitivity.report_id);
}
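+	/* Fall back to the human-presence sensitivity field if none was set */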
+ if (st->common_attributes.sensitivity.index < 0)
+ sensor_hub_input_get_attribute_info(hsdev,
+ HID_FEATURE_REPORT, usage_id,
+ HID_USAGE_SENSOR_DATA_MOD_CHANGE_SENSITIVITY_ABS |
+ HID_USAGE_SENSOR_HUMAN_PRESENCE,
+ &st->common_attributes.sensitivity);
+
return ret;
}
diff --git a/drivers/iio/light/lm3533-als.c b/drivers/iio/light/lm3533-als.c
index f409c2047c05..0443fd2e8757 100644
--- a/drivers/iio/light/lm3533-als.c
+++ b/drivers/iio/light/lm3533-als.c
@@ -690,7 +690,7 @@ static struct attribute *lm3533_als_event_attributes[] = {
NULL
};
-static struct attribute_group lm3533_als_event_attribute_group = {
+static const struct attribute_group lm3533_als_event_attribute_group = {
.attrs = lm3533_als_event_attributes
};
@@ -714,7 +714,7 @@ static struct attribute *lm3533_als_attributes[] = {
NULL
};
-static struct attribute_group lm3533_als_attribute_group = {
+static const struct attribute_group lm3533_als_attribute_group = {
.attrs = lm3533_als_attributes
};
diff --git a/drivers/iio/light/tsl2563.c b/drivers/iio/light/tsl2563.c
index 04598ae993d4..e7d4ea75e007 100644
--- a/drivers/iio/light/tsl2563.c
+++ b/drivers/iio/light/tsl2563.c
@@ -884,9 +884,19 @@ static const struct i2c_device_id tsl2563_id[] = {
};
MODULE_DEVICE_TABLE(i2c, tsl2563_id);
+static const struct of_device_id tsl2563_of_match[] = {
+ { .compatible = "amstaos,tsl2560" },
+ { .compatible = "amstaos,tsl2561" },
+ { .compatible = "amstaos,tsl2562" },
+ { .compatible = "amstaos,tsl2563" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, tsl2563_of_match);
+
static struct i2c_driver tsl2563_i2c_driver = {
.driver = {
.name = "tsl2563",
+ .of_match_table = tsl2563_of_match,
.pm = TSL2563_PM_OPS,
},
.probe = tsl2563_probe,
diff --git a/drivers/iio/light/us5182d.c b/drivers/iio/light/us5182d.c
index 18cf2e29e4d5..d571ad7291ed 100644
--- a/drivers/iio/light/us5182d.c
+++ b/drivers/iio/light/us5182d.c
@@ -972,10 +972,17 @@ static const struct i2c_device_id us5182d_id[] = {
MODULE_DEVICE_TABLE(i2c, us5182d_id);
+static const struct of_device_id us5182d_of_match[] = {
+ { .compatible = "upisemi,usd5182" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, us5182d_of_match);
+
static struct i2c_driver us5182d_driver = {
.driver = {
.name = US5182D_DRV_NAME,
.pm = &us5182d_pm_ops,
+ .of_match_table = us5182d_of_match,
.acpi_match_table = ACPI_PTR(us5182d_acpi_match),
},
.probe = us5182d_probe,
diff --git a/drivers/iio/light/vl6180.c b/drivers/iio/light/vl6180.c
new file mode 100644
index 000000000000..6e25b724d941
--- /dev/null
+++ b/drivers/iio/light/vl6180.c
@@ -0,0 +1,543 @@
+/*
+ * vl6180.c - Support for STMicroelectronics VL6180 ALS, range and proximity
+ * sensor
+ *
+ * Copyright 2017 Peter Meerwald-Stadler <pmeerw@pmeerw.net>
+ * Copyright 2017 Manivannan Sadhasivam <manivannanece23@gmail.com>
+ *
+ * This file is subject to the terms and conditions of version 2 of
+ * the GNU General Public License. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * IIO driver for VL6180 (7-bit I2C slave address 0x29)
+ *
+ * Range: 0 to 100mm
+ * ALS: < 1 Lux up to 100 kLux
+ * IR: 850nm
+ *
+ * TODO: irq, threshold events, continuous mode, hardware buffer
+ */
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/mutex.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/delay.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+
+#define VL6180_DRV_NAME "vl6180"
+
+/* Device identification register and value */
+#define VL6180_MODEL_ID 0x000
+#define VL6180_MODEL_ID_VAL 0xb4
+
+/* Configuration registers */
+#define VL6180_INTR_CONFIG 0x014
+#define VL6180_INTR_CLEAR 0x015
+#define VL6180_OUT_OF_RESET 0x016
+#define VL6180_HOLD 0x017
+#define VL6180_RANGE_START 0x018
+#define VL6180_ALS_START 0x038
+#define VL6180_ALS_GAIN 0x03f
+#define VL6180_ALS_IT 0x040
+
+/* Status registers */
+#define VL6180_RANGE_STATUS 0x04d
+#define VL6180_ALS_STATUS 0x04e
+#define VL6180_INTR_STATUS 0x04f
+
+/* Result value registers */
+#define VL6180_ALS_VALUE 0x050
+#define VL6180_RANGE_VALUE 0x062
+#define VL6180_RANGE_RATE 0x066
+
+/* bits of the RANGE_START and ALS_START register */
+#define VL6180_MODE_CONT BIT(1) /* continuous mode */
+#define VL6180_STARTSTOP BIT(0) /* start measurement, auto-reset */
+
+/* bits of the INTR_STATUS and INTR_CONFIG register */
+#define VL6180_ALS_READY BIT(5)
+#define VL6180_RANGE_READY BIT(2)
+
+/* bits of the INTR_CLEAR register */
+#define VL6180_CLEAR_ERROR BIT(2)
+#define VL6180_CLEAR_ALS BIT(1)
+#define VL6180_CLEAR_RANGE BIT(0)
+
+/* bits of the HOLD register */
+#define VL6180_HOLD_ON BIT(0)
+
+/* default value for the ALS_IT register */
+#define VL6180_ALS_IT_100 0x63 /* 100 ms */
+
+/* values for the ALS_GAIN register */
+#define VL6180_ALS_GAIN_1 0x46
+#define VL6180_ALS_GAIN_1_25 0x45
+#define VL6180_ALS_GAIN_1_67 0x44
+#define VL6180_ALS_GAIN_2_5 0x43
+#define VL6180_ALS_GAIN_5 0x42
+#define VL6180_ALS_GAIN_10 0x41
+#define VL6180_ALS_GAIN_20 0x40
+#define VL6180_ALS_GAIN_40 0x47
+
+struct vl6180_data {
+ struct i2c_client *client;
+ struct mutex lock;
+};
+
+enum { VL6180_ALS, VL6180_RANGE, VL6180_PROX };
+
+/**
+ * struct vl6180_chan_regs - Registers for accessing channels
+ * @drdy_mask: Data ready bit in status register
+ * @start_reg: Conversion start register
+ * @value_reg: Result value register
+ * @word: Register word length
+ */
+struct vl6180_chan_regs {
+ u8 drdy_mask;
+ u16 start_reg, value_reg;
+ bool word;
+};
+
+static const struct vl6180_chan_regs vl6180_chan_regs_table[] = {
+ [VL6180_ALS] = {
+ .drdy_mask = VL6180_ALS_READY,
+ .start_reg = VL6180_ALS_START,
+ .value_reg = VL6180_ALS_VALUE,
+ .word = true,
+ },
+ [VL6180_RANGE] = {
+ .drdy_mask = VL6180_RANGE_READY,
+ .start_reg = VL6180_RANGE_START,
+ .value_reg = VL6180_RANGE_VALUE,
+ .word = false,
+ },
+ [VL6180_PROX] = {
+ .drdy_mask = VL6180_RANGE_READY,
+ .start_reg = VL6180_RANGE_START,
+ .value_reg = VL6180_RANGE_RATE,
+ .word = true,
+ },
+};
+
+static int vl6180_read(struct i2c_client *client, u16 cmd, void *databuf,
+ u8 len)
+{
+ __be16 cmdbuf = cpu_to_be16(cmd);
+ struct i2c_msg msgs[2] = {
+ { .addr = client->addr, .len = sizeof(cmdbuf), .buf = (u8 *) &cmdbuf },
+ { .addr = client->addr, .len = len, .buf = databuf,
+ .flags = I2C_M_RD } };
+ int ret;
+
+ ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
+ if (ret < 0)
+ dev_err(&client->dev, "failed reading register 0x%04x\n", cmd);
+
+ return ret;
+}
+
+static int vl6180_read_byte(struct i2c_client *client, u16 cmd)
+{
+ u8 data;
+ int ret;
+
+ ret = vl6180_read(client, cmd, &data, sizeof(data));
+ if (ret < 0)
+ return ret;
+
+ return data;
+}
+
+static int vl6180_read_word(struct i2c_client *client, u16 cmd)
+{
+ __be16 data;
+ int ret;
+
+ ret = vl6180_read(client, cmd, &data, sizeof(data));
+ if (ret < 0)
+ return ret;
+
+ return be16_to_cpu(data);
+}
+
+static int vl6180_write_byte(struct i2c_client *client, u16 cmd, u8 val)
+{
+ u8 buf[3];
+ struct i2c_msg msgs[1] = {
+ { .addr = client->addr, .len = sizeof(buf), .buf = (u8 *) &buf } };
+ int ret;
+
+ buf[0] = cmd >> 8;
+ buf[1] = cmd & 0xff;
+ buf[2] = val;
+
+ ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
+ if (ret < 0) {
+ dev_err(&client->dev, "failed writing register 0x%04x\n", cmd);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int vl6180_write_word(struct i2c_client *client, u16 cmd, u16 val)
+{
+ __be16 buf[2];
+ struct i2c_msg msgs[1] = {
+ { .addr = client->addr, .len = sizeof(buf), .buf = (u8 *) &buf } };
+ int ret;
+
+ buf[0] = cpu_to_be16(cmd);
+ buf[1] = cpu_to_be16(val);
+
+ ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
+ if (ret < 0) {
+ dev_err(&client->dev, "failed writing register 0x%04x\n", cmd);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int vl6180_measure(struct vl6180_data *data, int addr)
+{
+ struct i2c_client *client = data->client;
+ int tries = 20, ret;
+ u16 value;
+
+ mutex_lock(&data->lock);
+ /* Start single shot measurement */
+ ret = vl6180_write_byte(client,
+ vl6180_chan_regs_table[addr].start_reg, VL6180_STARTSTOP);
+ if (ret < 0)
+ goto fail;
+
+ while (tries--) {
+ ret = vl6180_read_byte(client, VL6180_INTR_STATUS);
+ if (ret < 0)
+ goto fail;
+
+ if (ret & vl6180_chan_regs_table[addr].drdy_mask)
+ break;
+ msleep(20);
+ }
+
+ if (tries < 0) {
+ ret = -EIO;
+ goto fail;
+ }
+
+ /* Read result value from appropriate registers */
+ ret = vl6180_chan_regs_table[addr].word ?
+ vl6180_read_word(client, vl6180_chan_regs_table[addr].value_reg) :
+ vl6180_read_byte(client, vl6180_chan_regs_table[addr].value_reg);
+ if (ret < 0)
+ goto fail;
+ value = ret;
+
+ /* Clear the interrupt flag after data read */
+ ret = vl6180_write_byte(client, VL6180_INTR_CLEAR,
+ VL6180_CLEAR_ERROR | VL6180_CLEAR_ALS | VL6180_CLEAR_RANGE);
+ if (ret < 0)
+ goto fail;
+
+ ret = value;
+
+fail:
+ mutex_unlock(&data->lock);
+
+ return ret;
+}
+
+static const struct iio_chan_spec vl6180_channels[] = {
+ {
+ .type = IIO_LIGHT,
+ .address = VL6180_ALS,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_INT_TIME) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_HARDWAREGAIN),
+ }, {
+ .type = IIO_DISTANCE,
+ .address = VL6180_RANGE,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ }, {
+ .type = IIO_PROXIMITY,
+ .address = VL6180_PROX,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ }
+};
+
+/*
+ * Columns 3 & 4 represent the same gain code in decimal and hex notation.
+ * Both are kept to avoid a datatype conversion when reading back the
+ * hardware_gain.
+ */
+static const int vl6180_als_gain[8][4] = {
+ { 1, 0, 70, VL6180_ALS_GAIN_1 },
+ { 1, 250000, 69, VL6180_ALS_GAIN_1_25 },
+ { 1, 670000, 68, VL6180_ALS_GAIN_1_67 },
+ { 2, 500000, 67, VL6180_ALS_GAIN_2_5 },
+ { 5, 0, 66, VL6180_ALS_GAIN_5 },
+ { 10, 0, 65, VL6180_ALS_GAIN_10 },
+ { 20, 0, 64, VL6180_ALS_GAIN_20 },
+ { 40, 0, 71, VL6180_ALS_GAIN_40 }
+};
+
+static int vl6180_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct vl6180_data *data = iio_priv(indio_dev);
+ int ret, i;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = vl6180_measure(data, chan->address);
+ if (ret < 0)
+ return ret;
+ *val = ret;
+
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_INT_TIME:
+ ret = vl6180_read_word(data->client, VL6180_ALS_IT);
+ if (ret < 0)
+ return ret;
+ *val = 0; /* 1 count = 1ms (0 = 1ms) */
+ *val2 = (ret + 1) * 1000; /* 1 ms register steps, expressed in microseconds */
+
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_LIGHT:
+ *val = 0; /* one ALS count is 0.32 Lux */
+ *val2 = 320000;
+ break;
+ case IIO_DISTANCE:
+ *val = 0; /* sensor reports mm, scale to meter */
+ *val2 = 1000;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_CHAN_INFO_HARDWAREGAIN:
+ ret = vl6180_read_byte(data->client, VL6180_ALS_GAIN);
+ if (ret < 0)
+ return -EINVAL;
+ for (i = 0; i < ARRAY_SIZE(vl6180_als_gain); i++) {
+ if (ret == vl6180_als_gain[i][2]) {
+ *val = vl6180_als_gain[i][0];
+ *val2 = vl6180_als_gain[i][1];
+ }
+ }
+
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ return -EINVAL;
+ }
+}
+
+static IIO_CONST_ATTR(als_gain_available, "1 1.25 1.67 2.5 5 10 20 40");
+
+static struct attribute *vl6180_attributes[] = {
+ &iio_const_attr_als_gain_available.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group vl6180_attribute_group = {
+ .attrs = vl6180_attributes,
+};
+
+/* HOLD is needed before updating any config registers */
+static int vl6180_hold(struct vl6180_data *data, bool hold)
+{
+ return vl6180_write_byte(data->client, VL6180_HOLD,
+ hold ? VL6180_HOLD_ON : 0);
+}
+
+static int vl6180_set_als_gain(struct vl6180_data *data, int val, int val2)
+{
+ int i, ret;
+
+ for (i = 0; i < ARRAY_SIZE(vl6180_als_gain); i++) {
+ if (val == vl6180_als_gain[i][0] &&
+ val2 == vl6180_als_gain[i][1]) {
+ mutex_lock(&data->lock);
+ ret = vl6180_hold(data, true);
+ if (ret < 0)
+ goto fail;
+ ret = vl6180_write_byte(data->client, VL6180_ALS_GAIN,
+ vl6180_als_gain[i][3]);
+fail:
+ vl6180_hold(data, false);
+ mutex_unlock(&data->lock);
+ return ret;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int vl6180_set_it(struct vl6180_data *data, int val2)
+{
+ int ret;
+
+ mutex_lock(&data->lock);
+ ret = vl6180_hold(data, true);
+ if (ret < 0)
+ goto fail;
+ ret = vl6180_write_word(data->client, VL6180_ALS_IT,
+ (val2 - 500) / 1000); /* write value in ms */
+fail:
+ vl6180_hold(data, false);
+ mutex_unlock(&data->lock);
+
+ return ret;
+}
+
+static int vl6180_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct vl6180_data *data = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_INT_TIME:
+ if (val != 0 || val2 < 500 || val2 >= 512500)
+ return -EINVAL;
+
+ return vl6180_set_it(data, val2);
+ case IIO_CHAN_INFO_HARDWAREGAIN:
+ if (chan->type != IIO_LIGHT)
+ return -EINVAL;
+
+ return vl6180_set_als_gain(data, val, val2);
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct iio_info vl6180_info = {
+ .read_raw = vl6180_read_raw,
+ .write_raw = vl6180_write_raw,
+ .attrs = &vl6180_attribute_group,
+ .driver_module = THIS_MODULE,
+};
+
+static int vl6180_init(struct vl6180_data *data)
+{
+ struct i2c_client *client = data->client;
+ int ret;
+
+ ret = vl6180_read_byte(client, VL6180_MODEL_ID);
+ if (ret < 0)
+ return ret;
+
+ if (ret != VL6180_MODEL_ID_VAL) {
+ dev_err(&client->dev, "invalid model ID %02x\n", ret);
+ return -ENODEV;
+ }
+
+ ret = vl6180_hold(data, true);
+ if (ret < 0)
+ return ret;
+
+ ret = vl6180_read_byte(client, VL6180_OUT_OF_RESET);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Detect false reset condition here. This bit is always set when the
+ * system comes out of reset.
+ */
+ if (ret != 0x01)
+ dev_info(&client->dev, "device is not fresh out of reset\n");
+
+ /* Enable ALS and Range ready interrupts */
+ ret = vl6180_write_byte(client, VL6180_INTR_CONFIG,
+ VL6180_ALS_READY | VL6180_RANGE_READY);
+ if (ret < 0)
+ return ret;
+
+ /* ALS integration time: 100ms */
+ ret = vl6180_write_word(client, VL6180_ALS_IT, VL6180_ALS_IT_100);
+ if (ret < 0)
+ return ret;
+
+ /* ALS gain: 1 */
+ ret = vl6180_write_byte(client, VL6180_ALS_GAIN, VL6180_ALS_GAIN_1);
+ if (ret < 0)
+ return ret;
+
+ ret = vl6180_write_byte(client, VL6180_OUT_OF_RESET, 0x00);
+ if (ret < 0)
+ return ret;
+
+ return vl6180_hold(data, false);
+}
+
+static int vl6180_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct vl6180_data *data;
+ struct iio_dev *indio_dev;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ data = iio_priv(indio_dev);
+ i2c_set_clientdata(client, indio_dev);
+ data->client = client;
+ mutex_init(&data->lock);
+
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->info = &vl6180_info;
+ indio_dev->channels = vl6180_channels;
+ indio_dev->num_channels = ARRAY_SIZE(vl6180_channels);
+ indio_dev->name = VL6180_DRV_NAME;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ ret = vl6180_init(data);
+ if (ret < 0)
+ return ret;
+
+ return devm_iio_device_register(&client->dev, indio_dev);
+}
+
+static const struct of_device_id vl6180_of_match[] = {
+ { .compatible = "st,vl6180", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, vl6180_of_match);
+
+static const struct i2c_device_id vl6180_id[] = {
+ { "vl6180", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, vl6180_id);
+
+static struct i2c_driver vl6180_driver = {
+ .driver = {
+ .name = VL6180_DRV_NAME,
+ .of_match_table = of_match_ptr(vl6180_of_match),
+ },
+ .probe = vl6180_probe,
+ .id_table = vl6180_id,
+};
+
+module_i2c_driver(vl6180_driver);
+
+MODULE_AUTHOR("Peter Meerwald-Stadler <pmeerw@pmeerw.net>");
+MODULE_AUTHOR("Manivannan Sadhasivam <manivannanece23@gmail.com>");
+MODULE_DESCRIPTION("STMicro VL6180 ALS, range and proximity sensor driver");
+MODULE_LICENSE("GPL");
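
The SCALE values exported above are meant to be combined with the RAW readings by whoever consumes the channels; a minimal sketch of that arithmetic, using made-up raw counts (illustration only, not part of the driver):

  /* Illustration only: combining RAW readings with the exported SCALE. */
  #include <stdio.h>

  int main(void)
  {
          int als_raw = 250;              /* hypothetical ALS raw count */
          int range_raw = 42;             /* hypothetical range raw count, in mm */

          /* ALS scale above is 0.32 lux per count (val = 0, val2 = 320000) */
          double lux = als_raw * 0.32;
          /* distance scale is 0.001 (val = 0, val2 = 1000), i.e. mm -> m */
          double metres = range_raw * 0.001;

          printf("%.2f lux, %.3f m\n", lux, metres);
          return 0;
  }
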
diff --git a/drivers/iio/magnetometer/bmc150_magn_i2c.c b/drivers/iio/magnetometer/bmc150_magn_i2c.c
index ee05722587aa..57e40dd1222e 100644
--- a/drivers/iio/magnetometer/bmc150_magn_i2c.c
+++ b/drivers/iio/magnetometer/bmc150_magn_i2c.c
@@ -63,9 +63,18 @@ static const struct i2c_device_id bmc150_magn_i2c_id[] = {
};
MODULE_DEVICE_TABLE(i2c, bmc150_magn_i2c_id);
+static const struct of_device_id bmc150_magn_of_match[] = {
+ { .compatible = "bosch,bmc150_magn" },
+ { .compatible = "bosch,bmc156_magn" },
+ { .compatible = "bosch,bmm150_magn" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, bmc150_magn_of_match);
+
static struct i2c_driver bmc150_magn_driver = {
.driver = {
.name = "bmc150_magn_i2c",
+ .of_match_table = bmc150_magn_of_match,
.acpi_match_table = ACPI_PTR(bmc150_magn_acpi_match),
.pm = &bmc150_magn_pm_ops,
},
diff --git a/drivers/iio/magnetometer/mag3110.c b/drivers/iio/magnetometer/mag3110.c
index b4f643fb3b1e..dad8d57f7402 100644
--- a/drivers/iio/magnetometer/mag3110.c
+++ b/drivers/iio/magnetometer/mag3110.c
@@ -441,9 +441,16 @@ static const struct i2c_device_id mag3110_id[] = {
};
MODULE_DEVICE_TABLE(i2c, mag3110_id);
+static const struct of_device_id mag3110_of_match[] = {
+ { .compatible = "fsl,mag3110" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, mag3110_of_match);
+
static struct i2c_driver mag3110_driver = {
.driver = {
.name = "mag3110",
+ .of_match_table = mag3110_of_match,
.pm = MAG3110_PM_OPS,
},
.probe = mag3110_probe,
diff --git a/drivers/iio/potentiostat/lmp91000.c b/drivers/iio/potentiostat/lmp91000.c
index e22714365022..afa8de3418d0 100644
--- a/drivers/iio/potentiostat/lmp91000.c
+++ b/drivers/iio/potentiostat/lmp91000.c
@@ -325,6 +325,7 @@ static int lmp91000_probe(struct i2c_client *client,
indio_dev->channels = lmp91000_channels;
indio_dev->num_channels = ARRAY_SIZE(lmp91000_channels);
indio_dev->name = LMP91000_DRV_NAME;
+ indio_dev->dev.parent = &client->dev;
indio_dev->modes = INDIO_DIRECT_MODE;
i2c_set_clientdata(client, indio_dev);
diff --git a/drivers/iio/pressure/bmp280-core.c b/drivers/iio/pressure/bmp280-core.c
index 4d18826ac63c..d82b788374b6 100644
--- a/drivers/iio/pressure/bmp280-core.c
+++ b/drivers/iio/pressure/bmp280-core.c
@@ -175,11 +175,12 @@ static u32 bmp280_compensate_humidity(struct bmp280_data *data,
}
H6 = sign_extend32(tmp, 7);
- var = ((s32)data->t_fine) - 76800;
- var = ((((adc_humidity << 14) - (H4 << 20) - (H5 * var)) + 16384) >> 15)
- * (((((((var * H6) >> 10) * (((var * H3) >> 11) + 32768)) >> 10)
- + 2097152) * H2 + 8192) >> 14);
- var -= ((((var >> 15) * (var >> 15)) >> 7) * H1) >> 4;
+ var = ((s32)data->t_fine) - (s32)76800;
+ var = ((((adc_humidity << 14) - (H4 << 20) - (H5 * var))
+ + (s32)16384) >> 15) * (((((((var * H6) >> 10)
+ * (((var * (s32)H3) >> 11) + (s32)32768)) >> 10)
+ + (s32)2097152) * H2 + 8192) >> 14);
+ var -= ((((var >> 15) * (var >> 15)) >> 7) * (s32)H1) >> 4;
return var >> 12;
};
diff --git a/drivers/iio/pressure/hp03.c b/drivers/iio/pressure/hp03.c
index ac76515d5d49..8c7b3ec3d84a 100644
--- a/drivers/iio/pressure/hp03.c
+++ b/drivers/iio/pressure/hp03.c
@@ -297,9 +297,16 @@ static const struct i2c_device_id hp03_id[] = {
};
MODULE_DEVICE_TABLE(i2c, hp03_id);
+static const struct of_device_id hp03_of_match[] = {
+ { .compatible = "hoperf,hp03" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, hp03_of_match);
+
static struct i2c_driver hp03_driver = {
.driver = {
.name = "hp03",
+ .of_match_table = hp03_of_match,
},
.probe = hp03_probe,
.remove = hp03_remove,
diff --git a/drivers/iio/pressure/mpl3115.c b/drivers/iio/pressure/mpl3115.c
index 525644a7442d..619b963714c7 100644
--- a/drivers/iio/pressure/mpl3115.c
+++ b/drivers/iio/pressure/mpl3115.c
@@ -321,9 +321,16 @@ static const struct i2c_device_id mpl3115_id[] = {
};
MODULE_DEVICE_TABLE(i2c, mpl3115_id);
+static const struct of_device_id mpl3115_of_match[] = {
+ { .compatible = "fsl,mpl3115" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, mpl3115_of_match);
+
static struct i2c_driver mpl3115_driver = {
.driver = {
.name = "mpl3115",
+ .of_match_table = mpl3115_of_match,
.pm = MPL3115_PM_OPS,
},
.probe = mpl3115_probe,
diff --git a/drivers/iio/pressure/zpa2326.c b/drivers/iio/pressure/zpa2326.c
index c720c3ac0b9b..e58a0ad07477 100644
--- a/drivers/iio/pressure/zpa2326.c
+++ b/drivers/iio/pressure/zpa2326.c
@@ -751,7 +751,7 @@ static void zpa2326_suspend(struct iio_dev *indio_dev)
*/
static irqreturn_t zpa2326_handle_irq(int irq, void *data)
{
- struct iio_dev *indio_dev = (struct iio_dev *)data;
+ struct iio_dev *indio_dev = data;
if (iio_buffer_enabled(indio_dev)) {
/* Timestamping needed for buffered sampling only. */
@@ -790,7 +790,7 @@ static irqreturn_t zpa2326_handle_irq(int irq, void *data)
*/
static irqreturn_t zpa2326_handle_threaded_irq(int irq, void *data)
{
- struct iio_dev *indio_dev = (struct iio_dev *)data;
+ struct iio_dev *indio_dev = data;
struct zpa2326_private *priv = iio_priv(indio_dev);
unsigned int val;
bool cont;
diff --git a/drivers/iio/proximity/Kconfig b/drivers/iio/proximity/Kconfig
index ab96cb7a0054..5b81a8c9d438 100644
--- a/drivers/iio/proximity/Kconfig
+++ b/drivers/iio/proximity/Kconfig
@@ -32,6 +32,17 @@ config LIDAR_LITE_V2
To compile this driver as a module, choose M here: the
module will be called pulsedlight-lite-v2
+config SRF04
+ tristate "Devantech SRF04 ultrasonic ranger sensor"
+ depends on GPIOLIB
+ help
+ Say Y here to build a driver for the Devantech SRF04 ultrasonic
+ ranger sensor. This driver can be used to measure the distance
+ to objects. It uses two GPIOs.
+
+ To compile this driver as a module, choose M here: the
+ module will be called srf04.
+
config SX9500
tristate "SX9500 Semtech proximity sensor"
select IIO_BUFFER
diff --git a/drivers/iio/proximity/Makefile b/drivers/iio/proximity/Makefile
index e914c2a5dd49..ed1b6f4cc209 100644
--- a/drivers/iio/proximity/Makefile
+++ b/drivers/iio/proximity/Makefile
@@ -5,5 +5,6 @@
# When adding new entries keep the list in alphabetical order
obj-$(CONFIG_AS3935) += as3935.o
obj-$(CONFIG_LIDAR_LITE_V2) += pulsedlight-lidar-lite-v2.o
+obj-$(CONFIG_SRF04) += srf04.o
obj-$(CONFIG_SRF08) += srf08.o
obj-$(CONFIG_SX9500) += sx9500.o
diff --git a/drivers/iio/proximity/as3935.c b/drivers/iio/proximity/as3935.c
index 5656deb17261..ddf9bee89f77 100644
--- a/drivers/iio/proximity/as3935.c
+++ b/drivers/iio/proximity/as3935.c
@@ -50,7 +50,6 @@
#define AS3935_TUNE_CAP 0x08
#define AS3935_CALIBRATE 0x3D
-#define AS3935_WRITE_DATA BIT(15)
#define AS3935_READ_DATA BIT(14)
#define AS3935_ADDRESS(x) ((x) << 8)
@@ -105,7 +104,7 @@ static int as3935_write(struct as3935_state *st,
{
u8 *buf = st->buf;
- buf[0] = (AS3935_WRITE_DATA | AS3935_ADDRESS(reg)) >> 8;
+ buf[0] = AS3935_ADDRESS(reg) >> 8;
buf[1] = val;
return spi_write(st->spi, buf, 2);
@@ -155,7 +154,7 @@ static struct attribute *as3935_attributes[] = {
NULL,
};
-static struct attribute_group as3935_attribute_group = {
+static const struct attribute_group as3935_attribute_group = {
.attrs = as3935_attributes,
};
diff --git a/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c b/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
index 20c16a08c9d9..36c1ddc251aa 100644
--- a/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
+++ b/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
@@ -278,6 +278,7 @@ static int lidar_probe(struct i2c_client *client,
indio_dev->name = LIDAR_DRV_NAME;
indio_dev->channels = lidar_channels;
indio_dev->num_channels = ARRAY_SIZE(lidar_channels);
+ indio_dev->dev.parent = &client->dev;
indio_dev->modes = INDIO_DIRECT_MODE;
i2c_set_clientdata(client, indio_dev);
diff --git a/drivers/iio/proximity/srf04.c b/drivers/iio/proximity/srf04.c
new file mode 100644
index 000000000000..e37667f933b3
--- /dev/null
+++ b/drivers/iio/proximity/srf04.c
@@ -0,0 +1,304 @@
+/*
+ * SRF04: ultrasonic sensor for distance measurement using GPIOs
+ *
+ * Copyright (c) 2017 Andreas Klinger <ak@it-klinger.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * For details about the device see:
+ * http://www.robot-electronics.co.uk/htm/srf04tech.htm
+ *
+ * the measurement cycle as a timing diagram looks like:
+ *
+ * +---+
+ * GPIO | |
+ * trig: --+ +------------------------------------------------------
+ * ^ ^
+ * |<->|
+ * udelay(10)
+ *
+ * ultra +-+ +-+ +-+
+ * sonic | | | | | |
+ * burst: ---------+ +-+ +-+ +-----------------------------------------
+ * .
+ * ultra . +-+ +-+ +-+
+ * sonic . | | | | | |
+ * echo: ----------------------------------+ +-+ +-+ +----------------
+ * . .
+ * +------------------------+
+ * GPIO | |
+ * echo: -------------------+ +---------------
+ * ^ ^
+ * interrupt interrupt
+ * (ts_rising) (ts_falling)
+ * |<---------------------->|
+ * pulse time measured
+ * --> one round trip of the ultrasonic waves
+ */
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+
+struct srf04_data {
+ struct device *dev;
+ struct gpio_desc *gpiod_trig;
+ struct gpio_desc *gpiod_echo;
+ struct mutex lock;
+ int irqnr;
+ ktime_t ts_rising;
+ ktime_t ts_falling;
+ struct completion rising;
+ struct completion falling;
+};
+
+static irqreturn_t srf04_handle_irq(int irq, void *dev_id)
+{
+ struct iio_dev *indio_dev = dev_id;
+ struct srf04_data *data = iio_priv(indio_dev);
+ ktime_t now = ktime_get();
+
+ if (gpiod_get_value(data->gpiod_echo)) {
+ data->ts_rising = now;
+ complete(&data->rising);
+ } else {
+ data->ts_falling = now;
+ complete(&data->falling);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int srf04_read(struct srf04_data *data)
+{
+ int ret;
+ ktime_t ktime_dt;
+ u64 dt_ns;
+ u32 time_ns, distance_mm;
+
+ /*
+ * only one read-echo cycle can take place at a time
+ * ==> lock against concurrent read calls
+ */
+ mutex_lock(&data->lock);
+
+ reinit_completion(&data->rising);
+ reinit_completion(&data->falling);
+
+ gpiod_set_value(data->gpiod_trig, 1);
+ udelay(10);
+ gpiod_set_value(data->gpiod_trig, 0);
+
+ /* it cannot take more than 20 ms */
+ ret = wait_for_completion_killable_timeout(&data->rising, HZ/50);
+ if (ret < 0) {
+ mutex_unlock(&data->lock);
+ return ret;
+ } else if (ret == 0) {
+ mutex_unlock(&data->lock);
+ return -ETIMEDOUT;
+ }
+
+ ret = wait_for_completion_killable_timeout(&data->falling, HZ/50);
+ if (ret < 0) {
+ mutex_unlock(&data->lock);
+ return ret;
+ } else if (ret == 0) {
+ mutex_unlock(&data->lock);
+ return -ETIMEDOUT;
+ }
+
+ ktime_dt = ktime_sub(data->ts_falling, data->ts_rising);
+
+ mutex_unlock(&data->lock);
+
+ dt_ns = ktime_to_ns(ktime_dt);
+ /*
+ * measuring more than 3 meters is beyond the capabilities of
+ * the sensor
+ * ==> filter out invalid results so that we do not pick up the
+ * echoes of another ultrasonic sensor
+ *
+ * formula:
+ * distance 3 m
+ * time = ---------- = --------- = 9404389 ns
+ * speed 319 m/s
+ *
+ * using a minimum speed at -20 °C of 319 m/s
+ */
+ if (dt_ns > 9404389)
+ return -EIO;
+
+ time_ns = dt_ns;
+
+ /*
+ * the speed as a function of the temperature is approximately:
+ *
+ * speed = 331.5 + 0.6 * Temp
+ * with Temp in °C
+ * and speed in m/s
+ *
+ * use 343 m/s as the ultrasonic speed at 20 °C here, in the absence
+ * of a temperature measurement
+ *
+ * therefore:
+ * time 343
+ * distance = ------ * -----
+ * 10^6 2
+ * with time in ns
+ * and distance in mm (one way)
+ *
+ * because we limit the range to 3 meters, the multiplication by 343
+ * still fits into 32 bits
+ */
+ distance_mm = time_ns * 343 / 2000000;
+
+ return distance_mm;
+}
+
+static int srf04_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *channel, int *val,
+ int *val2, long info)
+{
+ struct srf04_data *data = iio_priv(indio_dev);
+ int ret;
+
+ if (channel->type != IIO_DISTANCE)
+ return -EINVAL;
+
+ switch (info) {
+ case IIO_CHAN_INFO_RAW:
+ ret = srf04_read(data);
+ if (ret < 0)
+ return ret;
+ *val = ret;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ /*
+ * theoretical maximum resolution is 3 mm
+ * 1 LSB is 1 mm
+ */
+ *val = 0;
+ *val2 = 1000;
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct iio_info srf04_iio_info = {
+ .driver_module = THIS_MODULE,
+ .read_raw = srf04_read_raw,
+};
+
+static const struct iio_chan_spec srf04_chan_spec[] = {
+ {
+ .type = IIO_DISTANCE,
+ .info_mask_separate =
+ BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ },
+};
+
+static int srf04_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct srf04_data *data;
+ struct iio_dev *indio_dev;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(struct srf04_data));
+ if (!indio_dev) {
+ dev_err(dev, "failed to allocate IIO device\n");
+ return -ENOMEM;
+ }
+
+ data = iio_priv(indio_dev);
+ data->dev = dev;
+
+ mutex_init(&data->lock);
+ init_completion(&data->rising);
+ init_completion(&data->falling);
+
+ data->gpiod_trig = devm_gpiod_get(dev, "trig", GPIOD_OUT_LOW);
+ if (IS_ERR(data->gpiod_trig)) {
+ dev_err(dev, "failed to get trig-gpios: err=%ld\n",
+ PTR_ERR(data->gpiod_trig));
+ return PTR_ERR(data->gpiod_trig);
+ }
+
+ data->gpiod_echo = devm_gpiod_get(dev, "echo", GPIOD_IN);
+ if (IS_ERR(data->gpiod_echo)) {
+ dev_err(dev, "failed to get echo-gpios: err=%ld\n",
+ PTR_ERR(data->gpiod_echo));
+ return PTR_ERR(data->gpiod_echo);
+ }
+
+ if (gpiod_cansleep(data->gpiod_echo)) {
+ dev_err(data->dev, "cansleep-GPIOs not supported\n");
+ return -ENODEV;
+ }
+
+ data->irqnr = gpiod_to_irq(data->gpiod_echo);
+ if (data->irqnr < 0) {
+ dev_err(data->dev, "gpiod_to_irq: %d\n", data->irqnr);
+ return data->irqnr;
+ }
+
+ ret = devm_request_irq(dev, data->irqnr, srf04_handle_irq,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+ pdev->name, indio_dev);
+ if (ret < 0) {
+ dev_err(data->dev, "request_irq: %d\n", ret);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, indio_dev);
+
+ indio_dev->name = "srf04";
+ indio_dev->dev.parent = &pdev->dev;
+ indio_dev->info = &srf04_iio_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = srf04_chan_spec;
+ indio_dev->num_channels = ARRAY_SIZE(srf04_chan_spec);
+
+ return devm_iio_device_register(dev, indio_dev);
+}
+
+static const struct of_device_id of_srf04_match[] = {
+ { .compatible = "devantech,srf04", },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, of_srf04_match);
+
+static struct platform_driver srf04_driver = {
+ .probe = srf04_probe,
+ .driver = {
+ .name = "srf04-gpio",
+ .of_match_table = of_srf04_match,
+ },
+};
+
+module_platform_driver(srf04_driver);
+
+MODULE_AUTHOR("Andreas Klinger <ak@it-klinger.de>");
+MODULE_DESCRIPTION("SRF04 ultrasonic sensor for distance measuring using GPIOs");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:srf04");
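
A quick worked example of the nanosecond-to-millimetre conversion done in srf04_read() above, with a made-up echo pulse width (illustration only, not part of the driver):

  /* Illustration only: the ns -> mm math from srf04_read(); dt_ns is made up. */
  #include <stdio.h>

  int main(void)
  {
          unsigned long long dt_ns = 1000000;     /* hypothetical 1 ms echo pulse */
          unsigned int distance_mm;

          if (dt_ns > 9404389) {                  /* longer than 3 m at 319 m/s: reject */
                  printf("out of range\n");
                  return 1;
          }

          /* distance_mm = time_ns * 343 m/s / 2 (round trip) / 10^6 */
          distance_mm = dt_ns * 343 / 2000000;

          printf("%u mm\n", distance_mm);         /* prints 171 mm */
          return 0;
  }
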
diff --git a/drivers/iio/temperature/Kconfig b/drivers/iio/temperature/Kconfig
index 3089e8d0a32d..5378976d6d27 100644
--- a/drivers/iio/temperature/Kconfig
+++ b/drivers/iio/temperature/Kconfig
@@ -19,6 +19,20 @@ config MAXIM_THERMOCOUPLE
This driver can also be built as a module. If so, the module will
be called maxim_thermocouple.
+config HID_SENSOR_TEMP
+ tristate "HID Environmental temperature sensor"
+ depends on HID_SENSOR_HUB
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ select HID_SENSOR_IIO_COMMON
+ select HID_SENSOR_IIO_TRIGGER
+ help
+ Say yes here to build support for the HID sensor
+ environmental temperature driver.
+
+ To compile this driver as a module, choose M here: the module
+ will be called hid-sensor-temperature.
+
config MLX90614
tristate "MLX90614 contact-less infrared sensor"
depends on I2C
diff --git a/drivers/iio/temperature/Makefile b/drivers/iio/temperature/Makefile
index 4c4377480726..ad1d668de546 100644
--- a/drivers/iio/temperature/Makefile
+++ b/drivers/iio/temperature/Makefile
@@ -2,6 +2,7 @@
# Makefile for industrial I/O temperature drivers
#
+obj-$(CONFIG_HID_SENSOR_TEMP) += hid-sensor-temperature.o
obj-$(CONFIG_MAXIM_THERMOCOUPLE) += maxim_thermocouple.o
obj-$(CONFIG_MLX90614) += mlx90614.o
obj-$(CONFIG_TMP006) += tmp006.o
diff --git a/drivers/iio/temperature/hid-sensor-temperature.c b/drivers/iio/temperature/hid-sensor-temperature.c
new file mode 100644
index 000000000000..c01efeca4002
--- /dev/null
+++ b/drivers/iio/temperature/hid-sensor-temperature.c
@@ -0,0 +1,311 @@
+/*
+ * HID Sensors Driver
+ * Copyright (c) 2017, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.
+ */
+#include <linux/device.h>
+#include <linux/hid-sensor-hub.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include "../common/hid-sensors/hid-sensor-trigger.h"
+
+struct temperature_state {
+ struct hid_sensor_common common_attributes;
+ struct hid_sensor_hub_attribute_info temperature_attr;
+ s32 temperature_data;
+ int scale_pre_decml;
+ int scale_post_decml;
+ int scale_precision;
+ int value_offset;
+};
+
+/* Channel definitions */
+static const struct iio_chan_spec temperature_channels[] = {
+ {
+ .type = IIO_TEMP,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_OFFSET) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_SAMP_FREQ) |
+ BIT(IIO_CHAN_INFO_HYSTERESIS),
+ },
+ IIO_CHAN_SOFT_TIMESTAMP(3),
+};
+
+/* Adjust channel real bits based on report descriptor */
+static void temperature_adjust_channel_bit_mask(struct iio_chan_spec *channels,
+ int channel, int size)
+{
+ channels[channel].scan_type.sign = 's';
+ /* Real storage bits will change based on the report desc. */
+ channels[channel].scan_type.realbits = size * 8;
+ /* Maximum size of a sample to capture is s32 */
+ channels[channel].scan_type.storagebits = sizeof(s32) * 8;
+}
+
+static int temperature_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct temperature_state *temp_st = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ if (chan->type != IIO_TEMP)
+ return -EINVAL;
+ hid_sensor_power_state(
+ &temp_st->common_attributes, true);
+ *val = sensor_hub_input_attr_get_raw_value(
+ temp_st->common_attributes.hsdev,
+ HID_USAGE_SENSOR_TEMPERATURE,
+ HID_USAGE_SENSOR_DATA_ENVIRONMENTAL_TEMPERATURE,
+ temp_st->temperature_attr.report_id,
+ SENSOR_HUB_SYNC);
+ hid_sensor_power_state(
+ &temp_st->common_attributes,
+ false);
+
+ return IIO_VAL_INT;
+
+ case IIO_CHAN_INFO_SCALE:
+ *val = temp_st->scale_pre_decml;
+ *val2 = temp_st->scale_post_decml;
+ return temp_st->scale_precision;
+
+ case IIO_CHAN_INFO_OFFSET:
+ *val = temp_st->value_offset;
+ return IIO_VAL_INT;
+
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ return hid_sensor_read_samp_freq_value(
+ &temp_st->common_attributes, val, val2);
+
+ case IIO_CHAN_INFO_HYSTERESIS:
+ return hid_sensor_read_raw_hyst_value(
+ &temp_st->common_attributes, val, val2);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int temperature_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct temperature_state *temp_st = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ return hid_sensor_write_samp_freq_value(
+ &temp_st->common_attributes, val, val2);
+ case IIO_CHAN_INFO_HYSTERESIS:
+ return hid_sensor_write_raw_hyst_value(
+ &temp_st->common_attributes, val, val2);
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct iio_info temperature_info = {
+ .driver_module = THIS_MODULE,
+ .read_raw = &temperature_read_raw,
+ .write_raw = &temperature_write_raw,
+};
+
+/* Callback handler to send event after all samples are received and captured */
+static int temperature_proc_event(struct hid_sensor_hub_device *hsdev,
+ unsigned int usage_id, void *pdev)
+{
+ struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+ struct temperature_state *temp_st = iio_priv(indio_dev);
+
+ if (atomic_read(&temp_st->common_attributes.data_ready))
+ iio_push_to_buffers_with_timestamp(indio_dev,
+ &temp_st->temperature_data,
+ iio_get_time_ns(indio_dev));
+
+ return 0;
+}
+
+/* Capture samples in local storage */
+static int temperature_capture_sample(struct hid_sensor_hub_device *hsdev,
+ unsigned int usage_id, size_t raw_len,
+ char *raw_data, void *pdev)
+{
+ struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+ struct temperature_state *temp_st = iio_priv(indio_dev);
+
+ switch (usage_id) {
+ case HID_USAGE_SENSOR_DATA_ENVIRONMENTAL_TEMPERATURE:
+ temp_st->temperature_data = *(s32 *)raw_data;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+/* Parse report which is specific to a usage id */
+static int temperature_parse_report(struct platform_device *pdev,
+ struct hid_sensor_hub_device *hsdev,
+ struct iio_chan_spec *channels,
+ unsigned int usage_id,
+ struct temperature_state *st)
+{
+ int ret;
+
+ ret = sensor_hub_input_get_attribute_info(hsdev, HID_INPUT_REPORT,
+ usage_id,
+ HID_USAGE_SENSOR_DATA_ENVIRONMENTAL_TEMPERATURE,
+ &st->temperature_attr);
+ if (ret < 0)
+ return ret;
+
+ temperature_adjust_channel_bit_mask(channels, 0,
+ st->temperature_attr.size);
+
+ st->scale_precision = hid_sensor_format_scale(
+ HID_USAGE_SENSOR_TEMPERATURE,
+ &st->temperature_attr,
+ &st->scale_pre_decml, &st->scale_post_decml);
+
+ /* Set Sensitivity field ids, when there is no individual modifier */
+ if (st->common_attributes.sensitivity.index < 0)
+ sensor_hub_input_get_attribute_info(hsdev,
+ HID_FEATURE_REPORT, usage_id,
+ HID_USAGE_SENSOR_DATA_MOD_CHANGE_SENSITIVITY_ABS |
+ HID_USAGE_SENSOR_DATA_ENVIRONMENTAL_TEMPERATURE,
+ &st->common_attributes.sensitivity);
+
+ return ret;
+}
+
+static struct hid_sensor_hub_callbacks temperature_callbacks = {
+ .send_event = &temperature_proc_event,
+ .capture_sample = &temperature_capture_sample,
+};
+
+/* Function to initialize the processing for usage id */
+static int hid_temperature_probe(struct platform_device *pdev)
+{
+ static const char *name = "temperature";
+ struct iio_dev *indio_dev;
+ struct temperature_state *temp_st;
+ struct iio_chan_spec *temp_chans;
+ struct hid_sensor_hub_device *hsdev = dev_get_platdata(&pdev->dev);
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*temp_st));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ temp_st = iio_priv(indio_dev);
+ temp_st->common_attributes.hsdev = hsdev;
+ temp_st->common_attributes.pdev = pdev;
+
+ ret = hid_sensor_parse_common_attributes(hsdev,
+ HID_USAGE_SENSOR_TEMPERATURE,
+ &temp_st->common_attributes);
+ if (ret)
+ return ret;
+
+ temp_chans = devm_kmemdup(&indio_dev->dev, temperature_channels,
+ sizeof(temperature_channels), GFP_KERNEL);
+ if (!temp_chans)
+ return -ENOMEM;
+
+ ret = temperature_parse_report(pdev, hsdev, temp_chans,
+ HID_USAGE_SENSOR_TEMPERATURE, temp_st);
+ if (ret)
+ return ret;
+
+ indio_dev->channels = temp_chans;
+ indio_dev->num_channels = ARRAY_SIZE(temperature_channels);
+ indio_dev->dev.parent = &pdev->dev;
+ indio_dev->info = &temperature_info;
+ indio_dev->name = name;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ ret = devm_iio_triggered_buffer_setup(&pdev->dev, indio_dev,
+ &iio_pollfunc_store_time, NULL, NULL);
+ if (ret)
+ return ret;
+
+ atomic_set(&temp_st->common_attributes.data_ready, 0);
+ ret = hid_sensor_setup_trigger(indio_dev, name,
+ &temp_st->common_attributes);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, indio_dev);
+
+ temperature_callbacks.pdev = pdev;
+ ret = sensor_hub_register_callback(hsdev, HID_USAGE_SENSOR_TEMPERATURE,
+ &temperature_callbacks);
+ if (ret)
+ goto error_remove_trigger;
+
+ ret = devm_iio_device_register(indio_dev->dev.parent, indio_dev);
+ if (ret)
+ goto error_remove_callback;
+
+ return ret;
+
+error_remove_callback:
+ sensor_hub_remove_callback(hsdev, HID_USAGE_SENSOR_TEMPERATURE);
+error_remove_trigger:
+ hid_sensor_remove_trigger(&temp_st->common_attributes);
+ return ret;
+}
+
+/* Function to deinitialize the processing for usage id */
+static int hid_temperature_remove(struct platform_device *pdev)
+{
+ struct hid_sensor_hub_device *hsdev = dev_get_platdata(&pdev->dev);
+ struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+ struct temperature_state *temp_st = iio_priv(indio_dev);
+
+ sensor_hub_remove_callback(hsdev, HID_USAGE_SENSOR_TEMPERATURE);
+ hid_sensor_remove_trigger(&temp_st->common_attributes);
+
+ return 0;
+}
+
+static const struct platform_device_id hid_temperature_ids[] = {
+ {
+ /* Format: HID-SENSOR-usage_id_in_hex_lowercase */
+ .name = "HID-SENSOR-200033",
+ },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, hid_temperature_ids);
+
+static struct platform_driver hid_temperature_platform_driver = {
+ .id_table = hid_temperature_ids,
+ .driver = {
+ .name = "temperature-sensor",
+ .pm = &hid_sensor_pm_ops,
+ },
+ .probe = hid_temperature_probe,
+ .remove = hid_temperature_remove,
+};
+module_platform_driver(hid_temperature_platform_driver);
+
+MODULE_DESCRIPTION("HID Environmental temperature sensor");
+MODULE_AUTHOR("Song Hongyan <hongyan.song@intel.com>");
+MODULE_LICENSE("GPL v2");
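
The channels above only export raw, offset and scale; converting to a temperature is left to the consumer, following the usual IIO convention value = (raw + offset) * scale. A small sketch with made-up numbers (the real scale and offset come from the HID report descriptor):

  /* Illustration only: (raw + offset) * scale with hypothetical values. */
  #include <stdio.h>

  int main(void)
  {
          int raw = 2150;                 /* hypothetical raw sample */
          int offset = 0;                 /* offset reported by the driver */
          double scale = 0.01;            /* scale reported by the driver, device dependent */

          printf("%.2f degrees Celsius\n", (raw + offset) * scale);
          return 0;
  }
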
diff --git a/drivers/iio/temperature/maxim_thermocouple.c b/drivers/iio/temperature/maxim_thermocouple.c
index f962f31a5eb2..557214202eff 100644
--- a/drivers/iio/temperature/maxim_thermocouple.c
+++ b/drivers/iio/temperature/maxim_thermocouple.c
@@ -231,6 +231,7 @@ static int maxim_thermocouple_probe(struct spi_device *spi)
indio_dev->available_scan_masks = chip->scan_masks;
indio_dev->num_channels = chip->num_channels;
indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->dev.parent = &spi->dev;
data = iio_priv(indio_dev);
data->spi = spi;
diff --git a/drivers/iio/temperature/mlx90614.c b/drivers/iio/temperature/mlx90614.c
index 4b645fc672aa..2077eef4095c 100644
--- a/drivers/iio/temperature/mlx90614.c
+++ b/drivers/iio/temperature/mlx90614.c
@@ -585,6 +585,12 @@ static const struct i2c_device_id mlx90614_id[] = {
};
MODULE_DEVICE_TABLE(i2c, mlx90614_id);
+static const struct of_device_id mlx90614_of_match[] = {
+ { .compatible = "melexis,mlx90614" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, mlx90614_of_match);
+
#ifdef CONFIG_PM_SLEEP
static int mlx90614_pm_suspend(struct device *dev)
{
@@ -644,6 +650,7 @@ static const struct dev_pm_ops mlx90614_pm_ops = {
static struct i2c_driver mlx90614_driver = {
.driver = {
.name = "mlx90614",
+ .of_match_table = mlx90614_of_match,
.pm = &mlx90614_pm_ops,
},
.probe = mlx90614_probe,
diff --git a/drivers/iio/temperature/tmp007.c b/drivers/iio/temperature/tmp007.c
index f04d0d1f6ac8..0615324d054c 100644
--- a/drivers/iio/temperature/tmp007.c
+++ b/drivers/iio/temperature/tmp007.c
@@ -11,9 +11,10 @@
*
* (7-bit I2C slave address (0x40 - 0x47), changeable via ADR pins)
*
- * Note: This driver assumes that the sensor has been calibrated beforehand
- *
- * TODO: ALERT irq, limit threshold events
+ * Note:
+ * 1. This driver assumes that the sensor has been calibrated beforehand
+ * 2. Limit threshold events are enabled at the start
+ * 3. Operating mode: INT
*
*/
@@ -24,25 +25,38 @@
#include <linux/pm.h>
#include <linux/bitops.h>
#include <linux/of.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
+#include <linux/iio/events.h>
#define TMP007_TDIE 0x01
#define TMP007_CONFIG 0x02
#define TMP007_TOBJECT 0x03
#define TMP007_STATUS 0x04
#define TMP007_STATUS_MASK 0x05
+#define TMP007_TOBJ_HIGH_LIMIT 0x06
+#define TMP007_TOBJ_LOW_LIMIT 0x07
+#define TMP007_TDIE_HIGH_LIMIT 0x08
+#define TMP007_TDIE_LOW_LIMIT 0x09
#define TMP007_MANUFACTURER_ID 0x1e
#define TMP007_DEVICE_ID 0x1f
#define TMP007_CONFIG_CONV_EN BIT(12)
-#define TMP007_CONFIG_COMP_EN BIT(5)
#define TMP007_CONFIG_TC_EN BIT(6)
#define TMP007_CONFIG_CR_MASK GENMASK(11, 9)
+#define TMP007_CONFIG_ALERT_EN BIT(8)
#define TMP007_CONFIG_CR_SHIFT 9
+/* Status register flags */
+#define TMP007_STATUS_ALERT BIT(15)
#define TMP007_STATUS_CONV_READY BIT(14)
+#define TMP007_STATUS_OHF BIT(13)
+#define TMP007_STATUS_OLF BIT(12)
+#define TMP007_STATUS_LHF BIT(11)
+#define TMP007_STATUS_LLF BIT(10)
#define TMP007_STATUS_DATA_VALID BIT(9)
#define TMP007_MANUFACTURER_MAGIC 0x5449
@@ -52,7 +66,9 @@
struct tmp007_data {
struct i2c_client *client;
+ struct mutex lock;
u16 config;
+ u16 status_mask;
};
static const int tmp007_avgs[5][2] = { {4, 0}, {2, 0}, {1, 0},
@@ -156,6 +172,188 @@ static int tmp007_write_raw(struct iio_dev *indio_dev,
return -EINVAL;
}
+static irqreturn_t tmp007_interrupt_handler(int irq, void *private)
+{
+ struct iio_dev *indio_dev = private;
+ struct tmp007_data *data = iio_priv(indio_dev);
+ int ret;
+
+ ret = i2c_smbus_read_word_swapped(data->client, TMP007_STATUS);
+ if ((ret < 0) || !(ret & (TMP007_STATUS_OHF | TMP007_STATUS_OLF |
+ TMP007_STATUS_LHF | TMP007_STATUS_LLF)))
+ return IRQ_NONE;
+
+ if (ret & TMP007_STATUS_OHF)
+ iio_push_event(indio_dev,
+ IIO_MOD_EVENT_CODE(IIO_TEMP, 0,
+ IIO_MOD_TEMP_OBJECT,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_RISING),
+ iio_get_time_ns(indio_dev));
+
+ if (ret & TMP007_STATUS_OLF)
+ iio_push_event(indio_dev,
+ IIO_MOD_EVENT_CODE(IIO_TEMP, 0,
+ IIO_MOD_TEMP_OBJECT,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_FALLING),
+ iio_get_time_ns(indio_dev));
+
+ if (ret & TMP007_STATUS_LHF)
+ iio_push_event(indio_dev,
+ IIO_MOD_EVENT_CODE(IIO_TEMP, 0,
+ IIO_MOD_TEMP_AMBIENT,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_RISING),
+ iio_get_time_ns(indio_dev));
+
+ if (ret & TMP007_STATUS_LLF)
+ iio_push_event(indio_dev,
+ IIO_MOD_EVENT_CODE(IIO_TEMP, 0,
+ IIO_MOD_TEMP_AMBIENT,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_FALLING),
+ iio_get_time_ns(indio_dev));
+
+ return IRQ_HANDLED;
+}
+
+static int tmp007_write_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, enum iio_event_type type,
+ enum iio_event_direction dir, int state)
+{
+ struct tmp007_data *data = iio_priv(indio_dev);
+ unsigned int status_mask;
+ int ret;
+
+ switch (chan->channel2) {
+ case IIO_MOD_TEMP_AMBIENT:
+ if (dir == IIO_EV_DIR_RISING)
+ status_mask = TMP007_STATUS_LHF;
+ else
+ status_mask = TMP007_STATUS_LLF;
+ break;
+ case IIO_MOD_TEMP_OBJECT:
+ if (dir == IIO_EV_DIR_RISING)
+ status_mask = TMP007_STATUS_OHF;
+ else
+ status_mask = TMP007_STATUS_OLF;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ mutex_lock(&data->lock);
+ ret = i2c_smbus_read_word_swapped(data->client, TMP007_STATUS_MASK);
+ mutex_unlock(&data->lock);
+ if (ret < 0)
+ return ret;
+
+ if (state)
+ ret |= status_mask;
+ else
+ ret &= ~status_mask;
+
+ return i2c_smbus_write_word_swapped(data->client, TMP007_STATUS_MASK,
+ data->status_mask = ret);
+}
+
+static int tmp007_read_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, enum iio_event_type type,
+ enum iio_event_direction dir)
+{
+ struct tmp007_data *data = iio_priv(indio_dev);
+ unsigned int mask;
+
+ switch (chan->channel2) {
+ case IIO_MOD_TEMP_AMBIENT:
+ if (dir == IIO_EV_DIR_RISING)
+ mask = TMP007_STATUS_LHF;
+ else
+ mask = TMP007_STATUS_LLF;
+ break;
+ case IIO_MOD_TEMP_OBJECT:
+ if (dir == IIO_EV_DIR_RISING)
+ mask = TMP007_STATUS_OHF;
+ else
+ mask = TMP007_STATUS_OLF;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return !!(data->status_mask & mask);
+}
+
+static int tmp007_read_thresh(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, enum iio_event_type type,
+ enum iio_event_direction dir, enum iio_event_info info,
+ int *val, int *val2)
+{
+ struct tmp007_data *data = iio_priv(indio_dev);
+ int ret;
+ u8 reg;
+
+ switch (chan->channel2) {
+ case IIO_MOD_TEMP_AMBIENT: /* LSB: 0.5 degree Celsius */
+ if (dir == IIO_EV_DIR_RISING)
+ reg = TMP007_TDIE_HIGH_LIMIT;
+ else
+ reg = TMP007_TDIE_LOW_LIMIT;
+ break;
+ case IIO_MOD_TEMP_OBJECT:
+ if (dir == IIO_EV_DIR_RISING)
+ reg = TMP007_TOBJ_HIGH_LIMIT;
+ else
+ reg = TMP007_TOBJ_LOW_LIMIT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = i2c_smbus_read_word_swapped(data->client, reg);
+ if (ret < 0)
+ return ret;
+
+ /* Limit is held in bits 15:6 with a 0.5 degree Celsius LSB: shift by 6 + 1 = 7 */
+ *val = sign_extend32(ret, 15) >> 7;
+
+ return IIO_VAL_INT;
+}
+
+static int tmp007_write_thresh(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, enum iio_event_type type,
+ enum iio_event_direction dir, enum iio_event_info info,
+ int val, int val2)
+{
+ struct tmp007_data *data = iio_priv(indio_dev);
+ u8 reg;
+
+ switch (chan->channel2) {
+ case IIO_MOD_TEMP_AMBIENT:
+ if (dir == IIO_EV_DIR_RISING)
+ reg = TMP007_TDIE_HIGH_LIMIT;
+ else
+ reg = TMP007_TDIE_LOW_LIMIT;
+ break;
+ case IIO_MOD_TEMP_OBJECT:
+ if (dir == IIO_EV_DIR_RISING)
+ reg = TMP007_TOBJ_HIGH_LIMIT;
+ else
+ reg = TMP007_TOBJ_LOW_LIMIT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Full scale threshold value is +/- 256 degree Celsius */
+ if (val < -256 || val > 255)
+ return -EINVAL;
+
+ /* Pack whole degrees into bits 15:6 (0.5 degree Celsius LSB): shift by 6 + 1 = 7 */
+ return i2c_smbus_write_word_swapped(data->client, reg, (val << 7));
+}
+
static IIO_CONST_ATTR(sampling_frequency_available, "4 2 1 0.5 0.25");
static struct attribute *tmp007_attributes[] = {
@@ -167,6 +365,36 @@ static const struct attribute_group tmp007_attribute_group = {
.attrs = tmp007_attributes,
};
+static const struct iio_event_spec tmp007_obj_event[] = {
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_RISING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE) |
+ BIT(IIO_EV_INFO_ENABLE),
+ },
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_FALLING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE) |
+ BIT(IIO_EV_INFO_ENABLE),
+ },
+};
+
+static const struct iio_event_spec tmp007_die_event[] = {
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_RISING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE) |
+ BIT(IIO_EV_INFO_ENABLE),
+ },
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_FALLING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE) |
+ BIT(IIO_EV_INFO_ENABLE),
+ },
+};
+
static const struct iio_chan_spec tmp007_channels[] = {
{
.type = IIO_TEMP,
@@ -175,6 +403,8 @@ static const struct iio_chan_spec tmp007_channels[] = {
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
BIT(IIO_CHAN_INFO_SCALE),
.info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
+ .event_spec = tmp007_die_event,
+ .num_event_specs = ARRAY_SIZE(tmp007_die_event),
},
{
.type = IIO_TEMP,
@@ -183,12 +413,18 @@ static const struct iio_chan_spec tmp007_channels[] = {
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
BIT(IIO_CHAN_INFO_SCALE),
.info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
+ .event_spec = tmp007_obj_event,
+ .num_event_specs = ARRAY_SIZE(tmp007_obj_event),
}
};
static const struct iio_info tmp007_info = {
.read_raw = tmp007_read_raw,
.write_raw = tmp007_write_raw,
+ .read_event_config = tmp007_read_event_config,
+ .write_event_config = tmp007_write_event_config,
+ .read_event_value = tmp007_read_thresh,
+ .write_event_value = tmp007_write_thresh,
.attrs = &tmp007_attribute_group,
.driver_module = THIS_MODULE,
};
@@ -214,7 +450,6 @@ static int tmp007_probe(struct i2c_client *client,
struct tmp007_data *data;
struct iio_dev *indio_dev;
int ret;
- u16 status;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_WORD_DATA))
return -EOPNOTSUPP;
@@ -231,6 +466,7 @@ static int tmp007_probe(struct i2c_client *client,
data = iio_priv(indio_dev);
i2c_set_clientdata(client, indio_dev);
data->client = client;
+ mutex_init(&data->lock);
indio_dev->dev.parent = &client->dev;
indio_dev->name = "tmp007";
@@ -243,7 +479,7 @@ static int tmp007_probe(struct i2c_client *client,
/*
* Set Configuration register:
* 1. Conversion ON
- * 2. Comparator mode
+ * 2. ALERT enable
* 3. Transient correction enable
*/
@@ -252,7 +488,7 @@ static int tmp007_probe(struct i2c_client *client,
return ret;
data->config = ret;
- data->config |= (TMP007_CONFIG_CONV_EN | TMP007_CONFIG_COMP_EN | TMP007_CONFIG_TC_EN);
+ data->config |= (TMP007_CONFIG_CONV_EN | TMP007_CONFIG_ALERT_EN | TMP007_CONFIG_TC_EN);
ret = i2c_smbus_write_word_swapped(data->client, TMP007_CONFIG,
data->config);
@@ -260,22 +496,39 @@ static int tmp007_probe(struct i2c_client *client,
return ret;
/*
+ * Only the following flags can activate the ALERT pin. Data conversion/validity
+ * flags can still be polled for getting temperature data
+ *
* Set Status Mask register:
- * 1. Conversion ready enable
- * 2. Data valid enable
+ * 1. Object temperature high limit enable
+ * 2. Object temperature low limit enable
+ * 3. TDIE temperature high limit enable
+ * 4. TDIE temperature low limit enable
*/
ret = i2c_smbus_read_word_swapped(data->client, TMP007_STATUS_MASK);
if (ret < 0)
goto error_powerdown;
- status = ret;
- status |= (TMP007_STATUS_CONV_READY | TMP007_STATUS_DATA_VALID);
+ data->status_mask = ret;
+ data->status_mask |= (TMP007_STATUS_OHF | TMP007_STATUS_OLF
+ | TMP007_STATUS_LHF | TMP007_STATUS_LLF);
- ret = i2c_smbus_write_word_swapped(data->client, TMP007_STATUS_MASK, status);
+ ret = i2c_smbus_write_word_swapped(data->client, TMP007_STATUS_MASK, data->status_mask);
if (ret < 0)
goto error_powerdown;
+ if (client->irq) {
+ ret = devm_request_threaded_irq(&client->dev, client->irq,
+ NULL, tmp007_interrupt_handler,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ tmp007_id->name, indio_dev);
+ if (ret) {
+ dev_err(&client->dev, "irq request error %d\n", -ret);
+ goto error_powerdown;
+ }
+ }
+
return iio_device_register(indio_dev);
error_powerdown:
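
The limit registers written by tmp007_write_thresh() hold the threshold in bits 15:6 with a 0.5 degree Celsius LSB, which is why both helpers shift by 7. A small illustration with an arbitrary 40 degree threshold:

  /* Illustration only: TMP007 limit register encoding (shift by 7). */
  #include <stdio.h>

  int main(void)
  {
          int limit_degc = 40;                    /* arbitrary example threshold */
          unsigned short reg = limit_degc << 7;   /* bits 15:6, 0.5 degree LSB */
          int back = (short)reg >> 7;             /* sign-extend, drop the half degree */

          printf("reg=0x%04x -> %d C\n", (unsigned int)reg, back); /* reg=0x1400 -> 40 C */
          return 0;
  }
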
diff --git a/drivers/iio/trigger/stm32-timer-trigger.c b/drivers/iio/trigger/stm32-timer-trigger.c
index 994b96d19750..25248d644e7c 100644
--- a/drivers/iio/trigger/stm32-timer-trigger.c
+++ b/drivers/iio/trigger/stm32-timer-trigger.c
@@ -15,6 +15,7 @@
#include <linux/platform_device.h>
#define MAX_TRIGGERS 6
+#define MAX_VALIDS 5
/* List the triggers created by each timer */
static const void *triggers_table[][MAX_TRIGGERS] = {
@@ -32,12 +33,29 @@ static const void *triggers_table[][MAX_TRIGGERS] = {
{ TIM12_TRGO, TIM12_CH1, TIM12_CH2,},
};
+/* List the triggers accepted by each timer */
+static const void *valids_table[][MAX_VALIDS] = {
+ { TIM5_TRGO, TIM2_TRGO, TIM3_TRGO, TIM4_TRGO,},
+ { TIM1_TRGO, TIM8_TRGO, TIM3_TRGO, TIM4_TRGO,},
+ { TIM1_TRGO, TIM2_TRGO, TIM5_TRGO, TIM4_TRGO,},
+ { TIM1_TRGO, TIM2_TRGO, TIM3_TRGO, TIM8_TRGO,},
+ { TIM2_TRGO, TIM3_TRGO, TIM4_TRGO, TIM8_TRGO,},
+ { }, /* timer 6 */
+ { }, /* timer 7 */
+ { TIM1_TRGO, TIM2_TRGO, TIM4_TRGO, TIM5_TRGO,},
+ { TIM2_TRGO, TIM3_TRGO,},
+ { }, /* timer 10 */
+ { }, /* timer 11 */
+ { TIM4_TRGO, TIM5_TRGO,},
+};
+
struct stm32_timer_trigger {
struct device *dev;
struct regmap *regmap;
struct clk *clk;
u32 max_arr;
const void *triggers;
+ const void *valids;
};
static int stm32_timer_start(struct stm32_timer_trigger *priv,
@@ -152,10 +170,10 @@ static ssize_t stm32_tt_read_frequency(struct device *dev,
regmap_read(priv->regmap, TIM_PSC, &psc);
regmap_read(priv->regmap, TIM_ARR, &arr);
- if (psc && arr && (cr1 & TIM_CR1_CEN)) {
+ if (cr1 & TIM_CR1_CEN) {
freq = (unsigned long long)clk_get_rate(priv->clk);
- do_div(freq, psc);
- do_div(freq, arr);
+ do_div(freq, psc + 1);
+ do_div(freq, arr + 1);
}
return sprintf(buf, "%d\n", (unsigned int)freq);
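
With the change above the reported rate follows freq = clk / ((PSC + 1) * (ARR + 1)), since the hardware divides by the register value plus one. A worked example with made-up values:

  /* Illustration only: freq = clk / ((PSC + 1) * (ARR + 1)); values are made up. */
  #include <stdio.h>

  int main(void)
  {
          unsigned long long clk = 90000000;      /* hypothetical 90 MHz timer clock */
          unsigned int psc = 8999, arr = 9999;    /* hypothetical register contents */
          unsigned long long freq = clk / (psc + 1) / (arr + 1);

          printf("%llu Hz\n", freq);              /* 90 MHz / 9000 / 10000 = 1 Hz */
          return 0;
  }
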
@@ -180,8 +198,7 @@ static ssize_t stm32_tt_show_master_mode(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct stm32_timer_trigger *priv = iio_priv(indio_dev);
+ struct stm32_timer_trigger *priv = dev_get_drvdata(dev);
u32 cr2;
regmap_read(priv->regmap, TIM_CR2, &cr2);
@@ -194,8 +211,7 @@ static ssize_t stm32_tt_store_master_mode(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t len)
{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct stm32_timer_trigger *priv = iio_priv(indio_dev);
+ struct stm32_timer_trigger *priv = dev_get_drvdata(dev);
int i;
for (i = 0; i < ARRAY_SIZE(master_mode_table); i++) {
@@ -275,6 +291,286 @@ static int stm32_setup_iio_triggers(struct stm32_timer_trigger *priv)
return 0;
}
+static int stm32_counter_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct stm32_timer_trigger *priv = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ {
+ u32 cnt;
+
+ regmap_read(priv->regmap, TIM_CNT, &cnt);
+ *val = cnt;
+
+ return IIO_VAL_INT;
+ }
+ case IIO_CHAN_INFO_SCALE:
+ {
+ u32 smcr;
+
+ regmap_read(priv->regmap, TIM_SMCR, &smcr);
+ smcr &= TIM_SMCR_SMS;
+
+ *val = 1;
+ *val2 = 0;
+
+ /* in quadrature case scale = 0.25 */
+ if (smcr == 3)
+ *val2 = 2;
+
+ return IIO_VAL_FRACTIONAL_LOG2;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int stm32_counter_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct stm32_timer_trigger *priv = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ regmap_write(priv->regmap, TIM_CNT, val);
+
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ /* fixed scale */
+ return -EINVAL;
+ }
+
+ return -EINVAL;
+}
+
+static const struct iio_info stm32_trigger_info = {
+ .driver_module = THIS_MODULE,
+ .read_raw = stm32_counter_read_raw,
+ .write_raw = stm32_counter_write_raw
+};
+
+static const char *const stm32_enable_modes[] = {
+ "always",
+ "gated",
+ "triggered",
+};
+
+static int stm32_enable_mode2sms(int mode)
+{
+ switch (mode) {
+ case 0:
+ return 0;
+ case 1:
+ return 5;
+ case 2:
+ return 6;
+ }
+
+ return -EINVAL;
+}
+
+static int stm32_set_enable_mode(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ unsigned int mode)
+{
+ struct stm32_timer_trigger *priv = iio_priv(indio_dev);
+ int sms = stm32_enable_mode2sms(mode);
+
+ if (sms < 0)
+ return sms;
+
+ regmap_update_bits(priv->regmap, TIM_SMCR, TIM_SMCR_SMS, sms);
+
+ return 0;
+}
+
+static int stm32_sms2enable_mode(int mode)
+{
+ switch (mode) {
+ case 0:
+ return 0;
+ case 5:
+ return 1;
+ case 6:
+ return 2;
+ }
+
+ return -EINVAL;
+}
+
+static int stm32_get_enable_mode(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
+{
+ struct stm32_timer_trigger *priv = iio_priv(indio_dev);
+ u32 smcr;
+
+ regmap_read(priv->regmap, TIM_SMCR, &smcr);
+ smcr &= TIM_SMCR_SMS;
+
+ return stm32_sms2enable_mode(smcr);
+}
+
+static const struct iio_enum stm32_enable_mode_enum = {
+ .items = stm32_enable_modes,
+ .num_items = ARRAY_SIZE(stm32_enable_modes),
+ .set = stm32_set_enable_mode,
+ .get = stm32_get_enable_mode
+};
+
+static const char *const stm32_quadrature_modes[] = {
+ "channel_A",
+ "channel_B",
+ "quadrature",
+};
+
+static int stm32_set_quadrature_mode(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ unsigned int mode)
+{
+ struct stm32_timer_trigger *priv = iio_priv(indio_dev);
+
+ regmap_update_bits(priv->regmap, TIM_SMCR, TIM_SMCR_SMS, mode + 1);
+
+ return 0;
+}
+
+static int stm32_get_quadrature_mode(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
+{
+ struct stm32_timer_trigger *priv = iio_priv(indio_dev);
+ u32 smcr;
+
+ regmap_read(priv->regmap, TIM_SMCR, &smcr);
+ smcr &= TIM_SMCR_SMS;
+
+ return smcr - 1;
+}
+
+static const struct iio_enum stm32_quadrature_mode_enum = {
+ .items = stm32_quadrature_modes,
+ .num_items = ARRAY_SIZE(stm32_quadrature_modes),
+ .set = stm32_set_quadrature_mode,
+ .get = stm32_get_quadrature_mode
+};
+
+static const char *const stm32_count_direction_states[] = {
+ "up",
+ "down"
+};
+
+static int stm32_set_count_direction(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ unsigned int mode)
+{
+ struct stm32_timer_trigger *priv = iio_priv(indio_dev);
+
+ regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_DIR, mode);
+
+ return 0;
+}
+
+static int stm32_get_count_direction(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
+{
+ struct stm32_timer_trigger *priv = iio_priv(indio_dev);
+ u32 cr1;
+
+ regmap_read(priv->regmap, TIM_CR1, &cr1);
+
+ return (cr1 & TIM_CR1_DIR);
+}
+
+static const struct iio_enum stm32_count_direction_enum = {
+ .items = stm32_count_direction_states,
+ .num_items = ARRAY_SIZE(stm32_count_direction_states),
+ .set = stm32_set_count_direction,
+ .get = stm32_get_count_direction
+};
+
+static ssize_t stm32_count_get_preset(struct iio_dev *indio_dev,
+ uintptr_t private,
+ const struct iio_chan_spec *chan,
+ char *buf)
+{
+ struct stm32_timer_trigger *priv = iio_priv(indio_dev);
+ u32 arr;
+
+ regmap_read(priv->regmap, TIM_ARR, &arr);
+
+ return snprintf(buf, PAGE_SIZE, "%u\n", arr);
+}
+
+static ssize_t stm32_count_set_preset(struct iio_dev *indio_dev,
+ uintptr_t private,
+ const struct iio_chan_spec *chan,
+ const char *buf, size_t len)
+{
+ struct stm32_timer_trigger *priv = iio_priv(indio_dev);
+ unsigned int preset;
+ int ret;
+
+ ret = kstrtouint(buf, 0, &preset);
+ if (ret)
+ return ret;
+
+ regmap_write(priv->regmap, TIM_ARR, preset);
+ regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_ARPE, TIM_CR1_ARPE);
+
+ return len;
+}
+
+static const struct iio_chan_spec_ext_info stm32_trigger_count_info[] = {
+ {
+ .name = "preset",
+ .shared = IIO_SEPARATE,
+ .read = stm32_count_get_preset,
+ .write = stm32_count_set_preset
+ },
+ IIO_ENUM("count_direction", IIO_SEPARATE, &stm32_count_direction_enum),
+ IIO_ENUM_AVAILABLE("count_direction", &stm32_count_direction_enum),
+ IIO_ENUM("quadrature_mode", IIO_SEPARATE, &stm32_quadrature_mode_enum),
+ IIO_ENUM_AVAILABLE("quadrature_mode", &stm32_quadrature_mode_enum),
+ IIO_ENUM("enable_mode", IIO_SEPARATE, &stm32_enable_mode_enum),
+ IIO_ENUM_AVAILABLE("enable_mode", &stm32_enable_mode_enum),
+ {}
+};
+
+static const struct iio_chan_spec stm32_trigger_channel = {
+ .type = IIO_COUNT,
+ .channel = 0,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
+ .ext_info = stm32_trigger_count_info,
+ .indexed = 1
+};
+
+static struct stm32_timer_trigger *stm32_setup_counter_device(struct device *dev)
+{
+ struct iio_dev *indio_dev;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(dev,
+ sizeof(struct stm32_timer_trigger));
+ if (!indio_dev)
+ return NULL;
+
+ indio_dev->name = dev_name(dev);
+ indio_dev->dev.parent = dev;
+ indio_dev->info = &stm32_trigger_info;
+ indio_dev->num_channels = 1;
+ indio_dev->channels = &stm32_trigger_channel;
+ indio_dev->dev.of_node = dev->of_node;
+
+ ret = devm_iio_device_register(dev, indio_dev);
+ if (ret)
+ return NULL;
+
+ return iio_priv(indio_dev);
+}
+
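
The ext_info table above hooks the counter attributes into IIO: each IIO_ENUM/IIO_ENUM_AVAILABLE entry and the preset read/write pair become per-channel sysfs attributes on the registered device. As a rough illustration only, the userspace sketch below pokes at those attributes; the device path and the in_count0_* names are assumptions based on the usual IIO naming for an indexed IIO_COUNT channel, not something taken from this patch.

/*
 * Hypothetical userspace sketch, not part of the patch: exercising the
 * counter attributes exposed by the ext_info table above.  DEV and the
 * in_count0_* attribute names are assumptions based on common IIO
 * naming for an indexed IIO_COUNT channel.
 */
#include <stdio.h>

#define DEV "/sys/bus/iio/devices/iio:device0"	/* assumed device node */

static int write_attr(const char *name, const char *val)
{
	char path[256];
	FILE *f;

	snprintf(path, sizeof(path), DEV "/%s", name);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fputs(val, f);
	fclose(f);
	return 0;
}

int main(void)
{
	char buf[32];
	FILE *f;

	write_attr("in_count0_quadrature_mode", "quadrature");
	write_attr("in_count0_preset", "65535");

	f = fopen(DEV "/in_count0_raw", "r");
	if (f && fgets(buf, sizeof(buf), f))
		printf("count: %s", buf);
	if (f)
		fclose(f);
	return 0;
}
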
/**
* is_stm32_timer_trigger
* @trig: trigger to be checked
@@ -299,10 +595,15 @@ static int stm32_timer_trigger_probe(struct platform_device *pdev)
if (of_property_read_u32(dev->of_node, "reg", &index))
return -EINVAL;
- if (index >= ARRAY_SIZE(triggers_table))
+ if (index >= ARRAY_SIZE(triggers_table) ||
+ index >= ARRAY_SIZE(valids_table))
return -EINVAL;
- priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ /* Create an IIO device only if we have triggers to be validated */
+ if (*valids_table[index])
+ priv = stm32_setup_counter_device(dev);
+ else
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
@@ -312,6 +613,7 @@ static int stm32_timer_trigger_probe(struct platform_device *pdev)
priv->clk = ddata->clk;
priv->max_arr = ddata->max_arr;
priv->triggers = triggers_table[index];
+ priv->valids = valids_table[index];
ret = stm32_setup_iio_triggers(priv);
if (ret)
diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c
index 80d0fca05c06..112099c86a19 100644
--- a/drivers/infiniband/core/ucm.c
+++ b/drivers/infiniband/core/ucm.c
@@ -1205,12 +1205,15 @@ static void ib_ucm_release_dev(struct device *dev)
struct ib_ucm_device *ucm_dev;
ucm_dev = container_of(dev, struct ib_ucm_device, dev);
- cdev_del(&ucm_dev->cdev);
+ kfree(ucm_dev);
+}
+
+static void ib_ucm_free_dev(struct ib_ucm_device *ucm_dev)
+{
if (ucm_dev->devnum < IB_UCM_MAX_DEVICES)
clear_bit(ucm_dev->devnum, dev_map);
else
clear_bit(ucm_dev->devnum - IB_UCM_MAX_DEVICES, overflow_map);
- kfree(ucm_dev);
}
static const struct file_operations ucm_fops = {
@@ -1266,7 +1269,9 @@ static void ib_ucm_add_one(struct ib_device *device)
if (!ucm_dev)
return;
+ device_initialize(&ucm_dev->dev);
ucm_dev->ib_dev = device;
+ ucm_dev->dev.release = ib_ucm_release_dev;
devnum = find_first_zero_bit(dev_map, IB_UCM_MAX_DEVICES);
if (devnum >= IB_UCM_MAX_DEVICES) {
@@ -1286,16 +1291,14 @@ static void ib_ucm_add_one(struct ib_device *device)
cdev_init(&ucm_dev->cdev, &ucm_fops);
ucm_dev->cdev.owner = THIS_MODULE;
kobject_set_name(&ucm_dev->cdev.kobj, "ucm%d", ucm_dev->devnum);
- if (cdev_add(&ucm_dev->cdev, base, 1))
- goto err;
ucm_dev->dev.class = &cm_class;
ucm_dev->dev.parent = device->dev.parent;
- ucm_dev->dev.devt = ucm_dev->cdev.dev;
- ucm_dev->dev.release = ib_ucm_release_dev;
+ ucm_dev->dev.devt = base;
+
dev_set_name(&ucm_dev->dev, "ucm%d", ucm_dev->devnum);
- if (device_register(&ucm_dev->dev))
- goto err_cdev;
+ if (cdev_device_add(&ucm_dev->cdev, &ucm_dev->dev))
+ goto err_devnum;
if (device_create_file(&ucm_dev->dev, &dev_attr_ibdev))
goto err_dev;
@@ -1304,15 +1307,11 @@ static void ib_ucm_add_one(struct ib_device *device)
return;
err_dev:
- device_unregister(&ucm_dev->dev);
-err_cdev:
- cdev_del(&ucm_dev->cdev);
- if (ucm_dev->devnum < IB_UCM_MAX_DEVICES)
- clear_bit(devnum, dev_map);
- else
- clear_bit(devnum, overflow_map);
+ cdev_device_del(&ucm_dev->cdev, &ucm_dev->dev);
+err_devnum:
+ ib_ucm_free_dev(ucm_dev);
err:
- kfree(ucm_dev);
+ put_device(&ucm_dev->dev);
return;
}
@@ -1323,7 +1322,9 @@ static void ib_ucm_remove_one(struct ib_device *device, void *client_data)
if (!ucm_dev)
return;
- device_unregister(&ucm_dev->dev);
+ cdev_device_del(&ucm_dev->cdev, &ucm_dev->dev);
+ ib_ucm_free_dev(ucm_dev);
+ put_device(&ucm_dev->dev);
}
static CLASS_ATTR_STRING(abi_version, S_IRUGO,
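
The ucm conversion above follows the cdev_device_add() pattern: the struct device is initialized first, its release callback owns the final kfree(), and the cdev and device are registered as one unit, so the error paths reduce to cdev_device_del() plus put_device(). A minimal sketch of that pattern with placeholder names (foo_dev, foo_register, foo_release):

/*
 * Minimal sketch (placeholder names, not from the patch) of the
 * registration pattern the ucm conversion above adopts: initialize the
 * device first, then add the cdev and the device as one unit so their
 * lifetimes are tied together and teardown is symmetric.
 */
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/slab.h>

struct foo_dev {
	struct device dev;
	struct cdev cdev;
};

static void foo_release(struct device *dev)
{
	/* runs once the last reference to &foo_dev->dev is dropped */
	kfree(container_of(dev, struct foo_dev, dev));
}

static int foo_register(struct class *cls, dev_t devt,
			const struct file_operations *fops)
{
	struct foo_dev *fd;
	int ret;

	fd = kzalloc(sizeof(*fd), GFP_KERNEL);
	if (!fd)
		return -ENOMEM;

	device_initialize(&fd->dev);
	fd->dev.class = cls;
	fd->dev.devt = devt;
	fd->dev.release = foo_release;
	dev_set_name(&fd->dev, "foo%d", MINOR(devt));

	cdev_init(&fd->cdev, fops);
	fd->cdev.owner = THIS_MODULE;

	/* registers the char device and the device node together */
	ret = cdev_device_add(&fd->cdev, &fd->dev);
	if (ret)
		put_device(&fd->dev);	/* frees fd via foo_release() */
	return ret;
}

Teardown then mirrors the hunk above: cdev_device_del() followed by put_device().
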
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index 200422d24299..36a6f5c8914c 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -1187,7 +1187,7 @@ static int ib_umad_init_port(struct ib_device *device, int port_num,
cdev_init(&port->cdev, &umad_fops);
port->cdev.owner = THIS_MODULE;
- port->cdev.kobj.parent = &umad_dev->kobj;
+ cdev_set_parent(&port->cdev, &umad_dev->kobj);
kobject_set_name(&port->cdev.kobj, "umad%d", port->dev_num);
if (cdev_add(&port->cdev, base, 1))
goto err_cdev;
@@ -1206,7 +1206,7 @@ static int ib_umad_init_port(struct ib_device *device, int port_num,
base += IB_UMAD_MAX_PORTS;
cdev_init(&port->sm_cdev, &umad_sm_fops);
port->sm_cdev.owner = THIS_MODULE;
- port->sm_cdev.kobj.parent = &umad_dev->kobj;
+ cdev_set_parent(&port->sm_cdev, &umad_dev->kobj);
kobject_set_name(&port->sm_cdev.kobj, "issm%d", port->dev_num);
if (cdev_add(&port->sm_cdev, base, 1))
goto err_sm_cdev;
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 3a9883d1257e..3d2609608f58 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -1093,7 +1093,7 @@ static void ib_uverbs_add_one(struct ib_device *device)
cdev_init(&uverbs_dev->cdev, NULL);
uverbs_dev->cdev.owner = THIS_MODULE;
uverbs_dev->cdev.ops = device->mmap ? &uverbs_mmap_fops : &uverbs_fops;
- uverbs_dev->cdev.kobj.parent = &uverbs_dev->kobj;
+ cdev_set_parent(&uverbs_dev->cdev, &uverbs_dev->kobj);
kobject_set_name(&uverbs_dev->cdev.kobj, "uverbs%d", uverbs_dev->devnum);
if (cdev_add(&uverbs_dev->cdev, base, 1))
goto err_cdev;
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
index 0f6916d2d549..5d6b1eeaa9a0 100644
--- a/drivers/infiniband/hw/hfi1/chip.c
+++ b/drivers/infiniband/hw/hfi1/chip.c
@@ -1055,7 +1055,7 @@ static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
-static void set_partition_keys(struct hfi1_pportdata *);
+static void set_partition_keys(struct hfi1_pportdata *ppd);
static const char *link_state_name(u32 state);
static const char *link_state_reason_name(struct hfi1_pportdata *ppd,
u32 state);
@@ -1068,9 +1068,9 @@ static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
int msecs);
static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc);
static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr);
-static void handle_temp_err(struct hfi1_devdata *);
-static void dc_shutdown(struct hfi1_devdata *);
-static void dc_start(struct hfi1_devdata *);
+static void handle_temp_err(struct hfi1_devdata *dd);
+static void dc_shutdown(struct hfi1_devdata *dd);
+static void dc_start(struct hfi1_devdata *dd);
static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
unsigned int *np);
static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd);
@@ -10233,7 +10233,7 @@ static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason)
if (pstate == PLS_OFFLINE) {
do_transition = 0; /* in right state */
do_wait = 0; /* ...no need to wait */
- } else if ((pstate & 0xff) == PLS_OFFLINE) {
+ } else if ((pstate & 0xf0) == PLS_OFFLINE) {
do_transition = 0; /* in an offline transient state */
do_wait = 1; /* ...wait for it to settle */
} else {
@@ -12662,7 +12662,7 @@ u8 hfi1_ibphys_portstate(struct hfi1_pportdata *ppd)
#define SET_STATIC_RATE_CONTROL_SMASK(r) \
(r |= SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
-int hfi1_init_ctxt(struct send_context *sc)
+void hfi1_init_ctxt(struct send_context *sc)
{
if (sc) {
struct hfi1_devdata *dd = sc->dd;
@@ -12679,7 +12679,6 @@ int hfi1_init_ctxt(struct send_context *sc)
write_kctxt_csr(dd, sc->hw_context,
SEND_CTXT_CHECK_ENABLE, reg);
}
- return 0;
}
int hfi1_tempsense_rd(struct hfi1_devdata *dd, struct hfi1_temp *temp)
@@ -13841,14 +13840,14 @@ static void init_chip(struct hfi1_devdata *dd)
dd_dev_info(dd, "Resetting CSRs with FLR\n");
/* do the FLR, the DC reset will remain */
- hfi1_pcie_flr(dd);
+ pcie_flr(dd->pcidev);
/* restore command and BARs */
restore_pci_variables(dd);
if (is_ax(dd)) {
dd_dev_info(dd, "Resetting CSRs with FLR\n");
- hfi1_pcie_flr(dd);
+ pcie_flr(dd->pcidev);
restore_pci_variables(dd);
}
} else {
@@ -14528,30 +14527,24 @@ done:
return ret;
}
-int hfi1_clear_ctxt_pkey(struct hfi1_devdata *dd, unsigned ctxt)
+int hfi1_clear_ctxt_pkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *ctxt)
{
- struct hfi1_ctxtdata *rcd;
- unsigned sctxt;
- int ret = 0;
+ u8 hw_ctxt;
u64 reg;
- if (ctxt < dd->num_rcv_contexts) {
- rcd = dd->rcd[ctxt];
- } else {
- ret = -EINVAL;
- goto done;
- }
- if (!rcd || !rcd->sc) {
- ret = -EINVAL;
- goto done;
- }
- sctxt = rcd->sc->hw_context;
- reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
+ if (!ctxt || !ctxt->sc)
+ return -EINVAL;
+
+ if (ctxt->ctxt >= dd->num_rcv_contexts)
+ return -EINVAL;
+
+ hw_ctxt = ctxt->sc->hw_context;
+ reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
- write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
- write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_PARTITION_KEY, 0);
-done:
- return ret;
+ write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);
+ write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_PARTITION_KEY, 0);
+
+ return 0;
}
/*
diff --git a/drivers/infiniband/hw/hfi1/chip.h b/drivers/infiniband/hw/hfi1/chip.h
index b9dbf16d7703..cbe455d9ab8b 100644
--- a/drivers/infiniband/hw/hfi1/chip.h
+++ b/drivers/infiniband/hw/hfi1/chip.h
@@ -636,7 +636,8 @@ static inline void write_uctxt_csr(struct hfi1_devdata *dd, int ctxt,
write_csr(dd, offset0 + (0x1000 * ctxt), value);
}
-u64 create_pbc(struct hfi1_pportdata *ppd, u64, int, u32, u32);
+u64 create_pbc(struct hfi1_pportdata *ppd, u64 flags, int srate_mbs, u32 vl,
+ u32 dw_len);
/* firmware.c */
#define SBUS_MASTER_BROADCAST 0xfd
@@ -728,7 +729,8 @@ int bringup_serdes(struct hfi1_pportdata *ppd);
void set_intr_state(struct hfi1_devdata *dd, u32 enable);
void apply_link_downgrade_policy(struct hfi1_pportdata *ppd,
int refresh_widths);
-void update_usrhead(struct hfi1_ctxtdata *, u32, u32, u32, u32, u32);
+void update_usrhead(struct hfi1_ctxtdata *rcd, u32 hd, u32 updegr, u32 egrhd,
+ u32 intr_adjust, u32 npkts);
int stop_drain_data_vls(struct hfi1_devdata *dd);
int open_fill_data_vls(struct hfi1_devdata *dd);
u32 ns_to_cclock(struct hfi1_devdata *dd, u32 ns);
@@ -1347,7 +1349,7 @@ void hfi1_start_cleanup(struct hfi1_devdata *dd);
void hfi1_clear_tids(struct hfi1_ctxtdata *rcd);
struct ib_header *hfi1_get_msgheader(
struct hfi1_devdata *dd, __le32 *rhf_addr);
-int hfi1_init_ctxt(struct send_context *sc);
+void hfi1_init_ctxt(struct send_context *sc);
void hfi1_put_tid(struct hfi1_devdata *dd, u32 index,
u32 type, unsigned long pa, u16 order);
void hfi1_quiet_serdes(struct hfi1_pportdata *ppd);
@@ -1360,7 +1362,7 @@ int hfi1_set_ib_cfg(struct hfi1_pportdata *ppd, int which, u32 val);
int hfi1_set_ctxt_jkey(struct hfi1_devdata *dd, unsigned ctxt, u16 jkey);
int hfi1_clear_ctxt_jkey(struct hfi1_devdata *dd, unsigned ctxt);
int hfi1_set_ctxt_pkey(struct hfi1_devdata *dd, unsigned ctxt, u16 pkey);
-int hfi1_clear_ctxt_pkey(struct hfi1_devdata *dd, unsigned ctxt);
+int hfi1_clear_ctxt_pkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *ctxt);
void hfi1_read_link_quality(struct hfi1_devdata *dd, u8 *link_quality);
void hfi1_init_vnic_rsm(struct hfi1_devdata *dd);
void hfi1_deinit_vnic_rsm(struct hfi1_devdata *dd);
diff --git a/drivers/infiniband/hw/hfi1/device.c b/drivers/infiniband/hw/hfi1/device.c
index bf64b5a7bfd7..bbb6069dec2a 100644
--- a/drivers/infiniband/hw/hfi1/device.c
+++ b/drivers/infiniband/hw/hfi1/device.c
@@ -69,7 +69,7 @@ int hfi1_cdev_init(int minor, const char *name,
cdev_init(cdev, fops);
cdev->owner = THIS_MODULE;
- cdev->kobj.parent = parent;
+ cdev_set_parent(cdev, parent);
kobject_set_name(&cdev->kobj, name);
ret = cdev_add(cdev, dev, 1);
diff --git a/drivers/infiniband/hw/hfi1/driver.c b/drivers/infiniband/hw/hfi1/driver.c
index 527895487175..a50870e455a3 100644
--- a/drivers/infiniband/hw/hfi1/driver.c
+++ b/drivers/infiniband/hw/hfi1/driver.c
@@ -85,8 +85,8 @@ module_param_named(cu, hfi1_cu, uint, S_IRUGO);
MODULE_PARM_DESC(cu, "Credit return units");
unsigned long hfi1_cap_mask = HFI1_CAP_MASK_DEFAULT;
-static int hfi1_caps_set(const char *, const struct kernel_param *);
-static int hfi1_caps_get(char *, const struct kernel_param *);
+static int hfi1_caps_set(const char *val, const struct kernel_param *kp);
+static int hfi1_caps_get(char *buffer, const struct kernel_param *kp);
static const struct kernel_param_ops cap_ops = {
.set = hfi1_caps_set,
.get = hfi1_caps_get
@@ -211,42 +211,6 @@ int hfi1_count_active_units(void)
}
/*
- * Return count of all units, optionally return in arguments
- * the number of usable (present) units, and the number of
- * ports that are up.
- */
-int hfi1_count_units(int *npresentp, int *nupp)
-{
- int nunits = 0, npresent = 0, nup = 0;
- struct hfi1_devdata *dd;
- unsigned long flags;
- int pidx;
- struct hfi1_pportdata *ppd;
-
- spin_lock_irqsave(&hfi1_devs_lock, flags);
-
- list_for_each_entry(dd, &hfi1_dev_list, list) {
- nunits++;
- if ((dd->flags & HFI1_PRESENT) && dd->kregbase)
- npresent++;
- for (pidx = 0; pidx < dd->num_pports; ++pidx) {
- ppd = dd->pport + pidx;
- if (ppd->lid && ppd->linkup)
- nup++;
- }
- }
-
- spin_unlock_irqrestore(&hfi1_devs_lock, flags);
-
- if (npresentp)
- *npresentp = npresent;
- if (nupp)
- *nupp = nup;
-
- return nunits;
-}
-
-/*
* Get address of eager buffer from it's index (allocated in chunks, not
* contiguous).
*/
@@ -1325,7 +1289,7 @@ int hfi1_reset_device(int unit)
if (dd->rcd)
for (i = dd->first_dyn_alloc_ctxt;
i < dd->num_rcv_contexts; i++) {
- if (!dd->rcd[i] || !dd->rcd[i]->cnt)
+ if (!dd->rcd[i])
continue;
spin_unlock_irqrestore(&dd->uctxt_lock, flags);
ret = -EBUSY;
diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c
index 3d9bce4bfcc7..3158128d57e8 100644
--- a/drivers/infiniband/hw/hfi1/file_ops.c
+++ b/drivers/infiniband/hw/hfi1/file_ops.c
@@ -49,6 +49,7 @@
#include <linux/vmalloc.h>
#include <linux/io.h>
#include <linux/sched/mm.h>
+#include <linux/bitmap.h>
#include <rdma/ib.h>
@@ -70,30 +71,37 @@
/*
* File operation functions
*/
-static int hfi1_file_open(struct inode *, struct file *);
-static int hfi1_file_close(struct inode *, struct file *);
-static ssize_t hfi1_write_iter(struct kiocb *, struct iov_iter *);
-static unsigned int hfi1_poll(struct file *, struct poll_table_struct *);
-static int hfi1_file_mmap(struct file *, struct vm_area_struct *);
-
-static u64 kvirt_to_phys(void *);
-static int assign_ctxt(struct file *, struct hfi1_user_info *);
-static int init_subctxts(struct hfi1_ctxtdata *, const struct hfi1_user_info *);
-static int user_init(struct file *);
-static int get_ctxt_info(struct file *, void __user *, __u32);
-static int get_base_info(struct file *, void __user *, __u32);
-static int setup_ctxt(struct file *);
-static int setup_subctxt(struct hfi1_ctxtdata *);
-static int get_user_context(struct file *, struct hfi1_user_info *, int);
-static int find_shared_ctxt(struct file *, const struct hfi1_user_info *);
-static int allocate_ctxt(struct file *, struct hfi1_devdata *,
- struct hfi1_user_info *);
-static unsigned int poll_urgent(struct file *, struct poll_table_struct *);
-static unsigned int poll_next(struct file *, struct poll_table_struct *);
-static int user_event_ack(struct hfi1_ctxtdata *, int, unsigned long);
-static int set_ctxt_pkey(struct hfi1_ctxtdata *, unsigned, u16);
-static int manage_rcvq(struct hfi1_ctxtdata *, unsigned, int);
-static int vma_fault(struct vm_fault *);
+static int hfi1_file_open(struct inode *inode, struct file *fp);
+static int hfi1_file_close(struct inode *inode, struct file *fp);
+static ssize_t hfi1_write_iter(struct kiocb *kiocb, struct iov_iter *from);
+static unsigned int hfi1_poll(struct file *fp, struct poll_table_struct *pt);
+static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma);
+
+static u64 kvirt_to_phys(void *addr);
+static int assign_ctxt(struct hfi1_filedata *fd, struct hfi1_user_info *uinfo);
+static int init_subctxts(struct hfi1_ctxtdata *uctxt,
+ const struct hfi1_user_info *uinfo);
+static int init_user_ctxt(struct hfi1_filedata *fd);
+static void user_init(struct hfi1_ctxtdata *uctxt);
+static int get_ctxt_info(struct hfi1_filedata *fd, void __user *ubase,
+ __u32 len);
+static int get_base_info(struct hfi1_filedata *fd, void __user *ubase,
+ __u32 len);
+static int setup_base_ctxt(struct hfi1_filedata *fd);
+static int setup_subctxt(struct hfi1_ctxtdata *uctxt);
+
+static int find_sub_ctxt(struct hfi1_filedata *fd,
+ const struct hfi1_user_info *uinfo);
+static int allocate_ctxt(struct hfi1_filedata *fd, struct hfi1_devdata *dd,
+ struct hfi1_user_info *uinfo);
+static unsigned int poll_urgent(struct file *fp, struct poll_table_struct *pt);
+static unsigned int poll_next(struct file *fp, struct poll_table_struct *pt);
+static int user_event_ack(struct hfi1_ctxtdata *uctxt, u16 subctxt,
+ unsigned long events);
+static int set_ctxt_pkey(struct hfi1_ctxtdata *uctxt, u16 subctxt, u16 pkey);
+static int manage_rcvq(struct hfi1_ctxtdata *uctxt, u16 subctxt,
+ int start_stop);
+static int vma_fault(struct vm_fault *vmf);
static long hfi1_file_ioctl(struct file *fp, unsigned int cmd,
unsigned long arg);
@@ -173,6 +181,9 @@ static int hfi1_file_open(struct inode *inode, struct file *fp)
struct hfi1_devdata,
user_cdev);
+ if (!((dd->flags & HFI1_PRESENT) && dd->kregbase))
+ return -EINVAL;
+
if (!atomic_inc_not_zero(&dd->user_refcount))
return -ENXIO;
@@ -187,6 +198,7 @@ static int hfi1_file_open(struct inode *inode, struct file *fp)
fd->rec_cpu_num = -1; /* no cpu affinity by default */
fd->mm = current->mm;
mmgrab(fd->mm);
+ fd->dd = dd;
fp->private_data = fd;
} else {
fp->private_data = NULL;
@@ -229,20 +241,14 @@ static long hfi1_file_ioctl(struct file *fp, unsigned int cmd,
sizeof(uinfo)))
return -EFAULT;
- ret = assign_ctxt(fp, &uinfo);
- if (ret < 0)
- return ret;
- ret = setup_ctxt(fp);
- if (ret)
- return ret;
- ret = user_init(fp);
+ ret = assign_ctxt(fd, &uinfo);
break;
case HFI1_IOCTL_CTXT_INFO:
- ret = get_ctxt_info(fp, (void __user *)(unsigned long)arg,
+ ret = get_ctxt_info(fd, (void __user *)(unsigned long)arg,
sizeof(struct hfi1_ctxt_info));
break;
case HFI1_IOCTL_USER_INFO:
- ret = get_base_info(fp, (void __user *)(unsigned long)arg,
+ ret = get_base_info(fd, (void __user *)(unsigned long)arg,
sizeof(struct hfi1_base_info));
break;
case HFI1_IOCTL_CREDIT_UPD:
@@ -256,7 +262,7 @@ static long hfi1_file_ioctl(struct file *fp, unsigned int cmd,
sizeof(tinfo)))
return -EFAULT;
- ret = hfi1_user_exp_rcv_setup(fp, &tinfo);
+ ret = hfi1_user_exp_rcv_setup(fd, &tinfo);
if (!ret) {
/*
* Copy the number of tidlist entries we used
@@ -278,7 +284,7 @@ static long hfi1_file_ioctl(struct file *fp, unsigned int cmd,
sizeof(tinfo)))
return -EFAULT;
- ret = hfi1_user_exp_rcv_clear(fp, &tinfo);
+ ret = hfi1_user_exp_rcv_clear(fd, &tinfo);
if (ret)
break;
addr = arg + offsetof(struct hfi1_tid_info, tidcnt);
@@ -293,7 +299,7 @@ static long hfi1_file_ioctl(struct file *fp, unsigned int cmd,
sizeof(tinfo)))
return -EFAULT;
- ret = hfi1_user_exp_rcv_invalid(fp, &tinfo);
+ ret = hfi1_user_exp_rcv_invalid(fd, &tinfo);
if (ret)
break;
addr = arg + offsetof(struct hfi1_tid_info, tidcnt);
@@ -430,7 +436,7 @@ static ssize_t hfi1_write_iter(struct kiocb *kiocb, struct iov_iter *from)
unsigned long count = 0;
ret = hfi1_user_sdma_process_request(
- kiocb->ki_filp, (struct iovec *)(from->iov + done),
+ fd, (struct iovec *)(from->iov + done),
dim, &count);
if (ret) {
reqs = ret;
@@ -756,6 +762,9 @@ static int hfi1_file_close(struct inode *inode, struct file *fp)
/* release the cpu */
hfi1_put_proc_affinity(fdata->rec_cpu_num);
+ /* clean up rcv side */
+ hfi1_user_exp_rcv_free(fdata);
+
/*
* Clear any left over, unhandled events so the next process that
* gets this context doesn't get confused.
@@ -764,8 +773,8 @@ static int hfi1_file_close(struct inode *inode, struct file *fp)
HFI1_MAX_SHARED_CTXTS) + fdata->subctxt;
*ev = 0;
- if (--uctxt->cnt) {
- uctxt->active_slaves &= ~(1 << fdata->subctxt);
+ __clear_bit(fdata->subctxt, uctxt->in_use_ctxts);
+ if (!bitmap_empty(uctxt->in_use_ctxts, HFI1_MAX_SHARED_CTXTS)) {
mutex_unlock(&hfi1_mutex);
goto done;
}
@@ -795,8 +804,8 @@ static int hfi1_file_close(struct inode *inode, struct file *fp)
dd->rcd[uctxt->ctxt] = NULL;
- hfi1_user_exp_rcv_free(fdata);
- hfi1_clear_ctxt_pkey(dd, uctxt->ctxt);
+ hfi1_user_exp_rcv_grp_free(uctxt);
+ hfi1_clear_ctxt_pkey(dd, uctxt);
uctxt->rcvwait_to = 0;
uctxt->piowait_to = 0;
@@ -836,127 +845,135 @@ static u64 kvirt_to_phys(void *addr)
return paddr;
}
-static int assign_ctxt(struct file *fp, struct hfi1_user_info *uinfo)
+static int assign_ctxt(struct hfi1_filedata *fd, struct hfi1_user_info *uinfo)
{
- int i_minor, ret = 0;
+ int ret;
unsigned int swmajor, swminor;
swmajor = uinfo->userversion >> 16;
- if (swmajor != HFI1_USER_SWMAJOR) {
- ret = -ENODEV;
- goto done;
- }
+ if (swmajor != HFI1_USER_SWMAJOR)
+ return -ENODEV;
swminor = uinfo->userversion & 0xffff;
mutex_lock(&hfi1_mutex);
- /* First, lets check if we need to setup a shared context? */
+ /*
+ * Get a sub context if necessary.
+ * ret < 0 error, 0 no context, 1 sub-context found
+ */
+ ret = 0;
if (uinfo->subctxt_cnt) {
- struct hfi1_filedata *fd = fp->private_data;
-
- ret = find_shared_ctxt(fp, uinfo);
- if (ret < 0)
- goto done_unlock;
- if (ret) {
+ ret = find_sub_ctxt(fd, uinfo);
+ if (ret > 0)
fd->rec_cpu_num =
hfi1_get_proc_affinity(fd->uctxt->numa_id);
- }
}
/*
- * We execute the following block if we couldn't find a
- * shared context or if context sharing is not required.
+ * Allocate a base context if context sharing is not required or we
+ * couldn't find a sub context.
*/
- if (!ret) {
- i_minor = iminor(file_inode(fp)) - HFI1_USER_MINOR_BASE;
- ret = get_user_context(fp, uinfo, i_minor);
- }
-done_unlock:
+ if (!ret)
+ ret = allocate_ctxt(fd, fd->dd, uinfo);
+
mutex_unlock(&hfi1_mutex);
-done:
+
+ /* Depending on the context type, do the appropriate init */
+ if (ret > 0) {
+ /*
+ * sub-context info can only be set up after the base
+ * context has been completed.
+ */
+ ret = wait_event_interruptible(fd->uctxt->wait, !test_bit(
+ HFI1_CTXT_BASE_UNINIT,
+ &fd->uctxt->event_flags));
+ if (test_bit(HFI1_CTXT_BASE_FAILED, &fd->uctxt->event_flags)) {
+ clear_bit(fd->subctxt, fd->uctxt->in_use_ctxts);
+ return -ENOMEM;
+ }
+ /* The only thing a sub context needs is the user_xxx stuff */
+ if (!ret)
+ ret = init_user_ctxt(fd);
+
+ if (ret)
+ clear_bit(fd->subctxt, fd->uctxt->in_use_ctxts);
+ } else if (!ret) {
+ ret = setup_base_ctxt(fd);
+ if (fd->uctxt->subctxt_cnt) {
+ /* If there is an error, set the failed bit. */
+ if (ret)
+ set_bit(HFI1_CTXT_BASE_FAILED,
+ &fd->uctxt->event_flags);
+ /*
+ * Base context is done, notify anybody using a
+ * sub-context that is waiting for this completion
+ */
+ clear_bit(HFI1_CTXT_BASE_UNINIT,
+ &fd->uctxt->event_flags);
+ wake_up(&fd->uctxt->wait);
+ }
+ }
+
return ret;
}
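
assign_ctxt() above serializes base and sub-context setup with a bit flag plus a waitqueue: sub-contexts sleep until HFI1_CTXT_BASE_UNINIT is cleared and then consult HFI1_CTXT_BASE_FAILED to learn whether the base setup succeeded. A stripped-down sketch of that handshake, with placeholder names and flag values:

/*
 * Stripped-down sketch (placeholder names and flag values) of the
 * base/sub-context handshake used by assign_ctxt() above.  The
 * waitqueue is assumed to have been set up with init_waitqueue_head().
 */
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/wait.h>

#define CTX_BASE_UNINIT	1
#define CTX_BASE_FAILED	2

struct demo_ctxt {
	unsigned long event_flags;
	wait_queue_head_t wait;
};

/* sub-context side: sleep until the base context publishes its result */
static int demo_wait_for_base(struct demo_ctxt *c)
{
	int ret;

	ret = wait_event_interruptible(c->wait,
				!test_bit(CTX_BASE_UNINIT, &c->event_flags));
	if (ret)
		return ret;	/* interrupted by a signal */

	return test_bit(CTX_BASE_FAILED, &c->event_flags) ? -ENOMEM : 0;
}

/* base-context side: record success or failure, then wake the waiters */
static void demo_base_done(struct demo_ctxt *c, int err)
{
	if (err)
		set_bit(CTX_BASE_FAILED, &c->event_flags);
	clear_bit(CTX_BASE_UNINIT, &c->event_flags);
	wake_up(&c->wait);
}
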
-static int get_user_context(struct file *fp, struct hfi1_user_info *uinfo,
- int devno)
+/*
+ * The hfi1_mutex must be held when this function is called. It is
+ * necessary to ensure serialized access to the bitmask in_use_ctxts.
+ */
+static int find_sub_ctxt(struct hfi1_filedata *fd,
+ const struct hfi1_user_info *uinfo)
{
- struct hfi1_devdata *dd = NULL;
- int devmax, npresent, nup;
+ int i;
+ struct hfi1_devdata *dd = fd->dd;
+ u16 subctxt;
- devmax = hfi1_count_units(&npresent, &nup);
- if (!npresent)
- return -ENXIO;
+ for (i = dd->first_dyn_alloc_ctxt; i < dd->num_rcv_contexts; i++) {
+ struct hfi1_ctxtdata *uctxt = dd->rcd[i];
- if (!nup)
- return -ENETDOWN;
+ /* Skip ctxts which are not yet open */
+ if (!uctxt ||
+ bitmap_empty(uctxt->in_use_ctxts,
+ HFI1_MAX_SHARED_CTXTS))
+ continue;
- dd = hfi1_lookup(devno);
- if (!dd)
- return -ENODEV;
- else if (!dd->freectxts)
- return -EBUSY;
+ /* Skip dynamically allocated kernel contexts */
+ if (uctxt->sc && (uctxt->sc->type == SC_KERNEL))
+ continue;
- return allocate_ctxt(fp, dd, uinfo);
-}
+ /* Skip ctxt if it doesn't match the requested one */
+ if (memcmp(uctxt->uuid, uinfo->uuid,
+ sizeof(uctxt->uuid)) ||
+ uctxt->jkey != generate_jkey(current_uid()) ||
+ uctxt->subctxt_id != uinfo->subctxt_id ||
+ uctxt->subctxt_cnt != uinfo->subctxt_cnt)
+ continue;
-static int find_shared_ctxt(struct file *fp,
- const struct hfi1_user_info *uinfo)
-{
- int devmax, ndev, i;
- int ret = 0;
- struct hfi1_filedata *fd = fp->private_data;
+ /* Verify the sharing process matches the master */
+ if (uctxt->userversion != uinfo->userversion)
+ return -EINVAL;
- devmax = hfi1_count_units(NULL, NULL);
+ /* Find an unused context */
+ subctxt = find_first_zero_bit(uctxt->in_use_ctxts,
+ HFI1_MAX_SHARED_CTXTS);
+ if (subctxt >= uctxt->subctxt_cnt)
+ return -EBUSY;
- for (ndev = 0; ndev < devmax; ndev++) {
- struct hfi1_devdata *dd = hfi1_lookup(ndev);
+ fd->uctxt = uctxt;
+ fd->subctxt = subctxt;
+ __set_bit(fd->subctxt, uctxt->in_use_ctxts);
- if (!(dd && (dd->flags & HFI1_PRESENT) && dd->kregbase))
- continue;
- for (i = dd->first_dyn_alloc_ctxt;
- i < dd->num_rcv_contexts; i++) {
- struct hfi1_ctxtdata *uctxt = dd->rcd[i];
-
- /* Skip ctxts which are not yet open */
- if (!uctxt || !uctxt->cnt)
- continue;
-
- /* Skip dynamically allocted kernel contexts */
- if (uctxt->sc && (uctxt->sc->type == SC_KERNEL))
- continue;
-
- /* Skip ctxt if it doesn't match the requested one */
- if (memcmp(uctxt->uuid, uinfo->uuid,
- sizeof(uctxt->uuid)) ||
- uctxt->jkey != generate_jkey(current_uid()) ||
- uctxt->subctxt_id != uinfo->subctxt_id ||
- uctxt->subctxt_cnt != uinfo->subctxt_cnt)
- continue;
-
- /* Verify the sharing process matches the master */
- if (uctxt->userversion != uinfo->userversion ||
- uctxt->cnt >= uctxt->subctxt_cnt) {
- ret = -EINVAL;
- goto done;
- }
- fd->uctxt = uctxt;
- fd->subctxt = uctxt->cnt++;
- uctxt->active_slaves |= 1 << fd->subctxt;
- ret = 1;
- goto done;
- }
+ return 1;
}
-done:
- return ret;
+ return 0;
}
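
The in_use_ctxts bitmap introduced here replaces the old cnt/active_slaves counters: bit 0 marks the base context, find_first_zero_bit() hands out sub-context slots, and bitmap_empty() answers whether the context is open at all. A standalone sketch of the same slot-allocation idea (placeholder names; callers are assumed to hold a serializing lock, as hfi1_mutex does here):

/*
 * Standalone sketch (placeholder names) of the bitmap-based slot
 * allocation used by find_sub_ctxt() above.  Callers are assumed to
 * hold a lock that serializes access to the bitmap.
 */
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/types.h>

#define DEMO_MAX_SLOTS	8

struct demo_ctx {
	DECLARE_BITMAP(in_use, DEMO_MAX_SLOTS);
	unsigned int max_slots;		/* configured limit, <= DEMO_MAX_SLOTS */
};

/* returns the slot index, or -EBUSY when every allowed slot is taken */
static int demo_claim_slot(struct demo_ctx *c)
{
	unsigned int slot;

	slot = find_first_zero_bit(c->in_use, DEMO_MAX_SLOTS);
	if (slot >= c->max_slots)
		return -EBUSY;
	__set_bit(slot, c->in_use);
	return slot;
}

/* the context is "open" as long as any slot is still claimed */
static bool demo_ctx_open(struct demo_ctx *c)
{
	return !bitmap_empty(c->in_use, DEMO_MAX_SLOTS);
}

static void demo_release_slot(struct demo_ctx *c, unsigned int slot)
{
	__clear_bit(slot, c->in_use);
}
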
-static int allocate_ctxt(struct file *fp, struct hfi1_devdata *dd,
+static int allocate_ctxt(struct hfi1_filedata *fd, struct hfi1_devdata *dd,
struct hfi1_user_info *uinfo)
{
- struct hfi1_filedata *fd = fp->private_data;
struct hfi1_ctxtdata *uctxt;
- unsigned ctxt;
+ unsigned int ctxt;
int ret, numa;
if (dd->flags & HFI1_FROZEN) {
@@ -970,6 +987,14 @@ static int allocate_ctxt(struct file *fp, struct hfi1_devdata *dd,
return -EIO;
}
+ /*
+ * This check is somewhat redundant with the EBUSY error below. It would
+ * also indicate an inconsistency in the driver if this value were zero
+ * while contexts were still available.
+ */
+ if (!dd->freectxts)
+ return -EBUSY;
+
for (ctxt = dd->first_dyn_alloc_ctxt;
ctxt < dd->num_rcv_contexts; ctxt++)
if (!dd->rcd[ctxt])
@@ -1013,12 +1038,12 @@ static int allocate_ctxt(struct file *fp, struct hfi1_devdata *dd,
goto ctxdata_free;
/*
- * Setup shared context resources if the user-level has requested
- * shared contexts and this is the 'master' process.
+ * Setup sub context resources if the user-level has requested
+ * sub contexts.
* This has to be done here so the rest of the sub-contexts find the
* proper master.
*/
- if (uinfo->subctxt_cnt && !fd->subctxt) {
+ if (uinfo->subctxt_cnt) {
ret = init_subctxts(uctxt, uinfo);
/*
* On error, we don't need to disable and de-allocate the
@@ -1055,7 +1080,7 @@ ctxdata_free:
static int init_subctxts(struct hfi1_ctxtdata *uctxt,
const struct hfi1_user_info *uinfo)
{
- unsigned num_subctxts;
+ u16 num_subctxts;
num_subctxts = uinfo->subctxt_cnt;
if (num_subctxts > HFI1_MAX_SHARED_CTXTS)
@@ -1063,9 +1088,8 @@ static int init_subctxts(struct hfi1_ctxtdata *uctxt,
uctxt->subctxt_cnt = uinfo->subctxt_cnt;
uctxt->subctxt_id = uinfo->subctxt_id;
- uctxt->active_slaves = 1;
uctxt->redirect_seq_cnt = 1;
- set_bit(HFI1_CTXT_MASTER_UNINIT, &uctxt->event_flags);
+ set_bit(HFI1_CTXT_BASE_UNINIT, &uctxt->event_flags);
return 0;
}
@@ -1073,13 +1097,12 @@ static int init_subctxts(struct hfi1_ctxtdata *uctxt,
static int setup_subctxt(struct hfi1_ctxtdata *uctxt)
{
int ret = 0;
- unsigned num_subctxts = uctxt->subctxt_cnt;
+ u16 num_subctxts = uctxt->subctxt_cnt;
uctxt->subctxt_uregbase = vmalloc_user(PAGE_SIZE);
- if (!uctxt->subctxt_uregbase) {
- ret = -ENOMEM;
- goto bail;
- }
+ if (!uctxt->subctxt_uregbase)
+ return -ENOMEM;
+
/* We can take the size of the RcvHdr Queue from the master */
uctxt->subctxt_rcvhdr_base = vmalloc_user(uctxt->rcvhdrq_size *
num_subctxts);
@@ -1094,25 +1117,22 @@ static int setup_subctxt(struct hfi1_ctxtdata *uctxt)
ret = -ENOMEM;
goto bail_rhdr;
}
- goto bail;
+
+ return 0;
+
bail_rhdr:
vfree(uctxt->subctxt_rcvhdr_base);
+ uctxt->subctxt_rcvhdr_base = NULL;
bail_ureg:
vfree(uctxt->subctxt_uregbase);
uctxt->subctxt_uregbase = NULL;
-bail:
+
return ret;
}
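
setup_subctxt() above allocates the shared per-subcontext areas with vmalloc_user(), which returns zeroed memory flagged for mapping into userspace. A minimal sketch of the usual pairing with remap_vmalloc_range() in an mmap handler (names are placeholders and the size/offset checks a real handler needs are omitted):

/*
 * Sketch of the usual vmalloc_user()/remap_vmalloc_range() pairing
 * (placeholder names, not from the patch).  A real mmap handler would
 * also validate the requested size and offset against the buffer.
 */
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

static void *demo_buf;

static int demo_alloc(unsigned long size)
{
	/* zeroed, page-aligned memory flagged for user mapping */
	demo_buf = vmalloc_user(PAGE_ALIGN(size));
	return demo_buf ? 0 : -ENOMEM;
}

static int demo_mmap(struct file *fp, struct vm_area_struct *vma)
{
	/* maps the whole buffer starting at its first page */
	return remap_vmalloc_range(vma, demo_buf, 0);
}
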
-static int user_init(struct file *fp)
+static void user_init(struct hfi1_ctxtdata *uctxt)
{
unsigned int rcvctrl_ops = 0;
- struct hfi1_filedata *fd = fp->private_data;
- struct hfi1_ctxtdata *uctxt = fd->uctxt;
-
- /* make sure that the context has already been setup */
- if (!test_bit(HFI1_CTXT_SETUP_DONE, &uctxt->event_flags))
- return -EFAULT;
/* initialize poll variables... */
uctxt->urgent = 0;
@@ -1160,20 +1180,12 @@ static int user_init(struct file *fp)
else
rcvctrl_ops |= HFI1_RCVCTRL_TAILUPD_DIS;
hfi1_rcvctrl(uctxt->dd, rcvctrl_ops, uctxt->ctxt);
-
- /* Notify any waiting slaves */
- if (uctxt->subctxt_cnt) {
- clear_bit(HFI1_CTXT_MASTER_UNINIT, &uctxt->event_flags);
- wake_up(&uctxt->wait);
- }
-
- return 0;
}
-static int get_ctxt_info(struct file *fp, void __user *ubase, __u32 len)
+static int get_ctxt_info(struct hfi1_filedata *fd, void __user *ubase,
+ __u32 len)
{
struct hfi1_ctxt_info cinfo;
- struct hfi1_filedata *fd = fp->private_data;
struct hfi1_ctxtdata *uctxt = fd->uctxt;
int ret = 0;
@@ -1211,75 +1223,71 @@ static int get_ctxt_info(struct file *fp, void __user *ubase, __u32 len)
return ret;
}
-static int setup_ctxt(struct file *fp)
+static int init_user_ctxt(struct hfi1_filedata *fd)
+{
+ struct hfi1_ctxtdata *uctxt = fd->uctxt;
+ int ret;
+
+ ret = hfi1_user_sdma_alloc_queues(uctxt, fd);
+ if (ret)
+ return ret;
+
+ ret = hfi1_user_exp_rcv_init(fd);
+
+ return ret;
+}
+
+static int setup_base_ctxt(struct hfi1_filedata *fd)
{
- struct hfi1_filedata *fd = fp->private_data;
struct hfi1_ctxtdata *uctxt = fd->uctxt;
struct hfi1_devdata *dd = uctxt->dd;
int ret = 0;
- /*
- * Context should be set up only once, including allocation and
- * programming of eager buffers. This is done if context sharing
- * is not requested or by the master process.
- */
- if (!uctxt->subctxt_cnt || !fd->subctxt) {
- ret = hfi1_init_ctxt(uctxt->sc);
- if (ret)
- goto done;
+ hfi1_init_ctxt(uctxt->sc);
- /* Now allocate the RcvHdr queue and eager buffers. */
- ret = hfi1_create_rcvhdrq(dd, uctxt);
- if (ret)
- goto done;
- ret = hfi1_setup_eagerbufs(uctxt);
- if (ret)
- goto done;
- if (uctxt->subctxt_cnt && !fd->subctxt) {
- ret = setup_subctxt(uctxt);
- if (ret)
- goto done;
- }
- } else {
- ret = wait_event_interruptible(uctxt->wait, !test_bit(
- HFI1_CTXT_MASTER_UNINIT,
- &uctxt->event_flags));
- if (ret)
- goto done;
- }
+ /* Now allocate the RcvHdr queue and eager buffers. */
+ ret = hfi1_create_rcvhdrq(dd, uctxt);
+ if (ret)
+ return ret;
- ret = hfi1_user_sdma_alloc_queues(uctxt, fp);
+ ret = hfi1_setup_eagerbufs(uctxt);
if (ret)
- goto done;
- /*
- * Expected receive has to be setup for all processes (including
- * shared contexts). However, it has to be done after the master
- * context has been fully configured as it depends on the
- * eager/expected split of the RcvArray entries.
- * Setting it up here ensures that the subcontexts will be waiting
- * (due to the above wait_event_interruptible() until the master
- * is setup.
- */
- ret = hfi1_user_exp_rcv_init(fp);
+ goto setup_failed;
+
+ /* If sub-contexts are enabled, do the appropriate setup */
+ if (uctxt->subctxt_cnt)
+ ret = setup_subctxt(uctxt);
if (ret)
- goto done;
+ goto setup_failed;
- set_bit(HFI1_CTXT_SETUP_DONE, &uctxt->event_flags);
-done:
+ ret = hfi1_user_exp_rcv_grp_init(fd);
+ if (ret)
+ goto setup_failed;
+
+ ret = init_user_ctxt(fd);
+ if (ret)
+ goto setup_failed;
+
+ user_init(uctxt);
+
+ return 0;
+
+setup_failed:
+ hfi1_free_ctxtdata(dd, uctxt);
return ret;
}
-static int get_base_info(struct file *fp, void __user *ubase, __u32 len)
+static int get_base_info(struct hfi1_filedata *fd, void __user *ubase,
+ __u32 len)
{
struct hfi1_base_info binfo;
- struct hfi1_filedata *fd = fp->private_data;
struct hfi1_ctxtdata *uctxt = fd->uctxt;
struct hfi1_devdata *dd = uctxt->dd;
ssize_t sz;
unsigned offset;
int ret = 0;
- trace_hfi1_uctxtdata(uctxt->dd, uctxt);
+ trace_hfi1_uctxtdata(uctxt->dd, uctxt, fd->subctxt);
memset(&binfo, 0, sizeof(binfo));
binfo.hw_version = dd->revision;
@@ -1443,7 +1451,7 @@ done:
* overflow conditions. start_stop==1 re-enables, to be used to
* re-init the software copy of the head register
*/
-static int manage_rcvq(struct hfi1_ctxtdata *uctxt, unsigned subctxt,
+static int manage_rcvq(struct hfi1_ctxtdata *uctxt, u16 subctxt,
int start_stop)
{
struct hfi1_devdata *dd = uctxt->dd;
@@ -1478,7 +1486,7 @@ bail:
* User process then performs actions appropriate to bit having been
* set, if desired, and checks again in future.
*/
-static int user_event_ack(struct hfi1_ctxtdata *uctxt, int subctxt,
+static int user_event_ack(struct hfi1_ctxtdata *uctxt, u16 subctxt,
unsigned long events)
{
int i;
@@ -1499,8 +1507,7 @@ static int user_event_ack(struct hfi1_ctxtdata *uctxt, int subctxt,
return 0;
}
-static int set_ctxt_pkey(struct hfi1_ctxtdata *uctxt, unsigned subctxt,
- u16 pkey)
+static int set_ctxt_pkey(struct hfi1_ctxtdata *uctxt, u16 subctxt, u16 pkey)
{
int ret = -ENOENT, i, intable = 0;
struct hfi1_pportdata *ppd = uctxt->ppd;
diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h
index f06674317abf..da322e6668cc 100644
--- a/drivers/infiniband/hw/hfi1/hfi.h
+++ b/drivers/infiniband/hw/hfi1/hfi.h
@@ -196,12 +196,6 @@ struct hfi1_ctxtdata {
void *rcvhdrq;
/* kernel virtual address where hdrqtail is updated */
volatile __le64 *rcvhdrtail_kvaddr;
- /*
- * Shared page for kernel to signal user processes that send buffers
- * need disarming. The process should call HFI1_CMD_DISARM_BUFS
- * or HFI1_CMD_ACK_EVENT with IPATH_EVENT_DISARM_BUFS set.
- */
- unsigned long *user_event_mask;
/* when waiting for rcv or pioavail */
wait_queue_head_t wait;
/* rcvhdrq size (for freeing) */
@@ -224,13 +218,12 @@ struct hfi1_ctxtdata {
* (ignoring forks, dup, etc. for now)
*/
int cnt;
+ /* Device context index */
+ unsigned ctxt;
/*
- * how much space to leave at start of eager TID entries for
- * protocol use, on each TID
+ * non-zero if ctxt can be shared, and defines the maximum number of
+ * sub-contexts for this device context.
*/
- /* instead of calculating it */
- unsigned ctxt;
- /* non-zero if ctxt is being shared. */
u16 subctxt_cnt;
/* non-zero if ctxt is being shared. */
u16 subctxt_id;
@@ -288,10 +281,10 @@ struct hfi1_ctxtdata {
void *subctxt_rcvegrbuf;
/* An array of pages for the eager header queue entries * N */
void *subctxt_rcvhdr_base;
+ /* Bitmask of in use context(s) */
+ DECLARE_BITMAP(in_use_ctxts, HFI1_MAX_SHARED_CTXTS);
/* The version of the library which opened this ctxt */
u32 userversion;
- /* Bitmask of active slaves */
- u32 active_slaves;
/* Type of packets or conditions we want to poll for */
u16 poll_type;
/* receive packet sequence counter */
@@ -1238,10 +1231,11 @@ struct mmu_rb_handler;
/* Private data for file operations */
struct hfi1_filedata {
+ struct hfi1_devdata *dd;
struct hfi1_ctxtdata *uctxt;
- unsigned subctxt;
struct hfi1_user_sdma_comp_q *cq;
struct hfi1_user_sdma_pkt_q *pq;
+ u16 subctxt;
/* for cpu affinity; -1 if none */
int rec_cpu_num;
u32 tid_n_pinned;
@@ -1263,27 +1257,27 @@ struct hfi1_devdata *hfi1_lookup(int unit);
extern u32 hfi1_cpulist_count;
extern unsigned long *hfi1_cpulist;
-int hfi1_init(struct hfi1_devdata *, int);
-int hfi1_count_units(int *npresentp, int *nupp);
+int hfi1_init(struct hfi1_devdata *dd, int reinit);
int hfi1_count_active_units(void);
-int hfi1_diag_add(struct hfi1_devdata *);
-void hfi1_diag_remove(struct hfi1_devdata *);
+int hfi1_diag_add(struct hfi1_devdata *dd);
+void hfi1_diag_remove(struct hfi1_devdata *dd);
void handle_linkup_change(struct hfi1_devdata *dd, u32 linkup);
void handle_user_interrupt(struct hfi1_ctxtdata *rcd);
-int hfi1_create_rcvhdrq(struct hfi1_devdata *, struct hfi1_ctxtdata *);
-int hfi1_setup_eagerbufs(struct hfi1_ctxtdata *);
+int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd);
+int hfi1_setup_eagerbufs(struct hfi1_ctxtdata *rcd);
int hfi1_create_ctxts(struct hfi1_devdata *dd);
-struct hfi1_ctxtdata *hfi1_create_ctxtdata(struct hfi1_pportdata *, u32, int);
-void hfi1_init_pportdata(struct pci_dev *, struct hfi1_pportdata *,
- struct hfi1_devdata *, u8, u8);
-void hfi1_free_ctxtdata(struct hfi1_devdata *, struct hfi1_ctxtdata *);
-
-int handle_receive_interrupt(struct hfi1_ctxtdata *, int);
-int handle_receive_interrupt_nodma_rtail(struct hfi1_ctxtdata *, int);
-int handle_receive_interrupt_dma_rtail(struct hfi1_ctxtdata *, int);
+struct hfi1_ctxtdata *hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, u32 ctxt,
+ int numa);
+void hfi1_init_pportdata(struct pci_dev *pdev, struct hfi1_pportdata *ppd,
+ struct hfi1_devdata *dd, u8 hw_pidx, u8 port);
+void hfi1_free_ctxtdata(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd);
+
+int handle_receive_interrupt(struct hfi1_ctxtdata *rcd, int thread);
+int handle_receive_interrupt_nodma_rtail(struct hfi1_ctxtdata *rcd, int thread);
+int handle_receive_interrupt_dma_rtail(struct hfi1_ctxtdata *rcd, int thread);
void set_all_slowpath(struct hfi1_devdata *dd);
void hfi1_vnic_synchronize_irq(struct hfi1_devdata *dd);
void hfi1_set_vnic_msix_info(struct hfi1_ctxtdata *rcd);
@@ -1580,7 +1574,7 @@ bad:
u32 lrh_max_header_bytes(struct hfi1_devdata *dd);
int mtu_to_enum(u32 mtu, int default_if_bad);
-u16 enum_to_mtu(int);
+u16 enum_to_mtu(int mtu);
static inline int valid_ib_mtu(unsigned int mtu)
{
return mtu == 256 || mtu == 512 ||
@@ -1594,15 +1588,15 @@ static inline int valid_opa_max_mtu(unsigned int mtu)
(valid_ib_mtu(mtu) || mtu == 8192 || mtu == 10240);
}
-int set_mtu(struct hfi1_pportdata *);
+int set_mtu(struct hfi1_pportdata *ppd);
-int hfi1_set_lid(struct hfi1_pportdata *, u32, u8);
-void hfi1_disable_after_error(struct hfi1_devdata *);
-int hfi1_set_uevent_bits(struct hfi1_pportdata *, const int);
-int hfi1_rcvbuf_validate(u32, u8, u16 *);
+int hfi1_set_lid(struct hfi1_pportdata *ppd, u32 lid, u8 lmc);
+void hfi1_disable_after_error(struct hfi1_devdata *dd);
+int hfi1_set_uevent_bits(struct hfi1_pportdata *ppd, const int evtbit);
+int hfi1_rcvbuf_validate(u32 size, u8 type, u16 *encode);
-int fm_get_table(struct hfi1_pportdata *, int, void *);
-int fm_set_table(struct hfi1_pportdata *, int, void *);
+int fm_get_table(struct hfi1_pportdata *ppd, int which, void *t);
+int fm_set_table(struct hfi1_pportdata *ppd, int which, void *t);
void set_up_vl15(struct hfi1_devdata *dd, u8 vau, u16 vl15buf);
void reset_link_credits(struct hfi1_devdata *dd);
@@ -1724,19 +1718,19 @@ struct cc_state *get_cc_state_protected(struct hfi1_pportdata *ppd)
#define HFI1_PBC_LENGTH_MASK ((1 << 11) - 1)
/* ctxt_flag bit offsets */
- /* context has been setup */
-#define HFI1_CTXT_SETUP_DONE 1
+ /* base context has not finished initializing */
+#define HFI1_CTXT_BASE_UNINIT 1
+ /* base context initialization failed */
+#define HFI1_CTXT_BASE_FAILED 2
/* waiting for a packet to arrive */
-#define HFI1_CTXT_WAITING_RCV 2
- /* master has not finished initializing */
-#define HFI1_CTXT_MASTER_UNINIT 4
+#define HFI1_CTXT_WAITING_RCV 3
/* waiting for an urgent packet to arrive */
-#define HFI1_CTXT_WAITING_URG 5
+#define HFI1_CTXT_WAITING_URG 4
/* free up any allocated data at closes */
-struct hfi1_devdata *hfi1_init_dd(struct pci_dev *,
- const struct pci_device_id *);
-void hfi1_free_devdata(struct hfi1_devdata *);
+struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
+ const struct pci_device_id *ent);
+void hfi1_free_devdata(struct hfi1_devdata *dd);
struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev, size_t extra);
/* LED beaconing functions */
@@ -1811,24 +1805,24 @@ static inline u32 get_rcvhdrtail(const struct hfi1_ctxtdata *rcd)
extern const char ib_hfi1_version[];
-int hfi1_device_create(struct hfi1_devdata *);
-void hfi1_device_remove(struct hfi1_devdata *);
+int hfi1_device_create(struct hfi1_devdata *dd);
+void hfi1_device_remove(struct hfi1_devdata *dd);
int hfi1_create_port_files(struct ib_device *ibdev, u8 port_num,
struct kobject *kobj);
-int hfi1_verbs_register_sysfs(struct hfi1_devdata *);
-void hfi1_verbs_unregister_sysfs(struct hfi1_devdata *);
+int hfi1_verbs_register_sysfs(struct hfi1_devdata *dd);
+void hfi1_verbs_unregister_sysfs(struct hfi1_devdata *dd);
/* Hook for sysfs read of QSFP */
int qsfp_dump(struct hfi1_pportdata *ppd, char *buf, int len);
-int hfi1_pcie_init(struct pci_dev *, const struct pci_device_id *);
-void hfi1_pcie_cleanup(struct pci_dev *);
-int hfi1_pcie_ddinit(struct hfi1_devdata *, struct pci_dev *);
+int hfi1_pcie_init(struct pci_dev *pdev, const struct pci_device_id *ent);
+void hfi1_pcie_cleanup(struct pci_dev *pdev);
+int hfi1_pcie_ddinit(struct hfi1_devdata *dd, struct pci_dev *pdev);
void hfi1_pcie_ddcleanup(struct hfi1_devdata *);
-void hfi1_pcie_flr(struct hfi1_devdata *);
-int pcie_speeds(struct hfi1_devdata *);
-void request_msix(struct hfi1_devdata *, u32 *, struct hfi1_msix_entry *);
-void hfi1_enable_intx(struct pci_dev *);
+int pcie_speeds(struct hfi1_devdata *dd);
+void request_msix(struct hfi1_devdata *dd, u32 *nent,
+ struct hfi1_msix_entry *entry);
+void hfi1_enable_intx(struct pci_dev *pdev);
void restore_pci_variables(struct hfi1_devdata *dd);
int do_pcie_gen3_transition(struct hfi1_devdata *dd);
int parse_platform_config(struct hfi1_devdata *dd);
diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
index 4d6b9f82efa3..4a11d4da4c92 100644
--- a/drivers/infiniband/hw/hfi1/init.c
+++ b/drivers/infiniband/hw/hfi1/init.c
@@ -53,6 +53,7 @@
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/hrtimer.h>
+#include <linux/bitmap.h>
#include <rdma/rdma_vt.h>
#include "hfi.h"
@@ -70,6 +71,7 @@
#undef pr_fmt
#define pr_fmt(fmt) DRIVER_NAME ": " fmt
+#define HFI1_MAX_ACTIVE_WORKQUEUE_ENTRIES 5
/*
* min buffers we want to have per context, after driver
*/
@@ -101,9 +103,9 @@ static unsigned hfi1_rcvarr_split = 25;
module_param_named(rcvarr_split, hfi1_rcvarr_split, uint, S_IRUGO);
MODULE_PARM_DESC(rcvarr_split, "Percent of context's RcvArray entries used for Eager buffers");
-static uint eager_buffer_size = (2 << 20); /* 2MB */
+static uint eager_buffer_size = (8 << 20); /* 8MB */
module_param(eager_buffer_size, uint, S_IRUGO);
-MODULE_PARM_DESC(eager_buffer_size, "Size of the eager buffers, default: 2MB");
+MODULE_PARM_DESC(eager_buffer_size, "Size of the eager buffers, default: 8MB");
static uint rcvhdrcnt = 2048; /* 2x the max eager buffer count */
module_param_named(rcvhdrcnt, rcvhdrcnt, uint, S_IRUGO);
@@ -117,7 +119,7 @@ unsigned int user_credit_return_threshold = 33; /* default is 33% */
module_param(user_credit_return_threshold, uint, S_IRUGO);
MODULE_PARM_DESC(user_credit_return_threshold, "Credit return threshold for user send contexts, return when unreturned credits passes this many blocks (in percent of allocated blocks, 0 is off)");
-static inline u64 encode_rcv_header_entry_size(u16);
+static inline u64 encode_rcv_header_entry_size(u16 size);
static struct idr hfi1_unit_table;
u32 hfi1_cpulist_count;
@@ -175,13 +177,7 @@ int hfi1_create_ctxts(struct hfi1_devdata *dd)
goto nomem;
}
- ret = hfi1_init_ctxt(rcd->sc);
- if (ret < 0) {
- dd_dev_err(dd,
- "Failed to setup kernel receive context, failing\n");
- ret = -EFAULT;
- goto bail;
- }
+ hfi1_init_ctxt(rcd->sc);
}
/*
@@ -193,7 +189,7 @@ int hfi1_create_ctxts(struct hfi1_devdata *dd)
return 0;
nomem:
ret = -ENOMEM;
-bail:
+
if (dd->rcd) {
for (i = 0; i < dd->num_rcv_contexts; ++i)
hfi1_free_ctxtdata(dd, dd->rcd[i]);
@@ -227,7 +223,7 @@ struct hfi1_ctxtdata *hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, u32 ctxt,
INIT_LIST_HEAD(&rcd->qp_wait_list);
rcd->ppd = ppd;
rcd->dd = dd;
- rcd->cnt = 1;
+ __set_bit(0, rcd->in_use_ctxts);
rcd->ctxt = ctxt;
dd->rcd[ctxt] = rcd;
rcd->numa_id = numa;
@@ -623,7 +619,7 @@ static int create_workqueues(struct hfi1_devdata *dd)
alloc_workqueue(
"hfi%d_%d",
WQ_SYSFS | WQ_HIGHPRI | WQ_CPU_INTENSIVE,
- dd->num_sdma,
+ HFI1_MAX_ACTIVE_WORKQUEUE_ENTRIES,
dd->unit, pidx);
if (!ppd->hfi1_wq)
goto wq_error;
@@ -968,7 +964,6 @@ void hfi1_free_ctxtdata(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
kfree(rcd->egrbufs.buffers);
sc_free(rcd->sc);
- vfree(rcd->user_event_mask);
vfree(rcd->subctxt_uregbase);
vfree(rcd->subctxt_rcvegrbuf);
vfree(rcd->subctxt_rcvhdr_base);
@@ -1687,8 +1682,6 @@ bail_free:
dd_dev_err(dd,
"attempt to allocate 1 page for ctxt %u rcvhdrqtailaddr failed\n",
rcd->ctxt);
- vfree(rcd->user_event_mask);
- rcd->user_event_mask = NULL;
dma_free_coherent(&dd->pcidev->dev, amt, rcd->rcvhdrq,
rcd->rcvhdrq_dma);
rcd->rcvhdrq = NULL;
@@ -1777,6 +1770,7 @@ int hfi1_setup_eagerbufs(struct hfi1_ctxtdata *rcd)
!HFI1_CAP_KGET_MASK(rcd->flags, MULTI_PKT_EGR)) {
dd_dev_err(dd, "ctxt%u: Failed to allocate eager buffers\n",
rcd->ctxt);
+ ret = -ENOMEM;
goto bail_rcvegrbuf_phys;
}
@@ -1854,7 +1848,7 @@ int hfi1_setup_eagerbufs(struct hfi1_ctxtdata *rcd)
"ctxt%u: current Eager buffer size is invalid %u\n",
rcd->ctxt, rcd->egrbufs.rcvtid_size);
ret = -EINVAL;
- goto bail;
+ goto bail_rcvegrbuf_phys;
}
for (idx = 0; idx < rcd->egrbufs.alloced; idx++) {
@@ -1862,7 +1856,8 @@ int hfi1_setup_eagerbufs(struct hfi1_ctxtdata *rcd)
rcd->egrbufs.rcvtids[idx].dma, order);
cond_resched();
}
- goto bail;
+
+ return 0;
bail_rcvegrbuf_phys:
for (idx = 0; idx < rcd->egrbufs.alloced &&
@@ -1876,6 +1871,6 @@ bail_rcvegrbuf_phys:
rcd->egrbufs.buffers[idx].dma = 0;
rcd->egrbufs.buffers[idx].len = 0;
}
-bail:
+
return ret;
}
diff --git a/drivers/infiniband/hw/hfi1/intr.c b/drivers/infiniband/hw/hfi1/intr.c
index 232014d46f79..ba265d0ae93b 100644
--- a/drivers/infiniband/hw/hfi1/intr.c
+++ b/drivers/infiniband/hw/hfi1/intr.c
@@ -47,6 +47,7 @@
#include <linux/pci.h>
#include <linux/delay.h>
+#include <linux/bitmap.h>
#include "hfi.h"
#include "common.h"
@@ -189,7 +190,7 @@ void handle_user_interrupt(struct hfi1_ctxtdata *rcd)
unsigned long flags;
spin_lock_irqsave(&dd->uctxt_lock, flags);
- if (!rcd->cnt)
+ if (bitmap_empty(rcd->in_use_ctxts, HFI1_MAX_SHARED_CTXTS))
goto done;
if (test_and_clear_bit(HFI1_CTXT_WAITING_RCV, &rcd->event_flags)) {
diff --git a/drivers/infiniband/hw/hfi1/pcie.c b/drivers/infiniband/hw/hfi1/pcie.c
index e39e01b79382..93faf86d54b6 100644
--- a/drivers/infiniband/hw/hfi1/pcie.c
+++ b/drivers/infiniband/hw/hfi1/pcie.c
@@ -240,36 +240,6 @@ void hfi1_pcie_ddcleanup(struct hfi1_devdata *dd)
iounmap(dd->piobase);
}
-/*
- * Do a Function Level Reset (FLR) on the device.
- * Based on static function drivers/pci/pci.c:pcie_flr().
- */
-void hfi1_pcie_flr(struct hfi1_devdata *dd)
-{
- int i;
- u16 status;
-
- /* no need to check for the capability - we know the device has it */
-
- /* wait for Transaction Pending bit to clear, at most a few ms */
- for (i = 0; i < 4; i++) {
- if (i)
- msleep((1 << (i - 1)) * 100);
-
- pcie_capability_read_word(dd->pcidev, PCI_EXP_DEVSTA, &status);
- if (!(status & PCI_EXP_DEVSTA_TRPND))
- goto clear;
- }
-
- dd_dev_err(dd, "Transaction Pending bit is not clearing, proceeding with reset anyway\n");
-
-clear:
- pcie_capability_set_word(dd->pcidev, PCI_EXP_DEVCTL,
- PCI_EXP_DEVCTL_BCR_FLR);
- /* PCIe spec requires the function to be back within 100ms */
- msleep(100);
-}
-
static void msix_setup(struct hfi1_devdata *dd, int pos, u32 *msixcnt,
struct hfi1_msix_entry *hfi1_msix_entry)
{
diff --git a/drivers/infiniband/hw/hfi1/qp.c b/drivers/infiniband/hw/hfi1/qp.c
index 4573e4c9f35c..650305cc0373 100644
--- a/drivers/infiniband/hw/hfi1/qp.c
+++ b/drivers/infiniband/hw/hfi1/qp.c
@@ -731,9 +731,7 @@ void quiesce_qp(struct rvt_qp *qp)
void notify_qp_reset(struct rvt_qp *qp)
{
- struct hfi1_qp_priv *priv = qp->priv;
-
- priv->r_adefered = 0;
+ qp->r_adefered = 0;
clear_ahg(qp);
}
diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c
index 75a729cd0c3d..069bdaf061ab 100644
--- a/drivers/infiniband/hw/hfi1/rc.c
+++ b/drivers/infiniband/hw/hfi1/rc.c
@@ -727,10 +727,9 @@ void hfi1_send_rc_ack(struct hfi1_ctxtdata *rcd, struct rvt_qp *qp,
struct ib_header hdr;
struct ib_other_headers *ohdr;
unsigned long flags;
- struct hfi1_qp_priv *priv = qp->priv;
/* clear the defer count */
- priv->r_adefered = 0;
+ qp->r_adefered = 0;
/* Don't send ACK or NAK if a RDMA read or atomic is pending. */
if (qp->s_flags & RVT_S_RESP_PENDING)
@@ -1604,9 +1603,7 @@ static inline void rc_defered_ack(struct hfi1_ctxtdata *rcd,
static inline void rc_cancel_ack(struct rvt_qp *qp)
{
- struct hfi1_qp_priv *priv = qp->priv;
-
- priv->r_adefered = 0;
+ qp->r_adefered = 0;
if (list_empty(&qp->rspwait))
return;
list_del_init(&qp->rspwait);
@@ -2314,13 +2311,11 @@ send_last:
qp->r_nak_state = 0;
/* Send an ACK if requested or required. */
if (psn & IB_BTH_REQ_ACK) {
- struct hfi1_qp_priv *priv = qp->priv;
-
if (packet->numpkt == 0) {
rc_cancel_ack(qp);
goto send_ack;
}
- if (priv->r_adefered >= HFI1_PSN_CREDIT) {
+ if (qp->r_adefered >= HFI1_PSN_CREDIT) {
rc_cancel_ack(qp);
goto send_ack;
}
@@ -2328,7 +2323,7 @@ send_last:
rc_cancel_ack(qp);
goto send_ack;
}
- priv->r_adefered++;
+ qp->r_adefered++;
rc_defered_ack(rcd, qp);
}
return;
diff --git a/drivers/infiniband/hw/hfi1/ruc.c b/drivers/infiniband/hw/hfi1/ruc.c
index 891ba0a81bbd..3a17daba28a9 100644
--- a/drivers/infiniband/hw/hfi1/ruc.c
+++ b/drivers/infiniband/hw/hfi1/ruc.c
@@ -800,6 +800,43 @@ void hfi1_make_ruc_header(struct rvt_qp *qp, struct ib_other_headers *ohdr,
/* when sending, force a reschedule every one of these periods */
#define SEND_RESCHED_TIMEOUT (5 * HZ) /* 5s in jiffies */
+/**
+ * schedule_send_yield - test for a yield required for QP send engine
+ * @qp: a pointer to the QP
+ * @ps: a pointer to a structure with commonly looked-up values for
+ * the send engine progress (the timeout slice is carried in @ps)
+ *
+ * This routine checks whether the time slice for the QP has expired
+ * for RC QPs; if so, an additional work entry is queued so that other
+ * QPs get an opportunity to be scheduled. It returns true if a yield
+ * is required and false otherwise.
+ */
+static bool schedule_send_yield(struct rvt_qp *qp,
+ struct hfi1_pkt_state *ps)
+{
+ if (unlikely(time_after(jiffies, ps->timeout))) {
+ if (!ps->in_thread ||
+ workqueue_congested(ps->cpu, ps->ppd->hfi1_wq)) {
+ spin_lock_irqsave(&qp->s_lock, ps->flags);
+ qp->s_flags &= ~RVT_S_BUSY;
+ hfi1_schedule_send(qp);
+ spin_unlock_irqrestore(&qp->s_lock, ps->flags);
+ this_cpu_inc(*ps->ppd->dd->send_schedule);
+ trace_hfi1_rc_expired_time_slice(qp, true);
+ return true;
+ }
+
+ cond_resched();
+ this_cpu_inc(*ps->ppd->dd->send_schedule);
+ ps->timeout = jiffies + ps->timeout_int;
+ }
+
+ trace_hfi1_rc_expired_time_slice(qp, false);
+ return false;
+}
+
void hfi1_do_send_from_rvt(struct rvt_qp *qp)
{
hfi1_do_send(qp, false);
@@ -827,13 +864,13 @@ void hfi1_do_send(struct rvt_qp *qp, bool in_thread)
struct hfi1_pkt_state ps;
struct hfi1_qp_priv *priv = qp->priv;
int (*make_req)(struct rvt_qp *qp, struct hfi1_pkt_state *ps);
- unsigned long timeout;
- unsigned long timeout_int;
- int cpu;
ps.dev = to_idev(qp->ibqp.device);
ps.ibp = to_iport(qp->ibqp.device, qp->port_num);
ps.ppd = ppd_from_ibp(ps.ibp);
+ ps.in_thread = in_thread;
+
+ trace_hfi1_rc_do_send(qp, in_thread);
switch (qp->ibqp.qp_type) {
case IB_QPT_RC:
@@ -844,7 +881,7 @@ void hfi1_do_send(struct rvt_qp *qp, bool in_thread)
return;
}
make_req = hfi1_make_rc_req;
- timeout_int = (qp->timeout_jiffies);
+ ps.timeout_int = qp->timeout_jiffies;
break;
case IB_QPT_UC:
if (!loopback && ((rdma_ah_get_dlid(&qp->remote_ah_attr) &
@@ -854,11 +891,11 @@ void hfi1_do_send(struct rvt_qp *qp, bool in_thread)
return;
}
make_req = hfi1_make_uc_req;
- timeout_int = SEND_RESCHED_TIMEOUT;
+ ps.timeout_int = SEND_RESCHED_TIMEOUT;
break;
default:
make_req = hfi1_make_ud_req;
- timeout_int = SEND_RESCHED_TIMEOUT;
+ ps.timeout_int = SEND_RESCHED_TIMEOUT;
}
spin_lock_irqsave(&qp->s_lock, ps.flags);
@@ -871,9 +908,11 @@ void hfi1_do_send(struct rvt_qp *qp, bool in_thread)
qp->s_flags |= RVT_S_BUSY;
- timeout = jiffies + (timeout_int) / 8;
- cpu = priv->s_sde ? priv->s_sde->cpu :
+ ps.timeout_int = ps.timeout_int / 8;
+ ps.timeout = jiffies + ps.timeout_int;
+ ps.cpu = priv->s_sde ? priv->s_sde->cpu :
cpumask_first(cpumask_of_node(ps.ppd->dd->node));
+
/* insure a pre-built packet is handled */
ps.s_txreq = get_waiting_verbs_txreq(qp);
do {
@@ -889,28 +928,9 @@ void hfi1_do_send(struct rvt_qp *qp, bool in_thread)
/* Record that s_ahg is empty. */
qp->s_hdrwords = 0;
/* allow other tasks to run */
- if (unlikely(time_after(jiffies, timeout))) {
- if (!in_thread ||
- workqueue_congested(
- cpu,
- ps.ppd->hfi1_wq)) {
- spin_lock_irqsave(
- &qp->s_lock,
- ps.flags);
- qp->s_flags &= ~RVT_S_BUSY;
- hfi1_schedule_send(qp);
- spin_unlock_irqrestore(
- &qp->s_lock,
- ps.flags);
- this_cpu_inc(
- *ps.ppd->dd->send_schedule);
- return;
- }
- cond_resched();
- this_cpu_inc(
- *ps.ppd->dd->send_schedule);
- timeout = jiffies + (timeout_int) / 8;
- }
+ if (schedule_send_yield(qp, &ps))
+ return;
+
spin_lock_irqsave(&qp->s_lock, ps.flags);
}
} while (make_req(qp, &ps));
diff --git a/drivers/infiniband/hw/hfi1/trace_ctxts.h b/drivers/infiniband/hw/hfi1/trace_ctxts.h
index 26ae789e47cf..4eb4cc798035 100644
--- a/drivers/infiniband/hw/hfi1/trace_ctxts.h
+++ b/drivers/infiniband/hw/hfi1/trace_ctxts.h
@@ -57,12 +57,14 @@
#define UCTXT_FMT \
"cred:%u, credaddr:0x%llx, piobase:0x%p, rcvhdr_cnt:%u, " \
- "rcvbase:0x%llx, rcvegrc:%u, rcvegrb:0x%llx"
+ "rcvbase:0x%llx, rcvegrc:%u, rcvegrb:0x%llx, subctxt_cnt:%u"
TRACE_EVENT(hfi1_uctxtdata,
- TP_PROTO(struct hfi1_devdata *dd, struct hfi1_ctxtdata *uctxt),
- TP_ARGS(dd, uctxt),
+ TP_PROTO(struct hfi1_devdata *dd, struct hfi1_ctxtdata *uctxt,
+ unsigned int subctxt),
+ TP_ARGS(dd, uctxt, subctxt),
TP_STRUCT__entry(DD_DEV_ENTRY(dd)
__field(unsigned int, ctxt)
+ __field(unsigned int, subctxt)
__field(u32, credits)
__field(u64, hw_free)
__field(void __iomem *, piobase)
@@ -70,9 +72,11 @@ TRACE_EVENT(hfi1_uctxtdata,
__field(u64, rcvhdrq_dma)
__field(u32, eager_cnt)
__field(u64, rcvegr_dma)
+ __field(unsigned int, subctxt_cnt)
),
TP_fast_assign(DD_DEV_ASSIGN(dd);
__entry->ctxt = uctxt->ctxt;
+ __entry->subctxt = subctxt;
__entry->credits = uctxt->sc->credits;
__entry->hw_free = le64_to_cpu(*uctxt->sc->hw_free);
__entry->piobase = uctxt->sc->base_addr;
@@ -80,17 +84,20 @@ TRACE_EVENT(hfi1_uctxtdata,
__entry->rcvhdrq_dma = uctxt->rcvhdrq_dma;
__entry->eager_cnt = uctxt->egrbufs.alloced;
__entry->rcvegr_dma = uctxt->egrbufs.rcvtids[0].dma;
+ __entry->subctxt_cnt = uctxt->subctxt_cnt;
),
- TP_printk("[%s] ctxt %u " UCTXT_FMT,
+ TP_printk("[%s] ctxt %u:%u " UCTXT_FMT,
__get_str(dev),
__entry->ctxt,
+ __entry->subctxt,
__entry->credits,
__entry->hw_free,
__entry->piobase,
__entry->rcvhdrq_cnt,
__entry->rcvhdrq_dma,
__entry->eager_cnt,
- __entry->rcvegr_dma
+ __entry->rcvegr_dma,
+ __entry->subctxt_cnt
)
);
diff --git a/drivers/infiniband/hw/hfi1/trace_tx.h b/drivers/infiniband/hw/hfi1/trace_tx.h
index 2c9ac57657d3..c59809a7f121 100644
--- a/drivers/infiniband/hw/hfi1/trace_tx.h
+++ b/drivers/infiniband/hw/hfi1/trace_tx.h
@@ -676,6 +676,40 @@ TRACE_EVENT(
)
);
+DECLARE_EVENT_CLASS(
+ hfi1_do_send_template,
+ TP_PROTO(struct rvt_qp *qp, bool flag),
+ TP_ARGS(qp, flag),
+ TP_STRUCT__entry(
+ DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
+ __field(u32, qpn)
+ __field(bool, flag)
+ ),
+ TP_fast_assign(
+ DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device))
+ __entry->qpn = qp->ibqp.qp_num;
+ __entry->flag = flag;
+ ),
+ TP_printk(
+ "[%s] qpn %x flag %d",
+ __get_str(dev),
+ __entry->qpn,
+ __entry->flag
+ )
+);
+
+DEFINE_EVENT(
+ hfi1_do_send_template, hfi1_rc_do_send,
+ TP_PROTO(struct rvt_qp *qp, bool flag),
+ TP_ARGS(qp, flag)
+);
+
+DEFINE_EVENT(
+ hfi1_do_send_template, hfi1_rc_expired_time_slice,
+ TP_PROTO(struct rvt_qp *qp, bool flag),
+ TP_ARGS(qp, flag)
+);
+
#endif /* __HFI1_TRACE_TX_H */
#undef TRACE_INCLUDE_PATH
diff --git a/drivers/infiniband/hw/hfi1/user_exp_rcv.c b/drivers/infiniband/hw/hfi1/user_exp_rcv.c
index 35c6e7ec8ad6..a8f0aa4722f6 100644
--- a/drivers/infiniband/hw/hfi1/user_exp_rcv.c
+++ b/drivers/infiniband/hw/hfi1/user_exp_rcv.c
@@ -53,7 +53,7 @@
struct tid_group {
struct list_head list;
- unsigned base;
+ u32 base;
u8 size;
u8 used;
u8 map;
@@ -82,20 +82,25 @@ struct tid_pageset {
(unsigned long)(len) - 1) & PAGE_MASK) - \
((unsigned long)vaddr & PAGE_MASK)) >> PAGE_SHIFT))
-static void unlock_exp_tids(struct hfi1_ctxtdata *, struct exp_tid_set *,
- struct hfi1_filedata *);
-static u32 find_phys_blocks(struct page **, unsigned, struct tid_pageset *);
-static int set_rcvarray_entry(struct file *, unsigned long, u32,
- struct tid_group *, struct page **, unsigned);
-static int tid_rb_insert(void *, struct mmu_rb_node *);
+static void unlock_exp_tids(struct hfi1_ctxtdata *uctxt,
+ struct exp_tid_set *set,
+ struct hfi1_filedata *fd);
+static u32 find_phys_blocks(struct page **pages, unsigned npages,
+ struct tid_pageset *list);
+static int set_rcvarray_entry(struct hfi1_filedata *fd, unsigned long vaddr,
+ u32 rcventry, struct tid_group *grp,
+ struct page **pages, unsigned npages);
+static int tid_rb_insert(void *arg, struct mmu_rb_node *node);
static void cacheless_tid_rb_remove(struct hfi1_filedata *fdata,
struct tid_rb_node *tnode);
-static void tid_rb_remove(void *, struct mmu_rb_node *);
-static int tid_rb_invalidate(void *, struct mmu_rb_node *);
-static int program_rcvarray(struct file *, unsigned long, struct tid_group *,
- struct tid_pageset *, unsigned, u16, struct page **,
- u32 *, unsigned *, unsigned *);
-static int unprogram_rcvarray(struct file *, u32, struct tid_group **);
+static void tid_rb_remove(void *arg, struct mmu_rb_node *node);
+static int tid_rb_invalidate(void *arg, struct mmu_rb_node *mnode);
+static int program_rcvarray(struct hfi1_filedata *fd, unsigned long vaddr,
+ struct tid_group *grp, struct tid_pageset *sets,
+ unsigned start, u16 count, struct page **pages,
+ u32 *tidlist, unsigned *tididx, unsigned *pmapped);
+static int unprogram_rcvarray(struct hfi1_filedata *fd, u32 tidinfo,
+ struct tid_group **grp);
static void clear_tid_node(struct hfi1_filedata *fd, struct tid_rb_node *node);
static struct mmu_rb_ops tid_rb_ops = {
@@ -149,52 +154,60 @@ static inline void tid_group_move(struct tid_group *group,
tid_group_add_tail(group, s2);
}
+int hfi1_user_exp_rcv_grp_init(struct hfi1_filedata *fd)
+{
+ struct hfi1_ctxtdata *uctxt = fd->uctxt;
+ struct hfi1_devdata *dd = fd->dd;
+ u32 tidbase;
+ u32 i;
+ struct tid_group *grp, *gptr;
+
+ exp_tid_group_init(&uctxt->tid_group_list);
+ exp_tid_group_init(&uctxt->tid_used_list);
+ exp_tid_group_init(&uctxt->tid_full_list);
+
+ tidbase = uctxt->expected_base;
+ for (i = 0; i < uctxt->expected_count /
+ dd->rcv_entries.group_size; i++) {
+ grp = kzalloc(sizeof(*grp), GFP_KERNEL);
+ if (!grp)
+ goto grp_failed;
+
+ grp->size = dd->rcv_entries.group_size;
+ grp->base = tidbase;
+ tid_group_add_tail(grp, &uctxt->tid_group_list);
+ tidbase += dd->rcv_entries.group_size;
+ }
+
+ return 0;
+
+grp_failed:
+ list_for_each_entry_safe(grp, gptr, &uctxt->tid_group_list.list,
+ list) {
+ list_del_init(&grp->list);
+ kfree(grp);
+ }
+
+ return -ENOMEM;
+}
+
/*
* Initialize context and file private data needed for Expected
* receive caching. This needs to be done after the context has
* been configured with the eager/expected RcvEntry counts.
*/
-int hfi1_user_exp_rcv_init(struct file *fp)
+int hfi1_user_exp_rcv_init(struct hfi1_filedata *fd)
{
- struct hfi1_filedata *fd = fp->private_data;
struct hfi1_ctxtdata *uctxt = fd->uctxt;
struct hfi1_devdata *dd = uctxt->dd;
- unsigned tidbase;
- int i, ret = 0;
+ int ret = 0;
spin_lock_init(&fd->tid_lock);
spin_lock_init(&fd->invalid_lock);
- if (!uctxt->subctxt_cnt || !fd->subctxt) {
- exp_tid_group_init(&uctxt->tid_group_list);
- exp_tid_group_init(&uctxt->tid_used_list);
- exp_tid_group_init(&uctxt->tid_full_list);
-
- tidbase = uctxt->expected_base;
- for (i = 0; i < uctxt->expected_count /
- dd->rcv_entries.group_size; i++) {
- struct tid_group *grp;
-
- grp = kzalloc(sizeof(*grp), GFP_KERNEL);
- if (!grp) {
- /*
- * If we fail here, the groups already
- * allocated will be freed by the close
- * call.
- */
- ret = -ENOMEM;
- goto done;
- }
- grp->size = dd->rcv_entries.group_size;
- grp->base = tidbase;
- tid_group_add_tail(grp, &uctxt->tid_group_list);
- tidbase += dd->rcv_entries.group_size;
- }
- }
-
fd->entry_to_rb = kcalloc(uctxt->expected_count,
- sizeof(struct rb_node *),
- GFP_KERNEL);
+ sizeof(struct rb_node *),
+ GFP_KERNEL);
if (!fd->entry_to_rb)
return -ENOMEM;
@@ -204,8 +217,9 @@ int hfi1_user_exp_rcv_init(struct file *fp)
sizeof(*fd->invalid_tids),
GFP_KERNEL);
if (!fd->invalid_tids) {
- ret = -ENOMEM;
- goto done;
+ kfree(fd->entry_to_rb);
+ fd->entry_to_rb = NULL;
+ return -ENOMEM;
}
/*
@@ -248,41 +262,44 @@ int hfi1_user_exp_rcv_init(struct file *fp)
fd->tid_limit = uctxt->expected_count;
}
spin_unlock(&fd->tid_lock);
-done:
+
return ret;
}
-int hfi1_user_exp_rcv_free(struct hfi1_filedata *fd)
+void hfi1_user_exp_rcv_grp_free(struct hfi1_ctxtdata *uctxt)
{
- struct hfi1_ctxtdata *uctxt = fd->uctxt;
struct tid_group *grp, *gptr;
- if (!test_bit(HFI1_CTXT_SETUP_DONE, &uctxt->event_flags))
- return 0;
+ list_for_each_entry_safe(grp, gptr, &uctxt->tid_group_list.list,
+ list) {
+ list_del_init(&grp->list);
+ kfree(grp);
+ }
+ hfi1_clear_tids(uctxt);
+}
+
+void hfi1_user_exp_rcv_free(struct hfi1_filedata *fd)
+{
+ struct hfi1_ctxtdata *uctxt = fd->uctxt;
+
/*
* The notifier would have been removed when the process's mm
* was freed.
*/
- if (fd->handler)
+ if (fd->handler) {
hfi1_mmu_rb_unregister(fd->handler);
-
- kfree(fd->invalid_tids);
-
- if (!uctxt->cnt) {
+ } else {
if (!EXP_TID_SET_EMPTY(uctxt->tid_full_list))
unlock_exp_tids(uctxt, &uctxt->tid_full_list, fd);
if (!EXP_TID_SET_EMPTY(uctxt->tid_used_list))
unlock_exp_tids(uctxt, &uctxt->tid_used_list, fd);
- list_for_each_entry_safe(grp, gptr, &uctxt->tid_group_list.list,
- list) {
- list_del_init(&grp->list);
- kfree(grp);
- }
- hfi1_clear_tids(uctxt);
}
+ kfree(fd->invalid_tids);
+ fd->invalid_tids = NULL;
+
kfree(fd->entry_to_rb);
- return 0;
+ fd->entry_to_rb = NULL;
}
/*
@@ -351,10 +368,10 @@ static inline void rcv_array_wc_fill(struct hfi1_devdata *dd, u32 index)
* can fit into the group. If the group becomes fully
* used, move it to tid_full_list.
*/
-int hfi1_user_exp_rcv_setup(struct file *fp, struct hfi1_tid_info *tinfo)
+int hfi1_user_exp_rcv_setup(struct hfi1_filedata *fd,
+ struct hfi1_tid_info *tinfo)
{
int ret = 0, need_group = 0, pinned;
- struct hfi1_filedata *fd = fp->private_data;
struct hfi1_ctxtdata *uctxt = fd->uctxt;
struct hfi1_devdata *dd = uctxt->dd;
unsigned npages, ngroups, pageidx = 0, pageset_count, npagesets,
@@ -451,7 +468,7 @@ int hfi1_user_exp_rcv_setup(struct file *fp, struct hfi1_tid_info *tinfo)
struct tid_group *grp =
tid_group_pop(&uctxt->tid_group_list);
- ret = program_rcvarray(fp, vaddr, grp, pagesets,
+ ret = program_rcvarray(fd, vaddr, grp, pagesets,
pageidx, dd->rcv_entries.group_size,
pages, tidlist, &tididx, &mapped);
/*
@@ -497,7 +514,7 @@ int hfi1_user_exp_rcv_setup(struct file *fp, struct hfi1_tid_info *tinfo)
unsigned use = min_t(unsigned, pageset_count - pageidx,
grp->size - grp->used);
- ret = program_rcvarray(fp, vaddr, grp, pagesets,
+ ret = program_rcvarray(fd, vaddr, grp, pagesets,
pageidx, use, pages, tidlist,
&tididx, &mapped);
if (ret < 0) {
@@ -547,7 +564,7 @@ nomem:
* everything done so far so we don't leak resources.
*/
tinfo->tidlist = (unsigned long)&tidlist;
- hfi1_user_exp_rcv_clear(fp, tinfo);
+ hfi1_user_exp_rcv_clear(fd, tinfo);
tinfo->tidlist = 0;
ret = -EFAULT;
goto bail;
@@ -571,10 +588,10 @@ bail:
return ret > 0 ? 0 : ret;
}
-int hfi1_user_exp_rcv_clear(struct file *fp, struct hfi1_tid_info *tinfo)
+int hfi1_user_exp_rcv_clear(struct hfi1_filedata *fd,
+ struct hfi1_tid_info *tinfo)
{
int ret = 0;
- struct hfi1_filedata *fd = fp->private_data;
struct hfi1_ctxtdata *uctxt = fd->uctxt;
u32 *tidinfo;
unsigned tididx;
@@ -589,7 +606,7 @@ int hfi1_user_exp_rcv_clear(struct file *fp, struct hfi1_tid_info *tinfo)
mutex_lock(&uctxt->exp_lock);
for (tididx = 0; tididx < tinfo->tidcnt; tididx++) {
- ret = unprogram_rcvarray(fp, tidinfo[tididx], NULL);
+ ret = unprogram_rcvarray(fd, tidinfo[tididx], NULL);
if (ret) {
hfi1_cdbg(TID, "Failed to unprogram rcv array %d",
ret);
@@ -606,9 +623,9 @@ int hfi1_user_exp_rcv_clear(struct file *fp, struct hfi1_tid_info *tinfo)
return ret;
}
-int hfi1_user_exp_rcv_invalid(struct file *fp, struct hfi1_tid_info *tinfo)
+int hfi1_user_exp_rcv_invalid(struct hfi1_filedata *fd,
+ struct hfi1_tid_info *tinfo)
{
- struct hfi1_filedata *fd = fp->private_data;
struct hfi1_ctxtdata *uctxt = fd->uctxt;
unsigned long *ev = uctxt->dd->events +
(((uctxt->ctxt - uctxt->dd->first_dyn_alloc_ctxt) *
@@ -723,7 +740,7 @@ static u32 find_phys_blocks(struct page **pages, unsigned npages,
/**
* program_rcvarray() - program an RcvArray group with receive buffers
- * @fp: file pointer
+ * @fd: filedata pointer
* @vaddr: starting user virtual address
* @grp: RcvArray group
* @sets: array of struct tid_pageset holding information on physically
@@ -748,13 +765,12 @@ static u32 find_phys_blocks(struct page **pages, unsigned npages,
* -ENOMEM or -EFAULT on error from set_rcvarray_entry(), or
* number of RcvArray entries programmed.
*/
-static int program_rcvarray(struct file *fp, unsigned long vaddr,
+static int program_rcvarray(struct hfi1_filedata *fd, unsigned long vaddr,
struct tid_group *grp,
struct tid_pageset *sets,
unsigned start, u16 count, struct page **pages,
u32 *tidlist, unsigned *tididx, unsigned *pmapped)
{
- struct hfi1_filedata *fd = fp->private_data;
struct hfi1_ctxtdata *uctxt = fd->uctxt;
struct hfi1_devdata *dd = uctxt->dd;
u16 idx;
@@ -795,7 +811,7 @@ static int program_rcvarray(struct file *fp, unsigned long vaddr,
npages = sets[setidx].count;
pageidx = sets[setidx].idx;
- ret = set_rcvarray_entry(fp, vaddr + (pageidx * PAGE_SIZE),
+ ret = set_rcvarray_entry(fd, vaddr + (pageidx * PAGE_SIZE),
rcventry, grp, pages + pageidx,
npages);
if (ret)
@@ -817,12 +833,11 @@ static int program_rcvarray(struct file *fp, unsigned long vaddr,
return idx;
}
-static int set_rcvarray_entry(struct file *fp, unsigned long vaddr,
+static int set_rcvarray_entry(struct hfi1_filedata *fd, unsigned long vaddr,
u32 rcventry, struct tid_group *grp,
struct page **pages, unsigned npages)
{
int ret;
- struct hfi1_filedata *fd = fp->private_data;
struct hfi1_ctxtdata *uctxt = fd->uctxt;
struct tid_rb_node *node;
struct hfi1_devdata *dd = uctxt->dd;
@@ -876,10 +891,9 @@ static int set_rcvarray_entry(struct file *fp, unsigned long vaddr,
return 0;
}
-static int unprogram_rcvarray(struct file *fp, u32 tidinfo,
+static int unprogram_rcvarray(struct hfi1_filedata *fd, u32 tidinfo,
struct tid_group **grp)
{
- struct hfi1_filedata *fd = fp->private_data;
struct hfi1_ctxtdata *uctxt = fd->uctxt;
struct hfi1_devdata *dd = uctxt->dd;
struct tid_rb_node *node;
diff --git a/drivers/infiniband/hw/hfi1/user_exp_rcv.h b/drivers/infiniband/hw/hfi1/user_exp_rcv.h
index 9bc8d9fba87e..5250c897298d 100644
--- a/drivers/infiniband/hw/hfi1/user_exp_rcv.h
+++ b/drivers/infiniband/hw/hfi1/user_exp_rcv.h
@@ -1,7 +1,7 @@
#ifndef _HFI1_USER_EXP_RCV_H
#define _HFI1_USER_EXP_RCV_H
/*
- * Copyright(c) 2015, 2016 Intel Corporation.
+ * Copyright(c) 2015 - 2017 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -70,10 +70,15 @@
(tid) |= EXP_TID_SET(field, (value)); \
} while (0)
-int hfi1_user_exp_rcv_init(struct file *);
-int hfi1_user_exp_rcv_free(struct hfi1_filedata *);
-int hfi1_user_exp_rcv_setup(struct file *, struct hfi1_tid_info *);
-int hfi1_user_exp_rcv_clear(struct file *, struct hfi1_tid_info *);
-int hfi1_user_exp_rcv_invalid(struct file *, struct hfi1_tid_info *);
+void hfi1_user_exp_rcv_grp_free(struct hfi1_ctxtdata *uctxt);
+int hfi1_user_exp_rcv_grp_init(struct hfi1_filedata *fd);
+int hfi1_user_exp_rcv_init(struct hfi1_filedata *fd);
+void hfi1_user_exp_rcv_free(struct hfi1_filedata *fd);
+int hfi1_user_exp_rcv_setup(struct hfi1_filedata *fd,
+ struct hfi1_tid_info *tinfo);
+int hfi1_user_exp_rcv_clear(struct hfi1_filedata *fd,
+ struct hfi1_tid_info *tinfo);
+int hfi1_user_exp_rcv_invalid(struct hfi1_filedata *fd,
+ struct hfi1_tid_info *tinfo);
#endif /* _HFI1_USER_EXP_RCV_H */
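With the prototypes above now taking struct hfi1_filedata * rather than struct file *, callers dereference private_data exactly once. A minimal sketch, assuming an ioctl/write handler in file_ops.c that still holds the struct file pointer fp (names illustrative):

	struct hfi1_filedata *fd = fp->private_data;
	struct hfi1_tid_info tinfo;

	/* ... copy tinfo in from user space ... */

	ret = hfi1_user_exp_rcv_setup(fd, &tinfo);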
diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c
index 0749689d7643..d55339f5d73b 100644
--- a/drivers/infiniband/hw/hfi1/user_sdma.c
+++ b/drivers/infiniband/hw/hfi1/user_sdma.c
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2015, 2016 Intel Corporation.
+ * Copyright(c) 2015 - 2017 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -143,7 +143,9 @@ MODULE_PARM_DESC(sdma_comp_size, "Size of User SDMA completion ring. Default: 12
/* KDETH OM multipliers and switch over point */
#define KDETH_OM_SMALL 4
+#define KDETH_OM_SMALL_SHIFT 2
#define KDETH_OM_LARGE 64
+#define KDETH_OM_LARGE_SHIFT 6
#define KDETH_OM_MAX_SIZE (1 << ((KDETH_OM_LARGE / KDETH_OM_SMALL) + 1))
/* Tx request flag bits */
@@ -153,9 +155,8 @@ MODULE_PARM_DESC(sdma_comp_size, "Size of User SDMA completion ring. Default: 12
/* SDMA request flag bits */
#define SDMA_REQ_FOR_THREAD 1
#define SDMA_REQ_SEND_DONE 2
-#define SDMA_REQ_HAVE_AHG 3
-#define SDMA_REQ_HAS_ERROR 4
-#define SDMA_REQ_DONE_ERROR 5
+#define SDMA_REQ_HAS_ERROR 3
+#define SDMA_REQ_DONE_ERROR 4
#define SDMA_PKT_Q_INACTIVE BIT(0)
#define SDMA_PKT_Q_ACTIVE BIT(1)
@@ -214,7 +215,7 @@ struct user_sdma_request {
* each request will need its own engine pointer.
*/
struct sdma_engine *sde;
- u8 ahg_idx;
+ s8 ahg_idx;
u32 ahg[9];
/*
* KDETH.Offset (Eager) field
@@ -229,12 +230,6 @@ struct user_sdma_request {
*/
u32 tidoffset;
/*
- * KDETH.OM
- * Remember this because the header template always sets it
- * to 0.
- */
- u8 omfactor;
- /*
* We copy the iovs for this request (based on
* info.iovcnt). These are only the data vectors
*/
@@ -284,39 +279,43 @@ struct user_sdma_txreq {
hfi1_cdbg(SDMA, "[%u:%u:%u] " fmt, (pq)->dd->unit, (pq)->ctxt, \
(pq)->subctxt, ##__VA_ARGS__)
-static int user_sdma_send_pkts(struct user_sdma_request *, unsigned);
-static int num_user_pages(const struct iovec *);
-static void user_sdma_txreq_cb(struct sdma_txreq *, int);
-static inline void pq_update(struct hfi1_user_sdma_pkt_q *);
-static void user_sdma_free_request(struct user_sdma_request *, bool);
-static int pin_vector_pages(struct user_sdma_request *,
- struct user_sdma_iovec *);
-static void unpin_vector_pages(struct mm_struct *, struct page **, unsigned,
- unsigned);
-static int check_header_template(struct user_sdma_request *,
- struct hfi1_pkt_header *, u32, u32);
-static int set_txreq_header(struct user_sdma_request *,
- struct user_sdma_txreq *, u32);
-static int set_txreq_header_ahg(struct user_sdma_request *,
- struct user_sdma_txreq *, u32);
-static inline void set_comp_state(struct hfi1_user_sdma_pkt_q *,
- struct hfi1_user_sdma_comp_q *,
- u16, enum hfi1_sdma_comp_state, int);
-static inline u32 set_pkt_bth_psn(__be32, u8, u32);
+static int user_sdma_send_pkts(struct user_sdma_request *req,
+ unsigned maxpkts);
+static int num_user_pages(const struct iovec *iov);
+static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status);
+static inline void pq_update(struct hfi1_user_sdma_pkt_q *pq);
+static void user_sdma_free_request(struct user_sdma_request *req, bool unpin);
+static int pin_vector_pages(struct user_sdma_request *req,
+ struct user_sdma_iovec *iovec);
+static void unpin_vector_pages(struct mm_struct *mm, struct page **pages,
+ unsigned start, unsigned npages);
+static int check_header_template(struct user_sdma_request *req,
+ struct hfi1_pkt_header *hdr, u32 lrhlen,
+ u32 datalen);
+static int set_txreq_header(struct user_sdma_request *req,
+ struct user_sdma_txreq *tx, u32 datalen);
+static int set_txreq_header_ahg(struct user_sdma_request *req,
+ struct user_sdma_txreq *tx, u32 len);
+static inline void set_comp_state(struct hfi1_user_sdma_pkt_q *pq,
+ struct hfi1_user_sdma_comp_q *cq,
+ u16 idx, enum hfi1_sdma_comp_state state,
+ int ret);
+static inline u32 set_pkt_bth_psn(__be32 bthpsn, u8 expct, u32 frags);
static inline u32 get_lrh_len(struct hfi1_pkt_header, u32 len);
static int defer_packet_queue(
- struct sdma_engine *,
- struct iowait *,
- struct sdma_txreq *,
- unsigned seq);
-static void activate_packet_queue(struct iowait *, int);
-static bool sdma_rb_filter(struct mmu_rb_node *, unsigned long, unsigned long);
-static int sdma_rb_insert(void *, struct mmu_rb_node *);
+ struct sdma_engine *sde,
+ struct iowait *wait,
+ struct sdma_txreq *txreq,
+ unsigned int seq);
+static void activate_packet_queue(struct iowait *wait, int reason);
+static bool sdma_rb_filter(struct mmu_rb_node *node, unsigned long addr,
+ unsigned long len);
+static int sdma_rb_insert(void *arg, struct mmu_rb_node *mnode);
static int sdma_rb_evict(void *arg, struct mmu_rb_node *mnode,
void *arg2, bool *stop);
-static void sdma_rb_remove(void *, struct mmu_rb_node *);
-static int sdma_rb_invalidate(void *, struct mmu_rb_node *);
+static void sdma_rb_remove(void *arg, struct mmu_rb_node *mnode);
+static int sdma_rb_invalidate(void *arg, struct mmu_rb_node *mnode);
static struct mmu_rb_ops sdma_rb_ops = {
.filter = sdma_rb_filter,
@@ -372,45 +371,27 @@ static void sdma_kmem_cache_ctor(void *obj)
memset(tx, 0, sizeof(*tx));
}
-int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt, struct file *fp)
+int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt,
+ struct hfi1_filedata *fd)
{
- struct hfi1_filedata *fd;
- int ret = 0;
+ int ret = -ENOMEM;
char buf[64];
struct hfi1_devdata *dd;
struct hfi1_user_sdma_comp_q *cq;
struct hfi1_user_sdma_pkt_q *pq;
unsigned long flags;
- if (!uctxt || !fp) {
- ret = -EBADF;
- goto done;
- }
-
- fd = fp->private_data;
+ if (!uctxt || !fd)
+ return -EBADF;
- if (!hfi1_sdma_comp_ring_size) {
- ret = -EINVAL;
- goto done;
- }
+ if (!hfi1_sdma_comp_ring_size)
+ return -EINVAL;
dd = uctxt->dd;
pq = kzalloc(sizeof(*pq), GFP_KERNEL);
if (!pq)
- goto pq_nomem;
-
- pq->reqs = kcalloc(hfi1_sdma_comp_ring_size,
- sizeof(*pq->reqs),
- GFP_KERNEL);
- if (!pq->reqs)
- goto pq_reqs_nomem;
-
- pq->req_in_use = kcalloc(BITS_TO_LONGS(hfi1_sdma_comp_ring_size),
- sizeof(*pq->req_in_use),
- GFP_KERNEL);
- if (!pq->req_in_use)
- goto pq_reqs_no_in_use;
+ return -ENOMEM;
INIT_LIST_HEAD(&pq->list);
pq->dd = dd;
@@ -426,10 +407,23 @@ int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt, struct file *fp)
iowait_init(&pq->busy, 0, NULL, defer_packet_queue,
activate_packet_queue, NULL);
pq->reqidx = 0;
+
+ pq->reqs = kcalloc(hfi1_sdma_comp_ring_size,
+ sizeof(*pq->reqs),
+ GFP_KERNEL);
+ if (!pq->reqs)
+ goto pq_reqs_nomem;
+
+ pq->req_in_use = kcalloc(BITS_TO_LONGS(hfi1_sdma_comp_ring_size),
+ sizeof(*pq->req_in_use),
+ GFP_KERNEL);
+ if (!pq->req_in_use)
+ goto pq_reqs_no_in_use;
+
snprintf(buf, 64, "txreq-kmem-cache-%u-%u-%u", dd->unit, uctxt->ctxt,
fd->subctxt);
pq->txreq_cache = kmem_cache_create(buf,
- sizeof(struct user_sdma_txreq),
+ sizeof(struct user_sdma_txreq),
L1_CACHE_BYTES,
SLAB_HWCACHE_ALIGN,
sdma_kmem_cache_ctor);
@@ -438,7 +432,7 @@ int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt, struct file *fp)
uctxt->ctxt);
goto pq_txreq_nomem;
}
- fd->pq = pq;
+
cq = kzalloc(sizeof(*cq), GFP_KERNEL);
if (!cq)
goto cq_nomem;
@@ -449,20 +443,25 @@ int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt, struct file *fp)
goto cq_comps_nomem;
cq->nentries = hfi1_sdma_comp_ring_size;
- fd->cq = cq;
ret = hfi1_mmu_rb_register(pq, pq->mm, &sdma_rb_ops, dd->pport->hfi1_wq,
&pq->handler);
if (ret) {
dd_dev_err(dd, "Failed to register with MMU %d", ret);
- goto done;
+ goto pq_mmu_fail;
}
+ fd->pq = pq;
+ fd->cq = cq;
+
spin_lock_irqsave(&uctxt->sdma_qlock, flags);
list_add(&pq->list, &uctxt->sdma_queues);
spin_unlock_irqrestore(&uctxt->sdma_qlock, flags);
- goto done;
+ return 0;
+
+pq_mmu_fail:
+ vfree(cq->comps);
cq_comps_nomem:
kfree(cq);
cq_nomem:
@@ -473,10 +472,7 @@ pq_reqs_no_in_use:
kfree(pq->reqs);
pq_reqs_nomem:
kfree(pq);
- fd->pq = NULL;
-pq_nomem:
- ret = -ENOMEM;
-done:
+
return ret;
}
@@ -536,11 +532,11 @@ static u8 dlid_to_selector(u16 dlid)
return mapping[hash];
}
-int hfi1_user_sdma_process_request(struct file *fp, struct iovec *iovec,
- unsigned long dim, unsigned long *count)
+int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
+ struct iovec *iovec, unsigned long dim,
+ unsigned long *count)
{
int ret = 0, i;
- struct hfi1_filedata *fd = fp->private_data;
struct hfi1_ctxtdata *uctxt = fd->uctxt;
struct hfi1_user_sdma_pkt_q *pq = fd->pq;
struct hfi1_user_sdma_comp_q *cq = fd->cq;
@@ -616,6 +612,7 @@ int hfi1_user_sdma_process_request(struct file *fp, struct iovec *iovec,
req->pq = pq;
req->cq = cq;
req->status = -1;
+ req->ahg_idx = -1;
INIT_LIST_HEAD(&req->txps);
memcpy(&req->info, &info, sizeof(info));
@@ -766,14 +763,8 @@ int hfi1_user_sdma_process_request(struct file *fp, struct iovec *iovec,
}
/* We don't need an AHG entry if the request contains only one packet */
- if (req->info.npkts > 1 && HFI1_CAP_IS_USET(SDMA_AHG)) {
- int ahg = sdma_ahg_alloc(req->sde);
-
- if (likely(ahg >= 0)) {
- req->ahg_idx = (u8)ahg;
- set_bit(SDMA_REQ_HAVE_AHG, &req->flags);
- }
- }
+ if (req->info.npkts > 1 && HFI1_CAP_IS_USET(SDMA_AHG))
+ req->ahg_idx = sdma_ahg_alloc(req->sde);
set_comp_state(pq, cq, info.comp_idx, QUEUED, 0);
atomic_inc(&pq->n_reqs);
@@ -991,7 +982,7 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts)
}
}
- if (test_bit(SDMA_REQ_HAVE_AHG, &req->flags)) {
+ if (req->ahg_idx >= 0) {
if (!req->seqnum) {
u16 pbclen = le16_to_cpu(req->hdr.pbc[0]);
u32 lrhlen = get_lrh_len(req->hdr,
@@ -1121,7 +1112,7 @@ dosend:
* happen due to the sequential manner in which
* descriptors are processed.
*/
- if (test_bit(SDMA_REQ_HAVE_AHG, &req->flags))
+ if (req->ahg_idx >= 0)
sdma_ahg_free(req->sde, req->ahg_idx);
}
return ret;
@@ -1323,6 +1314,7 @@ static int set_txreq_header(struct user_sdma_request *req,
{
struct hfi1_user_sdma_pkt_q *pq = req->pq;
struct hfi1_pkt_header *hdr = &tx->hdr;
+ u8 omfactor; /* KDETH.OM */
u16 pbclen;
int ret;
u32 tidval = 0, lrhlen = get_lrh_len(*hdr, pad_len(datalen));
@@ -1400,8 +1392,9 @@ static int set_txreq_header(struct user_sdma_request *req,
}
tidval = req->tids[req->tididx];
}
- req->omfactor = EXP_TID_GET(tidval, LEN) * PAGE_SIZE >=
- KDETH_OM_MAX_SIZE ? KDETH_OM_LARGE : KDETH_OM_SMALL;
+ omfactor = EXP_TID_GET(tidval, LEN) * PAGE_SIZE >=
+ KDETH_OM_MAX_SIZE ? KDETH_OM_LARGE_SHIFT :
+ KDETH_OM_SMALL_SHIFT;
/* Set KDETH.TIDCtrl based on value for this TID. */
KDETH_SET(hdr->kdeth.ver_tid_offset, TIDCTRL,
EXP_TID_GET(tidval, CTRL));
@@ -1416,12 +1409,12 @@ static int set_txreq_header(struct user_sdma_request *req,
* transfer.
*/
SDMA_DBG(req, "TID offset %ubytes %uunits om%u",
- req->tidoffset, req->tidoffset / req->omfactor,
- req->omfactor != KDETH_OM_SMALL);
+ req->tidoffset, req->tidoffset >> omfactor,
+ omfactor != KDETH_OM_SMALL_SHIFT);
KDETH_SET(hdr->kdeth.ver_tid_offset, OFFSET,
- req->tidoffset / req->omfactor);
+ req->tidoffset >> omfactor);
KDETH_SET(hdr->kdeth.ver_tid_offset, OM,
- req->omfactor != KDETH_OM_SMALL);
+ omfactor != KDETH_OM_SMALL_SHIFT);
}
done:
trace_hfi1_sdma_user_header(pq->dd, pq->ctxt, pq->subctxt,
@@ -1433,6 +1426,7 @@ static int set_txreq_header_ahg(struct user_sdma_request *req,
struct user_sdma_txreq *tx, u32 len)
{
int diff = 0;
+ u8 omfactor; /* KDETH.OM */
struct hfi1_user_sdma_pkt_q *pq = req->pq;
struct hfi1_pkt_header *hdr = &req->hdr;
u16 pbclen = le16_to_cpu(hdr->pbc[0]);
@@ -1484,14 +1478,15 @@ static int set_txreq_header_ahg(struct user_sdma_request *req,
}
tidval = req->tids[req->tididx];
}
- req->omfactor = ((EXP_TID_GET(tidval, LEN) *
+ omfactor = ((EXP_TID_GET(tidval, LEN) *
PAGE_SIZE) >=
- KDETH_OM_MAX_SIZE) ? KDETH_OM_LARGE :
- KDETH_OM_SMALL;
+ KDETH_OM_MAX_SIZE) ? KDETH_OM_LARGE_SHIFT :
+ KDETH_OM_SMALL_SHIFT;
/* KDETH.OM and KDETH.OFFSET (TID) */
AHG_HEADER_SET(req->ahg, diff, 7, 0, 16,
- ((!!(req->omfactor - KDETH_OM_SMALL)) << 15 |
- ((req->tidoffset / req->omfactor) & 0x7fff)));
+ ((!!(omfactor - KDETH_OM_SMALL_SHIFT)) << 15 |
+ ((req->tidoffset >> omfactor)
+ & 0x7fff)));
/* KDETH.TIDCtrl, KDETH.TID, KDETH.Intr, KDETH.SH */
val = cpu_to_le16(((EXP_TID_GET(tidval, CTRL) & 0x3) << 10) |
(EXP_TID_GET(tidval, IDX) & 0x3ff));
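The KDETH.OM rework in user_sdma.c above stops caching the multiplier (4 or 64) in the request and derives a shift (2 or 6) locally instead, so the divisions become shifts. The equivalence could be pinned down with compile-time checks like the following (not part of this patch):

	BUILD_BUG_ON(KDETH_OM_SMALL != (1 << KDETH_OM_SMALL_SHIFT));
	BUILD_BUG_ON(KDETH_OM_LARGE != (1 << KDETH_OM_LARGE_SHIFT));

	/* e.g. tidoffset = 0x2000: 0x2000 / 64 == 0x2000 >> 6 == 128 */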
diff --git a/drivers/infiniband/hw/hfi1/user_sdma.h b/drivers/infiniband/hw/hfi1/user_sdma.h
index 39001714f551..e5b10aefe212 100644
--- a/drivers/infiniband/hw/hfi1/user_sdma.h
+++ b/drivers/infiniband/hw/hfi1/user_sdma.h
@@ -1,5 +1,7 @@
+#ifndef _HFI1_USER_SDMA_H
+#define _HFI1_USER_SDMA_H
/*
- * Copyright(c) 2015, 2016 Intel Corporation.
+ * Copyright(c) 2015 - 2017 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -56,7 +58,7 @@ extern uint extended_psn;
struct hfi1_user_sdma_pkt_q {
struct list_head list;
unsigned ctxt;
- unsigned subctxt;
+ u16 subctxt;
u16 n_max_reqs;
atomic_t n_reqs;
u16 reqidx;
@@ -78,7 +80,11 @@ struct hfi1_user_sdma_comp_q {
struct hfi1_sdma_comp_entry *comps;
};
-int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *, struct file *);
-int hfi1_user_sdma_free_queues(struct hfi1_filedata *);
-int hfi1_user_sdma_process_request(struct file *, struct iovec *, unsigned long,
- unsigned long *);
+int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt,
+ struct hfi1_filedata *fd);
+int hfi1_user_sdma_free_queues(struct hfi1_filedata *fd);
+int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
+ struct iovec *iovec, unsigned long dim,
+ unsigned long *count);
+
+#endif /* _HFI1_USER_SDMA_H */
diff --git a/drivers/infiniband/hw/hfi1/verbs.h b/drivers/infiniband/hw/hfi1/verbs.h
index 52ff275caf54..cd635d0c1d3b 100644
--- a/drivers/infiniband/hw/hfi1/verbs.h
+++ b/drivers/infiniband/hw/hfi1/verbs.h
@@ -125,7 +125,6 @@ struct hfi1_qp_priv {
struct sdma_engine *s_sde; /* current sde */
struct send_context *s_sendcontext; /* current sendcontext */
u8 s_sc; /* SC[0..4] for next packet */
- u8 r_adefered; /* number of acks defered */
struct iowait s_iowait;
struct rvt_qp *owner;
};
@@ -140,6 +139,10 @@ struct hfi1_pkt_state {
struct hfi1_pportdata *ppd;
struct verbs_txreq *s_txreq;
unsigned long flags;
+ unsigned long timeout;
+ unsigned long timeout_int;
+ int cpu;
+ bool in_thread;
};
#define HFI1_PSN_CREDIT 16
diff --git a/drivers/infiniband/hw/hfi1/vnic_main.c b/drivers/infiniband/hw/hfi1/vnic_main.c
index 392f4d57f3e3..b601c2929f8f 100644
--- a/drivers/infiniband/hw/hfi1/vnic_main.c
+++ b/drivers/infiniband/hw/hfi1/vnic_main.c
@@ -67,9 +67,7 @@ static int setup_vnic_ctxt(struct hfi1_devdata *dd, struct hfi1_ctxtdata *uctxt)
unsigned int rcvctrl_ops = 0;
int ret;
- ret = hfi1_init_ctxt(uctxt->sc);
- if (ret)
- goto done;
+ hfi1_init_ctxt(uctxt->sc);
uctxt->do_interrupt = &handle_receive_interrupt;
@@ -82,8 +80,6 @@ static int setup_vnic_ctxt(struct hfi1_devdata *dd, struct hfi1_ctxtdata *uctxt)
if (ret)
goto done;
- set_bit(HFI1_CTXT_SETUP_DONE, &uctxt->event_flags);
-
if (uctxt->rcvhdrtail_kvaddr)
clear_rcvhdrtail(uctxt);
@@ -209,7 +205,7 @@ static void deallocate_vnic_ctxt(struct hfi1_devdata *dd,
uctxt->event_flags = 0;
hfi1_clear_tids(uctxt);
- hfi1_clear_ctxt_pkey(dd, uctxt->ctxt);
+ hfi1_clear_ctxt_pkey(dd, uctxt);
hfi1_stats.sps_ctxts--;
hfi1_free_ctxtdata(dd, uctxt);
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
index 014c8262bfff..37d5d29597a4 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
@@ -1721,7 +1721,7 @@ int hns_roce_v1_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
roce_set_field(mpt_entry->mpt_byte_64, MPT_BYTE_64_L_KEY_IDX_H_M,
MPT_BYTE_64_L_KEY_IDX_H_S, mtpt_idx >> MTPT_IDX_SHIFT);
- /* DMA momery regsiter */
+ /* DMA memory register */
if (mr->type == MR_TYPE_DMA)
return 0;
diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c
index dc5c97c8f070..80fc01ffd8bd 100644
--- a/drivers/infiniband/hw/hns/hns_roce_mr.c
+++ b/drivers/infiniband/hw/hns/hns_roce_mr.c
@@ -205,7 +205,7 @@ int hns_roce_mtt_init(struct hns_roce_dev *hr_dev, int npages, int page_shift,
return 0;
}
- /* Note: if page_shift is zero, FAST memory regsiter */
+ /* Note: if page_shift is zero, FAST memory register */
mtt->page_shift = page_shift;
/* Compute MTT entry necessary */
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 9f3ba320ce70..d45772da0963 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -3530,6 +3530,26 @@ static int mlx5_ib_get_hw_stats(struct ib_device *ibdev,
return num_counters;
}
+static struct net_device*
+mlx5_ib_alloc_rdma_netdev(struct ib_device *hca,
+ u8 port_num,
+ enum rdma_netdev_t type,
+ const char *name,
+ unsigned char name_assign_type,
+ void (*setup)(struct net_device *))
+{
+ if (type != RDMA_NETDEV_IPOIB)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ return mlx5_rdma_netdev_alloc(to_mdev(hca)->mdev, hca,
+ name, setup);
+}
+
+static void mlx5_ib_free_rdma_netdev(struct net_device *netdev)
+{
+ return mlx5_rdma_netdev_free(netdev);
+}
+
static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
{
struct mlx5_ib_dev *dev;
@@ -3660,6 +3680,8 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
dev->ib_dev.check_mr_status = mlx5_ib_check_mr_status;
dev->ib_dev.get_port_immutable = mlx5_port_immutable;
dev->ib_dev.get_dev_fw_str = get_dev_fw_str;
+ dev->ib_dev.alloc_rdma_netdev = mlx5_ib_alloc_rdma_netdev;
+ dev->ib_dev.free_rdma_netdev = mlx5_ib_free_rdma_netdev;
if (mlx5_core_is_pf(mdev)) {
dev->ib_dev.get_vf_config = mlx5_ib_get_vf_config;
dev->ib_dev.set_vf_link_state = mlx5_ib_set_vf_link_state;
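mlx5 now exposes the rdma_netdev hooks, accepting only RDMA_NETDEV_IPOIB and returning -EOPNOTSUPP otherwise. A hedged caller sketch (the IPoIB-side plumbing is not in this hunk; ipoib_setup is an illustrative callback name):

	struct net_device *rn = NULL;

	if (hca->alloc_rdma_netdev) {
		rn = hca->alloc_rdma_netdev(hca, port_num, RDMA_NETDEV_IPOIB,
					    name, NET_NAME_UNKNOWN, ipoib_setup);
		if (IS_ERR(rn))
			rn = NULL;	/* fall back to the software datapath */
	}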
diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c
index ef11e770f822..6a72095d6c7a 100644
--- a/drivers/infiniband/hw/qedr/main.c
+++ b/drivers/infiniband/hw/qedr/main.c
@@ -35,6 +35,7 @@
#include <rdma/ib_user_verbs.h>
#include <linux/netdevice.h>
#include <linux/iommu.h>
+#include <linux/pci.h>
#include <net/addrconf.h>
#include <linux/qed/qede_roce.h>
#include <linux/qed/qed_chain.h>
diff --git a/drivers/infiniband/hw/qib/qib_fs.c b/drivers/infiniband/hw/qib/qib_fs.c
index f1e66efea98a..1d940a2885c9 100644
--- a/drivers/infiniband/hw/qib/qib_fs.c
+++ b/drivers/infiniband/hw/qib/qib_fs.c
@@ -512,7 +512,7 @@ static int qibfs_fill_super(struct super_block *sb, void *data, int silent)
unsigned long flags;
int ret;
- static struct tree_descr files[] = {
+ static const struct tree_descr files[] = {
[2] = {"driver_stats", &driver_ops[0], S_IRUGO},
[3] = {"driver_stats_names", &driver_ops[1], S_IRUGO},
{""},
diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
index ced15c4446bd..e37cc89987e1 100644
--- a/drivers/infiniband/sw/rxe/rxe_mr.c
+++ b/drivers/infiniband/sw/rxe/rxe_mr.c
@@ -368,7 +368,7 @@ int rxe_mem_copy(struct rxe_mem *mem, u64 iova, void *addr, int length,
((void *)(uintptr_t)iova) : addr;
if (crcp)
- crc = rxe_crc32(to_rdev(mem->pd->ibpd.device),
+ *crcp = rxe_crc32(to_rdev(mem->pd->ibpd.device),
*crcp, src, length);
memcpy(dest, src, length);
diff --git a/drivers/infiniband/sw/rxe/rxe_param.h b/drivers/infiniband/sw/rxe/rxe_param.h
index 13ed2cc6eaa2..1b596fbbe251 100644
--- a/drivers/infiniband/sw/rxe/rxe_param.h
+++ b/drivers/infiniband/sw/rxe/rxe_param.h
@@ -114,7 +114,6 @@ enum rxe_device_param {
RXE_MAX_UCONTEXT = 512,
RXE_NUM_PORT = 1,
- RXE_NUM_COMP_VECTORS = 1,
RXE_MIN_QP_INDEX = 16,
RXE_MAX_QP_INDEX = 0x00020000,
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index 299b0f8423f2..83d709e74dfb 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -1239,7 +1239,7 @@ int rxe_register_device(struct rxe_dev *rxe)
dev->owner = THIS_MODULE;
dev->node_type = RDMA_NODE_IB_CA;
dev->phys_port_cnt = 1;
- dev->num_comp_vectors = RXE_NUM_COMP_VECTORS;
+ dev->num_comp_vectors = num_possible_cpus();
dev->dev.parent = rxe_dma_device(rxe);
dev->local_dma_lkey = 0;
addrconf_addr_eui48((unsigned char *)&dev->node_guid,
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
index 379c02fb4181..874b24366e4d 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
@@ -155,7 +155,66 @@ static int ipoib_get_sset_count(struct net_device __always_unused *dev,
return -EOPNOTSUPP;
}
+/* Return lane speed in units of 1e6 bit/sec */
+static inline int ib_speed_enum_to_int(int speed)
+{
+ switch (speed) {
+ case IB_SPEED_SDR:
+ return SPEED_2500;
+ case IB_SPEED_DDR:
+ return SPEED_5000;
+ case IB_SPEED_QDR:
+ case IB_SPEED_FDR10:
+ return SPEED_10000;
+ case IB_SPEED_FDR:
+ return SPEED_14000;
+ case IB_SPEED_EDR:
+ return SPEED_25000;
+ }
+
+ return SPEED_UNKNOWN;
+}
+
+static int ipoib_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct ipoib_dev_priv *priv = netdev_priv(netdev);
+ struct ib_port_attr attr;
+ int ret, speed, width;
+
+ if (!netif_carrier_ok(netdev)) {
+ cmd->base.speed = SPEED_UNKNOWN;
+ cmd->base.duplex = DUPLEX_UNKNOWN;
+ return 0;
+ }
+
+ ret = ib_query_port(priv->ca, priv->port, &attr);
+ if (ret < 0)
+ return -EINVAL;
+
+ speed = ib_speed_enum_to_int(attr.active_speed);
+ width = ib_width_enum_to_int(attr.active_width);
+
+ if (speed < 0 || width < 0)
+ return -EINVAL;
+
+ /* Apart from the fields set below, all other members of
+ * struct ethtool_link_settings have already been zeroed
+ * by __ethtool_get_link_ksettings().
+ */
+ cmd->base.speed = speed * width;
+ cmd->base.duplex = DUPLEX_FULL;
+
+ cmd->base.phy_address = 0xFF;
+
+ cmd->base.autoneg = AUTONEG_ENABLE;
+ cmd->base.port = PORT_OTHER;
+
+ return 0;
+}
+
static const struct ethtool_ops ipoib_ethtool_ops = {
+ .get_link_ksettings = ipoib_get_link_ksettings,
.get_drvinfo = ipoib_get_drvinfo,
.get_coalesce = ipoib_get_coalesce,
.set_coalesce = ipoib_set_coalesce,
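ipoib_get_link_ksettings() reports the aggregate link speed as the per-lane speed multiplied by the link width. A worked example, assuming a 4x QDR port:

	int speed = ib_speed_enum_to_int(IB_SPEED_QDR); /* 10000 Mb/s per lane */
	int width = ib_width_enum_to_int(IB_WIDTH_4X);  /* 4 lanes */

	cmd->base.speed = speed * width;                /* 40000, i.e. 40 Gb/s */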
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index e9ae3d500a55..925571475005 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -1354,8 +1354,6 @@ static void evdev_cleanup(struct evdev *evdev)
evdev_mark_dead(evdev);
evdev_hangup(evdev);
- cdev_del(&evdev->cdev);
-
/* evdev is marked dead so no one else accesses evdev->open */
if (evdev->open) {
input_flush_device(handle, NULL);
@@ -1416,12 +1414,8 @@ static int evdev_connect(struct input_handler *handler, struct input_dev *dev,
goto err_free_evdev;
cdev_init(&evdev->cdev, &evdev_fops);
- evdev->cdev.kobj.parent = &evdev->dev.kobj;
- error = cdev_add(&evdev->cdev, evdev->dev.devt, 1);
- if (error)
- goto err_unregister_handle;
- error = device_add(&evdev->dev);
+ error = cdev_device_add(&evdev->cdev, &evdev->dev);
if (error)
goto err_cleanup_evdev;
@@ -1429,7 +1423,6 @@ static int evdev_connect(struct input_handler *handler, struct input_dev *dev,
err_cleanup_evdev:
evdev_cleanup(evdev);
- err_unregister_handle:
input_unregister_handle(&evdev->handle);
err_free_evdev:
put_device(&evdev->dev);
@@ -1442,7 +1435,7 @@ static void evdev_disconnect(struct input_handle *handle)
{
struct evdev *evdev = handle->private;
- device_del(&evdev->dev);
+ cdev_device_del(&evdev->cdev, &evdev->dev);
evdev_cleanup(evdev);
input_free_minor(MINOR(evdev->dev.devt));
input_unregister_handle(handle);
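evdev (and joydev/mousedev below) move from separate cdev_add()/device_add() calls to the combined cdev_device_add()/cdev_device_del() helpers, which register both objects together and set the cdev's kobject parent internally. The resulting pattern, sketched with illustrative error labels:

	cdev_init(&evdev->cdev, &evdev_fops);

	error = cdev_device_add(&evdev->cdev, &evdev->dev);
	if (error)
		goto err_cleanup_evdev;	/* single failure path for both objects */

	/* ... and on disconnect ... */
	cdev_device_del(&evdev->cdev, &evdev->dev);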
diff --git a/drivers/input/joydev.c b/drivers/input/joydev.c
index 065e67bf56dd..29d677c714d2 100644
--- a/drivers/input/joydev.c
+++ b/drivers/input/joydev.c
@@ -742,8 +742,6 @@ static void joydev_cleanup(struct joydev *joydev)
joydev_mark_dead(joydev);
joydev_hangup(joydev);
- cdev_del(&joydev->cdev);
-
/* joydev is marked dead so no one else accesses joydev->open */
if (joydev->open)
input_close_device(handle);
@@ -913,12 +911,8 @@ static int joydev_connect(struct input_handler *handler, struct input_dev *dev,
goto err_free_joydev;
cdev_init(&joydev->cdev, &joydev_fops);
- joydev->cdev.kobj.parent = &joydev->dev.kobj;
- error = cdev_add(&joydev->cdev, joydev->dev.devt, 1);
- if (error)
- goto err_unregister_handle;
- error = device_add(&joydev->dev);
+ error = cdev_device_add(&joydev->cdev, &joydev->dev);
if (error)
goto err_cleanup_joydev;
@@ -926,7 +920,6 @@ static int joydev_connect(struct input_handler *handler, struct input_dev *dev,
err_cleanup_joydev:
joydev_cleanup(joydev);
- err_unregister_handle:
input_unregister_handle(&joydev->handle);
err_free_joydev:
put_device(&joydev->dev);
@@ -939,7 +932,7 @@ static void joydev_disconnect(struct input_handle *handle)
{
struct joydev *joydev = handle->private;
- device_del(&joydev->dev);
+ cdev_device_del(&joydev->cdev, &joydev->dev);
joydev_cleanup(joydev);
input_free_minor(MINOR(joydev->dev.devt));
input_unregister_handle(handle);
diff --git a/drivers/input/misc/soc_button_array.c b/drivers/input/misc/soc_button_array.c
index f210a3322559..e37d37273182 100644
--- a/drivers/input/misc/soc_button_array.c
+++ b/drivers/input/misc/soc_button_array.c
@@ -320,9 +320,10 @@ static int soc_button_probe(struct platform_device *pdev)
button_info = (struct soc_button_info *)id->driver_data;
}
- if (gpiod_count(dev, NULL) <= 0) {
+ error = gpiod_count(dev, NULL);
+ if (error < 0) {
dev_dbg(dev, "no GPIO attached, ignoring...\n");
- return -ENODEV;
+ return error;
}
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
index b604564dec5c..0e0ff84088fd 100644
--- a/drivers/input/mousedev.c
+++ b/drivers/input/mousedev.c
@@ -812,8 +812,6 @@ static void mousedev_cleanup(struct mousedev *mousedev)
mousedev_mark_dead(mousedev);
mousedev_hangup(mousedev);
- cdev_del(&mousedev->cdev);
-
/* mousedev is marked dead so no one else accesses mousedev->open */
if (mousedev->open)
input_close_device(handle);
@@ -901,12 +899,8 @@ static struct mousedev *mousedev_create(struct input_dev *dev,
}
cdev_init(&mousedev->cdev, &mousedev_fops);
- mousedev->cdev.kobj.parent = &mousedev->dev.kobj;
- error = cdev_add(&mousedev->cdev, mousedev->dev.devt, 1);
- if (error)
- goto err_unregister_handle;
- error = device_add(&mousedev->dev);
+ error = cdev_device_add(&mousedev->cdev, &mousedev->dev);
if (error)
goto err_cleanup_mousedev;
@@ -914,7 +908,6 @@ static struct mousedev *mousedev_create(struct input_dev *dev,
err_cleanup_mousedev:
mousedev_cleanup(mousedev);
- err_unregister_handle:
if (!mixdev)
input_unregister_handle(&mousedev->handle);
err_free_mousedev:
@@ -927,7 +920,7 @@ static struct mousedev *mousedev_create(struct input_dev *dev,
static void mousedev_destroy(struct mousedev *mousedev)
{
- device_del(&mousedev->dev);
+ cdev_device_del(&mousedev->cdev, &mousedev->dev);
mousedev_cleanup(mousedev);
input_free_minor(MINOR(mousedev->dev.devt));
if (mousedev != mousedev_mix)
diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c
index 063343909b0d..6629c472eafd 100644
--- a/drivers/iommu/amd_iommu_v2.c
+++ b/drivers/iommu/amd_iommu_v2.c
@@ -696,9 +696,9 @@ out_clear_state:
out_unregister:
mmu_notifier_unregister(&pasid_state->mn, mm);
+ mmput(mm);
out_free:
- mmput(mm);
free_pasid_state(pasid_state);
out:
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index 591bb96047c9..380969aa60d5 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -554,9 +554,14 @@ struct arm_smmu_s2_cfg {
};
struct arm_smmu_strtab_ent {
- bool valid;
-
- bool bypass; /* Overrides s1/s2 config */
+ /*
+ * An STE is "assigned" if the master emitting the corresponding SID
+ * is attached to a domain. The behaviour of an unassigned STE is
+ * determined by the disable_bypass parameter, whereas an assigned
+ * STE behaves according to s1_cfg/s2_cfg, which themselves are
+ * configured according to the domain type.
+ */
+ bool assigned;
struct arm_smmu_s1_cfg *s1_cfg;
struct arm_smmu_s2_cfg *s2_cfg;
};
@@ -632,6 +637,7 @@ enum arm_smmu_domain_stage {
ARM_SMMU_DOMAIN_S1 = 0,
ARM_SMMU_DOMAIN_S2,
ARM_SMMU_DOMAIN_NESTED,
+ ARM_SMMU_DOMAIN_BYPASS,
};
struct arm_smmu_domain {
@@ -1005,9 +1011,9 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
* This is hideously complicated, but we only really care about
* three cases at the moment:
*
- * 1. Invalid (all zero) -> bypass (init)
- * 2. Bypass -> translation (attach)
- * 3. Translation -> bypass (detach)
+ * 1. Invalid (all zero) -> bypass/fault (init)
+ * 2. Bypass/fault -> translation/bypass (attach)
+ * 3. Translation/bypass -> bypass/fault (detach)
*
* Given that we can't update the STE atomically and the SMMU
* doesn't read the thing in a defined order, that leaves us
@@ -1046,11 +1052,15 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
}
/* Nuke the existing STE_0 value, as we're going to rewrite it */
- val = ste->valid ? STRTAB_STE_0_V : 0;
+ val = STRTAB_STE_0_V;
+
+ /* Bypass/fault */
+ if (!ste->assigned || !(ste->s1_cfg || ste->s2_cfg)) {
+ if (!ste->assigned && disable_bypass)
+ val |= STRTAB_STE_0_CFG_ABORT;
+ else
+ val |= STRTAB_STE_0_CFG_BYPASS;
- if (ste->bypass) {
- val |= disable_bypass ? STRTAB_STE_0_CFG_ABORT
- : STRTAB_STE_0_CFG_BYPASS;
dst[0] = cpu_to_le64(val);
dst[1] = cpu_to_le64(STRTAB_STE_1_SHCFG_INCOMING
<< STRTAB_STE_1_SHCFG_SHIFT);
@@ -1111,10 +1121,7 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
static void arm_smmu_init_bypass_stes(u64 *strtab, unsigned int nent)
{
unsigned int i;
- struct arm_smmu_strtab_ent ste = {
- .valid = true,
- .bypass = true,
- };
+ struct arm_smmu_strtab_ent ste = { .assigned = false };
for (i = 0; i < nent; ++i) {
arm_smmu_write_strtab_ent(NULL, -1, strtab, &ste);
@@ -1378,7 +1385,9 @@ static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
struct arm_smmu_domain *smmu_domain;
- if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
+ if (type != IOMMU_DOMAIN_UNMANAGED &&
+ type != IOMMU_DOMAIN_DMA &&
+ type != IOMMU_DOMAIN_IDENTITY)
return NULL;
/*
@@ -1509,6 +1518,11 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain)
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
struct arm_smmu_device *smmu = smmu_domain->smmu;
+ if (domain->type == IOMMU_DOMAIN_IDENTITY) {
+ smmu_domain->stage = ARM_SMMU_DOMAIN_BYPASS;
+ return 0;
+ }
+
/* Restrict the stage to what we can actually support */
if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
@@ -1579,7 +1593,7 @@ static __le64 *arm_smmu_get_step_for_sid(struct arm_smmu_device *smmu, u32 sid)
return step;
}
-static int arm_smmu_install_ste_for_dev(struct iommu_fwspec *fwspec)
+static void arm_smmu_install_ste_for_dev(struct iommu_fwspec *fwspec)
{
int i;
struct arm_smmu_master_data *master = fwspec->iommu_priv;
@@ -1591,17 +1605,14 @@ static int arm_smmu_install_ste_for_dev(struct iommu_fwspec *fwspec)
arm_smmu_write_strtab_ent(smmu, sid, step, &master->ste);
}
-
- return 0;
}
static void arm_smmu_detach_dev(struct device *dev)
{
struct arm_smmu_master_data *master = dev->iommu_fwspec->iommu_priv;
- master->ste.bypass = true;
- if (arm_smmu_install_ste_for_dev(dev->iommu_fwspec) < 0)
- dev_warn(dev, "failed to install bypass STE\n");
+ master->ste.assigned = false;
+ arm_smmu_install_ste_for_dev(dev->iommu_fwspec);
}
static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
@@ -1620,7 +1631,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
ste = &master->ste;
/* Already attached to a different domain? */
- if (!ste->bypass)
+ if (ste->assigned)
arm_smmu_detach_dev(dev);
mutex_lock(&smmu_domain->init_mutex);
@@ -1641,10 +1652,12 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
goto out_unlock;
}
- ste->bypass = false;
- ste->valid = true;
+ ste->assigned = true;
- if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
+ if (smmu_domain->stage == ARM_SMMU_DOMAIN_BYPASS) {
+ ste->s1_cfg = NULL;
+ ste->s2_cfg = NULL;
+ } else if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
ste->s1_cfg = &smmu_domain->s1_cfg;
ste->s2_cfg = NULL;
arm_smmu_write_ctx_desc(smmu, ste->s1_cfg);
@@ -1653,10 +1666,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
ste->s2_cfg = &smmu_domain->s2_cfg;
}
- ret = arm_smmu_install_ste_for_dev(dev->iommu_fwspec);
- if (ret < 0)
- ste->valid = false;
-
+ arm_smmu_install_ste_for_dev(dev->iommu_fwspec);
out_unlock:
mutex_unlock(&smmu_domain->init_mutex);
return ret;
@@ -1704,6 +1714,9 @@ arm_smmu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
+ if (domain->type == IOMMU_DOMAIN_IDENTITY)
+ return iova;
+
if (!ops)
return 0;
@@ -1807,7 +1820,7 @@ static void arm_smmu_remove_device(struct device *dev)
master = fwspec->iommu_priv;
smmu = master->smmu;
- if (master && master->ste.valid)
+ if (master && master->ste.assigned)
arm_smmu_detach_dev(dev);
iommu_group_remove_device(dev);
iommu_device_unlink(&smmu->iommu, dev);
@@ -1837,6 +1850,9 @@ static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
{
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+ if (domain->type != IOMMU_DOMAIN_UNMANAGED)
+ return -EINVAL;
+
switch (attr) {
case DOMAIN_ATTR_NESTING:
*(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
@@ -1852,6 +1868,9 @@ static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
int ret = 0;
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+ if (domain->type != IOMMU_DOMAIN_UNMANAGED)
+ return -EINVAL;
+
mutex_lock(&smmu_domain->init_mutex);
switch (attr) {
@@ -1893,6 +1912,8 @@ static void arm_smmu_get_resv_regions(struct device *dev,
return;
list_add_tail(&region->list, head);
+
+ iommu_dma_get_resv_regions(dev, head);
}
static void arm_smmu_put_resv_regions(struct device *dev,
@@ -2761,51 +2782,9 @@ static struct platform_driver arm_smmu_driver = {
.probe = arm_smmu_device_probe,
.remove = arm_smmu_device_remove,
};
+module_platform_driver(arm_smmu_driver);
-static int __init arm_smmu_init(void)
-{
- static bool registered;
- int ret = 0;
-
- if (!registered) {
- ret = platform_driver_register(&arm_smmu_driver);
- registered = !ret;
- }
- return ret;
-}
-
-static void __exit arm_smmu_exit(void)
-{
- return platform_driver_unregister(&arm_smmu_driver);
-}
-
-subsys_initcall(arm_smmu_init);
-module_exit(arm_smmu_exit);
-
-static int __init arm_smmu_of_init(struct device_node *np)
-{
- int ret = arm_smmu_init();
-
- if (ret)
- return ret;
-
- if (!of_platform_device_create(np, NULL, platform_bus_type.dev_root))
- return -ENODEV;
-
- return 0;
-}
-IOMMU_OF_DECLARE(arm_smmuv3, "arm,smmu-v3", arm_smmu_of_init);
-
-#ifdef CONFIG_ACPI
-static int __init acpi_smmu_v3_init(struct acpi_table_header *table)
-{
- if (iort_node_match(ACPI_IORT_NODE_SMMU_V3))
- return arm_smmu_init();
-
- return 0;
-}
-IORT_ACPI_DECLARE(arm_smmu_v3, ACPI_SIG_IORT, acpi_smmu_v3_init);
-#endif
+IOMMU_OF_DECLARE(arm_smmuv3, "arm,smmu-v3", NULL);
MODULE_DESCRIPTION("IOMMU API for ARM architected SMMUv3 implementations");
MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
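The SMMUv3 rework collapses the old valid/bypass pair into a single "assigned" flag plus the s1_cfg/s2_cfg pointers. How arm_smmu_write_strtab_ent() now resolves an STE can be summarized as follows (summary only, not code from the patch):

/*
 *   !assigned,  disable_bypass    -> STRTAB_STE_0_CFG_ABORT (fault)
 *   !assigned, !disable_bypass    -> STRTAB_STE_0_CFG_BYPASS
 *    assigned,  no s1_cfg/s2_cfg  -> bypass (IOMMU_DOMAIN_IDENTITY domain)
 *    assigned,  s1_cfg set        -> stage-1 translation
 *    assigned,  s2_cfg set        -> stage-2 translation
 */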
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index b493c99e17f7..7ec30b08b3bd 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -162,6 +162,7 @@
#define ARM_SMMU_GR0_sTLBGSTATUS 0x74
#define sTLBGSTATUS_GSACTIVE (1 << 0)
#define TLB_LOOP_TIMEOUT 1000000 /* 1s! */
+#define TLB_SPIN_COUNT 10
/* Stream mapping registers */
#define ARM_SMMU_GR0_SMR(n) (0x800 + ((n) << 2))
@@ -216,8 +217,7 @@ enum arm_smmu_s2cr_privcfg {
#define CBA2R_VMID_MASK 0xffff
/* Translation context bank */
-#define ARM_SMMU_CB_BASE(smmu) ((smmu)->base + ((smmu)->size >> 1))
-#define ARM_SMMU_CB(smmu, n) ((n) * (1 << (smmu)->pgshift))
+#define ARM_SMMU_CB(smmu, n) ((smmu)->cb_base + ((n) << (smmu)->pgshift))
#define ARM_SMMU_CB_SCTLR 0x0
#define ARM_SMMU_CB_ACTLR 0x4
@@ -238,6 +238,8 @@ enum arm_smmu_s2cr_privcfg {
#define ARM_SMMU_CB_S1_TLBIVAL 0x620
#define ARM_SMMU_CB_S2_TLBIIPAS2 0x630
#define ARM_SMMU_CB_S2_TLBIIPAS2L 0x638
+#define ARM_SMMU_CB_TLBSYNC 0x7f0
+#define ARM_SMMU_CB_TLBSTATUS 0x7f4
#define ARM_SMMU_CB_ATS1PR 0x800
#define ARM_SMMU_CB_ATSR 0x8f0
@@ -344,7 +346,7 @@ struct arm_smmu_device {
struct device *dev;
void __iomem *base;
- unsigned long size;
+ void __iomem *cb_base;
unsigned long pgshift;
#define ARM_SMMU_FEAT_COHERENT_WALK (1 << 0)
@@ -404,18 +406,20 @@ enum arm_smmu_context_fmt {
struct arm_smmu_cfg {
u8 cbndx;
u8 irptndx;
+ union {
+ u16 asid;
+ u16 vmid;
+ };
u32 cbar;
enum arm_smmu_context_fmt fmt;
};
#define INVALID_IRPTNDX 0xff
-#define ARM_SMMU_CB_ASID(smmu, cfg) ((u16)(smmu)->cavium_id_base + (cfg)->cbndx)
-#define ARM_SMMU_CB_VMID(smmu, cfg) ((u16)(smmu)->cavium_id_base + (cfg)->cbndx + 1)
-
enum arm_smmu_domain_stage {
ARM_SMMU_DOMAIN_S1 = 0,
ARM_SMMU_DOMAIN_S2,
ARM_SMMU_DOMAIN_NESTED,
+ ARM_SMMU_DOMAIN_BYPASS,
};
struct arm_smmu_domain {
@@ -569,49 +573,67 @@ static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
}
/* Wait for any pending TLB invalidations to complete */
-static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
+static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu,
+ void __iomem *sync, void __iomem *status)
{
- int count = 0;
- void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
-
- writel_relaxed(0, gr0_base + ARM_SMMU_GR0_sTLBGSYNC);
- while (readl_relaxed(gr0_base + ARM_SMMU_GR0_sTLBGSTATUS)
- & sTLBGSTATUS_GSACTIVE) {
- cpu_relax();
- if (++count == TLB_LOOP_TIMEOUT) {
- dev_err_ratelimited(smmu->dev,
- "TLB sync timed out -- SMMU may be deadlocked\n");
- return;
+ unsigned int spin_cnt, delay;
+
+ writel_relaxed(0, sync);
+ for (delay = 1; delay < TLB_LOOP_TIMEOUT; delay *= 2) {
+ for (spin_cnt = TLB_SPIN_COUNT; spin_cnt > 0; spin_cnt--) {
+ if (!(readl_relaxed(status) & sTLBGSTATUS_GSACTIVE))
+ return;
+ cpu_relax();
}
- udelay(1);
+ udelay(delay);
}
+ dev_err_ratelimited(smmu->dev,
+ "TLB sync timed out -- SMMU may be deadlocked\n");
}
-static void arm_smmu_tlb_sync(void *cookie)
+static void arm_smmu_tlb_sync_global(struct arm_smmu_device *smmu)
+{
+ void __iomem *base = ARM_SMMU_GR0(smmu);
+
+ __arm_smmu_tlb_sync(smmu, base + ARM_SMMU_GR0_sTLBGSYNC,
+ base + ARM_SMMU_GR0_sTLBGSTATUS);
+}
+
+static void arm_smmu_tlb_sync_context(void *cookie)
{
struct arm_smmu_domain *smmu_domain = cookie;
- __arm_smmu_tlb_sync(smmu_domain->smmu);
+ struct arm_smmu_device *smmu = smmu_domain->smmu;
+ void __iomem *base = ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx);
+
+ __arm_smmu_tlb_sync(smmu, base + ARM_SMMU_CB_TLBSYNC,
+ base + ARM_SMMU_CB_TLBSTATUS);
}
-static void arm_smmu_tlb_inv_context(void *cookie)
+static void arm_smmu_tlb_sync_vmid(void *cookie)
+{
+ struct arm_smmu_domain *smmu_domain = cookie;
+
+ arm_smmu_tlb_sync_global(smmu_domain->smmu);
+}
+
+static void arm_smmu_tlb_inv_context_s1(void *cookie)
{
struct arm_smmu_domain *smmu_domain = cookie;
struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
- struct arm_smmu_device *smmu = smmu_domain->smmu;
- bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
- void __iomem *base;
+ void __iomem *base = ARM_SMMU_CB(smmu_domain->smmu, cfg->cbndx);
- if (stage1) {
- base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
- writel_relaxed(ARM_SMMU_CB_ASID(smmu, cfg),
- base + ARM_SMMU_CB_S1_TLBIASID);
- } else {
- base = ARM_SMMU_GR0(smmu);
- writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg),
- base + ARM_SMMU_GR0_TLBIVMID);
- }
+ writel_relaxed(cfg->asid, base + ARM_SMMU_CB_S1_TLBIASID);
+ arm_smmu_tlb_sync_context(cookie);
+}
+
+static void arm_smmu_tlb_inv_context_s2(void *cookie)
+{
+ struct arm_smmu_domain *smmu_domain = cookie;
+ struct arm_smmu_device *smmu = smmu_domain->smmu;
+ void __iomem *base = ARM_SMMU_GR0(smmu);
- __arm_smmu_tlb_sync(smmu);
+ writel_relaxed(smmu_domain->cfg.vmid, base + ARM_SMMU_GR0_TLBIVMID);
+ arm_smmu_tlb_sync_global(smmu);
}
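The reworked __arm_smmu_tlb_sync() above polls the status register TLB_SPIN_COUNT times per pass and then backs off with udelay() delays that double on each pass while they stay below TLB_LOOP_TIMEOUT, so the worst-case wait remains close to the original one-second bound:

/*
 * sum of udelay() calls: 1 + 2 + 4 + ... + 524288 us ~= 1048575 us ~= 1.05 s,
 * with TLB_SPIN_COUNT (10) cpu_relax() polls ahead of every udelay().
 */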
static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
@@ -619,31 +641,28 @@ static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
{
struct arm_smmu_domain *smmu_domain = cookie;
struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
- struct arm_smmu_device *smmu = smmu_domain->smmu;
bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
- void __iomem *reg;
+ void __iomem *reg = ARM_SMMU_CB(smmu_domain->smmu, cfg->cbndx);
if (stage1) {
- reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;
if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
iova &= ~12UL;
- iova |= ARM_SMMU_CB_ASID(smmu, cfg);
+ iova |= cfg->asid;
do {
writel_relaxed(iova, reg);
iova += granule;
} while (size -= granule);
} else {
iova >>= 12;
- iova |= (u64)ARM_SMMU_CB_ASID(smmu, cfg) << 48;
+ iova |= (u64)cfg->asid << 48;
do {
writeq_relaxed(iova, reg);
iova += granule >> 12;
} while (size -= granule);
}
- } else if (smmu->version == ARM_SMMU_V2) {
- reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
+ } else {
reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L :
ARM_SMMU_CB_S2_TLBIIPAS2;
iova >>= 12;
@@ -651,16 +670,40 @@ static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
smmu_write_atomic_lq(iova, reg);
iova += granule >> 12;
} while (size -= granule);
- } else {
- reg = ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_TLBIVMID;
- writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg), reg);
}
}
-static const struct iommu_gather_ops arm_smmu_gather_ops = {
- .tlb_flush_all = arm_smmu_tlb_inv_context,
+/*
+ * On MMU-401 at least, the cost of firing off multiple TLBIVMIDs appears
+ * almost negligible, but the benefit of getting the first one in as far ahead
+ * of the sync as possible is significant, hence we don't just make this a
+ * no-op and set .tlb_sync to arm_smmu_tlb_inv_context_s2() as you might think.
+ */
+static void arm_smmu_tlb_inv_vmid_nosync(unsigned long iova, size_t size,
+ size_t granule, bool leaf, void *cookie)
+{
+ struct arm_smmu_domain *smmu_domain = cookie;
+ void __iomem *base = ARM_SMMU_GR0(smmu_domain->smmu);
+
+ writel_relaxed(smmu_domain->cfg.vmid, base + ARM_SMMU_GR0_TLBIVMID);
+}
+
+static const struct iommu_gather_ops arm_smmu_s1_tlb_ops = {
+ .tlb_flush_all = arm_smmu_tlb_inv_context_s1,
.tlb_add_flush = arm_smmu_tlb_inv_range_nosync,
- .tlb_sync = arm_smmu_tlb_sync,
+ .tlb_sync = arm_smmu_tlb_sync_context,
+};
+
+static const struct iommu_gather_ops arm_smmu_s2_tlb_ops_v2 = {
+ .tlb_flush_all = arm_smmu_tlb_inv_context_s2,
+ .tlb_add_flush = arm_smmu_tlb_inv_range_nosync,
+ .tlb_sync = arm_smmu_tlb_sync_context,
+};
+
+static const struct iommu_gather_ops arm_smmu_s2_tlb_ops_v1 = {
+ .tlb_flush_all = arm_smmu_tlb_inv_context_s2,
+ .tlb_add_flush = arm_smmu_tlb_inv_vmid_nosync,
+ .tlb_sync = arm_smmu_tlb_sync_vmid,
};
static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
@@ -673,7 +716,7 @@ static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
struct arm_smmu_device *smmu = smmu_domain->smmu;
void __iomem *cb_base;
- cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
+ cb_base = ARM_SMMU_CB(smmu, cfg->cbndx);
fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
if (!(fsr & FSR_FAULT))
@@ -726,7 +769,7 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
gr1_base = ARM_SMMU_GR1(smmu);
stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
- cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
+ cb_base = ARM_SMMU_CB(smmu, cfg->cbndx);
if (smmu->version > ARM_SMMU_V1) {
if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
@@ -735,7 +778,7 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
reg = CBA2R_RW64_32BIT;
/* 16-bit VMIDs live in CBA2R */
if (smmu->features & ARM_SMMU_FEAT_VMID16)
- reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBA2R_VMID_SHIFT;
+ reg |= cfg->vmid << CBA2R_VMID_SHIFT;
writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
}
@@ -754,34 +797,15 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
(CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
} else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
/* 8-bit VMIDs live in CBAR */
- reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBAR_VMID_SHIFT;
+ reg |= cfg->vmid << CBAR_VMID_SHIFT;
}
writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx));
- /* TTBRs */
- if (stage1) {
- u16 asid = ARM_SMMU_CB_ASID(smmu, cfg);
-
- if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
- reg = pgtbl_cfg->arm_v7s_cfg.ttbr[0];
- writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0);
- reg = pgtbl_cfg->arm_v7s_cfg.ttbr[1];
- writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR1);
- writel_relaxed(asid, cb_base + ARM_SMMU_CB_CONTEXTIDR);
- } else {
- reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
- reg64 |= (u64)asid << TTBRn_ASID_SHIFT;
- writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
- reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
- reg64 |= (u64)asid << TTBRn_ASID_SHIFT;
- writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR1);
- }
- } else {
- reg64 = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
- writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
- }
-
- /* TTBCR */
+ /*
+ * TTBCR
+ * We must write this before the TTBRs, since it determines the
+ * access behaviour of some fields (in particular, ASID[15:8]).
+ */
if (stage1) {
if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
reg = pgtbl_cfg->arm_v7s_cfg.tcr;
@@ -800,6 +824,27 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
}
writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
+ /* TTBRs */
+ if (stage1) {
+ if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
+ reg = pgtbl_cfg->arm_v7s_cfg.ttbr[0];
+ writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0);
+ reg = pgtbl_cfg->arm_v7s_cfg.ttbr[1];
+ writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR1);
+ writel_relaxed(cfg->asid, cb_base + ARM_SMMU_CB_CONTEXTIDR);
+ } else {
+ reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
+ reg64 |= (u64)cfg->asid << TTBRn_ASID_SHIFT;
+ writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
+ reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
+ reg64 |= (u64)cfg->asid << TTBRn_ASID_SHIFT;
+ writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR1);
+ }
+ } else {
+ reg64 = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
+ writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
+ }
+
/* MAIRs (stage-1 only) */
if (stage1) {
if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
@@ -833,11 +878,18 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
enum io_pgtable_fmt fmt;
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
+ const struct iommu_gather_ops *tlb_ops;
mutex_lock(&smmu_domain->init_mutex);
if (smmu_domain->smmu)
goto out_unlock;
+ if (domain->type == IOMMU_DOMAIN_IDENTITY) {
+ smmu_domain->stage = ARM_SMMU_DOMAIN_BYPASS;
+ smmu_domain->smmu = smmu;
+ goto out_unlock;
+ }
+
/*
* Mapping the requested stage onto what we support is surprisingly
* complicated, mainly because the spec allows S1+S2 SMMUs without
@@ -904,6 +956,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
ias = min(ias, 32UL);
oas = min(oas, 32UL);
}
+ tlb_ops = &arm_smmu_s1_tlb_ops;
break;
case ARM_SMMU_DOMAIN_NESTED:
/*
@@ -922,12 +975,15 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
ias = min(ias, 40UL);
oas = min(oas, 40UL);
}
+ if (smmu->version == ARM_SMMU_V2)
+ tlb_ops = &arm_smmu_s2_tlb_ops_v2;
+ else
+ tlb_ops = &arm_smmu_s2_tlb_ops_v1;
break;
default:
ret = -EINVAL;
goto out_unlock;
}
-
ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
smmu->num_context_banks);
if (ret < 0)
@@ -941,11 +997,16 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
cfg->irptndx = cfg->cbndx;
}
+ if (smmu_domain->stage == ARM_SMMU_DOMAIN_S2)
+ cfg->vmid = cfg->cbndx + 1 + smmu->cavium_id_base;
+ else
+ cfg->asid = cfg->cbndx + smmu->cavium_id_base;
+
pgtbl_cfg = (struct io_pgtable_cfg) {
.pgsize_bitmap = smmu->pgsize_bitmap,
.ias = ias,
.oas = oas,
- .tlb = &arm_smmu_gather_ops,
+ .tlb = tlb_ops,
.iommu_dev = smmu->dev,
};
@@ -998,14 +1059,14 @@ static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
void __iomem *cb_base;
int irq;
- if (!smmu)
+ if (!smmu || domain->type == IOMMU_DOMAIN_IDENTITY)
return;
/*
* Disable the context bank and free the page tables before freeing
* it.
*/
- cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
+ cb_base = ARM_SMMU_CB(smmu, cfg->cbndx);
writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
if (cfg->irptndx != INVALID_IRPTNDX) {
@@ -1021,7 +1082,9 @@ static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
struct arm_smmu_domain *smmu_domain;
- if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
+ if (type != IOMMU_DOMAIN_UNMANAGED &&
+ type != IOMMU_DOMAIN_DMA &&
+ type != IOMMU_DOMAIN_IDENTITY)
return NULL;
/*
* Allocate the domain and initialise some of its data structures.
@@ -1250,10 +1313,15 @@ static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
{
struct arm_smmu_device *smmu = smmu_domain->smmu;
struct arm_smmu_s2cr *s2cr = smmu->s2crs;
- enum arm_smmu_s2cr_type type = S2CR_TYPE_TRANS;
u8 cbndx = smmu_domain->cfg.cbndx;
+ enum arm_smmu_s2cr_type type;
int i, idx;
+ if (smmu_domain->stage == ARM_SMMU_DOMAIN_BYPASS)
+ type = S2CR_TYPE_BYPASS;
+ else
+ type = S2CR_TYPE_TRANS;
+
for_each_cfg_sme(fwspec, i, idx) {
if (type == s2cr[idx].type && cbndx == s2cr[idx].cbndx)
continue;
@@ -1356,7 +1424,7 @@ static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
u64 phys;
unsigned long va;
- cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
+ cb_base = ARM_SMMU_CB(smmu, cfg->cbndx);
/* ATS1 registers can only be written atomically */
va = iova & ~0xfffUL;
@@ -1391,6 +1459,9 @@ static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
+ if (domain->type == IOMMU_DOMAIN_IDENTITY)
+ return iova;
+
if (!ops)
return 0;
@@ -1467,7 +1538,7 @@ static int arm_smmu_add_device(struct device *dev)
}
if (mask & ~smmu->smr_mask_mask) {
dev_err(dev, "SMR mask 0x%x out of range for SMMU (0x%x)\n",
- sid, smmu->smr_mask_mask);
+ mask, smmu->smr_mask_mask);
goto out_free;
}
}
@@ -1549,6 +1620,9 @@ static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
{
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+ if (domain->type != IOMMU_DOMAIN_UNMANAGED)
+ return -EINVAL;
+
switch (attr) {
case DOMAIN_ATTR_NESTING:
*(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
@@ -1564,6 +1638,9 @@ static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
int ret = 0;
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+ if (domain->type != IOMMU_DOMAIN_UNMANAGED)
+ return -EINVAL;
+
mutex_lock(&smmu_domain->init_mutex);
switch (attr) {
@@ -1590,13 +1667,15 @@ out_unlock:
static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
{
- u32 fwid = 0;
+ u32 mask, fwid = 0;
if (args->args_count > 0)
fwid |= (u16)args->args[0];
if (args->args_count > 1)
fwid |= (u16)args->args[1] << SMR_MASK_SHIFT;
+ else if (!of_property_read_u32(args->np, "stream-match-mask", &mask))
+ fwid |= (u16)mask << SMR_MASK_SHIFT;
return iommu_fwspec_add_ids(dev, &fwid, 1);
}
@@ -1613,6 +1692,8 @@ static void arm_smmu_get_resv_regions(struct device *dev,
return;
list_add_tail(&region->list, head);
+
+ iommu_dma_get_resv_regions(dev, head);
}
static void arm_smmu_put_resv_regions(struct device *dev,
@@ -1683,7 +1764,7 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
/* Make sure all context banks are disabled and clear CB_FSR */
for (i = 0; i < smmu->num_context_banks; ++i) {
- cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, i);
+ cb_base = ARM_SMMU_CB(smmu, i);
writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR);
/*
@@ -1729,7 +1810,7 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
reg |= sCR0_EXIDENABLE;
/* Push the button */
- __arm_smmu_tlb_sync(smmu);
+ arm_smmu_tlb_sync_global(smmu);
writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
}
@@ -1863,11 +1944,11 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
/* Check for size mismatch of SMMU address space from mapped region */
size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
- size *= 2 << smmu->pgshift;
- if (smmu->size != size)
+ size <<= smmu->pgshift;
+ if (smmu->cb_base != gr0_base + size)
dev_warn(smmu->dev,
- "SMMU address space size (0x%lx) differs from mapped region size (0x%lx)!\n",
- size, smmu->size);
+ "SMMU address space size (0x%lx) differs from mapped region size (0x%tx)!\n",
+ size * 2, (smmu->cb_base - gr0_base) * 2);
smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK;
smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK;
@@ -1887,6 +1968,7 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
atomic_add_return(smmu->num_context_banks,
&cavium_smmu_context_count);
smmu->cavium_id_base -= smmu->num_context_banks;
+ dev_notice(smmu->dev, "\tenabling workaround for Cavium erratum 27704\n");
}
/* ID2 */
@@ -2075,6 +2157,23 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev,
return 0;
}
+static void arm_smmu_bus_init(void)
+{
+ /* Oh, for a proper bus abstraction */
+ if (!iommu_present(&platform_bus_type))
+ bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
+#ifdef CONFIG_ARM_AMBA
+ if (!iommu_present(&amba_bustype))
+ bus_set_iommu(&amba_bustype, &arm_smmu_ops);
+#endif
+#ifdef CONFIG_PCI
+ if (!iommu_present(&pci_bus_type)) {
+ pci_request_acs();
+ bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
+ }
+#endif
+}
+
static int arm_smmu_device_probe(struct platform_device *pdev)
{
struct resource *res;
@@ -2103,7 +2202,7 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
smmu->base = devm_ioremap_resource(dev, res);
if (IS_ERR(smmu->base))
return PTR_ERR(smmu->base);
- smmu->size = resource_size(res);
+ smmu->cb_base = smmu->base + resource_size(res) / 2;
num_irqs = 0;
while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
@@ -2180,21 +2279,30 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
arm_smmu_device_reset(smmu);
arm_smmu_test_smr_masks(smmu);
- /* Oh, for a proper bus abstraction */
- if (!iommu_present(&platform_bus_type))
- bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
-#ifdef CONFIG_ARM_AMBA
- if (!iommu_present(&amba_bustype))
- bus_set_iommu(&amba_bustype, &arm_smmu_ops);
-#endif
-#ifdef CONFIG_PCI
- if (!iommu_present(&pci_bus_type)) {
- pci_request_acs();
- bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
- }
-#endif
+ /*
+ * For ACPI and generic DT bindings, an SMMU will be probed before
+ * any device which might need it, so we want the bus ops in place
+ * ready to handle default domain setup as soon as any SMMU exists.
+ */
+ if (!using_legacy_binding)
+ arm_smmu_bus_init();
+
+ return 0;
+}
+
+/*
+ * With the legacy DT binding in play, though, we have no guarantees about
+ * probe order, but then we're also not doing default domains, so we can
+ * delay setting bus ops until we're sure every possible SMMU is ready,
+ * and that way ensure that no add_device() calls get missed.
+ */
+static int arm_smmu_legacy_bus_init(void)
+{
+ if (using_legacy_binding)
+ arm_smmu_bus_init();
return 0;
}
+device_initcall_sync(arm_smmu_legacy_bus_init);
static int arm_smmu_device_remove(struct platform_device *pdev)
{
@@ -2219,56 +2327,14 @@ static struct platform_driver arm_smmu_driver = {
.probe = arm_smmu_device_probe,
.remove = arm_smmu_device_remove,
};
-
-static int __init arm_smmu_init(void)
-{
- static bool registered;
- int ret = 0;
-
- if (!registered) {
- ret = platform_driver_register(&arm_smmu_driver);
- registered = !ret;
- }
- return ret;
-}
-
-static void __exit arm_smmu_exit(void)
-{
- return platform_driver_unregister(&arm_smmu_driver);
-}
-
-subsys_initcall(arm_smmu_init);
-module_exit(arm_smmu_exit);
-
-static int __init arm_smmu_of_init(struct device_node *np)
-{
- int ret = arm_smmu_init();
-
- if (ret)
- return ret;
-
- if (!of_platform_device_create(np, NULL, platform_bus_type.dev_root))
- return -ENODEV;
-
- return 0;
-}
-IOMMU_OF_DECLARE(arm_smmuv1, "arm,smmu-v1", arm_smmu_of_init);
-IOMMU_OF_DECLARE(arm_smmuv2, "arm,smmu-v2", arm_smmu_of_init);
-IOMMU_OF_DECLARE(arm_mmu400, "arm,mmu-400", arm_smmu_of_init);
-IOMMU_OF_DECLARE(arm_mmu401, "arm,mmu-401", arm_smmu_of_init);
-IOMMU_OF_DECLARE(arm_mmu500, "arm,mmu-500", arm_smmu_of_init);
-IOMMU_OF_DECLARE(cavium_smmuv2, "cavium,smmu-v2", arm_smmu_of_init);
-
-#ifdef CONFIG_ACPI
-static int __init arm_smmu_acpi_init(struct acpi_table_header *table)
-{
- if (iort_node_match(ACPI_IORT_NODE_SMMU))
- return arm_smmu_init();
-
- return 0;
-}
-IORT_ACPI_DECLARE(arm_smmu, ACPI_SIG_IORT, arm_smmu_acpi_init);
-#endif
+module_platform_driver(arm_smmu_driver);
+
+IOMMU_OF_DECLARE(arm_smmuv1, "arm,smmu-v1", NULL);
+IOMMU_OF_DECLARE(arm_smmuv2, "arm,smmu-v2", NULL);
+IOMMU_OF_DECLARE(arm_mmu400, "arm,mmu-400", NULL);
+IOMMU_OF_DECLARE(arm_mmu401, "arm,mmu-401", NULL);
+IOMMU_OF_DECLARE(arm_mmu500, "arm,mmu-500", NULL);
+IOMMU_OF_DECLARE(cavium_smmuv2, "cavium,smmu-v2", NULL);
MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 48d36ce59efb..8348f366ddd1 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -61,15 +61,6 @@ static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie)
return PAGE_SIZE;
}
-static inline struct iova_domain *cookie_iovad(struct iommu_domain *domain)
-{
- struct iommu_dma_cookie *cookie = domain->iova_cookie;
-
- if (cookie->type == IOMMU_DMA_IOVA_COOKIE)
- return &cookie->iovad;
- return NULL;
-}
-
static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type)
{
struct iommu_dma_cookie *cookie;
@@ -167,22 +158,99 @@ void iommu_put_dma_cookie(struct iommu_domain *domain)
}
EXPORT_SYMBOL(iommu_put_dma_cookie);
-static void iova_reserve_pci_windows(struct pci_dev *dev,
- struct iova_domain *iovad)
+/**
+ * iommu_dma_get_resv_regions - Reserved region driver helper
+ * @dev: Device from iommu_get_resv_regions()
+ * @list: Reserved region list from iommu_get_resv_regions()
+ *
+ * IOMMU drivers can use this to implement their .get_resv_regions callback
+ * for general non-IOMMU-specific reservations. Currently, this covers host
+ * bridge windows for PCI devices.
+ */
+void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
{
- struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus);
+ struct pci_host_bridge *bridge;
struct resource_entry *window;
- unsigned long lo, hi;
+ if (!dev_is_pci(dev))
+ return;
+
+ bridge = pci_find_host_bridge(to_pci_dev(dev)->bus);
resource_list_for_each_entry(window, &bridge->windows) {
- if (resource_type(window->res) != IORESOURCE_MEM &&
- resource_type(window->res) != IORESOURCE_IO)
+ struct iommu_resv_region *region;
+ phys_addr_t start;
+ size_t length;
+
+ if (resource_type(window->res) != IORESOURCE_MEM)
+ continue;
+
+ start = window->res->start - window->offset;
+ length = window->res->end - window->res->start + 1;
+ region = iommu_alloc_resv_region(start, length, 0,
+ IOMMU_RESV_RESERVED);
+ if (!region)
+ return;
+
+ list_add_tail(&region->list, list);
+ }
+}
+EXPORT_SYMBOL(iommu_dma_get_resv_regions);
+
+static int cookie_init_hw_msi_region(struct iommu_dma_cookie *cookie,
+ phys_addr_t start, phys_addr_t end)
+{
+ struct iova_domain *iovad = &cookie->iovad;
+ struct iommu_dma_msi_page *msi_page;
+ int i, num_pages;
+
+ start -= iova_offset(iovad, start);
+ num_pages = iova_align(iovad, end - start) >> iova_shift(iovad);
+
+ msi_page = kcalloc(num_pages, sizeof(*msi_page), GFP_KERNEL);
+ if (!msi_page)
+ return -ENOMEM;
+
+ for (i = 0; i < num_pages; i++) {
+ msi_page[i].phys = start;
+ msi_page[i].iova = start;
+ INIT_LIST_HEAD(&msi_page[i].list);
+ list_add(&msi_page[i].list, &cookie->msi_page_list);
+ start += iovad->granule;
+ }
+
+ return 0;
+}
+
+static int iova_reserve_iommu_regions(struct device *dev,
+ struct iommu_domain *domain)
+{
+ struct iommu_dma_cookie *cookie = domain->iova_cookie;
+ struct iova_domain *iovad = &cookie->iovad;
+ struct iommu_resv_region *region;
+ LIST_HEAD(resv_regions);
+ int ret = 0;
+
+ iommu_get_resv_regions(dev, &resv_regions);
+ list_for_each_entry(region, &resv_regions, list) {
+ unsigned long lo, hi;
+
+ /* We ARE the software that manages these! */
+ if (region->type == IOMMU_RESV_SW_MSI)
continue;
- lo = iova_pfn(iovad, window->res->start - window->offset);
- hi = iova_pfn(iovad, window->res->end - window->offset);
+ lo = iova_pfn(iovad, region->start);
+ hi = iova_pfn(iovad, region->start + region->length - 1);
reserve_iova(iovad, lo, hi);
+
+ if (region->type == IOMMU_RESV_MSI)
+ ret = cookie_init_hw_msi_region(cookie, region->start,
+ region->start + region->length);
+ if (ret)
+ break;
}
+ iommu_put_resv_regions(dev, &resv_regions);
+
+ return ret;
}
/**
@@ -203,7 +271,6 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
struct iommu_dma_cookie *cookie = domain->iova_cookie;
struct iova_domain *iovad = &cookie->iovad;
unsigned long order, base_pfn, end_pfn;
- bool pci = dev && dev_is_pci(dev);
if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
return -EINVAL;
@@ -233,7 +300,7 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
* leave the cache limit at the top of their range to save an rb_last()
* traversal on every allocation.
*/
- if (pci)
+ if (dev && dev_is_pci(dev))
end_pfn &= DMA_BIT_MASK(32) >> order;
/* start_pfn is always nonzero for an already-initialised domain */
@@ -248,12 +315,15 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
* area cache limit down for the benefit of the smaller one.
*/
iovad->dma_32bit_pfn = min(end_pfn, iovad->dma_32bit_pfn);
- } else {
- init_iova_domain(iovad, 1UL << order, base_pfn, end_pfn);
- if (pci)
- iova_reserve_pci_windows(to_pci_dev(dev), iovad);
+
+ return 0;
}
- return 0;
+
+ init_iova_domain(iovad, 1UL << order, base_pfn, end_pfn);
+ if (!dev)
+ return 0;
+
+ return iova_reserve_iommu_regions(dev, domain);
}
EXPORT_SYMBOL(iommu_dma_init_domain);
@@ -286,48 +356,67 @@ int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
}
}
-static struct iova *__alloc_iova(struct iommu_domain *domain, size_t size,
- dma_addr_t dma_limit, struct device *dev)
+static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
+ size_t size, dma_addr_t dma_limit, struct device *dev)
{
- struct iova_domain *iovad = cookie_iovad(domain);
- unsigned long shift = iova_shift(iovad);
- unsigned long length = iova_align(iovad, size) >> shift;
- struct iova *iova = NULL;
+ struct iommu_dma_cookie *cookie = domain->iova_cookie;
+ struct iova_domain *iovad = &cookie->iovad;
+ unsigned long shift, iova_len, iova = 0;
+
+ if (cookie->type == IOMMU_DMA_MSI_COOKIE) {
+ cookie->msi_iova += size;
+ return cookie->msi_iova - size;
+ }
+
+ shift = iova_shift(iovad);
+ iova_len = size >> shift;
+ /*
+ * Freeing non-power-of-two-sized allocations back into the IOVA caches
+ * will come back to bite us badly, so we have to waste a bit of space
+ * rounding up anything cacheable to make sure that can't happen. The
+ * order of the unadjusted size will still match upon freeing.
+ */
+ if (iova_len < (1 << (IOVA_RANGE_CACHE_MAX_SIZE - 1)))
+ iova_len = roundup_pow_of_two(iova_len);
if (domain->geometry.force_aperture)
dma_limit = min(dma_limit, domain->geometry.aperture_end);
/* Try to get PCI devices a SAC address */
if (dma_limit > DMA_BIT_MASK(32) && dev_is_pci(dev))
- iova = alloc_iova(iovad, length, DMA_BIT_MASK(32) >> shift,
- true);
- /*
- * Enforce size-alignment to be safe - there could perhaps be an
- * attribute to control this per-device, or at least per-domain...
- */
+ iova = alloc_iova_fast(iovad, iova_len, DMA_BIT_MASK(32) >> shift);
+
if (!iova)
- iova = alloc_iova(iovad, length, dma_limit >> shift, true);
+ iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift);
- return iova;
+ return (dma_addr_t)iova << shift;
}
-/* The IOVA allocator knows what we mapped, so just unmap whatever that was */
-static void __iommu_dma_unmap(struct iommu_domain *domain, dma_addr_t dma_addr)
+static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
+ dma_addr_t iova, size_t size)
{
- struct iova_domain *iovad = cookie_iovad(domain);
+ struct iova_domain *iovad = &cookie->iovad;
unsigned long shift = iova_shift(iovad);
- unsigned long pfn = dma_addr >> shift;
- struct iova *iova = find_iova(iovad, pfn);
- size_t size;
- if (WARN_ON(!iova))
- return;
+ /* The MSI case is only ever cleaning up its most recent allocation */
+ if (cookie->type == IOMMU_DMA_MSI_COOKIE)
+ cookie->msi_iova -= size;
+ else
+ free_iova_fast(iovad, iova >> shift, size >> shift);
+}
+
+static void __iommu_dma_unmap(struct iommu_domain *domain, dma_addr_t dma_addr,
+ size_t size)
+{
+ struct iommu_dma_cookie *cookie = domain->iova_cookie;
+ struct iova_domain *iovad = &cookie->iovad;
+ size_t iova_off = iova_offset(iovad, dma_addr);
+
+ dma_addr -= iova_off;
+ size = iova_align(iovad, size + iova_off);
- size = iova_size(iova) << shift;
- size -= iommu_unmap(domain, pfn << shift, size);
- /* ...and if we can't, then something is horribly, horribly wrong */
- WARN_ON(size > 0);
- __free_iova(iovad, iova);
+ WARN_ON(iommu_unmap(domain, dma_addr, size) != size);
+ iommu_dma_free_iova(cookie, dma_addr, size);
}
static void __iommu_dma_free_pages(struct page **pages, int count)
@@ -409,7 +498,7 @@ static struct page **__iommu_dma_alloc_pages(unsigned int count,
void iommu_dma_free(struct device *dev, struct page **pages, size_t size,
dma_addr_t *handle)
{
- __iommu_dma_unmap(iommu_get_domain_for_dev(dev), *handle);
+ __iommu_dma_unmap(iommu_get_domain_for_dev(dev), *handle, size);
__iommu_dma_free_pages(pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
*handle = DMA_ERROR_CODE;
}
@@ -437,11 +526,11 @@ struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
void (*flush_page)(struct device *, const void *, phys_addr_t))
{
struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
- struct iova_domain *iovad = cookie_iovad(domain);
- struct iova *iova;
+ struct iommu_dma_cookie *cookie = domain->iova_cookie;
+ struct iova_domain *iovad = &cookie->iovad;
struct page **pages;
struct sg_table sgt;
- dma_addr_t dma_addr;
+ dma_addr_t iova;
unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;
*handle = DMA_ERROR_CODE;
@@ -461,11 +550,11 @@ struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
if (!pages)
return NULL;
- iova = __alloc_iova(domain, size, dev->coherent_dma_mask, dev);
+ size = iova_align(iovad, size);
+ iova = iommu_dma_alloc_iova(domain, size, dev->coherent_dma_mask, dev);
if (!iova)
goto out_free_pages;
- size = iova_align(iovad, size);
if (sg_alloc_table_from_pages(&sgt, pages, count, 0, size, GFP_KERNEL))
goto out_free_iova;
@@ -481,19 +570,18 @@ struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
sg_miter_stop(&miter);
}
- dma_addr = iova_dma_addr(iovad, iova);
- if (iommu_map_sg(domain, dma_addr, sgt.sgl, sgt.orig_nents, prot)
+ if (iommu_map_sg(domain, iova, sgt.sgl, sgt.orig_nents, prot)
< size)
goto out_free_sg;
- *handle = dma_addr;
+ *handle = iova;
sg_free_table(&sgt);
return pages;
out_free_sg:
sg_free_table(&sgt);
out_free_iova:
- __free_iova(iovad, iova);
+ iommu_dma_free_iova(cookie, iova, size);
out_free_pages:
__iommu_dma_free_pages(pages, count);
return NULL;
@@ -527,22 +615,22 @@ int iommu_dma_mmap(struct page **pages, size_t size, struct vm_area_struct *vma)
static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
size_t size, int prot)
{
- dma_addr_t dma_addr;
struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
- struct iova_domain *iovad = cookie_iovad(domain);
+ struct iommu_dma_cookie *cookie = domain->iova_cookie;
+ struct iova_domain *iovad = &cookie->iovad;
size_t iova_off = iova_offset(iovad, phys);
- size_t len = iova_align(iovad, size + iova_off);
- struct iova *iova = __alloc_iova(domain, len, dma_get_mask(dev), dev);
+ dma_addr_t iova;
+ size = iova_align(iovad, size + iova_off);
+ iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
if (!iova)
return DMA_ERROR_CODE;
- dma_addr = iova_dma_addr(iovad, iova);
- if (iommu_map(domain, dma_addr, phys - iova_off, len, prot)) {
- __free_iova(iovad, iova);
+ if (iommu_map(domain, iova, phys - iova_off, size, prot)) {
+ iommu_dma_free_iova(cookie, iova, size);
return DMA_ERROR_CODE;
}
- return dma_addr + iova_off;
+ return iova + iova_off;
}
dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
@@ -554,7 +642,7 @@ dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
void iommu_dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
enum dma_data_direction dir, unsigned long attrs)
{
- __iommu_dma_unmap(iommu_get_domain_for_dev(dev), handle);
+ __iommu_dma_unmap(iommu_get_domain_for_dev(dev), handle, size);
}
/*
@@ -643,10 +731,10 @@ int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
int nents, int prot)
{
struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
- struct iova_domain *iovad = cookie_iovad(domain);
- struct iova *iova;
+ struct iommu_dma_cookie *cookie = domain->iova_cookie;
+ struct iova_domain *iovad = &cookie->iovad;
struct scatterlist *s, *prev = NULL;
- dma_addr_t dma_addr;
+ dma_addr_t iova;
size_t iova_len = 0;
unsigned long mask = dma_get_seg_boundary(dev);
int i;
@@ -690,7 +778,7 @@ int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
prev = s;
}
- iova = __alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
+ iova = iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
if (!iova)
goto out_restore_sg;
@@ -698,14 +786,13 @@ int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
* We'll leave any physical concatenation to the IOMMU driver's
* implementation - it knows better than we do.
*/
- dma_addr = iova_dma_addr(iovad, iova);
- if (iommu_map_sg(domain, dma_addr, sg, nents, prot) < iova_len)
+ if (iommu_map_sg(domain, iova, sg, nents, prot) < iova_len)
goto out_free_iova;
- return __finalise_sg(dev, sg, nents, dma_addr);
+ return __finalise_sg(dev, sg, nents, iova);
out_free_iova:
- __free_iova(iovad, iova);
+ iommu_dma_free_iova(cookie, iova, iova_len);
out_restore_sg:
__invalidate_sg(sg, nents);
return 0;
@@ -714,11 +801,21 @@ out_restore_sg:
void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
enum dma_data_direction dir, unsigned long attrs)
{
+ dma_addr_t start, end;
+ struct scatterlist *tmp;
+ int i;
/*
* The scatterlist segments are mapped into a single
* contiguous IOVA allocation, so this is incredibly easy.
*/
- __iommu_dma_unmap(iommu_get_domain_for_dev(dev), sg_dma_address(sg));
+ start = sg_dma_address(sg);
+ for_each_sg(sg_next(sg), tmp, nents - 1, i) {
+ if (sg_dma_len(tmp) == 0)
+ break;
+ sg = tmp;
+ }
+ end = sg_dma_address(sg) + sg_dma_len(sg);
+ __iommu_dma_unmap(iommu_get_domain_for_dev(dev), start, end - start);
}
dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
@@ -731,7 +828,7 @@ dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
size_t size, enum dma_data_direction dir, unsigned long attrs)
{
- __iommu_dma_unmap(iommu_get_domain_for_dev(dev), handle);
+ __iommu_dma_unmap(iommu_get_domain_for_dev(dev), handle, size);
}
int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
@@ -744,8 +841,7 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
{
struct iommu_dma_cookie *cookie = domain->iova_cookie;
struct iommu_dma_msi_page *msi_page;
- struct iova_domain *iovad = cookie_iovad(domain);
- struct iova *iova;
+ dma_addr_t iova;
int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
size_t size = cookie_msi_granule(cookie);
@@ -758,29 +854,16 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
if (!msi_page)
return NULL;
- msi_page->phys = msi_addr;
- if (iovad) {
- iova = __alloc_iova(domain, size, dma_get_mask(dev), dev);
- if (!iova)
- goto out_free_page;
- msi_page->iova = iova_dma_addr(iovad, iova);
- } else {
- msi_page->iova = cookie->msi_iova;
- cookie->msi_iova += size;
- }
-
- if (iommu_map(domain, msi_page->iova, msi_addr, size, prot))
- goto out_free_iova;
+ iova = __iommu_dma_map(dev, msi_addr, size, prot);
+ if (iommu_dma_mapping_error(dev, iova))
+ goto out_free_page;
INIT_LIST_HEAD(&msi_page->list);
+ msi_page->phys = msi_addr;
+ msi_page->iova = iova;
list_add(&msi_page->list, &cookie->msi_page_list);
return msi_page;
-out_free_iova:
- if (iovad)
- __free_iova(iovad, iova);
- else
- cookie->msi_iova -= size;
out_free_page:
kfree(msi_page);
return NULL;
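The new iommu_dma_get_resv_regions() helper above is what the arm-smmu .get_resv_regions hunk earlier hooks into. A minimal sketch of a hypothetical driver callback using it; the "foo" names and the MSI window address/size are illustrative, not from the patch:

	static void foo_iommu_get_resv_regions(struct device *dev,
					       struct list_head *head)
	{
		struct iommu_resv_region *region;

		/* Driver-specific software MSI window; address and size are made up */
		region = iommu_alloc_resv_region(0x08000000, SZ_1M,
						 IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO,
						 IOMMU_RESV_SW_MSI);
		if (!region)
			return;
		list_add_tail(&region->list, head);

		/* Generic, non-IOMMU-specific reservations (PCI windows etc.) */
		iommu_dma_get_resv_regions(dev, head);
	}

Regions collected this way are then carved out of DMA IOVA space by the new iova_reserve_iommu_regions() path above.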
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index 36e3f430d265..cbf7763d8091 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -311,7 +311,7 @@ static int dmar_pci_bus_add_dev(struct dmar_pci_notify_info *info)
((void *)drhd) + drhd->header.length,
dmaru->segment,
dmaru->devices, dmaru->devices_cnt);
- if (ret != 0)
+ if (ret)
break;
}
if (ret >= 0)
@@ -391,7 +391,7 @@ static int dmar_parse_one_drhd(struct acpi_dmar_header *header, void *arg)
{
struct acpi_dmar_hardware_unit *drhd;
struct dmar_drhd_unit *dmaru;
- int ret = 0;
+ int ret;
drhd = (struct acpi_dmar_hardware_unit *)header;
dmaru = dmar_find_dmaru(drhd);
@@ -551,17 +551,16 @@ static int __init dmar_table_detect(void)
status = AE_NOT_FOUND;
}
- return (ACPI_SUCCESS(status) ? 1 : 0);
+ return ACPI_SUCCESS(status) ? 0 : -ENOENT;
}
static int dmar_walk_remapping_entries(struct acpi_dmar_header *start,
size_t len, struct dmar_res_callback *cb)
{
- int ret = 0;
struct acpi_dmar_header *iter, *next;
struct acpi_dmar_header *end = ((void *)start) + len;
- for (iter = start; iter < end && ret == 0; iter = next) {
+ for (iter = start; iter < end; iter = next) {
next = (void *)iter + iter->length;
if (iter->length == 0) {
/* Avoid looping forever on bad ACPI tables */
@@ -570,8 +569,7 @@ static int dmar_walk_remapping_entries(struct acpi_dmar_header *start,
} else if (next > end) {
/* Avoid passing table end */
pr_warn(FW_BUG "Record passes table end\n");
- ret = -EINVAL;
- break;
+ return -EINVAL;
}
if (cb->print_entry)
@@ -582,15 +580,19 @@ static int dmar_walk_remapping_entries(struct acpi_dmar_header *start,
pr_debug("Unknown DMAR structure type %d\n",
iter->type);
} else if (cb->cb[iter->type]) {
+ int ret;
+
ret = cb->cb[iter->type](iter, cb->arg[iter->type]);
+ if (ret)
+ return ret;
} else if (!cb->ignore_unhandled) {
pr_warn("No handler for DMAR structure type %d\n",
iter->type);
- ret = -EINVAL;
+ return -EINVAL;
}
}
- return ret;
+ return 0;
}
static inline int dmar_walk_dmar_table(struct acpi_table_dmar *dmar,
@@ -607,8 +609,8 @@ static int __init
parse_dmar_table(void)
{
struct acpi_table_dmar *dmar;
- int ret = 0;
int drhd_count = 0;
+ int ret;
struct dmar_res_callback cb = {
.print_entry = true,
.ignore_unhandled = true,
@@ -891,17 +893,17 @@ int __init detect_intel_iommu(void)
down_write(&dmar_global_lock);
ret = dmar_table_detect();
- if (ret)
- ret = !dmar_walk_dmar_table((struct acpi_table_dmar *)dmar_tbl,
- &validate_drhd_cb);
- if (ret && !no_iommu && !iommu_detected && !dmar_disabled) {
+ if (!ret)
+ ret = dmar_walk_dmar_table((struct acpi_table_dmar *)dmar_tbl,
+ &validate_drhd_cb);
+ if (!ret && !no_iommu && !iommu_detected && !dmar_disabled) {
iommu_detected = 1;
/* Make sure ACS will be enabled */
pci_request_acs();
}
#ifdef CONFIG_X86
- if (ret)
+ if (!ret)
x86_init.iommu.iommu_init = intel_iommu_init;
#endif
@@ -911,10 +913,9 @@ int __init detect_intel_iommu(void)
}
up_write(&dmar_global_lock);
- return ret ? 1 : -ENODEV;
+ return ret ? ret : 1;
}
-
static void unmap_iommu(struct intel_iommu *iommu)
{
iounmap(iommu->reg);
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index c01bfcdb2383..2395478dde75 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -171,6 +171,9 @@ static u32 lv2ent_offset(sysmmu_iova_t iova)
#define REG_V5_PT_BASE_PFN 0x00C
#define REG_V5_MMU_FLUSH_ALL 0x010
#define REG_V5_MMU_FLUSH_ENTRY 0x014
+#define REG_V5_MMU_FLUSH_RANGE 0x018
+#define REG_V5_MMU_FLUSH_START 0x020
+#define REG_V5_MMU_FLUSH_END 0x024
#define REG_V5_INT_STATUS 0x060
#define REG_V5_INT_CLEAR 0x064
#define REG_V5_FAULT_AR_VA 0x070
@@ -319,14 +322,23 @@ static void __sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
{
unsigned int i;
- for (i = 0; i < num_inv; i++) {
- if (MMU_MAJ_VER(data->version) < 5)
+ if (MMU_MAJ_VER(data->version) < 5) {
+ for (i = 0; i < num_inv; i++) {
writel((iova & SPAGE_MASK) | 1,
data->sfrbase + REG_MMU_FLUSH_ENTRY);
- else
+ iova += SPAGE_SIZE;
+ }
+ } else {
+ if (num_inv == 1) {
writel((iova & SPAGE_MASK) | 1,
data->sfrbase + REG_V5_MMU_FLUSH_ENTRY);
- iova += SPAGE_SIZE;
+ } else {
+ writel((iova & SPAGE_MASK),
+ data->sfrbase + REG_V5_MMU_FLUSH_START);
+ writel((iova & SPAGE_MASK) + (num_inv - 1) * SPAGE_SIZE,
+ data->sfrbase + REG_V5_MMU_FLUSH_END);
+ writel(1, data->sfrbase + REG_V5_MMU_FLUSH_RANGE);
+ }
}
}
@@ -747,16 +759,8 @@ static struct iommu_domain *exynos_iommu_domain_alloc(unsigned type)
goto err_counter;
/* Workaround for System MMU v3.3 to prevent caching 1MiB mapping */
- for (i = 0; i < NUM_LV1ENTRIES; i += 8) {
- domain->pgtable[i + 0] = ZERO_LV2LINK;
- domain->pgtable[i + 1] = ZERO_LV2LINK;
- domain->pgtable[i + 2] = ZERO_LV2LINK;
- domain->pgtable[i + 3] = ZERO_LV2LINK;
- domain->pgtable[i + 4] = ZERO_LV2LINK;
- domain->pgtable[i + 5] = ZERO_LV2LINK;
- domain->pgtable[i + 6] = ZERO_LV2LINK;
- domain->pgtable[i + 7] = ZERO_LV2LINK;
- }
+ for (i = 0; i < NUM_LV1ENTRIES; i++)
+ domain->pgtable[i] = ZERO_LV2LINK;
handle = dma_map_single(dma_dev, domain->pgtable, LV1TABLE_SIZE,
DMA_TO_DEVICE);
diff --git a/drivers/iommu/fsl_pamu.h b/drivers/iommu/fsl_pamu.h
index aab723f91f12..c3434f29c967 100644
--- a/drivers/iommu/fsl_pamu.h
+++ b/drivers/iommu/fsl_pamu.h
@@ -20,6 +20,7 @@
#define __FSL_PAMU_H
#include <linux/iommu.h>
+#include <linux/pci.h>
#include <asm/fsl_pamu_stash.h>
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index d412a313a372..90ab0115d78e 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -183,6 +183,7 @@ static int rwbf_quirk;
* (used when kernel is launched w/ TXT)
*/
static int force_on = 0;
+int intel_iommu_tboot_noforce;
/*
* 0: Present
@@ -607,6 +608,10 @@ static int __init intel_iommu_setup(char *str)
"Intel-IOMMU: enable pre-production PASID support\n");
intel_iommu_pasid28 = 1;
iommu_identity_mapping |= IDENTMAP_GFX;
+ } else if (!strncmp(str, "tboot_noforce", 13)) {
+ printk(KERN_INFO
+ "Intel-IOMMU: not forcing on after tboot. This could expose security risk for tboot\n");
+ intel_iommu_tboot_noforce = 1;
}
str += strcspn(str, ",");
@@ -4730,6 +4735,15 @@ static int intel_iommu_cpu_dead(unsigned int cpu)
return 0;
}
+static void intel_disable_iommus(void)
+{
+ struct intel_iommu *iommu = NULL;
+ struct dmar_drhd_unit *drhd;
+
+ for_each_iommu(iommu, drhd)
+ iommu_disable_translation(iommu);
+}
+
static inline struct intel_iommu *dev_to_intel_iommu(struct device *dev)
{
return container_of(dev, struct intel_iommu, iommu.dev);
@@ -4840,8 +4854,28 @@ int __init intel_iommu_init(void)
goto out_free_dmar;
}
- if (no_iommu || dmar_disabled)
+ if (no_iommu || dmar_disabled) {
+ /*
+ * We exit the function here to ensure IOMMU's remapping and
+ * mempool aren't setup, which means that the IOMMU's PMRs
+ * won't be disabled via the call to init_dmars(). So disable
+ * it explicitly here. The PMRs were setup by tboot prior to
+ * calling SENTER, but the kernel is expected to reset/tear
+ * down the PMRs.
+ */
+ if (intel_iommu_tboot_noforce) {
+ for_each_iommu(iommu, drhd)
+ iommu_disable_protect_mem_regions(iommu);
+ }
+
+ /*
+ * Make sure the IOMMUs are switched off, even when we
+ * boot into a kexec kernel and the previous kernel left
+ * them enabled
+ */
+ intel_disable_iommus();
goto out_free_dmar;
+ }
if (list_empty(&dmar_rmrr_units))
pr_info("No RMRR found\n");
diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
index ac596928f6b4..a190cbd76ef7 100644
--- a/drivers/iommu/intel_irq_remapping.c
+++ b/drivers/iommu/intel_irq_remapping.c
@@ -408,14 +408,6 @@ static int iommu_load_old_irte(struct intel_iommu *iommu)
size_t size;
u64 irta;
- if (!is_kdump_kernel()) {
- pr_warn("IRQ remapping was enabled on %s but we are not in kdump mode\n",
- iommu->name);
- clear_ir_pre_enabled(iommu);
- iommu_disable_irq_remapping(iommu);
- return -EINVAL;
- }
-
/* Check whether the old ir-table has the same size as ours */
irta = dmar_readq(iommu->reg + DMAR_IRTA_REG);
if ((irta & INTR_REMAP_TABLE_REG_SIZE_MASK)
@@ -567,7 +559,12 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu)
init_ir_status(iommu);
if (ir_pre_enabled(iommu)) {
- if (iommu_load_old_irte(iommu))
+ if (!is_kdump_kernel()) {
+ pr_warn("IRQ remapping was enabled on %s but we are not in kdump mode\n",
+ iommu->name);
+ clear_ir_pre_enabled(iommu);
+ iommu_disable_irq_remapping(iommu);
+ } else if (iommu_load_old_irte(iommu))
pr_err("Failed to copy IR table for %s from previous kernel\n",
iommu->name);
else
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index f9bc6ebb8140..6e5df5e0a3bd 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -74,7 +74,7 @@
/* Calculate the block/page mapping size at level l for pagetable in d. */
#define ARM_LPAE_BLOCK_SIZE(l,d) \
- (1 << (ilog2(sizeof(arm_lpae_iopte)) + \
+ (1ULL << (ilog2(sizeof(arm_lpae_iopte)) + \
((ARM_LPAE_MAX_LEVELS - (l)) * (d)->bits_per_level)))
/* Page table bits */
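For reference, a worked example of the overflow the 1ULL change avoids (illustrative, not part of the patch): with a 4KiB granule and 8-byte PTEs, bits_per_level is 9, so the level-0 shift is ilog2(8) + 4 * 9 = 39, and the old int-typed "1 << 39" was undefined; promoting the constant keeps the whole expression in 64-bit arithmetic:

	static inline u64 lpae_l0_block_size_4k(void)
	{
		/* 512GiB; would overflow a 32-bit int as plain "1 << 39" */
		return 1ULL << (3 + 4 * 9);
	}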
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 3b67144dead2..cf7ca7e70777 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -36,6 +36,7 @@
static struct kset *iommu_group_kset;
static DEFINE_IDA(iommu_group_ida);
+static unsigned int iommu_def_domain_type = IOMMU_DOMAIN_DMA;
struct iommu_callback_data {
const struct iommu_ops *ops;
@@ -112,6 +113,18 @@ static int __iommu_attach_group(struct iommu_domain *domain,
static void __iommu_detach_group(struct iommu_domain *domain,
struct iommu_group *group);
+static int __init iommu_set_def_domain_type(char *str)
+{
+ bool pt;
+
+ if (!str || strtobool(str, &pt))
+ return -EINVAL;
+
+ iommu_def_domain_type = pt ? IOMMU_DOMAIN_IDENTITY : IOMMU_DOMAIN_DMA;
+ return 0;
+}
+early_param("iommu.passthrough", iommu_set_def_domain_type);
+
static ssize_t iommu_group_attr_show(struct kobject *kobj,
struct attribute *__attr, char *buf)
{
@@ -1015,10 +1028,19 @@ struct iommu_group *iommu_group_get_for_dev(struct device *dev)
* IOMMU driver.
*/
if (!group->default_domain) {
- group->default_domain = __iommu_domain_alloc(dev->bus,
- IOMMU_DOMAIN_DMA);
+ struct iommu_domain *dom;
+
+ dom = __iommu_domain_alloc(dev->bus, iommu_def_domain_type);
+ if (!dom && iommu_def_domain_type != IOMMU_DOMAIN_DMA) {
+ dev_warn(dev,
+ "failed to allocate default IOMMU domain of type %u; falling back to IOMMU_DOMAIN_DMA",
+ iommu_def_domain_type);
+ dom = __iommu_domain_alloc(dev->bus, IOMMU_DOMAIN_DMA);
+ }
+
+ group->default_domain = dom;
if (!group->domain)
- group->domain = group->default_domain;
+ group->domain = dom;
}
ret = iommu_group_add_device(group, dev);
@@ -1083,8 +1105,12 @@ static int iommu_bus_notifier(struct notifier_block *nb,
* result in ADD/DEL notifiers to group->notifier
*/
if (action == BUS_NOTIFY_ADD_DEVICE) {
- if (ops->add_device)
- return ops->add_device(dev);
+ if (ops->add_device) {
+ int ret;
+
+ ret = ops->add_device(dev);
+ return (ret) ? NOTIFY_DONE : NOTIFY_OK;
+ }
} else if (action == BUS_NOTIFY_REMOVED_DEVICE) {
if (ops->remove_device && dev->iommu_group) {
ops->remove_device(dev);
@@ -1652,6 +1678,48 @@ void iommu_domain_window_disable(struct iommu_domain *domain, u32 wnd_nr)
}
EXPORT_SYMBOL_GPL(iommu_domain_window_disable);
+/**
+ * report_iommu_fault() - report about an IOMMU fault to the IOMMU framework
+ * @domain: the iommu domain where the fault has happened
+ * @dev: the device where the fault has happened
+ * @iova: the faulting address
+ * @flags: mmu fault flags (e.g. IOMMU_FAULT_READ/IOMMU_FAULT_WRITE/...)
+ *
+ * This function should be called by the low-level IOMMU implementations
+ * whenever IOMMU faults happen, to allow high-level users, that are
+ * interested in such events, to know about them.
+ *
+ * This event may be useful for several possible use cases:
+ * - mere logging of the event
+ * - dynamic TLB/PTE loading
+ * - if restarting of the faulting device is required
+ *
+ * Returns 0 on success and an appropriate error code otherwise (if dynamic
+ * PTE/TLB loading will one day be supported, implementations will be able
+ * to tell whether it succeeded or not according to this return value).
+ *
+ * Specifically, -ENOSYS is returned if a fault handler isn't installed
+ * (though fault handlers can also return -ENOSYS, in case they want to
+ * elicit the default behavior of the IOMMU drivers).
+ */
+int report_iommu_fault(struct iommu_domain *domain, struct device *dev,
+ unsigned long iova, int flags)
+{
+ int ret = -ENOSYS;
+
+ /*
+ * if upper layers showed interest and installed a fault handler,
+ * invoke it.
+ */
+ if (domain->handler)
+ ret = domain->handler(domain, dev, iova, flags,
+ domain->handler_token);
+
+ trace_io_page_fault(dev, iova, flags);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(report_iommu_fault);
+
static int __init iommu_init(void)
{
iommu_group_kset = kset_create_and_add("iommu_groups",
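report_iommu_fault() above formalises the split between the reporting driver and an interested consumer (the same file also gains the iommu.passthrough early parameter, which switches the default domain type to IOMMU_DOMAIN_IDENTITY). A hedged sketch of the two halves the helper connects, with hypothetical "foo" names and an assumed foo_read_fault_addr() register accessor:

	static irqreturn_t foo_iommu_irq(int irq, void *data)
	{
		struct foo_iommu *iommu = data;
		unsigned long iova = foo_read_fault_addr(iommu);

		/* 0 means an installed handler dealt with the fault */
		if (!report_iommu_fault(iommu->domain, iommu->dev, iova,
					IOMMU_FAULT_READ))
			return IRQ_HANDLED;

		return IRQ_NONE;
	}

	static int foo_fault_handler(struct iommu_domain *domain, struct device *dev,
				     unsigned long iova, int flags, void *token)
	{
		dev_err(dev, "unhandled fault at 0x%lx\n", iova);
		return -ENOSYS;		/* keep the driver's default behaviour */
	}

	/* Interested users install the handler on the domain, typically at attach
	 * time: iommu_set_fault_handler(domain, foo_fault_handler, NULL); */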
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index b7268a14184f..5c88ba70e4e0 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -100,6 +100,34 @@ __cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
}
}
+/* Insert the iova into domain rbtree by holding writer lock */
+static void
+iova_insert_rbtree(struct rb_root *root, struct iova *iova,
+ struct rb_node *start)
+{
+ struct rb_node **new, *parent = NULL;
+
+ new = (start) ? &start : &(root->rb_node);
+ /* Figure out where to put new node */
+ while (*new) {
+ struct iova *this = rb_entry(*new, struct iova, node);
+
+ parent = *new;
+
+ if (iova->pfn_lo < this->pfn_lo)
+ new = &((*new)->rb_left);
+ else if (iova->pfn_lo > this->pfn_lo)
+ new = &((*new)->rb_right);
+ else {
+ WARN_ON(1); /* this should not happen */
+ return;
+ }
+ }
+ /* Add new node and rebalance tree. */
+ rb_link_node(&iova->node, parent, new);
+ rb_insert_color(&iova->node, root);
+}
+
/*
* Computes the padding size required, to make the start address
* naturally aligned on the power-of-two order of its size
@@ -138,7 +166,7 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
break; /* found a free slot */
}
adjust_limit_pfn:
- limit_pfn = curr_iova->pfn_lo - 1;
+ limit_pfn = curr_iova->pfn_lo ? (curr_iova->pfn_lo - 1) : 0;
move_left:
prev = curr;
curr = rb_prev(curr);
@@ -157,35 +185,8 @@ move_left:
new->pfn_lo = limit_pfn - (size + pad_size) + 1;
new->pfn_hi = new->pfn_lo + size - 1;
- /* Insert the new_iova into domain rbtree by holding writer lock */
- /* Add new node and rebalance tree. */
- {
- struct rb_node **entry, *parent = NULL;
-
- /* If we have 'prev', it's a valid place to start the
- insertion. Otherwise, start from the root. */
- if (prev)
- entry = &prev;
- else
- entry = &iovad->rbroot.rb_node;
-
- /* Figure out where to put new node */
- while (*entry) {
- struct iova *this = rb_entry(*entry, struct iova, node);
- parent = *entry;
-
- if (new->pfn_lo < this->pfn_lo)
- entry = &((*entry)->rb_left);
- else if (new->pfn_lo > this->pfn_lo)
- entry = &((*entry)->rb_right);
- else
- BUG(); /* this should not happen */
- }
-
- /* Add new node and rebalance tree. */
- rb_link_node(&new->node, parent, entry);
- rb_insert_color(&new->node, &iovad->rbroot);
- }
+ /* If we have 'prev', it's a valid place to start the insertion. */
+ iova_insert_rbtree(&iovad->rbroot, new, prev);
__cached_rbnode_insert_update(iovad, saved_pfn, new);
spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
@@ -194,28 +195,6 @@ move_left:
return 0;
}
-static void
-iova_insert_rbtree(struct rb_root *root, struct iova *iova)
-{
- struct rb_node **new = &(root->rb_node), *parent = NULL;
- /* Figure out where to put new node */
- while (*new) {
- struct iova *this = rb_entry(*new, struct iova, node);
-
- parent = *new;
-
- if (iova->pfn_lo < this->pfn_lo)
- new = &((*new)->rb_left);
- else if (iova->pfn_lo > this->pfn_lo)
- new = &((*new)->rb_right);
- else
- BUG(); /* this should not happen */
- }
- /* Add new node and rebalance tree. */
- rb_link_node(&iova->node, parent, new);
- rb_insert_color(&iova->node, root);
-}
-
static struct kmem_cache *iova_cache;
static unsigned int iova_cache_users;
static DEFINE_MUTEX(iova_cache_mutex);
@@ -505,7 +484,7 @@ __insert_new_range(struct iova_domain *iovad,
iova = alloc_and_init_iova(pfn_lo, pfn_hi);
if (iova)
- iova_insert_rbtree(&iovad->rbroot, iova);
+ iova_insert_rbtree(&iovad->rbroot, iova, NULL);
return iova;
}
@@ -612,11 +591,11 @@ split_and_remove_iova(struct iova_domain *iovad, struct iova *iova,
rb_erase(&iova->node, &iovad->rbroot);
if (prev) {
- iova_insert_rbtree(&iovad->rbroot, prev);
+ iova_insert_rbtree(&iovad->rbroot, prev, NULL);
iova->pfn_lo = pfn_lo;
}
if (next) {
- iova_insert_rbtree(&iovad->rbroot, next);
+ iova_insert_rbtree(&iovad->rbroot, next, NULL);
iova->pfn_hi = pfn_hi;
}
spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
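The iova.c changes above pair with the dma-iommu.c move to the caching allocator. A minimal sketch of the pfn-based fast-path pair as used in this series (later kernels change these signatures); sizes and limits are in IOVA pages, and the helper names are illustrative:

	static dma_addr_t foo_alloc_iova(struct iova_domain *iovad, size_t size,
					 dma_addr_t dma_limit)
	{
		unsigned long shift = iova_shift(iovad);
		unsigned long pfn;

		pfn = alloc_iova_fast(iovad, iova_align(iovad, size) >> shift,
				      dma_limit >> shift);

		return (dma_addr_t)pfn << shift;	/* 0 on failure */
	}

	static void foo_free_iova(struct iova_domain *iovad, dma_addr_t iova,
				  size_t size)
	{
		unsigned long shift = iova_shift(iovad);

		free_iova_fast(iovad, iova >> shift,
			       iova_align(iovad, size) >> shift);
	}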
diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c
index 19e010083408..a27ef570c328 100644
--- a/drivers/iommu/mtk_iommu_v1.c
+++ b/drivers/iommu/mtk_iommu_v1.c
@@ -431,9 +431,10 @@ err_release_mapping:
static int mtk_iommu_add_device(struct device *dev)
{
- struct iommu_group *group;
struct of_phandle_args iommu_spec;
struct of_phandle_iterator it;
+ struct mtk_iommu_data *data;
+ struct iommu_group *group;
int err;
of_for_each_phandle(&it, err, dev->of_node, "iommus",
@@ -450,6 +451,9 @@ static int mtk_iommu_add_device(struct device *dev)
if (!dev->iommu_fwspec || dev->iommu_fwspec->ops != &mtk_iommu_ops)
return -ENODEV; /* Not an iommu client device */
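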
+ data = dev->iommu_fwspec->iommu_priv;
+ iommu_device_link(&data->iommu, dev);
+
group = iommu_group_get_for_dev(dev);
if (IS_ERR(group))
return PTR_ERR(group);
@@ -460,9 +464,14 @@ static int mtk_iommu_add_device(struct device *dev)
static void mtk_iommu_remove_device(struct device *dev)
{
+ struct mtk_iommu_data *data;
+
if (!dev->iommu_fwspec || dev->iommu_fwspec->ops != &mtk_iommu_ops)
return;
+ data = dev->iommu_fwspec->iommu_priv;
+ iommu_device_unlink(&data->iommu, dev);
+
iommu_group_remove_device(dev);
iommu_fwspec_free(dev);
}
@@ -627,6 +636,17 @@ static int mtk_iommu_probe(struct platform_device *pdev)
if (ret)
return ret;
+ ret = iommu_device_sysfs_add(&data->iommu, &pdev->dev, NULL,
+ dev_name(&pdev->dev));
+ if (ret)
+ return ret;
+
+ iommu_device_set_ops(&data->iommu, &mtk_iommu_ops);
+
+ ret = iommu_device_register(&data->iommu);
+ if (ret)
+ return ret;
+
if (!iommu_present(&platform_bus_type))
bus_set_iommu(&platform_bus_type, &mtk_iommu_ops);
@@ -637,6 +657,9 @@ static int mtk_iommu_remove(struct platform_device *pdev)
{
struct mtk_iommu_data *data = platform_get_drvdata(pdev);
+ iommu_device_sysfs_remove(&data->iommu);
+ iommu_device_unregister(&data->iommu);
+
if (iommu_present(&platform_bus_type))
bus_set_iommu(&platform_bus_type, NULL);
diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c
index 2683e9fc0dcf..9f44ee8ea1bc 100644
--- a/drivers/iommu/of_iommu.c
+++ b/drivers/iommu/of_iommu.c
@@ -96,6 +96,49 @@ int of_get_dma_window(struct device_node *dn, const char *prefix, int index,
}
EXPORT_SYMBOL_GPL(of_get_dma_window);
+static bool of_iommu_driver_present(struct device_node *np)
+{
+ /*
+ * If the IOMMU still isn't ready by the time we reach init, assume
+ * it never will be. We don't want to defer indefinitely, nor attempt
+ * to dereference __iommu_of_table after it's been freed.
+ */
+ if (system_state > SYSTEM_BOOTING)
+ return false;
+
+ return of_match_node(&__iommu_of_table, np);
+}
+
+static const struct iommu_ops
+*of_iommu_xlate(struct device *dev, struct of_phandle_args *iommu_spec)
+{
+ const struct iommu_ops *ops;
+ struct fwnode_handle *fwnode = &iommu_spec->np->fwnode;
+ int err;
+
+ ops = iommu_ops_from_fwnode(fwnode);
+ if ((ops && !ops->of_xlate) ||
+ (!ops && !of_iommu_driver_present(iommu_spec->np)))
+ return NULL;
+
+ err = iommu_fwspec_init(dev, &iommu_spec->np->fwnode, ops);
+ if (err)
+ return ERR_PTR(err);
+ /*
+ * The otherwise-empty fwspec handily serves to indicate the specific
+ * IOMMU device we're waiting for, which will be useful if we ever get
+ * a proper probe-ordering dependency mechanism in future.
+ */
+ if (!ops)
+ return ERR_PTR(-EPROBE_DEFER);
+
+ err = ops->of_xlate(dev, iommu_spec);
+ if (err)
+ return ERR_PTR(err);
+
+ return ops;
+}
+
static int __get_pci_rid(struct pci_dev *pdev, u16 alias, void *data)
{
struct of_phandle_args *iommu_spec = data;
@@ -105,10 +148,11 @@ static int __get_pci_rid(struct pci_dev *pdev, u16 alias, void *data)
}
static const struct iommu_ops
-*of_pci_iommu_configure(struct pci_dev *pdev, struct device_node *bridge_np)
+*of_pci_iommu_init(struct pci_dev *pdev, struct device_node *bridge_np)
{
const struct iommu_ops *ops;
struct of_phandle_args iommu_spec;
+ int err;
/*
* Start by tracing the RID alias down the PCI topology as
@@ -123,56 +167,76 @@ static const struct iommu_ops
* bus into the system beyond, and which IOMMU it ends up at.
*/
iommu_spec.np = NULL;
- if (of_pci_map_rid(bridge_np, iommu_spec.args[0], "iommu-map",
- "iommu-map-mask", &iommu_spec.np, iommu_spec.args))
- return NULL;
+ err = of_pci_map_rid(bridge_np, iommu_spec.args[0], "iommu-map",
+ "iommu-map-mask", &iommu_spec.np,
+ iommu_spec.args);
+ if (err)
+ return err == -ENODEV ? NULL : ERR_PTR(err);
- ops = iommu_ops_from_fwnode(&iommu_spec.np->fwnode);
- if (!ops || !ops->of_xlate ||
- iommu_fwspec_init(&pdev->dev, &iommu_spec.np->fwnode, ops) ||
- ops->of_xlate(&pdev->dev, &iommu_spec))
- ops = NULL;
+ ops = of_iommu_xlate(&pdev->dev, &iommu_spec);
of_node_put(iommu_spec.np);
return ops;
}
-const struct iommu_ops *of_iommu_configure(struct device *dev,
- struct device_node *master_np)
+static const struct iommu_ops
+*of_platform_iommu_init(struct device *dev, struct device_node *np)
{
struct of_phandle_args iommu_spec;
- struct device_node *np;
const struct iommu_ops *ops = NULL;
int idx = 0;
- if (dev_is_pci(dev))
- return of_pci_iommu_configure(to_pci_dev(dev), master_np);
-
/*
* We don't currently walk up the tree looking for a parent IOMMU.
* See the `Notes:' section of
* Documentation/devicetree/bindings/iommu/iommu.txt
*/
- while (!of_parse_phandle_with_args(master_np, "iommus",
- "#iommu-cells", idx,
- &iommu_spec)) {
- np = iommu_spec.np;
- ops = iommu_ops_from_fwnode(&np->fwnode);
-
- if (!ops || !ops->of_xlate ||
- iommu_fwspec_init(dev, &np->fwnode, ops) ||
- ops->of_xlate(dev, &iommu_spec))
- goto err_put_node;
-
- of_node_put(np);
+ while (!of_parse_phandle_with_args(np, "iommus", "#iommu-cells",
+ idx, &iommu_spec)) {
+ ops = of_iommu_xlate(dev, &iommu_spec);
+ of_node_put(iommu_spec.np);
idx++;
+ if (IS_ERR_OR_NULL(ops))
+ break;
}
return ops;
+}
+
+const struct iommu_ops *of_iommu_configure(struct device *dev,
+ struct device_node *master_np)
+{
+ const struct iommu_ops *ops;
+ struct iommu_fwspec *fwspec = dev->iommu_fwspec;
+
+ if (!master_np)
+ return NULL;
+
+ if (fwspec) {
+ if (fwspec->ops)
+ return fwspec->ops;
+
+ /* In the deferred case, start again from scratch */
+ iommu_fwspec_free(dev);
+ }
-err_put_node:
- of_node_put(np);
- return NULL;
+ if (dev_is_pci(dev))
+ ops = of_pci_iommu_init(to_pci_dev(dev), master_np);
+ else
+ ops = of_platform_iommu_init(dev, master_np);
+ /*
+ * If we have reason to believe the IOMMU driver missed the initial
+ * add_device callback for dev, replay it to get things in order.
+ */
+ if (!IS_ERR_OR_NULL(ops) && ops->add_device &&
+ dev->bus && !dev->iommu_group) {
+ int err = ops->add_device(dev);
+
+ if (err)
+ ops = ERR_PTR(err);
+ }
+
+ return ops;
}
static int __init of_iommu_init(void)
@@ -183,7 +247,7 @@ static int __init of_iommu_init(void)
for_each_matching_node_and_match(np, matches, &match) {
const of_iommu_init_fn init_fn = match->data;
- if (init_fn(np))
+ if (init_fn && init_fn(np))
pr_err("Failed to initialise IOMMU %s\n",
of_node_full_name(np));
}
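With of_iommu_configure() now returning ERR_PTR(-EPROBE_DEFER) when the IOMMU driver has not bound yet, its caller is expected to propagate that so the client device is retried later. A rough, hypothetical sketch of how a bus-layer caller might consume the new tri-state return (NULL = no IOMMU, ERR_PTR = defer or hard error); the function name and the coherency argument are placeholders:

	static int foo_dma_configure(struct device *dev)
	{
		const struct iommu_ops *ops;

		ops = of_iommu_configure(dev, dev->of_node);
		if (IS_ERR(ops))
			return PTR_ERR(ops);	/* usually -EPROBE_DEFER */

		/* dma_base/size of 0 and non-coherent are placeholder values */
		arch_setup_dma_ops(dev, 0, 0, ops, false);
		return 0;
	}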
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
index e2583cce2cc1..95dfca36ccb9 100644
--- a/drivers/iommu/omap-iommu.c
+++ b/drivers/iommu/omap-iommu.c
@@ -36,28 +36,14 @@
#include "omap-iopgtable.h"
#include "omap-iommu.h"
+static const struct iommu_ops omap_iommu_ops;
+
#define to_iommu(dev) \
((struct omap_iommu *)platform_get_drvdata(to_platform_device(dev)))
/* bitmap of the page sizes currently supported */
#define OMAP_IOMMU_PGSIZES (SZ_4K | SZ_64K | SZ_1M | SZ_16M)
-/**
- * struct omap_iommu_domain - omap iommu domain
- * @pgtable: the page table
- * @iommu_dev: an omap iommu device attached to this domain. only a single
- * iommu device can be attached for now.
- * @dev: Device using this domain.
- * @lock: domain lock, should be taken when attaching/detaching
- */
-struct omap_iommu_domain {
- u32 *pgtable;
- struct omap_iommu *iommu_dev;
- struct device *dev;
- spinlock_t lock;
- struct iommu_domain domain;
-};
-
#define MMU_LOCK_BASE_SHIFT 10
#define MMU_LOCK_BASE_MASK (0x1f << MMU_LOCK_BASE_SHIFT)
#define MMU_LOCK_BASE(x) \
@@ -818,33 +804,14 @@ static irqreturn_t iommu_fault_handler(int irq, void *data)
return IRQ_NONE;
}
-static int device_match_by_alias(struct device *dev, void *data)
-{
- struct omap_iommu *obj = to_iommu(dev);
- const char *name = data;
-
- pr_debug("%s: %s %s\n", __func__, obj->name, name);
-
- return strcmp(obj->name, name) == 0;
-}
-
/**
* omap_iommu_attach() - attach iommu device to an iommu domain
- * @name: name of target omap iommu device
+ * @obj: target omap iommu device
* @iopgd: page table
**/
-static struct omap_iommu *omap_iommu_attach(const char *name, u32 *iopgd)
+static int omap_iommu_attach(struct omap_iommu *obj, u32 *iopgd)
{
int err;
- struct device *dev;
- struct omap_iommu *obj;
-
- dev = driver_find_device(&omap_iommu_driver.driver, NULL, (void *)name,
- device_match_by_alias);
- if (!dev)
- return ERR_PTR(-ENODEV);
-
- obj = to_iommu(dev);
spin_lock(&obj->iommu_lock);
@@ -857,11 +824,13 @@ static struct omap_iommu *omap_iommu_attach(const char *name, u32 *iopgd)
spin_unlock(&obj->iommu_lock);
dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
- return obj;
+
+ return 0;
err_enable:
spin_unlock(&obj->iommu_lock);
- return ERR_PTR(err);
+
+ return err;
}
/**
@@ -928,28 +897,26 @@ static int omap_iommu_probe(struct platform_device *pdev)
int irq;
struct omap_iommu *obj;
struct resource *res;
- struct iommu_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct device_node *of = pdev->dev.of_node;
+ if (!of) {
+ pr_err("%s: only DT-based devices are supported\n", __func__);
+ return -ENODEV;
+ }
+
obj = devm_kzalloc(&pdev->dev, sizeof(*obj) + MMU_REG_SIZE, GFP_KERNEL);
if (!obj)
return -ENOMEM;
- if (of) {
- obj->name = dev_name(&pdev->dev);
- obj->nr_tlb_entries = 32;
- err = of_property_read_u32(of, "ti,#tlb-entries",
- &obj->nr_tlb_entries);
- if (err && err != -EINVAL)
- return err;
- if (obj->nr_tlb_entries != 32 && obj->nr_tlb_entries != 8)
- return -EINVAL;
- if (of_find_property(of, "ti,iommu-bus-err-back", NULL))
- obj->has_bus_err_back = MMU_GP_REG_BUS_ERR_BACK_EN;
- } else {
- obj->nr_tlb_entries = pdata->nr_tlb_entries;
- obj->name = pdata->name;
- }
+ obj->name = dev_name(&pdev->dev);
+ obj->nr_tlb_entries = 32;
+ err = of_property_read_u32(of, "ti,#tlb-entries", &obj->nr_tlb_entries);
+ if (err && err != -EINVAL)
+ return err;
+ if (obj->nr_tlb_entries != 32 && obj->nr_tlb_entries != 8)
+ return -EINVAL;
+ if (of_find_property(of, "ti,iommu-bus-err-back", NULL))
+ obj->has_bus_err_back = MMU_GP_REG_BUS_ERR_BACK_EN;
obj->dev = &pdev->dev;
obj->ctx = (void *)obj + sizeof(*obj);
@@ -976,19 +943,46 @@ static int omap_iommu_probe(struct platform_device *pdev)
return err;
platform_set_drvdata(pdev, obj);
+ obj->group = iommu_group_alloc();
+ if (IS_ERR(obj->group))
+ return PTR_ERR(obj->group);
+
+ err = iommu_device_sysfs_add(&obj->iommu, obj->dev, NULL, obj->name);
+ if (err)
+ goto out_group;
+
+ iommu_device_set_ops(&obj->iommu, &omap_iommu_ops);
+
+ err = iommu_device_register(&obj->iommu);
+ if (err)
+ goto out_sysfs;
+
pm_runtime_irq_safe(obj->dev);
pm_runtime_enable(obj->dev);
omap_iommu_debugfs_add(obj);
dev_info(&pdev->dev, "%s registered\n", obj->name);
+
return 0;
+
+out_sysfs:
+ iommu_device_sysfs_remove(&obj->iommu);
+out_group:
+ iommu_group_put(obj->group);
+ return err;
}
static int omap_iommu_remove(struct platform_device *pdev)
{
struct omap_iommu *obj = platform_get_drvdata(pdev);
+ iommu_group_put(obj->group);
+ obj->group = NULL;
+
+ iommu_device_sysfs_remove(&obj->iommu);
+ iommu_device_unregister(&obj->iommu);
+
omap_iommu_debugfs_remove(obj);
pm_runtime_disable(obj->dev);
@@ -1077,11 +1071,11 @@ static int
omap_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
- struct omap_iommu *oiommu;
struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
+ struct omap_iommu *oiommu;
int ret = 0;
- if (!arch_data || !arch_data->name) {
+ if (!arch_data || !arch_data->iommu_dev) {
dev_err(dev, "device doesn't have an associated iommu\n");
return -EINVAL;
}
@@ -1095,15 +1089,16 @@ omap_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
goto out;
}
+ oiommu = arch_data->iommu_dev;
+
/* get a handle to and enable the omap iommu */
- oiommu = omap_iommu_attach(arch_data->name, omap_domain->pgtable);
- if (IS_ERR(oiommu)) {
- ret = PTR_ERR(oiommu);
+ ret = omap_iommu_attach(oiommu, omap_domain->pgtable);
+ if (ret) {
dev_err(dev, "can't get omap iommu: %d\n", ret);
goto out;
}
- omap_domain->iommu_dev = arch_data->iommu_dev = oiommu;
+ omap_domain->iommu_dev = oiommu;
omap_domain->dev = dev;
oiommu->domain = domain;
@@ -1116,7 +1111,6 @@ static void _omap_iommu_detach_dev(struct omap_iommu_domain *omap_domain,
struct device *dev)
{
struct omap_iommu *oiommu = dev_to_omap_iommu(dev);
- struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
/* only a single device is supported per domain for now */
if (omap_domain->iommu_dev != oiommu) {
@@ -1128,7 +1122,7 @@ static void _omap_iommu_detach_dev(struct omap_iommu_domain *omap_domain,
omap_iommu_detach(oiommu);
- omap_domain->iommu_dev = arch_data->iommu_dev = NULL;
+ omap_domain->iommu_dev = NULL;
omap_domain->dev = NULL;
oiommu->domain = NULL;
}
@@ -1232,8 +1226,11 @@ static phys_addr_t omap_iommu_iova_to_phys(struct iommu_domain *domain,
static int omap_iommu_add_device(struct device *dev)
{
struct omap_iommu_arch_data *arch_data;
+ struct omap_iommu *oiommu;
+ struct iommu_group *group;
struct device_node *np;
struct platform_device *pdev;
+ int ret;
/*
* Allocate the archdata iommu structure for DT-based devices.
@@ -1254,15 +1251,41 @@ static int omap_iommu_add_device(struct device *dev)
return -EINVAL;
}
+ oiommu = platform_get_drvdata(pdev);
+ if (!oiommu) {
+ of_node_put(np);
+ return -EINVAL;
+ }
+
arch_data = kzalloc(sizeof(*arch_data), GFP_KERNEL);
if (!arch_data) {
of_node_put(np);
return -ENOMEM;
}
- arch_data->name = kstrdup(dev_name(&pdev->dev), GFP_KERNEL);
+ ret = iommu_device_link(&oiommu->iommu, dev);
+ if (ret) {
+ kfree(arch_data);
+ of_node_put(np);
+ return ret;
+ }
+
+ arch_data->iommu_dev = oiommu;
dev->archdata.iommu = arch_data;
+ /*
+ * IOMMU group initialization calls into omap_iommu_device_group, which
+ * needs a valid dev->archdata.iommu pointer
+ */
+ group = iommu_group_get_for_dev(dev);
+ if (IS_ERR(group)) {
+ iommu_device_unlink(&oiommu->iommu, dev);
+ dev->archdata.iommu = NULL;
+ kfree(arch_data);
+ return PTR_ERR(group);
+ }
+ iommu_group_put(group);
+
of_node_put(np);
return 0;
@@ -1275,8 +1298,23 @@ static void omap_iommu_remove_device(struct device *dev)
if (!dev->of_node || !arch_data)
return;
- kfree(arch_data->name);
+ iommu_device_unlink(&arch_data->iommu_dev->iommu, dev);
+ iommu_group_remove_device(dev);
+
+ dev->archdata.iommu = NULL;
kfree(arch_data);
+
+}
+
+static struct iommu_group *omap_iommu_device_group(struct device *dev)
+{
+ struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
+ struct iommu_group *group = NULL;
+
+ if (arch_data->iommu_dev)
+ group = arch_data->iommu_dev->group;
+
+ return group;
}
static const struct iommu_ops omap_iommu_ops = {
@@ -1290,6 +1328,7 @@ static const struct iommu_ops omap_iommu_ops = {
.iova_to_phys = omap_iommu_iova_to_phys,
.add_device = omap_iommu_add_device,
.remove_device = omap_iommu_remove_device,
+ .device_group = omap_iommu_device_group,
.pgsize_bitmap = OMAP_IOMMU_PGSIZES,
};
@@ -1299,6 +1338,7 @@ static int __init omap_iommu_init(void)
const unsigned long flags = SLAB_HWCACHE_ALIGN;
size_t align = 1 << 10; /* L2 pagetable alignement */
struct device_node *np;
+ int ret;
np = of_find_matching_node(NULL, omap_iommu_of_match);
if (!np)
@@ -1312,11 +1352,25 @@ static int __init omap_iommu_init(void)
return -ENOMEM;
iopte_cachep = p;
- bus_set_iommu(&platform_bus_type, &omap_iommu_ops);
-
omap_iommu_debugfs_init();
- return platform_driver_register(&omap_iommu_driver);
+ ret = platform_driver_register(&omap_iommu_driver);
+ if (ret) {
+ pr_err("%s: failed to register driver\n", __func__);
+ goto fail_driver;
+ }
+
+ ret = bus_set_iommu(&platform_bus_type, &omap_iommu_ops);
+ if (ret)
+ goto fail_bus;
+
+ return 0;
+
+fail_bus:
+ platform_driver_unregister(&omap_iommu_driver);
+fail_driver:
+ kmem_cache_destroy(iopte_cachep);
+ return ret;
}
subsys_initcall(omap_iommu_init);
/* must be ready before omap3isp is probed */
diff --git a/drivers/iommu/omap-iommu.h b/drivers/iommu/omap-iommu.h
index 59628e5017b4..6e70515e6038 100644
--- a/drivers/iommu/omap-iommu.h
+++ b/drivers/iommu/omap-iommu.h
@@ -14,6 +14,7 @@
#define _OMAP_IOMMU_H
#include <linux/bitops.h>
+#include <linux/iommu.h>
#define for_each_iotlb_cr(obj, n, __i, cr) \
for (__i = 0; \
@@ -27,6 +28,23 @@ struct iotlb_entry {
u32 endian, elsz, mixed;
};
+/**
+ * struct omap_iommu_domain - omap iommu domain
+ * @pgtable: the page table
+ * @iommu_dev: an omap iommu device attached to this domain. only a single
+ * iommu device can be attached for now.
+ * @dev: Device using this domain.
+ * @lock: domain lock, should be taken when attaching/detaching
+ * @domain: generic domain handle used by iommu core code
+ */
+struct omap_iommu_domain {
+ u32 *pgtable;
+ struct omap_iommu *iommu_dev;
+ struct device *dev;
+ spinlock_t lock;
+ struct iommu_domain domain;
+};
+
struct omap_iommu {
const char *name;
void __iomem *regbase;
@@ -50,6 +68,22 @@ struct omap_iommu {
int has_bus_err_back;
u32 id;
+
+ struct iommu_device iommu;
+ struct iommu_group *group;
+};
+
+/**
+ * struct omap_iommu_arch_data - omap iommu private data
+ * @iommu_dev: handle of the iommu device
+ *
+ * This is an omap iommu private data object, which binds an iommu user
+ * to its iommu device. This object should be placed at the iommu user's
+ * dev_archdata so generic IOMMU API can be used without having to
+ * utilize omap-specific plumbing anymore.
+ */
+struct omap_iommu_arch_data {
+ struct omap_iommu *iommu_dev;
};
struct cr_regs {
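
The OMAP (and, further down, Rockchip) hunks above all follow the same core-IOMMU registration sequence at probe time. The sketch below condenses that flow using only the helpers visible in the diff; struct example_iommu, example_iommu_ops and the error labels are illustrative stand-ins, not part of the patch.

#include <linux/err.h>
#include <linux/iommu.h>

struct example_iommu {
	struct device *dev;
	const char *name;
	struct iommu_device iommu;	/* handle registered with the IOMMU core */
	struct iommu_group *group;	/* default group handed out to client devices */
};

static const struct iommu_ops example_iommu_ops;	/* illustrative placeholder */

static int example_iommu_register(struct example_iommu *obj)
{
	int err;

	obj->group = iommu_group_alloc();	/* one group per IOMMU instance */
	if (IS_ERR(obj->group))
		return PTR_ERR(obj->group);

	/* expose the instance under /sys/class/iommu/ */
	err = iommu_device_sysfs_add(&obj->iommu, obj->dev, NULL, obj->name);
	if (err)
		goto out_group;

	iommu_device_set_ops(&obj->iommu, &example_iommu_ops);

	err = iommu_device_register(&obj->iommu);	/* make it known to the IOMMU core */
	if (err)
		goto out_sysfs;

	return 0;

out_sysfs:
	iommu_device_sysfs_remove(&obj->iommu);
out_group:
	iommu_group_put(obj->group);
	return err;
}

The matching .device_group callback can then simply return obj->group, so that every client device of one IOMMU instance lands in the same group, which is exactly what omap_iommu_device_group() does above.
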
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index 9afcbf79f0b0..4ba48a26b389 100644
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -8,6 +8,7 @@
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-iommu.h>
+#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -90,6 +91,7 @@ struct rk_iommu {
void __iomem **bases;
int num_mmu;
int irq;
+ struct iommu_device iommu;
struct list_head node; /* entry in rk_iommu_domain.iommus */
struct iommu_domain *domain; /* domain to which iommu is attached */
};
@@ -1032,6 +1034,7 @@ static int rk_iommu_group_set_iommudata(struct iommu_group *group,
static int rk_iommu_add_device(struct device *dev)
{
struct iommu_group *group;
+ struct rk_iommu *iommu;
int ret;
if (!rk_iommu_is_dev_iommu_master(dev))
@@ -1054,6 +1057,10 @@ static int rk_iommu_add_device(struct device *dev)
if (ret)
goto err_remove_device;
+ iommu = rk_iommu_from_dev(dev);
+ if (iommu)
+ iommu_device_link(&iommu->iommu, dev);
+
iommu_group_put(group);
return 0;
@@ -1067,9 +1074,15 @@ err_put_group:
static void rk_iommu_remove_device(struct device *dev)
{
+ struct rk_iommu *iommu;
+
if (!rk_iommu_is_dev_iommu_master(dev))
return;
+ iommu = rk_iommu_from_dev(dev);
+ if (iommu)
+ iommu_device_unlink(&iommu->iommu, dev);
+
iommu_group_remove_device(dev);
}
@@ -1117,7 +1130,7 @@ static int rk_iommu_probe(struct platform_device *pdev)
struct rk_iommu *iommu;
struct resource *res;
int num_res = pdev->num_resources;
- int i;
+ int err, i;
iommu = devm_kzalloc(dev, sizeof(*iommu), GFP_KERNEL);
if (!iommu)
@@ -1150,11 +1163,25 @@ static int rk_iommu_probe(struct platform_device *pdev)
return -ENXIO;
}
- return 0;
+ err = iommu_device_sysfs_add(&iommu->iommu, dev, NULL, dev_name(dev));
+ if (err)
+ return err;
+
+ iommu_device_set_ops(&iommu->iommu, &rk_iommu_ops);
+ err = iommu_device_register(&iommu->iommu);
+
+ return err;
}
static int rk_iommu_remove(struct platform_device *pdev)
{
+ struct rk_iommu *iommu = platform_get_drvdata(pdev);
+
+ if (iommu) {
+ iommu_device_sysfs_remove(&iommu->iommu);
+ iommu_device_unregister(&iommu->iommu);
+ }
+
return 0;
}
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index 9305964250ac..eeb19f560a05 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -15,6 +15,7 @@
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
+#include <linux/dma-mapping.h>
#include <soc/tegra/ahb.h>
#include <soc/tegra/mc.h>
diff --git a/drivers/leds/leds-lp5521.c b/drivers/leds/leds-lp5521.c
index 549b315ca8fe..f53c8cda1bde 100644
--- a/drivers/leds/leds-lp5521.c
+++ b/drivers/leds/leds-lp5521.c
@@ -281,7 +281,7 @@ static void lp5521_firmware_loaded(struct lp55xx_chip *chip)
}
/*
- * Program momery sequence
+ * Program memory sequence
* 1) set engine mode to "LOAD"
* 2) write firmware data into program memory
*/
diff --git a/drivers/leds/leds-lp5523.c b/drivers/leds/leds-lp5523.c
index c5b30f06218a..e9ba8cd32d66 100644
--- a/drivers/leds/leds-lp5523.c
+++ b/drivers/leds/leds-lp5523.c
@@ -387,7 +387,7 @@ static void lp5523_firmware_loaded(struct lp55xx_chip *chip)
}
/*
- * Program momery sequence
+ * Program memory sequence
* 1) set engine mode to "LOAD"
* 2) write firmware data into program memory
*/
diff --git a/drivers/leds/leds-lp5562.c b/drivers/leds/leds-lp5562.c
index b75333803a63..90892585bcb5 100644
--- a/drivers/leds/leds-lp5562.c
+++ b/drivers/leds/leds-lp5562.c
@@ -270,7 +270,7 @@ static void lp5562_firmware_loaded(struct lp55xx_chip *chip)
}
/*
- * Program momery sequence
+ * Program memory sequence
* 1) set engine mode to "LOAD"
* 2) write firmware data into program memory
*/
diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
index 54a06c3a2b8c..6a4aa608ad95 100644
--- a/drivers/lightnvm/core.c
+++ b/drivers/lightnvm/core.c
@@ -74,7 +74,7 @@ static int nvm_reserve_luns(struct nvm_dev *dev, int lun_begin, int lun_end)
return 0;
err:
- while (--i > lun_begin)
+ while (--i >= lun_begin)
clear_bit(i, dev->lun_map);
return -EBUSY;
@@ -211,7 +211,7 @@ static struct nvm_tgt_dev *nvm_create_tgt_dev(struct nvm_dev *dev,
return tgt_dev;
err_ch:
- while (--i > 0)
+ while (--i >= 0)
kfree(dev_map->chnls[i].lun_offs);
kfree(luns);
err_luns:
diff --git a/drivers/macintosh/macio_sysfs.c b/drivers/macintosh/macio_sysfs.c
index 8eb40afbd0f5..0b1f9c76c68d 100644
--- a/drivers/macintosh/macio_sysfs.c
+++ b/drivers/macintosh/macio_sysfs.c
@@ -41,12 +41,7 @@ compatible_show (struct device *dev, struct device_attribute *attr, char *buf)
static ssize_t modalias_show (struct device *dev, struct device_attribute *attr,
char *buf)
{
- int len = of_device_get_modalias(dev, buf, PAGE_SIZE - 2);
-
- buf[len] = '\n';
- buf[len+1] = 0;
-
- return len+1;
+ return of_device_modalias(dev, buf, PAGE_SIZE);
}
static ssize_t devspec_show(struct device *dev,
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 7468a22f9d10..906103c168ea 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -200,6 +200,7 @@ config BLK_DEV_DM_BUILTIN
config BLK_DEV_DM
tristate "Device mapper support"
select BLK_DEV_DM_BUILTIN
+ select DAX
---help---
Device-mapper is a low level volume manager. It works by allowing
people to specify mappings for ranges of logical sectors. Various
@@ -502,13 +503,24 @@ config DM_LOG_WRITES
If unsure, say N.
config DM_INTEGRITY
- tristate "Integrity target"
+ tristate "Integrity target support"
depends on BLK_DEV_DM
select BLK_DEV_INTEGRITY
select DM_BUFIO
select CRYPTO
select ASYNC_XOR
---help---
- This is the integrity target.
+ This device-mapper target emulates a block device that has
+ additional per-sector tags that can be used for storing
+ integrity information.
+
+ This integrity target is used with the dm-crypt target to
+ provide authenticated disk encryption or it can be used
+ standalone.
+
+ To compile this code as a module, choose M here: the module will
+ be called dm-integrity.
+
+ If unsure, say N.
endif # MD
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 85e3f21c2514..e57353e39168 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -767,16 +767,12 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size,
}
n = d->nr_stripes * sizeof(atomic_t);
- d->stripe_sectors_dirty = n < PAGE_SIZE << 6
- ? kzalloc(n, GFP_KERNEL)
- : vzalloc(n);
+ d->stripe_sectors_dirty = kvzalloc(n, GFP_KERNEL);
if (!d->stripe_sectors_dirty)
return -ENOMEM;
n = BITS_TO_LONGS(d->nr_stripes) * sizeof(unsigned long);
- d->full_dirty_stripes = n < PAGE_SIZE << 6
- ? kzalloc(n, GFP_KERNEL)
- : vzalloc(n);
+ d->full_dirty_stripes = kvzalloc(n, GFP_KERNEL);
if (!d->full_dirty_stripes)
return -ENOMEM;
diff --git a/drivers/md/bcache/util.h b/drivers/md/bcache/util.h
index 5d13930f0f22..cb8d2ccbb6c6 100644
--- a/drivers/md/bcache/util.h
+++ b/drivers/md/bcache/util.h
@@ -43,11 +43,7 @@ struct closure;
(heap)->used = 0; \
(heap)->size = (_size); \
_bytes = (heap)->size * sizeof(*(heap)->data); \
- (heap)->data = NULL; \
- if (_bytes < KMALLOC_MAX_SIZE) \
- (heap)->data = kmalloc(_bytes, (gfp)); \
- if ((!(heap)->data) && ((gfp) & GFP_KERNEL)) \
- (heap)->data = vmalloc(_bytes); \
+ (heap)->data = kvmalloc(_bytes, (gfp) & GFP_KERNEL); \
(heap)->data; \
})
@@ -136,12 +132,8 @@ do { \
\
(fifo)->mask = _allocated_size - 1; \
(fifo)->front = (fifo)->back = 0; \
- (fifo)->data = NULL; \
\
- if (_bytes < KMALLOC_MAX_SIZE) \
- (fifo)->data = kmalloc(_bytes, (gfp)); \
- if ((!(fifo)->data) && ((gfp) & GFP_KERNEL)) \
- (fifo)->data = vmalloc(_bytes); \
+ (fifo)->data = kvmalloc(_bytes, (gfp) & GFP_KERNEL); \
(fifo)->data; \
})
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index c92c31b23e54..5db11a405129 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -406,7 +406,7 @@ static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
if (gfp_mask & __GFP_NORETRY)
noio_flag = memalloc_noio_save();
- ptr = __vmalloc(c->block_size, gfp_mask | __GFP_HIGHMEM, PAGE_KERNEL);
+ ptr = __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL);
if (gfp_mask & __GFP_NORETRY)
memalloc_noio_restore(noio_flag);
diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
index 8568dbd50ba4..4a4e9c75fc4c 100644
--- a/drivers/md/dm-cache-metadata.c
+++ b/drivers/md/dm-cache-metadata.c
@@ -1624,17 +1624,19 @@ void dm_cache_metadata_set_stats(struct dm_cache_metadata *cmd,
int dm_cache_commit(struct dm_cache_metadata *cmd, bool clean_shutdown)
{
- int r;
+ int r = -EINVAL;
flags_mutator mutator = (clean_shutdown ? set_clean_shutdown :
clear_clean_shutdown);
WRITE_LOCK(cmd);
+ if (cmd->fail_io)
+ goto out;
+
r = __commit_transaction(cmd, mutator);
if (r)
goto out;
r = __begin_transaction(cmd);
-
out:
WRITE_UNLOCK(cmd);
return r;
@@ -1646,7 +1648,8 @@ int dm_cache_get_free_metadata_block_count(struct dm_cache_metadata *cmd,
int r = -EINVAL;
READ_LOCK(cmd);
- r = dm_sm_get_nr_free(cmd->metadata_sm, result);
+ if (!cmd->fail_io)
+ r = dm_sm_get_nr_free(cmd->metadata_sm, result);
READ_UNLOCK(cmd);
return r;
@@ -1658,7 +1661,8 @@ int dm_cache_get_metadata_dev_size(struct dm_cache_metadata *cmd,
int r = -EINVAL;
READ_LOCK(cmd);
- r = dm_sm_get_nr_blocks(cmd->metadata_sm, result);
+ if (!cmd->fail_io)
+ r = dm_sm_get_nr_blocks(cmd->metadata_sm, result);
READ_UNLOCK(cmd);
return r;
diff --git a/drivers/md/dm-cache-policy-smq.c b/drivers/md/dm-cache-policy-smq.c
index e0c40aec5e96..72479bd61e11 100644
--- a/drivers/md/dm-cache-policy-smq.c
+++ b/drivers/md/dm-cache-policy-smq.c
@@ -1120,28 +1120,30 @@ static bool clean_target_met(struct smq_policy *mq, bool idle)
* Cache entries may not be populated. So we cannot rely on the
* size of the clean queue.
*/
- unsigned nr_clean = from_cblock(mq->cache_size) - q_size(&mq->dirty);
+ unsigned nr_clean;
- if (idle)
+ if (idle) {
/*
* We'd like to clean everything.
*/
return q_size(&mq->dirty) == 0u;
- else
- return (nr_clean + btracker_nr_writebacks_queued(mq->bg_work)) >=
- percent_to_target(mq, CLEAN_TARGET);
+ }
+
+ nr_clean = from_cblock(mq->cache_size) - q_size(&mq->dirty);
+ return (nr_clean + btracker_nr_writebacks_queued(mq->bg_work)) >=
+ percent_to_target(mq, CLEAN_TARGET);
}
static bool free_target_met(struct smq_policy *mq, bool idle)
{
- unsigned nr_free = from_cblock(mq->cache_size) -
- mq->cache_alloc.nr_allocated;
+ unsigned nr_free;
- if (idle)
- return (nr_free + btracker_nr_demotions_queued(mq->bg_work)) >=
- percent_to_target(mq, FREE_TARGET);
- else
+ if (!idle)
return true;
+
+ nr_free = from_cblock(mq->cache_size) - mq->cache_alloc.nr_allocated;
+ return (nr_free + btracker_nr_demotions_queued(mq->bg_work)) >=
+ percent_to_target(mq, FREE_TARGET);
}
/*----------------------------------------------------------------*/
@@ -1214,7 +1216,11 @@ static void queue_promotion(struct smq_policy *mq, dm_oblock_t oblock,
return;
if (allocator_empty(&mq->cache_alloc)) {
- if (!free_target_met(mq, false))
+ /*
+ * We always claim to be 'idle' to ensure some demotions happen
+ * with continuous loads.
+ */
+ if (!free_target_met(mq, true))
queue_demotion(mq);
return;
}
diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h
index 97db4d11c05a..52ca8d059e82 100644
--- a/drivers/md/dm-core.h
+++ b/drivers/md/dm-core.h
@@ -58,6 +58,7 @@ struct mapped_device {
struct target_type *immutable_target_type;
struct gendisk *disk;
+ struct dax_device *dax_dev;
char name[16];
void *interface_ptr;
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index 2d5d7064acbf..0555b4410e05 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -1691,6 +1691,7 @@ static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl *param_kern
struct dm_ioctl *dmi;
int secure_data;
const size_t minimum_data_size = offsetof(struct dm_ioctl, data);
+ unsigned noio_flag;
if (copy_from_user(param_kernel, user, minimum_data_size))
return -EFAULT;
@@ -1713,15 +1714,9 @@ static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl *param_kern
* Use kmalloc() rather than vmalloc() when we can.
*/
dmi = NULL;
- if (param_kernel->data_size <= KMALLOC_MAX_SIZE)
- dmi = kmalloc(param_kernel->data_size, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
-
- if (!dmi) {
- unsigned noio_flag;
- noio_flag = memalloc_noio_save();
- dmi = __vmalloc(param_kernel->data_size, GFP_NOIO | __GFP_HIGH | __GFP_HIGHMEM, PAGE_KERNEL);
- memalloc_noio_restore(noio_flag);
- }
+ noio_flag = memalloc_noio_save();
+ dmi = kvmalloc(param_kernel->data_size, GFP_KERNEL);
+ memalloc_noio_restore(noio_flag);
if (!dmi) {
if (secure_data && clear_user(user, param_kernel->data_size))
diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c
index a5120961632a..7d42a9d9f406 100644
--- a/drivers/md/dm-linear.c
+++ b/drivers/md/dm-linear.c
@@ -9,6 +9,7 @@
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
+#include <linux/dax.h>
#include <linux/slab.h>
#include <linux/device-mapper.h>
@@ -142,22 +143,20 @@ static int linear_iterate_devices(struct dm_target *ti,
return fn(ti, lc->dev, lc->start, ti->len, data);
}
-static long linear_direct_access(struct dm_target *ti, sector_t sector,
- void **kaddr, pfn_t *pfn, long size)
+static long linear_dax_direct_access(struct dm_target *ti, pgoff_t pgoff,
+ long nr_pages, void **kaddr, pfn_t *pfn)
{
+ long ret;
struct linear_c *lc = ti->private;
struct block_device *bdev = lc->dev->bdev;
- struct blk_dax_ctl dax = {
- .sector = linear_map_sector(ti, sector),
- .size = size,
- };
- long ret;
-
- ret = bdev_direct_access(bdev, &dax);
- *kaddr = dax.addr;
- *pfn = dax.pfn;
-
- return ret;
+ struct dax_device *dax_dev = lc->dev->dax_dev;
+ sector_t dev_sector, sector = pgoff * PAGE_SECTORS;
+
+ dev_sector = linear_map_sector(ti, sector);
+ ret = bdev_dax_pgoff(bdev, dev_sector, nr_pages * PAGE_SIZE, &pgoff);
+ if (ret)
+ return ret;
+ return dax_direct_access(dax_dev, pgoff, nr_pages, kaddr, pfn);
}
static struct target_type linear_target = {
@@ -171,7 +170,7 @@ static struct target_type linear_target = {
.status = linear_status,
.prepare_ioctl = linear_prepare_ioctl,
.iterate_devices = linear_iterate_devices,
- .direct_access = linear_direct_access,
+ .direct_access = linear_dax_direct_access,
};
int __init dm_linear_init(void)
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index a48130b90157..2af27026aa2e 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -720,11 +720,10 @@ int dm_old_init_request_queue(struct mapped_device *md, struct dm_table *t)
return 0;
}
-static int dm_mq_init_request(void *data, struct request *rq,
- unsigned int hctx_idx, unsigned int request_idx,
- unsigned int numa_node)
+static int dm_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
+ unsigned int hctx_idx, unsigned int numa_node)
{
- return __dm_rq_init_rq(data, rq);
+ return __dm_rq_init_rq(set->driver_data, rq);
}
static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index c65feeada864..e152d9817c81 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -2302,8 +2302,8 @@ static int origin_map(struct dm_target *ti, struct bio *bio)
return do_origin(o->dev, bio);
}
-static long origin_direct_access(struct dm_target *ti, sector_t sector,
- void **kaddr, pfn_t *pfn, long size)
+static long origin_dax_direct_access(struct dm_target *ti, pgoff_t pgoff,
+ long nr_pages, void **kaddr, pfn_t *pfn)
{
DMWARN("device does not support dax.");
return -EIO;
@@ -2368,7 +2368,7 @@ static struct target_type origin_target = {
.postsuspend = origin_postsuspend,
.status = origin_status,
.iterate_devices = origin_iterate_devices,
- .direct_access = origin_direct_access,
+ .direct_access = origin_dax_direct_access,
};
static struct target_type snapshot_target = {
diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c
index 0250e7e521ab..6028d8247f58 100644
--- a/drivers/md/dm-stats.c
+++ b/drivers/md/dm-stats.c
@@ -146,12 +146,7 @@ static void *dm_kvzalloc(size_t alloc_size, int node)
if (!claim_shared_memory(alloc_size))
return NULL;
- if (alloc_size <= KMALLOC_MAX_SIZE) {
- p = kzalloc_node(alloc_size, GFP_KERNEL | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN, node);
- if (p)
- return p;
- }
- p = vzalloc_node(alloc_size, node);
+ p = kvzalloc_node(alloc_size, GFP_KERNEL | __GFP_NOMEMALLOC, node);
if (p)
return p;
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index 4b50ae115c6d..75152482f3ad 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -11,6 +11,7 @@
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
+#include <linux/dax.h>
#include <linux/slab.h>
#include <linux/log2.h>
@@ -310,27 +311,25 @@ static int stripe_map(struct dm_target *ti, struct bio *bio)
return DM_MAPIO_REMAPPED;
}
-static long stripe_direct_access(struct dm_target *ti, sector_t sector,
- void **kaddr, pfn_t *pfn, long size)
+static long stripe_dax_direct_access(struct dm_target *ti, pgoff_t pgoff,
+ long nr_pages, void **kaddr, pfn_t *pfn)
{
+ sector_t dev_sector, sector = pgoff * PAGE_SECTORS;
struct stripe_c *sc = ti->private;
- uint32_t stripe;
+ struct dax_device *dax_dev;
struct block_device *bdev;
- struct blk_dax_ctl dax = {
- .size = size,
- };
+ uint32_t stripe;
long ret;
- stripe_map_sector(sc, sector, &stripe, &dax.sector);
-
- dax.sector += sc->stripe[stripe].physical_start;
+ stripe_map_sector(sc, sector, &stripe, &dev_sector);
+ dev_sector += sc->stripe[stripe].physical_start;
+ dax_dev = sc->stripe[stripe].dev->dax_dev;
bdev = sc->stripe[stripe].dev->bdev;
- ret = bdev_direct_access(bdev, &dax);
- *kaddr = dax.addr;
- *pfn = dax.pfn;
-
- return ret;
+ ret = bdev_dax_pgoff(bdev, dev_sector, nr_pages * PAGE_SIZE, &pgoff);
+ if (ret)
+ return ret;
+ return dax_direct_access(dax_dev, pgoff, nr_pages, kaddr, pfn);
}
/*
@@ -451,7 +450,7 @@ static struct target_type stripe_target = {
.status = stripe_status,
.iterate_devices = stripe_iterate_devices,
.io_hints = stripe_io_hints,
- .direct_access = stripe_direct_access,
+ .direct_access = stripe_dax_direct_access,
};
int __init dm_stripe_init(void)
diff --git a/drivers/md/dm-target.c b/drivers/md/dm-target.c
index 6264ff00dcf0..b242b750542f 100644
--- a/drivers/md/dm-target.c
+++ b/drivers/md/dm-target.c
@@ -142,8 +142,8 @@ static void io_err_release_clone_rq(struct request *clone)
{
}
-static long io_err_direct_access(struct dm_target *ti, sector_t sector,
- void **kaddr, pfn_t *pfn, long size)
+static long io_err_dax_direct_access(struct dm_target *ti, pgoff_t pgoff,
+ long nr_pages, void **kaddr, pfn_t *pfn)
{
return -EIO;
}
@@ -157,7 +157,7 @@ static struct target_type error_target = {
.map = io_err_map,
.clone_and_map_rq = io_err_clone_and_map_rq,
.release_clone_rq = io_err_release_clone_rq,
- .direct_access = io_err_direct_access,
+ .direct_access = io_err_dax_direct_access,
};
int __init dm_target_init(void)
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 268edf402bbb..6ef9500226c0 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -16,6 +16,7 @@
#include <linux/blkpg.h>
#include <linux/bio.h>
#include <linux/mempool.h>
+#include <linux/dax.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/hdreg.h>
@@ -629,6 +630,7 @@ static int open_table_device(struct table_device *td, dev_t dev,
}
td->dm_dev.bdev = bdev;
+ td->dm_dev.dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
return 0;
}
@@ -642,7 +644,9 @@ static void close_table_device(struct table_device *td, struct mapped_device *md
bd_unlink_disk_holder(td->dm_dev.bdev, dm_disk(md));
blkdev_put(td->dm_dev.bdev, td->dm_dev.mode | FMODE_EXCL);
+ put_dax(td->dm_dev.dax_dev);
td->dm_dev.bdev = NULL;
+ td->dm_dev.dax_dev = NULL;
}
static struct table_device *find_table_device(struct list_head *l, dev_t dev,
@@ -920,31 +924,49 @@ int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
}
EXPORT_SYMBOL_GPL(dm_set_target_max_io_len);
-static long dm_blk_direct_access(struct block_device *bdev, sector_t sector,
- void **kaddr, pfn_t *pfn, long size)
+static struct dm_target *dm_dax_get_live_target(struct mapped_device *md,
+ sector_t sector, int *srcu_idx)
{
- struct mapped_device *md = bdev->bd_disk->private_data;
struct dm_table *map;
struct dm_target *ti;
- int srcu_idx;
- long len, ret = -EIO;
- map = dm_get_live_table(md, &srcu_idx);
+ map = dm_get_live_table(md, srcu_idx);
if (!map)
- goto out;
+ return NULL;
ti = dm_table_find_target(map, sector);
if (!dm_target_is_valid(ti))
- goto out;
+ return NULL;
+
+ return ti;
+}
+
+static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
+ long nr_pages, void **kaddr, pfn_t *pfn)
+{
+ struct mapped_device *md = dax_get_private(dax_dev);
+ sector_t sector = pgoff * PAGE_SECTORS;
+ struct dm_target *ti;
+ long len, ret = -EIO;
+ int srcu_idx;
- len = max_io_len(sector, ti) << SECTOR_SHIFT;
- size = min(len, size);
+ ti = dm_dax_get_live_target(md, sector, &srcu_idx);
+ if (!ti)
+ goto out;
+ if (!ti->type->direct_access)
+ goto out;
+ len = max_io_len(sector, ti) / PAGE_SECTORS;
+ if (len < 1)
+ goto out;
+ nr_pages = min(len, nr_pages);
if (ti->type->direct_access)
- ret = ti->type->direct_access(ti, sector, kaddr, pfn, size);
-out:
+ ret = ti->type->direct_access(ti, pgoff, nr_pages, kaddr, pfn);
+
+ out:
dm_put_live_table(md, srcu_idx);
- return min(ret, size);
+
+ return ret;
}
/*
@@ -1471,6 +1493,7 @@ static int next_free_minor(int *minor)
}
static const struct block_device_operations dm_blk_dops;
+static const struct dax_operations dm_dax_ops;
static void dm_wq_work(struct work_struct *work);
@@ -1517,6 +1540,12 @@ static void cleanup_mapped_device(struct mapped_device *md)
if (md->bs)
bioset_free(md->bs);
+ if (md->dax_dev) {
+ kill_dax(md->dax_dev);
+ put_dax(md->dax_dev);
+ md->dax_dev = NULL;
+ }
+
if (md->disk) {
spin_lock(&_minor_lock);
md->disk->private_data = NULL;
@@ -1544,6 +1573,7 @@ static void cleanup_mapped_device(struct mapped_device *md)
static struct mapped_device *alloc_dev(int minor)
{
int r, numa_node_id = dm_get_numa_node();
+ struct dax_device *dax_dev;
struct mapped_device *md;
void *old_md;
@@ -1608,6 +1638,12 @@ static struct mapped_device *alloc_dev(int minor)
md->disk->queue = md->queue;
md->disk->private_data = md;
sprintf(md->disk->disk_name, "dm-%d", minor);
+
+ dax_dev = alloc_dax(md, md->disk->disk_name, &dm_dax_ops);
+ if (!dax_dev)
+ goto bad;
+ md->dax_dev = dax_dev;
+
add_disk(md->disk);
format_dev_t(md->name, MKDEV(_major, minor));
@@ -2816,12 +2852,15 @@ static const struct block_device_operations dm_blk_dops = {
.open = dm_blk_open,
.release = dm_blk_close,
.ioctl = dm_blk_ioctl,
- .direct_access = dm_blk_direct_access,
.getgeo = dm_blk_getgeo,
.pr_ops = &dm_pr_ops,
.owner = THIS_MODULE
};
+static const struct dax_operations dm_dax_ops = {
+ .direct_access = dm_dax_direct_access,
+};
+
/*
* module hooks
*/
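
The device-mapper hunks above move targets from the old block-layer direct_access hook to the new dax_operations interface, where the callback works in page offsets rather than byte counts. A minimal sketch of the per-target callback shape, modelled on linear_dax_direct_access() above: only bdev_dax_pgoff() and dax_direct_access() come from the patch; the target type, its private data and the sector remapping helper are assumptions for illustration.

#include <linux/dax.h>
#include <linux/device-mapper.h>
#include <linux/pfn_t.h>

struct example_target {
	struct dm_dev *dev;	/* obtained with dm_get_device() in the constructor */
	sector_t start;		/* start of the mapped area on the underlying device */
};

static sector_t example_map_sector(struct dm_target *ti, sector_t bi_sector)
{
	struct example_target *et = ti->private;

	return et->start + dm_target_offset(ti, bi_sector);
}

static long example_dax_direct_access(struct dm_target *ti, pgoff_t pgoff,
				      long nr_pages, void **kaddr, pfn_t *pfn)
{
	struct example_target *et = ti->private;
	sector_t sector = pgoff * (PAGE_SIZE >> 9);	/* page offset -> 512-byte sectors */
	sector_t dev_sector = example_map_sector(ti, sector);
	long ret;

	/* Re-derive the page offset relative to the underlying block device. */
	ret = bdev_dax_pgoff(et->dev->bdev, dev_sector, nr_pages * PAGE_SIZE, &pgoff);
	if (ret)
		return ret;

	/* Delegate to the dax_device that dm core now acquires in open_table_device(). */
	return dax_direct_access(et->dev->dax_dev, pgoff, nr_pages, kaddr, pfn);
}

dm core's dm_dax_direct_access() above resolves the target from the live table and forwards the same pgoff/nr_pages pair, so a target only has to translate offsets within its own mapping.
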
diff --git a/drivers/media/Kconfig b/drivers/media/Kconfig
index 3512316e7a46..b72edd27f880 100644
--- a/drivers/media/Kconfig
+++ b/drivers/media/Kconfig
@@ -81,23 +81,15 @@ config MEDIA_RC_SUPPORT
Say Y when you have a TV or an IR device.
config MEDIA_CEC_SUPPORT
- bool "HDMI CEC support"
- select MEDIA_CEC_EDID
- ---help---
- Enable support for HDMI CEC (Consumer Electronics Control),
- which is an optional HDMI feature.
-
- Say Y when you have an HDMI receiver, transmitter or a USB CEC
- adapter that supports HDMI CEC.
+ bool "HDMI CEC support"
+ ---help---
+ Enable support for HDMI CEC (Consumer Electronics Control),
+ which is an optional HDMI feature.
-config MEDIA_CEC_DEBUG
- bool "HDMI CEC debugfs interface"
- depends on MEDIA_CEC_SUPPORT && DEBUG_FS
- ---help---
- Turns on the DebugFS interface for CEC devices.
+ Say Y when you have an HDMI receiver, transmitter or a USB CEC
+ adapter that supports HDMI CEC.
-config MEDIA_CEC_EDID
- bool
+source "drivers/media/cec/Kconfig"
#
# Media controller
diff --git a/drivers/media/Makefile b/drivers/media/Makefile
index d87ccb8eeabe..523fea3648ad 100644
--- a/drivers/media/Makefile
+++ b/drivers/media/Makefile
@@ -2,16 +2,10 @@
# Makefile for the kernel multimedia device drivers.
#
-ifeq ($(CONFIG_MEDIA_CEC_EDID),y)
- obj-$(CONFIG_MEDIA_SUPPORT) += cec-edid.o
-endif
-
-ifeq ($(CONFIG_MEDIA_CEC_SUPPORT),y)
- obj-$(CONFIG_MEDIA_SUPPORT) += cec/
-endif
-
media-objs := media-device.o media-devnode.o media-entity.o
+obj-$(CONFIG_CEC_CORE) += cec/
+
#
# I2C drivers should come before other drivers, otherwise they'll fail
# when compiled as builtin drivers
diff --git a/drivers/media/cec-edid.c b/drivers/media/cec-edid.c
deleted file mode 100644
index 5719b991e340..000000000000
--- a/drivers/media/cec-edid.c
+++ /dev/null
@@ -1,171 +0,0 @@
-/*
- * cec-edid - HDMI Consumer Electronics Control EDID & CEC helper functions
- *
- * Copyright 2016 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <media/cec-edid.h>
-
-/*
- * This EDID is expected to be a CEA-861 compliant, which means that there are
- * at least two blocks and one or more of the extensions blocks are CEA-861
- * blocks.
- *
- * The returned location is guaranteed to be < size - 1.
- */
-static unsigned int cec_get_edid_spa_location(const u8 *edid, unsigned int size)
-{
- unsigned int blocks = size / 128;
- unsigned int block;
- u8 d;
-
- /* Sanity check: at least 2 blocks and a multiple of the block size */
- if (blocks < 2 || size % 128)
- return 0;
-
- /*
- * If there are fewer extension blocks than the size, then update
- * 'blocks'. It is allowed to have more extension blocks than the size,
- * since some hardware can only read e.g. 256 bytes of the EDID, even
- * though more blocks are present. The first CEA-861 extension block
- * should normally be in block 1 anyway.
- */
- if (edid[0x7e] + 1 < blocks)
- blocks = edid[0x7e] + 1;
-
- for (block = 1; block < blocks; block++) {
- unsigned int offset = block * 128;
-
- /* Skip any non-CEA-861 extension blocks */
- if (edid[offset] != 0x02 || edid[offset + 1] != 0x03)
- continue;
-
- /* search Vendor Specific Data Block (tag 3) */
- d = edid[offset + 2] & 0x7f;
- /* Check if there are Data Blocks */
- if (d <= 4)
- continue;
- if (d > 4) {
- unsigned int i = offset + 4;
- unsigned int end = offset + d;
-
- /* Note: 'end' is always < 'size' */
- do {
- u8 tag = edid[i] >> 5;
- u8 len = edid[i] & 0x1f;
-
- if (tag == 3 && len >= 5 && i + len <= end &&
- edid[i + 1] == 0x03 &&
- edid[i + 2] == 0x0c &&
- edid[i + 3] == 0x00)
- return i + 4;
- i += len + 1;
- } while (i < end);
- }
- }
- return 0;
-}
-
-u16 cec_get_edid_phys_addr(const u8 *edid, unsigned int size,
- unsigned int *offset)
-{
- unsigned int loc = cec_get_edid_spa_location(edid, size);
-
- if (offset)
- *offset = loc;
- if (loc == 0)
- return CEC_PHYS_ADDR_INVALID;
- return (edid[loc] << 8) | edid[loc + 1];
-}
-EXPORT_SYMBOL_GPL(cec_get_edid_phys_addr);
-
-void cec_set_edid_phys_addr(u8 *edid, unsigned int size, u16 phys_addr)
-{
- unsigned int loc = cec_get_edid_spa_location(edid, size);
- u8 sum = 0;
- unsigned int i;
-
- if (loc == 0)
- return;
- edid[loc] = phys_addr >> 8;
- edid[loc + 1] = phys_addr & 0xff;
- loc &= ~0x7f;
-
- /* update the checksum */
- for (i = loc; i < loc + 127; i++)
- sum += edid[i];
- edid[i] = 256 - sum;
-}
-EXPORT_SYMBOL_GPL(cec_set_edid_phys_addr);
-
-u16 cec_phys_addr_for_input(u16 phys_addr, u8 input)
-{
- /* Check if input is sane */
- if (WARN_ON(input == 0 || input > 0xf))
- return CEC_PHYS_ADDR_INVALID;
-
- if (phys_addr == 0)
- return input << 12;
-
- if ((phys_addr & 0x0fff) == 0)
- return phys_addr | (input << 8);
-
- if ((phys_addr & 0x00ff) == 0)
- return phys_addr | (input << 4);
-
- if ((phys_addr & 0x000f) == 0)
- return phys_addr | input;
-
- /*
- * All nibbles are used so no valid physical addresses can be assigned
- * to the input.
- */
- return CEC_PHYS_ADDR_INVALID;
-}
-EXPORT_SYMBOL_GPL(cec_phys_addr_for_input);
-
-int cec_phys_addr_validate(u16 phys_addr, u16 *parent, u16 *port)
-{
- int i;
-
- if (parent)
- *parent = phys_addr;
- if (port)
- *port = 0;
- if (phys_addr == CEC_PHYS_ADDR_INVALID)
- return 0;
- for (i = 0; i < 16; i += 4)
- if (phys_addr & (0xf << i))
- break;
- if (i == 16)
- return 0;
- if (parent)
- *parent = phys_addr & (0xfff0 << i);
- if (port)
- *port = (phys_addr >> i) & 0xf;
- for (i += 4; i < 16; i += 4)
- if ((phys_addr & (0xf << i)) == 0)
- return -EINVAL;
- return 0;
-}
-EXPORT_SYMBOL_GPL(cec_phys_addr_validate);
-
-MODULE_AUTHOR("Hans Verkuil <hans.verkuil@cisco.com>");
-MODULE_DESCRIPTION("CEC EDID helper functions");
-MODULE_LICENSE("GPL");
diff --git a/drivers/media/cec/Kconfig b/drivers/media/cec/Kconfig
new file mode 100644
index 000000000000..f944d93e3167
--- /dev/null
+++ b/drivers/media/cec/Kconfig
@@ -0,0 +1,19 @@
+config CEC_CORE
+ tristate
+ depends on MEDIA_CEC_SUPPORT
+ default y
+
+config MEDIA_CEC_NOTIFIER
+ bool
+
+config MEDIA_CEC_RC
+ bool "HDMI CEC RC integration"
+ depends on CEC_CORE && RC_CORE
+ ---help---
+ Pass on CEC remote control messages to the RC framework.
+
+config MEDIA_CEC_DEBUG
+ bool "HDMI CEC debugfs interface"
+ depends on CEC_CORE && DEBUG_FS
+ ---help---
+ Turns on the DebugFS interface for CEC devices.
diff --git a/drivers/media/cec/Makefile b/drivers/media/cec/Makefile
index d6686337275f..402a6c62a3e8 100644
--- a/drivers/media/cec/Makefile
+++ b/drivers/media/cec/Makefile
@@ -1,5 +1,7 @@
-cec-objs := cec-core.o cec-adap.o cec-api.o
+cec-objs := cec-core.o cec-adap.o cec-api.o cec-edid.o
-ifeq ($(CONFIG_MEDIA_CEC_SUPPORT),y)
- obj-$(CONFIG_MEDIA_SUPPORT) += cec.o
+ifeq ($(CONFIG_MEDIA_CEC_NOTIFIER),y)
+ cec-objs += cec-notifier.o
endif
+
+obj-$(CONFIG_CEC_CORE) += cec.o
diff --git a/drivers/media/cec/cec-adap.c b/drivers/media/cec/cec-adap.c
index ccda41c2c9e4..f5fe01c9da8a 100644
--- a/drivers/media/cec/cec-adap.c
+++ b/drivers/media/cec/cec-adap.c
@@ -300,6 +300,40 @@ static void cec_data_cancel(struct cec_data *data)
}
/*
+ * Flush all pending transmits and cancel any pending timeout work.
+ *
+ * This function is called with adap->lock held.
+ */
+static void cec_flush(struct cec_adapter *adap)
+{
+ struct cec_data *data, *n;
+
+ /*
+ * If the adapter is disabled, or we're asked to stop,
+ * then cancel any pending transmits.
+ */
+ while (!list_empty(&adap->transmit_queue)) {
+ data = list_first_entry(&adap->transmit_queue,
+ struct cec_data, list);
+ cec_data_cancel(data);
+ }
+ if (adap->transmitting)
+ cec_data_cancel(adap->transmitting);
+
+ /* Cancel the pending timeout work. */
+ list_for_each_entry_safe(data, n, &adap->wait_queue, list) {
+ if (cancel_delayed_work(&data->work))
+ cec_data_cancel(data);
+ /*
+ * If cancel_delayed_work returned false, then
+ * the cec_wait_timeout function is running,
+ * which will call cec_data_completed. So no
+ * need to do anything special in that case.
+ */
+ }
+}
+
+/*
* Main CEC state machine
*
* Wait until the thread should be stopped, or we are not transmitting and
@@ -333,7 +367,6 @@ int cec_thread_func(void *_adap)
*/
err = wait_event_interruptible_timeout(adap->kthread_waitq,
kthread_should_stop() ||
- (!adap->is_configured && !adap->is_configuring) ||
(!adap->transmitting &&
!list_empty(&adap->transmit_queue)),
msecs_to_jiffies(CEC_XFER_TIMEOUT_MS));
@@ -348,39 +381,8 @@ int cec_thread_func(void *_adap)
mutex_lock(&adap->lock);
- if ((!adap->is_configured && !adap->is_configuring) ||
- kthread_should_stop()) {
- /*
- * If the adapter is disabled, or we're asked to stop,
- * then cancel any pending transmits.
- */
- while (!list_empty(&adap->transmit_queue)) {
- data = list_first_entry(&adap->transmit_queue,
- struct cec_data, list);
- cec_data_cancel(data);
- }
- if (adap->transmitting)
- cec_data_cancel(adap->transmitting);
-
- /*
- * Cancel the pending timeout work. We have to unlock
- * the mutex when flushing the work since
- * cec_wait_timeout() will take it. This is OK since
- * no new entries can be added to wait_queue as long
- * as adap->transmitting is NULL, which it is due to
- * the cec_data_cancel() above.
- */
- while (!list_empty(&adap->wait_queue)) {
- data = list_first_entry(&adap->wait_queue,
- struct cec_data, list);
-
- if (!cancel_delayed_work(&data->work)) {
- mutex_unlock(&adap->lock);
- flush_scheduled_work();
- mutex_lock(&adap->lock);
- }
- cec_data_cancel(data);
- }
+ if (kthread_should_stop()) {
+ cec_flush(adap);
goto unlock;
}
@@ -410,6 +412,7 @@ int cec_thread_func(void *_adap)
struct cec_data, list);
list_del_init(&data->list);
adap->transmit_queue_sz--;
+
/* Make this the current transmitting message */
adap->transmitting = data;
@@ -603,17 +606,17 @@ int cec_transmit_msg_fh(struct cec_adapter *adap, struct cec_msg *msg,
/* Sanity checks */
if (msg->len == 0 || msg->len > CEC_MAX_MSG_SIZE) {
- dprintk(1, "cec_transmit_msg: invalid length %d\n", msg->len);
+ dprintk(1, "%s: invalid length %d\n", __func__, msg->len);
return -EINVAL;
}
if (msg->timeout && msg->len == 1) {
- dprintk(1, "cec_transmit_msg: can't reply for poll msg\n");
+ dprintk(1, "%s: can't reply for poll msg\n", __func__);
return -EINVAL;
}
memset(msg->msg + msg->len, 0, sizeof(msg->msg) - msg->len);
if (msg->len == 1) {
if (cec_msg_destination(msg) == 0xf) {
- dprintk(1, "cec_transmit_msg: invalid poll message\n");
+ dprintk(1, "%s: invalid poll message\n", __func__);
return -EINVAL;
}
if (cec_has_log_addr(adap, cec_msg_destination(msg))) {
@@ -634,20 +637,30 @@ int cec_transmit_msg_fh(struct cec_adapter *adap, struct cec_msg *msg,
}
if (msg->len > 1 && !cec_msg_is_broadcast(msg) &&
cec_has_log_addr(adap, cec_msg_destination(msg))) {
- dprintk(1, "cec_transmit_msg: destination is the adapter itself\n");
+ dprintk(1, "%s: destination is the adapter itself\n", __func__);
return -EINVAL;
}
if (msg->len > 1 && adap->is_configured &&
!cec_has_log_addr(adap, cec_msg_initiator(msg))) {
- dprintk(1, "cec_transmit_msg: initiator has unknown logical address %d\n",
- cec_msg_initiator(msg));
+ dprintk(1, "%s: initiator has unknown logical address %d\n",
+ __func__, cec_msg_initiator(msg));
return -EINVAL;
}
- if (!adap->is_configured && !adap->is_configuring)
- return -ENONET;
+ if (!adap->is_configured && !adap->is_configuring) {
+ if (msg->msg[0] != 0xf0) {
+ dprintk(1, "%s: adapter is unconfigured\n", __func__);
+ return -ENONET;
+ }
+ if (msg->reply) {
+ dprintk(1, "%s: invalid msg->reply\n", __func__);
+ return -EINVAL;
+ }
+ }
- if (adap->transmit_queue_sz >= CEC_MAX_MSG_TX_QUEUE_SZ)
+ if (adap->transmit_queue_sz >= CEC_MAX_MSG_TX_QUEUE_SZ) {
+ dprintk(1, "%s: transmit queue full\n", __func__);
return -EBUSY;
+ }
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
@@ -659,11 +672,11 @@ int cec_transmit_msg_fh(struct cec_adapter *adap, struct cec_msg *msg,
}
if (msg->timeout)
- dprintk(2, "cec_transmit_msg: %*ph (wait for 0x%02x%s)\n",
- msg->len, msg->msg, msg->reply, !block ? ", nb" : "");
+ dprintk(2, "%s: %*ph (wait for 0x%02x%s)\n",
+ __func__, msg->len, msg->msg, msg->reply, !block ? ", nb" : "");
else
- dprintk(2, "cec_transmit_msg: %*ph%s\n",
- msg->len, msg->msg, !block ? " (nb)" : "");
+ dprintk(2, "%s: %*ph%s\n",
+ __func__, msg->len, msg->msg, !block ? " (nb)" : "");
data->msg = *msg;
data->fh = fh;
@@ -692,6 +705,7 @@ int cec_transmit_msg_fh(struct cec_adapter *adap, struct cec_msg *msg,
if (fh)
list_add_tail(&data->xfer_list, &fh->xfer_list);
+
list_add_tail(&data->list, &adap->transmit_queue);
adap->transmit_queue_sz++;
if (!adap->transmitting)
@@ -1117,6 +1131,7 @@ static void cec_adap_unconfigure(struct cec_adapter *adap)
adap->is_configuring = false;
adap->is_configured = false;
memset(adap->phys_addrs, 0xff, sizeof(adap->phys_addrs));
+ cec_flush(adap);
wake_up_interruptible(&adap->kthread_waitq);
cec_post_state_event(adap);
}
@@ -1348,19 +1363,30 @@ void __cec_s_phys_addr(struct cec_adapter *adap, u16 phys_addr, bool block)
/* Disabling monitor all mode should always succeed */
if (adap->monitor_all_cnt)
WARN_ON(call_op(adap, adap_monitor_all_enable, false));
- WARN_ON(adap->ops->adap_enable(adap, false));
+ mutex_lock(&adap->devnode.lock);
+ if (list_empty(&adap->devnode.fhs))
+ WARN_ON(adap->ops->adap_enable(adap, false));
+ mutex_unlock(&adap->devnode.lock);
if (phys_addr == CEC_PHYS_ADDR_INVALID)
return;
}
- if (adap->ops->adap_enable(adap, true))
+ mutex_lock(&adap->devnode.lock);
+ if (list_empty(&adap->devnode.fhs) &&
+ adap->ops->adap_enable(adap, true)) {
+ mutex_unlock(&adap->devnode.lock);
return;
+ }
if (adap->monitor_all_cnt &&
call_op(adap, adap_monitor_all_enable, true)) {
- WARN_ON(adap->ops->adap_enable(adap, false));
+ if (list_empty(&adap->devnode.fhs))
+ WARN_ON(adap->ops->adap_enable(adap, false));
+ mutex_unlock(&adap->devnode.lock);
return;
}
+ mutex_unlock(&adap->devnode.lock);
+
adap->phys_addr = phys_addr;
cec_post_state_event(adap);
if (adap->log_addrs.num_log_addrs)
@@ -1435,12 +1461,16 @@ int __cec_s_log_addrs(struct cec_adapter *adap,
* within the correct range.
*/
if (log_addrs->vendor_id != CEC_VENDOR_ID_NONE &&
- (log_addrs->vendor_id & 0xff000000) != 0)
+ (log_addrs->vendor_id & 0xff000000) != 0) {
+ dprintk(1, "invalid vendor ID\n");
return -EINVAL;
+ }
if (log_addrs->cec_version != CEC_OP_CEC_VERSION_1_4 &&
- log_addrs->cec_version != CEC_OP_CEC_VERSION_2_0)
+ log_addrs->cec_version != CEC_OP_CEC_VERSION_2_0) {
+ dprintk(1, "invalid CEC version\n");
return -EINVAL;
+ }
if (log_addrs->num_log_addrs > 1)
for (i = 0; i < log_addrs->num_log_addrs; i++)
@@ -1585,6 +1615,9 @@ static int cec_feature_abort_reason(struct cec_adapter *adap,
*/
if (msg->msg[1] == CEC_MSG_FEATURE_ABORT)
return 0;
+ /* Don't Feature Abort messages from 'Unregistered' */
+ if (cec_msg_initiator(msg) == CEC_LOG_ADDR_UNREGISTERED)
+ return 0;
cec_msg_set_reply_to(&tx_msg, msg);
cec_msg_feature_abort(&tx_msg, msg->msg[1], reason);
return cec_transmit_msg(adap, &tx_msg, false);
@@ -1699,7 +1732,7 @@ static int cec_receive_notify(struct cec_adapter *adap, struct cec_msg *msg,
!(adap->log_addrs.flags & CEC_LOG_ADDRS_FL_ALLOW_RC_PASSTHRU))
break;
-#if IS_REACHABLE(CONFIG_RC_CORE)
+#ifdef CONFIG_MEDIA_CEC_RC
switch (msg->msg[2]) {
/*
* Play function, this message can have variable length
@@ -1736,7 +1769,7 @@ static int cec_receive_notify(struct cec_adapter *adap, struct cec_msg *msg,
if (!(adap->capabilities & CEC_CAP_RC) ||
!(adap->log_addrs.flags & CEC_LOG_ADDRS_FL_ALLOW_RC_PASSTHRU))
break;
-#if IS_REACHABLE(CONFIG_RC_CORE)
+#ifdef CONFIG_MEDIA_CEC_RC
rc_keyup(adap->rc);
#endif
break;
diff --git a/drivers/media/cec/cec-api.c b/drivers/media/cec/cec-api.c
index 8950b6c9d6a9..0860fb458757 100644
--- a/drivers/media/cec/cec-api.c
+++ b/drivers/media/cec/cec-api.c
@@ -198,7 +198,11 @@ static long cec_transmit(struct cec_adapter *adap, struct cec_fh *fh,
return -EINVAL;
mutex_lock(&adap->lock);
- if (!adap->is_configured)
+ if (adap->log_addrs.num_log_addrs == 0)
+ err = -EPERM;
+ else if (adap->is_configuring)
+ err = -ENONET;
+ else if (!adap->is_configured && msg.msg[0] != 0xf0)
err = -ENONET;
else if (cec_is_busy(adap, fh))
err = -EBUSY;
@@ -515,9 +519,18 @@ static int cec_open(struct inode *inode, struct file *filp)
return err;
}
+ mutex_lock(&devnode->lock);
+ if (list_empty(&devnode->fhs) &&
+ adap->phys_addr == CEC_PHYS_ADDR_INVALID) {
+ err = adap->ops->adap_enable(adap, true);
+ if (err) {
+ mutex_unlock(&devnode->lock);
+ kfree(fh);
+ return err;
+ }
+ }
filp->private_data = fh;
- mutex_lock(&devnode->lock);
/* Queue up initial state events */
ev_state.state_change.phys_addr = adap->phys_addr;
ev_state.state_change.log_addr_mask = adap->log_addrs.log_addr_mask;
@@ -551,6 +564,10 @@ static int cec_release(struct inode *inode, struct file *filp)
mutex_lock(&devnode->lock);
list_del(&fh->list);
+ if (list_empty(&devnode->fhs) &&
+ adap->phys_addr == CEC_PHYS_ADDR_INVALID) {
+ WARN_ON(adap->ops->adap_enable(adap, false));
+ }
mutex_unlock(&devnode->lock);
/* Unhook pending transmits from this filehandle. */
diff --git a/drivers/media/cec/cec-core.c b/drivers/media/cec/cec-core.c
index 37217e205040..f9ebff90f8eb 100644
--- a/drivers/media/cec/cec-core.c
+++ b/drivers/media/cec/cec-core.c
@@ -137,24 +137,17 @@ static int __must_check cec_devnode_register(struct cec_devnode *devnode,
/* Part 2: Initialize and register the character device */
cdev_init(&devnode->cdev, &cec_devnode_fops);
- devnode->cdev.kobj.parent = &devnode->dev.kobj;
devnode->cdev.owner = owner;
- ret = cdev_add(&devnode->cdev, devnode->dev.devt, 1);
- if (ret < 0) {
- pr_err("%s: cdev_add failed\n", __func__);
+ ret = cdev_device_add(&devnode->cdev, &devnode->dev);
+ if (ret) {
+ pr_err("%s: cdev_device_add failed\n", __func__);
goto clr_bit;
}
- ret = device_add(&devnode->dev);
- if (ret)
- goto cdev_del;
-
devnode->registered = true;
return 0;
-cdev_del:
- cdev_del(&devnode->cdev);
clr_bit:
mutex_lock(&cec_devnode_lock);
clear_bit(devnode->minor, cec_devnode_nums);
@@ -190,11 +183,28 @@ static void cec_devnode_unregister(struct cec_devnode *devnode)
devnode->unregistered = true;
mutex_unlock(&devnode->lock);
- device_del(&devnode->dev);
- cdev_del(&devnode->cdev);
+ cdev_device_del(&devnode->cdev, &devnode->dev);
put_device(&devnode->dev);
}
+#ifdef CONFIG_MEDIA_CEC_NOTIFIER
+static void cec_cec_notify(struct cec_adapter *adap, u16 pa)
+{
+ cec_s_phys_addr(adap, pa, false);
+}
+
+void cec_register_cec_notifier(struct cec_adapter *adap,
+ struct cec_notifier *notifier)
+{
+ if (WARN_ON(!adap->devnode.registered))
+ return;
+
+ adap->notifier = notifier;
+ cec_notifier_register(adap->notifier, adap, cec_cec_notify);
+}
+EXPORT_SYMBOL_GPL(cec_register_cec_notifier);
+#endif
+
struct cec_adapter *cec_allocate_adapter(const struct cec_adap_ops *ops,
void *priv, const char *name, u32 caps,
u8 available_las)
@@ -202,6 +212,10 @@ struct cec_adapter *cec_allocate_adapter(const struct cec_adap_ops *ops,
struct cec_adapter *adap;
int res;
+#ifndef CONFIG_MEDIA_CEC_RC
+ caps &= ~CEC_CAP_RC;
+#endif
+
if (WARN_ON(!caps))
return ERR_PTR(-EINVAL);
if (WARN_ON(!ops))
@@ -234,10 +248,10 @@ struct cec_adapter *cec_allocate_adapter(const struct cec_adap_ops *ops,
return ERR_PTR(res);
}
+#ifdef CONFIG_MEDIA_CEC_RC
if (!(caps & CEC_CAP_RC))
return adap;
-#if IS_REACHABLE(CONFIG_RC_CORE)
/* Prepare the RC input device */
adap->rc = rc_allocate_device(RC_DRIVER_SCANCODE);
if (!adap->rc) {
@@ -264,8 +278,6 @@ struct cec_adapter *cec_allocate_adapter(const struct cec_adap_ops *ops,
adap->rc->priv = adap;
adap->rc->map_name = RC_MAP_CEC;
adap->rc->timeout = MS_TO_NS(100);
-#else
- adap->capabilities &= ~CEC_CAP_RC;
#endif
return adap;
}
@@ -285,9 +297,9 @@ int cec_register_adapter(struct cec_adapter *adap,
adap->owner = parent->driver->owner;
adap->devnode.dev.parent = parent;
-#if IS_REACHABLE(CONFIG_RC_CORE)
- adap->rc->dev.parent = parent;
+#ifdef CONFIG_MEDIA_CEC_RC
if (adap->capabilities & CEC_CAP_RC) {
+ adap->rc->dev.parent = parent;
res = rc_register_device(adap->rc);
if (res) {
@@ -302,7 +314,7 @@ int cec_register_adapter(struct cec_adapter *adap,
res = cec_devnode_register(&adap->devnode, adap->owner);
if (res) {
-#if IS_REACHABLE(CONFIG_RC_CORE)
+#ifdef CONFIG_MEDIA_CEC_RC
/* Note: rc_unregister also calls rc_free */
rc_unregister_device(adap->rc);
adap->rc = NULL;
@@ -337,12 +349,16 @@ void cec_unregister_adapter(struct cec_adapter *adap)
if (IS_ERR_OR_NULL(adap))
return;
-#if IS_REACHABLE(CONFIG_RC_CORE)
+#ifdef CONFIG_MEDIA_CEC_RC
/* Note: rc_unregister also calls rc_free */
rc_unregister_device(adap->rc);
adap->rc = NULL;
#endif
debugfs_remove_recursive(adap->cec_dir);
+#ifdef CONFIG_MEDIA_CEC_NOTIFIER
+ if (adap->notifier)
+ cec_notifier_unregister(adap->notifier);
+#endif
cec_devnode_unregister(&adap->devnode);
}
EXPORT_SYMBOL_GPL(cec_unregister_adapter);
@@ -357,7 +373,7 @@ void cec_delete_adapter(struct cec_adapter *adap)
kthread_stop(adap->kthread);
if (adap->kthread_config)
kthread_stop(adap->kthread_config);
-#if IS_REACHABLE(CONFIG_RC_CORE)
+#ifdef CONFIG_MEDIA_CEC_RC
rc_free_device(adap->rc);
#endif
kfree(adap);
diff --git a/drivers/media/cec/cec-edid.c b/drivers/media/cec/cec-edid.c
new file mode 100644
index 000000000000..38e3fec6152b
--- /dev/null
+++ b/drivers/media/cec/cec-edid.c
@@ -0,0 +1,167 @@
+/*
+ * cec-edid - HDMI Consumer Electronics Control EDID & CEC helper functions
+ *
+ * Copyright 2016 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <media/cec.h>
+
+/*
+ * This EDID is expected to be a CEA-861 compliant, which means that there are
+ * at least two blocks and one or more of the extensions blocks are CEA-861
+ * blocks.
+ *
+ * The returned location is guaranteed to be < size - 1.
+ */
+static unsigned int cec_get_edid_spa_location(const u8 *edid, unsigned int size)
+{
+ unsigned int blocks = size / 128;
+ unsigned int block;
+ u8 d;
+
+ /* Sanity check: at least 2 blocks and a multiple of the block size */
+ if (blocks < 2 || size % 128)
+ return 0;
+
+ /*
+ * If there are fewer extension blocks than the size, then update
+ * 'blocks'. It is allowed to have more extension blocks than the size,
+ * since some hardware can only read e.g. 256 bytes of the EDID, even
+ * though more blocks are present. The first CEA-861 extension block
+ * should normally be in block 1 anyway.
+ */
+ if (edid[0x7e] + 1 < blocks)
+ blocks = edid[0x7e] + 1;
+
+ for (block = 1; block < blocks; block++) {
+ unsigned int offset = block * 128;
+
+ /* Skip any non-CEA-861 extension blocks */
+ if (edid[offset] != 0x02 || edid[offset + 1] != 0x03)
+ continue;
+
+ /* search Vendor Specific Data Block (tag 3) */
+ d = edid[offset + 2] & 0x7f;
+ /* Check if there are Data Blocks */
+ if (d <= 4)
+ continue;
+ if (d > 4) {
+ unsigned int i = offset + 4;
+ unsigned int end = offset + d;
+
+ /* Note: 'end' is always < 'size' */
+ do {
+ u8 tag = edid[i] >> 5;
+ u8 len = edid[i] & 0x1f;
+
+ if (tag == 3 && len >= 5 && i + len <= end &&
+ edid[i + 1] == 0x03 &&
+ edid[i + 2] == 0x0c &&
+ edid[i + 3] == 0x00)
+ return i + 4;
+ i += len + 1;
+ } while (i < end);
+ }
+ }
+ return 0;
+}
+
+u16 cec_get_edid_phys_addr(const u8 *edid, unsigned int size,
+ unsigned int *offset)
+{
+ unsigned int loc = cec_get_edid_spa_location(edid, size);
+
+ if (offset)
+ *offset = loc;
+ if (loc == 0)
+ return CEC_PHYS_ADDR_INVALID;
+ return (edid[loc] << 8) | edid[loc + 1];
+}
+EXPORT_SYMBOL_GPL(cec_get_edid_phys_addr);
+
+void cec_set_edid_phys_addr(u8 *edid, unsigned int size, u16 phys_addr)
+{
+ unsigned int loc = cec_get_edid_spa_location(edid, size);
+ u8 sum = 0;
+ unsigned int i;
+
+ if (loc == 0)
+ return;
+ edid[loc] = phys_addr >> 8;
+ edid[loc + 1] = phys_addr & 0xff;
+ loc &= ~0x7f;
+
+ /* update the checksum */
+ for (i = loc; i < loc + 127; i++)
+ sum += edid[i];
+ edid[i] = 256 - sum;
+}
+EXPORT_SYMBOL_GPL(cec_set_edid_phys_addr);
+
+u16 cec_phys_addr_for_input(u16 phys_addr, u8 input)
+{
+ /* Check if input is sane */
+ if (WARN_ON(input == 0 || input > 0xf))
+ return CEC_PHYS_ADDR_INVALID;
+
+ if (phys_addr == 0)
+ return input << 12;
+
+ if ((phys_addr & 0x0fff) == 0)
+ return phys_addr | (input << 8);
+
+ if ((phys_addr & 0x00ff) == 0)
+ return phys_addr | (input << 4);
+
+ if ((phys_addr & 0x000f) == 0)
+ return phys_addr | input;
+
+ /*
+ * All nibbles are used so no valid physical addresses can be assigned
+ * to the input.
+ */
+ return CEC_PHYS_ADDR_INVALID;
+}
+EXPORT_SYMBOL_GPL(cec_phys_addr_for_input);
+
+int cec_phys_addr_validate(u16 phys_addr, u16 *parent, u16 *port)
+{
+ int i;
+
+ if (parent)
+ *parent = phys_addr;
+ if (port)
+ *port = 0;
+ if (phys_addr == CEC_PHYS_ADDR_INVALID)
+ return 0;
+ for (i = 0; i < 16; i += 4)
+ if (phys_addr & (0xf << i))
+ break;
+ if (i == 16)
+ return 0;
+ if (parent)
+ *parent = phys_addr & (0xfff0 << i);
+ if (port)
+ *port = (phys_addr >> i) & 0xf;
+ for (i += 4; i < 16; i += 4)
+ if ((phys_addr & (0xf << i)) == 0)
+ return -EINVAL;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cec_phys_addr_validate);
diff --git a/drivers/media/cec/cec-notifier.c b/drivers/media/cec/cec-notifier.c
new file mode 100644
index 000000000000..74dc1c32080e
--- /dev/null
+++ b/drivers/media/cec/cec-notifier.c
@@ -0,0 +1,130 @@
+/*
+ * cec-notifier.c - notify CEC drivers of physical address changes
+ *
+ * Copyright 2016 Russell King <rmk+kernel@arm.linux.org.uk>
+ * Copyright 2016-2017 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/export.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/kref.h>
+
+#include <media/cec.h>
+#include <media/cec-notifier.h>
+#include <drm/drm_edid.h>
+
+struct cec_notifier {
+ struct mutex lock;
+ struct list_head head;
+ struct kref kref;
+ struct device *dev;
+ struct cec_adapter *cec_adap;
+ void (*callback)(struct cec_adapter *adap, u16 pa);
+
+ u16 phys_addr;
+};
+
+static LIST_HEAD(cec_notifiers);
+static DEFINE_MUTEX(cec_notifiers_lock);
+
+struct cec_notifier *cec_notifier_get(struct device *dev)
+{
+ struct cec_notifier *n;
+
+ mutex_lock(&cec_notifiers_lock);
+ list_for_each_entry(n, &cec_notifiers, head) {
+ if (n->dev == dev) {
+ kref_get(&n->kref);
+ mutex_unlock(&cec_notifiers_lock);
+ return n;
+ }
+ }
+ n = kzalloc(sizeof(*n), GFP_KERNEL);
+ if (!n)
+ goto unlock;
+ n->dev = dev;
+ n->phys_addr = CEC_PHYS_ADDR_INVALID;
+ mutex_init(&n->lock);
+ kref_init(&n->kref);
+ list_add_tail(&n->head, &cec_notifiers);
+unlock:
+ mutex_unlock(&cec_notifiers_lock);
+ return n;
+}
+EXPORT_SYMBOL_GPL(cec_notifier_get);
+
+static void cec_notifier_release(struct kref *kref)
+{
+ struct cec_notifier *n =
+ container_of(kref, struct cec_notifier, kref);
+
+ list_del(&n->head);
+ kfree(n);
+}
+
+void cec_notifier_put(struct cec_notifier *n)
+{
+ mutex_lock(&cec_notifiers_lock);
+ kref_put(&n->kref, cec_notifier_release);
+ mutex_unlock(&cec_notifiers_lock);
+}
+EXPORT_SYMBOL_GPL(cec_notifier_put);
+
+void cec_notifier_set_phys_addr(struct cec_notifier *n, u16 pa)
+{
+ mutex_lock(&n->lock);
+ n->phys_addr = pa;
+ if (n->callback)
+ n->callback(n->cec_adap, n->phys_addr);
+ mutex_unlock(&n->lock);
+}
+EXPORT_SYMBOL_GPL(cec_notifier_set_phys_addr);
+
+void cec_notifier_set_phys_addr_from_edid(struct cec_notifier *n,
+ const struct edid *edid)
+{
+ u16 pa = CEC_PHYS_ADDR_INVALID;
+
+ if (edid && edid->extensions)
+ pa = cec_get_edid_phys_addr((const u8 *)edid,
+ EDID_LENGTH * (edid->extensions + 1), NULL);
+ cec_notifier_set_phys_addr(n, pa);
+}
+EXPORT_SYMBOL_GPL(cec_notifier_set_phys_addr_from_edid);
+
+void cec_notifier_register(struct cec_notifier *n,
+ struct cec_adapter *adap,
+ void (*callback)(struct cec_adapter *adap, u16 pa))
+{
+ kref_get(&n->kref);
+ mutex_lock(&n->lock);
+ n->cec_adap = adap;
+ n->callback = callback;
+ n->callback(adap, n->phys_addr);
+ mutex_unlock(&n->lock);
+}
+EXPORT_SYMBOL_GPL(cec_notifier_register);
+
+void cec_notifier_unregister(struct cec_notifier *n)
+{
+ mutex_lock(&n->lock);
+ n->callback = NULL;
+ mutex_unlock(&n->lock);
+ cec_notifier_put(n);
+}
+EXPORT_SYMBOL_GPL(cec_notifier_unregister);
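The notifier added above decouples the HDMI driver that parses the EDID from the CEC adapter driver that needs the resulting physical address: both sides look the notifier up by the same struct device, the HDMI side pushes address changes, and the CEC side registers a callback. A minimal sketch of that split (illustration only; the my_hdmi_*()/my_cec_*() names are hypothetical, and error handling plus teardown via cec_notifier_unregister()/cec_notifier_put() are omitted):

/*
 * Sketch only, not part of the patch: how an HDMI transmitter driver
 * (the EDID side) and a CEC adapter driver could use the notifier API
 * added above. The my_hdmi_*()/my_cec_*() names are hypothetical and
 * error handling/teardown is omitted.
 */
#include <linux/errno.h>
#include <media/cec.h>
#include <media/cec-notifier.h>
#include <drm/drm_edid.h>

/* HDMI side: obtain the notifier once at probe time and keep it. */
static struct cec_notifier *my_hdmi_notifier;

static int my_hdmi_probe(struct device *dev)
{
	my_hdmi_notifier = cec_notifier_get(dev);
	return my_hdmi_notifier ? 0 : -ENOMEM;
}

/* Called whenever a new EDID is read; a NULL EDID reports an invalid PA. */
static void my_hdmi_edid_changed(const struct edid *edid)
{
	cec_notifier_set_phys_addr_from_edid(my_hdmi_notifier, edid);
}

/* CEC side: look up the same notifier via the HDMI device and register. */
static void my_cec_pa_changed(struct cec_adapter *adap, u16 pa)
{
	cec_s_phys_addr(adap, pa, false);
}

static void my_cec_bind(struct device *hdmi_dev, struct cec_adapter *adap)
{
	struct cec_notifier *n = cec_notifier_get(hdmi_dev);

	if (n)
		cec_notifier_register(n, adap, my_cec_pa_changed);
}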
diff --git a/drivers/media/common/b2c2/flexcop-fe-tuner.c b/drivers/media/common/b2c2/flexcop-fe-tuner.c
index 5f10151ecec9..7636606f0be5 100644
--- a/drivers/media/common/b2c2/flexcop-fe-tuner.c
+++ b/drivers/media/common/b2c2/flexcop-fe-tuner.c
@@ -473,7 +473,7 @@ static int airstar_atsc1_attach(struct flexcop_device *fc,
/* AirStar ATSC 2nd generation */
#if FE_SUPPORTED(NXT200X) && FE_SUPPORTED(PLL)
-static struct nxt200x_config samsung_tbmv_config = {
+static const struct nxt200x_config samsung_tbmv_config = {
.demod_address = 0x0a,
};
diff --git a/drivers/media/common/saa7146/saa7146_vbi.c b/drivers/media/common/saa7146/saa7146_vbi.c
index 49237518d65f..3553ac4cba5c 100644
--- a/drivers/media/common/saa7146/saa7146_vbi.c
+++ b/drivers/media/common/saa7146/saa7146_vbi.c
@@ -365,9 +365,8 @@ static void vbi_init(struct saa7146_dev *dev, struct saa7146_vv *vv)
INIT_LIST_HEAD(&vv->vbi_dmaq.queue);
- init_timer(&vv->vbi_dmaq.timeout);
- vv->vbi_dmaq.timeout.function = saa7146_buffer_timeout;
- vv->vbi_dmaq.timeout.data = (unsigned long)(&vv->vbi_dmaq);
+ setup_timer(&vv->vbi_dmaq.timeout, saa7146_buffer_timeout,
+ (unsigned long)(&vv->vbi_dmaq));
vv->vbi_dmaq.dev = dev;
init_waitqueue_head(&vv->vbi_wq);
diff --git a/drivers/media/common/saa7146/saa7146_video.c b/drivers/media/common/saa7146/saa7146_video.c
index e034bcfcf757..b3b29d4f36ed 100644
--- a/drivers/media/common/saa7146/saa7146_video.c
+++ b/drivers/media/common/saa7146/saa7146_video.c
@@ -1201,9 +1201,8 @@ static void video_init(struct saa7146_dev *dev, struct saa7146_vv *vv)
{
INIT_LIST_HEAD(&vv->video_dmaq.queue);
- init_timer(&vv->video_dmaq.timeout);
- vv->video_dmaq.timeout.function = saa7146_buffer_timeout;
- vv->video_dmaq.timeout.data = (unsigned long)(&vv->video_dmaq);
+ setup_timer(&vv->video_dmaq.timeout, saa7146_buffer_timeout,
+ (unsigned long)(&vv->video_dmaq));
vv->video_dmaq.dev = dev;
/* set some default values */
diff --git a/drivers/media/common/tveeprom.c b/drivers/media/common/tveeprom.c
index 6e1020227f9f..ccf2d3b12aec 100644
--- a/drivers/media/common/tveeprom.c
+++ b/drivers/media/common/tveeprom.c
@@ -420,8 +420,8 @@ static int hasRadioTuner(int tunerType)
return 0;
}
-void tveeprom_hauppauge_analog(struct i2c_client *c, struct tveeprom *tvee,
- unsigned char *eeprom_data)
+void tveeprom_hauppauge_analog(struct tveeprom *tvee,
+ unsigned char *eeprom_data)
{
/* ----------------------------------------------
** The hauppauge eeprom format is tagged
diff --git a/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c b/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
index e47b46e2d26c..3dd22da7e17d 100644
--- a/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
+++ b/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
@@ -927,7 +927,14 @@ static void precalculate_color(struct tpg_data *tpg, int k)
y >>= 4;
cb >>= 4;
cr >>= 4;
- if (tpg->real_quantization == V4L2_QUANTIZATION_LIM_RANGE) {
+ /*
+ * XV601/709 use the header/footer margins to encode R', G'
+ * and B' values outside the range [0-1]. So do not clamp
+ * XV601/709 values.
+ */
+ if (tpg->real_quantization == V4L2_QUANTIZATION_LIM_RANGE &&
+ tpg->real_ycbcr_enc != V4L2_YCBCR_ENC_XV601 &&
+ tpg->real_ycbcr_enc != V4L2_YCBCR_ENC_XV709) {
y = clamp(y, 16, 235);
cb = clamp(cb, 16, 240);
cr = clamp(cr, 16, 240);
diff --git a/drivers/media/dvb-core/dvb_ca_en50221.c b/drivers/media/dvb-core/dvb_ca_en50221.c
index 8d65028c7a74..d38bf9bce480 100644
--- a/drivers/media/dvb-core/dvb_ca_en50221.c
+++ b/drivers/media/dvb-core/dvb_ca_en50221.c
@@ -785,6 +785,29 @@ static int dvb_ca_en50221_write_data(struct dvb_ca_private *ca, int slot, u8 * b
goto exit;
}
+ /*
+ * It may take some time for the CAM to settle down, or there might
+ * be a race between the CAM, our write of HC and our last check for
+ * DA. This happens if the CAM asserts DA just after we checked DA but
+ * before we set HC. In that case it might be a bug in the CAM to keep
+ * the FR bit, the lower layer/HW communication might require a longer
+ * timeout, or the CAM simply needs more time internally. But this
+ * does happen in reality!
+ * Read the status from the HW again and repeat the check for DA that
+ * we did above.
+ */
+ status = ca->pub->read_cam_control(ca->pub, slot, CTRLIF_STATUS);
+ if (status < 0)
+ goto exit;
+
+ if (status & (STATUSREG_DA | STATUSREG_RE)) {
+ if (status & STATUSREG_DA)
+ dvb_ca_en50221_thread_wakeup(ca);
+
+ status = -EAGAIN;
+ goto exit;
+ }
+
/* send the amount of data */
if ((status = ca->pub->write_cam_control(ca->pub, slot, CTRLIF_SIZE_HIGH, bytes_write >> 8)) != 0)
goto exit;
diff --git a/drivers/media/dvb-core/dvb_frontend.h b/drivers/media/dvb-core/dvb_frontend.h
index 482912d3b77a..907a05bde162 100644
--- a/drivers/media/dvb-core/dvb_frontend.h
+++ b/drivers/media/dvb-core/dvb_frontend.h
@@ -643,6 +643,8 @@ struct dtv_frontend_properties {
/**
* struct dvb_frontend - Frontend structure to be used on drivers.
*
+ * @refcount: refcount to keep track of struct dvb_frontend
+ * references
* @ops: embedded struct dvb_frontend_ops
* @dvb: pointer to struct dvb_adapter
* @demodulator_priv: demod private data
diff --git a/drivers/media/dvb-frontends/cxd2841er.c b/drivers/media/dvb-frontends/cxd2841er.c
index 614bfb3740f1..ce37dc2e89c7 100644
--- a/drivers/media/dvb-frontends/cxd2841er.c
+++ b/drivers/media/dvb-frontends/cxd2841er.c
@@ -3852,7 +3852,9 @@ static struct dvb_frontend_ops cxd2841er_t_c_ops = {
FE_CAN_MUTE_TS |
FE_CAN_2G_MODULATION,
.frequency_min = 42000000,
- .frequency_max = 1002000000
+ .frequency_max = 1002000000,
+ .symbol_rate_min = 870000,
+ .symbol_rate_max = 11700000
},
.init = cxd2841er_init_tc,
.sleep = cxd2841er_sleep_tc,
diff --git a/drivers/media/dvb-frontends/drx39xyj/drx_dap_fasi.h b/drivers/media/dvb-frontends/drx39xyj/drx_dap_fasi.h
index 354ec07eae87..23ae72468025 100644
--- a/drivers/media/dvb-frontends/drx39xyj/drx_dap_fasi.h
+++ b/drivers/media/dvb-frontends/drx39xyj/drx_dap_fasi.h
@@ -70,7 +70,7 @@
* (3) both long and short but short preferred and long only when necesarry
*
* These modes must be selected compile time via compile switches.
-* Compile switch settings for the diffrent modes:
+* Compile switch settings for the different modes:
* (1) DRXDAPFASI_LONG_ADDR_ALLOWED=0, DRXDAPFASI_SHORT_ADDR_ALLOWED=1
* (2) DRXDAPFASI_LONG_ADDR_ALLOWED=1, DRXDAPFASI_SHORT_ADDR_ALLOWED=0
* (3) DRXDAPFASI_LONG_ADDR_ALLOWED=1, DRXDAPFASI_SHORT_ADDR_ALLOWED=1
diff --git a/drivers/media/dvb-frontends/drxk_hard.c b/drivers/media/dvb-frontends/drxk_hard.c
index 7e1bbbaad625..050fe34342d3 100644
--- a/drivers/media/dvb-frontends/drxk_hard.c
+++ b/drivers/media/dvb-frontends/drxk_hard.c
@@ -1904,7 +1904,9 @@ static int get_lock_status(struct drxk_state *state, u32 *p_lock_status)
status = get_dvbt_lock_status(state, p_lock_status);
break;
default:
- break;
+ pr_debug("Unsupported operation mode %d in %s\n",
+ state->m_operation_mode, __func__);
+ return 0;
}
error:
if (status < 0)
@@ -5283,7 +5285,6 @@ static int qam_set_symbolrate(struct drxk_state *state)
/* Select & calculate correct IQM rate */
adc_frequency = (state->m_sys_clock_freq * 1000) / 3;
ratesel = 0;
- /* printk(KERN_DEBUG "drxk: SR %d\n", state->props.symbol_rate); */
if (state->props.symbol_rate <= 1188750)
ratesel = 3;
else if (state->props.symbol_rate <= 2377500)
diff --git a/drivers/media/dvb-frontends/mn88472.c b/drivers/media/dvb-frontends/mn88472.c
index 29dd13b36c28..f6938f9607ac 100644
--- a/drivers/media/dvb-frontends/mn88472.c
+++ b/drivers/media/dvb-frontends/mn88472.c
@@ -28,8 +28,9 @@ static int mn88472_read_status(struct dvb_frontend *fe, enum fe_status *status)
struct i2c_client *client = fe->demodulator_priv;
struct mn88472_dev *dev = i2c_get_clientdata(client);
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
- int ret;
- unsigned int utmp;
+ int ret, i, stmp;
+ unsigned int utmp, utmp1, utmp2;
+ u8 buf[5];
if (!dev->active) {
ret = -EAGAIN;
@@ -77,6 +78,127 @@ static int mn88472_read_status(struct dvb_frontend *fe, enum fe_status *status)
goto err;
}
+ /* Signal strength */
+ if (*status & FE_HAS_SIGNAL) {
+ for (i = 0; i < 2; i++) {
+ ret = regmap_bulk_read(dev->regmap[2], 0x8e + i,
+ &buf[i], 1);
+ if (ret)
+ goto err;
+ }
+
+ utmp1 = buf[0] << 8 | buf[1] << 0 | buf[0] >> 2;
+ dev_dbg(&client->dev, "strength=%u\n", utmp1);
+
+ c->strength.stat[0].scale = FE_SCALE_RELATIVE;
+ c->strength.stat[0].uvalue = utmp1;
+ } else {
+ c->strength.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ }
+
+ /* CNR */
+ if (*status & FE_HAS_VITERBI && c->delivery_system == SYS_DVBT) {
+ /* DVB-T CNR */
+ ret = regmap_bulk_read(dev->regmap[0], 0x9c, buf, 2);
+ if (ret)
+ goto err;
+
+ utmp = buf[0] << 8 | buf[1] << 0;
+ if (utmp) {
+ /* CNR[dB]: 10 * log10(65536 / value) + 2 */
+ /* log10(65536) = 80807124, 0.2 = 3355443 */
+ stmp = ((u64)80807124 - intlog10(utmp) + 3355443)
+ * 10000 >> 24;
+
+ dev_dbg(&client->dev, "cnr=%d value=%u\n", stmp, utmp);
+ } else {
+ stmp = 0;
+ }
+
+ c->cnr.stat[0].svalue = stmp;
+ c->cnr.stat[0].scale = FE_SCALE_DECIBEL;
+ } else if (*status & FE_HAS_VITERBI &&
+ c->delivery_system == SYS_DVBT2) {
+ /* DVB-T2 CNR */
+ for (i = 0; i < 3; i++) {
+ ret = regmap_bulk_read(dev->regmap[2], 0xbc + i,
+ &buf[i], 1);
+ if (ret)
+ goto err;
+ }
+
+ utmp = buf[1] << 8 | buf[2] << 0;
+ utmp1 = (buf[0] >> 2) & 0x01; /* 0=SISO, 1=MISO */
+ if (utmp) {
+ if (utmp1) {
+ /* CNR[dB]: 10 * log10(16384 / value) - 6 */
+ /* log10(16384) = 70706234, 0.6 = 10066330 */
+ stmp = ((u64)70706234 - intlog10(utmp)
+ - 10066330) * 10000 >> 24;
+ dev_dbg(&client->dev, "cnr=%d value=%u MISO\n",
+ stmp, utmp);
+ } else {
+ /* CNR[dB]: 10 * log10(65536 / value) + 2 */
+ /* log10(65536) = 80807124, 0.2 = 3355443 */
+ stmp = ((u64)80807124 - intlog10(utmp)
+ + 3355443) * 10000 >> 24;
+
+ dev_dbg(&client->dev, "cnr=%d value=%u SISO\n",
+ stmp, utmp);
+ }
+ } else {
+ stmp = 0;
+ }
+
+ c->cnr.stat[0].svalue = stmp;
+ c->cnr.stat[0].scale = FE_SCALE_DECIBEL;
+ } else if (*status & FE_HAS_VITERBI &&
+ c->delivery_system == SYS_DVBC_ANNEX_A) {
+ /* DVB-C CNR */
+ ret = regmap_bulk_read(dev->regmap[1], 0xa1, buf, 4);
+ if (ret)
+ goto err;
+
+ utmp1 = buf[0] << 8 | buf[1] << 0; /* signal */
+ utmp2 = buf[2] << 8 | buf[3] << 0; /* noise */
+ if (utmp1 && utmp2) {
+ /* CNR[dB]: 10 * log10(8 * (signal / noise)) */
+ /* log10(8) = 15151336 */
+ stmp = ((u64)15151336 + intlog10(utmp1)
+ - intlog10(utmp2)) * 10000 >> 24;
+
+ dev_dbg(&client->dev, "cnr=%d signal=%u noise=%u\n",
+ stmp, utmp1, utmp2);
+ } else {
+ stmp = 0;
+ }
+
+ c->cnr.stat[0].svalue = stmp;
+ c->cnr.stat[0].scale = FE_SCALE_DECIBEL;
+ } else {
+ c->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ }
+
+ /* PER */
+ if (*status & FE_HAS_SYNC) {
+ ret = regmap_bulk_read(dev->regmap[0], 0xe1, buf, 4);
+ if (ret)
+ goto err;
+
+ utmp1 = buf[0] << 8 | buf[1] << 0;
+ utmp2 = buf[2] << 8 | buf[3] << 0;
+ dev_dbg(&client->dev, "block_error=%u block_count=%u\n",
+ utmp1, utmp2);
+
+ c->block_error.stat[0].scale = FE_SCALE_COUNTER;
+ c->block_error.stat[0].uvalue += utmp1;
+ c->block_count.stat[0].scale = FE_SCALE_COUNTER;
+ c->block_count.stat[0].uvalue += utmp2;
+ } else {
+ c->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ c->block_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ }
+
return 0;
err:
dev_dbg(&client->dev, "failed=%d\n", ret);
@@ -462,6 +584,7 @@ static int mn88472_probe(struct i2c_client *client,
{
struct mn88472_config *pdata = client->dev.platform_data;
struct mn88472_dev *dev;
+ struct dtv_frontend_properties *c;
int ret;
unsigned int utmp;
static const struct regmap_config regmap_config = {
@@ -547,6 +670,13 @@ static int mn88472_probe(struct i2c_client *client,
*pdata->fe = &dev->fe;
i2c_set_clientdata(client, dev);
+ /* Init stats to indicate which stats are supported */
+ c = &dev->fe.dtv_property_cache;
+ c->strength.len = 1;
+ c->cnr.len = 1;
+ c->block_error.len = 1;
+ c->block_count.len = 1;
+
/* Setup callbacks */
pdata->get_dvb_frontend = mn88472_get_dvb_frontend;
diff --git a/drivers/media/dvb-frontends/mn88472_priv.h b/drivers/media/dvb-frontends/mn88472_priv.h
index cdf2597a25d1..fb50f56ba30b 100644
--- a/drivers/media/dvb-frontends/mn88472_priv.h
+++ b/drivers/media/dvb-frontends/mn88472_priv.h
@@ -18,6 +18,7 @@
#define MN88472_PRIV_H
#include "dvb_frontend.h"
+#include "dvb_math.h"
#include "mn88472.h"
#include <linux/firmware.h>
#include <linux/regmap.h>
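The new dvb_math.h include is what provides intlog10() for the statistics code added to mn88472_read_status() above. intlog10() returns log10(x) in Q24 fixed point (scaled by 2^24), which is where the magic constants come from: 80807124 ~= log10(65536) * 2^24, 3355443 = 0.2 * 2^24, 15151336 ~= log10(8) * 2^24. The Q24 result holds CNR/10 in dB, so the final "* 10000 >> 24" yields the 0.001 dB units expected for FE_SCALE_DECIBEL. A standalone sketch of the DVB-T branch (illustration only, with floating-point log10() standing in for the kernel's intlog10()):

/*
 * Illustration only, not part of the patch: reproduce the DVB-T CNR
 * computation from mn88472_read_status() in userspace. log10() * 2^24
 * stands in for the kernel's intlog10() from dvb_math.h.
 */
#include <stdio.h>
#include <stdint.h>
#include <math.h>

static uint32_t intlog10_approx(uint32_t value)
{
	return (uint32_t)(log10((double)value) * (1 << 24));
}

int main(void)
{
	unsigned int utmp = 2500;	/* example register readout */
	int64_t stmp;
	double cnr_db;

	/* CNR[dB] = 10 * log10(65536 / value) + 2, done in Q24:	*/
	/* log10(65536) * 2^24 ~= 80807124, 0.2 * 2^24 ~= 3355443	*/
	stmp = ((int64_t)80807124 - intlog10_approx(utmp) + 3355443)
		* 10000 >> 24;

	cnr_db = 10.0 * log10(65536.0 / utmp) + 2.0;

	/* stmp is in 0.001 dB units (FE_SCALE_DECIBEL) */
	printf("integer: %lld mdB, float: %.3f dB\n",
	       (long long)stmp, cnr_db);
	return 0;
}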
diff --git a/drivers/media/dvb-frontends/si2168.c b/drivers/media/dvb-frontends/si2168.c
index 680ba06c29fb..172fc367ccaa 100644
--- a/drivers/media/dvb-frontends/si2168.c
+++ b/drivers/media/dvb-frontends/si2168.c
@@ -740,6 +740,9 @@ static int si2168_probe(struct i2c_client *client,
case SI2168_CHIP_ID_B40:
dev->firmware_name = SI2168_B40_FIRMWARE;
break;
+ case SI2168_CHIP_ID_D60:
+ dev->firmware_name = SI2168_D60_FIRMWARE;
+ break;
default:
dev_dbg(&client->dev, "unknown chip version Si21%d-%c%c%c\n",
cmd.args[2], cmd.args[1], cmd.args[3], cmd.args[4]);
@@ -827,3 +830,4 @@ MODULE_LICENSE("GPL");
MODULE_FIRMWARE(SI2168_A20_FIRMWARE);
MODULE_FIRMWARE(SI2168_A30_FIRMWARE);
MODULE_FIRMWARE(SI2168_B40_FIRMWARE);
+MODULE_FIRMWARE(SI2168_D60_FIRMWARE);
diff --git a/drivers/media/dvb-frontends/si2168_priv.h b/drivers/media/dvb-frontends/si2168_priv.h
index 2fecac6231ff..737cf416fbb2 100644
--- a/drivers/media/dvb-frontends/si2168_priv.h
+++ b/drivers/media/dvb-frontends/si2168_priv.h
@@ -26,6 +26,7 @@
#define SI2168_A20_FIRMWARE "dvb-demod-si2168-a20-01.fw"
#define SI2168_A30_FIRMWARE "dvb-demod-si2168-a30-01.fw"
#define SI2168_B40_FIRMWARE "dvb-demod-si2168-b40-01.fw"
+#define SI2168_D60_FIRMWARE "dvb-demod-si2168-d60-01.fw"
#define SI2168_B40_FIRMWARE_FALLBACK "dvb-demod-si2168-02.fw"
/* state struct */
@@ -38,6 +39,7 @@ struct si2168_dev {
#define SI2168_CHIP_ID_A20 ('A' << 24 | 68 << 16 | '2' << 8 | '0' << 0)
#define SI2168_CHIP_ID_A30 ('A' << 24 | 68 << 16 | '3' << 8 | '0' << 0)
#define SI2168_CHIP_ID_B40 ('B' << 24 | 68 << 16 | '4' << 8 | '0' << 0)
+ #define SI2168_CHIP_ID_D60 ('D' << 24 | 68 << 16 | '6' << 8 | '0' << 0)
unsigned int chip_id;
unsigned int version;
const char *firmware_name;
diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig
index cee1dae6e014..fd181c99ce11 100644
--- a/drivers/media/i2c/Kconfig
+++ b/drivers/media/i2c/Kconfig
@@ -209,7 +209,6 @@ config VIDEO_ADV7604
depends on VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API
depends on GPIOLIB || COMPILE_TEST
select HDMI
- select MEDIA_CEC_EDID
---help---
Support for the Analog Devices ADV7604 video decoder.
@@ -221,7 +220,7 @@ config VIDEO_ADV7604
config VIDEO_ADV7604_CEC
bool "Enable Analog Devices ADV7604 CEC support"
- depends on VIDEO_ADV7604 && MEDIA_CEC_SUPPORT
+ depends on VIDEO_ADV7604 && CEC_CORE
---help---
When selected the adv7604 will support the optional
HDMI CEC feature.
@@ -230,7 +229,6 @@ config VIDEO_ADV7842
tristate "Analog Devices ADV7842 decoder"
depends on VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API
select HDMI
- select MEDIA_CEC_EDID
---help---
Support for the Analog Devices ADV7842 video decoder.
@@ -242,7 +240,7 @@ config VIDEO_ADV7842
config VIDEO_ADV7842_CEC
bool "Enable Analog Devices ADV7842 CEC support"
- depends on VIDEO_ADV7842 && MEDIA_CEC_SUPPORT
+ depends on VIDEO_ADV7842 && CEC_CORE
---help---
When selected the adv7842 will support the optional
HDMI CEC feature.
@@ -470,7 +468,6 @@ config VIDEO_ADV7511
tristate "Analog Devices ADV7511 encoder"
depends on VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API
select HDMI
- select MEDIA_CEC_EDID
---help---
Support for the Analog Devices ADV7511 video encoder.
@@ -481,7 +478,7 @@ config VIDEO_ADV7511
config VIDEO_ADV7511_CEC
bool "Enable Analog Devices ADV7511 CEC support"
- depends on VIDEO_ADV7511 && MEDIA_CEC_SUPPORT
+ depends on VIDEO_ADV7511 && CEC_CORE
---help---
When selected the adv7511 will support the optional
HDMI CEC feature.
@@ -520,6 +517,17 @@ config VIDEO_APTINA_PLL
config VIDEO_SMIAPP_PLL
tristate
+config VIDEO_OV2640
+ tristate "OmniVision OV2640 sensor support"
+ depends on VIDEO_V4L2 && I2C
+ depends on MEDIA_CAMERA_SUPPORT
+ help
+ This is a Video4Linux2 sensor-level driver for the OmniVision
+ OV2640 camera.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ov2640.
+
config VIDEO_OV2659
tristate "OmniVision OV2659 sensor support"
depends on VIDEO_V4L2 && I2C
@@ -531,6 +539,29 @@ config VIDEO_OV2659
To compile this driver as a module, choose M here: the
module will be called ov2659.
+config VIDEO_OV5645
+ tristate "OmniVision OV5645 sensor support"
+ depends on OF
+ depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ depends on MEDIA_CAMERA_SUPPORT
+ ---help---
+ This is a Video4Linux2 sensor-level driver for the OmniVision
+ OV5645 camera.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ov5645.
+
+config VIDEO_OV5647
+ tristate "OmniVision OV5647 sensor support"
+ depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ depends on MEDIA_CAMERA_SUPPORT
+ ---help---
+ This is a Video4Linux2 sensor-level driver for the OmniVision
+ OV5647 camera.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ov5647.
+
config VIDEO_OV7640
tristate "OmniVision OV7640 sensor support"
depends on I2C && VIDEO_V4L2
diff --git a/drivers/media/i2c/Makefile b/drivers/media/i2c/Makefile
index 5bc7bbeb5499..62323ec66be8 100644
--- a/drivers/media/i2c/Makefile
+++ b/drivers/media/i2c/Makefile
@@ -57,6 +57,9 @@ obj-$(CONFIG_VIDEO_VP27SMPX) += vp27smpx.o
obj-$(CONFIG_VIDEO_SONY_BTF_MPX) += sony-btf-mpx.o
obj-$(CONFIG_VIDEO_UPD64031A) += upd64031a.o
obj-$(CONFIG_VIDEO_UPD64083) += upd64083.o
+obj-$(CONFIG_VIDEO_OV2640) += ov2640.o
+obj-$(CONFIG_VIDEO_OV5645) += ov5645.o
+obj-$(CONFIG_VIDEO_OV5647) += ov5647.o
obj-$(CONFIG_VIDEO_OV7640) += ov7640.o
obj-$(CONFIG_VIDEO_OV7670) += ov7670.o
obj-$(CONFIG_VIDEO_OV9650) += ov9650.o
diff --git a/drivers/media/i2c/ad5820.c b/drivers/media/i2c/ad5820.c
index a9026a91855e..3d2a3c6b67d8 100644
--- a/drivers/media/i2c/ad5820.c
+++ b/drivers/media/i2c/ad5820.c
@@ -336,7 +336,7 @@ cleanup:
return ret;
}
-static int __exit ad5820_remove(struct i2c_client *client)
+static int ad5820_remove(struct i2c_client *client)
{
struct v4l2_subdev *subdev = i2c_get_clientdata(client);
struct ad5820_device *coil = to_ad5820_device(subdev);
@@ -362,7 +362,7 @@ static struct i2c_driver ad5820_i2c_driver = {
.pm = &ad5820_pm,
},
.probe = ad5820_probe,
- .remove = __exit_p(ad5820_remove),
+ .remove = ad5820_remove,
.id_table = ad5820_id_table,
};
diff --git a/drivers/media/i2c/adv7511.c b/drivers/media/i2c/adv7511.c
index 8c9e28949ab1..ccc478605643 100644
--- a/drivers/media/i2c/adv7511.c
+++ b/drivers/media/i2c/adv7511.c
@@ -734,7 +734,7 @@ static int adv7511_s_power(struct v4l2_subdev *sd, int on)
#if IS_ENABLED(CONFIG_VIDEO_ADV7511_CEC)
static int adv7511_cec_adap_enable(struct cec_adapter *adap, bool enable)
{
- struct adv7511_state *state = adap->priv;
+ struct adv7511_state *state = cec_get_drvdata(adap);
struct v4l2_subdev *sd = &state->sd;
if (state->i2c_cec == NULL)
@@ -769,7 +769,7 @@ static int adv7511_cec_adap_enable(struct cec_adapter *adap, bool enable)
static int adv7511_cec_adap_log_addr(struct cec_adapter *adap, u8 addr)
{
- struct adv7511_state *state = adap->priv;
+ struct adv7511_state *state = cec_get_drvdata(adap);
struct v4l2_subdev *sd = &state->sd;
unsigned int i, free_idx = ADV7511_MAX_ADDRS;
@@ -824,7 +824,7 @@ static int adv7511_cec_adap_log_addr(struct cec_adapter *adap, u8 addr)
static int adv7511_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
u32 signal_free_time, struct cec_msg *msg)
{
- struct adv7511_state *state = adap->priv;
+ struct adv7511_state *state = cec_get_drvdata(adap);
struct v4l2_subdev *sd = &state->sd;
u8 len = msg->len;
unsigned int i;
diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c
index d8bf435db86d..f1fa9cec489f 100644
--- a/drivers/media/i2c/adv7604.c
+++ b/drivers/media/i2c/adv7604.c
@@ -2050,7 +2050,7 @@ static void adv76xx_cec_isr(struct v4l2_subdev *sd, bool *handled)
static int adv76xx_cec_adap_enable(struct cec_adapter *adap, bool enable)
{
- struct adv76xx_state *state = adap->priv;
+ struct adv76xx_state *state = cec_get_drvdata(adap);
struct v4l2_subdev *sd = &state->sd;
if (!state->cec_enabled_adap && enable) {
@@ -2080,7 +2080,7 @@ static int adv76xx_cec_adap_enable(struct cec_adapter *adap, bool enable)
static int adv76xx_cec_adap_log_addr(struct cec_adapter *adap, u8 addr)
{
- struct adv76xx_state *state = adap->priv;
+ struct adv76xx_state *state = cec_get_drvdata(adap);
struct v4l2_subdev *sd = &state->sd;
unsigned int i, free_idx = ADV76XX_MAX_ADDRS;
@@ -2135,7 +2135,7 @@ static int adv76xx_cec_adap_log_addr(struct cec_adapter *adap, u8 addr)
static int adv76xx_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
u32 signal_free_time, struct cec_msg *msg)
{
- struct adv76xx_state *state = adap->priv;
+ struct adv76xx_state *state = cec_get_drvdata(adap);
struct v4l2_subdev *sd = &state->sd;
u8 len = msg->len;
unsigned int i;
diff --git a/drivers/media/i2c/adv7842.c b/drivers/media/i2c/adv7842.c
index 2d61f0cc2b5b..303effda1a2e 100644
--- a/drivers/media/i2c/adv7842.c
+++ b/drivers/media/i2c/adv7842.c
@@ -2250,7 +2250,7 @@ static void adv7842_cec_isr(struct v4l2_subdev *sd, bool *handled)
static int adv7842_cec_adap_enable(struct cec_adapter *adap, bool enable)
{
- struct adv7842_state *state = adap->priv;
+ struct adv7842_state *state = cec_get_drvdata(adap);
struct v4l2_subdev *sd = &state->sd;
if (!state->cec_enabled_adap && enable) {
@@ -2279,7 +2279,7 @@ static int adv7842_cec_adap_enable(struct cec_adapter *adap, bool enable)
static int adv7842_cec_adap_log_addr(struct cec_adapter *adap, u8 addr)
{
- struct adv7842_state *state = adap->priv;
+ struct adv7842_state *state = cec_get_drvdata(adap);
struct v4l2_subdev *sd = &state->sd;
unsigned int i, free_idx = ADV7842_MAX_ADDRS;
@@ -2334,7 +2334,7 @@ static int adv7842_cec_adap_log_addr(struct cec_adapter *adap, u8 addr)
static int adv7842_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
u32 signal_free_time, struct cec_msg *msg)
{
- struct adv7842_state *state = adap->priv;
+ struct adv7842_state *state = cec_get_drvdata(adap);
struct v4l2_subdev *sd = &state->sd;
u8 len = msg->len;
unsigned int i;
diff --git a/drivers/media/i2c/et8ek8/et8ek8_driver.c b/drivers/media/i2c/et8ek8/et8ek8_driver.c
index bec4a563a09c..6e313d5243a0 100644
--- a/drivers/media/i2c/et8ek8/et8ek8_driver.c
+++ b/drivers/media/i2c/et8ek8/et8ek8_driver.c
@@ -1485,6 +1485,7 @@ static const struct of_device_id et8ek8_of_table[] = {
{ .compatible = "toshiba,et8ek8" },
{ },
};
+MODULE_DEVICE_TABLE(of, et8ek8_of_table);
static const struct i2c_device_id et8ek8_id_table[] = {
{ ET8EK8_NAME, 0 },
@@ -1495,6 +1496,7 @@ MODULE_DEVICE_TABLE(i2c, et8ek8_id_table);
static const struct dev_pm_ops et8ek8_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(et8ek8_suspend, et8ek8_resume)
};
static struct i2c_driver et8ek8_i2c_driver = {
.driver = {
diff --git a/drivers/media/i2c/ov2640.c b/drivers/media/i2c/ov2640.c
new file mode 100644
index 000000000000..e6d0c1f64f0b
--- /dev/null
+++ b/drivers/media/i2c/ov2640.c
@@ -0,0 +1,1201 @@
+/*
+ * ov2640 Camera Driver
+ *
+ * Copyright (C) 2010 Alberto Panizzo <maramaopercheseimorto@gmail.com>
+ *
+ * Based on the ov772x and ov9640 drivers and previous non-merged implementations.
+ *
+ * Copyright 2005-2009 Freescale Semiconductor, Inc. All Rights Reserved.
+ * Copyright (C) 2006, OmniVision
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/clk.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
+#include <linux/of_gpio.h>
+#include <linux/v4l2-mediabus.h>
+#include <linux/videodev2.h>
+
+#include <media/v4l2-device.h>
+#include <media/v4l2-subdev.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-image-sizes.h>
+
+#define VAL_SET(x, mask, rshift, lshift) \
+ ((((x) >> rshift) & mask) << lshift)
+/*
+ * DSP registers
+ * register offset for BANK_SEL == BANK_SEL_DSP
+ */
+#define R_BYPASS 0x05 /* Bypass DSP */
+#define R_BYPASS_DSP_BYPAS 0x01 /* Bypass DSP, sensor out directly */
+#define R_BYPASS_USE_DSP 0x00 /* Use the internal DSP */
+#define QS 0x44 /* Quantization Scale Factor */
+#define CTRLI 0x50
+#define CTRLI_LP_DP 0x80
+#define CTRLI_ROUND 0x40
+#define CTRLI_V_DIV_SET(x) VAL_SET(x, 0x3, 0, 3)
+#define CTRLI_H_DIV_SET(x) VAL_SET(x, 0x3, 0, 0)
+#define HSIZE 0x51 /* H_SIZE[7:0] (real/4) */
+#define HSIZE_SET(x) VAL_SET(x, 0xFF, 2, 0)
+#define VSIZE 0x52 /* V_SIZE[7:0] (real/4) */
+#define VSIZE_SET(x) VAL_SET(x, 0xFF, 2, 0)
+#define XOFFL 0x53 /* OFFSET_X[7:0] */
+#define XOFFL_SET(x) VAL_SET(x, 0xFF, 0, 0)
+#define YOFFL 0x54 /* OFFSET_Y[7:0] */
+#define YOFFL_SET(x) VAL_SET(x, 0xFF, 0, 0)
+#define VHYX 0x55 /* Offset and size completion */
+#define VHYX_VSIZE_SET(x) VAL_SET(x, 0x1, (8+2), 7)
+#define VHYX_HSIZE_SET(x) VAL_SET(x, 0x1, (8+2), 3)
+#define VHYX_YOFF_SET(x) VAL_SET(x, 0x3, 8, 4)
+#define VHYX_XOFF_SET(x) VAL_SET(x, 0x3, 8, 0)
+#define DPRP 0x56
+#define TEST 0x57 /* Horizontal size completion */
+#define TEST_HSIZE_SET(x) VAL_SET(x, 0x1, (9+2), 7)
+#define ZMOW 0x5A /* Zoom: Out Width OUTW[7:0] (real/4) */
+#define ZMOW_OUTW_SET(x) VAL_SET(x, 0xFF, 2, 0)
+#define ZMOH 0x5B /* Zoom: Out Height OUTH[7:0] (real/4) */
+#define ZMOH_OUTH_SET(x) VAL_SET(x, 0xFF, 2, 0)
+#define ZMHH 0x5C /* Zoom: Speed and H&W completion */
+#define ZMHH_ZSPEED_SET(x) VAL_SET(x, 0x0F, 0, 4)
+#define ZMHH_OUTH_SET(x) VAL_SET(x, 0x1, (8+2), 2)
+#define ZMHH_OUTW_SET(x) VAL_SET(x, 0x3, (8+2), 0)
+#define BPADDR 0x7C /* SDE Indirect Register Access: Address */
+#define BPDATA 0x7D /* SDE Indirect Register Access: Data */
+#define CTRL2 0x86 /* DSP Module enable 2 */
+#define CTRL2_DCW_EN 0x20
+#define CTRL2_SDE_EN 0x10
+#define CTRL2_UV_ADJ_EN 0x08
+#define CTRL2_UV_AVG_EN 0x04
+#define CTRL2_CMX_EN 0x01
+#define CTRL3 0x87 /* DSP Module enable 3 */
+#define CTRL3_BPC_EN 0x80
+#define CTRL3_WPC_EN 0x40
+#define SIZEL 0x8C /* Image Size Completion */
+#define SIZEL_HSIZE8_11_SET(x) VAL_SET(x, 0x1, 11, 6)
+#define SIZEL_HSIZE8_SET(x) VAL_SET(x, 0x7, 0, 3)
+#define SIZEL_VSIZE8_SET(x) VAL_SET(x, 0x7, 0, 0)
+#define HSIZE8 0xC0 /* Image Horizontal Size HSIZE[10:3] */
+#define HSIZE8_SET(x) VAL_SET(x, 0xFF, 3, 0)
+#define VSIZE8 0xC1 /* Image Vertical Size VSIZE[10:3] */
+#define VSIZE8_SET(x) VAL_SET(x, 0xFF, 3, 0)
+#define CTRL0 0xC2 /* DSP Module enable 0 */
+#define CTRL0_AEC_EN 0x80
+#define CTRL0_AEC_SEL 0x40
+#define CTRL0_STAT_SEL 0x20
+#define CTRL0_VFIRST 0x10
+#define CTRL0_YUV422 0x08
+#define CTRL0_YUV_EN 0x04
+#define CTRL0_RGB_EN 0x02
+#define CTRL0_RAW_EN 0x01
+#define CTRL1 0xC3 /* DSP Module enable 1 */
+#define CTRL1_CIP 0x80
+#define CTRL1_DMY 0x40
+#define CTRL1_RAW_GMA 0x20
+#define CTRL1_DG 0x10
+#define CTRL1_AWB 0x08
+#define CTRL1_AWB_GAIN 0x04
+#define CTRL1_LENC 0x02
+#define CTRL1_PRE 0x01
+/* REG 0xC7 (unknown name): affects Auto White Balance (AWB)
+ * AWB_OFF 0x40
+ * AWB_SIMPLE 0x10
+ * AWB_ON 0x00 (Advanced AWB ?) */
+#define R_DVP_SP 0xD3 /* DVP output speed control */
+#define R_DVP_SP_AUTO_MODE 0x80
+#define R_DVP_SP_DVP_MASK 0x3F /* DVP PCLK = sysclk (48)/[6:0] (YUV0);
+ * = sysclk (48)/(2*[6:0]) (RAW);*/
+#define IMAGE_MODE 0xDA /* Image Output Format Select */
+#define IMAGE_MODE_Y8_DVP_EN 0x40
+#define IMAGE_MODE_JPEG_EN 0x10
+#define IMAGE_MODE_YUV422 0x00
+#define IMAGE_MODE_RAW10 0x04 /* (DVP) */
+#define IMAGE_MODE_RGB565 0x08
+#define IMAGE_MODE_HREF_VSYNC 0x02 /* HREF timing select in DVP JPEG output
+ * mode (0 for HREF is same as sensor) */
+#define IMAGE_MODE_LBYTE_FIRST 0x01 /* Byte swap enable for DVP
+ * 1: Low byte first UYVY (C2[4] =0)
+ * VYUY (C2[4] =1)
+ * 0: High byte first YUYV (C2[4]=0)
+ * YVYU (C2[4] = 1) */
+#define RESET 0xE0 /* Reset */
+#define RESET_MICROC 0x40
+#define RESET_SCCB 0x20
+#define RESET_JPEG 0x10
+#define RESET_DVP 0x04
+#define RESET_IPU 0x02
+#define RESET_CIF 0x01
+#define REGED 0xED /* Register ED */
+#define REGED_CLK_OUT_DIS 0x10
+#define MS_SP 0xF0 /* SCCB Master Speed */
+#define SS_ID 0xF7 /* SCCB Slave ID */
+#define SS_CTRL 0xF8 /* SCCB Slave Control */
+#define SS_CTRL_ADD_AUTO_INC 0x20
+#define SS_CTRL_EN 0x08
+#define SS_CTRL_DELAY_CLK 0x04
+#define SS_CTRL_ACC_EN 0x02
+#define SS_CTRL_SEN_PASS_THR 0x01
+#define MC_BIST 0xF9 /* Microcontroller misc register */
+#define MC_BIST_RESET 0x80 /* Microcontroller Reset */
+#define MC_BIST_BOOT_ROM_SEL 0x40
+#define MC_BIST_12KB_SEL 0x20
+#define MC_BIST_12KB_MASK 0x30
+#define MC_BIST_512KB_SEL 0x08
+#define MC_BIST_512KB_MASK 0x0C
+#define MC_BIST_BUSY_BIT_R 0x02
+#define MC_BIST_MC_RES_ONE_SH_W 0x02
+#define MC_BIST_LAUNCH 0x01
+#define BANK_SEL 0xFF /* Register Bank Select */
+#define BANK_SEL_DSP 0x00
+#define BANK_SEL_SENS 0x01
+
+/*
+ * Sensor registers
+ * register offset for BANK_SEL == BANK_SEL_SENS
+ */
+#define GAIN 0x00 /* AGC - Gain control gain setting */
+#define COM1 0x03 /* Common control 1 */
+#define COM1_1_DUMMY_FR 0x40
+#define COM1_3_DUMMY_FR 0x80
+#define COM1_7_DUMMY_FR 0xC0
+#define COM1_VWIN_LSB_UXGA 0x0F
+#define COM1_VWIN_LSB_SVGA 0x0A
+#define COM1_VWIN_LSB_CIF 0x06
+#define REG04 0x04 /* Register 04 */
+#define REG04_DEF 0x20 /* Always set */
+#define REG04_HFLIP_IMG 0x80 /* Horizontal mirror image ON/OFF */
+#define REG04_VFLIP_IMG 0x40 /* Vertical flip image ON/OFF */
+#define REG04_VREF_EN 0x10
+#define REG04_HREF_EN 0x08
+#define REG04_AEC_SET(x) VAL_SET(x, 0x3, 0, 0)
+#define REG08 0x08 /* Frame Exposure One-pin Control Pre-charge Row Num */
+#define COM2 0x09 /* Common control 2 */
+#define COM2_SOFT_SLEEP_MODE 0x10 /* Soft sleep mode */
+ /* Output drive capability */
+#define COM2_OCAP_Nx_SET(N) (((N) - 1) & 0x03) /* N = [1x .. 4x] */
+#define PID 0x0A /* Product ID Number MSB */
+#define VER 0x0B /* Product ID Number LSB */
+#define COM3 0x0C /* Common control 3 */
+#define COM3_BAND_50H 0x04 /* 0 For Banding at 60H */
+#define COM3_BAND_AUTO 0x02 /* Auto Banding */
+#define COM3_SING_FR_SNAPSH 0x01 /* 0: enable live video output after the
+ * snapshot sequence */
+#define AEC 0x10 /* AEC[9:2] Exposure Value */
+#define CLKRC 0x11 /* Internal clock */
+#define CLKRC_EN 0x80
+#define CLKRC_DIV_SET(x) (((x) - 1) & 0x1F) /* CLK = XVCLK/(x) */
+#define COM7 0x12 /* Common control 7 */
+#define COM7_SRST 0x80 /* Initiates system reset. All registers are
+ * set to factory default values after which
+ * the chip resumes normal operation */
+#define COM7_RES_UXGA 0x00 /* Resolution selectors for UXGA */
+#define COM7_RES_SVGA 0x40 /* SVGA */
+#define COM7_RES_CIF 0x20 /* CIF */
+#define COM7_ZOOM_EN 0x04 /* Enable Zoom mode */
+#define COM7_COLOR_BAR_TEST 0x02 /* Enable Color Bar Test Pattern */
+#define COM8 0x13 /* Common control 8 */
+#define COM8_DEF 0xC0
+#define COM8_BNDF_EN 0x20 /* Banding filter ON/OFF */
+#define COM8_AGC_EN 0x04 /* AGC Auto/Manual control selection */
+#define COM8_AEC_EN 0x01 /* Auto/Manual Exposure control */
+#define COM9 0x14 /* Common control 9
+ * Automatic gain ceiling - maximum AGC value [7:5]*/
+#define COM9_AGC_GAIN_2x 0x00 /* 000 : 2x */
+#define COM9_AGC_GAIN_4x 0x20 /* 001 : 4x */
+#define COM9_AGC_GAIN_8x 0x40 /* 010 : 8x */
+#define COM9_AGC_GAIN_16x 0x60 /* 011 : 16x */
+#define COM9_AGC_GAIN_32x 0x80 /* 100 : 32x */
+#define COM9_AGC_GAIN_64x 0xA0 /* 101 : 64x */
+#define COM9_AGC_GAIN_128x 0xC0 /* 110 : 128x */
+#define COM10 0x15 /* Common control 10 */
+#define COM10_PCLK_HREF 0x20 /* PCLK output qualified by HREF */
+#define COM10_PCLK_RISE 0x10 /* Data is updated at the rising edge of
+ * PCLK (user can latch data at the next
+ * falling edge of PCLK).
+ * 0 otherwise. */
+#define COM10_HREF_INV 0x08 /* Invert HREF polarity:
+ * HREF negative for valid data*/
+#define COM10_VSINC_INV 0x02 /* Invert VSYNC polarity */
+#define HSTART 0x17 /* Horizontal Window start MSB 8 bit */
+#define HEND 0x18 /* Horizontal Window end MSB 8 bit */
+#define VSTART 0x19 /* Vertical Window start MSB 8 bit */
+#define VEND 0x1A /* Vertical Window end MSB 8 bit */
+#define MIDH 0x1C /* Manufacturer ID byte - high */
+#define MIDL 0x1D /* Manufacturer ID byte - low */
+#define AEW 0x24 /* AGC/AEC - Stable operating region (upper limit) */
+#define AEB 0x25 /* AGC/AEC - Stable operating region (lower limit) */
+#define VV 0x26 /* AGC/AEC Fast mode operating region */
+#define VV_HIGH_TH_SET(x) VAL_SET(x, 0xF, 0, 4)
+#define VV_LOW_TH_SET(x) VAL_SET(x, 0xF, 0, 0)
+#define REG2A 0x2A /* Dummy pixel insert MSB */
+#define FRARL 0x2B /* Dummy pixel insert LSB */
+#define ADDVFL 0x2D /* LSB of insert dummy lines in Vertical direction */
+#define ADDVFH 0x2E /* MSB of insert dummy lines in Vertical direction */
+#define YAVG 0x2F /* Y/G Channel Average value */
+#define REG32 0x32 /* Common Control 32 */
+#define REG32_PCLK_DIV_2 0x80 /* PCLK freq divided by 2 */
+#define REG32_PCLK_DIV_4 0xC0 /* PCLK freq divided by 4 */
+#define ARCOM2 0x34 /* Zoom: Horizontal start point */
+#define REG45 0x45 /* Register 45 */
+#define FLL 0x46 /* Frame Length Adjustment LSBs */
+#define FLH 0x47 /* Frame Length Adjustment MSBs */
+#define COM19 0x48 /* Zoom: Vertical start point */
+#define ZOOMS 0x49 /* Zoom: Vertical start point */
+#define COM22 0x4B /* Flash light control */
+#define COM25 0x4E /* For Banding operations */
+#define COM25_50HZ_BANDING_AEC_MSBS_MASK 0xC0 /* 50Hz Bd. AEC 2 MSBs */
+#define COM25_60HZ_BANDING_AEC_MSBS_MASK 0x30 /* 60Hz Bd. AEC 2 MSBs */
+#define COM25_50HZ_BANDING_AEC_MSBS_SET(x) VAL_SET(x, 0x3, 8, 6)
+#define COM25_60HZ_BANDING_AEC_MSBS_SET(x) VAL_SET(x, 0x3, 8, 4)
+#define BD50 0x4F /* 50Hz Banding AEC 8 LSBs */
+#define BD50_50HZ_BANDING_AEC_LSBS_SET(x) VAL_SET(x, 0xFF, 0, 0)
+#define BD60 0x50 /* 60Hz Banding AEC 8 LSBs */
+#define BD60_60HZ_BANDING_AEC_LSBS_SET(x) VAL_SET(x, 0xFF, 0, 0)
+#define REG5A 0x5A /* 50/60Hz Banding Maximum AEC Step */
+#define BD50_MAX_AEC_STEP_MASK 0xF0 /* 50Hz Banding Max. AEC Step */
+#define BD60_MAX_AEC_STEP_MASK 0x0F /* 60Hz Banding Max. AEC Step */
+#define BD50_MAX_AEC_STEP_SET(x) VAL_SET((x - 1), 0x0F, 0, 4)
+#define BD60_MAX_AEC_STEP_SET(x) VAL_SET((x - 1), 0x0F, 0, 0)
+#define REG5D 0x5D /* AVGsel[7:0], 16-zone average weight option */
+#define REG5E 0x5E /* AVGsel[15:8], 16-zone average weight option */
+#define REG5F 0x5F /* AVGsel[23:16], 16-zone average weight option */
+#define REG60 0x60 /* AVGsel[31:24], 16-zone average weight option */
+#define HISTO_LOW 0x61 /* Histogram Algorithm Low Level */
+#define HISTO_HIGH 0x62 /* Histogram Algorithm High Level */
+
+/*
+ * ID
+ */
+#define MANUFACTURER_ID 0x7FA2
+#define PID_OV2640 0x2642
+#define VERSION(pid, ver) ((pid << 8) | (ver & 0xFF))
+
+/*
+ * Struct
+ */
+struct regval_list {
+ u8 reg_num;
+ u8 value;
+};
+
+struct ov2640_win_size {
+ char *name;
+ u32 width;
+ u32 height;
+ const struct regval_list *regs;
+};
+
+
+struct ov2640_priv {
+ struct v4l2_subdev subdev;
+#if defined(CONFIG_MEDIA_CONTROLLER)
+ struct media_pad pad;
+#endif
+ struct v4l2_ctrl_handler hdl;
+ u32 cfmt_code;
+ struct clk *clk;
+ const struct ov2640_win_size *win;
+
+ struct gpio_desc *resetb_gpio;
+ struct gpio_desc *pwdn_gpio;
+};
+
+/*
+ * Registers settings
+ */
+
+#define ENDMARKER { 0xff, 0xff }
+
+static const struct regval_list ov2640_init_regs[] = {
+ { BANK_SEL, BANK_SEL_DSP },
+ { 0x2c, 0xff },
+ { 0x2e, 0xdf },
+ { BANK_SEL, BANK_SEL_SENS },
+ { 0x3c, 0x32 },
+ { CLKRC, CLKRC_DIV_SET(1) },
+ { COM2, COM2_OCAP_Nx_SET(3) },
+ { REG04, REG04_DEF | REG04_HREF_EN },
+ { COM8, COM8_DEF | COM8_BNDF_EN | COM8_AGC_EN | COM8_AEC_EN },
+ { COM9, COM9_AGC_GAIN_8x | 0x08},
+ { 0x2c, 0x0c },
+ { 0x33, 0x78 },
+ { 0x3a, 0x33 },
+ { 0x3b, 0xfb },
+ { 0x3e, 0x00 },
+ { 0x43, 0x11 },
+ { 0x16, 0x10 },
+ { 0x39, 0x02 },
+ { 0x35, 0x88 },
+ { 0x22, 0x0a },
+ { 0x37, 0x40 },
+ { 0x23, 0x00 },
+ { ARCOM2, 0xa0 },
+ { 0x06, 0x02 },
+ { 0x06, 0x88 },
+ { 0x07, 0xc0 },
+ { 0x0d, 0xb7 },
+ { 0x0e, 0x01 },
+ { 0x4c, 0x00 },
+ { 0x4a, 0x81 },
+ { 0x21, 0x99 },
+ { AEW, 0x40 },
+ { AEB, 0x38 },
+ { VV, VV_HIGH_TH_SET(0x08) | VV_LOW_TH_SET(0x02) },
+ { 0x5c, 0x00 },
+ { 0x63, 0x00 },
+ { FLL, 0x22 },
+ { COM3, 0x38 | COM3_BAND_AUTO },
+ { REG5D, 0x55 },
+ { REG5E, 0x7d },
+ { REG5F, 0x7d },
+ { REG60, 0x55 },
+ { HISTO_LOW, 0x70 },
+ { HISTO_HIGH, 0x80 },
+ { 0x7c, 0x05 },
+ { 0x20, 0x80 },
+ { 0x28, 0x30 },
+ { 0x6c, 0x00 },
+ { 0x6d, 0x80 },
+ { 0x6e, 0x00 },
+ { 0x70, 0x02 },
+ { 0x71, 0x94 },
+ { 0x73, 0xc1 },
+ { 0x3d, 0x34 },
+ { COM7, COM7_RES_UXGA | COM7_ZOOM_EN },
+ { REG5A, BD50_MAX_AEC_STEP_SET(6)
+ | BD60_MAX_AEC_STEP_SET(8) }, /* 0x57 */
+ { COM25, COM25_50HZ_BANDING_AEC_MSBS_SET(0x0bb)
+ | COM25_60HZ_BANDING_AEC_MSBS_SET(0x09c) }, /* 0x00 */
+ { BD50, BD50_50HZ_BANDING_AEC_LSBS_SET(0x0bb) }, /* 0xbb */
+ { BD60, BD60_60HZ_BANDING_AEC_LSBS_SET(0x09c) }, /* 0x9c */
+ { BANK_SEL, BANK_SEL_DSP },
+ { 0xe5, 0x7f },
+ { MC_BIST, MC_BIST_RESET | MC_BIST_BOOT_ROM_SEL },
+ { 0x41, 0x24 },
+ { RESET, RESET_JPEG | RESET_DVP },
+ { 0x76, 0xff },
+ { 0x33, 0xa0 },
+ { 0x42, 0x20 },
+ { 0x43, 0x18 },
+ { 0x4c, 0x00 },
+ { CTRL3, CTRL3_BPC_EN | CTRL3_WPC_EN | 0x10 },
+ { 0x88, 0x3f },
+ { 0xd7, 0x03 },
+ { 0xd9, 0x10 },
+ { R_DVP_SP, R_DVP_SP_AUTO_MODE | 0x2 },
+ { 0xc8, 0x08 },
+ { 0xc9, 0x80 },
+ { BPADDR, 0x00 },
+ { BPDATA, 0x00 },
+ { BPADDR, 0x03 },
+ { BPDATA, 0x48 },
+ { BPDATA, 0x48 },
+ { BPADDR, 0x08 },
+ { BPDATA, 0x20 },
+ { BPDATA, 0x10 },
+ { BPDATA, 0x0e },
+ { 0x90, 0x00 },
+ { 0x91, 0x0e },
+ { 0x91, 0x1a },
+ { 0x91, 0x31 },
+ { 0x91, 0x5a },
+ { 0x91, 0x69 },
+ { 0x91, 0x75 },
+ { 0x91, 0x7e },
+ { 0x91, 0x88 },
+ { 0x91, 0x8f },
+ { 0x91, 0x96 },
+ { 0x91, 0xa3 },
+ { 0x91, 0xaf },
+ { 0x91, 0xc4 },
+ { 0x91, 0xd7 },
+ { 0x91, 0xe8 },
+ { 0x91, 0x20 },
+ { 0x92, 0x00 },
+ { 0x93, 0x06 },
+ { 0x93, 0xe3 },
+ { 0x93, 0x03 },
+ { 0x93, 0x03 },
+ { 0x93, 0x00 },
+ { 0x93, 0x02 },
+ { 0x93, 0x00 },
+ { 0x93, 0x00 },
+ { 0x93, 0x00 },
+ { 0x93, 0x00 },
+ { 0x93, 0x00 },
+ { 0x93, 0x00 },
+ { 0x93, 0x00 },
+ { 0x96, 0x00 },
+ { 0x97, 0x08 },
+ { 0x97, 0x19 },
+ { 0x97, 0x02 },
+ { 0x97, 0x0c },
+ { 0x97, 0x24 },
+ { 0x97, 0x30 },
+ { 0x97, 0x28 },
+ { 0x97, 0x26 },
+ { 0x97, 0x02 },
+ { 0x97, 0x98 },
+ { 0x97, 0x80 },
+ { 0x97, 0x00 },
+ { 0x97, 0x00 },
+ { 0xa4, 0x00 },
+ { 0xa8, 0x00 },
+ { 0xc5, 0x11 },
+ { 0xc6, 0x51 },
+ { 0xbf, 0x80 },
+ { 0xc7, 0x10 }, /* simple AWB */
+ { 0xb6, 0x66 },
+ { 0xb8, 0xA5 },
+ { 0xb7, 0x64 },
+ { 0xb9, 0x7C },
+ { 0xb3, 0xaf },
+ { 0xb4, 0x97 },
+ { 0xb5, 0xFF },
+ { 0xb0, 0xC5 },
+ { 0xb1, 0x94 },
+ { 0xb2, 0x0f },
+ { 0xc4, 0x5c },
+ { 0xa6, 0x00 },
+ { 0xa7, 0x20 },
+ { 0xa7, 0xd8 },
+ { 0xa7, 0x1b },
+ { 0xa7, 0x31 },
+ { 0xa7, 0x00 },
+ { 0xa7, 0x18 },
+ { 0xa7, 0x20 },
+ { 0xa7, 0xd8 },
+ { 0xa7, 0x19 },
+ { 0xa7, 0x31 },
+ { 0xa7, 0x00 },
+ { 0xa7, 0x18 },
+ { 0xa7, 0x20 },
+ { 0xa7, 0xd8 },
+ { 0xa7, 0x19 },
+ { 0xa7, 0x31 },
+ { 0xa7, 0x00 },
+ { 0xa7, 0x18 },
+ { 0x7f, 0x00 },
+ { 0xe5, 0x1f },
+ { 0xe1, 0x77 },
+ { 0xdd, 0x7f },
+ { CTRL0, CTRL0_YUV422 | CTRL0_YUV_EN | CTRL0_RGB_EN },
+ ENDMARKER,
+};
+
+/*
+ * Register settings for window size
+ * The preamble sets up the internal DSP to input a UXGA (1600x1200) image.
+ * The different zooming configurations then set up the output image size.
+ */
+static const struct regval_list ov2640_size_change_preamble_regs[] = {
+ { BANK_SEL, BANK_SEL_DSP },
+ { RESET, RESET_DVP },
+ { SIZEL, SIZEL_HSIZE8_11_SET(UXGA_WIDTH) |
+ SIZEL_HSIZE8_SET(UXGA_WIDTH) |
+ SIZEL_VSIZE8_SET(UXGA_HEIGHT) },
+ { HSIZE8, HSIZE8_SET(UXGA_WIDTH) },
+ { VSIZE8, VSIZE8_SET(UXGA_HEIGHT) },
+ { CTRL2, CTRL2_DCW_EN | CTRL2_SDE_EN |
+ CTRL2_UV_AVG_EN | CTRL2_CMX_EN | CTRL2_UV_ADJ_EN },
+ { HSIZE, HSIZE_SET(UXGA_WIDTH) },
+ { VSIZE, VSIZE_SET(UXGA_HEIGHT) },
+ { XOFFL, XOFFL_SET(0) },
+ { YOFFL, YOFFL_SET(0) },
+ { VHYX, VHYX_HSIZE_SET(UXGA_WIDTH) | VHYX_VSIZE_SET(UXGA_HEIGHT) |
+ VHYX_XOFF_SET(0) | VHYX_YOFF_SET(0)},
+ { TEST, TEST_HSIZE_SET(UXGA_WIDTH) },
+ ENDMARKER,
+};
+
+#define PER_SIZE_REG_SEQ(x, y, v_div, h_div, pclk_div) \
+ { CTRLI, CTRLI_LP_DP | CTRLI_V_DIV_SET(v_div) | \
+ CTRLI_H_DIV_SET(h_div)}, \
+ { ZMOW, ZMOW_OUTW_SET(x) }, \
+ { ZMOH, ZMOH_OUTH_SET(y) }, \
+ { ZMHH, ZMHH_OUTW_SET(x) | ZMHH_OUTH_SET(y) }, \
+ { R_DVP_SP, pclk_div }, \
+ { RESET, 0x00}
+
+static const struct regval_list ov2640_qcif_regs[] = {
+ PER_SIZE_REG_SEQ(QCIF_WIDTH, QCIF_HEIGHT, 3, 3, 4),
+ ENDMARKER,
+};
+
+static const struct regval_list ov2640_qvga_regs[] = {
+ PER_SIZE_REG_SEQ(QVGA_WIDTH, QVGA_HEIGHT, 2, 2, 4),
+ ENDMARKER,
+};
+
+static const struct regval_list ov2640_cif_regs[] = {
+ PER_SIZE_REG_SEQ(CIF_WIDTH, CIF_HEIGHT, 2, 2, 8),
+ ENDMARKER,
+};
+
+static const struct regval_list ov2640_vga_regs[] = {
+ PER_SIZE_REG_SEQ(VGA_WIDTH, VGA_HEIGHT, 0, 0, 2),
+ ENDMARKER,
+};
+
+static const struct regval_list ov2640_svga_regs[] = {
+ PER_SIZE_REG_SEQ(SVGA_WIDTH, SVGA_HEIGHT, 1, 1, 2),
+ ENDMARKER,
+};
+
+static const struct regval_list ov2640_xga_regs[] = {
+ PER_SIZE_REG_SEQ(XGA_WIDTH, XGA_HEIGHT, 0, 0, 2),
+ { CTRLI, 0x00},
+ ENDMARKER,
+};
+
+static const struct regval_list ov2640_sxga_regs[] = {
+ PER_SIZE_REG_SEQ(SXGA_WIDTH, SXGA_HEIGHT, 0, 0, 2),
+ { CTRLI, 0x00},
+ { R_DVP_SP, 2 | R_DVP_SP_AUTO_MODE },
+ ENDMARKER,
+};
+
+static const struct regval_list ov2640_uxga_regs[] = {
+ PER_SIZE_REG_SEQ(UXGA_WIDTH, UXGA_HEIGHT, 0, 0, 0),
+ { CTRLI, 0x00},
+ { R_DVP_SP, 0 | R_DVP_SP_AUTO_MODE },
+ ENDMARKER,
+};
+
+#define OV2640_SIZE(n, w, h, r) \
+ {.name = n, .width = w , .height = h, .regs = r }
+
+static const struct ov2640_win_size ov2640_supported_win_sizes[] = {
+ OV2640_SIZE("QCIF", QCIF_WIDTH, QCIF_HEIGHT, ov2640_qcif_regs),
+ OV2640_SIZE("QVGA", QVGA_WIDTH, QVGA_HEIGHT, ov2640_qvga_regs),
+ OV2640_SIZE("CIF", CIF_WIDTH, CIF_HEIGHT, ov2640_cif_regs),
+ OV2640_SIZE("VGA", VGA_WIDTH, VGA_HEIGHT, ov2640_vga_regs),
+ OV2640_SIZE("SVGA", SVGA_WIDTH, SVGA_HEIGHT, ov2640_svga_regs),
+ OV2640_SIZE("XGA", XGA_WIDTH, XGA_HEIGHT, ov2640_xga_regs),
+ OV2640_SIZE("SXGA", SXGA_WIDTH, SXGA_HEIGHT, ov2640_sxga_regs),
+ OV2640_SIZE("UXGA", UXGA_WIDTH, UXGA_HEIGHT, ov2640_uxga_regs),
+};
+
+/*
+ * Register settings for pixel formats
+ */
+static const struct regval_list ov2640_format_change_preamble_regs[] = {
+ { BANK_SEL, BANK_SEL_DSP },
+ { R_BYPASS, R_BYPASS_USE_DSP },
+ ENDMARKER,
+};
+
+static const struct regval_list ov2640_yuyv_regs[] = {
+ { IMAGE_MODE, IMAGE_MODE_YUV422 },
+ { 0xd7, 0x03 },
+ { 0x33, 0xa0 },
+ { 0xe5, 0x1f },
+ { 0xe1, 0x67 },
+ { RESET, 0x00 },
+ { R_BYPASS, R_BYPASS_USE_DSP },
+ ENDMARKER,
+};
+
+static const struct regval_list ov2640_uyvy_regs[] = {
+ { IMAGE_MODE, IMAGE_MODE_LBYTE_FIRST | IMAGE_MODE_YUV422 },
+ { 0xd7, 0x01 },
+ { 0x33, 0xa0 },
+ { 0xe1, 0x67 },
+ { RESET, 0x00 },
+ { R_BYPASS, R_BYPASS_USE_DSP },
+ ENDMARKER,
+};
+
+static const struct regval_list ov2640_rgb565_be_regs[] = {
+ { IMAGE_MODE, IMAGE_MODE_RGB565 },
+ { 0xd7, 0x03 },
+ { RESET, 0x00 },
+ { R_BYPASS, R_BYPASS_USE_DSP },
+ ENDMARKER,
+};
+
+static const struct regval_list ov2640_rgb565_le_regs[] = {
+ { IMAGE_MODE, IMAGE_MODE_LBYTE_FIRST | IMAGE_MODE_RGB565 },
+ { 0xd7, 0x03 },
+ { RESET, 0x00 },
+ { R_BYPASS, R_BYPASS_USE_DSP },
+ ENDMARKER,
+};
+
+static u32 ov2640_codes[] = {
+ MEDIA_BUS_FMT_YUYV8_2X8,
+ MEDIA_BUS_FMT_UYVY8_2X8,
+ MEDIA_BUS_FMT_YVYU8_2X8,
+ MEDIA_BUS_FMT_VYUY8_2X8,
+ MEDIA_BUS_FMT_RGB565_2X8_BE,
+ MEDIA_BUS_FMT_RGB565_2X8_LE,
+};
+
+/*
+ * General functions
+ */
+static struct ov2640_priv *to_ov2640(const struct i2c_client *client)
+{
+ return container_of(i2c_get_clientdata(client), struct ov2640_priv,
+ subdev);
+}
+
+static int ov2640_write_array(struct i2c_client *client,
+ const struct regval_list *vals)
+{
+ int ret;
+
+ while ((vals->reg_num != 0xff) || (vals->value != 0xff)) {
+ ret = i2c_smbus_write_byte_data(client,
+ vals->reg_num, vals->value);
+ dev_vdbg(&client->dev, "array: 0x%02x, 0x%02x",
+ vals->reg_num, vals->value);
+
+ if (ret < 0)
+ return ret;
+ vals++;
+ }
+ return 0;
+}
+
+static int ov2640_mask_set(struct i2c_client *client,
+ u8 reg, u8 mask, u8 set)
+{
+ s32 val = i2c_smbus_read_byte_data(client, reg);
+ if (val < 0)
+ return val;
+
+ val &= ~mask;
+ val |= set & mask;
+
+ dev_vdbg(&client->dev, "masks: 0x%02x, 0x%02x", reg, val);
+
+ return i2c_smbus_write_byte_data(client, reg, val);
+}
+
+static int ov2640_reset(struct i2c_client *client)
+{
+ int ret;
+ const struct regval_list reset_seq[] = {
+ {BANK_SEL, BANK_SEL_SENS},
+ {COM7, COM7_SRST},
+ ENDMARKER,
+ };
+
+ ret = ov2640_write_array(client, reset_seq);
+ if (ret)
+ goto err;
+
+ msleep(5);
+err:
+ dev_dbg(&client->dev, "%s: (ret %d)", __func__, ret);
+ return ret;
+}
+
+/*
+ * functions
+ */
+static int ov2640_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct v4l2_subdev *sd =
+ &container_of(ctrl->handler, struct ov2640_priv, hdl)->subdev;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ u8 val;
+ int ret;
+
+ ret = i2c_smbus_write_byte_data(client, BANK_SEL, BANK_SEL_SENS);
+ if (ret < 0)
+ return ret;
+
+ switch (ctrl->id) {
+ case V4L2_CID_VFLIP:
+ val = ctrl->val ? REG04_VFLIP_IMG | REG04_VREF_EN : 0x00;
+ return ov2640_mask_set(client, REG04,
+ REG04_VFLIP_IMG | REG04_VREF_EN, val);
+ /* NOTE: REG04_VREF_EN: 1 line shift / even/odd line swap */
+ case V4L2_CID_HFLIP:
+ val = ctrl->val ? REG04_HFLIP_IMG : 0x00;
+ return ov2640_mask_set(client, REG04, REG04_HFLIP_IMG, val);
+ }
+
+ return -EINVAL;
+}
+
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+static int ov2640_g_register(struct v4l2_subdev *sd,
+ struct v4l2_dbg_register *reg)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret;
+
+ reg->size = 1;
+ if (reg->reg > 0xff)
+ return -EINVAL;
+
+ ret = i2c_smbus_read_byte_data(client, reg->reg);
+ if (ret < 0)
+ return ret;
+
+ reg->val = ret;
+
+ return 0;
+}
+
+static int ov2640_s_register(struct v4l2_subdev *sd,
+ const struct v4l2_dbg_register *reg)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ if (reg->reg > 0xff ||
+ reg->val > 0xff)
+ return -EINVAL;
+
+ return i2c_smbus_write_byte_data(client, reg->reg, reg->val);
+}
+#endif
+
+static int ov2640_s_power(struct v4l2_subdev *sd, int on)
+{
+#ifdef CONFIG_GPIOLIB
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct ov2640_priv *priv = to_ov2640(client);
+
+ if (priv->pwdn_gpio)
+ gpiod_direction_output(priv->pwdn_gpio, !on);
+ if (on && priv->resetb_gpio) {
+ /* Assert the resetb pin to perform a reset pulse */
+ gpiod_direction_output(priv->resetb_gpio, 1);
+ usleep_range(3000, 5000);
+ gpiod_set_value(priv->resetb_gpio, 0);
+ }
+#endif
+ return 0;
+}
+
+/* Select the nearest higher resolution for capture */
+static const struct ov2640_win_size *ov2640_select_win(u32 width, u32 height)
+{
+ int i, default_size = ARRAY_SIZE(ov2640_supported_win_sizes) - 1;
+
+ for (i = 0; i < ARRAY_SIZE(ov2640_supported_win_sizes); i++) {
+ if (ov2640_supported_win_sizes[i].width >= width &&
+ ov2640_supported_win_sizes[i].height >= height)
+ return &ov2640_supported_win_sizes[i];
+ }
+
+ return &ov2640_supported_win_sizes[default_size];
+}
+
+static int ov2640_set_params(struct i2c_client *client,
+ const struct ov2640_win_size *win, u32 code)
+{
+ struct ov2640_priv *priv = to_ov2640(client);
+ const struct regval_list *selected_cfmt_regs;
+ u8 val;
+ int ret;
+
+ /* select win */
+ priv->win = win;
+
+ /* select format */
+ priv->cfmt_code = 0;
+ switch (code) {
+ case MEDIA_BUS_FMT_RGB565_2X8_BE:
+ dev_dbg(&client->dev, "%s: Selected cfmt RGB565 BE", __func__);
+ selected_cfmt_regs = ov2640_rgb565_be_regs;
+ break;
+ case MEDIA_BUS_FMT_RGB565_2X8_LE:
+ dev_dbg(&client->dev, "%s: Selected cfmt RGB565 LE", __func__);
+ selected_cfmt_regs = ov2640_rgb565_le_regs;
+ break;
+ case MEDIA_BUS_FMT_YUYV8_2X8:
+ dev_dbg(&client->dev, "%s: Selected cfmt YUYV (YUV422)", __func__);
+ selected_cfmt_regs = ov2640_yuyv_regs;
+ break;
+ case MEDIA_BUS_FMT_UYVY8_2X8:
+ default:
+ dev_dbg(&client->dev, "%s: Selected cfmt UYVY", __func__);
+ selected_cfmt_regs = ov2640_uyvy_regs;
+ break;
+ case MEDIA_BUS_FMT_YVYU8_2X8:
+ dev_dbg(&client->dev, "%s: Selected cfmt YVYU", __func__);
+ selected_cfmt_regs = ov2640_yuyv_regs;
+ break;
+ case MEDIA_BUS_FMT_VYUY8_2X8:
+ dev_dbg(&client->dev, "%s: Selected cfmt VYUY", __func__);
+ selected_cfmt_regs = ov2640_uyvy_regs;
+ break;
+ }
+
+ /* reset hardware */
+ ov2640_reset(client);
+
+ /* initialize the sensor with default data */
+ dev_dbg(&client->dev, "%s: Init default", __func__);
+ ret = ov2640_write_array(client, ov2640_init_regs);
+ if (ret < 0)
+ goto err;
+
+ /* select preamble */
+ dev_dbg(&client->dev, "%s: Set size to %s", __func__, priv->win->name);
+ ret = ov2640_write_array(client, ov2640_size_change_preamble_regs);
+ if (ret < 0)
+ goto err;
+
+ /* set size win */
+ ret = ov2640_write_array(client, priv->win->regs);
+ if (ret < 0)
+ goto err;
+
+ /* cfmt preamble */
+ dev_dbg(&client->dev, "%s: Set cfmt", __func__);
+ ret = ov2640_write_array(client, ov2640_format_change_preamble_regs);
+ if (ret < 0)
+ goto err;
+
+ /* set cfmt */
+ ret = ov2640_write_array(client, selected_cfmt_regs);
+ if (ret < 0)
+ goto err;
+ val = (code == MEDIA_BUS_FMT_YVYU8_2X8)
+ || (code == MEDIA_BUS_FMT_VYUY8_2X8) ? CTRL0_VFIRST : 0x00;
+ ret = ov2640_mask_set(client, CTRL0, CTRL0_VFIRST, val);
+ if (ret < 0)
+ goto err;
+
+ priv->cfmt_code = code;
+
+ return 0;
+
+err:
+ dev_err(&client->dev, "%s: Error %d", __func__, ret);
+ ov2640_reset(client);
+ priv->win = NULL;
+
+ return ret;
+}
+
+static int ov2640_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *format)
+{
+ struct v4l2_mbus_framefmt *mf = &format->format;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct ov2640_priv *priv = to_ov2640(client);
+
+ if (format->pad)
+ return -EINVAL;
+
+ if (!priv->win) {
+ priv->win = ov2640_select_win(SVGA_WIDTH, SVGA_HEIGHT);
+ priv->cfmt_code = MEDIA_BUS_FMT_UYVY8_2X8;
+ }
+
+ mf->width = priv->win->width;
+ mf->height = priv->win->height;
+ mf->code = priv->cfmt_code;
+ mf->colorspace = V4L2_COLORSPACE_SRGB;
+ mf->field = V4L2_FIELD_NONE;
+
+ return 0;
+}
+
+static int ov2640_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *format)
+{
+ struct v4l2_mbus_framefmt *mf = &format->format;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ const struct ov2640_win_size *win;
+
+ if (format->pad)
+ return -EINVAL;
+
+ /* select suitable win */
+ win = ov2640_select_win(mf->width, mf->height);
+ mf->width = win->width;
+ mf->height = win->height;
+
+ mf->field = V4L2_FIELD_NONE;
+ mf->colorspace = V4L2_COLORSPACE_SRGB;
+
+ switch (mf->code) {
+ case MEDIA_BUS_FMT_RGB565_2X8_BE:
+ case MEDIA_BUS_FMT_RGB565_2X8_LE:
+ case MEDIA_BUS_FMT_YUYV8_2X8:
+ case MEDIA_BUS_FMT_UYVY8_2X8:
+ case MEDIA_BUS_FMT_YVYU8_2X8:
+ case MEDIA_BUS_FMT_VYUY8_2X8:
+ break;
+ default:
+ mf->code = MEDIA_BUS_FMT_UYVY8_2X8;
+ break;
+ }
+
+ if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE)
+ return ov2640_set_params(client, win, mf->code);
+ cfg->try_fmt = *mf;
+ return 0;
+}
+
+static int ov2640_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ if (code->pad || code->index >= ARRAY_SIZE(ov2640_codes))
+ return -EINVAL;
+
+ code->code = ov2640_codes[code->index];
+ return 0;
+}
+
+static int ov2640_get_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_selection *sel)
+{
+ if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+ return -EINVAL;
+
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ case V4L2_SEL_TGT_CROP:
+ sel->r.left = 0;
+ sel->r.top = 0;
+ sel->r.width = UXGA_WIDTH;
+ sel->r.height = UXGA_HEIGHT;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ov2640_video_probe(struct i2c_client *client)
+{
+ struct ov2640_priv *priv = to_ov2640(client);
+ u8 pid, ver, midh, midl;
+ const char *devname;
+ int ret;
+
+ ret = ov2640_s_power(&priv->subdev, 1);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * check and show product ID and manufacturer ID
+ */
+ i2c_smbus_write_byte_data(client, BANK_SEL, BANK_SEL_SENS);
+ pid = i2c_smbus_read_byte_data(client, PID);
+ ver = i2c_smbus_read_byte_data(client, VER);
+ midh = i2c_smbus_read_byte_data(client, MIDH);
+ midl = i2c_smbus_read_byte_data(client, MIDL);
+
+ switch (VERSION(pid, ver)) {
+ case PID_OV2640:
+ devname = "ov2640";
+ break;
+ default:
+ dev_err(&client->dev,
+ "Product ID error %x:%x\n", pid, ver);
+ ret = -ENODEV;
+ goto done;
+ }
+
+ dev_info(&client->dev,
+ "%s Product ID %0x:%0x Manufacturer ID %x:%x\n",
+ devname, pid, ver, midh, midl);
+
+ ret = v4l2_ctrl_handler_setup(&priv->hdl);
+
+done:
+ ov2640_s_power(&priv->subdev, 0);
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops ov2640_ctrl_ops = {
+ .s_ctrl = ov2640_s_ctrl,
+};
+
+static const struct v4l2_subdev_core_ops ov2640_subdev_core_ops = {
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+ .g_register = ov2640_g_register,
+ .s_register = ov2640_s_register,
+#endif
+ .s_power = ov2640_s_power,
+};
+
+static const struct v4l2_subdev_pad_ops ov2640_subdev_pad_ops = {
+ .enum_mbus_code = ov2640_enum_mbus_code,
+ .get_selection = ov2640_get_selection,
+ .get_fmt = ov2640_get_fmt,
+ .set_fmt = ov2640_set_fmt,
+};
+
+static const struct v4l2_subdev_ops ov2640_subdev_ops = {
+ .core = &ov2640_subdev_core_ops,
+ .pad = &ov2640_subdev_pad_ops,
+};
+
+static int ov2640_probe_dt(struct i2c_client *client,
+ struct ov2640_priv *priv)
+{
+ int ret;
+
+ /* Request the reset GPIO deasserted */
+ priv->resetb_gpio = devm_gpiod_get_optional(&client->dev, "resetb",
+ GPIOD_OUT_LOW);
+
+ if (!priv->resetb_gpio)
+ dev_dbg(&client->dev, "resetb gpio is not assigned!\n");
+
+ ret = PTR_ERR_OR_ZERO(priv->resetb_gpio);
+ if (ret && ret != -ENOSYS) {
+ dev_dbg(&client->dev,
+ "Error %d while getting resetb gpio\n", ret);
+ return ret;
+ }
+
+ /* Request the power down GPIO asserted */
+ priv->pwdn_gpio = devm_gpiod_get_optional(&client->dev, "pwdn",
+ GPIOD_OUT_HIGH);
+
+ if (!priv->pwdn_gpio)
+ dev_dbg(&client->dev, "pwdn gpio is not assigned!\n");
+
+ ret = PTR_ERR_OR_ZERO(priv->pwdn_gpio);
+ if (ret && ret != -ENOSYS) {
+ dev_dbg(&client->dev,
+ "Error %d while getting pwdn gpio\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * i2c_driver functions
+ */
+static int ov2640_probe(struct i2c_client *client,
+ const struct i2c_device_id *did)
+{
+ struct ov2640_priv *priv;
+ struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
+ int ret;
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) {
+ dev_err(&adapter->dev,
+ "OV2640: I2C-Adapter doesn't support SMBUS\n");
+ return -EIO;
+ }
+
+ priv = devm_kzalloc(&client->dev, sizeof(struct ov2640_priv), GFP_KERNEL);
+ if (!priv) {
+ dev_err(&adapter->dev,
+ "Failed to allocate memory for private data!\n");
+ return -ENOMEM;
+ }
+
+ if (client->dev.of_node) {
+ priv->clk = devm_clk_get(&client->dev, "xvclk");
+ if (IS_ERR(priv->clk))
+ return -EPROBE_DEFER;
+ clk_prepare_enable(priv->clk);
+ }
+
+ ret = ov2640_probe_dt(client, priv);
+ if (ret)
+ goto err_clk;
+
+ v4l2_i2c_subdev_init(&priv->subdev, client, &ov2640_subdev_ops);
+ priv->subdev.flags = V4L2_SUBDEV_FL_HAS_DEVNODE;
+ v4l2_ctrl_handler_init(&priv->hdl, 2);
+ v4l2_ctrl_new_std(&priv->hdl, &ov2640_ctrl_ops,
+ V4L2_CID_VFLIP, 0, 1, 1, 0);
+ v4l2_ctrl_new_std(&priv->hdl, &ov2640_ctrl_ops,
+ V4L2_CID_HFLIP, 0, 1, 1, 0);
+ priv->subdev.ctrl_handler = &priv->hdl;
+ if (priv->hdl.error) {
+ ret = priv->hdl.error;
+ goto err_hdl;
+ }
+#if defined(CONFIG_MEDIA_CONTROLLER)
+ priv->pad.flags = MEDIA_PAD_FL_SOURCE;
+ priv->subdev.entity.function = MEDIA_ENT_F_CAM_SENSOR;
+ ret = media_entity_pads_init(&priv->subdev.entity, 1, &priv->pad);
+ if (ret < 0)
+ goto err_hdl;
+#endif
+
+ ret = ov2640_video_probe(client);
+ if (ret < 0)
+ goto err_videoprobe;
+
+ ret = v4l2_async_register_subdev(&priv->subdev);
+ if (ret < 0)
+ goto err_videoprobe;
+
+ dev_info(&adapter->dev, "OV2640 Probed\n");
+
+ return 0;
+
+err_videoprobe:
+#if defined(CONFIG_MEDIA_CONTROLLER)
+ media_entity_cleanup(&priv->subdev.entity);
+#endif
+err_hdl:
+ v4l2_ctrl_handler_free(&priv->hdl);
+err_clk:
+ clk_disable_unprepare(priv->clk);
+ return ret;
+}
+
+static int ov2640_remove(struct i2c_client *client)
+{
+ struct ov2640_priv *priv = to_ov2640(client);
+
+ v4l2_async_unregister_subdev(&priv->subdev);
+ v4l2_ctrl_handler_free(&priv->hdl);
+#if defined(CONFIG_MEDIA_CONTROLLER)
+ media_entity_cleanup(&priv->subdev.entity);
+#endif
+ v4l2_device_unregister_subdev(&priv->subdev);
+ clk_disable_unprepare(priv->clk);
+ return 0;
+}
+
+static const struct i2c_device_id ov2640_id[] = {
+ { "ov2640", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, ov2640_id);
+
+static const struct of_device_id ov2640_of_match[] = {
+ {.compatible = "ovti,ov2640", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ov2640_of_match);
+
+static struct i2c_driver ov2640_i2c_driver = {
+ .driver = {
+ .name = "ov2640",
+ .of_match_table = of_match_ptr(ov2640_of_match),
+ },
+ .probe = ov2640_probe,
+ .remove = ov2640_remove,
+ .id_table = ov2640_id,
+};
+
+module_i2c_driver(ov2640_i2c_driver);
+
+MODULE_DESCRIPTION("Driver for OmniVision OV2640 sensor");
+MODULE_AUTHOR("Alberto Panizzo");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/i2c/ov5645.c b/drivers/media/i2c/ov5645.c
new file mode 100644
index 000000000000..57bd591ea54b
--- /dev/null
+++ b/drivers/media/i2c/ov5645.c
@@ -0,0 +1,1345 @@
+/*
+ * Driver for the OV5645 camera sensor.
+ *
+ * Copyright (c) 2011-2015, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2015 By Tech Design S.L. All Rights Reserved.
+ * Copyright (C) 2012-2013 Freescale Semiconductor, Inc. All Rights Reserved.
+ *
+ * Based on:
+ * - the OV5645 driver from QC msm-3.10 kernel on codeaurora.org:
+ * https://us.codeaurora.org/cgit/quic/la/kernel/msm-3.10/tree/drivers/
+ * media/platform/msm/camera_v2/sensor/ov5645.c?h=LA.BR.1.2.4_rb1.41
+ * - the OV5640 driver posted on linux-media:
+ * https://www.mail-archive.com/linux-media%40vger.kernel.org/msg92671.html
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_graph.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-of.h>
+#include <media/v4l2-subdev.h>
+
+#define OV5645_VOLTAGE_ANALOG 2800000
+#define OV5645_VOLTAGE_DIGITAL_CORE 1500000
+#define OV5645_VOLTAGE_DIGITAL_IO 1800000
+
+#define OV5645_SYSTEM_CTRL0 0x3008
+#define OV5645_SYSTEM_CTRL0_START 0x02
+#define OV5645_SYSTEM_CTRL0_STOP 0x42
+#define OV5645_CHIP_ID_HIGH 0x300a
+#define OV5645_CHIP_ID_HIGH_BYTE 0x56
+#define OV5645_CHIP_ID_LOW 0x300b
+#define OV5645_CHIP_ID_LOW_BYTE 0x45
+#define OV5645_AWB_MANUAL_CONTROL 0x3406
+#define OV5645_AWB_MANUAL_ENABLE BIT(0)
+#define OV5645_AEC_PK_MANUAL 0x3503
+#define OV5645_AEC_MANUAL_ENABLE BIT(0)
+#define OV5645_AGC_MANUAL_ENABLE BIT(1)
+#define OV5645_TIMING_TC_REG20 0x3820
+#define OV5645_SENSOR_VFLIP BIT(1)
+#define OV5645_ISP_VFLIP BIT(2)
+#define OV5645_TIMING_TC_REG21 0x3821
+#define OV5645_SENSOR_MIRROR BIT(1)
+#define OV5645_PRE_ISP_TEST_SETTING_1 0x503d
+#define OV5645_TEST_PATTERN_MASK 0x3
+#define OV5645_SET_TEST_PATTERN(x) ((x) & OV5645_TEST_PATTERN_MASK)
+#define OV5645_TEST_PATTERN_ENABLE BIT(7)
+#define OV5645_SDE_SAT_U 0x5583
+#define OV5645_SDE_SAT_V 0x5584
+
+struct reg_value {
+ u16 reg;
+ u8 val;
+};
+
+struct ov5645_mode_info {
+ u32 width;
+ u32 height;
+ const struct reg_value *data;
+ u32 data_size;
+};
+
+struct ov5645 {
+ struct i2c_client *i2c_client;
+ struct device *dev;
+ struct v4l2_subdev sd;
+ struct media_pad pad;
+ struct v4l2_of_endpoint ep;
+ struct v4l2_mbus_framefmt fmt;
+ struct v4l2_rect crop;
+ struct clk *xclk;
+
+ struct regulator *io_regulator;
+ struct regulator *core_regulator;
+ struct regulator *analog_regulator;
+
+ const struct ov5645_mode_info *current_mode;
+
+ struct v4l2_ctrl_handler ctrls;
+
+ /* Cached register values */
+ u8 aec_pk_manual;
+ u8 timing_tc_reg20;
+ u8 timing_tc_reg21;
+
+ struct mutex power_lock; /* lock to protect power state */
+ int power_count;
+
+ struct gpio_desc *enable_gpio;
+ struct gpio_desc *rst_gpio;
+};
+
+static inline struct ov5645 *to_ov5645(struct v4l2_subdev *sd)
+{
+ return container_of(sd, struct ov5645, sd);
+}
+
+static const struct reg_value ov5645_global_init_setting[] = {
+ { 0x3103, 0x11 },
+ { 0x3008, 0x82 },
+ { 0x3008, 0x42 },
+ { 0x3103, 0x03 },
+ { 0x3503, 0x07 },
+ { 0x3002, 0x1c },
+ { 0x3006, 0xc3 },
+ { 0x300e, 0x45 },
+ { 0x3017, 0x00 },
+ { 0x3018, 0x00 },
+ { 0x302e, 0x0b },
+ { 0x3037, 0x13 },
+ { 0x3108, 0x01 },
+ { 0x3611, 0x06 },
+ { 0x3500, 0x00 },
+ { 0x3501, 0x01 },
+ { 0x3502, 0x00 },
+ { 0x350a, 0x00 },
+ { 0x350b, 0x3f },
+ { 0x3620, 0x33 },
+ { 0x3621, 0xe0 },
+ { 0x3622, 0x01 },
+ { 0x3630, 0x2e },
+ { 0x3631, 0x00 },
+ { 0x3632, 0x32 },
+ { 0x3633, 0x52 },
+ { 0x3634, 0x70 },
+ { 0x3635, 0x13 },
+ { 0x3636, 0x03 },
+ { 0x3703, 0x5a },
+ { 0x3704, 0xa0 },
+ { 0x3705, 0x1a },
+ { 0x3709, 0x12 },
+ { 0x370b, 0x61 },
+ { 0x370f, 0x10 },
+ { 0x3715, 0x78 },
+ { 0x3717, 0x01 },
+ { 0x371b, 0x20 },
+ { 0x3731, 0x12 },
+ { 0x3901, 0x0a },
+ { 0x3905, 0x02 },
+ { 0x3906, 0x10 },
+ { 0x3719, 0x86 },
+ { 0x3810, 0x00 },
+ { 0x3811, 0x10 },
+ { 0x3812, 0x00 },
+ { 0x3821, 0x01 },
+ { 0x3824, 0x01 },
+ { 0x3826, 0x03 },
+ { 0x3828, 0x08 },
+ { 0x3a19, 0xf8 },
+ { 0x3c01, 0x34 },
+ { 0x3c04, 0x28 },
+ { 0x3c05, 0x98 },
+ { 0x3c07, 0x07 },
+ { 0x3c09, 0xc2 },
+ { 0x3c0a, 0x9c },
+ { 0x3c0b, 0x40 },
+ { 0x3c01, 0x34 },
+ { 0x4001, 0x02 },
+ { 0x4514, 0x00 },
+ { 0x4520, 0xb0 },
+ { 0x460b, 0x37 },
+ { 0x460c, 0x20 },
+ { 0x4818, 0x01 },
+ { 0x481d, 0xf0 },
+ { 0x481f, 0x50 },
+ { 0x4823, 0x70 },
+ { 0x4831, 0x14 },
+ { 0x5000, 0xa7 },
+ { 0x5001, 0x83 },
+ { 0x501d, 0x00 },
+ { 0x501f, 0x00 },
+ { 0x503d, 0x00 },
+ { 0x505c, 0x30 },
+ { 0x5181, 0x59 },
+ { 0x5183, 0x00 },
+ { 0x5191, 0xf0 },
+ { 0x5192, 0x03 },
+ { 0x5684, 0x10 },
+ { 0x5685, 0xa0 },
+ { 0x5686, 0x0c },
+ { 0x5687, 0x78 },
+ { 0x5a00, 0x08 },
+ { 0x5a21, 0x00 },
+ { 0x5a24, 0x00 },
+ { 0x3008, 0x02 },
+ { 0x3503, 0x00 },
+ { 0x5180, 0xff },
+ { 0x5181, 0xf2 },
+ { 0x5182, 0x00 },
+ { 0x5183, 0x14 },
+ { 0x5184, 0x25 },
+ { 0x5185, 0x24 },
+ { 0x5186, 0x09 },
+ { 0x5187, 0x09 },
+ { 0x5188, 0x0a },
+ { 0x5189, 0x75 },
+ { 0x518a, 0x52 },
+ { 0x518b, 0xea },
+ { 0x518c, 0xa8 },
+ { 0x518d, 0x42 },
+ { 0x518e, 0x38 },
+ { 0x518f, 0x56 },
+ { 0x5190, 0x42 },
+ { 0x5191, 0xf8 },
+ { 0x5192, 0x04 },
+ { 0x5193, 0x70 },
+ { 0x5194, 0xf0 },
+ { 0x5195, 0xf0 },
+ { 0x5196, 0x03 },
+ { 0x5197, 0x01 },
+ { 0x5198, 0x04 },
+ { 0x5199, 0x12 },
+ { 0x519a, 0x04 },
+ { 0x519b, 0x00 },
+ { 0x519c, 0x06 },
+ { 0x519d, 0x82 },
+ { 0x519e, 0x38 },
+ { 0x5381, 0x1e },
+ { 0x5382, 0x5b },
+ { 0x5383, 0x08 },
+ { 0x5384, 0x0a },
+ { 0x5385, 0x7e },
+ { 0x5386, 0x88 },
+ { 0x5387, 0x7c },
+ { 0x5388, 0x6c },
+ { 0x5389, 0x10 },
+ { 0x538a, 0x01 },
+ { 0x538b, 0x98 },
+ { 0x5300, 0x08 },
+ { 0x5301, 0x30 },
+ { 0x5302, 0x10 },
+ { 0x5303, 0x00 },
+ { 0x5304, 0x08 },
+ { 0x5305, 0x30 },
+ { 0x5306, 0x08 },
+ { 0x5307, 0x16 },
+ { 0x5309, 0x08 },
+ { 0x530a, 0x30 },
+ { 0x530b, 0x04 },
+ { 0x530c, 0x06 },
+ { 0x5480, 0x01 },
+ { 0x5481, 0x08 },
+ { 0x5482, 0x14 },
+ { 0x5483, 0x28 },
+ { 0x5484, 0x51 },
+ { 0x5485, 0x65 },
+ { 0x5486, 0x71 },
+ { 0x5487, 0x7d },
+ { 0x5488, 0x87 },
+ { 0x5489, 0x91 },
+ { 0x548a, 0x9a },
+ { 0x548b, 0xaa },
+ { 0x548c, 0xb8 },
+ { 0x548d, 0xcd },
+ { 0x548e, 0xdd },
+ { 0x548f, 0xea },
+ { 0x5490, 0x1d },
+ { 0x5580, 0x02 },
+ { 0x5583, 0x40 },
+ { 0x5584, 0x10 },
+ { 0x5589, 0x10 },
+ { 0x558a, 0x00 },
+ { 0x558b, 0xf8 },
+ { 0x5800, 0x3f },
+ { 0x5801, 0x16 },
+ { 0x5802, 0x0e },
+ { 0x5803, 0x0d },
+ { 0x5804, 0x17 },
+ { 0x5805, 0x3f },
+ { 0x5806, 0x0b },
+ { 0x5807, 0x06 },
+ { 0x5808, 0x04 },
+ { 0x5809, 0x04 },
+ { 0x580a, 0x06 },
+ { 0x580b, 0x0b },
+ { 0x580c, 0x09 },
+ { 0x580d, 0x03 },
+ { 0x580e, 0x00 },
+ { 0x580f, 0x00 },
+ { 0x5810, 0x03 },
+ { 0x5811, 0x08 },
+ { 0x5812, 0x0a },
+ { 0x5813, 0x03 },
+ { 0x5814, 0x00 },
+ { 0x5815, 0x00 },
+ { 0x5816, 0x04 },
+ { 0x5817, 0x09 },
+ { 0x5818, 0x0f },
+ { 0x5819, 0x08 },
+ { 0x581a, 0x06 },
+ { 0x581b, 0x06 },
+ { 0x581c, 0x08 },
+ { 0x581d, 0x0c },
+ { 0x581e, 0x3f },
+ { 0x581f, 0x1e },
+ { 0x5820, 0x12 },
+ { 0x5821, 0x13 },
+ { 0x5822, 0x21 },
+ { 0x5823, 0x3f },
+ { 0x5824, 0x68 },
+ { 0x5825, 0x28 },
+ { 0x5826, 0x2c },
+ { 0x5827, 0x28 },
+ { 0x5828, 0x08 },
+ { 0x5829, 0x48 },
+ { 0x582a, 0x64 },
+ { 0x582b, 0x62 },
+ { 0x582c, 0x64 },
+ { 0x582d, 0x28 },
+ { 0x582e, 0x46 },
+ { 0x582f, 0x62 },
+ { 0x5830, 0x60 },
+ { 0x5831, 0x62 },
+ { 0x5832, 0x26 },
+ { 0x5833, 0x48 },
+ { 0x5834, 0x66 },
+ { 0x5835, 0x44 },
+ { 0x5836, 0x64 },
+ { 0x5837, 0x28 },
+ { 0x5838, 0x66 },
+ { 0x5839, 0x48 },
+ { 0x583a, 0x2c },
+ { 0x583b, 0x28 },
+ { 0x583c, 0x26 },
+ { 0x583d, 0xae },
+ { 0x5025, 0x00 },
+ { 0x3a0f, 0x30 },
+ { 0x3a10, 0x28 },
+ { 0x3a1b, 0x30 },
+ { 0x3a1e, 0x26 },
+ { 0x3a11, 0x60 },
+ { 0x3a1f, 0x14 },
+ { 0x0601, 0x02 },
+ { 0x3008, 0x42 },
+ { 0x3008, 0x02 }
+};
+
+static const struct reg_value ov5645_setting_sxga[] = {
+ { 0x3612, 0xa9 },
+ { 0x3614, 0x50 },
+ { 0x3618, 0x00 },
+ { 0x3034, 0x18 },
+ { 0x3035, 0x21 },
+ { 0x3036, 0x70 },
+ { 0x3600, 0x09 },
+ { 0x3601, 0x43 },
+ { 0x3708, 0x66 },
+ { 0x370c, 0xc3 },
+ { 0x3800, 0x00 },
+ { 0x3801, 0x00 },
+ { 0x3802, 0x00 },
+ { 0x3803, 0x06 },
+ { 0x3804, 0x0a },
+ { 0x3805, 0x3f },
+ { 0x3806, 0x07 },
+ { 0x3807, 0x9d },
+ { 0x3808, 0x05 },
+ { 0x3809, 0x00 },
+ { 0x380a, 0x03 },
+ { 0x380b, 0xc0 },
+ { 0x380c, 0x07 },
+ { 0x380d, 0x68 },
+ { 0x380e, 0x03 },
+ { 0x380f, 0xd8 },
+ { 0x3813, 0x06 },
+ { 0x3814, 0x31 },
+ { 0x3815, 0x31 },
+ { 0x3820, 0x47 },
+ { 0x3a02, 0x03 },
+ { 0x3a03, 0xd8 },
+ { 0x3a08, 0x01 },
+ { 0x3a09, 0xf8 },
+ { 0x3a0a, 0x01 },
+ { 0x3a0b, 0xa4 },
+ { 0x3a0e, 0x02 },
+ { 0x3a0d, 0x02 },
+ { 0x3a14, 0x03 },
+ { 0x3a15, 0xd8 },
+ { 0x3a18, 0x00 },
+ { 0x4004, 0x02 },
+ { 0x4005, 0x18 },
+ { 0x4300, 0x32 },
+ { 0x4202, 0x00 }
+};
+
+static const struct reg_value ov5645_setting_1080p[] = {
+ { 0x3612, 0xab },
+ { 0x3614, 0x50 },
+ { 0x3618, 0x04 },
+ { 0x3034, 0x18 },
+ { 0x3035, 0x11 },
+ { 0x3036, 0x54 },
+ { 0x3600, 0x08 },
+ { 0x3601, 0x33 },
+ { 0x3708, 0x63 },
+ { 0x370c, 0xc0 },
+ { 0x3800, 0x01 },
+ { 0x3801, 0x50 },
+ { 0x3802, 0x01 },
+ { 0x3803, 0xb2 },
+ { 0x3804, 0x08 },
+ { 0x3805, 0xef },
+ { 0x3806, 0x05 },
+ { 0x3807, 0xf1 },
+ { 0x3808, 0x07 },
+ { 0x3809, 0x80 },
+ { 0x380a, 0x04 },
+ { 0x380b, 0x38 },
+ { 0x380c, 0x09 },
+ { 0x380d, 0xc4 },
+ { 0x380e, 0x04 },
+ { 0x380f, 0x60 },
+ { 0x3813, 0x04 },
+ { 0x3814, 0x11 },
+ { 0x3815, 0x11 },
+ { 0x3820, 0x47 },
+ { 0x4514, 0x88 },
+ { 0x3a02, 0x04 },
+ { 0x3a03, 0x60 },
+ { 0x3a08, 0x01 },
+ { 0x3a09, 0x50 },
+ { 0x3a0a, 0x01 },
+ { 0x3a0b, 0x18 },
+ { 0x3a0e, 0x03 },
+ { 0x3a0d, 0x04 },
+ { 0x3a14, 0x04 },
+ { 0x3a15, 0x60 },
+ { 0x3a18, 0x00 },
+ { 0x4004, 0x06 },
+ { 0x4005, 0x18 },
+ { 0x4300, 0x32 },
+ { 0x4202, 0x00 },
+ { 0x4837, 0x0b }
+};
+
+static const struct reg_value ov5645_setting_full[] = {
+ { 0x3612, 0xab },
+ { 0x3614, 0x50 },
+ { 0x3618, 0x04 },
+ { 0x3034, 0x18 },
+ { 0x3035, 0x11 },
+ { 0x3036, 0x54 },
+ { 0x3600, 0x08 },
+ { 0x3601, 0x33 },
+ { 0x3708, 0x63 },
+ { 0x370c, 0xc0 },
+ { 0x3800, 0x00 },
+ { 0x3801, 0x00 },
+ { 0x3802, 0x00 },
+ { 0x3803, 0x00 },
+ { 0x3804, 0x0a },
+ { 0x3805, 0x3f },
+ { 0x3806, 0x07 },
+ { 0x3807, 0x9f },
+ { 0x3808, 0x0a },
+ { 0x3809, 0x20 },
+ { 0x380a, 0x07 },
+ { 0x380b, 0x98 },
+ { 0x380c, 0x0b },
+ { 0x380d, 0x1c },
+ { 0x380e, 0x07 },
+ { 0x380f, 0xb0 },
+ { 0x3813, 0x06 },
+ { 0x3814, 0x11 },
+ { 0x3815, 0x11 },
+ { 0x3820, 0x47 },
+ { 0x4514, 0x88 },
+ { 0x3a02, 0x07 },
+ { 0x3a03, 0xb0 },
+ { 0x3a08, 0x01 },
+ { 0x3a09, 0x27 },
+ { 0x3a0a, 0x00 },
+ { 0x3a0b, 0xf6 },
+ { 0x3a0e, 0x06 },
+ { 0x3a0d, 0x08 },
+ { 0x3a14, 0x07 },
+ { 0x3a15, 0xb0 },
+ { 0x3a18, 0x01 },
+ { 0x4004, 0x06 },
+ { 0x4005, 0x18 },
+ { 0x4300, 0x32 },
+ { 0x4837, 0x0b },
+ { 0x4202, 0x00 }
+};
+
+static const struct ov5645_mode_info ov5645_mode_info_data[] = {
+ {
+ .width = 1280,
+ .height = 960,
+ .data = ov5645_setting_sxga,
+ .data_size = ARRAY_SIZE(ov5645_setting_sxga)
+ },
+ {
+ .width = 1920,
+ .height = 1080,
+ .data = ov5645_setting_1080p,
+ .data_size = ARRAY_SIZE(ov5645_setting_1080p)
+ },
+ {
+ .width = 2592,
+ .height = 1944,
+ .data = ov5645_setting_full,
+ .data_size = ARRAY_SIZE(ov5645_setting_full)
+ },
+};
+
+static int ov5645_regulators_enable(struct ov5645 *ov5645)
+{
+ int ret;
+
+ ret = regulator_enable(ov5645->io_regulator);
+ if (ret < 0) {
+ dev_err(ov5645->dev, "set io voltage failed\n");
+ return ret;
+ }
+
+ ret = regulator_enable(ov5645->analog_regulator);
+ if (ret) {
+ dev_err(ov5645->dev, "set analog voltage failed\n");
+ goto err_disable_io;
+ }
+
+ ret = regulator_enable(ov5645->core_regulator);
+ if (ret) {
+ dev_err(ov5645->dev, "set core voltage failed\n");
+ goto err_disable_analog;
+ }
+
+ return 0;
+
+err_disable_analog:
+ regulator_disable(ov5645->analog_regulator);
+err_disable_io:
+ regulator_disable(ov5645->io_regulator);
+
+ return ret;
+}
+
+static void ov5645_regulators_disable(struct ov5645 *ov5645)
+{
+ int ret;
+
+ ret = regulator_disable(ov5645->core_regulator);
+ if (ret < 0)
+ dev_err(ov5645->dev, "core regulator disable failed\n");
+
+ ret = regulator_disable(ov5645->analog_regulator);
+ if (ret < 0)
+ dev_err(ov5645->dev, "analog regulator disable failed\n");
+
+ ret = regulator_disable(ov5645->io_regulator);
+ if (ret < 0)
+ dev_err(ov5645->dev, "io regulator disable failed\n");
+}
+
+static int ov5645_write_reg(struct ov5645 *ov5645, u16 reg, u8 val)
+{
+ u8 regbuf[3];
+ int ret;
+
+ regbuf[0] = reg >> 8;
+ regbuf[1] = reg & 0xff;
+ regbuf[2] = val;
+
+ ret = i2c_master_send(ov5645->i2c_client, regbuf, 3);
+ if (ret < 0)
+ dev_err(ov5645->dev, "%s: write reg error %d: reg=%x, val=%x\n",
+ __func__, ret, reg, val);
+
+ return ret;
+}
+
+static int ov5645_read_reg(struct ov5645 *ov5645, u16 reg, u8 *val)
+{
+ u8 regbuf[2];
+ int ret;
+
+ regbuf[0] = reg >> 8;
+ regbuf[1] = reg & 0xff;
+
+ ret = i2c_master_send(ov5645->i2c_client, regbuf, 2);
+ if (ret < 0) {
+ dev_err(ov5645->dev, "%s: write reg error %d: reg=%x\n",
+ __func__, ret, reg);
+ return ret;
+ }
+
+ ret = i2c_master_recv(ov5645->i2c_client, val, 1);
+ if (ret < 0) {
+ dev_err(ov5645->dev, "%s: read reg error %d: reg=%x\n",
+ __func__, ret, reg);
+ return ret;
+ }
+
+ return 0;
+}
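
Unlike the SMBus byte transfers used by the ov2640 code earlier in this patch,
the OV5645 helpers above speak raw I2C with a 16-bit register address: two
address bytes go out, then a single data byte is written or read back. The
same framing can be reproduced through i2c-dev, for example to peek at the
chip ID registers defined above, as long as no kernel driver is bound to the
address. The bus number and the 0x3c slave address below are assumptions:

	/* Read a 16-bit-addressed OV5645 register from userspace. */
	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <linux/i2c-dev.h>

	static int reg_read(int fd, unsigned int reg, unsigned char *val)
	{
		unsigned char addr[2] = { reg >> 8, reg & 0xff };

		if (write(fd, addr, 2) != 2)		/* register address first */
			return -1;
		return read(fd, val, 1) == 1 ? 0 : -1;	/* then one data byte */
	}

	int main(void)
	{
		unsigned char hi, lo;
		int fd = open("/dev/i2c-1", O_RDWR);	/* assumed bus */

		if (fd < 0 || ioctl(fd, I2C_SLAVE, 0x3c) < 0)	/* assumed address */
			return 1;

		if (!reg_read(fd, 0x300a, &hi) && !reg_read(fd, 0x300b, &lo))
			printf("chip id 0x%02x%02x (0x5645 expected)\n", hi, lo);
		return 0;
	}
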
+
+static int ov5645_set_aec_mode(struct ov5645 *ov5645, u32 mode)
+{
+ u8 val = ov5645->aec_pk_manual;
+ int ret;
+
+ if (mode == V4L2_EXPOSURE_AUTO)
+ val &= ~OV5645_AEC_MANUAL_ENABLE;
+ else /* V4L2_EXPOSURE_MANUAL */
+ val |= OV5645_AEC_MANUAL_ENABLE;
+
+ ret = ov5645_write_reg(ov5645, OV5645_AEC_PK_MANUAL, val);
+ if (!ret)
+ ov5645->aec_pk_manual = val;
+
+ return ret;
+}
+
+static int ov5645_set_agc_mode(struct ov5645 *ov5645, u32 enable)
+{
+ u8 val = ov5645->aec_pk_manual;
+ int ret;
+
+ if (enable)
+ val &= ~OV5645_AGC_MANUAL_ENABLE;
+ else
+ val |= OV5645_AGC_MANUAL_ENABLE;
+
+ ret = ov5645_write_reg(ov5645, OV5645_AEC_PK_MANUAL, val);
+ if (!ret)
+ ov5645->aec_pk_manual = val;
+
+ return ret;
+}
+
+static int ov5645_set_register_array(struct ov5645 *ov5645,
+ const struct reg_value *settings,
+ unsigned int num_settings)
+{
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < num_settings; ++i, ++settings) {
+ ret = ov5645_write_reg(ov5645, settings->reg, settings->val);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ov5645_set_power_on(struct ov5645 *ov5645)
+{
+ int ret;
+
+ ret = ov5645_regulators_enable(ov5645);
+ if (ret < 0) {
+ return ret;
+ }
+
+ ret = clk_prepare_enable(ov5645->xclk);
+ if (ret < 0) {
+ dev_err(ov5645->dev, "clk prepare enable failed\n");
+ ov5645_regulators_disable(ov5645);
+ return ret;
+ }
+
+ usleep_range(5000, 15000);
+ gpiod_set_value_cansleep(ov5645->enable_gpio, 1);
+
+ usleep_range(1000, 2000);
+ gpiod_set_value_cansleep(ov5645->rst_gpio, 0);
+
+ msleep(20);
+
+ return 0;
+}
+
+static void ov5645_set_power_off(struct ov5645 *ov5645)
+{
+ gpiod_set_value_cansleep(ov5645->rst_gpio, 1);
+ gpiod_set_value_cansleep(ov5645->enable_gpio, 0);
+ clk_disable_unprepare(ov5645->xclk);
+ ov5645_regulators_disable(ov5645);
+}
+
+static int ov5645_s_power(struct v4l2_subdev *sd, int on)
+{
+ struct ov5645 *ov5645 = to_ov5645(sd);
+ int ret = 0;
+
+ mutex_lock(&ov5645->power_lock);
+
+ /* If the power count is modified from 0 to != 0 or from != 0 to 0,
+ * update the power state.
+ */
+ if (ov5645->power_count == !on) {
+ if (on) {
+ ret = ov5645_set_power_on(ov5645);
+ if (ret < 0)
+ goto exit;
+
+ ret = ov5645_set_register_array(ov5645,
+ ov5645_global_init_setting,
+ ARRAY_SIZE(ov5645_global_init_setting));
+ if (ret < 0) {
+ dev_err(ov5645->dev,
+ "could not set init registers\n");
+ ov5645_set_power_off(ov5645);
+ goto exit;
+ }
+
+ ret = ov5645_write_reg(ov5645, OV5645_SYSTEM_CTRL0,
+ OV5645_SYSTEM_CTRL0_STOP);
+ if (ret < 0) {
+ ov5645_set_power_off(ov5645);
+ goto exit;
+ }
+ } else {
+ ov5645_set_power_off(ov5645);
+ }
+ }
+
+ /* Update the power count. */
+ ov5645->power_count += on ? 1 : -1;
+ WARN_ON(ov5645->power_count < 0);
+
+exit:
+ mutex_unlock(&ov5645->power_lock);
+
+ return ret;
+}
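
The power_count bookkeeping above only switches the hardware on the 0 <-> 1
transitions, so every consumer must keep its s_power calls balanced. A rough
kernel-side sketch of how a bridge driver would typically drive it (the subdev
pointer is assumed to come from its async notifier; this is an illustration,
not code from this patch):

	#include <media/v4l2-subdev.h>

	static int bridge_start_sensor(struct v4l2_subdev *sd)
	{
		int ret;

		ret = v4l2_subdev_call(sd, core, s_power, 1);	/* first user powers up */
		if (ret < 0 && ret != -ENOIOCTLCMD)
			return ret;

		ret = v4l2_subdev_call(sd, video, s_stream, 1);
		if (ret < 0)
			v4l2_subdev_call(sd, core, s_power, 0);	/* keep the count balanced */
		return ret;
	}

	static void bridge_stop_sensor(struct v4l2_subdev *sd)
	{
		v4l2_subdev_call(sd, video, s_stream, 0);
		v4l2_subdev_call(sd, core, s_power, 0);
	}
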
+
+static int ov5645_set_saturation(struct ov5645 *ov5645, s32 value)
+{
+ u32 reg_value = (value * 0x10) + 0x40;
+ int ret;
+
+ ret = ov5645_write_reg(ov5645, OV5645_SDE_SAT_U, reg_value);
+ if (ret < 0)
+ return ret;
+
+ return ov5645_write_reg(ov5645, OV5645_SDE_SAT_V, reg_value);
+}
+
+static int ov5645_set_hflip(struct ov5645 *ov5645, s32 value)
+{
+ u8 val = ov5645->timing_tc_reg21;
+ int ret;
+
+ if (value == 0)
+ val &= ~(OV5645_SENSOR_MIRROR);
+ else
+ val |= (OV5645_SENSOR_MIRROR);
+
+ ret = ov5645_write_reg(ov5645, OV5645_TIMING_TC_REG21, val);
+ if (!ret)
+ ov5645->timing_tc_reg21 = val;
+
+ return ret;
+}
+
+static int ov5645_set_vflip(struct ov5645 *ov5645, s32 value)
+{
+ u8 val = ov5645->timing_tc_reg20;
+ int ret;
+
+ if (value == 0)
+ val |= (OV5645_SENSOR_VFLIP | OV5645_ISP_VFLIP);
+ else
+ val &= ~(OV5645_SENSOR_VFLIP | OV5645_ISP_VFLIP);
+
+ ret = ov5645_write_reg(ov5645, OV5645_TIMING_TC_REG20, val);
+ if (!ret)
+ ov5645->timing_tc_reg20 = val;
+
+ return ret;
+}
+
+static int ov5645_set_test_pattern(struct ov5645 *ov5645, s32 value)
+{
+ u8 val = 0;
+
+ if (value) {
+ val = OV5645_SET_TEST_PATTERN(value - 1);
+ val |= OV5645_TEST_PATTERN_ENABLE;
+ }
+
+ return ov5645_write_reg(ov5645, OV5645_PRE_ISP_TEST_SETTING_1, val);
+}
+
+static const char * const ov5645_test_pattern_menu[] = {
+ "Disabled",
+ "Vertical Color Bars",
+ "Pseudo-Random Data",
+ "Color Square",
+ "Black Image",
+};
+
+static int ov5645_set_awb(struct ov5645 *ov5645, s32 enable_auto)
+{
+ u8 val = 0;
+
+ if (!enable_auto)
+ val = OV5645_AWB_MANUAL_ENABLE;
+
+ return ov5645_write_reg(ov5645, OV5645_AWB_MANUAL_CONTROL, val);
+}
+
+static int ov5645_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct ov5645 *ov5645 = container_of(ctrl->handler,
+ struct ov5645, ctrls);
+ int ret;
+
+ mutex_lock(&ov5645->power_lock);
+ if (!ov5645->power_count) {
+ mutex_unlock(&ov5645->power_lock);
+ return 0;
+ }
+
+ switch (ctrl->id) {
+ case V4L2_CID_SATURATION:
+ ret = ov5645_set_saturation(ov5645, ctrl->val);
+ break;
+ case V4L2_CID_AUTO_WHITE_BALANCE:
+ ret = ov5645_set_awb(ov5645, ctrl->val);
+ break;
+ case V4L2_CID_AUTOGAIN:
+ ret = ov5645_set_agc_mode(ov5645, ctrl->val);
+ break;
+ case V4L2_CID_EXPOSURE_AUTO:
+ ret = ov5645_set_aec_mode(ov5645, ctrl->val);
+ break;
+ case V4L2_CID_TEST_PATTERN:
+ ret = ov5645_set_test_pattern(ov5645, ctrl->val);
+ break;
+ case V4L2_CID_HFLIP:
+ ret = ov5645_set_hflip(ov5645, ctrl->val);
+ break;
+ case V4L2_CID_VFLIP:
+ ret = ov5645_set_vflip(ov5645, ctrl->val);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ mutex_unlock(&ov5645->power_lock);
+
+ return ret;
+}
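
These controls reach the hardware only while power_count is non-zero;
otherwise the new value is cached by the control framework and applied by the
v4l2_ctrl_handler_setup() call in s_stream() below. Note also the saturation
mapping above: the -4..4 control range lands on register values 0x00..0x80 in
steps of 0x10. Because the subdev gets a device node (probe sets
V4L2_SUBDEV_FL_HAS_DEVNODE below), the controls can be poked directly; a
hypothetical sketch, node path assumed:

	/* Enable the OV5645 color-bar test pattern. */
	#include <fcntl.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/videodev2.h>

	int main(void)
	{
		struct v4l2_control ctrl;
		int fd = open("/dev/v4l-subdev0", O_RDWR);	/* assumed node path */

		if (fd < 0)
			return 1;

		memset(&ctrl, 0, sizeof(ctrl));
		ctrl.id = V4L2_CID_TEST_PATTERN;
		ctrl.value = 1;		/* menu index 1: "Vertical Color Bars" */

		return ioctl(fd, VIDIOC_S_CTRL, &ctrl) ? 1 : 0;
	}
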
+
+static const struct v4l2_ctrl_ops ov5645_ctrl_ops = {
+ .s_ctrl = ov5645_s_ctrl,
+};
+
+static int ov5645_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ if (code->index > 0)
+ return -EINVAL;
+
+ code->code = MEDIA_BUS_FMT_UYVY8_2X8;
+
+ return 0;
+}
+
+static int ov5645_enum_frame_size(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ if (fse->code != MEDIA_BUS_FMT_UYVY8_2X8)
+ return -EINVAL;
+
+ if (fse->index >= ARRAY_SIZE(ov5645_mode_info_data))
+ return -EINVAL;
+
+ fse->min_width = ov5645_mode_info_data[fse->index].width;
+ fse->max_width = ov5645_mode_info_data[fse->index].width;
+ fse->min_height = ov5645_mode_info_data[fse->index].height;
+ fse->max_height = ov5645_mode_info_data[fse->index].height;
+
+ return 0;
+}
+
+static struct v4l2_mbus_framefmt *
+__ov5645_get_pad_format(struct ov5645 *ov5645,
+ struct v4l2_subdev_pad_config *cfg,
+ unsigned int pad,
+ enum v4l2_subdev_format_whence which)
+{
+ switch (which) {
+ case V4L2_SUBDEV_FORMAT_TRY:
+ return v4l2_subdev_get_try_format(&ov5645->sd, cfg, pad);
+ case V4L2_SUBDEV_FORMAT_ACTIVE:
+ return &ov5645->fmt;
+ default:
+ return NULL;
+ }
+}
+
+static int ov5645_get_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *format)
+{
+ struct ov5645 *ov5645 = to_ov5645(sd);
+
+ format->format = *__ov5645_get_pad_format(ov5645, cfg, format->pad,
+ format->which);
+ return 0;
+}
+
+static struct v4l2_rect *
+__ov5645_get_pad_crop(struct ov5645 *ov5645, struct v4l2_subdev_pad_config *cfg,
+ unsigned int pad, enum v4l2_subdev_format_whence which)
+{
+ switch (which) {
+ case V4L2_SUBDEV_FORMAT_TRY:
+ return v4l2_subdev_get_try_crop(&ov5645->sd, cfg, pad);
+ case V4L2_SUBDEV_FORMAT_ACTIVE:
+ return &ov5645->crop;
+ default:
+ return NULL;
+ }
+}
+
+static const struct ov5645_mode_info *
+ov5645_find_nearest_mode(unsigned int width, unsigned int height)
+{
+ int i;
+
+ for (i = ARRAY_SIZE(ov5645_mode_info_data) - 1; i >= 0; i--) {
+ if (ov5645_mode_info_data[i].width <= width &&
+ ov5645_mode_info_data[i].height <= height)
+ break;
+ }
+
+ if (i < 0)
+ i = 0;
+
+ return &ov5645_mode_info_data[i];
+}
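
The selection rule is: walk the table from the largest mode down, take the
first one that fits entirely inside the request, and fall back to the smallest
mode if nothing fits. A standalone re-implementation (illustration only, not
driver code) makes the effect concrete:

	#include <stdio.h>

	static const struct { unsigned int w, h; } modes[] = {
		{ 1280, 960 }, { 1920, 1080 }, { 2592, 1944 },
	};

	static unsigned int pick(unsigned int w, unsigned int h)
	{
		int i;

		for (i = 2; i >= 0; i--)
			if (modes[i].w <= w && modes[i].h <= h)
				break;
		return i < 0 ? 0 : i;	/* same fallback as the driver */
	}

	int main(void)
	{
		unsigned int a = pick(1920, 1080);	/* exact match */
		unsigned int b = pick(4000, 3000);	/* larger than full res */
		unsigned int c = pick(640, 480);	/* smaller than every mode */

		printf("%ux%u\n", modes[a].w, modes[a].h);	/* 1920x1080 */
		printf("%ux%u\n", modes[b].w, modes[b].h);	/* 2592x1944 */
		printf("%ux%u\n", modes[c].w, modes[c].h);	/* 1280x960 */
		return 0;
	}
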
+
+static int ov5645_set_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *format)
+{
+ struct ov5645 *ov5645 = to_ov5645(sd);
+ struct v4l2_mbus_framefmt *__format;
+ struct v4l2_rect *__crop;
+ const struct ov5645_mode_info *new_mode;
+
+ __crop = __ov5645_get_pad_crop(ov5645, cfg, format->pad,
+ format->which);
+
+ new_mode = ov5645_find_nearest_mode(format->format.width,
+ format->format.height);
+ __crop->width = new_mode->width;
+ __crop->height = new_mode->height;
+
+ if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE)
+ ov5645->current_mode = new_mode;
+
+ __format = __ov5645_get_pad_format(ov5645, cfg, format->pad,
+ format->which);
+ __format->width = __crop->width;
+ __format->height = __crop->height;
+ __format->code = MEDIA_BUS_FMT_UYVY8_2X8;
+ __format->field = V4L2_FIELD_NONE;
+ __format->colorspace = V4L2_COLORSPACE_SRGB;
+
+ format->format = *__format;
+
+ return 0;
+}
+
+static int ov5645_entity_init_cfg(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg)
+{
+ struct v4l2_subdev_format fmt = { 0 };
+
+ fmt.which = cfg ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
+ fmt.format.width = 1920;
+ fmt.format.height = 1080;
+
+ ov5645_set_format(subdev, cfg, &fmt);
+
+ return 0;
+}
+
+static int ov5645_get_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_selection *sel)
+{
+ struct ov5645 *ov5645 = to_ov5645(sd);
+
+ if (sel->target != V4L2_SEL_TGT_CROP)
+ return -EINVAL;
+
+ sel->r = *__ov5645_get_pad_crop(ov5645, cfg, sel->pad,
+ sel->which);
+ return 0;
+}
+
+static int ov5645_s_stream(struct v4l2_subdev *subdev, int enable)
+{
+ struct ov5645 *ov5645 = to_ov5645(subdev);
+ int ret;
+
+ if (enable) {
+ ret = ov5645_set_register_array(ov5645,
+ ov5645->current_mode->data,
+ ov5645->current_mode->data_size);
+ if (ret < 0) {
+ dev_err(ov5645->dev, "could not set mode %dx%d\n",
+ ov5645->current_mode->width,
+ ov5645->current_mode->height);
+ return ret;
+ }
+ ret = v4l2_ctrl_handler_setup(&ov5645->ctrls);
+ if (ret < 0) {
+ dev_err(ov5645->dev, "could not sync v4l2 controls\n");
+ return ret;
+ }
+ ret = ov5645_write_reg(ov5645, OV5645_SYSTEM_CTRL0,
+ OV5645_SYSTEM_CTRL0_START);
+ if (ret < 0)
+ return ret;
+ } else {
+ ret = ov5645_write_reg(ov5645, OV5645_SYSTEM_CTRL0,
+ OV5645_SYSTEM_CTRL0_STOP);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct v4l2_subdev_core_ops ov5645_core_ops = {
+ .s_power = ov5645_s_power,
+};
+
+static const struct v4l2_subdev_video_ops ov5645_video_ops = {
+ .s_stream = ov5645_s_stream,
+};
+
+static const struct v4l2_subdev_pad_ops ov5645_subdev_pad_ops = {
+ .init_cfg = ov5645_entity_init_cfg,
+ .enum_mbus_code = ov5645_enum_mbus_code,
+ .enum_frame_size = ov5645_enum_frame_size,
+ .get_fmt = ov5645_get_format,
+ .set_fmt = ov5645_set_format,
+ .get_selection = ov5645_get_selection,
+};
+
+static const struct v4l2_subdev_ops ov5645_subdev_ops = {
+ .core = &ov5645_core_ops,
+ .video = &ov5645_video_ops,
+ .pad = &ov5645_subdev_pad_ops,
+};
+
+static int ov5645_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ struct device_node *endpoint;
+ struct ov5645 *ov5645;
+ u8 chip_id_high, chip_id_low;
+ u32 xclk_freq;
+ int ret;
+
+ ov5645 = devm_kzalloc(dev, sizeof(struct ov5645), GFP_KERNEL);
+ if (!ov5645)
+ return -ENOMEM;
+
+ ov5645->i2c_client = client;
+ ov5645->dev = dev;
+
+ endpoint = of_graph_get_next_endpoint(dev->of_node, NULL);
+ if (!endpoint) {
+ dev_err(dev, "endpoint node not found\n");
+ return -EINVAL;
+ }
+
+ ret = v4l2_of_parse_endpoint(endpoint, &ov5645->ep);
+ if (ret < 0) {
+ dev_err(dev, "parsing endpoint node failed\n");
+ return ret;
+ }
+
+ of_node_put(endpoint);
+
+ if (ov5645->ep.bus_type != V4L2_MBUS_CSI2) {
+ dev_err(dev, "invalid bus type, must be CSI2\n");
+ return -EINVAL;
+ }
+
+ /* get system clock (xclk) */
+ ov5645->xclk = devm_clk_get(dev, "xclk");
+ if (IS_ERR(ov5645->xclk)) {
+ dev_err(dev, "could not get xclk");
+ return PTR_ERR(ov5645->xclk);
+ }
+
+ ret = of_property_read_u32(dev->of_node, "clock-frequency", &xclk_freq);
+ if (ret) {
+ dev_err(dev, "could not get xclk frequency\n");
+ return ret;
+ }
+
+ if (xclk_freq != 23880000) {
+ dev_err(dev, "external clock frequency %u is not supported\n",
+ xclk_freq);
+ return -EINVAL;
+ }
+
+ ret = clk_set_rate(ov5645->xclk, xclk_freq);
+ if (ret) {
+ dev_err(dev, "could not set xclk frequency\n");
+ return ret;
+ }
+
+ ov5645->io_regulator = devm_regulator_get(dev, "vdddo");
+ if (IS_ERR(ov5645->io_regulator)) {
+ dev_err(dev, "cannot get io regulator\n");
+ return PTR_ERR(ov5645->io_regulator);
+ }
+
+ ret = regulator_set_voltage(ov5645->io_regulator,
+ OV5645_VOLTAGE_DIGITAL_IO,
+ OV5645_VOLTAGE_DIGITAL_IO);
+ if (ret < 0) {
+ dev_err(dev, "cannot set io voltage\n");
+ return ret;
+ }
+
+ ov5645->core_regulator = devm_regulator_get(dev, "vddd");
+ if (IS_ERR(ov5645->core_regulator)) {
+ dev_err(dev, "cannot get core regulator\n");
+ return PTR_ERR(ov5645->core_regulator);
+ }
+
+ ret = regulator_set_voltage(ov5645->core_regulator,
+ OV5645_VOLTAGE_DIGITAL_CORE,
+ OV5645_VOLTAGE_DIGITAL_CORE);
+ if (ret < 0) {
+ dev_err(dev, "cannot set core voltage\n");
+ return ret;
+ }
+
+ ov5645->analog_regulator = devm_regulator_get(dev, "vdda");
+ if (IS_ERR(ov5645->analog_regulator)) {
+ dev_err(dev, "cannot get analog regulator\n");
+ return PTR_ERR(ov5645->analog_regulator);
+ }
+
+ ret = regulator_set_voltage(ov5645->analog_regulator,
+ OV5645_VOLTAGE_ANALOG,
+ OV5645_VOLTAGE_ANALOG);
+ if (ret < 0) {
+ dev_err(dev, "cannot set analog voltage\n");
+ return ret;
+ }
+
+ ov5645->enable_gpio = devm_gpiod_get(dev, "enable", GPIOD_OUT_HIGH);
+ if (IS_ERR(ov5645->enable_gpio)) {
+ dev_err(dev, "cannot get enable gpio\n");
+ return PTR_ERR(ov5645->enable_gpio);
+ }
+
+ ov5645->rst_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(ov5645->rst_gpio)) {
+ dev_err(dev, "cannot get reset gpio\n");
+ return PTR_ERR(ov5645->rst_gpio);
+ }
+
+ mutex_init(&ov5645->power_lock);
+
+ v4l2_ctrl_handler_init(&ov5645->ctrls, 7);
+ v4l2_ctrl_new_std(&ov5645->ctrls, &ov5645_ctrl_ops,
+ V4L2_CID_SATURATION, -4, 4, 1, 0);
+ v4l2_ctrl_new_std(&ov5645->ctrls, &ov5645_ctrl_ops,
+ V4L2_CID_HFLIP, 0, 1, 1, 0);
+ v4l2_ctrl_new_std(&ov5645->ctrls, &ov5645_ctrl_ops,
+ V4L2_CID_VFLIP, 0, 1, 1, 0);
+ v4l2_ctrl_new_std(&ov5645->ctrls, &ov5645_ctrl_ops,
+ V4L2_CID_AUTOGAIN, 0, 1, 1, 1);
+ v4l2_ctrl_new_std(&ov5645->ctrls, &ov5645_ctrl_ops,
+ V4L2_CID_AUTO_WHITE_BALANCE, 0, 1, 1, 1);
+ v4l2_ctrl_new_std_menu(&ov5645->ctrls, &ov5645_ctrl_ops,
+ V4L2_CID_EXPOSURE_AUTO, V4L2_EXPOSURE_MANUAL,
+ 0, V4L2_EXPOSURE_AUTO);
+ v4l2_ctrl_new_std_menu_items(&ov5645->ctrls, &ov5645_ctrl_ops,
+ V4L2_CID_TEST_PATTERN,
+ ARRAY_SIZE(ov5645_test_pattern_menu) - 1,
+ 0, 0, ov5645_test_pattern_menu);
+
+ ov5645->sd.ctrl_handler = &ov5645->ctrls;
+
+ if (ov5645->ctrls.error) {
+ dev_err(dev, "%s: control initialization error %d\n",
+ __func__, ov5645->ctrls.error);
+ ret = ov5645->ctrls.error;
+ goto free_ctrl;
+ }
+
+ v4l2_i2c_subdev_init(&ov5645->sd, client, &ov5645_subdev_ops);
+ ov5645->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ ov5645->pad.flags = MEDIA_PAD_FL_SOURCE;
+ ov5645->sd.dev = &client->dev;
+
+ ret = media_entity_pads_init(&ov5645->sd.entity, 1, &ov5645->pad);
+ if (ret < 0) {
+ dev_err(dev, "could not register media entity\n");
+ goto free_ctrl;
+ }
+
+ ret = ov5645_s_power(&ov5645->sd, true);
+ if (ret < 0) {
+ dev_err(dev, "could not power up OV5645\n");
+ goto free_entity;
+ }
+
+ ret = ov5645_read_reg(ov5645, OV5645_CHIP_ID_HIGH, &chip_id_high);
+ if (ret < 0 || chip_id_high != OV5645_CHIP_ID_HIGH_BYTE) {
+ dev_err(dev, "could not read ID high\n");
+ ret = -ENODEV;
+ goto power_down;
+ }
+ ret = ov5645_read_reg(ov5645, OV5645_CHIP_ID_LOW, &chip_id_low);
+ if (ret < 0 || chip_id_low != OV5645_CHIP_ID_LOW_BYTE) {
+ dev_err(dev, "could not read ID low\n");
+ ret = -ENODEV;
+ goto power_down;
+ }
+
+ dev_info(dev, "OV5645 detected at address 0x%02x\n", client->addr);
+
+ ret = ov5645_read_reg(ov5645, OV5645_AEC_PK_MANUAL,
+ &ov5645->aec_pk_manual);
+ if (ret < 0) {
+ dev_err(dev, "could not read AEC/AGC mode\n");
+ ret = -ENODEV;
+ goto power_down;
+ }
+
+ ret = ov5645_read_reg(ov5645, OV5645_TIMING_TC_REG20,
+ &ov5645->timing_tc_reg20);
+ if (ret < 0) {
+ dev_err(dev, "could not read vflip value\n");
+ ret = -ENODEV;
+ goto power_down;
+ }
+
+ ret = ov5645_read_reg(ov5645, OV5645_TIMING_TC_REG21,
+ &ov5645->timing_tc_reg21);
+ if (ret < 0) {
+ dev_err(dev, "could not read hflip value\n");
+ ret = -ENODEV;
+ goto power_down;
+ }
+
+ ov5645_s_power(&ov5645->sd, false);
+
+ ret = v4l2_async_register_subdev(&ov5645->sd);
+ if (ret < 0) {
+ dev_err(dev, "could not register v4l2 device\n");
+ goto free_entity;
+ }
+
+ ov5645_entity_init_cfg(&ov5645->sd, NULL);
+
+ return 0;
+
+power_down:
+ ov5645_s_power(&ov5645->sd, false);
+free_entity:
+ media_entity_cleanup(&ov5645->sd.entity);
+free_ctrl:
+ v4l2_ctrl_handler_free(&ov5645->ctrls);
+ mutex_destroy(&ov5645->power_lock);
+
+ return ret;
+}
+
+static int ov5645_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct ov5645 *ov5645 = to_ov5645(sd);
+
+ v4l2_async_unregister_subdev(&ov5645->sd);
+ media_entity_cleanup(&ov5645->sd.entity);
+ v4l2_ctrl_handler_free(&ov5645->ctrls);
+ mutex_destroy(&ov5645->power_lock);
+
+ return 0;
+}
+
+static const struct i2c_device_id ov5645_id[] = {
+ { "ov5645", 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, ov5645_id);
+
+static const struct of_device_id ov5645_of_match[] = {
+ { .compatible = "ovti,ov5645" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ov5645_of_match);
+
+static struct i2c_driver ov5645_i2c_driver = {
+ .driver = {
+ .of_match_table = of_match_ptr(ov5645_of_match),
+ .name = "ov5645",
+ },
+ .probe = ov5645_probe,
+ .remove = ov5645_remove,
+ .id_table = ov5645_id,
+};
+
+module_i2c_driver(ov5645_i2c_driver);
+
+MODULE_DESCRIPTION("Omnivision OV5645 Camera Driver");
+MODULE_AUTHOR("Todor Tomov <todor.tomov@linaro.org>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/i2c/ov5647.c b/drivers/media/i2c/ov5647.c
new file mode 100644
index 000000000000..f57a0b354cf6
--- /dev/null
+++ b/drivers/media/i2c/ov5647.c
@@ -0,0 +1,634 @@
+/*
+ * A V4L2 driver for OmniVision OV5647 cameras.
+ *
+ * Based on Samsung S5K6AAFX SXGA 1/6" 1.3M CMOS Image Sensor driver
+ * Copyright (C) 2011 Sylwester Nawrocki <s.nawrocki@samsung.com>
+ *
+ * Based on Omnivision OV7670 Camera Driver
+ * Copyright (C) 2006-7 Jonathan Corbet <corbet@lwn.net>
+ *
+ * Copyright (C) 2016, Synopsys, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/videodev2.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-image-sizes.h>
+#include <media/v4l2-mediabus.h>
+#include <media/v4l2-of.h>
+
+#define SENSOR_NAME "ov5647"
+
+#define OV5647_SW_RESET 0x0103
+#define OV5647_REG_CHIPID_H 0x300A
+#define OV5647_REG_CHIPID_L 0x300B
+
+#define REG_TERM 0xfffe
+#define VAL_TERM 0xfe
+#define REG_DLY 0xffff
+
+#define OV5647_ROW_START 0x01
+#define OV5647_ROW_START_MIN 0
+#define OV5647_ROW_START_MAX 2004
+#define OV5647_ROW_START_DEF 54
+
+#define OV5647_COLUMN_START 0x02
+#define OV5647_COLUMN_START_MIN 0
+#define OV5647_COLUMN_START_MAX 2750
+#define OV5647_COLUMN_START_DEF 16
+
+#define OV5647_WINDOW_HEIGHT 0x03
+#define OV5647_WINDOW_HEIGHT_MIN 2
+#define OV5647_WINDOW_HEIGHT_MAX 2006
+#define OV5647_WINDOW_HEIGHT_DEF 1944
+
+#define OV5647_WINDOW_WIDTH 0x04
+#define OV5647_WINDOW_WIDTH_MIN 2
+#define OV5647_WINDOW_WIDTH_MAX 2752
+#define OV5647_WINDOW_WIDTH_DEF 2592
+
+struct regval_list {
+ u16 addr;
+ u8 data;
+};
+
+struct ov5647 {
+ struct v4l2_subdev sd;
+ struct media_pad pad;
+ struct mutex lock;
+ struct v4l2_mbus_framefmt format;
+ unsigned int width;
+ unsigned int height;
+ int power_count;
+ struct clk *xclk;
+};
+
+static inline struct ov5647 *to_state(struct v4l2_subdev *sd)
+{
+ return container_of(sd, struct ov5647, sd);
+}
+
+static struct regval_list sensor_oe_disable_regs[] = {
+ {0x3000, 0x00},
+ {0x3001, 0x00},
+ {0x3002, 0x00},
+};
+
+static struct regval_list sensor_oe_enable_regs[] = {
+ {0x3000, 0x0f},
+ {0x3001, 0xff},
+ {0x3002, 0xe4},
+};
+
+static struct regval_list ov5647_640x480[] = {
+ {0x0100, 0x00},
+ {0x0103, 0x01},
+ {0x3034, 0x08},
+ {0x3035, 0x21},
+ {0x3036, 0x46},
+ {0x303c, 0x11},
+ {0x3106, 0xf5},
+ {0x3821, 0x07},
+ {0x3820, 0x41},
+ {0x3827, 0xec},
+ {0x370c, 0x0f},
+ {0x3612, 0x59},
+ {0x3618, 0x00},
+ {0x5000, 0x06},
+ {0x5001, 0x01},
+ {0x5002, 0x41},
+ {0x5003, 0x08},
+ {0x5a00, 0x08},
+ {0x3000, 0x00},
+ {0x3001, 0x00},
+ {0x3002, 0x00},
+ {0x3016, 0x08},
+ {0x3017, 0xe0},
+ {0x3018, 0x44},
+ {0x301c, 0xf8},
+ {0x301d, 0xf0},
+ {0x3a18, 0x00},
+ {0x3a19, 0xf8},
+ {0x3c01, 0x80},
+ {0x3b07, 0x0c},
+ {0x380c, 0x07},
+ {0x380d, 0x68},
+ {0x380e, 0x03},
+ {0x380f, 0xd8},
+ {0x3814, 0x31},
+ {0x3815, 0x31},
+ {0x3708, 0x64},
+ {0x3709, 0x52},
+ {0x3808, 0x02},
+ {0x3809, 0x80},
+ {0x380a, 0x01},
+ {0x380b, 0xE0},
+ {0x3801, 0x00},
+ {0x3802, 0x00},
+ {0x3803, 0x00},
+ {0x3804, 0x0a},
+ {0x3805, 0x3f},
+ {0x3806, 0x07},
+ {0x3807, 0xa1},
+ {0x3811, 0x08},
+ {0x3813, 0x02},
+ {0x3630, 0x2e},
+ {0x3632, 0xe2},
+ {0x3633, 0x23},
+ {0x3634, 0x44},
+ {0x3636, 0x06},
+ {0x3620, 0x64},
+ {0x3621, 0xe0},
+ {0x3600, 0x37},
+ {0x3704, 0xa0},
+ {0x3703, 0x5a},
+ {0x3715, 0x78},
+ {0x3717, 0x01},
+ {0x3731, 0x02},
+ {0x370b, 0x60},
+ {0x3705, 0x1a},
+ {0x3f05, 0x02},
+ {0x3f06, 0x10},
+ {0x3f01, 0x0a},
+ {0x3a08, 0x01},
+ {0x3a09, 0x27},
+ {0x3a0a, 0x00},
+ {0x3a0b, 0xf6},
+ {0x3a0d, 0x04},
+ {0x3a0e, 0x03},
+ {0x3a0f, 0x58},
+ {0x3a10, 0x50},
+ {0x3a1b, 0x58},
+ {0x3a1e, 0x50},
+ {0x3a11, 0x60},
+ {0x3a1f, 0x28},
+ {0x4001, 0x02},
+ {0x4004, 0x02},
+ {0x4000, 0x09},
+ {0x4837, 0x24},
+ {0x4050, 0x6e},
+ {0x4051, 0x8f},
+ {0x0100, 0x01},
+};
+
+static int ov5647_write(struct v4l2_subdev *sd, u16 reg, u8 val)
+{
+ int ret;
+ unsigned char data[3] = { reg >> 8, reg & 0xff, val};
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ ret = i2c_master_send(client, data, 3);
+ if (ret < 0)
+ dev_dbg(&client->dev, "%s: i2c write error, reg: %x\n",
+ __func__, reg);
+
+ return ret;
+}
+
+static int ov5647_read(struct v4l2_subdev *sd, u16 reg, u8 *val)
+{
+ int ret;
+ unsigned char data_w[2] = { reg >> 8, reg & 0xff };
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ ret = i2c_master_send(client, data_w, 2);
+ if (ret < 0) {
+ dev_dbg(&client->dev, "%s: i2c write error, reg: %x\n",
+ __func__, reg);
+ return ret;
+ }
+
+ ret = i2c_master_recv(client, val, 1);
+ if (ret < 0)
+ dev_dbg(&client->dev, "%s: i2c read error, reg: %x\n",
+ __func__, reg);
+
+ return ret;
+}
+
+static int ov5647_write_array(struct v4l2_subdev *sd,
+ struct regval_list *regs, int array_size)
+{
+ int i, ret;
+
+ for (i = 0; i < array_size; i++) {
+ ret = ov5647_write(sd, regs[i].addr, regs[i].data);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ov5647_set_virtual_channel(struct v4l2_subdev *sd, int channel)
+{
+ u8 channel_id;
+ int ret;
+
+ ret = ov5647_read(sd, 0x4814, &channel_id);
+ if (ret < 0)
+ return ret;
+
+ channel_id &= ~(3 << 6);
+ return ov5647_write(sd, 0x4814, channel_id | (channel << 6));
+}
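
The helper above programs the virtual channel into bits [7:6] of register
0x4814 with a read-modify-write, leaving the other bits untouched. A tiny
standalone sketch of that bit manipulation (illustration only):

	#include <stdio.h>

	static unsigned char set_vc(unsigned char reg4814, unsigned int channel)
	{
		reg4814 &= ~(3 << 6);			/* clear bits 7:6 */
		return reg4814 | (channel << 6);	/* insert the channel */
	}

	int main(void)
	{
		/* a previous value of 0x2a with channel 1 becomes 0x6a */
		printf("0x%02x\n", set_vc(0x2a, 1));
		return 0;
	}
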
+
+static int ov5647_stream_on(struct v4l2_subdev *sd)
+{
+ int ret;
+
+ ret = ov5647_write(sd, 0x4202, 0x00);
+ if (ret < 0)
+ return ret;
+
+ return ov5647_write(sd, 0x300D, 0x00);
+}
+
+static int ov5647_stream_off(struct v4l2_subdev *sd)
+{
+ int ret;
+
+ ret = ov5647_write(sd, 0x4202, 0x0f);
+ if (ret < 0)
+ return ret;
+
+ return ov5647_write(sd, 0x300D, 0x01);
+}
+
+static int set_sw_standby(struct v4l2_subdev *sd, bool standby)
+{
+ int ret;
+ u8 rdval;
+
+ ret = ov5647_read(sd, 0x0100, &rdval);
+ if (ret < 0)
+ return ret;
+
+ if (standby)
+ rdval &= ~0x01;
+ else
+ rdval |= 0x01;
+
+ return ov5647_write(sd, 0x0100, rdval);
+}
+
+static int __sensor_init(struct v4l2_subdev *sd)
+{
+ int ret;
+ u8 resetval, rdval;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ ret = ov5647_read(sd, 0x0100, &rdval);
+ if (ret < 0)
+ return ret;
+
+ ret = ov5647_write_array(sd, ov5647_640x480,
+ ARRAY_SIZE(ov5647_640x480));
+ if (ret < 0) {
+ dev_err(&client->dev, "write sensor default regs error\n");
+ return ret;
+ }
+
+ ret = ov5647_set_virtual_channel(sd, 0);
+ if (ret < 0)
+ return ret;
+
+ ret = ov5647_read(sd, 0x0100, &resetval);
+ if (ret < 0)
+ return ret;
+
+ if (!(resetval & 0x01)) {
+ dev_err(&client->dev, "Device was in SW standby");
+ ret = ov5647_write(sd, 0x0100, 0x01);
+ if (ret < 0)
+ return ret;
+ }
+
+ return ov5647_write(sd, 0x4800, 0x04);
+}
+
+static int ov5647_sensor_power(struct v4l2_subdev *sd, int on)
+{
+ int ret = 0;
+ struct ov5647 *ov5647 = to_state(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ mutex_lock(&ov5647->lock);
+
+ if (on && !ov5647->power_count) {
+ dev_dbg(&client->dev, "OV5647 power on\n");
+
+ ret = clk_prepare_enable(ov5647->xclk);
+ if (ret < 0) {
+ dev_err(&client->dev, "clk prepare enable failed\n");
+ goto out;
+ }
+
+ ret = ov5647_write_array(sd, sensor_oe_enable_regs,
+ ARRAY_SIZE(sensor_oe_enable_regs));
+ if (ret < 0) {
+ clk_disable_unprepare(ov5647->xclk);
+ dev_err(&client->dev,
+ "write sensor_oe_enable_regs error\n");
+ goto out;
+ }
+
+ ret = __sensor_init(sd);
+ if (ret < 0) {
+ clk_disable_unprepare(ov5647->xclk);
+ dev_err(&client->dev,
+ "Camera not available, check Power\n");
+ goto out;
+ }
+ } else if (!on && ov5647->power_count == 1) {
+ dev_dbg(&client->dev, "OV5647 power off\n");
+
+ ret = ov5647_write_array(sd, sensor_oe_disable_regs,
+ ARRAY_SIZE(sensor_oe_disable_regs));
+
+ if (ret < 0)
+ dev_dbg(&client->dev, "disable oe failed\n");
+
+ ret = set_sw_standby(sd, true);
+
+ if (ret < 0)
+ dev_dbg(&client->dev, "soft stby failed\n");
+
+ clk_disable_unprepare(ov5647->xclk);
+ }
+
+ /* Update the power count. */
+ ov5647->power_count += on ? 1 : -1;
+ WARN_ON(ov5647->power_count < 0);
+
+out:
+ mutex_unlock(&ov5647->lock);
+
+ return ret;
+}
+
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+static int ov5647_sensor_get_register(struct v4l2_subdev *sd,
+ struct v4l2_dbg_register *reg)
+{
+ u8 val;
+ int ret;
+
+ ret = ov5647_read(sd, reg->reg & 0xff, &val);
+ if (ret < 0)
+ return ret;
+
+ reg->val = val;
+ reg->size = 1;
+
+ return 0;
+}
+
+static int ov5647_sensor_set_register(struct v4l2_subdev *sd,
+ const struct v4l2_dbg_register *reg)
+{
+ return ov5647_write(sd, reg->reg & 0xff, reg->val & 0xff);
+}
+#endif
+
+/**
+ * @short Subdev core operations registration
+ */
+static const struct v4l2_subdev_core_ops ov5647_subdev_core_ops = {
+ .s_power = ov5647_sensor_power,
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+ .g_register = ov5647_sensor_get_register,
+ .s_register = ov5647_sensor_set_register,
+#endif
+};
+
+static int ov5647_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ if (enable)
+ return ov5647_stream_on(sd);
+ else
+ return ov5647_stream_off(sd);
+}
+
+static const struct v4l2_subdev_video_ops ov5647_subdev_video_ops = {
+ .s_stream = ov5647_s_stream,
+};
+
+static int ov5647_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ if (code->index > 0)
+ return -EINVAL;
+
+ code->code = MEDIA_BUS_FMT_SBGGR8_1X8;
+
+ return 0;
+}
+
+static const struct v4l2_subdev_pad_ops ov5647_subdev_pad_ops = {
+ .enum_mbus_code = ov5647_enum_mbus_code,
+};
+
+static const struct v4l2_subdev_ops ov5647_subdev_ops = {
+ .core = &ov5647_subdev_core_ops,
+ .video = &ov5647_subdev_video_ops,
+ .pad = &ov5647_subdev_pad_ops,
+};
+
+static int ov5647_detect(struct v4l2_subdev *sd)
+{
+ u8 read;
+ int ret;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ ret = ov5647_write(sd, OV5647_SW_RESET, 0x01);
+ if (ret < 0)
+ return ret;
+
+ ret = ov5647_read(sd, OV5647_REG_CHIPID_H, &read);
+ if (ret < 0)
+ return ret;
+
+ if (read != 0x56) {
+ dev_err(&client->dev, "ID High expected 0x56 got %x", read);
+ return -ENODEV;
+ }
+
+ ret = ov5647_read(sd, OV5647_REG_CHIPID_L, &read);
+ if (ret < 0)
+ return ret;
+
+ if (read != 0x47) {
+ dev_err(&client->dev, "ID Low expected 0x47 got %x", read);
+ return -ENODEV;
+ }
+
+ return ov5647_write(sd, OV5647_SW_RESET, 0x00);
+}
+
+static int ov5647_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ struct v4l2_mbus_framefmt *format =
+ v4l2_subdev_get_try_format(sd, fh->pad, 0);
+ struct v4l2_rect *crop =
+ v4l2_subdev_get_try_crop(sd, fh->pad, 0);
+
+ crop->left = OV5647_COLUMN_START_DEF;
+ crop->top = OV5647_ROW_START_DEF;
+ crop->width = OV5647_WINDOW_WIDTH_DEF;
+ crop->height = OV5647_WINDOW_HEIGHT_DEF;
+
+ format->code = MEDIA_BUS_FMT_SBGGR8_1X8;
+
+ format->width = OV5647_WINDOW_WIDTH_DEF;
+ format->height = OV5647_WINDOW_HEIGHT_DEF;
+ format->field = V4L2_FIELD_NONE;
+ format->colorspace = V4L2_COLORSPACE_SRGB;
+
+ return 0;
+}
+
+static const struct v4l2_subdev_internal_ops ov5647_subdev_internal_ops = {
+ .open = ov5647_open,
+};
+
+static int ov5647_parse_dt(struct device_node *np)
+{
+ struct v4l2_of_endpoint bus_cfg;
+ struct device_node *ep;
+
+ int ret;
+
+ ep = of_graph_get_next_endpoint(np, NULL);
+ if (!ep)
+ return -EINVAL;
+
+ ret = v4l2_of_parse_endpoint(ep, &bus_cfg);
+
+ of_node_put(ep);
+ return ret;
+}
+
+static int ov5647_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ struct ov5647 *sensor;
+ int ret;
+ struct v4l2_subdev *sd;
+ struct device_node *np = client->dev.of_node;
+ u32 xclk_freq;
+
+ sensor = devm_kzalloc(dev, sizeof(*sensor), GFP_KERNEL);
+ if (!sensor)
+ return -ENOMEM;
+
+ if (IS_ENABLED(CONFIG_OF) && np) {
+ ret = ov5647_parse_dt(np);
+ if (ret) {
+ dev_err(dev, "DT parsing error: %d\n", ret);
+ return ret;
+ }
+ }
+
+ /* get system clock (xclk) */
+ sensor->xclk = devm_clk_get(dev, NULL);
+ if (IS_ERR(sensor->xclk)) {
+ dev_err(dev, "could not get xclk");
+ return PTR_ERR(sensor->xclk);
+ }
+
+ xclk_freq = clk_get_rate(sensor->xclk);
+ if (xclk_freq != 25000000) {
+ dev_err(dev, "Unsupported clock frequency: %u\n", xclk_freq);
+ return -EINVAL;
+ }
+
+ mutex_init(&sensor->lock);
+
+ sd = &sensor->sd;
+ v4l2_i2c_subdev_init(sd, client, &ov5647_subdev_ops);
+ sensor->sd.internal_ops = &ov5647_subdev_internal_ops;
+ sensor->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+ sensor->pad.flags = MEDIA_PAD_FL_SOURCE;
+ sd->entity.function = MEDIA_ENT_F_CAM_SENSOR;
+ ret = media_entity_pads_init(&sd->entity, 1, &sensor->pad);
+ if (ret < 0)
+ goto mutex_remove;
+
+ ret = ov5647_detect(sd);
+ if (ret < 0)
+ goto error;
+
+ ret = v4l2_async_register_subdev(sd);
+ if (ret < 0)
+ goto error;
+
+ dev_dbg(dev, "OmniVision OV5647 camera driver probed\n");
+ return 0;
+error:
+ media_entity_cleanup(&sd->entity);
+mutex_remove:
+ mutex_destroy(&sensor->lock);
+ return ret;
+}
+
+static int ov5647_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct ov5647 *ov5647 = to_state(sd);
+
+ v4l2_async_unregister_subdev(&ov5647->sd);
+ media_entity_cleanup(&ov5647->sd.entity);
+ v4l2_device_unregister_subdev(sd);
+ mutex_destroy(&ov5647->lock);
+
+ return 0;
+}
+
+static const struct i2c_device_id ov5647_id[] = {
+ { "ov5647", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, ov5647_id);
+
+#if IS_ENABLED(CONFIG_OF)
+static const struct of_device_id ov5647_of_match[] = {
+ { .compatible = "ovti,ov5647" },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, ov5647_of_match);
+#endif
+
+static struct i2c_driver ov5647_driver = {
+ .driver = {
+ .of_match_table = of_match_ptr(ov5647_of_match),
+ .name = SENSOR_NAME,
+ },
+ .probe = ov5647_probe,
+ .remove = ov5647_remove,
+ .id_table = ov5647_id,
+};
+
+module_i2c_driver(ov5647_driver);
+
+MODULE_AUTHOR("Ramiro Oliveira <roliveir@synopsys.com>");
+MODULE_DESCRIPTION("A low-level driver for OmniVision ov5647 sensors");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/i2c/ov7670.c b/drivers/media/i2c/ov7670.c
index 56cfb5ca9c95..7270c68ed18a 100644
--- a/drivers/media/i2c/ov7670.c
+++ b/drivers/media/i2c/ov7670.c
@@ -10,12 +10,15 @@
* This file may be distributed under the terms of the GNU General
* Public License, version 2.
*/
+#include <linux/clk.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/delay.h>
#include <linux/videodev2.h>
+#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-mediabus.h>
@@ -227,6 +230,9 @@ struct ov7670_info {
struct v4l2_ctrl *hue;
};
struct ov7670_format_struct *fmt; /* Current format */
+ struct clk *clk;
+ struct gpio_desc *resetb_gpio;
+ struct gpio_desc *pwdn_gpio;
int min_width; /* Filter out smaller sizes */
int min_height; /* Filter out smaller sizes */
int clock_speed; /* External clock speed (MHz) */
@@ -589,8 +595,6 @@ static int ov7670_init(struct v4l2_subdev *sd, u32 val)
return ov7670_write_array(sd, ov7670_default_regs);
}
-
-
static int ov7670_detect(struct v4l2_subdev *sd)
{
unsigned char v;
@@ -1046,7 +1050,6 @@ static int ov7670_g_parm(struct v4l2_subdev *sd, struct v4l2_streamparm *parms)
if (parms->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
- memset(cp, 0, sizeof(struct v4l2_captureparm));
cp->capability = V4L2_CAP_TIMEPERFRAME;
info->devtype->get_framerate(sd, &cp->timeperframe);
@@ -1061,9 +1064,8 @@ static int ov7670_s_parm(struct v4l2_subdev *sd, struct v4l2_streamparm *parms)
if (parms->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
- if (cp->extendedmode != 0)
- return -EINVAL;
+ cp->capability = V4L2_CAP_TIMEPERFRAME;
return info->devtype->set_framerate(sd, tpf);
}
@@ -1549,6 +1551,27 @@ static const struct ov7670_devtype ov7670_devdata[] = {
},
};
+static int ov7670_init_gpio(struct i2c_client *client, struct ov7670_info *info)
+{
+ info->pwdn_gpio = devm_gpiod_get_optional(&client->dev, "powerdown",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(info->pwdn_gpio)) {
+ dev_info(&client->dev, "can't get %s GPIO\n", "powerdown");
+ return PTR_ERR(info->pwdn_gpio);
+ }
+
+ info->resetb_gpio = devm_gpiod_get_optional(&client->dev, "reset",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(info->resetb_gpio)) {
+ dev_info(&client->dev, "can't get %s GPIO\n", "reset");
+ return PTR_ERR(info->resetb_gpio);
+ }
+
+ usleep_range(3000, 5000);
+
+ return 0;
+}
+
static int ov7670_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -1589,13 +1612,28 @@ static int ov7670_probe(struct i2c_client *client,
info->pclk_hb_disable = true;
}
+ info->clk = devm_clk_get(&client->dev, "xclk");
+ if (IS_ERR(info->clk))
+ return -EPROBE_DEFER;
+ clk_prepare_enable(info->clk);
+
+ ret = ov7670_init_gpio(client, info);
+ if (ret)
+ goto clk_disable;
+
+ info->clock_speed = clk_get_rate(info->clk) / 1000000;
+ if (info->clock_speed < 10 || info->clock_speed > 48) {
+ ret = -EINVAL;
+ goto clk_disable;
+ }
+
/* Make sure it's an ov7670 */
ret = ov7670_detect(sd);
if (ret) {
v4l_dbg(1, debug, client,
"chip found @ 0x%x (%s) is not an ov7670 chip.\n",
client->addr << 1, client->adapter->name);
- return ret;
+ goto clk_disable;
}
v4l_info(client, "chip found @ 0x%02x (%s)\n",
client->addr << 1, client->adapter->name);
@@ -1636,10 +1674,9 @@ static int ov7670_probe(struct i2c_client *client,
V4L2_EXPOSURE_AUTO);
sd->ctrl_handler = &info->hdl;
if (info->hdl.error) {
- int err = info->hdl.error;
+ ret = info->hdl.error;
- v4l2_ctrl_handler_free(&info->hdl);
- return err;
+ goto hdl_free;
}
/*
* We have checked empirically that hw allows to read back the gain
@@ -1651,7 +1688,17 @@ static int ov7670_probe(struct i2c_client *client,
v4l2_ctrl_cluster(2, &info->saturation);
v4l2_ctrl_handler_setup(&info->hdl);
+ ret = v4l2_async_register_subdev(&info->sd);
+ if (ret < 0)
+ goto hdl_free;
+
return 0;
+
+hdl_free:
+ v4l2_ctrl_handler_free(&info->hdl);
+clk_disable:
+ clk_disable_unprepare(info->clk);
+ return ret;
}
@@ -1662,6 +1709,7 @@ static int ov7670_remove(struct i2c_client *client)
v4l2_device_unregister_subdev(sd);
v4l2_ctrl_handler_free(&info->hdl);
+ clk_disable_unprepare(info->clk);
return 0;
}
@@ -1672,9 +1720,18 @@ static const struct i2c_device_id ov7670_id[] = {
};
MODULE_DEVICE_TABLE(i2c, ov7670_id);
+#if IS_ENABLED(CONFIG_OF)
+static const struct of_device_id ov7670_of_match[] = {
+ { .compatible = "ovti,ov7670", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, ov7670_of_match);
+#endif
+
static struct i2c_driver ov7670_driver = {
.driver = {
.name = "ov7670",
+ .of_match_table = of_match_ptr(ov7670_of_match),
},
.probe = ov7670_probe,
.remove = ov7670_remove,
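
The ov7670 probe changes make an "xclk" clock mandatory: its rate is read back
into clock_speed (10 to 48 MHz accepted) and a missing clock defers probing.
On a device-tree system the clock comes from the node's "clocks" property; on
a legacy board the platform code would have to register a fixed-rate clock
plus a lookup for the sensor's i2c device. A rough sketch of the latter, where
the 24 MHz rate and the "0-0021" device name (bus 0, address 0x21) are
assumptions:

	/* Hypothetical board-file fragment providing the ov7670 "xclk". */
	#include <linux/clk-provider.h>
	#include <linux/clkdev.h>
	#include <linux/err.h>

	static void board_register_ov7670_xclk(void)
	{
		struct clk *clk;

		clk = clk_register_fixed_rate(NULL, "ov7670-xclk", NULL, 0,
					      24000000);
		if (IS_ERR(clk))
			return;

		/* con_id "xclk" matches devm_clk_get(&client->dev, "xclk") */
		clkdev_create(clk, "xclk", "0-0021");
	}
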
diff --git a/drivers/media/i2c/soc_camera/Kconfig b/drivers/media/i2c/soc_camera/Kconfig
index 7704bcf5cc25..96859f37cb1c 100644
--- a/drivers/media/i2c/soc_camera/Kconfig
+++ b/drivers/media/i2c/soc_camera/Kconfig
@@ -41,12 +41,6 @@ config SOC_CAMERA_MT9V022
help
This driver supports MT9V022 cameras from Micron
-config SOC_CAMERA_OV2640
- tristate "ov2640 camera support"
- depends on SOC_CAMERA && I2C
- help
- This is a ov2640 camera driver
-
config SOC_CAMERA_OV5642
tristate "ov5642 camera support"
depends on SOC_CAMERA && I2C
diff --git a/drivers/media/i2c/soc_camera/Makefile b/drivers/media/i2c/soc_camera/Makefile
index 6f994f9353a0..974bdb721dbe 100644
--- a/drivers/media/i2c/soc_camera/Makefile
+++ b/drivers/media/i2c/soc_camera/Makefile
@@ -3,7 +3,6 @@ obj-$(CONFIG_SOC_CAMERA_MT9M001) += mt9m001.o
obj-$(CONFIG_SOC_CAMERA_MT9T031) += mt9t031.o
obj-$(CONFIG_SOC_CAMERA_MT9T112) += mt9t112.o
obj-$(CONFIG_SOC_CAMERA_MT9V022) += mt9v022.o
-obj-$(CONFIG_SOC_CAMERA_OV2640) += ov2640.o
obj-$(CONFIG_SOC_CAMERA_OV5642) += ov5642.o
obj-$(CONFIG_SOC_CAMERA_OV6650) += ov6650.o
obj-$(CONFIG_SOC_CAMERA_OV772X) += ov772x.o
diff --git a/drivers/media/i2c/soc_camera/imx074.c b/drivers/media/i2c/soc_camera/imx074.c
index 05b55cfe8147..77f1e0243d6e 100644
--- a/drivers/media/i2c/soc_camera/imx074.c
+++ b/drivers/media/i2c/soc_camera/imx074.c
@@ -180,7 +180,7 @@ static int imx074_set_fmt(struct v4l2_subdev *sd,
mf->field = V4L2_FIELD_NONE;
if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE)
- priv->fmt = imx074_find_datafmt(mf->code);
+ priv->fmt = fmt;
else
cfg->try_fmt = *mf;
@@ -271,12 +271,12 @@ static int imx074_g_mbus_config(struct v4l2_subdev *sd,
return 0;
}
-static struct v4l2_subdev_video_ops imx074_subdev_video_ops = {
+static const struct v4l2_subdev_video_ops imx074_subdev_video_ops = {
.s_stream = imx074_s_stream,
.g_mbus_config = imx074_g_mbus_config,
};
-static struct v4l2_subdev_core_ops imx074_subdev_core_ops = {
+static const struct v4l2_subdev_core_ops imx074_subdev_core_ops = {
.s_power = imx074_s_power,
};
@@ -287,7 +287,7 @@ static const struct v4l2_subdev_pad_ops imx074_subdev_pad_ops = {
.set_fmt = imx074_set_fmt,
};
-static struct v4l2_subdev_ops imx074_subdev_ops = {
+static const struct v4l2_subdev_ops imx074_subdev_ops = {
.core = &imx074_subdev_core_ops,
.video = &imx074_subdev_video_ops,
.pad = &imx074_subdev_pad_ops,
diff --git a/drivers/media/i2c/soc_camera/mt9m001.c b/drivers/media/i2c/soc_camera/mt9m001.c
index 3d6378d4491c..1bfb0d53059e 100644
--- a/drivers/media/i2c/soc_camera/mt9m001.c
+++ b/drivers/media/i2c/soc_camera/mt9m001.c
@@ -278,6 +278,7 @@ static int mt9m001_get_fmt(struct v4l2_subdev *sd,
}
static int mt9m001_s_fmt(struct v4l2_subdev *sd,
+ const struct mt9m001_datafmt *fmt,
struct v4l2_mbus_framefmt *mf)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
@@ -297,9 +298,8 @@ static int mt9m001_s_fmt(struct v4l2_subdev *sd,
if (!ret) {
mf->width = mt9m001->rect.width;
mf->height = mt9m001->rect.height;
- mt9m001->fmt = mt9m001_find_datafmt(mf->code,
- mt9m001->fmts, mt9m001->num_fmts);
- mf->colorspace = mt9m001->fmt->colorspace;
+ mt9m001->fmt = fmt;
+ mf->colorspace = fmt->colorspace;
}
return ret;
@@ -335,7 +335,7 @@ static int mt9m001_set_fmt(struct v4l2_subdev *sd,
mf->colorspace = fmt->colorspace;
if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE)
- return mt9m001_s_fmt(sd, mf);
+ return mt9m001_s_fmt(sd, fmt, mf);
cfg->try_fmt = *mf;
return 0;
}
@@ -574,7 +574,7 @@ static const struct v4l2_ctrl_ops mt9m001_ctrl_ops = {
.s_ctrl = mt9m001_s_ctrl,
};
-static struct v4l2_subdev_core_ops mt9m001_subdev_core_ops = {
+static const struct v4l2_subdev_core_ops mt9m001_subdev_core_ops = {
#ifdef CONFIG_VIDEO_ADV_DEBUG
.g_register = mt9m001_g_register,
.s_register = mt9m001_s_register,
@@ -630,7 +630,7 @@ static int mt9m001_s_mbus_config(struct v4l2_subdev *sd,
return bps == 10 ? 0 : -EINVAL;
}
-static struct v4l2_subdev_video_ops mt9m001_subdev_video_ops = {
+static const struct v4l2_subdev_video_ops mt9m001_subdev_video_ops = {
.s_stream = mt9m001_s_stream,
.g_mbus_config = mt9m001_g_mbus_config,
.s_mbus_config = mt9m001_s_mbus_config,
@@ -648,7 +648,7 @@ static const struct v4l2_subdev_pad_ops mt9m001_subdev_pad_ops = {
.set_fmt = mt9m001_set_fmt,
};
-static struct v4l2_subdev_ops mt9m001_subdev_ops = {
+static const struct v4l2_subdev_ops mt9m001_subdev_ops = {
.core = &mt9m001_subdev_core_ops,
.video = &mt9m001_subdev_video_ops,
.sensor = &mt9m001_subdev_sensor_ops,
diff --git a/drivers/media/i2c/soc_camera/mt9t031.c b/drivers/media/i2c/soc_camera/mt9t031.c
index 3aa5569065ad..714fb3555b34 100644
--- a/drivers/media/i2c/soc_camera/mt9t031.c
+++ b/drivers/media/i2c/soc_camera/mt9t031.c
@@ -679,7 +679,7 @@ static const struct v4l2_ctrl_ops mt9t031_ctrl_ops = {
.s_ctrl = mt9t031_s_ctrl,
};
-static struct v4l2_subdev_core_ops mt9t031_subdev_core_ops = {
+static const struct v4l2_subdev_core_ops mt9t031_subdev_core_ops = {
.s_power = mt9t031_s_power,
#ifdef CONFIG_VIDEO_ADV_DEBUG
.g_register = mt9t031_g_register,
@@ -726,7 +726,7 @@ static int mt9t031_s_mbus_config(struct v4l2_subdev *sd,
return reg_set(client, MT9T031_PIXEL_CLOCK_CONTROL, 0x8000);
}
-static struct v4l2_subdev_video_ops mt9t031_subdev_video_ops = {
+static const struct v4l2_subdev_video_ops mt9t031_subdev_video_ops = {
.s_stream = mt9t031_s_stream,
.g_mbus_config = mt9t031_g_mbus_config,
.s_mbus_config = mt9t031_s_mbus_config,
@@ -744,7 +744,7 @@ static const struct v4l2_subdev_pad_ops mt9t031_subdev_pad_ops = {
.set_fmt = mt9t031_set_fmt,
};
-static struct v4l2_subdev_ops mt9t031_subdev_ops = {
+static const struct v4l2_subdev_ops mt9t031_subdev_ops = {
.core = &mt9t031_subdev_core_ops,
.video = &mt9t031_subdev_video_ops,
.sensor = &mt9t031_subdev_sensor_ops,
diff --git a/drivers/media/i2c/soc_camera/mt9t112.c b/drivers/media/i2c/soc_camera/mt9t112.c
index 2ef22241ec14..297d22ebcb18 100644
--- a/drivers/media/i2c/soc_camera/mt9t112.c
+++ b/drivers/media/i2c/soc_camera/mt9t112.c
@@ -773,7 +773,7 @@ static int mt9t112_s_power(struct v4l2_subdev *sd, int on)
return soc_camera_set_power(&client->dev, ssdd, priv->clk, on);
}
-static struct v4l2_subdev_core_ops mt9t112_subdev_core_ops = {
+static const struct v4l2_subdev_core_ops mt9t112_subdev_core_ops = {
#ifdef CONFIG_VIDEO_ADV_DEBUG
.g_register = mt9t112_g_register,
.s_register = mt9t112_s_register,
@@ -1031,7 +1031,7 @@ static int mt9t112_s_mbus_config(struct v4l2_subdev *sd,
return 0;
}
-static struct v4l2_subdev_video_ops mt9t112_subdev_video_ops = {
+static const struct v4l2_subdev_video_ops mt9t112_subdev_video_ops = {
.s_stream = mt9t112_s_stream,
.g_mbus_config = mt9t112_g_mbus_config,
.s_mbus_config = mt9t112_s_mbus_config,
@@ -1048,7 +1048,7 @@ static const struct v4l2_subdev_pad_ops mt9t112_subdev_pad_ops = {
/************************************************************************
i2c driver
************************************************************************/
-static struct v4l2_subdev_ops mt9t112_subdev_ops = {
+static const struct v4l2_subdev_ops mt9t112_subdev_ops = {
.core = &mt9t112_subdev_core_ops,
.video = &mt9t112_subdev_video_ops,
.pad = &mt9t112_subdev_pad_ops,
diff --git a/drivers/media/i2c/soc_camera/mt9v022.c b/drivers/media/i2c/soc_camera/mt9v022.c
index 6a14ab5e4f2d..762f06919329 100644
--- a/drivers/media/i2c/soc_camera/mt9v022.c
+++ b/drivers/media/i2c/soc_camera/mt9v022.c
@@ -403,6 +403,7 @@ static int mt9v022_get_fmt(struct v4l2_subdev *sd,
}
static int mt9v022_s_fmt(struct v4l2_subdev *sd,
+ const struct mt9v022_datafmt *fmt,
struct v4l2_mbus_framefmt *mf)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
@@ -441,9 +442,8 @@ static int mt9v022_s_fmt(struct v4l2_subdev *sd,
if (!ret) {
mf->width = mt9v022->rect.width;
mf->height = mt9v022->rect.height;
- mt9v022->fmt = mt9v022_find_datafmt(mf->code,
- mt9v022->fmts, mt9v022->num_fmts);
- mf->colorspace = mt9v022->fmt->colorspace;
+ mt9v022->fmt = fmt;
+ mf->colorspace = fmt->colorspace;
}
return ret;
@@ -478,7 +478,7 @@ static int mt9v022_set_fmt(struct v4l2_subdev *sd,
mf->colorspace = fmt->colorspace;
if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE)
- return mt9v022_s_fmt(sd, mf);
+ return mt9v022_s_fmt(sd, fmt, mf);
cfg->try_fmt = *mf;
return 0;
}
@@ -770,7 +770,7 @@ static const struct v4l2_ctrl_ops mt9v022_ctrl_ops = {
.s_ctrl = mt9v022_s_ctrl,
};
-static struct v4l2_subdev_core_ops mt9v022_subdev_core_ops = {
+static const struct v4l2_subdev_core_ops mt9v022_subdev_core_ops = {
#ifdef CONFIG_VIDEO_ADV_DEBUG
.g_register = mt9v022_g_register,
.s_register = mt9v022_s_register,
@@ -858,7 +858,7 @@ static int mt9v022_s_mbus_config(struct v4l2_subdev *sd,
return 0;
}
-static struct v4l2_subdev_video_ops mt9v022_subdev_video_ops = {
+static const struct v4l2_subdev_video_ops mt9v022_subdev_video_ops = {
.s_stream = mt9v022_s_stream,
.g_mbus_config = mt9v022_g_mbus_config,
.s_mbus_config = mt9v022_s_mbus_config,
@@ -876,7 +876,7 @@ static const struct v4l2_subdev_pad_ops mt9v022_subdev_pad_ops = {
.set_fmt = mt9v022_set_fmt,
};
-static struct v4l2_subdev_ops mt9v022_subdev_ops = {
+static const struct v4l2_subdev_ops mt9v022_subdev_ops = {
.core = &mt9v022_subdev_core_ops,
.video = &mt9v022_subdev_video_ops,
.sensor = &mt9v022_subdev_sensor_ops,
diff --git a/drivers/media/i2c/soc_camera/ov2640.c b/drivers/media/i2c/soc_camera/ov2640.c
deleted file mode 100644
index 56de18263359..000000000000
--- a/drivers/media/i2c/soc_camera/ov2640.c
+++ /dev/null
@@ -1,1204 +0,0 @@
-/*
- * ov2640 Camera Driver
- *
- * Copyright (C) 2010 Alberto Panizzo <maramaopercheseimorto@gmail.com>
- *
- * Based on ov772x, ov9640 drivers and previous non merged implementations.
- *
- * Copyright 2005-2009 Freescale Semiconductor, Inc. All Rights Reserved.
- * Copyright (C) 2006, OmniVision
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/i2c.h>
-#include <linux/slab.h>
-#include <linux/delay.h>
-#include <linux/gpio.h>
-#include <linux/gpio/consumer.h>
-#include <linux/of_gpio.h>
-#include <linux/v4l2-mediabus.h>
-#include <linux/videodev2.h>
-
-#include <media/soc_camera.h>
-#include <media/v4l2-clk.h>
-#include <media/v4l2-subdev.h>
-#include <media/v4l2-ctrls.h>
-#include <media/v4l2-image-sizes.h>
-
-#define VAL_SET(x, mask, rshift, lshift) \
- ((((x) >> rshift) & mask) << lshift)
-/*
- * DSP registers
- * register offset for BANK_SEL == BANK_SEL_DSP
- */
-#define R_BYPASS 0x05 /* Bypass DSP */
-#define R_BYPASS_DSP_BYPAS 0x01 /* Bypass DSP, sensor out directly */
-#define R_BYPASS_USE_DSP 0x00 /* Use the internal DSP */
-#define QS 0x44 /* Quantization Scale Factor */
-#define CTRLI 0x50
-#define CTRLI_LP_DP 0x80
-#define CTRLI_ROUND 0x40
-#define CTRLI_V_DIV_SET(x) VAL_SET(x, 0x3, 0, 3)
-#define CTRLI_H_DIV_SET(x) VAL_SET(x, 0x3, 0, 0)
-#define HSIZE 0x51 /* H_SIZE[7:0] (real/4) */
-#define HSIZE_SET(x) VAL_SET(x, 0xFF, 2, 0)
-#define VSIZE 0x52 /* V_SIZE[7:0] (real/4) */
-#define VSIZE_SET(x) VAL_SET(x, 0xFF, 2, 0)
-#define XOFFL 0x53 /* OFFSET_X[7:0] */
-#define XOFFL_SET(x) VAL_SET(x, 0xFF, 0, 0)
-#define YOFFL 0x54 /* OFFSET_Y[7:0] */
-#define YOFFL_SET(x) VAL_SET(x, 0xFF, 0, 0)
-#define VHYX 0x55 /* Offset and size completion */
-#define VHYX_VSIZE_SET(x) VAL_SET(x, 0x1, (8+2), 7)
-#define VHYX_HSIZE_SET(x) VAL_SET(x, 0x1, (8+2), 3)
-#define VHYX_YOFF_SET(x) VAL_SET(x, 0x3, 8, 4)
-#define VHYX_XOFF_SET(x) VAL_SET(x, 0x3, 8, 0)
-#define DPRP 0x56
-#define TEST 0x57 /* Horizontal size completion */
-#define TEST_HSIZE_SET(x) VAL_SET(x, 0x1, (9+2), 7)
-#define ZMOW 0x5A /* Zoom: Out Width OUTW[7:0] (real/4) */
-#define ZMOW_OUTW_SET(x) VAL_SET(x, 0xFF, 2, 0)
-#define ZMOH 0x5B /* Zoom: Out Height OUTH[7:0] (real/4) */
-#define ZMOH_OUTH_SET(x) VAL_SET(x, 0xFF, 2, 0)
-#define ZMHH 0x5C /* Zoom: Speed and H&W completion */
-#define ZMHH_ZSPEED_SET(x) VAL_SET(x, 0x0F, 0, 4)
-#define ZMHH_OUTH_SET(x) VAL_SET(x, 0x1, (8+2), 2)
-#define ZMHH_OUTW_SET(x) VAL_SET(x, 0x3, (8+2), 0)
-#define BPADDR 0x7C /* SDE Indirect Register Access: Address */
-#define BPDATA 0x7D /* SDE Indirect Register Access: Data */
-#define CTRL2 0x86 /* DSP Module enable 2 */
-#define CTRL2_DCW_EN 0x20
-#define CTRL2_SDE_EN 0x10
-#define CTRL2_UV_ADJ_EN 0x08
-#define CTRL2_UV_AVG_EN 0x04
-#define CTRL2_CMX_EN 0x01
-#define CTRL3 0x87 /* DSP Module enable 3 */
-#define CTRL3_BPC_EN 0x80
-#define CTRL3_WPC_EN 0x40
-#define SIZEL 0x8C /* Image Size Completion */
-#define SIZEL_HSIZE8_11_SET(x) VAL_SET(x, 0x1, 11, 6)
-#define SIZEL_HSIZE8_SET(x) VAL_SET(x, 0x7, 0, 3)
-#define SIZEL_VSIZE8_SET(x) VAL_SET(x, 0x7, 0, 0)
-#define HSIZE8 0xC0 /* Image Horizontal Size HSIZE[10:3] */
-#define HSIZE8_SET(x) VAL_SET(x, 0xFF, 3, 0)
-#define VSIZE8 0xC1 /* Image Vertical Size VSIZE[10:3] */
-#define VSIZE8_SET(x) VAL_SET(x, 0xFF, 3, 0)
-#define CTRL0 0xC2 /* DSP Module enable 0 */
-#define CTRL0_AEC_EN 0x80
-#define CTRL0_AEC_SEL 0x40
-#define CTRL0_STAT_SEL 0x20
-#define CTRL0_VFIRST 0x10
-#define CTRL0_YUV422 0x08
-#define CTRL0_YUV_EN 0x04
-#define CTRL0_RGB_EN 0x02
-#define CTRL0_RAW_EN 0x01
-#define CTRL1 0xC3 /* DSP Module enable 1 */
-#define CTRL1_CIP 0x80
-#define CTRL1_DMY 0x40
-#define CTRL1_RAW_GMA 0x20
-#define CTRL1_DG 0x10
-#define CTRL1_AWB 0x08
-#define CTRL1_AWB_GAIN 0x04
-#define CTRL1_LENC 0x02
-#define CTRL1_PRE 0x01
-#define R_DVP_SP 0xD3 /* DVP output speed control */
-#define R_DVP_SP_AUTO_MODE 0x80
-#define R_DVP_SP_DVP_MASK 0x3F /* DVP PCLK = sysclk (48)/[6:0] (YUV0);
- * = sysclk (48)/(2*[6:0]) (RAW);*/
-#define IMAGE_MODE 0xDA /* Image Output Format Select */
-#define IMAGE_MODE_Y8_DVP_EN 0x40
-#define IMAGE_MODE_JPEG_EN 0x10
-#define IMAGE_MODE_YUV422 0x00
-#define IMAGE_MODE_RAW10 0x04 /* (DVP) */
-#define IMAGE_MODE_RGB565 0x08
-#define IMAGE_MODE_HREF_VSYNC 0x02 /* HREF timing select in DVP JPEG output
- * mode (0 for HREF is same as sensor) */
-#define IMAGE_MODE_LBYTE_FIRST 0x01 /* Byte swap enable for DVP
- * 1: Low byte first UYVY (C2[4] =0)
- * VYUY (C2[4] =1)
- * 0: High byte first YUYV (C2[4]=0)
- * YVYU (C2[4] = 1) */
-#define RESET 0xE0 /* Reset */
-#define RESET_MICROC 0x40
-#define RESET_SCCB 0x20
-#define RESET_JPEG 0x10
-#define RESET_DVP 0x04
-#define RESET_IPU 0x02
-#define RESET_CIF 0x01
-#define REGED 0xED /* Register ED */
-#define REGED_CLK_OUT_DIS 0x10
-#define MS_SP 0xF0 /* SCCB Master Speed */
-#define SS_ID 0xF7 /* SCCB Slave ID */
-#define SS_CTRL 0xF8 /* SCCB Slave Control */
-#define SS_CTRL_ADD_AUTO_INC 0x20
-#define SS_CTRL_EN 0x08
-#define SS_CTRL_DELAY_CLK 0x04
-#define SS_CTRL_ACC_EN 0x02
-#define SS_CTRL_SEN_PASS_THR 0x01
-#define MC_BIST 0xF9 /* Microcontroller misc register */
-#define MC_BIST_RESET 0x80 /* Microcontroller Reset */
-#define MC_BIST_BOOT_ROM_SEL 0x40
-#define MC_BIST_12KB_SEL 0x20
-#define MC_BIST_12KB_MASK 0x30
-#define MC_BIST_512KB_SEL 0x08
-#define MC_BIST_512KB_MASK 0x0C
-#define MC_BIST_BUSY_BIT_R 0x02
-#define MC_BIST_MC_RES_ONE_SH_W 0x02
-#define MC_BIST_LAUNCH 0x01
-#define BANK_SEL 0xFF /* Register Bank Select */
-#define BANK_SEL_DSP 0x00
-#define BANK_SEL_SENS 0x01
-
-/*
- * Sensor registers
- * register offset for BANK_SEL == BANK_SEL_SENS
- */
-#define GAIN 0x00 /* AGC - Gain control gain setting */
-#define COM1 0x03 /* Common control 1 */
-#define COM1_1_DUMMY_FR 0x40
-#define COM1_3_DUMMY_FR 0x80
-#define COM1_7_DUMMY_FR 0xC0
-#define COM1_VWIN_LSB_UXGA 0x0F
-#define COM1_VWIN_LSB_SVGA 0x0A
-#define COM1_VWIN_LSB_CIF 0x06
-#define REG04 0x04 /* Register 04 */
-#define REG04_DEF 0x20 /* Always set */
-#define REG04_HFLIP_IMG 0x80 /* Horizontal mirror image ON/OFF */
-#define REG04_VFLIP_IMG 0x40 /* Vertical flip image ON/OFF */
-#define REG04_VREF_EN 0x10
-#define REG04_HREF_EN 0x08
-#define REG04_AEC_SET(x) VAL_SET(x, 0x3, 0, 0)
-#define REG08 0x08 /* Frame Exposure One-pin Control Pre-charge Row Num */
-#define COM2 0x09 /* Common control 2 */
-#define COM2_SOFT_SLEEP_MODE 0x10 /* Soft sleep mode */
- /* Output drive capability */
-#define COM2_OCAP_Nx_SET(N) (((N) - 1) & 0x03) /* N = [1x .. 4x] */
-#define PID 0x0A /* Product ID Number MSB */
-#define VER 0x0B /* Product ID Number LSB */
-#define COM3 0x0C /* Common control 3 */
-#define COM3_BAND_50H 0x04 /* 0 For Banding at 60H */
-#define COM3_BAND_AUTO 0x02 /* Auto Banding */
-#define COM3_SING_FR_SNAPSH 0x01 /* 0 For enable live video output after the
- * snapshot sequence*/
-#define AEC 0x10 /* AEC[9:2] Exposure Value */
-#define CLKRC 0x11 /* Internal clock */
-#define CLKRC_EN 0x80
-#define CLKRC_DIV_SET(x) (((x) - 1) & 0x1F) /* CLK = XVCLK/(x) */
-#define COM7 0x12 /* Common control 7 */
-#define COM7_SRST 0x80 /* Initiates system reset. All registers are
- * set to factory default values after which
- * the chip resumes normal operation */
-#define COM7_RES_UXGA 0x00 /* Resolution selectors for UXGA */
-#define COM7_RES_SVGA 0x40 /* SVGA */
-#define COM7_RES_CIF 0x20 /* CIF */
-#define COM7_ZOOM_EN 0x04 /* Enable Zoom mode */
-#define COM7_COLOR_BAR_TEST 0x02 /* Enable Color Bar Test Pattern */
-#define COM8 0x13 /* Common control 8 */
-#define COM8_DEF 0xC0 /* Banding filter ON/OFF */
-#define COM8_BNDF_EN 0x20 /* Banding filter ON/OFF */
-#define COM8_AGC_EN 0x04 /* AGC Auto/Manual control selection */
-#define COM8_AEC_EN 0x01 /* Auto/Manual Exposure control */
-#define COM9 0x14 /* Common control 9
- * Automatic gain ceiling - maximum AGC value [7:5]*/
-#define COM9_AGC_GAIN_2x 0x00 /* 000 : 2x */
-#define COM9_AGC_GAIN_4x 0x20 /* 001 : 4x */
-#define COM9_AGC_GAIN_8x 0x40 /* 010 : 8x */
-#define COM9_AGC_GAIN_16x 0x60 /* 011 : 16x */
-#define COM9_AGC_GAIN_32x 0x80 /* 100 : 32x */
-#define COM9_AGC_GAIN_64x 0xA0 /* 101 : 64x */
-#define COM9_AGC_GAIN_128x 0xC0 /* 110 : 128x */
-#define COM10 0x15 /* Common control 10 */
-#define COM10_PCLK_HREF 0x20 /* PCLK output qualified by HREF */
-#define COM10_PCLK_RISE 0x10 /* Data is updated at the rising edge of
- * PCLK (user can latch data at the next
- * falling edge of PCLK).
- * 0 otherwise. */
-#define COM10_HREF_INV 0x08 /* Invert HREF polarity:
- * HREF negative for valid data*/
-#define COM10_VSINC_INV 0x02 /* Invert VSYNC polarity */
-#define HSTART 0x17 /* Horizontal Window start MSB 8 bit */
-#define HEND 0x18 /* Horizontal Window end MSB 8 bit */
-#define VSTART 0x19 /* Vertical Window start MSB 8 bit */
-#define VEND 0x1A /* Vertical Window end MSB 8 bit */
-#define MIDH 0x1C /* Manufacturer ID byte - high */
-#define MIDL 0x1D /* Manufacturer ID byte - low */
-#define AEW 0x24 /* AGC/AEC - Stable operating region (upper limit) */
-#define AEB 0x25 /* AGC/AEC - Stable operating region (lower limit) */
-#define VV 0x26 /* AGC/AEC Fast mode operating region */
-#define VV_HIGH_TH_SET(x) VAL_SET(x, 0xF, 0, 4)
-#define VV_LOW_TH_SET(x) VAL_SET(x, 0xF, 0, 0)
-#define REG2A 0x2A /* Dummy pixel insert MSB */
-#define FRARL 0x2B /* Dummy pixel insert LSB */
-#define ADDVFL 0x2D /* LSB of insert dummy lines in Vertical direction */
-#define ADDVFH 0x2E /* MSB of insert dummy lines in Vertical direction */
-#define YAVG 0x2F /* Y/G Channel Average value */
-#define REG32 0x32 /* Common Control 32 */
-#define REG32_PCLK_DIV_2 0x80 /* PCLK freq divided by 2 */
-#define REG32_PCLK_DIV_4 0xC0 /* PCLK freq divided by 4 */
-#define ARCOM2 0x34 /* Zoom: Horizontal start point */
-#define REG45 0x45 /* Register 45 */
-#define FLL 0x46 /* Frame Length Adjustment LSBs */
-#define FLH 0x47 /* Frame Length Adjustment MSBs */
-#define COM19 0x48 /* Zoom: Vertical start point */
-#define ZOOMS 0x49 /* Zoom: Vertical start point */
-#define COM22 0x4B /* Flash light control */
-#define COM25 0x4E /* For Banding operations */
-#define BD50 0x4F /* 50Hz Banding AEC 8 LSBs */
-#define BD60 0x50 /* 60Hz Banding AEC 8 LSBs */
-#define REG5D 0x5D /* AVGsel[7:0], 16-zone average weight option */
-#define REG5E 0x5E /* AVGsel[15:8], 16-zone average weight option */
-#define REG5F 0x5F /* AVGsel[23:16], 16-zone average weight option */
-#define REG60 0x60 /* AVGsel[31:24], 16-zone average weight option */
-#define HISTO_LOW 0x61 /* Histogram Algorithm Low Level */
-#define HISTO_HIGH 0x62 /* Histogram Algorithm High Level */
-
-/*
- * ID
- */
-#define MANUFACTURER_ID 0x7FA2
-#define PID_OV2640 0x2642
-#define VERSION(pid, ver) ((pid << 8) | (ver & 0xFF))
-
-/*
- * Struct
- */
-struct regval_list {
- u8 reg_num;
- u8 value;
-};
-
-struct ov2640_win_size {
- char *name;
- u32 width;
- u32 height;
- const struct regval_list *regs;
-};
-
-
-struct ov2640_priv {
- struct v4l2_subdev subdev;
- struct v4l2_ctrl_handler hdl;
- u32 cfmt_code;
- struct v4l2_clk *clk;
- const struct ov2640_win_size *win;
-
- struct soc_camera_subdev_desc ssdd_dt;
- struct gpio_desc *resetb_gpio;
- struct gpio_desc *pwdn_gpio;
-};
-
-/*
- * Registers settings
- */
-
-#define ENDMARKER { 0xff, 0xff }
-
-static const struct regval_list ov2640_init_regs[] = {
- { BANK_SEL, BANK_SEL_DSP },
- { 0x2c, 0xff },
- { 0x2e, 0xdf },
- { BANK_SEL, BANK_SEL_SENS },
- { 0x3c, 0x32 },
- { CLKRC, CLKRC_DIV_SET(1) },
- { COM2, COM2_OCAP_Nx_SET(3) },
- { REG04, REG04_DEF | REG04_HREF_EN },
- { COM8, COM8_DEF | COM8_BNDF_EN | COM8_AGC_EN | COM8_AEC_EN },
- { COM9, COM9_AGC_GAIN_8x | 0x08},
- { 0x2c, 0x0c },
- { 0x33, 0x78 },
- { 0x3a, 0x33 },
- { 0x3b, 0xfb },
- { 0x3e, 0x00 },
- { 0x43, 0x11 },
- { 0x16, 0x10 },
- { 0x39, 0x02 },
- { 0x35, 0x88 },
- { 0x22, 0x0a },
- { 0x37, 0x40 },
- { 0x23, 0x00 },
- { ARCOM2, 0xa0 },
- { 0x06, 0x02 },
- { 0x06, 0x88 },
- { 0x07, 0xc0 },
- { 0x0d, 0xb7 },
- { 0x0e, 0x01 },
- { 0x4c, 0x00 },
- { 0x4a, 0x81 },
- { 0x21, 0x99 },
- { AEW, 0x40 },
- { AEB, 0x38 },
- { VV, VV_HIGH_TH_SET(0x08) | VV_LOW_TH_SET(0x02) },
- { 0x5c, 0x00 },
- { 0x63, 0x00 },
- { FLL, 0x22 },
- { COM3, 0x38 | COM3_BAND_AUTO },
- { REG5D, 0x55 },
- { REG5E, 0x7d },
- { REG5F, 0x7d },
- { REG60, 0x55 },
- { HISTO_LOW, 0x70 },
- { HISTO_HIGH, 0x80 },
- { 0x7c, 0x05 },
- { 0x20, 0x80 },
- { 0x28, 0x30 },
- { 0x6c, 0x00 },
- { 0x6d, 0x80 },
- { 0x6e, 0x00 },
- { 0x70, 0x02 },
- { 0x71, 0x94 },
- { 0x73, 0xc1 },
- { 0x3d, 0x34 },
- { COM7, COM7_RES_UXGA | COM7_ZOOM_EN },
- { 0x5a, 0x57 },
- { BD50, 0xbb },
- { BD60, 0x9c },
- { BANK_SEL, BANK_SEL_DSP },
- { 0xe5, 0x7f },
- { MC_BIST, MC_BIST_RESET | MC_BIST_BOOT_ROM_SEL },
- { 0x41, 0x24 },
- { RESET, RESET_JPEG | RESET_DVP },
- { 0x76, 0xff },
- { 0x33, 0xa0 },
- { 0x42, 0x20 },
- { 0x43, 0x18 },
- { 0x4c, 0x00 },
- { CTRL3, CTRL3_BPC_EN | CTRL3_WPC_EN | 0x10 },
- { 0x88, 0x3f },
- { 0xd7, 0x03 },
- { 0xd9, 0x10 },
- { R_DVP_SP , R_DVP_SP_AUTO_MODE | 0x2 },
- { 0xc8, 0x08 },
- { 0xc9, 0x80 },
- { BPADDR, 0x00 },
- { BPDATA, 0x00 },
- { BPADDR, 0x03 },
- { BPDATA, 0x48 },
- { BPDATA, 0x48 },
- { BPADDR, 0x08 },
- { BPDATA, 0x20 },
- { BPDATA, 0x10 },
- { BPDATA, 0x0e },
- { 0x90, 0x00 },
- { 0x91, 0x0e },
- { 0x91, 0x1a },
- { 0x91, 0x31 },
- { 0x91, 0x5a },
- { 0x91, 0x69 },
- { 0x91, 0x75 },
- { 0x91, 0x7e },
- { 0x91, 0x88 },
- { 0x91, 0x8f },
- { 0x91, 0x96 },
- { 0x91, 0xa3 },
- { 0x91, 0xaf },
- { 0x91, 0xc4 },
- { 0x91, 0xd7 },
- { 0x91, 0xe8 },
- { 0x91, 0x20 },
- { 0x92, 0x00 },
- { 0x93, 0x06 },
- { 0x93, 0xe3 },
- { 0x93, 0x03 },
- { 0x93, 0x03 },
- { 0x93, 0x00 },
- { 0x93, 0x02 },
- { 0x93, 0x00 },
- { 0x93, 0x00 },
- { 0x93, 0x00 },
- { 0x93, 0x00 },
- { 0x93, 0x00 },
- { 0x93, 0x00 },
- { 0x93, 0x00 },
- { 0x96, 0x00 },
- { 0x97, 0x08 },
- { 0x97, 0x19 },
- { 0x97, 0x02 },
- { 0x97, 0x0c },
- { 0x97, 0x24 },
- { 0x97, 0x30 },
- { 0x97, 0x28 },
- { 0x97, 0x26 },
- { 0x97, 0x02 },
- { 0x97, 0x98 },
- { 0x97, 0x80 },
- { 0x97, 0x00 },
- { 0x97, 0x00 },
- { 0xa4, 0x00 },
- { 0xa8, 0x00 },
- { 0xc5, 0x11 },
- { 0xc6, 0x51 },
- { 0xbf, 0x80 },
- { 0xc7, 0x10 },
- { 0xb6, 0x66 },
- { 0xb8, 0xA5 },
- { 0xb7, 0x64 },
- { 0xb9, 0x7C },
- { 0xb3, 0xaf },
- { 0xb4, 0x97 },
- { 0xb5, 0xFF },
- { 0xb0, 0xC5 },
- { 0xb1, 0x94 },
- { 0xb2, 0x0f },
- { 0xc4, 0x5c },
- { 0xa6, 0x00 },
- { 0xa7, 0x20 },
- { 0xa7, 0xd8 },
- { 0xa7, 0x1b },
- { 0xa7, 0x31 },
- { 0xa7, 0x00 },
- { 0xa7, 0x18 },
- { 0xa7, 0x20 },
- { 0xa7, 0xd8 },
- { 0xa7, 0x19 },
- { 0xa7, 0x31 },
- { 0xa7, 0x00 },
- { 0xa7, 0x18 },
- { 0xa7, 0x20 },
- { 0xa7, 0xd8 },
- { 0xa7, 0x19 },
- { 0xa7, 0x31 },
- { 0xa7, 0x00 },
- { 0xa7, 0x18 },
- { 0x7f, 0x00 },
- { 0xe5, 0x1f },
- { 0xe1, 0x77 },
- { 0xdd, 0x7f },
- { CTRL0, CTRL0_YUV422 | CTRL0_YUV_EN | CTRL0_RGB_EN },
- ENDMARKER,
-};
-
-/*
- * Register settings for window size
- * The preamble, setup the internal DSP to input an UXGA (1600x1200) image.
- * Then the different zooming configurations will setup the output image size.
- */
-static const struct regval_list ov2640_size_change_preamble_regs[] = {
- { BANK_SEL, BANK_SEL_DSP },
- { RESET, RESET_DVP },
- { HSIZE8, HSIZE8_SET(UXGA_WIDTH) },
- { VSIZE8, VSIZE8_SET(UXGA_HEIGHT) },
- { CTRL2, CTRL2_DCW_EN | CTRL2_SDE_EN |
- CTRL2_UV_AVG_EN | CTRL2_CMX_EN | CTRL2_UV_ADJ_EN },
- { HSIZE, HSIZE_SET(UXGA_WIDTH) },
- { VSIZE, VSIZE_SET(UXGA_HEIGHT) },
- { XOFFL, XOFFL_SET(0) },
- { YOFFL, YOFFL_SET(0) },
- { VHYX, VHYX_HSIZE_SET(UXGA_WIDTH) | VHYX_VSIZE_SET(UXGA_HEIGHT) |
- VHYX_XOFF_SET(0) | VHYX_YOFF_SET(0)},
- { TEST, TEST_HSIZE_SET(UXGA_WIDTH) },
- ENDMARKER,
-};
-
-#define PER_SIZE_REG_SEQ(x, y, v_div, h_div, pclk_div) \
- { CTRLI, CTRLI_LP_DP | CTRLI_V_DIV_SET(v_div) | \
- CTRLI_H_DIV_SET(h_div)}, \
- { ZMOW, ZMOW_OUTW_SET(x) }, \
- { ZMOH, ZMOH_OUTH_SET(y) }, \
- { ZMHH, ZMHH_OUTW_SET(x) | ZMHH_OUTH_SET(y) }, \
- { R_DVP_SP, pclk_div }, \
- { RESET, 0x00}
-
-static const struct regval_list ov2640_qcif_regs[] = {
- PER_SIZE_REG_SEQ(QCIF_WIDTH, QCIF_HEIGHT, 3, 3, 4),
- ENDMARKER,
-};
-
-static const struct regval_list ov2640_qvga_regs[] = {
- PER_SIZE_REG_SEQ(QVGA_WIDTH, QVGA_HEIGHT, 2, 2, 4),
- ENDMARKER,
-};
-
-static const struct regval_list ov2640_cif_regs[] = {
- PER_SIZE_REG_SEQ(CIF_WIDTH, CIF_HEIGHT, 2, 2, 8),
- ENDMARKER,
-};
-
-static const struct regval_list ov2640_vga_regs[] = {
- PER_SIZE_REG_SEQ(VGA_WIDTH, VGA_HEIGHT, 0, 0, 2),
- ENDMARKER,
-};
-
-static const struct regval_list ov2640_svga_regs[] = {
- PER_SIZE_REG_SEQ(SVGA_WIDTH, SVGA_HEIGHT, 1, 1, 2),
- ENDMARKER,
-};
-
-static const struct regval_list ov2640_xga_regs[] = {
- PER_SIZE_REG_SEQ(XGA_WIDTH, XGA_HEIGHT, 0, 0, 2),
- { CTRLI, 0x00},
- ENDMARKER,
-};
-
-static const struct regval_list ov2640_sxga_regs[] = {
- PER_SIZE_REG_SEQ(SXGA_WIDTH, SXGA_HEIGHT, 0, 0, 2),
- { CTRLI, 0x00},
- { R_DVP_SP, 2 | R_DVP_SP_AUTO_MODE },
- ENDMARKER,
-};
-
-static const struct regval_list ov2640_uxga_regs[] = {
- PER_SIZE_REG_SEQ(UXGA_WIDTH, UXGA_HEIGHT, 0, 0, 0),
- { CTRLI, 0x00},
- { R_DVP_SP, 0 | R_DVP_SP_AUTO_MODE },
- ENDMARKER,
-};
-
-#define OV2640_SIZE(n, w, h, r) \
- {.name = n, .width = w , .height = h, .regs = r }
-
-static const struct ov2640_win_size ov2640_supported_win_sizes[] = {
- OV2640_SIZE("QCIF", QCIF_WIDTH, QCIF_HEIGHT, ov2640_qcif_regs),
- OV2640_SIZE("QVGA", QVGA_WIDTH, QVGA_HEIGHT, ov2640_qvga_regs),
- OV2640_SIZE("CIF", CIF_WIDTH, CIF_HEIGHT, ov2640_cif_regs),
- OV2640_SIZE("VGA", VGA_WIDTH, VGA_HEIGHT, ov2640_vga_regs),
- OV2640_SIZE("SVGA", SVGA_WIDTH, SVGA_HEIGHT, ov2640_svga_regs),
- OV2640_SIZE("XGA", XGA_WIDTH, XGA_HEIGHT, ov2640_xga_regs),
- OV2640_SIZE("SXGA", SXGA_WIDTH, SXGA_HEIGHT, ov2640_sxga_regs),
- OV2640_SIZE("UXGA", UXGA_WIDTH, UXGA_HEIGHT, ov2640_uxga_regs),
-};
-
-/*
- * Register settings for pixel formats
- */
-static const struct regval_list ov2640_format_change_preamble_regs[] = {
- { BANK_SEL, BANK_SEL_DSP },
- { R_BYPASS, R_BYPASS_USE_DSP },
- ENDMARKER,
-};
-
-static const struct regval_list ov2640_yuyv_regs[] = {
- { IMAGE_MODE, IMAGE_MODE_YUV422 },
- { 0xd7, 0x03 },
- { 0x33, 0xa0 },
- { 0xe5, 0x1f },
- { 0xe1, 0x67 },
- { RESET, 0x00 },
- { R_BYPASS, R_BYPASS_USE_DSP },
- ENDMARKER,
-};
-
-static const struct regval_list ov2640_uyvy_regs[] = {
- { IMAGE_MODE, IMAGE_MODE_LBYTE_FIRST | IMAGE_MODE_YUV422 },
- { 0xd7, 0x01 },
- { 0x33, 0xa0 },
- { 0xe1, 0x67 },
- { RESET, 0x00 },
- { R_BYPASS, R_BYPASS_USE_DSP },
- ENDMARKER,
-};
-
-static const struct regval_list ov2640_rgb565_be_regs[] = {
- { IMAGE_MODE, IMAGE_MODE_RGB565 },
- { 0xd7, 0x03 },
- { RESET, 0x00 },
- { R_BYPASS, R_BYPASS_USE_DSP },
- ENDMARKER,
-};
-
-static const struct regval_list ov2640_rgb565_le_regs[] = {
- { IMAGE_MODE, IMAGE_MODE_LBYTE_FIRST | IMAGE_MODE_RGB565 },
- { 0xd7, 0x03 },
- { RESET, 0x00 },
- { R_BYPASS, R_BYPASS_USE_DSP },
- ENDMARKER,
-};
-
-static u32 ov2640_codes[] = {
- MEDIA_BUS_FMT_YUYV8_2X8,
- MEDIA_BUS_FMT_UYVY8_2X8,
- MEDIA_BUS_FMT_RGB565_2X8_BE,
- MEDIA_BUS_FMT_RGB565_2X8_LE,
-};
-
-/*
- * General functions
- */
-static struct ov2640_priv *to_ov2640(const struct i2c_client *client)
-{
- return container_of(i2c_get_clientdata(client), struct ov2640_priv,
- subdev);
-}
-
-static int ov2640_write_array(struct i2c_client *client,
- const struct regval_list *vals)
-{
- int ret;
-
- while ((vals->reg_num != 0xff) || (vals->value != 0xff)) {
- ret = i2c_smbus_write_byte_data(client,
- vals->reg_num, vals->value);
- dev_vdbg(&client->dev, "array: 0x%02x, 0x%02x",
- vals->reg_num, vals->value);
-
- if (ret < 0)
- return ret;
- vals++;
- }
- return 0;
-}
-
-static int ov2640_mask_set(struct i2c_client *client,
- u8 reg, u8 mask, u8 set)
-{
- s32 val = i2c_smbus_read_byte_data(client, reg);
- if (val < 0)
- return val;
-
- val &= ~mask;
- val |= set & mask;
-
- dev_vdbg(&client->dev, "masks: 0x%02x, 0x%02x", reg, val);
-
- return i2c_smbus_write_byte_data(client, reg, val);
-}
-
-static int ov2640_reset(struct i2c_client *client)
-{
- int ret;
- const struct regval_list reset_seq[] = {
- {BANK_SEL, BANK_SEL_SENS},
- {COM7, COM7_SRST},
- ENDMARKER,
- };
-
- ret = ov2640_write_array(client, reset_seq);
- if (ret)
- goto err;
-
- msleep(5);
-err:
- dev_dbg(&client->dev, "%s: (ret %d)", __func__, ret);
- return ret;
-}
-
-/*
- * soc_camera_ops functions
- */
-static int ov2640_s_stream(struct v4l2_subdev *sd, int enable)
-{
- return 0;
-}
-
-static int ov2640_s_ctrl(struct v4l2_ctrl *ctrl)
-{
- struct v4l2_subdev *sd =
- &container_of(ctrl->handler, struct ov2640_priv, hdl)->subdev;
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- u8 val;
- int ret;
-
- ret = i2c_smbus_write_byte_data(client, BANK_SEL, BANK_SEL_SENS);
- if (ret < 0)
- return ret;
-
- switch (ctrl->id) {
- case V4L2_CID_VFLIP:
- val = ctrl->val ? REG04_VFLIP_IMG : 0x00;
- return ov2640_mask_set(client, REG04, REG04_VFLIP_IMG, val);
- case V4L2_CID_HFLIP:
- val = ctrl->val ? REG04_HFLIP_IMG : 0x00;
- return ov2640_mask_set(client, REG04, REG04_HFLIP_IMG, val);
- }
-
- return -EINVAL;
-}
-
-#ifdef CONFIG_VIDEO_ADV_DEBUG
-static int ov2640_g_register(struct v4l2_subdev *sd,
- struct v4l2_dbg_register *reg)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- int ret;
-
- reg->size = 1;
- if (reg->reg > 0xff)
- return -EINVAL;
-
- ret = i2c_smbus_read_byte_data(client, reg->reg);
- if (ret < 0)
- return ret;
-
- reg->val = ret;
-
- return 0;
-}
-
-static int ov2640_s_register(struct v4l2_subdev *sd,
- const struct v4l2_dbg_register *reg)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
-
- if (reg->reg > 0xff ||
- reg->val > 0xff)
- return -EINVAL;
-
- return i2c_smbus_write_byte_data(client, reg->reg, reg->val);
-}
-#endif
-
-static int ov2640_s_power(struct v4l2_subdev *sd, int on)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
- struct ov2640_priv *priv = to_ov2640(client);
-
- return soc_camera_set_power(&client->dev, ssdd, priv->clk, on);
-}
-
-/* Select the nearest higher resolution for capture */
-static const struct ov2640_win_size *ov2640_select_win(u32 *width, u32 *height)
-{
- int i, default_size = ARRAY_SIZE(ov2640_supported_win_sizes) - 1;
-
- for (i = 0; i < ARRAY_SIZE(ov2640_supported_win_sizes); i++) {
- if (ov2640_supported_win_sizes[i].width >= *width &&
- ov2640_supported_win_sizes[i].height >= *height) {
- *width = ov2640_supported_win_sizes[i].width;
- *height = ov2640_supported_win_sizes[i].height;
- return &ov2640_supported_win_sizes[i];
- }
- }
-
- *width = ov2640_supported_win_sizes[default_size].width;
- *height = ov2640_supported_win_sizes[default_size].height;
- return &ov2640_supported_win_sizes[default_size];
-}
-
-static int ov2640_set_params(struct i2c_client *client, u32 *width, u32 *height,
- u32 code)
-{
- struct ov2640_priv *priv = to_ov2640(client);
- const struct regval_list *selected_cfmt_regs;
- int ret;
-
- /* select win */
- priv->win = ov2640_select_win(width, height);
-
- /* select format */
- priv->cfmt_code = 0;
- switch (code) {
- case MEDIA_BUS_FMT_RGB565_2X8_BE:
- dev_dbg(&client->dev, "%s: Selected cfmt RGB565 BE", __func__);
- selected_cfmt_regs = ov2640_rgb565_be_regs;
- break;
- case MEDIA_BUS_FMT_RGB565_2X8_LE:
- dev_dbg(&client->dev, "%s: Selected cfmt RGB565 LE", __func__);
- selected_cfmt_regs = ov2640_rgb565_le_regs;
- break;
- case MEDIA_BUS_FMT_YUYV8_2X8:
- dev_dbg(&client->dev, "%s: Selected cfmt YUYV (YUV422)", __func__);
- selected_cfmt_regs = ov2640_yuyv_regs;
- break;
- default:
- case MEDIA_BUS_FMT_UYVY8_2X8:
- dev_dbg(&client->dev, "%s: Selected cfmt UYVY", __func__);
- selected_cfmt_regs = ov2640_uyvy_regs;
- }
-
- /* reset hardware */
- ov2640_reset(client);
-
- /* initialize the sensor with default data */
- dev_dbg(&client->dev, "%s: Init default", __func__);
- ret = ov2640_write_array(client, ov2640_init_regs);
- if (ret < 0)
- goto err;
-
- /* select preamble */
- dev_dbg(&client->dev, "%s: Set size to %s", __func__, priv->win->name);
- ret = ov2640_write_array(client, ov2640_size_change_preamble_regs);
- if (ret < 0)
- goto err;
-
- /* set size win */
- ret = ov2640_write_array(client, priv->win->regs);
- if (ret < 0)
- goto err;
-
- /* cfmt preamble */
- dev_dbg(&client->dev, "%s: Set cfmt", __func__);
- ret = ov2640_write_array(client, ov2640_format_change_preamble_regs);
- if (ret < 0)
- goto err;
-
- /* set cfmt */
- ret = ov2640_write_array(client, selected_cfmt_regs);
- if (ret < 0)
- goto err;
-
- priv->cfmt_code = code;
- *width = priv->win->width;
- *height = priv->win->height;
-
- return 0;
-
-err:
- dev_err(&client->dev, "%s: Error %d", __func__, ret);
- ov2640_reset(client);
- priv->win = NULL;
-
- return ret;
-}
-
-static int ov2640_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_format *format)
-{
- struct v4l2_mbus_framefmt *mf = &format->format;
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct ov2640_priv *priv = to_ov2640(client);
-
- if (format->pad)
- return -EINVAL;
-
- if (!priv->win) {
- u32 width = SVGA_WIDTH, height = SVGA_HEIGHT;
- priv->win = ov2640_select_win(&width, &height);
- priv->cfmt_code = MEDIA_BUS_FMT_UYVY8_2X8;
- }
-
- mf->width = priv->win->width;
- mf->height = priv->win->height;
- mf->code = priv->cfmt_code;
-
- switch (mf->code) {
- case MEDIA_BUS_FMT_RGB565_2X8_BE:
- case MEDIA_BUS_FMT_RGB565_2X8_LE:
- mf->colorspace = V4L2_COLORSPACE_SRGB;
- break;
- default:
- case MEDIA_BUS_FMT_YUYV8_2X8:
- case MEDIA_BUS_FMT_UYVY8_2X8:
- mf->colorspace = V4L2_COLORSPACE_JPEG;
- }
- mf->field = V4L2_FIELD_NONE;
-
- return 0;
-}
-
-static int ov2640_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_format *format)
-{
- struct v4l2_mbus_framefmt *mf = &format->format;
- struct i2c_client *client = v4l2_get_subdevdata(sd);
-
- if (format->pad)
- return -EINVAL;
-
- /*
- * select suitable win, but don't store it
- */
- ov2640_select_win(&mf->width, &mf->height);
-
- mf->field = V4L2_FIELD_NONE;
-
- switch (mf->code) {
- case MEDIA_BUS_FMT_RGB565_2X8_BE:
- case MEDIA_BUS_FMT_RGB565_2X8_LE:
- mf->colorspace = V4L2_COLORSPACE_SRGB;
- break;
- default:
- mf->code = MEDIA_BUS_FMT_UYVY8_2X8;
- case MEDIA_BUS_FMT_YUYV8_2X8:
- case MEDIA_BUS_FMT_UYVY8_2X8:
- mf->colorspace = V4L2_COLORSPACE_JPEG;
- }
-
- if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE)
- return ov2640_set_params(client, &mf->width,
- &mf->height, mf->code);
- cfg->try_fmt = *mf;
- return 0;
-}
-
-static int ov2640_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_mbus_code_enum *code)
-{
- if (code->pad || code->index >= ARRAY_SIZE(ov2640_codes))
- return -EINVAL;
-
- code->code = ov2640_codes[code->index];
- return 0;
-}
-
-static int ov2640_get_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_selection *sel)
-{
- if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE)
- return -EINVAL;
-
- switch (sel->target) {
- case V4L2_SEL_TGT_CROP_BOUNDS:
- case V4L2_SEL_TGT_CROP_DEFAULT:
- case V4L2_SEL_TGT_CROP:
- sel->r.left = 0;
- sel->r.top = 0;
- sel->r.width = UXGA_WIDTH;
- sel->r.height = UXGA_HEIGHT;
- return 0;
- default:
- return -EINVAL;
- }
-}
-
-static int ov2640_video_probe(struct i2c_client *client)
-{
- struct ov2640_priv *priv = to_ov2640(client);
- u8 pid, ver, midh, midl;
- const char *devname;
- int ret;
-
- ret = ov2640_s_power(&priv->subdev, 1);
- if (ret < 0)
- return ret;
-
- /*
- * check and show product ID and manufacturer ID
- */
- i2c_smbus_write_byte_data(client, BANK_SEL, BANK_SEL_SENS);
- pid = i2c_smbus_read_byte_data(client, PID);
- ver = i2c_smbus_read_byte_data(client, VER);
- midh = i2c_smbus_read_byte_data(client, MIDH);
- midl = i2c_smbus_read_byte_data(client, MIDL);
-
- switch (VERSION(pid, ver)) {
- case PID_OV2640:
- devname = "ov2640";
- break;
- default:
- dev_err(&client->dev,
- "Product ID error %x:%x\n", pid, ver);
- ret = -ENODEV;
- goto done;
- }
-
- dev_info(&client->dev,
- "%s Product ID %0x:%0x Manufacturer ID %x:%x\n",
- devname, pid, ver, midh, midl);
-
- ret = v4l2_ctrl_handler_setup(&priv->hdl);
-
-done:
- ov2640_s_power(&priv->subdev, 0);
- return ret;
-}
-
-static const struct v4l2_ctrl_ops ov2640_ctrl_ops = {
- .s_ctrl = ov2640_s_ctrl,
-};
-
-static struct v4l2_subdev_core_ops ov2640_subdev_core_ops = {
-#ifdef CONFIG_VIDEO_ADV_DEBUG
- .g_register = ov2640_g_register,
- .s_register = ov2640_s_register,
-#endif
- .s_power = ov2640_s_power,
-};
-
-static int ov2640_g_mbus_config(struct v4l2_subdev *sd,
- struct v4l2_mbus_config *cfg)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
-
- cfg->flags = V4L2_MBUS_PCLK_SAMPLE_RISING | V4L2_MBUS_MASTER |
- V4L2_MBUS_VSYNC_ACTIVE_HIGH | V4L2_MBUS_HSYNC_ACTIVE_HIGH |
- V4L2_MBUS_DATA_ACTIVE_HIGH;
- cfg->type = V4L2_MBUS_PARALLEL;
- cfg->flags = soc_camera_apply_board_flags(ssdd, cfg);
-
- return 0;
-}
-
-static struct v4l2_subdev_video_ops ov2640_subdev_video_ops = {
- .s_stream = ov2640_s_stream,
- .g_mbus_config = ov2640_g_mbus_config,
-};
-
-static const struct v4l2_subdev_pad_ops ov2640_subdev_pad_ops = {
- .enum_mbus_code = ov2640_enum_mbus_code,
- .get_selection = ov2640_get_selection,
- .get_fmt = ov2640_get_fmt,
- .set_fmt = ov2640_set_fmt,
-};
-
-static struct v4l2_subdev_ops ov2640_subdev_ops = {
- .core = &ov2640_subdev_core_ops,
- .video = &ov2640_subdev_video_ops,
- .pad = &ov2640_subdev_pad_ops,
-};
-
-/* OF probe functions */
-static int ov2640_hw_power(struct device *dev, int on)
-{
- struct i2c_client *client = to_i2c_client(dev);
- struct ov2640_priv *priv = to_ov2640(client);
-
- dev_dbg(&client->dev, "%s: %s the camera\n",
- __func__, on ? "ENABLE" : "DISABLE");
-
- if (priv->pwdn_gpio)
- gpiod_direction_output(priv->pwdn_gpio, !on);
-
- return 0;
-}
-
-static int ov2640_hw_reset(struct device *dev)
-{
- struct i2c_client *client = to_i2c_client(dev);
- struct ov2640_priv *priv = to_ov2640(client);
-
- if (priv->resetb_gpio) {
- /* Active the resetb pin to perform a reset pulse */
- gpiod_direction_output(priv->resetb_gpio, 1);
- usleep_range(3000, 5000);
- gpiod_direction_output(priv->resetb_gpio, 0);
- }
-
- return 0;
-}
-
-static int ov2640_probe_dt(struct i2c_client *client,
- struct ov2640_priv *priv)
-{
- /* Request the reset GPIO deasserted */
- priv->resetb_gpio = devm_gpiod_get_optional(&client->dev, "resetb",
- GPIOD_OUT_LOW);
- if (!priv->resetb_gpio)
- dev_dbg(&client->dev, "resetb gpio is not assigned!\n");
- else if (IS_ERR(priv->resetb_gpio))
- return PTR_ERR(priv->resetb_gpio);
-
- /* Request the power down GPIO asserted */
- priv->pwdn_gpio = devm_gpiod_get_optional(&client->dev, "pwdn",
- GPIOD_OUT_HIGH);
- if (!priv->pwdn_gpio)
- dev_dbg(&client->dev, "pwdn gpio is not assigned!\n");
- else if (IS_ERR(priv->pwdn_gpio))
- return PTR_ERR(priv->pwdn_gpio);
-
- /* Initialize the soc_camera_subdev_desc */
- priv->ssdd_dt.power = ov2640_hw_power;
- priv->ssdd_dt.reset = ov2640_hw_reset;
- client->dev.platform_data = &priv->ssdd_dt;
-
- return 0;
-}
-
-/*
- * i2c_driver functions
- */
-static int ov2640_probe(struct i2c_client *client,
- const struct i2c_device_id *did)
-{
- struct ov2640_priv *priv;
- struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
- struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
- int ret;
-
- if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) {
- dev_err(&adapter->dev,
- "OV2640: I2C-Adapter doesn't support SMBUS\n");
- return -EIO;
- }
-
- priv = devm_kzalloc(&client->dev, sizeof(struct ov2640_priv), GFP_KERNEL);
- if (!priv) {
- dev_err(&adapter->dev,
- "Failed to allocate memory for private data!\n");
- return -ENOMEM;
- }
-
- priv->clk = v4l2_clk_get(&client->dev, "xvclk");
- if (IS_ERR(priv->clk))
- return -EPROBE_DEFER;
-
- if (!ssdd && !client->dev.of_node) {
- dev_err(&client->dev, "Missing platform_data for driver\n");
- ret = -EINVAL;
- goto err_clk;
- }
-
- if (!ssdd) {
- ret = ov2640_probe_dt(client, priv);
- if (ret)
- goto err_clk;
- }
-
- v4l2_i2c_subdev_init(&priv->subdev, client, &ov2640_subdev_ops);
- v4l2_ctrl_handler_init(&priv->hdl, 2);
- v4l2_ctrl_new_std(&priv->hdl, &ov2640_ctrl_ops,
- V4L2_CID_VFLIP, 0, 1, 1, 0);
- v4l2_ctrl_new_std(&priv->hdl, &ov2640_ctrl_ops,
- V4L2_CID_HFLIP, 0, 1, 1, 0);
- priv->subdev.ctrl_handler = &priv->hdl;
- if (priv->hdl.error) {
- ret = priv->hdl.error;
- goto err_clk;
- }
-
- ret = ov2640_video_probe(client);
- if (ret < 0)
- goto err_videoprobe;
-
- ret = v4l2_async_register_subdev(&priv->subdev);
- if (ret < 0)
- goto err_videoprobe;
-
- dev_info(&adapter->dev, "OV2640 Probed\n");
-
- return 0;
-
-err_videoprobe:
- v4l2_ctrl_handler_free(&priv->hdl);
-err_clk:
- v4l2_clk_put(priv->clk);
- return ret;
-}
-
-static int ov2640_remove(struct i2c_client *client)
-{
- struct ov2640_priv *priv = to_ov2640(client);
-
- v4l2_async_unregister_subdev(&priv->subdev);
- v4l2_clk_put(priv->clk);
- v4l2_device_unregister_subdev(&priv->subdev);
- v4l2_ctrl_handler_free(&priv->hdl);
- return 0;
-}
-
-static const struct i2c_device_id ov2640_id[] = {
- { "ov2640", 0 },
- { }
-};
-MODULE_DEVICE_TABLE(i2c, ov2640_id);
-
-static const struct of_device_id ov2640_of_match[] = {
- {.compatible = "ovti,ov2640", },
- {},
-};
-MODULE_DEVICE_TABLE(of, ov2640_of_match);
-
-static struct i2c_driver ov2640_i2c_driver = {
- .driver = {
- .name = "ov2640",
- .of_match_table = of_match_ptr(ov2640_of_match),
- },
- .probe = ov2640_probe,
- .remove = ov2640_remove,
- .id_table = ov2640_id,
-};
-
-module_i2c_driver(ov2640_i2c_driver);
-
-MODULE_DESCRIPTION("SoC Camera driver for Omni Vision 2640 sensor");
-MODULE_AUTHOR("Alberto Panizzo");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/i2c/soc_camera/ov5642.c b/drivers/media/i2c/soc_camera/ov5642.c
index 3d185bd622a3..39f420db9c70 100644
--- a/drivers/media/i2c/soc_camera/ov5642.c
+++ b/drivers/media/i2c/soc_camera/ov5642.c
@@ -811,7 +811,7 @@ static int ov5642_set_fmt(struct v4l2_subdev *sd,
mf->field = V4L2_FIELD_NONE;
if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE)
- priv->fmt = ov5642_find_datafmt(mf->code);
+ priv->fmt = fmt;
else
cfg->try_fmt = *mf;
return 0;
@@ -943,7 +943,7 @@ static int ov5642_s_power(struct v4l2_subdev *sd, int on)
return ret;
}
-static struct v4l2_subdev_video_ops ov5642_subdev_video_ops = {
+static const struct v4l2_subdev_video_ops ov5642_subdev_video_ops = {
.g_mbus_config = ov5642_g_mbus_config,
};
@@ -955,7 +955,7 @@ static const struct v4l2_subdev_pad_ops ov5642_subdev_pad_ops = {
.set_fmt = ov5642_set_fmt,
};
-static struct v4l2_subdev_core_ops ov5642_subdev_core_ops = {
+static const struct v4l2_subdev_core_ops ov5642_subdev_core_ops = {
.s_power = ov5642_s_power,
#ifdef CONFIG_VIDEO_ADV_DEBUG
.g_register = ov5642_get_register,
@@ -963,7 +963,7 @@ static struct v4l2_subdev_core_ops ov5642_subdev_core_ops = {
#endif
};
-static struct v4l2_subdev_ops ov5642_subdev_ops = {
+static const struct v4l2_subdev_ops ov5642_subdev_ops = {
.core = &ov5642_subdev_core_ops,
.video = &ov5642_subdev_video_ops,
.pad = &ov5642_subdev_pad_ops,
@@ -1063,9 +1063,18 @@ static const struct i2c_device_id ov5642_id[] = {
};
MODULE_DEVICE_TABLE(i2c, ov5642_id);
+#if IS_ENABLED(CONFIG_OF)
+static const struct of_device_id ov5642_of_match[] = {
+ { .compatible = "ovti,ov5642" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, ov5642_of_match);
+#endif
+
static struct i2c_driver ov5642_i2c_driver = {
.driver = {
.name = "ov5642",
+ .of_match_table = of_match_ptr(ov5642_of_match),
},
.probe = ov5642_probe,
.remove = ov5642_remove,
diff --git a/drivers/media/i2c/soc_camera/ov6650.c b/drivers/media/i2c/soc_camera/ov6650.c
index 4bf2995e1cb8..dbd6d92c589f 100644
--- a/drivers/media/i2c/soc_camera/ov6650.c
+++ b/drivers/media/i2c/soc_camera/ov6650.c
@@ -885,7 +885,7 @@ static const struct v4l2_ctrl_ops ov6550_ctrl_ops = {
.s_ctrl = ov6550_s_ctrl,
};
-static struct v4l2_subdev_core_ops ov6650_core_ops = {
+static const struct v4l2_subdev_core_ops ov6650_core_ops = {
#ifdef CONFIG_VIDEO_ADV_DEBUG
.g_register = ov6650_get_register,
.s_register = ov6650_set_register,
@@ -942,7 +942,7 @@ static int ov6650_s_mbus_config(struct v4l2_subdev *sd,
return ret;
}
-static struct v4l2_subdev_video_ops ov6650_video_ops = {
+static const struct v4l2_subdev_video_ops ov6650_video_ops = {
.s_stream = ov6650_s_stream,
.g_parm = ov6650_g_parm,
.s_parm = ov6650_s_parm,
@@ -958,7 +958,7 @@ static const struct v4l2_subdev_pad_ops ov6650_pad_ops = {
.set_fmt = ov6650_set_fmt,
};
-static struct v4l2_subdev_ops ov6650_subdev_ops = {
+static const struct v4l2_subdev_ops ov6650_subdev_ops = {
.core = &ov6650_core_ops,
.video = &ov6650_video_ops,
.pad = &ov6650_pad_ops,
@@ -1033,7 +1033,7 @@ static int ov6650_probe(struct i2c_client *client,
priv->code = MEDIA_BUS_FMT_YUYV8_2X8;
priv->colorspace = V4L2_COLORSPACE_JPEG;
- priv->clk = v4l2_clk_get(&client->dev, "mclk");
+ priv->clk = v4l2_clk_get(&client->dev, NULL);
if (IS_ERR(priv->clk)) {
ret = PTR_ERR(priv->clk);
goto eclkget;
diff --git a/drivers/media/i2c/soc_camera/ov772x.c b/drivers/media/i2c/soc_camera/ov772x.c
index 985a3672b243..0f7b9d1b9c57 100644
--- a/drivers/media/i2c/soc_camera/ov772x.c
+++ b/drivers/media/i2c/soc_camera/ov772x.c
@@ -894,38 +894,15 @@ static int ov772x_get_fmt(struct v4l2_subdev *sd,
return 0;
}
-static int ov772x_s_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf)
-{
- struct ov772x_priv *priv = to_ov772x(sd);
- const struct ov772x_color_format *cfmt;
- const struct ov772x_win_size *win;
- int ret;
-
- ov772x_select_params(mf, &cfmt, &win);
-
- ret = ov772x_set_params(priv, cfmt, win);
- if (ret < 0)
- return ret;
-
- priv->win = win;
- priv->cfmt = cfmt;
-
- mf->code = cfmt->code;
- mf->width = win->rect.width;
- mf->height = win->rect.height;
- mf->field = V4L2_FIELD_NONE;
- mf->colorspace = cfmt->colorspace;
-
- return 0;
-}
-
static int ov772x_set_fmt(struct v4l2_subdev *sd,
struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *format)
{
+ struct ov772x_priv *priv = to_ov772x(sd);
struct v4l2_mbus_framefmt *mf = &format->format;
const struct ov772x_color_format *cfmt;
const struct ov772x_win_size *win;
+ int ret;
if (format->pad)
return -EINVAL;
@@ -938,9 +915,17 @@ static int ov772x_set_fmt(struct v4l2_subdev *sd,
mf->field = V4L2_FIELD_NONE;
mf->colorspace = cfmt->colorspace;
- if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE)
- return ov772x_s_fmt(sd, mf);
- cfg->try_fmt = *mf;
+ if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
+ cfg->try_fmt = *mf;
+ return 0;
+ }
+
+ ret = ov772x_set_params(priv, cfmt, win);
+ if (ret < 0)
+ return ret;
+
+ priv->win = win;
+ priv->cfmt = cfmt;
return 0;
}
@@ -993,7 +978,7 @@ static const struct v4l2_ctrl_ops ov772x_ctrl_ops = {
.s_ctrl = ov772x_s_ctrl,
};
-static struct v4l2_subdev_core_ops ov772x_subdev_core_ops = {
+static const struct v4l2_subdev_core_ops ov772x_subdev_core_ops = {
#ifdef CONFIG_VIDEO_ADV_DEBUG
.g_register = ov772x_g_register,
.s_register = ov772x_s_register,
@@ -1027,7 +1012,7 @@ static int ov772x_g_mbus_config(struct v4l2_subdev *sd,
return 0;
}
-static struct v4l2_subdev_video_ops ov772x_subdev_video_ops = {
+static const struct v4l2_subdev_video_ops ov772x_subdev_video_ops = {
.s_stream = ov772x_s_stream,
.g_mbus_config = ov772x_g_mbus_config,
};
@@ -1039,7 +1024,7 @@ static const struct v4l2_subdev_pad_ops ov772x_subdev_pad_ops = {
.set_fmt = ov772x_set_fmt,
};
-static struct v4l2_subdev_ops ov772x_subdev_ops = {
+static const struct v4l2_subdev_ops ov772x_subdev_ops = {
.core = &ov772x_subdev_core_ops,
.video = &ov772x_subdev_video_ops,
.pad = &ov772x_subdev_pad_ops,
diff --git a/drivers/media/i2c/soc_camera/ov9640.c b/drivers/media/i2c/soc_camera/ov9640.c
index 65085a235128..0146d1f7aacb 100644
--- a/drivers/media/i2c/soc_camera/ov9640.c
+++ b/drivers/media/i2c/soc_camera/ov9640.c
@@ -486,11 +486,8 @@ static int ov9640_s_fmt(struct v4l2_subdev *sd,
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
struct ov9640_reg_alt alts = {0};
- enum v4l2_colorspace cspace;
- u32 code = mf->code;
int ret;
- ov9640_res_roundup(&mf->width, &mf->height);
ov9640_alter_regs(mf->code, &alts);
ov9640_reset(client);
@@ -499,24 +496,7 @@ static int ov9640_s_fmt(struct v4l2_subdev *sd,
if (ret)
return ret;
- switch (code) {
- case MEDIA_BUS_FMT_RGB555_2X8_PADHI_LE:
- case MEDIA_BUS_FMT_RGB565_2X8_LE:
- cspace = V4L2_COLORSPACE_SRGB;
- break;
- default:
- code = MEDIA_BUS_FMT_UYVY8_2X8;
- case MEDIA_BUS_FMT_UYVY8_2X8:
- cspace = V4L2_COLORSPACE_JPEG;
- }
-
- ret = ov9640_write_regs(client, mf->width, code, &alts);
- if (!ret) {
- mf->code = code;
- mf->colorspace = cspace;
- }
-
- return ret;
+ return ov9640_write_regs(client, mf->width, mf->code, &alts);
}
static int ov9640_set_fmt(struct v4l2_subdev *sd,
@@ -539,8 +519,10 @@ static int ov9640_set_fmt(struct v4l2_subdev *sd,
break;
default:
mf->code = MEDIA_BUS_FMT_UYVY8_2X8;
+ /* fall through */
case MEDIA_BUS_FMT_UYVY8_2X8:
mf->colorspace = V4L2_COLORSPACE_JPEG;
+ break;
}
if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE)
@@ -637,7 +619,7 @@ static const struct v4l2_ctrl_ops ov9640_ctrl_ops = {
.s_ctrl = ov9640_s_ctrl,
};
-static struct v4l2_subdev_core_ops ov9640_core_ops = {
+static const struct v4l2_subdev_core_ops ov9640_core_ops = {
#ifdef CONFIG_VIDEO_ADV_DEBUG
.g_register = ov9640_get_register,
.s_register = ov9640_set_register,
@@ -661,7 +643,7 @@ static int ov9640_g_mbus_config(struct v4l2_subdev *sd,
return 0;
}
-static struct v4l2_subdev_video_ops ov9640_video_ops = {
+static const struct v4l2_subdev_video_ops ov9640_video_ops = {
.s_stream = ov9640_s_stream,
.g_mbus_config = ov9640_g_mbus_config,
};
@@ -672,7 +654,7 @@ static const struct v4l2_subdev_pad_ops ov9640_pad_ops = {
.set_fmt = ov9640_set_fmt,
};
-static struct v4l2_subdev_ops ov9640_subdev_ops = {
+static const struct v4l2_subdev_ops ov9640_subdev_ops = {
.core = &ov9640_core_ops,
.video = &ov9640_video_ops,
.pad = &ov9640_pad_ops,
diff --git a/drivers/media/i2c/soc_camera/ov9740.c b/drivers/media/i2c/soc_camera/ov9740.c
index f11f76cdacad..cc07b7ae5407 100644
--- a/drivers/media/i2c/soc_camera/ov9740.c
+++ b/drivers/media/i2c/soc_camera/ov9740.c
@@ -673,20 +673,8 @@ static int ov9740_s_fmt(struct v4l2_subdev *sd,
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
struct ov9740_priv *priv = to_ov9740(sd);
- enum v4l2_colorspace cspace;
- u32 code = mf->code;
int ret;
- ov9740_res_roundup(&mf->width, &mf->height);
-
- switch (code) {
- case MEDIA_BUS_FMT_YUYV8_2X8:
- cspace = V4L2_COLORSPACE_SRGB;
- break;
- default:
- return -EINVAL;
- }
-
ret = ov9740_reg_write_array(client, ov9740_defaults,
ARRAY_SIZE(ov9740_defaults));
if (ret < 0)
@@ -696,11 +684,7 @@ static int ov9740_s_fmt(struct v4l2_subdev *sd,
if (ret < 0)
return ret;
- mf->code = code;
- mf->colorspace = cspace;
-
- memcpy(&priv->current_mf, mf, sizeof(struct v4l2_mbus_framefmt));
-
+ priv->current_mf = *mf;
return ret;
}
@@ -907,12 +891,12 @@ static int ov9740_g_mbus_config(struct v4l2_subdev *sd,
return 0;
}
-static struct v4l2_subdev_video_ops ov9740_video_ops = {
+static const struct v4l2_subdev_video_ops ov9740_video_ops = {
.s_stream = ov9740_s_stream,
.g_mbus_config = ov9740_g_mbus_config,
};
-static struct v4l2_subdev_core_ops ov9740_core_ops = {
+static const struct v4l2_subdev_core_ops ov9740_core_ops = {
.s_power = ov9740_s_power,
#ifdef CONFIG_VIDEO_ADV_DEBUG
.g_register = ov9740_get_register,
@@ -926,7 +910,7 @@ static const struct v4l2_subdev_pad_ops ov9740_pad_ops = {
.set_fmt = ov9740_set_fmt,
};
-static struct v4l2_subdev_ops ov9740_subdev_ops = {
+static const struct v4l2_subdev_ops ov9740_subdev_ops = {
.core = &ov9740_core_ops,
.video = &ov9740_video_ops,
.pad = &ov9740_pad_ops,
diff --git a/drivers/media/i2c/soc_camera/rj54n1cb0c.c b/drivers/media/i2c/soc_camera/rj54n1cb0c.c
index bc8ec59a3fbd..02398d0bc649 100644
--- a/drivers/media/i2c/soc_camera/rj54n1cb0c.c
+++ b/drivers/media/i2c/soc_camera/rj54n1cb0c.c
@@ -1213,7 +1213,7 @@ static const struct v4l2_ctrl_ops rj54n1_ctrl_ops = {
.s_ctrl = rj54n1_s_ctrl,
};
-static struct v4l2_subdev_core_ops rj54n1_subdev_core_ops = {
+static const struct v4l2_subdev_core_ops rj54n1_subdev_core_ops = {
#ifdef CONFIG_VIDEO_ADV_DEBUG
.g_register = rj54n1_g_register,
.s_register = rj54n1_s_register,
@@ -1251,7 +1251,7 @@ static int rj54n1_s_mbus_config(struct v4l2_subdev *sd,
return reg_write(client, RJ54N1_OUT_SIGPO, 0);
}
-static struct v4l2_subdev_video_ops rj54n1_subdev_video_ops = {
+static const struct v4l2_subdev_video_ops rj54n1_subdev_video_ops = {
.s_stream = rj54n1_s_stream,
.g_mbus_config = rj54n1_g_mbus_config,
.s_mbus_config = rj54n1_s_mbus_config,
@@ -1265,7 +1265,7 @@ static const struct v4l2_subdev_pad_ops rj54n1_subdev_pad_ops = {
.set_fmt = rj54n1_set_fmt,
};
-static struct v4l2_subdev_ops rj54n1_subdev_ops = {
+static const struct v4l2_subdev_ops rj54n1_subdev_ops = {
.core = &rj54n1_subdev_core_ops,
.video = &rj54n1_subdev_video_ops,
.pad = &rj54n1_subdev_pad_ops,
diff --git a/drivers/media/i2c/soc_camera/tw9910.c b/drivers/media/i2c/soc_camera/tw9910.c
index c9c49ed707b8..bdb5e0a431e9 100644
--- a/drivers/media/i2c/soc_camera/tw9910.c
+++ b/drivers/media/i2c/soc_camera/tw9910.c
@@ -837,7 +837,7 @@ done:
return ret;
}
-static struct v4l2_subdev_core_ops tw9910_subdev_core_ops = {
+static const struct v4l2_subdev_core_ops tw9910_subdev_core_ops = {
#ifdef CONFIG_VIDEO_ADV_DEBUG
.g_register = tw9910_g_register,
.s_register = tw9910_s_register,
@@ -901,7 +901,7 @@ static int tw9910_g_tvnorms(struct v4l2_subdev *sd, v4l2_std_id *norm)
return 0;
}
-static struct v4l2_subdev_video_ops tw9910_subdev_video_ops = {
+static const struct v4l2_subdev_video_ops tw9910_subdev_video_ops = {
.s_std = tw9910_s_std,
.g_std = tw9910_g_std,
.s_stream = tw9910_s_stream,
@@ -917,7 +917,7 @@ static const struct v4l2_subdev_pad_ops tw9910_subdev_pad_ops = {
.set_fmt = tw9910_set_fmt,
};
-static struct v4l2_subdev_ops tw9910_subdev_ops = {
+static const struct v4l2_subdev_ops tw9910_subdev_ops = {
.core = &tw9910_subdev_core_ops,
.video = &tw9910_subdev_video_ops,
.pad = &tw9910_subdev_pad_ops,
diff --git a/drivers/media/i2c/tc358743.c b/drivers/media/i2c/tc358743.c
index f569a05fe105..acef4eca269f 100644
--- a/drivers/media/i2c/tc358743.c
+++ b/drivers/media/i2c/tc358743.c
@@ -194,57 +194,61 @@ static void i2c_wr(struct v4l2_subdev *sd, u16 reg, u8 *values, u32 n)
}
}
-static u8 i2c_rd8(struct v4l2_subdev *sd, u16 reg)
+static noinline u32 i2c_rdreg(struct v4l2_subdev *sd, u16 reg, u32 n)
{
- u8 val;
+ __le32 val = 0;
+
+ i2c_rd(sd, reg, (u8 __force *)&val, n);
- i2c_rd(sd, reg, &val, 1);
+ return le32_to_cpu(val);
+}
+
+static noinline void i2c_wrreg(struct v4l2_subdev *sd, u16 reg, u32 val, u32 n)
+{
+ __le32 raw = cpu_to_le32(val);
- return val;
+ i2c_wr(sd, reg, (u8 __force *)&raw, n);
+}
+
+static u8 i2c_rd8(struct v4l2_subdev *sd, u16 reg)
+{
+ return i2c_rdreg(sd, reg, 1);
}
static void i2c_wr8(struct v4l2_subdev *sd, u16 reg, u8 val)
{
- i2c_wr(sd, reg, &val, 1);
+ i2c_wrreg(sd, reg, val, 1);
}
static void i2c_wr8_and_or(struct v4l2_subdev *sd, u16 reg,
u8 mask, u8 val)
{
- i2c_wr8(sd, reg, (i2c_rd8(sd, reg) & mask) | val);
+ i2c_wrreg(sd, reg, (i2c_rdreg(sd, reg, 2) & mask) | val, 2);
}
static u16 i2c_rd16(struct v4l2_subdev *sd, u16 reg)
{
- u16 val;
-
- i2c_rd(sd, reg, (u8 *)&val, 2);
-
- return val;
+ return i2c_rdreg(sd, reg, 2);
}
static void i2c_wr16(struct v4l2_subdev *sd, u16 reg, u16 val)
{
- i2c_wr(sd, reg, (u8 *)&val, 2);
+ i2c_wrreg(sd, reg, val, 2);
}
static void i2c_wr16_and_or(struct v4l2_subdev *sd, u16 reg, u16 mask, u16 val)
{
- i2c_wr16(sd, reg, (i2c_rd16(sd, reg) & mask) | val);
+ i2c_wrreg(sd, reg, (i2c_rdreg(sd, reg, 2) & mask) | val, 2);
}
static u32 i2c_rd32(struct v4l2_subdev *sd, u16 reg)
{
- u32 val;
-
- i2c_rd(sd, reg, (u8 *)&val, 4);
-
- return val;
+ return i2c_rdreg(sd, reg, 4);
}
static void i2c_wr32(struct v4l2_subdev *sd, u16 reg, u32 val)
{
- i2c_wr(sd, reg, (u8 *)&val, 4);
+ i2c_wrreg(sd, reg, val, 4);
}
/* --------------- STATUS --------------- */
@@ -1227,7 +1231,7 @@ static int tc358743_g_register(struct v4l2_subdev *sd,
reg->size = tc358743_get_reg_size(reg->reg);
- i2c_rd(sd, reg->reg, (u8 *)&reg->val, reg->size);
+ reg->val = i2c_rdreg(sd, reg->reg, reg->size);
return 0;
}
@@ -1253,7 +1257,7 @@ static int tc358743_s_register(struct v4l2_subdev *sd,
reg->reg == BCAPS)
return 0;
- i2c_wr(sd, (u16)reg->reg, (u8 *)&reg->val,
+ i2c_wrreg(sd, (u16)reg->reg, reg->val,
tc358743_get_reg_size(reg->reg));
return 0;
@@ -1459,6 +1463,10 @@ static int tc358743_g_mbus_config(struct v4l2_subdev *sd,
static int tc358743_s_stream(struct v4l2_subdev *sd, int enable)
{
enable_stream(sd, enable);
+ if (!enable) {
+ /* Put all lanes in LP-11 state (STOPSTATE) */
+ tc358743_set_csi(sd);
+ }
return 0;
}
@@ -1951,9 +1959,18 @@ static struct i2c_device_id tc358743_id[] = {
MODULE_DEVICE_TABLE(i2c, tc358743_id);
+#if IS_ENABLED(CONFIG_OF)
+static const struct of_device_id tc358743_of_match[] = {
+ { .compatible = "toshiba,tc358743" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, tc358743_of_match);
+#endif
+
static struct i2c_driver tc358743_driver = {
.driver = {
.name = "tc358743",
+ .of_match_table = of_match_ptr(tc358743_of_match),
},
.probe = tc358743_probe,
.remove = tc358743_remove,
diff --git a/drivers/media/i2c/tvp5150.c b/drivers/media/i2c/tvp5150.c
index 48646a7f3fb0..04e96b3057bb 100644
--- a/drivers/media/i2c/tvp5150.c
+++ b/drivers/media/i2c/tvp5150.c
@@ -865,13 +865,13 @@ static int tvp5150_fill_fmt(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *f;
struct tvp5150 *decoder = to_tvp5150(sd);
- if (!format || format->pad)
+ if (!format || (format->pad != DEMOD_PAD_VID_OUT))
return -EINVAL;
f = &format->format;
f->width = decoder->rect.width;
- f->height = decoder->rect.height / 2;
+ f->height = decoder->rect.height;
f->code = MEDIA_BUS_FMT_UYVY8_2X8;
f->field = V4L2_FIELD_ALTERNATE;
diff --git a/drivers/media/media-devnode.c b/drivers/media/media-devnode.c
index ae46753c90cb..423248f577b6 100644
--- a/drivers/media/media-devnode.c
+++ b/drivers/media/media-devnode.c
@@ -248,31 +248,22 @@ int __must_check media_devnode_register(struct media_device *mdev,
dev_set_name(&devnode->dev, "media%d", devnode->minor);
device_initialize(&devnode->dev);
- /* Part 2: Initialize and register the character device */
+ /* Part 2: Initialize the character device */
cdev_init(&devnode->cdev, &media_devnode_fops);
devnode->cdev.owner = owner;
- devnode->cdev.kobj.parent = &devnode->dev.kobj;
- ret = cdev_add(&devnode->cdev, MKDEV(MAJOR(media_dev_t), devnode->minor), 1);
+ /* Part 3: Add the media and char device */
+ ret = cdev_device_add(&devnode->cdev, &devnode->dev);
if (ret < 0) {
- pr_err("%s: cdev_add failed\n", __func__);
+ pr_err("%s: cdev_device_add failed\n", __func__);
goto cdev_add_error;
}
- /* Part 3: Add the media device */
- ret = device_add(&devnode->dev);
- if (ret < 0) {
- pr_err("%s: device_add failed\n", __func__);
- goto device_add_error;
- }
-
/* Part 4: Activate this minor. The char device can now be used. */
set_bit(MEDIA_FLAG_REGISTERED, &devnode->flags);
return 0;
-device_add_error:
- cdev_del(&devnode->cdev);
cdev_add_error:
mutex_lock(&media_devnode_lock);
clear_bit(devnode->minor, media_devnode_nums);
@@ -298,9 +289,8 @@ void media_devnode_unregister(struct media_devnode *devnode)
{
mutex_lock(&media_devnode_lock);
/* Delete the cdev on this minor as well */
- cdev_del(&devnode->cdev);
+ cdev_device_del(&devnode->cdev, &devnode->dev);
mutex_unlock(&media_devnode_lock);
- device_del(&devnode->dev);
devnode->media_dev = NULL;
put_device(&devnode->dev);
}
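
Editor's sketch: the media-devnode hunk above folds the separate cdev_add()/device_add() calls into cdev_device_add(), which registers the char device and the struct device together and unwinds both on failure; cdev_device_del() is the matching teardown. A minimal registration sketch, assuming a hypothetical node whose embedded struct device has already been through device_initialize() with dev.devt assigned:

#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/module.h>

struct my_node {
        struct device dev;      /* device_initialize()d, dev.devt assigned */
        struct cdev cdev;
};

static const struct file_operations my_fops = {
        .owner = THIS_MODULE,
};

static int my_node_register(struct my_node *node)
{
        cdev_init(&node->cdev, &my_fops);
        node->cdev.owner = THIS_MODULE;

        /* One call adds both; no manual cdev.kobj.parent setup and no
         * cdev_del() needed if the device part of the add fails. */
        return cdev_device_add(&node->cdev, &node->dev);
}

static void my_node_unregister(struct my_node *node)
{
        cdev_device_del(&node->cdev, &node->dev);
        put_device(&node->dev);
}
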
diff --git a/drivers/media/media-entity.c b/drivers/media/media-entity.c
index 5640ca29da8c..bc44193efa47 100644
--- a/drivers/media/media-entity.c
+++ b/drivers/media/media-entity.c
@@ -199,12 +199,12 @@ void media_gobj_create(struct media_device *mdev,
void media_gobj_destroy(struct media_gobj *gobj)
{
- dev_dbg_obj(__func__, gobj);
-
/* Do nothing if the object is not linked. */
if (gobj->mdev == NULL)
return;
+ dev_dbg_obj(__func__, gobj);
+
gobj->mdev->topology_version++;
/* Remove the object from mdev list */
diff --git a/drivers/media/pci/bt8xx/bttv-cards.c b/drivers/media/pci/bt8xx/bttv-cards.c
index a1b0f3193bc0..5cc42b426715 100644
--- a/drivers/media/pci/bt8xx/bttv-cards.c
+++ b/drivers/media/pci/bt8xx/bttv-cards.c
@@ -3717,7 +3717,7 @@ static void hauppauge_eeprom(struct bttv *btv)
{
struct tveeprom tv;
- tveeprom_hauppauge_analog(&btv->i2c_client, &tv, eeprom_data);
+ tveeprom_hauppauge_analog(&tv, eeprom_data);
btv->tuner_type = tv.tuner_type;
btv->has_radio = tv.has_radio;
diff --git a/drivers/media/pci/bt8xx/bttv-driver.c b/drivers/media/pci/bt8xx/bttv-driver.c
index fb4aefbcc8f8..ed319f18ba48 100644
--- a/drivers/media/pci/bt8xx/bttv-driver.c
+++ b/drivers/media/pci/bt8xx/bttv-driver.c
@@ -4043,9 +4043,7 @@ static int bttv_probe(struct pci_dev *dev, const struct pci_device_id *pci_id)
INIT_LIST_HEAD(&btv->capture);
INIT_LIST_HEAD(&btv->vcapture);
- init_timer(&btv->timeout);
- btv->timeout.function = bttv_irq_timeout;
- btv->timeout.data = (unsigned long)btv;
+ setup_timer(&btv->timeout, bttv_irq_timeout, (unsigned long)btv);
btv->i2c_rc = -1;
btv->tuner_type = UNSET;
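
Editor's sketch: this hunk and several later ones (cx18, ivtv, netup_unidvb, saa7134, av7110) replace init_timer() plus manual .function/.data assignment with setup_timer(), which does the same thing in one call. A minimal sketch under the timer API of this era, where the callback still takes an unsigned long cookie; the structure and names below are illustrative:

#include <linux/jiffies.h>
#include <linux/timer.h>

struct my_dev {
        struct timer_list timeout;
};

static void my_timeout(unsigned long data)
{
        struct my_dev *dev = (struct my_dev *)data;

        /* ... handle the expired timeout using dev ... */
        (void)dev;
}

static void my_dev_init(struct my_dev *dev)
{
        /* Equivalent to init_timer() plus setting .function and .data. */
        setup_timer(&dev->timeout, my_timeout, (unsigned long)dev);

        /* Arm it, e.g. 100 ms from now. */
        mod_timer(&dev->timeout, jiffies + msecs_to_jiffies(100));
}
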
diff --git a/drivers/media/pci/cx18/cx18-driver.c b/drivers/media/pci/cx18/cx18-driver.c
index 206db81ef78e..8bce49cdad46 100644
--- a/drivers/media/pci/cx18/cx18-driver.c
+++ b/drivers/media/pci/cx18/cx18-driver.c
@@ -339,7 +339,7 @@ void cx18_read_eeprom(struct cx18 *cx, struct tveeprom *tv)
case CX18_CARD_HVR_1600_ESMT:
case CX18_CARD_HVR_1600_SAMSUNG:
case CX18_CARD_HVR_1600_S5H1411:
- tveeprom_hauppauge_analog(c, tv, eedata);
+ tveeprom_hauppauge_analog(tv, eedata);
break;
case CX18_CARD_YUAN_MPC718:
case CX18_CARD_GOTVIEW_PCI_DVD3:
diff --git a/drivers/media/pci/cx18/cx18-streams.c b/drivers/media/pci/cx18/cx18-streams.c
index 7c9381448966..3c45e0071530 100644
--- a/drivers/media/pci/cx18/cx18-streams.c
+++ b/drivers/media/pci/cx18/cx18-streams.c
@@ -282,9 +282,7 @@ static void cx18_stream_init(struct cx18 *cx, int type)
INIT_WORK(&s->out_work_order, cx18_out_work_handler);
INIT_LIST_HEAD(&s->vb_capture);
- s->vb_timeout.function = cx18_vb_timeout;
- s->vb_timeout.data = (unsigned long)s;
- init_timer(&s->vb_timeout);
+ setup_timer(&s->vb_timeout, cx18_vb_timeout, (unsigned long)s);
spin_lock_init(&s->vb_lock);
if (type == CX18_ENC_STREAM_TYPE_YUV) {
spin_lock_init(&s->vbuf_q_lock);
diff --git a/drivers/media/pci/cx23885/cx23885-cards.c b/drivers/media/pci/cx23885/cx23885-cards.c
index 0350f13c5a9f..9e39aea85df6 100644
--- a/drivers/media/pci/cx23885/cx23885-cards.c
+++ b/drivers/media/pci/cx23885/cx23885-cards.c
@@ -1143,8 +1143,7 @@ static void hauppauge_eeprom(struct cx23885_dev *dev, u8 *eeprom_data)
{
struct tveeprom tv;
- tveeprom_hauppauge_analog(&dev->i2c_bus[0].i2c_client, &tv,
- eeprom_data);
+ tveeprom_hauppauge_analog(&tv, eeprom_data);
/* Make sure we support the board model */
switch (tv.model) {
diff --git a/drivers/media/pci/cx88/cx88-cards.c b/drivers/media/pci/cx88/cx88-cards.c
index cdfbde277b8b..73cc7a67a8bc 100644
--- a/drivers/media/pci/cx88/cx88-cards.c
+++ b/drivers/media/pci/cx88/cx88-cards.c
@@ -2854,7 +2854,7 @@ static void hauppauge_eeprom(struct cx88_core *core, u8 *eeprom_data)
{
struct tveeprom tv;
- tveeprom_hauppauge_analog(&core->i2c_client, &tv, eeprom_data);
+ tveeprom_hauppauge_analog(&tv, eeprom_data);
core->board.tuner_type = tv.tuner_type;
core->tuner_formats = tv.tuner_formats;
core->board.radio.type = tv.has_radio ? CX88_RADIO : 0;
@@ -3670,7 +3670,7 @@ struct cx88_core *cx88_core_create(struct pci_dev *pci, int nr)
if (!core)
return NULL;
- atomic_inc(&core->refcount);
+ refcount_set(&core->refcount, 1);
core->pci_bus = pci->bus->number;
core->pci_slot = PCI_SLOT(pci->devfn);
core->pci_irqmask = PCI_INT_RISC_RD_BERRINT | PCI_INT_RISC_WR_BERRINT |
diff --git a/drivers/media/pci/cx88/cx88-core.c b/drivers/media/pci/cx88/cx88-core.c
index 973a9cd4c635..8bfa5b7ed91b 100644
--- a/drivers/media/pci/cx88/cx88-core.c
+++ b/drivers/media/pci/cx88/cx88-core.c
@@ -1052,7 +1052,7 @@ struct cx88_core *cx88_core_get(struct pci_dev *pci)
mutex_unlock(&devlist);
return NULL;
}
- atomic_inc(&core->refcount);
+ refcount_inc(&core->refcount);
mutex_unlock(&devlist);
return core;
}
@@ -1073,7 +1073,7 @@ void cx88_core_put(struct cx88_core *core, struct pci_dev *pci)
release_mem_region(pci_resource_start(pci, 0),
pci_resource_len(pci, 0));
- if (!atomic_dec_and_test(&core->refcount))
+ if (!refcount_dec_and_test(&core->refcount))
return;
mutex_lock(&devlist);
diff --git a/drivers/media/pci/cx88/cx88-dvb.c b/drivers/media/pci/cx88/cx88-dvb.c
index ddf90678df34..49a335f4603e 100644
--- a/drivers/media/pci/cx88/cx88-dvb.c
+++ b/drivers/media/pci/cx88/cx88-dvb.c
@@ -306,7 +306,7 @@ static const struct zl10353_config cx88_terratec_cinergy_ht_pci_mkii_config = {
.if2 = 45600,
};
-static struct mb86a16_config twinhan_vp1027 = {
+static const struct mb86a16_config twinhan_vp1027 = {
.demod_address = 0x08,
};
diff --git a/drivers/media/pci/cx88/cx88.h b/drivers/media/pci/cx88/cx88.h
index 115414cf520f..6777926f20f2 100644
--- a/drivers/media/pci/cx88/cx88.h
+++ b/drivers/media/pci/cx88/cx88.h
@@ -24,6 +24,7 @@
#include <linux/i2c-algo-bit.h>
#include <linux/videodev2.h>
#include <linux/kdev_t.h>
+#include <linux/refcount.h>
#include <media/v4l2-device.h>
#include <media/v4l2-fh.h>
@@ -339,7 +340,7 @@ struct cx8802_dev;
struct cx88_core {
struct list_head devlist;
- atomic_t refcount;
+ refcount_t refcount;
/* board name */
int nr;
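
Editor's sketch: the cx88 hunks above convert the core refcount from atomic_t to refcount_t, whose API saturates on overflow/underflow instead of silently wrapping. A minimal object-lifetime sketch using the same three calls; the object and its kfree() release are illustrative only:

#include <linux/refcount.h>
#include <linux/slab.h>

struct my_obj {
        refcount_t refcount;
        /* ... payload ... */
};

static struct my_obj *my_obj_create(void)
{
        struct my_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

        if (!obj)
                return NULL;
        refcount_set(&obj->refcount, 1);        /* initial reference */
        return obj;
}

static void my_obj_get(struct my_obj *obj)
{
        refcount_inc(&obj->refcount);
}

static void my_obj_put(struct my_obj *obj)
{
        if (refcount_dec_and_test(&obj->refcount))
                kfree(obj);
}
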
diff --git a/drivers/media/pci/dm1105/dm1105.c b/drivers/media/pci/dm1105/dm1105.c
index a7724b78fbb4..1d41934cfaf5 100644
--- a/drivers/media/pci/dm1105/dm1105.c
+++ b/drivers/media/pci/dm1105/dm1105.c
@@ -815,7 +815,7 @@ static void dm1105_hw_exit(struct dm1105_dev *dev)
dm1105_dma_unmap(dev);
}
-static struct stv0299_config sharp_z0194a_config = {
+static const struct stv0299_config sharp_z0194a_config = {
.demod_address = 0x68,
.inittab = sharp_z0194a_inittab,
.mclk = 88000000UL,
diff --git a/drivers/media/pci/ivtv/ivtv-driver.c b/drivers/media/pci/ivtv/ivtv-driver.c
index e73c153285f0..e8fa99b6c7b4 100644
--- a/drivers/media/pci/ivtv/ivtv-driver.c
+++ b/drivers/media/pci/ivtv/ivtv-driver.c
@@ -409,7 +409,7 @@ void ivtv_read_eeprom(struct ivtv *itv, struct tveeprom *tv)
itv->i2c_client.addr = 0xA0 >> 1;
tveeprom_read(&itv->i2c_client, eedata, sizeof(eedata));
- tveeprom_hauppauge_analog(&itv->i2c_client, tv, eedata);
+ tveeprom_hauppauge_analog(tv, eedata);
}
static void ivtv_process_eeprom(struct ivtv *itv)
@@ -770,9 +770,8 @@ static int ivtv_init_struct1(struct ivtv *itv)
init_waitqueue_head(&itv->event_waitq);
init_waitqueue_head(&itv->vsync_waitq);
init_waitqueue_head(&itv->dma_waitq);
- init_timer(&itv->dma_timer);
- itv->dma_timer.function = ivtv_unfinished_dma;
- itv->dma_timer.data = (unsigned long)itv;
+ setup_timer(&itv->dma_timer, ivtv_unfinished_dma,
+ (unsigned long)itv);
itv->cur_dma_stream = -1;
itv->cur_pio_stream = -1;
diff --git a/drivers/media/pci/ivtv/ivtv-ioctl.c b/drivers/media/pci/ivtv/ivtv-ioctl.c
index f956188f7f19..670462d195b5 100644
--- a/drivers/media/pci/ivtv/ivtv-ioctl.c
+++ b/drivers/media/pci/ivtv/ivtv-ioctl.c
@@ -1506,10 +1506,8 @@ static int ivtv_subscribe_event(struct v4l2_fh *fh, const struct v4l2_event_subs
case V4L2_EVENT_VSYNC:
case V4L2_EVENT_EOS:
return v4l2_event_subscribe(fh, sub, 0, NULL);
- case V4L2_EVENT_CTRL:
- return v4l2_event_subscribe(fh, sub, 0, &v4l2_ctrl_sub_ev_ops);
default:
- return -EINVAL;
+ return v4l2_ctrl_subscribe_event(fh, sub);
}
}
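
Editor's sketch: the ivtv hunk above (and the solo6x10 and tw5864 hunks further down) routes the default case of the event-subscription handler to v4l2_ctrl_subscribe_event(), so V4L2_EVENT_CTRL no longer needs its own case and unknown types are still rejected by the control framework. A minimal sketch of such a handler; which events a driver lists explicitly varies:

#include <media/v4l2-ctrls.h>
#include <media/v4l2-event.h>
#include <media/v4l2-fh.h>

static int my_subscribe_event(struct v4l2_fh *fh,
                              const struct v4l2_event_subscription *sub)
{
        switch (sub->type) {
        case V4L2_EVENT_SOURCE_CHANGE:
                return v4l2_src_change_event_subscribe(fh, sub);
        default:
                /* Handles V4L2_EVENT_CTRL and rejects unknown types. */
                return v4l2_ctrl_subscribe_event(fh, sub);
        }
}
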
diff --git a/drivers/media/pci/ivtv/ivtv-udma.c b/drivers/media/pci/ivtv/ivtv-udma.c
index 2c9232ef7baa..3b33e87ed73b 100644
--- a/drivers/media/pci/ivtv/ivtv-udma.c
+++ b/drivers/media/pci/ivtv/ivtv-udma.c
@@ -76,7 +76,7 @@ void ivtv_udma_fill_sg_array (struct ivtv_user_dma *dma, u32 buffer_offset, u32
int i;
struct scatterlist *sg;
- for (i = 0, sg = dma->SGlist; i < dma->SG_length; i++, sg = sg_next(sg)) {
+ for_each_sg(dma->SGlist, sg, dma->SG_length, i) {
dma->SGarray[i].size = cpu_to_le32(sg_dma_len(sg));
dma->SGarray[i].src = cpu_to_le32(sg_dma_address(sg));
dma->SGarray[i].dst = cpu_to_le32(buffer_offset);
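
Editor's sketch: the ivtv-udma hunk above swaps a hand-rolled sg_next() walk for the for_each_sg() iterator, which also copes with chained scatterlist tables. A minimal sketch that sums the DMA lengths of a mapped table; the helper name is illustrative:

#include <linux/scatterlist.h>
#include <linux/types.h>

static u32 total_dma_len(struct scatterlist *sgl, int nents)
{
        struct scatterlist *sg;
        u32 total = 0;
        int i;

        /* The iterator advances with sg_next(), so chained tables work. */
        for_each_sg(sgl, sg, nents, i)
                total += sg_dma_len(sg);

        return total;
}
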
diff --git a/drivers/media/pci/mantis/mantis_vp1034.c b/drivers/media/pci/mantis/mantis_vp1034.c
index 3b1928594b12..e4972ff823c2 100644
--- a/drivers/media/pci/mantis/mantis_vp1034.c
+++ b/drivers/media/pci/mantis/mantis_vp1034.c
@@ -36,7 +36,7 @@
#include "mantis_vp1034.h"
#include "mantis_reg.h"
-static struct mb86a16_config vp1034_mb86a16_config = {
+static const struct mb86a16_config vp1034_mb86a16_config = {
.demod_address = 0x08,
.set_voltage = vp1034_set_voltage,
};
diff --git a/drivers/media/pci/netup_unidvb/netup_unidvb_core.c b/drivers/media/pci/netup_unidvb/netup_unidvb_core.c
index 191bd8299dc3..9444483fb942 100644
--- a/drivers/media/pci/netup_unidvb/netup_unidvb_core.c
+++ b/drivers/media/pci/netup_unidvb/netup_unidvb_core.c
@@ -663,9 +663,8 @@ static int netup_unidvb_dma_init(struct netup_unidvb_dev *ndev, int num)
spin_lock_init(&dma->lock);
INIT_WORK(&dma->work, netup_unidvb_dma_worker);
INIT_LIST_HEAD(&dma->free_buffers);
- dma->timeout.function = netup_unidvb_dma_timeout;
- dma->timeout.data = (unsigned long)dma;
- init_timer(&dma->timeout);
+ setup_timer(&dma->timeout, netup_unidvb_dma_timeout,
+ (unsigned long)dma);
dma->ring_buffer_size = ndev->dma_size / 2;
dma->addr_virt = ndev->dma_virt + dma->ring_buffer_size * num;
dma->addr_phys = (dma_addr_t)((u64)ndev->dma_phys +
diff --git a/drivers/media/pci/saa7134/saa7134-cards.c b/drivers/media/pci/saa7134/saa7134-cards.c
index 321253827997..f79380faf499 100644
--- a/drivers/media/pci/saa7134/saa7134-cards.c
+++ b/drivers/media/pci/saa7134/saa7134-cards.c
@@ -7319,7 +7319,7 @@ static void hauppauge_eeprom(struct saa7134_dev *dev, u8 *eeprom_data)
{
struct tveeprom tv;
- tveeprom_hauppauge_analog(&dev->i2c_client, &tv, eeprom_data);
+ tveeprom_hauppauge_analog(&tv, eeprom_data);
/* Make sure we support the board model */
switch (tv.model) {
diff --git a/drivers/media/pci/saa7134/saa7134-dvb.c b/drivers/media/pci/saa7134/saa7134-dvb.c
index efdece5ab11c..731dee0a66e7 100644
--- a/drivers/media/pci/saa7134/saa7134-dvb.c
+++ b/drivers/media/pci/saa7134/saa7134-dvb.c
@@ -1042,11 +1042,11 @@ static int md8800_set_high_voltage2(struct dvb_frontend *fe, long arg)
* nxt200x based ATSC cards, helper functions
*/
-static struct nxt200x_config avertvhda180 = {
+static const struct nxt200x_config avertvhda180 = {
.demod_address = 0x0a,
};
-static struct nxt200x_config kworldatsc110 = {
+static const struct nxt200x_config kworldatsc110 = {
.demod_address = 0x0a,
};
diff --git a/drivers/media/pci/saa7134/saa7134-ts.c b/drivers/media/pci/saa7134/saa7134-ts.c
index 578e03f8c041..7414878af9e0 100644
--- a/drivers/media/pci/saa7134/saa7134-ts.c
+++ b/drivers/media/pci/saa7134/saa7134-ts.c
@@ -223,9 +223,8 @@ int saa7134_ts_init1(struct saa7134_dev *dev)
dev->ts.nr_packets = ts_nr_packets;
INIT_LIST_HEAD(&dev->ts_q.queue);
- init_timer(&dev->ts_q.timeout);
- dev->ts_q.timeout.function = saa7134_buffer_timeout;
- dev->ts_q.timeout.data = (unsigned long)(&dev->ts_q);
+ setup_timer(&dev->ts_q.timeout, saa7134_buffer_timeout,
+ (unsigned long)(&dev->ts_q));
dev->ts_q.dev = dev;
dev->ts_q.need_two = 1;
dev->ts_started = 0;
diff --git a/drivers/media/pci/saa7134/saa7134-vbi.c b/drivers/media/pci/saa7134/saa7134-vbi.c
index 46193370e41a..bcad9b2d9bb3 100644
--- a/drivers/media/pci/saa7134/saa7134-vbi.c
+++ b/drivers/media/pci/saa7134/saa7134-vbi.c
@@ -181,9 +181,8 @@ struct vb2_ops saa7134_vbi_qops = {
int saa7134_vbi_init1(struct saa7134_dev *dev)
{
INIT_LIST_HEAD(&dev->vbi_q.queue);
- init_timer(&dev->vbi_q.timeout);
- dev->vbi_q.timeout.function = saa7134_buffer_timeout;
- dev->vbi_q.timeout.data = (unsigned long)(&dev->vbi_q);
+ setup_timer(&dev->vbi_q.timeout, saa7134_buffer_timeout,
+ (unsigned long)(&dev->vbi_q));
dev->vbi_q.dev = dev;
if (vbibufs < 2)
diff --git a/drivers/media/pci/saa7134/saa7134-video.c b/drivers/media/pci/saa7134/saa7134-video.c
index 4b1c4327f112..51d42bbf969e 100644
--- a/drivers/media/pci/saa7134/saa7134-video.c
+++ b/drivers/media/pci/saa7134/saa7134-video.c
@@ -2145,9 +2145,8 @@ int saa7134_video_init1(struct saa7134_dev *dev)
dev->automute = 0;
INIT_LIST_HEAD(&dev->video_q.queue);
- init_timer(&dev->video_q.timeout);
- dev->video_q.timeout.function = saa7134_buffer_timeout;
- dev->video_q.timeout.data = (unsigned long)(&dev->video_q);
+ setup_timer(&dev->video_q.timeout, saa7134_buffer_timeout,
+ (unsigned long)(&dev->video_q));
dev->video_q.dev = dev;
dev->fmt = format_by_fourcc(V4L2_PIX_FMT_BGR24);
dev->width = 720;
diff --git a/drivers/media/pci/saa7164/saa7164-cards.c b/drivers/media/pci/saa7164/saa7164-cards.c
index 0e1cd7e153ca..3af16062e79d 100644
--- a/drivers/media/pci/saa7164/saa7164-cards.c
+++ b/drivers/media/pci/saa7164/saa7164-cards.c
@@ -780,9 +780,7 @@ static void hauppauge_eeprom(struct saa7164_dev *dev, u8 *eeprom_data)
{
struct tveeprom tv;
- /* TODO: Assumption: eeprom on bus 0 */
- tveeprom_hauppauge_analog(&dev->i2c_bus[0].i2c_client, &tv,
- eeprom_data);
+ tveeprom_hauppauge_analog(&tv, eeprom_data);
/* Make sure we support the board model */
switch (tv.model) {
diff --git a/drivers/media/pci/saa7164/saa7164-cmd.c b/drivers/media/pci/saa7164/saa7164-cmd.c
index f55c177fd1e4..175015ca79f2 100644
--- a/drivers/media/pci/saa7164/saa7164-cmd.c
+++ b/drivers/media/pci/saa7164/saa7164-cmd.c
@@ -130,14 +130,13 @@ int saa7164_irq_dequeue(struct saa7164_dev *dev)
* -bus/c running buffer. */
static int saa7164_cmd_dequeue(struct saa7164_dev *dev)
{
- int loop = 1;
int ret;
u32 timeout;
wait_queue_head_t *q = NULL;
u8 tmp[512];
dprintk(DBGLVL_CMD, "%s()\n", __func__);
- while (loop) {
+ while (true) {
struct tmComResInfo tRsp = { 0, 0, 0, 0, 0, 0 };
ret = saa7164_bus_get(dev, &tRsp, NULL, 1);
@@ -178,8 +177,6 @@ static int saa7164_cmd_dequeue(struct saa7164_dev *dev)
wake_up(q);
return SAA_OK;
}
-
- return SAA_OK;
}
static int saa7164_cmd_set(struct saa7164_dev *dev, struct tmComResInfo *msg,
diff --git a/drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c b/drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c
index 25a2137ab799..25f9f2ebff1d 100644
--- a/drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c
+++ b/drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c
@@ -1140,14 +1140,13 @@ static int solo_subscribe_event(struct v4l2_fh *fh,
{
switch (sub->type) {
- case V4L2_EVENT_CTRL:
- return v4l2_ctrl_subscribe_event(fh, sub);
case V4L2_EVENT_MOTION_DET:
/* Allow for up to 30 events (1 second for NTSC) to be
* stored. */
return v4l2_event_subscribe(fh, sub, 30, NULL);
+ default:
+ return v4l2_ctrl_subscribe_event(fh, sub);
}
- return -EINVAL;
}
static const struct v4l2_file_operations solo_enc_fops = {
diff --git a/drivers/media/pci/solo6x10/solo6x10-v4l2.c b/drivers/media/pci/solo6x10/solo6x10-v4l2.c
index 896bec6627aa..3266fc21825f 100644
--- a/drivers/media/pci/solo6x10/solo6x10-v4l2.c
+++ b/drivers/media/pci/solo6x10/solo6x10-v4l2.c
@@ -341,6 +341,17 @@ static void solo_stop_streaming(struct vb2_queue *q)
struct solo_dev *solo_dev = vb2_get_drv_priv(q);
solo_stop_thread(solo_dev);
+
+ spin_lock(&solo_dev->slock);
+ while (!list_empty(&solo_dev->vidq_active)) {
+ struct solo_vb2_buf *buf = list_entry(
+ solo_dev->vidq_active.next,
+ struct solo_vb2_buf, list);
+
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
+ }
+ spin_unlock(&solo_dev->slock);
INIT_LIST_HEAD(&solo_dev->vidq_active);
}
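
Editor's sketch: the solo6x10 hunk above makes stop_streaming() hand every still-queued buffer back to videobuf2 in the error state, which vb2 requires before the queue can be torn down. A minimal helper in the same spirit, assuming a driver that keeps pending buffers on a spinlock-protected list; the structure and names are illustrative. A driver would call it from stop_streaming() with VB2_BUF_STATE_ERROR, and from a failed start_streaming() with VB2_BUF_STATE_QUEUED.

#include <linux/list.h>
#include <linux/spinlock.h>
#include <media/videobuf2-v4l2.h>

struct my_buf {
        struct vb2_v4l2_buffer vb;
        struct list_head list;
};

static void my_return_all_buffers(struct list_head *active,
                                  spinlock_t *lock,
                                  enum vb2_buffer_state state)
{
        struct my_buf *buf, *tmp;
        unsigned long flags;

        spin_lock_irqsave(lock, flags);
        list_for_each_entry_safe(buf, tmp, active, list) {
                list_del(&buf->list);
                vb2_buffer_done(&buf->vb.vb2_buf, state);
        }
        spin_unlock_irqrestore(lock, flags);
}
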
diff --git a/drivers/media/pci/ttpci/av7110_ir.c b/drivers/media/pci/ttpci/av7110_ir.c
index 10e28f067b45..ca05198de2c2 100644
--- a/drivers/media/pci/ttpci/av7110_ir.c
+++ b/drivers/media/pci/ttpci/av7110_ir.c
@@ -333,9 +333,8 @@ int av7110_ir_init(struct av7110 *av7110)
av_list[av_cnt++] = av7110;
av7110_check_ir_config(av7110, true);
- init_timer(&av7110->ir.keyup_timer);
- av7110->ir.keyup_timer.function = av7110_emit_keyup;
- av7110->ir.keyup_timer.data = (unsigned long) &av7110->ir;
+ setup_timer(&av7110->ir.keyup_timer, av7110_emit_keyup,
+ (unsigned long)&av7110->ir);
input_dev = input_allocate_device();
if (!input_dev)
diff --git a/drivers/media/pci/ttpci/budget-av.c b/drivers/media/pci/ttpci/budget-av.c
index 19f07d4aba6a..dc7be8fac9a3 100644
--- a/drivers/media/pci/ttpci/budget-av.c
+++ b/drivers/media/pci/ttpci/budget-av.c
@@ -577,7 +577,7 @@ static u8 typhoon_cinergy1200s_inittab[] = {
0xff, 0xff
};
-static struct stv0299_config typhoon_config = {
+static const struct stv0299_config typhoon_config = {
.demod_address = 0x68,
.inittab = typhoon_cinergy1200s_inittab,
.mclk = 88000000UL,
@@ -590,7 +590,7 @@ static struct stv0299_config typhoon_config = {
};
-static struct stv0299_config cinergy_1200s_config = {
+static const struct stv0299_config cinergy_1200s_config = {
.demod_address = 0x68,
.inittab = typhoon_cinergy1200s_inittab,
.mclk = 88000000UL,
@@ -602,7 +602,7 @@ static struct stv0299_config cinergy_1200s_config = {
.set_symbol_rate = philips_su1278_ty_ci_set_symbol_rate,
};
-static struct stv0299_config cinergy_1200s_1894_0010_config = {
+static const struct stv0299_config cinergy_1200s_1894_0010_config = {
.demod_address = 0x68,
.inittab = typhoon_cinergy1200s_inittab,
.mclk = 88000000UL,
@@ -876,7 +876,7 @@ static int philips_sd1878_ci_set_symbol_rate(struct dvb_frontend *fe,
return 0;
}
-static struct stv0299_config philips_sd1878_config = {
+static const struct stv0299_config philips_sd1878_config = {
.demod_address = 0x68,
.inittab = philips_sd1878_inittab,
.mclk = 88000000UL,
diff --git a/drivers/media/pci/ttpci/budget-ci.c b/drivers/media/pci/ttpci/budget-ci.c
index 68355484ba7d..11b9227307bf 100644
--- a/drivers/media/pci/ttpci/budget-ci.c
+++ b/drivers/media/pci/ttpci/budget-ci.c
@@ -693,7 +693,7 @@ static int philips_su1278_tt_tuner_set_params(struct dvb_frontend *fe)
return 0;
}
-static struct stv0299_config philips_su1278_tt_config = {
+static const struct stv0299_config philips_su1278_tt_config = {
.demod_address = 0x68,
.inittab = philips_su1278_tt_inittab,
diff --git a/drivers/media/pci/ttpci/budget.c b/drivers/media/pci/ttpci/budget.c
index 5f17e1c9a207..81fe35cedd10 100644
--- a/drivers/media/pci/ttpci/budget.c
+++ b/drivers/media/pci/ttpci/budget.c
@@ -397,7 +397,7 @@ static struct tda10086_config tda10086_config = {
.xtal_freq = TDA10086_XTAL_16M,
};
-static struct stv0299_config alps_bsru6_config_activy = {
+static const struct stv0299_config alps_bsru6_config_activy = {
.demod_address = 0x68,
.inittab = alps_bsru6_inittab,
.mclk = 88000000UL,
@@ -407,7 +407,7 @@ static struct stv0299_config alps_bsru6_config_activy = {
.set_symbol_rate = alps_bsru6_set_symbol_rate,
};
-static struct stv0299_config alps_bsbe1_config_activy = {
+static const struct stv0299_config alps_bsbe1_config_activy = {
.demod_address = 0x68,
.inittab = alps_bsbe1_inittab,
.mclk = 88000000UL,
diff --git a/drivers/media/pci/tw5864/tw5864-video.c b/drivers/media/pci/tw5864/tw5864-video.c
index 9421216bb942..2a044be729da 100644
--- a/drivers/media/pci/tw5864/tw5864-video.c
+++ b/drivers/media/pci/tw5864/tw5864-video.c
@@ -664,15 +664,14 @@ static int tw5864_subscribe_event(struct v4l2_fh *fh,
const struct v4l2_event_subscription *sub)
{
switch (sub->type) {
- case V4L2_EVENT_CTRL:
- return v4l2_ctrl_subscribe_event(fh, sub);
case V4L2_EVENT_MOTION_DET:
/*
* Allow for up to 30 events (1 second for NTSC) to be stored.
*/
return v4l2_event_subscribe(fh, sub, 30, NULL);
+ default:
+ return v4l2_ctrl_subscribe_event(fh, sub);
}
- return -EINVAL;
}
static void tw5864_frame_interval_set(struct tw5864_input *input)
@@ -717,6 +716,8 @@ static void tw5864_frame_interval_set(struct tw5864_input *input)
static int tw5864_frameinterval_get(struct tw5864_input *input,
struct v4l2_fract *frameinterval)
{
+ struct tw5864_dev *dev = input->root;
+
switch (input->std) {
case STD_NTSC:
frameinterval->numerator = 1001;
@@ -728,8 +729,8 @@ static int tw5864_frameinterval_get(struct tw5864_input *input,
frameinterval->denominator = 25;
break;
default:
- WARN(1, "tw5864_frameinterval_get requested for unknown std %d\n",
- input->std);
+ dev_warn(&dev->pci->dev, "tw5864_frameinterval_get requested for unknown std %d\n",
+ input->std);
return -EINVAL;
}
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
index c9106e105bab..ac026ee1ca07 100644
--- a/drivers/media/platform/Kconfig
+++ b/drivers/media/platform/Kconfig
@@ -151,7 +151,7 @@ if V4L_MEM2MEM_DRIVERS
config VIDEO_CODA
tristate "Chips&Media Coda multi-standard codec IP"
- depends on VIDEO_DEV && VIDEO_V4L2 && ARCH_MXC
+ depends on VIDEO_DEV && VIDEO_V4L2 && (ARCH_MXC || COMPILE_TEST)
depends on HAS_DMA
select SRAM
select VIDEOBUF2_DMA_CONTIG
@@ -165,6 +165,21 @@ config VIDEO_CODA
config VIDEO_IMX_VDOA
def_tristate VIDEO_CODA if SOC_IMX6Q || COMPILE_TEST
+config VIDEO_MEDIATEK_JPEG
+ tristate "Mediatek JPEG Codec driver"
+ depends on MTK_IOMMU_V1 || COMPILE_TEST
+ depends on VIDEO_DEV && VIDEO_V4L2
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ depends on HAS_DMA
+ select VIDEOBUF2_DMA_CONTIG
+ select V4L2_MEM2MEM_DEV
+ ---help---
+ Mediatek JPEG codec driver provides HW capability to decode
+ JPEG format.
+
+ To compile this driver as a module, choose M here: the
+ module will be called mtk-jpeg
+
config VIDEO_MEDIATEK_VPU
tristate "Mediatek Video Processor Unit"
depends on VIDEO_DEV && VIDEO_V4L2 && HAS_DMA
@@ -405,6 +420,7 @@ config VIDEO_RENESAS_VSP1
depends on (ARCH_RENESAS && OF) || COMPILE_TEST
depends on (!ARM64 && !VIDEO_RENESAS_FCP) || VIDEO_RENESAS_FCP
select VIDEOBUF2_DMA_CONTIG
+ select VIDEOBUF2_VMALLOC
---help---
This is a V4L2 driver for the Renesas VSP1 video processing engine.
@@ -451,6 +467,8 @@ menuconfig V4L_TEST_DRIVERS
if V4L_TEST_DRIVERS
+source "drivers/media/platform/vimc/Kconfig"
+
source "drivers/media/platform/vivid/Kconfig"
config VIDEO_VIM2M
@@ -474,3 +492,31 @@ menuconfig DVB_PLATFORM_DRIVERS
if DVB_PLATFORM_DRIVERS
source "drivers/media/platform/sti/c8sectpfe/Kconfig"
endif #DVB_PLATFORM_DRIVERS
+
+menuconfig CEC_PLATFORM_DRIVERS
+ bool "CEC platform devices"
+ depends on MEDIA_CEC_SUPPORT
+
+if CEC_PLATFORM_DRIVERS
+
+config VIDEO_SAMSUNG_S5P_CEC
+ tristate "Samsung S5P CEC driver"
+ depends on CEC_CORE && (PLAT_S5P || ARCH_EXYNOS || COMPILE_TEST)
+ select MEDIA_CEC_NOTIFIER
+ ---help---
+ This is a driver for Samsung S5P HDMI CEC interface. It uses the
+ generic CEC framework interface.
+ CEC bus is present in the HDMI connector and enables communication
+ between compatible devices.
+
+config VIDEO_STI_HDMI_CEC
+ tristate "STMicroelectronics STiH4xx HDMI CEC driver"
+ depends on CEC_CORE && (ARCH_STI || COMPILE_TEST)
+ select MEDIA_CEC_NOTIFIER
+ ---help---
+ This is a driver for STIH4xx HDMI CEC interface. It uses the
+ generic CEC framework interface.
+ CEC bus is present in the HDMI connector and enables communication
+ between compatible devices.
+
+endif #CEC_PLATFORM_DRIVERS
diff --git a/drivers/media/platform/Makefile b/drivers/media/platform/Makefile
index 349ddf6a69da..63303d63c64c 100644
--- a/drivers/media/platform/Makefile
+++ b/drivers/media/platform/Makefile
@@ -13,6 +13,7 @@ obj-$(CONFIG_VIDEO_PXA27x) += pxa_camera.o
obj-$(CONFIG_VIDEO_VIU) += fsl-viu.o
+obj-$(CONFIG_VIDEO_VIMC) += vimc/
obj-$(CONFIG_VIDEO_VIVID) += vivid/
obj-$(CONFIG_VIDEO_VIM2M) += vim2m.o
@@ -33,11 +34,13 @@ obj-$(CONFIG_VIDEO_SAMSUNG_S5P_JPEG) += s5p-jpeg/
obj-$(CONFIG_VIDEO_SAMSUNG_S5P_MFC) += s5p-mfc/
obj-$(CONFIG_VIDEO_SAMSUNG_S5P_G2D) += s5p-g2d/
+obj-$(CONFIG_VIDEO_SAMSUNG_S5P_CEC) += s5p-cec/
obj-$(CONFIG_VIDEO_SAMSUNG_EXYNOS_GSC) += exynos-gsc/
obj-$(CONFIG_VIDEO_STI_BDISP) += sti/bdisp/
obj-$(CONFIG_VIDEO_STI_HVA) += sti/hva/
obj-$(CONFIG_DVB_C8SECTPFE) += sti/c8sectpfe/
+obj-$(CONFIG_VIDEO_STI_HDMI_CEC) += sti/cec/
obj-$(CONFIG_VIDEO_STI_DELTA) += sti/delta/
@@ -63,6 +66,7 @@ obj-$(CONFIG_VIDEO_XILINX) += xilinx/
obj-$(CONFIG_VIDEO_RCAR_VIN) += rcar-vin/
obj-$(CONFIG_VIDEO_ATMEL_ISC) += atmel/
+obj-$(CONFIG_VIDEO_ATMEL_ISI) += atmel/
ccflags-y += -I$(srctree)/drivers/media/i2c
@@ -71,3 +75,5 @@ obj-$(CONFIG_VIDEO_MEDIATEK_VPU) += mtk-vpu/
obj-$(CONFIG_VIDEO_MEDIATEK_VCODEC) += mtk-vcodec/
obj-$(CONFIG_VIDEO_MEDIATEK_MDP) += mtk-mdp/
+
+obj-$(CONFIG_VIDEO_MEDIATEK_JPEG) += mtk-jpeg/
diff --git a/drivers/media/platform/atmel/Kconfig b/drivers/media/platform/atmel/Kconfig
index 867dca22a473..9bd0f19b127f 100644
--- a/drivers/media/platform/atmel/Kconfig
+++ b/drivers/media/platform/atmel/Kconfig
@@ -6,4 +6,13 @@ config VIDEO_ATMEL_ISC
select REGMAP_MMIO
help
This module makes the ATMEL Image Sensor Controller available
- as a v4l2 device. \ No newline at end of file
+ as a v4l2 device.
+
+config VIDEO_ATMEL_ISI
+ tristate "ATMEL Image Sensor Interface (ISI) support"
+ depends on VIDEO_V4L2 && OF && HAS_DMA
+ depends on ARCH_AT91 || COMPILE_TEST
+ select VIDEOBUF2_DMA_CONTIG
+ ---help---
+ This module makes the ATMEL Image Sensor Interface available
+ as a v4l2 device.
diff --git a/drivers/media/platform/atmel/Makefile b/drivers/media/platform/atmel/Makefile
index 9d7c999d434d..27000d099a5e 100644
--- a/drivers/media/platform/atmel/Makefile
+++ b/drivers/media/platform/atmel/Makefile
@@ -1 +1,2 @@
obj-$(CONFIG_VIDEO_ATMEL_ISC) += atmel-isc.o
+obj-$(CONFIG_VIDEO_ATMEL_ISI) += atmel-isi.o
diff --git a/drivers/media/platform/atmel/atmel-isc-regs.h b/drivers/media/platform/atmel/atmel-isc-regs.h
index 00c449717cde..6936ac467609 100644
--- a/drivers/media/platform/atmel/atmel-isc-regs.h
+++ b/drivers/media/platform/atmel/atmel-isc-regs.h
@@ -65,6 +65,7 @@
#define ISC_INTSR 0x00000034
#define ISC_INT_DDONE BIT(8)
+#define ISC_INT_HISDONE BIT(12)
/* ISC White Balance Control Register */
#define ISC_WB_CTRL 0x00000058
@@ -72,30 +73,98 @@
/* ISC White Balance Configuration Register */
#define ISC_WB_CFG 0x0000005c
+/* ISC White Balance Offset for R, GR Register */
+#define ISC_WB_O_RGR 0x00000060
+
+/* ISC White Balance Offset for B, GB Register */
+#define ISC_WB_O_BGR 0x00000064
+
+/* ISC White Balance Gain for R, GR Register */
+#define ISC_WB_G_RGR 0x00000068
+
+/* ISC White Balance Gain for B, GB Register */
+#define ISC_WB_G_BGR 0x0000006c
+
/* ISC Color Filter Array Control Register */
#define ISC_CFA_CTRL 0x00000070
/* ISC Color Filter Array Configuration Register */
#define ISC_CFA_CFG 0x00000074
+#define ISC_CFA_CFG_EITPOL BIT(4)
#define ISC_BAY_CFG_GRGR 0x0
#define ISC_BAY_CFG_RGRG 0x1
#define ISC_BAY_CFG_GBGB 0x2
#define ISC_BAY_CFG_BGBG 0x3
-#define ISC_BAY_CFG_MASK GENMASK(1, 0)
/* ISC Color Correction Control Register */
#define ISC_CC_CTRL 0x00000078
+/* ISC Color Correction RR RG Register */
+#define ISC_CC_RR_RG 0x0000007c
+
+/* ISC Color Correction RB OR Register */
+#define ISC_CC_RB_OR 0x00000080
+
+/* ISC Color Correction GR GG Register */
+#define ISC_CC_GR_GG 0x00000084
+
+/* ISC Color Correction GB OG Register */
+#define ISC_CC_GB_OG 0x00000088
+
+/* ISC Color Correction BR BG Register */
+#define ISC_CC_BR_BG 0x0000008c
+
+/* ISC Color Correction BB OB Register */
+#define ISC_CC_BB_OB 0x00000090
+
/* ISC Gamma Correction Control Register */
#define ISC_GAM_CTRL 0x00000094
+/* ISC_Gamma Correction Blue Entry Register */
+#define ISC_GAM_BENTRY 0x00000098
+
+/* ISC_Gamma Correction Green Entry Register */
+#define ISC_GAM_GENTRY 0x00000198
+
+/* ISC_Gamma Correction Green Entry Register */
+#define ISC_GAM_RENTRY 0x00000298
+
/* Color Space Conversion Control Register */
#define ISC_CSC_CTRL 0x00000398
+/* Color Space Conversion YR YG Register */
+#define ISC_CSC_YR_YG 0x0000039c
+
+/* Color Space Conversion YB OY Register */
+#define ISC_CSC_YB_OY 0x000003a0
+
+/* Color Space Conversion CBR CBG Register */
+#define ISC_CSC_CBR_CBG 0x000003a4
+
+/* Color Space Conversion CBB OCB Register */
+#define ISC_CSC_CBB_OCB 0x000003a8
+
+/* Color Space Conversion CRR CRG Register */
+#define ISC_CSC_CRR_CRG 0x000003ac
+
+/* Color Space Conversion CRB OCR Register */
+#define ISC_CSC_CRB_OCR 0x000003b0
+
/* Contrast And Brightness Control Register */
#define ISC_CBC_CTRL 0x000003b4
+/* Contrast And Brightness Configuration Register */
+#define ISC_CBC_CFG 0x000003b8
+
+/* Brightness Register */
+#define ISC_CBC_BRIGHT 0x000003bc
+#define ISC_CBC_BRIGHT_MASK GENMASK(10, 0)
+
+/* Contrast Register */
+#define ISC_CBC_CONTRAST 0x000003c0
+#define ISC_CBC_CONTRAST_MASK GENMASK(11, 0)
+
/* Subsampling 4:4:4 to 4:2:2 Control Register */
#define ISC_SUB422_CTRL 0x000003c4
@@ -120,6 +189,27 @@
#define ISC_RLP_CFG_MODE_YYCC_LIMITED 0xc
#define ISC_RLP_CFG_MODE_MASK GENMASK(3, 0)
+/* Histogram Control Register */
+#define ISC_HIS_CTRL 0x000003d4
+
+#define ISC_HIS_CTRL_EN BIT(0)
+#define ISC_HIS_CTRL_DIS 0x0
+
+/* Histogram Configuration Register */
+#define ISC_HIS_CFG 0x000003d8
+
+#define ISC_HIS_CFG_MODE_GR 0x0
+#define ISC_HIS_CFG_MODE_R 0x1
+#define ISC_HIS_CFG_MODE_GB 0x2
+#define ISC_HIS_CFG_MODE_B 0x3
+#define ISC_HIS_CFG_MODE_Y 0x4
+#define ISC_HIS_CFG_MODE_RAW 0x5
+#define ISC_HIS_CFG_MODE_YCCIR656 0x6
+
+#define ISC_HIS_CFG_BAYSEL_SHIFT 4
+
+#define ISC_HIS_CFG_RAR BIT(8)
+
/* DMA Configuration Register */
#define ISC_DCFG 0x000003e0
#define ISC_DCFG_IMODE_PACKED8 0x0
@@ -159,7 +249,13 @@
/* DMA Address 0 Register */
#define ISC_DAD0 0x000003ec
-/* DMA Stride 0 Register */
-#define ISC_DST0 0x000003f0
+/* DMA Address 1 Register */
+#define ISC_DAD1 0x000003f4
+
+/* DMA Address 2 Register */
+#define ISC_DAD2 0x000003fc
+
+/* Histogram Entry */
+#define ISC_HIS_ENTRY 0x00000410
#endif
diff --git a/drivers/media/platform/atmel/atmel-isc.c b/drivers/media/platform/atmel/atmel-isc.c
index fa68fe912c95..c4b2115559a5 100644
--- a/drivers/media/platform/atmel/atmel-isc.c
+++ b/drivers/media/platform/atmel/atmel-isc.c
@@ -29,6 +29,7 @@
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
+#include <linux/math64.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
@@ -36,7 +37,9 @@
#include <linux/regmap.h>
#include <linux/videodev2.h>
+#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
#include <media/v4l2-image-sizes.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-of.h>
@@ -89,10 +92,12 @@ struct isc_subdev_entity {
* struct isc_format - ISC media bus format information
* @fourcc: Fourcc code for this format
* @mbus_code: V4L2 media bus format code.
- * @bpp: Bytes per pixel (when stored in memory)
+ * @bpp: Bits per pixel (when stored in memory)
* @reg_bps: reg value for bits per sample
* (when transferred over a bus)
- * @support: Indicates format supported by subdev
+ * @pipeline: pipeline switch
+ * @sd_support: Subdev supports this format
+ * @isc_support: ISC can convert raw format to this format
*/
struct isc_format {
u32 fourcc;
@@ -100,11 +105,42 @@ struct isc_format {
u8 bpp;
u32 reg_bps;
+ u32 reg_bay_cfg;
u32 reg_rlp_mode;
u32 reg_dcfg_imode;
u32 reg_dctrl_dview;
- bool support;
+ u32 pipeline;
+
+ bool sd_support;
+ bool isc_support;
+};
+
+
+#define HIST_ENTRIES 512
+#define HIST_BAYER (ISC_HIS_CFG_MODE_B + 1)
+
+enum {
+ HIST_INIT = 0,
+ HIST_ENABLED,
+ HIST_DISABLED,
+};
+
+struct isc_ctrls {
+ struct v4l2_ctrl_handler handler;
+
+ u32 brightness;
+ u32 contrast;
+ u8 gamma_index;
+ u8 awb;
+
+ u32 r_gain;
+ u32 b_gain;
+
+ u32 hist_entry[HIST_ENTRIES];
+ u32 hist_count[HIST_BAYER];
+ u8 hist_id;
+ u8 hist_stat;
};
#define ISC_PIPE_LINE_NODE_NUM 11
@@ -131,6 +167,10 @@ struct isc_device {
struct isc_format **user_formats;
unsigned int num_user_formats;
const struct isc_format *current_fmt;
+ const struct isc_format *raw_fmt;
+
+ struct isc_ctrls ctrls;
+ struct work_struct awb_work;
struct mutex lock;
@@ -140,51 +180,134 @@ struct isc_device {
struct list_head subdev_entities;
};
+#define RAW_FMT_IND_START 0
+#define RAW_FMT_IND_END 11
+#define ISC_FMT_IND_START 12
+#define ISC_FMT_IND_END 14
+
static struct isc_format isc_formats[] = {
- { V4L2_PIX_FMT_SBGGR8, MEDIA_BUS_FMT_SBGGR8_1X8,
- 1, ISC_PFE_CFG0_BPS_EIGHT, ISC_RLP_CFG_MODE_DAT8,
- ISC_DCFG_IMODE_PACKED8, ISC_DCTRL_DVIEW_PACKED, false },
- { V4L2_PIX_FMT_SGBRG8, MEDIA_BUS_FMT_SGBRG8_1X8,
- 1, ISC_PFE_CFG0_BPS_EIGHT, ISC_RLP_CFG_MODE_DAT8,
- ISC_DCFG_IMODE_PACKED8, ISC_DCTRL_DVIEW_PACKED, false },
- { V4L2_PIX_FMT_SGRBG8, MEDIA_BUS_FMT_SGRBG8_1X8,
- 1, ISC_PFE_CFG0_BPS_EIGHT, ISC_RLP_CFG_MODE_DAT8,
- ISC_DCFG_IMODE_PACKED8, ISC_DCTRL_DVIEW_PACKED, false },
- { V4L2_PIX_FMT_SRGGB8, MEDIA_BUS_FMT_SRGGB8_1X8,
- 1, ISC_PFE_CFG0_BPS_EIGHT, ISC_RLP_CFG_MODE_DAT8,
- ISC_DCFG_IMODE_PACKED8, ISC_DCTRL_DVIEW_PACKED, false },
-
- { V4L2_PIX_FMT_SBGGR10, MEDIA_BUS_FMT_SBGGR10_1X10,
- 2, ISC_PFG_CFG0_BPS_TEN, ISC_RLP_CFG_MODE_DAT10,
- ISC_DCFG_IMODE_PACKED16, ISC_DCTRL_DVIEW_PACKED, false },
- { V4L2_PIX_FMT_SGBRG10, MEDIA_BUS_FMT_SGBRG10_1X10,
- 2, ISC_PFG_CFG0_BPS_TEN, ISC_RLP_CFG_MODE_DAT10,
- ISC_DCFG_IMODE_PACKED16, ISC_DCTRL_DVIEW_PACKED, false },
- { V4L2_PIX_FMT_SGRBG10, MEDIA_BUS_FMT_SGRBG10_1X10,
- 2, ISC_PFG_CFG0_BPS_TEN, ISC_RLP_CFG_MODE_DAT10,
- ISC_DCFG_IMODE_PACKED16, ISC_DCTRL_DVIEW_PACKED, false },
- { V4L2_PIX_FMT_SRGGB10, MEDIA_BUS_FMT_SRGGB10_1X10,
- 2, ISC_PFG_CFG0_BPS_TEN, ISC_RLP_CFG_MODE_DAT10,
- ISC_DCFG_IMODE_PACKED16, ISC_DCTRL_DVIEW_PACKED, false },
-
- { V4L2_PIX_FMT_SBGGR12, MEDIA_BUS_FMT_SBGGR12_1X12,
- 2, ISC_PFG_CFG0_BPS_TWELVE, ISC_RLP_CFG_MODE_DAT12,
- ISC_DCFG_IMODE_PACKED16, ISC_DCTRL_DVIEW_PACKED, false },
- { V4L2_PIX_FMT_SGBRG12, MEDIA_BUS_FMT_SGBRG12_1X12,
- 2, ISC_PFG_CFG0_BPS_TWELVE, ISC_RLP_CFG_MODE_DAT12,
- ISC_DCFG_IMODE_PACKED16, ISC_DCTRL_DVIEW_PACKED, false },
- { V4L2_PIX_FMT_SGRBG12, MEDIA_BUS_FMT_SGRBG12_1X12,
- 2, ISC_PFG_CFG0_BPS_TWELVE, ISC_RLP_CFG_MODE_DAT12,
- ISC_DCFG_IMODE_PACKED16, ISC_DCTRL_DVIEW_PACKED, false },
- { V4L2_PIX_FMT_SRGGB12, MEDIA_BUS_FMT_SRGGB12_1X12,
- 2, ISC_PFG_CFG0_BPS_TWELVE, ISC_RLP_CFG_MODE_DAT12,
- ISC_DCFG_IMODE_PACKED16, ISC_DCTRL_DVIEW_PACKED, false },
-
- { V4L2_PIX_FMT_YUYV, MEDIA_BUS_FMT_YUYV8_2X8,
- 2, ISC_PFE_CFG0_BPS_EIGHT, ISC_RLP_CFG_MODE_DAT8,
- ISC_DCFG_IMODE_PACKED8, ISC_DCTRL_DVIEW_PACKED, false },
+ { V4L2_PIX_FMT_SBGGR8, MEDIA_BUS_FMT_SBGGR8_1X8, 8,
+ ISC_PFE_CFG0_BPS_EIGHT, ISC_BAY_CFG_BGBG, ISC_RLP_CFG_MODE_DAT8,
+ ISC_DCFG_IMODE_PACKED8, ISC_DCTRL_DVIEW_PACKED, 0x0,
+ false, false },
+ { V4L2_PIX_FMT_SGBRG8, MEDIA_BUS_FMT_SGBRG8_1X8, 8,
+ ISC_PFE_CFG0_BPS_EIGHT, ISC_BAY_CFG_GBGB, ISC_RLP_CFG_MODE_DAT8,
+ ISC_DCFG_IMODE_PACKED8, ISC_DCTRL_DVIEW_PACKED, 0x0,
+ false, false },
+ { V4L2_PIX_FMT_SGRBG8, MEDIA_BUS_FMT_SGRBG8_1X8, 8,
+ ISC_PFE_CFG0_BPS_EIGHT, ISC_BAY_CFG_GRGR, ISC_RLP_CFG_MODE_DAT8,
+ ISC_DCFG_IMODE_PACKED8, ISC_DCTRL_DVIEW_PACKED, 0x0,
+ false, false },
+ { V4L2_PIX_FMT_SRGGB8, MEDIA_BUS_FMT_SRGGB8_1X8, 8,
+ ISC_PFE_CFG0_BPS_EIGHT, ISC_BAY_CFG_RGRG, ISC_RLP_CFG_MODE_DAT8,
+ ISC_DCFG_IMODE_PACKED8, ISC_DCTRL_DVIEW_PACKED, 0x0,
+ false, false },
+
+ { V4L2_PIX_FMT_SBGGR10, MEDIA_BUS_FMT_SBGGR10_1X10, 16,
+ ISC_PFG_CFG0_BPS_TEN, ISC_BAY_CFG_BGBG, ISC_RLP_CFG_MODE_DAT10,
+ ISC_DCFG_IMODE_PACKED16, ISC_DCTRL_DVIEW_PACKED, 0x0,
+ false, false },
+ { V4L2_PIX_FMT_SGBRG10, MEDIA_BUS_FMT_SGBRG10_1X10, 16,
+ ISC_PFG_CFG0_BPS_TEN, ISC_BAY_CFG_GBGB, ISC_RLP_CFG_MODE_DAT10,
+ ISC_DCFG_IMODE_PACKED16, ISC_DCTRL_DVIEW_PACKED, 0x0,
+ false, false },
+ { V4L2_PIX_FMT_SGRBG10, MEDIA_BUS_FMT_SGRBG10_1X10, 16,
+ ISC_PFG_CFG0_BPS_TEN, ISC_BAY_CFG_GRGR, ISC_RLP_CFG_MODE_DAT10,
+ ISC_DCFG_IMODE_PACKED16, ISC_DCTRL_DVIEW_PACKED, 0x0,
+ false, false },
+ { V4L2_PIX_FMT_SRGGB10, MEDIA_BUS_FMT_SRGGB10_1X10, 16,
+ ISC_PFG_CFG0_BPS_TEN, ISC_BAY_CFG_RGRG, ISC_RLP_CFG_MODE_DAT10,
+ ISC_DCFG_IMODE_PACKED16, ISC_DCTRL_DVIEW_PACKED, 0x0,
+ false, false },
+
+ { V4L2_PIX_FMT_SBGGR12, MEDIA_BUS_FMT_SBGGR12_1X12, 16,
+ ISC_PFG_CFG0_BPS_TWELVE, ISC_BAY_CFG_BGBG, ISC_RLP_CFG_MODE_DAT12,
+ ISC_DCFG_IMODE_PACKED16, ISC_DCTRL_DVIEW_PACKED, 0x0,
+ false, false },
+ { V4L2_PIX_FMT_SGBRG12, MEDIA_BUS_FMT_SGBRG12_1X12, 16,
+ ISC_PFG_CFG0_BPS_TWELVE, ISC_BAY_CFG_GBGB, ISC_RLP_CFG_MODE_DAT12,
+ ISC_DCFG_IMODE_PACKED16, ISC_DCTRL_DVIEW_PACKED, 0x0,
+ false, false },
+ { V4L2_PIX_FMT_SGRBG12, MEDIA_BUS_FMT_SGRBG12_1X12, 16,
+ ISC_PFG_CFG0_BPS_TWELVE, ISC_BAY_CFG_GRGR, ISC_RLP_CFG_MODE_DAT12,
+ ISC_DCFG_IMODE_PACKED16, ISC_DCTRL_DVIEW_PACKED, 0x0,
+ false, false },
+ { V4L2_PIX_FMT_SRGGB12, MEDIA_BUS_FMT_SRGGB12_1X12, 16,
+ ISC_PFG_CFG0_BPS_TWELVE, ISC_BAY_CFG_RGRG, ISC_RLP_CFG_MODE_DAT12,
+ ISC_DCFG_IMODE_PACKED16, ISC_DCTRL_DVIEW_PACKED, 0x0,
+ false, false },
+
+ { V4L2_PIX_FMT_YUV420, 0x0, 12,
+ ISC_PFE_CFG0_BPS_EIGHT, ISC_BAY_CFG_BGBG, ISC_RLP_CFG_MODE_YYCC,
+ ISC_DCFG_IMODE_YC420P | ISC_DCFG_YMBSIZE_BEATS8 |
+ ISC_DCFG_CMBSIZE_BEATS8, ISC_DCTRL_DVIEW_PLANAR, 0x7fb,
+ false, false },
+ { V4L2_PIX_FMT_YUV422P, 0x0, 16,
+ ISC_PFE_CFG0_BPS_EIGHT, ISC_BAY_CFG_BGBG, ISC_RLP_CFG_MODE_YYCC,
+ ISC_DCFG_IMODE_YC422P | ISC_DCFG_YMBSIZE_BEATS8 |
+ ISC_DCFG_CMBSIZE_BEATS8, ISC_DCTRL_DVIEW_PLANAR, 0x3fb,
+ false, false },
+ { V4L2_PIX_FMT_RGB565, MEDIA_BUS_FMT_RGB565_2X8_LE, 16,
+ ISC_PFE_CFG0_BPS_EIGHT, ISC_BAY_CFG_BGBG, ISC_RLP_CFG_MODE_RGB565,
+ ISC_DCFG_IMODE_PACKED16, ISC_DCTRL_DVIEW_PACKED, 0x7b,
+ false, false },
+
+ { V4L2_PIX_FMT_YUYV, MEDIA_BUS_FMT_YUYV8_2X8, 16,
+ ISC_PFE_CFG0_BPS_EIGHT, ISC_BAY_CFG_BGBG, ISC_RLP_CFG_MODE_DAT8,
+ ISC_DCFG_IMODE_PACKED8, ISC_DCTRL_DVIEW_PACKED, 0x0,
+ false, false },
+};
+
+#define GAMMA_MAX 2
+#define GAMMA_ENTRIES 64
+
+/* Gamma table with gamma 1/2.2 */
+static const u32 isc_gamma_table[GAMMA_MAX + 1][GAMMA_ENTRIES] = {
+ /* 0 --> gamma 1/1.8 */
+ { 0x65, 0x66002F, 0x950025, 0xBB0020, 0xDB001D, 0xF8001A,
+ 0x1130018, 0x12B0017, 0x1420016, 0x1580014, 0x16D0013, 0x1810012,
+ 0x1940012, 0x1A60012, 0x1B80011, 0x1C90010, 0x1DA0010, 0x1EA000F,
+ 0x1FA000F, 0x209000F, 0x218000F, 0x227000E, 0x235000E, 0x243000E,
+ 0x251000E, 0x25F000D, 0x26C000D, 0x279000D, 0x286000D, 0x293000C,
+ 0x2A0000C, 0x2AC000C, 0x2B8000C, 0x2C4000C, 0x2D0000B, 0x2DC000B,
+ 0x2E7000B, 0x2F3000B, 0x2FE000B, 0x309000B, 0x314000B, 0x31F000A,
+ 0x32A000A, 0x334000B, 0x33F000A, 0x349000A, 0x354000A, 0x35E000A,
+ 0x368000A, 0x372000A, 0x37C000A, 0x386000A, 0x3900009, 0x399000A,
+ 0x3A30009, 0x3AD0009, 0x3B60009, 0x3BF000A, 0x3C90009, 0x3D20009,
+ 0x3DB0009, 0x3E40009, 0x3ED0009, 0x3F60009 },
+
+ /* 1 --> gamma 1/2 */
+ { 0x7F, 0x800034, 0xB50028, 0xDE0021, 0x100001E, 0x11E001B,
+ 0x1390019, 0x1520017, 0x16A0015, 0x1800014, 0x1940014, 0x1A80013,
+ 0x1BB0012, 0x1CD0011, 0x1DF0010, 0x1EF0010, 0x200000F, 0x20F000F,
+ 0x21F000E, 0x22D000F, 0x23C000E, 0x24A000E, 0x258000D, 0x265000D,
+ 0x273000C, 0x27F000D, 0x28C000C, 0x299000C, 0x2A5000C, 0x2B1000B,
+ 0x2BC000C, 0x2C8000B, 0x2D3000C, 0x2DF000B, 0x2EA000A, 0x2F5000A,
+ 0x2FF000B, 0x30A000A, 0x314000B, 0x31F000A, 0x329000A, 0x333000A,
+ 0x33D0009, 0x3470009, 0x350000A, 0x35A0009, 0x363000A, 0x36D0009,
+ 0x3760009, 0x37F0009, 0x3880009, 0x3910009, 0x39A0009, 0x3A30009,
+ 0x3AC0008, 0x3B40009, 0x3BD0008, 0x3C60008, 0x3CE0008, 0x3D60009,
+ 0x3DF0008, 0x3E70008, 0x3EF0008, 0x3F70008 },
+
+ /* 2 --> gamma 1/2.2 */
+ { 0x99, 0x9B0038, 0xD4002A, 0xFF0023, 0x122001F, 0x141001B,
+ 0x15D0019, 0x1760017, 0x18E0015, 0x1A30015, 0x1B80013, 0x1CC0012,
+ 0x1DE0011, 0x1F00010, 0x2010010, 0x2110010, 0x221000F, 0x230000F,
+ 0x23F000E, 0x24D000E, 0x25B000D, 0x269000C, 0x276000C, 0x283000C,
+ 0x28F000C, 0x29B000C, 0x2A7000C, 0x2B3000B, 0x2BF000B, 0x2CA000B,
+ 0x2D5000B, 0x2E0000A, 0x2EB000A, 0x2F5000A, 0x2FF000A, 0x30A000A,
+ 0x3140009, 0x31E0009, 0x327000A, 0x3310009, 0x33A0009, 0x3440009,
+ 0x34D0009, 0x3560009, 0x35F0009, 0x3680008, 0x3710008, 0x3790009,
+ 0x3820008, 0x38A0008, 0x3930008, 0x39B0008, 0x3A30008, 0x3AB0008,
+ 0x3B30008, 0x3BB0008, 0x3C30008, 0x3CB0007, 0x3D20008, 0x3DA0007,
+ 0x3E20007, 0x3E90007, 0x3F00008, 0x3F80007 },
};
+static unsigned int sensor_preferred = 1;
+module_param(sensor_preferred, uint, 0644);
+MODULE_PARM_DESC(sensor_preferred,
+ "Sensor is preferred to output the specified format (1-on 0-off), default 1");
+
static int isc_clk_enable(struct clk_hw *hw)
{
struct isc_clk *isc_clk = to_isc_clk(hw);
@@ -447,27 +570,155 @@ static int isc_buffer_prepare(struct vb2_buffer *vb)
return 0;
}
-static inline void isc_start_dma(struct regmap *regmap,
- struct isc_buffer *frm, u32 dview)
+static inline bool sensor_is_preferred(const struct isc_format *isc_fmt)
+{
+ return (sensor_preferred && isc_fmt->sd_support) ||
+ !isc_fmt->isc_support;
+}
+
+static void isc_start_dma(struct isc_device *isc)
{
- dma_addr_t addr;
+ struct regmap *regmap = isc->regmap;
+ struct v4l2_pix_format *pixfmt = &isc->fmt.fmt.pix;
+ u32 sizeimage = pixfmt->sizeimage;
+ u32 dctrl_dview;
+ dma_addr_t addr0;
+
+ addr0 = vb2_dma_contig_plane_dma_addr(&isc->cur_frm->vb.vb2_buf, 0);
+ regmap_write(regmap, ISC_DAD0, addr0);
+
+ switch (pixfmt->pixelformat) {
+ case V4L2_PIX_FMT_YUV420:
+ regmap_write(regmap, ISC_DAD1, addr0 + (sizeimage * 2) / 3);
+ regmap_write(regmap, ISC_DAD2, addr0 + (sizeimage * 5) / 6);
+ break;
+ case V4L2_PIX_FMT_YUV422P:
+ regmap_write(regmap, ISC_DAD1, addr0 + sizeimage / 2);
+ regmap_write(regmap, ISC_DAD2, addr0 + (sizeimage * 3) / 4);
+ break;
+ default:
+ break;
+ }
- addr = vb2_dma_contig_plane_dma_addr(&frm->vb.vb2_buf, 0);
+ if (sensor_is_preferred(isc->current_fmt))
+ dctrl_dview = ISC_DCTRL_DVIEW_PACKED;
+ else
+ dctrl_dview = isc->current_fmt->reg_dctrl_dview;
- regmap_write(regmap, ISC_DCTRL, dview | ISC_DCTRL_IE_IS);
- regmap_write(regmap, ISC_DAD0, addr);
+ regmap_write(regmap, ISC_DCTRL, dctrl_dview | ISC_DCTRL_IE_IS);
regmap_write(regmap, ISC_CTRLEN, ISC_CTRL_CAPTURE);
}
static void isc_set_pipeline(struct isc_device *isc, u32 pipeline)
{
- u32 val;
+ struct regmap *regmap = isc->regmap;
+ struct isc_ctrls *ctrls = &isc->ctrls;
+ u32 val, bay_cfg;
+ const u32 *gamma;
unsigned int i;
+ /* WB-->CFA-->CC-->GAM-->CSC-->CBC-->SUB422-->SUB420 */
for (i = 0; i < ISC_PIPE_LINE_NODE_NUM; i++) {
val = pipeline & BIT(i) ? 1 : 0;
regmap_field_write(isc->pipeline[i], val);
}
+
+ if (!pipeline)
+ return;
+
+ bay_cfg = isc->raw_fmt->reg_bay_cfg;
+
+ regmap_write(regmap, ISC_WB_CFG, bay_cfg);
+ regmap_write(regmap, ISC_WB_O_RGR, 0x0);
+ regmap_write(regmap, ISC_WB_O_BGR, 0x0);
+ regmap_write(regmap, ISC_WB_G_RGR, ctrls->r_gain | (0x1 << 25));
+ regmap_write(regmap, ISC_WB_G_BGR, ctrls->b_gain | (0x1 << 25));
+
+ regmap_write(regmap, ISC_CFA_CFG, bay_cfg | ISC_CFA_CFG_EITPOL);
+
+ gamma = &isc_gamma_table[ctrls->gamma_index][0];
+ regmap_bulk_write(regmap, ISC_GAM_BENTRY, gamma, GAMMA_ENTRIES);
+ regmap_bulk_write(regmap, ISC_GAM_GENTRY, gamma, GAMMA_ENTRIES);
+ regmap_bulk_write(regmap, ISC_GAM_RENTRY, gamma, GAMMA_ENTRIES);
+
+ /* Convert RGB to YUV */
+ regmap_write(regmap, ISC_CSC_YR_YG, 0x42 | (0x81 << 16));
+ regmap_write(regmap, ISC_CSC_YB_OY, 0x19 | (0x10 << 16));
+ regmap_write(regmap, ISC_CSC_CBR_CBG, 0xFDA | (0xFB6 << 16));
+ regmap_write(regmap, ISC_CSC_CBB_OCB, 0x70 | (0x80 << 16));
+ regmap_write(regmap, ISC_CSC_CRR_CRG, 0x70 | (0xFA2 << 16));
+ regmap_write(regmap, ISC_CSC_CRB_OCR, 0xFEE | (0x80 << 16));
+
+ regmap_write(regmap, ISC_CBC_BRIGHT, ctrls->brightness);
+ regmap_write(regmap, ISC_CBC_CONTRAST, ctrls->contrast);
+}
+
+static int isc_update_profile(struct isc_device *isc)
+{
+ struct regmap *regmap = isc->regmap;
+ u32 sr;
+ int counter = 100;
+
+ regmap_write(regmap, ISC_CTRLEN, ISC_CTRL_UPPRO);
+
+ regmap_read(regmap, ISC_CTRLSR, &sr);
+ while ((sr & ISC_CTRL_UPPRO) && counter--) {
+ usleep_range(1000, 2000);
+ regmap_read(regmap, ISC_CTRLSR, &sr);
+ }
+
+ if (counter < 0) {
+ v4l2_warn(&isc->v4l2_dev, "Time out to update profile\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static void isc_set_histogram(struct isc_device *isc)
+{
+ struct regmap *regmap = isc->regmap;
+ struct isc_ctrls *ctrls = &isc->ctrls;
+
+ if (ctrls->awb && (ctrls->hist_stat != HIST_ENABLED)) {
+ regmap_write(regmap, ISC_HIS_CFG, ISC_HIS_CFG_MODE_R |
+ (isc->raw_fmt->reg_bay_cfg << ISC_HIS_CFG_BAYSEL_SHIFT) |
+ ISC_HIS_CFG_RAR);
+ regmap_write(regmap, ISC_HIS_CTRL, ISC_HIS_CTRL_EN);
+ regmap_write(regmap, ISC_INTEN, ISC_INT_HISDONE);
+ ctrls->hist_id = ISC_HIS_CFG_MODE_R;
+ isc_update_profile(isc);
+ regmap_write(regmap, ISC_CTRLEN, ISC_CTRL_HISREQ);
+
+ ctrls->hist_stat = HIST_ENABLED;
+ } else if (!ctrls->awb && (ctrls->hist_stat != HIST_DISABLED)) {
+ regmap_write(regmap, ISC_INTDIS, ISC_INT_HISDONE);
+ regmap_write(regmap, ISC_HIS_CTRL, ISC_HIS_CTRL_DIS);
+
+ ctrls->hist_stat = HIST_DISABLED;
+ }
+}
+
+static inline void isc_get_param(const struct isc_format *fmt,
+ u32 *rlp_mode, u32 *dcfg_imode)
+{
+ switch (fmt->fourcc) {
+ case V4L2_PIX_FMT_SBGGR10:
+ case V4L2_PIX_FMT_SGBRG10:
+ case V4L2_PIX_FMT_SGRBG10:
+ case V4L2_PIX_FMT_SRGGB10:
+ case V4L2_PIX_FMT_SBGGR12:
+ case V4L2_PIX_FMT_SGBRG12:
+ case V4L2_PIX_FMT_SGRBG12:
+ case V4L2_PIX_FMT_SRGGB12:
+ *rlp_mode = fmt->reg_rlp_mode;
+ *dcfg_imode = fmt->reg_dcfg_imode;
+ break;
+ default:
+ *rlp_mode = ISC_RLP_CFG_MODE_DAT8;
+ *dcfg_imode = ISC_DCFG_IMODE_PACKED8;
+ break;
+ }
}
static int isc_configure(struct isc_device *isc)
@@ -475,39 +726,40 @@ static int isc_configure(struct isc_device *isc)
struct regmap *regmap = isc->regmap;
const struct isc_format *current_fmt = isc->current_fmt;
struct isc_subdev_entity *subdev = isc->current_subdev;
- u32 val, mask;
- int counter = 10;
+ u32 pfe_cfg0, rlp_mode, dcfg_imode, mask, pipeline;
+
+ if (sensor_is_preferred(current_fmt)) {
+ pfe_cfg0 = current_fmt->reg_bps;
+ pipeline = 0x0;
+ isc_get_param(current_fmt, &rlp_mode, &dcfg_imode);
+ isc->ctrls.hist_stat = HIST_INIT;
+ } else {
+ pfe_cfg0 = isc->raw_fmt->reg_bps;
+ pipeline = current_fmt->pipeline;
+ rlp_mode = current_fmt->reg_rlp_mode;
+ dcfg_imode = current_fmt->reg_dcfg_imode;
+ }
- val = current_fmt->reg_bps | subdev->pfe_cfg0 |
- ISC_PFE_CFG0_MODE_PROGRESSIVE;
+ pfe_cfg0 |= subdev->pfe_cfg0 | ISC_PFE_CFG0_MODE_PROGRESSIVE;
mask = ISC_PFE_CFG0_BPS_MASK | ISC_PFE_CFG0_HPOL_LOW |
ISC_PFE_CFG0_VPOL_LOW | ISC_PFE_CFG0_PPOL_LOW |
ISC_PFE_CFG0_MODE_MASK;
- regmap_update_bits(regmap, ISC_PFE_CFG0, mask, val);
+ regmap_update_bits(regmap, ISC_PFE_CFG0, mask, pfe_cfg0);
regmap_update_bits(regmap, ISC_RLP_CFG, ISC_RLP_CFG_MODE_MASK,
- current_fmt->reg_rlp_mode);
+ rlp_mode);
- regmap_update_bits(regmap, ISC_DCFG, ISC_DCFG_IMODE_MASK,
- current_fmt->reg_dcfg_imode);
+ regmap_update_bits(regmap, ISC_DCFG, ISC_DCFG_IMODE_MASK, dcfg_imode);
- /* Disable the pipeline */
- isc_set_pipeline(isc, 0x0);
+ /* Set the pipeline */
+ isc_set_pipeline(isc, pipeline);
- /* Update profile */
- regmap_write(regmap, ISC_CTRLEN, ISC_CTRL_UPPRO);
-
- regmap_read(regmap, ISC_CTRLSR, &val);
- while ((val & ISC_CTRL_UPPRO) && counter--) {
- usleep_range(1000, 2000);
- regmap_read(regmap, ISC_CTRLSR, &val);
- }
+ if (pipeline)
+ isc_set_histogram(isc);
- if (counter < 0)
- return -ETIMEDOUT;
-
- return 0;
+ /* Update profile */
+ return isc_update_profile(isc);
}
static int isc_start_streaming(struct vb2_queue *vq, unsigned int count)
@@ -517,7 +769,6 @@ static int isc_start_streaming(struct vb2_queue *vq, unsigned int count)
struct isc_buffer *buf;
unsigned long flags;
int ret;
- u32 val;
/* Enable stream on the sub device */
ret = v4l2_subdev_call(isc->current_subdev->sd, video, s_stream, 1);
@@ -528,12 +779,6 @@ static int isc_start_streaming(struct vb2_queue *vq, unsigned int count)
pm_runtime_get_sync(isc->dev);
- /* Disable all the interrupts */
- regmap_write(isc->regmap, ISC_INTDIS, (u32)~0UL);
-
- /* Clean the interrupt status register */
- regmap_read(regmap, ISC_INTSR, &val);
-
ret = isc_configure(isc);
if (unlikely(ret))
goto err_configure;
@@ -551,7 +796,7 @@ static int isc_start_streaming(struct vb2_queue *vq, unsigned int count)
struct isc_buffer, list);
list_del(&isc->cur_frm->list);
- isc_start_dma(regmap, isc->cur_frm, isc->current_fmt->reg_dctrl_dview);
+ isc_start_dma(isc);
spin_unlock_irqrestore(&isc->dma_queue_lock, flags);
@@ -620,8 +865,7 @@ static void isc_buffer_queue(struct vb2_buffer *vb)
if (!isc->cur_frm && list_empty(&isc->dma_queue) &&
vb2_is_streaming(vb->vb2_queue)) {
isc->cur_frm = buf;
- isc_start_dma(isc->regmap, isc->cur_frm,
- isc->current_fmt->reg_dctrl_dview);
+ isc_start_dma(isc);
} else
list_add_tail(&buf->list, &isc->dma_queue);
spin_unlock_irqrestore(&isc->dma_queue_lock, flags);
@@ -691,13 +935,14 @@ static struct isc_format *find_format_by_fourcc(struct isc_device *isc,
}
static int isc_try_fmt(struct isc_device *isc, struct v4l2_format *f,
- struct isc_format **current_fmt)
+ struct isc_format **current_fmt, u32 *code)
{
struct isc_format *isc_fmt;
struct v4l2_pix_format *pixfmt = &f->fmt.pix;
struct v4l2_subdev_format format = {
.which = V4L2_SUBDEV_FORMAT_TRY,
};
+ u32 mbus_code;
int ret;
if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
@@ -717,7 +962,12 @@ static int isc_try_fmt(struct isc_device *isc, struct v4l2_format *f,
if (pixfmt->height > ISC_MAX_SUPPORT_HEIGHT)
pixfmt->height = ISC_MAX_SUPPORT_HEIGHT;
- v4l2_fill_mbus_format(&format.format, pixfmt, isc_fmt->mbus_code);
+ if (sensor_is_preferred(isc_fmt))
+ mbus_code = isc_fmt->mbus_code;
+ else
+ mbus_code = isc->raw_fmt->mbus_code;
+
+ v4l2_fill_mbus_format(&format.format, pixfmt, mbus_code);
ret = v4l2_subdev_call(isc->current_subdev->sd, pad, set_fmt,
isc->current_subdev->config, &format);
if (ret < 0)
@@ -726,12 +976,15 @@ static int isc_try_fmt(struct isc_device *isc, struct v4l2_format *f,
v4l2_fill_pix_format(pixfmt, &format.format);
pixfmt->field = V4L2_FIELD_NONE;
- pixfmt->bytesperline = pixfmt->width * isc_fmt->bpp;
+ pixfmt->bytesperline = (pixfmt->width * isc_fmt->bpp) >> 3;
pixfmt->sizeimage = pixfmt->bytesperline * pixfmt->height;
if (current_fmt)
*current_fmt = isc_fmt;
+ if (code)
+ *code = mbus_code;
+
return 0;
}
@@ -741,14 +994,14 @@ static int isc_set_fmt(struct isc_device *isc, struct v4l2_format *f)
.which = V4L2_SUBDEV_FORMAT_ACTIVE,
};
struct isc_format *current_fmt;
+ u32 mbus_code;
int ret;
- ret = isc_try_fmt(isc, f, &current_fmt);
+ ret = isc_try_fmt(isc, f, &current_fmt, &mbus_code);
if (ret)
return ret;
- v4l2_fill_mbus_format(&format.format, &f->fmt.pix,
- current_fmt->mbus_code);
+ v4l2_fill_mbus_format(&format.format, &f->fmt.pix, mbus_code);
ret = v4l2_subdev_call(isc->current_subdev->sd, pad,
set_fmt, NULL, &format);
if (ret < 0)
@@ -776,7 +1029,7 @@ static int isc_try_fmt_vid_cap(struct file *file, void *priv,
{
struct isc_device *isc = video_drvdata(file);
- return isc_try_fmt(isc, f, NULL);
+ return isc_try_fmt(isc, f, NULL, NULL);
}
static int isc_enum_input(struct file *file, void *priv,
@@ -842,7 +1095,10 @@ static int isc_enum_framesizes(struct file *file, void *fh,
if (!isc_fmt)
return -EINVAL;
- fse.code = isc_fmt->mbus_code;
+ if (sensor_is_preferred(isc_fmt))
+ fse.code = isc_fmt->mbus_code;
+ else
+ fse.code = isc->raw_fmt->mbus_code;
ret = v4l2_subdev_call(isc->current_subdev->sd, pad, enum_frame_size,
NULL, &fse);
@@ -873,7 +1129,10 @@ static int isc_enum_frameintervals(struct file *file, void *fh,
if (!isc_fmt)
return -EINVAL;
- fie.code = isc_fmt->mbus_code;
+ if (sensor_is_preferred(isc_fmt))
+ fie.code = isc_fmt->mbus_code;
+ else
+ fie.code = isc->raw_fmt->mbus_code;
ret = v4l2_subdev_call(isc->current_subdev->sd, pad,
enum_frame_interval, NULL, &fie);
@@ -911,6 +1170,10 @@ static const struct v4l2_ioctl_ops isc_ioctl_ops = {
.vidioc_s_parm = isc_s_parm,
.vidioc_enum_framesizes = isc_enum_framesizes,
.vidioc_enum_frameintervals = isc_enum_frameintervals,
+
+ .vidioc_log_status = v4l2_ctrl_log_status,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
};
static int isc_open(struct file *file)
@@ -984,14 +1247,13 @@ static irqreturn_t isc_interrupt(int irq, void *dev_id)
u32 isc_intsr, isc_intmask, pending;
irqreturn_t ret = IRQ_NONE;
- spin_lock(&isc->dma_queue_lock);
-
regmap_read(regmap, ISC_INTSR, &isc_intsr);
regmap_read(regmap, ISC_INTMASK, &isc_intmask);
pending = isc_intsr & isc_intmask;
if (likely(pending & ISC_INT_DDONE)) {
+ spin_lock(&isc->dma_queue_lock);
if (isc->cur_frm) {
struct vb2_v4l2_buffer *vbuf = &isc->cur_frm->vb;
struct vb2_buffer *vb = &vbuf->vb2_buf;
@@ -1007,21 +1269,144 @@ static irqreturn_t isc_interrupt(int irq, void *dev_id)
struct isc_buffer, list);
list_del(&isc->cur_frm->list);
- isc_start_dma(regmap, isc->cur_frm,
- isc->current_fmt->reg_dctrl_dview);
+ isc_start_dma(isc);
}
if (isc->stop)
complete(&isc->comp);
ret = IRQ_HANDLED;
+ spin_unlock(&isc->dma_queue_lock);
}
- spin_unlock(&isc->dma_queue_lock);
+ if (pending & ISC_INT_HISDONE) {
+ schedule_work(&isc->awb_work);
+ ret = IRQ_HANDLED;
+ }
return ret;
}
+static void isc_hist_count(struct isc_device *isc)
+{
+ struct regmap *regmap = isc->regmap;
+ struct isc_ctrls *ctrls = &isc->ctrls;
+ u32 *hist_count = &ctrls->hist_count[ctrls->hist_id];
+ u32 *hist_entry = &ctrls->hist_entry[0];
+ u32 i;
+
+ regmap_bulk_read(regmap, ISC_HIS_ENTRY, hist_entry, HIST_ENTRIES);
+
+ *hist_count = 0;
+ for (i = 0; i < HIST_ENTRIES; i++)
+ *hist_count += i * (*hist_entry++);
+}
+
+static void isc_wb_update(struct isc_ctrls *ctrls)
+{
+ u32 *hist_count = &ctrls->hist_count[0];
+ u64 g_count = (u64)hist_count[ISC_HIS_CFG_MODE_GB] << 9;
+ u32 hist_r = hist_count[ISC_HIS_CFG_MODE_R];
+ u32 hist_b = hist_count[ISC_HIS_CFG_MODE_B];
+
+ if (hist_r)
+ ctrls->r_gain = div_u64(g_count, hist_r);
+
+ if (hist_b)
+ ctrls->b_gain = div_u64(g_count, hist_b);
+}
+
+static void isc_awb_work(struct work_struct *w)
+{
+ struct isc_device *isc =
+ container_of(w, struct isc_device, awb_work);
+ struct regmap *regmap = isc->regmap;
+ struct isc_ctrls *ctrls = &isc->ctrls;
+ u32 hist_id = ctrls->hist_id;
+ u32 baysel;
+
+ if (ctrls->hist_stat != HIST_ENABLED)
+ return;
+
+ isc_hist_count(isc);
+
+ if (hist_id != ISC_HIS_CFG_MODE_B) {
+ hist_id++;
+ } else {
+ isc_wb_update(ctrls);
+ hist_id = ISC_HIS_CFG_MODE_R;
+ }
+
+ ctrls->hist_id = hist_id;
+ baysel = isc->raw_fmt->reg_bay_cfg << ISC_HIS_CFG_BAYSEL_SHIFT;
+
+ pm_runtime_get_sync(isc->dev);
+
+ regmap_write(regmap, ISC_HIS_CFG, hist_id | baysel | ISC_HIS_CFG_RAR);
+ isc_update_profile(isc);
+ regmap_write(regmap, ISC_CTRLEN, ISC_CTRL_HISREQ);
+
+ pm_runtime_put_sync(isc->dev);
+}
+
+static int isc_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct isc_device *isc = container_of(ctrl->handler,
+ struct isc_device, ctrls.handler);
+ struct isc_ctrls *ctrls = &isc->ctrls;
+
+ switch (ctrl->id) {
+ case V4L2_CID_BRIGHTNESS:
+ ctrls->brightness = ctrl->val & ISC_CBC_BRIGHT_MASK;
+ break;
+ case V4L2_CID_CONTRAST:
+ ctrls->contrast = ctrl->val & ISC_CBC_CONTRAST_MASK;
+ break;
+ case V4L2_CID_GAMMA:
+ ctrls->gamma_index = ctrl->val;
+ break;
+ case V4L2_CID_AUTO_WHITE_BALANCE:
+ ctrls->awb = ctrl->val;
+ if (ctrls->hist_stat != HIST_ENABLED) {
+ ctrls->r_gain = 0x1 << 9;
+ ctrls->b_gain = 0x1 << 9;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops isc_ctrl_ops = {
+ .s_ctrl = isc_s_ctrl,
+};
+
+static int isc_ctrl_init(struct isc_device *isc)
+{
+ const struct v4l2_ctrl_ops *ops = &isc_ctrl_ops;
+ struct isc_ctrls *ctrls = &isc->ctrls;
+ struct v4l2_ctrl_handler *hdl = &ctrls->handler;
+ int ret;
+
+ ctrls->hist_stat = HIST_INIT;
+
+ ret = v4l2_ctrl_handler_init(hdl, 4);
+ if (ret < 0)
+ return ret;
+
+ v4l2_ctrl_new_std(hdl, ops, V4L2_CID_BRIGHTNESS, -1024, 1023, 1, 0);
+ v4l2_ctrl_new_std(hdl, ops, V4L2_CID_CONTRAST, -2048, 2047, 1, 256);
+ v4l2_ctrl_new_std(hdl, ops, V4L2_CID_GAMMA, 0, GAMMA_MAX, 1, 2);
+ v4l2_ctrl_new_std(hdl, ops, V4L2_CID_AUTO_WHITE_BALANCE, 0, 1, 1, 1);
+
+ v4l2_ctrl_handler_setup(hdl);
+
+ return 0;
+}
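
With the handler above attached to the video node, the new brightness, contrast, gamma and auto-white-balance controls become reachable through the standard control ioctls. A minimal userspace sketch; the /dev/video0 path and the values are assumptions, not part of the patch.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static int set_ctrl(int fd, unsigned int id, int value)
{
	struct v4l2_control ctrl = { .id = id, .value = value };

	/* VIDIOC_S_CTRL reaches isc_s_ctrl() through the control framework */
	return ioctl(fd, VIDIOC_S_CTRL, &ctrl);
}

int main(void)
{
	int fd = open("/dev/video0", O_RDWR);	/* hypothetical ISC node */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	set_ctrl(fd, V4L2_CID_BRIGHTNESS, 100);
	set_ctrl(fd, V4L2_CID_CONTRAST, 300);
	set_ctrl(fd, V4L2_CID_GAMMA, 1);
	set_ctrl(fd, V4L2_CID_AUTO_WHITE_BALANCE, 1);

	return 0;
}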
+
static int isc_async_bound(struct v4l2_async_notifier *notifier,
struct v4l2_subdev *subdev,
struct v4l2_async_subdev *asd)
@@ -1047,10 +1432,11 @@ static void isc_async_unbind(struct v4l2_async_notifier *notifier,
{
struct isc_device *isc = container_of(notifier->v4l2_dev,
struct isc_device, v4l2_dev);
-
+ cancel_work_sync(&isc->awb_work);
video_unregister_device(&isc->video_dev);
if (isc->current_subdev->config)
v4l2_subdev_free_pad_config(isc->current_subdev->config);
+ v4l2_ctrl_handler_free(&isc->ctrls.handler);
}
static struct isc_format *find_format_by_code(unsigned int code, int *index)
@@ -1074,14 +1460,16 @@ static int isc_formats_init(struct isc_device *isc)
{
struct isc_format *fmt;
struct v4l2_subdev *subdev = isc->current_subdev->sd;
- int num_fmts = 0, i, j;
+ unsigned int num_fmts, i, j;
struct v4l2_subdev_mbus_code_enum mbus_code = {
.which = V4L2_SUBDEV_FORMAT_ACTIVE,
};
fmt = &isc_formats[0];
for (i = 0; i < ARRAY_SIZE(isc_formats); i++) {
- fmt->support = false;
+ fmt->isc_support = false;
+ fmt->sd_support = false;
+
fmt++;
}
@@ -1092,8 +1480,22 @@ static int isc_formats_init(struct isc_device *isc)
if (!fmt)
continue;
- fmt->support = true;
- num_fmts++;
+ fmt->sd_support = true;
+
+ if (i <= RAW_FMT_IND_END) {
+ for (j = ISC_FMT_IND_START; j <= ISC_FMT_IND_END; j++)
+ isc_formats[j].isc_support = true;
+
+ isc->raw_fmt = fmt;
+ }
+ }
+
+ fmt = &isc_formats[0];
+ for (i = 0, num_fmts = 0; i < ARRAY_SIZE(isc_formats); i++) {
+ if (fmt->isc_support || fmt->sd_support)
+ num_fmts++;
+
+ fmt++;
}
if (!num_fmts)
@@ -1110,7 +1512,7 @@ static int isc_formats_init(struct isc_device *isc)
fmt = &isc_formats[0];
for (i = 0, j = 0; i < ARRAY_SIZE(isc_formats); i++) {
- if (fmt->support)
+ if (fmt->isc_support || fmt->sd_support)
isc->user_formats[j++] = fmt;
fmt++;
@@ -1132,7 +1534,7 @@ static int isc_set_default_fmt(struct isc_device *isc)
};
int ret;
- ret = isc_try_fmt(isc, &f, NULL);
+ ret = isc_try_fmt(isc, &f, NULL, NULL);
if (ret)
return ret;
@@ -1151,6 +1553,12 @@ static int isc_async_complete(struct v4l2_async_notifier *notifier)
struct vb2_queue *q = &isc->vb2_vidq;
int ret;
+ ret = v4l2_device_register_subdev_nodes(&isc->v4l2_dev);
+ if (ret < 0) {
+ v4l2_err(&isc->v4l2_dev, "Failed to register subdev nodes\n");
+ return ret;
+ }
+
isc->current_subdev = container_of(notifier,
struct isc_subdev_entity, notifier);
sd_entity = isc->current_subdev;
@@ -1198,6 +1606,14 @@ static int isc_async_complete(struct v4l2_async_notifier *notifier)
return ret;
}
+ ret = isc_ctrl_init(isc);
+ if (ret) {
+ v4l2_err(&isc->v4l2_dev, "Init isc ctrls failed: %d\n", ret);
+ return ret;
+ }
+
+ INIT_WORK(&isc->awb_work, isc_awb_work);
+
/* Register video device */
strlcpy(vdev->name, ATMEL_ISC_NAME, sizeof(vdev->name));
vdev->release = video_device_release_empty;
@@ -1207,7 +1623,7 @@ static int isc_async_complete(struct v4l2_async_notifier *notifier)
vdev->vfl_dir = VFL_DIR_RX;
vdev->queue = q;
vdev->lock = &isc->lock;
- vdev->ctrl_handler = isc->current_subdev->sd->ctrl_handler;
+ vdev->ctrl_handler = &isc->ctrls.handler;
vdev->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_CAPTURE;
video_set_drvdata(vdev, isc);
diff --git a/drivers/media/platform/atmel/atmel-isi.c b/drivers/media/platform/atmel/atmel-isi.c
new file mode 100644
index 000000000000..e4867f84514c
--- /dev/null
+++ b/drivers/media/platform/atmel/atmel-isi.c
@@ -0,0 +1,1368 @@
+/*
+ * Copyright (c) 2011 Atmel Corporation
+ * Josh Wu, <josh.wu@atmel.com>
+ *
+ * Based on previous work by Lars Haring, <lars.haring@atmel.com>
+ * and Sedji Gaouaou
+ * Based on the bttv driver for Bt848 with respective copyright holders
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+
+#include <linux/videodev2.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-dev.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-of.h>
+#include <media/videobuf2-dma-contig.h>
+#include <media/v4l2-image-sizes.h>
+
+#include "atmel-isi.h"
+
+#define MAX_SUPPORT_WIDTH 2048
+#define MAX_SUPPORT_HEIGHT 2048
+#define MIN_FRAME_RATE 15
+#define FRAME_INTERVAL_MILLI_SEC (1000 / MIN_FRAME_RATE)
+
+/* Frame buffer descriptor */
+struct fbd {
+ /* Physical address of the frame buffer */
+ u32 fb_address;
+ /* DMA Control Register (only in ISI V2) */
+ u32 dma_ctrl;
+ /* Physical address of the next fbd */
+ u32 next_fbd_address;
+};
+
+static void set_dma_ctrl(struct fbd *fb_desc, u32 ctrl)
+{
+ fb_desc->dma_ctrl = ctrl;
+}
+
+struct isi_dma_desc {
+ struct list_head list;
+ struct fbd *p_fbd;
+ dma_addr_t fbd_phys;
+};
+
+/* Frame buffer data */
+struct frame_buffer {
+ struct vb2_v4l2_buffer vb;
+ struct isi_dma_desc *p_dma_desc;
+ struct list_head list;
+};
+
+struct isi_graph_entity {
+ struct device_node *node;
+
+ struct v4l2_async_subdev asd;
+ struct v4l2_subdev *subdev;
+};
+
+/*
+ * struct isi_format - ISI media bus format information
+ * @fourcc: Fourcc code for this format
+ * @mbus_code: V4L2 media bus format code.
+ * @bpp: Bytes per pixel (when stored in memory)
+ * @swap: Byte swap configuration value
+ */
+struct isi_format {
+ u32 fourcc;
+ u32 mbus_code;
+ u8 bpp;
+ u32 swap;
+};
+
+struct atmel_isi {
+ /* Protects the access of variables shared with the ISR */
+ spinlock_t irqlock;
+ struct device *dev;
+ void __iomem *regs;
+
+ int sequence;
+
+ /* Allocate descriptors for dma buffer use */
+ struct fbd *p_fb_descriptors;
+ dma_addr_t fb_descriptors_phys;
+ struct list_head dma_desc_head;
+ struct isi_dma_desc dma_desc[VIDEO_MAX_FRAME];
+ bool enable_preview_path;
+
+ struct completion complete;
+ /* ISI peripheral clock */
+ struct clk *pclk;
+ unsigned int irq;
+
+ struct isi_platform_data pdata;
+ u16 width_flags; /* max 12 bits */
+
+ struct list_head video_buffer_list;
+ struct frame_buffer *active;
+
+ struct v4l2_device v4l2_dev;
+ struct video_device *vdev;
+ struct v4l2_async_notifier notifier;
+ struct isi_graph_entity entity;
+ struct v4l2_format fmt;
+
+ const struct isi_format **user_formats;
+ unsigned int num_user_formats;
+ const struct isi_format *current_fmt;
+
+ struct mutex lock;
+ struct vb2_queue queue;
+};
+
+#define notifier_to_isi(n) container_of(n, struct atmel_isi, notifier)
+
+static void isi_writel(struct atmel_isi *isi, u32 reg, u32 val)
+{
+ writel(val, isi->regs + reg);
+}
+static u32 isi_readl(struct atmel_isi *isi, u32 reg)
+{
+ return readl(isi->regs + reg);
+}
+
+static void configure_geometry(struct atmel_isi *isi)
+{
+ u32 cfg2, psize;
+ u32 fourcc = isi->current_fmt->fourcc;
+
+ isi->enable_preview_path = fourcc == V4L2_PIX_FMT_RGB565 ||
+ fourcc == V4L2_PIX_FMT_RGB32;
+
+ /* Set cfg2 according to the sensor's output format */
+ cfg2 = isi->current_fmt->swap;
+
+ isi_writel(isi, ISI_CTRL, ISI_CTRL_DIS);
+ /* Set width */
+ cfg2 |= ((isi->fmt.fmt.pix.width - 1) << ISI_CFG2_IM_HSIZE_OFFSET) &
+ ISI_CFG2_IM_HSIZE_MASK;
+ /* Set height */
+ cfg2 |= ((isi->fmt.fmt.pix.height - 1) << ISI_CFG2_IM_VSIZE_OFFSET)
+ & ISI_CFG2_IM_VSIZE_MASK;
+ isi_writel(isi, ISI_CFG2, cfg2);
+
+ /* No down sampling, preview size equal to sensor output size */
+ psize = ((isi->fmt.fmt.pix.width - 1) << ISI_PSIZE_PREV_HSIZE_OFFSET) &
+ ISI_PSIZE_PREV_HSIZE_MASK;
+ psize |= ((isi->fmt.fmt.pix.height - 1) << ISI_PSIZE_PREV_VSIZE_OFFSET) &
+ ISI_PSIZE_PREV_VSIZE_MASK;
+ isi_writel(isi, ISI_PSIZE, psize);
+ isi_writel(isi, ISI_PDECF, ISI_PDECF_NO_SAMPLING);
+}
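
configure_geometry() packs "size - 1" values into the image and preview size registers with the usual shift-and-mask pattern. A small standalone sketch of that packing; the *_OFFSET/*_MASK values below are invented for illustration and do not reflect the real ISI register layout (which lives in atmel-isi.h).

#include <stdint.h>
#include <stdio.h>

/* Illustrative field layout only -- not the real ISI register map */
#define IM_HSIZE_OFFSET	0
#define IM_HSIZE_MASK	0x000007ff
#define IM_VSIZE_OFFSET	16
#define IM_VSIZE_MASK	0x07ff0000

static uint32_t pack_geometry(uint32_t width, uint32_t height)
{
	uint32_t cfg2 = 0;

	/* The hardware expects "size - 1", as in configure_geometry() above */
	cfg2 |= ((width - 1) << IM_HSIZE_OFFSET) & IM_HSIZE_MASK;
	cfg2 |= ((height - 1) << IM_VSIZE_OFFSET) & IM_VSIZE_MASK;

	return cfg2;
}

int main(void)
{
	printf("cfg2 for 640x480: 0x%08x\n", (unsigned int)pack_geometry(640, 480));
	return 0;
}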
+
+static irqreturn_t atmel_isi_handle_streaming(struct atmel_isi *isi)
+{
+ if (isi->active) {
+ struct vb2_v4l2_buffer *vbuf = &isi->active->vb;
+ struct frame_buffer *buf = isi->active;
+
+ list_del_init(&buf->list);
+ vbuf->vb2_buf.timestamp = ktime_get_ns();
+ vbuf->sequence = isi->sequence++;
+ vbuf->field = V4L2_FIELD_NONE;
+ vb2_buffer_done(&vbuf->vb2_buf, VB2_BUF_STATE_DONE);
+ }
+
+ if (list_empty(&isi->video_buffer_list)) {
+ isi->active = NULL;
+ } else {
+ /* Start the next DMA frame. */
+ isi->active = list_entry(isi->video_buffer_list.next,
+ struct frame_buffer, list);
+ if (!isi->enable_preview_path) {
+ isi_writel(isi, ISI_DMA_C_DSCR,
+ (u32)isi->active->p_dma_desc->fbd_phys);
+ isi_writel(isi, ISI_DMA_C_CTRL,
+ ISI_DMA_CTRL_FETCH | ISI_DMA_CTRL_DONE);
+ isi_writel(isi, ISI_DMA_CHER, ISI_DMA_CHSR_C_CH);
+ } else {
+ isi_writel(isi, ISI_DMA_P_DSCR,
+ (u32)isi->active->p_dma_desc->fbd_phys);
+ isi_writel(isi, ISI_DMA_P_CTRL,
+ ISI_DMA_CTRL_FETCH | ISI_DMA_CTRL_DONE);
+ isi_writel(isi, ISI_DMA_CHER, ISI_DMA_CHSR_P_CH);
+ }
+ }
+ return IRQ_HANDLED;
+}
+
+/* ISI interrupt service routine */
+static irqreturn_t isi_interrupt(int irq, void *dev_id)
+{
+ struct atmel_isi *isi = dev_id;
+ u32 status, mask, pending;
+ irqreturn_t ret = IRQ_NONE;
+
+ spin_lock(&isi->irqlock);
+
+ status = isi_readl(isi, ISI_STATUS);
+ mask = isi_readl(isi, ISI_INTMASK);
+ pending = status & mask;
+
+ if (pending & ISI_CTRL_SRST) {
+ complete(&isi->complete);
+ isi_writel(isi, ISI_INTDIS, ISI_CTRL_SRST);
+ ret = IRQ_HANDLED;
+ } else if (pending & ISI_CTRL_DIS) {
+ complete(&isi->complete);
+ isi_writel(isi, ISI_INTDIS, ISI_CTRL_DIS);
+ ret = IRQ_HANDLED;
+ } else {
+ if (likely(pending & ISI_SR_CXFR_DONE) ||
+ likely(pending & ISI_SR_PXFR_DONE))
+ ret = atmel_isi_handle_streaming(isi);
+ }
+
+ spin_unlock(&isi->irqlock);
+ return ret;
+}
+
+#define WAIT_ISI_RESET 1
+#define WAIT_ISI_DISABLE 0
+static int atmel_isi_wait_status(struct atmel_isi *isi, int wait_reset)
+{
+ unsigned long timeout;
+ /*
+ * The reset or disable will only succeed if we have a
+ * pixel clock from the camera.
+ */
+ init_completion(&isi->complete);
+
+ if (wait_reset) {
+ isi_writel(isi, ISI_INTEN, ISI_CTRL_SRST);
+ isi_writel(isi, ISI_CTRL, ISI_CTRL_SRST);
+ } else {
+ isi_writel(isi, ISI_INTEN, ISI_CTRL_DIS);
+ isi_writel(isi, ISI_CTRL, ISI_CTRL_DIS);
+ }
+
+ timeout = wait_for_completion_timeout(&isi->complete,
+ msecs_to_jiffies(500));
+ if (timeout == 0)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+/* ------------------------------------------------------------------
+ Videobuf operations
+ ------------------------------------------------------------------*/
+static int queue_setup(struct vb2_queue *vq,
+ unsigned int *nbuffers, unsigned int *nplanes,
+ unsigned int sizes[], struct device *alloc_devs[])
+{
+ struct atmel_isi *isi = vb2_get_drv_priv(vq);
+ unsigned long size;
+
+ size = isi->fmt.fmt.pix.sizeimage;
+
+ /* Make sure the image size is large enough. */
+ if (*nplanes)
+ return sizes[0] < size ? -EINVAL : 0;
+
+ *nplanes = 1;
+ sizes[0] = size;
+
+ isi->active = NULL;
+
+ dev_dbg(isi->dev, "%s, count=%d, size=%ld\n", __func__,
+ *nbuffers, size);
+
+ return 0;
+}
+
+static int buffer_init(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct frame_buffer *buf = container_of(vbuf, struct frame_buffer, vb);
+
+ buf->p_dma_desc = NULL;
+ INIT_LIST_HEAD(&buf->list);
+
+ return 0;
+}
+
+static int buffer_prepare(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct frame_buffer *buf = container_of(vbuf, struct frame_buffer, vb);
+ struct atmel_isi *isi = vb2_get_drv_priv(vb->vb2_queue);
+ unsigned long size;
+ struct isi_dma_desc *desc;
+
+ size = isi->fmt.fmt.pix.sizeimage;
+
+ if (vb2_plane_size(vb, 0) < size) {
+ dev_err(isi->dev, "%s data will not fit into plane (%lu < %lu)\n",
+ __func__, vb2_plane_size(vb, 0), size);
+ return -EINVAL;
+ }
+
+ vb2_set_plane_payload(vb, 0, size);
+
+ if (!buf->p_dma_desc) {
+ if (list_empty(&isi->dma_desc_head)) {
+ dev_err(isi->dev, "Not enough dma descriptors.\n");
+ return -EINVAL;
+ } else {
+ /* Get an available descriptor */
+ desc = list_entry(isi->dma_desc_head.next,
+ struct isi_dma_desc, list);
+ /* Remove the descriptor from the free list since it is now in use */
+ list_del_init(&desc->list);
+
+ /* Initialize the dma descriptor */
+ desc->p_fbd->fb_address =
+ vb2_dma_contig_plane_dma_addr(vb, 0);
+ desc->p_fbd->next_fbd_address = 0;
+ set_dma_ctrl(desc->p_fbd, ISI_DMA_CTRL_WB);
+
+ buf->p_dma_desc = desc;
+ }
+ }
+ return 0;
+}
+
+static void buffer_cleanup(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct atmel_isi *isi = vb2_get_drv_priv(vb->vb2_queue);
+ struct frame_buffer *buf = container_of(vbuf, struct frame_buffer, vb);
+
+ /* The descriptor is available again; return it to the head of the list */
+ if (buf->p_dma_desc)
+ list_add(&buf->p_dma_desc->list, &isi->dma_desc_head);
+}
+
+static void start_dma(struct atmel_isi *isi, struct frame_buffer *buffer)
+{
+ u32 ctrl, cfg1;
+
+ cfg1 = isi_readl(isi, ISI_CFG1);
+ /* Enable irq: cxfr for the codec path, pxfr for the preview path */
+ isi_writel(isi, ISI_INTEN,
+ ISI_SR_CXFR_DONE | ISI_SR_PXFR_DONE);
+
+ /* Check if already in a frame */
+ if (!isi->enable_preview_path) {
+ if (isi_readl(isi, ISI_STATUS) & ISI_CTRL_CDC) {
+ dev_err(isi->dev, "Already in frame handling.\n");
+ return;
+ }
+
+ isi_writel(isi, ISI_DMA_C_DSCR,
+ (u32)buffer->p_dma_desc->fbd_phys);
+ isi_writel(isi, ISI_DMA_C_CTRL,
+ ISI_DMA_CTRL_FETCH | ISI_DMA_CTRL_DONE);
+ isi_writel(isi, ISI_DMA_CHER, ISI_DMA_CHSR_C_CH);
+ } else {
+ isi_writel(isi, ISI_DMA_P_DSCR,
+ (u32)buffer->p_dma_desc->fbd_phys);
+ isi_writel(isi, ISI_DMA_P_CTRL,
+ ISI_DMA_CTRL_FETCH | ISI_DMA_CTRL_DONE);
+ isi_writel(isi, ISI_DMA_CHER, ISI_DMA_CHSR_P_CH);
+ }
+
+ cfg1 &= ~ISI_CFG1_FRATE_DIV_MASK;
+ /* Enable linked list */
+ cfg1 |= isi->pdata.frate | ISI_CFG1_DISCR;
+
+ /* Enable ISI */
+ ctrl = ISI_CTRL_EN;
+
+ if (!isi->enable_preview_path)
+ ctrl |= ISI_CTRL_CDC;
+
+ isi_writel(isi, ISI_CTRL, ctrl);
+ isi_writel(isi, ISI_CFG1, cfg1);
+}
+
+static void buffer_queue(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct atmel_isi *isi = vb2_get_drv_priv(vb->vb2_queue);
+ struct frame_buffer *buf = container_of(vbuf, struct frame_buffer, vb);
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&isi->irqlock, flags);
+ list_add_tail(&buf->list, &isi->video_buffer_list);
+
+ if (isi->active == NULL) {
+ isi->active = buf;
+ if (vb2_is_streaming(vb->vb2_queue))
+ start_dma(isi, buf);
+ }
+ spin_unlock_irqrestore(&isi->irqlock, flags);
+}
+
+static int start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+ struct atmel_isi *isi = vb2_get_drv_priv(vq);
+ struct frame_buffer *buf, *node;
+ int ret;
+
+ /* Enable stream on the sub device */
+ ret = v4l2_subdev_call(isi->entity.subdev, video, s_stream, 1);
+ if (ret && ret != -ENOIOCTLCMD) {
+ dev_err(isi->dev, "stream on failed in subdev\n");
+ goto err_start_stream;
+ }
+
+ pm_runtime_get_sync(isi->dev);
+
+ /* Reset ISI */
+ ret = atmel_isi_wait_status(isi, WAIT_ISI_RESET);
+ if (ret < 0) {
+ dev_err(isi->dev, "Reset ISI timed out\n");
+ goto err_reset;
+ }
+ /* Disable all interrupts */
+ isi_writel(isi, ISI_INTDIS, (u32)~0UL);
+
+ isi->sequence = 0;
+ configure_geometry(isi);
+
+ spin_lock_irq(&isi->irqlock);
+ /* Clear any pending interrupt */
+ isi_readl(isi, ISI_STATUS);
+
+ start_dma(isi, isi->active);
+ spin_unlock_irq(&isi->irqlock);
+
+ return 0;
+
+err_reset:
+ pm_runtime_put(isi->dev);
+ v4l2_subdev_call(isi->entity.subdev, video, s_stream, 0);
+
+err_start_stream:
+ spin_lock_irq(&isi->irqlock);
+ isi->active = NULL;
+ /* Release all active buffers */
+ list_for_each_entry_safe(buf, node, &isi->video_buffer_list, list) {
+ list_del_init(&buf->list);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_QUEUED);
+ }
+ spin_unlock_irq(&isi->irqlock);
+
+ return ret;
+}
+
+/* abort streaming and wait for last buffer */
+static void stop_streaming(struct vb2_queue *vq)
+{
+ struct atmel_isi *isi = vb2_get_drv_priv(vq);
+ struct frame_buffer *buf, *node;
+ int ret = 0;
+ unsigned long timeout;
+
+ /* Disable stream on the sub device */
+ ret = v4l2_subdev_call(isi->entity.subdev, video, s_stream, 0);
+ if (ret && ret != -ENOIOCTLCMD)
+ dev_err(isi->dev, "stream off failed in subdev\n");
+
+ spin_lock_irq(&isi->irqlock);
+ isi->active = NULL;
+ /* Release all active buffers */
+ list_for_each_entry_safe(buf, node, &isi->video_buffer_list, list) {
+ list_del_init(&buf->list);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
+ }
+ spin_unlock_irq(&isi->irqlock);
+
+ if (!isi->enable_preview_path) {
+ timeout = jiffies + FRAME_INTERVAL_MILLI_SEC * HZ;
+ /* Wait until the end of the current frame. */
+ while ((isi_readl(isi, ISI_STATUS) & ISI_CTRL_CDC) &&
+ time_before(jiffies, timeout))
+ msleep(1);
+
+ if (time_after(jiffies, timeout))
+ dev_err(isi->dev,
+ "Timeout waiting for finishing codec request\n");
+ }
+
+ /* Disable interrupts */
+ isi_writel(isi, ISI_INTDIS,
+ ISI_SR_CXFR_DONE | ISI_SR_PXFR_DONE);
+
+ /* Disable the ISI and wait until the disable has completed */
+ ret = atmel_isi_wait_status(isi, WAIT_ISI_DISABLE);
+ if (ret < 0)
+ dev_err(isi->dev, "Disable ISI timed out\n");
+
+ pm_runtime_put(isi->dev);
+}
+
+static const struct vb2_ops isi_video_qops = {
+ .queue_setup = queue_setup,
+ .buf_init = buffer_init,
+ .buf_prepare = buffer_prepare,
+ .buf_cleanup = buffer_cleanup,
+ .buf_queue = buffer_queue,
+ .start_streaming = start_streaming,
+ .stop_streaming = stop_streaming,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+};
+
+static int isi_g_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *fmt)
+{
+ struct atmel_isi *isi = video_drvdata(file);
+
+ *fmt = isi->fmt;
+
+ return 0;
+}
+
+static const struct isi_format *find_format_by_fourcc(struct atmel_isi *isi,
+ unsigned int fourcc)
+{
+ unsigned int num_formats = isi->num_user_formats;
+ const struct isi_format *fmt;
+ unsigned int i;
+
+ for (i = 0; i < num_formats; i++) {
+ fmt = isi->user_formats[i];
+ if (fmt->fourcc == fourcc)
+ return fmt;
+ }
+
+ return NULL;
+}
+
+static int isi_try_fmt(struct atmel_isi *isi, struct v4l2_format *f,
+ const struct isi_format **current_fmt)
+{
+ const struct isi_format *isi_fmt;
+ struct v4l2_pix_format *pixfmt = &f->fmt.pix;
+ struct v4l2_subdev_pad_config pad_cfg;
+ struct v4l2_subdev_format format = {
+ .which = V4L2_SUBDEV_FORMAT_TRY,
+ };
+ int ret;
+
+ if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ isi_fmt = find_format_by_fourcc(isi, pixfmt->pixelformat);
+ if (!isi_fmt) {
+ isi_fmt = isi->user_formats[isi->num_user_formats - 1];
+ pixfmt->pixelformat = isi_fmt->fourcc;
+ }
+
+ /* Limit to Atmel ISI hardware capabilities */
+ if (pixfmt->width > MAX_SUPPORT_WIDTH)
+ pixfmt->width = MAX_SUPPORT_WIDTH;
+ if (pixfmt->height > MAX_SUPPORT_HEIGHT)
+ pixfmt->height = MAX_SUPPORT_HEIGHT;
+
+ v4l2_fill_mbus_format(&format.format, pixfmt, isi_fmt->mbus_code);
+ ret = v4l2_subdev_call(isi->entity.subdev, pad, set_fmt,
+ &pad_cfg, &format);
+ if (ret < 0)
+ return ret;
+
+ v4l2_fill_pix_format(pixfmt, &format.format);
+
+ pixfmt->field = V4L2_FIELD_NONE;
+ pixfmt->bytesperline = pixfmt->width * isi_fmt->bpp;
+ pixfmt->sizeimage = pixfmt->bytesperline * pixfmt->height;
+
+ if (current_fmt)
+ *current_fmt = isi_fmt;
+
+ return 0;
+}
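
From userspace the negotiation above is exercised through VIDIOC_TRY_FMT, which clamps and adjusts the requested format without applying it to the hardware. A minimal sketch, assuming a hypothetical /dev/video0 node.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_format f;
	int fd = open("/dev/video0", O_RDWR);	/* hypothetical ISI node */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&f, 0, sizeof(f));
	f.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	f.fmt.pix.width = 4096;			/* deliberately too large */
	f.fmt.pix.height = 4096;
	f.fmt.pix.pixelformat = V4L2_PIX_FMT_YUYV;

	/* The driver clamps to 2048x2048 and lets the sensor refine the rest */
	if (ioctl(fd, VIDIOC_TRY_FMT, &f) == 0)
		printf("adjusted to %ux%u, %u bytes per line\n",
		       f.fmt.pix.width, f.fmt.pix.height, f.fmt.pix.bytesperline);

	return 0;
}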
+
+static int isi_set_fmt(struct atmel_isi *isi, struct v4l2_format *f)
+{
+ struct v4l2_subdev_format format = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+ const struct isi_format *current_fmt;
+ int ret;
+
+ ret = isi_try_fmt(isi, f, &current_fmt);
+ if (ret)
+ return ret;
+
+ v4l2_fill_mbus_format(&format.format, &f->fmt.pix,
+ current_fmt->mbus_code);
+ ret = v4l2_subdev_call(isi->entity.subdev, pad,
+ set_fmt, NULL, &format);
+ if (ret < 0)
+ return ret;
+
+ isi->fmt = *f;
+ isi->current_fmt = current_fmt;
+
+ return 0;
+}
+
+static int isi_s_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct atmel_isi *isi = video_drvdata(file);
+
+ if (vb2_is_streaming(&isi->queue))
+ return -EBUSY;
+
+ return isi_set_fmt(isi, f);
+}
+
+static int isi_try_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct atmel_isi *isi = video_drvdata(file);
+
+ return isi_try_fmt(isi, f, NULL);
+}
+
+static int isi_enum_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ struct atmel_isi *isi = video_drvdata(file);
+
+ if (f->index >= isi->num_user_formats)
+ return -EINVAL;
+
+ f->pixelformat = isi->user_formats[f->index]->fourcc;
+ return 0;
+}
+
+static int isi_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ strlcpy(cap->driver, "atmel-isi", sizeof(cap->driver));
+ strlcpy(cap->card, "Atmel Image Sensor Interface", sizeof(cap->card));
+ strlcpy(cap->bus_info, "platform:isi", sizeof(cap->bus_info));
+ return 0;
+}
+
+static int isi_enum_input(struct file *file, void *priv,
+ struct v4l2_input *i)
+{
+ if (i->index != 0)
+ return -EINVAL;
+
+ i->type = V4L2_INPUT_TYPE_CAMERA;
+ strlcpy(i->name, "Camera", sizeof(i->name));
+ return 0;
+}
+
+static int isi_g_input(struct file *file, void *priv, unsigned int *i)
+{
+ *i = 0;
+ return 0;
+}
+
+static int isi_s_input(struct file *file, void *priv, unsigned int i)
+{
+ if (i > 0)
+ return -EINVAL;
+ return 0;
+}
+
+static int isi_g_parm(struct file *file, void *fh, struct v4l2_streamparm *a)
+{
+ struct atmel_isi *isi = video_drvdata(file);
+
+ if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ a->parm.capture.readbuffers = 2;
+ return v4l2_subdev_call(isi->entity.subdev, video, g_parm, a);
+}
+
+static int isi_s_parm(struct file *file, void *fh, struct v4l2_streamparm *a)
+{
+ struct atmel_isi *isi = video_drvdata(file);
+
+ if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ a->parm.capture.readbuffers = 2;
+ return v4l2_subdev_call(isi->entity.subdev, video, s_parm, a);
+}
+
+static int isi_enum_framesizes(struct file *file, void *fh,
+ struct v4l2_frmsizeenum *fsize)
+{
+ struct atmel_isi *isi = video_drvdata(file);
+ const struct isi_format *isi_fmt;
+ struct v4l2_subdev_frame_size_enum fse = {
+ .index = fsize->index,
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+ int ret;
+
+ isi_fmt = find_format_by_fourcc(isi, fsize->pixel_format);
+ if (!isi_fmt)
+ return -EINVAL;
+
+ fse.code = isi_fmt->mbus_code;
+
+ ret = v4l2_subdev_call(isi->entity.subdev, pad, enum_frame_size,
+ NULL, &fse);
+ if (ret)
+ return ret;
+
+ fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
+ fsize->discrete.width = fse.max_width;
+ fsize->discrete.height = fse.max_height;
+
+ return 0;
+}
+
+static int isi_enum_frameintervals(struct file *file, void *fh,
+ struct v4l2_frmivalenum *fival)
+{
+ struct atmel_isi *isi = video_drvdata(file);
+ const struct isi_format *isi_fmt;
+ struct v4l2_subdev_frame_interval_enum fie = {
+ .index = fival->index,
+ .width = fival->width,
+ .height = fival->height,
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+ int ret;
+
+ isi_fmt = find_format_by_fourcc(isi, fival->pixel_format);
+ if (!isi_fmt)
+ return -EINVAL;
+
+ fie.code = isi_fmt->mbus_code;
+
+ ret = v4l2_subdev_call(isi->entity.subdev, pad,
+ enum_frame_interval, NULL, &fie);
+ if (ret)
+ return ret;
+
+ fival->type = V4L2_FRMIVAL_TYPE_DISCRETE;
+ fival->discrete = fie.interval;
+
+ return 0;
+}
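
Both enumeration ioctls above simply translate to pad operations on the bound sensor subdevice, so userspace can walk the sensor's discrete frame sizes per pixel format. A rough sketch of such a loop; the device path and pixel format are assumptions.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_frmsizeenum fsize;
	int fd = open("/dev/video0", O_RDWR);	/* hypothetical ISI node */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&fsize, 0, sizeof(fsize));
	fsize.pixel_format = V4L2_PIX_FMT_YUYV;	/* assumed to be supported */

	/* Each index is forwarded to the sensor's enum_frame_size pad op */
	while (ioctl(fd, VIDIOC_ENUM_FRAMESIZES, &fsize) == 0) {
		if (fsize.type == V4L2_FRMSIZE_TYPE_DISCRETE)
			printf("%ux%u\n", fsize.discrete.width,
			       fsize.discrete.height);
		fsize.index++;
	}

	return 0;
}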
+
+static void isi_camera_set_bus_param(struct atmel_isi *isi)
+{
+ u32 cfg1 = 0;
+
+ /* set bus param for ISI */
+ if (isi->pdata.hsync_act_low)
+ cfg1 |= ISI_CFG1_HSYNC_POL_ACTIVE_LOW;
+ if (isi->pdata.vsync_act_low)
+ cfg1 |= ISI_CFG1_VSYNC_POL_ACTIVE_LOW;
+ if (isi->pdata.pclk_act_falling)
+ cfg1 |= ISI_CFG1_PIXCLK_POL_ACTIVE_FALLING;
+ if (isi->pdata.has_emb_sync)
+ cfg1 |= ISI_CFG1_EMB_SYNC;
+ if (isi->pdata.full_mode)
+ cfg1 |= ISI_CFG1_FULL_MODE;
+
+ cfg1 |= ISI_CFG1_THMASK_BEATS_16;
+
+ /* Enable runtime PM and the peripheral clock before touching ISI registers */
+ pm_runtime_get_sync(isi->dev);
+
+ isi_writel(isi, ISI_CTRL, ISI_CTRL_DIS);
+ isi_writel(isi, ISI_CFG1, cfg1);
+
+ pm_runtime_put(isi->dev);
+}
+
+/* -----------------------------------------------------------------------*/
+static int atmel_isi_parse_dt(struct atmel_isi *isi,
+ struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct v4l2_of_endpoint ep;
+ int err;
+
+ /* Default settings for ISI */
+ isi->pdata.full_mode = 1;
+ isi->pdata.frate = ISI_CFG1_FRATE_CAPTURE_ALL;
+
+ np = of_graph_get_next_endpoint(np, NULL);
+ if (!np) {
+ dev_err(&pdev->dev, "Could not find the endpoint\n");
+ return -EINVAL;
+ }
+
+ err = v4l2_of_parse_endpoint(np, &ep);
+ of_node_put(np);
+ if (err) {
+ dev_err(&pdev->dev, "Could not parse the endpoint\n");
+ return err;
+ }
+
+ switch (ep.bus.parallel.bus_width) {
+ case 8:
+ isi->pdata.data_width_flags = ISI_DATAWIDTH_8;
+ break;
+ case 10:
+ isi->pdata.data_width_flags =
+ ISI_DATAWIDTH_8 | ISI_DATAWIDTH_10;
+ break;
+ default:
+ dev_err(&pdev->dev, "Unsupported bus width: %d\n",
+ ep.bus.parallel.bus_width);
+ return -EINVAL;
+ }
+
+ if (ep.bus.parallel.flags & V4L2_MBUS_HSYNC_ACTIVE_LOW)
+ isi->pdata.hsync_act_low = true;
+ if (ep.bus.parallel.flags & V4L2_MBUS_VSYNC_ACTIVE_LOW)
+ isi->pdata.vsync_act_low = true;
+ if (ep.bus.parallel.flags & V4L2_MBUS_PCLK_SAMPLE_FALLING)
+ isi->pdata.pclk_act_falling = true;
+
+ if (ep.bus_type == V4L2_MBUS_BT656)
+ isi->pdata.has_emb_sync = true;
+
+ return 0;
+}
+
+static int isi_open(struct file *file)
+{
+ struct atmel_isi *isi = video_drvdata(file);
+ struct v4l2_subdev *sd = isi->entity.subdev;
+ int ret;
+
+ if (mutex_lock_interruptible(&isi->lock))
+ return -ERESTARTSYS;
+
+ ret = v4l2_fh_open(file);
+ if (ret < 0)
+ goto unlock;
+
+ if (!v4l2_fh_is_singular_file(file))
+ goto fh_rel;
+
+ ret = v4l2_subdev_call(sd, core, s_power, 1);
+ if (ret < 0 && ret != -ENOIOCTLCMD)
+ goto fh_rel;
+
+ ret = isi_set_fmt(isi, &isi->fmt);
+ if (ret)
+ v4l2_subdev_call(sd, core, s_power, 0);
+fh_rel:
+ if (ret)
+ v4l2_fh_release(file);
+unlock:
+ mutex_unlock(&isi->lock);
+ return ret;
+}
+
+static int isi_release(struct file *file)
+{
+ struct atmel_isi *isi = video_drvdata(file);
+ struct v4l2_subdev *sd = isi->entity.subdev;
+ bool fh_singular;
+ int ret;
+
+ mutex_lock(&isi->lock);
+
+ fh_singular = v4l2_fh_is_singular_file(file);
+
+ ret = _vb2_fop_release(file, NULL);
+
+ if (fh_singular)
+ v4l2_subdev_call(sd, core, s_power, 0);
+
+ mutex_unlock(&isi->lock);
+
+ return ret;
+}
+
+static const struct v4l2_ioctl_ops isi_ioctl_ops = {
+ .vidioc_querycap = isi_querycap,
+
+ .vidioc_try_fmt_vid_cap = isi_try_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = isi_g_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = isi_s_fmt_vid_cap,
+ .vidioc_enum_fmt_vid_cap = isi_enum_fmt_vid_cap,
+
+ .vidioc_enum_input = isi_enum_input,
+ .vidioc_g_input = isi_g_input,
+ .vidioc_s_input = isi_s_input,
+
+ .vidioc_g_parm = isi_g_parm,
+ .vidioc_s_parm = isi_s_parm,
+ .vidioc_enum_framesizes = isi_enum_framesizes,
+ .vidioc_enum_frameintervals = isi_enum_frameintervals,
+
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_expbuf = vb2_ioctl_expbuf,
+ .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+
+ .vidioc_log_status = v4l2_ctrl_log_status,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+static const struct v4l2_file_operations isi_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = video_ioctl2,
+ .open = isi_open,
+ .release = isi_release,
+ .poll = vb2_fop_poll,
+ .mmap = vb2_fop_mmap,
+ .read = vb2_fop_read,
+};
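
Taken together, the file and ioctl operations above expose a conventional mmap-streaming capture node; the usual sequence is S_FMT, REQBUFS, QBUF, STREAMON, then a DQBUF/QBUF loop. A compressed userspace sketch with error handling omitted; the device path, resolution and buffer counts are assumptions.

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_format f = { .type = V4L2_BUF_TYPE_VIDEO_CAPTURE };
	struct v4l2_requestbuffers req = {
		.count = 4,
		.type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
		.memory = V4L2_MEMORY_MMAP,
	};
	enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	struct v4l2_buffer buf;
	unsigned int i;
	int fd = open("/dev/video0", O_RDWR);	/* hypothetical ISI node */

	f.fmt.pix.width = 640;
	f.fmt.pix.height = 480;
	f.fmt.pix.pixelformat = V4L2_PIX_FMT_YUYV;
	ioctl(fd, VIDIOC_S_FMT, &f);

	ioctl(fd, VIDIOC_REQBUFS, &req);
	for (i = 0; i < req.count; i++) {
		memset(&buf, 0, sizeof(buf));
		buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
		buf.memory = V4L2_MEMORY_MMAP;
		buf.index = i;
		ioctl(fd, VIDIOC_QBUF, &buf);
	}

	ioctl(fd, VIDIOC_STREAMON, &type);

	/* Capture a handful of frames, re-queueing each buffer after use */
	for (i = 0; i < 10; i++) {
		memset(&buf, 0, sizeof(buf));
		buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
		buf.memory = V4L2_MEMORY_MMAP;
		ioctl(fd, VIDIOC_DQBUF, &buf);
		/* mmap()ing buf.m.offset would give access to the pixels here */
		ioctl(fd, VIDIOC_QBUF, &buf);
	}

	ioctl(fd, VIDIOC_STREAMOFF, &type);
	return 0;
}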
+
+static int isi_set_default_fmt(struct atmel_isi *isi)
+{
+ struct v4l2_format f = {
+ .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
+ .fmt.pix = {
+ .width = VGA_WIDTH,
+ .height = VGA_HEIGHT,
+ .field = V4L2_FIELD_NONE,
+ .pixelformat = isi->user_formats[0]->fourcc,
+ },
+ };
+ int ret;
+
+ ret = isi_try_fmt(isi, &f, NULL);
+ if (ret)
+ return ret;
+ isi->current_fmt = isi->user_formats[0];
+ isi->fmt = f;
+ return 0;
+}
+
+static const struct isi_format isi_formats[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .mbus_code = MEDIA_BUS_FMT_YUYV8_2X8,
+ .bpp = 2,
+ .swap = ISI_CFG2_YCC_SWAP_DEFAULT,
+ }, {
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .mbus_code = MEDIA_BUS_FMT_YVYU8_2X8,
+ .bpp = 2,
+ .swap = ISI_CFG2_YCC_SWAP_MODE_1,
+ }, {
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .mbus_code = MEDIA_BUS_FMT_UYVY8_2X8,
+ .bpp = 2,
+ .swap = ISI_CFG2_YCC_SWAP_MODE_2,
+ }, {
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .mbus_code = MEDIA_BUS_FMT_VYUY8_2X8,
+ .bpp = 2,
+ .swap = ISI_CFG2_YCC_SWAP_MODE_3,
+ }, {
+ .fourcc = V4L2_PIX_FMT_RGB565,
+ .mbus_code = MEDIA_BUS_FMT_YUYV8_2X8,
+ .bpp = 2,
+ .swap = ISI_CFG2_YCC_SWAP_MODE_2,
+ }, {
+ .fourcc = V4L2_PIX_FMT_RGB565,
+ .mbus_code = MEDIA_BUS_FMT_YVYU8_2X8,
+ .bpp = 2,
+ .swap = ISI_CFG2_YCC_SWAP_MODE_3,
+ }, {
+ .fourcc = V4L2_PIX_FMT_RGB565,
+ .mbus_code = MEDIA_BUS_FMT_UYVY8_2X8,
+ .bpp = 2,
+ .swap = ISI_CFG2_YCC_SWAP_DEFAULT,
+ }, {
+ .fourcc = V4L2_PIX_FMT_RGB565,
+ .mbus_code = MEDIA_BUS_FMT_VYUY8_2X8,
+ .bpp = 2,
+ .swap = ISI_CFG2_YCC_SWAP_MODE_1,
+ },
+};
+
+static int isi_formats_init(struct atmel_isi *isi)
+{
+ const struct isi_format *isi_fmts[ARRAY_SIZE(isi_formats)];
+ unsigned int num_fmts = 0, i, j;
+ struct v4l2_subdev *subdev = isi->entity.subdev;
+ struct v4l2_subdev_mbus_code_enum mbus_code = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+
+ while (!v4l2_subdev_call(subdev, pad, enum_mbus_code,
+ NULL, &mbus_code)) {
+ for (i = 0; i < ARRAY_SIZE(isi_formats); i++) {
+ if (isi_formats[i].mbus_code != mbus_code.code)
+ continue;
+
+ /* Code supported, have we got this fourcc yet? */
+ for (j = 0; j < num_fmts; j++)
+ if (isi_fmts[j]->fourcc == isi_formats[i].fourcc)
+ /* Already available */
+ break;
+ if (j == num_fmts)
+ /* new */
+ isi_fmts[num_fmts++] = isi_formats + i;
+ }
+ mbus_code.index++;
+ }
+
+ if (!num_fmts)
+ return -ENXIO;
+
+ isi->num_user_formats = num_fmts;
+ isi->user_formats = devm_kcalloc(isi->dev,
+ num_fmts, sizeof(struct isi_format *),
+ GFP_KERNEL);
+ if (!isi->user_formats) {
+ dev_err(isi->dev, "could not allocate memory\n");
+ return -ENOMEM;
+ }
+
+ memcpy(isi->user_formats, isi_fmts,
+ num_fmts * sizeof(struct isi_format *));
+ isi->current_fmt = isi->user_formats[0];
+
+ return 0;
+}
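
Because isi_formats_init() deduplicates by fourcc, VIDIOC_ENUM_FMT reports each pixel format once even when several media-bus codes can produce it. A short sketch of the enumeration loop; the device path is an assumption.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_fmtdesc fmt;
	int fd = open("/dev/video0", O_RDWR);	/* hypothetical ISI node */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&fmt, 0, sizeof(fmt));
	fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;

	/* Each fourcc appears once, no matter how many bus codes feed it */
	while (ioctl(fd, VIDIOC_ENUM_FMT, &fmt) == 0) {
		printf("format %u: %.4s\n", fmt.index, (char *)&fmt.pixelformat);
		fmt.index++;
	}

	return 0;
}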
+
+static int isi_graph_notify_complete(struct v4l2_async_notifier *notifier)
+{
+ struct atmel_isi *isi = notifier_to_isi(notifier);
+ int ret;
+
+ isi->vdev->ctrl_handler = isi->entity.subdev->ctrl_handler;
+ ret = isi_formats_init(isi);
+ if (ret) {
+ dev_err(isi->dev, "No supported mediabus format found\n");
+ return ret;
+ }
+ isi_camera_set_bus_param(isi);
+
+ ret = isi_set_default_fmt(isi);
+ if (ret) {
+ dev_err(isi->dev, "Could not set default format\n");
+ return ret;
+ }
+
+ ret = video_register_device(isi->vdev, VFL_TYPE_GRABBER, -1);
+ if (ret) {
+ dev_err(isi->dev, "Failed to register video device\n");
+ return ret;
+ }
+
+ dev_dbg(isi->dev, "Device registered as %s\n",
+ video_device_node_name(isi->vdev));
+ return 0;
+}
+
+static void isi_graph_notify_unbind(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *sd,
+ struct v4l2_async_subdev *asd)
+{
+ struct atmel_isi *isi = notifier_to_isi(notifier);
+
+ dev_dbg(isi->dev, "Removing %s\n", video_device_node_name(isi->vdev));
+
+ /* Checks internally whether vdev has been initialized */
+ video_unregister_device(isi->vdev);
+}
+
+static int isi_graph_notify_bound(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *subdev,
+ struct v4l2_async_subdev *asd)
+{
+ struct atmel_isi *isi = notifier_to_isi(notifier);
+
+ dev_dbg(isi->dev, "subdev %s bound\n", subdev->name);
+
+ isi->entity.subdev = subdev;
+
+ return 0;
+}
+
+static int isi_graph_parse(struct atmel_isi *isi, struct device_node *node)
+{
+ struct device_node *ep = NULL;
+ struct device_node *remote;
+
+ while (1) {
+ ep = of_graph_get_next_endpoint(node, ep);
+ if (!ep)
+ return -EINVAL;
+
+ remote = of_graph_get_remote_port_parent(ep);
+ if (!remote) {
+ of_node_put(ep);
+ return -EINVAL;
+ }
+
+ /* Remote node to connect */
+ isi->entity.node = remote;
+ isi->entity.asd.match_type = V4L2_ASYNC_MATCH_OF;
+ isi->entity.asd.match.of.node = remote;
+ return 0;
+ }
+}
+
+static int isi_graph_init(struct atmel_isi *isi)
+{
+ struct v4l2_async_subdev **subdevs = NULL;
+ int ret;
+
+ /* Parse the graph to extract a list of subdevice DT nodes. */
+ ret = isi_graph_parse(isi, isi->dev->of_node);
+ if (ret < 0) {
+ dev_err(isi->dev, "Graph parsing failed\n");
+ return ret;
+ }
+
+ /* Register the subdevices notifier. */
+ subdevs = devm_kzalloc(isi->dev, sizeof(*subdevs), GFP_KERNEL);
+ if (subdevs == NULL) {
+ of_node_put(isi->entity.node);
+ return -ENOMEM;
+ }
+
+ subdevs[0] = &isi->entity.asd;
+
+ isi->notifier.subdevs = subdevs;
+ isi->notifier.num_subdevs = 1;
+ isi->notifier.bound = isi_graph_notify_bound;
+ isi->notifier.unbind = isi_graph_notify_unbind;
+ isi->notifier.complete = isi_graph_notify_complete;
+
+ ret = v4l2_async_notifier_register(&isi->v4l2_dev, &isi->notifier);
+ if (ret < 0) {
+ dev_err(isi->dev, "Notifier registration failed\n");
+ of_node_put(isi->entity.node);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int atmel_isi_probe(struct platform_device *pdev)
+{
+ int irq;
+ struct atmel_isi *isi;
+ struct vb2_queue *q;
+ struct resource *regs;
+ int ret, i;
+
+ isi = devm_kzalloc(&pdev->dev, sizeof(struct atmel_isi), GFP_KERNEL);
+ if (!isi) {
+ dev_err(&pdev->dev, "Can't allocate interface!\n");
+ return -ENOMEM;
+ }
+
+ isi->pclk = devm_clk_get(&pdev->dev, "isi_clk");
+ if (IS_ERR(isi->pclk))
+ return PTR_ERR(isi->pclk);
+
+ ret = atmel_isi_parse_dt(isi, pdev);
+ if (ret)
+ return ret;
+
+ isi->active = NULL;
+ isi->dev = &pdev->dev;
+ mutex_init(&isi->lock);
+ spin_lock_init(&isi->irqlock);
+ INIT_LIST_HEAD(&isi->video_buffer_list);
+ INIT_LIST_HEAD(&isi->dma_desc_head);
+
+ q = &isi->queue;
+
+ /* Initialize the top-level structure */
+ ret = v4l2_device_register(&pdev->dev, &isi->v4l2_dev);
+ if (ret)
+ return ret;
+
+ isi->vdev = video_device_alloc();
+ if (isi->vdev == NULL) {
+ ret = -ENOMEM;
+ goto err_vdev_alloc;
+ }
+
+ /* video node */
+ isi->vdev->fops = &isi_fops;
+ isi->vdev->v4l2_dev = &isi->v4l2_dev;
+ isi->vdev->queue = &isi->queue;
+ strlcpy(isi->vdev->name, KBUILD_MODNAME, sizeof(isi->vdev->name));
+ isi->vdev->release = video_device_release;
+ isi->vdev->ioctl_ops = &isi_ioctl_ops;
+ isi->vdev->lock = &isi->lock;
+ isi->vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING |
+ V4L2_CAP_READWRITE;
+ video_set_drvdata(isi->vdev, isi);
+
+ /* buffer queue */
+ q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ q->io_modes = VB2_MMAP | VB2_READ | VB2_DMABUF;
+ q->lock = &isi->lock;
+ q->drv_priv = isi;
+ q->buf_struct_size = sizeof(struct frame_buffer);
+ q->ops = &isi_video_qops;
+ q->mem_ops = &vb2_dma_contig_memops;
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->min_buffers_needed = 2;
+ q->dev = &pdev->dev;
+
+ ret = vb2_queue_init(q);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to initialize VB2 queue\n");
+ goto err_vb2_queue;
+ }
+ isi->p_fb_descriptors = dma_alloc_coherent(&pdev->dev,
+ sizeof(struct fbd) * VIDEO_MAX_FRAME,
+ &isi->fb_descriptors_phys,
+ GFP_KERNEL);
+ if (!isi->p_fb_descriptors) {
+ dev_err(&pdev->dev, "Can't allocate descriptors!\n");
+ ret = -ENOMEM;
+ goto err_dma_alloc;
+ }
+
+ for (i = 0; i < VIDEO_MAX_FRAME; i++) {
+ isi->dma_desc[i].p_fbd = isi->p_fb_descriptors + i;
+ isi->dma_desc[i].fbd_phys = isi->fb_descriptors_phys +
+ i * sizeof(struct fbd);
+ list_add(&isi->dma_desc[i].list, &isi->dma_desc_head);
+ }
+
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ isi->regs = devm_ioremap_resource(&pdev->dev, regs);
+ if (IS_ERR(isi->regs)) {
+ ret = PTR_ERR(isi->regs);
+ goto err_ioremap;
+ }
+
+ if (isi->pdata.data_width_flags & ISI_DATAWIDTH_8)
+ isi->width_flags = 1 << 7;
+ if (isi->pdata.data_width_flags & ISI_DATAWIDTH_10)
+ isi->width_flags |= 1 << 9;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ ret = irq;
+ goto err_req_irq;
+ }
+
+ ret = devm_request_irq(&pdev->dev, irq, isi_interrupt, 0, "isi", isi);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to request irq %d\n", irq);
+ goto err_req_irq;
+ }
+ isi->irq = irq;
+
+ ret = isi_graph_init(isi);
+ if (ret < 0)
+ goto err_req_irq;
+
+ pm_suspend_ignore_children(&pdev->dev, true);
+ pm_runtime_enable(&pdev->dev);
+ platform_set_drvdata(pdev, isi);
+ return 0;
+
+err_req_irq:
+err_ioremap:
+ dma_free_coherent(&pdev->dev,
+ sizeof(struct fbd) * VIDEO_MAX_FRAME,
+ isi->p_fb_descriptors,
+ isi->fb_descriptors_phys);
+err_dma_alloc:
+err_vb2_queue:
+ video_device_release(isi->vdev);
+err_vdev_alloc:
+ v4l2_device_unregister(&isi->v4l2_dev);
+
+ return ret;
+}
+
+static int atmel_isi_remove(struct platform_device *pdev)
+{
+ struct atmel_isi *isi = platform_get_drvdata(pdev);
+
+ dma_free_coherent(&pdev->dev,
+ sizeof(struct fbd) * VIDEO_MAX_FRAME,
+ isi->p_fb_descriptors,
+ isi->fb_descriptors_phys);
+ pm_runtime_disable(&pdev->dev);
+ v4l2_async_notifier_unregister(&isi->notifier);
+ v4l2_device_unregister(&isi->v4l2_dev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int atmel_isi_runtime_suspend(struct device *dev)
+{
+ struct atmel_isi *isi = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(isi->pclk);
+
+ return 0;
+}
+static int atmel_isi_runtime_resume(struct device *dev)
+{
+ struct atmel_isi *isi = dev_get_drvdata(dev);
+
+ return clk_prepare_enable(isi->pclk);
+}
+#endif /* CONFIG_PM */
+
+static const struct dev_pm_ops atmel_isi_dev_pm_ops = {
+ SET_RUNTIME_PM_OPS(atmel_isi_runtime_suspend,
+ atmel_isi_runtime_resume, NULL)
+};
+
+static const struct of_device_id atmel_isi_of_match[] = {
+ { .compatible = "atmel,at91sam9g45-isi" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, atmel_isi_of_match);
+
+static struct platform_driver atmel_isi_driver = {
+ .driver = {
+ .name = "atmel_isi",
+ .of_match_table = of_match_ptr(atmel_isi_of_match),
+ .pm = &atmel_isi_dev_pm_ops,
+ },
+ .probe = atmel_isi_probe,
+ .remove = atmel_isi_remove,
+};
+
+module_platform_driver(atmel_isi_driver);
+
+MODULE_AUTHOR("Josh Wu <josh.wu@atmel.com>");
+MODULE_DESCRIPTION("The V4L2 driver for Atmel Linux");
+MODULE_LICENSE("GPL");
+MODULE_SUPPORTED_DEVICE("video");
diff --git a/drivers/media/platform/soc_camera/atmel-isi.h b/drivers/media/platform/atmel/atmel-isi.h
index 0acb32a2b65c..0acb32a2b65c 100644
--- a/drivers/media/platform/soc_camera/atmel-isi.h
+++ b/drivers/media/platform/atmel/atmel-isi.h
diff --git a/drivers/media/platform/coda/coda-bit.c b/drivers/media/platform/coda/coda-bit.c
index 466a44e4549e..403214e00e95 100644
--- a/drivers/media/platform/coda/coda-bit.c
+++ b/drivers/media/platform/coda/coda-bit.c
@@ -179,6 +179,25 @@ static void coda_kfifo_sync_to_device_write(struct coda_ctx *ctx)
coda_write(dev, wr_ptr, CODA_REG_BIT_WR_PTR(ctx->reg_idx));
}
+static int coda_bitstream_pad(struct coda_ctx *ctx, u32 size)
+{
+ unsigned char *buf;
+ u32 n;
+
+ if (size < 6)
+ size = 6;
+
+ buf = kmalloc(size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ coda_h264_filler_nal(size, buf);
+ n = kfifo_in(&ctx->bitstream_fifo, buf, size);
+ kfree(buf);
+
+ return (n < size) ? -ENOSPC : 0;
+}
+
static int coda_bitstream_queue(struct coda_ctx *ctx,
struct vb2_v4l2_buffer *src_buf)
{
@@ -198,10 +217,10 @@ static int coda_bitstream_queue(struct coda_ctx *ctx,
static bool coda_bitstream_try_queue(struct coda_ctx *ctx,
struct vb2_v4l2_buffer *src_buf)
{
+ unsigned long payload = vb2_get_plane_payload(&src_buf->vb2_buf, 0);
int ret;
- if (coda_get_bitstream_payload(ctx) +
- vb2_get_plane_payload(&src_buf->vb2_buf, 0) + 512 >=
+ if (coda_get_bitstream_payload(ctx) + payload + 512 >=
ctx->bitstream.size)
return false;
@@ -210,6 +229,11 @@ static bool coda_bitstream_try_queue(struct coda_ctx *ctx,
return true;
}
+ /* Add zero padding before the first H.264 buffer, if it is too small */
+ if (ctx->qsequence == 0 && payload < 512 &&
+ ctx->codec->src_fourcc == V4L2_PIX_FMT_H264)
+ coda_bitstream_pad(ctx, 512 - payload);
+
ret = coda_bitstream_queue(ctx, src_buf);
if (ret < 0) {
v4l2_err(&ctx->dev->v4l2_dev, "bitstream buffer overflow\n");
@@ -224,7 +248,7 @@ static bool coda_bitstream_try_queue(struct coda_ctx *ctx,
return true;
}
-void coda_fill_bitstream(struct coda_ctx *ctx, bool streaming)
+void coda_fill_bitstream(struct coda_ctx *ctx, struct list_head *buffer_list)
{
struct vb2_v4l2_buffer *src_buf;
struct coda_buffer_meta *meta;
@@ -252,9 +276,16 @@ void coda_fill_bitstream(struct coda_ctx *ctx, bool streaming)
"dropping invalid JPEG frame %d\n",
ctx->qsequence);
src_buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
- v4l2_m2m_buf_done(src_buf, streaming ?
- VB2_BUF_STATE_ERROR :
- VB2_BUF_STATE_QUEUED);
+ if (buffer_list) {
+ struct v4l2_m2m_buffer *m2m_buf;
+
+ m2m_buf = container_of(src_buf,
+ struct v4l2_m2m_buffer,
+ vb);
+ list_add_tail(&m2m_buf->list, buffer_list);
+ } else {
+ v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_ERROR);
+ }
continue;
}
@@ -295,7 +326,16 @@ void coda_fill_bitstream(struct coda_ctx *ctx, bool streaming)
trace_coda_bit_queue(ctx, src_buf, meta);
}
- v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE);
+ if (buffer_list) {
+ struct v4l2_m2m_buffer *m2m_buf;
+
+ m2m_buf = container_of(src_buf,
+ struct v4l2_m2m_buffer,
+ vb);
+ list_add_tail(&m2m_buf->list, buffer_list);
+ } else {
+ v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE);
+ }
} else {
break;
}
@@ -1508,6 +1548,47 @@ static int coda_decoder_reqbufs(struct coda_ctx *ctx,
return 0;
}
+static bool coda_reorder_enable(struct coda_ctx *ctx)
+{
+ const char * const *profile_names;
+ const char * const *level_names;
+ struct coda_dev *dev = ctx->dev;
+ int profile, level;
+
+ if (dev->devtype->product != CODA_7541 &&
+ dev->devtype->product != CODA_960)
+ return false;
+
+ if (ctx->codec->src_fourcc == V4L2_PIX_FMT_JPEG)
+ return false;
+
+ if (ctx->codec->src_fourcc != V4L2_PIX_FMT_H264)
+ return true;
+
+ profile = coda_h264_profile(ctx->params.h264_profile_idc);
+ if (profile < 0) {
+ v4l2_warn(&dev->v4l2_dev, "Invalid H264 Profile: %d\n",
+ ctx->params.h264_profile_idc);
+ return false;
+ }
+
+ level = coda_h264_level(ctx->params.h264_level_idc);
+ if (level < 0) {
+ v4l2_warn(&dev->v4l2_dev, "Invalid H264 Level: %d\n",
+ ctx->params.h264_level_idc);
+ return false;
+ }
+
+ profile_names = v4l2_ctrl_get_menu(V4L2_CID_MPEG_VIDEO_H264_PROFILE);
+ level_names = v4l2_ctrl_get_menu(V4L2_CID_MPEG_VIDEO_H264_LEVEL);
+
+ v4l2_dbg(1, coda_debug, &dev->v4l2_dev, "H264 Profile/Level: %s L%s\n",
+ profile_names[profile], level_names[level]);
+
+ /* Baseline profile does not support reordering */
+ return profile > V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE;
+}
+
static int __coda_start_decoding(struct coda_ctx *ctx)
{
struct coda_q_data *q_data_src, *q_data_dst;
@@ -1554,8 +1635,7 @@ static int __coda_start_decoding(struct coda_ctx *ctx)
coda_write(dev, bitstream_buf, CODA_CMD_DEC_SEQ_BB_START);
coda_write(dev, bitstream_size / 1024, CODA_CMD_DEC_SEQ_BB_SIZE);
val = 0;
- if ((dev->devtype->product == CODA_7541) ||
- (dev->devtype->product == CODA_960))
+ if (coda_reorder_enable(ctx))
val |= CODA_REORDER_ENABLE;
if (ctx->codec->src_fourcc == V4L2_PIX_FMT_JPEG)
val |= CODA_NO_INT_ENABLE;
@@ -1747,7 +1827,7 @@ static int coda_prepare_decode(struct coda_ctx *ctx)
/* Try to copy source buffer contents into the bitstream ringbuffer */
mutex_lock(&ctx->bitstream_mutex);
- coda_fill_bitstream(ctx, true);
+ coda_fill_bitstream(ctx, NULL);
mutex_unlock(&ctx->bitstream_mutex);
if (coda_get_bitstream_payload(ctx) < 512 &&
diff --git a/drivers/media/platform/coda/coda-common.c b/drivers/media/platform/coda/coda-common.c
index eb6548f46cba..d523e990d509 100644
--- a/drivers/media/platform/coda/coda-common.c
+++ b/drivers/media/platform/coda/coda-common.c
@@ -71,6 +71,10 @@ static int disable_vdoa;
module_param(disable_vdoa, int, 0644);
MODULE_PARM_DESC(disable_vdoa, "Disable Video Data Order Adapter tiled to raster-scan conversion");
+static int enable_bwb;
+module_param(enable_bwb, int, 0644);
+MODULE_PARM_DESC(enable_bwb, "Enable BWB unit, may crash on certain streams");
+
void coda_write(struct coda_dev *dev, u32 data, u32 reg)
{
v4l2_dbg(2, coda_debug, &dev->v4l2_dev,
@@ -386,6 +390,7 @@ static int coda_enum_fmt(struct file *file, void *priv,
{
struct video_device *vdev = video_devdata(file);
const struct coda_video_device *cvd = to_coda_video_device(vdev);
+ struct coda_ctx *ctx = fh_to_ctx(priv);
const u32 *formats;
if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
@@ -398,6 +403,11 @@ static int coda_enum_fmt(struct file *file, void *priv,
if (f->index >= CODA_MAX_FORMATS || formats[f->index] == 0)
return -EINVAL;
+ /* Skip YUYV if the vdoa is not available */
+ if (!ctx->vdoa && f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE &&
+ formats[f->index] == V4L2_PIX_FMT_YUYV)
+ return -EINVAL;
+
f->pixelformat = formats[f->index];
return 0;
@@ -813,10 +823,6 @@ static int coda_qbuf(struct file *file, void *priv,
static bool coda_buf_is_end_of_stream(struct coda_ctx *ctx,
struct vb2_v4l2_buffer *buf)
{
- struct vb2_queue *src_vq;
-
- src_vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
-
return ((ctx->bit_stream_param & CODA_BIT_STREAM_END_FLAG) &&
(buf->sequence == (ctx->qsequence - 1)));
}
@@ -881,6 +887,47 @@ static int coda_g_selection(struct file *file, void *fh,
return 0;
}
+static int coda_try_encoder_cmd(struct file *file, void *fh,
+ struct v4l2_encoder_cmd *ec)
+{
+ if (ec->cmd != V4L2_ENC_CMD_STOP)
+ return -EINVAL;
+
+ if (ec->flags & V4L2_ENC_CMD_STOP_AT_GOP_END)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int coda_encoder_cmd(struct file *file, void *fh,
+ struct v4l2_encoder_cmd *ec)
+{
+ struct coda_ctx *ctx = fh_to_ctx(fh);
+ struct vb2_queue *dst_vq;
+ int ret;
+
+ ret = coda_try_encoder_cmd(file, fh, ec);
+ if (ret < 0)
+ return ret;
+
+ /* Ignore encoder stop command silently in decoder context */
+ if (ctx->inst_type != CODA_INST_ENCODER)
+ return 0;
+
+ /* Set the stream-end flag on this context */
+ ctx->bit_stream_param |= CODA_BIT_STREAM_END_FLAG;
+
+ /* If there is no buffer in flight, wake up */
+ if (ctx->qsequence == ctx->osequence) {
+ dst_vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE);
+ dst_vq->last_buffer_dequeued = true;
+ wake_up(&dst_vq->done_wq);
+ }
+
+ return 0;
+}
+
static int coda_try_decoder_cmd(struct file *file, void *fh,
struct v4l2_decoder_cmd *dc)
{
@@ -1054,6 +1101,8 @@ static const struct v4l2_ioctl_ops coda_ioctl_ops = {
.vidioc_g_selection = coda_g_selection,
+ .vidioc_try_encoder_cmd = coda_try_encoder_cmd,
+ .vidioc_encoder_cmd = coda_encoder_cmd,
.vidioc_try_decoder_cmd = coda_try_decoder_cmd,
.vidioc_decoder_cmd = coda_decoder_cmd,
@@ -1327,12 +1376,28 @@ static void coda_buf_queue(struct vb2_buffer *vb)
*/
if (vb2_get_plane_payload(vb, 0) == 0)
coda_bit_stream_end_flag(ctx);
+
+ if (q_data->fourcc == V4L2_PIX_FMT_H264) {
+ /*
+ * Unless already done, try to obtain profile_idc and
+ * level_idc from the SPS header. This makes it possible to decide
+ * whether to enable reordering during sequence
+ * initialization.
+ */
+ if (!ctx->params.h264_profile_idc)
+ coda_sps_parse_profile(ctx, vb);
+ }
+
mutex_lock(&ctx->bitstream_mutex);
v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
if (vb2_is_streaming(vb->vb2_queue))
- coda_fill_bitstream(ctx, true);
+ /* This sets buf->sequence = ctx->qsequence++ */
+ coda_fill_bitstream(ctx, NULL);
mutex_unlock(&ctx->bitstream_mutex);
} else {
+ if (ctx->inst_type == CODA_INST_ENCODER &&
+ vq->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ vbuf->sequence = ctx->qsequence++;
v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
}
}
@@ -1344,7 +1409,7 @@ int coda_alloc_aux_buf(struct coda_dev *dev, struct coda_aux_buf *buf,
GFP_KERNEL);
if (!buf->vaddr) {
v4l2_err(&dev->v4l2_dev,
- "Failed to allocate %s buffer of size %u\n",
+ "Failed to allocate %s buffer of size %zu\n",
name, size);
return -ENOMEM;
}
@@ -1382,18 +1447,22 @@ static int coda_start_streaming(struct vb2_queue *q, unsigned int count)
struct coda_ctx *ctx = vb2_get_drv_priv(q);
struct v4l2_device *v4l2_dev = &ctx->dev->v4l2_dev;
struct coda_q_data *q_data_src, *q_data_dst;
+ struct v4l2_m2m_buffer *m2m_buf, *tmp;
struct vb2_v4l2_buffer *buf;
+ struct list_head list;
int ret = 0;
if (count < 1)
return -EINVAL;
+ INIT_LIST_HEAD(&list);
+
q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
if (ctx->inst_type == CODA_INST_DECODER && ctx->use_bit) {
/* copy the buffers that were queued before streamon */
mutex_lock(&ctx->bitstream_mutex);
- coda_fill_bitstream(ctx, false);
+ coda_fill_bitstream(ctx, &list);
mutex_unlock(&ctx->bitstream_mutex);
if (coda_get_bitstream_payload(ctx) < 512) {
@@ -1408,8 +1477,8 @@ static int coda_start_streaming(struct vb2_queue *q, unsigned int count)
}
/* Don't start the coda unless both queues are on */
- if (!(ctx->streamon_out & ctx->streamon_cap))
- return 0;
+ if (!(ctx->streamon_out && ctx->streamon_cap))
+ goto out;
q_data_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
if ((q_data_src->width != q_data_dst->width &&
@@ -1444,15 +1513,26 @@ static int coda_start_streaming(struct vb2_queue *q, unsigned int count)
ret = ctx->ops->start_streaming(ctx);
if (ctx->inst_type == CODA_INST_DECODER) {
if (ret == -EAGAIN)
- return 0;
- else if (ret < 0)
- goto err;
+ goto out;
}
+ if (ret < 0)
+ goto err;
- return ret;
+out:
+ if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
+ list_for_each_entry_safe(m2m_buf, tmp, &list, list) {
+ list_del(&m2m_buf->list);
+ v4l2_m2m_buf_done(&m2m_buf->vb, VB2_BUF_STATE_DONE);
+ }
+ }
+ return 0;
err:
if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
+ list_for_each_entry_safe(m2m_buf, tmp, &list, list) {
+ list_del(&m2m_buf->list);
+ v4l2_m2m_buf_done(&m2m_buf->vb, VB2_BUF_STATE_QUEUED);
+ }
while ((buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx)))
v4l2_m2m_buf_done(buf, VB2_BUF_STATE_QUEUED);
} else {
@@ -1832,7 +1912,8 @@ static int coda_open(struct file *file)
ctx->idx = idx;
switch (dev->devtype->product) {
case CODA_960:
- ctx->frame_mem_ctrl = 1 << 12;
+ if (enable_bwb)
+ ctx->frame_mem_ctrl = CODA9_FRAME_ENABLE_BWB;
/* fallthrough */
case CODA_7541:
ctx->reg_idx = 0;
@@ -2126,7 +2207,12 @@ static void coda_fw_callback(const struct firmware *fw, void *context);
static int coda_firmware_request(struct coda_dev *dev)
{
- char *fw = dev->devtype->firmware[dev->firmware];
+ char *fw;
+
+ if (dev->firmware >= ARRAY_SIZE(dev->devtype->firmware))
+ return -EINVAL;
+
+ fw = dev->devtype->firmware[dev->firmware];
dev_dbg(&dev->plat_dev->dev, "requesting firmware '%s' for %s\n", fw,
coda_product_name(dev->devtype->product));
@@ -2142,16 +2228,16 @@ static void coda_fw_callback(const struct firmware *fw, void *context)
struct platform_device *pdev = dev->plat_dev;
int i, ret;
- if (!fw && dev->firmware == 1) {
- v4l2_err(&dev->v4l2_dev, "firmware request failed\n");
- goto put_pm;
- }
if (!fw) {
- dev->firmware = 1;
- coda_firmware_request(dev);
+ dev->firmware++;
+ ret = coda_firmware_request(dev);
+ if (ret < 0) {
+ v4l2_err(&dev->v4l2_dev, "firmware request failed\n");
+ goto put_pm;
+ }
return;
}
- if (dev->firmware == 1) {
+ if (dev->firmware > 0) {
/*
* Since we can't suppress warnings for failed asynchronous
* firmware requests, report that the fallback firmware was
diff --git a/drivers/media/platform/coda/coda-h264.c b/drivers/media/platform/coda/coda-h264.c
index 09dfcca7cc50..0e27412e01f5 100644
--- a/drivers/media/platform/coda/coda-h264.c
+++ b/drivers/media/platform/coda/coda-h264.c
@@ -13,12 +13,59 @@
#include <linux/kernel.h>
#include <linux/string.h>
+#include <linux/videodev2.h>
#include <coda.h>
-static const u8 coda_filler_nal[14] = { 0x00, 0x00, 0x00, 0x01, 0x0c, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x80 };
static const u8 coda_filler_size[8] = { 0, 7, 14, 13, 12, 11, 10, 9 };
+static const u8 *coda_find_nal_header(const u8 *buf, const u8 *end)
+{
+ u32 val = 0xffffffff;
+
+ do {
+ val = val << 8 | *buf++;
+ if (buf >= end)
+ return NULL;
+ } while (val != 0x00000001);
+
+ return buf;
+}
+
+int coda_sps_parse_profile(struct coda_ctx *ctx, struct vb2_buffer *vb)
+{
+ const u8 *buf = vb2_plane_vaddr(vb, 0);
+ const u8 *end = buf + vb2_get_plane_payload(vb, 0);
+
+ /* Find SPS header */
+ do {
+ buf = coda_find_nal_header(buf, end);
+ if (!buf)
+ return -EINVAL;
+ } while ((*buf++ & 0x1f) != 0x7);
+
+ ctx->params.h264_profile_idc = buf[0];
+ ctx->params.h264_level_idc = buf[2];
+
+ return 0;
+}
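
The parser above depends on the Annex-B SPS layout: after the 00 00 00 01 start code and the NAL header byte (type 7), the next bytes carry profile_idc, a constraint/reserved byte, then level_idc. A self-contained sketch of the same scan over a fabricated buffer.

#include <stdint.h>
#include <stdio.h>

/* Return a pointer just past the next 00 00 00 01 start code, or NULL */
static const uint8_t *find_start_code(const uint8_t *p, const uint8_t *end)
{
	uint32_t val = 0xffffffff;

	while (p < end) {
		val = (val << 8) | *p++;
		if (val == 0x00000001)
			return p;
	}
	return NULL;
}

int main(void)
{
	/*
	 * Fabricated bitstream: start code + SPS NAL (type 7) with
	 * profile_idc = 66 (Baseline) and level_idc = 30 (level 3.0).
	 */
	static const uint8_t stream[] = {
		0x00, 0x00, 0x00, 0x01, 0x67, 0x42, 0xc0, 0x1e,
	};
	const uint8_t *p = find_start_code(stream, stream + sizeof(stream));

	if (p && (*p & 0x1f) == 0x7)
		printf("profile_idc=%u level_idc=%u\n", p[1], p[3]);

	return 0;
}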
+
+int coda_h264_filler_nal(int size, char *p)
+{
+ if (size < 6)
+ return -EINVAL;
+
+ p[0] = 0x00;
+ p[1] = 0x00;
+ p[2] = 0x00;
+ p[3] = 0x01;
+ p[4] = 0x0c;
+ memset(p + 5, 0xff, size - 6);
+ /* Add rbsp stop bit and trailing at the end */
+ p[size - 1] = 0x80;
+
+ return 0;
+}
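
The filler NAL built above is plain Annex-B: a four-byte start code, the NAL header 0x0c (nal_unit_type 12, filler data), 0xff payload bytes, and a closing 0x80 RBSP stop bit. A tiny standalone illustration of that layout; the buffer size is chosen arbitrarily.

#include <stdio.h>
#include <string.h>

/* Same layout as coda_h264_filler_nal(): start code, type 12, 0xff..., 0x80 */
static int build_filler_nal(unsigned char *p, int size)
{
	if (size < 6)
		return -1;

	p[0] = 0x00;
	p[1] = 0x00;
	p[2] = 0x00;
	p[3] = 0x01;
	p[4] = 0x0c;			/* nal_ref_idc = 0, nal_unit_type = 12 */
	memset(p + 5, 0xff, size - 6);
	p[size - 1] = 0x80;		/* rbsp_stop_one_bit plus trailing zeros */

	return 0;
}

int main(void)
{
	unsigned char nal[16];
	unsigned int i;

	build_filler_nal(nal, sizeof(nal));
	for (i = 0; i < sizeof(nal); i++)
		printf("%02x ", nal[i]);
	printf("\n");
	return 0;
}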
+
int coda_h264_padding(int size, char *p)
{
int nal_size;
@@ -29,10 +76,38 @@ int coda_h264_padding(int size, char *p)
return 0;
nal_size = coda_filler_size[diff];
- memcpy(p, coda_filler_nal, nal_size);
-
- /* Add rbsp stop bit and trailing at the end */
- *(p + nal_size - 1) = 0x80;
+ coda_h264_filler_nal(nal_size, p);
return nal_size;
}
+
+int coda_h264_profile(int profile_idc)
+{
+ switch (profile_idc) {
+ case 66: return V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE;
+ case 77: return V4L2_MPEG_VIDEO_H264_PROFILE_MAIN;
+ case 88: return V4L2_MPEG_VIDEO_H264_PROFILE_EXTENDED;
+ case 100: return V4L2_MPEG_VIDEO_H264_PROFILE_HIGH;
+ default: return -EINVAL;
+ }
+}
+
+int coda_h264_level(int level_idc)
+{
+ switch (level_idc) {
+ case 10: return V4L2_MPEG_VIDEO_H264_LEVEL_1_0;
+ case 9: return V4L2_MPEG_VIDEO_H264_LEVEL_1B;
+ case 11: return V4L2_MPEG_VIDEO_H264_LEVEL_1_1;
+ case 12: return V4L2_MPEG_VIDEO_H264_LEVEL_1_2;
+ case 13: return V4L2_MPEG_VIDEO_H264_LEVEL_1_3;
+ case 20: return V4L2_MPEG_VIDEO_H264_LEVEL_2_0;
+ case 21: return V4L2_MPEG_VIDEO_H264_LEVEL_2_1;
+ case 22: return V4L2_MPEG_VIDEO_H264_LEVEL_2_2;
+ case 30: return V4L2_MPEG_VIDEO_H264_LEVEL_3_0;
+ case 31: return V4L2_MPEG_VIDEO_H264_LEVEL_3_1;
+ case 32: return V4L2_MPEG_VIDEO_H264_LEVEL_3_2;
+ case 40: return V4L2_MPEG_VIDEO_H264_LEVEL_4_0;
+ case 41: return V4L2_MPEG_VIDEO_H264_LEVEL_4_1;
+ default: return -EINVAL;
+ }
+}
diff --git a/drivers/media/platform/coda/coda.h b/drivers/media/platform/coda/coda.h
index 4b831c91ae4a..20222befb9b2 100644
--- a/drivers/media/platform/coda/coda.h
+++ b/drivers/media/platform/coda/coda.h
@@ -28,7 +28,7 @@
#include "coda_regs.h"
-#define CODA_MAX_FRAMEBUFFERS 8
+#define CODA_MAX_FRAMEBUFFERS 17
#define FMO_SLICE_SAVE_BUF_SIZE (32)
enum {
@@ -117,6 +117,8 @@ struct coda_params {
u8 h264_deblk_enabled;
u8 h264_deblk_alpha;
u8 h264_deblk_beta;
+ u8 h264_profile_idc;
+ u8 h264_level_idc;
u8 mpeg4_intra_qp;
u8 mpeg4_inter_qp;
u8 gop_size;
@@ -259,7 +261,7 @@ int coda_decoder_queue_init(void *priv, struct vb2_queue *src_vq,
int coda_hw_reset(struct coda_ctx *ctx);
-void coda_fill_bitstream(struct coda_ctx *ctx, bool streaming);
+void coda_fill_bitstream(struct coda_ctx *ctx, struct list_head *buffer_list);
void coda_set_gdi_regs(struct coda_ctx *ctx);
@@ -290,7 +292,11 @@ void coda_bit_stream_end_flag(struct coda_ctx *ctx);
void coda_m2m_buf_done(struct coda_ctx *ctx, struct vb2_v4l2_buffer *buf,
enum vb2_buffer_state state);
+int coda_h264_filler_nal(int size, char *p);
int coda_h264_padding(int size, char *p);
+int coda_h264_profile(int profile_idc);
+int coda_h264_level(int level_idc);
+int coda_sps_parse_profile(struct coda_ctx *ctx, struct vb2_buffer *vb);
bool coda_jpeg_check_buffer(struct coda_ctx *ctx, struct vb2_buffer *vb);
int coda_jpeg_write_tables(struct coda_ctx *ctx);
diff --git a/drivers/media/platform/coda/coda_regs.h b/drivers/media/platform/coda/coda_regs.h
index 3490602fa6e1..77ee46a93427 100644
--- a/drivers/media/platform/coda/coda_regs.h
+++ b/drivers/media/platform/coda/coda_regs.h
@@ -51,6 +51,7 @@
#define CODA7_STREAM_SEL_64BITS_ENDIAN (1 << 1)
#define CODA_STREAM_ENDIAN_SELECT (1 << 0)
#define CODA_REG_BIT_FRAME_MEM_CTRL 0x110
+#define CODA9_FRAME_ENABLE_BWB (1 << 12)
#define CODA9_FRAME_TILED2LINEAR (1 << 11)
#define CODA_FRAME_CHROMA_INTERLEAVE (1 << 2)
#define CODA_IMAGE_ENDIAN_SELECT (1 << 0)
diff --git a/drivers/media/platform/davinci/vpif_display.c b/drivers/media/platform/davinci/vpif_display.c
index 50c30731bb78..7e5cf9923c8d 100644
--- a/drivers/media/platform/davinci/vpif_display.c
+++ b/drivers/media/platform/davinci/vpif_display.c
@@ -1287,7 +1287,7 @@ static __init int vpif_probe(struct platform_device *pdev)
}
if (!vpif_obj.config->asd_sizes) {
- i2c_adap = i2c_get_adapter(1);
+ i2c_adap = i2c_get_adapter(vpif_obj.config->i2c_adapter_id);
for (i = 0; i < subdev_count; i++) {
vpif_obj.sd[i] =
v4l2_i2c_new_subdev_board(&vpif_obj.v4l2_dev,
diff --git a/drivers/media/platform/exynos-gsc/gsc-core.c b/drivers/media/platform/exynos-gsc/gsc-core.c
index 0f0c389f8897..59a634201830 100644
--- a/drivers/media/platform/exynos-gsc/gsc-core.c
+++ b/drivers/media/platform/exynos-gsc/gsc-core.c
@@ -112,6 +112,15 @@ static const struct gsc_fmt gsc_formats[] = {
.num_planes = 1,
.num_comp = 2,
}, {
+ .name = "YUV 4:2:2 non-contig, Y/CbCr",
+ .pixelformat = V4L2_PIX_FMT_NV16M,
+ .depth = { 8, 8 },
+ .color = GSC_YUV422,
+ .yorder = GSC_LSB_Y,
+ .corder = GSC_CBCR,
+ .num_planes = 2,
+ .num_comp = 2,
+ }, {
.name = "YUV 4:2:2 planar, Y/CrCb",
.pixelformat = V4L2_PIX_FMT_NV61,
.depth = { 16 },
@@ -121,6 +130,15 @@ static const struct gsc_fmt gsc_formats[] = {
.num_planes = 1,
.num_comp = 2,
}, {
+ .name = "YUV 4:2:2 non-contig, Y/CrCb",
+ .pixelformat = V4L2_PIX_FMT_NV61M,
+ .depth = { 8, 8 },
+ .color = GSC_YUV422,
+ .yorder = GSC_LSB_Y,
+ .corder = GSC_CRCB,
+ .num_planes = 2,
+ .num_comp = 2,
+ }, {
.name = "YUV 4:2:0 planar, YCbCr",
.pixelformat = V4L2_PIX_FMT_YUV420,
.depth = { 12 },
@@ -158,6 +176,15 @@ static const struct gsc_fmt gsc_formats[] = {
.num_planes = 1,
.num_comp = 2,
}, {
+ .name = "YUV 4:2:0 non-contig. 2p, Y/CrCb",
+ .pixelformat = V4L2_PIX_FMT_NV21M,
+ .depth = { 8, 4 },
+ .color = GSC_YUV420,
+ .yorder = GSC_LSB_Y,
+ .corder = GSC_CRCB,
+ .num_planes = 2,
+ .num_comp = 2,
+ }, {
.name = "YUV 4:2:0 non-contig. 2p, Y/CbCr",
.pixelformat = V4L2_PIX_FMT_NV12M,
.depth = { 8, 4 },
diff --git a/drivers/media/platform/fsl-viu.c b/drivers/media/platform/fsl-viu.c
index ae8c6b35a357..97e164b2075a 100644
--- a/drivers/media/platform/fsl-viu.c
+++ b/drivers/media/platform/fsl-viu.c
@@ -1466,9 +1466,8 @@ static int viu_of_probe(struct platform_device *op)
viu_dev->decoder = v4l2_i2c_new_subdev(&viu_dev->v4l2_dev, ad,
"saa7113", VIU_VIDEO_DECODER_ADDR, NULL);
- viu_dev->vidq.timeout.function = viu_vid_timeout;
- viu_dev->vidq.timeout.data = (unsigned long)viu_dev;
- init_timer(&viu_dev->vidq.timeout);
+ setup_timer(&viu_dev->vidq.timeout, viu_vid_timeout,
+ (unsigned long)viu_dev);
viu_dev->std = V4L2_STD_NTSC_M;
viu_dev->first = 1;
diff --git a/drivers/media/platform/m2m-deinterlace.c b/drivers/media/platform/m2m-deinterlace.c
index bedc7cc4c7d6..980066b8d32a 100644
--- a/drivers/media/platform/m2m-deinterlace.c
+++ b/drivers/media/platform/m2m-deinterlace.c
@@ -1017,6 +1017,7 @@ static int deinterlace_probe(struct platform_device *pdev)
if (!dma_has_cap(DMA_INTERLEAVE, pcdev->dma_chan->device->cap_mask)) {
dev_err(&pdev->dev, "DMA does not support INTERLEAVE\n");
+ ret = -ENODEV;
goto rel_dma;
}
diff --git a/drivers/media/platform/mtk-jpeg/Makefile b/drivers/media/platform/mtk-jpeg/Makefile
new file mode 100644
index 000000000000..b2e6069f3959
--- /dev/null
+++ b/drivers/media/platform/mtk-jpeg/Makefile
@@ -0,0 +1,2 @@
+mtk_jpeg-objs := mtk_jpeg_core.o mtk_jpeg_hw.o mtk_jpeg_parse.o
+obj-$(CONFIG_VIDEO_MEDIATEK_JPEG) += mtk_jpeg.o
diff --git a/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c b/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c
new file mode 100644
index 000000000000..451a54039e65
--- /dev/null
+++ b/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c
@@ -0,0 +1,1292 @@
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: Ming Hsiu Tsai <minghsiu.tsai@mediatek.com>
+ * Rick Chang <rick.chang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/v4l2-ioctl.h>
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-dma-contig.h>
+#include <soc/mediatek/smi.h>
+
+#include "mtk_jpeg_hw.h"
+#include "mtk_jpeg_core.h"
+#include "mtk_jpeg_parse.h"
+
+static struct mtk_jpeg_fmt mtk_jpeg_formats[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_JPEG,
+ .colplanes = 1,
+ .flags = MTK_JPEG_FMT_FLAG_DEC_OUTPUT,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_YUV420M,
+ .h_sample = {4, 2, 2},
+ .v_sample = {4, 2, 2},
+ .colplanes = 3,
+ .h_align = 5,
+ .v_align = 4,
+ .flags = MTK_JPEG_FMT_FLAG_DEC_CAPTURE,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_YUV422M,
+ .h_sample = {4, 2, 2},
+ .v_sample = {4, 4, 4},
+ .colplanes = 3,
+ .h_align = 5,
+ .v_align = 3,
+ .flags = MTK_JPEG_FMT_FLAG_DEC_CAPTURE,
+ },
+};
+
+#define MTK_JPEG_NUM_FORMATS ARRAY_SIZE(mtk_jpeg_formats)
+
+enum {
+ MTK_JPEG_BUF_FLAGS_INIT = 0,
+ MTK_JPEG_BUF_FLAGS_LAST_FRAME = 1,
+};
+
+struct mtk_jpeg_src_buf {
+ struct vb2_v4l2_buffer b;
+ struct list_head list;
+ int flags;
+ struct mtk_jpeg_dec_param dec_param;
+};
+
+static int debug;
+module_param(debug, int, 0644);
+
+static inline struct mtk_jpeg_ctx *mtk_jpeg_fh_to_ctx(struct v4l2_fh *fh)
+{
+ return container_of(fh, struct mtk_jpeg_ctx, fh);
+}
+
+static inline struct mtk_jpeg_src_buf *mtk_jpeg_vb2_to_srcbuf(
+ struct vb2_buffer *vb)
+{
+ return container_of(to_vb2_v4l2_buffer(vb), struct mtk_jpeg_src_buf, b);
+}
+
+static int mtk_jpeg_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ struct mtk_jpeg_dev *jpeg = video_drvdata(file);
+
+ strlcpy(cap->driver, MTK_JPEG_NAME " decoder", sizeof(cap->driver));
+ strlcpy(cap->card, MTK_JPEG_NAME " decoder", sizeof(cap->card));
+ snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s",
+ dev_name(jpeg->dev));
+
+ return 0;
+}
+
+static int mtk_jpeg_enum_fmt(struct mtk_jpeg_fmt *mtk_jpeg_formats, int n,
+ struct v4l2_fmtdesc *f, u32 type)
+{
+ int i, num = 0;
+
+ for (i = 0; i < n; ++i) {
+ if (mtk_jpeg_formats[i].flags & type) {
+ if (num == f->index)
+ break;
+ ++num;
+ }
+ }
+
+ if (i >= n)
+ return -EINVAL;
+
+ f->pixelformat = mtk_jpeg_formats[i].fourcc;
+
+ return 0;
+}
+
+static int mtk_jpeg_enum_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ return mtk_jpeg_enum_fmt(mtk_jpeg_formats, MTK_JPEG_NUM_FORMATS, f,
+ MTK_JPEG_FMT_FLAG_DEC_CAPTURE);
+}
+
+static int mtk_jpeg_enum_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ return mtk_jpeg_enum_fmt(mtk_jpeg_formats, MTK_JPEG_NUM_FORMATS, f,
+ MTK_JPEG_FMT_FLAG_DEC_OUTPUT);
+}
+
+static struct mtk_jpeg_q_data *mtk_jpeg_get_q_data(struct mtk_jpeg_ctx *ctx,
+ enum v4l2_buf_type type)
+{
+ if (V4L2_TYPE_IS_OUTPUT(type))
+ return &ctx->out_q;
+ return &ctx->cap_q;
+}
+
+static struct mtk_jpeg_fmt *mtk_jpeg_find_format(struct mtk_jpeg_ctx *ctx,
+ u32 pixelformat,
+ unsigned int fmt_type)
+{
+ unsigned int k, fmt_flag;
+
+ fmt_flag = (fmt_type == MTK_JPEG_FMT_TYPE_OUTPUT) ?
+ MTK_JPEG_FMT_FLAG_DEC_OUTPUT :
+ MTK_JPEG_FMT_FLAG_DEC_CAPTURE;
+
+ for (k = 0; k < MTK_JPEG_NUM_FORMATS; k++) {
+ struct mtk_jpeg_fmt *fmt = &mtk_jpeg_formats[k];
+
+ if (fmt->fourcc == pixelformat && fmt->flags & fmt_flag)
+ return fmt;
+ }
+
+ return NULL;
+}
+
+static void mtk_jpeg_bound_align_image(u32 *w, unsigned int wmin,
+ unsigned int wmax, unsigned int walign,
+ u32 *h, unsigned int hmin,
+ unsigned int hmax, unsigned int halign)
+{
+ int width, height, w_step, h_step;
+
+ width = *w;
+ height = *h;
+ w_step = 1 << walign;
+ h_step = 1 << halign;
+
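+ /*
+ * v4l_bound_align_image() rounds down; if that shrank the requested
+ * size, step back up by one alignment unit while staying within the
+ * maximum.
+ */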
+ v4l_bound_align_image(w, wmin, wmax, walign, h, hmin, hmax, halign, 0);
+ if (*w < width && (*w + w_step) <= wmax)
+ *w += w_step;
+ if (*h < height && (*h + h_step) <= hmax)
+ *h += h_step;
+}
+
+static void mtk_jpeg_adjust_fmt_mplane(struct mtk_jpeg_ctx *ctx,
+ struct v4l2_format *f)
+{
+ struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
+ struct mtk_jpeg_q_data *q_data;
+ int i;
+
+ q_data = mtk_jpeg_get_q_data(ctx, f->type);
+
+ pix_mp->width = q_data->w;
+ pix_mp->height = q_data->h;
+ pix_mp->pixelformat = q_data->fmt->fourcc;
+ pix_mp->num_planes = q_data->fmt->colplanes;
+
+ for (i = 0; i < pix_mp->num_planes; i++) {
+ pix_mp->plane_fmt[i].bytesperline = q_data->bytesperline[i];
+ pix_mp->plane_fmt[i].sizeimage = q_data->sizeimage[i];
+ }
+}
+
+static int mtk_jpeg_try_fmt_mplane(struct v4l2_format *f,
+ struct mtk_jpeg_fmt *fmt,
+ struct mtk_jpeg_ctx *ctx, int q_type)
+{
+ struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
+ struct mtk_jpeg_dev *jpeg = ctx->jpeg;
+ int i;
+
+ memset(pix_mp->reserved, 0, sizeof(pix_mp->reserved));
+ pix_mp->field = V4L2_FIELD_NONE;
+
+ if (ctx->state != MTK_JPEG_INIT) {
+ mtk_jpeg_adjust_fmt_mplane(ctx, f);
+ goto end;
+ }
+
+ pix_mp->num_planes = fmt->colplanes;
+ pix_mp->pixelformat = fmt->fourcc;
+
+ if (q_type == MTK_JPEG_FMT_TYPE_OUTPUT) {
+ struct v4l2_plane_pix_format *pfmt = &pix_mp->plane_fmt[0];
+
+ mtk_jpeg_bound_align_image(&pix_mp->width, MTK_JPEG_MIN_WIDTH,
+ MTK_JPEG_MAX_WIDTH, 0,
+ &pix_mp->height, MTK_JPEG_MIN_HEIGHT,
+ MTK_JPEG_MAX_HEIGHT, 0);
+
+ memset(pfmt->reserved, 0, sizeof(pfmt->reserved));
+ pfmt->bytesperline = 0;
+ /* Source size must be aligned to 128 */
+ pfmt->sizeimage = mtk_jpeg_align(pfmt->sizeimage, 128);
+ if (pfmt->sizeimage == 0)
+ pfmt->sizeimage = MTK_JPEG_DEFAULT_SIZEIMAGE;
+ goto end;
+ }
+
+ /* type is MTK_JPEG_FMT_TYPE_CAPTURE */
+ mtk_jpeg_bound_align_image(&pix_mp->width, MTK_JPEG_MIN_WIDTH,
+ MTK_JPEG_MAX_WIDTH, fmt->h_align,
+ &pix_mp->height, MTK_JPEG_MIN_HEIGHT,
+ MTK_JPEG_MAX_HEIGHT, fmt->v_align);
+
+ for (i = 0; i < fmt->colplanes; i++) {
+ struct v4l2_plane_pix_format *pfmt = &pix_mp->plane_fmt[i];
+ u32 stride = pix_mp->width * fmt->h_sample[i] / 4;
+ u32 h = pix_mp->height * fmt->v_sample[i] / 4;
+
+ memset(pfmt->reserved, 0, sizeof(pfmt->reserved));
+ pfmt->bytesperline = stride;
+ pfmt->sizeimage = stride * h;
+ }
+end:
+ v4l2_dbg(2, debug, &jpeg->v4l2_dev, "wxh:%ux%u\n",
+ pix_mp->width, pix_mp->height);
+ for (i = 0; i < pix_mp->num_planes; i++) {
+ v4l2_dbg(2, debug, &jpeg->v4l2_dev,
+ "plane[%d] bpl=%u, size=%u\n",
+ i,
+ pix_mp->plane_fmt[i].bytesperline,
+ pix_mp->plane_fmt[i].sizeimage);
+ }
+ return 0;
+}
+
+static int mtk_jpeg_g_fmt_vid_mplane(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct vb2_queue *vq;
+ struct mtk_jpeg_q_data *q_data = NULL;
+ struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
+ struct mtk_jpeg_ctx *ctx = mtk_jpeg_fh_to_ctx(priv);
+ struct mtk_jpeg_dev *jpeg = ctx->jpeg;
+ int i;
+
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
+ if (!vq)
+ return -EINVAL;
+
+ q_data = mtk_jpeg_get_q_data(ctx, f->type);
+
+ memset(pix_mp->reserved, 0, sizeof(pix_mp->reserved));
+ pix_mp->width = q_data->w;
+ pix_mp->height = q_data->h;
+ pix_mp->field = V4L2_FIELD_NONE;
+ pix_mp->pixelformat = q_data->fmt->fourcc;
+ pix_mp->num_planes = q_data->fmt->colplanes;
+ pix_mp->colorspace = ctx->colorspace;
+ pix_mp->ycbcr_enc = ctx->ycbcr_enc;
+ pix_mp->xfer_func = ctx->xfer_func;
+ pix_mp->quantization = ctx->quantization;
+
+ v4l2_dbg(1, debug, &jpeg->v4l2_dev, "(%d) g_fmt:%c%c%c%c wxh:%ux%u\n",
+ f->type,
+ (pix_mp->pixelformat & 0xff),
+ (pix_mp->pixelformat >> 8 & 0xff),
+ (pix_mp->pixelformat >> 16 & 0xff),
+ (pix_mp->pixelformat >> 24 & 0xff),
+ pix_mp->width, pix_mp->height);
+
+ for (i = 0; i < pix_mp->num_planes; i++) {
+ struct v4l2_plane_pix_format *pfmt = &pix_mp->plane_fmt[i];
+
+ pfmt->bytesperline = q_data->bytesperline[i];
+ pfmt->sizeimage = q_data->sizeimage[i];
+ memset(pfmt->reserved, 0, sizeof(pfmt->reserved));
+
+ v4l2_dbg(1, debug, &jpeg->v4l2_dev,
+ "plane[%d] bpl=%u, size=%u\n",
+ i,
+ pfmt->bytesperline,
+ pfmt->sizeimage);
+ }
+ return 0;
+}
+
+static int mtk_jpeg_try_fmt_vid_cap_mplane(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct mtk_jpeg_ctx *ctx = mtk_jpeg_fh_to_ctx(priv);
+ struct mtk_jpeg_fmt *fmt;
+
+ fmt = mtk_jpeg_find_format(ctx, f->fmt.pix_mp.pixelformat,
+ MTK_JPEG_FMT_TYPE_CAPTURE);
+ if (!fmt)
+ fmt = ctx->cap_q.fmt;
+
+ v4l2_dbg(2, debug, &ctx->jpeg->v4l2_dev, "(%d) try_fmt:%c%c%c%c\n",
+ f->type,
+ (fmt->fourcc & 0xff),
+ (fmt->fourcc >> 8 & 0xff),
+ (fmt->fourcc >> 16 & 0xff),
+ (fmt->fourcc >> 24 & 0xff));
+
+ return mtk_jpeg_try_fmt_mplane(f, fmt, ctx, MTK_JPEG_FMT_TYPE_CAPTURE);
+}
+
+static int mtk_jpeg_try_fmt_vid_out_mplane(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct mtk_jpeg_ctx *ctx = mtk_jpeg_fh_to_ctx(priv);
+ struct mtk_jpeg_fmt *fmt;
+
+ fmt = mtk_jpeg_find_format(ctx, f->fmt.pix_mp.pixelformat,
+ MTK_JPEG_FMT_TYPE_OUTPUT);
+ if (!fmt)
+ fmt = ctx->out_q.fmt;
+
+ v4l2_dbg(2, debug, &ctx->jpeg->v4l2_dev, "(%d) try_fmt:%c%c%c%c\n",
+ f->type,
+ (fmt->fourcc & 0xff),
+ (fmt->fourcc >> 8 & 0xff),
+ (fmt->fourcc >> 16 & 0xff),
+ (fmt->fourcc >> 24 & 0xff));
+
+ return mtk_jpeg_try_fmt_mplane(f, fmt, ctx, MTK_JPEG_FMT_TYPE_OUTPUT);
+}
+
+static int mtk_jpeg_s_fmt_mplane(struct mtk_jpeg_ctx *ctx,
+ struct v4l2_format *f)
+{
+ struct vb2_queue *vq;
+ struct mtk_jpeg_q_data *q_data = NULL;
+ struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
+ struct mtk_jpeg_dev *jpeg = ctx->jpeg;
+ unsigned int f_type;
+ int i;
+
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
+ if (!vq)
+ return -EINVAL;
+
+ q_data = mtk_jpeg_get_q_data(ctx, f->type);
+
+ if (vb2_is_busy(vq)) {
+ v4l2_err(&jpeg->v4l2_dev, "queue busy\n");
+ return -EBUSY;
+ }
+
+ f_type = V4L2_TYPE_IS_OUTPUT(f->type) ?
+ MTK_JPEG_FMT_TYPE_OUTPUT : MTK_JPEG_FMT_TYPE_CAPTURE;
+
+ q_data->fmt = mtk_jpeg_find_format(ctx, pix_mp->pixelformat, f_type);
+ q_data->w = pix_mp->width;
+ q_data->h = pix_mp->height;
+ ctx->colorspace = pix_mp->colorspace;
+ ctx->ycbcr_enc = pix_mp->ycbcr_enc;
+ ctx->xfer_func = pix_mp->xfer_func;
+ ctx->quantization = pix_mp->quantization;
+
+ v4l2_dbg(1, debug, &jpeg->v4l2_dev, "(%d) s_fmt:%c%c%c%c wxh:%ux%u\n",
+ f->type,
+ (q_data->fmt->fourcc & 0xff),
+ (q_data->fmt->fourcc >> 8 & 0xff),
+ (q_data->fmt->fourcc >> 16 & 0xff),
+ (q_data->fmt->fourcc >> 24 & 0xff),
+ q_data->w, q_data->h);
+
+ for (i = 0; i < q_data->fmt->colplanes; i++) {
+ q_data->bytesperline[i] = pix_mp->plane_fmt[i].bytesperline;
+ q_data->sizeimage[i] = pix_mp->plane_fmt[i].sizeimage;
+
+ v4l2_dbg(1, debug, &jpeg->v4l2_dev,
+ "plane[%d] bpl=%u, size=%u\n",
+ i, q_data->bytesperline[i], q_data->sizeimage[i]);
+ }
+
+ return 0;
+}
+
+static int mtk_jpeg_s_fmt_vid_out_mplane(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ int ret;
+
+ ret = mtk_jpeg_try_fmt_vid_out_mplane(file, priv, f);
+ if (ret)
+ return ret;
+
+ return mtk_jpeg_s_fmt_mplane(mtk_jpeg_fh_to_ctx(priv), f);
+}
+
+static int mtk_jpeg_s_fmt_vid_cap_mplane(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ int ret;
+
+ ret = mtk_jpeg_try_fmt_vid_cap_mplane(file, priv, f);
+ if (ret)
+ return ret;
+
+ return mtk_jpeg_s_fmt_mplane(mtk_jpeg_fh_to_ctx(priv), f);
+}
+
+static void mtk_jpeg_queue_src_chg_event(struct mtk_jpeg_ctx *ctx)
+{
+ static const struct v4l2_event ev_src_ch = {
+ .type = V4L2_EVENT_SOURCE_CHANGE,
+ .u.src_change.changes =
+ V4L2_EVENT_SRC_CH_RESOLUTION,
+ };
+
+ v4l2_event_queue_fh(&ctx->fh, &ev_src_ch);
+}
+
+static int mtk_jpeg_subscribe_event(struct v4l2_fh *fh,
+ const struct v4l2_event_subscription *sub)
+{
+ switch (sub->type) {
+ case V4L2_EVENT_SOURCE_CHANGE:
+ return v4l2_src_change_event_subscribe(fh, sub);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int mtk_jpeg_g_selection(struct file *file, void *priv,
+ struct v4l2_selection *s)
+{
+ struct mtk_jpeg_ctx *ctx = mtk_jpeg_fh_to_ctx(priv);
+
+ if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ switch (s->target) {
+ case V4L2_SEL_TGT_COMPOSE:
+ case V4L2_SEL_TGT_COMPOSE_DEFAULT:
+ s->r.width = ctx->out_q.w;
+ s->r.height = ctx->out_q.h;
+ s->r.left = 0;
+ s->r.top = 0;
+ break;
+ case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+ case V4L2_SEL_TGT_COMPOSE_PADDED:
+ s->r.width = ctx->cap_q.w;
+ s->r.height = ctx->cap_q.h;
+ s->r.left = 0;
+ s->r.top = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int mtk_jpeg_s_selection(struct file *file, void *priv,
+ struct v4l2_selection *s)
+{
+ struct mtk_jpeg_ctx *ctx = mtk_jpeg_fh_to_ctx(priv);
+
+ if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ switch (s->target) {
+ case V4L2_SEL_TGT_COMPOSE:
+ s->r.left = 0;
+ s->r.top = 0;
+ s->r.width = ctx->out_q.w;
+ s->r.height = ctx->out_q.h;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int mtk_jpeg_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
+{
+ struct v4l2_fh *fh = file->private_data;
+ struct mtk_jpeg_ctx *ctx = mtk_jpeg_fh_to_ctx(priv);
+ struct vb2_queue *vq;
+ struct vb2_buffer *vb;
+ struct mtk_jpeg_src_buf *jpeg_src_buf;
+
+ if (buf->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ goto end;
+
+ vq = v4l2_m2m_get_vq(fh->m2m_ctx, buf->type);
+ if (buf->index >= vq->num_buffers) {
+ dev_err(ctx->jpeg->dev, "buffer index out of range\n");
+ return -EINVAL;
+ }
+
+ vb = vq->bufs[buf->index];
+ jpeg_src_buf = mtk_jpeg_vb2_to_srcbuf(vb);
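+ /* An output buffer queued with zero bytesused marks the end of stream */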
+ jpeg_src_buf->flags = (buf->m.planes[0].bytesused == 0) ?
+ MTK_JPEG_BUF_FLAGS_LAST_FRAME : MTK_JPEG_BUF_FLAGS_INIT;
+end:
+ return v4l2_m2m_qbuf(file, fh->m2m_ctx, buf);
+}
+
+static const struct v4l2_ioctl_ops mtk_jpeg_ioctl_ops = {
+ .vidioc_querycap = mtk_jpeg_querycap,
+ .vidioc_enum_fmt_vid_cap_mplane = mtk_jpeg_enum_fmt_vid_cap,
+ .vidioc_enum_fmt_vid_out_mplane = mtk_jpeg_enum_fmt_vid_out,
+ .vidioc_try_fmt_vid_cap_mplane = mtk_jpeg_try_fmt_vid_cap_mplane,
+ .vidioc_try_fmt_vid_out_mplane = mtk_jpeg_try_fmt_vid_out_mplane,
+ .vidioc_g_fmt_vid_cap_mplane = mtk_jpeg_g_fmt_vid_mplane,
+ .vidioc_g_fmt_vid_out_mplane = mtk_jpeg_g_fmt_vid_mplane,
+ .vidioc_s_fmt_vid_cap_mplane = mtk_jpeg_s_fmt_vid_cap_mplane,
+ .vidioc_s_fmt_vid_out_mplane = mtk_jpeg_s_fmt_vid_out_mplane,
+ .vidioc_qbuf = mtk_jpeg_qbuf,
+ .vidioc_subscribe_event = mtk_jpeg_subscribe_event,
+ .vidioc_g_selection = mtk_jpeg_g_selection,
+ .vidioc_s_selection = mtk_jpeg_s_selection,
+
+ .vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs,
+ .vidioc_prepare_buf = v4l2_m2m_ioctl_prepare_buf,
+ .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
+ .vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
+ .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
+ .vidioc_expbuf = v4l2_m2m_ioctl_expbuf,
+ .vidioc_streamon = v4l2_m2m_ioctl_streamon,
+ .vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
+
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+static int mtk_jpeg_queue_setup(struct vb2_queue *q,
+ unsigned int *num_buffers,
+ unsigned int *num_planes,
+ unsigned int sizes[],
+ struct device *alloc_ctxs[])
+{
+ struct mtk_jpeg_ctx *ctx = vb2_get_drv_priv(q);
+ struct mtk_jpeg_q_data *q_data = NULL;
+ struct mtk_jpeg_dev *jpeg = ctx->jpeg;
+ int i;
+
+ v4l2_dbg(1, debug, &jpeg->v4l2_dev, "(%d) buf_req count=%u\n",
+ q->type, *num_buffers);
+
+ q_data = mtk_jpeg_get_q_data(ctx, q->type);
+ if (!q_data)
+ return -EINVAL;
+
+ *num_planes = q_data->fmt->colplanes;
+ for (i = 0; i < q_data->fmt->colplanes; i++) {
+ sizes[i] = q_data->sizeimage[i];
+ v4l2_dbg(1, debug, &jpeg->v4l2_dev, "sizeimage[%d]=%u\n",
+ i, sizes[i]);
+ }
+
+ return 0;
+}
+
+static int mtk_jpeg_buf_prepare(struct vb2_buffer *vb)
+{
+ struct mtk_jpeg_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct mtk_jpeg_q_data *q_data = NULL;
+ int i;
+
+ q_data = mtk_jpeg_get_q_data(ctx, vb->vb2_queue->type);
+ if (!q_data)
+ return -EINVAL;
+
+ for (i = 0; i < q_data->fmt->colplanes; i++)
+ vb2_set_plane_payload(vb, i, q_data->sizeimage[i]);
+
+ return 0;
+}
+
+static bool mtk_jpeg_check_resolution_change(struct mtk_jpeg_ctx *ctx,
+ struct mtk_jpeg_dec_param *param)
+{
+ struct mtk_jpeg_dev *jpeg = ctx->jpeg;
+ struct mtk_jpeg_q_data *q_data;
+
+ q_data = &ctx->out_q;
+ if (q_data->w != param->pic_w || q_data->h != param->pic_h) {
+ v4l2_dbg(1, debug, &jpeg->v4l2_dev, "Picture size change\n");
+ return true;
+ }
+
+ q_data = &ctx->cap_q;
+ if (q_data->fmt != mtk_jpeg_find_format(ctx, param->dst_fourcc,
+ MTK_JPEG_FMT_TYPE_CAPTURE)) {
+ v4l2_dbg(1, debug, &jpeg->v4l2_dev, "format change\n");
+ return true;
+ }
+ return false;
+}
+
+static void mtk_jpeg_set_queue_data(struct mtk_jpeg_ctx *ctx,
+ struct mtk_jpeg_dec_param *param)
+{
+ struct mtk_jpeg_dev *jpeg = ctx->jpeg;
+ struct mtk_jpeg_q_data *q_data;
+ int i;
+
+ q_data = &ctx->out_q;
+ q_data->w = param->pic_w;
+ q_data->h = param->pic_h;
+
+ q_data = &ctx->cap_q;
+ q_data->w = param->dec_w;
+ q_data->h = param->dec_h;
+ q_data->fmt = mtk_jpeg_find_format(ctx,
+ param->dst_fourcc,
+ MTK_JPEG_FMT_TYPE_CAPTURE);
+
+ for (i = 0; i < q_data->fmt->colplanes; i++) {
+ q_data->bytesperline[i] = param->mem_stride[i];
+ q_data->sizeimage[i] = param->comp_size[i];
+ }
+
+ v4l2_dbg(1, debug, &jpeg->v4l2_dev,
+ "set_parse cap:%c%c%c%c pic(%u, %u), buf(%u, %u)\n",
+ (param->dst_fourcc & 0xff),
+ (param->dst_fourcc >> 8 & 0xff),
+ (param->dst_fourcc >> 16 & 0xff),
+ (param->dst_fourcc >> 24 & 0xff),
+ param->pic_w, param->pic_h,
+ param->dec_w, param->dec_h);
+}
+
+static void mtk_jpeg_buf_queue(struct vb2_buffer *vb)
+{
+ struct mtk_jpeg_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct mtk_jpeg_dec_param *param;
+ struct mtk_jpeg_dev *jpeg = ctx->jpeg;
+ struct mtk_jpeg_src_buf *jpeg_src_buf;
+ bool header_valid;
+
+ v4l2_dbg(2, debug, &jpeg->v4l2_dev, "(%d) buf_q id=%d, vb=%p\n",
+ vb->vb2_queue->type, vb->index, vb);
+
+ if (vb->vb2_queue->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ goto end;
+
+ jpeg_src_buf = mtk_jpeg_vb2_to_srcbuf(vb);
+ param = &jpeg_src_buf->dec_param;
+ memset(param, 0, sizeof(*param));
+
+ if (jpeg_src_buf->flags & MTK_JPEG_BUF_FLAGS_LAST_FRAME) {
+ v4l2_dbg(1, debug, &jpeg->v4l2_dev, "Got eos\n");
+ goto end;
+ }
+ header_valid = mtk_jpeg_parse(param, (u8 *)vb2_plane_vaddr(vb, 0),
+ vb2_get_plane_payload(vb, 0));
+ if (!header_valid) {
+ v4l2_err(&jpeg->v4l2_dev, "Header invalid.\n");
+ vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
+ return;
+ }
+
+ if (ctx->state == MTK_JPEG_INIT) {
+ struct vb2_queue *dst_vq = v4l2_m2m_get_vq(
+ ctx->fh.m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+
+ mtk_jpeg_queue_src_chg_event(ctx);
+ mtk_jpeg_set_queue_data(ctx, param);
+ ctx->state = vb2_is_streaming(dst_vq) ?
+ MTK_JPEG_SOURCE_CHANGE : MTK_JPEG_RUNNING;
+ }
+end:
+ v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, to_vb2_v4l2_buffer(vb));
+}
+
+static void *mtk_jpeg_buf_remove(struct mtk_jpeg_ctx *ctx,
+ enum v4l2_buf_type type)
+{
+ if (V4L2_TYPE_IS_OUTPUT(type))
+ return v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ else
+ return v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+}
+
+static int mtk_jpeg_start_streaming(struct vb2_queue *q, unsigned int count)
+{
+ struct mtk_jpeg_ctx *ctx = vb2_get_drv_priv(q);
+ struct vb2_buffer *vb;
+ int ret = 0;
+
+ ret = pm_runtime_get_sync(ctx->jpeg->dev);
+ if (ret < 0)
+ goto err;
+
+ return 0;
+err:
+ while ((vb = mtk_jpeg_buf_remove(ctx, q->type)))
+ v4l2_m2m_buf_done(to_vb2_v4l2_buffer(vb), VB2_BUF_STATE_QUEUED);
+ return ret;
+}
+
+static void mtk_jpeg_stop_streaming(struct vb2_queue *q)
+{
+ struct mtk_jpeg_ctx *ctx = vb2_get_drv_priv(q);
+ struct vb2_buffer *vb;
+
+ /*
+ * STREAMOFF is an acknowledgment of the source change event.
+ * Before STREAMOFF, we still have to return the old resolution and
+ * subsampling. Update the capture queue once the stream is off.
+ */
+ if (ctx->state == MTK_JPEG_SOURCE_CHANGE &&
+ !V4L2_TYPE_IS_OUTPUT(q->type)) {
+ struct mtk_jpeg_src_buf *src_buf;
+
+ vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+ src_buf = mtk_jpeg_vb2_to_srcbuf(vb);
+ mtk_jpeg_set_queue_data(ctx, &src_buf->dec_param);
+ ctx->state = MTK_JPEG_RUNNING;
+ } else if (V4L2_TYPE_IS_OUTPUT(q->type)) {
+ ctx->state = MTK_JPEG_INIT;
+ }
+
+ while ((vb = mtk_jpeg_buf_remove(ctx, q->type)))
+ v4l2_m2m_buf_done(to_vb2_v4l2_buffer(vb), VB2_BUF_STATE_ERROR);
+
+ pm_runtime_put_sync(ctx->jpeg->dev);
+}
+
+static struct vb2_ops mtk_jpeg_qops = {
+ .queue_setup = mtk_jpeg_queue_setup,
+ .buf_prepare = mtk_jpeg_buf_prepare,
+ .buf_queue = mtk_jpeg_buf_queue,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+ .start_streaming = mtk_jpeg_start_streaming,
+ .stop_streaming = mtk_jpeg_stop_streaming,
+};
+
+static void mtk_jpeg_set_dec_src(struct mtk_jpeg_ctx *ctx,
+ struct vb2_buffer *src_buf,
+ struct mtk_jpeg_bs *bs)
+{
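+ /*
+ * The register setters below expect bitstream addresses aligned to 16
+ * bytes and the total size aligned to 128 bytes, so round up here.
+ */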
+ bs->str_addr = vb2_dma_contig_plane_dma_addr(src_buf, 0);
+ bs->end_addr = bs->str_addr +
+ mtk_jpeg_align(vb2_get_plane_payload(src_buf, 0), 16);
+ bs->size = mtk_jpeg_align(vb2_plane_size(src_buf, 0), 128);
+}
+
+static int mtk_jpeg_set_dec_dst(struct mtk_jpeg_ctx *ctx,
+ struct mtk_jpeg_dec_param *param,
+ struct vb2_buffer *dst_buf,
+ struct mtk_jpeg_fb *fb)
+{
+ int i;
+
+ if (param->comp_num != dst_buf->num_planes) {
+ dev_err(ctx->jpeg->dev, "plane number mismatch (%u != %u)\n",
+ param->comp_num, dst_buf->num_planes);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < dst_buf->num_planes; i++) {
+ if (vb2_plane_size(dst_buf, i) < param->comp_size[i]) {
+ dev_err(ctx->jpeg->dev,
+ "buffer size is underflow (%lu < %u)\n",
+ vb2_plane_size(dst_buf, 0),
+ param->comp_size[i]);
+ return -EINVAL;
+ }
+ fb->plane_addr[i] = vb2_dma_contig_plane_dma_addr(dst_buf, i);
+ }
+
+ return 0;
+}
+
+static void mtk_jpeg_device_run(void *priv)
+{
+ struct mtk_jpeg_ctx *ctx = priv;
+ struct mtk_jpeg_dev *jpeg = ctx->jpeg;
+ struct vb2_buffer *src_buf, *dst_buf;
+ enum vb2_buffer_state buf_state = VB2_BUF_STATE_ERROR;
+ unsigned long flags;
+ struct mtk_jpeg_src_buf *jpeg_src_buf;
+ struct mtk_jpeg_bs bs;
+ struct mtk_jpeg_fb fb;
+ int i;
+
+ src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+ dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
+ jpeg_src_buf = mtk_jpeg_vb2_to_srcbuf(src_buf);
+
+ if (jpeg_src_buf->flags & MTK_JPEG_BUF_FLAGS_LAST_FRAME) {
+ for (i = 0; i < dst_buf->num_planes; i++)
+ vb2_set_plane_payload(dst_buf, i, 0);
+ buf_state = VB2_BUF_STATE_DONE;
+ goto dec_end;
+ }
+
+ if (mtk_jpeg_check_resolution_change(ctx, &jpeg_src_buf->dec_param)) {
+ mtk_jpeg_queue_src_chg_event(ctx);
+ ctx->state = MTK_JPEG_SOURCE_CHANGE;
+ v4l2_m2m_job_finish(jpeg->m2m_dev, ctx->fh.m2m_ctx);
+ return;
+ }
+
+ mtk_jpeg_set_dec_src(ctx, src_buf, &bs);
+ if (mtk_jpeg_set_dec_dst(ctx, &jpeg_src_buf->dec_param, dst_buf, &fb))
+ goto dec_end;
+
+ spin_lock_irqsave(&jpeg->hw_lock, flags);
+ mtk_jpeg_dec_reset(jpeg->dec_reg_base);
+ mtk_jpeg_dec_set_config(jpeg->dec_reg_base,
+ &jpeg_src_buf->dec_param, &bs, &fb);
+
+ mtk_jpeg_dec_start(jpeg->dec_reg_base);
+ spin_unlock_irqrestore(&jpeg->hw_lock, flags);
+ return;
+
+dec_end:
+ v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+ v4l2_m2m_buf_done(to_vb2_v4l2_buffer(src_buf), buf_state);
+ v4l2_m2m_buf_done(to_vb2_v4l2_buffer(dst_buf), buf_state);
+ v4l2_m2m_job_finish(jpeg->m2m_dev, ctx->fh.m2m_ctx);
+}
+
+static int mtk_jpeg_job_ready(void *priv)
+{
+ struct mtk_jpeg_ctx *ctx = priv;
+
+ return (ctx->state == MTK_JPEG_RUNNING) ? 1 : 0;
+}
+
+static void mtk_jpeg_job_abort(void *priv)
+{
+}
+
+static struct v4l2_m2m_ops mtk_jpeg_m2m_ops = {
+ .device_run = mtk_jpeg_device_run,
+ .job_ready = mtk_jpeg_job_ready,
+ .job_abort = mtk_jpeg_job_abort,
+};
+
+static int mtk_jpeg_queue_init(void *priv, struct vb2_queue *src_vq,
+ struct vb2_queue *dst_vq)
+{
+ struct mtk_jpeg_ctx *ctx = priv;
+ int ret;
+
+ src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ src_vq->io_modes = VB2_DMABUF | VB2_MMAP;
+ src_vq->drv_priv = ctx;
+ src_vq->buf_struct_size = sizeof(struct mtk_jpeg_src_buf);
+ src_vq->ops = &mtk_jpeg_qops;
+ src_vq->mem_ops = &vb2_dma_contig_memops;
+ src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ src_vq->lock = &ctx->jpeg->lock;
+ src_vq->dev = ctx->jpeg->dev;
+ ret = vb2_queue_init(src_vq);
+ if (ret)
+ return ret;
+
+ dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ dst_vq->io_modes = VB2_DMABUF | VB2_MMAP;
+ dst_vq->drv_priv = ctx;
+ dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ dst_vq->ops = &mtk_jpeg_qops;
+ dst_vq->mem_ops = &vb2_dma_contig_memops;
+ dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ dst_vq->lock = &ctx->jpeg->lock;
+ dst_vq->dev = ctx->jpeg->dev;
+ ret = vb2_queue_init(dst_vq);
+
+ return ret;
+}
+
+static void mtk_jpeg_clk_on(struct mtk_jpeg_dev *jpeg)
+{
+ int ret;
+
+ ret = mtk_smi_larb_get(jpeg->larb);
+ if (ret)
+ dev_err(jpeg->dev, "mtk_smi_larb_get larbvdec fail %d\n", ret);
+ clk_prepare_enable(jpeg->clk_jdec_smi);
+ clk_prepare_enable(jpeg->clk_jdec);
+}
+
+static void mtk_jpeg_clk_off(struct mtk_jpeg_dev *jpeg)
+{
+ clk_disable_unprepare(jpeg->clk_jdec);
+ clk_disable_unprepare(jpeg->clk_jdec_smi);
+ mtk_smi_larb_put(jpeg->larb);
+}
+
+static irqreturn_t mtk_jpeg_dec_irq(int irq, void *priv)
+{
+ struct mtk_jpeg_dev *jpeg = priv;
+ struct mtk_jpeg_ctx *ctx;
+ struct vb2_buffer *src_buf, *dst_buf;
+ struct mtk_jpeg_src_buf *jpeg_src_buf;
+ enum vb2_buffer_state buf_state = VB2_BUF_STATE_ERROR;
+ u32 dec_irq_ret;
+ u32 dec_ret;
+ int i;
+
+ dec_ret = mtk_jpeg_dec_get_int_status(jpeg->dec_reg_base);
+ dec_irq_ret = mtk_jpeg_dec_enum_result(dec_ret);
+ ctx = v4l2_m2m_get_curr_priv(jpeg->m2m_dev);
+ if (!ctx) {
+ v4l2_err(&jpeg->v4l2_dev, "Context is NULL\n");
+ return IRQ_HANDLED;
+ }
+
+ src_buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ dst_buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+ jpeg_src_buf = mtk_jpeg_vb2_to_srcbuf(src_buf);
+
+ if (dec_irq_ret >= MTK_JPEG_DEC_RESULT_UNDERFLOW)
+ mtk_jpeg_dec_reset(jpeg->dec_reg_base);
+
+ if (dec_irq_ret != MTK_JPEG_DEC_RESULT_EOF_DONE) {
+ dev_err(jpeg->dev, "decode failed\n");
+ goto dec_end;
+ }
+
+ for (i = 0; i < dst_buf->num_planes; i++)
+ vb2_set_plane_payload(dst_buf, i,
+ jpeg_src_buf->dec_param.comp_size[i]);
+
+ buf_state = VB2_BUF_STATE_DONE;
+
+dec_end:
+ v4l2_m2m_buf_done(to_vb2_v4l2_buffer(src_buf), buf_state);
+ v4l2_m2m_buf_done(to_vb2_v4l2_buffer(dst_buf), buf_state);
+ v4l2_m2m_job_finish(jpeg->m2m_dev, ctx->fh.m2m_ctx);
+ return IRQ_HANDLED;
+}
+
+static void mtk_jpeg_set_default_params(struct mtk_jpeg_ctx *ctx)
+{
+ struct mtk_jpeg_q_data *q = &ctx->out_q;
+ int i;
+
+ ctx->colorspace = V4L2_COLORSPACE_JPEG;
+ ctx->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+ ctx->quantization = V4L2_QUANTIZATION_DEFAULT;
+ ctx->xfer_func = V4L2_XFER_FUNC_DEFAULT;
+
+ q->fmt = mtk_jpeg_find_format(ctx, V4L2_PIX_FMT_JPEG,
+ MTK_JPEG_FMT_TYPE_OUTPUT);
+ q->w = MTK_JPEG_MIN_WIDTH;
+ q->h = MTK_JPEG_MIN_HEIGHT;
+ q->bytesperline[0] = 0;
+ q->sizeimage[0] = MTK_JPEG_DEFAULT_SIZEIMAGE;
+
+ q = &ctx->cap_q;
+ q->fmt = mtk_jpeg_find_format(ctx, V4L2_PIX_FMT_YUV420M,
+ MTK_JPEG_FMT_TYPE_CAPTURE);
+ q->w = MTK_JPEG_MIN_WIDTH;
+ q->h = MTK_JPEG_MIN_HEIGHT;
+
+ for (i = 0; i < q->fmt->colplanes; i++) {
+ u32 stride = q->w * q->fmt->h_sample[i] / 4;
+ u32 h = q->h * q->fmt->v_sample[i] / 4;
+
+ q->bytesperline[i] = stride;
+ q->sizeimage[i] = stride * h;
+ }
+}
+
+static int mtk_jpeg_open(struct file *file)
+{
+ struct mtk_jpeg_dev *jpeg = video_drvdata(file);
+ struct video_device *vfd = video_devdata(file);
+ struct mtk_jpeg_ctx *ctx;
+ int ret = 0;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ if (mutex_lock_interruptible(&jpeg->lock)) {
+ ret = -ERESTARTSYS;
+ goto free;
+ }
+
+ v4l2_fh_init(&ctx->fh, vfd);
+ file->private_data = &ctx->fh;
+ v4l2_fh_add(&ctx->fh);
+
+ ctx->jpeg = jpeg;
+ ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(jpeg->m2m_dev, ctx,
+ mtk_jpeg_queue_init);
+ if (IS_ERR(ctx->fh.m2m_ctx)) {
+ ret = PTR_ERR(ctx->fh.m2m_ctx);
+ goto error;
+ }
+
+ mtk_jpeg_set_default_params(ctx);
+ mutex_unlock(&jpeg->lock);
+ return 0;
+
+error:
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+ mutex_unlock(&jpeg->lock);
+free:
+ kfree(ctx);
+ return ret;
+}
+
+static int mtk_jpeg_release(struct file *file)
+{
+ struct mtk_jpeg_dev *jpeg = video_drvdata(file);
+ struct mtk_jpeg_ctx *ctx = mtk_jpeg_fh_to_ctx(file->private_data);
+
+ mutex_lock(&jpeg->lock);
+ v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+ kfree(ctx);
+ mutex_unlock(&jpeg->lock);
+ return 0;
+}
+
+static const struct v4l2_file_operations mtk_jpeg_fops = {
+ .owner = THIS_MODULE,
+ .open = mtk_jpeg_open,
+ .release = mtk_jpeg_release,
+ .poll = v4l2_m2m_fop_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = v4l2_m2m_fop_mmap,
+};
+
+static int mtk_jpeg_clk_init(struct mtk_jpeg_dev *jpeg)
+{
+ struct device_node *node;
+ struct platform_device *pdev;
+
+ node = of_parse_phandle(jpeg->dev->of_node, "mediatek,larb", 0);
+ if (!node)
+ return -EINVAL;
+ pdev = of_find_device_by_node(node);
+ if (WARN_ON(!pdev)) {
+ of_node_put(node);
+ return -EINVAL;
+ }
+ of_node_put(node);
+
+ jpeg->larb = &pdev->dev;
+
+ jpeg->clk_jdec = devm_clk_get(jpeg->dev, "jpgdec");
+ if (IS_ERR(jpeg->clk_jdec))
+ return -EINVAL;
+
+ jpeg->clk_jdec_smi = devm_clk_get(jpeg->dev, "jpgdec-smi");
+ if (IS_ERR(jpeg->clk_jdec_smi))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int mtk_jpeg_probe(struct platform_device *pdev)
+{
+ struct mtk_jpeg_dev *jpeg;
+ struct resource *res;
+ int dec_irq;
+ int ret;
+
+ jpeg = devm_kzalloc(&pdev->dev, sizeof(*jpeg), GFP_KERNEL);
+ if (!jpeg)
+ return -ENOMEM;
+
+ mutex_init(&jpeg->lock);
+ spin_lock_init(&jpeg->hw_lock);
+ jpeg->dev = &pdev->dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ jpeg->dec_reg_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(jpeg->dec_reg_base)) {
+ ret = PTR_ERR(jpeg->dec_reg_base);
+ return ret;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ dec_irq = platform_get_irq(pdev, 0);
+ if (!res || dec_irq < 0) {
+ dev_err(&pdev->dev, "Failed to get dec_irq %d.\n", dec_irq);
+ ret = -EINVAL;
+ return ret;
+ }
+
+ ret = devm_request_irq(&pdev->dev, dec_irq, mtk_jpeg_dec_irq, 0,
+ pdev->name, jpeg);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to request dec_irq %d (%d)\n",
+ dec_irq, ret);
+ ret = -EINVAL;
+ goto err_req_irq;
+ }
+
+ ret = mtk_jpeg_clk_init(jpeg);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to init clk, err %d\n", ret);
+ goto err_clk_init;
+ }
+
+ ret = v4l2_device_register(&pdev->dev, &jpeg->v4l2_dev);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to register v4l2 device\n");
+ ret = -EINVAL;
+ goto err_dev_register;
+ }
+
+ jpeg->m2m_dev = v4l2_m2m_init(&mtk_jpeg_m2m_ops);
+ if (IS_ERR(jpeg->m2m_dev)) {
+ v4l2_err(&jpeg->v4l2_dev, "Failed to init mem2mem device\n");
+ ret = PTR_ERR(jpeg->m2m_dev);
+ goto err_m2m_init;
+ }
+
+ jpeg->dec_vdev = video_device_alloc();
+ if (!jpeg->dec_vdev) {
+ ret = -ENOMEM;
+ goto err_dec_vdev_alloc;
+ }
+ snprintf(jpeg->dec_vdev->name, sizeof(jpeg->dec_vdev->name),
+ "%s-dec", MTK_JPEG_NAME);
+ jpeg->dec_vdev->fops = &mtk_jpeg_fops;
+ jpeg->dec_vdev->ioctl_ops = &mtk_jpeg_ioctl_ops;
+ jpeg->dec_vdev->minor = -1;
+ jpeg->dec_vdev->release = video_device_release;
+ jpeg->dec_vdev->lock = &jpeg->lock;
+ jpeg->dec_vdev->v4l2_dev = &jpeg->v4l2_dev;
+ jpeg->dec_vdev->vfl_dir = VFL_DIR_M2M;
+ jpeg->dec_vdev->device_caps = V4L2_CAP_STREAMING |
+ V4L2_CAP_VIDEO_M2M_MPLANE;
+
+ ret = video_register_device(jpeg->dec_vdev, VFL_TYPE_GRABBER, 3);
+ if (ret) {
+ v4l2_err(&jpeg->v4l2_dev, "Failed to register video device\n");
+ goto err_dec_vdev_register;
+ }
+
+ video_set_drvdata(jpeg->dec_vdev, jpeg);
+ v4l2_info(&jpeg->v4l2_dev,
+ "decoder device registered as /dev/video%d (%d,%d)\n",
+ jpeg->dec_vdev->num, VIDEO_MAJOR, jpeg->dec_vdev->minor);
+
+ platform_set_drvdata(pdev, jpeg);
+
+ pm_runtime_enable(&pdev->dev);
+
+ return 0;
+
+err_dec_vdev_register:
+ video_device_release(jpeg->dec_vdev);
+
+err_dec_vdev_alloc:
+ v4l2_m2m_release(jpeg->m2m_dev);
+
+err_m2m_init:
+ v4l2_device_unregister(&jpeg->v4l2_dev);
+
+err_dev_register:
+
+err_clk_init:
+
+err_req_irq:
+
+ return ret;
+}
+
+static int mtk_jpeg_remove(struct platform_device *pdev)
+{
+ struct mtk_jpeg_dev *jpeg = platform_get_drvdata(pdev);
+
+ pm_runtime_disable(&pdev->dev);
+ video_unregister_device(jpeg->dec_vdev);
+ video_device_release(jpeg->dec_vdev);
+ v4l2_m2m_release(jpeg->m2m_dev);
+ v4l2_device_unregister(&jpeg->v4l2_dev);
+
+ return 0;
+}
+
+static __maybe_unused int mtk_jpeg_pm_suspend(struct device *dev)
+{
+ struct mtk_jpeg_dev *jpeg = dev_get_drvdata(dev);
+
+ mtk_jpeg_dec_reset(jpeg->dec_reg_base);
+ mtk_jpeg_clk_off(jpeg);
+
+ return 0;
+}
+
+static __maybe_unused int mtk_jpeg_pm_resume(struct device *dev)
+{
+ struct mtk_jpeg_dev *jpeg = dev_get_drvdata(dev);
+
+ mtk_jpeg_clk_on(jpeg);
+ mtk_jpeg_dec_reset(jpeg->dec_reg_base);
+
+ return 0;
+}
+
+static __maybe_unused int mtk_jpeg_suspend(struct device *dev)
+{
+ int ret;
+
+ if (pm_runtime_suspended(dev))
+ return 0;
+
+ ret = mtk_jpeg_pm_suspend(dev);
+ return ret;
+}
+
+static __maybe_unused int mtk_jpeg_resume(struct device *dev)
+{
+ int ret;
+
+ if (pm_runtime_suspended(dev))
+ return 0;
+
+ ret = mtk_jpeg_pm_resume(dev);
+
+ return ret;
+}
+
+static const struct dev_pm_ops mtk_jpeg_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(mtk_jpeg_suspend, mtk_jpeg_resume)
+ SET_RUNTIME_PM_OPS(mtk_jpeg_pm_suspend, mtk_jpeg_pm_resume, NULL)
+};
+
+static const struct of_device_id mtk_jpeg_match[] = {
+ {
+ .compatible = "mediatek,mt8173-jpgdec",
+ .data = NULL,
+ },
+ {
+ .compatible = "mediatek,mt2701-jpgdec",
+ .data = NULL,
+ },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, mtk_jpeg_match);
+
+static struct platform_driver mtk_jpeg_driver = {
+ .probe = mtk_jpeg_probe,
+ .remove = mtk_jpeg_remove,
+ .driver = {
+ .name = MTK_JPEG_NAME,
+ .of_match_table = mtk_jpeg_match,
+ .pm = &mtk_jpeg_pm_ops,
+ },
+};
+
+module_platform_driver(mtk_jpeg_driver);
+
+MODULE_DESCRIPTION("MediaTek JPEG codec driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.h b/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.h
new file mode 100644
index 000000000000..1a6cdfd4ea70
--- /dev/null
+++ b/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.h
@@ -0,0 +1,139 @@
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: Ming Hsiu Tsai <minghsiu.tsai@mediatek.com>
+ * Rick Chang <rick.chang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MTK_JPEG_CORE_H
+#define _MTK_JPEG_CORE_H
+
+#include <linux/interrupt.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-fh.h>
+
+#define MTK_JPEG_NAME "mtk-jpeg"
+
+#define MTK_JPEG_FMT_FLAG_DEC_OUTPUT BIT(0)
+#define MTK_JPEG_FMT_FLAG_DEC_CAPTURE BIT(1)
+
+#define MTK_JPEG_FMT_TYPE_OUTPUT 1
+#define MTK_JPEG_FMT_TYPE_CAPTURE 2
+
+#define MTK_JPEG_MIN_WIDTH 32
+#define MTK_JPEG_MIN_HEIGHT 32
+#define MTK_JPEG_MAX_WIDTH 8192
+#define MTK_JPEG_MAX_HEIGHT 8192
+
+#define MTK_JPEG_DEFAULT_SIZEIMAGE (1 * 1024 * 1024)
+
+enum mtk_jpeg_ctx_state {
+ MTK_JPEG_INIT = 0,
+ MTK_JPEG_RUNNING,
+ MTK_JPEG_SOURCE_CHANGE,
+};
+
+/**
+ * struct mtk_jpeg_dev - JPEG IP abstraction
+ * @lock: the mutex protecting this structure
+ * @hw_lock: spinlock protecting the hw device resource
+ * @workqueue: decode work queue
+ * @dev: JPEG device
+ * @v4l2_dev: v4l2 device for mem2mem mode
+ * @m2m_dev: v4l2 mem2mem device data
+ * @alloc_ctx: videobuf2 memory allocator's context
+ * @dec_vdev: video device node for decoder mem2mem mode
+ * @dec_reg_base: JPEG registers mapping
+ * @clk_jdec: JPEG hw working clock
+ * @clk_jdec_smi: JPEG SMI bus clock
+ * @larb: SMI device
+ */
+struct mtk_jpeg_dev {
+ struct mutex lock;
+ spinlock_t hw_lock;
+ struct workqueue_struct *workqueue;
+ struct device *dev;
+ struct v4l2_device v4l2_dev;
+ struct v4l2_m2m_dev *m2m_dev;
+ void *alloc_ctx;
+ struct video_device *dec_vdev;
+ void __iomem *dec_reg_base;
+ struct clk *clk_jdec;
+ struct clk *clk_jdec_smi;
+ struct device *larb;
+};
+
+/**
+ * struct mtk_jpeg_fmt - driver's internal color format data
+ * @fourcc: the fourcc code, 0 if not applicable
+ * @h_sample: horizontal sample count of plane in 4 * 4 pixel image
+ * @v_sample: vertical sample count of plane in 4 * 4 pixel image
+ * @colplanes: number of color planes (1 for packed formats)
+ * @h_align: horizontal alignment order (align to 2^h_align)
+ * @v_align: vertical alignment order (align to 2^v_align)
+ * @flags: flags describing format applicability
+ */
+struct mtk_jpeg_fmt {
+ u32 fourcc;
+ int h_sample[VIDEO_MAX_PLANES];
+ int v_sample[VIDEO_MAX_PLANES];
+ int colplanes;
+ int h_align;
+ int v_align;
+ u32 flags;
+};
+
+/**
+ * struct mtk_jpeg_q_data - parameters of one queue
+ * @fmt: driver-specific format of this queue
+ * @w: image width
+ * @h: image height
+ * @bytesperline: distance in bytes between the leftmost pixels in two adjacent
+ * lines
+ * @sizeimage: image buffer size in bytes
+ */
+struct mtk_jpeg_q_data {
+ struct mtk_jpeg_fmt *fmt;
+ u32 w;
+ u32 h;
+ u32 bytesperline[VIDEO_MAX_PLANES];
+ u32 sizeimage[VIDEO_MAX_PLANES];
+};
+
+/**
+ * struct mtk_jpeg_ctx - the device context data
+ * @jpeg: JPEG IP device for this context
+ * @out_q: source (output) queue information
+ * @cap_q: destination (capture) queue information
+ * @fh: V4L2 file handle
+ * @dec_param: parameters for HW decoding
+ * @state: state of the context
+ * @header_valid: set once the header has been parsed and found valid
+ * @colorspace: enum v4l2_colorspace; supplemental to pixelformat
+ * @ycbcr_enc: enum v4l2_ycbcr_encoding, Y'CbCr encoding
+ * @quantization: enum v4l2_quantization, colorspace quantization
+ * @xfer_func: enum v4l2_xfer_func, colorspace transfer function
+ */
+struct mtk_jpeg_ctx {
+ struct mtk_jpeg_dev *jpeg;
+ struct mtk_jpeg_q_data out_q;
+ struct mtk_jpeg_q_data cap_q;
+ struct v4l2_fh fh;
+ enum mtk_jpeg_ctx_state state;
+
+ enum v4l2_colorspace colorspace;
+ enum v4l2_ycbcr_encoding ycbcr_enc;
+ enum v4l2_quantization quantization;
+ enum v4l2_xfer_func xfer_func;
+};
+
+#endif /* _MTK_JPEG_CORE_H */
diff --git a/drivers/media/platform/mtk-jpeg/mtk_jpeg_hw.c b/drivers/media/platform/mtk-jpeg/mtk_jpeg_hw.c
new file mode 100644
index 000000000000..77b4cc6a8873
--- /dev/null
+++ b/drivers/media/platform/mtk-jpeg/mtk_jpeg_hw.c
@@ -0,0 +1,417 @@
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: Ming Hsiu Tsai <minghsiu.tsai@mediatek.com>
+ * Rick Chang <rick.chang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <media/videobuf2-core.h>
+
+#include "mtk_jpeg_hw.h"
+
+#define MTK_JPEG_DUNUM_MASK(val) (((val) - 1) & 0x3)
+
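+ /*
+ * Per-component sampling factors packed as nibbles (Y w/h, Cb w/h,
+ * Cr w/h), matching the src_color value built in
+ * mtk_jpeg_decide_format().
+ */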
+enum mtk_jpeg_color {
+ MTK_JPEG_COLOR_420 = 0x00221111,
+ MTK_JPEG_COLOR_422 = 0x00211111,
+ MTK_JPEG_COLOR_444 = 0x00111111,
+ MTK_JPEG_COLOR_422V = 0x00121111,
+ MTK_JPEG_COLOR_422X2 = 0x00412121,
+ MTK_JPEG_COLOR_422VX2 = 0x00222121,
+ MTK_JPEG_COLOR_400 = 0x00110000
+};
+
+static inline int mtk_jpeg_verify_align(u32 val, int align, u32 reg)
+{
+ if (val & (align - 1)) {
+ pr_err("mtk-jpeg: write reg %x without %d align\n", reg, align);
+ return -1;
+ }
+
+ return 0;
+}
+
+static int mtk_jpeg_decide_format(struct mtk_jpeg_dec_param *param)
+{
+ param->src_color = (param->sampling_w[0] << 20) |
+ (param->sampling_h[0] << 16) |
+ (param->sampling_w[1] << 12) |
+ (param->sampling_h[1] << 8) |
+ (param->sampling_w[2] << 4) |
+ (param->sampling_h[2]);
+
+ param->uv_brz_w = 0;
+ switch (param->src_color) {
+ case MTK_JPEG_COLOR_444:
+ param->uv_brz_w = 1;
+ param->dst_fourcc = V4L2_PIX_FMT_YUV422M;
+ break;
+ case MTK_JPEG_COLOR_422X2:
+ case MTK_JPEG_COLOR_422:
+ param->dst_fourcc = V4L2_PIX_FMT_YUV422M;
+ break;
+ case MTK_JPEG_COLOR_422V:
+ case MTK_JPEG_COLOR_422VX2:
+ param->uv_brz_w = 1;
+ param->dst_fourcc = V4L2_PIX_FMT_YUV420M;
+ break;
+ case MTK_JPEG_COLOR_420:
+ param->dst_fourcc = V4L2_PIX_FMT_YUV420M;
+ break;
+ case MTK_JPEG_COLOR_400:
+ param->dst_fourcc = V4L2_PIX_FMT_GREY;
+ break;
+ default:
+ param->dst_fourcc = 0;
+ return -1;
+ }
+
+ return 0;
+}
+
+static void mtk_jpeg_calc_mcu(struct mtk_jpeg_dec_param *param)
+{
+ u32 factor_w, factor_h;
+ u32 i, comp, blk;
+
+ factor_w = 2 + param->sampling_w[0];
+ factor_h = 2 + param->sampling_h[0];
+ param->mcu_w = (param->pic_w + (1 << factor_w) - 1) >> factor_w;
+ param->mcu_h = (param->pic_h + (1 << factor_h) - 1) >> factor_h;
+ param->total_mcu = param->mcu_w * param->mcu_h;
+ param->unit_num = ((param->pic_w + 7) >> 3) * ((param->pic_h + 7) >> 3);
+ param->blk_num = 0;
+ for (i = 0; i < MTK_JPEG_COMP_MAX; i++) {
+ param->blk_comp[i] = 0;
+ if (i >= param->comp_num)
+ continue;
+ param->blk_comp[i] = param->sampling_w[i] *
+ param->sampling_h[i];
+ param->blk_num += param->blk_comp[i];
+ }
+
+ param->membership = 0;
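+ /*
+ * Each block in an MCU gets a 3-bit membership field: 4 + component
+ * index for blocks in use, 7 for unused slots.
+ */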
+ for (i = 0, blk = 0, comp = 0; i < MTK_JPEG_BLOCK_MAX; i++) {
+ if (i < param->blk_num && comp < param->comp_num) {
+ u32 tmp;
+
+ tmp = (0x04 + (comp & 0x3));
+ param->membership |= tmp << (i * 3);
+ if (++blk == param->blk_comp[comp]) {
+ comp++;
+ blk = 0;
+ }
+ } else {
+ param->membership |= 7 << (i * 3);
+ }
+ }
+}
+
+static void mtk_jpeg_calc_dma_group(struct mtk_jpeg_dec_param *param)
+{
+ u32 factor_mcu = 3;
+
+ if (param->src_color == MTK_JPEG_COLOR_444 &&
+ param->dst_fourcc == V4L2_PIX_FMT_YUV422M)
+ factor_mcu = 4;
+ else if (param->src_color == MTK_JPEG_COLOR_422V &&
+ param->dst_fourcc == V4L2_PIX_FMT_YUV420M)
+ factor_mcu = 4;
+ else if (param->src_color == MTK_JPEG_COLOR_422X2 &&
+ param->dst_fourcc == V4L2_PIX_FMT_YUV422M)
+ factor_mcu = 2;
+ else if (param->src_color == MTK_JPEG_COLOR_400 ||
+ (param->src_color & 0x0FFFF) == 0)
+ factor_mcu = 4;
+
+ param->dma_mcu = 1 << factor_mcu;
+ param->dma_group = param->mcu_w / param->dma_mcu;
+ param->dma_last_mcu = param->mcu_w % param->dma_mcu;
+ if (param->dma_last_mcu)
+ param->dma_group++;
+ else
+ param->dma_last_mcu = param->dma_mcu;
+}
+
+static int mtk_jpeg_calc_dst_size(struct mtk_jpeg_dec_param *param)
+{
+ u32 i, padding_w;
+ u32 ds_row_h[3];
+ u32 brz_w[3];
+
+ brz_w[0] = 0;
+ brz_w[1] = param->uv_brz_w;
+ brz_w[2] = brz_w[1];
+
+ for (i = 0; i < param->comp_num; i++) {
+ if (brz_w[i] > 3)
+ return -1;
+
+ padding_w = param->mcu_w * MTK_JPEG_DCTSIZE *
+ param->sampling_w[i];
+ /* output format is 420/422 */
+ param->comp_w[i] = padding_w >> brz_w[i];
+ param->comp_w[i] = mtk_jpeg_align(param->comp_w[i],
+ MTK_JPEG_DCTSIZE);
+ param->img_stride[i] = i ? mtk_jpeg_align(param->comp_w[i], 16)
+ : mtk_jpeg_align(param->comp_w[i], 32);
+ ds_row_h[i] = (MTK_JPEG_DCTSIZE * param->sampling_h[i]);
+ }
+ param->dec_w = param->img_stride[0];
+ param->dec_h = ds_row_h[0] * param->mcu_h;
+
+ for (i = 0; i < MTK_JPEG_COMP_MAX; i++) {
+ /* In frame mode, the memory stride must equal the image stride. */
+ param->mem_stride[i] = param->img_stride[i];
+ param->comp_size[i] = param->mem_stride[i] * ds_row_h[i] *
+ param->mcu_h;
+ }
+
+ param->y_size = param->comp_size[0];
+ param->uv_size = param->comp_size[1];
+ param->dec_size = param->y_size + (param->uv_size << 1);
+
+ return 0;
+}
+
+int mtk_jpeg_dec_fill_param(struct mtk_jpeg_dec_param *param)
+{
+ if (mtk_jpeg_decide_format(param))
+ return -1;
+
+ mtk_jpeg_calc_mcu(param);
+ mtk_jpeg_calc_dma_group(param);
+ if (mtk_jpeg_calc_dst_size(param))
+ return -2;
+
+ return 0;
+}
+
+u32 mtk_jpeg_dec_get_int_status(void __iomem *base)
+{
+ u32 ret;
+
+ ret = readl(base + JPGDEC_REG_INTERRUPT_STATUS) & BIT_INQST_MASK_ALLIRQ;
+ if (ret)
+ writel(ret, base + JPGDEC_REG_INTERRUPT_STATUS);
+
+ return ret;
+}
+
+u32 mtk_jpeg_dec_enum_result(u32 irq_result)
+{
+ if (irq_result & BIT_INQST_MASK_EOF)
+ return MTK_JPEG_DEC_RESULT_EOF_DONE;
+ if (irq_result & BIT_INQST_MASK_PAUSE)
+ return MTK_JPEG_DEC_RESULT_PAUSE;
+ if (irq_result & BIT_INQST_MASK_UNDERFLOW)
+ return MTK_JPEG_DEC_RESULT_UNDERFLOW;
+ if (irq_result & BIT_INQST_MASK_OVERFLOW)
+ return MTK_JPEG_DEC_RESULT_OVERFLOW;
+ if (irq_result & BIT_INQST_MASK_ERROR_BS)
+ return MTK_JPEG_DEC_RESULT_ERROR_BS;
+
+ return MTK_JPEG_DEC_RESULT_ERROR_UNKNOWN;
+}
+
+void mtk_jpeg_dec_start(void __iomem *base)
+{
+ writel(0, base + JPGDEC_REG_TRIG);
+}
+
+static void mtk_jpeg_dec_soft_reset(void __iomem *base)
+{
+ writel(0x0000FFFF, base + JPGDEC_REG_INTERRUPT_STATUS);
+ writel(0x00, base + JPGDEC_REG_RESET);
+ writel(0x01, base + JPGDEC_REG_RESET);
+}
+
+static void mtk_jpeg_dec_hard_reset(void __iomem *base)
+{
+ writel(0x00, base + JPGDEC_REG_RESET);
+ writel(0x10, base + JPGDEC_REG_RESET);
+}
+
+void mtk_jpeg_dec_reset(void __iomem *base)
+{
+ mtk_jpeg_dec_soft_reset(base);
+ mtk_jpeg_dec_hard_reset(base);
+}
+
+static void mtk_jpeg_dec_set_brz_factor(void __iomem *base, u8 yscale_w,
+ u8 yscale_h, u8 uvscale_w, u8 uvscale_h)
+{
+ u32 val;
+
+ val = (uvscale_h << 12) | (uvscale_w << 8) |
+ (yscale_h << 4) | yscale_w;
+ writel(val, base + JPGDEC_REG_BRZ_FACTOR);
+}
+
+static void mtk_jpeg_dec_set_dst_bank0(void __iomem *base, u32 addr_y,
+ u32 addr_u, u32 addr_v)
+{
+ mtk_jpeg_verify_align(addr_y, 16, JPGDEC_REG_DEST_ADDR0_Y);
+ writel(addr_y, base + JPGDEC_REG_DEST_ADDR0_Y);
+ mtk_jpeg_verify_align(addr_u, 16, JPGDEC_REG_DEST_ADDR0_U);
+ writel(addr_u, base + JPGDEC_REG_DEST_ADDR0_U);
+ mtk_jpeg_verify_align(addr_v, 16, JPGDEC_REG_DEST_ADDR0_V);
+ writel(addr_v, base + JPGDEC_REG_DEST_ADDR0_V);
+}
+
+static void mtk_jpeg_dec_set_dst_bank1(void __iomem *base, u32 addr_y,
+ u32 addr_u, u32 addr_v)
+{
+ writel(addr_y, base + JPGDEC_REG_DEST_ADDR1_Y);
+ writel(addr_u, base + JPGDEC_REG_DEST_ADDR1_U);
+ writel(addr_v, base + JPGDEC_REG_DEST_ADDR1_V);
+}
+
+static void mtk_jpeg_dec_set_mem_stride(void __iomem *base, u32 stride_y,
+ u32 stride_uv)
+{
+ writel((stride_y & 0xFFFF), base + JPGDEC_REG_STRIDE_Y);
+ writel((stride_uv & 0xFFFF), base + JPGDEC_REG_STRIDE_UV);
+}
+
+static void mtk_jpeg_dec_set_img_stride(void __iomem *base, u32 stride_y,
+ u32 stride_uv)
+{
+ writel((stride_y & 0xFFFF), base + JPGDEC_REG_IMG_STRIDE_Y);
+ writel((stride_uv & 0xFFFF), base + JPGDEC_REG_IMG_STRIDE_UV);
+}
+
+static void mtk_jpeg_dec_set_pause_mcu_idx(void __iomem *base, u32 idx)
+{
+ writel(idx & 0x0003FFFFFF, base + JPGDEC_REG_PAUSE_MCU_NUM);
+}
+
+static void mtk_jpeg_dec_set_dec_mode(void __iomem *base, u32 mode)
+{
+ writel(mode & 0x03, base + JPGDEC_REG_OPERATION_MODE);
+}
+
+static void mtk_jpeg_dec_set_bs_write_ptr(void __iomem *base, u32 ptr)
+{
+ mtk_jpeg_verify_align(ptr, 16, JPGDEC_REG_FILE_BRP);
+ writel(ptr, base + JPGDEC_REG_FILE_BRP);
+}
+
+static void mtk_jpeg_dec_set_bs_info(void __iomem *base, u32 addr, u32 size)
+{
+ mtk_jpeg_verify_align(addr, 16, JPGDEC_REG_FILE_ADDR);
+ mtk_jpeg_verify_align(size, 128, JPGDEC_REG_FILE_TOTAL_SIZE);
+ writel(addr, base + JPGDEC_REG_FILE_ADDR);
+ writel(size, base + JPGDEC_REG_FILE_TOTAL_SIZE);
+}
+
+static void mtk_jpeg_dec_set_comp_id(void __iomem *base, u32 id_y, u32 id_u,
+ u32 id_v)
+{
+ u32 val;
+
+ val = ((id_y & 0x00FF) << 24) | ((id_u & 0x00FF) << 16) |
+ ((id_v & 0x00FF) << 8);
+ writel(val, base + JPGDEC_REG_COMP_ID);
+}
+
+static void mtk_jpeg_dec_set_total_mcu(void __iomem *base, u32 num)
+{
+ writel(num - 1, base + JPGDEC_REG_TOTAL_MCU_NUM);
+}
+
+static void mtk_jpeg_dec_set_comp0_du(void __iomem *base, u32 num)
+{
+ writel(num - 1, base + JPGDEC_REG_COMP0_DATA_UNIT_NUM);
+}
+
+static void mtk_jpeg_dec_set_du_membership(void __iomem *base, u32 member,
+ u32 gmc, u32 isgray)
+{
+ if (isgray)
+ member = 0x3FFFFFFC;
+ member |= (isgray << 31) | (gmc << 30);
+ writel(member, base + JPGDEC_REG_DU_CTRL);
+}
+
+static void mtk_jpeg_dec_set_q_table(void __iomem *base, u32 id0, u32 id1,
+ u32 id2)
+{
+ u32 val;
+
+ val = ((id0 & 0x0f) << 8) | ((id1 & 0x0f) << 4) | ((id2 & 0x0f) << 0);
+ writel(val, base + JPGDEC_REG_QT_ID);
+}
+
+static void mtk_jpeg_dec_set_dma_group(void __iomem *base, u32 mcu_group,
+ u32 group_num, u32 last_mcu)
+{
+ u32 val;
+
+ val = (((mcu_group - 1) & 0x00FF) << 16) |
+ (((group_num - 1) & 0x007F) << 8) |
+ ((last_mcu - 1) & 0x00FF);
+ writel(val, base + JPGDEC_REG_WDMA_CTRL);
+}
+
+static void mtk_jpeg_dec_set_sampling_factor(void __iomem *base, u32 comp_num,
+ u32 y_w, u32 y_h, u32 u_w,
+ u32 u_h, u32 v_w, u32 v_h)
+{
+ u32 val;
+ u32 y_wh = (MTK_JPEG_DUNUM_MASK(y_w) << 2) | MTK_JPEG_DUNUM_MASK(y_h);
+ u32 u_wh = (MTK_JPEG_DUNUM_MASK(u_w) << 2) | MTK_JPEG_DUNUM_MASK(u_h);
+ u32 v_wh = (MTK_JPEG_DUNUM_MASK(v_w) << 2) | MTK_JPEG_DUNUM_MASK(v_h);
+
+ if (comp_num == 1)
+ val = 0;
+ else
+ val = (y_wh << 8) | (u_wh << 4) | v_wh;
+ writel(val, base + JPGDEC_REG_DU_NUM);
+}
+
+void mtk_jpeg_dec_set_config(void __iomem *base,
+ struct mtk_jpeg_dec_param *config,
+ struct mtk_jpeg_bs *bs,
+ struct mtk_jpeg_fb *fb)
+{
+ mtk_jpeg_dec_set_brz_factor(base, 0, 0, config->uv_brz_w, 0);
+ mtk_jpeg_dec_set_dec_mode(base, 0);
+ mtk_jpeg_dec_set_comp0_du(base, config->unit_num);
+ mtk_jpeg_dec_set_total_mcu(base, config->total_mcu);
+ mtk_jpeg_dec_set_bs_info(base, bs->str_addr, bs->size);
+ mtk_jpeg_dec_set_bs_write_ptr(base, bs->end_addr);
+ mtk_jpeg_dec_set_du_membership(base, config->membership, 1,
+ (config->comp_num == 1) ? 1 : 0);
+ mtk_jpeg_dec_set_comp_id(base, config->comp_id[0], config->comp_id[1],
+ config->comp_id[2]);
+ mtk_jpeg_dec_set_q_table(base, config->qtbl_num[0],
+ config->qtbl_num[1], config->qtbl_num[2]);
+ mtk_jpeg_dec_set_sampling_factor(base, config->comp_num,
+ config->sampling_w[0],
+ config->sampling_h[0],
+ config->sampling_w[1],
+ config->sampling_h[1],
+ config->sampling_w[2],
+ config->sampling_h[2]);
+ mtk_jpeg_dec_set_mem_stride(base, config->mem_stride[0],
+ config->mem_stride[1]);
+ mtk_jpeg_dec_set_img_stride(base, config->img_stride[0],
+ config->img_stride[1]);
+ mtk_jpeg_dec_set_dst_bank0(base, fb->plane_addr[0],
+ fb->plane_addr[1], fb->plane_addr[2]);
+ mtk_jpeg_dec_set_dst_bank1(base, 0, 0, 0);
+ mtk_jpeg_dec_set_dma_group(base, config->dma_mcu, config->dma_group,
+ config->dma_last_mcu);
+ mtk_jpeg_dec_set_pause_mcu_idx(base, config->total_mcu);
+}
diff --git a/drivers/media/platform/mtk-jpeg/mtk_jpeg_hw.h b/drivers/media/platform/mtk-jpeg/mtk_jpeg_hw.h
new file mode 100644
index 000000000000..37152a630dea
--- /dev/null
+++ b/drivers/media/platform/mtk-jpeg/mtk_jpeg_hw.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: Ming Hsiu Tsai <minghsiu.tsai@mediatek.com>
+ * Rick Chang <rick.chang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MTK_JPEG_HW_H
+#define _MTK_JPEG_HW_H
+
+#include <media/videobuf2-core.h>
+
+#include "mtk_jpeg_core.h"
+#include "mtk_jpeg_reg.h"
+
+enum {
+ MTK_JPEG_DEC_RESULT_EOF_DONE = 0,
+ MTK_JPEG_DEC_RESULT_PAUSE = 1,
+ MTK_JPEG_DEC_RESULT_UNDERFLOW = 2,
+ MTK_JPEG_DEC_RESULT_OVERFLOW = 3,
+ MTK_JPEG_DEC_RESULT_ERROR_BS = 4,
+ MTK_JPEG_DEC_RESULT_ERROR_UNKNOWN = 6
+};
+
+struct mtk_jpeg_dec_param {
+ u32 pic_w;
+ u32 pic_h;
+ u32 dec_w;
+ u32 dec_h;
+ u32 src_color;
+ u32 dst_fourcc;
+ u32 mcu_w;
+ u32 mcu_h;
+ u32 total_mcu;
+ u32 unit_num;
+ u32 comp_num;
+ u32 comp_id[MTK_JPEG_COMP_MAX];
+ u32 sampling_w[MTK_JPEG_COMP_MAX];
+ u32 sampling_h[MTK_JPEG_COMP_MAX];
+ u32 qtbl_num[MTK_JPEG_COMP_MAX];
+ u32 blk_num;
+ u32 blk_comp[MTK_JPEG_COMP_MAX];
+ u32 membership;
+ u32 dma_mcu;
+ u32 dma_group;
+ u32 dma_last_mcu;
+ u32 img_stride[MTK_JPEG_COMP_MAX];
+ u32 mem_stride[MTK_JPEG_COMP_MAX];
+ u32 comp_w[MTK_JPEG_COMP_MAX];
+ u32 comp_size[MTK_JPEG_COMP_MAX];
+ u32 y_size;
+ u32 uv_size;
+ u32 dec_size;
+ u8 uv_brz_w;
+};
+
+static inline u32 mtk_jpeg_align(u32 val, u32 align)
+{
+ return (val + align - 1) & ~(align - 1);
+}
+
+struct mtk_jpeg_bs {
+ dma_addr_t str_addr;
+ dma_addr_t end_addr;
+ size_t size;
+};
+
+struct mtk_jpeg_fb {
+ dma_addr_t plane_addr[MTK_JPEG_COMP_MAX];
+ size_t size;
+};
+
+int mtk_jpeg_dec_fill_param(struct mtk_jpeg_dec_param *param);
+u32 mtk_jpeg_dec_get_int_status(void __iomem *dec_reg_base);
+u32 mtk_jpeg_dec_enum_result(u32 irq_result);
+void mtk_jpeg_dec_set_config(void __iomem *base,
+ struct mtk_jpeg_dec_param *config,
+ struct mtk_jpeg_bs *bs,
+ struct mtk_jpeg_fb *fb);
+void mtk_jpeg_dec_reset(void __iomem *dec_reg_base);
+void mtk_jpeg_dec_start(void __iomem *dec_reg_base);
+
+#endif /* _MTK_JPEG_HW_H */
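
mtk_jpeg_align() rounds a value up to the next multiple of a power-of-two alignment, which the decoder uses for stride and buffer-size calculations. A quick stand-alone check of the arithmetic (sample values are illustrative):

#include <assert.h>
#include <stdint.h>

/* Same round-up helper as mtk_jpeg_align() above; align must be a power of two. */
static uint32_t align_up(uint32_t val, uint32_t align)
{
        return (val + align - 1) & ~(align - 1);
}

int main(void)
{
        assert(align_up(1920, 16) == 1920);     /* already aligned */
        assert(align_up(1921, 16) == 1936);     /* rounded up to the next multiple */
        assert(align_up(0, 16) == 0);
        return 0;
}
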
diff --git a/drivers/media/platform/mtk-jpeg/mtk_jpeg_parse.c b/drivers/media/platform/mtk-jpeg/mtk_jpeg_parse.c
new file mode 100644
index 000000000000..38868547f5d4
--- /dev/null
+++ b/drivers/media/platform/mtk-jpeg/mtk_jpeg_parse.c
@@ -0,0 +1,160 @@
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: Ming Hsiu Tsai <minghsiu.tsai@mediatek.com>
+ * Rick Chang <rick.chang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/videodev2.h>
+
+#include "mtk_jpeg_parse.h"
+
+#define TEM 0x01
+#define SOF0 0xc0
+#define RST 0xd0
+#define SOI 0xd8
+#define EOI 0xd9
+
+struct mtk_jpeg_stream {
+ u8 *addr;
+ u32 size;
+ u32 curr;
+};
+
+static int read_byte(struct mtk_jpeg_stream *stream)
+{
+ if (stream->curr >= stream->size)
+ return -1;
+ return stream->addr[stream->curr++];
+}
+
+static int read_word_be(struct mtk_jpeg_stream *stream, u32 *word)
+{
+ u32 temp;
+ int byte;
+
+ byte = read_byte(stream);
+ if (byte == -1)
+ return -1;
+ temp = byte << 8;
+ byte = read_byte(stream);
+ if (byte == -1)
+ return -1;
+ *word = (u32)byte | temp;
+
+ return 0;
+}
+
+static void read_skip(struct mtk_jpeg_stream *stream, long len)
+{
+ if (len <= 0)
+ return;
+ while (len--)
+ read_byte(stream);
+}
+
+static bool mtk_jpeg_do_parse(struct mtk_jpeg_dec_param *param, u8 *src_addr_va,
+ u32 src_size)
+{
+ bool notfound = true;
+ struct mtk_jpeg_stream stream;
+
+ stream.addr = src_addr_va;
+ stream.size = src_size;
+ stream.curr = 0;
+
+ while (notfound) {
+ int i, length, byte;
+ u32 word;
+
+ byte = read_byte(&stream);
+ if (byte == -1)
+ return false;
+ if (byte != 0xff)
+ continue;
+ do
+ byte = read_byte(&stream);
+ while (byte == 0xff);
+ if (byte == -1)
+ return false;
+ if (byte == 0)
+ continue;
+
+ length = 0;
+ switch (byte) {
+ case SOF0:
+ /* length */
+ if (read_word_be(&stream, &word))
+ break;
+
+ /* precision */
+ if (read_byte(&stream) == -1)
+ break;
+
+ if (read_word_be(&stream, &word))
+ break;
+ param->pic_h = word;
+
+ if (read_word_be(&stream, &word))
+ break;
+ param->pic_w = word;
+
+ param->comp_num = read_byte(&stream);
+ if (param->comp_num != 1 && param->comp_num != 3)
+ break;
+
+ for (i = 0; i < param->comp_num; i++) {
+ param->comp_id[i] = read_byte(&stream);
+ if (param->comp_id[i] == -1)
+ break;
+
+ /* sampling */
+ byte = read_byte(&stream);
+ if (byte == -1)
+ break;
+ param->sampling_w[i] = (byte >> 4) & 0x0F;
+ param->sampling_h[i] = byte & 0x0F;
+
+ param->qtbl_num[i] = read_byte(&stream);
+ if (param->qtbl_num[i] == -1)
+ break;
+ }
+
+ notfound = !(i == param->comp_num);
+ break;
+ case RST ... RST + 7:
+ case SOI:
+ case EOI:
+ case TEM:
+ break;
+ default:
+ if (read_word_be(&stream, &word))
+ break;
+ length = (long)word - 2;
+ read_skip(&stream, length);
+ break;
+ }
+ }
+
+ return !notfound;
+}
+
+bool mtk_jpeg_parse(struct mtk_jpeg_dec_param *param, u8 *src_addr_va,
+ u32 src_size)
+{
+ if (!mtk_jpeg_do_parse(param, src_addr_va, src_size))
+ return false;
+ if (mtk_jpeg_dec_fill_param(param))
+ return false;
+
+ return true;
+}
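
The parser above scans for 0xff marker bytes, skips fill bytes, and pulls the frame geometry out of the SOF0 segment: big-endian 16-bit height, then width, then the per-component sampling factors and quantisation-table indices. A stripped-down user-space version of the SOF0 payload walk (the sample bytes are fabricated for illustration):

#include <stdint.h>
#include <stdio.h>

/* Parse the start of a SOF0 payload (the bytes after the 0xffc0 marker). */
static int parse_sof0(const uint8_t *p, size_t len,
                      unsigned int *width, unsigned int *height,
                      unsigned int *ncomp)
{
        if (len < 8)
                return -1;
        /* p[0..1] = segment length, p[2] = sample precision */
        *height = (p[3] << 8) | p[4];
        *width  = (p[5] << 8) | p[6];
        *ncomp  = p[7];
        return (*ncomp == 1 || *ncomp == 3) ? 0 : -1;
}

int main(void)
{
        /* length = 17, precision = 8, 1080 x 1920, 3 components (payload truncated) */
        const uint8_t sof0[] = { 0x00, 0x11, 0x08, 0x04, 0x38, 0x07, 0x80, 0x03 };
        unsigned int w, h, n;

        if (!parse_sof0(sof0, sizeof(sof0), &w, &h, &n))
                printf("%ux%u, %u components\n", w, h, n);      /* 1920x1080, 3 */
        return 0;
}
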
diff --git a/drivers/media/platform/mtk-jpeg/mtk_jpeg_parse.h b/drivers/media/platform/mtk-jpeg/mtk_jpeg_parse.h
new file mode 100644
index 000000000000..5d92340ea81b
--- /dev/null
+++ b/drivers/media/platform/mtk-jpeg/mtk_jpeg_parse.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: Ming Hsiu Tsai <minghsiu.tsai@mediatek.com>
+ * Rick Chang <rick.chang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MTK_JPEG_PARSE_H
+#define _MTK_JPEG_PARSE_H
+
+#include "mtk_jpeg_hw.h"
+
+bool mtk_jpeg_parse(struct mtk_jpeg_dec_param *param, u8 *src_addr_va,
+ u32 src_size);
+
+#endif /* _MTK_JPEG_PARSE_H */
+
diff --git a/drivers/media/platform/mtk-jpeg/mtk_jpeg_reg.h b/drivers/media/platform/mtk-jpeg/mtk_jpeg_reg.h
new file mode 100644
index 000000000000..fc490d62b012
--- /dev/null
+++ b/drivers/media/platform/mtk-jpeg/mtk_jpeg_reg.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: Ming Hsiu Tsai <minghsiu.tsai@mediatek.com>
+ * Rick Chang <rick.chang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MTK_JPEG_REG_H
+#define _MTK_JPEG_REG_H
+
+#define MTK_JPEG_COMP_MAX 3
+#define MTK_JPEG_BLOCK_MAX 10
+#define MTK_JPEG_DCTSIZE 8
+
+#define BIT_INQST_MASK_ERROR_BS 0x20
+#define BIT_INQST_MASK_PAUSE 0x10
+#define BIT_INQST_MASK_OVERFLOW 0x04
+#define BIT_INQST_MASK_UNDERFLOW 0x02
+#define BIT_INQST_MASK_EOF 0x01
+#define BIT_INQST_MASK_ALLIRQ 0x37
+
+#define JPGDEC_REG_RESET 0x0090
+#define JPGDEC_REG_BRZ_FACTOR 0x00F8
+#define JPGDEC_REG_DU_NUM 0x00FC
+#define JPGDEC_REG_DEST_ADDR0_Y 0x0140
+#define JPGDEC_REG_DEST_ADDR0_U 0x0144
+#define JPGDEC_REG_DEST_ADDR0_V 0x0148
+#define JPGDEC_REG_DEST_ADDR1_Y 0x014C
+#define JPGDEC_REG_DEST_ADDR1_U 0x0150
+#define JPGDEC_REG_DEST_ADDR1_V 0x0154
+#define JPGDEC_REG_STRIDE_Y 0x0158
+#define JPGDEC_REG_STRIDE_UV 0x015C
+#define JPGDEC_REG_IMG_STRIDE_Y 0x0160
+#define JPGDEC_REG_IMG_STRIDE_UV 0x0164
+#define JPGDEC_REG_WDMA_CTRL 0x016C
+#define JPGDEC_REG_PAUSE_MCU_NUM 0x0170
+#define JPGDEC_REG_OPERATION_MODE 0x017C
+#define JPGDEC_REG_FILE_ADDR 0x0200
+#define JPGDEC_REG_COMP_ID 0x020C
+#define JPGDEC_REG_TOTAL_MCU_NUM 0x0210
+#define JPGDEC_REG_COMP0_DATA_UNIT_NUM 0x0224
+#define JPGDEC_REG_DU_CTRL 0x023C
+#define JPGDEC_REG_TRIG 0x0240
+#define JPGDEC_REG_FILE_BRP 0x0248
+#define JPGDEC_REG_FILE_TOTAL_SIZE 0x024C
+#define JPGDEC_REG_QT_ID 0x0270
+#define JPGDEC_REG_INTERRUPT_STATUS 0x0274
+#define JPGDEC_REG_STATUS 0x0278
+
+#endif /* _MTK_JPEG_REG_H */
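
BIT_INQST_MASK_ALLIRQ is the union of the five individual status bits defined above (0x20 | 0x10 | 0x04 | 0x02 | 0x01 == 0x37). A stand-alone compile-time check of that relationship, with the defines copied from this header (build with -std=c11 for static_assert):

#include <assert.h>

#define BIT_INQST_MASK_ERROR_BS         0x20
#define BIT_INQST_MASK_PAUSE            0x10
#define BIT_INQST_MASK_OVERFLOW         0x04
#define BIT_INQST_MASK_UNDERFLOW        0x02
#define BIT_INQST_MASK_EOF              0x01
#define BIT_INQST_MASK_ALLIRQ           0x37

/* Fails to compile if a new status bit is added without updating ALLIRQ. */
static_assert((BIT_INQST_MASK_ERROR_BS | BIT_INQST_MASK_PAUSE |
               BIT_INQST_MASK_OVERFLOW | BIT_INQST_MASK_UNDERFLOW |
               BIT_INQST_MASK_EOF) == BIT_INQST_MASK_ALLIRQ,
              "ALLIRQ must cover every individual interrupt status bit");

int main(void)
{
        return 0;
}
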
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c
index 502877a4b1df..a60b538686ea 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c
@@ -420,6 +420,11 @@ static void mtk_vdec_worker(struct work_struct *work)
dst_buf->index,
ret, res_chg);
src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
+ if (ret == -EIO) {
+ mutex_lock(&ctx->lock);
+ src_buf_info->error = true;
+ mutex_unlock(&ctx->lock);
+ }
v4l2_m2m_buf_done(&src_buf_info->vb, VB2_BUF_STATE_ERROR);
} else if (res_chg == false) {
/*
@@ -1170,8 +1175,16 @@ static void vb2ops_vdec_buf_queue(struct vb2_buffer *vb)
*/
src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
- v4l2_m2m_buf_done(to_vb2_v4l2_buffer(src_buf),
- VB2_BUF_STATE_DONE);
+ if (ret == -EIO) {
+ mtk_v4l2_err("[%d] Unrecoverable error in vdec_if_decode.",
+ ctx->id);
+ ctx->state = MTK_STATE_ABORT;
+ v4l2_m2m_buf_done(to_vb2_v4l2_buffer(src_buf),
+ VB2_BUF_STATE_ERROR);
+ } else {
+ v4l2_m2m_buf_done(to_vb2_v4l2_buffer(src_buf),
+ VB2_BUF_STATE_DONE);
+ }
mtk_v4l2_debug(ret ? 0 : 1,
"[%d] vdec_if_decode() src_buf=%d, size=%zu, fail=%d, res_chg=%d",
ctx->id, src_buf->index,
@@ -1216,16 +1229,22 @@ static void vb2ops_vdec_buf_finish(struct vb2_buffer *vb)
struct mtk_vcodec_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
struct vb2_v4l2_buffer *vb2_v4l2;
struct mtk_video_dec_buf *buf;
-
- if (vb->vb2_queue->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
- return;
+ bool buf_error;
vb2_v4l2 = container_of(vb, struct vb2_v4l2_buffer, vb2_buf);
buf = container_of(vb2_v4l2, struct mtk_video_dec_buf, vb);
mutex_lock(&ctx->lock);
- buf->queued_in_v4l2 = false;
- buf->queued_in_vb2 = false;
+ if (vb->vb2_queue->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ buf->queued_in_v4l2 = false;
+ buf->queued_in_vb2 = false;
+ }
+ buf_error = buf->error;
mutex_unlock(&ctx->lock);
+
+ if (buf_error) {
+ mtk_v4l2_err("Unrecoverable error on buffer.");
+ ctx->state = MTK_STATE_ABORT;
+ }
}
static int vb2ops_vdec_buf_init(struct vb2_buffer *vb)
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.h b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.h
index 362f5a85762e..dc4fc1df63c5 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.h
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.h
@@ -50,6 +50,7 @@ struct vdec_fb {
* @queued_in_v4l2: Capture buffer is in v4l2 driver, but not in vb2
* queue yet
 * @lastframe: Input buffer is the last buffer - EOS
+ * @error: An unrecoverable error occurred on this buffer.
* @frame_buffer: Decode status, and buffer information of Capture buffer
*
* Note : These status information help us track and debug buffer state
@@ -63,6 +64,7 @@ struct mtk_video_dec_buf {
bool queued_in_vb2;
bool queued_in_v4l2;
bool lastframe;
+ bool error;
struct vdec_fb frame_buffer;
};
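
The @error flag added here carries a failure from the decode worker to vb2ops_vdec_buf_finish(): the worker sets it under ctx->lock when vdec_if_decode() returns -EIO, and buf_finish() snapshots it under the same lock before acting on it outside the critical section. A user-space sketch of that handshake (all names are illustrative; build with -pthread):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct dec_buf {
        pthread_mutex_t lock;
        bool error;                     /* set by the worker, read at buf-finish time */
};

static void worker_mark_error(struct dec_buf *buf)
{
        pthread_mutex_lock(&buf->lock);
        buf->error = true;              /* decode reported an unrecoverable failure */
        pthread_mutex_unlock(&buf->lock);
}

static void buf_finish(struct dec_buf *buf)
{
        bool buf_error;

        pthread_mutex_lock(&buf->lock);
        buf_error = buf->error;         /* snapshot under the lock ... */
        pthread_mutex_unlock(&buf->lock);

        if (buf_error)                  /* ... act on it outside the critical section */
                fprintf(stderr, "unrecoverable error on buffer, aborting\n");
}

int main(void)
{
        struct dec_buf buf = { PTHREAD_MUTEX_INITIALIZER, false };

        worker_mark_error(&buf);
        buf_finish(&buf);
        return 0;
}
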
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c
index aa81f3ce9463..83f859e8509c 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c
@@ -269,11 +269,6 @@ static int mtk_vcodec_probe(struct platform_device *pdev)
for (i = VENC_SYS, j = 0; i < NUM_MAX_VCODEC_REG_BASE; i++, j++) {
res = platform_get_resource(pdev, IORESOURCE_MEM, j);
- if (res == NULL) {
- dev_err(&pdev->dev, "get memory resource failed.");
- ret = -ENXIO;
- goto err_res;
- }
dev->reg_base[i] = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR((__force void *)dev->reg_base[i])) {
ret = PTR_ERR((__force void *)dev->reg_base[i]);
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.h b/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.h
index 7d55975d3185..237e144c194f 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.h
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.h
@@ -31,7 +31,6 @@ struct mtk_vcodec_dev;
extern int mtk_v4l2_dbg_level;
extern bool mtk_vcodec_dbg;
-#define DEBUG 1
#if defined(DEBUG)
@@ -67,15 +66,15 @@ extern bool mtk_vcodec_dbg;
#else
-#define mtk_v4l2_debug(level, fmt, args...)
-#define mtk_v4l2_err(fmt, args...)
-#define mtk_v4l2_debug_enter()
-#define mtk_v4l2_debug_leave()
+#define mtk_v4l2_debug(level, fmt, args...) {}
+#define mtk_v4l2_err(fmt, args...) {}
+#define mtk_v4l2_debug_enter() {}
+#define mtk_v4l2_debug_leave() {}
-#define mtk_vcodec_debug(h, fmt, args...)
-#define mtk_vcodec_err(h, fmt, args...)
-#define mtk_vcodec_debug_enter(h)
-#define mtk_vcodec_debug_leave(h)
+#define mtk_vcodec_debug(h, fmt, args...) {}
+#define mtk_vcodec_err(h, fmt, args...) {}
+#define mtk_vcodec_debug_enter(h) {}
+#define mtk_vcodec_debug_leave(h) {}
#endif
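
For reference, the conventional kernel idiom for compiling such macros away is do { } while (0) rather than a bare block, because the expansion plus the caller's trailing semicolon remains a single statement inside an unbraced if/else. A minimal compilable sketch of that idiom (not a claim about the form chosen in this patch):

#include <stdio.h>

/* Compiled-out logging macro: expands to exactly one (empty) statement. */
#define dbg(...)        do { } while (0)

int main(void)
{
        int verbose = 0;

        if (verbose)
                dbg("verbose mode\n");  /* the trailing ';' still parses cleanly */
        else
                printf("quiet mode\n");

        return 0;
}
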
diff --git a/drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c b/drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c
index e91a3b425b0c..5539b1853f16 100644
--- a/drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c
+++ b/drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c
@@ -718,6 +718,26 @@ static void get_free_fb(struct vdec_vp9_inst *inst, struct vdec_fb **out_fb)
*out_fb = fb;
}
+static int validate_vsi_array_indexes(struct vdec_vp9_inst *inst,
+ struct vdec_vp9_vsi *vsi) {
+ if (vsi->sf_frm_idx >= VP9_MAX_FRM_BUF_NUM - 1) {
+ mtk_vcodec_err(inst, "Invalid vsi->sf_frm_idx=%u.",
+ vsi->sf_frm_idx);
+ return -EIO;
+ }
+ if (vsi->frm_to_show_idx >= VP9_MAX_FRM_BUF_NUM) {
+ mtk_vcodec_err(inst, "Invalid vsi->frm_to_show_idx=%u.",
+ vsi->frm_to_show_idx);
+ return -EIO;
+ }
+ if (vsi->new_fb_idx >= VP9_MAX_FRM_BUF_NUM) {
+ mtk_vcodec_err(inst, "Invalid vsi->new_fb_idx=%u.",
+ vsi->new_fb_idx);
+ return -EIO;
+ }
+ return 0;
+}
+
static void vdec_vp9_deinit(unsigned long h_vdec)
{
struct vdec_vp9_inst *inst = (struct vdec_vp9_inst *)h_vdec;
@@ -834,6 +854,12 @@ static int vdec_vp9_decode(unsigned long h_vdec, struct mtk_vcodec_mem *bs,
goto DECODE_ERROR;
}
+ ret = validate_vsi_array_indexes(inst, vsi);
+ if (ret) {
+ mtk_vcodec_err(inst, "Invalid values from VPU.");
+ goto DECODE_ERROR;
+ }
+
if (vsi->resolution_changed) {
if (!vp9_alloc_work_buf(inst)) {
ret = -EINVAL;
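
validate_vsi_array_indexes() bounds-checks values that arrive from the VPU firmware before they are used to index driver-side arrays, turning a potentially corrupt shared structure into a clean -EIO. The same defensive pattern in a self-contained form (the array size and field names are illustrative stand-ins):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define FRM_BUF_NUM 9                   /* stand-in for VP9_MAX_FRM_BUF_NUM */

struct shared_info {                    /* fields written by untrusted firmware */
        uint32_t frm_to_show_idx;
        uint32_t new_fb_idx;
};

/* Reject any index that could run past the driver's frame-buffer array. */
static int validate_indexes(const struct shared_info *vsi)
{
        if (vsi->frm_to_show_idx >= FRM_BUF_NUM || vsi->new_fb_idx >= FRM_BUF_NUM)
                return -EIO;
        return 0;
}

int main(void)
{
        struct shared_info good = { .frm_to_show_idx = 3, .new_fb_idx = 8 };
        struct shared_info bad  = { .frm_to_show_idx = 3, .new_fb_idx = 42 };

        printf("good: %d, bad: %d\n",
               validate_indexes(&good), validate_indexes(&bad));
        return 0;
}
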
diff --git a/drivers/media/platform/mtk-vcodec/vdec_drv_if.h b/drivers/media/platform/mtk-vcodec/vdec_drv_if.h
index db6b5205ffb1..ded1154481cd 100644
--- a/drivers/media/platform/mtk-vcodec/vdec_drv_if.h
+++ b/drivers/media/platform/mtk-vcodec/vdec_drv_if.h
@@ -85,6 +85,8 @@ void vdec_if_deinit(struct mtk_vcodec_ctx *ctx);
* @res_chg : [out] resolution change happens if current bs have different
* picture width/height
* Note: To flush the decoder when reaching EOF, set input bitstream as NULL.
+ *
+ * Return: 0 on success. -EIO on unrecoverable error.
*/
int vdec_if_decode(struct mtk_vcodec_ctx *ctx, struct mtk_vcodec_mem *bs,
struct vdec_fb *fb, bool *res_chg);
diff --git a/drivers/media/platform/mtk-vcodec/venc/venc_vp8_if.c b/drivers/media/platform/mtk-vcodec/venc/venc_vp8_if.c
index a6fa145f2c54..acb639c4abd2 100644
--- a/drivers/media/platform/mtk-vcodec/venc/venc_vp8_if.c
+++ b/drivers/media/platform/mtk-vcodec/venc/venc_vp8_if.c
@@ -155,7 +155,7 @@ static void vp8_enc_free_work_buf(struct venc_vp8_inst *inst)
/* Buffers need to be freed by AP. */
for (i = 0; i < VENC_VP8_VPU_WORK_BUF_MAX; i++) {
- if ((inst->work_bufs[i].size == 0))
+ if (inst->work_bufs[i].size == 0)
continue;
mtk_vcodec_mem_free(inst->ctx, &inst->work_bufs[i]);
}
@@ -172,7 +172,7 @@ static int vp8_enc_alloc_work_buf(struct venc_vp8_inst *inst)
mtk_vcodec_debug_enter(inst);
for (i = 0; i < VENC_VP8_VPU_WORK_BUF_MAX; i++) {
- if ((wb[i].size == 0))
+ if (wb[i].size == 0)
continue;
/*
* This 'wb' structure is set by VPU side and shared to AP for
diff --git a/drivers/media/platform/mtk-vpu/mtk_vpu.c b/drivers/media/platform/mtk-vpu/mtk_vpu.c
index aa44e11decca..853d598937f6 100644
--- a/drivers/media/platform/mtk-vpu/mtk_vpu.c
+++ b/drivers/media/platform/mtk-vpu/mtk_vpu.c
@@ -23,6 +23,7 @@
#include <linux/of_reserved_mem.h>
#include <linux/sched.h>
#include <linux/sizes.h>
+#include <linux/dma-mapping.h>
#include "mtk_vpu.h"
diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c
index 084ecf4aa9a4..0d984a28a003 100644
--- a/drivers/media/platform/omap3isp/isp.c
+++ b/drivers/media/platform/omap3isp/isp.c
@@ -1943,30 +1943,13 @@ static void isp_detach_iommu(struct isp_device *isp)
{
arm_iommu_release_mapping(isp->mapping);
isp->mapping = NULL;
- iommu_group_remove_device(isp->dev);
}
static int isp_attach_iommu(struct isp_device *isp)
{
struct dma_iommu_mapping *mapping;
- struct iommu_group *group;
int ret;
- /* Create a device group and add the device to it. */
- group = iommu_group_alloc();
- if (IS_ERR(group)) {
- dev_err(isp->dev, "failed to allocate IOMMU group\n");
- return PTR_ERR(group);
- }
-
- ret = iommu_group_add_device(group, isp->dev);
- iommu_group_put(group);
-
- if (ret < 0) {
- dev_err(isp->dev, "failed to add device to IPMMU group\n");
- return ret;
- }
-
/*
* Create the ARM mapping, used by the ARM DMA mapping core to allocate
* VAs. This will allocate a corresponding IOMMU domain.
diff --git a/drivers/media/platform/omap3isp/isp.h b/drivers/media/platform/omap3isp/isp.h
index 7e6f6638433b..2f2ae609c548 100644
--- a/drivers/media/platform/omap3isp/isp.h
+++ b/drivers/media/platform/omap3isp/isp.h
@@ -23,7 +23,6 @@
#include <linux/clk-provider.h>
#include <linux/device.h>
#include <linux/io.h>
-#include <linux/iommu.h>
#include <linux/platform_device.h>
#include <linux/wait.h>
diff --git a/drivers/staging/media/s5p-cec/Makefile b/drivers/media/platform/s5p-cec/Makefile
index 0e2cf457825a..0e2cf457825a 100644
--- a/drivers/staging/media/s5p-cec/Makefile
+++ b/drivers/media/platform/s5p-cec/Makefile
diff --git a/drivers/staging/media/s5p-cec/exynos_hdmi_cec.h b/drivers/media/platform/s5p-cec/exynos_hdmi_cec.h
index 7d9453505dce..7d9453505dce 100644
--- a/drivers/staging/media/s5p-cec/exynos_hdmi_cec.h
+++ b/drivers/media/platform/s5p-cec/exynos_hdmi_cec.h
diff --git a/drivers/staging/media/s5p-cec/exynos_hdmi_cecctrl.c b/drivers/media/platform/s5p-cec/exynos_hdmi_cecctrl.c
index 1edf667d562a..1edf667d562a 100644
--- a/drivers/staging/media/s5p-cec/exynos_hdmi_cecctrl.c
+++ b/drivers/media/platform/s5p-cec/exynos_hdmi_cecctrl.c
diff --git a/drivers/staging/media/s5p-cec/regs-cec.h b/drivers/media/platform/s5p-cec/regs-cec.h
index b2e7e129920e..b2e7e129920e 100644
--- a/drivers/staging/media/s5p-cec/regs-cec.h
+++ b/drivers/media/platform/s5p-cec/regs-cec.h
diff --git a/drivers/media/platform/s5p-cec/s5p_cec.c b/drivers/media/platform/s5p-cec/s5p_cec.c
new file mode 100644
index 000000000000..664937b61fa4
--- /dev/null
+++ b/drivers/media/platform/s5p-cec/s5p_cec.c
@@ -0,0 +1,304 @@
+/* drivers/media/platform/s5p-cec/s5p_cec.c
+ *
+ * Samsung S5P CEC driver
+ *
+ * Copyright (c) 2014 Samsung Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This driver is based on the "cec interface driver for exynos soc" by
+ * SangPil Moon.
+ */
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/timer.h>
+#include <linux/workqueue.h>
+#include <media/cec.h>
+#include <media/cec-notifier.h>
+
+#include "exynos_hdmi_cec.h"
+#include "regs-cec.h"
+#include "s5p_cec.h"
+
+#define CEC_NAME "s5p-cec"
+
+static int debug;
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "debug level (0-2)");
+
+static int s5p_cec_adap_enable(struct cec_adapter *adap, bool enable)
+{
+ struct s5p_cec_dev *cec = cec_get_drvdata(adap);
+
+ if (enable) {
+ pm_runtime_get_sync(cec->dev);
+
+ s5p_cec_reset(cec);
+
+ s5p_cec_set_divider(cec);
+ s5p_cec_threshold(cec);
+
+ s5p_cec_unmask_tx_interrupts(cec);
+ s5p_cec_unmask_rx_interrupts(cec);
+ s5p_cec_enable_rx(cec);
+ } else {
+ s5p_cec_mask_tx_interrupts(cec);
+ s5p_cec_mask_rx_interrupts(cec);
+ pm_runtime_disable(cec->dev);
+ }
+
+ return 0;
+}
+
+static int s5p_cec_adap_log_addr(struct cec_adapter *adap, u8 addr)
+{
+ struct s5p_cec_dev *cec = cec_get_drvdata(adap);
+
+ s5p_cec_set_addr(cec, addr);
+ return 0;
+}
+
+static int s5p_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
+ u32 signal_free_time, struct cec_msg *msg)
+{
+ struct s5p_cec_dev *cec = cec_get_drvdata(adap);
+
+ /*
+ * Unclear if 0 retries are allowed by the hardware, so have 1 as
+ * the minimum.
+ */
+ s5p_cec_copy_packet(cec, msg->msg, msg->len, max(1, attempts - 1));
+ return 0;
+}
+
+static irqreturn_t s5p_cec_irq_handler(int irq, void *priv)
+{
+ struct s5p_cec_dev *cec = priv;
+ u32 status = 0;
+
+ status = s5p_cec_get_status(cec);
+
+ dev_dbg(cec->dev, "irq received\n");
+
+ if (status & CEC_STATUS_TX_DONE) {
+ if (status & CEC_STATUS_TX_ERROR) {
+ dev_dbg(cec->dev, "CEC_STATUS_TX_ERROR set\n");
+ cec->tx = STATE_ERROR;
+ } else {
+ dev_dbg(cec->dev, "CEC_STATUS_TX_DONE\n");
+ cec->tx = STATE_DONE;
+ }
+ s5p_clr_pending_tx(cec);
+ }
+
+ if (status & CEC_STATUS_RX_DONE) {
+ if (status & CEC_STATUS_RX_ERROR) {
+ dev_dbg(cec->dev, "CEC_STATUS_RX_ERROR set\n");
+ s5p_cec_rx_reset(cec);
+ s5p_cec_enable_rx(cec);
+ } else {
+ dev_dbg(cec->dev, "CEC_STATUS_RX_DONE set\n");
+ if (cec->rx != STATE_IDLE)
+ dev_dbg(cec->dev, "Buffer overrun (worker did not process previous message)\n");
+ cec->rx = STATE_BUSY;
+ cec->msg.len = status >> 24;
+ cec->msg.rx_status = CEC_RX_STATUS_OK;
+ s5p_cec_get_rx_buf(cec, cec->msg.len,
+ cec->msg.msg);
+ cec->rx = STATE_DONE;
+ s5p_cec_enable_rx(cec);
+ }
+ /* Clear interrupt pending bit */
+ s5p_clr_pending_rx(cec);
+ }
+ return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t s5p_cec_irq_handler_thread(int irq, void *priv)
+{
+ struct s5p_cec_dev *cec = priv;
+
+ dev_dbg(cec->dev, "irq processing thread\n");
+ switch (cec->tx) {
+ case STATE_DONE:
+ cec_transmit_done(cec->adap, CEC_TX_STATUS_OK, 0, 0, 0, 0);
+ cec->tx = STATE_IDLE;
+ break;
+ case STATE_ERROR:
+ cec_transmit_done(cec->adap,
+ CEC_TX_STATUS_MAX_RETRIES | CEC_TX_STATUS_ERROR,
+ 0, 0, 0, 1);
+ cec->tx = STATE_IDLE;
+ break;
+ case STATE_BUSY:
+ dev_err(cec->dev, "state set to busy, this should not occur here\n");
+ break;
+ default:
+ break;
+ }
+
+ switch (cec->rx) {
+ case STATE_DONE:
+ cec_received_msg(cec->adap, &cec->msg);
+ cec->rx = STATE_IDLE;
+ break;
+ default:
+ break;
+ }
+
+ return IRQ_HANDLED;
+}
+
+static const struct cec_adap_ops s5p_cec_adap_ops = {
+ .adap_enable = s5p_cec_adap_enable,
+ .adap_log_addr = s5p_cec_adap_log_addr,
+ .adap_transmit = s5p_cec_adap_transmit,
+};
+
+static int s5p_cec_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np;
+ struct platform_device *hdmi_dev;
+ struct resource *res;
+ struct s5p_cec_dev *cec;
+ int ret;
+
+ np = of_parse_phandle(pdev->dev.of_node, "hdmi-phandle", 0);
+
+ if (!np) {
+ dev_err(&pdev->dev, "Failed to find hdmi node in device tree\n");
+ return -ENODEV;
+ }
+ hdmi_dev = of_find_device_by_node(np);
+ if (hdmi_dev == NULL)
+ return -EPROBE_DEFER;
+
+ cec = devm_kzalloc(&pdev->dev, sizeof(*cec), GFP_KERNEL);
+ if (!cec)
+ return -ENOMEM;
+
+ cec->dev = dev;
+
+ cec->irq = platform_get_irq(pdev, 0);
+ if (cec->irq < 0)
+ return cec->irq;
+
+ ret = devm_request_threaded_irq(dev, cec->irq, s5p_cec_irq_handler,
+ s5p_cec_irq_handler_thread, 0, pdev->name, cec);
+ if (ret)
+ return ret;
+
+ cec->clk = devm_clk_get(dev, "hdmicec");
+ if (IS_ERR(cec->clk))
+ return PTR_ERR(cec->clk);
+
+ cec->pmu = syscon_regmap_lookup_by_phandle(dev->of_node,
+ "samsung,syscon-phandle");
+ if (IS_ERR(cec->pmu))
+ return -EPROBE_DEFER;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ cec->reg = devm_ioremap_resource(dev, res);
+ if (IS_ERR(cec->reg))
+ return PTR_ERR(cec->reg);
+
+ cec->notifier = cec_notifier_get(&hdmi_dev->dev);
+ if (cec->notifier == NULL)
+ return -ENOMEM;
+
+ cec->adap = cec_allocate_adapter(&s5p_cec_adap_ops, cec,
+ CEC_NAME,
+ CEC_CAP_LOG_ADDRS | CEC_CAP_TRANSMIT |
+ CEC_CAP_PASSTHROUGH | CEC_CAP_RC, 1);
+ ret = PTR_ERR_OR_ZERO(cec->adap);
+ if (ret)
+ return ret;
+
+ ret = cec_register_adapter(cec->adap, &pdev->dev);
+ if (ret)
+ goto err_delete_adapter;
+
+ cec_register_cec_notifier(cec->adap, cec->notifier);
+
+ platform_set_drvdata(pdev, cec);
+ pm_runtime_enable(dev);
+
+ dev_dbg(dev, "successfuly probed\n");
+ return 0;
+
+err_delete_adapter:
+ cec_delete_adapter(cec->adap);
+ return ret;
+}
+
+static int s5p_cec_remove(struct platform_device *pdev)
+{
+ struct s5p_cec_dev *cec = platform_get_drvdata(pdev);
+
+ cec_unregister_adapter(cec->adap);
+ cec_notifier_put(cec->notifier);
+ pm_runtime_disable(&pdev->dev);
+ return 0;
+}
+
+static int __maybe_unused s5p_cec_runtime_suspend(struct device *dev)
+{
+ struct s5p_cec_dev *cec = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(cec->clk);
+ return 0;
+}
+
+static int __maybe_unused s5p_cec_runtime_resume(struct device *dev)
+{
+ struct s5p_cec_dev *cec = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare_enable(cec->clk);
+ if (ret < 0)
+ return ret;
+ return 0;
+}
+
+static const struct dev_pm_ops s5p_cec_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ SET_RUNTIME_PM_OPS(s5p_cec_runtime_suspend, s5p_cec_runtime_resume,
+ NULL)
+};
+
+static const struct of_device_id s5p_cec_match[] = {
+ {
+ .compatible = "samsung,s5p-cec",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, s5p_cec_match);
+
+static struct platform_driver s5p_cec_pdrv = {
+ .probe = s5p_cec_probe,
+ .remove = s5p_cec_remove,
+ .driver = {
+ .name = CEC_NAME,
+ .of_match_table = s5p_cec_match,
+ .pm = &s5p_cec_pm_ops,
+ },
+};
+
+module_platform_driver(s5p_cec_pdrv);
+
+MODULE_AUTHOR("Kamil Debski <kamil@wypas.org>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Samsung S5P CEC driver");
diff --git a/drivers/media/platform/s5p-cec/s5p_cec.h b/drivers/media/platform/s5p-cec/s5p_cec.h
new file mode 100644
index 000000000000..7015845c1caa
--- /dev/null
+++ b/drivers/media/platform/s5p-cec/s5p_cec.h
@@ -0,0 +1,79 @@
+/* drivers/media/platform/s5p-cec/s5p_cec.h
+ *
+ * Samsung S5P HDMI CEC driver
+ *
+ * Copyright (c) 2014 Samsung Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _S5P_CEC_H_
+#define _S5P_CEC_H_ __FILE__
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/timer.h>
+#include <linux/version.h>
+#include <linux/workqueue.h>
+#include <media/cec.h>
+
+#include "exynos_hdmi_cec.h"
+#include "regs-cec.h"
+#include "s5p_cec.h"
+
+#define CEC_NAME "s5p-cec"
+
+#define CEC_STATUS_TX_RUNNING (1 << 0)
+#define CEC_STATUS_TX_TRANSFERRING (1 << 1)
+#define CEC_STATUS_TX_DONE (1 << 2)
+#define CEC_STATUS_TX_ERROR (1 << 3)
+#define CEC_STATUS_TX_BYTES (0xFF << 8)
+#define CEC_STATUS_RX_RUNNING (1 << 16)
+#define CEC_STATUS_RX_RECEIVING (1 << 17)
+#define CEC_STATUS_RX_DONE (1 << 18)
+#define CEC_STATUS_RX_ERROR (1 << 19)
+#define CEC_STATUS_RX_BCAST (1 << 20)
+#define CEC_STATUS_RX_BYTES (0xFF << 24)
+
+#define CEC_WORKER_TX_DONE (1 << 0)
+#define CEC_WORKER_RX_MSG (1 << 1)
+
+/* CEC Rx buffer size */
+#define CEC_RX_BUFF_SIZE 16
+/* CEC Tx buffer size */
+#define CEC_TX_BUFF_SIZE 16
+
+enum cec_state {
+ STATE_IDLE,
+ STATE_BUSY,
+ STATE_DONE,
+ STATE_ERROR
+};
+
+struct cec_notifier;
+
+struct s5p_cec_dev {
+ struct cec_adapter *adap;
+ struct clk *clk;
+ struct device *dev;
+ struct mutex lock;
+ struct regmap *pmu;
+ struct cec_notifier *notifier;
+ int irq;
+ void __iomem *reg;
+
+ enum cec_state rx;
+ enum cec_state tx;
+ struct cec_msg msg;
+};
+
+#endif /* _S5P_CEC_H_ */
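
The interrupt handler in s5p_cec.c recovers the received message length from the top byte of the status word (cec->msg.len = status >> 24), i.e. the CEC_STATUS_RX_BYTES field defined above. A small decoding sketch with a fabricated status value:

#include <stdint.h>
#include <stdio.h>

#define CEC_STATUS_RX_DONE      (1 << 18)
#define CEC_STATUS_RX_ERROR     (1 << 19)

int main(void)
{
        /* fabricated status word: RX done, no error, 4 bytes in the top byte */
        uint32_t status = CEC_STATUS_RX_DONE | (4u << 24);

        if ((status & CEC_STATUS_RX_DONE) && !(status & CEC_STATUS_RX_ERROR)) {
                unsigned int len = status >> 24;        /* CEC_STATUS_RX_BYTES field */

                printf("received %u byte(s)\n", len);   /* prints: received 4 byte(s) */
        }
        return 0;
}
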
diff --git a/drivers/media/platform/s5p-g2d/g2d.c b/drivers/media/platform/s5p-g2d/g2d.c
index 62c0dec30b59..81ed5cd5cd5d 100644
--- a/drivers/media/platform/s5p-g2d/g2d.c
+++ b/drivers/media/platform/s5p-g2d/g2d.c
@@ -679,7 +679,7 @@ static int g2d_probe(struct platform_device *pdev)
0, pdev->name, dev);
if (ret) {
dev_err(&pdev->dev, "failed to install IRQ\n");
- goto put_clk_gate;
+ goto unprep_clk_gate;
}
vb2_dma_contig_set_max_seg_size(&pdev->dev, DMA_BIT_MASK(32));
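
The g2d.c hunk retargets the IRQ-failure path from put_clk_gate to unprep_clk_gate so that the error labels keep unwinding everything set up before the failing step (presumably an already-prepared gate clock that the old target skipped). A generic sketch of that label-ordering pattern, with hypothetical step names:

#include <stdio.h>

static int step_a(void) { return 0; }   /* e.g. acquire a resource */
static int step_b(void) { return 0; }   /* e.g. prepare it */
static int step_c(void) { return -1; }  /* e.g. request the IRQ - fails here */

static void undo_b(void) { puts("undo b"); }
static void undo_a(void) { puts("undo a"); }

static int probe(void)
{
        int ret;

        ret = step_a();
        if (ret)
                return ret;
        ret = step_b();
        if (ret)
                goto err_a;             /* only step a needs undoing */
        ret = step_c();
        if (ret)
                goto err_b;             /* steps a and b both need undoing */
        return 0;

err_b:
        undo_b();                       /* reverse order: undo b first ... */
err_a:
        undo_a();                       /* ... then a */
        return ret;
}

int main(void)
{
        printf("probe() = %d\n", probe());      /* undo b, undo a, probe() = -1 */
        return 0;
}
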
diff --git a/drivers/media/platform/s5p-mfc/regs-mfc-v6.h b/drivers/media/platform/s5p-mfc/regs-mfc-v6.h
index d2cd35916dc5..c0166ee9a455 100644
--- a/drivers/media/platform/s5p-mfc/regs-mfc-v6.h
+++ b/drivers/media/platform/s5p-mfc/regs-mfc-v6.h
@@ -403,7 +403,7 @@
#define MFC_OTHER_ENC_CTX_BUF_SIZE_V6 (12 * SZ_1K) /* 12KB */
/* MFCv6 variant defines */
-#define MAX_FW_SIZE_V6 (SZ_1M) /* 1MB */
+#define MAX_FW_SIZE_V6 (SZ_512K) /* 512KB */
#define MAX_CPB_SIZE_V6 (3 * SZ_1M) /* 3MB */
#define MFC_VERSION_V6 0x61
#define MFC_NUM_PORTS_V6 1
diff --git a/drivers/media/platform/s5p-mfc/regs-mfc-v7.h b/drivers/media/platform/s5p-mfc/regs-mfc-v7.h
index 1a5c6fdf7846..9f220769d970 100644
--- a/drivers/media/platform/s5p-mfc/regs-mfc-v7.h
+++ b/drivers/media/platform/s5p-mfc/regs-mfc-v7.h
@@ -34,7 +34,7 @@
#define S5P_FIMV_E_VP8_NUM_T_LAYER_V7 0xfdc4
/* MFCv7 variant defines */
-#define MAX_FW_SIZE_V7 (SZ_1M) /* 1MB */
+#define MAX_FW_SIZE_V7 (SZ_512K) /* 512KB */
#define MAX_CPB_SIZE_V7 (3 * SZ_1M) /* 3MB */
#define MFC_VERSION_V7 0x72
#define MFC_NUM_PORTS_V7 1
diff --git a/drivers/media/platform/s5p-mfc/regs-mfc-v8.h b/drivers/media/platform/s5p-mfc/regs-mfc-v8.h
index 4d1c3750eb5e..75f5f7511d72 100644
--- a/drivers/media/platform/s5p-mfc/regs-mfc-v8.h
+++ b/drivers/media/platform/s5p-mfc/regs-mfc-v8.h
@@ -116,7 +116,7 @@
#define S5P_FIMV_D_ALIGN_PLANE_SIZE_V8 64
/* MFCv8 variant defines */
-#define MAX_FW_SIZE_V8 (SZ_1M) /* 1MB */
+#define MAX_FW_SIZE_V8 (SZ_512K) /* 512KB */
#define MAX_CPB_SIZE_V8 (3 * SZ_1M) /* 3MB */
#define MFC_VERSION_V8 0x80
#define MFC_NUM_PORTS_V8 1
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c
index bb0a5887c9a9..1afde5021ca6 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c
@@ -22,6 +22,7 @@
#include <media/v4l2-event.h>
#include <linux/workqueue.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/of_reserved_mem.h>
#include <media/videobuf2-v4l2.h>
#include "s5p_mfc_common.h"
@@ -42,6 +43,10 @@ int mfc_debug_level;
module_param_named(debug, mfc_debug_level, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Debug level - higher value produces more verbose messages");
+static char *mfc_mem_size;
+module_param_named(mem, mfc_mem_size, charp, 0644);
+MODULE_PARM_DESC(mem, "Preallocated memory size for the firmware and context buffers");
+
/* Helper functions for interrupt processing */
/* Remove from hw execution round robin */
@@ -206,6 +211,7 @@ static void s5p_mfc_watchdog_worker(struct work_struct *work)
}
s5p_mfc_clock_on();
ret = s5p_mfc_init_hw(dev);
+ s5p_mfc_clock_off();
if (ret)
mfc_err("Failed to reinit FW\n");
}
@@ -666,9 +672,9 @@ static irqreturn_t s5p_mfc_irq(int irq, void *priv)
break;
}
s5p_mfc_hw_call(dev->mfc_ops, clear_int_flags, dev);
- wake_up_ctx(ctx, reason, err);
WARN_ON(test_and_clear_bit(0, &dev->hw_lock) == 0);
s5p_mfc_clock_off();
+ wake_up_ctx(ctx, reason, err);
s5p_mfc_hw_call(dev->mfc_ops, try_run, dev);
} else {
s5p_mfc_handle_frame(ctx, reason, err);
@@ -682,15 +688,11 @@ static irqreturn_t s5p_mfc_irq(int irq, void *priv)
case S5P_MFC_R2H_CMD_OPEN_INSTANCE_RET:
ctx->inst_no = s5p_mfc_hw_call(dev->mfc_ops, get_inst_no, dev);
ctx->state = MFCINST_GOT_INST;
- clear_work_bit(ctx);
- wake_up(&ctx->queue);
goto irq_cleanup_hw;
case S5P_MFC_R2H_CMD_CLOSE_INSTANCE_RET:
- clear_work_bit(ctx);
ctx->inst_no = MFC_NO_INSTANCE_SET;
ctx->state = MFCINST_FREE;
- wake_up(&ctx->queue);
goto irq_cleanup_hw;
case S5P_MFC_R2H_CMD_SYS_INIT_RET:
@@ -700,9 +702,9 @@ static irqreturn_t s5p_mfc_irq(int irq, void *priv)
if (ctx)
clear_work_bit(ctx);
s5p_mfc_hw_call(dev->mfc_ops, clear_int_flags, dev);
- wake_up_dev(dev, reason, err);
clear_bit(0, &dev->hw_lock);
clear_bit(0, &dev->enter_suspend);
+ wake_up_dev(dev, reason, err);
break;
case S5P_MFC_R2H_CMD_INIT_BUFFERS_RET:
@@ -717,9 +719,7 @@ static irqreturn_t s5p_mfc_irq(int irq, void *priv)
break;
case S5P_MFC_R2H_CMD_DPB_FLUSH_RET:
- clear_work_bit(ctx);
ctx->state = MFCINST_RUNNING;
- wake_up(&ctx->queue);
goto irq_cleanup_hw;
default:
@@ -738,6 +738,8 @@ irq_cleanup_hw:
mfc_err("Failed to unlock hw\n");
s5p_mfc_clock_off();
+ clear_work_bit(ctx);
+ wake_up(&ctx->queue);
s5p_mfc_hw_call(dev->mfc_ops, try_run, dev);
spin_unlock(&dev->irqlock);
@@ -764,6 +766,7 @@ static int s5p_mfc_open(struct file *file)
ret = -ENOMEM;
goto err_alloc;
}
+ init_waitqueue_head(&ctx->queue);
v4l2_fh_init(&ctx->fh, vdev);
file->private_data = &ctx->fh;
v4l2_fh_add(&ctx->fh);
@@ -866,7 +869,6 @@ static int s5p_mfc_open(struct file *file)
/* Init videobuf2 queue for OUTPUT */
q = &ctx->vq_src;
q->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
- q->io_modes = VB2_MMAP;
q->drv_priv = &ctx->fh;
q->lock = &dev->mfc_mutex;
if (vdev == dev->vfd_dec) {
@@ -899,7 +901,6 @@ static int s5p_mfc_open(struct file *file)
mfc_err("Failed to initialize videobuf2 queue(output)\n");
goto err_queue_init;
}
- init_waitqueue_head(&ctx->queue);
mutex_unlock(&dev->mfc_mutex);
mfc_debug_leave();
return ret;
@@ -1106,58 +1107,158 @@ static struct device *s5p_mfc_alloc_memdev(struct device *dev,
return NULL;
}
-static int s5p_mfc_configure_dma_memory(struct s5p_mfc_dev *mfc_dev)
+static int s5p_mfc_configure_2port_memory(struct s5p_mfc_dev *mfc_dev)
{
struct device *dev = &mfc_dev->plat_dev->dev;
-
- /*
- * When IOMMU is available, we cannot use the default configuration,
- * because of MFC firmware requirements: address space limited to
- * 256M and non-zero default start address.
- * This is still simplified, not optimal configuration, but for now
- * IOMMU core doesn't allow to configure device's IOMMUs channel
- * separately.
- */
- if (exynos_is_iommu_available(dev)) {
- int ret = exynos_configure_iommu(dev, S5P_MFC_IOMMU_DMA_BASE,
- S5P_MFC_IOMMU_DMA_SIZE);
- if (ret == 0)
- mfc_dev->mem_dev_l = mfc_dev->mem_dev_r = dev;
- return ret;
- }
+ void *bank2_virt;
+ dma_addr_t bank2_dma_addr;
+ unsigned long align_size = 1 << MFC_BASE_ALIGN_ORDER;
+ int ret;
/*
* Create and initialize virtual devices for accessing
* reserved memory regions.
*/
- mfc_dev->mem_dev_l = s5p_mfc_alloc_memdev(dev, "left",
- MFC_BANK1_ALLOC_CTX);
- if (!mfc_dev->mem_dev_l)
+ mfc_dev->mem_dev[BANK_L_CTX] = s5p_mfc_alloc_memdev(dev, "left",
+ BANK_L_CTX);
+ if (!mfc_dev->mem_dev[BANK_L_CTX])
return -ENODEV;
- mfc_dev->mem_dev_r = s5p_mfc_alloc_memdev(dev, "right",
- MFC_BANK2_ALLOC_CTX);
- if (!mfc_dev->mem_dev_r) {
- device_unregister(mfc_dev->mem_dev_l);
+ mfc_dev->mem_dev[BANK_R_CTX] = s5p_mfc_alloc_memdev(dev, "right",
+ BANK_R_CTX);
+ if (!mfc_dev->mem_dev[BANK_R_CTX]) {
+ device_unregister(mfc_dev->mem_dev[BANK_L_CTX]);
return -ENODEV;
}
+ /* Allocate memory for firmware and initialize both banks addresses */
+ ret = s5p_mfc_alloc_firmware(mfc_dev);
+ if (ret) {
+ device_unregister(mfc_dev->mem_dev[BANK_R_CTX]);
+ device_unregister(mfc_dev->mem_dev[BANK_L_CTX]);
+ return ret;
+ }
+
+ mfc_dev->dma_base[BANK_L_CTX] = mfc_dev->fw_buf.dma;
+
+ bank2_virt = dma_alloc_coherent(mfc_dev->mem_dev[BANK_R_CTX],
+ align_size, &bank2_dma_addr, GFP_KERNEL);
+ if (!bank2_virt) {
+ mfc_err("Allocating bank2 base failed\n");
+ s5p_mfc_release_firmware(mfc_dev);
+ device_unregister(mfc_dev->mem_dev[BANK_R_CTX]);
+ device_unregister(mfc_dev->mem_dev[BANK_L_CTX]);
+ return -ENOMEM;
+ }
+
+ /* Valid buffers passed to MFC encoder with LAST_FRAME command
+ * should not have the address of bank2 - MFC will treat it as a null frame.
+ * To avoid such a situation we set the bank2 address below the pool address.
+ */
+ mfc_dev->dma_base[BANK_R_CTX] = bank2_dma_addr - align_size;
+
+ dma_free_coherent(mfc_dev->mem_dev[BANK_R_CTX], align_size, bank2_virt,
+ bank2_dma_addr);
+
+ vb2_dma_contig_set_max_seg_size(mfc_dev->mem_dev[BANK_L_CTX],
+ DMA_BIT_MASK(32));
+ vb2_dma_contig_set_max_seg_size(mfc_dev->mem_dev[BANK_R_CTX],
+ DMA_BIT_MASK(32));
+
return 0;
}
-static void s5p_mfc_unconfigure_dma_memory(struct s5p_mfc_dev *mfc_dev)
+static void s5p_mfc_unconfigure_2port_memory(struct s5p_mfc_dev *mfc_dev)
+{
+ device_unregister(mfc_dev->mem_dev[BANK_L_CTX]);
+ device_unregister(mfc_dev->mem_dev[BANK_R_CTX]);
+ vb2_dma_contig_clear_max_seg_size(mfc_dev->mem_dev[BANK_L_CTX]);
+ vb2_dma_contig_clear_max_seg_size(mfc_dev->mem_dev[BANK_R_CTX]);
+}
+
+static int s5p_mfc_configure_common_memory(struct s5p_mfc_dev *mfc_dev)
{
struct device *dev = &mfc_dev->plat_dev->dev;
+ unsigned long mem_size = SZ_4M;
+ unsigned int bitmap_size;
- if (exynos_is_iommu_available(dev)) {
- exynos_unconfigure_iommu(dev);
- return;
+ if (IS_ENABLED(CONFIG_DMA_CMA) || exynos_is_iommu_available(dev))
+ mem_size = SZ_8M;
+
+ if (mfc_mem_size)
+ mem_size = memparse(mfc_mem_size, NULL);
+
+ bitmap_size = BITS_TO_LONGS(mem_size >> PAGE_SHIFT) * sizeof(long);
+
+ mfc_dev->mem_bitmap = kzalloc(bitmap_size, GFP_KERNEL);
+ if (!mfc_dev->mem_bitmap)
+ return -ENOMEM;
+
+ mfc_dev->mem_virt = dma_alloc_coherent(dev, mem_size,
+ &mfc_dev->mem_base, GFP_KERNEL);
+ if (!mfc_dev->mem_virt) {
+ kfree(mfc_dev->mem_bitmap);
+ dev_err(dev, "failed to preallocate %ld MiB for the firmware and context buffers\n",
+ (mem_size / SZ_1M));
+ return -ENOMEM;
}
+ mfc_dev->mem_size = mem_size;
+ mfc_dev->dma_base[BANK_L_CTX] = mfc_dev->mem_base;
+ mfc_dev->dma_base[BANK_R_CTX] = mfc_dev->mem_base;
+
+ /*
+ * MFC hardware cannot handle 0 as a base address, so mark first 128K
+ * as used (to keep required base alignment) and adjust base address
+ */
+ if (mfc_dev->mem_base == (dma_addr_t)0) {
+ unsigned int offset = 1 << MFC_BASE_ALIGN_ORDER;
+
+ bitmap_set(mfc_dev->mem_bitmap, 0, offset >> PAGE_SHIFT);
+ mfc_dev->dma_base[BANK_L_CTX] += offset;
+ mfc_dev->dma_base[BANK_R_CTX] += offset;
+ }
+
+ /* Firmware allocation cannot fail in this case */
+ s5p_mfc_alloc_firmware(mfc_dev);
- device_unregister(mfc_dev->mem_dev_l);
- device_unregister(mfc_dev->mem_dev_r);
+ mfc_dev->mem_dev[BANK_L_CTX] = mfc_dev->mem_dev[BANK_R_CTX] = dev;
+ vb2_dma_contig_set_max_seg_size(dev, DMA_BIT_MASK(32));
+
+ dev_info(dev, "preallocated %ld MiB buffer for the firmware and context buffers\n",
+ (mem_size / SZ_1M));
+
+ return 0;
+}
+
+static void s5p_mfc_unconfigure_common_memory(struct s5p_mfc_dev *mfc_dev)
+{
+ struct device *dev = &mfc_dev->plat_dev->dev;
+
+ dma_free_coherent(dev, mfc_dev->mem_size, mfc_dev->mem_virt,
+ mfc_dev->mem_base);
+ kfree(mfc_dev->mem_bitmap);
+ vb2_dma_contig_clear_max_seg_size(dev);
+}
+
+static int s5p_mfc_configure_dma_memory(struct s5p_mfc_dev *mfc_dev)
+{
+ struct device *dev = &mfc_dev->plat_dev->dev;
+
+ if (exynos_is_iommu_available(dev) || !IS_TWOPORT(mfc_dev))
+ return s5p_mfc_configure_common_memory(mfc_dev);
+ else
+ return s5p_mfc_configure_2port_memory(mfc_dev);
}
-static void *mfc_get_drv_data(struct platform_device *pdev);
+static void s5p_mfc_unconfigure_dma_memory(struct s5p_mfc_dev *mfc_dev)
+{
+ struct device *dev = &mfc_dev->plat_dev->dev;
+
+ s5p_mfc_release_firmware(mfc_dev);
+ if (exynos_is_iommu_available(dev) || !IS_TWOPORT(mfc_dev))
+ s5p_mfc_unconfigure_common_memory(mfc_dev);
+ else
+ s5p_mfc_unconfigure_2port_memory(mfc_dev);
+}
/* MFC probe function */
static int s5p_mfc_probe(struct platform_device *pdev)
@@ -1182,7 +1283,7 @@ static int s5p_mfc_probe(struct platform_device *pdev)
return -ENODEV;
}
- dev->variant = mfc_get_drv_data(pdev);
+ dev->variant = of_device_get_match_data(&pdev->dev);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
dev->regs_base = devm_ioremap_resource(&pdev->dev, res);
@@ -1214,19 +1315,18 @@ static int s5p_mfc_probe(struct platform_device *pdev)
goto err_dma;
}
- vb2_dma_contig_set_max_seg_size(dev->mem_dev_l, DMA_BIT_MASK(32));
- vb2_dma_contig_set_max_seg_size(dev->mem_dev_r, DMA_BIT_MASK(32));
-
mutex_init(&dev->mfc_mutex);
-
- ret = s5p_mfc_alloc_firmware(dev);
- if (ret)
- goto err_res;
+ init_waitqueue_head(&dev->queue);
+ dev->hw_lock = 0;
+ INIT_WORK(&dev->watchdog_work, s5p_mfc_watchdog_worker);
+ atomic_set(&dev->watchdog_cnt, 0);
+ init_timer(&dev->watchdog_timer);
+ dev->watchdog_timer.data = (unsigned long)dev;
+ dev->watchdog_timer.function = s5p_mfc_watchdog;
ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev);
if (ret)
goto err_v4l2_dev_reg;
- init_waitqueue_head(&dev->queue);
/* decoder */
vfd = video_device_alloc();
@@ -1263,13 +1363,6 @@ static int s5p_mfc_probe(struct platform_device *pdev)
video_set_drvdata(vfd, dev);
platform_set_drvdata(pdev, dev);
- dev->hw_lock = 0;
- INIT_WORK(&dev->watchdog_work, s5p_mfc_watchdog_worker);
- atomic_set(&dev->watchdog_cnt, 0);
- init_timer(&dev->watchdog_timer);
- dev->watchdog_timer.data = (unsigned long)dev;
- dev->watchdog_timer.function = s5p_mfc_watchdog;
-
/* Initialize HW ops and commands based on MFC version */
s5p_mfc_init_hw_ops(dev);
s5p_mfc_init_hw_cmds(dev);
@@ -1305,8 +1398,6 @@ err_enc_alloc:
err_dec_alloc:
v4l2_device_unregister(&dev->v4l2_dev);
err_v4l2_dev_reg:
- s5p_mfc_release_firmware(dev);
-err_res:
s5p_mfc_final_pm(dev);
err_dma:
s5p_mfc_unconfigure_dma_memory(dev);
@@ -1348,10 +1439,7 @@ static int s5p_mfc_remove(struct platform_device *pdev)
video_device_release(dev->vfd_enc);
video_device_release(dev->vfd_dec);
v4l2_device_unregister(&dev->v4l2_dev);
- s5p_mfc_release_firmware(dev);
s5p_mfc_unconfigure_dma_memory(dev);
- vb2_dma_contig_clear_max_seg_size(dev->mem_dev_l);
- vb2_dma_contig_clear_max_seg_size(dev->mem_dev_r);
s5p_mfc_final_pm(dev);
return 0;
@@ -1423,16 +1511,11 @@ static struct s5p_mfc_buf_size buf_size_v5 = {
.priv = &mfc_buf_size_v5,
};
-static struct s5p_mfc_buf_align mfc_buf_align_v5 = {
- .base = MFC_BASE_ALIGN_ORDER,
-};
-
static struct s5p_mfc_variant mfc_drvdata_v5 = {
.version = MFC_VERSION,
.version_bit = MFC_V5_BIT,
.port_num = MFC_NUM_PORTS,
.buf_size = &buf_size_v5,
- .buf_align = &mfc_buf_align_v5,
.fw_name[0] = "s5p-mfc.fw",
.clk_names = {"mfc", "sclk_mfc"},
.num_clocks = 2,
@@ -1453,16 +1536,11 @@ static struct s5p_mfc_buf_size buf_size_v6 = {
.priv = &mfc_buf_size_v6,
};
-static struct s5p_mfc_buf_align mfc_buf_align_v6 = {
- .base = 0,
-};
-
static struct s5p_mfc_variant mfc_drvdata_v6 = {
.version = MFC_VERSION_V6,
.version_bit = MFC_V6_BIT,
.port_num = MFC_NUM_PORTS_V6,
.buf_size = &buf_size_v6,
- .buf_align = &mfc_buf_align_v6,
.fw_name[0] = "s5p-mfc-v6.fw",
/*
* v6-v2 firmware contains bug fixes and interface change
@@ -1487,16 +1565,11 @@ static struct s5p_mfc_buf_size buf_size_v7 = {
.priv = &mfc_buf_size_v7,
};
-static struct s5p_mfc_buf_align mfc_buf_align_v7 = {
- .base = 0,
-};
-
static struct s5p_mfc_variant mfc_drvdata_v7 = {
.version = MFC_VERSION_V7,
.version_bit = MFC_V7_BIT,
.port_num = MFC_NUM_PORTS_V7,
.buf_size = &buf_size_v7,
- .buf_align = &mfc_buf_align_v7,
.fw_name[0] = "s5p-mfc-v7.fw",
.clk_names = {"mfc", "sclk_mfc"},
.num_clocks = 2,
@@ -1516,16 +1589,11 @@ static struct s5p_mfc_buf_size buf_size_v8 = {
.priv = &mfc_buf_size_v8,
};
-static struct s5p_mfc_buf_align mfc_buf_align_v8 = {
- .base = 0,
-};
-
static struct s5p_mfc_variant mfc_drvdata_v8 = {
.version = MFC_VERSION_V8,
.version_bit = MFC_V8_BIT,
.port_num = MFC_NUM_PORTS_V8,
.buf_size = &buf_size_v8,
- .buf_align = &mfc_buf_align_v8,
.fw_name[0] = "s5p-mfc-v8.fw",
.clk_names = {"mfc"},
.num_clocks = 1,
@@ -1536,7 +1604,6 @@ static struct s5p_mfc_variant mfc_drvdata_v8_5433 = {
.version_bit = MFC_V8_BIT,
.port_num = MFC_NUM_PORTS_V8,
.buf_size = &buf_size_v8,
- .buf_align = &mfc_buf_align_v8,
.fw_name[0] = "s5p-mfc-v8.fw",
.clk_names = {"pclk", "aclk", "aclk_xiu"},
.num_clocks = 3,
@@ -1563,18 +1630,6 @@ static const struct of_device_id exynos_mfc_match[] = {
};
MODULE_DEVICE_TABLE(of, exynos_mfc_match);
-static void *mfc_get_drv_data(struct platform_device *pdev)
-{
- struct s5p_mfc_variant *driver_data = NULL;
- const struct of_device_id *match;
-
- match = of_match_node(exynos_mfc_match, pdev->dev.of_node);
- if (match)
- driver_data = (struct s5p_mfc_variant *)match->data;
-
- return driver_data;
-}
-
static struct platform_driver s5p_mfc_driver = {
.probe = s5p_mfc_probe,
.remove = s5p_mfc_remove,
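
s5p_mfc_configure_common_memory() preallocates a single DMA-coherent pool and tracks page-sized chunks in a bitmap; the allocator added to s5p_mfc_opr.c further down then carves 64 KiB-aligned buffers out of that pool with bitmap_find_next_zero_area(). A user-space sketch of the same bookkeeping (the 8 MiB pool size and 64 KiB alignment mirror the driver; the byte-per-page bitmap and search helper are simplified stand-ins):

#include <stddef.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SHIFT      12
#define POOL_SIZE       (8u << 20)                      /* 8 MiB pool */
#define POOL_PAGES      (POOL_SIZE >> PAGE_SHIFT)       /* 2048 pages */
#define ALIGN_PAGES     16                              /* 64 KiB alignment in 4 KiB pages */

static unsigned char used[POOL_PAGES];                  /* 0 = page free */

/* Simplified stand-in for bitmap_find_next_zero_area(): first aligned free run. */
static long find_free_run(unsigned int count)
{
        for (unsigned int start = 0; start + count <= POOL_PAGES; start += ALIGN_PAGES) {
                unsigned int i;

                for (i = 0; i < count && !used[start + i]; i++)
                        ;
                if (i == count)
                        return start;
        }
        return -1;
}

static long pool_alloc(size_t size)
{
        unsigned int count = (size + (1u << PAGE_SHIFT) - 1) >> PAGE_SHIFT;
        long start = find_free_run(count);

        if (start < 0)
                return -1;
        memset(&used[start], 1, count);                 /* mark the run as allocated */
        return start << PAGE_SHIFT;                     /* byte offset into the pool */
}

int main(void)
{
        printf("fw buffer  at offset 0x%lx\n", pool_alloc(512 << 10)); /* 0x0 */
        printf("ctx buffer at offset 0x%lx\n", pool_alloc(28 << 10));  /* 0x80000 */
        return 0;
}
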
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v5.c b/drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v5.c
index 8c4739ca16d6..4c80bb4243be 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v5.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v5.c
@@ -47,7 +47,7 @@ static int s5p_mfc_sys_init_cmd_v5(struct s5p_mfc_dev *dev)
struct s5p_mfc_cmd_args h2r_args;
memset(&h2r_args, 0, sizeof(struct s5p_mfc_cmd_args));
- h2r_args.arg[0] = dev->fw_size;
+ h2r_args.arg[0] = dev->fw_buf.size;
return s5p_mfc_cmd_host2risc_v5(dev, S5P_FIMV_H2R_CMD_SYS_INIT,
&h2r_args);
}
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_common.h b/drivers/media/platform/s5p-mfc/s5p_mfc_common.h
index ab23236aa942..4220914529b2 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_common.h
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_common.h
@@ -33,8 +33,9 @@
* while mmaping */
#define DST_QUEUE_OFF_BASE (1 << 30)
-#define MFC_BANK1_ALLOC_CTX 0
-#define MFC_BANK2_ALLOC_CTX 1
+#define BANK_L_CTX 0
+#define BANK_R_CTX 1
+#define BANK_CTX_NUM 2
#define MFC_BANK1_ALIGN_ORDER 13
#define MFC_BANK2_ALIGN_ORDER 13
@@ -44,14 +45,6 @@
#include <media/videobuf2-dma-contig.h>
-static inline dma_addr_t s5p_mfc_mem_cookie(void *a, void *b)
-{
- /* Same functionality as the vb2_dma_contig_plane_paddr */
- dma_addr_t *paddr = vb2_dma_contig_memops.cookie(b);
-
- return *paddr;
-}
-
/* MFC definitions */
#define MFC_MAX_EXTRA_DPB 5
#define MFC_MAX_BUFFERS 32
@@ -200,7 +193,7 @@ struct s5p_mfc_buf {
*/
struct s5p_mfc_pm {
struct clk *clock_gate;
- const char **clk_names;
+ const char * const *clk_names;
struct clk *clocks[MFC_MAX_CLOCKS];
int num_clocks;
bool use_clock_gating;
@@ -229,16 +222,11 @@ struct s5p_mfc_buf_size {
void *priv;
};
-struct s5p_mfc_buf_align {
- unsigned int base;
-};
-
struct s5p_mfc_variant {
unsigned int version;
unsigned int port_num;
u32 version_bit;
struct s5p_mfc_buf_size *buf_size;
- struct s5p_mfc_buf_align *buf_align;
char *fw_name[MFC_FW_MAX_VERSIONS];
const char *clk_names[MFC_MAX_CLOCKS];
int num_clocks;
@@ -252,12 +240,14 @@ struct s5p_mfc_variant {
* buffer accessed by driver
* @dma: DMA address, only valid when kernel DMA API used
* @size: size of the buffer
+ * @ctx: memory context (bank) used for this allocation
*/
struct s5p_mfc_priv_buf {
unsigned long ofs;
void *virt;
dma_addr_t dma;
size_t size;
+ unsigned int ctx;
};
/**
@@ -267,8 +257,7 @@ struct s5p_mfc_priv_buf {
* @vfd_dec: video device for decoding
* @vfd_enc: video device for encoding
* @plat_dev: platform device
- * @mem_dev_l: child device of the left memory bank (0)
- * @mem_dev_r: child device of the right memory bank (1)
+ * @mem_dev[]: child devices of the memory banks
* @regs_base: base address of the MFC hw registers
* @irq: irq resource
* @dec_ctrl_handler: control framework handler for decoding
@@ -286,8 +275,7 @@ struct s5p_mfc_priv_buf {
* @queue: waitqueue for waiting for completion of device commands
* @fw_size: size of firmware
* @fw_virt_addr: virtual firmware address
- * @bank1: address of the beginning of bank 1 memory
- * @bank2: address of the beginning of bank 2 memory
+ * @dma_base[]: address of the beginning of memory banks
* @hw_lock: used for hardware locking
* @ctx: array of driver contexts
* @curr_ctx: number of the currently running context
@@ -310,14 +298,13 @@ struct s5p_mfc_dev {
struct video_device *vfd_dec;
struct video_device *vfd_enc;
struct platform_device *plat_dev;
- struct device *mem_dev_l;
- struct device *mem_dev_r;
+ struct device *mem_dev[BANK_CTX_NUM];
void __iomem *regs_base;
int irq;
struct v4l2_ctrl_handler dec_ctrl_handler;
struct v4l2_ctrl_handler enc_ctrl_handler;
struct s5p_mfc_pm pm;
- struct s5p_mfc_variant *variant;
+ const struct s5p_mfc_variant *variant;
int num_inst;
spinlock_t irqlock; /* lock when operating on context */
spinlock_t condlock; /* lock when changing/checking if a context is
@@ -327,10 +314,12 @@ struct s5p_mfc_dev {
int int_type;
unsigned int int_err;
wait_queue_head_t queue;
- size_t fw_size;
- void *fw_virt_addr;
- dma_addr_t bank1;
- dma_addr_t bank2;
+ struct s5p_mfc_priv_buf fw_buf;
+ size_t mem_size;
+ dma_addr_t mem_base;
+ unsigned long *mem_bitmap;
+ void *mem_virt;
+ dma_addr_t dma_base[BANK_CTX_NUM];
unsigned long hw_lock;
struct s5p_mfc_ctx *ctx[MFC_NUM_CONTEXTS];
int curr_ctx;
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c b/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c
index cc888713b3b6..69ef9c23a99a 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c
@@ -26,51 +26,22 @@
/* Allocate memory for firmware */
int s5p_mfc_alloc_firmware(struct s5p_mfc_dev *dev)
{
- void *bank2_virt;
- dma_addr_t bank2_dma_addr;
+ struct s5p_mfc_priv_buf *fw_buf = &dev->fw_buf;
+ int err;
- dev->fw_size = dev->variant->buf_size->fw;
+ fw_buf->size = dev->variant->buf_size->fw;
- if (dev->fw_virt_addr) {
+ if (fw_buf->virt) {
mfc_err("Attempting to allocate firmware when it seems that it is already loaded\n");
return -ENOMEM;
}
- dev->fw_virt_addr = dma_alloc_coherent(dev->mem_dev_l, dev->fw_size,
- &dev->bank1, GFP_KERNEL);
-
- if (!dev->fw_virt_addr) {
+ err = s5p_mfc_alloc_priv_buf(dev, BANK_L_CTX, &dev->fw_buf);
+ if (err) {
mfc_err("Allocating bitprocessor buffer failed\n");
- return -ENOMEM;
+ return err;
}
- if (HAS_PORTNUM(dev) && IS_TWOPORT(dev)) {
- bank2_virt = dma_alloc_coherent(dev->mem_dev_r, 1 << MFC_BASE_ALIGN_ORDER,
- &bank2_dma_addr, GFP_KERNEL);
-
- if (!bank2_virt) {
- mfc_err("Allocating bank2 base failed\n");
- dma_free_coherent(dev->mem_dev_l, dev->fw_size,
- dev->fw_virt_addr, dev->bank1);
- dev->fw_virt_addr = NULL;
- return -ENOMEM;
- }
-
- /* Valid buffers passed to MFC encoder with LAST_FRAME command
- * should not have address of bank2 - MFC will treat it as a null frame.
- * To avoid such situation we set bank2 address below the pool address.
- */
- dev->bank2 = bank2_dma_addr - (1 << MFC_BASE_ALIGN_ORDER);
-
- dma_free_coherent(dev->mem_dev_r, 1 << MFC_BASE_ALIGN_ORDER,
- bank2_virt, bank2_dma_addr);
-
- } else {
- /* In this case bank2 can point to the same address as bank1.
- * Firmware will always occupy the beginning of this area so it is
- * impossible having a video frame buffer with zero address. */
- dev->bank2 = dev->bank1;
- }
return 0;
}
@@ -99,17 +70,17 @@ int s5p_mfc_load_firmware(struct s5p_mfc_dev *dev)
mfc_err("Firmware is not present in the /lib/firmware directory nor compiled in kernel\n");
return -EINVAL;
}
- if (fw_blob->size > dev->fw_size) {
+ if (fw_blob->size > dev->fw_buf.size) {
mfc_err("MFC firmware is too big to be loaded\n");
release_firmware(fw_blob);
return -ENOMEM;
}
- if (!dev->fw_virt_addr) {
+ if (!dev->fw_buf.virt) {
mfc_err("MFC firmware is not allocated\n");
release_firmware(fw_blob);
return -EINVAL;
}
- memcpy(dev->fw_virt_addr, fw_blob->data, fw_blob->size);
+ memcpy(dev->fw_buf.virt, fw_blob->data, fw_blob->size);
wmb();
release_firmware(fw_blob);
mfc_debug_leave();
@@ -121,11 +92,7 @@ int s5p_mfc_release_firmware(struct s5p_mfc_dev *dev)
{
/* Before calling this function one has to make sure
* that MFC is no longer processing */
- if (!dev->fw_virt_addr)
- return -EINVAL;
- dma_free_coherent(dev->mem_dev_l, dev->fw_size, dev->fw_virt_addr,
- dev->bank1);
- dev->fw_virt_addr = NULL;
+ s5p_mfc_release_priv_buf(dev, &dev->fw_buf);
return 0;
}
@@ -210,13 +177,18 @@ int s5p_mfc_reset(struct s5p_mfc_dev *dev)
static inline void s5p_mfc_init_memctrl(struct s5p_mfc_dev *dev)
{
if (IS_MFCV6_PLUS(dev)) {
- mfc_write(dev, dev->bank1, S5P_FIMV_RISC_BASE_ADDRESS_V6);
- mfc_debug(2, "Base Address : %pad\n", &dev->bank1);
+ mfc_write(dev, dev->dma_base[BANK_L_CTX],
+ S5P_FIMV_RISC_BASE_ADDRESS_V6);
+ mfc_debug(2, "Base Address : %pad\n",
+ &dev->dma_base[BANK_L_CTX]);
} else {
- mfc_write(dev, dev->bank1, S5P_FIMV_MC_DRAMBASE_ADR_A);
- mfc_write(dev, dev->bank2, S5P_FIMV_MC_DRAMBASE_ADR_B);
+ mfc_write(dev, dev->dma_base[BANK_L_CTX],
+ S5P_FIMV_MC_DRAMBASE_ADR_A);
+ mfc_write(dev, dev->dma_base[BANK_R_CTX],
+ S5P_FIMV_MC_DRAMBASE_ADR_B);
mfc_debug(2, "Bank1: %pad, Bank2: %pad\n",
- &dev->bank1, &dev->bank2);
+ &dev->dma_base[BANK_L_CTX],
+ &dev->dma_base[BANK_R_CTX]);
}
}
@@ -240,7 +212,7 @@ int s5p_mfc_init_hw(struct s5p_mfc_dev *dev)
int ret;
mfc_debug_enter();
- if (!dev->fw_virt_addr) {
+ if (!dev->fw_buf.virt) {
mfc_err("Firmware memory is not allocated.\n");
return -EINVAL;
}
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.h b/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.h
index 8e5df041edf7..45c807bf19cc 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.h
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.h
@@ -18,7 +18,6 @@
int s5p_mfc_release_firmware(struct s5p_mfc_dev *dev);
int s5p_mfc_alloc_firmware(struct s5p_mfc_dev *dev);
int s5p_mfc_load_firmware(struct s5p_mfc_dev *dev);
-int s5p_mfc_reload_firmware(struct s5p_mfc_dev *dev);
int s5p_mfc_init_hw(struct s5p_mfc_dev *dev);
void s5p_mfc_deinit_hw(struct s5p_mfc_dev *dev);
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c b/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
index 367ef8e8dbf0..8937b0af7cb3 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
@@ -931,14 +931,14 @@ static int s5p_mfc_queue_setup(struct vb2_queue *vq,
psize[1] = ctx->chroma_size;
if (IS_MFCV6_PLUS(dev))
- alloc_devs[0] = ctx->dev->mem_dev_l;
+ alloc_devs[0] = ctx->dev->mem_dev[BANK_L_CTX];
else
- alloc_devs[0] = ctx->dev->mem_dev_r;
- alloc_devs[1] = ctx->dev->mem_dev_l;
+ alloc_devs[0] = ctx->dev->mem_dev[BANK_R_CTX];
+ alloc_devs[1] = ctx->dev->mem_dev[BANK_L_CTX];
} else if (vq->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE &&
ctx->state == MFCINST_INIT) {
psize[0] = ctx->dec_src_buf_size;
- alloc_devs[0] = ctx->dev->mem_dev_l;
+ alloc_devs[0] = ctx->dev->mem_dev[BANK_L_CTX];
} else {
mfc_err("This video node is dedicated to decoding. Decoding not initialized\n");
return -EINVAL;
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
index e39d9e06e299..2a5fd7c42cd5 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
@@ -1832,7 +1832,7 @@ static int s5p_mfc_queue_setup(struct vb2_queue *vq,
if (*buf_count > MFC_MAX_BUFFERS)
*buf_count = MFC_MAX_BUFFERS;
psize[0] = ctx->enc_dst_buf_size;
- alloc_devs[0] = ctx->dev->mem_dev_l;
+ alloc_devs[0] = ctx->dev->mem_dev[BANK_L_CTX];
} else if (vq->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
if (ctx->src_fmt)
*plane_count = ctx->src_fmt->num_planes;
@@ -1848,11 +1848,11 @@ static int s5p_mfc_queue_setup(struct vb2_queue *vq,
psize[1] = ctx->chroma_size;
if (IS_MFCV6_PLUS(dev)) {
- alloc_devs[0] = ctx->dev->mem_dev_l;
- alloc_devs[1] = ctx->dev->mem_dev_l;
+ alloc_devs[0] = ctx->dev->mem_dev[BANK_L_CTX];
+ alloc_devs[1] = ctx->dev->mem_dev[BANK_L_CTX];
} else {
- alloc_devs[0] = ctx->dev->mem_dev_r;
- alloc_devs[1] = ctx->dev->mem_dev_r;
+ alloc_devs[0] = ctx->dev->mem_dev[BANK_R_CTX];
+ alloc_devs[1] = ctx->dev->mem_dev[BANK_R_CTX];
}
} else {
mfc_err("invalid queue type: %d\n", vq->type);
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_iommu.h b/drivers/media/platform/s5p-mfc/s5p_mfc_iommu.h
index 6962132ae8fa..76667924ee2a 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_iommu.h
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_iommu.h
@@ -11,54 +11,13 @@
#ifndef S5P_MFC_IOMMU_H_
#define S5P_MFC_IOMMU_H_
-#define S5P_MFC_IOMMU_DMA_BASE 0x20000000lu
-#define S5P_MFC_IOMMU_DMA_SIZE SZ_256M
-
-#if defined(CONFIG_EXYNOS_IOMMU) && defined(CONFIG_ARM_DMA_USE_IOMMU)
-
-#include <asm/dma-iommu.h>
+#if defined(CONFIG_EXYNOS_IOMMU)
static inline bool exynos_is_iommu_available(struct device *dev)
{
return dev->archdata.iommu != NULL;
}
-static inline void exynos_unconfigure_iommu(struct device *dev)
-{
- struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
-
- arm_iommu_detach_device(dev);
- arm_iommu_release_mapping(mapping);
-}
-
-static inline int exynos_configure_iommu(struct device *dev,
- unsigned int base, unsigned int size)
-{
- struct dma_iommu_mapping *mapping = NULL;
- int ret;
-
- /* Disable the default mapping created by device core */
- if (to_dma_iommu_mapping(dev))
- exynos_unconfigure_iommu(dev);
-
- mapping = arm_iommu_create_mapping(dev->bus, base, size);
- if (IS_ERR(mapping)) {
- pr_warn("Failed to create IOMMU mapping for device %s\n",
- dev_name(dev));
- return PTR_ERR(mapping);
- }
-
- ret = arm_iommu_attach_device(dev, mapping);
- if (ret) {
- pr_warn("Failed to attached device %s to IOMMU_mapping\n",
- dev_name(dev));
- arm_iommu_release_mapping(mapping);
- return ret;
- }
-
- return 0;
-}
-
#else
static inline bool exynos_is_iommu_available(struct device *dev)
@@ -66,14 +25,6 @@ static inline bool exynos_is_iommu_available(struct device *dev)
return false;
}
-static inline int exynos_configure_iommu(struct device *dev,
- unsigned int base, unsigned int size)
-{
- return -ENOSYS;
-}
-
-static inline void exynos_unconfigure_iommu(struct device *dev) { }
-
#endif
#endif /* S5P_MFC_IOMMU_H_ */
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_opr.c b/drivers/media/platform/s5p-mfc/s5p_mfc_opr.c
index 99f65a92a6be..7f33cf23947f 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_opr.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_opr.c
@@ -37,38 +37,91 @@ void s5p_mfc_init_regs(struct s5p_mfc_dev *dev)
dev->mfc_regs = s5p_mfc_init_regs_v6_plus(dev);
}
-int s5p_mfc_alloc_priv_buf(struct device *dev, dma_addr_t base,
- struct s5p_mfc_priv_buf *b)
+int s5p_mfc_alloc_priv_buf(struct s5p_mfc_dev *dev, unsigned int mem_ctx,
+ struct s5p_mfc_priv_buf *b)
{
+ unsigned int bits = dev->mem_size >> PAGE_SHIFT;
+ unsigned int count = b->size >> PAGE_SHIFT;
+ unsigned int align = (SZ_64K >> PAGE_SHIFT) - 1;
+ unsigned int start, offset;
+
mfc_debug(3, "Allocating priv: %zu\n", b->size);
- b->virt = dma_alloc_coherent(dev, b->size, &b->dma, GFP_KERNEL);
+ if (dev->mem_virt) {
+ start = bitmap_find_next_zero_area(dev->mem_bitmap, bits, 0, count, align);
+ if (start > bits)
+ goto no_mem;
- if (!b->virt) {
- mfc_err("Allocating private buffer of size %zu failed\n",
- b->size);
- return -ENOMEM;
- }
+ bitmap_set(dev->mem_bitmap, start, count);
+ offset = start << PAGE_SHIFT;
+ b->virt = dev->mem_virt + offset;
+ b->dma = dev->mem_base + offset;
+ } else {
+ struct device *mem_dev = dev->mem_dev[mem_ctx];
+ dma_addr_t base = dev->dma_base[mem_ctx];
- if (b->dma < base) {
- mfc_err("Invalid memory configuration - buffer (%pad) is below base memory address(%pad)\n",
- &b->dma, &base);
- dma_free_coherent(dev, b->size, b->virt, b->dma);
- return -ENOMEM;
+ b->ctx = mem_ctx;
+ b->virt = dma_alloc_coherent(mem_dev, b->size, &b->dma, GFP_KERNEL);
+ if (!b->virt)
+ goto no_mem;
+ if (b->dma < base) {
+ mfc_err("Invalid memory configuration - buffer (%pad) is below base memory address(%pad)\n",
+ &b->dma, &base);
+ dma_free_coherent(mem_dev, b->size, b->virt, b->dma);
+ return -ENOMEM;
+ }
}
mfc_debug(3, "Allocated addr %p %pad\n", b->virt, &b->dma);
return 0;
+no_mem:
+ mfc_err("Allocating private buffer of size %zu failed\n", b->size);
+ return -ENOMEM;
+}
+
+int s5p_mfc_alloc_generic_buf(struct s5p_mfc_dev *dev, unsigned int mem_ctx,
+ struct s5p_mfc_priv_buf *b)
+{
+ struct device *mem_dev = dev->mem_dev[mem_ctx];
+
+ mfc_debug(3, "Allocating generic buf: %zu\n", b->size);
+
+ b->ctx = mem_ctx;
+ b->virt = dma_alloc_coherent(mem_dev, b->size, &b->dma, GFP_KERNEL);
+ if (!b->virt)
+ goto no_mem;
+
+ mfc_debug(3, "Allocated addr %p %pad\n", b->virt, &b->dma);
+ return 0;
+no_mem:
+ mfc_err("Allocating generic buffer of size %zu failed\n", b->size);
+ return -ENOMEM;
}
-void s5p_mfc_release_priv_buf(struct device *dev,
- struct s5p_mfc_priv_buf *b)
+void s5p_mfc_release_priv_buf(struct s5p_mfc_dev *dev,
+ struct s5p_mfc_priv_buf *b)
{
- if (b->virt) {
- dma_free_coherent(dev, b->size, b->virt, b->dma);
- b->virt = NULL;
- b->dma = 0;
- b->size = 0;
+ if (dev->mem_virt) {
+ unsigned int start = (b->dma - dev->mem_base) >> PAGE_SHIFT;
+ unsigned int count = b->size >> PAGE_SHIFT;
+
+ bitmap_clear(dev->mem_bitmap, start, count);
+ } else {
+ struct device *mem_dev = dev->mem_dev[b->ctx];
+
+ dma_free_coherent(mem_dev, b->size, b->virt, b->dma);
}
+ b->virt = NULL;
+ b->dma = 0;
+ b->size = 0;
}
+void s5p_mfc_release_generic_buf(struct s5p_mfc_dev *dev,
+ struct s5p_mfc_priv_buf *b)
+{
+ struct device *mem_dev = dev->mem_dev[b->ctx];
+ dma_free_coherent(mem_dev, b->size, b->virt, b->dma);
+ b->virt = NULL;
+ b->dma = 0;
+ b->size = 0;
+}
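Note: when dev->mem_virt is set, the new s5p_mfc_alloc_priv_buf() above carves buffers out of a single preallocated pool with a page-granular bitmap instead of calling dma_alloc_coherent() per buffer. A standalone sketch of that bitmap pattern follows; struct mem_pool is hypothetical and sizes are assumed page-aligned.

	#include <linux/bitmap.h>
	#include <linux/errno.h>
	#include <linux/mm.h>		/* PAGE_SHIFT */
	#include <linux/types.h>

	struct mem_pool {			/* hypothetical pool descriptor */
		void *virt;			/* kernel address of the pool */
		dma_addr_t base;		/* bus address of the pool */
		size_t size;			/* pool size, page aligned */
		unsigned long *bitmap;		/* one bit per page */
	};

	/* "size" and "align" are assumed to be multiples of PAGE_SIZE */
	static int pool_alloc(struct mem_pool *p, size_t size, size_t align,
			      void **virt, dma_addr_t *dma)
	{
		unsigned int bits = p->size >> PAGE_SHIFT;
		unsigned int count = size >> PAGE_SHIFT;
		unsigned long align_mask = (align >> PAGE_SHIFT) - 1;
		unsigned long start;

		start = bitmap_find_next_zero_area(p->bitmap, bits, 0, count,
						   align_mask);
		if (start >= bits)		/* no free run of "count" pages */
			return -ENOMEM;

		bitmap_set(p->bitmap, start, count);
		*virt = p->virt + (start << PAGE_SHIFT);
		*dma = p->base + ((dma_addr_t)start << PAGE_SHIFT);
		return 0;
	}

	static void pool_free(struct mem_pool *p, dma_addr_t dma, size_t size)
	{
		bitmap_clear(p->bitmap, (dma - p->base) >> PAGE_SHIFT,
			     size >> PAGE_SHIFT);
	}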
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_opr.h b/drivers/media/platform/s5p-mfc/s5p_mfc_opr.h
index b6ac417ab63e..16d553fcff08 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_opr.h
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_opr.h
@@ -315,10 +315,14 @@ struct s5p_mfc_hw_ops {
void s5p_mfc_init_hw_ops(struct s5p_mfc_dev *dev);
void s5p_mfc_init_regs(struct s5p_mfc_dev *dev);
-int s5p_mfc_alloc_priv_buf(struct device *dev, dma_addr_t base,
- struct s5p_mfc_priv_buf *b);
-void s5p_mfc_release_priv_buf(struct device *dev,
- struct s5p_mfc_priv_buf *b);
+int s5p_mfc_alloc_priv_buf(struct s5p_mfc_dev *dev, unsigned int mem_ctx,
+ struct s5p_mfc_priv_buf *b);
+void s5p_mfc_release_priv_buf(struct s5p_mfc_dev *dev,
+ struct s5p_mfc_priv_buf *b);
+int s5p_mfc_alloc_generic_buf(struct s5p_mfc_dev *dev, unsigned int mem_ctx,
+ struct s5p_mfc_priv_buf *b);
+void s5p_mfc_release_generic_buf(struct s5p_mfc_dev *dev,
+ struct s5p_mfc_priv_buf *b);
#endif /* S5P_MFC_OPR_H_ */
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c
index f4301d5bbd32..b41ee608c171 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c
@@ -30,8 +30,8 @@
#include <linux/mm.h>
#include <linux/sched.h>
-#define OFFSETA(x) (((x) - dev->bank1) >> MFC_OFFSET_SHIFT)
-#define OFFSETB(x) (((x) - dev->bank2) >> MFC_OFFSET_SHIFT)
+#define OFFSETA(x) (((x) - dev->dma_base[BANK_L_CTX]) >> MFC_OFFSET_SHIFT)
+#define OFFSETB(x) (((x) - dev->dma_base[BANK_R_CTX]) >> MFC_OFFSET_SHIFT)
/* Allocate temporary buffers for decoding */
static int s5p_mfc_alloc_dec_temp_buffers_v5(struct s5p_mfc_ctx *ctx)
@@ -41,7 +41,7 @@ static int s5p_mfc_alloc_dec_temp_buffers_v5(struct s5p_mfc_ctx *ctx)
int ret;
ctx->dsc.size = buf_size->dsc;
- ret = s5p_mfc_alloc_priv_buf(dev->mem_dev_l, dev->bank1, &ctx->dsc);
+ ret = s5p_mfc_alloc_priv_buf(dev, BANK_L_CTX, &ctx->dsc);
if (ret) {
mfc_err("Failed to allocate temporary buffer\n");
return ret;
@@ -57,7 +57,7 @@ static int s5p_mfc_alloc_dec_temp_buffers_v5(struct s5p_mfc_ctx *ctx)
/* Release temporary buffers for decoding */
static void s5p_mfc_release_dec_desc_buffer_v5(struct s5p_mfc_ctx *ctx)
{
- s5p_mfc_release_priv_buf(ctx->dev->mem_dev_l, &ctx->dsc);
+ s5p_mfc_release_priv_buf(ctx->dev, &ctx->dsc);
}
/* Allocate codec buffers */
@@ -172,8 +172,7 @@ static int s5p_mfc_alloc_codec_buffers_v5(struct s5p_mfc_ctx *ctx)
/* Allocate only if memory from bank 1 is necessary */
if (ctx->bank1.size > 0) {
- ret = s5p_mfc_alloc_priv_buf(dev->mem_dev_l, dev->bank1,
- &ctx->bank1);
+ ret = s5p_mfc_alloc_priv_buf(dev, BANK_L_CTX, &ctx->bank1);
if (ret) {
mfc_err("Failed to allocate Bank1 temporary buffer\n");
return ret;
@@ -182,11 +181,10 @@ static int s5p_mfc_alloc_codec_buffers_v5(struct s5p_mfc_ctx *ctx)
}
/* Allocate only if memory from bank 2 is necessary */
if (ctx->bank2.size > 0) {
- ret = s5p_mfc_alloc_priv_buf(dev->mem_dev_r, dev->bank2,
- &ctx->bank2);
+ ret = s5p_mfc_alloc_priv_buf(dev, BANK_R_CTX, &ctx->bank2);
if (ret) {
mfc_err("Failed to allocate Bank2 temporary buffer\n");
- s5p_mfc_release_priv_buf(ctx->dev->mem_dev_l, &ctx->bank1);
+ s5p_mfc_release_priv_buf(ctx->dev, &ctx->bank1);
return ret;
}
BUG_ON(ctx->bank2.dma & ((1 << MFC_BANK2_ALIGN_ORDER) - 1));
@@ -197,8 +195,8 @@ static int s5p_mfc_alloc_codec_buffers_v5(struct s5p_mfc_ctx *ctx)
/* Release buffers allocated for codec */
static void s5p_mfc_release_codec_buffers_v5(struct s5p_mfc_ctx *ctx)
{
- s5p_mfc_release_priv_buf(ctx->dev->mem_dev_l, &ctx->bank1);
- s5p_mfc_release_priv_buf(ctx->dev->mem_dev_r, &ctx->bank2);
+ s5p_mfc_release_priv_buf(ctx->dev, &ctx->bank1);
+ s5p_mfc_release_priv_buf(ctx->dev, &ctx->bank2);
}
/* Allocate memory for instance data buffer */
@@ -214,7 +212,7 @@ static int s5p_mfc_alloc_instance_buffer_v5(struct s5p_mfc_ctx *ctx)
else
ctx->ctx.size = buf_size->non_h264_ctx;
- ret = s5p_mfc_alloc_priv_buf(dev->mem_dev_l, dev->bank1, &ctx->ctx);
+ ret = s5p_mfc_alloc_priv_buf(dev, BANK_L_CTX, &ctx->ctx);
if (ret) {
mfc_err("Failed to allocate instance buffer\n");
return ret;
@@ -227,15 +225,15 @@ static int s5p_mfc_alloc_instance_buffer_v5(struct s5p_mfc_ctx *ctx)
/* Initialize shared memory */
ctx->shm.size = buf_size->shm;
- ret = s5p_mfc_alloc_priv_buf(dev->mem_dev_l, dev->bank1, &ctx->shm);
+ ret = s5p_mfc_alloc_priv_buf(dev, BANK_L_CTX, &ctx->shm);
if (ret) {
mfc_err("Failed to allocate shared memory buffer\n");
- s5p_mfc_release_priv_buf(dev->mem_dev_l, &ctx->ctx);
+ s5p_mfc_release_priv_buf(dev, &ctx->ctx);
return ret;
}
/* shared memory offset only keeps the offset from base (port a) */
- ctx->shm.ofs = ctx->shm.dma - dev->bank1;
+ ctx->shm.ofs = ctx->shm.dma - dev->dma_base[BANK_L_CTX];
BUG_ON(ctx->shm.ofs & ((1 << MFC_BANK1_ALIGN_ORDER) - 1));
memset(ctx->shm.virt, 0, buf_size->shm);
@@ -246,8 +244,8 @@ static int s5p_mfc_alloc_instance_buffer_v5(struct s5p_mfc_ctx *ctx)
/* Release instance buffer */
static void s5p_mfc_release_instance_buffer_v5(struct s5p_mfc_ctx *ctx)
{
- s5p_mfc_release_priv_buf(ctx->dev->mem_dev_l, &ctx->ctx);
- s5p_mfc_release_priv_buf(ctx->dev->mem_dev_l, &ctx->shm);
+ s5p_mfc_release_priv_buf(ctx->dev, &ctx->ctx);
+ s5p_mfc_release_priv_buf(ctx->dev, &ctx->shm);
}
static int s5p_mfc_alloc_dev_context_buffer_v5(struct s5p_mfc_dev *dev)
@@ -534,10 +532,10 @@ static void s5p_mfc_get_enc_frame_buffer_v5(struct s5p_mfc_ctx *ctx,
{
struct s5p_mfc_dev *dev = ctx->dev;
- *y_addr = dev->bank2 + (mfc_read(dev, S5P_FIMV_ENCODED_Y_ADDR)
- << MFC_OFFSET_SHIFT);
- *c_addr = dev->bank2 + (mfc_read(dev, S5P_FIMV_ENCODED_C_ADDR)
- << MFC_OFFSET_SHIFT);
+ *y_addr = dev->dma_base[BANK_R_CTX] +
+ (mfc_read(dev, S5P_FIMV_ENCODED_Y_ADDR) << MFC_OFFSET_SHIFT);
+ *c_addr = dev->dma_base[BANK_R_CTX] +
+ (mfc_read(dev, S5P_FIMV_ENCODED_C_ADDR) << MFC_OFFSET_SHIFT);
}
/* Set encoding ref & codec buffer */
@@ -1214,7 +1212,8 @@ static int s5p_mfc_run_enc_frame(struct s5p_mfc_ctx *ctx)
}
if (list_empty(&ctx->src_queue)) {
/* send null frame */
- s5p_mfc_set_enc_frame_buffer_v5(ctx, dev->bank2, dev->bank2);
+ s5p_mfc_set_enc_frame_buffer_v5(ctx, dev->dma_base[BANK_R_CTX],
+ dev->dma_base[BANK_R_CTX]);
src_mb = NULL;
} else {
src_mb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf,
@@ -1222,8 +1221,9 @@ static int s5p_mfc_run_enc_frame(struct s5p_mfc_ctx *ctx)
src_mb->flags |= MFC_BUF_FLAG_USED;
if (src_mb->b->vb2_buf.planes[0].bytesused == 0) {
/* send null frame */
- s5p_mfc_set_enc_frame_buffer_v5(ctx, dev->bank2,
- dev->bank2);
+ s5p_mfc_set_enc_frame_buffer_v5(ctx,
+ dev->dma_base[BANK_R_CTX],
+ dev->dma_base[BANK_R_CTX]);
ctx->state = MFCINST_FINISHING;
} else {
src_y_addr = vb2_dma_contig_plane_dma_addr(
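Note: the OFFSETA()/OFFSETB() macros above now index dma_base[] by bank context; the hardware is programmed with bank-relative offsets shifted by MFC_OFFSET_SHIFT rather than absolute bus addresses. A small illustration with hypothetical names (the shift value here is an assumption, not taken from the driver):

	#include <linux/types.h>

	enum { BANK_L = 0, BANK_R = 1 };	/* placeholder bank indices */

	#define MY_OFFSET_SHIFT	11		/* assumed granularity: 2 KiB units */

	static u32 addr_to_reg(const dma_addr_t dma_base[], int bank, dma_addr_t addr)
	{
		return (addr - dma_base[bank]) >> MY_OFFSET_SHIFT;
	}

	static dma_addr_t reg_to_addr(const dma_addr_t dma_base[], int bank, u32 reg)
	{
		return dma_base[bank] + ((dma_addr_t)reg << MY_OFFSET_SHIFT);
	}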
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c
index d6f207e859ab..85880e9106be 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c
@@ -239,8 +239,7 @@ static int s5p_mfc_alloc_codec_buffers_v6(struct s5p_mfc_ctx *ctx)
/* Allocate only if memory from bank 1 is necessary */
if (ctx->bank1.size > 0) {
- ret = s5p_mfc_alloc_priv_buf(dev->mem_dev_l, dev->bank1,
- &ctx->bank1);
+ ret = s5p_mfc_alloc_generic_buf(dev, BANK_L_CTX, &ctx->bank1);
if (ret) {
mfc_err("Failed to allocate Bank1 memory\n");
return ret;
@@ -253,7 +252,7 @@ static int s5p_mfc_alloc_codec_buffers_v6(struct s5p_mfc_ctx *ctx)
/* Release buffers allocated for codec */
static void s5p_mfc_release_codec_buffers_v6(struct s5p_mfc_ctx *ctx)
{
- s5p_mfc_release_priv_buf(ctx->dev->mem_dev_l, &ctx->bank1);
+ s5p_mfc_release_generic_buf(ctx->dev, &ctx->bank1);
}
/* Allocate memory for instance data buffer */
@@ -292,7 +291,7 @@ static int s5p_mfc_alloc_instance_buffer_v6(struct s5p_mfc_ctx *ctx)
break;
}
- ret = s5p_mfc_alloc_priv_buf(dev->mem_dev_l, dev->bank1, &ctx->ctx);
+ ret = s5p_mfc_alloc_priv_buf(dev, BANK_L_CTX, &ctx->ctx);
if (ret) {
mfc_err("Failed to allocate instance buffer\n");
return ret;
@@ -309,7 +308,7 @@ static int s5p_mfc_alloc_instance_buffer_v6(struct s5p_mfc_ctx *ctx)
/* Release instance buffer */
static void s5p_mfc_release_instance_buffer_v6(struct s5p_mfc_ctx *ctx)
{
- s5p_mfc_release_priv_buf(ctx->dev->mem_dev_l, &ctx->ctx);
+ s5p_mfc_release_priv_buf(ctx->dev, &ctx->ctx);
}
/* Allocate context buffers for SYS_INIT */
@@ -321,8 +320,7 @@ static int s5p_mfc_alloc_dev_context_buffer_v6(struct s5p_mfc_dev *dev)
mfc_debug_enter();
dev->ctx_buf.size = buf_size->dev_ctx;
- ret = s5p_mfc_alloc_priv_buf(dev->mem_dev_l, dev->bank1,
- &dev->ctx_buf);
+ ret = s5p_mfc_alloc_priv_buf(dev, BANK_L_CTX, &dev->ctx_buf);
if (ret) {
mfc_err("Failed to allocate device context buffer\n");
return ret;
@@ -339,7 +337,7 @@ static int s5p_mfc_alloc_dev_context_buffer_v6(struct s5p_mfc_dev *dev)
/* Release context buffers for SYS_INIT */
static void s5p_mfc_release_dev_context_buffer_v6(struct s5p_mfc_dev *dev)
{
- s5p_mfc_release_priv_buf(dev->mem_dev_l, &dev->ctx_buf);
+ s5p_mfc_release_priv_buf(dev, &dev->ctx_buf);
}
static int calc_plane(int width, int height)
@@ -497,7 +495,7 @@ static int s5p_mfc_set_dec_frame_buffer_v6(struct s5p_mfc_ctx *ctx)
}
}
- mfc_debug(2, "Buf1: %zu, buf_size1: %d (frames %d)\n",
+ mfc_debug(2, "Buf1: %zx, buf_size1: %d (frames %d)\n",
buf_addr1, buf_size1, ctx->total_dpb_count);
if (buf_size1 < 0) {
mfc_debug(2, "Not enough memory has been allocated.\n");
diff --git a/drivers/media/platform/sh_vou.c b/drivers/media/platform/sh_vou.c
index ef2a519bcd4c..992d61a8b961 100644
--- a/drivers/media/platform/sh_vou.c
+++ b/drivers/media/platform/sh_vou.c
@@ -813,8 +813,8 @@ static u32 sh_vou_ntsc_mode(enum sh_vou_bus_fmt bus_fmt)
{
switch (bus_fmt) {
default:
- pr_warning("%s(): Invalid bus-format code %d, using default 8-bit\n",
- __func__, bus_fmt);
+ pr_warn("%s(): Invalid bus-format code %d, using default 8-bit\n",
+ __func__, bus_fmt);
case SH_VOU_BUS_8BIT:
return 1;
case SH_VOU_BUS_16BIT:
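Note: the hunk above replaces pr_warning() with pr_warn(); both expand to the same printk(KERN_WARNING ...), with pr_warn() being the preferred spelling. A trivial usage sketch with placeholder names:

	#include <linux/printk.h>

	static void warn_bad_fmt(const char *who, int fmt)
	{
		/* pr_warn() == printk(KERN_WARNING pr_fmt(...)) */
		pr_warn("%s: invalid bus-format code %d, using 8-bit default\n",
			who, fmt);
	}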
diff --git a/drivers/media/platform/soc_camera/Kconfig b/drivers/media/platform/soc_camera/Kconfig
index 86d74788544f..f5979c12ad61 100644
--- a/drivers/media/platform/soc_camera/Kconfig
+++ b/drivers/media/platform/soc_camera/Kconfig
@@ -1,7 +1,6 @@
config SOC_CAMERA
tristate "SoC camera support"
depends on VIDEO_V4L2 && HAS_DMA && I2C
- select VIDEOBUF_GEN
select VIDEOBUF2_CORE
help
SoC Camera is a common API to several cameras, not connecting
@@ -26,14 +25,3 @@ config VIDEO_SH_MOBILE_CEU
select SOC_CAMERA_SCALE_CROP
---help---
This is a v4l2 driver for the SuperH Mobile CEU Interface
-
-config VIDEO_ATMEL_ISI
- tristate "ATMEL Image Sensor Interface (ISI) support"
- depends on VIDEO_DEV && SOC_CAMERA
- depends on ARCH_AT91 || COMPILE_TEST
- depends on HAS_DMA
- select VIDEOBUF2_DMA_CONTIG
- ---help---
- This module makes the ATMEL Image Sensor Interface available
- as a v4l2 device.
-
diff --git a/drivers/media/platform/soc_camera/Makefile b/drivers/media/platform/soc_camera/Makefile
index 7633a0f2f66f..07a451e8b228 100644
--- a/drivers/media/platform/soc_camera/Makefile
+++ b/drivers/media/platform/soc_camera/Makefile
@@ -6,5 +6,4 @@ obj-$(CONFIG_SOC_CAMERA_SCALE_CROP) += soc_scale_crop.o
obj-$(CONFIG_SOC_CAMERA_PLATFORM) += soc_camera_platform.o
# soc-camera host drivers have to be linked after camera drivers
-obj-$(CONFIG_VIDEO_ATMEL_ISI) += atmel-isi.o
obj-$(CONFIG_VIDEO_SH_MOBILE_CEU) += sh_mobile_ceu_camera.o
diff --git a/drivers/media/platform/soc_camera/atmel-isi.c b/drivers/media/platform/soc_camera/atmel-isi.c
deleted file mode 100644
index 46de657c3e6d..000000000000
--- a/drivers/media/platform/soc_camera/atmel-isi.c
+++ /dev/null
@@ -1,1167 +0,0 @@
-/*
- * Copyright (c) 2011 Atmel Corporation
- * Josh Wu, <josh.wu@atmel.com>
- *
- * Based on previous work by Lars Haring, <lars.haring@atmel.com>
- * and Sedji Gaouaou
- * Based on the bttv driver for Bt848 with respective copyright holders
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/clk.h>
-#include <linux/completion.h>
-#include <linux/delay.h>
-#include <linux/fs.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
-#include <linux/slab.h>
-
-#include <media/soc_camera.h>
-#include <media/drv-intf/soc_mediabus.h>
-#include <media/v4l2-of.h>
-#include <media/videobuf2-dma-contig.h>
-
-#include "atmel-isi.h"
-
-#define MAX_BUFFER_NUM 32
-#define MAX_SUPPORT_WIDTH 2048
-#define MAX_SUPPORT_HEIGHT 2048
-#define VID_LIMIT_BYTES (16 * 1024 * 1024)
-#define MIN_FRAME_RATE 15
-#define FRAME_INTERVAL_MILLI_SEC (1000 / MIN_FRAME_RATE)
-
-/* Frame buffer descriptor */
-struct fbd {
- /* Physical address of the frame buffer */
- u32 fb_address;
- /* DMA Control Register(only in HISI2) */
- u32 dma_ctrl;
- /* Physical address of the next fbd */
- u32 next_fbd_address;
-};
-
-static void set_dma_ctrl(struct fbd *fb_desc, u32 ctrl)
-{
- fb_desc->dma_ctrl = ctrl;
-}
-
-struct isi_dma_desc {
- struct list_head list;
- struct fbd *p_fbd;
- dma_addr_t fbd_phys;
-};
-
-/* Frame buffer data */
-struct frame_buffer {
- struct vb2_v4l2_buffer vb;
- struct isi_dma_desc *p_dma_desc;
- struct list_head list;
-};
-
-struct atmel_isi {
- /* Protects the access of variables shared with the ISR */
- spinlock_t lock;
- void __iomem *regs;
-
- int sequence;
-
- /* Allocate descriptors for dma buffer use */
- struct fbd *p_fb_descriptors;
- dma_addr_t fb_descriptors_phys;
- struct list_head dma_desc_head;
- struct isi_dma_desc dma_desc[MAX_BUFFER_NUM];
- bool enable_preview_path;
-
- struct completion complete;
- /* ISI peripherial clock */
- struct clk *pclk;
- unsigned int irq;
-
- struct isi_platform_data pdata;
- u16 width_flags; /* max 12 bits */
-
- struct list_head video_buffer_list;
- struct frame_buffer *active;
-
- struct soc_camera_host soc_host;
-};
-
-static void isi_writel(struct atmel_isi *isi, u32 reg, u32 val)
-{
- writel(val, isi->regs + reg);
-}
-static u32 isi_readl(struct atmel_isi *isi, u32 reg)
-{
- return readl(isi->regs + reg);
-}
-
-static u32 setup_cfg2_yuv_swap(struct atmel_isi *isi,
- const struct soc_camera_format_xlate *xlate)
-{
- if (xlate->host_fmt->fourcc == V4L2_PIX_FMT_YUYV) {
- /* all convert to YUYV */
- switch (xlate->code) {
- case MEDIA_BUS_FMT_VYUY8_2X8:
- return ISI_CFG2_YCC_SWAP_MODE_3;
- case MEDIA_BUS_FMT_UYVY8_2X8:
- return ISI_CFG2_YCC_SWAP_MODE_2;
- case MEDIA_BUS_FMT_YVYU8_2X8:
- return ISI_CFG2_YCC_SWAP_MODE_1;
- }
- } else if (xlate->host_fmt->fourcc == V4L2_PIX_FMT_RGB565) {
- /*
- * Preview path is enabled, it will convert UYVY to RGB format.
- * But if sensor output format is not UYVY, we need to set
- * YCC_SWAP_MODE to convert it as UYVY.
- */
- switch (xlate->code) {
- case MEDIA_BUS_FMT_VYUY8_2X8:
- return ISI_CFG2_YCC_SWAP_MODE_1;
- case MEDIA_BUS_FMT_YUYV8_2X8:
- return ISI_CFG2_YCC_SWAP_MODE_2;
- case MEDIA_BUS_FMT_YVYU8_2X8:
- return ISI_CFG2_YCC_SWAP_MODE_3;
- }
- }
-
- /*
- * By default, no swap for the codec path of Atmel ISI. So codec
- * output is same as sensor's output.
- * For instance, if sensor's output is YUYV, then codec outputs YUYV.
- * And if sensor's output is UYVY, then codec outputs UYVY.
- */
- return ISI_CFG2_YCC_SWAP_DEFAULT;
-}
-
-static void configure_geometry(struct atmel_isi *isi, u32 width,
- u32 height, const struct soc_camera_format_xlate *xlate)
-{
- u32 cfg2, psize;
- u32 fourcc = xlate->host_fmt->fourcc;
-
- isi->enable_preview_path = fourcc == V4L2_PIX_FMT_RGB565 ||
- fourcc == V4L2_PIX_FMT_RGB32;
-
- /* According to sensor's output format to set cfg2 */
- switch (xlate->code) {
- default:
- /* Grey */
- case MEDIA_BUS_FMT_Y8_1X8:
- cfg2 = ISI_CFG2_GRAYSCALE | ISI_CFG2_COL_SPACE_YCbCr;
- break;
- /* YUV */
- case MEDIA_BUS_FMT_VYUY8_2X8:
- case MEDIA_BUS_FMT_UYVY8_2X8:
- case MEDIA_BUS_FMT_YVYU8_2X8:
- case MEDIA_BUS_FMT_YUYV8_2X8:
- cfg2 = ISI_CFG2_COL_SPACE_YCbCr |
- setup_cfg2_yuv_swap(isi, xlate);
- break;
- /* RGB, TODO */
- }
-
- isi_writel(isi, ISI_CTRL, ISI_CTRL_DIS);
- /* Set width */
- cfg2 |= ((width - 1) << ISI_CFG2_IM_HSIZE_OFFSET) &
- ISI_CFG2_IM_HSIZE_MASK;
- /* Set height */
- cfg2 |= ((height - 1) << ISI_CFG2_IM_VSIZE_OFFSET)
- & ISI_CFG2_IM_VSIZE_MASK;
- isi_writel(isi, ISI_CFG2, cfg2);
-
- /* No down sampling, preview size equal to sensor output size */
- psize = ((width - 1) << ISI_PSIZE_PREV_HSIZE_OFFSET) &
- ISI_PSIZE_PREV_HSIZE_MASK;
- psize |= ((height - 1) << ISI_PSIZE_PREV_VSIZE_OFFSET) &
- ISI_PSIZE_PREV_VSIZE_MASK;
- isi_writel(isi, ISI_PSIZE, psize);
- isi_writel(isi, ISI_PDECF, ISI_PDECF_NO_SAMPLING);
-
- return;
-}
-
-static bool is_supported(struct soc_camera_device *icd,
- const u32 pixformat)
-{
- switch (pixformat) {
- /* YUV, including grey */
- case V4L2_PIX_FMT_GREY:
- case V4L2_PIX_FMT_YUYV:
- case V4L2_PIX_FMT_UYVY:
- case V4L2_PIX_FMT_YVYU:
- case V4L2_PIX_FMT_VYUY:
- /* RGB */
- case V4L2_PIX_FMT_RGB565:
- return true;
- default:
- return false;
- }
-}
-
-static irqreturn_t atmel_isi_handle_streaming(struct atmel_isi *isi)
-{
- if (isi->active) {
- struct vb2_v4l2_buffer *vbuf = &isi->active->vb;
- struct frame_buffer *buf = isi->active;
-
- list_del_init(&buf->list);
- vbuf->vb2_buf.timestamp = ktime_get_ns();
- vbuf->sequence = isi->sequence++;
- vb2_buffer_done(&vbuf->vb2_buf, VB2_BUF_STATE_DONE);
- }
-
- if (list_empty(&isi->video_buffer_list)) {
- isi->active = NULL;
- } else {
- /* start next dma frame. */
- isi->active = list_entry(isi->video_buffer_list.next,
- struct frame_buffer, list);
- if (!isi->enable_preview_path) {
- isi_writel(isi, ISI_DMA_C_DSCR,
- (u32)isi->active->p_dma_desc->fbd_phys);
- isi_writel(isi, ISI_DMA_C_CTRL,
- ISI_DMA_CTRL_FETCH | ISI_DMA_CTRL_DONE);
- isi_writel(isi, ISI_DMA_CHER, ISI_DMA_CHSR_C_CH);
- } else {
- isi_writel(isi, ISI_DMA_P_DSCR,
- (u32)isi->active->p_dma_desc->fbd_phys);
- isi_writel(isi, ISI_DMA_P_CTRL,
- ISI_DMA_CTRL_FETCH | ISI_DMA_CTRL_DONE);
- isi_writel(isi, ISI_DMA_CHER, ISI_DMA_CHSR_P_CH);
- }
- }
- return IRQ_HANDLED;
-}
-
-/* ISI interrupt service routine */
-static irqreturn_t isi_interrupt(int irq, void *dev_id)
-{
- struct atmel_isi *isi = dev_id;
- u32 status, mask, pending;
- irqreturn_t ret = IRQ_NONE;
-
- spin_lock(&isi->lock);
-
- status = isi_readl(isi, ISI_STATUS);
- mask = isi_readl(isi, ISI_INTMASK);
- pending = status & mask;
-
- if (pending & ISI_CTRL_SRST) {
- complete(&isi->complete);
- isi_writel(isi, ISI_INTDIS, ISI_CTRL_SRST);
- ret = IRQ_HANDLED;
- } else if (pending & ISI_CTRL_DIS) {
- complete(&isi->complete);
- isi_writel(isi, ISI_INTDIS, ISI_CTRL_DIS);
- ret = IRQ_HANDLED;
- } else {
- if (likely(pending & ISI_SR_CXFR_DONE) ||
- likely(pending & ISI_SR_PXFR_DONE))
- ret = atmel_isi_handle_streaming(isi);
- }
-
- spin_unlock(&isi->lock);
- return ret;
-}
-
-#define WAIT_ISI_RESET 1
-#define WAIT_ISI_DISABLE 0
-static int atmel_isi_wait_status(struct atmel_isi *isi, int wait_reset)
-{
- unsigned long timeout;
- /*
- * The reset or disable will only succeed if we have a
- * pixel clock from the camera.
- */
- init_completion(&isi->complete);
-
- if (wait_reset) {
- isi_writel(isi, ISI_INTEN, ISI_CTRL_SRST);
- isi_writel(isi, ISI_CTRL, ISI_CTRL_SRST);
- } else {
- isi_writel(isi, ISI_INTEN, ISI_CTRL_DIS);
- isi_writel(isi, ISI_CTRL, ISI_CTRL_DIS);
- }
-
- timeout = wait_for_completion_timeout(&isi->complete,
- msecs_to_jiffies(500));
- if (timeout == 0)
- return -ETIMEDOUT;
-
- return 0;
-}
-
-/* ------------------------------------------------------------------
- Videobuf operations
- ------------------------------------------------------------------*/
-static int queue_setup(struct vb2_queue *vq,
- unsigned int *nbuffers, unsigned int *nplanes,
- unsigned int sizes[], struct device *alloc_devs[])
-{
- struct soc_camera_device *icd = soc_camera_from_vb2q(vq);
- struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
- struct atmel_isi *isi = ici->priv;
- unsigned long size;
-
- size = icd->sizeimage;
-
- if (!*nbuffers || *nbuffers > MAX_BUFFER_NUM)
- *nbuffers = MAX_BUFFER_NUM;
-
- if (size * *nbuffers > VID_LIMIT_BYTES)
- *nbuffers = VID_LIMIT_BYTES / size;
-
- *nplanes = 1;
- sizes[0] = size;
-
- isi->sequence = 0;
- isi->active = NULL;
-
- dev_dbg(icd->parent, "%s, count=%d, size=%ld\n", __func__,
- *nbuffers, size);
-
- return 0;
-}
-
-static int buffer_init(struct vb2_buffer *vb)
-{
- struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
- struct frame_buffer *buf = container_of(vbuf, struct frame_buffer, vb);
-
- buf->p_dma_desc = NULL;
- INIT_LIST_HEAD(&buf->list);
-
- return 0;
-}
-
-static int buffer_prepare(struct vb2_buffer *vb)
-{
- struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
- struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue);
- struct frame_buffer *buf = container_of(vbuf, struct frame_buffer, vb);
- struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
- struct atmel_isi *isi = ici->priv;
- unsigned long size;
- struct isi_dma_desc *desc;
-
- size = icd->sizeimage;
-
- if (vb2_plane_size(vb, 0) < size) {
- dev_err(icd->parent, "%s data will not fit into plane (%lu < %lu)\n",
- __func__, vb2_plane_size(vb, 0), size);
- return -EINVAL;
- }
-
- vb2_set_plane_payload(vb, 0, size);
-
- if (!buf->p_dma_desc) {
- if (list_empty(&isi->dma_desc_head)) {
- dev_err(icd->parent, "Not enough dma descriptors.\n");
- return -EINVAL;
- } else {
- /* Get an available descriptor */
- desc = list_entry(isi->dma_desc_head.next,
- struct isi_dma_desc, list);
- /* Delete the descriptor since now it is used */
- list_del_init(&desc->list);
-
- /* Initialize the dma descriptor */
- desc->p_fbd->fb_address =
- vb2_dma_contig_plane_dma_addr(vb, 0);
- desc->p_fbd->next_fbd_address = 0;
- set_dma_ctrl(desc->p_fbd, ISI_DMA_CTRL_WB);
-
- buf->p_dma_desc = desc;
- }
- }
- return 0;
-}
-
-static void buffer_cleanup(struct vb2_buffer *vb)
-{
- struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
- struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue);
- struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
- struct atmel_isi *isi = ici->priv;
- struct frame_buffer *buf = container_of(vbuf, struct frame_buffer, vb);
-
- /* This descriptor is available now and we add to head list */
- if (buf->p_dma_desc)
- list_add(&buf->p_dma_desc->list, &isi->dma_desc_head);
-}
-
-static void start_dma(struct atmel_isi *isi, struct frame_buffer *buffer)
-{
- u32 ctrl, cfg1;
-
- cfg1 = isi_readl(isi, ISI_CFG1);
- /* Enable irq: cxfr for the codec path, pxfr for the preview path */
- isi_writel(isi, ISI_INTEN,
- ISI_SR_CXFR_DONE | ISI_SR_PXFR_DONE);
-
- /* Check if already in a frame */
- if (!isi->enable_preview_path) {
- if (isi_readl(isi, ISI_STATUS) & ISI_CTRL_CDC) {
- dev_err(isi->soc_host.icd->parent, "Already in frame handling.\n");
- return;
- }
-
- isi_writel(isi, ISI_DMA_C_DSCR,
- (u32)buffer->p_dma_desc->fbd_phys);
- isi_writel(isi, ISI_DMA_C_CTRL,
- ISI_DMA_CTRL_FETCH | ISI_DMA_CTRL_DONE);
- isi_writel(isi, ISI_DMA_CHER, ISI_DMA_CHSR_C_CH);
- } else {
- isi_writel(isi, ISI_DMA_P_DSCR,
- (u32)buffer->p_dma_desc->fbd_phys);
- isi_writel(isi, ISI_DMA_P_CTRL,
- ISI_DMA_CTRL_FETCH | ISI_DMA_CTRL_DONE);
- isi_writel(isi, ISI_DMA_CHER, ISI_DMA_CHSR_P_CH);
- }
-
- cfg1 &= ~ISI_CFG1_FRATE_DIV_MASK;
- /* Enable linked list */
- cfg1 |= isi->pdata.frate | ISI_CFG1_DISCR;
-
- /* Enable ISI */
- ctrl = ISI_CTRL_EN;
-
- if (!isi->enable_preview_path)
- ctrl |= ISI_CTRL_CDC;
-
- isi_writel(isi, ISI_CTRL, ctrl);
- isi_writel(isi, ISI_CFG1, cfg1);
-}
-
-static void buffer_queue(struct vb2_buffer *vb)
-{
- struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
- struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue);
- struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
- struct atmel_isi *isi = ici->priv;
- struct frame_buffer *buf = container_of(vbuf, struct frame_buffer, vb);
- unsigned long flags = 0;
-
- spin_lock_irqsave(&isi->lock, flags);
- list_add_tail(&buf->list, &isi->video_buffer_list);
-
- if (isi->active == NULL) {
- isi->active = buf;
- if (vb2_is_streaming(vb->vb2_queue))
- start_dma(isi, buf);
- }
- spin_unlock_irqrestore(&isi->lock, flags);
-}
-
-static int start_streaming(struct vb2_queue *vq, unsigned int count)
-{
- struct soc_camera_device *icd = soc_camera_from_vb2q(vq);
- struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
- struct atmel_isi *isi = ici->priv;
- int ret;
-
- pm_runtime_get_sync(ici->v4l2_dev.dev);
-
- /* Reset ISI */
- ret = atmel_isi_wait_status(isi, WAIT_ISI_RESET);
- if (ret < 0) {
- dev_err(icd->parent, "Reset ISI timed out\n");
- pm_runtime_put(ici->v4l2_dev.dev);
- return ret;
- }
- /* Disable all interrupts */
- isi_writel(isi, ISI_INTDIS, (u32)~0UL);
-
- configure_geometry(isi, icd->user_width, icd->user_height,
- icd->current_fmt);
-
- spin_lock_irq(&isi->lock);
- /* Clear any pending interrupt */
- isi_readl(isi, ISI_STATUS);
-
- if (count)
- start_dma(isi, isi->active);
- spin_unlock_irq(&isi->lock);
-
- return 0;
-}
-
-/* abort streaming and wait for last buffer */
-static void stop_streaming(struct vb2_queue *vq)
-{
- struct soc_camera_device *icd = soc_camera_from_vb2q(vq);
- struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
- struct atmel_isi *isi = ici->priv;
- struct frame_buffer *buf, *node;
- int ret = 0;
- unsigned long timeout;
-
- spin_lock_irq(&isi->lock);
- isi->active = NULL;
- /* Release all active buffers */
- list_for_each_entry_safe(buf, node, &isi->video_buffer_list, list) {
- list_del_init(&buf->list);
- vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
- }
- spin_unlock_irq(&isi->lock);
-
- if (!isi->enable_preview_path) {
- timeout = jiffies + FRAME_INTERVAL_MILLI_SEC * HZ;
- /* Wait until the end of the current frame. */
- while ((isi_readl(isi, ISI_STATUS) & ISI_CTRL_CDC) &&
- time_before(jiffies, timeout))
- msleep(1);
-
- if (time_after(jiffies, timeout))
- dev_err(icd->parent,
- "Timeout waiting for finishing codec request\n");
- }
-
- /* Disable interrupts */
- isi_writel(isi, ISI_INTDIS,
- ISI_SR_CXFR_DONE | ISI_SR_PXFR_DONE);
-
- /* Disable ISI and wait for it is done */
- ret = atmel_isi_wait_status(isi, WAIT_ISI_DISABLE);
- if (ret < 0)
- dev_err(icd->parent, "Disable ISI timed out\n");
-
- pm_runtime_put(ici->v4l2_dev.dev);
-}
-
-static const struct vb2_ops isi_video_qops = {
- .queue_setup = queue_setup,
- .buf_init = buffer_init,
- .buf_prepare = buffer_prepare,
- .buf_cleanup = buffer_cleanup,
- .buf_queue = buffer_queue,
- .start_streaming = start_streaming,
- .stop_streaming = stop_streaming,
- .wait_prepare = vb2_ops_wait_prepare,
- .wait_finish = vb2_ops_wait_finish,
-};
-
-/* ------------------------------------------------------------------
- SOC camera operations for the device
- ------------------------------------------------------------------*/
-static int isi_camera_init_videobuf(struct vb2_queue *q,
- struct soc_camera_device *icd)
-{
- struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
-
- q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- q->io_modes = VB2_MMAP;
- q->drv_priv = icd;
- q->buf_struct_size = sizeof(struct frame_buffer);
- q->ops = &isi_video_qops;
- q->mem_ops = &vb2_dma_contig_memops;
- q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
- q->lock = &ici->host_lock;
- q->dev = ici->v4l2_dev.dev;
-
- return vb2_queue_init(q);
-}
-
-static int isi_camera_set_fmt(struct soc_camera_device *icd,
- struct v4l2_format *f)
-{
- struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
- const struct soc_camera_format_xlate *xlate;
- struct v4l2_pix_format *pix = &f->fmt.pix;
- struct v4l2_subdev_format format = {
- .which = V4L2_SUBDEV_FORMAT_ACTIVE,
- };
- struct v4l2_mbus_framefmt *mf = &format.format;
- int ret;
-
- /* check with atmel-isi support format, if not support use YUYV */
- if (!is_supported(icd, pix->pixelformat))
- pix->pixelformat = V4L2_PIX_FMT_YUYV;
-
- xlate = soc_camera_xlate_by_fourcc(icd, pix->pixelformat);
- if (!xlate) {
- dev_warn(icd->parent, "Format %x not found\n",
- pix->pixelformat);
- return -EINVAL;
- }
-
- dev_dbg(icd->parent, "Plan to set format %dx%d\n",
- pix->width, pix->height);
-
- mf->width = pix->width;
- mf->height = pix->height;
- mf->field = pix->field;
- mf->colorspace = pix->colorspace;
- mf->code = xlate->code;
-
- ret = v4l2_subdev_call(sd, pad, set_fmt, NULL, &format);
- if (ret < 0)
- return ret;
-
- if (mf->code != xlate->code)
- return -EINVAL;
-
- pix->width = mf->width;
- pix->height = mf->height;
- pix->field = mf->field;
- pix->colorspace = mf->colorspace;
- icd->current_fmt = xlate;
-
- dev_dbg(icd->parent, "Finally set format %dx%d\n",
- pix->width, pix->height);
-
- return ret;
-}
-
-static int isi_camera_try_fmt(struct soc_camera_device *icd,
- struct v4l2_format *f)
-{
- struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
- const struct soc_camera_format_xlate *xlate;
- struct v4l2_pix_format *pix = &f->fmt.pix;
- struct v4l2_subdev_pad_config pad_cfg;
- struct v4l2_subdev_format format = {
- .which = V4L2_SUBDEV_FORMAT_TRY,
- };
- struct v4l2_mbus_framefmt *mf = &format.format;
- u32 pixfmt = pix->pixelformat;
- int ret;
-
- /* check with atmel-isi support format, if not support use YUYV */
- if (!is_supported(icd, pix->pixelformat))
- pix->pixelformat = V4L2_PIX_FMT_YUYV;
-
- xlate = soc_camera_xlate_by_fourcc(icd, pixfmt);
- if (pixfmt && !xlate) {
- dev_warn(icd->parent, "Format %x not found\n", pixfmt);
- return -EINVAL;
- }
-
- /* limit to Atmel ISI hardware capabilities */
- if (pix->height > MAX_SUPPORT_HEIGHT)
- pix->height = MAX_SUPPORT_HEIGHT;
- if (pix->width > MAX_SUPPORT_WIDTH)
- pix->width = MAX_SUPPORT_WIDTH;
-
- /* limit to sensor capabilities */
- mf->width = pix->width;
- mf->height = pix->height;
- mf->field = pix->field;
- mf->colorspace = pix->colorspace;
- mf->code = xlate->code;
-
- ret = v4l2_subdev_call(sd, pad, set_fmt, &pad_cfg, &format);
- if (ret < 0)
- return ret;
-
- pix->width = mf->width;
- pix->height = mf->height;
- pix->colorspace = mf->colorspace;
-
- switch (mf->field) {
- case V4L2_FIELD_ANY:
- pix->field = V4L2_FIELD_NONE;
- break;
- case V4L2_FIELD_NONE:
- break;
- default:
- dev_err(icd->parent, "Field type %d unsupported.\n",
- mf->field);
- ret = -EINVAL;
- }
-
- return ret;
-}
-
-static const struct soc_mbus_pixelfmt isi_camera_formats[] = {
- {
- .fourcc = V4L2_PIX_FMT_YUYV,
- .name = "Packed YUV422 16 bit",
- .bits_per_sample = 8,
- .packing = SOC_MBUS_PACKING_2X8_PADHI,
- .order = SOC_MBUS_ORDER_LE,
- .layout = SOC_MBUS_LAYOUT_PACKED,
- },
- {
- .fourcc = V4L2_PIX_FMT_RGB565,
- .name = "RGB565",
- .bits_per_sample = 8,
- .packing = SOC_MBUS_PACKING_2X8_PADHI,
- .order = SOC_MBUS_ORDER_LE,
- .layout = SOC_MBUS_LAYOUT_PACKED,
- },
-};
-
-/* This will be corrected as we get more formats */
-static bool isi_camera_packing_supported(const struct soc_mbus_pixelfmt *fmt)
-{
- return fmt->packing == SOC_MBUS_PACKING_NONE ||
- (fmt->bits_per_sample == 8 &&
- fmt->packing == SOC_MBUS_PACKING_2X8_PADHI) ||
- (fmt->bits_per_sample > 8 &&
- fmt->packing == SOC_MBUS_PACKING_EXTEND16);
-}
-
-#define ISI_BUS_PARAM (V4L2_MBUS_MASTER | \
- V4L2_MBUS_HSYNC_ACTIVE_HIGH | \
- V4L2_MBUS_HSYNC_ACTIVE_LOW | \
- V4L2_MBUS_VSYNC_ACTIVE_HIGH | \
- V4L2_MBUS_VSYNC_ACTIVE_LOW | \
- V4L2_MBUS_PCLK_SAMPLE_RISING | \
- V4L2_MBUS_PCLK_SAMPLE_FALLING | \
- V4L2_MBUS_DATA_ACTIVE_HIGH)
-
-static int isi_camera_try_bus_param(struct soc_camera_device *icd,
- unsigned char buswidth)
-{
- struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
- struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
- struct atmel_isi *isi = ici->priv;
- struct v4l2_mbus_config cfg = {.type = V4L2_MBUS_PARALLEL,};
- unsigned long common_flags;
- int ret;
-
- ret = v4l2_subdev_call(sd, video, g_mbus_config, &cfg);
- if (!ret) {
- common_flags = soc_mbus_config_compatible(&cfg,
- ISI_BUS_PARAM);
- if (!common_flags) {
- dev_warn(icd->parent,
- "Flags incompatible: camera 0x%x, host 0x%x\n",
- cfg.flags, ISI_BUS_PARAM);
- return -EINVAL;
- }
- } else if (ret != -ENOIOCTLCMD) {
- return ret;
- }
-
- if ((1 << (buswidth - 1)) & isi->width_flags)
- return 0;
- return -EINVAL;
-}
-
-
-static int isi_camera_get_formats(struct soc_camera_device *icd,
- unsigned int idx,
- struct soc_camera_format_xlate *xlate)
-{
- struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
- int formats = 0, ret, i, n;
- /* sensor format */
- struct v4l2_subdev_mbus_code_enum code = {
- .which = V4L2_SUBDEV_FORMAT_ACTIVE,
- .index = idx,
- };
- /* soc camera host format */
- const struct soc_mbus_pixelfmt *fmt;
-
- ret = v4l2_subdev_call(sd, pad, enum_mbus_code, NULL, &code);
- if (ret < 0)
- /* No more formats */
- return 0;
-
- fmt = soc_mbus_get_fmtdesc(code.code);
- if (!fmt) {
- dev_err(icd->parent,
- "Invalid format code #%u: %d\n", idx, code.code);
- return 0;
- }
-
- /* This also checks support for the requested bits-per-sample */
- ret = isi_camera_try_bus_param(icd, fmt->bits_per_sample);
- if (ret < 0) {
- dev_err(icd->parent,
- "Fail to try the bus parameters.\n");
- return 0;
- }
-
- switch (code.code) {
- case MEDIA_BUS_FMT_UYVY8_2X8:
- case MEDIA_BUS_FMT_VYUY8_2X8:
- case MEDIA_BUS_FMT_YUYV8_2X8:
- case MEDIA_BUS_FMT_YVYU8_2X8:
- n = ARRAY_SIZE(isi_camera_formats);
- formats += n;
- for (i = 0; xlate && i < n; i++, xlate++) {
- xlate->host_fmt = &isi_camera_formats[i];
- xlate->code = code.code;
- dev_dbg(icd->parent, "Providing format %s using code %d\n",
- xlate->host_fmt->name, xlate->code);
- }
- break;
- default:
- if (!isi_camera_packing_supported(fmt))
- return 0;
- if (xlate)
- dev_dbg(icd->parent,
- "Providing format %s in pass-through mode\n",
- fmt->name);
- }
-
- /* Generic pass-through */
- formats++;
- if (xlate) {
- xlate->host_fmt = fmt;
- xlate->code = code.code;
- xlate++;
- }
-
- return formats;
-}
-
-static int isi_camera_add_device(struct soc_camera_device *icd)
-{
- dev_dbg(icd->parent, "Atmel ISI Camera driver attached to camera %d\n",
- icd->devnum);
-
- return 0;
-}
-
-static void isi_camera_remove_device(struct soc_camera_device *icd)
-{
- dev_dbg(icd->parent, "Atmel ISI Camera driver detached from camera %d\n",
- icd->devnum);
-}
-
-static unsigned int isi_camera_poll(struct file *file, poll_table *pt)
-{
- struct soc_camera_device *icd = file->private_data;
-
- return vb2_poll(&icd->vb2_vidq, file, pt);
-}
-
-static int isi_camera_querycap(struct soc_camera_host *ici,
- struct v4l2_capability *cap)
-{
- strcpy(cap->driver, "atmel-isi");
- strcpy(cap->card, "Atmel Image Sensor Interface");
- cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
- cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
-
- return 0;
-}
-
-static int isi_camera_set_bus_param(struct soc_camera_device *icd)
-{
- struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
- struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
- struct atmel_isi *isi = ici->priv;
- struct v4l2_mbus_config cfg = {.type = V4L2_MBUS_PARALLEL,};
- unsigned long common_flags;
- int ret;
- u32 cfg1 = 0;
-
- ret = v4l2_subdev_call(sd, video, g_mbus_config, &cfg);
- if (!ret) {
- common_flags = soc_mbus_config_compatible(&cfg,
- ISI_BUS_PARAM);
- if (!common_flags) {
- dev_warn(icd->parent,
- "Flags incompatible: camera 0x%x, host 0x%x\n",
- cfg.flags, ISI_BUS_PARAM);
- return -EINVAL;
- }
- } else if (ret != -ENOIOCTLCMD) {
- return ret;
- } else {
- common_flags = ISI_BUS_PARAM;
- }
- dev_dbg(icd->parent, "Flags cam: 0x%x host: 0x%x common: 0x%lx\n",
- cfg.flags, ISI_BUS_PARAM, common_flags);
-
- /* Make choises, based on platform preferences */
- if ((common_flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH) &&
- (common_flags & V4L2_MBUS_HSYNC_ACTIVE_LOW)) {
- if (isi->pdata.hsync_act_low)
- common_flags &= ~V4L2_MBUS_HSYNC_ACTIVE_HIGH;
- else
- common_flags &= ~V4L2_MBUS_HSYNC_ACTIVE_LOW;
- }
-
- if ((common_flags & V4L2_MBUS_VSYNC_ACTIVE_HIGH) &&
- (common_flags & V4L2_MBUS_VSYNC_ACTIVE_LOW)) {
- if (isi->pdata.vsync_act_low)
- common_flags &= ~V4L2_MBUS_VSYNC_ACTIVE_HIGH;
- else
- common_flags &= ~V4L2_MBUS_VSYNC_ACTIVE_LOW;
- }
-
- if ((common_flags & V4L2_MBUS_PCLK_SAMPLE_RISING) &&
- (common_flags & V4L2_MBUS_PCLK_SAMPLE_FALLING)) {
- if (isi->pdata.pclk_act_falling)
- common_flags &= ~V4L2_MBUS_PCLK_SAMPLE_RISING;
- else
- common_flags &= ~V4L2_MBUS_PCLK_SAMPLE_FALLING;
- }
-
- cfg.flags = common_flags;
- ret = v4l2_subdev_call(sd, video, s_mbus_config, &cfg);
- if (ret < 0 && ret != -ENOIOCTLCMD) {
- dev_dbg(icd->parent, "camera s_mbus_config(0x%lx) returned %d\n",
- common_flags, ret);
- return ret;
- }
-
- /* set bus param for ISI */
- if (common_flags & V4L2_MBUS_HSYNC_ACTIVE_LOW)
- cfg1 |= ISI_CFG1_HSYNC_POL_ACTIVE_LOW;
- if (common_flags & V4L2_MBUS_VSYNC_ACTIVE_LOW)
- cfg1 |= ISI_CFG1_VSYNC_POL_ACTIVE_LOW;
- if (common_flags & V4L2_MBUS_PCLK_SAMPLE_FALLING)
- cfg1 |= ISI_CFG1_PIXCLK_POL_ACTIVE_FALLING;
-
- dev_dbg(icd->parent, "vsync active %s, hsync active %s, sampling on pix clock %s edge\n",
- common_flags & V4L2_MBUS_VSYNC_ACTIVE_LOW ? "low" : "high",
- common_flags & V4L2_MBUS_HSYNC_ACTIVE_LOW ? "low" : "high",
- common_flags & V4L2_MBUS_PCLK_SAMPLE_FALLING ? "falling" : "rising");
-
- if (isi->pdata.has_emb_sync)
- cfg1 |= ISI_CFG1_EMB_SYNC;
- if (isi->pdata.full_mode)
- cfg1 |= ISI_CFG1_FULL_MODE;
-
- cfg1 |= ISI_CFG1_THMASK_BEATS_16;
-
- /* Enable PM and peripheral clock before operate isi registers */
- pm_runtime_get_sync(ici->v4l2_dev.dev);
-
- isi_writel(isi, ISI_CTRL, ISI_CTRL_DIS);
- isi_writel(isi, ISI_CFG1, cfg1);
-
- pm_runtime_put(ici->v4l2_dev.dev);
-
- return 0;
-}
-
-static struct soc_camera_host_ops isi_soc_camera_host_ops = {
- .owner = THIS_MODULE,
- .add = isi_camera_add_device,
- .remove = isi_camera_remove_device,
- .set_fmt = isi_camera_set_fmt,
- .try_fmt = isi_camera_try_fmt,
- .get_formats = isi_camera_get_formats,
- .init_videobuf2 = isi_camera_init_videobuf,
- .poll = isi_camera_poll,
- .querycap = isi_camera_querycap,
- .set_bus_param = isi_camera_set_bus_param,
-};
-
-/* -----------------------------------------------------------------------*/
-static int atmel_isi_remove(struct platform_device *pdev)
-{
- struct soc_camera_host *soc_host = to_soc_camera_host(&pdev->dev);
- struct atmel_isi *isi = container_of(soc_host,
- struct atmel_isi, soc_host);
-
- soc_camera_host_unregister(soc_host);
- dma_free_coherent(&pdev->dev,
- sizeof(struct fbd) * MAX_BUFFER_NUM,
- isi->p_fb_descriptors,
- isi->fb_descriptors_phys);
- pm_runtime_disable(&pdev->dev);
-
- return 0;
-}
-
-static int atmel_isi_parse_dt(struct atmel_isi *isi,
- struct platform_device *pdev)
-{
- struct device_node *np= pdev->dev.of_node;
- struct v4l2_of_endpoint ep;
- int err;
-
- /* Default settings for ISI */
- isi->pdata.full_mode = 1;
- isi->pdata.frate = ISI_CFG1_FRATE_CAPTURE_ALL;
-
- np = of_graph_get_next_endpoint(np, NULL);
- if (!np) {
- dev_err(&pdev->dev, "Could not find the endpoint\n");
- return -EINVAL;
- }
-
- err = v4l2_of_parse_endpoint(np, &ep);
- of_node_put(np);
- if (err) {
- dev_err(&pdev->dev, "Could not parse the endpoint\n");
- return err;
- }
-
- switch (ep.bus.parallel.bus_width) {
- case 8:
- isi->pdata.data_width_flags = ISI_DATAWIDTH_8;
- break;
- case 10:
- isi->pdata.data_width_flags =
- ISI_DATAWIDTH_8 | ISI_DATAWIDTH_10;
- break;
- default:
- dev_err(&pdev->dev, "Unsupported bus width: %d\n",
- ep.bus.parallel.bus_width);
- return -EINVAL;
- }
-
- if (ep.bus.parallel.flags & V4L2_MBUS_HSYNC_ACTIVE_LOW)
- isi->pdata.hsync_act_low = true;
- if (ep.bus.parallel.flags & V4L2_MBUS_VSYNC_ACTIVE_LOW)
- isi->pdata.vsync_act_low = true;
- if (ep.bus.parallel.flags & V4L2_MBUS_PCLK_SAMPLE_FALLING)
- isi->pdata.pclk_act_falling = true;
-
- if (ep.bus_type == V4L2_MBUS_BT656)
- isi->pdata.has_emb_sync = true;
-
- return 0;
-}
-
-static int atmel_isi_probe(struct platform_device *pdev)
-{
- int irq;
- struct atmel_isi *isi;
- struct resource *regs;
- int ret, i;
- struct soc_camera_host *soc_host;
-
- isi = devm_kzalloc(&pdev->dev, sizeof(struct atmel_isi), GFP_KERNEL);
- if (!isi) {
- dev_err(&pdev->dev, "Can't allocate interface!\n");
- return -ENOMEM;
- }
-
- isi->pclk = devm_clk_get(&pdev->dev, "isi_clk");
- if (IS_ERR(isi->pclk))
- return PTR_ERR(isi->pclk);
-
- ret = atmel_isi_parse_dt(isi, pdev);
- if (ret)
- return ret;
-
- isi->active = NULL;
- spin_lock_init(&isi->lock);
- INIT_LIST_HEAD(&isi->video_buffer_list);
- INIT_LIST_HEAD(&isi->dma_desc_head);
-
- isi->p_fb_descriptors = dma_alloc_coherent(&pdev->dev,
- sizeof(struct fbd) * MAX_BUFFER_NUM,
- &isi->fb_descriptors_phys,
- GFP_KERNEL);
- if (!isi->p_fb_descriptors) {
- dev_err(&pdev->dev, "Can't allocate descriptors!\n");
- return -ENOMEM;
- }
-
- for (i = 0; i < MAX_BUFFER_NUM; i++) {
- isi->dma_desc[i].p_fbd = isi->p_fb_descriptors + i;
- isi->dma_desc[i].fbd_phys = isi->fb_descriptors_phys +
- i * sizeof(struct fbd);
- list_add(&isi->dma_desc[i].list, &isi->dma_desc_head);
- }
-
- regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- isi->regs = devm_ioremap_resource(&pdev->dev, regs);
- if (IS_ERR(isi->regs)) {
- ret = PTR_ERR(isi->regs);
- goto err_ioremap;
- }
-
- if (isi->pdata.data_width_flags & ISI_DATAWIDTH_8)
- isi->width_flags = 1 << 7;
- if (isi->pdata.data_width_flags & ISI_DATAWIDTH_10)
- isi->width_flags |= 1 << 9;
-
- irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- ret = irq;
- goto err_req_irq;
- }
-
- ret = devm_request_irq(&pdev->dev, irq, isi_interrupt, 0, "isi", isi);
- if (ret) {
- dev_err(&pdev->dev, "Unable to request irq %d\n", irq);
- goto err_req_irq;
- }
- isi->irq = irq;
-
- soc_host = &isi->soc_host;
- soc_host->drv_name = "isi-camera";
- soc_host->ops = &isi_soc_camera_host_ops;
- soc_host->priv = isi;
- soc_host->v4l2_dev.dev = &pdev->dev;
- soc_host->nr = pdev->id;
-
- pm_suspend_ignore_children(&pdev->dev, true);
- pm_runtime_enable(&pdev->dev);
-
- ret = soc_camera_host_register(soc_host);
- if (ret) {
- dev_err(&pdev->dev, "Unable to register soc camera host\n");
- goto err_register_soc_camera_host;
- }
- return 0;
-
-err_register_soc_camera_host:
- pm_runtime_disable(&pdev->dev);
-err_req_irq:
-err_ioremap:
- dma_free_coherent(&pdev->dev,
- sizeof(struct fbd) * MAX_BUFFER_NUM,
- isi->p_fb_descriptors,
- isi->fb_descriptors_phys);
-
- return ret;
-}
-
-#ifdef CONFIG_PM
-static int atmel_isi_runtime_suspend(struct device *dev)
-{
- struct soc_camera_host *soc_host = to_soc_camera_host(dev);
- struct atmel_isi *isi = container_of(soc_host,
- struct atmel_isi, soc_host);
-
- clk_disable_unprepare(isi->pclk);
-
- return 0;
-}
-static int atmel_isi_runtime_resume(struct device *dev)
-{
- struct soc_camera_host *soc_host = to_soc_camera_host(dev);
- struct atmel_isi *isi = container_of(soc_host,
- struct atmel_isi, soc_host);
-
- return clk_prepare_enable(isi->pclk);
-}
-#endif /* CONFIG_PM */
-
-static const struct dev_pm_ops atmel_isi_dev_pm_ops = {
- SET_RUNTIME_PM_OPS(atmel_isi_runtime_suspend,
- atmel_isi_runtime_resume, NULL)
-};
-
-static const struct of_device_id atmel_isi_of_match[] = {
- { .compatible = "atmel,at91sam9g45-isi" },
- { }
-};
-MODULE_DEVICE_TABLE(of, atmel_isi_of_match);
-
-static struct platform_driver atmel_isi_driver = {
- .remove = atmel_isi_remove,
- .driver = {
- .name = "atmel_isi",
- .of_match_table = of_match_ptr(atmel_isi_of_match),
- .pm = &atmel_isi_dev_pm_ops,
- },
-};
-
-module_platform_driver_probe(atmel_isi_driver, atmel_isi_probe);
-
-MODULE_AUTHOR("Josh Wu <josh.wu@atmel.com>");
-MODULE_DESCRIPTION("The V4L2 driver for Atmel Linux");
-MODULE_LICENSE("GPL");
-MODULE_SUPPORTED_DEVICE("video");
diff --git a/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c b/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c
index a15bfb5aea47..96dc01750bc0 100644
--- a/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c
+++ b/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c
@@ -1801,18 +1801,7 @@ static struct platform_driver sh_mobile_ceu_driver = {
.remove = sh_mobile_ceu_remove,
};
-static int __init sh_mobile_ceu_init(void)
-{
- return platform_driver_register(&sh_mobile_ceu_driver);
-}
-
-static void __exit sh_mobile_ceu_exit(void)
-{
- platform_driver_unregister(&sh_mobile_ceu_driver);
-}
-
-module_init(sh_mobile_ceu_init);
-module_exit(sh_mobile_ceu_exit);
+module_platform_driver(sh_mobile_ceu_driver);
MODULE_DESCRIPTION("SuperH Mobile CEU driver");
MODULE_AUTHOR("Magnus Damm");
diff --git a/drivers/media/platform/soc_camera/soc_camera.c b/drivers/media/platform/soc_camera/soc_camera.c
index edd1c1de4e33..3c9421f4d8e3 100644
--- a/drivers/media/platform/soc_camera/soc_camera.c
+++ b/drivers/media/platform/soc_camera/soc_camera.c
@@ -37,18 +37,12 @@
#include <media/v4l2-ioctl.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-of.h>
-#include <media/videobuf-core.h>
#include <media/videobuf2-v4l2.h>
/* Default to VGA resolution */
#define DEFAULT_WIDTH 640
#define DEFAULT_HEIGHT 480
-#define is_streaming(ici, icd) \
- (((ici)->ops->init_videobuf) ? \
- (icd)->vb_vidq.streaming : \
- vb2_is_streaming(&(icd)->vb2_vidq))
-
#define MAP_MAX_NUM 32
static DECLARE_BITMAP(device_map, MAP_MAX_NUM);
static LIST_HEAD(hosts);
@@ -367,23 +361,13 @@ static int soc_camera_reqbufs(struct file *file, void *priv,
{
int ret;
struct soc_camera_device *icd = file->private_data;
- struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
WARN_ON(priv != file->private_data);
if (icd->streamer && icd->streamer != file)
return -EBUSY;
- if (ici->ops->init_videobuf) {
- ret = videobuf_reqbufs(&icd->vb_vidq, p);
- if (ret < 0)
- return ret;
-
- ret = ici->ops->reqbufs(icd, p);
- } else {
- ret = vb2_reqbufs(&icd->vb2_vidq, p);
- }
-
+ ret = vb2_reqbufs(&icd->vb2_vidq, p);
if (!ret)
icd->streamer = p->count ? file : NULL;
return ret;
@@ -393,61 +377,44 @@ static int soc_camera_querybuf(struct file *file, void *priv,
struct v4l2_buffer *p)
{
struct soc_camera_device *icd = file->private_data;
- struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
WARN_ON(priv != file->private_data);
- if (ici->ops->init_videobuf)
- return videobuf_querybuf(&icd->vb_vidq, p);
- else
- return vb2_querybuf(&icd->vb2_vidq, p);
+ return vb2_querybuf(&icd->vb2_vidq, p);
}
static int soc_camera_qbuf(struct file *file, void *priv,
struct v4l2_buffer *p)
{
struct soc_camera_device *icd = file->private_data;
- struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
WARN_ON(priv != file->private_data);
if (icd->streamer != file)
return -EBUSY;
- if (ici->ops->init_videobuf)
- return videobuf_qbuf(&icd->vb_vidq, p);
- else
- return vb2_qbuf(&icd->vb2_vidq, p);
+ return vb2_qbuf(&icd->vb2_vidq, p);
}
static int soc_camera_dqbuf(struct file *file, void *priv,
struct v4l2_buffer *p)
{
struct soc_camera_device *icd = file->private_data;
- struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
WARN_ON(priv != file->private_data);
if (icd->streamer != file)
return -EBUSY;
- if (ici->ops->init_videobuf)
- return videobuf_dqbuf(&icd->vb_vidq, p, file->f_flags & O_NONBLOCK);
- else
- return vb2_dqbuf(&icd->vb2_vidq, p, file->f_flags & O_NONBLOCK);
+ return vb2_dqbuf(&icd->vb2_vidq, p, file->f_flags & O_NONBLOCK);
}
static int soc_camera_create_bufs(struct file *file, void *priv,
struct v4l2_create_buffers *create)
{
struct soc_camera_device *icd = file->private_data;
- struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
int ret;
- /* videobuf2 only */
- if (ici->ops->init_videobuf)
- return -ENOTTY;
-
if (icd->streamer && icd->streamer != file)
return -EBUSY;
@@ -461,24 +428,14 @@ static int soc_camera_prepare_buf(struct file *file, void *priv,
struct v4l2_buffer *b)
{
struct soc_camera_device *icd = file->private_data;
- struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
- /* videobuf2 only */
- if (ici->ops->init_videobuf)
- return -EINVAL;
- else
- return vb2_prepare_buf(&icd->vb2_vidq, b);
+ return vb2_prepare_buf(&icd->vb2_vidq, b);
}
static int soc_camera_expbuf(struct file *file, void *priv,
struct v4l2_exportbuffer *p)
{
struct soc_camera_device *icd = file->private_data;
- struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
-
- /* videobuf2 only */
- if (ici->ops->init_videobuf)
- return -ENOTTY;
if (icd->streamer && icd->streamer != file)
return -EBUSY;
@@ -602,8 +559,6 @@ static int soc_camera_set_fmt(struct soc_camera_device *icd,
icd->sizeimage = pix->sizeimage;
icd->colorspace = pix->colorspace;
icd->field = pix->field;
- if (ici->ops->init_videobuf)
- icd->vb_vidq.field = pix->field;
dev_dbg(icd->pdev, "set width: %d height: %d\n",
icd->user_width, icd->user_height);
@@ -745,13 +700,9 @@ static int soc_camera_open(struct file *file)
if (ret < 0)
goto esfmt;
- if (ici->ops->init_videobuf) {
- ici->ops->init_videobuf(&icd->vb_vidq, icd);
- } else {
- ret = ici->ops->init_videobuf2(&icd->vb2_vidq, icd);
- if (ret < 0)
- goto einitvb;
- }
+ ret = ici->ops->init_videobuf2(&icd->vb2_vidq, icd);
+ if (ret < 0)
+ goto einitvb;
v4l2_ctrl_handler_setup(&icd->ctrl_handler);
}
mutex_unlock(&ici->host_lock);
@@ -842,10 +793,7 @@ static int soc_camera_mmap(struct file *file, struct vm_area_struct *vma)
if (mutex_lock_interruptible(&ici->host_lock))
return -ERESTARTSYS;
- if (ici->ops->init_videobuf)
- err = videobuf_mmap_mapper(&icd->vb_vidq, vma);
- else
- err = vb2_mmap(&icd->vb2_vidq, vma);
+ err = vb2_mmap(&icd->vb2_vidq, vma);
mutex_unlock(&ici->host_lock);
dev_dbg(icd->pdev, "vma start=0x%08lx, size=%ld, ret=%d\n",
@@ -866,10 +814,7 @@ static unsigned int soc_camera_poll(struct file *file, poll_table *pt)
return POLLERR;
mutex_lock(&ici->host_lock);
- if (ici->ops->init_videobuf && list_empty(&icd->vb_vidq.stream))
- dev_err(icd->pdev, "Trying to poll with no queued buffers!\n");
- else
- res = ici->ops->poll(file, pt);
+ res = ici->ops->poll(file, pt);
mutex_unlock(&ici->host_lock);
return res;
}
@@ -900,7 +845,7 @@ static int soc_camera_s_fmt_vid_cap(struct file *file, void *priv,
if (icd->streamer && icd->streamer != file)
return -EBUSY;
- if (is_streaming(to_soc_camera_host(icd->parent), icd)) {
+ if (vb2_is_streaming(&icd->vb2_vidq)) {
dev_err(icd->pdev, "S_FMT denied: queue initialised\n");
return -EBUSY;
}
@@ -971,7 +916,6 @@ static int soc_camera_streamon(struct file *file, void *priv,
enum v4l2_buf_type i)
{
struct soc_camera_device *icd = file->private_data;
- struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
int ret;
@@ -983,12 +927,8 @@ static int soc_camera_streamon(struct file *file, void *priv,
if (icd->streamer != file)
return -EBUSY;
- /* This calls buf_queue from host driver's videobuf_queue_ops */
- if (ici->ops->init_videobuf)
- ret = videobuf_streamon(&icd->vb_vidq);
- else
- ret = vb2_streamon(&icd->vb2_vidq, i);
-
+ /* This calls buf_queue from host driver's videobuf2_queue_ops */
+ ret = vb2_streamon(&icd->vb2_vidq, i);
if (!ret)
v4l2_subdev_call(sd, video, s_stream, 1);
@@ -1000,7 +940,6 @@ static int soc_camera_streamoff(struct file *file, void *priv,
{
struct soc_camera_device *icd = file->private_data;
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
- struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
int ret;
WARN_ON(priv != file->private_data);
@@ -1012,13 +951,10 @@ static int soc_camera_streamoff(struct file *file, void *priv,
return -EBUSY;
/*
- * This calls buf_release from host driver's videobuf_queue_ops for all
+ * This calls buf_release from host driver's videobuf2_queue_ops for all
* remaining buffers. When the last buffer is freed, stop capture
*/
- if (ici->ops->init_videobuf)
- ret = videobuf_streamoff(&icd->vb_vidq);
- else
- ret = vb2_streamoff(&icd->vb2_vidq, i);
+ ret = vb2_streamoff(&icd->vb2_vidq, i);
v4l2_subdev_call(sd, video, s_stream, 0);
@@ -1053,7 +989,7 @@ static int soc_camera_s_selection(struct file *file, void *fh,
if (s->target == V4L2_SEL_TGT_COMPOSE) {
/* No output size change during a running capture! */
- if (is_streaming(ici, icd) &&
+ if (vb2_is_streaming(&icd->vb2_vidq) &&
(icd->user_width != s->r.width ||
icd->user_height != s->r.height))
return -EBUSY;
@@ -1066,7 +1002,8 @@ static int soc_camera_s_selection(struct file *file, void *fh,
return -EBUSY;
}
- if (s->target == V4L2_SEL_TGT_CROP && is_streaming(ici, icd) &&
+ if (s->target == V4L2_SEL_TGT_CROP &&
+ vb2_is_streaming(&icd->vb2_vidq) &&
ici->ops->set_liveselection)
ret = ici->ops->set_liveselection(icd, s);
else
@@ -1910,9 +1847,7 @@ int soc_camera_host_register(struct soc_camera_host *ici)
!ici->ops->set_fmt ||
!ici->ops->set_bus_param ||
!ici->ops->querycap ||
- ((!ici->ops->init_videobuf ||
- !ici->ops->reqbufs) &&
- !ici->ops->init_videobuf2) ||
+ !ici->ops->init_videobuf2 ||
!ici->ops->poll ||
!ici->v4l2_dev.dev)
return -EINVAL;
diff --git a/drivers/media/platform/soc_camera/soc_scale_crop.c b/drivers/media/platform/soc_camera/soc_scale_crop.c
index f77252d6ccd3..0116097c0c0f 100644
--- a/drivers/media/platform/soc_camera/soc_scale_crop.c
+++ b/drivers/media/platform/soc_camera/soc_scale_crop.c
@@ -62,7 +62,8 @@ int soc_camera_client_g_rect(struct v4l2_subdev *sd, struct v4l2_rect *rect)
EXPORT_SYMBOL(soc_camera_client_g_rect);
/* Client crop has changed, update our sub-rectangle to remain within the area */
-static void update_subrect(struct v4l2_rect *rect, struct v4l2_rect *subrect)
+static void move_and_crop_subrect(struct v4l2_rect *rect,
+ struct v4l2_rect *subrect)
{
if (rect->width < subrect->width)
subrect->width = rect->width;
@@ -72,14 +73,14 @@ static void update_subrect(struct v4l2_rect *rect, struct v4l2_rect *subrect)
if (rect->left > subrect->left)
subrect->left = rect->left;
- else if (rect->left + rect->width >
+ else if (rect->left + rect->width <
subrect->left + subrect->width)
subrect->left = rect->left + rect->width -
subrect->width;
if (rect->top > subrect->top)
subrect->top = rect->top;
- else if (rect->top + rect->height >
+ else if (rect->top + rect->height <
subrect->top + subrect->height)
subrect->top = rect->top + rect->height -
subrect->height;
@@ -216,7 +217,7 @@ int soc_camera_client_s_selection(struct v4l2_subdev *sd,
if (!ret) {
*target_rect = *cam_rect;
- update_subrect(target_rect, subrect);
+ move_and_crop_subrect(target_rect, subrect);
}
return ret;
@@ -299,7 +300,7 @@ update_cache:
if (host_1to1)
*subrect = *rect;
else
- update_subrect(rect, subrect);
+ move_and_crop_subrect(rect, subrect);
return 0;
}
diff --git a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c
index 7652ce2ec1dc..59280ac31937 100644
--- a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c
+++ b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c
@@ -865,9 +865,8 @@ static int c8sectpfe_probe(struct platform_device *pdev)
}
/* Setup timer interrupt */
- init_timer(&fei->timer);
- fei->timer.function = c8sectpfe_timer_interrupt;
- fei->timer.data = (unsigned long)fei;
+ setup_timer(&fei->timer, c8sectpfe_timer_interrupt,
+ (unsigned long)fei);
mutex_init(&fei->lock);
diff --git a/drivers/staging/media/st-cec/Makefile b/drivers/media/platform/sti/cec/Makefile
index f07905e1448a..f07905e1448a 100644
--- a/drivers/staging/media/st-cec/Makefile
+++ b/drivers/media/platform/sti/cec/Makefile
diff --git a/drivers/media/platform/sti/cec/stih-cec.c b/drivers/media/platform/sti/cec/stih-cec.c
new file mode 100644
index 000000000000..39ff55145a82
--- /dev/null
+++ b/drivers/media/platform/sti/cec/stih-cec.c
@@ -0,0 +1,404 @@
+/*
+ * STIH4xx CEC driver
+ * Copyright (C) STMicroelectronic SA 2016
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+
+#include <media/cec.h>
+#include <media/cec-notifier.h>
+
+#define CEC_NAME "stih-cec"
+
+/* CEC registers */
+#define CEC_CLK_DIV 0x0
+#define CEC_CTRL 0x4
+#define CEC_IRQ_CTRL 0x8
+#define CEC_STATUS 0xC
+#define CEC_EXT_STATUS 0x10
+#define CEC_TX_CTRL 0x14
+#define CEC_FREE_TIME_THRESH 0x18
+#define CEC_BIT_TOUT_THRESH 0x1C
+#define CEC_BIT_PULSE_THRESH 0x20
+#define CEC_DATA 0x24
+#define CEC_TX_ARRAY_CTRL 0x28
+#define CEC_CTRL2 0x2C
+#define CEC_TX_ERROR_STS 0x30
+#define CEC_ADDR_TABLE 0x34
+#define CEC_DATA_ARRAY_CTRL 0x38
+#define CEC_DATA_ARRAY_STATUS 0x3C
+#define CEC_TX_DATA_BASE 0x40
+#define CEC_TX_DATA_TOP 0x50
+#define CEC_TX_DATA_SIZE 0x1
+#define CEC_RX_DATA_BASE 0x54
+#define CEC_RX_DATA_TOP 0x64
+#define CEC_RX_DATA_SIZE 0x1
+
+/* CEC_CTRL2 */
+#define CEC_LINE_INACTIVE_EN BIT(0)
+#define CEC_AUTO_BUS_ERR_EN BIT(1)
+#define CEC_STOP_ON_ARB_ERR_EN BIT(2)
+#define CEC_TX_REQ_WAIT_EN BIT(3)
+
+/* CEC_DATA_ARRAY_CTRL */
+#define CEC_TX_ARRAY_EN BIT(0)
+#define CEC_RX_ARRAY_EN BIT(1)
+#define CEC_TX_ARRAY_RESET BIT(2)
+#define CEC_RX_ARRAY_RESET BIT(3)
+#define CEC_TX_N_OF_BYTES_IRQ_EN BIT(4)
+#define CEC_TX_STOP_ON_NACK BIT(7)
+
+/* CEC_TX_ARRAY_CTRL */
+#define CEC_TX_N_OF_BYTES 0x1F
+#define CEC_TX_START BIT(5)
+#define CEC_TX_AUTO_SOM_EN BIT(6)
+#define CEC_TX_AUTO_EOM_EN BIT(7)
+
+/* CEC_IRQ_CTRL */
+#define CEC_TX_DONE_IRQ_EN BIT(0)
+#define CEC_ERROR_IRQ_EN BIT(2)
+#define CEC_RX_DONE_IRQ_EN BIT(3)
+#define CEC_RX_SOM_IRQ_EN BIT(4)
+#define CEC_RX_EOM_IRQ_EN BIT(5)
+#define CEC_FREE_TIME_IRQ_EN BIT(6)
+#define CEC_PIN_STS_IRQ_EN BIT(7)
+
+/* CEC_CTRL */
+#define CEC_IN_FILTER_EN BIT(0)
+#define CEC_PWR_SAVE_EN BIT(1)
+#define CEC_EN BIT(4)
+#define CEC_ACK_CTRL BIT(5)
+#define CEC_RX_RESET_EN BIT(6)
+#define CEC_IGNORE_RX_ERROR BIT(7)
+
+/* CEC_STATUS */
+#define CEC_TX_DONE_STS BIT(0)
+#define CEC_TX_ACK_GET_STS BIT(1)
+#define CEC_ERROR_STS BIT(2)
+#define CEC_RX_DONE_STS BIT(3)
+#define CEC_RX_SOM_STS BIT(4)
+#define CEC_RX_EOM_STS BIT(5)
+#define CEC_FREE_TIME_IRQ_STS BIT(6)
+#define CEC_PIN_STS BIT(7)
+#define CEC_SBIT_TOUT_STS BIT(8)
+#define CEC_DBIT_TOUT_STS BIT(9)
+#define CEC_LPULSE_ERROR_STS BIT(10)
+#define CEC_HPULSE_ERROR_STS BIT(11)
+#define CEC_TX_ERROR BIT(12)
+#define CEC_TX_ARB_ERROR BIT(13)
+#define CEC_RX_ERROR_MIN BIT(14)
+#define CEC_RX_ERROR_MAX BIT(15)
+
+/* Signal free time in bit periods (2.4ms) */
+#define CEC_PRESENT_INIT_SFT 7
+#define CEC_NEW_INIT_SFT 5
+#define CEC_RETRANSMIT_SFT 3
+
+/* Constants for CEC_BIT_TOUT_THRESH register */
+#define CEC_SBIT_TOUT_47MS BIT(1)
+#define CEC_SBIT_TOUT_48MS (BIT(0) | BIT(1))
+#define CEC_SBIT_TOUT_50MS BIT(2)
+#define CEC_DBIT_TOUT_27MS BIT(0)
+#define CEC_DBIT_TOUT_28MS BIT(1)
+#define CEC_DBIT_TOUT_29MS (BIT(0) | BIT(1))
+
+/* Constants for CEC_BIT_PULSE_THRESH register */
+#define CEC_BIT_LPULSE_03MS BIT(1)
+#define CEC_BIT_HPULSE_03MS BIT(3)
+
+/* Constants for CEC_DATA_ARRAY_STATUS register */
+#define CEC_RX_N_OF_BYTES 0x1F
+#define CEC_TX_N_OF_BYTES_SENT BIT(5)
+#define CEC_RX_OVERRUN BIT(6)
+
+struct stih_cec {
+ struct cec_adapter *adap;
+ struct device *dev;
+ struct clk *clk;
+ void __iomem *regs;
+ int irq;
+ u32 irq_status;
+ struct cec_notifier *notifier;
+};
+
+static int stih_cec_adap_enable(struct cec_adapter *adap, bool enable)
+{
+ struct stih_cec *cec = cec_get_drvdata(adap);
+
+ if (enable) {
+ /* The doc says (input TCLK_PERIOD * CEC_CLK_DIV) = 0.1ms */
+ unsigned long clk_freq = clk_get_rate(cec->clk);
+ u32 cec_clk_div = clk_freq / 10000;
+
+ writel(cec_clk_div, cec->regs + CEC_CLK_DIV);
+
+ /* Configuration of the durations activating a timeout */
+ writel(CEC_SBIT_TOUT_47MS | (CEC_DBIT_TOUT_28MS << 4),
+ cec->regs + CEC_BIT_TOUT_THRESH);
+
+ /* Configuration of the smallest allowed duration for pulses */
+ writel(CEC_BIT_LPULSE_03MS | CEC_BIT_HPULSE_03MS,
+ cec->regs + CEC_BIT_PULSE_THRESH);
+
+ /* Minimum received bit period threshold */
+ writel(BIT(5) | BIT(7), cec->regs + CEC_TX_CTRL);
+
+ /* Configuration of transceiver data arrays */
+ writel(CEC_TX_ARRAY_EN | CEC_RX_ARRAY_EN | CEC_TX_STOP_ON_NACK,
+ cec->regs + CEC_DATA_ARRAY_CTRL);
+
+ /* Configuration of the control bits for CEC Transceiver */
+ writel(CEC_IN_FILTER_EN | CEC_EN | CEC_RX_RESET_EN,
+ cec->regs + CEC_CTRL);
+
+ /* Clear logical addresses */
+ writel(0, cec->regs + CEC_ADDR_TABLE);
+
+ /* Clear the status register */
+ writel(0x0, cec->regs + CEC_STATUS);
+
+ /* Enable the interrupts */
+ writel(CEC_TX_DONE_IRQ_EN | CEC_RX_DONE_IRQ_EN |
+ CEC_RX_SOM_IRQ_EN | CEC_RX_EOM_IRQ_EN |
+ CEC_ERROR_IRQ_EN,
+ cec->regs + CEC_IRQ_CTRL);
+
+ } else {
+ /* Clear logical addresses */
+ writel(0, cec->regs + CEC_ADDR_TABLE);
+
+ /* Clear the status register */
+ writel(0x0, cec->regs + CEC_STATUS);
+
+ /* Disable the interrupts */
+ writel(0, cec->regs + CEC_IRQ_CTRL);
+ }
+
+ return 0;
+}
+
+static int stih_cec_adap_log_addr(struct cec_adapter *adap, u8 logical_addr)
+{
+ struct stih_cec *cec = cec_get_drvdata(adap);
+ u32 reg = readl(cec->regs + CEC_ADDR_TABLE);
+
+ reg |= 1 << logical_addr;
+
+ if (logical_addr == CEC_LOG_ADDR_INVALID)
+ reg = 0;
+
+ writel(reg, cec->regs + CEC_ADDR_TABLE);
+
+ return 0;
+}
+
+static int stih_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
+ u32 signal_free_time, struct cec_msg *msg)
+{
+ struct stih_cec *cec = cec_get_drvdata(adap);
+ int i;
+
+ /* Copy message into registers */
+ for (i = 0; i < msg->len; i++)
+ writeb(msg->msg[i], cec->regs + CEC_TX_DATA_BASE + i);
+
+ /* Start transmission and configure the hardware to add start and stop
+ * bits. Signal free time is handled by the hardware.
+ */
+ writel(CEC_TX_AUTO_SOM_EN | CEC_TX_AUTO_EOM_EN | CEC_TX_START |
+ msg->len, cec->regs + CEC_TX_ARRAY_CTRL);
+
+ return 0;
+}
+
+static void stih_tx_done(struct stih_cec *cec, u32 status)
+{
+ if (status & CEC_TX_ERROR) {
+ cec_transmit_done(cec->adap, CEC_TX_STATUS_ERROR, 0, 0, 0, 1);
+ return;
+ }
+
+ if (status & CEC_TX_ARB_ERROR) {
+ cec_transmit_done(cec->adap,
+ CEC_TX_STATUS_ARB_LOST, 1, 0, 0, 0);
+ return;
+ }
+
+ if (!(status & CEC_TX_ACK_GET_STS)) {
+ cec_transmit_done(cec->adap, CEC_TX_STATUS_NACK, 0, 1, 0, 0);
+ return;
+ }
+
+ cec_transmit_done(cec->adap, CEC_TX_STATUS_OK, 0, 0, 0, 0);
+}
+
+static void stih_rx_done(struct stih_cec *cec, u32 status)
+{
+ struct cec_msg msg = {};
+ u8 i;
+
+ if (status & CEC_RX_ERROR_MIN)
+ return;
+
+ if (status & CEC_RX_ERROR_MAX)
+ return;
+
+ msg.len = readl(cec->regs + CEC_DATA_ARRAY_STATUS) & 0x1f;
+
+ if (!msg.len)
+ return;
+
+ if (msg.len > 16)
+ msg.len = 16;
+
+ for (i = 0; i < msg.len; i++)
+ msg.msg[i] = readl(cec->regs + CEC_RX_DATA_BASE + i);
+
+ cec_received_msg(cec->adap, &msg);
+}
+
+static irqreturn_t stih_cec_irq_handler_thread(int irq, void *priv)
+{
+ struct stih_cec *cec = priv;
+
+ if (cec->irq_status & CEC_TX_DONE_STS)
+ stih_tx_done(cec, cec->irq_status);
+
+ if (cec->irq_status & CEC_RX_DONE_STS)
+ stih_rx_done(cec, cec->irq_status);
+
+ cec->irq_status = 0;
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t stih_cec_irq_handler(int irq, void *priv)
+{
+ struct stih_cec *cec = priv;
+
+ cec->irq_status = readl(cec->regs + CEC_STATUS);
+ writel(cec->irq_status, cec->regs + CEC_STATUS);
+
+ return IRQ_WAKE_THREAD;
+}
+
+static const struct cec_adap_ops sti_cec_adap_ops = {
+ .adap_enable = stih_cec_adap_enable,
+ .adap_log_addr = stih_cec_adap_log_addr,
+ .adap_transmit = stih_cec_adap_transmit,
+};
+
+static int stih_cec_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ struct stih_cec *cec;
+ struct device_node *np;
+ struct platform_device *hdmi_dev;
+ int ret;
+
+ cec = devm_kzalloc(dev, sizeof(*cec), GFP_KERNEL);
+ if (!cec)
+ return -ENOMEM;
+
+ np = of_parse_phandle(pdev->dev.of_node, "hdmi-phandle", 0);
+
+ if (!np) {
+ dev_err(&pdev->dev, "Failed to find hdmi node in device tree\n");
+ return -ENODEV;
+ }
+
+ hdmi_dev = of_find_device_by_node(np);
+ if (!hdmi_dev)
+ return -EPROBE_DEFER;
+
+ cec->notifier = cec_notifier_get(&hdmi_dev->dev);
+ if (!cec->notifier)
+ return -ENOMEM;
+
+ cec->dev = dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ cec->regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(cec->regs))
+ return PTR_ERR(cec->regs);
+
+ cec->irq = platform_get_irq(pdev, 0);
+ if (cec->irq < 0)
+ return cec->irq;
+
+ ret = devm_request_threaded_irq(dev, cec->irq, stih_cec_irq_handler,
+ stih_cec_irq_handler_thread, 0,
+ pdev->name, cec);
+ if (ret)
+ return ret;
+
+ cec->clk = devm_clk_get(dev, "cec-clk");
+ if (IS_ERR(cec->clk)) {
+ dev_err(dev, "Cannot get cec clock\n");
+ return PTR_ERR(cec->clk);
+ }
+
+ cec->adap = cec_allocate_adapter(&sti_cec_adap_ops, cec,
+ CEC_NAME,
+ CEC_CAP_LOG_ADDRS | CEC_CAP_PASSTHROUGH |
+ CEC_CAP_TRANSMIT, 1);
+ ret = PTR_ERR_OR_ZERO(cec->adap);
+ if (ret)
+ return ret;
+
+ ret = cec_register_adapter(cec->adap, &pdev->dev);
+ if (ret) {
+ cec_delete_adapter(cec->adap);
+ return ret;
+ }
+
+ cec_register_cec_notifier(cec->adap, cec->notifier);
+
+ platform_set_drvdata(pdev, cec);
+ return 0;
+}
+
+static int stih_cec_remove(struct platform_device *pdev)
+{
+ struct stih_cec *cec = platform_get_drvdata(pdev);
+
+ cec_unregister_adapter(cec->adap);
+ cec_notifier_put(cec->notifier);
+
+ return 0;
+}
+
+static const struct of_device_id stih_cec_match[] = {
+ {
+ .compatible = "st,stih-cec",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, stih_cec_match);
+
+static struct platform_driver stih_cec_pdrv = {
+ .probe = stih_cec_probe,
+ .remove = stih_cec_remove,
+ .driver = {
+ .name = CEC_NAME,
+ .of_match_table = stih_cec_match,
+ },
+};
+
+module_platform_driver(stih_cec_pdrv);
+
+MODULE_AUTHOR("Benjamin Gaignard <benjamin.gaignard@linaro.org>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("STIH4xx CEC driver");
diff --git a/drivers/media/platform/sti/delta/delta-mjpeg-dec.c b/drivers/media/platform/sti/delta/delta-mjpeg-dec.c
index e79bdc611432..84ea43c0eb46 100644
--- a/drivers/media/platform/sti/delta/delta-mjpeg-dec.c
+++ b/drivers/media/platform/sti/delta/delta-mjpeg-dec.c
@@ -375,7 +375,7 @@ static int delta_mjpeg_decode(struct delta_ctx *pctx, struct delta_au *pau)
struct delta_mjpeg_ctx *ctx = to_ctx(pctx);
int ret;
struct delta_au au = *pau;
- unsigned int data_offset;
+ unsigned int data_offset = 0;
struct mjpeg_header *header = &ctx->header_struct;
if (!ctx->header) {
diff --git a/drivers/media/platform/ti-vpe/vpdma.c b/drivers/media/platform/ti-vpe/vpdma.c
index 23472e3784ff..e2cf2b90e500 100644
--- a/drivers/media/platform/ti-vpe/vpdma.c
+++ b/drivers/media/platform/ti-vpe/vpdma.c
@@ -801,17 +801,17 @@ static void dump_dtd(struct vpdma_dtd *dtd)
 * flags: VPDMA flags to configure some descriptor fields
*/
void vpdma_add_out_dtd(struct vpdma_desc_list *list, int width,
- const struct v4l2_rect *c_rect,
+ int stride, const struct v4l2_rect *c_rect,
const struct vpdma_data_format *fmt, dma_addr_t dma_addr,
int max_w, int max_h, enum vpdma_channel chan, u32 flags)
{
- vpdma_rawchan_add_out_dtd(list, width, c_rect, fmt, dma_addr,
+ vpdma_rawchan_add_out_dtd(list, width, stride, c_rect, fmt, dma_addr,
max_w, max_h, chan_info[chan].num, flags);
}
EXPORT_SYMBOL(vpdma_add_out_dtd);
void vpdma_rawchan_add_out_dtd(struct vpdma_desc_list *list, int width,
- const struct v4l2_rect *c_rect,
+ int stride, const struct v4l2_rect *c_rect,
const struct vpdma_data_format *fmt, dma_addr_t dma_addr,
int max_w, int max_h, int raw_vpdma_chan, u32 flags)
{
@@ -821,7 +821,6 @@ void vpdma_rawchan_add_out_dtd(struct vpdma_desc_list *list, int width,
int channel, next_chan;
struct v4l2_rect rect = *c_rect;
int depth = fmt->depth;
- int stride;
struct vpdma_dtd *dtd;
channel = next_chan = raw_vpdma_chan;
@@ -833,8 +832,6 @@ void vpdma_rawchan_add_out_dtd(struct vpdma_desc_list *list, int width,
depth = 8;
}
- stride = ALIGN((depth * width) >> 3, VPDMA_STRIDE_ALIGN);
-
dma_addr += rect.top * stride + (rect.left * depth >> 3);
dtd = list->next;
@@ -882,7 +879,7 @@ EXPORT_SYMBOL(vpdma_rawchan_add_out_dtd);
* contribute to the client)
*/
void vpdma_add_in_dtd(struct vpdma_desc_list *list, int width,
- const struct v4l2_rect *c_rect,
+ int stride, const struct v4l2_rect *c_rect,
const struct vpdma_data_format *fmt, dma_addr_t dma_addr,
enum vpdma_channel chan, int field, u32 flags, int frame_width,
int frame_height, int start_h, int start_v)
@@ -892,7 +889,6 @@ void vpdma_add_in_dtd(struct vpdma_desc_list *list, int width,
int depth = fmt->depth;
int channel, next_chan;
struct v4l2_rect rect = *c_rect;
- int stride;
struct vpdma_dtd *dtd;
channel = next_chan = chan_info[chan].num;
@@ -904,8 +900,6 @@ void vpdma_add_in_dtd(struct vpdma_desc_list *list, int width,
depth = 8;
}
- stride = ALIGN((depth * width) >> 3, VPDMA_STRIDE_ALIGN);
-
dma_addr += rect.top * stride + (rect.left * depth >> 3);
dtd = list->next;
diff --git a/drivers/media/platform/ti-vpe/vpdma.h b/drivers/media/platform/ti-vpe/vpdma.h
index 131700c112b2..7e611501c291 100644
--- a/drivers/media/platform/ti-vpe/vpdma.h
+++ b/drivers/media/platform/ti-vpe/vpdma.h
@@ -242,16 +242,16 @@ void vpdma_add_sync_on_channel_ctd(struct vpdma_desc_list *list,
void vpdma_add_abort_channel_ctd(struct vpdma_desc_list *list,
int chan_num);
void vpdma_add_out_dtd(struct vpdma_desc_list *list, int width,
- const struct v4l2_rect *c_rect,
+ int stride, const struct v4l2_rect *c_rect,
const struct vpdma_data_format *fmt, dma_addr_t dma_addr,
int max_w, int max_h, enum vpdma_channel chan, u32 flags);
void vpdma_rawchan_add_out_dtd(struct vpdma_desc_list *list, int width,
- const struct v4l2_rect *c_rect,
+ int stride, const struct v4l2_rect *c_rect,
const struct vpdma_data_format *fmt, dma_addr_t dma_addr,
int max_w, int max_h, int raw_vpdma_chan, u32 flags);
void vpdma_add_in_dtd(struct vpdma_desc_list *list, int width,
- const struct v4l2_rect *c_rect,
+ int stride, const struct v4l2_rect *c_rect,
const struct vpdma_data_format *fmt, dma_addr_t dma_addr,
enum vpdma_channel chan, int field, u32 flags, int frame_width,
int frame_height, int start_h, int start_v);
diff --git a/drivers/media/platform/ti-vpe/vpe.c b/drivers/media/platform/ti-vpe/vpe.c
index f0156b7759e9..c47151495b6f 100644
--- a/drivers/media/platform/ti-vpe/vpe.c
+++ b/drivers/media/platform/ti-vpe/vpe.c
@@ -1085,7 +1085,8 @@ static void add_out_dtd(struct vpe_ctx *ctx, int port)
vpdma_set_max_size(ctx->dev->vpdma, VPDMA_MAX_SIZE1,
MAX_W, MAX_H);
- vpdma_add_out_dtd(&ctx->desc_list, q_data->width, &q_data->c_rect,
+ vpdma_add_out_dtd(&ctx->desc_list, q_data->width,
+ q_data->bytesperline[VPE_LUMA], &q_data->c_rect,
vpdma_fmt, dma_addr, MAX_OUT_WIDTH_REG1,
MAX_OUT_HEIGHT_REG1, p_data->channel, flags);
}
@@ -1169,7 +1170,8 @@ static void add_in_dtd(struct vpe_ctx *ctx, int port)
if (p_data->vb_part && fmt->fourcc == V4L2_PIX_FMT_NV12)
frame_height /= 2;
- vpdma_add_in_dtd(&ctx->desc_list, q_data->width, &q_data->c_rect,
+ vpdma_add_in_dtd(&ctx->desc_list, q_data->width,
+ q_data->bytesperline[VPE_LUMA], &q_data->c_rect,
vpdma_fmt, dma_addr, p_data->channel, field, flags, frame_width,
frame_height, 0, 0);
}
@@ -1595,6 +1597,7 @@ static int __vpe_try_fmt(struct vpe_ctx *ctx, struct v4l2_format *f,
struct v4l2_plane_pix_format *plane_fmt;
unsigned int w_align;
int i, depth, depth_bytes, height;
+ unsigned int stride = 0;
if (!fmt || !(fmt->types & type)) {
vpe_err(ctx->dev, "Fourcc format (0x%08x) invalid.\n",
@@ -1681,16 +1684,27 @@ static int __vpe_try_fmt(struct vpe_ctx *ctx, struct v4l2_format *f,
plane_fmt = &pix->plane_fmt[i];
depth = fmt->vpdma_fmt[i]->depth;
- if (i == VPE_LUMA)
- plane_fmt->bytesperline = (pix->width * depth) >> 3;
- else
- plane_fmt->bytesperline = pix->width;
+ stride = (pix->width * fmt->vpdma_fmt[VPE_LUMA]->depth) >> 3;
+ if (stride > plane_fmt->bytesperline)
+ plane_fmt->bytesperline = stride;
+
+ plane_fmt->bytesperline = ALIGN(plane_fmt->bytesperline,
+ VPDMA_STRIDE_ALIGN);
- if (pix->num_planes == 1 && fmt->coplanar)
- depth += fmt->vpdma_fmt[VPE_CHROMA]->depth;
- plane_fmt->sizeimage =
- (pix->height * pix->width * depth) >> 3;
+ if (i == VPE_LUMA) {
+ plane_fmt->sizeimage = pix->height *
+ plane_fmt->bytesperline;
+ if (pix->num_planes == 1 && fmt->coplanar)
+ plane_fmt->sizeimage += pix->height *
+ plane_fmt->bytesperline *
+ fmt->vpdma_fmt[VPE_CHROMA]->depth >> 3;
+
+ } else { /* i == VIP_CHROMA */
+ plane_fmt->sizeimage = (pix->height *
+ plane_fmt->bytesperline *
+ depth) >> 3;
+ }
memset(plane_fmt->reserved, 0, sizeof(plane_fmt->reserved));
}
diff --git a/drivers/media/platform/vimc/Kconfig b/drivers/media/platform/vimc/Kconfig
new file mode 100644
index 000000000000..a18f6352c422
--- /dev/null
+++ b/drivers/media/platform/vimc/Kconfig
@@ -0,0 +1,14 @@
+config VIDEO_VIMC
+ tristate "Virtual Media Controller Driver (VIMC)"
+ depends on VIDEO_DEV && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ select VIDEOBUF2_VMALLOC
+ default n
+ ---help---
+ Skeleton driver for Virtual Media Controller
+
+ This driver can be compared to the vivid driver for emulating
+ a media node that exposes a complex media topology. The topology
+ is hard coded for now but is meant to be highly configurable in
+ the future.
+
+ When in doubt, say N.
diff --git a/drivers/media/platform/vimc/Makefile b/drivers/media/platform/vimc/Makefile
new file mode 100644
index 000000000000..c45195e5e05c
--- /dev/null
+++ b/drivers/media/platform/vimc/Makefile
@@ -0,0 +1,3 @@
+vimc-objs := vimc-core.o vimc-capture.o vimc-sensor.o
+
+obj-$(CONFIG_VIDEO_VIMC) += vimc.o
diff --git a/drivers/media/platform/vimc/vimc-capture.c b/drivers/media/platform/vimc/vimc-capture.c
new file mode 100644
index 000000000000..9adb06d7e13d
--- /dev/null
+++ b/drivers/media/platform/vimc/vimc-capture.c
@@ -0,0 +1,498 @@
+/*
+ * vimc-capture.c Virtual Media Controller Driver
+ *
+ * Copyright (C) 2015-2017 Helen Koike <helen.fornazier@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <media/v4l2-ioctl.h>
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-vmalloc.h>
+
+#include "vimc-capture.h"
+
+struct vimc_cap_device {
+ struct vimc_ent_device ved;
+ struct video_device vdev;
+ struct v4l2_pix_format format;
+ struct vb2_queue queue;
+ struct list_head buf_list;
+ /*
+ * NOTE: in a real driver, a spin lock must be used to access the
+ * queue because the frames are generated from a hardware interrupt
+ * and the ISR is not allowed to sleep.
+ * Even though a spinlock is not strictly necessary in the vimc driver,
+ * we use one here as a code reference
+ */
+ spinlock_t qlock;
+ struct mutex lock;
+ u32 sequence;
+ struct media_pipeline pipe;
+};
+
+struct vimc_cap_buffer {
+ /*
+ * struct vb2_v4l2_buffer must be the first element;
+ * the videobuf2 framework will allocate this struct based on
+ * buf_struct_size and use the first sizeof(struct vb2_buffer) bytes of
+ * memory as a vb2_buffer
+ */
+ struct vb2_v4l2_buffer vb2;
+ struct list_head list;
+};
+
+static int vimc_cap_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ struct vimc_cap_device *vcap = video_drvdata(file);
+
+ strlcpy(cap->driver, KBUILD_MODNAME, sizeof(cap->driver));
+ strlcpy(cap->card, KBUILD_MODNAME, sizeof(cap->card));
+ snprintf(cap->bus_info, sizeof(cap->bus_info),
+ "platform:%s", vcap->vdev.v4l2_dev->name);
+
+ return 0;
+}
+
+static int vimc_cap_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct vimc_cap_device *vcap = video_drvdata(file);
+
+ f->fmt.pix = vcap->format;
+
+ return 0;
+}
+
+static int vimc_cap_enum_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ struct vimc_cap_device *vcap = video_drvdata(file);
+
+ if (f->index > 0)
+ return -EINVAL;
+
+ /* We only support one format for now */
+ f->pixelformat = vcap->format.pixelformat;
+
+ return 0;
+}
+
+static const struct v4l2_file_operations vimc_cap_fops = {
+ .owner = THIS_MODULE,
+ .open = v4l2_fh_open,
+ .release = vb2_fop_release,
+ .read = vb2_fop_read,
+ .poll = vb2_fop_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = vb2_fop_mmap,
+};
+
+static const struct v4l2_ioctl_ops vimc_cap_ioctl_ops = {
+ .vidioc_querycap = vimc_cap_querycap,
+
+ .vidioc_g_fmt_vid_cap = vimc_cap_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = vimc_cap_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = vimc_cap_fmt_vid_cap,
+ .vidioc_enum_fmt_vid_cap = vimc_cap_enum_fmt_vid_cap,
+
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_expbuf = vb2_ioctl_expbuf,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+};
+
+static void vimc_cap_return_all_buffers(struct vimc_cap_device *vcap,
+ enum vb2_buffer_state state)
+{
+ struct vimc_cap_buffer *vbuf, *node;
+
+ spin_lock(&vcap->qlock);
+
+ list_for_each_entry_safe(vbuf, node, &vcap->buf_list, list) {
+ list_del(&vbuf->list);
+ vb2_buffer_done(&vbuf->vb2.vb2_buf, state);
+ }
+
+ spin_unlock(&vcap->qlock);
+}
+
+static int vimc_cap_pipeline_s_stream(struct vimc_cap_device *vcap, int enable)
+{
+ struct v4l2_subdev *sd;
+ struct media_pad *pad;
+ int ret;
+
+ /* Start the stream in the directly connected subdevice */
+ pad = media_entity_remote_pad(&vcap->vdev.entity.pads[0]);
+
+ /*
+ * if it is a raw node from vimc-core, there is nothing to activate
+ * TODO: remove this when there are no more raw nodes in the
+ * core and return error instead
+ */
+ if (pad->entity->obj_type == MEDIA_ENTITY_TYPE_BASE)
+ return 0;
+
+ sd = media_entity_to_v4l2_subdev(pad->entity);
+ ret = v4l2_subdev_call(sd, video, s_stream, enable);
+ if (ret && ret != -ENOIOCTLCMD)
+ return ret;
+
+ return 0;
+}
+
+static int vimc_cap_start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+ struct vimc_cap_device *vcap = vb2_get_drv_priv(vq);
+ struct media_entity *entity = &vcap->vdev.entity;
+ int ret;
+
+ vcap->sequence = 0;
+
+ /* Start the media pipeline */
+ ret = media_pipeline_start(entity, &vcap->pipe);
+ if (ret) {
+ vimc_cap_return_all_buffers(vcap, VB2_BUF_STATE_QUEUED);
+ return ret;
+ }
+
+ /* Enable streaming from the pipe */
+ ret = vimc_cap_pipeline_s_stream(vcap, 1);
+ if (ret) {
+ media_pipeline_stop(entity);
+ vimc_cap_return_all_buffers(vcap, VB2_BUF_STATE_QUEUED);
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * Stop the stream engine. Any remaining buffers in the stream queue are
+ * dequeued and passed on to the vb2 framework marked as STATE_ERROR.
+ */
+static void vimc_cap_stop_streaming(struct vb2_queue *vq)
+{
+ struct vimc_cap_device *vcap = vb2_get_drv_priv(vq);
+
+ /* Disable streaming from the pipe */
+ vimc_cap_pipeline_s_stream(vcap, 0);
+
+ /* Stop the media pipeline */
+ media_pipeline_stop(&vcap->vdev.entity);
+
+ /* Release all active buffers */
+ vimc_cap_return_all_buffers(vcap, VB2_BUF_STATE_ERROR);
+}
+
+static void vimc_cap_buf_queue(struct vb2_buffer *vb2_buf)
+{
+ struct vimc_cap_device *vcap = vb2_get_drv_priv(vb2_buf->vb2_queue);
+ struct vimc_cap_buffer *buf = container_of(vb2_buf,
+ struct vimc_cap_buffer,
+ vb2.vb2_buf);
+
+ spin_lock(&vcap->qlock);
+ list_add_tail(&buf->list, &vcap->buf_list);
+ spin_unlock(&vcap->qlock);
+}
+
+static int vimc_cap_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers,
+ unsigned int *nplanes, unsigned int sizes[],
+ struct device *alloc_devs[])
+{
+ struct vimc_cap_device *vcap = vb2_get_drv_priv(vq);
+
+ if (*nplanes)
+ return sizes[0] < vcap->format.sizeimage ? -EINVAL : 0;
+ /* We don't support multiplanar formats for now */
+ *nplanes = 1;
+ sizes[0] = vcap->format.sizeimage;
+
+ return 0;
+}
+
+static int vimc_cap_buffer_prepare(struct vb2_buffer *vb)
+{
+ struct vimc_cap_device *vcap = vb2_get_drv_priv(vb->vb2_queue);
+ unsigned long size = vcap->format.sizeimage;
+
+ if (vb2_plane_size(vb, 0) < size) {
+ dev_err(vcap->vdev.v4l2_dev->dev,
+ "%s: buffer too small (%lu < %lu)\n",
+ vcap->vdev.name, vb2_plane_size(vb, 0), size);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static const struct vb2_ops vimc_cap_qops = {
+ .start_streaming = vimc_cap_start_streaming,
+ .stop_streaming = vimc_cap_stop_streaming,
+ .buf_queue = vimc_cap_buf_queue,
+ .queue_setup = vimc_cap_queue_setup,
+ .buf_prepare = vimc_cap_buffer_prepare,
+ /*
+ * Since q->lock is set we can use the standard
+ * vb2_ops_wait_prepare/finish helper functions.
+ */
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+};
+
+/*
+ * NOTE: this function is a copy of v4l2_subdev_link_validate_get_format;
+ * maybe the v4l2 function should be made public
+ */
+static int vimc_cap_v4l2_subdev_link_validate_get_format(struct media_pad *pad,
+ struct v4l2_subdev_format *fmt)
+{
+ struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(pad->entity);
+
+ fmt->which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ fmt->pad = pad->index;
+
+ return v4l2_subdev_call(sd, pad, get_fmt, NULL, fmt);
+}
+
+static int vimc_cap_link_validate(struct media_link *link)
+{
+ struct v4l2_subdev_format source_fmt;
+ const struct vimc_pix_map *vpix;
+ struct vimc_cap_device *vcap = container_of(link->sink->entity,
+ struct vimc_cap_device,
+ vdev.entity);
+ struct v4l2_pix_format *sink_fmt = &vcap->format;
+ int ret;
+
+ /*
+ * if it is a raw node from vimc-core, ignore the link for now
+ * TODO: remove this when there are no more raw nodes in the
+ * core and return error instead
+ */
+ if (link->source->entity->obj_type == MEDIA_ENTITY_TYPE_BASE)
+ return 0;
+
+ /* Get the format of the subdev */
+ ret = vimc_cap_v4l2_subdev_link_validate_get_format(link->source,
+ &source_fmt);
+ if (ret)
+ return ret;
+
+ dev_dbg(vcap->vdev.v4l2_dev->dev,
+ "%s: link validate formats src:%dx%d %d sink:%dx%d %d\n",
+ vcap->vdev.name,
+ source_fmt.format.width, source_fmt.format.height,
+ source_fmt.format.code,
+ sink_fmt->width, sink_fmt->height,
+ sink_fmt->pixelformat);
+
+ /* The width, height and code must match. */
+ vpix = vimc_pix_map_by_pixelformat(sink_fmt->pixelformat);
+ if (source_fmt.format.width != sink_fmt->width
+ || source_fmt.format.height != sink_fmt->height
+ || vpix->code != source_fmt.format.code)
+ return -EPIPE;
+
+ /*
+ * The field order must match, or the sink field order must be NONE
+ * to support interlaced hardware connected to bridges that support
+ * progressive formats only.
+ */
+ if (source_fmt.format.field != sink_fmt->field &&
+ sink_fmt->field != V4L2_FIELD_NONE)
+ return -EPIPE;
+
+ return 0;
+}
+
+static const struct media_entity_operations vimc_cap_mops = {
+ .link_validate = vimc_cap_link_validate,
+};
+
+static void vimc_cap_destroy(struct vimc_ent_device *ved)
+{
+ struct vimc_cap_device *vcap = container_of(ved, struct vimc_cap_device,
+ ved);
+
+ vb2_queue_release(&vcap->queue);
+ media_entity_cleanup(ved->ent);
+ video_unregister_device(&vcap->vdev);
+ vimc_pads_cleanup(vcap->ved.pads);
+ kfree(vcap);
+}
+
+static void vimc_cap_process_frame(struct vimc_ent_device *ved,
+ struct media_pad *sink, const void *frame)
+{
+ struct vimc_cap_device *vcap = container_of(ved, struct vimc_cap_device,
+ ved);
+ struct vimc_cap_buffer *vimc_buf;
+ void *vbuf;
+
+ spin_lock(&vcap->qlock);
+
+ /* Get the first entry of the list */
+ vimc_buf = list_first_entry_or_null(&vcap->buf_list,
+ typeof(*vimc_buf), list);
+ if (!vimc_buf) {
+ spin_unlock(&vcap->qlock);
+ return;
+ }
+
+ /* Remove this entry from the list */
+ list_del(&vimc_buf->list);
+
+ spin_unlock(&vcap->qlock);
+
+ /* Fill the buffer */
+ vimc_buf->vb2.vb2_buf.timestamp = ktime_get_ns();
+ vimc_buf->vb2.sequence = vcap->sequence++;
+ vimc_buf->vb2.field = vcap->format.field;
+
+ vbuf = vb2_plane_vaddr(&vimc_buf->vb2.vb2_buf, 0);
+
+ memcpy(vbuf, frame, vcap->format.sizeimage);
+
+ /* Set it as ready */
+ vb2_set_plane_payload(&vimc_buf->vb2.vb2_buf, 0,
+ vcap->format.sizeimage);
+ vb2_buffer_done(&vimc_buf->vb2.vb2_buf, VB2_BUF_STATE_DONE);
+}
+
+struct vimc_ent_device *vimc_cap_create(struct v4l2_device *v4l2_dev,
+ const char *const name,
+ u16 num_pads,
+ const unsigned long *pads_flag)
+{
+ const struct vimc_pix_map *vpix;
+ struct vimc_cap_device *vcap;
+ struct video_device *vdev;
+ struct vb2_queue *q;
+ int ret;
+
+ /*
+ * Check entity configuration params
+ * NOTE: we only support a single sink pad
+ */
+ if (!name || num_pads != 1 || !pads_flag ||
+ !(pads_flag[0] & MEDIA_PAD_FL_SINK))
+ return ERR_PTR(-EINVAL);
+
+ /* Allocate the vimc_cap_device struct */
+ vcap = kzalloc(sizeof(*vcap), GFP_KERNEL);
+ if (!vcap)
+ return ERR_PTR(-ENOMEM);
+
+ /* Allocate the pads */
+ vcap->ved.pads = vimc_pads_init(num_pads, pads_flag);
+ if (IS_ERR(vcap->ved.pads)) {
+ ret = PTR_ERR(vcap->ved.pads);
+ goto err_free_vcap;
+ }
+
+ /* Initialize the media entity */
+ vcap->vdev.entity.name = name;
+ vcap->vdev.entity.function = MEDIA_ENT_F_IO_V4L;
+ ret = media_entity_pads_init(&vcap->vdev.entity,
+ num_pads, vcap->ved.pads);
+ if (ret)
+ goto err_clean_pads;
+
+ /* Initialize the lock */
+ mutex_init(&vcap->lock);
+
+ /* Initialize the vb2 queue */
+ q = &vcap->queue;
+ q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ q->io_modes = VB2_MMAP | VB2_DMABUF;
+ q->drv_priv = vcap;
+ q->buf_struct_size = sizeof(struct vimc_cap_buffer);
+ q->ops = &vimc_cap_qops;
+ q->mem_ops = &vb2_vmalloc_memops;
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->min_buffers_needed = 2;
+ q->lock = &vcap->lock;
+
+ ret = vb2_queue_init(q);
+ if (ret) {
+ dev_err(vcap->vdev.v4l2_dev->dev,
+ "%s: vb2 queue init failed (err=%d)\n",
+ vcap->vdev.name, ret);
+ goto err_clean_m_ent;
+ }
+
+ /* Initialize buffer list and its lock */
+ INIT_LIST_HEAD(&vcap->buf_list);
+ spin_lock_init(&vcap->qlock);
+
+ /* Set the frame format (this is hardcoded for now) */
+ vcap->format.width = 640;
+ vcap->format.height = 480;
+ vcap->format.pixelformat = V4L2_PIX_FMT_RGB24;
+ vcap->format.field = V4L2_FIELD_NONE;
+ vcap->format.colorspace = V4L2_COLORSPACE_SRGB;
+
+ vpix = vimc_pix_map_by_pixelformat(vcap->format.pixelformat);
+
+ vcap->format.bytesperline = vcap->format.width * vpix->bpp;
+ vcap->format.sizeimage = vcap->format.bytesperline *
+ vcap->format.height;
+
+ /* Fill the vimc_ent_device struct */
+ vcap->ved.destroy = vimc_cap_destroy;
+ vcap->ved.ent = &vcap->vdev.entity;
+ vcap->ved.process_frame = vimc_cap_process_frame;
+
+ /* Initialize the video_device struct */
+ vdev = &vcap->vdev;
+ vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+ vdev->entity.ops = &vimc_cap_mops;
+ vdev->release = video_device_release_empty;
+ vdev->fops = &vimc_cap_fops;
+ vdev->ioctl_ops = &vimc_cap_ioctl_ops;
+ vdev->lock = &vcap->lock;
+ vdev->queue = q;
+ vdev->v4l2_dev = v4l2_dev;
+ vdev->vfl_dir = VFL_DIR_RX;
+ strlcpy(vdev->name, name, sizeof(vdev->name));
+ video_set_drvdata(vdev, &vcap->ved);
+
+ /* Register the video_device with the v4l2 and the media framework */
+ ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
+ if (ret) {
+ dev_err(vcap->vdev.v4l2_dev->dev,
+ "%s: video register failed (err=%d)\n",
+ vcap->vdev.name, ret);
+ goto err_release_queue;
+ }
+
+ return &vcap->ved;
+
+err_release_queue:
+ vb2_queue_release(q);
+err_clean_m_ent:
+ media_entity_cleanup(&vcap->vdev.entity);
+err_clean_pads:
+ vimc_pads_cleanup(vcap->ved.pads);
+err_free_vcap:
+ kfree(vcap);
+
+ return ERR_PTR(ret);
+}
diff --git a/drivers/media/platform/vimc/vimc-capture.h b/drivers/media/platform/vimc/vimc-capture.h
new file mode 100644
index 000000000000..581a813abdf1
--- /dev/null
+++ b/drivers/media/platform/vimc/vimc-capture.h
@@ -0,0 +1,28 @@
+/*
+ * vimc-capture.h Virtual Media Controller Driver
+ *
+ * Copyright (C) 2015-2017 Helen Koike <helen.fornazier@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _VIMC_CAPTURE_H_
+#define _VIMC_CAPTURE_H_
+
+#include "vimc-core.h"
+
+struct vimc_ent_device *vimc_cap_create(struct v4l2_device *v4l2_dev,
+ const char *const name,
+ u16 num_pads,
+ const unsigned long *pads_flag);
+
+#endif
diff --git a/drivers/media/platform/vimc/vimc-core.c b/drivers/media/platform/vimc/vimc-core.c
new file mode 100644
index 000000000000..bc107da8fbd5
--- /dev/null
+++ b/drivers/media/platform/vimc/vimc-core.c
@@ -0,0 +1,695 @@
+/*
+ * vimc-core.c Virtual Media Controller Driver
+ *
+ * Copyright (C) 2015-2017 Helen Koike <helen.fornazier@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <media/media-device.h>
+#include <media/v4l2-device.h>
+
+#include "vimc-capture.h"
+#include "vimc-core.h"
+#include "vimc-sensor.h"
+
+#define VIMC_PDEV_NAME "vimc"
+#define VIMC_MDEV_MODEL_NAME "VIMC MDEV"
+
+#define VIMC_ENT_LINK(src, srcpad, sink, sinkpad, link_flags) { \
+ .src_ent = src, \
+ .src_pad = srcpad, \
+ .sink_ent = sink, \
+ .sink_pad = sinkpad, \
+ .flags = link_flags, \
+}
+
+struct vimc_device {
+ /*
+ * The pipeline configuration
+ * (filled before calling vimc_device_register)
+ */
+ const struct vimc_pipeline_config *pipe_cfg;
+
+ /* The associated media_device parent */
+ struct media_device mdev;
+
+ /* Internal v4l2 parent device */
+ struct v4l2_device v4l2_dev;
+
+ /* Internal topology */
+ struct vimc_ent_device **ved;
+};
+
+/**
+ * enum vimc_ent_node - Select the functionality of a node in the topology
+ * @VIMC_ENT_NODE_SENSOR: A node of type SENSOR simulates a camera sensor
+ * generating internal images in bayer format and
+ * propagating those images through the pipeline
+ * @VIMC_ENT_NODE_CAPTURE: A node of type CAPTURE is a v4l2 video_device
+ * that exposes the received image from the
+ * pipeline to the user space
+ * @VIMC_ENT_NODE_INPUT: A node of type INPUT is a v4l2 video_device that
+ * receives images from the user space and
+ * propagates them through the pipeline
+ * @VIMC_ENT_NODE_DEBAYER: A node of type DEBAYER expects to receive a
+ * frame in bayer format and converts it to RGB
+ * @VIMC_ENT_NODE_SCALER: A node of type SCALER scales the received image
+ * by a given multiplier
+ *
+ * This enum is used in the entity configuration struct to allow the definition
+ * of a custom topology specifying the role of each node on it.
+ */
+enum vimc_ent_node {
+ VIMC_ENT_NODE_SENSOR,
+ VIMC_ENT_NODE_CAPTURE,
+ VIMC_ENT_NODE_INPUT,
+ VIMC_ENT_NODE_DEBAYER,
+ VIMC_ENT_NODE_SCALER,
+};
+
+/* Structure which describes individual configuration for each entity */
+struct vimc_ent_config {
+ const char *name;
+ size_t pads_qty;
+ const unsigned long *pads_flag;
+ enum vimc_ent_node node;
+};
+
+/* Structure which describes links between entities */
+struct vimc_ent_link {
+ unsigned int src_ent;
+ u16 src_pad;
+ unsigned int sink_ent;
+ u16 sink_pad;
+ u32 flags;
+};
+
+/* Structure which describes the whole topology */
+struct vimc_pipeline_config {
+ const struct vimc_ent_config *ents;
+ size_t num_ents;
+ const struct vimc_ent_link *links;
+ size_t num_links;
+};
+
+/* --------------------------------------------------------------------------
+ * Topology Configuration
+ */
+
+static const struct vimc_ent_config ent_config[] = {
+ {
+ .name = "Sensor A",
+ .pads_qty = 1,
+ .pads_flag = (const unsigned long[]){MEDIA_PAD_FL_SOURCE},
+ .node = VIMC_ENT_NODE_SENSOR,
+ },
+ {
+ .name = "Sensor B",
+ .pads_qty = 1,
+ .pads_flag = (const unsigned long[]){MEDIA_PAD_FL_SOURCE},
+ .node = VIMC_ENT_NODE_SENSOR,
+ },
+ {
+ .name = "Debayer A",
+ .pads_qty = 2,
+ .pads_flag = (const unsigned long[]){MEDIA_PAD_FL_SINK,
+ MEDIA_PAD_FL_SOURCE},
+ .node = VIMC_ENT_NODE_DEBAYER,
+ },
+ {
+ .name = "Debayer B",
+ .pads_qty = 2,
+ .pads_flag = (const unsigned long[]){MEDIA_PAD_FL_SINK,
+ MEDIA_PAD_FL_SOURCE},
+ .node = VIMC_ENT_NODE_DEBAYER,
+ },
+ {
+ .name = "Raw Capture 0",
+ .pads_qty = 1,
+ .pads_flag = (const unsigned long[]){MEDIA_PAD_FL_SINK},
+ .node = VIMC_ENT_NODE_CAPTURE,
+ },
+ {
+ .name = "Raw Capture 1",
+ .pads_qty = 1,
+ .pads_flag = (const unsigned long[]){MEDIA_PAD_FL_SINK},
+ .node = VIMC_ENT_NODE_CAPTURE,
+ },
+ {
+ .name = "RGB/YUV Input",
+ .pads_qty = 1,
+ .pads_flag = (const unsigned long[]){MEDIA_PAD_FL_SOURCE},
+ .node = VIMC_ENT_NODE_INPUT,
+ },
+ {
+ .name = "Scaler",
+ .pads_qty = 2,
+ .pads_flag = (const unsigned long[]){MEDIA_PAD_FL_SINK,
+ MEDIA_PAD_FL_SOURCE},
+ .node = VIMC_ENT_NODE_SCALER,
+ },
+ {
+ .name = "RGB/YUV Capture",
+ .pads_qty = 1,
+ .pads_flag = (const unsigned long[]){MEDIA_PAD_FL_SINK},
+ .node = VIMC_ENT_NODE_CAPTURE,
+ },
+};
+
+static const struct vimc_ent_link ent_links[] = {
+ /* Link: Sensor A (Pad 0)->(Pad 0) Debayer A */
+ VIMC_ENT_LINK(0, 0, 2, 0, MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE),
+ /* Link: Sensor A (Pad 0)->(Pad 0) Raw Capture 0 */
+ VIMC_ENT_LINK(0, 0, 4, 0, MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE),
+ /* Link: Sensor B (Pad 0)->(Pad 0) Debayer B */
+ VIMC_ENT_LINK(1, 0, 3, 0, MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE),
+ /* Link: Sensor B (Pad 0)->(Pad 0) Raw Capture 1 */
+ VIMC_ENT_LINK(1, 0, 5, 0, MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE),
+ /* Link: Debayer A (Pad 1)->(Pad 0) Scaler */
+ VIMC_ENT_LINK(2, 1, 7, 0, MEDIA_LNK_FL_ENABLED),
+ /* Link: Debayer B (Pad 1)->(Pad 0) Scaler */
+ VIMC_ENT_LINK(3, 1, 7, 0, 0),
+ /* Link: RGB/YUV Input (Pad 0)->(Pad 0) Scaler */
+ VIMC_ENT_LINK(6, 0, 7, 0, 0),
+ /* Link: Scaler (Pad 1)->(Pad 0) RGB/YUV Capture */
+ VIMC_ENT_LINK(7, 1, 8, 0, MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE),
+};
+
+static const struct vimc_pipeline_config pipe_cfg = {
+ .ents = ent_config,
+ .num_ents = ARRAY_SIZE(ent_config),
+ .links = ent_links,
+ .num_links = ARRAY_SIZE(ent_links)
+};
+
+/* -------------------------------------------------------------------------- */
+
+static const struct vimc_pix_map vimc_pix_map_list[] = {
+ /* TODO: add all missing formats */
+
+ /* RGB formats */
+ {
+ .code = MEDIA_BUS_FMT_BGR888_1X24,
+ .pixelformat = V4L2_PIX_FMT_BGR24,
+ .bpp = 3,
+ },
+ {
+ .code = MEDIA_BUS_FMT_RGB888_1X24,
+ .pixelformat = V4L2_PIX_FMT_RGB24,
+ .bpp = 3,
+ },
+ {
+ .code = MEDIA_BUS_FMT_ARGB8888_1X32,
+ .pixelformat = V4L2_PIX_FMT_ARGB32,
+ .bpp = 4,
+ },
+
+ /* Bayer formats */
+ {
+ .code = MEDIA_BUS_FMT_SBGGR8_1X8,
+ .pixelformat = V4L2_PIX_FMT_SBGGR8,
+ .bpp = 1,
+ },
+ {
+ .code = MEDIA_BUS_FMT_SGBRG8_1X8,
+ .pixelformat = V4L2_PIX_FMT_SGBRG8,
+ .bpp = 1,
+ },
+ {
+ .code = MEDIA_BUS_FMT_SGRBG8_1X8,
+ .pixelformat = V4L2_PIX_FMT_SGRBG8,
+ .bpp = 1,
+ },
+ {
+ .code = MEDIA_BUS_FMT_SRGGB8_1X8,
+ .pixelformat = V4L2_PIX_FMT_SRGGB8,
+ .bpp = 1,
+ },
+ {
+ .code = MEDIA_BUS_FMT_SBGGR10_1X10,
+ .pixelformat = V4L2_PIX_FMT_SBGGR10,
+ .bpp = 2,
+ },
+ {
+ .code = MEDIA_BUS_FMT_SGBRG10_1X10,
+ .pixelformat = V4L2_PIX_FMT_SGBRG10,
+ .bpp = 2,
+ },
+ {
+ .code = MEDIA_BUS_FMT_SGRBG10_1X10,
+ .pixelformat = V4L2_PIX_FMT_SGRBG10,
+ .bpp = 2,
+ },
+ {
+ .code = MEDIA_BUS_FMT_SRGGB10_1X10,
+ .pixelformat = V4L2_PIX_FMT_SRGGB10,
+ .bpp = 2,
+ },
+
+ /* 10bit raw bayer a-law compressed to 8 bits */
+ {
+ .code = MEDIA_BUS_FMT_SBGGR10_ALAW8_1X8,
+ .pixelformat = V4L2_PIX_FMT_SBGGR10ALAW8,
+ .bpp = 1,
+ },
+ {
+ .code = MEDIA_BUS_FMT_SGBRG10_ALAW8_1X8,
+ .pixelformat = V4L2_PIX_FMT_SGBRG10ALAW8,
+ .bpp = 1,
+ },
+ {
+ .code = MEDIA_BUS_FMT_SGRBG10_ALAW8_1X8,
+ .pixelformat = V4L2_PIX_FMT_SGRBG10ALAW8,
+ .bpp = 1,
+ },
+ {
+ .code = MEDIA_BUS_FMT_SRGGB10_ALAW8_1X8,
+ .pixelformat = V4L2_PIX_FMT_SRGGB10ALAW8,
+ .bpp = 1,
+ },
+
+ /* 10bit raw bayer DPCM compressed to 8 bits */
+ {
+ .code = MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8,
+ .pixelformat = V4L2_PIX_FMT_SBGGR10DPCM8,
+ .bpp = 1,
+ },
+ {
+ .code = MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8,
+ .pixelformat = V4L2_PIX_FMT_SGBRG10DPCM8,
+ .bpp = 1,
+ },
+ {
+ .code = MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8,
+ .pixelformat = V4L2_PIX_FMT_SGRBG10DPCM8,
+ .bpp = 1,
+ },
+ {
+ .code = MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8,
+ .pixelformat = V4L2_PIX_FMT_SRGGB10DPCM8,
+ .bpp = 1,
+ },
+ {
+ .code = MEDIA_BUS_FMT_SBGGR12_1X12,
+ .pixelformat = V4L2_PIX_FMT_SBGGR12,
+ .bpp = 2,
+ },
+ {
+ .code = MEDIA_BUS_FMT_SGBRG12_1X12,
+ .pixelformat = V4L2_PIX_FMT_SGBRG12,
+ .bpp = 2,
+ },
+ {
+ .code = MEDIA_BUS_FMT_SGRBG12_1X12,
+ .pixelformat = V4L2_PIX_FMT_SGRBG12,
+ .bpp = 2,
+ },
+ {
+ .code = MEDIA_BUS_FMT_SRGGB12_1X12,
+ .pixelformat = V4L2_PIX_FMT_SRGGB12,
+ .bpp = 2,
+ },
+};
+
+const struct vimc_pix_map *vimc_pix_map_by_code(u32 code)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(vimc_pix_map_list); i++) {
+ if (vimc_pix_map_list[i].code == code)
+ return &vimc_pix_map_list[i];
+ }
+ return NULL;
+}
+
+const struct vimc_pix_map *vimc_pix_map_by_pixelformat(u32 pixelformat)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(vimc_pix_map_list); i++) {
+ if (vimc_pix_map_list[i].pixelformat == pixelformat)
+ return &vimc_pix_map_list[i];
+ }
+ return NULL;
+}
+
+int vimc_propagate_frame(struct media_pad *src, const void *frame)
+{
+ struct media_link *link;
+
+ if (!(src->flags & MEDIA_PAD_FL_SOURCE))
+ return -EINVAL;
+
+ /* Send this frame to all sink pads that are directly linked */
+ list_for_each_entry(link, &src->entity->links, list) {
+ if (link->source == src &&
+ (link->flags & MEDIA_LNK_FL_ENABLED)) {
+ struct vimc_ent_device *ved = NULL;
+ struct media_entity *entity = link->sink->entity;
+
+ if (is_media_entity_v4l2_subdev(entity)) {
+ struct v4l2_subdev *sd =
+ container_of(entity, struct v4l2_subdev,
+ entity);
+ ved = v4l2_get_subdevdata(sd);
+ } else if (is_media_entity_v4l2_video_device(entity)) {
+ struct video_device *vdev =
+ container_of(entity,
+ struct video_device,
+ entity);
+ ved = video_get_drvdata(vdev);
+ }
+ if (ved && ved->process_frame)
+ ved->process_frame(ved, link->sink, frame);
+ }
+ }
+
+ return 0;
+}
+
+static void vimc_device_unregister(struct vimc_device *vimc)
+{
+ unsigned int i;
+
+ media_device_unregister(&vimc->mdev);
+ /* Cleanup (only initialized) entities */
+ for (i = 0; i < vimc->pipe_cfg->num_ents; i++) {
+ if (vimc->ved[i] && vimc->ved[i]->destroy)
+ vimc->ved[i]->destroy(vimc->ved[i]);
+
+ vimc->ved[i] = NULL;
+ }
+ v4l2_device_unregister(&vimc->v4l2_dev);
+ media_device_cleanup(&vimc->mdev);
+}
+
+/* Helper function to allocate and initialize pads */
+struct media_pad *vimc_pads_init(u16 num_pads, const unsigned long *pads_flag)
+{
+ struct media_pad *pads;
+ unsigned int i;
+
+ /* Allocate memory for the pads */
+ pads = kcalloc(num_pads, sizeof(*pads), GFP_KERNEL);
+ if (!pads)
+ return ERR_PTR(-ENOMEM);
+
+ /* Initialize the pads */
+ for (i = 0; i < num_pads; i++) {
+ pads[i].index = i;
+ pads[i].flags = pads_flag[i];
+ }
+
+ return pads;
+}
+
+/*
+ * TODO: remove this function when all the
+ * entity-specific code is implemented
+ */
+static void vimc_raw_destroy(struct vimc_ent_device *ved)
+{
+ media_device_unregister_entity(ved->ent);
+
+ media_entity_cleanup(ved->ent);
+
+ vimc_pads_cleanup(ved->pads);
+
+ kfree(ved->ent);
+
+ kfree(ved);
+}
+
+/*
+ * TODO: remove this function when all the
+ * entity-specific code is implemented
+ */
+static struct vimc_ent_device *vimc_raw_create(struct v4l2_device *v4l2_dev,
+ const char *const name,
+ u16 num_pads,
+ const unsigned long *pads_flag)
+{
+ struct vimc_ent_device *ved;
+ int ret;
+
+ /* Allocate the main ved struct */
+ ved = kzalloc(sizeof(*ved), GFP_KERNEL);
+ if (!ved)
+ return ERR_PTR(-ENOMEM);
+
+ /* Allocate the media entity */
+ ved->ent = kzalloc(sizeof(*ved->ent), GFP_KERNEL);
+ if (!ved->ent) {
+ ret = -ENOMEM;
+ goto err_free_ved;
+ }
+
+ /* Allocate the pads */
+ ved->pads = vimc_pads_init(num_pads, pads_flag);
+ if (IS_ERR(ved->pads)) {
+ ret = PTR_ERR(ved->pads);
+ goto err_free_ent;
+ }
+
+ /* Initialize the media entity */
+ ved->ent->name = name;
+ ved->ent->function = MEDIA_ENT_F_V4L2_SUBDEV_UNKNOWN;
+ ret = media_entity_pads_init(ved->ent, num_pads, ved->pads);
+ if (ret)
+ goto err_cleanup_pads;
+
+ /* Register the media entity */
+ ret = media_device_register_entity(v4l2_dev->mdev, ved->ent);
+ if (ret)
+ goto err_cleanup_entity;
+
+ /* Fill out the destroy function and return */
+ ved->destroy = vimc_raw_destroy;
+ return ved;
+
+err_cleanup_entity:
+ media_entity_cleanup(ved->ent);
+err_cleanup_pads:
+ vimc_pads_cleanup(ved->pads);
+err_free_ent:
+ kfree(ved->ent);
+err_free_ved:
+ kfree(ved);
+
+ return ERR_PTR(ret);
+}
+
+static int vimc_device_register(struct vimc_device *vimc)
+{
+ unsigned int i;
+ int ret;
+
+ /* Allocate memory for the vimc_ent_devices pointers */
+ vimc->ved = devm_kcalloc(vimc->mdev.dev, vimc->pipe_cfg->num_ents,
+ sizeof(*vimc->ved), GFP_KERNEL);
+ if (!vimc->ved)
+ return -ENOMEM;
+
+ /* Link the media device within the v4l2_device */
+ vimc->v4l2_dev.mdev = &vimc->mdev;
+
+ /* Register the v4l2 struct */
+ ret = v4l2_device_register(vimc->mdev.dev, &vimc->v4l2_dev);
+ if (ret) {
+ dev_err(vimc->mdev.dev,
+ "v4l2 device register failed (err=%d)\n", ret);
+ return ret;
+ }
+
+ /* Initialize entities */
+ for (i = 0; i < vimc->pipe_cfg->num_ents; i++) {
+ struct vimc_ent_device *(*create_func)(struct v4l2_device *,
+ const char *const,
+ u16,
+ const unsigned long *);
+
+ /* Register the specific node */
+ switch (vimc->pipe_cfg->ents[i].node) {
+ case VIMC_ENT_NODE_SENSOR:
+ create_func = vimc_sen_create;
+ break;
+
+ case VIMC_ENT_NODE_CAPTURE:
+ create_func = vimc_cap_create;
+ break;
+
+ /* TODO: Instantiate the specific topology node */
+ case VIMC_ENT_NODE_INPUT:
+ case VIMC_ENT_NODE_DEBAYER:
+ case VIMC_ENT_NODE_SCALER:
+ default:
+ /*
+ * TODO: remove this when all the entity-specific
+ * code is implemented
+ */
+ create_func = vimc_raw_create;
+ break;
+ }
+
+ vimc->ved[i] = create_func(&vimc->v4l2_dev,
+ vimc->pipe_cfg->ents[i].name,
+ vimc->pipe_cfg->ents[i].pads_qty,
+ vimc->pipe_cfg->ents[i].pads_flag);
+ if (IS_ERR(vimc->ved[i])) {
+ ret = PTR_ERR(vimc->ved[i]);
+ vimc->ved[i] = NULL;
+ goto err;
+ }
+ }
+
+ /* Initialize the links between entities */
+ for (i = 0; i < vimc->pipe_cfg->num_links; i++) {
+ const struct vimc_ent_link *link = &vimc->pipe_cfg->links[i];
+
+ ret = media_create_pad_link(vimc->ved[link->src_ent]->ent,
+ link->src_pad,
+ vimc->ved[link->sink_ent]->ent,
+ link->sink_pad,
+ link->flags);
+ if (ret)
+ goto err;
+ }
+
+ /* Register the media device */
+ ret = media_device_register(&vimc->mdev);
+ if (ret) {
+ dev_err(vimc->mdev.dev,
+ "media device register failed (err=%d)\n", ret);
+ return ret;
+ }
+
+ /* Expose all subdev nodes */
+ ret = v4l2_device_register_subdev_nodes(&vimc->v4l2_dev);
+ if (ret) {
+ dev_err(vimc->mdev.dev,
+ "vimc subdev nodes registration failed (err=%d)\n",
+ ret);
+ goto err;
+ }
+
+ return 0;
+
+err:
+ /* Destroy the topology created so far */
+ vimc_device_unregister(vimc);
+
+ return ret;
+}
+
+static int vimc_probe(struct platform_device *pdev)
+{
+ struct vimc_device *vimc;
+ int ret;
+
+ /* Prepare the vimc topology structure */
+
+ /* Allocate memory for the vimc structure */
+ vimc = kzalloc(sizeof(*vimc), GFP_KERNEL);
+ if (!vimc)
+ return -ENOMEM;
+
+ /* Set the pipeline configuration struct */
+ vimc->pipe_cfg = &pipe_cfg;
+
+ /* Initialize media device */
+ strlcpy(vimc->mdev.model, VIMC_MDEV_MODEL_NAME,
+ sizeof(vimc->mdev.model));
+ vimc->mdev.dev = &pdev->dev;
+ media_device_init(&vimc->mdev);
+
+ /* Create vimc topology */
+ ret = vimc_device_register(vimc);
+ if (ret) {
+ dev_err(vimc->mdev.dev,
+ "vimc device registration failed (err=%d)\n", ret);
+ kfree(vimc);
+ return ret;
+ }
+
+ /* Link the topology object with the platform device object */
+ platform_set_drvdata(pdev, vimc);
+
+ return 0;
+}
+
+static int vimc_remove(struct platform_device *pdev)
+{
+ struct vimc_device *vimc = platform_get_drvdata(pdev);
+
+ /* Destroy the whole topology */
+ vimc_device_unregister(vimc);
+ kfree(vimc);
+
+ return 0;
+}
+
+static void vimc_dev_release(struct device *dev)
+{
+}
+
+static struct platform_device vimc_pdev = {
+ .name = VIMC_PDEV_NAME,
+ .dev.release = vimc_dev_release,
+};
+
+static struct platform_driver vimc_pdrv = {
+ .probe = vimc_probe,
+ .remove = vimc_remove,
+ .driver = {
+ .name = VIMC_PDEV_NAME,
+ },
+};
+
+static int __init vimc_init(void)
+{
+ int ret;
+
+ ret = platform_device_register(&vimc_pdev);
+ if (ret) {
+ dev_err(&vimc_pdev.dev,
+ "platform device registration failed (err=%d)\n", ret);
+ return ret;
+ }
+
+ ret = platform_driver_register(&vimc_pdrv);
+ if (ret) {
+ dev_err(&vimc_pdev.dev,
+ "platform driver registration failed (err=%d)\n", ret);
+
+ platform_device_unregister(&vimc_pdev);
+ }
+
+ return ret;
+}
+
+static void __exit vimc_exit(void)
+{
+ platform_driver_unregister(&vimc_pdrv);
+
+ platform_device_unregister(&vimc_pdev);
+}
+
+module_init(vimc_init);
+module_exit(vimc_exit);
+
+MODULE_DESCRIPTION("Virtual Media Controller Driver (VIMC)");
+MODULE_AUTHOR("Helen Fornazier <helen.fornazier@gmail.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/platform/vimc/vimc-core.h b/drivers/media/platform/vimc/vimc-core.h
new file mode 100644
index 000000000000..4525d23211ca
--- /dev/null
+++ b/drivers/media/platform/vimc/vimc-core.h
@@ -0,0 +1,112 @@
+/*
+ * vimc-core.h Virtual Media Controller Driver
+ *
+ * Copyright (C) 2015-2017 Helen Koike <helen.fornazier@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _VIMC_CORE_H_
+#define _VIMC_CORE_H_
+
+#include <linux/slab.h>
+#include <media/v4l2-device.h>
+
+/**
+ * struct vimc_pix_map - maps media bus code with v4l2 pixel format
+ *
+ * @code: media bus format code defined by MEDIA_BUS_FMT_* macros
+ * @bpp: number of bytes each pixel occupies
+ * @pixelformat: pixel format defined by V4L2_PIX_FMT_* macros
+ *
+ * Struct which matches the MEDIA_BUS_FMT_* codes with the corresponding
+ * V4L2_PIX_FMT_* fourcc pixelformat and its bytes per pixel (bpp)
+ */
+struct vimc_pix_map {
+ unsigned int code;
+ unsigned int bpp;
+ u32 pixelformat;
+};
+
+/**
+ * struct vimc_ent_device - core struct that represents a node in the topology
+ *
+ * @ent: the pointer to struct media_entity for the node
+ * @pads: the list of pads of the node
+ * @destroy: callback to destroy the node
+ * @process_frame: callback to send a frame to that node
+ *
+ * Each node of the topology must create a vimc_ent_device struct. Depending on
+ * the node, it will be an instance of either a v4l2_subdev or a
+ * video_device struct, both of which contain a struct media_entity.
+ * Those structures should embed the vimc_ent_device struct through
+ * v4l2_set_subdevdata() and video_set_drvdata() respectively, allowing the
+ * vimc_ent_device struct to be retrieved from the corresponding struct
+ * media_entity
+ */
+struct vimc_ent_device {
+ struct media_entity *ent;
+ struct media_pad *pads;
+ void (*destroy)(struct vimc_ent_device *);
+ void (*process_frame)(struct vimc_ent_device *ved,
+ struct media_pad *sink, const void *frame);
+};
+
+/**
+ * vimc_propagate_frame - propagate a frame through the topology
+ *
+ * @src: the source pad where the frame is being originated
+ * @frame: the frame to be propagated
+ *
+ * This function will call the process_frame callback from the vimc_ent_device
+ * struct of the nodes directly connected to the @src pad
+ */
+int vimc_propagate_frame(struct media_pad *src, const void *frame);
+
+/**
+ * vimc_pads_init - initialize pads
+ *
+ * @num_pads: number of pads to initialize
+ * @pads_flag: flags to use in each pad
+ *
+ * Helper function to allocate and initialize pads
+ */
+struct media_pad *vimc_pads_init(u16 num_pads,
+ const unsigned long *pads_flag);
+
+/**
+ * vimc_pads_cleanup - free pads
+ *
+ * @pads: pointer to the pads
+ *
+ * Helper function to free the pads initialized with vimc_pads_init
+ */
+static inline void vimc_pads_cleanup(struct media_pad *pads)
+{
+ kfree(pads);
+}
+
+/**
+ * vimc_pix_map_by_code - get vimc_pix_map struct by media bus code
+ *
+ * @code: media bus format code defined by MEDIA_BUS_FMT_* macros
+ */
+const struct vimc_pix_map *vimc_pix_map_by_code(u32 code);
+
+/**
+ * vimc_pix_map_by_pixelformat - get vimc_pix_map struct by v4l2 pixel format
+ *
+ * @pixelformat: pixel format defined by V4L2_PIX_FMT_* macros
+ */
+const struct vimc_pix_map *vimc_pix_map_by_pixelformat(u32 pixelformat);
+
+#endif
diff --git a/drivers/media/platform/vimc/vimc-sensor.c b/drivers/media/platform/vimc/vimc-sensor.c
new file mode 100644
index 000000000000..591f6a4f8bd3
--- /dev/null
+++ b/drivers/media/platform/vimc/vimc-sensor.c
@@ -0,0 +1,276 @@
+/*
+ * vimc-sensor.c Virtual Media Controller Driver
+ *
+ * Copyright (C) 2015-2017 Helen Koike <helen.fornazier@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/freezer.h>
+#include <linux/kthread.h>
+#include <linux/v4l2-mediabus.h>
+#include <linux/vmalloc.h>
+#include <media/v4l2-subdev.h>
+
+#include "vimc-sensor.h"
+
+struct vimc_sen_device {
+ struct vimc_ent_device ved;
+ struct v4l2_subdev sd;
+ struct task_struct *kthread_sen;
+ u8 *frame;
+ /* The active format */
+ struct v4l2_mbus_framefmt mbus_format;
+ int frame_size;
+};
+
+static int vimc_sen_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ struct vimc_sen_device *vsen =
+ container_of(sd, struct vimc_sen_device, sd);
+
+ /* TODO: Add support for other codes */
+ if (code->index)
+ return -EINVAL;
+
+ code->code = vsen->mbus_format.code;
+
+ return 0;
+}
+
+static int vimc_sen_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ struct vimc_sen_device *vsen =
+ container_of(sd, struct vimc_sen_device, sd);
+
+ /* TODO: Add support for other frame sizes */
+ if (fse->index)
+ return -EINVAL;
+
+ /* TODO: Add support for other codes */
+ if (fse->code != vsen->mbus_format.code)
+ return -EINVAL;
+
+ fse->min_width = vsen->mbus_format.width;
+ fse->max_width = vsen->mbus_format.width;
+ fse->min_height = vsen->mbus_format.height;
+ fse->max_height = vsen->mbus_format.height;
+
+ return 0;
+}
+
+static int vimc_sen_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *format)
+{
+ struct vimc_sen_device *vsen =
+ container_of(sd, struct vimc_sen_device, sd);
+
+ format->format = vsen->mbus_format;
+
+ return 0;
+}
+
+static const struct v4l2_subdev_pad_ops vimc_sen_pad_ops = {
+ .enum_mbus_code = vimc_sen_enum_mbus_code,
+ .enum_frame_size = vimc_sen_enum_frame_size,
+ .get_fmt = vimc_sen_get_fmt,
+ /* TODO: Add support for other formats */
+ .set_fmt = vimc_sen_get_fmt,
+};
+
+/* media operations */
+static const struct media_entity_operations vimc_sen_mops = {
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+static int vimc_thread_sen(void *data)
+{
+ struct vimc_sen_device *vsen = data;
+ unsigned int i;
+
+ set_freezable();
+
+ for (;;) {
+ try_to_freeze();
+ if (kthread_should_stop())
+ break;
+
+ memset(vsen->frame, 100, vsen->frame_size);
+
+ /* Send the frame to all source pads */
+ for (i = 0; i < vsen->sd.entity.num_pads; i++)
+ vimc_propagate_frame(&vsen->sd.entity.pads[i],
+ vsen->frame);
+
+ /*
+ * 60 frames per second. The task state must be (re)set before each
+ * schedule_timeout() call, otherwise the loop would spin without
+ * sleeping after the first iteration.
+ */
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(HZ/60);
+ }
+
+ return 0;
+}
+
+static int vimc_sen_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct vimc_sen_device *vsen =
+ container_of(sd, struct vimc_sen_device, sd);
+ int ret;
+
+ if (enable) {
+ const struct vimc_pix_map *vpix;
+
+ if (vsen->kthread_sen)
+ return -EINVAL;
+
+ /* Calculate the frame size */
+ vpix = vimc_pix_map_by_code(vsen->mbus_format.code);
+ vsen->frame_size = vsen->mbus_format.width * vpix->bpp *
+ vsen->mbus_format.height;
+
+ /*
+ * Allocate the frame buffer. Use vmalloc to be able to
+ * allocate a large amount of memory
+ */
+ vsen->frame = vmalloc(vsen->frame_size);
+ if (!vsen->frame)
+ return -ENOMEM;
+
+ /* Initialize the image generator thread */
+ vsen->kthread_sen = kthread_run(vimc_thread_sen, vsen, "%s-sen",
+ vsen->sd.v4l2_dev->name);
+ if (IS_ERR(vsen->kthread_sen)) {
+ dev_err(vsen->sd.v4l2_dev->dev,
+ "%s: kernel_thread() failed\n", vsen->sd.name);
+ vfree(vsen->frame);
+ vsen->frame = NULL;
+ return PTR_ERR(vsen->kthread_sen);
+ }
+ } else {
+ if (!vsen->kthread_sen)
+ return -EINVAL;
+
+ /* Stop image generator */
+ ret = kthread_stop(vsen->kthread_sen);
+ vsen->kthread_sen = NULL;
+
+ vfree(vsen->frame);
+ vsen->frame = NULL;
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct v4l2_subdev_video_ops vimc_sen_video_ops = {
+ .s_stream = vimc_sen_s_stream,
+};
+
+static const struct v4l2_subdev_ops vimc_sen_ops = {
+ .pad = &vimc_sen_pad_ops,
+ .video = &vimc_sen_video_ops,
+};
+
+static void vimc_sen_destroy(struct vimc_ent_device *ved)
+{
+ struct vimc_sen_device *vsen =
+ container_of(ved, struct vimc_sen_device, ved);
+
+ v4l2_device_unregister_subdev(&vsen->sd);
+ media_entity_cleanup(ved->ent);
+ kfree(vsen);
+}
+
+struct vimc_ent_device *vimc_sen_create(struct v4l2_device *v4l2_dev,
+ const char *const name,
+ u16 num_pads,
+ const unsigned long *pads_flag)
+{
+ struct vimc_sen_device *vsen;
+ unsigned int i;
+ int ret;
+
+ /* NOTE: a sensor node may be created with more than one pad */
+ if (!name || !num_pads || !pads_flag)
+ return ERR_PTR(-EINVAL);
+
+ /* check if all pads are sources */
+ for (i = 0; i < num_pads; i++)
+ if (!(pads_flag[i] & MEDIA_PAD_FL_SOURCE))
+ return ERR_PTR(-EINVAL);
+
+ /* Allocate the vsen struct */
+ vsen = kzalloc(sizeof(*vsen), GFP_KERNEL);
+ if (!vsen)
+ return ERR_PTR(-ENOMEM);
+
+ /* Allocate the pads */
+ vsen->ved.pads = vimc_pads_init(num_pads, pads_flag);
+ if (IS_ERR(vsen->ved.pads)) {
+ ret = PTR_ERR(vsen->ved.pads);
+ goto err_free_vsen;
+ }
+
+ /* Fill the vimc_ent_device struct */
+ vsen->ved.destroy = vimc_sen_destroy;
+ vsen->ved.ent = &vsen->sd.entity;
+
+ /* Initialize the subdev */
+ v4l2_subdev_init(&vsen->sd, &vimc_sen_ops);
+ vsen->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
+ vsen->sd.entity.ops = &vimc_sen_mops;
+ vsen->sd.owner = THIS_MODULE;
+ strlcpy(vsen->sd.name, name, sizeof(vsen->sd.name));
+ v4l2_set_subdevdata(&vsen->sd, &vsen->ved);
+
+ /* Expose this subdev to user space */
+ vsen->sd.flags = V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+ /* Initialize the media entity */
+ ret = media_entity_pads_init(&vsen->sd.entity,
+ num_pads, vsen->ved.pads);
+ if (ret)
+ goto err_clean_pads;
+
+ /* Set the active frame format (this is hardcoded for now) */
+ vsen->mbus_format.width = 640;
+ vsen->mbus_format.height = 480;
+ vsen->mbus_format.code = MEDIA_BUS_FMT_RGB888_1X24;
+ vsen->mbus_format.field = V4L2_FIELD_NONE;
+ vsen->mbus_format.colorspace = V4L2_COLORSPACE_SRGB;
+ vsen->mbus_format.quantization = V4L2_QUANTIZATION_FULL_RANGE;
+ vsen->mbus_format.xfer_func = V4L2_XFER_FUNC_SRGB;
+
+ /* Register the subdev with the v4l2 and the media framework */
+ ret = v4l2_device_register_subdev(v4l2_dev, &vsen->sd);
+ if (ret) {
+ dev_err(vsen->sd.v4l2_dev->dev,
+ "%s: subdev register failed (err=%d)\n",
+ vsen->sd.name, ret);
+ goto err_clean_m_ent;
+ }
+
+ return &vsen->ved;
+
+err_clean_m_ent:
+ media_entity_cleanup(&vsen->sd.entity);
+err_clean_pads:
+ vimc_pads_cleanup(vsen->ved.pads);
+err_free_vsen:
+ kfree(vsen);
+
+ return ERR_PTR(ret);
+}
diff --git a/drivers/media/platform/vimc/vimc-sensor.h b/drivers/media/platform/vimc/vimc-sensor.h
new file mode 100644
index 000000000000..505310e8aeb7
--- /dev/null
+++ b/drivers/media/platform/vimc/vimc-sensor.h
@@ -0,0 +1,28 @@
+/*
+ * vimc-sensor.h Virtual Media Controller Driver
+ *
+ * Copyright (C) 2015-2017 Helen Koike <helen.fornazier@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _VIMC_SENSOR_H_
+#define _VIMC_SENSOR_H_
+
+#include "vimc-core.h"
+
+struct vimc_ent_device *vimc_sen_create(struct v4l2_device *v4l2_dev,
+ const char *const name,
+ u16 num_pads,
+ const unsigned long *pads_flag);
+
+#endif
diff --git a/drivers/media/platform/vivid/Kconfig b/drivers/media/platform/vivid/Kconfig
index db0dd19d227a..b36ac19dc6e4 100644
--- a/drivers/media/platform/vivid/Kconfig
+++ b/drivers/media/platform/vivid/Kconfig
@@ -1,13 +1,14 @@
config VIDEO_VIVID
tristate "Virtual Video Test Driver"
depends on VIDEO_DEV && VIDEO_V4L2 && !SPARC32 && !SPARC64 && FB
+ depends on HAS_DMA
select FONT_SUPPORT
select FONT_8x16
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
- select MEDIA_CEC_EDID
select VIDEOBUF2_VMALLOC
+ select VIDEOBUF2_DMA_CONTIG
select VIDEO_V4L2_TPG
default n
---help---
@@ -25,7 +26,7 @@ config VIDEO_VIVID
config VIDEO_VIVID_CEC
bool "Enable CEC emulation support"
- depends on VIDEO_VIVID && MEDIA_CEC_SUPPORT
+ depends on VIDEO_VIVID && CEC_CORE
---help---
When selected the vivid module will emulate the optional
HDMI CEC feature.
diff --git a/drivers/media/platform/vivid/vivid-cec.c b/drivers/media/platform/vivid/vivid-cec.c
index cb4933592a3c..653f4099f737 100644
--- a/drivers/media/platform/vivid/vivid-cec.c
+++ b/drivers/media/platform/vivid/vivid-cec.c
@@ -135,7 +135,7 @@ static int vivid_cec_adap_log_addr(struct cec_adapter *adap, u8 log_addr)
static int vivid_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
u32 signal_free_time, struct cec_msg *msg)
{
- struct vivid_dev *dev = adap->priv;
+ struct vivid_dev *dev = cec_get_drvdata(adap);
struct vivid_cec_work *cw = kzalloc(sizeof(*cw), GFP_KERNEL);
long delta_jiffies = 0;
@@ -166,7 +166,7 @@ static int vivid_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
static int vivid_received(struct cec_adapter *adap, struct cec_msg *msg)
{
- struct vivid_dev *dev = adap->priv;
+ struct vivid_dev *dev = cec_get_drvdata(adap);
struct cec_msg reply;
u8 dest = cec_msg_destination(msg);
u8 disp_ctl;
diff --git a/drivers/media/platform/vivid/vivid-core.c b/drivers/media/platform/vivid/vivid-core.c
index 51e37812ec98..ef344b9a48af 100644
--- a/drivers/media/platform/vivid/vivid-core.c
+++ b/drivers/media/platform/vivid/vivid-core.c
@@ -30,6 +30,7 @@
#include <linux/videodev2.h>
#include <linux/v4l2-dv-timings.h>
#include <media/videobuf2-vmalloc.h>
+#include <media/videobuf2-dma-contig.h>
#include <media/v4l2-dv-timings.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-fh.h>
@@ -151,6 +152,12 @@ static bool no_error_inj;
module_param(no_error_inj, bool, 0444);
MODULE_PARM_DESC(no_error_inj, " if set disable the error injecting controls");
+static unsigned int allocators[VIVID_MAX_DEVS] = { [0 ... (VIVID_MAX_DEVS - 1)] = 0 };
+module_param_array(allocators, uint, NULL, 0444);
+MODULE_PARM_DESC(allocators, " memory allocator selection, default is 0.\n"
+ "\t\t 0 == vmalloc\n"
+ "\t\t 1 == dma-contig");
+
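+/*
+ * Illustrative usage (not part of this patch): the allocator can be selected
+ * per device instance at module load time, for example
+ *
+ *	modprobe vivid n_devs=2 allocators=0,1
+ *
+ * which would give the first instance the vmalloc allocator and the second
+ * dma-contig. The exact command line is an example only.
+ */
+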
static struct vivid_dev *vivid_devs[VIVID_MAX_DEVS];
const struct v4l2_rect vivid_min_rect = {
@@ -636,6 +643,10 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
{
static const struct v4l2_dv_timings def_dv_timings =
V4L2_DV_BT_CEA_1280X720P60;
+ static const struct vb2_mem_ops * const vivid_mem_ops[2] = {
+ &vb2_vmalloc_memops,
+ &vb2_dma_contig_memops,
+ };
unsigned in_type_counter[4] = { 0, 0, 0, 0 };
unsigned out_type_counter[4] = { 0, 0, 0, 0 };
int ccs_cap = ccs_cap_mode[inst];
@@ -646,6 +657,7 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
struct video_device *vfd;
struct vb2_queue *q;
unsigned node_type = node_types[inst];
+ unsigned int allocator = allocators[inst];
v4l2_std_id tvnorms_cap = 0, tvnorms_out = 0;
int ret;
int i;
@@ -1039,6 +1051,11 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
goto unreg_dev;
}
+ if (allocator == 1)
+ dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ else if (allocator >= ARRAY_SIZE(vivid_mem_ops))
+ allocator = 0;
+
/* start creating the vb2 queues */
if (dev->has_vid_cap) {
/* initialize vid_cap queue */
@@ -1049,10 +1066,11 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
q->drv_priv = dev;
q->buf_struct_size = sizeof(struct vivid_buffer);
q->ops = &vivid_vid_cap_qops;
- q->mem_ops = &vb2_vmalloc_memops;
+ q->mem_ops = vivid_mem_ops[allocator];
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
q->min_buffers_needed = 2;
q->lock = &dev->mutex;
+ q->dev = dev->v4l2_dev.dev;
ret = vb2_queue_init(q);
if (ret)
@@ -1068,10 +1086,11 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
q->drv_priv = dev;
q->buf_struct_size = sizeof(struct vivid_buffer);
q->ops = &vivid_vid_out_qops;
- q->mem_ops = &vb2_vmalloc_memops;
+ q->mem_ops = vivid_mem_ops[allocator];
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
q->min_buffers_needed = 2;
q->lock = &dev->mutex;
+ q->dev = dev->v4l2_dev.dev;
ret = vb2_queue_init(q);
if (ret)
@@ -1087,10 +1106,11 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
q->drv_priv = dev;
q->buf_struct_size = sizeof(struct vivid_buffer);
q->ops = &vivid_vbi_cap_qops;
- q->mem_ops = &vb2_vmalloc_memops;
+ q->mem_ops = vivid_mem_ops[allocator];
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
q->min_buffers_needed = 2;
q->lock = &dev->mutex;
+ q->dev = dev->v4l2_dev.dev;
ret = vb2_queue_init(q);
if (ret)
@@ -1106,10 +1126,11 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
q->drv_priv = dev;
q->buf_struct_size = sizeof(struct vivid_buffer);
q->ops = &vivid_vbi_out_qops;
- q->mem_ops = &vb2_vmalloc_memops;
+ q->mem_ops = vivid_mem_ops[allocator];
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
q->min_buffers_needed = 2;
q->lock = &dev->mutex;
+ q->dev = dev->v4l2_dev.dev;
ret = vb2_queue_init(q);
if (ret)
@@ -1124,10 +1145,11 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
q->drv_priv = dev;
q->buf_struct_size = sizeof(struct vivid_buffer);
q->ops = &vivid_sdr_cap_qops;
- q->mem_ops = &vb2_vmalloc_memops;
+ q->mem_ops = vivid_mem_ops[allocator];
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
q->min_buffers_needed = 8;
q->lock = &dev->mutex;
+ q->dev = dev->v4l2_dev.dev;
ret = vb2_queue_init(q);
if (ret)
diff --git a/drivers/media/platform/vivid/vivid-vid-cap.c b/drivers/media/platform/vivid/vivid-vid-cap.c
index a18e6fec219b..01419455e545 100644
--- a/drivers/media/platform/vivid/vivid-vid-cap.c
+++ b/drivers/media/platform/vivid/vivid-vid-cap.c
@@ -616,7 +616,7 @@ int vivid_try_fmt_vid_cap(struct file *file, void *priv,
/* This driver supports custom bytesperline values */
mp->num_planes = fmt->buffers;
- for (p = 0; p < mp->num_planes; p++) {
+ for (p = 0; p < fmt->buffers; p++) {
/* Calculate the minimum supported bytesperline value */
bytesperline = (mp->width * fmt->bit_depth[p]) >> 3;
/* Calculate the maximum supported bytesperline value */
@@ -626,10 +626,17 @@ int vivid_try_fmt_vid_cap(struct file *file, void *priv,
pfmt[p].bytesperline = max_bpl;
if (pfmt[p].bytesperline < bytesperline)
pfmt[p].bytesperline = bytesperline;
- pfmt[p].sizeimage = tpg_calc_line_width(&dev->tpg, p, pfmt[p].bytesperline) *
- mp->height + fmt->data_offset[p];
+
+ pfmt[p].sizeimage = (pfmt[p].bytesperline * mp->height) /
+ fmt->vdownsampling[p] + fmt->data_offset[p];
+
memset(pfmt[p].reserved, 0, sizeof(pfmt[p].reserved));
}
+ for (p = fmt->buffers; p < fmt->planes; p++)
+ pfmt[0].sizeimage += (pfmt[0].bytesperline * mp->height *
+ (fmt->bit_depth[p] / fmt->vdownsampling[p])) /
+ (fmt->bit_depth[0] / fmt->vdownsampling[0]);
+
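+ /*
+ * Worked example (illustrative values only): for a hypothetical two-plane,
+ * single-buffer format with bit_depth = { 8, 8 } and vdownsampling = { 1, 2 },
+ * the loop above adds
+ *
+ * bytesperline * height * (8 / 2) / (8 / 1) = bytesperline * height / 2
+ *
+ * to the sizeimage of buffer 0, accounting for the half-height second plane
+ * stored in the same buffer.
+ */
+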
mp->colorspace = vivid_colorspace_cap(dev);
if (fmt->color_enc == TGP_COLOR_ENC_HSV)
mp->hsv_enc = vivid_hsv_enc_cap(dev);
diff --git a/drivers/media/platform/vivid/vivid-vid-common.c b/drivers/media/platform/vivid/vivid-vid-common.c
index 5fc010f6ce67..f0f423c7ca41 100644
--- a/drivers/media/platform/vivid/vivid-vid-common.c
+++ b/drivers/media/platform/vivid/vivid-vid-common.c
@@ -858,7 +858,7 @@ int vidioc_g_edid(struct file *file, void *_fh,
return -EINVAL;
if (edid->start_block + edid->blocks > dev->edid_blocks)
edid->blocks = dev->edid_blocks - edid->start_block;
- memcpy(edid->edid, dev->edid, edid->blocks * 128);
- cec_set_edid_phys_addr(edid->edid, edid->blocks * 128, adap->phys_addr);
+ cec_set_edid_phys_addr(dev->edid, dev->edid_blocks * 128, adap->phys_addr);
+ memcpy(edid->edid, dev->edid + edid->start_block * 128, edid->blocks * 128);
return 0;
}
diff --git a/drivers/media/platform/vivid/vivid-vid-out.c b/drivers/media/platform/vivid/vivid-vid-out.c
index 7ba52ee98371..0b1b6218ede8 100644
--- a/drivers/media/platform/vivid/vivid-vid-out.c
+++ b/drivers/media/platform/vivid/vivid-vid-out.c
@@ -390,22 +390,28 @@ int vivid_try_fmt_vid_out(struct file *file, void *priv,
/* This driver supports custom bytesperline values */
- /* Calculate the minimum supported bytesperline value */
- bytesperline = (mp->width * fmt->bit_depth[0]) >> 3;
- /* Calculate the maximum supported bytesperline value */
- max_bpl = (MAX_ZOOM * MAX_WIDTH * fmt->bit_depth[0]) >> 3;
mp->num_planes = fmt->buffers;
- for (p = 0; p < mp->num_planes; p++) {
+ for (p = 0; p < fmt->buffers; p++) {
+ /* Calculate the minimum supported bytesperline value */
+ bytesperline = (mp->width * fmt->bit_depth[p]) >> 3;
+ /* Calculate the maximum supported bytesperline value */
+ max_bpl = (MAX_ZOOM * MAX_WIDTH * fmt->bit_depth[p]) >> 3;
+
if (pfmt[p].bytesperline > max_bpl)
pfmt[p].bytesperline = max_bpl;
if (pfmt[p].bytesperline < bytesperline)
pfmt[p].bytesperline = bytesperline;
- pfmt[p].sizeimage = pfmt[p].bytesperline * mp->height;
+
+ pfmt[p].sizeimage = (pfmt[p].bytesperline * mp->height) /
+ fmt->vdownsampling[p];
+
memset(pfmt[p].reserved, 0, sizeof(pfmt[p].reserved));
}
for (p = fmt->buffers; p < fmt->planes; p++)
- pfmt[0].sizeimage += (pfmt[0].bytesperline * fmt->bit_depth[p]) /
- (fmt->bit_depth[0] * fmt->vdownsampling[p]);
+ pfmt[0].sizeimage += (pfmt[0].bytesperline * mp->height *
+ (fmt->bit_depth[p] / fmt->vdownsampling[p])) /
+ (fmt->bit_depth[0] / fmt->vdownsampling[0]);
+
mp->xfer_func = V4L2_XFER_FUNC_DEFAULT;
mp->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
mp->quantization = V4L2_QUANTIZATION_DEFAULT;
@@ -1172,14 +1178,12 @@ int vidioc_subscribe_event(struct v4l2_fh *fh,
const struct v4l2_event_subscription *sub)
{
switch (sub->type) {
- case V4L2_EVENT_CTRL:
- return v4l2_ctrl_subscribe_event(fh, sub);
case V4L2_EVENT_SOURCE_CHANGE:
if (fh->vdev->vfl_dir == VFL_DIR_RX)
return v4l2_src_change_event_subscribe(fh, sub);
break;
default:
- break;
+ return v4l2_ctrl_subscribe_event(fh, sub);
}
return -EINVAL;
}
diff --git a/drivers/media/platform/vsp1/Makefile b/drivers/media/platform/vsp1/Makefile
index 1328e1bd2143..a33afc385a48 100644
--- a/drivers/media/platform/vsp1/Makefile
+++ b/drivers/media/platform/vsp1/Makefile
@@ -3,6 +3,7 @@ vsp1-y += vsp1_dl.o vsp1_drm.o vsp1_video.o
vsp1-y += vsp1_rpf.o vsp1_rwpf.o vsp1_wpf.o
vsp1-y += vsp1_clu.o vsp1_hsit.o vsp1_lut.o
vsp1-y += vsp1_bru.o vsp1_sru.o vsp1_uds.o
+vsp1-y += vsp1_hgo.o vsp1_hgt.o vsp1_histo.o
vsp1-y += vsp1_lif.o
obj-$(CONFIG_VIDEO_RENESAS_VSP1) += vsp1.o
diff --git a/drivers/media/platform/vsp1/vsp1.h b/drivers/media/platform/vsp1/vsp1.h
index b23fa879a9aa..85387a64179a 100644
--- a/drivers/media/platform/vsp1/vsp1.h
+++ b/drivers/media/platform/vsp1/vsp1.h
@@ -32,6 +32,8 @@ struct vsp1_entity;
struct vsp1_platform_data;
struct vsp1_bru;
struct vsp1_clu;
+struct vsp1_hgo;
+struct vsp1_hgt;
struct vsp1_hsit;
struct vsp1_lif;
struct vsp1_lut;
@@ -50,6 +52,8 @@ struct vsp1_uds;
#define VSP1_HAS_CLU (1 << 4)
#define VSP1_HAS_WPF_VFLIP (1 << 5)
#define VSP1_HAS_WPF_HFLIP (1 << 6)
+#define VSP1_HAS_HGO (1 << 7)
+#define VSP1_HAS_HGT (1 << 8)
struct vsp1_device_info {
u32 version;
@@ -73,6 +77,8 @@ struct vsp1_device {
struct vsp1_bru *bru;
struct vsp1_clu *clu;
+ struct vsp1_hgo *hgo;
+ struct vsp1_hgt *hgt;
struct vsp1_hsit *hsi;
struct vsp1_hsit *hst;
struct vsp1_lif *lif;
diff --git a/drivers/media/platform/vsp1/vsp1_bru.c b/drivers/media/platform/vsp1/vsp1_bru.c
index ee8355c28f94..85362c5ef57a 100644
--- a/drivers/media/platform/vsp1/vsp1_bru.c
+++ b/drivers/media/platform/vsp1/vsp1_bru.c
@@ -251,7 +251,8 @@ static int bru_set_selection(struct v4l2_subdev *subdev,
sel->r.left = clamp_t(unsigned int, sel->r.left, 0, format->width - 1);
sel->r.top = clamp_t(unsigned int, sel->r.top, 0, format->height - 1);
- /* Scaling isn't supported, the compose rectangle size must be identical
+ /*
+ * Scaling isn't supported, the compose rectangle size must be identical
* to the sink format size.
*/
format = vsp1_entity_get_pad_format(&bru->entity, config, sel->pad);
@@ -300,13 +301,15 @@ static void bru_configure(struct vsp1_entity *entity,
format = vsp1_entity_get_pad_format(&bru->entity, bru->entity.config,
bru->entity.source_pad);
- /* The hardware is extremely flexible but we have no userspace API to
+ /*
+ * The hardware is extremely flexible but we have no userspace API to
* expose all the parameters, nor is it clear whether we would have use
+ * cases for all the supported modes. Let's just hardcode the parameters
* to sane default values for now.
*/
- /* Disable dithering and enable color data normalization unless the
+ /*
+ * Disable dithering and enable color data normalization unless the
* format at the pipeline output is premultiplied.
*/
flags = pipe->output ? pipe->output->format.flags : 0;
@@ -314,7 +317,8 @@ static void bru_configure(struct vsp1_entity *entity,
flags & V4L2_PIX_FMT_FLAG_PREMUL_ALPHA ?
0 : VI6_BRU_INCTRL_NRM);
- /* Set the background position to cover the whole output image and
+ /*
+ * Set the background position to cover the whole output image and
* configure its color.
*/
vsp1_bru_write(bru, dl, VI6_BRU_VIRRPF_SIZE,
@@ -325,7 +329,8 @@ static void bru_configure(struct vsp1_entity *entity,
vsp1_bru_write(bru, dl, VI6_BRU_VIRRPF_COL, bru->bgcolor |
(0xff << VI6_BRU_VIRRPF_COL_A_SHIFT));
- /* Route BRU input 1 as SRC input to the ROP unit and configure the ROP
+ /*
+ * Route BRU input 1 as SRC input to the ROP unit and configure the ROP
* unit with a NOP operation to make BRU input 1 available as the
* Blend/ROP unit B SRC input.
*/
@@ -337,7 +342,8 @@ static void bru_configure(struct vsp1_entity *entity,
bool premultiplied = false;
u32 ctrl = 0;
- /* Configure all Blend/ROP units corresponding to an enabled BRU
+ /*
+ * Configure all Blend/ROP units corresponding to an enabled BRU
* input for alpha blending. Blend/ROP units corresponding to
* disabled BRU inputs are used in ROP NOP mode to ignore the
* SRC input.
@@ -352,13 +358,15 @@ static void bru_configure(struct vsp1_entity *entity,
| VI6_BRU_CTRL_AROP(VI6_ROP_NOP);
}
- /* Select the virtual RPF as the Blend/ROP unit A DST input to
+ /*
+ * Select the virtual RPF as the Blend/ROP unit A DST input to
* serve as a background color.
*/
if (i == 0)
ctrl |= VI6_BRU_CTRL_DSTSEL_VRPF;
- /* Route BRU inputs 0 to 3 as SRC inputs to Blend/ROP units A to
+ /*
+ * Route BRU inputs 0 to 3 as SRC inputs to Blend/ROP units A to
* D in that order. The Blend/ROP unit B SRC is hardwired to the
* ROP unit output, the corresponding register bits must be set
* to 0.
@@ -368,7 +376,8 @@ static void bru_configure(struct vsp1_entity *entity,
vsp1_bru_write(bru, dl, VI6_BRU_CTRL(i), ctrl);
- /* Harcode the blending formula to
+ /*
+ * Hardcode the blending formula to
*
* DSTc = DSTc * (1 - SRCa) + SRCc * SRCa
* DSTa = DSTa * (1 - SRCa) + SRCa
diff --git a/drivers/media/platform/vsp1/vsp1_dl.c b/drivers/media/platform/vsp1/vsp1_dl.c
index ad545aff4e35..7d8f37772b56 100644
--- a/drivers/media/platform/vsp1/vsp1_dl.c
+++ b/drivers/media/platform/vsp1/vsp1_dl.c
@@ -240,7 +240,8 @@ static struct vsp1_dl_list *vsp1_dl_list_alloc(struct vsp1_dl_manager *dlm)
INIT_LIST_HEAD(&dl->fragments);
dl->dlm = dlm;
- /* Initialize the display list body and allocate DMA memory for the body
+ /*
+ * Initialize the display list body and allocate DMA memory for the body
* and the optional header. Both are allocated together to avoid memory
* fragmentation, with the header located right after the body in
* memory.
@@ -511,7 +512,8 @@ void vsp1_dl_list_commit(struct vsp1_dl_list *dl)
goto done;
}
- /* Once the UPD bit has been set the hardware can start processing the
+ /*
+ * Once the UPD bit has been set the hardware can start processing the
* display list at any time and we can't touch the address and size
* registers. In that case mark the update as pending, it will be
* queued up to the hardware by the frame end interrupt handler.
@@ -523,7 +525,8 @@ void vsp1_dl_list_commit(struct vsp1_dl_list *dl)
goto done;
}
- /* Program the hardware with the display list body address and size.
+ /*
+ * Program the hardware with the display list body address and size.
* The UPD bit will be cleared by the device when the display list is
* processed.
*/
@@ -547,7 +550,8 @@ void vsp1_dlm_irq_display_start(struct vsp1_dl_manager *dlm)
{
spin_lock(&dlm->lock);
- /* The display start interrupt signals the end of the display list
+ /*
+ * The display start interrupt signals the end of the display list
* processing by the device. The active display list, if any, won't be
* accessed anymore and can be reused.
*/
@@ -566,14 +570,16 @@ void vsp1_dlm_irq_frame_end(struct vsp1_dl_manager *dlm)
__vsp1_dl_list_put(dlm->active);
dlm->active = NULL;
- /* Header mode is used for mem-to-mem pipelines only. We don't need to
+ /*
+ * Header mode is used for mem-to-mem pipelines only. We don't need to
* perform any operation as there can't be any new display list queued
* in that case.
*/
if (dlm->mode == VSP1_DL_MODE_HEADER)
goto done;
- /* The UPD bit set indicates that the commit operation raced with the
+ /*
+ * The UPD bit set indicates that the commit operation raced with the
* interrupt and occurred after the frame end event and UPD clear but
* before interrupt processing. The hardware hasn't taken the update
* into account yet, we'll thus skip one frame and retry.
@@ -581,7 +587,8 @@ void vsp1_dlm_irq_frame_end(struct vsp1_dl_manager *dlm)
if (vsp1_read(vsp1, VI6_DL_BODY_SIZE) & VI6_DL_BODY_SIZE_UPD)
goto done;
- /* The device starts processing the queued display list right after the
+ /*
+ * The device starts processing the queued display list right after the
* frame end interrupt. The display list thus becomes active.
*/
if (dlm->queued) {
@@ -589,7 +596,8 @@ void vsp1_dlm_irq_frame_end(struct vsp1_dl_manager *dlm)
dlm->queued = NULL;
}
- /* Now that the UPD bit has been cleared we can queue the next display
+ /*
+ * Now that the UPD bit has been cleared we can queue the next display
* list to the hardware if one has been prepared.
*/
if (dlm->pending) {
@@ -615,7 +623,8 @@ void vsp1_dlm_setup(struct vsp1_device *vsp1)
| VI6_DL_CTRL_DC2 | VI6_DL_CTRL_DC1 | VI6_DL_CTRL_DC0
| VI6_DL_CTRL_DLE;
- /* The DRM pipeline operates with display lists in Continuous Frame
+ /*
+ * The DRM pipeline operates with display lists in Continuous Frame
* Mode, all other pipelines use manual start.
*/
if (vsp1->drm)
diff --git a/drivers/media/platform/vsp1/vsp1_drm.c b/drivers/media/platform/vsp1/vsp1_drm.c
index b4c0f10fc3b0..9d235e830f5a 100644
--- a/drivers/media/platform/vsp1/vsp1_drm.c
+++ b/drivers/media/platform/vsp1/vsp1_drm.c
@@ -78,7 +78,8 @@ int vsp1_du_setup_lif(struct device *dev, const struct vsp1_du_lif_config *cfg)
int ret;
if (!cfg) {
- /* NULL configuration means the CRTC is being disabled, stop
+ /*
+ * NULL configuration means the CRTC is being disabled, stop
* the pipeline and turn the light off.
*/
ret = vsp1_pipeline_stop(pipe);
@@ -106,7 +107,8 @@ int vsp1_du_setup_lif(struct device *dev, const struct vsp1_du_lif_config *cfg)
dev_dbg(vsp1->dev, "%s: configuring LIF with format %ux%u\n",
__func__, cfg->width, cfg->height);
- /* Configure the format at the BRU sinks and propagate it through the
+ /*
+ * Configure the format at the BRU sinks and propagate it through the
* pipeline.
*/
memset(&format, 0, sizeof(format));
@@ -175,7 +177,8 @@ int vsp1_du_setup_lif(struct device *dev, const struct vsp1_du_lif_config *cfg)
__func__, format.format.width, format.format.height,
format.format.code);
- /* Verify that the format at the output of the pipeline matches the
+ /*
+ * Verify that the format at the output of the pipeline matches the
* requested frame size and media bus code.
*/
if (format.format.width != cfg->width ||
@@ -185,7 +188,8 @@ int vsp1_du_setup_lif(struct device *dev, const struct vsp1_du_lif_config *cfg)
return -EPIPE;
}
- /* Mark the pipeline as streaming and enable the VSP1. This will store
+ /*
+ * Mark the pipeline as streaming and enable the VSP1. This will store
* the pipeline pointer in all entities, which the s_stream handlers
* will need. We don't start the entities themselves right at this point
* as there's no plane configured yet, so we can't start processing
@@ -219,9 +223,6 @@ void vsp1_du_atomic_begin(struct device *dev)
struct vsp1_pipeline *pipe = &vsp1->drm->pipe;
vsp1->drm->num_inputs = pipe->num_inputs;
-
- /* Prepare the display list. */
- pipe->dl = vsp1_dl_list_get(pipe->output->dlm);
}
EXPORT_SYMBOL_GPL(vsp1_du_atomic_begin);
@@ -320,7 +321,8 @@ static int vsp1_du_setup_rpf_pipe(struct vsp1_device *vsp1,
const struct v4l2_rect *crop;
int ret;
- /* Configure the format on the RPF sink pad and propagate it up to the
+ /*
+ * Configure the format on the RPF sink pad and propagate it up to the
* BRU sink pad.
*/
crop = &vsp1->drm->inputs[rpf->entity.index].crop;
@@ -359,7 +361,8 @@ static int vsp1_du_setup_rpf_pipe(struct vsp1_device *vsp1,
__func__, sel.r.left, sel.r.top, sel.r.width, sel.r.height,
rpf->entity.index);
- /* RPF source, hardcode the format to ARGB8888 to turn on format
+ /*
+ * RPF source, hardcode the format to ARGB8888 to turn on format
* conversion if needed.
*/
format.pad = RWPF_PAD_SOURCE;
@@ -425,10 +428,14 @@ void vsp1_du_atomic_flush(struct device *dev)
struct vsp1_pipeline *pipe = &vsp1->drm->pipe;
struct vsp1_rwpf *inputs[VSP1_MAX_RPF] = { NULL, };
struct vsp1_entity *entity;
+ struct vsp1_dl_list *dl;
unsigned long flags;
unsigned int i;
int ret;
+ /* Prepare the display list. */
+ dl = vsp1_dl_list_get(pipe->output->dlm);
+
/* Count the number of enabled inputs and sort them by Z-order. */
pipe->num_inputs = 0;
@@ -483,26 +490,25 @@ void vsp1_du_atomic_flush(struct device *dev)
struct vsp1_rwpf *rpf = to_rwpf(&entity->subdev);
if (!pipe->inputs[rpf->entity.index]) {
- vsp1_dl_list_write(pipe->dl, entity->route->reg,
+ vsp1_dl_list_write(dl, entity->route->reg,
VI6_DPR_NODE_UNUSED);
continue;
}
}
- vsp1_entity_route_setup(entity, pipe->dl);
+ vsp1_entity_route_setup(entity, pipe, dl);
if (entity->ops->configure) {
- entity->ops->configure(entity, pipe, pipe->dl,
+ entity->ops->configure(entity, pipe, dl,
VSP1_ENTITY_PARAMS_INIT);
- entity->ops->configure(entity, pipe, pipe->dl,
+ entity->ops->configure(entity, pipe, dl,
VSP1_ENTITY_PARAMS_RUNTIME);
- entity->ops->configure(entity, pipe, pipe->dl,
+ entity->ops->configure(entity, pipe, dl,
VSP1_ENTITY_PARAMS_PARTITION);
}
}
- vsp1_dl_list_commit(pipe->dl);
- pipe->dl = NULL;
+ vsp1_dl_list_commit(dl);
/* Start or stop the pipeline if needed. */
if (!vsp1->drm->num_inputs && pipe->num_inputs) {
@@ -528,7 +534,8 @@ int vsp1_drm_create_links(struct vsp1_device *vsp1)
unsigned int i;
int ret;
- /* VSPD instances require a BRU to perform composition and a LIF to
+ /*
+ * VSPD instances require a BRU to perform composition and a LIF to
* output to the DU.
*/
if (!vsp1->bru || !vsp1->lif)
@@ -595,6 +602,7 @@ int vsp1_drm_init(struct vsp1_device *vsp1)
pipe->bru = &vsp1->bru->entity;
pipe->lif = &vsp1->lif->entity;
pipe->output = vsp1->wpf[0];
+ pipe->output->pipe = pipe;
return 0;
}
diff --git a/drivers/media/platform/vsp1/vsp1_drm.h b/drivers/media/platform/vsp1/vsp1_drm.h
index 9e28ab9254ba..c8d2f88fc483 100644
--- a/drivers/media/platform/vsp1/vsp1_drm.h
+++ b/drivers/media/platform/vsp1/vsp1_drm.h
@@ -21,7 +21,7 @@
* vsp1_drm - State for the API exposed to the DRM driver
* @pipe: the VSP1 pipeline used for display
* @num_inputs: number of active pipeline inputs at the beginning of an update
- * @planes: source crop rectangle, destination compose rectangle and z-order
+ * @inputs: source crop rectangle, destination compose rectangle and z-order
* position for every input
*/
struct vsp1_drm {
diff --git a/drivers/media/platform/vsp1/vsp1_drv.c b/drivers/media/platform/vsp1/vsp1_drv.c
index aa237b48ad55..048446af5ae7 100644
--- a/drivers/media/platform/vsp1/vsp1_drv.c
+++ b/drivers/media/platform/vsp1/vsp1_drv.c
@@ -30,6 +30,8 @@
#include "vsp1_clu.h"
#include "vsp1_dl.h"
#include "vsp1_drm.h"
+#include "vsp1_hgo.h"
+#include "vsp1_hgt.h"
#include "vsp1_hsit.h"
#include "vsp1_lif.h"
#include "vsp1_lut.h"
@@ -105,7 +107,9 @@ static int vsp1_create_sink_links(struct vsp1_device *vsp1,
if (source->type == sink->type)
continue;
- if (source->type == VSP1_ENTITY_LIF ||
+ if (source->type == VSP1_ENTITY_HGO ||
+ source->type == VSP1_ENTITY_HGT ||
+ source->type == VSP1_ENTITY_LIF ||
source->type == VSP1_ENTITY_WPF)
continue;
@@ -148,6 +152,26 @@ static int vsp1_uapi_create_links(struct vsp1_device *vsp1)
return ret;
}
+ if (vsp1->hgo) {
+ ret = media_create_pad_link(&vsp1->hgo->histo.entity.subdev.entity,
+ HISTO_PAD_SOURCE,
+ &vsp1->hgo->histo.video.entity, 0,
+ MEDIA_LNK_FL_ENABLED |
+ MEDIA_LNK_FL_IMMUTABLE);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (vsp1->hgt) {
+ ret = media_create_pad_link(&vsp1->hgt->histo.entity.subdev.entity,
+ HISTO_PAD_SOURCE,
+ &vsp1->hgt->histo.video.entity, 0,
+ MEDIA_LNK_FL_ENABLED |
+ MEDIA_LNK_FL_IMMUTABLE);
+ if (ret < 0)
+ return ret;
+ }
+
if (vsp1->lif) {
ret = media_create_pad_link(&vsp1->wpf[0]->entity.subdev.entity,
RWPF_PAD_SOURCE,
@@ -170,7 +194,8 @@ static int vsp1_uapi_create_links(struct vsp1_device *vsp1)
}
for (i = 0; i < vsp1->info->wpf_count; ++i) {
- /* Connect the video device to the WPF. All connections are
+ /*
+ * Connect the video device to the WPF. All connections are
* immutable.
*/
struct vsp1_rwpf *wpf = vsp1->wpf[i];
@@ -227,7 +252,8 @@ static int vsp1_create_entities(struct vsp1_device *vsp1)
media_device_init(mdev);
vsp1->media_ops.link_setup = vsp1_entity_link_setup;
- /* Don't perform link validation when the userspace API is disabled as
+ /*
+ * Don't perform link validation when the userspace API is disabled as
* the pipeline is configured internally by the driver in that case, and
* its configuration can thus be trusted.
*/
@@ -279,7 +305,30 @@ static int vsp1_create_entities(struct vsp1_device *vsp1)
list_add_tail(&vsp1->hst->entity.list_dev, &vsp1->entities);
- /* The LIF is only supported when used in conjunction with the DU, in
+ if (vsp1->info->features & VSP1_HAS_HGO && vsp1->info->uapi) {
+ vsp1->hgo = vsp1_hgo_create(vsp1);
+ if (IS_ERR(vsp1->hgo)) {
+ ret = PTR_ERR(vsp1->hgo);
+ goto done;
+ }
+
+ list_add_tail(&vsp1->hgo->histo.entity.list_dev,
+ &vsp1->entities);
+ }
+
+ if (vsp1->info->features & VSP1_HAS_HGT && vsp1->info->uapi) {
+ vsp1->hgt = vsp1_hgt_create(vsp1);
+ if (IS_ERR(vsp1->hgt)) {
+ ret = PTR_ERR(vsp1->hgt);
+ goto done;
+ }
+
+ list_add_tail(&vsp1->hgt->histo.entity.list_dev,
+ &vsp1->entities);
+ }
+
+ /*
+ * The LIF is only supported when used in conjunction with the DU, in
* which case the userspace API is disabled. If the userspace API is
* enabled skip the LIF, even when present.
*/
@@ -391,7 +440,8 @@ static int vsp1_create_entities(struct vsp1_device *vsp1)
if (ret < 0)
goto done;
- /* Register subdev nodes if the userspace API is enabled or initialize
+ /*
+ * Register subdev nodes if the userspace API is enabled or initialize
* the DRM pipeline otherwise.
*/
if (vsp1->info->uapi) {
@@ -562,8 +612,9 @@ static const struct vsp1_device_info vsp1_device_infos[] = {
.version = VI6_IP_VERSION_MODEL_VSPS_H2,
.model = "VSP1-S",
.gen = 2,
- .features = VSP1_HAS_BRU | VSP1_HAS_CLU | VSP1_HAS_LUT
- | VSP1_HAS_SRU | VSP1_HAS_WPF_VFLIP,
+ .features = VSP1_HAS_BRU | VSP1_HAS_CLU | VSP1_HAS_HGO
+ | VSP1_HAS_HGT | VSP1_HAS_LUT | VSP1_HAS_SRU
+ | VSP1_HAS_WPF_VFLIP,
.rpf_count = 5,
.uds_count = 3,
.wpf_count = 4,
@@ -583,7 +634,8 @@ static const struct vsp1_device_info vsp1_device_infos[] = {
.version = VI6_IP_VERSION_MODEL_VSPD_GEN2,
.model = "VSP1-D",
.gen = 2,
- .features = VSP1_HAS_BRU | VSP1_HAS_LIF | VSP1_HAS_LUT,
+ .features = VSP1_HAS_BRU | VSP1_HAS_HGO | VSP1_HAS_LIF
+ | VSP1_HAS_LUT,
.rpf_count = 4,
.uds_count = 1,
.wpf_count = 1,
@@ -593,8 +645,9 @@ static const struct vsp1_device_info vsp1_device_infos[] = {
.version = VI6_IP_VERSION_MODEL_VSPS_M2,
.model = "VSP1-S",
.gen = 2,
- .features = VSP1_HAS_BRU | VSP1_HAS_CLU | VSP1_HAS_LUT
- | VSP1_HAS_SRU | VSP1_HAS_WPF_VFLIP,
+ .features = VSP1_HAS_BRU | VSP1_HAS_CLU | VSP1_HAS_HGO
+ | VSP1_HAS_HGT | VSP1_HAS_LUT | VSP1_HAS_SRU
+ | VSP1_HAS_WPF_VFLIP,
.rpf_count = 5,
.uds_count = 1,
.wpf_count = 4,
@@ -626,8 +679,9 @@ static const struct vsp1_device_info vsp1_device_infos[] = {
.version = VI6_IP_VERSION_MODEL_VSPI_GEN3,
.model = "VSP2-I",
.gen = 3,
- .features = VSP1_HAS_CLU | VSP1_HAS_LUT | VSP1_HAS_SRU
- | VSP1_HAS_WPF_HFLIP | VSP1_HAS_WPF_VFLIP,
+ .features = VSP1_HAS_CLU | VSP1_HAS_HGO | VSP1_HAS_HGT
+ | VSP1_HAS_LUT | VSP1_HAS_SRU | VSP1_HAS_WPF_HFLIP
+ | VSP1_HAS_WPF_VFLIP,
.rpf_count = 1,
.uds_count = 1,
.wpf_count = 1,
@@ -645,8 +699,8 @@ static const struct vsp1_device_info vsp1_device_infos[] = {
.version = VI6_IP_VERSION_MODEL_VSPBC_GEN3,
.model = "VSP2-BC",
.gen = 3,
- .features = VSP1_HAS_BRU | VSP1_HAS_CLU | VSP1_HAS_LUT
- | VSP1_HAS_WPF_VFLIP,
+ .features = VSP1_HAS_BRU | VSP1_HAS_CLU | VSP1_HAS_HGO
+ | VSP1_HAS_LUT | VSP1_HAS_WPF_VFLIP,
.rpf_count = 5,
.wpf_count = 1,
.num_bru_inputs = 5,
diff --git a/drivers/media/platform/vsp1/vsp1_entity.c b/drivers/media/platform/vsp1/vsp1_entity.c
index da673495c222..4bdb3b141611 100644
--- a/drivers/media/platform/vsp1/vsp1_entity.c
+++ b/drivers/media/platform/vsp1/vsp1_entity.c
@@ -21,6 +21,8 @@
#include "vsp1.h"
#include "vsp1_dl.h"
#include "vsp1_entity.h"
+#include "vsp1_pipe.h"
+#include "vsp1_rwpf.h"
static inline struct vsp1_entity *
media_entity_to_vsp1_entity(struct media_entity *entity)
@@ -28,11 +30,42 @@ media_entity_to_vsp1_entity(struct media_entity *entity)
return container_of(entity, struct vsp1_entity, subdev.entity);
}
-void vsp1_entity_route_setup(struct vsp1_entity *source,
+void vsp1_entity_route_setup(struct vsp1_entity *entity,
+ struct vsp1_pipeline *pipe,
struct vsp1_dl_list *dl)
{
+ struct vsp1_entity *source;
struct vsp1_entity *sink;
+ if (entity->type == VSP1_ENTITY_HGO) {
+ u32 smppt;
+
+ /*
+ * The HGO is a special case, its routing is configured on the
+ * sink pad.
+ */
+ source = media_entity_to_vsp1_entity(entity->sources[0]);
+ smppt = (pipe->output->entity.index << VI6_DPR_SMPPT_TGW_SHIFT)
+ | (source->route->output << VI6_DPR_SMPPT_PT_SHIFT);
+
+ vsp1_dl_list_write(dl, VI6_DPR_HGO_SMPPT, smppt);
+ return;
+ } else if (entity->type == VSP1_ENTITY_HGT) {
+ u32 smppt;
+
+ /*
+ * The HGT is a special case, its routing is configured on the
+ * sink pad.
+ */
+ source = media_entity_to_vsp1_entity(entity->sources[0]);
+ smppt = (pipe->output->entity.index << VI6_DPR_SMPPT_TGW_SHIFT)
+ | (source->route->output << VI6_DPR_SMPPT_PT_SHIFT);
+
+ vsp1_dl_list_write(dl, VI6_DPR_HGT_SMPPT, smppt);
+ return;
+ }
+
+ source = entity;
if (source->route->reg == 0)
return;
@@ -199,7 +232,8 @@ int vsp1_subdev_enum_mbus_code(struct v4l2_subdev *subdev,
struct v4l2_subdev_pad_config *config;
struct v4l2_mbus_framefmt *format;
- /* The entity can't perform format conversion, the sink format
+ /*
+ * The entity can't perform format conversion, the sink format
* is always identical to the source format.
*/
if (code->index)
@@ -263,7 +297,8 @@ int vsp1_subdev_enum_frame_size(struct v4l2_subdev *subdev,
fse->min_height = min_height;
fse->max_height = max_height;
} else {
- /* The size on the source pad are fixed and always identical to
+ /*
+ * The size on the source pad are fixed and always identical to
* the size on the sink pad.
*/
fse->min_width = format->width;
@@ -281,25 +316,32 @@ done:
* Media Operations
*/
-int vsp1_entity_link_setup(struct media_entity *entity,
- const struct media_pad *local,
- const struct media_pad *remote, u32 flags)
+static int vsp1_entity_link_setup_source(const struct media_pad *source_pad,
+ const struct media_pad *sink_pad,
+ u32 flags)
{
struct vsp1_entity *source;
- if (!(local->flags & MEDIA_PAD_FL_SOURCE))
- return 0;
-
- source = media_entity_to_vsp1_entity(local->entity);
+ source = media_entity_to_vsp1_entity(source_pad->entity);
if (!source->route)
return 0;
if (flags & MEDIA_LNK_FL_ENABLED) {
- if (source->sink)
- return -EBUSY;
- source->sink = remote->entity;
- source->sink_pad = remote->index;
+ struct vsp1_entity *sink
+ = media_entity_to_vsp1_entity(sink_pad->entity);
+
+ /*
+ * Fan-out is limited to one for the normal data path plus
+ * optional HGO and HGT. We ignore the HGO and HGT here.
+ */
+ if (sink->type != VSP1_ENTITY_HGO &&
+ sink->type != VSP1_ENTITY_HGT) {
+ if (source->sink)
+ return -EBUSY;
+ source->sink = sink_pad->entity;
+ source->sink_pad = sink_pad->index;
+ }
} else {
source->sink = NULL;
source->sink_pad = 0;
@@ -308,6 +350,85 @@ int vsp1_entity_link_setup(struct media_entity *entity,
return 0;
}
+static int vsp1_entity_link_setup_sink(const struct media_pad *source_pad,
+ const struct media_pad *sink_pad,
+ u32 flags)
+{
+ struct vsp1_entity *sink;
+
+ sink = media_entity_to_vsp1_entity(sink_pad->entity);
+
+ if (flags & MEDIA_LNK_FL_ENABLED) {
+ /* Fan-in is limited to one. */
+ if (sink->sources[sink_pad->index])
+ return -EBUSY;
+
+ sink->sources[sink_pad->index] = source_pad->entity;
+ } else {
+ sink->sources[sink_pad->index] = NULL;
+ }
+
+ return 0;
+}
+
+int vsp1_entity_link_setup(struct media_entity *entity,
+ const struct media_pad *local,
+ const struct media_pad *remote, u32 flags)
+{
+ if (local->flags & MEDIA_PAD_FL_SOURCE)
+ return vsp1_entity_link_setup_source(local, remote, flags);
+ else
+ return vsp1_entity_link_setup_sink(remote, local, flags);
+}
+
+/**
+ * vsp1_entity_remote_pad - Find the pad at the remote end of a link
+ * @pad: Pad at the local end of the link
+ *
+ * Search for a remote pad connected to the given pad by iterating over all
+ * links originating or terminating at that pad until an enabled link is found.
+ *
+ * Our link setup implementation guarantees that the output fan-out will not be
+ * higher than one for the data pipelines, except for the links to the HGO and
+ * HGT that can be enabled in addition to a regular data link. When traversing
+ * outgoing links this function ignores HGO and HGT entities and should thus be
+ * used in place of the generic media_entity_remote_pad() function to traverse
+ * data pipelines.
+ *
+ * Return a pointer to the pad at the remote end of the first found enabled
+ * link, or NULL if no enabled link has been found.
+ */
+struct media_pad *vsp1_entity_remote_pad(struct media_pad *pad)
+{
+ struct media_link *link;
+
+ list_for_each_entry(link, &pad->entity->links, list) {
+ struct vsp1_entity *entity;
+
+ if (!(link->flags & MEDIA_LNK_FL_ENABLED))
+ continue;
+
+ /* If we're the sink, the source will never be an HGO or HGT. */
+ if (link->sink == pad)
+ return link->source;
+
+ if (link->source != pad)
+ continue;
+
+ /* If the sink isn't a subdevice it can't be an HGO or HGT. */
+ if (!is_media_entity_v4l2_subdev(link->sink->entity))
+ return link->sink;
+
+ entity = media_entity_to_vsp1_entity(link->sink->entity);
+ if (entity->type != VSP1_ENTITY_HGO &&
+ entity->type != VSP1_ENTITY_HGT)
+ return link->sink;
+ }
+
+ return NULL;
+}
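+
+/*
+ * Illustrative sketch (not part of this patch): walking downstream along the
+ * data path from an entity's source pad, skipping the HGO/HGT taps, could
+ * look roughly like this; "entity" is a hypothetical struct vsp1_entity
+ * pointer.
+ *
+ *	struct media_pad *pad;
+ *
+ *	pad = vsp1_entity_remote_pad(&entity->pads[entity->source_pad]);
+ *	// pad, if non-NULL, is the sink pad of the next entity in the
+ *	// data path, never an HGO or HGT.
+ */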
+
/* -----------------------------------------------------------------------------
* Initialization
*/
@@ -334,6 +455,8 @@ static const struct vsp1_route vsp1_routes[] = {
VI6_DPR_NODE_BRU_IN(2), VI6_DPR_NODE_BRU_IN(3),
VI6_DPR_NODE_BRU_IN(4) }, VI6_DPR_NODE_BRU_OUT },
VSP1_ENTITY_ROUTE(CLU),
+ { VSP1_ENTITY_HGO, 0, 0, { 0, }, 0 },
+ { VSP1_ENTITY_HGT, 0, 0, { 0, }, 0 },
VSP1_ENTITY_ROUTE(HSI),
VSP1_ENTITY_ROUTE(HST),
{ VSP1_ENTITY_LIF, 0, 0, { VI6_DPR_NODE_LIF, }, VI6_DPR_NODE_LIF },
@@ -386,7 +509,14 @@ int vsp1_entity_init(struct vsp1_device *vsp1, struct vsp1_entity *entity,
for (i = 0; i < num_pads - 1; ++i)
entity->pads[i].flags = MEDIA_PAD_FL_SINK;
- entity->pads[num_pads - 1].flags = MEDIA_PAD_FL_SOURCE;
+ entity->sources = devm_kcalloc(vsp1->dev, max(num_pads - 1, 1U),
+ sizeof(*entity->sources), GFP_KERNEL);
+ if (entity->sources == NULL)
+ return -ENOMEM;
+
+ /* Single-pad entities only have a sink. */
+ entity->pads[num_pads - 1].flags = num_pads > 1 ? MEDIA_PAD_FL_SOURCE
+ : MEDIA_PAD_FL_SINK;
/* Initialize the media entity. */
ret = media_entity_pads_init(&entity->subdev.entity, num_pads,
@@ -407,7 +537,8 @@ int vsp1_entity_init(struct vsp1_device *vsp1, struct vsp1_entity *entity,
vsp1_entity_init_cfg(subdev, NULL);
- /* Allocate the pad configuration to store formats and selection
+ /*
+ * Allocate the pad configuration to store formats and selection
* rectangles.
*/
entity->config = v4l2_subdev_alloc_pad_config(&entity->subdev);
diff --git a/drivers/media/platform/vsp1/vsp1_entity.h b/drivers/media/platform/vsp1/vsp1_entity.h
index 901146f807b9..c169a060b6d2 100644
--- a/drivers/media/platform/vsp1/vsp1_entity.h
+++ b/drivers/media/platform/vsp1/vsp1_entity.h
@@ -25,6 +25,8 @@ struct vsp1_pipeline;
enum vsp1_entity_type {
VSP1_ENTITY_BRU,
VSP1_ENTITY_CLU,
+ VSP1_ENTITY_HGO,
+ VSP1_ENTITY_HGT,
VSP1_ENTITY_HSI,
VSP1_ENTITY_HST,
VSP1_ENTITY_LIF,
@@ -102,6 +104,7 @@ struct vsp1_entity {
struct media_pad *pads;
unsigned int source_pad;
+ struct media_entity **sources;
struct media_entity *sink;
unsigned int sink_pad;
@@ -142,9 +145,12 @@ vsp1_entity_get_pad_selection(struct vsp1_entity *entity,
int vsp1_entity_init_cfg(struct v4l2_subdev *subdev,
struct v4l2_subdev_pad_config *cfg);
-void vsp1_entity_route_setup(struct vsp1_entity *source,
+void vsp1_entity_route_setup(struct vsp1_entity *entity,
+ struct vsp1_pipeline *pipe,
struct vsp1_dl_list *dl);
+struct media_pad *vsp1_entity_remote_pad(struct media_pad *pad);
+
int vsp1_subdev_get_pad_format(struct v4l2_subdev *subdev,
struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *fmt);
diff --git a/drivers/media/platform/vsp1/vsp1_hgo.c b/drivers/media/platform/vsp1/vsp1_hgo.c
new file mode 100644
index 000000000000..50309c053b78
--- /dev/null
+++ b/drivers/media/platform/vsp1/vsp1_hgo.c
@@ -0,0 +1,230 @@
+/*
+ * vsp1_hgo.c -- R-Car VSP1 Histogram Generator 1D
+ *
+ * Copyright (C) 2016 Renesas Electronics Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/device.h>
+#include <linux/gfp.h>
+
+#include <media/v4l2-subdev.h>
+#include <media/videobuf2-vmalloc.h>
+
+#include "vsp1.h"
+#include "vsp1_dl.h"
+#include "vsp1_hgo.h"
+
+#define HGO_DATA_SIZE ((2 + 256) * 4)
+
+/* -----------------------------------------------------------------------------
+ * Device Access
+ */
+
+static inline u32 vsp1_hgo_read(struct vsp1_hgo *hgo, u32 reg)
+{
+ return vsp1_read(hgo->histo.entity.vsp1, reg);
+}
+
+static inline void vsp1_hgo_write(struct vsp1_hgo *hgo, struct vsp1_dl_list *dl,
+ u32 reg, u32 data)
+{
+ vsp1_dl_list_write(dl, reg, data);
+}
+
+/* -----------------------------------------------------------------------------
+ * Frame End Handler
+ */
+
+void vsp1_hgo_frame_end(struct vsp1_entity *entity)
+{
+ struct vsp1_hgo *hgo = to_hgo(&entity->subdev);
+ struct vsp1_histogram_buffer *buf;
+ unsigned int i;
+ size_t size;
+ u32 *data;
+
+ buf = vsp1_histogram_buffer_get(&hgo->histo);
+ if (!buf)
+ return;
+
+ data = buf->addr;
+
+ if (hgo->num_bins == 256) {
+ *data++ = vsp1_hgo_read(hgo, VI6_HGO_G_MAXMIN);
+ *data++ = vsp1_hgo_read(hgo, VI6_HGO_G_SUM);
+
+ for (i = 0; i < 256; ++i) {
+ vsp1_write(hgo->histo.entity.vsp1,
+ VI6_HGO_EXT_HIST_ADDR, i);
+ *data++ = vsp1_hgo_read(hgo, VI6_HGO_EXT_HIST_DATA);
+ }
+
+ size = (2 + 256) * sizeof(u32);
+ } else if (hgo->max_rgb) {
+ *data++ = vsp1_hgo_read(hgo, VI6_HGO_G_MAXMIN);
+ *data++ = vsp1_hgo_read(hgo, VI6_HGO_G_SUM);
+
+ for (i = 0; i < 64; ++i)
+ *data++ = vsp1_hgo_read(hgo, VI6_HGO_G_HISTO(i));
+
+ size = (2 + 64) * sizeof(u32);
+ } else {
+ *data++ = vsp1_hgo_read(hgo, VI6_HGO_R_MAXMIN);
+ *data++ = vsp1_hgo_read(hgo, VI6_HGO_G_MAXMIN);
+ *data++ = vsp1_hgo_read(hgo, VI6_HGO_B_MAXMIN);
+
+ *data++ = vsp1_hgo_read(hgo, VI6_HGO_R_SUM);
+ *data++ = vsp1_hgo_read(hgo, VI6_HGO_G_SUM);
+ *data++ = vsp1_hgo_read(hgo, VI6_HGO_B_SUM);
+
+ for (i = 0; i < 64; ++i) {
+ data[i] = vsp1_hgo_read(hgo, VI6_HGO_R_HISTO(i));
+ data[i+64] = vsp1_hgo_read(hgo, VI6_HGO_G_HISTO(i));
+ data[i+128] = vsp1_hgo_read(hgo, VI6_HGO_B_HISTO(i));
+ }
+
+ size = (6 + 64 * 3) * sizeof(u32);
+ }
+
+ vsp1_histogram_buffer_complete(&hgo->histo, buf, size);
+}
+
+/* -----------------------------------------------------------------------------
+ * Controls
+ */
+
+#define V4L2_CID_VSP1_HGO_MAX_RGB (V4L2_CID_USER_BASE | 0x1001)
+#define V4L2_CID_VSP1_HGO_NUM_BINS (V4L2_CID_USER_BASE | 0x1002)
+
+static const struct v4l2_ctrl_config hgo_max_rgb_control = {
+ .id = V4L2_CID_VSP1_HGO_MAX_RGB,
+ .name = "Maximum RGB Mode",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .min = 0,
+ .max = 1,
+ .def = 0,
+ .step = 1,
+ .flags = V4L2_CTRL_FLAG_MODIFY_LAYOUT,
+};
+
+static const s64 hgo_num_bins[] = {
+ 64, 256,
+};
+
+static const struct v4l2_ctrl_config hgo_num_bins_control = {
+ .id = V4L2_CID_VSP1_HGO_NUM_BINS,
+ .name = "Number of Bins",
+ .type = V4L2_CTRL_TYPE_INTEGER_MENU,
+ .min = 0,
+ .max = 1,
+ .def = 0,
+ .qmenu_int = hgo_num_bins,
+ .flags = V4L2_CTRL_FLAG_MODIFY_LAYOUT,
+};
+
+/* -----------------------------------------------------------------------------
+ * VSP1 Entity Operations
+ */
+
+static void hgo_configure(struct vsp1_entity *entity,
+ struct vsp1_pipeline *pipe,
+ struct vsp1_dl_list *dl,
+ enum vsp1_entity_params params)
+{
+ struct vsp1_hgo *hgo = to_hgo(&entity->subdev);
+ struct v4l2_rect *compose;
+ struct v4l2_rect *crop;
+ unsigned int hratio;
+ unsigned int vratio;
+
+ if (params != VSP1_ENTITY_PARAMS_INIT)
+ return;
+
+ crop = vsp1_entity_get_pad_selection(entity, entity->config,
+ HISTO_PAD_SINK, V4L2_SEL_TGT_CROP);
+ compose = vsp1_entity_get_pad_selection(entity, entity->config,
+ HISTO_PAD_SINK,
+ V4L2_SEL_TGT_COMPOSE);
+
+ vsp1_hgo_write(hgo, dl, VI6_HGO_REGRST, VI6_HGO_REGRST_RCLEA);
+
+ vsp1_hgo_write(hgo, dl, VI6_HGO_OFFSET,
+ (crop->left << VI6_HGO_OFFSET_HOFFSET_SHIFT) |
+ (crop->top << VI6_HGO_OFFSET_VOFFSET_SHIFT));
+ vsp1_hgo_write(hgo, dl, VI6_HGO_SIZE,
+ (crop->width << VI6_HGO_SIZE_HSIZE_SHIFT) |
+ (crop->height << VI6_HGO_SIZE_VSIZE_SHIFT));
+
+ mutex_lock(hgo->ctrls.handler.lock);
+ hgo->max_rgb = hgo->ctrls.max_rgb->cur.val;
+ if (hgo->ctrls.num_bins)
+ hgo->num_bins = hgo_num_bins[hgo->ctrls.num_bins->cur.val];
+ mutex_unlock(hgo->ctrls.handler.lock);
+
+ hratio = crop->width * 2 / compose->width / 3;
+ vratio = crop->height * 2 / compose->height / 3;
+ vsp1_hgo_write(hgo, dl, VI6_HGO_MODE,
+ (hgo->num_bins == 256 ? VI6_HGO_MODE_STEP : 0) |
+ (hgo->max_rgb ? VI6_HGO_MODE_MAXRGB : 0) |
+ (hratio << VI6_HGO_MODE_HRATIO_SHIFT) |
+ (vratio << VI6_HGO_MODE_VRATIO_SHIFT));
+}
+
+static const struct vsp1_entity_operations hgo_entity_ops = {
+ .configure = hgo_configure,
+ .destroy = vsp1_histogram_destroy,
+};
+
+/* -----------------------------------------------------------------------------
+ * Initialization and Cleanup
+ */
+
+static const unsigned int hgo_mbus_formats[] = {
+ MEDIA_BUS_FMT_AYUV8_1X32,
+ MEDIA_BUS_FMT_ARGB8888_1X32,
+ MEDIA_BUS_FMT_AHSV8888_1X32,
+};
+
+struct vsp1_hgo *vsp1_hgo_create(struct vsp1_device *vsp1)
+{
+ struct vsp1_hgo *hgo;
+ int ret;
+
+ hgo = devm_kzalloc(vsp1->dev, sizeof(*hgo), GFP_KERNEL);
+ if (hgo == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ /* Initialize the control handler. */
+ v4l2_ctrl_handler_init(&hgo->ctrls.handler,
+ vsp1->info->gen == 3 ? 2 : 1);
+ hgo->ctrls.max_rgb = v4l2_ctrl_new_custom(&hgo->ctrls.handler,
+ &hgo_max_rgb_control, NULL);
+ if (vsp1->info->gen == 3)
+ hgo->ctrls.num_bins =
+ v4l2_ctrl_new_custom(&hgo->ctrls.handler,
+ &hgo_num_bins_control, NULL);
+
+ hgo->max_rgb = false;
+ hgo->num_bins = 64;
+
+ hgo->histo.entity.subdev.ctrl_handler = &hgo->ctrls.handler;
+
+ /* Initialize the video device and queue for statistics data. */
+ ret = vsp1_histogram_init(vsp1, &hgo->histo, VSP1_ENTITY_HGO, "hgo",
+ &hgo_entity_ops, hgo_mbus_formats,
+ ARRAY_SIZE(hgo_mbus_formats),
+ HGO_DATA_SIZE, V4L2_META_FMT_VSP1_HGO);
+ if (ret < 0) {
+ vsp1_entity_destroy(&hgo->histo.entity);
+ return ERR_PTR(ret);
+ }
+
+ return hgo;
+}
diff --git a/drivers/media/platform/vsp1/vsp1_hgo.h b/drivers/media/platform/vsp1/vsp1_hgo.h
new file mode 100644
index 000000000000..c6c0b7a80e0c
--- /dev/null
+++ b/drivers/media/platform/vsp1/vsp1_hgo.h
@@ -0,0 +1,45 @@
+/*
+ * vsp1_hgo.h -- R-Car VSP1 Histogram Generator 1D
+ *
+ * Copyright (C) 2016 Renesas Electronics Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#ifndef __VSP1_HGO_H__
+#define __VSP1_HGO_H__
+
+#include <media/media-entity.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-subdev.h>
+
+#include "vsp1_histo.h"
+
+struct vsp1_device;
+
+struct vsp1_hgo {
+ struct vsp1_histogram histo;
+
+ struct {
+ struct v4l2_ctrl_handler handler;
+ struct v4l2_ctrl *max_rgb;
+ struct v4l2_ctrl *num_bins;
+ } ctrls;
+
+ bool max_rgb;
+ unsigned int num_bins;
+};
+
+static inline struct vsp1_hgo *to_hgo(struct v4l2_subdev *subdev)
+{
+ return container_of(subdev, struct vsp1_hgo, histo.entity.subdev);
+}
+
+struct vsp1_hgo *vsp1_hgo_create(struct vsp1_device *vsp1);
+void vsp1_hgo_frame_end(struct vsp1_entity *hgo);
+
+#endif /* __VSP1_HGO_H__ */
diff --git a/drivers/media/platform/vsp1/vsp1_hgt.c b/drivers/media/platform/vsp1/vsp1_hgt.c
new file mode 100644
index 000000000000..b5ce305e3e6f
--- /dev/null
+++ b/drivers/media/platform/vsp1/vsp1_hgt.c
@@ -0,0 +1,222 @@
+/*
+ * vsp1_hgt.c -- R-Car VSP1 Histogram Generator 2D
+ *
+ * Copyright (C) 2016 Renesas Electronics Corporation
+ *
+ * Contact: Niklas Söderlund (niklas.soderlund@ragnatech.se)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/device.h>
+#include <linux/gfp.h>
+
+#include <media/v4l2-subdev.h>
+#include <media/videobuf2-vmalloc.h>
+
+#include "vsp1.h"
+#include "vsp1_dl.h"
+#include "vsp1_hgt.h"
+
+#define HGT_DATA_SIZE ((2 + 6 * 32) * 4)
+
+/* -----------------------------------------------------------------------------
+ * Device Access
+ */
+
+static inline u32 vsp1_hgt_read(struct vsp1_hgt *hgt, u32 reg)
+{
+ return vsp1_read(hgt->histo.entity.vsp1, reg);
+}
+
+static inline void vsp1_hgt_write(struct vsp1_hgt *hgt, struct vsp1_dl_list *dl,
+ u32 reg, u32 data)
+{
+ vsp1_dl_list_write(dl, reg, data);
+}
+
+/* -----------------------------------------------------------------------------
+ * Frame End Handler
+ */
+
+void vsp1_hgt_frame_end(struct vsp1_entity *entity)
+{
+ struct vsp1_hgt *hgt = to_hgt(&entity->subdev);
+ struct vsp1_histogram_buffer *buf;
+ unsigned int m;
+ unsigned int n;
+ u32 *data;
+
+ buf = vsp1_histogram_buffer_get(&hgt->histo);
+ if (!buf)
+ return;
+
+ data = buf->addr;
+
+ *data++ = vsp1_hgt_read(hgt, VI6_HGT_MAXMIN);
+ *data++ = vsp1_hgt_read(hgt, VI6_HGT_SUM);
+
+ for (m = 0; m < 6; ++m)
+ for (n = 0; n < 32; ++n)
+ *data++ = vsp1_hgt_read(hgt, VI6_HGT_HISTO(m, n));
+
+ vsp1_histogram_buffer_complete(&hgt->histo, buf, HGT_DATA_SIZE);
+}
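
For reference, the readout above accounts for the full HGT_DATA_SIZE: two words (MAXMIN and SUM) plus 6 * 32 histogram bins, each 4 bytes wide, i.e. (2 + 6 * 32) * 4 = 776 bytes, which is exactly the size passed to vsp1_histogram_buffer_complete().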
+
+/* -----------------------------------------------------------------------------
+ * Controls
+ */
+
+#define V4L2_CID_VSP1_HGT_HUE_AREAS (V4L2_CID_USER_BASE | 0x1001)
+
+static int hgt_hue_areas_try_ctrl(struct v4l2_ctrl *ctrl)
+{
+ const u8 *values = ctrl->p_new.p_u8;
+ unsigned int i;
+
+ /*
+ * The hardware has constraints on the hue area boundaries beyond the
+ * control min, max and step. The values must match one of the following
+ * expressions.
+ *
+ * 0L <= 0U <= 1L <= 1U <= 2L <= 2U <= 3L <= 3U <= 4L <= 4U <= 5L <= 5U
+ * 0U <= 1L <= 1U <= 2L <= 2U <= 3L <= 3U <= 4L <= 4U <= 5L <= 5U <= 0L
+ *
+ * Start by verifying the common part...
+ */
+ for (i = 1; i < (HGT_NUM_HUE_AREAS * 2) - 1; ++i) {
+ if (values[i] > values[i+1])
+ return -EINVAL;
+ }
+
+ /* ... and handle 0L separately. */
+ if (values[0] > values[1] && values[11] > values[0])
+ return -EINVAL;
+
+ return 0;
+}
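
As a concrete illustration of the two accepted orderings (hypothetical boundary sets): { 0, 20, 40, 60, 80, 100, 120, 140, 160, 180, 200, 220 } satisfies the first expression since 0L <= 0U, while { 230, 10, 40, 70, 100, 130, 160, 180, 200, 210, 220, 225 } satisfies the second since 5U (225) <= 0L (230), modelling a hue area that wraps around the 255/0 boundary.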
+
+static int hgt_hue_areas_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct vsp1_hgt *hgt = container_of(ctrl->handler, struct vsp1_hgt,
+ ctrls);
+
+ memcpy(hgt->hue_areas, ctrl->p_new.p_u8, sizeof(hgt->hue_areas));
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops hgt_hue_areas_ctrl_ops = {
+ .try_ctrl = hgt_hue_areas_try_ctrl,
+ .s_ctrl = hgt_hue_areas_s_ctrl,
+};
+
+static const struct v4l2_ctrl_config hgt_hue_areas = {
+ .ops = &hgt_hue_areas_ctrl_ops,
+ .id = V4L2_CID_VSP1_HGT_HUE_AREAS,
+ .name = "Boundary Values for Hue Area",
+ .type = V4L2_CTRL_TYPE_U8,
+ .min = 0,
+ .max = 255,
+ .def = 0,
+ .step = 1,
+ .dims = { 12 },
+};
+
+/* -----------------------------------------------------------------------------
+ * VSP1 Entity Operations
+ */
+
+static void hgt_configure(struct vsp1_entity *entity,
+ struct vsp1_pipeline *pipe,
+ struct vsp1_dl_list *dl,
+ enum vsp1_entity_params params)
+{
+ struct vsp1_hgt *hgt = to_hgt(&entity->subdev);
+ struct v4l2_rect *compose;
+ struct v4l2_rect *crop;
+ unsigned int hratio;
+ unsigned int vratio;
+ u8 lower;
+ u8 upper;
+ unsigned int i;
+
+ if (params != VSP1_ENTITY_PARAMS_INIT)
+ return;
+
+ crop = vsp1_entity_get_pad_selection(entity, entity->config,
+ HISTO_PAD_SINK, V4L2_SEL_TGT_CROP);
+ compose = vsp1_entity_get_pad_selection(entity, entity->config,
+ HISTO_PAD_SINK,
+ V4L2_SEL_TGT_COMPOSE);
+
+ vsp1_hgt_write(hgt, dl, VI6_HGT_REGRST, VI6_HGT_REGRST_RCLEA);
+
+ vsp1_hgt_write(hgt, dl, VI6_HGT_OFFSET,
+ (crop->left << VI6_HGT_OFFSET_HOFFSET_SHIFT) |
+ (crop->top << VI6_HGT_OFFSET_VOFFSET_SHIFT));
+ vsp1_hgt_write(hgt, dl, VI6_HGT_SIZE,
+ (crop->width << VI6_HGT_SIZE_HSIZE_SHIFT) |
+ (crop->height << VI6_HGT_SIZE_VSIZE_SHIFT));
+
+ mutex_lock(hgt->ctrls.lock);
+ for (i = 0; i < HGT_NUM_HUE_AREAS; ++i) {
+ lower = hgt->hue_areas[i*2 + 0];
+ upper = hgt->hue_areas[i*2 + 1];
+ vsp1_hgt_write(hgt, dl, VI6_HGT_HUE_AREA(i),
+ (lower << VI6_HGT_HUE_AREA_LOWER_SHIFT) |
+ (upper << VI6_HGT_HUE_AREA_UPPER_SHIFT));
+ }
+ mutex_unlock(hgt->ctrls.lock);
+
+ hratio = crop->width * 2 / compose->width / 3;
+ vratio = crop->height * 2 / compose->height / 3;
+ vsp1_hgt_write(hgt, dl, VI6_HGT_MODE,
+ (hratio << VI6_HGT_MODE_HRATIO_SHIFT) |
+ (vratio << VI6_HGT_MODE_VRATIO_SHIFT));
+}
+
+static const struct vsp1_entity_operations hgt_entity_ops = {
+ .configure = hgt_configure,
+ .destroy = vsp1_histogram_destroy,
+};
+
+/* -----------------------------------------------------------------------------
+ * Initialization and Cleanup
+ */
+
+static const unsigned int hgt_mbus_formats[] = {
+ MEDIA_BUS_FMT_AHSV8888_1X32,
+};
+
+struct vsp1_hgt *vsp1_hgt_create(struct vsp1_device *vsp1)
+{
+ struct vsp1_hgt *hgt;
+ int ret;
+
+ hgt = devm_kzalloc(vsp1->dev, sizeof(*hgt), GFP_KERNEL);
+ if (hgt == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ /* Initialize the control handler. */
+ v4l2_ctrl_handler_init(&hgt->ctrls, 1);
+ v4l2_ctrl_new_custom(&hgt->ctrls, &hgt_hue_areas, NULL);
+
+ hgt->histo.entity.subdev.ctrl_handler = &hgt->ctrls;
+
+ /* Initialize the video device and queue for statistics data. */
+ ret = vsp1_histogram_init(vsp1, &hgt->histo, VSP1_ENTITY_HGT, "hgt",
+ &hgt_entity_ops, hgt_mbus_formats,
+ ARRAY_SIZE(hgt_mbus_formats),
+ HGT_DATA_SIZE, V4L2_META_FMT_VSP1_HGT);
+ if (ret < 0) {
+ vsp1_entity_destroy(&hgt->histo.entity);
+ return ERR_PTR(ret);
+ }
+
+ v4l2_ctrl_handler_setup(&hgt->ctrls);
+
+ return hgt;
+}
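
For reference, a minimal userspace sketch of programming the hue area boundaries through the control defined above. The subdevice path, the helper name and the example boundary values are assumptions; the control ID, the 12-byte U8 payload and the lower/upper pair layout come from the driver code, and error handling is omitted.

	#include <fcntl.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/videodev2.h>

	#define VSP1_CID_HGT_HUE_AREAS	(V4L2_CID_USER_BASE | 0x1001)

	static int hgt_set_hue_areas(const char *subdev)	/* e.g. "/dev/v4l-subdev2" */
	{
		/* Six [lower, upper] pairs covering the whole 0..255 hue range. */
		__u8 bounds[12] = { 0, 42, 43, 85, 86, 127, 128, 170, 171, 212, 213, 255 };
		struct v4l2_ext_control ctrl;
		struct v4l2_ext_controls ctrls;
		int fd = open(subdev, O_RDWR);

		memset(&ctrl, 0, sizeof(ctrl));
		ctrl.id = VSP1_CID_HGT_HUE_AREAS;
		ctrl.size = sizeof(bounds);
		ctrl.p_u8 = bounds;

		memset(&ctrls, 0, sizeof(ctrls));
		ctrls.which = V4L2_CTRL_WHICH_CUR_VAL;
		ctrls.count = 1;
		ctrls.controls = &ctrl;

		return ioctl(fd, VIDIOC_S_EXT_CTRLS, &ctrls);
	}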
diff --git a/drivers/media/platform/vsp1/vsp1_hgt.h b/drivers/media/platform/vsp1/vsp1_hgt.h
new file mode 100644
index 000000000000..83f2e130942a
--- /dev/null
+++ b/drivers/media/platform/vsp1/vsp1_hgt.h
@@ -0,0 +1,42 @@
+/*
+ * vsp1_hgt.h -- R-Car VSP1 Histogram Generator 2D
+ *
+ * Copyright (C) 2016 Renesas Electronics Corporation
+ *
+ * Contact: Niklas Söderlund (niklas.soderlund@ragnatech.se)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#ifndef __VSP1_HGT_H__
+#define __VSP1_HGT_H__
+
+#include <media/media-entity.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-subdev.h>
+
+#include "vsp1_histo.h"
+
+struct vsp1_device;
+
+#define HGT_NUM_HUE_AREAS 6
+
+struct vsp1_hgt {
+ struct vsp1_histogram histo;
+
+ struct v4l2_ctrl_handler ctrls;
+
+ u8 hue_areas[HGT_NUM_HUE_AREAS * 2];
+};
+
+static inline struct vsp1_hgt *to_hgt(struct v4l2_subdev *subdev)
+{
+ return container_of(subdev, struct vsp1_hgt, histo.entity.subdev);
+}
+
+struct vsp1_hgt *vsp1_hgt_create(struct vsp1_device *vsp1);
+void vsp1_hgt_frame_end(struct vsp1_entity *hgt);
+
+#endif /* __VSP1_HGT_H__ */
diff --git a/drivers/media/platform/vsp1/vsp1_histo.c b/drivers/media/platform/vsp1/vsp1_histo.c
new file mode 100644
index 000000000000..afab77cf4fa5
--- /dev/null
+++ b/drivers/media/platform/vsp1/vsp1_histo.c
@@ -0,0 +1,646 @@
+/*
+ * vsp1_histo.c -- R-Car VSP1 Histogram API
+ *
+ * Copyright (C) 2016 Renesas Electronics Corporation
+ * Copyright (C) 2016 Laurent Pinchart
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/device.h>
+#include <linux/gfp.h>
+
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-subdev.h>
+#include <media/videobuf2-vmalloc.h>
+
+#include "vsp1.h"
+#include "vsp1_histo.h"
+#include "vsp1_pipe.h"
+
+#define HISTO_MIN_SIZE 4U
+#define HISTO_MAX_SIZE 8192U
+
+/* -----------------------------------------------------------------------------
+ * Buffer Operations
+ */
+
+static inline struct vsp1_histogram_buffer *
+to_vsp1_histogram_buffer(struct vb2_v4l2_buffer *vbuf)
+{
+ return container_of(vbuf, struct vsp1_histogram_buffer, buf);
+}
+
+struct vsp1_histogram_buffer *
+vsp1_histogram_buffer_get(struct vsp1_histogram *histo)
+{
+ struct vsp1_histogram_buffer *buf = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&histo->irqlock, flags);
+
+ if (list_empty(&histo->irqqueue))
+ goto done;
+
+ buf = list_first_entry(&histo->irqqueue, struct vsp1_histogram_buffer,
+ queue);
+ list_del(&buf->queue);
+ histo->readout = true;
+
+done:
+ spin_unlock_irqrestore(&histo->irqlock, flags);
+ return buf;
+}
+
+void vsp1_histogram_buffer_complete(struct vsp1_histogram *histo,
+ struct vsp1_histogram_buffer *buf,
+ size_t size)
+{
+ struct vsp1_pipeline *pipe = histo->pipe;
+ unsigned long flags;
+
+ /*
+ * The pipeline pointer is guaranteed to be valid as this function is
+ * called from the frame completion interrupt handler, which can only
+ * occur when video streaming is active.
+ */
+ buf->buf.sequence = pipe->sequence;
+ buf->buf.vb2_buf.timestamp = ktime_get_ns();
+ vb2_set_plane_payload(&buf->buf.vb2_buf, 0, size);
+ vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_DONE);
+
+ spin_lock_irqsave(&histo->irqlock, flags);
+ histo->readout = false;
+ wake_up(&histo->wait_queue);
+ spin_unlock_irqrestore(&histo->irqlock, flags);
+}
+
+/* -----------------------------------------------------------------------------
+ * videobuf2 Queue Operations
+ */
+
+static int histo_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers,
+ unsigned int *nplanes, unsigned int sizes[],
+ struct device *alloc_devs[])
+{
+ struct vsp1_histogram *histo = vb2_get_drv_priv(vq);
+
+ if (*nplanes) {
+ if (*nplanes != 1)
+ return -EINVAL;
+
+ if (sizes[0] < histo->data_size)
+ return -EINVAL;
+
+ return 0;
+ }
+
+ *nplanes = 1;
+ sizes[0] = histo->data_size;
+
+ return 0;
+}
+
+static int histo_buffer_prepare(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct vsp1_histogram *histo = vb2_get_drv_priv(vb->vb2_queue);
+ struct vsp1_histogram_buffer *buf = to_vsp1_histogram_buffer(vbuf);
+
+ if (vb->num_planes != 1)
+ return -EINVAL;
+
+ if (vb2_plane_size(vb, 0) < histo->data_size)
+ return -EINVAL;
+
+ buf->addr = vb2_plane_vaddr(vb, 0);
+
+ return 0;
+}
+
+static void histo_buffer_queue(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct vsp1_histogram *histo = vb2_get_drv_priv(vb->vb2_queue);
+ struct vsp1_histogram_buffer *buf = to_vsp1_histogram_buffer(vbuf);
+ unsigned long flags;
+
+ spin_lock_irqsave(&histo->irqlock, flags);
+ list_add_tail(&buf->queue, &histo->irqqueue);
+ spin_unlock_irqrestore(&histo->irqlock, flags);
+}
+
+static int histo_start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+ return 0;
+}
+
+static void histo_stop_streaming(struct vb2_queue *vq)
+{
+ struct vsp1_histogram *histo = vb2_get_drv_priv(vq);
+ struct vsp1_histogram_buffer *buffer;
+ unsigned long flags;
+
+ spin_lock_irqsave(&histo->irqlock, flags);
+
+ /* Remove all buffers from the IRQ queue. */
+ list_for_each_entry(buffer, &histo->irqqueue, queue)
+ vb2_buffer_done(&buffer->buf.vb2_buf, VB2_BUF_STATE_ERROR);
+ INIT_LIST_HEAD(&histo->irqqueue);
+
+ /* Wait for the buffer being read out (if any) to complete. */
+ wait_event_lock_irq(histo->wait_queue, !histo->readout, histo->irqlock);
+
+ spin_unlock_irqrestore(&histo->irqlock, flags);
+}
+
+static const struct vb2_ops histo_video_queue_qops = {
+ .queue_setup = histo_queue_setup,
+ .buf_prepare = histo_buffer_prepare,
+ .buf_queue = histo_buffer_queue,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+ .start_streaming = histo_start_streaming,
+ .stop_streaming = histo_stop_streaming,
+};
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Operations
+ */
+
+static int histo_enum_mbus_code(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ struct vsp1_histogram *histo = subdev_to_histo(subdev);
+
+ if (code->pad == HISTO_PAD_SOURCE) {
+ code->code = MEDIA_BUS_FMT_FIXED;
+ return 0;
+ }
+
+ return vsp1_subdev_enum_mbus_code(subdev, cfg, code, histo->formats,
+ histo->num_formats);
+}
+
+static int histo_enum_frame_size(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ if (fse->pad != HISTO_PAD_SINK)
+ return -EINVAL;
+
+ return vsp1_subdev_enum_frame_size(subdev, cfg, fse, HISTO_MIN_SIZE,
+ HISTO_MIN_SIZE, HISTO_MAX_SIZE,
+ HISTO_MAX_SIZE);
+}
+
+static int histo_get_selection(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_selection *sel)
+{
+ struct vsp1_histogram *histo = subdev_to_histo(subdev);
+ struct v4l2_subdev_pad_config *config;
+ struct v4l2_mbus_framefmt *format;
+ struct v4l2_rect *crop;
+ int ret = 0;
+
+ if (sel->pad != HISTO_PAD_SINK)
+ return -EINVAL;
+
+ mutex_lock(&histo->entity.lock);
+
+ config = vsp1_entity_get_pad_config(&histo->entity, cfg, sel->which);
+ if (!config) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ switch (sel->target) {
+ case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+ case V4L2_SEL_TGT_COMPOSE_DEFAULT:
+ crop = vsp1_entity_get_pad_selection(&histo->entity, config,
+ HISTO_PAD_SINK,
+ V4L2_SEL_TGT_CROP);
+ sel->r.left = 0;
+ sel->r.top = 0;
+ sel->r.width = crop->width;
+ sel->r.height = crop->height;
+ break;
+
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ format = vsp1_entity_get_pad_format(&histo->entity, config,
+ HISTO_PAD_SINK);
+ sel->r.left = 0;
+ sel->r.top = 0;
+ sel->r.width = format->width;
+ sel->r.height = format->height;
+ break;
+
+ case V4L2_SEL_TGT_COMPOSE:
+ case V4L2_SEL_TGT_CROP:
+ sel->r = *vsp1_entity_get_pad_selection(&histo->entity, config,
+ sel->pad, sel->target);
+ break;
+
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+done:
+ mutex_unlock(&histo->entity.lock);
+ return ret;
+}
+
+static int histo_set_crop(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *config,
+ struct v4l2_subdev_selection *sel)
+{
+ struct vsp1_histogram *histo = subdev_to_histo(subdev);
+ struct v4l2_mbus_framefmt *format;
+ struct v4l2_rect *selection;
+
+ /* The crop rectangle must be inside the input frame. */
+ format = vsp1_entity_get_pad_format(&histo->entity, config,
+ HISTO_PAD_SINK);
+ sel->r.left = clamp_t(unsigned int, sel->r.left, 0, format->width - 1);
+ sel->r.top = clamp_t(unsigned int, sel->r.top, 0, format->height - 1);
+ sel->r.width = clamp_t(unsigned int, sel->r.width, HISTO_MIN_SIZE,
+ format->width - sel->r.left);
+ sel->r.height = clamp_t(unsigned int, sel->r.height, HISTO_MIN_SIZE,
+ format->height - sel->r.top);
+
+ /* Set the crop rectangle and reset the compose rectangle. */
+ selection = vsp1_entity_get_pad_selection(&histo->entity, config,
+ sel->pad, V4L2_SEL_TGT_CROP);
+ *selection = sel->r;
+
+ selection = vsp1_entity_get_pad_selection(&histo->entity, config,
+ sel->pad,
+ V4L2_SEL_TGT_COMPOSE);
+ *selection = sel->r;
+
+ return 0;
+}
+
+static int histo_set_compose(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *config,
+ struct v4l2_subdev_selection *sel)
+{
+ struct vsp1_histogram *histo = subdev_to_histo(subdev);
+ struct v4l2_rect *compose;
+ struct v4l2_rect *crop;
+ unsigned int ratio;
+
+ /*
+ * The compose rectangle is used to configure downscaling. The top left
+ * corner is fixed to (0,0) and the size is rounded to 1/1, 1/2 or 1/4 of
+ * the crop rectangle.
+ */
+ sel->r.left = 0;
+ sel->r.top = 0;
+
+ crop = vsp1_entity_get_pad_selection(&histo->entity, config, sel->pad,
+ V4L2_SEL_TGT_CROP);
+
+ /*
+ * Clamp the width and height to acceptable values first and then
+ * compute the closest rounded dividing ratio.
+ *
+ * Ratio Rounded ratio
+ * --------------------------
+ * [1.0 1.5[ 1
+ * [1.5 3.0[ 2
+ * [3.0 4.0] 4
+ *
+ * The rounded ratio can be computed using
+ *
+ * 1 << (floor(ratio * 2) / 3)
+ */
+ sel->r.width = clamp(sel->r.width, crop->width / 4, crop->width);
+ ratio = 1 << (crop->width * 2 / sel->r.width / 3);
+ sel->r.width = crop->width / ratio;
+
+ sel->r.height = clamp(sel->r.height, crop->height / 4, crop->height);
+ ratio = 1 << (crop->height * 2 / sel->r.height / 3);
+ sel->r.height = crop->height / ratio;
+
+ compose = vsp1_entity_get_pad_selection(&histo->entity, config,
+ sel->pad,
+ V4L2_SEL_TGT_COMPOSE);
+ *compose = sel->r;
+
+ return 0;
+}
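
For instance, with a 1024 pixel wide crop rectangle and a requested compose width of 400 (desired ratio 2.56, in the [1.5 3.0[ range), the integer arithmetic above yields 1024 * 2 / 400 / 3 = 1, giving ratio = 1 << 1 = 2 and a compose width of 512.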
+
+static int histo_set_selection(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_selection *sel)
+{
+ struct vsp1_histogram *histo = subdev_to_histo(subdev);
+ struct v4l2_subdev_pad_config *config;
+ int ret;
+
+ if (sel->pad != HISTO_PAD_SINK)
+ return -EINVAL;
+
+ mutex_lock(&histo->entity.lock);
+
+ config = vsp1_entity_get_pad_config(&histo->entity, cfg, sel->which);
+ if (!config) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ if (sel->target == V4L2_SEL_TGT_CROP)
+ ret = histo_set_crop(subdev, config, sel);
+ else if (sel->target == V4L2_SEL_TGT_COMPOSE)
+ ret = histo_set_compose(subdev, config, sel);
+ else
+ ret = -EINVAL;
+
+done:
+ mutex_unlock(&histo->entity.lock);
+ return ret;
+}
+
+static int histo_get_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ if (fmt->pad == HISTO_PAD_SOURCE) {
+ fmt->format.code = MEDIA_BUS_FMT_FIXED;
+ fmt->format.width = 0;
+ fmt->format.height = 0;
+ fmt->format.field = V4L2_FIELD_NONE;
+ fmt->format.colorspace = V4L2_COLORSPACE_RAW;
+ return 0;
+ }
+
+ return vsp1_subdev_get_pad_format(subdev, cfg, fmt);
+}
+
+static int histo_set_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct vsp1_histogram *histo = subdev_to_histo(subdev);
+ struct v4l2_subdev_pad_config *config;
+ struct v4l2_mbus_framefmt *format;
+ struct v4l2_rect *selection;
+ unsigned int i;
+ int ret = 0;
+
+ if (fmt->pad != HISTO_PAD_SINK)
+ return histo_get_format(subdev, cfg, fmt);
+
+ mutex_lock(&histo->entity.lock);
+
+ config = vsp1_entity_get_pad_config(&histo->entity, cfg, fmt->which);
+ if (!config) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ /*
+ * Default to the first format if the requested format is not
+ * supported.
+ */
+ for (i = 0; i < histo->num_formats; ++i) {
+ if (fmt->format.code == histo->formats[i])
+ break;
+ }
+ if (i == histo->num_formats)
+ fmt->format.code = histo->formats[0];
+
+ format = vsp1_entity_get_pad_format(&histo->entity, config, fmt->pad);
+
+ format->code = fmt->format.code;
+ format->width = clamp_t(unsigned int, fmt->format.width,
+ HISTO_MIN_SIZE, HISTO_MAX_SIZE);
+ format->height = clamp_t(unsigned int, fmt->format.height,
+ HISTO_MIN_SIZE, HISTO_MAX_SIZE);
+ format->field = V4L2_FIELD_NONE;
+ format->colorspace = V4L2_COLORSPACE_SRGB;
+
+ fmt->format = *format;
+
+ /* Reset the crop and compose rectangles */
+ selection = vsp1_entity_get_pad_selection(&histo->entity, config,
+ fmt->pad, V4L2_SEL_TGT_CROP);
+ selection->left = 0;
+ selection->top = 0;
+ selection->width = format->width;
+ selection->height = format->height;
+
+ selection = vsp1_entity_get_pad_selection(&histo->entity, config,
+ fmt->pad,
+ V4L2_SEL_TGT_COMPOSE);
+ selection->left = 0;
+ selection->top = 0;
+ selection->width = format->width;
+ selection->height = format->height;
+
+done:
+ mutex_unlock(&histo->entity.lock);
+ return ret;
+}
+
+static const struct v4l2_subdev_pad_ops histo_pad_ops = {
+ .enum_mbus_code = histo_enum_mbus_code,
+ .enum_frame_size = histo_enum_frame_size,
+ .get_fmt = histo_get_format,
+ .set_fmt = histo_set_format,
+ .get_selection = histo_get_selection,
+ .set_selection = histo_set_selection,
+};
+
+static const struct v4l2_subdev_ops histo_ops = {
+ .pad = &histo_pad_ops,
+};
+
+/* -----------------------------------------------------------------------------
+ * V4L2 ioctls
+ */
+
+static int histo_v4l2_querycap(struct file *file, void *fh,
+ struct v4l2_capability *cap)
+{
+ struct v4l2_fh *vfh = file->private_data;
+ struct vsp1_histogram *histo = vdev_to_histo(vfh->vdev);
+
+ cap->capabilities = V4L2_CAP_DEVICE_CAPS | V4L2_CAP_STREAMING
+ | V4L2_CAP_VIDEO_CAPTURE_MPLANE
+ | V4L2_CAP_VIDEO_OUTPUT_MPLANE
+ | V4L2_CAP_META_CAPTURE;
+ cap->device_caps = V4L2_CAP_META_CAPTURE
+ | V4L2_CAP_STREAMING;
+
+ strlcpy(cap->driver, "vsp1", sizeof(cap->driver));
+ strlcpy(cap->card, histo->video.name, sizeof(cap->card));
+ snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s",
+ dev_name(histo->entity.vsp1->dev));
+
+ return 0;
+}
+
+static int histo_v4l2_enum_format(struct file *file, void *fh,
+ struct v4l2_fmtdesc *f)
+{
+ struct v4l2_fh *vfh = file->private_data;
+ struct vsp1_histogram *histo = vdev_to_histo(vfh->vdev);
+
+ if (f->index > 0 || f->type != histo->queue.type)
+ return -EINVAL;
+
+ f->pixelformat = histo->meta_format;
+
+ return 0;
+}
+
+static int histo_v4l2_get_format(struct file *file, void *fh,
+ struct v4l2_format *format)
+{
+ struct v4l2_fh *vfh = file->private_data;
+ struct vsp1_histogram *histo = vdev_to_histo(vfh->vdev);
+ struct v4l2_meta_format *meta = &format->fmt.meta;
+
+ if (format->type != histo->queue.type)
+ return -EINVAL;
+
+ memset(meta, 0, sizeof(*meta));
+
+ meta->dataformat = histo->meta_format;
+ meta->buffersize = histo->data_size;
+
+ return 0;
+}
+
+static const struct v4l2_ioctl_ops histo_v4l2_ioctl_ops = {
+ .vidioc_querycap = histo_v4l2_querycap,
+ .vidioc_enum_fmt_meta_cap = histo_v4l2_enum_format,
+ .vidioc_g_fmt_meta_cap = histo_v4l2_get_format,
+ .vidioc_s_fmt_meta_cap = histo_v4l2_get_format,
+ .vidioc_try_fmt_meta_cap = histo_v4l2_get_format,
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+};
+
+/* -----------------------------------------------------------------------------
+ * V4L2 File Operations
+ */
+
+static const struct v4l2_file_operations histo_v4l2_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = video_ioctl2,
+ .open = v4l2_fh_open,
+ .release = vb2_fop_release,
+ .poll = vb2_fop_poll,
+ .mmap = vb2_fop_mmap,
+};
+
+static void vsp1_histogram_cleanup(struct vsp1_histogram *histo)
+{
+ if (video_is_registered(&histo->video))
+ video_unregister_device(&histo->video);
+
+ media_entity_cleanup(&histo->video.entity);
+}
+
+void vsp1_histogram_destroy(struct vsp1_entity *entity)
+{
+ struct vsp1_histogram *histo = subdev_to_histo(&entity->subdev);
+
+ vsp1_histogram_cleanup(histo);
+}
+
+int vsp1_histogram_init(struct vsp1_device *vsp1, struct vsp1_histogram *histo,
+ enum vsp1_entity_type type, const char *name,
+ const struct vsp1_entity_operations *ops,
+ const unsigned int *formats, unsigned int num_formats,
+ size_t data_size, u32 meta_format)
+{
+ int ret;
+
+ histo->formats = formats;
+ histo->num_formats = num_formats;
+ histo->data_size = data_size;
+ histo->meta_format = meta_format;
+
+ histo->pad.flags = MEDIA_PAD_FL_SINK;
+ histo->video.vfl_dir = VFL_DIR_RX;
+
+ mutex_init(&histo->lock);
+ spin_lock_init(&histo->irqlock);
+ INIT_LIST_HEAD(&histo->irqqueue);
+ init_waitqueue_head(&histo->wait_queue);
+
+ /* Initialize the VSP entity... */
+ histo->entity.ops = ops;
+ histo->entity.type = type;
+
+ ret = vsp1_entity_init(vsp1, &histo->entity, name, 2, &histo_ops,
+ MEDIA_ENT_F_PROC_VIDEO_STATISTICS);
+ if (ret < 0)
+ return ret;
+
+ /* ... and the media entity... */
+ ret = media_entity_pads_init(&histo->video.entity, 1, &histo->pad);
+ if (ret < 0)
+ return ret;
+
+ /* ... and the video node... */
+ histo->video.v4l2_dev = &vsp1->v4l2_dev;
+ histo->video.fops = &histo_v4l2_fops;
+ snprintf(histo->video.name, sizeof(histo->video.name),
+ "%s histo", histo->entity.subdev.name);
+ histo->video.vfl_type = VFL_TYPE_GRABBER;
+ histo->video.release = video_device_release_empty;
+ histo->video.ioctl_ops = &histo_v4l2_ioctl_ops;
+
+ video_set_drvdata(&histo->video, histo);
+
+ /* ... and the buffers queue... */
+ histo->queue.type = V4L2_BUF_TYPE_META_CAPTURE;
+ histo->queue.io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
+ histo->queue.lock = &histo->lock;
+ histo->queue.drv_priv = histo;
+ histo->queue.buf_struct_size = sizeof(struct vsp1_histogram_buffer);
+ histo->queue.ops = &histo_video_queue_qops;
+ histo->queue.mem_ops = &vb2_vmalloc_memops;
+ histo->queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ histo->queue.dev = vsp1->dev;
+ ret = vb2_queue_init(&histo->queue);
+ if (ret < 0) {
+ dev_err(vsp1->dev, "failed to initialize vb2 queue\n");
+ goto error;
+ }
+
+ /* ... and register the video device. */
+ histo->video.queue = &histo->queue;
+ ret = video_register_device(&histo->video, VFL_TYPE_GRABBER, -1);
+ if (ret < 0) {
+ dev_err(vsp1->dev, "failed to register video device\n");
+ goto error;
+ }
+
+ return 0;
+
+error:
+ vsp1_histogram_cleanup(histo);
+ return ret;
+}
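
A minimal sketch of how a userspace application could capture one statistics buffer from the metadata video node created above, assuming the node is /dev/video4 and that the V4L2 metadata uAPI from this series is available; error handling is omitted. The buffer type, format ioctls and payload size come from the code above.

	#include <fcntl.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <sys/mman.h>
	#include <linux/videodev2.h>

	static int capture_one_histogram(const char *devname)	/* e.g. "/dev/video4" */
	{
		enum v4l2_buf_type type = V4L2_BUF_TYPE_META_CAPTURE;
		struct v4l2_requestbuffers reqbufs;
		struct v4l2_format fmt;
		struct v4l2_buffer buf;
		void *mem;
		int fd = open(devname, O_RDWR);

		/* The driver reports the meta format and the required buffer size. */
		memset(&fmt, 0, sizeof(fmt));
		fmt.type = V4L2_BUF_TYPE_META_CAPTURE;
		ioctl(fd, VIDIOC_G_FMT, &fmt);

		memset(&reqbufs, 0, sizeof(reqbufs));
		reqbufs.count = 1;
		reqbufs.type = V4L2_BUF_TYPE_META_CAPTURE;
		reqbufs.memory = V4L2_MEMORY_MMAP;
		ioctl(fd, VIDIOC_REQBUFS, &reqbufs);

		memset(&buf, 0, sizeof(buf));
		buf.type = V4L2_BUF_TYPE_META_CAPTURE;
		buf.memory = V4L2_MEMORY_MMAP;
		buf.index = 0;
		ioctl(fd, VIDIOC_QUERYBUF, &buf);
		mem = mmap(NULL, buf.length, PROT_READ, MAP_SHARED, fd, buf.m.offset);

		ioctl(fd, VIDIOC_QBUF, &buf);
		ioctl(fd, VIDIOC_STREAMON, &type);

		/* Blocks until the frame end handler completes the buffer. */
		ioctl(fd, VIDIOC_DQBUF, &buf);

		/* buf.bytesused bytes of statistics are now available at mem. */
		munmap(mem, buf.length);
		close(fd);
		return 0;
	}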
diff --git a/drivers/media/platform/vsp1/vsp1_histo.h b/drivers/media/platform/vsp1/vsp1_histo.h
new file mode 100644
index 000000000000..af2874f6031d
--- /dev/null
+++ b/drivers/media/platform/vsp1/vsp1_histo.h
@@ -0,0 +1,84 @@
+/*
+ * vsp1_histo.h -- R-Car VSP1 Histogram API
+ *
+ * Copyright (C) 2016 Renesas Electronics Corporation
+ * Copyright (C) 2016 Laurent Pinchart
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#ifndef __VSP1_HISTO_H__
+#define __VSP1_HISTO_H__
+
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+
+#include <media/media-entity.h>
+#include <media/v4l2-dev.h>
+#include <media/videobuf2-v4l2.h>
+
+#include "vsp1_entity.h"
+
+struct vsp1_device;
+struct vsp1_pipeline;
+
+#define HISTO_PAD_SINK 0
+#define HISTO_PAD_SOURCE 1
+
+struct vsp1_histogram_buffer {
+ struct vb2_v4l2_buffer buf;
+ struct list_head queue;
+ void *addr;
+};
+
+struct vsp1_histogram {
+ struct vsp1_pipeline *pipe;
+
+ struct vsp1_entity entity;
+ struct video_device video;
+ struct media_pad pad;
+
+ const u32 *formats;
+ unsigned int num_formats;
+ size_t data_size;
+ u32 meta_format;
+
+ struct mutex lock;
+ struct vb2_queue queue;
+
+ spinlock_t irqlock;
+ struct list_head irqqueue;
+
+ wait_queue_head_t wait_queue;
+ bool readout;
+};
+
+static inline struct vsp1_histogram *vdev_to_histo(struct video_device *vdev)
+{
+ return container_of(vdev, struct vsp1_histogram, video);
+}
+
+static inline struct vsp1_histogram *subdev_to_histo(struct v4l2_subdev *subdev)
+{
+ return container_of(subdev, struct vsp1_histogram, entity.subdev);
+}
+
+int vsp1_histogram_init(struct vsp1_device *vsp1, struct vsp1_histogram *histo,
+ enum vsp1_entity_type type, const char *name,
+ const struct vsp1_entity_operations *ops,
+ const unsigned int *formats, unsigned int num_formats,
+ size_t data_size, u32 meta_format);
+void vsp1_histogram_destroy(struct vsp1_entity *entity);
+
+struct vsp1_histogram_buffer *
+vsp1_histogram_buffer_get(struct vsp1_histogram *histo);
+void vsp1_histogram_buffer_complete(struct vsp1_histogram *histo,
+ struct vsp1_histogram_buffer *buf,
+ size_t size);
+
+#endif /* __VSP1_HISTO_H__ */
diff --git a/drivers/media/platform/vsp1/vsp1_hsit.c b/drivers/media/platform/vsp1/vsp1_hsit.c
index 94316afc54ff..764d405345ee 100644
--- a/drivers/media/platform/vsp1/vsp1_hsit.c
+++ b/drivers/media/platform/vsp1/vsp1_hsit.c
@@ -84,7 +84,8 @@ static int hsit_set_format(struct v4l2_subdev *subdev,
format = vsp1_entity_get_pad_format(&hsit->entity, config, fmt->pad);
if (fmt->pad == HSIT_PAD_SOURCE) {
- /* The HST and HSI output format code and resolution can't be
+ /*
+ * The HST and HSI output format code and resolution can't be
* modified.
*/
fmt->format = *format;
diff --git a/drivers/media/platform/vsp1/vsp1_lif.c b/drivers/media/platform/vsp1/vsp1_lif.c
index e32acae1fc6e..702487f895b3 100644
--- a/drivers/media/platform/vsp1/vsp1_lif.c
+++ b/drivers/media/platform/vsp1/vsp1_lif.c
@@ -84,7 +84,8 @@ static int lif_set_format(struct v4l2_subdev *subdev,
format = vsp1_entity_get_pad_format(&lif->entity, config, fmt->pad);
if (fmt->pad == LIF_PAD_SOURCE) {
- /* The LIF source format is always identical to its sink
+ /*
+ * The LIF source format is always identical to its sink
* format.
*/
fmt->format = *format;
@@ -176,7 +177,8 @@ struct vsp1_lif *vsp1_lif_create(struct vsp1_device *vsp1)
lif->entity.ops = &lif_entity_ops;
lif->entity.type = VSP1_ENTITY_LIF;
- /* The LIF is never exposed to userspace, but media entity registration
+ /*
+ * The LIF is never exposed to userspace, but media entity registration
* requires a function to be set. Use PROC_VIDEO_PIXEL_FORMATTER just to
* avoid triggering a WARN_ON(), the value won't be seen anywhere.
*/
diff --git a/drivers/media/platform/vsp1/vsp1_pipe.c b/drivers/media/platform/vsp1/vsp1_pipe.c
index 280ba0804699..edebf3fa926f 100644
--- a/drivers/media/platform/vsp1/vsp1_pipe.c
+++ b/drivers/media/platform/vsp1/vsp1_pipe.c
@@ -23,6 +23,8 @@
#include "vsp1_bru.h"
#include "vsp1_dl.h"
#include "vsp1_entity.h"
+#include "vsp1_hgo.h"
+#include "vsp1_hgt.h"
#include "vsp1_pipe.h"
#include "vsp1_rwpf.h"
#include "vsp1_uds.h"
@@ -157,9 +159,15 @@ const struct vsp1_format_info *vsp1_get_format_info(struct vsp1_device *vsp1,
{
unsigned int i;
- /* Special case, the VYUY format is supported on Gen2 only. */
- if (vsp1->info->gen != 2 && fourcc == V4L2_PIX_FMT_VYUY)
- return NULL;
+ /* Special case, the VYUY and HSV formats are supported on Gen2 only. */
+ if (vsp1->info->gen != 2) {
+ switch (fourcc) {
+ case V4L2_PIX_FMT_VYUY:
+ case V4L2_PIX_FMT_HSV24:
+ case V4L2_PIX_FMT_HSV32:
+ return NULL;
+ }
+ }
for (i = 0; i < ARRAY_SIZE(vsp1_video_formats); ++i) {
const struct vsp1_format_info *info = &vsp1_video_formats[i];
@@ -198,11 +206,25 @@ void vsp1_pipeline_reset(struct vsp1_pipeline *pipe)
pipe->output = NULL;
}
+ if (pipe->hgo) {
+ struct vsp1_hgo *hgo = to_hgo(&pipe->hgo->subdev);
+
+ hgo->histo.pipe = NULL;
+ }
+
+ if (pipe->hgt) {
+ struct vsp1_hgt *hgt = to_hgt(&pipe->hgt->subdev);
+
+ hgt->histo.pipe = NULL;
+ }
+
INIT_LIST_HEAD(&pipe->entities);
pipe->state = VSP1_PIPELINE_STOPPED;
pipe->buffers_ready = 0;
pipe->num_inputs = 0;
pipe->bru = NULL;
+ pipe->hgo = NULL;
+ pipe->hgt = NULL;
pipe->lif = NULL;
pipe->uds = NULL;
}
@@ -246,16 +268,17 @@ bool vsp1_pipeline_stopped(struct vsp1_pipeline *pipe)
int vsp1_pipeline_stop(struct vsp1_pipeline *pipe)
{
+ struct vsp1_device *vsp1 = pipe->output->entity.vsp1;
struct vsp1_entity *entity;
unsigned long flags;
int ret;
if (pipe->lif) {
- /* When using display lists in continuous frame mode the only
+ /*
+ * When using display lists in continuous frame mode the only
* way to stop the pipeline is to reset the hardware.
*/
- ret = vsp1_reset_wpf(pipe->output->entity.vsp1,
- pipe->output->entity.index);
+ ret = vsp1_reset_wpf(vsp1, pipe->output->entity.index);
if (ret == 0) {
spin_lock_irqsave(&pipe->irqlock, flags);
pipe->state = VSP1_PIPELINE_STOPPED;
@@ -275,10 +298,20 @@ int vsp1_pipeline_stop(struct vsp1_pipeline *pipe)
list_for_each_entry(entity, &pipe->entities, list_pipe) {
if (entity->route && entity->route->reg)
- vsp1_write(entity->vsp1, entity->route->reg,
+ vsp1_write(vsp1, entity->route->reg,
VI6_DPR_NODE_UNUSED);
}
+ if (pipe->hgo)
+ vsp1_write(vsp1, VI6_DPR_HGO_SMPPT,
+ (7 << VI6_DPR_SMPPT_TGW_SHIFT) |
+ (VI6_DPR_NODE_UNUSED << VI6_DPR_SMPPT_PT_SHIFT));
+
+ if (pipe->hgt)
+ vsp1_write(vsp1, VI6_DPR_HGT_SMPPT,
+ (7 << VI6_DPR_SMPPT_TGW_SHIFT) |
+ (VI6_DPR_NODE_UNUSED << VI6_DPR_SMPPT_PT_SHIFT));
+
v4l2_subdev_call(&pipe->output->entity.subdev, video, s_stream, 0);
return ret;
@@ -302,6 +335,12 @@ void vsp1_pipeline_frame_end(struct vsp1_pipeline *pipe)
vsp1_dlm_irq_frame_end(pipe->output->dlm);
+ if (pipe->hgo)
+ vsp1_hgo_frame_end(pipe->hgo);
+
+ if (pipe->hgt)
+ vsp1_hgt_frame_end(pipe->hgt);
+
if (pipe->frame_end)
pipe->frame_end(pipe);
@@ -322,7 +361,8 @@ void vsp1_pipeline_propagate_alpha(struct vsp1_pipeline *pipe,
if (!pipe->uds)
return;
- /* The BRU background color has a fixed alpha value set to 255, the
+ /*
+ * The BRU background color has a fixed alpha value set to 255, the
* output alpha value is thus always equal to 255.
*/
if (pipe->uds_input->type == VSP1_ENTITY_BRU)
@@ -337,7 +377,8 @@ void vsp1_pipelines_suspend(struct vsp1_device *vsp1)
unsigned int i;
int ret;
- /* To avoid increasing the system suspend time needlessly, loop over the
+ /*
+ * To avoid increasing the system suspend time needlessly, loop over the
* pipelines twice, first to set them all to the stopping state, and
* then to wait for the stop to complete.
*/
diff --git a/drivers/media/platform/vsp1/vsp1_pipe.h b/drivers/media/platform/vsp1/vsp1_pipe.h
index ac4ad2655551..91a784a13422 100644
--- a/drivers/media/platform/vsp1/vsp1_pipe.h
+++ b/drivers/media/platform/vsp1/vsp1_pipe.h
@@ -25,11 +25,12 @@ struct vsp1_rwpf;
/*
* struct vsp1_format_info - VSP1 video format description
- * @mbus: media bus format code
* @fourcc: V4L2 pixel format FCC identifier
+ * @mbus: media bus format code
+ * @hwfmt: VSP1 hardware format
+ * @swap: swap register control
* @planes: number of planes
* @bpp: bits per pixel
- * @hwfmt: VSP1 hardware format
* @swap_yc: the Y and C components are swapped (Y comes before C)
* @swap_uv: the U and V components are swapped (V comes before U)
* @hsub: horizontal subsampling factor
@@ -72,6 +73,8 @@ enum vsp1_pipeline_state {
* @inputs: array of RPFs in the pipeline (indexed by RPF index)
* @output: WPF at the output of the pipeline
* @bru: BRU entity, if present
+ * @hgo: HGO entity, if present
+ * @hgt: HGT entity, if present
* @lif: LIF entity, if present
* @uds: UDS entity, if present
* @uds_input: entity at the input of the UDS, if the UDS is present
@@ -100,6 +103,8 @@ struct vsp1_pipeline {
struct vsp1_rwpf *inputs[VSP1_MAX_RPF];
struct vsp1_rwpf *output;
struct vsp1_entity *bru;
+ struct vsp1_entity *hgo;
+ struct vsp1_entity *hgt;
struct vsp1_entity *lif;
struct vsp1_entity *uds;
struct vsp1_entity *uds_input;
diff --git a/drivers/media/platform/vsp1/vsp1_regs.h b/drivers/media/platform/vsp1/vsp1_regs.h
index 47b1dee044fb..cd3e32af6e3b 100644
--- a/drivers/media/platform/vsp1/vsp1_regs.h
+++ b/drivers/media/platform/vsp1/vsp1_regs.h
@@ -328,8 +328,8 @@
#define VI6_DPR_ROUTE_RT_MASK (0x3f << 0)
#define VI6_DPR_ROUTE_RT_SHIFT 0
-#define VI6_DPR_HGO_SMPPT 0x2050
-#define VI6_DPR_HGT_SMPPT 0x2054
+#define VI6_DPR_HGO_SMPPT 0x2054
+#define VI6_DPR_HGT_SMPPT 0x2058
#define VI6_DPR_SMPPT_TGW_MASK (7 << 8)
#define VI6_DPR_SMPPT_TGW_SHIFT 8
#define VI6_DPR_SMPPT_PT_MASK (0x3f << 0)
@@ -590,33 +590,55 @@
*/
#define VI6_HGO_OFFSET 0x3000
+#define VI6_HGO_OFFSET_HOFFSET_SHIFT 16
+#define VI6_HGO_OFFSET_VOFFSET_SHIFT 0
#define VI6_HGO_SIZE 0x3004
+#define VI6_HGO_SIZE_HSIZE_SHIFT 16
+#define VI6_HGO_SIZE_VSIZE_SHIFT 0
#define VI6_HGO_MODE 0x3008
+#define VI6_HGO_MODE_STEP (1 << 10)
+#define VI6_HGO_MODE_MAXRGB (1 << 7)
+#define VI6_HGO_MODE_OFSB_R (1 << 6)
+#define VI6_HGO_MODE_OFSB_G (1 << 5)
+#define VI6_HGO_MODE_OFSB_B (1 << 4)
+#define VI6_HGO_MODE_HRATIO_SHIFT 2
+#define VI6_HGO_MODE_VRATIO_SHIFT 0
#define VI6_HGO_LB_TH 0x300c
#define VI6_HGO_LBn_H(n) (0x3010 + (n) * 8)
#define VI6_HGO_LBn_V(n) (0x3014 + (n) * 8)
-#define VI6_HGO_R_HISTO 0x3030
+#define VI6_HGO_R_HISTO(n) (0x3030 + (n) * 4)
#define VI6_HGO_R_MAXMIN 0x3130
#define VI6_HGO_R_SUM 0x3134
#define VI6_HGO_R_LB_DET 0x3138
-#define VI6_HGO_G_HISTO 0x3140
+#define VI6_HGO_G_HISTO(n) (0x3140 + (n) * 4)
#define VI6_HGO_G_MAXMIN 0x3240
#define VI6_HGO_G_SUM 0x3244
#define VI6_HGO_G_LB_DET 0x3248
-#define VI6_HGO_B_HISTO 0x3250
+#define VI6_HGO_B_HISTO(n) (0x3250 + (n) * 4)
#define VI6_HGO_B_MAXMIN 0x3350
#define VI6_HGO_B_SUM 0x3354
#define VI6_HGO_B_LB_DET 0x3358
+#define VI6_HGO_EXT_HIST_ADDR 0x335c
+#define VI6_HGO_EXT_HIST_DATA 0x3360
#define VI6_HGO_REGRST 0x33fc
+#define VI6_HGO_REGRST_RCLEA (1 << 0)
/* -----------------------------------------------------------------------------
* HGT Control Registers
*/
#define VI6_HGT_OFFSET 0x3400
+#define VI6_HGT_OFFSET_HOFFSET_SHIFT 16
+#define VI6_HGT_OFFSET_VOFFSET_SHIFT 0
#define VI6_HGT_SIZE 0x3404
+#define VI6_HGT_SIZE_HSIZE_SHIFT 16
+#define VI6_HGT_SIZE_VSIZE_SHIFT 0
#define VI6_HGT_MODE 0x3408
+#define VI6_HGT_MODE_HRATIO_SHIFT 2
+#define VI6_HGT_MODE_VRATIO_SHIFT 0
#define VI6_HGT_HUE_AREA(n) (0x340c + (n) * 4)
+#define VI6_HGT_HUE_AREA_LOWER_SHIFT 16
+#define VI6_HGT_HUE_AREA_UPPER_SHIFT 0
#define VI6_HGT_LB_TH 0x3424
#define VI6_HGT_LBn_H(n) (0x3438 + (n) * 8)
#define VI6_HGT_LBn_V(n) (0x342c + (n) * 8)
@@ -625,6 +647,7 @@
#define VI6_HGT_SUM 0x3754
#define VI6_HGT_LB_DET 0x3758
#define VI6_HGT_REGRST 0x37fc
+#define VI6_HGT_REGRST_RCLEA (1 << 0)
/* -----------------------------------------------------------------------------
* LIF Control Registers
diff --git a/drivers/media/platform/vsp1/vsp1_rpf.c b/drivers/media/platform/vsp1/vsp1_rpf.c
index b2e34a800ffa..8feddd59cf8d 100644
--- a/drivers/media/platform/vsp1/vsp1_rpf.c
+++ b/drivers/media/platform/vsp1/vsp1_rpf.c
@@ -72,7 +72,8 @@ static void rpf_configure(struct vsp1_entity *entity,
}
if (params == VSP1_ENTITY_PARAMS_PARTITION) {
- unsigned int offsets[2];
+ struct vsp1_device *vsp1 = rpf->entity.vsp1;
+ struct vsp1_rwpf_memory mem = rpf->mem;
struct v4l2_rect crop;
/*
@@ -105,7 +106,7 @@ static void rpf_configure(struct vsp1_entity *entity,
* of the pipeline.
*/
output = vsp1_entity_get_pad_format(wpf, wpf->config,
- RWPF_PAD_SOURCE);
+ RWPF_PAD_SINK);
crop.width = pipe->partition.width * input_width
/ output->width;
@@ -120,22 +121,30 @@ static void rpf_configure(struct vsp1_entity *entity,
(crop.width << VI6_RPF_SRC_ESIZE_EHSIZE_SHIFT) |
(crop.height << VI6_RPF_SRC_ESIZE_EVSIZE_SHIFT));
- offsets[0] = crop.top * format->plane_fmt[0].bytesperline
- + crop.left * fmtinfo->bpp[0] / 8;
-
- if (format->num_planes > 1)
- offsets[1] = crop.top * format->plane_fmt[1].bytesperline
- + crop.left / fmtinfo->hsub
- * fmtinfo->bpp[1] / 8;
- else
- offsets[1] = 0;
-
- vsp1_rpf_write(rpf, dl, VI6_RPF_SRCM_ADDR_Y,
- rpf->mem.addr[0] + offsets[0]);
- vsp1_rpf_write(rpf, dl, VI6_RPF_SRCM_ADDR_C0,
- rpf->mem.addr[1] + offsets[1]);
- vsp1_rpf_write(rpf, dl, VI6_RPF_SRCM_ADDR_C1,
- rpf->mem.addr[2] + offsets[1]);
+ mem.addr[0] += crop.top * format->plane_fmt[0].bytesperline
+ + crop.left * fmtinfo->bpp[0] / 8;
+
+ if (format->num_planes > 1) {
+ unsigned int offset;
+
+ offset = crop.top * format->plane_fmt[1].bytesperline
+ + crop.left / fmtinfo->hsub
+ * fmtinfo->bpp[1] / 8;
+ mem.addr[1] += offset;
+ mem.addr[2] += offset;
+ }
+
+ /*
+ * On Gen3 hardware the SPUVS bit has no effect on 3-planar
+ * formats. Swap the U and V planes manually in that case.
+ */
+ if (vsp1->info->gen == 3 && format->num_planes == 3 &&
+ fmtinfo->swap_uv)
+ swap(mem.addr[1], mem.addr[2]);
+
+ vsp1_rpf_write(rpf, dl, VI6_RPF_SRCM_ADDR_Y, mem.addr[0]);
+ vsp1_rpf_write(rpf, dl, VI6_RPF_SRCM_ADDR_C0, mem.addr[1]);
+ vsp1_rpf_write(rpf, dl, VI6_RPF_SRCM_ADDR_C1, mem.addr[2]);
return;
}
@@ -186,7 +195,8 @@ static void rpf_configure(struct vsp1_entity *entity,
(left << VI6_RPF_LOC_HCOORD_SHIFT) |
(top << VI6_RPF_LOC_VCOORD_SHIFT));
- /* On Gen2 use the alpha channel (extended to 8 bits) when available or
+ /*
+ * On Gen2 use the alpha channel (extended to 8 bits) when available or
* a fixed alpha value set through the V4L2_CID_ALPHA_COMPONENT control
* otherwise.
*
@@ -216,7 +226,8 @@ static void rpf_configure(struct vsp1_entity *entity,
u32 mult;
if (fmtinfo->alpha) {
- /* When the input contains an alpha channel enable the
+ /*
+ * When the input contains an alpha channel enable the
* alpha multiplier. If the input is premultiplied we
* need to multiply both the alpha channel and the pixel
* components by the global alpha value to keep them
@@ -231,7 +242,8 @@ static void rpf_configure(struct vsp1_entity *entity,
VI6_RPF_MULT_ALPHA_P_MMD_RATIO :
VI6_RPF_MULT_ALPHA_P_MMD_NONE);
} else {
- /* When the input doesn't contain an alpha channel the
+ /*
+ * When the input doesn't contain an alpha channel the
* global alpha value is applied in the unpacking unit,
* the alpha multiplier isn't needed and must be
* disabled.
diff --git a/drivers/media/platform/vsp1/vsp1_rwpf.c b/drivers/media/platform/vsp1/vsp1_rwpf.c
index 04104ef28fb5..cfd8f1904fa6 100644
--- a/drivers/media/platform/vsp1/vsp1_rwpf.c
+++ b/drivers/media/platform/vsp1/vsp1_rwpf.c
@@ -86,7 +86,8 @@ static int vsp1_rwpf_set_format(struct v4l2_subdev *subdev,
format = vsp1_entity_get_pad_format(&rwpf->entity, config, fmt->pad);
if (fmt->pad == RWPF_PAD_SOURCE) {
- /* The RWPF performs format conversion but can't scale, only the
+ /*
+ * The RWPF performs format conversion but can't scale, only the
* format code can be changed on the source pad.
*/
format->code = fmt->format.code;
@@ -120,6 +121,11 @@ static int vsp1_rwpf_set_format(struct v4l2_subdev *subdev,
RWPF_PAD_SOURCE);
*format = fmt->format;
+ if (rwpf->flip.rotate) {
+ format->width = fmt->format.height;
+ format->height = fmt->format.width;
+ }
+
done:
mutex_unlock(&rwpf->entity.lock);
return ret;
@@ -205,7 +211,8 @@ static int vsp1_rwpf_set_selection(struct v4l2_subdev *subdev,
format = vsp1_entity_get_pad_format(&rwpf->entity, config,
RWPF_PAD_SINK);
- /* Restrict the crop rectangle coordinates to multiples of 2 to avoid
+ /*
+ * Restrict the crop rectangle coordinates to multiples of 2 to avoid
* shifting the color plane.
*/
if (format->code == MEDIA_BUS_FMT_AYUV8_1X32) {
diff --git a/drivers/media/platform/vsp1/vsp1_rwpf.h b/drivers/media/platform/vsp1/vsp1_rwpf.h
index 1c98aff3da5d..58215a7ab631 100644
--- a/drivers/media/platform/vsp1/vsp1_rwpf.h
+++ b/drivers/media/platform/vsp1/vsp1_rwpf.h
@@ -56,9 +56,14 @@ struct vsp1_rwpf {
struct {
spinlock_t lock;
- struct v4l2_ctrl *ctrls[2];
+ struct {
+ struct v4l2_ctrl *vflip;
+ struct v4l2_ctrl *hflip;
+ struct v4l2_ctrl *rotate;
+ } ctrls;
unsigned int pending;
unsigned int active;
+ bool rotate;
} flip;
struct vsp1_rwpf_memory mem;
diff --git a/drivers/media/platform/vsp1/vsp1_sru.c b/drivers/media/platform/vsp1/vsp1_sru.c
index b4e568a3b4ed..30142793dfcd 100644
--- a/drivers/media/platform/vsp1/vsp1_sru.c
+++ b/drivers/media/platform/vsp1/vsp1_sru.c
@@ -191,7 +191,8 @@ static void sru_try_format(struct vsp1_sru *sru,
SRU_PAD_SINK);
fmt->code = format->code;
- /* We can upscale by 2 in both direction, but not independently.
+ /*
+ * We can upscale by 2 in both directions, but not independently.
* Compare the input and output rectangles areas (avoiding
* integer overflows on the output): if the requested output
* area is larger than 1.5^2 the input area upscale by two,
diff --git a/drivers/media/platform/vsp1/vsp1_uds.c b/drivers/media/platform/vsp1/vsp1_uds.c
index da8f89a31ea4..4226403ad235 100644
--- a/drivers/media/platform/vsp1/vsp1_uds.c
+++ b/drivers/media/platform/vsp1/vsp1_uds.c
@@ -293,7 +293,8 @@ static void uds_configure(struct vsp1_entity *entity,
dev_dbg(uds->entity.vsp1->dev, "hscale %u vscale %u\n", hscale, vscale);
- /* Multi-tap scaling can't be enabled along with alpha scaling when
+ /*
+ * Multi-tap scaling can't be enabled along with alpha scaling when
* scaling down with a factor lower than or equal to 1/2 in either
* direction.
*/
diff --git a/drivers/media/platform/vsp1/vsp1_video.c b/drivers/media/platform/vsp1/vsp1_video.c
index 3eaadbf7a876..eab3c3ea85d7 100644
--- a/drivers/media/platform/vsp1/vsp1_video.c
+++ b/drivers/media/platform/vsp1/vsp1_video.c
@@ -31,6 +31,8 @@
#include "vsp1_bru.h"
#include "vsp1_dl.h"
#include "vsp1_entity.h"
+#include "vsp1_hgo.h"
+#include "vsp1_hgt.h"
#include "vsp1_pipe.h"
#include "vsp1_rwpf.h"
#include "vsp1_uds.h"
@@ -103,7 +105,8 @@ static int __vsp1_video_try_format(struct vsp1_video *video,
unsigned int height = pix->height;
unsigned int i;
- /* Backward compatibility: replace deprecated RGB formats by their XRGB
+ /*
+ * Backward compatibility: replace deprecated RGB formats by their XRGB
* equivalent. This selects the format older userspace applications want
* while still exposing the new format.
*/
@@ -114,7 +117,8 @@ static int __vsp1_video_try_format(struct vsp1_video *video,
}
}
- /* Retrieve format information and select the default format if the
+ /*
+ * Retrieve format information and select the default format if the
* requested format isn't supported.
*/
info = vsp1_get_format_info(video->vsp1, pix->pixelformat);
@@ -140,7 +144,8 @@ static int __vsp1_video_try_format(struct vsp1_video *video,
pix->height = clamp(height, VSP1_VIDEO_MIN_HEIGHT,
VSP1_VIDEO_MAX_HEIGHT);
- /* Compute and clamp the stride and image size. While not documented in
+ /*
+ * Compute and clamp the stride and image size. While not documented in
* the datasheet, strides not aligned to a multiple of 128 bytes result
* in image corruption.
*/
@@ -184,9 +189,13 @@ static void vsp1_video_pipeline_setup_partitions(struct vsp1_pipeline *pipe)
struct vsp1_entity *entity;
unsigned int div_size;
+ /*
+ * Partitions are computed on the size before rotation, use the format
+ * at the WPF sink.
+ */
format = vsp1_entity_get_pad_format(&pipe->output->entity,
pipe->output->entity.config,
- RWPF_PAD_SOURCE);
+ RWPF_PAD_SINK);
div_size = format->width;
/* Gen2 hardware doesn't require image partitioning. */
@@ -226,9 +235,13 @@ static struct v4l2_rect vsp1_video_partition(struct vsp1_pipeline *pipe,
struct v4l2_rect partition;
unsigned int modulus;
+ /*
+ * Partitions are computed on the size before rotation, use the format
+ * at the WPF sink.
+ */
format = vsp1_entity_get_pad_format(&pipe->output->entity,
pipe->output->entity.config,
- RWPF_PAD_SOURCE);
+ RWPF_PAD_SINK);
/* A single partition simply processes the output size in full. */
if (pipe->partitions <= 1) {
@@ -449,7 +462,8 @@ static void vsp1_video_pipeline_frame_end(struct vsp1_pipeline *pipe)
state = pipe->state;
pipe->state = VSP1_PIPELINE_STOPPED;
- /* If a stop has been requested, mark the pipeline as stopped and
+ /*
+ * If a stop has been requested, mark the pipeline as stopped and
* return. Otherwise restart the pipeline if ready.
*/
if (state == VSP1_PIPELINE_STOPPING)
@@ -474,7 +488,12 @@ static int vsp1_video_pipeline_build_branch(struct vsp1_pipeline *pipe,
if (ret < 0)
return ret;
- pad = media_entity_remote_pad(&input->entity.pads[RWPF_PAD_SOURCE]);
+ /*
+ * The main data path doesn't include the HGO or HGT, use
+ * vsp1_entity_remote_pad() to traverse the graph.
+ */
+
+ pad = vsp1_entity_remote_pad(&input->entity.pads[RWPF_PAD_SOURCE]);
while (1) {
if (pad == NULL) {
@@ -491,7 +510,8 @@ static int vsp1_video_pipeline_build_branch(struct vsp1_pipeline *pipe,
entity = to_vsp1_entity(
media_entity_to_v4l2_subdev(pad->entity));
- /* A BRU is present in the pipeline, store the BRU input pad
+ /*
+ * A BRU is present in the pipeline, store the BRU input pad
* number in the input RPF for use when configuring the RPF.
*/
if (entity->type == VSP1_ENTITY_BRU) {
@@ -526,13 +546,9 @@ static int vsp1_video_pipeline_build_branch(struct vsp1_pipeline *pipe,
: &input->entity;
}
- /* Follow the source link. The link setup operations ensure
- * that the output fan-out can't be more than one, there is thus
- * no need to verify here that only a single source link is
- * activated.
- */
+ /* Follow the source link, ignoring any HGO or HGT. */
pad = &entity->pads[entity->source_pad];
- pad = media_entity_remote_pad(pad);
+ pad = vsp1_entity_remote_pad(pad);
}
/* The last entity must be the output WPF. */
@@ -587,6 +603,16 @@ static int vsp1_video_pipeline_build(struct vsp1_pipeline *pipe,
pipe->lif = e;
} else if (e->type == VSP1_ENTITY_BRU) {
pipe->bru = e;
+ } else if (e->type == VSP1_ENTITY_HGO) {
+ struct vsp1_hgo *hgo = to_hgo(subdev);
+
+ pipe->hgo = e;
+ hgo->histo.pipe = pipe;
+ } else if (e->type == VSP1_ENTITY_HGT) {
+ struct vsp1_hgt *hgt = to_hgt(subdev);
+
+ pipe->hgt = e;
+ hgt->histo.pipe = pipe;
}
}
@@ -596,7 +622,8 @@ static int vsp1_video_pipeline_build(struct vsp1_pipeline *pipe,
if (pipe->num_inputs == 0 || !pipe->output)
return -EPIPE;
- /* Follow links downstream for each input and make sure the graph
+ /*
+ * Follow links downstream for each input and make sure the graph
* contains no loop and that all branches end at the output WPF.
*/
for (i = 0; i < video->vsp1->info->rpf_count; ++i) {
@@ -627,7 +654,8 @@ static struct vsp1_pipeline *vsp1_video_pipeline_get(struct vsp1_video *video)
struct vsp1_pipeline *pipe;
int ret;
- /* Get a pipeline object for the video node. If a pipeline has already
+ /*
+ * Get a pipeline object for the video node. If a pipeline has already
* been allocated just increment its reference count and return it.
* Otherwise allocate a new pipeline and initialize it, it will be freed
* when the last reference is released.
@@ -767,7 +795,8 @@ static int vsp1_video_setup_pipeline(struct vsp1_pipeline *pipe)
if (pipe->uds) {
struct vsp1_uds *uds = to_uds(&pipe->uds->subdev);
- /* If a BRU is present in the pipeline before the UDS, the alpha
+ /*
+ * If a BRU is present in the pipeline before the UDS, the alpha
* component doesn't need to be scaled as the BRU output alpha
* value is fixed to 255. Otherwise we need to scale the alpha
* component only when available at the input RPF.
@@ -783,7 +812,7 @@ static int vsp1_video_setup_pipeline(struct vsp1_pipeline *pipe)
}
list_for_each_entry(entity, &pipe->entities, list_pipe) {
- vsp1_entity_route_setup(entity, pipe->dl);
+ vsp1_entity_route_setup(entity, pipe, pipe->dl);
if (entity->ops->configure)
entity->ops->configure(entity, pipe, pipe->dl,
@@ -797,6 +826,7 @@ static int vsp1_video_start_streaming(struct vb2_queue *vq, unsigned int count)
{
struct vsp1_video *video = vb2_get_drv_priv(vq);
struct vsp1_pipeline *pipe = video->rwpf->pipe;
+ bool start_pipeline = false;
unsigned long flags;
int ret;
@@ -807,11 +837,23 @@ static int vsp1_video_start_streaming(struct vb2_queue *vq, unsigned int count)
mutex_unlock(&pipe->lock);
return ret;
}
+
+ start_pipeline = true;
}
pipe->stream_count++;
mutex_unlock(&pipe->lock);
+ /*
+ * vsp1_pipeline_ready() is not sufficient to establish that all streams
+ * are prepared and the pipeline is configured, as multiple streams
+ * can race through streamon with buffers already queued; therefore we
+ * don't even attempt to start the pipeline until the last stream has
+ * called through here.
+ */
+ if (!start_pipeline)
+ return 0;
+
spin_lock_irqsave(&pipe->irqlock, flags);
if (vsp1_pipeline_ready(pipe))
vsp1_video_pipeline_run(pipe);
@@ -968,7 +1010,8 @@ vsp1_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
if (video->queue.owner && video->queue.owner != file->private_data)
return -EBUSY;
- /* Get a pipeline for the video node and start streaming on it. No link
+ /*
+ * Get a pipeline for the video node and start streaming on it. No link
* touching an entity in the pipeline can be activated or deactivated
* once streaming is started.
*/
@@ -988,7 +1031,8 @@ vsp1_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
mutex_unlock(&mdev->graph_mutex);
- /* Verify that the configured format matches the output of the connected
+ /*
+ * Verify that the configured format matches the output of the connected
* subdev.
*/
ret = vsp1_video_verify_format(video);
@@ -1050,6 +1094,7 @@ static int vsp1_video_open(struct file *file)
ret = vsp1_device_get(video->vsp1);
if (ret < 0) {
v4l2_fh_del(vfh);
+ v4l2_fh_exit(vfh);
kfree(vfh);
}
diff --git a/drivers/media/platform/vsp1/vsp1_wpf.c b/drivers/media/platform/vsp1/vsp1_wpf.c
index 7c48f81cd5c1..32df109b119f 100644
--- a/drivers/media/platform/vsp1/vsp1_wpf.c
+++ b/drivers/media/platform/vsp1/vsp1_wpf.c
@@ -43,32 +43,90 @@ static inline void vsp1_wpf_write(struct vsp1_rwpf *wpf,
enum wpf_flip_ctrl {
WPF_CTRL_VFLIP = 0,
WPF_CTRL_HFLIP = 1,
- WPF_CTRL_MAX,
};
+static int vsp1_wpf_set_rotation(struct vsp1_rwpf *wpf, unsigned int rotation)
+{
+ struct vsp1_video *video = wpf->video;
+ struct v4l2_mbus_framefmt *sink_format;
+ struct v4l2_mbus_framefmt *source_format;
+ bool rotate;
+ int ret = 0;
+
+ /*
+ * Only consider the 0°/180° from/to 90°/270° modifications; the rest
+ * is taken care of by the flipping configuration.
+ */
+ rotate = rotation == 90 || rotation == 270;
+ if (rotate == wpf->flip.rotate)
+ return 0;
+
+ /* Changing rotation isn't allowed when buffers are allocated. */
+ mutex_lock(&video->lock);
+
+ if (vb2_is_busy(&video->queue)) {
+ ret = -EBUSY;
+ goto done;
+ }
+
+ sink_format = vsp1_entity_get_pad_format(&wpf->entity,
+ wpf->entity.config,
+ RWPF_PAD_SINK);
+ source_format = vsp1_entity_get_pad_format(&wpf->entity,
+ wpf->entity.config,
+ RWPF_PAD_SOURCE);
+
+ mutex_lock(&wpf->entity.lock);
+
+ if (rotate) {
+ source_format->width = sink_format->height;
+ source_format->height = sink_format->width;
+ } else {
+ source_format->width = sink_format->width;
+ source_format->height = sink_format->height;
+ }
+
+ wpf->flip.rotate = rotate;
+
+ mutex_unlock(&wpf->entity.lock);
+
+done:
+ mutex_unlock(&video->lock);
+ return ret;
+}
+
static int vsp1_wpf_s_ctrl(struct v4l2_ctrl *ctrl)
{
struct vsp1_rwpf *wpf =
container_of(ctrl->handler, struct vsp1_rwpf, ctrls);
- unsigned int i;
+ unsigned int rotation;
u32 flip = 0;
+ int ret;
- switch (ctrl->id) {
- case V4L2_CID_HFLIP:
- case V4L2_CID_VFLIP:
- for (i = 0; i < WPF_CTRL_MAX; ++i) {
- if (wpf->flip.ctrls[i])
- flip |= wpf->flip.ctrls[i]->val ? BIT(i) : 0;
- }
+ /* Update the rotation. */
+ rotation = wpf->flip.ctrls.rotate ? wpf->flip.ctrls.rotate->val : 0;
+ ret = vsp1_wpf_set_rotation(wpf, rotation);
+ if (ret < 0)
+ return ret;
- spin_lock_irq(&wpf->flip.lock);
- wpf->flip.pending = flip;
- spin_unlock_irq(&wpf->flip.lock);
- break;
+ /*
+ * Compute the flip value resulting from all three controls, with
+ * rotation by 180° flipping the image in both directions. Store the
+ * result in the pending flip field for the next frame that will be
+ * processed.
+ */
+ if (wpf->flip.ctrls.vflip->val)
+ flip |= BIT(WPF_CTRL_VFLIP);
- default:
- return -EINVAL;
- }
+ if (wpf->flip.ctrls.hflip && wpf->flip.ctrls.hflip->val)
+ flip |= BIT(WPF_CTRL_HFLIP);
+
+ if (rotation == 180 || rotation == 270)
+ flip ^= BIT(WPF_CTRL_VFLIP) | BIT(WPF_CTRL_HFLIP);
+
+ spin_lock_irq(&wpf->flip.lock);
+ wpf->flip.pending = flip;
+ spin_unlock_irq(&wpf->flip.lock);
return 0;
}
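
A short sketch of requesting the new rotation from userspace through the standard control (the WPF subdevice file descriptor is an assumption). Note that vsp1_wpf_set_rotation() above returns -EBUSY while buffers are allocated on the WPF video node, and that a 180° rotation is folded into the two flip bits rather than the rotate flag.

	#include <sys/ioctl.h>
	#include <linux/videodev2.h>

	/* Rotate the WPF output by 90 degrees; wpf_fd refers to the WPF subdevice. */
	static int wpf_request_rotation(int wpf_fd)
	{
		struct v4l2_control ctrl = {
			.id = V4L2_CID_ROTATE,
			.value = 90,
		};

		return ioctl(wpf_fd, VIDIOC_S_CTRL, &ctrl);
	}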
@@ -88,12 +146,14 @@ static int wpf_init_controls(struct vsp1_rwpf *wpf)
/* Only WPF0 supports flipping. */
num_flip_ctrls = 0;
} else if (vsp1->info->features & VSP1_HAS_WPF_HFLIP) {
- /* When horizontal flip is supported the WPF implements two
- * controls (horizontal flip and vertical flip).
+ /*
+ * When horizontal flip is supported the WPF implements three
+ * controls (horizontal flip, vertical flip and rotation).
*/
- num_flip_ctrls = 2;
+ num_flip_ctrls = 3;
} else if (vsp1->info->features & VSP1_HAS_WPF_VFLIP) {
- /* When only vertical flip is supported the WPF implements a
+ /*
+ * When only vertical flip is supported the WPF implements a
* single control (vertical flip).
*/
num_flip_ctrls = 1;
@@ -105,17 +165,19 @@ static int wpf_init_controls(struct vsp1_rwpf *wpf)
vsp1_rwpf_init_ctrls(wpf, num_flip_ctrls);
if (num_flip_ctrls >= 1) {
- wpf->flip.ctrls[WPF_CTRL_VFLIP] =
+ wpf->flip.ctrls.vflip =
v4l2_ctrl_new_std(&wpf->ctrls, &vsp1_wpf_ctrl_ops,
V4L2_CID_VFLIP, 0, 1, 1, 0);
}
- if (num_flip_ctrls == 2) {
- wpf->flip.ctrls[WPF_CTRL_HFLIP] =
+ if (num_flip_ctrls == 3) {
+ wpf->flip.ctrls.hflip =
v4l2_ctrl_new_std(&wpf->ctrls, &vsp1_wpf_ctrl_ops,
V4L2_CID_HFLIP, 0, 1, 1, 0);
-
- v4l2_ctrl_cluster(2, wpf->flip.ctrls);
+ wpf->flip.ctrls.rotate =
+ v4l2_ctrl_new_std(&wpf->ctrls, &vsp1_wpf_ctrl_ops,
+ V4L2_CID_ROTATE, 0, 270, 90, 0);
+ v4l2_ctrl_cluster(3, &wpf->flip.ctrls.vflip);
}
if (wpf->ctrls.error) {
@@ -139,7 +201,8 @@ static int wpf_s_stream(struct v4l2_subdev *subdev, int enable)
if (enable)
return 0;
- /* Write to registers directly when stopping the stream as there will be
+ /*
+ * Write to registers directly when stopping the stream as there will be
* no pipeline run to apply the display list.
*/
vsp1_write(vsp1, VI6_WPF_IRQ_ENB(wpf->entity.index), 0);
@@ -216,10 +279,11 @@ static void wpf_configure(struct vsp1_entity *entity,
if (params == VSP1_ENTITY_PARAMS_PARTITION) {
const struct v4l2_pix_format_mplane *format = &wpf->format;
+ const struct vsp1_format_info *fmtinfo = wpf->fmtinfo;
struct vsp1_rwpf_memory mem = wpf->mem;
unsigned int flip = wpf->flip.active;
- unsigned int width = source_format->width;
- unsigned int height = source_format->height;
+ unsigned int width = sink_format->width;
+ unsigned int height = sink_format->height;
unsigned int offset;
/*
@@ -242,45 +306,86 @@ static void wpf_configure(struct vsp1_entity *entity,
/*
* Update the memory offsets based on flipping configuration.
* The destination addresses point to the locations where the
- * VSP starts writing to memory, which can be different corners
- * of the image depending on vertical flipping.
+ * VSP starts writing to memory, which can be any corner of the
+ * image depending on the combination of flipping and rotation.
*/
- if (pipe->partitions > 1) {
- const struct vsp1_format_info *fmtinfo = wpf->fmtinfo;
- /*
- * Horizontal flipping is handled through a line buffer
- * and doesn't modify the start address, but still needs
- * to be handled when image partitioning is in effect to
- * order the partitions correctly.
- */
- if (flip & BIT(WPF_CTRL_HFLIP))
- offset = format->width - pipe->partition.left
- - pipe->partition.width;
+ /*
+ * First take the partition left coordinate into account.
+ * Compute the offset to order the partitions correctly on the
+ * output based on whether flipping is enabled. Consider
+ * horizontal flipping when rotation is disabled but vertical
+ * flipping when rotation is enabled, as rotating the image
+ * switches the horizontal and vertical directions. The offset
+ * is applied horizontally or vertically accordingly.
+ */
+ if (flip & BIT(WPF_CTRL_HFLIP) && !wpf->flip.rotate)
+ offset = format->width - pipe->partition.left
+ - pipe->partition.width;
+ else if (flip & BIT(WPF_CTRL_VFLIP) && wpf->flip.rotate)
+ offset = format->height - pipe->partition.left
+ - pipe->partition.width;
+ else
+ offset = pipe->partition.left;
+
+ for (i = 0; i < format->num_planes; ++i) {
+ unsigned int hsub = i > 0 ? fmtinfo->hsub : 1;
+ unsigned int vsub = i > 0 ? fmtinfo->vsub : 1;
+
+ if (wpf->flip.rotate)
+ mem.addr[i] += offset / vsub
+ * format->plane_fmt[i].bytesperline;
else
- offset = pipe->partition.left;
-
- mem.addr[0] += offset * fmtinfo->bpp[0] / 8;
- if (format->num_planes > 1) {
- mem.addr[1] += offset / fmtinfo->hsub
- * fmtinfo->bpp[1] / 8;
- mem.addr[2] += offset / fmtinfo->hsub
- * fmtinfo->bpp[2] / 8;
- }
+ mem.addr[i] += offset / hsub
+ * fmtinfo->bpp[i] / 8;
}
if (flip & BIT(WPF_CTRL_VFLIP)) {
- mem.addr[0] += (format->height - 1)
+ /*
+ * When rotating the output (after rotation) image
+ * height is equal to the partition width (before
+ * rotation). Otherwise it is equal to the output
+ * image height.
+ */
+ if (wpf->flip.rotate)
+ height = pipe->partition.width;
+ else
+ height = format->height;
+
+ mem.addr[0] += (height - 1)
* format->plane_fmt[0].bytesperline;
if (format->num_planes > 1) {
- offset = (format->height / wpf->fmtinfo->vsub - 1)
+ offset = (height / fmtinfo->vsub - 1)
* format->plane_fmt[1].bytesperline;
mem.addr[1] += offset;
mem.addr[2] += offset;
}
}
+ if (wpf->flip.rotate && !(flip & BIT(WPF_CTRL_HFLIP))) {
+ unsigned int hoffset = max(0, (int)format->width - 16);
+
+ /*
+ * Compute the output coordinate. The partition
+ * horizontal (left) offset becomes a vertical offset.
+ */
+ for (i = 0; i < format->num_planes; ++i) {
+ unsigned int hsub = i > 0 ? fmtinfo->hsub : 1;
+
+ mem.addr[i] += hoffset / hsub
+ * fmtinfo->bpp[i] / 8;
+ }
+ }
+
+ /*
+ * On Gen3 hardware the SPUVS bit has no effect on 3-planar
+ * formats. Swap the U and V planes manually in that case.
+ */
+ if (vsp1->info->gen == 3 && format->num_planes == 3 &&
+ fmtinfo->swap_uv)
+ swap(mem.addr[1], mem.addr[2]);
+
vsp1_wpf_write(wpf, dl, VI6_WPF_DSTM_ADDR_Y, mem.addr[0]);
vsp1_wpf_write(wpf, dl, VI6_WPF_DSTM_ADDR_C0, mem.addr[1]);
vsp1_wpf_write(wpf, dl, VI6_WPF_DSTM_ADDR_C1, mem.addr[2]);
@@ -294,6 +399,9 @@ static void wpf_configure(struct vsp1_entity *entity,
outfmt = fmtinfo->hwfmt << VI6_WPF_OUTFMT_WRFMT_SHIFT;
+ if (wpf->flip.rotate)
+ outfmt |= VI6_WPF_OUTFMT_ROT;
+
if (fmtinfo->alpha)
outfmt |= VI6_WPF_OUTFMT_PXA;
if (fmtinfo->swap_yc)
@@ -327,7 +435,8 @@ static void wpf_configure(struct vsp1_entity *entity,
vsp1_dl_list_write(dl, VI6_WPF_WRBCK_CTRL, 0);
- /* Sources. If the pipeline has a single input and BRU is not used,
+ /*
+ * Sources. If the pipeline has a single input and BRU is not used,
* configure it as the master layer. Otherwise configure all
* inputs as sub-layers and select the virtual RPF as the master
* layer.
@@ -354,9 +463,18 @@ static void wpf_configure(struct vsp1_entity *entity,
VI6_WFP_IRQ_ENB_DFEE);
}
+static unsigned int wpf_max_width(struct vsp1_entity *entity,
+ struct vsp1_pipeline *pipe)
+{
+ struct vsp1_rwpf *wpf = to_rwpf(&entity->subdev);
+
+ return wpf->flip.rotate ? 256 : wpf->max_width;
+}
+
static const struct vsp1_entity_operations wpf_entity_ops = {
.destroy = vsp1_wpf_destroy,
.configure = wpf_configure,
+ .max_width = wpf_max_width,
};
/* -----------------------------------------------------------------------------
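A quick worked illustration of the vertical-flip destination address computation introduced above (a minimal userspace sketch with hypothetical NV12 numbers, not driver code): the luma write pointer moves to the last line of the plane and the chroma pointer to the last subsampled line, mirroring how mem.addr[] is adjusted in wpf_configure().

/*
 * Minimal sketch of the VFLIP start-address arithmetic, using
 * hypothetical NV12 values (1920x1080, 1920-byte stride, vsub = 2).
 * Illustration only; it merely mirrors the adjustments made to mem.addr[].
 */
#include <stdio.h>

int main(void)
{
	unsigned long y_base = 0x10000000ul, c_base = 0x20000000ul;
	unsigned int height = 1080, stride = 1920, vsub = 2;

	/* Luma: start writing at the last line of the plane. */
	unsigned long y_addr = y_base + (unsigned long)(height - 1) * stride;
	/* Chroma: same idea with the vertically subsampled height. */
	unsigned long c_addr = c_base + (unsigned long)(height / vsub - 1) * stride;

	printf("Y start 0x%lx, C start 0x%lx\n", y_addr, c_addr);
	return 0;
}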
diff --git a/drivers/media/radio/si4713/si4713.c b/drivers/media/radio/si4713/si4713.c
index 60f026a58076..f4a53f1e856e 100644
--- a/drivers/media/radio/si4713/si4713.c
+++ b/drivers/media/radio/si4713/si4713.c
@@ -1656,9 +1656,18 @@ static const struct i2c_device_id si4713_id[] = {
};
MODULE_DEVICE_TABLE(i2c, si4713_id);
+#if IS_ENABLED(CONFIG_OF)
+static const struct of_device_id si4713_of_match[] = {
+ { .compatible = "silabs,si4713" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, si4713_of_match);
+#endif
+
static struct i2c_driver si4713_i2c_driver = {
.driver = {
.name = "si4713",
+ .of_match_table = of_match_ptr(si4713_of_match),
},
.probe = si4713_probe,
.remove = si4713_remove,
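For reference, of_match_ptr() used above compiles the match-table reference away when CONFIG_OF is disabled; roughly (paraphrasing <linux/of.h>, not a new definition):

#ifdef CONFIG_OF
#define of_match_ptr(ptr)	(ptr)
#else
#define of_match_ptr(ptr)	NULL
#endif

Together with the IS_ENABLED(CONFIG_OF) guard around the table, neither the table nor the pointer is referenced on non-DT builds.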
diff --git a/drivers/media/radio/wl128x/fmdrv_common.c b/drivers/media/radio/wl128x/fmdrv_common.c
index 74a1b3ecb30a..588e2d61c3b4 100644
--- a/drivers/media/radio/wl128x/fmdrv_common.c
+++ b/drivers/media/radio/wl128x/fmdrv_common.c
@@ -1550,9 +1550,8 @@ int fmc_prepare(struct fmdev *fmdev)
atomic_set(&fmdev->tx_cnt, 1);
fmdev->resp_comp = NULL;
- init_timer(&fmdev->irq_info.timer);
- fmdev->irq_info.timer.function = &int_timeout_handler;
- fmdev->irq_info.timer.data = (unsigned long)fmdev;
+ setup_timer(&fmdev->irq_info.timer, &int_timeout_handler,
+ (unsigned long)fmdev);
/*TODO: add FM_STIC_EVENT later */
fmdev->irq_info.mask = FM_MAL_EVENT;
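This conversion (repeated for imon.c further down) folds the open-coded timer setup into setup_timer(); on kernels of this vintage the two forms are equivalent. Sketch only, where fn takes an unsigned long argument:

/* before */
init_timer(&t);
t.function = fn;
t.data = (unsigned long)ctx;

/* after */
setup_timer(&t, fn, (unsigned long)ctx);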
diff --git a/drivers/media/rc/Kconfig b/drivers/media/rc/Kconfig
index d1d3fd00ed89..e422f3d56f76 100644
--- a/drivers/media/rc/Kconfig
+++ b/drivers/media/rc/Kconfig
@@ -426,4 +426,13 @@ config IR_SERIAL_TRANSMITTER
---help---
Serial Port Transmitter support
+config IR_SIR
+ tristate "Built-in SIR IrDA port"
+ depends on RC_CORE
+ ---help---
+	  Say Y if you want to use an IrDA SIR port transceiver.
+
+ To compile this driver as a module, choose M here: the module will
+ be called sir-ir.
+
endif #RC_DEVICES
diff --git a/drivers/media/rc/Makefile b/drivers/media/rc/Makefile
index 679aa0af85cd..245e2c2d0b22 100644
--- a/drivers/media/rc/Makefile
+++ b/drivers/media/rc/Makefile
@@ -39,4 +39,5 @@ obj-$(CONFIG_RC_ST) += st_rc.o
obj-$(CONFIG_IR_SUNXI) += sunxi-cir.o
obj-$(CONFIG_IR_IMG) += img-ir/
obj-$(CONFIG_IR_SERIAL) += serial_ir.o
+obj-$(CONFIG_IR_SIR) += sir_ir.o
obj-$(CONFIG_IR_MTK) += mtk-cir.o
diff --git a/drivers/media/rc/gpio-ir-recv.c b/drivers/media/rc/gpio-ir-recv.c
index 4a4895e4d599..b4f773b9dc1d 100644
--- a/drivers/media/rc/gpio-ir-recv.c
+++ b/drivers/media/rc/gpio-ir-recv.c
@@ -158,7 +158,7 @@ static int gpio_ir_recv_probe(struct platform_device *pdev)
rcdev->input_id.version = 0x0100;
rcdev->dev.parent = &pdev->dev;
rcdev->driver_name = GPIO_IR_DRIVER_NAME;
- rcdev->min_timeout = 0;
+ rcdev->min_timeout = 1;
rcdev->timeout = IR_DEFAULT_TIMEOUT;
rcdev->max_timeout = 10 * IR_DEFAULT_TIMEOUT;
if (pdata->allowed_protos)
diff --git a/drivers/media/rc/igorplugusb.c b/drivers/media/rc/igorplugusb.c
index 0f0ed4ea4d06..cb6d4f1247da 100644
--- a/drivers/media/rc/igorplugusb.c
+++ b/drivers/media/rc/igorplugusb.c
@@ -205,7 +205,7 @@ static int igorplugusb_probe(struct usb_interface *intf,
rc->allowed_protocols = RC_BIT_ALL_IR_DECODER & ~(RC_BIT_NEC |
RC_BIT_NECX | RC_BIT_NEC32 | RC_BIT_RC6_6A_20 |
RC_BIT_RC6_6A_24 | RC_BIT_RC6_6A_32 | RC_BIT_RC6_MCE |
- RC_BIT_SONY20 | RC_BIT_MCE_KBD | RC_BIT_SANYO);
+ RC_BIT_SONY20 | RC_BIT_SANYO);
rc->priv = ir;
rc->driver_name = DRIVER_NAME;
diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c
index 89823d24a384..3489010601b5 100644
--- a/drivers/media/rc/imon.c
+++ b/drivers/media/rc/imon.c
@@ -2412,9 +2412,8 @@ static struct imon_context *imon_init_intf1(struct usb_interface *intf,
mutex_lock(&ictx->lock);
if (ictx->display_type == IMON_DISPLAY_TYPE_VGA) {
- init_timer(&ictx->ttimer);
- ictx->ttimer.data = (unsigned long)ictx;
- ictx->ttimer.function = imon_touch_display_timeout;
+ setup_timer(&ictx->ttimer, imon_touch_display_timeout,
+ (unsigned long)ictx);
}
ictx->usbdev_intf1 = usb_get_dev(interface_to_usbdev(intf));
diff --git a/drivers/media/rc/ir-lirc-codec.c b/drivers/media/rc/ir-lirc-codec.c
index 8517d5153fcf..de85f1d7ce43 100644
--- a/drivers/media/rc/ir-lirc-codec.c
+++ b/drivers/media/rc/ir-lirc-codec.c
@@ -139,7 +139,7 @@ static ssize_t ir_lirc_transmit_ir(struct file *file, const char __user *buf,
}
if (!dev->tx_ir) {
- ret = -ENOSYS;
+ ret = -EINVAL;
goto out;
}
@@ -221,19 +221,19 @@ static long ir_lirc_ioctl(struct file *filep, unsigned int cmd,
/* TX settings */
case LIRC_SET_TRANSMITTER_MASK:
if (!dev->s_tx_mask)
- return -ENOSYS;
+ return -ENOTTY;
return dev->s_tx_mask(dev, val);
case LIRC_SET_SEND_CARRIER:
if (!dev->s_tx_carrier)
- return -ENOSYS;
+ return -ENOTTY;
return dev->s_tx_carrier(dev, val);
case LIRC_SET_SEND_DUTY_CYCLE:
if (!dev->s_tx_duty_cycle)
- return -ENOSYS;
+ return -ENOTTY;
if (val <= 0 || val >= 100)
return -EINVAL;
@@ -243,7 +243,7 @@ static long ir_lirc_ioctl(struct file *filep, unsigned int cmd,
/* RX settings */
case LIRC_SET_REC_CARRIER:
if (!dev->s_rx_carrier_range)
- return -ENOSYS;
+ return -ENOTTY;
if (val <= 0)
return -EINVAL;
@@ -253,6 +253,9 @@ static long ir_lirc_ioctl(struct file *filep, unsigned int cmd,
val);
case LIRC_SET_REC_CARRIER_RANGE:
+ if (!dev->s_rx_carrier_range)
+ return -ENOTTY;
+
if (val <= 0)
return -EINVAL;
@@ -260,37 +263,40 @@ static long ir_lirc_ioctl(struct file *filep, unsigned int cmd,
return 0;
case LIRC_GET_REC_RESOLUTION:
+ if (!dev->rx_resolution)
+ return -ENOTTY;
+
val = dev->rx_resolution;
break;
case LIRC_SET_WIDEBAND_RECEIVER:
if (!dev->s_learning_mode)
- return -ENOSYS;
+ return -ENOTTY;
return dev->s_learning_mode(dev, !!val);
case LIRC_SET_MEASURE_CARRIER_MODE:
if (!dev->s_carrier_report)
- return -ENOSYS;
+ return -ENOTTY;
return dev->s_carrier_report(dev, !!val);
/* Generic timeout support */
case LIRC_GET_MIN_TIMEOUT:
if (!dev->max_timeout)
- return -ENOSYS;
+ return -ENOTTY;
val = DIV_ROUND_UP(dev->min_timeout, 1000);
break;
case LIRC_GET_MAX_TIMEOUT:
if (!dev->max_timeout)
- return -ENOSYS;
+ return -ENOTTY;
val = dev->max_timeout / 1000;
break;
case LIRC_SET_REC_TIMEOUT:
if (!dev->max_timeout)
- return -ENOSYS;
+ return -ENOTTY;
tmp = val * 1000;
@@ -305,6 +311,9 @@ static long ir_lirc_ioctl(struct file *filep, unsigned int cmd,
break;
case LIRC_SET_REC_TIMEOUT_REPORTS:
+ if (!dev->timeout)
+ return -ENOTTY;
+
lirc->send_timeout_reports = !!val;
break;
@@ -361,8 +370,11 @@ static int ir_lirc_register(struct rc_dev *dev)
if (rc)
goto rbuf_init_failed;
- if (dev->driver_type != RC_DRIVER_IR_RAW_TX)
+ if (dev->driver_type != RC_DRIVER_IR_RAW_TX) {
features |= LIRC_CAN_REC_MODE2;
+ if (dev->rx_resolution)
+ features |= LIRC_CAN_GET_REC_RESOLUTION;
+ }
if (dev->tx_ir) {
features |= LIRC_CAN_SEND_PULSE;
if (dev->s_tx_mask)
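The ENOSYS-to-ENOTTY changes above follow the usual convention: ENOSYS means the system call itself does not exist, while an ioctl command a device does not implement should fail with ENOTTY ("inappropriate ioctl for device"). A generic, hedged sketch of the convention (not lirc code; command name is hypothetical):

static long example_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case EXAMPLE_SUPPORTED_CMD:	/* hypothetical command */
		return 0;
	default:
		/* unimplemented ioctl: ENOTTY, not ENOSYS */
		return -ENOTTY;
	}
}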
diff --git a/drivers/media/rc/ir-mce_kbd-decoder.c b/drivers/media/rc/ir-mce_kbd-decoder.c
index 5226d510e847..6a4d58b88d91 100644
--- a/drivers/media/rc/ir-mce_kbd-decoder.c
+++ b/drivers/media/rc/ir-mce_kbd-decoder.c
@@ -23,7 +23,7 @@
* - MCIR-2 29-bit IR signals used for mouse movement and buttons
* - MCIR-2 32-bit IR signals used for standard keyboard keys
*
- * The media keys on the keyboard send RC-6 signals that are inditinguishable
+ * The media keys on the keyboard send RC-6 signals that are indistinguishable
* from the keys of the same name on the stock MCE remote, and will be handled
* by the standard RC-6 decoder, and be made available to the system via the
* input device for the remote, rather than the keyboard/mouse one.
@@ -339,6 +339,7 @@ again:
}
data->state = STATE_INACTIVE;
+ input_event(data->idev, EV_MSC, MSC_SCAN, scancode);
input_sync(data->idev);
return 0;
}
@@ -418,9 +419,53 @@ static int ir_mce_kbd_unregister(struct rc_dev *dev)
return 0;
}
+static const struct ir_raw_timings_manchester ir_mce_kbd_timings = {
+ .leader = MCIR2_PREFIX_PULSE,
+ .invert = 1,
+ .clock = MCIR2_UNIT,
+ .trailer_space = MCIR2_UNIT * 10,
+};
+
+/**
+ * ir_mce_kbd_encode() - Encode a scancode as a stream of raw events
+ *
+ * @protocol: protocol to encode
+ * @scancode: scancode to encode
+ * @events: array of raw ir events to write into
+ * @max: maximum size of @events
+ *
+ * Returns: The number of events written.
+ * -ENOBUFS if there isn't enough space in the array to fit the
+ * encoding. In this case all @max events will have been written.
+ */
+static int ir_mce_kbd_encode(enum rc_type protocol, u32 scancode,
+ struct ir_raw_event *events, unsigned int max)
+{
+ struct ir_raw_event *e = events;
+ int len, ret;
+ u64 raw;
+
+ if (protocol == RC_TYPE_MCIR2_KBD) {
+ raw = scancode |
+ ((u64)MCIR2_KEYBOARD_HEADER << MCIR2_KEYBOARD_NBITS);
+ len = MCIR2_KEYBOARD_NBITS + MCIR2_HEADER_NBITS + 1;
+ } else {
+ raw = scancode |
+ ((u64)MCIR2_MOUSE_HEADER << MCIR2_MOUSE_NBITS);
+ len = MCIR2_MOUSE_NBITS + MCIR2_HEADER_NBITS + 1;
+ }
+
+ ret = ir_raw_gen_manchester(&e, max, &ir_mce_kbd_timings, len, raw);
+ if (ret < 0)
+ return ret;
+
+ return e - events;
+}
+
static struct ir_raw_handler mce_kbd_handler = {
- .protocols = RC_BIT_MCE_KBD,
+ .protocols = RC_BIT_MCIR2_KBD | RC_BIT_MCIR2_MSE,
.decode = ir_mce_kbd_decode,
+ .encode = ir_mce_kbd_encode,
.raw_register = ir_mce_kbd_register,
.raw_unregister = ir_mce_kbd_unregister,
};
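The new ir_mce_kbd_encode() relies on rc-core's Manchester generator. As a generic illustration of Manchester coding (not the rc-core timings or bit conventions, which ir_raw_gen_manchester() handles), each data bit is emitted as a transition in the middle of its cell:

/* Generic Manchester illustration; "M" = mark half-cell, "S" = space half-cell. */
#include <stdio.h>
#include <stdint.h>

static void manchester_encode(uint64_t data, unsigned int nbits)
{
	for (int i = nbits - 1; i >= 0; i--) {
		int bit = (data >> i) & 1;
		/* one common convention: 1 -> mark,space ; 0 -> space,mark */
		printf("%s", bit ? "MS " : "SM ");
	}
	printf("\n");
}

int main(void)
{
	manchester_encode(0x2a, 6);	/* 101010 -> MS SM MS SM MS SM */
	return 0;
}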
diff --git a/drivers/media/rc/keymaps/Makefile b/drivers/media/rc/keymaps/Makefile
index ffe9e612f8d6..2945f99907b5 100644
--- a/drivers/media/rc/keymaps/Makefile
+++ b/drivers/media/rc/keymaps/Makefile
@@ -57,7 +57,6 @@ obj-$(CONFIG_RC_MAP) += rc-adstech-dvb-t-pci.o \
rc-kworld-pc150u.o \
rc-kworld-plus-tv-analog.o \
rc-leadtek-y04g0051.o \
- rc-lirc.o \
rc-lme2510.o \
rc-manli.o \
rc-medion-x10.o \
diff --git a/drivers/media/rc/keymaps/rc-dvico-mce.c b/drivers/media/rc/keymaps/rc-dvico-mce.c
index e5f098c50235..d1e861f4d095 100644
--- a/drivers/media/rc/keymaps/rc-dvico-mce.c
+++ b/drivers/media/rc/keymaps/rc-dvico-mce.c
@@ -12,58 +12,58 @@
#include <linux/module.h>
static struct rc_map_table rc_map_dvico_mce_table[] = {
- { 0xfe02, KEY_TV },
- { 0xfe0e, KEY_MP3 },
- { 0xfe1a, KEY_DVD },
- { 0xfe1e, KEY_FAVORITES },
- { 0xfe16, KEY_SETUP },
- { 0xfe46, KEY_POWER2 },
- { 0xfe0a, KEY_EPG },
- { 0xfe49, KEY_BACK },
- { 0xfe4d, KEY_MENU },
- { 0xfe51, KEY_UP },
- { 0xfe5b, KEY_LEFT },
- { 0xfe5f, KEY_RIGHT },
- { 0xfe53, KEY_DOWN },
- { 0xfe5e, KEY_OK },
- { 0xfe59, KEY_INFO },
- { 0xfe55, KEY_TAB },
- { 0xfe0f, KEY_PREVIOUSSONG },/* Replay */
- { 0xfe12, KEY_NEXTSONG }, /* Skip */
- { 0xfe42, KEY_ENTER }, /* Windows/Start */
- { 0xfe15, KEY_VOLUMEUP },
- { 0xfe05, KEY_VOLUMEDOWN },
- { 0xfe11, KEY_CHANNELUP },
- { 0xfe09, KEY_CHANNELDOWN },
- { 0xfe52, KEY_CAMERA },
- { 0xfe5a, KEY_TUNER }, /* Live */
- { 0xfe19, KEY_OPEN },
- { 0xfe0b, KEY_1 },
- { 0xfe17, KEY_2 },
- { 0xfe1b, KEY_3 },
- { 0xfe07, KEY_4 },
- { 0xfe50, KEY_5 },
- { 0xfe54, KEY_6 },
- { 0xfe48, KEY_7 },
- { 0xfe4c, KEY_8 },
- { 0xfe58, KEY_9 },
- { 0xfe13, KEY_ANGLE }, /* Aspect */
- { 0xfe03, KEY_0 },
- { 0xfe1f, KEY_ZOOM },
- { 0xfe43, KEY_REWIND },
- { 0xfe47, KEY_PLAYPAUSE },
- { 0xfe4f, KEY_FASTFORWARD },
- { 0xfe57, KEY_MUTE },
- { 0xfe0d, KEY_STOP },
- { 0xfe01, KEY_RECORD },
- { 0xfe4e, KEY_POWER },
+ { 0x0102, KEY_TV },
+ { 0x010e, KEY_MP3 },
+ { 0x011a, KEY_DVD },
+ { 0x011e, KEY_FAVORITES },
+ { 0x0116, KEY_SETUP },
+ { 0x0146, KEY_POWER2 },
+ { 0x010a, KEY_EPG },
+ { 0x0149, KEY_BACK },
+ { 0x014d, KEY_MENU },
+ { 0x0151, KEY_UP },
+ { 0x015b, KEY_LEFT },
+ { 0x015f, KEY_RIGHT },
+ { 0x0153, KEY_DOWN },
+ { 0x015e, KEY_OK },
+ { 0x0159, KEY_INFO },
+ { 0x0155, KEY_TAB },
+ { 0x010f, KEY_PREVIOUSSONG },/* Replay */
+ { 0x0112, KEY_NEXTSONG }, /* Skip */
+ { 0x0142, KEY_ENTER }, /* Windows/Start */
+ { 0x0115, KEY_VOLUMEUP },
+ { 0x0105, KEY_VOLUMEDOWN },
+ { 0x0111, KEY_CHANNELUP },
+ { 0x0109, KEY_CHANNELDOWN },
+ { 0x0152, KEY_CAMERA },
+ { 0x015a, KEY_TUNER }, /* Live */
+ { 0x0119, KEY_OPEN },
+ { 0x010b, KEY_1 },
+ { 0x0117, KEY_2 },
+ { 0x011b, KEY_3 },
+ { 0x0107, KEY_4 },
+ { 0x0150, KEY_5 },
+ { 0x0154, KEY_6 },
+ { 0x0148, KEY_7 },
+ { 0x014c, KEY_8 },
+ { 0x0158, KEY_9 },
+ { 0x0113, KEY_ANGLE }, /* Aspect */
+ { 0x0103, KEY_0 },
+ { 0x011f, KEY_ZOOM },
+ { 0x0143, KEY_REWIND },
+ { 0x0147, KEY_PLAYPAUSE },
+ { 0x014f, KEY_FASTFORWARD },
+ { 0x0157, KEY_MUTE },
+ { 0x010d, KEY_STOP },
+ { 0x0101, KEY_RECORD },
+ { 0x014e, KEY_POWER },
};
static struct rc_map_list dvico_mce_map = {
.map = {
.scan = rc_map_dvico_mce_table,
.size = ARRAY_SIZE(rc_map_dvico_mce_table),
- .rc_type = RC_TYPE_UNKNOWN, /* Legacy IR type */
+ .rc_type = RC_TYPE_NEC,
.name = RC_MAP_DVICO_MCE,
}
};
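The keymap conversion above (and the rc-dvico-portable one below) replaces pre-rc-core raw values with decoded NEC scancodes: the address ends up in the high byte and the command in the low byte, as rc-core's RC_SCANCODE_NEC() does for plain 16-bit NEC. The old 0xfeXX entries apparently carried the complemented address byte (0xfe = ~0x01). A small standalone sketch of the packing:

#include <stdio.h>
#include <stdint.h>

static uint32_t nec_scancode(uint8_t addr, uint8_t cmd)
{
	/* plain 16-bit NEC: address in the high byte, command in the low byte */
	return ((uint32_t)addr << 8) | cmd;
}

int main(void)
{
	/* e.g. KEY_TV above: address 0x01, command 0x02 */
	printf("0x%04x\n", nec_scancode(0x01, 0x02));	/* prints 0x0102 */
	return 0;
}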
diff --git a/drivers/media/rc/keymaps/rc-dvico-portable.c b/drivers/media/rc/keymaps/rc-dvico-portable.c
index 94ceeee94b3f..ac4cb515cbf1 100644
--- a/drivers/media/rc/keymaps/rc-dvico-portable.c
+++ b/drivers/media/rc/keymaps/rc-dvico-portable.c
@@ -12,49 +12,49 @@
#include <linux/module.h>
static struct rc_map_table rc_map_dvico_portable_table[] = {
- { 0xfc02, KEY_SETUP }, /* Profile */
- { 0xfc43, KEY_POWER2 },
- { 0xfc06, KEY_EPG },
- { 0xfc5a, KEY_BACK },
- { 0xfc05, KEY_MENU },
- { 0xfc47, KEY_INFO },
- { 0xfc01, KEY_TAB },
- { 0xfc42, KEY_PREVIOUSSONG },/* Replay */
- { 0xfc49, KEY_VOLUMEUP },
- { 0xfc09, KEY_VOLUMEDOWN },
- { 0xfc54, KEY_CHANNELUP },
- { 0xfc0b, KEY_CHANNELDOWN },
- { 0xfc16, KEY_CAMERA },
- { 0xfc40, KEY_TUNER }, /* ATV/DTV */
- { 0xfc45, KEY_OPEN },
- { 0xfc19, KEY_1 },
- { 0xfc18, KEY_2 },
- { 0xfc1b, KEY_3 },
- { 0xfc1a, KEY_4 },
- { 0xfc58, KEY_5 },
- { 0xfc59, KEY_6 },
- { 0xfc15, KEY_7 },
- { 0xfc14, KEY_8 },
- { 0xfc17, KEY_9 },
- { 0xfc44, KEY_ANGLE }, /* Aspect */
- { 0xfc55, KEY_0 },
- { 0xfc07, KEY_ZOOM },
- { 0xfc0a, KEY_REWIND },
- { 0xfc08, KEY_PLAYPAUSE },
- { 0xfc4b, KEY_FASTFORWARD },
- { 0xfc5b, KEY_MUTE },
- { 0xfc04, KEY_STOP },
- { 0xfc56, KEY_RECORD },
- { 0xfc57, KEY_POWER },
- { 0xfc41, KEY_UNKNOWN }, /* INPUT */
- { 0xfc00, KEY_UNKNOWN }, /* HD */
+ { 0x0302, KEY_SETUP }, /* Profile */
+ { 0x0343, KEY_POWER2 },
+ { 0x0306, KEY_EPG },
+ { 0x035a, KEY_BACK },
+ { 0x0305, KEY_MENU },
+ { 0x0347, KEY_INFO },
+ { 0x0301, KEY_TAB },
+ { 0x0342, KEY_PREVIOUSSONG },/* Replay */
+ { 0x0349, KEY_VOLUMEUP },
+ { 0x0309, KEY_VOLUMEDOWN },
+ { 0x0354, KEY_CHANNELUP },
+ { 0x030b, KEY_CHANNELDOWN },
+ { 0x0316, KEY_CAMERA },
+ { 0x0340, KEY_TUNER }, /* ATV/DTV */
+ { 0x0345, KEY_OPEN },
+ { 0x0319, KEY_1 },
+ { 0x0318, KEY_2 },
+ { 0x031b, KEY_3 },
+ { 0x031a, KEY_4 },
+ { 0x0358, KEY_5 },
+ { 0x0359, KEY_6 },
+ { 0x0315, KEY_7 },
+ { 0x0314, KEY_8 },
+ { 0x0317, KEY_9 },
+ { 0x0344, KEY_ANGLE }, /* Aspect */
+ { 0x0355, KEY_0 },
+ { 0x0307, KEY_ZOOM },
+ { 0x030a, KEY_REWIND },
+ { 0x0308, KEY_PLAYPAUSE },
+ { 0x034b, KEY_FASTFORWARD },
+ { 0x035b, KEY_MUTE },
+ { 0x0304, KEY_STOP },
+ { 0x0356, KEY_RECORD },
+ { 0x0357, KEY_POWER },
+ { 0x0341, KEY_UNKNOWN }, /* INPUT */
+ { 0x0300, KEY_UNKNOWN }, /* HD */
};
static struct rc_map_list dvico_portable_map = {
.map = {
.scan = rc_map_dvico_portable_table,
.size = ARRAY_SIZE(rc_map_dvico_portable_table),
- .rc_type = RC_TYPE_UNKNOWN, /* Legacy IR type */
+ .rc_type = RC_TYPE_NEC,
.name = RC_MAP_DVICO_PORTABLE,
}
};
diff --git a/drivers/media/rc/keymaps/rc-lirc.c b/drivers/media/rc/keymaps/rc-lirc.c
deleted file mode 100644
index e172f5db5803..000000000000
--- a/drivers/media/rc/keymaps/rc-lirc.c
+++ /dev/null
@@ -1,42 +0,0 @@
-/* rc-lirc.c - Empty dummy keytable, for use when its preferred to pass
- * all raw IR data to the lirc userspace decoder.
- *
- * Copyright (c) 2010 by Jarod Wilson <jarod@redhat.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#include <media/rc-core.h>
-#include <linux/module.h>
-
-static struct rc_map_table lirc[] = {
- { },
-};
-
-static struct rc_map_list lirc_map = {
- .map = {
- .scan = lirc,
- .size = ARRAY_SIZE(lirc),
- .rc_type = RC_TYPE_OTHER,
- .name = RC_MAP_LIRC,
- }
-};
-
-static int __init init_rc_map_lirc(void)
-{
- return rc_map_register(&lirc_map);
-}
-
-static void __exit exit_rc_map_lirc(void)
-{
- rc_map_unregister(&lirc_map);
-}
-
-module_init(init_rc_map_lirc)
-module_exit(exit_rc_map_lirc)
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Jarod Wilson <jarod@redhat.com>");
diff --git a/drivers/media/rc/lirc_dev.c b/drivers/media/rc/lirc_dev.c
index 1688893a65bb..8d60c9f00df9 100644
--- a/drivers/media/rc/lirc_dev.c
+++ b/drivers/media/rc/lirc_dev.c
@@ -54,7 +54,8 @@ struct irctl {
struct lirc_buffer *buf;
unsigned int chunk_size;
- struct cdev *cdev;
+ struct device dev;
+ struct cdev cdev;
struct task_struct *task;
long jiffies_to_wait;
@@ -76,15 +77,21 @@ static void lirc_irctl_init(struct irctl *ir)
ir->d.minor = NOPLUG;
}
-static void lirc_irctl_cleanup(struct irctl *ir)
+static void lirc_release(struct device *ld)
{
- device_destroy(lirc_class, MKDEV(MAJOR(lirc_base_dev), ir->d.minor));
+ struct irctl *ir = container_of(ld, struct irctl, dev);
+
+ put_device(ir->dev.parent);
if (ir->buf != ir->d.rbuf) {
lirc_buffer_free(ir->buf);
kfree(ir->buf);
}
- ir->buf = NULL;
+
+ mutex_lock(&lirc_dev_lock);
+ irctls[ir->d.minor] = NULL;
+ mutex_unlock(&lirc_dev_lock);
+ kfree(ir);
}
/* helper function
@@ -157,32 +164,21 @@ static int lirc_cdev_add(struct irctl *ir)
struct cdev *cdev;
int retval;
- cdev = cdev_alloc();
- if (!cdev)
- return -ENOMEM;
+ cdev = &ir->cdev;
if (d->fops) {
- cdev->ops = d->fops;
+ cdev_init(cdev, d->fops);
cdev->owner = d->owner;
} else {
- cdev->ops = &lirc_dev_fops;
+ cdev_init(cdev, &lirc_dev_fops);
cdev->owner = THIS_MODULE;
}
retval = kobject_set_name(&cdev->kobj, "lirc%d", d->minor);
if (retval)
- goto err_out;
-
- retval = cdev_add(cdev, MKDEV(MAJOR(lirc_base_dev), d->minor), 1);
- if (retval)
- goto err_out;
-
- ir->cdev = cdev;
-
- return 0;
+ return retval;
-err_out:
- cdev_del(cdev);
- return retval;
+ cdev->kobj.parent = &ir->dev.kobj;
+ return cdev_add(cdev, ir->dev.devt, 1);
}
static int lirc_allocate_buffer(struct irctl *ir)
@@ -304,9 +300,12 @@ static int lirc_allocate_driver(struct lirc_driver *d)
ir->d = *d;
- device_create(lirc_class, ir->d.dev,
- MKDEV(MAJOR(lirc_base_dev), ir->d.minor), NULL,
- "lirc%u", ir->d.minor);
+ ir->dev.devt = MKDEV(MAJOR(lirc_base_dev), ir->d.minor);
+ ir->dev.class = lirc_class;
+ ir->dev.parent = d->dev;
+ ir->dev.release = lirc_release;
+ dev_set_name(&ir->dev, "lirc%d", ir->d.minor);
+ device_initialize(&ir->dev);
if (d->sample_rate) {
ir->jiffies_to_wait = HZ / d->sample_rate;
@@ -329,14 +328,22 @@ static int lirc_allocate_driver(struct lirc_driver *d)
goto out_sysfs;
ir->attached = 1;
+
+ err = device_add(&ir->dev);
+ if (err)
+ goto out_cdev;
+
mutex_unlock(&lirc_dev_lock);
+ get_device(ir->dev.parent);
+
dev_info(ir->d.dev, "lirc_dev: driver %s registered at minor = %d\n",
ir->d.name, ir->d.minor);
return minor;
-
+out_cdev:
+ cdev_del(&ir->cdev);
out_sysfs:
- device_destroy(lirc_class, MKDEV(MAJOR(lirc_base_dev), ir->d.minor));
+ put_device(&ir->dev);
out_lock:
mutex_unlock(&lirc_dev_lock);
@@ -364,7 +371,6 @@ EXPORT_SYMBOL(lirc_register_driver);
int lirc_unregister_driver(int minor)
{
struct irctl *ir;
- struct cdev *cdev;
if (minor < 0 || minor >= MAX_IRCTL_DEVICES) {
pr_err("minor (%d) must be between 0 and %d!\n",
@@ -378,8 +384,6 @@ int lirc_unregister_driver(int minor)
return -ENOENT;
}
- cdev = ir->cdev;
-
mutex_lock(&lirc_dev_lock);
if (ir->d.minor != minor) {
@@ -401,22 +405,20 @@ int lirc_unregister_driver(int minor)
dev_dbg(ir->d.dev, LOGHEAD "releasing opened driver\n",
ir->d.name, ir->d.minor);
wake_up_interruptible(&ir->buf->wait_poll);
- mutex_lock(&ir->irctl_lock);
+ }
- if (ir->d.set_use_dec)
- ir->d.set_use_dec(ir->d.data);
+ mutex_lock(&ir->irctl_lock);
- module_put(cdev->owner);
- mutex_unlock(&ir->irctl_lock);
- } else {
- lirc_irctl_cleanup(ir);
- cdev_del(cdev);
- kfree(ir);
- irctls[minor] = NULL;
- }
+ if (ir->d.set_use_dec)
+ ir->d.set_use_dec(ir->d.data);
+ mutex_unlock(&ir->irctl_lock);
mutex_unlock(&lirc_dev_lock);
+ device_del(&ir->dev);
+ cdev_del(&ir->cdev);
+ put_device(&ir->dev);
+
return 0;
}
EXPORT_SYMBOL(lirc_unregister_driver);
@@ -424,7 +426,6 @@ EXPORT_SYMBOL(lirc_unregister_driver);
int lirc_dev_fop_open(struct inode *inode, struct file *file)
{
struct irctl *ir;
- struct cdev *cdev;
int retval = 0;
if (iminor(inode) >= MAX_IRCTL_DEVICES) {
@@ -461,18 +462,14 @@ int lirc_dev_fop_open(struct inode *inode, struct file *file)
goto error;
}
- cdev = ir->cdev;
- if (try_module_get(cdev->owner)) {
- ir->open++;
- if (ir->d.set_use_inc)
- retval = ir->d.set_use_inc(ir->d.data);
-
- if (retval) {
- module_put(cdev->owner);
- ir->open--;
- } else if (ir->buf) {
+ ir->open++;
+ if (ir->d.set_use_inc)
+ retval = ir->d.set_use_inc(ir->d.data);
+ if (retval) {
+ ir->open--;
+ } else {
+ if (ir->buf)
lirc_buffer_clear(ir->buf);
- }
if (ir->task)
wake_up_process(ir->task);
}
@@ -487,7 +484,6 @@ EXPORT_SYMBOL(lirc_dev_fop_open);
int lirc_dev_fop_close(struct inode *inode, struct file *file)
{
struct irctl *ir = irctls[iminor(inode)];
- struct cdev *cdev;
int ret;
if (!ir) {
@@ -495,25 +491,14 @@ int lirc_dev_fop_close(struct inode *inode, struct file *file)
return -EINVAL;
}
- cdev = ir->cdev;
-
ret = mutex_lock_killable(&lirc_dev_lock);
WARN_ON(ret);
rc_close(ir->d.rdev);
ir->open--;
- if (ir->attached) {
- if (ir->d.set_use_dec)
- ir->d.set_use_dec(ir->d.data);
- module_put(cdev->owner);
- } else {
- lirc_irctl_cleanup(ir);
- cdev_del(cdev);
- irctls[ir->d.minor] = NULL;
- kfree(ir);
- }
-
+ if (ir->d.set_use_dec)
+ ir->d.set_use_dec(ir->d.data);
if (!ret)
mutex_unlock(&lirc_dev_lock);
@@ -623,7 +608,7 @@ long lirc_dev_fop_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
result = put_user(ir->d.max_timeout, (__u32 __user *)arg);
break;
default:
- result = -EINVAL;
+ result = -ENOTTY;
}
mutex_unlock(&ir->irctl_lock);
@@ -780,15 +765,12 @@ static int __init lirc_dev_init(void)
return retval;
}
-
pr_info("IR Remote Control driver registered, major %d\n",
MAJOR(lirc_base_dev));
return 0;
}
-
-
static void __exit lirc_dev_exit(void)
{
class_destroy(lirc_class);
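The lirc_dev rework above moves from a separately allocated cdev plus device_create() to embedding both the struct device and the struct cdev in the irctl and freeing it from the device release() callback. A condensed, hypothetical sketch of that lifetime pattern (names are illustrative, not lirc code):

struct example_dev {
	struct device dev;
	struct cdev cdev;
};

static void example_release(struct device *d)
{
	kfree(container_of(d, struct example_dev, dev));
}

static int example_register(struct example_dev *ed, dev_t devt,
			    const struct file_operations *fops)
{
	int ret;

	device_initialize(&ed->dev);
	ed->dev.devt = devt;
	ed->dev.release = example_release;
	dev_set_name(&ed->dev, "example0");

	cdev_init(&ed->cdev, fops);
	ed->cdev.kobj.parent = &ed->dev.kobj;

	ret = cdev_add(&ed->cdev, devt, 1);
	if (ret)
		goto err_put;

	ret = device_add(&ed->dev);
	if (ret)
		goto err_cdev;

	return 0;

err_cdev:
	cdev_del(&ed->cdev);
err_put:
	put_device(&ed->dev);		/* final put runs example_release() */
	return ret;
}

Unregistering then becomes device_del(), cdev_del(), put_device(), with the release callback doing the kfree() once the last reference drops, which matches the ordering lirc_unregister_driver() now uses.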
diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c
index 238d8eaf7d94..93b16fe3ab38 100644
--- a/drivers/media/rc/mceusb.c
+++ b/drivers/media/rc/mceusb.c
@@ -1288,8 +1288,8 @@ static int mceusb_dev_probe(struct usb_interface *intf,
}
}
}
- if (ep_in == NULL) {
- dev_dbg(&intf->dev, "inbound and/or endpoint not found");
+ if (!ep_in || !ep_out) {
+ dev_dbg(&intf->dev, "required endpoints not found\n");
return -ENODEV;
}
diff --git a/drivers/media/rc/rc-core-priv.h b/drivers/media/rc/rc-core-priv.h
index a70a5c557434..0455b273c2fc 100644
--- a/drivers/media/rc/rc-core-priv.h
+++ b/drivers/media/rc/rc-core-priv.h
@@ -185,7 +185,7 @@ struct ir_raw_timings_manchester {
int ir_raw_gen_manchester(struct ir_raw_event **ev, unsigned int max,
const struct ir_raw_timings_manchester *timings,
- unsigned int n, unsigned int data);
+ unsigned int n, u64 data);
/**
* ir_raw_gen_pulse_space() - generate pulse and space raw events.
diff --git a/drivers/media/rc/rc-ir-raw.c b/drivers/media/rc/rc-ir-raw.c
index 7fa84b64a2ae..90f66dc7c0d7 100644
--- a/drivers/media/rc/rc-ir-raw.c
+++ b/drivers/media/rc/rc-ir-raw.c
@@ -258,13 +258,13 @@ static void ir_raw_disable_protocols(struct rc_dev *dev, u64 protocols)
*/
int ir_raw_gen_manchester(struct ir_raw_event **ev, unsigned int max,
const struct ir_raw_timings_manchester *timings,
- unsigned int n, unsigned int data)
+ unsigned int n, u64 data)
{
bool need_pulse;
- unsigned int i;
+ u64 i;
int ret = -ENOBUFS;
- i = 1 << (n - 1);
+ i = BIT_ULL(n - 1);
if (timings->leader) {
if (!max--)
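The switch to u64/BIT_ULL() above matters because the MCE keyboard frames passed in by the new encoder are longer than 32 bits, and 1 << (n - 1) is evaluated in 32-bit int. A minimal standalone demonstration of the width issue (BIT_ULL paraphrased from <linux/bits.h>):

#include <stdio.h>

#define BIT_ULL(nr)	(1ULL << (nr))

int main(void)
{
	unsigned int n = 38;	/* hypothetical frame length > 32 bits */

	/* 64-bit safe: */
	printf("BIT_ULL(%u - 1) = 0x%llx\n", n,
	       (unsigned long long)BIT_ULL(n - 1));
	/* whereas 1 << (n - 1) would overflow a 32-bit int here */
	return 0;
}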
diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
index d84533699668..6ec73357fa47 100644
--- a/drivers/media/rc/rc-main.c
+++ b/drivers/media/rc/rc-main.c
@@ -746,6 +746,8 @@ static int rc_validate_filter(struct rc_dev *dev,
[RC_TYPE_NECX] = 0xffffff,
[RC_TYPE_NEC32] = 0xffffffff,
[RC_TYPE_SANYO] = 0x1fffff,
+ [RC_TYPE_MCIR2_KBD] = 0xffff,
+ [RC_TYPE_MCIR2_MSE] = 0x1fffff,
[RC_TYPE_RC6_0] = 0xffff,
[RC_TYPE_RC6_6A_20] = 0xfffff,
[RC_TYPE_RC6_6A_24] = 0xffffff,
@@ -878,7 +880,8 @@ static const struct {
{ RC_BIT_RC5_SZ, "rc-5-sz", "ir-rc5-decoder" },
{ RC_BIT_SANYO, "sanyo", "ir-sanyo-decoder" },
{ RC_BIT_SHARP, "sharp", "ir-sharp-decoder" },
- { RC_BIT_MCE_KBD, "mce_kbd", "ir-mce_kbd-decoder" },
+ { RC_BIT_MCIR2_KBD |
+ RC_BIT_MCIR2_MSE, "mce_kbd", "ir-mce_kbd-decoder" },
{ RC_BIT_XMP, "xmp", "ir-xmp-decoder" },
{ RC_BIT_CEC, "cec", NULL },
};
@@ -1346,7 +1349,8 @@ static const char * const proto_variant_names[] = {
[RC_TYPE_NECX] = "nec-x",
[RC_TYPE_NEC32] = "nec-32",
[RC_TYPE_SANYO] = "sanyo",
- [RC_TYPE_MCE_KBD] = "mce_kbd",
+ [RC_TYPE_MCIR2_KBD] = "mcir2-kbd",
+ [RC_TYPE_MCIR2_MSE] = "mcir2-mse",
[RC_TYPE_RC6_0] = "rc-6-0",
[RC_TYPE_RC6_6A_20] = "rc-6-6a-20",
[RC_TYPE_RC6_6A_24] = "rc-6-6a-24",
diff --git a/drivers/media/rc/serial_ir.c b/drivers/media/rc/serial_ir.c
index 41b54e40176c..2f0a0d248936 100644
--- a/drivers/media/rc/serial_ir.c
+++ b/drivers/media/rc/serial_ir.c
@@ -56,7 +56,7 @@ struct serial_ir_hw {
static int type;
static int io;
static int irq;
-static bool iommap;
+static ulong iommap;
static int ioshift;
static bool softcarrier = true;
static bool share_irq;
@@ -837,7 +837,7 @@ module_param(io, int, 0444);
MODULE_PARM_DESC(io, "I/O address base (0x3f8 or 0x2f8)");
/* some architectures (e.g. intel xscale) have memory mapped registers */
-module_param(iommap, bool, 0444);
+module_param(iommap, ulong, 0444);
MODULE_PARM_DESC(iommap, "physical base for memory mapped I/O (0 = no memory mapped io)");
/*
diff --git a/drivers/media/rc/sir_ir.c b/drivers/media/rc/sir_ir.c
new file mode 100644
index 000000000000..e12ec50bf0bf
--- /dev/null
+++ b/drivers/media/rc/sir_ir.c
@@ -0,0 +1,438 @@
+/*
+ * IR SIR driver, (C) 2000 Milan Pikula <www@fornax.sk>
+ *
+ * sir_ir - Device driver for use with SIR (serial infra red)
+ * mode of IrDA on many notebooks.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/serial_reg.h>
+#include <linux/ktime.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+
+#include <media/rc-core.h>
+
+/* SECTION: Definitions */
+#define PULSE '['
+
+/* 9 bits * 1s/115200 bits, in microseconds: 78.125 us */
+#define TIME_CONST (9000000ul / 115200ul)
+
+/* timeout for sequences in jiffies (=5/100s), must be longer than TIME_CONST */
+#define SIR_TIMEOUT (HZ * 5 / 100)
+
+/* onboard SIR ports are typically COM3 */
+static int io = 0x3e8;
+static int irq = 4;
+static int threshold = 3;
+
+static DEFINE_SPINLOCK(timer_lock);
+static struct timer_list timerlist;
+/* time of last signal change detected */
+static ktime_t last;
+/* time of last UART data ready interrupt */
+static ktime_t last_intr_time;
+static int last_value;
+static struct rc_dev *rcdev;
+
+static struct platform_device *sir_ir_dev;
+
+static DEFINE_SPINLOCK(hardware_lock);
+
+/* SECTION: Prototypes */
+
+/* Communication with user-space */
+static void add_read_queue(int flag, unsigned long val);
+static int init_chrdev(void);
+/* Hardware */
+static irqreturn_t sir_interrupt(int irq, void *dev_id);
+static void send_space(unsigned long len);
+static void send_pulse(unsigned long len);
+static int init_hardware(void);
+static void drop_hardware(void);
+/* Initialisation */
+static int init_port(void);
+static void drop_port(void);
+
+static inline unsigned int sinp(int offset)
+{
+ return inb(io + offset);
+}
+
+static inline void soutp(int offset, int value)
+{
+ outb(value, io + offset);
+}
+
+/* SECTION: Communication with user-space */
+static int sir_tx_ir(struct rc_dev *dev, unsigned int *tx_buf,
+ unsigned int count)
+{
+ unsigned long flags;
+ int i;
+
+ local_irq_save(flags);
+ for (i = 0; i < count;) {
+ if (tx_buf[i])
+ send_pulse(tx_buf[i]);
+ i++;
+ if (i >= count)
+ break;
+ if (tx_buf[i])
+ send_space(tx_buf[i]);
+ i++;
+ }
+ local_irq_restore(flags);
+
+ return count;
+}
+
+static void add_read_queue(int flag, unsigned long val)
+{
+ DEFINE_IR_RAW_EVENT(ev);
+
+ pr_debug("add flag %d with val %lu\n", flag, val);
+
+ /*
+ * statistically, pulses are ~TIME_CONST/2 too long. we could
+ * maybe make this more exact, but this is good enough
+ */
+ if (flag) {
+ /* pulse */
+ if (val > TIME_CONST / 2)
+ val -= TIME_CONST / 2;
+ else /* should not ever happen */
+ val = 1;
+ ev.pulse = true;
+ } else {
+ val += TIME_CONST / 2;
+ }
+ ev.duration = US_TO_NS(val);
+
+ ir_raw_event_store_with_filter(rcdev, &ev);
+}
+
+static int init_chrdev(void)
+{
+ rcdev = devm_rc_allocate_device(&sir_ir_dev->dev, RC_DRIVER_IR_RAW);
+ if (!rcdev)
+ return -ENOMEM;
+
+ rcdev->input_name = "SIR IrDA port";
+ rcdev->input_phys = KBUILD_MODNAME "/input0";
+ rcdev->input_id.bustype = BUS_HOST;
+ rcdev->input_id.vendor = 0x0001;
+ rcdev->input_id.product = 0x0001;
+ rcdev->input_id.version = 0x0100;
+ rcdev->tx_ir = sir_tx_ir;
+ rcdev->allowed_protocols = RC_BIT_ALL_IR_DECODER;
+ rcdev->driver_name = KBUILD_MODNAME;
+ rcdev->map_name = RC_MAP_RC6_MCE;
+ rcdev->timeout = IR_DEFAULT_TIMEOUT;
+ rcdev->dev.parent = &sir_ir_dev->dev;
+
+ return devm_rc_register_device(&sir_ir_dev->dev, rcdev);
+}
+
+/* SECTION: Hardware */
+static void sir_timeout(unsigned long data)
+{
+ /*
+ * if last received signal was a pulse, but receiving stopped
+ * within the 9 bit frame, we need to finish this pulse and
+	 * simulate a signal change from pulse to space. Otherwise
+ * upper layers will receive two sequences next time.
+ */
+
+ unsigned long flags;
+ unsigned long pulse_end;
+
+ /* avoid interference with interrupt */
+ spin_lock_irqsave(&timer_lock, flags);
+ if (last_value) {
+ /* clear unread bits in UART and restart */
+ outb(UART_FCR_CLEAR_RCVR, io + UART_FCR);
+ /* determine 'virtual' pulse end: */
+ pulse_end = min_t(unsigned long,
+ ktime_us_delta(last, last_intr_time),
+ IR_MAX_DURATION);
+ dev_dbg(&sir_ir_dev->dev, "timeout add %d for %lu usec\n",
+ last_value, pulse_end);
+ add_read_queue(last_value, pulse_end);
+ last_value = 0;
+ last = last_intr_time;
+ }
+ spin_unlock_irqrestore(&timer_lock, flags);
+ ir_raw_event_handle(rcdev);
+}
+
+static irqreturn_t sir_interrupt(int irq, void *dev_id)
+{
+ unsigned char data;
+ ktime_t curr_time;
+ static unsigned long delt;
+ unsigned long deltintr;
+ unsigned long flags;
+ int iir, lsr;
+
+ while ((iir = inb(io + UART_IIR) & UART_IIR_ID)) {
+		switch (iir & UART_IIR_ID) { /* FIXME: this needs to be simplified */
+ case UART_IIR_MSI:
+ (void)inb(io + UART_MSR);
+ break;
+ case UART_IIR_RLSI:
+ case UART_IIR_THRI:
+ (void)inb(io + UART_LSR);
+ break;
+ case UART_IIR_RDI:
+ /* avoid interference with timer */
+ spin_lock_irqsave(&timer_lock, flags);
+ do {
+ del_timer(&timerlist);
+ data = inb(io + UART_RX);
+ curr_time = ktime_get();
+ delt = min_t(unsigned long,
+ ktime_us_delta(last, curr_time),
+ IR_MAX_DURATION);
+ deltintr = min_t(unsigned long,
+ ktime_us_delta(last_intr_time,
+ curr_time),
+ IR_MAX_DURATION);
+ dev_dbg(&sir_ir_dev->dev, "t %lu, d %d\n",
+ deltintr, (int)data);
+ /*
+ * if nothing came in last X cycles,
+ * it was gap
+ */
+ if (deltintr > TIME_CONST * threshold) {
+ if (last_value) {
+ dev_dbg(&sir_ir_dev->dev, "GAP\n");
+ /* simulate signal change */
+ add_read_queue(last_value,
+ delt -
+ deltintr);
+ last_value = 0;
+ last = last_intr_time;
+ delt = deltintr;
+ }
+ }
+ data = 1;
+ if (data ^ last_value) {
+ /*
+ * deltintr > 2*TIME_CONST, remember?
+ * the other case is timeout
+ */
+ add_read_queue(last_value,
+ delt - TIME_CONST);
+ last_value = data;
+ last = curr_time;
+ last = ktime_sub_us(last,
+ TIME_CONST);
+ }
+ last_intr_time = curr_time;
+ if (data) {
+ /*
+ * start timer for end of
+ * sequence detection
+ */
+ timerlist.expires = jiffies +
+ SIR_TIMEOUT;
+ add_timer(&timerlist);
+ }
+
+ lsr = inb(io + UART_LSR);
+ } while (lsr & UART_LSR_DR); /* data ready */
+ spin_unlock_irqrestore(&timer_lock, flags);
+ break;
+ default:
+ break;
+ }
+ }
+ ir_raw_event_handle(rcdev);
+ return IRQ_RETVAL(IRQ_HANDLED);
+}
+
+static void send_space(unsigned long len)
+{
+ usleep_range(len, len + 25);
+}
+
+static void send_pulse(unsigned long len)
+{
+ long bytes_out = len / TIME_CONST;
+
+ if (bytes_out == 0)
+ bytes_out++;
+
+ while (bytes_out--) {
+ outb(PULSE, io + UART_TX);
+		/* FIXME: proper waiting is needed, as in char/serial.c */
+ while (!(inb(io + UART_LSR) & UART_LSR_THRE))
+ ;
+ }
+}
+
+static int init_hardware(void)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&hardware_lock, flags);
+ /* reset UART */
+ outb(0, io + UART_MCR);
+ outb(0, io + UART_IER);
+ /* init UART */
+ /* set DLAB, speed = 115200 */
+ outb(UART_LCR_DLAB | UART_LCR_WLEN7, io + UART_LCR);
+ outb(1, io + UART_DLL); outb(0, io + UART_DLM);
+ /* 7N1+start = 9 bits at 115200 ~ 3 bits at 44000 */
+ outb(UART_LCR_WLEN7, io + UART_LCR);
+ /* FIFO operation */
+ outb(UART_FCR_ENABLE_FIFO, io + UART_FCR);
+ /* interrupts */
+ /* outb(UART_IER_RLSI|UART_IER_RDI|UART_IER_THRI, io + UART_IER); */
+ outb(UART_IER_RDI, io + UART_IER);
+ /* turn on UART */
+ outb(UART_MCR_DTR | UART_MCR_RTS | UART_MCR_OUT2, io + UART_MCR);
+ spin_unlock_irqrestore(&hardware_lock, flags);
+ return 0;
+}
+
+static void drop_hardware(void)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&hardware_lock, flags);
+
+ /* turn off interrupts */
+ outb(0, io + UART_IER);
+
+ spin_unlock_irqrestore(&hardware_lock, flags);
+}
+
+/* SECTION: Initialisation */
+
+static int init_port(void)
+{
+ int retval;
+
+ setup_timer(&timerlist, sir_timeout, 0);
+
+ /* get I/O port access and IRQ line */
+ if (!request_region(io, 8, KBUILD_MODNAME)) {
+ pr_err("i/o port 0x%.4x already in use.\n", io);
+ return -EBUSY;
+ }
+ retval = request_irq(irq, sir_interrupt, 0,
+ KBUILD_MODNAME, NULL);
+ if (retval < 0) {
+ release_region(io, 8);
+ pr_err("IRQ %d already in use.\n", irq);
+ return retval;
+ }
+ pr_info("I/O port 0x%.4x, IRQ %d.\n", io, irq);
+
+ return 0;
+}
+
+static void drop_port(void)
+{
+ free_irq(irq, NULL);
+ del_timer_sync(&timerlist);
+ release_region(io, 8);
+}
+
+static int init_sir_ir(void)
+{
+ int retval;
+
+ retval = init_port();
+ if (retval < 0)
+ return retval;
+ init_hardware();
+ return 0;
+}
+
+static int sir_ir_probe(struct platform_device *dev)
+{
+ int retval;
+
+ retval = init_chrdev();
+ if (retval < 0)
+ return retval;
+
+ return init_sir_ir();
+}
+
+static int sir_ir_remove(struct platform_device *dev)
+{
+ return 0;
+}
+
+static struct platform_driver sir_ir_driver = {
+ .probe = sir_ir_probe,
+ .remove = sir_ir_remove,
+ .driver = {
+ .name = "sir_ir",
+ },
+};
+
+static int __init sir_ir_init(void)
+{
+ int retval;
+
+ retval = platform_driver_register(&sir_ir_driver);
+ if (retval)
+ return retval;
+
+ sir_ir_dev = platform_device_alloc("sir_ir", 0);
+ if (!sir_ir_dev) {
+ retval = -ENOMEM;
+ goto pdev_alloc_fail;
+ }
+
+ retval = platform_device_add(sir_ir_dev);
+ if (retval)
+ goto pdev_add_fail;
+
+ return 0;
+
+pdev_add_fail:
+ platform_device_put(sir_ir_dev);
+pdev_alloc_fail:
+ platform_driver_unregister(&sir_ir_driver);
+ return retval;
+}
+
+static void __exit sir_ir_exit(void)
+{
+ drop_hardware();
+ drop_port();
+ platform_device_unregister(sir_ir_dev);
+ platform_driver_unregister(&sir_ir_driver);
+}
+
+module_init(sir_ir_init);
+module_exit(sir_ir_exit);
+
+MODULE_DESCRIPTION("Infrared receiver driver for SIR type serial ports");
+MODULE_AUTHOR("Milan Pikula");
+MODULE_LICENSE("GPL");
+
+module_param(io, int, 0444);
+MODULE_PARM_DESC(io, "I/O address base (0x3f8 or 0x2f8)");
+
+module_param(irq, int, 0444);
+MODULE_PARM_DESC(irq, "Interrupt (4 or 3)");
+
+module_param(threshold, int, 0444);
+MODULE_PARM_DESC(threshold, "space detection threshold (3)");
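For reference, the TIME_CONST used throughout this driver is simply the duration of one roughly 9-bit UART character frame at 115200 baud, about 78 µs, which is why each received byte is treated as that much continuous carrier:

#include <stdio.h>

int main(void)
{
	double frame_us = 9.0 * 1000000.0 / 115200.0;

	printf("one 9-bit frame at 115200 baud ~= %.3f us\n", frame_us);
	return 0;
}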
diff --git a/drivers/media/rc/st_rc.c b/drivers/media/rc/st_rc.c
index f0d7190e3919..a08e1dd06124 100644
--- a/drivers/media/rc/st_rc.c
+++ b/drivers/media/rc/st_rc.c
@@ -165,8 +165,7 @@ static void st_rc_hardware_init(struct st_rc_device *dev)
unsigned int rx_sampling_freq_div;
/* Enable the IP */
- if (dev->rstc)
- reset_control_deassert(dev->rstc);
+ reset_control_deassert(dev->rstc);
clk_prepare_enable(dev->sys_clock);
baseclock = clk_get_rate(dev->sys_clock);
@@ -281,10 +280,11 @@ static int st_rc_probe(struct platform_device *pdev)
else
rc_dev->rx_base = rc_dev->base;
-
rc_dev->rstc = reset_control_get_optional(dev, NULL);
- if (IS_ERR(rc_dev->rstc))
- rc_dev->rstc = NULL;
+ if (IS_ERR(rc_dev->rstc)) {
+ ret = PTR_ERR(rc_dev->rstc);
+ goto err;
+ }
rc_dev->dev = dev;
platform_set_drvdata(pdev, rc_dev);
@@ -298,7 +298,7 @@ static int st_rc_probe(struct platform_device *pdev)
rdev->open = st_rc_open;
rdev->close = st_rc_close;
rdev->driver_name = IR_ST_NAME;
- rdev->map_name = RC_MAP_LIRC;
+ rdev->map_name = RC_MAP_EMPTY;
rdev->input_name = "ST Remote Control Receiver";
ret = rc_register_device(rdev);
@@ -352,8 +352,7 @@ static int st_rc_suspend(struct device *dev)
writel(0x00, rc_dev->rx_base + IRB_RX_EN);
writel(0x00, rc_dev->rx_base + IRB_RX_INT_EN);
clk_disable_unprepare(rc_dev->sys_clock);
- if (rc_dev->rstc)
- reset_control_assert(rc_dev->rstc);
+ reset_control_assert(rc_dev->rstc);
}
return 0;
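The st_rc changes above and the sunxi-cir ones below rely on the reset framework's optional API: when no reset line is described, the *_optional() getters return a NULL (dummy) handle, and reset_control_assert()/deassert() accept it as a no-op, so only real errors (including -EPROBE_DEFER) need handling. A hedged sketch of the resulting probe pattern:

rstc = devm_reset_control_get_optional(dev, NULL);
if (IS_ERR(rstc))
	return PTR_ERR(rstc);		/* real error, e.g. -EPROBE_DEFER */

ret = reset_control_deassert(rstc);	/* NULL handle: no-op, returns 0 */
if (ret)
	return ret;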
diff --git a/drivers/media/rc/sunxi-cir.c b/drivers/media/rc/sunxi-cir.c
index 25b006167810..4b785dd775c1 100644
--- a/drivers/media/rc/sunxi-cir.c
+++ b/drivers/media/rc/sunxi-cir.c
@@ -174,16 +174,11 @@ static int sunxi_ir_probe(struct platform_device *pdev)
/* Reset (optional) */
ir->rst = devm_reset_control_get_optional(dev, NULL);
- if (IS_ERR(ir->rst)) {
- ret = PTR_ERR(ir->rst);
- if (ret == -EPROBE_DEFER)
- return ret;
- ir->rst = NULL;
- } else {
- ret = reset_control_deassert(ir->rst);
- if (ret)
- return ret;
- }
+ if (IS_ERR(ir->rst))
+ return PTR_ERR(ir->rst);
+ ret = reset_control_deassert(ir->rst);
+ if (ret)
+ return ret;
ret = clk_set_rate(ir->clk, SUNXI_IR_BASE_CLK);
if (ret) {
@@ -291,8 +286,7 @@ exit_clkdisable_clk:
exit_clkdisable_apb_clk:
clk_disable_unprepare(ir->apb_clk);
exit_reset_assert:
- if (ir->rst)
- reset_control_assert(ir->rst);
+ reset_control_assert(ir->rst);
return ret;
}
@@ -304,8 +298,7 @@ static int sunxi_ir_remove(struct platform_device *pdev)
clk_disable_unprepare(ir->clk);
clk_disable_unprepare(ir->apb_clk);
- if (ir->rst)
- reset_control_assert(ir->rst);
+ reset_control_assert(ir->rst);
spin_lock_irqsave(&ir->ir_lock, flags);
/* disable IR IRQ */
diff --git a/drivers/media/rc/winbond-cir.c b/drivers/media/rc/winbond-cir.c
index dc1c8305ad23..5a4d4a611197 100644
--- a/drivers/media/rc/winbond-cir.c
+++ b/drivers/media/rc/winbond-cir.c
@@ -1082,7 +1082,9 @@ wbcir_probe(struct pnp_dev *device, const struct pnp_device_id *dev_id)
data->dev->tx_ir = wbcir_tx;
data->dev->priv = data;
data->dev->dev.parent = &device->dev;
- data->dev->timeout = MS_TO_NS(100);
+ data->dev->min_timeout = 1;
+ data->dev->timeout = IR_DEFAULT_TIMEOUT;
+ data->dev->max_timeout = 10 * IR_DEFAULT_TIMEOUT;
data->dev->rx_resolution = US_TO_NS(2);
data->dev->allowed_protocols = RC_BIT_ALL_IR_DECODER;
data->dev->allowed_wakeup_protocols = RC_BIT_NEC | RC_BIT_NECX |
diff --git a/drivers/media/tuners/si2157.c b/drivers/media/tuners/si2157.c
index 57b250847cd3..e35b1faf0ddc 100644
--- a/drivers/media/tuners/si2157.c
+++ b/drivers/media/tuners/si2157.c
@@ -106,6 +106,9 @@ static int si2157_init(struct dvb_frontend *fe)
if (dev->chiptype == SI2157_CHIPTYPE_SI2146) {
memcpy(cmd.args, "\xc0\x05\x01\x00\x00\x0b\x00\x00\x01", 9);
cmd.wlen = 9;
+ } else if (dev->chiptype == SI2157_CHIPTYPE_SI2141) {
+ memcpy(cmd.args, "\xc0\x00\x0d\x0e\x00\x01\x01\x01\x01\x03", 10);
+ cmd.wlen = 10;
} else {
memcpy(cmd.args, "\xc0\x00\x0c\x00\x00\x01\x01\x01\x01\x01\x01\x02\x00\x00\x01", 15);
cmd.wlen = 15;
@@ -115,6 +118,15 @@ static int si2157_init(struct dvb_frontend *fe)
if (ret)
goto err;
+ /* Si2141 needs a second command before it answers the revision query */
+ if (dev->chiptype == SI2157_CHIPTYPE_SI2141) {
+ memcpy(cmd.args, "\xc0\x08\x01\x02\x00\x00\x01", 7);
+ cmd.wlen = 7;
+ ret = si2157_cmd_execute(client, &cmd);
+ if (ret)
+ goto err;
+ }
+
/* query chip revision */
memcpy(cmd.args, "\x02", 1);
cmd.wlen = 1;
@@ -131,12 +143,16 @@ static int si2157_init(struct dvb_frontend *fe)
#define SI2157_A30 ('A' << 24 | 57 << 16 | '3' << 8 | '0' << 0)
#define SI2147_A30 ('A' << 24 | 47 << 16 | '3' << 8 | '0' << 0)
#define SI2146_A10 ('A' << 24 | 46 << 16 | '1' << 8 | '0' << 0)
+ #define SI2141_A10 ('A' << 24 | 41 << 16 | '1' << 8 | '0' << 0)
switch (chip_id) {
case SI2158_A20:
case SI2148_A20:
fw_name = SI2158_A20_FIRMWARE;
break;
+ case SI2141_A10:
+ fw_name = SI2141_A10_FIRMWARE;
+ break;
case SI2157_A30:
case SI2147_A30:
case SI2146_A10:
@@ -371,7 +387,7 @@ static int si2157_get_if_frequency(struct dvb_frontend *fe, u32 *frequency)
static const struct dvb_tuner_ops si2157_ops = {
.info = {
- .name = "Silicon Labs Si2146/2147/2148/2157/2158",
+ .name = "Silicon Labs Si2141/Si2146/2147/2148/2157/2158",
.frequency_min = 42000000,
.frequency_max = 870000000,
},
@@ -471,6 +487,7 @@ static int si2157_probe(struct i2c_client *client,
#endif
dev_info(&client->dev, "Silicon Labs %s successfully attached\n",
+ dev->chiptype == SI2157_CHIPTYPE_SI2141 ? "Si2141" :
dev->chiptype == SI2157_CHIPTYPE_SI2146 ?
"Si2146" : "Si2147/2148/2157/2158");
@@ -508,6 +525,7 @@ static int si2157_remove(struct i2c_client *client)
static const struct i2c_device_id si2157_id_table[] = {
{"si2157", SI2157_CHIPTYPE_SI2157},
{"si2146", SI2157_CHIPTYPE_SI2146},
+ {"si2141", SI2157_CHIPTYPE_SI2141},
{}
};
MODULE_DEVICE_TABLE(i2c, si2157_id_table);
@@ -524,7 +542,8 @@ static struct i2c_driver si2157_driver = {
module_i2c_driver(si2157_driver);
-MODULE_DESCRIPTION("Silicon Labs Si2146/2147/2148/2157/2158 silicon tuner driver");
+MODULE_DESCRIPTION("Silicon Labs Si2141/Si2146/2147/2148/2157/2158 silicon tuner driver");
MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(SI2158_A20_FIRMWARE);
+MODULE_FIRMWARE(SI2141_A10_FIRMWARE);
diff --git a/drivers/media/tuners/si2157_priv.h b/drivers/media/tuners/si2157_priv.h
index d6b2c7b44053..e6436f74abaa 100644
--- a/drivers/media/tuners/si2157_priv.h
+++ b/drivers/media/tuners/si2157_priv.h
@@ -42,6 +42,7 @@ struct si2157_dev {
#define SI2157_CHIPTYPE_SI2157 0
#define SI2157_CHIPTYPE_SI2146 1
+#define SI2157_CHIPTYPE_SI2141 2
/* firmware command struct */
#define SI2157_ARGLEN 30
@@ -52,5 +53,6 @@ struct si2157_cmd {
};
#define SI2158_A20_FIRMWARE "dvb-tuner-si2158-a20-01.fw"
+#define SI2141_A10_FIRMWARE "dvb-tuner-si2141-a10-01.fw"
#endif
diff --git a/drivers/media/tuners/xc5000.c b/drivers/media/tuners/xc5000.c
index 91947cf1950e..e823aafce276 100644
--- a/drivers/media/tuners/xc5000.c
+++ b/drivers/media/tuners/xc5000.c
@@ -1184,8 +1184,7 @@ static int xc_load_fw_and_init_tuner(struct dvb_frontend *fe, int force)
/* Start the tuner self-calibration process */
ret = xc_initialize(priv);
if (ret) {
- printk(KERN_ERR
- "xc5000: Can't request Self-callibration.");
+ printk(KERN_ERR "xc5000: Can't request self-calibration.");
continue;
}
diff --git a/drivers/media/usb/Kconfig b/drivers/media/usb/Kconfig
index c9644b62f91a..b24e753c4766 100644
--- a/drivers/media/usb/Kconfig
+++ b/drivers/media/usb/Kconfig
@@ -63,6 +63,7 @@ endif
if MEDIA_CEC_SUPPORT
comment "USB HDMI CEC adapters"
source "drivers/media/usb/pulse8-cec/Kconfig"
+source "drivers/media/usb/rainshadow-cec/Kconfig"
endif
endif #MEDIA_USB_SUPPORT
diff --git a/drivers/media/usb/Makefile b/drivers/media/usb/Makefile
index 0f15e3351ddc..738b993ec8b0 100644
--- a/drivers/media/usb/Makefile
+++ b/drivers/media/usb/Makefile
@@ -25,3 +25,4 @@ obj-$(CONFIG_VIDEO_USBTV) += usbtv/
obj-$(CONFIG_VIDEO_GO7007) += go7007/
obj-$(CONFIG_DVB_AS102) += as102/
obj-$(CONFIG_USB_PULSE8_CEC) += pulse8-cec/
+obj-$(CONFIG_USB_RAINSHADOW_CEC) += rainshadow-cec/
diff --git a/drivers/media/usb/au0828/au0828-cards.c b/drivers/media/usb/au0828/au0828-cards.c
index 313f659f0bfb..43bfa778b070 100644
--- a/drivers/media/usb/au0828/au0828-cards.c
+++ b/drivers/media/usb/au0828/au0828-cards.c
@@ -153,7 +153,7 @@ static void hauppauge_eeprom(struct au0828_dev *dev, u8 *eeprom_data)
{
struct tveeprom tv;
- tveeprom_hauppauge_analog(&dev->i2c_client, &tv, eeprom_data);
+ tveeprom_hauppauge_analog(&tv, eeprom_data);
dev->board.tuner_type = tv.tuner_type;
/* Make sure we support the board model */
diff --git a/drivers/media/usb/au0828/au0828-video.c b/drivers/media/usb/au0828/au0828-video.c
index 16f9125a985a..2a255bd32bb3 100644
--- a/drivers/media/usb/au0828/au0828-video.c
+++ b/drivers/media/usb/au0828/au0828-video.c
@@ -809,16 +809,9 @@ static void au0828_analog_stream_reset(struct au0828_dev *dev)
*/
static int au0828_stream_interrupt(struct au0828_dev *dev)
{
- int ret = 0;
-
dev->stream_state = STREAM_INTERRUPT;
if (test_bit(DEV_DISCONNECTED, &dev->dev_state))
return -ENODEV;
- else if (ret) {
- set_bit(DEV_MISCONFIGURED, &dev->dev_state);
- dprintk(1, "%s device is misconfigured!\n", __func__);
- return ret;
- }
return 0;
}
diff --git a/drivers/media/usb/cx231xx/cx231xx-audio.c b/drivers/media/usb/cx231xx/cx231xx-audio.c
index cf80842dfa08..a050d125934c 100644
--- a/drivers/media/usb/cx231xx/cx231xx-audio.c
+++ b/drivers/media/usb/cx231xx/cx231xx-audio.c
@@ -670,10 +670,8 @@ static int cx231xx_audio_init(struct cx231xx *dev)
spin_lock_init(&adev->slock);
err = snd_pcm_new(card, "Cx231xx Audio", 0, 0, 1, &pcm);
- if (err < 0) {
- snd_card_free(card);
- return err;
- }
+ if (err < 0)
+ goto err_free_card;
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE,
&snd_cx231xx_pcm_capture);
@@ -687,10 +685,9 @@ static int cx231xx_audio_init(struct cx231xx *dev)
INIT_WORK(&dev->wq_trigger, audio_trigger);
err = snd_card_register(card);
- if (err < 0) {
- snd_card_free(card);
- return err;
- }
+ if (err < 0)
+ goto err_free_card;
+
adev->sndcard = card;
adev->udev = dev->udev;
@@ -700,6 +697,11 @@ static int cx231xx_audio_init(struct cx231xx *dev)
hs_config_info[0].interface_info.
audio_index + 1];
+ if (uif->altsetting[0].desc.bNumEndpoints < isoc_pipe + 1) {
+ err = -ENODEV;
+ goto err_free_card;
+ }
+
adev->end_point_addr =
uif->altsetting[0].endpoint[isoc_pipe].desc.
bEndpointAddress;
@@ -709,13 +711,20 @@ static int cx231xx_audio_init(struct cx231xx *dev)
"audio EndPoint Addr 0x%x, Alternate settings: %i\n",
adev->end_point_addr, adev->num_alt);
adev->alt_max_pkt_size = kmalloc(32 * adev->num_alt, GFP_KERNEL);
-
- if (adev->alt_max_pkt_size == NULL)
- return -ENOMEM;
+ if (!adev->alt_max_pkt_size) {
+ err = -ENOMEM;
+ goto err_free_card;
+ }
for (i = 0; i < adev->num_alt; i++) {
- u16 tmp =
- le16_to_cpu(uif->altsetting[i].endpoint[isoc_pipe].desc.
+ u16 tmp;
+
+ if (uif->altsetting[i].desc.bNumEndpoints < isoc_pipe + 1) {
+ err = -ENODEV;
+ goto err_free_pkt_size;
+ }
+
+ tmp = le16_to_cpu(uif->altsetting[i].endpoint[isoc_pipe].desc.
wMaxPacketSize);
adev->alt_max_pkt_size[i] =
(tmp & 0x07ff) * (((tmp & 0x1800) >> 11) + 1);
@@ -725,6 +734,13 @@ static int cx231xx_audio_init(struct cx231xx *dev)
}
return 0;
+
+err_free_pkt_size:
+ kfree(adev->alt_max_pkt_size);
+err_free_card:
+ snd_card_free(card);
+
+ return err;
}
static int cx231xx_audio_fini(struct cx231xx *dev)
diff --git a/drivers/media/usb/cx231xx/cx231xx-cards.c b/drivers/media/usb/cx231xx/cx231xx-cards.c
index f730fdbc9156..a1007d005290 100644
--- a/drivers/media/usb/cx231xx/cx231xx-cards.c
+++ b/drivers/media/usb/cx231xx/cx231xx-cards.c
@@ -1165,8 +1165,7 @@ void cx231xx_card_setup(struct cx231xx *dev)
e->client.addr = 0xa0 >> 1;
read_eeprom(dev, &e->client, e->eeprom, sizeof(e->eeprom));
- tveeprom_hauppauge_analog(&e->client,
- &e->tvee, e->eeprom + 0xc0);
+ tveeprom_hauppauge_analog(&e->tvee, e->eeprom + 0xc0);
kfree(e);
break;
}
@@ -1426,6 +1425,9 @@ static int cx231xx_init_v4l2(struct cx231xx *dev,
uif = udev->actconfig->interface[idx];
+ if (uif->altsetting[0].desc.bNumEndpoints < isoc_pipe + 1)
+ return -ENODEV;
+
dev->video_mode.end_point_addr = uif->altsetting[0].endpoint[isoc_pipe].desc.bEndpointAddress;
dev->video_mode.num_alt = uif->num_altsetting;
@@ -1439,7 +1441,12 @@ static int cx231xx_init_v4l2(struct cx231xx *dev,
return -ENOMEM;
for (i = 0; i < dev->video_mode.num_alt; i++) {
- u16 tmp = le16_to_cpu(uif->altsetting[i].endpoint[isoc_pipe].desc.wMaxPacketSize);
+ u16 tmp;
+
+ if (uif->altsetting[i].desc.bNumEndpoints < isoc_pipe + 1)
+ return -ENODEV;
+
+ tmp = le16_to_cpu(uif->altsetting[i].endpoint[isoc_pipe].desc.wMaxPacketSize);
dev->video_mode.alt_max_pkt_size[i] = (tmp & 0x07ff) * (((tmp & 0x1800) >> 11) + 1);
dev_dbg(dev->dev,
"Alternate setting %i, max size= %i\n", i,
@@ -1456,6 +1463,9 @@ static int cx231xx_init_v4l2(struct cx231xx *dev,
}
uif = udev->actconfig->interface[idx];
+ if (uif->altsetting[0].desc.bNumEndpoints < isoc_pipe + 1)
+ return -ENODEV;
+
dev->vbi_mode.end_point_addr =
uif->altsetting[0].endpoint[isoc_pipe].desc.
bEndpointAddress;
@@ -1472,8 +1482,12 @@ static int cx231xx_init_v4l2(struct cx231xx *dev,
return -ENOMEM;
for (i = 0; i < dev->vbi_mode.num_alt; i++) {
- u16 tmp =
- le16_to_cpu(uif->altsetting[i].endpoint[isoc_pipe].
+ u16 tmp;
+
+ if (uif->altsetting[i].desc.bNumEndpoints < isoc_pipe + 1)
+ return -ENODEV;
+
+ tmp = le16_to_cpu(uif->altsetting[i].endpoint[isoc_pipe].
desc.wMaxPacketSize);
dev->vbi_mode.alt_max_pkt_size[i] =
(tmp & 0x07ff) * (((tmp & 0x1800) >> 11) + 1);
@@ -1493,6 +1507,9 @@ static int cx231xx_init_v4l2(struct cx231xx *dev,
}
uif = udev->actconfig->interface[idx];
+ if (uif->altsetting[0].desc.bNumEndpoints < isoc_pipe + 1)
+ return -ENODEV;
+
dev->sliced_cc_mode.end_point_addr =
uif->altsetting[0].endpoint[isoc_pipe].desc.
bEndpointAddress;
@@ -1507,7 +1524,12 @@ static int cx231xx_init_v4l2(struct cx231xx *dev,
return -ENOMEM;
for (i = 0; i < dev->sliced_cc_mode.num_alt; i++) {
- u16 tmp = le16_to_cpu(uif->altsetting[i].endpoint[isoc_pipe].
+ u16 tmp;
+
+ if (uif->altsetting[i].desc.bNumEndpoints < isoc_pipe + 1)
+ return -ENODEV;
+
+ tmp = le16_to_cpu(uif->altsetting[i].endpoint[isoc_pipe].
desc.wMaxPacketSize);
dev->sliced_cc_mode.alt_max_pkt_size[i] =
(tmp & 0x07ff) * (((tmp & 0x1800) >> 11) + 1);
@@ -1676,6 +1698,11 @@ static int cx231xx_usb_probe(struct usb_interface *interface,
}
uif = udev->actconfig->interface[idx];
+ if (uif->altsetting[0].desc.bNumEndpoints < isoc_pipe + 1) {
+ retval = -ENODEV;
+ goto err_video_alt;
+ }
+
dev->ts1_mode.end_point_addr =
uif->altsetting[0].endpoint[isoc_pipe].
desc.bEndpointAddress;
@@ -1693,7 +1720,14 @@ static int cx231xx_usb_probe(struct usb_interface *interface,
}
for (i = 0; i < dev->ts1_mode.num_alt; i++) {
- u16 tmp = le16_to_cpu(uif->altsetting[i].
+ u16 tmp;
+
+ if (uif->altsetting[i].desc.bNumEndpoints < isoc_pipe + 1) {
+ retval = -ENODEV;
+ goto err_video_alt;
+ }
+
+ tmp = le16_to_cpu(uif->altsetting[i].
endpoint[isoc_pipe].desc.
wMaxPacketSize);
dev->ts1_mode.alt_max_pkt_size[i] =
diff --git a/drivers/media/usb/cx231xx/cx231xx-i2c.c b/drivers/media/usb/cx231xx/cx231xx-i2c.c
index dff514e147da..8d95b1154e12 100644
--- a/drivers/media/usb/cx231xx/cx231xx-i2c.c
+++ b/drivers/media/usb/cx231xx/cx231xx-i2c.c
@@ -491,20 +491,24 @@ void cx231xx_do_i2c_scan(struct cx231xx *dev, int i2c_port)
{
unsigned char buf;
int i, rc;
- struct i2c_client client;
+ struct i2c_adapter *adap;
+ struct i2c_msg msg = {
+ .flags = I2C_M_RD,
+ .len = 1,
+ .buf = &buf,
+ };
if (!i2c_scan)
return;
/* Don't generate I2C errors during scan */
dev->i2c_scan_running = true;
-
- memset(&client, 0, sizeof(client));
- client.adapter = cx231xx_get_i2c_adap(dev, i2c_port);
+ adap = cx231xx_get_i2c_adap(dev, i2c_port);
for (i = 0; i < 128; i++) {
- client.addr = i;
- rc = i2c_master_recv(&client, &buf, 0);
+ msg.addr = i;
+ rc = i2c_transfer(adap, &msg, 1);
+
if (rc < 0)
continue;
dev_info(dev->dev,
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf.c b/drivers/media/usb/dvb-usb-v2/mxl111sf.c
index 80c635980526..abf69d8fa469 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf.c
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf.c
@@ -919,7 +919,12 @@ static int mxl111sf_init(struct dvb_usb_device *d)
struct mxl111sf_state *state = d_to_priv(d);
int ret;
static u8 eeprom[256];
- struct i2c_client c;
+ u8 reg = 0;
+ struct i2c_msg msg[2] = {
+ { .addr = 0xa0 >> 1, .len = 1, .buf = &reg },
+ { .addr = 0xa0 >> 1, .flags = I2C_M_RD,
+ .len = sizeof(eeprom), .buf = eeprom },
+ };
ret = get_chip_info(state);
if (mxl_fail(ret))
@@ -930,14 +935,11 @@ static int mxl111sf_init(struct dvb_usb_device *d)
if (state->chip_rev > MXL111SF_V6)
mxl111sf_config_pin_mux_modes(state, PIN_MUX_TS_SPI_IN_MODE_1);
- c.adapter = &d->i2c_adap;
- c.addr = 0xa0 >> 1;
-
- ret = tveeprom_read(&c, eeprom, sizeof(eeprom));
+ ret = i2c_transfer(&d->i2c_adap, msg, 2);
if (mxl_fail(ret))
return 0;
- tveeprom_hauppauge_analog(&c, &state->tv, (0x84 == eeprom[0xa0]) ?
- eeprom + 0xa0 : eeprom + 0x80);
+ tveeprom_hauppauge_analog(&state->tv, (0x84 == eeprom[0xa0]) ?
+ eeprom + 0xa0 : eeprom + 0x80);
#if 0
switch (state->tv.model) {
case 117001:
diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c
index 51620e02292f..99a3f3625944 100644
--- a/drivers/media/usb/dvb-usb/cxusb.c
+++ b/drivers/media/usb/dvb-usb/cxusb.c
@@ -458,8 +458,8 @@ static int cxusb_rc_query(struct dvb_usb_device *d)
cxusb_ctrl_msg(d, CMD_GET_IR_CODE, NULL, 0, ircode, 4);
if (ircode[2] || ircode[3])
- rc_keydown(d->rc_dev, RC_TYPE_UNKNOWN,
- RC_SCANCODE_RC5(ircode[2], ircode[3]), 0);
+ rc_keydown(d->rc_dev, RC_TYPE_NEC,
+ RC_SCANCODE_NEC(~ircode[2] & 0xff, ircode[3]), 0);
return 0;
}
@@ -473,8 +473,8 @@ static int cxusb_bluebird2_rc_query(struct dvb_usb_device *d)
return 0;
if (ircode[1] || ircode[2])
- rc_keydown(d->rc_dev, RC_TYPE_UNKNOWN,
- RC_SCANCODE_RC5(ircode[1], ircode[2]), 0);
+ rc_keydown(d->rc_dev, RC_TYPE_NEC,
+ RC_SCANCODE_NEC(~ircode[1] & 0xff, ircode[2]), 0);
return 0;
}
@@ -1239,6 +1239,82 @@ static int cxusb_mygica_t230_frontend_attach(struct dvb_usb_adapter *adap)
return 0;
}
+static int cxusb_mygica_t230c_frontend_attach(struct dvb_usb_adapter *adap)
+{
+ struct dvb_usb_device *d = adap->dev;
+ struct cxusb_state *st = d->priv;
+ struct i2c_adapter *adapter;
+ struct i2c_client *client_demod;
+ struct i2c_client *client_tuner;
+ struct i2c_board_info info;
+ struct si2168_config si2168_config;
+ struct si2157_config si2157_config;
+
+ /* Select required USB configuration */
+ if (usb_set_interface(d->udev, 0, 0) < 0)
+ err("set interface failed");
+
+ /* Unblock all USB pipes */
+ usb_clear_halt(d->udev,
+ usb_sndbulkpipe(d->udev, d->props.generic_bulk_ctrl_endpoint));
+ usb_clear_halt(d->udev,
+ usb_rcvbulkpipe(d->udev, d->props.generic_bulk_ctrl_endpoint));
+ usb_clear_halt(d->udev,
+ usb_rcvbulkpipe(d->udev, d->props.adapter[0].fe[0].stream.endpoint));
+
+ /* attach frontend */
+ memset(&si2168_config, 0, sizeof(si2168_config));
+ si2168_config.i2c_adapter = &adapter;
+ si2168_config.fe = &adap->fe_adap[0].fe;
+ si2168_config.ts_mode = SI2168_TS_PARALLEL;
+ si2168_config.ts_clock_inv = 1;
+ memset(&info, 0, sizeof(struct i2c_board_info));
+ strlcpy(info.type, "si2168", I2C_NAME_SIZE);
+ info.addr = 0x64;
+ info.platform_data = &si2168_config;
+ request_module(info.type);
+ client_demod = i2c_new_device(&d->i2c_adap, &info);
+ if (client_demod == NULL || client_demod->dev.driver == NULL)
+ return -ENODEV;
+
+ if (!try_module_get(client_demod->dev.driver->owner)) {
+ i2c_unregister_device(client_demod);
+ return -ENODEV;
+ }
+
+ /* attach tuner */
+ memset(&si2157_config, 0, sizeof(si2157_config));
+ si2157_config.fe = adap->fe_adap[0].fe;
+ memset(&info, 0, sizeof(struct i2c_board_info));
+ strlcpy(info.type, "si2141", I2C_NAME_SIZE);
+ info.addr = 0x60;
+ info.platform_data = &si2157_config;
+ request_module("si2157");
+ client_tuner = i2c_new_device(adapter, &info);
+ if (client_tuner == NULL || client_tuner->dev.driver == NULL) {
+ module_put(client_demod->dev.driver->owner);
+ i2c_unregister_device(client_demod);
+ return -ENODEV;
+ }
+ if (!try_module_get(client_tuner->dev.driver->owner)) {
+ i2c_unregister_device(client_tuner);
+ module_put(client_demod->dev.driver->owner);
+ i2c_unregister_device(client_demod);
+ return -ENODEV;
+ }
+
+ st->i2c_client_demod = client_demod;
+ st->i2c_client_tuner = client_tuner;
+
+ /* hook fe: need to resync the slave fifo when signal locks. */
+ mutex_init(&st->stream_mutex);
+ st->last_lock = 0;
+ st->fe_read_status = adap->fe_adap[0].fe->ops.read_status;
+ adap->fe_adap[0].fe->ops.read_status = cxusb_read_status;
+
+ return 0;
+}
+
/*
* DViCO has shipped two devices with the same USB ID, but only one of them
* needs a firmware download. Check the device class details to see if they
@@ -1321,6 +1397,7 @@ static struct dvb_usb_device_properties cxusb_aver_a868r_properties;
static struct dvb_usb_device_properties cxusb_d680_dmb_properties;
static struct dvb_usb_device_properties cxusb_mygica_d689_properties;
static struct dvb_usb_device_properties cxusb_mygica_t230_properties;
+static struct dvb_usb_device_properties cxusb_mygica_t230c_properties;
static int cxusb_probe(struct usb_interface *intf,
const struct usb_device_id *id)
@@ -1353,6 +1430,8 @@ static int cxusb_probe(struct usb_interface *intf,
THIS_MODULE, NULL, adapter_nr) ||
0 == dvb_usb_device_init(intf, &cxusb_mygica_t230_properties,
THIS_MODULE, NULL, adapter_nr) ||
+ 0 == dvb_usb_device_init(intf, &cxusb_mygica_t230c_properties,
+ THIS_MODULE, NULL, adapter_nr) ||
0)
return 0;
@@ -1404,6 +1483,7 @@ enum cxusb_table_index {
CONEXANT_D680_DMB,
MYGICA_D689,
MYGICA_T230,
+ MYGICA_T230C,
NR__cxusb_table_index
};
@@ -1471,6 +1551,9 @@ static struct usb_device_id cxusb_table[NR__cxusb_table_index + 1] = {
[MYGICA_T230] = {
USB_DEVICE(USB_VID_CONEXANT, USB_PID_MYGICA_T230)
},
+ [MYGICA_T230C] = {
+ USB_DEVICE(USB_VID_CONEXANT, USB_PID_MYGICA_T230+1)
+ },
{} /* Terminating entry */
};
MODULE_DEVICE_TABLE (usb, cxusb_table);
@@ -1563,7 +1646,7 @@ static struct dvb_usb_device_properties cxusb_bluebird_lgh064f_properties = {
.rc_codes = RC_MAP_DVICO_PORTABLE,
.module_name = KBUILD_MODNAME,
.rc_query = cxusb_rc_query,
- .allowed_protos = RC_BIT_UNKNOWN,
+ .allowed_protos = RC_BIT_NEC,
},
.generic_bulk_ctrl_endpoint = 0x01,
@@ -1620,7 +1703,7 @@ static struct dvb_usb_device_properties cxusb_bluebird_dee1601_properties = {
.rc_codes = RC_MAP_DVICO_MCE,
.module_name = KBUILD_MODNAME,
.rc_query = cxusb_rc_query,
- .allowed_protos = RC_BIT_UNKNOWN,
+ .allowed_protos = RC_BIT_NEC,
},
.generic_bulk_ctrl_endpoint = 0x01,
@@ -1685,7 +1768,7 @@ static struct dvb_usb_device_properties cxusb_bluebird_lgz201_properties = {
.rc_codes = RC_MAP_DVICO_PORTABLE,
.module_name = KBUILD_MODNAME,
.rc_query = cxusb_rc_query,
- .allowed_protos = RC_BIT_UNKNOWN,
+ .allowed_protos = RC_BIT_NEC,
},
.generic_bulk_ctrl_endpoint = 0x01,
@@ -1741,7 +1824,7 @@ static struct dvb_usb_device_properties cxusb_bluebird_dtt7579_properties = {
.rc_codes = RC_MAP_DVICO_PORTABLE,
.module_name = KBUILD_MODNAME,
.rc_query = cxusb_rc_query,
- .allowed_protos = RC_BIT_UNKNOWN,
+ .allowed_protos = RC_BIT_NEC,
},
.generic_bulk_ctrl_endpoint = 0x01,
@@ -1796,7 +1879,7 @@ static struct dvb_usb_device_properties cxusb_bluebird_dualdig4_properties = {
.rc_codes = RC_MAP_DVICO_MCE,
.module_name = KBUILD_MODNAME,
.rc_query = cxusb_bluebird2_rc_query,
- .allowed_protos = RC_BIT_UNKNOWN,
+ .allowed_protos = RC_BIT_NEC,
},
.num_device_descs = 1,
@@ -1850,7 +1933,7 @@ static struct dvb_usb_device_properties cxusb_bluebird_nano2_properties = {
.rc_codes = RC_MAP_DVICO_PORTABLE,
.module_name = KBUILD_MODNAME,
.rc_query = cxusb_bluebird2_rc_query,
- .allowed_protos = RC_BIT_UNKNOWN,
+ .allowed_protos = RC_BIT_NEC,
},
.num_device_descs = 1,
@@ -1906,7 +1989,7 @@ static struct dvb_usb_device_properties cxusb_bluebird_nano2_needsfirmware_prope
.rc_codes = RC_MAP_DVICO_PORTABLE,
.module_name = KBUILD_MODNAME,
.rc_query = cxusb_rc_query,
- .allowed_protos = RC_BIT_UNKNOWN,
+ .allowed_protos = RC_BIT_NEC,
},
.num_device_descs = 1,
@@ -2005,7 +2088,7 @@ struct dvb_usb_device_properties cxusb_bluebird_dualdig4_rev2_properties = {
.rc_codes = RC_MAP_DVICO_MCE,
.module_name = KBUILD_MODNAME,
.rc_query = cxusb_rc_query,
- .allowed_protos = RC_BIT_UNKNOWN,
+ .allowed_protos = RC_BIT_NEC,
},
.num_device_descs = 1,
@@ -2165,7 +2248,7 @@ static struct dvb_usb_device_properties cxusb_mygica_t230_properties = {
.rc.core = {
.rc_interval = 100,
- .rc_codes = RC_MAP_D680_DMB,
+ .rc_codes = RC_MAP_TOTAL_MEDIA_IN_HAND_02,
.module_name = KBUILD_MODNAME,
.rc_query = cxusb_d680_dmb_rc_query,
.allowed_protos = RC_BIT_UNKNOWN,
@@ -2181,6 +2264,60 @@ static struct dvb_usb_device_properties cxusb_mygica_t230_properties = {
}
};
+static struct dvb_usb_device_properties cxusb_mygica_t230c_properties = {
+ .caps = DVB_USB_IS_AN_I2C_ADAPTER,
+
+ .usb_ctrl = CYPRESS_FX2,
+
+ .size_of_priv = sizeof(struct cxusb_state),
+
+ .num_adapters = 1,
+ .adapter = {
+ {
+ .num_frontends = 1,
+ .fe = {{
+ .streaming_ctrl = cxusb_streaming_ctrl,
+ .frontend_attach = cxusb_mygica_t230c_frontend_attach,
+
+ /* parameter for the MPEG2-data transfer */
+ .stream = {
+ .type = USB_BULK,
+ .count = 5,
+ .endpoint = 0x02,
+ .u = {
+ .bulk = {
+ .buffersize = 8192,
+ }
+ }
+ },
+ } },
+ },
+ },
+
+ .power_ctrl = cxusb_d680_dmb_power_ctrl,
+
+ .i2c_algo = &cxusb_i2c_algo,
+
+ .generic_bulk_ctrl_endpoint = 0x01,
+
+ .rc.core = {
+ .rc_interval = 100,
+ .rc_codes = RC_MAP_TOTAL_MEDIA_IN_HAND_02,
+ .module_name = KBUILD_MODNAME,
+ .rc_query = cxusb_d680_dmb_rc_query,
+ .allowed_protos = RC_BIT_UNKNOWN,
+ },
+
+ .num_device_descs = 1,
+ .devices = {
+ {
+ "Mygica T230C DVB-T/T2/C",
+ { NULL },
+ { &cxusb_table[MYGICA_T230C], NULL },
+ },
+ }
+};
+
static struct usb_driver cxusb_driver = {
.name = "dvb_usb_cxusb",
.probe = cxusb_probe,
diff --git a/drivers/media/usb/dvb-usb/dib0700_core.c b/drivers/media/usb/dvb-usb/dib0700_core.c
index dd5edd3a17ee..08acdd32e412 100644
--- a/drivers/media/usb/dvb-usb/dib0700_core.c
+++ b/drivers/media/usb/dvb-usb/dib0700_core.c
@@ -809,6 +809,9 @@ int dib0700_rc_setup(struct dvb_usb_device *d, struct usb_interface *intf)
/* Starting in firmware 1.20, the RC info is provided on a bulk pipe */
+ if (intf->altsetting[0].desc.bNumEndpoints < rc_ep + 1)
+ return -ENODEV;
+
purb = usb_alloc_urb(0, GFP_KERNEL);
if (purb == NULL)
return -ENOMEM;
diff --git a/drivers/media/usb/dvb-usb/dibusb-mc-common.c b/drivers/media/usb/dvb-usb/dibusb-mc-common.c
index c989cac9343d..0c2bc97436d5 100644
--- a/drivers/media/usb/dvb-usb/dibusb-mc-common.c
+++ b/drivers/media/usb/dvb-usb/dibusb-mc-common.c
@@ -11,6 +11,8 @@
#include "dibusb.h"
+MODULE_LICENSE("GPL");
+
/* 3000MC/P stuff */
// Config Adjacent channels Perf -cal22
static struct dibx000_agc_config dib3000p_mt2060_agc_config = {
diff --git a/drivers/media/usb/dvb-usb/digitv.c b/drivers/media/usb/dvb-usb/digitv.c
index 4284f6984dc1..475a3c0cdee7 100644
--- a/drivers/media/usb/dvb-usb/digitv.c
+++ b/drivers/media/usb/dvb-usb/digitv.c
@@ -33,6 +33,9 @@ static int digitv_ctrl_msg(struct dvb_usb_device *d,
wo = (rbuf == NULL || rlen == 0); /* write-only */
+ if (wlen > 4 || rlen > 4)
+ return -EIO;
+
memset(st->sndbuf, 0, 7);
memset(st->rcvbuf, 0, 7);
diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
index 4f42d57f81d9..6e654e5026dd 100644
--- a/drivers/media/usb/dvb-usb/dw2102.c
+++ b/drivers/media/usb/dvb-usb/dw2102.c
@@ -204,6 +204,20 @@ static int dw2102_serit_i2c_transfer(struct i2c_adapter *adap,
switch (num) {
case 2:
+ if (msg[0].len != 1) {
+ warn("i2c rd: len=%d is not 1!\n",
+ msg[0].len);
+ num = -EOPNOTSUPP;
+ break;
+ }
+
+ if (2 + msg[1].len > sizeof(buf6)) {
+ warn("i2c rd: len=%d is too big!\n",
+ msg[1].len);
+ num = -EOPNOTSUPP;
+ break;
+ }
+
/* read si2109 register by number */
buf6[0] = msg[0].addr << 1;
buf6[1] = msg[0].len;
@@ -219,6 +233,13 @@ static int dw2102_serit_i2c_transfer(struct i2c_adapter *adap,
case 1:
switch (msg[0].addr) {
case 0x68:
+ if (2 + msg[0].len > sizeof(buf6)) {
+ warn("i2c wr: len=%d is too big!\n",
+ msg[0].len);
+ num = -EOPNOTSUPP;
+ break;
+ }
+
/* write to si2109 register */
buf6[0] = msg[0].addr << 1;
buf6[1] = msg[0].len;
@@ -262,6 +283,13 @@ static int dw2102_earda_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg ms
/* first write first register number */
u8 ibuf[MAX_XFER_SIZE], obuf[3];
+ if (2 + msg[0].len != sizeof(obuf)) {
+ warn("i2c rd: len=%d is not 1!\n",
+ msg[0].len);
+ ret = -EOPNOTSUPP;
+ goto unlock;
+ }
+
if (2 + msg[1].len > sizeof(ibuf)) {
warn("i2c rd: len=%d is too big!\n",
msg[1].len);
@@ -462,6 +490,12 @@ static int dw3101_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
/* first write first register number */
u8 ibuf[MAX_XFER_SIZE], obuf[3];
+ if (2 + msg[0].len != sizeof(obuf)) {
+ warn("i2c rd: len=%d is not 1!\n",
+ msg[0].len);
+ ret = -EOPNOTSUPP;
+ goto unlock;
+ }
if (2 + msg[1].len > sizeof(ibuf)) {
warn("i2c rd: len=%d is too big!\n",
msg[1].len);
@@ -696,6 +730,13 @@ static int su3000_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
msg[0].buf[0] = state->data[1];
break;
default:
+ if (3 + msg[0].len > sizeof(state->data)) {
+ warn("i2c wr: len=%d is too big!\n",
+ msg[0].len);
+ num = -EOPNOTSUPP;
+ break;
+ }
+
/* always i2c write*/
state->data[0] = 0x08;
state->data[1] = msg[0].addr;
@@ -711,6 +752,19 @@ static int su3000_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
break;
case 2:
/* always i2c read */
+ if (4 + msg[0].len > sizeof(state->data)) {
+ warn("i2c rd: len=%d is too big!\n",
+ msg[0].len);
+ num = -EOPNOTSUPP;
+ break;
+ }
+ if (1 + msg[1].len > sizeof(state->data)) {
+ warn("i2c rd: len=%d is too big!\n",
+ msg[1].len);
+ num = -EOPNOTSUPP;
+ break;
+ }
+
state->data[0] = 0x09;
state->data[1] = msg[0].len;
state->data[2] = msg[1].len;
diff --git a/drivers/media/usb/dvb-usb/ttusb2.c b/drivers/media/usb/dvb-usb/ttusb2.c
index ecc207fbaf3c..9e0d6a4166d2 100644
--- a/drivers/media/usb/dvb-usb/ttusb2.c
+++ b/drivers/media/usb/dvb-usb/ttusb2.c
@@ -78,6 +78,9 @@ static int ttusb2_msg(struct dvb_usb_device *d, u8 cmd,
u8 *s, *r = NULL;
int ret = 0;
+ if (4 + rlen > 64)
+ return -EIO;
+
s = kzalloc(wlen+4, GFP_KERNEL);
if (!s)
return -ENOMEM;
@@ -381,6 +384,22 @@ static int ttusb2_i2c_xfer(struct i2c_adapter *adap,struct i2c_msg msg[],int num
write_read = i+1 < num && (msg[i+1].flags & I2C_M_RD);
read = msg[i].flags & I2C_M_RD;
+ if (3 + msg[i].len > sizeof(obuf)) {
+ err("i2c wr len=%d too high", msg[i].len);
+ break;
+ }
+ if (write_read) {
+ if (3 + msg[i+1].len > sizeof(ibuf)) {
+ err("i2c rd len=%d too high", msg[i+1].len);
+ break;
+ }
+ } else if (read) {
+ if (3 + msg[i].len > sizeof(ibuf)) {
+ err("i2c rd len=%d too high", msg[i].len);
+ break;
+ }
+ }
+
obuf[0] = (msg[i].addr << 1) | (write_read | read);
if (read)
obuf[1] = 0;
diff --git a/drivers/media/usb/em28xx/Kconfig b/drivers/media/usb/em28xx/Kconfig
index aa131cf9989b..4cc029f18aa8 100644
--- a/drivers/media/usb/em28xx/Kconfig
+++ b/drivers/media/usb/em28xx/Kconfig
@@ -12,7 +12,7 @@ config VIDEO_EM28XX_V4L2
select VIDEO_TVP5150 if MEDIA_SUBDRV_AUTOSELECT
select VIDEO_MSP3400 if MEDIA_SUBDRV_AUTOSELECT
select VIDEO_MT9V011 if MEDIA_SUBDRV_AUTOSELECT && MEDIA_CAMERA_SUPPORT
-
+ select VIDEO_OV2640 if MEDIA_SUBDRV_AUTOSELECT && MEDIA_CAMERA_SUPPORT
---help---
This is a video4linux driver for Empia 28xx based TV cards.
@@ -39,6 +39,7 @@ config VIDEO_EM28XX_DVB
depends on VIDEO_EM28XX && DVB_CORE
select DVB_LGDT330X if MEDIA_SUBDRV_AUTOSELECT
select DVB_LGDT3305 if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_LGDT3306A if MEDIA_SUBDRV_AUTOSELECT
select DVB_ZL10353 if MEDIA_SUBDRV_AUTOSELECT
select DVB_TDA10023 if MEDIA_SUBDRV_AUTOSELECT
select DVB_S921 if MEDIA_SUBDRV_AUTOSELECT
@@ -61,6 +62,10 @@ config VIDEO_EM28XX_DVB
select MEDIA_TUNER_SI2157 if MEDIA_SUBDRV_AUTOSELECT
select DVB_TC90522 if MEDIA_SUBDRV_AUTOSELECT
select MEDIA_TUNER_QM1D1C0042 if MEDIA_SUBDRV_AUTOSELECT
+ select MEDIA_TUNER_SIMPLE if MEDIA_SUBDRV_AUTOSELECT
+ select MEDIA_TUNER_XC2028 if MEDIA_SUBDRV_AUTOSELECT
+ select MEDIA_TUNER_XC5000 if MEDIA_SUBDRV_AUTOSELECT
+ select MEDIA_TUNER_MT2060 if MEDIA_SUBDRV_AUTOSELECT
---help---
This adds support for DVB cards based on the
Empiatech em28xx chips.
diff --git a/drivers/media/usb/em28xx/em28xx-camera.c b/drivers/media/usb/em28xx/em28xx-camera.c
index 89c890ba7dd6..ae87dd3e671f 100644
--- a/drivers/media/usb/em28xx/em28xx-camera.c
+++ b/drivers/media/usb/em28xx/em28xx-camera.c
@@ -23,9 +23,7 @@
#include <linux/i2c.h>
#include <linux/usb.h>
-#include <media/soc_camera.h>
#include <media/i2c/mt9v011.h>
-#include <media/v4l2-clk.h>
#include <media/v4l2-common.h>
/* Possible i2c addresses of Micron sensors */
@@ -43,13 +41,6 @@ static unsigned short omnivision_sensor_addrs[] = {
I2C_CLIENT_END
};
-static struct soc_camera_link camlink = {
- .bus_id = 0,
- .flags = 0,
- .module_name = "em28xx",
- .unbalanced_power = true,
-};
-
/* FIXME: Should be replaced by a proper mt9m111 driver */
static int em28xx_initialize_mt9m111(struct em28xx *dev)
{
@@ -106,55 +97,35 @@ static int em28xx_probe_sensor_micron(struct em28xx *dev)
{
int ret, i;
char *name;
- u8 reg;
- __be16 id_be;
u16 id;
- struct i2c_client client = dev->i2c_client[dev->def_i2c_bus];
+ struct i2c_client *client = &dev->i2c_client[dev->def_i2c_bus];
dev->em28xx_sensor = EM28XX_NOSENSOR;
for (i = 0; micron_sensor_addrs[i] != I2C_CLIENT_END; i++) {
- client.addr = micron_sensor_addrs[i];
- /* NOTE: i2c_smbus_read_word_data() doesn't work with BE data */
+ client->addr = micron_sensor_addrs[i];
/* Read chip ID from register 0x00 */
- reg = 0x00;
- ret = i2c_master_send(&client, &reg, 1);
+ ret = i2c_smbus_read_word_data(client, 0x00); /* assumes LE */
if (ret < 0) {
if (ret != -ENXIO)
dev_err(&dev->intf->dev,
"couldn't read from i2c device 0x%02x: error %i\n",
- client.addr << 1, ret);
- continue;
- }
- ret = i2c_master_recv(&client, (u8 *)&id_be, 2);
- if (ret < 0) {
- dev_err(&dev->intf->dev,
- "couldn't read from i2c device 0x%02x: error %i\n",
- client.addr << 1, ret);
+ client->addr << 1, ret);
continue;
}
- id = be16_to_cpu(id_be);
+ id = swab16(ret); /* LE -> BE */
/* Read chip ID from register 0xff */
- reg = 0xff;
- ret = i2c_master_send(&client, &reg, 1);
+ ret = i2c_smbus_read_word_data(client, 0xff);
if (ret < 0) {
dev_err(&dev->intf->dev,
"couldn't read from i2c device 0x%02x: error %i\n",
- client.addr << 1, ret);
- continue;
- }
- ret = i2c_master_recv(&client, (u8 *)&id_be, 2);
- if (ret < 0) {
- dev_err(&dev->intf->dev,
- "couldn't read from i2c device 0x%02x: error %i\n",
- client.addr << 1, ret);
+ client->addr << 1, ret);
continue;
}
/* Validate chip ID to be sure we have a Micron device */
- if (id != be16_to_cpu(id_be))
+ if (id != swab16(ret))
continue;
/* Check chip ID */
- id = be16_to_cpu(id_be);
switch (id) {
case 0x1222:
name = "MT9V012"; /* MI370 */ /* 640x480 */
@@ -197,7 +168,6 @@ static int em28xx_probe_sensor_micron(struct em28xx *dev)
dev_info(&dev->intf->dev,
"sensor %s detected\n", name);
- dev->i2c_client[dev->def_i2c_bus].addr = client.addr;
return 0;
}
@@ -213,30 +183,30 @@ static int em28xx_probe_sensor_omnivision(struct em28xx *dev)
char *name;
u8 reg;
u16 id;
- struct i2c_client client = dev->i2c_client[dev->def_i2c_bus];
+ struct i2c_client *client = &dev->i2c_client[dev->def_i2c_bus];
dev->em28xx_sensor = EM28XX_NOSENSOR;
/* NOTE: these devices have the register auto incrementation disabled
* by default, so we have to use single byte reads ! */
for (i = 0; omnivision_sensor_addrs[i] != I2C_CLIENT_END; i++) {
- client.addr = omnivision_sensor_addrs[i];
+ client->addr = omnivision_sensor_addrs[i];
/* Read manufacturer ID from registers 0x1c-0x1d (BE) */
reg = 0x1c;
- ret = i2c_smbus_read_byte_data(&client, reg);
+ ret = i2c_smbus_read_byte_data(client, reg);
if (ret < 0) {
if (ret != -ENXIO)
dev_err(&dev->intf->dev,
"couldn't read from i2c device 0x%02x: error %i\n",
- client.addr << 1, ret);
+ client->addr << 1, ret);
continue;
}
id = ret << 8;
reg = 0x1d;
- ret = i2c_smbus_read_byte_data(&client, reg);
+ ret = i2c_smbus_read_byte_data(client, reg);
if (ret < 0) {
dev_err(&dev->intf->dev,
"couldn't read from i2c device 0x%02x: error %i\n",
- client.addr << 1, ret);
+ client->addr << 1, ret);
continue;
}
id += ret;
@@ -245,20 +215,20 @@ static int em28xx_probe_sensor_omnivision(struct em28xx *dev)
continue;
/* Read product ID from registers 0x0a-0x0b (BE) */
reg = 0x0a;
- ret = i2c_smbus_read_byte_data(&client, reg);
+ ret = i2c_smbus_read_byte_data(client, reg);
if (ret < 0) {
dev_err(&dev->intf->dev,
"couldn't read from i2c device 0x%02x: error %i\n",
- client.addr << 1, ret);
+ client->addr << 1, ret);
continue;
}
id = ret << 8;
reg = 0x0b;
- ret = i2c_smbus_read_byte_data(&client, reg);
+ ret = i2c_smbus_read_byte_data(client, reg);
if (ret < 0) {
dev_err(&dev->intf->dev,
"couldn't read from i2c device 0x%02x: error %i\n",
- client.addr << 1, ret);
+ client->addr << 1, ret);
continue;
}
id += ret;
@@ -309,7 +279,6 @@ static int em28xx_probe_sensor_omnivision(struct em28xx *dev)
dev_info(&dev->intf->dev,
"sensor %s detected\n", name);
- dev->i2c_client[dev->def_i2c_bus].addr = client.addr;
return 0;
}
@@ -341,17 +310,9 @@ int em28xx_detect_sensor(struct em28xx *dev)
int em28xx_init_camera(struct em28xx *dev)
{
- char clk_name[V4L2_CLK_NAME_SIZE];
struct i2c_client *client = &dev->i2c_client[dev->def_i2c_bus];
struct i2c_adapter *adap = &dev->i2c_adap[dev->def_i2c_bus];
struct em28xx_v4l2 *v4l2 = dev->v4l2;
- int ret = 0;
-
- v4l2_clk_name_i2c(clk_name, sizeof(clk_name),
- i2c_adapter_id(adap), client->addr);
- v4l2->clk = v4l2_clk_register_fixed(clk_name, -EINVAL);
- if (IS_ERR(v4l2->clk))
- return PTR_ERR(v4l2->clk);
switch (dev->em28xx_sensor) {
case EM28XX_MT9V011:
@@ -381,12 +342,9 @@ int em28xx_init_camera(struct em28xx *dev)
pdata.xtal = v4l2->sensor_xtal;
if (NULL ==
v4l2_i2c_new_subdev_board(&v4l2->v4l2_dev, adap,
- &mt9v011_info, NULL)) {
- ret = -ENODEV;
- break;
- }
- /* probably means GRGB 16 bit bayer */
- v4l2->vinmode = 0x0d;
+ &mt9v011_info, NULL))
+ return -ENODEV;
+ v4l2->vinmode = EM28XX_VINMODE_RGB8_GRBG;
v4l2->vinctl = 0x00;
break;
@@ -397,8 +355,7 @@ int em28xx_init_camera(struct em28xx *dev)
em28xx_initialize_mt9m001(dev);
- /* probably means BGGR 16 bit bayer */
- v4l2->vinmode = 0x0c;
+ v4l2->vinmode = EM28XX_VINMODE_RGB8_BGGR;
v4l2->vinctl = 0x00;
break;
@@ -410,7 +367,7 @@ int em28xx_init_camera(struct em28xx *dev)
em28xx_write_reg(dev, EM28XX_R0F_XCLK, dev->board.xclk);
em28xx_initialize_mt9m111(dev);
- v4l2->vinmode = 0x0a;
+ v4l2->vinmode = EM28XX_VINMODE_YUV422_UYVY;
v4l2->vinctl = 0x00;
break;
@@ -421,7 +378,6 @@ int em28xx_init_camera(struct em28xx *dev)
.type = "ov2640",
.flags = I2C_CLIENT_SCCB,
.addr = client->addr,
- .platform_data = &camlink,
};
struct v4l2_subdev_format format = {
.which = V4L2_SUBDEV_FORMAT_ACTIVE,
@@ -441,10 +397,8 @@ int em28xx_init_camera(struct em28xx *dev)
subdev =
v4l2_i2c_new_subdev_board(&v4l2->v4l2_dev, adap,
&ov2640_info, NULL);
- if (NULL == subdev) {
- ret = -ENODEV;
- break;
- }
+ if (subdev == NULL)
+ return -ENODEV;
format.format.code = MEDIA_BUS_FMT_YUYV8_2X8;
format.format.width = 640;
@@ -454,21 +408,16 @@ int em28xx_init_camera(struct em28xx *dev)
/* NOTE: for UXGA=1600x1200 switch to 12MHz */
dev->board.xclk = EM28XX_XCLK_FREQUENCY_24MHZ;
em28xx_write_reg(dev, EM28XX_R0F_XCLK, dev->board.xclk);
- v4l2->vinmode = 0x08;
+ v4l2->vinmode = EM28XX_VINMODE_YUV422_YUYV;
v4l2->vinctl = 0x00;
break;
}
case EM28XX_NOSENSOR:
default:
- ret = -EINVAL;
+ return -EINVAL;
}
- if (ret < 0) {
- v4l2_clk_unregister_fixed(v4l2->clk);
- v4l2->clk = NULL;
- }
-
- return ret;
+ return 0;
}
EXPORT_SYMBOL_GPL(em28xx_init_camera);
diff --git a/drivers/media/usb/em28xx/em28xx-cards.c b/drivers/media/usb/em28xx/em28xx-cards.c
index 5f90d0899a45..a12b599a1fa2 100644
--- a/drivers/media/usb/em28xx/em28xx-cards.c
+++ b/drivers/media/usb/em28xx/em28xx-cards.c
@@ -2600,6 +2600,8 @@ struct usb_device_id em28xx_id_table[] = {
.driver_info = EM28178_BOARD_TERRATEC_T2_STICK_HD },
{ USB_DEVICE(0x3275, 0x0085),
.driver_info = EM28178_BOARD_PLEX_PX_BCUD },
+ { USB_DEVICE(0xeb1a, 0x5051), /* Ion Video 2 PC MKII / Startech svid2usb23 / Raygo R12-41373 */
+ .driver_info = EM2860_BOARD_TVP5150_REFERENCE_DESIGN },
{ },
};
MODULE_DEVICE_TABLE(usb, em28xx_id_table);
@@ -2917,7 +2919,9 @@ static void em28xx_card_setup(struct em28xx *dev)
* If sensor is not found, then it isn't a webcam.
*/
if (dev->board.is_webcam) {
- if (em28xx_detect_sensor(dev) < 0)
+ em28xx_detect_sensor(dev);
+ if (dev->em28xx_sensor == EM28XX_NOSENSOR)
+ /* NOTE: error/unknown sensor/no sensor */
dev->board.is_webcam = 0;
}
@@ -2974,8 +2978,7 @@ static void em28xx_card_setup(struct em28xx *dev)
#endif
/* Call first TVeeprom */
- dev->i2c_client[dev->def_i2c_bus].addr = 0xa0 >> 1;
- tveeprom_hauppauge_analog(&dev->i2c_client[dev->def_i2c_bus], &tv, dev->eedata);
+ tveeprom_hauppauge_analog(&tv, dev->eedata);
dev->tuner_type = tv.tuner_type;
@@ -3666,9 +3669,11 @@ static int em28xx_usb_probe(struct usb_interface *interface,
try_bulk = usb_xfer_mode > 0;
}
- /* Disable V4L2 if the device doesn't have a decoder */
+ /* Disable V4L2 if the device doesn't have a decoder or image sensor */
if (has_video &&
- dev->board.decoder == EM28XX_NODECODER && !dev->board.is_webcam) {
+ dev->board.decoder == EM28XX_NODECODER &&
+ dev->em28xx_sensor == EM28XX_NOSENSOR) {
+
dev_err(&interface->dev,
"Currently, V4L2 is not supported on this model\n");
has_video = false;
diff --git a/drivers/media/usb/em28xx/em28xx-reg.h b/drivers/media/usb/em28xx/em28xx-reg.h
index afe7a66d7dc8..747525ca7ed5 100644
--- a/drivers/media/usb/em28xx/em28xx-reg.h
+++ b/drivers/media/usb/em28xx/em28xx-reg.h
@@ -93,6 +93,24 @@
#define EM28XX_XCLK_FREQUENCY_24MHZ 0x0b
#define EM28XX_R10_VINMODE 0x10
+ /* used by all non-camera devices: */
+#define EM28XX_VINMODE_YUV422_CbYCrY 0x10
+ /* used by camera devices: */
+#define EM28XX_VINMODE_YUV422_YUYV 0x08
+#define EM28XX_VINMODE_YUV422_YVYU 0x09
+#define EM28XX_VINMODE_YUV422_UYVY 0x0a
+#define EM28XX_VINMODE_YUV422_VYUY 0x0b
+#define EM28XX_VINMODE_RGB8_BGGR 0x0c
+#define EM28XX_VINMODE_RGB8_GRBG 0x0d
+#define EM28XX_VINMODE_RGB8_GBRG 0x0e
+#define EM28XX_VINMODE_RGB8_RGGB 0x0f
+ /*
+ * apparently:
+ * bit 0: swap component 1+2 with 3+4
+ * => e.g.: YUYV => YVYU, BGGR => GRBG
+ * bit 1: swap component 1 with 2 and 3 with 4
+ * => e.g.: YUYV => UYVY, BGGR => GBRG
+ */
#define EM28XX_R11_VINCTRL 0x11
diff --git a/drivers/media/usb/em28xx/em28xx-video.c b/drivers/media/usb/em28xx/em28xx-video.c
index 8d93100334ea..8d253a5df0a9 100644
--- a/drivers/media/usb/em28xx/em28xx-video.c
+++ b/drivers/media/usb/em28xx/em28xx-video.c
@@ -43,7 +43,6 @@
#include <media/v4l2-common.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-event.h>
-#include <media/v4l2-clk.h>
#include <media/drv-intf/msp3400.h>
#include <media/tuner.h>
@@ -117,6 +116,11 @@ static struct em28xx_fmt format[] = {
.depth = 16,
.reg = EM28XX_OUTFMT_RGB_16_656,
}, {
+ .name = "8 bpp Bayer RGRG..GBGB",
+ .fourcc = V4L2_PIX_FMT_SRGGB8,
+ .depth = 8,
+ .reg = EM28XX_OUTFMT_RGB_8_RGRG,
+ }, {
.name = "8 bpp Bayer BGBG..GRGR",
.fourcc = V4L2_PIX_FMT_SBGGR8,
.depth = 8,
@@ -2140,11 +2144,6 @@ static int em28xx_v4l2_fini(struct em28xx *dev)
v4l2_ctrl_handler_free(&v4l2->ctrl_handler);
v4l2_device_unregister(&v4l2->v4l2_dev);
- if (v4l2->clk) {
- v4l2_clk_unregister_fixed(v4l2->clk);
- v4l2->clk = NULL;
- }
-
kref_put(&v4l2->ref, em28xx_free_v4l2);
mutex_unlock(&dev->lock);
@@ -2465,7 +2464,7 @@ static int em28xx_v4l2_init(struct em28xx *dev)
/*
* Default format, used for tvp5150 or saa711x output formats
*/
- v4l2->vinmode = 0x10;
+ v4l2->vinmode = EM28XX_VINMODE_YUV422_CbYCrY;
v4l2->vinctl = EM28XX_VINCTRL_INTERLACED |
EM28XX_VINCTRL_CCIR656_ENABLE;
diff --git a/drivers/media/usb/em28xx/em28xx.h b/drivers/media/usb/em28xx/em28xx.h
index e9f379959fa5..e8d97d5ec161 100644
--- a/drivers/media/usb/em28xx/em28xx.h
+++ b/drivers/media/usb/em28xx/em28xx.h
@@ -510,7 +510,6 @@ struct em28xx_v4l2 {
struct v4l2_device v4l2_dev;
struct v4l2_ctrl_handler ctrl_handler;
- struct v4l2_clk *clk;
struct video_device vdev;
struct video_device vbi_dev;
diff --git a/drivers/media/usb/go7007/go7007-v4l2.c b/drivers/media/usb/go7007/go7007-v4l2.c
index 4eaba0c24629..ed5ec9773969 100644
--- a/drivers/media/usb/go7007/go7007-v4l2.c
+++ b/drivers/media/usb/go7007/go7007-v4l2.c
@@ -792,14 +792,13 @@ static int vidioc_subscribe_event(struct v4l2_fh *fh,
{
switch (sub->type) {
- case V4L2_EVENT_CTRL:
- return v4l2_ctrl_subscribe_event(fh, sub);
case V4L2_EVENT_MOTION_DET:
/* Allow for up to 30 events (1 second for NTSC) to be
* stored. */
return v4l2_event_subscribe(fh, sub, 30, NULL);
+ default:
+ return v4l2_ctrl_subscribe_event(fh, sub);
}
- return -EINVAL;
}
diff --git a/drivers/media/usb/gspca/konica.c b/drivers/media/usb/gspca/konica.c
index 71f273377f83..31b2117e8f1d 100644
--- a/drivers/media/usb/gspca/konica.c
+++ b/drivers/media/usb/gspca/konica.c
@@ -184,6 +184,9 @@ static int sd_start(struct gspca_dev *gspca_dev)
return -EIO;
}
+ if (alt->desc.bNumEndpoints < 2)
+ return -ENODEV;
+
packet_size = le16_to_cpu(alt->endpoint[0].desc.wMaxPacketSize);
n = gspca_dev->cam.cam_mode[gspca_dev->curr_mode].priv;
diff --git a/drivers/media/usb/pulse8-cec/Kconfig b/drivers/media/usb/pulse8-cec/Kconfig
index 6ffc407de62f..8937f3986a01 100644
--- a/drivers/media/usb/pulse8-cec/Kconfig
+++ b/drivers/media/usb/pulse8-cec/Kconfig
@@ -1,6 +1,6 @@
config USB_PULSE8_CEC
tristate "Pulse Eight HDMI CEC"
- depends on USB_ACM && MEDIA_CEC_SUPPORT
+ depends on USB_ACM && CEC_CORE
select SERIO
select SERIO_SERPORT
---help---
diff --git a/drivers/media/usb/pulse8-cec/pulse8-cec.c b/drivers/media/usb/pulse8-cec/pulse8-cec.c
index 7c18daeb0ade..1dfc2de1fe77 100644
--- a/drivers/media/usb/pulse8-cec/pulse8-cec.c
+++ b/drivers/media/usb/pulse8-cec/pulse8-cec.c
@@ -461,7 +461,7 @@ static int pulse8_apply_persistent_config(struct pulse8 *pulse8,
static int pulse8_cec_adap_enable(struct cec_adapter *adap, bool enable)
{
- struct pulse8 *pulse8 = adap->priv;
+ struct pulse8 *pulse8 = cec_get_drvdata(adap);
u8 cmd[16];
int err;
@@ -474,7 +474,7 @@ static int pulse8_cec_adap_enable(struct cec_adapter *adap, bool enable)
static int pulse8_cec_adap_log_addr(struct cec_adapter *adap, u8 log_addr)
{
- struct pulse8 *pulse8 = adap->priv;
+ struct pulse8 *pulse8 = cec_get_drvdata(adap);
u16 mask = 0;
u16 pa = adap->phys_addr;
u8 cmd[16];
@@ -594,7 +594,7 @@ unlock:
static int pulse8_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
u32 signal_free_time, struct cec_msg *msg)
{
- struct pulse8 *pulse8 = adap->priv;
+ struct pulse8 *pulse8 = cec_get_drvdata(adap);
u8 cmd[2];
unsigned int i;
int err;
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-eeprom.c b/drivers/media/usb/pvrusb2/pvrusb2-eeprom.c
index 4af2fb5c85d5..8b643d511a0b 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-eeprom.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-eeprom.c
@@ -118,15 +118,10 @@ int pvr2_eeprom_analyze(struct pvr2_hdw *hdw)
memset(&tvdata,0,sizeof(tvdata));
eeprom = pvr2_eeprom_fetch(hdw);
- if (!eeprom) return -EINVAL;
-
- {
- struct i2c_client fake_client;
- /* Newer version expects a useless client interface */
- fake_client.addr = hdw->eeprom_addr;
- fake_client.adapter = &hdw->i2c_adap;
- tveeprom_hauppauge_analog(&fake_client,&tvdata,eeprom);
- }
+ if (!eeprom)
+ return -EINVAL;
+
+ tveeprom_hauppauge_analog(&tvdata, eeprom);
trace_eeprom("eeprom assumed v4l tveeprom module");
trace_eeprom("eeprom direct call results:");
diff --git a/drivers/media/usb/rainshadow-cec/Kconfig b/drivers/media/usb/rainshadow-cec/Kconfig
new file mode 100644
index 000000000000..3eb86607efb8
--- /dev/null
+++ b/drivers/media/usb/rainshadow-cec/Kconfig
@@ -0,0 +1,10 @@
+config USB_RAINSHADOW_CEC
+ tristate "RainShadow Tech HDMI CEC"
+ depends on USB_ACM && CEC_CORE
+ select SERIO
+ select SERIO_SERPORT
+ ---help---
+ This is a cec driver for the RainShadow Tech HDMI CEC device.
+
+ To compile this driver as a module, choose M here: the
+ module will be called rainshadow-cec.
diff --git a/drivers/media/usb/rainshadow-cec/Makefile b/drivers/media/usb/rainshadow-cec/Makefile
new file mode 100644
index 000000000000..a79fbc77e1f7
--- /dev/null
+++ b/drivers/media/usb/rainshadow-cec/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_USB_RAINSHADOW_CEC) += rainshadow-cec.o
diff --git a/drivers/media/usb/rainshadow-cec/rainshadow-cec.c b/drivers/media/usb/rainshadow-cec/rainshadow-cec.c
new file mode 100644
index 000000000000..541ca543f71f
--- /dev/null
+++ b/drivers/media/usb/rainshadow-cec/rainshadow-cec.c
@@ -0,0 +1,388 @@
+/*
+ * RainShadow Tech HDMI CEC driver
+ *
+ * Copyright 2016 Hans Verkuil <hverkuil@xs4all.nl>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version. See the file COPYING in the main directory of
+ * this archive for more details.
+ */
+
+/*
+ * Notes:
+ *
+ * The higher level protocols are currently disabled. This can be added
+ * later, similar to how this is done for the Pulse Eight CEC driver.
+ *
+ * Documentation of the protocol is available here:
+ *
+ * http://rainshadowtech.com/doc/HDMICECtoUSBandRS232v2.0.pdf
+ */
+
+#include <linux/completion.h>
+#include <linux/ctype.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/serio.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/time.h>
+#include <linux/workqueue.h>
+
+#include <media/cec.h>
+
+MODULE_AUTHOR("Hans Verkuil <hverkuil@xs4all.nl>");
+MODULE_DESCRIPTION("RainShadow Tech HDMI CEC driver");
+MODULE_LICENSE("GPL");
+
+#define DATA_SIZE 256
+
+struct rain {
+ struct device *dev;
+ struct serio *serio;
+ struct cec_adapter *adap;
+ struct completion cmd_done;
+ struct work_struct work;
+
+ /* Low-level ringbuffer, collecting incoming characters */
+ char buf[DATA_SIZE];
+ unsigned int buf_rd_idx;
+ unsigned int buf_wr_idx;
+ unsigned int buf_len;
+ spinlock_t buf_lock;
+
+ /* command buffer */
+ char cmd[DATA_SIZE];
+ unsigned int cmd_idx;
+ bool cmd_started;
+
+ /* reply to a command, only used to store the firmware version */
+ char cmd_reply[DATA_SIZE];
+
+ struct mutex write_lock;
+};
+
+static void rain_process_msg(struct rain *rain)
+{
+ struct cec_msg msg = {};
+ const char *cmd = rain->cmd + 3;
+ int stat = -1;
+
+ for (; *cmd; cmd++) {
+ if (!isxdigit(*cmd))
+ continue;
+ if (isxdigit(cmd[0]) && isxdigit(cmd[1])) {
+ if (msg.len == CEC_MAX_MSG_SIZE)
+ break;
+ if (hex2bin(msg.msg + msg.len, cmd, 1))
+ continue;
+ msg.len++;
+ cmd++;
+ continue;
+ }
+ if (!cmd[1])
+ stat = hex_to_bin(cmd[0]);
+ break;
+ }
+
+ if (rain->cmd[0] == 'R') {
+ if (stat == 1 || stat == 2)
+ cec_received_msg(rain->adap, &msg);
+ return;
+ }
+
+ switch (stat) {
+ case 1:
+ cec_transmit_done(rain->adap, CEC_TX_STATUS_OK,
+ 0, 0, 0, 0);
+ break;
+ case 2:
+ cec_transmit_done(rain->adap, CEC_TX_STATUS_NACK,
+ 0, 1, 0, 0);
+ break;
+ default:
+ cec_transmit_done(rain->adap, CEC_TX_STATUS_LOW_DRIVE,
+ 0, 0, 0, 1);
+ break;
+ }
+}
+
+static void rain_irq_work_handler(struct work_struct *work)
+{
+ struct rain *rain =
+ container_of(work, struct rain, work);
+
+ while (true) {
+ unsigned long flags;
+ bool exit_loop;
+ char data;
+
+ spin_lock_irqsave(&rain->buf_lock, flags);
+ exit_loop = rain->buf_len == 0;
+ if (rain->buf_len) {
+ data = rain->buf[rain->buf_rd_idx];
+ rain->buf_len--;
+ rain->buf_rd_idx = (rain->buf_rd_idx + 1) & 0xff;
+ }
+ spin_unlock_irqrestore(&rain->buf_lock, flags);
+
+ if (exit_loop)
+ break;
+
+ if (!rain->cmd_started && data != '?')
+ continue;
+
+ switch (data) {
+ case '\r':
+ rain->cmd[rain->cmd_idx] = '\0';
+ dev_dbg(rain->dev, "received: %s\n", rain->cmd);
+ if (!memcmp(rain->cmd, "REC", 3) ||
+ !memcmp(rain->cmd, "STA", 3)) {
+ rain_process_msg(rain);
+ } else {
+ strcpy(rain->cmd_reply, rain->cmd);
+ complete(&rain->cmd_done);
+ }
+ rain->cmd_idx = 0;
+ rain->cmd_started = false;
+ break;
+
+ case '\n':
+ rain->cmd_idx = 0;
+ rain->cmd_started = false;
+ break;
+
+ case '?':
+ rain->cmd_idx = 0;
+ rain->cmd_started = true;
+ break;
+
+ default:
+ if (rain->cmd_idx >= DATA_SIZE - 1) {
+ dev_dbg(rain->dev,
+ "throwing away %d bytes of garbage\n", rain->cmd_idx);
+ rain->cmd_idx = 0;
+ }
+ rain->cmd[rain->cmd_idx++] = data;
+ break;
+ }
+ }
+}
+
+static irqreturn_t rain_interrupt(struct serio *serio, unsigned char data,
+ unsigned int flags)
+{
+ struct rain *rain = serio_get_drvdata(serio);
+
+ if (rain->buf_len == DATA_SIZE) {
+ dev_warn_once(rain->dev, "buffer overflow\n");
+ return IRQ_HANDLED;
+ }
+ spin_lock(&rain->buf_lock);
+ rain->buf_len++;
+ rain->buf[rain->buf_wr_idx] = data;
+ rain->buf_wr_idx = (rain->buf_wr_idx + 1) & 0xff;
+ spin_unlock(&rain->buf_lock);
+ schedule_work(&rain->work);
+ return IRQ_HANDLED;
+}
+
+static void rain_disconnect(struct serio *serio)
+{
+ struct rain *rain = serio_get_drvdata(serio);
+
+ cancel_work_sync(&rain->work);
+ cec_unregister_adapter(rain->adap);
+ dev_info(&serio->dev, "disconnected\n");
+ serio_close(serio);
+ serio_set_drvdata(serio, NULL);
+ kfree(rain);
+}
+
+static int rain_send(struct rain *rain, const char *command)
+{
+ int err = serio_write(rain->serio, '!');
+
+ dev_dbg(rain->dev, "send: %s\n", command);
+ while (!err && *command)
+ err = serio_write(rain->serio, *command++);
+ if (!err)
+ err = serio_write(rain->serio, '~');
+
+ return err;
+}
+
+static int rain_send_and_wait(struct rain *rain,
+ const char *cmd, const char *reply)
+{
+ int err;
+
+ init_completion(&rain->cmd_done);
+
+ mutex_lock(&rain->write_lock);
+ err = rain_send(rain, cmd);
+ if (err)
+ goto err;
+
+ if (!wait_for_completion_timeout(&rain->cmd_done, HZ)) {
+ err = -ETIMEDOUT;
+ goto err;
+ }
+ if (reply && strncmp(rain->cmd_reply, reply, strlen(reply))) {
+ dev_dbg(rain->dev,
+ "transmit of '%s': received '%s' instead of '%s'\n",
+ cmd, rain->cmd_reply, reply);
+ err = -EIO;
+ }
+err:
+ mutex_unlock(&rain->write_lock);
+ return err;
+}
+
+static int rain_setup(struct rain *rain, struct serio *serio,
+ struct cec_log_addrs *log_addrs, u16 *pa)
+{
+ int err;
+
+ err = rain_send_and_wait(rain, "R", "REV");
+ if (err)
+ return err;
+ dev_info(rain->dev, "Firmware version %s\n", rain->cmd_reply + 4);
+
+ err = rain_send_and_wait(rain, "Q 1", "QTY");
+ if (err)
+ return err;
+ err = rain_send_and_wait(rain, "c0000", "CFG");
+ if (err)
+ return err;
+ return rain_send_and_wait(rain, "A F 0000", "ADR");
+}
+
+static int rain_cec_adap_enable(struct cec_adapter *adap, bool enable)
+{
+ return 0;
+}
+
+static int rain_cec_adap_log_addr(struct cec_adapter *adap, u8 log_addr)
+{
+ struct rain *rain = cec_get_drvdata(adap);
+ u8 cmd[16];
+
+ if (log_addr == CEC_LOG_ADDR_INVALID)
+ log_addr = CEC_LOG_ADDR_UNREGISTERED;
+ snprintf(cmd, sizeof(cmd), "A %x", log_addr);
+ return rain_send_and_wait(rain, cmd, "ADR");
+}
+
+static int rain_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
+ u32 signal_free_time, struct cec_msg *msg)
+{
+ struct rain *rain = cec_get_drvdata(adap);
+ char cmd[2 * CEC_MAX_MSG_SIZE + 16];
+ unsigned int i;
+ int err;
+
+ if (msg->len == 1) {
+ snprintf(cmd, sizeof(cmd), "x%x", cec_msg_destination(msg));
+ } else {
+ char hex[3];
+
+ snprintf(cmd, sizeof(cmd), "x%x %02x ",
+ cec_msg_destination(msg), msg->msg[1]);
+ for (i = 2; i < msg->len; i++) {
+ snprintf(hex, sizeof(hex), "%02x", msg->msg[i]);
+ strncat(cmd, hex, sizeof(cmd));
+ }
+ }
+ mutex_lock(&rain->write_lock);
+ err = rain_send(rain, cmd);
+ mutex_unlock(&rain->write_lock);
+ return err;
+}
+
+static const struct cec_adap_ops rain_cec_adap_ops = {
+ .adap_enable = rain_cec_adap_enable,
+ .adap_log_addr = rain_cec_adap_log_addr,
+ .adap_transmit = rain_cec_adap_transmit,
+};
+
+static int rain_connect(struct serio *serio, struct serio_driver *drv)
+{
+ u32 caps = CEC_CAP_TRANSMIT | CEC_CAP_LOG_ADDRS | CEC_CAP_PHYS_ADDR |
+ CEC_CAP_PASSTHROUGH | CEC_CAP_RC | CEC_CAP_MONITOR_ALL;
+ struct rain *rain;
+ int err = -ENOMEM;
+ struct cec_log_addrs log_addrs = {};
+ u16 pa = CEC_PHYS_ADDR_INVALID;
+
+ rain = kzalloc(sizeof(*rain), GFP_KERNEL);
+
+ if (!rain)
+ return -ENOMEM;
+
+ rain->serio = serio;
+ rain->adap = cec_allocate_adapter(&rain_cec_adap_ops, rain,
+ "HDMI CEC", caps, 1);
+ err = PTR_ERR_OR_ZERO(rain->adap);
+ if (err < 0)
+ goto free_device;
+
+ rain->dev = &serio->dev;
+ serio_set_drvdata(serio, rain);
+ INIT_WORK(&rain->work, rain_irq_work_handler);
+ mutex_init(&rain->write_lock);
+
+ err = serio_open(serio, drv);
+ if (err)
+ goto delete_adap;
+
+ err = rain_setup(rain, serio, &log_addrs, &pa);
+ if (err)
+ goto close_serio;
+
+ err = cec_register_adapter(rain->adap, &serio->dev);
+ if (err < 0)
+ goto close_serio;
+
+ rain->dev = &rain->adap->devnode.dev;
+ return 0;
+
+close_serio:
+ serio_close(serio);
+delete_adap:
+ cec_delete_adapter(rain->adap);
+ serio_set_drvdata(serio, NULL);
+free_device:
+ kfree(rain);
+ return err;
+}
+
+static struct serio_device_id rain_serio_ids[] = {
+ {
+ .type = SERIO_RS232,
+ .proto = SERIO_RAINSHADOW_CEC,
+ .id = SERIO_ANY,
+ .extra = SERIO_ANY,
+ },
+ { 0 }
+};
+
+MODULE_DEVICE_TABLE(serio, rain_serio_ids);
+
+static struct serio_driver rain_drv = {
+ .driver = {
+ .name = "rainshadow-cec",
+ },
+ .description = "RainShadow Tech HDMI CEC driver",
+ .id_table = rain_serio_ids,
+ .interrupt = rain_interrupt,
+ .connect = rain_connect,
+ .disconnect = rain_disconnect,
+};
+
+module_serio_driver(rain_drv);
diff --git a/drivers/media/usb/stk1160/Kconfig b/drivers/media/usb/stk1160/Kconfig
index 22dff4f3b921..425ed00e2599 100644
--- a/drivers/media/usb/stk1160/Kconfig
+++ b/drivers/media/usb/stk1160/Kconfig
@@ -6,7 +6,11 @@ config VIDEO_STK1160_COMMON
This is a video4linux driver for STK1160 based video capture devices.
To compile this driver as a module, choose M here: the
- module will be called stk1160
+ module will be called stk1160.
+
+ This driver only provides support for video capture. For audio
+ capture, you need to select the snd-usb-audio driver (i.e.
+ CONFIG_SND_USB_AUDIO).
config VIDEO_STK1160
tristate
diff --git a/drivers/media/usb/tm6000/tm6000-video.c b/drivers/media/usb/tm6000/tm6000-video.c
index c4fdc1fa32ef..7e960d0a5b92 100644
--- a/drivers/media/usb/tm6000/tm6000-video.c
+++ b/drivers/media/usb/tm6000/tm6000-video.c
@@ -631,7 +631,7 @@ static int tm6000_prepare_isoc(struct tm6000_core *dev)
urb = usb_alloc_urb(max_packets, GFP_KERNEL);
if (!urb) {
tm6000_uninit_isoc(dev);
- usb_free_urb(urb);
+ tm6000_free_urb_buffers(dev);
return -ENOMEM;
}
dev->isoc_ctl.urb[i] = urb;
diff --git a/drivers/media/usb/usbvision/usbvision-video.c b/drivers/media/usb/usbvision/usbvision-video.c
index f5c635a67d74..f9c3325aa4d4 100644
--- a/drivers/media/usb/usbvision/usbvision-video.c
+++ b/drivers/media/usb/usbvision/usbvision-video.c
@@ -1501,7 +1501,14 @@ static int usbvision_probe(struct usb_interface *intf,
}
for (i = 0; i < usbvision->num_alt; i++) {
- u16 tmp = le16_to_cpu(uif->altsetting[i].endpoint[1].desc.
+ u16 tmp;
+
+ if (uif->altsetting[i].desc.bNumEndpoints < 2) {
+ ret = -ENODEV;
+ goto err_pkt;
+ }
+
+ tmp = le16_to_cpu(uif->altsetting[i].endpoint[1].desc.
wMaxPacketSize);
usbvision->alt_max_pkt_size[i] =
(tmp & 0x07ff) * (((tmp & 0x1800) >> 11) + 1);
diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
index 04bf35063c4c..46d6be0bb316 100644
--- a/drivers/media/usb/uvc/uvc_driver.c
+++ b/drivers/media/usb/uvc/uvc_driver.c
@@ -188,6 +188,21 @@ static struct uvc_format_desc uvc_fmts[] = {
.guid = UVC_GUID_FORMAT_GR16,
.fcc = V4L2_PIX_FMT_SGRBG16,
},
+ {
+ .name = "Depth data 16-bit (Z16)",
+ .guid = UVC_GUID_FORMAT_INVZ,
+ .fcc = V4L2_PIX_FMT_Z16,
+ },
+ {
+ .name = "Greyscale 10-bit (Y10 )",
+ .guid = UVC_GUID_FORMAT_INVI,
+ .fcc = V4L2_PIX_FMT_Y10,
+ },
+ {
+ .name = "IR:Depth 26-bit (INZI)",
+ .guid = UVC_GUID_FORMAT_INZI,
+ .fcc = V4L2_PIX_FMT_INZI,
+ },
};
/* ------------------------------------------------------------------------
diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c
index 07a6c833ef7b..47d93a938dde 100644
--- a/drivers/media/usb/uvc/uvc_video.c
+++ b/drivers/media/usb/uvc/uvc_video.c
@@ -818,7 +818,7 @@ static void uvc_video_stats_decode(struct uvc_streaming *stream,
/* Update the packets counters. */
stream->stats.frame.nb_packets++;
- if (len > header_size)
+ if (len <= header_size)
stream->stats.frame.nb_empty++;
if (data[1] & UVC_STREAM_ERR)
@@ -868,14 +868,8 @@ size_t uvc_video_stats_dump(struct uvc_streaming *stream, char *buf,
struct timespec ts;
size_t count = 0;
- ts.tv_sec = stream->stats.stream.stop_ts.tv_sec
- - stream->stats.stream.start_ts.tv_sec;
- ts.tv_nsec = stream->stats.stream.stop_ts.tv_nsec
- - stream->stats.stream.start_ts.tv_nsec;
- if (ts.tv_nsec < 0) {
- ts.tv_sec--;
- ts.tv_nsec += 1000000000;
- }
+ ts = timespec_sub(stream->stats.stream.stop_ts,
+ stream->stats.stream.start_ts);
/* Compute the SCR.SOF frequency estimate. At the nominal 1kHz SOF
* frequency this will not overflow before more than 1h.
diff --git a/drivers/media/usb/uvc/uvcvideo.h b/drivers/media/usb/uvc/uvcvideo.h
index 4205e7a423f0..15e415e32c7f 100644
--- a/drivers/media/usb/uvc/uvcvideo.h
+++ b/drivers/media/usb/uvc/uvcvideo.h
@@ -143,6 +143,15 @@
#define UVC_GUID_FORMAT_RW10 \
{ 'R', 'W', '1', '0', 0x00, 0x00, 0x10, 0x00, \
0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_INVZ \
+ { 'I', 'N', 'V', 'Z', 0x90, 0x2d, 0x58, 0x4a, \
+ 0x92, 0x0b, 0x77, 0x3f, 0x1f, 0x2c, 0x55, 0x6b}
+#define UVC_GUID_FORMAT_INZI \
+ { 'I', 'N', 'Z', 'I', 0x66, 0x1a, 0x42, 0xa2, \
+ 0x90, 0x65, 0xd0, 0x18, 0x14, 0xa8, 0xef, 0x8a}
+#define UVC_GUID_FORMAT_INVI \
+ { 'I', 'N', 'V', 'I', 0xdb, 0x57, 0x49, 0x5e, \
+ 0x8e, 0x3f, 0xf4, 0x79, 0x53, 0x2b, 0x94, 0x6f}
/* ------------------------------------------------------------------------
* Driver specific constants.
diff --git a/drivers/media/usb/zr364xx/zr364xx.c b/drivers/media/usb/zr364xx/zr364xx.c
index f2d6fc03dda0..efdcd5bd6a4c 100644
--- a/drivers/media/usb/zr364xx/zr364xx.c
+++ b/drivers/media/usb/zr364xx/zr364xx.c
@@ -600,6 +600,14 @@ static int zr364xx_read_video_callback(struct zr364xx_camera *cam,
ptr = pdest = frm->lpvbits;
if (frm->ulState == ZR364XX_READ_IDLE) {
+ if (purb->actual_length < 128) {
+ /* header incomplete */
+ dev_info(&cam->udev->dev,
+ "%s: buffer (%d bytes) too small to hold jpeg header. Discarding.\n",
+ __func__, purb->actual_length);
+ return -EINVAL;
+ }
+
frm->ulState = ZR364XX_READ_FRAME;
frm->cur_size = 0;
diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
index eac9565dc3d8..6f52970f8b54 100644
--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
+++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
@@ -161,6 +161,20 @@ static inline int put_v4l2_sdr_format(struct v4l2_sdr_format *kp, struct v4l2_sd
return 0;
}
+static inline int get_v4l2_meta_format(struct v4l2_meta_format *kp, struct v4l2_meta_format __user *up)
+{
+ if (copy_from_user(kp, up, sizeof(struct v4l2_meta_format)))
+ return -EFAULT;
+ return 0;
+}
+
+static inline int put_v4l2_meta_format(struct v4l2_meta_format *kp, struct v4l2_meta_format __user *up)
+{
+ if (copy_to_user(up, kp, sizeof(struct v4l2_meta_format)))
+ return -EFAULT;
+ return 0;
+}
+
struct v4l2_format32 {
__u32 type; /* enum v4l2_buf_type */
union {
@@ -170,6 +184,7 @@ struct v4l2_format32 {
struct v4l2_vbi_format vbi;
struct v4l2_sliced_vbi_format sliced;
struct v4l2_sdr_format sdr;
+ struct v4l2_meta_format meta;
__u8 raw_data[200]; /* user-defined */
} fmt;
};
@@ -216,6 +231,8 @@ static int __get_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __us
case V4L2_BUF_TYPE_SDR_CAPTURE:
case V4L2_BUF_TYPE_SDR_OUTPUT:
return get_v4l2_sdr_format(&kp->fmt.sdr, &up->fmt.sdr);
+ case V4L2_BUF_TYPE_META_CAPTURE:
+ return get_v4l2_meta_format(&kp->fmt.meta, &up->fmt.meta);
default:
pr_info("compat_ioctl32: unexpected VIDIOC_FMT type %d\n",
kp->type);
@@ -263,6 +280,8 @@ static int __put_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __us
case V4L2_BUF_TYPE_SDR_CAPTURE:
case V4L2_BUF_TYPE_SDR_OUTPUT:
return put_v4l2_sdr_format(&kp->fmt.sdr, &up->fmt.sdr);
+ case V4L2_BUF_TYPE_META_CAPTURE:
+ return put_v4l2_meta_format(&kp->fmt.meta, &up->fmt.meta);
default:
pr_info("compat_ioctl32: unexpected VIDIOC_FMT type %d\n",
kp->type);
@@ -990,6 +1009,10 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar
if (put_v4l2_ext_controls32(&karg.v2ecs, up))
err = -EFAULT;
break;
+ case VIDIOC_S_EDID:
+ if (put_v4l2_edid32(&karg.v2edid, up))
+ err = -EFAULT;
+ break;
}
if (err)
return err;
@@ -1011,7 +1034,6 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar
break;
case VIDIOC_G_EDID:
- case VIDIOC_S_EDID:
err = put_v4l2_edid32(&karg.v2edid, up);
break;
diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c
index b9e08e3d6e0e..ec42872d11cf 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls.c
+++ b/drivers/media/v4l2-core/v4l2-ctrls.c
@@ -459,8 +459,8 @@ const char * const *v4l2_ctrl_get_menu(u32 id)
};
static const char * const dv_rgb_range[] = {
"Automatic",
- "RGB limited range (16-235)",
- "RGB full range (0-255)",
+ "RGB Limited Range (16-235)",
+ "RGB Full Range (0-255)",
NULL,
};
static const char * const dv_it_content_type[] = {
@@ -997,6 +997,10 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
*min = 0;
*max = *step = 1;
break;
+ case V4L2_CID_ROTATE:
+ *type = V4L2_CTRL_TYPE_INTEGER;
+ *flags |= V4L2_CTRL_FLAG_MODIFY_LAYOUT;
+ break;
case V4L2_CID_MPEG_VIDEO_MV_H_SEARCH_RANGE:
case V4L2_CID_MPEG_VIDEO_MV_V_SEARCH_RANGE:
*type = V4L2_CTRL_TYPE_INTEGER;
diff --git a/drivers/media/v4l2-core/v4l2-dev.c b/drivers/media/v4l2-core/v4l2-dev.c
index fa2124cb31bd..c647ba648805 100644
--- a/drivers/media/v4l2-core/v4l2-dev.c
+++ b/drivers/media/v4l2-core/v4l2-dev.c
@@ -575,30 +575,34 @@ static void determine_valid_ioctls(struct video_device *vdev)
set_bit(_IOC_NR(VIDIOC_ENUM_FREQ_BANDS), valid_ioctls);
if (is_vid || is_tch) {
- /* video specific ioctls */
+ /* video and metadata specific ioctls */
if ((is_rx && (ops->vidioc_enum_fmt_vid_cap ||
ops->vidioc_enum_fmt_vid_cap_mplane ||
- ops->vidioc_enum_fmt_vid_overlay)) ||
+ ops->vidioc_enum_fmt_vid_overlay ||
+ ops->vidioc_enum_fmt_meta_cap)) ||
(is_tx && (ops->vidioc_enum_fmt_vid_out ||
ops->vidioc_enum_fmt_vid_out_mplane)))
set_bit(_IOC_NR(VIDIOC_ENUM_FMT), valid_ioctls);
if ((is_rx && (ops->vidioc_g_fmt_vid_cap ||
ops->vidioc_g_fmt_vid_cap_mplane ||
- ops->vidioc_g_fmt_vid_overlay)) ||
+ ops->vidioc_g_fmt_vid_overlay ||
+ ops->vidioc_g_fmt_meta_cap)) ||
(is_tx && (ops->vidioc_g_fmt_vid_out ||
ops->vidioc_g_fmt_vid_out_mplane ||
ops->vidioc_g_fmt_vid_out_overlay)))
set_bit(_IOC_NR(VIDIOC_G_FMT), valid_ioctls);
if ((is_rx && (ops->vidioc_s_fmt_vid_cap ||
ops->vidioc_s_fmt_vid_cap_mplane ||
- ops->vidioc_s_fmt_vid_overlay)) ||
+ ops->vidioc_s_fmt_vid_overlay ||
+ ops->vidioc_s_fmt_meta_cap)) ||
(is_tx && (ops->vidioc_s_fmt_vid_out ||
ops->vidioc_s_fmt_vid_out_mplane ||
ops->vidioc_s_fmt_vid_out_overlay)))
set_bit(_IOC_NR(VIDIOC_S_FMT), valid_ioctls);
if ((is_rx && (ops->vidioc_try_fmt_vid_cap ||
ops->vidioc_try_fmt_vid_cap_mplane ||
- ops->vidioc_try_fmt_vid_overlay)) ||
+ ops->vidioc_try_fmt_vid_overlay ||
+ ops->vidioc_try_fmt_meta_cap)) ||
(is_tx && (ops->vidioc_try_fmt_vid_out ||
ops->vidioc_try_fmt_vid_out_mplane ||
ops->vidioc_try_fmt_vid_out_overlay)))
@@ -664,7 +668,7 @@ static void determine_valid_ioctls(struct video_device *vdev)
}
if (is_vid || is_vbi || is_sdr || is_tch) {
- /* ioctls valid for video, vbi or sdr */
+ /* ioctls valid for video, metadata, vbi or sdr */
SET_VALID_IOCTL(ops, VIDIOC_REQBUFS, vidioc_reqbufs);
SET_VALID_IOCTL(ops, VIDIOC_QUERYBUF, vidioc_querybuf);
SET_VALID_IOCTL(ops, VIDIOC_QBUF, vidioc_qbuf);
diff --git a/drivers/media/v4l2-core/v4l2-device.c b/drivers/media/v4l2-core/v4l2-device.c
index f364cc1b521d..937c6de85606 100644
--- a/drivers/media/v4l2-core/v4l2-device.c
+++ b/drivers/media/v4l2-core/v4l2-device.c
@@ -235,6 +235,9 @@ int v4l2_device_register_subdev_nodes(struct v4l2_device *v4l2_dev)
if (!(sd->flags & V4L2_SUBDEV_FL_HAS_DEVNODE))
continue;
+ if (sd->devnode)
+ continue;
+
vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
if (!vdev) {
err = -ENOMEM;
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
index 0c3f238a2e76..e5a2187381db 100644
--- a/drivers/media/v4l2-core/v4l2-ioctl.c
+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
@@ -155,6 +155,7 @@ const char *v4l2_type_names[] = {
[V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE] = "vid-out-mplane",
[V4L2_BUF_TYPE_SDR_CAPTURE] = "sdr-cap",
[V4L2_BUF_TYPE_SDR_OUTPUT] = "sdr-out",
+ [V4L2_BUF_TYPE_META_CAPTURE] = "meta-cap",
};
EXPORT_SYMBOL(v4l2_type_names);
@@ -246,6 +247,7 @@ static void v4l_print_format(const void *arg, bool write_only)
const struct v4l2_sliced_vbi_format *sliced;
const struct v4l2_window *win;
const struct v4l2_sdr_format *sdr;
+ const struct v4l2_meta_format *meta;
unsigned i;
pr_cont("type=%s", prt_names(p->type, v4l2_type_names));
@@ -325,6 +327,15 @@ static void v4l_print_format(const void *arg, bool write_only)
(sdr->pixelformat >> 16) & 0xff,
(sdr->pixelformat >> 24) & 0xff);
break;
+ case V4L2_BUF_TYPE_META_CAPTURE:
+ meta = &p->fmt.meta;
+ pr_cont(", dataformat=%c%c%c%c, buffersize=%u\n",
+ (meta->dataformat >> 0) & 0xff,
+ (meta->dataformat >> 8) & 0xff,
+ (meta->dataformat >> 16) & 0xff,
+ (meta->dataformat >> 24) & 0xff,
+ meta->buffersize);
+ break;
}
}
@@ -943,6 +954,10 @@ static int check_fmt(struct file *file, enum v4l2_buf_type type)
if (is_sdr && is_tx && ops->vidioc_g_fmt_sdr_out)
return 0;
break;
+ case V4L2_BUF_TYPE_META_CAPTURE:
+ if (is_vid && is_rx && ops->vidioc_g_fmt_meta_cap)
+ return 0;
+ break;
default:
break;
}
@@ -1131,6 +1146,7 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt)
case V4L2_PIX_FMT_Y8I: descr = "Interleaved 8-bit Greyscale"; break;
case V4L2_PIX_FMT_Y12I: descr = "Interleaved 12-bit Greyscale"; break;
case V4L2_PIX_FMT_Z16: descr = "16-bit Depth"; break;
+ case V4L2_PIX_FMT_INZI: descr = "Planar 10:16 Greyscale Depth"; break;
case V4L2_PIX_FMT_PAL8: descr = "8-bit Palette"; break;
case V4L2_PIX_FMT_UV8: descr = "8-bit Chrominance UV 4-4"; break;
case V4L2_PIX_FMT_YVU410: descr = "Planar YVU 4:1:0"; break;
@@ -1217,6 +1233,8 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt)
case V4L2_TCH_FMT_DELTA_TD08: descr = "8-bit signed deltas"; break;
case V4L2_TCH_FMT_TU16: descr = "16-bit unsigned touch data"; break;
case V4L2_TCH_FMT_TU08: descr = "8-bit unsigned touch data"; break;
+ case V4L2_META_FMT_VSP1_HGO: descr = "R-Car VSP1 1-D Histogram"; break;
+ case V4L2_META_FMT_VSP1_HGT: descr = "R-Car VSP1 2-D Histogram"; break;
default:
/* Compressed formats */
@@ -1326,6 +1344,11 @@ static int v4l_enum_fmt(const struct v4l2_ioctl_ops *ops,
break;
ret = ops->vidioc_enum_fmt_sdr_out(file, fh, arg);
break;
+ case V4L2_BUF_TYPE_META_CAPTURE:
+ if (unlikely(!is_rx || !is_vid || !ops->vidioc_enum_fmt_meta_cap))
+ break;
+ ret = ops->vidioc_enum_fmt_meta_cap(file, fh, arg);
+ break;
}
if (ret == 0)
v4l_fill_fmtdesc(p);
@@ -1425,6 +1448,10 @@ static int v4l_g_fmt(const struct v4l2_ioctl_ops *ops,
if (unlikely(!is_tx || !is_sdr || !ops->vidioc_g_fmt_sdr_out))
break;
return ops->vidioc_g_fmt_sdr_out(file, fh, arg);
+ case V4L2_BUF_TYPE_META_CAPTURE:
+ if (unlikely(!is_rx || !is_vid || !ops->vidioc_g_fmt_meta_cap))
+ break;
+ return ops->vidioc_g_fmt_meta_cap(file, fh, arg);
}
return -EINVAL;
}
@@ -1530,6 +1557,11 @@ static int v4l_s_fmt(const struct v4l2_ioctl_ops *ops,
break;
CLEAR_AFTER_FIELD(p, fmt.sdr);
return ops->vidioc_s_fmt_sdr_out(file, fh, arg);
+ case V4L2_BUF_TYPE_META_CAPTURE:
+ if (unlikely(!is_rx || !is_vid || !ops->vidioc_s_fmt_meta_cap))
+ break;
+ CLEAR_AFTER_FIELD(p, fmt.meta);
+ return ops->vidioc_s_fmt_meta_cap(file, fh, arg);
}
return -EINVAL;
}
@@ -1615,6 +1647,11 @@ static int v4l_try_fmt(const struct v4l2_ioctl_ops *ops,
break;
CLEAR_AFTER_FIELD(p, fmt.sdr);
return ops->vidioc_try_fmt_sdr_out(file, fh, arg);
+ case V4L2_BUF_TYPE_META_CAPTURE:
+ if (unlikely(!is_rx || !is_vid || !ops->vidioc_try_fmt_meta_cap))
+ break;
+ CLEAR_AFTER_FIELD(p, fmt.meta);
+ return ops->vidioc_try_fmt_meta_cap(file, fh, arg);
}
return -EINVAL;
}
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
index 7c1d390ea438..94afbbf92807 100644
--- a/drivers/media/v4l2-core/videobuf2-core.c
+++ b/drivers/media/v4l2-core/videobuf2-core.c
@@ -984,11 +984,12 @@ static int __qbuf_userptr(struct vb2_buffer *vb, const void *pb)
memset(planes, 0, sizeof(planes[0]) * vb->num_planes);
/* Copy relevant information provided by the userspace */
- if (pb)
+ if (pb) {
ret = call_bufop(vb->vb2_queue, fill_vb2_buffer,
vb, pb, planes);
- if (ret)
- return ret;
+ if (ret)
+ return ret;
+ }
for (plane = 0; plane < vb->num_planes; ++plane) {
/* Skip the plane if already verified */
@@ -1101,11 +1102,12 @@ static int __qbuf_dmabuf(struct vb2_buffer *vb, const void *pb)
memset(planes, 0, sizeof(planes[0]) * vb->num_planes);
/* Copy relevant information provided by the userspace */
- if (pb)
+ if (pb) {
ret = call_bufop(vb->vb2_queue, fill_vb2_buffer,
vb, pb, planes);
- if (ret)
- return ret;
+ if (ret)
+ return ret;
+ }
for (plane = 0; plane < vb->num_planes; ++plane) {
struct dma_buf *dbuf = dma_buf_get(planes[plane].m.fd);
diff --git a/drivers/media/v4l2-core/videobuf2-dma-contig.c b/drivers/media/v4l2-core/videobuf2-dma-contig.c
index 2db0413f5d57..4f246d166111 100644
--- a/drivers/media/v4l2-core/videobuf2-dma-contig.c
+++ b/drivers/media/v4l2-core/videobuf2-dma-contig.c
@@ -12,6 +12,7 @@
#include <linux/dma-buf.h>
#include <linux/module.h>
+#include <linux/refcount.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
@@ -34,7 +35,7 @@ struct vb2_dc_buf {
/* MMAP related */
struct vb2_vmarea_handler handler;
- atomic_t refcount;
+ refcount_t refcount;
struct sg_table *sgt_base;
/* DMABUF related */
@@ -86,7 +87,7 @@ static unsigned int vb2_dc_num_users(void *buf_priv)
{
struct vb2_dc_buf *buf = buf_priv;
- return atomic_read(&buf->refcount);
+ return refcount_read(&buf->refcount);
}
static void vb2_dc_prepare(void *buf_priv)
@@ -122,7 +123,7 @@ static void vb2_dc_put(void *buf_priv)
{
struct vb2_dc_buf *buf = buf_priv;
- if (!atomic_dec_and_test(&buf->refcount))
+ if (!refcount_dec_and_test(&buf->refcount))
return;
if (buf->sgt_base) {
@@ -170,7 +171,7 @@ static void *vb2_dc_alloc(struct device *dev, unsigned long attrs,
buf->handler.put = vb2_dc_put;
buf->handler.arg = buf;
- atomic_inc(&buf->refcount);
+ refcount_set(&buf->refcount, 1);
return buf;
}
@@ -407,7 +408,7 @@ static struct dma_buf *vb2_dc_get_dmabuf(void *buf_priv, unsigned long flags)
return NULL;
/* dmabuf keeps reference to vb2 buffer */
- atomic_inc(&buf->refcount);
+ refcount_inc(&buf->refcount);
return dbuf;
}
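
The videobuf2 allocators converted below (dma-contig here, dma-sg and vmalloc further down) all receive the same mechanical change from atomic_t to refcount_t, which warns and saturates on overflow/underflow instead of silently wrapping. A self-contained sketch of the one-to-one mapping used in these hunks (struct my_buf is illustrative):

#include <linux/refcount.h>
#include <linux/slab.h>

struct my_buf {
        refcount_t refcount;
};

static void my_buf_init(struct my_buf *buf)
{
        /* atomic_inc() on a zero-initialised counter becomes an explicit set to 1 */
        refcount_set(&buf->refcount, 1);
}

static void my_buf_get(struct my_buf *buf)
{
        refcount_inc(&buf->refcount);                   /* was atomic_inc() */
}

static void my_buf_put(struct my_buf *buf)
{
        if (refcount_dec_and_test(&buf->refcount))      /* was atomic_dec_and_test() */
                kfree(buf);
}
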
diff --git a/drivers/media/v4l2-core/videobuf2-dma-sg.c b/drivers/media/v4l2-core/videobuf2-dma-sg.c
index 6fd1343b7c13..8e8798a74760 100644
--- a/drivers/media/v4l2-core/videobuf2-dma-sg.c
+++ b/drivers/media/v4l2-core/videobuf2-dma-sg.c
@@ -12,6 +12,7 @@
#include <linux/module.h>
#include <linux/mm.h>
+#include <linux/refcount.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
@@ -46,7 +47,7 @@ struct vb2_dma_sg_buf {
struct sg_table *dma_sgt;
size_t size;
unsigned int num_pages;
- atomic_t refcount;
+ refcount_t refcount;
struct vb2_vmarea_handler handler;
struct dma_buf_attachment *db_attach;
@@ -150,7 +151,7 @@ static void *vb2_dma_sg_alloc(struct device *dev, unsigned long dma_attrs,
buf->handler.put = vb2_dma_sg_put;
buf->handler.arg = buf;
- atomic_inc(&buf->refcount);
+ refcount_set(&buf->refcount, 1);
dprintk(1, "%s: Allocated buffer of %d pages\n",
__func__, buf->num_pages);
@@ -176,7 +177,7 @@ static void vb2_dma_sg_put(void *buf_priv)
struct sg_table *sgt = &buf->sg_table;
int i = buf->num_pages;
- if (atomic_dec_and_test(&buf->refcount)) {
+ if (refcount_dec_and_test(&buf->refcount)) {
dprintk(1, "%s: Freeing buffer of %d pages\n", __func__,
buf->num_pages);
dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
@@ -320,7 +321,7 @@ static unsigned int vb2_dma_sg_num_users(void *buf_priv)
{
struct vb2_dma_sg_buf *buf = buf_priv;
- return atomic_read(&buf->refcount);
+ return refcount_read(&buf->refcount);
}
static int vb2_dma_sg_mmap(void *buf_priv, struct vm_area_struct *vma)
@@ -530,7 +531,7 @@ static struct dma_buf *vb2_dma_sg_get_dmabuf(void *buf_priv, unsigned long flags
return NULL;
/* dmabuf keeps reference to vb2 buffer */
- atomic_inc(&buf->refcount);
+ refcount_inc(&buf->refcount);
return dbuf;
}
diff --git a/drivers/media/v4l2-core/videobuf2-memops.c b/drivers/media/v4l2-core/videobuf2-memops.c
index 1cd322e939c7..4bb8424114ce 100644
--- a/drivers/media/v4l2-core/videobuf2-memops.c
+++ b/drivers/media/v4l2-core/videobuf2-memops.c
@@ -96,10 +96,10 @@ static void vb2_common_vm_open(struct vm_area_struct *vma)
struct vb2_vmarea_handler *h = vma->vm_private_data;
pr_debug("%s: %p, refcount: %d, vma: %08lx-%08lx\n",
- __func__, h, atomic_read(h->refcount), vma->vm_start,
+ __func__, h, refcount_read(h->refcount), vma->vm_start,
vma->vm_end);
- atomic_inc(h->refcount);
+ refcount_inc(h->refcount);
}
/**
@@ -114,7 +114,7 @@ static void vb2_common_vm_close(struct vm_area_struct *vma)
struct vb2_vmarea_handler *h = vma->vm_private_data;
pr_debug("%s: %p, refcount: %d, vma: %08lx-%08lx\n",
- __func__, h, atomic_read(h->refcount), vma->vm_start,
+ __func__, h, refcount_read(h->refcount), vma->vm_start,
vma->vm_end);
h->put(h->arg);
diff --git a/drivers/media/v4l2-core/videobuf2-v4l2.c b/drivers/media/v4l2-core/videobuf2-v4l2.c
index 3529849d2218..0c0669976bdc 100644
--- a/drivers/media/v4l2-core/videobuf2-v4l2.c
+++ b/drivers/media/v4l2-core/videobuf2-v4l2.c
@@ -544,6 +544,9 @@ int vb2_create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create)
case V4L2_BUF_TYPE_SDR_OUTPUT:
requested_sizes[0] = f->fmt.sdr.buffersize;
break;
+ case V4L2_BUF_TYPE_META_CAPTURE:
+ requested_sizes[0] = f->fmt.meta.buffersize;
+ break;
default:
return -EINVAL;
}
diff --git a/drivers/media/v4l2-core/videobuf2-vmalloc.c b/drivers/media/v4l2-core/videobuf2-vmalloc.c
index 27d1db3bb8cf..b337d780844c 100644
--- a/drivers/media/v4l2-core/videobuf2-vmalloc.c
+++ b/drivers/media/v4l2-core/videobuf2-vmalloc.c
@@ -13,6 +13,7 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/mm.h>
+#include <linux/refcount.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
@@ -26,7 +27,7 @@ struct vb2_vmalloc_buf {
struct frame_vector *vec;
enum dma_data_direction dma_dir;
unsigned long size;
- atomic_t refcount;
+ refcount_t refcount;
struct vb2_vmarea_handler handler;
struct dma_buf *dbuf;
};
@@ -56,7 +57,7 @@ static void *vb2_vmalloc_alloc(struct device *dev, unsigned long attrs,
return ERR_PTR(-ENOMEM);
}
- atomic_inc(&buf->refcount);
+ refcount_set(&buf->refcount, 1);
return buf;
}
@@ -64,7 +65,7 @@ static void vb2_vmalloc_put(void *buf_priv)
{
struct vb2_vmalloc_buf *buf = buf_priv;
- if (atomic_dec_and_test(&buf->refcount)) {
+ if (refcount_dec_and_test(&buf->refcount)) {
vfree(buf->vaddr);
kfree(buf);
}
@@ -161,7 +162,7 @@ static void *vb2_vmalloc_vaddr(void *buf_priv)
static unsigned int vb2_vmalloc_num_users(void *buf_priv)
{
struct vb2_vmalloc_buf *buf = buf_priv;
- return atomic_read(&buf->refcount);
+ return refcount_read(&buf->refcount);
}
static int vb2_vmalloc_mmap(void *buf_priv, struct vm_area_struct *vma)
@@ -368,7 +369,7 @@ static struct dma_buf *vb2_vmalloc_get_dmabuf(void *buf_priv, unsigned long flag
return NULL;
/* dmabuf keeps reference to vb2 buffer */
- atomic_inc(&buf->refcount);
+ refcount_inc(&buf->refcount);
return dbuf;
}
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index 1e73064b0fb2..62cff5afc6bd 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -7396,7 +7396,7 @@ mpt_display_event_info(MPT_ADAPTER *ioc, EventNotificationReply_t *pEventReply)
break;
case MPI_EVENT_SAS_DEV_STAT_RC_NO_PERSIST_ADDED:
snprintf(evStr, EVENT_DESCR_STR_SZ,
- "SAS Device Status Change: No Persistancy: "
+ "SAS Device Status Change: No Persistency: "
"id=%d channel=%d", id, channel);
break;
case MPI_EVENT_SAS_DEV_STAT_RC_UNSUPPORTED:
diff --git a/drivers/message/fusion/mptfc.c b/drivers/message/fusion/mptfc.c
index 98eafae78576..d065062240bc 100644
--- a/drivers/message/fusion/mptfc.c
+++ b/drivers/message/fusion/mptfc.c
@@ -1329,7 +1329,7 @@ mptfc_probe(struct pci_dev *pdev, const struct pci_device_id *id)
WQ_MEM_RECLAIM);
if (!ioc->fc_rescan_work_q) {
error = -ENOMEM;
- goto out_mptfc_probe;
+ goto out_mptfc_host;
}
/*
@@ -1351,6 +1351,9 @@ mptfc_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return 0;
+out_mptfc_host:
+ scsi_remove_host(sh);
+
out_mptfc_probe:
mptscsih_remove(pdev);
@@ -1530,6 +1533,8 @@ static void mptfc_remove(struct pci_dev *pdev)
}
}
+ scsi_remove_host(ioc->sh);
+
mptscsih_remove(pdev);
}
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
index 08a807d6a44f..6ba07c7feb92 100644
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
@@ -1176,8 +1176,6 @@ mptscsih_remove(struct pci_dev *pdev)
MPT_SCSI_HOST *hd;
int sz1;
- scsi_remove_host(host);
-
if((hd = shost_priv(host)) == NULL)
return;
diff --git a/drivers/message/fusion/mptspi.c b/drivers/message/fusion/mptspi.c
index 031e088edb5e..9a336a161d9f 100644
--- a/drivers/message/fusion/mptspi.c
+++ b/drivers/message/fusion/mptspi.c
@@ -1548,11 +1548,19 @@ out_mptspi_probe:
return error;
}
+static void mptspi_remove(struct pci_dev *pdev)
+{
+ MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
+
+ scsi_remove_host(ioc->sh);
+ mptscsih_remove(pdev);
+}
+
static struct pci_driver mptspi_driver = {
.name = "mptspi",
.id_table = mptspi_pci_table,
.probe = mptspi_probe,
- .remove = mptscsih_remove,
+ .remove = mptspi_remove,
.shutdown = mptscsih_shutdown,
#ifdef CONFIG_PM
.suspend = mptscsih_suspend,
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index c290990d73ed..2cba76e6fa3c 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -419,16 +419,6 @@ config VMWARE_BALLOON
To compile this driver as a module, choose M here: the
module will be called vmw_balloon.
-config ARM_CHARLCD
- bool "ARM Ltd. Character LCD Driver"
- depends on PLAT_VERSATILE
- help
- This is a driver for the character LCD found on the ARM Ltd.
- Versatile and RealView Platform Baseboards. It doesn't do
- very much more than display the text "ARM Linux" on the first
- line and the Linux version on the second line, but that's
- still useful.
-
config PCH_PHUB
tristate "Intel EG20T PCH/LAPIS Semicon IOH(ML7213/ML7223/ML7831) PHUB"
select GENERIC_NET_UTILS
@@ -492,284 +482,20 @@ config VEXPRESS_SYSCFG
bus. System Configuration interface is one of the possible means
of generating transactions on this bus.
-config PANEL
- tristate "Parallel port LCD/Keypad Panel support"
- depends on PARPORT
- ---help---
- Say Y here if you have an HD44780 or KS-0074 LCD connected to your
- parallel port. This driver also features 4 and 6-key keypads. The LCD
- is accessible through the /dev/lcd char device (10, 156), and the
- keypad through /dev/keypad (10, 185). This code can either be
- compiled as a module, or linked into the kernel and started at boot.
- If you don't understand what all this is about, say N.
-
-if PANEL
-
-config PANEL_PARPORT
- int "Default parallel port number (0=LPT1)"
- range 0 255
- default "0"
- ---help---
- This is the index of the parallel port the panel is connected to. One
- driver instance only supports one parallel port, so if your keypad
- and LCD are connected to two separate ports, you have to start two
- modules with different arguments. Numbering starts with '0' for LPT1,
- and so on.
-
-config PANEL_PROFILE
- int "Default panel profile (0-5, 0=custom)"
- range 0 5
- default "5"
- ---help---
- To ease configuration, the driver supports different configuration
- profiles for past and recent wirings. These profiles can also be
- used to define an approximative configuration, completed by a few
- other options. Here are the profiles :
-
- 0 = custom (see further)
- 1 = 2x16 parallel LCD, old keypad
- 2 = 2x16 serial LCD (KS-0074), new keypad
- 3 = 2x16 parallel LCD (Hantronix), no keypad
- 4 = 2x16 parallel LCD (Nexcom NSA1045) with Nexcom's keypad
- 5 = 2x40 parallel LCD (old one), with old keypad
-
- Custom configurations allow you to define how your display is
- wired to the parallel port, and how it works. This is only intended
- for experts.
-
-config PANEL_KEYPAD
- depends on PANEL_PROFILE="0"
- int "Keypad type (0=none, 1=old 6 keys, 2=new 6 keys, 3=Nexcom 4 keys)"
- range 0 3
- default 0
- ---help---
- This enables and configures a keypad connected to the parallel port.
- The keys will be read from character device 10,185. Valid values are :
-
- 0 : do not enable this driver
- 1 : old 6 keys keypad
- 2 : new 6 keys keypad, as used on the server at www.ant-computing.com
- 3 : Nexcom NSA1045's 4 keys keypad
-
- New profiles can be described in the driver source. The driver also
- supports simultaneous keys pressed when the keypad supports them.
-
-config PANEL_LCD
- depends on PANEL_PROFILE="0"
- int "LCD type (0=none, 1=custom, 2=old //, 3=ks0074, 4=hantronix, 5=Nexcom)"
- range 0 5
- default 0
- ---help---
- This enables and configures an LCD connected to the parallel port.
- The driver includes an interpreter for escape codes starting with
- '\e[L' which are specific to the LCD, and a few ANSI codes. The
- driver will be registered as character device 10,156, usually
- under the name '/dev/lcd'. There are a total of 6 supported types :
-
- 0 : do not enable the driver
- 1 : custom configuration and wiring (see further)
- 2 : 2x16 & 2x40 parallel LCD (old wiring)
- 3 : 2x16 serial LCD (KS-0074 based)
- 4 : 2x16 parallel LCD (Hantronix wiring)
- 5 : 2x16 parallel LCD (Nexcom wiring)
-
- When type '1' is specified, other options will appear to configure
- more precise aspects (wiring, dimensions, protocol, ...). Please note
- that those values changed from the 2.4 driver for better consistency.
-
-config PANEL_LCD_HEIGHT
- depends on PANEL_PROFILE="0" && PANEL_LCD="1"
- int "Number of lines on the LCD (1-2)"
- range 1 2
- default 2
- ---help---
- This is the number of visible character lines on the LCD in custom profile.
- It can either be 1 or 2.
-
-config PANEL_LCD_WIDTH
- depends on PANEL_PROFILE="0" && PANEL_LCD="1"
- int "Number of characters per line on the LCD (1-40)"
- range 1 40
- default 40
- ---help---
- This is the number of characters per line on the LCD in custom profile.
- Common values are 16,20,24,40.
-
-config PANEL_LCD_BWIDTH
- depends on PANEL_PROFILE="0" && PANEL_LCD="1"
- int "Internal LCD line width (1-40, 40 by default)"
- range 1 40
- default 40
- ---help---
- Most LCDs use a standard controller which supports hardware lines of 40
- characters, although sometimes only 16, 20 or 24 of them are really wired
- to the terminal. This results in some non-visible but addressable characters,
- and is the case for most parallel LCDs. Other LCDs, and some serial ones,
- however, use the same line width internally as what is visible. The KS0074
- for example, uses 16 characters per line for 16 visible characters per line.
-
- This option lets you configure the value used by your LCD in 'custom' profile.
- If you don't know, put '40' here.
-
-config PANEL_LCD_HWIDTH
- depends on PANEL_PROFILE="0" && PANEL_LCD="1"
- int "Hardware LCD line width (1-64, 64 by default)"
- range 1 64
- default 64
- ---help---
- Most LCDs use a single address bit to differentiate line 0 and line 1. Since
- some of them need to be able to address 40 chars with the lower bits, they
- often use the immediately superior power of 2, which is 64, to address the
- next line.
-
- If you don't know what your LCD uses, in doubt let 16 here for a 2x16, and
- 64 here for a 2x40.
-
-config PANEL_LCD_CHARSET
- depends on PANEL_PROFILE="0" && PANEL_LCD="1"
- int "LCD character set (0=normal, 1=KS0074)"
- range 0 1
- default 0
- ---help---
- Some controllers such as the KS0074 use a somewhat strange character set
- where many symbols are at unusual places. The driver knows how to map
- 'standard' ASCII characters to the character sets used by these controllers.
- Valid values are :
-
- 0 : normal (untranslated) character set
- 1 : KS0074 character set
-
- If you don't know, use the normal one (0).
-
-config PANEL_LCD_PROTO
- depends on PANEL_PROFILE="0" && PANEL_LCD="1"
- int "LCD communication mode (0=parallel 8 bits, 1=serial)"
- range 0 1
- default 0
- ---help---
- This driver now supports any serial or parallel LCD wired to a parallel
- port. But before assigning signals, the driver needs to know if it will
- be driving a serial LCD or a parallel one. Serial LCDs only use 2 wires
- (SDA/SCL), while parallel ones use 2 or 3 wires for the control signals
- (E, RS, sometimes RW), and 4 or 8 for the data. Use 0 here for a 8 bits
- parallel LCD, and 1 for a serial LCD.
-
-config PANEL_LCD_PIN_E
- depends on PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO="0"
- int "Parallel port pin number & polarity connected to the LCD E signal (-17...17) "
- range -17 17
- default 14
- ---help---
- This describes the number of the parallel port pin to which the LCD 'E'
- signal has been connected. It can be :
-
- 0 : no connection (eg: connected to ground)
- 1..17 : directly connected to any of these pins on the DB25 plug
- -1..-17 : connected to the same pin through an inverter (eg: transistor).
-
- Default for the 'E' pin in custom profile is '14' (AUTOFEED).
-
-config PANEL_LCD_PIN_RS
- depends on PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO="0"
- int "Parallel port pin number & polarity connected to the LCD RS signal (-17...17) "
- range -17 17
- default 17
- ---help---
- This describes the number of the parallel port pin to which the LCD 'RS'
- signal has been connected. It can be :
-
- 0 : no connection (eg: connected to ground)
- 1..17 : directly connected to any of these pins on the DB25 plug
- -1..-17 : connected to the same pin through an inverter (eg: transistor).
-
- Default for the 'RS' pin in custom profile is '17' (SELECT IN).
-
-config PANEL_LCD_PIN_RW
- depends on PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO="0"
- int "Parallel port pin number & polarity connected to the LCD RW signal (-17...17) "
- range -17 17
- default 16
+config ASPEED_LPC_CTRL
+ depends on (ARCH_ASPEED || COMPILE_TEST) && REGMAP && MFD_SYSCON
+ tristate "Aspeed ast2400/2500 HOST LPC to BMC bridge control"
---help---
- This describes the number of the parallel port pin to which the LCD 'RW'
- signal has been connected. It can be :
-
- 0 : no connection (eg: connected to ground)
- 1..17 : directly connected to any of these pins on the DB25 plug
- -1..-17 : connected to the same pin through an inverter (eg: transistor).
-
- Default for the 'RW' pin in custom profile is '16' (INIT).
-
-config PANEL_LCD_PIN_SCL
- depends on PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO!="0"
- int "Parallel port pin number & polarity connected to the LCD SCL signal (-17...17) "
- range -17 17
- default 1
- ---help---
- This describes the number of the parallel port pin to which the serial
- LCD 'SCL' signal has been connected. It can be :
-
- 0 : no connection (eg: connected to ground)
- 1..17 : directly connected to any of these pins on the DB25 plug
- -1..-17 : connected to the same pin through an inverter (eg: transistor).
-
- Default for the 'SCL' pin in custom profile is '1' (STROBE).
+ Control Aspeed ast2400/2500 HOST LPC to BMC mappings through
+ ioctl()s. The driver also provides a read/write interface to a BMC RAM
+ region where the host LPC read/write region can be buffered.
-config PANEL_LCD_PIN_SDA
- depends on PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO!="0"
- int "Parallel port pin number & polarity connected to the LCD SDA signal (-17...17) "
- range -17 17
- default 2
- ---help---
- This describes the number of the parallel port pin to which the serial
- LCD 'SDA' signal has been connected. It can be :
-
- 0 : no connection (eg: connected to ground)
- 1..17 : directly connected to any of these pins on the DB25 plug
- -1..-17 : connected to the same pin through an inverter (eg: transistor).
-
- Default for the 'SDA' pin in custom profile is '2' (D0).
-
-config PANEL_LCD_PIN_BL
- depends on PANEL_PROFILE="0" && PANEL_LCD="1"
- int "Parallel port pin number & polarity connected to the LCD backlight signal (-17...17) "
- range -17 17
- default 0
- ---help---
- This describes the number of the parallel port pin to which the LCD 'BL' signal
- has been connected. It can be :
-
- 0 : no connection (eg: connected to ground)
- 1..17 : directly connected to any of these pins on the DB25 plug
- -1..-17 : connected to the same pin through an inverter (eg: transistor).
-
- Default for the 'BL' pin in custom profile is '0' (uncontrolled).
-
-config PANEL_CHANGE_MESSAGE
- bool "Change LCD initialization message ?"
- default "n"
- ---help---
- This allows you to replace the boot message indicating the kernel version
- and the driver version with a custom message. This is useful on appliances
- where a simple 'Starting system' message can be enough to stop a customer
- from worrying.
-
- If you say 'Y' here, you'll be able to choose a message yourself. Otherwise,
- say 'N' and keep the default message with the version.
-
-config PANEL_BOOT_MESSAGE
- depends on PANEL_CHANGE_MESSAGE="y"
- string "New initialization message"
- default ""
+config PCI_ENDPOINT_TEST
+ depends on PCI
+ tristate "PCI Endpoint Test driver"
---help---
- This allows you to replace the boot message indicating the kernel version
- and the driver version with a custom message. This is useful on appliances
- where a simple 'Starting system' message can be enough to stop a customer
- from worrying.
-
- An empty message will only clear the display at driver init time. Any other
- printf()-formatted message is valid with newline and escape codes.
-
-endif # PANEL
+ Enable this configuration option to build the host-side test driver
+ for PCI Endpoint.
source "drivers/misc/c2port/Kconfig"
source "drivers/misc/eeprom/Kconfig"
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 7a3ea89339b4..81ef3e67acc9 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -37,7 +37,6 @@ obj-y += eeprom/
obj-y += cb710/
obj-$(CONFIG_SPEAR13XX_PCIE_GADGET) += spear13xx_pcie_gadget.o
obj-$(CONFIG_VMWARE_BALLOON) += vmw_balloon.o
-obj-$(CONFIG_ARM_CHARLCD) += arm-charlcd.o
obj-$(CONFIG_PCH_PHUB) += pch_phub.o
obj-y += ti-st/
obj-y += lis3lv02d/
@@ -53,7 +52,8 @@ obj-$(CONFIG_GENWQE) += genwqe/
obj-$(CONFIG_ECHO) += echo/
obj-$(CONFIG_VEXPRESS_SYSCFG) += vexpress-syscfg.o
obj-$(CONFIG_CXL_BASE) += cxl/
-obj-$(CONFIG_PANEL) += panel.o
+obj-$(CONFIG_ASPEED_LPC_CTRL) += aspeed-lpc-ctrl.o
+obj-$(CONFIG_PCI_ENDPOINT_TEST) += pci_endpoint_test.o
lkdtm-$(CONFIG_LKDTM) += lkdtm_core.o
lkdtm-$(CONFIG_LKDTM) += lkdtm_bugs.o
@@ -62,6 +62,8 @@ lkdtm-$(CONFIG_LKDTM) += lkdtm_perms.o
lkdtm-$(CONFIG_LKDTM) += lkdtm_rodata_objcopy.o
lkdtm-$(CONFIG_LKDTM) += lkdtm_usercopy.o
+KCOV_INSTRUMENT_lkdtm_rodata.o := n
+
OBJCOPYFLAGS :=
OBJCOPYFLAGS_lkdtm_rodata_objcopy.o := \
--set-section-flags .text=alloc,readonly \
diff --git a/drivers/misc/aspeed-lpc-ctrl.c b/drivers/misc/aspeed-lpc-ctrl.c
new file mode 100644
index 000000000000..b5439643f54b
--- /dev/null
+++ b/drivers/misc/aspeed-lpc-ctrl.c
@@ -0,0 +1,266 @@
+/*
+ * Copyright 2017 IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/mfd/syscon.h>
+#include <linux/miscdevice.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/poll.h>
+#include <linux/regmap.h>
+
+#include <linux/aspeed-lpc-ctrl.h>
+
+#define DEVICE_NAME "aspeed-lpc-ctrl"
+
+#define HICR7 0x8
+#define HICR8 0xc
+
+struct aspeed_lpc_ctrl {
+ struct miscdevice miscdev;
+ struct regmap *regmap;
+ phys_addr_t mem_base;
+ resource_size_t mem_size;
+ u32 pnor_size;
+ u32 pnor_base;
+};
+
+static struct aspeed_lpc_ctrl *file_aspeed_lpc_ctrl(struct file *file)
+{
+ return container_of(file->private_data, struct aspeed_lpc_ctrl,
+ miscdev);
+}
+
+static int aspeed_lpc_ctrl_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct aspeed_lpc_ctrl *lpc_ctrl = file_aspeed_lpc_ctrl(file);
+ unsigned long vsize = vma->vm_end - vma->vm_start;
+ pgprot_t prot = vma->vm_page_prot;
+
+ if (vma->vm_pgoff + vsize > lpc_ctrl->mem_base + lpc_ctrl->mem_size)
+ return -EINVAL;
+
+ /* ast2400/2500 AHB accesses are not cache coherent */
+ prot = pgprot_noncached(prot);
+
+ if (remap_pfn_range(vma, vma->vm_start,
+ (lpc_ctrl->mem_base >> PAGE_SHIFT) + vma->vm_pgoff,
+ vsize, prot))
+ return -EAGAIN;
+
+ return 0;
+}
+
+static long aspeed_lpc_ctrl_ioctl(struct file *file, unsigned int cmd,
+ unsigned long param)
+{
+ struct aspeed_lpc_ctrl *lpc_ctrl = file_aspeed_lpc_ctrl(file);
+ void __user *p = (void __user *)param;
+ struct aspeed_lpc_ctrl_mapping map;
+ u32 addr;
+ u32 size;
+ long rc;
+
+ if (copy_from_user(&map, p, sizeof(map)))
+ return -EFAULT;
+
+ if (map.flags != 0)
+ return -EINVAL;
+
+ switch (cmd) {
+ case ASPEED_LPC_CTRL_IOCTL_GET_SIZE:
+ /* The flash windows don't report their size */
+ if (map.window_type != ASPEED_LPC_CTRL_WINDOW_MEMORY)
+ return -EINVAL;
+
+ /* Support more than one window id in the future */
+ if (map.window_id != 0)
+ return -EINVAL;
+
+ map.size = lpc_ctrl->mem_size;
+
+ return copy_to_user(p, &map, sizeof(map)) ? -EFAULT : 0;
+ case ASPEED_LPC_CTRL_IOCTL_MAP:
+
+ /*
+ * The top half of HICR7 is the MSB of the BMC address of the
+ * mapping.
+ * The bottom half of HICR7 is the MSB of the HOST LPC
+ * firmware space address of the mapping.
+ *
+ * The 1 bits in the top half of HICR8 represent the bits
+ * (in the requested address) that should be ignored and
+ * replaced with those from the top half of HICR7.
+ * The 1 bits in the bottom half of HICR8 represent the bits
+ * (in the requested address) that should be kept and passed
+ * into the BMC address space.
+ */
+
+ /*
+ * It doesn't make sense to talk about a size or offset with
+ * low 16 bits set. Both HICR7 and HICR8 talk about the top 16
+ * bits of addresses and sizes.
+ */
+
+ if ((map.size & 0x0000ffff) || (map.offset & 0x0000ffff))
+ return -EINVAL;
+
+ /*
+ * Because of the way the masks work in HICR8, offset has to
+ * be a multiple of size.
+ */
+ if (map.offset & (map.size - 1))
+ return -EINVAL;
+
+ if (map.window_type == ASPEED_LPC_CTRL_WINDOW_FLASH) {
+ addr = lpc_ctrl->pnor_base;
+ size = lpc_ctrl->pnor_size;
+ } else if (map.window_type == ASPEED_LPC_CTRL_WINDOW_MEMORY) {
+ addr = lpc_ctrl->mem_base;
+ size = lpc_ctrl->mem_size;
+ } else {
+ return -EINVAL;
+ }
+
+ /* Check overflow first! */
+ if (map.offset + map.size < map.offset ||
+ map.offset + map.size > size)
+ return -EINVAL;
+
+ if (map.size == 0 || map.size > size)
+ return -EINVAL;
+
+ addr += map.offset;
+
+ /*
+ * addr (host lpc address) is safe regardless of values. This
+ * simply changes the address the host has to request on its
+ * side of the LPC bus. This cannot impact the host's own
+ * memory space by surprise as LPC specific accessors are
+ * required. The only strange thing that could be done is
+ * setting the lower 16 bits but the shift takes care of that.
+ */
+
+ rc = regmap_write(lpc_ctrl->regmap, HICR7,
+ (addr | (map.addr >> 16)));
+ if (rc)
+ return rc;
+
+ return regmap_write(lpc_ctrl->regmap, HICR8,
+ (~(map.size - 1)) | ((map.size >> 16) - 1));
+ }
+
+ return -EINVAL;
+}
+
+static const struct file_operations aspeed_lpc_ctrl_fops = {
+ .owner = THIS_MODULE,
+ .mmap = aspeed_lpc_ctrl_mmap,
+ .unlocked_ioctl = aspeed_lpc_ctrl_ioctl,
+};
+
+static int aspeed_lpc_ctrl_probe(struct platform_device *pdev)
+{
+ struct aspeed_lpc_ctrl *lpc_ctrl;
+ struct device_node *node;
+ struct resource resm;
+ struct device *dev;
+ int rc;
+
+ dev = &pdev->dev;
+
+ lpc_ctrl = devm_kzalloc(dev, sizeof(*lpc_ctrl), GFP_KERNEL);
+ if (!lpc_ctrl)
+ return -ENOMEM;
+
+ node = of_parse_phandle(dev->of_node, "flash", 0);
+ if (!node) {
+ dev_err(dev, "Didn't find host pnor flash node\n");
+ return -ENODEV;
+ }
+
+ rc = of_address_to_resource(node, 1, &resm);
+ of_node_put(node);
+ if (rc) {
+ dev_err(dev, "Couldn't address to resource for flash\n");
+ return rc;
+ }
+
+ lpc_ctrl->pnor_size = resource_size(&resm);
+ lpc_ctrl->pnor_base = resm.start;
+
+ dev_set_drvdata(&pdev->dev, lpc_ctrl);
+
+ node = of_parse_phandle(dev->of_node, "memory-region", 0);
+ if (!node) {
+ dev_err(dev, "Didn't find reserved memory\n");
+ return -EINVAL;
+ }
+
+ rc = of_address_to_resource(node, 0, &resm);
+ of_node_put(node);
+ if (rc) {
+ dev_err(dev, "Couldn't address to resource for reserved memory\n");
+ return -ENOMEM;
+ }
+
+ lpc_ctrl->mem_size = resource_size(&resm);
+ lpc_ctrl->mem_base = resm.start;
+
+ lpc_ctrl->regmap = syscon_node_to_regmap(
+ pdev->dev.parent->of_node);
+ if (IS_ERR(lpc_ctrl->regmap)) {
+ dev_err(dev, "Couldn't get regmap\n");
+ return -ENODEV;
+ }
+
+ lpc_ctrl->miscdev.minor = MISC_DYNAMIC_MINOR;
+ lpc_ctrl->miscdev.name = DEVICE_NAME;
+ lpc_ctrl->miscdev.fops = &aspeed_lpc_ctrl_fops;
+ lpc_ctrl->miscdev.parent = dev;
+ rc = misc_register(&lpc_ctrl->miscdev);
+ if (rc)
+ dev_err(dev, "Unable to register device\n");
+ else
+ dev_info(dev, "Loaded at %pr\n", &resm);
+
+ return rc;
+}
+
+static int aspeed_lpc_ctrl_remove(struct platform_device *pdev)
+{
+ struct aspeed_lpc_ctrl *lpc_ctrl = dev_get_drvdata(&pdev->dev);
+
+ misc_deregister(&lpc_ctrl->miscdev);
+
+ return 0;
+}
+
+static const struct of_device_id aspeed_lpc_ctrl_match[] = {
+ { .compatible = "aspeed,ast2400-lpc-ctrl" },
+ { .compatible = "aspeed,ast2500-lpc-ctrl" },
+ { },
+};
+
+static struct platform_driver aspeed_lpc_ctrl_driver = {
+ .driver = {
+ .name = DEVICE_NAME,
+ .of_match_table = aspeed_lpc_ctrl_match,
+ },
+ .probe = aspeed_lpc_ctrl_probe,
+ .remove = aspeed_lpc_ctrl_remove,
+};
+
+module_platform_driver(aspeed_lpc_ctrl_driver);
+
+MODULE_DEVICE_TABLE(of, aspeed_lpc_ctrl_match);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Cyril Bur <cyrilbur@gmail.com>");
+MODULE_DESCRIPTION("Control for aspeed 2400/2500 LPC HOST to BMC mappings");
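
The Kconfig text and the HICR7/HICR8 comments above describe the intended use: userspace asks the driver how big the reserved-memory window is, asks it to point the host LPC firmware space at that window, then accesses the same window from the BMC side via mmap() on the character device. A hypothetical userspace sketch under those assumptions (the device node name follows DEVICE_NAME above, the host-side address is purely illustrative, and error handling is minimal):

#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/aspeed-lpc-ctrl.h>

int main(void)
{
        struct aspeed_lpc_ctrl_mapping map = {
                .window_type = ASPEED_LPC_CTRL_WINDOW_MEMORY,
                .window_id = 0,
                .flags = 0,
        };
        void *win;
        int fd;

        fd = open("/dev/aspeed-lpc-ctrl", O_RDWR);
        if (fd < 0)
                return 1;

        /* How big is the reserved BMC memory window? */
        if (ioctl(fd, ASPEED_LPC_CTRL_IOCTL_GET_SIZE, &map) < 0)
                return 1;

        /* Point the host LPC firmware space at the whole window. */
        map.offset = 0;
        map.addr = 0x0e000000;  /* illustrative host LPC firmware-space address */
        if (ioctl(fd, ASPEED_LPC_CTRL_IOCTL_MAP, &map) < 0)
                return 1;

        /* The BMC sees the same window through mmap() of this device. */
        win = mmap(NULL, map.size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if (win == MAP_FAILED)
                return 1;

        munmap(win, map.size);
        close(fd);
        return 0;
}

Following the register arithmetic in the ioctl handler, a 64 KiB window (map.size = 0x10000) would program HICR8 = (~(0x10000 - 1)) | ((0x10000 >> 16) - 1) = 0xffff0000 as a 32-bit value: per the comments above, all of the top 16 requested-address bits are replaced by the BMC base from HICR7, and the low 16 bits pass through untouched since neither register covers them.
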
diff --git a/drivers/misc/c2port/c2port-duramar2150.c b/drivers/misc/c2port/c2port-duramar2150.c
index 5484301d57d9..3dc61ea7dc64 100644
--- a/drivers/misc/c2port/c2port-duramar2150.c
+++ b/drivers/misc/c2port/c2port-duramar2150.c
@@ -129,8 +129,8 @@ static int __init duramar2150_c2port_init(void)
duramar2150_c2port_dev = c2port_device_register("uc",
&duramar2150_c2port_ops, NULL);
- if (!duramar2150_c2port_dev) {
- ret = -ENODEV;
+ if (IS_ERR(duramar2150_c2port_dev)) {
+ ret = PTR_ERR(duramar2150_c2port_dev);
goto free_region;
}
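
c2port_device_register() returns an ERR_PTR()-encoded error rather than NULL on failure, so the old NULL check could never fire; the fix above switches to the usual IS_ERR()/PTR_ERR() idiom. A minimal sketch of that idiom (my_register() is a hypothetical ERR_PTR-returning constructor):

#include <linux/err.h>

struct my_dev;
struct my_dev *my_register(void);       /* hypothetical: returns ERR_PTR() on failure */

static int my_init(void)
{
        struct my_dev *dev = my_register();

        if (IS_ERR(dev))                /* a NULL check would miss the error */
                return PTR_ERR(dev);    /* propagate the encoded errno */

        return 0;
}
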
diff --git a/drivers/misc/cxl/api.c b/drivers/misc/cxl/api.c
index bcc030eacab7..1a138c83f877 100644
--- a/drivers/misc/cxl/api.c
+++ b/drivers/misc/cxl/api.c
@@ -14,6 +14,7 @@
#include <linux/msi.h>
#include <linux/module.h>
#include <linux/mount.h>
+#include <linux/sched/mm.h>
#include "cxl.h"
@@ -321,19 +322,29 @@ int cxl_start_context(struct cxl_context *ctx, u64 wed,
if (task) {
ctx->pid = get_task_pid(task, PIDTYPE_PID);
- ctx->glpid = get_task_pid(task->group_leader, PIDTYPE_PID);
kernel = false;
ctx->real_mode = false;
+
+ /* acquire a reference to the task's mm */
+ ctx->mm = get_task_mm(current);
+
+ /* ensure this mm_struct can't be freed */
+ cxl_context_mm_count_get(ctx);
+
+ /* decrement the use count */
+ if (ctx->mm)
+ mmput(ctx->mm);
}
cxl_ctx_get();
if ((rc = cxl_ops->attach_process(ctx, kernel, wed, 0))) {
- put_pid(ctx->glpid);
put_pid(ctx->pid);
- ctx->glpid = ctx->pid = NULL;
+ ctx->pid = NULL;
cxl_adapter_context_put(ctx->afu->adapter);
cxl_ctx_put();
+ if (task)
+ cxl_context_mm_count_put(ctx);
goto out;
}
diff --git a/drivers/misc/cxl/context.c b/drivers/misc/cxl/context.c
index 062bf6ca2625..4472ce11f98d 100644
--- a/drivers/misc/cxl/context.c
+++ b/drivers/misc/cxl/context.c
@@ -17,6 +17,7 @@
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/idr.h>
+#include <linux/sched/mm.h>
#include <asm/cputable.h>
#include <asm/current.h>
#include <asm/copro.h>
@@ -38,23 +39,26 @@ int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master)
{
int i;
- spin_lock_init(&ctx->sste_lock);
ctx->afu = afu;
ctx->master = master;
- ctx->pid = ctx->glpid = NULL; /* Set in start work ioctl */
+ ctx->pid = NULL; /* Set in start work ioctl */
mutex_init(&ctx->mapping_lock);
ctx->mapping = NULL;
- /*
- * Allocate the segment table before we put it in the IDR so that we
- * can always access it when dereferenced from IDR. For the same
- * reason, the segment table is only destroyed after the context is
- * removed from the IDR. Access to this in the IOCTL is protected by
- * Linux filesytem symantics (can't IOCTL until open is complete).
- */
- i = cxl_alloc_sst(ctx);
- if (i)
- return i;
+ if (cxl_is_psl8(afu)) {
+ spin_lock_init(&ctx->sste_lock);
+
+ /*
+ * Allocate the segment table before we put it in the IDR so that we
+ * can always access it when dereferenced from IDR. For the same
+ * reason, the segment table is only destroyed after the context is
+ * removed from the IDR. Access to this in the IOCTL is protected by
+ * Linux filesystem semantics (can't IOCTL until open is complete).
+ */
+ i = cxl_alloc_sst(ctx);
+ if (i)
+ return i;
+ }
INIT_WORK(&ctx->fault_work, cxl_handle_fault);
@@ -184,13 +188,26 @@ int cxl_context_iomap(struct cxl_context *ctx, struct vm_area_struct *vma)
if (ctx->afu->current_mode == CXL_MODE_DEDICATED) {
if (start + len > ctx->afu->adapter->ps_size)
return -EINVAL;
+
+ if (cxl_is_psl9(ctx->afu)) {
+ /*
+ * Make sure there is a valid problem state
+ * area space for this AFU.
+ */
+ if (ctx->master && !ctx->afu->psa) {
+ pr_devel("AFU doesn't support mmio space\n");
+ return -EINVAL;
+ }
+
+ /* Can't mmap until the AFU is enabled */
+ if (!ctx->afu->enabled)
+ return -EBUSY;
+ }
} else {
if (start + len > ctx->psn_size)
return -EINVAL;
- }
- if (ctx->afu->current_mode != CXL_MODE_DEDICATED) {
- /* make sure there is a valid per process space for this AFU */
+ /* Make sure there is a valid per process space for this AFU */
if ((ctx->master && !ctx->afu->psa) || (!ctx->afu->pp_psa)) {
pr_devel("AFU doesn't support mmio space\n");
return -EINVAL;
@@ -242,12 +259,16 @@ int __detach_context(struct cxl_context *ctx)
/* release the reference to the group leader and mm handling pid */
put_pid(ctx->pid);
- put_pid(ctx->glpid);
cxl_ctx_put();
/* Decrease the attached context count on the adapter */
cxl_adapter_context_put(ctx->afu->adapter);
+
+ /* Decrease the mm count on the context */
+ cxl_context_mm_count_put(ctx);
+ ctx->mm = NULL;
+
return 0;
}
@@ -303,7 +324,8 @@ static void reclaim_ctx(struct rcu_head *rcu)
{
struct cxl_context *ctx = container_of(rcu, struct cxl_context, rcu);
- free_page((u64)ctx->sstp);
+ if (cxl_is_psl8(ctx->afu))
+ free_page((u64)ctx->sstp);
if (ctx->ff_page)
__free_page(ctx->ff_page);
ctx->sstp = NULL;
@@ -325,3 +347,15 @@ void cxl_context_free(struct cxl_context *ctx)
mutex_unlock(&ctx->afu->contexts_lock);
call_rcu(&ctx->rcu, reclaim_ctx);
}
+
+void cxl_context_mm_count_get(struct cxl_context *ctx)
+{
+ if (ctx->mm)
+ atomic_inc(&ctx->mm->mm_count);
+}
+
+void cxl_context_mm_count_put(struct cxl_context *ctx)
+{
+ if (ctx->mm)
+ mmdrop(ctx->mm);
+}
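
The cxl changes ending here keep the context's mm_struct pinned by raising mm_count (so the structure itself cannot be freed) while dropping the mm_users reference taken by get_task_mm() (so the task's address space can still be torn down on exit). A short sketch of the two counters, assuming the generic mmgrab() helper in place of the open-coded atomic_inc() used by cxl_context_mm_count_get():

#include <linux/sched/mm.h>

/* Illustrative: pin the mm_struct's memory without keeping the address space alive. */
static struct mm_struct *pin_task_mm(struct task_struct *task)
{
        struct mm_struct *mm = get_task_mm(task);       /* takes an mm_users reference */

        if (!mm)
                return NULL;

        mmgrab(mm);     /* mm_count: the struct mm_struct itself cannot be freed */
        mmput(mm);      /* drop mm_users: the address space may still exit normally */
        return mm;      /* release later with mmdrop(mm) */
}
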
diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h
index 79e60ec70bd3..c8568ea7c518 100644
--- a/drivers/misc/cxl/cxl.h
+++ b/drivers/misc/cxl/cxl.h
@@ -63,7 +63,7 @@ typedef struct {
/* Memory maps. Ref CXL Appendix A */
/* PSL Privilege 1 Memory Map */
-/* Configuration and Control area */
+/* Configuration and Control area - CAIA 1&2 */
static const cxl_p1_reg_t CXL_PSL_CtxTime = {0x0000};
static const cxl_p1_reg_t CXL_PSL_ErrIVTE = {0x0008};
static const cxl_p1_reg_t CXL_PSL_KEY1 = {0x0010};
@@ -73,7 +73,7 @@ static const cxl_p1_reg_t CXL_PSL_Control = {0x0020};
static const cxl_p1_reg_t CXL_PSL_DLCNTL = {0x0060};
static const cxl_p1_reg_t CXL_PSL_DLADDR = {0x0068};
-/* PSL Lookaside Buffer Management Area */
+/* PSL Lookaside Buffer Management Area - CAIA 1 */
static const cxl_p1_reg_t CXL_PSL_LBISEL = {0x0080};
static const cxl_p1_reg_t CXL_PSL_SLBIE = {0x0088};
static const cxl_p1_reg_t CXL_PSL_SLBIA = {0x0090};
@@ -82,7 +82,7 @@ static const cxl_p1_reg_t CXL_PSL_TLBIA = {0x00A8};
static const cxl_p1_reg_t CXL_PSL_AFUSEL = {0x00B0};
/* 0x00C0:7EFF Implementation dependent area */
-/* PSL registers */
+/* PSL registers - CAIA 1 */
static const cxl_p1_reg_t CXL_PSL_FIR1 = {0x0100};
static const cxl_p1_reg_t CXL_PSL_FIR2 = {0x0108};
static const cxl_p1_reg_t CXL_PSL_Timebase = {0x0110};
@@ -98,61 +98,83 @@ static const cxl_p1_reg_t CXL_XSL_Timebase = {0x0100};
static const cxl_p1_reg_t CXL_XSL_TB_CTLSTAT = {0x0108};
static const cxl_p1_reg_t CXL_XSL_FEC = {0x0158};
static const cxl_p1_reg_t CXL_XSL_DSNCTL = {0x0168};
+/* PSL registers - CAIA 2 */
+static const cxl_p1_reg_t CXL_PSL9_CONTROL = {0x0020};
+static const cxl_p1_reg_t CXL_XSL9_DSNCTL = {0x0168};
+static const cxl_p1_reg_t CXL_PSL9_FIR1 = {0x0300};
+static const cxl_p1_reg_t CXL_PSL9_FIR2 = {0x0308};
+static const cxl_p1_reg_t CXL_PSL9_Timebase = {0x0310};
+static const cxl_p1_reg_t CXL_PSL9_DEBUG = {0x0320};
+static const cxl_p1_reg_t CXL_PSL9_FIR_CNTL = {0x0348};
+static const cxl_p1_reg_t CXL_PSL9_DSNDCTL = {0x0350};
+static const cxl_p1_reg_t CXL_PSL9_TB_CTLSTAT = {0x0340};
+static const cxl_p1_reg_t CXL_PSL9_TRACECFG = {0x0368};
+static const cxl_p1_reg_t CXL_PSL9_APCDEDALLOC = {0x0378};
+static const cxl_p1_reg_t CXL_PSL9_APCDEDTYPE = {0x0380};
+static const cxl_p1_reg_t CXL_PSL9_TNR_ADDR = {0x0388};
+static const cxl_p1_reg_t CXL_PSL9_GP_CT = {0x0398};
+static const cxl_p1_reg_t CXL_XSL9_IERAT = {0x0588};
+static const cxl_p1_reg_t CXL_XSL9_ILPP = {0x0590};
+
/* 0x7F00:7FFF Reserved PCIe MSI-X Pending Bit Array area */
/* 0x8000:FFFF Reserved PCIe MSI-X Table Area */
/* PSL Slice Privilege 1 Memory Map */
-/* Configuration Area */
+/* Configuration Area - CAIA 1&2 */
static const cxl_p1n_reg_t CXL_PSL_SR_An = {0x00};
static const cxl_p1n_reg_t CXL_PSL_LPID_An = {0x08};
static const cxl_p1n_reg_t CXL_PSL_AMBAR_An = {0x10};
static const cxl_p1n_reg_t CXL_PSL_SPOffset_An = {0x18};
static const cxl_p1n_reg_t CXL_PSL_ID_An = {0x20};
static const cxl_p1n_reg_t CXL_PSL_SERR_An = {0x28};
-/* Memory Management and Lookaside Buffer Management */
+/* Memory Management and Lookaside Buffer Management - CAIA 1*/
static const cxl_p1n_reg_t CXL_PSL_SDR_An = {0x30};
+/* Memory Management and Lookaside Buffer Management - CAIA 1&2 */
static const cxl_p1n_reg_t CXL_PSL_AMOR_An = {0x38};
-/* Pointer Area */
+/* Pointer Area - CAIA 1&2 */
static const cxl_p1n_reg_t CXL_HAURP_An = {0x80};
static const cxl_p1n_reg_t CXL_PSL_SPAP_An = {0x88};
static const cxl_p1n_reg_t CXL_PSL_LLCMD_An = {0x90};
-/* Control Area */
+/* Control Area - CAIA 1&2 */
static const cxl_p1n_reg_t CXL_PSL_SCNTL_An = {0xA0};
static const cxl_p1n_reg_t CXL_PSL_CtxTime_An = {0xA8};
static const cxl_p1n_reg_t CXL_PSL_IVTE_Offset_An = {0xB0};
static const cxl_p1n_reg_t CXL_PSL_IVTE_Limit_An = {0xB8};
-/* 0xC0:FF Implementation Dependent Area */
+/* 0xC0:FF Implementation Dependent Area - CAIA 1&2 */
static const cxl_p1n_reg_t CXL_PSL_FIR_SLICE_An = {0xC0};
static const cxl_p1n_reg_t CXL_AFU_DEBUG_An = {0xC8};
+/* 0xC0:FF Implementation Dependent Area - CAIA 1 */
static const cxl_p1n_reg_t CXL_PSL_APCALLOC_A = {0xD0};
static const cxl_p1n_reg_t CXL_PSL_COALLOC_A = {0xD8};
static const cxl_p1n_reg_t CXL_PSL_RXCTL_A = {0xE0};
static const cxl_p1n_reg_t CXL_PSL_SLICE_TRACE = {0xE8};
/* PSL Slice Privilege 2 Memory Map */
-/* Configuration and Control Area */
+/* Configuration and Control Area - CAIA 1&2 */
static const cxl_p2n_reg_t CXL_PSL_PID_TID_An = {0x000};
static const cxl_p2n_reg_t CXL_CSRP_An = {0x008};
+/* Configuration and Control Area - CAIA 1 */
static const cxl_p2n_reg_t CXL_AURP0_An = {0x010};
static const cxl_p2n_reg_t CXL_AURP1_An = {0x018};
static const cxl_p2n_reg_t CXL_SSTP0_An = {0x020};
static const cxl_p2n_reg_t CXL_SSTP1_An = {0x028};
+/* Configuration and Control Area - CAIA 1 */
static const cxl_p2n_reg_t CXL_PSL_AMR_An = {0x030};
-/* Segment Lookaside Buffer Management */
+/* Segment Lookaside Buffer Management - CAIA 1 */
static const cxl_p2n_reg_t CXL_SLBIE_An = {0x040};
static const cxl_p2n_reg_t CXL_SLBIA_An = {0x048};
static const cxl_p2n_reg_t CXL_SLBI_Select_An = {0x050};
-/* Interrupt Registers */
+/* Interrupt Registers - CAIA 1&2 */
static const cxl_p2n_reg_t CXL_PSL_DSISR_An = {0x060};
static const cxl_p2n_reg_t CXL_PSL_DAR_An = {0x068};
static const cxl_p2n_reg_t CXL_PSL_DSR_An = {0x070};
static const cxl_p2n_reg_t CXL_PSL_TFC_An = {0x078};
static const cxl_p2n_reg_t CXL_PSL_PEHandle_An = {0x080};
static const cxl_p2n_reg_t CXL_PSL_ErrStat_An = {0x088};
-/* AFU Registers */
+/* AFU Registers - CAIA 1&2 */
static const cxl_p2n_reg_t CXL_AFU_Cntl_An = {0x090};
static const cxl_p2n_reg_t CXL_AFU_ERR_An = {0x098};
-/* Work Element Descriptor */
+/* Work Element Descriptor - CAIA 1&2 */
static const cxl_p2n_reg_t CXL_PSL_WED_An = {0x0A0};
/* 0x0C0:FFF Implementation Dependent Area */
@@ -179,6 +201,10 @@ static const cxl_p2n_reg_t CXL_PSL_WED_An = {0x0A0};
#define CXL_PSL_SR_An_SF MSR_SF /* 64bit */
#define CXL_PSL_SR_An_TA (1ull << (63-1)) /* Tags active, GA1: 0 */
#define CXL_PSL_SR_An_HV MSR_HV /* Hypervisor, GA1: 0 */
+#define CXL_PSL_SR_An_XLAT_hpt (0ull << (63-6))/* Hashed page table (HPT) mode */
+#define CXL_PSL_SR_An_XLAT_roh (2ull << (63-6))/* Radix on HPT mode */
+#define CXL_PSL_SR_An_XLAT_ror (3ull << (63-6))/* Radix on Radix mode */
+#define CXL_PSL_SR_An_BOT (1ull << (63-10)) /* Use the in-memory segment table */
#define CXL_PSL_SR_An_PR MSR_PR /* Problem state, GA1: 1 */
#define CXL_PSL_SR_An_ISL (1ull << (63-53)) /* Ignore Segment Large Page */
#define CXL_PSL_SR_An_TC (1ull << (63-54)) /* Page Table secondary hash */
@@ -202,6 +228,24 @@ static const cxl_p2n_reg_t CXL_PSL_WED_An = {0x0A0};
#define CXL_PSL_SERR_An_llcmdto (1ull << (63-6))
#define CXL_PSL_SERR_An_afupar (1ull << (63-7))
#define CXL_PSL_SERR_An_afudup (1ull << (63-8))
+#define CXL_PSL_SERR_An_IRQS ( \
+ CXL_PSL_SERR_An_afuto | CXL_PSL_SERR_An_afudis | CXL_PSL_SERR_An_afuov | \
+ CXL_PSL_SERR_An_badsrc | CXL_PSL_SERR_An_badctx | CXL_PSL_SERR_An_llcmdis | \
+ CXL_PSL_SERR_An_llcmdto | CXL_PSL_SERR_An_afupar | CXL_PSL_SERR_An_afudup)
+#define CXL_PSL_SERR_An_afuto_mask (1ull << (63-32))
+#define CXL_PSL_SERR_An_afudis_mask (1ull << (63-33))
+#define CXL_PSL_SERR_An_afuov_mask (1ull << (63-34))
+#define CXL_PSL_SERR_An_badsrc_mask (1ull << (63-35))
+#define CXL_PSL_SERR_An_badctx_mask (1ull << (63-36))
+#define CXL_PSL_SERR_An_llcmdis_mask (1ull << (63-37))
+#define CXL_PSL_SERR_An_llcmdto_mask (1ull << (63-38))
+#define CXL_PSL_SERR_An_afupar_mask (1ull << (63-39))
+#define CXL_PSL_SERR_An_afudup_mask (1ull << (63-40))
+#define CXL_PSL_SERR_An_IRQ_MASKS ( \
+ CXL_PSL_SERR_An_afuto_mask | CXL_PSL_SERR_An_afudis_mask | CXL_PSL_SERR_An_afuov_mask | \
+ CXL_PSL_SERR_An_badsrc_mask | CXL_PSL_SERR_An_badctx_mask | CXL_PSL_SERR_An_llcmdis_mask | \
+ CXL_PSL_SERR_An_llcmdto_mask | CXL_PSL_SERR_An_afupar_mask | CXL_PSL_SERR_An_afudup_mask)
+
#define CXL_PSL_SERR_An_AE (1ull << (63-30))
/****** CXL_PSL_SCNTL_An ****************************************************/
@@ -257,7 +301,7 @@ static const cxl_p2n_reg_t CXL_PSL_WED_An = {0x0A0};
#define CXL_SSTP1_An_STVA_L_MASK (~((1ull << (63-55))-1))
#define CXL_SSTP1_An_V (1ull << (63-63))
-/****** CXL_PSL_SLBIE_[An] **************************************************/
+/****** CXL_PSL_SLBIE_[An] - CAIA 1 **************************************************/
/* write: */
#define CXL_SLBIE_C PPC_BIT(36) /* Class */
#define CXL_SLBIE_SS PPC_BITMASK(37, 38) /* Segment Size */
@@ -267,10 +311,10 @@ static const cxl_p2n_reg_t CXL_PSL_WED_An = {0x0A0};
#define CXL_SLBIE_MAX PPC_BITMASK(24, 31)
#define CXL_SLBIE_PENDING PPC_BITMASK(56, 63)
-/****** Common to all CXL_TLBIA/SLBIA_[An] **********************************/
+/****** Common to all CXL_TLBIA/SLBIA_[An] - CAIA 1 **********************************/
#define CXL_TLB_SLB_P (1ull) /* Pending (read) */
-/****** Common to all CXL_TLB/SLB_IA/IE_[An] registers **********************/
+/****** Common to all CXL_TLB/SLB_IA/IE_[An] registers - CAIA 1 **********************/
#define CXL_TLB_SLB_IQ_ALL (0ull) /* Inv qualifier */
#define CXL_TLB_SLB_IQ_LPID (1ull) /* Inv qualifier */
#define CXL_TLB_SLB_IQ_LPIDPID (3ull) /* Inv qualifier */
@@ -278,7 +322,7 @@ static const cxl_p2n_reg_t CXL_PSL_WED_An = {0x0A0};
/****** CXL_PSL_AFUSEL ******************************************************/
#define CXL_PSL_AFUSEL_A (1ull << (63-55)) /* Adapter wide invalidates affect all AFUs */
-/****** CXL_PSL_DSISR_An ****************************************************/
+/****** CXL_PSL_DSISR_An - CAIA 1 ****************************************************/
#define CXL_PSL_DSISR_An_DS (1ull << (63-0)) /* Segment not found */
#define CXL_PSL_DSISR_An_DM (1ull << (63-1)) /* PTE not found (See also: M) or protection fault */
#define CXL_PSL_DSISR_An_ST (1ull << (63-2)) /* Segment Table PTE not found */
@@ -295,12 +339,39 @@ static const cxl_p2n_reg_t CXL_PSL_WED_An = {0x0A0};
#define CXL_PSL_DSISR_An_S DSISR_ISSTORE /* Access was afu_wr or afu_zero */
#define CXL_PSL_DSISR_An_K DSISR_KEYFAULT /* Access not permitted by virtual page class key protection */
+/****** CXL_PSL_DSISR_An - CAIA 2 ****************************************************/
+#define CXL_PSL9_DSISR_An_TF (1ull << (63-3)) /* Translation fault */
+#define CXL_PSL9_DSISR_An_PE (1ull << (63-4)) /* PSL Error (implementation specific) */
+#define CXL_PSL9_DSISR_An_AE (1ull << (63-5)) /* AFU Error */
+#define CXL_PSL9_DSISR_An_OC (1ull << (63-6)) /* OS Context Warning */
+#define CXL_PSL9_DSISR_An_S (1ull << (63-38)) /* TF for a write operation */
+#define CXL_PSL9_DSISR_PENDING (CXL_PSL9_DSISR_An_TF | CXL_PSL9_DSISR_An_PE | CXL_PSL9_DSISR_An_AE | CXL_PSL9_DSISR_An_OC)
+/*
+ * NOTE: Bits 56:63 (Checkout Response Status) are valid when DSISR_An[TF] = 1
+ * Status (0:7) Encoding
+ */
+#define CXL_PSL9_DSISR_An_CO_MASK 0x00000000000000ffULL
+#define CXL_PSL9_DSISR_An_SF 0x0000000000000080ULL /* Segment Fault 0b10000000 */
+#define CXL_PSL9_DSISR_An_PF_SLR 0x0000000000000088ULL /* PTE not found (Single Level Radix) 0b10001000 */
+#define CXL_PSL9_DSISR_An_PF_RGC 0x000000000000008CULL /* PTE not found (Radix Guest (child)) 0b10001100 */
+#define CXL_PSL9_DSISR_An_PF_RGP 0x0000000000000090ULL /* PTE not found (Radix Guest (parent)) 0b10010000 */
+#define CXL_PSL9_DSISR_An_PF_HRH 0x0000000000000094ULL /* PTE not found (HPT/Radix Host) 0b10010100 */
+#define CXL_PSL9_DSISR_An_PF_STEG 0x000000000000009CULL /* PTE not found (STEG VA) 0b10011100 */
+
/****** CXL_PSL_TFC_An ******************************************************/
#define CXL_PSL_TFC_An_A (1ull << (63-28)) /* Acknowledge non-translation fault */
#define CXL_PSL_TFC_An_C (1ull << (63-29)) /* Continue (abort transaction) */
#define CXL_PSL_TFC_An_AE (1ull << (63-30)) /* Restart PSL with address error */
#define CXL_PSL_TFC_An_R (1ull << (63-31)) /* Restart PSL transaction */
+/****** CXL_XSL9_IERAT_ERAT - CAIA 2 **********************************/
+#define CXL_XSL9_IERAT_MLPID (1ull << (63-0)) /* Match LPID */
+#define CXL_XSL9_IERAT_MPID (1ull << (63-1)) /* Match PID */
+#define CXL_XSL9_IERAT_PRS (1ull << (63-4)) /* PRS bit for Radix invalidations */
+#define CXL_XSL9_IERAT_INVR (1ull << (63-3)) /* Invalidate Radix */
+#define CXL_XSL9_IERAT_IALL (1ull << (63-8)) /* Invalidate All */
+#define CXL_XSL9_IERAT_IINPROG (1ull << (63-63)) /* Invalidate in progress */
+
/* cxl_process_element->software_status */
#define CXL_PE_SOFTWARE_STATE_V (1ul << (31 - 0)) /* Valid */
#define CXL_PE_SOFTWARE_STATE_C (1ul << (31 - 29)) /* Complete */
@@ -482,8 +553,6 @@ struct cxl_context {
unsigned int sst_size, sst_lru;
wait_queue_head_t wq;
- /* pid of the group leader associated with the pid */
- struct pid *glpid;
/* use mm context associated with this pid for ds faults */
struct pid *pid;
spinlock_t lock; /* Protects pending_irq_mask, pending_fault and fault_addr */
@@ -551,15 +620,27 @@ struct cxl_context {
* CX4 only:
*/
struct list_head extra_irq_contexts;
+
+ struct mm_struct *mm;
};
+struct cxl_irq_info;
+
struct cxl_service_layer_ops {
int (*adapter_regs_init)(struct cxl *adapter, struct pci_dev *dev);
+ int (*invalidate_all)(struct cxl *adapter);
int (*afu_regs_init)(struct cxl_afu *afu);
+ int (*sanitise_afu_regs)(struct cxl_afu *afu);
int (*register_serr_irq)(struct cxl_afu *afu);
void (*release_serr_irq)(struct cxl_afu *afu);
- void (*debugfs_add_adapter_sl_regs)(struct cxl *adapter, struct dentry *dir);
- void (*debugfs_add_afu_sl_regs)(struct cxl_afu *afu, struct dentry *dir);
+ irqreturn_t (*handle_interrupt)(int irq, struct cxl_context *ctx, struct cxl_irq_info *irq_info);
+ irqreturn_t (*fail_irq)(struct cxl_afu *afu, struct cxl_irq_info *irq_info);
+ int (*activate_dedicated_process)(struct cxl_afu *afu);
+ int (*attach_afu_directed)(struct cxl_context *ctx, u64 wed, u64 amr);
+ int (*attach_dedicated_process)(struct cxl_context *ctx, u64 wed, u64 amr);
+ void (*update_dedicated_ivtes)(struct cxl_context *ctx);
+ void (*debugfs_add_adapter_regs)(struct cxl *adapter, struct dentry *dir);
+ void (*debugfs_add_afu_regs)(struct cxl_afu *afu, struct dentry *dir);
void (*psl_irq_dump_registers)(struct cxl_context *ctx);
void (*err_irq_dump_registers)(struct cxl *adapter);
void (*debugfs_stop_trace)(struct cxl *adapter);
@@ -641,25 +722,38 @@ int cxl_pci_reset(struct cxl *adapter);
void cxl_pci_release_afu(struct device *dev);
ssize_t cxl_pci_read_adapter_vpd(struct cxl *adapter, void *buf, size_t len);
-/* common == phyp + powernv */
+/* common == phyp + powernv - CAIA 1&2 */
struct cxl_process_element_common {
__be32 tid;
__be32 pid;
__be64 csrp;
- __be64 aurp0;
- __be64 aurp1;
- __be64 sstp0;
- __be64 sstp1;
+ union {
+ struct {
+ __be64 aurp0;
+ __be64 aurp1;
+ __be64 sstp0;
+ __be64 sstp1;
+ } psl8; /* CAIA 1 */
+ struct {
+ u8 reserved2[8];
+ u8 reserved3[8];
+ u8 reserved4[8];
+ u8 reserved5[8];
+ } psl9; /* CAIA 2 */
+ } u;
__be64 amr;
- u8 reserved3[4];
+ u8 reserved6[4];
__be64 wed;
} __packed;
-/* just powernv */
+/* just powernv - CAIA 1&2 */
struct cxl_process_element {
__be64 sr;
__be64 SPOffset;
- __be64 sdr;
+ union {
+ __be64 sdr; /* CAIA 1 */
+ u8 reserved1[8]; /* CAIA 2 */
+ } u;
__be64 haurp;
__be32 ctxtime;
__be16 ivte_offsets[4];
@@ -739,6 +833,39 @@ static inline u64 cxl_p2n_read(struct cxl_afu *afu, cxl_p2n_reg_t reg)
return ~0ULL;
}
+static inline bool cxl_is_power8(void)
+{
+ if ((pvr_version_is(PVR_POWER8E)) ||
+ (pvr_version_is(PVR_POWER8NVL)) ||
+ (pvr_version_is(PVR_POWER8)))
+ return true;
+ return false;
+}
+
+static inline bool cxl_is_power9(void)
+{
+ /* intermediate solution */
+ if (!cxl_is_power8() &&
+ (cpu_has_feature(CPU_FTRS_POWER9) ||
+ cpu_has_feature(CPU_FTR_POWER9_DD1)))
+ return true;
+ return false;
+}
+
+static inline bool cxl_is_psl8(struct cxl_afu *afu)
+{
+ if (afu->adapter->caia_major == 1)
+ return true;
+ return false;
+}
+
+static inline bool cxl_is_psl9(struct cxl_afu *afu)
+{
+ if (afu->adapter->caia_major == 2)
+ return true;
+ return false;
+}
+
ssize_t cxl_pci_afu_read_err_buffer(struct cxl_afu *afu, char *buf,
loff_t off, size_t count);
@@ -765,7 +892,6 @@ int cxl_update_properties(struct device_node *dn, struct property *new_prop);
void cxl_remove_adapter_nr(struct cxl *adapter);
-int cxl_alloc_spa(struct cxl_afu *afu);
void cxl_release_spa(struct cxl_afu *afu);
dev_t cxl_get_dev(void);
@@ -803,6 +929,15 @@ int afu_register_irqs(struct cxl_context *ctx, u32 count);
void afu_release_irqs(struct cxl_context *ctx, void *cookie);
void afu_irq_name_free(struct cxl_context *ctx);
+int cxl_attach_afu_directed_psl9(struct cxl_context *ctx, u64 wed, u64 amr);
+int cxl_attach_afu_directed_psl8(struct cxl_context *ctx, u64 wed, u64 amr);
+int cxl_activate_dedicated_process_psl9(struct cxl_afu *afu);
+int cxl_activate_dedicated_process_psl8(struct cxl_afu *afu);
+int cxl_attach_dedicated_process_psl9(struct cxl_context *ctx, u64 wed, u64 amr);
+int cxl_attach_dedicated_process_psl8(struct cxl_context *ctx, u64 wed, u64 amr);
+void cxl_update_dedicated_ivtes_psl9(struct cxl_context *ctx);
+void cxl_update_dedicated_ivtes_psl8(struct cxl_context *ctx);
+
#ifdef CONFIG_DEBUG_FS
int cxl_debugfs_init(void);
@@ -811,10 +946,13 @@ int cxl_debugfs_adapter_add(struct cxl *adapter);
void cxl_debugfs_adapter_remove(struct cxl *adapter);
int cxl_debugfs_afu_add(struct cxl_afu *afu);
void cxl_debugfs_afu_remove(struct cxl_afu *afu);
-void cxl_stop_trace(struct cxl *cxl);
-void cxl_debugfs_add_adapter_psl_regs(struct cxl *adapter, struct dentry *dir);
-void cxl_debugfs_add_adapter_xsl_regs(struct cxl *adapter, struct dentry *dir);
-void cxl_debugfs_add_afu_psl_regs(struct cxl_afu *afu, struct dentry *dir);
+void cxl_stop_trace_psl9(struct cxl *cxl);
+void cxl_stop_trace_psl8(struct cxl *cxl);
+void cxl_debugfs_add_adapter_regs_psl9(struct cxl *adapter, struct dentry *dir);
+void cxl_debugfs_add_adapter_regs_psl8(struct cxl *adapter, struct dentry *dir);
+void cxl_debugfs_add_adapter_regs_xsl(struct cxl *adapter, struct dentry *dir);
+void cxl_debugfs_add_afu_regs_psl9(struct cxl_afu *afu, struct dentry *dir);
+void cxl_debugfs_add_afu_regs_psl8(struct cxl_afu *afu, struct dentry *dir);
#else /* CONFIG_DEBUG_FS */
@@ -845,21 +983,34 @@ static inline void cxl_debugfs_afu_remove(struct cxl_afu *afu)
{
}
-static inline void cxl_stop_trace(struct cxl *cxl)
+static inline void cxl_stop_trace_psl9(struct cxl *cxl)
{
}
-static inline void cxl_debugfs_add_adapter_psl_regs(struct cxl *adapter,
+static inline void cxl_stop_trace_psl8(struct cxl *cxl)
+{
+}
+
+static inline void cxl_debugfs_add_adapter_regs_psl9(struct cxl *adapter,
struct dentry *dir)
{
}
-static inline void cxl_debugfs_add_adapter_xsl_regs(struct cxl *adapter,
+static inline void cxl_debugfs_add_adapter_regs_psl8(struct cxl *adapter,
struct dentry *dir)
{
}
-static inline void cxl_debugfs_add_afu_psl_regs(struct cxl_afu *afu, struct dentry *dir)
+static inline void cxl_debugfs_add_adapter_regs_xsl(struct cxl *adapter,
+ struct dentry *dir)
+{
+}
+
+static inline void cxl_debugfs_add_afu_regs_psl9(struct cxl_afu *afu, struct dentry *dir)
+{
+}
+
+static inline void cxl_debugfs_add_afu_regs_psl8(struct cxl_afu *afu, struct dentry *dir)
{
}
@@ -888,27 +1039,15 @@ int __detach_context(struct cxl_context *ctx);
/*
* This must match the layout of the H_COLLECT_CA_INT_INFO retbuf defined
* in PAPR.
- * A word about endianness: a pointer to this structure is passed when
- * calling the hcall. However, it is not a block of memory filled up by
- * the hypervisor. The return values are found in registers, and copied
- * one by one when returning from the hcall. See the end of the call to
- * plpar_hcall9() in hvCall.S
- * As a consequence:
- * - we don't need to do any endianness conversion
- * - the pid and tid are an exception. They are 32-bit values returned in
- * the same 64-bit register. So we do need to worry about byte ordering.
+ * Field pid_tid is now 'reserved' because it is no longer used on bare-metal.
+ * In a guest environment, PSL_PID_An is located in the upper 32 bits and
+ * PSL_TID_An in the lower 32 bits.
*/
struct cxl_irq_info {
u64 dsisr;
u64 dar;
u64 dsr;
-#ifndef CONFIG_CPU_LITTLE_ENDIAN
- u32 pid;
- u32 tid;
-#else
- u32 tid;
- u32 pid;
-#endif
+ u64 reserved;
u64 afu_err;
u64 errstat;
u64 proc_handle;
@@ -916,19 +1055,23 @@ struct cxl_irq_info {
};
void cxl_assign_psn_space(struct cxl_context *ctx);
-irqreturn_t cxl_irq(int irq, struct cxl_context *ctx, struct cxl_irq_info *irq_info);
+int cxl_invalidate_all_psl9(struct cxl *adapter);
+int cxl_invalidate_all_psl8(struct cxl *adapter);
+irqreturn_t cxl_irq_psl9(int irq, struct cxl_context *ctx, struct cxl_irq_info *irq_info);
+irqreturn_t cxl_irq_psl8(int irq, struct cxl_context *ctx, struct cxl_irq_info *irq_info);
+irqreturn_t cxl_fail_irq_psl(struct cxl_afu *afu, struct cxl_irq_info *irq_info);
int cxl_register_one_irq(struct cxl *adapter, irq_handler_t handler,
void *cookie, irq_hw_number_t *dest_hwirq,
unsigned int *dest_virq, const char *name);
int cxl_check_error(struct cxl_afu *afu);
int cxl_afu_slbia(struct cxl_afu *afu);
-int cxl_tlb_slb_invalidate(struct cxl *adapter);
int cxl_data_cache_flush(struct cxl *adapter);
int cxl_afu_disable(struct cxl_afu *afu);
int cxl_psl_purge(struct cxl_afu *afu);
-void cxl_native_psl_irq_dump_regs(struct cxl_context *ctx);
+void cxl_native_irq_dump_regs_psl9(struct cxl_context *ctx);
+void cxl_native_irq_dump_regs_psl8(struct cxl_context *ctx);
void cxl_native_err_irq_dump_regs(struct cxl *adapter);
int cxl_pci_vphb_add(struct cxl_afu *afu);
void cxl_pci_vphb_remove(struct cxl_afu *afu);
@@ -1024,4 +1167,10 @@ int cxl_adapter_context_lock(struct cxl *adapter);
/* Unlock the contexts-lock if taken. Warn and force unlock otherwise */
void cxl_adapter_context_unlock(struct cxl *adapter);
+/* Increases the reference count to "struct mm_struct" */
+void cxl_context_mm_count_get(struct cxl_context *ctx);
+
+/* Decrements the reference count to "struct mm_struct" */
+void cxl_context_mm_count_put(struct cxl_context *ctx);
+
#endif
diff --git a/drivers/misc/cxl/debugfs.c b/drivers/misc/cxl/debugfs.c
index 9c06ac8fa5ac..eae9d749f967 100644
--- a/drivers/misc/cxl/debugfs.c
+++ b/drivers/misc/cxl/debugfs.c
@@ -15,7 +15,13 @@
static struct dentry *cxl_debugfs;
-void cxl_stop_trace(struct cxl *adapter)
+void cxl_stop_trace_psl9(struct cxl *adapter)
+{
+ /* Stop the trace */
+ cxl_p1_write(adapter, CXL_PSL9_TRACECFG, 0x4480000000000000ULL);
+}
+
+void cxl_stop_trace_psl8(struct cxl *adapter)
{
int slice;
@@ -53,7 +59,15 @@ static struct dentry *debugfs_create_io_x64(const char *name, umode_t mode,
(void __force *)value, &fops_io_x64);
}
-void cxl_debugfs_add_adapter_psl_regs(struct cxl *adapter, struct dentry *dir)
+void cxl_debugfs_add_adapter_regs_psl9(struct cxl *adapter, struct dentry *dir)
+{
+ debugfs_create_io_x64("fir1", S_IRUSR, dir, _cxl_p1_addr(adapter, CXL_PSL9_FIR1));
+ debugfs_create_io_x64("fir2", S_IRUSR, dir, _cxl_p1_addr(adapter, CXL_PSL9_FIR2));
+ debugfs_create_io_x64("fir_cntl", S_IRUSR, dir, _cxl_p1_addr(adapter, CXL_PSL9_FIR_CNTL));
+ debugfs_create_io_x64("trace", S_IRUSR | S_IWUSR, dir, _cxl_p1_addr(adapter, CXL_PSL9_TRACECFG));
+}
+
+void cxl_debugfs_add_adapter_regs_psl8(struct cxl *adapter, struct dentry *dir)
{
debugfs_create_io_x64("fir1", S_IRUSR, dir, _cxl_p1_addr(adapter, CXL_PSL_FIR1));
debugfs_create_io_x64("fir2", S_IRUSR, dir, _cxl_p1_addr(adapter, CXL_PSL_FIR2));
@@ -61,7 +75,7 @@ void cxl_debugfs_add_adapter_psl_regs(struct cxl *adapter, struct dentry *dir)
debugfs_create_io_x64("trace", S_IRUSR | S_IWUSR, dir, _cxl_p1_addr(adapter, CXL_PSL_TRACE));
}
-void cxl_debugfs_add_adapter_xsl_regs(struct cxl *adapter, struct dentry *dir)
+void cxl_debugfs_add_adapter_regs_xsl(struct cxl *adapter, struct dentry *dir)
{
debugfs_create_io_x64("fec", S_IRUSR, dir, _cxl_p1_addr(adapter, CXL_XSL_FEC));
}
@@ -82,8 +96,8 @@ int cxl_debugfs_adapter_add(struct cxl *adapter)
debugfs_create_io_x64("err_ivte", S_IRUSR, dir, _cxl_p1_addr(adapter, CXL_PSL_ErrIVTE));
- if (adapter->native->sl_ops->debugfs_add_adapter_sl_regs)
- adapter->native->sl_ops->debugfs_add_adapter_sl_regs(adapter, dir);
+ if (adapter->native->sl_ops->debugfs_add_adapter_regs)
+ adapter->native->sl_ops->debugfs_add_adapter_regs(adapter, dir);
return 0;
}
@@ -92,8 +106,16 @@ void cxl_debugfs_adapter_remove(struct cxl *adapter)
debugfs_remove_recursive(adapter->debugfs);
}
-void cxl_debugfs_add_afu_psl_regs(struct cxl_afu *afu, struct dentry *dir)
+void cxl_debugfs_add_afu_regs_psl9(struct cxl_afu *afu, struct dentry *dir)
{
+ debugfs_create_io_x64("serr", S_IRUSR, dir, _cxl_p1n_addr(afu, CXL_PSL_SERR_An));
+}
+
+void cxl_debugfs_add_afu_regs_psl8(struct cxl_afu *afu, struct dentry *dir)
+{
+ debugfs_create_io_x64("sstp0", S_IRUSR, dir, _cxl_p2n_addr(afu, CXL_SSTP0_An));
+ debugfs_create_io_x64("sstp1", S_IRUSR, dir, _cxl_p2n_addr(afu, CXL_SSTP1_An));
+
debugfs_create_io_x64("fir", S_IRUSR, dir, _cxl_p1n_addr(afu, CXL_PSL_FIR_SLICE_An));
debugfs_create_io_x64("serr", S_IRUSR, dir, _cxl_p1n_addr(afu, CXL_PSL_SERR_An));
debugfs_create_io_x64("afu_debug", S_IRUSR, dir, _cxl_p1n_addr(afu, CXL_AFU_DEBUG_An));
@@ -117,12 +139,11 @@ int cxl_debugfs_afu_add(struct cxl_afu *afu)
debugfs_create_io_x64("sr", S_IRUSR, dir, _cxl_p1n_addr(afu, CXL_PSL_SR_An));
debugfs_create_io_x64("dsisr", S_IRUSR, dir, _cxl_p2n_addr(afu, CXL_PSL_DSISR_An));
debugfs_create_io_x64("dar", S_IRUSR, dir, _cxl_p2n_addr(afu, CXL_PSL_DAR_An));
- debugfs_create_io_x64("sstp0", S_IRUSR, dir, _cxl_p2n_addr(afu, CXL_SSTP0_An));
- debugfs_create_io_x64("sstp1", S_IRUSR, dir, _cxl_p2n_addr(afu, CXL_SSTP1_An));
+
debugfs_create_io_x64("err_status", S_IRUSR, dir, _cxl_p2n_addr(afu, CXL_PSL_ErrStat_An));
- if (afu->adapter->native->sl_ops->debugfs_add_afu_sl_regs)
- afu->adapter->native->sl_ops->debugfs_add_afu_sl_regs(afu, dir);
+ if (afu->adapter->native->sl_ops->debugfs_add_afu_regs)
+ afu->adapter->native->sl_ops->debugfs_add_afu_regs(afu, dir);
return 0;
}
diff --git a/drivers/misc/cxl/fault.c b/drivers/misc/cxl/fault.c
index 2fa015c05561..5344448f514e 100644
--- a/drivers/misc/cxl/fault.c
+++ b/drivers/misc/cxl/fault.c
@@ -146,108 +146,67 @@ static void cxl_handle_page_fault(struct cxl_context *ctx,
return cxl_ack_ae(ctx);
}
- /*
- * update_mmu_cache() will not have loaded the hash since current->trap
- * is not a 0x400 or 0x300, so just call hash_page_mm() here.
- */
- access = _PAGE_PRESENT | _PAGE_READ;
- if (dsisr & CXL_PSL_DSISR_An_S)
- access |= _PAGE_WRITE;
-
- access |= _PAGE_PRIVILEGED;
- if ((!ctx->kernel) || (REGION_ID(dar) == USER_REGION_ID))
- access &= ~_PAGE_PRIVILEGED;
-
- if (dsisr & DSISR_NOHPTE)
- inv_flags |= HPTE_NOHPTE_UPDATE;
-
- local_irq_save(flags);
- hash_page_mm(mm, dar, access, 0x300, inv_flags);
- local_irq_restore(flags);
-
+ if (!radix_enabled()) {
+ /*
+ * update_mmu_cache() will not have loaded the hash since current->trap
+ * is not a 0x400 or 0x300, so just call hash_page_mm() here.
+ */
+ access = _PAGE_PRESENT | _PAGE_READ;
+ if (dsisr & CXL_PSL_DSISR_An_S)
+ access |= _PAGE_WRITE;
+
+ access |= _PAGE_PRIVILEGED;
+ if ((!ctx->kernel) || (REGION_ID(dar) == USER_REGION_ID))
+ access &= ~_PAGE_PRIVILEGED;
+
+ if (dsisr & DSISR_NOHPTE)
+ inv_flags |= HPTE_NOHPTE_UPDATE;
+
+ local_irq_save(flags);
+ hash_page_mm(mm, dar, access, 0x300, inv_flags);
+ local_irq_restore(flags);
+ }
pr_devel("Page fault successfully handled for pe: %i!\n", ctx->pe);
cxl_ops->ack_irq(ctx, CXL_PSL_TFC_An_R, 0);
}
/*
- * Returns the mm_struct corresponding to the context ctx via ctx->pid
- * In case the task has exited we use the task group leader accessible
- * via ctx->glpid to find the next task in the thread group that has a
- * valid mm_struct associated with it. If a task with valid mm_struct
- * is found the ctx->pid is updated to use the task struct for subsequent
- * translations. In case no valid mm_struct is found in the task group to
- * service the fault a NULL is returned.
+ * Returns the mm_struct corresponding to the context ctx.
+ * If mm_users is already 0, the context may be in the process of being
+ * closed and NULL is returned instead.
*/
static struct mm_struct *get_mem_context(struct cxl_context *ctx)
{
- struct task_struct *task = NULL;
- struct mm_struct *mm = NULL;
- struct pid *old_pid = ctx->pid;
-
- if (old_pid == NULL) {
- pr_warn("%s: Invalid context for pe=%d\n",
- __func__, ctx->pe);
+ if (ctx->mm == NULL)
return NULL;
- }
- task = get_pid_task(old_pid, PIDTYPE_PID);
+ if (!atomic_inc_not_zero(&ctx->mm->mm_users))
+ return NULL;
- /*
- * pid_alive may look racy but this saves us from costly
- * get_task_mm when the task is a zombie. In worst case
- * we may think a task is alive, which is about to die
- * but get_task_mm will return NULL.
- */
- if (task != NULL && pid_alive(task))
- mm = get_task_mm(task);
+ return ctx->mm;
+}
- /* release the task struct that was taken earlier */
- if (task)
- put_task_struct(task);
- else
- pr_devel("%s: Context owning pid=%i for pe=%i dead\n",
- __func__, pid_nr(old_pid), ctx->pe);
-
- /*
- * If we couldn't find the mm context then use the group
- * leader to iterate over the task group and find a task
- * that gives us mm_struct.
- */
- if (unlikely(mm == NULL && ctx->glpid != NULL)) {
-
- rcu_read_lock();
- task = pid_task(ctx->glpid, PIDTYPE_PID);
- if (task)
- do {
- mm = get_task_mm(task);
- if (mm) {
- ctx->pid = get_task_pid(task,
- PIDTYPE_PID);
- break;
- }
- task = next_thread(task);
- } while (task && !thread_group_leader(task));
- rcu_read_unlock();
-
- /* check if we switched pid */
- if (ctx->pid != old_pid) {
- if (mm)
- pr_devel("%s:pe=%i switch pid %i->%i\n",
- __func__, ctx->pe, pid_nr(old_pid),
- pid_nr(ctx->pid));
- else
- pr_devel("%s:Cannot find mm for pid=%i\n",
- __func__, pid_nr(old_pid));
-
- /* drop the reference to older pid */
- put_pid(old_pid);
- }
- }
+static bool cxl_is_segment_miss(struct cxl_context *ctx, u64 dsisr)
+{
+ if ((cxl_is_psl8(ctx->afu)) && (dsisr & CXL_PSL_DSISR_An_DS))
+ return true;
- return mm;
+ return false;
}
+static bool cxl_is_page_fault(struct cxl_context *ctx, u64 dsisr)
+{
+ if ((cxl_is_psl8(ctx->afu)) && (dsisr & CXL_PSL_DSISR_An_DM))
+ return true;
+
+ if ((cxl_is_psl9(ctx->afu)) &&
+ ((dsisr & CXL_PSL9_DSISR_An_CO_MASK) &
+ (CXL_PSL9_DSISR_An_PF_SLR | CXL_PSL9_DSISR_An_PF_RGC |
+ CXL_PSL9_DSISR_An_PF_RGP | CXL_PSL9_DSISR_An_PF_HRH |
+ CXL_PSL9_DSISR_An_PF_STEG)))
+ return true;
+ return false;
+}
void cxl_handle_fault(struct work_struct *fault_work)
{
@@ -282,7 +241,6 @@ void cxl_handle_fault(struct work_struct *fault_work)
if (!ctx->kernel) {
mm = get_mem_context(ctx);
- /* indicates all the thread in task group have exited */
if (mm == NULL) {
pr_devel("%s: unable to get mm for pe=%d pid=%i\n",
__func__, ctx->pe, pid_nr(ctx->pid));
@@ -294,9 +252,9 @@ void cxl_handle_fault(struct work_struct *fault_work)
}
}
- if (dsisr & CXL_PSL_DSISR_An_DS)
+ if (cxl_is_segment_miss(ctx, dsisr))
cxl_handle_segment_miss(ctx, mm, dar);
- else if (dsisr & CXL_PSL_DSISR_An_DM)
+ else if (cxl_is_page_fault(ctx, dsisr))
cxl_handle_page_fault(ctx, mm, dsisr, dar);
else
WARN(1, "cxl_handle_fault has nothing to handle\n");
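
With the new get_mem_context() above, a successful call means an mm_users reference was taken via atomic_inc_not_zero(), so every caller has to balance it with mmput() once it is done with the mm. A minimal usage sketch of that pairing (the caller below is hypothetical and only illustrates the contract):

static void example_fault_user(struct cxl_context *ctx)
{
	struct mm_struct *mm = get_mem_context(ctx);

	if (mm == NULL)
		return;		/* context is being torn down */

	/* ... resolve the fault against mm ... */

	mmput(mm);		/* drop the mm_users reference taken by get_mem_context() */
}
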
diff --git a/drivers/misc/cxl/file.c b/drivers/misc/cxl/file.c
index e7139c76f961..17b433f1ce23 100644
--- a/drivers/misc/cxl/file.c
+++ b/drivers/misc/cxl/file.c
@@ -18,6 +18,7 @@
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/slab.h>
+#include <linux/sched/mm.h>
#include <asm/cputable.h>
#include <asm/current.h>
#include <asm/copro.h>
@@ -216,8 +217,16 @@ static long afu_ioctl_start_work(struct cxl_context *ctx,
* process is still accessible.
*/
ctx->pid = get_task_pid(current, PIDTYPE_PID);
- ctx->glpid = get_task_pid(current->group_leader, PIDTYPE_PID);
+ /* acquire a reference to the task's mm */
+ ctx->mm = get_task_mm(current);
+
+ /* ensure this mm_struct can't be freed */
+ cxl_context_mm_count_get(ctx);
+
+ /* decrement the use count */
+ if (ctx->mm)
+ mmput(ctx->mm);
trace_cxl_attach(ctx, work.work_element_descriptor, work.num_interrupts, amr);
@@ -225,9 +234,9 @@ static long afu_ioctl_start_work(struct cxl_context *ctx,
amr))) {
afu_release_irqs(ctx, ctx);
cxl_adapter_context_put(ctx->afu->adapter);
- put_pid(ctx->glpid);
put_pid(ctx->pid);
- ctx->glpid = ctx->pid = NULL;
+ ctx->pid = NULL;
+ cxl_context_mm_count_put(ctx);
goto out;
}
diff --git a/drivers/misc/cxl/guest.c b/drivers/misc/cxl/guest.c
index e04bc4ddfd74..f58b4b6c79f2 100644
--- a/drivers/misc/cxl/guest.c
+++ b/drivers/misc/cxl/guest.c
@@ -169,7 +169,7 @@ static irqreturn_t guest_psl_irq(int irq, void *data)
return IRQ_HANDLED;
}
- rc = cxl_irq(irq, ctx, &irq_info);
+ rc = cxl_irq_psl8(irq, ctx, &irq_info);
return rc;
}
@@ -551,13 +551,13 @@ static int attach_afu_directed(struct cxl_context *ctx, u64 wed, u64 amr)
elem->common.tid = cpu_to_be32(0); /* Unused */
elem->common.pid = cpu_to_be32(pid);
elem->common.csrp = cpu_to_be64(0); /* disable */
- elem->common.aurp0 = cpu_to_be64(0); /* disable */
- elem->common.aurp1 = cpu_to_be64(0); /* disable */
+ elem->common.u.psl8.aurp0 = cpu_to_be64(0); /* disable */
+ elem->common.u.psl8.aurp1 = cpu_to_be64(0); /* disable */
cxl_prefault(ctx, wed);
- elem->common.sstp0 = cpu_to_be64(ctx->sstp0);
- elem->common.sstp1 = cpu_to_be64(ctx->sstp1);
+ elem->common.u.psl8.sstp0 = cpu_to_be64(ctx->sstp0);
+ elem->common.u.psl8.sstp1 = cpu_to_be64(ctx->sstp1);
/*
* Ensure we have at least one interrupt allocated to take faults for
diff --git a/drivers/misc/cxl/hcalls.c b/drivers/misc/cxl/hcalls.c
index d6d11f4056d7..9b8bb0f80c3b 100644
--- a/drivers/misc/cxl/hcalls.c
+++ b/drivers/misc/cxl/hcalls.c
@@ -413,9 +413,9 @@ long cxl_h_collect_int_info(u64 unit_address, u64 process_token,
switch (rc) {
case H_SUCCESS: /* The interrupt info is returned in return registers. */
- pr_devel("dsisr:%#llx, dar:%#llx, dsr:%#llx, pid:%u, tid:%u, afu_err:%#llx, errstat:%#llx\n",
- info->dsisr, info->dar, info->dsr, info->pid,
- info->tid, info->afu_err, info->errstat);
+ pr_devel("dsisr:%#llx, dar:%#llx, dsr:%#llx, pid_tid:%#llx, afu_err:%#llx, errstat:%#llx\n",
+ info->dsisr, info->dar, info->dsr, info->reserved,
+ info->afu_err, info->errstat);
return 0;
case H_PARAMETER: /* An incorrect parameter was supplied. */
return -EINVAL;
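
As the updated comment in cxl.h notes, a guest still receives PSL_PID_An in the upper 32 bits and PSL_TID_An in the lower 32 bits of the single 64-bit slot that is now called 'reserved'. A minimal decode sketch for a guest-side consumer (the helper name is hypothetical and not part of this patch):

static inline void cxl_guest_decode_pid_tid(const struct cxl_irq_info *info,
					    u32 *pid, u32 *tid)
{
	*pid = info->reserved >> 32;		/* PSL_PID_An */
	*tid = info->reserved & 0xffffffff;	/* PSL_TID_An */
}
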
diff --git a/drivers/misc/cxl/irq.c b/drivers/misc/cxl/irq.c
index 1a402bbed687..ce08a9f22308 100644
--- a/drivers/misc/cxl/irq.c
+++ b/drivers/misc/cxl/irq.c
@@ -34,7 +34,58 @@ static irqreturn_t schedule_cxl_fault(struct cxl_context *ctx, u64 dsisr, u64 da
return IRQ_HANDLED;
}
-irqreturn_t cxl_irq(int irq, struct cxl_context *ctx, struct cxl_irq_info *irq_info)
+irqreturn_t cxl_irq_psl9(int irq, struct cxl_context *ctx, struct cxl_irq_info *irq_info)
+{
+ u64 dsisr, dar;
+
+ dsisr = irq_info->dsisr;
+ dar = irq_info->dar;
+
+ trace_cxl_psl9_irq(ctx, irq, dsisr, dar);
+
+ pr_devel("CXL interrupt %i for afu pe: %i DSISR: %#llx DAR: %#llx\n", irq, ctx->pe, dsisr, dar);
+
+ if (dsisr & CXL_PSL9_DSISR_An_TF) {
+ pr_devel("CXL interrupt: Scheduling translation fault handling for later (pe: %i)\n", ctx->pe);
+ return schedule_cxl_fault(ctx, dsisr, dar);
+ }
+
+ if (dsisr & CXL_PSL9_DSISR_An_PE)
+ return cxl_ops->handle_psl_slice_error(ctx, dsisr,
+ irq_info->errstat);
+ if (dsisr & CXL_PSL9_DSISR_An_AE) {
+ pr_devel("CXL interrupt: AFU Error 0x%016llx\n", irq_info->afu_err);
+
+ if (ctx->pending_afu_err) {
+ /*
+ * This shouldn't happen - the PSL treats these errors
+ * as fatal and will have reset the AFU, so there's not
+ * much point buffering multiple AFU errors.
+ * OTOH if we DO ever see a storm of these come in it's
+ * probably best that we log them somewhere:
+ */
+ dev_err_ratelimited(&ctx->afu->dev, "CXL AFU Error undelivered to pe %i: 0x%016llx\n",
+ ctx->pe, irq_info->afu_err);
+ } else {
+ spin_lock(&ctx->lock);
+ ctx->afu_err = irq_info->afu_err;
+ ctx->pending_afu_err = 1;
+ spin_unlock(&ctx->lock);
+
+ wake_up_all(&ctx->wq);
+ }
+
+ cxl_ops->ack_irq(ctx, CXL_PSL_TFC_An_A, 0);
+ return IRQ_HANDLED;
+ }
+ if (dsisr & CXL_PSL9_DSISR_An_OC)
+ pr_devel("CXL interrupt: OS Context Warning\n");
+
+ WARN(1, "Unhandled CXL PSL IRQ\n");
+ return IRQ_HANDLED;
+}
+
+irqreturn_t cxl_irq_psl8(int irq, struct cxl_context *ctx, struct cxl_irq_info *irq_info)
{
u64 dsisr, dar;
diff --git a/drivers/misc/cxl/main.c b/drivers/misc/cxl/main.c
index b0b6ed31918e..1703655072b1 100644
--- a/drivers/misc/cxl/main.c
+++ b/drivers/misc/cxl/main.c
@@ -59,16 +59,10 @@ int cxl_afu_slbia(struct cxl_afu *afu)
static inline void _cxl_slbia(struct cxl_context *ctx, struct mm_struct *mm)
{
- struct task_struct *task;
unsigned long flags;
- if (!(task = get_pid_task(ctx->pid, PIDTYPE_PID))) {
- pr_devel("%s unable to get task %i\n",
- __func__, pid_nr(ctx->pid));
- return;
- }
- if (task->mm != mm)
- goto out_put;
+ if (ctx->mm != mm)
+ return;
pr_devel("%s matched mm - card: %i afu: %i pe: %i\n", __func__,
ctx->afu->adapter->adapter_num, ctx->afu->slice, ctx->pe);
@@ -79,8 +73,6 @@ static inline void _cxl_slbia(struct cxl_context *ctx, struct mm_struct *mm)
spin_unlock_irqrestore(&ctx->sste_lock, flags);
mb();
cxl_afu_slbia(ctx->afu);
-out_put:
- put_task_struct(task);
}
static inline void cxl_slbia_core(struct mm_struct *mm)
diff --git a/drivers/misc/cxl/native.c b/drivers/misc/cxl/native.c
index 7ae710585267..871a2f09c718 100644
--- a/drivers/misc/cxl/native.c
+++ b/drivers/misc/cxl/native.c
@@ -95,12 +95,23 @@ int cxl_afu_disable(struct cxl_afu *afu)
/* This will disable as well as reset */
static int native_afu_reset(struct cxl_afu *afu)
{
+ int rc;
+ u64 serr;
+
pr_devel("AFU reset request\n");
- return afu_control(afu, CXL_AFU_Cntl_An_RA, 0,
+ rc = afu_control(afu, CXL_AFU_Cntl_An_RA, 0,
CXL_AFU_Cntl_An_RS_Complete | CXL_AFU_Cntl_An_ES_Disabled,
CXL_AFU_Cntl_An_RS_MASK | CXL_AFU_Cntl_An_ES_MASK,
false);
+
+ /* Re-enable any masked interrupts */
+ serr = cxl_p1n_read(afu, CXL_PSL_SERR_An);
+ serr &= ~CXL_PSL_SERR_An_IRQ_MASKS;
+ cxl_p1n_write(afu, CXL_PSL_SERR_An, serr);
+
+ return rc;
}
static int native_afu_check_and_enable(struct cxl_afu *afu)
@@ -120,6 +131,7 @@ int cxl_psl_purge(struct cxl_afu *afu)
u64 AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
u64 dsisr, dar;
u64 start, end;
+ u64 trans_fault = 0x0ULL;
unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
int rc = 0;
@@ -127,6 +139,11 @@ int cxl_psl_purge(struct cxl_afu *afu)
pr_devel("PSL purge request\n");
+ if (cxl_is_psl8(afu))
+ trans_fault = CXL_PSL_DSISR_TRANS;
+ if (cxl_is_psl9(afu))
+ trans_fault = CXL_PSL9_DSISR_An_TF;
+
if (!cxl_ops->link_ok(afu->adapter, afu)) {
dev_warn(&afu->dev, "PSL Purge called with link down, ignoring\n");
rc = -EIO;
@@ -155,13 +172,17 @@ int cxl_psl_purge(struct cxl_afu *afu)
}
dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
- pr_devel_ratelimited("PSL purging... PSL_CNTL: 0x%016llx PSL_DSISR: 0x%016llx\n", PSL_CNTL, dsisr);
- if (dsisr & CXL_PSL_DSISR_TRANS) {
+ pr_devel_ratelimited("PSL purging... PSL_CNTL: 0x%016llx PSL_DSISR: 0x%016llx\n",
+ PSL_CNTL, dsisr);
+
+ if (dsisr & trans_fault) {
dar = cxl_p2n_read(afu, CXL_PSL_DAR_An);
- dev_notice(&afu->dev, "PSL purge terminating pending translation, DSISR: 0x%016llx, DAR: 0x%016llx\n", dsisr, dar);
+ dev_notice(&afu->dev, "PSL purge terminating pending translation, DSISR: 0x%016llx, DAR: 0x%016llx\n",
+ dsisr, dar);
cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_AE);
} else if (dsisr) {
- dev_notice(&afu->dev, "PSL purge acknowledging pending non-translation fault, DSISR: 0x%016llx\n", dsisr);
+ dev_notice(&afu->dev, "PSL purge acknowledging pending non-translation fault, DSISR: 0x%016llx\n",
+ dsisr);
cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_A);
} else {
cpu_relax();
@@ -196,7 +217,7 @@ static int spa_max_procs(int spa_size)
return ((spa_size / 8) - 96) / 17;
}
-int cxl_alloc_spa(struct cxl_afu *afu)
+static int cxl_alloc_spa(struct cxl_afu *afu, int mode)
{
unsigned spa_size;
@@ -209,7 +230,8 @@ int cxl_alloc_spa(struct cxl_afu *afu)
if (spa_size > 0x100000) {
dev_warn(&afu->dev, "num_of_processes too large for the SPA, limiting to %i (0x%x)\n",
afu->native->spa_max_procs, afu->native->spa_size);
- afu->num_procs = afu->native->spa_max_procs;
+ if (mode != CXL_MODE_DEDICATED)
+ afu->num_procs = afu->native->spa_max_procs;
break;
}
@@ -258,7 +280,37 @@ void cxl_release_spa(struct cxl_afu *afu)
}
}
-int cxl_tlb_slb_invalidate(struct cxl *adapter)
+/*
+ * Invalidation of all ERAT entries is no longer required by CAIA2. Use
+ * only for debug.
+ */
+int cxl_invalidate_all_psl9(struct cxl *adapter)
+{
+ unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
+ u64 ierat;
+
+ pr_devel("CXL adapter - invalidation of all ERAT entries\n");
+
+ /* Invalidates all ERAT entries for Radix or HPT */
+ ierat = CXL_XSL9_IERAT_IALL;
+ if (radix_enabled())
+ ierat |= CXL_XSL9_IERAT_INVR;
+ cxl_p1_write(adapter, CXL_XSL9_IERAT, ierat);
+
+ while (cxl_p1_read(adapter, CXL_XSL9_IERAT) & CXL_XSL9_IERAT_IINPROG) {
+ if (time_after_eq(jiffies, timeout)) {
+ dev_warn(&adapter->dev,
+ "WARNING: CXL adapter invalidation of all ERAT entries timed out!\n");
+ return -EBUSY;
+ }
+ if (!cxl_ops->link_ok(adapter, NULL))
+ return -EIO;
+ cpu_relax();
+ }
+ return 0;
+}
+
+int cxl_invalidate_all_psl8(struct cxl *adapter)
{
unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
@@ -466,7 +518,8 @@ static int remove_process_element(struct cxl_context *ctx)
if (!rc)
ctx->pe_inserted = false;
- slb_invalid(ctx);
+ if (cxl_is_power8())
+ slb_invalid(ctx);
pr_devel("%s Remove pe: %i finished\n", __func__, ctx->pe);
mutex_unlock(&ctx->afu->native->spa_mutex);
@@ -493,13 +546,14 @@ static int activate_afu_directed(struct cxl_afu *afu)
afu->num_procs = afu->max_procs_virtualised;
if (afu->native->spa == NULL) {
- if (cxl_alloc_spa(afu))
+ if (cxl_alloc_spa(afu, CXL_MODE_DIRECTED))
return -ENOMEM;
}
attach_spa(afu);
cxl_p1n_write(afu, CXL_PSL_SCNTL_An, CXL_PSL_SCNTL_An_PM_AFU);
- cxl_p1n_write(afu, CXL_PSL_AMOR_An, 0xFFFFFFFFFFFFFFFFULL);
+ if (cxl_is_power8())
+ cxl_p1n_write(afu, CXL_PSL_AMOR_An, 0xFFFFFFFFFFFFFFFFULL);
cxl_p1n_write(afu, CXL_PSL_ID_An, CXL_PSL_ID_An_F | CXL_PSL_ID_An_L);
afu->current_mode = CXL_MODE_DIRECTED;
@@ -542,10 +596,19 @@ static u64 calculate_sr(struct cxl_context *ctx)
sr |= (mfmsr() & MSR_SF) | CXL_PSL_SR_An_HV;
} else {
sr |= CXL_PSL_SR_An_PR | CXL_PSL_SR_An_R;
- sr &= ~(CXL_PSL_SR_An_HV);
+ if (radix_enabled())
+ sr |= CXL_PSL_SR_An_HV;
+ else
+ sr &= ~(CXL_PSL_SR_An_HV);
if (!test_tsk_thread_flag(current, TIF_32BIT))
sr |= CXL_PSL_SR_An_SF;
}
+ if (cxl_is_psl9(ctx->afu)) {
+ if (radix_enabled())
+ sr |= CXL_PSL_SR_An_XLAT_ror;
+ else
+ sr |= CXL_PSL_SR_An_XLAT_hpt;
+ }
return sr;
}
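
The translation-related SR bits now vary along two axes, MMU mode and PSL version. A summary of the user (problem-state) combinations configured by calculate_sr() above, written as a comment block; this is a reading of the code paths, independent of which combinations actually occur on real hardware (SF is additionally set for 64-bit tasks):

/*
 * User contexts after this change:
 *
 *   MMU mode | PSL8               | PSL9
 *   ---------+--------------------+-----------------------------
 *   hash     | PR, R, HV cleared  | PR, R, HV cleared, XLAT_hpt
 *   radix    | PR, R, HV set      | PR, R, HV set,     XLAT_ror
 */
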
@@ -578,7 +641,71 @@ static void update_ivtes_directed(struct cxl_context *ctx)
WARN_ON(add_process_element(ctx));
}
-static int attach_afu_directed(struct cxl_context *ctx, u64 wed, u64 amr)
+static int process_element_entry_psl9(struct cxl_context *ctx, u64 wed, u64 amr)
+{
+ u32 pid;
+
+ cxl_assign_psn_space(ctx);
+
+ ctx->elem->ctxtime = 0; /* disable */
+ ctx->elem->lpid = cpu_to_be32(mfspr(SPRN_LPID));
+ ctx->elem->haurp = 0; /* disable */
+
+ if (ctx->kernel)
+ pid = 0;
+ else {
+ if (ctx->mm == NULL) {
+ pr_devel("%s: unable to get mm for pe=%d pid=%i\n",
+ __func__, ctx->pe, pid_nr(ctx->pid));
+ return -EINVAL;
+ }
+ pid = ctx->mm->context.id;
+ }
+
+ ctx->elem->common.tid = 0;
+ ctx->elem->common.pid = cpu_to_be32(pid);
+
+ ctx->elem->sr = cpu_to_be64(calculate_sr(ctx));
+
+ ctx->elem->common.csrp = 0; /* disable */
+
+ cxl_prefault(ctx, wed);
+
+ /*
+ * Ensure we have the multiplexed PSL interrupt set up to take faults
+ * for kernel contexts that may not have allocated any AFU IRQs at all:
+ */
+ if (ctx->irqs.range[0] == 0) {
+ ctx->irqs.offset[0] = ctx->afu->native->psl_hwirq;
+ ctx->irqs.range[0] = 1;
+ }
+
+ ctx->elem->common.amr = cpu_to_be64(amr);
+ ctx->elem->common.wed = cpu_to_be64(wed);
+
+ return 0;
+}
+
+int cxl_attach_afu_directed_psl9(struct cxl_context *ctx, u64 wed, u64 amr)
+{
+ int result;
+
+ /* fill the process element entry */
+ result = process_element_entry_psl9(ctx, wed, amr);
+ if (result)
+ return result;
+
+ update_ivtes_directed(ctx);
+
+ /* first guy needs to enable */
+ result = cxl_ops->afu_check_and_enable(ctx->afu);
+ if (result)
+ return result;
+
+ return add_process_element(ctx);
+}
+
+int cxl_attach_afu_directed_psl8(struct cxl_context *ctx, u64 wed, u64 amr)
{
u32 pid;
int result;
@@ -588,7 +715,7 @@ static int attach_afu_directed(struct cxl_context *ctx, u64 wed, u64 amr)
ctx->elem->ctxtime = 0; /* disable */
ctx->elem->lpid = cpu_to_be32(mfspr(SPRN_LPID));
ctx->elem->haurp = 0; /* disable */
- ctx->elem->sdr = cpu_to_be64(mfspr(SPRN_SDR1));
+ ctx->elem->u.sdr = cpu_to_be64(mfspr(SPRN_SDR1));
pid = current->pid;
if (ctx->kernel)
@@ -599,13 +726,13 @@ static int attach_afu_directed(struct cxl_context *ctx, u64 wed, u64 amr)
ctx->elem->sr = cpu_to_be64(calculate_sr(ctx));
ctx->elem->common.csrp = 0; /* disable */
- ctx->elem->common.aurp0 = 0; /* disable */
- ctx->elem->common.aurp1 = 0; /* disable */
+ ctx->elem->common.u.psl8.aurp0 = 0; /* disable */
+ ctx->elem->common.u.psl8.aurp1 = 0; /* disable */
cxl_prefault(ctx, wed);
- ctx->elem->common.sstp0 = cpu_to_be64(ctx->sstp0);
- ctx->elem->common.sstp1 = cpu_to_be64(ctx->sstp1);
+ ctx->elem->common.u.psl8.sstp0 = cpu_to_be64(ctx->sstp0);
+ ctx->elem->common.u.psl8.sstp1 = cpu_to_be64(ctx->sstp1);
/*
* Ensure we have the multiplexed PSL interrupt set up to take faults
@@ -671,7 +798,33 @@ static int deactivate_afu_directed(struct cxl_afu *afu)
return 0;
}
-static int activate_dedicated_process(struct cxl_afu *afu)
+int cxl_activate_dedicated_process_psl9(struct cxl_afu *afu)
+{
+ dev_info(&afu->dev, "Activating dedicated process mode\n");
+
+ /*
+ * If the XSL is set to dedicated mode (set in the PSL_SCNTL register), the
+ * XSL and AFU are programmed to work with a single context.
+ * The context information should be configured in the SPA area
+ * index 0 (so PSL_SPAP must be configured before enabling the
+ * AFU).
+ */
+ afu->num_procs = 1;
+ if (afu->native->spa == NULL) {
+ if (cxl_alloc_spa(afu, CXL_MODE_DEDICATED))
+ return -ENOMEM;
+ }
+ attach_spa(afu);
+
+ cxl_p1n_write(afu, CXL_PSL_SCNTL_An, CXL_PSL_SCNTL_An_PM_Process);
+ cxl_p1n_write(afu, CXL_PSL_ID_An, CXL_PSL_ID_An_F | CXL_PSL_ID_An_L);
+
+ afu->current_mode = CXL_MODE_DEDICATED;
+
+ return cxl_chardev_d_afu_add(afu);
+}
+
+int cxl_activate_dedicated_process_psl8(struct cxl_afu *afu)
{
dev_info(&afu->dev, "Activating dedicated process mode\n");
@@ -694,7 +847,17 @@ static int activate_dedicated_process(struct cxl_afu *afu)
return cxl_chardev_d_afu_add(afu);
}
-static void update_ivtes_dedicated(struct cxl_context *ctx)
+void cxl_update_dedicated_ivtes_psl9(struct cxl_context *ctx)
+{
+ int r;
+
+ for (r = 0; r < CXL_IRQ_RANGES; r++) {
+ ctx->elem->ivte_offsets[r] = cpu_to_be16(ctx->irqs.offset[r]);
+ ctx->elem->ivte_ranges[r] = cpu_to_be16(ctx->irqs.range[r]);
+ }
+}
+
+void cxl_update_dedicated_ivtes_psl8(struct cxl_context *ctx)
{
struct cxl_afu *afu = ctx->afu;
@@ -710,7 +873,27 @@ static void update_ivtes_dedicated(struct cxl_context *ctx)
((u64)ctx->irqs.range[3] & 0xffff));
}
-static int attach_dedicated(struct cxl_context *ctx, u64 wed, u64 amr)
+int cxl_attach_dedicated_process_psl9(struct cxl_context *ctx, u64 wed, u64 amr)
+{
+ struct cxl_afu *afu = ctx->afu;
+ int result;
+
+ /* fill the process element entry */
+ result = process_element_entry_psl9(ctx, wed, amr);
+ if (result)
+ return result;
+
+ if (ctx->afu->adapter->native->sl_ops->update_dedicated_ivtes)
+ afu->adapter->native->sl_ops->update_dedicated_ivtes(ctx);
+
+ result = cxl_ops->afu_reset(afu);
+ if (result)
+ return result;
+
+ return afu_enable(afu);
+}
+
+int cxl_attach_dedicated_process_psl8(struct cxl_context *ctx, u64 wed, u64 amr)
{
struct cxl_afu *afu = ctx->afu;
u64 pid;
@@ -728,7 +911,8 @@ static int attach_dedicated(struct cxl_context *ctx, u64 wed, u64 amr)
cxl_prefault(ctx, wed);
- update_ivtes_dedicated(ctx);
+ if (ctx->afu->adapter->native->sl_ops->update_dedicated_ivtes)
+ afu->adapter->native->sl_ops->update_dedicated_ivtes(ctx);
cxl_p2n_write(afu, CXL_PSL_AMR_An, amr);
@@ -778,8 +962,9 @@ static int native_afu_activate_mode(struct cxl_afu *afu, int mode)
if (mode == CXL_MODE_DIRECTED)
return activate_afu_directed(afu);
- if (mode == CXL_MODE_DEDICATED)
- return activate_dedicated_process(afu);
+ if ((mode == CXL_MODE_DEDICATED) &&
+ (afu->adapter->native->sl_ops->activate_dedicated_process))
+ return afu->adapter->native->sl_ops->activate_dedicated_process(afu);
return -EINVAL;
}
@@ -793,11 +978,13 @@ static int native_attach_process(struct cxl_context *ctx, bool kernel,
}
ctx->kernel = kernel;
- if (ctx->afu->current_mode == CXL_MODE_DIRECTED)
- return attach_afu_directed(ctx, wed, amr);
+ if ((ctx->afu->current_mode == CXL_MODE_DIRECTED) &&
+ (ctx->afu->adapter->native->sl_ops->attach_afu_directed))
+ return ctx->afu->adapter->native->sl_ops->attach_afu_directed(ctx, wed, amr);
- if (ctx->afu->current_mode == CXL_MODE_DEDICATED)
- return attach_dedicated(ctx, wed, amr);
+ if ((ctx->afu->current_mode == CXL_MODE_DEDICATED) &&
+ (ctx->afu->adapter->native->sl_ops->attach_dedicated_process))
+ return ctx->afu->adapter->native->sl_ops->attach_dedicated_process(ctx, wed, amr);
return -EINVAL;
}
@@ -830,8 +1017,9 @@ static void native_update_ivtes(struct cxl_context *ctx)
{
if (ctx->afu->current_mode == CXL_MODE_DIRECTED)
return update_ivtes_directed(ctx);
- if (ctx->afu->current_mode == CXL_MODE_DEDICATED)
- return update_ivtes_dedicated(ctx);
+ if ((ctx->afu->current_mode == CXL_MODE_DEDICATED) &&
+ (ctx->afu->adapter->native->sl_ops->update_dedicated_ivtes))
+ return ctx->afu->adapter->native->sl_ops->update_dedicated_ivtes(ctx);
WARN(1, "native_update_ivtes: Bad mode\n");
}
@@ -859,8 +1047,6 @@ static int native_detach_process(struct cxl_context *ctx)
static int native_get_irq_info(struct cxl_afu *afu, struct cxl_irq_info *info)
{
- u64 pidtid;
-
/* If the adapter has gone away, we can't get any meaningful
* information.
*/
@@ -869,10 +1055,8 @@ static int native_get_irq_info(struct cxl_afu *afu, struct cxl_irq_info *info)
info->dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
info->dar = cxl_p2n_read(afu, CXL_PSL_DAR_An);
- info->dsr = cxl_p2n_read(afu, CXL_PSL_DSR_An);
- pidtid = cxl_p2n_read(afu, CXL_PSL_PID_TID_An);
- info->pid = pidtid >> 32;
- info->tid = pidtid & 0xffffffff;
+ if (cxl_is_power8())
+ info->dsr = cxl_p2n_read(afu, CXL_PSL_DSR_An);
info->afu_err = cxl_p2n_read(afu, CXL_AFU_ERR_An);
info->errstat = cxl_p2n_read(afu, CXL_PSL_ErrStat_An);
info->proc_handle = 0;
@@ -880,7 +1064,22 @@ static int native_get_irq_info(struct cxl_afu *afu, struct cxl_irq_info *info)
return 0;
}
-void cxl_native_psl_irq_dump_regs(struct cxl_context *ctx)
+void cxl_native_irq_dump_regs_psl9(struct cxl_context *ctx)
+{
+ u64 fir1, fir2, serr;
+
+ fir1 = cxl_p1_read(ctx->afu->adapter, CXL_PSL9_FIR1);
+ fir2 = cxl_p1_read(ctx->afu->adapter, CXL_PSL9_FIR2);
+
+ dev_crit(&ctx->afu->dev, "PSL_FIR1: 0x%016llx\n", fir1);
+ dev_crit(&ctx->afu->dev, "PSL_FIR2: 0x%016llx\n", fir2);
+ if (ctx->afu->adapter->native->sl_ops->register_serr_irq) {
+ serr = cxl_p1n_read(ctx->afu, CXL_PSL_SERR_An);
+ cxl_afu_decode_psl_serr(ctx->afu, serr);
+ }
+}
+
+void cxl_native_irq_dump_regs_psl8(struct cxl_context *ctx)
{
u64 fir1, fir2, fir_slice, serr, afu_debug;
@@ -916,9 +1115,20 @@ static irqreturn_t native_handle_psl_slice_error(struct cxl_context *ctx,
return cxl_ops->ack_irq(ctx, 0, errstat);
}
-static irqreturn_t fail_psl_irq(struct cxl_afu *afu, struct cxl_irq_info *irq_info)
+static bool cxl_is_translation_fault(struct cxl_afu *afu, u64 dsisr)
+{
+ if ((cxl_is_psl8(afu)) && (dsisr & CXL_PSL_DSISR_TRANS))
+ return true;
+
+ if ((cxl_is_psl9(afu)) && (dsisr & CXL_PSL9_DSISR_An_TF))
+ return true;
+
+ return false;
+}
+
+irqreturn_t cxl_fail_irq_psl(struct cxl_afu *afu, struct cxl_irq_info *irq_info)
{
- if (irq_info->dsisr & CXL_PSL_DSISR_TRANS)
+ if (cxl_is_translation_fault(afu, irq_info->dsisr))
cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_AE);
else
cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_A);
@@ -932,7 +1142,7 @@ static irqreturn_t native_irq_multiplexed(int irq, void *data)
struct cxl_context *ctx;
struct cxl_irq_info irq_info;
u64 phreg = cxl_p2n_read(afu, CXL_PSL_PEHandle_An);
- int ph, ret;
+ int ph, ret = IRQ_HANDLED, res;
/* check if eeh kicked in while the interrupt was in flight */
if (unlikely(phreg == ~0ULL)) {
@@ -943,15 +1153,18 @@ static irqreturn_t native_irq_multiplexed(int irq, void *data)
}
/* Mask the pe-handle from register value */
ph = phreg & 0xffff;
- if ((ret = native_get_irq_info(afu, &irq_info))) {
- WARN(1, "Unable to get CXL IRQ Info: %i\n", ret);
- return fail_psl_irq(afu, &irq_info);
+ if ((res = native_get_irq_info(afu, &irq_info))) {
+ WARN(1, "Unable to get CXL IRQ Info: %i\n", res);
+ if (afu->adapter->native->sl_ops->fail_irq)
+ return afu->adapter->native->sl_ops->fail_irq(afu, &irq_info);
+ return ret;
}
rcu_read_lock();
ctx = idr_find(&afu->contexts_idr, ph);
if (ctx) {
- ret = cxl_irq(irq, ctx, &irq_info);
+ if (afu->adapter->native->sl_ops->handle_interrupt)
+ ret = afu->adapter->native->sl_ops->handle_interrupt(irq, ctx, &irq_info);
rcu_read_unlock();
return ret;
}
@@ -961,7 +1174,9 @@ static irqreturn_t native_irq_multiplexed(int irq, void *data)
" %016llx\n(Possible AFU HW issue - was a term/remove acked"
" with outstanding transactions?)\n", ph, irq_info.dsisr,
irq_info.dar);
- return fail_psl_irq(afu, &irq_info);
+ if (afu->adapter->native->sl_ops->fail_irq)
+ ret = afu->adapter->native->sl_ops->fail_irq(afu, &irq_info);
+ return ret;
}
static void native_irq_wait(struct cxl_context *ctx)
@@ -979,7 +1194,11 @@ static void native_irq_wait(struct cxl_context *ctx)
if (ph != ctx->pe)
return;
dsisr = cxl_p2n_read(ctx->afu, CXL_PSL_DSISR_An);
- if ((dsisr & CXL_PSL_DSISR_PENDING) == 0)
+ if (cxl_is_psl8(ctx->afu) &&
+ ((dsisr & CXL_PSL_DSISR_PENDING) == 0))
+ return;
+ if (cxl_is_psl9(ctx->afu) &&
+ ((dsisr & CXL_PSL9_DSISR_PENDING) == 0))
return;
/*
* We are waiting for the workqueue to process our
@@ -996,25 +1215,33 @@ static void native_irq_wait(struct cxl_context *ctx)
static irqreturn_t native_slice_irq_err(int irq, void *data)
{
struct cxl_afu *afu = data;
- u64 fir_slice, errstat, serr, afu_debug, afu_error, dsisr;
+ u64 errstat, serr, afu_error, dsisr;
+ u64 fir_slice, afu_debug, irq_mask;
/*
* slice err interrupt is only used with full PSL (no XSL)
*/
serr = cxl_p1n_read(afu, CXL_PSL_SERR_An);
- fir_slice = cxl_p1n_read(afu, CXL_PSL_FIR_SLICE_An);
errstat = cxl_p2n_read(afu, CXL_PSL_ErrStat_An);
- afu_debug = cxl_p1n_read(afu, CXL_AFU_DEBUG_An);
afu_error = cxl_p2n_read(afu, CXL_AFU_ERR_An);
dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
cxl_afu_decode_psl_serr(afu, serr);
- dev_crit(&afu->dev, "PSL_FIR_SLICE_An: 0x%016llx\n", fir_slice);
+
+ if (cxl_is_power8()) {
+ fir_slice = cxl_p1n_read(afu, CXL_PSL_FIR_SLICE_An);
+ afu_debug = cxl_p1n_read(afu, CXL_AFU_DEBUG_An);
+ dev_crit(&afu->dev, "PSL_FIR_SLICE_An: 0x%016llx\n", fir_slice);
+ dev_crit(&afu->dev, "CXL_PSL_AFU_DEBUG_An: 0x%016llx\n", afu_debug);
+ }
dev_crit(&afu->dev, "CXL_PSL_ErrStat_An: 0x%016llx\n", errstat);
- dev_crit(&afu->dev, "CXL_PSL_AFU_DEBUG_An: 0x%016llx\n", afu_debug);
dev_crit(&afu->dev, "AFU_ERR_An: 0x%.16llx\n", afu_error);
dev_crit(&afu->dev, "PSL_DSISR_An: 0x%.16llx\n", dsisr);
+ /* mask off the IRQ so it won't retrigger until the AFU is reset */
+ irq_mask = (serr & CXL_PSL_SERR_An_IRQS) >> 32;
+ serr |= irq_mask;
cxl_p1n_write(afu, CXL_PSL_SERR_An, serr);
+ dev_info(&afu->dev, "Further such interrupts will be masked until the AFU is reset\n");
return IRQ_HANDLED;
}
@@ -1103,7 +1330,15 @@ int cxl_native_register_serr_irq(struct cxl_afu *afu)
}
serr = cxl_p1n_read(afu, CXL_PSL_SERR_An);
- serr = (serr & 0x00ffffffffff0000ULL) | (afu->serr_hwirq & 0xffff);
+ if (cxl_is_power8())
+ serr = (serr & 0x00ffffffffff0000ULL) | (afu->serr_hwirq & 0xffff);
+ if (cxl_is_power9()) {
+ /*
+ * By default, all errors are masked, so don't set all the mask bits.
+ * Slice errors will be transferred.
+ */
+ serr = (serr & ~0xff0000007fffffffULL) | (afu->serr_hwirq & 0xffff);
+ }
cxl_p1n_write(afu, CXL_PSL_SERR_An, serr);
return 0;
diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c
index b27ea98b781f..6dc1ee5b92c9 100644
--- a/drivers/misc/cxl/pci.c
+++ b/drivers/misc/cxl/pci.c
@@ -60,7 +60,7 @@
#define CXL_VSEC_PROTOCOL_MASK 0xe0
#define CXL_VSEC_PROTOCOL_1024TB 0x80
#define CXL_VSEC_PROTOCOL_512TB 0x40
-#define CXL_VSEC_PROTOCOL_256TB 0x20 /* Power 8 uses this */
+#define CXL_VSEC_PROTOCOL_256TB 0x20 /* Power 8/9 uses this */
#define CXL_VSEC_PROTOCOL_ENABLE 0x01
#define CXL_READ_VSEC_PSL_REVISION(dev, vsec, dest) \
@@ -123,6 +123,8 @@ static const struct pci_device_id cxl_pci_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x044b), },
{ PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x04cf), },
{ PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x0601), },
+ { PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x0623), },
+ { PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x0628), },
{ PCI_DEVICE_CLASS(0x120000, ~0), },
{ }
@@ -324,38 +326,59 @@ static void dump_afu_descriptor(struct cxl_afu *afu)
#undef show_reg
}
-#define CAPP_UNIT0_ID 0xBA
-#define CAPP_UNIT1_ID 0XBE
+#define P8_CAPP_UNIT0_ID 0xBA
+#define P8_CAPP_UNIT1_ID 0XBE
+#define P9_CAPP_UNIT0_ID 0xC0
+#define P9_CAPP_UNIT1_ID 0xE0
-static u64 get_capp_unit_id(struct device_node *np)
+static int get_phb_index(struct device_node *np, u32 *phb_index)
{
- u32 phb_index;
+ if (of_property_read_u32(np, "ibm,phb-index", phb_index))
+ return -ENODEV;
+ return 0;
+}
+static u64 get_capp_unit_id(struct device_node *np, u32 phb_index)
+{
/*
- * For chips other than POWER8NVL, we only have CAPP 0,
- * irrespective of which PHB is used.
+ * POWER 8:
+ * - For chips other than POWER8NVL, we only have CAPP 0,
+ * irrespective of which PHB is used.
+ * - For POWER8NVL, assume CAPP 0 is attached to PHB0 and
+ * CAPP 1 is attached to PHB1.
*/
- if (!pvr_version_is(PVR_POWER8NVL))
- return CAPP_UNIT0_ID;
+ if (cxl_is_power8()) {
+ if (!pvr_version_is(PVR_POWER8NVL))
+ return P8_CAPP_UNIT0_ID;
+
+ if (phb_index == 0)
+ return P8_CAPP_UNIT0_ID;
+
+ if (phb_index == 1)
+ return P8_CAPP_UNIT1_ID;
+ }
/*
- * For POWER8NVL, assume CAPP 0 is attached to PHB0 and
- * CAPP 1 is attached to PHB1.
+ * POWER 9:
+ * PEC0 (PHB0): CAPP ID = CAPP0 (0b1100_0000)
+ * PEC1 (PHB1 - PHB2): no CAPI mode
+ * PEC2 (PHB3 - PHB4 - PHB5): CAPI mode on PHB3 only, CAPP ID = CAPP1 (0b1110_0000)
*/
- if (of_property_read_u32(np, "ibm,phb-index", &phb_index))
- return 0;
+ if (cxl_is_power9()) {
+ if (phb_index == 0)
+ return P9_CAPP_UNIT0_ID;
- if (phb_index == 0)
- return CAPP_UNIT0_ID;
-
- if (phb_index == 1)
- return CAPP_UNIT1_ID;
+ if (phb_index == 3)
+ return P9_CAPP_UNIT1_ID;
+ }
return 0;
}
-static int calc_capp_routing(struct pci_dev *dev, u64 *chipid, u64 *capp_unit_id)
+static int calc_capp_routing(struct pci_dev *dev, u64 *chipid,
+ u32 *phb_index, u64 *capp_unit_id)
{
+ int rc;
struct device_node *np;
const __be32 *prop;
@@ -366,8 +389,16 @@ static int calc_capp_routing(struct pci_dev *dev, u64 *chipid, u64 *capp_unit_id
np = of_get_next_parent(np);
if (!np)
return -ENODEV;
+
*chipid = be32_to_cpup(prop);
- *capp_unit_id = get_capp_unit_id(np);
+
+ rc = get_phb_index(np, phb_index);
+ if (rc) {
+ pr_err("cxl: invalid phb index\n");
+ return rc;
+ }
+
+ *capp_unit_id = get_capp_unit_id(np, *phb_index);
of_node_put(np);
if (!*capp_unit_id) {
pr_err("cxl: invalid capp unit id\n");
@@ -377,14 +408,104 @@ static int calc_capp_routing(struct pci_dev *dev, u64 *chipid, u64 *capp_unit_id
return 0;
}
-static int init_implementation_adapter_psl_regs(struct cxl *adapter, struct pci_dev *dev)
+static int init_implementation_adapter_regs_psl9(struct cxl *adapter, struct pci_dev *dev)
+{
+ u64 xsl_dsnctl, psl_fircntl;
+ u64 chipid;
+ u32 phb_index;
+ u64 capp_unit_id;
+ int rc;
+
+ rc = calc_capp_routing(dev, &chipid, &phb_index, &capp_unit_id);
+ if (rc)
+ return rc;
+
+ /*
+ * CAPI Identifier bits [0:7]
+ * bit 61:60 MSI bits --> 0
+ * bit 59 TVT selector --> 0
+ */
+
+ /*
+ * Tell XSL where to route data to.
+ * The field chipid should match the PHB CAPI_CMPM register
+ */
+ xsl_dsnctl = ((u64)0x2 << (63-7)); /* Bit 57 */
+ xsl_dsnctl |= (capp_unit_id << (63-15));
+
+ /* nMMU_ID defaults to: b'000001001' */
+ xsl_dsnctl |= ((u64)0x09 << (63-28));
+
+ if (cxl_is_power9() && !cpu_has_feature(CPU_FTR_POWER9_DD1)) {
+ /*
+ * Used to identify CAPI packets which should be sorted into
+ * the Non-Blocking queues by the PHB. This field should match
+ * the PHB PBL_NBW_CMPM register
+ * nbwind=0x03, bits [57:58], must include capi indicator.
+ * Not supported on P9 DD1.
+ */
+ xsl_dsnctl |= ((u64)0x03 << (63-47));
+
+ /*
+ * Upper 16b address bits of ASB_Notify messages sent to the
+ * system. Need to match the PHB's ASN Compare/Mask Register.
+ * Not supported on P9 DD1.
+ */
+ xsl_dsnctl |= ((u64)0x04 << (63-55));
+ }
+
+ cxl_p1_write(adapter, CXL_XSL9_DSNCTL, xsl_dsnctl);
+
+ /* Set fir_cntl to recommended value for production env */
+ psl_fircntl = (0x2ULL << (63-3)); /* ce_report */
+ psl_fircntl |= (0x1ULL << (63-6)); /* FIR_report */
+ psl_fircntl |= 0x1ULL; /* ce_thresh */
+ cxl_p1_write(adapter, CXL_PSL9_FIR_CNTL, psl_fircntl);
+
+ /* vccredits=0x1 pcklat=0x4 */
+ cxl_p1_write(adapter, CXL_PSL9_DSNDCTL, 0x0000000000001810ULL);
+
+ /*
+ * For debugging with trace arrays.
+ * Configure RX trace 0 segmented mode.
+ * Configure CT trace 0 segmented mode.
+ * Configure LA0 trace 0 segmented mode.
+ * Configure LA1 trace 0 segmented mode.
+ */
+ cxl_p1_write(adapter, CXL_PSL9_TRACECFG, 0x8040800080000000ULL);
+ cxl_p1_write(adapter, CXL_PSL9_TRACECFG, 0x8040800080000003ULL);
+ cxl_p1_write(adapter, CXL_PSL9_TRACECFG, 0x8040800080000005ULL);
+ cxl_p1_write(adapter, CXL_PSL9_TRACECFG, 0x8040800080000006ULL);
+
+ /*
+ * A response to an ASB_Notify request is returned by the
+ * system as an MMIO write to the address defined in
+ * the PSL_TNR_ADDR register
+ */
+ /* PSL_TNR_ADDR */
+
+ /* NORST */
+ cxl_p1_write(adapter, CXL_PSL9_DEBUG, 0x8000000000000000ULL);
+
+ /* allocate the apc machines */
+ cxl_p1_write(adapter, CXL_PSL9_APCDEDTYPE, 0x40000003FFFF0000ULL);
+
+ /* Disable vc dd1 fix */
+ if ((cxl_is_power9() && cpu_has_feature(CPU_FTR_POWER9_DD1)))
+ cxl_p1_write(adapter, CXL_PSL9_GP_CT, 0x0400000000000001ULL);
+
+ return 0;
+}
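
The shift amounts in the function above follow the IBM MSB-0 convention: shifting a value left by (63 - n) places its least-significant bit at IBM bit n, i.e. LSB-0 bit (63 - n). A small illustrative helper (the name is hypothetical), with the nMMU_ID write from the code as a worked example:

/* Place "val" so that its least-significant bit lands on IBM/MSB-0 bit "ibm_bit". */
static inline u64 ibm_bit_field(u64 val, unsigned int ibm_bit)
{
	return val << (63 - ibm_bit);
}

/* e.g. the nMMU_ID default above: ibm_bit_field(0x09, 28) == (u64)0x09 << 35 */
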
+
+static int init_implementation_adapter_regs_psl8(struct cxl *adapter, struct pci_dev *dev)
{
u64 psl_dsnctl, psl_fircntl;
u64 chipid;
+ u32 phb_index;
u64 capp_unit_id;
int rc;
- rc = calc_capp_routing(dev, &chipid, &capp_unit_id);
+ rc = calc_capp_routing(dev, &chipid, &phb_index, &capp_unit_id);
if (rc)
return rc;
@@ -409,14 +530,15 @@ static int init_implementation_adapter_psl_regs(struct cxl *adapter, struct pci_
return 0;
}
-static int init_implementation_adapter_xsl_regs(struct cxl *adapter, struct pci_dev *dev)
+static int init_implementation_adapter_regs_xsl(struct cxl *adapter, struct pci_dev *dev)
{
u64 xsl_dsnctl;
u64 chipid;
+ u32 phb_index;
u64 capp_unit_id;
int rc;
- rc = calc_capp_routing(dev, &chipid, &capp_unit_id);
+ rc = calc_capp_routing(dev, &chipid, &phb_index, &capp_unit_id);
if (rc)
return rc;
@@ -434,7 +556,13 @@ static int init_implementation_adapter_xsl_regs(struct cxl *adapter, struct pci_
/* For the PSL this is a multiple for 0 < n <= 7: */
#define PSL_2048_250MHZ_CYCLES 1
-static void write_timebase_ctrl_psl(struct cxl *adapter)
+static void write_timebase_ctrl_psl9(struct cxl *adapter)
+{
+ cxl_p1_write(adapter, CXL_PSL9_TB_CTLSTAT,
+ TBSYNC_CNT(2 * PSL_2048_250MHZ_CYCLES));
+}
+
+static void write_timebase_ctrl_psl8(struct cxl *adapter)
{
cxl_p1_write(adapter, CXL_PSL_TB_CTLSTAT,
TBSYNC_CNT(2 * PSL_2048_250MHZ_CYCLES));
@@ -455,7 +583,12 @@ static void write_timebase_ctrl_xsl(struct cxl *adapter)
TBSYNC_CNT(XSL_4000_CLOCKS));
}
-static u64 timebase_read_psl(struct cxl *adapter)
+static u64 timebase_read_psl9(struct cxl *adapter)
+{
+ return cxl_p1_read(adapter, CXL_PSL9_Timebase);
+}
+
+static u64 timebase_read_psl8(struct cxl *adapter)
{
return cxl_p1_read(adapter, CXL_PSL_Timebase);
}
@@ -513,7 +646,12 @@ static void cxl_setup_psl_timebase(struct cxl *adapter, struct pci_dev *dev)
return;
}
-static int init_implementation_afu_psl_regs(struct cxl_afu *afu)
+static int init_implementation_afu_regs_psl9(struct cxl_afu *afu)
+{
+ return 0;
+}
+
+static int init_implementation_afu_regs_psl8(struct cxl_afu *afu)
{
/* read/write masks for this slice */
cxl_p1n_write(afu, CXL_PSL_APCALLOC_A, 0xFFFFFFFEFEFEFEFEULL);
@@ -611,7 +749,7 @@ static int setup_cxl_bars(struct pci_dev *dev)
/*
* BAR 4/5 has a special meaning for CXL and must be programmed with a
* special value corresponding to the CXL protocol address range.
- * For POWER 8 that means bits 48:49 must be set to 10
+ * For POWER 8/9 that means bits 48:49 must be set to 10
*/
pci_write_config_dword(dev, PCI_BASE_ADDRESS_4, 0x00000000);
pci_write_config_dword(dev, PCI_BASE_ADDRESS_5, 0x00020000);
@@ -968,7 +1106,7 @@ static int cxl_afu_descriptor_looks_ok(struct cxl_afu *afu)
}
if (afu->pp_psa && (afu->pp_size < PAGE_SIZE))
- dev_warn(&afu->dev, "AFU uses < PAGE_SIZE per-process PSA!");
+ dev_warn(&afu->dev, "AFU uses pp_size(%#016llx) < PAGE_SIZE per-process PSA!\n", afu->pp_size);
for (i = 0; i < afu->crs_num; i++) {
rc = cxl_ops->afu_cr_read32(afu, i, 0, &val);
@@ -996,7 +1134,53 @@ static int cxl_afu_descriptor_looks_ok(struct cxl_afu *afu)
return 0;
}
-static int sanitise_afu_regs(struct cxl_afu *afu)
+static int sanitise_afu_regs_psl9(struct cxl_afu *afu)
+{
+ u64 reg;
+
+ /*
+ * Clear out any regs that contain either an IVTE or address or may be
+ * waiting on an acknowledgment to try to be a bit safer as we bring
+ * it online
+ */
+ reg = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
+ if ((reg & CXL_AFU_Cntl_An_ES_MASK) != CXL_AFU_Cntl_An_ES_Disabled) {
+ dev_warn(&afu->dev, "WARNING: AFU was not disabled: %#016llx\n", reg);
+ if (cxl_ops->afu_reset(afu))
+ return -EIO;
+ if (cxl_afu_disable(afu))
+ return -EIO;
+ if (cxl_psl_purge(afu))
+ return -EIO;
+ }
+ cxl_p1n_write(afu, CXL_PSL_SPAP_An, 0x0000000000000000);
+ cxl_p1n_write(afu, CXL_PSL_AMBAR_An, 0x0000000000000000);
+ reg = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
+ if (reg) {
+ dev_warn(&afu->dev, "AFU had pending DSISR: %#016llx\n", reg);
+ if (reg & CXL_PSL9_DSISR_An_TF)
+ cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_AE);
+ else
+ cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_A);
+ }
+ if (afu->adapter->native->sl_ops->register_serr_irq) {
+ reg = cxl_p1n_read(afu, CXL_PSL_SERR_An);
+ if (reg) {
+ if (reg & ~0x000000007fffffff)
+ dev_warn(&afu->dev, "AFU had pending SERR: %#016llx\n", reg);
+ cxl_p1n_write(afu, CXL_PSL_SERR_An, reg & ~0xffff);
+ }
+ }
+ reg = cxl_p2n_read(afu, CXL_PSL_ErrStat_An);
+ if (reg) {
+ dev_warn(&afu->dev, "AFU had pending error status: %#016llx\n", reg);
+ cxl_p2n_write(afu, CXL_PSL_ErrStat_An, reg);
+ }
+
+ return 0;
+}
+
+static int sanitise_afu_regs_psl8(struct cxl_afu *afu)
{
u64 reg;
@@ -1102,8 +1286,11 @@ static int pci_configure_afu(struct cxl_afu *afu, struct cxl *adapter, struct pc
if ((rc = pci_map_slice_regs(afu, adapter, dev)))
return rc;
- if ((rc = sanitise_afu_regs(afu)))
- goto err1;
+ if (adapter->native->sl_ops->sanitise_afu_regs) {
+ rc = adapter->native->sl_ops->sanitise_afu_regs(afu);
+ if (rc)
+ goto err1;
+ }
/* We need to reset the AFU before we can read the AFU descriptor */
if ((rc = cxl_ops->afu_reset(afu)))
@@ -1248,8 +1435,13 @@ int cxl_pci_reset(struct cxl *adapter)
dev_info(&dev->dev, "CXL reset\n");
- /* the adapter is about to be reset, so ignore errors */
- cxl_data_cache_flush(adapter);
+ /*
+ * The adapter is about to be reset, so ignore errors.
+ * Not supported on P9 DD1.
+ */
+ if ((cxl_is_power8()) ||
+ ((cxl_is_power9() && !cpu_has_feature(CPU_FTR_POWER9_DD1))))
+ cxl_data_cache_flush(adapter);
/* pcie_warm_reset requests a fundamental pci reset which includes a
* PERST assert/deassert. PERST triggers a loading of the image
@@ -1332,6 +1524,7 @@ static int cxl_read_vsec(struct cxl *adapter, struct pci_dev *dev)
CXL_READ_VSEC_IMAGE_STATE(dev, vsec, &image_state);
adapter->user_image_loaded = !!(image_state & CXL_VSEC_USER_IMAGE_LOADED);
adapter->perst_select_user = !!(image_state & CXL_VSEC_USER_IMAGE_LOADED);
+ adapter->perst_loads_image = !!(image_state & CXL_VSEC_PERST_LOADS_IMAGE);
CXL_READ_VSEC_NAFUS(dev, vsec, &adapter->slices);
CXL_READ_VSEC_AFU_DESC_OFF(dev, vsec, &afu_desc_off);
@@ -1378,6 +1571,17 @@ static void cxl_fixup_malformed_tlp(struct cxl *adapter, struct pci_dev *dev)
pci_write_config_dword(dev, aer + PCI_ERR_UNCOR_MASK, data);
}
+static bool cxl_compatible_caia_version(struct cxl *adapter)
+{
+ if (cxl_is_power8() && (adapter->caia_major == 1))
+ return true;
+
+ if (cxl_is_power9() && (adapter->caia_major == 2))
+ return true;
+
+ return false;
+}
+
static int cxl_vsec_looks_ok(struct cxl *adapter, struct pci_dev *dev)
{
if (adapter->vsec_status & CXL_STATUS_SECOND_PORT)
@@ -1388,6 +1592,12 @@ static int cxl_vsec_looks_ok(struct cxl *adapter, struct pci_dev *dev)
return -EINVAL;
}
+ if (!cxl_compatible_caia_version(adapter)) {
+ dev_info(&dev->dev, "Ignoring card. PSL type is not supported (caia version: %d)\n",
+ adapter->caia_major);
+ return -ENODEV;
+ }
+
if (!adapter->slices) {
/* Once we support dynamic reprogramming we can use the card if
* it supports loadable AFUs */
@@ -1431,9 +1641,19 @@ static void cxl_release_adapter(struct device *dev)
static int sanitise_adapter_regs(struct cxl *adapter)
{
+ int rc = 0;
+
/* Clear PSL tberror bit by writing 1 to it */
cxl_p1_write(adapter, CXL_PSL_ErrIVTE, CXL_PSL_ErrIVTE_tberror);
- return cxl_tlb_slb_invalidate(adapter);
+
+ if (adapter->native->sl_ops->invalidate_all) {
+ /* do not invalidate ERAT entries when not reloading on PERST */
+ if (cxl_is_power9() && (adapter->perst_loads_image))
+ return 0;
+ rc = adapter->native->sl_ops->invalidate_all(adapter);
+ }
+
+ return rc;
}
/* This should contain *only* operations that can safely be done in
@@ -1496,8 +1716,6 @@ static int cxl_configure_adapter(struct cxl *adapter, struct pci_dev *dev)
if ((rc = cxl_native_register_psl_err_irq(adapter)))
goto err;
- /* Release the context lock as adapter is configured */
- cxl_adapter_context_unlock(adapter);
return 0;
err:
@@ -1516,25 +1734,65 @@ static void cxl_deconfigure_adapter(struct cxl *adapter)
pci_disable_device(pdev);
}
-static const struct cxl_service_layer_ops psl_ops = {
- .adapter_regs_init = init_implementation_adapter_psl_regs,
- .afu_regs_init = init_implementation_afu_psl_regs,
+static const struct cxl_service_layer_ops psl9_ops = {
+ .adapter_regs_init = init_implementation_adapter_regs_psl9,
+ .invalidate_all = cxl_invalidate_all_psl9,
+ .afu_regs_init = init_implementation_afu_regs_psl9,
+ .sanitise_afu_regs = sanitise_afu_regs_psl9,
.register_serr_irq = cxl_native_register_serr_irq,
.release_serr_irq = cxl_native_release_serr_irq,
- .debugfs_add_adapter_sl_regs = cxl_debugfs_add_adapter_psl_regs,
- .debugfs_add_afu_sl_regs = cxl_debugfs_add_afu_psl_regs,
- .psl_irq_dump_registers = cxl_native_psl_irq_dump_regs,
+ .handle_interrupt = cxl_irq_psl9,
+ .fail_irq = cxl_fail_irq_psl,
+ .activate_dedicated_process = cxl_activate_dedicated_process_psl9,
+ .attach_afu_directed = cxl_attach_afu_directed_psl9,
+ .attach_dedicated_process = cxl_attach_dedicated_process_psl9,
+ .update_dedicated_ivtes = cxl_update_dedicated_ivtes_psl9,
+ .debugfs_add_adapter_regs = cxl_debugfs_add_adapter_regs_psl9,
+ .debugfs_add_afu_regs = cxl_debugfs_add_afu_regs_psl9,
+ .psl_irq_dump_registers = cxl_native_irq_dump_regs_psl9,
.err_irq_dump_registers = cxl_native_err_irq_dump_regs,
- .debugfs_stop_trace = cxl_stop_trace,
- .write_timebase_ctrl = write_timebase_ctrl_psl,
- .timebase_read = timebase_read_psl,
+ .debugfs_stop_trace = cxl_stop_trace_psl9,
+ .write_timebase_ctrl = write_timebase_ctrl_psl9,
+ .timebase_read = timebase_read_psl9,
+ .capi_mode = OPAL_PHB_CAPI_MODE_CAPI,
+ .needs_reset_before_disable = true,
+};
+
+static const struct cxl_service_layer_ops psl8_ops = {
+ .adapter_regs_init = init_implementation_adapter_regs_psl8,
+ .invalidate_all = cxl_invalidate_all_psl8,
+ .afu_regs_init = init_implementation_afu_regs_psl8,
+ .sanitise_afu_regs = sanitise_afu_regs_psl8,
+ .register_serr_irq = cxl_native_register_serr_irq,
+ .release_serr_irq = cxl_native_release_serr_irq,
+ .handle_interrupt = cxl_irq_psl8,
+ .fail_irq = cxl_fail_irq_psl,
+ .activate_dedicated_process = cxl_activate_dedicated_process_psl8,
+ .attach_afu_directed = cxl_attach_afu_directed_psl8,
+ .attach_dedicated_process = cxl_attach_dedicated_process_psl8,
+ .update_dedicated_ivtes = cxl_update_dedicated_ivtes_psl8,
+ .debugfs_add_adapter_regs = cxl_debugfs_add_adapter_regs_psl8,
+ .debugfs_add_afu_regs = cxl_debugfs_add_afu_regs_psl8,
+ .psl_irq_dump_registers = cxl_native_irq_dump_regs_psl8,
+ .err_irq_dump_registers = cxl_native_err_irq_dump_regs,
+ .debugfs_stop_trace = cxl_stop_trace_psl8,
+ .write_timebase_ctrl = write_timebase_ctrl_psl8,
+ .timebase_read = timebase_read_psl8,
.capi_mode = OPAL_PHB_CAPI_MODE_CAPI,
.needs_reset_before_disable = true,
};
static const struct cxl_service_layer_ops xsl_ops = {
- .adapter_regs_init = init_implementation_adapter_xsl_regs,
- .debugfs_add_adapter_sl_regs = cxl_debugfs_add_adapter_xsl_regs,
+ .adapter_regs_init = init_implementation_adapter_regs_xsl,
+ .invalidate_all = cxl_invalidate_all_psl8,
+ .sanitise_afu_regs = sanitise_afu_regs_psl8,
+ .handle_interrupt = cxl_irq_psl8,
+ .fail_irq = cxl_fail_irq_psl,
+ .activate_dedicated_process = cxl_activate_dedicated_process_psl8,
+ .attach_afu_directed = cxl_attach_afu_directed_psl8,
+ .attach_dedicated_process = cxl_attach_dedicated_process_psl8,
+ .update_dedicated_ivtes = cxl_update_dedicated_ivtes_psl8,
+ .debugfs_add_adapter_regs = cxl_debugfs_add_adapter_regs_xsl,
.write_timebase_ctrl = write_timebase_ctrl_xsl,
.timebase_read = timebase_read_xsl,
.capi_mode = OPAL_PHB_CAPI_MODE_DMA,
@@ -1548,8 +1806,13 @@ static void set_sl_ops(struct cxl *adapter, struct pci_dev *dev)
adapter->native->sl_ops = &xsl_ops;
adapter->min_pe = 1; /* Workaround for CX-4 hardware bug */
} else {
- dev_info(&dev->dev, "Device uses a PSL\n");
- adapter->native->sl_ops = &psl_ops;
+ if (cxl_is_power8()) {
+ dev_info(&dev->dev, "Device uses a PSL8\n");
+ adapter->native->sl_ops = &psl8_ops;
+ } else {
+ dev_info(&dev->dev, "Device uses a PSL9\n");
+ adapter->native->sl_ops = &psl9_ops;
+ }
}
}
@@ -1596,6 +1859,9 @@ static struct cxl *cxl_pci_init_adapter(struct pci_dev *dev)
if ((rc = cxl_sysfs_adapter_add(adapter)))
goto err_put1;
+ /* Release the context lock as adapter is configured */
+ cxl_adapter_context_unlock(adapter);
+
return adapter;
err_put1:
@@ -1619,8 +1885,13 @@ static void cxl_pci_remove_adapter(struct cxl *adapter)
cxl_sysfs_adapter_remove(adapter);
cxl_debugfs_adapter_remove(adapter);
- /* Flush adapter datacache as its about to be removed */
- cxl_data_cache_flush(adapter);
+ /*
+ * Flush adapter datacache as it's about to be removed.
+ * Not supported on P9 DD1.
+ */
+ if ((cxl_is_power8()) ||
+ ((cxl_is_power9() && !cpu_has_feature(CPU_FTR_POWER9_DD1))))
+ cxl_data_cache_flush(adapter);
cxl_deconfigure_adapter(adapter);
@@ -1704,6 +1975,11 @@ static int cxl_probe(struct pci_dev *dev, const struct pci_device_id *id)
return -ENODEV;
}
+ if (cxl_is_power9() && !radix_enabled()) {
+ dev_info(&dev->dev, "Only Radix mode supported\n");
+ return -ENODEV;
+ }
+
if (cxl_verbose)
dump_cxl_config_space(dev);
@@ -1781,7 +2057,7 @@ static pci_ers_result_t cxl_pci_error_detected(struct pci_dev *pdev,
{
struct cxl *adapter = pci_get_drvdata(pdev);
struct cxl_afu *afu;
- pci_ers_result_t result = PCI_ERS_RESULT_NEED_RESET;
+ pci_ers_result_t result = PCI_ERS_RESULT_NEED_RESET, afu_result;
int i;
/* At this point, we could still have an interrupt pending.
@@ -1885,16 +2161,26 @@ static pci_ers_result_t cxl_pci_error_detected(struct pci_dev *pdev,
for (i = 0; i < adapter->slices; i++) {
afu = adapter->afu[i];
- result = cxl_vphb_error_detected(afu, state);
-
- /* Only continue if everyone agrees on NEED_RESET */
- if (result != PCI_ERS_RESULT_NEED_RESET)
- return result;
+ afu_result = cxl_vphb_error_detected(afu, state);
cxl_context_detach_all(afu);
cxl_ops->afu_deactivate_mode(afu, afu->current_mode);
pci_deconfigure_afu(afu);
+
+ /* Disconnect trumps all, NONE trumps NEED_RESET */
+ if (afu_result == PCI_ERS_RESULT_DISCONNECT)
+ result = PCI_ERS_RESULT_DISCONNECT;
+ else if ((afu_result == PCI_ERS_RESULT_NONE) &&
+ (result == PCI_ERS_RESULT_NEED_RESET))
+ result = PCI_ERS_RESULT_NONE;
}
+
+ /* Take the contexts lock so no new contexts can be activated while the adapter is deconfigured */
+ if (cxl_adapter_context_lock(adapter) != 0)
+ dev_warn(&adapter->dev,
+ "Couldn't take context lock with %d active-contexts\n",
+ atomic_read(&adapter->contexts_num));
+
cxl_deconfigure_adapter(adapter);
return result;
@@ -1913,6 +2199,13 @@ static pci_ers_result_t cxl_pci_slot_reset(struct pci_dev *pdev)
if (cxl_configure_adapter(adapter, pdev))
goto err;
+ /*
+ * Unlock context activation for the adapter. Ideally this should be
+ * done in cxl_pci_resume but cxlflash module tries to activate the
+ * master context as part of slot_reset callback.
+ */
+ cxl_adapter_context_unlock(adapter);
+
for (i = 0; i < adapter->slices; i++) {
afu = adapter->afu[i];
diff --git a/drivers/misc/cxl/trace.h b/drivers/misc/cxl/trace.h
index 751d6119683e..b8e300af0e55 100644
--- a/drivers/misc/cxl/trace.h
+++ b/drivers/misc/cxl/trace.h
@@ -17,6 +17,15 @@
#include "cxl.h"
+#define dsisr_psl9_flags(flags) \
+ __print_flags(flags, "|", \
+ { CXL_PSL9_DSISR_An_CO_MASK, "FR" }, \
+ { CXL_PSL9_DSISR_An_TF, "TF" }, \
+ { CXL_PSL9_DSISR_An_PE, "PE" }, \
+ { CXL_PSL9_DSISR_An_AE, "AE" }, \
+ { CXL_PSL9_DSISR_An_OC, "OC" }, \
+ { CXL_PSL9_DSISR_An_S, "S" })
+
#define DSISR_FLAGS \
{ CXL_PSL_DSISR_An_DS, "DS" }, \
{ CXL_PSL_DSISR_An_DM, "DM" }, \
@@ -154,6 +163,40 @@ TRACE_EVENT(cxl_afu_irq,
)
);
+TRACE_EVENT(cxl_psl9_irq,
+ TP_PROTO(struct cxl_context *ctx, int irq, u64 dsisr, u64 dar),
+
+ TP_ARGS(ctx, irq, dsisr, dar),
+
+ TP_STRUCT__entry(
+ __field(u8, card)
+ __field(u8, afu)
+ __field(u16, pe)
+ __field(int, irq)
+ __field(u64, dsisr)
+ __field(u64, dar)
+ ),
+
+ TP_fast_assign(
+ __entry->card = ctx->afu->adapter->adapter_num;
+ __entry->afu = ctx->afu->slice;
+ __entry->pe = ctx->pe;
+ __entry->irq = irq;
+ __entry->dsisr = dsisr;
+ __entry->dar = dar;
+ ),
+
+ TP_printk("afu%i.%i pe=%i irq=%i dsisr=0x%016llx dsisr=%s dar=0x%016llx",
+ __entry->card,
+ __entry->afu,
+ __entry->pe,
+ __entry->irq,
+ __entry->dsisr,
+ dsisr_psl9_flags(__entry->dsisr),
+ __entry->dar
+ )
+);
+
TRACE_EVENT(cxl_psl_irq,
TP_PROTO(struct cxl_context *ctx, int irq, u64 dsisr, u64 dar),
diff --git a/drivers/misc/ds1682.c b/drivers/misc/ds1682.c
index c7112276a039..28bb495f0cf1 100644
--- a/drivers/misc/ds1682.c
+++ b/drivers/misc/ds1682.c
@@ -227,9 +227,16 @@ static const struct i2c_device_id ds1682_id[] = {
};
MODULE_DEVICE_TABLE(i2c, ds1682_id);
+static const struct of_device_id ds1682_of_match[] = {
+ { .compatible = "dallas,ds1682", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, ds1682_of_match);
+
static struct i2c_driver ds1682_driver = {
.driver = {
.name = "ds1682",
+ .of_match_table = ds1682_of_match,
},
.probe = ds1682_probe,
.remove = ds1682_remove,
diff --git a/drivers/misc/eeprom/idt_89hpesx.c b/drivers/misc/eeprom/idt_89hpesx.c
index 4a22a1d99395..ab0df6a17690 100644
--- a/drivers/misc/eeprom/idt_89hpesx.c
+++ b/drivers/misc/eeprom/idt_89hpesx.c
@@ -1541,12 +1541,69 @@ static const struct i2c_device_id idt_ids[] = {
};
MODULE_DEVICE_TABLE(i2c, idt_ids);
+static const struct of_device_id idt_of_match[] = {
+ { .compatible = "idt,89hpes8nt2", },
+ { .compatible = "idt,89hpes12nt3", },
+
+ { .compatible = "idt,89hpes24nt6ag2", },
+ { .compatible = "idt,89hpes32nt8ag2", },
+ { .compatible = "idt,89hpes32nt8bg2", },
+ { .compatible = "idt,89hpes12nt12g2", },
+ { .compatible = "idt,89hpes16nt16g2", },
+ { .compatible = "idt,89hpes24nt24g2", },
+ { .compatible = "idt,89hpes32nt24ag2", },
+ { .compatible = "idt,89hpes32nt24bg2", },
+
+ { .compatible = "idt,89hpes12n3", },
+ { .compatible = "idt,89hpes12n3a", },
+ { .compatible = "idt,89hpes24n3", },
+ { .compatible = "idt,89hpes24n3a", },
+
+ { .compatible = "idt,89hpes32h8", },
+ { .compatible = "idt,89hpes32h8g2", },
+ { .compatible = "idt,89hpes48h12", },
+ { .compatible = "idt,89hpes48h12g2", },
+ { .compatible = "idt,89hpes48h12ag2", },
+ { .compatible = "idt,89hpes16h16", },
+ { .compatible = "idt,89hpes22h16", },
+ { .compatible = "idt,89hpes22h16g2", },
+ { .compatible = "idt,89hpes34h16", },
+ { .compatible = "idt,89hpes34h16g2", },
+ { .compatible = "idt,89hpes64h16", },
+ { .compatible = "idt,89hpes64h16g2", },
+ { .compatible = "idt,89hpes64h16ag2", },
+
+ { .compatible = "idt,89hpes12t3g2", },
+ { .compatible = "idt,89hpes24t3g2", },
+
+ { .compatible = "idt,89hpes16t4", },
+ { .compatible = "idt,89hpes4t4g2", },
+ { .compatible = "idt,89hpes10t4g2", },
+ { .compatible = "idt,89hpes16t4g2", },
+ { .compatible = "idt,89hpes16t4ag2", },
+ { .compatible = "idt,89hpes5t5", },
+ { .compatible = "idt,89hpes6t5", },
+ { .compatible = "idt,89hpes8t5", },
+ { .compatible = "idt,89hpes8t5a", },
+ { .compatible = "idt,89hpes24t6", },
+ { .compatible = "idt,89hpes6t6g2", },
+ { .compatible = "idt,89hpes24t6g2", },
+ { .compatible = "idt,89hpes16t7", },
+ { .compatible = "idt,89hpes32t8", },
+ { .compatible = "idt,89hpes32t8g2", },
+ { .compatible = "idt,89hpes48t12", },
+ { .compatible = "idt,89hpes48t12g2", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, idt_of_match);
+
/*
* idt_driver - IDT 89HPESx driver structure
*/
static struct i2c_driver idt_driver = {
.driver = {
.name = IDT_NAME,
+ .of_match_table = idt_of_match,
},
.probe = idt_probe,
.remove = idt_remove,
diff --git a/drivers/misc/enclosure.c b/drivers/misc/enclosure.c
index 65fed7146e9b..d3fe3ea902d4 100644
--- a/drivers/misc/enclosure.c
+++ b/drivers/misc/enclosure.c
@@ -148,7 +148,7 @@ enclosure_register(struct device *dev, const char *name, int components,
for (i = 0; i < components; i++) {
edev->component[i].number = -1;
edev->component[i].slot = -1;
- edev->component[i].power_status = 1;
+ edev->component[i].power_status = -1;
}
mutex_lock(&container_list_lock);
@@ -594,6 +594,11 @@ static ssize_t get_component_power_status(struct device *cdev,
if (edev->cb->get_power_status)
edev->cb->get_power_status(edev, ecomp);
+
+ /* If still uninitialized, the callback failed or does not exist. */
+ if (ecomp->power_status == -1)
+ return (edev->cb->get_power_status) ? -EIO : -ENOTTY;
+
return snprintf(buf, 40, "%s\n", ecomp->power_status ? "on" : "off");
}
diff --git a/drivers/misc/lkdtm.h b/drivers/misc/lkdtm.h
index 67d27be60405..3b4976396ec4 100644
--- a/drivers/misc/lkdtm.h
+++ b/drivers/misc/lkdtm.h
@@ -27,6 +27,7 @@ void lkdtm_REFCOUNT_ZERO_SUB(void);
void lkdtm_REFCOUNT_ZERO_ADD(void);
void lkdtm_CORRUPT_LIST_ADD(void);
void lkdtm_CORRUPT_LIST_DEL(void);
+void lkdtm_CORRUPT_USER_DS(void);
/* lkdtm_heap.c */
void lkdtm_OVERWRITE_ALLOCATION(void);
diff --git a/drivers/misc/lkdtm_bugs.c b/drivers/misc/lkdtm_bugs.c
index e3f4cd8876b5..d9028ef50fbe 100644
--- a/drivers/misc/lkdtm_bugs.c
+++ b/drivers/misc/lkdtm_bugs.c
@@ -8,6 +8,8 @@
#include <linux/list.h>
#include <linux/refcount.h>
#include <linux/sched.h>
+#include <linux/sched/signal.h>
+#include <linux/uaccess.h>
struct lkdtm_list {
struct list_head node;
@@ -67,7 +69,7 @@ void lkdtm_WARNING(void)
void lkdtm_EXCEPTION(void)
{
- *((int *) 0) = 0;
+ *((volatile int *) 0) = 0;
}
void lkdtm_LOOP(void)
@@ -279,3 +281,12 @@ void lkdtm_CORRUPT_LIST_DEL(void)
else
pr_err("list_del() corruption not detected!\n");
}
+
+void lkdtm_CORRUPT_USER_DS(void)
+{
+ pr_info("setting bad task size limit\n");
+ set_fs(KERNEL_DS);
+
+ /* Make sure we do not keep running with a KERNEL_DS! */
+ force_sig(SIGKILL, current);
+}
diff --git a/drivers/misc/lkdtm_core.c b/drivers/misc/lkdtm_core.c
index b9a4cd4a9b68..42d2b8e31e6b 100644
--- a/drivers/misc/lkdtm_core.c
+++ b/drivers/misc/lkdtm_core.c
@@ -199,6 +199,7 @@ struct crashtype crashtypes[] = {
CRASHTYPE(OVERFLOW),
CRASHTYPE(CORRUPT_LIST_ADD),
CRASHTYPE(CORRUPT_LIST_DEL),
+ CRASHTYPE(CORRUPT_USER_DS),
CRASHTYPE(CORRUPT_STACK),
CRASHTYPE(UNALIGNED_LOAD_STORE_WRITE),
CRASHTYPE(OVERWRITE_ALLOCATION),
diff --git a/drivers/misc/mei/Makefile b/drivers/misc/mei/Makefile
index 59e6b0aede34..12cceb011a23 100644
--- a/drivers/misc/mei/Makefile
+++ b/drivers/misc/mei/Makefile
@@ -8,7 +8,6 @@ mei-objs += hbm.o
mei-objs += interrupt.o
mei-objs += client.o
mei-objs += main.o
-mei-objs += amthif.o
mei-objs += bus.o
mei-objs += bus-fixup.o
mei-$(CONFIG_DEBUG_FS) += debugfs.o
diff --git a/drivers/misc/mei/amthif.c b/drivers/misc/mei/amthif.c
deleted file mode 100644
index 0e7406ccb6dd..000000000000
--- a/drivers/misc/mei/amthif.c
+++ /dev/null
@@ -1,340 +0,0 @@
-/*
- *
- * Intel Management Engine Interface (Intel MEI) Linux driver
- * Copyright (c) 2003-2012, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/fs.h>
-#include <linux/errno.h>
-#include <linux/types.h>
-#include <linux/fcntl.h>
-#include <linux/ioctl.h>
-#include <linux/cdev.h>
-#include <linux/list.h>
-#include <linux/delay.h>
-#include <linux/sched.h>
-#include <linux/uuid.h>
-#include <linux/jiffies.h>
-#include <linux/uaccess.h>
-#include <linux/slab.h>
-
-#include <linux/mei.h>
-
-#include "mei_dev.h"
-#include "hbm.h"
-#include "client.h"
-
-const uuid_le mei_amthif_guid = UUID_LE(0x12f80028, 0xb4b7, 0x4b2d,
- 0xac, 0xa8, 0x46, 0xe0,
- 0xff, 0x65, 0x81, 0x4c);
-
-/**
- * mei_amthif_reset_params - initializes mei device iamthif
- *
- * @dev: the device structure
- */
-void mei_amthif_reset_params(struct mei_device *dev)
-{
- /* reset iamthif parameters. */
- dev->iamthif_canceled = false;
- dev->iamthif_state = MEI_IAMTHIF_IDLE;
- dev->iamthif_stall_timer = 0;
- dev->iamthif_open_count = 0;
-}
-
-/**
- * mei_amthif_host_init - mei initialization amthif client.
- *
- * @dev: the device structure
- * @me_cl: me client
- *
- * Return: 0 on success, <0 on failure.
- */
-int mei_amthif_host_init(struct mei_device *dev, struct mei_me_client *me_cl)
-{
- struct mei_cl *cl = &dev->iamthif_cl;
- int ret;
-
- mutex_lock(&dev->device_lock);
-
- if (mei_cl_is_connected(cl)) {
- ret = 0;
- goto out;
- }
-
- dev->iamthif_state = MEI_IAMTHIF_IDLE;
-
- mei_cl_init(cl, dev);
-
- ret = mei_cl_link(cl);
- if (ret < 0) {
- dev_err(dev->dev, "amthif: failed cl_link %d\n", ret);
- goto out;
- }
-
- ret = mei_cl_connect(cl, me_cl, NULL);
-
-out:
- mutex_unlock(&dev->device_lock);
- return ret;
-}
-
-/**
- * mei_amthif_read_start - queue message for sending read credential
- *
- * @cl: host client
- * @fp: file pointer of message recipient
- *
- * Return: 0 on success, <0 on failure.
- */
-static int mei_amthif_read_start(struct mei_cl *cl, const struct file *fp)
-{
- struct mei_device *dev = cl->dev;
- struct mei_cl_cb *cb;
-
- cb = mei_cl_enqueue_ctrl_wr_cb(cl, mei_cl_mtu(cl), MEI_FOP_READ, fp);
- if (!cb)
- return -ENOMEM;
-
- cl->rx_flow_ctrl_creds++;
-
- dev->iamthif_state = MEI_IAMTHIF_READING;
- cl->fp = cb->fp;
-
- return 0;
-}
-
-/**
- * mei_amthif_run_next_cmd - send next amt command from queue
- *
- * @dev: the device structure
- *
- * Return: 0 on success, <0 on failure.
- */
-int mei_amthif_run_next_cmd(struct mei_device *dev)
-{
- struct mei_cl *cl = &dev->iamthif_cl;
- struct mei_cl_cb *cb;
- int ret;
-
- dev->iamthif_canceled = false;
-
- dev_dbg(dev->dev, "complete amthif cmd_list cb.\n");
-
- cb = list_first_entry_or_null(&dev->amthif_cmd_list, typeof(*cb), list);
- if (!cb) {
- dev->iamthif_state = MEI_IAMTHIF_IDLE;
- cl->fp = NULL;
- return 0;
- }
-
- list_del_init(&cb->list);
- dev->iamthif_state = MEI_IAMTHIF_WRITING;
- cl->fp = cb->fp;
-
- ret = mei_cl_write(cl, cb);
- if (ret < 0)
- return ret;
-
- if (cb->completed)
- cb->status = mei_amthif_read_start(cl, cb->fp);
-
- return 0;
-}
-
-/**
- * mei_amthif_write - write amthif data to amthif client
- *
- * @cl: host client
- * @cb: mei call back struct
- *
- * Return: 0 on success, <0 on failure.
- */
-int mei_amthif_write(struct mei_cl *cl, struct mei_cl_cb *cb)
-{
-
- struct mei_device *dev = cl->dev;
-
- list_add_tail(&cb->list, &dev->amthif_cmd_list);
-
- /*
- * The previous request is still in processing, queue this one.
- */
- if (dev->iamthif_state != MEI_IAMTHIF_IDLE)
- return 0;
-
- return mei_amthif_run_next_cmd(dev);
-}
-
-/**
- * mei_amthif_poll - the amthif poll function
- *
- * @file: pointer to file structure
- * @wait: pointer to poll_table structure
- *
- * Return: poll mask
- *
- * Locking: called under "dev->device_lock" lock
- */
-unsigned int mei_amthif_poll(struct file *file, poll_table *wait)
-{
- struct mei_cl *cl = file->private_data;
- struct mei_cl_cb *cb = mei_cl_read_cb(cl, file);
- unsigned int mask = 0;
-
- poll_wait(file, &cl->rx_wait, wait);
- if (cb)
- mask |= POLLIN | POLLRDNORM;
-
- return mask;
-}
-
-/**
- * mei_amthif_irq_write - write iamthif command in irq thread context.
- *
- * @cl: private data of the file object.
- * @cb: callback block.
- * @cmpl_list: complete list.
- *
- * Return: 0, OK; otherwise, error.
- */
-int mei_amthif_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
- struct list_head *cmpl_list)
-{
- int ret;
-
- ret = mei_cl_irq_write(cl, cb, cmpl_list);
- if (ret)
- return ret;
-
- if (cb->completed)
- cb->status = mei_amthif_read_start(cl, cb->fp);
-
- return 0;
-}
-
-/**
- * mei_amthif_irq_read_msg - read routine after ISR to
- * handle the read amthif message
- *
- * @cl: mei client
- * @mei_hdr: header of amthif message
- * @cmpl_list: completed callbacks list
- *
- * Return: -ENODEV if cb is NULL 0 otherwise; error message is in cb->status
- */
-int mei_amthif_irq_read_msg(struct mei_cl *cl,
- struct mei_msg_hdr *mei_hdr,
- struct list_head *cmpl_list)
-{
- struct mei_device *dev;
- int ret;
-
- dev = cl->dev;
-
- if (dev->iamthif_state != MEI_IAMTHIF_READING) {
- mei_irq_discard_msg(dev, mei_hdr);
- return 0;
- }
-
- ret = mei_cl_irq_read_msg(cl, mei_hdr, cmpl_list);
- if (ret)
- return ret;
-
- if (!mei_hdr->msg_complete)
- return 0;
-
- dev_dbg(dev->dev, "completed amthif read.\n ");
- dev->iamthif_stall_timer = 0;
-
- return 0;
-}
-
-/**
- * mei_amthif_complete - complete amthif callback.
- *
- * @cl: host client
- * @cb: callback block.
- */
-void mei_amthif_complete(struct mei_cl *cl, struct mei_cl_cb *cb)
-{
- struct mei_device *dev = cl->dev;
-
- dev_dbg(dev->dev, "completing amthif call back.\n");
- switch (cb->fop_type) {
- case MEI_FOP_WRITE:
- if (!cb->status) {
- dev->iamthif_stall_timer = MEI_IAMTHIF_STALL_TIMER;
- mei_schedule_stall_timer(dev);
- mei_io_cb_free(cb);
- return;
- }
- dev->iamthif_state = MEI_IAMTHIF_IDLE;
- cl->fp = NULL;
- if (!dev->iamthif_canceled) {
- /*
- * in case of error enqueue the write cb to complete
- * read list so it can be propagated to the reader
- */
- list_add_tail(&cb->list, &cl->rd_completed);
- wake_up_interruptible(&cl->rx_wait);
- } else {
- mei_io_cb_free(cb);
- }
- break;
- case MEI_FOP_READ:
- if (!dev->iamthif_canceled) {
- list_add_tail(&cb->list, &cl->rd_completed);
- dev_dbg(dev->dev, "amthif read completed\n");
- wake_up_interruptible(&cl->rx_wait);
- } else {
- mei_io_cb_free(cb);
- }
-
- dev->iamthif_stall_timer = 0;
- mei_amthif_run_next_cmd(dev);
- break;
- default:
- WARN_ON(1);
- }
-}
-
-/**
-* mei_amthif_release - the release function
-*
-* @dev: device structure
-* @fp: pointer to file structure
-*
-* Return: 0 on success, <0 on error
-*/
-int mei_amthif_release(struct mei_device *dev, struct file *fp)
-{
- struct mei_cl *cl = fp->private_data;
-
- if (dev->iamthif_open_count > 0)
- dev->iamthif_open_count--;
-
- if (cl->fp == fp && dev->iamthif_state != MEI_IAMTHIF_IDLE) {
-
- dev_dbg(dev->dev, "amthif canceled iamthif state %d\n",
- dev->iamthif_state);
- dev->iamthif_canceled = true;
- }
-
- /* Don't clean ctrl_rd_list here, the reads has to be completed */
- mei_io_list_free_fp(&dev->amthif_cmd_list, fp);
- mei_io_list_free_fp(&cl->rd_completed, fp);
-
- return 0;
-}
diff --git a/drivers/misc/mei/bus-fixup.c b/drivers/misc/mei/bus-fixup.c
index 29f2daed37e0..0208c4b027c5 100644
--- a/drivers/misc/mei/bus-fixup.c
+++ b/drivers/misc/mei/bus-fixup.c
@@ -110,12 +110,13 @@ struct mkhi_msg {
u8 data[0];
} __packed;
+#define MKHI_OSVER_BUF_LEN (sizeof(struct mkhi_msg_hdr) + \
+ sizeof(struct mkhi_fwcaps) + \
+ sizeof(struct mei_os_ver))
static int mei_osver(struct mei_cl_device *cldev)
{
- const size_t size = sizeof(struct mkhi_msg_hdr) +
- sizeof(struct mkhi_fwcaps) +
- sizeof(struct mei_os_ver);
- char buf[size];
+ const size_t size = MKHI_OSVER_BUF_LEN;
+ char buf[MKHI_OSVER_BUF_LEN];
struct mkhi_msg *req;
struct mkhi_fwcaps *fwcaps;
struct mei_os_ver *os_ver;
diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
index df5f78ae3d25..d1928fdd0f43 100644
--- a/drivers/misc/mei/bus.c
+++ b/drivers/misc/mei/bus.c
@@ -1076,12 +1076,6 @@ void mei_cl_bus_rescan_work(struct work_struct *work)
{
struct mei_device *bus =
container_of(work, struct mei_device, bus_rescan_work);
- struct mei_me_client *me_cl;
-
- me_cl = mei_me_cl_by_uuid(bus, &mei_amthif_guid);
- if (me_cl)
- mei_amthif_host_init(bus, me_cl);
- mei_me_cl_put(me_cl);
mei_cl_bus_rescan(bus);
}
diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
index d3e3372424d6..be64969d986a 100644
--- a/drivers/misc/mei/client.c
+++ b/drivers/misc/mei/client.c
@@ -428,7 +428,7 @@ static inline void mei_io_list_free_cl(struct list_head *head,
* @head: io list
* @fp: file pointer (matching cb file object), may be NULL
*/
-void mei_io_list_free_fp(struct list_head *head, const struct file *fp)
+static void mei_io_list_free_fp(struct list_head *head, const struct file *fp)
{
struct mei_cl_cb *cb, *next;
@@ -554,7 +554,7 @@ int mei_cl_flush_queues(struct mei_cl *cl, const struct file *fp)
* @cl: host client to be initialized
* @dev: mei device
*/
-void mei_cl_init(struct mei_cl *cl, struct mei_device *dev)
+static void mei_cl_init(struct mei_cl *cl, struct mei_device *dev)
{
memset(cl, 0, sizeof(struct mei_cl));
init_waitqueue_head(&cl->wait);
@@ -600,7 +600,6 @@ struct mei_cl *mei_cl_allocate(struct mei_device *dev)
int mei_cl_link(struct mei_cl *cl)
{
struct mei_device *dev;
- long open_handle_count;
int id;
if (WARN_ON(!cl || !cl->dev))
@@ -614,8 +613,7 @@ int mei_cl_link(struct mei_cl *cl)
return -EMFILE;
}
- open_handle_count = dev->open_handle_count + dev->iamthif_open_count;
- if (open_handle_count >= MEI_MAX_OPEN_HANDLE_COUNT) {
+ if (dev->open_handle_count >= MEI_MAX_OPEN_HANDLE_COUNT) {
dev_err(dev->dev, "open_handle_count exceeded %d",
MEI_MAX_OPEN_HANDLE_COUNT);
return -EMFILE;
@@ -649,8 +647,7 @@ int mei_cl_unlink(struct mei_cl *cl)
if (!cl)
return 0;
- /* amthif might not be initialized */
- if (!cl->dev)
+ if (WARN_ON(!cl->dev))
return 0;
dev = cl->dev;
@@ -763,7 +760,6 @@ static void mei_cl_set_disconnected(struct mei_cl *cl)
mei_io_list_free_cl(&dev->write_waiting_list, cl);
mei_io_list_flush_cl(&dev->ctrl_rd_list, cl);
mei_io_list_flush_cl(&dev->ctrl_wr_list, cl);
- mei_io_list_free_cl(&dev->amthif_cmd_list, cl);
mei_cl_wake_all(cl);
cl->rx_flow_ctrl_creds = 0;
cl->tx_flow_ctrl_creds = 0;
@@ -1478,7 +1474,7 @@ int mei_cl_read_start(struct mei_cl *cl, size_t length, const struct file *fp)
return -ENOTTY;
}
- if (mei_cl_is_fixed_address(cl) || cl == &dev->iamthif_cl)
+ if (mei_cl_is_fixed_address(cl))
return 0;
/* HW currently supports only one pending read */
diff --git a/drivers/misc/mei/client.h b/drivers/misc/mei/client.h
index 545ae319ba90..5371df4d8af3 100644
--- a/drivers/misc/mei/client.h
+++ b/drivers/misc/mei/client.h
@@ -83,15 +83,12 @@ static inline u8 mei_me_cl_ver(const struct mei_me_client *me_cl)
* MEI IO Functions
*/
void mei_io_cb_free(struct mei_cl_cb *priv_cb);
-void mei_io_list_free_fp(struct list_head *head, const struct file *fp);
/*
* MEI Host Client Functions
*/
struct mei_cl *mei_cl_allocate(struct mei_device *dev);
-void mei_cl_init(struct mei_cl *cl, struct mei_device *dev);
-
int mei_cl_link(struct mei_cl *cl);
int mei_cl_unlink(struct mei_cl *cl);
@@ -205,8 +202,6 @@ int mei_cl_connect(struct mei_cl *cl, struct mei_me_client *me_cl,
int mei_cl_irq_connect(struct mei_cl *cl, struct mei_cl_cb *cb,
struct list_head *cmpl_list);
int mei_cl_read_start(struct mei_cl *cl, size_t length, const struct file *fp);
-int mei_cl_irq_read_msg(struct mei_cl *cl, struct mei_msg_hdr *hdr,
- struct list_head *cmpl_list);
int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb);
int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
struct list_head *cmpl_list);
diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c
index ba3a774c8d71..fe6595fe94f1 100644
--- a/drivers/misc/mei/hbm.c
+++ b/drivers/misc/mei/hbm.c
@@ -166,9 +166,8 @@ void mei_hbm_cl_hdr(struct mei_cl *cl, u8 hbm_cmd, void *buf, size_t len)
*
* Return: 0 on success, <0 on failure.
*/
-static inline
-int mei_hbm_cl_write(struct mei_device *dev, struct mei_cl *cl,
- u8 hbm_cmd, u8 *buf, size_t len)
+static inline int mei_hbm_cl_write(struct mei_device *dev, struct mei_cl *cl,
+ u8 hbm_cmd, void *buf, size_t len)
{
struct mei_msg_hdr mei_hdr;
@@ -632,11 +631,11 @@ static int mei_hbm_stop_req(struct mei_device *dev)
*/
int mei_hbm_cl_flow_control_req(struct mei_device *dev, struct mei_cl *cl)
{
- const size_t len = sizeof(struct hbm_flow_control);
- u8 buf[len];
+ struct hbm_flow_control req;
cl_dbg(dev, cl, "sending flow control\n");
- return mei_hbm_cl_write(dev, cl, MEI_FLOW_CONTROL_CMD, buf, len);
+ return mei_hbm_cl_write(dev, cl, MEI_FLOW_CONTROL_CMD,
+ &req, sizeof(req));
}
/**
@@ -710,10 +709,10 @@ static void mei_hbm_cl_tx_flow_ctrl_creds_res(struct mei_device *dev,
*/
int mei_hbm_cl_disconnect_req(struct mei_device *dev, struct mei_cl *cl)
{
- const size_t len = sizeof(struct hbm_client_connect_request);
- u8 buf[len];
+ struct hbm_client_connect_request req;
- return mei_hbm_cl_write(dev, cl, CLIENT_DISCONNECT_REQ_CMD, buf, len);
+ return mei_hbm_cl_write(dev, cl, CLIENT_DISCONNECT_REQ_CMD,
+ &req, sizeof(req));
}
/**
@@ -726,10 +725,10 @@ int mei_hbm_cl_disconnect_req(struct mei_device *dev, struct mei_cl *cl)
*/
int mei_hbm_cl_disconnect_rsp(struct mei_device *dev, struct mei_cl *cl)
{
- const size_t len = sizeof(struct hbm_client_connect_response);
- u8 buf[len];
+ struct hbm_client_connect_response resp;
- return mei_hbm_cl_write(dev, cl, CLIENT_DISCONNECT_RES_CMD, buf, len);
+ return mei_hbm_cl_write(dev, cl, CLIENT_DISCONNECT_RES_CMD,
+ &resp, sizeof(resp));
}
/**
@@ -763,10 +762,10 @@ static void mei_hbm_cl_disconnect_res(struct mei_device *dev, struct mei_cl *cl,
*/
int mei_hbm_cl_connect_req(struct mei_device *dev, struct mei_cl *cl)
{
- const size_t len = sizeof(struct hbm_client_connect_request);
- u8 buf[len];
+ struct hbm_client_connect_request req;
- return mei_hbm_cl_write(dev, cl, CLIENT_CONNECT_REQ_CMD, buf, len);
+ return mei_hbm_cl_write(dev, cl, CLIENT_CONNECT_REQ_CMD,
+ &req, sizeof(req));
}
/**
diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c
index 13c55b8f9261..c8ad9ee7cb80 100644
--- a/drivers/misc/mei/init.c
+++ b/drivers/misc/mei/init.c
@@ -146,18 +146,9 @@ int mei_reset(struct mei_device *dev)
/* fall through and remove the sw state even if hw reset has failed */
/* no need to clean up software state in case of power up */
- if (state != MEI_DEV_INITIALIZING &&
- state != MEI_DEV_POWER_UP) {
-
- /* remove all waiting requests */
+ if (state != MEI_DEV_INITIALIZING && state != MEI_DEV_POWER_UP)
mei_cl_all_disconnect(dev);
- /* remove entry if already in list */
- dev_dbg(dev->dev, "remove iamthif from the file list.\n");
- mei_cl_unlink(&dev->iamthif_cl);
- mei_amthif_reset_params(dev);
- }
-
mei_hbm_reset(dev);
dev->rd_msg_hdr = 0;
@@ -401,9 +392,6 @@ void mei_device_init(struct mei_device *dev,
INIT_WORK(&dev->reset_work, mei_reset_work);
INIT_WORK(&dev->bus_rescan_work, mei_cl_bus_rescan_work);
- INIT_LIST_HEAD(&dev->iamthif_cl.link);
- INIT_LIST_HEAD(&dev->amthif_cmd_list);
-
bitmap_zero(dev->host_clients_map, MEI_CLIENTS_MAX);
dev->open_handle_count = 0;
diff --git a/drivers/misc/mei/interrupt.c b/drivers/misc/mei/interrupt.c
index 406e9e2b2fff..c14e35201721 100644
--- a/drivers/misc/mei/interrupt.c
+++ b/drivers/misc/mei/interrupt.c
@@ -47,10 +47,7 @@ void mei_irq_compl_handler(struct mei_device *dev, struct list_head *cmpl_list)
list_del_init(&cb->list);
dev_dbg(dev->dev, "completing call back.\n");
- if (cl == &dev->iamthif_cl)
- mei_amthif_complete(cl, cb);
- else
- mei_cl_complete(cl, cb);
+ mei_cl_complete(cl, cb);
}
}
EXPORT_SYMBOL_GPL(mei_irq_compl_handler);
@@ -76,7 +73,7 @@ static inline int mei_cl_hbm_equal(struct mei_cl *cl,
* @dev: mei device
* @hdr: message header
*/
-void mei_irq_discard_msg(struct mei_device *dev, struct mei_msg_hdr *hdr)
+static void mei_irq_discard_msg(struct mei_device *dev, struct mei_msg_hdr *hdr)
{
/*
* no need to check for size as it is guarantied
@@ -96,9 +93,9 @@ void mei_irq_discard_msg(struct mei_device *dev, struct mei_msg_hdr *hdr)
*
* Return: always 0
*/
-int mei_cl_irq_read_msg(struct mei_cl *cl,
- struct mei_msg_hdr *mei_hdr,
- struct list_head *cmpl_list)
+static int mei_cl_irq_read_msg(struct mei_cl *cl,
+ struct mei_msg_hdr *mei_hdr,
+ struct list_head *cmpl_list)
{
struct mei_device *dev = cl->dev;
struct mei_cl_cb *cb;
@@ -313,11 +310,7 @@ int mei_irq_read_handler(struct mei_device *dev,
goto end;
}
- if (cl == &dev->iamthif_cl) {
- ret = mei_amthif_irq_read_msg(cl, mei_hdr, cmpl_list);
- } else {
- ret = mei_cl_irq_read_msg(cl, mei_hdr, cmpl_list);
- }
+ ret = mei_cl_irq_read_msg(cl, mei_hdr, cmpl_list);
reset_slots:
@@ -423,10 +416,7 @@ int mei_irq_write_handler(struct mei_device *dev, struct list_head *cmpl_list)
dev_dbg(dev->dev, "complete write list cb.\n");
list_for_each_entry_safe(cb, next, &dev->write_list, list) {
cl = cb->cl;
- if (cl == &dev->iamthif_cl)
- ret = mei_amthif_irq_write(cl, cb, cmpl_list);
- else
- ret = mei_cl_irq_write(cl, cb, cmpl_list);
+ ret = mei_cl_irq_write(cl, cb, cmpl_list);
if (ret)
return ret;
}
@@ -512,20 +502,6 @@ void mei_timer(struct work_struct *work)
}
}
- if (!mei_cl_is_connected(&dev->iamthif_cl))
- goto out;
-
- if (dev->iamthif_stall_timer) {
- if (--dev->iamthif_stall_timer == 0) {
- dev_err(dev->dev, "timer: amthif hanged.\n");
- mei_reset(dev);
-
- mei_amthif_run_next_cmd(dev);
- goto out;
- }
- reschedule_timer = true;
- }
-
out:
if (dev->dev_state != MEI_DEV_DISABLED && reschedule_timer)
mei_schedule_stall_timer(dev);
diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
index bf816449cd40..e825f013e54e 100644
--- a/drivers/misc/mei/main.c
+++ b/drivers/misc/mei/main.c
@@ -103,10 +103,7 @@ static int mei_release(struct inode *inode, struct file *file)
dev = cl->dev;
mutex_lock(&dev->device_lock);
- if (cl == &dev->iamthif_cl) {
- rets = mei_amthif_release(dev, file);
- goto out;
- }
+
rets = mei_cl_disconnect(cl);
mei_cl_flush_queues(cl, file);
@@ -117,7 +114,7 @@ static int mei_release(struct inode *inode, struct file *file)
file->private_data = NULL;
kfree(cl);
-out:
+
mutex_unlock(&dev->device_lock);
return rets;
}
@@ -182,8 +179,6 @@ static ssize_t mei_read(struct file *file, char __user *ubuf,
goto out;
}
-
-again:
mutex_unlock(&dev->device_lock);
if (wait_event_interruptible(cl->rx_wait,
!list_empty(&cl->rd_completed) ||
@@ -201,14 +196,6 @@ again:
cb = mei_cl_read_cb(cl, file);
if (!cb) {
- /*
- * For amthif all the waiters are woken up,
- * but only fp with matching cb->fp get the cb,
- * the others have to return to wait on read.
- */
- if (cl == &dev->iamthif_cl)
- goto again;
-
rets = 0;
goto out;
}
@@ -319,13 +306,6 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf,
goto out;
}
- if (cl == &dev->iamthif_cl) {
- rets = mei_amthif_write(cl, cb);
- if (!rets)
- rets = length;
- goto out;
- }
-
rets = mei_cl_write(cl, cb);
out:
mutex_unlock(&dev->device_lock);
@@ -388,30 +368,6 @@ static int mei_ioctl_connect_client(struct file *file,
dev_dbg(dev->dev, "FW Client - Max Msg Len = %d\n",
me_cl->props.max_msg_length);
- /* if we're connecting to amthif client then we will use the
- * existing connection
- */
- if (uuid_le_cmp(data->in_client_uuid, mei_amthif_guid) == 0) {
- dev_dbg(dev->dev, "FW Client is amthi\n");
- if (!mei_cl_is_connected(&dev->iamthif_cl)) {
- rets = -ENODEV;
- goto end;
- }
- mei_cl_unlink(cl);
-
- kfree(cl);
- cl = NULL;
- dev->iamthif_open_count++;
- file->private_data = &dev->iamthif_cl;
-
- client = &data->out_client_properties;
- client->max_msg_length = me_cl->props.max_msg_length;
- client->protocol_version = me_cl->props.protocol_version;
- rets = dev->iamthif_cl.status;
-
- goto end;
- }
-
/* prepare the output buffer */
client = &data->out_client_properties;
client->max_msg_length = me_cl->props.max_msg_length;
@@ -615,11 +571,6 @@ static unsigned int mei_poll(struct file *file, poll_table *wait)
mask |= POLLPRI;
}
- if (cl == &dev->iamthif_cl) {
- mask |= mei_amthif_poll(file, wait);
- goto out;
- }
-
if (req_events & (POLLIN | POLLRDNORM)) {
poll_wait(file, &cl->rx_wait, wait);
@@ -635,6 +586,77 @@ out:
}
/**
+ * mei_cl_is_write_queued - check if the client has pending writes.
+ *
+ * @cl: writing host client
+ *
+ * Return: true if client is writing, false otherwise.
+ */
+static bool mei_cl_is_write_queued(struct mei_cl *cl)
+{
+ struct mei_device *dev = cl->dev;
+ struct mei_cl_cb *cb;
+
+ list_for_each_entry(cb, &dev->write_list, list)
+ if (cb->cl == cl)
+ return true;
+ list_for_each_entry(cb, &dev->write_waiting_list, list)
+ if (cb->cl == cl)
+ return true;
+ return false;
+}
+
+/**
+ * mei_fsync - the fsync handler
+ *
+ * @fp: pointer to file structure
+ * @start: unused
+ * @end: unused
+ * @datasync: unused
+ *
+ * Return: 0 on success, -ENODEV if client is not connected
+ */
+static int mei_fsync(struct file *fp, loff_t start, loff_t end, int datasync)
+{
+ struct mei_cl *cl = fp->private_data;
+ struct mei_device *dev;
+ int rets;
+
+ if (WARN_ON(!cl || !cl->dev))
+ return -ENODEV;
+
+ dev = cl->dev;
+
+ mutex_lock(&dev->device_lock);
+
+ if (dev->dev_state != MEI_DEV_ENABLED || !mei_cl_is_connected(cl)) {
+ rets = -ENODEV;
+ goto out;
+ }
+
+ while (mei_cl_is_write_queued(cl)) {
+ mutex_unlock(&dev->device_lock);
+ rets = wait_event_interruptible(cl->tx_wait,
+ cl->writing_state == MEI_WRITE_COMPLETE ||
+ !mei_cl_is_connected(cl));
+ mutex_lock(&dev->device_lock);
+ if (rets) {
+ if (signal_pending(current))
+ rets = -EINTR;
+ goto out;
+ }
+ if (!mei_cl_is_connected(cl)) {
+ rets = -ENODEV;
+ goto out;
+ }
+ }
+ rets = 0;
+out:
+ mutex_unlock(&dev->device_lock);
+ return rets;
+}
+
+/**
* mei_fasync - asynchronous io support
*
* @fd: file descriptor
@@ -749,6 +771,7 @@ static const struct file_operations mei_fops = {
.release = mei_release,
.write = mei_write,
.poll = mei_poll,
+ .fsync = mei_fsync,
.fasync = mei_fasync,
.llseek = no_llseek
};
diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h
index d41aac53a2ac..63a67c99fc78 100644
--- a/drivers/misc/mei/mei_dev.h
+++ b/drivers/misc/mei/mei_dev.h
@@ -26,12 +26,6 @@
#include "hw.h"
#include "hbm.h"
-
-/*
- * AMTHI Client UUID
- */
-extern const uuid_le mei_amthif_guid;
-
#define MEI_RD_MSG_BUF_SIZE (128 * sizeof(u32))
/*
@@ -78,12 +72,6 @@ enum mei_dev_state {
const char *mei_dev_state_str(int state);
-enum iamthif_states {
- MEI_IAMTHIF_IDLE,
- MEI_IAMTHIF_WRITING,
- MEI_IAMTHIF_READING,
-};
-
enum mei_file_transaction_states {
MEI_IDLE,
MEI_WRITING,
@@ -418,13 +406,6 @@ const char *mei_pg_state_str(enum mei_pg_state state);
* @allow_fixed_address: allow user space to connect a fixed client
* @override_fixed_address: force allow fixed address behavior
*
- * @amthif_cmd_list : amthif list for cmd waiting
- * @iamthif_cl : amthif host client
- * @iamthif_open_count : number of opened amthif connections
- * @iamthif_stall_timer : timer to detect amthif hang
- * @iamthif_state : amthif processor state
- * @iamthif_canceled : current amthif command is canceled
- *
* @reset_work : work item for the device reset
* @bus_rescan_work : work item for the bus rescan
*
@@ -500,14 +481,6 @@ struct mei_device {
bool allow_fixed_address;
bool override_fixed_address;
- /* amthif list for cmd waiting */
- struct list_head amthif_cmd_list;
- struct mei_cl iamthif_cl;
- long iamthif_open_count;
- u32 iamthif_stall_timer;
- enum iamthif_states iamthif_state;
- bool iamthif_canceled;
-
struct work_struct reset_work;
struct work_struct bus_rescan_work;
@@ -579,28 +552,6 @@ int mei_irq_write_handler(struct mei_device *dev, struct list_head *cmpl_list);
void mei_irq_compl_handler(struct mei_device *dev, struct list_head *cmpl_list);
/*
- * AMTHIF - AMT Host Interface Functions
- */
-void mei_amthif_reset_params(struct mei_device *dev);
-
-int mei_amthif_host_init(struct mei_device *dev, struct mei_me_client *me_cl);
-
-unsigned int mei_amthif_poll(struct file *file, poll_table *wait);
-
-int mei_amthif_release(struct mei_device *dev, struct file *file);
-
-int mei_amthif_write(struct mei_cl *cl, struct mei_cl_cb *cb);
-int mei_amthif_run_next_cmd(struct mei_device *dev);
-int mei_amthif_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
- struct list_head *cmpl_list);
-
-void mei_amthif_complete(struct mei_cl *cl, struct mei_cl_cb *cb);
-int mei_amthif_irq_read_msg(struct mei_cl *cl,
- struct mei_msg_hdr *mei_hdr,
- struct list_head *cmpl_list);
-int mei_amthif_irq_read(struct mei_device *dev, s32 *slots);
-
-/*
* Register Access Function
*/
@@ -711,8 +662,6 @@ bool mei_hbuf_acquire(struct mei_device *dev);
bool mei_write_is_idle(struct mei_device *dev);
-void mei_irq_discard_msg(struct mei_device *dev, struct mei_msg_hdr *hdr);
-
#if IS_ENABLED(CONFIG_DEBUG_FS)
int mei_dbgfs_register(struct mei_device *dev, const char *name);
void mei_dbgfs_deregister(struct mei_device *dev);
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index 0a668fdfbbe9..8621a198a2ce 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -242,11 +242,38 @@ end:
}
/**
+ * mei_me_shutdown - Device Shutdown Routine
+ *
+ * @pdev: PCI device structure
+ *
+ * mei_me_shutdown is called from the reboot notifier;
+ * it is a simplified version of remove so we go down
+ * faster.
+ */
+static void mei_me_shutdown(struct pci_dev *pdev)
+{
+ struct mei_device *dev;
+
+ dev = pci_get_drvdata(pdev);
+ if (!dev)
+ return;
+
+ dev_dbg(&pdev->dev, "shutdown\n");
+ mei_stop(dev);
+
+ if (!pci_dev_run_wake(pdev))
+ mei_me_unset_pm_domain(dev);
+
+ mei_disable_interrupts(dev);
+ free_irq(pdev->irq, dev);
+}
+
+/**
* mei_me_remove - Device Removal Routine
*
* @pdev: PCI device structure
*
- * mei_remove is called by the PCI subsystem to alert the driver
+ * mei_me_remove is called by the PCI subsystem to alert the driver
* that it should release a PCI device.
*/
static void mei_me_remove(struct pci_dev *pdev)
@@ -456,7 +483,7 @@ static struct pci_driver mei_me_driver = {
.id_table = mei_me_pci_tbl,
.probe = mei_me_probe,
.remove = mei_me_remove,
- .shutdown = mei_me_remove,
+ .shutdown = mei_me_shutdown,
.driver.pm = MEI_ME_PM_OPS,
};
diff --git a/drivers/misc/mei/pci-txe.c b/drivers/misc/mei/pci-txe.c
index fe088b40daf9..f811cd524468 100644
--- a/drivers/misc/mei/pci-txe.c
+++ b/drivers/misc/mei/pci-txe.c
@@ -161,6 +161,33 @@ end:
}
/**
+ * mei_txe_shutdown - Device Shutdown Routine
+ *
+ * @pdev: PCI device structure
+ *
+ * mei_txe_shutdown is called from the reboot notifier;
+ * it is a simplified version of remove so we go down
+ * faster.
+ */
+static void mei_txe_shutdown(struct pci_dev *pdev)
+{
+ struct mei_device *dev;
+
+ dev = pci_get_drvdata(pdev);
+ if (!dev)
+ return;
+
+ dev_dbg(&pdev->dev, "shutdown\n");
+ mei_stop(dev);
+
+ if (!pci_dev_run_wake(pdev))
+ mei_txe_unset_pm_domain(dev);
+
+ mei_disable_interrupts(dev);
+ free_irq(pdev->irq, dev);
+}
+
+/**
* mei_txe_remove - Device Removal Routine
*
* @pdev: PCI device structure
@@ -386,7 +413,7 @@ static struct pci_driver mei_txe_driver = {
.id_table = mei_txe_pci_tbl,
.probe = mei_txe_probe,
.remove = mei_txe_remove,
- .shutdown = mei_txe_remove,
+ .shutdown = mei_txe_shutdown,
.driver.pm = MEI_TXE_PM_OPS,
};
diff --git a/drivers/misc/panel.c b/drivers/misc/panel.c
deleted file mode 100644
index ef2ece0f26af..000000000000
--- a/drivers/misc/panel.c
+++ /dev/null
@@ -1,2439 +0,0 @@
-/*
- * Front panel driver for Linux
- * Copyright (C) 2000-2008, Willy Tarreau <w@1wt.eu>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
- * This code drives an LCD module (/dev/lcd), and a keypad (/dev/keypad)
- * connected to a parallel printer port.
- *
- * The LCD module may either be an HD44780-like 8-bit parallel LCD, or a 1-bit
- * serial module compatible with Samsung's KS0074. The pins may be connected in
- * any combination, everything is programmable.
- *
- * The keypad consists in a matrix of push buttons connecting input pins to
- * data output pins or to the ground. The combinations have to be hard-coded
- * in the driver, though several profiles exist and adding new ones is easy.
- *
- * Several profiles are provided for commonly found LCD+keypad modules on the
- * market, such as those found in Nexcom's appliances.
- *
- * FIXME:
- * - the initialization/deinitialization process is very dirty and should
- * be rewritten. It may even be buggy.
- *
- * TODO:
- * - document 24 keys keyboard (3 rows of 8 cols, 32 diodes + 2 inputs)
- * - make the LCD a part of a virtual screen of Vx*Vy
- * - make the inputs list smp-safe
- * - change the keyboard to a double mapping : signals -> key_id -> values
- * so that applications can change values without knowing signals
- *
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/module.h>
-
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/signal.h>
-#include <linux/sched.h>
-#include <linux/spinlock.h>
-#include <linux/interrupt.h>
-#include <linux/miscdevice.h>
-#include <linux/slab.h>
-#include <linux/ioport.h>
-#include <linux/fcntl.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/kernel.h>
-#include <linux/ctype.h>
-#include <linux/parport.h>
-#include <linux/list.h>
-#include <linux/notifier.h>
-#include <linux/reboot.h>
-#include <linux/workqueue.h>
-#include <generated/utsrelease.h>
-
-#include <linux/io.h>
-#include <linux/uaccess.h>
-
-#define LCD_MINOR 156
-#define KEYPAD_MINOR 185
-
-#define LCD_MAXBYTES 256 /* max burst write */
-
-#define KEYPAD_BUFFER 64
-
-/* poll the keyboard this every second */
-#define INPUT_POLL_TIME (HZ / 50)
-/* a key starts to repeat after this times INPUT_POLL_TIME */
-#define KEYPAD_REP_START (10)
-/* a key repeats this times INPUT_POLL_TIME */
-#define KEYPAD_REP_DELAY (2)
-
-/* keep the light on this many seconds for each flash */
-#define FLASH_LIGHT_TEMPO (4)
-
-/* converts an r_str() input to an active high, bits string : 000BAOSE */
-#define PNL_PINPUT(a) ((((unsigned char)(a)) ^ 0x7F) >> 3)
-
-#define PNL_PBUSY 0x80 /* inverted input, active low */
-#define PNL_PACK 0x40 /* direct input, active low */
-#define PNL_POUTPA 0x20 /* direct input, active high */
-#define PNL_PSELECD 0x10 /* direct input, active high */
-#define PNL_PERRORP 0x08 /* direct input, active low */
-
-#define PNL_PBIDIR 0x20 /* bi-directional ports */
-/* high to read data in or-ed with data out */
-#define PNL_PINTEN 0x10
-#define PNL_PSELECP 0x08 /* inverted output, active low */
-#define PNL_PINITP 0x04 /* direct output, active low */
-#define PNL_PAUTOLF 0x02 /* inverted output, active low */
-#define PNL_PSTROBE 0x01 /* inverted output */
-
-#define PNL_PD0 0x01
-#define PNL_PD1 0x02
-#define PNL_PD2 0x04
-#define PNL_PD3 0x08
-#define PNL_PD4 0x10
-#define PNL_PD5 0x20
-#define PNL_PD6 0x40
-#define PNL_PD7 0x80
-
-#define PIN_NONE 0
-#define PIN_STROBE 1
-#define PIN_D0 2
-#define PIN_D1 3
-#define PIN_D2 4
-#define PIN_D3 5
-#define PIN_D4 6
-#define PIN_D5 7
-#define PIN_D6 8
-#define PIN_D7 9
-#define PIN_AUTOLF 14
-#define PIN_INITP 16
-#define PIN_SELECP 17
-#define PIN_NOT_SET 127
-
-#define LCD_FLAG_B 0x0004 /* blink on */
-#define LCD_FLAG_C 0x0008 /* cursor on */
-#define LCD_FLAG_D 0x0010 /* display on */
-#define LCD_FLAG_F 0x0020 /* large font mode */
-#define LCD_FLAG_N 0x0040 /* 2-rows mode */
-#define LCD_FLAG_L 0x0080 /* backlight enabled */
-
-/* LCD commands */
-#define LCD_CMD_DISPLAY_CLEAR 0x01 /* Clear entire display */
-
-#define LCD_CMD_ENTRY_MODE 0x04 /* Set entry mode */
-#define LCD_CMD_CURSOR_INC 0x02 /* Increment cursor */
-
-#define LCD_CMD_DISPLAY_CTRL 0x08 /* Display control */
-#define LCD_CMD_DISPLAY_ON 0x04 /* Set display on */
-#define LCD_CMD_CURSOR_ON 0x02 /* Set cursor on */
-#define LCD_CMD_BLINK_ON 0x01 /* Set blink on */
-
-#define LCD_CMD_SHIFT 0x10 /* Shift cursor/display */
-#define LCD_CMD_DISPLAY_SHIFT 0x08 /* Shift display instead of cursor */
-#define LCD_CMD_SHIFT_RIGHT 0x04 /* Shift display/cursor to the right */
-
-#define LCD_CMD_FUNCTION_SET 0x20 /* Set function */
-#define LCD_CMD_DATA_LEN_8BITS 0x10 /* Set data length to 8 bits */
-#define LCD_CMD_TWO_LINES 0x08 /* Set to two display lines */
-#define LCD_CMD_FONT_5X10_DOTS 0x04 /* Set char font to 5x10 dots */
-
-#define LCD_CMD_SET_CGRAM_ADDR 0x40 /* Set char generator RAM address */
-
-#define LCD_CMD_SET_DDRAM_ADDR 0x80 /* Set display data RAM address */
-
-#define LCD_ESCAPE_LEN 24 /* max chars for LCD escape command */
-#define LCD_ESCAPE_CHAR 27 /* use char 27 for escape command */
-
-#define NOT_SET -1
-
-/* macros to simplify use of the parallel port */
-#define r_ctr(x) (parport_read_control((x)->port))
-#define r_dtr(x) (parport_read_data((x)->port))
-#define r_str(x) (parport_read_status((x)->port))
-#define w_ctr(x, y) (parport_write_control((x)->port, (y)))
-#define w_dtr(x, y) (parport_write_data((x)->port, (y)))
-
-/* this defines which bits are to be used and which ones to be ignored */
-/* logical or of the output bits involved in the scan matrix */
-static __u8 scan_mask_o;
-/* logical or of the input bits involved in the scan matrix */
-static __u8 scan_mask_i;
-
-enum input_type {
- INPUT_TYPE_STD,
- INPUT_TYPE_KBD,
-};
-
-enum input_state {
- INPUT_ST_LOW,
- INPUT_ST_RISING,
- INPUT_ST_HIGH,
- INPUT_ST_FALLING,
-};
-
-struct logical_input {
- struct list_head list;
- __u64 mask;
- __u64 value;
- enum input_type type;
- enum input_state state;
- __u8 rise_time, fall_time;
- __u8 rise_timer, fall_timer, high_timer;
-
- union {
- struct { /* valid when type == INPUT_TYPE_STD */
- void (*press_fct)(int);
- void (*release_fct)(int);
- int press_data;
- int release_data;
- } std;
- struct { /* valid when type == INPUT_TYPE_KBD */
- /* strings can be non null-terminated */
- char press_str[sizeof(void *) + sizeof(int)];
- char repeat_str[sizeof(void *) + sizeof(int)];
- char release_str[sizeof(void *) + sizeof(int)];
- } kbd;
- } u;
-};
-
-static LIST_HEAD(logical_inputs); /* list of all defined logical inputs */
-
-/* physical contacts history
- * Physical contacts are a 45 bits string of 9 groups of 5 bits each.
- * The 8 lower groups correspond to output bits 0 to 7, and the 9th group
- * corresponds to the ground.
- * Within each group, bits are stored in the same order as read on the port :
- * BAPSE (busy=4, ack=3, paper empty=2, select=1, error=0).
- * So, each __u64 is represented like this :
- * 0000000000000000000BAPSEBAPSEBAPSEBAPSEBAPSEBAPSEBAPSEBAPSEBAPSE
- * <-----unused------><gnd><d07><d06><d05><d04><d03><d02><d01><d00>
- */
-
-/* what has just been read from the I/O ports */
-static __u64 phys_read;
-/* previous phys_read */
-static __u64 phys_read_prev;
-/* stabilized phys_read (phys_read|phys_read_prev) */
-static __u64 phys_curr;
-/* previous phys_curr */
-static __u64 phys_prev;
-/* 0 means that at least one logical signal needs be computed */
-static char inputs_stable;
-
-/* these variables are specific to the keypad */
-static struct {
- bool enabled;
-} keypad;
-
-static char keypad_buffer[KEYPAD_BUFFER];
-static int keypad_buflen;
-static int keypad_start;
-static char keypressed;
-static wait_queue_head_t keypad_read_wait;
-
-/* lcd-specific variables */
-static struct {
- bool enabled;
- bool initialized;
- bool must_clear;
-
- int height;
- int width;
- int bwidth;
- int hwidth;
- int charset;
- int proto;
-
- struct delayed_work bl_work;
- struct mutex bl_tempo_lock; /* Protects access to bl_tempo */
- bool bl_tempo;
-
- /* TODO: use union here? */
- struct {
- int e;
- int rs;
- int rw;
- int cl;
- int da;
- int bl;
- } pins;
-
- /* contains the LCD config state */
- unsigned long int flags;
-
- /* Contains the LCD X and Y offset */
- struct {
- unsigned long int x;
- unsigned long int y;
- } addr;
-
- /* Current escape sequence and it's length or -1 if outside */
- struct {
- char buf[LCD_ESCAPE_LEN + 1];
- int len;
- } esc_seq;
-} lcd;
-
-/* Needed only for init */
-static int selected_lcd_type = NOT_SET;
-
-/*
- * Bit masks to convert LCD signals to parallel port outputs.
- * _d_ are values for data port, _c_ are for control port.
- * [0] = signal OFF, [1] = signal ON, [2] = mask
- */
-#define BIT_CLR 0
-#define BIT_SET 1
-#define BIT_MSK 2
-#define BIT_STATES 3
-/*
- * one entry for each bit on the LCD
- */
-#define LCD_BIT_E 0
-#define LCD_BIT_RS 1
-#define LCD_BIT_RW 2
-#define LCD_BIT_BL 3
-#define LCD_BIT_CL 4
-#define LCD_BIT_DA 5
-#define LCD_BITS 6
-
-/*
- * each bit can be either connected to a DATA or CTRL port
- */
-#define LCD_PORT_C 0
-#define LCD_PORT_D 1
-#define LCD_PORTS 2
-
-static unsigned char lcd_bits[LCD_PORTS][LCD_BITS][BIT_STATES];
-
-/*
- * LCD protocols
- */
-#define LCD_PROTO_PARALLEL 0
-#define LCD_PROTO_SERIAL 1
-#define LCD_PROTO_TI_DA8XX_LCD 2
-
-/*
- * LCD character sets
- */
-#define LCD_CHARSET_NORMAL 0
-#define LCD_CHARSET_KS0074 1
-
-/*
- * LCD types
- */
-#define LCD_TYPE_NONE 0
-#define LCD_TYPE_CUSTOM 1
-#define LCD_TYPE_OLD 2
-#define LCD_TYPE_KS0074 3
-#define LCD_TYPE_HANTRONIX 4
-#define LCD_TYPE_NEXCOM 5
-
-/*
- * keypad types
- */
-#define KEYPAD_TYPE_NONE 0
-#define KEYPAD_TYPE_OLD 1
-#define KEYPAD_TYPE_NEW 2
-#define KEYPAD_TYPE_NEXCOM 3
-
-/*
- * panel profiles
- */
-#define PANEL_PROFILE_CUSTOM 0
-#define PANEL_PROFILE_OLD 1
-#define PANEL_PROFILE_NEW 2
-#define PANEL_PROFILE_HANTRONIX 3
-#define PANEL_PROFILE_NEXCOM 4
-#define PANEL_PROFILE_LARGE 5
-
-/*
- * Construct custom config from the kernel's configuration
- */
-#define DEFAULT_PARPORT 0
-#define DEFAULT_PROFILE PANEL_PROFILE_LARGE
-#define DEFAULT_KEYPAD_TYPE KEYPAD_TYPE_OLD
-#define DEFAULT_LCD_TYPE LCD_TYPE_OLD
-#define DEFAULT_LCD_HEIGHT 2
-#define DEFAULT_LCD_WIDTH 40
-#define DEFAULT_LCD_BWIDTH 40
-#define DEFAULT_LCD_HWIDTH 64
-#define DEFAULT_LCD_CHARSET LCD_CHARSET_NORMAL
-#define DEFAULT_LCD_PROTO LCD_PROTO_PARALLEL
-
-#define DEFAULT_LCD_PIN_E PIN_AUTOLF
-#define DEFAULT_LCD_PIN_RS PIN_SELECP
-#define DEFAULT_LCD_PIN_RW PIN_INITP
-#define DEFAULT_LCD_PIN_SCL PIN_STROBE
-#define DEFAULT_LCD_PIN_SDA PIN_D0
-#define DEFAULT_LCD_PIN_BL PIN_NOT_SET
-
-#ifdef CONFIG_PANEL_PARPORT
-#undef DEFAULT_PARPORT
-#define DEFAULT_PARPORT CONFIG_PANEL_PARPORT
-#endif
-
-#ifdef CONFIG_PANEL_PROFILE
-#undef DEFAULT_PROFILE
-#define DEFAULT_PROFILE CONFIG_PANEL_PROFILE
-#endif
-
-#if DEFAULT_PROFILE == 0 /* custom */
-#ifdef CONFIG_PANEL_KEYPAD
-#undef DEFAULT_KEYPAD_TYPE
-#define DEFAULT_KEYPAD_TYPE CONFIG_PANEL_KEYPAD
-#endif
-
-#ifdef CONFIG_PANEL_LCD
-#undef DEFAULT_LCD_TYPE
-#define DEFAULT_LCD_TYPE CONFIG_PANEL_LCD
-#endif
-
-#ifdef CONFIG_PANEL_LCD_HEIGHT
-#undef DEFAULT_LCD_HEIGHT
-#define DEFAULT_LCD_HEIGHT CONFIG_PANEL_LCD_HEIGHT
-#endif
-
-#ifdef CONFIG_PANEL_LCD_WIDTH
-#undef DEFAULT_LCD_WIDTH
-#define DEFAULT_LCD_WIDTH CONFIG_PANEL_LCD_WIDTH
-#endif
-
-#ifdef CONFIG_PANEL_LCD_BWIDTH
-#undef DEFAULT_LCD_BWIDTH
-#define DEFAULT_LCD_BWIDTH CONFIG_PANEL_LCD_BWIDTH
-#endif
-
-#ifdef CONFIG_PANEL_LCD_HWIDTH
-#undef DEFAULT_LCD_HWIDTH
-#define DEFAULT_LCD_HWIDTH CONFIG_PANEL_LCD_HWIDTH
-#endif
-
-#ifdef CONFIG_PANEL_LCD_CHARSET
-#undef DEFAULT_LCD_CHARSET
-#define DEFAULT_LCD_CHARSET CONFIG_PANEL_LCD_CHARSET
-#endif
-
-#ifdef CONFIG_PANEL_LCD_PROTO
-#undef DEFAULT_LCD_PROTO
-#define DEFAULT_LCD_PROTO CONFIG_PANEL_LCD_PROTO
-#endif
-
-#ifdef CONFIG_PANEL_LCD_PIN_E
-#undef DEFAULT_LCD_PIN_E
-#define DEFAULT_LCD_PIN_E CONFIG_PANEL_LCD_PIN_E
-#endif
-
-#ifdef CONFIG_PANEL_LCD_PIN_RS
-#undef DEFAULT_LCD_PIN_RS
-#define DEFAULT_LCD_PIN_RS CONFIG_PANEL_LCD_PIN_RS
-#endif
-
-#ifdef CONFIG_PANEL_LCD_PIN_RW
-#undef DEFAULT_LCD_PIN_RW
-#define DEFAULT_LCD_PIN_RW CONFIG_PANEL_LCD_PIN_RW
-#endif
-
-#ifdef CONFIG_PANEL_LCD_PIN_SCL
-#undef DEFAULT_LCD_PIN_SCL
-#define DEFAULT_LCD_PIN_SCL CONFIG_PANEL_LCD_PIN_SCL
-#endif
-
-#ifdef CONFIG_PANEL_LCD_PIN_SDA
-#undef DEFAULT_LCD_PIN_SDA
-#define DEFAULT_LCD_PIN_SDA CONFIG_PANEL_LCD_PIN_SDA
-#endif
-
-#ifdef CONFIG_PANEL_LCD_PIN_BL
-#undef DEFAULT_LCD_PIN_BL
-#define DEFAULT_LCD_PIN_BL CONFIG_PANEL_LCD_PIN_BL
-#endif
-
-#endif /* DEFAULT_PROFILE == 0 */
-
-/* global variables */
-
-/* Device single-open policy control */
-static atomic_t lcd_available = ATOMIC_INIT(1);
-static atomic_t keypad_available = ATOMIC_INIT(1);
-
-static struct pardevice *pprt;
-
-static int keypad_initialized;
-
-static void (*lcd_write_cmd)(int);
-static void (*lcd_write_data)(int);
-static void (*lcd_clear_fast)(void);
-
-static DEFINE_SPINLOCK(pprt_lock);
-static struct timer_list scan_timer;
-
-MODULE_DESCRIPTION("Generic parallel port LCD/Keypad driver");
-
-static int parport = DEFAULT_PARPORT;
-module_param(parport, int, 0000);
-MODULE_PARM_DESC(parport, "Parallel port index (0=lpt1, 1=lpt2, ...)");
-
-static int profile = DEFAULT_PROFILE;
-module_param(profile, int, 0000);
-MODULE_PARM_DESC(profile,
- "1=16x2 old kp; 2=serial 16x2, new kp; 3=16x2 hantronix; "
- "4=16x2 nexcom; default=40x2, old kp");
-
-static int keypad_type = NOT_SET;
-module_param(keypad_type, int, 0000);
-MODULE_PARM_DESC(keypad_type,
- "Keypad type: 0=none, 1=old 6 keys, 2=new 6+1 keys, 3=nexcom 4 keys");
-
-static int lcd_type = NOT_SET;
-module_param(lcd_type, int, 0000);
-MODULE_PARM_DESC(lcd_type,
- "LCD type: 0=none, 1=compiled-in, 2=old, 3=serial ks0074, 4=hantronix, 5=nexcom");
-
-static int lcd_height = NOT_SET;
-module_param(lcd_height, int, 0000);
-MODULE_PARM_DESC(lcd_height, "Number of lines on the LCD");
-
-static int lcd_width = NOT_SET;
-module_param(lcd_width, int, 0000);
-MODULE_PARM_DESC(lcd_width, "Number of columns on the LCD");
-
-static int lcd_bwidth = NOT_SET; /* internal buffer width (usually 40) */
-module_param(lcd_bwidth, int, 0000);
-MODULE_PARM_DESC(lcd_bwidth, "Internal LCD line width (40)");
-
-static int lcd_hwidth = NOT_SET; /* hardware buffer width (usually 64) */
-module_param(lcd_hwidth, int, 0000);
-MODULE_PARM_DESC(lcd_hwidth, "LCD line hardware address (64)");
-
-static int lcd_charset = NOT_SET;
-module_param(lcd_charset, int, 0000);
-MODULE_PARM_DESC(lcd_charset, "LCD character set: 0=standard, 1=KS0074");
-
-static int lcd_proto = NOT_SET;
-module_param(lcd_proto, int, 0000);
-MODULE_PARM_DESC(lcd_proto,
- "LCD communication: 0=parallel (//), 1=serial, 2=TI LCD Interface");
-
-/*
- * These are the parallel port pins the LCD control signals are connected to.
- * Set this to 0 if the signal is not used. Set it to its opposite value
- * (negative) if the signal is negated. -MAXINT is used to indicate that the
- * pin has not been explicitly specified.
- *
- * WARNING! no check will be performed about collisions with keypad !
- */
-
-static int lcd_e_pin = PIN_NOT_SET;
-module_param(lcd_e_pin, int, 0000);
-MODULE_PARM_DESC(lcd_e_pin,
- "# of the // port pin connected to LCD 'E' signal, with polarity (-17..17)");
-
-static int lcd_rs_pin = PIN_NOT_SET;
-module_param(lcd_rs_pin, int, 0000);
-MODULE_PARM_DESC(lcd_rs_pin,
- "# of the // port pin connected to LCD 'RS' signal, with polarity (-17..17)");
-
-static int lcd_rw_pin = PIN_NOT_SET;
-module_param(lcd_rw_pin, int, 0000);
-MODULE_PARM_DESC(lcd_rw_pin,
- "# of the // port pin connected to LCD 'RW' signal, with polarity (-17..17)");
-
-static int lcd_cl_pin = PIN_NOT_SET;
-module_param(lcd_cl_pin, int, 0000);
-MODULE_PARM_DESC(lcd_cl_pin,
- "# of the // port pin connected to serial LCD 'SCL' signal, with polarity (-17..17)");
-
-static int lcd_da_pin = PIN_NOT_SET;
-module_param(lcd_da_pin, int, 0000);
-MODULE_PARM_DESC(lcd_da_pin,
- "# of the // port pin connected to serial LCD 'SDA' signal, with polarity (-17..17)");
-
-static int lcd_bl_pin = PIN_NOT_SET;
-module_param(lcd_bl_pin, int, 0000);
-MODULE_PARM_DESC(lcd_bl_pin,
- "# of the // port pin connected to LCD backlight, with polarity (-17..17)");
-
-/* Deprecated module parameters - consider not using them anymore */
-
-static int lcd_enabled = NOT_SET;
-module_param(lcd_enabled, int, 0000);
-MODULE_PARM_DESC(lcd_enabled, "Deprecated option, use lcd_type instead");
-
-static int keypad_enabled = NOT_SET;
-module_param(keypad_enabled, int, 0000);
-MODULE_PARM_DESC(keypad_enabled, "Deprecated option, use keypad_type instead");
-
-static const unsigned char *lcd_char_conv;
-
-/* for some LCD drivers (ks0074) we need a charset conversion table. */
-static const unsigned char lcd_char_conv_ks0074[256] = {
- /* 0|8 1|9 2|A 3|B 4|C 5|D 6|E 7|F */
- /* 0x00 */ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
- /* 0x08 */ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
- /* 0x10 */ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
- /* 0x18 */ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
- /* 0x20 */ 0x20, 0x21, 0x22, 0x23, 0xa2, 0x25, 0x26, 0x27,
- /* 0x28 */ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
- /* 0x30 */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
- /* 0x38 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
- /* 0x40 */ 0xa0, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
- /* 0x48 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
- /* 0x50 */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57,
- /* 0x58 */ 0x58, 0x59, 0x5a, 0xfa, 0xfb, 0xfc, 0x1d, 0xc4,
- /* 0x60 */ 0x96, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
- /* 0x68 */ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
- /* 0x70 */ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
- /* 0x78 */ 0x78, 0x79, 0x7a, 0xfd, 0xfe, 0xff, 0xce, 0x20,
- /* 0x80 */ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
- /* 0x88 */ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
- /* 0x90 */ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
- /* 0x98 */ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f,
- /* 0xA0 */ 0x20, 0x40, 0xb1, 0xa1, 0x24, 0xa3, 0xfe, 0x5f,
- /* 0xA8 */ 0x22, 0xc8, 0x61, 0x14, 0x97, 0x2d, 0xad, 0x96,
- /* 0xB0 */ 0x80, 0x8c, 0x82, 0x83, 0x27, 0x8f, 0x86, 0xdd,
- /* 0xB8 */ 0x2c, 0x81, 0x6f, 0x15, 0x8b, 0x8a, 0x84, 0x60,
- /* 0xC0 */ 0xe2, 0xe2, 0xe2, 0x5b, 0x5b, 0xae, 0xbc, 0xa9,
- /* 0xC8 */ 0xc5, 0xbf, 0xc6, 0xf1, 0xe3, 0xe3, 0xe3, 0xe3,
- /* 0xD0 */ 0x44, 0x5d, 0xa8, 0xe4, 0xec, 0xec, 0x5c, 0x78,
- /* 0xD8 */ 0xab, 0xa6, 0xe5, 0x5e, 0x5e, 0xe6, 0xaa, 0xbe,
- /* 0xE0 */ 0x7f, 0xe7, 0xaf, 0x7b, 0x7b, 0xaf, 0xbd, 0xc8,
- /* 0xE8 */ 0xa4, 0xa5, 0xc7, 0xf6, 0xa7, 0xe8, 0x69, 0x69,
- /* 0xF0 */ 0xed, 0x7d, 0xa8, 0xe4, 0xec, 0x5c, 0x5c, 0x25,
- /* 0xF8 */ 0xac, 0xa6, 0xea, 0xef, 0x7e, 0xeb, 0xb2, 0x79,
-};
-
-static const char old_keypad_profile[][4][9] = {
- {"S0", "Left\n", "Left\n", ""},
- {"S1", "Down\n", "Down\n", ""},
- {"S2", "Up\n", "Up\n", ""},
- {"S3", "Right\n", "Right\n", ""},
- {"S4", "Esc\n", "Esc\n", ""},
- {"S5", "Ret\n", "Ret\n", ""},
- {"", "", "", ""}
-};
-
-/* signals, press, repeat, release */
-static const char new_keypad_profile[][4][9] = {
- {"S0", "Left\n", "Left\n", ""},
- {"S1", "Down\n", "Down\n", ""},
- {"S2", "Up\n", "Up\n", ""},
- {"S3", "Right\n", "Right\n", ""},
- {"S4s5", "", "Esc\n", "Esc\n"},
- {"s4S5", "", "Ret\n", "Ret\n"},
- {"S4S5", "Help\n", "", ""},
- /* add new signals above this line */
- {"", "", "", ""}
-};
-
-/* signals, press, repeat, release */
-static const char nexcom_keypad_profile[][4][9] = {
- {"a-p-e-", "Down\n", "Down\n", ""},
- {"a-p-E-", "Ret\n", "Ret\n", ""},
- {"a-P-E-", "Esc\n", "Esc\n", ""},
- {"a-P-e-", "Up\n", "Up\n", ""},
- /* add new signals above this line */
- {"", "", "", ""}
-};
-
-static const char (*keypad_profile)[4][9] = old_keypad_profile;
-
-static DECLARE_BITMAP(bits, LCD_BITS);
-
-static void lcd_get_bits(unsigned int port, int *val)
-{
- unsigned int bit, state;
-
- for (bit = 0; bit < LCD_BITS; bit++) {
- state = test_bit(bit, bits) ? BIT_SET : BIT_CLR;
- *val &= lcd_bits[port][bit][BIT_MSK];
- *val |= lcd_bits[port][bit][state];
- }
-}
-
-/* sets data port bits according to current signals values */
-static int set_data_bits(void)
-{
- int val;
-
- val = r_dtr(pprt);
- lcd_get_bits(LCD_PORT_D, &val);
- w_dtr(pprt, val);
- return val;
-}
-
-/* sets ctrl port bits according to current signals values */
-static int set_ctrl_bits(void)
-{
- int val;
-
- val = r_ctr(pprt);
- lcd_get_bits(LCD_PORT_C, &val);
- w_ctr(pprt, val);
- return val;
-}
-
-/* sets ctrl & data port bits according to current signals values */
-static void panel_set_bits(void)
-{
- set_data_bits();
- set_ctrl_bits();
-}
-
-/*
- * Converts a parallel port pin (from -25 to 25) to data and control ports
- * masks, and data and control port bits. The signal will be considered
- * unconnected if it's on pin 0 or an invalid pin (<-25 or >25).
- *
- * Result will be used this way :
- * out(dport, in(dport) & d_val[2] | d_val[signal_state])
- * out(cport, in(cport) & c_val[2] | c_val[signal_state])
- */
-static void pin_to_bits(int pin, unsigned char *d_val, unsigned char *c_val)
-{
- int d_bit, c_bit, inv;
-
- d_val[0] = 0;
- c_val[0] = 0;
- d_val[1] = 0;
- c_val[1] = 0;
- d_val[2] = 0xFF;
- c_val[2] = 0xFF;
-
- if (pin == 0)
- return;
-
- inv = (pin < 0);
- if (inv)
- pin = -pin;
-
- d_bit = 0;
- c_bit = 0;
-
- switch (pin) {
- case PIN_STROBE: /* strobe, inverted */
- c_bit = PNL_PSTROBE;
- inv = !inv;
- break;
- case PIN_D0...PIN_D7: /* D0 - D7 = 2 - 9 */
- d_bit = 1 << (pin - 2);
- break;
- case PIN_AUTOLF: /* autofeed, inverted */
- c_bit = PNL_PAUTOLF;
- inv = !inv;
- break;
- case PIN_INITP: /* init, direct */
- c_bit = PNL_PINITP;
- break;
- case PIN_SELECP: /* select_in, inverted */
- c_bit = PNL_PSELECP;
- inv = !inv;
- break;
- default: /* unknown pin, ignore */
- break;
- }
-
- if (c_bit) {
- c_val[2] &= ~c_bit;
- c_val[!inv] = c_bit;
- } else if (d_bit) {
- d_val[2] &= ~d_bit;
- d_val[!inv] = d_bit;
- }
-}
-
-/* sleeps that many milliseconds with a reschedule */
-static void long_sleep(int ms)
-{
- if (in_interrupt())
- mdelay(ms);
- else
- schedule_timeout_interruptible(msecs_to_jiffies(ms));
-}
-
-/*
- * send a serial byte to the LCD panel. The caller is responsible for locking
- * if needed.
- */
-static void lcd_send_serial(int byte)
-{
- int bit;
-
- /*
- * the data bit is set on D0, and the clock on STROBE.
- * LCD reads D0 on STROBE's rising edge.
- */
- for (bit = 0; bit < 8; bit++) {
- clear_bit(LCD_BIT_CL, bits); /* CLK low */
- panel_set_bits();
- if (byte & 1) {
- set_bit(LCD_BIT_DA, bits);
- } else {
- clear_bit(LCD_BIT_DA, bits);
- }
-
- panel_set_bits();
- udelay(2); /* maintain the data during 2 us before CLK up */
- set_bit(LCD_BIT_CL, bits); /* CLK high */
- panel_set_bits();
- udelay(1); /* maintain the strobe during 1 us */
- byte >>= 1;
- }
-}
-
-/* turn the backlight on or off */
-static void __lcd_backlight(int on)
-{
- /* The backlight is activated by setting the AUTOFEED line to +5V */
- spin_lock_irq(&pprt_lock);
- if (on)
- set_bit(LCD_BIT_BL, bits);
- else
- clear_bit(LCD_BIT_BL, bits);
- panel_set_bits();
- spin_unlock_irq(&pprt_lock);
-}
-
-static void lcd_backlight(int on)
-{
- if (lcd.pins.bl == PIN_NONE)
- return;
-
- mutex_lock(&lcd.bl_tempo_lock);
- if (!lcd.bl_tempo)
- __lcd_backlight(on);
- mutex_unlock(&lcd.bl_tempo_lock);
-}
-
-static void lcd_bl_off(struct work_struct *work)
-{
- mutex_lock(&lcd.bl_tempo_lock);
- if (lcd.bl_tempo) {
- lcd.bl_tempo = false;
- if (!(lcd.flags & LCD_FLAG_L))
- __lcd_backlight(0);
- }
- mutex_unlock(&lcd.bl_tempo_lock);
-}
-
-/* turn the backlight on for a little while */
-static void lcd_poke(void)
-{
- if (lcd.pins.bl == PIN_NONE)
- return;
-
- cancel_delayed_work_sync(&lcd.bl_work);
-
- mutex_lock(&lcd.bl_tempo_lock);
- if (!lcd.bl_tempo && !(lcd.flags & LCD_FLAG_L))
- __lcd_backlight(1);
- lcd.bl_tempo = true;
- schedule_delayed_work(&lcd.bl_work, FLASH_LIGHT_TEMPO * HZ);
- mutex_unlock(&lcd.bl_tempo_lock);
-}
-
-/* send a command to the LCD panel in serial mode */
-static void lcd_write_cmd_s(int cmd)
-{
- spin_lock_irq(&pprt_lock);
- lcd_send_serial(0x1F); /* R/W=W, RS=0 */
- lcd_send_serial(cmd & 0x0F);
- lcd_send_serial((cmd >> 4) & 0x0F);
- udelay(40); /* the shortest command takes at least 40 us */
- spin_unlock_irq(&pprt_lock);
-}
-
-/* send data to the LCD panel in serial mode */
-static void lcd_write_data_s(int data)
-{
- spin_lock_irq(&pprt_lock);
- lcd_send_serial(0x5F); /* R/W=W, RS=1 */
- lcd_send_serial(data & 0x0F);
- lcd_send_serial((data >> 4) & 0x0F);
- udelay(40); /* the shortest data takes at least 40 us */
- spin_unlock_irq(&pprt_lock);
-}
-
-/* send a command to the LCD panel in 8 bits parallel mode */
-static void lcd_write_cmd_p8(int cmd)
-{
- spin_lock_irq(&pprt_lock);
- /* present the data to the data port */
- w_dtr(pprt, cmd);
- udelay(20); /* maintain the data during 20 us before the strobe */
-
- set_bit(LCD_BIT_E, bits);
- clear_bit(LCD_BIT_RS, bits);
- clear_bit(LCD_BIT_RW, bits);
- set_ctrl_bits();
-
- udelay(40); /* maintain the strobe during 40 us */
-
- clear_bit(LCD_BIT_E, bits);
- set_ctrl_bits();
-
- udelay(120); /* the shortest command takes at least 120 us */
- spin_unlock_irq(&pprt_lock);
-}
-
-/* send data to the LCD panel in 8 bits parallel mode */
-static void lcd_write_data_p8(int data)
-{
- spin_lock_irq(&pprt_lock);
- /* present the data to the data port */
- w_dtr(pprt, data);
- udelay(20); /* maintain the data during 20 us before the strobe */
-
- set_bit(LCD_BIT_E, bits);
- set_bit(LCD_BIT_RS, bits);
- clear_bit(LCD_BIT_RW, bits);
- set_ctrl_bits();
-
- udelay(40); /* maintain the strobe during 40 us */
-
- clear_bit(LCD_BIT_E, bits);
- set_ctrl_bits();
-
- udelay(45); /* the shortest data takes at least 45 us */
- spin_unlock_irq(&pprt_lock);
-}
-
-/* send a command to the TI LCD panel */
-static void lcd_write_cmd_tilcd(int cmd)
-{
- spin_lock_irq(&pprt_lock);
- /* present the data to the control port */
- w_ctr(pprt, cmd);
- udelay(60);
- spin_unlock_irq(&pprt_lock);
-}
-
-/* send data to the TI LCD panel */
-static void lcd_write_data_tilcd(int data)
-{
- spin_lock_irq(&pprt_lock);
- /* present the data to the data port */
- w_dtr(pprt, data);
- udelay(60);
- spin_unlock_irq(&pprt_lock);
-}
-
-static void lcd_gotoxy(void)
-{
- lcd_write_cmd(LCD_CMD_SET_DDRAM_ADDR
- | (lcd.addr.y ? lcd.hwidth : 0)
- /*
- * we force the cursor to stay at the end of the
- * line if it wants to go farther
- */
- | ((lcd.addr.x < lcd.bwidth) ? lcd.addr.x &
- (lcd.hwidth - 1) : lcd.bwidth - 1));
-}
-
-static void lcd_home(void)
-{
- lcd.addr.x = 0;
- lcd.addr.y = 0;
- lcd_gotoxy();
-}
-
-static void lcd_print(char c)
-{
- if (lcd.addr.x < lcd.bwidth) {
- if (lcd_char_conv)
- c = lcd_char_conv[(unsigned char)c];
- lcd_write_data(c);
- lcd.addr.x++;
- }
- /* prevents the cursor from wrapping onto the next line */
- if (lcd.addr.x == lcd.bwidth)
- lcd_gotoxy();
-}
-
-/* fills the display with spaces and resets X/Y */
-static void lcd_clear_fast_s(void)
-{
- int pos;
-
- lcd_home();
-
- spin_lock_irq(&pprt_lock);
- for (pos = 0; pos < lcd.height * lcd.hwidth; pos++) {
- lcd_send_serial(0x5F); /* R/W=W, RS=1 */
- lcd_send_serial(' ' & 0x0F);
- lcd_send_serial((' ' >> 4) & 0x0F);
- /* the shortest data takes at least 40 us */
- udelay(40);
- }
- spin_unlock_irq(&pprt_lock);
-
- lcd_home();
-}
-
-/* fills the display with spaces and resets X/Y */
-static void lcd_clear_fast_p8(void)
-{
- int pos;
-
- lcd_home();
-
- spin_lock_irq(&pprt_lock);
- for (pos = 0; pos < lcd.height * lcd.hwidth; pos++) {
- /* present the data to the data port */
- w_dtr(pprt, ' ');
-
- /* maintain the data during 20 us before the strobe */
- udelay(20);
-
- set_bit(LCD_BIT_E, bits);
- set_bit(LCD_BIT_RS, bits);
- clear_bit(LCD_BIT_RW, bits);
- set_ctrl_bits();
-
- /* maintain the strobe during 40 us */
- udelay(40);
-
- clear_bit(LCD_BIT_E, bits);
- set_ctrl_bits();
-
- /* the shortest data takes at least 45 us */
- udelay(45);
- }
- spin_unlock_irq(&pprt_lock);
-
- lcd_home();
-}
-
-/* fills the display with spaces and resets X/Y */
-static void lcd_clear_fast_tilcd(void)
-{
- int pos;
-
- lcd_home();
-
- spin_lock_irq(&pprt_lock);
- for (pos = 0; pos < lcd.height * lcd.hwidth; pos++) {
- /* present the data to the data port */
- w_dtr(pprt, ' ');
- udelay(60);
- }
-
- spin_unlock_irq(&pprt_lock);
-
- lcd_home();
-}
-
-/* clears the display and resets X/Y */
-static void lcd_clear_display(void)
-{
- lcd_write_cmd(LCD_CMD_DISPLAY_CLEAR);
- lcd.addr.x = 0;
- lcd.addr.y = 0;
- /* we must wait a few milliseconds (15) */
- long_sleep(15);
-}
-
-static void lcd_init_display(void)
-{
- lcd.flags = ((lcd.height > 1) ? LCD_FLAG_N : 0)
- | LCD_FLAG_D | LCD_FLAG_C | LCD_FLAG_B;
-
- long_sleep(20); /* wait 20 ms after power-up for the paranoid */
-
- /* 8bits, 1 line, small fonts; let's do it 3 times */
- lcd_write_cmd(LCD_CMD_FUNCTION_SET | LCD_CMD_DATA_LEN_8BITS);
- long_sleep(10);
- lcd_write_cmd(LCD_CMD_FUNCTION_SET | LCD_CMD_DATA_LEN_8BITS);
- long_sleep(10);
- lcd_write_cmd(LCD_CMD_FUNCTION_SET | LCD_CMD_DATA_LEN_8BITS);
- long_sleep(10);
-
- /* set font height and number of lines */
- lcd_write_cmd(LCD_CMD_FUNCTION_SET | LCD_CMD_DATA_LEN_8BITS
- | ((lcd.flags & LCD_FLAG_F) ? LCD_CMD_FONT_5X10_DOTS : 0)
- | ((lcd.flags & LCD_FLAG_N) ? LCD_CMD_TWO_LINES : 0)
- );
- long_sleep(10);
-
- /* display off, cursor off, blink off */
- lcd_write_cmd(LCD_CMD_DISPLAY_CTRL);
- long_sleep(10);
-
- lcd_write_cmd(LCD_CMD_DISPLAY_CTRL /* set display mode */
- | ((lcd.flags & LCD_FLAG_D) ? LCD_CMD_DISPLAY_ON : 0)
- | ((lcd.flags & LCD_FLAG_C) ? LCD_CMD_CURSOR_ON : 0)
- | ((lcd.flags & LCD_FLAG_B) ? LCD_CMD_BLINK_ON : 0)
- );
-
- lcd_backlight((lcd.flags & LCD_FLAG_L) ? 1 : 0);
-
- long_sleep(10);
-
- /* entry mode set : increment, cursor shifting */
- lcd_write_cmd(LCD_CMD_ENTRY_MODE | LCD_CMD_CURSOR_INC);
-
- lcd_clear_display();
-}
-
-/*
- * These are the file operation functions for user access to /dev/lcd.
- * These functions can also be called from inside the kernel by setting
- * file and ppos to NULL.
- */
-
-static inline int handle_lcd_special_code(void)
-{
- /* LCD special codes */
-
- int processed = 0;
-
- char *esc = lcd.esc_seq.buf + 2;
- int oldflags = lcd.flags;
-
- /* check for display mode flags */
- switch (*esc) {
- case 'D': /* Display ON */
- lcd.flags |= LCD_FLAG_D;
- processed = 1;
- break;
- case 'd': /* Display OFF */
- lcd.flags &= ~LCD_FLAG_D;
- processed = 1;
- break;
- case 'C': /* Cursor ON */
- lcd.flags |= LCD_FLAG_C;
- processed = 1;
- break;
- case 'c': /* Cursor OFF */
- lcd.flags &= ~LCD_FLAG_C;
- processed = 1;
- break;
- case 'B': /* Blink ON */
- lcd.flags |= LCD_FLAG_B;
- processed = 1;
- break;
- case 'b': /* Blink OFF */
- lcd.flags &= ~LCD_FLAG_B;
- processed = 1;
- break;
- case '+': /* Back light ON */
- lcd.flags |= LCD_FLAG_L;
- processed = 1;
- break;
- case '-': /* Back light OFF */
- lcd.flags &= ~LCD_FLAG_L;
- processed = 1;
- break;
- case '*':
- /* flash back light */
- lcd_poke();
- processed = 1;
- break;
- case 'f': /* Small Font */
- lcd.flags &= ~LCD_FLAG_F;
- processed = 1;
- break;
- case 'F': /* Large Font */
- lcd.flags |= LCD_FLAG_F;
- processed = 1;
- break;
- case 'n': /* One Line */
- lcd.flags &= ~LCD_FLAG_N;
- processed = 1;
- break;
- case 'N': /* Two Lines */
- lcd.flags |= LCD_FLAG_N;
- break;
- case 'l': /* Shift Cursor Left */
- if (lcd.addr.x > 0) {
- /* back one char if not at end of line */
- if (lcd.addr.x < lcd.bwidth)
- lcd_write_cmd(LCD_CMD_SHIFT);
- lcd.addr.x--;
- }
- processed = 1;
- break;
- case 'r': /* shift cursor right */
- if (lcd.addr.x < lcd.width) {
- /* allow the cursor to pass the end of the line */
- if (lcd.addr.x < (lcd.bwidth - 1))
- lcd_write_cmd(LCD_CMD_SHIFT |
- LCD_CMD_SHIFT_RIGHT);
- lcd.addr.x++;
- }
- processed = 1;
- break;
- case 'L': /* shift display left */
- lcd_write_cmd(LCD_CMD_SHIFT | LCD_CMD_DISPLAY_SHIFT);
- processed = 1;
- break;
- case 'R': /* shift display right */
- lcd_write_cmd(LCD_CMD_SHIFT | LCD_CMD_DISPLAY_SHIFT |
- LCD_CMD_SHIFT_RIGHT);
- processed = 1;
- break;
- case 'k': { /* kill end of line */
- int x;
-
- for (x = lcd.addr.x; x < lcd.bwidth; x++)
- lcd_write_data(' ');
-
- /* restore cursor position */
- lcd_gotoxy();
- processed = 1;
- break;
- }
- case 'I': /* reinitialize display */
- lcd_init_display();
- processed = 1;
- break;
- case 'G': {
- /* Generator : LGcxxxxx...xx; must have <c> between '0'
- * and '7', representing the numerical ASCII code of the
- * redefined character, and <xx...xx> a sequence of 16
- * hex digits representing 8 bytes for each character.
- * Most LCDs will only use 5 lower bits of the 7 first
- * bytes.
- */
-
- unsigned char cgbytes[8];
- unsigned char cgaddr;
- int cgoffset;
- int shift;
- char value;
- int addr;
-
- if (!strchr(esc, ';'))
- break;
-
- esc++;
-
- cgaddr = *(esc++) - '0';
- if (cgaddr > 7) {
- processed = 1;
- break;
- }
-
- cgoffset = 0;
- shift = 0;
- value = 0;
- while (*esc && cgoffset < 8) {
- shift ^= 4;
- if (*esc >= '0' && *esc <= '9') {
- value |= (*esc - '0') << shift;
- } else if (*esc >= 'A' && *esc <= 'Z') {
- value |= (*esc - 'A' + 10) << shift;
- } else if (*esc >= 'a' && *esc <= 'z') {
- value |= (*esc - 'a' + 10) << shift;
- } else {
- esc++;
- continue;
- }
-
- if (shift == 0) {
- cgbytes[cgoffset++] = value;
- value = 0;
- }
-
- esc++;
- }
-
- lcd_write_cmd(LCD_CMD_SET_CGRAM_ADDR | (cgaddr * 8));
- for (addr = 0; addr < cgoffset; addr++)
- lcd_write_data(cgbytes[addr]);
-
- /* ensures that we stop writing to CGRAM */
- lcd_gotoxy();
- processed = 1;
- break;
- }
- case 'x': /* gotoxy : LxXXX[yYYY]; */
- case 'y': /* gotoxy : LyYYY[xXXX]; */
- if (!strchr(esc, ';'))
- break;
-
- while (*esc) {
- if (*esc == 'x') {
- esc++;
- if (kstrtoul(esc, 10, &lcd.addr.x) < 0)
- break;
- } else if (*esc == 'y') {
- esc++;
- if (kstrtoul(esc, 10, &lcd.addr.y) < 0)
- break;
- } else {
- break;
- }
- }
-
- lcd_gotoxy();
- processed = 1;
- break;
- }
-
- /* TODO: This indent party here got ugly, clean it! */
- /* Check whether one flag was changed */
- if (oldflags != lcd.flags) {
- /* check whether one of B,C,D flags were changed */
- if ((oldflags ^ lcd.flags) &
- (LCD_FLAG_B | LCD_FLAG_C | LCD_FLAG_D))
- /* set display mode */
- lcd_write_cmd(LCD_CMD_DISPLAY_CTRL
- | ((lcd.flags & LCD_FLAG_D)
- ? LCD_CMD_DISPLAY_ON : 0)
- | ((lcd.flags & LCD_FLAG_C)
- ? LCD_CMD_CURSOR_ON : 0)
- | ((lcd.flags & LCD_FLAG_B)
- ? LCD_CMD_BLINK_ON : 0));
- /* check whether one of F,N flags was changed */
- else if ((oldflags ^ lcd.flags) & (LCD_FLAG_F | LCD_FLAG_N))
- lcd_write_cmd(LCD_CMD_FUNCTION_SET
- | LCD_CMD_DATA_LEN_8BITS
- | ((lcd.flags & LCD_FLAG_F)
- ? LCD_CMD_FONT_5X10_DOTS
- : 0)
- | ((lcd.flags & LCD_FLAG_N)
- ? LCD_CMD_TWO_LINES
- : 0));
- /* check whether L flag was changed */
- else if ((oldflags ^ lcd.flags) & (LCD_FLAG_L))
- lcd_backlight(!!(lcd.flags & LCD_FLAG_L));
- }
-
- return processed;
-}
-
-static void lcd_write_char(char c)
-{
- /* first, we'll test if we're in escape mode */
- if ((c != '\n') && lcd.esc_seq.len >= 0) {
- /* yes, let's add this char to the buffer */
- lcd.esc_seq.buf[lcd.esc_seq.len++] = c;
- lcd.esc_seq.buf[lcd.esc_seq.len] = 0;
- } else {
- /* aborts any previous escape sequence */
- lcd.esc_seq.len = -1;
-
- switch (c) {
- case LCD_ESCAPE_CHAR:
- /* start of an escape sequence */
- lcd.esc_seq.len = 0;
- lcd.esc_seq.buf[lcd.esc_seq.len] = 0;
- break;
- case '\b':
- /* go back one char and clear it */
- if (lcd.addr.x > 0) {
- /*
- * check if we're not at the
- * end of the line
- */
- if (lcd.addr.x < lcd.bwidth)
- /* back one char */
- lcd_write_cmd(LCD_CMD_SHIFT);
- lcd.addr.x--;
- }
- /* replace with a space */
- lcd_write_data(' ');
- /* back one char again */
- lcd_write_cmd(LCD_CMD_SHIFT);
- break;
- case '\014':
- /* quickly clear the display */
- lcd_clear_fast();
- break;
- case '\n':
- /*
- * flush the remainder of the current line and
- * go to the beginning of the next line
- */
- for (; lcd.addr.x < lcd.bwidth; lcd.addr.x++)
- lcd_write_data(' ');
- lcd.addr.x = 0;
- lcd.addr.y = (lcd.addr.y + 1) % lcd.height;
- lcd_gotoxy();
- break;
- case '\r':
- /* go to the beginning of the same line */
- lcd.addr.x = 0;
- lcd_gotoxy();
- break;
- case '\t':
- /* print a space instead of the tab */
- lcd_print(' ');
- break;
- default:
- /* simply print this char */
- lcd_print(c);
- break;
- }
- }
-
- /*
- * now we'll see if we're in an escape mode and if the current
- * escape sequence can be understood.
- */
- if (lcd.esc_seq.len >= 2) {
- int processed = 0;
-
- if (!strcmp(lcd.esc_seq.buf, "[2J")) {
- /* clear the display */
- lcd_clear_fast();
- processed = 1;
- } else if (!strcmp(lcd.esc_seq.buf, "[H")) {
- /* cursor to home */
- lcd_home();
- processed = 1;
- }
- /* codes starting with ^[[L */
- else if ((lcd.esc_seq.len >= 3) &&
- (lcd.esc_seq.buf[0] == '[') &&
- (lcd.esc_seq.buf[1] == 'L')) {
- processed = handle_lcd_special_code();
- }
-
- /* LCD special escape codes */
- /*
- * flush the escape sequence if it's been processed
- * or if it is getting too long.
- */
- if (processed || (lcd.esc_seq.len >= LCD_ESCAPE_LEN))
- lcd.esc_seq.len = -1;
- } /* escape codes */
-}
-
-static ssize_t lcd_write(struct file *file,
- const char __user *buf, size_t count, loff_t *ppos)
-{
- const char __user *tmp = buf;
- char c;
-
- for (; count-- > 0; (*ppos)++, tmp++) {
- if (!in_interrupt() && (((count + 1) & 0x1f) == 0))
- /*
- * let's be a little nice with other processes
- * that need some CPU
- */
- schedule();
-
- if (get_user(c, tmp))
- return -EFAULT;
-
- lcd_write_char(c);
- }
-
- return tmp - buf;
-}
-
-static int lcd_open(struct inode *inode, struct file *file)
-{
- if (!atomic_dec_and_test(&lcd_available))
- return -EBUSY; /* open only once at a time */
-
- if (file->f_mode & FMODE_READ) /* device is write-only */
- return -EPERM;
-
- if (lcd.must_clear) {
- lcd_clear_display();
- lcd.must_clear = false;
- }
- return nonseekable_open(inode, file);
-}
-
-static int lcd_release(struct inode *inode, struct file *file)
-{
- atomic_inc(&lcd_available);
- return 0;
-}
-
-static const struct file_operations lcd_fops = {
- .write = lcd_write,
- .open = lcd_open,
- .release = lcd_release,
- .llseek = no_llseek,
-};
-
-static struct miscdevice lcd_dev = {
- .minor = LCD_MINOR,
- .name = "lcd",
- .fops = &lcd_fops,
-};
-
-/* public function usable from the kernel for any purpose */
-static void panel_lcd_print(const char *s)
-{
- const char *tmp = s;
- int count = strlen(s);
-
- if (lcd.enabled && lcd.initialized) {
- for (; count-- > 0; tmp++) {
- if (!in_interrupt() && (((count + 1) & 0x1f) == 0))
- /*
- * let's be a little nice with other processes
- * that need some CPU
- */
- schedule();
-
- lcd_write_char(*tmp);
- }
- }
-}
-
-/* initialize the LCD driver */
-static void lcd_init(void)
-{
- switch (selected_lcd_type) {
- case LCD_TYPE_OLD:
- /* parallel mode, 8 bits */
- lcd.proto = LCD_PROTO_PARALLEL;
- lcd.charset = LCD_CHARSET_NORMAL;
- lcd.pins.e = PIN_STROBE;
- lcd.pins.rs = PIN_AUTOLF;
-
- lcd.width = 40;
- lcd.bwidth = 40;
- lcd.hwidth = 64;
- lcd.height = 2;
- break;
- case LCD_TYPE_KS0074:
- /* serial mode, ks0074 */
- lcd.proto = LCD_PROTO_SERIAL;
- lcd.charset = LCD_CHARSET_KS0074;
- lcd.pins.bl = PIN_AUTOLF;
- lcd.pins.cl = PIN_STROBE;
- lcd.pins.da = PIN_D0;
-
- lcd.width = 16;
- lcd.bwidth = 40;
- lcd.hwidth = 16;
- lcd.height = 2;
- break;
- case LCD_TYPE_NEXCOM:
- /* parallel mode, 8 bits, generic */
- lcd.proto = LCD_PROTO_PARALLEL;
- lcd.charset = LCD_CHARSET_NORMAL;
- lcd.pins.e = PIN_AUTOLF;
- lcd.pins.rs = PIN_SELECP;
- lcd.pins.rw = PIN_INITP;
-
- lcd.width = 16;
- lcd.bwidth = 40;
- lcd.hwidth = 64;
- lcd.height = 2;
- break;
- case LCD_TYPE_CUSTOM:
- /* custom (user-defined) */
- lcd.proto = DEFAULT_LCD_PROTO;
- lcd.charset = DEFAULT_LCD_CHARSET;
- /* default geometry will be set later */
- break;
- case LCD_TYPE_HANTRONIX:
- /* parallel mode, 8 bits, hantronix-like */
- default:
- lcd.proto = LCD_PROTO_PARALLEL;
- lcd.charset = LCD_CHARSET_NORMAL;
- lcd.pins.e = PIN_STROBE;
- lcd.pins.rs = PIN_SELECP;
-
- lcd.width = 16;
- lcd.bwidth = 40;
- lcd.hwidth = 64;
- lcd.height = 2;
- break;
- }
-
- /* Overwrite with module params set on loading */
- if (lcd_height != NOT_SET)
- lcd.height = lcd_height;
- if (lcd_width != NOT_SET)
- lcd.width = lcd_width;
- if (lcd_bwidth != NOT_SET)
- lcd.bwidth = lcd_bwidth;
- if (lcd_hwidth != NOT_SET)
- lcd.hwidth = lcd_hwidth;
- if (lcd_charset != NOT_SET)
- lcd.charset = lcd_charset;
- if (lcd_proto != NOT_SET)
- lcd.proto = lcd_proto;
- if (lcd_e_pin != PIN_NOT_SET)
- lcd.pins.e = lcd_e_pin;
- if (lcd_rs_pin != PIN_NOT_SET)
- lcd.pins.rs = lcd_rs_pin;
- if (lcd_rw_pin != PIN_NOT_SET)
- lcd.pins.rw = lcd_rw_pin;
- if (lcd_cl_pin != PIN_NOT_SET)
- lcd.pins.cl = lcd_cl_pin;
- if (lcd_da_pin != PIN_NOT_SET)
- lcd.pins.da = lcd_da_pin;
- if (lcd_bl_pin != PIN_NOT_SET)
- lcd.pins.bl = lcd_bl_pin;
-
- /* this is used to catch wrong and default values */
- if (lcd.width <= 0)
- lcd.width = DEFAULT_LCD_WIDTH;
- if (lcd.bwidth <= 0)
- lcd.bwidth = DEFAULT_LCD_BWIDTH;
- if (lcd.hwidth <= 0)
- lcd.hwidth = DEFAULT_LCD_HWIDTH;
- if (lcd.height <= 0)
- lcd.height = DEFAULT_LCD_HEIGHT;
-
- if (lcd.proto == LCD_PROTO_SERIAL) { /* SERIAL */
- lcd_write_cmd = lcd_write_cmd_s;
- lcd_write_data = lcd_write_data_s;
- lcd_clear_fast = lcd_clear_fast_s;
-
- if (lcd.pins.cl == PIN_NOT_SET)
- lcd.pins.cl = DEFAULT_LCD_PIN_SCL;
- if (lcd.pins.da == PIN_NOT_SET)
- lcd.pins.da = DEFAULT_LCD_PIN_SDA;
-
- } else if (lcd.proto == LCD_PROTO_PARALLEL) { /* PARALLEL */
- lcd_write_cmd = lcd_write_cmd_p8;
- lcd_write_data = lcd_write_data_p8;
- lcd_clear_fast = lcd_clear_fast_p8;
-
- if (lcd.pins.e == PIN_NOT_SET)
- lcd.pins.e = DEFAULT_LCD_PIN_E;
- if (lcd.pins.rs == PIN_NOT_SET)
- lcd.pins.rs = DEFAULT_LCD_PIN_RS;
- if (lcd.pins.rw == PIN_NOT_SET)
- lcd.pins.rw = DEFAULT_LCD_PIN_RW;
- } else {
- lcd_write_cmd = lcd_write_cmd_tilcd;
- lcd_write_data = lcd_write_data_tilcd;
- lcd_clear_fast = lcd_clear_fast_tilcd;
- }
-
- if (lcd.pins.bl == PIN_NOT_SET)
- lcd.pins.bl = DEFAULT_LCD_PIN_BL;
-
- if (lcd.pins.e == PIN_NOT_SET)
- lcd.pins.e = PIN_NONE;
- if (lcd.pins.rs == PIN_NOT_SET)
- lcd.pins.rs = PIN_NONE;
- if (lcd.pins.rw == PIN_NOT_SET)
- lcd.pins.rw = PIN_NONE;
- if (lcd.pins.bl == PIN_NOT_SET)
- lcd.pins.bl = PIN_NONE;
- if (lcd.pins.cl == PIN_NOT_SET)
- lcd.pins.cl = PIN_NONE;
- if (lcd.pins.da == PIN_NOT_SET)
- lcd.pins.da = PIN_NONE;
-
- if (lcd.charset == NOT_SET)
- lcd.charset = DEFAULT_LCD_CHARSET;
-
- if (lcd.charset == LCD_CHARSET_KS0074)
- lcd_char_conv = lcd_char_conv_ks0074;
- else
- lcd_char_conv = NULL;
-
- if (lcd.pins.bl != PIN_NONE) {
- mutex_init(&lcd.bl_tempo_lock);
- INIT_DELAYED_WORK(&lcd.bl_work, lcd_bl_off);
- }
-
- pin_to_bits(lcd.pins.e, lcd_bits[LCD_PORT_D][LCD_BIT_E],
- lcd_bits[LCD_PORT_C][LCD_BIT_E]);
- pin_to_bits(lcd.pins.rs, lcd_bits[LCD_PORT_D][LCD_BIT_RS],
- lcd_bits[LCD_PORT_C][LCD_BIT_RS]);
- pin_to_bits(lcd.pins.rw, lcd_bits[LCD_PORT_D][LCD_BIT_RW],
- lcd_bits[LCD_PORT_C][LCD_BIT_RW]);
- pin_to_bits(lcd.pins.bl, lcd_bits[LCD_PORT_D][LCD_BIT_BL],
- lcd_bits[LCD_PORT_C][LCD_BIT_BL]);
- pin_to_bits(lcd.pins.cl, lcd_bits[LCD_PORT_D][LCD_BIT_CL],
- lcd_bits[LCD_PORT_C][LCD_BIT_CL]);
- pin_to_bits(lcd.pins.da, lcd_bits[LCD_PORT_D][LCD_BIT_DA],
- lcd_bits[LCD_PORT_C][LCD_BIT_DA]);
-
- /*
- * before this line, we must NOT send anything to the display.
- * Since lcd_init_display() needs to write data, we have to
- * mark the LCD initialized just before.
- */
- lcd.initialized = true;
- lcd_init_display();
-
- /* display a short message */
-#ifdef CONFIG_PANEL_CHANGE_MESSAGE
-#ifdef CONFIG_PANEL_BOOT_MESSAGE
- panel_lcd_print("\x1b[Lc\x1b[Lb\x1b[L*" CONFIG_PANEL_BOOT_MESSAGE);
-#endif
-#else
- panel_lcd_print("\x1b[Lc\x1b[Lb\x1b[L*Linux-" UTS_RELEASE);
-#endif
- /* clear the display on the next device opening */
- lcd.must_clear = true;
- lcd_home();
-}
-
-/*
- * These are the file operation functions for user access to /dev/keypad.
- */
-
-static ssize_t keypad_read(struct file *file,
- char __user *buf, size_t count, loff_t *ppos)
-{
- unsigned i = *ppos;
- char __user *tmp = buf;
-
- if (keypad_buflen == 0) {
- if (file->f_flags & O_NONBLOCK)
- return -EAGAIN;
-
- if (wait_event_interruptible(keypad_read_wait,
- keypad_buflen != 0))
- return -EINTR;
- }
-
- for (; count-- > 0 && (keypad_buflen > 0);
- ++i, ++tmp, --keypad_buflen) {
- put_user(keypad_buffer[keypad_start], tmp);
- keypad_start = (keypad_start + 1) % KEYPAD_BUFFER;
- }
- *ppos = i;
-
- return tmp - buf;
-}
-
-static int keypad_open(struct inode *inode, struct file *file)
-{
- if (!atomic_dec_and_test(&keypad_available))
- return -EBUSY; /* open only once at a time */
-
- if (file->f_mode & FMODE_WRITE) /* device is read-only */
- return -EPERM;
-
- keypad_buflen = 0; /* flush the buffer on opening */
- return 0;
-}
-
-static int keypad_release(struct inode *inode, struct file *file)
-{
- atomic_inc(&keypad_available);
- return 0;
-}
-
-static const struct file_operations keypad_fops = {
- .read = keypad_read, /* read */
- .open = keypad_open, /* open */
- .release = keypad_release, /* close */
- .llseek = default_llseek,
-};
-
-static struct miscdevice keypad_dev = {
- .minor = KEYPAD_MINOR,
- .name = "keypad",
- .fops = &keypad_fops,
-};
-
-static void keypad_send_key(const char *string, int max_len)
-{
- /* send the key to the device only if a process is attached to it. */
- if (!atomic_read(&keypad_available)) {
- while (max_len-- && keypad_buflen < KEYPAD_BUFFER && *string) {
- keypad_buffer[(keypad_start + keypad_buflen++) %
- KEYPAD_BUFFER] = *string++;
- }
- wake_up_interruptible(&keypad_read_wait);
- }
-}
-
-/* this function scans all the bits involving at least one logical signal,
- * and puts the results in the bitfield "phys_read" (one bit per established
- * contact), and sets "phys_read_prev" to "phys_read".
- *
- * Note: to debounce input signals, we will only consider as switched a signal
- * which is stable across 2 measures. Signals which are different between two
- * reads will be kept as they previously were in their logical form (phys_prev).
- * A signal which has just switched will have a 1 in
- * (phys_read ^ phys_read_prev).
- */
-static void phys_scan_contacts(void)
-{
- int bit, bitval;
- char oldval;
- char bitmask;
- char gndmask;
-
- phys_prev = phys_curr;
- phys_read_prev = phys_read;
- phys_read = 0; /* flush all signals */
-
- /* keep track of old value, with all outputs disabled */
- oldval = r_dtr(pprt) | scan_mask_o;
- /* activate all keyboard outputs (active low) */
- w_dtr(pprt, oldval & ~scan_mask_o);
-
- /* will have a 1 for each bit set to gnd */
- bitmask = PNL_PINPUT(r_str(pprt)) & scan_mask_i;
- /* disable all matrix signals */
- w_dtr(pprt, oldval);
-
- /* now that all outputs are cleared, the only active input bits are
- * directly connected to the ground
- */
-
- /* 1 for each grounded input */
- gndmask = PNL_PINPUT(r_str(pprt)) & scan_mask_i;
-
- /* grounded inputs are signals 40-44 */
- phys_read |= (__u64)gndmask << 40;
-
- if (bitmask != gndmask) {
- /*
- * since clearing the outputs changed some inputs, we know
- * that some input signals are currently tied to some outputs.
- * So we'll scan them.
- */
- for (bit = 0; bit < 8; bit++) {
- bitval = BIT(bit);
-
- if (!(scan_mask_o & bitval)) /* skip unused bits */
- continue;
-
- w_dtr(pprt, oldval & ~bitval); /* enable this output */
- bitmask = PNL_PINPUT(r_str(pprt)) & ~gndmask;
- phys_read |= (__u64)bitmask << (5 * bit);
- }
- w_dtr(pprt, oldval); /* disable all outputs */
- }
- /*
- * this is easy: use old bits when they are flapping,
- * use new ones when stable
- */
- phys_curr = (phys_prev & (phys_read ^ phys_read_prev)) |
- (phys_read & ~(phys_read ^ phys_read_prev));
-}
-
-static inline int input_state_high(struct logical_input *input)
-{
-#if 0
- /* FIXME:
- * this is an invalid test. It tries to catch
- * transitions from single-key to multiple-key, but
- * doesn't take into account the contacts polarity.
- * The only solution to the problem is to parse keys
- * from the most complex to the simplest combinations,
- * and mark them as 'caught' once a combination
- * matches, then unmatch it for all other ones.
- */
-
- /* try to catch dangerous transitions cases :
- * someone adds a bit, so this signal was a false
- * positive resulting from a transition. We should
- * invalidate the signal immediately and not call the
- * release function.
- * eg: 0 -(press A)-> A -(press B)-> AB : don't match A's release.
- */
- if (((phys_prev & input->mask) == input->value) &&
- ((phys_curr & input->mask) > input->value)) {
- input->state = INPUT_ST_LOW; /* invalidate */
- return 1;
- }
-#endif
-
- if ((phys_curr & input->mask) == input->value) {
- if ((input->type == INPUT_TYPE_STD) &&
- (input->high_timer == 0)) {
- input->high_timer++;
- if (input->u.std.press_fct)
- input->u.std.press_fct(input->u.std.press_data);
- } else if (input->type == INPUT_TYPE_KBD) {
- /* will turn on the light */
- keypressed = 1;
-
- if (input->high_timer == 0) {
- char *press_str = input->u.kbd.press_str;
-
- if (press_str[0]) {
- int s = sizeof(input->u.kbd.press_str);
-
- keypad_send_key(press_str, s);
- }
- }
-
- if (input->u.kbd.repeat_str[0]) {
- char *repeat_str = input->u.kbd.repeat_str;
-
- if (input->high_timer >= KEYPAD_REP_START) {
- int s = sizeof(input->u.kbd.repeat_str);
-
- input->high_timer -= KEYPAD_REP_DELAY;
- keypad_send_key(repeat_str, s);
- }
- /* we will need to come back here soon */
- inputs_stable = 0;
- }
-
- if (input->high_timer < 255)
- input->high_timer++;
- }
- return 1;
- }
-
- /* else signal falling down. Let's fall through. */
- input->state = INPUT_ST_FALLING;
- input->fall_timer = 0;
-
- return 0;
-}
-
-static inline void input_state_falling(struct logical_input *input)
-{
-#if 0
- /* FIXME !!! same comment as in input_state_high */
- if (((phys_prev & input->mask) == input->value) &&
- ((phys_curr & input->mask) > input->value)) {
- input->state = INPUT_ST_LOW; /* invalidate */
- return;
- }
-#endif
-
- if ((phys_curr & input->mask) == input->value) {
- if (input->type == INPUT_TYPE_KBD) {
- /* will turn on the light */
- keypressed = 1;
-
- if (input->u.kbd.repeat_str[0]) {
- char *repeat_str = input->u.kbd.repeat_str;
-
- if (input->high_timer >= KEYPAD_REP_START) {
- int s = sizeof(input->u.kbd.repeat_str);
-
- input->high_timer -= KEYPAD_REP_DELAY;
- keypad_send_key(repeat_str, s);
- }
- /* we will need to come back here soon */
- inputs_stable = 0;
- }
-
- if (input->high_timer < 255)
- input->high_timer++;
- }
- input->state = INPUT_ST_HIGH;
- } else if (input->fall_timer >= input->fall_time) {
- /* call release event */
- if (input->type == INPUT_TYPE_STD) {
- void (*release_fct)(int) = input->u.std.release_fct;
-
- if (release_fct)
- release_fct(input->u.std.release_data);
- } else if (input->type == INPUT_TYPE_KBD) {
- char *release_str = input->u.kbd.release_str;
-
- if (release_str[0]) {
- int s = sizeof(input->u.kbd.release_str);
-
- keypad_send_key(release_str, s);
- }
- }
-
- input->state = INPUT_ST_LOW;
- } else {
- input->fall_timer++;
- inputs_stable = 0;
- }
-}
-
-static void panel_process_inputs(void)
-{
- struct list_head *item;
- struct logical_input *input;
-
- keypressed = 0;
- inputs_stable = 1;
- list_for_each(item, &logical_inputs) {
- input = list_entry(item, struct logical_input, list);
-
- switch (input->state) {
- case INPUT_ST_LOW:
- if ((phys_curr & input->mask) != input->value)
- break;
- /* if all needed ones were already set previously,
- * this means that this logical signal has been
- * activated by the releasing of another combined
- * signal, so we don't want to match.
- * eg: AB -(release B)-> A -(release A)-> 0 :
- * don't match A.
- */
- if ((phys_prev & input->mask) == input->value)
- break;
- input->rise_timer = 0;
- input->state = INPUT_ST_RISING;
- /* no break here, fall through */
- case INPUT_ST_RISING:
- if ((phys_curr & input->mask) != input->value) {
- input->state = INPUT_ST_LOW;
- break;
- }
- if (input->rise_timer < input->rise_time) {
- inputs_stable = 0;
- input->rise_timer++;
- break;
- }
- input->high_timer = 0;
- input->state = INPUT_ST_HIGH;
- /* no break here, fall through */
- case INPUT_ST_HIGH:
- if (input_state_high(input))
- break;
- /* no break here, fall through */
- case INPUT_ST_FALLING:
- input_state_falling(input);
- }
- }
-}
-
-static void panel_scan_timer(void)
-{
- if (keypad.enabled && keypad_initialized) {
- if (spin_trylock_irq(&pprt_lock)) {
- phys_scan_contacts();
-
- /* no need for the parport anymore */
- spin_unlock_irq(&pprt_lock);
- }
-
- if (!inputs_stable || phys_curr != phys_prev)
- panel_process_inputs();
- }
-
- if (keypressed && lcd.enabled && lcd.initialized)
- lcd_poke();
-
- mod_timer(&scan_timer, jiffies + INPUT_POLL_TIME);
-}
-
-static void init_scan_timer(void)
-{
- if (scan_timer.function)
- return; /* already started */
-
- setup_timer(&scan_timer, (void *)&panel_scan_timer, 0);
- scan_timer.expires = jiffies + INPUT_POLL_TIME;
- add_timer(&scan_timer);
-}
-
-/* converts a name of the form "({BbAaPpSsEe}{01234567-})*" to a series of bits.
- * if <omask> or <imask> are non-null, they will be or'ed with the bits
- * corresponding to out and in bits respectively.
- * returns 1 if ok, 0 if error (in which case, nothing is written).
- */
-static u8 input_name2mask(const char *name, __u64 *mask, __u64 *value,
- u8 *imask, u8 *omask)
-{
- const char sigtab[] = "EeSsPpAaBb";
- u8 im, om;
- __u64 m, v;
-
- om = 0;
- im = 0;
- m = 0ULL;
- v = 0ULL;
- while (*name) {
- int in, out, bit, neg;
- const char *idx;
-
- idx = strchr(sigtab, *name);
- if (!idx)
- return 0; /* input name not found */
-
- in = idx - sigtab;
- neg = (in & 1); /* odd (lower) names are negated */
- in >>= 1;
- im |= BIT(in);
-
- name++;
- if (*name >= '0' && *name <= '7') {
- out = *name - '0';
- om |= BIT(out);
- } else if (*name == '-') {
- out = 8;
- } else {
- return 0; /* unknown bit name */
- }
-
- bit = (out * 5) + in;
-
- m |= 1ULL << bit;
- if (!neg)
- v |= 1ULL << bit;
- name++;
- }
- *mask = m;
- *value = v;
- if (imask)
- *imask |= im;
- if (omask)
- *omask |= om;
- return 1;
-}
-
-/* tries to bind a key to the signal name <name>. The key will send the
- * strings <press>, <repeat>, <release> for these respective events.
- * Returns the pointer to the new key if ok, NULL if the key could not be bound.
- */
-static struct logical_input *panel_bind_key(const char *name, const char *press,
- const char *repeat,
- const char *release)
-{
- struct logical_input *key;
-
- key = kzalloc(sizeof(*key), GFP_KERNEL);
- if (!key)
- return NULL;
-
- if (!input_name2mask(name, &key->mask, &key->value, &scan_mask_i,
- &scan_mask_o)) {
- kfree(key);
- return NULL;
- }
-
- key->type = INPUT_TYPE_KBD;
- key->state = INPUT_ST_LOW;
- key->rise_time = 1;
- key->fall_time = 1;
-
- strncpy(key->u.kbd.press_str, press, sizeof(key->u.kbd.press_str));
- strncpy(key->u.kbd.repeat_str, repeat, sizeof(key->u.kbd.repeat_str));
- strncpy(key->u.kbd.release_str, release,
- sizeof(key->u.kbd.release_str));
- list_add(&key->list, &logical_inputs);
- return key;
-}
-
-#if 0
-/* tries to bind a callback function to the signal name <name>. The function
- * <press_fct> will be called with the <press_data> arg when the signal is
- * activated, and so on for <release_fct>/<release_data>
- * Returns the pointer to the new signal if ok, NULL if the signal could not
- * be bound.
- */
-static struct logical_input *panel_bind_callback(char *name,
- void (*press_fct)(int),
- int press_data,
- void (*release_fct)(int),
- int release_data)
-{
- struct logical_input *callback;
-
- callback = kmalloc(sizeof(*callback), GFP_KERNEL);
- if (!callback)
- return NULL;
-
- memset(callback, 0, sizeof(struct logical_input));
- if (!input_name2mask(name, &callback->mask, &callback->value,
- &scan_mask_i, &scan_mask_o))
- return NULL;
-
- callback->type = INPUT_TYPE_STD;
- callback->state = INPUT_ST_LOW;
- callback->rise_time = 1;
- callback->fall_time = 1;
- callback->u.std.press_fct = press_fct;
- callback->u.std.press_data = press_data;
- callback->u.std.release_fct = release_fct;
- callback->u.std.release_data = release_data;
- list_add(&callback->list, &logical_inputs);
- return callback;
-}
-#endif
-
-static void keypad_init(void)
-{
- int keynum;
-
- init_waitqueue_head(&keypad_read_wait);
- keypad_buflen = 0; /* flushes any stray keystrokes */
-
- /* Let's create all known keys */
-
- for (keynum = 0; keypad_profile[keynum][0][0]; keynum++) {
- panel_bind_key(keypad_profile[keynum][0],
- keypad_profile[keynum][1],
- keypad_profile[keynum][2],
- keypad_profile[keynum][3]);
- }
-
- init_scan_timer();
- keypad_initialized = 1;
-}
-
-/**************************************************/
-/* device initialization */
-/**************************************************/
-
-static int panel_notify_sys(struct notifier_block *this, unsigned long code,
- void *unused)
-{
- if (lcd.enabled && lcd.initialized) {
- switch (code) {
- case SYS_DOWN:
- panel_lcd_print
- ("\x0cReloading\nSystem...\x1b[Lc\x1b[Lb\x1b[L+");
- break;
- case SYS_HALT:
- panel_lcd_print
- ("\x0cSystem Halted.\x1b[Lc\x1b[Lb\x1b[L+");
- break;
- case SYS_POWER_OFF:
- panel_lcd_print("\x0cPower off.\x1b[Lc\x1b[Lb\x1b[L+");
- break;
- default:
- break;
- }
- }
- return NOTIFY_DONE;
-}
-
-static struct notifier_block panel_notifier = {
- panel_notify_sys,
- NULL,
- 0
-};
-
-static void panel_attach(struct parport *port)
-{
- struct pardev_cb panel_cb;
-
- if (port->number != parport)
- return;
-
- if (pprt) {
- pr_err("%s: port->number=%d parport=%d, already registered!\n",
- __func__, port->number, parport);
- return;
- }
-
- memset(&panel_cb, 0, sizeof(panel_cb));
- panel_cb.private = &pprt;
- /* panel_cb.flags = 0 should be PARPORT_DEV_EXCL? */
-
- pprt = parport_register_dev_model(port, "panel", &panel_cb, 0);
- if (!pprt) {
- pr_err("%s: port->number=%d parport=%d, parport_register_device() failed\n",
- __func__, port->number, parport);
- return;
- }
-
- if (parport_claim(pprt)) {
- pr_err("could not claim access to parport%d. Aborting.\n",
- parport);
- goto err_unreg_device;
- }
-
- /* must init LCD first, just in case an IRQ from the keypad is
- * generated at keypad init
- */
- if (lcd.enabled) {
- lcd_init();
- if (misc_register(&lcd_dev))
- goto err_unreg_device;
- }
-
- if (keypad.enabled) {
- keypad_init();
- if (misc_register(&keypad_dev))
- goto err_lcd_unreg;
- }
- register_reboot_notifier(&panel_notifier);
- return;
-
-err_lcd_unreg:
- if (lcd.enabled)
- misc_deregister(&lcd_dev);
-err_unreg_device:
- parport_unregister_device(pprt);
- pprt = NULL;
-}
-
-static void panel_detach(struct parport *port)
-{
- if (port->number != parport)
- return;
-
- if (!pprt) {
- pr_err("%s: port->number=%d parport=%d, nothing to unregister.\n",
- __func__, port->number, parport);
- return;
- }
- if (scan_timer.function)
- del_timer_sync(&scan_timer);
-
- if (keypad.enabled) {
- misc_deregister(&keypad_dev);
- keypad_initialized = 0;
- }
-
- if (lcd.enabled) {
- panel_lcd_print("\x0cLCD driver unloaded.\x1b[Lc\x1b[Lb\x1b[L-");
- misc_deregister(&lcd_dev);
- if (lcd.pins.bl != PIN_NONE) {
- cancel_delayed_work_sync(&lcd.bl_work);
- __lcd_backlight(0);
- }
- lcd.initialized = false;
- }
-
- /* TODO: free all input signals */
- parport_release(pprt);
- parport_unregister_device(pprt);
- pprt = NULL;
- unregister_reboot_notifier(&panel_notifier);
-}
-
-static struct parport_driver panel_driver = {
- .name = "panel",
- .match_port = panel_attach,
- .detach = panel_detach,
- .devmodel = true,
-};
-
-/* init function */
-static int __init panel_init_module(void)
-{
- int selected_keypad_type = NOT_SET, err;
-
- /* handle the selected profile, if any */
- switch (profile) {
- case PANEL_PROFILE_CUSTOM:
- /* custom profile */
- selected_keypad_type = DEFAULT_KEYPAD_TYPE;
- selected_lcd_type = DEFAULT_LCD_TYPE;
- break;
- case PANEL_PROFILE_OLD:
- /* 8 bits, 2*16, old keypad */
- selected_keypad_type = KEYPAD_TYPE_OLD;
- selected_lcd_type = LCD_TYPE_OLD;
-
- /* TODO: These two are a little hacky, sort it out later */
- if (lcd_width == NOT_SET)
- lcd_width = 16;
- if (lcd_hwidth == NOT_SET)
- lcd_hwidth = 16;
- break;
- case PANEL_PROFILE_NEW:
- /* serial, 2*16, new keypad */
- selected_keypad_type = KEYPAD_TYPE_NEW;
- selected_lcd_type = LCD_TYPE_KS0074;
- break;
- case PANEL_PROFILE_HANTRONIX:
- /* 8 bits, 2*16 hantronix-like, no keypad */
- selected_keypad_type = KEYPAD_TYPE_NONE;
- selected_lcd_type = LCD_TYPE_HANTRONIX;
- break;
- case PANEL_PROFILE_NEXCOM:
- /* generic 8 bits, 2*16, Nexcom-style keypad */
- selected_keypad_type = KEYPAD_TYPE_NEXCOM;
- selected_lcd_type = LCD_TYPE_NEXCOM;
- break;
- case PANEL_PROFILE_LARGE:
- /* 8 bits, 2*40, old keypad */
- selected_keypad_type = KEYPAD_TYPE_OLD;
- selected_lcd_type = LCD_TYPE_OLD;
- break;
- }
-
- /*
- * Overwrite selection with module param values (both keypad and lcd),
- * where the deprecated params have lower prio.
- */
- if (keypad_enabled != NOT_SET)
- selected_keypad_type = keypad_enabled;
- if (keypad_type != NOT_SET)
- selected_keypad_type = keypad_type;
-
- keypad.enabled = (selected_keypad_type > 0);
-
- if (lcd_enabled != NOT_SET)
- selected_lcd_type = lcd_enabled;
- if (lcd_type != NOT_SET)
- selected_lcd_type = lcd_type;
-
- lcd.enabled = (selected_lcd_type > 0);
-
- if (lcd.enabled) {
- /*
- * Init lcd struct with load-time values to preserve exact
- * current functionality (at least for now).
- */
- lcd.height = lcd_height;
- lcd.width = lcd_width;
- lcd.bwidth = lcd_bwidth;
- lcd.hwidth = lcd_hwidth;
- lcd.charset = lcd_charset;
- lcd.proto = lcd_proto;
- lcd.pins.e = lcd_e_pin;
- lcd.pins.rs = lcd_rs_pin;
- lcd.pins.rw = lcd_rw_pin;
- lcd.pins.cl = lcd_cl_pin;
- lcd.pins.da = lcd_da_pin;
- lcd.pins.bl = lcd_bl_pin;
-
- /* Leave it for now, just in case */
- lcd.esc_seq.len = -1;
- }
-
- switch (selected_keypad_type) {
- case KEYPAD_TYPE_OLD:
- keypad_profile = old_keypad_profile;
- break;
- case KEYPAD_TYPE_NEW:
- keypad_profile = new_keypad_profile;
- break;
- case KEYPAD_TYPE_NEXCOM:
- keypad_profile = nexcom_keypad_profile;
- break;
- default:
- keypad_profile = NULL;
- break;
- }
-
- if (!lcd.enabled && !keypad.enabled) {
- /* no device enabled, let's exit */
- pr_err("panel driver disabled.\n");
- return -ENODEV;
- }
-
- err = parport_register_driver(&panel_driver);
- if (err) {
- pr_err("could not register with parport. Aborting.\n");
- return err;
- }
-
- if (pprt)
- pr_info("panel driver registered on parport%d (io=0x%lx).\n",
- parport, pprt->port->base);
- else
- pr_info("panel driver not yet registered\n");
- return 0;
-}
-
-static void __exit panel_cleanup_module(void)
-{
- parport_unregister_driver(&panel_driver);
-}
-
-module_init(panel_init_module);
-module_exit(panel_cleanup_module);
-MODULE_AUTHOR("Willy Tarreau");
-MODULE_LICENSE("GPL");
-
-/*
- * Local variables:
- * c-indent-level: 4
- * tab-width: 8
- * End:
- */
diff --git a/drivers/misc/pci_endpoint_test.c b/drivers/misc/pci_endpoint_test.c
new file mode 100644
index 000000000000..09c10f426b64
--- /dev/null
+++ b/drivers/misc/pci_endpoint_test.c
@@ -0,0 +1,534 @@
+/*
+ * Host side test driver to test PCI endpoint functionality
+ *
+ * Copyright (C) 2017 Texas Instruments
+ * Author: Kishon Vijay Abraham I <kishon@ti.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 of
+ * the License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/crc32.h>
+#include <linux/delay.h>
+#include <linux/fs.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/random.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+
+#include <linux/pci_regs.h>
+
+#include <uapi/linux/pcitest.h>
+
+#define DRV_MODULE_NAME "pci-endpoint-test"
+
+#define PCI_ENDPOINT_TEST_MAGIC 0x0
+
+#define PCI_ENDPOINT_TEST_COMMAND 0x4
+#define COMMAND_RAISE_LEGACY_IRQ BIT(0)
+#define COMMAND_RAISE_MSI_IRQ BIT(1)
+#define MSI_NUMBER_SHIFT 2
+/* 6 bits for MSI number */
+#define COMMAND_READ BIT(8)
+#define COMMAND_WRITE BIT(9)
+#define COMMAND_COPY BIT(10)
+
+#define PCI_ENDPOINT_TEST_STATUS 0x8
+#define STATUS_READ_SUCCESS BIT(0)
+#define STATUS_READ_FAIL BIT(1)
+#define STATUS_WRITE_SUCCESS BIT(2)
+#define STATUS_WRITE_FAIL BIT(3)
+#define STATUS_COPY_SUCCESS BIT(4)
+#define STATUS_COPY_FAIL BIT(5)
+#define STATUS_IRQ_RAISED BIT(6)
+#define STATUS_SRC_ADDR_INVALID BIT(7)
+#define STATUS_DST_ADDR_INVALID BIT(8)
+
+#define PCI_ENDPOINT_TEST_LOWER_SRC_ADDR 0xc
+#define PCI_ENDPOINT_TEST_UPPER_SRC_ADDR 0x10
+
+#define PCI_ENDPOINT_TEST_LOWER_DST_ADDR 0x14
+#define PCI_ENDPOINT_TEST_UPPER_DST_ADDR 0x18
+
+#define PCI_ENDPOINT_TEST_SIZE 0x1c
+#define PCI_ENDPOINT_TEST_CHECKSUM 0x20
+
+static DEFINE_IDA(pci_endpoint_test_ida);
+
+#define to_endpoint_test(priv) container_of((priv), struct pci_endpoint_test, \
+ miscdev)
+enum pci_barno {
+ BAR_0,
+ BAR_1,
+ BAR_2,
+ BAR_3,
+ BAR_4,
+ BAR_5,
+};
+
+struct pci_endpoint_test {
+ struct pci_dev *pdev;
+ void __iomem *base;
+ void __iomem *bar[6];
+ struct completion irq_raised;
+ int last_irq;
+ /* mutex to protect the ioctls */
+ struct mutex mutex;
+ struct miscdevice miscdev;
+};
+
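+/* number of bytes exercised in each of BAR0..BAR5 by the BAR test */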
+static int bar_size[] = { 4, 512, 1024, 16384, 131072, 1048576 };
+
+static inline u32 pci_endpoint_test_readl(struct pci_endpoint_test *test,
+ u32 offset)
+{
+ return readl(test->base + offset);
+}
+
+static inline void pci_endpoint_test_writel(struct pci_endpoint_test *test,
+ u32 offset, u32 value)
+{
+ writel(value, test->base + offset);
+}
+
+static inline u32 pci_endpoint_test_bar_readl(struct pci_endpoint_test *test,
+ int bar, int offset)
+{
+ return readl(test->bar[bar] + offset);
+}
+
+static inline void pci_endpoint_test_bar_writel(struct pci_endpoint_test *test,
+ int bar, u32 offset, u32 value)
+{
+ writel(value, test->bar[bar] + offset);
+}
+
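+/*
+ * Interrupt handler shared by all test vectors: when the endpoint reports
+ * STATUS_IRQ_RAISED, record which vector fired and complete the wait of
+ * the pending test.
+ */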
+static irqreturn_t pci_endpoint_test_irqhandler(int irq, void *dev_id)
+{
+ struct pci_endpoint_test *test = dev_id;
+ u32 reg;
+
+ reg = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS);
+ if (reg & STATUS_IRQ_RAISED) {
+ test->last_irq = irq;
+ complete(&test->irq_raised);
+ reg &= ~STATUS_IRQ_RAISED;
+ }
+ pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_STATUS,
+ reg);
+
+ return IRQ_HANDLED;
+}
+
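+/*
+ * BAR test: fill the first bar_size[] bytes of the BAR with a known
+ * pattern and read them back to check that the region is accessible
+ * from the host.
+ */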
+static bool pci_endpoint_test_bar(struct pci_endpoint_test *test,
+ enum pci_barno barno)
+{
+ int j;
+ u32 val;
+ int size;
+
+ if (!test->bar[barno])
+ return false;
+
+ size = bar_size[barno];
+
+ for (j = 0; j < size; j += 4)
+ pci_endpoint_test_bar_writel(test, barno, j, 0xA0A0A0A0);
+
+ for (j = 0; j < size; j += 4) {
+ val = pci_endpoint_test_bar_readl(test, barno, j);
+ if (val != 0xA0A0A0A0)
+ return false;
+ }
+
+ return true;
+}
+
+static bool pci_endpoint_test_legacy_irq(struct pci_endpoint_test *test)
+{
+ u32 val;
+
+ pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
+ COMMAND_RAISE_LEGACY_IRQ);
+ val = wait_for_completion_timeout(&test->irq_raised,
+ msecs_to_jiffies(1000));
+ if (!val)
+ return false;
+
+ return true;
+}
+
+static bool pci_endpoint_test_msi_irq(struct pci_endpoint_test *test,
+ u8 msi_num)
+{
+ u32 val;
+ struct pci_dev *pdev = test->pdev;
+
+ pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
+ msi_num << MSI_NUMBER_SHIFT |
+ COMMAND_RAISE_MSI_IRQ);
+ val = wait_for_completion_timeout(&test->irq_raised,
+ msecs_to_jiffies(1000));
+ if (!val)
+ return false;
+
+ if (test->last_irq - pdev->irq == msi_num - 1)
+ return true;
+
+ return false;
+}
+
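+/*
+ * Copy test: the endpoint is asked to copy 'size' bytes from the host
+ * source buffer into the host destination buffer; the host then compares
+ * the CRC32 of both buffers to judge success.
+ */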
+static bool pci_endpoint_test_copy(struct pci_endpoint_test *test, size_t size)
+{
+ bool ret = false;
+ void *src_addr;
+ void *dst_addr;
+ dma_addr_t src_phys_addr;
+ dma_addr_t dst_phys_addr;
+ struct pci_dev *pdev = test->pdev;
+ struct device *dev = &pdev->dev;
+ u32 src_crc32;
+ u32 dst_crc32;
+
+ src_addr = dma_alloc_coherent(dev, size, &src_phys_addr, GFP_KERNEL);
+ if (!src_addr) {
+ dev_err(dev, "failed to allocate source buffer\n");
+ ret = false;
+ goto err;
+ }
+
+ pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_SRC_ADDR,
+ lower_32_bits(src_phys_addr));
+
+ pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_SRC_ADDR,
+ upper_32_bits(src_phys_addr));
+
+ get_random_bytes(src_addr, size);
+ src_crc32 = crc32_le(~0, src_addr, size);
+
+ dst_addr = dma_alloc_coherent(dev, size, &dst_phys_addr, GFP_KERNEL);
+ if (!dst_addr) {
+ dev_err(dev, "failed to allocate destination address\n");
+ ret = false;
+ goto err_src_addr;
+ }
+
+ pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_DST_ADDR,
+ lower_32_bits(dst_phys_addr));
+ pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_DST_ADDR,
+ upper_32_bits(dst_phys_addr));
+
+ pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE,
+ size);
+
+ pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
+ 1 << MSI_NUMBER_SHIFT | COMMAND_COPY);
+
+ wait_for_completion(&test->irq_raised);
+
+ dst_crc32 = crc32_le(~0, dst_addr, size);
+ if (dst_crc32 == src_crc32)
+ ret = true;
+
+ dma_free_coherent(dev, size, dst_addr, dst_phys_addr);
+
+err_src_addr:
+ dma_free_coherent(dev, size, src_addr, src_phys_addr);
+
+err:
+ return ret;
+}
+
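+/*
+ * Write test (host to endpoint): the endpoint is asked to read 'size'
+ * bytes from the host buffer and check them against the CRC32 the host
+ * programmed into the checksum register.
+ */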
+static bool pci_endpoint_test_write(struct pci_endpoint_test *test, size_t size)
+{
+ bool ret = false;
+ u32 reg;
+ void *addr;
+ dma_addr_t phys_addr;
+ struct pci_dev *pdev = test->pdev;
+ struct device *dev = &pdev->dev;
+ u32 crc32;
+
+ addr = dma_alloc_coherent(dev, size, &phys_addr, GFP_KERNEL);
+ if (!addr) {
+ dev_err(dev, "failed to allocate address\n");
+ ret = false;
+ goto err;
+ }
+
+ get_random_bytes(addr, size);
+
+ crc32 = crc32_le(~0, addr, size);
+ pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_CHECKSUM,
+ crc32);
+
+ pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_SRC_ADDR,
+ lower_32_bits(phys_addr));
+ pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_SRC_ADDR,
+ upper_32_bits(phys_addr));
+
+ pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE, size);
+
+ pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
+ 1 << MSI_NUMBER_SHIFT | COMMAND_READ);
+
+ wait_for_completion(&test->irq_raised);
+
+ reg = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS);
+ if (reg & STATUS_READ_SUCCESS)
+ ret = true;
+
+ dma_free_coherent(dev, size, addr, phys_addr);
+
+err:
+ return ret;
+}
+
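+/*
+ * Read test (endpoint to host): the endpoint is asked to write 'size'
+ * bytes into the host buffer; the host then recomputes the CRC32 and
+ * compares it with the value in the checksum register.
+ */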
+static bool pci_endpoint_test_read(struct pci_endpoint_test *test, size_t size)
+{
+ bool ret = false;
+ void *addr;
+ dma_addr_t phys_addr;
+ struct pci_dev *pdev = test->pdev;
+ struct device *dev = &pdev->dev;
+ u32 crc32;
+
+ addr = dma_alloc_coherent(dev, size, &phys_addr, GFP_KERNEL);
+ if (!addr) {
+ dev_err(dev, "failed to allocate destination address\n");
+ ret = false;
+ goto err;
+ }
+
+ pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_DST_ADDR,
+ lower_32_bits(phys_addr));
+ pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_DST_ADDR,
+ upper_32_bits(phys_addr));
+
+ pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE, size);
+
+ pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
+ 1 << MSI_NUMBER_SHIFT | COMMAND_WRITE);
+
+ wait_for_completion(&test->irq_raised);
+
+ crc32 = crc32_le(~0, addr, size);
+ if (crc32 == pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_CHECKSUM))
+ ret = true;
+
+ dma_free_coherent(dev, size, addr, phys_addr);
+err:
+ return ret;
+}
+
+static long pci_endpoint_test_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ int ret = -EINVAL;
+ enum pci_barno bar;
+ struct pci_endpoint_test *test = to_endpoint_test(file->private_data);
+
+ mutex_lock(&test->mutex);
+ switch (cmd) {
+ case PCITEST_BAR:
+ bar = arg;
+ if (bar < 0 || bar > 5)
+ goto ret;
+ ret = pci_endpoint_test_bar(test, bar);
+ break;
+ case PCITEST_LEGACY_IRQ:
+ ret = pci_endpoint_test_legacy_irq(test);
+ break;
+ case PCITEST_MSI:
+ ret = pci_endpoint_test_msi_irq(test, arg);
+ break;
+ case PCITEST_WRITE:
+ ret = pci_endpoint_test_write(test, arg);
+ break;
+ case PCITEST_READ:
+ ret = pci_endpoint_test_read(test, arg);
+ break;
+ case PCITEST_COPY:
+ ret = pci_endpoint_test_copy(test, arg);
+ break;
+ }
+
+ret:
+ mutex_unlock(&test->mutex);
+ return ret;
+}
+
+static const struct file_operations pci_endpoint_test_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = pci_endpoint_test_ioctl,
+};
+
+static int pci_endpoint_test_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ int i;
+ int err;
+ int irq;
+ int id;
+ char name[20];
+ enum pci_barno bar;
+ void __iomem *base;
+ struct device *dev = &pdev->dev;
+ struct pci_endpoint_test *test;
+ struct miscdevice *misc_device;
+
+ if (pci_is_bridge(pdev))
+ return -ENODEV;
+
+ test = devm_kzalloc(dev, sizeof(*test), GFP_KERNEL);
+ if (!test)
+ return -ENOMEM;
+
+ test->pdev = pdev;
+ init_completion(&test->irq_raised);
+ mutex_init(&test->mutex);
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(dev, "Cannot enable PCI device\n");
+ return err;
+ }
+
+ err = pci_request_regions(pdev, DRV_MODULE_NAME);
+ if (err) {
+ dev_err(dev, "Cannot obtain PCI resources\n");
+ goto err_disable_pdev;
+ }
+
+ pci_set_master(pdev);
+
+ irq = pci_alloc_irq_vectors(pdev, 1, 32, PCI_IRQ_MSI);
+ if (irq < 0)
+ dev_err(dev, "failed to get MSI interrupts\n");
+
+ err = devm_request_irq(dev, pdev->irq, pci_endpoint_test_irqhandler,
+ IRQF_SHARED, DRV_MODULE_NAME, test);
+ if (err) {
+ dev_err(dev, "failed to request IRQ %d\n", pdev->irq);
+ goto err_disable_msi;
+ }
+
+ for (i = 1; i < irq; i++) {
+ err = devm_request_irq(dev, pdev->irq + i,
+ pci_endpoint_test_irqhandler,
+ IRQF_SHARED, DRV_MODULE_NAME, test);
+ if (err)
+ dev_err(dev, "failed to request IRQ %d for MSI %d\n",
+ pdev->irq + i, i + 1);
+ }
+
+ for (bar = BAR_0; bar <= BAR_5; bar++) {
+ base = pci_ioremap_bar(pdev, bar);
+ if (!base) {
+ dev_err(dev, "failed to read BAR%d\n", bar);
+ WARN_ON(bar == BAR_0);
+ }
+ test->bar[bar] = base;
+ }
+
+ test->base = test->bar[0];
+ if (!test->base) {
+ dev_err(dev, "Cannot perform PCI test without BAR0\n");
+ goto err_iounmap;
+ }
+
+ pci_set_drvdata(pdev, test);
+
+ id = ida_simple_get(&pci_endpoint_test_ida, 0, 0, GFP_KERNEL);
+ if (id < 0) {
+ dev_err(dev, "unable to get id\n");
+ goto err_iounmap;
+ }
+
+ snprintf(name, sizeof(name), DRV_MODULE_NAME ".%d", id);
+ misc_device = &test->miscdev;
+ misc_device->minor = MISC_DYNAMIC_MINOR;
+ misc_device->name = name;
+ misc_device->fops = &pci_endpoint_test_fops;
+
+ err = misc_register(misc_device);
+ if (err) {
+ dev_err(dev, "failed to register device\n");
+ goto err_ida_remove;
+ }
+
+ return 0;
+
+err_ida_remove:
+ ida_simple_remove(&pci_endpoint_test_ida, id);
+
+err_iounmap:
+ for (bar = BAR_0; bar <= BAR_5; bar++) {
+ if (test->bar[bar])
+ pci_iounmap(pdev, test->bar[bar]);
+ }
+
+err_disable_msi:
+ pci_disable_msi(pdev);
+ pci_release_regions(pdev);
+
+err_disable_pdev:
+ pci_disable_device(pdev);
+
+ return err;
+}
+
+static void pci_endpoint_test_remove(struct pci_dev *pdev)
+{
+ int id;
+ enum pci_barno bar;
+ struct pci_endpoint_test *test = pci_get_drvdata(pdev);
+ struct miscdevice *misc_device = &test->miscdev;
+
+ if (sscanf(misc_device->name, DRV_MODULE_NAME ".%d", &id) != 1)
+ return;
+
+ misc_deregister(&test->miscdev);
+ ida_simple_remove(&pci_endpoint_test_ida, id);
+ for (bar = BAR_0; bar <= BAR_5; bar++) {
+ if (test->bar[bar])
+ pci_iounmap(pdev, test->bar[bar]);
+ }
+ pci_disable_msi(pdev);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+}
+
+static const struct pci_device_id pci_endpoint_test_tbl[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA74x) },
+ { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA72x) },
+ { }
+};
+MODULE_DEVICE_TABLE(pci, pci_endpoint_test_tbl);
+
+static struct pci_driver pci_endpoint_test_driver = {
+ .name = DRV_MODULE_NAME,
+ .id_table = pci_endpoint_test_tbl,
+ .probe = pci_endpoint_test_probe,
+ .remove = pci_endpoint_test_remove,
+};
+module_pci_driver(pci_endpoint_test_driver);
+
+MODULE_DESCRIPTION("PCI ENDPOINT TEST HOST DRIVER");
+MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/sram-exec.c b/drivers/misc/sram-exec.c
index ac522417c462..3d528a13b8fc 100644
--- a/drivers/misc/sram-exec.c
+++ b/drivers/misc/sram-exec.c
@@ -16,9 +16,10 @@
#include <linux/device.h>
#include <linux/genalloc.h>
+#include <linux/mm.h>
#include <linux/sram.h>
-#include <asm/cacheflush.h>
+#include <asm/set_memory.h>
#include "sram.h"
diff --git a/drivers/misc/tsl2550.c b/drivers/misc/tsl2550.c
index 87a13374fdc0..adf46072cb37 100644
--- a/drivers/misc/tsl2550.c
+++ b/drivers/misc/tsl2550.c
@@ -443,9 +443,16 @@ static const struct i2c_device_id tsl2550_id[] = {
};
MODULE_DEVICE_TABLE(i2c, tsl2550_id);
+static const struct of_device_id tsl2550_of_match[] = {
+ { .compatible = "taos,tsl2550" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, tsl2550_of_match);
+
static struct i2c_driver tsl2550_driver = {
.driver = {
.name = TSL2550_DRV_NAME,
+ .of_match_table = tsl2550_of_match,
.pm = TSL2550_PM_OPS,
},
.probe = tsl2550_probe,
diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c
index 498c0854305f..06c4974ee8dd 100644
--- a/drivers/misc/vmw_vmci/vmci_queue_pair.c
+++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c
@@ -298,8 +298,11 @@ static void *qp_alloc_queue(u64 size, u32 flags)
size_t pas_size;
size_t vas_size;
size_t queue_size = sizeof(*queue) + sizeof(*queue->kernel_if);
- const u64 num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1;
+ u64 num_pages;
+ if (size > SIZE_MAX - PAGE_SIZE)
+ return NULL;
+ num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1;
if (num_pages >
(SIZE_MAX - queue_size) /
(sizeof(*queue->kernel_if->u.g.pas) +
@@ -624,9 +627,12 @@ static struct vmci_queue *qp_host_alloc_queue(u64 size)
{
struct vmci_queue *queue;
size_t queue_page_size;
- const u64 num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1;
+ u64 num_pages;
const size_t queue_size = sizeof(*queue) + sizeof(*(queue->kernel_if));
+ if (size > SIZE_MAX - PAGE_SIZE)
+ return NULL;
+ num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1;
if (num_pages > (SIZE_MAX - queue_size) /
sizeof(*queue->kernel_if->u.h.page))
return NULL;
diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
index c84742671a5f..092c9bd225be 100644
--- a/drivers/mtd/nand/nandsim.c
+++ b/drivers/mtd/nand/nandsim.c
@@ -40,6 +40,7 @@
#include <linux/list.h>
#include <linux/random.h>
#include <linux/sched.h>
+#include <linux/sched/mm.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/seq_file.h>
@@ -1368,31 +1369,18 @@ static int get_pages(struct nandsim *ns, struct file *file, size_t count, loff_t
return 0;
}
-static int set_memalloc(void)
-{
- if (current->flags & PF_MEMALLOC)
- return 0;
- current->flags |= PF_MEMALLOC;
- return 1;
-}
-
-static void clear_memalloc(int memalloc)
-{
- if (memalloc)
- current->flags &= ~PF_MEMALLOC;
-}
-
static ssize_t read_file(struct nandsim *ns, struct file *file, void *buf, size_t count, loff_t pos)
{
ssize_t tx;
- int err, memalloc;
+ int err;
+ unsigned int noreclaim_flag;
err = get_pages(ns, file, count, pos);
if (err)
return err;
- memalloc = set_memalloc();
+ noreclaim_flag = memalloc_noreclaim_save();
tx = kernel_read(file, pos, buf, count);
- clear_memalloc(memalloc);
+ memalloc_noreclaim_restore(noreclaim_flag);
put_pages(ns);
return tx;
}
@@ -1400,14 +1388,15 @@ static ssize_t read_file(struct nandsim *ns, struct file *file, void *buf, size_
static ssize_t write_file(struct nandsim *ns, struct file *file, void *buf, size_t count, loff_t pos)
{
ssize_t tx;
- int err, memalloc;
+ int err;
+ unsigned int noreclaim_flag;
err = get_pages(ns, file, count, pos);
if (err)
return err;
- memalloc = set_memalloc();
+ noreclaim_flag = memalloc_noreclaim_save();
tx = kernel_write(file, buf, count, pos);
- clear_memalloc(memalloc);
+ memalloc_noreclaim_restore(noreclaim_flag);
put_pages(ns);
return tx;
}
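The nandsim hunks replace the driver's open-coded PF_MEMALLOC save/restore with the memalloc_noreclaim_save()/memalloc_noreclaim_restore() pair from <linux/sched/mm.h>, which nests safely because the previous flag state is handed back to the caller. A minimal sketch of the pattern around a file write (cachefile_write and its arguments are illustrative, not part of the patch):

	#include <linux/fs.h>
	#include <linux/sched/mm.h>

	static ssize_t cachefile_write(struct file *file, void *buf,
				       size_t count, loff_t pos)
	{
		unsigned int noreclaim_flag;
		ssize_t tx;

		/* Flag the task so this I/O cannot recurse into reclaim;
		 * the helper returns the previous state for restore. */
		noreclaim_flag = memalloc_noreclaim_save();
		tx = kernel_write(file, buf, count, pos);
		memalloc_noreclaim_restore(noreclaim_flag);

		return tx;
	}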
diff --git a/drivers/mtd/ubi/block.c b/drivers/mtd/ubi/block.c
index 51f2be8889b5..5497e65439df 100644
--- a/drivers/mtd/ubi/block.c
+++ b/drivers/mtd/ubi/block.c
@@ -334,10 +334,9 @@ static int ubiblock_queue_rq(struct blk_mq_hw_ctx *hctx,
}
-static int ubiblock_init_request(void *data, struct request *req,
- unsigned int hctx_idx,
- unsigned int request_idx,
- unsigned int numa_node)
+static int ubiblock_init_request(struct blk_mq_tag_set *set,
+ struct request *req, unsigned int hctx_idx,
+ unsigned int numa_node)
{
struct ubiblock_pdu *pdu = blk_mq_rq_to_pdu(req);
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index 77513195f50e..8bae3731d039 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -421,41 +421,6 @@ static void dev_release(struct device *dev)
}
/**
- * ubi_sysfs_init - initialize sysfs for an UBI device.
- * @ubi: UBI device description object
- * @ref: set to %1 on exit in case of failure if a reference to @ubi->dev was
- * taken
- *
- * This function returns zero in case of success and a negative error code in
- * case of failure.
- */
-static int ubi_sysfs_init(struct ubi_device *ubi, int *ref)
-{
- int err;
-
- ubi->dev.release = dev_release;
- ubi->dev.devt = ubi->cdev.dev;
- ubi->dev.class = &ubi_class;
- ubi->dev.groups = ubi_dev_groups;
- dev_set_name(&ubi->dev, UBI_NAME_STR"%d", ubi->ubi_num);
- err = device_register(&ubi->dev);
- if (err)
- return err;
-
- *ref = 1;
- return 0;
-}
-
-/**
- * ubi_sysfs_close - close sysfs for an UBI device.
- * @ubi: UBI device description object
- */
-static void ubi_sysfs_close(struct ubi_device *ubi)
-{
- device_unregister(&ubi->dev);
-}
-
-/**
* kill_volumes - destroy all user volumes.
* @ubi: UBI device description object
*/
@@ -471,27 +436,19 @@ static void kill_volumes(struct ubi_device *ubi)
/**
* uif_init - initialize user interfaces for an UBI device.
* @ubi: UBI device description object
- * @ref: set to %1 on exit in case of failure if a reference to @ubi->dev was
- * taken, otherwise set to %0
*
* This function initializes various user interfaces for an UBI device. If the
* initialization fails at an early stage, this function frees all the
- * resources it allocated, returns an error, and @ref is set to %0. However,
- * if the initialization fails after the UBI device was registered in the
- * driver core subsystem, this function takes a reference to @ubi->dev, because
- * otherwise the release function ('dev_release()') would free whole @ubi
- * object. The @ref argument is set to %1 in this case. The caller has to put
- * this reference.
+ * resources it allocated and returns an error.
*
* This function returns zero in case of success and a negative error code in
* case of failure.
*/
-static int uif_init(struct ubi_device *ubi, int *ref)
+static int uif_init(struct ubi_device *ubi)
{
int i, err;
dev_t dev;
- *ref = 0;
sprintf(ubi->ubi_name, UBI_NAME_STR "%d", ubi->ubi_num);
/*
@@ -508,20 +465,17 @@ static int uif_init(struct ubi_device *ubi, int *ref)
return err;
}
+ ubi->dev.devt = dev;
+
ubi_assert(MINOR(dev) == 0);
cdev_init(&ubi->cdev, &ubi_cdev_operations);
dbg_gen("%s major is %u", ubi->ubi_name, MAJOR(dev));
ubi->cdev.owner = THIS_MODULE;
- err = cdev_add(&ubi->cdev, dev, 1);
- if (err) {
- ubi_err(ubi, "cannot add character device");
- goto out_unreg;
- }
-
- err = ubi_sysfs_init(ubi, ref);
+ dev_set_name(&ubi->dev, UBI_NAME_STR "%d", ubi->ubi_num);
+ err = cdev_device_add(&ubi->cdev, &ubi->dev);
if (err)
- goto out_sysfs;
+ goto out_unreg;
for (i = 0; i < ubi->vtbl_slots; i++)
if (ubi->volumes[i]) {
@@ -536,11 +490,7 @@ static int uif_init(struct ubi_device *ubi, int *ref)
out_volumes:
kill_volumes(ubi);
-out_sysfs:
- if (*ref)
- get_device(&ubi->dev);
- ubi_sysfs_close(ubi);
- cdev_del(&ubi->cdev);
+ cdev_device_del(&ubi->cdev, &ubi->dev);
out_unreg:
unregister_chrdev_region(ubi->cdev.dev, ubi->vtbl_slots + 1);
ubi_err(ubi, "cannot initialize UBI %s, error %d",
@@ -559,8 +509,7 @@ out_unreg:
static void uif_close(struct ubi_device *ubi)
{
kill_volumes(ubi);
- ubi_sysfs_close(ubi);
- cdev_del(&ubi->cdev);
+ cdev_device_del(&ubi->cdev, &ubi->dev);
unregister_chrdev_region(ubi->cdev.dev, ubi->vtbl_slots + 1);
}
@@ -857,7 +806,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
int vid_hdr_offset, int max_beb_per1024)
{
struct ubi_device *ubi;
- int i, err, ref = 0;
+ int i, err;
if (max_beb_per1024 < 0 || max_beb_per1024 > MAX_MTD_UBI_BEB_LIMIT)
return -EINVAL;
@@ -919,6 +868,11 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
if (!ubi)
return -ENOMEM;
+ device_initialize(&ubi->dev);
+ ubi->dev.release = dev_release;
+ ubi->dev.class = &ubi_class;
+ ubi->dev.groups = ubi_dev_groups;
+
ubi->mtd = mtd;
ubi->ubi_num = ubi_num;
ubi->vid_hdr_offset = vid_hdr_offset;
@@ -995,7 +949,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
/* Make device "available" before it becomes accessible via sysfs */
ubi_devices[ubi_num] = ubi;
- err = uif_init(ubi, &ref);
+ err = uif_init(ubi);
if (err)
goto out_detach;
@@ -1045,8 +999,6 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
out_debugfs:
ubi_debugfs_exit_dev(ubi);
out_uif:
- get_device(&ubi->dev);
- ubi_assert(ref);
uif_close(ubi);
out_detach:
ubi_devices[ubi_num] = NULL;
@@ -1056,10 +1008,7 @@ out_detach:
out_free:
vfree(ubi->peb_buf);
vfree(ubi->fm_buf);
- if (ref)
- put_device(&ubi->dev);
- else
- kfree(ubi);
+ put_device(&ubi->dev);
return err;
}
@@ -1120,12 +1069,6 @@ int ubi_detach_mtd_dev(int ubi_num, int anyway)
if (ubi->bgt_thread)
kthread_stop(ubi->bgt_thread);
- /*
- * Get a reference to the device in order to prevent 'dev_release()'
- * from freeing the @ubi object.
- */
- get_device(&ubi->dev);
-
ubi_debugfs_exit_dev(ubi);
uif_close(ubi);
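The UBI conversion above follows the generic pattern for a struct device that embeds a cdev: initialize the device early with device_initialize(), register the cdev and device together with cdev_device_add(), and on every failure path drop the reference with put_device() so the release callback does the freeing. A condensed sketch under hypothetical names (example_dev, example_release, example_register):

	#include <linux/cdev.h>
	#include <linux/device.h>
	#include <linux/fs.h>
	#include <linux/module.h>
	#include <linux/slab.h>

	struct example_dev {
		struct device dev;
		struct cdev cdev;
	};

	static void example_release(struct device *dev)
	{
		kfree(container_of(dev, struct example_dev, dev));
	}

	static int example_register(struct example_dev *ed, dev_t devt,
				    const struct file_operations *fops)
	{
		int err;

		device_initialize(&ed->dev);	/* refcount is live from here */
		ed->dev.release = example_release;
		ed->dev.devt = devt;
		dev_set_name(&ed->dev, "example%d", MINOR(devt));

		cdev_init(&ed->cdev, fops);
		ed->cdev.owner = THIS_MODULE;

		err = cdev_device_add(&ed->cdev, &ed->dev);
		if (err)
			put_device(&ed->dev);	/* frees via example_release() */
		return err;
	}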
diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c
index 7ac78c13dd1c..85237cf661f9 100644
--- a/drivers/mtd/ubi/vmt.c
+++ b/drivers/mtd/ubi/vmt.c
@@ -155,11 +155,10 @@ static void vol_release(struct device *dev)
*/
int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
{
- int i, err, vol_id = req->vol_id, do_free = 1;
+ int i, err, vol_id = req->vol_id;
struct ubi_volume *vol;
struct ubi_vtbl_record vtbl_rec;
struct ubi_eba_table *eba_tbl = NULL;
- dev_t dev;
if (ubi->ro_mode)
return -EROFS;
@@ -168,6 +167,12 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
if (!vol)
return -ENOMEM;
+ device_initialize(&vol->dev);
+ vol->dev.release = vol_release;
+ vol->dev.parent = &ubi->dev;
+ vol->dev.class = &ubi_class;
+ vol->dev.groups = volume_dev_groups;
+
spin_lock(&ubi->volumes_lock);
if (vol_id == UBI_VOL_NUM_AUTO) {
/* Find unused volume ID */
@@ -268,24 +273,13 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
/* Register character device for the volume */
cdev_init(&vol->cdev, &ubi_vol_cdev_operations);
vol->cdev.owner = THIS_MODULE;
- dev = MKDEV(MAJOR(ubi->cdev.dev), vol_id + 1);
- err = cdev_add(&vol->cdev, dev, 1);
- if (err) {
- ubi_err(ubi, "cannot add character device");
- goto out_mapping;
- }
-
- vol->dev.release = vol_release;
- vol->dev.parent = &ubi->dev;
- vol->dev.devt = dev;
- vol->dev.class = &ubi_class;
- vol->dev.groups = volume_dev_groups;
+ vol->dev.devt = MKDEV(MAJOR(ubi->cdev.dev), vol_id + 1);
dev_set_name(&vol->dev, "%s_%d", ubi->ubi_name, vol->vol_id);
- err = device_register(&vol->dev);
+ err = cdev_device_add(&vol->cdev, &vol->dev);
if (err) {
- ubi_err(ubi, "cannot register device");
- goto out_cdev;
+ ubi_err(ubi, "cannot add device");
+ goto out_mapping;
}
/* Fill volume table record */
@@ -318,28 +312,17 @@ out_sysfs:
* We have registered our device, we should not free the volume
* description object in this function in case of an error - it is
* freed by the release function.
- *
- * Get device reference to prevent the release function from being
- * called just after sysfs has been closed.
*/
- do_free = 0;
- get_device(&vol->dev);
- device_unregister(&vol->dev);
-out_cdev:
- cdev_del(&vol->cdev);
+ cdev_device_del(&vol->cdev, &vol->dev);
out_mapping:
- if (do_free)
- ubi_eba_destroy_table(eba_tbl);
+ ubi_eba_destroy_table(eba_tbl);
out_acc:
spin_lock(&ubi->volumes_lock);
ubi->rsvd_pebs -= vol->reserved_pebs;
ubi->avail_pebs += vol->reserved_pebs;
out_unlock:
spin_unlock(&ubi->volumes_lock);
- if (do_free)
- kfree(vol);
- else
- put_device(&vol->dev);
+ put_device(&vol->dev);
ubi_err(ubi, "cannot create volume %d, error %d", vol_id, err);
return err;
}
@@ -391,8 +374,8 @@ int ubi_remove_volume(struct ubi_volume_desc *desc, int no_vtbl)
goto out_err;
}
- cdev_del(&vol->cdev);
- device_unregister(&vol->dev);
+ cdev_device_del(&vol->cdev, &vol->dev);
+ put_device(&vol->dev);
spin_lock(&ubi->volumes_lock);
ubi->rsvd_pebs -= reserved_pebs;
diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
index c502c139d3bc..47a8103610bc 100644
--- a/drivers/net/bonding/bond_netlink.c
+++ b/drivers/net/bonding/bond_netlink.c
@@ -549,7 +549,8 @@ static int bond_fill_info(struct sk_buff *skb,
targets_added = 0;
for (i = 0; i < BOND_MAX_ARP_TARGETS; i++) {
if (bond->params.arp_targets[i]) {
- nla_put_be32(skb, i, bond->params.arp_targets[i]);
+ if (nla_put_be32(skb, i, bond->params.arp_targets[i]))
+ goto nla_put_failure;
targets_added = 1;
}
}
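The bonding fix above is the standard netlink fill pattern: every nla_put_*() can fail with -EMSGSIZE once the skb tailroom runs out, so its return value has to be checked and the fill function unwound through an nla_put_failure label. A minimal sketch (example_fill_info and EXAMPLE_ATTR_VALUE are hypothetical):

	#include <linux/errno.h>
	#include <net/netlink.h>

	#define EXAMPLE_ATTR_VALUE 1	/* hypothetical attribute type */

	static int example_fill_info(struct sk_buff *skb, u32 value)
	{
		/* nla_put_u32() returns -EMSGSIZE when the attribute no
		 * longer fits; ignoring that silently truncates the dump. */
		if (nla_put_u32(skb, EXAMPLE_ATTR_VALUE, value))
			goto nla_put_failure;

		return 0;

	nla_put_failure:
		return -EMSGSIZE;
	}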
diff --git a/drivers/net/can/rcar/rcar_canfd.c b/drivers/net/can/rcar/rcar_canfd.c
index 4ef07d97156d..602c19e23f05 100644
--- a/drivers/net/can/rcar/rcar_canfd.c
+++ b/drivers/net/can/rcar/rcar_canfd.c
@@ -413,7 +413,7 @@
/* RSCFDnRPGACCr */
#define RCANFD_C_RPGACC(r) (0x1900 + (0x04 * (r)))
-/* CAN FD mode specific regsiter map */
+/* CAN FD mode specific register map */
/* RSCFDnCFDCmXXX -> RCANFD_F_XXX(m) */
#define RCANFD_F_DCFG(m) (0x0500 + (0x20 * (m)))
diff --git a/drivers/net/dsa/dsa_loop.c b/drivers/net/dsa/dsa_loop.c
index f0fc4de4fc9a..a19e1781e9bb 100644
--- a/drivers/net/dsa/dsa_loop.c
+++ b/drivers/net/dsa/dsa_loop.c
@@ -256,6 +256,9 @@ static int dsa_loop_drv_probe(struct mdio_device *mdiodev)
return -ENOMEM;
ps = devm_kzalloc(&mdiodev->dev, sizeof(*ps), GFP_KERNEL);
+ if (!ps)
+ return -ENOMEM;
+
ps->netdev = dev_get_by_name(&init_net, pdata->netdev);
if (!ps->netdev)
return -EPROBE_DEFER;
diff --git a/drivers/net/ethernet/amd/amd8111e.h b/drivers/net/ethernet/amd/amd8111e.h
index 7cdb18512407..2a57b46fd6a6 100644
--- a/drivers/net/ethernet/amd/amd8111e.h
+++ b/drivers/net/ethernet/amd/amd8111e.h
@@ -48,7 +48,7 @@ eg., if the value 10011010b is written into the least significant byte of a comm
/* 32 bit registers */
#define ASF_STAT 0x00 /* ASF status register */
-#define CHIPID 0x04 /* Chip ID regsiter */
+#define CHIPID 0x04 /* Chip ID register */
#define MIB_DATA 0x10 /* MIB data register */
#define MIB_ADDR 0x14 /* MIB address register */
#define STAT0 0x30 /* Status0 register */
@@ -648,7 +648,7 @@ typedef enum {
/* driver ioctl parameters */
#define AMD8111E_REG_DUMP_LEN 13*sizeof(u32)
-/* amd8111e desriptor format */
+/* amd8111e descriptor format */
struct amd8111e_tx_dr{
diff --git a/drivers/net/ethernet/amd/atarilance.c b/drivers/net/ethernet/amd/atarilance.c
index 796c37a5bbde..c5b81268c284 100644
--- a/drivers/net/ethernet/amd/atarilance.c
+++ b/drivers/net/ethernet/amd/atarilance.c
@@ -42,8 +42,8 @@
*/
-static char version[] = "atarilance.c: v1.3 04/04/96 "
- "Roman.Hodek@informatik.uni-erlangen.de\n";
+static const char version[] = "atarilance.c: v1.3 04/04/96 "
+ "Roman.Hodek@informatik.uni-erlangen.de\n";
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
diff --git a/drivers/net/ethernet/amd/declance.c b/drivers/net/ethernet/amd/declance.c
index 6c98901f1b89..82cc81385033 100644
--- a/drivers/net/ethernet/amd/declance.c
+++ b/drivers/net/ethernet/amd/declance.c
@@ -72,7 +72,7 @@
#include <asm/dec/machtype.h>
#include <asm/dec/system.h>
-static char version[] =
+static const char version[] =
"declance.c: v0.011 by Linux MIPS DECstation task force\n";
MODULE_AUTHOR("Linux MIPS DECstation task force");
diff --git a/drivers/net/ethernet/amd/sun3lance.c b/drivers/net/ethernet/amd/sun3lance.c
index 12bb4f1489fc..77b1db267730 100644
--- a/drivers/net/ethernet/amd/sun3lance.c
+++ b/drivers/net/ethernet/amd/sun3lance.c
@@ -21,7 +21,8 @@
*/
-static char *version = "sun3lance.c: v1.2 1/12/2001 Sam Creasey (sammy@sammy.net)\n";
+static const char version[] =
+"sun3lance.c: v1.2 1/12/2001 Sam Creasey (sammy@sammy.net)\n";
#include <linux/module.h>
#include <linux/stddef.h>
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
index 5f99237a9d52..214986436ece 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
@@ -68,7 +68,7 @@
#define AQ_CFG_DRV_AUTHOR "aQuantia"
#define AQ_CFG_DRV_DESC "aQuantia Corporation(R) Network Driver"
-#define AQ_CFG_DRV_NAME "aquantia"
+#define AQ_CFG_DRV_NAME "atlantic"
#define AQ_CFG_DRV_VERSION __stringify(NIC_MAJOR_DRIVER_VERSION)"."\
__stringify(NIC_MINOR_DRIVER_VERSION)"."\
__stringify(NIC_BUILD_DRIVER_VERSION)"."\
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index cdb02991f249..9ee1c5016784 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -755,7 +755,7 @@ void aq_nic_get_stats(struct aq_nic_s *self, u64 *data)
count = 0U;
for (i = 0U, aq_vec = self->aq_vec[0];
- self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) {
+ aq_vec && self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) {
data += count;
aq_vec_get_sw_stats(aq_vec, data, &count);
}
@@ -959,8 +959,10 @@ void aq_nic_free_hot_resources(struct aq_nic_s *self)
goto err_exit;
for (i = AQ_DIMOF(self->aq_vec); i--;) {
- if (self->aq_vec[i])
+ if (self->aq_vec[i]) {
aq_vec_free(self->aq_vec[i]);
+ self->aq_vec[i] = NULL;
+ }
}
err_exit:;
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index a8c2db881b75..567ee54504bc 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -838,7 +838,7 @@ static int alx_enable_msix(struct alx_priv *alx)
err = pci_alloc_irq_vectors(alx->hw.pdev, num_vec, num_vec,
PCI_IRQ_MSIX);
- if (err) {
+ if (err < 0) {
netdev_warn(alx->dev, "Enabling MSI-X interrupts failed!\n");
return err;
}
@@ -904,7 +904,7 @@ static int alx_init_intr(struct alx_priv *alx)
ret = pci_alloc_irq_vectors(alx->hw.pdev, 1, 1,
PCI_IRQ_MSI | PCI_IRQ_LEGACY);
- if (ret)
+ if (ret < 0)
return ret;
alx->num_vec = 1;
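The alx change reflects the pci_alloc_irq_vectors() contract: on success it returns the number of vectors actually allocated (a positive count), and a negative errno on failure, so a plain `if (err)` would treat every successful allocation as an error. A hedged sketch of the intended check (example_setup_irqs is hypothetical):

	#include <linux/pci.h>

	static int example_setup_irqs(struct pci_dev *pdev, int want)
	{
		int nvec;

		nvec = pci_alloc_irq_vectors(pdev, 1, want,
					     PCI_IRQ_MSIX | PCI_IRQ_MSI |
					     PCI_IRQ_LEGACY);
		if (nvec < 0)		/* only negative values are errors */
			return nvec;

		/* nvec is how many vectors we actually got (1..want). */
		return nvec;
	}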
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c b/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c
index a8b80c56ac25..73efdb05a490 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c
@@ -307,7 +307,7 @@ void atl1c_start_phy_polling(struct atl1c_hw *hw, u16 clk_sel)
/*
* atl1c_read_phy_core
- * core function to read register in PHY via MDIO control regsiter.
+ * core function to read register in PHY via MDIO control register.
* ext: extension register (see IEEE 802.3)
* dev: device address (see IEEE 802.3 DEVAD, PRTAD is fixed to 0)
* reg: reg to read
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index cea6bdcde33f..8baf9d3eb4b1 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -1591,7 +1591,7 @@ static int __bnx2x_vlan_mac_execute_step(struct bnx2x *bp,
if (rc != 0) {
__bnx2x_vlan_mac_h_pend(bp, o, *ramrod_flags);
- /* Calling function should not diffrentiate between this case
+ /* Calling function should not differentiate between this case
* and the case in which there is already a pending ramrod
*/
rc = 1;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index b3ba66032980..b56c54d68d5e 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -3000,7 +3000,8 @@ static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);
bp->ntp_fltr_count = 0;
- bp->ntp_fltr_bmap = kzalloc(BITS_TO_LONGS(BNXT_NTP_FLTR_MAX_FLTR),
+ bp->ntp_fltr_bmap = kcalloc(BITS_TO_LONGS(BNXT_NTP_FLTR_MAX_FLTR),
+ sizeof(long),
GFP_KERNEL);
if (!bp->ntp_fltr_bmap)
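The bnxt fix matters because BITS_TO_LONGS() yields a count of longs, not bytes: the old kzalloc() allocated only BITS_TO_LONGS(n) bytes, roughly an eighth of the bitmap on 64-bit. Allocating a bitmap by hand therefore needs the element size as well, as in this small sketch (example_init_bitmap is hypothetical):

	#include <linux/bitops.h>
	#include <linux/slab.h>

	static unsigned long *example_init_bitmap(unsigned int nbits)
	{
		/* BITS_TO_LONGS(nbits) longs, each sizeof(unsigned long) bytes. */
		return kcalloc(BITS_TO_LONGS(nbits), sizeof(unsigned long),
			       GFP_KERNEL);
	}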
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index f395b951f5e7..537d571ee601 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -11729,10 +11729,6 @@ static int tg3_close(struct net_device *dev)
tg3_stop(tp);
- /* Clear stats across close / open calls */
- memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
- memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
-
if (pci_device_is_present(tp->pdev)) {
tg3_power_down_prepare(tp);
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
index 0f6811860ad5..a36e38676640 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
@@ -2845,7 +2845,7 @@ bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc, char *optrom_ver)
static void
bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc, char *manufacturer)
{
- memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
+ strncpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
}
static void
diff --git a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
index 286593922139..31032de5843b 100644
--- a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
+++ b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
@@ -547,8 +547,8 @@ bnad_get_strings(struct net_device *netdev, u32 stringset, u8 *string)
for (i = 0; i < BNAD_ETHTOOL_STATS_NUM; i++) {
BUG_ON(!(strlen(bnad_net_stats_strings[i]) <
ETH_GSTRING_LEN));
- memcpy(string, bnad_net_stats_strings[i],
- ETH_GSTRING_LEN);
+ strncpy(string, bnad_net_stats_strings[i],
+ ETH_GSTRING_LEN);
string += ETH_GSTRING_LEN;
}
bmap = bna_tx_rid_mask(&bnad->bna);
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_defs.h b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_defs.h
index 920d918ed193..f04e81f33795 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_defs.h
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_defs.h
@@ -41,9 +41,6 @@
#define VALIDATE_TID 1
-void *cxgb_alloc_mem(unsigned long size);
-void cxgb_free_mem(void *addr);
-
/*
* Map an ATID or STID to their entries in the corresponding TID tables.
*/
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
index 76684dcb874c..fa81445e334c 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
@@ -1152,27 +1152,6 @@ static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new,
}
/*
- * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc.
- * The allocated memory is cleared.
- */
-void *cxgb_alloc_mem(unsigned long size)
-{
- void *p = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
-
- if (!p)
- p = vzalloc(size);
- return p;
-}
-
-/*
- * Free memory allocated through t3_alloc_mem().
- */
-void cxgb_free_mem(void *addr)
-{
- kvfree(addr);
-}
-
-/*
* Allocate and initialize the TID tables. Returns 0 on success.
*/
static int init_tid_tabs(struct tid_info *t, unsigned int ntids,
@@ -1182,7 +1161,7 @@ static int init_tid_tabs(struct tid_info *t, unsigned int ntids,
unsigned long size = ntids * sizeof(*t->tid_tab) +
natids * sizeof(*t->atid_tab) + nstids * sizeof(*t->stid_tab);
- t->tid_tab = cxgb_alloc_mem(size);
+ t->tid_tab = kvzalloc(size, GFP_KERNEL);
if (!t->tid_tab)
return -ENOMEM;
@@ -1218,7 +1197,7 @@ static int init_tid_tabs(struct tid_info *t, unsigned int ntids,
static void free_tid_maps(struct tid_info *t)
{
- cxgb_free_mem(t->tid_tab);
+ kvfree(t->tid_tab);
}
static inline void add_adapter(struct adapter *adap)
@@ -1293,7 +1272,7 @@ int cxgb3_offload_activate(struct adapter *adapter)
return 0;
out_free_l2t:
- t3_free_l2t(l2td);
+ kvfree(l2td);
out_free:
kfree(t);
return err;
@@ -1302,7 +1281,7 @@ out_free:
static void clean_l2_data(struct rcu_head *head)
{
struct l2t_data *d = container_of(head, struct l2t_data, rcu_head);
- t3_free_l2t(d);
+ kvfree(d);
}
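The cxgb3 hunks above (and the cxgb4 hunks below) drop the drivers' private "kmalloc, else vmalloc" wrappers in favour of kvzalloc()/kvfree(), which implement exactly that fallback in the core. A minimal sketch of the replacement pattern (example_alloc_table/example_free_table are hypothetical callers):

	#include <linux/mm.h>	/* kvzalloc, kvfree */

	static void *example_alloc_table(unsigned long size)
	{
		/* Tries a zeroed kmalloc first and transparently falls back
		 * to vzalloc() for large or fragmented allocations. */
		return kvzalloc(size, GFP_KERNEL);
	}

	static void example_free_table(void *table)
	{
		/* Correct regardless of which allocator satisfied the request. */
		kvfree(table);
	}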
diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.c b/drivers/net/ethernet/chelsio/cxgb3/l2t.c
index 52063587e1e9..26264125865f 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.c
@@ -444,7 +444,7 @@ struct l2t_data *t3_init_l2t(unsigned int l2t_capacity)
struct l2t_data *d;
int i, size = sizeof(*d) + l2t_capacity * sizeof(struct l2t_entry);
- d = cxgb_alloc_mem(size);
+ d = kvzalloc(size, GFP_KERNEL);
if (!d)
return NULL;
@@ -462,9 +462,3 @@ struct l2t_data *t3_init_l2t(unsigned int l2t_capacity)
}
return d;
}
-
-void t3_free_l2t(struct l2t_data *d)
-{
- cxgb_free_mem(d);
-}
-
diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
index 8cffcdfd5678..c2fd323c4078 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
@@ -115,7 +115,6 @@ int t3_l2t_send_slow(struct t3cdev *dev, struct sk_buff *skb,
struct l2t_entry *e);
void t3_l2t_send_event(struct t3cdev *dev, struct l2t_entry *e);
struct l2t_data *t3_init_l2t(unsigned int l2t_capacity);
-void t3_free_l2t(struct l2t_data *d);
int cxgb3_ofld_send(struct t3cdev *dev, struct sk_buff *skb);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c
index 7ad43af6bde1..3103ef9b561d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c
@@ -290,8 +290,8 @@ struct clip_tbl *t4_init_clip_tbl(unsigned int clipt_start,
if (clipt_size < CLIPT_MIN_HASH_BUCKETS)
return NULL;
- ctbl = t4_alloc_mem(sizeof(*ctbl) +
- clipt_size*sizeof(struct list_head));
+ ctbl = kvzalloc(sizeof(*ctbl) +
+ clipt_size*sizeof(struct list_head), GFP_KERNEL);
if (!ctbl)
return NULL;
@@ -305,9 +305,9 @@ struct clip_tbl *t4_init_clip_tbl(unsigned int clipt_start,
for (i = 0; i < ctbl->clipt_size; ++i)
INIT_LIST_HEAD(&ctbl->hash_list[i]);
- cl_list = t4_alloc_mem(clipt_size*sizeof(struct clip_entry));
+ cl_list = kvzalloc(clipt_size*sizeof(struct clip_entry), GFP_KERNEL);
if (!cl_list) {
- t4_free_mem(ctbl);
+ kvfree(ctbl);
return NULL;
}
ctbl->cl_list = (void *)cl_list;
@@ -326,8 +326,8 @@ void t4_cleanup_clip_tbl(struct adapter *adap)
if (ctbl) {
if (ctbl->cl_list)
- t4_free_mem(ctbl->cl_list);
- t4_free_mem(ctbl);
+ kvfree(ctbl->cl_list);
+ kvfree(ctbl);
}
}
EXPORT_SYMBOL(t4_cleanup_clip_tbl);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 163543b1ea0b..e88c1808e46f 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -108,6 +108,12 @@ enum {
PAUSE_AUTONEG = 1 << 2
};
+enum {
+ FEC_AUTO = 1 << 0, /* IEEE 802.3 "automatic" */
+ FEC_RS = 1 << 1, /* Reed-Solomon */
+ FEC_BASER_RS = 1 << 2 /* BaseR/Reed-Solomon */
+};
+
struct port_stats {
u64 tx_octets; /* total # of octets in good frames */
u64 tx_frames; /* all good frames */
@@ -432,6 +438,9 @@ struct link_config {
unsigned int speed; /* actual link speed */
unsigned char requested_fc; /* flow control user has requested */
unsigned char fc; /* actual link flow control */
+ unsigned char auto_fec; /* Forward Error Correction: */
+ unsigned char requested_fec; /* "automatic" (IEEE 802.3), */
+ unsigned char fec; /* requested, and actual in use */
unsigned char autoneg; /* autonegotiating? */
unsigned char link_ok; /* link up? */
unsigned char link_down_rc; /* link down reason */
@@ -1184,8 +1193,6 @@ extern const char cxgb4_driver_version[];
void t4_os_portmod_changed(const struct adapter *adap, int port_id);
void t4_os_link_changed(struct adapter *adap, int port_id, int link_stat);
-void *t4_alloc_mem(size_t size);
-
void t4_free_sge_resources(struct adapter *adap);
void t4_free_ofld_rxqs(struct adapter *adap, int n, struct sge_ofld_rxq *q);
irq_handler_t t4_intr_handler(struct adapter *adap);
@@ -1557,7 +1564,6 @@ int t4_sched_params(struct adapter *adapter, int type, int level, int mode,
int rateunit, int ratemode, int channel, int class,
int minrate, int maxrate, int weight, int pktsize);
void t4_sge_decode_idma_state(struct adapter *adapter, int state);
-void t4_free_mem(void *addr);
void t4_idma_monitor_init(struct adapter *adapter,
struct sge_idma_monitor_state *idma);
void t4_idma_monitor(struct adapter *adapter,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index f6e739da7bb7..1fa34b009891 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -2634,7 +2634,7 @@ static ssize_t mem_read(struct file *file, char __user *buf, size_t count,
if (count > avail - pos)
count = avail - pos;
- data = t4_alloc_mem(count);
+ data = kvzalloc(count, GFP_KERNEL);
if (!data)
return -ENOMEM;
@@ -2642,12 +2642,12 @@ static ssize_t mem_read(struct file *file, char __user *buf, size_t count,
ret = t4_memory_rw(adap, 0, mem, pos, count, data, T4_MEMORY_READ);
spin_unlock(&adap->win0_lock);
if (ret) {
- t4_free_mem(data);
+ kvfree(data);
return ret;
}
ret = copy_to_user(buf, data, count);
- t4_free_mem(data);
+ kvfree(data);
if (ret)
return -EFAULT;
@@ -2753,7 +2753,7 @@ static ssize_t blocked_fl_read(struct file *filp, char __user *ubuf,
adap->sge.egr_sz, adap->sge.blocked_fl);
len += sprintf(buf + len, "\n");
size = simple_read_from_buffer(ubuf, count, ppos, buf, len);
- t4_free_mem(buf);
+ kvfree(buf);
return size;
}
@@ -2773,7 +2773,7 @@ static ssize_t blocked_fl_write(struct file *filp, const char __user *ubuf,
return err;
bitmap_copy(adap->sge.blocked_fl, t, adap->sge.egr_sz);
- t4_free_mem(t);
+ kvfree(t);
return count;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
index 02f80febeb91..0ba7866c8259 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
@@ -969,7 +969,7 @@ static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
{
int i, err = 0;
struct adapter *adapter = netdev2adap(dev);
- u8 *buf = t4_alloc_mem(EEPROMSIZE);
+ u8 *buf = kvzalloc(EEPROMSIZE, GFP_KERNEL);
if (!buf)
return -ENOMEM;
@@ -980,7 +980,7 @@ static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
if (!err)
memcpy(data, buf + e->offset, e->len);
- t4_free_mem(buf);
+ kvfree(buf);
return err;
}
@@ -1009,7 +1009,7 @@ static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
/* RMW possibly needed for first or last words.
*/
- buf = t4_alloc_mem(aligned_len);
+ buf = kvzalloc(aligned_len, GFP_KERNEL);
if (!buf)
return -ENOMEM;
err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf);
@@ -1037,7 +1037,7 @@ static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
err = t4_seeprom_wp(adapter, true);
out:
if (buf != data)
- t4_free_mem(buf);
+ kvfree(buf);
return err;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index c12c4a3b82b5..38a5c6764bb5 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -880,27 +880,6 @@ freeout:
return err;
}
-/*
- * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc.
- * The allocated memory is cleared.
- */
-void *t4_alloc_mem(size_t size)
-{
- void *p = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
-
- if (!p)
- p = vzalloc(size);
- return p;
-}
-
-/*
- * Free memory allocated through alloc_mem().
- */
-void t4_free_mem(void *addr)
-{
- kvfree(addr);
-}
-
static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
void *accel_priv, select_queue_fallback_t fallback)
{
@@ -1299,7 +1278,7 @@ static int tid_init(struct tid_info *t)
max_ftids * sizeof(*t->ftid_tab) +
ftid_bmap_size * sizeof(long);
- t->tid_tab = t4_alloc_mem(size);
+ t->tid_tab = kvzalloc(size, GFP_KERNEL);
if (!t->tid_tab)
return -ENOMEM;
@@ -3445,7 +3424,7 @@ static int adap_init0(struct adapter *adap)
/* allocate memory to read the header of the firmware on the
* card
*/
- card_fw = t4_alloc_mem(sizeof(*card_fw));
+ card_fw = kvzalloc(sizeof(*card_fw), GFP_KERNEL);
/* Get FW from from /lib/firmware/ */
ret = request_firmware(&fw, fw_info->fw_mod_name,
@@ -3465,7 +3444,7 @@ static int adap_init0(struct adapter *adap)
/* Cleaning up */
release_firmware(fw);
- t4_free_mem(card_fw);
+ kvfree(card_fw);
if (ret < 0)
goto bye;
@@ -4470,9 +4449,9 @@ static void free_some_resources(struct adapter *adapter)
{
unsigned int i;
- t4_free_mem(adapter->l2t);
+ kvfree(adapter->l2t);
t4_cleanup_sched(adapter);
- t4_free_mem(adapter->tids.tid_tab);
+ kvfree(adapter->tids.tid_tab);
cxgb4_cleanup_tc_u32(adapter);
kfree(adapter->sge.egr_map);
kfree(adapter->sge.ingr_map);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
index a1b19422b339..ef06ce8247ab 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
@@ -432,9 +432,9 @@ void cxgb4_cleanup_tc_u32(struct adapter *adap)
for (i = 0; i < t->size; i++) {
struct cxgb4_link *link = &t->table[i];
- t4_free_mem(link->tid_map);
+ kvfree(link->tid_map);
}
- t4_free_mem(adap->tc_u32);
+ kvfree(adap->tc_u32);
}
struct cxgb4_tc_u32_table *cxgb4_init_tc_u32(struct adapter *adap)
@@ -446,8 +446,8 @@ struct cxgb4_tc_u32_table *cxgb4_init_tc_u32(struct adapter *adap)
if (!max_tids)
return NULL;
- t = t4_alloc_mem(sizeof(*t) +
- (max_tids * sizeof(struct cxgb4_link)));
+ t = kvzalloc(sizeof(*t) +
+ (max_tids * sizeof(struct cxgb4_link)), GFP_KERNEL);
if (!t)
return NULL;
@@ -458,7 +458,7 @@ struct cxgb4_tc_u32_table *cxgb4_init_tc_u32(struct adapter *adap)
unsigned int bmap_size;
bmap_size = BITS_TO_LONGS(max_tids);
- link->tid_map = t4_alloc_mem(sizeof(unsigned long) * bmap_size);
+ link->tid_map = kvzalloc(sizeof(unsigned long) * bmap_size, GFP_KERNEL);
if (!link->tid_map)
goto out_no_mem;
bitmap_zero(link->tid_map, max_tids);
@@ -471,11 +471,11 @@ out_no_mem:
struct cxgb4_link *link = &t->table[i];
if (link->tid_map)
- t4_free_mem(link->tid_map);
+ kvfree(link->tid_map);
}
if (t)
- t4_free_mem(t);
+ kvfree(t);
return NULL;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
index 7c8c5b9a3c22..6f3692db29af 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
@@ -646,7 +646,7 @@ struct l2t_data *t4_init_l2t(unsigned int l2t_start, unsigned int l2t_end)
if (l2t_size < L2T_MIN_HASH_BUCKETS)
return NULL;
- d = t4_alloc_mem(sizeof(*d) + l2t_size * sizeof(struct l2t_entry));
+ d = kvzalloc(sizeof(*d) + l2t_size * sizeof(struct l2t_entry), GFP_KERNEL);
if (!d)
return NULL;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sched.c b/drivers/net/ethernet/chelsio/cxgb4/sched.c
index c9026352a842..02acff741f11 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sched.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sched.c
@@ -177,7 +177,7 @@ static int t4_sched_queue_unbind(struct port_info *pi, struct ch_sched_queue *p)
}
list_del(&qe->list);
- t4_free_mem(qe);
+ kvfree(qe);
if (atomic_dec_and_test(&e->refcnt)) {
e->state = SCHED_STATE_UNUSED;
memset(&e->info, 0, sizeof(e->info));
@@ -201,7 +201,7 @@ static int t4_sched_queue_bind(struct port_info *pi, struct ch_sched_queue *p)
if (p->queue < 0 || p->queue >= pi->nqsets)
return -ERANGE;
- qe = t4_alloc_mem(sizeof(struct sched_queue_entry));
+ qe = kvzalloc(sizeof(struct sched_queue_entry), GFP_KERNEL);
if (!qe)
return -ENOMEM;
@@ -211,7 +211,7 @@ static int t4_sched_queue_bind(struct port_info *pi, struct ch_sched_queue *p)
/* Unbind queue from any existing class */
err = t4_sched_queue_unbind(pi, p);
if (err) {
- t4_free_mem(qe);
+ kvfree(qe);
goto out;
}
@@ -224,7 +224,7 @@ static int t4_sched_queue_bind(struct port_info *pi, struct ch_sched_queue *p)
spin_lock(&e->lock);
err = t4_sched_bind_unbind_op(pi, (void *)qe, SCHED_QUEUE, true);
if (err) {
- t4_free_mem(qe);
+ kvfree(qe);
spin_unlock(&e->lock);
goto out;
}
@@ -512,7 +512,7 @@ struct sched_table *t4_init_sched(unsigned int sched_size)
struct sched_table *s;
unsigned int i;
- s = t4_alloc_mem(sizeof(*s) + sched_size * sizeof(struct sched_class));
+ s = kvzalloc(sizeof(*s) + sched_size * sizeof(struct sched_class), GFP_KERNEL);
if (!s)
return NULL;
@@ -548,6 +548,6 @@ void t4_cleanup_sched(struct adapter *adap)
t4_sched_class_free(pi, e);
write_unlock(&s->rw_lock);
}
- t4_free_mem(s);
+ kvfree(s);
}
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 0de8eb72325c..aded42b96f6d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -3707,7 +3707,8 @@ int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port,
struct link_config *lc)
{
struct fw_port_cmd c;
- unsigned int fc = 0, mdi = FW_PORT_CAP_MDI_V(FW_PORT_CAP_MDI_AUTO);
+ unsigned int mdi = FW_PORT_CAP_MDI_V(FW_PORT_CAP_MDI_AUTO);
+ unsigned int fc = 0, fec = 0, fw_fec = 0;
lc->link_ok = 0;
if (lc->requested_fc & PAUSE_RX)
@@ -3715,6 +3716,13 @@ int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port,
if (lc->requested_fc & PAUSE_TX)
fc |= FW_PORT_CAP_FC_TX;
+ fec = lc->requested_fec & FEC_AUTO ? lc->auto_fec : lc->requested_fec;
+
+ if (fec & FEC_RS)
+ fw_fec |= FW_PORT_CAP_FEC_RS;
+ if (fec & FEC_BASER_RS)
+ fw_fec |= FW_PORT_CAP_FEC_BASER_RS;
+
memset(&c, 0, sizeof(c));
c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
@@ -3725,13 +3733,15 @@ int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port,
if (!(lc->supported & FW_PORT_CAP_ANEG)) {
c.u.l1cfg.rcap = cpu_to_be32((lc->supported & ADVERT_MASK) |
- fc);
+ fc | fw_fec);
lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
} else if (lc->autoneg == AUTONEG_DISABLE) {
- c.u.l1cfg.rcap = cpu_to_be32(lc->requested_speed | fc | mdi);
+ c.u.l1cfg.rcap = cpu_to_be32(lc->requested_speed | fc |
+ fw_fec | mdi);
lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
} else
- c.u.l1cfg.rcap = cpu_to_be32(lc->advertising | fc | mdi);
+ c.u.l1cfg.rcap = cpu_to_be32(lc->advertising | fc |
+ fw_fec | mdi);
return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
@@ -7407,13 +7417,26 @@ static void get_pci_mode(struct adapter *adapter, struct pci_params *p)
* Initializes the SW state maintained for each link, including the link's
* capabilities and default speed/flow-control/autonegotiation settings.
*/
-static void init_link_config(struct link_config *lc, unsigned int caps)
+static void init_link_config(struct link_config *lc, unsigned int pcaps,
+ unsigned int acaps)
{
- lc->supported = caps;
+ lc->supported = pcaps;
lc->lp_advertising = 0;
lc->requested_speed = 0;
lc->speed = 0;
lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
+ lc->auto_fec = 0;
+
+ /* For Forward Error Correction, we default to whatever the Firmware
+ * tells us the Link is currently advertising.
+ */
+ if (acaps & FW_PORT_CAP_FEC_RS)
+ lc->auto_fec |= FEC_RS;
+ if (acaps & FW_PORT_CAP_FEC_BASER_RS)
+ lc->auto_fec |= FEC_BASER_RS;
+ lc->requested_fec = FEC_AUTO;
+ lc->fec = lc->auto_fec;
+
if (lc->supported & FW_PORT_CAP_ANEG) {
lc->advertising = lc->supported & ADVERT_MASK;
lc->autoneg = AUTONEG_ENABLE;
@@ -7991,7 +8014,8 @@ int t4_init_portinfo(struct port_info *pi, int mbox,
pi->port_type = FW_PORT_CMD_PTYPE_G(ret);
pi->mod_type = FW_PORT_MOD_TYPE_NA;
- init_link_config(&pi->link_cfg, be16_to_cpu(c.u.info.pcap));
+ init_link_config(&pi->link_cfg, be16_to_cpu(c.u.info.pcap),
+ be16_to_cpu(c.u.info.acap));
return 0;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index 8f8c079d0d2b..251a35e9795c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -2263,9 +2263,9 @@ enum fw_port_cap {
FW_PORT_CAP_ANEG = 0x0100,
FW_PORT_CAP_MDIX = 0x0200,
FW_PORT_CAP_MDIAUTO = 0x0400,
- FW_PORT_CAP_FEC = 0x0800,
- FW_PORT_CAP_TECHKR = 0x1000,
- FW_PORT_CAP_TECHKX4 = 0x2000,
+ FW_PORT_CAP_FEC_RS = 0x0800,
+ FW_PORT_CAP_FEC_BASER_RS = 0x1000,
+ FW_PORT_CAP_FEC_RESERVED = 0x2000,
FW_PORT_CAP_802_3_PAUSE = 0x4000,
FW_PORT_CAP_802_3_ASM_DIR = 0x8000,
};
diff --git a/drivers/net/ethernet/cirrus/mac89x0.c b/drivers/net/ethernet/cirrus/mac89x0.c
index b600fbbbf679..f910f0f386d6 100644
--- a/drivers/net/ethernet/cirrus/mac89x0.c
+++ b/drivers/net/ethernet/cirrus/mac89x0.c
@@ -56,7 +56,7 @@
local_irq_{dis,en}able()
*/
-static char *version =
+static const char version[] =
"cs89x0.c:v1.02 11/26/96 Russell Nelson <nelson@crynwr.com>\n";
/* ======================= configure the driver here ======================= */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
index 24dfba53a0f2..bbc0a98e7ca3 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
@@ -405,7 +405,7 @@ struct mac_driver {
};
struct mac_stats_string {
- char desc[ETH_GSTRING_LEN];
+ const char desc[ETH_GSTRING_LEN];
unsigned long offset;
};
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 4fcd2f0378ba..4f2d329dba99 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -194,7 +194,8 @@ static void free_long_term_buff(struct ibmvnic_adapter *adapter,
if (!ltb->buff)
return;
- if (!adapter->failover)
+ if (adapter->reset_reason != VNIC_RESET_FAILOVER &&
+ adapter->reset_reason != VNIC_RESET_MOBILITY)
send_request_unmap(adapter, ltb->map_id);
dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
}
@@ -292,9 +293,6 @@ static void replenish_pools(struct ibmvnic_adapter *adapter)
{
int i;
- if (adapter->migrated)
- return;
-
adapter->replenish_task_cycles++;
for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
i++) {
@@ -350,7 +348,7 @@ static void release_rx_pools(struct ibmvnic_adapter *adapter)
free_long_term_buff(adapter, &rx_pool->long_term_buff);
if (!rx_pool->rx_buff)
- continue;
+ continue;
for (j = 0; j < rx_pool->size; j++) {
if (rx_pool->rx_buff[j].skb) {
@@ -554,11 +552,20 @@ static int ibmvnic_login(struct net_device *netdev)
static void release_resources(struct ibmvnic_adapter *adapter)
{
+ int i;
+
release_tx_pools(adapter);
release_rx_pools(adapter);
release_stats_token(adapter);
release_error_buffers(adapter);
+
+ if (adapter->napi) {
+ for (i = 0; i < adapter->req_rx_queues; i++) {
+ if (&adapter->napi[i])
+ netif_napi_del(&adapter->napi[i]);
+ }
+ }
}
static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state)
@@ -569,11 +576,6 @@ static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state)
bool resend;
int rc;
- if (adapter->logical_link_state == link_state) {
- netdev_dbg(netdev, "Link state already %d\n", link_state);
- return 0;
- }
-
netdev_err(netdev, "setting link state %d\n", link_state);
memset(&crq, 0, sizeof(crq));
crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
@@ -624,22 +626,10 @@ static int set_real_num_queues(struct net_device *netdev)
return rc;
}
-static int ibmvnic_open(struct net_device *netdev)
+static int init_resources(struct ibmvnic_adapter *adapter)
{
- struct ibmvnic_adapter *adapter = netdev_priv(netdev);
- struct device *dev = &adapter->vdev->dev;
- int rc = 0;
- int i;
-
- if (adapter->is_closed) {
- rc = ibmvnic_init(adapter);
- if (rc)
- return rc;
- }
-
- rc = ibmvnic_login(netdev);
- if (rc)
- return rc;
+ struct net_device *netdev = adapter->netdev;
+ int i, rc;
rc = set_real_num_queues(netdev);
if (rc)
@@ -647,7 +637,7 @@ static int ibmvnic_open(struct net_device *netdev)
rc = init_sub_crq_irqs(adapter);
if (rc) {
- dev_err(dev, "failed to initialize sub crq irqs\n");
+ netdev_err(netdev, "failed to initialize sub crq irqs\n");
return -1;
}
@@ -659,90 +649,184 @@ static int ibmvnic_open(struct net_device *netdev)
adapter->napi = kcalloc(adapter->req_rx_queues,
sizeof(struct napi_struct), GFP_KERNEL);
if (!adapter->napi)
- goto ibmvnic_open_fail;
+ return -ENOMEM;
+
for (i = 0; i < adapter->req_rx_queues; i++) {
netif_napi_add(netdev, &adapter->napi[i], ibmvnic_poll,
NAPI_POLL_WEIGHT);
- napi_enable(&adapter->napi[i]);
}
send_map_query(adapter);
rc = init_rx_pools(netdev);
if (rc)
- goto ibmvnic_open_fail;
+ return rc;
rc = init_tx_pools(netdev);
- if (rc)
- goto ibmvnic_open_fail;
+ return rc;
+}
+
+static int __ibmvnic_open(struct net_device *netdev)
+{
+ struct ibmvnic_adapter *adapter = netdev_priv(netdev);
+ enum vnic_state prev_state = adapter->state;
+ int i, rc;
+ adapter->state = VNIC_OPENING;
replenish_pools(adapter);
+ for (i = 0; i < adapter->req_rx_queues; i++)
+ napi_enable(&adapter->napi[i]);
+
/* We're ready to receive frames, enable the sub-crq interrupts and
* set the logical link state to up
*/
- for (i = 0; i < adapter->req_rx_queues; i++)
- enable_scrq_irq(adapter, adapter->rx_scrq[i]);
+ for (i = 0; i < adapter->req_rx_queues; i++) {
+ if (prev_state == VNIC_CLOSED)
+ enable_irq(adapter->rx_scrq[i]->irq);
+ else
+ enable_scrq_irq(adapter, adapter->rx_scrq[i]);
+ }
- for (i = 0; i < adapter->req_tx_queues; i++)
- enable_scrq_irq(adapter, adapter->tx_scrq[i]);
+ for (i = 0; i < adapter->req_tx_queues; i++) {
+ if (prev_state == VNIC_CLOSED)
+ enable_irq(adapter->tx_scrq[i]->irq);
+ else
+ enable_scrq_irq(adapter, adapter->tx_scrq[i]);
+ }
rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP);
- if (rc)
- goto ibmvnic_open_fail;
+ if (rc) {
+ for (i = 0; i < adapter->req_rx_queues; i++)
+ napi_disable(&adapter->napi[i]);
+ release_resources(adapter);
+ return rc;
+ }
netif_tx_start_all_queues(netdev);
- adapter->is_closed = false;
- return 0;
+ if (prev_state == VNIC_CLOSED) {
+ for (i = 0; i < adapter->req_rx_queues; i++)
+ napi_schedule(&adapter->napi[i]);
+ }
-ibmvnic_open_fail:
- for (i = 0; i < adapter->req_rx_queues; i++)
- napi_disable(&adapter->napi[i]);
- release_resources(adapter);
- return -ENOMEM;
+ adapter->state = VNIC_OPEN;
+ return rc;
}
-static void disable_sub_crqs(struct ibmvnic_adapter *adapter)
+static int ibmvnic_open(struct net_device *netdev)
{
- int i;
+ struct ibmvnic_adapter *adapter = netdev_priv(netdev);
+ int rc;
- if (adapter->tx_scrq) {
- for (i = 0; i < adapter->req_tx_queues; i++)
- if (adapter->tx_scrq[i])
- disable_irq(adapter->tx_scrq[i]->irq);
+ mutex_lock(&adapter->reset_lock);
+
+ if (adapter->state != VNIC_CLOSED) {
+ rc = ibmvnic_login(netdev);
+ if (rc) {
+ mutex_unlock(&adapter->reset_lock);
+ return rc;
+ }
+
+ rc = init_resources(adapter);
+ if (rc) {
+ netdev_err(netdev, "failed to initialize resources\n");
+ release_resources(adapter);
+ mutex_unlock(&adapter->reset_lock);
+ return rc;
+ }
}
- if (adapter->rx_scrq) {
- for (i = 0; i < adapter->req_rx_queues; i++)
- if (adapter->rx_scrq[i])
- disable_irq(adapter->rx_scrq[i]->irq);
+ rc = __ibmvnic_open(netdev);
+ mutex_unlock(&adapter->reset_lock);
+
+ return rc;
+}
+
+static void clean_tx_pools(struct ibmvnic_adapter *adapter)
+{
+ struct ibmvnic_tx_pool *tx_pool;
+ u64 tx_entries;
+ int tx_scrqs;
+ int i, j;
+
+ if (!adapter->tx_pool)
+ return;
+
+ tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
+ tx_entries = adapter->req_tx_entries_per_subcrq;
+
+ /* Free any remaining skbs in the tx buffer pools */
+ for (i = 0; i < tx_scrqs; i++) {
+ tx_pool = &adapter->tx_pool[i];
+ if (!tx_pool)
+ continue;
+
+ for (j = 0; j < tx_entries; j++) {
+ if (tx_pool->tx_buff[j].skb) {
+ dev_kfree_skb_any(tx_pool->tx_buff[j].skb);
+ tx_pool->tx_buff[j].skb = NULL;
+ }
+ }
}
}
-static int ibmvnic_close(struct net_device *netdev)
+static int __ibmvnic_close(struct net_device *netdev)
{
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
int rc = 0;
int i;
- adapter->closing = true;
- disable_sub_crqs(adapter);
+ adapter->state = VNIC_CLOSING;
+ netif_tx_stop_all_queues(netdev);
if (adapter->napi) {
for (i = 0; i < adapter->req_rx_queues; i++)
napi_disable(&adapter->napi[i]);
}
- if (!adapter->failover)
- netif_tx_stop_all_queues(netdev);
+ clean_tx_pools(adapter);
+
+ if (adapter->tx_scrq) {
+ for (i = 0; i < adapter->req_tx_queues; i++)
+ if (adapter->tx_scrq[i]->irq)
+ disable_irq(adapter->tx_scrq[i]->irq);
+ }
rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
+ if (rc)
+ return rc;
- release_resources(adapter);
+ if (adapter->rx_scrq) {
+ for (i = 0; i < adapter->req_rx_queues; i++) {
+ int retries = 10;
+
+ while (pending_scrq(adapter, adapter->rx_scrq[i])) {
+ retries--;
+ mdelay(100);
+
+ if (retries == 0)
+ break;
+ }
+
+ if (adapter->rx_scrq[i]->irq)
+ disable_irq(adapter->rx_scrq[i]->irq);
+ }
+ }
+
+ adapter->state = VNIC_CLOSED;
+ return rc;
+}
+
+static int ibmvnic_close(struct net_device *netdev)
+{
+ struct ibmvnic_adapter *adapter = netdev_priv(netdev);
+ int rc;
+
+ mutex_lock(&adapter->reset_lock);
+ rc = __ibmvnic_close(netdev);
+ mutex_unlock(&adapter->reset_lock);
- adapter->is_closed = true;
- adapter->closing = false;
return rc;
}
@@ -901,13 +985,7 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
int index = 0;
int ret = 0;
- tx_pool = &adapter->tx_pool[queue_num];
- tx_scrq = adapter->tx_scrq[queue_num];
- txq = netdev_get_tx_queue(netdev, skb_get_queue_mapping(skb));
- handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
- be32_to_cpu(adapter->login_rsp_buf->
- off_txsubm_subcrqs));
- if (adapter->migrated) {
+ if (adapter->resetting) {
if (!netif_subqueue_stopped(netdev, skb))
netif_stop_subqueue(netdev, queue_num);
dev_kfree_skb_any(skb);
@@ -918,6 +996,12 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
goto out;
}
+ tx_pool = &adapter->tx_pool[queue_num];
+ tx_scrq = adapter->tx_scrq[queue_num];
+ txq = netdev_get_tx_queue(netdev, skb_get_queue_mapping(skb));
+ handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
+ be32_to_cpu(adapter->login_rsp_buf->off_txsubm_subcrqs));
+
index = tx_pool->free_map[tx_pool->consumer_index];
offset = index * adapter->req_mtu;
dst = tx_pool->long_term_buff.buff + offset;
@@ -1099,18 +1183,185 @@ static int ibmvnic_set_mac(struct net_device *netdev, void *p)
return 0;
}
-static void ibmvnic_tx_timeout(struct net_device *dev)
+/**
+ * do_reset returns zero if we are able to keep processing reset events, or
+ * non-zero if we hit a fatal error and must halt.
+ */
+static int do_reset(struct ibmvnic_adapter *adapter,
+ struct ibmvnic_rwi *rwi, u32 reset_state)
{
- struct ibmvnic_adapter *adapter = netdev_priv(dev);
- int rc;
+ struct net_device *netdev = adapter->netdev;
+ int i, rc;
+
+ netif_carrier_off(netdev);
+ adapter->reset_reason = rwi->reset_reason;
+
+ if (rwi->reset_reason == VNIC_RESET_MOBILITY) {
+ rc = ibmvnic_reenable_crq_queue(adapter);
+ if (rc)
+ return 0;
+ }
- /* Adapter timed out, resetting it */
+ rc = __ibmvnic_close(netdev);
+ if (rc)
+ return rc;
+
+ /* remove the closed state so when we call open it appears
+ * we are coming from the probed state.
+ */
+ adapter->state = VNIC_PROBED;
+
+ release_resources(adapter);
release_sub_crqs(adapter);
- rc = ibmvnic_reset_crq(adapter);
+ release_crq_queue(adapter);
+
+ rc = ibmvnic_init(adapter);
if (rc)
- dev_err(&adapter->vdev->dev, "Adapter timeout, reset failed\n");
- else
- ibmvnic_send_crq_init(adapter);
+ return 0;
+
+ /* If the adapter was in PROBE state prior to the reset, exit here. */
+ if (reset_state == VNIC_PROBED)
+ return 0;
+
+ rc = ibmvnic_login(netdev);
+ if (rc) {
+ adapter->state = VNIC_PROBED;
+ return 0;
+ }
+
+ rtnl_lock();
+ rc = init_resources(adapter);
+ rtnl_unlock();
+ if (rc)
+ return rc;
+
+ if (reset_state == VNIC_CLOSED)
+ return 0;
+
+ rc = __ibmvnic_open(netdev);
+ if (rc) {
+ if (list_empty(&adapter->rwi_list))
+ adapter->state = VNIC_CLOSED;
+ else
+ adapter->state = reset_state;
+
+ return 0;
+ }
+
+ netif_carrier_on(netdev);
+
+ /* kick napi */
+ for (i = 0; i < adapter->req_rx_queues; i++)
+ napi_schedule(&adapter->napi[i]);
+
+ return 0;
+}
+
+static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter)
+{
+ struct ibmvnic_rwi *rwi;
+
+ mutex_lock(&adapter->rwi_lock);
+
+ if (!list_empty(&adapter->rwi_list)) {
+ rwi = list_first_entry(&adapter->rwi_list, struct ibmvnic_rwi,
+ list);
+ list_del(&rwi->list);
+ } else {
+ rwi = NULL;
+ }
+
+ mutex_unlock(&adapter->rwi_lock);
+ return rwi;
+}
+
+static void free_all_rwi(struct ibmvnic_adapter *adapter)
+{
+ struct ibmvnic_rwi *rwi;
+
+ rwi = get_next_rwi(adapter);
+ while (rwi) {
+ kfree(rwi);
+ rwi = get_next_rwi(adapter);
+ }
+}
+
+static void __ibmvnic_reset(struct work_struct *work)
+{
+ struct ibmvnic_rwi *rwi;
+ struct ibmvnic_adapter *adapter;
+ struct net_device *netdev;
+ u32 reset_state;
+ int rc;
+
+ adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset);
+ netdev = adapter->netdev;
+
+ mutex_lock(&adapter->reset_lock);
+ adapter->resetting = true;
+ reset_state = adapter->state;
+
+ rwi = get_next_rwi(adapter);
+ while (rwi) {
+ rc = do_reset(adapter, rwi, reset_state);
+ kfree(rwi);
+ if (rc)
+ break;
+
+ rwi = get_next_rwi(adapter);
+ }
+
+ if (rc) {
+ free_all_rwi(adapter);
+ return;
+ }
+
+ adapter->resetting = false;
+ mutex_unlock(&adapter->reset_lock);
+}
+
+static void ibmvnic_reset(struct ibmvnic_adapter *adapter,
+ enum ibmvnic_reset_reason reason)
+{
+ struct ibmvnic_rwi *rwi, *tmp;
+ struct net_device *netdev = adapter->netdev;
+ struct list_head *entry;
+
+ if (adapter->state == VNIC_REMOVING ||
+ adapter->state == VNIC_REMOVED) {
+ netdev_dbg(netdev, "Adapter removing, skipping reset\n");
+ return;
+ }
+
+ mutex_lock(&adapter->rwi_lock);
+
+ list_for_each(entry, &adapter->rwi_list) {
+ tmp = list_entry(entry, struct ibmvnic_rwi, list);
+ if (tmp->reset_reason == reason) {
+ netdev_err(netdev, "Matching reset found, skipping\n");
+ mutex_unlock(&adapter->rwi_lock);
+ return;
+ }
+ }
+
+ rwi = kzalloc(sizeof(*rwi), GFP_KERNEL);
+ if (!rwi) {
+ mutex_unlock(&adapter->rwi_lock);
+ ibmvnic_close(netdev);
+ return;
+ }
+
+ rwi->reset_reason = reason;
+ list_add_tail(&rwi->list, &adapter->rwi_list);
+ mutex_unlock(&adapter->rwi_lock);
+ schedule_work(&adapter->ibmvnic_reset);
+}
+
+static void ibmvnic_tx_timeout(struct net_device *dev)
+{
+ struct ibmvnic_adapter *adapter = netdev_priv(dev);
+
+ ibmvnic_reset(adapter, VNIC_RESET_TIMEOUT);
}
static void remove_buff_from_pool(struct ibmvnic_adapter *adapter,
@@ -1153,7 +1404,7 @@ restart_poll:
/* free the entry */
next->rx_comp.first = 0;
remove_buff_from_pool(adapter, rx_buff);
- break;
+ continue;
}
length = be32_to_cpu(next->rx_comp.len);
@@ -1177,6 +1428,7 @@ restart_poll:
skb_put(skb, length);
skb->protocol = eth_type_trans(skb, netdev);
+ skb_record_rx_queue(skb, scrq_num);
if (flags & IBMVNIC_IP_CHKSUM_GOOD &&
flags & IBMVNIC_TCP_UDP_CHKSUM_GOOD) {
@@ -1557,19 +1809,8 @@ restart_loop:
}
if (txbuff->last_frag) {
- if (atomic_sub_return(next->tx_comp.num_comps,
- &scrq->used) <=
- (adapter->req_tx_entries_per_subcrq / 2) &&
- netif_subqueue_stopped(adapter->netdev,
- txbuff->skb)) {
- netif_wake_subqueue(adapter->netdev,
- scrq->pool_index);
- netdev_dbg(adapter->netdev,
- "Started queue %d\n",
- scrq->pool_index);
- }
-
dev_kfree_skb_any(txbuff->skb);
+ txbuff->skb = NULL;
}
adapter->tx_pool[pool].free_map[adapter->tx_pool[pool].
@@ -1580,6 +1821,15 @@ restart_loop:
}
/* remove tx_comp scrq*/
next->tx_comp.first = 0;
+
+ if (atomic_sub_return(next->tx_comp.num_comps, &scrq->used) <=
+ (adapter->req_tx_entries_per_subcrq / 2) &&
+ __netif_subqueue_stopped(adapter->netdev,
+ scrq->pool_index)) {
+ netif_wake_subqueue(adapter->netdev, scrq->pool_index);
+ netdev_info(adapter->netdev, "Started queue %d\n",
+ scrq->pool_index);
+ }
}
enable_scrq_irq(adapter, scrq);
@@ -1853,7 +2103,8 @@ static int pending_scrq(struct ibmvnic_adapter *adapter,
{
union sub_crq *entry = &scrq->msgs[scrq->cur];
- if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP || adapter->closing)
+ if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP ||
+ adapter->state == VNIC_CLOSING)
return 1;
else
return 0;
@@ -1991,18 +2242,6 @@ static int ibmvnic_send_crq_init(struct ibmvnic_adapter *adapter)
return ibmvnic_send_crq(adapter, &crq);
}
-static int ibmvnic_send_crq_init_complete(struct ibmvnic_adapter *adapter)
-{
- union ibmvnic_crq crq;
-
- memset(&crq, 0, sizeof(crq));
- crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
- crq.generic.cmd = IBMVNIC_CRQ_INIT_COMPLETE;
- netdev_dbg(adapter->netdev, "Sending CRQ init complete\n");
-
- return ibmvnic_send_crq(adapter, &crq);
-}
-
static int send_version_xchg(struct ibmvnic_adapter *adapter)
{
union ibmvnic_crq crq;
@@ -2500,6 +2739,9 @@ static void handle_error_indication(union ibmvnic_crq *crq,
if (be32_to_cpu(crq->error_indication.error_id))
request_error_information(adapter, crq);
+
+ if (crq->error_indication.flags & IBMVNIC_FATAL_ERROR)
+ ibmvnic_reset(adapter, VNIC_RESET_FATAL);
}
static void handle_change_mac_rsp(union ibmvnic_crq *crq,
@@ -2888,26 +3130,6 @@ out:
}
}
-static void ibmvnic_xport_event(struct work_struct *work)
-{
- struct ibmvnic_adapter *adapter = container_of(work,
- struct ibmvnic_adapter,
- ibmvnic_xport);
- struct device *dev = &adapter->vdev->dev;
- long rc;
-
- release_sub_crqs(adapter);
- if (adapter->migrated) {
- rc = ibmvnic_reenable_crq_queue(adapter);
- if (rc)
- dev_err(dev, "Error after enable rc=%ld\n", rc);
- adapter->migrated = false;
- rc = ibmvnic_send_crq_init(adapter);
- if (rc)
- dev_err(dev, "Error sending init rc=%ld\n", rc);
- }
-}
-
static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
struct ibmvnic_adapter *adapter)
{
@@ -2925,12 +3147,6 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
switch (gen_crq->cmd) {
case IBMVNIC_CRQ_INIT:
dev_info(dev, "Partner initialized\n");
- /* Send back a response */
- rc = ibmvnic_send_crq_init_complete(adapter);
- if (!rc)
- schedule_work(&adapter->vnic_crq_init);
- else
- dev_err(dev, "Can't send initrsp rc=%ld\n", rc);
break;
case IBMVNIC_CRQ_INIT_COMPLETE:
dev_info(dev, "Partner initialization complete\n");
@@ -2941,19 +3157,18 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
}
return;
case IBMVNIC_CRQ_XPORT_EVENT:
+ netif_carrier_off(netdev);
if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) {
- dev_info(dev, "Re-enabling adapter\n");
- adapter->migrated = true;
- schedule_work(&adapter->ibmvnic_xport);
+ dev_info(dev, "Migrated, re-enabling adapter\n");
+ ibmvnic_reset(adapter, VNIC_RESET_MOBILITY);
} else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) {
dev_info(dev, "Backing device failover detected\n");
- netif_carrier_off(netdev);
- adapter->failover = true;
+ ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);
} else {
/* The adapter lost the connection */
dev_err(dev, "Virtual Adapter failed (rc=%d)\n",
gen_crq->cmd);
- schedule_work(&adapter->ibmvnic_xport);
+ ibmvnic_reset(adapter, VNIC_RESET_FATAL);
}
return;
case IBMVNIC_CRQ_CMD_RSP:
@@ -3234,64 +3449,6 @@ map_failed:
return retrc;
}
-static void handle_crq_init_rsp(struct work_struct *work)
-{
- struct ibmvnic_adapter *adapter = container_of(work,
- struct ibmvnic_adapter,
- vnic_crq_init);
- struct device *dev = &adapter->vdev->dev;
- struct net_device *netdev = adapter->netdev;
- unsigned long timeout = msecs_to_jiffies(30000);
- bool restart = false;
- int rc;
-
- if (adapter->failover) {
- release_sub_crqs(adapter);
- if (netif_running(netdev)) {
- netif_tx_disable(netdev);
- ibmvnic_close(netdev);
- restart = true;
- }
- }
-
- reinit_completion(&adapter->init_done);
- send_version_xchg(adapter);
- if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
- dev_err(dev, "Passive init timeout\n");
- goto task_failed;
- }
-
- netdev->mtu = adapter->req_mtu - ETH_HLEN;
-
- if (adapter->failover) {
- adapter->failover = false;
- if (restart) {
- rc = ibmvnic_open(netdev);
- if (rc)
- goto restart_failed;
- }
- netif_carrier_on(netdev);
- return;
- }
-
- rc = register_netdev(netdev);
- if (rc) {
- dev_err(dev,
- "failed to register netdev rc=%d\n", rc);
- goto register_failed;
- }
- dev_info(dev, "ibmvnic registered\n");
-
- return;
-
-restart_failed:
- dev_err(dev, "Failed to restart ibmvnic, rc=%d\n", rc);
-register_failed:
- release_sub_crqs(adapter);
-task_failed:
- dev_err(dev, "Passive initialization was not successful\n");
-}
-
static int ibmvnic_init(struct ibmvnic_adapter *adapter)
{
struct device *dev = &adapter->vdev->dev;
@@ -3346,10 +3503,10 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
return -ENOMEM;
adapter = netdev_priv(netdev);
+ adapter->state = VNIC_PROBING;
dev_set_drvdata(&dev->dev, netdev);
adapter->vdev = dev;
adapter->netdev = netdev;
- adapter->failover = false;
ether_addr_copy(adapter->mac_addr, mac_addr_p);
ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
@@ -3358,14 +3515,17 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
netdev->ethtool_ops = &ibmvnic_ethtool_ops;
SET_NETDEV_DEV(netdev, &dev->dev);
- INIT_WORK(&adapter->vnic_crq_init, handle_crq_init_rsp);
- INIT_WORK(&adapter->ibmvnic_xport, ibmvnic_xport_event);
-
spin_lock_init(&adapter->stats_lock);
INIT_LIST_HEAD(&adapter->errors);
spin_lock_init(&adapter->error_list_lock);
+ INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
+ INIT_LIST_HEAD(&adapter->rwi_list);
+ mutex_init(&adapter->reset_lock);
+ mutex_init(&adapter->rwi_lock);
+ adapter->resetting = false;
+
rc = ibmvnic_init(adapter);
if (rc) {
free_netdev(netdev);
@@ -3373,7 +3533,6 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
}
netdev->mtu = adapter->req_mtu - ETH_HLEN;
- adapter->is_closed = false;
rc = register_netdev(netdev);
if (rc) {
@@ -3383,6 +3542,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
}
dev_info(&dev->dev, "ibmvnic registered\n");
+ adapter->state = VNIC_PROBED;
return 0;
}
@@ -3391,12 +3551,17 @@ static int ibmvnic_remove(struct vio_dev *dev)
struct net_device *netdev = dev_get_drvdata(&dev->dev);
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
+ adapter->state = VNIC_REMOVING;
unregister_netdev(netdev);
+ mutex_lock(&adapter->reset_lock);
release_resources(adapter);
release_sub_crqs(adapter);
release_crq_queue(adapter);
+ adapter->state = VNIC_REMOVED;
+
+ mutex_unlock(&adapter->reset_lock);
free_netdev(netdev);
dev_set_drvdata(&dev->dev, NULL);
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index a69979f6f19d..4702b48cfa44 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -913,6 +913,25 @@ struct ibmvnic_error_buff {
__be32 error_id;
};
+enum vnic_state {VNIC_PROBING = 1,
+ VNIC_PROBED,
+ VNIC_OPENING,
+ VNIC_OPEN,
+ VNIC_CLOSING,
+ VNIC_CLOSED,
+ VNIC_REMOVING,
+ VNIC_REMOVED};
+
+enum ibmvnic_reset_reason {VNIC_RESET_FAILOVER = 1,
+ VNIC_RESET_MOBILITY,
+ VNIC_RESET_FATAL,
+ VNIC_RESET_TIMEOUT};
+
+struct ibmvnic_rwi {
+ enum ibmvnic_reset_reason reset_reason;
+ struct list_head list;
+};
+
struct ibmvnic_adapter {
struct vio_dev *vdev;
struct net_device *netdev;
@@ -922,7 +941,6 @@ struct ibmvnic_adapter {
dma_addr_t ip_offload_tok;
struct ibmvnic_control_ip_offload_buffer ip_offload_ctrl;
dma_addr_t ip_offload_ctrl_tok;
- bool migrated;
u32 msg_enable;
/* Statistics */
@@ -962,7 +980,6 @@ struct ibmvnic_adapter {
u64 promisc;
struct ibmvnic_tx_pool *tx_pool;
- bool closing;
struct completion init_done;
int init_done_rc;
@@ -1007,9 +1024,11 @@ struct ibmvnic_adapter {
__be64 tx_rx_desc_req;
u8 map_id;
- struct work_struct vnic_crq_init;
- struct work_struct ibmvnic_xport;
struct tasklet_struct tasklet;
- bool failover;
- bool is_closed;
+ enum vnic_state state;
+ enum ibmvnic_reset_reason reset_reason;
+ struct mutex reset_lock, rwi_lock;
+ struct list_head rwi_list;
+ struct work_struct ibmvnic_reset;
+ bool resetting;
};
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c
index 68812d783f33..413025bdcb50 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.c
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.c
@@ -127,7 +127,7 @@ out:
* @offset: register offset to be read
* @data: pointer to the read data
*
- * Reads the MDI control regsiter in the PHY at offset and stores the
+ * Reads the MDI control register in the PHY at offset and stores the
* information read to data.
**/
s32 igb_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 22a29df1d29e..d39cba214320 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -7332,18 +7332,6 @@ static void ixgbe_watchdog_flush_tx(struct ixgbe_adapter *adapter)
}
#ifdef CONFIG_PCI_IOV
-static inline void ixgbe_issue_vf_flr(struct ixgbe_adapter *adapter,
- struct pci_dev *vfdev)
-{
- if (!pci_wait_for_pending_transaction(vfdev))
- e_dev_warn("Issuing VFLR with pending transactions\n");
-
- e_dev_err("Issuing VFLR for VF %s\n", pci_name(vfdev));
- pcie_capability_set_word(vfdev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
-
- msleep(100);
-}
-
static void ixgbe_check_for_bad_vf(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
@@ -7376,7 +7364,7 @@ static void ixgbe_check_for_bad_vf(struct ixgbe_adapter *adapter)
pci_read_config_word(vfdev, PCI_STATUS, &status_reg);
if (status_reg != IXGBE_FAILED_READ_CFG_WORD &&
status_reg & PCI_STATUS_REC_MASTER_ABORT)
- ixgbe_issue_vf_flr(adapter, vfdev);
+ pcie_flr(vfdev);
}
}
@@ -10602,7 +10590,7 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
* VFLR. Just clean up the AER in that case.
*/
if (vfdev) {
- ixgbe_issue_vf_flr(adapter, vfdev);
+ pcie_flr(vfdev);
/* Free device reference count */
pci_dev_put(vfdev);
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 0e0fa7030565..c1af47e45d3f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -1789,9 +1789,17 @@ static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
}
if (err) {
- if (!(dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR))
- mlx4_warn(dev, "vhcr command:0x%x slave:%d failed with error:%d, status %d\n",
- vhcr->op, slave, vhcr->errno, err);
+ if (!(dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)) {
+ if (vhcr->op == MLX4_CMD_ALLOC_RES &&
+ (vhcr->in_modifier & 0xff) == RES_COUNTER &&
+ err == -EDQUOT)
+ mlx4_dbg(dev,
+ "Unable to allocate counter for slave %d (%d)\n",
+ slave, err);
+ else
+ mlx4_warn(dev, "vhcr command:0x%x slave:%d failed with error:%d, status %d\n",
+ vhcr->op, slave, vhcr->errno, err);
+ }
vhcr_cmd->status = mlx4_errno_to_status(err);
goto out_status;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index ffbcb27c05e5..ae5fdc2df654 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -1562,6 +1562,11 @@ static int mlx4_en_flow_replace(struct net_device *dev,
qpn = priv->drop_qp.qpn;
else if (cmd->fs.ring_cookie & EN_ETHTOOL_QP_ATTACH) {
qpn = cmd->fs.ring_cookie & (EN_ETHTOOL_QP_ATTACH - 1);
+ if (qpn < priv->rss_map.base_qpn ||
+ qpn >= priv->rss_map.base_qpn + priv->rx_ring_num) {
+ en_warn(priv, "rxnfc: QP (0x%x) doesn't exist\n", qpn);
+ return -EINVAL;
+ }
} else {
if (cmd->fs.ring_cookie >= priv->rx_ring_num) {
en_warn(priv, "rxnfc: RX ring (%llu) doesn't exist\n",
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index aa074e57ce06..77abd1813047 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -997,7 +997,8 @@ void mlx4_en_calc_rx_buf(struct net_device *dev)
en_dbg(DRV, priv, "Rx buffer scatter-list (effective-mtu:%d num_frags:%d):\n",
eff_mtu, priv->num_frags);
for (i = 0; i < priv->num_frags; i++) {
- en_err(priv,
+ en_dbg(DRV,
+ priv,
" frag:%d - size:%d stride:%d\n",
i,
priv->frag_info[i].frag_size,
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 3ba89bc43d74..6ffd1849a604 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -70,13 +70,10 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
ring->full_size = ring->size - HEADROOM - MAX_DESC_TXBBS;
tmp = size * sizeof(struct mlx4_en_tx_info);
- ring->tx_info = kmalloc_node(tmp, GFP_KERNEL | __GFP_NOWARN, node);
+ ring->tx_info = kvmalloc_node(tmp, GFP_KERNEL, node);
if (!ring->tx_info) {
- ring->tx_info = vmalloc(tmp);
- if (!ring->tx_info) {
- err = -ENOMEM;
- goto err_ring;
- }
+ err = -ENOMEM;
+ goto err_ring;
}
en_dbg(DRV, priv, "Allocated tx_info ring at addr:%p size:%d\n",
diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c
index db65f72879e9..ce852ca22a96 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mr.c
@@ -115,12 +115,9 @@ static int mlx4_buddy_init(struct mlx4_buddy *buddy, int max_order)
for (i = 0; i <= buddy->max_order; ++i) {
s = BITS_TO_LONGS(1 << (buddy->max_order - i));
- buddy->bits[i] = kcalloc(s, sizeof (long), GFP_KERNEL | __GFP_NOWARN);
- if (!buddy->bits[i]) {
- buddy->bits[i] = vzalloc(s * sizeof(long));
- if (!buddy->bits[i])
- goto err_out_free;
- }
+ buddy->bits[i] = kvmalloc_array(s, sizeof(long), GFP_KERNEL | __GFP_ZERO);
+ if (!buddy->bits[i])
+ goto err_out_free;
}
set_bit(0, buddy->bits[buddy->max_order]);
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 4aa29ee93013..07516545474f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -311,7 +311,7 @@ static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave,
struct mlx4_priv *priv = mlx4_priv(dev);
struct resource_allocator *res_alloc =
&priv->mfunc.master.res_tracker.res_alloc[res_type];
- int err = -EINVAL;
+ int err = -EDQUOT;
int allocated, free, reserved, guaranteed, from_free;
int from_rsvd;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
index a84b652f9b54..fc52d742b7f7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
@@ -35,6 +35,6 @@ config MLX5_CORE_EN_DCB
config MLX5_CORE_IPOIB
bool "Mellanox Technologies ConnectX-4 IPoIB offloads support"
depends on MLX5_CORE_EN
- default y
+ default n
---help---
MLX5 IPoIB offloads & acceleration support.
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib.c
index 3c84e36af018..019c230da498 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib.c
@@ -30,6 +30,7 @@
* SOFTWARE.
*/
+#include <rdma/ib_verbs.h>
#include <linux/mlx5/fs.h>
#include "en.h"
#include "ipoib.h"
@@ -359,10 +360,10 @@ unlock:
return 0;
}
-#ifdef notusedyet
/* IPoIB RDMA netdev callbacks */
static int mlx5i_attach_mcast(struct net_device *netdev, struct ib_device *hca,
- union ib_gid *gid, u16 lid, int set_qkey)
+ union ib_gid *gid, u16 lid, int set_qkey,
+ u32 qkey)
{
struct mlx5e_priv *epriv = mlx5i_epriv(netdev);
struct mlx5_core_dev *mdev = epriv->mdev;
@@ -375,6 +376,12 @@ static int mlx5i_attach_mcast(struct net_device *netdev, struct ib_device *hca,
mlx5_core_warn(mdev, "failed attaching QPN 0x%x, MGID %pI6\n",
ipriv->qp.qpn, gid->raw);
+ if (set_qkey) {
+ mlx5_core_dbg(mdev, "%s setting qkey 0x%x\n",
+ netdev->name, qkey);
+ ipriv->qkey = qkey;
+ }
+
return err;
}
@@ -397,15 +404,15 @@ static int mlx5i_detach_mcast(struct net_device *netdev, struct ib_device *hca,
}
static int mlx5i_xmit(struct net_device *dev, struct sk_buff *skb,
- struct ib_ah *address, u32 dqpn, u32 dqkey)
+ struct ib_ah *address, u32 dqpn)
{
struct mlx5e_priv *epriv = mlx5i_epriv(dev);
struct mlx5e_txqsq *sq = epriv->txq2sq[skb_get_queue_mapping(skb)];
struct mlx5_ib_ah *mah = to_mah(address);
+ struct mlx5i_priv *ipriv = epriv->ppriv;
- return mlx5i_sq_xmit(sq, skb, &mah->av, dqpn, dqkey);
+ return mlx5i_sq_xmit(sq, skb, &mah->av, dqpn, ipriv->qkey);
}
-#endif
static int mlx5i_check_required_hca_cap(struct mlx5_core_dev *mdev)
{
@@ -414,22 +421,23 @@ static int mlx5i_check_required_hca_cap(struct mlx5_core_dev *mdev)
if (!MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads)) {
mlx5_core_warn(mdev, "IPoIB enhanced offloads are not supported\n");
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
return 0;
}
-static struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev,
- struct ib_device *ibdev,
- const char *name,
- void (*setup)(struct net_device *))
+struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev,
+ struct ib_device *ibdev,
+ const char *name,
+ void (*setup)(struct net_device *))
{
const struct mlx5e_profile *profile = &mlx5i_nic_profile;
int nch = profile->max_nch(mdev);
struct net_device *netdev;
struct mlx5i_priv *ipriv;
struct mlx5e_priv *epriv;
+ struct rdma_netdev *rn;
int err;
if (mlx5i_check_required_hca_cap(mdev)) {
@@ -464,13 +472,13 @@ static struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev,
mlx5e_attach_netdev(epriv);
netif_carrier_off(netdev);
- /* TODO: set rdma_netdev func pointers
- * rn = &ipriv->rn;
- * rn->hca = ibdev;
- * rn->send = mlx5i_xmit;
- * rn->attach_mcast = mlx5i_attach_mcast;
- * rn->detach_mcast = mlx5i_detach_mcast;
- */
+ /* set rdma_netdev func pointers */
+ rn = &ipriv->rn;
+ rn->hca = ibdev;
+ rn->send = mlx5i_xmit;
+ rn->attach_mcast = mlx5i_attach_mcast;
+ rn->detach_mcast = mlx5i_detach_mcast;
+
return netdev;
err_free_netdev:
@@ -482,7 +490,7 @@ free_mdev_resources:
}
EXPORT_SYMBOL(mlx5_rdma_netdev_alloc);
-static void mlx5_rdma_netdev_free(struct net_device *netdev)
+void mlx5_rdma_netdev_free(struct net_device *netdev)
{
struct mlx5e_priv *priv = mlx5i_epriv(netdev);
const struct mlx5e_profile *profile = priv->profile;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib.h b/drivers/net/ethernet/mellanox/mlx5/core/ipoib.h
index bae0a5cbc8ad..213191a78464 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib.h
@@ -40,7 +40,9 @@
/* ipoib rdma netdev's private data structure */
struct mlx5i_priv {
+ struct rdma_netdev rn; /* keep this first */
struct mlx5_core_qp qp;
+ u32 qkey;
char *mlx5e_priv[0];
};
diff --git a/drivers/net/ethernet/natsemi/sonic.h b/drivers/net/ethernet/natsemi/sonic.h
index 07091dd27e5d..7b0a8db57af9 100644
--- a/drivers/net/ethernet/natsemi/sonic.h
+++ b/drivers/net/ethernet/natsemi/sonic.h
@@ -444,7 +444,7 @@ static inline __u16 sonic_rra_get(struct net_device* dev, int entry,
(entry * SIZEOF_SONIC_RR) + offset);
}
-static const char *version =
+static const char version[] =
"sonic.c:v0.92 20.9.98 tsbogend@alpha.franken.de\n";
#endif /* SONIC_H */
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index db20376260f5..82bd6b0935f1 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -2532,11 +2532,11 @@ nfp_net_check_config(struct nfp_net *nn, struct nfp_net_dp *dp,
if (!dp->xdp_prog)
return 0;
if (dp->fl_bufsz > PAGE_SIZE) {
- NL_MOD_TRY_SET_ERR_MSG(extack, "MTU too large w/ XDP enabled");
+ NL_SET_ERR_MSG_MOD(extack, "MTU too large w/ XDP enabled");
return -EINVAL;
}
if (dp->num_tx_rings > nn->max_tx_rings) {
- NL_MOD_TRY_SET_ERR_MSG(extack, "Insufficient number of TX rings w/ XDP enabled");
+ NL_SET_ERR_MSG_MOD(extack, "Insufficient number of TX rings w/ XDP enabled");
return -EINVAL;
}
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index 978d32944c80..aa912f43e15f 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -4248,11 +4248,9 @@ static int nv_get_link_ksettings(struct net_device *dev,
/* We do not track link speed / duplex setting if the
* interface is disabled. Force a link check */
if (nv_update_linkspeed(dev)) {
- if (!netif_carrier_ok(dev))
- netif_carrier_on(dev);
+ netif_carrier_on(dev);
} else {
- if (netif_carrier_ok(dev))
- netif_carrier_off(dev);
+ netif_carrier_off(dev);
}
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
index b3aaa985956e..694845793af2 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
@@ -1460,6 +1460,7 @@ void qed_qm_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
params.is_first_pf = p_hwfn->first_on_engine;
params.num_pf_cids = iids.cids;
params.num_vf_cids = iids.vf_cids;
+ params.num_tids = iids.tids;
params.start_pq = qm_info->start_pq;
params.num_pf_pqs = qm_info->num_pqs - qm_info->num_vf_pqs;
params.num_vf_pqs = qm_info->num_vf_pqs;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index 5f31140d0b77..463927f17032 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -1370,7 +1370,7 @@ qed_hw_init_pf_doorbell_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
NULL) +
qed_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH,
NULL);
- norm_regsize = roundup(QED_PF_DEMS_SIZE * non_pwm_conn, 4096);
+ norm_regsize = roundup(QED_PF_DEMS_SIZE * non_pwm_conn, PAGE_SIZE);
min_addr_reg1 = norm_regsize / 4096;
pwm_regsize = db_bar_size - norm_regsize;
@@ -2536,6 +2536,9 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
DP_NOTICE(p_hwfn, "Unknown Speed in 0x%08x\n", link_temp);
}
+ p_hwfn->mcp_info->link_capabilities.default_speed_autoneg =
+ link->speed.autoneg;
+
link_temp &= NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK;
link_temp >>= NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET;
link->pause.autoneg = !!(link_temp &
@@ -3586,7 +3589,7 @@ static int qed_set_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
}
int qed_set_rxq_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
- u16 coalesce, u8 qid, u16 sb_id)
+ u16 coalesce, u16 qid, u16 sb_id)
{
struct ustorm_eth_queue_zone eth_qzone;
u8 timeset, timer_res;
@@ -3607,7 +3610,7 @@ int qed_set_rxq_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
}
timeset = (u8)(coalesce >> timer_res);
- rc = qed_fw_l2_queue(p_hwfn, (u16)qid, &fw_qid);
+ rc = qed_fw_l2_queue(p_hwfn, qid, &fw_qid);
if (rc)
return rc;
@@ -3628,7 +3631,7 @@ out:
}
int qed_set_txq_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
- u16 coalesce, u8 qid, u16 sb_id)
+ u16 coalesce, u16 qid, u16 sb_id)
{
struct xstorm_eth_queue_zone eth_qzone;
u8 timeset, timer_res;
@@ -3649,7 +3652,7 @@ int qed_set_txq_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
}
timeset = (u8)(coalesce >> timer_res);
- rc = qed_fw_l2_queue(p_hwfn, (u16)qid, &fw_qid);
+ rc = qed_fw_l2_queue(p_hwfn, qid, &fw_qid);
if (rc)
return rc;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
index cefe3ee9064a..12d16c096e36 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
@@ -454,7 +454,7 @@ int qed_final_cleanup(struct qed_hwfn *p_hwfn,
* @return int
*/
int qed_set_rxq_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
- u16 coalesce, u8 qid, u16 sb_id);
+ u16 coalesce, u16 qid, u16 sb_id);
/**
* @brief qed_set_txq_coalesce - Configure coalesce parameters for a Tx queue
@@ -471,7 +471,7 @@ int qed_set_rxq_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
* @return int
*/
int qed_set_txq_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
- u16 coalesce, u8 qid, u16 sb_id);
+ u16 coalesce, u16 qid, u16 sb_id);
const char *qed_hw_get_resc_name(enum qed_resources res_id);
#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
index 0ed24d6e6c65..40f057edeafc 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.c
@@ -3058,7 +3058,7 @@ int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
/* There's a possibility the igu_sb_cnt_iov doesn't properly reflect
* the number of VF SBs [especially for first VF on engine, as we can't
- * diffrentiate between empty entries and its entries].
+ * differentiate between empty entries and its entries].
* Since we don't really support more SBs than VFs today, prevent any
* such configuration by sanitizing the number of SBs to equal the
* number of VFs.
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index 59992cf20d42..537d1236a4fe 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -978,7 +978,7 @@ static int qed_slowpath_start(struct qed_dev *cdev,
if (rc)
goto err2;
- /* First Dword used to diffrentiate between various sources */
+ /* First Dword used to differentiate between various sources */
data = cdev->firmware->data + sizeof(u32);
qed_dbg_pf_init(cdev);
@@ -1093,10 +1093,12 @@ static int qed_slowpath_stop(struct qed_dev *cdev)
qed_free_stream_mem(cdev);
if (IS_QED_ETH_IF(cdev))
qed_sriov_disable(cdev, true);
+ }
+
+ qed_nic_stop(cdev);
- qed_nic_stop(cdev);
+ if (IS_PF(cdev))
qed_slowpath_irq_free(cdev);
- }
qed_disable_msix(cdev);
@@ -1372,7 +1374,7 @@ static void qed_fill_link(struct qed_hwfn *hwfn,
/* TODO - at the moment assume supported and advertised speed equal */
if_link->supported_caps = QED_LM_FIBRE_BIT;
- if (params.speed.autoneg)
+ if (link_caps.default_speed_autoneg)
if_link->supported_caps |= QED_LM_Autoneg_BIT;
if (params.pause.autoneg ||
(params.pause.forced_rx && params.pause.forced_tx))
@@ -1382,6 +1384,10 @@ static void qed_fill_link(struct qed_hwfn *hwfn,
if_link->supported_caps |= QED_LM_Pause_BIT;
if_link->advertised_caps = if_link->supported_caps;
+ if (params.speed.autoneg)
+ if_link->advertised_caps |= QED_LM_Autoneg_BIT;
+ else
+ if_link->advertised_caps &= ~QED_LM_Autoneg_BIT;
if (params.speed.advertised_speeds &
NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
if_link->advertised_caps |= QED_LM_1000baseT_Half_BIT |
@@ -1521,7 +1527,7 @@ static void qed_get_coalesce(struct qed_dev *cdev, u16 *rx_coal, u16 *tx_coal)
}
static int qed_set_coalesce(struct qed_dev *cdev, u16 rx_coal, u16 tx_coal,
- u8 qid, u16 sb_id)
+ u16 qid, u16 sb_id)
{
struct qed_hwfn *hwfn;
struct qed_ptt *ptt;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
index 5ae35d6cc7d1..2b09b8545236 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
@@ -61,6 +61,7 @@ struct qed_mcp_link_params {
struct qed_mcp_link_capabilities {
u32 speed_capabilities;
+ bool default_speed_autoneg;
};
struct qed_mcp_link_state {
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
index d5df29f787c5..f5ed54d611ec 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
@@ -625,7 +625,7 @@ int qed_iov_hw_info(struct qed_hwfn *p_hwfn)
* - If !ARI, VFs would start on next device.
* so offset - (256 - pf_id) would provide the number.
* Utilize the fact that (256 - pf_id) is achieved only by later
- * to diffrentiate between the two.
+ * to differentiate between the two.
*/
if (p_hwfn->cdev->p_iov_info->offset < (256 - p_hwfn->abs_pf_id)) {
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
index 4dcfe9614731..172b292241a5 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -493,6 +493,11 @@ static int qede_set_link_ksettings(struct net_device *dev,
params.override_flags |= QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS;
params.override_flags |= QED_LINK_OVERRIDE_SPEED_AUTONEG;
if (base->autoneg == AUTONEG_ENABLE) {
+ if (!(current_link.supported_caps & QED_LM_Autoneg_BIT)) {
+ DP_INFO(edev, "Auto negotiation is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
params.autoneg = true;
params.forced_speed = 0;
QEDE_ETHTOOL_TO_DRV_CAPS(params.adv_speeds, cmd, advertising)
@@ -706,8 +711,7 @@ static int qede_set_coalesce(struct net_device *dev,
{
struct qede_dev *edev = netdev_priv(dev);
int i, rc = 0;
- u16 rxc, txc;
- u8 sb_id;
+ u16 rxc, txc, sb_id;
if (!netif_running(dev)) {
DP_INFO(edev, "Interface is down\n");
@@ -729,7 +733,7 @@ static int qede_set_coalesce(struct net_device *dev,
for_each_queue(i) {
sb_id = edev->fp_array[i].sb_info->igu_sb_id;
rc = edev->ops->common->set_coalesce(edev->cdev, rxc, txc,
- (u8)i, sb_id);
+ (u16)i, sb_id);
if (rc) {
DP_INFO(edev, "Set coalesce error, rc = %d\n", rc);
return rc;
diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c
index eb5652073ca8..333876c19d7d 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_filter.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c
@@ -1028,11 +1028,6 @@ int qede_xdp(struct net_device *dev, struct netdev_xdp *xdp)
{
struct qede_dev *edev = netdev_priv(dev);
- if (IS_VF(edev)) {
- DP_NOTICE(edev, "VFs don't support XDP\n");
- return -EOPNOTSUPP;
- }
-
switch (xdp->command) {
case XDP_SETUP_PROG:
return qede_xdp_set(edev, xdp->prog);
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index b9ba23d71c61..38b77bbfe4ee 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -563,6 +563,23 @@ static const struct net_device_ops qede_netdev_ops = {
#endif
};
+static const struct net_device_ops qede_netdev_vf_ops = {
+ .ndo_open = qede_open,
+ .ndo_stop = qede_close,
+ .ndo_start_xmit = qede_start_xmit,
+ .ndo_set_rx_mode = qede_set_rx_mode,
+ .ndo_set_mac_address = qede_set_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_change_mtu = qede_change_mtu,
+ .ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
+ .ndo_set_features = qede_set_features,
+ .ndo_get_stats64 = qede_get_stats64,
+ .ndo_udp_tunnel_add = qede_udp_tunnel_add,
+ .ndo_udp_tunnel_del = qede_udp_tunnel_del,
+ .ndo_features_check = qede_features_check,
+};
+
/* -------------------------------------------------------------------------
* START OF PROBE / REMOVE
* -------------------------------------------------------------------------
@@ -622,7 +639,10 @@ static void qede_init_ndev(struct qede_dev *edev)
ndev->watchdog_timeo = TX_TIMEOUT;
- ndev->netdev_ops = &qede_netdev_ops;
+ if (IS_VF(edev))
+ ndev->netdev_ops = &qede_netdev_vf_ops;
+ else
+ ndev->netdev_ops = &qede_netdev_ops;
qede_set_ethtool_ops(ndev);
@@ -1313,6 +1333,9 @@ static void qede_free_mem_fp(struct qede_dev *edev, struct qede_fastpath *fp)
if (fp->type & QEDE_FASTPATH_RX)
qede_free_mem_rxq(edev, fp->rxq);
+ if (fp->type & QEDE_FASTPATH_XDP)
+ qede_free_mem_txq(edev, fp->xdp_tx);
+
if (fp->type & QEDE_FASTPATH_TX)
qede_free_mem_txq(edev, fp->txq);
}
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
index 829be21f97b2..28ea0af89aef 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
@@ -765,7 +765,7 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
sizeof(struct mpi_coredump_global_header);
mpi_coredump->mpi_global_header.imageSize =
sizeof(struct ql_mpi_coredump);
- memcpy(mpi_coredump->mpi_global_header.idString, "MPI Coredump",
+ strncpy(mpi_coredump->mpi_global_header.idString, "MPI Coredump",
sizeof(mpi_coredump->mpi_global_header.idString));
/* Get generic NIC reg dump */
@@ -1255,7 +1255,7 @@ static void ql_gen_reg_dump(struct ql_adapter *qdev,
sizeof(struct mpi_coredump_global_header);
mpi_coredump->mpi_global_header.imageSize =
sizeof(struct ql_reg_dump);
- memcpy(mpi_coredump->mpi_global_header.idString, "MPI Coredump",
+ strncpy(mpi_coredump->mpi_global_header.idString, "MPI Coredump",
sizeof(mpi_coredump->mpi_global_header.idString));
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
index d54490d3f7ad..1e594351a60f 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
@@ -387,7 +387,7 @@ static void sxgbe_free_rx_buffers(struct net_device *dev,
/**
* init_tx_ring - init the TX descriptor ring
* @dev: net device structure
- * @tx_ring: ring to be intialised
+ * @tx_ring: ring to be initialised
* @tx_rsize: ring size
* Description: this function initializes the DMA TX descriptor
*/
@@ -437,7 +437,7 @@ dmamem_err:
/**
* free_rx_ring - free the RX descriptor ring
* @dev: net device structure
- * @rx_ring: ring to be intialised
+ * @rx_ring: ring to be initialised
* @rx_rsize: ring size
* Description: this function initializes the DMA RX descriptor
*/
@@ -453,7 +453,7 @@ static void free_rx_ring(struct device *dev, struct sxgbe_rx_queue *rx_ring,
/**
* init_rx_ring - init the RX descriptor ring
* @dev: net device structure
- * @rx_ring: ring to be intialised
+ * @rx_ring: ring to be initialised
* @rx_rsize: ring size
* Description: this function initializes the DMA RX descriptor
*/
@@ -539,7 +539,7 @@ err_free_dma_rx:
/**
* free_tx_ring - free the TX descriptor ring
* @dev: net device structure
- * @tx_ring: ring to be intialised
+ * @tx_ring: ring to be initialised
* @tx_rsize: ring size
* Description: this function initializes the DMA TX descriptor
*/
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index fa5ca0992be6..ea1bbc355b4d 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -25,7 +25,7 @@
* LAN9215, LAN9216, LAN9217, LAN9218
* LAN9210, LAN9211
* LAN9220, LAN9221
- * LAN89218
+ * LAN89218, LAN9250
*
*/
@@ -1450,6 +1450,8 @@ static int smsc911x_soft_reset(struct smsc911x_data *pdata)
unsigned int timeout;
unsigned int temp;
int ret;
+ unsigned int reset_offset = HW_CFG;
+ unsigned int reset_mask = HW_CFG_SRST_;
/*
* Make sure to power-up the PHY chip before doing a reset, otherwise
@@ -1476,15 +1478,23 @@ static int smsc911x_soft_reset(struct smsc911x_data *pdata)
}
}
+ if ((pdata->idrev & 0xFFFF0000) == LAN9250) {
+ /* special reset for LAN9250 */
+ reset_offset = RESET_CTL;
+ reset_mask = RESET_CTL_DIGITAL_RST_;
+ }
+
/* Reset the LAN911x */
- smsc911x_reg_write(pdata, HW_CFG, HW_CFG_SRST_);
+ smsc911x_reg_write(pdata, reset_offset, reset_mask);
+
+ /* verify reset bit is cleared */
timeout = 10;
do {
udelay(10);
- temp = smsc911x_reg_read(pdata, HW_CFG);
- } while ((--timeout) && (temp & HW_CFG_SRST_));
+ temp = smsc911x_reg_read(pdata, reset_offset);
+ } while ((--timeout) && (temp & reset_mask));
- if (unlikely(temp & HW_CFG_SRST_)) {
+ if (unlikely(temp & reset_mask)) {
SMSC_WARN(pdata, drv, "Failed to complete reset");
return -EIO;
}
@@ -2253,28 +2263,29 @@ static int smsc911x_init(struct net_device *dev)
pdata->idrev = smsc911x_reg_read(pdata, ID_REV);
switch (pdata->idrev & 0xFFFF0000) {
- case 0x01180000:
- case 0x01170000:
- case 0x01160000:
- case 0x01150000:
- case 0x218A0000:
+ case LAN9118:
+ case LAN9117:
+ case LAN9116:
+ case LAN9115:
+ case LAN89218:
/* LAN911[5678] family */
pdata->generation = pdata->idrev & 0x0000FFFF;
break;
- case 0x118A0000:
- case 0x117A0000:
- case 0x116A0000:
- case 0x115A0000:
+ case LAN9218:
+ case LAN9217:
+ case LAN9216:
+ case LAN9215:
/* LAN921[5678] family */
pdata->generation = 3;
break;
- case 0x92100000:
- case 0x92110000:
- case 0x92200000:
- case 0x92210000:
- /* LAN9210/LAN9211/LAN9220/LAN9221 */
+ case LAN9210:
+ case LAN9211:
+ case LAN9220:
+ case LAN9221:
+ case LAN9250:
+ /* LAN9210/LAN9211/LAN9220/LAN9221/LAN9250 */
pdata->generation = 4;
break;
diff --git a/drivers/net/ethernet/smsc/smsc911x.h b/drivers/net/ethernet/smsc/smsc911x.h
index 54d648920a1b..8d75508acd2b 100644
--- a/drivers/net/ethernet/smsc/smsc911x.h
+++ b/drivers/net/ethernet/smsc/smsc911x.h
@@ -20,6 +20,22 @@
#ifndef __SMSC911X_H__
#define __SMSC911X_H__
+/* Chip ID */
+#define LAN9115 0x01150000
+#define LAN9116 0x01160000
+#define LAN9117 0x01170000
+#define LAN9118 0x01180000
+#define LAN9215 0x115A0000
+#define LAN9216 0x116A0000
+#define LAN9217 0x117A0000
+#define LAN9218 0x118A0000
+#define LAN9210 0x92100000
+#define LAN9211 0x92110000
+#define LAN9220 0x92200000
+#define LAN9221 0x92210000
+#define LAN9250 0x92500000
+#define LAN89218 0x218A0000
+
#define TX_FIFO_LOW_THRESHOLD ((u32)1600)
#define SMSC911X_EEPROM_SIZE ((u32)128)
#define USE_DEBUG 0
@@ -303,6 +319,9 @@
#define E2P_DATA_EEPROM_DATA_ 0x000000FF
#define LAN_REGISTER_EXTENT 0x00000100
+#define RESET_CTL 0x1F8
+#define RESET_CTL_DIGITAL_RST_ 0x00000001
+
/*
* MAC Control and Status Register (Indirect Address)
* Offset (through the MAC_CSR CMD and DATA port)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index 39be96779145..22f910795be4 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -70,11 +70,8 @@ static int stmmac_pci_find_phy_addr(struct stmmac_pci_info *info)
return -ENODEV;
}
-static void stmmac_default_data(struct plat_stmmacenet_data *plat)
+static void common_default_data(struct plat_stmmacenet_data *plat)
{
- plat->bus_id = 1;
- plat->phy_addr = 0;
- plat->interface = PHY_INTERFACE_MODE_GMII;
plat->clk_csr = 2; /* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */
plat->has_gmac = 1;
plat->force_sf_dma_mode = 1;
@@ -82,10 +79,6 @@ static void stmmac_default_data(struct plat_stmmacenet_data *plat)
plat->mdio_bus_data->phy_reset = NULL;
plat->mdio_bus_data->phy_mask = 0;
- plat->dma_cfg->pbl = 32;
- plat->dma_cfg->pblx8 = true;
- /* TODO: AXI */
-
/* Set default value for multicast hash bins */
plat->multicast_filter_bins = HASH_TABLE_SIZE;
@@ -107,12 +100,29 @@ static void stmmac_default_data(struct plat_stmmacenet_data *plat)
plat->rx_queues_cfg[0].pkt_route = 0x0;
}
+static void stmmac_default_data(struct plat_stmmacenet_data *plat)
+{
+ /* Set common default data first */
+ common_default_data(plat);
+
+ plat->bus_id = 1;
+ plat->phy_addr = 0;
+ plat->interface = PHY_INTERFACE_MODE_GMII;
+
+ plat->dma_cfg->pbl = 32;
+ plat->dma_cfg->pblx8 = true;
+ /* TODO: AXI */
+}
+
static int quark_default_data(struct plat_stmmacenet_data *plat,
struct stmmac_pci_info *info)
{
struct pci_dev *pdev = info->pdev;
int ret;
+ /* Set common default data first */
+ common_default_data(plat);
+
/*
* Refuse to load the driver and register net device if MAC controller
* does not connect to any PHY interface.
@@ -124,27 +134,12 @@ static int quark_default_data(struct plat_stmmacenet_data *plat,
plat->bus_id = PCI_DEVID(pdev->bus->number, pdev->devfn);
plat->phy_addr = ret;
plat->interface = PHY_INTERFACE_MODE_RMII;
- plat->clk_csr = 2;
- plat->has_gmac = 1;
- plat->force_sf_dma_mode = 1;
-
- plat->mdio_bus_data->phy_reset = NULL;
- plat->mdio_bus_data->phy_mask = 0;
plat->dma_cfg->pbl = 16;
plat->dma_cfg->pblx8 = true;
plat->dma_cfg->fixed_burst = 1;
/* AXI (TODO) */
- /* Set default value for multicast hash bins */
- plat->multicast_filter_bins = HASH_TABLE_SIZE;
-
- /* Set default value for unicast filter entries */
- plat->unicast_filter_entries = 1;
-
- /* Set the maxmtu to a default of JUMBO_LEN */
- plat->maxmtu = JUMBO_LEN;
-
return 0;
}
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index fa674a8bda0c..f4d7aec50479 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -287,6 +287,10 @@ struct cpsw_ss_regs {
/* Bit definitions for the CPSW1_TS_SEQ_LTYPE register */
#define CPSW_V1_SEQ_ID_OFS_SHIFT 16
+#define CPSW_MAX_BLKS_TX 15
+#define CPSW_MAX_BLKS_TX_SHIFT 4
+#define CPSW_MAX_BLKS_RX 5
+
struct cpsw_host_regs {
u32 max_blks;
u32 blk_cnt;
@@ -1278,11 +1282,23 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
switch (cpsw->version) {
case CPSW_VERSION_1:
slave_write(slave, TX_PRIORITY_MAPPING, CPSW1_TX_PRI_MAP);
+ /* Increase RX FIFO size to 5 for supporting full duplex
+ * flow control mode
+ */
+ slave_write(slave,
+ (CPSW_MAX_BLKS_TX << CPSW_MAX_BLKS_TX_SHIFT) |
+ CPSW_MAX_BLKS_RX, CPSW1_MAX_BLKS);
break;
case CPSW_VERSION_2:
case CPSW_VERSION_3:
case CPSW_VERSION_4:
slave_write(slave, TX_PRIORITY_MAPPING, CPSW2_TX_PRI_MAP);
+ /* Increase RX FIFO size to 5 for supporting full duplex
+ * flow control mode
+ */
+ slave_write(slave,
+ (CPSW_MAX_BLKS_TX << CPSW_MAX_BLKS_TX_SHIFT) |
+ CPSW_MAX_BLKS_RX, CPSW2_MAX_BLKS);
break;
}
diff --git a/drivers/net/ethernet/toshiba/tc35815.c b/drivers/net/ethernet/toshiba/tc35815.c
index 3dadee1080b9..d9db8a06afd2 100644
--- a/drivers/net/ethernet/toshiba/tc35815.c
+++ b/drivers/net/ethernet/toshiba/tc35815.c
@@ -23,7 +23,7 @@
*/
#define DRV_VERSION "1.39"
-static const char *version = "tc35815.c:v" DRV_VERSION "\n";
+static const char version[] = "tc35815.c:v" DRV_VERSION "\n";
#define MODNAME "tc35815"
#include <linux/module.h>
diff --git a/drivers/net/fddi/defxx.c b/drivers/net/fddi/defxx.c
index b0de8ecd7fe8..f4a816cf012a 100644
--- a/drivers/net/fddi/defxx.c
+++ b/drivers/net/fddi/defxx.c
@@ -228,7 +228,7 @@
#define DRV_VERSION "v1.11"
#define DRV_RELDATE "2014/07/01"
-static char version[] =
+static const char version[] =
DRV_NAME ": " DRV_VERSION " " DRV_RELDATE
" Lawrence V. Stefani and others\n";
diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c
index b6891ada1d7b..7a7c5224a336 100644
--- a/drivers/net/hamradio/yam.c
+++ b/drivers/net/hamradio/yam.c
@@ -976,12 +976,10 @@ static int yam_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
case SIOCYAMSMCS:
if (netif_running(dev))
return -EINVAL; /* Cannot change this parameter when up */
- if ((ym = kmalloc(sizeof(struct yamdrv_ioctl_mcs), GFP_KERNEL)) == NULL)
- return -ENOBUFS;
- if (copy_from_user(ym, ifr->ifr_data, sizeof(struct yamdrv_ioctl_mcs))) {
- kfree(ym);
- return -EFAULT;
- }
+ ym = memdup_user(ifr->ifr_data,
+ sizeof(struct yamdrv_ioctl_mcs));
+ if (IS_ERR(ym))
+ return PTR_ERR(ym);
if (ym->bitrate > YAM_MAXBITRATE) {
kfree(ym);
return -EINVAL;
diff --git a/drivers/net/hippi/rrunner.c b/drivers/net/hippi/rrunner.c
index dd7fc6659ad4..1ce6239a4849 100644
--- a/drivers/net/hippi/rrunner.c
+++ b/drivers/net/hippi/rrunner.c
@@ -60,7 +60,8 @@ MODULE_AUTHOR("Jes Sorensen <jes@wildopensource.com>");
MODULE_DESCRIPTION("Essential RoadRunner HIPPI driver");
MODULE_LICENSE("GPL");
-static char version[] = "rrunner.c: v0.50 11/11/2002 Jes Sorensen (jes@wildopensource.com)\n";
+static const char version[] =
+"rrunner.c: v0.50 11/11/2002 Jes Sorensen (jes@wildopensource.com)\n";
static const struct net_device_ops rr_netdev_ops = {
@@ -1615,17 +1616,14 @@ static int rr_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
return -EPERM;
}
- image = kmalloc(EEPROM_WORDS * sizeof(u32), GFP_KERNEL);
- oldimage = kmalloc(EEPROM_WORDS * sizeof(u32), GFP_KERNEL);
- if (!image || !oldimage) {
- error = -ENOMEM;
- goto wf_out;
- }
+ image = memdup_user(rq->ifr_data, EEPROM_BYTES);
+ if (IS_ERR(image))
+ return PTR_ERR(image);
- error = copy_from_user(image, rq->ifr_data, EEPROM_BYTES);
- if (error) {
- error = -EFAULT;
- goto wf_out;
+ oldimage = kmalloc(EEPROM_BYTES, GFP_KERNEL);
+ if (!oldimage) {
+ kfree(image);
+ return -ENOMEM;
}
if (rrpriv->fw_running){
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 15749d359e60..652453d9fb08 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -1322,6 +1322,10 @@ int netvsc_device_add(struct hv_device *device,
nvchan->channel = device->channel;
}
+ /* Enable NAPI handler before init callbacks */
+ netif_napi_add(ndev, &net_device->chan_table[0].napi,
+ netvsc_poll, NAPI_POLL_WEIGHT);
+
/* Open the channel */
ret = vmbus_open(device->channel, ring_size * PAGE_SIZE,
ring_size * PAGE_SIZE, NULL, 0,
@@ -1329,6 +1333,7 @@ int netvsc_device_add(struct hv_device *device,
net_device->chan_table);
if (ret != 0) {
+ netif_napi_del(&net_device->chan_table[0].napi);
netdev_err(ndev, "unable to open channel: %d\n", ret);
goto cleanup;
}
@@ -1336,9 +1341,6 @@ int netvsc_device_add(struct hv_device *device,
/* Channel is opened */
netdev_dbg(ndev, "hv_netvsc channel opened successfully\n");
- /* Enable NAPI handler for init callbacks */
- netif_napi_add(ndev, &net_device->chan_table[0].napi,
- netvsc_poll, NAPI_POLL_WEIGHT);
napi_enable(&net_device->chan_table[0].napi);
/* Writing nvdev pointer unlocks netvsc_send(), make sure chn_table is
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index ab92c3c95951..f9d5b0b8209a 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -1018,7 +1018,7 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc)
if (ret == 0)
napi_enable(&nvchan->napi);
else
- netdev_err(ndev, "sub channel open failed (%d)\n", ret);
+ netif_napi_del(&nvchan->napi);
if (refcount_dec_and_test(&nvscdev->sc_offered))
complete(&nvscdev->channel_init_wait);
diff --git a/drivers/net/phy/mdio-mux-bcm-iproc.c b/drivers/net/phy/mdio-mux-bcm-iproc.c
index 0a0412524cec..0a5f62e0efcc 100644
--- a/drivers/net/phy/mdio-mux-bcm-iproc.c
+++ b/drivers/net/phy/mdio-mux-bcm-iproc.c
@@ -203,11 +203,14 @@ static int mdio_mux_iproc_probe(struct platform_device *pdev)
&md->mux_handle, md, md->mii_bus);
if (rc) {
dev_info(md->dev, "mdiomux initialization failed\n");
- goto out;
+ goto out_register;
}
dev_info(md->dev, "iProc mdiomux registered\n");
return 0;
+
+out_register:
+ mdiobus_unregister(bus);
out:
mdiobus_free(bus);
return rc;
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index bb3f71f9fbde..b5cec1824a78 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -1088,6 +1088,7 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
u16 n = 0, index, ndplen;
u8 ready2send = 0;
u32 delayed_ndp_size;
+ size_t padding_count;
/* When our NDP gets written in cdc_ncm_ndp(), then skb_out->len gets updated
* accordingly. Otherwise, we should check here.
@@ -1244,11 +1245,13 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
* a ZLP after full sized NTBs.
*/
if (!(dev->driver_info->flags & FLAG_SEND_ZLP) &&
- skb_out->len > ctx->min_tx_pkt)
- memset(skb_put(skb_out, ctx->tx_max - skb_out->len), 0,
- ctx->tx_max - skb_out->len);
- else if (skb_out->len < ctx->tx_max && (skb_out->len % dev->maxpacket) == 0)
+ skb_out->len > ctx->min_tx_pkt) {
+ padding_count = ctx->tx_max - skb_out->len;
+ memset(skb_put(skb_out, padding_count), 0, padding_count);
+ } else if (skb_out->len < ctx->tx_max &&
+ (skb_out->len % dev->maxpacket) == 0) {
*skb_put(skb_out, 1) = 0; /* force short packet */
+ }
/* set final frame length */
nth16 = (struct usb_cdc_ncm_nth16 *)skb_out->data;
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index a3ed8115747c..d7165767ca9d 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1201,6 +1201,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */
{QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1040, 2)}, /* Telit LE922A */
+ {QMI_FIXED_INTF(0x1bc7, 0x1100, 3)}, /* Telit ME910 */
{QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1201, 2)}, /* Telit LE920, LE920A4 */
{QMI_FIXED_INTF(0x1c9e, 0x9b01, 3)}, /* XS Stick W100-2 from 4G Systems */
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 3d0bc484b3d7..1c6d3923c224 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1891,17 +1891,17 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) ||
virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) ||
virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO)) {
- NL_SET_ERR_MSG(extack, "can't set XDP while host is implementing LRO, disable LRO first");
+ NL_SET_ERR_MSG_MOD(extack, "Can't set XDP while host is implementing LRO, disable LRO first");
return -EOPNOTSUPP;
}
if (vi->mergeable_rx_bufs && !vi->any_header_sg) {
- NL_SET_ERR_MSG(extack, "XDP expects header/data in single page, any_header_sg required");
+ NL_SET_ERR_MSG_MOD(extack, "XDP expects header/data in single page, any_header_sg required");
return -EINVAL;
}
if (dev->mtu > max_sz) {
- NL_SET_ERR_MSG(extack, "MTU too large to enable XDP");
+ NL_SET_ERR_MSG_MOD(extack, "MTU too large to enable XDP");
netdev_warn(dev, "XDP requires MTU less than %lu\n", max_sz);
return -EINVAL;
}
@@ -1912,7 +1912,7 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
/* XDP requires extra queues for XDP_TX */
if (curr_qp + xdp_qp > vi->max_queue_pairs) {
- NL_SET_ERR_MSG(extack, "Too few free TX rings available");
+ NL_SET_ERR_MSG_MOD(extack, "Too few free TX rings available");
netdev_warn(dev, "request %i queues but max is %i\n",
curr_qp + xdp_qp, vi->max_queue_pairs);
return -ENOMEM;
diff --git a/drivers/net/wimax/i2400m/i2400m-usb.h b/drivers/net/wimax/i2400m/i2400m-usb.h
index 649ecad6844c..eff4f464a23e 100644
--- a/drivers/net/wimax/i2400m/i2400m-usb.h
+++ b/drivers/net/wimax/i2400m/i2400m-usb.h
@@ -131,7 +131,7 @@ static inline int edc_inc(struct edc *edc, u16 max_err, u16 timeframe)
unsigned long now;
now = jiffies;
- if (now - edc->timestart > timeframe) {
+ if (time_after(now, edc->timestart + timeframe)) {
edc->errorcount = 1;
edc->timestart = now;
} else if (++edc->errorcount > max_err) {
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
index 68fcbe03bce2..b3f20b3c0210 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
@@ -522,7 +522,7 @@ int ath9k_hw_process_rxdesc_edma(struct ath_hw *ah, struct ath_rx_status *rxs,
rxs->rs_moreaggr = (rxsp->status11 & AR_RxMoreAggr) ? 1 : 0;
rxs->rs_antenna = (MS(rxsp->status4, AR_RxAntenna) & 0x7);
rxs->enc_flags |= (rxsp->status4 & AR_GI) ? RX_ENC_FLAG_SHORT_GI : 0;
- rxs->enc_flags |= (rxsp->status4 & AR_2040) ? RX_ENC_FLAG_40MHZ : 0;
+ rxs->bw = (rxsp->status4 & AR_2040) ? RATE_INFO_BW_40 : RATE_INFO_BW_20;
rxs->evm0 = rxsp->status6;
rxs->evm1 = rxsp->status7;
diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
index 6128c2bb23d8..77c94f9e7b61 100644
--- a/drivers/net/wireless/ath/ath9k/mac.c
+++ b/drivers/net/wireless/ath/ath9k/mac.c
@@ -580,8 +580,8 @@ int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds,
/* directly mapped flags for ieee80211_rx_status */
rs->enc_flags |=
(ads.ds_rxstatus3 & AR_GI) ? RX_ENC_FLAG_SHORT_GI : 0;
- rs->enc_flags |=
- (ads.ds_rxstatus3 & AR_2040) ? RX_ENC_FLAG_40MHZ : 0;
+ rs->bw = (ads.ds_rxstatus3 & AR_2040) ? RATE_INFO_BW_40 :
+ RATE_INFO_BW_20;
if (AR_SREV_9280_20_OR_LATER(ah))
rs->enc_flags |=
(ads.ds_rxstatus3 & AR_STBC) ?
diff --git a/drivers/net/wireless/intel/iwlegacy/4965-mac.c b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
index 5d5faa3cad24..49a2ff15ddae 100644
--- a/drivers/net/wireless/intel/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
@@ -734,7 +734,9 @@ il4965_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb)
if (rate_n_flags & RATE_MCS_HT_MSK)
rx_status.encoding = RX_ENC_HT;
if (rate_n_flags & RATE_MCS_HT40_MSK)
- rx_status.enc_flags |= RX_ENC_FLAG_40MHZ;
+ rx_status.bw = RATE_INFO_BW_40;
+ else
+ rx_status.bw = RATE_INFO_BW_20;
if (rate_n_flags & RATE_MCS_SGI_MSK)
rx_status.enc_flags |= RX_ENC_FLAG_SHORT_GI;
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rx.c b/drivers/net/wireless/intel/iwlwifi/dvm/rx.c
index 1ee1ba9931a7..adfd6307edca 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/rx.c
@@ -889,7 +889,9 @@ static void iwlagn_rx_reply_rx(struct iwl_priv *priv,
if (rate_n_flags & RATE_MCS_HT_MSK)
rx_status.encoding = RX_ENC_HT;
if (rate_n_flags & RATE_MCS_HT40_MSK)
- rx_status.enc_flags |= RX_ENC_FLAG_40MHZ;
+ rx_status.bw = RATE_INFO_BW_40;
+ else
+ rx_status.bw = RATE_INFO_BW_20;
if (rate_n_flags & RATE_MCS_SGI_MSK)
rx_status.enc_flags |= RX_ENC_FLAG_SHORT_GI;
if (rate_n_flags & RATE_MCS_GF_MSK)
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 87444af20fc5..002b25cff5b6 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -1201,7 +1201,13 @@ static bool mac80211_hwsim_tx_frame_no_nl(struct ieee80211_hw *hw,
rx_status.encoding = RX_ENC_HT;
}
if (info->control.rates[0].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
- rx_status.enc_flags |= RX_ENC_FLAG_40MHZ;
+ rx_status.bw = RATE_INFO_BW_40;
+ else if (info->control.rates[0].flags & IEEE80211_TX_RC_80_MHZ_WIDTH)
+ rx_status.bw = RATE_INFO_BW_80;
+ else if (info->control.rates[0].flags & IEEE80211_TX_RC_160_MHZ_WIDTH)
+ rx_status.bw = RATE_INFO_BW_160;
+ else
+ rx_status.bw = RATE_INFO_BW_20;
if (info->control.rates[0].flags & IEEE80211_TX_RC_SHORT_GI)
rx_status.enc_flags |= RX_ENC_FLAG_SHORT_GI;
/* TODO: simulate real signal strength (and optional packet loss) */
diff --git a/drivers/nvdimm/Kconfig b/drivers/nvdimm/Kconfig
index 59e750183b7f..5bdd499b5f4f 100644
--- a/drivers/nvdimm/Kconfig
+++ b/drivers/nvdimm/Kconfig
@@ -20,6 +20,7 @@ if LIBNVDIMM
config BLK_DEV_PMEM
tristate "PMEM: Persistent memory block device support"
default LIBNVDIMM
+ select DAX
select ND_BTT if BTT
select ND_PFN if NVDIMM_PFN
help
diff --git a/drivers/nvdimm/btt_devs.c b/drivers/nvdimm/btt_devs.c
index 97dd2925ed6e..4b76af2b8715 100644
--- a/drivers/nvdimm/btt_devs.c
+++ b/drivers/nvdimm/btt_devs.c
@@ -314,7 +314,7 @@ int nd_btt_probe(struct device *dev, struct nd_namespace_common *ndns)
if (rc < 0) {
struct nd_btt *nd_btt = to_nd_btt(btt_dev);
- __nd_detach_ndns(btt_dev, &nd_btt->ndns);
+ nd_detach_ndns(btt_dev, &nd_btt->ndns);
put_device(btt_dev);
}
diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
index 351bac8f6503..e9361bffe5ee 100644
--- a/drivers/nvdimm/bus.c
+++ b/drivers/nvdimm/bus.c
@@ -27,6 +27,7 @@
#include <linux/nd.h>
#include "nd-core.h"
#include "nd.h"
+#include "pfn.h"
int nvdimm_major;
static int nvdimm_bus_major;
@@ -171,6 +172,57 @@ void nvdimm_region_notify(struct nd_region *nd_region, enum nvdimm_event event)
}
EXPORT_SYMBOL_GPL(nvdimm_region_notify);
+struct clear_badblocks_context {
+ resource_size_t phys, cleared;
+};
+
+static int nvdimm_clear_badblocks_region(struct device *dev, void *data)
+{
+ struct clear_badblocks_context *ctx = data;
+ struct nd_region *nd_region;
+ resource_size_t ndr_end;
+ sector_t sector;
+
+ /* make sure device is a region */
+ if (!is_nd_pmem(dev))
+ return 0;
+
+ nd_region = to_nd_region(dev);
+ ndr_end = nd_region->ndr_start + nd_region->ndr_size - 1;
+
+ /* make sure we are in the region */
+ if (ctx->phys < nd_region->ndr_start
+ || (ctx->phys + ctx->cleared) > ndr_end)
+ return 0;
+
+ sector = (ctx->phys - nd_region->ndr_start) / 512;
+ badblocks_clear(&nd_region->bb, sector, ctx->cleared / 512);
+
+ return 0;
+}
+
+static void nvdimm_clear_badblocks_regions(struct nvdimm_bus *nvdimm_bus,
+ phys_addr_t phys, u64 cleared)
+{
+ struct clear_badblocks_context ctx = {
+ .phys = phys,
+ .cleared = cleared,
+ };
+
+ device_for_each_child(&nvdimm_bus->dev, &ctx,
+ nvdimm_clear_badblocks_region);
+}
+
+static void nvdimm_account_cleared_poison(struct nvdimm_bus *nvdimm_bus,
+ phys_addr_t phys, u64 cleared)
+{
+ if (cleared > 0)
+ nvdimm_forget_poison(nvdimm_bus, phys, cleared);
+
+ if (cleared > 0 && cleared / 512)
+ nvdimm_clear_badblocks_regions(nvdimm_bus, phys, cleared);
+}
+
long nvdimm_clear_poison(struct device *dev, phys_addr_t phys,
unsigned int len)
{
@@ -218,7 +270,8 @@ long nvdimm_clear_poison(struct device *dev, phys_addr_t phys,
if (cmd_rc < 0)
return cmd_rc;
- nvdimm_clear_from_poison_list(nvdimm_bus, phys, len);
+ nvdimm_account_cleared_poison(nvdimm_bus, phys, clear_err.cleared);
+
return clear_err.cleared;
}
EXPORT_SYMBOL_GPL(nvdimm_clear_poison);
@@ -286,6 +339,7 @@ struct nvdimm_bus *nvdimm_bus_register(struct device *parent,
init_waitqueue_head(&nvdimm_bus->probe_wait);
nvdimm_bus->id = ida_simple_get(&nd_ida, 0, 0, GFP_KERNEL);
mutex_init(&nvdimm_bus->reconfig_mutex);
+ spin_lock_init(&nvdimm_bus->poison_lock);
if (nvdimm_bus->id < 0) {
kfree(nvdimm_bus);
return NULL;
@@ -354,9 +408,9 @@ static int nd_bus_remove(struct device *dev)
nd_synchronize();
device_for_each_child(&nvdimm_bus->dev, NULL, child_unregister);
- nvdimm_bus_lock(&nvdimm_bus->dev);
+ spin_lock(&nvdimm_bus->poison_lock);
free_poison_list(&nvdimm_bus->poison_list);
- nvdimm_bus_unlock(&nvdimm_bus->dev);
+ spin_unlock(&nvdimm_bus->poison_lock);
nvdimm_bus_destroy_ndctl(nvdimm_bus);
@@ -769,16 +823,55 @@ void wait_nvdimm_bus_probe_idle(struct device *dev)
} while (true);
}
-static int pmem_active(struct device *dev, void *data)
+static int nd_pmem_forget_poison_check(struct device *dev, void *data)
{
- if (is_nd_pmem(dev) && dev->driver)
+ struct nd_cmd_clear_error *clear_err =
+ (struct nd_cmd_clear_error *)data;
+ struct nd_btt *nd_btt = is_nd_btt(dev) ? to_nd_btt(dev) : NULL;
+ struct nd_pfn *nd_pfn = is_nd_pfn(dev) ? to_nd_pfn(dev) : NULL;
+ struct nd_dax *nd_dax = is_nd_dax(dev) ? to_nd_dax(dev) : NULL;
+ struct nd_namespace_common *ndns = NULL;
+ struct nd_namespace_io *nsio;
+ resource_size_t offset = 0, end_trunc = 0, start, end, pstart, pend;
+
+ if (nd_dax || !dev->driver)
+ return 0;
+
+ start = clear_err->address;
+ end = clear_err->address + clear_err->cleared - 1;
+
+ if (nd_btt || nd_pfn || nd_dax) {
+ if (nd_btt)
+ ndns = nd_btt->ndns;
+ else if (nd_pfn)
+ ndns = nd_pfn->ndns;
+ else if (nd_dax)
+ ndns = nd_dax->nd_pfn.ndns;
+
+ if (!ndns)
+ return 0;
+ } else
+ ndns = to_ndns(dev);
+
+ nsio = to_nd_namespace_io(&ndns->dev);
+ pstart = nsio->res.start + offset;
+ pend = nsio->res.end - end_trunc;
+
+ if ((pstart >= start) && (pend <= end))
return -EBUSY;
+
return 0;
+
+}
+
+static int nd_ns_forget_poison_check(struct device *dev, void *data)
+{
+ return device_for_each_child(dev, data, nd_pmem_forget_poison_check);
}
/* set_config requires an idle interleave set */
static int nd_cmd_clear_to_send(struct nvdimm_bus *nvdimm_bus,
- struct nvdimm *nvdimm, unsigned int cmd)
+ struct nvdimm *nvdimm, unsigned int cmd, void *data)
{
struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
@@ -792,8 +885,8 @@ static int nd_cmd_clear_to_send(struct nvdimm_bus *nvdimm_bus,
/* require clear error to go through the pmem driver */
if (!nvdimm && cmd == ND_CMD_CLEAR_ERROR)
- return device_for_each_child(&nvdimm_bus->dev, NULL,
- pmem_active);
+ return device_for_each_child(&nvdimm_bus->dev, data,
+ nd_ns_forget_poison_check);
if (!nvdimm || cmd != ND_CMD_SET_CONFIG_DATA)
return 0;
@@ -820,7 +913,7 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
const char *cmd_name, *dimm_name;
unsigned long cmd_mask;
void *buf;
- int rc, i;
+ int rc, i, cmd_rc;
if (nvdimm) {
desc = nd_cmd_dimm_desc(cmd);
@@ -927,13 +1020,20 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
}
nvdimm_bus_lock(&nvdimm_bus->dev);
- rc = nd_cmd_clear_to_send(nvdimm_bus, nvdimm, cmd);
+ rc = nd_cmd_clear_to_send(nvdimm_bus, nvdimm, cmd, buf);
if (rc)
goto out_unlock;
- rc = nd_desc->ndctl(nd_desc, nvdimm, cmd, buf, buf_len, NULL);
+ rc = nd_desc->ndctl(nd_desc, nvdimm, cmd, buf, buf_len, &cmd_rc);
if (rc < 0)
goto out_unlock;
+
+ if (!nvdimm && cmd == ND_CMD_CLEAR_ERROR && cmd_rc >= 0) {
+ struct nd_cmd_clear_error *clear_err = buf;
+
+ nvdimm_account_cleared_poison(nvdimm_bus, clear_err->address,
+ clear_err->cleared);
+ }
nvdimm_bus_unlock(&nvdimm_bus->dev);
if (copy_to_user(p, buf, buf_len))
diff --git a/drivers/nvdimm/claim.c b/drivers/nvdimm/claim.c
index ca6d572c48fc..93d128da1c92 100644
--- a/drivers/nvdimm/claim.c
+++ b/drivers/nvdimm/claim.c
@@ -21,8 +21,13 @@
void __nd_detach_ndns(struct device *dev, struct nd_namespace_common **_ndns)
{
struct nd_namespace_common *ndns = *_ndns;
+ struct nvdimm_bus *nvdimm_bus;
- lockdep_assert_held(&ndns->dev.mutex);
+ if (!ndns)
+ return;
+
+ nvdimm_bus = walk_to_nvdimm_bus(&ndns->dev);
+ lockdep_assert_held(&nvdimm_bus->reconfig_mutex);
dev_WARN_ONCE(dev, ndns->claim != dev, "%s: invalid claim\n", __func__);
ndns->claim = NULL;
*_ndns = NULL;
@@ -37,18 +42,20 @@ void nd_detach_ndns(struct device *dev,
if (!ndns)
return;
get_device(&ndns->dev);
- device_lock(&ndns->dev);
+ nvdimm_bus_lock(&ndns->dev);
__nd_detach_ndns(dev, _ndns);
- device_unlock(&ndns->dev);
+ nvdimm_bus_unlock(&ndns->dev);
put_device(&ndns->dev);
}
bool __nd_attach_ndns(struct device *dev, struct nd_namespace_common *attach,
struct nd_namespace_common **_ndns)
{
+ struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&attach->dev);
+
if (attach->claim)
return false;
- lockdep_assert_held(&attach->dev.mutex);
+ lockdep_assert_held(&nvdimm_bus->reconfig_mutex);
dev_WARN_ONCE(dev, *_ndns, "%s: invalid claim\n", __func__);
attach->claim = dev;
*_ndns = attach;
@@ -61,9 +68,9 @@ bool nd_attach_ndns(struct device *dev, struct nd_namespace_common *attach,
{
bool claimed;
- device_lock(&attach->dev);
+ nvdimm_bus_lock(&attach->dev);
claimed = __nd_attach_ndns(dev, attach, _ndns);
- device_unlock(&attach->dev);
+ nvdimm_bus_unlock(&attach->dev);
return claimed;
}
@@ -114,7 +121,7 @@ static void nd_detach_and_reset(struct device *dev,
struct nd_namespace_common **_ndns)
{
/* detach the namespace and destroy / reset the device */
- nd_detach_ndns(dev, _ndns);
+ __nd_detach_ndns(dev, _ndns);
if (is_idle(dev, *_ndns)) {
nd_device_unregister(dev, ND_ASYNC);
} else if (is_nd_btt(dev)) {
@@ -184,7 +191,7 @@ ssize_t nd_namespace_store(struct device *dev,
}
WARN_ON_ONCE(!is_nvdimm_bus_locked(dev));
- if (!nd_attach_ndns(dev, ndns, _ndns)) {
+ if (!__nd_attach_ndns(dev, ndns, _ndns)) {
dev_dbg(dev, "%s already claimed\n",
dev_name(&ndns->dev));
len = -EBUSY;
@@ -239,22 +246,24 @@ static int nsio_rw_bytes(struct nd_namespace_common *ndns,
if (rw == READ) {
if (unlikely(is_bad_pmem(&nsio->bb, sector, sz_align)))
return -EIO;
- return memcpy_from_pmem(buf, nsio->addr + offset, size);
+ return memcpy_mcsafe(buf, nsio->addr + offset, size);
}
if (unlikely(is_bad_pmem(&nsio->bb, sector, sz_align))) {
/*
* FIXME: nsio_rw_bytes() may be called from atomic
- * context in the btt case and nvdimm_clear_poison()
- * takes a sleeping lock. Until the locking can be
- * reworked this capability requires that the namespace
- * is not claimed by btt.
+ * context in the btt case and the ACPI DSM path for
+ * clearing the error takes sleeping locks and allocates
+ * memory. An explicit error clearing path, and support
+ * for tracking badblocks in BTT metadata, are needed to
+ * work around this collision.
*/
if (IS_ALIGNED(offset, 512) && IS_ALIGNED(size, 512)
&& (!ndns->claim || !is_nd_btt(ndns->claim))) {
long cleared;
- cleared = nvdimm_clear_poison(&ndns->dev, offset, size);
+ cleared = nvdimm_clear_poison(&ndns->dev,
+ nsio->res.start + offset, size);
if (cleared < size)
rc = -EIO;
if (cleared > 0 && cleared / 512) {
diff --git a/drivers/nvdimm/core.c b/drivers/nvdimm/core.c
index 9303cfeb8bee..2dee908e4bae 100644
--- a/drivers/nvdimm/core.c
+++ b/drivers/nvdimm/core.c
@@ -518,6 +518,15 @@ void nvdimm_badblocks_populate(struct nd_region *nd_region,
}
EXPORT_SYMBOL_GPL(nvdimm_badblocks_populate);
+static void append_poison_entry(struct nvdimm_bus *nvdimm_bus,
+ struct nd_poison *pl, u64 addr, u64 length)
+{
+ lockdep_assert_held(&nvdimm_bus->poison_lock);
+ pl->start = addr;
+ pl->length = length;
+ list_add_tail(&pl->list, &nvdimm_bus->poison_list);
+}
+
static int add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length,
gfp_t flags)
{
@@ -527,19 +536,24 @@ static int add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length,
if (!pl)
return -ENOMEM;
- pl->start = addr;
- pl->length = length;
- list_add_tail(&pl->list, &nvdimm_bus->poison_list);
-
+ append_poison_entry(nvdimm_bus, pl, addr, length);
return 0;
}
static int bus_add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length)
{
- struct nd_poison *pl;
+ struct nd_poison *pl, *pl_new;
- if (list_empty(&nvdimm_bus->poison_list))
- return add_poison(nvdimm_bus, addr, length, GFP_KERNEL);
+ spin_unlock(&nvdimm_bus->poison_lock);
+ pl_new = kzalloc(sizeof(*pl_new), GFP_KERNEL);
+ spin_lock(&nvdimm_bus->poison_lock);
+
+ if (list_empty(&nvdimm_bus->poison_list)) {
+ if (!pl_new)
+ return -ENOMEM;
+ append_poison_entry(nvdimm_bus, pl_new, addr, length);
+ return 0;
+ }
/*
* There is a chance this is a duplicate, check for those first.
@@ -551,6 +565,7 @@ static int bus_add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length)
/* If length has changed, update this list entry */
if (pl->length != length)
pl->length = length;
+ kfree(pl_new);
return 0;
}
@@ -559,29 +574,33 @@ static int bus_add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length)
* as any overlapping ranges will get resolved when the list is consumed
* and converted to badblocks
*/
- return add_poison(nvdimm_bus, addr, length, GFP_KERNEL);
+ if (!pl_new)
+ return -ENOMEM;
+ append_poison_entry(nvdimm_bus, pl_new, addr, length);
+
+ return 0;
}
int nvdimm_bus_add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length)
{
int rc;
- nvdimm_bus_lock(&nvdimm_bus->dev);
+ spin_lock(&nvdimm_bus->poison_lock);
rc = bus_add_poison(nvdimm_bus, addr, length);
- nvdimm_bus_unlock(&nvdimm_bus->dev);
+ spin_unlock(&nvdimm_bus->poison_lock);
return rc;
}
EXPORT_SYMBOL_GPL(nvdimm_bus_add_poison);
-void nvdimm_clear_from_poison_list(struct nvdimm_bus *nvdimm_bus,
- phys_addr_t start, unsigned int len)
+void nvdimm_forget_poison(struct nvdimm_bus *nvdimm_bus, phys_addr_t start,
+ unsigned int len)
{
struct list_head *poison_list = &nvdimm_bus->poison_list;
u64 clr_end = start + len - 1;
struct nd_poison *pl, *next;
- nvdimm_bus_lock(&nvdimm_bus->dev);
+ spin_lock(&nvdimm_bus->poison_lock);
WARN_ON_ONCE(list_empty(poison_list));
/*
@@ -628,15 +647,15 @@ void nvdimm_clear_from_poison_list(struct nvdimm_bus *nvdimm_bus,
u64 new_len = pl_end - new_start + 1;
/* Add new entry covering the right half */
- add_poison(nvdimm_bus, new_start, new_len, GFP_NOIO);
+ add_poison(nvdimm_bus, new_start, new_len, GFP_NOWAIT);
/* Adjust this entry to cover the left half */
pl->length = start - pl->start;
continue;
}
}
- nvdimm_bus_unlock(&nvdimm_bus->dev);
+ spin_unlock(&nvdimm_bus->poison_lock);
}
-EXPORT_SYMBOL_GPL(nvdimm_clear_from_poison_list);
+EXPORT_SYMBOL_GPL(nvdimm_forget_poison);
#ifdef CONFIG_BLK_DEV_INTEGRITY
int nd_integrity_init(struct gendisk *disk, unsigned long meta_size)
diff --git a/drivers/nvdimm/dax_devs.c b/drivers/nvdimm/dax_devs.c
index 45fa82cae87c..c1b6556aea6e 100644
--- a/drivers/nvdimm/dax_devs.c
+++ b/drivers/nvdimm/dax_devs.c
@@ -124,7 +124,7 @@ int nd_dax_probe(struct device *dev, struct nd_namespace_common *ndns)
dev_dbg(dev, "%s: dax: %s\n", __func__,
rc == 0 ? dev_name(dax_dev) : "<none>");
if (rc < 0) {
- __nd_detach_ndns(dax_dev, &nd_pfn->ndns);
+ nd_detach_ndns(dax_dev, &nd_pfn->ndns);
put_device(dax_dev);
} else
__nd_device_register(dax_dev);
diff --git a/drivers/nvdimm/dimm.c b/drivers/nvdimm/dimm.c
index ee0b412827bf..e0f0e3ce1a32 100644
--- a/drivers/nvdimm/dimm.c
+++ b/drivers/nvdimm/dimm.c
@@ -49,6 +49,8 @@ static int nvdimm_probe(struct device *dev)
kref_init(&ndd->kref);
rc = nvdimm_init_nsarea(ndd);
+ if (rc == -EACCES)
+ nvdimm_set_locked(dev);
if (rc)
goto err;
diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c
index 8b721321be5b..9852a3355509 100644
--- a/drivers/nvdimm/dimm_devs.c
+++ b/drivers/nvdimm/dimm_devs.c
@@ -34,7 +34,7 @@ int nvdimm_check_config_data(struct device *dev)
if (!nvdimm->cmd_mask ||
!test_bit(ND_CMD_GET_CONFIG_DATA, &nvdimm->cmd_mask)) {
- if (nvdimm->flags & NDD_ALIASING)
+ if (test_bit(NDD_ALIASING, &nvdimm->flags))
return -ENXIO;
else
return -ENOTTY;
@@ -67,6 +67,7 @@ int nvdimm_init_nsarea(struct nvdimm_drvdata *ndd)
struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
struct nvdimm_bus_descriptor *nd_desc;
int rc = validate_dimm(ndd);
+ int cmd_rc = 0;
if (rc)
return rc;
@@ -76,8 +77,11 @@ int nvdimm_init_nsarea(struct nvdimm_drvdata *ndd)
memset(cmd, 0, sizeof(*cmd));
nd_desc = nvdimm_bus->nd_desc;
- return nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
- ND_CMD_GET_CONFIG_SIZE, cmd, sizeof(*cmd), NULL);
+ rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
+ ND_CMD_GET_CONFIG_SIZE, cmd, sizeof(*cmd), &cmd_rc);
+ if (rc < 0)
+ return rc;
+ return cmd_rc;
}
int nvdimm_init_config_data(struct nvdimm_drvdata *ndd)
@@ -102,10 +106,7 @@ int nvdimm_init_config_data(struct nvdimm_drvdata *ndd)
return -ENXIO;
}
- ndd->data = kmalloc(ndd->nsarea.config_size, GFP_KERNEL);
- if (!ndd->data)
- ndd->data = vmalloc(ndd->nsarea.config_size);
-
+ ndd->data = kvmalloc(ndd->nsarea.config_size, GFP_KERNEL);
if (!ndd->data)
return -ENOMEM;
@@ -188,7 +189,14 @@ void nvdimm_set_aliasing(struct device *dev)
{
struct nvdimm *nvdimm = to_nvdimm(dev);
- nvdimm->flags |= NDD_ALIASING;
+ set_bit(NDD_ALIASING, &nvdimm->flags);
+}
+
+void nvdimm_set_locked(struct device *dev)
+{
+ struct nvdimm *nvdimm = to_nvdimm(dev);
+
+ set_bit(NDD_LOCKED, &nvdimm->flags);
}
static void nvdimm_release(struct device *dev)
diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
index 1b481a5fb966..2f9dfbd2dbec 100644
--- a/drivers/nvdimm/namespace_devs.c
+++ b/drivers/nvdimm/namespace_devs.c
@@ -2236,14 +2236,21 @@ static int init_active_labels(struct nd_region *nd_region)
int count, j;
/*
- * If the dimm is disabled then prevent the region from
- * being activated if it aliases DPA.
+ * If the dimm is disabled then we may need to prevent
+ * the region from being activated.
*/
if (!ndd) {
- if ((nvdimm->flags & NDD_ALIASING) == 0)
+ if (test_bit(NDD_LOCKED, &nvdimm->flags))
+ /* fail, label data may be unreadable */;
+ else if (test_bit(NDD_ALIASING, &nvdimm->flags))
+ /* fail, labels needed to disambiguate dpa */;
+ else
return 0;
- dev_dbg(&nd_region->dev, "%s: is disabled, failing probe\n",
- dev_name(&nd_mapping->nvdimm->dev));
+
+ dev_err(&nd_region->dev, "%s: is %s, failing probe\n",
+ dev_name(&nd_mapping->nvdimm->dev),
+ test_bit(NDD_LOCKED, &nvdimm->flags)
+ ? "locked" : "disabled");
return -ENXIO;
}
nd_mapping->ndd = ndd;
diff --git a/drivers/nvdimm/nd-core.h b/drivers/nvdimm/nd-core.h
index 8623e57c2ce3..4c4bd209e725 100644
--- a/drivers/nvdimm/nd-core.h
+++ b/drivers/nvdimm/nd-core.h
@@ -32,6 +32,7 @@ struct nvdimm_bus {
struct list_head poison_list;
struct list_head mapping_list;
struct mutex reconfig_mutex;
+ spinlock_t poison_lock;
};
struct nvdimm {
diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h
index 2a99c83aa19f..77d032192bf7 100644
--- a/drivers/nvdimm/nd.h
+++ b/drivers/nvdimm/nd.h
@@ -154,6 +154,7 @@ struct nd_region {
u64 ndr_start;
int id, num_lanes, ro, numa_node;
void *provider_data;
+ struct badblocks bb;
struct nd_interleave_set *nd_set;
struct nd_percpu_lane __percpu *lane;
struct nd_mapping mapping[0];
@@ -239,6 +240,7 @@ int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset,
long nvdimm_clear_poison(struct device *dev, phys_addr_t phys,
unsigned int len);
void nvdimm_set_aliasing(struct device *dev);
+void nvdimm_set_locked(struct device *dev);
struct nd_btt *to_nd_btt(struct device *dev);
struct nd_gen_sb {
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
index 6c033c9a2f06..335c8175410b 100644
--- a/drivers/nvdimm/pfn_devs.c
+++ b/drivers/nvdimm/pfn_devs.c
@@ -484,7 +484,7 @@ int nd_pfn_probe(struct device *dev, struct nd_namespace_common *ndns)
dev_dbg(dev, "%s: pfn: %s\n", __func__,
rc == 0 ? dev_name(pfn_dev) : "<none>");
if (rc < 0) {
- __nd_detach_ndns(pfn_dev, &nd_pfn->ndns);
+ nd_detach_ndns(pfn_dev, &nd_pfn->ndns);
put_device(pfn_dev);
} else
__nd_device_register(pfn_dev);
@@ -538,7 +538,8 @@ static struct vmem_altmap *__nvdimm_setup_pfn(struct nd_pfn *nd_pfn,
nd_pfn->npfns = le64_to_cpu(pfn_sb->npfns);
altmap = NULL;
} else if (nd_pfn->mode == PFN_MODE_PMEM) {
- nd_pfn->npfns = (resource_size(res) - offset) / PAGE_SIZE;
+ nd_pfn->npfns = PFN_SECTION_ALIGN_UP((resource_size(res)
+ - offset) / PAGE_SIZE);
if (le64_to_cpu(nd_pfn->pfn_sb->npfns) > nd_pfn->npfns)
dev_info(&nd_pfn->dev,
"number of pfns truncated from %lld to %ld\n",
@@ -625,7 +626,8 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
*/
start += start_pad;
size = resource_size(&nsio->res);
- npfns = (size - start_pad - end_trunc - SZ_8K) / SZ_4K;
+ npfns = PFN_SECTION_ALIGN_UP((size - start_pad - end_trunc - SZ_8K)
+ / PAGE_SIZE);
if (nd_pfn->mode == PFN_MODE_PMEM) {
/*
* vmemmap_populate_hugepages() allocates the memmap array in
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index fbc640bf06b0..c544d466ea51 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -29,6 +29,7 @@
#include <linux/pfn_t.h>
#include <linux/slab.h>
#include <linux/pmem.h>
+#include <linux/dax.h>
#include <linux/nd.h>
#include "pmem.h"
#include "pfn.h"
@@ -89,7 +90,7 @@ static int read_pmem(struct page *page, unsigned int off,
int rc;
void *mem = kmap_atomic(page);
- rc = memcpy_from_pmem(mem + off, pmem_addr, len);
+ rc = memcpy_mcsafe(mem + off, pmem_addr, len);
kunmap_atomic(mem);
if (rc)
return -EIO;
@@ -200,13 +201,13 @@ static int pmem_rw_page(struct block_device *bdev, sector_t sector,
}
/* see "strong" declaration in tools/testing/nvdimm/pmem-dax.c */
-__weak long pmem_direct_access(struct block_device *bdev, sector_t sector,
- void **kaddr, pfn_t *pfn, long size)
+__weak long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,
+ long nr_pages, void **kaddr, pfn_t *pfn)
{
- struct pmem_device *pmem = bdev->bd_queue->queuedata;
- resource_size_t offset = sector * 512 + pmem->data_offset;
+ resource_size_t offset = PFN_PHYS(pgoff) + pmem->data_offset;
- if (unlikely(is_bad_pmem(&pmem->bb, sector, size)))
+ if (unlikely(is_bad_pmem(&pmem->bb, PFN_PHYS(pgoff) / 512,
+ PFN_PHYS(nr_pages))))
return -EIO;
*kaddr = pmem->virt_addr + offset;
*pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);
@@ -216,17 +217,28 @@ __weak long pmem_direct_access(struct block_device *bdev, sector_t sector,
* requested range.
*/
if (unlikely(pmem->bb.count))
- return size;
- return pmem->size - pmem->pfn_pad - offset;
+ return nr_pages;
+ return PHYS_PFN(pmem->size - pmem->pfn_pad - offset);
}
static const struct block_device_operations pmem_fops = {
.owner = THIS_MODULE,
.rw_page = pmem_rw_page,
- .direct_access = pmem_direct_access,
.revalidate_disk = nvdimm_revalidate_disk,
};
+static long pmem_dax_direct_access(struct dax_device *dax_dev,
+ pgoff_t pgoff, long nr_pages, void **kaddr, pfn_t *pfn)
+{
+ struct pmem_device *pmem = dax_get_private(dax_dev);
+
+ return __pmem_direct_access(pmem, pgoff, nr_pages, kaddr, pfn);
+}
+
+static const struct dax_operations pmem_dax_ops = {
+ .direct_access = pmem_dax_direct_access,
+};
+
static void pmem_release_queue(void *q)
{
blk_cleanup_queue(q);
@@ -237,10 +249,14 @@ static void pmem_freeze_queue(void *q)
blk_freeze_queue_start(q);
}
-static void pmem_release_disk(void *disk)
+static void pmem_release_disk(void *__pmem)
{
- del_gendisk(disk);
- put_disk(disk);
+ struct pmem_device *pmem = __pmem;
+
+ kill_dax(pmem->dax_dev);
+ put_dax(pmem->dax_dev);
+ del_gendisk(pmem->disk);
+ put_disk(pmem->disk);
}
static int pmem_attach_disk(struct device *dev,
@@ -251,6 +267,7 @@ static int pmem_attach_disk(struct device *dev,
struct vmem_altmap __altmap, *altmap = NULL;
struct resource *res = &nsio->res;
struct nd_pfn *nd_pfn = NULL;
+ struct dax_device *dax_dev;
int nid = dev_to_node(dev);
struct nd_pfn_sb *pfn_sb;
struct pmem_device *pmem;
@@ -334,6 +351,7 @@ static int pmem_attach_disk(struct device *dev,
disk = alloc_disk_node(0, nid);
if (!disk)
return -ENOMEM;
+ pmem->disk = disk;
disk->fops = &pmem_fops;
disk->queue = q;
@@ -345,9 +363,16 @@ static int pmem_attach_disk(struct device *dev,
return -ENOMEM;
nvdimm_badblocks_populate(nd_region, &pmem->bb, res);
disk->bb = &pmem->bb;
- device_add_disk(dev, disk);
- if (devm_add_action_or_reset(dev, pmem_release_disk, disk))
+ dax_dev = alloc_dax(pmem, disk->disk_name, &pmem_dax_ops);
+ if (!dax_dev) {
+ put_disk(disk);
+ return -ENOMEM;
+ }
+ pmem->dax_dev = dax_dev;
+
+ device_add_disk(dev, disk);
+ if (devm_add_action_or_reset(dev, pmem_release_disk, pmem))
return -ENOMEM;
revalidate_disk(disk);
@@ -397,12 +422,12 @@ static void nd_pmem_shutdown(struct device *dev)
static void nd_pmem_notify(struct device *dev, enum nvdimm_event event)
{
- struct pmem_device *pmem = dev_get_drvdata(dev);
- struct nd_region *nd_region = to_region(pmem);
+ struct nd_region *nd_region;
resource_size_t offset = 0, end_trunc = 0;
struct nd_namespace_common *ndns;
struct nd_namespace_io *nsio;
struct resource res;
+ struct badblocks *bb;
if (event != NVDIMM_REVALIDATE_POISON)
return;
@@ -411,20 +436,33 @@ static void nd_pmem_notify(struct device *dev, enum nvdimm_event event)
struct nd_btt *nd_btt = to_nd_btt(dev);
ndns = nd_btt->ndns;
- } else if (is_nd_pfn(dev)) {
- struct nd_pfn *nd_pfn = to_nd_pfn(dev);
- struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
+ nd_region = to_nd_region(ndns->dev.parent);
+ nsio = to_nd_namespace_io(&ndns->dev);
+ bb = &nsio->bb;
+ } else {
+ struct pmem_device *pmem = dev_get_drvdata(dev);
- ndns = nd_pfn->ndns;
- offset = pmem->data_offset + __le32_to_cpu(pfn_sb->start_pad);
- end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
- } else
- ndns = to_ndns(dev);
+ nd_region = to_region(pmem);
+ bb = &pmem->bb;
+
+ if (is_nd_pfn(dev)) {
+ struct nd_pfn *nd_pfn = to_nd_pfn(dev);
+ struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
+
+ ndns = nd_pfn->ndns;
+ offset = pmem->data_offset +
+ __le32_to_cpu(pfn_sb->start_pad);
+ end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
+ } else {
+ ndns = to_ndns(dev);
+ }
+
+ nsio = to_nd_namespace_io(&ndns->dev);
+ }
- nsio = to_nd_namespace_io(&ndns->dev);
res.start = nsio->res.start + offset;
res.end = nsio->res.end - end_trunc;
- nvdimm_badblocks_populate(nd_region, &pmem->bb, &res);
+ nvdimm_badblocks_populate(nd_region, bb, &res);
}
MODULE_ALIAS("pmem");
diff --git a/drivers/nvdimm/pmem.h b/drivers/nvdimm/pmem.h
index b4ee4f71b4a1..7f4dbd72a90a 100644
--- a/drivers/nvdimm/pmem.h
+++ b/drivers/nvdimm/pmem.h
@@ -5,8 +5,6 @@
#include <linux/pfn_t.h>
#include <linux/fs.h>
-long pmem_direct_access(struct block_device *bdev, sector_t sector,
- void **kaddr, pfn_t *pfn, long size);
/* this definition is in it's own header for tools/testing/nvdimm to consume */
struct pmem_device {
/* One contiguous memory region per device */
@@ -20,5 +18,10 @@ struct pmem_device {
/* trim size when namespace capacity has been section aligned */
u32 pfn_pad;
struct badblocks bb;
+ struct dax_device *dax_dev;
+ struct gendisk *disk;
};
+
+long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,
+ long nr_pages, void **kaddr, pfn_t *pfn);
#endif /* __NVDIMM_PMEM_H__ */
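
A minimal sketch of how a caller might exercise the new dax_operations path through dax_direct_access(); the dax_device is assumed to be obtained elsewhere (for example from the dax core), and the helper name is illustrative rather than part of this patch.

#include <linux/dax.h>
#include <linux/pfn_t.h>

/* Illustrative helper, not from the patch: peek at the first page of a
 * pmem-backed dax_device through the new ->direct_access() path.
 */
static int pmem_example_peek_first_page(struct dax_device *dax_dev)
{
        void *kaddr;
        pfn_t pfn;
        long avail;

        avail = dax_direct_access(dax_dev, 0 /* pgoff */, 1 /* nr_pages */,
                        &kaddr, &pfn);
        if (avail < 0)
                return avail;   /* e.g. -EIO when the range has badblocks */

        /* kaddr now maps the first page of the namespace data area */
        return 0;
}
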
diff --git a/drivers/nvdimm/region.c b/drivers/nvdimm/region.c
index 8f241772ec0b..869a886c292e 100644
--- a/drivers/nvdimm/region.c
+++ b/drivers/nvdimm/region.c
@@ -14,6 +14,7 @@
#include <linux/module.h>
#include <linux/device.h>
#include <linux/nd.h>
+#include "nd-core.h"
#include "nd.h"
static int nd_region_probe(struct device *dev)
@@ -52,6 +53,17 @@ static int nd_region_probe(struct device *dev)
if (rc && err && rc == err)
return -ENODEV;
+ if (is_nd_pmem(&nd_region->dev)) {
+ struct resource ndr_res;
+
+ if (devm_init_badblocks(dev, &nd_region->bb))
+ return -ENODEV;
+ ndr_res.start = nd_region->ndr_start;
+ ndr_res.end = nd_region->ndr_start + nd_region->ndr_size - 1;
+ nvdimm_badblocks_populate(nd_region,
+ &nd_region->bb, &ndr_res);
+ }
+
nd_region->btt_seed = nd_btt_create(nd_region);
nd_region->pfn_seed = nd_pfn_create(nd_region);
nd_region->dax_seed = nd_dax_create(nd_region);
@@ -104,6 +116,18 @@ static int child_notify(struct device *dev, void *data)
static void nd_region_notify(struct device *dev, enum nvdimm_event event)
{
+ if (event == NVDIMM_REVALIDATE_POISON) {
+ struct nd_region *nd_region = to_nd_region(dev);
+ struct resource res;
+
+ if (is_nd_pmem(&nd_region->dev)) {
+ res.start = nd_region->ndr_start;
+ res.end = nd_region->ndr_start +
+ nd_region->ndr_size - 1;
+ nvdimm_badblocks_populate(nd_region,
+ &nd_region->bb, &res);
+ }
+ }
device_for_each_child(dev, &event, child_notify);
}
diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
index b7cb5066d961..b550edf2571f 100644
--- a/drivers/nvdimm/region_devs.c
+++ b/drivers/nvdimm/region_devs.c
@@ -222,7 +222,7 @@ int nd_region_to_nstype(struct nd_region *nd_region)
struct nd_mapping *nd_mapping = &nd_region->mapping[i];
struct nvdimm *nvdimm = nd_mapping->nvdimm;
- if (nvdimm->flags & NDD_ALIASING)
+ if (test_bit(NDD_ALIASING, &nvdimm->flags))
alias++;
}
if (alias)
@@ -255,6 +255,35 @@ static ssize_t size_show(struct device *dev,
}
static DEVICE_ATTR_RO(size);
+static ssize_t deep_flush_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nd_region *nd_region = to_nd_region(dev);
+
+ /*
+ * NOTE: in the nvdimm_has_flush() error case this attribute is
+ * not visible.
+ */
+ return sprintf(buf, "%d\n", nvdimm_has_flush(nd_region));
+}
+
+static ssize_t deep_flush_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ bool flush;
+ int rc = strtobool(buf, &flush);
+ struct nd_region *nd_region = to_nd_region(dev);
+
+ if (rc)
+ return rc;
+ if (!flush)
+ return -EINVAL;
+ nvdimm_flush(nd_region);
+
+ return len;
+}
+static DEVICE_ATTR_RW(deep_flush);
+
static ssize_t mappings_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -448,6 +477,25 @@ static ssize_t read_only_store(struct device *dev,
}
static DEVICE_ATTR_RW(read_only);
+static ssize_t region_badblocks_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nd_region *nd_region = to_nd_region(dev);
+
+ return badblocks_show(&nd_region->bb, buf, 0);
+}
+
+static DEVICE_ATTR(badblocks, 0444, region_badblocks_show, NULL);
+
+static ssize_t resource_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nd_region *nd_region = to_nd_region(dev);
+
+ return sprintf(buf, "%#llx\n", nd_region->ndr_start);
+}
+static DEVICE_ATTR_RO(resource);
+
static struct attribute *nd_region_attributes[] = {
&dev_attr_size.attr,
&dev_attr_nstype.attr,
@@ -455,11 +503,14 @@ static struct attribute *nd_region_attributes[] = {
&dev_attr_btt_seed.attr,
&dev_attr_pfn_seed.attr,
&dev_attr_dax_seed.attr,
+ &dev_attr_deep_flush.attr,
&dev_attr_read_only.attr,
&dev_attr_set_cookie.attr,
&dev_attr_available_size.attr,
&dev_attr_namespace_seed.attr,
&dev_attr_init_namespaces.attr,
+ &dev_attr_badblocks.attr,
+ &dev_attr_resource.attr,
NULL,
};
@@ -476,6 +527,23 @@ static umode_t region_visible(struct kobject *kobj, struct attribute *a, int n)
if (!is_nd_pmem(dev) && a == &dev_attr_dax_seed.attr)
return 0;
+ if (!is_nd_pmem(dev) && a == &dev_attr_badblocks.attr)
+ return 0;
+
+ if (!is_nd_pmem(dev) && a == &dev_attr_resource.attr)
+ return 0;
+
+ if (a == &dev_attr_deep_flush.attr) {
+ int has_flush = nvdimm_has_flush(nd_region);
+
+ if (has_flush == 1)
+ return a->mode;
+ else if (has_flush == 0)
+ return 0444;
+ else
+ return 0;
+ }
+
if (a != &dev_attr_set_cookie.attr
&& a != &dev_attr_available_size.attr)
return a->mode;
@@ -813,7 +881,7 @@ static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus,
return NULL;
}
- if (nvdimm->flags & NDD_UNARMED)
+ if (test_bit(NDD_UNARMED, &nvdimm->flags))
ro = 1;
}
@@ -968,17 +1036,20 @@ EXPORT_SYMBOL_GPL(nvdimm_flush);
*/
int nvdimm_has_flush(struct nd_region *nd_region)
{
- struct nd_region_data *ndrd = dev_get_drvdata(&nd_region->dev);
int i;
/* no nvdimm == flushing capability unknown */
if (nd_region->ndr_mappings == 0)
return -ENXIO;
- for (i = 0; i < nd_region->ndr_mappings; i++)
- /* flush hints present, flushing required */
- if (ndrd_get_flush_wpq(ndrd, i, 0))
+ for (i = 0; i < nd_region->ndr_mappings; i++) {
+ struct nd_mapping *nd_mapping = &nd_region->mapping[i];
+ struct nvdimm *nvdimm = nd_mapping->nvdimm;
+
+ /* flush hints present / available */
+ if (nvdimm->num_flush)
return 1;
+ }
/*
* The platform defines dimm devices without hints, assume
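
A small userspace sketch of driving the new deep_flush region attribute; the sysfs path is illustrative and would normally be discovered via libndctl or sysfs enumeration.

#include <fcntl.h>
#include <unistd.h>

int main(void)
{
        /* illustrative path; pick the region of interest at runtime */
        int fd = open("/sys/bus/nd/devices/region0/deep_flush", O_WRONLY);
        int ret = 1;

        if (fd < 0)
                return 1;
        /* writing "1" triggers nvdimm_flush(); "0" is rejected with -EINVAL */
        if (write(fd, "1", 1) == 1)
                ret = 0;
        close(fd);
        return ret;
}
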
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 4976db56e351..70e689bf1cad 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -1172,12 +1172,12 @@ __nvme_fc_exit_request(struct nvme_fc_ctrl *ctrl,
}
static void
-nvme_fc_exit_request(void *data, struct request *rq,
- unsigned int hctx_idx, unsigned int rq_idx)
+nvme_fc_exit_request(struct blk_mq_tag_set *set, struct request *rq,
+ unsigned int hctx_idx)
{
struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
- return __nvme_fc_exit_request(data, op);
+ return __nvme_fc_exit_request(set->driver_data, op);
}
static int
@@ -1434,11 +1434,10 @@ out_on_error:
}
static int
-nvme_fc_init_request(void *data, struct request *rq,
- unsigned int hctx_idx, unsigned int rq_idx,
- unsigned int numa_node)
+nvme_fc_init_request(struct blk_mq_tag_set *set, struct request *rq,
+ unsigned int hctx_idx, unsigned int numa_node)
{
- struct nvme_fc_ctrl *ctrl = data;
+ struct nvme_fc_ctrl *ctrl = set->driver_data;
struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
struct nvme_fc_queue *queue = &ctrl->queues[hctx_idx+1];
@@ -1446,11 +1445,10 @@ nvme_fc_init_request(void *data, struct request *rq,
}
static int
-nvme_fc_init_admin_request(void *data, struct request *rq,
- unsigned int hctx_idx, unsigned int rq_idx,
- unsigned int numa_node)
+nvme_fc_init_admin_request(struct blk_mq_tag_set *set, struct request *rq,
+ unsigned int hctx_idx, unsigned int numa_node)
{
- struct nvme_fc_ctrl *ctrl = data;
+ struct nvme_fc_ctrl *ctrl = set->driver_data;
struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
struct nvme_fc_queue *queue = &ctrl->queues[0];
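
The conversions in this and the following nvme files all follow the same blk-mq interface change: the init/exit_request callbacks now receive the tag set, and the driver context comes from set->driver_data instead of a separate void *data argument. A hedged sketch of the new callback shape, with illustrative foo_* names that are not part of the patch:

#include <linux/blk-mq.h>

struct foo_ctrl { int instance; };
struct foo_cmd { struct foo_ctrl *ctrl; };

static int foo_init_request(struct blk_mq_tag_set *set, struct request *rq,
                unsigned int hctx_idx, unsigned int numa_node)
{
        struct foo_ctrl *ctrl = set->driver_data; /* was the old void *data */
        struct foo_cmd *cmd = blk_mq_rq_to_pdu(rq);

        cmd->ctrl = ctrl;       /* per-request setup */
        return 0;
}

static const struct blk_mq_ops foo_mq_ops = {
        .init_request   = foo_init_request,
        /* .queue_rq, .exit_request, ... elided */
};
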
diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c
index e4e4e60b1224..8c4adac6fafc 100644
--- a/drivers/nvme/host/lightnvm.c
+++ b/drivers/nvme/host/lightnvm.c
@@ -503,6 +503,8 @@ static int nvme_nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
if (!cmd)
return -ENOMEM;
+ nvme_nvm_rqtocmd(rq, rqd, ns, cmd);
+
rq = nvme_alloc_request(q, (struct nvme_command *)cmd, 0, NVME_QID_ANY);
if (IS_ERR(rq)) {
kfree(cmd);
@@ -517,8 +519,6 @@ static int nvme_nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
rq->__data_len = 0;
}
- nvme_nvm_rqtocmd(rq, rqd, ns, cmd);
-
rq->end_io_data = rqd;
blk_execute_rq_nowait(q, NULL, rq, 0, nvme_nvm_end_io);
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index c8541c3dcd19..fed803232edc 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -132,7 +132,6 @@ static inline struct nvme_dev *to_nvme_dev(struct nvme_ctrl *ctrl)
struct nvme_queue {
struct device *q_dmadev;
struct nvme_dev *dev;
- char irqname[24]; /* nvme4294967295-65535\0 */
spinlock_t q_lock;
struct nvme_command *sq_cmds;
struct nvme_command __iomem *sq_cmds_io;
@@ -329,11 +328,6 @@ static unsigned int nvme_cmd_size(struct nvme_dev *dev)
nvme_iod_alloc_size(dev, NVME_INT_BYTES(dev), NVME_INT_PAGES);
}
-static int nvmeq_irq(struct nvme_queue *nvmeq)
-{
- return pci_irq_vector(to_pci_dev(nvmeq->dev->dev), nvmeq->cq_vector);
-}
-
static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
unsigned int hctx_idx)
{
@@ -356,11 +350,11 @@ static void nvme_admin_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_i
nvmeq->tags = NULL;
}
-static int nvme_admin_init_request(void *data, struct request *req,
- unsigned int hctx_idx, unsigned int rq_idx,
- unsigned int numa_node)
+static int nvme_admin_init_request(struct blk_mq_tag_set *set,
+ struct request *req, unsigned int hctx_idx,
+ unsigned int numa_node)
{
- struct nvme_dev *dev = data;
+ struct nvme_dev *dev = set->driver_data;
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
struct nvme_queue *nvmeq = dev->queues[0];
@@ -383,11 +377,10 @@ static int nvme_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
return 0;
}
-static int nvme_init_request(void *data, struct request *req,
- unsigned int hctx_idx, unsigned int rq_idx,
- unsigned int numa_node)
+static int nvme_init_request(struct blk_mq_tag_set *set, struct request *req,
+ unsigned int hctx_idx, unsigned int numa_node)
{
- struct nvme_dev *dev = data;
+ struct nvme_dev *dev = set->driver_data;
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
struct nvme_queue *nvmeq = dev->queues[hctx_idx + 1];
@@ -1079,7 +1072,7 @@ static int nvme_suspend_queue(struct nvme_queue *nvmeq)
spin_unlock_irq(&nvmeq->q_lock);
return 1;
}
- vector = nvmeq_irq(nvmeq);
+ vector = nvmeq->cq_vector;
nvmeq->dev->online_queues--;
nvmeq->cq_vector = -1;
spin_unlock_irq(&nvmeq->q_lock);
@@ -1087,7 +1080,7 @@ static int nvme_suspend_queue(struct nvme_queue *nvmeq)
if (!nvmeq->qid && nvmeq->dev->ctrl.admin_q)
blk_mq_stop_hw_queues(nvmeq->dev->ctrl.admin_q);
- free_irq(vector, nvmeq);
+ pci_free_irq(to_pci_dev(nvmeq->dev->dev), vector, nvmeq);
return 0;
}
@@ -1172,8 +1165,6 @@ static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
nvmeq->q_dmadev = dev->dev;
nvmeq->dev = dev;
- snprintf(nvmeq->irqname, sizeof(nvmeq->irqname), "nvme%dq%d",
- dev->ctrl.instance, qid);
spin_lock_init(&nvmeq->q_lock);
nvmeq->cq_head = 0;
nvmeq->cq_phase = 1;
@@ -1196,12 +1187,16 @@ static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
static int queue_request_irq(struct nvme_queue *nvmeq)
{
- if (use_threaded_interrupts)
- return request_threaded_irq(nvmeq_irq(nvmeq), nvme_irq_check,
- nvme_irq, IRQF_SHARED, nvmeq->irqname, nvmeq);
- else
- return request_irq(nvmeq_irq(nvmeq), nvme_irq, IRQF_SHARED,
- nvmeq->irqname, nvmeq);
+ struct pci_dev *pdev = to_pci_dev(nvmeq->dev->dev);
+ int nr = nvmeq->dev->ctrl.instance;
+
+ if (use_threaded_interrupts) {
+ return pci_request_irq(pdev, nvmeq->cq_vector, nvme_irq_check,
+ nvme_irq, nvmeq, "nvme%dq%d", nr, nvmeq->qid);
+ } else {
+ return pci_request_irq(pdev, nvmeq->cq_vector, nvme_irq,
+ NULL, nvmeq, "nvme%dq%d", nr, nvmeq->qid);
+ }
}
static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
@@ -1558,7 +1553,7 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
}
/* Deregister the admin queue's interrupt */
- free_irq(pci_irq_vector(pdev, 0), adminq);
+ pci_free_irq(pdev, 0, adminq);
/*
* If we enable msix early due to not intx, disable it again before
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 29cf88ac3f61..dd1c6deef82f 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -315,16 +315,16 @@ static void __nvme_rdma_exit_request(struct nvme_rdma_ctrl *ctrl,
DMA_TO_DEVICE);
}
-static void nvme_rdma_exit_request(void *data, struct request *rq,
- unsigned int hctx_idx, unsigned int rq_idx)
+static void nvme_rdma_exit_request(struct blk_mq_tag_set *set,
+ struct request *rq, unsigned int hctx_idx)
{
- return __nvme_rdma_exit_request(data, rq, hctx_idx + 1);
+ return __nvme_rdma_exit_request(set->driver_data, rq, hctx_idx + 1);
}
-static void nvme_rdma_exit_admin_request(void *data, struct request *rq,
- unsigned int hctx_idx, unsigned int rq_idx)
+static void nvme_rdma_exit_admin_request(struct blk_mq_tag_set *set,
+ struct request *rq, unsigned int hctx_idx)
{
- return __nvme_rdma_exit_request(data, rq, 0);
+ return __nvme_rdma_exit_request(set->driver_data, rq, 0);
}
static int __nvme_rdma_init_request(struct nvme_rdma_ctrl *ctrl,
@@ -358,18 +358,18 @@ out_free_qe:
return -ENOMEM;
}
-static int nvme_rdma_init_request(void *data, struct request *rq,
- unsigned int hctx_idx, unsigned int rq_idx,
- unsigned int numa_node)
+static int nvme_rdma_init_request(struct blk_mq_tag_set *set,
+ struct request *rq, unsigned int hctx_idx,
+ unsigned int numa_node)
{
- return __nvme_rdma_init_request(data, rq, hctx_idx + 1);
+ return __nvme_rdma_init_request(set->driver_data, rq, hctx_idx + 1);
}
-static int nvme_rdma_init_admin_request(void *data, struct request *rq,
- unsigned int hctx_idx, unsigned int rq_idx,
- unsigned int numa_node)
+static int nvme_rdma_init_admin_request(struct blk_mq_tag_set *set,
+ struct request *rq, unsigned int hctx_idx,
+ unsigned int numa_node)
{
- return __nvme_rdma_init_request(data, rq, 0);
+ return __nvme_rdma_init_request(set->driver_data, rq, 0);
}
static int nvme_rdma_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index 304f1c87c160..feb497134aee 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -230,18 +230,19 @@ static int nvme_loop_init_iod(struct nvme_loop_ctrl *ctrl,
return 0;
}
-static int nvme_loop_init_request(void *data, struct request *req,
- unsigned int hctx_idx, unsigned int rq_idx,
- unsigned int numa_node)
+static int nvme_loop_init_request(struct blk_mq_tag_set *set,
+ struct request *req, unsigned int hctx_idx,
+ unsigned int numa_node)
{
- return nvme_loop_init_iod(data, blk_mq_rq_to_pdu(req), hctx_idx + 1);
+ return nvme_loop_init_iod(set->driver_data, blk_mq_rq_to_pdu(req),
+ hctx_idx + 1);
}
-static int nvme_loop_init_admin_request(void *data, struct request *req,
- unsigned int hctx_idx, unsigned int rq_idx,
- unsigned int numa_node)
+static int nvme_loop_init_admin_request(struct blk_mq_tag_set *set,
+ struct request *req, unsigned int hctx_idx,
+ unsigned int numa_node)
{
- return nvme_loop_init_iod(data, blk_mq_rq_to_pdu(req), 0);
+ return nvme_loop_init_iod(set->driver_data, blk_mq_rq_to_pdu(req), 0);
}
static int nvme_loop_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
diff --git a/drivers/nvmem/Kconfig b/drivers/nvmem/Kconfig
index 650f1b1797ad..101ced4c84be 100644
--- a/drivers/nvmem/Kconfig
+++ b/drivers/nvmem/Kconfig
@@ -13,6 +13,17 @@ menuconfig NVMEM
if NVMEM
+config NVMEM_IMX_IIM
+ tristate "i.MX IC Identification Module support"
+ depends on ARCH_MXC || COMPILE_TEST
+ help
+ This is a driver for the IC Identification Module (IIM) available on
+ i.MX SoCs, providing access to 4 Kbits of programmable
+ eFuses.
+
+ This driver can also be built as a module. If so, the module
+ will be called nvmem-imx-iim.
+
config NVMEM_IMX_OCOTP
tristate "i.MX6 On-Chip OTP Controller support"
depends on SOC_IMX6 || COMPILE_TEST
diff --git a/drivers/nvmem/Makefile b/drivers/nvmem/Makefile
index 86e45995fdad..173140658693 100644
--- a/drivers/nvmem/Makefile
+++ b/drivers/nvmem/Makefile
@@ -8,6 +8,8 @@ nvmem_core-y := core.o
# Devices
obj-$(CONFIG_NVMEM_BCM_OCOTP) += nvmem-bcm-ocotp.o
nvmem-bcm-ocotp-y := bcm-ocotp.o
+obj-$(CONFIG_NVMEM_IMX_IIM) += nvmem-imx-iim.o
+nvmem-imx-iim-y := imx-iim.o
obj-$(CONFIG_NVMEM_IMX_OCOTP) += nvmem-imx-ocotp.o
nvmem-imx-ocotp-y := imx-ocotp.o
obj-$(CONFIG_NVMEM_LPC18XX_EEPROM) += nvmem_lpc18xx_eeprom.o
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
index 408b521ee520..8c830a80a648 100644
--- a/drivers/nvmem/core.c
+++ b/drivers/nvmem/core.c
@@ -468,7 +468,8 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
np = config->dev->of_node;
nvmem->dev.of_node = np;
dev_set_name(&nvmem->dev, "%s%d",
- config->name ? : "nvmem", config->id);
+ config->name ? : "nvmem",
+ config->name ? config->id : nvmem->id);
nvmem->read_only = of_property_read_bool(np, "read-only") |
config->read_only;
diff --git a/drivers/nvmem/imx-iim.c b/drivers/nvmem/imx-iim.c
new file mode 100644
index 000000000000..52ff65e0673f
--- /dev/null
+++ b/drivers/nvmem/imx-iim.c
@@ -0,0 +1,173 @@
+/*
+ * i.MX IIM driver
+ *
+ * Copyright (c) 2017 Pengutronix, Michael Grzeschik <m.grzeschik@pengutronix.de>
+ *
+ * Based on the barebox iim driver,
+ * Copyright (c) 2010 Baruch Siach <baruch@tkos.co.il>,
+ * Orex Computed Radiography
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/nvmem-provider.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+
+#define IIM_BANK_BASE(n) (0x800 + 0x400 * (n))
+
+struct imx_iim_drvdata {
+ unsigned int nregs;
+};
+
+struct iim_priv {
+ void __iomem *base;
+ struct clk *clk;
+ struct nvmem_config nvmem;
+};
+
+static int imx_iim_read(void *context, unsigned int offset,
+ void *buf, size_t bytes)
+{
+ struct iim_priv *iim = context;
+ int i, ret;
+ u8 *buf8 = buf;
+
+ ret = clk_prepare_enable(iim->clk);
+ if (ret)
+ return ret;
+
+ for (i = offset; i < offset + bytes; i++) {
+ int bank = i >> 5;
+ int reg = i & 0x1f;
+
+ *buf8++ = readl(iim->base + IIM_BANK_BASE(bank) + reg * 4);
+ }
+
+ clk_disable_unprepare(iim->clk);
+
+ return 0;
+}
+
+static struct imx_iim_drvdata imx27_drvdata = {
+ .nregs = 2 * 32,
+};
+
+static struct imx_iim_drvdata imx25_imx31_imx35_drvdata = {
+ .nregs = 3 * 32,
+};
+
+static struct imx_iim_drvdata imx51_drvdata = {
+ .nregs = 4 * 32,
+};
+
+static struct imx_iim_drvdata imx53_drvdata = {
+ .nregs = 4 * 32 + 16,
+};
+
+static const struct of_device_id imx_iim_dt_ids[] = {
+ {
+ .compatible = "fsl,imx25-iim",
+ .data = &imx25_imx31_imx35_drvdata,
+ }, {
+ .compatible = "fsl,imx27-iim",
+ .data = &imx27_drvdata,
+ }, {
+ .compatible = "fsl,imx31-iim",
+ .data = &imx25_imx31_imx35_drvdata,
+ }, {
+ .compatible = "fsl,imx35-iim",
+ .data = &imx25_imx31_imx35_drvdata,
+ }, {
+ .compatible = "fsl,imx51-iim",
+ .data = &imx51_drvdata,
+ }, {
+ .compatible = "fsl,imx53-iim",
+ .data = &imx53_drvdata,
+ }, {
+ /* sentinel */
+ },
+};
+MODULE_DEVICE_TABLE(of, imx_iim_dt_ids);
+
+static int imx_iim_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *of_id;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ struct iim_priv *iim;
+ struct nvmem_device *nvmem;
+ struct nvmem_config *cfg;
+ const struct imx_iim_drvdata *drvdata = NULL;
+
+ iim = devm_kzalloc(dev, sizeof(*iim), GFP_KERNEL);
+ if (!iim)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ iim->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(iim->base))
+ return PTR_ERR(iim->base);
+
+ of_id = of_match_device(imx_iim_dt_ids, dev);
+ if (!of_id)
+ return -ENODEV;
+
+ drvdata = of_id->data;
+
+ iim->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(iim->clk))
+ return PTR_ERR(iim->clk);
+
+ cfg = &iim->nvmem;
+
+ cfg->name = "imx-iim";
+ cfg->read_only = true;
+ cfg->word_size = 1;
+ cfg->stride = 1;
+ cfg->owner = THIS_MODULE;
+ cfg->reg_read = imx_iim_read;
+ cfg->dev = dev;
+ cfg->size = drvdata->nregs;
+ cfg->priv = iim;
+
+ nvmem = nvmem_register(cfg);
+ if (IS_ERR(nvmem))
+ return PTR_ERR(nvmem);
+
+ platform_set_drvdata(pdev, nvmem);
+
+ return 0;
+}
+
+static int imx_iim_remove(struct platform_device *pdev)
+{
+ struct nvmem_device *nvmem = platform_get_drvdata(pdev);
+
+ return nvmem_unregister(nvmem);
+}
+
+static struct platform_driver imx_iim_driver = {
+ .probe = imx_iim_probe,
+ .remove = imx_iim_remove,
+ .driver = {
+ .name = "imx-iim",
+ .of_match_table = imx_iim_dt_ids,
+ },
+};
+module_platform_driver(imx_iim_driver);
+
+MODULE_AUTHOR("Michael Grzeschik <m.grzeschik@pengutronix.de>");
+MODULE_DESCRIPTION("i.MX IIM driver");
+MODULE_LICENSE("GPL v2");
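
A hedged consumer-side sketch of reading the IIM fuses exposed by this driver through the nvmem API; the lookup name and the consumer device are illustrative, not defined by the patch.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/nvmem-consumer.h>

static int example_read_iim_fuses(struct device *dev)
{
        struct nvmem_device *nvmem;
        u8 fuses[4];
        int ret;

        /* "imx-iim0" is illustrative; it follows the nvmem_config name above */
        nvmem = nvmem_device_get(dev, "imx-iim0");
        if (IS_ERR(nvmem))
                return PTR_ERR(nvmem);

        ret = nvmem_device_read(nvmem, 0, sizeof(fuses), fuses);
        nvmem_device_put(nvmem);

        return ret < 0 ? ret : 0;
}
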
diff --git a/drivers/nvmem/imx-ocotp.c b/drivers/nvmem/imx-ocotp.c
index b8ca1e677b01..193ca8fd350a 100644
--- a/drivers/nvmem/imx-ocotp.c
+++ b/drivers/nvmem/imx-ocotp.c
@@ -7,6 +7,9 @@
* Copyright (c) 2010 Baruch Siach <baruch@tkos.co.il>,
* Orex Computed Radiography
*
+ * Write support based on the fsl_otp driver,
+ * Copyright (C) 2010-2013 Freescale Semiconductor, Inc
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation.
@@ -24,14 +27,88 @@
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
+#include <linux/delay.h>
+
+#define IMX_OCOTP_OFFSET_B0W0 0x400 /* Offset from base address of the
+ * OTP Bank0 Word0
+ */
+#define IMX_OCOTP_OFFSET_PER_WORD 0x10 /* Offset between the start addr
+ * of two consecutive OTP words.
+ */
+
+#define IMX_OCOTP_ADDR_CTRL 0x0000
+#define IMX_OCOTP_ADDR_CTRL_SET 0x0004
+#define IMX_OCOTP_ADDR_CTRL_CLR 0x0008
+#define IMX_OCOTP_ADDR_TIMING 0x0010
+#define IMX_OCOTP_ADDR_DATA 0x0020
+
+#define IMX_OCOTP_BM_CTRL_ADDR 0x0000007F
+#define IMX_OCOTP_BM_CTRL_BUSY 0x00000100
+#define IMX_OCOTP_BM_CTRL_ERROR 0x00000200
+#define IMX_OCOTP_BM_CTRL_REL_SHADOWS 0x00000400
+
+#define DEF_RELAX 20 /* > 16.5ns */
+#define IMX_OCOTP_WR_UNLOCK 0x3E770000
+#define IMX_OCOTP_READ_LOCKED_VAL 0xBADABADA
+
+static DEFINE_MUTEX(ocotp_mutex);
struct ocotp_priv {
struct device *dev;
struct clk *clk;
void __iomem *base;
unsigned int nregs;
+ struct nvmem_config *config;
};
+static int imx_ocotp_wait_for_busy(void __iomem *base, u32 flags)
+{
+ int count;
+ u32 c, mask;
+
+ mask = IMX_OCOTP_BM_CTRL_BUSY | IMX_OCOTP_BM_CTRL_ERROR | flags;
+
+ for (count = 10000; count >= 0; count--) {
+ c = readl(base + IMX_OCOTP_ADDR_CTRL);
+ if (!(c & mask))
+ break;
+ cpu_relax();
+ }
+
+ if (count < 0) {
+ /* HW_OCOTP_CTRL[ERROR] will be set under the following
+ * conditions:
+ * - A write is performed to a shadow register during a shadow
+ * reload (essentially, while HW_OCOTP_CTRL[RELOAD_SHADOWS] is
+ * set). In addition, the contents of the shadow register shall
+ * not be updated.
+ * - A write is performed to a shadow register which has been
+ * locked.
+ * - A read is performed from a shadow register which has
+ * been read locked.
+ * - A program is performed to a fuse word which has been locked.
+ * - A read is performed from a fuse word which has been read
+ * locked.
+ */
+ if (c & IMX_OCOTP_BM_CTRL_ERROR)
+ return -EPERM;
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static void imx_ocotp_clr_err_if_set(void __iomem *base)
+{
+ u32 c;
+
+ c = readl(base + IMX_OCOTP_ADDR_CTRL);
+ if (!(c & IMX_OCOTP_BM_CTRL_ERROR))
+ return;
+
+ writel(IMX_OCOTP_BM_CTRL_ERROR, base + IMX_OCOTP_ADDR_CTRL_CLR);
+}
+
static int imx_ocotp_read(void *context, unsigned int offset,
void *val, size_t bytes)
{
@@ -47,26 +124,188 @@ static int imx_ocotp_read(void *context, unsigned int offset,
if (count > (priv->nregs - index))
count = priv->nregs - index;
+ mutex_lock(&ocotp_mutex);
+
ret = clk_prepare_enable(priv->clk);
if (ret < 0) {
+ mutex_unlock(&ocotp_mutex);
dev_err(priv->dev, "failed to prepare/enable ocotp clk\n");
return ret;
}
- for (i = index; i < (index + count); i++)
- *buf++ = readl(priv->base + 0x400 + i * 0x10);
+ ret = imx_ocotp_wait_for_busy(priv->base, 0);
+ if (ret < 0) {
+ dev_err(priv->dev, "timeout during read setup\n");
+ goto read_end;
+ }
+
+ for (i = index; i < (index + count); i++) {
+ *buf++ = readl(priv->base + IMX_OCOTP_OFFSET_B0W0 +
+ i * IMX_OCOTP_OFFSET_PER_WORD);
+
+ /* 47.3.1.2
+ * For "read locked" registers 0xBADABADA will be returned and
+ * HW_OCOTP_CTRL[ERROR] will be set. It must be cleared by
+ * software before any new write, read or reload access can be
+ * issued
+ */
+ if (*(buf - 1) == IMX_OCOTP_READ_LOCKED_VAL)
+ imx_ocotp_clr_err_if_set(priv->base);
+ }
+ ret = 0;
+
+read_end:
clk_disable_unprepare(priv->clk);
+ mutex_unlock(&ocotp_mutex);
+ return ret;
+}
- return 0;
+static int imx_ocotp_write(void *context, unsigned int offset, void *val,
+ size_t bytes)
+{
+ struct ocotp_priv *priv = context;
+ u32 *buf = val;
+ int ret;
+
+ unsigned long clk_rate = 0;
+ unsigned long strobe_read, relax, strobe_prog;
+ u32 timing = 0;
+ u32 ctrl;
+ u8 waddr;
+
+ /* allow only writing one complete OTP word at a time */
+ if ((bytes != priv->config->word_size) ||
+ (offset % priv->config->word_size))
+ return -EINVAL;
+
+ mutex_lock(&ocotp_mutex);
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret < 0) {
+ mutex_unlock(&ocotp_mutex);
+ dev_err(priv->dev, "failed to prepare/enable ocotp clk\n");
+ return ret;
+ }
+
+ /* 47.3.1.3.1
+ * Program HW_OCOTP_TIMING[STROBE_PROG] and HW_OCOTP_TIMING[RELAX]
+ * fields with timing values to match the current frequency of the
+ * ipg_clk. OTP writes will work at maximum bus frequencies as long
+ * as the HW_OCOTP_TIMING parameters are set correctly.
+ */
+ clk_rate = clk_get_rate(priv->clk);
+
+ relax = clk_rate / (1000000000 / DEF_RELAX) - 1;
+ strobe_prog = clk_rate / (1000000000 / 10000) + 2 * (DEF_RELAX + 1) - 1;
+ strobe_read = clk_rate / (1000000000 / 40) + 2 * (DEF_RELAX + 1) - 1;
+
+ timing = strobe_prog & 0x00000FFF;
+ timing |= (relax << 12) & 0x0000F000;
+ timing |= (strobe_read << 16) & 0x003F0000;
+
+ writel(timing, priv->base + IMX_OCOTP_ADDR_TIMING);
+
+ /* 47.3.1.3.2
+ * Check that HW_OCOTP_CTRL[BUSY] and HW_OCOTP_CTRL[ERROR] are clear.
+ * Overlapped accesses are not supported by the controller. Any pending
+ * write or reload must be completed before a write access can be
+ * requested.
+ */
+ ret = imx_ocotp_wait_for_busy(priv->base, 0);
+ if (ret < 0) {
+ dev_err(priv->dev, "timeout during timing setup\n");
+ goto write_end;
+ }
+
+ /* 47.3.1.3.3
+ * Write the requested address to HW_OCOTP_CTRL[ADDR] and program the
+ * unlock code into HW_OCOTP_CTRL[WR_UNLOCK]. This must be programmed
+ * for each write access. The lock code is documented in the register
+ * description. Both the unlock code and address can be written in the
+ * same operation.
+ */
+ /* OTP write/read address specifies one of 128 word address locations */
+ waddr = offset / 4;
+
+ ctrl = readl(priv->base + IMX_OCOTP_ADDR_CTRL);
+ ctrl &= ~IMX_OCOTP_BM_CTRL_ADDR;
+ ctrl |= waddr & IMX_OCOTP_BM_CTRL_ADDR;
+ ctrl |= IMX_OCOTP_WR_UNLOCK;
+
+ writel(ctrl, priv->base + IMX_OCOTP_ADDR_CTRL);
+
+ /* 47.3.1.3.4
+ * Write the data to the HW_OCOTP_DATA register. This will automatically
+ * set HW_OCOTP_CTRL[BUSY] and clear HW_OCOTP_CTRL[WR_UNLOCK]. To
+ * protect against programming the same OTP bit twice, the OCOTP will,
+ * before programming, automatically read the fuse value in OTP and use
+ * it to mask the program data. The masked program data is used to program
+ * a 32-bit word in the OTP per the address in HW_OCOTP_CTRL[ADDR]. Bit
+ * fields with 1's will result in that OTP bit being programmed. Bit
+ * fields with 0's will be ignored. At the same time that the write is
+ * accepted, the controller makes an internal copy of
+ * HW_OCOTP_CTRL[ADDR] which cannot be updated until the next write
+ * sequence is initiated. This copy guarantees that erroneous writes to
+ * HW_OCOTP_CTRL[ADDR] will not affect an active write operation. It
+ * should also be noted that during the programming HW_OCOTP_DATA will
+ * shift right (with zero fill). This shifting is required to program
+ * the OTP serially. During the write operation, HW_OCOTP_DATA cannot be
+ * modified.
+ */
+ writel(*buf, priv->base + IMX_OCOTP_ADDR_DATA);
+
+ /* 47.4.1.4.5
+ * Once complete, the controller will clear BUSY. A write request to a
+ * protected or locked region will result in no OTP access and no
+ * setting of HW_OCOTP_CTRL[BUSY]. In addition HW_OCOTP_CTRL[ERROR] will
+ * be set. It must be cleared by software before any new write access
+ * can be issued.
+ */
+ ret = imx_ocotp_wait_for_busy(priv->base, 0);
+ if (ret < 0) {
+ if (ret == -EPERM) {
+ dev_err(priv->dev, "failed write to locked region");
+ imx_ocotp_clr_err_if_set(priv->base);
+ } else {
+ dev_err(priv->dev, "timeout during data write\n");
+ }
+ goto write_end;
+ }
+
+ /* 47.3.1.4
+ * Write Postamble: Due to internal electrical characteristics of the
+ * OTP during writes, all OTP operations following a write must be
+ * separated by 2 us after the clearing of HW_OCOTP_CTRL_BUSY following
+ * the write.
+ */
+ udelay(2);
+
+ /* reload all shadow registers */
+ writel(IMX_OCOTP_BM_CTRL_REL_SHADOWS,
+ priv->base + IMX_OCOTP_ADDR_CTRL_SET);
+ ret = imx_ocotp_wait_for_busy(priv->base,
+ IMX_OCOTP_BM_CTRL_REL_SHADOWS);
+ if (ret < 0) {
+ dev_err(priv->dev, "timeout during shadow register reload\n");
+ goto write_end;
+ }
+
+write_end:
+ clk_disable_unprepare(priv->clk);
+ mutex_unlock(&ocotp_mutex);
+ if (ret < 0)
+ return ret;
+ return bytes;
}
static struct nvmem_config imx_ocotp_nvmem_config = {
.name = "imx-ocotp",
- .read_only = true,
+ .read_only = false,
.word_size = 4,
.stride = 4,
.owner = THIS_MODULE,
.reg_read = imx_ocotp_read,
+ .reg_write = imx_ocotp_write,
};
static const struct of_device_id imx_ocotp_dt_ids[] = {
@@ -74,6 +313,7 @@ static const struct of_device_id imx_ocotp_dt_ids[] = {
{ .compatible = "fsl,imx6sl-ocotp", (void *)64 },
{ .compatible = "fsl,imx6sx-ocotp", (void *)128 },
{ .compatible = "fsl,imx6ul-ocotp", (void *)128 },
+ { .compatible = "fsl,imx7d-ocotp", (void *)64 },
{ },
};
MODULE_DEVICE_TABLE(of, imx_ocotp_dt_ids);
@@ -90,12 +330,14 @@ static int imx_ocotp_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
+ priv->dev = dev;
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
priv->base = devm_ioremap_resource(dev, res);
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
- priv->clk = devm_clk_get(&pdev->dev, NULL);
+ priv->clk = devm_clk_get(dev, NULL);
if (IS_ERR(priv->clk))
return PTR_ERR(priv->clk);
@@ -104,7 +346,9 @@ static int imx_ocotp_probe(struct platform_device *pdev)
imx_ocotp_nvmem_config.size = 4 * priv->nregs;
imx_ocotp_nvmem_config.dev = dev;
imx_ocotp_nvmem_config.priv = priv;
+ priv->config = &imx_ocotp_nvmem_config;
nvmem = nvmem_register(&imx_ocotp_nvmem_config);
+
if (IS_ERR(nvmem))
return PTR_ERR(nvmem);
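
A hedged sketch of what the new write path allows from a consumer's point of view: exactly one 32-bit, word-aligned OTP word per nvmem_device_write() call, mirroring the check at the top of imx_ocotp_write(). The helper name is illustrative, and fuse programming is of course irreversible.

#include <linux/nvmem-consumer.h>
#include <linux/types.h>

static int example_blow_ocotp_word(struct nvmem_device *ocotp,
                unsigned int word_index, u32 value)
{
        /* offset and size must match the 4-byte word_size/stride above */
        int ret = nvmem_device_write(ocotp, word_index * 4, sizeof(value),
                        &value);

        return ret < 0 ? ret : 0;
}
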
diff --git a/drivers/nvmem/meson-efuse.c b/drivers/nvmem/meson-efuse.c
index f207c3b10482..70bfc9839bb2 100644
--- a/drivers/nvmem/meson-efuse.c
+++ b/drivers/nvmem/meson-efuse.c
@@ -27,7 +27,7 @@ static int meson_efuse_read(void *context, unsigned int offset,
u8 *buf = val;
int ret;
- ret = meson_sm_call_read(buf, SM_EFUSE_READ, offset,
+ ret = meson_sm_call_read(buf, bytes, SM_EFUSE_READ, offset,
bytes, 0, 0, 0);
if (ret < 0)
return ret;
diff --git a/drivers/nvmem/sunxi_sid.c b/drivers/nvmem/sunxi_sid.c
index 1567ccca8de3..0d6648be93b8 100644
--- a/drivers/nvmem/sunxi_sid.c
+++ b/drivers/nvmem/sunxi_sid.c
@@ -17,13 +17,24 @@
#include <linux/device.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/nvmem-provider.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/random.h>
+/* Registers and special values for doing register-based SID readout on H3 */
+#define SUN8I_SID_PRCTL 0x40
+#define SUN8I_SID_RDKEY 0x60
+
+#define SUN8I_SID_OFFSET_MASK 0x1FF
+#define SUN8I_SID_OFFSET_SHIFT 16
+#define SUN8I_SID_OP_LOCK (0xAC << 8)
+#define SUN8I_SID_READ BIT(1)
+
static struct nvmem_config econfig = {
.name = "sunxi-sid",
.read_only = true,
@@ -32,8 +43,15 @@ static struct nvmem_config econfig = {
.owner = THIS_MODULE,
};
+struct sunxi_sid_cfg {
+ u32 value_offset;
+ u32 size;
+ bool need_register_readout;
+};
+
struct sunxi_sid {
void __iomem *base;
+ u32 value_offset;
};
/* We read the entire key, due to a 32 bit read alignment requirement. Since we
@@ -58,12 +76,36 @@ static int sunxi_sid_read(void *context, unsigned int offset,
struct sunxi_sid *sid = context;
u8 *buf = val;
+ /* Offset the read operation to the real position of SID */
+ offset += sid->value_offset;
+
while (bytes--)
*buf++ = sunxi_sid_read_byte(sid, offset++);
return 0;
}
+static int sun8i_sid_register_readout(const struct sunxi_sid *sid,
+ const unsigned int word)
+{
+ u32 reg_val;
+ int ret;
+
+ /* Set word, lock access, and set read command */
+ reg_val = (word & SUN8I_SID_OFFSET_MASK)
+ << SUN8I_SID_OFFSET_SHIFT;
+ reg_val |= SUN8I_SID_OP_LOCK | SUN8I_SID_READ;
+ writel(reg_val, sid->base + SUN8I_SID_PRCTL);
+
+ ret = readl_poll_timeout(sid->base + SUN8I_SID_PRCTL, reg_val,
+ !(reg_val & SUN8I_SID_READ), 100, 250000);
+ if (ret)
+ return ret;
+
+ writel(0, sid->base + SUN8I_SID_PRCTL);
+ return 0;
+}
+
static int sunxi_sid_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -72,18 +114,42 @@ static int sunxi_sid_probe(struct platform_device *pdev)
struct sunxi_sid *sid;
int ret, i, size;
char *randomness;
+ const struct sunxi_sid_cfg *cfg;
sid = devm_kzalloc(dev, sizeof(*sid), GFP_KERNEL);
if (!sid)
return -ENOMEM;
+ cfg = of_device_get_match_data(dev);
+ if (!cfg)
+ return -EINVAL;
+ sid->value_offset = cfg->value_offset;
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
sid->base = devm_ioremap_resource(dev, res);
if (IS_ERR(sid->base))
return PTR_ERR(sid->base);
- size = resource_size(res) - 1;
- econfig.size = resource_size(res);
+ size = cfg->size;
+
+ if (cfg->need_register_readout) {
+ /*
+ * The H3's SID controller has a bug: the value at offset 0x200
+ * is not correct after the hardware is reset.
+ * However, after doing a register-based read operation, the
+ * value becomes correct.
+ * Do a full register-based read pass here, but ignore the
+ * values it returns (reading via direct MMIO is faster than
+ * going through the registers).
+ */
+ for (i = 0; i < (size >> 2); i++) {
+ ret = sun8i_sid_register_readout(sid, i);
+ if (ret)
+ return ret;
+ }
+ }
+
+ econfig.size = size;
econfig.dev = dev;
econfig.reg_read = sunxi_sid_read;
econfig.priv = sid;
@@ -119,9 +185,24 @@ static int sunxi_sid_remove(struct platform_device *pdev)
return nvmem_unregister(nvmem);
}
+static const struct sunxi_sid_cfg sun4i_a10_cfg = {
+ .size = 0x10,
+};
+
+static const struct sunxi_sid_cfg sun7i_a20_cfg = {
+ .size = 0x200,
+};
+
+static const struct sunxi_sid_cfg sun8i_h3_cfg = {
+ .value_offset = 0x200,
+ .size = 0x100,
+ .need_register_readout = true,
+};
+
static const struct of_device_id sunxi_sid_of_match[] = {
- { .compatible = "allwinner,sun4i-a10-sid" },
- { .compatible = "allwinner,sun7i-a20-sid" },
+ { .compatible = "allwinner,sun4i-a10-sid", .data = &sun4i_a10_cfg },
+ { .compatible = "allwinner,sun7i-a20-sid", .data = &sun7i_a20_cfg },
+ { .compatible = "allwinner,sun8i-h3-sid", .data = &sun8i_h3_cfg },
{/* sentinel */},
};
MODULE_DEVICE_TABLE(of, sunxi_sid_of_match);
diff --git a/drivers/of/address.c b/drivers/of/address.c
index 02b2903fe9d2..72914cdfce2a 100644
--- a/drivers/of/address.c
+++ b/drivers/of/address.c
@@ -263,7 +263,7 @@ struct of_pci_range *of_pci_range_parser_one(struct of_pci_range_parser *parser,
if (!parser->range || parser->range + parser->np > parser->end)
return NULL;
- range->pci_space = parser->range[0];
+ range->pci_space = be32_to_cpup(parser->range);
range->flags = of_bus_pci_get_flags(parser->range);
range->pci_addr = of_read_number(parser->range + 1, ns);
range->cpu_addr = of_translate_address(parser->node,
diff --git a/drivers/of/base.c b/drivers/of/base.c
index d7c4629a3a2d..28d5f53bc631 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -1213,6 +1213,37 @@ int of_property_read_u32_index(const struct device_node *np,
EXPORT_SYMBOL_GPL(of_property_read_u32_index);
/**
+ * of_property_read_u64_index - Find and read a u64 from a multi-value property.
+ *
+ * @np: device node from which the property value is to be read.
+ * @propname: name of the property to be searched.
+ * @index: index of the u64 in the list of values
+ * @out_value: pointer to return value, modified only if no error.
+ *
+ * Search for a property in a device node and read the nth 64-bit value from
+ * it. Returns 0 on success, -EINVAL if the property does not exist,
+ * -ENODATA if property does not have a value, and -EOVERFLOW if the
+ * property data isn't large enough.
+ *
+ * The out_value is modified only if a valid u64 value can be decoded.
+ */
+int of_property_read_u64_index(const struct device_node *np,
+ const char *propname,
+ u32 index, u64 *out_value)
+{
+ const u64 *val = of_find_property_value_of_size(np, propname,
+ ((index + 1) * sizeof(*out_value)),
+ 0, NULL);
+
+ if (IS_ERR(val))
+ return PTR_ERR(val);
+
+ *out_value = be64_to_cpup(((__be64 *)val) + index);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(of_property_read_u64_index);
+
+/**
* of_property_read_variable_u8_array - Find and read an array of u8 from a
* property, with bounds on the minimum and maximum array size.
*
@@ -2250,15 +2281,14 @@ EXPORT_SYMBOL_GPL(of_console_check);
*/
struct device_node *of_find_next_cache_node(const struct device_node *np)
{
- struct device_node *child;
- const phandle *handle;
+ struct device_node *child, *cache_node;
- handle = of_get_property(np, "l2-cache", NULL);
- if (!handle)
- handle = of_get_property(np, "next-level-cache", NULL);
+ cache_node = of_parse_phandle(np, "l2-cache", 0);
+ if (!cache_node)
+ cache_node = of_parse_phandle(np, "next-level-cache", 0);
- if (handle)
- return of_find_node_by_phandle(be32_to_cpup(handle));
+ if (cache_node)
+ return cache_node;
/* OF on pmac has nodes instead of properties named "l2-cache"
* beneath CPU nodes.
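
A short usage sketch for the new of_property_read_u64_index() helper; the property name is illustrative.

#include <linux/of.h>
#include <linux/types.h>

static int example_read_second_u64(struct device_node *np, u64 *out)
{
        /* reads the second (index 1) u64 value of "example,values" */
        return of_property_read_u64_index(np, "example,values", 1, out);
}
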
diff --git a/drivers/of/device.c b/drivers/of/device.c
index b1e6bebda3f3..9416d052cb89 100644
--- a/drivers/of/device.c
+++ b/drivers/of/device.c
@@ -82,7 +82,7 @@ int of_device_add(struct platform_device *ofdev)
* can use a platform bus notifier and handle BUS_NOTIFY_ADD_DEVICE events
* to fix up DMA configuration.
*/
-void of_dma_configure(struct device *dev, struct device_node *np)
+int of_dma_configure(struct device *dev, struct device_node *np)
{
u64 dma_addr, paddr, size;
int ret;
@@ -107,7 +107,7 @@ void of_dma_configure(struct device *dev, struct device_node *np)
ret = of_dma_get_range(np, &dma_addr, &paddr, &size);
if (ret < 0) {
dma_addr = offset = 0;
- size = dev->coherent_dma_mask + 1;
+ size = max(dev->coherent_dma_mask, dev->coherent_dma_mask + 1);
} else {
offset = PFN_DOWN(paddr - dma_addr);
@@ -123,7 +123,7 @@ void of_dma_configure(struct device *dev, struct device_node *np)
if (!size) {
dev_err(dev, "Adjusted size 0x%llx invalid\n", size);
- return;
+ return -EINVAL;
}
dev_dbg(dev, "dma_pfn_offset(%#08lx)\n", offset);
}
@@ -144,13 +144,30 @@ void of_dma_configure(struct device *dev, struct device_node *np)
coherent ? " " : " not ");
iommu = of_iommu_configure(dev, np);
+ if (IS_ERR(iommu))
+ return PTR_ERR(iommu);
+
dev_dbg(dev, "device is%sbehind an iommu\n",
iommu ? " " : " not ");
arch_setup_dma_ops(dev, dma_addr, size, iommu, coherent);
+
+ return 0;
}
EXPORT_SYMBOL_GPL(of_dma_configure);
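A sketch of how a caller might consume the new int return value; the bus setup function here is hypothetical, and the error it propagates is simply whatever of_dma_configure() reports (for example a deferral from the IOMMU layer):

#include <linux/device.h>
#include <linux/of_device.h>

static int example_bus_dma_setup(struct device *dev)
{
	/* propagate failures upward instead of silently continuing */
	return of_dma_configure(dev, dev->of_node);
}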
+/**
+ * of_dma_deconfigure - Clean up DMA configuration
+ * @dev: Device for which to clean up DMA configuration
+ *
+ * Clean up all configuration performed by of_dma_configure() and free all
+ * resources that have been allocated.
+ */
+void of_dma_deconfigure(struct device *dev)
+{
+ arch_teardown_dma_ops(dev);
+}
+
int of_device_register(struct platform_device *pdev)
{
device_initialize(&pdev->dev);
@@ -176,7 +193,7 @@ const void *of_device_get_match_data(const struct device *dev)
}
EXPORT_SYMBOL(of_device_get_match_data);
-ssize_t of_device_get_modalias(struct device *dev, char *str, ssize_t len)
+static ssize_t of_device_get_modalias(struct device *dev, char *str, ssize_t len)
{
const char *compat;
int cplen, i;
@@ -223,9 +240,8 @@ ssize_t of_device_get_modalias(struct device *dev, char *str, ssize_t len)
str[i] = '_';
}
- return tsize;
+ return repend;
}
-EXPORT_SYMBOL_GPL(of_device_get_modalias);
int of_device_request_module(struct device *dev)
{
@@ -251,6 +267,21 @@ int of_device_request_module(struct device *dev)
EXPORT_SYMBOL_GPL(of_device_request_module);
/**
+ * of_device_modalias - Fill buffer with newline terminated modalias string
+ */
+ssize_t of_device_modalias(struct device *dev, char *str, ssize_t len)
+{
+ ssize_t sl = of_device_get_modalias(dev, str, len - 2);
+ if (sl < 0)
+ return sl;
+
+ str[sl++] = '\n';
+ str[sl] = 0;
+ return sl;
+}
+EXPORT_SYMBOL_GPL(of_device_modalias);
+
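A sketch of the intended consumer, a sysfs "modalias" show() callback; the callback name and the choice to report an empty attribute on error are assumptions for illustration:

#include <linux/device.h>
#include <linux/of_device.h>

static ssize_t example_modalias_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	ssize_t len = of_device_modalias(dev, buf, PAGE_SIZE);

	return len < 0 ? 0 : len;	/* report an empty attribute on error */
}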
+/**
* of_device_uevent - Display OF related uevent information
*/
void of_device_uevent(struct device *dev, struct kobj_uevent_env *env)
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index e5ce4b59e162..a0972219ccfc 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -31,6 +31,8 @@
#include <asm/setup.h> /* for COMMAND_LINE_SIZE */
#include <asm/page.h>
+#include "of_private.h"
+
/*
* of_fdt_limit_memory - limit the number of regions in the /memory node
* @limit: maximum entries
@@ -46,8 +48,8 @@ void of_fdt_limit_memory(int limit)
const void *val;
int nr_address_cells = OF_ROOT_NODE_ADDR_CELLS_DEFAULT;
int nr_size_cells = OF_ROOT_NODE_SIZE_CELLS_DEFAULT;
- const uint32_t *addr_prop;
- const uint32_t *size_prop;
+ const __be32 *addr_prop;
+ const __be32 *size_prop;
int root_offset;
int cell_size;
@@ -469,11 +471,11 @@ static int unflatten_dt_nodes(const void *blob,
* Returns NULL on failure or the memory chunk containing the unflattened
* device tree on success.
*/
-static void *__unflatten_device_tree(const void *blob,
- struct device_node *dad,
- struct device_node **mynodes,
- void *(*dt_alloc)(u64 size, u64 align),
- bool detached)
+void *__unflatten_device_tree(const void *blob,
+ struct device_node *dad,
+ struct device_node **mynodes,
+ void *(*dt_alloc)(u64 size, u64 align),
+ bool detached)
{
int size;
void *mem;
@@ -1261,6 +1263,8 @@ void __init unflatten_device_tree(void)
/* Get pointer to "/chosen" and "/aliases" nodes for use everywhere */
of_alias_scan(early_init_dt_alloc_memory_arch);
+
+ unittest_unflatten_overlay_base();
}
/**
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index 7c56b72d1dc6..d11437cb1187 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -102,7 +102,7 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq)
struct device_node *ipar, *tnode, *old = NULL, *newpar = NULL;
__be32 initial_match_array[MAX_PHANDLE_ARGS];
const __be32 *match_array = initial_match_array;
- const __be32 *tmp, *imap, *imask, dummy_imask[] = { [0 ... MAX_PHANDLE_ARGS] = ~0 };
+ const __be32 *tmp, *imap, *imask, dummy_imask[] = { [0 ... MAX_PHANDLE_ARGS] = cpu_to_be32(~0) };
u32 intsize = 1, addrsize, newintsize = 0, newaddrsize = 0;
int imaplen, match, i, rc = -EINVAL;
diff --git a/drivers/of/of_numa.c b/drivers/of/of_numa.c
index a53982a330ea..2db1f7a04baf 100644
--- a/drivers/of/of_numa.c
+++ b/drivers/of/of_numa.c
@@ -57,6 +57,8 @@ static void __init of_numa_parse_cpu_nodes(void)
else
node_set(nid, numa_nodes_parsed);
}
+
+ of_node_put(cpus);
}
static int __init of_numa_parse_memory_nodes(void)
diff --git a/drivers/of/of_pci.c b/drivers/of/of_pci.c
index 0ee42c3e66a1..c9d4d3a7b0fe 100644
--- a/drivers/of/of_pci.c
+++ b/drivers/of/of_pci.c
@@ -285,51 +285,6 @@ parse_failed:
EXPORT_SYMBOL_GPL(of_pci_get_host_bridge_resources);
#endif /* CONFIG_OF_ADDRESS */
-#ifdef CONFIG_PCI_MSI
-
-static LIST_HEAD(of_pci_msi_chip_list);
-static DEFINE_MUTEX(of_pci_msi_chip_mutex);
-
-int of_pci_msi_chip_add(struct msi_controller *chip)
-{
- if (!of_property_read_bool(chip->of_node, "msi-controller"))
- return -EINVAL;
-
- mutex_lock(&of_pci_msi_chip_mutex);
- list_add(&chip->list, &of_pci_msi_chip_list);
- mutex_unlock(&of_pci_msi_chip_mutex);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(of_pci_msi_chip_add);
-
-void of_pci_msi_chip_remove(struct msi_controller *chip)
-{
- mutex_lock(&of_pci_msi_chip_mutex);
- list_del(&chip->list);
- mutex_unlock(&of_pci_msi_chip_mutex);
-}
-EXPORT_SYMBOL_GPL(of_pci_msi_chip_remove);
-
-struct msi_controller *of_pci_find_msi_chip_by_node(struct device_node *of_node)
-{
- struct msi_controller *c;
-
- mutex_lock(&of_pci_msi_chip_mutex);
- list_for_each_entry(c, &of_pci_msi_chip_list, list) {
- if (c->of_node == of_node) {
- mutex_unlock(&of_pci_msi_chip_mutex);
- return c;
- }
- }
- mutex_unlock(&of_pci_msi_chip_mutex);
-
- return NULL;
-}
-EXPORT_SYMBOL_GPL(of_pci_find_msi_chip_by_node);
-
-#endif /* CONFIG_PCI_MSI */
-
/**
* of_pci_map_rid - Translate a requester ID through a downstream mapping.
* @np: root complex device node.
diff --git a/drivers/of/of_private.h b/drivers/of/of_private.h
index 18bbb4517e25..4ebb0149d118 100644
--- a/drivers/of/of_private.h
+++ b/drivers/of/of_private.h
@@ -55,6 +55,18 @@ static inline int of_property_notify(int action, struct device_node *np,
}
#endif /* CONFIG_OF_DYNAMIC */
+#if defined(CONFIG_OF_UNITTEST) && defined(CONFIG_OF_OVERLAY)
+extern void __init unittest_unflatten_overlay_base(void);
+#else
+static inline void unittest_unflatten_overlay_base(void) {};
+#endif
+
+extern void *__unflatten_device_tree(const void *blob,
+ struct device_node *dad,
+ struct device_node **mynodes,
+ void *(*dt_alloc)(u64 size, u64 align),
+ bool detached);
+
/**
* General utilities for working with live trees.
*
diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c
index d507c3569a88..4dec07ea510f 100644
--- a/drivers/of/of_reserved_mem.c
+++ b/drivers/of/of_reserved_mem.c
@@ -197,7 +197,7 @@ static int __init __reserved_mem_init_node(struct reserved_mem *rmem)
const struct of_device_id *i;
for (i = __reservedmem_of_table; i < &__rmem_of_table_sentinel; i++) {
- reservedmem_of_init_fn initfn = i->data;
+ int const (*initfn)(struct reserved_mem *rmem) = i->data;
const char *compat = i->compatible;
if (!of_flat_dt_is_compatible(rmem->fdt_node, compat))
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index 45b413e5a444..71fecc2debfc 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -22,6 +22,7 @@
#include <linux/slab.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
+#include <linux/of_iommu.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
@@ -158,11 +159,6 @@ struct platform_device *of_device_alloc(struct device_node *np,
}
EXPORT_SYMBOL(of_device_alloc);
-static void of_dma_deconfigure(struct device *dev)
-{
- arch_teardown_dma_ops(dev);
-}
-
/**
* of_platform_device_create_pdata - Alloc, initialize and register an of_device
* @np: pointer to node to create device for
@@ -191,11 +187,9 @@ static struct platform_device *of_platform_device_create_pdata(
dev->dev.bus = &platform_bus_type;
dev->dev.platform_data = platform_data;
- of_dma_configure(&dev->dev, dev->dev.of_node);
of_msi_configure(&dev->dev, dev->dev.of_node);
if (of_device_add(dev) != 0) {
- of_dma_deconfigure(&dev->dev);
platform_device_put(dev);
goto err_clear_flag;
}
@@ -253,7 +247,6 @@ static struct amba_device *of_amba_device_create(struct device_node *node,
dev_set_name(&dev->dev, "%s", bus_id);
else
of_device_make_bus_id(&dev->dev);
- of_dma_configure(&dev->dev, dev->dev.of_node);
/* Allow the HW Peripheral ID to be overridden */
prop = of_get_property(node, "arm,primecell-periphid", NULL);
@@ -547,7 +540,6 @@ static int of_platform_device_destroy(struct device *dev, void *data)
amba_device_unregister(to_amba_device(dev));
#endif
- of_dma_deconfigure(dev);
of_node_clear_flag(dev->of_node, OF_POPULATED);
of_node_clear_flag(dev->of_node, OF_POPULATED_BUS);
return 0;
diff --git a/drivers/of/resolver.c b/drivers/of/resolver.c
index 7ae9863cb0a4..771f4844c781 100644
--- a/drivers/of/resolver.c
+++ b/drivers/of/resolver.c
@@ -92,7 +92,7 @@ static void adjust_overlay_phandles(struct device_node *overlay,
if (phandle == OF_PHANDLE_ILLEGAL)
continue;
- *(uint32_t *)prop->value = cpu_to_be32(overlay->phandle);
+ *(__be32 *)prop->value = cpu_to_be32(overlay->phandle);
}
for_each_child_of_node(overlay, child)
diff --git a/drivers/of/unittest-data/Makefile b/drivers/of/unittest-data/Makefile
index 1ac5cc01d627..6e00a9c69e58 100644
--- a/drivers/of/unittest-data/Makefile
+++ b/drivers/of/unittest-data/Makefile
@@ -1,7 +1,18 @@
obj-y += testcases.dtb.o
+obj-y += overlay.dtb.o
+obj-y += overlay_bad_phandle.dtb.o
+obj-y += overlay_base.dtb.o
targets += testcases.dtb testcases.dtb.S
+targets += overlay.dtb overlay.dtb.S
+targets += overlay_bad_phandle.dtb overlay_bad_phandle.dtb.S
+targets += overlay_base.dtb overlay_base.dtb.S
-.SECONDARY: \
- $(obj)/testcases.dtb.S \
- $(obj)/testcases.dtb
+.PRECIOUS: \
+ $(obj)/%.dtb.S \
+ $(obj)/%.dtb
+
+# enable creation of __symbols__ node
+DTC_FLAGS_overlay := -@
+DTC_FLAGS_overlay_bad_phandle := -@
+DTC_FLAGS_overlay_base := -@
diff --git a/drivers/of/unittest-data/overlay.dts b/drivers/of/unittest-data/overlay.dts
new file mode 100644
index 000000000000..6cd7e6a0c13e
--- /dev/null
+++ b/drivers/of/unittest-data/overlay.dts
@@ -0,0 +1,53 @@
+/dts-v1/;
+/plugin/;
+
+/ {
+
+ fragment@0 {
+ target = <&electric_1>;
+
+ __overlay__ {
+ status = "ok";
+
+ hvac_2: hvac-large-1 {
+ compatible = "ot,hvac-large";
+ heat-range = < 40 75 >;
+ cool-range = < 65 80 >;
+ };
+ };
+ };
+
+ fragment@1 {
+ target = <&rides_1>;
+
+ __overlay__ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ status = "ok";
+
+ ride@200 {
+ compatible = "ot,ferris-wheel";
+ reg = < 0x00000200 0x100 >;
+ hvac-provider = < &hvac_2 >;
+ hvac-thermostat = < 27 32 > ;
+ hvac-zones = < 12 5 >;
+ hvac-zone-names = "operator", "snack-bar";
+ spin-controller = < &spin_ctrl_1 3 >;
+ spin-rph = < 30 >;
+ gondolas = < 16 >;
+ gondola-capacity = < 6 >;
+ };
+ };
+ };
+
+ fragment@2 {
+ target = <&lights_2>;
+
+ __overlay__ {
+ status = "ok";
+ color = "purple", "white", "red", "green";
+ rate = < 3 256 >;
+ };
+ };
+
+};
diff --git a/drivers/of/unittest-data/overlay_bad_phandle.dts b/drivers/of/unittest-data/overlay_bad_phandle.dts
new file mode 100644
index 000000000000..270ee885a623
--- /dev/null
+++ b/drivers/of/unittest-data/overlay_bad_phandle.dts
@@ -0,0 +1,20 @@
+/dts-v1/;
+/plugin/;
+
+/ {
+
+ fragment@0 {
+ target = <&electric_1>;
+
+ __overlay__ {
+
+ // This label should cause an error when the overlay
+ // is applied. There is already a phandle value
+ // in the base tree for motor-1.
+ spin_ctrl_1_conflict: motor-1 {
+ accelerate = < 3 >;
+ decelerate = < 5 >;
+ };
+ };
+ };
+};
diff --git a/drivers/of/unittest-data/overlay_base.dts b/drivers/of/unittest-data/overlay_base.dts
new file mode 100644
index 000000000000..5566b27fb61a
--- /dev/null
+++ b/drivers/of/unittest-data/overlay_base.dts
@@ -0,0 +1,80 @@
+/dts-v1/;
+/plugin/;
+
+/*
+ * Base device tree that overlays will be applied against.
+ *
+ * Do not add any properties in node "/".
+ * Do not add any nodes other than "/testcase-data-2" in node "/".
+ * Do not add anything that would result in dtc creating node "/__fixups__".
+ * dtc will create nodes "/__symbols__" and "/__local_fixups__".
+ */
+
+/ {
+ testcase-data-2 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ electric_1: substation@100 {
+ compatible = "ot,big-volts-control";
+ reg = < 0x00000100 0x100 >;
+ status = "disabled";
+
+ hvac_1: hvac-medium-1 {
+ compatible = "ot,hvac-medium";
+ heat-range = < 50 75 >;
+ cool-range = < 60 80 >;
+ };
+
+ spin_ctrl_1: motor-1 {
+ compatible = "ot,ferris-wheel-motor";
+ spin = "clockwise";
+ };
+
+ spin_ctrl_2: motor-8 {
+ compatible = "ot,roller-coaster-motor";
+ };
+ };
+
+ rides_1: fairway-1 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "ot,rides";
+ status = "disabled";
+ orientation = < 127 >;
+
+ ride@100 {
+ compatible = "ot,roller-coaster";
+ reg = < 0x00000100 0x100 >;
+ hvac-provider = < &hvac_1 >;
+ hvac-thermostat = < 29 > ;
+ hvac-zones = < 14 >;
+ hvac-zone-names = "operator";
+ spin-controller = < &spin_ctrl_2 5 &spin_ctrl_2 7 >;
+ spin-controller-names = "track_1", "track_2";
+ queues = < 2 >;
+ };
+ };
+
+ lights_1: lights@30000 {
+ compatible = "ot,work-lights";
+ reg = < 0x00030000 0x1000 >;
+ status = "disabled";
+ };
+
+ lights_2: lights@40000 {
+ compatible = "ot,show-lights";
+ reg = < 0x00040000 0x1000 >;
+ status = "disabled";
+ rate = < 13 138 >;
+ };
+
+ retail_1: vending@50000 {
+ reg = < 0x00050000 0x1000 >;
+ compatible = "ot,tickets";
+ status = "disabled";
+ };
+
+ };
+};
+
diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
index 62db55b97c10..987a1530282a 100644
--- a/drivers/of/unittest.c
+++ b/drivers/of/unittest.c
@@ -8,6 +8,7 @@
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/hashtable.h>
+#include <linux/libfdt.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_irq.h>
@@ -1925,6 +1926,324 @@ out:
static inline void __init of_unittest_overlay(void) { }
#endif
+/*
+ * __dtb_ot_begin[] and __dtb_ot_end[] are created by cmd_dt_S_dtb
+ * in scripts/Makefile.lib
+ */
+
+#define OVERLAY_INFO_EXTERN(name) \
+ extern uint8_t __dtb_##name##_begin[]; \
+ extern uint8_t __dtb_##name##_end[]
+
+#define OVERLAY_INFO(name, expected) \
+{ .dtb_begin = __dtb_##name##_begin, \
+ .dtb_end = __dtb_##name##_end, \
+ .expected_result = expected, \
+}
+
+struct overlay_info {
+ uint8_t *dtb_begin;
+ uint8_t *dtb_end;
+ void *data;
+ struct device_node *np_overlay;
+ int expected_result;
+ int overlay_id;
+};
+
+OVERLAY_INFO_EXTERN(overlay_base);
+OVERLAY_INFO_EXTERN(overlay);
+OVERLAY_INFO_EXTERN(overlay_bad_phandle);
+
+#ifdef CONFIG_OF_OVERLAY
+
+/* order of entries is hard-coded into users of overlays[] */
+static struct overlay_info overlays[] = {
+ OVERLAY_INFO(overlay_base, -9999),
+ OVERLAY_INFO(overlay, 0),
+ OVERLAY_INFO(overlay_bad_phandle, -EINVAL),
+ {}
+};
+
+static struct device_node *overlay_base_root;
+
+/*
+ * Create base device tree for the overlay unittest.
+ *
+ * This is called from very early boot code.
+ *
+ * Do as much as possible the same way as done in __unflatten_device_tree
+ * and other early boot steps for the normal FDT so that the overlay base
+ * unflattened tree will have the same characteristics as the real tree
+ * (such as having memory allocated by the early allocator). The goal
+ * is to test "the real thing" as much as possible, and test "test setup
+ * code" as little as possible.
+ *
+ * Have to stop before resolving phandles, because that uses kmalloc.
+ */
+void __init unittest_unflatten_overlay_base(void)
+{
+ struct overlay_info *info;
+ u32 data_size;
+ u32 size;
+
+ info = &overlays[0];
+
+ if (info->expected_result != -9999) {
+ pr_err("No dtb 'overlay_base' to attach\n");
+ return;
+ }
+
+ data_size = info->dtb_end - info->dtb_begin;
+ if (!data_size) {
+ pr_err("No dtb 'overlay_base' to attach\n");
+ return;
+ }
+
+ size = fdt_totalsize(info->dtb_begin);
+ if (size != data_size) {
+ pr_err("dtb 'overlay_base' header totalsize != actual size");
+ return;
+ }
+
+ info->data = early_init_dt_alloc_memory_arch(size,
+ roundup_pow_of_two(FDT_V17_SIZE));
+ if (!info->data) {
+ pr_err("alloc for dtb 'overlay_base' failed");
+ return;
+ }
+
+ memcpy(info->data, info->dtb_begin, size);
+
+ __unflatten_device_tree(info->data, NULL, &info->np_overlay,
+ early_init_dt_alloc_memory_arch, true);
+ overlay_base_root = info->np_overlay;
+}
+
+/*
+ * The purpose of overlay_data_add() is to add an
+ * overlay in the normal fashion. This is a test of the whole
+ * picture, instead of testing individual elements.
+ *
+ * A secondary purpose is to be able to verify that the contents of
+ * /proc/device-tree/ contain the updated structure and values from
+ * the overlay. That must be verified separately in user space.
+ *
+ * Return 0 on unexpected error.
+ */
+static int __init overlay_data_add(int onum)
+{
+ struct overlay_info *info;
+ int k;
+ int ret;
+ u32 size;
+ u32 size_from_header;
+
+ for (k = 0, info = overlays; info; info++, k++) {
+ if (k == onum)
+ break;
+ }
+ if (onum > k)
+ return 0;
+
+ size = info->dtb_end - info->dtb_begin;
+ if (!size) {
+ pr_err("no overlay to attach, %d\n", onum);
+ ret = 0;
+ }
+
+ size_from_header = fdt_totalsize(info->dtb_begin);
+ if (size_from_header != size) {
+ pr_err("overlay header totalsize != actual size, %d", onum);
+ return 0;
+ }
+
+ /*
+ * Must create a permanent copy of the FDT because of_fdt_unflatten_tree()
+ * will create pointers into the passed-in FDT from the unflattened
+ * expanded device tree (EDT).
+ */
+ info->data = kmemdup(info->dtb_begin, size, GFP_KERNEL);
+ if (!info->data) {
+ pr_err("unable to allocate memory for data, %d\n", onum);
+ return 0;
+ }
+
+ of_fdt_unflatten_tree(info->data, NULL, &info->np_overlay);
+ if (!info->np_overlay) {
+ pr_err("unable to unflatten overlay, %d\n", onum);
+ ret = 0;
+ goto out_free_data;
+ }
+ of_node_set_flag(info->np_overlay, OF_DETACHED);
+
+ ret = of_resolve_phandles(info->np_overlay);
+ if (ret) {
+ pr_err("resolve ot phandles (ret=%d), %d\n", ret, onum);
+ goto out_free_np_overlay;
+ }
+
+ ret = of_overlay_create(info->np_overlay);
+ if (ret < 0) {
+ pr_err("of_overlay_create() (ret=%d), %d\n", ret, onum);
+ goto out_free_np_overlay;
+ } else {
+ info->overlay_id = ret;
+ ret = 0;
+ }
+
+ pr_debug("__dtb_overlay_begin applied, overlay id %d\n", ret);
+
+ goto out;
+
+out_free_np_overlay:
+ /*
+ * info->np_overlay is the unflattened device tree.
+ * It has not been spliced into the live tree.
+ */
+
+ /* todo: function to free unflattened device tree */
+
+out_free_data:
+ kfree(info->data);
+
+out:
+ return (ret == info->expected_result);
+}
+
+/*
+ * The purpose of of_unittest_overlay_high_level is to add an overlay
+ * in the normal fashion. This is a test of the whole picture,
+ * instead of individual elements.
+ *
+ * The first part of the function is _not_ normal overlay usage; it is
+ * finishing splicing the base overlay device tree into the live tree.
+ */
+static __init void of_unittest_overlay_high_level(void)
+{
+ struct device_node *last_sibling;
+ struct device_node *np;
+ struct device_node *of_symbols;
+ struct device_node *overlay_base_symbols;
+ struct device_node **pprev;
+ struct property *prop;
+ int ret;
+
+ if (!overlay_base_root) {
+ unittest(0, "overlay_base_root not initialized\n");
+ return;
+ }
+
+ /*
+ * Could not fixup phandles in unittest_unflatten_overlay_base()
+ * because kmalloc() was not yet available.
+ */
+ of_resolve_phandles(overlay_base_root);
+
+ /*
+ * do not allow overlay_base to duplicate any node already in the
+ * tree; this greatly simplifies the code
+ */
+
+ /*
+ * remove overlay_base_root node "__local_fixups__", after
+ * being used by of_resolve_phandles()
+ */
+ pprev = &overlay_base_root->child;
+ for (np = overlay_base_root->child; np; np = np->sibling) {
+ if (!of_node_cmp(np->name, "__local_fixups__")) {
+ *pprev = np->sibling;
+ break;
+ }
+ pprev = &np->sibling;
+ }
+
+ /* remove overlay_base_root node "__symbols__" if in live tree */
+ of_symbols = of_get_child_by_name(of_root, "__symbols__");
+ if (of_symbols) {
+ /* will have to graft properties from node into live tree */
+ pprev = &overlay_base_root->child;
+ for (np = overlay_base_root->child; np; np = np->sibling) {
+ if (!of_node_cmp(np->name, "__symbols__")) {
+ overlay_base_symbols = np;
+ *pprev = np->sibling;
+ break;
+ }
+ pprev = &np->sibling;
+ }
+ }
+
+ for (np = overlay_base_root->child; np; np = np->sibling) {
+ if (of_get_child_by_name(of_root, np->name)) {
+ unittest(0, "illegal node name in overlay_base %s",
+ np->name);
+ return;
+ }
+ }
+
+ /*
+ * overlay 'overlay_base' is not allowed to have root
+ * properties, so we only need to splice nodes into the main device tree.
+ *
+ * The root node of *overlay_base_root will not be freed; it is lost
+ * memory.
+ */
+
+ for (np = overlay_base_root->child; np; np = np->sibling)
+ np->parent = of_root;
+
+ mutex_lock(&of_mutex);
+
+ for (last_sibling = np = of_root->child; np; np = np->sibling)
+ last_sibling = np;
+
+ if (last_sibling)
+ last_sibling->sibling = overlay_base_root->child;
+ else
+ of_root->child = overlay_base_root->child;
+
+ for_each_of_allnodes_from(overlay_base_root, np)
+ __of_attach_node_sysfs(np);
+
+ if (of_symbols) {
+ for_each_property_of_node(overlay_base_symbols, prop) {
+ ret = __of_add_property(of_symbols, prop);
+ if (ret) {
+ unittest(0,
+ "duplicate property '%s' in overlay_base node __symbols__",
+ prop->name);
+ goto err_unlock;
+ }
+ ret = __of_add_property_sysfs(of_symbols, prop);
+ if (ret) {
+ unittest(0,
+ "unable to add property '%s' in overlay_base node __symbols__ to sysfs",
+ prop->name);
+ goto err_unlock;
+ }
+ }
+ }
+
+ mutex_unlock(&of_mutex);
+
+
+ /* now do the normal overlay usage test */
+
+ unittest(overlay_data_add(1),
+ "Adding overlay 'overlay' failed\n");
+
+ unittest(overlay_data_add(2),
+ "Adding overlay 'overlay_bad_phandle' failed\n");
+ return;
+
+err_unlock:
+ mutex_unlock(&of_mutex);
+}
+
+#else
+
+static inline __init void of_unittest_overlay_high_level(void) {}
+
+#endif
+
static int __init of_unittest(void)
{
struct device_node *np;
@@ -1962,6 +2281,8 @@ static int __init of_unittest(void)
/* Double check linkage after removing testcase data */
of_unittest_check_tree_linkage();
+ of_unittest_overlay_high_level();
+
pr_info("end of unittest - %i passed, %i failed\n",
unittest_results.passed, unittest_results.failed);
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index df141420c902..e0cacb7b8563 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -134,3 +134,5 @@ config PCI_HYPERV
source "drivers/pci/hotplug/Kconfig"
source "drivers/pci/dwc/Kconfig"
source "drivers/pci/host/Kconfig"
+source "drivers/pci/endpoint/Kconfig"
+source "drivers/pci/switch/Kconfig"
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index 8db5079f09a7..462c1f5f5546 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -4,7 +4,7 @@
obj-y += access.o bus.o probe.o host-bridge.o remove.o pci.o \
pci-driver.o search.o pci-sysfs.o rom.o setup-res.o \
- irq.o vpd.o setup-bus.o vc.o
+ irq.o vpd.o setup-bus.o vc.o mmap.o
obj-$(CONFIG_PROC_FS) += proc.o
obj-$(CONFIG_SYSFS) += slot.o
@@ -68,3 +68,4 @@ ccflags-$(CONFIG_PCI_DEBUG) := -DDEBUG
# PCI host controller drivers
obj-y += host/
+obj-y += switch/
diff --git a/drivers/pci/access.c b/drivers/pci/access.c
index 8b7382705bf2..74cf5fffb1e1 100644
--- a/drivers/pci/access.c
+++ b/drivers/pci/access.c
@@ -629,7 +629,7 @@ void pci_vpd_release(struct pci_dev *dev)
*
* When access is locked, any userspace reads or writes to config
* space and concurrent lock requests will sleep until access is
- * allowed via pci_cfg_access_unlocked again.
+ * allowed via pci_cfg_access_unlock() again.
*/
void pci_cfg_access_lock(struct pci_dev *dev)
{
@@ -700,7 +700,8 @@ static bool pcie_downstream_port(const struct pci_dev *dev)
int type = pci_pcie_type(dev);
return type == PCI_EXP_TYPE_ROOT_PORT ||
- type == PCI_EXP_TYPE_DOWNSTREAM;
+ type == PCI_EXP_TYPE_DOWNSTREAM ||
+ type == PCI_EXP_TYPE_PCIE_BRIDGE;
}
bool pcie_cap_has_lnkctl(const struct pci_dev *dev)
@@ -890,3 +891,59 @@ int pcie_capability_clear_and_set_dword(struct pci_dev *dev, int pos,
return ret;
}
EXPORT_SYMBOL(pcie_capability_clear_and_set_dword);
+
+int pci_read_config_byte(const struct pci_dev *dev, int where, u8 *val)
+{
+ if (pci_dev_is_disconnected(dev)) {
+ *val = ~0;
+ return -ENODEV;
+ }
+ return pci_bus_read_config_byte(dev->bus, dev->devfn, where, val);
+}
+EXPORT_SYMBOL(pci_read_config_byte);
+
+int pci_read_config_word(const struct pci_dev *dev, int where, u16 *val)
+{
+ if (pci_dev_is_disconnected(dev)) {
+ *val = ~0;
+ return -ENODEV;
+ }
+ return pci_bus_read_config_word(dev->bus, dev->devfn, where, val);
+}
+EXPORT_SYMBOL(pci_read_config_word);
+
+int pci_read_config_dword(const struct pci_dev *dev, int where,
+ u32 *val)
+{
+ if (pci_dev_is_disconnected(dev)) {
+ *val = ~0;
+ return -ENODEV;
+ }
+ return pci_bus_read_config_dword(dev->bus, dev->devfn, where, val);
+}
+EXPORT_SYMBOL(pci_read_config_dword);
+
+int pci_write_config_byte(const struct pci_dev *dev, int where, u8 val)
+{
+ if (pci_dev_is_disconnected(dev))
+ return -ENODEV;
+ return pci_bus_write_config_byte(dev->bus, dev->devfn, where, val);
+}
+EXPORT_SYMBOL(pci_write_config_byte);
+
+int pci_write_config_word(const struct pci_dev *dev, int where, u16 val)
+{
+ if (pci_dev_is_disconnected(dev))
+ return -ENODEV;
+ return pci_bus_write_config_word(dev->bus, dev->devfn, where, val);
+}
+EXPORT_SYMBOL(pci_write_config_word);
+
+int pci_write_config_dword(const struct pci_dev *dev, int where,
+ u32 val)
+{
+ if (pci_dev_is_disconnected(dev))
+ return -ENODEV;
+ return pci_bus_write_config_dword(dev->bus, dev->devfn, where, val);
+}
+EXPORT_SYMBOL(pci_write_config_dword);
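An illustrative caller, not taken from the patch: the usual config-read pattern now fails fast with all-ones data once the device has been flagged as disconnected:

#include <linux/pci.h>

static u16 example_read_vendor(struct pci_dev *pdev)
{
	u16 vendor;

	if (pci_read_config_word(pdev, PCI_VENDOR_ID, &vendor))
		return 0xffff;	/* disconnected: *val was set to ~0, -ENODEV returned */

	return vendor;
}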
diff --git a/drivers/pci/dwc/Kconfig b/drivers/pci/dwc/Kconfig
index d2d2ba5b8a68..b7e15526d676 100644
--- a/drivers/pci/dwc/Kconfig
+++ b/drivers/pci/dwc/Kconfig
@@ -9,16 +9,44 @@ config PCIE_DW_HOST
depends on PCI_MSI_IRQ_DOMAIN
select PCIE_DW
+config PCIE_DW_EP
+ bool
+ depends on PCI_ENDPOINT
+ select PCIE_DW
+
config PCI_DRA7XX
bool "TI DRA7xx PCIe controller"
- depends on PCI
+ depends on (PCI && PCI_MSI_IRQ_DOMAIN) || PCI_ENDPOINT
depends on OF && HAS_IOMEM && TI_PIPE3
+ help
+ Enables support for the PCIe controller in the DRA7xx SoC. There
+ are two instances of the PCIe controller in DRA7xx. This controller can
+ work either as an EP or an RC. To enable host-specific features,
+ select PCI_DRA7XX_HOST; to enable device-specific (endpoint) features,
+ select PCI_DRA7XX_EP. This uses the DesignWare core.
+
+if PCI_DRA7XX
+
+config PCI_DRA7XX_HOST
+ bool "PCI DRA7xx Host Mode"
+ depends on PCI
depends on PCI_MSI_IRQ_DOMAIN
select PCIE_DW_HOST
+ default y
help
- Enables support for the PCIe controller in the DRA7xx SoC. There
- are two instances of PCIe controller in DRA7xx. This controller can
- act both as EP and RC. This reuses the Designware core.
+ Enables support for the PCIe controller in the DRA7xx SoC to work in
+ host mode.
+
+config PCI_DRA7XX_EP
+ bool "PCI DRA7xx Endpoint Mode"
+ depends on PCI_ENDPOINT
+ select PCIE_DW_EP
+ help
+ Enables support for the PCIe controller in the DRA7xx SoC to work in
+ endpoint mode.
+
+endif
config PCIE_DW_PLAT
bool "Platform bus based DesignWare PCIe Controller"
diff --git a/drivers/pci/dwc/Makefile b/drivers/pci/dwc/Makefile
index a2df13c28798..f31a8596442a 100644
--- a/drivers/pci/dwc/Makefile
+++ b/drivers/pci/dwc/Makefile
@@ -1,7 +1,10 @@
obj-$(CONFIG_PCIE_DW) += pcie-designware.o
obj-$(CONFIG_PCIE_DW_HOST) += pcie-designware-host.o
+obj-$(CONFIG_PCIE_DW_EP) += pcie-designware-ep.o
obj-$(CONFIG_PCIE_DW_PLAT) += pcie-designware-plat.o
-obj-$(CONFIG_PCI_DRA7XX) += pci-dra7xx.o
+ifneq ($(filter y,$(CONFIG_PCI_DRA7XX_HOST) $(CONFIG_PCI_DRA7XX_EP)),)
+ obj-$(CONFIG_PCI_DRA7XX) += pci-dra7xx.o
+endif
obj-$(CONFIG_PCI_EXYNOS) += pci-exynos.o
obj-$(CONFIG_PCI_IMX6) += pci-imx6.o
obj-$(CONFIG_PCIE_SPEAR13XX) += pcie-spear13xx.o
diff --git a/drivers/pci/dwc/pci-dra7xx.c b/drivers/pci/dwc/pci-dra7xx.c
index 0984baff07e3..8decf46cf525 100644
--- a/drivers/pci/dwc/pci-dra7xx.c
+++ b/drivers/pci/dwc/pci-dra7xx.c
@@ -10,12 +10,14 @@
* published by the Free Software Foundation.
*/
+#include <linux/delay.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/init.h>
+#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
@@ -24,6 +26,8 @@
#include <linux/pm_runtime.h>
#include <linux/resource.h>
#include <linux/types.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
#include "pcie-designware.h"
@@ -57,6 +61,11 @@
#define MSI BIT(4)
#define LEG_EP_INTERRUPTS (INTA | INTB | INTC | INTD)
+#define PCIECTRL_TI_CONF_DEVICE_TYPE 0x0100
+#define DEVICE_TYPE_EP 0x0
+#define DEVICE_TYPE_LEG_EP 0x1
+#define DEVICE_TYPE_RC 0x4
+
#define PCIECTRL_DRA7XX_CONF_DEVICE_CMD 0x0104
#define LTSSM_EN 0x1
@@ -66,6 +75,13 @@
#define EXP_CAP_ID_OFFSET 0x70
+#define PCIECTRL_TI_CONF_INTX_ASSERT 0x0124
+#define PCIECTRL_TI_CONF_INTX_DEASSERT 0x0128
+
+#define PCIECTRL_TI_CONF_MSI_XMT 0x012c
+#define MSI_REQ_GRANT BIT(0)
+#define MSI_VECTOR_SHIFT 7
+
struct dra7xx_pcie {
struct dw_pcie *pci;
void __iomem *base; /* DT ti_conf */
@@ -73,6 +89,11 @@ struct dra7xx_pcie {
struct phy **phy;
int link_gen;
struct irq_domain *irq_domain;
+ enum dw_pcie_device_mode mode;
+};
+
+struct dra7xx_pcie_of_data {
+ enum dw_pcie_device_mode mode;
};
#define to_dra7xx_pcie(x) dev_get_drvdata((x)->dev)
@@ -88,6 +109,11 @@ static inline void dra7xx_pcie_writel(struct dra7xx_pcie *pcie, u32 offset,
writel(value, pcie->base + offset);
}
+static u64 dra7xx_pcie_cpu_addr_fixup(u64 pci_addr)
+{
+ return pci_addr & DRA7XX_CPU_TO_BUS_ADDR;
+}
+
static int dra7xx_pcie_link_up(struct dw_pcie *pci)
{
struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
@@ -96,9 +122,19 @@ static int dra7xx_pcie_link_up(struct dw_pcie *pci)
return !!(reg & LINK_UP);
}
-static int dra7xx_pcie_establish_link(struct dra7xx_pcie *dra7xx)
+static void dra7xx_pcie_stop_link(struct dw_pcie *pci)
{
- struct dw_pcie *pci = dra7xx->pci;
+ struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
+ u32 reg;
+
+ reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD);
+ reg &= ~LTSSM_EN;
+ dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg);
+}
+
+static int dra7xx_pcie_establish_link(struct dw_pcie *pci)
+{
+ struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
struct device *dev = pci->dev;
u32 reg;
u32 exp_cap_off = EXP_CAP_ID_OFFSET;
@@ -132,34 +168,42 @@ static int dra7xx_pcie_establish_link(struct dra7xx_pcie *dra7xx)
reg |= LTSSM_EN;
dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg);
- return dw_pcie_wait_for_link(pci);
+ return 0;
}
-static void dra7xx_pcie_enable_interrupts(struct dra7xx_pcie *dra7xx)
+static void dra7xx_pcie_enable_msi_interrupts(struct dra7xx_pcie *dra7xx)
{
- dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN,
- ~INTERRUPTS);
- dra7xx_pcie_writel(dra7xx,
- PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MAIN, INTERRUPTS);
dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI,
~LEG_EP_INTERRUPTS & ~MSI);
- dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MSI,
+
+ dra7xx_pcie_writel(dra7xx,
+ PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MSI,
MSI | LEG_EP_INTERRUPTS);
}
+static void dra7xx_pcie_enable_wrapper_interrupts(struct dra7xx_pcie *dra7xx)
+{
+ dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN,
+ ~INTERRUPTS);
+ dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MAIN,
+ INTERRUPTS);
+}
+
+static void dra7xx_pcie_enable_interrupts(struct dra7xx_pcie *dra7xx)
+{
+ dra7xx_pcie_enable_wrapper_interrupts(dra7xx);
+ dra7xx_pcie_enable_msi_interrupts(dra7xx);
+}
+
static void dra7xx_pcie_host_init(struct pcie_port *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
- pp->io_base &= DRA7XX_CPU_TO_BUS_ADDR;
- pp->mem_base &= DRA7XX_CPU_TO_BUS_ADDR;
- pp->cfg0_base &= DRA7XX_CPU_TO_BUS_ADDR;
- pp->cfg1_base &= DRA7XX_CPU_TO_BUS_ADDR;
-
dw_pcie_setup_rc(pp);
- dra7xx_pcie_establish_link(dra7xx);
+ dra7xx_pcie_establish_link(pci);
+ dw_pcie_wait_for_link(pci);
dw_pcie_msi_init(pp);
dra7xx_pcie_enable_interrupts(dra7xx);
}
@@ -237,6 +281,7 @@ static irqreturn_t dra7xx_pcie_irq_handler(int irq, void *arg)
struct dra7xx_pcie *dra7xx = arg;
struct dw_pcie *pci = dra7xx->pci;
struct device *dev = pci->dev;
+ struct dw_pcie_ep *ep = &pci->ep;
u32 reg;
reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN);
@@ -273,8 +318,11 @@ static irqreturn_t dra7xx_pcie_irq_handler(int irq, void *arg)
if (reg & LINK_REQ_RST)
dev_dbg(dev, "Link Request Reset\n");
- if (reg & LINK_UP_EVT)
+ if (reg & LINK_UP_EVT) {
+ if (dra7xx->mode == DW_PCIE_EP_TYPE)
+ dw_pcie_ep_linkup(ep);
dev_dbg(dev, "Link-up state change\n");
+ }
if (reg & CFG_BME_EVT)
dev_dbg(dev, "CFG 'Bus Master Enable' change\n");
@@ -287,6 +335,94 @@ static irqreturn_t dra7xx_pcie_irq_handler(int irq, void *arg)
return IRQ_HANDLED;
}
+static void dra7xx_pcie_ep_init(struct dw_pcie_ep *ep)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
+
+ dra7xx_pcie_enable_wrapper_interrupts(dra7xx);
+}
+
+static void dra7xx_pcie_raise_legacy_irq(struct dra7xx_pcie *dra7xx)
+{
+ dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_INTX_ASSERT, 0x1);
+ mdelay(1);
+ dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_INTX_DEASSERT, 0x1);
+}
+
+static void dra7xx_pcie_raise_msi_irq(struct dra7xx_pcie *dra7xx,
+ u8 interrupt_num)
+{
+ u32 reg;
+
+ reg = (interrupt_num - 1) << MSI_VECTOR_SHIFT;
+ reg |= MSI_REQ_GRANT;
+ dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_MSI_XMT, reg);
+}
+
+static int dra7xx_pcie_raise_irq(struct dw_pcie_ep *ep,
+ enum pci_epc_irq_type type, u8 interrupt_num)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
+
+ switch (type) {
+ case PCI_EPC_IRQ_LEGACY:
+ dra7xx_pcie_raise_legacy_irq(dra7xx);
+ break;
+ case PCI_EPC_IRQ_MSI:
+ dra7xx_pcie_raise_msi_irq(dra7xx, interrupt_num);
+ break;
+ default:
+ dev_err(pci->dev, "UNKNOWN IRQ type\n");
+ }
+
+ return 0;
+}
+
+static struct dw_pcie_ep_ops pcie_ep_ops = {
+ .ep_init = dra7xx_pcie_ep_init,
+ .raise_irq = dra7xx_pcie_raise_irq,
+};
+
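A hypothetical function-driver-side sketch, assuming the pci_epc_raise_irq() API introduced by this endpoint series; the call is routed by the EPC core into dra7xx_pcie_raise_irq() above, and the names here are illustrative only:

#include <linux/pci-epc.h>
#include <linux/pci-epf.h>

static void example_notify_host(struct pci_epf *epf)
{
	/* MSI vector 1; PCI_EPC_IRQ_LEGACY would pulse INTx via the TI_CONF regs */
	pci_epc_raise_irq(epf->epc, PCI_EPC_IRQ_MSI, 1);
}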
+static int __init dra7xx_add_pcie_ep(struct dra7xx_pcie *dra7xx,
+ struct platform_device *pdev)
+{
+ int ret;
+ struct dw_pcie_ep *ep;
+ struct resource *res;
+ struct device *dev = &pdev->dev;
+ struct dw_pcie *pci = dra7xx->pci;
+
+ ep = &pci->ep;
+ ep->ops = &pcie_ep_ops;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ep_dbics");
+ pci->dbi_base = devm_ioremap(dev, res->start, resource_size(res));
+ if (!pci->dbi_base)
+ return -ENOMEM;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ep_dbics2");
+ pci->dbi_base2 = devm_ioremap(dev, res->start, resource_size(res));
+ if (!pci->dbi_base2)
+ return -ENOMEM;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
+ if (!res)
+ return -EINVAL;
+
+ ep->phys_base = res->start;
+ ep->addr_size = resource_size(res);
+
+ ret = dw_pcie_ep_init(ep);
+ if (ret) {
+ dev_err(dev, "failed to initialize endpoint\n");
+ return ret;
+ }
+
+ return 0;
+}
+
static int __init dra7xx_add_pcie_port(struct dra7xx_pcie *dra7xx,
struct platform_device *pdev)
{
@@ -329,6 +465,9 @@ static int __init dra7xx_add_pcie_port(struct dra7xx_pcie *dra7xx,
}
static const struct dw_pcie_ops dw_pcie_ops = {
+ .cpu_addr_fixup = dra7xx_pcie_cpu_addr_fixup,
+ .start_link = dra7xx_pcie_establish_link,
+ .stop_link = dra7xx_pcie_stop_link,
.link_up = dra7xx_pcie_link_up,
};
@@ -371,6 +510,68 @@ err_phy:
return ret;
}
+static const struct dra7xx_pcie_of_data dra7xx_pcie_rc_of_data = {
+ .mode = DW_PCIE_RC_TYPE,
+};
+
+static const struct dra7xx_pcie_of_data dra7xx_pcie_ep_of_data = {
+ .mode = DW_PCIE_EP_TYPE,
+};
+
+static const struct of_device_id of_dra7xx_pcie_match[] = {
+ {
+ .compatible = "ti,dra7-pcie",
+ .data = &dra7xx_pcie_rc_of_data,
+ },
+ {
+ .compatible = "ti,dra7-pcie-ep",
+ .data = &dra7xx_pcie_ep_of_data,
+ },
+ {},
+};
+
+/*
+ * dra7xx_pcie_ep_unaligned_memaccess: workaround for AM572x/AM571x Errata i870
+ * @dra7xx: the dra7xx device where the workaround should be applied
+ *
+ * Accesses to the PCIe slave port that are not 32-bit aligned will result
+ * in incorrect mapping to the TLP Address and Byte Enable fields. Therefore,
+ * byte and half-word accesses are not possible at byte offsets 0x1, 0x2, or
+ * 0x3.
+ *
+ * To avoid this issue set PCIE_SS1_AXI2OCP_LEGACY_MODE_ENABLE to 1.
+ */
+static int dra7xx_pcie_ep_unaligned_memaccess(struct device *dev)
+{
+ int ret;
+ struct device_node *np = dev->of_node;
+ struct of_phandle_args args;
+ struct regmap *regmap;
+
+ regmap = syscon_regmap_lookup_by_phandle(np,
+ "ti,syscon-unaligned-access");
+ if (IS_ERR(regmap)) {
+ dev_dbg(dev, "can't get ti,syscon-unaligned-access\n");
+ return -EINVAL;
+ }
+
+ ret = of_parse_phandle_with_fixed_args(np, "ti,syscon-unaligned-access",
+ 2, 0, &args);
+ if (ret) {
+ dev_err(dev, "failed to parse ti,syscon-unaligned-access\n");
+ return ret;
+ }
+
+ ret = regmap_update_bits(regmap, args.args[0], args.args[1],
+ args.args[1]);
+ if (ret)
+ dev_err(dev, "failed to enable unaligned access\n");
+
+ of_node_put(args.np);
+
+ return ret;
+}
+
static int __init dra7xx_pcie_probe(struct platform_device *pdev)
{
u32 reg;
@@ -388,6 +589,16 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev)
struct device_node *np = dev->of_node;
char name[10];
struct gpio_desc *reset;
+ const struct of_device_id *match;
+ const struct dra7xx_pcie_of_data *data;
+ enum dw_pcie_device_mode mode;
+
+ match = of_match_device(of_match_ptr(of_dra7xx_pcie_match), dev);
+ if (!match)
+ return -EINVAL;
+
+ data = (struct dra7xx_pcie_of_data *)match->data;
+ mode = (enum dw_pcie_device_mode)data->mode;
dra7xx = devm_kzalloc(dev, sizeof(*dra7xx), GFP_KERNEL);
if (!dra7xx)
@@ -409,13 +620,6 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev)
return -EINVAL;
}
- ret = devm_request_irq(dev, irq, dra7xx_pcie_irq_handler,
- IRQF_SHARED, "dra7xx-pcie-main", dra7xx);
- if (ret) {
- dev_err(dev, "failed to request irq\n");
- return ret;
- }
-
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ti_conf");
base = devm_ioremap_nocache(dev, res->start, resource_size(res));
if (!base)
@@ -473,9 +677,37 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev)
if (dra7xx->link_gen < 0 || dra7xx->link_gen > 2)
dra7xx->link_gen = 2;
- ret = dra7xx_add_pcie_port(dra7xx, pdev);
- if (ret < 0)
+ switch (mode) {
+ case DW_PCIE_RC_TYPE:
+ dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_DEVICE_TYPE,
+ DEVICE_TYPE_RC);
+ ret = dra7xx_add_pcie_port(dra7xx, pdev);
+ if (ret < 0)
+ goto err_gpio;
+ break;
+ case DW_PCIE_EP_TYPE:
+ dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_DEVICE_TYPE,
+ DEVICE_TYPE_EP);
+
+ ret = dra7xx_pcie_ep_unaligned_memaccess(dev);
+ if (ret)
+ goto err_gpio;
+
+ ret = dra7xx_add_pcie_ep(dra7xx, pdev);
+ if (ret < 0)
+ goto err_gpio;
+ break;
+ default:
+ dev_err(dev, "INVALID device type %d\n", mode);
+ }
+ dra7xx->mode = mode;
+
+ ret = devm_request_irq(dev, irq, dra7xx_pcie_irq_handler,
+ IRQF_SHARED, "dra7xx-pcie-main", dra7xx);
+ if (ret) {
+ dev_err(dev, "failed to request irq\n");
goto err_gpio;
+ }
return 0;
@@ -496,6 +728,9 @@ static int dra7xx_pcie_suspend(struct device *dev)
struct dw_pcie *pci = dra7xx->pci;
u32 val;
+ if (dra7xx->mode != DW_PCIE_RC_TYPE)
+ return 0;
+
/* clear MSE */
val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
val &= ~PCI_COMMAND_MEMORY;
@@ -510,6 +745,9 @@ static int dra7xx_pcie_resume(struct device *dev)
struct dw_pcie *pci = dra7xx->pci;
u32 val;
+ if (dra7xx->mode != DW_PCIE_RC_TYPE)
+ return 0;
+
/* set MSE */
val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
val |= PCI_COMMAND_MEMORY;
@@ -548,11 +786,6 @@ static const struct dev_pm_ops dra7xx_pcie_pm_ops = {
dra7xx_pcie_resume_noirq)
};
-static const struct of_device_id of_dra7xx_pcie_match[] = {
- { .compatible = "ti,dra7-pcie", },
- {},
-};
-
static struct platform_driver dra7xx_pcie_driver = {
.driver = {
.name = "dra7-pcie",
diff --git a/drivers/pci/dwc/pci-exynos.c b/drivers/pci/dwc/pci-exynos.c
index 44f774c12fb2..546082ad5a3f 100644
--- a/drivers/pci/dwc/pci-exynos.c
+++ b/drivers/pci/dwc/pci-exynos.c
@@ -521,23 +521,25 @@ static void exynos_pcie_enable_interrupts(struct exynos_pcie *ep)
exynos_pcie_msi_init(ep);
}
-static u32 exynos_pcie_readl_dbi(struct dw_pcie *pci, u32 reg)
+static u32 exynos_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base,
+ u32 reg, size_t size)
{
struct exynos_pcie *ep = to_exynos_pcie(pci);
u32 val;
exynos_pcie_sideband_dbi_r_mode(ep, true);
- val = readl(pci->dbi_base + reg);
+ dw_pcie_read(base + reg, size, &val);
exynos_pcie_sideband_dbi_r_mode(ep, false);
return val;
}
-static void exynos_pcie_writel_dbi(struct dw_pcie *pci, u32 reg, u32 val)
+static void exynos_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base,
+ u32 reg, size_t size, u32 val)
{
struct exynos_pcie *ep = to_exynos_pcie(pci);
exynos_pcie_sideband_dbi_w_mode(ep, true);
- writel(val, pci->dbi_base + reg);
+ dw_pcie_write(base + reg, size, val);
exynos_pcie_sideband_dbi_w_mode(ep, false);
}
@@ -644,8 +646,8 @@ static int __init exynos_add_pcie_port(struct exynos_pcie *ep,
}
static const struct dw_pcie_ops dw_pcie_ops = {
- .readl_dbi = exynos_pcie_readl_dbi,
- .writel_dbi = exynos_pcie_writel_dbi,
+ .read_dbi = exynos_pcie_read_dbi,
+ .write_dbi = exynos_pcie_write_dbi,
.link_up = exynos_pcie_link_up,
};
diff --git a/drivers/pci/dwc/pci-imx6.c b/drivers/pci/dwc/pci-imx6.c
index 801e46cd266d..a98cba55c7f0 100644
--- a/drivers/pci/dwc/pci-imx6.c
+++ b/drivers/pci/dwc/pci-imx6.c
@@ -17,6 +17,7 @@
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
+#include <linux/mfd/syscon/imx7-iomuxc-gpr.h>
#include <linux/module.h>
#include <linux/of_gpio.h>
#include <linux/of_device.h>
@@ -27,6 +28,7 @@
#include <linux/signal.h>
#include <linux/types.h>
#include <linux/interrupt.h>
+#include <linux/reset.h>
#include "pcie-designware.h"
@@ -36,6 +38,7 @@ enum imx6_pcie_variants {
IMX6Q,
IMX6SX,
IMX6QP,
+ IMX7D,
};
struct imx6_pcie {
@@ -47,6 +50,8 @@ struct imx6_pcie {
struct clk *pcie_inbound_axi;
struct clk *pcie;
struct regmap *iomuxc_gpr;
+ struct reset_control *pciephy_reset;
+ struct reset_control *apps_reset;
enum imx6_pcie_variants variant;
u32 tx_deemph_gen1;
u32 tx_deemph_gen2_3p5db;
@@ -56,6 +61,11 @@ struct imx6_pcie {
int link_gen;
};
+/* Parameters for the waiting for PCIe PHY PLL to lock on i.MX7 */
+#define PHY_PLL_LOCK_WAIT_MAX_RETRIES 2000
+#define PHY_PLL_LOCK_WAIT_USLEEP_MIN 50
+#define PHY_PLL_LOCK_WAIT_USLEEP_MAX 200
+
/* PCIe Root Complex registers (memory-mapped) */
#define PCIE_RC_LCR 0x7c
#define PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN1 0x1
@@ -248,6 +258,10 @@ static int imx6q_pcie_abort_handler(unsigned long addr,
static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie)
{
switch (imx6_pcie->variant) {
+ case IMX7D:
+ reset_control_assert(imx6_pcie->pciephy_reset);
+ reset_control_assert(imx6_pcie->apps_reset);
+ break;
case IMX6SX:
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
IMX6SX_GPR12_PCIE_TEST_POWERDOWN,
@@ -303,11 +317,32 @@ static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie)
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
IMX6Q_GPR1_PCIE_REF_CLK_EN, 1 << 16);
break;
+ case IMX7D:
+ break;
}
return ret;
}
+static void imx7d_pcie_wait_for_phy_pll_lock(struct imx6_pcie *imx6_pcie)
+{
+ u32 val;
+ unsigned int retries;
+ struct device *dev = imx6_pcie->pci->dev;
+
+ for (retries = 0; retries < PHY_PLL_LOCK_WAIT_MAX_RETRIES; retries++) {
+ regmap_read(imx6_pcie->iomuxc_gpr, IOMUXC_GPR22, &val);
+
+ if (val & IMX7D_GPR22_PCIE_PHY_PLL_LOCKED)
+ return;
+
+ usleep_range(PHY_PLL_LOCK_WAIT_USLEEP_MIN,
+ PHY_PLL_LOCK_WAIT_USLEEP_MAX);
+ }
+
+ dev_err(dev, "PCIe PLL lock timeout\n");
+}
+
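For comparison, the same wait could plausibly be written with the generic regmap_read_poll_timeout() helper; this is only a sketch that approximates the open-coded loop's timing constants, not a change proposed by the patch:

#include <linux/regmap.h>

static void example_wait_for_phy_pll_lock(struct imx6_pcie *imx6_pcie)
{
	u32 val;

	/* poll GPR22 until the PLL-locked bit is set, or give up after the same budget */
	if (regmap_read_poll_timeout(imx6_pcie->iomuxc_gpr, IOMUXC_GPR22, val,
				     val & IMX7D_GPR22_PCIE_PHY_PLL_LOCKED,
				     PHY_PLL_LOCK_WAIT_USLEEP_MAX,
				     PHY_PLL_LOCK_WAIT_USLEEP_MAX *
				     PHY_PLL_LOCK_WAIT_MAX_RETRIES))
		dev_err(imx6_pcie->pci->dev, "PCIe PLL lock timeout\n");
}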
static void imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)
{
struct dw_pcie *pci = imx6_pcie->pci;
@@ -351,6 +386,10 @@ static void imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)
}
switch (imx6_pcie->variant) {
+ case IMX7D:
+ reset_control_deassert(imx6_pcie->pciephy_reset);
+ imx7d_pcie_wait_for_phy_pll_lock(imx6_pcie);
+ break;
case IMX6SX:
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR5,
IMX6SX_GPR5_PCIE_BTNRST_RESET, 0);
@@ -377,35 +416,44 @@ err_pcie_bus:
static void imx6_pcie_init_phy(struct imx6_pcie *imx6_pcie)
{
- if (imx6_pcie->variant == IMX6SX)
+ switch (imx6_pcie->variant) {
+ case IMX7D:
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX7D_GPR12_PCIE_PHY_REFCLK_SEL, 0);
+ break;
+ case IMX6SX:
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
IMX6SX_GPR12_PCIE_RX_EQ_MASK,
IMX6SX_GPR12_PCIE_RX_EQ_2);
+ /* FALLTHROUGH */
+ default:
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX6Q_GPR12_PCIE_CTL_2, 0 << 10);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
- IMX6Q_GPR12_PCIE_CTL_2, 0 << 10);
+ /* configure constant input signal to the pcie ctrl and phy */
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX6Q_GPR12_LOS_LEVEL, 9 << 4);
+
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+ IMX6Q_GPR8_TX_DEEMPH_GEN1,
+ imx6_pcie->tx_deemph_gen1 << 0);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+ IMX6Q_GPR8_TX_DEEMPH_GEN2_3P5DB,
+ imx6_pcie->tx_deemph_gen2_3p5db << 6);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+ IMX6Q_GPR8_TX_DEEMPH_GEN2_6DB,
+ imx6_pcie->tx_deemph_gen2_6db << 12);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+ IMX6Q_GPR8_TX_SWING_FULL,
+ imx6_pcie->tx_swing_full << 18);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+ IMX6Q_GPR8_TX_SWING_LOW,
+ imx6_pcie->tx_swing_low << 25);
+ break;
+ }
- /* configure constant input signal to the pcie ctrl and phy */
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
IMX6Q_GPR12_DEVICE_TYPE, PCI_EXP_TYPE_ROOT_PORT << 12);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
- IMX6Q_GPR12_LOS_LEVEL, 9 << 4);
-
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
- IMX6Q_GPR8_TX_DEEMPH_GEN1,
- imx6_pcie->tx_deemph_gen1 << 0);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
- IMX6Q_GPR8_TX_DEEMPH_GEN2_3P5DB,
- imx6_pcie->tx_deemph_gen2_3p5db << 6);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
- IMX6Q_GPR8_TX_DEEMPH_GEN2_6DB,
- imx6_pcie->tx_deemph_gen2_6db << 12);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
- IMX6Q_GPR8_TX_SWING_FULL,
- imx6_pcie->tx_swing_full << 18);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
- IMX6Q_GPR8_TX_SWING_LOW,
- imx6_pcie->tx_swing_low << 25);
}
static int imx6_pcie_wait_for_link(struct imx6_pcie *imx6_pcie)
@@ -469,8 +517,11 @@ static int imx6_pcie_establish_link(struct imx6_pcie *imx6_pcie)
dw_pcie_writel_dbi(pci, PCIE_RC_LCR, tmp);
/* Start LTSSM. */
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
- IMX6Q_GPR12_PCIE_CTL_2, 1 << 10);
+ if (imx6_pcie->variant == IMX7D)
+ reset_control_deassert(imx6_pcie->apps_reset);
+ else
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX6Q_GPR12_PCIE_CTL_2, 1 << 10);
ret = imx6_pcie_wait_for_link(imx6_pcie);
if (ret)
@@ -482,29 +533,40 @@ static int imx6_pcie_establish_link(struct imx6_pcie *imx6_pcie)
tmp &= ~PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK;
tmp |= PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2;
dw_pcie_writel_dbi(pci, PCIE_RC_LCR, tmp);
- } else {
- dev_info(dev, "Link: Gen2 disabled\n");
- }
-
- /*
- * Start Directed Speed Change so the best possible speed both link
- * partners support can be negotiated.
- */
- tmp = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
- tmp |= PORT_LOGIC_SPEED_CHANGE;
- dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, tmp);
- ret = imx6_pcie_wait_for_speed_change(imx6_pcie);
- if (ret) {
- dev_err(dev, "Failed to bring link up!\n");
- goto err_reset_phy;
- }
+ /*
+ * Start Directed Speed Change so the best possible
+ * speed both link partners support can be negotiated.
+ */
+ tmp = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
+ tmp |= PORT_LOGIC_SPEED_CHANGE;
+ dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, tmp);
+
+ if (imx6_pcie->variant != IMX7D) {
+ /*
+ * On i.MX7, DIRECT_SPEED_CHANGE behaves differently
+ * from i.MX6 family when no link speed transition
+ * occurs and we go Gen1 -> yep, Gen1. The difference
+ * is that, in such case, it will not be cleared by HW
+ * which will cause the following code to report false
+ * failure.
+ */
+
+ ret = imx6_pcie_wait_for_speed_change(imx6_pcie);
+ if (ret) {
+ dev_err(dev, "Failed to bring link up!\n");
+ goto err_reset_phy;
+ }
+ }
- /* Make sure link training is finished as well! */
- ret = imx6_pcie_wait_for_link(imx6_pcie);
- if (ret) {
- dev_err(dev, "Failed to bring link up!\n");
- goto err_reset_phy;
+ /* Make sure link training is finished as well! */
+ ret = imx6_pcie_wait_for_link(imx6_pcie);
+ if (ret) {
+ dev_err(dev, "Failed to bring link up!\n");
+ goto err_reset_phy;
+ }
+ } else {
+ dev_info(dev, "Link: Gen2 disabled\n");
}
tmp = dw_pcie_readl_dbi(pci, PCIE_RC_LCSR);
@@ -544,8 +606,8 @@ static struct dw_pcie_host_ops imx6_pcie_host_ops = {
.host_init = imx6_pcie_host_init,
};
-static int __init imx6_add_pcie_port(struct imx6_pcie *imx6_pcie,
- struct platform_device *pdev)
+static int imx6_add_pcie_port(struct imx6_pcie *imx6_pcie,
+ struct platform_device *pdev)
{
struct dw_pcie *pci = imx6_pcie->pci;
struct pcie_port *pp = &pci->pp;
@@ -585,7 +647,7 @@ static const struct dw_pcie_ops dw_pcie_ops = {
.link_up = imx6_pcie_link_up,
};
-static int __init imx6_pcie_probe(struct platform_device *pdev)
+static int imx6_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct dw_pcie *pci;
@@ -609,10 +671,6 @@ static int __init imx6_pcie_probe(struct platform_device *pdev)
imx6_pcie->variant =
(enum imx6_pcie_variants)of_device_get_match_data(dev);
- /* Added for PCI abort handling */
- hook_fault_code(16 + 6, imx6q_pcie_abort_handler, SIGBUS, 0,
- "imprecise external abort");
-
dbi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
pci->dbi_base = devm_ioremap_resource(dev, dbi_base);
if (IS_ERR(pci->dbi_base))
@@ -632,6 +690,8 @@ static int __init imx6_pcie_probe(struct platform_device *pdev)
dev_err(dev, "unable to get reset gpio\n");
return ret;
}
+ } else if (imx6_pcie->reset_gpio == -EPROBE_DEFER) {
+ return imx6_pcie->reset_gpio;
}
/* Fetch clocks */
@@ -653,13 +713,31 @@ static int __init imx6_pcie_probe(struct platform_device *pdev)
return PTR_ERR(imx6_pcie->pcie);
}
- if (imx6_pcie->variant == IMX6SX) {
+ switch (imx6_pcie->variant) {
+ case IMX6SX:
imx6_pcie->pcie_inbound_axi = devm_clk_get(dev,
"pcie_inbound_axi");
if (IS_ERR(imx6_pcie->pcie_inbound_axi)) {
dev_err(dev, "pcie_inbound_axi clock missing or invalid\n");
return PTR_ERR(imx6_pcie->pcie_inbound_axi);
}
+ break;
+ case IMX7D:
+ imx6_pcie->pciephy_reset = devm_reset_control_get(dev,
+ "pciephy");
+ if (IS_ERR(imx6_pcie->pciephy_reset)) {
+ dev_err(dev, "Failed to get PCIEPHY reset control\n");
+ return PTR_ERR(imx6_pcie->pciephy_reset);
+ }
+
+ imx6_pcie->apps_reset = devm_reset_control_get(dev, "apps");
+ if (IS_ERR(imx6_pcie->apps_reset)) {
+ dev_err(dev, "Failed to get PCIE APPS reset control\n");
+ return PTR_ERR(imx6_pcie->apps_reset);
+ }
+ break;
+ default:
+ break;
}
/* Grab GPR config register range */
@@ -718,6 +796,7 @@ static const struct of_device_id imx6_pcie_of_match[] = {
{ .compatible = "fsl,imx6q-pcie", .data = (void *)IMX6Q, },
{ .compatible = "fsl,imx6sx-pcie", .data = (void *)IMX6SX, },
{ .compatible = "fsl,imx6qp-pcie", .data = (void *)IMX6QP, },
+ { .compatible = "fsl,imx7d-pcie", .data = (void *)IMX7D, },
{},
};
@@ -725,12 +804,24 @@ static struct platform_driver imx6_pcie_driver = {
.driver = {
.name = "imx6q-pcie",
.of_match_table = imx6_pcie_of_match,
+ .suppress_bind_attrs = true,
},
+ .probe = imx6_pcie_probe,
.shutdown = imx6_pcie_shutdown,
};
static int __init imx6_pcie_init(void)
{
- return platform_driver_probe(&imx6_pcie_driver, imx6_pcie_probe);
+ /*
+ * Since probe() can be deferred, we need to make sure that
+ * hook_fault_code() is not called after __init memory is freed
+ * by the kernel. Since imx6q_pcie_abort_handler() is a no-op,
+ * we can install the handler here without risking it
+ * accessing some uninitialized driver state.
+ */
+ hook_fault_code(16 + 6, imx6q_pcie_abort_handler, SIGBUS, 0,
+ "imprecise external abort");
+
+ return platform_driver_register(&imx6_pcie_driver);
}
device_initcall(imx6_pcie_init);
diff --git a/drivers/pci/dwc/pci-keystone-dw.c b/drivers/pci/dwc/pci-keystone-dw.c
index 6b396f6b4615..8bc626e640c8 100644
--- a/drivers/pci/dwc/pci-keystone-dw.c
+++ b/drivers/pci/dwc/pci-keystone-dw.c
@@ -543,7 +543,7 @@ int __init ks_dw_pcie_host_init(struct keystone_pcie *ks_pcie,
/* Index 0 is the config reg. space address */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pci->dbi_base = devm_ioremap_resource(dev, res);
+ pci->dbi_base = devm_pci_remap_cfg_resource(dev, res);
if (IS_ERR(pci->dbi_base))
return PTR_ERR(pci->dbi_base);
diff --git a/drivers/pci/dwc/pci-layerscape.c b/drivers/pci/dwc/pci-layerscape.c
index c32e392a0ae6..27d638c4e134 100644
--- a/drivers/pci/dwc/pci-layerscape.c
+++ b/drivers/pci/dwc/pci-layerscape.c
@@ -283,7 +283,7 @@ static int __init ls_pcie_probe(struct platform_device *pdev)
pcie->pci = pci;
dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
- pci->dbi_base = devm_ioremap_resource(dev, dbi_base);
+ pci->dbi_base = devm_pci_remap_cfg_resource(dev, dbi_base);
if (IS_ERR(pci->dbi_base))
return PTR_ERR(pci->dbi_base);
@@ -305,6 +305,7 @@ static struct platform_driver ls_pcie_driver = {
.driver = {
.name = "layerscape-pcie",
.of_match_table = ls_pcie_of_match,
+ .suppress_bind_attrs = true,
},
};
builtin_platform_driver_probe(ls_pcie_driver, ls_pcie_probe);
diff --git a/drivers/pci/dwc/pcie-armada8k.c b/drivers/pci/dwc/pcie-armada8k.c
index f110e3b24a26..495b023042b3 100644
--- a/drivers/pci/dwc/pcie-armada8k.c
+++ b/drivers/pci/dwc/pcie-armada8k.c
@@ -230,7 +230,7 @@ static int armada8k_pcie_probe(struct platform_device *pdev)
/* Get the dw-pcie unit configuration/control registers base. */
base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ctrl");
- pci->dbi_base = devm_ioremap_resource(dev, base);
+ pci->dbi_base = devm_pci_remap_cfg_resource(dev, base);
if (IS_ERR(pci->dbi_base)) {
dev_err(dev, "couldn't remap regs base %p\n", base);
ret = PTR_ERR(pci->dbi_base);
@@ -262,6 +262,7 @@ static struct platform_driver armada8k_pcie_driver = {
.driver = {
.name = "armada8k-pcie",
.of_match_table = of_match_ptr(armada8k_pcie_of_match),
+ .suppress_bind_attrs = true,
},
};
builtin_platform_driver(armada8k_pcie_driver);
diff --git a/drivers/pci/dwc/pcie-artpec6.c b/drivers/pci/dwc/pcie-artpec6.c
index 6d23683c0892..82a04acc42fd 100644
--- a/drivers/pci/dwc/pcie-artpec6.c
+++ b/drivers/pci/dwc/pcie-artpec6.c
@@ -78,6 +78,11 @@ static void artpec6_pcie_writel(struct artpec6_pcie *artpec6_pcie, u32 offset, u
regmap_write(artpec6_pcie->regmap, offset, val);
}
+static u64 artpec6_pcie_cpu_addr_fixup(u64 pci_addr)
+{
+ return pci_addr & ARTPEC6_CPU_TO_BUS_ADDR;
+}
+
static int artpec6_pcie_establish_link(struct artpec6_pcie *artpec6_pcie)
{
struct dw_pcie *pci = artpec6_pcie->pci;
@@ -142,11 +147,6 @@ static int artpec6_pcie_establish_link(struct artpec6_pcie *artpec6_pcie)
*/
dw_pcie_writel_dbi(pci, MISC_CONTROL_1_OFF, DBI_RO_WR_EN);
- pp->io_base &= ARTPEC6_CPU_TO_BUS_ADDR;
- pp->mem_base &= ARTPEC6_CPU_TO_BUS_ADDR;
- pp->cfg0_base &= ARTPEC6_CPU_TO_BUS_ADDR;
- pp->cfg1_base &= ARTPEC6_CPU_TO_BUS_ADDR;
-
/* setup root complex */
dw_pcie_setup_rc(pp);
@@ -235,6 +235,7 @@ static int artpec6_add_pcie_port(struct artpec6_pcie *artpec6_pcie,
}
static const struct dw_pcie_ops dw_pcie_ops = {
+ .cpu_addr_fixup = artpec6_pcie_cpu_addr_fixup,
};
static int artpec6_pcie_probe(struct platform_device *pdev)
@@ -294,6 +295,7 @@ static struct platform_driver artpec6_pcie_driver = {
.driver = {
.name = "artpec6-pcie",
.of_match_table = artpec6_pcie_of_match,
+ .suppress_bind_attrs = true,
},
};
builtin_platform_driver(artpec6_pcie_driver);
diff --git a/drivers/pci/dwc/pcie-designware-ep.c b/drivers/pci/dwc/pcie-designware-ep.c
new file mode 100644
index 000000000000..398406393f37
--- /dev/null
+++ b/drivers/pci/dwc/pcie-designware-ep.c
@@ -0,0 +1,342 @@
+/**
+ * Synopsys DesignWare PCIe Endpoint controller driver
+ *
+ * Copyright (C) 2017 Texas Instruments
+ * Author: Kishon Vijay Abraham I <kishon@ti.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 of
+ * the License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/of.h>
+
+#include "pcie-designware.h"
+#include <linux/pci-epc.h>
+#include <linux/pci-epf.h>
+
+void dw_pcie_ep_linkup(struct dw_pcie_ep *ep)
+{
+ struct pci_epc *epc = ep->epc;
+
+ pci_epc_linkup(epc);
+}
+
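+/*
+ * Clear a BAR: zero the BAR mask through the dbi2 (shadow) register space
+ * and then clear the BAR register itself.
+ */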
+static void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar)
+{
+ u32 reg;
+
+ reg = PCI_BASE_ADDRESS_0 + (4 * bar);
+ dw_pcie_writel_dbi2(pci, reg, 0x0);
+ dw_pcie_writel_dbi(pci, reg, 0x0);
+}
+
+static int dw_pcie_ep_write_header(struct pci_epc *epc,
+ struct pci_epf_header *hdr)
+{
+ struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+ dw_pcie_writew_dbi(pci, PCI_VENDOR_ID, hdr->vendorid);
+ dw_pcie_writew_dbi(pci, PCI_DEVICE_ID, hdr->deviceid);
+ dw_pcie_writeb_dbi(pci, PCI_REVISION_ID, hdr->revid);
+ dw_pcie_writeb_dbi(pci, PCI_CLASS_PROG, hdr->progif_code);
+ dw_pcie_writew_dbi(pci, PCI_CLASS_DEVICE,
+ hdr->subclass_code | hdr->baseclass_code << 8);
+ dw_pcie_writeb_dbi(pci, PCI_CACHE_LINE_SIZE,
+ hdr->cache_line_size);
+ dw_pcie_writew_dbi(pci, PCI_SUBSYSTEM_VENDOR_ID,
+ hdr->subsys_vendor_id);
+ dw_pcie_writew_dbi(pci, PCI_SUBSYSTEM_ID, hdr->subsys_id);
+ dw_pcie_writeb_dbi(pci, PCI_INTERRUPT_PIN,
+ hdr->interrupt_pin);
+
+ return 0;
+}
+
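+/*
+ * Grab a free inbound iATU window and program it so that host accesses to
+ * @bar are translated to @cpu_addr.
+ */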
+static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, enum pci_barno bar,
+ dma_addr_t cpu_addr,
+ enum dw_pcie_as_type as_type)
+{
+ int ret;
+ u32 free_win;
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+ free_win = find_first_zero_bit(&ep->ib_window_map,
+ sizeof(ep->ib_window_map));
+ if (free_win >= ep->num_ib_windows) {
+ dev_err(pci->dev, "no free inbound window\n");
+ return -EINVAL;
+ }
+
+ ret = dw_pcie_prog_inbound_atu(pci, free_win, bar, cpu_addr,
+ as_type);
+ if (ret < 0) {
+ dev_err(pci->dev, "Failed to program IB window\n");
+ return ret;
+ }
+
+ ep->bar_to_atu[bar] = free_win;
+ set_bit(free_win, &ep->ib_window_map);
+
+ return 0;
+}
+
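+/*
+ * Grab a free outbound iATU window and program it so that CPU accesses to
+ * @phys_addr are translated to the PCI address @pci_addr.
+ */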
+static int dw_pcie_ep_outbound_atu(struct dw_pcie_ep *ep, phys_addr_t phys_addr,
+ u64 pci_addr, size_t size)
+{
+ u32 free_win;
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+ free_win = find_first_zero_bit(&ep->ob_window_map,
+ sizeof(ep->ob_window_map));
+ if (free_win >= ep->num_ob_windows) {
+ dev_err(pci->dev, "no free outbound window\n");
+ return -EINVAL;
+ }
+
+ dw_pcie_prog_outbound_atu(pci, free_win, PCIE_ATU_TYPE_MEM,
+ phys_addr, pci_addr, size);
+
+ set_bit(free_win, &ep->ob_window_map);
+ ep->outbound_addr[free_win] = phys_addr;
+
+ return 0;
+}
+
+static void dw_pcie_ep_clear_bar(struct pci_epc *epc, enum pci_barno bar)
+{
+ struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ u32 atu_index = ep->bar_to_atu[bar];
+
+ dw_pcie_ep_reset_bar(pci, bar);
+
+ dw_pcie_disable_atu(pci, atu_index, DW_PCIE_REGION_INBOUND);
+ clear_bit(atu_index, &ep->ib_window_map);
+}
+
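+/*
+ * Program an inbound window for the BAR, write (size - 1) to the shadow
+ * (dbi2) BAR register to set up the BAR mask and write the flags to the
+ * BAR register itself.
+ */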
+static int dw_pcie_ep_set_bar(struct pci_epc *epc, enum pci_barno bar,
+ dma_addr_t bar_phys, size_t size, int flags)
+{
+ int ret;
+ struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ enum dw_pcie_as_type as_type;
+ u32 reg = PCI_BASE_ADDRESS_0 + (4 * bar);
+
+ if (!(flags & PCI_BASE_ADDRESS_SPACE))
+ as_type = DW_PCIE_AS_MEM;
+ else
+ as_type = DW_PCIE_AS_IO;
+
+ ret = dw_pcie_ep_inbound_atu(ep, bar, bar_phys, as_type);
+ if (ret)
+ return ret;
+
+ dw_pcie_writel_dbi2(pci, reg, size - 1);
+ dw_pcie_writel_dbi(pci, reg, flags);
+
+ return 0;
+}
+
+static int dw_pcie_find_index(struct dw_pcie_ep *ep, phys_addr_t addr,
+ u32 *atu_index)
+{
+ u32 index;
+
+ for (index = 0; index < ep->num_ob_windows; index++) {
+ if (ep->outbound_addr[index] != addr)
+ continue;
+ *atu_index = index;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static void dw_pcie_ep_unmap_addr(struct pci_epc *epc, phys_addr_t addr)
+{
+ int ret;
+ u32 atu_index;
+ struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+ ret = dw_pcie_find_index(ep, addr, &atu_index);
+ if (ret < 0)
+ return;
+
+ dw_pcie_disable_atu(pci, atu_index, DW_PCIE_REGION_OUTBOUND);
+ clear_bit(atu_index, &ep->ob_window_map);
+}
+
+static int dw_pcie_ep_map_addr(struct pci_epc *epc, phys_addr_t addr,
+ u64 pci_addr, size_t size)
+{
+ int ret;
+ struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+ ret = dw_pcie_ep_outbound_atu(ep, addr, pci_addr, size);
+ if (ret) {
+ dev_err(pci->dev, "failed to enable address\n");
+ return ret;
+ }
+
+ return 0;
+}
+
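+/*
+ * Return the Multiple Message Enable field of the MSI capability, or
+ * -EINVAL if the host has not yet programmed an MSI message address.
+ */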
+static int dw_pcie_ep_get_msi(struct pci_epc *epc)
+{
+ int val;
+ u32 lower_addr;
+ u32 upper_addr;
+ struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+ val = dw_pcie_readb_dbi(pci, MSI_MESSAGE_CONTROL);
+ val = (val & MSI_CAP_MME_MASK) >> MSI_CAP_MME_SHIFT;
+
+ lower_addr = dw_pcie_readl_dbi(pci, MSI_MESSAGE_ADDR_L32);
+ upper_addr = dw_pcie_readl_dbi(pci, MSI_MESSAGE_ADDR_U32);
+
+ if (!(lower_addr || upper_addr))
+ return -EINVAL;
+
+ return val;
+}
+
+static int dw_pcie_ep_set_msi(struct pci_epc *epc, u8 encode_int)
+{
+ int val;
+ struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+ val = (encode_int << MSI_CAP_MMC_SHIFT);
+ dw_pcie_writew_dbi(pci, MSI_MESSAGE_CONTROL, val);
+
+ return 0;
+}
+
+static int dw_pcie_ep_raise_irq(struct pci_epc *epc,
+ enum pci_epc_irq_type type, u8 interrupt_num)
+{
+ struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+
+ if (!ep->ops->raise_irq)
+ return -EINVAL;
+
+ return ep->ops->raise_irq(ep, type, interrupt_num);
+}
+
+static void dw_pcie_ep_stop(struct pci_epc *epc)
+{
+ struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+ if (!pci->ops->stop_link)
+ return;
+
+ pci->ops->stop_link(pci);
+}
+
+static int dw_pcie_ep_start(struct pci_epc *epc)
+{
+ struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+ if (!pci->ops->start_link)
+ return -EINVAL;
+
+ return pci->ops->start_link(pci);
+}
+
+static const struct pci_epc_ops epc_ops = {
+ .write_header = dw_pcie_ep_write_header,
+ .set_bar = dw_pcie_ep_set_bar,
+ .clear_bar = dw_pcie_ep_clear_bar,
+ .map_addr = dw_pcie_ep_map_addr,
+ .unmap_addr = dw_pcie_ep_unmap_addr,
+ .set_msi = dw_pcie_ep_set_msi,
+ .get_msi = dw_pcie_ep_get_msi,
+ .raise_irq = dw_pcie_ep_raise_irq,
+ .start = dw_pcie_ep_start,
+ .stop = dw_pcie_ep_stop,
+};
+
+void dw_pcie_ep_exit(struct dw_pcie_ep *ep)
+{
+ struct pci_epc *epc = ep->epc;
+
+ pci_epc_mem_exit(epc);
+}
+
+int dw_pcie_ep_init(struct dw_pcie_ep *ep)
+{
+ int ret;
+ void *addr;
+ enum pci_barno bar;
+ struct pci_epc *epc;
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct device *dev = pci->dev;
+ struct device_node *np = dev->of_node;
+
+ if (!pci->dbi_base || !pci->dbi_base2) {
+ dev_err(dev, "dbi_base/deb_base2 is not populated\n");
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32(np, "num-ib-windows", &ep->num_ib_windows);
+ if (ret < 0) {
+ dev_err(dev, "unable to read *num-ib-windows* property\n");
+ return ret;
+ }
+
+ ret = of_property_read_u32(np, "num-ob-windows", &ep->num_ob_windows);
+ if (ret < 0) {
+ dev_err(dev, "unable to read *num-ob-windows* property\n");
+ return ret;
+ }
+
+ addr = devm_kzalloc(dev, sizeof(phys_addr_t) * ep->num_ob_windows,
+ GFP_KERNEL);
+ if (!addr)
+ return -ENOMEM;
+ ep->outbound_addr = addr;
+
+ for (bar = BAR_0; bar <= BAR_5; bar++)
+ dw_pcie_ep_reset_bar(pci, bar);
+
+ if (ep->ops->ep_init)
+ ep->ops->ep_init(ep);
+
+ epc = devm_pci_epc_create(dev, &epc_ops);
+ if (IS_ERR(epc)) {
+ dev_err(dev, "failed to create epc device\n");
+ return PTR_ERR(epc);
+ }
+
+ ret = of_property_read_u8(np, "max-functions", &epc->max_functions);
+ if (ret < 0)
+ epc->max_functions = 1;
+
+ ret = pci_epc_mem_init(epc, ep->phys_base, ep->addr_size);
+ if (ret < 0) {
+ dev_err(dev, "Failed to initialize address space\n");
+ return ret;
+ }
+
+ ep->epc = epc;
+ epc_set_drvdata(epc, ep);
+ dw_pcie_setup(pci);
+
+ return 0;
+}
diff --git a/drivers/pci/dwc/pcie-designware-host.c b/drivers/pci/dwc/pcie-designware-host.c
index 5ba334938b52..28ed32ba4f1b 100644
--- a/drivers/pci/dwc/pcie-designware-host.c
+++ b/drivers/pci/dwc/pcie-designware-host.c
@@ -56,24 +56,25 @@ static struct irq_chip dw_msi_irq_chip = {
/* MSI int handler */
irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
{
- unsigned long val;
+ u32 val;
int i, pos, irq;
irqreturn_t ret = IRQ_NONE;
for (i = 0; i < MAX_MSI_CTRLS; i++) {
dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_STATUS + i * 12, 4,
- (u32 *)&val);
- if (val) {
- ret = IRQ_HANDLED;
- pos = 0;
- while ((pos = find_next_bit(&val, 32, pos)) != 32) {
- irq = irq_find_mapping(pp->irq_domain,
- i * 32 + pos);
- dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_STATUS +
- i * 12, 4, 1 << pos);
- generic_handle_irq(irq);
- pos++;
- }
+ &val);
+ if (!val)
+ continue;
+
+ ret = IRQ_HANDLED;
+ pos = 0;
+ while ((pos = find_next_bit((unsigned long *) &val, 32,
+ pos)) != 32) {
+ irq = irq_find_mapping(pp->irq_domain, i * 32 + pos);
+ dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_STATUS + i * 12,
+ 4, 1 << pos);
+ generic_handle_irq(irq);
+ pos++;
}
}
@@ -338,8 +339,9 @@ int dw_pcie_host_init(struct pcie_port *pp)
}
if (!pci->dbi_base) {
- pci->dbi_base = devm_ioremap(dev, pp->cfg->start,
- resource_size(pp->cfg));
+ pci->dbi_base = devm_pci_remap_cfgspace(dev,
+ pp->cfg->start,
+ resource_size(pp->cfg));
if (!pci->dbi_base) {
dev_err(dev, "error with ioremap\n");
ret = -ENOMEM;
@@ -350,8 +352,8 @@ int dw_pcie_host_init(struct pcie_port *pp)
pp->mem_base = pp->mem->start;
if (!pp->va_cfg0_base) {
- pp->va_cfg0_base = devm_ioremap(dev, pp->cfg0_base,
- pp->cfg0_size);
+ pp->va_cfg0_base = devm_pci_remap_cfgspace(dev,
+ pp->cfg0_base, pp->cfg0_size);
if (!pp->va_cfg0_base) {
dev_err(dev, "error with ioremap in function\n");
ret = -ENOMEM;
@@ -360,7 +362,8 @@ int dw_pcie_host_init(struct pcie_port *pp)
}
if (!pp->va_cfg1_base) {
- pp->va_cfg1_base = devm_ioremap(dev, pp->cfg1_base,
+ pp->va_cfg1_base = devm_pci_remap_cfgspace(dev,
+ pp->cfg1_base,
pp->cfg1_size);
if (!pp->va_cfg1_base) {
dev_err(dev, "error with ioremap\n");
diff --git a/drivers/pci/dwc/pcie-designware-plat.c b/drivers/pci/dwc/pcie-designware-plat.c
index f20d494922ab..32091b32f6e1 100644
--- a/drivers/pci/dwc/pcie-designware-plat.c
+++ b/drivers/pci/dwc/pcie-designware-plat.c
@@ -133,6 +133,7 @@ static struct platform_driver dw_plat_pcie_driver = {
.driver = {
.name = "dw-pcie",
.of_match_table = dw_plat_pcie_of_match,
+ .suppress_bind_attrs = true,
},
.probe = dw_plat_pcie_probe,
};
diff --git a/drivers/pci/dwc/pcie-designware.c b/drivers/pci/dwc/pcie-designware.c
index 7e1fb7d6643c..0e03af279259 100644
--- a/drivers/pci/dwc/pcie-designware.c
+++ b/drivers/pci/dwc/pcie-designware.c
@@ -61,91 +61,253 @@ int dw_pcie_write(void __iomem *addr, int size, u32 val)
return PCIBIOS_SUCCESSFUL;
}
-u32 dw_pcie_readl_dbi(struct dw_pcie *pci, u32 reg)
+u32 __dw_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg,
+ size_t size)
{
- if (pci->ops->readl_dbi)
- return pci->ops->readl_dbi(pci, reg);
+ int ret;
+ u32 val;
- return readl(pci->dbi_base + reg);
+ if (pci->ops->read_dbi)
+ return pci->ops->read_dbi(pci, base, reg, size);
+
+ ret = dw_pcie_read(base + reg, size, &val);
+ if (ret)
+ dev_err(pci->dev, "read DBI address failed\n");
+
+ return val;
}
-void dw_pcie_writel_dbi(struct dw_pcie *pci, u32 reg, u32 val)
+void __dw_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg,
+ size_t size, u32 val)
{
- if (pci->ops->writel_dbi)
- pci->ops->writel_dbi(pci, reg, val);
- else
- writel(val, pci->dbi_base + reg);
+ int ret;
+
+ if (pci->ops->write_dbi) {
+ pci->ops->write_dbi(pci, base, reg, size, val);
+ return;
+ }
+
+ ret = dw_pcie_write(base + reg, size, val);
+ if (ret)
+ dev_err(pci->dev, "write DBI address failed\n");
}
-static u32 dw_pcie_readl_unroll(struct dw_pcie *pci, u32 index, u32 reg)
+static u32 dw_pcie_readl_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg)
{
u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);
return dw_pcie_readl_dbi(pci, offset + reg);
}
-static void dw_pcie_writel_unroll(struct dw_pcie *pci, u32 index, u32 reg,
- u32 val)
+static void dw_pcie_writel_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg,
+ u32 val)
{
u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);
dw_pcie_writel_dbi(pci, offset + reg, val);
}
+void dw_pcie_prog_outbound_atu_unroll(struct dw_pcie *pci, int index, int type,
+ u64 cpu_addr, u64 pci_addr, u32 size)
+{
+ u32 retries, val;
+
+ dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_BASE,
+ lower_32_bits(cpu_addr));
+ dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_BASE,
+ upper_32_bits(cpu_addr));
+ dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LIMIT,
+ lower_32_bits(cpu_addr + size - 1));
+ dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_TARGET,
+ lower_32_bits(pci_addr));
+ dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_TARGET,
+ upper_32_bits(pci_addr));
+ dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1,
+ type);
+ dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2,
+ PCIE_ATU_ENABLE);
+
+ /*
+ * Make sure ATU enable takes effect before any subsequent config
+ * and I/O accesses.
+ */
+ for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
+ val = dw_pcie_readl_ob_unroll(pci, index,
+ PCIE_ATU_UNR_REGION_CTRL2);
+ if (val & PCIE_ATU_ENABLE)
+ return;
+
+ usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX);
+ }
+ dev_err(pci->dev, "outbound iATU is not being enabled\n");
+}
+
void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type,
u64 cpu_addr, u64 pci_addr, u32 size)
{
u32 retries, val;
+ if (pci->ops->cpu_addr_fixup)
+ cpu_addr = pci->ops->cpu_addr_fixup(cpu_addr);
+
if (pci->iatu_unroll_enabled) {
- dw_pcie_writel_unroll(pci, index, PCIE_ATU_UNR_LOWER_BASE,
- lower_32_bits(cpu_addr));
- dw_pcie_writel_unroll(pci, index, PCIE_ATU_UNR_UPPER_BASE,
- upper_32_bits(cpu_addr));
- dw_pcie_writel_unroll(pci, index, PCIE_ATU_UNR_LIMIT,
- lower_32_bits(cpu_addr + size - 1));
- dw_pcie_writel_unroll(pci, index, PCIE_ATU_UNR_LOWER_TARGET,
- lower_32_bits(pci_addr));
- dw_pcie_writel_unroll(pci, index, PCIE_ATU_UNR_UPPER_TARGET,
- upper_32_bits(pci_addr));
- dw_pcie_writel_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1,
- type);
- dw_pcie_writel_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2,
- PCIE_ATU_ENABLE);
- } else {
- dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT,
- PCIE_ATU_REGION_OUTBOUND | index);
- dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_BASE,
- lower_32_bits(cpu_addr));
- dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_BASE,
- upper_32_bits(cpu_addr));
- dw_pcie_writel_dbi(pci, PCIE_ATU_LIMIT,
- lower_32_bits(cpu_addr + size - 1));
- dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET,
- lower_32_bits(pci_addr));
- dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_TARGET,
- upper_32_bits(pci_addr));
- dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, type);
- dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, PCIE_ATU_ENABLE);
+ dw_pcie_prog_outbound_atu_unroll(pci, index, type, cpu_addr,
+ pci_addr, size);
+ return;
}
+ dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT,
+ PCIE_ATU_REGION_OUTBOUND | index);
+ dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_BASE,
+ lower_32_bits(cpu_addr));
+ dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_BASE,
+ upper_32_bits(cpu_addr));
+ dw_pcie_writel_dbi(pci, PCIE_ATU_LIMIT,
+ lower_32_bits(cpu_addr + size - 1));
+ dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET,
+ lower_32_bits(pci_addr));
+ dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_TARGET,
+ upper_32_bits(pci_addr));
+ dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, type);
+ dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, PCIE_ATU_ENABLE);
+
/*
* Make sure ATU enable takes effect before any subsequent config
* and I/O accesses.
*/
for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
- if (pci->iatu_unroll_enabled)
- val = dw_pcie_readl_unroll(pci, index,
- PCIE_ATU_UNR_REGION_CTRL2);
- else
- val = dw_pcie_readl_dbi(pci, PCIE_ATU_CR2);
-
+ val = dw_pcie_readl_dbi(pci, PCIE_ATU_CR2);
if (val == PCIE_ATU_ENABLE)
return;
usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX);
}
- dev_err(pci->dev, "iATU is not being enabled\n");
+ dev_err(pci->dev, "outbound iATU is not being enabled\n");
+}
+
+static u32 dw_pcie_readl_ib_unroll(struct dw_pcie *pci, u32 index, u32 reg)
+{
+ u32 offset = PCIE_GET_ATU_INB_UNR_REG_OFFSET(index);
+
+ return dw_pcie_readl_dbi(pci, offset + reg);
+}
+
+static void dw_pcie_writel_ib_unroll(struct dw_pcie *pci, u32 index, u32 reg,
+ u32 val)
+{
+ u32 offset = PCIE_GET_ATU_INB_UNR_REG_OFFSET(index);
+
+ dw_pcie_writel_dbi(pci, offset + reg, val);
+}
+
+int dw_pcie_prog_inbound_atu_unroll(struct dw_pcie *pci, int index, int bar,
+ u64 cpu_addr, enum dw_pcie_as_type as_type)
+{
+ int type;
+ u32 retries, val;
+
+ dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_LOWER_TARGET,
+ lower_32_bits(cpu_addr));
+ dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_UPPER_TARGET,
+ upper_32_bits(cpu_addr));
+
+ switch (as_type) {
+ case DW_PCIE_AS_MEM:
+ type = PCIE_ATU_TYPE_MEM;
+ break;
+ case DW_PCIE_AS_IO:
+ type = PCIE_ATU_TYPE_IO;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1, type);
+ dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2,
+ PCIE_ATU_ENABLE |
+ PCIE_ATU_BAR_MODE_ENABLE | (bar << 8));
+
+ /*
+ * Make sure ATU enable takes effect before any subsequent config
+ * and I/O accesses.
+ */
+ for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
+ val = dw_pcie_readl_ib_unroll(pci, index,
+ PCIE_ATU_UNR_REGION_CTRL2);
+ if (val & PCIE_ATU_ENABLE)
+ return 0;
+
+ usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX);
+ }
+ dev_err(pci->dev, "inbound iATU is not being enabled\n");
+
+ return -EBUSY;
+}
+
+int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int bar,
+ u64 cpu_addr, enum dw_pcie_as_type as_type)
+{
+ int type;
+ u32 retries, val;
+
+ if (pci->iatu_unroll_enabled)
+ return dw_pcie_prog_inbound_atu_unroll(pci, index, bar,
+ cpu_addr, as_type);
+
+ dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, PCIE_ATU_REGION_INBOUND |
+ index);
+ dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET, lower_32_bits(cpu_addr));
+ dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_TARGET, upper_32_bits(cpu_addr));
+
+ switch (as_type) {
+ case DW_PCIE_AS_MEM:
+ type = PCIE_ATU_TYPE_MEM;
+ break;
+ case DW_PCIE_AS_IO:
+ type = PCIE_ATU_TYPE_IO;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, type);
+ dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, PCIE_ATU_ENABLE
+ | PCIE_ATU_BAR_MODE_ENABLE | (bar << 8));
+
+ /*
+ * Make sure ATU enable takes effect before any subsequent config
+ * and I/O accesses.
+ */
+ for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
+ val = dw_pcie_readl_dbi(pci, PCIE_ATU_CR2);
+ if (val & PCIE_ATU_ENABLE)
+ return 0;
+
+ usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX);
+ }
+ dev_err(pci->dev, "inbound iATU is not being enabled\n");
+
+ return -EBUSY;
+}
+
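+/*
+ * Point the iATU viewport at the given inbound or outbound region and
+ * disable it.
+ */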
+void dw_pcie_disable_atu(struct dw_pcie *pci, int index,
+ enum dw_pcie_region_type type)
+{
+ int region;
+
+ switch (type) {
+ case DW_PCIE_REGION_INBOUND:
+ region = PCIE_ATU_REGION_INBOUND;
+ break;
+ case DW_PCIE_REGION_OUTBOUND:
+ region = PCIE_ATU_REGION_OUTBOUND;
+ break;
+ default:
+ return;
+ }
+
+ dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, region | index);
+ dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, ~PCIE_ATU_ENABLE);
}
int dw_pcie_wait_for_link(struct dw_pcie *pci)
diff --git a/drivers/pci/dwc/pcie-designware.h b/drivers/pci/dwc/pcie-designware.h
index cd3b8713fe50..c6a840575796 100644
--- a/drivers/pci/dwc/pcie-designware.h
+++ b/drivers/pci/dwc/pcie-designware.h
@@ -18,6 +18,9 @@
#include <linux/msi.h>
#include <linux/pci.h>
+#include <linux/pci-epc.h>
+#include <linux/pci-epf.h>
+
/* Parameters for the waiting for link up routine */
#define LINK_WAIT_MAX_RETRIES 10
#define LINK_WAIT_USLEEP_MIN 90000
@@ -89,6 +92,16 @@
#define PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(region) \
((0x3 << 20) | ((region) << 9))
+#define PCIE_GET_ATU_INB_UNR_REG_OFFSET(region) \
+ ((0x3 << 20) | ((region) << 9) | (0x1 << 8))
+
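+/* Registers of the MSI capability as seen through the DBI space */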
+#define MSI_MESSAGE_CONTROL 0x52
+#define MSI_CAP_MMC_SHIFT 1
+#define MSI_CAP_MME_SHIFT 4
+#define MSI_CAP_MME_MASK (7 << MSI_CAP_MME_SHIFT)
+#define MSI_MESSAGE_ADDR_L32 0x54
+#define MSI_MESSAGE_ADDR_U32 0x58
+
/*
* Maximum number of MSI IRQs can be 256 per controller. But keep
* it 32 as of now. Probably we will never need more than 32. If needed,
@@ -99,6 +112,20 @@
struct pcie_port;
struct dw_pcie;
+struct dw_pcie_ep;
+
+enum dw_pcie_region_type {
+ DW_PCIE_REGION_UNKNOWN,
+ DW_PCIE_REGION_INBOUND,
+ DW_PCIE_REGION_OUTBOUND,
+};
+
+enum dw_pcie_device_mode {
+ DW_PCIE_UNKNOWN_TYPE,
+ DW_PCIE_EP_TYPE,
+ DW_PCIE_LEG_EP_TYPE,
+ DW_PCIE_RC_TYPE,
+};
struct dw_pcie_host_ops {
int (*rd_own_conf)(struct pcie_port *pp, int where, int size, u32 *val);
@@ -142,35 +169,116 @@ struct pcie_port {
DECLARE_BITMAP(msi_irq_in_use, MAX_MSI_IRQS);
};
+enum dw_pcie_as_type {
+ DW_PCIE_AS_UNKNOWN,
+ DW_PCIE_AS_MEM,
+ DW_PCIE_AS_IO,
+};
+
+struct dw_pcie_ep_ops {
+ void (*ep_init)(struct dw_pcie_ep *ep);
+ int (*raise_irq)(struct dw_pcie_ep *ep, enum pci_epc_irq_type type,
+ u8 interrupt_num);
+};
+
+struct dw_pcie_ep {
+ struct pci_epc *epc;
+ struct dw_pcie_ep_ops *ops;
+ phys_addr_t phys_base;
+ size_t addr_size;
+ u8 bar_to_atu[6];
+ phys_addr_t *outbound_addr;
+ unsigned long ib_window_map;
+ unsigned long ob_window_map;
+ u32 num_ib_windows;
+ u32 num_ob_windows;
+};
+
struct dw_pcie_ops {
- u32 (*readl_dbi)(struct dw_pcie *pcie, u32 reg);
- void (*writel_dbi)(struct dw_pcie *pcie, u32 reg, u32 val);
+ u64 (*cpu_addr_fixup)(u64 cpu_addr);
+ u32 (*read_dbi)(struct dw_pcie *pcie, void __iomem *base, u32 reg,
+ size_t size);
+ void (*write_dbi)(struct dw_pcie *pcie, void __iomem *base, u32 reg,
+ size_t size, u32 val);
int (*link_up)(struct dw_pcie *pcie);
+ int (*start_link)(struct dw_pcie *pcie);
+ void (*stop_link)(struct dw_pcie *pcie);
};
struct dw_pcie {
struct device *dev;
void __iomem *dbi_base;
+ void __iomem *dbi_base2;
u32 num_viewport;
u8 iatu_unroll_enabled;
struct pcie_port pp;
+ struct dw_pcie_ep ep;
const struct dw_pcie_ops *ops;
};
#define to_dw_pcie_from_pp(port) container_of((port), struct dw_pcie, pp)
+#define to_dw_pcie_from_ep(endpoint) \
+ container_of((endpoint), struct dw_pcie, ep)
+
int dw_pcie_read(void __iomem *addr, int size, u32 *val);
int dw_pcie_write(void __iomem *addr, int size, u32 val);
-u32 dw_pcie_readl_dbi(struct dw_pcie *pci, u32 reg);
-void dw_pcie_writel_dbi(struct dw_pcie *pci, u32 reg, u32 val);
+u32 __dw_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg,
+ size_t size);
+void __dw_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg,
+ size_t size, u32 val);
int dw_pcie_link_up(struct dw_pcie *pci);
int dw_pcie_wait_for_link(struct dw_pcie *pci);
void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index,
int type, u64 cpu_addr, u64 pci_addr,
u32 size);
+int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int bar,
+ u64 cpu_addr, enum dw_pcie_as_type as_type);
+void dw_pcie_disable_atu(struct dw_pcie *pci, int index,
+ enum dw_pcie_region_type type);
void dw_pcie_setup(struct dw_pcie *pci);
+static inline void dw_pcie_writel_dbi(struct dw_pcie *pci, u32 reg, u32 val)
+{
+ __dw_pcie_write_dbi(pci, pci->dbi_base, reg, 0x4, val);
+}
+
+static inline u32 dw_pcie_readl_dbi(struct dw_pcie *pci, u32 reg)
+{
+ return __dw_pcie_read_dbi(pci, pci->dbi_base, reg, 0x4);
+}
+
+static inline void dw_pcie_writew_dbi(struct dw_pcie *pci, u32 reg, u16 val)
+{
+ __dw_pcie_write_dbi(pci, pci->dbi_base, reg, 0x2, val);
+}
+
+static inline u16 dw_pcie_readw_dbi(struct dw_pcie *pci, u32 reg)
+{
+ return __dw_pcie_read_dbi(pci, pci->dbi_base, reg, 0x2);
+}
+
+static inline void dw_pcie_writeb_dbi(struct dw_pcie *pci, u32 reg, u8 val)
+{
+ __dw_pcie_write_dbi(pci, pci->dbi_base, reg, 0x1, val);
+}
+
+static inline u8 dw_pcie_readb_dbi(struct dw_pcie *pci, u32 reg)
+{
+ return __dw_pcie_read_dbi(pci, pci->dbi_base, reg, 0x1);
+}
+
+static inline void dw_pcie_writel_dbi2(struct dw_pcie *pci, u32 reg, u32 val)
+{
+ __dw_pcie_write_dbi(pci, pci->dbi_base2, reg, 0x4, val);
+}
+
+static inline u32 dw_pcie_readl_dbi2(struct dw_pcie *pci, u32 reg)
+{
+ return __dw_pcie_read_dbi(pci, pci->dbi_base2, reg, 0x4);
+}
+
#ifdef CONFIG_PCIE_DW_HOST
irqreturn_t dw_handle_msi_irq(struct pcie_port *pp);
void dw_pcie_msi_init(struct pcie_port *pp);
@@ -195,4 +303,23 @@ static inline int dw_pcie_host_init(struct pcie_port *pp)
return 0;
}
#endif
+
+#ifdef CONFIG_PCIE_DW_EP
+void dw_pcie_ep_linkup(struct dw_pcie_ep *ep);
+int dw_pcie_ep_init(struct dw_pcie_ep *ep);
+void dw_pcie_ep_exit(struct dw_pcie_ep *ep);
+#else
+static inline void dw_pcie_ep_linkup(struct dw_pcie_ep *ep)
+{
+}
+
+static inline int dw_pcie_ep_init(struct dw_pcie_ep *ep)
+{
+ return 0;
+}
+
+static inline void dw_pcie_ep_exit(struct dw_pcie_ep *ep)
+{
+}
+#endif
#endif /* _PCIE_DESIGNWARE_H */
diff --git a/drivers/pci/dwc/pcie-hisi.c b/drivers/pci/dwc/pcie-hisi.c
index cf9d6a9d9fd4..e51acee0ddf3 100644
--- a/drivers/pci/dwc/pcie-hisi.c
+++ b/drivers/pci/dwc/pcie-hisi.c
@@ -99,7 +99,7 @@ static int hisi_pcie_init(struct pci_config_window *cfg)
return -ENOMEM;
}
- reg_base = devm_ioremap(dev, res->start, resource_size(res));
+ reg_base = devm_pci_remap_cfgspace(dev, res->start, resource_size(res));
if (!reg_base)
return -ENOMEM;
@@ -296,10 +296,9 @@ static int hisi_pcie_probe(struct platform_device *pdev)
}
reg = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rc_dbi");
- pci->dbi_base = devm_ioremap_resource(dev, reg);
+ pci->dbi_base = devm_pci_remap_cfg_resource(dev, reg);
if (IS_ERR(pci->dbi_base))
return PTR_ERR(pci->dbi_base);
-
platform_set_drvdata(pdev, hisi_pcie);
ret = hisi_add_pcie_port(hisi_pcie, pdev);
@@ -334,6 +333,7 @@ static struct platform_driver hisi_pcie_driver = {
.driver = {
.name = "hisi-pcie",
.of_match_table = hisi_pcie_of_match,
+ .suppress_bind_attrs = true,
},
};
builtin_platform_driver(hisi_pcie_driver);
@@ -360,7 +360,7 @@ static int hisi_pcie_platform_init(struct pci_config_window *cfg)
return -EINVAL;
}
- reg_base = devm_ioremap(dev, res->start, resource_size(res));
+ reg_base = devm_pci_remap_cfgspace(dev, res->start, resource_size(res));
if (!reg_base)
return -ENOMEM;
@@ -395,6 +395,7 @@ static struct platform_driver hisi_pcie_almost_ecam_driver = {
.driver = {
.name = "hisi-pcie-almost-ecam",
.of_match_table = hisi_pcie_almost_ecam_of_match,
+ .suppress_bind_attrs = true,
},
};
builtin_platform_driver(hisi_pcie_almost_ecam_driver);
diff --git a/drivers/pci/dwc/pcie-qcom.c b/drivers/pci/dwc/pcie-qcom.c
index 67eb7f5926dd..5bf23d432fdb 100644
--- a/drivers/pci/dwc/pcie-qcom.c
+++ b/drivers/pci/dwc/pcie-qcom.c
@@ -700,7 +700,7 @@ static int qcom_pcie_probe(struct platform_device *pdev)
return PTR_ERR(pcie->parf);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
- pci->dbi_base = devm_ioremap_resource(dev, res);
+ pci->dbi_base = devm_pci_remap_cfg_resource(dev, res);
if (IS_ERR(pci->dbi_base))
return PTR_ERR(pci->dbi_base);
diff --git a/drivers/pci/dwc/pcie-spear13xx.c b/drivers/pci/dwc/pcie-spear13xx.c
index eaa4ea8e2ea4..8ff36b3dbbdf 100644
--- a/drivers/pci/dwc/pcie-spear13xx.c
+++ b/drivers/pci/dwc/pcie-spear13xx.c
@@ -273,7 +273,7 @@ static int spear13xx_pcie_probe(struct platform_device *pdev)
}
dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
- pci->dbi_base = devm_ioremap_resource(dev, dbi_base);
+ pci->dbi_base = devm_pci_remap_cfg_resource(dev, dbi_base);
if (IS_ERR(pci->dbi_base)) {
dev_err(dev, "couldn't remap dbi base %p\n", dbi_base);
ret = PTR_ERR(pci->dbi_base);
@@ -308,6 +308,7 @@ static struct platform_driver spear13xx_pcie_driver = {
.driver = {
.name = "spear-pcie",
.of_match_table = of_match_ptr(spear13xx_pcie_of_match),
+ .suppress_bind_attrs = true,
},
};
diff --git a/drivers/pci/ecam.c b/drivers/pci/ecam.c
index 2fee61bb6559..c228a2eb7faa 100644
--- a/drivers/pci/ecam.c
+++ b/drivers/pci/ecam.c
@@ -84,12 +84,14 @@ struct pci_config_window *pci_ecam_create(struct device *dev,
if (!cfg->winp)
goto err_exit_malloc;
for (i = 0; i < bus_range; i++) {
- cfg->winp[i] = ioremap(cfgres->start + i * bsz, bsz);
+ cfg->winp[i] =
+ pci_remap_cfgspace(cfgres->start + i * bsz,
+ bsz);
if (!cfg->winp[i])
goto err_exit_iomap;
}
} else {
- cfg->win = ioremap(cfgres->start, bus_range * bsz);
+ cfg->win = pci_remap_cfgspace(cfgres->start, bus_range * bsz);
if (!cfg->win)
goto err_exit_iomap;
}
diff --git a/drivers/pci/endpoint/Kconfig b/drivers/pci/endpoint/Kconfig
new file mode 100644
index 000000000000..c23f146fb5a6
--- /dev/null
+++ b/drivers/pci/endpoint/Kconfig
@@ -0,0 +1,31 @@
+#
+# PCI Endpoint Support
+#
+
+menu "PCI Endpoint"
+
+config PCI_ENDPOINT
+ bool "PCI Endpoint Support"
+ help
+ Enable this configuration option to support configurable PCI
+ endpoints. This should be enabled if the platform has a PCI
+ controller that can operate in endpoint mode.
+
+ Enabling this option will build the endpoint library, which
+ includes endpoint controller library and endpoint function
+ library.
+
+ If in doubt, say "N" to disable Endpoint support.
+
+config PCI_ENDPOINT_CONFIGFS
+ bool "PCI Endpoint Configfs Support"
+ depends on PCI_ENDPOINT
+ select CONFIGFS_FS
+ help
+ This will enable the configfs entry that can be used to
+ configure the endpoint function and to bind the
+ function to an endpoint controller.
+
+source "drivers/pci/endpoint/functions/Kconfig"
+
+endmenu
diff --git a/drivers/pci/endpoint/Makefile b/drivers/pci/endpoint/Makefile
new file mode 100644
index 000000000000..1041f80a4645
--- /dev/null
+++ b/drivers/pci/endpoint/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for PCI Endpoint Support
+#
+
+obj-$(CONFIG_PCI_ENDPOINT_CONFIGFS) += pci-ep-cfs.o
+obj-$(CONFIG_PCI_ENDPOINT) += pci-epc-core.o pci-epf-core.o\
+ pci-epc-mem.o functions/
diff --git a/drivers/pci/endpoint/functions/Kconfig b/drivers/pci/endpoint/functions/Kconfig
new file mode 100644
index 000000000000..175edad42d2f
--- /dev/null
+++ b/drivers/pci/endpoint/functions/Kconfig
@@ -0,0 +1,12 @@
+#
+# PCI Endpoint Functions
+#
+
+config PCI_EPF_TEST
+ tristate "PCI Endpoint Test driver"
+ depends on PCI_ENDPOINT
+ help
+ Enable this configuration option to build the test function
+ driver for the PCI Endpoint framework.
+
+ If in doubt, say "N" to disable the Endpoint test driver.
diff --git a/drivers/pci/endpoint/functions/Makefile b/drivers/pci/endpoint/functions/Makefile
new file mode 100644
index 000000000000..6d94a4801838
--- /dev/null
+++ b/drivers/pci/endpoint/functions/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for PCI Endpoint Functions
+#
+
+obj-$(CONFIG_PCI_EPF_TEST) += pci-epf-test.o
diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c
new file mode 100644
index 000000000000..53fff8030337
--- /dev/null
+++ b/drivers/pci/endpoint/functions/pci-epf-test.c
@@ -0,0 +1,510 @@
+/**
+ * Test driver to test endpoint functionality
+ *
+ * Copyright (C) 2017 Texas Instruments
+ * Author: Kishon Vijay Abraham I <kishon@ti.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 of
+ * the License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/crc32.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/pci_ids.h>
+#include <linux/random.h>
+
+#include <linux/pci-epc.h>
+#include <linux/pci-epf.h>
+#include <linux/pci_regs.h>
+
+#define COMMAND_RAISE_LEGACY_IRQ BIT(0)
+#define COMMAND_RAISE_MSI_IRQ BIT(1)
+#define MSI_NUMBER_SHIFT 2
+#define MSI_NUMBER_MASK (0x3f << MSI_NUMBER_SHIFT)
+#define COMMAND_READ BIT(8)
+#define COMMAND_WRITE BIT(9)
+#define COMMAND_COPY BIT(10)
+
+#define STATUS_READ_SUCCESS BIT(0)
+#define STATUS_READ_FAIL BIT(1)
+#define STATUS_WRITE_SUCCESS BIT(2)
+#define STATUS_WRITE_FAIL BIT(3)
+#define STATUS_COPY_SUCCESS BIT(4)
+#define STATUS_COPY_FAIL BIT(5)
+#define STATUS_IRQ_RAISED BIT(6)
+#define STATUS_SRC_ADDR_INVALID BIT(7)
+#define STATUS_DST_ADDR_INVALID BIT(8)
+
+#define TIMER_RESOLUTION 1
+
+static struct workqueue_struct *kpcitest_workqueue;
+
+struct pci_epf_test {
+ void *reg[6];
+ struct pci_epf *epf;
+ struct delayed_work cmd_handler;
+};
+
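+/*
+ * Test registers exposed through BAR0: the host writes a command and the
+ * source/destination addresses here; the endpoint function reports status
+ * and checksum back through the same structure.
+ */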
+struct pci_epf_test_reg {
+ u32 magic;
+ u32 command;
+ u32 status;
+ u64 src_addr;
+ u64 dst_addr;
+ u32 size;
+ u32 checksum;
+} __packed;
+
+static struct pci_epf_header test_header = {
+ .vendorid = PCI_ANY_ID,
+ .deviceid = PCI_ANY_ID,
+ .baseclass_code = PCI_CLASS_OTHERS,
+ .interrupt_pin = PCI_INTERRUPT_INTA,
+};
+
+static int bar_size[] = { 512, 1024, 16384, 131072, 1048576 };
+
+static int pci_epf_test_copy(struct pci_epf_test *epf_test)
+{
+ int ret;
+ void __iomem *src_addr;
+ void __iomem *dst_addr;
+ phys_addr_t src_phys_addr;
+ phys_addr_t dst_phys_addr;
+ struct pci_epf *epf = epf_test->epf;
+ struct device *dev = &epf->dev;
+ struct pci_epc *epc = epf->epc;
+ struct pci_epf_test_reg *reg = epf_test->reg[0];
+
+ src_addr = pci_epc_mem_alloc_addr(epc, &src_phys_addr, reg->size);
+ if (!src_addr) {
+ dev_err(dev, "failed to allocate source address\n");
+ reg->status = STATUS_SRC_ADDR_INVALID;
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ ret = pci_epc_map_addr(epc, src_phys_addr, reg->src_addr, reg->size);
+ if (ret) {
+ dev_err(dev, "failed to map source address\n");
+ reg->status = STATUS_SRC_ADDR_INVALID;
+ goto err_src_addr;
+ }
+
+ dst_addr = pci_epc_mem_alloc_addr(epc, &dst_phys_addr, reg->size);
+ if (!dst_addr) {
+ dev_err(dev, "failed to allocate destination address\n");
+ reg->status = STATUS_DST_ADDR_INVALID;
+ ret = -ENOMEM;
+ goto err_src_map_addr;
+ }
+
+ ret = pci_epc_map_addr(epc, dst_phys_addr, reg->dst_addr, reg->size);
+ if (ret) {
+ dev_err(dev, "failed to map destination address\n");
+ reg->status = STATUS_DST_ADDR_INVALID;
+ goto err_dst_addr;
+ }
+
+ memcpy(dst_addr, src_addr, reg->size);
+
+ pci_epc_unmap_addr(epc, dst_phys_addr);
+
+err_dst_addr:
+ pci_epc_mem_free_addr(epc, dst_phys_addr, dst_addr, reg->size);
+
+err_src_map_addr:
+ pci_epc_unmap_addr(epc, src_phys_addr);
+
+err_src_addr:
+ pci_epc_mem_free_addr(epc, src_phys_addr, src_addr, reg->size);
+
+err:
+ return ret;
+}
+
+static int pci_epf_test_read(struct pci_epf_test *epf_test)
+{
+ int ret;
+ void __iomem *src_addr;
+ void *buf;
+ u32 crc32;
+ phys_addr_t phys_addr;
+ struct pci_epf *epf = epf_test->epf;
+ struct device *dev = &epf->dev;
+ struct pci_epc *epc = epf->epc;
+ struct pci_epf_test_reg *reg = epf_test->reg[0];
+
+ src_addr = pci_epc_mem_alloc_addr(epc, &phys_addr, reg->size);
+ if (!src_addr) {
+ dev_err(dev, "failed to allocate address\n");
+ reg->status = STATUS_SRC_ADDR_INVALID;
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ ret = pci_epc_map_addr(epc, phys_addr, reg->src_addr, reg->size);
+ if (ret) {
+ dev_err(dev, "failed to map address\n");
+ reg->status = STATUS_SRC_ADDR_INVALID;
+ goto err_addr;
+ }
+
+ buf = kzalloc(reg->size, GFP_KERNEL);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto err_map_addr;
+ }
+
+ memcpy(buf, src_addr, reg->size);
+
+ crc32 = crc32_le(~0, buf, reg->size);
+ if (crc32 != reg->checksum)
+ ret = -EIO;
+
+ kfree(buf);
+
+err_map_addr:
+ pci_epc_unmap_addr(epc, phys_addr);
+
+err_addr:
+ pci_epc_mem_free_addr(epc, phys_addr, src_addr, reg->size);
+
+err:
+ return ret;
+}
+
+static int pci_epf_test_write(struct pci_epf_test *epf_test)
+{
+ int ret;
+ void __iomem *dst_addr;
+ void *buf;
+ phys_addr_t phys_addr;
+ struct pci_epf *epf = epf_test->epf;
+ struct device *dev = &epf->dev;
+ struct pci_epc *epc = epf->epc;
+ struct pci_epf_test_reg *reg = epf_test->reg[0];
+
+ dst_addr = pci_epc_mem_alloc_addr(epc, &phys_addr, reg->size);
+ if (!dst_addr) {
+ dev_err(dev, "failed to allocate address\n");
+ reg->status = STATUS_DST_ADDR_INVALID;
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ ret = pci_epc_map_addr(epc, phys_addr, reg->dst_addr, reg->size);
+ if (ret) {
+ dev_err(dev, "failed to map address\n");
+ reg->status = STATUS_DST_ADDR_INVALID;
+ goto err_addr;
+ }
+
+ buf = kzalloc(reg->size, GFP_KERNEL);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto err_map_addr;
+ }
+
+ get_random_bytes(buf, reg->size);
+ reg->checksum = crc32_le(~0, buf, reg->size);
+
+ memcpy(dst_addr, buf, reg->size);
+
+ /*
+ * Wait 1ms in order for the write to complete. Without this delay an
+ * L3 error is observed in the host system.
+ */
+ mdelay(1);
+
+ kfree(buf);
+
+err_map_addr:
+ pci_epc_unmap_addr(epc, phys_addr);
+
+err_addr:
+ pci_epc_mem_free_addr(epc, phys_addr, dst_addr, reg->size);
+
+err:
+ return ret;
+}
+
+static void pci_epf_test_raise_irq(struct pci_epf_test *epf_test)
+{
+ u8 irq;
+ u8 msi_count;
+ struct pci_epf *epf = epf_test->epf;
+ struct pci_epc *epc = epf->epc;
+ struct pci_epf_test_reg *reg = epf_test->reg[0];
+
+ reg->status |= STATUS_IRQ_RAISED;
+ msi_count = pci_epc_get_msi(epc);
+ irq = (reg->command & MSI_NUMBER_MASK) >> MSI_NUMBER_SHIFT;
+ if (irq > msi_count || msi_count <= 0)
+ pci_epc_raise_irq(epc, PCI_EPC_IRQ_LEGACY, 0);
+ else
+ pci_epc_raise_irq(epc, PCI_EPC_IRQ_MSI, irq);
+}
+
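+/*
+ * Worker that polls the command register, runs the requested test
+ * (read/write/copy or raise an IRQ), updates the status bits and
+ * re-queues itself.
+ */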
+static void pci_epf_test_cmd_handler(struct work_struct *work)
+{
+ int ret;
+ u8 irq;
+ u8 msi_count;
+ struct pci_epf_test *epf_test = container_of(work, struct pci_epf_test,
+ cmd_handler.work);
+ struct pci_epf *epf = epf_test->epf;
+ struct pci_epc *epc = epf->epc;
+ struct pci_epf_test_reg *reg = epf_test->reg[0];
+
+ if (!reg->command)
+ goto reset_handler;
+
+ if (reg->command & COMMAND_RAISE_LEGACY_IRQ) {
+ reg->status = STATUS_IRQ_RAISED;
+ pci_epc_raise_irq(epc, PCI_EPC_IRQ_LEGACY, 0);
+ goto reset_handler;
+ }
+
+ if (reg->command & COMMAND_WRITE) {
+ ret = pci_epf_test_write(epf_test);
+ if (ret)
+ reg->status |= STATUS_WRITE_FAIL;
+ else
+ reg->status |= STATUS_WRITE_SUCCESS;
+ pci_epf_test_raise_irq(epf_test);
+ goto reset_handler;
+ }
+
+ if (reg->command & COMMAND_READ) {
+ ret = pci_epf_test_read(epf_test);
+ if (!ret)
+ reg->status |= STATUS_READ_SUCCESS;
+ else
+ reg->status |= STATUS_READ_FAIL;
+ pci_epf_test_raise_irq(epf_test);
+ goto reset_handler;
+ }
+
+ if (reg->command & COMMAND_COPY) {
+ ret = pci_epf_test_copy(epf_test);
+ if (!ret)
+ reg->status |= STATUS_COPY_SUCCESS;
+ else
+ reg->status |= STATUS_COPY_FAIL;
+ pci_epf_test_raise_irq(epf_test);
+ goto reset_handler;
+ }
+
+ if (reg->command & COMMAND_RAISE_MSI_IRQ) {
+ msi_count = pci_epc_get_msi(epc);
+ irq = (reg->command & MSI_NUMBER_MASK) >> MSI_NUMBER_SHIFT;
+ if (irq > msi_count || msi_count <= 0)
+ goto reset_handler;
+ reg->status = STATUS_IRQ_RAISED;
+ pci_epc_raise_irq(epc, PCI_EPC_IRQ_MSI, irq);
+ goto reset_handler;
+ }
+
+reset_handler:
+ reg->command = 0;
+
+ queue_delayed_work(kpcitest_workqueue, &epf_test->cmd_handler,
+ msecs_to_jiffies(1));
+}
+
+static void pci_epf_test_linkup(struct pci_epf *epf)
+{
+ struct pci_epf_test *epf_test = epf_get_drvdata(epf);
+
+ queue_delayed_work(kpcitest_workqueue, &epf_test->cmd_handler,
+ msecs_to_jiffies(1));
+}
+
+static void pci_epf_test_unbind(struct pci_epf *epf)
+{
+ struct pci_epf_test *epf_test = epf_get_drvdata(epf);
+ struct pci_epc *epc = epf->epc;
+ int bar;
+
+ cancel_delayed_work(&epf_test->cmd_handler);
+ pci_epc_stop(epc);
+ for (bar = BAR_0; bar <= BAR_5; bar++) {
+ if (epf_test->reg[bar]) {
+ pci_epf_free_space(epf, epf_test->reg[bar], bar);
+ pci_epc_clear_bar(epc, bar);
+ }
+ }
+}
+
+static int pci_epf_test_set_bar(struct pci_epf *epf)
+{
+ int flags;
+ int bar;
+ int ret;
+ struct pci_epf_bar *epf_bar;
+ struct pci_epc *epc = epf->epc;
+ struct device *dev = &epf->dev;
+ struct pci_epf_test *epf_test = epf_get_drvdata(epf);
+
+ flags = PCI_BASE_ADDRESS_SPACE_MEMORY | PCI_BASE_ADDRESS_MEM_TYPE_32;
+ if (sizeof(dma_addr_t) == 0x8)
+ flags |= PCI_BASE_ADDRESS_MEM_TYPE_64;
+
+ for (bar = BAR_0; bar <= BAR_5; bar++) {
+ epf_bar = &epf->bar[bar];
+ ret = pci_epc_set_bar(epc, bar, epf_bar->phys_addr,
+ epf_bar->size, flags);
+ if (ret) {
+ pci_epf_free_space(epf, epf_test->reg[bar], bar);
+ dev_err(dev, "failed to set BAR%d\n", bar);
+ if (bar == BAR_0)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int pci_epf_test_alloc_space(struct pci_epf *epf)
+{
+ struct pci_epf_test *epf_test = epf_get_drvdata(epf);
+ struct device *dev = &epf->dev;
+ void *base;
+ int bar;
+
+ base = pci_epf_alloc_space(epf, sizeof(struct pci_epf_test_reg),
+ BAR_0);
+ if (!base) {
+ dev_err(dev, "failed to allocated register space\n");
+ return -ENOMEM;
+ }
+ epf_test->reg[0] = base;
+
+ for (bar = BAR_1; bar <= BAR_5; bar++) {
+ base = pci_epf_alloc_space(epf, bar_size[bar - 1], bar);
+ if (!base)
+ dev_err(dev, "failed to allocate space for BAR%d\n",
+ bar);
+ epf_test->reg[bar] = base;
+ }
+
+ return 0;
+}
+
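+/*
+ * Bind: write the standard configuration header, allocate backing memory
+ * for the BARs, program the BARs and configure MSI on the controller this
+ * function is attached to.
+ */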
+static int pci_epf_test_bind(struct pci_epf *epf)
+{
+ int ret;
+ struct pci_epf_header *header = epf->header;
+ struct pci_epc *epc = epf->epc;
+ struct device *dev = &epf->dev;
+
+ if (WARN_ON_ONCE(!epc))
+ return -EINVAL;
+
+ ret = pci_epc_write_header(epc, header);
+ if (ret) {
+ dev_err(dev, "configuration header write failed\n");
+ return ret;
+ }
+
+ ret = pci_epf_test_alloc_space(epf);
+ if (ret)
+ return ret;
+
+ ret = pci_epf_test_set_bar(epf);
+ if (ret)
+ return ret;
+
+ ret = pci_epc_set_msi(epc, epf->msi_interrupts);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int pci_epf_test_probe(struct pci_epf *epf)
+{
+ struct pci_epf_test *epf_test;
+ struct device *dev = &epf->dev;
+
+ epf_test = devm_kzalloc(dev, sizeof(*epf_test), GFP_KERNEL);
+ if (!epf_test)
+ return -ENOMEM;
+
+ epf->header = &test_header;
+ epf_test->epf = epf;
+
+ INIT_DELAYED_WORK(&epf_test->cmd_handler, pci_epf_test_cmd_handler);
+
+ epf_set_drvdata(epf, epf_test);
+ return 0;
+}
+
+static int pci_epf_test_remove(struct pci_epf *epf)
+{
+ struct pci_epf_test *epf_test = epf_get_drvdata(epf);
+
+ kfree(epf_test);
+ return 0;
+}
+
+static struct pci_epf_ops ops = {
+ .unbind = pci_epf_test_unbind,
+ .bind = pci_epf_test_bind,
+ .linkup = pci_epf_test_linkup,
+};
+
+static const struct pci_epf_device_id pci_epf_test_ids[] = {
+ {
+ .name = "pci_epf_test",
+ },
+ {},
+};
+
+static struct pci_epf_driver test_driver = {
+ .driver.name = "pci_epf_test",
+ .probe = pci_epf_test_probe,
+ .remove = pci_epf_test_remove,
+ .id_table = pci_epf_test_ids,
+ .ops = &ops,
+ .owner = THIS_MODULE,
+};
+
+static int __init pci_epf_test_init(void)
+{
+ int ret;
+
+ kpcitest_workqueue = alloc_workqueue("kpcitest",
+ WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
+ ret = pci_epf_register_driver(&test_driver);
+ if (ret) {
+ pr_err("failed to register pci epf test driver --> %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+module_init(pci_epf_test_init);
+
+static void __exit pci_epf_test_exit(void)
+{
+ pci_epf_unregister_driver(&test_driver);
+}
+module_exit(pci_epf_test_exit);
+
+MODULE_DESCRIPTION("PCI EPF TEST DRIVER");
+MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/endpoint/pci-ep-cfs.c b/drivers/pci/endpoint/pci-ep-cfs.c
new file mode 100644
index 000000000000..424fdd6ed1ca
--- /dev/null
+++ b/drivers/pci/endpoint/pci-ep-cfs.c
@@ -0,0 +1,509 @@
+/**
+ * configfs to configure the PCI endpoint
+ *
+ * Copyright (C) 2017 Texas Instruments
+ * Author: Kishon Vijay Abraham I <kishon@ti.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 of
+ * the License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include <linux/pci-epc.h>
+#include <linux/pci-epf.h>
+#include <linux/pci-ep-cfs.h>
+
+static struct config_group *functions_group;
+static struct config_group *controllers_group;
+
+struct pci_epf_group {
+ struct config_group group;
+ struct pci_epf *epf;
+};
+
+struct pci_epc_group {
+ struct config_group group;
+ struct pci_epc *epc;
+ bool start;
+ unsigned long function_num_map;
+};
+
+static inline struct pci_epf_group *to_pci_epf_group(struct config_item *item)
+{
+ return container_of(to_config_group(item), struct pci_epf_group, group);
+}
+
+static inline struct pci_epc_group *to_pci_epc_group(struct config_item *item)
+{
+ return container_of(to_config_group(item), struct pci_epc_group, group);
+}
+
+static ssize_t pci_epc_start_store(struct config_item *item, const char *page,
+ size_t len)
+{
+ int ret;
+ bool start;
+ struct pci_epc *epc;
+ struct pci_epc_group *epc_group = to_pci_epc_group(item);
+
+ epc = epc_group->epc;
+
+ ret = kstrtobool(page, &start);
+ if (ret)
+ return ret;
+
+ if (!start) {
+ pci_epc_stop(epc);
+ return len;
+ }
+
+ ret = pci_epc_start(epc);
+ if (ret) {
+ dev_err(&epc->dev, "failed to start endpoint controller\n");
+ return -EINVAL;
+ }
+
+ epc_group->start = start;
+
+ return len;
+}
+
+static ssize_t pci_epc_start_show(struct config_item *item, char *page)
+{
+ return sprintf(page, "%d\n",
+ to_pci_epc_group(item)->start);
+}
+
+CONFIGFS_ATTR(pci_epc_, start);
+
+static struct configfs_attribute *pci_epc_attrs[] = {
+ &pci_epc_attr_start,
+ NULL,
+};
+
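+/*
+ * Called when a function is symlinked to a controller in configfs: add the
+ * function to the EPC, assign it a free function number and bind it.
+ */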
+static int pci_epc_epf_link(struct config_item *epc_item,
+ struct config_item *epf_item)
+{
+ int ret;
+ u32 func_no = 0;
+ struct pci_epc *epc;
+ struct pci_epf *epf;
+ struct pci_epf_group *epf_group = to_pci_epf_group(epf_item);
+ struct pci_epc_group *epc_group = to_pci_epc_group(epc_item);
+
+ epc = epc_group->epc;
+ epf = epf_group->epf;
+ ret = pci_epc_add_epf(epc, epf);
+ if (ret)
+ goto err_add_epf;
+
+ func_no = find_first_zero_bit(&epc_group->function_num_map,
+ sizeof(epc_group->function_num_map));
+ set_bit(func_no, &epc_group->function_num_map);
+ epf->func_no = func_no;
+
+ ret = pci_epf_bind(epf);
+ if (ret)
+ goto err_epf_bind;
+
+ return 0;
+
+err_epf_bind:
+ pci_epc_remove_epf(epc, epf);
+
+err_add_epf:
+ clear_bit(func_no, &epc_group->function_num_map);
+
+ return ret;
+}
+
+static void pci_epc_epf_unlink(struct config_item *epc_item,
+ struct config_item *epf_item)
+{
+ struct pci_epc *epc;
+ struct pci_epf *epf;
+ struct pci_epf_group *epf_group = to_pci_epf_group(epf_item);
+ struct pci_epc_group *epc_group = to_pci_epc_group(epc_item);
+
+ WARN_ON_ONCE(epc_group->start);
+
+ epc = epc_group->epc;
+ epf = epf_group->epf;
+ clear_bit(epf->func_no, &epc_group->function_num_map);
+ pci_epf_unbind(epf);
+ pci_epc_remove_epf(epc, epf);
+}
+
+static struct configfs_item_operations pci_epc_item_ops = {
+ .allow_link = pci_epc_epf_link,
+ .drop_link = pci_epc_epf_unlink,
+};
+
+static struct config_item_type pci_epc_type = {
+ .ct_item_ops = &pci_epc_item_ops,
+ .ct_attrs = pci_epc_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+struct config_group *pci_ep_cfs_add_epc_group(const char *name)
+{
+ int ret;
+ struct pci_epc *epc;
+ struct config_group *group;
+ struct pci_epc_group *epc_group;
+
+ epc_group = kzalloc(sizeof(*epc_group), GFP_KERNEL);
+ if (!epc_group) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ group = &epc_group->group;
+
+ config_group_init_type_name(group, name, &pci_epc_type);
+ ret = configfs_register_group(controllers_group, group);
+ if (ret) {
+ pr_err("failed to register configfs group for %s\n", name);
+ goto err_register_group;
+ }
+
+ epc = pci_epc_get(name);
+ if (IS_ERR(epc)) {
+ ret = PTR_ERR(epc);
+ goto err_epc_get;
+ }
+
+ epc_group->epc = epc;
+
+ return group;
+
+err_epc_get:
+ configfs_unregister_group(group);
+
+err_register_group:
+ kfree(epc_group);
+
+err:
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL(pci_ep_cfs_add_epc_group);
+
+void pci_ep_cfs_remove_epc_group(struct config_group *group)
+{
+ struct pci_epc_group *epc_group;
+
+ if (!group)
+ return;
+
+ epc_group = container_of(group, struct pci_epc_group, group);
+ pci_epc_put(epc_group->epc);
+ configfs_unregister_group(&epc_group->group);
+ kfree(epc_group);
+}
+EXPORT_SYMBOL(pci_ep_cfs_remove_epc_group);
+
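+/*
+ * Helper macros to generate the configfs show/store methods for the
+ * standard configuration header fields.
+ */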
+#define PCI_EPF_HEADER_R(_name) \
+static ssize_t pci_epf_##_name##_show(struct config_item *item, char *page) \
+{ \
+ struct pci_epf *epf = to_pci_epf_group(item)->epf; \
+ if (WARN_ON_ONCE(!epf->header)) \
+ return -EINVAL; \
+ return sprintf(page, "0x%04x\n", epf->header->_name); \
+}
+
+#define PCI_EPF_HEADER_W_u32(_name) \
+static ssize_t pci_epf_##_name##_store(struct config_item *item, \
+ const char *page, size_t len) \
+{ \
+ u32 val; \
+ int ret; \
+ struct pci_epf *epf = to_pci_epf_group(item)->epf; \
+ if (WARN_ON_ONCE(!epf->header)) \
+ return -EINVAL; \
+ ret = kstrtou32(page, 0, &val); \
+ if (ret) \
+ return ret; \
+ epf->header->_name = val; \
+ return len; \
+}
+
+#define PCI_EPF_HEADER_W_u16(_name) \
+static ssize_t pci_epf_##_name##_store(struct config_item *item, \
+ const char *page, size_t len) \
+{ \
+ u16 val; \
+ int ret; \
+ struct pci_epf *epf = to_pci_epf_group(item)->epf; \
+ if (WARN_ON_ONCE(!epf->header)) \
+ return -EINVAL; \
+ ret = kstrtou16(page, 0, &val); \
+ if (ret) \
+ return ret; \
+ epf->header->_name = val; \
+ return len; \
+}
+
+#define PCI_EPF_HEADER_W_u8(_name) \
+static ssize_t pci_epf_##_name##_store(struct config_item *item, \
+ const char *page, size_t len) \
+{ \
+ u8 val; \
+ int ret; \
+ struct pci_epf *epf = to_pci_epf_group(item)->epf; \
+ if (WARN_ON_ONCE(!epf->header)) \
+ return -EINVAL; \
+ ret = kstrtou8(page, 0, &val); \
+ if (ret) \
+ return ret; \
+ epf->header->_name = val; \
+ return len; \
+}
+
+static ssize_t pci_epf_msi_interrupts_store(struct config_item *item,
+ const char *page, size_t len)
+{
+ u8 val;
+ int ret;
+
+ ret = kstrtou8(page, 0, &val);
+ if (ret)
+ return ret;
+
+ to_pci_epf_group(item)->epf->msi_interrupts = val;
+
+ return len;
+}
+
+static ssize_t pci_epf_msi_interrupts_show(struct config_item *item,
+ char *page)
+{
+ return sprintf(page, "%d\n",
+ to_pci_epf_group(item)->epf->msi_interrupts);
+}
+
+PCI_EPF_HEADER_R(vendorid)
+PCI_EPF_HEADER_W_u16(vendorid)
+
+PCI_EPF_HEADER_R(deviceid)
+PCI_EPF_HEADER_W_u16(deviceid)
+
+PCI_EPF_HEADER_R(revid)
+PCI_EPF_HEADER_W_u8(revid)
+
+PCI_EPF_HEADER_R(progif_code)
+PCI_EPF_HEADER_W_u8(progif_code)
+
+PCI_EPF_HEADER_R(subclass_code)
+PCI_EPF_HEADER_W_u8(subclass_code)
+
+PCI_EPF_HEADER_R(baseclass_code)
+PCI_EPF_HEADER_W_u8(baseclass_code)
+
+PCI_EPF_HEADER_R(cache_line_size)
+PCI_EPF_HEADER_W_u8(cache_line_size)
+
+PCI_EPF_HEADER_R(subsys_vendor_id)
+PCI_EPF_HEADER_W_u16(subsys_vendor_id)
+
+PCI_EPF_HEADER_R(subsys_id)
+PCI_EPF_HEADER_W_u16(subsys_id)
+
+PCI_EPF_HEADER_R(interrupt_pin)
+PCI_EPF_HEADER_W_u8(interrupt_pin)
+
+CONFIGFS_ATTR(pci_epf_, vendorid);
+CONFIGFS_ATTR(pci_epf_, deviceid);
+CONFIGFS_ATTR(pci_epf_, revid);
+CONFIGFS_ATTR(pci_epf_, progif_code);
+CONFIGFS_ATTR(pci_epf_, subclass_code);
+CONFIGFS_ATTR(pci_epf_, baseclass_code);
+CONFIGFS_ATTR(pci_epf_, cache_line_size);
+CONFIGFS_ATTR(pci_epf_, subsys_vendor_id);
+CONFIGFS_ATTR(pci_epf_, subsys_id);
+CONFIGFS_ATTR(pci_epf_, interrupt_pin);
+CONFIGFS_ATTR(pci_epf_, msi_interrupts);
+
+static struct configfs_attribute *pci_epf_attrs[] = {
+ &pci_epf_attr_vendorid,
+ &pci_epf_attr_deviceid,
+ &pci_epf_attr_revid,
+ &pci_epf_attr_progif_code,
+ &pci_epf_attr_subclass_code,
+ &pci_epf_attr_baseclass_code,
+ &pci_epf_attr_cache_line_size,
+ &pci_epf_attr_subsys_vendor_id,
+ &pci_epf_attr_subsys_id,
+ &pci_epf_attr_interrupt_pin,
+ &pci_epf_attr_msi_interrupts,
+ NULL,
+};
+
+static void pci_epf_release(struct config_item *item)
+{
+ struct pci_epf_group *epf_group = to_pci_epf_group(item);
+
+ pci_epf_destroy(epf_group->epf);
+ kfree(epf_group);
+}
+
+static struct configfs_item_operations pci_epf_ops = {
+ .release = pci_epf_release,
+};
+
+static struct config_item_type pci_epf_type = {
+ .ct_item_ops = &pci_epf_ops,
+ .ct_attrs = pci_epf_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+static struct config_group *pci_epf_make(struct config_group *group,
+ const char *name)
+{
+ struct pci_epf_group *epf_group;
+ struct pci_epf *epf;
+
+ epf_group = kzalloc(sizeof(*epf_group), GFP_KERNEL);
+ if (!epf_group)
+ return ERR_PTR(-ENOMEM);
+
+ config_group_init_type_name(&epf_group->group, name, &pci_epf_type);
+
+ epf = pci_epf_create(group->cg_item.ci_name);
+ if (IS_ERR(epf)) {
+ pr_err("failed to create endpoint function device\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ epf_group->epf = epf;
+
+ return &epf_group->group;
+}
+
+static void pci_epf_drop(struct config_group *group, struct config_item *item)
+{
+ config_item_put(item);
+}
+
+static struct configfs_group_operations pci_epf_group_ops = {
+ .make_group = &pci_epf_make,
+ .drop_item = &pci_epf_drop,
+};
+
+static struct config_item_type pci_epf_group_type = {
+ .ct_group_ops = &pci_epf_group_ops,
+ .ct_owner = THIS_MODULE,
+};
+
+struct config_group *pci_ep_cfs_add_epf_group(const char *name)
+{
+ struct config_group *group;
+
+ group = configfs_register_default_group(functions_group, name,
+ &pci_epf_group_type);
+ if (IS_ERR(group))
+ pr_err("failed to register configfs group for %s function\n",
+ name);
+
+ return group;
+}
+EXPORT_SYMBOL(pci_ep_cfs_add_epf_group);
+
+void pci_ep_cfs_remove_epf_group(struct config_group *group)
+{
+ if (IS_ERR_OR_NULL(group))
+ return;
+
+ configfs_unregister_default_group(group);
+}
+EXPORT_SYMBOL(pci_ep_cfs_remove_epf_group);
+
+static struct config_item_type pci_functions_type = {
+ .ct_owner = THIS_MODULE,
+};
+
+static struct config_item_type pci_controllers_type = {
+ .ct_owner = THIS_MODULE,
+};
+
+static struct config_item_type pci_ep_type = {
+ .ct_owner = THIS_MODULE,
+};
+
+static struct configfs_subsystem pci_ep_cfs_subsys = {
+ .su_group = {
+ .cg_item = {
+ .ci_namebuf = "pci_ep",
+ .ci_type = &pci_ep_type,
+ },
+ },
+ .su_mutex = __MUTEX_INITIALIZER(pci_ep_cfs_subsys.su_mutex),
+};
+
+static int __init pci_ep_cfs_init(void)
+{
+ int ret;
+ struct config_group *root = &pci_ep_cfs_subsys.su_group;
+
+ config_group_init(root);
+
+ ret = configfs_register_subsystem(&pci_ep_cfs_subsys);
+ if (ret) {
+ pr_err("Error %d while registering subsystem %s\n",
+ ret, root->cg_item.ci_namebuf);
+ goto err;
+ }
+
+ functions_group = configfs_register_default_group(root, "functions",
+ &pci_functions_type);
+ if (IS_ERR(functions_group)) {
+ ret = PTR_ERR(functions_group);
+ pr_err("Error %d while registering functions group\n",
+ ret);
+ goto err_functions_group;
+ }
+
+ controllers_group =
+ configfs_register_default_group(root, "controllers",
+ &pci_controllers_type);
+ if (IS_ERR(controllers_group)) {
+ ret = PTR_ERR(controllers_group);
+ pr_err("Error %d while registering controllers group\n",
+ ret);
+ goto err_controllers_group;
+ }
+
+ return 0;
+
+err_controllers_group:
+ configfs_unregister_default_group(functions_group);
+
+err_functions_group:
+ configfs_unregister_subsystem(&pci_ep_cfs_subsys);
+
+err:
+ return ret;
+}
+module_init(pci_ep_cfs_init);
+
+static void __exit pci_ep_cfs_exit(void)
+{
+ configfs_unregister_default_group(controllers_group);
+ configfs_unregister_default_group(functions_group);
+ configfs_unregister_subsystem(&pci_ep_cfs_subsys);
+}
+module_exit(pci_ep_cfs_exit);
+
+MODULE_DESCRIPTION("PCI EP CONFIGFS");
+MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
+MODULE_LICENSE("GPL v2");
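/*
 * Sketch of the configfs layout these helpers produce; "pci_epf_test" and
 * "dra7-pcie" are purely illustrative names for a function driver and a
 * controller. pci_ep_cfs_add_epf_group() is called by the EPF core when a
 * function driver registers (see __pci_epf_register_driver() later in this
 * series), and pci_ep_cfs_add_epc_group() by the EPC core on controller
 * creation.
 *
 *	/sys/kernel/config/pci_ep/
 *	|-- functions/
 *	|   `-- pci_epf_test/		<- pci_ep_cfs_add_epf_group()
 *	|       `-- func1/		<- user mkdir, handled by pci_epf_make()
 *	|           |-- msi_interrupts
 *	|           |-- subsys_vendor_id
 *	|           `-- ...		<- the pci_epf_attrs shown above
 *	`-- controllers/
 *	    `-- dra7-pcie/		<- pci_ep_cfs_add_epc_group()
 */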
diff --git a/drivers/pci/endpoint/pci-epc-core.c b/drivers/pci/endpoint/pci-epc-core.c
new file mode 100644
index 000000000000..caa7be10e473
--- /dev/null
+++ b/drivers/pci/endpoint/pci-epc-core.c
@@ -0,0 +1,580 @@
+/**
+ * PCI Endpoint *Controller* (EPC) library
+ *
+ * Copyright (C) 2017 Texas Instruments
+ * Author: Kishon Vijay Abraham I <kishon@ti.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 of
+ * the License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+
+#include <linux/pci-epc.h>
+#include <linux/pci-epf.h>
+#include <linux/pci-ep-cfs.h>
+
+static struct class *pci_epc_class;
+
+static void devm_pci_epc_release(struct device *dev, void *res)
+{
+ struct pci_epc *epc = *(struct pci_epc **)res;
+
+ pci_epc_destroy(epc);
+}
+
+static int devm_pci_epc_match(struct device *dev, void *res, void *match_data)
+{
+ struct pci_epc **epc = res;
+
+ return *epc == match_data;
+}
+
+/**
+ * pci_epc_put() - release the PCI endpoint controller
+ * @epc: epc returned by pci_epc_get()
+ *
+ * release the refcount the caller obtained by invoking pci_epc_get()
+ */
+void pci_epc_put(struct pci_epc *epc)
+{
+ if (!epc || IS_ERR(epc))
+ return;
+
+ module_put(epc->ops->owner);
+ put_device(&epc->dev);
+}
+EXPORT_SYMBOL_GPL(pci_epc_put);
+
+/**
+ * pci_epc_get() - get the PCI endpoint controller
+ * @epc_name: device name of the endpoint controller
+ *
+ * Invoke to get struct pci_epc * corresponding to the device name of the
+ * endpoint controller
+ */
+struct pci_epc *pci_epc_get(const char *epc_name)
+{
+ int ret = -EINVAL;
+ struct pci_epc *epc;
+ struct device *dev;
+ struct class_dev_iter iter;
+
+ class_dev_iter_init(&iter, pci_epc_class, NULL, NULL);
+ while ((dev = class_dev_iter_next(&iter))) {
+ if (strcmp(epc_name, dev_name(dev)))
+ continue;
+
+ epc = to_pci_epc(dev);
+ if (!try_module_get(epc->ops->owner)) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ class_dev_iter_exit(&iter);
+ get_device(&epc->dev);
+ return epc;
+ }
+
+err:
+ class_dev_iter_exit(&iter);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(pci_epc_get);
+
+/**
+ * pci_epc_stop() - stop the PCI link
+ * @epc: the EPC device whose link has to be stopped
+ *
+ * Invoke to stop the PCI link
+ */
+void pci_epc_stop(struct pci_epc *epc)
+{
+ unsigned long flags;
+
+ if (IS_ERR(epc) || !epc->ops->stop)
+ return;
+
+ spin_lock_irqsave(&epc->lock, flags);
+ epc->ops->stop(epc);
+ spin_unlock_irqrestore(&epc->lock, flags);
+}
+EXPORT_SYMBOL_GPL(pci_epc_stop);
+
+/**
+ * pci_epc_start() - start the PCI link
+ * @epc: the EPC device whose link has to be started
+ *
+ * Invoke to start the PCI link
+ */
+int pci_epc_start(struct pci_epc *epc)
+{
+ int ret;
+ unsigned long flags;
+
+ if (IS_ERR(epc))
+ return -EINVAL;
+
+ if (!epc->ops->start)
+ return 0;
+
+ spin_lock_irqsave(&epc->lock, flags);
+ ret = epc->ops->start(epc);
+ spin_unlock_irqrestore(&epc->lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pci_epc_start);
+
+/**
+ * pci_epc_raise_irq() - interrupt the host system
+ * @epc: the EPC device which has to interrupt the host
+ * @type: specify the type of interrupt; legacy or MSI
+ * @interrupt_num: the MSI interrupt number
+ *
+ * Invoke to raise an MSI or legacy interrupt
+ */
+int pci_epc_raise_irq(struct pci_epc *epc, enum pci_epc_irq_type type,
+ u8 interrupt_num)
+{
+ int ret;
+ unsigned long flags;
+
+ if (IS_ERR(epc))
+ return -EINVAL;
+
+ if (!epc->ops->raise_irq)
+ return 0;
+
+ spin_lock_irqsave(&epc->lock, flags);
+ ret = epc->ops->raise_irq(epc, type, interrupt_num);
+ spin_unlock_irqrestore(&epc->lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pci_epc_raise_irq);
+
+/**
+ * pci_epc_get_msi() - get the number of MSI interrupts allocated
+ * @epc: the EPC device to which MSI interrupts were requested
+ *
+ * Invoke to get the number of MSI interrupts allocated by the RC
+ */
+int pci_epc_get_msi(struct pci_epc *epc)
+{
+ int interrupt;
+ unsigned long flags;
+
+ if (IS_ERR(epc))
+ return 0;
+
+ if (!epc->ops->get_msi)
+ return 0;
+
+ spin_lock_irqsave(&epc->lock, flags);
+ interrupt = epc->ops->get_msi(epc);
+ spin_unlock_irqrestore(&epc->lock, flags);
+
+ if (interrupt < 0)
+ return 0;
+
+ /* the MSI Multiple Message fields hold log2 of the vector count */
+ interrupt = 1 << interrupt;
+
+ return interrupt;
+}
+EXPORT_SYMBOL_GPL(pci_epc_get_msi);
+
+/**
+ * pci_epc_set_msi() - set the number of MSI interrupts required
+ * @epc: the EPC device on which MSI has to be configured
+ * @interrupts: number of MSI interrupts required by the EPF
+ *
+ * Invoke to set the required number of MSI interrupts.
+ */
+int pci_epc_set_msi(struct pci_epc *epc, u8 interrupts)
+{
+ int ret;
+ u8 encode_int;
+ unsigned long flags;
+
+ if (IS_ERR(epc))
+ return -EINVAL;
+
+ if (!epc->ops->set_msi)
+ return 0;
+
+ /* encode as log2 of the vector count for the Multiple Message field */
+ encode_int = order_base_2(interrupts);
+
+ spin_lock_irqsave(&epc->lock, flags);
+ ret = epc->ops->set_msi(epc, encode_int);
+ spin_unlock_irqrestore(&epc->lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pci_epc_set_msi);
+
+/**
+ * pci_epc_unmap_addr() - unmap CPU address from PCI address
+ * @epc: the EPC device on which address is allocated
+ * @phys_addr: physical address of the local system
+ *
+ * Invoke to unmap the CPU address from PCI address.
+ */
+void pci_epc_unmap_addr(struct pci_epc *epc, phys_addr_t phys_addr)
+{
+ unsigned long flags;
+
+ if (IS_ERR(epc))
+ return;
+
+ if (!epc->ops->unmap_addr)
+ return;
+
+ spin_lock_irqsave(&epc->lock, flags);
+ epc->ops->unmap_addr(epc, phys_addr);
+ spin_unlock_irqrestore(&epc->lock, flags);
+}
+EXPORT_SYMBOL_GPL(pci_epc_unmap_addr);
+
+/**
+ * pci_epc_map_addr() - map CPU address to PCI address
+ * @epc: the EPC device on which address is allocated
+ * @phys_addr: physical address of the local system
+ * @pci_addr: PCI address to which the physical address should be mapped
+ * @size: the size of the allocation
+ *
+ * Invoke to map CPU address with PCI address.
+ */
+int pci_epc_map_addr(struct pci_epc *epc, phys_addr_t phys_addr,
+ u64 pci_addr, size_t size)
+{
+ int ret;
+ unsigned long flags;
+
+ if (IS_ERR(epc))
+ return -EINVAL;
+
+ if (!epc->ops->map_addr)
+ return 0;
+
+ spin_lock_irqsave(&epc->lock, flags);
+ ret = epc->ops->map_addr(epc, phys_addr, pci_addr, size);
+ spin_unlock_irqrestore(&epc->lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pci_epc_map_addr);
+
+/**
+ * pci_epc_clear_bar() - reset the BAR
+ * @epc: the EPC device for which the BAR has to be cleared
+ * @bar: the BAR number that has to be reset
+ *
+ * Invoke to reset the BAR of the endpoint device.
+ */
+void pci_epc_clear_bar(struct pci_epc *epc, int bar)
+{
+ unsigned long flags;
+
+ if (IS_ERR(epc))
+ return;
+
+ if (!epc->ops->clear_bar)
+ return;
+
+ spin_lock_irqsave(&epc->lock, flags);
+ epc->ops->clear_bar(epc, bar);
+ spin_unlock_irqrestore(&epc->lock, flags);
+}
+EXPORT_SYMBOL_GPL(pci_epc_clear_bar);
+
+/**
+ * pci_epc_set_bar() - configure BAR in order for host to assign PCI addr space
+ * @epc: the EPC device on which BAR has to be configured
+ * @bar: the BAR number that has to be configured
+ * @bar_phys: the physical address of the local memory that should back the BAR
+ * @size: the size of the addr space
+ * @flags: specify memory allocation/io allocation/32-bit address/64-bit address
+ *
+ * Invoke to configure the BAR of the endpoint device.
+ */
+int pci_epc_set_bar(struct pci_epc *epc, enum pci_barno bar,
+ dma_addr_t bar_phys, size_t size, int flags)
+{
+ int ret;
+ unsigned long irq_flags;
+
+ if (IS_ERR(epc))
+ return -EINVAL;
+
+ if (!epc->ops->set_bar)
+ return 0;
+
+ spin_lock_irqsave(&epc->lock, irq_flags);
+ ret = epc->ops->set_bar(epc, bar, bar_phys, size, flags);
+ spin_unlock_irqrestore(&epc->lock, irq_flags);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pci_epc_set_bar);
+
+/**
+ * pci_epc_write_header() - write standard configuration header
+ * @epc: the EPC device to which the configuration header should be written
+ * @header: standard configuration header fields
+ *
+ * Invoke to write the configuration header to the endpoint controller. Every
+ * endpoint controller will have a dedicated location to which the standard
+ * configuration header would be written. The callback function should write
+ * the header fields to this dedicated location.
+ */
+int pci_epc_write_header(struct pci_epc *epc, struct pci_epf_header *header)
+{
+ int ret;
+ unsigned long flags;
+
+ if (IS_ERR(epc))
+ return -EINVAL;
+
+ if (!epc->ops->write_header)
+ return 0;
+
+ spin_lock_irqsave(&epc->lock, flags);
+ ret = epc->ops->write_header(epc, header);
+ spin_unlock_irqrestore(&epc->lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pci_epc_write_header);
+
+/**
+ * pci_epc_add_epf() - bind PCI endpoint function to an endpoint controller
+ * @epc: the EPC device to which the endpoint function should be added
+ * @epf: the endpoint function to be added
+ *
+ * A PCI endpoint device can have one or more functions. In the case of PCIe,
+ * the specification allows up to 8 PCIe endpoint functions. Invoke
+ * pci_epc_add_epf() to add a PCI endpoint function to an endpoint controller.
+ */
+int pci_epc_add_epf(struct pci_epc *epc, struct pci_epf *epf)
+{
+ unsigned long flags;
+
+ if (epf->epc)
+ return -EBUSY;
+
+ if (IS_ERR(epc))
+ return -EINVAL;
+
+ if (epf->func_no > epc->max_functions - 1)
+ return -EINVAL;
+
+ epf->epc = epc;
+ dma_set_coherent_mask(&epf->dev, epc->dev.coherent_dma_mask);
+ epf->dev.dma_mask = epc->dev.dma_mask;
+
+ spin_lock_irqsave(&epc->lock, flags);
+ list_add_tail(&epf->list, &epc->pci_epf);
+ spin_unlock_irqrestore(&epc->lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pci_epc_add_epf);
+
+/**
+ * pci_epc_remove_epf() - remove PCI endpoint function from endpoint controller
+ * @epc: the EPC device from which the endpoint function should be removed
+ * @epf: the endpoint function to be removed
+ *
+ * Invoke to remove PCI endpoint function from the endpoint controller.
+ */
+void pci_epc_remove_epf(struct pci_epc *epc, struct pci_epf *epf)
+{
+ unsigned long flags;
+
+ if (!epc || IS_ERR(epc))
+ return;
+
+ spin_lock_irqsave(&epc->lock, flags);
+ list_del(&epf->list);
+ spin_unlock_irqrestore(&epc->lock, flags);
+}
+EXPORT_SYMBOL_GPL(pci_epc_remove_epf);
+
+/**
+ * pci_epc_linkup() - Notify the EPF device that the EPC device has established a
+ * connection with the Root Complex.
+ * @epc: the EPC device which has established link with the host
+ *
+ * Invoke to notify the EPF device that the EPC device has established a
+ * connection with the Root Complex.
+ */
+void pci_epc_linkup(struct pci_epc *epc)
+{
+ unsigned long flags;
+ struct pci_epf *epf;
+
+ if (!epc || IS_ERR(epc))
+ return;
+
+ spin_lock_irqsave(&epc->lock, flags);
+ list_for_each_entry(epf, &epc->pci_epf, list)
+ pci_epf_linkup(epf);
+ spin_unlock_irqrestore(&epc->lock, flags);
+}
+EXPORT_SYMBOL_GPL(pci_epc_linkup);
+
+/**
+ * pci_epc_destroy() - destroy the EPC device
+ * @epc: the EPC device that has to be destroyed
+ *
+ * Invoke to destroy the PCI EPC device
+ */
+void pci_epc_destroy(struct pci_epc *epc)
+{
+ pci_ep_cfs_remove_epc_group(epc->group);
+ device_unregister(&epc->dev);
+ kfree(epc);
+}
+EXPORT_SYMBOL_GPL(pci_epc_destroy);
+
+/**
+ * devm_pci_epc_destroy() - destroy the EPC device
+ * @dev: device that wants to destroy the EPC
+ * @epc: the EPC device that has to be destroyed
+ *
+ * Invoke to destroy the devres associated with this
+ * pci_epc and destroy the EPC device.
+ */
+void devm_pci_epc_destroy(struct device *dev, struct pci_epc *epc)
+{
+ int r;
+
+ r = devres_destroy(dev, devm_pci_epc_release, devm_pci_epc_match,
+ epc);
+ dev_WARN_ONCE(dev, r, "couldn't find PCI EPC resource\n");
+}
+EXPORT_SYMBOL_GPL(devm_pci_epc_destroy);
+
+/**
+ * __pci_epc_create() - create a new endpoint controller (EPC) device
+ * @dev: device that is creating the new EPC
+ * @ops: function pointers for performing EPC operations
+ * @owner: the owner of the module that creates the EPC device
+ *
+ * Invoke to create a new EPC device and add it to pci_epc class.
+ */
+struct pci_epc *
+__pci_epc_create(struct device *dev, const struct pci_epc_ops *ops,
+ struct module *owner)
+{
+ int ret;
+ struct pci_epc *epc;
+
+ if (WARN_ON(!dev)) {
+ ret = -EINVAL;
+ goto err_ret;
+ }
+
+ epc = kzalloc(sizeof(*epc), GFP_KERNEL);
+ if (!epc) {
+ ret = -ENOMEM;
+ goto err_ret;
+ }
+
+ spin_lock_init(&epc->lock);
+ INIT_LIST_HEAD(&epc->pci_epf);
+
+ device_initialize(&epc->dev);
+ dma_set_coherent_mask(&epc->dev, dev->coherent_dma_mask);
+ epc->dev.class = pci_epc_class;
+ epc->dev.dma_mask = dev->dma_mask;
+ epc->ops = ops;
+
+ ret = dev_set_name(&epc->dev, "%s", dev_name(dev));
+ if (ret)
+ goto put_dev;
+
+ ret = device_add(&epc->dev);
+ if (ret)
+ goto put_dev;
+
+ epc->group = pci_ep_cfs_add_epc_group(dev_name(dev));
+
+ return epc;
+
+put_dev:
+ put_device(&epc->dev);
+ kfree(epc);
+
+err_ret:
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(__pci_epc_create);
+
+/**
+ * __devm_pci_epc_create() - create a new endpoint controller (EPC) device
+ * @dev: device that is creating the new EPC
+ * @ops: function pointers for performing EPC operations
+ * @owner: the owner of the module that creates the EPC device
+ *
+ * Invoke to create a new EPC device and add it to pci_epc class.
+ * While at that, it also associates the device with the pci_epc using devres.
+ * On driver detach, the release function is invoked on the devres data and
+ * then the devres data is freed.
+ */
+struct pci_epc *
+__devm_pci_epc_create(struct device *dev, const struct pci_epc_ops *ops,
+ struct module *owner)
+{
+ struct pci_epc **ptr, *epc;
+
+ ptr = devres_alloc(devm_pci_epc_release, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return ERR_PTR(-ENOMEM);
+
+ epc = __pci_epc_create(dev, ops, owner);
+ if (!IS_ERR(epc)) {
+ *ptr = epc;
+ devres_add(dev, ptr);
+ } else {
+ devres_free(ptr);
+ }
+
+ return epc;
+}
+EXPORT_SYMBOL_GPL(__devm_pci_epc_create);
+
+static int __init pci_epc_init(void)
+{
+ pci_epc_class = class_create(THIS_MODULE, "pci_epc");
+ if (IS_ERR(pci_epc_class)) {
+ pr_err("failed to create pci epc class --> %ld\n",
+ PTR_ERR(pci_epc_class));
+ return PTR_ERR(pci_epc_class);
+ }
+
+ return 0;
+}
+module_init(pci_epc_init);
+
+static void __exit pci_epc_exit(void)
+{
+ class_destroy(pci_epc_class);
+}
+module_exit(pci_epc_exit);
+
+MODULE_DESCRIPTION("PCI EPC Library");
+MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
+MODULE_LICENSE("GPL v2");
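/*
 * A minimal sketch of a platform EPC driver sitting on top of the library
 * above. The "acme" names, the stubbed register programming and the outbound
 * window base/size are illustrative placeholders; only the pci_epc_* calls
 * come from this patch, and __devm_pci_epc_create() is called directly rather
 * than through any convenience wrapper the header may provide.
 */
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/pci-epc.h>
#include <linux/pci-epf.h>

static int acme_epc_write_header(struct pci_epc *epc, struct pci_epf_header *hdr)
{
	/* program vendorid/deviceid etc. into the controller registers here */
	return 0;
}

static int acme_epc_raise_irq(struct pci_epc *epc, enum pci_epc_irq_type type,
			      u8 interrupt_num)
{
	/* trigger a legacy or MSI interrupt towards the Root Complex here */
	return 0;
}

static const struct pci_epc_ops acme_epc_ops = {
	.write_header	= acme_epc_write_header,
	.raise_irq	= acme_epc_raise_irq,
	.owner		= THIS_MODULE,
};

static int acme_epc_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct pci_epc *epc;

	/* devres-managed: devm_pci_epc_release() destroys the EPC on detach */
	epc = __devm_pci_epc_create(dev, &acme_epc_ops, THIS_MODULE);
	if (IS_ERR(epc))
		return PTR_ERR(epc);

	epc->max_functions = 1;

	/* hand the controller's outbound window to pci-epc-mem.c */
	return pci_epc_mem_init(epc, 0x20000000, SZ_16M);
}

static struct platform_driver acme_epc_driver = {
	.driver	= {
		.name = "acme-pcie-ep",
	},
	.probe	= acme_epc_probe,
};
module_platform_driver(acme_epc_driver);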
diff --git a/drivers/pci/endpoint/pci-epc-mem.c b/drivers/pci/endpoint/pci-epc-mem.c
new file mode 100644
index 000000000000..3a94cc1caf22
--- /dev/null
+++ b/drivers/pci/endpoint/pci-epc-mem.c
@@ -0,0 +1,143 @@
+/**
+ * PCI Endpoint *Controller* Address Space Management
+ *
+ * Copyright (C) 2017 Texas Instruments
+ * Author: Kishon Vijay Abraham I <kishon@ti.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 of
+ * the License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include <linux/pci-epc.h>
+
+/**
+ * pci_epc_mem_init() - initialize the pci_epc_mem structure
+ * @epc: the EPC device that invoked pci_epc_mem_init
+ * @phys_base: the physical address of the base
+ * @size: the size of the address space
+ *
+ * Invoke to initialize the pci_epc_mem structure used by the
+ * endpoint functions to allocate mapped PCI address.
+ */
+int pci_epc_mem_init(struct pci_epc *epc, phys_addr_t phys_base, size_t size)
+{
+ int ret;
+ struct pci_epc_mem *mem;
+ unsigned long *bitmap;
+ int pages = size >> PAGE_SHIFT;
+ int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);
+
+ mem = kzalloc(sizeof(*mem), GFP_KERNEL);
+ if (!mem) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ bitmap = kzalloc(bitmap_size, GFP_KERNEL);
+ if (!bitmap) {
+ ret = -ENOMEM;
+ goto err_mem;
+ }
+
+ mem->bitmap = bitmap;
+ mem->phys_base = phys_base;
+ mem->pages = pages;
+ mem->size = size;
+
+ epc->mem = mem;
+
+ return 0;
+
+err_mem:
+ kfree(mem);
+
+err:
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pci_epc_mem_init);
+
+/**
+ * pci_epc_mem_exit() - cleanup the pci_epc_mem structure
+ * @epc: the EPC device that invoked pci_epc_mem_exit
+ *
+ * Invoke to cleanup the pci_epc_mem structure allocated in
+ * pci_epc_mem_init().
+ */
+void pci_epc_mem_exit(struct pci_epc *epc)
+{
+ struct pci_epc_mem *mem = epc->mem;
+
+ epc->mem = NULL;
+ kfree(mem->bitmap);
+ kfree(mem);
+}
+EXPORT_SYMBOL_GPL(pci_epc_mem_exit);
+
+/**
+ * pci_epc_mem_alloc_addr() - allocate memory address from EPC addr space
+ * @epc: the EPC device on which memory has to be allocated
+ * @phys_addr: populate the allocated physical address here
+ * @size: the size of the address space that has to be allocated
+ *
+ * Invoke to allocate memory address from the EPC address space. This
+ * is usually done to map the remote RC address into the local system.
+ */
+void __iomem *pci_epc_mem_alloc_addr(struct pci_epc *epc,
+ phys_addr_t *phys_addr, size_t size)
+{
+ int pageno;
+ void __iomem *virt_addr;
+ struct pci_epc_mem *mem = epc->mem;
+ int order = get_order(size);
+
+ pageno = bitmap_find_free_region(mem->bitmap, mem->pages, order);
+ if (pageno < 0)
+ return NULL;
+
+ *phys_addr = mem->phys_base + (pageno << PAGE_SHIFT);
+ virt_addr = ioremap(*phys_addr, size);
+ if (!virt_addr)
+ bitmap_release_region(mem->bitmap, pageno, order);
+
+ return virt_addr;
+}
+EXPORT_SYMBOL_GPL(pci_epc_mem_alloc_addr);
+
+/**
+ * pci_epc_mem_free_addr() - free the allocated memory address
+ * @epc: the EPC device on which memory was allocated
+ * @phys_addr: the allocated physical address
+ * @virt_addr: virtual address of the allocated mem space
+ * @size: the size of the allocated address space
+ *
+ * Invoke to free the memory allocated using pci_epc_mem_alloc_addr.
+ */
+void pci_epc_mem_free_addr(struct pci_epc *epc, phys_addr_t phys_addr,
+ void __iomem *virt_addr, size_t size)
+{
+ int pageno;
+ int order = get_order(size);
+ struct pci_epc_mem *mem = epc->mem;
+
+ iounmap(virt_addr);
+ pageno = (phys_addr - mem->phys_base) >> PAGE_SHIFT;
+ bitmap_release_region(mem->bitmap, pageno, order);
+}
+EXPORT_SYMBOL_GPL(pci_epc_mem_free_addr);
+
+MODULE_DESCRIPTION("PCI EPC Address Space Management");
+MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
+MODULE_LICENSE("GPL v2");
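/*
 * Sketch of the intended alloc -> map -> access -> unmap -> free flow,
 * combining pci-epc-mem.c with the pci_epc_map_addr()/pci_epc_unmap_addr()
 * wrappers from pci-epc-core.c. 'epc' and 'pci_addr' are assumed to be
 * obtained elsewhere (e.g. from the bound EPF and an address written by the
 * host); SZ_4K is only an example window size.
 */
#include <linux/io.h>
#include <linux/sizes.h>
#include <linux/pci-epc.h>

static int example_write_host_buffer(struct pci_epc *epc, u64 pci_addr)
{
	phys_addr_t phys;
	void __iomem *buf;
	int ret;

	/* reserve a window in the EPC outbound address space */
	buf = pci_epc_mem_alloc_addr(epc, &phys, SZ_4K);
	if (!buf)
		return -ENOMEM;

	/* point the window at the host-side PCI address */
	ret = pci_epc_map_addr(epc, phys, pci_addr, SZ_4K);
	if (ret)
		goto free;

	writel(0xc0ffee, buf);		/* lands in host memory at pci_addr */

	pci_epc_unmap_addr(epc, phys);
free:
	pci_epc_mem_free_addr(epc, phys, buf, SZ_4K);
	return ret;
}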
diff --git a/drivers/pci/endpoint/pci-epf-core.c b/drivers/pci/endpoint/pci-epf-core.c
new file mode 100644
index 000000000000..6877d6a5bcc9
--- /dev/null
+++ b/drivers/pci/endpoint/pci-epf-core.c
@@ -0,0 +1,359 @@
+/**
+ * PCI Endpoint *Function* (EPF) library
+ *
+ * Copyright (C) 2017 Texas Instruments
+ * Author: Kishon Vijay Abraham I <kishon@ti.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 of
+ * the License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+
+#include <linux/pci-epc.h>
+#include <linux/pci-epf.h>
+#include <linux/pci-ep-cfs.h>
+
+static struct bus_type pci_epf_bus_type;
+static struct device_type pci_epf_type;
+
+/**
+ * pci_epf_linkup() - Notify the function driver that the EPC device has
+ * established a connection with the Root Complex.
+ * @epf: the EPF device bound to the EPC device which has established
+ * the connection with the host
+ *
+ * Invoke to notify the function driver that the EPC device has established
+ * a connection with the Root Complex.
+ */
+void pci_epf_linkup(struct pci_epf *epf)
+{
+ if (!epf->driver) {
+ dev_WARN(&epf->dev, "epf device not bound to driver\n");
+ return;
+ }
+
+ epf->driver->ops->linkup(epf);
+}
+EXPORT_SYMBOL_GPL(pci_epf_linkup);
+
+/**
+ * pci_epf_unbind() - Notify the function driver that the binding between the
+ * EPF device and EPC device has been lost
+ * @epf: the EPF device which has lost the binding with the EPC device
+ *
+ * Invoke to notify the function driver that the binding between the EPF device
+ * and EPC device has been lost.
+ */
+void pci_epf_unbind(struct pci_epf *epf)
+{
+ if (!epf->driver) {
+ dev_WARN(&epf->dev, "epf device not bound to driver\n");
+ return;
+ }
+
+ epf->driver->ops->unbind(epf);
+ module_put(epf->driver->owner);
+}
+EXPORT_SYMBOL_GPL(pci_epf_unbind);
+
+/**
+ * pci_epf_bind() - Notify the function driver that the EPF device has been
+ * bound to an EPC device
+ * @epf: the EPF device which has been bound to the EPC device
+ *
+ * Invoke to notify the function driver that it has been bound to an EPC device
+ */
+int pci_epf_bind(struct pci_epf *epf)
+{
+ if (!epf->driver) {
+ dev_WARN(&epf->dev, "epf device not bound to driver\n");
+ return -EINVAL;
+ }
+
+ if (!try_module_get(epf->driver->owner))
+ return -EAGAIN;
+
+ return epf->driver->ops->bind(epf);
+}
+EXPORT_SYMBOL_GPL(pci_epf_bind);
+
+/**
+ * pci_epf_free_space() - free the allocated PCI EPF register space
+ * @epf: the EPF device from which the register space was allocated
+ * @addr: the virtual address of the PCI EPF register space
+ * @bar: the BAR number corresponding to the register space
+ *
+ * Invoke to free the allocated PCI EPF register space.
+ */
+void pci_epf_free_space(struct pci_epf *epf, void *addr, enum pci_barno bar)
+{
+ struct device *dev = &epf->dev;
+
+ if (!addr)
+ return;
+
+ dma_free_coherent(dev, epf->bar[bar].size, addr,
+ epf->bar[bar].phys_addr);
+
+ epf->bar[bar].phys_addr = 0;
+ epf->bar[bar].size = 0;
+}
+EXPORT_SYMBOL_GPL(pci_epf_free_space);
+
+/**
+ * pci_epf_alloc_space() - allocate memory for the PCI EPF register space
+ * @epf: the EPF device for which the register space has to be allocated
+ * @size: the size of the memory that has to be allocated
+ * @bar: the BAR number corresponding to the allocated register space
+ *
+ * Invoke to allocate memory for the PCI EPF register space.
+ */
+void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar)
+{
+ void *space;
+ struct device *dev = &epf->dev;
+ dma_addr_t phys_addr;
+
+ if (size < 128)
+ size = 128;
+ size = roundup_pow_of_two(size);
+
+ space = dma_alloc_coherent(dev, size, &phys_addr, GFP_KERNEL);
+ if (!space) {
+ dev_err(dev, "failed to allocate mem space\n");
+ return NULL;
+ }
+
+ epf->bar[bar].phys_addr = phys_addr;
+ epf->bar[bar].size = size;
+
+ return space;
+}
+EXPORT_SYMBOL_GPL(pci_epf_alloc_space);
+
+/**
+ * pci_epf_unregister_driver() - unregister the PCI EPF driver
+ * @driver: the PCI EPF driver that has to be unregistered
+ *
+ * Invoke to unregister the PCI EPF driver.
+ */
+void pci_epf_unregister_driver(struct pci_epf_driver *driver)
+{
+ pci_ep_cfs_remove_epf_group(driver->group);
+ driver_unregister(&driver->driver);
+}
+EXPORT_SYMBOL_GPL(pci_epf_unregister_driver);
+
+/**
+ * __pci_epf_register_driver() - register a new PCI EPF driver
+ * @driver: structure representing PCI EPF driver
+ * @owner: the owner of the module that registers the PCI EPF driver
+ *
+ * Invoke to register a new PCI EPF driver.
+ */
+int __pci_epf_register_driver(struct pci_epf_driver *driver,
+ struct module *owner)
+{
+ int ret;
+
+ if (!driver->ops)
+ return -EINVAL;
+
+ if (!driver->ops->bind || !driver->ops->unbind || !driver->ops->linkup)
+ return -EINVAL;
+
+ driver->driver.bus = &pci_epf_bus_type;
+ driver->driver.owner = owner;
+
+ ret = driver_register(&driver->driver);
+ if (ret)
+ return ret;
+
+ driver->group = pci_ep_cfs_add_epf_group(driver->driver.name);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(__pci_epf_register_driver);
+
+/**
+ * pci_epf_destroy() - destroy the created PCI EPF device
+ * @epf: the PCI EPF device that has to be destroyed.
+ *
+ * Invoke to destroy the PCI EPF device created by invoking pci_epf_create().
+ */
+void pci_epf_destroy(struct pci_epf *epf)
+{
+ device_unregister(&epf->dev);
+}
+EXPORT_SYMBOL_GPL(pci_epf_destroy);
+
+/**
+ * pci_epf_create() - create a new PCI EPF device
+ * @name: the name of the PCI EPF device. This name will be used to bind the
+ * EPF device to an EPF driver
+ *
+ * Invoke to create a new PCI EPF device by providing the name of the function
+ * device.
+ */
+struct pci_epf *pci_epf_create(const char *name)
+{
+ int ret;
+ struct pci_epf *epf;
+ struct device *dev;
+ char *func_name;
+ char *buf;
+
+ epf = kzalloc(sizeof(*epf), GFP_KERNEL);
+ if (!epf) {
+ ret = -ENOMEM;
+ goto err_ret;
+ }
+
+ buf = kstrdup(name, GFP_KERNEL);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto free_epf;
+ }
+
+ func_name = buf;
+ buf = strchrnul(buf, '.');
+ *buf = '\0';
+
+ epf->name = kstrdup(func_name, GFP_KERNEL);
+ if (!epf->name) {
+ ret = -ENOMEM;
+ goto free_func_name;
+ }
+
+ dev = &epf->dev;
+ device_initialize(dev);
+ dev->bus = &pci_epf_bus_type;
+ dev->type = &pci_epf_type;
+
+ ret = dev_set_name(dev, "%s", name);
+ if (ret)
+ goto put_dev;
+
+ ret = device_add(dev);
+ if (ret)
+ goto put_dev;
+
+ kfree(func_name);
+ return epf;
+
+put_dev:
+ /* put_device() frees epf and epf->name via pci_epf_dev_release() */
+ put_device(dev);
+ kfree(func_name);
+ return ERR_PTR(ret);
+
+free_func_name:
+ kfree(func_name);
+
+free_epf:
+ kfree(epf);
+
+err_ret:
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(pci_epf_create);
+
+static void pci_epf_dev_release(struct device *dev)
+{
+ struct pci_epf *epf = to_pci_epf(dev);
+
+ kfree(epf->name);
+ kfree(epf);
+}
+
+static struct device_type pci_epf_type = {
+ .release = pci_epf_dev_release,
+};
+
+static int
+pci_epf_match_id(const struct pci_epf_device_id *id, const struct pci_epf *epf)
+{
+ while (id->name[0]) {
+ if (strcmp(epf->name, id->name) == 0)
+ return true;
+ id++;
+ }
+
+ return false;
+}
+
+static int pci_epf_device_match(struct device *dev, struct device_driver *drv)
+{
+ struct pci_epf *epf = to_pci_epf(dev);
+ struct pci_epf_driver *driver = to_pci_epf_driver(drv);
+
+ if (driver->id_table)
+ return pci_epf_match_id(driver->id_table, epf);
+
+ return !strcmp(epf->name, drv->name);
+}
+
+static int pci_epf_device_probe(struct device *dev)
+{
+ struct pci_epf *epf = to_pci_epf(dev);
+ struct pci_epf_driver *driver = to_pci_epf_driver(dev->driver);
+
+ if (!driver->probe)
+ return -ENODEV;
+
+ epf->driver = driver;
+
+ return driver->probe(epf);
+}
+
+static int pci_epf_device_remove(struct device *dev)
+{
+ int ret = 0;
+ struct pci_epf *epf = to_pci_epf(dev);
+ struct pci_epf_driver *driver = to_pci_epf_driver(dev->driver);
+
+ if (driver->remove)
+ ret = driver->remove(epf);
+ epf->driver = NULL;
+
+ return ret;
+}
+
+static struct bus_type pci_epf_bus_type = {
+ .name = "pci-epf",
+ .match = pci_epf_device_match,
+ .probe = pci_epf_device_probe,
+ .remove = pci_epf_device_remove,
+};
+
+static int __init pci_epf_init(void)
+{
+ int ret;
+
+ ret = bus_register(&pci_epf_bus_type);
+ if (ret) {
+ pr_err("failed to register pci epf bus --> %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+module_init(pci_epf_init);
+
+static void __exit pci_epf_exit(void)
+{
+ bus_unregister(&pci_epf_bus_type);
+}
+module_exit(pci_epf_exit);
+
+MODULE_DESCRIPTION("PCI EPF Library");
+MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
+MODULE_LICENSE("GPL v2");
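/*
 * Minimal skeleton of a function driver for the pci-epf bus above. The
 * "acme" names are placeholders; struct pci_epf_ops and struct
 * pci_epf_driver are assumed to be declared in <linux/pci-epf.h>, and
 * .owner must be set so that pci_epf_bind() can take a module reference
 * with try_module_get().
 */
#include <linux/module.h>
#include <linux/pci-epf.h>

static int acme_epf_bind(struct pci_epf *epf)
{
	/* allocate BAR memory with pci_epf_alloc_space(), write the header, ... */
	return 0;
}

static void acme_epf_unbind(struct pci_epf *epf)
{
	/* undo everything done in bind(), e.g. pci_epf_free_space() */
}

static void acme_epf_linkup(struct pci_epf *epf)
{
	/* the link to the Root Complex is up; start servicing the host */
}

static struct pci_epf_ops acme_epf_ops = {
	.bind	= acme_epf_bind,
	.unbind	= acme_epf_unbind,
	.linkup	= acme_epf_linkup,
};

static int acme_epf_probe(struct pci_epf *epf)
{
	return 0;
}

static int acme_epf_remove(struct pci_epf *epf)
{
	return 0;
}

static const struct pci_epf_device_id acme_epf_ids[] = {
	{ .name = "pci_epf_acme" },
	{ },
};

static struct pci_epf_driver acme_epf_driver = {
	.driver	= {
		.name	= "pci_epf_acme",
	},
	.probe		= acme_epf_probe,
	.remove		= acme_epf_remove,
	.id_table	= acme_epf_ids,
	.ops		= &acme_epf_ops,
	.owner		= THIS_MODULE,
};

static int __init acme_epf_init(void)
{
	return __pci_epf_register_driver(&acme_epf_driver, THIS_MODULE);
}
module_init(acme_epf_init);

static void __exit acme_epf_exit(void)
{
	pci_epf_unregister_driver(&acme_epf_driver);
}
module_exit(acme_epf_exit);

MODULE_LICENSE("GPL v2");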
diff --git a/drivers/pci/host/Kconfig b/drivers/pci/host/Kconfig
index f7c1d4d5c665..7f47cd5e10a5 100644
--- a/drivers/pci/host/Kconfig
+++ b/drivers/pci/host/Kconfig
@@ -27,6 +27,12 @@ config PCIE_XILINX_NWL
or End Point. The current option selection will only
support root port enabling.
+config PCI_FTPCI100
+ bool "Faraday Technology FTPCI100 PCI controller"
+ depends on OF
+ depends on ARM
+ default ARCH_GEMINI
+
config PCI_TEGRA
bool "NVIDIA Tegra PCIe controller"
depends on ARCH_TEGRA
@@ -95,6 +101,7 @@ config PCI_VERSATILE
config PCIE_IPROC
tristate
+ select PCI_DOMAINS
help
This enables the iProc PCIe core controller support for Broadcom's
iProc family of SoCs. An appropriate bus interface driver needs
@@ -115,7 +122,6 @@ config PCIE_IPROC_BCMA
depends on ARM && (ARCH_BCM_IPROC || COMPILE_TEST)
select PCIE_IPROC
select BCMA
- select PCI_DOMAINS
default ARCH_BCM_5301X
help
Say Y here if you want to use the Broadcom iProc PCIe controller
@@ -164,7 +170,7 @@ config PCI_HOST_THUNDER_ECAM
Say Y here if you want ECAM support for CN88XX-Pass-1.x Cavium Thunder SoCs.
config PCIE_ROCKCHIP
- bool "Rockchip PCIe controller"
+ tristate "Rockchip PCIe controller"
depends on ARCH_ROCKCHIP || COMPILE_TEST
depends on OF
depends on PCI_MSI_IRQ_DOMAIN
diff --git a/drivers/pci/host/Makefile b/drivers/pci/host/Makefile
index 4d3686676cc3..cab879578003 100644
--- a/drivers/pci/host/Makefile
+++ b/drivers/pci/host/Makefile
@@ -1,3 +1,4 @@
+obj-$(CONFIG_PCI_FTPCI100) += pci-ftpci100.o
obj-$(CONFIG_PCI_HYPERV) += pci-hyperv.o
obj-$(CONFIG_PCI_MVEBU) += pci-mvebu.o
obj-$(CONFIG_PCI_AARDVARK) += pci-aardvark.o
diff --git a/drivers/pci/host/pci-aardvark.c b/drivers/pci/host/pci-aardvark.c
index 4fce494271cc..37d0bcd31f8a 100644
--- a/drivers/pci/host/pci-aardvark.c
+++ b/drivers/pci/host/pci-aardvark.c
@@ -200,10 +200,12 @@ struct advk_pcie {
struct list_head resources;
struct irq_domain *irq_domain;
struct irq_chip irq_chip;
- struct msi_controller msi;
struct irq_domain *msi_domain;
+ struct irq_domain *msi_inner_domain;
+ struct irq_chip msi_bottom_irq_chip;
struct irq_chip msi_irq_chip;
- DECLARE_BITMAP(msi_irq_in_use, MSI_IRQ_NUM);
+ struct msi_domain_info msi_domain_info;
+ DECLARE_BITMAP(msi_used, MSI_IRQ_NUM);
struct mutex msi_used_lock;
u16 msi_msg;
int root_bus_nr;
@@ -545,94 +547,64 @@ static struct pci_ops advk_pcie_ops = {
.write = advk_pcie_wr_conf,
};
-static int advk_pcie_alloc_msi(struct advk_pcie *pcie)
+static void advk_msi_irq_compose_msi_msg(struct irq_data *data,
+ struct msi_msg *msg)
{
- int hwirq;
+ struct advk_pcie *pcie = irq_data_get_irq_chip_data(data);
+ phys_addr_t msi_msg = virt_to_phys(&pcie->msi_msg);
- mutex_lock(&pcie->msi_used_lock);
- hwirq = find_first_zero_bit(pcie->msi_irq_in_use, MSI_IRQ_NUM);
- if (hwirq >= MSI_IRQ_NUM)
- hwirq = -ENOSPC;
- else
- set_bit(hwirq, pcie->msi_irq_in_use);
- mutex_unlock(&pcie->msi_used_lock);
-
- return hwirq;
+ msg->address_lo = lower_32_bits(msi_msg);
+ msg->address_hi = upper_32_bits(msi_msg);
+ msg->data = data->irq;
}
-static void advk_pcie_free_msi(struct advk_pcie *pcie, int hwirq)
+static int advk_msi_set_affinity(struct irq_data *irq_data,
+ const struct cpumask *mask, bool force)
{
- struct device *dev = &pcie->pdev->dev;
-
- mutex_lock(&pcie->msi_used_lock);
- if (!test_bit(hwirq, pcie->msi_irq_in_use))
- dev_err(dev, "trying to free unused MSI#%d\n", hwirq);
- else
- clear_bit(hwirq, pcie->msi_irq_in_use);
- mutex_unlock(&pcie->msi_used_lock);
+ return -EINVAL;
}
-static int advk_pcie_setup_msi_irq(struct msi_controller *chip,
- struct pci_dev *pdev,
- struct msi_desc *desc)
+static int advk_msi_irq_domain_alloc(struct irq_domain *domain,
+ unsigned int virq,
+ unsigned int nr_irqs, void *args)
{
- struct advk_pcie *pcie = pdev->bus->sysdata;
- struct msi_msg msg;
- int virq, hwirq;
- phys_addr_t msi_msg_phys;
-
- /* We support MSI, but not MSI-X */
- if (desc->msi_attrib.is_msix)
- return -EINVAL;
-
- hwirq = advk_pcie_alloc_msi(pcie);
- if (hwirq < 0)
- return hwirq;
+ struct advk_pcie *pcie = domain->host_data;
+ int hwirq, i;
- virq = irq_create_mapping(pcie->msi_domain, hwirq);
- if (!virq) {
- advk_pcie_free_msi(pcie, hwirq);
- return -EINVAL;
+ mutex_lock(&pcie->msi_used_lock);
+ hwirq = bitmap_find_next_zero_area(pcie->msi_used, MSI_IRQ_NUM,
+ 0, nr_irqs, 0);
+ if (hwirq >= MSI_IRQ_NUM) {
+ mutex_unlock(&pcie->msi_used_lock);
+ return -ENOSPC;
}
- irq_set_msi_desc(virq, desc);
-
- msi_msg_phys = virt_to_phys(&pcie->msi_msg);
-
- msg.address_lo = lower_32_bits(msi_msg_phys);
- msg.address_hi = upper_32_bits(msi_msg_phys);
- msg.data = virq;
-
- pci_write_msi_msg(virq, &msg);
-
- return 0;
-}
+ bitmap_set(pcie->msi_used, hwirq, nr_irqs);
+ mutex_unlock(&pcie->msi_used_lock);
-static void advk_pcie_teardown_msi_irq(struct msi_controller *chip,
- unsigned int irq)
-{
- struct irq_data *d = irq_get_irq_data(irq);
- struct msi_desc *msi = irq_data_get_msi_desc(d);
- struct advk_pcie *pcie = msi_desc_to_pci_sysdata(msi);
- unsigned long hwirq = d->hwirq;
+ for (i = 0; i < nr_irqs; i++)
+ irq_domain_set_info(domain, virq + i, hwirq + i,
+ &pcie->msi_bottom_irq_chip,
+ domain->host_data, handle_simple_irq,
+ NULL, NULL);
- irq_dispose_mapping(irq);
- advk_pcie_free_msi(pcie, hwirq);
+ return hwirq;
}
-static int advk_pcie_msi_map(struct irq_domain *domain,
- unsigned int virq, irq_hw_number_t hw)
+static void advk_msi_irq_domain_free(struct irq_domain *domain,
+ unsigned int virq, unsigned int nr_irqs)
{
+ struct irq_data *d = irq_domain_get_irq_data(domain, virq);
struct advk_pcie *pcie = domain->host_data;
- irq_set_chip_and_handler(virq, &pcie->msi_irq_chip,
- handle_simple_irq);
-
- return 0;
+ mutex_lock(&pcie->msi_used_lock);
+ bitmap_clear(pcie->msi_used, d->hwirq, nr_irqs);
+ mutex_unlock(&pcie->msi_used_lock);
}
-static const struct irq_domain_ops advk_pcie_msi_irq_ops = {
- .map = advk_pcie_msi_map,
+static const struct irq_domain_ops advk_msi_domain_ops = {
+ .alloc = advk_msi_irq_domain_alloc,
+ .free = advk_msi_irq_domain_free,
};
static void advk_pcie_irq_mask(struct irq_data *d)
@@ -680,30 +652,25 @@ static int advk_pcie_init_msi_irq_domain(struct advk_pcie *pcie)
{
struct device *dev = &pcie->pdev->dev;
struct device_node *node = dev->of_node;
- struct irq_chip *msi_irq_chip;
- struct msi_controller *msi;
+ struct irq_chip *bottom_ic, *msi_ic;
+ struct msi_domain_info *msi_di;
phys_addr_t msi_msg_phys;
- int ret;
- msi_irq_chip = &pcie->msi_irq_chip;
+ mutex_init(&pcie->msi_used_lock);
- msi_irq_chip->name = devm_kasprintf(dev, GFP_KERNEL, "%s-msi",
- dev_name(dev));
- if (!msi_irq_chip->name)
- return -ENOMEM;
+ bottom_ic = &pcie->msi_bottom_irq_chip;
- msi_irq_chip->irq_enable = pci_msi_unmask_irq;
- msi_irq_chip->irq_disable = pci_msi_mask_irq;
- msi_irq_chip->irq_mask = pci_msi_mask_irq;
- msi_irq_chip->irq_unmask = pci_msi_unmask_irq;
+ bottom_ic->name = "MSI";
+ bottom_ic->irq_compose_msi_msg = advk_msi_irq_compose_msi_msg;
+ bottom_ic->irq_set_affinity = advk_msi_set_affinity;
- msi = &pcie->msi;
+ msi_ic = &pcie->msi_irq_chip;
+ msi_ic->name = "advk-MSI";
- msi->setup_irq = advk_pcie_setup_msi_irq;
- msi->teardown_irq = advk_pcie_teardown_msi_irq;
- msi->of_node = node;
-
- mutex_init(&pcie->msi_used_lock);
+ msi_di = &pcie->msi_domain_info;
+ msi_di->flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_MULTI_PCI_MSI;
+ msi_di->chip = msi_ic;
msi_msg_phys = virt_to_phys(&pcie->msi_msg);
@@ -712,16 +679,18 @@ static int advk_pcie_init_msi_irq_domain(struct advk_pcie *pcie)
advk_writel(pcie, upper_32_bits(msi_msg_phys),
PCIE_MSI_ADDR_HIGH_REG);
- pcie->msi_domain =
+ pcie->msi_inner_domain =
irq_domain_add_linear(NULL, MSI_IRQ_NUM,
- &advk_pcie_msi_irq_ops, pcie);
- if (!pcie->msi_domain)
+ &advk_msi_domain_ops, pcie);
+ if (!pcie->msi_inner_domain)
return -ENOMEM;
- ret = of_pci_msi_chip_add(msi);
- if (ret < 0) {
- irq_domain_remove(pcie->msi_domain);
- return ret;
+ pcie->msi_domain =
+ pci_msi_create_irq_domain(of_node_to_fwnode(node),
+ msi_di, pcie->msi_inner_domain);
+ if (!pcie->msi_domain) {
+ irq_domain_remove(pcie->msi_inner_domain);
+ return -ENOMEM;
}
return 0;
@@ -729,8 +698,8 @@ static int advk_pcie_init_msi_irq_domain(struct advk_pcie *pcie)
static void advk_pcie_remove_msi_irq_domain(struct advk_pcie *pcie)
{
- of_pci_msi_chip_remove(&pcie->msi);
irq_domain_remove(pcie->msi_domain);
+ irq_domain_remove(pcie->msi_inner_domain);
}
static int advk_pcie_init_irq_domain(struct advk_pcie *pcie)
@@ -917,8 +886,6 @@ static int advk_pcie_probe(struct platform_device *pdev)
struct advk_pcie *pcie;
struct resource *res;
struct pci_bus *bus, *child;
- struct msi_controller *msi;
- struct device_node *msi_node;
int ret, irq;
pcie = devm_kzalloc(dev, sizeof(struct advk_pcie), GFP_KERNEL);
@@ -962,14 +929,8 @@ static int advk_pcie_probe(struct platform_device *pdev)
return ret;
}
- msi_node = of_parse_phandle(dev->of_node, "msi-parent", 0);
- if (msi_node)
- msi = of_pci_find_msi_chip_by_node(msi_node);
- else
- msi = NULL;
-
- bus = pci_scan_root_bus_msi(dev, 0, &advk_pcie_ops,
- pcie, &pcie->resources, &pcie->msi);
+ bus = pci_scan_root_bus(dev, 0, &advk_pcie_ops,
+ pcie, &pcie->resources);
if (!bus) {
advk_pcie_remove_msi_irq_domain(pcie);
advk_pcie_remove_irq_domain(pcie);
diff --git a/drivers/pci/host/pci-ftpci100.c b/drivers/pci/host/pci-ftpci100.c
new file mode 100644
index 000000000000..d26501c4145a
--- /dev/null
+++ b/drivers/pci/host/pci-ftpci100.c
@@ -0,0 +1,563 @@
+/*
+ * Support for Faraday Technology FTPCI100 PCI Controller
+ *
+ * Copyright (C) 2017 Linus Walleij <linus.walleij@linaro.org>
+ *
+ * Based on the out-of-tree OpenWRT patch for Cortina Gemini:
+ * Copyright (C) 2009 Janos Laube <janos.dev@gmail.com>
+ * Copyright (C) 2009 Paulius Zaleckas <paulius.zaleckas@teltonika.lt>
+ * Based on SL2312 PCI controller code
+ * Storlink (C) 2003
+ */
+
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/of_pci.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/irqdomain.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/bitops.h>
+#include <linux/irq.h>
+
+/*
+ * Special configuration registers directly in the first few words
+ * in I/O space.
+ */
+#define PCI_IOSIZE 0x00
+#define PCI_PROT 0x04 /* AHB protection */
+#define PCI_CTRL 0x08 /* PCI control signal */
+#define PCI_SOFTRST 0x10 /* Soft reset counter and response error enable */
+#define PCI_CONFIG 0x28 /* PCI configuration command register */
+#define PCI_DATA 0x2C
+
+#define FARADAY_PCI_PMC 0x40 /* Power management control */
+#define FARADAY_PCI_PMCSR 0x44 /* Power management status */
+#define FARADAY_PCI_CTRL1 0x48 /* Control register 1 */
+#define FARADAY_PCI_CTRL2 0x4C /* Control register 2 */
+#define FARADAY_PCI_MEM1_BASE_SIZE 0x50 /* Memory base and size #1 */
+#define FARADAY_PCI_MEM2_BASE_SIZE 0x54 /* Memory base and size #2 */
+#define FARADAY_PCI_MEM3_BASE_SIZE 0x58 /* Memory base and size #3 */
+
+/* Bits 31..28 give INTD..INTA status */
+#define PCI_CTRL2_INTSTS_SHIFT 28
+#define PCI_CTRL2_INTMASK_CMDERR BIT(27)
+#define PCI_CTRL2_INTMASK_PARERR BIT(26)
+/* Bits 25..22 mask INTD..INTA */
+#define PCI_CTRL2_INTMASK_SHIFT 22
+#define PCI_CTRL2_INTMASK_MABRT_RX BIT(21)
+#define PCI_CTRL2_INTMASK_TABRT_RX BIT(20)
+#define PCI_CTRL2_INTMASK_TABRT_TX BIT(19)
+#define PCI_CTRL2_INTMASK_RETRY4 BIT(18)
+#define PCI_CTRL2_INTMASK_SERR_RX BIT(17)
+#define PCI_CTRL2_INTMASK_PERR_RX BIT(16)
+/* Bit 15 reserved */
+#define PCI_CTRL2_MSTPRI_REQ6 BIT(14)
+#define PCI_CTRL2_MSTPRI_REQ5 BIT(13)
+#define PCI_CTRL2_MSTPRI_REQ4 BIT(12)
+#define PCI_CTRL2_MSTPRI_REQ3 BIT(11)
+#define PCI_CTRL2_MSTPRI_REQ2 BIT(10)
+#define PCI_CTRL2_MSTPRI_REQ1 BIT(9)
+#define PCI_CTRL2_MSTPRI_REQ0 BIT(8)
+/* Bits 7..4 reserved */
+/* Bits 3..0 TRDYW */
+
+/*
+ * Memory configs:
+ * Bit 31..20 defines the PCI side memory base
+ * Bit 19..16 (4 bits) defines the size per below
+ */
+#define FARADAY_PCI_MEMBASE_MASK 0xfff00000
+#define FARADAY_PCI_MEMSIZE_1MB 0x0
+#define FARADAY_PCI_MEMSIZE_2MB 0x1
+#define FARADAY_PCI_MEMSIZE_4MB 0x2
+#define FARADAY_PCI_MEMSIZE_8MB 0x3
+#define FARADAY_PCI_MEMSIZE_16MB 0x4
+#define FARADAY_PCI_MEMSIZE_32MB 0x5
+#define FARADAY_PCI_MEMSIZE_64MB 0x6
+#define FARADAY_PCI_MEMSIZE_128MB 0x7
+#define FARADAY_PCI_MEMSIZE_256MB 0x8
+#define FARADAY_PCI_MEMSIZE_512MB 0x9
+#define FARADAY_PCI_MEMSIZE_1GB 0xa
+#define FARADAY_PCI_MEMSIZE_2GB 0xb
+#define FARADAY_PCI_MEMSIZE_SHIFT 16
+
+/*
+ * The DMA base is set to 0x0 for all memory segments, it reflects the
+ * fact that the memory of the host system starts at 0x0.
+ */
+#define FARADAY_PCI_DMA_MEM1_BASE 0x00000000
+#define FARADAY_PCI_DMA_MEM2_BASE 0x00000000
+#define FARADAY_PCI_DMA_MEM3_BASE 0x00000000
+
+/* Defines for PCI configuration command register */
+#define PCI_CONF_ENABLE BIT(31)
+#define PCI_CONF_WHERE(r) ((r) & 0xFC)
+#define PCI_CONF_BUS(b) (((b) & 0xFF) << 16)
+#define PCI_CONF_DEVICE(d) (((d) & 0x1F) << 11)
+#define PCI_CONF_FUNCTION(f) (((f) & 0x07) << 8)
+
+/**
+ * struct faraday_pci_variant - encodes IP block differences
+ * @cascaded_irq: this host has cascaded IRQs from an interrupt controller
+ * embedded in the host bridge.
+ */
+struct faraday_pci_variant {
+ bool cascaded_irq;
+};
+
+struct faraday_pci {
+ struct device *dev;
+ void __iomem *base;
+ struct irq_domain *irqdomain;
+ struct pci_bus *bus;
+};
+
+static int faraday_res_to_memcfg(resource_size_t mem_base,
+ resource_size_t mem_size, u32 *val)
+{
+ u32 outval;
+
+ switch (mem_size) {
+ case SZ_1M:
+ outval = FARADAY_PCI_MEMSIZE_1MB;
+ break;
+ case SZ_2M:
+ outval = FARADAY_PCI_MEMSIZE_2MB;
+ break;
+ case SZ_4M:
+ outval = FARADAY_PCI_MEMSIZE_4MB;
+ break;
+ case SZ_8M:
+ outval = FARADAY_PCI_MEMSIZE_8MB;
+ break;
+ case SZ_16M:
+ outval = FARADAY_PCI_MEMSIZE_16MB;
+ break;
+ case SZ_32M:
+ outval = FARADAY_PCI_MEMSIZE_32MB;
+ break;
+ case SZ_64M:
+ outval = FARADAY_PCI_MEMSIZE_64MB;
+ break;
+ case SZ_128M:
+ outval = FARADAY_PCI_MEMSIZE_128MB;
+ break;
+ case SZ_256M:
+ outval = FARADAY_PCI_MEMSIZE_256MB;
+ break;
+ case SZ_512M:
+ outval = FARADAY_PCI_MEMSIZE_512MB;
+ break;
+ case SZ_1G:
+ outval = FARADAY_PCI_MEMSIZE_1GB;
+ break;
+ case SZ_2G:
+ outval = FARADAY_PCI_MEMSIZE_2GB;
+ break;
+ default:
+ return -EINVAL;
+ }
+ outval <<= FARADAY_PCI_MEMSIZE_SHIFT;
+
+ /* The memory base must be 1 MB aligned; warn if low bits get truncated */
+ if (mem_base & ~(FARADAY_PCI_MEMBASE_MASK))
+ pr_warn("truncated PCI memory base\n");
+ /* Translate to bridge side address space */
+ outval |= (mem_base & FARADAY_PCI_MEMBASE_MASK);
+ pr_debug("Translated pci base @%pap, size %pap to config %08x\n",
+ &mem_base, &mem_size, outval);
+
+ *val = outval;
+ return 0;
+}
+
+static int faraday_pci_read_config(struct pci_bus *bus, unsigned int fn,
+ int config, int size, u32 *value)
+{
+ struct faraday_pci *p = bus->sysdata;
+
+ writel(PCI_CONF_BUS(bus->number) |
+ PCI_CONF_DEVICE(PCI_SLOT(fn)) |
+ PCI_CONF_FUNCTION(PCI_FUNC(fn)) |
+ PCI_CONF_WHERE(config) |
+ PCI_CONF_ENABLE,
+ p->base + PCI_CONFIG);
+
+ *value = readl(p->base + PCI_DATA);
+
+ if (size == 1)
+ *value = (*value >> (8 * (config & 3))) & 0xFF;
+ else if (size == 2)
+ *value = (*value >> (8 * (config & 3))) & 0xFFFF;
+
+ dev_dbg(&bus->dev,
+ "[read] slt: %.2d, fnc: %d, cnf: 0x%.2X, val (%d bytes): 0x%.8X\n",
+ PCI_SLOT(fn), PCI_FUNC(fn), config, size, *value);
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int faraday_pci_write_config(struct pci_bus *bus, unsigned int fn,
+ int config, int size, u32 value)
+{
+ struct faraday_pci *p = bus->sysdata;
+ int ret = PCIBIOS_SUCCESSFUL;
+
+ dev_dbg(&bus->dev,
+ "[write] slt: %.2d, fnc: %d, cnf: 0x%.2X, val (%d bytes): 0x%.8X\n",
+ PCI_SLOT(fn), PCI_FUNC(fn), config, size, value);
+
+ writel(PCI_CONF_BUS(bus->number) |
+ PCI_CONF_DEVICE(PCI_SLOT(fn)) |
+ PCI_CONF_FUNCTION(PCI_FUNC(fn)) |
+ PCI_CONF_WHERE(config) |
+ PCI_CONF_ENABLE,
+ p->base + PCI_CONFIG);
+
+ switch (size) {
+ case 4:
+ writel(value, p->base + PCI_DATA);
+ break;
+ case 2:
+ writew(value, p->base + PCI_DATA + (config & 3));
+ break;
+ case 1:
+ writeb(value, p->base + PCI_DATA + (config & 3));
+ break;
+ default:
+ ret = PCIBIOS_BAD_REGISTER_NUMBER;
+ }
+
+ return ret;
+}
+
+static struct pci_ops faraday_pci_ops = {
+ .read = faraday_pci_read_config,
+ .write = faraday_pci_write_config,
+};
+
+static void faraday_pci_ack_irq(struct irq_data *d)
+{
+ struct faraday_pci *p = irq_data_get_irq_chip_data(d);
+ unsigned int reg;
+
+ faraday_pci_read_config(p->bus, 0, FARADAY_PCI_CTRL2, 4, &reg);
+ reg &= ~(0xF << PCI_CTRL2_INTSTS_SHIFT);
+ reg |= BIT(irqd_to_hwirq(d) + PCI_CTRL2_INTSTS_SHIFT);
+ faraday_pci_write_config(p->bus, 0, FARADAY_PCI_CTRL2, 4, reg);
+}
+
+static void faraday_pci_mask_irq(struct irq_data *d)
+{
+ struct faraday_pci *p = irq_data_get_irq_chip_data(d);
+ unsigned int reg;
+
+ faraday_pci_read_config(p->bus, 0, FARADAY_PCI_CTRL2, 4, &reg);
+ reg &= ~((0xF << PCI_CTRL2_INTSTS_SHIFT)
+ | BIT(irqd_to_hwirq(d) + PCI_CTRL2_INTMASK_SHIFT));
+ faraday_pci_write_config(p->bus, 0, FARADAY_PCI_CTRL2, 4, reg);
+}
+
+static void faraday_pci_unmask_irq(struct irq_data *d)
+{
+ struct faraday_pci *p = irq_data_get_irq_chip_data(d);
+ unsigned int reg;
+
+ faraday_pci_read_config(p->bus, 0, FARADAY_PCI_CTRL2, 4, &reg);
+ reg &= ~(0xF << PCI_CTRL2_INTSTS_SHIFT);
+ reg |= BIT(irqd_to_hwirq(d) + PCI_CTRL2_INTMASK_SHIFT);
+ faraday_pci_write_config(p->bus, 0, FARADAY_PCI_CTRL2, 4, reg);
+}
+
+static void faraday_pci_irq_handler(struct irq_desc *desc)
+{
+ struct faraday_pci *p = irq_desc_get_handler_data(desc);
+ struct irq_chip *irqchip = irq_desc_get_chip(desc);
+ unsigned int irq_stat, reg, i;
+
+ faraday_pci_read_config(p->bus, 0, FARADAY_PCI_CTRL2, 4, &reg);
+ irq_stat = reg >> PCI_CTRL2_INTSTS_SHIFT;
+
+ chained_irq_enter(irqchip, desc);
+
+ for (i = 0; i < 4; i++) {
+ if ((irq_stat & BIT(i)) == 0)
+ continue;
+ generic_handle_irq(irq_find_mapping(p->irqdomain, i));
+ }
+
+ chained_irq_exit(irqchip, desc);
+}
+
+static struct irq_chip faraday_pci_irq_chip = {
+ .name = "PCI",
+ .irq_ack = faraday_pci_ack_irq,
+ .irq_mask = faraday_pci_mask_irq,
+ .irq_unmask = faraday_pci_unmask_irq,
+};
+
+static int faraday_pci_irq_map(struct irq_domain *domain, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_chip_and_handler(irq, &faraday_pci_irq_chip, handle_level_irq);
+ irq_set_chip_data(irq, domain->host_data);
+
+ return 0;
+}
+
+static const struct irq_domain_ops faraday_pci_irqdomain_ops = {
+ .map = faraday_pci_irq_map,
+};
+
+static int faraday_pci_setup_cascaded_irq(struct faraday_pci *p)
+{
+ struct device_node *intc = of_get_next_child(p->dev->of_node, NULL);
+ int irq;
+ int i;
+
+ if (!intc) {
+ dev_err(p->dev, "missing child interrupt-controller node\n");
+ return -EINVAL;
+ }
+
+ /* All PCI IRQs cascade off this one */
+ irq = of_irq_get(intc, 0);
+ if (irq <= 0) {
+ dev_err(p->dev, "failed to get parent IRQ\n");
+ return -EINVAL;
+ }
+
+ p->irqdomain = irq_domain_add_linear(intc, 4,
+ &faraday_pci_irqdomain_ops, p);
+ if (!p->irqdomain) {
+ dev_err(p->dev, "failed to create Gemini PCI IRQ domain\n");
+ return -EINVAL;
+ }
+
+ irq_set_chained_handler_and_data(irq, faraday_pci_irq_handler, p);
+
+ for (i = 0; i < 4; i++)
+ irq_create_mapping(p->irqdomain, i);
+
+ return 0;
+}
+
+static int pci_dma_range_parser_init(struct of_pci_range_parser *parser,
+ struct device_node *node)
+{
+ const int na = 3, ns = 2;
+ int rlen;
+
+ parser->node = node;
+ parser->pna = of_n_addr_cells(node);
+ parser->np = parser->pna + na + ns;
+
+ parser->range = of_get_property(node, "dma-ranges", &rlen);
+ if (!parser->range)
+ return -ENOENT;
+ parser->end = parser->range + rlen / sizeof(__be32);
+
+ return 0;
+}
+
+static int faraday_pci_parse_map_dma_ranges(struct faraday_pci *p,
+ struct device_node *np)
+{
+ struct of_pci_range range;
+ struct of_pci_range_parser parser;
+ struct device *dev = p->dev;
+ u32 confreg[3] = {
+ FARADAY_PCI_MEM1_BASE_SIZE,
+ FARADAY_PCI_MEM2_BASE_SIZE,
+ FARADAY_PCI_MEM3_BASE_SIZE,
+ };
+ int i = 0;
+ u32 val;
+
+ if (pci_dma_range_parser_init(&parser, np)) {
+ dev_err(dev, "missing dma-ranges property\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Get the dma-ranges from the device tree
+ */
+ for_each_of_pci_range(&parser, &range) {
+ u64 end = range.pci_addr + range.size - 1;
+ int ret;
+
+ ret = faraday_res_to_memcfg(range.pci_addr, range.size, &val);
+ if (ret) {
+ dev_err(dev,
+ "DMA range %d: illegal MEM resource size\n", i);
+ return -EINVAL;
+ }
+
+ dev_info(dev, "DMA MEM%d BASE: 0x%016llx -> 0x%016llx config %08x\n",
+ i + 1, range.pci_addr, end, val);
+ if (i <= 2) {
+ faraday_pci_write_config(p->bus, 0, confreg[i],
+ 4, val);
+ } else {
+ dev_err(dev, "ignore extraneous dma-range %d\n", i);
+ break;
+ }
+
+ i++;
+ }
+
+ return 0;
+}
+
+static int faraday_pci_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ const struct faraday_pci_variant *variant =
+ of_device_get_match_data(dev);
+ struct resource *regs;
+ resource_size_t io_base;
+ struct resource_entry *win;
+ struct faraday_pci *p;
+ struct resource *mem;
+ struct resource *io;
+ struct pci_host_bridge *host;
+ int ret;
+ u32 val;
+ LIST_HEAD(res);
+
+ host = pci_alloc_host_bridge(sizeof(*p));
+ if (!host)
+ return -ENOMEM;
+
+ host->dev.parent = dev;
+ host->ops = &faraday_pci_ops;
+ host->busnr = 0;
+ host->msi = NULL;
+ p = pci_host_bridge_priv(host);
+ host->sysdata = p;
+ p->dev = dev;
+
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ p->base = devm_ioremap_resource(dev, regs);
+ if (IS_ERR(p->base))
+ return PTR_ERR(p->base);
+
+ ret = of_pci_get_host_bridge_resources(dev->of_node, 0, 0xff,
+ &res, &io_base);
+ if (ret)
+ return ret;
+
+ ret = devm_request_pci_bus_resources(dev, &res);
+ if (ret)
+ return ret;
+
+ /* Get the I/O and memory ranges from DT */
+ resource_list_for_each_entry(win, &res) {
+ switch (resource_type(win->res)) {
+ case IORESOURCE_IO:
+ io = win->res;
+ io->name = "Gemini PCI I/O";
+ if (!faraday_res_to_memcfg(io->start - win->offset,
+ resource_size(io), &val)) {
+ /* setup I/O space size */
+ writel(val, p->base + PCI_IOSIZE);
+ } else {
+ dev_err(dev, "illegal IO mem size\n");
+ return -EINVAL;
+ }
+ ret = pci_remap_iospace(io, io_base);
+ if (ret) {
+ dev_warn(dev, "error %d: failed to map resource %pR\n",
+ ret, io);
+ continue;
+ }
+ break;
+ case IORESOURCE_MEM:
+ mem = win->res;
+ mem->name = "Gemini PCI MEM";
+ break;
+ case IORESOURCE_BUS:
+ break;
+ default:
+ break;
+ }
+ }
+
+ /* Setup hostbridge */
+ val = readl(p->base + PCI_CTRL);
+ val |= PCI_COMMAND_IO;
+ val |= PCI_COMMAND_MEMORY;
+ val |= PCI_COMMAND_MASTER;
+ writel(val, p->base + PCI_CTRL);
+
+ list_splice_init(&res, &host->windows);
+ ret = pci_register_host_bridge(host);
+ if (ret) {
+ dev_err(dev, "failed to register host: %d\n", ret);
+ return ret;
+ }
+ p->bus = host->bus;
+
+ /* Mask and clear all interrupts */
+ faraday_pci_write_config(p->bus, 0, FARADAY_PCI_CTRL2 + 2, 2, 0xF000);
+ if (variant->cascaded_irq) {
+ ret = faraday_pci_setup_cascaded_irq(p);
+ if (ret) {
+ dev_err(dev, "failed to setup cascaded IRQ\n");
+ return ret;
+ }
+ }
+
+ ret = faraday_pci_parse_map_dma_ranges(p, dev->of_node);
+ if (ret)
+ return ret;
+
+ pci_scan_child_bus(p->bus);
+ pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci);
+ pci_bus_assign_resources(p->bus);
+ pci_bus_add_devices(p->bus);
+ pci_free_resource_list(&res);
+
+ return 0;
+}
+
+/*
+ * We encode bridge variants here, we have at least two so it doesn't
+ * hurt to have infrastructure to encompass future variants as well.
+ */
+static const struct faraday_pci_variant faraday_regular = {
+ .cascaded_irq = true,
+};
+
+static const struct faraday_pci_variant faraday_dual = {
+ .cascaded_irq = false,
+};
+
+static const struct of_device_id faraday_pci_of_match[] = {
+ {
+ .compatible = "faraday,ftpci100",
+ .data = &faraday_regular,
+ },
+ {
+ .compatible = "faraday,ftpci100-dual",
+ .data = &faraday_dual,
+ },
+ {},
+};
+
+static struct platform_driver faraday_pci_driver = {
+ .driver = {
+ .name = "ftpci100",
+ .of_match_table = of_match_ptr(faraday_pci_of_match),
+ .suppress_bind_attrs = true,
+ },
+ .probe = faraday_pci_probe,
+};
+builtin_platform_driver(faraday_pci_driver);
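/*
 * Worked example of the config command encoding used by
 * faraday_pci_read_config()/faraday_pci_write_config() above
 * (bus 0, device 9, function 0, register 0x2c -- illustrative values):
 *
 *	PCI_CONF_ENABLE			0x80000000
 *	PCI_CONF_BUS(0)			0x00000000
 *	PCI_CONF_DEVICE(9)		0x00004800	(9 << 11)
 *	PCI_CONF_FUNCTION(0)		0x00000000
 *	PCI_CONF_WHERE(0x2c)		0x0000002c	(dword aligned)
 *	----------------------------------------------
 *	value written to PCI_CONFIG:	0x8000482c
 *
 * The 32-bit word then appears at PCI_DATA; sub-word accesses shift and
 * mask by (config & 3), as done in faraday_pci_read_config().
 */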
diff --git a/drivers/pci/host/pci-host-generic.c b/drivers/pci/host/pci-host-generic.c
index c05ea9d72f69..7d709a7e0aa8 100644
--- a/drivers/pci/host/pci-host-generic.c
+++ b/drivers/pci/host/pci-host-generic.c
@@ -60,6 +60,7 @@ static struct platform_driver gen_pci_driver = {
.driver = {
.name = "pci-host-generic",
.of_match_table = gen_pci_of_match,
+ .suppress_bind_attrs = true,
},
.probe = gen_pci_probe,
};
diff --git a/drivers/pci/host/pci-hyperv.c b/drivers/pci/host/pci-hyperv.c
index ada98569b78e..84936383e269 100644
--- a/drivers/pci/host/pci-hyperv.c
+++ b/drivers/pci/host/pci-hyperv.c
@@ -56,6 +56,7 @@
#include <asm/apic.h>
#include <linux/msi.h>
#include <linux/hyperv.h>
+#include <linux/refcount.h>
#include <asm/mshyperv.h>
/*
@@ -72,6 +73,7 @@ enum {
PCI_PROTOCOL_VERSION_CURRENT = PCI_PROTOCOL_VERSION_1_1
};
+#define CPU_AFFINITY_ALL -1ULL
#define PCI_CONFIG_MMIO_LENGTH 0x2000
#define CFG_PAGE_OFFSET 0x1000
#define CFG_PAGE_SIZE (PCI_CONFIG_MMIO_LENGTH - CFG_PAGE_OFFSET)
@@ -350,6 +352,7 @@ enum hv_pcibus_state {
hv_pcibus_init = 0,
hv_pcibus_probed,
hv_pcibus_installed,
+ hv_pcibus_removed,
hv_pcibus_maximum
};
@@ -421,7 +424,7 @@ enum hv_pcidev_ref_reason {
struct hv_pci_dev {
/* List protected by pci_rescan_remove_lock */
struct list_head list_entry;
- atomic_t refs;
+ refcount_t refs;
enum hv_pcichild_state state;
struct pci_function_description desc;
bool reported_missing;
@@ -876,7 +879,7 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
hv_int_desc_free(hpdev, int_desc);
}
- int_desc = kzalloc(sizeof(*int_desc), GFP_KERNEL);
+ int_desc = kzalloc(sizeof(*int_desc), GFP_ATOMIC);
if (!int_desc)
goto drop_reference;
@@ -897,9 +900,13 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
* processors because Hyper-V only supports 64 in a guest.
*/
affinity = irq_data_get_affinity_mask(data);
- for_each_cpu_and(cpu, affinity, cpu_online_mask) {
- int_pkt->int_desc.cpu_mask |=
- (1ULL << vmbus_cpu_number_to_vp_number(cpu));
+ if (cpumask_weight(affinity) >= 32) {
+ int_pkt->int_desc.cpu_mask = CPU_AFFINITY_ALL;
+ } else {
+ for_each_cpu_and(cpu, affinity, cpu_online_mask) {
+ int_pkt->int_desc.cpu_mask |=
+ (1ULL << vmbus_cpu_number_to_vp_number(cpu));
+ }
}
ret = vmbus_sendpacket(hpdev->hbus->hdev->channel, int_pkt,
@@ -1208,9 +1215,11 @@ static int create_root_hv_pci_bus(struct hv_pcibus_device *hbus)
hbus->pci_bus->msi = &hbus->msi_chip;
hbus->pci_bus->msi->dev = &hbus->hdev->device;
+ pci_lock_rescan_remove();
pci_scan_child_bus(hbus->pci_bus);
pci_bus_assign_resources(hbus->pci_bus);
pci_bus_add_devices(hbus->pci_bus);
+ pci_unlock_rescan_remove();
hbus->state = hv_pcibus_installed;
return 0;
}
@@ -1254,13 +1263,13 @@ static void q_resource_requirements(void *context, struct pci_response *resp,
static void get_pcichild(struct hv_pci_dev *hpdev,
enum hv_pcidev_ref_reason reason)
{
- atomic_inc(&hpdev->refs);
+ refcount_inc(&hpdev->refs);
}
static void put_pcichild(struct hv_pci_dev *hpdev,
enum hv_pcidev_ref_reason reason)
{
- if (atomic_dec_and_test(&hpdev->refs))
+ if (refcount_dec_and_test(&hpdev->refs))
kfree(hpdev);
}
@@ -1314,7 +1323,7 @@ static struct hv_pci_dev *new_pcichild_device(struct hv_pcibus_device *hbus,
wait_for_completion(&comp_pkt.host_event);
hpdev->desc = *desc;
- get_pcichild(hpdev, hv_pcidev_ref_initial);
+ refcount_set(&hpdev->refs, 1);
get_pcichild(hpdev, hv_pcidev_ref_childlist);
spin_lock_irqsave(&hbus->device_list_lock, flags);
@@ -1504,13 +1513,24 @@ static void pci_devices_present_work(struct work_struct *work)
put_pcichild(hpdev, hv_pcidev_ref_initial);
}
- /* Tell the core to rescan bus because there may have been changes. */
- if (hbus->state == hv_pcibus_installed) {
+ switch (hbus->state) {
+ case hv_pcibus_installed:
+ /*
+ * Tell the core to rescan bus
+ * because there may have been changes.
+ */
pci_lock_rescan_remove();
pci_scan_child_bus(hbus->pci_bus);
pci_unlock_rescan_remove();
- } else {
+ break;
+
+ case hv_pcibus_init:
+ case hv_pcibus_probed:
survey_child_resources(hbus);
+ break;
+
+ default:
+ break;
}
up(&hbus->enum_sem);
@@ -1600,8 +1620,10 @@ static void hv_eject_device_work(struct work_struct *work)
pdev = pci_get_domain_bus_and_slot(hpdev->hbus->sysdata.domain, 0,
wslot);
if (pdev) {
+ pci_lock_rescan_remove();
pci_stop_and_remove_bus_device(pdev);
pci_dev_put(pdev);
+ pci_unlock_rescan_remove();
}
spin_lock_irqsave(&hpdev->hbus->device_list_lock, flags);
@@ -2185,6 +2207,7 @@ static int hv_pci_probe(struct hv_device *hdev,
hbus = kzalloc(sizeof(*hbus), GFP_KERNEL);
if (!hbus)
return -ENOMEM;
+ hbus->state = hv_pcibus_init;
/*
* The PCI bus "domain" is what is called "segment" in ACPI and
@@ -2348,6 +2371,7 @@ static int hv_pci_remove(struct hv_device *hdev)
pci_stop_root_bus(hbus->pci_bus);
pci_remove_root_bus(hbus->pci_bus);
pci_unlock_rescan_remove();
+ hbus->state = hv_pcibus_removed;
}
hv_pci_bus_exit(hdev);
diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c
index cd7d51988738..f353a6eb2f01 100644
--- a/drivers/pci/host/pci-mvebu.c
+++ b/drivers/pci/host/pci-mvebu.c
@@ -752,10 +752,11 @@ static int mvebu_sw_pci_bridge_write(struct mvebu_pcie_port *port,
* If the mask is 0xffff0000, then we only want to write
* the link control register, rather than clearing the
* RW1C bits in the link status register. Mask out the
- * status register bits.
+ * RW1C status register bits.
*/
if (mask == 0xffff0000)
- value &= 0xffff;
+ value &= ~((PCI_EXP_LNKSTA_LABS |
+ PCI_EXP_LNKSTA_LBMS) << 16);
mvebu_writel(port, value, PCIE_CAP_PCIEXP + PCI_EXP_LNKCTL);
break;
@@ -1005,22 +1006,6 @@ static int mvebu_get_tgt_attr(struct device_node *np, int devfn,
return -ENOENT;
}
-static void mvebu_pcie_msi_enable(struct mvebu_pcie *pcie)
-{
- struct device_node *msi_node;
-
- msi_node = of_parse_phandle(pcie->pdev->dev.of_node,
- "msi-parent", 0);
- if (!msi_node)
- return;
-
- pcie->msi = of_pci_find_msi_chip_by_node(msi_node);
- of_node_put(msi_node);
-
- if (pcie->msi)
- pcie->msi->dev = &pcie->pdev->dev;
-}
-
#ifdef CONFIG_PM_SLEEP
static int mvebu_pcie_suspend(struct device *dev)
{
@@ -1298,7 +1283,6 @@ static int mvebu_pcie_probe(struct platform_device *pdev)
for (i = 0; i < (IO_SPACE_LIMIT - SZ_64K); i += SZ_64K)
pci_ioremap_io(i, pcie->io.start + i);
- mvebu_pcie_msi_enable(pcie);
mvebu_pcie_enable(pcie);
platform_set_drvdata(pdev, pcie);
diff --git a/drivers/pci/host/pci-tegra.c b/drivers/pci/host/pci-tegra.c
index ed8a93f2bfb5..2618f875a600 100644
--- a/drivers/pci/host/pci-tegra.c
+++ b/drivers/pci/host/pci-tegra.c
@@ -380,7 +380,7 @@ static struct tegra_pcie_bus *tegra_pcie_bus_alloc(struct tegra_pcie *pcie,
unsigned int busnr)
{
struct device *dev = pcie->dev;
- pgprot_t prot = pgprot_device(PAGE_KERNEL);
+ pgprot_t prot = pgprot_noncached(PAGE_KERNEL);
phys_addr_t cs = pcie->cs->start;
struct tegra_pcie_bus *bus;
unsigned int i;
@@ -1962,7 +1962,7 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
rp->pcie = pcie;
rp->np = port;
- rp->base = devm_ioremap_resource(dev, &rp->regs);
+ rp->base = devm_pci_remap_cfg_resource(dev, &rp->regs);
if (IS_ERR(rp->base))
return PTR_ERR(rp->base);
diff --git a/drivers/pci/host/pci-thunder-ecam.c b/drivers/pci/host/pci-thunder-ecam.c
index 3f54a43bbbea..fc0ca03f280e 100644
--- a/drivers/pci/host/pci-thunder-ecam.c
+++ b/drivers/pci/host/pci-thunder-ecam.c
@@ -373,6 +373,7 @@ static struct platform_driver thunder_ecam_driver = {
.driver = {
.name = KBUILD_MODNAME,
.of_match_table = thunder_ecam_of_match,
+ .suppress_bind_attrs = true,
},
.probe = thunder_ecam_probe,
};
diff --git a/drivers/pci/host/pci-thunder-pem.c b/drivers/pci/host/pci-thunder-pem.c
index 6e031b522529..6e066f8b74df 100644
--- a/drivers/pci/host/pci-thunder-pem.c
+++ b/drivers/pci/host/pci-thunder-pem.c
@@ -474,6 +474,7 @@ static struct platform_driver thunder_pem_driver = {
.driver = {
.name = KBUILD_MODNAME,
.of_match_table = thunder_pem_of_match,
+ .suppress_bind_attrs = true,
},
.probe = thunder_pem_probe,
};
diff --git a/drivers/pci/host/pci-versatile.c b/drivers/pci/host/pci-versatile.c
index 5ebee7d37ff5..9281eee2d000 100644
--- a/drivers/pci/host/pci-versatile.c
+++ b/drivers/pci/host/pci-versatile.c
@@ -138,7 +138,8 @@ static int versatile_pci_probe(struct platform_device *pdev)
return PTR_ERR(versatile_cfg_base[0]);
res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
- versatile_cfg_base[1] = devm_ioremap_resource(&pdev->dev, res);
+ versatile_cfg_base[1] = devm_pci_remap_cfg_resource(&pdev->dev,
+ res);
if (IS_ERR(versatile_cfg_base[1]))
return PTR_ERR(versatile_cfg_base[1]);
@@ -221,6 +222,7 @@ static struct platform_driver versatile_pci_driver = {
.driver = {
.name = "versatile-pci",
.of_match_table = versatile_pci_of_match,
+ .suppress_bind_attrs = true,
},
.probe = versatile_pci_probe,
};
diff --git a/drivers/pci/host/pci-xgene.c b/drivers/pci/host/pci-xgene.c
index 1a6108788f6f..8cae013e7188 100644
--- a/drivers/pci/host/pci-xgene.c
+++ b/drivers/pci/host/pci-xgene.c
@@ -248,7 +248,7 @@ static int xgene_pcie_ecam_init(struct pci_config_window *cfg, u32 ipversion)
dev_err(dev, "can't get CSR resource\n");
return ret;
}
- port->csr_base = devm_ioremap_resource(dev, &csr);
+ port->csr_base = devm_pci_remap_cfg_resource(dev, &csr);
if (IS_ERR(port->csr_base))
return PTR_ERR(port->csr_base);
@@ -359,7 +359,7 @@ static int xgene_pcie_map_reg(struct xgene_pcie_port *port,
struct resource *res;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "csr");
- port->csr_base = devm_ioremap_resource(dev, res);
+ port->csr_base = devm_pci_remap_cfg_resource(dev, res);
if (IS_ERR(port->csr_base))
return PTR_ERR(port->csr_base);
@@ -697,6 +697,7 @@ static struct platform_driver xgene_pcie_driver = {
.driver = {
.name = "xgene-pcie",
.of_match_table = of_match_ptr(xgene_pcie_match_table),
+ .suppress_bind_attrs = true,
},
.probe = xgene_pcie_probe_bridge,
};
diff --git a/drivers/pci/host/pcie-iproc-platform.c b/drivers/pci/host/pcie-iproc-platform.c
index 8c6a327ca6cd..90d2bdd94e41 100644
--- a/drivers/pci/host/pcie-iproc-platform.c
+++ b/drivers/pci/host/pcie-iproc-platform.c
@@ -67,7 +67,8 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev)
return ret;
}
- pcie->base = devm_ioremap(dev, reg.start, resource_size(&reg));
+ pcie->base = devm_pci_remap_cfgspace(dev, reg.start,
+ resource_size(&reg));
if (!pcie->base) {
dev_err(dev, "unable to map controller registers\n");
return -ENOMEM;
diff --git a/drivers/pci/host/pcie-rockchip.c b/drivers/pci/host/pcie-rockchip.c
index 26ddd3535272..0e020b6e0943 100644
--- a/drivers/pci/host/pcie-rockchip.c
+++ b/drivers/pci/host/pcie-rockchip.c
@@ -26,6 +26,7 @@
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
+#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_pci.h>
@@ -223,9 +224,11 @@ struct rockchip_pcie {
int link_gen;
struct device *dev;
struct irq_domain *irq_domain;
- u32 io_size;
int offset;
+ struct pci_bus *root_bus;
+ struct resource *io;
phys_addr_t io_bus_addr;
+ u32 io_size;
void __iomem *msg_region;
u32 mem_size;
phys_addr_t msg_bus_addr;
@@ -425,7 +428,8 @@ static struct pci_ops rockchip_pcie_ops = {
static void rockchip_pcie_set_power_limit(struct rockchip_pcie *rockchip)
{
- u32 status, curr, scale, power;
+ int curr;
+ u32 status, scale, power;
if (IS_ERR(rockchip->vpcie3v3))
return;
@@ -437,24 +441,25 @@ static void rockchip_pcie_set_power_limit(struct rockchip_pcie *rockchip)
* to the actual power supply.
*/
curr = regulator_get_current_limit(rockchip->vpcie3v3);
- if (curr > 0) {
- scale = 3; /* 0.001x */
- curr = curr / 1000; /* convert to mA */
- power = (curr * 3300) / 1000; /* milliwatt */
- while (power > PCIE_RC_CONFIG_DCR_CSPL_LIMIT) {
- if (!scale) {
- dev_warn(rockchip->dev, "invalid power supply\n");
- return;
- }
- scale--;
- power = power / 10;
- }
+ if (curr <= 0)
+ return;
- status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_DCR);
- status |= (power << PCIE_RC_CONFIG_DCR_CSPL_SHIFT) |
- (scale << PCIE_RC_CONFIG_DCR_CPLS_SHIFT);
- rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_DCR);
+ scale = 3; /* 0.001x */
+ curr = curr / 1000; /* convert to mA */
+ power = (curr * 3300) / 1000; /* milliwatt */
+ while (power > PCIE_RC_CONFIG_DCR_CSPL_LIMIT) {
+ if (!scale) {
+ dev_warn(rockchip->dev, "invalid power supply\n");
+ return;
+ }
+ scale--;
+ power = power / 10;
}
+
+ status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_DCR);
+ status |= (power << PCIE_RC_CONFIG_DCR_CSPL_SHIFT) |
+ (scale << PCIE_RC_CONFIG_DCR_CPLS_SHIFT);
+ rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_DCR);
}
/**
@@ -596,7 +601,12 @@ static int rockchip_pcie_init_port(struct rockchip_pcie *rockchip)
/* Set RC's clock architecture as common clock */
status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
- status |= PCI_EXP_LNKCTL_CCC;
+ status |= PCI_EXP_LNKSTA_SLC << 16;
+ rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
+
+ /* Set RC's RCB to 128 */
+ status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
+ status |= PCI_EXP_LNKCTL_RCB;
rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
/* Enable Gen1 training */
@@ -822,7 +832,7 @@ static int rockchip_pcie_parse_dt(struct rockchip_pcie *rockchip)
regs = platform_get_resource_byname(pdev,
IORESOURCE_MEM,
"axi-base");
- rockchip->reg_base = devm_ioremap_resource(dev, regs);
+ rockchip->reg_base = devm_pci_remap_cfg_resource(dev, regs);
if (IS_ERR(rockchip->reg_base))
return PTR_ERR(rockchip->reg_base);
@@ -1359,6 +1369,7 @@ static int rockchip_pcie_probe(struct platform_device *pdev)
err, io);
continue;
}
+ rockchip->io = io;
break;
case IORESOURCE_MEM:
mem = win->res;
@@ -1390,6 +1401,7 @@ static int rockchip_pcie_probe(struct platform_device *pdev)
err = -ENOMEM;
goto err_free_res;
}
+ rockchip->root_bus = bus;
pci_bus_size_bridges(bus);
pci_bus_assign_resources(bus);
@@ -1397,7 +1409,7 @@ static int rockchip_pcie_probe(struct platform_device *pdev)
pcie_bus_configure_settings(child);
pci_bus_add_devices(bus);
- return err;
+ return 0;
err_free_res:
pci_free_resource_list(&res);
@@ -1420,6 +1432,34 @@ err_aclk_pcie:
return err;
}
+static int rockchip_pcie_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct rockchip_pcie *rockchip = dev_get_drvdata(dev);
+
+ pci_stop_root_bus(rockchip->root_bus);
+ pci_remove_root_bus(rockchip->root_bus);
+ pci_unmap_iospace(rockchip->io);
+ irq_domain_remove(rockchip->irq_domain);
+
+ phy_power_off(rockchip->phy);
+ phy_exit(rockchip->phy);
+
+ clk_disable_unprepare(rockchip->clk_pcie_pm);
+ clk_disable_unprepare(rockchip->hclk_pcie);
+ clk_disable_unprepare(rockchip->aclk_perf_pcie);
+ clk_disable_unprepare(rockchip->aclk_pcie);
+
+ if (!IS_ERR(rockchip->vpcie3v3))
+ regulator_disable(rockchip->vpcie3v3);
+ if (!IS_ERR(rockchip->vpcie1v8))
+ regulator_disable(rockchip->vpcie1v8);
+ if (!IS_ERR(rockchip->vpcie0v9))
+ regulator_disable(rockchip->vpcie0v9);
+
+ return 0;
+}
+
static const struct dev_pm_ops rockchip_pcie_pm_ops = {
SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(rockchip_pcie_suspend_noirq,
rockchip_pcie_resume_noirq)
@@ -1429,6 +1469,7 @@ static const struct of_device_id rockchip_pcie_of_match[] = {
{ .compatible = "rockchip,rk3399-pcie", },
{}
};
+MODULE_DEVICE_TABLE(of, rockchip_pcie_of_match);
static struct platform_driver rockchip_pcie_driver = {
.driver = {
@@ -1437,6 +1478,10 @@ static struct platform_driver rockchip_pcie_driver = {
.pm = &rockchip_pcie_pm_ops,
},
.probe = rockchip_pcie_probe,
-
+ .remove = rockchip_pcie_remove,
};
-builtin_platform_driver(rockchip_pcie_driver);
+module_platform_driver(rockchip_pcie_driver);
+
+MODULE_AUTHOR("Rockchip Inc");
+MODULE_DESCRIPTION("Rockchip AXI PCIe driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/host/pcie-xilinx-nwl.c b/drivers/pci/host/pcie-xilinx-nwl.c
index 4c3e0ab35496..4b16b26ae909 100644
--- a/drivers/pci/host/pcie-xilinx-nwl.c
+++ b/drivers/pci/host/pcie-xilinx-nwl.c
@@ -761,7 +761,7 @@ static int nwl_pcie_parse_dt(struct nwl_pcie *pcie,
pcie->phys_pcie_reg_base = res->start;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg");
- pcie->ecam_base = devm_ioremap_resource(dev, res);
+ pcie->ecam_base = devm_pci_remap_cfg_resource(dev, res);
if (IS_ERR(pcie->ecam_base))
return PTR_ERR(pcie->ecam_base);
pcie->phys_ecam_base = res->start;
diff --git a/drivers/pci/host/pcie-xilinx.c b/drivers/pci/host/pcie-xilinx.c
index 7f030f5d750b..2fe2df51f9f8 100644
--- a/drivers/pci/host/pcie-xilinx.c
+++ b/drivers/pci/host/pcie-xilinx.c
@@ -606,7 +606,7 @@ static int xilinx_pcie_parse_dt(struct xilinx_pcie_port *port)
return err;
}
- port->reg_base = devm_ioremap_resource(dev, &regs);
+ port->reg_base = devm_pci_remap_cfg_resource(dev, &regs);
if (IS_ERR(port->reg_base))
return PTR_ERR(port->reg_base);
diff --git a/drivers/pci/hotplug/pciehp_pci.c b/drivers/pci/hotplug/pciehp_pci.c
index 9e69403be632..19f30a9f461d 100644
--- a/drivers/pci/hotplug/pciehp_pci.c
+++ b/drivers/pci/hotplug/pciehp_pci.c
@@ -109,6 +109,12 @@ int pciehp_unconfigure_device(struct slot *p_slot)
break;
}
}
+ if (!presence) {
+ pci_dev_set_disconnected(dev, NULL);
+ if (pci_has_subordinate(dev))
+ pci_walk_bus(dev->subordinate,
+ pci_dev_set_disconnected, NULL);
+ }
pci_stop_and_remove_bus_device(dev);
/*
* Ensure that no new Requests will be generated from
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index 2479ae876482..d9dc7363ac77 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -450,6 +450,7 @@ found:
iov->total_VFs = total;
iov->pgsz = pgsz;
iov->self = dev;
+ iov->drivers_autoprobe = true;
pci_read_config_dword(dev, pos + PCI_SRIOV_CAP, &iov->cap);
pci_read_config_byte(dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);
if (pci_pcie_type(dev) == PCI_EXP_TYPE_RC_END)
diff --git a/drivers/pci/irq.c b/drivers/pci/irq.c
index f9f2a0324ecc..83d30953ce19 100644
--- a/drivers/pci/irq.c
+++ b/drivers/pci/irq.c
@@ -1,7 +1,8 @@
/*
- * PCI IRQ failure handing code
+ * PCI IRQ handling code
*
* Copyright (c) 2008 James Bottomley <James.Bottomley@HansenPartnership.com>
+ * Copyright (C) 2017 Christoph Hellwig.
*/
#include <linux/acpi.h>
@@ -59,3 +60,61 @@ enum pci_lost_interrupt_reason pci_lost_interrupt(struct pci_dev *pdev)
return PCI_LOST_IRQ_NO_INFORMATION;
}
EXPORT_SYMBOL(pci_lost_interrupt);
+
+/**
+ * pci_request_irq - allocate an interrupt line for a PCI device
+ * @dev: PCI device to operate on
+ * @nr: device-relative interrupt vector index (0-based).
+ * @handler: Function to be called when the IRQ occurs.
+ * Primary handler for threaded interrupts.
+ * If NULL and thread_fn != NULL the default primary handler is
+ * installed.
+ * @thread_fn: Function called from the IRQ handler thread
+ * If NULL, no IRQ thread is created
+ * @dev_id: Cookie passed back to the handler function
+ * @fmt: Printf-like format string naming the handler
+ *
+ * This call allocates interrupt resources and enables the interrupt line and
+ * IRQ handling. From the point this call is made @handler and @thread_fn may
+ * be invoked. All interrupts requested using this function might be shared.
+ *
+ * @dev_id must not be NULL and must be globally unique.
+ */
+int pci_request_irq(struct pci_dev *dev, unsigned int nr, irq_handler_t handler,
+ irq_handler_t thread_fn, void *dev_id, const char *fmt, ...)
+{
+ va_list ap;
+ int ret;
+ char *devname;
+
+ va_start(ap, fmt);
+ devname = kvasprintf(GFP_KERNEL, fmt, ap);
+ va_end(ap);
+
+ ret = request_threaded_irq(pci_irq_vector(dev, nr), handler, thread_fn,
+ IRQF_SHARED, devname, dev_id);
+ if (ret)
+ kfree(devname);
+ return ret;
+}
+EXPORT_SYMBOL(pci_request_irq);
+
+/**
+ * pci_free_irq - free an interrupt allocated with pci_request_irq
+ * @dev: PCI device to operate on
+ * @nr: device-relative interrupt vector index (0-based).
+ * @dev_id: Device identity to free
+ *
+ * Remove an interrupt handler. The handler is removed and if the interrupt
+ * line is no longer in use by any driver it is disabled. The caller must
+ * ensure the interrupt is disabled on the device before calling this function.
+ * The function does not return until any executing interrupts for this IRQ
+ * have completed.
+ *
+ * This function must not be called from interrupt context.
+ */
+void pci_free_irq(struct pci_dev *dev, unsigned int nr, void *dev_id)
+{
+ kfree(free_irq(pci_irq_vector(dev, nr), dev_id));
+}
+EXPORT_SYMBOL(pci_free_irq);
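A minimal usage sketch of the two new helpers (illustrative only, not part of the patch; the "foo" names and the single-vector setup are assumptions):

#include <linux/interrupt.h>
#include <linux/pci.h>

static irqreturn_t foo_irq(int irq, void *data)
{
        /* handle and acknowledge the device interrupt */
        return IRQ_HANDLED;
}

static int foo_setup_irq(struct pci_dev *pdev, void *priv)
{
        int nvec;

        nvec = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI | PCI_IRQ_LEGACY);
        if (nvec < 0)
                return nvec;

        /* vector 0; priv is the non-NULL, unique cookie passed back to foo_irq() */
        return pci_request_irq(pdev, 0, foo_irq, NULL, priv,
                               "foo[%s]", pci_name(pdev));
}

static void foo_teardown_irq(struct pci_dev *pdev, void *priv)
{
        pci_free_irq(pdev, 0, priv);
        pci_free_irq_vectors(pdev);
}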
diff --git a/drivers/pci/mmap.c b/drivers/pci/mmap.c
new file mode 100644
index 000000000000..9a5e5a9055eb
--- /dev/null
+++ b/drivers/pci/mmap.c
@@ -0,0 +1,99 @@
+/*
+ * mmap.c — generic PCI resource mmap helper
+ *
+ * Copyright © 2017 Amazon.com, Inc. or its affiliates.
+ *
+ * Author: David Woodhouse <dwmw2@infradead.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/pci.h>
+
+#ifdef ARCH_GENERIC_PCI_MMAP_RESOURCE
+
+/*
+ * Modern setup: generic pci_mmap_resource_range(), and implement the legacy
+ * pci_mmap_page_range() (if needed) as a wrapper round it.
+ */
+
+#ifdef HAVE_PCI_MMAP
+int pci_mmap_page_range(struct pci_dev *pdev, int bar,
+ struct vm_area_struct *vma,
+ enum pci_mmap_state mmap_state, int write_combine)
+{
+ resource_size_t start, end;
+
+ pci_resource_to_user(pdev, bar, &pdev->resource[bar], &start, &end);
+
+ /* Adjust vm_pgoff to be the offset within the resource */
+ vma->vm_pgoff -= start >> PAGE_SHIFT;
+ return pci_mmap_resource_range(pdev, bar, vma, mmap_state,
+ write_combine);
+}
+#endif
+
+static const struct vm_operations_struct pci_phys_vm_ops = {
+#ifdef CONFIG_HAVE_IOREMAP_PROT
+ .access = generic_access_phys,
+#endif
+};
+
+int pci_mmap_resource_range(struct pci_dev *pdev, int bar,
+ struct vm_area_struct *vma,
+ enum pci_mmap_state mmap_state, int write_combine)
+{
+ unsigned long size;
+ int ret;
+
+ size = ((pci_resource_len(pdev, bar) - 1) >> PAGE_SHIFT) + 1;
+ if (vma->vm_pgoff + vma_pages(vma) > size)
+ return -EINVAL;
+
+ if (write_combine)
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+ else
+ vma->vm_page_prot = pgprot_device(vma->vm_page_prot);
+
+ if (mmap_state == pci_mmap_io) {
+ ret = pci_iobar_pfn(pdev, bar, vma);
+ if (ret)
+ return ret;
+ } else
+ vma->vm_pgoff += (pci_resource_start(pdev, bar) >> PAGE_SHIFT);
+
+ vma->vm_ops = &pci_phys_vm_ops;
+
+ return io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
+ vma->vm_end - vma->vm_start,
+ vma->vm_page_prot);
+}
+
+#elif defined(HAVE_PCI_MMAP) /* && !ARCH_GENERIC_PCI_MMAP_RESOURCE */
+
+/*
+ * Legacy setup: Implement pci_mmap_resource_range() as a wrapper around
+ * the architecture's pci_mmap_page_range(), converting to "user visible"
+ * addresses as necessary.
+ */
+
+int pci_mmap_resource_range(struct pci_dev *pdev, int bar,
+ struct vm_area_struct *vma,
+ enum pci_mmap_state mmap_state, int write_combine)
+{
+ resource_size_t start, end;
+
+ /*
+ * pci_mmap_page_range() expects the same kind of entry as coming
+ * from /proc/bus/pci/ which is a "user visible" value. If this is
+ * different from the resource itself, arch will do necessary fixup.
+ */
+ pci_resource_to_user(pdev, bar, &pdev->resource[bar], &start, &end);
+ vma->vm_pgoff += start >> PAGE_SHIFT;
+ return pci_mmap_page_range(pdev, bar, vma, mmap_state, write_combine);
+}
+#endif
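For reference, a rough sketch of the arch-side opt-in that the generic path above expects; the exact macro names and values are assumptions inferred from the helpers used here (arch_can_pci_mmap_io(), arch_can_pci_mmap_wc(), pci_iobar_pfn()), not something defined by this file:

/* hypothetical <asm/pci.h> excerpt */
#define HAVE_PCI_MMAP                   1  /* keep the legacy entry point, wrapped above */
#define ARCH_GENERIC_PCI_MMAP_RESOURCE  1  /* select the generic pci_mmap_resource_range() */
#define arch_can_pci_mmap_wc()          1  /* allow write-combine mappings of prefetchable BARs */
#define arch_can_pci_mmap_io()          1  /* only if I/O port BARs can be mmapped, ...      */
int pci_iobar_pfn(struct pci_dev *pdev, int bar, struct vm_area_struct *vma);
                                           /* ... in which case the arch supplies this hook  */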
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 0042c365b29b..ba44fdfda66b 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -298,7 +298,7 @@ void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
{
struct pci_dev *dev = msi_desc_to_pci_dev(entry);
- if (dev->current_state != PCI_D0) {
+ if (dev->current_state != PCI_D0 || pci_dev_is_disconnected(dev)) {
/* Don't touch the hardware now */
} else if (entry->msi_attrib.is_msix) {
void __iomem *base = pci_msix_desc_addr(entry);
@@ -541,7 +541,8 @@ msi_setup_entry(struct pci_dev *dev, int nvec, const struct irq_affinity *affd)
if (affd) {
masks = irq_create_affinity_masks(nvec, affd);
if (!masks)
- pr_err("Unable to allocate affinity masks, ignoring\n");
+ dev_err(&dev->dev, "can't allocate MSI affinity masks for %d vectors\n",
+ nvec);
}
/* MSI Entry Initialization */
@@ -681,7 +682,8 @@ static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
if (affd) {
masks = irq_create_affinity_masks(nvec, affd);
if (!masks)
- pr_err("Unable to allocate affinity masks, ignoring\n");
+ dev_err(&dev->dev, "can't allocate MSI-X affinity masks for %d vectors\n",
+ nvec);
}
for (i = 0, curmsk = masks; i < nvec; i++) {
@@ -882,7 +884,7 @@ int pci_msi_vec_count(struct pci_dev *dev)
}
EXPORT_SYMBOL(pci_msi_vec_count);
-void pci_msi_shutdown(struct pci_dev *dev)
+static void pci_msi_shutdown(struct pci_dev *dev)
{
struct msi_desc *desc;
u32 mask;
@@ -973,13 +975,18 @@ static int __pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries,
return msix_capability_init(dev, entries, nvec, affd);
}
-void pci_msix_shutdown(struct pci_dev *dev)
+static void pci_msix_shutdown(struct pci_dev *dev)
{
struct msi_desc *entry;
if (!pci_msi_enable || !dev || !dev->msix_enabled)
return;
+ if (pci_dev_is_disconnected(dev)) {
+ dev->msix_enabled = 0;
+ return;
+ }
+
/* Return the device with MSI-X masked as initial states */
for_each_pci_msi_entry(entry, dev) {
/* Keep cached states to be restored */
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index afa72717a979..192e7b681b96 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -394,6 +394,18 @@ void __weak pcibios_free_irq(struct pci_dev *dev)
{
}
+#ifdef CONFIG_PCI_IOV
+static inline bool pci_device_can_probe(struct pci_dev *pdev)
+{
+ return (!pdev->is_virtfn || pdev->physfn->sriov->drivers_autoprobe);
+}
+#else
+static inline bool pci_device_can_probe(struct pci_dev *pdev)
+{
+ return true;
+}
+#endif
+
static int pci_device_probe(struct device *dev)
{
int error;
@@ -405,10 +417,12 @@ static int pci_device_probe(struct device *dev)
return error;
pci_dev_get(pci_dev);
- error = __pci_device_probe(drv, pci_dev);
- if (error) {
- pcibios_free_irq(pci_dev);
- pci_dev_put(pci_dev);
+ if (pci_device_can_probe(pci_dev)) {
+ error = __pci_device_probe(drv, pci_dev);
+ if (error) {
+ pcibios_free_irq(pci_dev);
+ pci_dev_put(pci_dev);
+ }
}
return error;
@@ -461,8 +475,6 @@ static void pci_device_shutdown(struct device *dev)
if (drv && drv->shutdown)
drv->shutdown(pci_dev);
- pci_msi_shutdown(pci_dev);
- pci_msix_shutdown(pci_dev);
/*
* If this is a kexec reboot, turn off Bus Master bit on the
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index 25d010d449a3..31e99613a12e 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -526,10 +526,37 @@ exit:
return count;
}
+static ssize_t sriov_drivers_autoprobe_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+
+ return sprintf(buf, "%u\n", pdev->sriov->drivers_autoprobe);
+}
+
+static ssize_t sriov_drivers_autoprobe_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ bool drivers_autoprobe;
+
+ if (kstrtobool(buf, &drivers_autoprobe) < 0)
+ return -EINVAL;
+
+ pdev->sriov->drivers_autoprobe = drivers_autoprobe;
+
+ return count;
+}
+
static struct device_attribute sriov_totalvfs_attr = __ATTR_RO(sriov_totalvfs);
static struct device_attribute sriov_numvfs_attr =
__ATTR(sriov_numvfs, (S_IRUGO|S_IWUSR|S_IWGRP),
sriov_numvfs_show, sriov_numvfs_store);
+static struct device_attribute sriov_drivers_autoprobe_attr =
+ __ATTR(sriov_drivers_autoprobe, (S_IRUGO|S_IWUSR|S_IWGRP),
+ sriov_drivers_autoprobe_show, sriov_drivers_autoprobe_store);
#endif /* CONFIG_PCI_IOV */
static ssize_t driver_override_store(struct device *dev,
@@ -980,20 +1007,24 @@ void pci_remove_legacy_files(struct pci_bus *b)
}
#endif /* HAVE_PCI_LEGACY */
-#ifdef HAVE_PCI_MMAP
+#if defined(HAVE_PCI_MMAP) || defined(ARCH_GENERIC_PCI_MMAP_RESOURCE)
int pci_mmap_fits(struct pci_dev *pdev, int resno, struct vm_area_struct *vma,
enum pci_mmap_api mmap_api)
{
- unsigned long nr, start, size, pci_start;
+ unsigned long nr, start, size;
+ resource_size_t pci_start = 0, pci_end;
if (pci_resource_len(pdev, resno) == 0)
return 0;
nr = vma_pages(vma);
start = vma->vm_pgoff;
size = ((pci_resource_len(pdev, resno) - 1) >> PAGE_SHIFT) + 1;
- pci_start = (mmap_api == PCI_MMAP_PROCFS) ?
- pci_resource_start(pdev, resno) >> PAGE_SHIFT : 0;
+ if (mmap_api == PCI_MMAP_PROCFS) {
+ pci_resource_to_user(pdev, resno, &pdev->resource[resno],
+ &pci_start, &pci_end);
+ pci_start >>= PAGE_SHIFT;
+ }
if (start >= pci_start && start < pci_start + size &&
start + nr <= pci_start + size)
return 1;
@@ -1013,37 +1044,24 @@ static int pci_mmap_resource(struct kobject *kobj, struct bin_attribute *attr,
struct vm_area_struct *vma, int write_combine)
{
struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
- struct resource *res = attr->private;
+ int bar = (unsigned long)attr->private;
enum pci_mmap_state mmap_type;
- resource_size_t start, end;
- int i;
-
- for (i = 0; i < PCI_ROM_RESOURCE; i++)
- if (res == &pdev->resource[i])
- break;
- if (i >= PCI_ROM_RESOURCE)
- return -ENODEV;
+ struct resource *res = &pdev->resource[bar];
if (res->flags & IORESOURCE_MEM && iomem_is_exclusive(res->start))
return -EINVAL;
- if (!pci_mmap_fits(pdev, i, vma, PCI_MMAP_SYSFS)) {
+ if (!pci_mmap_fits(pdev, bar, vma, PCI_MMAP_SYSFS)) {
WARN(1, "process \"%s\" tried to map 0x%08lx bytes at page 0x%08lx on %s BAR %d (start 0x%16Lx, size 0x%16Lx)\n",
current->comm, vma->vm_end-vma->vm_start, vma->vm_pgoff,
- pci_name(pdev), i,
- (u64)pci_resource_start(pdev, i),
- (u64)pci_resource_len(pdev, i));
+ pci_name(pdev), bar,
+ (u64)pci_resource_start(pdev, bar),
+ (u64)pci_resource_len(pdev, bar));
return -EINVAL;
}
-
- /* pci_mmap_page_range() expects the same kind of entry as coming
- * from /proc/bus/pci/ which is a "user visible" value. If this is
- * different from the resource itself, arch will do necessary fixup.
- */
- pci_resource_to_user(pdev, i, res, &start, &end);
- vma->vm_pgoff += start >> PAGE_SHIFT;
mmap_type = res->flags & IORESOURCE_MEM ? pci_mmap_mem : pci_mmap_io;
- return pci_mmap_page_range(pdev, vma, mmap_type, write_combine);
+
+ return pci_mmap_resource_range(pdev, bar, vma, mmap_type, write_combine);
}
static int pci_mmap_resource_uc(struct file *filp, struct kobject *kobj,
@@ -1065,22 +1083,18 @@ static ssize_t pci_resource_io(struct file *filp, struct kobject *kobj,
loff_t off, size_t count, bool write)
{
struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
- struct resource *res = attr->private;
+ int bar = (unsigned long)attr->private;
+ struct resource *res;
unsigned long port = off;
- int i;
- for (i = 0; i < PCI_ROM_RESOURCE; i++)
- if (res == &pdev->resource[i])
- break;
- if (i >= PCI_ROM_RESOURCE)
- return -ENODEV;
+ res = &pdev->resource[bar];
- port += pci_resource_start(pdev, i);
+ port += pci_resource_start(pdev, bar);
- if (port > pci_resource_end(pdev, i))
+ if (port > pci_resource_end(pdev, bar))
return 0;
- if (port + count - 1 > pci_resource_end(pdev, i))
+ if (port + count - 1 > pci_resource_end(pdev, bar))
return -EINVAL;
switch (count) {
@@ -1170,16 +1184,19 @@ static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine)
} else {
pdev->res_attr[num] = res_attr;
sprintf(res_attr_name, "resource%d", num);
- res_attr->mmap = pci_mmap_resource_uc;
- }
- if (pci_resource_flags(pdev, num) & IORESOURCE_IO) {
- res_attr->read = pci_read_resource_io;
- res_attr->write = pci_write_resource_io;
+ if (pci_resource_flags(pdev, num) & IORESOURCE_IO) {
+ res_attr->read = pci_read_resource_io;
+ res_attr->write = pci_write_resource_io;
+ if (arch_can_pci_mmap_io())
+ res_attr->mmap = pci_mmap_resource_uc;
+ } else {
+ res_attr->mmap = pci_mmap_resource_uc;
+ }
}
res_attr->attr.name = res_attr_name;
res_attr->attr.mode = S_IRUSR | S_IWUSR;
res_attr->size = pci_resource_len(pdev, num);
- res_attr->private = &pdev->resource[num];
+ res_attr->private = (void *)(unsigned long)num;
retval = sysfs_create_bin_file(&pdev->dev.kobj, res_attr);
if (retval)
kfree(res_attr);
@@ -1207,9 +1224,9 @@ static int pci_create_resource_files(struct pci_dev *pdev)
retval = pci_create_attr(pdev, i, 0);
/* for prefetchable resources, create a WC mappable file */
- if (!retval && pdev->resource[i].flags & IORESOURCE_PREFETCH)
+ if (!retval && arch_can_pci_mmap_wc() &&
+ pdev->resource[i].flags & IORESOURCE_PREFETCH)
retval = pci_create_attr(pdev, i, 1);
-
if (retval) {
pci_remove_resource_files(pdev);
return retval;
@@ -1549,6 +1566,7 @@ static struct attribute_group pci_dev_hp_attr_group = {
static struct attribute *sriov_dev_attrs[] = {
&sriov_totalvfs_attr.attr,
&sriov_numvfs_attr.attr,
+ &sriov_drivers_autoprobe_attr.attr,
NULL,
};
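With pci_mmap_resource() now keyed on the BAR index and routed through pci_mmap_resource_range(), the userspace view is unchanged; a small illustrative program (the device address and mapping length are placeholders, and a resource0_wc file only exists when arch_can_pci_mmap_wc() is true and BAR 0 is prefetchable):

#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        const char *path = "/sys/bus/pci/devices/0000:01:00.0/resource0";
        int fd = open(path, O_RDWR);
        void *bar;

        if (fd < 0) {
                perror("open");
                return 1;
        }
        /* length must not exceed the BAR size; pci_mmap_fits() enforces this */
        bar = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if (bar == MAP_FAILED) {
                perror("mmap");
                close(fd);
                return 1;
        }
        /* ... access device registers through bar ... */
        munmap(bar, 4096);
        close(fd);
        return 0;
}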
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 7904d02ffdb9..b01bd5bba8e6 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -66,7 +66,8 @@ static void pci_dev_d3_sleep(struct pci_dev *dev)
if (delay < pci_pm_d3_delay)
delay = pci_pm_d3_delay;
- msleep(delay);
+ if (delay)
+ msleep(delay);
}
#ifdef CONFIG_PCI_DOMAINS
@@ -827,7 +828,8 @@ static void __pci_start_power_transition(struct pci_dev *dev, pci_power_t state)
* because have already delayed for the bridge.
*/
if (dev->runtime_d3cold) {
- msleep(dev->d3cold_delay);
+ if (dev->d3cold_delay)
+ msleep(dev->d3cold_delay);
/*
* When powering on a bridge from D3cold, the
* whole hierarchy may be powered on into
@@ -1782,8 +1784,8 @@ static void pci_pme_list_scan(struct work_struct *work)
}
}
if (!list_empty(&pci_pme_list))
- schedule_delayed_work(&pci_pme_work,
- msecs_to_jiffies(PME_TIMEOUT));
+ queue_delayed_work(system_freezable_wq, &pci_pme_work,
+ msecs_to_jiffies(PME_TIMEOUT));
mutex_unlock(&pci_pme_list_mutex);
}
@@ -1848,8 +1850,9 @@ void pci_pme_active(struct pci_dev *dev, bool enable)
mutex_lock(&pci_pme_list_mutex);
list_add(&pme_dev->list, &pci_pme_list);
if (list_is_singular(&pci_pme_list))
- schedule_delayed_work(&pci_pme_work,
- msecs_to_jiffies(PME_TIMEOUT));
+ queue_delayed_work(system_freezable_wq,
+ &pci_pme_work,
+ msecs_to_jiffies(PME_TIMEOUT));
mutex_unlock(&pci_pme_list_mutex);
} else {
mutex_lock(&pci_pme_list_mutex);
@@ -3363,7 +3366,7 @@ unsigned long __weak pci_address_to_pio(phys_addr_t address)
* Only architectures that have memory mapped IO functions defined
* (and the PCI_IOBASE value defined) should call this function.
*/
-int __weak pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr)
+int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr)
{
#if defined(PCI_IOBASE) && defined(CONFIG_MMU)
unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;
@@ -3383,6 +3386,7 @@ int __weak pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr)
return -ENODEV;
#endif
}
+EXPORT_SYMBOL(pci_remap_iospace);
/**
* pci_unmap_iospace - Unmap the memory mapped I/O space
@@ -3400,6 +3404,89 @@ void pci_unmap_iospace(struct resource *res)
unmap_kernel_range(vaddr, resource_size(res));
#endif
}
+EXPORT_SYMBOL(pci_unmap_iospace);
+
+/**
+ * devm_pci_remap_cfgspace - Managed pci_remap_cfgspace()
+ * @dev: Generic device to remap IO address for
+ * @offset: Resource address to map
+ * @size: Size of map
+ *
+ * Managed pci_remap_cfgspace(). Map is automatically unmapped on driver
+ * detach.
+ */
+void __iomem *devm_pci_remap_cfgspace(struct device *dev,
+ resource_size_t offset,
+ resource_size_t size)
+{
+ void __iomem **ptr, *addr;
+
+ ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return NULL;
+
+ addr = pci_remap_cfgspace(offset, size);
+ if (addr) {
+ *ptr = addr;
+ devres_add(dev, ptr);
+ } else
+ devres_free(ptr);
+
+ return addr;
+}
+EXPORT_SYMBOL(devm_pci_remap_cfgspace);
+
+/**
+ * devm_pci_remap_cfg_resource - check, request region and ioremap cfg resource
+ * @dev: generic device to handle the resource for
+ * @res: configuration space resource to be handled
+ *
+ * Checks that a resource is a valid memory region, requests the memory
+ * region and ioremaps with pci_remap_cfgspace() API that ensures the
+ * proper PCI configuration space memory attributes are guaranteed.
+ *
+ * All operations are managed and will be undone on driver detach.
+ *
+ * Returns a pointer to the remapped memory or an ERR_PTR() encoded error code
+ * on failure. Usage example:
+ *
+ * res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ * base = devm_pci_remap_cfg_resource(&pdev->dev, res);
+ * if (IS_ERR(base))
+ * return PTR_ERR(base);
+ */
+void __iomem *devm_pci_remap_cfg_resource(struct device *dev,
+ struct resource *res)
+{
+ resource_size_t size;
+ const char *name;
+ void __iomem *dest_ptr;
+
+ BUG_ON(!dev);
+
+ if (!res || resource_type(res) != IORESOURCE_MEM) {
+ dev_err(dev, "invalid resource\n");
+ return IOMEM_ERR_PTR(-EINVAL);
+ }
+
+ size = resource_size(res);
+ name = res->name ?: dev_name(dev);
+
+ if (!devm_request_mem_region(dev, res->start, size, name)) {
+ dev_err(dev, "can't request region for resource %pR\n", res);
+ return IOMEM_ERR_PTR(-EBUSY);
+ }
+
+ dest_ptr = devm_pci_remap_cfgspace(dev, res->start, size);
+ if (!dest_ptr) {
+ dev_err(dev, "ioremap failed for resource %pR\n", res);
+ devm_release_mem_region(dev, res->start, size);
+ dest_ptr = IOMEM_ERR_PTR(-ENOMEM);
+ }
+
+ return dest_ptr;
+}
+EXPORT_SYMBOL(devm_pci_remap_cfg_resource);
static void __pci_set_master(struct pci_dev *dev, bool enable)
{
@@ -3773,24 +3860,41 @@ static void pci_flr_wait(struct pci_dev *dev)
(i - 1) * 100);
}
-static int pcie_flr(struct pci_dev *dev, int probe)
+/**
+ * pcie_has_flr - check if a device supports function level resets
+ * @dev: device to check
+ *
+ * Returns true if the device advertises support for PCIe function level
+ * resets.
+ */
+static bool pcie_has_flr(struct pci_dev *dev)
{
u32 cap;
- pcie_capability_read_dword(dev, PCI_EXP_DEVCAP, &cap);
- if (!(cap & PCI_EXP_DEVCAP_FLR))
- return -ENOTTY;
+ if (dev->dev_flags & PCI_DEV_FLAGS_NO_FLR_RESET)
+ return false;
- if (probe)
- return 0;
+ pcie_capability_read_dword(dev, PCI_EXP_DEVCAP, &cap);
+ return cap & PCI_EXP_DEVCAP_FLR;
+}
+/**
+ * pcie_flr - initiate a PCIe function level reset
+ * @dev: device to reset
+ *
+ * Initiate a function level reset on @dev. The caller should ensure the
+ * device supports FLR before calling this function, e.g. by using the
+ * pcie_has_flr() helper.
+ */
+void pcie_flr(struct pci_dev *dev)
+{
if (!pci_wait_for_pending_transaction(dev))
dev_err(&dev->dev, "timed out waiting for pending transaction; performing function level reset anyway\n");
pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
pci_flr_wait(dev);
- return 0;
}
+EXPORT_SYMBOL_GPL(pcie_flr);
static int pci_af_flr(struct pci_dev *dev, int probe)
{
@@ -3801,6 +3905,9 @@ static int pci_af_flr(struct pci_dev *dev, int probe)
if (!pos)
return -ENOTTY;
+ if (dev->dev_flags & PCI_DEV_FLAGS_NO_FLR_RESET)
+ return -ENOTTY;
+
pci_read_config_byte(dev, pos + PCI_AF_CAP, &cap);
if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR))
return -ENOTTY;
@@ -3971,9 +4078,12 @@ static int __pci_dev_reset(struct pci_dev *dev, int probe)
if (rc != -ENOTTY)
goto done;
- rc = pcie_flr(dev, probe);
- if (rc != -ENOTTY)
+ if (pcie_has_flr(dev)) {
+ if (!probe)
+ pcie_flr(dev);
+ rc = 0;
goto done;
+ }
rc = pci_af_flr(dev, probe);
if (rc != -ENOTTY)
@@ -4932,6 +5042,8 @@ bool pci_device_is_present(struct pci_dev *pdev)
{
u32 v;
+ if (pci_dev_is_disconnected(pdev))
+ return false;
return pci_bus_read_dev_vendor_id(pdev->bus, pdev->devfn, &v, 0);
}
EXPORT_SYMBOL_GPL(pci_device_is_present);
@@ -4947,6 +5059,11 @@ void pci_ignore_hotplug(struct pci_dev *dev)
}
EXPORT_SYMBOL_GPL(pci_ignore_hotplug);
+resource_size_t __weak pcibios_default_alignment(void)
+{
+ return 0;
+}
+
#define RESOURCE_ALIGNMENT_PARAM_SIZE COMMAND_LINE_SIZE
static char resource_alignment_param[RESOURCE_ALIGNMENT_PARAM_SIZE] = {0};
static DEFINE_SPINLOCK(resource_alignment_lock);
@@ -4954,22 +5071,25 @@ static DEFINE_SPINLOCK(resource_alignment_lock);
/**
* pci_specified_resource_alignment - get resource alignment specified by user.
* @dev: the PCI device to get
+ * @resize: whether or not to change resources' size when reassigning alignment
*
* RETURNS: Resource alignment if it is specified.
* Zero if it is not specified.
*/
-static resource_size_t pci_specified_resource_alignment(struct pci_dev *dev)
+static resource_size_t pci_specified_resource_alignment(struct pci_dev *dev,
+ bool *resize)
{
int seg, bus, slot, func, align_order, count;
unsigned short vendor, device, subsystem_vendor, subsystem_device;
- resource_size_t align = 0;
+ resource_size_t align = pcibios_default_alignment();
char *p;
spin_lock(&resource_alignment_lock);
p = resource_alignment_param;
- if (!*p)
+ if (!*p && !align)
goto out;
if (pci_has_flag(PCI_PROBE_ONLY)) {
+ align = 0;
pr_info_once("PCI: Ignoring requested alignments (PCI_PROBE_ONLY)\n");
goto out;
}
@@ -4999,6 +5119,7 @@ static resource_size_t pci_specified_resource_alignment(struct pci_dev *dev)
(!device || (device == dev->device)) &&
(!subsystem_vendor || (subsystem_vendor == dev->subsystem_vendor)) &&
(!subsystem_device || (subsystem_device == dev->subsystem_device))) {
+ *resize = true;
if (align_order == -1)
align = PAGE_SIZE;
else
@@ -5024,6 +5145,7 @@ static resource_size_t pci_specified_resource_alignment(struct pci_dev *dev)
bus == dev->bus->number &&
slot == PCI_SLOT(dev->devfn) &&
func == PCI_FUNC(dev->devfn)) {
+ *resize = true;
if (align_order == -1)
align = PAGE_SIZE;
else
@@ -5043,6 +5165,68 @@ out:
return align;
}
+static void pci_request_resource_alignment(struct pci_dev *dev, int bar,
+ resource_size_t align, bool resize)
+{
+ struct resource *r = &dev->resource[bar];
+ resource_size_t size;
+
+ if (!(r->flags & IORESOURCE_MEM))
+ return;
+
+ if (r->flags & IORESOURCE_PCI_FIXED) {
+ dev_info(&dev->dev, "BAR%d %pR: ignoring requested alignment %#llx\n",
+ bar, r, (unsigned long long)align);
+ return;
+ }
+
+ size = resource_size(r);
+ if (size >= align)
+ return;
+
+ /*
+ * Increase the alignment of the resource. There are two ways we
+ * can do this:
+ *
+ * 1) Increase the size of the resource. BARs are aligned on their
+ * size, so when we reallocate space for this resource, we'll
+ * allocate it with the larger alignment. This also prevents
+ * assignment of any other BARs inside the alignment region, so
+ * if we're requesting page alignment, this means no other BARs
+ * will share the page.
+ *
+ * The disadvantage is that this makes the resource larger than
+ * the hardware BAR, which may break drivers that compute things
+ * based on the resource size, e.g., to find registers at a
+ * fixed offset before the end of the BAR.
+ *
+ * 2) Retain the resource size, but use IORESOURCE_STARTALIGN and
+ * set r->start to the desired alignment. By itself this
+ * doesn't prevent other BARs being put inside the alignment
+ * region, but if we realign *every* resource of every device in
+ * the system, none of them will share an alignment region.
+ *
+ * When the user has requested alignment for only some devices via
+ * the "pci=resource_alignment" argument, "resize" is true and we
+ * use the first method. Otherwise we assume we're aligning all
+ * devices and we use the second.
+ */
+
+ dev_info(&dev->dev, "BAR%d %pR: requesting alignment to %#llx\n",
+ bar, r, (unsigned long long)align);
+
+ if (resize) {
+ r->start = 0;
+ r->end = align - 1;
+ } else {
+ r->flags &= ~IORESOURCE_SIZEALIGN;
+ r->flags |= IORESOURCE_STARTALIGN;
+ r->start = align;
+ r->end = r->start + size - 1;
+ }
+ r->flags |= IORESOURCE_UNSET;
+}
+
/*
* This function disables memory decoding and releases memory resources
* of the device specified by kernel's boot parameter 'pci=resource_alignment='.
@@ -5054,8 +5238,9 @@ void pci_reassigndev_resource_alignment(struct pci_dev *dev)
{
int i;
struct resource *r;
- resource_size_t align, size;
+ resource_size_t align;
u16 command;
+ bool resize = false;
/*
* VF BARs are read-only zero according to SR-IOV spec r1.1, sec
@@ -5067,7 +5252,7 @@ void pci_reassigndev_resource_alignment(struct pci_dev *dev)
return;
/* check if specified PCI is target device to reassign */
- align = pci_specified_resource_alignment(dev);
+ align = pci_specified_resource_alignment(dev, &resize);
if (!align)
return;
@@ -5084,28 +5269,11 @@ void pci_reassigndev_resource_alignment(struct pci_dev *dev)
command &= ~PCI_COMMAND_MEMORY;
pci_write_config_word(dev, PCI_COMMAND, command);
- for (i = 0; i < PCI_BRIDGE_RESOURCES; i++) {
- r = &dev->resource[i];
- if (!(r->flags & IORESOURCE_MEM))
- continue;
- if (r->flags & IORESOURCE_PCI_FIXED) {
- dev_info(&dev->dev, "Ignoring requested alignment for BAR%d: %pR\n",
- i, r);
- continue;
- }
+ for (i = 0; i <= PCI_ROM_RESOURCE; i++)
+ pci_request_resource_alignment(dev, i, align, resize);
- size = resource_size(r);
- if (size < align) {
- size = align;
- dev_info(&dev->dev,
- "Rounding up size of resource #%d to %#llx.\n",
- i, (unsigned long long)size);
- }
- r->flags |= IORESOURCE_UNSET;
- r->end = size - 1;
- r->start = 0;
- }
- /* Need to disable bridge's resource window,
+ /*
+ * Need to disable bridge's resource window,
* to enable the kernel to reassign new resource
* window later on.
*/
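The controller-driver conversions earlier in this patch all follow the same shape; a hedged probe fragment showing the intended pattern (the "foo" names and the "cfg" resource name are illustrative):

#include <linux/err.h>
#include <linux/pci.h>
#include <linux/platform_device.h>

static int foo_pcie_probe(struct platform_device *pdev)
{
        struct resource *res;
        void __iomem *cfg_base;

        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg");
        cfg_base = devm_pci_remap_cfg_resource(&pdev->dev, res);
        if (IS_ERR(cfg_base))
                return PTR_ERR(cfg_base);

        /* ... program the host bridge and enumerate using cfg_base ... */
        return 0;
}

Compared with devm_ioremap_resource(), the managed helper requests the region and maps it with pci_remap_cfgspace(), giving config accesses the memory attributes the kerneldoc above calls for.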
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 4dbf9f96ae5b..f8113e5b9812 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -23,14 +23,14 @@ void pci_create_firmware_label_files(struct pci_dev *pdev);
void pci_remove_firmware_label_files(struct pci_dev *pdev);
#endif
void pci_cleanup_rom(struct pci_dev *dev);
-#ifdef HAVE_PCI_MMAP
+
enum pci_mmap_api {
PCI_MMAP_SYSFS, /* mmap on /sys/bus/pci/devices/<BDF>/resource<N> */
PCI_MMAP_PROCFS /* mmap on /proc/bus/pci/<BDF> */
};
int pci_mmap_fits(struct pci_dev *pdev, int resno, struct vm_area_struct *vmai,
enum pci_mmap_api mmap_api);
-#endif
+
int pci_probe_reset_function(struct pci_dev *dev);
/**
@@ -274,8 +274,23 @@ struct pci_sriov {
struct pci_dev *self; /* this PF */
struct mutex lock; /* lock for setting sriov_numvfs in sysfs */
resource_size_t barsz[PCI_SRIOV_NUM_BARS]; /* VF BAR size */
+ bool drivers_autoprobe; /* auto probing of VFs by driver */
};
+/* pci_dev priv_flags */
+#define PCI_DEV_DISCONNECTED 0
+
+static inline int pci_dev_set_disconnected(struct pci_dev *dev, void *unused)
+{
+ set_bit(PCI_DEV_DISCONNECTED, &dev->priv_flags);
+ return 0;
+}
+
+static inline bool pci_dev_is_disconnected(const struct pci_dev *dev)
+{
+ return test_bit(PCI_DEV_DISCONNECTED, &dev->priv_flags);
+}
+
#ifdef CONFIG_PCI_ATS
void pci_restore_ats_state(struct pci_dev *dev);
#else
diff --git a/drivers/pci/pcie/pcie-dpc.c b/drivers/pci/pcie/pcie-dpc.c
index d4d70ef4a2d7..77d2ca99d2ec 100644
--- a/drivers/pci/pcie/pcie-dpc.c
+++ b/drivers/pci/pcie/pcie-dpc.c
@@ -14,6 +14,7 @@
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pcieport_if.h>
+#include "../pci.h"
struct dpc_dev {
struct pcie_device *dev;
@@ -66,6 +67,10 @@ static void interrupt_event_handler(struct work_struct *work)
list_for_each_entry_safe_reverse(dev, temp, &parent->devices,
bus_list) {
pci_dev_get(dev);
+ pci_dev_set_disconnected(dev, NULL);
+ if (pci_has_subordinate(dev))
+ pci_walk_bus(dev->subordinate,
+ pci_dev_set_disconnected, NULL);
pci_stop_and_remove_bus_device(dev);
pci_dev_put(dev);
}
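Both surprise-removal paths (pciehp and DPC above) now share the same idiom; a minimal core-internal sketch, assuming drivers/pci/pci.h is included as the DPC hunk does:

static void example_mark_subtree_disconnected(struct pci_dev *dev)
{
        pci_dev_set_disconnected(dev, NULL);
        if (pci_has_subordinate(dev))
                pci_walk_bus(dev->subordinate,
                             pci_dev_set_disconnected, NULL);
}

Marking the subtree first lets later teardown code, such as __pci_write_msi_msg() and pci_msix_shutdown() in the msi.c hunks above, skip config and MMIO writes to hardware that is already gone.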
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 90592d424e9b..19c8950c6c38 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -175,7 +175,7 @@ static inline unsigned long decode_bar(struct pci_dev *dev, u32 bar)
int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
struct resource *res, unsigned int pos)
{
- u32 l, sz, mask;
+ u32 l = 0, sz = 0, mask;
u64 l64, sz64, mask64;
u16 orig_cmd;
struct pci_bus_region region, inverted_region;
@@ -231,7 +231,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
res->flags |= IORESOURCE_ROM_ENABLE;
l64 = l & PCI_ROM_ADDRESS_MASK;
sz64 = sz & PCI_ROM_ADDRESS_MASK;
- mask64 = (u32)PCI_ROM_ADDRESS_MASK;
+ mask64 = PCI_ROM_ADDRESS_MASK;
}
if (res->flags & IORESOURCE_MEM_64) {
@@ -1914,33 +1914,6 @@ static void pci_set_msi_domain(struct pci_dev *dev)
dev_set_msi_domain(&dev->dev, d);
}
-/**
- * pci_dma_configure - Setup DMA configuration
- * @dev: ptr to pci_dev struct of the PCI device
- *
- * Function to update PCI devices's DMA configuration using the same
- * info from the OF node or ACPI node of host bridge's parent (if any).
- */
-static void pci_dma_configure(struct pci_dev *dev)
-{
- struct device *bridge = pci_get_host_bridge_device(dev);
-
- if (IS_ENABLED(CONFIG_OF) &&
- bridge->parent && bridge->parent->of_node) {
- of_dma_configure(&dev->dev, bridge->parent->of_node);
- } else if (has_acpi_companion(bridge)) {
- struct acpi_device *adev = to_acpi_device_node(bridge->fwnode);
- enum dev_dma_attr attr = acpi_get_dma_attr(adev);
-
- if (attr == DEV_DMA_NOT_SUPPORTED)
- dev_warn(&dev->dev, "DMA not supported.\n");
- else
- acpi_dma_configure(&dev->dev, attr);
- }
-
- pci_put_host_bridge_device(bridge);
-}
-
void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
{
int ret;
@@ -1954,7 +1927,6 @@ void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
dev->dev.dma_mask = &dev->dma_mask;
dev->dev.dma_parms = &dev->dma_parms;
dev->dev.coherent_dma_mask = 0xffffffffull;
- pci_dma_configure(dev);
pci_set_dma_max_seg_size(dev, 65536);
pci_set_dma_seg_boundary(dev, 0xffffffff);
diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
index f82710a8694d..098360d7ff81 100644
--- a/drivers/pci/proc.c
+++ b/drivers/pci/proc.c
@@ -202,6 +202,8 @@ static long proc_bus_pci_ioctl(struct file *file, unsigned int cmd,
#ifdef HAVE_PCI_MMAP
case PCIIOC_MMAP_IS_IO:
+ if (!arch_can_pci_mmap_io())
+ return -EINVAL;
fpriv->mmap_state = pci_mmap_io;
break;
@@ -210,14 +212,15 @@ static long proc_bus_pci_ioctl(struct file *file, unsigned int cmd,
break;
case PCIIOC_WRITE_COMBINE:
- if (arg)
- fpriv->write_combine = 1;
- else
- fpriv->write_combine = 0;
- break;
-
+ if (arch_can_pci_mmap_wc()) {
+ if (arg)
+ fpriv->write_combine = 1;
+ else
+ fpriv->write_combine = 0;
+ break;
+ }
+ /* If arch decided it can't, fall through... */
#endif /* HAVE_PCI_MMAP */
-
default:
ret = -EINVAL;
break;
@@ -231,25 +234,35 @@ static int proc_bus_pci_mmap(struct file *file, struct vm_area_struct *vma)
{
struct pci_dev *dev = PDE_DATA(file_inode(file));
struct pci_filp_private *fpriv = file->private_data;
- int i, ret, write_combine;
+ int i, ret, write_combine = 0, res_bit = IORESOURCE_MEM;
if (!capable(CAP_SYS_RAWIO))
return -EPERM;
+ if (fpriv->mmap_state == pci_mmap_io) {
+ if (!arch_can_pci_mmap_io())
+ return -EINVAL;
+ res_bit = IORESOURCE_IO;
+ }
+
/* Make sure the caller is mapping a real resource for this device */
for (i = 0; i < PCI_ROM_RESOURCE; i++) {
- if (pci_mmap_fits(dev, i, vma, PCI_MMAP_PROCFS))
+ if (dev->resource[i].flags & res_bit &&
+ pci_mmap_fits(dev, i, vma, PCI_MMAP_PROCFS))
break;
}
if (i >= PCI_ROM_RESOURCE)
return -ENODEV;
- if (fpriv->mmap_state == pci_mmap_mem)
- write_combine = fpriv->write_combine;
- else
- write_combine = 0;
- ret = pci_mmap_page_range(dev, vma,
+ if (fpriv->mmap_state == pci_mmap_mem &&
+ fpriv->write_combine) {
+ if (dev->resource[i].flags & IORESOURCE_PREFETCH)
+ write_combine = 1;
+ else
+ return -EINVAL;
+ }
+ ret = pci_mmap_page_range(dev, i, vma,
fpriv->mmap_state, write_combine);
if (ret < 0)
return ret;
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 673683660b5c..085fb787aa9e 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -1685,6 +1685,29 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x260a, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x260b, quirk_intel_pcie_pm);
#ifdef CONFIG_X86_IO_APIC
+static int dmi_disable_ioapicreroute(const struct dmi_system_id *d)
+{
+ noioapicreroute = 1;
+ pr_info("%s detected: disable boot interrupt reroute\n", d->ident);
+
+ return 0;
+}
+
+static struct dmi_system_id boot_interrupt_dmi_table[] = {
+ /*
+ * Systems to exclude from boot interrupt reroute quirks
+ */
+ {
+ .callback = dmi_disable_ioapicreroute,
+ .ident = "ASUSTek Computer INC. M2N-LR",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTek Computer INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "M2N-LR"),
+ },
+ },
+ {}
+};
+
/*
* Boot interrupts on some chipsets cannot be turned off. For these chipsets,
* remap the original interrupt in the linux kernel to the boot interrupt, so
@@ -1693,6 +1716,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x260b, quirk_intel_pcie_pm);
*/
static void quirk_reroute_to_boot_interrupts_intel(struct pci_dev *dev)
{
+ dmi_check_system(boot_interrupt_dmi_table);
if (noioapicquirk || noioapicreroute)
return;
@@ -3642,19 +3666,11 @@ static int reset_intel_82599_sfp_virtfn(struct pci_dev *dev, int probe)
*
* The 82599 supports FLR on VFs, but FLR support is reported only
* in the PF DEVCAP (sec 9.3.10.4), not in the VF DEVCAP (sec 9.5).
- * Therefore, we can't use pcie_flr(), which checks the VF DEVCAP.
+ * Thus we must call pcie_flr() directly without first checking if it is
+ * supported.
*/
-
- if (probe)
- return 0;
-
- if (!pci_wait_for_pending_transaction(dev))
- dev_err(&dev->dev, "transaction is not cleared; proceeding with reset anyway\n");
-
- pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
-
- msleep(100);
-
+ if (!probe)
+ pcie_flr(dev);
return 0;
}
@@ -3759,20 +3775,7 @@ static int reset_chelsio_generic_dev(struct pci_dev *dev, int probe)
PCI_MSIX_FLAGS_ENABLE |
PCI_MSIX_FLAGS_MASKALL);
- /*
- * Start of pcie_flr() code sequence. This reset code is a copy of
- * the guts of pcie_flr() because that's not an exported function.
- */
-
- if (!pci_wait_for_pending_transaction(dev))
- dev_err(&dev->dev, "transaction is not cleared; proceeding with reset anyway\n");
-
- pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
- msleep(100);
-
- /*
- * End of pcie_flr() code sequence.
- */
+ pcie_flr(dev);
/*
* Restore the configuration information (BAR values, etc.) including
@@ -3939,6 +3942,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ASMEDIA, 0x1080,
DECLARE_PCI_FIXUP_HEADER(0x10e3, 0x8113, quirk_use_pcie_bridge_dma_alias);
/* ITE 8892, https://bugzilla.kernel.org/show_bug.cgi?id=73551 */
DECLARE_PCI_FIXUP_HEADER(0x1283, 0x8892, quirk_use_pcie_bridge_dma_alias);
+/* ITE 8893 has the same problem as the 8892 */
+DECLARE_PCI_FIXUP_HEADER(0x1283, 0x8893, quirk_use_pcie_bridge_dma_alias);
/* Intel 82801, https://bugzilla.kernel.org/show_bug.cgi?id=44881#c49 */
DECLARE_PCI_FIXUP_HEADER(0x8086, 0x244e, quirk_use_pcie_bridge_dma_alias);
@@ -3958,6 +3963,20 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2260, quirk_mic_x200_dma_alias);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2264, quirk_mic_x200_dma_alias);
/*
+ * The IOMMU and interrupt controller on Broadcom Vulcan/Cavium ThunderX2 are
+ * associated not at the root bus, but at a bridge below. This quirk avoids
+ * generating invalid DMA aliases.
+ */
+static void quirk_bridge_cavm_thrx2_pcie_root(struct pci_dev *pdev)
+{
+ pdev->dev_flags |= PCI_DEV_FLAGS_BRIDGE_XLATE_ROOT;
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_BROADCOM, 0x9000,
+ quirk_bridge_cavm_thrx2_pcie_root);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_BROADCOM, 0x9084,
+ quirk_bridge_cavm_thrx2_pcie_root);
+
+/*
* Intersil/Techwell TW686[4589]-based video capture cards have an empty (zero)
* class code. Fix it.
*/
@@ -4095,6 +4114,9 @@ static int pci_quirk_cavium_acs(struct pci_dev *dev, u16 acs_flags)
acs_flags &= ~(PCI_ACS_SV | PCI_ACS_TB | PCI_ACS_RR |
PCI_ACS_CR | PCI_ACS_UF | PCI_ACS_DT);
+ if (!((dev->device >= 0xa000) && (dev->device <= 0xa0ff)))
+ return -ENOTTY;
+
return acs_flags ? 0 : 1;
}
@@ -4634,3 +4656,11 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2030, quirk_no_aersid);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2031, quirk_no_aersid);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2032, quirk_no_aersid);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2033, quirk_no_aersid);
+
+/* FLR may cause some 82579 devices to hang. */
+static void quirk_intel_no_flr(struct pci_dev *dev)
+{
+ dev->dev_flags |= PCI_DEV_FLAGS_NO_FLR_RESET;
+}
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1502, quirk_intel_no_flr);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1503, quirk_intel_no_flr);
diff --git a/drivers/pci/search.c b/drivers/pci/search.c
index 33e0f033a48e..4c6044ad7368 100644
--- a/drivers/pci/search.c
+++ b/drivers/pci/search.c
@@ -60,6 +60,10 @@ int pci_for_each_dma_alias(struct pci_dev *pdev,
tmp = bus->self;
+ /* stop at bridge where translation unit is associated */
+ if (tmp->dev_flags & PCI_DEV_FLAGS_BRIDGE_XLATE_ROOT)
+ return ret;
+
/*
* PCIe-to-PCI/X bridges alias transactions from downstream
* devices using the subordinate bus number (PCI Express to
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index cb389277df41..958da7db9033 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -1066,10 +1066,10 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
r->flags = 0;
continue;
}
- size += r_size;
+ size += max(r_size, align);
/* Exclude ranges with size > align from
calculation of the alignment. */
- if (r_size == align)
+ if (r_size <= align)
aligns[order] += align;
if (order > max_order)
max_order = order;
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index 4bc589ee78d0..85774b7a316a 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -63,7 +63,7 @@ static void pci_std_update_resource(struct pci_dev *dev, int resno)
mask = (u32)PCI_BASE_ADDRESS_IO_MASK;
new |= res->flags & ~PCI_BASE_ADDRESS_IO_MASK;
} else if (resno == PCI_ROM_RESOURCE) {
- mask = (u32)PCI_ROM_ADDRESS_MASK;
+ mask = PCI_ROM_ADDRESS_MASK;
} else {
mask = (u32)PCI_BASE_ADDRESS_MEM_MASK;
new |= res->flags & ~PCI_BASE_ADDRESS_MEM_MASK;
diff --git a/drivers/pci/switch/Kconfig b/drivers/pci/switch/Kconfig
new file mode 100644
index 000000000000..4c49648e0646
--- /dev/null
+++ b/drivers/pci/switch/Kconfig
@@ -0,0 +1,13 @@
+menu "PCI switch controller drivers"
+ depends on PCI
+
+config PCI_SW_SWITCHTEC
+ tristate "MicroSemi Switchtec PCIe Switch Management Driver"
+ help
+ Enables support for the management interface for the MicroSemi
+ Switchtec series of PCIe switches. Supports userspace access
+ to submit MRPC commands to the switch via /dev/switchtecX
+ devices. See <file:Documentation/switchtec.txt> for more
+ information.
+
+endmenu
diff --git a/drivers/pci/switch/Makefile b/drivers/pci/switch/Makefile
new file mode 100644
index 000000000000..37d8cfb03f3f
--- /dev/null
+++ b/drivers/pci/switch/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_PCI_SW_SWITCHTEC) += switchtec.o
diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c
new file mode 100644
index 000000000000..cc6e085008fb
--- /dev/null
+++ b/drivers/pci/switch/switchtec.c
@@ -0,0 +1,1600 @@
+/*
+ * Microsemi Switchtec(tm) PCIe Management Driver
+ * Copyright (c) 2017, Microsemi Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#include <linux/switchtec_ioctl.h>
+
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+#include <linux/poll.h>
+#include <linux/pci.h>
+#include <linux/cdev.h>
+#include <linux/wait.h>
+
+MODULE_DESCRIPTION("Microsemi Switchtec(tm) PCIe Management Driver");
+MODULE_VERSION("0.1");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Microsemi Corporation");
+
+static int max_devices = 16;
+module_param(max_devices, int, 0644);
+MODULE_PARM_DESC(max_devices, "max number of switchtec device instances");
+
+static dev_t switchtec_devt;
+static struct class *switchtec_class;
+static DEFINE_IDA(switchtec_minor_ida);
+
+#define MICROSEMI_VENDOR_ID 0x11f8
+#define MICROSEMI_NTB_CLASSCODE 0x068000
+#define MICROSEMI_MGMT_CLASSCODE 0x058000
+
+#define SWITCHTEC_MRPC_PAYLOAD_SIZE 1024
+#define SWITCHTEC_MAX_PFF_CSR 48
+
+#define SWITCHTEC_EVENT_OCCURRED BIT(0)
+#define SWITCHTEC_EVENT_CLEAR BIT(0)
+#define SWITCHTEC_EVENT_EN_LOG BIT(1)
+#define SWITCHTEC_EVENT_EN_CLI BIT(2)
+#define SWITCHTEC_EVENT_EN_IRQ BIT(3)
+#define SWITCHTEC_EVENT_FATAL BIT(4)
+
+enum {
+ SWITCHTEC_GAS_MRPC_OFFSET = 0x0000,
+ SWITCHTEC_GAS_TOP_CFG_OFFSET = 0x1000,
+ SWITCHTEC_GAS_SW_EVENT_OFFSET = 0x1800,
+ SWITCHTEC_GAS_SYS_INFO_OFFSET = 0x2000,
+ SWITCHTEC_GAS_FLASH_INFO_OFFSET = 0x2200,
+ SWITCHTEC_GAS_PART_CFG_OFFSET = 0x4000,
+ SWITCHTEC_GAS_NTB_OFFSET = 0x10000,
+ SWITCHTEC_GAS_PFF_CSR_OFFSET = 0x134000,
+};
+
+struct mrpc_regs {
+ u8 input_data[SWITCHTEC_MRPC_PAYLOAD_SIZE];
+ u8 output_data[SWITCHTEC_MRPC_PAYLOAD_SIZE];
+ u32 cmd;
+ u32 status;
+ u32 ret_value;
+} __packed;
+
+enum mrpc_status {
+ SWITCHTEC_MRPC_STATUS_INPROGRESS = 1,
+ SWITCHTEC_MRPC_STATUS_DONE = 2,
+ SWITCHTEC_MRPC_STATUS_ERROR = 0xFF,
+ SWITCHTEC_MRPC_STATUS_INTERRUPTED = 0x100,
+};
+
+struct sw_event_regs {
+ u64 event_report_ctrl;
+ u64 reserved1;
+ u64 part_event_bitmap;
+ u64 reserved2;
+ u32 global_summary;
+ u32 reserved3[3];
+ u32 stack_error_event_hdr;
+ u32 stack_error_event_data;
+ u32 reserved4[4];
+ u32 ppu_error_event_hdr;
+ u32 ppu_error_event_data;
+ u32 reserved5[4];
+ u32 isp_error_event_hdr;
+ u32 isp_error_event_data;
+ u32 reserved6[4];
+ u32 sys_reset_event_hdr;
+ u32 reserved7[5];
+ u32 fw_exception_hdr;
+ u32 reserved8[5];
+ u32 fw_nmi_hdr;
+ u32 reserved9[5];
+ u32 fw_non_fatal_hdr;
+ u32 reserved10[5];
+ u32 fw_fatal_hdr;
+ u32 reserved11[5];
+ u32 twi_mrpc_comp_hdr;
+ u32 twi_mrpc_comp_data;
+ u32 reserved12[4];
+ u32 twi_mrpc_comp_async_hdr;
+ u32 twi_mrpc_comp_async_data;
+ u32 reserved13[4];
+ u32 cli_mrpc_comp_hdr;
+ u32 cli_mrpc_comp_data;
+ u32 reserved14[4];
+ u32 cli_mrpc_comp_async_hdr;
+ u32 cli_mrpc_comp_async_data;
+ u32 reserved15[4];
+ u32 gpio_interrupt_hdr;
+ u32 gpio_interrupt_data;
+ u32 reserved16[4];
+} __packed;
+
+struct sys_info_regs {
+ u32 device_id;
+ u32 device_version;
+ u32 firmware_version;
+ u32 reserved1;
+ u32 vendor_table_revision;
+ u32 table_format_version;
+ u32 partition_id;
+ u32 cfg_file_fmt_version;
+ u32 reserved2[58];
+ char vendor_id[8];
+ char product_id[16];
+ char product_revision[4];
+ char component_vendor[8];
+ u16 component_id;
+ u8 component_revision;
+} __packed;
+
+struct flash_info_regs {
+ u32 flash_part_map_upd_idx;
+
+ struct active_partition_info {
+ u32 address;
+ u32 build_version;
+ u32 build_string;
+ } active_img;
+
+ struct active_partition_info active_cfg;
+ struct active_partition_info inactive_img;
+ struct active_partition_info inactive_cfg;
+
+ u32 flash_length;
+
+ struct partition_info {
+ u32 address;
+ u32 length;
+ } cfg0;
+
+ struct partition_info cfg1;
+ struct partition_info img0;
+ struct partition_info img1;
+ struct partition_info nvlog;
+ struct partition_info vendor[8];
+};
+
+struct ntb_info_regs {
+ u8 partition_count;
+ u8 partition_id;
+ u16 reserved1;
+ u64 ep_map;
+ u16 requester_id;
+} __packed;
+
+struct part_cfg_regs {
+ u32 status;
+ u32 state;
+ u32 port_cnt;
+ u32 usp_port_mode;
+ u32 usp_pff_inst_id;
+ u32 vep_pff_inst_id;
+ u32 dsp_pff_inst_id[47];
+ u32 reserved1[11];
+ u16 vep_vector_number;
+ u16 usp_vector_number;
+ u32 port_event_bitmap;
+ u32 reserved2[3];
+ u32 part_event_summary;
+ u32 reserved3[3];
+ u32 part_reset_hdr;
+ u32 part_reset_data[5];
+ u32 mrpc_comp_hdr;
+ u32 mrpc_comp_data[5];
+ u32 mrpc_comp_async_hdr;
+ u32 mrpc_comp_async_data[5];
+ u32 dyn_binding_hdr;
+ u32 dyn_binding_data[5];
+ u32 reserved4[159];
+} __packed;
+
+enum {
+ SWITCHTEC_PART_CFG_EVENT_RESET = 1 << 0,
+ SWITCHTEC_PART_CFG_EVENT_MRPC_CMP = 1 << 1,
+ SWITCHTEC_PART_CFG_EVENT_MRPC_ASYNC_CMP = 1 << 2,
+ SWITCHTEC_PART_CFG_EVENT_DYN_PART_CMP = 1 << 3,
+};
+
+struct pff_csr_regs {
+ u16 vendor_id;
+ u16 device_id;
+ u32 pci_cfg_header[15];
+ u32 pci_cap_region[48];
+ u32 pcie_cap_region[448];
+ u32 indirect_gas_window[128];
+ u32 indirect_gas_window_off;
+ u32 reserved[127];
+ u32 pff_event_summary;
+ u32 reserved2[3];
+ u32 aer_in_p2p_hdr;
+ u32 aer_in_p2p_data[5];
+ u32 aer_in_vep_hdr;
+ u32 aer_in_vep_data[5];
+ u32 dpc_hdr;
+ u32 dpc_data[5];
+ u32 cts_hdr;
+ u32 cts_data[5];
+ u32 reserved3[6];
+ u32 hotplug_hdr;
+ u32 hotplug_data[5];
+ u32 ier_hdr;
+ u32 ier_data[5];
+ u32 threshold_hdr;
+ u32 threshold_data[5];
+ u32 power_mgmt_hdr;
+ u32 power_mgmt_data[5];
+ u32 tlp_throttling_hdr;
+ u32 tlp_throttling_data[5];
+ u32 force_speed_hdr;
+ u32 force_speed_data[5];
+ u32 credit_timeout_hdr;
+ u32 credit_timeout_data[5];
+ u32 link_state_hdr;
+ u32 link_state_data[5];
+ u32 reserved4[174];
+} __packed;
+
+struct switchtec_dev {
+ struct pci_dev *pdev;
+ struct device dev;
+ struct cdev cdev;
+
+ int partition;
+ int partition_count;
+ int pff_csr_count;
+ char pff_local[SWITCHTEC_MAX_PFF_CSR];
+
+ void __iomem *mmio;
+ struct mrpc_regs __iomem *mmio_mrpc;
+ struct sw_event_regs __iomem *mmio_sw_event;
+ struct sys_info_regs __iomem *mmio_sys_info;
+ struct flash_info_regs __iomem *mmio_flash_info;
+ struct ntb_info_regs __iomem *mmio_ntb;
+ struct part_cfg_regs __iomem *mmio_part_cfg;
+ struct part_cfg_regs __iomem *mmio_part_cfg_all;
+ struct pff_csr_regs __iomem *mmio_pff_csr;
+
+ /*
+ * The mrpc_mutex must be held when accessing the other
+ * mrpc_* fields, the alive flag, and the stuser->state field.
+ */
+ struct mutex mrpc_mutex;
+ struct list_head mrpc_queue;
+ int mrpc_busy;
+ struct work_struct mrpc_work;
+ struct delayed_work mrpc_timeout;
+ bool alive;
+
+ wait_queue_head_t event_wq;
+ atomic_t event_cnt;
+};
+
+static struct switchtec_dev *to_stdev(struct device *dev)
+{
+ return container_of(dev, struct switchtec_dev, dev);
+}
+
+enum mrpc_state {
+ MRPC_IDLE = 0,
+ MRPC_QUEUED,
+ MRPC_RUNNING,
+ MRPC_DONE,
+};
+
+struct switchtec_user {
+ struct switchtec_dev *stdev;
+
+ enum mrpc_state state;
+
+ struct completion comp;
+ struct kref kref;
+ struct list_head list;
+
+ u32 cmd;
+ u32 status;
+ u32 return_code;
+ size_t data_len;
+ size_t read_len;
+ unsigned char data[SWITCHTEC_MRPC_PAYLOAD_SIZE];
+ int event_cnt;
+};
+
+static struct switchtec_user *stuser_create(struct switchtec_dev *stdev)
+{
+ struct switchtec_user *stuser;
+
+ stuser = kzalloc(sizeof(*stuser), GFP_KERNEL);
+ if (!stuser)
+ return ERR_PTR(-ENOMEM);
+
+ get_device(&stdev->dev);
+ stuser->stdev = stdev;
+ kref_init(&stuser->kref);
+ INIT_LIST_HEAD(&stuser->list);
+ init_completion(&stuser->comp);
+ stuser->event_cnt = atomic_read(&stdev->event_cnt);
+
+ dev_dbg(&stdev->dev, "%s: %p\n", __func__, stuser);
+
+ return stuser;
+}
+
+static void stuser_free(struct kref *kref)
+{
+ struct switchtec_user *stuser;
+
+ stuser = container_of(kref, struct switchtec_user, kref);
+
+ dev_dbg(&stuser->stdev->dev, "%s: %p\n", __func__, stuser);
+
+ put_device(&stuser->stdev->dev);
+ kfree(stuser);
+}
+
+static void stuser_put(struct switchtec_user *stuser)
+{
+ kref_put(&stuser->kref, stuser_free);
+}
+
+static void stuser_set_state(struct switchtec_user *stuser,
+ enum mrpc_state state)
+{
+ /* requires the mrpc_mutex to already be held when called */
+
+ const char * const state_names[] = {
+ [MRPC_IDLE] = "IDLE",
+ [MRPC_QUEUED] = "QUEUED",
+ [MRPC_RUNNING] = "RUNNING",
+ [MRPC_DONE] = "DONE",
+ };
+
+ stuser->state = state;
+
+ dev_dbg(&stuser->stdev->dev, "stuser state %p -> %s",
+ stuser, state_names[state]);
+}
+
+static void mrpc_complete_cmd(struct switchtec_dev *stdev);
+
+static void mrpc_cmd_submit(struct switchtec_dev *stdev)
+{
+ /* requires the mrpc_mutex to already be held when called */
+
+ struct switchtec_user *stuser;
+
+ if (stdev->mrpc_busy)
+ return;
+
+ if (list_empty(&stdev->mrpc_queue))
+ return;
+
+ stuser = list_entry(stdev->mrpc_queue.next, struct switchtec_user,
+ list);
+
+ stuser_set_state(stuser, MRPC_RUNNING);
+ stdev->mrpc_busy = 1;
+ memcpy_toio(&stdev->mmio_mrpc->input_data,
+ stuser->data, stuser->data_len);
+ iowrite32(stuser->cmd, &stdev->mmio_mrpc->cmd);
+
+ stuser->status = ioread32(&stdev->mmio_mrpc->status);
+ if (stuser->status != SWITCHTEC_MRPC_STATUS_INPROGRESS)
+ mrpc_complete_cmd(stdev);
+
+ schedule_delayed_work(&stdev->mrpc_timeout,
+ msecs_to_jiffies(500));
+}
+
+static int mrpc_queue_cmd(struct switchtec_user *stuser)
+{
+ /* requires the mrpc_mutex to already be held when called */
+
+ struct switchtec_dev *stdev = stuser->stdev;
+
+ kref_get(&stuser->kref);
+ stuser->read_len = sizeof(stuser->data);
+ stuser_set_state(stuser, MRPC_QUEUED);
+ init_completion(&stuser->comp);
+ list_add_tail(&stuser->list, &stdev->mrpc_queue);
+
+ mrpc_cmd_submit(stdev);
+
+ return 0;
+}
+
+static void mrpc_complete_cmd(struct switchtec_dev *stdev)
+{
+ /* requires the mrpc_mutex to already be held when called */
+ struct switchtec_user *stuser;
+
+ if (list_empty(&stdev->mrpc_queue))
+ return;
+
+ stuser = list_entry(stdev->mrpc_queue.next, struct switchtec_user,
+ list);
+
+ stuser->status = ioread32(&stdev->mmio_mrpc->status);
+ if (stuser->status == SWITCHTEC_MRPC_STATUS_INPROGRESS)
+ return;
+
+ stuser_set_state(stuser, MRPC_DONE);
+ stuser->return_code = 0;
+
+ if (stuser->status != SWITCHTEC_MRPC_STATUS_DONE)
+ goto out;
+
+ stuser->return_code = ioread32(&stdev->mmio_mrpc->ret_value);
+ if (stuser->return_code != 0)
+ goto out;
+
+ memcpy_fromio(stuser->data, &stdev->mmio_mrpc->output_data,
+ stuser->read_len);
+
+out:
+ complete_all(&stuser->comp);
+ list_del_init(&stuser->list);
+ stuser_put(stuser);
+ stdev->mrpc_busy = 0;
+
+ mrpc_cmd_submit(stdev);
+}
+
+static void mrpc_event_work(struct work_struct *work)
+{
+ struct switchtec_dev *stdev;
+
+ stdev = container_of(work, struct switchtec_dev, mrpc_work);
+
+ dev_dbg(&stdev->dev, "%s\n", __func__);
+
+ mutex_lock(&stdev->mrpc_mutex);
+ cancel_delayed_work(&stdev->mrpc_timeout);
+ mrpc_complete_cmd(stdev);
+ mutex_unlock(&stdev->mrpc_mutex);
+}
+
+static void mrpc_timeout_work(struct work_struct *work)
+{
+ struct switchtec_dev *stdev;
+ u32 status;
+
+ stdev = container_of(work, struct switchtec_dev, mrpc_timeout.work);
+
+ dev_dbg(&stdev->dev, "%s\n", __func__);
+
+ mutex_lock(&stdev->mrpc_mutex);
+
+ status = ioread32(&stdev->mmio_mrpc->status);
+ if (status == SWITCHTEC_MRPC_STATUS_INPROGRESS) {
+ schedule_delayed_work(&stdev->mrpc_timeout,
+ msecs_to_jiffies(500));
+ goto out;
+ }
+
+ mrpc_complete_cmd(stdev);
+
+out:
+ mutex_unlock(&stdev->mrpc_mutex);
+}
+
+static ssize_t device_version_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct switchtec_dev *stdev = to_stdev(dev);
+ u32 ver;
+
+ ver = ioread32(&stdev->mmio_sys_info->device_version);
+
+ return sprintf(buf, "%x\n", ver);
+}
+static DEVICE_ATTR_RO(device_version);
+
+static ssize_t fw_version_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct switchtec_dev *stdev = to_stdev(dev);
+ u32 ver;
+
+ ver = ioread32(&stdev->mmio_sys_info->firmware_version);
+
+ return sprintf(buf, "%08x\n", ver);
+}
+static DEVICE_ATTR_RO(fw_version);
+
+static ssize_t io_string_show(char *buf, void __iomem *attr, size_t len)
+{
+ int i;
+
+ memcpy_fromio(buf, attr, len);
+ buf[len] = '\n';
+ buf[len + 1] = 0;
+
+ for (i = len - 1; i > 0; i--) {
+ if (buf[i] != ' ')
+ break;
+ buf[i] = '\n';
+ buf[i + 1] = 0;
+ }
+
+ return strlen(buf);
+}
+
+#define DEVICE_ATTR_SYS_INFO_STR(field) \
+static ssize_t field ## _show(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ struct switchtec_dev *stdev = to_stdev(dev); \
+ return io_string_show(buf, &stdev->mmio_sys_info->field, \
+ sizeof(stdev->mmio_sys_info->field)); \
+} \
+\
+static DEVICE_ATTR_RO(field)
+
+DEVICE_ATTR_SYS_INFO_STR(vendor_id);
+DEVICE_ATTR_SYS_INFO_STR(product_id);
+DEVICE_ATTR_SYS_INFO_STR(product_revision);
+DEVICE_ATTR_SYS_INFO_STR(component_vendor);
+
+static ssize_t component_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct switchtec_dev *stdev = to_stdev(dev);
+ int id = ioread16(&stdev->mmio_sys_info->component_id);
+
+ return sprintf(buf, "PM%04X\n", id);
+}
+static DEVICE_ATTR_RO(component_id);
+
+static ssize_t component_revision_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct switchtec_dev *stdev = to_stdev(dev);
+ int rev = ioread8(&stdev->mmio_sys_info->component_revision);
+
+ return sprintf(buf, "%d\n", rev);
+}
+static DEVICE_ATTR_RO(component_revision);
+
+static ssize_t partition_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct switchtec_dev *stdev = to_stdev(dev);
+
+ return sprintf(buf, "%d\n", stdev->partition);
+}
+static DEVICE_ATTR_RO(partition);
+
+static ssize_t partition_count_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct switchtec_dev *stdev = to_stdev(dev);
+
+ return sprintf(buf, "%d\n", stdev->partition_count);
+}
+static DEVICE_ATTR_RO(partition_count);
+
+static struct attribute *switchtec_device_attrs[] = {
+ &dev_attr_device_version.attr,
+ &dev_attr_fw_version.attr,
+ &dev_attr_vendor_id.attr,
+ &dev_attr_product_id.attr,
+ &dev_attr_product_revision.attr,
+ &dev_attr_component_vendor.attr,
+ &dev_attr_component_id.attr,
+ &dev_attr_component_revision.attr,
+ &dev_attr_partition.attr,
+ &dev_attr_partition_count.attr,
+ NULL,
+};
+
+ATTRIBUTE_GROUPS(switchtec_device);
+
+static int switchtec_dev_open(struct inode *inode, struct file *filp)
+{
+ struct switchtec_dev *stdev;
+ struct switchtec_user *stuser;
+
+ stdev = container_of(inode->i_cdev, struct switchtec_dev, cdev);
+
+ stuser = stuser_create(stdev);
+ if (IS_ERR(stuser))
+ return PTR_ERR(stuser);
+
+ filp->private_data = stuser;
+ nonseekable_open(inode, filp);
+
+ dev_dbg(&stdev->dev, "%s: %p\n", __func__, stuser);
+
+ return 0;
+}
+
+static int switchtec_dev_release(struct inode *inode, struct file *filp)
+{
+ struct switchtec_user *stuser = filp->private_data;
+
+ stuser_put(stuser);
+
+ return 0;
+}
+
+static int lock_mutex_and_test_alive(struct switchtec_dev *stdev)
+{
+ if (mutex_lock_interruptible(&stdev->mrpc_mutex))
+ return -EINTR;
+
+ if (!stdev->alive) {
+ mutex_unlock(&stdev->mrpc_mutex);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static ssize_t switchtec_dev_write(struct file *filp, const char __user *data,
+ size_t size, loff_t *off)
+{
+ struct switchtec_user *stuser = filp->private_data;
+ struct switchtec_dev *stdev = stuser->stdev;
+ int rc;
+
+ if (size < sizeof(stuser->cmd) ||
+ size > sizeof(stuser->cmd) + sizeof(stuser->data))
+ return -EINVAL;
+
+ stuser->data_len = size - sizeof(stuser->cmd);
+
+ rc = lock_mutex_and_test_alive(stdev);
+ if (rc)
+ return rc;
+
+ if (stuser->state != MRPC_IDLE) {
+ rc = -EBADE;
+ goto out;
+ }
+
+ rc = copy_from_user(&stuser->cmd, data, sizeof(stuser->cmd));
+ if (rc) {
+ rc = -EFAULT;
+ goto out;
+ }
+
+ data += sizeof(stuser->cmd);
+ rc = copy_from_user(&stuser->data, data, size - sizeof(stuser->cmd));
+ if (rc) {
+ rc = -EFAULT;
+ goto out;
+ }
+
+ rc = mrpc_queue_cmd(stuser);
+
+out:
+ mutex_unlock(&stdev->mrpc_mutex);
+
+ if (rc)
+ return rc;
+
+ return size;
+}
+
+static ssize_t switchtec_dev_read(struct file *filp, char __user *data,
+ size_t size, loff_t *off)
+{
+ struct switchtec_user *stuser = filp->private_data;
+ struct switchtec_dev *stdev = stuser->stdev;
+ int rc;
+
+ if (size < sizeof(stuser->cmd) ||
+ size > sizeof(stuser->cmd) + sizeof(stuser->data))
+ return -EINVAL;
+
+ rc = lock_mutex_and_test_alive(stdev);
+ if (rc)
+ return rc;
+
+ if (stuser->state == MRPC_IDLE) {
+ mutex_unlock(&stdev->mrpc_mutex);
+ return -EBADE;
+ }
+
+ stuser->read_len = size - sizeof(stuser->return_code);
+
+ mutex_unlock(&stdev->mrpc_mutex);
+
+ if (filp->f_flags & O_NONBLOCK) {
+ if (!try_wait_for_completion(&stuser->comp))
+ return -EAGAIN;
+ } else {
+ rc = wait_for_completion_interruptible(&stuser->comp);
+ if (rc < 0)
+ return rc;
+ }
+
+ rc = lock_mutex_and_test_alive(stdev);
+ if (rc)
+ return rc;
+
+ if (stuser->state != MRPC_DONE) {
+ mutex_unlock(&stdev->mrpc_mutex);
+ return -EBADE;
+ }
+
+ rc = copy_to_user(data, &stuser->return_code,
+ sizeof(stuser->return_code));
+ if (rc) {
+ rc = -EFAULT;
+ goto out;
+ }
+
+ data += sizeof(stuser->return_code);
+ rc = copy_to_user(data, &stuser->data,
+ size - sizeof(stuser->return_code));
+ if (rc) {
+ rc = -EFAULT;
+ goto out;
+ }
+
+ stuser_set_state(stuser, MRPC_IDLE);
+
+out:
+ mutex_unlock(&stdev->mrpc_mutex);
+
+ if (stuser->status == SWITCHTEC_MRPC_STATUS_DONE)
+ return size;
+ else if (stuser->status == SWITCHTEC_MRPC_STATUS_INTERRUPTED)
+ return -ENXIO;
+ else
+ return -EBADMSG;
+}
+
+static unsigned int switchtec_dev_poll(struct file *filp, poll_table *wait)
+{
+ struct switchtec_user *stuser = filp->private_data;
+ struct switchtec_dev *stdev = stuser->stdev;
+ int ret = 0;
+
+ poll_wait(filp, &stuser->comp.wait, wait);
+ poll_wait(filp, &stdev->event_wq, wait);
+
+ if (lock_mutex_and_test_alive(stdev))
+ return POLLIN | POLLRDHUP | POLLOUT | POLLERR | POLLHUP;
+
+ mutex_unlock(&stdev->mrpc_mutex);
+
+ if (try_wait_for_completion(&stuser->comp))
+ ret |= POLLIN | POLLRDNORM;
+
+ if (stuser->event_cnt != atomic_read(&stdev->event_cnt))
+ ret |= POLLPRI | POLLRDBAND;
+
+ return ret;
+}
+
+static int ioctl_flash_info(struct switchtec_dev *stdev,
+ struct switchtec_ioctl_flash_info __user *uinfo)
+{
+ struct switchtec_ioctl_flash_info info = {0};
+ struct flash_info_regs __iomem *fi = stdev->mmio_flash_info;
+
+ info.flash_length = ioread32(&fi->flash_length);
+ info.num_partitions = SWITCHTEC_IOCTL_NUM_PARTITIONS;
+
+ if (copy_to_user(uinfo, &info, sizeof(info)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static void set_fw_info_part(struct switchtec_ioctl_flash_part_info *info,
+ struct partition_info __iomem *pi)
+{
+ info->address = ioread32(&pi->address);
+ info->length = ioread32(&pi->length);
+}
+
+static int ioctl_flash_part_info(struct switchtec_dev *stdev,
+ struct switchtec_ioctl_flash_part_info __user *uinfo)
+{
+ struct switchtec_ioctl_flash_part_info info = {0};
+ struct flash_info_regs __iomem *fi = stdev->mmio_flash_info;
+ u32 active_addr = -1;
+
+ if (copy_from_user(&info, uinfo, sizeof(info)))
+ return -EFAULT;
+
+ switch (info.flash_partition) {
+ case SWITCHTEC_IOCTL_PART_CFG0:
+ active_addr = ioread32(&fi->active_cfg);
+ set_fw_info_part(&info, &fi->cfg0);
+ break;
+ case SWITCHTEC_IOCTL_PART_CFG1:
+ active_addr = ioread32(&fi->active_cfg);
+ set_fw_info_part(&info, &fi->cfg1);
+ break;
+ case SWITCHTEC_IOCTL_PART_IMG0:
+ active_addr = ioread32(&fi->active_img);
+ set_fw_info_part(&info, &fi->img0);
+ break;
+ case SWITCHTEC_IOCTL_PART_IMG1:
+ active_addr = ioread32(&fi->active_img);
+ set_fw_info_part(&info, &fi->img1);
+ break;
+ case SWITCHTEC_IOCTL_PART_NVLOG:
+ set_fw_info_part(&info, &fi->nvlog);
+ break;
+ case SWITCHTEC_IOCTL_PART_VENDOR0:
+ set_fw_info_part(&info, &fi->vendor[0]);
+ break;
+ case SWITCHTEC_IOCTL_PART_VENDOR1:
+ set_fw_info_part(&info, &fi->vendor[1]);
+ break;
+ case SWITCHTEC_IOCTL_PART_VENDOR2:
+ set_fw_info_part(&info, &fi->vendor[2]);
+ break;
+ case SWITCHTEC_IOCTL_PART_VENDOR3:
+ set_fw_info_part(&info, &fi->vendor[3]);
+ break;
+ case SWITCHTEC_IOCTL_PART_VENDOR4:
+ set_fw_info_part(&info, &fi->vendor[4]);
+ break;
+ case SWITCHTEC_IOCTL_PART_VENDOR5:
+ set_fw_info_part(&info, &fi->vendor[5]);
+ break;
+ case SWITCHTEC_IOCTL_PART_VENDOR6:
+ set_fw_info_part(&info, &fi->vendor[6]);
+ break;
+ case SWITCHTEC_IOCTL_PART_VENDOR7:
+ set_fw_info_part(&info, &fi->vendor[7]);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (info.address == active_addr)
+ info.active = 1;
+
+ if (copy_to_user(uinfo, &info, sizeof(info)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int ioctl_event_summary(struct switchtec_dev *stdev,
+ struct switchtec_user *stuser,
+ struct switchtec_ioctl_event_summary __user *usum)
+{
+ struct switchtec_ioctl_event_summary s = {0};
+ int i;
+ u32 reg;
+
+ s.global = ioread32(&stdev->mmio_sw_event->global_summary);
+ s.part_bitmap = ioread32(&stdev->mmio_sw_event->part_event_bitmap);
+ s.local_part = ioread32(&stdev->mmio_part_cfg->part_event_summary);
+
+ for (i = 0; i < stdev->partition_count; i++) {
+ reg = ioread32(&stdev->mmio_part_cfg_all[i].part_event_summary);
+ s.part[i] = reg;
+ }
+
+ for (i = 0; i < SWITCHTEC_MAX_PFF_CSR; i++) {
+ reg = ioread16(&stdev->mmio_pff_csr[i].vendor_id);
+ if (reg != MICROSEMI_VENDOR_ID)
+ break;
+
+ reg = ioread32(&stdev->mmio_pff_csr[i].pff_event_summary);
+ s.pff[i] = reg;
+ }
+
+ if (copy_to_user(usum, &s, sizeof(s)))
+ return -EFAULT;
+
+ stuser->event_cnt = atomic_read(&stdev->event_cnt);
+
+ return 0;
+}
+
+static u32 __iomem *global_ev_reg(struct switchtec_dev *stdev,
+ size_t offset, int index)
+{
+ return (void __iomem *)stdev->mmio_sw_event + offset;
+}
+
+static u32 __iomem *part_ev_reg(struct switchtec_dev *stdev,
+ size_t offset, int index)
+{
+ return (void __iomem *)&stdev->mmio_part_cfg_all[index] + offset;
+}
+
+static u32 __iomem *pff_ev_reg(struct switchtec_dev *stdev,
+ size_t offset, int index)
+{
+ return (void __iomem *)&stdev->mmio_pff_csr[index] + offset;
+}
+
+#define EV_GLB(i, r)[i] = {offsetof(struct sw_event_regs, r), global_ev_reg}
+#define EV_PAR(i, r)[i] = {offsetof(struct part_cfg_regs, r), part_ev_reg}
+#define EV_PFF(i, r)[i] = {offsetof(struct pff_csr_regs, r), pff_ev_reg}
+
+const struct event_reg {
+ size_t offset;
+ u32 __iomem *(*map_reg)(struct switchtec_dev *stdev,
+ size_t offset, int index);
+} event_regs[] = {
+ EV_GLB(SWITCHTEC_IOCTL_EVENT_STACK_ERROR, stack_error_event_hdr),
+ EV_GLB(SWITCHTEC_IOCTL_EVENT_PPU_ERROR, ppu_error_event_hdr),
+ EV_GLB(SWITCHTEC_IOCTL_EVENT_ISP_ERROR, isp_error_event_hdr),
+ EV_GLB(SWITCHTEC_IOCTL_EVENT_SYS_RESET, sys_reset_event_hdr),
+ EV_GLB(SWITCHTEC_IOCTL_EVENT_FW_EXC, fw_exception_hdr),
+ EV_GLB(SWITCHTEC_IOCTL_EVENT_FW_NMI, fw_nmi_hdr),
+ EV_GLB(SWITCHTEC_IOCTL_EVENT_FW_NON_FATAL, fw_non_fatal_hdr),
+ EV_GLB(SWITCHTEC_IOCTL_EVENT_FW_FATAL, fw_fatal_hdr),
+ EV_GLB(SWITCHTEC_IOCTL_EVENT_TWI_MRPC_COMP, twi_mrpc_comp_hdr),
+ EV_GLB(SWITCHTEC_IOCTL_EVENT_TWI_MRPC_COMP_ASYNC,
+ twi_mrpc_comp_async_hdr),
+ EV_GLB(SWITCHTEC_IOCTL_EVENT_CLI_MRPC_COMP, cli_mrpc_comp_hdr),
+ EV_GLB(SWITCHTEC_IOCTL_EVENT_CLI_MRPC_COMP_ASYNC,
+ cli_mrpc_comp_async_hdr),
+ EV_GLB(SWITCHTEC_IOCTL_EVENT_GPIO_INT, gpio_interrupt_hdr),
+ EV_PAR(SWITCHTEC_IOCTL_EVENT_PART_RESET, part_reset_hdr),
+ EV_PAR(SWITCHTEC_IOCTL_EVENT_MRPC_COMP, mrpc_comp_hdr),
+ EV_PAR(SWITCHTEC_IOCTL_EVENT_MRPC_COMP_ASYNC, mrpc_comp_async_hdr),
+ EV_PAR(SWITCHTEC_IOCTL_EVENT_DYN_PART_BIND_COMP, dyn_binding_hdr),
+ EV_PFF(SWITCHTEC_IOCTL_EVENT_AER_IN_P2P, aer_in_p2p_hdr),
+ EV_PFF(SWITCHTEC_IOCTL_EVENT_AER_IN_VEP, aer_in_vep_hdr),
+ EV_PFF(SWITCHTEC_IOCTL_EVENT_DPC, dpc_hdr),
+ EV_PFF(SWITCHTEC_IOCTL_EVENT_CTS, cts_hdr),
+ EV_PFF(SWITCHTEC_IOCTL_EVENT_HOTPLUG, hotplug_hdr),
+ EV_PFF(SWITCHTEC_IOCTL_EVENT_IER, ier_hdr),
+ EV_PFF(SWITCHTEC_IOCTL_EVENT_THRESH, threshold_hdr),
+ EV_PFF(SWITCHTEC_IOCTL_EVENT_POWER_MGMT, power_mgmt_hdr),
+ EV_PFF(SWITCHTEC_IOCTL_EVENT_TLP_THROTTLING, tlp_throttling_hdr),
+ EV_PFF(SWITCHTEC_IOCTL_EVENT_FORCE_SPEED, force_speed_hdr),
+ EV_PFF(SWITCHTEC_IOCTL_EVENT_CREDIT_TIMEOUT, credit_timeout_hdr),
+ EV_PFF(SWITCHTEC_IOCTL_EVENT_LINK_STATE, link_state_hdr),
+};
+
+static u32 __iomem *event_hdr_addr(struct switchtec_dev *stdev,
+ int event_id, int index)
+{
+ size_t off;
+
+ if (event_id < 0 || event_id >= SWITCHTEC_IOCTL_MAX_EVENTS)
+ return ERR_PTR(-EINVAL);
+
+ off = event_regs[event_id].offset;
+
+ if (event_regs[event_id].map_reg == part_ev_reg) {
+ if (index == SWITCHTEC_IOCTL_EVENT_LOCAL_PART_IDX)
+ index = stdev->partition;
+ else if (index < 0 || index >= stdev->partition_count)
+ return ERR_PTR(-EINVAL);
+ } else if (event_regs[event_id].map_reg == pff_ev_reg) {
+ if (index < 0 || index >= stdev->pff_csr_count)
+ return ERR_PTR(-EINVAL);
+ }
+
+ return event_regs[event_id].map_reg(stdev, off, index);
+}
+
+static int event_ctl(struct switchtec_dev *stdev,
+ struct switchtec_ioctl_event_ctl *ctl)
+{
+ int i;
+ u32 __iomem *reg;
+ u32 hdr;
+
+ reg = event_hdr_addr(stdev, ctl->event_id, ctl->index);
+ if (IS_ERR(reg))
+ return PTR_ERR(reg);
+
+ hdr = ioread32(reg);
+ for (i = 0; i < ARRAY_SIZE(ctl->data); i++)
+ ctl->data[i] = ioread32(&reg[i + 1]);
+
+ ctl->occurred = hdr & SWITCHTEC_EVENT_OCCURRED;
+ ctl->count = (hdr >> 5) & 0xFF;
+
+ if (!(ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_CLEAR))
+ hdr &= ~SWITCHTEC_EVENT_CLEAR;
+ if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_EN_POLL)
+ hdr |= SWITCHTEC_EVENT_EN_IRQ;
+ if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_DIS_POLL)
+ hdr &= ~SWITCHTEC_EVENT_EN_IRQ;
+ if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_EN_LOG)
+ hdr |= SWITCHTEC_EVENT_EN_LOG;
+ if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_DIS_LOG)
+ hdr &= ~SWITCHTEC_EVENT_EN_LOG;
+ if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_EN_CLI)
+ hdr |= SWITCHTEC_EVENT_EN_CLI;
+ if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_DIS_CLI)
+ hdr &= ~SWITCHTEC_EVENT_EN_CLI;
+ if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_EN_FATAL)
+ hdr |= SWITCHTEC_EVENT_FATAL;
+ if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_DIS_FATAL)
+ hdr &= ~SWITCHTEC_EVENT_FATAL;
+
+ if (ctl->flags)
+ iowrite32(hdr, reg);
+
+ ctl->flags = 0;
+ if (hdr & SWITCHTEC_EVENT_EN_IRQ)
+ ctl->flags |= SWITCHTEC_IOCTL_EVENT_FLAG_EN_POLL;
+ if (hdr & SWITCHTEC_EVENT_EN_LOG)
+ ctl->flags |= SWITCHTEC_IOCTL_EVENT_FLAG_EN_LOG;
+ if (hdr & SWITCHTEC_EVENT_EN_CLI)
+ ctl->flags |= SWITCHTEC_IOCTL_EVENT_FLAG_EN_CLI;
+ if (hdr & SWITCHTEC_EVENT_FATAL)
+ ctl->flags |= SWITCHTEC_IOCTL_EVENT_FLAG_EN_FATAL;
+
+ return 0;
+}
+
+static int ioctl_event_ctl(struct switchtec_dev *stdev,
+ struct switchtec_ioctl_event_ctl __user *uctl)
+{
+ int ret;
+ int nr_idxs;
+ struct switchtec_ioctl_event_ctl ctl;
+
+ if (copy_from_user(&ctl, uctl, sizeof(ctl)))
+ return -EFAULT;
+
+ if (ctl.event_id >= SWITCHTEC_IOCTL_MAX_EVENTS)
+ return -EINVAL;
+
+ if (ctl.flags & SWITCHTEC_IOCTL_EVENT_FLAG_UNUSED)
+ return -EINVAL;
+
+ if (ctl.index == SWITCHTEC_IOCTL_EVENT_IDX_ALL) {
+ if (event_regs[ctl.event_id].map_reg == global_ev_reg)
+ nr_idxs = 1;
+ else if (event_regs[ctl.event_id].map_reg == part_ev_reg)
+ nr_idxs = stdev->partition_count;
+ else if (event_regs[ctl.event_id].map_reg == pff_ev_reg)
+ nr_idxs = stdev->pff_csr_count;
+ else
+ return -EINVAL;
+
+ for (ctl.index = 0; ctl.index < nr_idxs; ctl.index++) {
+ ret = event_ctl(stdev, &ctl);
+ if (ret < 0)
+ return ret;
+ }
+ } else {
+ ret = event_ctl(stdev, &ctl);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (copy_to_user(uctl, &ctl, sizeof(ctl)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int ioctl_pff_to_port(struct switchtec_dev *stdev,
+ struct switchtec_ioctl_pff_port *up)
+{
+ int i, part;
+ u32 reg;
+ struct part_cfg_regs *pcfg;
+ struct switchtec_ioctl_pff_port p;
+
+ if (copy_from_user(&p, up, sizeof(p)))
+ return -EFAULT;
+
+ p.port = -1;
+ for (part = 0; part < stdev->partition_count; part++) {
+ pcfg = &stdev->mmio_part_cfg_all[part];
+ p.partition = part;
+
+ reg = ioread32(&pcfg->usp_pff_inst_id);
+ if (reg == p.pff) {
+ p.port = 0;
+ break;
+ }
+
+ reg = ioread32(&pcfg->vep_pff_inst_id);
+ if (reg == p.pff) {
+ p.port = SWITCHTEC_IOCTL_PFF_VEP;
+ break;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(pcfg->dsp_pff_inst_id); i++) {
+ reg = ioread32(&pcfg->dsp_pff_inst_id[i]);
+ if (reg != p.pff)
+ continue;
+
+ p.port = i + 1;
+ break;
+ }
+
+ if (p.port != -1)
+ break;
+ }
+
+ if (copy_to_user(up, &p, sizeof(p)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int ioctl_port_to_pff(struct switchtec_dev *stdev,
+ struct switchtec_ioctl_pff_port *up)
+{
+ struct switchtec_ioctl_pff_port p;
+ struct part_cfg_regs *pcfg;
+
+ if (copy_from_user(&p, up, sizeof(p)))
+ return -EFAULT;
+
+ if (p.partition == SWITCHTEC_IOCTL_EVENT_LOCAL_PART_IDX)
+ pcfg = stdev->mmio_part_cfg;
+ else if (p.partition < stdev->partition_count)
+ pcfg = &stdev->mmio_part_cfg_all[p.partition];
+ else
+ return -EINVAL;
+
+ switch (p.port) {
+ case 0:
+ p.pff = ioread32(&pcfg->usp_pff_inst_id);
+ break;
+ case SWITCHTEC_IOCTL_PFF_VEP:
+ p.pff = ioread32(&pcfg->vep_pff_inst_id);
+ break;
+ default:
+ if (p.port > ARRAY_SIZE(pcfg->dsp_pff_inst_id))
+ return -EINVAL;
+ p.pff = ioread32(&pcfg->dsp_pff_inst_id[p.port - 1]);
+ break;
+ }
+
+ if (copy_to_user(up, &p, sizeof(p)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static long switchtec_dev_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ struct switchtec_user *stuser = filp->private_data;
+ struct switchtec_dev *stdev = stuser->stdev;
+ int rc;
+ void __user *argp = (void __user *)arg;
+
+ rc = lock_mutex_and_test_alive(stdev);
+ if (rc)
+ return rc;
+
+ switch (cmd) {
+ case SWITCHTEC_IOCTL_FLASH_INFO:
+ rc = ioctl_flash_info(stdev, argp);
+ break;
+ case SWITCHTEC_IOCTL_FLASH_PART_INFO:
+ rc = ioctl_flash_part_info(stdev, argp);
+ break;
+ case SWITCHTEC_IOCTL_EVENT_SUMMARY:
+ rc = ioctl_event_summary(stdev, stuser, argp);
+ break;
+ case SWITCHTEC_IOCTL_EVENT_CTL:
+ rc = ioctl_event_ctl(stdev, argp);
+ break;
+ case SWITCHTEC_IOCTL_PFF_TO_PORT:
+ rc = ioctl_pff_to_port(stdev, argp);
+ break;
+ case SWITCHTEC_IOCTL_PORT_TO_PFF:
+ rc = ioctl_port_to_pff(stdev, argp);
+ break;
+ default:
+ rc = -ENOTTY;
+ break;
+ }
+
+ mutex_unlock(&stdev->mrpc_mutex);
+ return rc;
+}
+
+static const struct file_operations switchtec_fops = {
+ .owner = THIS_MODULE,
+ .open = switchtec_dev_open,
+ .release = switchtec_dev_release,
+ .write = switchtec_dev_write,
+ .read = switchtec_dev_read,
+ .poll = switchtec_dev_poll,
+ .unlocked_ioctl = switchtec_dev_ioctl,
+ .compat_ioctl = switchtec_dev_ioctl,
+};
+
+static void stdev_release(struct device *dev)
+{
+ struct switchtec_dev *stdev = to_stdev(dev);
+
+ kfree(stdev);
+}
+
+static void stdev_kill(struct switchtec_dev *stdev)
+{
+ struct switchtec_user *stuser, *tmpuser;
+
+ pci_clear_master(stdev->pdev);
+
+ cancel_delayed_work_sync(&stdev->mrpc_timeout);
+
+ /* Mark the hardware as unavailable and complete all completions */
+ mutex_lock(&stdev->mrpc_mutex);
+ stdev->alive = false;
+
+ /* Wake up and kill any users waiting on an MRPC request */
+ list_for_each_entry_safe(stuser, tmpuser, &stdev->mrpc_queue, list) {
+ complete_all(&stuser->comp);
+ list_del_init(&stuser->list);
+ stuser_put(stuser);
+ }
+
+ mutex_unlock(&stdev->mrpc_mutex);
+
+ /* Wake up any users waiting on event_wq */
+ wake_up_interruptible(&stdev->event_wq);
+}
+
+static struct switchtec_dev *stdev_create(struct pci_dev *pdev)
+{
+ struct switchtec_dev *stdev;
+ int minor;
+ struct device *dev;
+ struct cdev *cdev;
+ int rc;
+
+ stdev = kzalloc_node(sizeof(*stdev), GFP_KERNEL,
+ dev_to_node(&pdev->dev));
+ if (!stdev)
+ return ERR_PTR(-ENOMEM);
+
+ stdev->alive = true;
+ stdev->pdev = pdev;
+ INIT_LIST_HEAD(&stdev->mrpc_queue);
+ mutex_init(&stdev->mrpc_mutex);
+ stdev->mrpc_busy = 0;
+ INIT_WORK(&stdev->mrpc_work, mrpc_event_work);
+ INIT_DELAYED_WORK(&stdev->mrpc_timeout, mrpc_timeout_work);
+ init_waitqueue_head(&stdev->event_wq);
+ atomic_set(&stdev->event_cnt, 0);
+
+ dev = &stdev->dev;
+ device_initialize(dev);
+ dev->class = switchtec_class;
+ dev->parent = &pdev->dev;
+ dev->groups = switchtec_device_groups;
+ dev->release = stdev_release;
+
+ minor = ida_simple_get(&switchtec_minor_ida, 0, 0,
+ GFP_KERNEL);
+ if (minor < 0) {
+ rc = minor;
+ goto err_put;
+ }
+
+ dev->devt = MKDEV(MAJOR(switchtec_devt), minor);
+ dev_set_name(dev, "switchtec%d", minor);
+
+ cdev = &stdev->cdev;
+ cdev_init(cdev, &switchtec_fops);
+ cdev->owner = THIS_MODULE;
+ cdev->kobj.parent = &dev->kobj;
+
+ return stdev;
+
+err_put:
+ put_device(&stdev->dev);
+ return ERR_PTR(rc);
+}
+
+static int mask_event(struct switchtec_dev *stdev, int eid, int idx)
+{
+ size_t off = event_regs[eid].offset;
+ u32 __iomem *hdr_reg;
+ u32 hdr;
+
+ hdr_reg = event_regs[eid].map_reg(stdev, off, idx);
+ hdr = ioread32(hdr_reg);
+
+ if (!(hdr & SWITCHTEC_EVENT_OCCURRED && hdr & SWITCHTEC_EVENT_EN_IRQ))
+ return 0;
+
+ dev_dbg(&stdev->dev, "%s: %d %d %x\n", __func__, eid, idx, hdr);
+ hdr &= ~(SWITCHTEC_EVENT_EN_IRQ | SWITCHTEC_EVENT_OCCURRED);
+ iowrite32(hdr, hdr_reg);
+
+ return 1;
+}
+
+static int mask_all_events(struct switchtec_dev *stdev, int eid)
+{
+ int idx;
+ int count = 0;
+
+ if (event_regs[eid].map_reg == part_ev_reg) {
+ for (idx = 0; idx < stdev->partition_count; idx++)
+ count += mask_event(stdev, eid, idx);
+ } else if (event_regs[eid].map_reg == pff_ev_reg) {
+ for (idx = 0; idx < stdev->pff_csr_count; idx++) {
+ if (!stdev->pff_local[idx])
+ continue;
+ count += mask_event(stdev, eid, idx);
+ }
+ } else {
+ count += mask_event(stdev, eid, 0);
+ }
+
+ return count;
+}
+
+static irqreturn_t switchtec_event_isr(int irq, void *dev)
+{
+ struct switchtec_dev *stdev = dev;
+ u32 reg;
+ irqreturn_t ret = IRQ_NONE;
+ int eid, event_count = 0;
+
+ reg = ioread32(&stdev->mmio_part_cfg->mrpc_comp_hdr);
+ if (reg & SWITCHTEC_EVENT_OCCURRED) {
+ dev_dbg(&stdev->dev, "%s: mrpc comp\n", __func__);
+ ret = IRQ_HANDLED;
+ schedule_work(&stdev->mrpc_work);
+ iowrite32(reg, &stdev->mmio_part_cfg->mrpc_comp_hdr);
+ }
+
+ for (eid = 0; eid < SWITCHTEC_IOCTL_MAX_EVENTS; eid++)
+ event_count += mask_all_events(stdev, eid);
+
+ if (event_count) {
+ atomic_inc(&stdev->event_cnt);
+ wake_up_interruptible(&stdev->event_wq);
+ dev_dbg(&stdev->dev, "%s: %d events\n", __func__,
+ event_count);
+ return IRQ_HANDLED;
+ }
+
+ return ret;
+}
+
+static int switchtec_init_isr(struct switchtec_dev *stdev)
+{
+ int nvecs;
+ int event_irq;
+
+ nvecs = pci_alloc_irq_vectors(stdev->pdev, 1, 4,
+ PCI_IRQ_MSIX | PCI_IRQ_MSI);
+ if (nvecs < 0)
+ return nvecs;
+
+ event_irq = ioread32(&stdev->mmio_part_cfg->vep_vector_number);
+ if (event_irq < 0 || event_irq >= nvecs)
+ return -EFAULT;
+
+ event_irq = pci_irq_vector(stdev->pdev, event_irq);
+ if (event_irq < 0)
+ return event_irq;
+
+ return devm_request_irq(&stdev->pdev->dev, event_irq,
+ switchtec_event_isr, 0,
+ KBUILD_MODNAME, stdev);
+}
+
+static void init_pff(struct switchtec_dev *stdev)
+{
+ int i;
+ u32 reg;
+ struct part_cfg_regs *pcfg = stdev->mmio_part_cfg;
+
+ for (i = 0; i < SWITCHTEC_MAX_PFF_CSR; i++) {
+ reg = ioread16(&stdev->mmio_pff_csr[i].vendor_id);
+ if (reg != MICROSEMI_VENDOR_ID)
+ break;
+ }
+
+ stdev->pff_csr_count = i;
+
+ reg = ioread32(&pcfg->usp_pff_inst_id);
+ if (reg < SWITCHTEC_MAX_PFF_CSR)
+ stdev->pff_local[reg] = 1;
+
+ reg = ioread32(&pcfg->vep_pff_inst_id);
+ if (reg < SWITCHTEC_MAX_PFF_CSR)
+ stdev->pff_local[reg] = 1;
+
+ for (i = 0; i < ARRAY_SIZE(pcfg->dsp_pff_inst_id); i++) {
+ reg = ioread32(&pcfg->dsp_pff_inst_id[i]);
+ if (reg < SWITCHTEC_MAX_PFF_CSR)
+ stdev->pff_local[reg] = 1;
+ }
+}
+
+static int switchtec_init_pci(struct switchtec_dev *stdev,
+ struct pci_dev *pdev)
+{
+ int rc;
+
+ rc = pcim_enable_device(pdev);
+ if (rc)
+ return rc;
+
+ rc = pcim_iomap_regions(pdev, 0x1, KBUILD_MODNAME);
+ if (rc)
+ return rc;
+
+ pci_set_master(pdev);
+
+ stdev->mmio = pcim_iomap_table(pdev)[0];
+ stdev->mmio_mrpc = stdev->mmio + SWITCHTEC_GAS_MRPC_OFFSET;
+ stdev->mmio_sw_event = stdev->mmio + SWITCHTEC_GAS_SW_EVENT_OFFSET;
+ stdev->mmio_sys_info = stdev->mmio + SWITCHTEC_GAS_SYS_INFO_OFFSET;
+ stdev->mmio_flash_info = stdev->mmio + SWITCHTEC_GAS_FLASH_INFO_OFFSET;
+ stdev->mmio_ntb = stdev->mmio + SWITCHTEC_GAS_NTB_OFFSET;
+ stdev->partition = ioread8(&stdev->mmio_ntb->partition_id);
+ stdev->partition_count = ioread8(&stdev->mmio_ntb->partition_count);
+ stdev->mmio_part_cfg_all = stdev->mmio + SWITCHTEC_GAS_PART_CFG_OFFSET;
+ stdev->mmio_part_cfg = &stdev->mmio_part_cfg_all[stdev->partition];
+ stdev->mmio_pff_csr = stdev->mmio + SWITCHTEC_GAS_PFF_CSR_OFFSET;
+
+ init_pff(stdev);
+
+ pci_set_drvdata(pdev, stdev);
+
+ return 0;
+}
+
+static int switchtec_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ struct switchtec_dev *stdev;
+ int rc;
+
+ stdev = stdev_create(pdev);
+ if (IS_ERR(stdev))
+ return PTR_ERR(stdev);
+
+ rc = switchtec_init_pci(stdev, pdev);
+ if (rc)
+ goto err_put;
+
+ rc = switchtec_init_isr(stdev);
+ if (rc) {
+ dev_err(&stdev->dev, "failed to init isr.\n");
+ goto err_put;
+ }
+
+ iowrite32(SWITCHTEC_EVENT_CLEAR |
+ SWITCHTEC_EVENT_EN_IRQ,
+ &stdev->mmio_part_cfg->mrpc_comp_hdr);
+
+ rc = cdev_add(&stdev->cdev, stdev->dev.devt, 1);
+ if (rc)
+ goto err_put;
+
+ rc = device_add(&stdev->dev);
+ if (rc)
+ goto err_devadd;
+
+ dev_info(&stdev->dev, "Management device registered.\n");
+
+ return 0;
+
+err_devadd:
+ cdev_del(&stdev->cdev);
+ stdev_kill(stdev);
+err_put:
+ ida_simple_remove(&switchtec_minor_ida, MINOR(stdev->dev.devt));
+ put_device(&stdev->dev);
+ return rc;
+}
+
+static void switchtec_pci_remove(struct pci_dev *pdev)
+{
+ struct switchtec_dev *stdev = pci_get_drvdata(pdev);
+
+ pci_set_drvdata(pdev, NULL);
+
+ device_del(&stdev->dev);
+ cdev_del(&stdev->cdev);
+ ida_simple_remove(&switchtec_minor_ida, MINOR(stdev->dev.devt));
+ dev_info(&stdev->dev, "unregistered.\n");
+
+ stdev_kill(stdev);
+ put_device(&stdev->dev);
+}
+
+#define SWITCHTEC_PCI_DEVICE(device_id) \
+ { \
+ .vendor = MICROSEMI_VENDOR_ID, \
+ .device = device_id, \
+ .subvendor = PCI_ANY_ID, \
+ .subdevice = PCI_ANY_ID, \
+ .class = MICROSEMI_MGMT_CLASSCODE, \
+ .class_mask = 0xFFFFFFFF, \
+ }, \
+ { \
+ .vendor = MICROSEMI_VENDOR_ID, \
+ .device = device_id, \
+ .subvendor = PCI_ANY_ID, \
+ .subdevice = PCI_ANY_ID, \
+ .class = MICROSEMI_NTB_CLASSCODE, \
+ .class_mask = 0xFFFFFFFF, \
+ }
+
+static const struct pci_device_id switchtec_pci_tbl[] = {
+ SWITCHTEC_PCI_DEVICE(0x8531), //PFX 24xG3
+ SWITCHTEC_PCI_DEVICE(0x8532), //PFX 32xG3
+ SWITCHTEC_PCI_DEVICE(0x8533), //PFX 48xG3
+ SWITCHTEC_PCI_DEVICE(0x8534), //PFX 64xG3
+ SWITCHTEC_PCI_DEVICE(0x8535), //PFX 80xG3
+ SWITCHTEC_PCI_DEVICE(0x8536), //PFX 96xG3
+ SWITCHTEC_PCI_DEVICE(0x8543), //PSX 48xG3
+ SWITCHTEC_PCI_DEVICE(0x8544), //PSX 64xG3
+ SWITCHTEC_PCI_DEVICE(0x8545), //PSX 80xG3
+ SWITCHTEC_PCI_DEVICE(0x8546), //PSX 96xG3
+ {0}
+};
+MODULE_DEVICE_TABLE(pci, switchtec_pci_tbl);
+
+static struct pci_driver switchtec_pci_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = switchtec_pci_tbl,
+ .probe = switchtec_pci_probe,
+ .remove = switchtec_pci_remove,
+};
+
+static int __init switchtec_init(void)
+{
+ int rc;
+
+ rc = alloc_chrdev_region(&switchtec_devt, 0, max_devices,
+ "switchtec");
+ if (rc)
+ return rc;
+
+ switchtec_class = class_create(THIS_MODULE, "switchtec");
+ if (IS_ERR(switchtec_class)) {
+ rc = PTR_ERR(switchtec_class);
+ goto err_create_class;
+ }
+
+ rc = pci_register_driver(&switchtec_pci_driver);
+ if (rc)
+ goto err_pci_register;
+
+ pr_info(KBUILD_MODNAME ": loaded.\n");
+
+ return 0;
+
+err_pci_register:
+ class_destroy(switchtec_class);
+
+err_create_class:
+ unregister_chrdev_region(switchtec_devt, max_devices);
+
+ return rc;
+}
+module_init(switchtec_init);
+
+static void __exit switchtec_exit(void)
+{
+ pci_unregister_driver(&switchtec_pci_driver);
+ class_destroy(switchtec_class);
+ unregister_chrdev_region(switchtec_devt, max_devices);
+ ida_destroy(&switchtec_minor_ida);
+
+ pr_info(KBUILD_MODNAME ": unloaded.\n");
+}
+module_exit(switchtec_exit);
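
The poll() handler and the SWITCHTEC_IOCTL_EVENT_SUMMARY ioctl implemented above are how userspace is expected to wait for switch events. A hedged sketch of that loop follows; it assumes the UAPI header installs as <linux/switchtec_ioctl.h> (the include used at the top of this file) and that the summary structure exposes the global field copied by ioctl_event_summary():

	#include <poll.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/switchtec_ioctl.h>

	static void wait_for_switch_event(int fd)
	{
		struct switchtec_ioctl_event_summary sum;
		struct pollfd pfd = { .fd = fd, .events = POLLPRI };

		/* POLLPRI is raised once stdev->event_cnt advances past the
		 * count snapshotted for this open file. */
		if (poll(&pfd, 1, -1) <= 0)
			return;

		/* Reading the summary resynchronizes stuser->event_cnt, so the
		 * next poll() only wakes for events newer than this one. */
		if (ioctl(fd, SWITCHTEC_IOCTL_EVENT_SUMMARY, &sum) == 0)
			printf("global event summary: %llx\n",
			       (unsigned long long)sum.global);
	}

Callers waiting on an MRPC completion instead of events would watch POLLIN, which switchtec_dev_poll() sets once the command's completion fires.
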
diff --git a/drivers/pcmcia/electra_cf.c b/drivers/pcmcia/electra_cf.c
index 4d7bc3f4124a..c6fe2a4a7a6a 100644
--- a/drivers/pcmcia/electra_cf.c
+++ b/drivers/pcmcia/electra_cf.c
@@ -207,7 +207,7 @@ static int electra_cf_probe(struct platform_device *ofdev)
return -ENOMEM;
setup_timer(&cf->timer, electra_cf_timer, (unsigned long)cf);
- cf->irq = NO_IRQ;
+ cf->irq = 0;
cf->ofdev = ofdev;
cf->mem_phys = mem.start;
@@ -313,7 +313,7 @@ fail3:
fail2:
release_mem_region(cf->mem_phys, cf->mem_size);
fail1:
- if (cf->irq != NO_IRQ)
+ if (cf->irq)
free_irq(cf->irq, cf);
if (cf->io_virt)
diff --git a/drivers/perf/Kconfig b/drivers/perf/Kconfig
index 93651907874f..aa587edaf9ea 100644
--- a/drivers/perf/Kconfig
+++ b/drivers/perf/Kconfig
@@ -12,6 +12,10 @@ config ARM_PMU
Say y if you want to use CPU performance monitors on ARM-based
systems.
+config ARM_PMU_ACPI
+ depends on ARM_PMU && ACPI
+ def_bool y
+
config QCOM_L2_PMU
bool "Qualcomm Technologies L2-cache PMU"
depends on ARCH_QCOM && ARM64 && PERF_EVENTS && ACPI
@@ -21,6 +25,16 @@ config QCOM_L2_PMU
Adds the L2 cache PMU into the perf events subsystem for
monitoring L2 cache events.
+config QCOM_L3_PMU
+ bool "Qualcomm Technologies L3-cache PMU"
+ depends on ARCH_QCOM && ARM64 && PERF_EVENTS && ACPI
+ select QCOM_IRQ_COMBINER
+ help
+ Provides support for the L3 cache performance monitor unit (PMU)
+ in Qualcomm Technologies processors.
+ Adds the L3 cache PMU into the perf events subsystem for
+ monitoring L3 cache events.
+
config XGENE_PMU
depends on PERF_EVENTS && ARCH_XGENE
bool "APM X-Gene SoC PMU"
diff --git a/drivers/perf/Makefile b/drivers/perf/Makefile
index ef24833c94a8..6420bd4394d5 100644
--- a/drivers/perf/Makefile
+++ b/drivers/perf/Makefile
@@ -1,3 +1,5 @@
-obj-$(CONFIG_ARM_PMU) += arm_pmu.o
+obj-$(CONFIG_ARM_PMU) += arm_pmu.o arm_pmu_platform.o
+obj-$(CONFIG_ARM_PMU_ACPI) += arm_pmu_acpi.o
obj-$(CONFIG_QCOM_L2_PMU) += qcom_l2_pmu.o
+obj-$(CONFIG_QCOM_L3_PMU) += qcom_l3_pmu.o
obj-$(CONFIG_XGENE_PMU) += xgene_pmu.o
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index 9612b84bc3e0..dc459eb1246b 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -16,7 +16,6 @@
#include <linux/cpu_pm.h>
#include <linux/export.h>
#include <linux/kernel.h>
-#include <linux/of_device.h>
#include <linux/perf/arm_pmu.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
@@ -25,7 +24,6 @@
#include <linux/irq.h>
#include <linux/irqdesc.h>
-#include <asm/cputype.h>
#include <asm/irq_regs.h>
static int
@@ -235,20 +233,15 @@ armpmu_add(struct perf_event *event, int flags)
struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
struct hw_perf_event *hwc = &event->hw;
int idx;
- int err = 0;
/* An event following a process won't be stopped earlier */
if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
return -ENOENT;
- perf_pmu_disable(event->pmu);
-
/* If we don't have a space for the counter then finish early. */
idx = armpmu->get_event_idx(hw_events, event);
- if (idx < 0) {
- err = idx;
- goto out;
- }
+ if (idx < 0)
+ return idx;
/*
* If there is an event in the counter we are going to use then make
@@ -265,9 +258,7 @@ armpmu_add(struct perf_event *event, int flags)
/* Propagate our changes to the userspace mapping. */
perf_event_update_userpage(event);
-out:
- perf_pmu_enable(event->pmu);
- return err;
+ return 0;
}
static int
@@ -323,10 +314,16 @@ validate_group(struct perf_event *event)
return 0;
}
+static struct arm_pmu_platdata *armpmu_get_platdata(struct arm_pmu *armpmu)
+{
+ struct platform_device *pdev = armpmu->plat_device;
+
+ return pdev ? dev_get_platdata(&pdev->dev) : NULL;
+}
+
static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
{
struct arm_pmu *armpmu;
- struct platform_device *plat_device;
struct arm_pmu_platdata *plat;
int ret;
u64 start_clock, finish_clock;
@@ -338,8 +335,8 @@ static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
* dereference.
*/
armpmu = *(void **)dev;
- plat_device = armpmu->plat_device;
- plat = dev_get_platdata(&plat_device->dev);
+
+ plat = armpmu_get_platdata(armpmu);
start_clock = sched_clock();
if (plat && plat->handle_irq)
@@ -352,37 +349,6 @@ static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
return ret;
}
-static void
-armpmu_release_hardware(struct arm_pmu *armpmu)
-{
- armpmu->free_irq(armpmu);
-}
-
-static int
-armpmu_reserve_hardware(struct arm_pmu *armpmu)
-{
- int err = armpmu->request_irq(armpmu, armpmu_dispatch_irq);
- if (err) {
- armpmu_release_hardware(armpmu);
- return err;
- }
-
- return 0;
-}
-
-static void
-hw_perf_event_destroy(struct perf_event *event)
-{
- struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
- atomic_t *active_events = &armpmu->active_events;
- struct mutex *pmu_reserve_mutex = &armpmu->reserve_mutex;
-
- if (atomic_dec_and_mutex_lock(active_events, pmu_reserve_mutex)) {
- armpmu_release_hardware(armpmu);
- mutex_unlock(pmu_reserve_mutex);
- }
-}
-
static int
event_requires_mode_exclusion(struct perf_event_attr *attr)
{
@@ -455,8 +421,6 @@ __hw_perf_event_init(struct perf_event *event)
static int armpmu_event_init(struct perf_event *event)
{
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
- int err = 0;
- atomic_t *active_events = &armpmu->active_events;
/*
* Reject CPU-affine events for CPUs that are of a different class to
@@ -476,26 +440,7 @@ static int armpmu_event_init(struct perf_event *event)
if (armpmu->map_event(event) == -ENOENT)
return -ENOENT;
- event->destroy = hw_perf_event_destroy;
-
- if (!atomic_inc_not_zero(active_events)) {
- mutex_lock(&armpmu->reserve_mutex);
- if (atomic_read(active_events) == 0)
- err = armpmu_reserve_hardware(armpmu);
-
- if (!err)
- atomic_inc(active_events);
- mutex_unlock(&armpmu->reserve_mutex);
- }
-
- if (err)
- return err;
-
- err = __hw_perf_event_init(event);
- if (err)
- hw_perf_event_destroy(event);
-
- return err;
+ return __hw_perf_event_init(event);
}
static void armpmu_enable(struct pmu *pmu)
@@ -553,27 +498,6 @@ static struct attribute_group armpmu_common_attr_group = {
.attrs = armpmu_common_attrs,
};
-static void armpmu_init(struct arm_pmu *armpmu)
-{
- atomic_set(&armpmu->active_events, 0);
- mutex_init(&armpmu->reserve_mutex);
-
- armpmu->pmu = (struct pmu) {
- .pmu_enable = armpmu_enable,
- .pmu_disable = armpmu_disable,
- .event_init = armpmu_event_init,
- .add = armpmu_add,
- .del = armpmu_del,
- .start = armpmu_start,
- .stop = armpmu_stop,
- .read = armpmu_read,
- .filter_match = armpmu_filter_match,
- .attr_groups = armpmu->attr_groups,
- };
- armpmu->attr_groups[ARMPMU_ATTR_GROUP_COMMON] =
- &armpmu_common_attr_group;
-}
-
/* Set at runtime when we know what CPU type we are. */
static struct arm_pmu *__oprofile_cpu_pmu;
@@ -601,113 +525,85 @@ int perf_num_counters(void)
}
EXPORT_SYMBOL_GPL(perf_num_counters);
-static void cpu_pmu_enable_percpu_irq(void *data)
+void armpmu_free_irq(struct arm_pmu *armpmu, int cpu)
{
- int irq = *(int *)data;
+ struct pmu_hw_events __percpu *hw_events = armpmu->hw_events;
+ int irq = per_cpu(hw_events->irq, cpu);
- enable_percpu_irq(irq, IRQ_TYPE_NONE);
-}
+ if (!cpumask_test_and_clear_cpu(cpu, &armpmu->active_irqs))
+ return;
-static void cpu_pmu_disable_percpu_irq(void *data)
-{
- int irq = *(int *)data;
+ if (irq_is_percpu(irq)) {
+ free_percpu_irq(irq, &hw_events->percpu_pmu);
+ cpumask_clear(&armpmu->active_irqs);
+ return;
+ }
- disable_percpu_irq(irq);
+ free_irq(irq, per_cpu_ptr(&hw_events->percpu_pmu, cpu));
}
-static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)
+void armpmu_free_irqs(struct arm_pmu *armpmu)
{
- int i, irq, irqs;
- struct platform_device *pmu_device = cpu_pmu->plat_device;
- struct pmu_hw_events __percpu *hw_events = cpu_pmu->hw_events;
-
- irqs = min(pmu_device->num_resources, num_possible_cpus());
-
- irq = platform_get_irq(pmu_device, 0);
- if (irq > 0 && irq_is_percpu(irq)) {
- on_each_cpu_mask(&cpu_pmu->supported_cpus,
- cpu_pmu_disable_percpu_irq, &irq, 1);
- free_percpu_irq(irq, &hw_events->percpu_pmu);
- } else {
- for (i = 0; i < irqs; ++i) {
- int cpu = i;
-
- if (cpu_pmu->irq_affinity)
- cpu = cpu_pmu->irq_affinity[i];
+ int cpu;
- if (!cpumask_test_and_clear_cpu(cpu, &cpu_pmu->active_irqs))
- continue;
- irq = platform_get_irq(pmu_device, i);
- if (irq > 0)
- free_irq(irq, per_cpu_ptr(&hw_events->percpu_pmu, cpu));
- }
- }
+ for_each_cpu(cpu, &armpmu->supported_cpus)
+ armpmu_free_irq(armpmu, cpu);
}
-static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
+int armpmu_request_irq(struct arm_pmu *armpmu, int cpu)
{
- int i, err, irq, irqs;
- struct platform_device *pmu_device = cpu_pmu->plat_device;
- struct pmu_hw_events __percpu *hw_events = cpu_pmu->hw_events;
-
- if (!pmu_device)
- return -ENODEV;
-
- irqs = min(pmu_device->num_resources, num_possible_cpus());
- if (irqs < 1) {
- pr_warn_once("perf/ARM: No irqs for PMU defined, sampling events not supported\n");
+ int err = 0;
+ struct pmu_hw_events __percpu *hw_events = armpmu->hw_events;
+ const irq_handler_t handler = armpmu_dispatch_irq;
+ int irq = per_cpu(hw_events->irq, cpu);
+ if (!irq)
return 0;
- }
- irq = platform_get_irq(pmu_device, 0);
- if (irq > 0 && irq_is_percpu(irq)) {
+ if (irq_is_percpu(irq) && cpumask_empty(&armpmu->active_irqs)) {
err = request_percpu_irq(irq, handler, "arm-pmu",
&hw_events->percpu_pmu);
- if (err) {
- pr_err("unable to request IRQ%d for ARM PMU counters\n",
- irq);
- return err;
- }
+ } else if (irq_is_percpu(irq)) {
+ int other_cpu = cpumask_first(&armpmu->active_irqs);
+ int other_irq = per_cpu(hw_events->irq, other_cpu);
- on_each_cpu_mask(&cpu_pmu->supported_cpus,
- cpu_pmu_enable_percpu_irq, &irq, 1);
+ if (irq != other_irq) {
+ pr_warn("mismatched PPIs detected.\n");
+ err = -EINVAL;
+ }
} else {
- for (i = 0; i < irqs; ++i) {
- int cpu = i;
+ err = request_irq(irq, handler,
+ IRQF_NOBALANCING | IRQF_NO_THREAD, "arm-pmu",
+ per_cpu_ptr(&hw_events->percpu_pmu, cpu));
+ }
- err = 0;
- irq = platform_get_irq(pmu_device, i);
- if (irq < 0)
- continue;
+ if (err) {
+ pr_err("unable to request IRQ%d for ARM PMU counters\n",
+ irq);
+ return err;
+ }
- if (cpu_pmu->irq_affinity)
- cpu = cpu_pmu->irq_affinity[i];
+ cpumask_set_cpu(cpu, &armpmu->active_irqs);
- /*
- * If we have a single PMU interrupt that we can't shift,
- * assume that we're running on a uniprocessor machine and
- * continue. Otherwise, continue without this interrupt.
- */
- if (irq_set_affinity(irq, cpumask_of(cpu)) && irqs > 1) {
- pr_warn("unable to set irq affinity (irq=%d, cpu=%u)\n",
- irq, cpu);
- continue;
- }
-
- err = request_irq(irq, handler,
- IRQF_NOBALANCING | IRQF_NO_THREAD, "arm-pmu",
- per_cpu_ptr(&hw_events->percpu_pmu, cpu));
- if (err) {
- pr_err("unable to request IRQ%d for ARM PMU counters\n",
- irq);
- return err;
- }
-
- cpumask_set_cpu(cpu, &cpu_pmu->active_irqs);
- }
+ return 0;
+}
+
+int armpmu_request_irqs(struct arm_pmu *armpmu)
+{
+ int cpu, err;
+
+ for_each_cpu(cpu, &armpmu->supported_cpus) {
+ err = armpmu_request_irq(armpmu, cpu);
+ if (err)
+ break;
}
- return 0;
+ return err;
+}
+
+static int armpmu_get_cpu_irq(struct arm_pmu *pmu, int cpu)
+{
+ struct pmu_hw_events __percpu *hw_events = pmu->hw_events;
+ return per_cpu(hw_events->irq, cpu);
}
/*
@@ -719,11 +615,42 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
static int arm_perf_starting_cpu(unsigned int cpu, struct hlist_node *node)
{
struct arm_pmu *pmu = hlist_entry_safe(node, struct arm_pmu, node);
+ int irq;
if (!cpumask_test_cpu(cpu, &pmu->supported_cpus))
return 0;
if (pmu->reset)
pmu->reset(pmu);
+
+ irq = armpmu_get_cpu_irq(pmu, cpu);
+ if (irq) {
+ if (irq_is_percpu(irq)) {
+ enable_percpu_irq(irq, IRQ_TYPE_NONE);
+ return 0;
+ }
+
+ if (irq_force_affinity(irq, cpumask_of(cpu)) &&
+ num_possible_cpus() > 1) {
+ pr_warn("unable to set irq affinity (irq=%d, cpu=%u)\n",
+ irq, cpu);
+ }
+ }
+
+ return 0;
+}
+
+static int arm_perf_teardown_cpu(unsigned int cpu, struct hlist_node *node)
+{
+ struct arm_pmu *pmu = hlist_entry_safe(node, struct arm_pmu, node);
+ int irq;
+
+ if (!cpumask_test_cpu(cpu, &pmu->supported_cpus))
+ return 0;
+
+ irq = armpmu_get_cpu_irq(pmu, cpu);
+ if (irq && irq_is_percpu(irq))
+ disable_percpu_irq(irq);
+
return 0;
}
@@ -828,56 +755,22 @@ static inline void cpu_pm_pmu_unregister(struct arm_pmu *cpu_pmu) { }
static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
{
int err;
- int cpu;
- struct pmu_hw_events __percpu *cpu_hw_events;
-
- cpu_hw_events = alloc_percpu(struct pmu_hw_events);
- if (!cpu_hw_events)
- return -ENOMEM;
- err = cpuhp_state_add_instance_nocalls(CPUHP_AP_PERF_ARM_STARTING,
- &cpu_pmu->node);
+ err = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_STARTING,
+ &cpu_pmu->node);
if (err)
- goto out_free;
+ goto out;
err = cpu_pm_pmu_register(cpu_pmu);
if (err)
goto out_unregister;
- for_each_possible_cpu(cpu) {
- struct pmu_hw_events *events = per_cpu_ptr(cpu_hw_events, cpu);
- raw_spin_lock_init(&events->pmu_lock);
- events->percpu_pmu = cpu_pmu;
- }
-
- cpu_pmu->hw_events = cpu_hw_events;
- cpu_pmu->request_irq = cpu_pmu_request_irq;
- cpu_pmu->free_irq = cpu_pmu_free_irq;
-
- /* Ensure the PMU has sane values out of reset. */
- if (cpu_pmu->reset)
- on_each_cpu_mask(&cpu_pmu->supported_cpus, cpu_pmu->reset,
- cpu_pmu, 1);
-
- /* If no interrupts available, set the corresponding capability flag */
- if (!platform_get_irq(cpu_pmu->plat_device, 0))
- cpu_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
-
- /*
- * This is a CPU PMU potentially in a heterogeneous configuration (e.g.
- * big.LITTLE). This is not an uncore PMU, and we have taken ctx
- * sharing into account (e.g. with our pmu::filter_match callback and
- * pmu::event_init group validation).
- */
- cpu_pmu->pmu.capabilities |= PERF_PMU_CAP_HETEROGENEOUS_CPUS;
-
return 0;
out_unregister:
cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_STARTING,
&cpu_pmu->node);
-out_free:
- free_percpu(cpu_hw_events);
+out:
return err;
}
@@ -886,177 +779,78 @@ static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu)
cpu_pm_pmu_unregister(cpu_pmu);
cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_STARTING,
&cpu_pmu->node);
- free_percpu(cpu_pmu->hw_events);
}
-/*
- * CPU PMU identification and probing.
- */
-static int probe_current_pmu(struct arm_pmu *pmu,
- const struct pmu_probe_info *info)
+struct arm_pmu *armpmu_alloc(void)
{
- int cpu = get_cpu();
- unsigned int cpuid = read_cpuid_id();
- int ret = -ENODEV;
-
- pr_info("probing PMU on CPU %d\n", cpu);
+ struct arm_pmu *pmu;
+ int cpu;
- for (; info->init != NULL; info++) {
- if ((cpuid & info->mask) != info->cpuid)
- continue;
- ret = info->init(pmu);
- break;
+ pmu = kzalloc(sizeof(*pmu), GFP_KERNEL);
+ if (!pmu) {
+ pr_info("failed to allocate PMU device!\n");
+ goto out;
}
- put_cpu();
- return ret;
-}
-
-static int of_pmu_irq_cfg(struct arm_pmu *pmu)
-{
- int *irqs, i = 0;
- bool using_spi = false;
- struct platform_device *pdev = pmu->plat_device;
-
- irqs = kcalloc(pdev->num_resources, sizeof(*irqs), GFP_KERNEL);
- if (!irqs)
- return -ENOMEM;
-
- do {
- struct device_node *dn;
- int cpu, irq;
-
- /* See if we have an affinity entry */
- dn = of_parse_phandle(pdev->dev.of_node, "interrupt-affinity", i);
- if (!dn)
- break;
-
- /* Check the IRQ type and prohibit a mix of PPIs and SPIs */
- irq = platform_get_irq(pdev, i);
- if (irq > 0) {
- bool spi = !irq_is_percpu(irq);
-
- if (i > 0 && spi != using_spi) {
- pr_err("PPI/SPI IRQ type mismatch for %s!\n",
- dn->name);
- of_node_put(dn);
- kfree(irqs);
- return -EINVAL;
- }
-
- using_spi = spi;
- }
-
- /* Now look up the logical CPU number */
- for_each_possible_cpu(cpu) {
- struct device_node *cpu_dn;
-
- cpu_dn = of_cpu_device_node_get(cpu);
- of_node_put(cpu_dn);
-
- if (dn == cpu_dn)
- break;
- }
+ pmu->hw_events = alloc_percpu(struct pmu_hw_events);
+ if (!pmu->hw_events) {
+ pr_info("failed to allocate per-cpu PMU data.\n");
+ goto out_free_pmu;
+ }
- if (cpu >= nr_cpu_ids) {
- pr_warn("Failed to find logical CPU for %s\n",
- dn->name);
- of_node_put(dn);
- cpumask_setall(&pmu->supported_cpus);
- break;
- }
- of_node_put(dn);
+ pmu->pmu = (struct pmu) {
+ .pmu_enable = armpmu_enable,
+ .pmu_disable = armpmu_disable,
+ .event_init = armpmu_event_init,
+ .add = armpmu_add,
+ .del = armpmu_del,
+ .start = armpmu_start,
+ .stop = armpmu_stop,
+ .read = armpmu_read,
+ .filter_match = armpmu_filter_match,
+ .attr_groups = pmu->attr_groups,
+ /*
+ * This is a CPU PMU potentially in a heterogeneous
+ * configuration (e.g. big.LITTLE). This is not an uncore PMU,
+ * and we have taken ctx sharing into account (e.g. with our
+ * pmu::filter_match callback and pmu::event_init group
+ * validation).
+ */
+ .capabilities = PERF_PMU_CAP_HETEROGENEOUS_CPUS,
+ };
- /* For SPIs, we need to track the affinity per IRQ */
- if (using_spi) {
- if (i >= pdev->num_resources)
- break;
+ pmu->attr_groups[ARMPMU_ATTR_GROUP_COMMON] =
+ &armpmu_common_attr_group;
- irqs[i] = cpu;
- }
+ for_each_possible_cpu(cpu) {
+ struct pmu_hw_events *events;
- /* Keep track of the CPUs containing this PMU type */
- cpumask_set_cpu(cpu, &pmu->supported_cpus);
- i++;
- } while (1);
-
- /* If we didn't manage to parse anything, try the interrupt affinity */
- if (cpumask_weight(&pmu->supported_cpus) == 0) {
- int irq = platform_get_irq(pdev, 0);
-
- if (irq > 0 && irq_is_percpu(irq)) {
- /* If using PPIs, check the affinity of the partition */
- int ret;
-
- ret = irq_get_percpu_devid_partition(irq, &pmu->supported_cpus);
- if (ret) {
- kfree(irqs);
- return ret;
- }
- } else {
- /* Otherwise default to all CPUs */
- cpumask_setall(&pmu->supported_cpus);
- }
+ events = per_cpu_ptr(pmu->hw_events, cpu);
+ raw_spin_lock_init(&events->pmu_lock);
+ events->percpu_pmu = pmu;
}
- /* If we matched up the IRQ affinities, use them to route the SPIs */
- if (using_spi && i == pdev->num_resources)
- pmu->irq_affinity = irqs;
- else
- kfree(irqs);
+ return pmu;
- return 0;
+out_free_pmu:
+ kfree(pmu);
+out:
+ return NULL;
}
-int arm_pmu_device_probe(struct platform_device *pdev,
- const struct of_device_id *of_table,
- const struct pmu_probe_info *probe_table)
+void armpmu_free(struct arm_pmu *pmu)
{
- const struct of_device_id *of_id;
- const int (*init_fn)(struct arm_pmu *);
- struct device_node *node = pdev->dev.of_node;
- struct arm_pmu *pmu;
- int ret = -ENODEV;
-
- pmu = kzalloc(sizeof(struct arm_pmu), GFP_KERNEL);
- if (!pmu) {
- pr_info("failed to allocate PMU device!\n");
- return -ENOMEM;
- }
-
- armpmu_init(pmu);
-
- pmu->plat_device = pdev;
-
- if (node && (of_id = of_match_node(of_table, pdev->dev.of_node))) {
- init_fn = of_id->data;
-
- pmu->secure_access = of_property_read_bool(pdev->dev.of_node,
- "secure-reg-access");
-
- /* arm64 systems boot only as non-secure */
- if (IS_ENABLED(CONFIG_ARM64) && pmu->secure_access) {
- pr_warn("ignoring \"secure-reg-access\" property for arm64\n");
- pmu->secure_access = false;
- }
-
- ret = of_pmu_irq_cfg(pmu);
- if (!ret)
- ret = init_fn(pmu);
- } else if (probe_table) {
- cpumask_setall(&pmu->supported_cpus);
- ret = probe_current_pmu(pmu, probe_table);
- }
-
- if (ret) {
- pr_info("%s: failed to probe PMU!\n", of_node_full_name(node));
- goto out_free;
- }
+ free_percpu(pmu->hw_events);
+ kfree(pmu);
+}
+int armpmu_register(struct arm_pmu *pmu)
+{
+ int ret;
ret = cpu_pmu_init(pmu);
if (ret)
- goto out_free;
+ return ret;
ret = perf_pmu_register(&pmu->pmu, pmu->name, -1);
if (ret)
@@ -1066,17 +860,12 @@ int arm_pmu_device_probe(struct platform_device *pdev,
__oprofile_cpu_pmu = pmu;
pr_info("enabled with %s PMU driver, %d counters available\n",
- pmu->name, pmu->num_events);
+ pmu->name, pmu->num_events);
return 0;
out_destroy:
cpu_pmu_destroy(pmu);
-out_free:
- pr_info("%s: failed to register PMU devices!\n",
- of_node_full_name(node));
- kfree(pmu->irq_affinity);
- kfree(pmu);
return ret;
}
@@ -1086,7 +875,8 @@ static int arm_pmu_hp_init(void)
ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_STARTING,
"perf/arm/pmu:starting",
- arm_perf_starting_cpu, NULL);
+ arm_perf_starting_cpu,
+ arm_perf_teardown_cpu);
if (ret)
pr_err("CPU hotplug notifier for ARM PMU could not be registered: %d\n",
ret);
diff --git a/drivers/perf/arm_pmu_acpi.c b/drivers/perf/arm_pmu_acpi.c
new file mode 100644
index 000000000000..34c862f213c7
--- /dev/null
+++ b/drivers/perf/arm_pmu_acpi.c
@@ -0,0 +1,256 @@
+/*
+ * ACPI probing code for ARM performance counters.
+ *
+ * Copyright (C) 2017 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/acpi.h>
+#include <linux/cpumask.h>
+#include <linux/init.h>
+#include <linux/percpu.h>
+#include <linux/perf/arm_pmu.h>
+
+#include <asm/cputype.h>
+
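+/*
+ * Per-CPU bookkeeping: the PMU IRQ parsed from each CPU's MADT GICC entry,
+ * and the arm_pmu instance that CPU was associated with (CPUs reporting the
+ * same MIDR share a single instance).
+ */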
+static DEFINE_PER_CPU(struct arm_pmu *, probed_pmus);
+static DEFINE_PER_CPU(int, pmu_irqs);
+
+static int arm_pmu_acpi_register_irq(int cpu)
+{
+ struct acpi_madt_generic_interrupt *gicc;
+ int gsi, trigger;
+
+ gicc = acpi_cpu_get_madt_gicc(cpu);
+ if (WARN_ON(!gicc))
+ return -EINVAL;
+
+ gsi = gicc->performance_interrupt;
+ if (gicc->flags & ACPI_MADT_PERFORMANCE_IRQ_MODE)
+ trigger = ACPI_EDGE_SENSITIVE;
+ else
+ trigger = ACPI_LEVEL_SENSITIVE;
+
+ /*
+ * Helpfully, the MADT GICC doesn't have a polarity flag for the
+ * "performance interrupt". Luckily, on compliant GICs the polarity is
+ * a fixed value in HW (for both SPIs and PPIs) that we cannot change
+ * from SW.
+ *
+ * Here we pass in ACPI_ACTIVE_HIGH to keep the core code happy. This
+ * may not match the real polarity, but that should not matter.
+ *
+ * Other interrupt controllers are not supported with ACPI.
+ */
+ return acpi_register_gsi(NULL, gsi, trigger, ACPI_ACTIVE_HIGH);
+}
+
+static void arm_pmu_acpi_unregister_irq(int cpu)
+{
+ struct acpi_madt_generic_interrupt *gicc;
+ int gsi;
+
+ gicc = acpi_cpu_get_madt_gicc(cpu);
+ if (!gicc)
+ return;
+
+ gsi = gicc->performance_interrupt;
+ acpi_unregister_gsi(gsi);
+}
+
+static int arm_pmu_acpi_parse_irqs(void)
+{
+ int irq, cpu, irq_cpu, err;
+
+ for_each_possible_cpu(cpu) {
+ irq = arm_pmu_acpi_register_irq(cpu);
+ if (irq < 0) {
+ err = irq;
+ pr_warn("Unable to parse ACPI PMU IRQ for CPU%d: %d\n",
+ cpu, err);
+ goto out_err;
+ } else if (irq == 0) {
+ pr_warn("No ACPI PMU IRQ for CPU%d\n", cpu);
+ }
+
+ per_cpu(pmu_irqs, cpu) = irq;
+ }
+
+ return 0;
+
+out_err:
+ for_each_possible_cpu(cpu) {
+ irq = per_cpu(pmu_irqs, cpu);
+ if (!irq)
+ continue;
+
+ arm_pmu_acpi_unregister_irq(cpu);
+
+ /*
+ * Blat all copies of the IRQ so that we only unregister the
+ * corresponding GSI once (e.g. when we have PPIs).
+ */
+ for_each_possible_cpu(irq_cpu) {
+ if (per_cpu(pmu_irqs, irq_cpu) == irq)
+ per_cpu(pmu_irqs, irq_cpu) = 0;
+ }
+ }
+
+ return err;
+}
+
+static struct arm_pmu *arm_pmu_acpi_find_alloc_pmu(void)
+{
+ unsigned long cpuid = read_cpuid_id();
+ struct arm_pmu *pmu;
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ pmu = per_cpu(probed_pmus, cpu);
+ if (!pmu || pmu->acpi_cpuid != cpuid)
+ continue;
+
+ return pmu;
+ }
+
+ pmu = armpmu_alloc();
+ if (!pmu) {
+ pr_warn("Unable to allocate PMU for CPU%d\n",
+ smp_processor_id());
+ return NULL;
+ }
+
+ pmu->acpi_cpuid = cpuid;
+
+ return pmu;
+}
+
+/*
+ * This must run before the common arm_pmu hotplug logic, so that we can
+ * associate a CPU and its interrupt before the common code tries to manage the
+ * affinity and so on.
+ *
+ * Note that hotplug events are serialized, so we cannot race with another CPU
+ * coming up. The perf core won't open events while a hotplug event is in
+ * progress.
+ */
+static int arm_pmu_acpi_cpu_starting(unsigned int cpu)
+{
+ struct arm_pmu *pmu;
+ struct pmu_hw_events __percpu *hw_events;
+ int irq;
+
+ /* If we've already probed this CPU, we have nothing to do */
+ if (per_cpu(probed_pmus, cpu))
+ return 0;
+
+ irq = per_cpu(pmu_irqs, cpu);
+
+ pmu = arm_pmu_acpi_find_alloc_pmu();
+ if (!pmu)
+ return -ENOMEM;
+
+ cpumask_set_cpu(cpu, &pmu->supported_cpus);
+
+ per_cpu(probed_pmus, cpu) = pmu;
+
+ /*
+ * Log and request the IRQ so the core arm_pmu code can manage it. In
+ * some situations (e.g. mismatched PPIs), we may fail to request the
+ * IRQ. However, it may be too late for us to do anything about it.
+ * The common ARM PMU code will log a warning in this case.
+ */
+ hw_events = pmu->hw_events;
+ per_cpu(hw_events->irq, cpu) = irq;
+ armpmu_request_irq(pmu, cpu);
+
+ /*
+ * Ideally, we'd probe the PMU here when we find the first matching
+ * CPU. We can't do that for several reasons; see the comment in
+ * arm_pmu_acpi_init().
+ *
+ * So for the time being, we're done.
+ */
+ return 0;
+}
+
+int arm_pmu_acpi_probe(armpmu_init_fn init_fn)
+{
+ int pmu_idx = 0;
+ int cpu, ret;
+
+ if (acpi_disabled)
+ return 0;
+
+ /*
+ * Initialise and register the set of PMUs which we know about right
+ * now. Ideally we'd do this in arm_pmu_acpi_cpu_starting() so that we
+ * could handle late hotplug, but this may lead to deadlock since we
+ * might try to register a hotplug notifier instance from within a
+ * hotplug notifier.
+ *
+ * There's also the problem of having access to the right init_fn,
+ * without tying this too deeply into the "real" PMU driver.
+ *
+ * For the moment, as with the platform/DT case, we need at least one
+ * of a PMU's CPUs to be online at probe time.
+ */
+ for_each_possible_cpu(cpu) {
+ struct arm_pmu *pmu = per_cpu(probed_pmus, cpu);
+ char *base_name;
+
+ if (!pmu || pmu->name)
+ continue;
+
+ ret = init_fn(pmu);
+ if (ret == -ENODEV) {
+ /* PMU not handled by this driver, or not present */
+ continue;
+ } else if (ret) {
+ pr_warn("Unable to initialise PMU for CPU%d\n", cpu);
+ return ret;
+ }
+
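+ /*
+ * Give each logical PMU instance a distinct name ("<base>_0",
+ * "<base>_1", ...) in the order the instances are probed.
+ */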
+ base_name = pmu->name;
+ pmu->name = kasprintf(GFP_KERNEL, "%s_%d", base_name, pmu_idx++);
+ if (!pmu->name) {
+ pr_warn("Unable to allocate PMU name for CPU%d\n", cpu);
+ return -ENOMEM;
+ }
+
+ ret = armpmu_register(pmu);
+ if (ret) {
+ pr_warn("Failed to register PMU for CPU%d\n", cpu);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int arm_pmu_acpi_init(void)
+{
+ int ret;
+
+ if (acpi_disabled)
+ return 0;
+
+ /*
+ * We can't request IRQs yet, since we don't know the cookie value
+ * until we know which CPUs share the same logical PMU. We'll handle
+ * that in arm_pmu_acpi_cpu_starting().
+ */
+ ret = arm_pmu_acpi_parse_irqs();
+ if (ret)
+ return ret;
+
+ ret = cpuhp_setup_state(CPUHP_AP_PERF_ARM_ACPI_STARTING,
+ "perf/arm/pmu_acpi:starting",
+ arm_pmu_acpi_cpu_starting, NULL);
+
+ return ret;
+}
+subsys_initcall(arm_pmu_acpi_init)
diff --git a/drivers/perf/arm_pmu_platform.c b/drivers/perf/arm_pmu_platform.c
new file mode 100644
index 000000000000..69255f53057a
--- /dev/null
+++ b/drivers/perf/arm_pmu_platform.c
@@ -0,0 +1,235 @@
+/*
+ * platform_device probing code for ARM performance counters.
+ *
+ * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles
+ * Copyright (C) 2010 ARM Ltd., Will Deacon <will.deacon@arm.com>
+ */
+#define pr_fmt(fmt) "hw perfevents: " fmt
+
+#include <linux/bug.h>
+#include <linux/cpumask.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/irq.h>
+#include <linux/irqdesc.h>
+#include <linux/kconfig.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/percpu.h>
+#include <linux/perf/arm_pmu.h>
+#include <linux/platform_device.h>
+#include <linux/printk.h>
+#include <linux/smp.h>
+
+static int probe_current_pmu(struct arm_pmu *pmu,
+ const struct pmu_probe_info *info)
+{
+ int cpu = get_cpu();
+ unsigned int cpuid = read_cpuid_id();
+ int ret = -ENODEV;
+
+ pr_info("probing PMU on CPU %d\n", cpu);
+
+ for (; info->init != NULL; info++) {
+ if ((cpuid & info->mask) != info->cpuid)
+ continue;
+ ret = info->init(pmu);
+ break;
+ }
+
+ put_cpu();
+ return ret;
+}
+
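+/*
+ * A per-CPU (PPI) interrupt covers every CPU in its partition: record the
+ * partition as this PMU's set of supported CPUs and note the IRQ for each
+ * of them.
+ */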
+static int pmu_parse_percpu_irq(struct arm_pmu *pmu, int irq)
+{
+ int cpu, ret;
+ struct pmu_hw_events __percpu *hw_events = pmu->hw_events;
+
+ ret = irq_get_percpu_devid_partition(irq, &pmu->supported_cpus);
+ if (ret)
+ return ret;
+
+ for_each_cpu(cpu, &pmu->supported_cpus)
+ per_cpu(hw_events->irq, cpu) = irq;
+
+ return 0;
+}
+
+static bool pmu_has_irq_affinity(struct device_node *node)
+{
+ return !!of_find_property(node, "interrupt-affinity", NULL);
+}
+
+static int pmu_parse_irq_affinity(struct device_node *node, int i)
+{
+ struct device_node *dn;
+ int cpu;
+
+ /*
+ * If we don't have an interrupt-affinity property, we guess irq
+ * affinity matches our logical CPU order, as we used to assume.
+ * This is fragile, so we'll warn in pmu_parse_irqs().
+ */
+ if (!pmu_has_irq_affinity(node))
+ return i;
+
+ dn = of_parse_phandle(node, "interrupt-affinity", i);
+ if (!dn) {
+ pr_warn("failed to parse interrupt-affinity[%d] for %s\n",
+ i, node->name);
+ return -EINVAL;
+ }
+
+ /* Now look up the logical CPU number */
+ for_each_possible_cpu(cpu) {
+ struct device_node *cpu_dn;
+
+ cpu_dn = of_cpu_device_node_get(cpu);
+ of_node_put(cpu_dn);
+
+ if (dn == cpu_dn)
+ break;
+ }
+
+ if (cpu >= nr_cpu_ids) {
+ pr_warn("failed to find logical CPU for %s\n", dn->name);
+ }
+
+ of_node_put(dn);
+
+ return cpu;
+}
+
+static int pmu_parse_irqs(struct arm_pmu *pmu)
+{
+ int i = 0, num_irqs;
+ struct platform_device *pdev = pmu->plat_device;
+ struct pmu_hw_events __percpu *hw_events = pmu->hw_events;
+
+ num_irqs = platform_irq_count(pdev);
+ if (num_irqs < 0) {
+ pr_err("unable to count PMU IRQs\n");
+ return num_irqs;
+ }
+
+ /*
+ * In this case we have no idea which CPUs are covered by the PMU.
+ * To match our prior behaviour, we assume all CPUs.
+ */
+ if (num_irqs == 0) {
+ pr_warn("no irqs for PMU, sampling events not supported\n");
+ pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
+ cpumask_setall(&pmu->supported_cpus);
+ return 0;
+ }
+
+ if (num_irqs == 1) {
+ int irq = platform_get_irq(pdev, 0);
+ if (irq && irq_is_percpu(irq))
+ return pmu_parse_percpu_irq(pmu, irq);
+ }
+
+ if (!pmu_has_irq_affinity(pdev->dev.of_node)) {
+ pr_warn("no interrupt-affinity property for %s, guessing.\n",
+ of_node_full_name(pdev->dev.of_node));
+ }
+
+ /*
+ * Some platforms have all PMU IRQs OR'd into a single IRQ, with a
+ * special platdata function that attempts to demux them.
+ */
+ if (dev_get_platdata(&pdev->dev))
+ cpumask_setall(&pmu->supported_cpus);
+
+ for (i = 0; i < num_irqs; i++) {
+ int cpu, irq;
+
+ irq = platform_get_irq(pdev, i);
+ if (WARN_ON(irq <= 0))
+ continue;
+
+ if (irq_is_percpu(irq)) {
+ pr_warn("multiple PPIs or mismatched SPI/PPI detected\n");
+ return -EINVAL;
+ }
+
+ cpu = pmu_parse_irq_affinity(pdev->dev.of_node, i);
+ if (cpu < 0)
+ return cpu;
+ if (cpu >= nr_cpu_ids)
+ continue;
+
+ if (per_cpu(hw_events->irq, cpu)) {
+ pr_warn("multiple PMU IRQs for the same CPU detected\n");
+ return -EINVAL;
+ }
+
+ per_cpu(hw_events->irq, cpu) = irq;
+ cpumask_set_cpu(cpu, &pmu->supported_cpus);
+ }
+
+ return 0;
+}
+
+int arm_pmu_device_probe(struct platform_device *pdev,
+ const struct of_device_id *of_table,
+ const struct pmu_probe_info *probe_table)
+{
+ const struct of_device_id *of_id;
+ armpmu_init_fn init_fn;
+ struct device_node *node = pdev->dev.of_node;
+ struct arm_pmu *pmu;
+ int ret = -ENODEV;
+
+ pmu = armpmu_alloc();
+ if (!pmu)
+ return -ENOMEM;
+
+ pmu->plat_device = pdev;
+
+ ret = pmu_parse_irqs(pmu);
+ if (ret)
+ goto out_free;
+
+ if (node && (of_id = of_match_node(of_table, pdev->dev.of_node))) {
+ init_fn = of_id->data;
+
+ pmu->secure_access = of_property_read_bool(pdev->dev.of_node,
+ "secure-reg-access");
+
+ /* arm64 systems boot only as non-secure */
+ if (IS_ENABLED(CONFIG_ARM64) && pmu->secure_access) {
+ pr_warn("ignoring \"secure-reg-access\" property for arm64\n");
+ pmu->secure_access = false;
+ }
+
+ ret = init_fn(pmu);
+ } else if (probe_table) {
+ cpumask_setall(&pmu->supported_cpus);
+ ret = probe_current_pmu(pmu, probe_table);
+ }
+
+ if (ret) {
+ pr_info("%s: failed to probe PMU!\n", of_node_full_name(node));
+ goto out_free;
+ }
+
+ ret = armpmu_request_irqs(pmu);
+ if (ret)
+ goto out_free_irqs;
+
+ ret = armpmu_register(pmu);
+ if (ret)
+ goto out_free;
+
+ return 0;
+
+out_free_irqs:
+ armpmu_free_irqs(pmu);
+out_free:
+ pr_info("%s: failed to register PMU devices!\n",
+ of_node_full_name(node));
+ armpmu_free(pmu);
+ return ret;
+}
diff --git a/drivers/perf/qcom_l3_pmu.c b/drivers/perf/qcom_l3_pmu.c
new file mode 100644
index 000000000000..7f6b62b29e9d
--- /dev/null
+++ b/drivers/perf/qcom_l3_pmu.c
@@ -0,0 +1,849 @@
+/*
+ * Driver for the L3 cache PMUs in Qualcomm Technologies chips.
+ *
+ * The driver supports a distributed cache architecture where the overall
+ * cache for a socket is comprised of multiple slices, each with its own PMU.
+ * Access to each individual PMU is provided even though all CPUs share all
+ * the slices. User space needs to aggregate the individual counts to provide
+ * a global picture.
+ *
+ * See Documentation/perf/qcom_l3_pmu.txt for more details.
+ *
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/acpi.h>
+#include <linux/bitops.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/perf_event.h>
+#include <linux/platform_device.h>
+
+/*
+ * General constants
+ */
+
+/* Number of counters on each PMU */
+#define L3_NUM_COUNTERS 8
+/* Mask for the event type field within perf_event_attr.config and EVTYPE reg */
+#define L3_EVTYPE_MASK 0xFF
+/*
+ * Bit position of the 'long counter' flag within perf_event_attr.config.
+ * Reserve some space between the event type and this flag to allow expansion
+ * in the event type field.
+ */
+#define L3_EVENT_LC_BIT 32
+
+/*
+ * Register offsets
+ */
+
+/* Perfmon registers */
+#define L3_HML3_PM_CR 0x000
+#define L3_HML3_PM_EVCNTR(__cntr) (0x420 + ((__cntr) & 0x7) * 8)
+#define L3_HML3_PM_CNTCTL(__cntr) (0x120 + ((__cntr) & 0x7) * 8)
+#define L3_HML3_PM_EVTYPE(__cntr) (0x220 + ((__cntr) & 0x7) * 8)
+#define L3_HML3_PM_FILTRA 0x300
+#define L3_HML3_PM_FILTRB 0x308
+#define L3_HML3_PM_FILTRC 0x310
+#define L3_HML3_PM_FILTRAM 0x304
+#define L3_HML3_PM_FILTRBM 0x30C
+#define L3_HML3_PM_FILTRCM 0x314
+
+/* Basic counter registers */
+#define L3_M_BC_CR 0x500
+#define L3_M_BC_SATROLL_CR 0x504
+#define L3_M_BC_CNTENSET 0x508
+#define L3_M_BC_CNTENCLR 0x50C
+#define L3_M_BC_INTENSET 0x510
+#define L3_M_BC_INTENCLR 0x514
+#define L3_M_BC_GANG 0x718
+#define L3_M_BC_OVSR 0x740
+#define L3_M_BC_IRQCTL 0x96C
+
+/*
+ * Bit field definitions
+ */
+
+/* L3_HML3_PM_CR */
+#define PM_CR_RESET (0)
+
+/* L3_HML3_PM_XCNTCTL/L3_HML3_PM_CNTCTLx */
+#define PMCNT_RESET (0)
+
+/* L3_HML3_PM_EVTYPEx */
+#define EVSEL(__val) ((__val) & L3_EVTYPE_MASK)
+
+/* Reset value for all the filter registers */
+#define PM_FLTR_RESET (0)
+
+/* L3_M_BC_CR */
+#define BC_RESET (1UL << 1)
+#define BC_ENABLE (1UL << 0)
+
+/* L3_M_BC_SATROLL_CR */
+#define BC_SATROLL_CR_RESET (0)
+
+/* L3_M_BC_CNTENSET */
+#define PMCNTENSET(__cntr) (1UL << ((__cntr) & 0x7))
+
+/* L3_M_BC_CNTENCLR */
+#define PMCNTENCLR(__cntr) (1UL << ((__cntr) & 0x7))
+#define BC_CNTENCLR_RESET (0xFF)
+
+/* L3_M_BC_INTENSET */
+#define PMINTENSET(__cntr) (1UL << ((__cntr) & 0x7))
+
+/* L3_M_BC_INTENCLR */
+#define PMINTENCLR(__cntr) (1UL << ((__cntr) & 0x7))
+#define BC_INTENCLR_RESET (0xFF)
+
+/* L3_M_BC_GANG */
+#define GANG_EN(__cntr) (1UL << ((__cntr) & 0x7))
+#define BC_GANG_RESET (0)
+
+/* L3_M_BC_OVSR */
+#define PMOVSRCLR(__cntr) (1UL << ((__cntr) & 0x7))
+#define PMOVSRCLR_RESET (0xFF)
+
+/* L3_M_BC_IRQCTL */
+#define PMIRQONMSBEN(__cntr) (1UL << ((__cntr) & 0x7))
+#define BC_IRQCTL_RESET (0x0)
+
+/*
+ * Events
+ */
+
+#define L3_EVENT_CYCLES 0x01
+#define L3_EVENT_READ_HIT 0x20
+#define L3_EVENT_READ_MISS 0x21
+#define L3_EVENT_READ_HIT_D 0x22
+#define L3_EVENT_READ_MISS_D 0x23
+#define L3_EVENT_WRITE_HIT 0x24
+#define L3_EVENT_WRITE_MISS 0x25
+
+/*
+ * Decoding of settings from perf_event_attr
+ *
+ * The config format for perf events is:
+ * - config: bits 0-7: event type
+ * bit 32: HW counter size requested, 0: 32 bits, 1: 64 bits
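+ *
+ * For example, config=0x21 counts read misses on a single 32-bit counter,
+ * while config=0x100000021 (the 'lc' bit set) counts them on a chained
+ * 64-bit counter pair.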
+ */
+
+static inline u32 get_event_type(struct perf_event *event)
+{
+ return (event->attr.config) & L3_EVTYPE_MASK;
+}
+
+static inline bool event_uses_long_counter(struct perf_event *event)
+{
+ return !!(event->attr.config & BIT_ULL(L3_EVENT_LC_BIT));
+}
+
+static inline int event_num_counters(struct perf_event *event)
+{
+ return event_uses_long_counter(event) ? 2 : 1;
+}
+
+/*
+ * Main PMU, inherits from the core perf PMU type
+ */
+struct l3cache_pmu {
+ struct pmu pmu;
+ struct hlist_node node;
+ void __iomem *regs;
+ struct perf_event *events[L3_NUM_COUNTERS];
+ unsigned long used_mask[BITS_TO_LONGS(L3_NUM_COUNTERS)];
+ cpumask_t cpumask;
+};
+
+#define to_l3cache_pmu(p) (container_of(p, struct l3cache_pmu, pmu))
+
+/*
+ * Type used to group hardware counter operations
+ *
+ * Used to implement two types of hardware counters, standard (32bits) and
+ * long (64bits). The hardware supports counter chaining which we use to
+ * implement long counters. This support is exposed via the 'lc' flag field
+ * in perf_event_attr.config.
+ */
+struct l3cache_event_ops {
+ /* Called to start event monitoring */
+ void (*start)(struct perf_event *event);
+ /* Called to stop event monitoring */
+ void (*stop)(struct perf_event *event, int flags);
+ /* Called to update the perf_event */
+ void (*update)(struct perf_event *event);
+};
+
+/*
+ * Implementation of long counter operations
+ *
+ * 64bit counters are implemented by chaining two of the 32bit physical
+ * counters. The PMU only supports chaining of adjacent even/odd pairs
+ * and for simplicity the driver always configures the odd counter to
+ * count the overflows of the lower-numbered even counter. Note that since
+ * the resulting hardware counter is 64bits, no IRQs are required to maintain
+ * the software counter, which is also 64bits.
+ */
+
+static void qcom_l3_cache__64bit_counter_start(struct perf_event *event)
+{
+ struct l3cache_pmu *l3pmu = to_l3cache_pmu(event->pmu);
+ int idx = event->hw.idx;
+ u32 evsel = get_event_type(event);
+ u32 gang;
+
+ /* Set the odd counter to count the overflows of the even counter */
+ gang = readl_relaxed(l3pmu->regs + L3_M_BC_GANG);
+ gang |= GANG_EN(idx + 1);
+ writel_relaxed(gang, l3pmu->regs + L3_M_BC_GANG);
+
+ /* Initialize the hardware counters and reset prev_count */
+ local64_set(&event->hw.prev_count, 0);
+ writel_relaxed(0, l3pmu->regs + L3_HML3_PM_EVCNTR(idx + 1));
+ writel_relaxed(0, l3pmu->regs + L3_HML3_PM_EVCNTR(idx));
+
+ /*
+ * Set the event types, the upper half must use zero and the lower
+ * half the actual event type
+ */
+ writel_relaxed(EVSEL(0), l3pmu->regs + L3_HML3_PM_EVTYPE(idx + 1));
+ writel_relaxed(EVSEL(evsel), l3pmu->regs + L3_HML3_PM_EVTYPE(idx));
+
+ /* Finally, enable the counters */
+ writel_relaxed(PMCNT_RESET, l3pmu->regs + L3_HML3_PM_CNTCTL(idx + 1));
+ writel_relaxed(PMCNTENSET(idx + 1), l3pmu->regs + L3_M_BC_CNTENSET);
+ writel_relaxed(PMCNT_RESET, l3pmu->regs + L3_HML3_PM_CNTCTL(idx));
+ writel_relaxed(PMCNTENSET(idx), l3pmu->regs + L3_M_BC_CNTENSET);
+}
+
+static void qcom_l3_cache__64bit_counter_stop(struct perf_event *event,
+ int flags)
+{
+ struct l3cache_pmu *l3pmu = to_l3cache_pmu(event->pmu);
+ int idx = event->hw.idx;
+ u32 gang = readl_relaxed(l3pmu->regs + L3_M_BC_GANG);
+
+ /* Disable the counters */
+ writel_relaxed(PMCNTENCLR(idx), l3pmu->regs + L3_M_BC_CNTENCLR);
+ writel_relaxed(PMCNTENCLR(idx + 1), l3pmu->regs + L3_M_BC_CNTENCLR);
+
+ /* Disable chaining */
+ writel_relaxed(gang & ~GANG_EN(idx + 1), l3pmu->regs + L3_M_BC_GANG);
+}
+
+static void qcom_l3_cache__64bit_counter_update(struct perf_event *event)
+{
+ struct l3cache_pmu *l3pmu = to_l3cache_pmu(event->pmu);
+ int idx = event->hw.idx;
+ u32 hi, lo;
+ u64 prev, new;
+
+ do {
+ prev = local64_read(&event->hw.prev_count);
+ do {
+ hi = readl_relaxed(l3pmu->regs + L3_HML3_PM_EVCNTR(idx + 1));
+ lo = readl_relaxed(l3pmu->regs + L3_HML3_PM_EVCNTR(idx));
+ } while (hi != readl_relaxed(l3pmu->regs + L3_HML3_PM_EVCNTR(idx + 1)));
+ new = ((u64)hi << 32) | lo;
+ } while (local64_cmpxchg(&event->hw.prev_count, prev, new) != prev);
+
+ local64_add(new - prev, &event->count);
+}
+
+static const struct l3cache_event_ops event_ops_long = {
+ .start = qcom_l3_cache__64bit_counter_start,
+ .stop = qcom_l3_cache__64bit_counter_stop,
+ .update = qcom_l3_cache__64bit_counter_update,
+};
+
+/*
+ * Implementation of standard counter operations
+ *
+ * 32bit counters use a single physical counter and a hardware feature that
+ * asserts the overflow IRQ on the toggling of the most significant bit in
+ * the counter. This feature allows the counters to be left free-running
+ * without needing the usual reprogramming required to properly handle races
+ * during concurrent calls to update.
+ */
+
+static void qcom_l3_cache__32bit_counter_start(struct perf_event *event)
+{
+ struct l3cache_pmu *l3pmu = to_l3cache_pmu(event->pmu);
+ int idx = event->hw.idx;
+ u32 evsel = get_event_type(event);
+ u32 irqctl = readl_relaxed(l3pmu->regs + L3_M_BC_IRQCTL);
+
+ /* Set the counter to assert the overflow IRQ on MSB toggling */
+ writel_relaxed(irqctl | PMIRQONMSBEN(idx), l3pmu->regs + L3_M_BC_IRQCTL);
+
+ /* Initialize the hardware counter and reset prev_count */
+ local64_set(&event->hw.prev_count, 0);
+ writel_relaxed(0, l3pmu->regs + L3_HML3_PM_EVCNTR(idx));
+
+ /* Set the event type */
+ writel_relaxed(EVSEL(evsel), l3pmu->regs + L3_HML3_PM_EVTYPE(idx));
+
+ /* Enable interrupt generation by this counter */
+ writel_relaxed(PMINTENSET(idx), l3pmu->regs + L3_M_BC_INTENSET);
+
+ /* Finally, enable the counter */
+ writel_relaxed(PMCNT_RESET, l3pmu->regs + L3_HML3_PM_CNTCTL(idx));
+ writel_relaxed(PMCNTENSET(idx), l3pmu->regs + L3_M_BC_CNTENSET);
+}
+
+static void qcom_l3_cache__32bit_counter_stop(struct perf_event *event,
+ int flags)
+{
+ struct l3cache_pmu *l3pmu = to_l3cache_pmu(event->pmu);
+ int idx = event->hw.idx;
+ u32 irqctl = readl_relaxed(l3pmu->regs + L3_M_BC_IRQCTL);
+
+ /* Disable the counter */
+ writel_relaxed(PMCNTENCLR(idx), l3pmu->regs + L3_M_BC_CNTENCLR);
+
+ /* Disable interrupt generation by this counter */
+ writel_relaxed(PMINTENCLR(idx), l3pmu->regs + L3_M_BC_INTENCLR);
+
+ /* Set the counter to not assert the overflow IRQ on MSB toggling */
+ writel_relaxed(irqctl & ~PMIRQONMSBEN(idx), l3pmu->regs + L3_M_BC_IRQCTL);
+}
+
+static void qcom_l3_cache__32bit_counter_update(struct perf_event *event)
+{
+ struct l3cache_pmu *l3pmu = to_l3cache_pmu(event->pmu);
+ int idx = event->hw.idx;
+ u32 prev, new;
+
+ do {
+ prev = local64_read(&event->hw.prev_count);
+ new = readl_relaxed(l3pmu->regs + L3_HML3_PM_EVCNTR(idx));
+ } while (local64_cmpxchg(&event->hw.prev_count, prev, new) != prev);
+
+ local64_add(new - prev, &event->count);
+}
+
+static const struct l3cache_event_ops event_ops_std = {
+ .start = qcom_l3_cache__32bit_counter_start,
+ .stop = qcom_l3_cache__32bit_counter_stop,
+ .update = qcom_l3_cache__32bit_counter_update,
+};
+
+/* Retrieve the appropriate operations for the given event */
+static
+const struct l3cache_event_ops *l3cache_event_get_ops(struct perf_event *event)
+{
+ if (event_uses_long_counter(event))
+ return &event_ops_long;
+ else
+ return &event_ops_std;
+}
+
+/*
+ * Top level PMU functions.
+ */
+
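+/*
+ * Reset the slice's counters, filters and interrupt enables to their default
+ * state, then re-enable the basic counter unit.
+ */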
+static inline void qcom_l3_cache__init(struct l3cache_pmu *l3pmu)
+{
+ int i;
+
+ writel_relaxed(BC_RESET, l3pmu->regs + L3_M_BC_CR);
+
+ /*
+ * Use writel for the first programming command to ensure the basic
+ * counter unit is stopped before proceeding
+ */
+ writel(BC_SATROLL_CR_RESET, l3pmu->regs + L3_M_BC_SATROLL_CR);
+
+ writel_relaxed(BC_CNTENCLR_RESET, l3pmu->regs + L3_M_BC_CNTENCLR);
+ writel_relaxed(BC_INTENCLR_RESET, l3pmu->regs + L3_M_BC_INTENCLR);
+ writel_relaxed(PMOVSRCLR_RESET, l3pmu->regs + L3_M_BC_OVSR);
+ writel_relaxed(BC_GANG_RESET, l3pmu->regs + L3_M_BC_GANG);
+ writel_relaxed(BC_IRQCTL_RESET, l3pmu->regs + L3_M_BC_IRQCTL);
+ writel_relaxed(PM_CR_RESET, l3pmu->regs + L3_HML3_PM_CR);
+
+ for (i = 0; i < L3_NUM_COUNTERS; ++i) {
+ writel_relaxed(PMCNT_RESET, l3pmu->regs + L3_HML3_PM_CNTCTL(i));
+ writel_relaxed(EVSEL(0), l3pmu->regs + L3_HML3_PM_EVTYPE(i));
+ }
+
+ writel_relaxed(PM_FLTR_RESET, l3pmu->regs + L3_HML3_PM_FILTRA);
+ writel_relaxed(PM_FLTR_RESET, l3pmu->regs + L3_HML3_PM_FILTRAM);
+ writel_relaxed(PM_FLTR_RESET, l3pmu->regs + L3_HML3_PM_FILTRB);
+ writel_relaxed(PM_FLTR_RESET, l3pmu->regs + L3_HML3_PM_FILTRBM);
+ writel_relaxed(PM_FLTR_RESET, l3pmu->regs + L3_HML3_PM_FILTRC);
+ writel_relaxed(PM_FLTR_RESET, l3pmu->regs + L3_HML3_PM_FILTRCM);
+
+ /*
+ * Use writel here to ensure all programming commands are done
+ * before proceeding
+ */
+ writel(BC_ENABLE, l3pmu->regs + L3_M_BC_CR);
+}
+
+static irqreturn_t qcom_l3_cache__handle_irq(int irq_num, void *data)
+{
+ struct l3cache_pmu *l3pmu = data;
+ /* Read the overflow status register */
+ long status = readl_relaxed(l3pmu->regs + L3_M_BC_OVSR);
+ int idx;
+
+ if (status == 0)
+ return IRQ_NONE;
+
+ /* Clear the bits we read on the overflow status register */
+ writel_relaxed(status, l3pmu->regs + L3_M_BC_OVSR);
+
+ for_each_set_bit(idx, &status, L3_NUM_COUNTERS) {
+ struct perf_event *event;
+ const struct l3cache_event_ops *ops;
+
+ event = l3pmu->events[idx];
+ if (!event)
+ continue;
+
+ /*
+ * Since the IRQ is not enabled for events using long counters,
+ * we should never see one of those here; however, be consistent
+ * and use the ops indirection like in the other operations.
+ */
+
+ ops = l3cache_event_get_ops(event);
+ ops->update(event);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Implementation of abstract pmu functionality required by
+ * the core perf events code.
+ */
+
+static void qcom_l3_cache__pmu_enable(struct pmu *pmu)
+{
+ struct l3cache_pmu *l3pmu = to_l3cache_pmu(pmu);
+
+ /* Ensure the other programming commands are observed before enabling */
+ wmb();
+
+ writel_relaxed(BC_ENABLE, l3pmu->regs + L3_M_BC_CR);
+}
+
+static void qcom_l3_cache__pmu_disable(struct pmu *pmu)
+{
+ struct l3cache_pmu *l3pmu = to_l3cache_pmu(pmu);
+
+ writel_relaxed(0, l3pmu->regs + L3_M_BC_CR);
+
+ /* Ensure the basic counter unit is stopped before proceeding */
+ wmb();
+}
+
+/*
+ * We must NOT create groups containing events from multiple hardware PMUs,
+ * although mixing different software and hardware PMUs is allowed.
+ */
+static bool qcom_l3_cache__validate_event_group(struct perf_event *event)
+{
+ struct perf_event *leader = event->group_leader;
+ struct perf_event *sibling;
+ int counters = 0;
+
+ if (leader->pmu != event->pmu && !is_software_event(leader))
+ return false;
+
+ counters = event_num_counters(event);
+ counters += event_num_counters(leader);
+
+ list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
+ if (is_software_event(sibling))
+ continue;
+ if (sibling->pmu != event->pmu)
+ return false;
+ counters += event_num_counters(sibling);
+ }
+
+ /*
+ * If the group requires more counters than the HW has, it
+ * cannot ever be scheduled.
+ */
+ return counters <= L3_NUM_COUNTERS;
+}
+
+static int qcom_l3_cache__event_init(struct perf_event *event)
+{
+ struct l3cache_pmu *l3pmu = to_l3cache_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+
+ /*
+ * Is the event for this PMU?
+ */
+ if (event->attr.type != event->pmu->type)
+ return -ENOENT;
+
+ /*
+ * There are no per-counter mode filters in the PMU.
+ */
+ if (event->attr.exclude_user || event->attr.exclude_kernel ||
+ event->attr.exclude_hv || event->attr.exclude_idle)
+ return -EINVAL;
+
+ /*
+ * Sampling not supported since these events are not core-attributable.
+ */
+ if (hwc->sample_period)
+ return -EINVAL;
+
+ /*
+ * Task mode is not available: we run the counters as socket counters,
+ * not attributable to any CPU, and therefore cannot attribute per-task.
+ */
+ if (event->cpu < 0)
+ return -EINVAL;
+
+ /* Validate the group */
+ if (!qcom_l3_cache__validate_event_group(event))
+ return -EINVAL;
+
+ hwc->idx = -1;
+
+ /*
+ * Many perf core operations (e.g. event rotation) operate on a
+ * single CPU context. This is obvious for CPU PMUs, where one
+ * expects the same sets of events being observed on all CPUs,
+ * but can lead to issues for off-core PMUs, like this one, where
+ * each event could be theoretically assigned to a different CPU.
+ * To mitigate this, we enforce CPU assignment to one designated
+ * processor (the one described in the "cpumask" attribute exported
+ * by the PMU device). perf user space tools honor this and avoid
+ * opening more than one copy of the events.
+ */
+ event->cpu = cpumask_first(&l3pmu->cpumask);
+
+ return 0;
+}
+
+static void qcom_l3_cache__event_start(struct perf_event *event, int flags)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ const struct l3cache_event_ops *ops = l3cache_event_get_ops(event);
+
+ hwc->state = 0;
+ ops->start(event);
+}
+
+static void qcom_l3_cache__event_stop(struct perf_event *event, int flags)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ const struct l3cache_event_ops *ops = l3cache_event_get_ops(event);
+
+ if (hwc->state & PERF_HES_STOPPED)
+ return;
+
+ ops->stop(event, flags);
+ if (flags & PERF_EF_UPDATE)
+ ops->update(event);
+ hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
+}
+
+static int qcom_l3_cache__event_add(struct perf_event *event, int flags)
+{
+ struct l3cache_pmu *l3pmu = to_l3cache_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ int order = event_uses_long_counter(event) ? 1 : 0;
+ int idx;
+
+ /*
+ * Try to allocate a counter.
+ */
+ idx = bitmap_find_free_region(l3pmu->used_mask, L3_NUM_COUNTERS, order);
+ if (idx < 0)
+ /* The counters are all in use. */
+ return -EAGAIN;
+
+ hwc->idx = idx;
+ hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
+ l3pmu->events[idx] = event;
+
+ if (flags & PERF_EF_START)
+ qcom_l3_cache__event_start(event, 0);
+
+ /* Propagate changes to the userspace mapping. */
+ perf_event_update_userpage(event);
+
+ return 0;
+}
+
+static void qcom_l3_cache__event_del(struct perf_event *event, int flags)
+{
+ struct l3cache_pmu *l3pmu = to_l3cache_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ int order = event_uses_long_counter(event) ? 1 : 0;
+
+ /* Stop and clean up */
+ qcom_l3_cache__event_stop(event, flags | PERF_EF_UPDATE);
+ l3pmu->events[hwc->idx] = NULL;
+ bitmap_release_region(l3pmu->used_mask, hwc->idx, order);
+
+ /* Propagate changes to the userspace mapping. */
+ perf_event_update_userpage(event);
+}
+
+static void qcom_l3_cache__event_read(struct perf_event *event)
+{
+ const struct l3cache_event_ops *ops = l3cache_event_get_ops(event);
+
+ ops->update(event);
+}
+
+/*
+ * Add sysfs attributes
+ *
+ * We export:
+ * - formats, used by perf user space and other tools to configure events
+ * - events, used by perf user space and other tools to create events
+ * symbolically, e.g.:
+ * perf stat -a -e l3cache_0_0/event=read-miss/ ls
+ * perf stat -a -e l3cache_0_0/event=0x21/ ls
+ * - cpumask, used by perf user space and other tools to know on which CPUs
+ * to open the events
+ */
+
+/* formats */
+
+static ssize_t l3cache_pmu_format_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dev_ext_attribute *eattr;
+
+ eattr = container_of(attr, struct dev_ext_attribute, attr);
+ return sprintf(buf, "%s\n", (char *) eattr->var);
+}
+
+#define L3CACHE_PMU_FORMAT_ATTR(_name, _config) \
+ (&((struct dev_ext_attribute[]) { \
+ { .attr = __ATTR(_name, 0444, l3cache_pmu_format_show, NULL), \
+ .var = (void *) _config, } \
+ })[0].attr.attr)
+
+static struct attribute *qcom_l3_cache_pmu_formats[] = {
+ L3CACHE_PMU_FORMAT_ATTR(event, "config:0-7"),
+ L3CACHE_PMU_FORMAT_ATTR(lc, "config:" __stringify(L3_EVENT_LC_BIT)),
+ NULL,
+};
+
+static struct attribute_group qcom_l3_cache_pmu_format_group = {
+ .name = "format",
+ .attrs = qcom_l3_cache_pmu_formats,
+};
+
+/* events */
+
+static ssize_t l3cache_pmu_event_show(struct device *dev,
+ struct device_attribute *attr, char *page)
+{
+ struct perf_pmu_events_attr *pmu_attr;
+
+ pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
+ return sprintf(page, "event=0x%02llx\n", pmu_attr->id);
+}
+
+#define L3CACHE_EVENT_ATTR(_name, _id) \
+ (&((struct perf_pmu_events_attr[]) { \
+ { .attr = __ATTR(_name, 0444, l3cache_pmu_event_show, NULL), \
+ .id = _id, } \
+ })[0].attr.attr)
+
+static struct attribute *qcom_l3_cache_pmu_events[] = {
+ L3CACHE_EVENT_ATTR(cycles, L3_EVENT_CYCLES),
+ L3CACHE_EVENT_ATTR(read-hit, L3_EVENT_READ_HIT),
+ L3CACHE_EVENT_ATTR(read-miss, L3_EVENT_READ_MISS),
+ L3CACHE_EVENT_ATTR(read-hit-d-side, L3_EVENT_READ_HIT_D),
+ L3CACHE_EVENT_ATTR(read-miss-d-side, L3_EVENT_READ_MISS_D),
+ L3CACHE_EVENT_ATTR(write-hit, L3_EVENT_WRITE_HIT),
+ L3CACHE_EVENT_ATTR(write-miss, L3_EVENT_WRITE_MISS),
+ NULL
+};
+
+static struct attribute_group qcom_l3_cache_pmu_events_group = {
+ .name = "events",
+ .attrs = qcom_l3_cache_pmu_events,
+};
+
+/* cpumask */
+
+static ssize_t qcom_l3_cache_pmu_cpumask_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct l3cache_pmu *l3pmu = to_l3cache_pmu(dev_get_drvdata(dev));
+
+ return cpumap_print_to_pagebuf(true, buf, &l3pmu->cpumask);
+}
+
+static DEVICE_ATTR(cpumask, 0444, qcom_l3_cache_pmu_cpumask_show, NULL);
+
+static struct attribute *qcom_l3_cache_pmu_cpumask_attrs[] = {
+ &dev_attr_cpumask.attr,
+ NULL,
+};
+
+static struct attribute_group qcom_l3_cache_pmu_cpumask_attr_group = {
+ .attrs = qcom_l3_cache_pmu_cpumask_attrs,
+};
+
+/*
+ * Per PMU device attribute groups
+ */
+static const struct attribute_group *qcom_l3_cache_pmu_attr_grps[] = {
+ &qcom_l3_cache_pmu_format_group,
+ &qcom_l3_cache_pmu_events_group,
+ &qcom_l3_cache_pmu_cpumask_attr_group,
+ NULL,
+};
+
+/*
+ * Probing functions and data.
+ */
+
+static int qcom_l3_cache_pmu_online_cpu(unsigned int cpu, struct hlist_node *node)
+{
+ struct l3cache_pmu *l3pmu = hlist_entry_safe(node, struct l3cache_pmu, node);
+
+ /* If there is no CPU/PMU association, pick this CPU */
+ if (cpumask_empty(&l3pmu->cpumask))
+ cpumask_set_cpu(cpu, &l3pmu->cpumask);
+
+ return 0;
+}
+
+static int qcom_l3_cache_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
+{
+ struct l3cache_pmu *l3pmu = hlist_entry_safe(node, struct l3cache_pmu, node);
+ unsigned int target;
+
+ if (!cpumask_test_and_clear_cpu(cpu, &l3pmu->cpumask))
+ return 0;
+ target = cpumask_any_but(cpu_online_mask, cpu);
+ if (target >= nr_cpu_ids)
+ return 0;
+ perf_pmu_migrate_context(&l3pmu->pmu, cpu, target);
+ cpumask_set_cpu(target, &l3pmu->cpumask);
+ return 0;
+}
+
+static int qcom_l3_cache_pmu_probe(struct platform_device *pdev)
+{
+ struct l3cache_pmu *l3pmu;
+ struct acpi_device *acpi_dev;
+ struct resource *memrc;
+ int ret;
+ char *name;
+
+ /* Initialize the PMU data structures */
+
+ acpi_dev = ACPI_COMPANION(&pdev->dev);
+ if (!acpi_dev)
+ return -ENODEV;
+
+ l3pmu = devm_kzalloc(&pdev->dev, sizeof(*l3pmu), GFP_KERNEL);
+ name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "l3cache_%s_%s",
+ acpi_dev->parent->pnp.unique_id, acpi_dev->pnp.unique_id);
+ if (!l3pmu || !name)
+ return -ENOMEM;
+
+ l3pmu->pmu = (struct pmu) {
+ .task_ctx_nr = perf_invalid_context,
+
+ .pmu_enable = qcom_l3_cache__pmu_enable,
+ .pmu_disable = qcom_l3_cache__pmu_disable,
+ .event_init = qcom_l3_cache__event_init,
+ .add = qcom_l3_cache__event_add,
+ .del = qcom_l3_cache__event_del,
+ .start = qcom_l3_cache__event_start,
+ .stop = qcom_l3_cache__event_stop,
+ .read = qcom_l3_cache__event_read,
+
+ .attr_groups = qcom_l3_cache_pmu_attr_grps,
+ };
+
+ memrc = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ l3pmu->regs = devm_ioremap_resource(&pdev->dev, memrc);
+ if (IS_ERR(l3pmu->regs)) {
+ dev_err(&pdev->dev, "Can't map PMU @%pa\n", &memrc->start);
+ return PTR_ERR(l3pmu->regs);
+ }
+
+ qcom_l3_cache__init(l3pmu);
+
+ ret = platform_get_irq(pdev, 0);
+ if (ret <= 0)
+ return ret;
+
+ ret = devm_request_irq(&pdev->dev, ret, qcom_l3_cache__handle_irq, 0,
+ name, l3pmu);
+ if (ret) {
+ dev_err(&pdev->dev, "Request for IRQ failed for slice @%pa\n",
+ &memrc->start);
+ return ret;
+ }
+
+ /* Add this instance to the list used by the offline callback */
+ ret = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_QCOM_L3_ONLINE, &l3pmu->node);
+ if (ret) {
+ dev_err(&pdev->dev, "Error %d registering hotplug", ret);
+ return ret;
+ }
+
+ ret = perf_pmu_register(&l3pmu->pmu, name, -1);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to register L3 cache PMU (%d)\n", ret);
+ return ret;
+ }
+
+ dev_info(&pdev->dev, "Registered %s, type: %d\n", name, l3pmu->pmu.type);
+
+ return 0;
+}
+
+static const struct acpi_device_id qcom_l3_cache_pmu_acpi_match[] = {
+ { "QCOM8081", },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, qcom_l3_cache_pmu_acpi_match);
+
+static struct platform_driver qcom_l3_cache_pmu_driver = {
+ .driver = {
+ .name = "qcom-l3cache-pmu",
+ .acpi_match_table = ACPI_PTR(qcom_l3_cache_pmu_acpi_match),
+ },
+ .probe = qcom_l3_cache_pmu_probe,
+};
+
+static int __init register_qcom_l3_cache_pmu_driver(void)
+{
+ int ret;
+
+ /* Install a hook to update the reader CPU in case it goes offline */
+ ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_QCOM_L3_ONLINE,
+ "perf/qcom/l3cache:online",
+ qcom_l3_cache_pmu_online_cpu,
+ qcom_l3_cache_pmu_offline_cpu);
+ if (ret)
+ return ret;
+
+ return platform_driver_register(&qcom_l3_cache_pmu_driver);
+}
+device_initcall(register_qcom_l3_cache_pmu_driver);
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig
index 005cadb7a3f8..afaf7b643eeb 100644
--- a/drivers/phy/Kconfig
+++ b/drivers/phy/Kconfig
@@ -439,6 +439,25 @@ config PHY_STIH407_USB
Enable this support to enable the picoPHY device used by USB2
and USB3 controllers on STMicroelectronics STiH407 SoC families.
+config PHY_QCOM_QMP
+ tristate "Qualcomm QMP PHY Driver"
+ depends on OF && COMMON_CLK && (ARCH_QCOM || COMPILE_TEST)
+ select GENERIC_PHY
+ help
+ Enable this to support the QMP PHY transceiver that is used
+ with controllers such as PCIe, UFS, and USB on Qualcomm chips.
+
+config PHY_QCOM_QUSB2
+ tristate "Qualcomm QUSB2 PHY Driver"
+ depends on OF && (ARCH_QCOM || COMPILE_TEST)
+ depends on NVMEM || !NVMEM
+ select GENERIC_PHY
+ help
+ Enable this to support the HighSpeed QUSB2 PHY transceiver for USB
+ controllers on Qualcomm chips. This driver supports the high-speed
+ PHY which is usually paired with either the ChipIdea or Synopsys DWC3
+ USB IPs on MSM SOCs.
+
config PHY_QCOM_UFS
tristate "Qualcomm UFS PHY driver"
depends on OF && ARCH_QCOM
diff --git a/drivers/phy/Makefile b/drivers/phy/Makefile
index dd8f3b5d2918..f8047b4639fa 100644
--- a/drivers/phy/Makefile
+++ b/drivers/phy/Makefile
@@ -50,6 +50,8 @@ obj-$(CONFIG_PHY_ST_SPEAR1310_MIPHY) += phy-spear1310-miphy.o
obj-$(CONFIG_PHY_ST_SPEAR1340_MIPHY) += phy-spear1340-miphy.o
obj-$(CONFIG_PHY_XGENE) += phy-xgene.o
obj-$(CONFIG_PHY_STIH407_USB) += phy-stih407-usb.o
+obj-$(CONFIG_PHY_QCOM_QMP) += phy-qcom-qmp.o
+obj-$(CONFIG_PHY_QCOM_QUSB2) += phy-qcom-qusb2.o
obj-$(CONFIG_PHY_QCOM_UFS) += phy-qcom-ufs.o
obj-$(CONFIG_PHY_QCOM_UFS) += phy-qcom-ufs-qmp-20nm.o
obj-$(CONFIG_PHY_QCOM_UFS) += phy-qcom-ufs-qmp-14nm.o
diff --git a/drivers/phy/phy-bcm-ns-usb3.c b/drivers/phy/phy-bcm-ns-usb3.c
index f420fa4bebfc..22b5e7047fa6 100644
--- a/drivers/phy/phy-bcm-ns-usb3.c
+++ b/drivers/phy/phy-bcm-ns-usb3.c
@@ -2,6 +2,7 @@
* Broadcom Northstar USB 3.0 PHY Driver
*
* Copyright (C) 2016 Rafał Miłecki <rafal@milecki.pl>
+ * Copyright (C) 2016 Broadcom
*
* All magic values used for initialization (and related comments) were obtained
* from Broadcom's SDK:
@@ -23,6 +24,23 @@
#define BCM_NS_USB3_MII_MNG_TIMEOUT_US 1000 /* usecs */
+#define BCM_NS_USB3_PHY_BASE_ADDR_REG 0x1f
+#define BCM_NS_USB3_PHY_PLL30_BLOCK 0x8000
+#define BCM_NS_USB3_PHY_TX_PMD_BLOCK 0x8040
+#define BCM_NS_USB3_PHY_PIPE_BLOCK 0x8060
+
+/* Registers of PLL30 block */
+#define BCM_NS_USB3_PLL_CONTROL 0x01
+#define BCM_NS_USB3_PLLA_CONTROL0 0x0a
+#define BCM_NS_USB3_PLLA_CONTROL1 0x0b
+
+/* Registers of TX PMD block */
+#define BCM_NS_USB3_TX_PMD_CONTROL1 0x01
+
+/* Registers of PIPE block */
+#define BCM_NS_USB3_LFPS_CMP 0x02
+#define BCM_NS_USB3_LFPS_DEGLITCH 0x03
+
enum bcm_ns_family {
BCM_NS_UNKNOWN,
BCM_NS_AX,
@@ -76,8 +94,10 @@ static inline int bcm_ns_usb3_mii_mng_wait_idle(struct bcm_ns_usb3 *usb3)
usecs_to_jiffies(BCM_NS_USB3_MII_MNG_TIMEOUT_US));
}
-static int bcm_ns_usb3_mii_mng_write32(struct bcm_ns_usb3 *usb3, u32 value)
+static int bcm_ns_usb3_mdio_phy_write(struct bcm_ns_usb3 *usb3, u16 reg,
+ u16 value)
{
+ u32 tmp = 0;
int err;
err = bcm_ns_usb3_mii_mng_wait_idle(usb3);
@@ -86,7 +106,11 @@ static int bcm_ns_usb3_mii_mng_write32(struct bcm_ns_usb3 *usb3, u32 value)
return err;
}
- writel(value, usb3->ccb_mii + BCMA_CCB_MII_MNG_CMD_DATA);
+ /* TODO: Use a proper MDIO bus layer */
+ tmp |= 0x58020000; /* Magic value for MDIO PHY write */
+ tmp |= reg << 18;
+ tmp |= value;
+ writel(tmp, usb3->ccb_mii + BCMA_CCB_MII_MNG_CMD_DATA);
return 0;
}
@@ -102,21 +126,22 @@ static int bcm_ns_usb3_phy_init_ns_bx(struct bcm_ns_usb3 *usb3)
udelay(2);
/* USB3 PLL Block */
- err = bcm_ns_usb3_mii_mng_write32(usb3, 0x587e8000);
+ err = bcm_ns_usb3_mdio_phy_write(usb3, BCM_NS_USB3_PHY_BASE_ADDR_REG,
+ BCM_NS_USB3_PHY_PLL30_BLOCK);
if (err < 0)
return err;
/* Assert Ana_Pllseq start */
- bcm_ns_usb3_mii_mng_write32(usb3, 0x58061000);
+ bcm_ns_usb3_mdio_phy_write(usb3, BCM_NS_USB3_PLL_CONTROL, 0x1000);
/* Assert CML Divider ratio to 26 */
- bcm_ns_usb3_mii_mng_write32(usb3, 0x582a6400);
+ bcm_ns_usb3_mdio_phy_write(usb3, BCM_NS_USB3_PLLA_CONTROL0, 0x6400);
/* Asserting PLL Reset */
- bcm_ns_usb3_mii_mng_write32(usb3, 0x582ec000);
+ bcm_ns_usb3_mdio_phy_write(usb3, BCM_NS_USB3_PLLA_CONTROL1, 0xc000);
/* Deasserting PLL Reset */
- bcm_ns_usb3_mii_mng_write32(usb3, 0x582e8000);
+ bcm_ns_usb3_mdio_phy_write(usb3, BCM_NS_USB3_PLLA_CONTROL1, 0x8000);
/* Waiting MII Mgt interface idle */
bcm_ns_usb3_mii_mng_wait_idle(usb3);
@@ -125,22 +150,24 @@ static int bcm_ns_usb3_phy_init_ns_bx(struct bcm_ns_usb3 *usb3)
writel(0, usb3->dmp + BCMA_RESET_CTL);
/* PLL frequency monitor enable */
- bcm_ns_usb3_mii_mng_write32(usb3, 0x58069000);
+ bcm_ns_usb3_mdio_phy_write(usb3, BCM_NS_USB3_PLL_CONTROL, 0x9000);
/* PIPE Block */
- bcm_ns_usb3_mii_mng_write32(usb3, 0x587e8060);
+ bcm_ns_usb3_mdio_phy_write(usb3, BCM_NS_USB3_PHY_BASE_ADDR_REG,
+ BCM_NS_USB3_PHY_PIPE_BLOCK);
/* CMPMAX & CMPMINTH setting */
- bcm_ns_usb3_mii_mng_write32(usb3, 0x580af30d);
+ bcm_ns_usb3_mdio_phy_write(usb3, BCM_NS_USB3_LFPS_CMP, 0xf30d);
/* DEGLITCH MIN & MAX setting */
- bcm_ns_usb3_mii_mng_write32(usb3, 0x580e6302);
+ bcm_ns_usb3_mdio_phy_write(usb3, BCM_NS_USB3_LFPS_DEGLITCH, 0x6302);
/* TXPMD block */
- bcm_ns_usb3_mii_mng_write32(usb3, 0x587e8040);
+ bcm_ns_usb3_mdio_phy_write(usb3, BCM_NS_USB3_PHY_BASE_ADDR_REG,
+ BCM_NS_USB3_PHY_TX_PMD_BLOCK);
/* Enabling SSC */
- bcm_ns_usb3_mii_mng_write32(usb3, 0x58061003);
+ bcm_ns_usb3_mdio_phy_write(usb3, BCM_NS_USB3_TX_PMD_CONTROL1, 0x1003);
/* Waiting MII Mgt interface idle */
bcm_ns_usb3_mii_mng_wait_idle(usb3);
@@ -159,22 +186,24 @@ static int bcm_ns_usb3_phy_init_ns_ax(struct bcm_ns_usb3 *usb3)
udelay(2);
/* PLL30 block */
- err = bcm_ns_usb3_mii_mng_write32(usb3, 0x587e8000);
+ err = bcm_ns_usb3_mdio_phy_write(usb3, BCM_NS_USB3_PHY_BASE_ADDR_REG,
+ BCM_NS_USB3_PHY_PLL30_BLOCK);
if (err < 0)
return err;
- bcm_ns_usb3_mii_mng_write32(usb3, 0x582a6400);
+ bcm_ns_usb3_mdio_phy_write(usb3, BCM_NS_USB3_PLLA_CONTROL0, 0x6400);
- bcm_ns_usb3_mii_mng_write32(usb3, 0x587e80e0);
+ bcm_ns_usb3_mdio_phy_write(usb3, BCM_NS_USB3_PHY_BASE_ADDR_REG, 0x80e0);
- bcm_ns_usb3_mii_mng_write32(usb3, 0x580a009c);
+ bcm_ns_usb3_mdio_phy_write(usb3, 0x02, 0x009c);
/* Enable SSC */
- bcm_ns_usb3_mii_mng_write32(usb3, 0x587e8040);
+ bcm_ns_usb3_mdio_phy_write(usb3, BCM_NS_USB3_PHY_BASE_ADDR_REG,
+ BCM_NS_USB3_PHY_TX_PMD_BLOCK);
- bcm_ns_usb3_mii_mng_write32(usb3, 0x580a21d3);
+ bcm_ns_usb3_mdio_phy_write(usb3, 0x02, 0x21d3);
- bcm_ns_usb3_mii_mng_write32(usb3, 0x58061003);
+ bcm_ns_usb3_mdio_phy_write(usb3, BCM_NS_USB3_TX_PMD_CONTROL1, 0x1003);
/* Waiting MII Mgt interface idle */
bcm_ns_usb3_mii_mng_wait_idle(usb3);
diff --git a/drivers/phy/phy-exynos-dp-video.c b/drivers/phy/phy-exynos-dp-video.c
index 34b06154e5d9..bb3279dbf88c 100644
--- a/drivers/phy/phy-exynos-dp-video.c
+++ b/drivers/phy/phy-exynos-dp-video.c
@@ -14,12 +14,12 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mfd/syscon.h>
-#include <linux/mfd/syscon/exynos5-pmu.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
+#include <linux/soc/samsung/exynos-regs-pmu.h>
struct exynos_dp_video_phy_drvdata {
u32 phy_ctrl_offset;
@@ -36,7 +36,7 @@ static int exynos_dp_video_phy_power_on(struct phy *phy)
/* Disable power isolation on DP-PHY */
return regmap_update_bits(state->regs, state->drvdata->phy_ctrl_offset,
- EXYNOS5_PHY_ENABLE, EXYNOS5_PHY_ENABLE);
+ EXYNOS4_PHY_ENABLE, EXYNOS4_PHY_ENABLE);
}
static int exynos_dp_video_phy_power_off(struct phy *phy)
@@ -45,7 +45,7 @@ static int exynos_dp_video_phy_power_off(struct phy *phy)
/* Enable power isolation on DP-PHY */
return regmap_update_bits(state->regs, state->drvdata->phy_ctrl_offset,
- EXYNOS5_PHY_ENABLE, 0);
+ EXYNOS4_PHY_ENABLE, 0);
}
static const struct phy_ops exynos_dp_video_phy_ops = {
diff --git a/drivers/phy/phy-exynos-mipi-video.c b/drivers/phy/phy-exynos-mipi-video.c
index 6bee04cc4d53..c198886f80a3 100644
--- a/drivers/phy/phy-exynos-mipi-video.c
+++ b/drivers/phy/phy-exynos-mipi-video.c
@@ -12,8 +12,6 @@
#include <linux/err.h>
#include <linux/io.h>
#include <linux/kernel.h>
-#include <linux/mfd/syscon/exynos4-pmu.h>
-#include <linux/mfd/syscon/exynos5-pmu.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
@@ -21,6 +19,7 @@
#include <linux/phy/phy.h>
#include <linux/regmap.h>
#include <linux/spinlock.h>
+#include <linux/soc/samsung/exynos-regs-pmu.h>
#include <linux/mfd/syscon.h>
enum exynos_mipi_phy_id {
@@ -64,7 +63,7 @@ static const struct mipi_phy_device_desc s5pv210_mipi_phy = {
{
/* EXYNOS_MIPI_PHY_ID_CSIS0 */
.coupled_phy_id = EXYNOS_MIPI_PHY_ID_DSIM0,
- .enable_val = EXYNOS4_MIPI_PHY_ENABLE,
+ .enable_val = EXYNOS4_PHY_ENABLE,
.enable_reg = EXYNOS4_MIPI_PHY_CONTROL(0),
.enable_map = EXYNOS_MIPI_REGMAP_PMU,
.resetn_val = EXYNOS4_MIPI_PHY_SRESETN,
@@ -73,7 +72,7 @@ static const struct mipi_phy_device_desc s5pv210_mipi_phy = {
}, {
/* EXYNOS_MIPI_PHY_ID_DSIM0 */
.coupled_phy_id = EXYNOS_MIPI_PHY_ID_CSIS0,
- .enable_val = EXYNOS4_MIPI_PHY_ENABLE,
+ .enable_val = EXYNOS4_PHY_ENABLE,
.enable_reg = EXYNOS4_MIPI_PHY_CONTROL(0),
.enable_map = EXYNOS_MIPI_REGMAP_PMU,
.resetn_val = EXYNOS4_MIPI_PHY_MRESETN,
@@ -82,7 +81,7 @@ static const struct mipi_phy_device_desc s5pv210_mipi_phy = {
}, {
/* EXYNOS_MIPI_PHY_ID_CSIS1 */
.coupled_phy_id = EXYNOS_MIPI_PHY_ID_DSIM1,
- .enable_val = EXYNOS4_MIPI_PHY_ENABLE,
+ .enable_val = EXYNOS4_PHY_ENABLE,
.enable_reg = EXYNOS4_MIPI_PHY_CONTROL(1),
.enable_map = EXYNOS_MIPI_REGMAP_PMU,
.resetn_val = EXYNOS4_MIPI_PHY_SRESETN,
@@ -91,7 +90,7 @@ static const struct mipi_phy_device_desc s5pv210_mipi_phy = {
}, {
/* EXYNOS_MIPI_PHY_ID_DSIM1 */
.coupled_phy_id = EXYNOS_MIPI_PHY_ID_CSIS1,
- .enable_val = EXYNOS4_MIPI_PHY_ENABLE,
+ .enable_val = EXYNOS4_PHY_ENABLE,
.enable_reg = EXYNOS4_MIPI_PHY_CONTROL(1),
.enable_map = EXYNOS_MIPI_REGMAP_PMU,
.resetn_val = EXYNOS4_MIPI_PHY_MRESETN,
@@ -109,47 +108,47 @@ static const struct mipi_phy_device_desc exynos5420_mipi_phy = {
{
/* EXYNOS_MIPI_PHY_ID_CSIS0 */
.coupled_phy_id = EXYNOS_MIPI_PHY_ID_DSIM0,
- .enable_val = EXYNOS5_PHY_ENABLE,
- .enable_reg = EXYNOS5420_MIPI_PHY0_CONTROL,
+ .enable_val = EXYNOS4_PHY_ENABLE,
+ .enable_reg = EXYNOS5420_MIPI_PHY_CONTROL(0),
.enable_map = EXYNOS_MIPI_REGMAP_PMU,
- .resetn_val = EXYNOS5_MIPI_PHY_S_RESETN,
- .resetn_reg = EXYNOS5420_MIPI_PHY0_CONTROL,
+ .resetn_val = EXYNOS4_MIPI_PHY_SRESETN,
+ .resetn_reg = EXYNOS5420_MIPI_PHY_CONTROL(0),
.resetn_map = EXYNOS_MIPI_REGMAP_PMU,
}, {
/* EXYNOS_MIPI_PHY_ID_DSIM0 */
.coupled_phy_id = EXYNOS_MIPI_PHY_ID_CSIS0,
- .enable_val = EXYNOS5_PHY_ENABLE,
- .enable_reg = EXYNOS5420_MIPI_PHY0_CONTROL,
+ .enable_val = EXYNOS4_PHY_ENABLE,
+ .enable_reg = EXYNOS5420_MIPI_PHY_CONTROL(0),
.enable_map = EXYNOS_MIPI_REGMAP_PMU,
- .resetn_val = EXYNOS5_MIPI_PHY_M_RESETN,
- .resetn_reg = EXYNOS5420_MIPI_PHY0_CONTROL,
+ .resetn_val = EXYNOS4_MIPI_PHY_MRESETN,
+ .resetn_reg = EXYNOS5420_MIPI_PHY_CONTROL(0),
.resetn_map = EXYNOS_MIPI_REGMAP_PMU,
}, {
/* EXYNOS_MIPI_PHY_ID_CSIS1 */
.coupled_phy_id = EXYNOS_MIPI_PHY_ID_DSIM1,
- .enable_val = EXYNOS5_PHY_ENABLE,
- .enable_reg = EXYNOS5420_MIPI_PHY1_CONTROL,
+ .enable_val = EXYNOS4_PHY_ENABLE,
+ .enable_reg = EXYNOS5420_MIPI_PHY_CONTROL(1),
.enable_map = EXYNOS_MIPI_REGMAP_PMU,
- .resetn_val = EXYNOS5_MIPI_PHY_S_RESETN,
- .resetn_reg = EXYNOS5420_MIPI_PHY1_CONTROL,
+ .resetn_val = EXYNOS4_MIPI_PHY_SRESETN,
+ .resetn_reg = EXYNOS5420_MIPI_PHY_CONTROL(1),
.resetn_map = EXYNOS_MIPI_REGMAP_PMU,
}, {
/* EXYNOS_MIPI_PHY_ID_DSIM1 */
.coupled_phy_id = EXYNOS_MIPI_PHY_ID_CSIS1,
- .enable_val = EXYNOS5_PHY_ENABLE,
- .enable_reg = EXYNOS5420_MIPI_PHY1_CONTROL,
+ .enable_val = EXYNOS4_PHY_ENABLE,
+ .enable_reg = EXYNOS5420_MIPI_PHY_CONTROL(1),
.enable_map = EXYNOS_MIPI_REGMAP_PMU,
- .resetn_val = EXYNOS5_MIPI_PHY_M_RESETN,
- .resetn_reg = EXYNOS5420_MIPI_PHY1_CONTROL,
+ .resetn_val = EXYNOS4_MIPI_PHY_MRESETN,
+ .resetn_reg = EXYNOS5420_MIPI_PHY_CONTROL(1),
.resetn_map = EXYNOS_MIPI_REGMAP_PMU,
}, {
/* EXYNOS_MIPI_PHY_ID_CSIS2 */
.coupled_phy_id = EXYNOS_MIPI_PHY_ID_NONE,
- .enable_val = EXYNOS5_PHY_ENABLE,
- .enable_reg = EXYNOS5420_MIPI_PHY2_CONTROL,
+ .enable_val = EXYNOS4_PHY_ENABLE,
+ .enable_reg = EXYNOS5420_MIPI_PHY_CONTROL(2),
.enable_map = EXYNOS_MIPI_REGMAP_PMU,
- .resetn_val = EXYNOS5_MIPI_PHY_S_RESETN,
- .resetn_reg = EXYNOS5420_MIPI_PHY2_CONTROL,
+ .resetn_val = EXYNOS4_MIPI_PHY_SRESETN,
+ .resetn_reg = EXYNOS5420_MIPI_PHY_CONTROL(2),
.resetn_map = EXYNOS_MIPI_REGMAP_PMU,
},
},
@@ -172,8 +171,8 @@ static const struct mipi_phy_device_desc exynos5433_mipi_phy = {
{
/* EXYNOS_MIPI_PHY_ID_CSIS0 */
.coupled_phy_id = EXYNOS_MIPI_PHY_ID_DSIM0,
- .enable_val = EXYNOS5_PHY_ENABLE,
- .enable_reg = EXYNOS5433_MIPI_PHY0_CONTROL,
+ .enable_val = EXYNOS4_PHY_ENABLE,
+ .enable_reg = EXYNOS4_MIPI_PHY_CONTROL(0),
.enable_map = EXYNOS_MIPI_REGMAP_PMU,
.resetn_val = BIT(0),
.resetn_reg = EXYNOS5433_SYSREG_CAM0_MIPI_DPHY_CON,
@@ -181,8 +180,8 @@ static const struct mipi_phy_device_desc exynos5433_mipi_phy = {
}, {
/* EXYNOS_MIPI_PHY_ID_DSIM0 */
.coupled_phy_id = EXYNOS_MIPI_PHY_ID_CSIS0,
- .enable_val = EXYNOS5_PHY_ENABLE,
- .enable_reg = EXYNOS5433_MIPI_PHY0_CONTROL,
+ .enable_val = EXYNOS4_PHY_ENABLE,
+ .enable_reg = EXYNOS4_MIPI_PHY_CONTROL(0),
.enable_map = EXYNOS_MIPI_REGMAP_PMU,
.resetn_val = BIT(0),
.resetn_reg = EXYNOS5433_SYSREG_DISP_MIPI_PHY,
@@ -190,8 +189,8 @@ static const struct mipi_phy_device_desc exynos5433_mipi_phy = {
}, {
/* EXYNOS_MIPI_PHY_ID_CSIS1 */
.coupled_phy_id = EXYNOS_MIPI_PHY_ID_NONE,
- .enable_val = EXYNOS5_PHY_ENABLE,
- .enable_reg = EXYNOS5433_MIPI_PHY1_CONTROL,
+ .enable_val = EXYNOS4_PHY_ENABLE,
+ .enable_reg = EXYNOS4_MIPI_PHY_CONTROL(1),
.enable_map = EXYNOS_MIPI_REGMAP_PMU,
.resetn_val = BIT(1),
.resetn_reg = EXYNOS5433_SYSREG_CAM0_MIPI_DPHY_CON,
@@ -199,8 +198,8 @@ static const struct mipi_phy_device_desc exynos5433_mipi_phy = {
}, {
/* EXYNOS_MIPI_PHY_ID_DSIM1 */
.coupled_phy_id = EXYNOS_MIPI_PHY_ID_NONE,
- .enable_val = EXYNOS5_PHY_ENABLE,
- .enable_reg = EXYNOS5433_MIPI_PHY1_CONTROL,
+ .enable_val = EXYNOS4_PHY_ENABLE,
+ .enable_reg = EXYNOS4_MIPI_PHY_CONTROL(1),
.enable_map = EXYNOS_MIPI_REGMAP_PMU,
.resetn_val = BIT(1),
.resetn_reg = EXYNOS5433_SYSREG_DISP_MIPI_PHY,
@@ -208,8 +207,8 @@ static const struct mipi_phy_device_desc exynos5433_mipi_phy = {
}, {
/* EXYNOS_MIPI_PHY_ID_CSIS2 */
.coupled_phy_id = EXYNOS_MIPI_PHY_ID_NONE,
- .enable_val = EXYNOS5_PHY_ENABLE,
- .enable_reg = EXYNOS5433_MIPI_PHY2_CONTROL,
+ .enable_val = EXYNOS4_PHY_ENABLE,
+ .enable_reg = EXYNOS4_MIPI_PHY_CONTROL(2),
.enable_map = EXYNOS_MIPI_REGMAP_PMU,
.resetn_val = BIT(0),
.resetn_reg = EXYNOS5433_SYSREG_CAM1_MIPI_DPHY_CON,
diff --git a/drivers/phy/phy-exynos-pcie.c b/drivers/phy/phy-exynos-pcie.c
index 60baf25d98e2..a89c12faff39 100644
--- a/drivers/phy/phy-exynos-pcie.c
+++ b/drivers/phy/phy-exynos-pcie.c
@@ -14,8 +14,8 @@
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/iopoll.h>
+#include <linux/init.h>
#include <linux/mfd/syscon.h>
-#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
@@ -228,7 +228,6 @@ static const struct of_device_id exynos_pcie_phy_match[] = {
},
{},
};
-MODULE_DEVICE_TABLE(of, exynos_pcie_phy_match);
static int exynos_pcie_phy_probe(struct platform_device *pdev)
{
@@ -278,8 +277,5 @@ static struct platform_driver exynos_pcie_phy_driver = {
.name = "exynos_pcie_phy",
}
};
-module_platform_driver(exynos_pcie_phy_driver);
-MODULE_DESCRIPTION("Samsung S5P/EXYNOS SoC PCIe PHY driver");
-MODULE_AUTHOR("Jaehoon Chung <jh80.chung@samsung.com>");
-MODULE_LICENSE("GPL v2");
+builtin_platform_driver(exynos_pcie_phy_driver);
diff --git a/drivers/phy/phy-exynos5-usbdrd.c b/drivers/phy/phy-exynos5-usbdrd.c
index 07ed608905ac..7c41daa2c625 100644
--- a/drivers/phy/phy-exynos5-usbdrd.c
+++ b/drivers/phy/phy-exynos5-usbdrd.c
@@ -22,9 +22,9 @@
#include <linux/platform_device.h>
#include <linux/mutex.h>
#include <linux/mfd/syscon.h>
-#include <linux/mfd/syscon/exynos5-pmu.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
+#include <linux/soc/samsung/exynos-regs-pmu.h>
/* Exynos USB PHY registers */
#define EXYNOS5_FSEL_9MHZ6 0x0
@@ -235,10 +235,10 @@ static void exynos5_usbdrd_phy_isol(struct phy_usb_instance *inst,
if (!inst->reg_pmu)
return;
- val = on ? 0 : EXYNOS5_PHY_ENABLE;
+ val = on ? 0 : EXYNOS4_PHY_ENABLE;
regmap_update_bits(inst->reg_pmu, inst->pmu_offset,
- EXYNOS5_PHY_ENABLE, val);
+ EXYNOS4_PHY_ENABLE, val);
}
/*
diff --git a/drivers/phy/phy-meson8b-usb2.c b/drivers/phy/phy-meson8b-usb2.c
index 33c9f4ba157d..30f56a6a411f 100644
--- a/drivers/phy/phy-meson8b-usb2.c
+++ b/drivers/phy/phy-meson8b-usb2.c
@@ -81,9 +81,9 @@
#define REG_ADP_BC_ACA_PIN_GND BIT(25)
#define REG_ADP_BC_ACA_PIN_FLOAT BIT(26)
-#define REG_DBG_UART 0x14
+#define REG_DBG_UART 0x10
-#define REG_TEST 0x18
+#define REG_TEST 0x14
#define REG_TEST_DATA_IN_MASK GENMASK(3, 0)
#define REG_TEST_EN_MASK GENMASK(7, 4)
#define REG_TEST_ADDR_MASK GENMASK(11, 8)
@@ -93,7 +93,7 @@
#define REG_TEST_DATA_OUT_MASK GENMASK(19, 16)
#define REG_TEST_DISABLE_ID_PULLUP BIT(20)
-#define REG_TUNE 0x1c
+#define REG_TUNE 0x18
#define REG_TUNE_TX_RES_TUNE_MASK GENMASK(1, 0)
#define REG_TUNE_TX_HSXV_TUNE_MASK GENMASK(3, 2)
#define REG_TUNE_TX_VREF_TUNE_MASK GENMASK(7, 4)
diff --git a/drivers/phy/phy-mt65xx-usb3.c b/drivers/phy/phy-mt65xx-usb3.c
index d9720675b9db..59b110f795c3 100644
--- a/drivers/phy/phy-mt65xx-usb3.c
+++ b/drivers/phy/phy-mt65xx-usb3.c
@@ -23,47 +23,55 @@
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
-/*
- * for sifslv2 register, but exclude port's;
- * relative to USB3_SIF2_BASE base address
- */
-#define SSUSB_SIFSLV_SPLLC 0x0000
-#define SSUSB_SIFSLV_U2FREQ 0x0100
-
-/* offsets of sub-segment in each port registers */
-#define SSUSB_SIFSLV_U2PHY_COM_BASE 0x0000
-#define SSUSB_SIFSLV_U3PHYD_BASE 0x0100
-#define SSUSB_USB30_PHYA_SIV_B_BASE 0x0300
-#define SSUSB_SIFSLV_U3PHYA_DA_BASE 0x0400
-
-#define U3P_USBPHYACR0 (SSUSB_SIFSLV_U2PHY_COM_BASE + 0x0000)
+/* version V1 sub-banks offset base address */
+/* banks shared by multiple phys */
+#define SSUSB_SIFSLV_V1_SPLLC 0x000 /* shared by u3 phys */
+#define SSUSB_SIFSLV_V1_U2FREQ 0x100 /* shared by u2 phys */
+/* u2 phy bank */
+#define SSUSB_SIFSLV_V1_U2PHY_COM 0x000
+/* u3 phy banks */
+#define SSUSB_SIFSLV_V1_U3PHYD 0x000
+#define SSUSB_SIFSLV_V1_U3PHYA 0x200
+
+/* version V2 sub-banks offset base address */
+/* u2 phy banks */
+#define SSUSB_SIFSLV_V2_MISC 0x000
+#define SSUSB_SIFSLV_V2_U2FREQ 0x100
+#define SSUSB_SIFSLV_V2_U2PHY_COM 0x300
+/* u3 phy banks */
+#define SSUSB_SIFSLV_V2_SPLLC 0x000
+#define SSUSB_SIFSLV_V2_CHIP 0x100
+#define SSUSB_SIFSLV_V2_U3PHYD 0x200
+#define SSUSB_SIFSLV_V2_U3PHYA 0x400
+
+#define U3P_USBPHYACR0 0x000
#define PA0_RG_U2PLL_FORCE_ON BIT(15)
+#define PA0_RG_USB20_INTR_EN BIT(5)
-#define U3P_USBPHYACR2 (SSUSB_SIFSLV_U2PHY_COM_BASE + 0x0008)
+#define U3P_USBPHYACR2 0x008
#define PA2_RG_SIF_U2PLL_FORCE_EN BIT(18)
-#define U3P_USBPHYACR5 (SSUSB_SIFSLV_U2PHY_COM_BASE + 0x0014)
+#define U3P_USBPHYACR5 0x014
#define PA5_RG_U2_HSTX_SRCAL_EN BIT(15)
#define PA5_RG_U2_HSTX_SRCTRL GENMASK(14, 12)
#define PA5_RG_U2_HSTX_SRCTRL_VAL(x) ((0x7 & (x)) << 12)
#define PA5_RG_U2_HS_100U_U3_EN BIT(11)
-#define U3P_USBPHYACR6 (SSUSB_SIFSLV_U2PHY_COM_BASE + 0x0018)
-#define PA6_RG_U2_ISO_EN BIT(31)
+#define U3P_USBPHYACR6 0x018
#define PA6_RG_U2_BC11_SW_EN BIT(23)
#define PA6_RG_U2_OTG_VBUSCMP_EN BIT(20)
#define PA6_RG_U2_SQTH GENMASK(3, 0)
#define PA6_RG_U2_SQTH_VAL(x) (0xf & (x))
-#define U3P_U2PHYACR4 (SSUSB_SIFSLV_U2PHY_COM_BASE + 0x0020)
+#define U3P_U2PHYACR4 0x020
#define P2C_RG_USB20_GPIO_CTL BIT(9)
#define P2C_USB20_GPIO_MODE BIT(8)
#define P2C_U2_GPIO_CTR_MSK (P2C_RG_USB20_GPIO_CTL | P2C_USB20_GPIO_MODE)
-#define U3D_U2PHYDCR0 (SSUSB_SIFSLV_U2PHY_COM_BASE + 0x0060)
+#define U3D_U2PHYDCR0 0x060
#define P2C_RG_SIF_U2PLL_FORCE_ON BIT(24)
-#define U3P_U2PHYDTM0 (SSUSB_SIFSLV_U2PHY_COM_BASE + 0x0068)
+#define U3P_U2PHYDTM0 0x068
#define P2C_FORCE_UART_EN BIT(26)
#define P2C_FORCE_DATAIN BIT(23)
#define P2C_FORCE_DM_PULLDOWN BIT(21)
@@ -85,47 +93,56 @@
P2C_FORCE_TERMSEL | P2C_RG_DMPULLDOWN | \
P2C_RG_DPPULLDOWN | P2C_RG_TERMSEL)
-#define U3P_U2PHYDTM1 (SSUSB_SIFSLV_U2PHY_COM_BASE + 0x006C)
+#define U3P_U2PHYDTM1 0x06C
#define P2C_RG_UART_EN BIT(16)
#define P2C_RG_VBUSVALID BIT(5)
#define P2C_RG_SESSEND BIT(4)
#define P2C_RG_AVALID BIT(2)
-#define U3P_U3_PHYA_REG0 (SSUSB_USB30_PHYA_SIV_B_BASE + 0x0000)
-#define P3A_RG_U3_VUSB10_ON BIT(5)
-
-#define U3P_U3_PHYA_REG6 (SSUSB_USB30_PHYA_SIV_B_BASE + 0x0018)
+#define U3P_U3_PHYA_REG6 0x018
#define P3A_RG_TX_EIDLE_CM GENMASK(31, 28)
#define P3A_RG_TX_EIDLE_CM_VAL(x) ((0xf & (x)) << 28)
-#define U3P_U3_PHYA_REG9 (SSUSB_USB30_PHYA_SIV_B_BASE + 0x0024)
+#define U3P_U3_PHYA_REG9 0x024
#define P3A_RG_RX_DAC_MUX GENMASK(5, 1)
#define P3A_RG_RX_DAC_MUX_VAL(x) ((0x1f & (x)) << 1)
-#define U3P_U3PHYA_DA_REG0 (SSUSB_SIFSLV_U3PHYA_DA_BASE + 0x0000)
+#define U3P_U3_PHYA_DA_REG0 0x100
#define P3A_RG_XTAL_EXT_EN_U3 GENMASK(11, 10)
#define P3A_RG_XTAL_EXT_EN_U3_VAL(x) ((0x3 & (x)) << 10)
-#define U3P_PHYD_CDR1 (SSUSB_SIFSLV_U3PHYD_BASE + 0x005c)
+#define U3P_U3_PHYD_LFPS1 0x00c
+#define P3D_RG_FWAKE_TH GENMASK(21, 16)
+#define P3D_RG_FWAKE_TH_VAL(x) ((0x3f & (x)) << 16)
+
+#define U3P_U3_PHYD_CDR1 0x05c
#define P3D_RG_CDR_BIR_LTD1 GENMASK(28, 24)
#define P3D_RG_CDR_BIR_LTD1_VAL(x) ((0x1f & (x)) << 24)
#define P3D_RG_CDR_BIR_LTD0 GENMASK(12, 8)
#define P3D_RG_CDR_BIR_LTD0_VAL(x) ((0x1f & (x)) << 8)
-#define U3P_XTALCTL3 (SSUSB_SIFSLV_SPLLC + 0x0018)
+#define U3P_U3_PHYD_RXDET1 0x128
+#define P3D_RG_RXDET_STB2_SET GENMASK(17, 9)
+#define P3D_RG_RXDET_STB2_SET_VAL(x) ((0x1ff & (x)) << 9)
+
+#define U3P_U3_PHYD_RXDET2 0x12c
+#define P3D_RG_RXDET_STB2_SET_P3 GENMASK(8, 0)
+#define P3D_RG_RXDET_STB2_SET_P3_VAL(x) (0x1ff & (x))
+
+#define U3P_SPLLC_XTALCTL3 0x018
#define XC3_RG_U3_XTAL_RX_PWD BIT(9)
#define XC3_RG_U3_FRC_XTAL_RX_PWD BIT(8)
-#define U3P_U2FREQ_FMCR0 (SSUSB_SIFSLV_U2FREQ + 0x00)
+#define U3P_U2FREQ_FMCR0 0x00
#define P2F_RG_MONCLK_SEL GENMASK(27, 26)
#define P2F_RG_MONCLK_SEL_VAL(x) ((0x3 & (x)) << 26)
#define P2F_RG_FREQDET_EN BIT(24)
#define P2F_RG_CYCLECNT GENMASK(23, 0)
#define P2F_RG_CYCLECNT_VAL(x) ((P2F_RG_CYCLECNT) & (x))
-#define U3P_U2FREQ_VALUE (SSUSB_SIFSLV_U2FREQ + 0x0c)
+#define U3P_U2FREQ_VALUE 0x0c
-#define U3P_U2FREQ_FMMONR1 (SSUSB_SIFSLV_U2FREQ + 0x10)
+#define U3P_U2FREQ_FMMONR1 0x10
#define P2F_USB_FM_VALID BIT(0)
#define P2F_RG_FRCK_EN BIT(8)
@@ -134,21 +151,46 @@
#define U3P_SR_COEF_DIVISOR 1000
#define U3P_FM_DET_CYCLE_CNT 1024
+enum mt_phy_version {
+ MT_PHY_V1 = 1,
+ MT_PHY_V2,
+};
+
struct mt65xx_phy_pdata {
/* avoid RX sensitivity level degradation only for mt8173 */
bool avoid_rx_sen_degradation;
+ enum mt_phy_version version;
+};
+
+struct u2phy_banks {
+ void __iomem *misc;
+ void __iomem *fmreg;
+ void __iomem *com;
+};
+
+struct u3phy_banks {
+ void __iomem *spllc;
+ void __iomem *chip;
+ void __iomem *phyd; /* include u3phyd_bank2 */
+ void __iomem *phya; /* include u3phya_da */
};
struct mt65xx_phy_instance {
struct phy *phy;
void __iomem *port_base;
+ union {
+ struct u2phy_banks u2_banks;
+ struct u3phy_banks u3_banks;
+ };
+ struct clk *ref_clk; /* reference clock of analog phy */
u32 index;
u8 type;
};
struct mt65xx_u3phy {
struct device *dev;
- void __iomem *sif_base; /* include sif2, but exclude port's */
+ void __iomem *sif_base; /* only shared sif */
+ /* deprecated; use the per-instance @ref_clk instead */
struct clk *u3phya_ref; /* reference clock of usb3 analog phy */
const struct mt65xx_phy_pdata *pdata;
struct mt65xx_phy_instance **phys;
@@ -158,49 +200,53 @@ struct mt65xx_u3phy {
static void hs_slew_rate_calibrate(struct mt65xx_u3phy *u3phy,
struct mt65xx_phy_instance *instance)
{
- void __iomem *sif_base = u3phy->sif_base;
+ struct u2phy_banks *u2_banks = &instance->u2_banks;
+ void __iomem *fmreg = u2_banks->fmreg;
+ void __iomem *com = u2_banks->com;
int calibration_val;
int fm_out;
u32 tmp;
/* enable USB ring oscillator */
- tmp = readl(instance->port_base + U3P_USBPHYACR5);
+ tmp = readl(com + U3P_USBPHYACR5);
tmp |= PA5_RG_U2_HSTX_SRCAL_EN;
- writel(tmp, instance->port_base + U3P_USBPHYACR5);
+ writel(tmp, com + U3P_USBPHYACR5);
udelay(1);
/*enable free run clock */
- tmp = readl(sif_base + U3P_U2FREQ_FMMONR1);
+ tmp = readl(fmreg + U3P_U2FREQ_FMMONR1);
tmp |= P2F_RG_FRCK_EN;
- writel(tmp, sif_base + U3P_U2FREQ_FMMONR1);
+ writel(tmp, fmreg + U3P_U2FREQ_FMMONR1);
/* set cycle count as 1024, and select u2 channel */
- tmp = readl(sif_base + U3P_U2FREQ_FMCR0);
+ tmp = readl(fmreg + U3P_U2FREQ_FMCR0);
tmp &= ~(P2F_RG_CYCLECNT | P2F_RG_MONCLK_SEL);
tmp |= P2F_RG_CYCLECNT_VAL(U3P_FM_DET_CYCLE_CNT);
- tmp |= P2F_RG_MONCLK_SEL_VAL(instance->index);
- writel(tmp, sif_base + U3P_U2FREQ_FMCR0);
+ if (u3phy->pdata->version == MT_PHY_V1)
+ tmp |= P2F_RG_MONCLK_SEL_VAL(instance->index >> 1);
+
+ writel(tmp, fmreg + U3P_U2FREQ_FMCR0);
/* enable frequency meter */
- tmp = readl(sif_base + U3P_U2FREQ_FMCR0);
+ tmp = readl(fmreg + U3P_U2FREQ_FMCR0);
tmp |= P2F_RG_FREQDET_EN;
- writel(tmp, sif_base + U3P_U2FREQ_FMCR0);
+ writel(tmp, fmreg + U3P_U2FREQ_FMCR0);
/* ignore return value */
- readl_poll_timeout(sif_base + U3P_U2FREQ_FMMONR1, tmp,
- (tmp & P2F_USB_FM_VALID), 10, 200);
+ readl_poll_timeout(fmreg + U3P_U2FREQ_FMMONR1, tmp,
+ (tmp & P2F_USB_FM_VALID), 10, 200);
- fm_out = readl(sif_base + U3P_U2FREQ_VALUE);
+ fm_out = readl(fmreg + U3P_U2FREQ_VALUE);
/* disable frequency meter */
- tmp = readl(sif_base + U3P_U2FREQ_FMCR0);
+ tmp = readl(fmreg + U3P_U2FREQ_FMCR0);
tmp &= ~P2F_RG_FREQDET_EN;
- writel(tmp, sif_base + U3P_U2FREQ_FMCR0);
+ writel(tmp, fmreg + U3P_U2FREQ_FMCR0);
/*disable free run clock */
- tmp = readl(sif_base + U3P_U2FREQ_FMMONR1);
+ tmp = readl(fmreg + U3P_U2FREQ_FMMONR1);
tmp &= ~P2F_RG_FRCK_EN;
- writel(tmp, sif_base + U3P_U2FREQ_FMMONR1);
+ writel(tmp, fmreg + U3P_U2FREQ_FMMONR1);
if (fm_out) {
/* ( 1024 / FM_OUT ) x reference clock frequency x 0.028 */
@@ -215,85 +261,125 @@ static void hs_slew_rate_calibrate(struct mt65xx_u3phy *u3phy,
instance->index, fm_out, calibration_val);
/* set HS slew rate */
- tmp = readl(instance->port_base + U3P_USBPHYACR5);
+ tmp = readl(com + U3P_USBPHYACR5);
tmp &= ~PA5_RG_U2_HSTX_SRCTRL;
tmp |= PA5_RG_U2_HSTX_SRCTRL_VAL(calibration_val);
- writel(tmp, instance->port_base + U3P_USBPHYACR5);
+ writel(tmp, com + U3P_USBPHYACR5);
/* disable USB ring oscillator */
- tmp = readl(instance->port_base + U3P_USBPHYACR5);
+ tmp = readl(com + U3P_USBPHYACR5);
tmp &= ~PA5_RG_U2_HSTX_SRCAL_EN;
- writel(tmp, instance->port_base + U3P_USBPHYACR5);
+ writel(tmp, com + U3P_USBPHYACR5);
+}
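The computation that consumes fm_out sits in context lines between these hunks, so it is not visible here. A minimal sketch of the formula from the comment above ((1024 / FM_OUT) x reference-clock frequency x 0.028), kept in integer math via U3P_SR_COEF_DIVISOR and assuming a 26 MHz reference clock and coefficient 28 (both assumptions, not taken from this hunk), would look like:

	/* illustrative only; the 26 MHz ref clock and coefficient 28 are assumptions */
	u32 tmp = 26 * 28;				/* ref_clk(MHz) x (0.028 x 1000) */

	tmp = (tmp * U3P_FM_DET_CYCLE_CNT) / fm_out;	/* x (1024 / FM_OUT) */
	calibration_val = DIV_ROUND_CLOSEST(tmp, U3P_SR_COEF_DIVISOR);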
+
+static void u3_phy_instance_init(struct mt65xx_u3phy *u3phy,
+ struct mt65xx_phy_instance *instance)
+{
+ struct u3phy_banks *u3_banks = &instance->u3_banks;
+ u32 tmp;
+
+ /* gating PCIe Analog XTAL clock */
+ tmp = readl(u3_banks->spllc + U3P_SPLLC_XTALCTL3);
+ tmp |= XC3_RG_U3_XTAL_RX_PWD | XC3_RG_U3_FRC_XTAL_RX_PWD;
+ writel(tmp, u3_banks->spllc + U3P_SPLLC_XTALCTL3);
+
+ /* gating XSQ */
+ tmp = readl(u3_banks->phya + U3P_U3_PHYA_DA_REG0);
+ tmp &= ~P3A_RG_XTAL_EXT_EN_U3;
+ tmp |= P3A_RG_XTAL_EXT_EN_U3_VAL(2);
+ writel(tmp, u3_banks->phya + U3P_U3_PHYA_DA_REG0);
+
+ tmp = readl(u3_banks->phya + U3P_U3_PHYA_REG9);
+ tmp &= ~P3A_RG_RX_DAC_MUX;
+ tmp |= P3A_RG_RX_DAC_MUX_VAL(4);
+ writel(tmp, u3_banks->phya + U3P_U3_PHYA_REG9);
+
+ tmp = readl(u3_banks->phya + U3P_U3_PHYA_REG6);
+ tmp &= ~P3A_RG_TX_EIDLE_CM;
+ tmp |= P3A_RG_TX_EIDLE_CM_VAL(0xe);
+ writel(tmp, u3_banks->phya + U3P_U3_PHYA_REG6);
+
+ tmp = readl(u3_banks->phyd + U3P_U3_PHYD_CDR1);
+ tmp &= ~(P3D_RG_CDR_BIR_LTD0 | P3D_RG_CDR_BIR_LTD1);
+ tmp |= P3D_RG_CDR_BIR_LTD0_VAL(0xc) | P3D_RG_CDR_BIR_LTD1_VAL(0x3);
+ writel(tmp, u3_banks->phyd + U3P_U3_PHYD_CDR1);
+
+ tmp = readl(u3_banks->phyd + U3P_U3_PHYD_LFPS1);
+ tmp &= ~P3D_RG_FWAKE_TH;
+ tmp |= P3D_RG_FWAKE_TH_VAL(0x34);
+ writel(tmp, u3_banks->phyd + U3P_U3_PHYD_LFPS1);
+
+ tmp = readl(u3_banks->phyd + U3P_U3_PHYD_RXDET1);
+ tmp &= ~P3D_RG_RXDET_STB2_SET;
+ tmp |= P3D_RG_RXDET_STB2_SET_VAL(0x10);
+ writel(tmp, u3_banks->phyd + U3P_U3_PHYD_RXDET1);
+
+ tmp = readl(u3_banks->phyd + U3P_U3_PHYD_RXDET2);
+ tmp &= ~P3D_RG_RXDET_STB2_SET_P3;
+ tmp |= P3D_RG_RXDET_STB2_SET_P3_VAL(0x10);
+ writel(tmp, u3_banks->phyd + U3P_U3_PHYD_RXDET2);
+
+ dev_dbg(u3phy->dev, "%s(%d)\n", __func__, instance->index);
}
static void phy_instance_init(struct mt65xx_u3phy *u3phy,
struct mt65xx_phy_instance *instance)
{
- void __iomem *port_base = instance->port_base;
+ struct u2phy_banks *u2_banks = &instance->u2_banks;
+ void __iomem *com = u2_banks->com;
u32 index = instance->index;
u32 tmp;
/* switch to USB function. (system register, force ip into usb mode) */
- tmp = readl(port_base + U3P_U2PHYDTM0);
+ tmp = readl(com + U3P_U2PHYDTM0);
tmp &= ~P2C_FORCE_UART_EN;
tmp |= P2C_RG_XCVRSEL_VAL(1) | P2C_RG_DATAIN_VAL(0);
- writel(tmp, port_base + U3P_U2PHYDTM0);
+ writel(tmp, com + U3P_U2PHYDTM0);
- tmp = readl(port_base + U3P_U2PHYDTM1);
+ tmp = readl(com + U3P_U2PHYDTM1);
tmp &= ~P2C_RG_UART_EN;
- writel(tmp, port_base + U3P_U2PHYDTM1);
+ writel(tmp, com + U3P_U2PHYDTM1);
+
+ tmp = readl(com + U3P_USBPHYACR0);
+ tmp |= PA0_RG_USB20_INTR_EN;
+ writel(tmp, com + U3P_USBPHYACR0);
+
+ /* disable switch 100uA current to SSUSB */
+ tmp = readl(com + U3P_USBPHYACR5);
+ tmp &= ~PA5_RG_U2_HS_100U_U3_EN;
+ writel(tmp, com + U3P_USBPHYACR5);
if (!index) {
- tmp = readl(port_base + U3P_U2PHYACR4);
+ tmp = readl(com + U3P_U2PHYACR4);
tmp &= ~P2C_U2_GPIO_CTR_MSK;
- writel(tmp, port_base + U3P_U2PHYACR4);
+ writel(tmp, com + U3P_U2PHYACR4);
}
if (u3phy->pdata->avoid_rx_sen_degradation) {
if (!index) {
- tmp = readl(port_base + U3P_USBPHYACR2);
+ tmp = readl(com + U3P_USBPHYACR2);
tmp |= PA2_RG_SIF_U2PLL_FORCE_EN;
- writel(tmp, port_base + U3P_USBPHYACR2);
+ writel(tmp, com + U3P_USBPHYACR2);
- tmp = readl(port_base + U3D_U2PHYDCR0);
+ tmp = readl(com + U3D_U2PHYDCR0);
tmp &= ~P2C_RG_SIF_U2PLL_FORCE_ON;
- writel(tmp, port_base + U3D_U2PHYDCR0);
+ writel(tmp, com + U3D_U2PHYDCR0);
} else {
- tmp = readl(port_base + U3D_U2PHYDCR0);
+ tmp = readl(com + U3D_U2PHYDCR0);
tmp |= P2C_RG_SIF_U2PLL_FORCE_ON;
- writel(tmp, port_base + U3D_U2PHYDCR0);
+ writel(tmp, com + U3D_U2PHYDCR0);
- tmp = readl(port_base + U3P_U2PHYDTM0);
+ tmp = readl(com + U3P_U2PHYDTM0);
tmp |= P2C_RG_SUSPENDM | P2C_FORCE_SUSPENDM;
- writel(tmp, port_base + U3P_U2PHYDTM0);
+ writel(tmp, com + U3P_U2PHYDTM0);
}
}
- tmp = readl(port_base + U3P_USBPHYACR6);
+ tmp = readl(com + U3P_USBPHYACR6);
tmp &= ~PA6_RG_U2_BC11_SW_EN; /* DP/DM BC1.1 path Disable */
tmp &= ~PA6_RG_U2_SQTH;
tmp |= PA6_RG_U2_SQTH_VAL(2);
- writel(tmp, port_base + U3P_USBPHYACR6);
-
- tmp = readl(port_base + U3P_U3PHYA_DA_REG0);
- tmp &= ~P3A_RG_XTAL_EXT_EN_U3;
- tmp |= P3A_RG_XTAL_EXT_EN_U3_VAL(2);
- writel(tmp, port_base + U3P_U3PHYA_DA_REG0);
-
- tmp = readl(port_base + U3P_U3_PHYA_REG9);
- tmp &= ~P3A_RG_RX_DAC_MUX;
- tmp |= P3A_RG_RX_DAC_MUX_VAL(4);
- writel(tmp, port_base + U3P_U3_PHYA_REG9);
-
- tmp = readl(port_base + U3P_U3_PHYA_REG6);
- tmp &= ~P3A_RG_TX_EIDLE_CM;
- tmp |= P3A_RG_TX_EIDLE_CM_VAL(0xe);
- writel(tmp, port_base + U3P_U3_PHYA_REG6);
-
- tmp = readl(port_base + U3P_PHYD_CDR1);
- tmp &= ~(P3D_RG_CDR_BIR_LTD0 | P3D_RG_CDR_BIR_LTD1);
- tmp |= P3D_RG_CDR_BIR_LTD0_VAL(0xc) | P3D_RG_CDR_BIR_LTD1_VAL(0x3);
- writel(tmp, port_base + U3P_PHYD_CDR1);
+ writel(tmp, com + U3P_USBPHYACR6);
dev_dbg(u3phy->dev, "%s(%d)\n", __func__, index);
}
@@ -301,58 +387,35 @@ static void phy_instance_init(struct mt65xx_u3phy *u3phy,
static void phy_instance_power_on(struct mt65xx_u3phy *u3phy,
struct mt65xx_phy_instance *instance)
{
- void __iomem *port_base = instance->port_base;
+ struct u2phy_banks *u2_banks = &instance->u2_banks;
+ void __iomem *com = u2_banks->com;
u32 index = instance->index;
u32 tmp;
- if (!index) {
- /* Set RG_SSUSB_VUSB10_ON as 1 after VUSB10 ready */
- tmp = readl(port_base + U3P_U3_PHYA_REG0);
- tmp |= P3A_RG_U3_VUSB10_ON;
- writel(tmp, port_base + U3P_U3_PHYA_REG0);
- }
-
/* (force_suspendm=0) (let suspendm=1, enable usb 480MHz pll) */
- tmp = readl(port_base + U3P_U2PHYDTM0);
+ tmp = readl(com + U3P_U2PHYDTM0);
tmp &= ~(P2C_FORCE_SUSPENDM | P2C_RG_XCVRSEL);
tmp &= ~(P2C_RG_DATAIN | P2C_DTM0_PART_MASK);
- writel(tmp, port_base + U3P_U2PHYDTM0);
+ writel(tmp, com + U3P_U2PHYDTM0);
/* OTG Enable */
- tmp = readl(port_base + U3P_USBPHYACR6);
+ tmp = readl(com + U3P_USBPHYACR6);
tmp |= PA6_RG_U2_OTG_VBUSCMP_EN;
- writel(tmp, port_base + U3P_USBPHYACR6);
+ writel(tmp, com + U3P_USBPHYACR6);
- if (!index) {
- tmp = readl(u3phy->sif_base + U3P_XTALCTL3);
- tmp |= XC3_RG_U3_XTAL_RX_PWD | XC3_RG_U3_FRC_XTAL_RX_PWD;
- writel(tmp, u3phy->sif_base + U3P_XTALCTL3);
-
- /* switch 100uA current to SSUSB */
- tmp = readl(port_base + U3P_USBPHYACR5);
- tmp |= PA5_RG_U2_HS_100U_U3_EN;
- writel(tmp, port_base + U3P_USBPHYACR5);
- }
-
- tmp = readl(port_base + U3P_U2PHYDTM1);
+ tmp = readl(com + U3P_U2PHYDTM1);
tmp |= P2C_RG_VBUSVALID | P2C_RG_AVALID;
tmp &= ~P2C_RG_SESSEND;
- writel(tmp, port_base + U3P_U2PHYDTM1);
-
- /* USB 2.0 slew rate calibration */
- tmp = readl(port_base + U3P_USBPHYACR5);
- tmp &= ~PA5_RG_U2_HSTX_SRCTRL;
- tmp |= PA5_RG_U2_HSTX_SRCTRL_VAL(4);
- writel(tmp, port_base + U3P_USBPHYACR5);
+ writel(tmp, com + U3P_U2PHYDTM1);
if (u3phy->pdata->avoid_rx_sen_degradation && index) {
- tmp = readl(port_base + U3D_U2PHYDCR0);
+ tmp = readl(com + U3D_U2PHYDCR0);
tmp |= P2C_RG_SIF_U2PLL_FORCE_ON;
- writel(tmp, port_base + U3D_U2PHYDCR0);
+ writel(tmp, com + U3D_U2PHYDCR0);
- tmp = readl(port_base + U3P_U2PHYDTM0);
+ tmp = readl(com + U3P_U2PHYDTM0);
tmp |= P2C_RG_SUSPENDM | P2C_FORCE_SUSPENDM;
- writel(tmp, port_base + U3P_U2PHYDTM0);
+ writel(tmp, com + U3P_U2PHYDTM0);
}
dev_dbg(u3phy->dev, "%s(%d)\n", __func__, index);
}
@@ -360,48 +423,36 @@ static void phy_instance_power_on(struct mt65xx_u3phy *u3phy,
static void phy_instance_power_off(struct mt65xx_u3phy *u3phy,
struct mt65xx_phy_instance *instance)
{
- void __iomem *port_base = instance->port_base;
+ struct u2phy_banks *u2_banks = &instance->u2_banks;
+ void __iomem *com = u2_banks->com;
u32 index = instance->index;
u32 tmp;
- tmp = readl(port_base + U3P_U2PHYDTM0);
+ tmp = readl(com + U3P_U2PHYDTM0);
tmp &= ~(P2C_RG_XCVRSEL | P2C_RG_DATAIN);
tmp |= P2C_FORCE_SUSPENDM;
- writel(tmp, port_base + U3P_U2PHYDTM0);
+ writel(tmp, com + U3P_U2PHYDTM0);
/* OTG Disable */
- tmp = readl(port_base + U3P_USBPHYACR6);
+ tmp = readl(com + U3P_USBPHYACR6);
tmp &= ~PA6_RG_U2_OTG_VBUSCMP_EN;
- writel(tmp, port_base + U3P_USBPHYACR6);
-
- if (!index) {
- /* switch 100uA current back to USB2.0 */
- tmp = readl(port_base + U3P_USBPHYACR5);
- tmp &= ~PA5_RG_U2_HS_100U_U3_EN;
- writel(tmp, port_base + U3P_USBPHYACR5);
- }
+ writel(tmp, com + U3P_USBPHYACR6);
/* let suspendm=0, set utmi into analog power down */
- tmp = readl(port_base + U3P_U2PHYDTM0);
+ tmp = readl(com + U3P_U2PHYDTM0);
tmp &= ~P2C_RG_SUSPENDM;
- writel(tmp, port_base + U3P_U2PHYDTM0);
+ writel(tmp, com + U3P_U2PHYDTM0);
udelay(1);
- tmp = readl(port_base + U3P_U2PHYDTM1);
+ tmp = readl(com + U3P_U2PHYDTM1);
tmp &= ~(P2C_RG_VBUSVALID | P2C_RG_AVALID);
tmp |= P2C_RG_SESSEND;
- writel(tmp, port_base + U3P_U2PHYDTM1);
-
- if (!index) {
- tmp = readl(port_base + U3P_U3_PHYA_REG0);
- tmp &= ~P3A_RG_U3_VUSB10_ON;
- writel(tmp, port_base + U3P_U3_PHYA_REG0);
- }
+ writel(tmp, com + U3P_U2PHYDTM1);
if (u3phy->pdata->avoid_rx_sen_degradation && index) {
- tmp = readl(port_base + U3D_U2PHYDCR0);
+ tmp = readl(com + U3D_U2PHYDCR0);
tmp &= ~P2C_RG_SIF_U2PLL_FORCE_ON;
- writel(tmp, port_base + U3D_U2PHYDCR0);
+ writel(tmp, com + U3D_U2PHYDCR0);
}
dev_dbg(u3phy->dev, "%s(%d)\n", __func__, index);
@@ -410,18 +461,55 @@ static void phy_instance_power_off(struct mt65xx_u3phy *u3phy,
static void phy_instance_exit(struct mt65xx_u3phy *u3phy,
struct mt65xx_phy_instance *instance)
{
- void __iomem *port_base = instance->port_base;
+ struct u2phy_banks *u2_banks = &instance->u2_banks;
+ void __iomem *com = u2_banks->com;
u32 index = instance->index;
u32 tmp;
if (u3phy->pdata->avoid_rx_sen_degradation && index) {
- tmp = readl(port_base + U3D_U2PHYDCR0);
+ tmp = readl(com + U3D_U2PHYDCR0);
tmp &= ~P2C_RG_SIF_U2PLL_FORCE_ON;
- writel(tmp, port_base + U3D_U2PHYDCR0);
+ writel(tmp, com + U3D_U2PHYDCR0);
- tmp = readl(port_base + U3P_U2PHYDTM0);
+ tmp = readl(com + U3P_U2PHYDTM0);
tmp &= ~P2C_FORCE_SUSPENDM;
- writel(tmp, port_base + U3P_U2PHYDTM0);
+ writel(tmp, com + U3P_U2PHYDTM0);
+ }
+}
+
+static void phy_v1_banks_init(struct mt65xx_u3phy *u3phy,
+ struct mt65xx_phy_instance *instance)
+{
+ struct u2phy_banks *u2_banks = &instance->u2_banks;
+ struct u3phy_banks *u3_banks = &instance->u3_banks;
+
+ if (instance->type == PHY_TYPE_USB2) {
+ u2_banks->misc = NULL;
+ u2_banks->fmreg = u3phy->sif_base + SSUSB_SIFSLV_V1_U2FREQ;
+ u2_banks->com = instance->port_base + SSUSB_SIFSLV_V1_U2PHY_COM;
+ } else if (instance->type == PHY_TYPE_USB3) {
+ u3_banks->spllc = u3phy->sif_base + SSUSB_SIFSLV_V1_SPLLC;
+ u3_banks->chip = NULL;
+ u3_banks->phyd = instance->port_base + SSUSB_SIFSLV_V1_U3PHYD;
+ u3_banks->phya = instance->port_base + SSUSB_SIFSLV_V1_U3PHYA;
+ }
+}
+
+static void phy_v2_banks_init(struct mt65xx_u3phy *u3phy,
+ struct mt65xx_phy_instance *instance)
+{
+ struct u2phy_banks *u2_banks = &instance->u2_banks;
+ struct u3phy_banks *u3_banks = &instance->u3_banks;
+
+ if (instance->type == PHY_TYPE_USB2) {
+ u2_banks->misc = instance->port_base + SSUSB_SIFSLV_V2_MISC;
+ u2_banks->fmreg = instance->port_base + SSUSB_SIFSLV_V2_U2FREQ;
+ u2_banks->com = instance->port_base + SSUSB_SIFSLV_V2_U2PHY_COM;
+ } else if (instance->type == PHY_TYPE_USB3) {
+ u3_banks->spllc = instance->port_base + SSUSB_SIFSLV_V2_SPLLC;
+ u3_banks->chip = instance->port_base + SSUSB_SIFSLV_V2_CHIP;
+ u3_banks->phyd = instance->port_base + SSUSB_SIFSLV_V2_U3PHYD;
+ u3_banks->phya = instance->port_base + SSUSB_SIFSLV_V2_U3PHYA;
}
}
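How the two helpers above resolve each bank, derived from the SSUSB_SIFSLV_V1_*/_V2_* offsets near the top of this file (illustrative summary only, not part of the patch):

	/*
	 *             V1 (mt2701/mt8173)        V2 (mt2712)
	 * u2 misc     (none)                    port_base + 0x000
	 * u2 fmreg    sif_base  + 0x100         port_base + 0x100
	 * u2 com      port_base + 0x000         port_base + 0x300
	 * u3 spllc    sif_base  + 0x000         port_base + 0x000
	 * u3 chip     (none)                    port_base + 0x100
	 * u3 phyd     port_base + 0x000         port_base + 0x200
	 * u3 phya     port_base + 0x200         port_base + 0x400
	 */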
@@ -437,7 +525,17 @@ static int mt65xx_phy_init(struct phy *phy)
return ret;
}
- phy_instance_init(u3phy, instance);
+ ret = clk_prepare_enable(instance->ref_clk);
+ if (ret) {
+ dev_err(u3phy->dev, "failed to enable ref_clk\n");
+ return ret;
+ }
+
+ if (instance->type == PHY_TYPE_USB2)
+ phy_instance_init(u3phy, instance);
+ else
+ u3_phy_instance_init(u3phy, instance);
+
return 0;
}
@@ -446,8 +544,10 @@ static int mt65xx_phy_power_on(struct phy *phy)
struct mt65xx_phy_instance *instance = phy_get_drvdata(phy);
struct mt65xx_u3phy *u3phy = dev_get_drvdata(phy->dev.parent);
- phy_instance_power_on(u3phy, instance);
- hs_slew_rate_calibrate(u3phy, instance);
+ if (instance->type == PHY_TYPE_USB2) {
+ phy_instance_power_on(u3phy, instance);
+ hs_slew_rate_calibrate(u3phy, instance);
+ }
return 0;
}
@@ -456,7 +556,9 @@ static int mt65xx_phy_power_off(struct phy *phy)
struct mt65xx_phy_instance *instance = phy_get_drvdata(phy);
struct mt65xx_u3phy *u3phy = dev_get_drvdata(phy->dev.parent);
- phy_instance_power_off(u3phy, instance);
+ if (instance->type == PHY_TYPE_USB2)
+ phy_instance_power_off(u3phy, instance);
+
return 0;
}
@@ -465,7 +567,10 @@ static int mt65xx_phy_exit(struct phy *phy)
struct mt65xx_phy_instance *instance = phy_get_drvdata(phy);
struct mt65xx_u3phy *u3phy = dev_get_drvdata(phy->dev.parent);
- phy_instance_exit(u3phy, instance);
+ if (instance->type == PHY_TYPE_USB2)
+ phy_instance_exit(u3phy, instance);
+
+ clk_disable_unprepare(instance->ref_clk);
clk_disable_unprepare(u3phy->u3phya_ref);
return 0;
}
@@ -478,7 +583,6 @@ static struct phy *mt65xx_phy_xlate(struct device *dev,
struct device_node *phy_np = args->np;
int index;
-
if (args->args_count != 1) {
dev_err(dev, "invalid number of cells in 'phy' property\n");
return ERR_PTR(-EINVAL);
@@ -496,13 +600,21 @@ static struct phy *mt65xx_phy_xlate(struct device *dev,
}
instance->type = args->args[0];
-
if (!(instance->type == PHY_TYPE_USB2 ||
instance->type == PHY_TYPE_USB3)) {
dev_err(dev, "unsupported device type: %d\n", instance->type);
return ERR_PTR(-EINVAL);
}
+ if (u3phy->pdata->version == MT_PHY_V1) {
+ phy_v1_banks_init(u3phy, instance);
+ } else if (u3phy->pdata->version == MT_PHY_V2) {
+ phy_v2_banks_init(u3phy, instance);
+ } else {
+ dev_err(dev, "phy version is not supported\n");
+ return ERR_PTR(-EINVAL);
+ }
+
return instance->phy;
}
@@ -516,14 +628,22 @@ static const struct phy_ops mt65xx_u3phy_ops = {
static const struct mt65xx_phy_pdata mt2701_pdata = {
.avoid_rx_sen_degradation = false,
+ .version = MT_PHY_V1,
+};
+
+static const struct mt65xx_phy_pdata mt2712_pdata = {
+ .avoid_rx_sen_degradation = false,
+ .version = MT_PHY_V2,
};
static const struct mt65xx_phy_pdata mt8173_pdata = {
.avoid_rx_sen_degradation = true,
+ .version = MT_PHY_V1,
};
static const struct of_device_id mt65xx_u3phy_id_table[] = {
{ .compatible = "mediatek,mt2701-u3phy", .data = &mt2701_pdata },
+ { .compatible = "mediatek,mt2712-u3phy", .data = &mt2712_pdata },
{ .compatible = "mediatek,mt8173-u3phy", .data = &mt8173_pdata },
{ },
};
@@ -559,17 +679,23 @@ static int mt65xx_u3phy_probe(struct platform_device *pdev)
u3phy->dev = dev;
platform_set_drvdata(pdev, u3phy);
- sif_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- u3phy->sif_base = devm_ioremap_resource(dev, sif_res);
- if (IS_ERR(u3phy->sif_base)) {
- dev_err(dev, "failed to remap sif regs\n");
- return PTR_ERR(u3phy->sif_base);
+ if (u3phy->pdata->version == MT_PHY_V1) {
+ /* get banks shared by multiple phys */
+ sif_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ u3phy->sif_base = devm_ioremap_resource(dev, sif_res);
+ if (IS_ERR(u3phy->sif_base)) {
+ dev_err(dev, "failed to remap sif regs\n");
+ return PTR_ERR(u3phy->sif_base);
+ }
}
+ /* it's deprecated, make it optional for backward compatibility */
u3phy->u3phya_ref = devm_clk_get(dev, "u3phya_ref");
if (IS_ERR(u3phy->u3phya_ref)) {
- dev_err(dev, "error to get u3phya_ref\n");
- return PTR_ERR(u3phy->u3phya_ref);
+ if (PTR_ERR(u3phy->u3phya_ref) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+
+ u3phy->u3phya_ref = NULL;
}
port = 0;
@@ -610,6 +736,17 @@ static int mt65xx_u3phy_probe(struct platform_device *pdev)
instance->index = port;
phy_set_drvdata(phy, instance);
port++;
+
+ /* if the deprecated clock is provided, ignore the instance's own clock */
+ if (u3phy->u3phya_ref)
+ continue;
+
+ instance->ref_clk = devm_clk_get(&phy->dev, "ref");
+ if (IS_ERR(instance->ref_clk)) {
+ dev_err(dev, "failed to get ref_clk(id-%d)\n", port);
+ retval = PTR_ERR(instance->ref_clk);
+ goto put_child;
+ }
}
provider = devm_of_phy_provider_register(dev, mt65xx_phy_xlate);
diff --git a/drivers/phy/phy-qcom-qmp.c b/drivers/phy/phy-qcom-qmp.c
new file mode 100644
index 000000000000..727e23be7cac
--- /dev/null
+++ b/drivers/phy/phy-qcom-qmp.c
@@ -0,0 +1,1153 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+
+#include <dt-bindings/phy/phy.h>
+
+/* QMP PHY QSERDES COM registers */
+#define QSERDES_COM_BG_TIMER 0x00c
+#define QSERDES_COM_SSC_EN_CENTER 0x010
+#define QSERDES_COM_SSC_ADJ_PER1 0x014
+#define QSERDES_COM_SSC_ADJ_PER2 0x018
+#define QSERDES_COM_SSC_PER1 0x01c
+#define QSERDES_COM_SSC_PER2 0x020
+#define QSERDES_COM_SSC_STEP_SIZE1 0x024
+#define QSERDES_COM_SSC_STEP_SIZE2 0x028
+#define QSERDES_COM_BIAS_EN_CLKBUFLR_EN 0x034
+#define QSERDES_COM_CLK_ENABLE1 0x038
+#define QSERDES_COM_SYS_CLK_CTRL 0x03c
+#define QSERDES_COM_SYSCLK_BUF_ENABLE 0x040
+#define QSERDES_COM_PLL_IVCO 0x048
+#define QSERDES_COM_LOCK_CMP1_MODE0 0x04c
+#define QSERDES_COM_LOCK_CMP2_MODE0 0x050
+#define QSERDES_COM_LOCK_CMP3_MODE0 0x054
+#define QSERDES_COM_LOCK_CMP1_MODE1 0x058
+#define QSERDES_COM_LOCK_CMP2_MODE1 0x05c
+#define QSERDES_COM_LOCK_CMP3_MODE1 0x060
+#define QSERDES_COM_BG_TRIM 0x070
+#define QSERDES_COM_CLK_EP_DIV 0x074
+#define QSERDES_COM_CP_CTRL_MODE0 0x078
+#define QSERDES_COM_CP_CTRL_MODE1 0x07c
+#define QSERDES_COM_PLL_RCTRL_MODE0 0x084
+#define QSERDES_COM_PLL_RCTRL_MODE1 0x088
+#define QSERDES_COM_PLL_CCTRL_MODE0 0x090
+#define QSERDES_COM_PLL_CCTRL_MODE1 0x094
+#define QSERDES_COM_SYSCLK_EN_SEL 0x0ac
+#define QSERDES_COM_RESETSM_CNTRL 0x0b4
+#define QSERDES_COM_RESTRIM_CTRL 0x0bc
+#define QSERDES_COM_RESCODE_DIV_NUM 0x0c4
+#define QSERDES_COM_LOCK_CMP_EN 0x0c8
+#define QSERDES_COM_LOCK_CMP_CFG 0x0cc
+#define QSERDES_COM_DEC_START_MODE0 0x0d0
+#define QSERDES_COM_DEC_START_MODE1 0x0d4
+#define QSERDES_COM_DIV_FRAC_START1_MODE0 0x0dc
+#define QSERDES_COM_DIV_FRAC_START2_MODE0 0x0e0
+#define QSERDES_COM_DIV_FRAC_START3_MODE0 0x0e4
+#define QSERDES_COM_DIV_FRAC_START1_MODE1 0x0e8
+#define QSERDES_COM_DIV_FRAC_START2_MODE1 0x0ec
+#define QSERDES_COM_DIV_FRAC_START3_MODE1 0x0f0
+#define QSERDES_COM_INTEGLOOP_GAIN0_MODE0 0x108
+#define QSERDES_COM_INTEGLOOP_GAIN1_MODE0 0x10c
+#define QSERDES_COM_INTEGLOOP_GAIN0_MODE1 0x110
+#define QSERDES_COM_INTEGLOOP_GAIN1_MODE1 0x114
+#define QSERDES_COM_VCO_TUNE_CTRL 0x124
+#define QSERDES_COM_VCO_TUNE_MAP 0x128
+#define QSERDES_COM_VCO_TUNE1_MODE0 0x12c
+#define QSERDES_COM_VCO_TUNE2_MODE0 0x130
+#define QSERDES_COM_VCO_TUNE1_MODE1 0x134
+#define QSERDES_COM_VCO_TUNE2_MODE1 0x138
+#define QSERDES_COM_VCO_TUNE_TIMER1 0x144
+#define QSERDES_COM_VCO_TUNE_TIMER2 0x148
+#define QSERDES_COM_BG_CTRL 0x170
+#define QSERDES_COM_CLK_SELECT 0x174
+#define QSERDES_COM_HSCLK_SEL 0x178
+#define QSERDES_COM_CORECLK_DIV 0x184
+#define QSERDES_COM_CORE_CLK_EN 0x18c
+#define QSERDES_COM_C_READY_STATUS 0x190
+#define QSERDES_COM_CMN_CONFIG 0x194
+#define QSERDES_COM_SVS_MODE_CLK_SEL 0x19c
+#define QSERDES_COM_DEBUG_BUS0 0x1a0
+#define QSERDES_COM_DEBUG_BUS1 0x1a4
+#define QSERDES_COM_DEBUG_BUS2 0x1a8
+#define QSERDES_COM_DEBUG_BUS3 0x1ac
+#define QSERDES_COM_DEBUG_BUS_SEL 0x1b0
+#define QSERDES_COM_CORECLK_DIV_MODE1 0x1bc
+
+/* QMP PHY TX registers */
+#define QSERDES_TX_RES_CODE_LANE_OFFSET 0x054
+#define QSERDES_TX_DEBUG_BUS_SEL 0x064
+#define QSERDES_TX_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN 0x068
+#define QSERDES_TX_LANE_MODE 0x094
+#define QSERDES_TX_RCV_DETECT_LVL_2 0x0ac
+
+/* QMP PHY RX registers */
+#define QSERDES_RX_UCDR_SO_GAIN_HALF 0x010
+#define QSERDES_RX_UCDR_SO_GAIN 0x01c
+#define QSERDES_RX_UCDR_FASTLOCK_FO_GAIN 0x040
+#define QSERDES_RX_UCDR_SO_SATURATION_AND_ENABLE 0x048
+#define QSERDES_RX_RX_TERM_BW 0x090
+#define QSERDES_RX_RX_EQ_GAIN1_LSB 0x0c4
+#define QSERDES_RX_RX_EQ_GAIN1_MSB 0x0c8
+#define QSERDES_RX_RX_EQ_GAIN2_LSB 0x0cc
+#define QSERDES_RX_RX_EQ_GAIN2_MSB 0x0d0
+#define QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2 0x0d8
+#define QSERDES_RX_RX_EQU_ADAPTOR_CNTRL3 0x0dc
+#define QSERDES_RX_RX_EQU_ADAPTOR_CNTRL4 0x0e0
+#define QSERDES_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1 0x108
+#define QSERDES_RX_RX_OFFSET_ADAPTOR_CNTRL2 0x10c
+#define QSERDES_RX_SIGDET_ENABLES 0x110
+#define QSERDES_RX_SIGDET_CNTRL 0x114
+#define QSERDES_RX_SIGDET_LVL 0x118
+#define QSERDES_RX_SIGDET_DEGLITCH_CNTRL 0x11c
+#define QSERDES_RX_RX_BAND 0x120
+#define QSERDES_RX_RX_INTERFACE_MODE 0x12c
+
+/* QMP PHY PCS registers */
+#define QPHY_POWER_DOWN_CONTROL 0x04
+#define QPHY_TXDEEMPH_M6DB_V0 0x24
+#define QPHY_TXDEEMPH_M3P5DB_V0 0x28
+#define QPHY_ENDPOINT_REFCLK_DRIVE 0x54
+#define QPHY_RX_IDLE_DTCT_CNTRL 0x58
+#define QPHY_POWER_STATE_CONFIG1 0x60
+#define QPHY_POWER_STATE_CONFIG2 0x64
+#define QPHY_POWER_STATE_CONFIG4 0x6c
+#define QPHY_LOCK_DETECT_CONFIG1 0x80
+#define QPHY_LOCK_DETECT_CONFIG2 0x84
+#define QPHY_LOCK_DETECT_CONFIG3 0x88
+#define QPHY_PWRUP_RESET_DLY_TIME_AUXCLK 0xa0
+#define QPHY_LP_WAKEUP_DLY_TIME_AUXCLK 0xa4
+
+/* QPHY_SW_RESET bit */
+#define SW_RESET BIT(0)
+/* QPHY_POWER_DOWN_CONTROL */
+#define SW_PWRDN BIT(0)
+#define REFCLK_DRV_DSBL BIT(1)
+/* QPHY_START_CONTROL bits */
+#define SERDES_START BIT(0)
+#define PCS_START BIT(1)
+#define PLL_READY_GATE_EN BIT(3)
+/* QPHY_PCS_STATUS bit */
+#define PHYSTATUS BIT(6)
+/* QPHY_COM_PCS_READY_STATUS bit */
+#define PCS_READY BIT(0)
+
+#define PHY_INIT_COMPLETE_TIMEOUT 1000
+#define POWER_DOWN_DELAY_US_MIN 10
+#define POWER_DOWN_DELAY_US_MAX 11
+
+#define MAX_PROP_NAME 32
+
+struct qmp_phy_init_tbl {
+ unsigned int offset;
+ unsigned int val;
+ /*
+ * set when this register is part of the per-PHY reg layout;
+ * in that case @offset is an index into the layout array
+ * rather than a raw register offset
+ */
+ int in_layout;
+};
+
+#define QMP_PHY_INIT_CFG(o, v) \
+ { \
+ .offset = o, \
+ .val = v, \
+ }
+
+#define QMP_PHY_INIT_CFG_L(o, v) \
+ { \
+ .offset = o, \
+ .val = v, \
+ .in_layout = 1, \
+ }
+
+/* set of registers with offsets different per-PHY */
+enum qphy_reg_layout {
+ /* Common block control registers */
+ QPHY_COM_SW_RESET,
+ QPHY_COM_POWER_DOWN_CONTROL,
+ QPHY_COM_START_CONTROL,
+ QPHY_COM_PCS_READY_STATUS,
+ /* PCS registers */
+ QPHY_PLL_LOCK_CHK_DLY_TIME,
+ QPHY_FLL_CNTRL1,
+ QPHY_FLL_CNTRL2,
+ QPHY_FLL_CNT_VAL_L,
+ QPHY_FLL_CNT_VAL_H_TOL,
+ QPHY_FLL_MAN_CODE,
+ QPHY_SW_RESET,
+ QPHY_START_CTRL,
+ QPHY_PCS_READY_STATUS,
+};
+
+static const unsigned int pciephy_regs_layout[] = {
+ [QPHY_COM_SW_RESET] = 0x400,
+ [QPHY_COM_POWER_DOWN_CONTROL] = 0x404,
+ [QPHY_COM_START_CONTROL] = 0x408,
+ [QPHY_COM_PCS_READY_STATUS] = 0x448,
+ [QPHY_PLL_LOCK_CHK_DLY_TIME] = 0xa8,
+ [QPHY_FLL_CNTRL1] = 0xc4,
+ [QPHY_FLL_CNTRL2] = 0xc8,
+ [QPHY_FLL_CNT_VAL_L] = 0xcc,
+ [QPHY_FLL_CNT_VAL_H_TOL] = 0xd0,
+ [QPHY_FLL_MAN_CODE] = 0xd4,
+ [QPHY_SW_RESET] = 0x00,
+ [QPHY_START_CTRL] = 0x08,
+ [QPHY_PCS_READY_STATUS] = 0x174,
+};
+
+static const unsigned int usb3phy_regs_layout[] = {
+ [QPHY_FLL_CNTRL1] = 0xc0,
+ [QPHY_FLL_CNTRL2] = 0xc4,
+ [QPHY_FLL_CNT_VAL_L] = 0xc8,
+ [QPHY_FLL_CNT_VAL_H_TOL] = 0xcc,
+ [QPHY_FLL_MAN_CODE] = 0xd0,
+ [QPHY_SW_RESET] = 0x00,
+ [QPHY_START_CTRL] = 0x08,
+ [QPHY_PCS_READY_STATUS] = 0x17c,
+};
+
+static const struct qmp_phy_init_tbl msm8996_pcie_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x1c),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CLK_ENABLE1, 0x10),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CLK_SELECT, 0x33),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CMN_CONFIG, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP_EN, 0x42),
+ QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_MAP, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_TIMER1, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_TIMER2, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_COM_HSCLK_SEL, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SVS_MODE_CLK_SEL, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CORE_CLK_EN, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CORECLK_DIV, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_COM_BG_TIMER, 0x09),
+ QMP_PHY_INIT_CFG(QSERDES_COM_DEC_START_MODE0, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START3_MODE0, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START2_MODE0, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START1_MODE0, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP3_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP2_MODE0, 0x1a),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP1_MODE0, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CLK_SELECT, 0x33),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SYS_CLK_CTRL, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SYSCLK_BUF_ENABLE, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SYSCLK_EN_SEL, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CP_CTRL_MODE0, 0x0b),
+ QMP_PHY_INIT_CFG(QSERDES_COM_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_COM_PLL_CCTRL_MODE0, 0x28),
+ QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN1_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_EN_CENTER, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_PER1, 0x31),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_PER2, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_ADJ_PER1, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_ADJ_PER2, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_STEP_SIZE1, 0x2f),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_STEP_SIZE2, 0x19),
+ QMP_PHY_INIT_CFG(QSERDES_COM_RESCODE_DIV_NUM, 0x15),
+ QMP_PHY_INIT_CFG(QSERDES_COM_BG_TRIM, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_COM_PLL_IVCO, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CLK_EP_DIV, 0x19),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CLK_ENABLE1, 0x10),
+ QMP_PHY_INIT_CFG(QSERDES_COM_HSCLK_SEL, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_RESCODE_DIV_NUM, 0x40),
+};
+
+static const struct qmp_phy_init_tbl msm8996_pcie_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_TX_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN, 0x45),
+ QMP_PHY_INIT_CFG(QSERDES_TX_LANE_MODE, 0x06),
+};
+
+static const struct qmp_phy_init_tbl msm8996_pcie_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_ENABLES, 0x1c),
+ QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL3, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL4, 0xdb),
+ QMP_PHY_INIT_CFG(QSERDES_RX_RX_BAND, 0x18),
+ QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_SO_GAIN, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_SO_GAIN_HALF, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x4b),
+ QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_DEGLITCH_CNTRL, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_LVL, 0x19),
+};
+
+static const struct qmp_phy_init_tbl msm8996_pcie_pcs_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_RX_IDLE_DTCT_CNTRL, 0x4c),
+ QMP_PHY_INIT_CFG(QPHY_PWRUP_RESET_DLY_TIME_AUXCLK, 0x00),
+ QMP_PHY_INIT_CFG(QPHY_LP_WAKEUP_DLY_TIME_AUXCLK, 0x01),
+
+ QMP_PHY_INIT_CFG_L(QPHY_PLL_LOCK_CHK_DLY_TIME, 0x05),
+
+ QMP_PHY_INIT_CFG(QPHY_ENDPOINT_REFCLK_DRIVE, 0x05),
+ QMP_PHY_INIT_CFG(QPHY_POWER_DOWN_CONTROL, 0x02),
+ QMP_PHY_INIT_CFG(QPHY_POWER_STATE_CONFIG4, 0x00),
+ QMP_PHY_INIT_CFG(QPHY_POWER_STATE_CONFIG1, 0xa3),
+ QMP_PHY_INIT_CFG(QPHY_TXDEEMPH_M3P5DB_V0, 0x0e),
+};
+
+static const struct qmp_phy_init_tbl msm8996_usb3_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_COM_SYSCLK_EN_SEL, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CLK_SELECT, 0x30),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CMN_CONFIG, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SVS_MODE_CLK_SEL, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_COM_HSCLK_SEL, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_BG_TRIM, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_COM_PLL_IVCO, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SYS_CLK_CTRL, 0x04),
+ /* PLL and Loop filter settings */
+ QMP_PHY_INIT_CFG(QSERDES_COM_DEC_START_MODE0, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START1_MODE0, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START2_MODE0, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_COM_DIV_FRAC_START3_MODE0, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CP_CTRL_MODE0, 0x0b),
+ QMP_PHY_INIT_CFG(QSERDES_COM_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_COM_PLL_CCTRL_MODE0, 0x28),
+ QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_CTRL, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP1_MODE0, 0x15),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP2_MODE0, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP3_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_CORE_CLK_EN, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP_CFG, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_MAP, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_BG_TIMER, 0x0a),
+ /* SSC settings */
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_EN_CENTER, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_PER1, 0x31),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_PER2, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_ADJ_PER1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_ADJ_PER2, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_STEP_SIZE1, 0xde),
+ QMP_PHY_INIT_CFG(QSERDES_COM_SSC_STEP_SIZE2, 0x07),
+};
+
+static const struct qmp_phy_init_tbl msm8996_usb3_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_TX_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN, 0x45),
+ QMP_PHY_INIT_CFG(QSERDES_TX_RCV_DETECT_LVL_2, 0x12),
+ QMP_PHY_INIT_CFG(QSERDES_TX_LANE_MODE, 0x06),
+};
+
+static const struct qmp_phy_init_tbl msm8996_usb3_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_FASTLOCK_FO_GAIN, 0x0b),
+ QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_SO_GAIN, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL3, 0x4c),
+ QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL4, 0xbb),
+ QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x77),
+ QMP_PHY_INIT_CFG(QSERDES_RX_RX_OFFSET_ADAPTOR_CNTRL2, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_CNTRL, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_LVL, 0x18),
+ QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_DEGLITCH_CNTRL, 0x16),
+};
+
+static const struct qmp_phy_init_tbl msm8996_usb3_pcs_tbl[] = {
+ /* FLL settings */
+ QMP_PHY_INIT_CFG_L(QPHY_FLL_CNTRL2, 0x03),
+ QMP_PHY_INIT_CFG_L(QPHY_FLL_CNTRL1, 0x02),
+ QMP_PHY_INIT_CFG_L(QPHY_FLL_CNT_VAL_L, 0x09),
+ QMP_PHY_INIT_CFG_L(QPHY_FLL_CNT_VAL_H_TOL, 0x42),
+ QMP_PHY_INIT_CFG_L(QPHY_FLL_MAN_CODE, 0x85),
+
+ /* Lock Det settings */
+ QMP_PHY_INIT_CFG(QPHY_LOCK_DETECT_CONFIG1, 0xd1),
+ QMP_PHY_INIT_CFG(QPHY_LOCK_DETECT_CONFIG2, 0x1f),
+ QMP_PHY_INIT_CFG(QPHY_LOCK_DETECT_CONFIG3, 0x47),
+ QMP_PHY_INIT_CFG(QPHY_POWER_STATE_CONFIG2, 0x08),
+};
+
+/* struct qmp_phy_cfg - per-PHY initialization config */
+struct qmp_phy_cfg {
+ /* phy-type - PCIE/UFS/USB */
+ unsigned int type;
+ /* number of lanes provided by phy */
+ int nlanes;
+
+ /* Init sequence for PHY blocks - serdes, tx, rx, pcs */
+ const struct qmp_phy_init_tbl *serdes_tbl;
+ int serdes_tbl_num;
+ const struct qmp_phy_init_tbl *tx_tbl;
+ int tx_tbl_num;
+ const struct qmp_phy_init_tbl *rx_tbl;
+ int rx_tbl_num;
+ const struct qmp_phy_init_tbl *pcs_tbl;
+ int pcs_tbl_num;
+
+ /* clock ids to be requested */
+ const char * const *clk_list;
+ int num_clks;
+ /* resets to be requested */
+ const char * const *reset_list;
+ int num_resets;
+ /* regulators to be requested */
+ const char * const *vreg_list;
+ int num_vregs;
+
+ /* array of registers with different offsets */
+ const unsigned int *regs;
+
+ unsigned int start_ctrl;
+ unsigned int pwrdn_ctrl;
+ unsigned int mask_pcs_ready;
+ unsigned int mask_com_pcs_ready;
+
+ /* true, if PHY has a separate PHY_COM control block */
+ bool has_phy_com_ctrl;
+ /* true, if PHY has a reset for individual lanes */
+ bool has_lane_rst;
+ /* true, if PHY needs delay after POWER_DOWN */
+ bool has_pwrdn_delay;
+ /* power_down delay in usec */
+ int pwrdn_delay_min;
+ int pwrdn_delay_max;
+};
+
+/**
+ * struct qmp_phy - per-lane phy descriptor
+ *
+ * @phy: generic phy
+ * @tx: iomapped memory space for lane's tx
+ * @rx: iomapped memory space for lane's rx
+ * @pcs: iomapped memory space for lane's pcs
+ * @pipe_clk: pipe clock
+ * @index: lane index
+ * @qmp: QMP phy to which this lane belongs
+ * @lane_rst: lane's reset controller
+ */
+struct qmp_phy {
+ struct phy *phy;
+ void __iomem *tx;
+ void __iomem *rx;
+ void __iomem *pcs;
+ struct clk *pipe_clk;
+ unsigned int index;
+ struct qcom_qmp *qmp;
+ struct reset_control *lane_rst;
+};
+
+/**
+ * struct qcom_qmp - structure holding QMP phy block attributes
+ *
+ * @dev: device
+ * @serdes: iomapped memory space for phy's serdes
+ *
+ * @clks: array of clocks required by phy
+ * @resets: array of resets required by phy
+ * @vregs: regulator supplies bulk data
+ *
+ * @cfg: phy specific configuration
+ * @phys: array of per-lane phy descriptors
+ * @phy_mutex: mutex lock for PHY common block initialization
+ * @init_count: phy common block initialization count
+ */
+struct qcom_qmp {
+ struct device *dev;
+ void __iomem *serdes;
+
+ struct clk **clks;
+ struct reset_control **resets;
+ struct regulator_bulk_data *vregs;
+
+ const struct qmp_phy_cfg *cfg;
+ struct qmp_phy **phys;
+
+ struct mutex phy_mutex;
+ int init_count;
+};
+
+static inline void qphy_setbits(void __iomem *base, u32 offset, u32 val)
+{
+ u32 reg;
+
+ reg = readl(base + offset);
+ reg |= val;
+ writel(reg, base + offset);
+
+ /* make sure the above write has gone through */
+ readl(base + offset);
+}
+
+static inline void qphy_clrbits(void __iomem *base, u32 offset, u32 val)
+{
+ u32 reg;
+
+ reg = readl(base + offset);
+ reg &= ~val;
+ writel(reg, base + offset);
+
+ /* make sure the above write has gone through */
+ readl(base + offset);
+}
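The trailing readl() in both helpers flushes the posted write so that a following delay really starts after the register change has landed. The pattern shows up later in this file, for example when releasing the power-down control:

	/* excerpt of the pattern used in qcom_qmp_phy_init() below */
	qphy_setbits(pcs, QPHY_POWER_DOWN_CONTROL, cfg->pwrdn_ctrl);
	if (cfg->has_pwrdn_delay)
		usleep_range(cfg->pwrdn_delay_min, cfg->pwrdn_delay_max);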
+
+/* list of clocks required by phy */
+static const char * const msm8996_phy_clk_l[] = {
+ "aux", "cfg_ahb", "ref",
+};
+
+/* list of resets */
+static const char * const msm8996_pciephy_reset_l[] = {
+ "phy", "common", "cfg",
+};
+
+static const char * const msm8996_usb3phy_reset_l[] = {
+ "phy", "common",
+};
+
+/* list of regulators */
+static const char * const msm8996_phy_vreg_l[] = {
+ "vdda-phy", "vdda-pll",
+};
+
+static const struct qmp_phy_cfg msm8996_pciephy_cfg = {
+ .type = PHY_TYPE_PCIE,
+ .nlanes = 3,
+
+ .serdes_tbl = msm8996_pcie_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(msm8996_pcie_serdes_tbl),
+ .tx_tbl = msm8996_pcie_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(msm8996_pcie_tx_tbl),
+ .rx_tbl = msm8996_pcie_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(msm8996_pcie_rx_tbl),
+ .pcs_tbl = msm8996_pcie_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(msm8996_pcie_pcs_tbl),
+ .clk_list = msm8996_phy_clk_l,
+ .num_clks = ARRAY_SIZE(msm8996_phy_clk_l),
+ .reset_list = msm8996_pciephy_reset_l,
+ .num_resets = ARRAY_SIZE(msm8996_pciephy_reset_l),
+ .vreg_list = msm8996_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(msm8996_phy_vreg_l),
+ .regs = pciephy_regs_layout,
+
+ .start_ctrl = PCS_START | PLL_READY_GATE_EN,
+ .pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
+ .mask_com_pcs_ready = PCS_READY,
+
+ .has_phy_com_ctrl = true,
+ .has_lane_rst = true,
+ .has_pwrdn_delay = true,
+ .pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN,
+ .pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX,
+};
+
+static const struct qmp_phy_cfg msm8996_usb3phy_cfg = {
+ .type = PHY_TYPE_USB3,
+ .nlanes = 1,
+
+ .serdes_tbl = msm8996_usb3_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(msm8996_usb3_serdes_tbl),
+ .tx_tbl = msm8996_usb3_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(msm8996_usb3_tx_tbl),
+ .rx_tbl = msm8996_usb3_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(msm8996_usb3_rx_tbl),
+ .pcs_tbl = msm8996_usb3_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(msm8996_usb3_pcs_tbl),
+ .clk_list = msm8996_phy_clk_l,
+ .num_clks = ARRAY_SIZE(msm8996_phy_clk_l),
+ .reset_list = msm8996_usb3phy_reset_l,
+ .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
+ .vreg_list = msm8996_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(msm8996_phy_vreg_l),
+ .regs = usb3phy_regs_layout,
+
+ .start_ctrl = SERDES_START | PCS_START,
+ .pwrdn_ctrl = SW_PWRDN,
+ .mask_pcs_ready = PHYSTATUS,
+};
+
+static void qcom_qmp_phy_configure(void __iomem *base,
+ const unsigned int *regs,
+ const struct qmp_phy_init_tbl tbl[],
+ int num)
+{
+ int i;
+ const struct qmp_phy_init_tbl *t = tbl;
+
+ if (!t)
+ return;
+
+ for (i = 0; i < num; i++, t++) {
+ if (t->in_layout)
+ writel(t->val, base + regs[t->offset]);
+ else
+ writel(t->val, base + t->offset);
+ }
+}
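To make the in_layout dispatch concrete, using the tables defined above (illustrative expansion only): a QMP_PHY_INIT_CFG_L() entry is resolved through the per-PHY layout array, while a plain QMP_PHY_INIT_CFG() entry is written at its literal offset:

	/* QMP_PHY_INIT_CFG_L(QPHY_FLL_CNTRL2, 0x03) from msm8996_usb3_pcs_tbl */
	writel(0x03, pcs + usb3phy_regs_layout[QPHY_FLL_CNTRL2]);	/* pcs + 0xc4 */
	/* QMP_PHY_INIT_CFG(QPHY_POWER_STATE_CONFIG2, 0x08) from the same table */
	writel(0x08, pcs + QPHY_POWER_STATE_CONFIG2);			/* pcs + 0x64 */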
+
+static int qcom_qmp_phy_poweron(struct phy *phy)
+{
+ struct qmp_phy *qphy = phy_get_drvdata(phy);
+ struct qcom_qmp *qmp = qphy->qmp;
+ int num = qmp->cfg->num_vregs;
+ int ret;
+
+ dev_vdbg(&phy->dev, "Powering on QMP phy\n");
+
+ /* turn on regulator supplies */
+ ret = regulator_bulk_enable(num, qmp->vregs);
+ if (ret) {
+ dev_err(qmp->dev, "failed to enable regulators, err=%d\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(qphy->pipe_clk);
+ if (ret) {
+ dev_err(qmp->dev, "pipe_clk enable failed, err=%d\n", ret);
+ regulator_bulk_disable(num, qmp->vregs);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int qcom_qmp_phy_poweroff(struct phy *phy)
+{
+ struct qmp_phy *qphy = phy_get_drvdata(phy);
+ struct qcom_qmp *qmp = qphy->qmp;
+
+ clk_disable_unprepare(qphy->pipe_clk);
+
+ regulator_bulk_disable(qmp->cfg->num_vregs, qmp->vregs);
+
+ return 0;
+}
+
+static int qcom_qmp_phy_com_init(struct qcom_qmp *qmp)
+{
+ const struct qmp_phy_cfg *cfg = qmp->cfg;
+ void __iomem *serdes = qmp->serdes;
+ int ret, i;
+
+ mutex_lock(&qmp->phy_mutex);
+ if (qmp->init_count++) {
+ mutex_unlock(&qmp->phy_mutex);
+ return 0;
+ }
+
+ for (i = 0; i < cfg->num_resets; i++) {
+ ret = reset_control_deassert(qmp->resets[i]);
+ if (ret) {
+ dev_err(qmp->dev, "%s reset deassert failed\n",
+ qmp->cfg->reset_list[i]);
+ while (--i >= 0)
+ reset_control_assert(qmp->resets[i]);
+ goto err_rst;
+ }
+ }
+
+ if (cfg->has_phy_com_ctrl)
+ qphy_setbits(serdes, cfg->regs[QPHY_COM_POWER_DOWN_CONTROL],
+ SW_PWRDN);
+
+ /* Serdes configuration */
+ qcom_qmp_phy_configure(serdes, cfg->regs, cfg->serdes_tbl,
+ cfg->serdes_tbl_num);
+
+ if (cfg->has_phy_com_ctrl) {
+ void __iomem *status;
+ unsigned int mask, val;
+
+ qphy_clrbits(serdes, cfg->regs[QPHY_COM_SW_RESET], SW_RESET);
+ qphy_setbits(serdes, cfg->regs[QPHY_COM_START_CONTROL],
+ SERDES_START | PCS_START);
+
+ status = serdes + cfg->regs[QPHY_COM_PCS_READY_STATUS];
+ mask = cfg->mask_com_pcs_ready;
+
+ ret = readl_poll_timeout(status, val, (val & mask), 10,
+ PHY_INIT_COMPLETE_TIMEOUT);
+ if (ret) {
+ dev_err(qmp->dev,
+ "phy common block init timed-out\n");
+ goto err_com_init;
+ }
+ }
+
+ mutex_unlock(&qmp->phy_mutex);
+
+ return 0;
+
+err_com_init:
+ while (--i >= 0)
+ reset_control_assert(qmp->resets[i]);
+err_rst:
+ mutex_unlock(&qmp->phy_mutex);
+ return ret;
+}
+
+static int qcom_qmp_phy_com_exit(struct qcom_qmp *qmp)
+{
+ const struct qmp_phy_cfg *cfg = qmp->cfg;
+ void __iomem *serdes = qmp->serdes;
+ int i = cfg->num_resets;
+
+ mutex_lock(&qmp->phy_mutex);
+ if (--qmp->init_count) {
+ mutex_unlock(&qmp->phy_mutex);
+ return 0;
+ }
+
+ if (cfg->has_phy_com_ctrl) {
+ qphy_setbits(serdes, cfg->regs[QPHY_COM_START_CONTROL],
+ SERDES_START | PCS_START);
+ qphy_clrbits(serdes, cfg->regs[QPHY_COM_SW_RESET],
+ SW_RESET);
+ qphy_setbits(serdes, cfg->regs[QPHY_COM_POWER_DOWN_CONTROL],
+ SW_PWRDN);
+ }
+
+ while (--i >= 0)
+ reset_control_assert(qmp->resets[i]);
+
+ mutex_unlock(&qmp->phy_mutex);
+
+ return 0;
+}
+
+/* PHY Initialization */
+static int qcom_qmp_phy_init(struct phy *phy)
+{
+ struct qmp_phy *qphy = phy_get_drvdata(phy);
+ struct qcom_qmp *qmp = qphy->qmp;
+ const struct qmp_phy_cfg *cfg = qmp->cfg;
+ void __iomem *tx = qphy->tx;
+ void __iomem *rx = qphy->rx;
+ void __iomem *pcs = qphy->pcs;
+ void __iomem *status;
+ unsigned int mask, val;
+ int ret, i;
+
+ dev_vdbg(qmp->dev, "Initializing QMP phy\n");
+
+ for (i = 0; i < qmp->cfg->num_clks; i++) {
+ ret = clk_prepare_enable(qmp->clks[i]);
+ if (ret) {
+ dev_err(qmp->dev, "failed to enable %s clk, err=%d\n",
+ qmp->cfg->clk_list[i], ret);
+ while (--i >= 0)
+ clk_disable_unprepare(qmp->clks[i]);
+ return ret;
+ }
+ }
+
+ ret = qcom_qmp_phy_com_init(qmp);
+ if (ret)
+ goto err_com_init;
+
+ if (cfg->has_lane_rst) {
+ ret = reset_control_deassert(qphy->lane_rst);
+ if (ret) {
+ dev_err(qmp->dev, "lane%d reset deassert failed\n",
+ qphy->index);
+ goto err_lane_rst;
+ }
+ }
+
+ /* Tx, Rx, and PCS configurations */
+ qcom_qmp_phy_configure(tx, cfg->regs, cfg->tx_tbl, cfg->tx_tbl_num);
+ qcom_qmp_phy_configure(rx, cfg->regs, cfg->rx_tbl, cfg->rx_tbl_num);
+ qcom_qmp_phy_configure(pcs, cfg->regs, cfg->pcs_tbl, cfg->pcs_tbl_num);
+
+ /*
+ * Pull the PHY out of the POWER DOWN state.
+ * The power-down control is an active-low signal.
+ */
+ qphy_setbits(pcs, QPHY_POWER_DOWN_CONTROL, cfg->pwrdn_ctrl);
+
+ if (cfg->has_pwrdn_delay)
+ usleep_range(cfg->pwrdn_delay_min, cfg->pwrdn_delay_max);
+
+ /* start SerDes and Phy-Coding-Sublayer */
+ qphy_setbits(pcs, cfg->regs[QPHY_START_CTRL], cfg->start_ctrl);
+
+ /* Pull PHY out of reset state */
+ qphy_clrbits(pcs, cfg->regs[QPHY_SW_RESET], SW_RESET);
+
+ status = pcs + cfg->regs[QPHY_PCS_READY_STATUS];
+ mask = cfg->mask_pcs_ready;
+
+ ret = readl_poll_timeout(status, val, !(val & mask), 1,
+ PHY_INIT_COMPLETE_TIMEOUT);
+ if (ret) {
+ dev_err(qmp->dev, "phy initialization timed-out\n");
+ goto err_pcs_ready;
+ }
+
+ return ret;
+
+err_pcs_ready:
+ if (cfg->has_lane_rst)
+ reset_control_assert(qphy->lane_rst);
+err_lane_rst:
+ qcom_qmp_phy_com_exit(qmp);
+err_com_init:
+ while (--i >= 0)
+ clk_disable_unprepare(qmp->clks[i]);
+
+ return ret;
+}
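Condensed, the per-lane bring-up order implemented above is: enable the cfg clocks, run the refcounted common-block init, deassert the optional lane reset, program the tx/rx/pcs tables, release the active-low power-down control (with the optional settle delay), write start_ctrl, clear SW_RESET, then poll the PCS ready/status bit. As a descriptive comment it might read:

	/*
	 * Bring-up order: clocks -> common serdes block (refcounted) ->
	 * lane reset -> tx/rx/pcs init tables -> power-down release (+delay)
	 * -> start control -> SW_RESET release -> poll PCS ready status.
	 */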
+
+static int qcom_qmp_phy_exit(struct phy *phy)
+{
+ struct qmp_phy *qphy = phy_get_drvdata(phy);
+ struct qcom_qmp *qmp = qphy->qmp;
+ const struct qmp_phy_cfg *cfg = qmp->cfg;
+ int i = cfg->num_clks;
+
+ /* PHY reset */
+ qphy_setbits(qphy->pcs, cfg->regs[QPHY_SW_RESET], SW_RESET);
+
+ /* stop SerDes and Phy-Coding-Sublayer */
+ qphy_clrbits(qphy->pcs, cfg->regs[QPHY_START_CTRL], cfg->start_ctrl);
+
+ /* Put PHY into POWER DOWN state: active low */
+ qphy_clrbits(qphy->pcs, QPHY_POWER_DOWN_CONTROL, cfg->pwrdn_ctrl);
+
+ if (cfg->has_lane_rst)
+ reset_control_assert(qphy->lane_rst);
+
+ qcom_qmp_phy_com_exit(qmp);
+
+ while (--i >= 0)
+ clk_disable_unprepare(qmp->clks[i]);
+
+ return 0;
+}
+
+static int qcom_qmp_phy_vreg_init(struct device *dev)
+{
+ struct qcom_qmp *qmp = dev_get_drvdata(dev);
+ int num = qmp->cfg->num_vregs;
+ int i;
+
+ qmp->vregs = devm_kcalloc(dev, num, sizeof(*qmp->vregs), GFP_KERNEL);
+ if (!qmp->vregs)
+ return -ENOMEM;
+
+ for (i = 0; i < num; i++)
+ qmp->vregs[i].supply = qmp->cfg->vreg_list[i];
+
+ return devm_regulator_bulk_get(dev, num, qmp->vregs);
+}
+
+static int qcom_qmp_phy_reset_init(struct device *dev)
+{
+ struct qcom_qmp *qmp = dev_get_drvdata(dev);
+ int i;
+
+ qmp->resets = devm_kcalloc(dev, qmp->cfg->num_resets,
+ sizeof(*qmp->resets), GFP_KERNEL);
+ if (!qmp->resets)
+ return -ENOMEM;
+
+ for (i = 0; i < qmp->cfg->num_resets; i++) {
+ struct reset_control *rst;
+ const char *name = qmp->cfg->reset_list[i];
+
+ rst = devm_reset_control_get(dev, name);
+ if (IS_ERR(rst)) {
+ dev_err(dev, "failed to get %s reset\n", name);
+ return PTR_ERR(rst);
+ }
+ qmp->resets[i] = rst;
+ }
+
+ return 0;
+}
+
+static int qcom_qmp_phy_clk_init(struct device *dev)
+{
+ struct qcom_qmp *qmp = dev_get_drvdata(dev);
+ int ret, i;
+
+ qmp->clks = devm_kcalloc(dev, qmp->cfg->num_clks,
+ sizeof(*qmp->clks), GFP_KERNEL);
+ if (!qmp->clks)
+ return -ENOMEM;
+
+ for (i = 0; i < qmp->cfg->num_clks; i++) {
+ struct clk *_clk;
+ const char *name = qmp->cfg->clk_list[i];
+
+ _clk = devm_clk_get(dev, name);
+ if (IS_ERR(_clk)) {
+ ret = PTR_ERR(_clk);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "failed to get %s clk, %d\n",
+ name, ret);
+ return ret;
+ }
+ qmp->clks[i] = _clk;
+ }
+
+ return 0;
+}
+
+/*
+ * Register a fixed rate pipe clock.
+ *
+ * The <s>_pipe_clksrc generated by the PHY goes to the GCC, which
+ * gate-controls it. The <s>_pipe_clk coming out of the GCC is requested
+ * by the PHY driver for its operations.
+ * We register the <s>_pipe_clksrc here. The GCC driver takes care
+ * of assigning this <s>_pipe_clksrc as parent to <s>_pipe_clk.
+ * The picture below shows this relationship.
+ *
+ * +---------------+
+ * | PHY block |<<---------------------------------------+
+ * | | |
+ * | +-------+ | +-----+ |
+ * I/P---^-->| PLL |---^--->pipe_clksrc--->| GCC |--->pipe_clk---+
+ * clk | +-------+ | +-----+
+ * +---------------+
+ */
+static int phy_pipe_clk_register(struct qcom_qmp *qmp, int id)
+{
+ char name[24];
+ struct clk_fixed_rate *fixed;
+ struct clk_init_data init = { };
+
+ switch (qmp->cfg->type) {
+ case PHY_TYPE_USB3:
+ snprintf(name, sizeof(name), "usb3_phy_pipe_clk_src");
+ break;
+ case PHY_TYPE_PCIE:
+ snprintf(name, sizeof(name), "pcie_%d_pipe_clk_src", id);
+ break;
+ default:
+ /* not all phys register pipe clocks, so return success */
+ return 0;
+ }
+
+ fixed = devm_kzalloc(qmp->dev, sizeof(*fixed), GFP_KERNEL);
+ if (!fixed)
+ return -ENOMEM;
+
+ init.name = name;
+ init.ops = &clk_fixed_rate_ops;
+
+ /* controllers using QMP phys use 125MHz pipe clock interface */
+ fixed->fixed_rate = 125000000;
+ fixed->hw.init = &init;
+
+ return devm_clk_hw_register(qmp->dev, &fixed->hw);
+}
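On the consumer side, a controller (or this driver itself, via of_clk_get_by_name() in qcom_qmp_phy_create() below) simply requests its pipe clock by name and sees the fixed 125 MHz rate once the GCC has parented <s>_pipe_clk to the source registered here. A hedged sketch, with a hypothetical clock name:

	/* illustrative consumer; "pipe0" is a hypothetical clock-names entry */
	struct clk *pipe_clk = devm_clk_get(dev, "pipe0");

	if (!IS_ERR(pipe_clk))
		dev_dbg(dev, "pipe clk rate: %lu Hz\n", clk_get_rate(pipe_clk));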
+
+static const struct phy_ops qcom_qmp_phy_gen_ops = {
+ .init = qcom_qmp_phy_init,
+ .exit = qcom_qmp_phy_exit,
+ .power_on = qcom_qmp_phy_poweron,
+ .power_off = qcom_qmp_phy_poweroff,
+ .owner = THIS_MODULE,
+};
+
+static
+int qcom_qmp_phy_create(struct device *dev, struct device_node *np, int id)
+{
+ struct qcom_qmp *qmp = dev_get_drvdata(dev);
+ struct phy *generic_phy;
+ struct qmp_phy *qphy;
+ char prop_name[MAX_PROP_NAME];
+ int ret;
+
+ qphy = devm_kzalloc(dev, sizeof(*qphy), GFP_KERNEL);
+ if (!qphy)
+ return -ENOMEM;
+
+ /*
+ * Get memory resources for each phy lane:
+ * Resources are indexed as: tx -> 0; rx -> 1; pcs -> 2.
+ */
+ qphy->tx = of_iomap(np, 0);
+ if (!qphy->tx)
+ return -ENOMEM;
+
+ qphy->rx = of_iomap(np, 1);
+ if (!qphy->rx)
+ return -ENOMEM;
+
+ qphy->pcs = of_iomap(np, 2);
+ if (!qphy->pcs)
+ return -ENOMEM;
+
+ /*
+ * Get the PHY's pipe clock, if any. USB3 and PCIe are PIPE3-based
+ * PHYs, so they always have a pipe clock; return an error if it is
+ * missing for those types. For all other PHY types, initialize
+ * pipe_clk to NULL since they don't need it.
+ */
+ snprintf(prop_name, sizeof(prop_name), "pipe%d", id);
+ qphy->pipe_clk = of_clk_get_by_name(np, prop_name);
+ if (IS_ERR(qphy->pipe_clk)) {
+ if (qmp->cfg->type == PHY_TYPE_PCIE ||
+ qmp->cfg->type == PHY_TYPE_USB3) {
+ ret = PTR_ERR(qphy->pipe_clk);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev,
+ "failed to get lane%d pipe_clk, %d\n",
+ id, ret);
+ return ret;
+ }
+ qphy->pipe_clk = NULL;
+ }
+
+ /* Get lane reset, if any */
+ if (qmp->cfg->has_lane_rst) {
+ snprintf(prop_name, sizeof(prop_name), "lane%d", id);
+ qphy->lane_rst = of_reset_control_get(np, prop_name);
+ if (IS_ERR(qphy->lane_rst)) {
+ dev_err(dev, "failed to get lane%d reset\n", id);
+ return PTR_ERR(qphy->lane_rst);
+ }
+ }
+
+ generic_phy = devm_phy_create(dev, np, &qcom_qmp_phy_gen_ops);
+ if (IS_ERR(generic_phy)) {
+ ret = PTR_ERR(generic_phy);
+ dev_err(dev, "failed to create qphy %d\n", ret);
+ return ret;
+ }
+
+ qphy->phy = generic_phy;
+ qphy->index = id;
+ qphy->qmp = qmp;
+ qmp->phys[id] = qphy;
+ phy_set_drvdata(generic_phy, qphy);
+
+ return 0;
+}
+
+static const struct of_device_id qcom_qmp_phy_of_match_table[] = {
+ {
+ .compatible = "qcom,msm8996-qmp-pcie-phy",
+ .data = &msm8996_pciephy_cfg,
+ }, {
+ .compatible = "qcom,msm8996-qmp-usb3-phy",
+ .data = &msm8996_usb3phy_cfg,
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, qcom_qmp_phy_of_match_table);
+
+static int qcom_qmp_phy_probe(struct platform_device *pdev)
+{
+ struct qcom_qmp *qmp;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ struct device_node *child;
+ struct phy_provider *phy_provider;
+ void __iomem *base;
+ int num, id;
+ int ret;
+
+ qmp = devm_kzalloc(dev, sizeof(*qmp), GFP_KERNEL);
+ if (!qmp)
+ return -ENOMEM;
+
+ qmp->dev = dev;
+ dev_set_drvdata(dev, qmp);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ /* per PHY serdes; usually located at base address */
+ qmp->serdes = base;
+
+ mutex_init(&qmp->phy_mutex);
+
+ /* Get the specific init parameters of QMP phy */
+ qmp->cfg = of_device_get_match_data(dev);
+
+ ret = qcom_qmp_phy_clk_init(dev);
+ if (ret)
+ return ret;
+
+ ret = qcom_qmp_phy_reset_init(dev);
+ if (ret)
+ return ret;
+
+ ret = qcom_qmp_phy_vreg_init(dev);
+ if (ret) {
+ dev_err(dev, "failed to get regulator supplies\n");
+ return ret;
+ }
+
+ num = of_get_available_child_count(dev->of_node);
+ /* do we have a rogue child node? */
+ if (num > qmp->cfg->nlanes)
+ return -EINVAL;
+
+ qmp->phys = devm_kcalloc(dev, num, sizeof(*qmp->phys), GFP_KERNEL);
+ if (!qmp->phys)
+ return -ENOMEM;
+
+ id = 0;
+ for_each_available_child_of_node(dev->of_node, child) {
+ /* Create per-lane phy */
+ ret = qcom_qmp_phy_create(dev, child, id);
+ if (ret) {
+ dev_err(dev, "failed to create lane%d phy, %d\n",
+ id, ret);
+ return ret;
+ }
+
+ /*
+ * Register the pipe clock provided by the phy.
+ * See the phy_pipe_clk_register() description for details on
+ * this pipe clock.
+ */
+ ret = phy_pipe_clk_register(qmp, id);
+ if (ret) {
+ dev_err(qmp->dev,
+ "failed to register pipe clock source\n");
+ return ret;
+ }
+ id++;
+ }
+
+ phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+ if (!IS_ERR(phy_provider))
+ dev_info(dev, "Registered Qcom-QMP phy\n");
+
+ return PTR_ERR_OR_ZERO(phy_provider);
+}
+
+static struct platform_driver qcom_qmp_phy_driver = {
+ .probe = qcom_qmp_phy_probe,
+ .driver = {
+ .name = "qcom-qmp-phy",
+ .of_match_table = qcom_qmp_phy_of_match_table,
+ },
+};
+
+module_platform_driver(qcom_qmp_phy_driver);
+
+MODULE_AUTHOR("Vivek Gautam <vivek.gautam@codeaurora.org>");
+MODULE_DESCRIPTION("Qualcomm QMP PHY driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/phy-qcom-qusb2.c b/drivers/phy/phy-qcom-qusb2.c
new file mode 100644
index 000000000000..6c575244c0fb
--- /dev/null
+++ b/drivers/phy/phy-qcom-qusb2.c
@@ -0,0 +1,493 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/nvmem-consumer.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+
+#define QUSB2PHY_PLL_TEST 0x04
+#define CLK_REF_SEL BIT(7)
+
+#define QUSB2PHY_PLL_TUNE 0x08
+#define QUSB2PHY_PLL_USER_CTL1 0x0c
+#define QUSB2PHY_PLL_USER_CTL2 0x10
+#define QUSB2PHY_PLL_AUTOPGM_CTL1 0x1c
+#define QUSB2PHY_PLL_PWR_CTRL 0x18
+
+#define QUSB2PHY_PLL_STATUS 0x38
+#define PLL_LOCKED BIT(5)
+
+#define QUSB2PHY_PORT_TUNE1 0x80
+#define QUSB2PHY_PORT_TUNE2 0x84
+#define QUSB2PHY_PORT_TUNE3 0x88
+#define QUSB2PHY_PORT_TUNE4 0x8c
+#define QUSB2PHY_PORT_TUNE5 0x90
+#define QUSB2PHY_PORT_TEST2 0x9c
+
+#define QUSB2PHY_PORT_POWERDOWN 0xb4
+#define CLAMP_N_EN BIT(5)
+#define FREEZIO_N BIT(1)
+#define POWER_DOWN BIT(0)
+
+#define QUSB2PHY_REFCLK_ENABLE BIT(0)
+
+#define PHY_CLK_SCHEME_SEL BIT(0)
+
+struct qusb2_phy_init_tbl {
+ unsigned int offset;
+ unsigned int val;
+};
+
+#define QUSB2_PHY_INIT_CFG(o, v) \
+ { \
+ .offset = o, \
+ .val = v, \
+ }
+
+static const struct qusb2_phy_init_tbl msm8996_init_tbl[] = {
+ QUSB2_PHY_INIT_CFG(QUSB2PHY_PORT_TUNE1, 0xf8),
+ QUSB2_PHY_INIT_CFG(QUSB2PHY_PORT_TUNE2, 0xb3),
+ QUSB2_PHY_INIT_CFG(QUSB2PHY_PORT_TUNE3, 0x83),
+ QUSB2_PHY_INIT_CFG(QUSB2PHY_PORT_TUNE4, 0xc0),
+ QUSB2_PHY_INIT_CFG(QUSB2PHY_PLL_TUNE, 0x30),
+ QUSB2_PHY_INIT_CFG(QUSB2PHY_PLL_USER_CTL1, 0x79),
+ QUSB2_PHY_INIT_CFG(QUSB2PHY_PLL_USER_CTL2, 0x21),
+ QUSB2_PHY_INIT_CFG(QUSB2PHY_PORT_TEST2, 0x14),
+ QUSB2_PHY_INIT_CFG(QUSB2PHY_PLL_AUTOPGM_CTL1, 0x9f),
+ QUSB2_PHY_INIT_CFG(QUSB2PHY_PLL_PWR_CTRL, 0x00),
+};
+
+struct qusb2_phy_cfg {
+ const struct qusb2_phy_init_tbl *tbl;
+ /* number of entries in the table */
+ unsigned int tbl_num;
+ /* offset to PHY_CLK_SCHEME register in TCSR map */
+ unsigned int clk_scheme_offset;
+};
+
+static const struct qusb2_phy_cfg msm8996_phy_cfg = {
+ .tbl = msm8996_init_tbl,
+ .tbl_num = ARRAY_SIZE(msm8996_init_tbl),
+};
+
+static const char * const qusb2_phy_vreg_names[] = {
+ "vdda-pll", "vdda-phy-dpdm",
+};
+
+#define QUSB2_NUM_VREGS ARRAY_SIZE(qusb2_phy_vreg_names)
+
+/**
+ * struct qusb2_phy - structure holding qusb2 phy attributes
+ *
+ * @phy: generic phy
+ * @base: iomapped memory space for qusb2 phy
+ *
+ * @cfg_ahb_clk: AHB2PHY interface clock
+ * @ref_clk: phy reference clock
+ * @iface_clk: phy interface clock
+ * @phy_reset: phy reset control
+ * @vregs: regulator supplies bulk data
+ *
+ * @tcsr: TCSR syscon register map
+ * @cell: nvmem cell containing phy tuning value
+ *
+ * @cfg: phy config data
+ * @has_se_clk_scheme: indicate if PHY has single-ended ref clock scheme
+ */
+struct qusb2_phy {
+ struct phy *phy;
+ void __iomem *base;
+
+ struct clk *cfg_ahb_clk;
+ struct clk *ref_clk;
+ struct clk *iface_clk;
+ struct reset_control *phy_reset;
+ struct regulator_bulk_data vregs[QUSB2_NUM_VREGS];
+
+ struct regmap *tcsr;
+ struct nvmem_cell *cell;
+
+ const struct qusb2_phy_cfg *cfg;
+ bool has_se_clk_scheme;
+};
+
+static inline void qusb2_setbits(void __iomem *base, u32 offset, u32 val)
+{
+ u32 reg;
+
+ reg = readl(base + offset);
+ reg |= val;
+ writel(reg, base + offset);
+
+ /* Ensure above write is completed */
+ readl(base + offset);
+}
+
+static inline void qusb2_clrbits(void __iomem *base, u32 offset, u32 val)
+{
+ u32 reg;
+
+ reg = readl(base + offset);
+ reg &= ~val;
+ writel(reg, base + offset);
+
+ /* Ensure above write is completed */
+ readl(base + offset);
+}
+
+static inline
+void qcom_qusb2_phy_configure(void __iomem *base,
+ const struct qusb2_phy_init_tbl tbl[], int num)
+{
+ int i;
+
+ for (i = 0; i < num; i++)
+ writel(tbl[i].val, base + tbl[i].offset);
+}
+
+/*
+ * Fetches the HS Tx tuning value from nvmem and sets the
+ * QUSB2PHY_PORT_TUNE2 register.
+ * On error, skip writing the value and keep the default.
+ */
+static void qusb2_phy_set_tune2_param(struct qusb2_phy *qphy)
+{
+ struct device *dev = &qphy->phy->dev;
+ u8 *val;
+
+ /*
+ * Read the efuse register holding the high nibble of the TUNE2
+ * parameter. If the register reads 0x0, or if no valid efuse
+ * setting is found, keep the default high nibble of 0xB that was
+ * already programmed while configuring the phy.
+ */
+ val = nvmem_cell_read(qphy->cell, NULL);
+ if (IS_ERR(val) || !val[0]) {
+ dev_dbg(dev, "failed to read a valid hs-tx trim value\n");
+ return;
+ }
+
+ /* Fused TUNE2 value is the higher nibble only */
+ qusb2_setbits(qphy->base, QUSB2PHY_PORT_TUNE2, val[0] << 0x4);
+}
+
+static int qusb2_phy_poweron(struct phy *phy)
+{
+ struct qusb2_phy *qphy = phy_get_drvdata(phy);
+ int num = ARRAY_SIZE(qphy->vregs);
+ int ret;
+
+ dev_vdbg(&phy->dev, "%s(): Powering-on QUSB2 phy\n", __func__);
+
+ /* turn on regulator supplies */
+ ret = regulator_bulk_enable(num, qphy->vregs);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(qphy->iface_clk);
+ if (ret) {
+ dev_err(&phy->dev, "failed to enable iface_clk, %d\n", ret);
+ regulator_bulk_disable(num, qphy->vregs);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int qusb2_phy_poweroff(struct phy *phy)
+{
+ struct qusb2_phy *qphy = phy_get_drvdata(phy);
+
+ clk_disable_unprepare(qphy->iface_clk);
+
+ regulator_bulk_disable(ARRAY_SIZE(qphy->vregs), qphy->vregs);
+
+ return 0;
+}
+
+static int qusb2_phy_init(struct phy *phy)
+{
+ struct qusb2_phy *qphy = phy_get_drvdata(phy);
+ unsigned int val;
+ unsigned int clk_scheme;
+ int ret;
+
+ dev_vdbg(&phy->dev, "%s(): Initializing QUSB2 phy\n", __func__);
+
+ /* enable ahb interface clock to program phy */
+ ret = clk_prepare_enable(qphy->cfg_ahb_clk);
+ if (ret) {
+ dev_err(&phy->dev, "failed to enable cfg ahb clock, %d\n", ret);
+ return ret;
+ }
+
+ /* Perform phy reset */
+ ret = reset_control_assert(qphy->phy_reset);
+ if (ret) {
+ dev_err(&phy->dev, "failed to assert phy_reset, %d\n", ret);
+ goto disable_ahb_clk;
+ }
+
+ /* 100 us delay to keep PHY in reset mode */
+ usleep_range(100, 150);
+
+ ret = reset_control_deassert(qphy->phy_reset);
+ if (ret) {
+ dev_err(&phy->dev, "failed to de-assert phy_reset, %d\n", ret);
+ goto disable_ahb_clk;
+ }
+
+ /* Disable the PHY */
+ qusb2_setbits(qphy->base, QUSB2PHY_PORT_POWERDOWN,
+ CLAMP_N_EN | FREEZIO_N | POWER_DOWN);
+
+ /* save reset value to override reference clock scheme later */
+ val = readl(qphy->base + QUSB2PHY_PLL_TEST);
+
+ qcom_qusb2_phy_configure(qphy->base, qphy->cfg->tbl,
+ qphy->cfg->tbl_num);
+
+ /* Set efuse value for tuning the PHY */
+ qusb2_phy_set_tune2_param(qphy);
+
+ /* Enable the PHY */
+ qusb2_clrbits(qphy->base, QUSB2PHY_PORT_POWERDOWN, POWER_DOWN);
+
+ /* Required to get phy pll lock successfully */
+ usleep_range(150, 160);
+
+ /* Default is single-ended clock on msm8996 */
+ qphy->has_se_clk_scheme = true;
+ /*
+ * Read the TCSR_PHY_CLK_SCHEME register to check whether the
+ * single-ended clock scheme is selected. If so, skip the
+ * differential ref_clk and use the single-ended clock; otherwise
+ * use the differential ref_clk only.
+ */
+ if (qphy->tcsr) {
+ ret = regmap_read(qphy->tcsr, qphy->cfg->clk_scheme_offset,
+ &clk_scheme);
+ if (ret) {
+ dev_err(&phy->dev, "failed to read clk scheme reg\n");
+ goto assert_phy_reset;
+ }
+
+ /* is it a differential clock scheme? */
+ if (!(clk_scheme & PHY_CLK_SCHEME_SEL)) {
+ dev_vdbg(&phy->dev, "%s(): select differential clk\n",
+ __func__);
+ qphy->has_se_clk_scheme = false;
+ } else {
+ dev_vdbg(&phy->dev, "%s(): select single-ended clk\n",
+ __func__);
+ }
+ }
+
+ if (!qphy->has_se_clk_scheme) {
+ val &= ~CLK_REF_SEL;
+ ret = clk_prepare_enable(qphy->ref_clk);
+ if (ret) {
+ dev_err(&phy->dev, "failed to enable ref clk, %d\n",
+ ret);
+ goto assert_phy_reset;
+ }
+ } else {
+ val |= CLK_REF_SEL;
+ }
+
+ writel(val, qphy->base + QUSB2PHY_PLL_TEST);
+
+ /* ensure above write is completed */
+ readl(qphy->base + QUSB2PHY_PLL_TEST);
+
+ /* Required to get phy pll lock successfully */
+ usleep_range(100, 110);
+
+ val = readb(qphy->base + QUSB2PHY_PLL_STATUS);
+ if (!(val & PLL_LOCKED)) {
+ dev_err(&phy->dev,
+ "QUSB2PHY pll lock failed: status reg = %x\n", val);
+ ret = -EBUSY;
+ goto disable_ref_clk;
+ }
+
+ return 0;
+
+disable_ref_clk:
+ if (!qphy->has_se_clk_scheme)
+ clk_disable_unprepare(qphy->ref_clk);
+assert_phy_reset:
+ reset_control_assert(qphy->phy_reset);
+disable_ahb_clk:
+ clk_disable_unprepare(qphy->cfg_ahb_clk);
+ return ret;
+}
+
+static int qusb2_phy_exit(struct phy *phy)
+{
+ struct qusb2_phy *qphy = phy_get_drvdata(phy);
+
+ /* Disable the PHY */
+ qusb2_setbits(qphy->base, QUSB2PHY_PORT_POWERDOWN,
+ CLAMP_N_EN | FREEZIO_N | POWER_DOWN);
+
+ if (!qphy->has_se_clk_scheme)
+ clk_disable_unprepare(qphy->ref_clk);
+
+ reset_control_assert(qphy->phy_reset);
+
+ clk_disable_unprepare(qphy->cfg_ahb_clk);
+
+ return 0;
+}
+
+static const struct phy_ops qusb2_phy_gen_ops = {
+ .init = qusb2_phy_init,
+ .exit = qusb2_phy_exit,
+ .power_on = qusb2_phy_poweron,
+ .power_off = qusb2_phy_poweroff,
+ .owner = THIS_MODULE,
+};
+
+static const struct of_device_id qusb2_phy_of_match_table[] = {
+ {
+ .compatible = "qcom,msm8996-qusb2-phy",
+ .data = &msm8996_phy_cfg,
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, qusb2_phy_of_match_table);
+
+static int qusb2_phy_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct qusb2_phy *qphy;
+ struct phy_provider *phy_provider;
+ struct phy *generic_phy;
+ struct resource *res;
+ int ret, i;
+ int num;
+
+ qphy = devm_kzalloc(dev, sizeof(*qphy), GFP_KERNEL);
+ if (!qphy)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ qphy->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(qphy->base))
+ return PTR_ERR(qphy->base);
+
+ qphy->cfg_ahb_clk = devm_clk_get(dev, "cfg_ahb");
+ if (IS_ERR(qphy->cfg_ahb_clk)) {
+ ret = PTR_ERR(qphy->cfg_ahb_clk);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "failed to get cfg ahb clk, %d\n", ret);
+ return ret;
+ }
+
+ qphy->ref_clk = devm_clk_get(dev, "ref");
+ if (IS_ERR(qphy->ref_clk)) {
+ ret = PTR_ERR(qphy->ref_clk);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "failed to get ref clk, %d\n", ret);
+ return ret;
+ }
+
+ qphy->iface_clk = devm_clk_get(dev, "iface");
+ if (IS_ERR(qphy->iface_clk)) {
+ ret = PTR_ERR(qphy->iface_clk);
+ if (ret == -EPROBE_DEFER)
+ return ret;
+ qphy->iface_clk = NULL;
+ dev_dbg(dev, "failed to get iface clk, %d\n", ret);
+ }
+
+ qphy->phy_reset = devm_reset_control_get_by_index(&pdev->dev, 0);
+ if (IS_ERR(qphy->phy_reset)) {
+ dev_err(dev, "failed to get phy core reset\n");
+ return PTR_ERR(qphy->phy_reset);
+ }
+
+ num = ARRAY_SIZE(qphy->vregs);
+ for (i = 0; i < num; i++)
+ qphy->vregs[i].supply = qusb2_phy_vreg_names[i];
+
+ ret = devm_regulator_bulk_get(dev, num, qphy->vregs);
+ if (ret) {
+ dev_err(dev, "failed to get regulator supplies\n");
+ return ret;
+ }
+
+ /* Get the specific init parameters of QUSB2 phy */
+ qphy->cfg = of_device_get_match_data(dev);
+
+ qphy->tcsr = syscon_regmap_lookup_by_phandle(dev->of_node,
+ "qcom,tcsr-syscon");
+ if (IS_ERR(qphy->tcsr)) {
+ dev_dbg(dev, "failed to lookup TCSR regmap\n");
+ qphy->tcsr = NULL;
+ }
+
+ qphy->cell = devm_nvmem_cell_get(dev, NULL);
+ if (IS_ERR(qphy->cell)) {
+ if (PTR_ERR(qphy->cell) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ qphy->cell = NULL;
+ dev_dbg(dev, "failed to lookup tune2 hstx trim value\n");
+ }
+
+ generic_phy = devm_phy_create(dev, NULL, &qusb2_phy_gen_ops);
+ if (IS_ERR(generic_phy)) {
+ ret = PTR_ERR(generic_phy);
+ dev_err(dev, "failed to create phy, %d\n", ret);
+ return ret;
+ }
+ qphy->phy = generic_phy;
+
+ dev_set_drvdata(dev, qphy);
+ phy_set_drvdata(generic_phy, qphy);
+
+ phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+ if (!IS_ERR(phy_provider))
+ dev_info(dev, "Registered Qcom-QUSB2 phy\n");
+
+ return PTR_ERR_OR_ZERO(phy_provider);
+}
+
+static struct platform_driver qusb2_phy_driver = {
+ .probe = qusb2_phy_probe,
+ .driver = {
+ .name = "qcom-qusb2-phy",
+ .of_match_table = qusb2_phy_of_match_table,
+ },
+};
+
+module_platform_driver(qusb2_phy_driver);
+
+MODULE_AUTHOR("Vivek Gautam <vivek.gautam@codeaurora.org>");
+MODULE_DESCRIPTION("Qualcomm QUSB2 PHY driver");
+MODULE_LICENSE("GPL v2");
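(For context; not part of this patch.) A controller driver consumes either of the PHY drivers above through the generic PHY framework, which is what ends up calling the phy_ops registered here. A minimal consumer sketch, where the "usb2-phy" name and the function below are assumptions for illustration:

static int example_enable_usb2_phy(struct device *dev)
{
	struct phy *phy;
	int ret;

	/* resolves the consumer node's "phys"/"phy-names" properties */
	phy = devm_phy_get(dev, "usb2-phy");
	if (IS_ERR(phy))
		return PTR_ERR(phy);

	ret = phy_init(phy);		/* ends up in qusb2_phy_init() */
	if (ret)
		return ret;

	ret = phy_power_on(phy);	/* ends up in qusb2_phy_poweron() */
	if (ret)
		phy_exit(phy);

	return ret;
}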
diff --git a/drivers/phy/phy-rcar-gen3-usb2.c b/drivers/phy/phy-rcar-gen3-usb2.c
index afb4d048d3e9..54c34298a000 100644
--- a/drivers/phy/phy-rcar-gen3-usb2.c
+++ b/drivers/phy/phy-rcar-gen3-usb2.c
@@ -20,6 +20,7 @@
#include <linux/of_address.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
#include <linux/workqueue.h>
@@ -395,7 +396,7 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev)
struct rcar_gen3_chan *channel;
struct phy_provider *provider;
struct resource *res;
- int irq;
+ int irq, ret = 0;
if (!dev->of_node) {
dev_err(dev, "This driver needs device tree\n");
@@ -434,17 +435,24 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev)
}
}
- /* devm_phy_create() will call pm_runtime_enable(dev); */
+ /*
+ * devm_phy_create() will call pm_runtime_enable(&phy->dev);
+ * After that, the phy-core will manage runtime PM for this device.
+ */
+ pm_runtime_enable(dev);
channel->phy = devm_phy_create(dev, NULL, &rcar_gen3_phy_usb2_ops);
if (IS_ERR(channel->phy)) {
dev_err(dev, "Failed to create USB2 PHY\n");
- return PTR_ERR(channel->phy);
+ ret = PTR_ERR(channel->phy);
+ goto error;
}
channel->vbus = devm_regulator_get_optional(dev, "vbus");
if (IS_ERR(channel->vbus)) {
- if (PTR_ERR(channel->vbus) == -EPROBE_DEFER)
- return PTR_ERR(channel->vbus);
+ if (PTR_ERR(channel->vbus) == -EPROBE_DEFER) {
+ ret = PTR_ERR(channel->vbus);
+ goto error;
+ }
channel->vbus = NULL;
}
@@ -454,15 +462,22 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev)
provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
if (IS_ERR(provider)) {
dev_err(dev, "Failed to register PHY provider\n");
+ ret = PTR_ERR(provider);
+ goto error;
} else if (channel->has_otg) {
int ret;
ret = device_create_file(dev, &dev_attr_role);
if (ret < 0)
- return ret;
+ goto error;
}
- return PTR_ERR_OR_ZERO(provider);
+ return 0;
+
+error:
+ pm_runtime_disable(dev);
+
+ return ret;
}
static int rcar_gen3_phy_usb2_remove(struct platform_device *pdev)
@@ -472,6 +487,8 @@ static int rcar_gen3_phy_usb2_remove(struct platform_device *pdev)
if (channel->has_otg)
device_remove_file(&pdev->dev, &dev_attr_role);
+ pm_runtime_disable(&pdev->dev);
+
return 0;
};
diff --git a/drivers/phy/phy-rockchip-inno-usb2.c b/drivers/phy/phy-rockchip-inno-usb2.c
index 4ea95c28a66f..8efe78a49916 100644
--- a/drivers/phy/phy-rockchip-inno-usb2.c
+++ b/drivers/phy/phy-rockchip-inno-usb2.c
@@ -554,8 +554,7 @@ static void rockchip_usb2phy_otg_sm_work(struct work_struct *work)
case USB_CHG_STATE_DETECTED:
switch (rphy->chg_type) {
case POWER_SUPPLY_TYPE_USB:
- dev_dbg(&rport->phy->dev,
- "sdp cable is connecetd\n");
+ dev_dbg(&rport->phy->dev, "sdp cable is connected\n");
rockchip_usb2phy_power_on(rport->phy);
rport->state = OTG_STATE_B_PERIPHERAL;
notify_charger = true;
@@ -563,16 +562,14 @@ static void rockchip_usb2phy_otg_sm_work(struct work_struct *work)
cable = EXTCON_CHG_USB_SDP;
break;
case POWER_SUPPLY_TYPE_USB_DCP:
- dev_dbg(&rport->phy->dev,
- "dcp cable is connecetd\n");
+ dev_dbg(&rport->phy->dev, "dcp cable is connected\n");
rockchip_usb2phy_power_off(rport->phy);
notify_charger = true;
sch_work = true;
cable = EXTCON_CHG_USB_DCP;
break;
case POWER_SUPPLY_TYPE_USB_CDP:
- dev_dbg(&rport->phy->dev,
- "cdp cable is connecetd\n");
+ dev_dbg(&rport->phy->dev, "cdp cable is connected\n");
rockchip_usb2phy_power_on(rport->phy);
rport->state = OTG_STATE_B_PERIPHERAL;
notify_charger = true;
@@ -1141,6 +1138,49 @@ disable_clks:
return ret;
}
+static const struct rockchip_usb2phy_cfg rk3328_phy_cfgs[] = {
+ {
+ .reg = 0x100,
+ .num_ports = 2,
+ .clkout_ctl = { 0x108, 4, 4, 1, 0 },
+ .port_cfgs = {
+ [USB2PHY_PORT_OTG] = {
+ .phy_sus = { 0x0100, 15, 0, 0, 0x1d1 },
+ .bvalid_det_en = { 0x0110, 2, 2, 0, 1 },
+ .bvalid_det_st = { 0x0114, 2, 2, 0, 1 },
+ .bvalid_det_clr = { 0x0118, 2, 2, 0, 1 },
+ .ls_det_en = { 0x0110, 0, 0, 0, 1 },
+ .ls_det_st = { 0x0114, 0, 0, 0, 1 },
+ .ls_det_clr = { 0x0118, 0, 0, 0, 1 },
+ .utmi_avalid = { 0x0120, 10, 10, 0, 1 },
+ .utmi_bvalid = { 0x0120, 9, 9, 0, 1 },
+ .utmi_ls = { 0x0120, 5, 4, 0, 1 },
+ },
+ [USB2PHY_PORT_HOST] = {
+ .phy_sus = { 0x104, 15, 0, 0, 0x1d1 },
+ .ls_det_en = { 0x110, 1, 1, 0, 1 },
+ .ls_det_st = { 0x114, 1, 1, 0, 1 },
+ .ls_det_clr = { 0x118, 1, 1, 0, 1 },
+ .utmi_ls = { 0x120, 17, 16, 0, 1 },
+ .utmi_hstdet = { 0x120, 19, 19, 0, 1 }
+ }
+ },
+ .chg_det = {
+ .opmode = { 0x0100, 3, 0, 5, 1 },
+ .cp_det = { 0x0120, 24, 24, 0, 1 },
+ .dcp_det = { 0x0120, 23, 23, 0, 1 },
+ .dp_det = { 0x0120, 25, 25, 0, 1 },
+ .idm_sink_en = { 0x0108, 8, 8, 0, 1 },
+ .idp_sink_en = { 0x0108, 7, 7, 0, 1 },
+ .idp_src_en = { 0x0108, 9, 9, 0, 1 },
+ .rdm_pdwn_en = { 0x0108, 10, 10, 0, 1 },
+ .vdm_src_en = { 0x0108, 12, 12, 0, 1 },
+ .vdp_src_en = { 0x0108, 11, 11, 0, 1 },
+ },
+ },
+ { /* sentinel */ }
+};
+
static const struct rockchip_usb2phy_cfg rk3366_phy_cfgs[] = {
{
.reg = 0x700,
@@ -1223,6 +1263,7 @@ static const struct rockchip_usb2phy_cfg rk3399_phy_cfgs[] = {
};
static const struct of_device_id rockchip_usb2phy_dt_match[] = {
+ { .compatible = "rockchip,rk3328-usb2phy", .data = &rk3328_phy_cfgs },
{ .compatible = "rockchip,rk3366-usb2phy", .data = &rk3366_phy_cfgs },
{ .compatible = "rockchip,rk3399-usb2phy", .data = &rk3399_phy_cfgs },
{}
diff --git a/drivers/phy/phy-rockchip-usb.c b/drivers/phy/phy-rockchip-usb.c
index 734987fa0ad7..3378eeb7a562 100644
--- a/drivers/phy/phy-rockchip-usb.c
+++ b/drivers/phy/phy-rockchip-usb.c
@@ -66,6 +66,7 @@ struct rockchip_usb_phy {
struct phy *phy;
bool uart_enabled;
struct reset_control *reset;
+ struct regulator *vbus;
};
static int rockchip_usb_phy_power(struct rockchip_usb_phy *phy,
@@ -88,6 +89,9 @@ static void rockchip_usb_phy480m_disable(struct clk_hw *hw)
struct rockchip_usb_phy,
clk480m_hw);
+ if (phy->vbus)
+ regulator_disable(phy->vbus);
+
/* Power down usb phy analog blocks by set siddq 1 */
rockchip_usb_phy_power(phy, 1);
}
@@ -143,6 +147,14 @@ static int rockchip_usb_phy_power_on(struct phy *_phy)
if (phy->uart_enabled)
return -EBUSY;
+ if (phy->vbus) {
+ int ret;
+
+ ret = regulator_enable(phy->vbus);
+ if (ret)
+ return ret;
+ }
+
return clk_prepare_enable(phy->clk480m);
}
@@ -268,6 +280,13 @@ static int rockchip_usb_phy_init(struct rockchip_usb_phy_base *base,
}
phy_set_drvdata(rk_phy->phy, rk_phy);
+ rk_phy->vbus = devm_regulator_get_optional(&rk_phy->phy->dev, "vbus");
+ if (IS_ERR(rk_phy->vbus)) {
+ if (PTR_ERR(rk_phy->vbus) == -EPROBE_DEFER)
+ return PTR_ERR(rk_phy->vbus);
+ rk_phy->vbus = NULL;
+ }
+
/*
* When acting as uart-pipe, just keep clock on otherwise
* only power up usb phy when it use, so disable it when init
diff --git a/drivers/phy/phy-sun4i-usb.c b/drivers/phy/phy-sun4i-usb.c
index a21b5f24a340..bbf06cfe5898 100644
--- a/drivers/phy/phy-sun4i-usb.c
+++ b/drivers/phy/phy-sun4i-usb.c
@@ -49,12 +49,14 @@
#define REG_PHYBIST 0x08
#define REG_PHYTUNE 0x0c
#define REG_PHYCTL_A33 0x10
-#define REG_PHY_UNK_H3 0x20
+#define REG_PHY_OTGCTL 0x20
#define REG_PMU_UNK1 0x10
#define PHYCTL_DATA BIT(7)
+#define OTGCTL_ROUTE_MUSB BIT(0)
+
#define SUNXI_AHB_ICHR8_EN BIT(10)
#define SUNXI_AHB_INCR4_BURST_EN BIT(9)
#define SUNXI_AHB_INCRX_ALIGN_EN BIT(8)
@@ -110,6 +112,7 @@ struct sun4i_usb_phy_cfg {
u8 phyctl_offset;
bool dedicated_clocks;
bool enable_pmu_unk1;
+ bool phy0_dual_route;
};
struct sun4i_usb_phy_data {
@@ -188,10 +191,8 @@ static void sun4i_usb_phy_write(struct sun4i_usb_phy *phy, u32 addr, u32 data,
spin_lock_irqsave(&phy_data->reg_lock, flags);
- if (phy_data->cfg->type == sun8i_a33_phy ||
- phy_data->cfg->type == sun50i_a64_phy ||
- phy_data->cfg->type == sun8i_v3s_phy) {
- /* A33 or A64 needs us to set phyctl to 0 explicitly */
+ if (phy_data->cfg->phyctl_offset == REG_PHYCTL_A33) {
+ /* SoCs newer than A33 need us to set phyctl to 0 explicitly */
writel(0, phyctl);
}
@@ -271,23 +272,16 @@ static int sun4i_usb_phy_init(struct phy *_phy)
writel(val & ~2, phy->pmu + REG_PMU_UNK1);
}
- if (data->cfg->type == sun8i_h3_phy) {
- if (phy->index == 0) {
- val = readl(data->base + REG_PHY_UNK_H3);
- writel(val & ~1, data->base + REG_PHY_UNK_H3);
- }
- } else {
- /* Enable USB 45 Ohm resistor calibration */
- if (phy->index == 0)
- sun4i_usb_phy_write(phy, PHY_RES45_CAL_EN, 0x01, 1);
+ /* Enable USB 45 Ohm resistor calibration */
+ if (phy->index == 0)
+ sun4i_usb_phy_write(phy, PHY_RES45_CAL_EN, 0x01, 1);
- /* Adjust PHY's magnitude and rate */
- sun4i_usb_phy_write(phy, PHY_TX_AMPLITUDE_TUNE, 0x14, 5);
+ /* Adjust PHY's magnitude and rate */
+ sun4i_usb_phy_write(phy, PHY_TX_AMPLITUDE_TUNE, 0x14, 5);
- /* Disconnect threshold adjustment */
- sun4i_usb_phy_write(phy, PHY_DISCON_TH_SEL,
- data->cfg->disc_thresh, 2);
- }
+ /* Disconnect threshold adjustment */
+ sun4i_usb_phy_write(phy, PHY_DISCON_TH_SEL,
+ data->cfg->disc_thresh, 2);
sun4i_usb_phy_passby(phy, 1);
@@ -486,6 +480,21 @@ static const struct phy_ops sun4i_usb_phy_ops = {
.owner = THIS_MODULE,
};
+static void sun4i_usb_phy0_reroute(struct sun4i_usb_phy_data *data, int id_det)
+{
+ u32 regval;
+
+ regval = readl(data->base + REG_PHY_OTGCTL);
+ if (id_det == 0) {
+ /* Host mode. Route phy0 to EHCI/OHCI */
+ regval &= ~OTGCTL_ROUTE_MUSB;
+ } else {
+ /* Peripheral mode. Route phy0 to MUSB */
+ regval |= OTGCTL_ROUTE_MUSB;
+ }
+ writel(regval, data->base + REG_PHY_OTGCTL);
+}
+
static void sun4i_usb_phy0_id_vbus_det_scan(struct work_struct *work)
{
struct sun4i_usb_phy_data *data =
@@ -546,6 +555,10 @@ static void sun4i_usb_phy0_id_vbus_det_scan(struct work_struct *work)
sun4i_usb_phy0_set_vbus_detect(phy0, 1);
mutex_unlock(&phy0->mutex);
}
+
+ /* Re-route PHY0 if necessary */
+ if (data->cfg->phy0_dual_route)
+ sun4i_usb_phy0_reroute(data, id_det);
}
if (vbus_notify)
@@ -700,7 +713,7 @@ static int sun4i_usb_phy_probe(struct platform_device *pdev)
return PTR_ERR(phy->reset);
}
- if (i) { /* No pmu for usbc0 */
+ if (i || data->cfg->phy0_dual_route) { /* No pmu for musb */
snprintf(name, sizeof(name), "pmu%d", i);
res = platform_get_resource_byname(pdev,
IORESOURCE_MEM, name);
@@ -823,8 +836,10 @@ static const struct sun4i_usb_phy_cfg sun8i_h3_cfg = {
.num_phys = 4,
.type = sun8i_h3_phy,
.disc_thresh = 3,
+ .phyctl_offset = REG_PHYCTL_A33,
.dedicated_clocks = true,
.enable_pmu_unk1 = true,
+ .phy0_dual_route = true,
};
static const struct sun4i_usb_phy_cfg sun8i_v3s_cfg = {
@@ -843,6 +858,7 @@ static const struct sun4i_usb_phy_cfg sun50i_a64_cfg = {
.phyctl_offset = REG_PHYCTL_A33,
.dedicated_clocks = true,
.enable_pmu_unk1 = true,
+ .phy0_dual_route = true,
};
static const struct of_device_id sun4i_usb_phy_of_match[] = {
diff --git a/drivers/platform/chrome/cros_ec_dev.c b/drivers/platform/chrome/cros_ec_dev.c
index 6f09da4dadb8..6aa120cd0574 100644
--- a/drivers/platform/chrome/cros_ec_dev.c
+++ b/drivers/platform/chrome/cros_ec_dev.c
@@ -391,7 +391,6 @@ static int ec_device_probe(struct platform_device *pdev)
int retval = -ENOMEM;
struct device *dev = &pdev->dev;
struct cros_ec_platform *ec_platform = dev_get_platdata(dev);
- dev_t devno = MKDEV(ec_major, pdev->id);
struct cros_ec_dev *ec = kzalloc(sizeof(*ec), GFP_KERNEL);
if (!ec)
@@ -407,23 +406,11 @@ static int ec_device_probe(struct platform_device *pdev)
cdev_init(&ec->cdev, &fops);
/*
- * Add the character device
- * Link cdev to the class device to be sure device is not used
- * before unbinding it.
- */
- ec->cdev.kobj.parent = &ec->class_dev.kobj;
- retval = cdev_add(&ec->cdev, devno, 1);
- if (retval) {
- dev_err(dev, ": failed to add character device\n");
- goto cdev_add_failed;
- }
-
- /*
* Add the class device
* Link to the character device for creating the /dev entry
* in devtmpfs.
*/
- ec->class_dev.devt = ec->cdev.dev;
+ ec->class_dev.devt = MKDEV(ec_major, pdev->id);
ec->class_dev.class = &cros_class;
ec->class_dev.parent = dev;
ec->class_dev.release = __remove;
@@ -431,13 +418,13 @@ static int ec_device_probe(struct platform_device *pdev)
retval = dev_set_name(&ec->class_dev, "%s", ec_platform->ec_name);
if (retval) {
dev_err(dev, "dev_set_name failed => %d\n", retval);
- goto set_named_failed;
+ goto failed;
}
- retval = device_add(&ec->class_dev);
+ retval = cdev_device_add(&ec->cdev, &ec->class_dev);
if (retval) {
- dev_err(dev, "device_register failed => %d\n", retval);
- goto dev_reg_failed;
+ dev_err(dev, "cdev_device_add failed => %d\n", retval);
+ goto failed;
}
/* check whether this EC is a sensor hub. */
@@ -446,12 +433,8 @@ static int ec_device_probe(struct platform_device *pdev)
return 0;
-dev_reg_failed:
-set_named_failed:
- dev_set_drvdata(dev, NULL);
- cdev_del(&ec->cdev);
-cdev_add_failed:
- kfree(ec);
+failed:
+ put_device(&ec->class_dev);
return retval;
}
diff --git a/drivers/platform/goldfish/goldfish_pipe.c b/drivers/platform/goldfish/goldfish_pipe.c
index 2b21033f11f0..2de1e603bd2b 100644
--- a/drivers/platform/goldfish/goldfish_pipe.c
+++ b/drivers/platform/goldfish/goldfish_pipe.c
@@ -1,8 +1,8 @@
/*
- * Copyright (C) 2011 Google, Inc.
* Copyright (C) 2012 Intel, Inc.
* Copyright (C) 2013 Intel, Inc.
* Copyright (C) 2014 Linaro Limited
+ * Copyright (C) 2011-2016 Google, Inc.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -46,6 +46,7 @@
* exchange is properly mapped during a transfer.
*/
+
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
@@ -63,122 +64,232 @@
#include <linux/acpi.h>
/*
+ * Update this when something changes in the driver's behavior so the host
+ * can benefit from knowing it
+ */
+enum {
+ PIPE_DRIVER_VERSION = 2,
+ PIPE_CURRENT_DEVICE_VERSION = 2
+};
+
+/*
* IMPORTANT: The following constants must match the ones used and defined
* in external/qemu/hw/goldfish_pipe.c in the Android source tree.
*/
-/* pipe device registers */
-#define PIPE_REG_COMMAND 0x00 /* write: value = command */
-#define PIPE_REG_STATUS 0x04 /* read */
-#define PIPE_REG_CHANNEL 0x08 /* read/write: channel id */
-#define PIPE_REG_CHANNEL_HIGH 0x30 /* read/write: channel id */
-#define PIPE_REG_SIZE 0x0c /* read/write: buffer size */
-#define PIPE_REG_ADDRESS 0x10 /* write: physical address */
-#define PIPE_REG_ADDRESS_HIGH 0x34 /* write: physical address */
-#define PIPE_REG_WAKES 0x14 /* read: wake flags */
-#define PIPE_REG_PARAMS_ADDR_LOW 0x18 /* read/write: batch data address */
-#define PIPE_REG_PARAMS_ADDR_HIGH 0x1c /* read/write: batch data address */
-#define PIPE_REG_ACCESS_PARAMS 0x20 /* write: batch access */
-#define PIPE_REG_VERSION 0x24 /* read: device version */
-
-/* list of commands for PIPE_REG_COMMAND */
-#define CMD_OPEN 1 /* open new channel */
-#define CMD_CLOSE 2 /* close channel (from guest) */
-#define CMD_POLL 3 /* poll read/write status */
-
/* List of bitflags returned in status of CMD_POLL command */
-#define PIPE_POLL_IN (1 << 0)
-#define PIPE_POLL_OUT (1 << 1)
-#define PIPE_POLL_HUP (1 << 2)
-
-/* The following commands are related to write operations */
-#define CMD_WRITE_BUFFER 4 /* send a user buffer to the emulator */
-#define CMD_WAKE_ON_WRITE 5 /* tell the emulator to wake us when writing
- is possible */
-#define CMD_READ_BUFFER 6 /* receive a user buffer from the emulator */
-#define CMD_WAKE_ON_READ 7 /* tell the emulator to wake us when reading
- * is possible */
+enum PipePollFlags {
+ PIPE_POLL_IN = 1 << 0,
+ PIPE_POLL_OUT = 1 << 1,
+ PIPE_POLL_HUP = 1 << 2
+};
/* Possible status values used to signal errors - see goldfish_pipe_error_convert */
-#define PIPE_ERROR_INVAL -1
-#define PIPE_ERROR_AGAIN -2
-#define PIPE_ERROR_NOMEM -3
-#define PIPE_ERROR_IO -4
+enum PipeErrors {
+ PIPE_ERROR_INVAL = -1,
+ PIPE_ERROR_AGAIN = -2,
+ PIPE_ERROR_NOMEM = -3,
+ PIPE_ERROR_IO = -4
+};
/* Bit-flags used to signal events from the emulator */
-#define PIPE_WAKE_CLOSED (1 << 0) /* emulator closed pipe */
-#define PIPE_WAKE_READ (1 << 1) /* pipe can now be read from */
-#define PIPE_WAKE_WRITE (1 << 2) /* pipe can now be written to */
-
-struct access_params {
- unsigned long channel;
- u32 size;
- unsigned long address;
- u32 cmd;
- u32 result;
- /* reserved for future extension */
+enum PipeWakeFlags {
+ PIPE_WAKE_CLOSED = 1 << 0, /* emulator closed pipe */
+ PIPE_WAKE_READ = 1 << 1, /* pipe can now be read from */
+ PIPE_WAKE_WRITE = 1 << 2 /* pipe can now be written to */
+};
+
+/* Bit flags for the 'flags' field */
+enum PipeFlagsBits {
+ BIT_CLOSED_ON_HOST = 0, /* pipe closed by host */
+ BIT_WAKE_ON_WRITE = 1, /* want to be woken on writes */
+ BIT_WAKE_ON_READ = 2, /* want to be woken on reads */
+};
+
+enum PipeRegs {
+ PIPE_REG_CMD = 0,
+
+ PIPE_REG_SIGNAL_BUFFER_HIGH = 4,
+ PIPE_REG_SIGNAL_BUFFER = 8,
+ PIPE_REG_SIGNAL_BUFFER_COUNT = 12,
+
+ PIPE_REG_OPEN_BUFFER_HIGH = 20,
+ PIPE_REG_OPEN_BUFFER = 24,
+
+ PIPE_REG_VERSION = 36,
+
+ PIPE_REG_GET_SIGNALLED = 48,
+};
+
+enum PipeCmdCode {
+ PIPE_CMD_OPEN = 1, /* to be used by the pipe device itself */
+ PIPE_CMD_CLOSE,
+ PIPE_CMD_POLL,
+ PIPE_CMD_WRITE,
+ PIPE_CMD_WAKE_ON_WRITE,
+ PIPE_CMD_READ,
+ PIPE_CMD_WAKE_ON_READ,
+
+ /*
+ * TODO(zyy): implement a deferred read/write execution to allow
+ * parallel processing of pipe operations on the host.
+ */
+ PIPE_CMD_WAKE_ON_DONE_IO,
+};
+
+enum {
+ MAX_BUFFERS_PER_COMMAND = 336,
+ MAX_SIGNALLED_PIPES = 64,
+ INITIAL_PIPES_CAPACITY = 64
+};
+
+struct goldfish_pipe_dev;
+struct goldfish_pipe;
+struct goldfish_pipe_command;
+
+/* A per-pipe command structure, shared with the host */
+struct goldfish_pipe_command {
+ s32 cmd; /* PipeCmdCode, guest -> host */
+ s32 id; /* pipe id, guest -> host */
+ s32 status; /* command execution status, host -> guest */
+ s32 reserved; /* to pad to 64-bit boundary */
+ union {
+ /* Parameters for PIPE_CMD_{READ,WRITE} */
+ struct {
+ /* number of buffers, guest -> host */
+ u32 buffers_count;
+ /* number of consumed bytes, host -> guest */
+ s32 consumed_size;
+ /* buffer pointers, guest -> host */
+ u64 ptrs[MAX_BUFFERS_PER_COMMAND];
+ /* buffer sizes, guest -> host */
+ u32 sizes[MAX_BUFFERS_PER_COMMAND];
+ } rw_params;
+ };
+};
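A quick size check of MAX_BUFFERS_PER_COMMAND, assuming 4 KiB pages: the fixed header is 4 x 4 = 16 bytes, and rw_params adds 4 + 4 + 336 * 8 + 336 * 4 = 4040 bytes, so a whole command is 4056 bytes and still fits in the single page that goldfish_pipe_open() allocates for it below.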
+
+/* Information about a single signalled pipe */
+struct signalled_pipe_buffer {
+ u32 id;
u32 flags;
};
-/* The global driver data. Holds a reference to the i/o page used to
- * communicate with the emulator, and a wake queue for blocked tasks
- * waiting to be awoken.
- */
-struct goldfish_pipe_dev {
- spinlock_t lock;
- unsigned char __iomem *base;
- struct access_params *aps;
- int irq;
- u32 version;
+/* Parameters for the PIPE_CMD_OPEN command */
+struct open_command_param {
+ u64 command_buffer_ptr;
+ u32 rw_params_max_count;
};
-static struct goldfish_pipe_dev pipe_dev[1];
+/* Device-level set of buffers shared with the host */
+struct goldfish_pipe_dev_buffers {
+ struct open_command_param open_command_params;
+ struct signalled_pipe_buffer signalled_pipe_buffers[
+ MAX_SIGNALLED_PIPES];
+};
/* This data type models a given pipe instance */
struct goldfish_pipe {
- struct goldfish_pipe_dev *dev;
- struct mutex lock;
+ /* pipe ID - index into goldfish_pipe_dev::pipes array */
+ u32 id;
+ /* The wake flags this pipe is waiting for
+ * Note: not protected by any lock; atomic operations
+ * and barriers make it thread-safe.
+ */
unsigned long flags;
+ /* wake flags the host has signalled,
+ * - protected by goldfish_pipe_dev::lock
+ */
+ unsigned long signalled_flags;
+
+ /* A pointer to command buffer */
+ struct goldfish_pipe_command *command_buffer;
+
+ /* doubly linked list of signalled pipes, protected by
+ * goldfish_pipe_dev::lock
+ */
+ struct goldfish_pipe *prev_signalled;
+ struct goldfish_pipe *next_signalled;
+
+ /*
+ * A pipe's own lock. Protects the following:
+ * - *command_buffer - makes sure a command can safely write its
+ * parameters to the host and read the results back.
+ */
+ struct mutex lock;
+
+ /* A wake queue for sleeping until host signals an event */
wait_queue_head_t wake_queue;
+ /* Pointer to the parent goldfish_pipe_dev instance */
+ struct goldfish_pipe_dev *dev;
};
+/* The global driver data. Holds a reference to the i/o registers used to
+ * communicate with the emulator, the array of pipe instances, and the
+ * buffers shared with the host.
+ */
+struct goldfish_pipe_dev {
+ /*
+ * Global device spinlock. Protects the following members:
+ * - pipes, pipes_capacity
+ * - [*pipes, *pipes + pipes_capacity) - array data
+ * - first_signalled_pipe,
+ * goldfish_pipe::prev_signalled,
+ * goldfish_pipe::next_signalled,
+ * goldfish_pipe::signalled_flags - all signalled-related fields,
+ * in all allocated pipes
+ * - open_command_params - PIPE_CMD_OPEN-related buffers
+ *
+ * It looks like a lot of different fields, but the trick is that
+ * the only operation that happens often is the signalled pipes array
+ * manipulation. That's why it's OK for now to keep the rest of the
+ * fields under the same lock. If we notice too much contention because
+ * of PIPE_CMD_OPEN, then we should add a separate lock there.
+ */
+ spinlock_t lock;
-/* Bit flags for the 'flags' field */
-enum {
- BIT_CLOSED_ON_HOST = 0, /* pipe closed by host */
- BIT_WAKE_ON_WRITE = 1, /* want to be woken on writes */
- BIT_WAKE_ON_READ = 2, /* want to be woken on reads */
+ /*
+ * Array of the pipes of |pipes_capacity| elements,
+ * indexed by goldfish_pipe::id
+ */
+ struct goldfish_pipe **pipes;
+ u32 pipes_capacity;
+
+ /* Pointers to the buffers host uses for interaction with this driver */
+ struct goldfish_pipe_dev_buffers *buffers;
+
+ /* Head of a doubly linked list of signalled pipes */
+ struct goldfish_pipe *first_signalled_pipe;
+
+ /* Some device-specific data */
+ int irq;
+ int version;
+ unsigned char __iomem *base;
};
+static struct goldfish_pipe_dev pipe_dev[1] = {};
-static u32 goldfish_cmd_status(struct goldfish_pipe *pipe, u32 cmd)
+static int goldfish_cmd_locked(struct goldfish_pipe *pipe, enum PipeCmdCode cmd)
{
- unsigned long flags;
- u32 status;
- struct goldfish_pipe_dev *dev = pipe->dev;
-
- spin_lock_irqsave(&dev->lock, flags);
- gf_write_ptr(pipe, dev->base + PIPE_REG_CHANNEL,
- dev->base + PIPE_REG_CHANNEL_HIGH);
- writel(cmd, dev->base + PIPE_REG_COMMAND);
- status = readl(dev->base + PIPE_REG_STATUS);
- spin_unlock_irqrestore(&dev->lock, flags);
- return status;
+ pipe->command_buffer->cmd = cmd;
+ /* failure by default */
+ pipe->command_buffer->status = PIPE_ERROR_INVAL;
+ writel(pipe->id, pipe->dev->base + PIPE_REG_CMD);
+ return pipe->command_buffer->status;
}
-static void goldfish_cmd(struct goldfish_pipe *pipe, u32 cmd)
+static int goldfish_cmd(struct goldfish_pipe *pipe, enum PipeCmdCode cmd)
{
- unsigned long flags;
- struct goldfish_pipe_dev *dev = pipe->dev;
+ int status;
- spin_lock_irqsave(&dev->lock, flags);
- gf_write_ptr(pipe, dev->base + PIPE_REG_CHANNEL,
- dev->base + PIPE_REG_CHANNEL_HIGH);
- writel(cmd, dev->base + PIPE_REG_COMMAND);
- spin_unlock_irqrestore(&dev->lock, flags);
+ if (mutex_lock_interruptible(&pipe->lock))
+ return PIPE_ERROR_IO;
+ status = goldfish_cmd_locked(pipe, cmd);
+ mutex_unlock(&pipe->lock);
+ return status;
}
-/* This function converts an error code returned by the emulator through
+/*
+ * This function converts an error code returned by the emulator through
* the PIPE_REG_STATUS i/o register into a valid negative errno value.
*/
static int goldfish_pipe_error_convert(int status)
@@ -195,184 +306,202 @@ static int goldfish_pipe_error_convert(int status)
}
}
-/*
- * Notice: QEMU will return 0 for un-known register access, indicating
- * param_acess is supported or not
- */
-static int valid_batchbuffer_addr(struct goldfish_pipe_dev *dev,
- struct access_params *aps)
+static int pin_user_pages(unsigned long first_page, unsigned long last_page,
+ unsigned int last_page_size, int is_write,
+ struct page *pages[MAX_BUFFERS_PER_COMMAND],
+ unsigned int *iter_last_page_size)
{
- u32 aph, apl;
- u64 paddr;
- aph = readl(dev->base + PIPE_REG_PARAMS_ADDR_HIGH);
- apl = readl(dev->base + PIPE_REG_PARAMS_ADDR_LOW);
+ int ret;
+ int requested_pages = ((last_page - first_page) >> PAGE_SHIFT) + 1;
+
+ if (requested_pages > MAX_BUFFERS_PER_COMMAND) {
+ requested_pages = MAX_BUFFERS_PER_COMMAND;
+ *iter_last_page_size = PAGE_SIZE;
+ } else {
+ *iter_last_page_size = last_page_size;
+ }
+
+ ret = get_user_pages_fast(
+ first_page, requested_pages, !is_write, pages);
+ if (ret <= 0)
+ return -EFAULT;
+ if (ret < requested_pages)
+ *iter_last_page_size = PAGE_SIZE;
+ return ret;
- paddr = ((u64)aph << 32) | apl;
- if (paddr != (__pa(aps)))
- return 0;
- return 1;
}
-/* 0 on success */
-static int setup_access_params_addr(struct platform_device *pdev,
- struct goldfish_pipe_dev *dev)
+static void release_user_pages(struct page **pages, int pages_count,
+ int is_write, s32 consumed_size)
{
- dma_addr_t dma_handle;
- struct access_params *aps;
+ int i;
- aps = dmam_alloc_coherent(&pdev->dev, sizeof(struct access_params),
- &dma_handle, GFP_KERNEL);
- if (!aps)
- return -ENOMEM;
+ for (i = 0; i < pages_count; i++) {
+ if (!is_write && consumed_size > 0)
+ set_page_dirty(pages[i]);
+ put_page(pages[i]);
+ }
+}
+
+/* Populate the call parameters, merging adjacent pages together */
+static void populate_rw_params(
+ struct page **pages, int pages_count,
+ unsigned long address, unsigned long address_end,
+ unsigned long first_page, unsigned long last_page,
+ unsigned int iter_last_page_size, int is_write,
+ struct goldfish_pipe_command *command)
+{
+ /*
+ * Process the first page separately - it's the only page that
+ * needs special handling for its start address.
+ */
+ unsigned long xaddr = page_to_phys(pages[0]);
+ unsigned long xaddr_prev = xaddr;
+ int buffer_idx = 0;
+ int i = 1;
+ int size_on_page = first_page == last_page
+ ? (int)(address_end - address)
+ : (PAGE_SIZE - (address & ~PAGE_MASK));
+ command->rw_params.ptrs[0] = (u64)(xaddr | (address & ~PAGE_MASK));
+ command->rw_params.sizes[0] = size_on_page;
+ for (; i < pages_count; ++i) {
+ xaddr = page_to_phys(pages[i]);
+ size_on_page = (i == pages_count - 1) ?
+ iter_last_page_size : PAGE_SIZE;
+ if (xaddr == xaddr_prev + PAGE_SIZE) {
+ command->rw_params.sizes[buffer_idx] += size_on_page;
+ } else {
+ ++buffer_idx;
+ command->rw_params.ptrs[buffer_idx] = (u64)xaddr;
+ command->rw_params.sizes[buffer_idx] = size_on_page;
+ }
+ xaddr_prev = xaddr;
+ }
+ command->rw_params.buffers_count = buffer_idx + 1;
+}
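As a worked example (the numbers are only illustrative): a transfer that starts 0x100 bytes into its first page gets PAGE_SIZE - 0x100 bytes recorded for that page; each following page either extends the previous {ptr, size} entry when it is physically adjacent to it or opens a new entry, and the final page contributes only iter_last_page_size bytes.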
- writel(upper_32_bits(dma_handle), dev->base + PIPE_REG_PARAMS_ADDR_HIGH);
- writel(lower_32_bits(dma_handle), dev->base + PIPE_REG_PARAMS_ADDR_LOW);
+static int transfer_max_buffers(struct goldfish_pipe *pipe,
+ unsigned long address, unsigned long address_end, int is_write,
+ unsigned long last_page, unsigned int last_page_size,
+ s32 *consumed_size, int *status)
+{
+ static struct page *pages[MAX_BUFFERS_PER_COMMAND];
+ unsigned long first_page = address & PAGE_MASK;
+ unsigned int iter_last_page_size;
+ int pages_count = pin_user_pages(first_page, last_page,
+ last_page_size, is_write,
+ pages, &iter_last_page_size);
- if (valid_batchbuffer_addr(dev, aps)) {
- dev->aps = aps;
- return 0;
- } else
- return -1;
+ if (pages_count < 0)
+ return pages_count;
+
+ /* Serialize access to the pipe command buffers */
+ if (mutex_lock_interruptible(&pipe->lock))
+ return -ERESTARTSYS;
+
+ populate_rw_params(pages, pages_count, address, address_end,
+ first_page, last_page, iter_last_page_size, is_write,
+ pipe->command_buffer);
+
+ /* Transfer the data */
+ *status = goldfish_cmd_locked(pipe,
+ is_write ? PIPE_CMD_WRITE : PIPE_CMD_READ);
+
+ *consumed_size = pipe->command_buffer->rw_params.consumed_size;
+
+ release_user_pages(pages, pages_count, is_write, *consumed_size);
+
+ mutex_unlock(&pipe->lock);
+
+ return 0;
}
-/* A value that will not be set by qemu emulator */
-#define INITIAL_BATCH_RESULT (0xdeadbeaf)
-static int access_with_param(struct goldfish_pipe_dev *dev, const int cmd,
- unsigned long address, unsigned long avail,
- struct goldfish_pipe *pipe, int *status)
+static int wait_for_host_signal(struct goldfish_pipe *pipe, int is_write)
{
- struct access_params *aps = dev->aps;
+ u32 wakeBit = is_write ? BIT_WAKE_ON_WRITE : BIT_WAKE_ON_READ;
- if (aps == NULL)
- return -1;
+ set_bit(wakeBit, &pipe->flags);
+
+ /* Tell the emulator we're going to wait for a wake event */
+ (void)goldfish_cmd(pipe,
+ is_write ? PIPE_CMD_WAKE_ON_WRITE : PIPE_CMD_WAKE_ON_READ);
+
+ while (test_bit(wakeBit, &pipe->flags)) {
+ if (wait_event_interruptible(
+ pipe->wake_queue,
+ !test_bit(wakeBit, &pipe->flags)))
+ return -ERESTARTSYS;
+
+ if (test_bit(BIT_CLOSED_ON_HOST, &pipe->flags))
+ return -EIO;
+ }
- aps->result = INITIAL_BATCH_RESULT;
- aps->channel = (unsigned long)pipe;
- aps->size = avail;
- aps->address = address;
- aps->cmd = cmd;
- writel(cmd, dev->base + PIPE_REG_ACCESS_PARAMS);
- /*
- * If the aps->result has not changed, that means
- * that the batch command failed
- */
- if (aps->result == INITIAL_BATCH_RESULT)
- return -1;
- *status = aps->result;
return 0;
}
-static ssize_t goldfish_pipe_read_write(struct file *filp, char __user *buffer,
- size_t bufflen, int is_write)
+static ssize_t goldfish_pipe_read_write(struct file *filp,
+ char __user *buffer, size_t bufflen, int is_write)
{
- unsigned long irq_flags;
struct goldfish_pipe *pipe = filp->private_data;
- struct goldfish_pipe_dev *dev = pipe->dev;
- unsigned long address, address_end;
int count = 0, ret = -EINVAL;
+ unsigned long address, address_end, last_page;
+ unsigned int last_page_size;
/* If the emulator already closed the pipe, no need to go further */
- if (test_bit(BIT_CLOSED_ON_HOST, &pipe->flags))
+ if (unlikely(test_bit(BIT_CLOSED_ON_HOST, &pipe->flags)))
return -EIO;
-
/* Null reads or writes succeeds */
if (unlikely(bufflen == 0))
return 0;
-
/* Check the buffer range for access */
- if (!access_ok(is_write ? VERIFY_WRITE : VERIFY_READ,
- buffer, bufflen))
+ if (unlikely(!access_ok(is_write ? VERIFY_WRITE : VERIFY_READ,
+ buffer, bufflen)))
return -EFAULT;
- /* Serialize access to the pipe */
- if (mutex_lock_interruptible(&pipe->lock))
- return -ERESTARTSYS;
-
- address = (unsigned long)(void *)buffer;
+ address = (unsigned long)buffer;
address_end = address + bufflen;
+ last_page = (address_end - 1) & PAGE_MASK;
+ last_page_size = ((address_end - 1) & ~PAGE_MASK) + 1;
while (address < address_end) {
- unsigned long page_end = (address & PAGE_MASK) + PAGE_SIZE;
- unsigned long next = page_end < address_end ? page_end
- : address_end;
- unsigned long avail = next - address;
- int status, wakeBit;
- struct page *page;
-
- /* Either vaddr or paddr depending on the device version */
- unsigned long xaddr;
+ s32 consumed_size;
+ int status;
- /*
- * We grab the pages on a page-by-page basis in case user
- * space gives us a potentially huge buffer but the read only
- * returns a small amount, then there's no need to pin that
- * much memory to the process.
- */
- ret = get_user_pages_unlocked(address, 1, &page,
- is_write ? 0 : FOLL_WRITE);
+ ret = transfer_max_buffers(pipe, address, address_end, is_write,
+ last_page, last_page_size, &consumed_size,
+ &status);
if (ret < 0)
break;
- if (dev->version) {
- /* Device version 1 or newer (qemu-android) expects the
- * physical address.
+ if (consumed_size > 0) {
+ /* No matter what the status is, we've transferred
+ * something.
*/
- xaddr = page_to_phys(page) | (address & ~PAGE_MASK);
- } else {
- /* Device version 0 (classic emulator) expects the
- * virtual address.
- */
- xaddr = address;
+ count += consumed_size;
+ address += consumed_size;
}
-
- /* Now, try to transfer the bytes in the current page */
- spin_lock_irqsave(&dev->lock, irq_flags);
- if (access_with_param(dev,
- is_write ? CMD_WRITE_BUFFER : CMD_READ_BUFFER,
- xaddr, avail, pipe, &status)) {
- gf_write_ptr(pipe, dev->base + PIPE_REG_CHANNEL,
- dev->base + PIPE_REG_CHANNEL_HIGH);
- writel(avail, dev->base + PIPE_REG_SIZE);
- gf_write_ptr((void *)xaddr,
- dev->base + PIPE_REG_ADDRESS,
- dev->base + PIPE_REG_ADDRESS_HIGH);
- writel(is_write ? CMD_WRITE_BUFFER : CMD_READ_BUFFER,
- dev->base + PIPE_REG_COMMAND);
- status = readl(dev->base + PIPE_REG_STATUS);
- }
- spin_unlock_irqrestore(&dev->lock, irq_flags);
-
- if (status > 0 && !is_write)
- set_page_dirty(page);
- put_page(page);
-
- if (status > 0) { /* Correct transfer */
- count += status;
- address += status;
+ if (status > 0)
continue;
- } else if (status == 0) { /* EOF */
+ if (status == 0) {
+ /* EOF */
ret = 0;
break;
- } else if (status < 0 && count > 0) {
+ }
+ if (count > 0) {
/*
- * An error occurred and we already transferred
- * something on one of the previous pages.
+ * An error occurred, but we already transferred
+ * something on one of the previous iterations.
* Just return what we already copied and log this
* err.
- *
- * Note: This seems like an incorrect approach but
- * cannot change it until we check if any user space
- * ABI relies on this behavior.
*/
if (status != PIPE_ERROR_AGAIN)
- pr_info_ratelimited("goldfish_pipe: backend returned error %d on %s\n",
+ pr_info_ratelimited("goldfish_pipe: backend error %d on %s\n",
status, is_write ? "write" : "read");
- ret = 0;
break;
}
/*
- * If the error is not PIPE_ERROR_AGAIN, or if we are not in
+ * If the error is not PIPE_ERROR_AGAIN, or if we are in
* non-blocking mode, just return the error code.
*/
if (status != PIPE_ERROR_AGAIN ||
@@ -381,139 +510,214 @@ static ssize_t goldfish_pipe_read_write(struct file *filp, char __user *buffer,
break;
}
- /*
- * The backend blocked the read/write, wait until the backend
- * tells us it's ready to process more data.
- */
- wakeBit = is_write ? BIT_WAKE_ON_WRITE : BIT_WAKE_ON_READ;
- set_bit(wakeBit, &pipe->flags);
-
- /* Tell the emulator we're going to wait for a wake event */
- goldfish_cmd(pipe,
- is_write ? CMD_WAKE_ON_WRITE : CMD_WAKE_ON_READ);
-
- /* Unlock the pipe, then wait for the wake signal */
- mutex_unlock(&pipe->lock);
-
- while (test_bit(wakeBit, &pipe->flags)) {
- if (wait_event_interruptible(
- pipe->wake_queue,
- !test_bit(wakeBit, &pipe->flags)))
- return -ERESTARTSYS;
-
- if (test_bit(BIT_CLOSED_ON_HOST, &pipe->flags))
- return -EIO;
- }
-
- /* Try to re-acquire the lock */
- if (mutex_lock_interruptible(&pipe->lock))
- return -ERESTARTSYS;
+ status = wait_for_host_signal(pipe, is_write);
+ if (status < 0)
+ return status;
}
- mutex_unlock(&pipe->lock);
- if (ret < 0)
- return ret;
- else
+ if (count > 0)
return count;
+ return ret;
}
static ssize_t goldfish_pipe_read(struct file *filp, char __user *buffer,
- size_t bufflen, loff_t *ppos)
+ size_t bufflen, loff_t *ppos)
{
- return goldfish_pipe_read_write(filp, buffer, bufflen, 0);
+ return goldfish_pipe_read_write(filp, buffer, bufflen,
+ /* is_write */ 0);
}
static ssize_t goldfish_pipe_write(struct file *filp,
const char __user *buffer, size_t bufflen,
loff_t *ppos)
{
- return goldfish_pipe_read_write(filp, (char __user *)buffer,
- bufflen, 1);
+ return goldfish_pipe_read_write(filp,
+ /* cast away the const */(char __user *)buffer, bufflen,
+ /* is_write */ 1);
}
-
static unsigned int goldfish_pipe_poll(struct file *filp, poll_table *wait)
{
struct goldfish_pipe *pipe = filp->private_data;
unsigned int mask = 0;
int status;
- mutex_lock(&pipe->lock);
-
poll_wait(filp, &pipe->wake_queue, wait);
- status = goldfish_cmd_status(pipe, CMD_POLL);
-
- mutex_unlock(&pipe->lock);
+ status = goldfish_cmd(pipe, PIPE_CMD_POLL);
+ if (status < 0)
+ return -ERESTARTSYS;
if (status & PIPE_POLL_IN)
mask |= POLLIN | POLLRDNORM;
-
if (status & PIPE_POLL_OUT)
mask |= POLLOUT | POLLWRNORM;
-
if (status & PIPE_POLL_HUP)
mask |= POLLHUP;
-
if (test_bit(BIT_CLOSED_ON_HOST, &pipe->flags))
mask |= POLLERR;
return mask;
}
-static irqreturn_t goldfish_pipe_interrupt(int irq, void *dev_id)
+static void signalled_pipes_add_locked(struct goldfish_pipe_dev *dev,
+ u32 id, u32 flags)
{
- struct goldfish_pipe_dev *dev = dev_id;
- unsigned long irq_flags;
- int count = 0;
+ struct goldfish_pipe *pipe;
- /*
- * We're going to read from the emulator a list of (channel,flags)
- * pairs corresponding to the wake events that occurred on each
- * blocked pipe (i.e. channel).
- */
- spin_lock_irqsave(&dev->lock, irq_flags);
- for (;;) {
- /* First read the channel, 0 means the end of the list */
- struct goldfish_pipe *pipe;
- unsigned long wakes;
- unsigned long channel = 0;
+ if (WARN_ON(id >= dev->pipes_capacity))
+ return;
+
+ pipe = dev->pipes[id];
+ if (!pipe)
+ return;
+ pipe->signalled_flags |= flags;
+
+ if (pipe->prev_signalled || pipe->next_signalled
+ || dev->first_signalled_pipe == pipe)
+ return; /* already in the list */
+ pipe->next_signalled = dev->first_signalled_pipe;
+ if (dev->first_signalled_pipe)
+ dev->first_signalled_pipe->prev_signalled = pipe;
+ dev->first_signalled_pipe = pipe;
+}
-#ifdef CONFIG_64BIT
- channel = (u64)readl(dev->base + PIPE_REG_CHANNEL_HIGH) << 32;
+static void signalled_pipes_remove_locked(struct goldfish_pipe_dev *dev,
+ struct goldfish_pipe *pipe) {
+ if (pipe->prev_signalled)
+ pipe->prev_signalled->next_signalled = pipe->next_signalled;
+ if (pipe->next_signalled)
+ pipe->next_signalled->prev_signalled = pipe->prev_signalled;
+ if (pipe == dev->first_signalled_pipe)
+ dev->first_signalled_pipe = pipe->next_signalled;
+ pipe->prev_signalled = NULL;
+ pipe->next_signalled = NULL;
+}
- if (channel == 0)
- break;
-#endif
- channel |= readl(dev->base + PIPE_REG_CHANNEL);
+static struct goldfish_pipe *signalled_pipes_pop_front(
+ struct goldfish_pipe_dev *dev, int *wakes)
+{
+ struct goldfish_pipe *pipe;
+ unsigned long flags;
- if (channel == 0)
- break;
+ spin_lock_irqsave(&dev->lock, flags);
- /* Convert channel to struct pipe pointer + read wake flags */
- wakes = readl(dev->base + PIPE_REG_WAKES);
- pipe = (struct goldfish_pipe *)(ptrdiff_t)channel;
+ pipe = dev->first_signalled_pipe;
+ if (pipe) {
+ *wakes = pipe->signalled_flags;
+ pipe->signalled_flags = 0;
+ /*
+ * This is an optimized version of
+ * signalled_pipes_remove_locked() -
+ * we want to wake the sleeping pipe operations
+ * as fast as possible.
+ */
+ dev->first_signalled_pipe = pipe->next_signalled;
+ if (dev->first_signalled_pipe)
+ dev->first_signalled_pipe->prev_signalled = NULL;
+ pipe->next_signalled = NULL;
+ }
- /* Did the emulator just closed a pipe? */
+ spin_unlock_irqrestore(&dev->lock, flags);
+ return pipe;
+}
+
+static void goldfish_interrupt_task(unsigned long unused)
+{
+ struct goldfish_pipe_dev *dev = pipe_dev;
+ /* Iterate over the signalled pipes and wake them one by one */
+ struct goldfish_pipe *pipe;
+ int wakes;
+
+ while ((pipe = signalled_pipes_pop_front(dev, &wakes)) != NULL) {
if (wakes & PIPE_WAKE_CLOSED) {
- set_bit(BIT_CLOSED_ON_HOST, &pipe->flags);
- wakes |= PIPE_WAKE_READ | PIPE_WAKE_WRITE;
+ pipe->flags = 1 << BIT_CLOSED_ON_HOST;
+ } else {
+ if (wakes & PIPE_WAKE_READ)
+ clear_bit(BIT_WAKE_ON_READ, &pipe->flags);
+ if (wakes & PIPE_WAKE_WRITE)
+ clear_bit(BIT_WAKE_ON_WRITE, &pipe->flags);
}
- if (wakes & PIPE_WAKE_READ)
- clear_bit(BIT_WAKE_ON_READ, &pipe->flags);
- if (wakes & PIPE_WAKE_WRITE)
- clear_bit(BIT_WAKE_ON_WRITE, &pipe->flags);
-
+ /*
+ * wake_up_interruptible() implies a write barrier, so don't
+ * explicitly add another one here.
+ */
wake_up_interruptible(&pipe->wake_queue);
- count++;
}
- spin_unlock_irqrestore(&dev->lock, irq_flags);
+}
+DECLARE_TASKLET(goldfish_interrupt_tasklet, goldfish_interrupt_task, 0);
- return (count == 0) ? IRQ_NONE : IRQ_HANDLED;
+/*
+ * The general idea of the interrupt handling:
+ *
+ * 1. device raises an interrupt if there's at least one signalled pipe
+ * 2. IRQ handler reads the signalled pipes and their count from the device
+ * 3. device writes them into a shared buffer and returns the count
+ * it only resets the IRQ if it has returned all signalled pipes,
+ * otherwise it leaves it raised, so IRQ handler will be called
+ * again for the next chunk
+ * 4. IRQ handler adds all returned pipes to the device's signalled pipes list
+ * 5. IRQ handler launches a tasklet to process the signalled pipes from the
+ * list in a separate context
+ */
+static irqreturn_t goldfish_pipe_interrupt(int irq, void *dev_id)
+{
+ u32 count;
+ u32 i;
+ unsigned long flags;
+ struct goldfish_pipe_dev *dev = dev_id;
+
+ if (dev != pipe_dev)
+ return IRQ_NONE;
+
+ /* Request the signalled pipes from the device */
+ spin_lock_irqsave(&dev->lock, flags);
+
+ count = readl(dev->base + PIPE_REG_GET_SIGNALLED);
+ if (count == 0) {
+ spin_unlock_irqrestore(&dev->lock, flags);
+ return IRQ_NONE;
+ }
+ if (count > MAX_SIGNALLED_PIPES)
+ count = MAX_SIGNALLED_PIPES;
+
+ for (i = 0; i < count; ++i)
+ signalled_pipes_add_locked(dev,
+ dev->buffers->signalled_pipe_buffers[i].id,
+ dev->buffers->signalled_pipe_buffers[i].flags);
+
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ tasklet_schedule(&goldfish_interrupt_tasklet);
+ return IRQ_HANDLED;
+}
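
Editorial aside, not part of the patch: the handler above is the standard hard-IRQ/tasklet split. The top half only moves completed entries onto a list under the device spinlock, and the deferred tasklet does the potentially slower wake-ups. Below is a compressed, self-contained restatement of that pattern with hypothetical demo_* names, shown only to illustrate the structure:

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

struct demo_item {
	struct demo_item *next;			/* singly linked pending list */
	wait_queue_head_t wake_queue;		/* sleepers waiting on this item */
};

static DEFINE_SPINLOCK(demo_lock);
static struct demo_item *demo_pending;		/* filled by the IRQ handler */

static void demo_bottom_half(unsigned long unused)
{
	struct demo_item *item;
	unsigned long flags;

	for (;;) {
		spin_lock_irqsave(&demo_lock, flags);
		item = demo_pending;
		if (item)
			demo_pending = item->next;
		spin_unlock_irqrestore(&demo_lock, flags);
		if (!item)
			break;
		/* The slow part runs here, outside the hard-IRQ path */
		wake_up_interruptible(&item->wake_queue);
	}
}
static DECLARE_TASKLET(demo_tasklet, demo_bottom_half, 0);

static irqreturn_t demo_irq(int irq, void *dev_id)
{
	struct demo_item *item = dev_id;	/* pretend the device handed us one item */
	unsigned long flags;

	spin_lock_irqsave(&demo_lock, flags);
	item->next = demo_pending;		/* push; keep the hard-IRQ path short */
	demo_pending = item;
	spin_unlock_irqrestore(&demo_lock, flags);

	tasklet_schedule(&demo_tasklet);	/* defer the wake-ups */
	return IRQ_HANDLED;
}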
+
+static int get_free_pipe_id_locked(struct goldfish_pipe_dev *dev)
+{
+ int id;
+
+ for (id = 0; id < dev->pipes_capacity; ++id)
+ if (!dev->pipes[id])
+ return id;
+
+ {
+ /* Reallocate the array */
+ u32 new_capacity = 2 * dev->pipes_capacity;
+ struct goldfish_pipe **pipes =
+ kcalloc(new_capacity, sizeof(*pipes), GFP_KERNEL);
+ if (!pipes)
+ return -ENOMEM;
+ memcpy(pipes, dev->pipes, sizeof(*pipes) * dev->pipes_capacity);
+ kfree(dev->pipes);
+ dev->pipes = pipes;
+ id = dev->pipes_capacity;
+ dev->pipes_capacity = new_capacity;
+ }
+ return id;
}
/**
- * goldfish_pipe_open - open a channel to the AVD
+ * goldfish_pipe_open - open a channel to the AVD
* @inode: inode of device
* @file: file struct of opener
*
@@ -525,12 +729,13 @@ static irqreturn_t goldfish_pipe_interrupt(int irq, void *dev_id)
*/
static int goldfish_pipe_open(struct inode *inode, struct file *file)
{
- struct goldfish_pipe *pipe;
struct goldfish_pipe_dev *dev = pipe_dev;
- int32_t status;
+ unsigned long flags;
+ int id;
+ int status;
/* Allocate new pipe kernel object */
- pipe = kzalloc(sizeof(*pipe), GFP_KERNEL);
+ struct goldfish_pipe *pipe = kzalloc(sizeof(*pipe), GFP_KERNEL);
if (pipe == NULL)
return -ENOMEM;
@@ -539,29 +744,69 @@ static int goldfish_pipe_open(struct inode *inode, struct file *file)
init_waitqueue_head(&pipe->wake_queue);
/*
- * Now, tell the emulator we're opening a new pipe. We use the
- * pipe object's address as the channel identifier for simplicity.
+	 * The command buffer needs to be allocated on its own page to make sure
+	 * it is physically contiguous in the host's address space.
*/
+ pipe->command_buffer =
+ (struct goldfish_pipe_command *)__get_free_page(GFP_KERNEL);
+ if (!pipe->command_buffer) {
+ status = -ENOMEM;
+ goto err_pipe;
+ }
- status = goldfish_cmd_status(pipe, CMD_OPEN);
- if (status < 0) {
- kfree(pipe);
- return status;
+ spin_lock_irqsave(&dev->lock, flags);
+
+ id = get_free_pipe_id_locked(dev);
+ if (id < 0) {
+ status = id;
+ goto err_id_locked;
}
+ dev->pipes[id] = pipe;
+ pipe->id = id;
+ pipe->command_buffer->id = id;
+
+ /* Now tell the emulator we're opening a new pipe. */
+ dev->buffers->open_command_params.rw_params_max_count =
+ MAX_BUFFERS_PER_COMMAND;
+ dev->buffers->open_command_params.command_buffer_ptr =
+ (u64)(unsigned long)__pa(pipe->command_buffer);
+ status = goldfish_cmd_locked(pipe, PIPE_CMD_OPEN);
+ spin_unlock_irqrestore(&dev->lock, flags);
+ if (status < 0)
+ goto err_cmd;
/* All is done, save the pipe into the file's private data field */
file->private_data = pipe;
return 0;
+
+err_cmd:
+ spin_lock_irqsave(&dev->lock, flags);
+ dev->pipes[id] = NULL;
+err_id_locked:
+ spin_unlock_irqrestore(&dev->lock, flags);
+ free_page((unsigned long)pipe->command_buffer);
+err_pipe:
+ kfree(pipe);
+ return status;
}
static int goldfish_pipe_release(struct inode *inode, struct file *filp)
{
+ unsigned long flags;
struct goldfish_pipe *pipe = filp->private_data;
+ struct goldfish_pipe_dev *dev = pipe->dev;
/* The guest is closing the channel, so tell the emulator right now */
- goldfish_cmd(pipe, CMD_CLOSE);
- kfree(pipe);
+ (void)goldfish_cmd(pipe, PIPE_CMD_CLOSE);
+
+ spin_lock_irqsave(&dev->lock, flags);
+ dev->pipes[pipe->id] = NULL;
+ signalled_pipes_remove_locked(dev, pipe);
+ spin_unlock_irqrestore(&dev->lock, flags);
+
filp->private_data = NULL;
+ free_page((unsigned long)pipe->command_buffer);
+ kfree(pipe);
return 0;
}
@@ -574,18 +819,91 @@ static const struct file_operations goldfish_pipe_fops = {
.release = goldfish_pipe_release,
};
-static struct miscdevice goldfish_pipe_device = {
+static struct miscdevice goldfish_pipe_dev = {
.minor = MISC_DYNAMIC_MINOR,
.name = "goldfish_pipe",
.fops = &goldfish_pipe_fops,
};
+static int goldfish_pipe_device_init(struct platform_device *pdev)
+{
+ char *page;
+ struct goldfish_pipe_dev *dev = pipe_dev;
+ int err = devm_request_irq(&pdev->dev, dev->irq,
+ goldfish_pipe_interrupt,
+ IRQF_SHARED, "goldfish_pipe", dev);
+ if (err) {
+ dev_err(&pdev->dev, "unable to allocate IRQ for v2\n");
+ return err;
+ }
+
+ err = misc_register(&goldfish_pipe_dev);
+ if (err) {
+ dev_err(&pdev->dev, "unable to register v2 device\n");
+ return err;
+ }
+
+ dev->first_signalled_pipe = NULL;
+ dev->pipes_capacity = INITIAL_PIPES_CAPACITY;
+ dev->pipes = kcalloc(dev->pipes_capacity, sizeof(*dev->pipes),
+ GFP_KERNEL);
+ if (!dev->pipes)
+ return -ENOMEM;
+
+ /*
+ * We're going to pass two buffers, open_command_params and
+ * signalled_pipe_buffers, to the host. This means each of those buffers
+ * needs to be contained in a single physical page. The easiest choice
+ * is to just allocate a page and place the buffers in it.
+ */
+ if (WARN_ON(sizeof(*dev->buffers) > PAGE_SIZE))
+ return -ENOMEM;
+
+ page = (char *)__get_free_page(GFP_KERNEL);
+ if (!page) {
+ kfree(dev->pipes);
+ return -ENOMEM;
+ }
+ dev->buffers = (struct goldfish_pipe_dev_buffers *)page;
+
+ /* Send the buffer addresses to the host */
+ {
+ u64 paddr = __pa(&dev->buffers->signalled_pipe_buffers);
+
+ writel((u32)(unsigned long)(paddr >> 32),
+ dev->base + PIPE_REG_SIGNAL_BUFFER_HIGH);
+ writel((u32)(unsigned long)paddr,
+ dev->base + PIPE_REG_SIGNAL_BUFFER);
+ writel((u32)MAX_SIGNALLED_PIPES,
+ dev->base + PIPE_REG_SIGNAL_BUFFER_COUNT);
+
+ paddr = __pa(&dev->buffers->open_command_params);
+ writel((u32)(unsigned long)(paddr >> 32),
+ dev->base + PIPE_REG_OPEN_BUFFER_HIGH);
+ writel((u32)(unsigned long)paddr,
+ dev->base + PIPE_REG_OPEN_BUFFER);
+ }
+ return 0;
+}
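
A small aside on the high/low register writes above (illustration only, not a change to the patch): the open-coded (u32)(paddr >> 32) and (u32)paddr casts are equivalent to the kernel's upper_32_bits()/lower_32_bits() helpers from <linux/kernel.h>, for example:

	u64 paddr = __pa(&dev->buffers->signalled_pipe_buffers);

	writel(upper_32_bits(paddr), dev->base + PIPE_REG_SIGNAL_BUFFER_HIGH);
	writel(lower_32_bits(paddr), dev->base + PIPE_REG_SIGNAL_BUFFER);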
+
+static void goldfish_pipe_device_deinit(struct platform_device *pdev)
+{
+ struct goldfish_pipe_dev *dev = pipe_dev;
+
+ misc_deregister(&goldfish_pipe_dev);
+ kfree(dev->pipes);
+ free_page((unsigned long)dev->buffers);
+}
+
static int goldfish_pipe_probe(struct platform_device *pdev)
{
int err;
struct resource *r;
struct goldfish_pipe_dev *dev = pipe_dev;
+ if (WARN_ON(sizeof(struct goldfish_pipe_command) > PAGE_SIZE))
+ return -ENOMEM;
+
/* not thread safe, but this should not happen */
WARN_ON(dev->base != NULL);
@@ -609,26 +927,21 @@ static int goldfish_pipe_probe(struct platform_device *pdev)
}
dev->irq = r->start;
- err = devm_request_irq(&pdev->dev, dev->irq, goldfish_pipe_interrupt,
- IRQF_SHARED, "goldfish_pipe", dev);
- if (err) {
- dev_err(&pdev->dev, "unable to allocate IRQ\n");
- goto error;
- }
-
- err = misc_register(&goldfish_pipe_device);
- if (err) {
- dev_err(&pdev->dev, "unable to register device\n");
- goto error;
- }
- setup_access_params_addr(pdev, dev);
-
- /* Although the pipe device in the classic Android emulator does not
- * recognize the 'version' register, it won't treat this as an error
- * either and will simply return 0, which is fine.
+ /*
+ * Exchange the versions with the host device
+ *
+	 * Note: the v1 driver did not report its version, so we write ours before
+	 * reading the device version back; this allows the host implementation to
+	 * detect the old driver (no version write before the read).
*/
+ writel((u32)PIPE_DRIVER_VERSION, dev->base + PIPE_REG_VERSION);
dev->version = readl(dev->base + PIPE_REG_VERSION);
- return 0;
+ if (WARN_ON(dev->version < PIPE_CURRENT_DEVICE_VERSION))
+ return -EINVAL;
+
+ err = goldfish_pipe_device_init(pdev);
+ if (!err)
+ return 0;
error:
dev->base = NULL;
@@ -638,7 +951,7 @@ error:
static int goldfish_pipe_remove(struct platform_device *pdev)
{
struct goldfish_pipe_dev *dev = pipe_dev;
- misc_deregister(&goldfish_pipe_device);
+ goldfish_pipe_device_deinit(pdev);
dev->base = NULL;
return 0;
}
@@ -655,17 +968,16 @@ static const struct of_device_id goldfish_pipe_of_match[] = {
};
MODULE_DEVICE_TABLE(of, goldfish_pipe_of_match);
-static struct platform_driver goldfish_pipe = {
+static struct platform_driver goldfish_pipe_driver = {
.probe = goldfish_pipe_probe,
.remove = goldfish_pipe_remove,
.driver = {
.name = "goldfish_pipe",
- .owner = THIS_MODULE,
.of_match_table = goldfish_pipe_of_match,
.acpi_match_table = ACPI_PTR(goldfish_pipe_acpi_match),
}
};
-module_platform_driver(goldfish_pipe);
+module_platform_driver(goldfish_pipe_driver);
MODULE_AUTHOR("David Turner <digit@google.com>");
MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 883fbe7a2466..8489020ecf44 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -182,7 +182,8 @@ config FUJITSU_LAPTOP
depends on INPUT
depends on BACKLIGHT_CLASS_DEVICE
depends on ACPI_VIDEO || ACPI_VIDEO = n
- depends on LEDS_CLASS || LEDS_CLASS=n
+ select INPUT_SPARSEKMAP
+ select LEDS_CLASS
---help---
This is a driver for laptops built by Fujitsu:
@@ -780,6 +781,19 @@ config ACPI_CMPC
keys as input device, backlight device, tablet and accelerometer
devices.
+config INTEL_CHT_INT33FE
+ tristate "Intel Cherry Trail ACPI INT33FE Driver"
+ depends on X86 && ACPI && I2C
+ ---help---
+	  This driver adds support for the INT33FE ACPI device found on
+ some Intel Cherry Trail devices.
+
+ The INT33FE ACPI device has a CRS table with I2cSerialBusV2
+ resources for 3 devices: Maxim MAX17047 Fuel Gauge Controller,
+ FUSB302 USB Type-C Controller and PI3USB30532 USB switch.
+ This driver instantiates i2c-clients for these, so that standard
+	  i2c drivers for these chips can bind to them.
+
config INTEL_HID_EVENT
tristate "INTEL HID Event"
depends on ACPI
@@ -1087,7 +1101,7 @@ config INTEL_TURBO_MAX_3
config SILEAD_DMI
bool "Tablets with Silead touchscreens"
- depends on ACPI && DMI && I2C=y && INPUT
+ depends on ACPI && DMI && I2C=y && TOUCHSCREEN_SILEAD
---help---
Certain ACPI based tablets with Silead touchscreens do not have
enough data in ACPI tables for the touchscreen driver to handle
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
index 776b3a7a4984..182a3ed6605a 100644
--- a/drivers/platform/x86/Makefile
+++ b/drivers/platform/x86/Makefile
@@ -45,6 +45,7 @@ obj-$(CONFIG_ACPI_TOSHIBA) += toshiba_acpi.o
obj-$(CONFIG_TOSHIBA_BT_RFKILL) += toshiba_bluetooth.o
obj-$(CONFIG_TOSHIBA_HAPS) += toshiba_haps.o
obj-$(CONFIG_TOSHIBA_WMI) += toshiba-wmi.o
+obj-$(CONFIG_INTEL_CHT_INT33FE) += intel_cht_int33fe.o
obj-$(CONFIG_INTEL_HID_EVENT) += intel-hid.o
obj-$(CONFIG_INTEL_VBTN) += intel-vbtn.o
obj-$(CONFIG_INTEL_SCU_IPC) += intel_scu_ipc.o
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
index dac0fbe87460..79fa5ab3fd00 100644
--- a/drivers/platform/x86/acer-wmi.c
+++ b/drivers/platform/x86/acer-wmi.c
@@ -1896,7 +1896,7 @@ static acpi_status __init acer_wmi_get_handle_cb(acpi_handle ah, u32 level,
if (!strcmp(ctx, "SENR")) {
if (acpi_bus_get_device(ah, &dev))
return AE_OK;
- if (!strcmp(ACER_WMID_ACCEL_HID, acpi_device_hid(dev)))
+ if (strcmp(ACER_WMID_ACCEL_HID, acpi_device_hid(dev)))
return AE_OK;
} else
return AE_OK;
@@ -1917,8 +1917,7 @@ static int __init acer_wmi_get_handle(const char *name, const char *prop,
handle = NULL;
status = acpi_get_devices(prop, acer_wmi_get_handle_cb,
(void *)name, &handle);
-
- if (ACPI_SUCCESS(status)) {
+ if (ACPI_SUCCESS(status) && handle) {
*ah = handle;
return 0;
} else {
@@ -1987,7 +1986,7 @@ static int __init acer_wmi_input_setup(void)
acer_wmi_notify, NULL);
if (ACPI_FAILURE(status)) {
err = -EIO;
- goto err_free_keymap;
+ goto err_free_dev;
}
err = input_register_device(acer_wmi_input_dev);
@@ -1998,8 +1997,6 @@ static int __init acer_wmi_input_setup(void)
err_uninstall_notifier:
wmi_remove_notify_handler(ACERWMID_EVENT_GUID);
-err_free_keymap:
- sparse_keymap_free(acer_wmi_input_dev);
err_free_dev:
input_free_device(acer_wmi_input_dev);
return err;
@@ -2008,7 +2005,6 @@ err_free_dev:
static void acer_wmi_input_destroy(void)
{
wmi_remove_notify_handler(ACERWMID_EVENT_GUID);
- sparse_keymap_free(acer_wmi_input_dev);
input_unregister_device(acer_wmi_input_dev);
}
@@ -2290,8 +2286,8 @@ static int __init acer_wmi_init(void)
if (err)
return err;
err = acer_wmi_accel_setup();
- if (err)
- return err;
+ if (err && err != -ENODEV)
+ pr_warn("Cannot enable accelerometer\n");
}
err = platform_driver_register(&acer_platform_driver);
diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c
index 28551f5a2e07..c4768be24ba9 100644
--- a/drivers/platform/x86/asus-laptop.c
+++ b/drivers/platform/x86/asus-laptop.c
@@ -1516,14 +1516,12 @@ static int asus_input_init(struct asus_laptop *asus)
error = input_register_device(input);
if (error) {
pr_warn("Unable to register input device\n");
- goto err_free_keymap;
+ goto err_free_dev;
}
asus->inputdev = input;
return 0;
-err_free_keymap:
- sparse_keymap_free(input);
err_free_dev:
input_free_device(input);
return error;
@@ -1531,10 +1529,8 @@ err_free_dev:
static void asus_input_exit(struct asus_laptop *asus)
{
- if (asus->inputdev) {
- sparse_keymap_free(asus->inputdev);
+ if (asus->inputdev)
input_unregister_device(asus->inputdev);
- }
asus->inputdev = NULL;
}
diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
index dea98ffb6f60..5269a01d9bdd 100644
--- a/drivers/platform/x86/asus-nb-wmi.c
+++ b/drivers/platform/x86/asus-nb-wmi.c
@@ -111,6 +111,10 @@ static struct quirk_entry quirk_asus_x550lb = {
.xusb2pr = 0x01D9,
};
+static struct quirk_entry quirk_asus_ux330uak = {
+ .wmi_force_als_set = true,
+};
+
static int dmi_matched(const struct dmi_system_id *dmi)
{
pr_info("Identified laptop model '%s'\n", dmi->ident);
@@ -144,6 +148,15 @@ static const struct dmi_system_id asus_quirks[] = {
},
{
.callback = dmi_matched,
+ .ident = "ASUSTeK COMPUTER INC. X302UA",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X302UA"),
+ },
+ .driver_data = &quirk_asus_wapf4,
+ },
+ {
+ .callback = dmi_matched,
.ident = "ASUSTeK COMPUTER INC. X401U",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
@@ -369,6 +382,15 @@ static const struct dmi_system_id asus_quirks[] = {
},
{
.callback = dmi_matched,
+ .ident = "ASUSTeK COMPUTER INC. UX330UAK",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "UX330UAK"),
+ },
+ .driver_data = &quirk_asus_ux330uak,
+ },
+ {
+ .callback = dmi_matched,
.ident = "ASUSTeK COMPUTER INC. X550LB",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index 8fe5890bf539..6c7d86074b38 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -269,12 +269,10 @@ static int asus_wmi_input_init(struct asus_wmi *asus)
err = input_register_device(asus->inputdev);
if (err)
- goto err_free_keymap;
+ goto err_free_dev;
return 0;
-err_free_keymap:
- sparse_keymap_free(asus->inputdev);
err_free_dev:
input_free_device(asus->inputdev);
return err;
@@ -282,10 +280,8 @@ err_free_dev:
static void asus_wmi_input_exit(struct asus_wmi *asus)
{
- if (asus->inputdev) {
- sparse_keymap_free(asus->inputdev);
+ if (asus->inputdev)
input_unregister_device(asus->inputdev);
- }
asus->inputdev = NULL;
}
@@ -1109,6 +1105,15 @@ static void asus_wmi_set_xusb2pr(struct asus_wmi *asus)
}
/*
+ * Some devices don't support, or have a broken, get_als method
+ * but still support the set method.
+ */
+static void asus_wmi_set_als(void)
+{
+ asus_wmi_set_devstate(ASUS_WMI_DEVID_ALS_ENABLE, 1, NULL);
+}
+
+/*
* Hwmon device
*/
static int asus_hwmon_agfn_fan_speed_read(struct asus_wmi *asus, int fan,
@@ -1761,7 +1766,7 @@ ASUS_WMI_CREATE_DEVICE_ATTR(cardr, 0644, ASUS_WMI_DEVID_CARDREADER);
ASUS_WMI_CREATE_DEVICE_ATTR(lid_resume, 0644, ASUS_WMI_DEVID_LID_RESUME);
ASUS_WMI_CREATE_DEVICE_ATTR(als_enable, 0644, ASUS_WMI_DEVID_ALS_ENABLE);
-static ssize_t store_cpufv(struct device *dev, struct device_attribute *attr,
+static ssize_t cpufv_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
int value, rv;
@@ -1778,7 +1783,7 @@ static ssize_t store_cpufv(struct device *dev, struct device_attribute *attr,
return count;
}
-static DEVICE_ATTR(cpufv, S_IRUGO | S_IWUSR, NULL, store_cpufv);
+static DEVICE_ATTR_WO(cpufv);
static struct attribute *platform_attributes[] = {
&dev_attr_cpufv.attr,
@@ -2117,6 +2122,9 @@ static int asus_wmi_add(struct platform_device *pdev)
goto fail_rfkill;
}
+ if (asus->driver->quirks->wmi_force_als_set)
+ asus_wmi_set_als();
+
/* Some Asus desktop boards export an acpi-video backlight interface,
stop this from showing up */
chassis_type = dmi_get_system_info(DMI_CHASSIS_TYPE);
diff --git a/drivers/platform/x86/asus-wmi.h b/drivers/platform/x86/asus-wmi.h
index c9589d9342bb..6c1311f4b04d 100644
--- a/drivers/platform/x86/asus-wmi.h
+++ b/drivers/platform/x86/asus-wmi.h
@@ -44,6 +44,7 @@ struct quirk_entry {
bool store_backlight_power;
bool wmi_backlight_power;
bool wmi_backlight_native;
+ bool wmi_force_als_set;
int wapf;
/*
* For machines with AMD graphic chips, it will send out WMI event
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
index 2e237bad4995..ec202094bd50 100644
--- a/drivers/platform/x86/dell-laptop.c
+++ b/drivers/platform/x86/dell-laptop.c
@@ -45,6 +45,7 @@
#define KBD_LED_AUTO_100_TOKEN 0x02F6
#define GLOBAL_MIC_MUTE_ENABLE 0x0364
#define GLOBAL_MIC_MUTE_DISABLE 0x0365
+#define KBD_LED_AC_TOKEN 0x0451
struct quirk_entry {
u8 touchpad_led;
@@ -1027,7 +1028,7 @@ static void touchpad_led_exit(void)
* bit 2 Pointing stick
* bit 3 Any mouse
* bits 4-7 Reserved for future use
- * cbRES2, byte3 Current Timeout
+ * cbRES2, byte3 Current Timeout on battery
* bits 7:6 Timeout units indicator:
* 00b Seconds
* 01b Minutes
@@ -1039,6 +1040,15 @@ static void touchpad_led_exit(void)
* cbRES3, byte0 Current setting of ALS value that turns the light on or off.
* cbRES3, byte1 Current ALS reading
* cbRES3, byte2 Current keyboard light level.
+ * cbRES3, byte3 Current timeout on AC Power
+ * bits 7:6 Timeout units indicator:
+ * 00b Seconds
+ * 01b Minutes
+ * 10b Hours
+ * 11b Days
+ * Bits 5:0 Timeout value (0-63) in sec/min/hr/day
+ * NOTE: A value of 0 means always on (no timeout) if any bits of RES3 byte2
+ *     are set upon return from the [Get Feature information] call.
*
* cbArg1 0x2 = Set New State
* cbRES1 Standard return codes (0, -1, -2)
@@ -1061,7 +1071,7 @@ static void touchpad_led_exit(void)
* bit 2 Pointing stick
* bit 3 Any mouse
* bits 4-7 Reserved for future use
- * cbArg2, byte3 Desired Timeout
+ * cbArg2, byte3 Desired Timeout on battery
* bits 7:6 Timeout units indicator:
* 00b Seconds
* 01b Minutes
@@ -1070,6 +1080,13 @@ static void touchpad_led_exit(void)
* bits 5:0 Timeout value (0-63) in sec/min/hr/day
* cbArg3, byte0 Desired setting of ALS value that turns the light on or off.
* cbArg3, byte2 Desired keyboard light level.
+ * cbArg3, byte3 Desired Timeout on AC power
+ * bits 7:6 Timeout units indicator:
+ * 00b Seconds
+ * 01b Minutes
+ * 10b Hours
+ * 11b Days
+ * bits 5:0 Timeout value (0-63) in sec/min/hr/day
*/
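
To make the timeout byte layout above concrete: both the battery and AC timeout bytes keep the unit in bits 7:6 and the value (0-63) in bits 5:0, which is why the driver code further down masks with 0x3F and 0x3 and shifts by 24 and 30 within the 32-bit word. A hypothetical pack/unpack pair, for illustration only (these helpers are not part of the patch):

static inline u8 kbd_timeout_pack(u8 unit, u8 value)
{
	/* unit: bits 7:6, value: bits 5:0 */
	return ((unit & 0x3) << 6) | (value & 0x3f);
}

static inline void kbd_timeout_unpack(u8 byte, u8 *unit, u8 *value)
{
	*unit = byte >> 6;
	*value = byte & 0x3f;
}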
@@ -1115,6 +1132,8 @@ struct kbd_state {
u8 triggers;
u8 timeout_value;
u8 timeout_unit;
+ u8 timeout_value_ac;
+ u8 timeout_unit_ac;
u8 als_setting;
u8 als_value;
u8 level;
@@ -1134,6 +1153,7 @@ static u16 kbd_token_bits;
static struct kbd_info kbd_info;
static bool kbd_als_supported;
static bool kbd_triggers_supported;
+static bool kbd_timeout_ac_supported;
static u8 kbd_mode_levels[16];
static int kbd_mode_levels_count;
@@ -1142,6 +1162,7 @@ static u8 kbd_previous_level;
static u8 kbd_previous_mode_bit;
static bool kbd_led_present;
+static DEFINE_MUTEX(kbd_led_mutex);
/*
* NOTE: there are three ways to set the keyboard backlight level.
@@ -1272,6 +1293,8 @@ static int kbd_get_state(struct kbd_state *state)
state->als_setting = buffer->output[2] & 0xFF;
state->als_value = (buffer->output[2] >> 8) & 0xFF;
state->level = (buffer->output[2] >> 16) & 0xFF;
+ state->timeout_value_ac = (buffer->output[2] >> 24) & 0x3F;
+ state->timeout_unit_ac = (buffer->output[2] >> 30) & 0x3;
out:
dell_smbios_release_buffer();
@@ -1291,6 +1314,8 @@ static int kbd_set_state(struct kbd_state *state)
buffer->input[1] |= (state->timeout_unit & 0x3) << 30;
buffer->input[2] = state->als_setting & 0xFF;
buffer->input[2] |= (state->level & 0xFF) << 16;
+ buffer->input[2] |= (state->timeout_value_ac & 0x3F) << 24;
+ buffer->input[2] |= (state->timeout_unit_ac & 0x3) << 30;
dell_smbios_send_request(4, 11);
ret = buffer->output[0];
dell_smbios_release_buffer();
@@ -1397,6 +1422,13 @@ static inline int kbd_init_info(void)
if (ret)
return ret;
+	/* NOTE: Old models without the KBD_LED_AC_TOKEN token support only one
+	 * timeout value, which is shared between battery and AC power
+	 * settings, so do not try to set AC values on old models.
+ */
+ if (dell_smbios_find_token(KBD_LED_AC_TOKEN))
+ kbd_timeout_ac_supported = true;
+
kbd_get_state(&state);
/* NOTE: timeout value is stored in 6 bits so max value is 63 */
@@ -1571,35 +1603,56 @@ static ssize_t kbd_led_timeout_store(struct device *dev,
}
}
+ mutex_lock(&kbd_led_mutex);
+
ret = kbd_get_state(&state);
if (ret)
- return ret;
+ goto out;
new_state = state;
- new_state.timeout_value = value;
- new_state.timeout_unit = unit;
+
+ if (kbd_timeout_ac_supported && power_supply_is_system_supplied() > 0) {
+ new_state.timeout_value_ac = value;
+ new_state.timeout_unit_ac = unit;
+ } else {
+ new_state.timeout_value = value;
+ new_state.timeout_unit = unit;
+ }
ret = kbd_set_state_safe(&new_state, &state);
if (ret)
- return ret;
+ goto out;
- return count;
+ ret = count;
+out:
+ mutex_unlock(&kbd_led_mutex);
+ return ret;
}
static ssize_t kbd_led_timeout_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct kbd_state state;
+ int value;
int ret;
int len;
+ u8 unit;
ret = kbd_get_state(&state);
if (ret)
return ret;
- len = sprintf(buf, "%d", state.timeout_value);
+ if (kbd_timeout_ac_supported && power_supply_is_system_supplied() > 0) {
+ value = state.timeout_value_ac;
+ unit = state.timeout_unit_ac;
+ } else {
+ value = state.timeout_value;
+ unit = state.timeout_unit;
+ }
+
+ len = sprintf(buf, "%d", value);
- switch (state.timeout_unit) {
+ switch (unit) {
case KBD_TIMEOUT_SECONDS:
return len + sprintf(buf+len, "s\n");
case KBD_TIMEOUT_MINUTES:
@@ -1643,9 +1696,11 @@ static ssize_t kbd_led_triggers_store(struct device *dev,
if (trigger[0] != '+' && trigger[0] != '-')
return -EINVAL;
+ mutex_lock(&kbd_led_mutex);
+
ret = kbd_get_state(&state);
if (ret)
- return ret;
+ goto out;
if (kbd_triggers_supported)
triggers_enabled = kbd_is_trigger_mode_bit(state.mode_bit);
@@ -1659,48 +1714,62 @@ static ssize_t kbd_led_triggers_store(struct device *dev,
if (strcmp(trigger+1, kbd_led_triggers[i]) != 0)
continue;
if (trigger[0] == '+' &&
- triggers_enabled && (state.triggers & BIT(i)))
- return count;
+ triggers_enabled && (state.triggers & BIT(i))) {
+ ret = count;
+ goto out;
+ }
if (trigger[0] == '-' &&
- (!triggers_enabled || !(state.triggers & BIT(i))))
- return count;
+ (!triggers_enabled || !(state.triggers & BIT(i)))) {
+ ret = count;
+ goto out;
+ }
trigger_bit = i;
break;
}
}
- if (trigger_bit != -1) {
- new_state = state;
- if (trigger[0] == '+')
- new_state.triggers |= BIT(trigger_bit);
- else {
- new_state.triggers &= ~BIT(trigger_bit);
- /* NOTE: trackstick bit (2) must be disabled when
- * disabling touchpad bit (1), otherwise touchpad
- * bit (1) will not be disabled */
- if (trigger_bit == 1)
- new_state.triggers &= ~BIT(2);
- }
- if ((kbd_info.triggers & new_state.triggers) !=
- new_state.triggers)
- return -EINVAL;
- if (new_state.triggers && !triggers_enabled) {
- new_state.mode_bit = KBD_MODE_BIT_TRIGGER;
- kbd_set_level(&new_state, kbd_previous_level);
- } else if (new_state.triggers == 0) {
- kbd_set_level(&new_state, 0);
- }
- if (!(kbd_info.modes & BIT(new_state.mode_bit)))
- return -EINVAL;
- ret = kbd_set_state_safe(&new_state, &state);
- if (ret)
- return ret;
- if (new_state.mode_bit != KBD_MODE_BIT_OFF)
- kbd_previous_mode_bit = new_state.mode_bit;
- return count;
+ if (trigger_bit == -1) {
+ ret = -EINVAL;
+ goto out;
}
- return -EINVAL;
+ new_state = state;
+ if (trigger[0] == '+')
+ new_state.triggers |= BIT(trigger_bit);
+ else {
+ new_state.triggers &= ~BIT(trigger_bit);
+ /*
+ * NOTE: trackstick bit (2) must be disabled when
+ * disabling touchpad bit (1), otherwise touchpad
+ * bit (1) will not be disabled
+ */
+ if (trigger_bit == 1)
+ new_state.triggers &= ~BIT(2);
+ }
+ if ((kbd_info.triggers & new_state.triggers) !=
+ new_state.triggers) {
+ ret = -EINVAL;
+ goto out;
+ }
+ if (new_state.triggers && !triggers_enabled) {
+ new_state.mode_bit = KBD_MODE_BIT_TRIGGER;
+ kbd_set_level(&new_state, kbd_previous_level);
+ } else if (new_state.triggers == 0) {
+ kbd_set_level(&new_state, 0);
+ }
+ if (!(kbd_info.modes & BIT(new_state.mode_bit))) {
+ ret = -EINVAL;
+ goto out;
+ }
+ ret = kbd_set_state_safe(&new_state, &state);
+ if (ret)
+ goto out;
+ if (new_state.mode_bit != KBD_MODE_BIT_OFF)
+ kbd_previous_mode_bit = new_state.mode_bit;
+ ret = count;
+out:
+ mutex_unlock(&kbd_led_mutex);
+ return ret;
}
static ssize_t kbd_led_triggers_show(struct device *dev,
@@ -1757,12 +1826,16 @@ static ssize_t kbd_led_als_enabled_store(struct device *dev,
if (ret)
return ret;
+ mutex_lock(&kbd_led_mutex);
+
ret = kbd_get_state(&state);
if (ret)
- return ret;
+ goto out;
- if (enable == kbd_is_als_mode_bit(state.mode_bit))
- return count;
+ if (enable == kbd_is_als_mode_bit(state.mode_bit)) {
+ ret = count;
+ goto out;
+ }
new_state = state;
@@ -1782,15 +1855,20 @@ static ssize_t kbd_led_als_enabled_store(struct device *dev,
new_state.mode_bit = KBD_MODE_BIT_ON;
}
}
- if (!(kbd_info.modes & BIT(new_state.mode_bit)))
- return -EINVAL;
+ if (!(kbd_info.modes & BIT(new_state.mode_bit))) {
+ ret = -EINVAL;
+ goto out;
+ }
ret = kbd_set_state_safe(&new_state, &state);
if (ret)
- return ret;
+ goto out;
kbd_previous_mode_bit = new_state.mode_bit;
- return count;
+ ret = count;
+out:
+ mutex_unlock(&kbd_led_mutex);
+ return ret;
}
static ssize_t kbd_led_als_enabled_show(struct device *dev,
@@ -1825,18 +1903,23 @@ static ssize_t kbd_led_als_setting_store(struct device *dev,
if (ret)
return ret;
+ mutex_lock(&kbd_led_mutex);
+
ret = kbd_get_state(&state);
if (ret)
- return ret;
+ goto out;
new_state = state;
new_state.als_setting = setting;
ret = kbd_set_state_safe(&new_state, &state);
if (ret)
- return ret;
+ goto out;
- return count;
+ ret = count;
+out:
+ mutex_unlock(&kbd_led_mutex);
+ return ret;
}
static ssize_t kbd_led_als_setting_show(struct device *dev,
@@ -1921,31 +2004,37 @@ static int kbd_led_level_set(struct led_classdev *led_cdev,
u16 num;
int ret;
+ mutex_lock(&kbd_led_mutex);
+
if (kbd_get_max_level()) {
ret = kbd_get_state(&state);
if (ret)
- return ret;
+ goto out;
new_state = state;
ret = kbd_set_level(&new_state, value);
if (ret)
- return ret;
- return kbd_set_state_safe(&new_state, &state);
- }
-
- if (kbd_get_valid_token_counts()) {
+ goto out;
+ ret = kbd_set_state_safe(&new_state, &state);
+ } else if (kbd_get_valid_token_counts()) {
for (num = kbd_token_bits; num != 0 && value > 0; --value)
num &= num - 1; /* clear the first bit set */
if (num == 0)
- return 0;
- return kbd_set_token_bit(ffs(num) - 1);
+ ret = 0;
+ else
+ ret = kbd_set_token_bit(ffs(num) - 1);
+ } else {
+ pr_warn("Keyboard brightness level control not supported\n");
+ ret = -ENXIO;
}
- pr_warn("Keyboard brightness level control not supported\n");
- return -ENXIO;
+out:
+ mutex_unlock(&kbd_led_mutex);
+ return ret;
}
static struct led_classdev kbd_led = {
.name = "dell::kbd_backlight",
+ .flags = LED_BRIGHT_HW_CHANGED,
.brightness_set_blocking = kbd_led_level_set,
.brightness_get = kbd_led_level_get,
.groups = kbd_led_groups,
@@ -1953,6 +2042,8 @@ static struct led_classdev kbd_led = {
static int __init kbd_led_init(struct device *dev)
{
+ int ret;
+
kbd_init();
if (!kbd_led_present)
return -ENODEV;
@@ -1964,7 +2055,11 @@ static int __init kbd_led_init(struct device *dev)
if (kbd_led.max_brightness)
kbd_led.max_brightness--;
}
- return led_classdev_register(dev, &kbd_led);
+ ret = led_classdev_register(dev, &kbd_led);
+ if (ret)
+ kbd_led_present = false;
+
+ return ret;
}
static void brightness_set_exit(struct led_classdev *led_cdev,
@@ -1981,6 +2076,26 @@ static void kbd_led_exit(void)
led_classdev_unregister(&kbd_led);
}
+static int dell_laptop_notifier_call(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ switch (action) {
+ case DELL_LAPTOP_KBD_BACKLIGHT_BRIGHTNESS_CHANGED:
+ if (!kbd_led_present)
+ break;
+
+ led_classdev_notify_brightness_hw_changed(&kbd_led,
+ kbd_led_level_get(&kbd_led));
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block dell_laptop_notifier = {
+ .notifier_call = dell_laptop_notifier_call,
+};
+
int dell_micmute_led_set(int state)
{
struct calling_interface_buffer *buffer;
@@ -2049,6 +2164,8 @@ static int __init dell_init(void)
debugfs_create_file("rfkill", 0444, dell_laptop_dir, NULL,
&dell_debugfs_fops);
+ dell_laptop_register_notifier(&dell_laptop_notifier);
+
if (acpi_video_get_backlight_type() != acpi_backlight_vendor)
return 0;
@@ -2081,11 +2198,17 @@ static int __init dell_init(void)
dell_backlight_device->props.brightness =
dell_get_intensity(dell_backlight_device);
+ if (dell_backlight_device->props.brightness < 0) {
+ ret = dell_backlight_device->props.brightness;
+ goto fail_get_brightness;
+ }
backlight_update_status(dell_backlight_device);
}
return 0;
+fail_get_brightness:
+ backlight_device_unregister(dell_backlight_device);
fail_backlight:
dell_cleanup_rfkill();
fail_rfkill:
@@ -2100,6 +2223,7 @@ fail_platform_driver:
static void __exit dell_exit(void)
{
+ dell_laptop_unregister_notifier(&dell_laptop_notifier);
debugfs_remove_recursive(dell_laptop_dir);
if (quirks && quirks->touchpad_led)
touchpad_led_exit();
diff --git a/drivers/platform/x86/dell-smbios.c b/drivers/platform/x86/dell-smbios.c
index d2412ab097da..0a5723468bff 100644
--- a/drivers/platform/x86/dell-smbios.c
+++ b/drivers/platform/x86/dell-smbios.c
@@ -105,6 +105,26 @@ struct calling_interface_token *dell_smbios_find_token(int tokenid)
}
EXPORT_SYMBOL_GPL(dell_smbios_find_token);
+static BLOCKING_NOTIFIER_HEAD(dell_laptop_chain_head);
+
+int dell_laptop_register_notifier(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_register(&dell_laptop_chain_head, nb);
+}
+EXPORT_SYMBOL_GPL(dell_laptop_register_notifier);
+
+int dell_laptop_unregister_notifier(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_unregister(&dell_laptop_chain_head, nb);
+}
+EXPORT_SYMBOL_GPL(dell_laptop_unregister_notifier);
+
+void dell_laptop_call_notifier(unsigned long action, void *data)
+{
+ blocking_notifier_call_chain(&dell_laptop_chain_head, action, data);
+}
+EXPORT_SYMBOL_GPL(dell_laptop_call_notifier);
+
static void __init parse_da_table(const struct dmi_header *dm)
{
/* Final token is a terminator, so we don't want to copy it */
diff --git a/drivers/platform/x86/dell-smbios.h b/drivers/platform/x86/dell-smbios.h
index ec7d40ae5e6e..45cbc2292cd3 100644
--- a/drivers/platform/x86/dell-smbios.h
+++ b/drivers/platform/x86/dell-smbios.h
@@ -16,6 +16,8 @@
#ifndef _DELL_SMBIOS_H_
#define _DELL_SMBIOS_H_
+struct notifier_block;
+
/* This structure will be modified by the firmware when we enter
* system management mode, hence the volatiles */
@@ -43,4 +45,13 @@ void dell_smbios_release_buffer(void);
void dell_smbios_send_request(int class, int select);
struct calling_interface_token *dell_smbios_find_token(int tokenid);
+
+enum dell_laptop_notifier_actions {
+ DELL_LAPTOP_KBD_BACKLIGHT_BRIGHTNESS_CHANGED,
+};
+
+int dell_laptop_register_notifier(struct notifier_block *nb);
+int dell_laptop_unregister_notifier(struct notifier_block *nb);
+void dell_laptop_call_notifier(unsigned long action, void *data);
+
#endif
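
For reference, a consumer of the notifier interface declared above follows the usual blocking-notifier pattern: register a notifier_block, switch on the action in the callback, and unregister on exit. This mirrors what dell-laptop.c does elsewhere in this patch; the demo_* names below are hypothetical and the snippet is an illustration only:

#include <linux/notifier.h>
#include <linux/printk.h>
#include "dell-smbios.h"

static int demo_notifier_call(struct notifier_block *nb,
			      unsigned long action, void *data)
{
	if (action == DELL_LAPTOP_KBD_BACKLIGHT_BRIGHTNESS_CHANGED)
		pr_info("keyboard backlight brightness changed\n");
	return NOTIFY_OK;
}

static struct notifier_block demo_nb = {
	.notifier_call = demo_notifier_call,
};

static int demo_init(void)
{
	return dell_laptop_register_notifier(&demo_nb);
}

static void demo_exit(void)
{
	dell_laptop_unregister_notifier(&demo_nb);
}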
diff --git a/drivers/platform/x86/dell-wmi-aio.c b/drivers/platform/x86/dell-wmi-aio.c
index dbc97a33bbc8..50c2078715d6 100644
--- a/drivers/platform/x86/dell-wmi-aio.c
+++ b/drivers/platform/x86/dell-wmi-aio.c
@@ -152,12 +152,10 @@ static int __init dell_wmi_aio_input_setup(void)
err = input_register_device(dell_wmi_aio_input_dev);
if (err) {
pr_info("Unable to register input device\n");
- goto err_free_keymap;
+ goto err_free_dev;
}
return 0;
-err_free_keymap:
- sparse_keymap_free(dell_wmi_aio_input_dev);
err_free_dev:
input_free_device(dell_wmi_aio_input_dev);
return err;
@@ -192,7 +190,6 @@ static int __init dell_wmi_aio_init(void)
err = wmi_install_notify_handler(guid, dell_wmi_aio_notify, NULL);
if (err) {
pr_err("Unable to register notify handler - %d\n", err);
- sparse_keymap_free(dell_wmi_aio_input_dev);
input_unregister_device(dell_wmi_aio_input_dev);
return err;
}
@@ -206,7 +203,6 @@ static void __exit dell_wmi_aio_exit(void)
guid = dell_wmi_aio_find();
wmi_remove_notify_handler(guid);
- sparse_keymap_free(dell_wmi_aio_input_dev);
input_unregister_device(dell_wmi_aio_input_dev);
}
diff --git a/drivers/platform/x86/dell-wmi.c b/drivers/platform/x86/dell-wmi.c
index 75e637047d36..8a64c7967753 100644
--- a/drivers/platform/x86/dell-wmi.c
+++ b/drivers/platform/x86/dell-wmi.c
@@ -329,6 +329,10 @@ static void dell_wmi_process_key(int type, int code)
if (type == 0x0000 && code == 0xe025 && !wmi_requires_smbios_request)
return;
+ if (key->keycode == KEY_KBDILLUMTOGGLE)
+ dell_laptop_call_notifier(
+ DELL_LAPTOP_KBD_BACKLIGHT_BRIGHTNESS_CHANGED, NULL);
+
sparse_keymap_report_entry(dell_wmi_input_dev, key, 1, true);
}
@@ -603,23 +607,15 @@ static int __init dell_wmi_input_setup(void)
err = input_register_device(dell_wmi_input_dev);
if (err)
- goto err_free_keymap;
+ goto err_free_dev;
return 0;
- err_free_keymap:
- sparse_keymap_free(dell_wmi_input_dev);
err_free_dev:
input_free_device(dell_wmi_input_dev);
return err;
}
-static void dell_wmi_input_destroy(void)
-{
- sparse_keymap_free(dell_wmi_input_dev);
- input_unregister_device(dell_wmi_input_dev);
-}
-
/*
* Descriptor buffer is 128 byte long and contains:
*
@@ -740,7 +736,7 @@ static int __init dell_wmi_init(void)
status = wmi_install_notify_handler(DELL_EVENT_GUID,
dell_wmi_notify, NULL);
if (ACPI_FAILURE(status)) {
- dell_wmi_input_destroy();
+ input_unregister_device(dell_wmi_input_dev);
pr_err("Unable to register notify handler - %d\n", status);
return -ENODEV;
}
@@ -752,7 +748,7 @@ static int __init dell_wmi_init(void)
if (err) {
pr_err("Failed to enable WMI events\n");
wmi_remove_notify_handler(DELL_EVENT_GUID);
- dell_wmi_input_destroy();
+ input_unregister_device(dell_wmi_input_dev);
return err;
}
}
@@ -766,6 +762,6 @@ static void __exit dell_wmi_exit(void)
if (wmi_requires_smbios_request)
dell_wmi_events_set_enabled(false);
wmi_remove_notify_handler(DELL_EVENT_GUID);
- dell_wmi_input_destroy();
+ input_unregister_device(dell_wmi_input_dev);
}
module_exit(dell_wmi_exit);
diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c
index 8cdf315f9730..2426399e1e04 100644
--- a/drivers/platform/x86/eeepc-laptop.c
+++ b/drivers/platform/x86/eeepc-laptop.c
@@ -150,6 +150,8 @@ static const struct key_entry eeepc_keymap[] = {
{ KE_KEY, 0x32, { KEY_SWITCHVIDEOMODE } },
{ KE_KEY, 0x37, { KEY_F13 } }, /* Disable Touchpad */
{ KE_KEY, 0x38, { KEY_F14 } },
+ { KE_IGNORE, 0x50, { KEY_RESERVED } }, /* AC plugged */
+ { KE_IGNORE, 0x51, { KEY_RESERVED } }, /* AC unplugged */
{ KE_END, 0 },
};
@@ -1205,14 +1207,12 @@ static int eeepc_input_init(struct eeepc_laptop *eeepc)
error = input_register_device(input);
if (error) {
pr_err("Unable to register input device\n");
- goto err_free_keymap;
+ goto err_free_dev;
}
eeepc->inputdev = input;
return 0;
-err_free_keymap:
- sparse_keymap_free(input);
err_free_dev:
input_free_device(input);
return error;
@@ -1220,10 +1220,8 @@ err_free_dev:
static void eeepc_input_exit(struct eeepc_laptop *eeepc)
{
- if (eeepc->inputdev) {
- sparse_keymap_free(eeepc->inputdev);
+ if (eeepc->inputdev)
input_unregister_device(eeepc->inputdev);
- }
eeepc->inputdev = NULL;
}
diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c
index e12cc3504d48..7f49d92914c9 100644
--- a/drivers/platform/x86/fujitsu-laptop.c
+++ b/drivers/platform/x86/fujitsu-laptop.c
@@ -32,18 +32,9 @@
* features made available on a range of Fujitsu laptops including the
* P2xxx/P5xxx/S6xxx/S7xxx series.
*
- * This driver exports a few files in /sys/devices/platform/fujitsu-laptop/;
- * others may be added at a later date.
- *
- * lcd_level - Screen brightness: contains a single integer in the
- * range 0..7. (rw)
- *
- * In addition to these platform device attributes the driver
- * registers itself in the Linux backlight control subsystem and is
- * available to userspace under /sys/class/backlight/fujitsu-laptop/.
- *
- * Hotkeys present on certain Fujitsu laptops (eg: the S6xxx series) are
- * also supported by this driver.
+ * This driver implements a vendor-specific backlight control interface for
+ * Fujitsu laptops and provides support for hotkeys present on certain Fujitsu
+ * laptops.
*
* This driver has been tested on a Fujitsu Lifebook S6410, S7020 and
* P8010. It should work on most P-series and S-series Lifebooks, but
@@ -66,12 +57,11 @@
#include <linux/backlight.h>
#include <linux/fb.h>
#include <linux/input.h>
+#include <linux/input/sparse-keymap.h>
#include <linux/kfifo.h>
+#include <linux/leds.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
-#if IS_ENABLED(CONFIG_LEDS_CLASS)
-#include <linux/leds.h>
-#endif
#include <acpi/video.h>
#define FUJITSU_DRIVER_VERSION "0.6.0"
@@ -102,7 +92,6 @@
#define FLAG_LID 0x100
#define FLAG_DOCK 0x200
-#if IS_ENABLED(CONFIG_LEDS_CLASS)
/* FUNC interface - LED control */
#define FUNC_LED_OFF 0x1
#define FUNC_LED_ON 0x30001
@@ -112,7 +101,6 @@
#define RADIO_LED_ON 0x20
#define ECO_LED 0x10000
#define ECO_LED_ON 0x80000
-#endif
/* Hotkey details */
#define KEY1_CODE 0x410 /* codes for the keys in the GIRB register */
@@ -143,21 +131,16 @@
/* Device controlling the backlight and associated keys */
struct fujitsu_bl {
acpi_handle acpi_handle;
- struct acpi_device *dev;
struct input_dev *input;
char phys[32];
struct backlight_device *bl_device;
- struct platform_device *pf_device;
- int keycode1, keycode2, keycode3, keycode4, keycode5;
-
unsigned int max_brightness;
- unsigned int brightness_changed;
unsigned int brightness_level;
};
static struct fujitsu_bl *fujitsu_bl;
static int use_alt_lcd_levels = -1;
-static int disable_brightness_adjust = -1;
+static bool disable_brightness_adjust;
/* Device used to access hotkeys and other features on the laptop */
struct fujitsu_laptop {
@@ -170,247 +153,77 @@ struct fujitsu_laptop {
spinlock_t fifo_lock;
int flags_supported;
int flags_state;
- int logolamp_registered;
- int kblamps_registered;
- int radio_led_registered;
- int eco_led_registered;
};
static struct fujitsu_laptop *fujitsu_laptop;
-#if IS_ENABLED(CONFIG_LEDS_CLASS)
-static enum led_brightness logolamp_get(struct led_classdev *cdev);
-static int logolamp_set(struct led_classdev *cdev,
- enum led_brightness brightness);
-
-static struct led_classdev logolamp_led = {
- .name = "fujitsu::logolamp",
- .brightness_get = logolamp_get,
- .brightness_set_blocking = logolamp_set
-};
-
-static enum led_brightness kblamps_get(struct led_classdev *cdev);
-static int kblamps_set(struct led_classdev *cdev,
- enum led_brightness brightness);
-
-static struct led_classdev kblamps_led = {
- .name = "fujitsu::kblamps",
- .brightness_get = kblamps_get,
- .brightness_set_blocking = kblamps_set
-};
-
-static enum led_brightness radio_led_get(struct led_classdev *cdev);
-static int radio_led_set(struct led_classdev *cdev,
- enum led_brightness brightness);
-
-static struct led_classdev radio_led = {
- .name = "fujitsu::radio_led",
- .default_trigger = "rfkill-any",
- .brightness_get = radio_led_get,
- .brightness_set_blocking = radio_led_set
-};
-
-static enum led_brightness eco_led_get(struct led_classdev *cdev);
-static int eco_led_set(struct led_classdev *cdev,
- enum led_brightness brightness);
-
-static struct led_classdev eco_led = {
- .name = "fujitsu::eco_led",
- .brightness_get = eco_led_get,
- .brightness_set_blocking = eco_led_set
-};
-#endif
-
#ifdef CONFIG_FUJITSU_LAPTOP_DEBUG
static u32 dbg_level = 0x03;
#endif
/* Fujitsu ACPI interface function */
-static int call_fext_func(int cmd, int arg0, int arg1, int arg2)
+static int call_fext_func(int func, int op, int feature, int state)
{
- acpi_status status = AE_OK;
union acpi_object params[4] = {
- { .type = ACPI_TYPE_INTEGER },
- { .type = ACPI_TYPE_INTEGER },
- { .type = ACPI_TYPE_INTEGER },
- { .type = ACPI_TYPE_INTEGER }
+ { .integer.type = ACPI_TYPE_INTEGER, .integer.value = func },
+ { .integer.type = ACPI_TYPE_INTEGER, .integer.value = op },
+ { .integer.type = ACPI_TYPE_INTEGER, .integer.value = feature },
+ { .integer.type = ACPI_TYPE_INTEGER, .integer.value = state }
};
- struct acpi_object_list arg_list = { 4, &params[0] };
+ struct acpi_object_list arg_list = { 4, params };
unsigned long long value;
- acpi_handle handle = NULL;
-
- status = acpi_get_handle(fujitsu_laptop->acpi_handle, "FUNC", &handle);
- if (ACPI_FAILURE(status)) {
- vdbg_printk(FUJLAPTOP_DBG_ERROR,
- "FUNC interface is not present\n");
- return -ENODEV;
- }
+ acpi_status status;
- params[0].integer.value = cmd;
- params[1].integer.value = arg0;
- params[2].integer.value = arg1;
- params[3].integer.value = arg2;
-
- status = acpi_evaluate_integer(handle, NULL, &arg_list, &value);
+ status = acpi_evaluate_integer(fujitsu_laptop->acpi_handle, "FUNC",
+ &arg_list, &value);
if (ACPI_FAILURE(status)) {
- vdbg_printk(FUJLAPTOP_DBG_WARN,
- "FUNC 0x%x (args 0x%x, 0x%x, 0x%x) call failed\n",
- cmd, arg0, arg1, arg2);
+ vdbg_printk(FUJLAPTOP_DBG_ERROR, "Failed to evaluate FUNC\n");
return -ENODEV;
}
- vdbg_printk(FUJLAPTOP_DBG_TRACE,
- "FUNC 0x%x (args 0x%x, 0x%x, 0x%x) returned 0x%x\n",
- cmd, arg0, arg1, arg2, (int)value);
+ vdbg_printk(FUJLAPTOP_DBG_TRACE, "FUNC 0x%x (args 0x%x, 0x%x, 0x%x) returned 0x%x\n",
+ func, op, feature, state, (int)value);
return value;
}
-#if IS_ENABLED(CONFIG_LEDS_CLASS)
-/* LED class callbacks */
-
-static int logolamp_set(struct led_classdev *cdev,
- enum led_brightness brightness)
-{
- int poweron = FUNC_LED_ON, always = FUNC_LED_ON;
- int ret;
-
- if (brightness < LED_HALF)
- poweron = FUNC_LED_OFF;
-
- if (brightness < LED_FULL)
- always = FUNC_LED_OFF;
-
- ret = call_fext_func(FUNC_LEDS, 0x1, LOGOLAMP_POWERON, poweron);
- if (ret < 0)
- return ret;
-
- return call_fext_func(FUNC_LEDS, 0x1, LOGOLAMP_ALWAYS, always);
-}
-
-static int kblamps_set(struct led_classdev *cdev,
- enum led_brightness brightness)
-{
- if (brightness >= LED_FULL)
- return call_fext_func(FUNC_LEDS, 0x1, KEYBOARD_LAMPS, FUNC_LED_ON);
- else
- return call_fext_func(FUNC_LEDS, 0x1, KEYBOARD_LAMPS, FUNC_LED_OFF);
-}
-
-static int radio_led_set(struct led_classdev *cdev,
- enum led_brightness brightness)
-{
- if (brightness >= LED_FULL)
- return call_fext_func(FUNC_FLAGS, 0x5, RADIO_LED_ON, RADIO_LED_ON);
- else
- return call_fext_func(FUNC_FLAGS, 0x5, RADIO_LED_ON, 0x0);
-}
-
-static int eco_led_set(struct led_classdev *cdev,
- enum led_brightness brightness)
-{
- int curr;
-
- curr = call_fext_func(FUNC_LEDS, 0x2, ECO_LED, 0x0);
- if (brightness >= LED_FULL)
- return call_fext_func(FUNC_LEDS, 0x1, ECO_LED, curr | ECO_LED_ON);
- else
- return call_fext_func(FUNC_LEDS, 0x1, ECO_LED, curr & ~ECO_LED_ON);
-}
-
-static enum led_brightness logolamp_get(struct led_classdev *cdev)
-{
- int ret;
-
- ret = call_fext_func(FUNC_LEDS, 0x2, LOGOLAMP_ALWAYS, 0x0);
- if (ret == FUNC_LED_ON)
- return LED_FULL;
-
- ret = call_fext_func(FUNC_LEDS, 0x2, LOGOLAMP_POWERON, 0x0);
- if (ret == FUNC_LED_ON)
- return LED_HALF;
-
- return LED_OFF;
-}
-
-static enum led_brightness kblamps_get(struct led_classdev *cdev)
-{
- enum led_brightness brightness = LED_OFF;
-
- if (call_fext_func(FUNC_LEDS, 0x2, KEYBOARD_LAMPS, 0x0) == FUNC_LED_ON)
- brightness = LED_FULL;
-
- return brightness;
-}
-
-static enum led_brightness radio_led_get(struct led_classdev *cdev)
-{
- enum led_brightness brightness = LED_OFF;
-
- if (call_fext_func(FUNC_FLAGS, 0x4, 0x0, 0x0) & RADIO_LED_ON)
- brightness = LED_FULL;
-
- return brightness;
-}
-
-static enum led_brightness eco_led_get(struct led_classdev *cdev)
-{
- enum led_brightness brightness = LED_OFF;
-
- if (call_fext_func(FUNC_LEDS, 0x2, ECO_LED, 0x0) & ECO_LED_ON)
- brightness = LED_FULL;
-
- return brightness;
-}
-#endif
-
/* Hardware access for LCD brightness control */
static int set_lcd_level(int level)
{
- acpi_status status = AE_OK;
- acpi_handle handle = NULL;
-
- vdbg_printk(FUJLAPTOP_DBG_TRACE, "set lcd level via SBLL [%d]\n",
- level);
+ acpi_status status;
+ char *method;
- if (level < 0 || level >= fujitsu_bl->max_brightness)
- return -EINVAL;
-
- status = acpi_get_handle(fujitsu_bl->acpi_handle, "SBLL", &handle);
- if (ACPI_FAILURE(status)) {
- vdbg_printk(FUJLAPTOP_DBG_ERROR, "SBLL not present\n");
- return -ENODEV;
+ switch (use_alt_lcd_levels) {
+ case -1:
+ if (acpi_has_method(fujitsu_bl->acpi_handle, "SBL2"))
+ method = "SBL2";
+ else
+ method = "SBLL";
+ break;
+ case 1:
+ method = "SBL2";
+ break;
+ default:
+ method = "SBLL";
+ break;
}
-
- status = acpi_execute_simple_method(handle, NULL, level);
- if (ACPI_FAILURE(status))
- return -ENODEV;
-
- return 0;
-}
-
-static int set_lcd_level_alt(int level)
-{
- acpi_status status = AE_OK;
- acpi_handle handle = NULL;
-
- vdbg_printk(FUJLAPTOP_DBG_TRACE, "set lcd level via SBL2 [%d]\n",
- level);
+ vdbg_printk(FUJLAPTOP_DBG_TRACE, "set lcd level via %s [%d]\n",
+ method, level);
if (level < 0 || level >= fujitsu_bl->max_brightness)
return -EINVAL;
- status = acpi_get_handle(fujitsu_bl->acpi_handle, "SBL2", &handle);
+ status = acpi_execute_simple_method(fujitsu_bl->acpi_handle, method,
+ level);
if (ACPI_FAILURE(status)) {
- vdbg_printk(FUJLAPTOP_DBG_ERROR, "SBL2 not present\n");
+ vdbg_printk(FUJLAPTOP_DBG_ERROR, "Failed to evaluate %s\n",
+ method);
return -ENODEV;
}
- status = acpi_execute_simple_method(handle, NULL, level);
- if (ACPI_FAILURE(status))
- return -ENODEV;
+ fujitsu_bl->brightness_level = level;
return 0;
}
@@ -429,11 +242,6 @@ static int get_lcd_level(void)
fujitsu_bl->brightness_level = state & 0x0fffffff;
- if (state & 0x80000000)
- fujitsu_bl->brightness_changed = 1;
- else
- fujitsu_bl->brightness_changed = 0;
-
return fujitsu_bl->brightness_level;
}
@@ -458,30 +266,17 @@ static int get_max_brightness(void)
static int bl_get_brightness(struct backlight_device *b)
{
- return get_lcd_level();
+ return b->props.power == FB_BLANK_POWERDOWN ? 0 : get_lcd_level();
}
static int bl_update_status(struct backlight_device *b)
{
- int ret;
if (b->props.power == FB_BLANK_POWERDOWN)
- ret = call_fext_func(FUNC_BACKLIGHT, 0x1, 0x4, 0x3);
+ call_fext_func(FUNC_BACKLIGHT, 0x1, 0x4, 0x3);
else
- ret = call_fext_func(FUNC_BACKLIGHT, 0x1, 0x4, 0x0);
- if (ret != 0)
- vdbg_printk(FUJLAPTOP_DBG_ERROR,
- "Unable to adjust backlight power, error code %i\n",
- ret);
-
- if (use_alt_lcd_levels)
- ret = set_lcd_level_alt(b->props.brightness);
- else
- ret = set_lcd_level(b->props.brightness);
- if (ret != 0)
- vdbg_printk(FUJLAPTOP_DBG_ERROR,
- "Unable to adjust LCD brightness, error code %i\n",
- ret);
- return ret;
+ call_fext_func(FUNC_BACKLIGHT, 0x1, 0x4, 0x0);
+
+ return set_lcd_level(b->props.brightness);
}
static const struct backlight_ops fujitsu_bl_ops = {
@@ -489,84 +284,8 @@ static const struct backlight_ops fujitsu_bl_ops = {
.update_status = bl_update_status,
};
-/* Platform LCD brightness device */
-
-static ssize_t
-show_max_brightness(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
-
- int ret;
-
- ret = get_max_brightness();
- if (ret < 0)
- return ret;
-
- return sprintf(buf, "%i\n", ret);
-}
-
-static ssize_t
-show_brightness_changed(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
-
- int ret;
-
- ret = fujitsu_bl->brightness_changed;
- if (ret < 0)
- return ret;
-
- return sprintf(buf, "%i\n", ret);
-}
-
-static ssize_t show_lcd_level(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
-
- int ret;
-
- ret = get_lcd_level();
- if (ret < 0)
- return ret;
-
- return sprintf(buf, "%i\n", ret);
-}
-
-static ssize_t store_lcd_level(struct device *dev,
- struct device_attribute *attr, const char *buf,
- size_t count)
-{
-
- int level, ret;
-
- if (sscanf(buf, "%i", &level) != 1
- || (level < 0 || level >= fujitsu_bl->max_brightness))
- return -EINVAL;
-
- if (use_alt_lcd_levels)
- ret = set_lcd_level_alt(level);
- else
- ret = set_lcd_level(level);
- if (ret < 0)
- return ret;
-
- ret = get_lcd_level();
- if (ret < 0)
- return ret;
-
- return count;
-}
-
-static ssize_t
-ignore_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
-{
- return count;
-}
-
-static ssize_t
-show_lid_state(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t lid_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
if (!(fujitsu_laptop->flags_supported & FLAG_LID))
return sprintf(buf, "unknown\n");
@@ -576,9 +295,8 @@ show_lid_state(struct device *dev,
return sprintf(buf, "closed\n");
}
-static ssize_t
-show_dock_state(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t dock_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
if (!(fujitsu_laptop->flags_supported & FLAG_DOCK))
return sprintf(buf, "unknown\n");
@@ -588,9 +306,8 @@ show_dock_state(struct device *dev,
return sprintf(buf, "undocked\n");
}
-static ssize_t
-show_radios_state(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t radios_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
if (!(fujitsu_laptop->flags_supported & FLAG_RFKILL))
return sprintf(buf, "unknown\n");
@@ -600,18 +317,11 @@ show_radios_state(struct device *dev,
return sprintf(buf, "killed\n");
}
-static DEVICE_ATTR(max_brightness, 0444, show_max_brightness, ignore_store);
-static DEVICE_ATTR(brightness_changed, 0444, show_brightness_changed,
- ignore_store);
-static DEVICE_ATTR(lcd_level, 0644, show_lcd_level, store_lcd_level);
-static DEVICE_ATTR(lid, 0444, show_lid_state, ignore_store);
-static DEVICE_ATTR(dock, 0444, show_dock_state, ignore_store);
-static DEVICE_ATTR(radios, 0444, show_radios_state, ignore_store);
+static DEVICE_ATTR_RO(lid);
+static DEVICE_ATTR_RO(dock);
+static DEVICE_ATTR_RO(radios);
static struct attribute *fujitsu_pf_attributes[] = {
- &dev_attr_brightness_changed.attr,
- &dev_attr_max_brightness.attr,
- &dev_attr_lcd_level.attr,
&dev_attr_lid.attr,
&dev_attr_dock.attr,
&dev_attr_radios.attr,
@@ -628,69 +338,66 @@ static struct platform_driver fujitsu_pf_driver = {
}
};
-static void __init dmi_check_cb_common(const struct dmi_system_id *id)
-{
- pr_info("Identified laptop model '%s'\n", id->ident);
-}
+/* ACPI device for LCD brightness control */
-static int __init dmi_check_cb_s6410(const struct dmi_system_id *id)
-{
- dmi_check_cb_common(id);
- fujitsu_bl->keycode1 = KEY_SCREENLOCK; /* "Lock" */
- fujitsu_bl->keycode2 = KEY_HELP; /* "Mobility Center" */
- return 1;
-}
+static const struct key_entry keymap_backlight[] = {
+ { KE_KEY, true, { KEY_BRIGHTNESSUP } },
+ { KE_KEY, false, { KEY_BRIGHTNESSDOWN } },
+ { KE_END, 0 }
+};
-static int __init dmi_check_cb_s6420(const struct dmi_system_id *id)
+static int acpi_fujitsu_bl_input_setup(struct acpi_device *device)
{
- dmi_check_cb_common(id);
- fujitsu_bl->keycode1 = KEY_SCREENLOCK; /* "Lock" */
- fujitsu_bl->keycode2 = KEY_HELP; /* "Mobility Center" */
- return 1;
+ struct fujitsu_bl *fujitsu_bl = acpi_driver_data(device);
+ int ret;
+
+ fujitsu_bl->input = devm_input_allocate_device(&device->dev);
+ if (!fujitsu_bl->input)
+ return -ENOMEM;
+
+ snprintf(fujitsu_bl->phys, sizeof(fujitsu_bl->phys),
+ "%s/video/input0", acpi_device_hid(device));
+
+ fujitsu_bl->input->name = acpi_device_name(device);
+ fujitsu_bl->input->phys = fujitsu_bl->phys;
+ fujitsu_bl->input->id.bustype = BUS_HOST;
+ fujitsu_bl->input->id.product = 0x06;
+
+ ret = sparse_keymap_setup(fujitsu_bl->input, keymap_backlight, NULL);
+ if (ret)
+ return ret;
+
+ return input_register_device(fujitsu_bl->input);
}
-static int __init dmi_check_cb_p8010(const struct dmi_system_id *id)
+static int fujitsu_backlight_register(struct acpi_device *device)
{
- dmi_check_cb_common(id);
- fujitsu_bl->keycode1 = KEY_HELP; /* "Support" */
- fujitsu_bl->keycode3 = KEY_SWITCHVIDEOMODE; /* "Presentation" */
- fujitsu_bl->keycode4 = KEY_WWW; /* "Internet" */
- return 1;
-}
+ const struct backlight_properties props = {
+ .brightness = fujitsu_bl->brightness_level,
+ .max_brightness = fujitsu_bl->max_brightness - 1,
+ .type = BACKLIGHT_PLATFORM
+ };
+ struct backlight_device *bd;
-static const struct dmi_system_id fujitsu_dmi_table[] __initconst = {
- {
- .ident = "Fujitsu Siemens S6410",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
- DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK S6410"),
- },
- .callback = dmi_check_cb_s6410},
- {
- .ident = "Fujitsu Siemens S6420",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
- DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK S6420"),
- },
- .callback = dmi_check_cb_s6420},
- {
- .ident = "Fujitsu LifeBook P8010",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
- DMI_MATCH(DMI_PRODUCT_NAME, "LifeBook P8010"),
- },
- .callback = dmi_check_cb_p8010},
- {}
-};
+ bd = devm_backlight_device_register(&device->dev, "fujitsu-laptop",
+ &device->dev, NULL,
+ &fujitsu_bl_ops, &props);
+ if (IS_ERR(bd))
+ return PTR_ERR(bd);
-/* ACPI device for LCD brightness control */
+ fujitsu_bl->bl_device = bd;
+
+ return 0;
+}
static int acpi_fujitsu_bl_add(struct acpi_device *device)
{
int state = 0;
- struct input_dev *input;
int error;
+ if (acpi_video_get_backlight_type() != acpi_backlight_vendor)
+ return -ENODEV;
+
if (!device)
return -EINVAL;
@@ -699,41 +406,20 @@ static int acpi_fujitsu_bl_add(struct acpi_device *device)
sprintf(acpi_device_class(device), "%s", ACPI_FUJITSU_CLASS);
device->driver_data = fujitsu_bl;
- fujitsu_bl->input = input = input_allocate_device();
- if (!input) {
- error = -ENOMEM;
- goto err_stop;
- }
-
- snprintf(fujitsu_bl->phys, sizeof(fujitsu_bl->phys),
- "%s/video/input0", acpi_device_hid(device));
-
- input->name = acpi_device_name(device);
- input->phys = fujitsu_bl->phys;
- input->id.bustype = BUS_HOST;
- input->id.product = 0x06;
- input->dev.parent = &device->dev;
- input->evbit[0] = BIT(EV_KEY);
- set_bit(KEY_BRIGHTNESSUP, input->keybit);
- set_bit(KEY_BRIGHTNESSDOWN, input->keybit);
- set_bit(KEY_UNKNOWN, input->keybit);
-
- error = input_register_device(input);
+ error = acpi_fujitsu_bl_input_setup(device);
if (error)
- goto err_free_input_dev;
+ return error;
error = acpi_bus_update_power(fujitsu_bl->acpi_handle, &state);
if (error) {
pr_err("Error reading power state\n");
- goto err_unregister_input_dev;
+ return error;
}
pr_info("ACPI: %s [%s] (%s)\n",
acpi_device_name(device), acpi_device_bid(device),
!device->power.state ? "on" : "off");
- fujitsu_bl->dev = device;
-
if (acpi_has_method(device->handle, METHOD_NAME__INI)) {
vdbg_printk(FUJLAPTOP_DBG_INFO, "Invoking _INI\n");
if (ACPI_FAILURE
@@ -742,45 +428,13 @@ static int acpi_fujitsu_bl_add(struct acpi_device *device)
pr_err("_INI Method failed\n");
}
- if (use_alt_lcd_levels == -1) {
- if (acpi_has_method(NULL, "\\_SB.PCI0.LPCB.FJEX.SBL2"))
- use_alt_lcd_levels = 1;
- else
- use_alt_lcd_levels = 0;
- vdbg_printk(FUJLAPTOP_DBG_TRACE, "auto-detected usealt as %i\n",
- use_alt_lcd_levels);
- }
-
- /* do config (detect defaults) */
- use_alt_lcd_levels = use_alt_lcd_levels == 1 ? 1 : 0;
- disable_brightness_adjust = disable_brightness_adjust == 1 ? 1 : 0;
- vdbg_printk(FUJLAPTOP_DBG_INFO,
- "config: [alt interface: %d], [adjust disable: %d]\n",
- use_alt_lcd_levels, disable_brightness_adjust);
-
if (get_max_brightness() <= 0)
fujitsu_bl->max_brightness = FUJITSU_LCD_N_LEVELS;
get_lcd_level();
- return 0;
-
-err_unregister_input_dev:
- input_unregister_device(input);
- input = NULL;
-err_free_input_dev:
- input_free_device(input);
-err_stop:
- return error;
-}
-
-static int acpi_fujitsu_bl_remove(struct acpi_device *device)
-{
- struct fujitsu_bl *fujitsu_bl = acpi_driver_data(device);
- struct input_dev *input = fujitsu_bl->input;
-
- input_unregister_device(input);
-
- fujitsu_bl->acpi_handle = NULL;
+ error = fujitsu_backlight_register(device);
+ if (error)
+ return error;
return 0;
}
@@ -790,62 +444,332 @@ static int acpi_fujitsu_bl_remove(struct acpi_device *device)
static void acpi_fujitsu_bl_notify(struct acpi_device *device, u32 event)
{
struct input_dev *input;
- int keycode;
int oldb, newb;
input = fujitsu_bl->input;
- switch (event) {
- case ACPI_FUJITSU_NOTIFY_CODE1:
- keycode = 0;
- oldb = fujitsu_bl->brightness_level;
- get_lcd_level();
- newb = fujitsu_bl->brightness_level;
-
- vdbg_printk(FUJLAPTOP_DBG_TRACE,
- "brightness button event [%i -> %i (%i)]\n",
- oldb, newb, fujitsu_bl->brightness_changed);
-
- if (oldb < newb) {
- if (disable_brightness_adjust != 1) {
- if (use_alt_lcd_levels)
- set_lcd_level_alt(newb);
- else
- set_lcd_level(newb);
- }
- keycode = KEY_BRIGHTNESSUP;
- } else if (oldb > newb) {
- if (disable_brightness_adjust != 1) {
- if (use_alt_lcd_levels)
- set_lcd_level_alt(newb);
- else
- set_lcd_level(newb);
- }
- keycode = KEY_BRIGHTNESSDOWN;
- }
- break;
- default:
- keycode = KEY_UNKNOWN;
+ if (event != ACPI_FUJITSU_NOTIFY_CODE1) {
vdbg_printk(FUJLAPTOP_DBG_WARN,
"unsupported event [0x%x]\n", event);
- break;
+ sparse_keymap_report_event(input, -1, 1, true);
+ return;
}
- if (keycode != 0) {
- input_report_key(input, keycode, 1);
- input_sync(input);
- input_report_key(input, keycode, 0);
- input_sync(input);
- }
+ oldb = fujitsu_bl->brightness_level;
+ get_lcd_level();
+ newb = fujitsu_bl->brightness_level;
+
+ vdbg_printk(FUJLAPTOP_DBG_TRACE, "brightness button event [%i -> %i]\n",
+ oldb, newb);
+
+ if (oldb == newb)
+ return;
+
+ if (!disable_brightness_adjust)
+ set_lcd_level(newb);
+
+ sparse_keymap_report_event(input, oldb < newb, 1, true);
}
/* ACPI device for hotkey handling */
+static const struct key_entry keymap_default[] = {
+ { KE_KEY, KEY1_CODE, { KEY_PROG1 } },
+ { KE_KEY, KEY2_CODE, { KEY_PROG2 } },
+ { KE_KEY, KEY3_CODE, { KEY_PROG3 } },
+ { KE_KEY, KEY4_CODE, { KEY_PROG4 } },
+ { KE_KEY, KEY5_CODE, { KEY_RFKILL } },
+ { KE_KEY, BIT(26), { KEY_TOUCHPAD_TOGGLE } },
+ { KE_END, 0 }
+};
+
+static const struct key_entry keymap_s64x0[] = {
+ { KE_KEY, KEY1_CODE, { KEY_SCREENLOCK } }, /* "Lock" */
+ { KE_KEY, KEY2_CODE, { KEY_HELP } }, /* "Mobility Center" */
+ { KE_KEY, KEY3_CODE, { KEY_PROG3 } },
+ { KE_KEY, KEY4_CODE, { KEY_PROG4 } },
+ { KE_END, 0 }
+};
+
+static const struct key_entry keymap_p8010[] = {
+ { KE_KEY, KEY1_CODE, { KEY_HELP } }, /* "Support" */
+ { KE_KEY, KEY2_CODE, { KEY_PROG2 } },
+ { KE_KEY, KEY3_CODE, { KEY_SWITCHVIDEOMODE } }, /* "Presentation" */
+ { KE_KEY, KEY4_CODE, { KEY_WWW } }, /* "WWW" */
+ { KE_END, 0 }
+};
+
+static const struct key_entry *keymap = keymap_default;
+
+static int fujitsu_laptop_dmi_keymap_override(const struct dmi_system_id *id)
+{
+ pr_info("Identified laptop model '%s'\n", id->ident);
+ keymap = id->driver_data;
+ return 1;
+}
+
+static const struct dmi_system_id fujitsu_laptop_dmi_table[] = {
+ {
+ .callback = fujitsu_laptop_dmi_keymap_override,
+ .ident = "Fujitsu Siemens S6410",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK S6410"),
+ },
+ .driver_data = (void *)keymap_s64x0
+ },
+ {
+ .callback = fujitsu_laptop_dmi_keymap_override,
+ .ident = "Fujitsu Siemens S6420",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK S6420"),
+ },
+ .driver_data = (void *)keymap_s64x0
+ },
+ {
+ .callback = fujitsu_laptop_dmi_keymap_override,
+ .ident = "Fujitsu LifeBook P8010",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LifeBook P8010"),
+ },
+ .driver_data = (void *)keymap_p8010
+ },
+ {}
+};
+
+static int acpi_fujitsu_laptop_input_setup(struct acpi_device *device)
+{
+ struct fujitsu_laptop *fujitsu_laptop = acpi_driver_data(device);
+ int ret;
+
+ fujitsu_laptop->input = devm_input_allocate_device(&device->dev);
+ if (!fujitsu_laptop->input)
+ return -ENOMEM;
+
+ snprintf(fujitsu_laptop->phys, sizeof(fujitsu_laptop->phys),
+ "%s/video/input0", acpi_device_hid(device));
+
+ fujitsu_laptop->input->name = acpi_device_name(device);
+ fujitsu_laptop->input->phys = fujitsu_laptop->phys;
+ fujitsu_laptop->input->id.bustype = BUS_HOST;
+ fujitsu_laptop->input->id.product = 0x06;
+
+ dmi_check_system(fujitsu_laptop_dmi_table);
+ ret = sparse_keymap_setup(fujitsu_laptop->input, keymap, NULL);
+ if (ret)
+ return ret;
+
+ return input_register_device(fujitsu_laptop->input);
+}
+
+static int fujitsu_laptop_platform_add(void)
+{
+ int ret;
+
+ fujitsu_laptop->pf_device = platform_device_alloc("fujitsu-laptop", -1);
+ if (!fujitsu_laptop->pf_device)
+ return -ENOMEM;
+
+ ret = platform_device_add(fujitsu_laptop->pf_device);
+ if (ret)
+ goto err_put_platform_device;
+
+ ret = sysfs_create_group(&fujitsu_laptop->pf_device->dev.kobj,
+ &fujitsu_pf_attribute_group);
+ if (ret)
+ goto err_del_platform_device;
+
+ return 0;
+
+err_del_platform_device:
+ platform_device_del(fujitsu_laptop->pf_device);
+err_put_platform_device:
+ platform_device_put(fujitsu_laptop->pf_device);
+
+ return ret;
+}
+
+static void fujitsu_laptop_platform_remove(void)
+{
+ sysfs_remove_group(&fujitsu_laptop->pf_device->dev.kobj,
+ &fujitsu_pf_attribute_group);
+ platform_device_unregister(fujitsu_laptop->pf_device);
+}
+
+static int logolamp_set(struct led_classdev *cdev,
+ enum led_brightness brightness)
+{
+ int poweron = FUNC_LED_ON, always = FUNC_LED_ON;
+ int ret;
+
+ if (brightness < LED_HALF)
+ poweron = FUNC_LED_OFF;
+
+ if (brightness < LED_FULL)
+ always = FUNC_LED_OFF;
+
+ ret = call_fext_func(FUNC_LEDS, 0x1, LOGOLAMP_POWERON, poweron);
+ if (ret < 0)
+ return ret;
+
+ return call_fext_func(FUNC_LEDS, 0x1, LOGOLAMP_ALWAYS, always);
+}
+
+static enum led_brightness logolamp_get(struct led_classdev *cdev)
+{
+ int ret;
+
+ ret = call_fext_func(FUNC_LEDS, 0x2, LOGOLAMP_ALWAYS, 0x0);
+ if (ret == FUNC_LED_ON)
+ return LED_FULL;
+
+ ret = call_fext_func(FUNC_LEDS, 0x2, LOGOLAMP_POWERON, 0x0);
+ if (ret == FUNC_LED_ON)
+ return LED_HALF;
+
+ return LED_OFF;
+}
+
+static struct led_classdev logolamp_led = {
+ .name = "fujitsu::logolamp",
+ .brightness_set_blocking = logolamp_set,
+ .brightness_get = logolamp_get
+};
+
+static int kblamps_set(struct led_classdev *cdev,
+ enum led_brightness brightness)
+{
+ if (brightness >= LED_FULL)
+ return call_fext_func(FUNC_LEDS, 0x1, KEYBOARD_LAMPS,
+ FUNC_LED_ON);
+ else
+ return call_fext_func(FUNC_LEDS, 0x1, KEYBOARD_LAMPS,
+ FUNC_LED_OFF);
+}
+
+static enum led_brightness kblamps_get(struct led_classdev *cdev)
+{
+ enum led_brightness brightness = LED_OFF;
+
+ if (call_fext_func(FUNC_LEDS, 0x2, KEYBOARD_LAMPS, 0x0) == FUNC_LED_ON)
+ brightness = LED_FULL;
+
+ return brightness;
+}
+
+static struct led_classdev kblamps_led = {
+ .name = "fujitsu::kblamps",
+ .brightness_set_blocking = kblamps_set,
+ .brightness_get = kblamps_get
+};
+
+static int radio_led_set(struct led_classdev *cdev,
+ enum led_brightness brightness)
+{
+ if (brightness >= LED_FULL)
+ return call_fext_func(FUNC_FLAGS, 0x5, RADIO_LED_ON,
+ RADIO_LED_ON);
+ else
+ return call_fext_func(FUNC_FLAGS, 0x5, RADIO_LED_ON, 0x0);
+}
+
+static enum led_brightness radio_led_get(struct led_classdev *cdev)
+{
+ enum led_brightness brightness = LED_OFF;
+
+ if (call_fext_func(FUNC_FLAGS, 0x4, 0x0, 0x0) & RADIO_LED_ON)
+ brightness = LED_FULL;
+
+ return brightness;
+}
+
+static struct led_classdev radio_led = {
+ .name = "fujitsu::radio_led",
+ .brightness_set_blocking = radio_led_set,
+ .brightness_get = radio_led_get,
+ .default_trigger = "rfkill-any"
+};
+
+static int eco_led_set(struct led_classdev *cdev,
+ enum led_brightness brightness)
+{
+ int curr;
+
+ curr = call_fext_func(FUNC_LEDS, 0x2, ECO_LED, 0x0);
+ if (brightness >= LED_FULL)
+ return call_fext_func(FUNC_LEDS, 0x1, ECO_LED,
+ curr | ECO_LED_ON);
+ else
+ return call_fext_func(FUNC_LEDS, 0x1, ECO_LED,
+ curr & ~ECO_LED_ON);
+}
+
+static enum led_brightness eco_led_get(struct led_classdev *cdev)
+{
+ enum led_brightness brightness = LED_OFF;
+
+ if (call_fext_func(FUNC_LEDS, 0x2, ECO_LED, 0x0) & ECO_LED_ON)
+ brightness = LED_FULL;
+
+ return brightness;
+}
+
+static struct led_classdev eco_led = {
+ .name = "fujitsu::eco_led",
+ .brightness_set_blocking = eco_led_set,
+ .brightness_get = eco_led_get
+};
+
+static int acpi_fujitsu_laptop_leds_register(struct acpi_device *device)
+{
+ int result;
+
+ if (call_fext_func(FUNC_LEDS, 0x0, 0x0, 0x0) & LOGOLAMP_POWERON) {
+ result = devm_led_classdev_register(&device->dev,
+ &logolamp_led);
+ if (result)
+ return result;
+ }
+
+ if ((call_fext_func(FUNC_LEDS, 0x0, 0x0, 0x0) & KEYBOARD_LAMPS) &&
+ (call_fext_func(FUNC_BUTTONS, 0x0, 0x0, 0x0) == 0x0)) {
+ result = devm_led_classdev_register(&device->dev, &kblamps_led);
+ if (result)
+ return result;
+ }
+
+ /*
+ * BTNI bit 24 seems to indicate the presence of a radio toggle
+ * button in place of a slide switch, and all such machines appear
+ * to also have an RF LED. Therefore use bit 24 as an indicator
+ * that an RF LED is present.
+ */
+ if (call_fext_func(FUNC_BUTTONS, 0x0, 0x0, 0x0) & BIT(24)) {
+ result = devm_led_classdev_register(&device->dev, &radio_led);
+ if (result)
+ return result;
+ }
+
+ /* Support for the eco LED is not always signaled in the bit
+ * corresponding to the bit used to control the LED. According to
+ * the DSDT table, bit 14 seems to indicate presence of said LED
+ * as well. Confirm by testing the status.
+ */
+ if ((call_fext_func(FUNC_LEDS, 0x0, 0x0, 0x0) & BIT(14)) &&
+ (call_fext_func(FUNC_LEDS, 0x2, ECO_LED, 0x0) != UNSUPPORTED_CMD)) {
+ result = devm_led_classdev_register(&device->dev, &eco_led);
+ if (result)
+ return result;
+ }
+
+ return 0;
+}
+
static int acpi_fujitsu_laptop_add(struct acpi_device *device)
{
- int result = 0;
int state = 0;
- struct input_dev *input;
int error;
int i;
@@ -867,38 +791,14 @@ static int acpi_fujitsu_laptop_add(struct acpi_device *device)
goto err_stop;
}
- fujitsu_laptop->input = input = input_allocate_device();
- if (!input) {
- error = -ENOMEM;
- goto err_free_fifo;
- }
-
- snprintf(fujitsu_laptop->phys, sizeof(fujitsu_laptop->phys),
- "%s/video/input0", acpi_device_hid(device));
-
- input->name = acpi_device_name(device);
- input->phys = fujitsu_laptop->phys;
- input->id.bustype = BUS_HOST;
- input->id.product = 0x06;
- input->dev.parent = &device->dev;
-
- set_bit(EV_KEY, input->evbit);
- set_bit(fujitsu_bl->keycode1, input->keybit);
- set_bit(fujitsu_bl->keycode2, input->keybit);
- set_bit(fujitsu_bl->keycode3, input->keybit);
- set_bit(fujitsu_bl->keycode4, input->keybit);
- set_bit(fujitsu_bl->keycode5, input->keybit);
- set_bit(KEY_TOUCHPAD_TOGGLE, input->keybit);
- set_bit(KEY_UNKNOWN, input->keybit);
-
- error = input_register_device(input);
+ error = acpi_fujitsu_laptop_input_setup(device);
if (error)
- goto err_free_input_dev;
+ goto err_free_fifo;
error = acpi_bus_update_power(fujitsu_laptop->acpi_handle, &state);
if (error) {
pr_err("Error reading power state\n");
- goto err_unregister_input_dev;
+ goto err_free_fifo;
}
pr_info("ACPI: %s [%s] (%s)\n",
@@ -936,72 +836,25 @@ static int acpi_fujitsu_laptop_add(struct acpi_device *device)
/* Suspect this is a keymap of the application panel, print it */
pr_info("BTNI: [0x%x]\n", call_fext_func(FUNC_BUTTONS, 0x0, 0x0, 0x0));
-#if IS_ENABLED(CONFIG_LEDS_CLASS)
- if (call_fext_func(FUNC_LEDS, 0x0, 0x0, 0x0) & LOGOLAMP_POWERON) {
- result = led_classdev_register(&fujitsu_bl->pf_device->dev,
- &logolamp_led);
- if (result == 0) {
- fujitsu_laptop->logolamp_registered = 1;
- } else {
- pr_err("Could not register LED handler for logo lamp, error %i\n",
- result);
- }
- }
-
- if ((call_fext_func(FUNC_LEDS, 0x0, 0x0, 0x0) & KEYBOARD_LAMPS) &&
- (call_fext_func(FUNC_BUTTONS, 0x0, 0x0, 0x0) == 0x0)) {
- result = led_classdev_register(&fujitsu_bl->pf_device->dev,
- &kblamps_led);
- if (result == 0) {
- fujitsu_laptop->kblamps_registered = 1;
- } else {
- pr_err("Could not register LED handler for keyboard lamps, error %i\n",
- result);
- }
+ /* Sync backlight power status */
+ if (fujitsu_bl->bl_device &&
+ acpi_video_get_backlight_type() == acpi_backlight_vendor) {
+ if (call_fext_func(FUNC_BACKLIGHT, 0x2, 0x4, 0x0) == 3)
+ fujitsu_bl->bl_device->props.power = FB_BLANK_POWERDOWN;
+ else
+ fujitsu_bl->bl_device->props.power = FB_BLANK_UNBLANK;
}
- /*
- * BTNI bit 24 seems to indicate the presence of a radio toggle
- * button in place of a slide switch, and all such machines appear
- * to also have an RF LED. Therefore use bit 24 as an indicator
- * that an RF LED is present.
- */
- if (call_fext_func(FUNC_BUTTONS, 0x0, 0x0, 0x0) & BIT(24)) {
- result = led_classdev_register(&fujitsu_bl->pf_device->dev,
- &radio_led);
- if (result == 0) {
- fujitsu_laptop->radio_led_registered = 1;
- } else {
- pr_err("Could not register LED handler for radio LED, error %i\n",
- result);
- }
- }
+ error = acpi_fujitsu_laptop_leds_register(device);
+ if (error)
+ goto err_free_fifo;
- /* Support for eco led is not always signaled in bit corresponding
- * to the bit used to control the led. According to the DSDT table,
- * bit 14 seems to indicate presence of said led as well.
- * Confirm by testing the status.
- */
- if ((call_fext_func(FUNC_LEDS, 0x0, 0x0, 0x0) & BIT(14)) &&
- (call_fext_func(FUNC_LEDS, 0x2, ECO_LED, 0x0) != UNSUPPORTED_CMD)) {
- result = led_classdev_register(&fujitsu_bl->pf_device->dev,
- &eco_led);
- if (result == 0) {
- fujitsu_laptop->eco_led_registered = 1;
- } else {
- pr_err("Could not register LED handler for eco LED, error %i\n",
- result);
- }
- }
-#endif
+ error = fujitsu_laptop_platform_add();
+ if (error)
+ goto err_free_fifo;
- return result;
+ return 0;
-err_unregister_input_dev:
- input_unregister_device(input);
- input = NULL;
-err_free_input_dev:
- input_free_device(input);
err_free_fifo:
kfifo_free(&fujitsu_laptop->fifo);
err_stop:
@@ -1011,86 +864,62 @@ err_stop:
static int acpi_fujitsu_laptop_remove(struct acpi_device *device)
{
struct fujitsu_laptop *fujitsu_laptop = acpi_driver_data(device);
- struct input_dev *input = fujitsu_laptop->input;
-
-#if IS_ENABLED(CONFIG_LEDS_CLASS)
- if (fujitsu_laptop->logolamp_registered)
- led_classdev_unregister(&logolamp_led);
-
- if (fujitsu_laptop->kblamps_registered)
- led_classdev_unregister(&kblamps_led);
- if (fujitsu_laptop->radio_led_registered)
- led_classdev_unregister(&radio_led);
-
- if (fujitsu_laptop->eco_led_registered)
- led_classdev_unregister(&eco_led);
-#endif
-
- input_unregister_device(input);
+ fujitsu_laptop_platform_remove();
kfifo_free(&fujitsu_laptop->fifo);
- fujitsu_laptop->acpi_handle = NULL;
-
return 0;
}
-static void acpi_fujitsu_laptop_press(int keycode)
+static void acpi_fujitsu_laptop_press(int scancode)
{
struct input_dev *input = fujitsu_laptop->input;
int status;
status = kfifo_in_locked(&fujitsu_laptop->fifo,
- (unsigned char *)&keycode, sizeof(keycode),
+ (unsigned char *)&scancode, sizeof(scancode),
&fujitsu_laptop->fifo_lock);
- if (status != sizeof(keycode)) {
+ if (status != sizeof(scancode)) {
vdbg_printk(FUJLAPTOP_DBG_WARN,
- "Could not push keycode [0x%x]\n", keycode);
+ "Could not push scancode [0x%x]\n", scancode);
return;
}
- input_report_key(input, keycode, 1);
- input_sync(input);
+ sparse_keymap_report_event(input, scancode, 1, false);
vdbg_printk(FUJLAPTOP_DBG_TRACE,
- "Push keycode into ringbuffer [%d]\n", keycode);
+ "Push scancode into ringbuffer [0x%x]\n", scancode);
}
static void acpi_fujitsu_laptop_release(void)
{
struct input_dev *input = fujitsu_laptop->input;
- int keycode, status;
+ int scancode, status;
while (true) {
status = kfifo_out_locked(&fujitsu_laptop->fifo,
- (unsigned char *)&keycode,
- sizeof(keycode),
+ (unsigned char *)&scancode,
+ sizeof(scancode),
&fujitsu_laptop->fifo_lock);
- if (status != sizeof(keycode))
+ if (status != sizeof(scancode))
return;
- input_report_key(input, keycode, 0);
- input_sync(input);
+ sparse_keymap_report_event(input, scancode, 0, false);
vdbg_printk(FUJLAPTOP_DBG_TRACE,
- "Pop keycode from ringbuffer [%d]\n", keycode);
+ "Pop scancode from ringbuffer [0x%x]\n", scancode);
}
}
static void acpi_fujitsu_laptop_notify(struct acpi_device *device, u32 event)
{
struct input_dev *input;
- int keycode;
- unsigned int irb = 1;
- int i;
+ int scancode, i = 0;
+ unsigned int irb;
input = fujitsu_laptop->input;
if (event != ACPI_FUJITSU_NOTIFY_CODE1) {
- keycode = KEY_UNKNOWN;
vdbg_printk(FUJLAPTOP_DBG_WARN,
"Unsupported event [0x%x]\n", event);
- input_report_key(input, keycode, 1);
- input_sync(input);
- input_report_key(input, keycode, 0);
- input_sync(input);
+ sparse_keymap_report_event(input, -1, 1, true);
return;
}
@@ -1098,40 +927,16 @@ static void acpi_fujitsu_laptop_notify(struct acpi_device *device, u32 event)
fujitsu_laptop->flags_state =
call_fext_func(FUNC_FLAGS, 0x4, 0x0, 0x0);
- i = 0;
- while ((irb =
- call_fext_func(FUNC_BUTTONS, 0x1, 0x0, 0x0)) != 0
- && (i++) < MAX_HOTKEY_RINGBUFFER_SIZE) {
- switch (irb & 0x4ff) {
- case KEY1_CODE:
- keycode = fujitsu_bl->keycode1;
- break;
- case KEY2_CODE:
- keycode = fujitsu_bl->keycode2;
- break;
- case KEY3_CODE:
- keycode = fujitsu_bl->keycode3;
- break;
- case KEY4_CODE:
- keycode = fujitsu_bl->keycode4;
- break;
- case KEY5_CODE:
- keycode = fujitsu_bl->keycode5;
- break;
- case 0:
- keycode = 0;
- break;
- default:
+ while ((irb = call_fext_func(FUNC_BUTTONS, 0x1, 0x0, 0x0)) != 0 &&
+ i++ < MAX_HOTKEY_RINGBUFFER_SIZE) {
+ scancode = irb & 0x4ff;
+ if (sparse_keymap_entry_from_scancode(input, scancode))
+ acpi_fujitsu_laptop_press(scancode);
+ else if (scancode == 0)
+ acpi_fujitsu_laptop_release();
+ else
vdbg_printk(FUJLAPTOP_DBG_WARN,
"Unknown GIRB result [%x]\n", irb);
- keycode = -1;
- break;
- }
-
- if (keycode > 0)
- acpi_fujitsu_laptop_press(keycode);
- else if (keycode == 0)
- acpi_fujitsu_laptop_release();
}
/* On some models (first seen on the Skylake-based Lifebook
@@ -1139,14 +944,8 @@ static void acpi_fujitsu_laptop_notify(struct acpi_device *device, u32 event)
* handled in software; its state is queried using FUNC_FLAGS
*/
if ((fujitsu_laptop->flags_supported & BIT(26)) &&
- (call_fext_func(FUNC_FLAGS, 0x1, 0x0, 0x0) & BIT(26))) {
- keycode = KEY_TOUCHPAD_TOGGLE;
- input_report_key(input, keycode, 1);
- input_sync(input);
- input_report_key(input, keycode, 0);
- input_sync(input);
- }
-
+ (call_fext_func(FUNC_FLAGS, 0x1, 0x0, 0x0) & BIT(26)))
+ sparse_keymap_report_event(input, BIT(26), 1, true);
}
/* Initialization */
@@ -1162,7 +961,6 @@ static struct acpi_driver acpi_fujitsu_bl_driver = {
.ids = fujitsu_bl_device_ids,
.ops = {
.add = acpi_fujitsu_bl_add,
- .remove = acpi_fujitsu_bl_remove,
.notify = acpi_fujitsu_bl_notify,
},
};
@@ -1192,7 +990,7 @@ MODULE_DEVICE_TABLE(acpi, fujitsu_ids);
static int __init fujitsu_init(void)
{
- int ret, max_brightness;
+ int ret;
if (acpi_disabled)
return -ENODEV;
@@ -1200,100 +998,40 @@ static int __init fujitsu_init(void)
fujitsu_bl = kzalloc(sizeof(struct fujitsu_bl), GFP_KERNEL);
if (!fujitsu_bl)
return -ENOMEM;
- fujitsu_bl->keycode1 = KEY_PROG1;
- fujitsu_bl->keycode2 = KEY_PROG2;
- fujitsu_bl->keycode3 = KEY_PROG3;
- fujitsu_bl->keycode4 = KEY_PROG4;
- fujitsu_bl->keycode5 = KEY_RFKILL;
- dmi_check_system(fujitsu_dmi_table);
ret = acpi_bus_register_driver(&acpi_fujitsu_bl_driver);
if (ret)
- goto fail_acpi;
+ goto err_free_fujitsu_bl;
/* Register platform stuff */
- fujitsu_bl->pf_device = platform_device_alloc("fujitsu-laptop", -1);
- if (!fujitsu_bl->pf_device) {
- ret = -ENOMEM;
- goto fail_platform_driver;
- }
-
- ret = platform_device_add(fujitsu_bl->pf_device);
- if (ret)
- goto fail_platform_device1;
-
- ret =
- sysfs_create_group(&fujitsu_bl->pf_device->dev.kobj,
- &fujitsu_pf_attribute_group);
- if (ret)
- goto fail_platform_device2;
-
- /* Register backlight stuff */
-
- if (acpi_video_get_backlight_type() == acpi_backlight_vendor) {
- struct backlight_properties props;
-
- memset(&props, 0, sizeof(struct backlight_properties));
- max_brightness = fujitsu_bl->max_brightness;
- props.type = BACKLIGHT_PLATFORM;
- props.max_brightness = max_brightness - 1;
- fujitsu_bl->bl_device = backlight_device_register("fujitsu-laptop",
- NULL, NULL,
- &fujitsu_bl_ops,
- &props);
- if (IS_ERR(fujitsu_bl->bl_device)) {
- ret = PTR_ERR(fujitsu_bl->bl_device);
- fujitsu_bl->bl_device = NULL;
- goto fail_sysfs_group;
- }
- fujitsu_bl->bl_device->props.brightness = fujitsu_bl->brightness_level;
- }
-
ret = platform_driver_register(&fujitsu_pf_driver);
if (ret)
- goto fail_backlight;
+ goto err_unregister_acpi;
/* Register laptop driver */
fujitsu_laptop = kzalloc(sizeof(struct fujitsu_laptop), GFP_KERNEL);
if (!fujitsu_laptop) {
ret = -ENOMEM;
- goto fail_laptop;
+ goto err_unregister_platform_driver;
}
ret = acpi_bus_register_driver(&acpi_fujitsu_laptop_driver);
if (ret)
- goto fail_laptop1;
-
- /* Sync backlight power status (needs FUJ02E3 device, hence deferred) */
- if (acpi_video_get_backlight_type() == acpi_backlight_vendor) {
- if (call_fext_func(FUNC_BACKLIGHT, 0x2, 0x4, 0x0) == 3)
- fujitsu_bl->bl_device->props.power = FB_BLANK_POWERDOWN;
- else
- fujitsu_bl->bl_device->props.power = FB_BLANK_UNBLANK;
- }
+ goto err_free_fujitsu_laptop;
pr_info("driver " FUJITSU_DRIVER_VERSION " successfully loaded\n");
return 0;
-fail_laptop1:
+err_free_fujitsu_laptop:
kfree(fujitsu_laptop);
-fail_laptop:
+err_unregister_platform_driver:
platform_driver_unregister(&fujitsu_pf_driver);
-fail_backlight:
- backlight_device_unregister(fujitsu_bl->bl_device);
-fail_sysfs_group:
- sysfs_remove_group(&fujitsu_bl->pf_device->dev.kobj,
- &fujitsu_pf_attribute_group);
-fail_platform_device2:
- platform_device_del(fujitsu_bl->pf_device);
-fail_platform_device1:
- platform_device_put(fujitsu_bl->pf_device);
-fail_platform_driver:
+err_unregister_acpi:
acpi_bus_unregister_driver(&acpi_fujitsu_bl_driver);
-fail_acpi:
+err_free_fujitsu_bl:
kfree(fujitsu_bl);
return ret;
@@ -1307,13 +1045,6 @@ static void __exit fujitsu_cleanup(void)
platform_driver_unregister(&fujitsu_pf_driver);
- backlight_device_unregister(fujitsu_bl->bl_device);
-
- sysfs_remove_group(&fujitsu_bl->pf_device->dev.kobj,
- &fujitsu_pf_attribute_group);
-
- platform_device_unregister(fujitsu_bl->pf_device);
-
acpi_bus_unregister_driver(&acpi_fujitsu_bl_driver);
kfree(fujitsu_bl);
@@ -1324,11 +1055,10 @@ static void __exit fujitsu_cleanup(void)
module_init(fujitsu_init);
module_exit(fujitsu_cleanup);
-module_param(use_alt_lcd_levels, uint, 0644);
-MODULE_PARM_DESC(use_alt_lcd_levels,
- "Use alternative interface for lcd_levels (needed for Lifebook s6410).");
-module_param(disable_brightness_adjust, uint, 0644);
-MODULE_PARM_DESC(disable_brightness_adjust, "Disable brightness adjustment .");
+module_param(use_alt_lcd_levels, int, 0644);
+MODULE_PARM_DESC(use_alt_lcd_levels, "Interface used for setting LCD brightness level (-1 = auto, 0 = force SBLL, 1 = force SBL2)");
+module_param(disable_brightness_adjust, bool, 0644);
+MODULE_PARM_DESC(disable_brightness_adjust, "Disable LCD brightness adjustment");
#ifdef CONFIG_FUJITSU_LAPTOP_DEBUG
module_param_named(debug, dbg_level, uint, 0644);
MODULE_PARM_DESC(debug, "Sets debug level bit-mask");
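
For reference, a minimal sketch of the sparse-keymap pattern this conversion relies on (not part of the patch; the example_* names are hypothetical): sparse_keymap_setup() attaches a scancode-to-keycode table to the input device, and sparse_keymap_report_event() reports by scancode, with an autorelease flag and a KEY_UNKNOWN fallback for scancodes missing from the table.

#include <linux/input.h>
#include <linux/input/sparse-keymap.h>

static const struct key_entry example_keymap[] = {
	{ KE_KEY, 0x410, { KEY_PROG1 } },	/* scancode -> keycode */
	{ KE_END, 0 }
};

static int example_input_setup(struct device *dev)
{
	struct input_dev *idev = devm_input_allocate_device(dev);
	int ret;

	if (!idev)
		return -ENOMEM;

	ret = sparse_keymap_setup(idev, example_keymap, NULL);
	if (ret)
		return ret;

	return input_register_device(idev);
}

static void example_report(struct input_dev *idev, unsigned int scancode)
{
	/* autorelease=true reports the press and the release in one call */
	if (!sparse_keymap_report_event(idev, scancode, 1, true))
		pr_debug("scancode 0x%x not in keymap\n", scancode);
}
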
diff --git a/drivers/platform/x86/hp-wireless.c b/drivers/platform/x86/hp-wireless.c
index 988eedbd7c63..d6ea5e998fb8 100644
--- a/drivers/platform/x86/hp-wireless.c
+++ b/drivers/platform/x86/hp-wireless.c
@@ -1,7 +1,7 @@
/*
- * hp-wireless button for Windows 8
+ * Airplane mode button for HP & Xiaomi laptops
*
- * Copyright (C) 2014 Alex Hung <alex.hung@canonical.com>
+ * Copyright (C) 2014-2017 Alex Hung <alex.hung@canonical.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -29,11 +29,13 @@
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Alex Hung");
MODULE_ALIAS("acpi*:HPQ6001:*");
+MODULE_ALIAS("acpi*:WSTADEF:*");
static struct input_dev *hpwl_input_dev;
static const struct acpi_device_id hpwl_ids[] = {
{"HPQ6001", 0},
+ {"WSTADEF", 0},
{"", 0},
};
@@ -108,23 +110,4 @@ static struct acpi_driver hpwl_driver = {
},
};
-static int __init hpwl_init(void)
-{
- int err;
-
- pr_info("Initializing HPQ6001 module\n");
- err = acpi_bus_register_driver(&hpwl_driver);
- if (err)
- pr_err("Unable to register HP wireless control driver.\n");
-
- return err;
-}
-
-static void __exit hpwl_exit(void)
-{
- pr_info("Exiting HPQ6001 module\n");
- acpi_bus_unregister_driver(&hpwl_driver);
-}
-
-module_init(hpwl_init);
-module_exit(hpwl_exit);
+module_acpi_driver(hpwl_driver);
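
For reference, module_acpi_driver() is a convenience macro; a rough sketch of what it expands to (minus the removed pr_info()/pr_err() logging, function names approximate):

static int __init hpwl_driver_init(void)
{
	return acpi_bus_register_driver(&hpwl_driver);
}
module_init(hpwl_driver_init);

static void __exit hpwl_driver_exit(void)
{
	acpi_bus_unregister_driver(&hpwl_driver);
}
module_exit(hpwl_driver_exit);
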
diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
index 96ffda493266..0df4209648d1 100644
--- a/drivers/platform/x86/hp-wmi.c
+++ b/drivers/platform/x86/hp-wmi.c
@@ -48,41 +48,29 @@ MODULE_ALIAS("wmi:5FB7F034-2C63-45e9-BE91-3D44E2C707E4");
#define HPWMI_EVENT_GUID "95F24279-4D7B-4334-9387-ACCDC67EF61C"
#define HPWMI_BIOS_GUID "5FB7F034-2C63-45e9-BE91-3D44E2C707E4"
-#define HPWMI_DISPLAY_QUERY 0x1
-#define HPWMI_HDDTEMP_QUERY 0x2
-#define HPWMI_ALS_QUERY 0x3
-#define HPWMI_HARDWARE_QUERY 0x4
-#define HPWMI_WIRELESS_QUERY 0x5
-#define HPWMI_BIOS_QUERY 0x9
-#define HPWMI_FEATURE_QUERY 0xb
-#define HPWMI_HOTKEY_QUERY 0xc
-#define HPWMI_FEATURE2_QUERY 0xd
-#define HPWMI_WIRELESS2_QUERY 0x1b
-#define HPWMI_POSTCODEERROR_QUERY 0x2a
-
enum hp_wmi_radio {
- HPWMI_WIFI = 0,
- HPWMI_BLUETOOTH = 1,
- HPWMI_WWAN = 2,
- HPWMI_GPS = 3,
+ HPWMI_WIFI = 0x0,
+ HPWMI_BLUETOOTH = 0x1,
+ HPWMI_WWAN = 0x2,
+ HPWMI_GPS = 0x3,
};
enum hp_wmi_event_ids {
- HPWMI_DOCK_EVENT = 1,
- HPWMI_PARK_HDD = 2,
- HPWMI_SMART_ADAPTER = 3,
- HPWMI_BEZEL_BUTTON = 4,
- HPWMI_WIRELESS = 5,
- HPWMI_CPU_BATTERY_THROTTLE = 6,
- HPWMI_LOCK_SWITCH = 7,
- HPWMI_LID_SWITCH = 8,
- HPWMI_SCREEN_ROTATION = 9,
- HPWMI_COOLSENSE_SYSTEM_MOBILE = 0x0A,
- HPWMI_COOLSENSE_SYSTEM_HOT = 0x0B,
- HPWMI_PROXIMITY_SENSOR = 0x0C,
- HPWMI_BACKLIT_KB_BRIGHTNESS = 0x0D,
- HPWMI_PEAKSHIFT_PERIOD = 0x0F,
- HPWMI_BATTERY_CHARGE_PERIOD = 0x10,
+ HPWMI_DOCK_EVENT = 0x01,
+ HPWMI_PARK_HDD = 0x02,
+ HPWMI_SMART_ADAPTER = 0x03,
+ HPWMI_BEZEL_BUTTON = 0x04,
+ HPWMI_WIRELESS = 0x05,
+ HPWMI_CPU_BATTERY_THROTTLE = 0x06,
+ HPWMI_LOCK_SWITCH = 0x07,
+ HPWMI_LID_SWITCH = 0x08,
+ HPWMI_SCREEN_ROTATION = 0x09,
+ HPWMI_COOLSENSE_SYSTEM_MOBILE = 0x0A,
+ HPWMI_COOLSENSE_SYSTEM_HOT = 0x0B,
+ HPWMI_PROXIMITY_SENSOR = 0x0C,
+ HPWMI_BACKLIT_KB_BRIGHTNESS = 0x0D,
+ HPWMI_PEAKSHIFT_PERIOD = 0x0F,
+ HPWMI_BATTERY_CHARGE_PERIOD = 0x10,
};
struct bios_args {
@@ -93,6 +81,39 @@ struct bios_args {
u32 data;
};
+enum hp_wmi_commandtype {
+ HPWMI_DISPLAY_QUERY = 0x01,
+ HPWMI_HDDTEMP_QUERY = 0x02,
+ HPWMI_ALS_QUERY = 0x03,
+ HPWMI_HARDWARE_QUERY = 0x04,
+ HPWMI_WIRELESS_QUERY = 0x05,
+ HPWMI_BATTERY_QUERY = 0x07,
+ HPWMI_BIOS_QUERY = 0x09,
+ HPWMI_FEATURE_QUERY = 0x0b,
+ HPWMI_HOTKEY_QUERY = 0x0c,
+ HPWMI_FEATURE2_QUERY = 0x0d,
+ HPWMI_WIRELESS2_QUERY = 0x1b,
+ HPWMI_POSTCODEERROR_QUERY = 0x2a,
+};
+
+enum hp_wmi_command {
+ HPWMI_READ = 0x01,
+ HPWMI_WRITE = 0x02,
+ HPWMI_ODM = 0x03,
+};
+
+enum hp_wmi_hardware_mask {
+ HPWMI_DOCK_MASK = 0x01,
+ HPWMI_TABLET_MASK = 0x04,
+};
+
+#define BIOS_ARGS_INIT(write, ctype, size) \
+ (struct bios_args) { .signature = 0x55434553, \
+ .command = (write) ? 0x2 : 0x1, \
+ .commandtype = (ctype), \
+ .datasize = (size), \
+ .data = 0 }
+
struct bios_return {
u32 sigpass;
u32 return_code;
@@ -170,8 +191,8 @@ static struct rfkill2_device rfkill2[HPWMI_MAX_RFKILL2_DEVICES];
/*
* hp_wmi_perform_query
*
- * query: The commandtype -> What should be queried
- * write: The command -> 0 read, 1 write, 3 ODM specific
+ * query: The commandtype (enum hp_wmi_commandtype)
+ * write: The command (enum hp_wmi_command)
* buffer: Buffer used as input and/or output
* insize: Size of input buffer
* outsize: Size of output buffer
@@ -182,27 +203,27 @@ static struct rfkill2_device rfkill2[HPWMI_MAX_RFKILL2_DEVICES];
* -EINVAL if the output buffer size exceeds buffersize
*
* Note: The buffersize must at least be the maximum of the input and output
- * size. E.g. Battery info query (0x7) is defined to have 1 byte input
+ * size. E.g. Battery info query is defined to have 1 byte input
* and 128 byte output. The caller would do:
* buffer = kzalloc(128, GFP_KERNEL);
- * ret = hp_wmi_perform_query(0x7, 0, buffer, 1, 128)
+ * ret = hp_wmi_perform_query(HPWMI_BATTERY_QUERY, HPWMI_READ, buffer, 1, 128)
*/
-static int hp_wmi_perform_query(int query, int write, void *buffer,
- int insize, int outsize)
+static int hp_wmi_perform_query(int query, enum hp_wmi_command command,
+ void *buffer, int insize, int outsize)
{
struct bios_return *bios_return;
int actual_outsize;
union acpi_object *obj;
struct bios_args args = {
.signature = 0x55434553,
- .command = write ? 0x2 : 0x1,
+ .command = command,
.commandtype = query,
.datasize = insize,
.data = 0,
};
struct acpi_buffer input = { sizeof(struct bios_args), &args };
struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
- u32 rc;
+ int ret = 0;
if (WARN_ON(insize > sizeof(args.data)))
return -EINVAL;
@@ -214,91 +235,61 @@ static int hp_wmi_perform_query(int query, int write, void *buffer,
if (!obj)
return -EINVAL;
- else if (obj->type != ACPI_TYPE_BUFFER) {
- kfree(obj);
- return -EINVAL;
+
+ if (obj->type != ACPI_TYPE_BUFFER) {
+ ret = -EINVAL;
+ goto out_free;
}
bios_return = (struct bios_return *)obj->buffer.pointer;
- rc = bios_return->return_code;
+ ret = bios_return->return_code;
- if (rc) {
- if (rc != HPWMI_RET_UNKNOWN_CMDTYPE)
- pr_warn("query 0x%x returned error 0x%x\n", query, rc);
- kfree(obj);
- return rc;
+ if (ret) {
+ if (ret != HPWMI_RET_UNKNOWN_CMDTYPE)
+ pr_warn("query 0x%x returned error 0x%x\n", query, ret);
+ goto out_free;
}
- if (!outsize) {
- /* ignore output data */
- kfree(obj);
- return 0;
- }
+ /* Ignore output data of zero size */
+ if (!outsize)
+ goto out_free;
actual_outsize = min(outsize, (int)(obj->buffer.length - sizeof(*bios_return)));
memcpy(buffer, obj->buffer.pointer + sizeof(*bios_return), actual_outsize);
memset(buffer + actual_outsize, 0, outsize - actual_outsize);
+
+out_free:
kfree(obj);
- return 0;
+ return ret;
}
-static int hp_wmi_display_state(void)
+static int hp_wmi_read_int(int query)
{
- int state = 0;
- int ret = hp_wmi_perform_query(HPWMI_DISPLAY_QUERY, 0, &state,
- sizeof(state), sizeof(state));
- if (ret)
- return -EINVAL;
- return state;
-}
+ int val = 0, ret;
-static int hp_wmi_hddtemp_state(void)
-{
- int state = 0;
- int ret = hp_wmi_perform_query(HPWMI_HDDTEMP_QUERY, 0, &state,
- sizeof(state), sizeof(state));
- if (ret)
- return -EINVAL;
- return state;
-}
+ ret = hp_wmi_perform_query(query, HPWMI_READ, &val,
+ sizeof(val), sizeof(val));
-static int hp_wmi_als_state(void)
-{
- int state = 0;
- int ret = hp_wmi_perform_query(HPWMI_ALS_QUERY, 0, &state,
- sizeof(state), sizeof(state));
if (ret)
- return -EINVAL;
- return state;
+ return ret < 0 ? ret : -EINVAL;
+
+ return val;
}
-static int hp_wmi_dock_state(void)
+static int hp_wmi_hw_state(int mask)
{
- int state = 0;
- int ret = hp_wmi_perform_query(HPWMI_HARDWARE_QUERY, 0, &state,
- sizeof(state), sizeof(state));
+ int state = hp_wmi_read_int(HPWMI_HARDWARE_QUERY);
- if (ret)
- return -EINVAL;
+ if (state < 0)
+ return state;
return state & 0x1;
}
-static int hp_wmi_tablet_state(void)
-{
- int state = 0;
- int ret = hp_wmi_perform_query(HPWMI_HARDWARE_QUERY, 0, &state,
- sizeof(state), sizeof(state));
- if (ret)
- return ret;
-
- return (state & 0x4) ? 1 : 0;
-}
-
static int __init hp_wmi_bios_2008_later(void)
{
int state = 0;
- int ret = hp_wmi_perform_query(HPWMI_FEATURE_QUERY, 0, &state,
+ int ret = hp_wmi_perform_query(HPWMI_FEATURE_QUERY, HPWMI_READ, &state,
sizeof(state), sizeof(state));
if (!ret)
return 1;
@@ -309,7 +300,7 @@ static int __init hp_wmi_bios_2008_later(void)
static int __init hp_wmi_bios_2009_later(void)
{
int state = 0;
- int ret = hp_wmi_perform_query(HPWMI_FEATURE2_QUERY, 0, &state,
+ int ret = hp_wmi_perform_query(HPWMI_FEATURE2_QUERY, HPWMI_READ, &state,
sizeof(state), sizeof(state));
if (!ret)
return 1;
@@ -320,11 +311,10 @@ static int __init hp_wmi_bios_2009_later(void)
static int __init hp_wmi_enable_hotkeys(void)
{
int value = 0x6e;
- int ret = hp_wmi_perform_query(HPWMI_BIOS_QUERY, 1, &value,
+ int ret = hp_wmi_perform_query(HPWMI_BIOS_QUERY, HPWMI_WRITE, &value,
sizeof(value), 0);
- if (ret)
- return -EINVAL;
- return 0;
+
+ return ret <= 0 ? ret : -EINVAL;
}
static int hp_wmi_set_block(void *data, bool blocked)
@@ -333,11 +323,10 @@ static int hp_wmi_set_block(void *data, bool blocked)
int query = BIT(r + 8) | ((!blocked) << r);
int ret;
- ret = hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 1,
+ ret = hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, HPWMI_WRITE,
&query, sizeof(query), 0);
- if (ret)
- return -EINVAL;
- return 0;
+
+ return ret <= 0 ? ret : -EINVAL;
}
static const struct rfkill_ops hp_wmi_rfkill_ops = {
@@ -346,47 +335,38 @@ static const struct rfkill_ops hp_wmi_rfkill_ops = {
static bool hp_wmi_get_sw_state(enum hp_wmi_radio r)
{
- int wireless = 0;
- int mask;
- hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 0,
- &wireless, sizeof(wireless),
- sizeof(wireless));
- /* TBD: Pass error */
+ int mask = 0x200 << (r * 8);
- mask = 0x200 << (r * 8);
+ int wireless = hp_wmi_read_int(HPWMI_WIRELESS_QUERY);
- if (wireless & mask)
- return false;
- else
- return true;
+ /* TBD: Pass error */
+ WARN_ONCE(wireless < 0, "error executing HPWMI_WIRELESS_QUERY");
+
+ return !(wireless & mask);
}
static bool hp_wmi_get_hw_state(enum hp_wmi_radio r)
{
- int wireless = 0;
- int mask;
- hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 0,
- &wireless, sizeof(wireless),
- sizeof(wireless));
- /* TBD: Pass error */
+ int mask = 0x800 << (r * 8);
- mask = 0x800 << (r * 8);
+ int wireless = hp_wmi_read_int(HPWMI_WIRELESS_QUERY);
- if (wireless & mask)
- return false;
- else
- return true;
+ /* TBD: Pass error */
+ WARN_ONCE(wireless < 0, "error executing HPWMI_WIRELESS_QUERY");
+
+ return !(wireless & mask);
}
static int hp_wmi_rfkill2_set_block(void *data, bool blocked)
{
int rfkill_id = (int)(long)data;
char buffer[4] = { 0x01, 0x00, rfkill_id, !blocked };
+ int ret;
- if (hp_wmi_perform_query(HPWMI_WIRELESS2_QUERY, 1,
- buffer, sizeof(buffer), 0))
- return -EINVAL;
- return 0;
+ ret = hp_wmi_perform_query(HPWMI_WIRELESS2_QUERY, HPWMI_WRITE,
+ buffer, sizeof(buffer), 0);
+
+ return ret <= 0 ? ret : -EINVAL;
}
static const struct rfkill_ops hp_wmi_rfkill2_ops = {
@@ -395,10 +375,10 @@ static const struct rfkill_ops hp_wmi_rfkill2_ops = {
static int hp_wmi_rfkill2_refresh(void)
{
- int err, i;
struct bios_rfkill2_state state;
+ int err, i;
- err = hp_wmi_perform_query(HPWMI_WIRELESS2_QUERY, 0, &state,
+ err = hp_wmi_perform_query(HPWMI_WIRELESS2_QUERY, HPWMI_READ, &state,
0, sizeof(state));
if (err)
return err;
@@ -422,119 +402,113 @@ static int hp_wmi_rfkill2_refresh(void)
return 0;
}
-static int hp_wmi_post_code_state(void)
-{
- int state = 0;
- int ret = hp_wmi_perform_query(HPWMI_POSTCODEERROR_QUERY, 0, &state,
- sizeof(state), sizeof(state));
- if (ret)
- return -EINVAL;
- return state;
-}
-
-static ssize_t show_display(struct device *dev, struct device_attribute *attr,
+static ssize_t display_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- int value = hp_wmi_display_state();
+ int value = hp_wmi_read_int(HPWMI_DISPLAY_QUERY);
if (value < 0)
- return -EINVAL;
+ return value;
return sprintf(buf, "%d\n", value);
}
-static ssize_t show_hddtemp(struct device *dev, struct device_attribute *attr,
+static ssize_t hddtemp_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- int value = hp_wmi_hddtemp_state();
+ int value = hp_wmi_read_int(HPWMI_HDDTEMP_QUERY);
if (value < 0)
- return -EINVAL;
+ return value;
return sprintf(buf, "%d\n", value);
}
-static ssize_t show_als(struct device *dev, struct device_attribute *attr,
+static ssize_t als_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- int value = hp_wmi_als_state();
+ int value = hp_wmi_read_int(HPWMI_ALS_QUERY);
if (value < 0)
- return -EINVAL;
+ return value;
return sprintf(buf, "%d\n", value);
}
-static ssize_t show_dock(struct device *dev, struct device_attribute *attr,
+static ssize_t dock_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- int value = hp_wmi_dock_state();
+ int value = hp_wmi_hw_state(HPWMI_DOCK_MASK);
if (value < 0)
- return -EINVAL;
+ return value;
return sprintf(buf, "%d\n", value);
}
-static ssize_t show_tablet(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t tablet_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
- int value = hp_wmi_tablet_state();
+ int value = hp_wmi_hw_state(HPWMI_TABLET_MASK);
if (value < 0)
- return -EINVAL;
+ return value;
return sprintf(buf, "%d\n", value);
}
-static ssize_t show_postcode(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t postcode_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
/* Get the POST error code of previous boot failure. */
- int value = hp_wmi_post_code_state();
+ int value = hp_wmi_read_int(HPWMI_POSTCODEERROR_QUERY);
if (value < 0)
- return -EINVAL;
+ return value;
return sprintf(buf, "0x%x\n", value);
}
-static ssize_t set_als(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t als_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
u32 tmp = simple_strtoul(buf, NULL, 10);
- int ret = hp_wmi_perform_query(HPWMI_ALS_QUERY, 1, &tmp,
+ int ret = hp_wmi_perform_query(HPWMI_ALS_QUERY, HPWMI_WRITE, &tmp,
sizeof(tmp), sizeof(tmp));
if (ret)
- return -EINVAL;
+ return ret < 0 ? ret : -EINVAL;
return count;
}
-static ssize_t set_postcode(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t postcode_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
+ long unsigned int tmp2;
int ret;
u32 tmp;
- long unsigned int tmp2;
ret = kstrtoul(buf, 10, &tmp2);
- if (ret || tmp2 != 1)
- return -EINVAL;
+ if (!ret && tmp2 != 1)
+ ret = -EINVAL;
+ if (ret)
+ goto out;
/* Clear the POST error code. It is kept until cleared. */
tmp = (u32) tmp2;
- ret = hp_wmi_perform_query(HPWMI_POSTCODEERROR_QUERY, 1, &tmp,
+ ret = hp_wmi_perform_query(HPWMI_POSTCODEERROR_QUERY, HPWMI_WRITE, &tmp,
sizeof(tmp), sizeof(tmp));
+
+out:
if (ret)
- return -EINVAL;
+ return ret < 0 ? ret : -EINVAL;
return count;
}
-static DEVICE_ATTR(display, S_IRUGO, show_display, NULL);
-static DEVICE_ATTR(hddtemp, S_IRUGO, show_hddtemp, NULL);
-static DEVICE_ATTR(als, S_IRUGO | S_IWUSR, show_als, set_als);
-static DEVICE_ATTR(dock, S_IRUGO, show_dock, NULL);
-static DEVICE_ATTR(tablet, S_IRUGO, show_tablet, NULL);
-static DEVICE_ATTR(postcode, S_IRUGO | S_IWUSR, show_postcode, set_postcode);
+static DEVICE_ATTR_RO(display);
+static DEVICE_ATTR_RO(hddtemp);
+static DEVICE_ATTR_RW(als);
+static DEVICE_ATTR_RO(dock);
+static DEVICE_ATTR_RO(tablet);
+static DEVICE_ATTR_RW(postcode);
static void hp_wmi_notify(u32 value, void *context)
{
struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
- union acpi_object *obj;
u32 event_id, event_data;
- int key_code = 0, ret;
- u32 *location;
+ union acpi_object *obj;
acpi_status status;
+ u32 *location;
+ int key_code;
status = wmi_get_event_data(value, &response);
if (status != AE_OK) {
@@ -572,10 +546,12 @@ static void hp_wmi_notify(u32 value, void *context)
switch (event_id) {
case HPWMI_DOCK_EVENT:
- input_report_switch(hp_wmi_input_dev, SW_DOCK,
- hp_wmi_dock_state());
- input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE,
- hp_wmi_tablet_state());
+ if (test_bit(SW_DOCK, hp_wmi_input_dev->swbit))
+ input_report_switch(hp_wmi_input_dev, SW_DOCK,
+ hp_wmi_hw_state(HPWMI_DOCK_MASK));
+ if (test_bit(SW_TABLET_MODE, hp_wmi_input_dev->swbit))
+ input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE,
+ hp_wmi_hw_state(HPWMI_TABLET_MASK));
input_sync(hp_wmi_input_dev);
break;
case HPWMI_PARK_HDD:
@@ -583,11 +559,8 @@ static void hp_wmi_notify(u32 value, void *context)
case HPWMI_SMART_ADAPTER:
break;
case HPWMI_BEZEL_BUTTON:
- ret = hp_wmi_perform_query(HPWMI_HOTKEY_QUERY, 0,
- &key_code,
- sizeof(key_code),
- sizeof(key_code));
- if (ret)
+ key_code = hp_wmi_read_int(HPWMI_HOTKEY_QUERY);
+ if (key_code < 0)
break;
if (!sparse_keymap_report_event(hp_wmi_input_dev,
@@ -643,7 +616,7 @@ static void hp_wmi_notify(u32 value, void *context)
static int __init hp_wmi_input_setup(void)
{
acpi_status status;
- int err;
+ int err, val;
hp_wmi_input_dev = input_allocate_device();
if (!hp_wmi_input_dev)
@@ -654,17 +627,26 @@ static int __init hp_wmi_input_setup(void)
hp_wmi_input_dev->id.bustype = BUS_HOST;
__set_bit(EV_SW, hp_wmi_input_dev->evbit);
- __set_bit(SW_DOCK, hp_wmi_input_dev->swbit);
- __set_bit(SW_TABLET_MODE, hp_wmi_input_dev->swbit);
+
+ /* Dock */
+ val = hp_wmi_hw_state(HPWMI_DOCK_MASK);
+ if (!(val < 0)) {
+ __set_bit(SW_DOCK, hp_wmi_input_dev->swbit);
+ input_report_switch(hp_wmi_input_dev, SW_DOCK, val);
+ }
+
+ /* Tablet mode */
+ val = hp_wmi_hw_state(HPWMI_TABLET_MASK);
+ if (!(val < 0)) {
+ __set_bit(SW_TABLET_MODE, hp_wmi_input_dev->swbit);
+ input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE, val);
+ }
err = sparse_keymap_setup(hp_wmi_input_dev, hp_wmi_keymap, NULL);
if (err)
goto err_free_dev;
/* Set initial hardware state */
- input_report_switch(hp_wmi_input_dev, SW_DOCK, hp_wmi_dock_state());
- input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE,
- hp_wmi_tablet_state());
input_sync(hp_wmi_input_dev);
if (!hp_wmi_bios_2009_later() && hp_wmi_bios_2008_later())
@@ -673,7 +655,7 @@ static int __init hp_wmi_input_setup(void)
status = wmi_install_notify_handler(HPWMI_EVENT_GUID, hp_wmi_notify, NULL);
if (ACPI_FAILURE(status)) {
err = -EIO;
- goto err_free_keymap;
+ goto err_free_dev;
}
err = input_register_device(hp_wmi_input_dev);
@@ -684,8 +666,6 @@ static int __init hp_wmi_input_setup(void)
err_uninstall_notifier:
wmi_remove_notify_handler(HPWMI_EVENT_GUID);
- err_free_keymap:
- sparse_keymap_free(hp_wmi_input_dev);
err_free_dev:
input_free_device(hp_wmi_input_dev);
return err;
@@ -694,7 +674,6 @@ static int __init hp_wmi_input_setup(void)
static void hp_wmi_input_destroy(void)
{
wmi_remove_notify_handler(HPWMI_EVENT_GUID);
- sparse_keymap_free(hp_wmi_input_dev);
input_unregister_device(hp_wmi_input_dev);
}
@@ -710,15 +689,13 @@ static void cleanup_sysfs(struct platform_device *device)
static int __init hp_wmi_rfkill_setup(struct platform_device *device)
{
- int err;
- int wireless = 0;
+ int err, wireless;
- err = hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 0, &wireless,
- sizeof(wireless), sizeof(wireless));
- if (err)
- return err;
+ wireless = hp_wmi_read_int(HPWMI_WIRELESS_QUERY);
+ if (wireless < 0)
+ return wireless;
- err = hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 1, &wireless,
+ err = hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, HPWMI_WRITE, &wireless,
sizeof(wireless), 0);
if (err)
return err;
@@ -795,12 +772,13 @@ register_wifi_error:
static int __init hp_wmi_rfkill2_setup(struct platform_device *device)
{
- int err, i;
struct bios_rfkill2_state state;
- err = hp_wmi_perform_query(HPWMI_WIRELESS2_QUERY, 0, &state,
+ int err, i;
+
+ err = hp_wmi_perform_query(HPWMI_WIRELESS2_QUERY, HPWMI_READ, &state,
0, sizeof(state));
if (err)
- return err;
+ return err < 0 ? err : -EINVAL;
if (state.count > HPWMI_MAX_RFKILL2_DEVICES) {
pr_warn("unable to parse 0x1b query output\n");
@@ -950,10 +928,12 @@ static int hp_wmi_resume_handler(struct device *device)
* changed.
*/
if (hp_wmi_input_dev) {
- input_report_switch(hp_wmi_input_dev, SW_DOCK,
- hp_wmi_dock_state());
- input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE,
- hp_wmi_tablet_state());
+ if (test_bit(SW_DOCK, hp_wmi_input_dev->swbit))
+ input_report_switch(hp_wmi_input_dev, SW_DOCK,
+ hp_wmi_hw_state(HPWMI_DOCK_MASK));
+ if (test_bit(SW_TABLET_MODE, hp_wmi_input_dev->swbit))
+ input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE,
+ hp_wmi_hw_state(HPWMI_TABLET_MASK));
input_sync(hp_wmi_input_dev);
}
@@ -991,9 +971,9 @@ static struct platform_driver hp_wmi_driver = {
static int __init hp_wmi_init(void)
{
- int err;
int event_capable = wmi_has_guid(HPWMI_EVENT_GUID);
int bios_capable = wmi_has_guid(HPWMI_BIOS_GUID);
+ int err;
if (!bios_capable && !event_capable)
return -ENODEV;
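
For reference, a minimal sketch of a hypothetical caller (not in the patch) showing how the new hp_wmi_read_int()/hp_wmi_hw_state() helpers let errors propagate as negative values instead of being flattened to -EINVAL:

static int example_report_dock(struct input_dev *idev)
{
	int state = hp_wmi_hw_state(HPWMI_DOCK_MASK);

	if (state < 0)
		return state;	/* error propagated from hp_wmi_perform_query() */

	input_report_switch(idev, SW_DOCK, state);
	input_sync(idev);

	return 0;
}
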
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index f46ece2ce3c4..24ca9fbe31cc 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -604,14 +604,12 @@ static int ideapad_input_init(struct ideapad_private *priv)
error = input_register_device(inputdev);
if (error) {
pr_err("Unable to register input device\n");
- goto err_free_keymap;
+ goto err_free_dev;
}
priv->inputdev = inputdev;
return 0;
-err_free_keymap:
- sparse_keymap_free(inputdev);
err_free_dev:
input_free_device(inputdev);
return error;
@@ -619,7 +617,6 @@ err_free_dev:
static void ideapad_input_exit(struct ideapad_private *priv)
{
- sparse_keymap_free(priv->inputdev);
input_unregister_device(priv->inputdev);
priv->inputdev = NULL;
}
@@ -872,6 +869,20 @@ static const struct dmi_system_id no_hw_rfkill_list[] = {
},
},
{
+ .ident = "Lenovo V310-15ISK",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo V310-15ISK"),
+ },
+ },
+ {
+ .ident = "Lenovo ideapad 310-15IKB",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad 310-15IKB"),
+ },
+ },
+ {
.ident = "Lenovo ideapad Y700-15ACZ",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
diff --git a/drivers/platform/x86/intel-hid.c b/drivers/platform/x86/intel-hid.c
index bcf438f38781..63ba2cbd04c2 100644
--- a/drivers/platform/x86/intel-hid.c
+++ b/drivers/platform/x86/intel-hid.c
@@ -77,14 +77,12 @@ struct intel_hid_priv {
struct input_dev *array;
};
-static int intel_hid_set_enable(struct device *device, int enable)
+static int intel_hid_set_enable(struct device *device, bool enable)
{
- union acpi_object arg0 = { ACPI_TYPE_INTEGER };
- struct acpi_object_list args = { 1, &arg0 };
acpi_status status;
- arg0.integer.value = enable;
- status = acpi_evaluate_object(ACPI_HANDLE(device), "HDSM", &args, NULL);
+ status = acpi_execute_simple_method(ACPI_HANDLE(device), "HDSM",
+ enable);
if (ACPI_FAILURE(status)) {
dev_warn(device, "failed to %sable hotkeys\n",
enable ? "en" : "dis");
@@ -120,7 +118,7 @@ static void intel_button_array_enable(struct device *device, bool enable)
static int intel_hid_pl_suspend_handler(struct device *device)
{
- intel_hid_set_enable(device, 0);
+ intel_hid_set_enable(device, false);
intel_button_array_enable(device, false);
return 0;
@@ -128,7 +126,7 @@ static int intel_hid_pl_suspend_handler(struct device *device)
static int intel_hid_pl_resume_handler(struct device *device)
{
- intel_hid_set_enable(device, 1);
+ intel_hid_set_enable(device, true);
intel_button_array_enable(device, true);
return 0;
@@ -136,6 +134,7 @@ static int intel_hid_pl_resume_handler(struct device *device)
static const struct dev_pm_ops intel_hid_pl_pm_ops = {
.freeze = intel_hid_pl_suspend_handler,
+ .thaw = intel_hid_pl_resume_handler,
.restore = intel_hid_pl_resume_handler,
.suspend = intel_hid_pl_suspend_handler,
.resume = intel_hid_pl_resume_handler,
@@ -146,28 +145,18 @@ static int intel_hid_input_setup(struct platform_device *device)
struct intel_hid_priv *priv = dev_get_drvdata(&device->dev);
int ret;
- priv->input_dev = input_allocate_device();
+ priv->input_dev = devm_input_allocate_device(&device->dev);
if (!priv->input_dev)
return -ENOMEM;
ret = sparse_keymap_setup(priv->input_dev, intel_hid_keymap, NULL);
if (ret)
- goto err_free_device;
+ return ret;
- priv->input_dev->dev.parent = &device->dev;
priv->input_dev->name = "Intel HID events";
priv->input_dev->id.bustype = BUS_HOST;
- set_bit(KEY_RFKILL, priv->input_dev->keybit);
-
- ret = input_register_device(priv->input_dev);
- if (ret)
- goto err_free_device;
- return 0;
-
-err_free_device:
- input_free_device(priv->input_dev);
- return ret;
+ return input_register_device(priv->input_dev);
}
static int intel_button_array_input_setup(struct platform_device *device)
@@ -184,20 +173,12 @@ static int intel_button_array_input_setup(struct platform_device *device)
if (ret)
return ret;
- priv->array->dev.parent = &device->dev;
priv->array->name = "Intel HID 5 button array";
priv->array->id.bustype = BUS_HOST;
return input_register_device(priv->array);
}
-static void intel_hid_input_destroy(struct platform_device *device)
-{
- struct intel_hid_priv *priv = dev_get_drvdata(&device->dev);
-
- input_unregister_device(priv->input_dev);
-}
-
static void notify_handler(acpi_handle handle, u32 event, void *context)
{
struct platform_device *device = context;
@@ -272,12 +253,10 @@ static int intel_hid_probe(struct platform_device *device)
ACPI_DEVICE_NOTIFY,
notify_handler,
device);
- if (ACPI_FAILURE(status)) {
- err = -EBUSY;
- goto err_remove_input;
- }
+ if (ACPI_FAILURE(status))
+ return -EBUSY;
- err = intel_hid_set_enable(&device->dev, 1);
+ err = intel_hid_set_enable(&device->dev, true);
if (err)
goto err_remove_notify;
@@ -296,9 +275,6 @@ static int intel_hid_probe(struct platform_device *device)
err_remove_notify:
acpi_remove_notify_handler(handle, ACPI_DEVICE_NOTIFY, notify_handler);
-err_remove_input:
- intel_hid_input_destroy(device);
-
return err;
}
@@ -307,8 +283,7 @@ static int intel_hid_remove(struct platform_device *device)
acpi_handle handle = ACPI_HANDLE(&device->dev);
acpi_remove_notify_handler(handle, ACPI_DEVICE_NOTIFY, notify_handler);
- intel_hid_input_destroy(device);
- intel_hid_set_enable(&device->dev, 0);
+ intel_hid_set_enable(&device->dev, false);
intel_button_array_enable(&device->dev, false);
/*
diff --git a/drivers/platform/x86/intel-vbtn.c b/drivers/platform/x86/intel-vbtn.c
index 554e82ebe83c..c2035e121ac2 100644
--- a/drivers/platform/x86/intel-vbtn.c
+++ b/drivers/platform/x86/intel-vbtn.c
@@ -37,6 +37,10 @@ static const struct acpi_device_id intel_vbtn_ids[] = {
static const struct key_entry intel_vbtn_keymap[] = {
{ KE_IGNORE, 0xC0, { KEY_POWER } }, /* power key press */
{ KE_KEY, 0xC1, { KEY_POWER } }, /* power key release */
+ { KE_KEY, 0xC4, { KEY_VOLUMEUP } }, /* volume-up key press */
+ { KE_IGNORE, 0xC5, { KEY_VOLUMEUP } }, /* volume-up key release */
+ { KE_KEY, 0xC6, { KEY_VOLUMEDOWN } }, /* volume-down key press */
+ { KE_IGNORE, 0xC7, { KEY_VOLUMEDOWN } }, /* volume-down key release */
{ KE_END },
};
diff --git a/drivers/platform/x86/intel_cht_int33fe.c b/drivers/platform/x86/intel_cht_int33fe.c
new file mode 100644
index 000000000000..6a1b2ca5b6fe
--- /dev/null
+++ b/drivers/platform/x86/intel_cht_int33fe.c
@@ -0,0 +1,143 @@
+/*
+ * Intel Cherry Trail ACPI INT33FE pseudo device driver
+ *
+ * Copyright (C) 2017 Hans de Goede <hdegoede@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Some Intel Cherry Trail based devices which ship with Windows 10 have
+ * this weird INT33FE ACPI device with a CRS table with 4 I2cSerialBusV2
+ * resources, for 4 different chips attached to various i2c busses:
+ * 1. The Whiskey Cove pmic, which is also described by the INT34D3 ACPI device
+ * 2. Maxim MAX17047 Fuel Gauge Controller
+ * 3. FUSB302 USB Type-C Controller
+ * 4. PI3USB30532 USB switch
+ *
+ * So this driver is a stub / pseudo driver whose only purpose is to
+ * instantiate i2c-clients for chips 2 - 4, so that standard i2c drivers
+ * for these chips can bind to the them.
+ */
+
+#include <linux/acpi.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#define EXPECTED_PTYPE 4
+
+struct cht_int33fe_data {
+ struct i2c_client *max17047;
+ struct i2c_client *fusb302;
+ struct i2c_client *pi3usb30532;
+};
+
+static int cht_int33fe_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct i2c_board_info board_info;
+ struct cht_int33fe_data *data;
+ unsigned long long ptyp;
+ acpi_status status;
+ int fusb302_irq;
+
+ status = acpi_evaluate_integer(ACPI_HANDLE(dev), "PTYP", NULL, &ptyp);
+ if (ACPI_FAILURE(status)) {
+ dev_err(dev, "Error getting PTYPE\n");
+ return -ENODEV;
+ }
+
+ /*
+	 * The same ACPI HID is used for different configurations; check PTYP
+ * to ensure that we are dealing with the expected config.
+ */
+ if (ptyp != EXPECTED_PTYPE)
+ return -ENODEV;
+
+ /* The FUSB302 uses the irq at index 1 and is the only irq user */
+ fusb302_irq = acpi_dev_gpio_irq_get(ACPI_COMPANION(dev), 1);
+ if (fusb302_irq < 0) {
+ if (fusb302_irq != -EPROBE_DEFER)
+ dev_err(dev, "Error getting FUSB302 irq\n");
+ return fusb302_irq;
+ }
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ memset(&board_info, 0, sizeof(board_info));
+ strlcpy(board_info.type, "max17047", I2C_NAME_SIZE);
+
+ data->max17047 = i2c_acpi_new_device(dev, 1, &board_info);
+ if (!data->max17047)
+ return -EPROBE_DEFER; /* Wait for the i2c-adapter to load */
+
+ memset(&board_info, 0, sizeof(board_info));
+ strlcpy(board_info.type, "fusb302", I2C_NAME_SIZE);
+ board_info.irq = fusb302_irq;
+
+ data->fusb302 = i2c_acpi_new_device(dev, 2, &board_info);
+ if (!data->fusb302)
+ goto out_unregister_max17047;
+
+ memset(&board_info, 0, sizeof(board_info));
+ strlcpy(board_info.type, "pi3usb30532", I2C_NAME_SIZE);
+
+ data->pi3usb30532 = i2c_acpi_new_device(dev, 3, &board_info);
+ if (!data->pi3usb30532)
+ goto out_unregister_fusb302;
+
+ i2c_set_clientdata(client, data);
+
+ return 0;
+
+out_unregister_fusb302:
+ i2c_unregister_device(data->fusb302);
+
+out_unregister_max17047:
+ i2c_unregister_device(data->max17047);
+
+ return -EPROBE_DEFER; /* Wait for the i2c-adapter to load */
+}
+
+static int cht_int33fe_remove(struct i2c_client *i2c)
+{
+ struct cht_int33fe_data *data = i2c_get_clientdata(i2c);
+
+ i2c_unregister_device(data->pi3usb30532);
+ i2c_unregister_device(data->fusb302);
+ i2c_unregister_device(data->max17047);
+
+ return 0;
+}
+
+static const struct i2c_device_id cht_int33fe_i2c_id[] = {
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, cht_int33fe_i2c_id);
+
+static const struct acpi_device_id cht_int33fe_acpi_ids[] = {
+ { "INT33FE", },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, cht_int33fe_acpi_ids);
+
+static struct i2c_driver cht_int33fe_driver = {
+ .driver = {
+ .name = "Intel Cherry Trail ACPI INT33FE driver",
+ .acpi_match_table = ACPI_PTR(cht_int33fe_acpi_ids),
+ },
+ .probe_new = cht_int33fe_probe,
+ .remove = cht_int33fe_remove,
+ .id_table = cht_int33fe_i2c_id,
+ .disable_i2c_core_irq_mapping = true,
+};
+
+module_i2c_driver(cht_int33fe_driver);
+
+MODULE_DESCRIPTION("Intel Cherry Trail ACPI INT33FE pseudo device driver");
+MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/intel_pmc_ipc.c b/drivers/platform/x86/intel_pmc_ipc.c
index 0651d47b8eeb..e4d4dfe3e1d1 100644
--- a/drivers/platform/x86/intel_pmc_ipc.c
+++ b/drivers/platform/x86/intel_pmc_ipc.c
@@ -57,10 +57,6 @@
#define IPC_WRITE_BUFFER 0x80
#define IPC_READ_BUFFER 0x90
-/* PMC Global Control Registers */
-#define GCR_TELEM_DEEP_S0IX_OFFSET 0x1078
-#define GCR_TELEM_SHLW_S0IX_OFFSET 0x1080
-
/* Residency with clock rate at 19.2MHz to usecs */
#define S0IX_RESIDENCY_IN_USECS(d, s) \
({ \
@@ -82,7 +78,7 @@
/* exported resources from IFWI */
#define PLAT_RESOURCE_IPC_INDEX 0
#define PLAT_RESOURCE_IPC_SIZE 0x1000
-#define PLAT_RESOURCE_GCR_OFFSET 0x1008
+#define PLAT_RESOURCE_GCR_OFFSET 0x1000
#define PLAT_RESOURCE_GCR_SIZE 0x1000
#define PLAT_RESOURCE_BIOS_DATA_INDEX 1
#define PLAT_RESOURCE_BIOS_IFACE_INDEX 2
@@ -112,6 +108,13 @@
#define TCO_PMC_OFFSET 0x8
#define TCO_PMC_SIZE 0x4
+/* PMC register bit definitions */
+
+/* PMC_CFG_REG bit masks */
+#define PMC_CFG_NO_REBOOT_MASK (1 << 4)
+#define PMC_CFG_NO_REBOOT_EN (1 << 4)
+#define PMC_CFG_NO_REBOOT_DIS (0 << 4)
+
static struct intel_pmc_ipc_dev {
struct device *dev;
void __iomem *ipc_base;
@@ -126,8 +129,7 @@ static struct intel_pmc_ipc_dev {
struct platform_device *tco_dev;
/* gcr */
- resource_size_t gcr_base;
- int gcr_size;
+ void __iomem *gcr_mem_base;
bool has_gcr_regs;
/* punit */
@@ -196,7 +198,128 @@ static inline u32 ipc_data_readl(u32 offset)
static inline u64 gcr_data_readq(u32 offset)
{
- return readq(ipcdev.ipc_base + offset);
+ return readq(ipcdev.gcr_mem_base + offset);
+}
+
+static inline int is_gcr_valid(u32 offset)
+{
+ if (!ipcdev.has_gcr_regs)
+ return -EACCES;
+
+ if (offset > PLAT_RESOURCE_GCR_SIZE)
+ return -EINVAL;
+
+ return 0;
+}
+
+/**
+ * intel_pmc_gcr_read() - Read PMC GCR register
+ * @offset: offset of GCR register from GCR address base
+ * @data: data pointer for storing the register output
+ *
+ * Reads the PMC GCR register of given offset.
+ *
+ * Return: negative value on error or 0 on success.
+ */
+int intel_pmc_gcr_read(u32 offset, u32 *data)
+{
+ int ret;
+
+ mutex_lock(&ipclock);
+
+ ret = is_gcr_valid(offset);
+ if (ret < 0) {
+ mutex_unlock(&ipclock);
+ return ret;
+ }
+
+ *data = readl(ipcdev.gcr_mem_base + offset);
+
+ mutex_unlock(&ipclock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(intel_pmc_gcr_read);
+
+/**
+ * intel_pmc_gcr_write() - Write PMC GCR register
+ * @offset: offset of GCR register from GCR address base
+ * @data: register update value
+ *
+ * Writes the given value to the PMC GCR register at the
+ * given offset.
+ *
+ * Return: negative value on error or 0 on success.
+ */
+int intel_pmc_gcr_write(u32 offset, u32 data)
+{
+ int ret;
+
+ mutex_lock(&ipclock);
+
+ ret = is_gcr_valid(offset);
+ if (ret < 0) {
+ mutex_unlock(&ipclock);
+ return ret;
+ }
+
+ writel(data, ipcdev.gcr_mem_base + offset);
+
+ mutex_unlock(&ipclock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(intel_pmc_gcr_write);
+
+/**
+ * intel_pmc_gcr_update() - Update PMC GCR register bits
+ * @offset: offset of GCR register from GCR address base
+ * @mask: bit mask for update operation
+ * @val: update value
+ *
+ * Updates the bits of the given GCR register as specified
+ * by @mask and @val.
+ *
+ * Return: negative value on error or 0 on success.
+ */
+int intel_pmc_gcr_update(u32 offset, u32 mask, u32 val)
+{
+ u32 new_val;
+ int ret = 0;
+
+ mutex_lock(&ipclock);
+
+ ret = is_gcr_valid(offset);
+ if (ret < 0)
+ goto gcr_ipc_unlock;
+
+ new_val = readl(ipcdev.gcr_mem_base + offset);
+
+ new_val &= ~mask;
+ new_val |= val & mask;
+
+ writel(new_val, ipcdev.gcr_mem_base + offset);
+
+ new_val = readl(ipcdev.gcr_mem_base + offset);
+
+ /* check whether the bit update is successful */
+ if ((new_val & mask) != (val & mask)) {
+ ret = -EIO;
+ goto gcr_ipc_unlock;
+ }
+
+gcr_ipc_unlock:
+ mutex_unlock(&ipclock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(intel_pmc_gcr_update);
+
+static int update_no_reboot_bit(void *priv, bool set)
+{
+ u32 value = set ? PMC_CFG_NO_REBOOT_EN : PMC_CFG_NO_REBOOT_DIS;
+
+ return intel_pmc_gcr_update(PMC_GCR_PMC_CFG_REG,
+ PMC_CFG_NO_REBOOT_MASK, value);
}
static int intel_pmc_ipc_check_status(void)
@@ -516,15 +639,13 @@ static struct resource tco_res[] = {
{
.flags = IORESOURCE_IO,
},
- /* GCS */
- {
- .flags = IORESOURCE_MEM,
- },
};
static struct itco_wdt_platform_data tco_info = {
.name = "Apollo Lake SoC",
.version = 5,
+ .no_reboot_priv = &ipcdev,
+ .update_no_reboot_bit = update_no_reboot_bit,
};
#define TELEMETRY_RESOURCE_PUNIT_SSRAM 0
@@ -581,10 +702,6 @@ static int ipc_create_tco_device(void)
res->start = ipcdev.acpi_io_base + SMI_EN_OFFSET;
res->end = res->start + SMI_EN_SIZE - 1;
- res = tco_res + TCO_RESOURCE_GCR_MEM;
- res->start = ipcdev.gcr_base + TCO_PMC_OFFSET;
- res->end = res->start + TCO_PMC_SIZE - 1;
-
pdev = platform_device_register_full(&pdevinfo);
if (IS_ERR(pdev))
return PTR_ERR(pdev);
@@ -746,8 +863,7 @@ static int ipc_plat_get_res(struct platform_device *pdev)
}
ipcdev.ipc_base = addr;
- ipcdev.gcr_base = res->start + PLAT_RESOURCE_GCR_OFFSET;
- ipcdev.gcr_size = PLAT_RESOURCE_GCR_SIZE;
+ ipcdev.gcr_mem_base = addr + PLAT_RESOURCE_GCR_OFFSET;
dev_info(&pdev->dev, "ipc res: %pR\n", res);
ipcdev.telem_res_inval = 0;
@@ -782,8 +898,8 @@ int intel_pmc_s0ix_counter_read(u64 *data)
if (!ipcdev.has_gcr_regs)
return -EACCES;
- deep = gcr_data_readq(GCR_TELEM_DEEP_S0IX_OFFSET);
- shlw = gcr_data_readq(GCR_TELEM_SHLW_S0IX_OFFSET);
+ deep = gcr_data_readq(PMC_GCR_TELEM_DEEP_S0IX_REG);
+ shlw = gcr_data_readq(PMC_GCR_TELEM_SHLW_S0IX_REG);
*data = S0IX_RESIDENCY_IN_USECS(deep, shlw);
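As a quick illustration of the newly exported GCR accessors above, here is a minimal consumer sketch; the module and the register offset are hypothetical, and the prototypes are assumed to be available from asm/intel_pmc_ipc.h as elsewhere in this series:

#include <linux/module.h>
#include <linux/bitops.h>
#include <asm/intel_pmc_ipc.h>	/* intel_pmc_gcr_read()/intel_pmc_gcr_update() */

#define EXAMPLE_GCR_REG	0x1008	/* hypothetical offset from the GCR base */

static int __init gcr_example_init(void)
{
	u32 val;
	int ret;

	ret = intel_pmc_gcr_read(EXAMPLE_GCR_REG, &val);
	if (ret)
		return ret;	/* -EACCES when the GCR regs are unavailable */

	/* Masked read-modify-write: set bit 4 only */
	return intel_pmc_gcr_update(EXAMPLE_GCR_REG, BIT(4), BIT(4));
}
module_init(gcr_example_init);

MODULE_LICENSE("GPL");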
diff --git a/drivers/platform/x86/intel_scu_ipc.c b/drivers/platform/x86/intel_scu_ipc.c
index e81daff65f62..f7cf981502cd 100644
--- a/drivers/platform/x86/intel_scu_ipc.c
+++ b/drivers/platform/x86/intel_scu_ipc.c
@@ -491,6 +491,69 @@ int intel_scu_ipc_command(int cmd, int sub, u32 *in, int inlen,
}
EXPORT_SYMBOL(intel_scu_ipc_command);
+#define IPC_SPTR 0x08
+#define IPC_DPTR 0x0C
+
+/**
+ * intel_scu_ipc_raw_command() - IPC command with data and pointers
+ * @cmd: IPC command code.
+ * @sub: IPC command sub type.
+ * @in: input data of this IPC command.
+ * @inlen: input data length in bytes.
+ * @out: output data of this IPC command.
+ * @outlen: output data length in dwords.
+ * @sptr: data writing to SPTR register.
+ * @dptr: data writing to DPTR register.
+ *
+ * Send an IPC command to SCU with input/output data and source/dest pointers.
+ *
+ * Return: an IPC error code or 0 on success.
+ */
+int intel_scu_ipc_raw_command(int cmd, int sub, u8 *in, int inlen,
+ u32 *out, int outlen, u32 dptr, u32 sptr)
+{
+ struct intel_scu_ipc_dev *scu = &ipcdev;
+ int inbuflen = DIV_ROUND_UP(inlen, 4);
+ u32 inbuf[4];
+ int i, err;
+
+ /* Up to 16 bytes */
+ if (inbuflen > 4)
+ return -EINVAL;
+
+ mutex_lock(&ipclock);
+ if (scu->dev == NULL) {
+ mutex_unlock(&ipclock);
+ return -ENODEV;
+ }
+
+ writel(dptr, scu->ipc_base + IPC_DPTR);
+ writel(sptr, scu->ipc_base + IPC_SPTR);
+
+ /*
+ * The SRAM controller doesn't support 8-bit writes, only
+ * 32-bit writes, so the input data is copied into a temporary
+ * buffer; the SCU FW uses inlen to determine the actual input
+ * data length.
+ */
+ memcpy(inbuf, in, inlen);
+
+ for (i = 0; i < inbuflen; i++)
+ ipc_data_writel(scu, inbuf[i], 4 * i);
+
+ ipc_command(scu, (inlen << 16) | (sub << 12) | cmd);
+ err = intel_scu_ipc_check_status(scu);
+ if (!err) {
+ for (i = 0; i < outlen; i++)
+ *out++ = ipc_data_readl(scu, 4 * i);
+ }
+
+ mutex_unlock(&ipclock);
+ return err;
+}
+EXPORT_SYMBOL_GPL(intel_scu_ipc_raw_command);
+
/* I2C commands */
#define IPC_I2C_WRITE 1 /* I2C Write command */
#define IPC_I2C_READ 2 /* I2C Read command */
@@ -566,21 +629,17 @@ static irqreturn_t ioc(int irq, void *dev_id)
*/
static int ipc_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
- int platform; /* Platform type */
int err;
struct intel_scu_ipc_dev *scu = &ipcdev;
struct intel_scu_ipc_pdata_t *pdata;
- platform = intel_mid_identify_cpu();
- if (platform == 0)
- return -ENODEV;
-
if (scu->dev) /* We support only one SCU */
return -EBUSY;
pdata = (struct intel_scu_ipc_pdata_t *)id->driver_data;
+ if (!pdata)
+ return -ENODEV;
- scu->dev = &pdev->dev;
scu->irq_mode = pdata->irq_mode;
err = pcim_enable_device(pdev);
@@ -593,39 +652,34 @@ static int ipc_probe(struct pci_dev *pdev, const struct pci_device_id *id)
init_completion(&scu->cmd_complete);
- err = devm_request_irq(&pdev->dev, pdev->irq, ioc, 0, "intel_scu_ipc",
- scu);
- if (err)
- return err;
-
scu->ipc_base = pcim_iomap_table(pdev)[0];
scu->i2c_base = ioremap_nocache(pdata->i2c_base, pdata->i2c_len);
if (!scu->i2c_base)
return -ENOMEM;
+ err = devm_request_irq(&pdev->dev, pdev->irq, ioc, 0, "intel_scu_ipc",
+ scu);
+ if (err)
+ return err;
+
+ /* Assign device at last */
+ scu->dev = &pdev->dev;
+
intel_scu_devices_create();
pci_set_drvdata(pdev, scu);
return 0;
}
+#define SCU_DEVICE(id, pdata) {PCI_VDEVICE(INTEL, id), (kernel_ulong_t)&pdata}
+
static const struct pci_device_id pci_ids[] = {
- {
- PCI_VDEVICE(INTEL, PCI_DEVICE_ID_LINCROFT),
- (kernel_ulong_t)&intel_scu_ipc_lincroft_pdata,
- }, {
- PCI_VDEVICE(INTEL, PCI_DEVICE_ID_PENWELL),
- (kernel_ulong_t)&intel_scu_ipc_penwell_pdata,
- }, {
- PCI_VDEVICE(INTEL, PCI_DEVICE_ID_CLOVERVIEW),
- (kernel_ulong_t)&intel_scu_ipc_penwell_pdata,
- }, {
- PCI_VDEVICE(INTEL, PCI_DEVICE_ID_TANGIER),
- (kernel_ulong_t)&intel_scu_ipc_tangier_pdata,
- }, {
- 0,
- }
+ SCU_DEVICE(PCI_DEVICE_ID_LINCROFT, intel_scu_ipc_lincroft_pdata),
+ SCU_DEVICE(PCI_DEVICE_ID_PENWELL, intel_scu_ipc_penwell_pdata),
+ SCU_DEVICE(PCI_DEVICE_ID_CLOVERVIEW, intel_scu_ipc_penwell_pdata),
+ SCU_DEVICE(PCI_DEVICE_ID_TANGIER, intel_scu_ipc_tangier_pdata),
+ {}
};
static struct pci_driver ipc_driver = {
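A hedged sketch of how a caller might use the newly exported intel_scu_ipc_raw_command() above; the command and sub-command codes are placeholders, only the calling convention (inlen in bytes, outlen in dwords, plus raw SPTR/DPTR values) follows the kernel-doc in the hunk, and the declaration is assumed to come from asm/intel_scu_ipc.h:

#include <linux/kernel.h>
#include <linux/module.h>
#include <asm/intel_scu_ipc.h>

static int __init scu_raw_example_init(void)
{
	u8 in[4] = { 0x01, 0x02, 0x03, 0x04 };	/* at most 16 bytes */
	u32 out[2];

	/* 0xE0/0x00 are hypothetical command/sub-command codes */
	return intel_scu_ipc_raw_command(0xE0, 0x00, in, sizeof(in),
					 out, ARRAY_SIZE(out), 0, 0);
}
module_init(scu_raw_example_init);

MODULE_LICENSE("GPL");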
diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c
index 42317704629d..9e90827c176a 100644
--- a/drivers/platform/x86/msi-laptop.c
+++ b/drivers/platform/x86/msi-laptop.c
@@ -976,23 +976,15 @@ static int __init msi_laptop_input_setup(void)
err = input_register_device(msi_laptop_input_dev);
if (err)
- goto err_free_keymap;
+ goto err_free_dev;
return 0;
-err_free_keymap:
- sparse_keymap_free(msi_laptop_input_dev);
err_free_dev:
input_free_device(msi_laptop_input_dev);
return err;
}
-static void msi_laptop_input_destroy(void)
-{
- sparse_keymap_free(msi_laptop_input_dev);
- input_unregister_device(msi_laptop_input_dev);
-}
-
static int __init load_scm_model_init(struct platform_device *sdev)
{
u8 data;
@@ -1037,7 +1029,7 @@ static int __init load_scm_model_init(struct platform_device *sdev)
return 0;
fail_filter:
- msi_laptop_input_destroy();
+ input_unregister_device(msi_laptop_input_dev);
fail_input:
rfkill_cleanup();
@@ -1158,7 +1150,7 @@ static void __exit msi_cleanup(void)
{
if (quirks->load_scm_model) {
i8042_remove_filter(msi_laptop_i8042_filter);
- msi_laptop_input_destroy();
+ input_unregister_device(msi_laptop_input_dev);
cancel_delayed_work_sync(&msi_rfkill_dwork);
cancel_work_sync(&msi_rfkill_work);
rfkill_cleanup();
diff --git a/drivers/platform/x86/msi-wmi.c b/drivers/platform/x86/msi-wmi.c
index 9a32f8627ecc..f6209b739ec0 100644
--- a/drivers/platform/x86/msi-wmi.c
+++ b/drivers/platform/x86/msi-wmi.c
@@ -281,14 +281,12 @@ static int __init msi_wmi_input_setup(void)
err = input_register_device(msi_wmi_input_dev);
if (err)
- goto err_free_keymap;
+ goto err_free_dev;
last_pressed = 0;
return 0;
-err_free_keymap:
- sparse_keymap_free(msi_wmi_input_dev);
err_free_dev:
input_free_device(msi_wmi_input_dev);
return err;
@@ -342,10 +340,8 @@ err_uninstall_handler:
if (event_wmi)
wmi_remove_notify_handler(event_wmi->guid);
err_free_input:
- if (event_wmi) {
- sparse_keymap_free(msi_wmi_input_dev);
+ if (event_wmi)
input_unregister_device(msi_wmi_input_dev);
- }
return err;
}
@@ -353,7 +349,6 @@ static void __exit msi_wmi_exit(void)
{
if (event_wmi) {
wmi_remove_notify_handler(event_wmi->guid);
- sparse_keymap_free(msi_wmi_input_dev);
input_unregister_device(msi_wmi_input_dev);
}
backlight_device_unregister(backlight);
diff --git a/drivers/platform/x86/panasonic-laptop.c b/drivers/platform/x86/panasonic-laptop.c
index 59b8eb626dcc..975f4e100dbd 100644
--- a/drivers/platform/x86/panasonic-laptop.c
+++ b/drivers/platform/x86/panasonic-laptop.c
@@ -520,29 +520,17 @@ static int acpi_pcc_init_input(struct pcc_acpi *pcc)
if (error) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Unable to register input device\n"));
- goto err_free_keymap;
+ goto err_free_dev;
}
pcc->input_dev = input_dev;
return 0;
- err_free_keymap:
- sparse_keymap_free(input_dev);
err_free_dev:
input_free_device(input_dev);
return error;
}
-static void acpi_pcc_destroy_input(struct pcc_acpi *pcc)
-{
- sparse_keymap_free(pcc->input_dev);
- input_unregister_device(pcc->input_dev);
- /*
- * No need to input_free_device() since core input API refcounts
- * and free()s the device.
- */
-}
-
/* kernel module interface */
#ifdef CONFIG_PM_SLEEP
@@ -640,7 +628,7 @@ static int acpi_pcc_hotkey_add(struct acpi_device *device)
out_backlight:
backlight_device_unregister(pcc->backlight);
out_input:
- acpi_pcc_destroy_input(pcc);
+ input_unregister_device(pcc->input_dev);
out_sinf:
kfree(pcc->sinf);
out_hotkey:
@@ -660,7 +648,7 @@ static int acpi_pcc_hotkey_remove(struct acpi_device *device)
backlight_device_unregister(pcc->backlight);
- acpi_pcc_destroy_input(pcc);
+ input_unregister_device(pcc->input_dev);
kfree(pcc->sinf);
kfree(pcc);
diff --git a/drivers/platform/x86/silead_dmi.c b/drivers/platform/x86/silead_dmi.c
index 02e11fdbf375..a3a57d93cf06 100644
--- a/drivers/platform/x86/silead_dmi.c
+++ b/drivers/platform/x86/silead_dmi.c
@@ -22,10 +22,10 @@
struct silead_ts_dmi_data {
const char *acpi_name;
- struct property_entry *properties;
+ const struct property_entry *properties;
};
-static struct property_entry cube_iwork8_air_props[] = {
+static const struct property_entry cube_iwork8_air_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 1660),
PROPERTY_ENTRY_U32("touchscreen-size-y", 900),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
@@ -39,7 +39,7 @@ static const struct silead_ts_dmi_data cube_iwork8_air_data = {
.properties = cube_iwork8_air_props,
};
-static struct property_entry jumper_ezpad_mini3_props[] = {
+static const struct property_entry jumper_ezpad_mini3_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 1700),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1150),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
@@ -53,6 +53,33 @@ static const struct silead_ts_dmi_data jumper_ezpad_mini3_data = {
.properties = jumper_ezpad_mini3_props,
};
+static const struct property_entry dexp_ursus_7w_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 890),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 630),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl1686-dexp-ursus-7w.fw"),
+ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ { }
+};
+
+static const struct silead_ts_dmi_data dexp_ursus_7w_data = {
+ .acpi_name = "MSSL1680:00",
+ .properties = dexp_ursus_7w_props,
+};
+
+static const struct property_entry surftab_wintron70_st70416_6_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 884),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 632),
+ PROPERTY_ENTRY_STRING("firmware-name",
+ "gsl1686-surftab-wintron70-st70416-6.fw"),
+ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ { }
+};
+
+static const struct silead_ts_dmi_data surftab_wintron70_st70416_6_data = {
+ .acpi_name = "MSSL1680:00",
+ .properties = surftab_wintron70_st70416_6_props,
+};
+
static const struct dmi_system_id silead_ts_dmi_table[] = {
{
/* CUBE iwork8 Air */
@@ -72,24 +99,37 @@ static const struct dmi_system_id silead_ts_dmi_table[] = {
DMI_MATCH(DMI_BIOS_VERSION, "jumperx.T87.KFBNEEA"),
},
},
+ {
+ /* DEXP Ursus 7W */
+ .driver_data = (void *)&dexp_ursus_7w_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Insyde"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "7W"),
+ },
+ },
+ {
+ /* Trekstor Surftab Wintron 7.0 ST70416-6 */
+ .driver_data = (void *)&surftab_wintron70_st70416_6_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Insyde"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ST70416-6"),
+ /* Exact match, different versions need different fw */
+ DMI_MATCH(DMI_BIOS_VERSION, "TREK.G.WI71C.JGBMRBA04"),
+ },
+ },
{ },
};
-static void silead_ts_dmi_add_props(struct device *dev)
+static const struct silead_ts_dmi_data *silead_ts_data;
+
+static void silead_ts_dmi_add_props(struct i2c_client *client)
{
- struct i2c_client *client = to_i2c_client(dev);
- const struct dmi_system_id *dmi_id;
- const struct silead_ts_dmi_data *ts_data;
+ struct device *dev = &client->dev;
int error;
- dmi_id = dmi_first_match(silead_ts_dmi_table);
- if (!dmi_id)
- return;
-
- ts_data = dmi_id->driver_data;
if (has_acpi_companion(dev) &&
- !strncmp(ts_data->acpi_name, client->name, I2C_NAME_SIZE)) {
- error = device_add_properties(dev, ts_data->properties);
+ !strncmp(silead_ts_data->acpi_name, client->name, I2C_NAME_SIZE)) {
+ error = device_add_properties(dev, silead_ts_data->properties);
if (error)
dev_err(dev, "failed to add properties: %d\n", error);
}
@@ -99,10 +139,13 @@ static int silead_ts_dmi_notifier_call(struct notifier_block *nb,
unsigned long action, void *data)
{
struct device *dev = data;
+ struct i2c_client *client;
switch (action) {
case BUS_NOTIFY_ADD_DEVICE:
- silead_ts_dmi_add_props(dev);
+ client = i2c_verify_client(dev);
+ if (client)
+ silead_ts_dmi_add_props(client);
break;
default:
@@ -118,8 +161,15 @@ static struct notifier_block silead_ts_dmi_notifier = {
static int __init silead_ts_dmi_init(void)
{
+ const struct dmi_system_id *dmi_id;
int error;
+ dmi_id = dmi_first_match(silead_ts_dmi_table);
+ if (!dmi_id)
+ return 0; /* Not an error */
+
+ silead_ts_data = dmi_id->driver_data;
+
error = bus_register_notifier(&i2c_bus_type, &silead_ts_dmi_notifier);
if (error)
pr_err("%s: failed to register i2c bus notifier: %d\n",
diff --git a/drivers/platform/x86/surface3_button.c b/drivers/platform/x86/surface3_button.c
index 8bfd7f613d36..57f51476bb65 100644
--- a/drivers/platform/x86/surface3_button.c
+++ b/drivers/platform/x86/surface3_button.c
@@ -196,9 +196,10 @@ static int surface3_button_probe(struct i2c_client *client,
strlen(SURFACE_BUTTON_OBJ_NAME)))
return -ENODEV;
- if (gpiod_count(dev, KBUILD_MODNAME) <= 0) {
+ error = gpiod_count(dev, NULL);
+ if (error < 0) {
dev_dbg(dev, "no GPIO attached, ignoring...\n");
- return -ENODEV;
+ return error;
}
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 1d18b32628ec..7b6cb0c69b02 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -1922,7 +1922,9 @@ enum { /* hot key scan codes (derived from ACPI DSDT) */
TP_ACPI_HOTKEYSCAN_UNK7,
TP_ACPI_HOTKEYSCAN_UNK8,
- TP_ACPI_HOTKEYSCAN_MUTE2,
+ /* Adaptive keyboard keycodes */
+ TP_ACPI_HOTKEYSCAN_ADAPTIVE_START,
+ TP_ACPI_HOTKEYSCAN_MUTE2 = TP_ACPI_HOTKEYSCAN_ADAPTIVE_START,
TP_ACPI_HOTKEYSCAN_BRIGHTNESS_ZERO,
TP_ACPI_HOTKEYSCAN_CLIPPING_TOOL,
TP_ACPI_HOTKEYSCAN_CLOUD,
@@ -1943,6 +1945,15 @@ enum { /* hot key scan codes (derived from ACPI DSDT) */
TP_ACPI_HOTKEYSCAN_CAMERA_MODE,
TP_ACPI_HOTKEYSCAN_ROTATE_DISPLAY,
+ /* Lenovo extended keymap, starting at 0x1300 */
+ TP_ACPI_HOTKEYSCAN_EXTENDED_START,
+ /* first new observed key (star, favorites) is 0x1311 */
+ TP_ACPI_HOTKEYSCAN_STAR = 69,
+ TP_ACPI_HOTKEYSCAN_CLIPPING_TOOL2,
+ TP_ACPI_HOTKEYSCAN_UNK25,
+ TP_ACPI_HOTKEYSCAN_BLUETOOTH,
+ TP_ACPI_HOTKEYSCAN_KEYBOARD,
+
/* Hotkey keymap size */
TPACPI_HOTKEY_MAP_LEN
};
@@ -3250,6 +3261,15 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,
KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,
KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,
+
+ /* No assignment, used for newer Lenovo models */
+ KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,
+ KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,
+ KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,
+ KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,
+ KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,
+ KEY_UNKNOWN, KEY_UNKNOWN
+
},
/* Generic keymap for Lenovo ThinkPads */
@@ -3335,6 +3355,29 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
KEY_RESERVED, /* Microphone cancellation */
KEY_RESERVED, /* Camera mode */
KEY_RESERVED, /* Rotate display, 0x116 */
+
+ /*
+ * These are found in 2017 models (e.g. T470s, X270).
+ * The lowest known value is 0x311, which according to
+ * the manual should launch a user-defined favorite
+ * application.
+ *
+ * The offset for these is TP_ACPI_HOTKEYSCAN_EXTENDED_START,
+ * corresponding to 0x34.
+ */
+
+ /* (assignments unknown, please report if found) */
+ KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,
+ KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,
+ KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,
+ KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,
+ KEY_UNKNOWN,
+
+ KEY_FAVORITES, /* Favorite app, 0x311 */
+ KEY_RESERVED, /* Clipping tool */
+ KEY_RESERVED,
+ KEY_BLUETOOTH, /* Bluetooth */
+ KEY_KEYBOARD /* Keyboard, 0x315 */
},
};
@@ -3656,7 +3699,6 @@ static const int adaptive_keyboard_modes[] = {
#define DFR_CHANGE_ROW 0x101
#define DFR_SHOW_QUICKVIEW_ROW 0x102
#define FIRST_ADAPTIVE_KEY 0x103
-#define ADAPTIVE_KEY_OFFSET 0x020
/* Press the Fn key for a second and it will switch to Function Mode. Then
* release the Fn key and the previous mode will be restored.
@@ -3746,13 +3788,15 @@ static bool adaptive_keyboard_hotkey_notify_hotkey(unsigned int scancode)
default:
if (scancode < FIRST_ADAPTIVE_KEY ||
- scancode >= FIRST_ADAPTIVE_KEY + TPACPI_HOTKEY_MAP_LEN -
- ADAPTIVE_KEY_OFFSET) {
+ scancode >= FIRST_ADAPTIVE_KEY +
+ TP_ACPI_HOTKEYSCAN_EXTENDED_START -
+ TP_ACPI_HOTKEYSCAN_ADAPTIVE_START) {
pr_info("Unhandled adaptive keyboard key: 0x%x\n",
scancode);
return false;
}
- keycode = hotkey_keycode_map[scancode - FIRST_ADAPTIVE_KEY + ADAPTIVE_KEY_OFFSET];
+ keycode = hotkey_keycode_map[scancode - FIRST_ADAPTIVE_KEY +
+ TP_ACPI_HOTKEYSCAN_ADAPTIVE_START];
if (keycode != KEY_RESERVED) {
mutex_lock(&tpacpi_inputdev_send_mutex);
@@ -3777,19 +3821,44 @@ static bool hotkey_notify_hotkey(const u32 hkey,
*send_acpi_ev = true;
*ignore_acpi_ev = false;
- /* HKEY event 0x1001 is scancode 0x00 */
- if (scancode > 0 && scancode <= TPACPI_HOTKEY_MAP_LEN) {
- scancode--;
- if (!(hotkey_source_mask & (1 << scancode))) {
- tpacpi_input_send_key_masked(scancode);
- *send_acpi_ev = false;
- } else {
- *ignore_acpi_ev = true;
+ /*
+ * Original events are in the 0x10XX range, the adaptive keyboard
+ * found in the 2014 X1 Carbon emits events in the 0x11XX range. In 2017
+ * models, additional keys are emitted through 0x13XX.
+ */
+ switch ((hkey >> 8) & 0xf) {
+ case 0:
+ if (scancode > 0 &&
+ scancode <= TP_ACPI_HOTKEYSCAN_ADAPTIVE_START) {
+ /* HKEY event 0x1001 is scancode 0x00 */
+ scancode--;
+ if (!(hotkey_source_mask & (1 << scancode))) {
+ tpacpi_input_send_key_masked(scancode);
+ *send_acpi_ev = false;
+ } else {
+ *ignore_acpi_ev = true;
+ }
+ return true;
}
- return true;
- } else {
+ break;
+
+ case 1:
return adaptive_keyboard_hotkey_notify_hotkey(scancode);
+
+ case 3:
+ /* Extended keycodes start at 0x300 and our offset into the map
+ * is TP_ACPI_HOTKEYSCAN_EXTENDED_START. The calculated scancode
+ * will be positive, but might not be in the correct range.
+ */
+ scancode -= (0x300 - TP_ACPI_HOTKEYSCAN_EXTENDED_START);
+ if (scancode >= TP_ACPI_HOTKEYSCAN_EXTENDED_START &&
+ scancode < TPACPI_HOTKEY_MAP_LEN) {
+ tpacpi_input_send_key(scancode);
+ return true;
+ }
+ break;
}
+
return false;
}
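To make the case 3 rebasing above concrete, a small stand-alone sketch in plain C; it assumes scancode is derived as hkey & 0xfff, as in the unchanged head of hotkey_notify_hotkey(), and uses the 0x34 value the keymap comment gives for TP_ACPI_HOTKEYSCAN_EXTENDED_START:

#include <stdio.h>

#define TP_ACPI_HOTKEYSCAN_EXTENDED_START	0x34	/* per the keymap comment */

int main(void)
{
	unsigned int hkey = 0x1315;		/* "Keyboard" key on 2017 models */
	unsigned int scancode = hkey & 0xfff;	/* 0x315 */

	/* Same rebasing as hotkey_notify_hotkey(), case 3 */
	scancode -= 0x300 - TP_ACPI_HOTKEYSCAN_EXTENDED_START;

	/* Prints 0x49 (73), the KEY_KEYBOARD slot at the end of the keymap */
	printf("keymap index: 0x%x\n", scancode);
	return 0;
}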
diff --git a/drivers/platform/x86/topstar-laptop.c b/drivers/platform/x86/topstar-laptop.c
index e597de05e6c2..70205d222da9 100644
--- a/drivers/platform/x86/topstar-laptop.c
+++ b/drivers/platform/x86/topstar-laptop.c
@@ -113,14 +113,12 @@ static int acpi_topstar_init_hkey(struct topstar_hkey *hkey)
error = input_register_device(input);
if (error) {
pr_err("Unable to register input device\n");
- goto err_free_keymap;
+ goto err_free_dev;
}
hkey->inputdev = input;
return 0;
- err_free_keymap:
- sparse_keymap_free(input);
err_free_dev:
input_free_device(input);
return error;
@@ -157,7 +155,6 @@ static int acpi_topstar_remove(struct acpi_device *device)
acpi_topstar_fncx_switch(device, false);
- sparse_keymap_free(tps_hkey->inputdev);
input_unregister_device(tps_hkey->inputdev);
kfree(tps_hkey);
diff --git a/drivers/platform/x86/toshiba-wmi.c b/drivers/platform/x86/toshiba-wmi.c
index 2df07ee8f3c3..440528676170 100644
--- a/drivers/platform/x86/toshiba-wmi.c
+++ b/drivers/platform/x86/toshiba-wmi.c
@@ -96,7 +96,7 @@ static int __init toshiba_wmi_input_setup(void)
toshiba_wmi_notify, NULL);
if (ACPI_FAILURE(status)) {
err = -EIO;
- goto err_free_keymap;
+ goto err_free_dev;
}
err = input_register_device(toshiba_wmi_input_dev);
@@ -107,8 +107,6 @@ static int __init toshiba_wmi_input_setup(void)
err_remove_notifier:
wmi_remove_notify_handler(WMI_EVENT_GUID);
- err_free_keymap:
- sparse_keymap_free(toshiba_wmi_input_dev);
err_free_dev:
input_free_device(toshiba_wmi_input_dev);
return err;
@@ -117,7 +115,6 @@ static int __init toshiba_wmi_input_setup(void)
static void toshiba_wmi_input_destroy(void)
{
wmi_remove_notify_handler(WMI_EVENT_GUID);
- sparse_keymap_free(toshiba_wmi_input_dev);
input_unregister_device(toshiba_wmi_input_dev);
}
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index 074bf2fa1c55..d0daf75cbed1 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -2849,7 +2849,7 @@ static int toshiba_acpi_setup_keyboard(struct toshiba_acpi_dev *dev)
error = i8042_install_filter(toshiba_acpi_i8042_filter);
if (error) {
pr_err("Error installing key filter\n");
- goto err_free_keymap;
+ goto err_free_dev;
}
dev->ntfy_supported = 1;
@@ -2880,8 +2880,6 @@ static int toshiba_acpi_setup_keyboard(struct toshiba_acpi_dev *dev)
err_remove_filter:
if (dev->ntfy_supported)
i8042_remove_filter(toshiba_acpi_i8042_filter);
- err_free_keymap:
- sparse_keymap_free(dev->hotkey_dev);
err_free_dev:
input_free_device(dev->hotkey_dev);
dev->hotkey_dev = NULL;
@@ -3018,10 +3016,8 @@ static int toshiba_acpi_remove(struct acpi_device *acpi_dev)
cancel_work_sync(&dev->hotkey_work);
}
- if (dev->hotkey_dev) {
+ if (dev->hotkey_dev)
input_unregister_device(dev->hotkey_dev);
- sparse_keymap_free(dev->hotkey_dev);
- }
backlight_device_unregister(dev->backlight_dev);
diff --git a/drivers/powercap/intel_rapl.c b/drivers/powercap/intel_rapl.c
index 9a25110c4a46..9ddad0815ba9 100644
--- a/drivers/powercap/intel_rapl.c
+++ b/drivers/powercap/intel_rapl.c
@@ -1164,6 +1164,7 @@ static const struct x86_cpu_id rapl_ids[] __initconst = {
RAPL_CPU(INTEL_FAM6_ATOM_MERRIFIELD, rapl_defaults_tng),
RAPL_CPU(INTEL_FAM6_ATOM_MOOREFIELD, rapl_defaults_ann),
RAPL_CPU(INTEL_FAM6_ATOM_GOLDMONT, rapl_defaults_core),
+ RAPL_CPU(INTEL_FAM6_ATOM_GEMINI_LAKE, rapl_defaults_core),
RAPL_CPU(INTEL_FAM6_ATOM_DENVERTON, rapl_defaults_core),
RAPL_CPU(INTEL_FAM6_XEON_PHI_KNL, rapl_defaults_hsw_server),
diff --git a/drivers/pps/pps.c b/drivers/pps/pps.c
index 2f07cd615665..6eb0db37dd88 100644
--- a/drivers/pps/pps.c
+++ b/drivers/pps/pps.c
@@ -64,6 +64,43 @@ static int pps_cdev_fasync(int fd, struct file *file, int on)
return fasync_helper(fd, file, on, &pps->async_queue);
}
+static int pps_cdev_pps_fetch(struct pps_device *pps, struct pps_fdata *fdata)
+{
+ unsigned int ev = pps->last_ev;
+ int err = 0;
+
+ /* Manage the timeout */
+ if (fdata->timeout.flags & PPS_TIME_INVALID)
+ err = wait_event_interruptible(pps->queue,
+ ev != pps->last_ev);
+ else {
+ unsigned long ticks;
+
+ dev_dbg(pps->dev, "timeout %lld.%09d\n",
+ (long long) fdata->timeout.sec,
+ fdata->timeout.nsec);
+ ticks = fdata->timeout.sec * HZ;
+ ticks += fdata->timeout.nsec / (NSEC_PER_SEC / HZ);
+
+ if (ticks != 0) {
+ err = wait_event_interruptible_timeout(
+ pps->queue,
+ ev != pps->last_ev,
+ ticks);
+ if (err == 0)
+ return -ETIMEDOUT;
+ }
+ }
+
+ /* Check for pending signals */
+ if (err == -ERESTARTSYS) {
+ dev_dbg(pps->dev, "pending signal caught\n");
+ return -EINTR;
+ }
+
+ return 0;
+}
+
static long pps_cdev_ioctl(struct file *file,
unsigned int cmd, unsigned long arg)
{
@@ -144,7 +181,6 @@ static long pps_cdev_ioctl(struct file *file,
case PPS_FETCH: {
struct pps_fdata fdata;
- unsigned int ev;
dev_dbg(pps->dev, "PPS_FETCH\n");
@@ -152,36 +188,9 @@ static long pps_cdev_ioctl(struct file *file,
if (err)
return -EFAULT;
- ev = pps->last_ev;
-
- /* Manage the timeout */
- if (fdata.timeout.flags & PPS_TIME_INVALID)
- err = wait_event_interruptible(pps->queue,
- ev != pps->last_ev);
- else {
- unsigned long ticks;
-
- dev_dbg(pps->dev, "timeout %lld.%09d\n",
- (long long) fdata.timeout.sec,
- fdata.timeout.nsec);
- ticks = fdata.timeout.sec * HZ;
- ticks += fdata.timeout.nsec / (NSEC_PER_SEC / HZ);
-
- if (ticks != 0) {
- err = wait_event_interruptible_timeout(
- pps->queue,
- ev != pps->last_ev,
- ticks);
- if (err == 0)
- return -ETIMEDOUT;
- }
- }
-
- /* Check for pending signals */
- if (err == -ERESTARTSYS) {
- dev_dbg(pps->dev, "pending signal caught\n");
- return -EINTR;
- }
+ err = pps_cdev_pps_fetch(pps, &fdata);
+ if (err)
+ return err;
/* Return the fetched timestamp */
spin_lock_irq(&pps->lock);
@@ -242,6 +251,57 @@ static long pps_cdev_ioctl(struct file *file,
return 0;
}
+#ifdef CONFIG_COMPAT
+static long pps_cdev_compat_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ struct pps_device *pps = file->private_data;
+ void __user *uarg = (void __user *) arg;
+
+ cmd = _IOC(_IOC_DIR(cmd), _IOC_TYPE(cmd), _IOC_NR(cmd), sizeof(void *));
+
+ if (cmd == PPS_FETCH) {
+ struct pps_fdata_compat compat;
+ struct pps_fdata fdata;
+ int err;
+
+ dev_dbg(pps->dev, "PPS_FETCH\n");
+
+ err = copy_from_user(&compat, uarg, sizeof(struct pps_fdata_compat));
+ if (err)
+ return -EFAULT;
+
+ memcpy(&fdata.timeout, &compat.timeout,
+ sizeof(struct pps_ktime_compat));
+
+ err = pps_cdev_pps_fetch(pps, &fdata);
+ if (err)
+ return err;
+
+ /* Return the fetched timestamp */
+ spin_lock_irq(&pps->lock);
+
+ compat.info.assert_sequence = pps->assert_sequence;
+ compat.info.clear_sequence = pps->clear_sequence;
+ compat.info.current_mode = pps->current_mode;
+
+ memcpy(&compat.info.assert_tu, &pps->assert_tu,
+ sizeof(struct pps_ktime_compat));
+ memcpy(&compat.info.clear_tu, &pps->clear_tu,
+ sizeof(struct pps_ktime_compat));
+
+ spin_unlock_irq(&pps->lock);
+
+ return copy_to_user(uarg, &compat,
+ sizeof(struct pps_fdata_compat)) ? -EFAULT : 0;
+ }
+
+ return pps_cdev_ioctl(file, cmd, arg);
+}
+#else
+#define pps_cdev_compat_ioctl NULL
+#endif
+
static int pps_cdev_open(struct inode *inode, struct file *file)
{
struct pps_device *pps = container_of(inode->i_cdev,
@@ -268,6 +328,7 @@ static const struct file_operations pps_cdev_fops = {
.llseek = no_llseek,
.poll = pps_cdev_poll,
.fasync = pps_cdev_fasync,
+ .compat_ioctl = pps_cdev_compat_ioctl,
.unlocked_ioctl = pps_cdev_ioctl,
.open = pps_cdev_open,
.release = pps_cdev_release,
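For completeness, a minimal user-space sketch of the PPS_FETCH path that pps_cdev_pps_fetch() (and its new compat wrapper) serves; /dev/pps0 is only an example node and error handling is kept to the bare minimum:

#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/pps.h>

int main(void)
{
	struct pps_fdata fdata;
	int fd = open("/dev/pps0", O_RDONLY);

	if (fd < 0)
		return 1;

	/* A valid (non PPS_TIME_INVALID) timeout: wait up to 3 seconds */
	memset(&fdata, 0, sizeof(fdata));
	fdata.timeout.sec = 3;
	fdata.timeout.nsec = 0;
	fdata.timeout.flags = 0;

	if (ioctl(fd, PPS_FETCH, &fdata) < 0)	/* ETIMEDOUT, EINTR, ... */
		return 1;

	printf("assert #%u at %lld.%09d\n",
	       fdata.info.assert_sequence,
	       (long long)fdata.info.assert_tu.sec,
	       fdata.info.assert_tu.nsec);

	close(fd);
	return 0;
}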
diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig
index 42e37c20b361..313c10789ca2 100644
--- a/drivers/pwm/Kconfig
+++ b/drivers/pwm/Kconfig
@@ -293,6 +293,15 @@ config PWM_MTK_DISP
To compile this driver as a module, choose M here: the module
will be called pwm-mtk-disp.
+config PWM_MEDIATEK
+ tristate "MediaTek PWM support"
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ help
+ Generic PWM framework driver for MediaTek ARM SoCs.
+
+ To compile this driver as a module, choose M here: the module
+ will be called pwm-mediatek.
+
config PWM_MXS
tristate "Freescale MXS PWM support"
depends on ARCH_MXS && OF
diff --git a/drivers/pwm/Makefile b/drivers/pwm/Makefile
index 346a83b00f28..93da1f79a3b8 100644
--- a/drivers/pwm/Makefile
+++ b/drivers/pwm/Makefile
@@ -26,6 +26,7 @@ obj-$(CONFIG_PWM_LPSS) += pwm-lpss.o
obj-$(CONFIG_PWM_LPSS_PCI) += pwm-lpss-pci.o
obj-$(CONFIG_PWM_LPSS_PLATFORM) += pwm-lpss-platform.o
obj-$(CONFIG_PWM_MESON) += pwm-meson.o
+obj-$(CONFIG_PWM_MEDIATEK) += pwm-mediatek.o
obj-$(CONFIG_PWM_MTK_DISP) += pwm-mtk-disp.o
obj-$(CONFIG_PWM_MXS) += pwm-mxs.o
obj-$(CONFIG_PWM_OMAP_DMTIMER) += pwm-omap-dmtimer.o
diff --git a/drivers/pwm/pwm-atmel-hlcdc.c b/drivers/pwm/pwm-atmel-hlcdc.c
index 999187277ea5..54c6633d9b5d 100644
--- a/drivers/pwm/pwm-atmel-hlcdc.c
+++ b/drivers/pwm/pwm-atmel-hlcdc.c
@@ -49,172 +49,181 @@ static inline struct atmel_hlcdc_pwm *to_atmel_hlcdc_pwm(struct pwm_chip *chip)
return container_of(chip, struct atmel_hlcdc_pwm, chip);
}
-static int atmel_hlcdc_pwm_config(struct pwm_chip *c,
- struct pwm_device *pwm,
- int duty_ns, int period_ns)
+static int atmel_hlcdc_pwm_apply(struct pwm_chip *c, struct pwm_device *pwm,
+ struct pwm_state *state)
{
struct atmel_hlcdc_pwm *chip = to_atmel_hlcdc_pwm(c);
struct atmel_hlcdc *hlcdc = chip->hlcdc;
- struct clk *new_clk = hlcdc->slow_clk;
- u64 pwmcval = duty_ns * 256;
- unsigned long clk_freq;
- u64 clk_period_ns;
- u32 pwmcfg;
- int pres;
-
- if (!chip->errata || !chip->errata->slow_clk_erratum) {
- clk_freq = clk_get_rate(new_clk);
- if (!clk_freq)
- return -EINVAL;
+ unsigned int status;
+ int ret;
- clk_period_ns = (u64)NSEC_PER_SEC * 256;
- do_div(clk_period_ns, clk_freq);
- }
+ if (state->enabled) {
+ struct clk *new_clk = hlcdc->slow_clk;
+ u64 pwmcval = state->duty_cycle * 256;
+ unsigned long clk_freq;
+ u64 clk_period_ns;
+ u32 pwmcfg;
+ int pres;
+
+ if (!chip->errata || !chip->errata->slow_clk_erratum) {
+ clk_freq = clk_get_rate(new_clk);
+ if (!clk_freq)
+ return -EINVAL;
+
+ clk_period_ns = (u64)NSEC_PER_SEC * 256;
+ do_div(clk_period_ns, clk_freq);
+ }
+
+ /* Errata: cannot use slow clk on some IP revisions */
+ if ((chip->errata && chip->errata->slow_clk_erratum) ||
+ clk_period_ns > state->period) {
+ new_clk = hlcdc->sys_clk;
+ clk_freq = clk_get_rate(new_clk);
+ if (!clk_freq)
+ return -EINVAL;
+
+ clk_period_ns = (u64)NSEC_PER_SEC * 256;
+ do_div(clk_period_ns, clk_freq);
+ }
+
+ for (pres = 0; pres <= ATMEL_HLCDC_PWMPS_MAX; pres++) {
+ /* Errata: cannot divide by 1 on some IP revisions */
+ if (!pres && chip->errata &&
+ chip->errata->div1_clk_erratum)
+ continue;
+
+ if ((clk_period_ns << pres) >= state->period)
+ break;
+ }
- /* Errata: cannot use slow clk on some IP revisions */
- if ((chip->errata && chip->errata->slow_clk_erratum) ||
- clk_period_ns > period_ns) {
- new_clk = hlcdc->sys_clk;
- clk_freq = clk_get_rate(new_clk);
- if (!clk_freq)
+ if (pres > ATMEL_HLCDC_PWMPS_MAX)
return -EINVAL;
- clk_period_ns = (u64)NSEC_PER_SEC * 256;
- do_div(clk_period_ns, clk_freq);
- }
+ pwmcfg = ATMEL_HLCDC_PWMPS(pres);
- for (pres = 0; pres <= ATMEL_HLCDC_PWMPS_MAX; pres++) {
- /* Errata: cannot divide by 1 on some IP revisions */
- if (!pres && chip->errata && chip->errata->div1_clk_erratum)
- continue;
+ if (new_clk != chip->cur_clk) {
+ u32 gencfg = 0;
+ int ret;
- if ((clk_period_ns << pres) >= period_ns)
- break;
- }
+ ret = clk_prepare_enable(new_clk);
+ if (ret)
+ return ret;
- if (pres > ATMEL_HLCDC_PWMPS_MAX)
- return -EINVAL;
+ clk_disable_unprepare(chip->cur_clk);
+ chip->cur_clk = new_clk;
- pwmcfg = ATMEL_HLCDC_PWMPS(pres);
+ if (new_clk == hlcdc->sys_clk)
+ gencfg = ATMEL_HLCDC_CLKPWMSEL;
- if (new_clk != chip->cur_clk) {
- u32 gencfg = 0;
- int ret;
+ ret = regmap_update_bits(hlcdc->regmap,
+ ATMEL_HLCDC_CFG(0),
+ ATMEL_HLCDC_CLKPWMSEL,
+ gencfg);
+ if (ret)
+ return ret;
+ }
- ret = clk_prepare_enable(new_clk);
- if (ret)
- return ret;
+ do_div(pwmcval, state->period);
- clk_disable_unprepare(chip->cur_clk);
- chip->cur_clk = new_clk;
+ /*
+ * The PWM duty cycle is configurable from 0/256 to 255/256 of
+ * the period cycle. Hence we can't set a duty cycle occupying
+ * the whole period cycle if we're asked to.
+ * Set it to 255 if pwmcval is greater than 256.
+ */
+ if (pwmcval > 255)
+ pwmcval = 255;
- if (new_clk == hlcdc->sys_clk)
- gencfg = ATMEL_HLCDC_CLKPWMSEL;
+ pwmcfg |= ATMEL_HLCDC_PWMCVAL(pwmcval);
- ret = regmap_update_bits(hlcdc->regmap, ATMEL_HLCDC_CFG(0),
- ATMEL_HLCDC_CLKPWMSEL, gencfg);
+ if (state->polarity == PWM_POLARITY_NORMAL)
+ pwmcfg |= ATMEL_HLCDC_PWMPOL;
+
+ ret = regmap_update_bits(hlcdc->regmap, ATMEL_HLCDC_CFG(6),
+ ATMEL_HLCDC_PWMCVAL_MASK |
+ ATMEL_HLCDC_PWMPS_MASK |
+ ATMEL_HLCDC_PWMPOL,
+ pwmcfg);
if (ret)
return ret;
- }
- do_div(pwmcval, period_ns);
-
- /*
- * The PWM duty cycle is configurable from 0/256 to 255/256 of the
- * period cycle. Hence we can't set a duty cycle occupying the
- * whole period cycle if we're asked to.
- * Set it to 255 if pwmcval is greater than 256.
- */
- if (pwmcval > 255)
- pwmcval = 255;
-
- pwmcfg |= ATMEL_HLCDC_PWMCVAL(pwmcval);
+ ret = regmap_write(hlcdc->regmap, ATMEL_HLCDC_EN,
+ ATMEL_HLCDC_PWM);
+ if (ret)
+ return ret;
- return regmap_update_bits(hlcdc->regmap, ATMEL_HLCDC_CFG(6),
- ATMEL_HLCDC_PWMCVAL_MASK |
- ATMEL_HLCDC_PWMPS_MASK,
- pwmcfg);
-}
+ ret = regmap_read_poll_timeout(hlcdc->regmap, ATMEL_HLCDC_SR,
+ status,
+ status & ATMEL_HLCDC_PWM,
+ 10, 0);
+ if (ret)
+ return ret;
+ } else {
+ ret = regmap_write(hlcdc->regmap, ATMEL_HLCDC_DIS,
+ ATMEL_HLCDC_PWM);
+ if (ret)
+ return ret;
-static int atmel_hlcdc_pwm_set_polarity(struct pwm_chip *c,
- struct pwm_device *pwm,
- enum pwm_polarity polarity)
-{
- struct atmel_hlcdc_pwm *chip = to_atmel_hlcdc_pwm(c);
- struct atmel_hlcdc *hlcdc = chip->hlcdc;
- u32 cfg = 0;
+ ret = regmap_read_poll_timeout(hlcdc->regmap, ATMEL_HLCDC_SR,
+ status,
+ !(status & ATMEL_HLCDC_PWM),
+ 10, 0);
+ if (ret)
+ return ret;
- if (polarity == PWM_POLARITY_NORMAL)
- cfg = ATMEL_HLCDC_PWMPOL;
+ clk_disable_unprepare(chip->cur_clk);
+ chip->cur_clk = NULL;
+ }
- return regmap_update_bits(hlcdc->regmap, ATMEL_HLCDC_CFG(6),
- ATMEL_HLCDC_PWMPOL, cfg);
+ return 0;
}
-static int atmel_hlcdc_pwm_enable(struct pwm_chip *c, struct pwm_device *pwm)
-{
- struct atmel_hlcdc_pwm *chip = to_atmel_hlcdc_pwm(c);
- struct atmel_hlcdc *hlcdc = chip->hlcdc;
- u32 status;
- int ret;
+static const struct pwm_ops atmel_hlcdc_pwm_ops = {
+ .apply = atmel_hlcdc_pwm_apply,
+ .owner = THIS_MODULE,
+};
- ret = regmap_write(hlcdc->regmap, ATMEL_HLCDC_EN, ATMEL_HLCDC_PWM);
- if (ret)
- return ret;
+static const struct atmel_hlcdc_pwm_errata atmel_hlcdc_pwm_at91sam9x5_errata = {
+ .slow_clk_erratum = true,
+};
- while (true) {
- ret = regmap_read(hlcdc->regmap, ATMEL_HLCDC_SR, &status);
- if (ret)
- return ret;
+static const struct atmel_hlcdc_pwm_errata atmel_hlcdc_pwm_sama5d3_errata = {
+ .div1_clk_erratum = true,
+};
- if ((status & ATMEL_HLCDC_PWM) != 0)
- break;
+#ifdef CONFIG_PM_SLEEP
+static int atmel_hlcdc_pwm_suspend(struct device *dev)
+{
+ struct atmel_hlcdc_pwm *chip = dev_get_drvdata(dev);
- usleep_range(1, 10);
- }
+ /* Keep the periph clock enabled if the PWM is still running. */
+ if (pwm_is_enabled(&chip->chip.pwms[0]))
+ clk_disable_unprepare(chip->hlcdc->periph_clk);
return 0;
}
-static void atmel_hlcdc_pwm_disable(struct pwm_chip *c,
- struct pwm_device *pwm)
+static int atmel_hlcdc_pwm_resume(struct device *dev)
{
- struct atmel_hlcdc_pwm *chip = to_atmel_hlcdc_pwm(c);
- struct atmel_hlcdc *hlcdc = chip->hlcdc;
- u32 status;
+ struct atmel_hlcdc_pwm *chip = dev_get_drvdata(dev);
+ struct pwm_state state;
int ret;
- ret = regmap_write(hlcdc->regmap, ATMEL_HLCDC_DIS, ATMEL_HLCDC_PWM);
- if (ret)
- return;
+ pwm_get_state(&chip->chip.pwms[0], &state);
- while (true) {
- ret = regmap_read(hlcdc->regmap, ATMEL_HLCDC_SR, &status);
+ /* Re-enable the periph clock if it was stopped during suspend. */
+ if (!state.enabled) {
+ ret = clk_prepare_enable(chip->hlcdc->periph_clk);
if (ret)
- return;
-
- if ((status & ATMEL_HLCDC_PWM) == 0)
- break;
-
- usleep_range(1, 10);
+ return ret;
}
-}
-
-static const struct pwm_ops atmel_hlcdc_pwm_ops = {
- .config = atmel_hlcdc_pwm_config,
- .set_polarity = atmel_hlcdc_pwm_set_polarity,
- .enable = atmel_hlcdc_pwm_enable,
- .disable = atmel_hlcdc_pwm_disable,
- .owner = THIS_MODULE,
-};
-static const struct atmel_hlcdc_pwm_errata atmel_hlcdc_pwm_at91sam9x5_errata = {
- .slow_clk_erratum = true,
-};
+ return atmel_hlcdc_pwm_apply(&chip->chip, &chip->chip.pwms[0], &state);
+}
+#endif
-static const struct atmel_hlcdc_pwm_errata atmel_hlcdc_pwm_sama5d3_errata = {
- .div1_clk_erratum = true,
-};
+static SIMPLE_DEV_PM_OPS(atmel_hlcdc_pwm_pm_ops,
+ atmel_hlcdc_pwm_suspend, atmel_hlcdc_pwm_resume);
static const struct of_device_id atmel_hlcdc_dt_ids[] = {
{
@@ -305,6 +314,7 @@ static struct platform_driver atmel_hlcdc_pwm_driver = {
.driver = {
.name = "atmel-hlcdc-pwm",
.of_match_table = atmel_hlcdc_pwm_dt_ids,
+ .pm = &atmel_hlcdc_pwm_pm_ops,
},
.probe = atmel_hlcdc_pwm_probe,
.remove = atmel_hlcdc_pwm_remove,
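Since the driver above now implements the atomic ->apply() hook in place of the legacy config/set_polarity/enable/disable callbacks, here is a hedged consumer-side sketch of the atomic API it is called through; the demo driver name is hypothetical, while pwm_init_state() and pwm_apply_state() are the standard helpers:

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pwm.h>

static int pwm_demo_probe(struct platform_device *pdev)
{
	struct pwm_device *pwm;
	struct pwm_state state;

	pwm = devm_pwm_get(&pdev->dev, NULL);
	if (IS_ERR(pwm))
		return PTR_ERR(pwm);

	pwm_init_state(pwm, &state);		/* period/polarity from the lookup args */
	state.duty_cycle = state.period / 2;	/* 50% duty */
	state.enabled = true;

	/* One atomic call replaces config() + set_polarity() + enable() */
	return pwm_apply_state(pwm, &state);
}

static struct platform_driver pwm_demo_driver = {
	.driver = { .name = "pwm-atomic-demo" },
	.probe	= pwm_demo_probe,
};
module_platform_driver(pwm_demo_driver);

MODULE_LICENSE("GPL");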
diff --git a/drivers/pwm/pwm-atmel.c b/drivers/pwm/pwm-atmel.c
index 67a7023be5c2..530d7dc5f1b5 100644
--- a/drivers/pwm/pwm-atmel.c
+++ b/drivers/pwm/pwm-atmel.c
@@ -58,17 +58,22 @@
#define PWM_MAX_PRD 0xFFFF
#define PRD_MAX_PRES 10
+struct atmel_pwm_registers {
+ u8 period;
+ u8 period_upd;
+ u8 duty;
+ u8 duty_upd;
+};
+
struct atmel_pwm_chip {
struct pwm_chip chip;
struct clk *clk;
void __iomem *base;
+ const struct atmel_pwm_registers *regs;
unsigned int updated_pwms;
/* ISR is cleared when read, ensure only one thread does that */
struct mutex isr_lock;
-
- void (*config)(struct pwm_chip *chip, struct pwm_device *pwm,
- unsigned long dty, unsigned long prd);
};
static inline struct atmel_pwm_chip *to_atmel_pwm_chip(struct pwm_chip *chip)
@@ -105,153 +110,71 @@ static inline void atmel_pwm_ch_writel(struct atmel_pwm_chip *chip,
writel_relaxed(val, chip->base + base + offset);
}
-static int atmel_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
- int duty_ns, int period_ns)
+static int atmel_pwm_calculate_cprd_and_pres(struct pwm_chip *chip,
+ const struct pwm_state *state,
+ unsigned long *cprd, u32 *pres)
{
struct atmel_pwm_chip *atmel_pwm = to_atmel_pwm_chip(chip);
- unsigned long prd, dty;
- unsigned long long div;
- unsigned int pres = 0;
- u32 val;
- int ret;
-
- if (pwm_is_enabled(pwm) && (period_ns != pwm_get_period(pwm))) {
- dev_err(chip->dev, "cannot change PWM period while enabled\n");
- return -EBUSY;
- }
+ unsigned long long cycles = state->period;
/* Calculate the period cycles and prescale value */
- div = (unsigned long long)clk_get_rate(atmel_pwm->clk) * period_ns;
- do_div(div, NSEC_PER_SEC);
+ cycles *= clk_get_rate(atmel_pwm->clk);
+ do_div(cycles, NSEC_PER_SEC);
- while (div > PWM_MAX_PRD) {
- div >>= 1;
- pres++;
- }
+ for (*pres = 0; cycles > PWM_MAX_PRD; cycles >>= 1)
+ (*pres)++;
- if (pres > PRD_MAX_PRES) {
+ if (*pres > PRD_MAX_PRES) {
dev_err(chip->dev, "pres exceeds the maximum value\n");
return -EINVAL;
}
- /* Calculate the duty cycles */
- prd = div;
- div *= duty_ns;
- do_div(div, period_ns);
- dty = prd - div;
-
- ret = clk_enable(atmel_pwm->clk);
- if (ret) {
- dev_err(chip->dev, "failed to enable PWM clock\n");
- return ret;
- }
-
- /* It is necessary to preserve CPOL, inside CMR */
- val = atmel_pwm_ch_readl(atmel_pwm, pwm->hwpwm, PWM_CMR);
- val = (val & ~PWM_CMR_CPRE_MSK) | (pres & PWM_CMR_CPRE_MSK);
- atmel_pwm_ch_writel(atmel_pwm, pwm->hwpwm, PWM_CMR, val);
- atmel_pwm->config(chip, pwm, dty, prd);
- mutex_lock(&atmel_pwm->isr_lock);
- atmel_pwm->updated_pwms |= atmel_pwm_readl(atmel_pwm, PWM_ISR);
- atmel_pwm->updated_pwms &= ~(1 << pwm->hwpwm);
- mutex_unlock(&atmel_pwm->isr_lock);
-
- clk_disable(atmel_pwm->clk);
- return ret;
-}
-
-static void atmel_pwm_config_v1(struct pwm_chip *chip, struct pwm_device *pwm,
- unsigned long dty, unsigned long prd)
-{
- struct atmel_pwm_chip *atmel_pwm = to_atmel_pwm_chip(chip);
- unsigned int val;
-
+ *cprd = cycles;
- atmel_pwm_ch_writel(atmel_pwm, pwm->hwpwm, PWMV1_CUPD, dty);
-
- val = atmel_pwm_ch_readl(atmel_pwm, pwm->hwpwm, PWM_CMR);
- val &= ~PWM_CMR_UPD_CDTY;
- atmel_pwm_ch_writel(atmel_pwm, pwm->hwpwm, PWM_CMR, val);
-
- /*
- * If the PWM channel is enabled, only update CDTY by using the update
- * register, it needs to set bit 10 of CMR to 0
- */
- if (pwm_is_enabled(pwm))
- return;
- /*
- * If the PWM channel is disabled, write value to duty and period
- * registers directly.
- */
- atmel_pwm_ch_writel(atmel_pwm, pwm->hwpwm, PWMV1_CDTY, dty);
- atmel_pwm_ch_writel(atmel_pwm, pwm->hwpwm, PWMV1_CPRD, prd);
+ return 0;
}
-static void atmel_pwm_config_v2(struct pwm_chip *chip, struct pwm_device *pwm,
- unsigned long dty, unsigned long prd)
+static void atmel_pwm_calculate_cdty(const struct pwm_state *state,
+ unsigned long cprd, unsigned long *cdty)
{
- struct atmel_pwm_chip *atmel_pwm = to_atmel_pwm_chip(chip);
+ unsigned long long cycles = state->duty_cycle;
- if (pwm_is_enabled(pwm)) {
- /*
- * If the PWM channel is enabled, using the duty update register
- * to update the value.
- */
- atmel_pwm_ch_writel(atmel_pwm, pwm->hwpwm, PWMV2_CDTYUPD, dty);
- } else {
- /*
- * If the PWM channel is disabled, write value to duty and
- * period registers directly.
- */
- atmel_pwm_ch_writel(atmel_pwm, pwm->hwpwm, PWMV2_CDTY, dty);
- atmel_pwm_ch_writel(atmel_pwm, pwm->hwpwm, PWMV2_CPRD, prd);
- }
+ cycles *= cprd;
+ do_div(cycles, state->period);
+ *cdty = cprd - cycles;
}
-static int atmel_pwm_set_polarity(struct pwm_chip *chip, struct pwm_device *pwm,
- enum pwm_polarity polarity)
+static void atmel_pwm_update_cdty(struct pwm_chip *chip, struct pwm_device *pwm,
+ unsigned long cdty)
{
struct atmel_pwm_chip *atmel_pwm = to_atmel_pwm_chip(chip);
u32 val;
- int ret;
-
- val = atmel_pwm_ch_readl(atmel_pwm, pwm->hwpwm, PWM_CMR);
- if (polarity == PWM_POLARITY_NORMAL)
- val &= ~PWM_CMR_CPOL;
- else
- val |= PWM_CMR_CPOL;
-
- ret = clk_enable(atmel_pwm->clk);
- if (ret) {
- dev_err(chip->dev, "failed to enable PWM clock\n");
- return ret;
+ if (atmel_pwm->regs->duty_upd ==
+ atmel_pwm->regs->period_upd) {
+ val = atmel_pwm_ch_readl(atmel_pwm, pwm->hwpwm, PWM_CMR);
+ val &= ~PWM_CMR_UPD_CDTY;
+ atmel_pwm_ch_writel(atmel_pwm, pwm->hwpwm, PWM_CMR, val);
}
- atmel_pwm_ch_writel(atmel_pwm, pwm->hwpwm, PWM_CMR, val);
-
- clk_disable(atmel_pwm->clk);
-
- return 0;
+ atmel_pwm_ch_writel(atmel_pwm, pwm->hwpwm,
+ atmel_pwm->regs->duty_upd, cdty);
}
-static int atmel_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
+static void atmel_pwm_set_cprd_cdty(struct pwm_chip *chip,
+ struct pwm_device *pwm,
+ unsigned long cprd, unsigned long cdty)
{
struct atmel_pwm_chip *atmel_pwm = to_atmel_pwm_chip(chip);
- int ret;
-
- ret = clk_enable(atmel_pwm->clk);
- if (ret) {
- dev_err(chip->dev, "failed to enable PWM clock\n");
- return ret;
- }
- atmel_pwm_writel(atmel_pwm, PWM_ENA, 1 << pwm->hwpwm);
-
- return 0;
+ atmel_pwm_ch_writel(atmel_pwm, pwm->hwpwm,
+ atmel_pwm->regs->duty, cdty);
+ atmel_pwm_ch_writel(atmel_pwm, pwm->hwpwm,
+ atmel_pwm->regs->period, cprd);
}
-static void atmel_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
+static void atmel_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm,
+ bool disable_clk)
{
struct atmel_pwm_chip *atmel_pwm = to_atmel_pwm_chip(chip);
unsigned long timeout = jiffies + 2 * HZ;
@@ -282,37 +205,99 @@ static void atmel_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
time_before(jiffies, timeout))
usleep_range(10, 100);
- clk_disable(atmel_pwm->clk);
+ if (disable_clk)
+ clk_disable(atmel_pwm->clk);
+}
+
+static int atmel_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ struct pwm_state *state)
+{
+ struct atmel_pwm_chip *atmel_pwm = to_atmel_pwm_chip(chip);
+ struct pwm_state cstate;
+ unsigned long cprd, cdty;
+ u32 pres, val;
+ int ret;
+
+ pwm_get_state(pwm, &cstate);
+
+ if (state->enabled) {
+ if (cstate.enabled &&
+ cstate.polarity == state->polarity &&
+ cstate.period == state->period) {
+ cprd = atmel_pwm_ch_readl(atmel_pwm, pwm->hwpwm,
+ atmel_pwm->regs->period);
+ atmel_pwm_calculate_cdty(state, cprd, &cdty);
+ atmel_pwm_update_cdty(chip, pwm, cdty);
+ return 0;
+ }
+
+ ret = atmel_pwm_calculate_cprd_and_pres(chip, state, &cprd,
+ &pres);
+ if (ret) {
+ dev_err(chip->dev,
+ "failed to calculate cprd and prescaler\n");
+ return ret;
+ }
+
+ atmel_pwm_calculate_cdty(state, cprd, &cdty);
+
+ if (cstate.enabled) {
+ atmel_pwm_disable(chip, pwm, false);
+ } else {
+ ret = clk_enable(atmel_pwm->clk);
+ if (ret) {
+ dev_err(chip->dev, "failed to enable clock\n");
+ return ret;
+ }
+ }
+
+ /* It is necessary to preserve CPOL, inside CMR */
+ val = atmel_pwm_ch_readl(atmel_pwm, pwm->hwpwm, PWM_CMR);
+ val = (val & ~PWM_CMR_CPRE_MSK) | (pres & PWM_CMR_CPRE_MSK);
+ if (state->polarity == PWM_POLARITY_NORMAL)
+ val &= ~PWM_CMR_CPOL;
+ else
+ val |= PWM_CMR_CPOL;
+ atmel_pwm_ch_writel(atmel_pwm, pwm->hwpwm, PWM_CMR, val);
+ atmel_pwm_set_cprd_cdty(chip, pwm, cprd, cdty);
+ mutex_lock(&atmel_pwm->isr_lock);
+ atmel_pwm->updated_pwms |= atmel_pwm_readl(atmel_pwm, PWM_ISR);
+ atmel_pwm->updated_pwms &= ~(1 << pwm->hwpwm);
+ mutex_unlock(&atmel_pwm->isr_lock);
+ atmel_pwm_writel(atmel_pwm, PWM_ENA, 1 << pwm->hwpwm);
+ } else if (cstate.enabled) {
+ atmel_pwm_disable(chip, pwm, true);
+ }
+
+ return 0;
}
static const struct pwm_ops atmel_pwm_ops = {
- .config = atmel_pwm_config,
- .set_polarity = atmel_pwm_set_polarity,
- .enable = atmel_pwm_enable,
- .disable = atmel_pwm_disable,
+ .apply = atmel_pwm_apply,
.owner = THIS_MODULE,
};
-struct atmel_pwm_data {
- void (*config)(struct pwm_chip *chip, struct pwm_device *pwm,
- unsigned long dty, unsigned long prd);
+static const struct atmel_pwm_registers atmel_pwm_regs_v1 = {
+ .period = PWMV1_CPRD,
+ .period_upd = PWMV1_CUPD,
+ .duty = PWMV1_CDTY,
+ .duty_upd = PWMV1_CUPD,
};
-static const struct atmel_pwm_data atmel_pwm_data_v1 = {
- .config = atmel_pwm_config_v1,
-};
-
-static const struct atmel_pwm_data atmel_pwm_data_v2 = {
- .config = atmel_pwm_config_v2,
+static const struct atmel_pwm_registers atmel_pwm_regs_v2 = {
+ .period = PWMV2_CPRD,
+ .period_upd = PWMV2_CPRDUPD,
+ .duty = PWMV2_CDTY,
+ .duty_upd = PWMV2_CDTYUPD,
};
static const struct platform_device_id atmel_pwm_devtypes[] = {
{
.name = "at91sam9rl-pwm",
- .driver_data = (kernel_ulong_t)&atmel_pwm_data_v1,
+ .driver_data = (kernel_ulong_t)&atmel_pwm_regs_v1,
}, {
.name = "sama5d3-pwm",
- .driver_data = (kernel_ulong_t)&atmel_pwm_data_v2,
+ .driver_data = (kernel_ulong_t)&atmel_pwm_regs_v2,
}, {
/* sentinel */
},
@@ -322,17 +307,20 @@ MODULE_DEVICE_TABLE(platform, atmel_pwm_devtypes);
static const struct of_device_id atmel_pwm_dt_ids[] = {
{
.compatible = "atmel,at91sam9rl-pwm",
- .data = &atmel_pwm_data_v1,
+ .data = &atmel_pwm_regs_v1,
}, {
.compatible = "atmel,sama5d3-pwm",
- .data = &atmel_pwm_data_v2,
+ .data = &atmel_pwm_regs_v2,
+ }, {
+ .compatible = "atmel,sama5d2-pwm",
+ .data = &atmel_pwm_regs_v2,
}, {
/* sentinel */
},
};
MODULE_DEVICE_TABLE(of, atmel_pwm_dt_ids);
-static inline const struct atmel_pwm_data *
+static inline const struct atmel_pwm_registers *
atmel_pwm_get_driver_data(struct platform_device *pdev)
{
const struct platform_device_id *id;
@@ -342,18 +330,18 @@ atmel_pwm_get_driver_data(struct platform_device *pdev)
id = platform_get_device_id(pdev);
- return (struct atmel_pwm_data *)id->driver_data;
+ return (struct atmel_pwm_registers *)id->driver_data;
}
static int atmel_pwm_probe(struct platform_device *pdev)
{
- const struct atmel_pwm_data *data;
+ const struct atmel_pwm_registers *regs;
struct atmel_pwm_chip *atmel_pwm;
struct resource *res;
int ret;
- data = atmel_pwm_get_driver_data(pdev);
- if (!data)
+ regs = atmel_pwm_get_driver_data(pdev);
+ if (!regs)
return -ENODEV;
atmel_pwm = devm_kzalloc(&pdev->dev, sizeof(*atmel_pwm), GFP_KERNEL);
@@ -385,7 +373,7 @@ static int atmel_pwm_probe(struct platform_device *pdev)
atmel_pwm->chip.base = -1;
atmel_pwm->chip.npwm = 4;
- atmel_pwm->config = data->config;
+ atmel_pwm->regs = regs;
atmel_pwm->updated_pwms = 0;
mutex_init(&atmel_pwm->isr_lock);
diff --git a/drivers/pwm/pwm-mediatek.c b/drivers/pwm/pwm-mediatek.c
new file mode 100644
index 000000000000..5c11bc708a3c
--- /dev/null
+++ b/drivers/pwm/pwm-mediatek.c
@@ -0,0 +1,219 @@
+/*
+ * Mediatek Pulse Width Modulator driver
+ *
+ * Copyright (C) 2015 John Crispin <blogic@openwrt.org>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pwm.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+/* PWM registers and bits definitions */
+#define PWMCON 0x00
+#define PWMHDUR 0x04
+#define PWMLDUR 0x08
+#define PWMGDUR 0x0c
+#define PWMWAVENUM 0x28
+#define PWMDWIDTH 0x2c
+#define PWMTHRES 0x30
+
+enum {
+ MTK_CLK_MAIN = 0,
+ MTK_CLK_TOP,
+ MTK_CLK_PWM1,
+ MTK_CLK_PWM2,
+ MTK_CLK_PWM3,
+ MTK_CLK_PWM4,
+ MTK_CLK_PWM5,
+ MTK_CLK_MAX,
+};
+
+static const char * const mtk_pwm_clk_name[] = {
+ "main", "top", "pwm1", "pwm2", "pwm3", "pwm4", "pwm5"
+};
+
+/**
+ * struct mtk_pwm_chip - struct representing PWM chip
+ * @chip: linux PWM chip representation
+ * @regs: base address of PWM chip
+ * @clks: list of clocks
+ */
+struct mtk_pwm_chip {
+ struct pwm_chip chip;
+ void __iomem *regs;
+ struct clk *clks[MTK_CLK_MAX];
+};
+
+static inline struct mtk_pwm_chip *to_mtk_pwm_chip(struct pwm_chip *chip)
+{
+ return container_of(chip, struct mtk_pwm_chip, chip);
+}
+
+static inline u32 mtk_pwm_readl(struct mtk_pwm_chip *chip, unsigned int num,
+ unsigned int offset)
+{
+ return readl(chip->regs + 0x10 + (num * 0x40) + offset);
+}
+
+static inline void mtk_pwm_writel(struct mtk_pwm_chip *chip,
+ unsigned int num, unsigned int offset,
+ u32 value)
+{
+ writel(value, chip->regs + 0x10 + (num * 0x40) + offset);
+}
+
+static int mtk_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
+ int duty_ns, int period_ns)
+{
+ struct mtk_pwm_chip *pc = to_mtk_pwm_chip(chip);
+ struct clk *clk = pc->clks[MTK_CLK_PWM1 + pwm->hwpwm];
+ u32 resolution, clkdiv = 0;
+
+ resolution = NSEC_PER_SEC / clk_get_rate(clk);
+
+ while (period_ns / resolution > 8191) {
+ resolution *= 2;
+ clkdiv++;
+ }
+
+ if (clkdiv > 7)
+ return -EINVAL;
+
+ mtk_pwm_writel(pc, pwm->hwpwm, PWMCON, BIT(15) | BIT(3) | clkdiv);
+ mtk_pwm_writel(pc, pwm->hwpwm, PWMDWIDTH, period_ns / resolution);
+ mtk_pwm_writel(pc, pwm->hwpwm, PWMTHRES, duty_ns / resolution);
+
+ return 0;
+}
+
+static int mtk_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct mtk_pwm_chip *pc = to_mtk_pwm_chip(chip);
+ u32 value;
+ int ret;
+
+ ret = clk_prepare(pc->clks[MTK_CLK_PWM1 + pwm->hwpwm]);
+ if (ret < 0)
+ return ret;
+
+ value = readl(pc->regs);
+ value |= BIT(pwm->hwpwm);
+ writel(value, pc->regs);
+
+ return 0;
+}
+
+static void mtk_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct mtk_pwm_chip *pc = to_mtk_pwm_chip(chip);
+ u32 value;
+
+ value = readl(pc->regs);
+ value &= ~BIT(pwm->hwpwm);
+ writel(value, pc->regs);
+
+ clk_unprepare(pc->clks[MTK_CLK_PWM1 + pwm->hwpwm]);
+}
+
+static const struct pwm_ops mtk_pwm_ops = {
+ .config = mtk_pwm_config,
+ .enable = mtk_pwm_enable,
+ .disable = mtk_pwm_disable,
+ .owner = THIS_MODULE,
+};
+
+static int mtk_pwm_probe(struct platform_device *pdev)
+{
+ struct mtk_pwm_chip *pc;
+ struct resource *res;
+ unsigned int i;
+ int ret;
+
+ pc = devm_kzalloc(&pdev->dev, sizeof(*pc), GFP_KERNEL);
+ if (!pc)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ pc->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(pc->regs))
+ return PTR_ERR(pc->regs);
+
+ for (i = 0; i < MTK_CLK_MAX; i++) {
+ pc->clks[i] = devm_clk_get(&pdev->dev, mtk_pwm_clk_name[i]);
+ if (IS_ERR(pc->clks[i]))
+ return PTR_ERR(pc->clks[i]);
+ }
+
+ ret = clk_prepare(pc->clks[MTK_CLK_TOP]);
+ if (ret < 0)
+ return ret;
+
+ ret = clk_prepare(pc->clks[MTK_CLK_MAIN]);
+ if (ret < 0)
+ goto disable_clk_top;
+
+ platform_set_drvdata(pdev, pc);
+
+ pc->chip.dev = &pdev->dev;
+ pc->chip.ops = &mtk_pwm_ops;
+ pc->chip.base = -1;
+ pc->chip.npwm = 5;
+
+ ret = pwmchip_add(&pc->chip);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "pwmchip_add() failed: %d\n", ret);
+ goto disable_clk_main;
+ }
+
+ return 0;
+
+disable_clk_main:
+ clk_unprepare(pc->clks[MTK_CLK_MAIN]);
+disable_clk_top:
+ clk_unprepare(pc->clks[MTK_CLK_TOP]);
+
+ return ret;
+}
+
+static int mtk_pwm_remove(struct platform_device *pdev)
+{
+ struct mtk_pwm_chip *pc = platform_get_drvdata(pdev);
+ unsigned int i;
+
+ for (i = 0; i < pc->chip.npwm; i++)
+ pwm_disable(&pc->chip.pwms[i]);
+
+ return pwmchip_remove(&pc->chip);
+}
+
+static const struct of_device_id mtk_pwm_of_match[] = {
+ { .compatible = "mediatek,mt7623-pwm" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, mtk_pwm_of_match);
+
+static struct platform_driver mtk_pwm_driver = {
+ .driver = {
+ .name = "mtk-pwm",
+ .of_match_table = mtk_pwm_of_match,
+ },
+ .probe = mtk_pwm_probe,
+ .remove = mtk_pwm_remove,
+};
+module_platform_driver(mtk_pwm_driver);
+
+MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
+MODULE_ALIAS("platform:mtk-pwm");
+MODULE_LICENSE("GPL");
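A small worked example of the divider selection in mtk_pwm_config() above, written as stand-alone C; the 25 MHz source clock is an assumption (the real rate comes from the per-channel "pwmN" clock), and 8191 is the counter limit the driver checks against:

#include <stdio.h>

int main(void)
{
	unsigned int resolution = 1000000000u / 25000000u;	/* 40 ns per tick */
	unsigned int period_ns = 1000000;			/* 1 kHz */
	unsigned int duty_ns = 250000;				/* 25% duty */
	unsigned int clkdiv = 0;

	/* Same loop as mtk_pwm_config(): double the tick length until
	 * the period fits in 8191 ticks. */
	while (period_ns / resolution > 8191) {
		resolution *= 2;
		clkdiv++;
	}

	/* Prints clkdiv=2 width=6250 thres=1562 for these inputs */
	printf("clkdiv=%u width=%u thres=%u\n",
	       clkdiv, period_ns / resolution, duty_ns / resolution);
	return 0;
}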
diff --git a/drivers/pwm/pwm-pca9685.c b/drivers/pwm/pwm-pca9685.c
index 0cfb3571a732..5f55cfab9b1c 100644
--- a/drivers/pwm/pwm-pca9685.c
+++ b/drivers/pwm/pwm-pca9685.c
@@ -30,6 +30,7 @@
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/delay.h>
+#include <linux/pm_runtime.h>
/*
* Because the PCA9685 has only one prescaler per chip, changing the period of
@@ -79,7 +80,6 @@
struct pca9685 {
struct pwm_chip chip;
struct regmap *regmap;
- int active_cnt;
int duty_ns;
int period_ns;
#if IS_ENABLED(CONFIG_GPIOLIB)
@@ -111,20 +111,10 @@ static int pca9685_pwm_gpio_request(struct gpio_chip *gpio, unsigned int offset)
pwm_set_chip_data(pwm, (void *)1);
mutex_unlock(&pca->lock);
+ pm_runtime_get_sync(pca->chip.dev);
return 0;
}
-static void pca9685_pwm_gpio_free(struct gpio_chip *gpio, unsigned int offset)
-{
- struct pca9685 *pca = gpiochip_get_data(gpio);
- struct pwm_device *pwm;
-
- mutex_lock(&pca->lock);
- pwm = &pca->chip.pwms[offset];
- pwm_set_chip_data(pwm, NULL);
- mutex_unlock(&pca->lock);
-}
-
static bool pca9685_pwm_is_gpio(struct pca9685 *pca, struct pwm_device *pwm)
{
bool is_gpio = false;
@@ -177,6 +167,19 @@ static void pca9685_pwm_gpio_set(struct gpio_chip *gpio, unsigned int offset,
regmap_write(pca->regmap, LED_N_ON_H(pwm->hwpwm), on);
}
+static void pca9685_pwm_gpio_free(struct gpio_chip *gpio, unsigned int offset)
+{
+ struct pca9685 *pca = gpiochip_get_data(gpio);
+ struct pwm_device *pwm;
+
+ pca9685_pwm_gpio_set(gpio, offset, 0);
+ pm_runtime_put(pca->chip.dev);
+ mutex_lock(&pca->lock);
+ pwm = &pca->chip.pwms[offset];
+ pwm_set_chip_data(pwm, NULL);
+ mutex_unlock(&pca->lock);
+}
+
static int pca9685_pwm_gpio_get_direction(struct gpio_chip *chip,
unsigned int offset)
{
@@ -238,6 +241,16 @@ static inline int pca9685_pwm_gpio_probe(struct pca9685 *pca)
}
#endif
+static void pca9685_set_sleep_mode(struct pca9685 *pca, int sleep)
+{
+ regmap_update_bits(pca->regmap, PCA9685_MODE1,
+ MODE1_SLEEP, sleep ? MODE1_SLEEP : 0);
+ if (!sleep) {
+ /* Wait 500us for the oscillator to be back up */
+ udelay(500);
+ }
+}
+
static int pca9685_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
int duty_ns, int period_ns)
{
@@ -252,19 +265,20 @@ static int pca9685_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
if (prescale >= PCA9685_PRESCALE_MIN &&
prescale <= PCA9685_PRESCALE_MAX) {
+ /*
+ * putting the chip briefly into SLEEP mode
+ * at this point won't interfere with the
+ * pm_runtime framework, because the pm_runtime
+ * state is guaranteed active here.
+ */
/* Put chip into sleep mode */
- regmap_update_bits(pca->regmap, PCA9685_MODE1,
- MODE1_SLEEP, MODE1_SLEEP);
+ pca9685_set_sleep_mode(pca, 1);
/* Change the chip-wide output frequency */
regmap_write(pca->regmap, PCA9685_PRESCALE, prescale);
/* Wake the chip up */
- regmap_update_bits(pca->regmap, PCA9685_MODE1,
- MODE1_SLEEP, 0x0);
-
- /* Wait 500us for the oscillator to be back up */
- udelay(500);
+ pca9685_set_sleep_mode(pca, 0);
pca->period_ns = period_ns;
} else {
@@ -406,21 +420,15 @@ static int pca9685_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
if (pca9685_pwm_is_gpio(pca, pwm))
return -EBUSY;
-
- if (pca->active_cnt++ == 0)
- return regmap_update_bits(pca->regmap, PCA9685_MODE1,
- MODE1_SLEEP, 0x0);
+ pm_runtime_get_sync(chip->dev);
return 0;
}
static void pca9685_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
{
- struct pca9685 *pca = to_pca(chip);
-
- if (--pca->active_cnt == 0)
- regmap_update_bits(pca->regmap, PCA9685_MODE1, MODE1_SLEEP,
- MODE1_SLEEP);
+ pca9685_pwm_disable(chip, pwm);
+ pm_runtime_put(chip->dev);
}
static const struct pwm_ops pca9685_pwm_ops = {
@@ -492,22 +500,54 @@ static int pca9685_pwm_probe(struct i2c_client *client,
return ret;
ret = pca9685_pwm_gpio_probe(pca);
- if (ret < 0)
+ if (ret < 0) {
pwmchip_remove(&pca->chip);
+ return ret;
+ }
+
+ /* the chip comes out of power-up in the active state */
+ pm_runtime_set_active(&client->dev);
+ /*
+ * enable will put the chip into suspend, which is what we
+ * want as all outputs are disabled at this point
+ */
+ pm_runtime_enable(&client->dev);
- return ret;
+ return 0;
}
static int pca9685_pwm_remove(struct i2c_client *client)
{
struct pca9685 *pca = i2c_get_clientdata(client);
+ int ret;
- regmap_update_bits(pca->regmap, PCA9685_MODE1, MODE1_SLEEP,
- MODE1_SLEEP);
+ ret = pwmchip_remove(&pca->chip);
+ if (ret)
+ return ret;
+ pm_runtime_disable(&client->dev);
+ return 0;
+}
- return pwmchip_remove(&pca->chip);
+#ifdef CONFIG_PM
+static int pca9685_pwm_runtime_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct pca9685 *pca = i2c_get_clientdata(client);
+
+ pca9685_set_sleep_mode(pca, 1);
+ return 0;
}
+static int pca9685_pwm_runtime_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct pca9685 *pca = i2c_get_clientdata(client);
+
+ pca9685_set_sleep_mode(pca, 0);
+ return 0;
+}
+#endif
+
static const struct i2c_device_id pca9685_id[] = {
{ "pca9685", 0 },
{ /* sentinel */ },
@@ -530,11 +570,17 @@ static const struct of_device_id pca9685_dt_ids[] = {
MODULE_DEVICE_TABLE(of, pca9685_dt_ids);
#endif
+static const struct dev_pm_ops pca9685_pwm_pm = {
+ SET_RUNTIME_PM_OPS(pca9685_pwm_runtime_suspend,
+ pca9685_pwm_runtime_resume, NULL)
+};
+
static struct i2c_driver pca9685_i2c_driver = {
.driver = {
.name = "pca9685-pwm",
.acpi_match_table = ACPI_PTR(pca9685_acpi_ids),
.of_match_table = of_match_ptr(pca9685_dt_ids),
+ .pm = &pca9685_pwm_pm,
},
.probe = pca9685_pwm_probe,
.remove = pca9685_pwm_remove,
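
The pca9685 conversion above swaps the driver's own active_cnt user count for runtime PM: each request path takes a reference with pm_runtime_get_sync(), each free path drops it with pm_runtime_put(), and the MODE1_SLEEP toggle moves into the runtime suspend/resume callbacks. A hedged sketch of that wiring, with placeholder callback bodies rather than the chip's register accesses:

#include <linux/device.h>
#include <linux/pm_runtime.h>

static int example_runtime_suspend(struct device *dev)
{
	/* put the controller into its low-power state here */
	return 0;
}

static int example_runtime_resume(struct device *dev)
{
	/* wake the controller and wait until it is usable again */
	return 0;
}

static const struct dev_pm_ops example_pm_ops = {
	SET_RUNTIME_PM_OPS(example_runtime_suspend,
			   example_runtime_resume, NULL)
};

/* request paths: pm_runtime_get_sync(dev);  free paths: pm_runtime_put(dev); */
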
diff --git a/drivers/pwm/pwm-tegra.c b/drivers/pwm/pwm-tegra.c
index e4647840cd6e..8c6ed556db28 100644
--- a/drivers/pwm/pwm-tegra.c
+++ b/drivers/pwm/pwm-tegra.c
@@ -29,6 +29,7 @@
#include <linux/of_device.h>
#include <linux/pwm.h>
#include <linux/platform_device.h>
+#include <linux/pinctrl/consumer.h>
#include <linux/slab.h>
#include <linux/reset.h>
@@ -49,6 +50,8 @@ struct tegra_pwm_chip {
struct clk *clk;
struct reset_control *rst;
+ unsigned long clk_rate;
+
void __iomem *regs;
const struct tegra_pwm_soc *soc;
@@ -74,8 +77,8 @@ static int tegra_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
int duty_ns, int period_ns)
{
struct tegra_pwm_chip *pc = to_tegra_pwm_chip(chip);
- unsigned long long c = duty_ns;
- unsigned long rate, hz;
+ unsigned long long c = duty_ns, hz;
+ unsigned long rate;
u32 val = 0;
int err;
@@ -85,8 +88,7 @@ static int tegra_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
* nearest integer during division.
*/
c *= (1 << PWM_DUTY_WIDTH);
- c += period_ns / 2;
- do_div(c, period_ns);
+ c = DIV_ROUND_CLOSEST_ULL(c, period_ns);
val = (u32)c << PWM_DUTY_SHIFT;
@@ -94,10 +96,11 @@ static int tegra_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
* Compute the prescaler value for which (1 << PWM_DUTY_WIDTH)
* cycles at the PWM clock rate will take period_ns nanoseconds.
*/
- rate = clk_get_rate(pc->clk) >> PWM_DUTY_WIDTH;
- hz = NSEC_PER_SEC / period_ns;
+ rate = pc->clk_rate >> PWM_DUTY_WIDTH;
- rate = (rate + (hz / 2)) / hz;
+ /* Consider precision in PWM_SCALE_WIDTH rate calculation */
+ hz = DIV_ROUND_CLOSEST_ULL(100ULL * NSEC_PER_SEC, period_ns);
+ rate = DIV_ROUND_CLOSEST_ULL(100ULL * rate, hz);
/*
* Since the actual PWM divider is the register's frequency divider
@@ -198,6 +201,9 @@ static int tegra_pwm_probe(struct platform_device *pdev)
if (IS_ERR(pwm->clk))
return PTR_ERR(pwm->clk);
+ /* Read PWM clock rate from source */
+ pwm->clk_rate = clk_get_rate(pwm->clk);
+
pwm->rst = devm_reset_control_get(&pdev->dev, "pwm");
if (IS_ERR(pwm->rst)) {
ret = PTR_ERR(pwm->rst);
@@ -253,6 +259,18 @@ static int tegra_pwm_remove(struct platform_device *pdev)
return pwmchip_remove(&pc->chip);
}
+#ifdef CONFIG_PM_SLEEP
+static int tegra_pwm_suspend(struct device *dev)
+{
+ return pinctrl_pm_select_sleep_state(dev);
+}
+
+static int tegra_pwm_resume(struct device *dev)
+{
+ return pinctrl_pm_select_default_state(dev);
+}
+#endif
+
static const struct tegra_pwm_soc tegra20_pwm_soc = {
.num_channels = 4,
};
@@ -269,10 +287,15 @@ static const struct of_device_id tegra_pwm_of_match[] = {
MODULE_DEVICE_TABLE(of, tegra_pwm_of_match);
+static const struct dev_pm_ops tegra_pwm_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(tegra_pwm_suspend, tegra_pwm_resume)
+};
+
static struct platform_driver tegra_pwm_driver = {
.driver = {
.name = "tegra-pwm",
.of_match_table = tegra_pwm_of_match,
+ .pm = &tegra_pwm_pm_ops,
},
.probe = tegra_pwm_probe,
.remove = tegra_pwm_remove,
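
In the tegra hunks above the clock rate is read once at probe and the duty/prescaler math switches to DIV_ROUND_CLOSEST_ULL(), scaling by 100 so the intermediate Hz value keeps two extra decimal digits before the final rounded division. A small sketch of that scaling, separated from the rest of the driver:

#include <linux/kernel.h>
#include <linux/time64.h>

/*
 * Sketch: 'rate' is the clock rate already shifted down by the duty-cycle
 * width; the x100 factor preserves precision across the two divisions.
 */
static unsigned long example_scale(unsigned long rate, u32 period_ns)
{
	u64 hz;

	hz = DIV_ROUND_CLOSEST_ULL(100ULL * NSEC_PER_SEC, period_ns);

	return DIV_ROUND_CLOSEST_ULL(100ULL * rate, hz);
}
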
diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c
index 50b617af81bd..5beb0c361076 100644
--- a/drivers/rapidio/devices/rio_mport_cdev.c
+++ b/drivers/rapidio/devices/rio_mport_cdev.c
@@ -2444,31 +2444,25 @@ static struct mport_dev *mport_cdev_add(struct rio_mport *mport)
mutex_init(&md->buf_mutex);
mutex_init(&md->file_mutex);
INIT_LIST_HEAD(&md->file_list);
- cdev_init(&md->cdev, &mport_fops);
- md->cdev.owner = THIS_MODULE;
- ret = cdev_add(&md->cdev, MKDEV(MAJOR(dev_number), mport->id), 1);
- if (ret < 0) {
- kfree(md);
- rmcd_error("Unable to register a device, err=%d", ret);
- return NULL;
- }
- md->dev.devt = md->cdev.dev;
+ device_initialize(&md->dev);
+ md->dev.devt = MKDEV(MAJOR(dev_number), mport->id);
md->dev.class = dev_class;
md->dev.parent = &mport->dev;
md->dev.release = mport_device_release;
dev_set_name(&md->dev, DEV_NAME "%d", mport->id);
atomic_set(&md->active, 1);
- ret = device_register(&md->dev);
+ cdev_init(&md->cdev, &mport_fops);
+ md->cdev.owner = THIS_MODULE;
+
+ ret = cdev_device_add(&md->cdev, &md->dev);
if (ret) {
rmcd_error("Failed to register mport %d (err=%d)",
mport->id, ret);
goto err_cdev;
}
- get_device(&md->dev);
-
INIT_LIST_HEAD(&md->doorbells);
spin_lock_init(&md->db_lock);
INIT_LIST_HEAD(&md->portwrites);
@@ -2513,8 +2507,7 @@ static struct mport_dev *mport_cdev_add(struct rio_mport *mport)
return md;
err_cdev:
- cdev_del(&md->cdev);
- kfree(md);
+ put_device(&md->dev);
return NULL;
}
@@ -2578,7 +2571,7 @@ static void mport_cdev_remove(struct mport_dev *md)
atomic_set(&md->active, 0);
mport_cdev_terminate_dma(md);
rio_del_mport_pw_handler(md->mport, md, rio_mport_pw_handler);
- cdev_del(&(md->cdev));
+ cdev_device_del(&md->cdev, &md->dev);
mport_cdev_kill_fasync(md);
flush_workqueue(dma_wq);
@@ -2603,7 +2596,6 @@ static void mport_cdev_remove(struct mport_dev *md)
rio_release_inb_dbell(md->mport, 0, 0x0fff);
- device_unregister(&md->dev);
put_device(&md->dev);
}
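
The rio_mport_cdev change follows the cdev_device_add() pattern: device_initialize() first, then fill in devt/class/parent/release, register the cdev and the device together, and tear down with cdev_device_del() followed by a final put_device(). A minimal sketch with hypothetical names; it assumes dev.release has been set to free the containing structure:

#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/module.h>

struct example_dev {
	struct device dev;	/* dev.release must free this structure */
	struct cdev cdev;
};

static int example_register(struct example_dev *ed, dev_t devt,
			    const struct file_operations *fops)
{
	int ret;

	device_initialize(&ed->dev);
	ed->dev.devt = devt;

	cdev_init(&ed->cdev, fops);
	ed->cdev.owner = THIS_MODULE;

	ret = cdev_device_add(&ed->cdev, &ed->dev);
	if (ret)
		put_device(&ed->dev);	/* drops the initial reference */

	return ret;
}

static void example_unregister(struct example_dev *ed)
{
	cdev_device_del(&ed->cdev, &ed->dev);
	put_device(&ed->dev);
}
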
diff --git a/drivers/rapidio/rio-sysfs.c b/drivers/rapidio/rio-sysfs.c
index eda41563d06d..73e4b407f162 100644
--- a/drivers/rapidio/rio-sysfs.c
+++ b/drivers/rapidio/rio-sysfs.c
@@ -108,15 +108,11 @@ static struct attribute *rio_dev_attrs[] = {
&dev_attr_lprev.attr,
&dev_attr_destid.attr,
&dev_attr_modalias.attr,
- NULL,
-};
-static const struct attribute_group rio_dev_group = {
- .attrs = rio_dev_attrs,
-};
-
-const struct attribute_group *rio_dev_groups[] = {
- &rio_dev_group,
+ /* Switch-only attributes */
+ &dev_attr_routes.attr,
+ &dev_attr_lnext.attr,
+ &dev_attr_hopcount.attr,
NULL,
};
@@ -259,46 +255,40 @@ static struct bin_attribute rio_config_attr = {
.write = rio_write_config,
};
-/**
- * rio_create_sysfs_dev_files - create RIO specific sysfs files
- * @rdev: device whose entries should be created
- *
- * Create files when @rdev is added to sysfs.
- */
-int rio_create_sysfs_dev_files(struct rio_dev *rdev)
-{
- int err = 0;
-
- err = device_create_bin_file(&rdev->dev, &rio_config_attr);
+static struct bin_attribute *rio_dev_bin_attrs[] = {
+ &rio_config_attr,
+ NULL,
+};
- if (!err && (rdev->pef & RIO_PEF_SWITCH)) {
- err |= device_create_file(&rdev->dev, &dev_attr_routes);
- err |= device_create_file(&rdev->dev, &dev_attr_lnext);
- err |= device_create_file(&rdev->dev, &dev_attr_hopcount);
+static umode_t rio_dev_is_attr_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ struct rio_dev *rdev = to_rio_dev(kobj_to_dev(kobj));
+ umode_t mode = attr->mode;
+
+ if (!(rdev->pef & RIO_PEF_SWITCH) &&
+ (attr == &dev_attr_routes.attr ||
+ attr == &dev_attr_lnext.attr ||
+ attr == &dev_attr_hopcount.attr)) {
+ /*
+ * Hide switch-specific attributes for a non-switch device.
+ */
+ mode = 0;
}
- if (err)
- pr_warning("RIO: Failed to create attribute file(s) for %s\n",
- rio_name(rdev));
-
- return err;
+ return mode;
}
-/**
- * rio_remove_sysfs_dev_files - cleanup RIO specific sysfs files
- * @rdev: device whose entries we should free
- *
- * Cleanup when @rdev is removed from sysfs.
- */
-void rio_remove_sysfs_dev_files(struct rio_dev *rdev)
-{
- device_remove_bin_file(&rdev->dev, &rio_config_attr);
- if (rdev->pef & RIO_PEF_SWITCH) {
- device_remove_file(&rdev->dev, &dev_attr_routes);
- device_remove_file(&rdev->dev, &dev_attr_lnext);
- device_remove_file(&rdev->dev, &dev_attr_hopcount);
- }
-}
+static const struct attribute_group rio_dev_group = {
+ .attrs = rio_dev_attrs,
+ .is_visible = rio_dev_is_attr_visible,
+ .bin_attrs = rio_dev_bin_attrs,
+};
+
+const struct attribute_group *rio_dev_groups[] = {
+ &rio_dev_group,
+ NULL,
+};
static ssize_t bus_scan_store(struct bus_type *bus, const char *buf,
size_t count)
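
rio-sysfs.c now publishes every attribute through a single attribute_group and hides the switch-only entries with an is_visible() callback, instead of creating and removing files by hand after registration. A hedged sketch of the same idiom, using a made-up visibility test:

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/sysfs.h>

static ssize_t always_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	return sprintf(buf, "always\n");
}
static DEVICE_ATTR_RO(always);

static ssize_t optional_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	return sprintf(buf, "optional\n");
}
static DEVICE_ATTR_RO(optional);

static struct attribute *example_attrs[] = {
	&dev_attr_always.attr,
	&dev_attr_optional.attr,
	NULL,
};

/* Hide "optional" unless driver data was set (hypothetical condition). */
static umode_t example_attr_visible(struct kobject *kobj,
				    struct attribute *attr, int n)
{
	struct device *dev = kobj_to_dev(kobj);

	if (attr == &dev_attr_optional.attr && !dev_get_drvdata(dev))
		return 0;

	return attr->mode;
}

static const struct attribute_group example_group = {
	.attrs = example_attrs,
	.is_visible = example_attr_visible,
};
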
diff --git a/drivers/rapidio/rio.c b/drivers/rapidio/rio.c
index 37042858c2db..38d949405618 100644
--- a/drivers/rapidio/rio.c
+++ b/drivers/rapidio/rio.c
@@ -192,8 +192,6 @@ int rio_add_device(struct rio_dev *rdev)
}
spin_unlock(&rio_global_list_lock);
- rio_create_sysfs_dev_files(rdev);
-
return 0;
}
EXPORT_SYMBOL_GPL(rio_add_device);
@@ -220,7 +218,6 @@ void rio_del_device(struct rio_dev *rdev, enum rio_device_state state)
}
}
spin_unlock(&rio_global_list_lock);
- rio_remove_sysfs_dev_files(rdev);
device_unregister(&rdev->dev);
}
EXPORT_SYMBOL_GPL(rio_del_device);
diff --git a/drivers/rapidio/rio.h b/drivers/rapidio/rio.h
index 9796b3fee70d..b2abf8576397 100644
--- a/drivers/rapidio/rio.h
+++ b/drivers/rapidio/rio.h
@@ -27,8 +27,6 @@ extern u32 rio_mport_get_efb(struct rio_mport *port, int local, u16 destid,
u8 hopcount, u32 from);
extern int rio_mport_chk_dev_access(struct rio_mport *mport, u16 destid,
u8 hopcount);
-extern int rio_create_sysfs_dev_files(struct rio_dev *rdev);
-extern void rio_remove_sysfs_dev_files(struct rio_dev *rdev);
extern int rio_lock_device(struct rio_mport *port, u16 destid,
u8 hopcount, int wait_ms);
extern int rio_unlock_device(struct rio_mport *port, u16 destid, u8 hopcount);
diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig
index f4cdfe94b9ec..d21c07ccc94e 100644
--- a/drivers/reset/Kconfig
+++ b/drivers/reset/Kconfig
@@ -14,6 +14,13 @@ menuconfig RESET_CONTROLLER
if RESET_CONTROLLER
+config RESET_A10SR
+ tristate "Altera Arria10 System Resource Reset"
+ depends on MFD_ALTERA_A10SR
+ help
+ This option enables support for the external reset functions for
+ peripheral PHYs on the Altera Arria10 System Resource Chip.
+
config RESET_ATH79
bool "AR71xx Reset Driver" if COMPILE_TEST
default ATH79
@@ -27,6 +34,13 @@ config RESET_BERLIN
help
This enables the reset controller driver for Marvell Berlin SoCs.
+config RESET_IMX7
+ bool "i.MX7 Reset Driver" if COMPILE_TEST
+ default SOC_IMX7D
+ select MFD_SYSCON
+ help
+ This enables the reset controller driver for i.MX7 SoCs.
+
config RESET_LPC18XX
bool "LPC18xx/43xx Reset Driver" if COMPILE_TEST
default ARCH_LPC18XX
diff --git a/drivers/reset/Makefile b/drivers/reset/Makefile
index 2cd3f6c45165..02a74db94339 100644
--- a/drivers/reset/Makefile
+++ b/drivers/reset/Makefile
@@ -2,8 +2,10 @@ obj-y += core.o
obj-y += hisilicon/
obj-$(CONFIG_ARCH_STI) += sti/
obj-$(CONFIG_ARCH_TEGRA) += tegra/
+obj-$(CONFIG_RESET_A10SR) += reset-a10sr.o
obj-$(CONFIG_RESET_ATH79) += reset-ath79.o
obj-$(CONFIG_RESET_BERLIN) += reset-berlin.o
+obj-$(CONFIG_RESET_IMX7) += reset-imx7.o
obj-$(CONFIG_RESET_LPC18XX) += reset-lpc18xx.o
obj-$(CONFIG_RESET_MESON) += reset-meson.o
obj-$(CONFIG_RESET_OXNAS) += reset-oxnas.o
@@ -15,3 +17,4 @@ obj-$(CONFIG_TI_SYSCON_RESET) += reset-ti-syscon.o
obj-$(CONFIG_RESET_UNIPHIER) += reset-uniphier.o
obj-$(CONFIG_RESET_ZX2967) += reset-zx2967.o
obj-$(CONFIG_RESET_ZYNQ) += reset-zynq.o
+
diff --git a/drivers/reset/reset-a10sr.c b/drivers/reset/reset-a10sr.c
new file mode 100644
index 000000000000..37496bd27fa2
--- /dev/null
+++ b/drivers/reset/reset-a10sr.c
@@ -0,0 +1,138 @@
+/*
+ * Copyright Intel Corporation (C) 2017. All Rights Reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * Reset driver for Altera Arria10 MAX5 System Resource Chip
+ *
+ * Adapted from reset-socfpga.c
+ */
+
+#include <linux/err.h>
+#include <linux/mfd/altera-a10sr.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/reset-controller.h>
+
+#include <dt-bindings/reset/altr,rst-mgr-a10sr.h>
+
+struct a10sr_reset {
+ struct reset_controller_dev rcdev;
+ struct regmap *regmap;
+};
+
+static inline struct a10sr_reset *to_a10sr_rst(struct reset_controller_dev *rc)
+{
+ return container_of(rc, struct a10sr_reset, rcdev);
+}
+
+static inline int a10sr_reset_shift(unsigned long id)
+{
+ switch (id) {
+ case A10SR_RESET_ENET_HPS:
+ return 1;
+ case A10SR_RESET_PCIE:
+ case A10SR_RESET_FILE:
+ case A10SR_RESET_BQSPI:
+ case A10SR_RESET_USB:
+ return id + 11;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int a10sr_reset_update(struct reset_controller_dev *rcdev,
+ unsigned long id, bool assert)
+{
+ struct a10sr_reset *a10r = to_a10sr_rst(rcdev);
+ int offset = a10sr_reset_shift(id);
+ u8 mask = ALTR_A10SR_REG_BIT_MASK(offset);
+ int index = ALTR_A10SR_HPS_RST_REG + ALTR_A10SR_REG_OFFSET(offset);
+
+ return regmap_update_bits(a10r->regmap, index, mask, assert ? 0 : mask);
+}
+
+static int a10sr_reset_assert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ return a10sr_reset_update(rcdev, id, true);
+}
+
+static int a10sr_reset_deassert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ return a10sr_reset_update(rcdev, id, false);
+}
+
+static int a10sr_reset_status(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ int ret;
+ struct a10sr_reset *a10r = to_a10sr_rst(rcdev);
+ int offset = a10sr_reset_shift(id);
+ u8 mask = ALTR_A10SR_REG_BIT_MASK(offset);
+ int index = ALTR_A10SR_HPS_RST_REG + ALTR_A10SR_REG_OFFSET(offset);
+ unsigned int value;
+
+ ret = regmap_read(a10r->regmap, index, &value);
+ if (ret < 0)
+ return ret;
+
+ return !!(value & mask);
+}
+
+static const struct reset_control_ops a10sr_reset_ops = {
+ .assert = a10sr_reset_assert,
+ .deassert = a10sr_reset_deassert,
+ .status = a10sr_reset_status,
+};
+
+static int a10sr_reset_probe(struct platform_device *pdev)
+{
+ struct altr_a10sr *a10sr = dev_get_drvdata(pdev->dev.parent);
+ struct a10sr_reset *a10r;
+
+ a10r = devm_kzalloc(&pdev->dev, sizeof(struct a10sr_reset),
+ GFP_KERNEL);
+ if (!a10r)
+ return -ENOMEM;
+
+ a10r->rcdev.owner = THIS_MODULE;
+ a10r->rcdev.nr_resets = A10SR_RESET_NUM;
+ a10r->rcdev.ops = &a10sr_reset_ops;
+ a10r->rcdev.of_node = pdev->dev.of_node;
+ a10r->regmap = a10sr->regmap;
+
+ platform_set_drvdata(pdev, a10r);
+
+ return devm_reset_controller_register(&pdev->dev, &a10r->rcdev);
+}
+
+static const struct of_device_id a10sr_reset_of_match[] = {
+ { .compatible = "altr,a10sr-reset" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, a10sr_reset_of_match);
+
+static struct platform_driver a10sr_reset_driver = {
+ .probe = a10sr_reset_probe,
+ .driver = {
+ .name = "altr_a10sr_reset",
+ },
+};
+module_platform_driver(a10sr_reset_driver);
+
+MODULE_AUTHOR("Thor Thayer <thor.thayer@linux.intel.com>");
+MODULE_DESCRIPTION("Altera Arria10 System Resource Reset Controller Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/reset/reset-ath79.c b/drivers/reset/reset-ath79.c
index 6b97631f5489..2674880e5492 100644
--- a/drivers/reset/reset-ath79.c
+++ b/drivers/reset/reset-ath79.c
@@ -1,4 +1,7 @@
/*
+ * AR71xx Reset Controller Driver
+ * Author: Alban Bedel
+ *
* Copyright (C) 2015 Alban Bedel <albeu@free.fr>
*
* This program is free software; you can redistribute it and/or modify
@@ -13,7 +16,7 @@
*/
#include <linux/io.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/reset-controller.h>
#include <linux/reboot.h>
@@ -127,31 +130,17 @@ static int ath79_reset_probe(struct platform_device *pdev)
return 0;
}
-static int ath79_reset_remove(struct platform_device *pdev)
-{
- struct ath79_reset *ath79_reset = platform_get_drvdata(pdev);
-
- unregister_restart_handler(&ath79_reset->restart_nb);
-
- return 0;
-}
-
static const struct of_device_id ath79_reset_dt_ids[] = {
{ .compatible = "qca,ar7100-reset", },
{ },
};
-MODULE_DEVICE_TABLE(of, ath79_reset_dt_ids);
static struct platform_driver ath79_reset_driver = {
.probe = ath79_reset_probe,
- .remove = ath79_reset_remove,
.driver = {
- .name = "ath79-reset",
- .of_match_table = ath79_reset_dt_ids,
+ .name = "ath79-reset",
+ .of_match_table = ath79_reset_dt_ids,
+ .suppress_bind_attrs = true,
},
};
-module_platform_driver(ath79_reset_driver);
-
-MODULE_AUTHOR("Alban Bedel <albeu@free.fr>");
-MODULE_DESCRIPTION("AR71xx Reset Controller Driver");
-MODULE_LICENSE("GPL");
+builtin_platform_driver(ath79_reset_driver);
diff --git a/drivers/reset/reset-imx7.c b/drivers/reset/reset-imx7.c
new file mode 100644
index 000000000000..4db177bc89bc
--- /dev/null
+++ b/drivers/reset/reset-imx7.c
@@ -0,0 +1,158 @@
+/*
+ * Copyright (c) 2017, Impinj, Inc.
+ *
+ * i.MX7 System Reset Controller (SRC) driver
+ *
+ * Author: Andrey Smirnov <andrew.smirnov@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/mfd/syscon.h>
+#include <linux/platform_device.h>
+#include <linux/reset-controller.h>
+#include <linux/regmap.h>
+#include <dt-bindings/reset/imx7-reset.h>
+
+struct imx7_src {
+ struct reset_controller_dev rcdev;
+ struct regmap *regmap;
+};
+
+enum imx7_src_registers {
+ SRC_A7RCR0 = 0x0004,
+ SRC_M4RCR = 0x000c,
+ SRC_ERCR = 0x0014,
+ SRC_HSICPHY_RCR = 0x001c,
+ SRC_USBOPHY1_RCR = 0x0020,
+ SRC_USBOPHY2_RCR = 0x0024,
+ SRC_MIPIPHY_RCR = 0x0028,
+ SRC_PCIEPHY_RCR = 0x002c,
+ SRC_DDRC_RCR = 0x1000,
+};
+
+struct imx7_src_signal {
+ unsigned int offset, bit;
+};
+
+static const struct imx7_src_signal imx7_src_signals[IMX7_RESET_NUM] = {
+ [IMX7_RESET_A7_CORE_POR_RESET0] = { SRC_A7RCR0, BIT(0) },
+ [IMX7_RESET_A7_CORE_POR_RESET1] = { SRC_A7RCR0, BIT(1) },
+ [IMX7_RESET_A7_CORE_RESET0] = { SRC_A7RCR0, BIT(4) },
+ [IMX7_RESET_A7_CORE_RESET1] = { SRC_A7RCR0, BIT(5) },
+ [IMX7_RESET_A7_DBG_RESET0] = { SRC_A7RCR0, BIT(8) },
+ [IMX7_RESET_A7_DBG_RESET1] = { SRC_A7RCR0, BIT(9) },
+ [IMX7_RESET_A7_ETM_RESET0] = { SRC_A7RCR0, BIT(12) },
+ [IMX7_RESET_A7_ETM_RESET1] = { SRC_A7RCR0, BIT(13) },
+ [IMX7_RESET_A7_SOC_DBG_RESET] = { SRC_A7RCR0, BIT(20) },
+ [IMX7_RESET_A7_L2RESET] = { SRC_A7RCR0, BIT(21) },
+ [IMX7_RESET_SW_M4C_RST] = { SRC_M4RCR, BIT(1) },
+ [IMX7_RESET_SW_M4P_RST] = { SRC_M4RCR, BIT(2) },
+ [IMX7_RESET_EIM_RST] = { SRC_ERCR, BIT(0) },
+ [IMX7_RESET_HSICPHY_PORT_RST] = { SRC_HSICPHY_RCR, BIT(1) },
+ [IMX7_RESET_USBPHY1_POR] = { SRC_USBOPHY1_RCR, BIT(0) },
+ [IMX7_RESET_USBPHY1_PORT_RST] = { SRC_USBOPHY1_RCR, BIT(1) },
+ [IMX7_RESET_USBPHY2_POR] = { SRC_USBOPHY2_RCR, BIT(0) },
+ [IMX7_RESET_USBPHY2_PORT_RST] = { SRC_USBOPHY2_RCR, BIT(1) },
+ [IMX7_RESET_MIPI_PHY_MRST] = { SRC_MIPIPHY_RCR, BIT(1) },
+ [IMX7_RESET_MIPI_PHY_SRST] = { SRC_MIPIPHY_RCR, BIT(2) },
+ [IMX7_RESET_PCIEPHY] = { SRC_PCIEPHY_RCR, BIT(2) | BIT(1) },
+ [IMX7_RESET_PCIEPHY_PERST] = { SRC_PCIEPHY_RCR, BIT(3) },
+ [IMX7_RESET_PCIE_CTRL_APPS_EN] = { SRC_PCIEPHY_RCR, BIT(6) },
+ [IMX7_RESET_DDRC_PRST] = { SRC_DDRC_RCR, BIT(0) },
+ [IMX7_RESET_DDRC_CORE_RST] = { SRC_DDRC_RCR, BIT(1) },
+};
+
+static struct imx7_src *to_imx7_src(struct reset_controller_dev *rcdev)
+{
+ return container_of(rcdev, struct imx7_src, rcdev);
+}
+
+static int imx7_reset_set(struct reset_controller_dev *rcdev,
+ unsigned long id, bool assert)
+{
+ struct imx7_src *imx7src = to_imx7_src(rcdev);
+ const struct imx7_src_signal *signal = &imx7_src_signals[id];
+ unsigned int value = 0;
+
+ switch (id) {
+ case IMX7_RESET_PCIEPHY:
+ /*
+ * wait for more than 10us to release phy g_rst and
+ * btnrst
+ */
+ if (!assert)
+ udelay(10);
+ break;
+
+ case IMX7_RESET_PCIE_CTRL_APPS_EN:
+ value = (assert) ? 0 : signal->bit;
+ break;
+ }
+
+ return regmap_update_bits(imx7src->regmap,
+ signal->offset, signal->bit, value);
+}
+
+static int imx7_reset_assert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ return imx7_reset_set(rcdev, id, true);
+}
+
+static int imx7_reset_deassert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ return imx7_reset_set(rcdev, id, false);
+}
+
+static const struct reset_control_ops imx7_reset_ops = {
+ .assert = imx7_reset_assert,
+ .deassert = imx7_reset_deassert,
+};
+
+static int imx7_reset_probe(struct platform_device *pdev)
+{
+ struct imx7_src *imx7src;
+ struct device *dev = &pdev->dev;
+ struct regmap_config config = { .name = "src" };
+
+ imx7src = devm_kzalloc(dev, sizeof(*imx7src), GFP_KERNEL);
+ if (!imx7src)
+ return -ENOMEM;
+
+ imx7src->regmap = syscon_node_to_regmap(dev->of_node);
+ if (IS_ERR(imx7src->regmap)) {
+ dev_err(dev, "Unable to get imx7-src regmap");
+ return PTR_ERR(imx7src->regmap);
+ }
+ regmap_attach_dev(dev, imx7src->regmap, &config);
+
+ imx7src->rcdev.owner = THIS_MODULE;
+ imx7src->rcdev.nr_resets = IMX7_RESET_NUM;
+ imx7src->rcdev.ops = &imx7_reset_ops;
+ imx7src->rcdev.of_node = dev->of_node;
+
+ return devm_reset_controller_register(dev, &imx7src->rcdev);
+}
+
+static const struct of_device_id imx7_reset_dt_ids[] = {
+ { .compatible = "fsl,imx7d-src", },
+ { /* sentinel */ },
+};
+
+static struct platform_driver imx7_reset_driver = {
+ .probe = imx7_reset_probe,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = imx7_reset_dt_ids,
+ },
+};
+builtin_platform_driver(imx7_reset_driver);
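
The i.MX7 SRC driver maps each reset ID from the DT binding onto a {register offset, bit} pair and drives it with a single regmap_update_bits() call. A hedged sketch of that table-driven lookup with made-up signals:

#include <linux/bitops.h>
#include <linux/regmap.h>
#include <linux/types.h>

struct example_signal {
	unsigned int offset;
	unsigned int bit;
};

/* Hypothetical IDs 0 and 1; real drivers index this by the binding IDs. */
static const struct example_signal example_signals[] = {
	{ 0x04, BIT(0) },
	{ 0x0c, BIT(1) },
};

static int example_set(struct regmap *map, unsigned long id, bool assert)
{
	const struct example_signal *sig = &example_signals[id];

	return regmap_update_bits(map, sig->offset, sig->bit,
				  assert ? sig->bit : 0);
}
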
diff --git a/drivers/reset/reset-meson.c b/drivers/reset/reset-meson.c
index c32f11a30c5f..a8b915eb8b58 100644
--- a/drivers/reset/reset-meson.c
+++ b/drivers/reset/reset-meson.c
@@ -1,4 +1,6 @@
/*
+ * Amlogic Meson Reset Controller driver
+ *
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
@@ -53,7 +55,7 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/err.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/platform_device.h>
@@ -95,7 +97,6 @@ static const struct of_device_id meson_reset_dt_ids[] = {
{ .compatible = "amlogic,meson-gxbb-reset", },
{ /* sentinel */ },
};
-MODULE_DEVICE_TABLE(of, meson_reset_dt_ids);
static int meson_reset_probe(struct platform_device *pdev)
{
@@ -128,9 +129,4 @@ static struct platform_driver meson_reset_driver = {
.of_match_table = meson_reset_dt_ids,
},
};
-
-module_platform_driver(meson_reset_driver);
-
-MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>");
-MODULE_DESCRIPTION("Amlogic Meson Reset Controller driver");
-MODULE_LICENSE("Dual BSD/GPL");
+builtin_platform_driver(meson_reset_driver);
diff --git a/drivers/reset/reset-oxnas.c b/drivers/reset/reset-oxnas.c
index 0d9036dea010..cf5b9742b86e 100644
--- a/drivers/reset/reset-oxnas.c
+++ b/drivers/reset/reset-oxnas.c
@@ -18,7 +18,7 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/err.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/reset-controller.h>
@@ -83,7 +83,6 @@ static const struct of_device_id oxnas_reset_dt_ids[] = {
{ .compatible = "oxsemi,ox820-reset", },
{ /* sentinel */ },
};
-MODULE_DEVICE_TABLE(of, oxnas_reset_dt_ids);
static int oxnas_reset_probe(struct platform_device *pdev)
{
@@ -123,5 +122,4 @@ static struct platform_driver oxnas_reset_driver = {
.of_match_table = oxnas_reset_dt_ids,
},
};
-
-module_platform_driver(oxnas_reset_driver);
+builtin_platform_driver(oxnas_reset_driver);
diff --git a/drivers/reset/reset-pistachio.c b/drivers/reset/reset-pistachio.c
index bbc4c06dd33b..11d651b44e81 100644
--- a/drivers/reset/reset-pistachio.c
+++ b/drivers/reset/reset-pistachio.c
@@ -10,7 +10,7 @@
* version 2, as published by the Free Software Foundation.
*/
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
@@ -128,7 +128,6 @@ static const struct of_device_id pistachio_reset_dt_ids[] = {
{ .compatible = "img,pistachio-reset", },
{ /* sentinel */ },
};
-MODULE_DEVICE_TABLE(of, pistachio_reset_dt_ids);
static struct platform_driver pistachio_reset_driver = {
.probe = pistachio_reset_probe,
@@ -137,8 +136,4 @@ static struct platform_driver pistachio_reset_driver = {
.of_match_table = pistachio_reset_dt_ids,
},
};
-module_platform_driver(pistachio_reset_driver);
-
-MODULE_AUTHOR("Damien Horsley <Damien.Horsley@imgtec.com>");
-MODULE_DESCRIPTION("Pistacho Reset Controller Driver");
-MODULE_LICENSE("GPL v2");
+builtin_platform_driver(pistachio_reset_driver);
diff --git a/drivers/reset/reset-socfpga.c b/drivers/reset/reset-socfpga.c
index 43e4a9f39b9b..07224c019892 100644
--- a/drivers/reset/reset-socfpga.c
+++ b/drivers/reset/reset-socfpga.c
@@ -25,7 +25,8 @@
#include <linux/spinlock.h>
#include <linux/types.h>
-#define NR_BANKS 4
+#define BANK_INCREMENT 4
+#define NR_BANKS 8
struct socfpga_reset_data {
spinlock_t lock;
@@ -46,8 +47,8 @@ static int socfpga_reset_assert(struct reset_controller_dev *rcdev,
spin_lock_irqsave(&data->lock, flags);
- reg = readl(data->membase + (bank * NR_BANKS));
- writel(reg | BIT(offset), data->membase + (bank * NR_BANKS));
+ reg = readl(data->membase + (bank * BANK_INCREMENT));
+ writel(reg | BIT(offset), data->membase + (bank * BANK_INCREMENT));
spin_unlock_irqrestore(&data->lock, flags);
return 0;
@@ -67,8 +68,8 @@ static int socfpga_reset_deassert(struct reset_controller_dev *rcdev,
spin_lock_irqsave(&data->lock, flags);
- reg = readl(data->membase + (bank * NR_BANKS));
- writel(reg & ~BIT(offset), data->membase + (bank * NR_BANKS));
+ reg = readl(data->membase + (bank * BANK_INCREMENT));
+ writel(reg & ~BIT(offset), data->membase + (bank * BANK_INCREMENT));
spin_unlock_irqrestore(&data->lock, flags);
@@ -84,7 +85,7 @@ static int socfpga_reset_status(struct reset_controller_dev *rcdev,
int offset = id % BITS_PER_LONG;
u32 reg;
- reg = readl(data->membase + (bank * NR_BANKS));
+ reg = readl(data->membase + (bank * BANK_INCREMENT));
return !(reg & BIT(offset));
}
diff --git a/drivers/reset/reset-sunxi.c b/drivers/reset/reset-sunxi.c
index b44f6b5f87b6..cd585cd2f04d 100644
--- a/drivers/reset/reset-sunxi.c
+++ b/drivers/reset/reset-sunxi.c
@@ -34,15 +34,16 @@ static int sunxi_reset_assert(struct reset_controller_dev *rcdev,
struct sunxi_reset_data *data = container_of(rcdev,
struct sunxi_reset_data,
rcdev);
- int bank = id / BITS_PER_LONG;
- int offset = id % BITS_PER_LONG;
+ int reg_width = sizeof(u32);
+ int bank = id / (reg_width * BITS_PER_BYTE);
+ int offset = id % (reg_width * BITS_PER_BYTE);
unsigned long flags;
u32 reg;
spin_lock_irqsave(&data->lock, flags);
- reg = readl(data->membase + (bank * 4));
- writel(reg & ~BIT(offset), data->membase + (bank * 4));
+ reg = readl(data->membase + (bank * reg_width));
+ writel(reg & ~BIT(offset), data->membase + (bank * reg_width));
spin_unlock_irqrestore(&data->lock, flags);
@@ -55,15 +56,16 @@ static int sunxi_reset_deassert(struct reset_controller_dev *rcdev,
struct sunxi_reset_data *data = container_of(rcdev,
struct sunxi_reset_data,
rcdev);
- int bank = id / BITS_PER_LONG;
- int offset = id % BITS_PER_LONG;
+ int reg_width = sizeof(u32);
+ int bank = id / (reg_width * BITS_PER_BYTE);
+ int offset = id % (reg_width * BITS_PER_BYTE);
unsigned long flags;
u32 reg;
spin_lock_irqsave(&data->lock, flags);
- reg = readl(data->membase + (bank * 4));
- writel(reg | BIT(offset), data->membase + (bank * 4));
+ reg = readl(data->membase + (bank * reg_width));
+ writel(reg | BIT(offset), data->membase + (bank * reg_width));
spin_unlock_irqrestore(&data->lock, flags);
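
Both the socfpga and sunxi fixes make the id-to-register arithmetic explicit: a reset ID selects a 32-bit register and a bit within it, and the register index is multiplied by the register width in bytes rather than by the number of banks. A hedged sketch of that mapping:

#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/types.h>

/* Sketch: 32 reset lines per 32-bit register, 4-byte stride between them. */
static void example_update(void __iomem *base, unsigned long id, bool set)
{
	int reg_width = sizeof(u32);
	int bank = id / (reg_width * BITS_PER_BYTE);
	int offset = id % (reg_width * BITS_PER_BYTE);
	u32 reg = readl(base + bank * reg_width);

	if (set)
		reg |= BIT(offset);
	else
		reg &= ~BIT(offset);

	writel(reg, base + bank * reg_width);
}
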
diff --git a/drivers/reset/reset-uniphier.c b/drivers/reset/reset-uniphier.c
index 9c11be3d3450..c4ba89832796 100644
--- a/drivers/reset/reset-uniphier.c
+++ b/drivers/reset/reset-uniphier.c
@@ -50,6 +50,15 @@ struct uniphier_reset_data {
}
/* System reset data */
+#define UNIPHIER_SLD3_SYS_RESET_NAND(id) \
+ UNIPHIER_RESETX((id), 0x2004, 2)
+
+#define UNIPHIER_LD11_SYS_RESET_NAND(id) \
+ UNIPHIER_RESETX((id), 0x200c, 0)
+
+#define UNIPHIER_LD11_SYS_RESET_EMMC(id) \
+ UNIPHIER_RESETX((id), 0x200c, 2)
+
#define UNIPHIER_SLD3_SYS_RESET_STDMAC(id) \
UNIPHIER_RESETX((id), 0x2000, 10)
@@ -65,12 +74,14 @@ struct uniphier_reset_data {
#define UNIPHIER_PRO4_SYS_RESET_USB3(id, ch) \
UNIPHIER_RESETX((id), 0x2000 + 0x4 * (ch), 17)
-const struct uniphier_reset_data uniphier_sld3_sys_reset_data[] = {
+static const struct uniphier_reset_data uniphier_sld3_sys_reset_data[] = {
+ UNIPHIER_SLD3_SYS_RESET_NAND(2),
UNIPHIER_SLD3_SYS_RESET_STDMAC(8), /* Ether, HSC, MIO */
UNIPHIER_RESET_END,
};
-const struct uniphier_reset_data uniphier_pro4_sys_reset_data[] = {
+static const struct uniphier_reset_data uniphier_pro4_sys_reset_data[] = {
+ UNIPHIER_SLD3_SYS_RESET_NAND(2),
UNIPHIER_SLD3_SYS_RESET_STDMAC(8), /* HSC, MIO, RLE */
UNIPHIER_PRO4_SYS_RESET_GIO(12), /* Ether, SATA, USB3 */
UNIPHIER_PRO4_SYS_RESET_USB3(14, 0),
@@ -78,7 +89,8 @@ const struct uniphier_reset_data uniphier_pro4_sys_reset_data[] = {
UNIPHIER_RESET_END,
};
-const struct uniphier_reset_data uniphier_pro5_sys_reset_data[] = {
+static const struct uniphier_reset_data uniphier_pro5_sys_reset_data[] = {
+ UNIPHIER_SLD3_SYS_RESET_NAND(2),
UNIPHIER_SLD3_SYS_RESET_STDMAC(8), /* HSC */
UNIPHIER_PRO4_SYS_RESET_GIO(12), /* PCIe, USB3 */
UNIPHIER_PRO4_SYS_RESET_USB3(14, 0),
@@ -86,7 +98,8 @@ const struct uniphier_reset_data uniphier_pro5_sys_reset_data[] = {
UNIPHIER_RESET_END,
};
-const struct uniphier_reset_data uniphier_pxs2_sys_reset_data[] = {
+static const struct uniphier_reset_data uniphier_pxs2_sys_reset_data[] = {
+ UNIPHIER_SLD3_SYS_RESET_NAND(2),
UNIPHIER_SLD3_SYS_RESET_STDMAC(8), /* HSC, RLE */
UNIPHIER_PRO4_SYS_RESET_USB3(14, 0),
UNIPHIER_PRO4_SYS_RESET_USB3(15, 1),
@@ -100,12 +113,16 @@ const struct uniphier_reset_data uniphier_pxs2_sys_reset_data[] = {
UNIPHIER_RESET_END,
};
-const struct uniphier_reset_data uniphier_ld11_sys_reset_data[] = {
+static const struct uniphier_reset_data uniphier_ld11_sys_reset_data[] = {
+ UNIPHIER_LD11_SYS_RESET_NAND(2),
+ UNIPHIER_LD11_SYS_RESET_EMMC(4),
UNIPHIER_LD11_SYS_RESET_STDMAC(8), /* HSC, MIO */
UNIPHIER_RESET_END,
};
-const struct uniphier_reset_data uniphier_ld20_sys_reset_data[] = {
+static const struct uniphier_reset_data uniphier_ld20_sys_reset_data[] = {
+ UNIPHIER_LD11_SYS_RESET_NAND(2),
+ UNIPHIER_LD11_SYS_RESET_EMMC(4),
UNIPHIER_LD11_SYS_RESET_STDMAC(8), /* HSC */
UNIPHIER_LD20_SYS_RESET_GIO(12), /* PCIe, USB3 */
UNIPHIER_RESETX(16, 0x200c, 12), /* USB30-PHY0 */
@@ -134,7 +151,7 @@ const struct uniphier_reset_data uniphier_ld20_sys_reset_data[] = {
#define UNIPHIER_MIO_RESET_DMAC(id) \
UNIPHIER_RESETX((id), 0x110, 17)
-const struct uniphier_reset_data uniphier_sld3_mio_reset_data[] = {
+static const struct uniphier_reset_data uniphier_sld3_mio_reset_data[] = {
UNIPHIER_MIO_RESET_SD(0, 0),
UNIPHIER_MIO_RESET_SD(1, 1),
UNIPHIER_MIO_RESET_SD(2, 2),
@@ -154,7 +171,7 @@ const struct uniphier_reset_data uniphier_sld3_mio_reset_data[] = {
UNIPHIER_RESET_END,
};
-const struct uniphier_reset_data uniphier_pro5_sd_reset_data[] = {
+static const struct uniphier_reset_data uniphier_pro5_sd_reset_data[] = {
UNIPHIER_MIO_RESET_SD(0, 0),
UNIPHIER_MIO_RESET_SD(1, 1),
UNIPHIER_MIO_RESET_EMMC_HW_RESET(6, 1),
@@ -171,7 +188,7 @@ const struct uniphier_reset_data uniphier_pro5_sd_reset_data[] = {
#define UNIPHIER_PERI_RESET_FI2C(id, ch) \
UNIPHIER_RESETX((id), 0x114, 24 + (ch))
-const struct uniphier_reset_data uniphier_ld4_peri_reset_data[] = {
+static const struct uniphier_reset_data uniphier_ld4_peri_reset_data[] = {
UNIPHIER_PERI_RESET_UART(0, 0),
UNIPHIER_PERI_RESET_UART(1, 1),
UNIPHIER_PERI_RESET_UART(2, 2),
@@ -184,7 +201,7 @@ const struct uniphier_reset_data uniphier_ld4_peri_reset_data[] = {
UNIPHIER_RESET_END,
};
-const struct uniphier_reset_data uniphier_pro4_peri_reset_data[] = {
+static const struct uniphier_reset_data uniphier_pro4_peri_reset_data[] = {
UNIPHIER_PERI_RESET_UART(0, 0),
UNIPHIER_PERI_RESET_UART(1, 1),
UNIPHIER_PERI_RESET_UART(2, 2),
diff --git a/drivers/rtc/class.c b/drivers/rtc/class.c
index 74fd9746aeca..5fb439897fe1 100644
--- a/drivers/rtc/class.c
+++ b/drivers/rtc/class.c
@@ -195,6 +195,8 @@ struct rtc_device *rtc_device_register(const char *name, struct device *dev,
goto exit_ida;
}
+ device_initialize(&rtc->dev);
+
rtc->id = id;
rtc->ops = ops;
rtc->owner = owner;
@@ -233,14 +235,19 @@ struct rtc_device *rtc_device_register(const char *name, struct device *dev,
rtc_dev_prepare(rtc);
- err = device_register(&rtc->dev);
+ err = cdev_device_add(&rtc->char_dev, &rtc->dev);
if (err) {
+ dev_warn(&rtc->dev, "%s: failed to add char device %d:%d\n",
+ rtc->name, MAJOR(rtc->dev.devt), rtc->id);
+
/* This will free both memory and the ID */
put_device(&rtc->dev);
goto exit;
+ } else {
+ dev_dbg(&rtc->dev, "%s: dev (%d:%d)\n", rtc->name,
+ MAJOR(rtc->dev.devt), rtc->id);
}
- rtc_dev_add_device(rtc);
rtc_proc_add_device(rtc);
dev_info(dev, "rtc core: registered %s as %s\n",
@@ -271,9 +278,8 @@ void rtc_device_unregister(struct rtc_device *rtc)
* Remove innards of this RTC, then disable it, before
* letting any rtc_class_open() users access it again
*/
- rtc_dev_del_device(rtc);
rtc_proc_del_device(rtc);
- device_del(&rtc->dev);
+ cdev_device_del(&rtc->char_dev, &rtc->dev);
rtc->ops = NULL;
mutex_unlock(&rtc->ops_lock);
put_device(&rtc->dev);
diff --git a/drivers/rtc/rtc-core.h b/drivers/rtc/rtc-core.h
index a098aea197fc..7a4ed2f7c7d7 100644
--- a/drivers/rtc/rtc-core.h
+++ b/drivers/rtc/rtc-core.h
@@ -3,8 +3,6 @@
extern void __init rtc_dev_init(void);
extern void __exit rtc_dev_exit(void);
extern void rtc_dev_prepare(struct rtc_device *rtc);
-extern void rtc_dev_add_device(struct rtc_device *rtc);
-extern void rtc_dev_del_device(struct rtc_device *rtc);
#else
@@ -20,14 +18,6 @@ static inline void rtc_dev_prepare(struct rtc_device *rtc)
{
}
-static inline void rtc_dev_add_device(struct rtc_device *rtc)
-{
-}
-
-static inline void rtc_dev_del_device(struct rtc_device *rtc)
-{
-}
-
#endif
#ifdef CONFIG_RTC_INTF_PROC
diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
index 6dc8f29697ab..e81a8711fea7 100644
--- a/drivers/rtc/rtc-dev.c
+++ b/drivers/rtc/rtc-dev.c
@@ -477,23 +477,6 @@ void rtc_dev_prepare(struct rtc_device *rtc)
cdev_init(&rtc->char_dev, &rtc_dev_fops);
rtc->char_dev.owner = rtc->owner;
- rtc->char_dev.kobj.parent = &rtc->dev.kobj;
-}
-
-void rtc_dev_add_device(struct rtc_device *rtc)
-{
- if (cdev_add(&rtc->char_dev, rtc->dev.devt, 1))
- dev_warn(&rtc->dev, "%s: failed to add char device %d:%d\n",
- rtc->name, MAJOR(rtc_devt), rtc->id);
- else
- dev_dbg(&rtc->dev, "%s: dev (%d:%d)\n", rtc->name,
- MAJOR(rtc_devt), rtc->id);
-}
-
-void rtc_dev_del_device(struct rtc_device *rtc)
-{
- if (rtc->dev.devt)
- cdev_del(&rtc->char_dev);
}
void __init rtc_dev_init(void)
diff --git a/drivers/s390/block/Kconfig b/drivers/s390/block/Kconfig
index 4a3b62326183..0acb8c2f9475 100644
--- a/drivers/s390/block/Kconfig
+++ b/drivers/s390/block/Kconfig
@@ -14,6 +14,7 @@ config BLK_DEV_XPRAM
config DCSSBLK
def_tristate m
+ select DAX
prompt "DCSSBLK support"
depends on S390 && BLOCK
help
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index 415d10a67b7a..36e5280af3e4 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -18,6 +18,7 @@
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/pfn_t.h>
+#include <linux/dax.h>
#include <asm/extmem.h>
#include <asm/io.h>
@@ -30,8 +31,8 @@ static int dcssblk_open(struct block_device *bdev, fmode_t mode);
static void dcssblk_release(struct gendisk *disk, fmode_t mode);
static blk_qc_t dcssblk_make_request(struct request_queue *q,
struct bio *bio);
-static long dcssblk_direct_access(struct block_device *bdev, sector_t secnum,
- void **kaddr, pfn_t *pfn, long size);
+static long dcssblk_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
+ long nr_pages, void **kaddr, pfn_t *pfn);
static char dcssblk_segments[DCSSBLK_PARM_LEN] = "\0";
@@ -40,7 +41,10 @@ static const struct block_device_operations dcssblk_devops = {
.owner = THIS_MODULE,
.open = dcssblk_open,
.release = dcssblk_release,
- .direct_access = dcssblk_direct_access,
+};
+
+static const struct dax_operations dcssblk_dax_ops = {
+ .direct_access = dcssblk_dax_direct_access,
};
struct dcssblk_dev_info {
@@ -57,6 +61,7 @@ struct dcssblk_dev_info {
struct request_queue *dcssblk_queue;
int num_of_segments;
struct list_head seg_list;
+ struct dax_device *dax_dev;
};
struct segment_info {
@@ -389,6 +394,8 @@ removeseg:
}
list_del(&dev_info->lh);
+ kill_dax(dev_info->dax_dev);
+ put_dax(dev_info->dax_dev);
del_gendisk(dev_info->gd);
blk_cleanup_queue(dev_info->dcssblk_queue);
dev_info->gd->queue = NULL;
@@ -654,6 +661,13 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
if (rc)
goto put_dev;
+ dev_info->dax_dev = alloc_dax(dev_info, dev_info->gd->disk_name,
+ &dcssblk_dax_ops);
+ if (!dev_info->dax_dev) {
+ rc = -ENOMEM;
+ goto put_dev;
+ }
+
get_device(&dev_info->dev);
device_add_disk(&dev_info->dev, dev_info->gd);
@@ -752,6 +766,8 @@ dcssblk_remove_store(struct device *dev, struct device_attribute *attr, const ch
}
list_del(&dev_info->lh);
+ kill_dax(dev_info->dax_dev);
+ put_dax(dev_info->dax_dev);
del_gendisk(dev_info->gd);
blk_cleanup_queue(dev_info->dcssblk_queue);
dev_info->gd->queue = NULL;
@@ -883,21 +899,26 @@ fail:
}
static long
-dcssblk_direct_access (struct block_device *bdev, sector_t secnum,
- void **kaddr, pfn_t *pfn, long size)
+__dcssblk_direct_access(struct dcssblk_dev_info *dev_info, pgoff_t pgoff,
+ long nr_pages, void **kaddr, pfn_t *pfn)
{
- struct dcssblk_dev_info *dev_info;
- unsigned long offset, dev_sz;
+ resource_size_t offset = pgoff * PAGE_SIZE;
+ unsigned long dev_sz;
- dev_info = bdev->bd_disk->private_data;
- if (!dev_info)
- return -ENODEV;
dev_sz = dev_info->end - dev_info->start + 1;
- offset = secnum * 512;
*kaddr = (void *) dev_info->start + offset;
*pfn = __pfn_to_pfn_t(PFN_DOWN(dev_info->start + offset), PFN_DEV);
- return dev_sz - offset;
+ return (dev_sz - offset) / PAGE_SIZE;
+}
+
+static long
+dcssblk_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
+ long nr_pages, void **kaddr, pfn_t *pfn)
+{
+ struct dcssblk_dev_info *dev_info = dax_get_private(dax_dev);
+
+ return __dcssblk_direct_access(dev_info, pgoff, nr_pages, kaddr, pfn);
}
static void
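
dcssblk now exposes its segment through a dax_device, and direct_access works in pages: the offset is pgoff * PAGE_SIZE and the return value is the number of remaining pages rather than a byte count. A hedged sketch of that arithmetic for a linearly mapped region:

#include <linux/mm.h>
#include <linux/pfn.h>
#include <linux/pfn_t.h>
#include <linux/types.h>

/* Sketch: report the kernel address, pfn and remaining pages for pgoff. */
static long example_direct_access(unsigned long start, unsigned long end,
				  pgoff_t pgoff, void **kaddr, pfn_t *pfn)
{
	resource_size_t offset = pgoff * PAGE_SIZE;
	unsigned long size = end - start + 1;

	*kaddr = (void *)(start + offset);
	*pfn = __pfn_to_pfn_t(PFN_DOWN(start + offset), PFN_DEV);

	return (size - offset) / PAGE_SIZE;
}
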
diff --git a/drivers/s390/char/sclp_early.c b/drivers/s390/char/sclp_early.c
index 519ec1787117..efd84d1d178b 100644
--- a/drivers/s390/char/sclp_early.c
+++ b/drivers/s390/char/sclp_early.c
@@ -40,7 +40,8 @@ struct read_info_sccb {
u8 fac85; /* 85 */
u8 _pad_86[91 - 86]; /* 86-90 */
u8 flags; /* 91 */
- u8 _pad_92[99 - 92]; /* 92-98 */
+ u8 _pad_92[98 - 92]; /* 92-97 */
+ u8 fac98; /* 98 */
u8 hamaxpow; /* 99 */
u32 rnsize2; /* 100-103 */
u64 rnmax2; /* 104-111 */
@@ -99,6 +100,7 @@ static void __init sclp_early_facilities_detect(struct read_info_sccb *sccb)
sclp.has_pfmfi = !!(sccb->fac117 & 0x40);
sclp.has_ibs = !!(sccb->fac117 & 0x20);
sclp.has_hvs = !!(sccb->fac119 & 0x80);
+ sclp.has_kss = !!(sccb->fac98 & 0x01);
if (sccb->fac85 & 0x02)
S390_lowcore.machine_flags |= MACHINE_FLAG_ESOP;
sclp.rnmax = sccb->rnmax ? sccb->rnmax : sccb->rnmax2;
diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c
index c7be7bb37209..35380a58d3f0 100644
--- a/drivers/scsi/BusLogic.c
+++ b/drivers/scsi/BusLogic.c
@@ -3009,7 +3009,7 @@ static int blogic_hostreset(struct scsi_cmnd *SCpnt)
spin_lock_irq(SCpnt->device->host->host_lock);
- blogic_inc_count(&stats->adatper_reset_req);
+ blogic_inc_count(&stats->adapter_reset_req);
rc = blogic_resetadapter(adapter, false);
spin_unlock_irq(SCpnt->device->host->host_lock);
@@ -3560,8 +3560,16 @@ Target Requested Completed Requested Completed Requested Completed\n\
struct blogic_tgt_flags *tgt_flags = &adapter->tgt_flags[tgt];
if (!tgt_flags->tgt_exists)
continue;
- seq_printf(m, "\
- %2d %5d %5d %5d %5d %5d %5d %5d %5d %5d\n", tgt, tgt_stats[tgt].aborts_request, tgt_stats[tgt].aborts_tried, tgt_stats[tgt].aborts_done, tgt_stats[tgt].bdr_request, tgt_stats[tgt].bdr_tried, tgt_stats[tgt].bdr_done, tgt_stats[tgt].adatper_reset_req, tgt_stats[tgt].adapter_reset_attempt, tgt_stats[tgt].adapter_reset_done);
+ seq_printf(m, " %2d %5d %5d %5d %5d %5d %5d %5d %5d %5d\n",
+ tgt, tgt_stats[tgt].aborts_request,
+ tgt_stats[tgt].aborts_tried,
+ tgt_stats[tgt].aborts_done,
+ tgt_stats[tgt].bdr_request,
+ tgt_stats[tgt].bdr_tried,
+ tgt_stats[tgt].bdr_done,
+ tgt_stats[tgt].adapter_reset_req,
+ tgt_stats[tgt].adapter_reset_attempt,
+ tgt_stats[tgt].adapter_reset_done);
}
seq_printf(m, "\nExternal Host Adapter Resets: %d\n", adapter->ext_resets);
seq_printf(m, "Host Adapter Internal Errors: %d\n", adapter->adapter_intern_errors);
diff --git a/drivers/scsi/BusLogic.h b/drivers/scsi/BusLogic.h
index b53ec2f1e8cd..8d47e2c88d24 100644
--- a/drivers/scsi/BusLogic.h
+++ b/drivers/scsi/BusLogic.h
@@ -935,7 +935,7 @@ struct blogic_tgt_stats {
unsigned short bdr_request;
unsigned short bdr_tried;
unsigned short bdr_done;
- unsigned short adatper_reset_req;
+ unsigned short adapter_reset_req;
unsigned short adapter_reset_attempt;
unsigned short adapter_reset_done;
};
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index e3e93def722b..43d88389e899 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -1678,8 +1678,8 @@ int aac_issue_bmic_identify(struct aac_dev *dev, u32 bus, u32 target)
sizeof(struct sgentry) + sizeof(struct sgentry64);
datasize = sizeof(struct aac_ciss_identify_pd);
- identify_resp = pci_alloc_consistent(dev->pdev, datasize, &addr);
-
+ identify_resp = dma_alloc_coherent(&dev->pdev->dev, datasize, &addr,
+ GFP_KERNEL);
if (!identify_resp)
goto fib_free_ptr;
@@ -1720,7 +1720,7 @@ int aac_issue_bmic_identify(struct aac_dev *dev, u32 bus, u32 target)
dev->hba_map[bus][target].qd_limit =
identify_resp->current_queue_depth_limit;
- pci_free_consistent(dev->pdev, datasize, (void *)identify_resp, addr);
+ dma_free_coherent(&dev->pdev->dev, datasize, identify_resp, addr);
aac_fib_complete(fibptr);
@@ -1814,9 +1814,8 @@ int aac_report_phys_luns(struct aac_dev *dev, struct fib *fibptr, int rescan)
datasize = sizeof(struct aac_ciss_phys_luns_resp)
+ (AAC_MAX_TARGETS - 1) * sizeof(struct _ciss_lun);
- phys_luns = (struct aac_ciss_phys_luns_resp *) pci_alloc_consistent(
- dev->pdev, datasize, &addr);
-
+ phys_luns = dma_alloc_coherent(&dev->pdev->dev, datasize, &addr,
+ GFP_KERNEL);
if (phys_luns == NULL) {
rcode = -ENOMEM;
goto err_out;
@@ -1861,7 +1860,7 @@ int aac_report_phys_luns(struct aac_dev *dev, struct fib *fibptr, int rescan)
aac_update_hba_map(dev, phys_luns, rescan);
}
- pci_free_consistent(dev->pdev, datasize, (void *) phys_luns, addr);
+ dma_free_coherent(&dev->pdev->dev, datasize, phys_luns, addr);
err_out:
return rcode;
}
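
The aacraid hunks apply one mechanical conversion: pci_alloc_consistent(pdev, size, &dma) becomes dma_alloc_coherent(&pdev->dev, size, &dma, GFP_KERNEL), and pci_free_consistent() becomes dma_free_coherent() against the same struct device. A hedged sketch of the modern pair:

#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/pci.h>

static void *example_alloc(struct pci_dev *pdev, size_t size, dma_addr_t *dma)
{
	/* GFP_KERNEL is now explicit rather than implied by the old helper */
	return dma_alloc_coherent(&pdev->dev, size, dma, GFP_KERNEL);
}

static void example_free(struct pci_dev *pdev, size_t size, void *cpu,
			 dma_addr_t dma)
{
	dma_free_coherent(&pdev->dev, size, cpu, dma);
}
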
diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
index f6afd50579c0..d2f8d5954840 100644
--- a/drivers/scsi/aacraid/commctrl.c
+++ b/drivers/scsi/aacraid/commctrl.c
@@ -100,7 +100,8 @@ static int ioctl_send_fib(struct aac_dev * dev, void __user *arg)
goto cleanup;
}
- kfib = pci_alloc_consistent(dev->pdev, size, &daddr);
+ kfib = dma_alloc_coherent(&dev->pdev->dev, size, &daddr,
+ GFP_KERNEL);
if (!kfib) {
retval = -ENOMEM;
goto cleanup;
@@ -160,7 +161,8 @@ static int ioctl_send_fib(struct aac_dev * dev, void __user *arg)
retval = -EFAULT;
cleanup:
if (hw_fib) {
- pci_free_consistent(dev->pdev, size, kfib, fibptr->hw_fib_pa);
+ dma_free_coherent(&dev->pdev->dev, size, kfib,
+ fibptr->hw_fib_pa);
fibptr->hw_fib_pa = hw_fib_pa;
fibptr->hw_fib_va = hw_fib;
}
diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
index 35607005f7e1..1151505853cf 100644
--- a/drivers/scsi/aacraid/comminit.c
+++ b/drivers/scsi/aacraid/comminit.c
@@ -99,8 +99,7 @@ static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long co
size = fibsize + aac_init_size + commsize + commalign +
printfbufsiz + host_rrq_size;
- base = pci_alloc_consistent(dev->pdev, size, &phys);
-
+ base = dma_alloc_coherent(&dev->pdev->dev, size, &phys, GFP_KERNEL);
if (base == NULL) {
printk(KERN_ERR "aacraid: unable to create mapping.\n");
return 0;
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index 1f4918355fdb..7a1b8a2ce658 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -73,13 +73,13 @@ static int fib_map_alloc(struct aac_dev *dev)
}
dprintk((KERN_INFO
- "allocate hardware fibs pci_alloc_consistent(%p, %d * (%d + %d), %p)\n",
- dev->pdev, dev->max_cmd_size, dev->scsi_host_ptr->can_queue,
+ "allocate hardware fibs dma_alloc_coherent(%p, %d * (%d + %d), %p)\n",
+ &dev->pdev->dev, dev->max_cmd_size, dev->scsi_host_ptr->can_queue,
AAC_NUM_MGT_FIB, &dev->hw_fib_pa));
- dev->hw_fib_va = pci_alloc_consistent(dev->pdev,
+ dev->hw_fib_va = dma_alloc_coherent(&dev->pdev->dev,
(dev->max_cmd_size + sizeof(struct aac_fib_xporthdr))
* (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB) + (ALIGN32 - 1),
- &dev->hw_fib_pa);
+ &dev->hw_fib_pa, GFP_KERNEL);
if (dev->hw_fib_va == NULL)
return -ENOMEM;
return 0;
@@ -106,8 +106,8 @@ void aac_fib_map_free(struct aac_dev *dev)
fib_size = dev->max_fib_size + sizeof(struct aac_fib_xporthdr);
alloc_size = fib_size * num_fibs + ALIGN32 - 1;
- pci_free_consistent(dev->pdev, alloc_size, dev->hw_fib_va,
- dev->hw_fib_pa);
+ dma_free_coherent(&dev->pdev->dev, alloc_size, dev->hw_fib_va,
+ dev->hw_fib_pa);
dev->hw_fib_va = NULL;
dev->hw_fib_pa = 0;
@@ -1571,7 +1571,8 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
* case.
*/
aac_fib_map_free(aac);
- pci_free_consistent(aac->pdev, aac->comm_size, aac->comm_addr, aac->comm_phys);
+ dma_free_coherent(&aac->pdev->dev, aac->comm_size, aac->comm_addr,
+ aac->comm_phys);
aac->comm_addr = NULL;
aac->comm_phys = 0;
kfree(aac->queues);
@@ -2319,7 +2320,8 @@ static int aac_send_wellness_command(struct aac_dev *dev, char *wellness_str,
if (!fibptr)
goto out;
- dma_buf = pci_alloc_consistent(dev->pdev, datasize, &addr);
+ dma_buf = dma_alloc_coherent(&dev->pdev->dev, datasize, &addr,
+ GFP_KERNEL);
if (!dma_buf)
goto fib_free_out;
@@ -2354,7 +2356,7 @@ static int aac_send_wellness_command(struct aac_dev *dev, char *wellness_str,
ret = aac_fib_send(ScsiPortCommand64, fibptr, sizeof(struct aac_srb),
FsaNormal, 1, 1, NULL, NULL);
- pci_free_consistent(dev->pdev, datasize, (void *)dma_buf, addr);
+ dma_free_coherent(&dev->pdev->dev, datasize, dma_buf, addr);
/*
* Do not set XferState to zero unless
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 520ada8266af..372a07533026 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -1592,8 +1592,8 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
out_unmap:
aac_fib_map_free(aac);
if (aac->comm_addr)
- pci_free_consistent(aac->pdev, aac->comm_size, aac->comm_addr,
- aac->comm_phys);
+ dma_free_coherent(&aac->pdev->dev, aac->comm_size,
+ aac->comm_addr, aac->comm_phys);
kfree(aac->queues);
aac_adapter_ioremap(aac, 0);
kfree(aac->fibs);
@@ -1729,8 +1729,8 @@ static void aac_remove_one(struct pci_dev *pdev)
__aac_shutdown(aac);
aac_fib_map_free(aac);
- pci_free_consistent(aac->pdev, aac->comm_size, aac->comm_addr,
- aac->comm_phys);
+ dma_free_coherent(&aac->pdev->dev, aac->comm_size, aac->comm_addr,
+ aac->comm_phys);
kfree(aac->queues);
aac_adapter_ioremap(aac, 0);
diff --git a/drivers/scsi/aacraid/rx.c b/drivers/scsi/aacraid/rx.c
index 5d19c31e3bba..93ef7c37e568 100644
--- a/drivers/scsi/aacraid/rx.c
+++ b/drivers/scsi/aacraid/rx.c
@@ -355,14 +355,16 @@ static int aac_rx_check_health(struct aac_dev *dev)
if (likely((status & 0xFF000000L) == 0xBC000000L))
return (status >> 16) & 0xFF;
- buffer = pci_alloc_consistent(dev->pdev, 512, &baddr);
+ buffer = dma_alloc_coherent(&dev->pdev->dev, 512, &baddr,
+ GFP_KERNEL);
ret = -2;
if (unlikely(buffer == NULL))
return ret;
- post = pci_alloc_consistent(dev->pdev,
- sizeof(struct POSTSTATUS), &paddr);
+ post = dma_alloc_coherent(&dev->pdev->dev,
+ sizeof(struct POSTSTATUS), &paddr,
+ GFP_KERNEL);
if (unlikely(post == NULL)) {
- pci_free_consistent(dev->pdev, 512, buffer, baddr);
+ dma_free_coherent(&dev->pdev->dev, 512, buffer, baddr);
return ret;
}
memset(buffer, 0, 512);
@@ -371,13 +373,13 @@ static int aac_rx_check_health(struct aac_dev *dev)
rx_writel(dev, MUnit.IMRx[0], paddr);
rx_sync_cmd(dev, COMMAND_POST_RESULTS, baddr, 0, 0, 0, 0, 0,
NULL, NULL, NULL, NULL, NULL);
- pci_free_consistent(dev->pdev, sizeof(struct POSTSTATUS),
- post, paddr);
+ dma_free_coherent(&dev->pdev->dev, sizeof(struct POSTSTATUS),
+ post, paddr);
if (likely((buffer[0] == '0') && ((buffer[1] == 'x') || (buffer[1] == 'X')))) {
ret = (hex_to_bin(buffer[2]) << 4) +
hex_to_bin(buffer[3]);
}
- pci_free_consistent(dev->pdev, 512, buffer, baddr);
+ dma_free_coherent(&dev->pdev->dev, 512, buffer, baddr);
return ret;
}
/*
diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c
index 81dd0927246b..24e57e770432 100644
--- a/drivers/scsi/advansys.c
+++ b/drivers/scsi/advansys.c
@@ -6291,18 +6291,17 @@ static uchar AscGetSynPeriodIndex(ASC_DVC_VAR *asc_dvc, uchar syn_time)
static uchar
AscMsgOutSDTR(ASC_DVC_VAR *asc_dvc, uchar sdtr_period, uchar sdtr_offset)
{
- EXT_MSG sdtr_buf;
- uchar sdtr_period_index;
- PortAddr iop_base;
-
- iop_base = asc_dvc->iop_base;
- sdtr_buf.msg_type = EXTENDED_MESSAGE;
- sdtr_buf.msg_len = MS_SDTR_LEN;
- sdtr_buf.msg_req = EXTENDED_SDTR;
- sdtr_buf.xfer_period = sdtr_period;
+ PortAddr iop_base = asc_dvc->iop_base;
+ uchar sdtr_period_index = AscGetSynPeriodIndex(asc_dvc, sdtr_period);
+ EXT_MSG sdtr_buf = {
+ .msg_type = EXTENDED_MESSAGE,
+ .msg_len = MS_SDTR_LEN,
+ .msg_req = EXTENDED_SDTR,
+ .xfer_period = sdtr_period,
+ .req_ack_offset = sdtr_offset,
+ };
sdtr_offset &= ASC_SYN_MAX_OFFSET;
- sdtr_buf.req_ack_offset = sdtr_offset;
- sdtr_period_index = AscGetSynPeriodIndex(asc_dvc, sdtr_period);
+
if (sdtr_period_index <= asc_dvc->max_sdtr_index) {
AscMemWordCopyPtrToLram(iop_base, ASCV_MSGOUT_BEG,
(uchar *)&sdtr_buf,
diff --git a/drivers/scsi/aic7xxx/aic7xxx_pci.c b/drivers/scsi/aic7xxx/aic7xxx_pci.c
index 22d5a949ec83..673e826d7adb 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_pci.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_pci.c
@@ -601,8 +601,8 @@ static const u_int ahc_num_pci_devs = ARRAY_SIZE(ahc_pci_ident_table);
#define STA 0x08
#define DPR 0x01
-static int ahc_9005_subdevinfo_valid(uint16_t vendor, uint16_t device,
- uint16_t subvendor, uint16_t subdevice);
+static int ahc_9005_subdevinfo_valid(uint16_t device, uint16_t vendor,
+ uint16_t subdevice, uint16_t subvendor);
static int ahc_ext_scbram_present(struct ahc_softc *ahc);
static void ahc_scbram_config(struct ahc_softc *ahc, int enable,
int pcheck, int fast, int large);
diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
index 662b2321d1b0..a14ba7a6b81e 100644
--- a/drivers/scsi/aic94xx/aic94xx_init.c
+++ b/drivers/scsi/aic94xx/aic94xx_init.c
@@ -703,7 +703,6 @@ static int asd_unregister_sas_ha(struct asd_ha_struct *asd_ha)
{
int err;
- scsi_remove_host(asd_ha->sas_ha.core.shost);
err = sas_unregister_ha(&asd_ha->sas_ha);
sas_remove_host(asd_ha->sas_ha.core.shost);
diff --git a/drivers/scsi/be2iscsi/be.h b/drivers/scsi/be2iscsi/be.h
index ca9440fb2325..55e3f8b40eb3 100644
--- a/drivers/scsi/be2iscsi/be.h
+++ b/drivers/scsi/be2iscsi/be.h
@@ -1,18 +1,15 @@
-/**
- * Copyright (C) 2005 - 2016 Broadcom
- * All rights reserved.
+/*
+ * Copyright 2017 Broadcom. All Rights Reserved.
+ * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation. The full GNU General
+ * as published by the Free Software Foundation. The full GNU General
* Public License is included in this distribution in the file called COPYING.
*
* Contact Information:
* linux-drivers@broadcom.com
*
- * Emulex
- * 3333 Susan Street
- * Costa Mesa, CA 92626
*/
#ifndef BEISCSI_H
@@ -154,7 +151,6 @@ struct be_ctrl_info {
#define PAGE_SHIFT_4K 12
#define PAGE_SIZE_4K (1 << PAGE_SHIFT_4K)
#define mcc_timeout 120000 /* 12s timeout */
-#define BEISCSI_LOGOUT_SYNC_DELAY 250
/* Returns number of pages spanned by the data starting at the given addr */
#define PAGES_4K_SPANNED(_address, size) \
diff --git a/drivers/scsi/be2iscsi/be_cmds.c b/drivers/scsi/be2iscsi/be_cmds.c
index 5d59e2630ce6..a79a5e72c777 100644
--- a/drivers/scsi/be2iscsi/be_cmds.c
+++ b/drivers/scsi/be2iscsi/be_cmds.c
@@ -1,18 +1,15 @@
-/**
- * Copyright (C) 2005 - 2016 Broadcom
- * All rights reserved.
+/*
+ * Copyright 2017 Broadcom. All Rights Reserved.
+ * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation. The full GNU General
+ * as published by the Free Software Foundation. The full GNU General
* Public License is included in this distribution in the file called COPYING.
*
* Contact Information:
* linux-drivers@broadcom.com
*
- * Emulex
- * 3333 Susan Street
- * Costa Mesa, CA 92626
*/
#include <scsi/iscsi_proto.h>
@@ -246,6 +243,12 @@ int beiscsi_mccq_compl_wait(struct beiscsi_hba *phba,
{
int rc = 0;
+ if (!tag || tag > MAX_MCC_CMD) {
+ __beiscsi_log(phba, KERN_ERR,
+ "BC_%d : invalid tag %u\n", tag);
+ return -EINVAL;
+ }
+
if (beiscsi_hba_in_error(phba)) {
clear_bit(MCC_TAG_STATE_RUNNING,
&phba->ctrl.ptag_state[tag].tag_state);
diff --git a/drivers/scsi/be2iscsi/be_cmds.h b/drivers/scsi/be2iscsi/be_cmds.h
index 1d40e83b0790..d9b6773facdb 100644
--- a/drivers/scsi/be2iscsi/be_cmds.h
+++ b/drivers/scsi/be2iscsi/be_cmds.h
@@ -1,18 +1,15 @@
-/**
- * Copyright (C) 2005 - 2016 Broadcom
- * All rights reserved.
+/*
+ * Copyright 2017 Broadcom. All Rights Reserved.
+ * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation. The full GNU General
+ * as published by the Free Software Foundation. The full GNU General
* Public License is included in this distribution in the file called COPYING.
*
* Contact Information:
* linux-drivers@broadcom.com
*
- * Emulex
- * 3333 Susan Street
- * Costa Mesa, CA 92626
*/
#ifndef BEISCSI_CMDS_H
@@ -1145,24 +1142,49 @@ struct tcp_connect_and_offload_out {
#define DB_DEF_PDU_EVENT_SHIFT 15
#define DB_DEF_PDU_CQPROC_SHIFT 16
-struct dmsg_cqe {
- u32 dw[4];
+struct be_invalidate_connection_params_in {
+ struct be_cmd_req_hdr hdr;
+ u32 session_handle;
+ u16 cid;
+ u16 unused;
+#define BE_CLEANUP_TYPE_INVALIDATE 0x8001
+#define BE_CLEANUP_TYPE_ISSUE_TCP_RST 0x8002
+ u16 cleanup_type;
+ u16 save_cfg;
+} __packed;
+
+struct be_invalidate_connection_params_out {
+ u32 session_handle;
+ u16 cid;
+ u16 unused;
+} __packed;
+
+union be_invalidate_connection_params {
+ struct be_invalidate_connection_params_in req;
+ struct be_invalidate_connection_params_out resp;
} __packed;
-struct tcp_upload_params_in {
+struct be_tcp_upload_params_in {
struct be_cmd_req_hdr hdr;
u16 id;
+#define BE_UPLOAD_TYPE_GRACEFUL 1
+/* abortive upload with reset */
+#define BE_UPLOAD_TYPE_ABORT_RESET 2
+/* abortive upload without reset */
+#define BE_UPLOAD_TYPE_ABORT 3
+/* abortive upload with reset, sequence number by driver */
+#define BE_UPLOAD_TYPE_ABORT_WITH_SEQ 4
u16 upload_type;
u32 reset_seq;
} __packed;
-struct tcp_upload_params_out {
+struct be_tcp_upload_params_out {
u32 dw[32];
} __packed;
-union tcp_upload_params {
- struct tcp_upload_params_in request;
- struct tcp_upload_params_out response;
+union be_tcp_upload_params {
+ struct be_tcp_upload_params_in request;
+ struct be_tcp_upload_params_out response;
} __packed;
struct be_ulp_fw_cfg {
@@ -1243,10 +1265,7 @@ struct be_cmd_get_port_name {
#define OPCODE_COMMON_WRITE_FLASH 96
#define OPCODE_COMMON_READ_FLASH 97
-/* --- CMD_ISCSI_INVALIDATE_CONNECTION_TYPE --- */
#define CMD_ISCSI_COMMAND_INVALIDATE 1
-#define CMD_ISCSI_CONNECTION_INVALIDATE 0x8001
-#define CMD_ISCSI_CONNECTION_ISSUE_TCP_RST 0x8002
#define INI_WR_CMD 1 /* Initiator write command */
#define INI_TMF_CMD 2 /* Initiator TMF command */
@@ -1269,27 +1288,6 @@ struct be_cmd_get_port_name {
* preparedby
* driver should not be touched
*/
-/* --- CMD_CHUTE_TYPE --- */
-#define CMD_CONNECTION_CHUTE_0 1
-#define CMD_CONNECTION_CHUTE_1 2
-#define CMD_CONNECTION_CHUTE_2 3
-
-#define EQ_MAJOR_CODE_COMPLETION 0
-
-#define CMD_ISCSI_SESSION_DEL_CFG_FROM_FLASH 0
-#define CMD_ISCSI_SESSION_SAVE_CFG_ON_FLASH 1
-
-/* --- CONNECTION_UPLOAD_PARAMS --- */
-/* These parameters are used to define the type of upload desired. */
-#define CONNECTION_UPLOAD_GRACEFUL 1 /* Graceful upload */
-#define CONNECTION_UPLOAD_ABORT_RESET 2 /* Abortive upload with
- * reset
- */
-#define CONNECTION_UPLOAD_ABORT 3 /* Abortive upload without
- * reset
- */
-#define CONNECTION_UPLOAD_ABORT_WITH_SEQ 4 /* Abortive upload with reset,
- * sequence number by driver */
/* Returns the number of items in the field array. */
#define BE_NUMBER_OF_FIELD(_type_, _field_) \
diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c
index a4844578e357..97dca4681784 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.c
+++ b/drivers/scsi/be2iscsi/be_iscsi.c
@@ -1,20 +1,15 @@
-/**
- * Copyright (C) 2005 - 2016 Broadcom
- * All rights reserved.
+/*
+ * Copyright 2017 Broadcom. All Rights Reserved.
+ * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation. The full GNU General
+ * as published by the Free Software Foundation. The full GNU General
* Public License is included in this distribution in the file called COPYING.
*
- * Written by: Jayamohan Kallickal (jayamohan.kallickal@broadcom.com)
- *
* Contact Information:
* linux-drivers@broadcom.com
*
- * Emulex
- * 3333 Susan Street
- * Costa Mesa, CA 92626
*/
#include <scsi/libiscsi.h>
@@ -1263,31 +1258,58 @@ static void beiscsi_flush_cq(struct beiscsi_hba *phba)
}
/**
- * beiscsi_close_conn - Upload the connection
+ * beiscsi_conn_close - Invalidate and upload connection
* @ep: The iscsi endpoint
- * @flag: The type of connection closure
+ *
+ * Returns 0 on success, -1 on failure.
*/
-static int beiscsi_close_conn(struct beiscsi_endpoint *beiscsi_ep, int flag)
+static int beiscsi_conn_close(struct beiscsi_endpoint *beiscsi_ep)
{
- int ret = 0;
- unsigned int tag;
struct beiscsi_hba *phba = beiscsi_ep->phba;
+ unsigned int tag, attempts;
+ int ret;
- tag = mgmt_upload_connection(phba, beiscsi_ep->ep_cid, flag);
- if (!tag) {
- beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
- "BS_%d : upload failed for cid 0x%x\n",
- beiscsi_ep->ep_cid);
-
- ret = -EAGAIN;
+ /**
+ * Without successfully invalidating and uploading the connection,
+ * the driver can't reuse the CID, so attempt more than once.
+ */
+ attempts = 0;
+ while (attempts++ < 3) {
+ tag = beiscsi_invalidate_cxn(phba, beiscsi_ep);
+ if (tag) {
+ ret = beiscsi_mccq_compl_wait(phba, tag, NULL, NULL);
+ if (!ret)
+ break;
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
+ "BS_%d : invalidate conn failed cid %d\n",
+ beiscsi_ep->ep_cid);
+ }
}
- ret = beiscsi_mccq_compl_wait(phba, tag, NULL, NULL);
-
- /* Flush the CQ entries */
+ /* wait for all completions to arrive, then process them */
+ msleep(250);
+ /* flush CQ entries */
beiscsi_flush_cq(phba);
- return ret;
+ if (attempts > 3)
+ return -1;
+
+ attempts = 0;
+ while (attempts++ < 3) {
+ tag = beiscsi_upload_cxn(phba, beiscsi_ep);
+ if (tag) {
+ ret = beiscsi_mccq_compl_wait(phba, tag, NULL, NULL);
+ if (!ret)
+ break;
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
+ "BS_%d : upload conn failed cid %d\n",
+ beiscsi_ep->ep_cid);
+ }
+ }
+ if (attempts > 3)
+ return -1;
+
+ return 0;
}
/**
@@ -1298,12 +1320,9 @@ static int beiscsi_close_conn(struct beiscsi_endpoint *beiscsi_ep, int flag)
*/
void beiscsi_ep_disconnect(struct iscsi_endpoint *ep)
{
- struct beiscsi_conn *beiscsi_conn;
struct beiscsi_endpoint *beiscsi_ep;
+ struct beiscsi_conn *beiscsi_conn;
struct beiscsi_hba *phba;
- unsigned int tag;
- uint8_t mgmt_invalidate_flag, tcp_upload_flag;
- unsigned short savecfg_flag = CMD_ISCSI_SESSION_SAVE_CFG_ON_FLASH;
uint16_t cri_index;
beiscsi_ep = ep->dd_data;
@@ -1324,39 +1343,27 @@ void beiscsi_ep_disconnect(struct iscsi_endpoint *ep)
if (beiscsi_ep->conn) {
beiscsi_conn = beiscsi_ep->conn;
iscsi_suspend_queue(beiscsi_conn->conn);
- mgmt_invalidate_flag = ~BEISCSI_NO_RST_ISSUE;
- tcp_upload_flag = CONNECTION_UPLOAD_GRACEFUL;
- } else {
- mgmt_invalidate_flag = BEISCSI_NO_RST_ISSUE;
- tcp_upload_flag = CONNECTION_UPLOAD_ABORT;
}
if (!beiscsi_hba_is_online(phba)) {
beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
"BS_%d : HBA in error 0x%lx\n", phba->state);
- goto free_ep;
- }
-
- tag = mgmt_invalidate_connection(phba, beiscsi_ep,
- beiscsi_ep->ep_cid,
- mgmt_invalidate_flag,
- savecfg_flag);
- if (!tag) {
- beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
- "BS_%d : mgmt_invalidate_connection Failed for cid=%d\n",
- beiscsi_ep->ep_cid);
+ } else {
+ /**
+ * Make CID available even if close fails.
+ * If not freed, FW might fail open using the CID.
+ */
+ if (beiscsi_conn_close(beiscsi_ep) < 0)
+ __beiscsi_log(phba, KERN_ERR,
+ "BS_%d : close conn failed cid %d\n",
+ beiscsi_ep->ep_cid);
}
- beiscsi_mccq_compl_wait(phba, tag, NULL, NULL);
- beiscsi_close_conn(beiscsi_ep, tcp_upload_flag);
-free_ep:
- msleep(BEISCSI_LOGOUT_SYNC_DELAY);
beiscsi_free_ep(beiscsi_ep);
if (!phba->conn_table[cri_index])
__beiscsi_log(phba, KERN_ERR,
- "BS_%d : conn_table empty at %u: cid %u\n",
- cri_index,
- beiscsi_ep->ep_cid);
+ "BS_%d : conn_table empty at %u: cid %u\n",
+ cri_index, beiscsi_ep->ep_cid);
phba->conn_table[cri_index] = NULL;
iscsi_destroy_endpoint(beiscsi_ep->openiscsi_ep);
}
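
The rewritten beiscsi_conn_close() above issues the invalidate and upload commands in two phases and retries each phase a bounded number of times before giving up. A minimal, self-contained sketch of that bounded-retry shape follows; issue_cmd(), wait_cmd_done() and the retry budget of 3 are hypothetical stand-ins for the driver's MCC helpers, not be2iscsi code.

    #include <stdio.h>

    /* Hypothetical stand-ins for the driver's MCC issue/wait helpers. */
    static unsigned int issue_cmd(void)        { return 42; /* non-zero tag */ }
    static int wait_cmd_done(unsigned int tag) { (void)tag; return 0; /* success */ }

    /* Issue a command and wait for its completion, retrying up to 'max' times. */
    static int do_phase_with_retries(unsigned int max)
    {
        unsigned int tag, attempts;

        for (attempts = 0; attempts < max; attempts++) {
            tag = issue_cmd();
            if (tag && wait_cmd_done(tag) == 0)
                return 0;   /* phase succeeded */
        }
        return -1;          /* all attempts failed */
    }

    int main(void)
    {
        /* invalidate first, then upload, each with its own retry budget */
        if (do_phase_with_retries(3) || do_phase_with_retries(3))
            printf("close failed\n");
        else
            printf("close ok\n");
        return 0;
    }
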
diff --git a/drivers/scsi/be2iscsi/be_iscsi.h b/drivers/scsi/be2iscsi/be_iscsi.h
index e4d67dfea4cb..b9d459a21f25 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.h
+++ b/drivers/scsi/be2iscsi/be_iscsi.h
@@ -1,20 +1,15 @@
-/**
- * Copyright (C) 2005 - 2016 Broadcom
- * All rights reserved.
+/*
+ * Copyright 2017 Broadcom. All Rights Reserved.
+ * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation. The full GNU General
+ * as published by the Free Software Foundation. The full GNU General
* Public License is included in this distribution in the file called COPYING.
*
- * Written by: Jayamohan Kallickal (jayamohan.kallickal@broadcom.com)
- *
* Contact Information:
* linux-drivers@broadcom.com
*
- * Avago Technologies
- * 3333 Susan Street
- * Costa Mesa, CA 92626
*/
#ifndef _BE_ISCSI_
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 32b2713cec93..f862332261f8 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -1,20 +1,15 @@
-/**
- * Copyright (C) 2005 - 2016 Broadcom
- * All rights reserved.
+/*
+ * Copyright 2017 Broadcom. All Rights Reserved.
+ * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation. The full GNU General
+ * as published by the Free Software Foundation. The full GNU General
* Public License is included in this distribution in the file called COPYING.
*
- * Written by: Jayamohan Kallickal (jayamohan.kallickal@broadcom.com)
- *
* Contact Information:
* linux-drivers@broadcom.com
*
- * Emulex
- * 3333 Susan Street
- * Costa Mesa, CA 92626
*/
#include <linux/reboot.h>
@@ -337,7 +332,7 @@ static int beiscsi_eh_device_reset(struct scsi_cmnd *sc)
inv_tbl->task[nents] = task;
nents++;
}
- spin_unlock_bh(&session->back_lock);
+ spin_unlock(&session->back_lock);
spin_unlock_bh(&session->frwd_lock);
rc = SUCCESS;
@@ -636,7 +631,6 @@ static void beiscsi_get_params(struct beiscsi_hba *phba)
(total_cid_count +
BE2_TMFS + BE2_NOPOUT_REQ));
phba->params.cxns_per_ctrl = total_cid_count;
- phba->params.asyncpdus_per_ctrl = total_cid_count;
phba->params.icds_per_ctrl = total_icd_count;
phba->params.num_sge_per_io = BE2_SGE;
phba->params.defpdu_hdr_sz = BE2_DEFPDU_HDR_SZ;
@@ -802,12 +796,12 @@ static int beiscsi_init_irqs(struct beiscsi_hba *phba)
struct pci_dev *pcidev = phba->pcidev;
struct hwi_controller *phwi_ctrlr;
struct hwi_context_memory *phwi_context;
- int ret, msix_vec, i, j;
+ int ret, i, j;
phwi_ctrlr = phba->phwi_ctrlr;
phwi_context = phwi_ctrlr->phwi_ctxt;
- if (phba->msix_enabled) {
+ if (pcidev->msix_enabled) {
for (i = 0; i < phba->num_cpus; i++) {
phba->msi_name[i] = kzalloc(BEISCSI_MSI_NAME,
GFP_KERNEL);
@@ -818,9 +812,8 @@ static int beiscsi_init_irqs(struct beiscsi_hba *phba)
sprintf(phba->msi_name[i], "beiscsi_%02x_%02x",
phba->shost->host_no, i);
- msix_vec = phba->msix_entries[i].vector;
- ret = request_irq(msix_vec, be_isr_msix, 0,
- phba->msi_name[i],
+ ret = request_irq(pci_irq_vector(pcidev, i),
+ be_isr_msix, 0, phba->msi_name[i],
&phwi_context->be_eq[i]);
if (ret) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
@@ -838,9 +831,8 @@ static int beiscsi_init_irqs(struct beiscsi_hba *phba)
}
sprintf(phba->msi_name[i], "beiscsi_mcc_%02x",
phba->shost->host_no);
- msix_vec = phba->msix_entries[i].vector;
- ret = request_irq(msix_vec, be_isr_mcc, 0, phba->msi_name[i],
- &phwi_context->be_eq[i]);
+ ret = request_irq(pci_irq_vector(pcidev, i), be_isr_mcc, 0,
+ phba->msi_name[i], &phwi_context->be_eq[i]);
if (ret) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT ,
"BM_%d : beiscsi_init_irqs-"
@@ -862,9 +854,8 @@ static int beiscsi_init_irqs(struct beiscsi_hba *phba)
return 0;
free_msix_irqs:
for (j = i - 1; j >= 0; j--) {
+ free_irq(pci_irq_vector(pcidev, i), &phwi_context->be_eq[j]);
kfree(phba->msi_name[j]);
- msix_vec = phba->msix_entries[j].vector;
- free_irq(msix_vec, &phwi_context->be_eq[j]);
}
return ret;
}
@@ -1454,30 +1445,45 @@ static inline void
beiscsi_hdl_put_handle(struct hd_async_context *pasync_ctx,
struct hd_async_handle *pasync_handle)
{
- if (pasync_handle->is_header) {
- list_add_tail(&pasync_handle->link,
- &pasync_ctx->async_header.free_list);
- pasync_ctx->async_header.free_entries++;
- } else {
- list_add_tail(&pasync_handle->link,
- &pasync_ctx->async_data.free_list);
- pasync_ctx->async_data.free_entries++;
- }
+ pasync_handle->is_final = 0;
+ pasync_handle->buffer_len = 0;
+ pasync_handle->in_use = 0;
+ list_del_init(&pasync_handle->link);
+}
+
+static void
+beiscsi_hdl_purge_handles(struct beiscsi_hba *phba,
+ struct hd_async_context *pasync_ctx,
+ u16 cri)
+{
+ struct hd_async_handle *pasync_handle, *tmp_handle;
+ struct list_head *plist;
+
+ plist = &pasync_ctx->async_entry[cri].wq.list;
+ list_for_each_entry_safe(pasync_handle, tmp_handle, plist, link)
+ beiscsi_hdl_put_handle(pasync_ctx, pasync_handle);
+
+ INIT_LIST_HEAD(&pasync_ctx->async_entry[cri].wq.list);
+ pasync_ctx->async_entry[cri].wq.hdr_len = 0;
+ pasync_ctx->async_entry[cri].wq.bytes_received = 0;
+ pasync_ctx->async_entry[cri].wq.bytes_needed = 0;
}
static struct hd_async_handle *
beiscsi_hdl_get_handle(struct beiscsi_conn *beiscsi_conn,
struct hd_async_context *pasync_ctx,
- struct i_t_dpdu_cqe *pdpdu_cqe)
+ struct i_t_dpdu_cqe *pdpdu_cqe,
+ u8 *header)
{
struct beiscsi_hba *phba = beiscsi_conn->phba;
struct hd_async_handle *pasync_handle;
struct be_bus_address phys_addr;
+ u16 cid, code, ci, cri;
u8 final, error = 0;
- u16 cid, code, ci;
u32 dpl;
cid = beiscsi_conn->beiscsi_conn_cid;
+ cri = BE_GET_ASYNC_CRI_FROM_CID(cid);
/**
* This function is invoked to get the right async_handle structure
* from a given DEF PDU CQ entry.
@@ -1516,6 +1522,7 @@ beiscsi_hdl_get_handle(struct beiscsi_conn *beiscsi_conn,
switch (code) {
case UNSOL_HDR_NOTIFY:
pasync_handle = pasync_ctx->async_entry[ci].header;
+ *header = 1;
break;
case UNSOL_DATA_DIGEST_ERROR_NOTIFY:
error = 1;
@@ -1524,15 +1531,7 @@ beiscsi_hdl_get_handle(struct beiscsi_conn *beiscsi_conn,
break;
/* called only for above codes */
default:
- pasync_handle = NULL;
- break;
- }
-
- if (!pasync_handle) {
- beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_ISCSI,
- "BM_%d : cid %d async PDU handle not found - code %d ci %d addr %llx\n",
- cid, code, ci, phys_addr.u.a64.address);
- return pasync_handle;
+ return NULL;
}
if (pasync_handle->pa.u.a64.address != phys_addr.u.a64.address ||
@@ -1549,47 +1548,32 @@ beiscsi_hdl_get_handle(struct beiscsi_conn *beiscsi_conn,
}
/**
- * Each CID is associated with unique CRI.
- * ASYNC_CRI_FROM_CID mapping and CRI_FROM_CID are totaly different.
- **/
- pasync_handle->cri = BE_GET_ASYNC_CRI_FROM_CID(cid);
- pasync_handle->is_final = final;
- pasync_handle->buffer_len = dpl;
- /* empty the slot */
- if (pasync_handle->is_header)
- pasync_ctx->async_entry[ci].header = NULL;
- else
- pasync_ctx->async_entry[ci].data = NULL;
-
- /**
* DEF PDU header and data buffers with errors should be simply
* dropped as there are no consumers for it.
*/
if (error) {
beiscsi_hdl_put_handle(pasync_ctx, pasync_handle);
- pasync_handle = NULL;
+ return NULL;
}
- return pasync_handle;
-}
-
-static void
-beiscsi_hdl_purge_handles(struct beiscsi_hba *phba,
- struct hd_async_context *pasync_ctx,
- u16 cri)
-{
- struct hd_async_handle *pasync_handle, *tmp_handle;
- struct list_head *plist;
- plist = &pasync_ctx->async_entry[cri].wq.list;
- list_for_each_entry_safe(pasync_handle, tmp_handle, plist, link) {
- list_del(&pasync_handle->link);
- beiscsi_hdl_put_handle(pasync_ctx, pasync_handle);
+ if (pasync_handle->in_use || !list_empty(&pasync_handle->link)) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_ISCSI,
+ "BM_%d : cid %d async PDU handle in use - code %d ci %d addr %llx\n",
+ cid, code, ci, phys_addr.u.a64.address);
+ beiscsi_hdl_purge_handles(phba, pasync_ctx, cri);
}
- INIT_LIST_HEAD(&pasync_ctx->async_entry[cri].wq.list);
- pasync_ctx->async_entry[cri].wq.hdr_len = 0;
- pasync_ctx->async_entry[cri].wq.bytes_received = 0;
- pasync_ctx->async_entry[cri].wq.bytes_needed = 0;
+ list_del_init(&pasync_handle->link);
+ /**
+ * Each CID is associated with a unique CRI.
+ * ASYNC_CRI_FROM_CID mapping and CRI_FROM_CID are totally different.
+ **/
+ pasync_handle->cri = cri;
+ pasync_handle->is_final = final;
+ pasync_handle->buffer_len = dpl;
+ pasync_handle->in_use = 1;
+
+ return pasync_handle;
}
static unsigned int
@@ -1619,6 +1603,10 @@ beiscsi_hdl_fwd_pdu(struct beiscsi_conn *beiscsi_conn,
dlen = pasync_handle->buffer_len;
continue;
}
+ if (!pasync_handle->buffer_len ||
+ (dlen + pasync_handle->buffer_len) >
+ pasync_ctx->async_data.buffer_size)
+ break;
memcpy(pdata + dlen, pasync_handle->pbuffer,
pasync_handle->buffer_len);
dlen += pasync_handle->buffer_len;
@@ -1627,8 +1615,9 @@ beiscsi_hdl_fwd_pdu(struct beiscsi_conn *beiscsi_conn,
if (!plast_handle->is_final) {
/* last handle should have final PDU notification from FW */
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_ISCSI,
- "BM_%d : cid %u %p fwd async PDU with last handle missing - HL%u:DN%u:DR%u\n",
+ "BM_%d : cid %u %p fwd async PDU opcode %x with last handle missing - HL%u:DN%u:DR%u\n",
beiscsi_conn->beiscsi_conn_cid, plast_handle,
+ AMAP_GET_BITS(struct amap_pdu_base, opcode, phdr),
pasync_ctx->async_entry[cri].wq.hdr_len,
pasync_ctx->async_entry[cri].wq.bytes_needed,
pasync_ctx->async_entry[cri].wq.bytes_received);
@@ -1709,85 +1698,53 @@ drop_pdu:
static void
beiscsi_hdq_post_handles(struct beiscsi_hba *phba,
- u8 header, u8 ulp_num)
+ u8 header, u8 ulp_num, u16 nbuf)
{
- struct hd_async_handle *pasync_handle, *tmp, **slot;
+ struct hd_async_handle *pasync_handle;
struct hd_async_context *pasync_ctx;
struct hwi_controller *phwi_ctrlr;
- struct list_head *hfree_list;
struct phys_addr *pasync_sge;
u32 ring_id, doorbell = 0;
u32 doorbell_offset;
- u16 prod = 0, cons;
- u16 index;
+ u16 prod, pi;
phwi_ctrlr = phba->phwi_ctrlr;
pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr, ulp_num);
if (header) {
- cons = pasync_ctx->async_header.free_entries;
- hfree_list = &pasync_ctx->async_header.free_list;
+ pasync_sge = pasync_ctx->async_header.ring_base;
+ pi = pasync_ctx->async_header.pi;
ring_id = phwi_ctrlr->default_pdu_hdr[ulp_num].id;
doorbell_offset = phwi_ctrlr->default_pdu_hdr[ulp_num].
doorbell_offset;
} else {
- cons = pasync_ctx->async_data.free_entries;
- hfree_list = &pasync_ctx->async_data.free_list;
+ pasync_sge = pasync_ctx->async_data.ring_base;
+ pi = pasync_ctx->async_data.pi;
ring_id = phwi_ctrlr->default_pdu_data[ulp_num].id;
doorbell_offset = phwi_ctrlr->default_pdu_data[ulp_num].
doorbell_offset;
}
- /* number of entries posted must be in multiples of 8 */
- if (cons % 8)
- return;
-
- list_for_each_entry_safe(pasync_handle, tmp, hfree_list, link) {
- list_del_init(&pasync_handle->link);
- pasync_handle->is_final = 0;
- pasync_handle->buffer_len = 0;
- /* handles can be consumed out of order, use index in handle */
- index = pasync_handle->index;
- WARN_ON(pasync_handle->is_header != header);
+ for (prod = 0; prod < nbuf; prod++) {
if (header)
- slot = &pasync_ctx->async_entry[index].header;
+ pasync_handle = pasync_ctx->async_entry[pi].header;
else
- slot = &pasync_ctx->async_entry[index].data;
- /**
- * The slot just tracks handle's hold and release, so
- * overwriting at the same index won't do any harm but
- * needs to be caught.
- */
- if (*slot != NULL) {
- beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_ISCSI,
- "BM_%d : async PDU %s slot at %u not empty\n",
- header ? "header" : "data", index);
+ pasync_handle = pasync_ctx->async_entry[pi].data;
+ WARN_ON(pasync_handle->is_header != header);
+ WARN_ON(pasync_handle->index != pi);
+ /* setup the ring only once */
+ if (nbuf == pasync_ctx->num_entries) {
+ /* note: the hi field stores address_lo and vice versa */
+ pasync_sge[pi].hi = pasync_handle->pa.u.a32.address_lo;
+ pasync_sge[pi].lo = pasync_handle->pa.u.a32.address_hi;
}
- /**
- * We use same freed index as in completion to post so this
- * operation is not required for refills. Its required only
- * for ring creation.
- */
- if (header)
- pasync_sge = pasync_ctx->async_header.ring_base;
- else
- pasync_sge = pasync_ctx->async_data.ring_base;
- pasync_sge += index;
- /* if its a refill then address is same; hi is lo */
- WARN_ON(pasync_sge->hi &&
- pasync_sge->hi != pasync_handle->pa.u.a32.address_lo);
- WARN_ON(pasync_sge->lo &&
- pasync_sge->lo != pasync_handle->pa.u.a32.address_hi);
- pasync_sge->hi = pasync_handle->pa.u.a32.address_lo;
- pasync_sge->lo = pasync_handle->pa.u.a32.address_hi;
-
- *slot = pasync_handle;
- if (++prod == cons)
- break;
+ if (++pi == pasync_ctx->num_entries)
+ pi = 0;
}
+
if (header)
- pasync_ctx->async_header.free_entries -= prod;
+ pasync_ctx->async_header.pi = pi;
else
- pasync_ctx->async_data.free_entries -= prod;
+ pasync_ctx->async_data.pi = pi;
doorbell |= ring_id & DB_DEF_PDU_RING_ID_MASK;
doorbell |= 1 << DB_DEF_PDU_REARM_SHIFT;
@@ -1804,20 +1761,26 @@ beiscsi_hdq_process_compl(struct beiscsi_conn *beiscsi_conn,
struct hd_async_handle *pasync_handle = NULL;
struct hd_async_context *pasync_ctx;
struct hwi_controller *phwi_ctrlr;
+ u8 ulp_num, consumed, header = 0;
u16 cid_cri;
- u8 ulp_num;
phwi_ctrlr = phba->phwi_ctrlr;
cid_cri = BE_GET_CRI_FROM_CID(beiscsi_conn->beiscsi_conn_cid);
ulp_num = BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr, cid_cri);
pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr, ulp_num);
pasync_handle = beiscsi_hdl_get_handle(beiscsi_conn, pasync_ctx,
- pdpdu_cqe);
- if (!pasync_handle)
- return;
-
- beiscsi_hdl_gather_pdu(beiscsi_conn, pasync_ctx, pasync_handle);
- beiscsi_hdq_post_handles(phba, pasync_handle->is_header, ulp_num);
+ pdpdu_cqe, &header);
+ if (is_chip_be2_be3r(phba))
+ consumed = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
+ num_cons, pdpdu_cqe);
+ else
+ consumed = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2,
+ num_cons, pdpdu_cqe);
+ if (pasync_handle)
+ beiscsi_hdl_gather_pdu(beiscsi_conn, pasync_ctx, pasync_handle);
+ /* num_cons indicates the number of 8-RQE sets consumed */
+ if (consumed)
+ beiscsi_hdq_post_handles(phba, header, ulp_num, 8 * consumed);
}
void beiscsi_process_mcc_cq(struct beiscsi_hba *phba)
@@ -2407,22 +2370,22 @@ static void beiscsi_find_mem_req(struct beiscsi_hba *phba)
if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
num_async_pdu_buf_sgl_pages =
- PAGES_REQUIRED(BEISCSI_GET_CID_COUNT(
+ PAGES_REQUIRED(BEISCSI_ASYNC_HDQ_SIZE(
phba, ulp_num) *
sizeof(struct phys_addr));
num_async_pdu_buf_pages =
- PAGES_REQUIRED(BEISCSI_GET_CID_COUNT(
+ PAGES_REQUIRED(BEISCSI_ASYNC_HDQ_SIZE(
phba, ulp_num) *
phba->params.defpdu_hdr_sz);
num_async_pdu_data_pages =
- PAGES_REQUIRED(BEISCSI_GET_CID_COUNT(
+ PAGES_REQUIRED(BEISCSI_ASYNC_HDQ_SIZE(
phba, ulp_num) *
phba->params.defpdu_data_sz);
num_async_pdu_data_sgl_pages =
- PAGES_REQUIRED(BEISCSI_GET_CID_COUNT(
+ PAGES_REQUIRED(BEISCSI_ASYNC_HDQ_SIZE(
phba, ulp_num) *
sizeof(struct phys_addr));
@@ -2459,21 +2422,21 @@ static void beiscsi_find_mem_req(struct beiscsi_hba *phba)
mem_descr_index = (HWI_MEM_ASYNC_HEADER_HANDLE_ULP0 +
(ulp_num * MEM_DESCR_OFFSET));
phba->mem_req[mem_descr_index] =
- BEISCSI_GET_CID_COUNT(phba, ulp_num) *
- sizeof(struct hd_async_handle);
+ BEISCSI_ASYNC_HDQ_SIZE(phba, ulp_num) *
+ sizeof(struct hd_async_handle);
mem_descr_index = (HWI_MEM_ASYNC_DATA_HANDLE_ULP0 +
(ulp_num * MEM_DESCR_OFFSET));
phba->mem_req[mem_descr_index] =
- BEISCSI_GET_CID_COUNT(phba, ulp_num) *
- sizeof(struct hd_async_handle);
+ BEISCSI_ASYNC_HDQ_SIZE(phba, ulp_num) *
+ sizeof(struct hd_async_handle);
mem_descr_index = (HWI_MEM_ASYNC_PDU_CONTEXT_ULP0 +
(ulp_num * MEM_DESCR_OFFSET));
phba->mem_req[mem_descr_index] =
- sizeof(struct hd_async_context) +
- (BEISCSI_GET_CID_COUNT(phba, ulp_num) *
- sizeof(struct hd_async_entry));
+ sizeof(struct hd_async_context) +
+ (BEISCSI_ASYNC_HDQ_SIZE(phba, ulp_num) *
+ sizeof(struct hd_async_entry));
}
}
}
@@ -2757,7 +2720,7 @@ static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
((long unsigned int)pasync_ctx +
sizeof(struct hd_async_context));
- pasync_ctx->num_entries = BEISCSI_GET_CID_COUNT(phba,
+ pasync_ctx->num_entries = BEISCSI_ASYNC_HDQ_SIZE(phba,
ulp_num);
/* setup header buffers */
mem_descr = (struct be_mem_descriptor *)phba->init_mem;
@@ -2776,6 +2739,7 @@ static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
"BM_%d : No Virtual address for ULP : %d\n",
ulp_num);
+ pasync_ctx->async_header.pi = 0;
pasync_ctx->async_header.buffer_size = p->defpdu_hdr_sz;
pasync_ctx->async_header.va_base =
mem_descr->mem_array[0].virtual_address;
@@ -2823,7 +2787,6 @@ static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
pasync_ctx->async_header.handle_base =
mem_descr->mem_array[0].virtual_address;
- INIT_LIST_HEAD(&pasync_ctx->async_header.free_list);
/* setup data buffer sgls */
mem_descr = (struct be_mem_descriptor *)phba->init_mem;
@@ -2857,7 +2820,6 @@ static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
pasync_ctx->async_data.handle_base =
mem_descr->mem_array[0].virtual_address;
- INIT_LIST_HEAD(&pasync_ctx->async_data.free_list);
pasync_header_h =
(struct hd_async_handle *)
@@ -2884,6 +2846,7 @@ static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
ulp_num);
idx = 0;
+ pasync_ctx->async_data.pi = 0;
pasync_ctx->async_data.buffer_size = p->defpdu_data_sz;
pasync_ctx->async_data.va_base =
mem_descr->mem_array[idx].virtual_address;
@@ -2895,7 +2858,7 @@ static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
phba->params.defpdu_data_sz);
num_per_mem = 0;
- for (index = 0; index < BEISCSI_GET_CID_COUNT
+ for (index = 0; index < BEISCSI_ASYNC_HDQ_SIZE
(phba, ulp_num); index++) {
pasync_header_h->cri = -1;
pasync_header_h->is_header = 1;
@@ -2911,14 +2874,11 @@ static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
pasync_ctx->async_header.pa_base.u.a64.
address + (p->defpdu_hdr_sz * index);
- list_add_tail(&pasync_header_h->link,
- &pasync_ctx->async_header.
- free_list);
+ pasync_ctx->async_entry[index].header =
+ pasync_header_h;
pasync_header_h++;
- pasync_ctx->async_header.free_entries++;
INIT_LIST_HEAD(&pasync_ctx->async_entry[index].
wq.list);
- pasync_ctx->async_entry[index].header = NULL;
pasync_data_h->cri = -1;
pasync_data_h->is_header = 0;
@@ -2952,12 +2912,9 @@ static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
num_per_mem++;
num_async_data--;
- list_add_tail(&pasync_data_h->link,
- &pasync_ctx->async_data.
- free_list);
+ pasync_ctx->async_entry[index].data =
+ pasync_data_h;
pasync_data_h++;
- pasync_ctx->async_data.free_entries++;
- pasync_ctx->async_entry[index].data = NULL;
}
}
}
@@ -3040,7 +2997,7 @@ static int beiscsi_create_eqs(struct beiscsi_hba *phba,
num_eq_pages = PAGES_REQUIRED(phba->params.num_eq_entries * \
sizeof(struct be_eq_entry));
- if (phba->msix_enabled)
+ if (phba->pcidev->msix_enabled)
eq_for_mcc = 1;
else
eq_for_mcc = 0;
@@ -3550,7 +3507,7 @@ static int be_mcc_queues_create(struct beiscsi_hba *phba,
sizeof(struct be_mcc_compl)))
goto err;
/* Ask BE to create MCC compl queue; */
- if (phba->msix_enabled) {
+ if (phba->pcidev->msix_enabled) {
if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq
[phba->num_cpus].q, false, true, 0))
goto mcc_cq_free;
@@ -3581,42 +3538,35 @@ err:
return -ENOMEM;
}
-/**
- * find_num_cpus()- Get the CPU online count
- * @phba: ptr to priv structure
- *
- * CPU count is used for creating EQ.
- **/
-static void find_num_cpus(struct beiscsi_hba *phba)
+static void be2iscsi_enable_msix(struct beiscsi_hba *phba)
{
- int num_cpus = 0;
-
- num_cpus = num_online_cpus();
+ int nvec = 1;
switch (phba->generation) {
case BE_GEN2:
case BE_GEN3:
- phba->num_cpus = (num_cpus > BEISCSI_MAX_NUM_CPUS) ?
- BEISCSI_MAX_NUM_CPUS : num_cpus;
+ nvec = BEISCSI_MAX_NUM_CPUS + 1;
break;
case BE_GEN4:
- /*
- * If eqid_count == 1 fall back to
- * INTX mechanism
- **/
- if (phba->fw_config.eqid_count == 1) {
- enable_msix = 0;
- phba->num_cpus = 1;
- return;
- }
-
- phba->num_cpus =
- (num_cpus > (phba->fw_config.eqid_count - 1)) ?
- (phba->fw_config.eqid_count - 1) : num_cpus;
+ nvec = phba->fw_config.eqid_count;
break;
default:
- phba->num_cpus = 1;
+ nvec = 2;
+ break;
}
+
+ /* if eqid_count == 1 fall back to INTX */
+ if (enable_msix && nvec > 1) {
+ const struct irq_affinity desc = { .post_vectors = 1 };
+
+ if (pci_alloc_irq_vectors_affinity(phba->pcidev, 2, nvec,
+ PCI_IRQ_MSIX | PCI_IRQ_AFFINITY, &desc) < 0) {
+ phba->num_cpus = nvec - 1;
+ return;
+ }
+ }
+
+ phba->num_cpus = 1;
}
static void hwi_purge_eq(struct beiscsi_hba *phba)
@@ -3633,7 +3583,7 @@ static void hwi_purge_eq(struct beiscsi_hba *phba)
phwi_ctrlr = phba->phwi_ctrlr;
phwi_context = phwi_ctrlr->phwi_ctxt;
- if (phba->msix_enabled)
+ if (phba->pcidev->msix_enabled)
eq_msix = 1;
else
eq_msix = 0;
@@ -3711,7 +3661,7 @@ static void hwi_cleanup_port(struct beiscsi_hba *phba)
}
be_mcc_queues_destroy(phba);
- if (phba->msix_enabled)
+ if (phba->pcidev->msix_enabled)
eq_for_mcc = 1;
else
eq_for_mcc = 0;
@@ -3735,6 +3685,7 @@ static int hwi_init_port(struct beiscsi_hba *phba)
unsigned int def_pdu_ring_sz;
struct be_ctrl_info *ctrl = &phba->ctrl;
int status, ulp_num;
+ u16 nbufs;
phwi_ctrlr = phba->phwi_ctrlr;
phwi_context = phwi_ctrlr->phwi_ctxt;
@@ -3771,9 +3722,8 @@ static int hwi_init_port(struct beiscsi_hba *phba)
for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
- def_pdu_ring_sz =
- BEISCSI_GET_CID_COUNT(phba, ulp_num) *
- sizeof(struct phys_addr);
+ nbufs = phwi_context->pasync_ctx[ulp_num]->num_entries;
+ def_pdu_ring_sz = nbufs * sizeof(struct phys_addr);
status = beiscsi_create_def_hdr(phba, phwi_context,
phwi_ctrlr,
@@ -3801,9 +3751,9 @@ static int hwi_init_port(struct beiscsi_hba *phba)
* let EP know about it.
*/
beiscsi_hdq_post_handles(phba, BEISCSI_DEFQ_HDR,
- ulp_num);
+ ulp_num, nbufs);
beiscsi_hdq_post_handles(phba, BEISCSI_DEFQ_DATA,
- ulp_num);
+ ulp_num, nbufs);
}
}
@@ -4157,7 +4107,7 @@ static void hwi_enable_intr(struct beiscsi_hba *phba)
iowrite32(reg, addr);
}
- if (!phba->msix_enabled) {
+ if (!phba->pcidev->msix_enabled) {
eq = &phwi_context->be_eq[0].q;
beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
"BM_%d : eq->id=%d\n", eq->id);
@@ -5280,19 +5230,6 @@ static void beiscsi_eqd_update_work(struct work_struct *work)
msecs_to_jiffies(BEISCSI_EQD_UPDATE_INTERVAL));
}
-static void beiscsi_msix_enable(struct beiscsi_hba *phba)
-{
- int i, status;
-
- for (i = 0; i <= phba->num_cpus; i++)
- phba->msix_entries[i].entry = i;
-
- status = pci_enable_msix_range(phba->pcidev, phba->msix_entries,
- phba->num_cpus + 1, phba->num_cpus + 1);
- if (status > 0)
- phba->msix_enabled = true;
-}
-
static void beiscsi_hw_tpe_check(unsigned long ptr)
{
struct beiscsi_hba *phba;
@@ -5360,15 +5297,7 @@ static int beiscsi_enable_port(struct beiscsi_hba *phba)
if (ret)
return ret;
- if (enable_msix)
- find_num_cpus(phba);
- else
- phba->num_cpus = 1;
- if (enable_msix) {
- beiscsi_msix_enable(phba);
- if (!phba->msix_enabled)
- phba->num_cpus = 1;
- }
+ be2iscsi_enable_msix(phba);
beiscsi_get_params(phba);
/* Re-enable UER. If different TPE occurs then it is recoverable. */
@@ -5397,7 +5326,7 @@ static int beiscsi_enable_port(struct beiscsi_hba *phba)
irq_poll_init(&pbe_eq->iopoll, be_iopoll_budget, be_iopoll);
}
- i = (phba->msix_enabled) ? i : 0;
+ i = (phba->pcidev->msix_enabled) ? i : 0;
/* Work item for MCC handling */
pbe_eq = &phwi_context->be_eq[i];
INIT_WORK(&pbe_eq->mcc_work, beiscsi_mcc_work);
@@ -5435,9 +5364,7 @@ cleanup_port:
hwi_cleanup_port(phba);
disable_msix:
- if (phba->msix_enabled)
- pci_disable_msix(phba->pcidev);
-
+ pci_free_irq_vectors(phba->pcidev);
return ret;
}
@@ -5454,7 +5381,7 @@ static void beiscsi_disable_port(struct beiscsi_hba *phba, int unload)
struct hwi_context_memory *phwi_context;
struct hwi_controller *phwi_ctrlr;
struct be_eq_obj *pbe_eq;
- unsigned int i, msix_vec;
+ unsigned int i;
if (!test_and_clear_bit(BEISCSI_HBA_ONLINE, &phba->state))
return;
@@ -5462,16 +5389,16 @@ static void beiscsi_disable_port(struct beiscsi_hba *phba, int unload)
phwi_ctrlr = phba->phwi_ctrlr;
phwi_context = phwi_ctrlr->phwi_ctxt;
hwi_disable_intr(phba);
- if (phba->msix_enabled) {
+ if (phba->pcidev->msix_enabled) {
for (i = 0; i <= phba->num_cpus; i++) {
- msix_vec = phba->msix_entries[i].vector;
- free_irq(msix_vec, &phwi_context->be_eq[i]);
+ free_irq(pci_irq_vector(phba->pcidev, i),
+ &phwi_context->be_eq[i]);
kfree(phba->msi_name[i]);
}
} else
if (phba->pcidev->irq)
free_irq(phba->pcidev->irq, phba);
- pci_disable_msix(phba->pcidev);
+ pci_free_irq_vectors(phba->pcidev);
for (i = 0; i < phba->num_cpus; i++) {
pbe_eq = &phwi_context->be_eq[i];
@@ -5681,21 +5608,12 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
beiscsi_get_params(phba);
beiscsi_set_uer_feature(phba);
- if (enable_msix)
- find_num_cpus(phba);
- else
- phba->num_cpus = 1;
+ be2iscsi_enable_msix(phba);
beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
"BM_%d : num_cpus = %d\n",
phba->num_cpus);
- if (enable_msix) {
- beiscsi_msix_enable(phba);
- if (!phba->msix_enabled)
- phba->num_cpus = 1;
- }
-
phba->shost->max_id = phba->params.cxns_per_ctrl;
phba->shost->can_queue = phba->params.ios_per_ctrl;
ret = beiscsi_get_memory(phba);
@@ -5745,7 +5663,7 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
irq_poll_init(&pbe_eq->iopoll, be_iopoll_budget, be_iopoll);
}
- i = (phba->msix_enabled) ? i : 0;
+ i = (phba->pcidev->msix_enabled) ? i : 0;
/* Work item for MCC handling */
pbe_eq = &phwi_context->be_eq[i];
INIT_WORK(&pbe_eq->mcc_work, beiscsi_mcc_work);
@@ -5816,8 +5734,7 @@ free_port:
phba->ctrl.mbox_mem_alloced.dma);
beiscsi_unmap_pci_function(phba);
hba_free:
- if (phba->msix_enabled)
- pci_disable_msix(phba->pcidev);
+ pci_disable_msix(phba->pcidev);
pci_dev_put(phba->pcidev);
iscsi_host_free(phba->shost);
pci_set_drvdata(pcidev, NULL);
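
The be_main.c changes above drop the driver-managed msix_entries[] array in favor of the pci_irq_vectors API, with one extra non-affinitized vector reserved for the MCC queue. The kernel-side sketch below shows that allocation/request/teardown pattern for a hypothetical PCI driver; demo_isr(), demo_setup_irqs() and the vector counts are illustrative assumptions, not the be2iscsi implementation.

    #include <linux/pci.h>
    #include <linux/interrupt.h>

    static irqreturn_t demo_isr(int irq, void *data)
    {
        return IRQ_HANDLED;
    }

    /*
     * Allocate between 2 and max_vecs MSI-X vectors, keeping one
     * non-affinitized vector at the end (e.g. for a management queue),
     * then request an IRQ for each allocated vector.
     */
    static int demo_setup_irqs(struct pci_dev *pdev, int max_vecs, void *drvdata)
    {
        const struct irq_affinity desc = { .post_vectors = 1 };
        int nvec, i, ret;

        nvec = pci_alloc_irq_vectors_affinity(pdev, 2, max_vecs,
                                              PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
                                              &desc);
        if (nvec < 0)
            return nvec;

        for (i = 0; i < nvec; i++) {
            ret = request_irq(pci_irq_vector(pdev, i), demo_isr, 0,
                              "demo", drvdata);
            if (ret)
                goto err_free;
        }
        return 0;

    err_free:
        while (--i >= 0)
            free_irq(pci_irq_vector(pdev, i), drvdata);
        pci_free_irq_vectors(pdev);
        return ret;
    }
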
diff --git a/drivers/scsi/be2iscsi/be_main.h b/drivers/scsi/be2iscsi/be_main.h
index 218857926566..338dbe0800c1 100644
--- a/drivers/scsi/be2iscsi/be_main.h
+++ b/drivers/scsi/be2iscsi/be_main.h
@@ -1,20 +1,15 @@
-/**
- * Copyright (C) 2005 - 2016 Broadcom
- * All rights reserved.
+/*
+ * Copyright 2017 Broadcom. All Rights Reserved.
+ * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation. The full GNU General
+ * as published by the Free Software Foundation. The full GNU General
* Public License is included in this distribution in the file called COPYING.
*
- * Written by: Jayamohan Kallickal (jayamohan.kallickal@broadcom.com)
- *
* Contact Information:
* linux-drivers@broadcom.com
*
- * Emulex
- * 3333 Susan Street
- * Costa Mesa, CA 92626
*/
#ifndef _BEISCSI_MAIN_
@@ -36,7 +31,7 @@
#include <scsi/scsi_transport_iscsi.h>
#define DRV_NAME "be2iscsi"
-#define BUILD_STR "11.2.1.0"
+#define BUILD_STR "11.4.0.0"
#define BE_NAME "Emulex OneConnect" \
"Open-iSCSI Driver version" BUILD_STR
#define DRV_DESC BE_NAME " " "Driver"
@@ -235,7 +230,6 @@ struct sgl_handle {
struct hba_parameters {
unsigned int ios_per_ctrl;
unsigned int cxns_per_ctrl;
- unsigned int asyncpdus_per_ctrl;
unsigned int icds_per_ctrl;
unsigned int num_sge_per_io;
unsigned int defpdu_hdr_sz;
@@ -323,9 +317,7 @@ struct beiscsi_hba {
struct pci_dev *pcidev;
unsigned int num_cpus;
unsigned int nxt_cqid;
- struct msix_entry msix_entries[MAX_CPUS];
char *msi_name[MAX_CPUS];
- bool msix_enabled;
struct be_mem_descriptor *init_mem;
unsigned short io_sgl_alloc_index;
@@ -597,8 +589,12 @@ struct hd_async_handle {
u16 cri;
u8 is_header;
u8 is_final;
+ u8 in_use;
};
+#define BEISCSI_ASYNC_HDQ_SIZE(phba, ulp) \
+ (BEISCSI_GET_CID_COUNT((phba), (ulp)) * 2)
+
/**
* This has list of async PDUs that are waiting to be processed.
* Buffers live in this list for a brief duration before they get
@@ -624,14 +620,8 @@ struct hd_async_buf_context {
void *va_base;
void *ring_base;
struct hd_async_handle *handle_base;
- u16 free_entries;
u32 buffer_size;
- /**
- * Once iSCSI layer finishes processing an async PDU, the
- * handles used for the PDU are added to this list.
- * They are posted back to FW in groups of 8.
- */
- struct list_head free_list;
+ u16 pi;
};
/**
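
With the free lists removed, each default PDU buffer ring in be_main.h above is tracked only by a producer index (pi) that wraps at num_entries; refills simply repost buffers starting at pi. A tiny standalone sketch of that wrap-around refill follows; RING_ENTRIES, post_buffer() and ring_refill() are made-up names, not driver symbols.

    #include <stdio.h>

    #define RING_ENTRIES 8          /* hypothetical ring size */

    struct ring {
        unsigned short pi;          /* producer index, wraps at RING_ENTRIES */
    };

    static void post_buffer(unsigned short slot)
    {
        printf("posted slot %u\n", slot);
    }

    /* Repost nbuf buffers to the ring starting at the producer index. */
    static void ring_refill(struct ring *r, unsigned short nbuf)
    {
        unsigned short i;

        for (i = 0; i < nbuf; i++) {
            post_buffer(r->pi);
            if (++r->pi == RING_ENTRIES)
                r->pi = 0;          /* wrap around */
        }
    }

    int main(void)
    {
        struct ring r = { .pi = 6 };

        ring_refill(&r, 4);         /* posts slots 6, 7, 0, 1 */
        return 0;
    }
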
diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c
index 2f6d5c2ac329..c73775368d09 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.c
+++ b/drivers/scsi/be2iscsi/be_mgmt.c
@@ -1,20 +1,15 @@
-/**
- * Copyright (C) 2005 - 2016 Broadcom
- * All rights reserved.
+/*
+ * Copyright 2017 Broadcom. All Rights Reserved.
+ * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation. The full GNU General
+ * as published by the Free Software Foundation. The full GNU General
* Public License is included in this distribution in the file called COPYING.
*
- * Written by: Jayamohan Kallickal (jayamohan.kallickal@broadcom.com)
- *
* Contact Information:
* linux-drivers@broadcom.com
*
- * Emulex
- * 3333 Susan Street
- * Costa Mesa, CA 92626
*/
#include <linux/bsg-lib.h>
@@ -126,67 +121,6 @@ unsigned int mgmt_vendor_specific_fw_cmd(struct be_ctrl_info *ctrl,
return tag;
}
-unsigned int mgmt_invalidate_connection(struct beiscsi_hba *phba,
- struct beiscsi_endpoint *beiscsi_ep,
- unsigned short cid,
- unsigned short issue_reset,
- unsigned short savecfg_flag)
-{
- struct be_ctrl_info *ctrl = &phba->ctrl;
- struct be_mcc_wrb *wrb;
- struct iscsi_invalidate_connection_params_in *req;
- unsigned int tag = 0;
-
- mutex_lock(&ctrl->mbox_lock);
- wrb = alloc_mcc_wrb(phba, &tag);
- if (!wrb) {
- mutex_unlock(&ctrl->mbox_lock);
- return 0;
- }
-
- req = embedded_payload(wrb);
- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
- be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI,
- OPCODE_ISCSI_INI_DRIVER_INVALIDATE_CONNECTION,
- sizeof(*req));
- req->session_handle = beiscsi_ep->fw_handle;
- req->cid = cid;
- if (issue_reset)
- req->cleanup_type = CMD_ISCSI_CONNECTION_ISSUE_TCP_RST;
- else
- req->cleanup_type = CMD_ISCSI_CONNECTION_INVALIDATE;
- req->save_cfg = savecfg_flag;
- be_mcc_notify(phba, tag);
- mutex_unlock(&ctrl->mbox_lock);
- return tag;
-}
-
-unsigned int mgmt_upload_connection(struct beiscsi_hba *phba,
- unsigned short cid, unsigned int upload_flag)
-{
- struct be_ctrl_info *ctrl = &phba->ctrl;
- struct be_mcc_wrb *wrb;
- struct tcp_upload_params_in *req;
- unsigned int tag;
-
- mutex_lock(&ctrl->mbox_lock);
- wrb = alloc_mcc_wrb(phba, &tag);
- if (!wrb) {
- mutex_unlock(&ctrl->mbox_lock);
- return 0;
- }
-
- req = embedded_payload(wrb);
- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
- be_cmd_hdr_prepare(&req->hdr, CMD_COMMON_TCP_UPLOAD,
- OPCODE_COMMON_TCP_UPLOAD, sizeof(*req));
- req->id = (unsigned short)cid;
- req->upload_type = (unsigned char)upload_flag;
- be_mcc_notify(phba, tag);
- mutex_unlock(&ctrl->mbox_lock);
- return tag;
-}
-
/**
* mgmt_open_connection()- Establish a TCP CXN
* @dst_addr: Destination Address
@@ -1449,6 +1383,72 @@ void beiscsi_offload_cxn_v2(struct beiscsi_offload_params *params,
exp_statsn) / 32] + 1));
}
+unsigned int beiscsi_invalidate_cxn(struct beiscsi_hba *phba,
+ struct beiscsi_endpoint *beiscsi_ep)
+{
+ struct be_invalidate_connection_params_in *req;
+ struct be_ctrl_info *ctrl = &phba->ctrl;
+ struct be_mcc_wrb *wrb;
+ unsigned int tag = 0;
+
+ mutex_lock(&ctrl->mbox_lock);
+ wrb = alloc_mcc_wrb(phba, &tag);
+ if (!wrb) {
+ mutex_unlock(&ctrl->mbox_lock);
+ return 0;
+ }
+
+ req = embedded_payload(wrb);
+ be_wrb_hdr_prepare(wrb, sizeof(union be_invalidate_connection_params),
+ true, 0);
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI,
+ OPCODE_ISCSI_INI_DRIVER_INVALIDATE_CONNECTION,
+ sizeof(*req));
+ req->session_handle = beiscsi_ep->fw_handle;
+ req->cid = beiscsi_ep->ep_cid;
+ if (beiscsi_ep->conn)
+ req->cleanup_type = BE_CLEANUP_TYPE_INVALIDATE;
+ else
+ req->cleanup_type = BE_CLEANUP_TYPE_ISSUE_TCP_RST;
+ /**
+ * 0 - non-persistent targets
+ * 1 - save session info on flash
+ */
+ req->save_cfg = 0;
+ be_mcc_notify(phba, tag);
+ mutex_unlock(&ctrl->mbox_lock);
+ return tag;
+}
+
+unsigned int beiscsi_upload_cxn(struct beiscsi_hba *phba,
+ struct beiscsi_endpoint *beiscsi_ep)
+{
+ struct be_ctrl_info *ctrl = &phba->ctrl;
+ struct be_mcc_wrb *wrb;
+ struct be_tcp_upload_params_in *req;
+ unsigned int tag;
+
+ mutex_lock(&ctrl->mbox_lock);
+ wrb = alloc_mcc_wrb(phba, &tag);
+ if (!wrb) {
+ mutex_unlock(&ctrl->mbox_lock);
+ return 0;
+ }
+
+ req = embedded_payload(wrb);
+ be_wrb_hdr_prepare(wrb, sizeof(union be_tcp_upload_params), true, 0);
+ be_cmd_hdr_prepare(&req->hdr, CMD_COMMON_TCP_UPLOAD,
+ OPCODE_COMMON_TCP_UPLOAD, sizeof(*req));
+ req->id = beiscsi_ep->ep_cid;
+ if (beiscsi_ep->conn)
+ req->upload_type = BE_UPLOAD_TYPE_GRACEFUL;
+ else
+ req->upload_type = BE_UPLOAD_TYPE_ABORT;
+ be_mcc_notify(phba, tag);
+ mutex_unlock(&ctrl->mbox_lock);
+ return tag;
+}
+
int beiscsi_mgmt_invalidate_icds(struct beiscsi_hba *phba,
struct invldt_cmd_tbl *inv_tbl,
unsigned int nents)
diff --git a/drivers/scsi/be2iscsi/be_mgmt.h b/drivers/scsi/be2iscsi/be_mgmt.h
index 308f1472f98a..06ddc5ad6874 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.h
+++ b/drivers/scsi/be2iscsi/be_mgmt.h
@@ -1,20 +1,15 @@
-/**
- * Copyright (C) 2005 - 2016 Broadcom
- * All rights reserved.
+/*
+ * Copyright 2017 Broadcom. All Rights Reserved.
+ * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation. The full GNU General
+ * as published by the Free Software Foundation. The full GNU General
* Public License is included in this distribution in the file called COPYING.
*
- * Written by: Jayamohan Kallickal (jayamohan.kallickal@broadcom.com)
- *
* Contact Information:
* linux-drivers@broadcom.com
*
- * Emulex
- * 3333 Susan Street
- * Costa Mesa, CA 92626
*/
#ifndef _BEISCSI_MGMT_
@@ -41,35 +36,11 @@ int mgmt_open_connection(struct beiscsi_hba *phba,
struct beiscsi_endpoint *beiscsi_ep,
struct be_dma_mem *nonemb_cmd);
-unsigned int mgmt_upload_connection(struct beiscsi_hba *phba,
- unsigned short cid,
- unsigned int upload_flag);
unsigned int mgmt_vendor_specific_fw_cmd(struct be_ctrl_info *ctrl,
struct beiscsi_hba *phba,
struct bsg_job *job,
struct be_dma_mem *nonemb_cmd);
-#define BEISCSI_NO_RST_ISSUE 0
-struct iscsi_invalidate_connection_params_in {
- struct be_cmd_req_hdr hdr;
- unsigned int session_handle;
- unsigned short cid;
- unsigned short unused;
- unsigned short cleanup_type;
- unsigned short save_cfg;
-} __packed;
-
-struct iscsi_invalidate_connection_params_out {
- unsigned int session_handle;
- unsigned short cid;
- unsigned short unused;
-} __packed;
-
-union iscsi_invalidate_connection_params {
- struct iscsi_invalidate_connection_params_in request;
- struct iscsi_invalidate_connection_params_out response;
-} __packed;
-
#define BE_INVLDT_CMD_TBL_SZ 128
struct invldt_cmd_tbl {
unsigned short icd;
@@ -265,6 +236,12 @@ void beiscsi_offload_cxn_v2(struct beiscsi_offload_params *params,
struct wrb_handle *pwrb_handle,
struct hwi_wrb_context *pwrb_context);
+unsigned int beiscsi_invalidate_cxn(struct beiscsi_hba *phba,
+ struct beiscsi_endpoint *beiscsi_ep);
+
+unsigned int beiscsi_upload_cxn(struct beiscsi_hba *phba,
+ struct beiscsi_endpoint *beiscsi_ep);
+
int be_cmd_modify_eq_delay(struct beiscsi_hba *phba,
struct be_set_eqd *, int num);
diff --git a/drivers/scsi/bfa/bfa_core.c b/drivers/scsi/bfa/bfa_core.c
index 7209afad82f7..3e1caec82554 100644
--- a/drivers/scsi/bfa/bfa_core.c
+++ b/drivers/scsi/bfa/bfa_core.c
@@ -23,22 +23,6 @@
BFA_TRC_FILE(HAL, CORE);
/*
- * BFA module list terminated by NULL
- */
-static struct bfa_module_s *hal_mods[] = {
- &hal_mod_fcdiag,
- &hal_mod_sgpg,
- &hal_mod_fcport,
- &hal_mod_fcxp,
- &hal_mod_lps,
- &hal_mod_uf,
- &hal_mod_rport,
- &hal_mod_fcp,
- &hal_mod_dconf,
- NULL
-};
-
-/*
* Message handlers for various modules.
*/
static bfa_isr_func_t bfa_isrs[BFI_MC_MAX] = {
@@ -1191,8 +1175,13 @@ bfa_iocfc_start_submod(struct bfa_s *bfa)
for (i = 0; i < BFI_IOC_MAX_CQS; i++)
bfa_isr_rspq_ack(bfa, i, bfa_rspq_ci(bfa, i));
- for (i = 0; hal_mods[i]; i++)
- hal_mods[i]->start(bfa);
+ bfa_fcport_start(bfa);
+ bfa_uf_start(bfa);
+ /*
+ * bfa_init() with flash read is complete. now invalidate the stale
+ * content of lun mask like unit attention, rp tag and lp tag.
+ */
+ bfa_ioim_lm_init(BFA_FCP_MOD(bfa)->bfa);
bfa->iocfc.submod_enabled = BFA_TRUE;
}
@@ -1203,13 +1192,16 @@ bfa_iocfc_start_submod(struct bfa_s *bfa)
static void
bfa_iocfc_disable_submod(struct bfa_s *bfa)
{
- int i;
-
if (bfa->iocfc.submod_enabled == BFA_FALSE)
return;
- for (i = 0; hal_mods[i]; i++)
- hal_mods[i]->iocdisable(bfa);
+ bfa_fcdiag_iocdisable(bfa);
+ bfa_fcport_iocdisable(bfa);
+ bfa_fcxp_iocdisable(bfa);
+ bfa_lps_iocdisable(bfa);
+ bfa_rport_iocdisable(bfa);
+ bfa_fcp_iocdisable(bfa);
+ bfa_dconf_iocdisable(bfa);
bfa->iocfc.submod_enabled = BFA_FALSE;
}
@@ -1773,7 +1765,6 @@ void
bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
struct bfa_s *bfa)
{
- int i;
struct bfa_mem_dma_s *port_dma = BFA_MEM_PORT_DMA(bfa);
struct bfa_mem_dma_s *ablk_dma = BFA_MEM_ABLK_DMA(bfa);
struct bfa_mem_dma_s *cee_dma = BFA_MEM_CEE_DMA(bfa);
@@ -1792,9 +1783,14 @@ bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
INIT_LIST_HEAD(&meminfo->kva_info.qe);
bfa_iocfc_meminfo(cfg, meminfo, bfa);
-
- for (i = 0; hal_mods[i]; i++)
- hal_mods[i]->meminfo(cfg, meminfo, bfa);
+ bfa_sgpg_meminfo(cfg, meminfo, bfa);
+ bfa_fcport_meminfo(cfg, meminfo, bfa);
+ bfa_fcxp_meminfo(cfg, meminfo, bfa);
+ bfa_lps_meminfo(cfg, meminfo, bfa);
+ bfa_uf_meminfo(cfg, meminfo, bfa);
+ bfa_rport_meminfo(cfg, meminfo, bfa);
+ bfa_fcp_meminfo(cfg, meminfo, bfa);
+ bfa_dconf_meminfo(cfg, meminfo, bfa);
/* dma info setup */
bfa_mem_dma_setup(meminfo, port_dma, bfa_port_meminfo());
@@ -1840,7 +1836,6 @@ void
bfa_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
{
- int i;
struct bfa_mem_dma_s *dma_info, *dma_elem;
struct bfa_mem_kva_s *kva_info, *kva_elem;
struct list_head *dm_qe, *km_qe;
@@ -1869,10 +1864,15 @@ bfa_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
}
bfa_iocfc_attach(bfa, bfad, cfg, pcidev);
-
- for (i = 0; hal_mods[i]; i++)
- hal_mods[i]->attach(bfa, bfad, cfg, pcidev);
-
+ bfa_fcdiag_attach(bfa, bfad, cfg, pcidev);
+ bfa_sgpg_attach(bfa, bfad, cfg, pcidev);
+ bfa_fcport_attach(bfa, bfad, cfg, pcidev);
+ bfa_fcxp_attach(bfa, bfad, cfg, pcidev);
+ bfa_lps_attach(bfa, bfad, cfg, pcidev);
+ bfa_uf_attach(bfa, bfad, cfg, pcidev);
+ bfa_rport_attach(bfa, bfad, cfg, pcidev);
+ bfa_fcp_attach(bfa, bfad, cfg, pcidev);
+ bfa_dconf_attach(bfa, bfad, cfg);
bfa_com_port_attach(bfa);
bfa_com_ablk_attach(bfa);
bfa_com_cee_attach(bfa);
@@ -1899,10 +1899,6 @@ bfa_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
void
bfa_detach(struct bfa_s *bfa)
{
- int i;
-
- for (i = 0; hal_mods[i]; i++)
- hal_mods[i]->detach(bfa);
bfa_ioc_detach(&bfa->ioc);
}
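
The bfa_core.c hunks above replace a walk over the NULL-terminated hal_mods[] ops table with explicit per-module calls. For readers unfamiliar with the idiom being removed, here is a small self-contained sketch of such a sentinel-terminated ops table; the module names and *_demo functions are invented for illustration only.

    #include <stdio.h>

    struct module_ops {
        const char *name;
        void (*start)(void);
    };

    static void fcport_start_demo(void) { puts("fcport start"); }
    static void uf_start_demo(void)     { puts("uf start"); }

    static const struct module_ops fcport_mod = { "fcport", fcport_start_demo };
    static const struct module_ops uf_mod     = { "uf", uf_start_demo };

    /* NULL-terminated ops table; callers walk it until the sentinel entry. */
    static const struct module_ops *hal_mods_demo[] = { &fcport_mod, &uf_mod, NULL };

    int main(void)
    {
        int i;

        for (i = 0; hal_mods_demo[i]; i++)
            hal_mods_demo[i]->start();
        return 0;
    }
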
diff --git a/drivers/scsi/bfa/bfa_fcpim.c b/drivers/scsi/bfa/bfa_fcpim.c
index 20982e7cdd81..5f53b3276234 100644
--- a/drivers/scsi/bfa/bfa_fcpim.c
+++ b/drivers/scsi/bfa/bfa_fcpim.c
@@ -25,7 +25,6 @@ BFA_TRC_FILE(HAL, FCPIM);
* BFA ITNIM Related definitions
*/
static void bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim);
-static void bfa_ioim_lm_init(struct bfa_s *bfa);
#define BFA_ITNIM_FROM_TAG(_fcpim, _tag) \
(((_fcpim)->itnim_arr + ((_tag) & ((_fcpim)->num_itnims - 1))))
@@ -339,7 +338,7 @@ bfa_fcpim_attach(struct bfa_fcp_mod_s *fcp, void *bfad,
bfa_ioim_attach(fcpim);
}
-static void
+void
bfa_fcpim_iocdisable(struct bfa_fcp_mod_s *fcp)
{
struct bfa_fcpim_s *fcpim = &fcp->fcpim;
@@ -2105,7 +2104,7 @@ bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
* is complete by driver. now invalidate the stale content of lun mask
* like unit attention, rp tag and lp tag.
*/
-static void
+void
bfa_ioim_lm_init(struct bfa_s *bfa)
{
struct bfa_lun_mask_s *lunm_list;
@@ -3634,11 +3633,7 @@ bfa_tskim_res_recfg(struct bfa_s *bfa, u16 num_tskim_fw)
}
}
-/* BFA FCP module - parent module for fcpim */
-
-BFA_MODULE(fcp);
-
-static void
+void
bfa_fcp_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
struct bfa_s *bfa)
{
@@ -3696,7 +3691,7 @@ bfa_fcp_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
bfa_mem_kva_setup(minfo, fcp_kva, km_len);
}
-static void
+void
bfa_fcp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
struct bfa_pcidev_s *pcidev)
{
@@ -3739,29 +3734,7 @@ bfa_fcp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
(fcp->num_itns * sizeof(struct bfa_itn_s)));
}
-static void
-bfa_fcp_detach(struct bfa_s *bfa)
-{
-}
-
-static void
-bfa_fcp_start(struct bfa_s *bfa)
-{
- struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
-
- /*
- * bfa_init() with flash read is complete. now invalidate the stale
- * content of lun mask like unit attention, rp tag and lp tag.
- */
- bfa_ioim_lm_init(fcp->bfa);
-}
-
-static void
-bfa_fcp_stop(struct bfa_s *bfa)
-{
-}
-
-static void
+void
bfa_fcp_iocdisable(struct bfa_s *bfa)
{
struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c
index 4ddda72f60e6..638c0a2857f7 100644
--- a/drivers/scsi/bfa/bfa_fcs_lport.c
+++ b/drivers/scsi/bfa/bfa_fcs_lport.c
@@ -89,16 +89,27 @@ static struct {
void (*online) (struct bfa_fcs_lport_s *port);
void (*offline) (struct bfa_fcs_lport_s *port);
} __port_action[] = {
- {
- bfa_fcs_lport_unknown_init, bfa_fcs_lport_unknown_online,
- bfa_fcs_lport_unknown_offline}, {
- bfa_fcs_lport_fab_init, bfa_fcs_lport_fab_online,
- bfa_fcs_lport_fab_offline}, {
- bfa_fcs_lport_n2n_init, bfa_fcs_lport_n2n_online,
- bfa_fcs_lport_n2n_offline}, {
- bfa_fcs_lport_loop_init, bfa_fcs_lport_loop_online,
- bfa_fcs_lport_loop_offline},
- };
+ [BFA_FCS_FABRIC_UNKNOWN] = {
+ .init = bfa_fcs_lport_unknown_init,
+ .online = bfa_fcs_lport_unknown_online,
+ .offline = bfa_fcs_lport_unknown_offline
+ },
+ [BFA_FCS_FABRIC_SWITCHED] = {
+ .init = bfa_fcs_lport_fab_init,
+ .online = bfa_fcs_lport_fab_online,
+ .offline = bfa_fcs_lport_fab_offline
+ },
+ [BFA_FCS_FABRIC_N2N] = {
+ .init = bfa_fcs_lport_n2n_init,
+ .online = bfa_fcs_lport_n2n_online,
+ .offline = bfa_fcs_lport_n2n_offline
+ },
+ [BFA_FCS_FABRIC_LOOP] = {
+ .init = bfa_fcs_lport_loop_init,
+ .online = bfa_fcs_lport_loop_online,
+ .offline = bfa_fcs_lport_loop_offline
+ },
+};
/*
* fcs_port_sm FCS logical port state machine
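
The __port_action[] rework above switches from positional brace initialization to designated initializers keyed by the fabric enum, so each slot is bound to its enum value by name. A small self-contained sketch of the same idiom, with invented enum values and handlers:

    #include <stdio.h>

    enum fabric_type { FABRIC_UNKNOWN, FABRIC_SWITCHED, FABRIC_N2N, FABRIC_MAX };

    struct port_ops {
        void (*init)(void);
        void (*online)(void);
    };

    static void unknown_init(void)   { puts("unknown init"); }
    static void unknown_online(void) { puts("unknown online"); }
    static void fab_init(void)       { puts("fabric init"); }
    static void fab_online(void)     { puts("fabric online"); }

    /*
     * Designated initializers keep each slot tied to its enum value,
     * so reordering the enum cannot silently misalign the table.
     */
    static const struct port_ops port_action[FABRIC_MAX] = {
        [FABRIC_UNKNOWN]  = { .init = unknown_init, .online = unknown_online },
        [FABRIC_SWITCHED] = { .init = fab_init,     .online = fab_online },
        /* FABRIC_N2N left zeroed: callers must check for NULL handlers. */
    };

    int main(void)
    {
        port_action[FABRIC_SWITCHED].init();
        port_action[FABRIC_SWITCHED].online();
        return 0;
    }
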
diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c
index a1ada4a31c97..256f4afaccf9 100644
--- a/drivers/scsi/bfa/bfa_ioc.c
+++ b/drivers/scsi/bfa/bfa_ioc.c
@@ -5822,12 +5822,6 @@ bfa_phy_intr(void *phyarg, struct bfi_mbmsg_s *msg)
}
/*
- * DCONF module specific
- */
-
-BFA_MODULE(dconf);
-
-/*
* DCONF state machine events
*/
enum bfa_dconf_event {
@@ -6073,7 +6067,7 @@ bfa_dconf_sm_iocdown_dirty(struct bfa_dconf_mod_s *dconf,
/*
* Compute and return memory needed by DRV_CFG module.
*/
-static void
+void
bfa_dconf_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
struct bfa_s *bfa)
{
@@ -6087,9 +6081,8 @@ bfa_dconf_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
sizeof(struct bfa_dconf_s));
}
-static void
-bfa_dconf_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
- struct bfa_pcidev_s *pcidev)
+void
+bfa_dconf_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg)
{
struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
@@ -6134,33 +6127,20 @@ bfa_dconf_modinit(struct bfa_s *bfa)
struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
bfa_sm_send_event(dconf, BFA_DCONF_SM_INIT);
}
-static void
-bfa_dconf_start(struct bfa_s *bfa)
-{
-}
-
-static void
-bfa_dconf_stop(struct bfa_s *bfa)
-{
-}
static void bfa_dconf_timer(void *cbarg)
{
struct bfa_dconf_mod_s *dconf = cbarg;
bfa_sm_send_event(dconf, BFA_DCONF_SM_TIMEOUT);
}
-static void
+
+void
bfa_dconf_iocdisable(struct bfa_s *bfa)
{
struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
bfa_sm_send_event(dconf, BFA_DCONF_SM_IOCDISABLE);
}
-static void
-bfa_dconf_detach(struct bfa_s *bfa)
-{
-}
-
static bfa_status_t
bfa_dconf_flash_write(struct bfa_dconf_mod_s *dconf)
{
diff --git a/drivers/scsi/bfa/bfa_modules.h b/drivers/scsi/bfa/bfa_modules.h
index 53135f21fa0e..1c2ab395e616 100644
--- a/drivers/scsi/bfa/bfa_modules.h
+++ b/drivers/scsi/bfa/bfa_modules.h
@@ -61,54 +61,8 @@ enum {
BFA_TRC_HAL_IOCFC_CB = 5,
};
-/*
- * Macro to define a new BFA module
- */
-#define BFA_MODULE(__mod) \
- static void bfa_ ## __mod ## _meminfo( \
- struct bfa_iocfc_cfg_s *cfg, \
- struct bfa_meminfo_s *meminfo, \
- struct bfa_s *bfa); \
- static void bfa_ ## __mod ## _attach(struct bfa_s *bfa, \
- void *bfad, struct bfa_iocfc_cfg_s *cfg, \
- struct bfa_pcidev_s *pcidev); \
- static void bfa_ ## __mod ## _detach(struct bfa_s *bfa); \
- static void bfa_ ## __mod ## _start(struct bfa_s *bfa); \
- static void bfa_ ## __mod ## _stop(struct bfa_s *bfa); \
- static void bfa_ ## __mod ## _iocdisable(struct bfa_s *bfa); \
- \
- extern struct bfa_module_s hal_mod_ ## __mod; \
- struct bfa_module_s hal_mod_ ## __mod = { \
- bfa_ ## __mod ## _meminfo, \
- bfa_ ## __mod ## _attach, \
- bfa_ ## __mod ## _detach, \
- bfa_ ## __mod ## _start, \
- bfa_ ## __mod ## _stop, \
- bfa_ ## __mod ## _iocdisable, \
- }
-
#define BFA_CACHELINE_SZ (256)
-/*
- * Structure used to interact between different BFA sub modules
- *
- * Each sub module needs to implement only the entry points relevant to it (and
- * can leave entry points as NULL)
- */
-struct bfa_module_s {
- void (*meminfo) (struct bfa_iocfc_cfg_s *cfg,
- struct bfa_meminfo_s *meminfo,
- struct bfa_s *bfa);
- void (*attach) (struct bfa_s *bfa, void *bfad,
- struct bfa_iocfc_cfg_s *cfg,
- struct bfa_pcidev_s *pcidev);
- void (*detach) (struct bfa_s *bfa);
- void (*start) (struct bfa_s *bfa);
- void (*stop) (struct bfa_s *bfa);
- void (*iocdisable) (struct bfa_s *bfa);
-};
-
-
struct bfa_s {
void *bfad; /* BFA driver instance */
struct bfa_plog_s *plog; /* portlog buffer */
@@ -127,14 +81,51 @@ struct bfa_s {
};
extern bfa_boolean_t bfa_auto_recover;
-extern struct bfa_module_s hal_mod_fcdiag;
-extern struct bfa_module_s hal_mod_sgpg;
-extern struct bfa_module_s hal_mod_fcport;
-extern struct bfa_module_s hal_mod_fcxp;
-extern struct bfa_module_s hal_mod_lps;
-extern struct bfa_module_s hal_mod_uf;
-extern struct bfa_module_s hal_mod_rport;
-extern struct bfa_module_s hal_mod_fcp;
-extern struct bfa_module_s hal_mod_dconf;
+
+void bfa_dconf_attach(struct bfa_s *, void *, struct bfa_iocfc_cfg_s *);
+void bfa_dconf_meminfo(struct bfa_iocfc_cfg_s *, struct bfa_meminfo_s *,
+ struct bfa_s *);
+void bfa_dconf_iocdisable(struct bfa_s *);
+void bfa_fcp_attach(struct bfa_s *, void *, struct bfa_iocfc_cfg_s *,
+ struct bfa_pcidev_s *);
+void bfa_fcp_iocdisable(struct bfa_s *bfa);
+void bfa_fcp_meminfo(struct bfa_iocfc_cfg_s *, struct bfa_meminfo_s *,
+ struct bfa_s *);
+void bfa_fcpim_iocdisable(struct bfa_fcp_mod_s *);
+void bfa_fcport_start(struct bfa_s *);
+void bfa_fcport_iocdisable(struct bfa_s *);
+void bfa_fcport_meminfo(struct bfa_iocfc_cfg_s *, struct bfa_meminfo_s *,
+ struct bfa_s *);
+void bfa_fcport_attach(struct bfa_s *, void *, struct bfa_iocfc_cfg_s *,
+ struct bfa_pcidev_s *);
+void bfa_fcxp_iocdisable(struct bfa_s *);
+void bfa_fcxp_meminfo(struct bfa_iocfc_cfg_s *, struct bfa_meminfo_s *,
+ struct bfa_s *);
+void bfa_fcxp_attach(struct bfa_s *, void *, struct bfa_iocfc_cfg_s *,
+ struct bfa_pcidev_s *);
+void bfa_fcdiag_iocdisable(struct bfa_s *);
+void bfa_fcdiag_attach(struct bfa_s *bfa, void *, struct bfa_iocfc_cfg_s *,
+ struct bfa_pcidev_s *);
+void bfa_ioim_lm_init(struct bfa_s *);
+void bfa_lps_iocdisable(struct bfa_s *bfa);
+void bfa_lps_meminfo(struct bfa_iocfc_cfg_s *, struct bfa_meminfo_s *,
+ struct bfa_s *);
+void bfa_lps_attach(struct bfa_s *, void *, struct bfa_iocfc_cfg_s *,
+ struct bfa_pcidev_s *);
+void bfa_rport_iocdisable(struct bfa_s *bfa);
+void bfa_rport_meminfo(struct bfa_iocfc_cfg_s *, struct bfa_meminfo_s *,
+ struct bfa_s *);
+void bfa_rport_attach(struct bfa_s *, void *, struct bfa_iocfc_cfg_s *,
+ struct bfa_pcidev_s *);
+void bfa_sgpg_meminfo(struct bfa_iocfc_cfg_s *, struct bfa_meminfo_s *,
+ struct bfa_s *);
+void bfa_sgpg_attach(struct bfa_s *, void *bfad, struct bfa_iocfc_cfg_s *,
+ struct bfa_pcidev_s *);
+void bfa_uf_iocdisable(struct bfa_s *);
+void bfa_uf_meminfo(struct bfa_iocfc_cfg_s *, struct bfa_meminfo_s *,
+ struct bfa_s *);
+void bfa_uf_attach(struct bfa_s *, void *, struct bfa_iocfc_cfg_s *,
+ struct bfa_pcidev_s *);
+void bfa_uf_start(struct bfa_s *);
#endif /* __BFA_MODULES_H__ */
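
[Editor's note] The bfa_modules.h hunk above retires the BFA_MODULE() macro and the struct bfa_module_s function-pointer table in favor of plain per-module prototypes, which is what allows the empty detach/start/stop stubs elsewhere in this series to be deleted. A minimal, hypothetical sketch of the two dispatch styles; ctx, mod_ops and widget_attach are illustrative names, not part of the driver:

#include <stdio.h>

struct ctx { int id; };                         /* stand-in for struct bfa_s */

/* Old pattern: an ops table where unused hooks are NULL or empty stubs. */
struct mod_ops {
	void (*attach)(struct ctx *c);
	void (*detach)(struct ctx *c);          /* frequently an empty stub */
};

static void widget_attach(struct ctx *c) { printf("attach %d\n", c->id); }

static const struct mod_ops widget_ops = { .attach = widget_attach };

int main(void)
{
	struct ctx c = { .id = 1 };

	/* Old style: indirect call, guarded against missing hooks. */
	if (widget_ops.attach)
		widget_ops.attach(&c);

	/* New style (what the header above moves to): call the implemented
	 * entry point directly, so empty stubs can simply be removed.
	 */
	widget_attach(&c);
	return 0;
}
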
diff --git a/drivers/scsi/bfa/bfa_svc.c b/drivers/scsi/bfa/bfa_svc.c
index 12de292175ef..e640223bab3c 100644
--- a/drivers/scsi/bfa/bfa_svc.c
+++ b/drivers/scsi/bfa/bfa_svc.c
@@ -23,13 +23,6 @@
#include "bfa_modules.h"
BFA_TRC_FILE(HAL, FCXP);
-BFA_MODULE(fcdiag);
-BFA_MODULE(fcxp);
-BFA_MODULE(sgpg);
-BFA_MODULE(lps);
-BFA_MODULE(fcport);
-BFA_MODULE(rport);
-BFA_MODULE(uf);
/*
* LPS related definitions
@@ -121,15 +114,6 @@ static void bfa_fcxp_queue(struct bfa_fcxp_s *fcxp,
/*
* forward declarations for LPS functions
*/
-static void bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg,
- struct bfa_meminfo_s *minfo, struct bfa_s *bfa);
-static void bfa_lps_attach(struct bfa_s *bfa, void *bfad,
- struct bfa_iocfc_cfg_s *cfg,
- struct bfa_pcidev_s *pcidev);
-static void bfa_lps_detach(struct bfa_s *bfa);
-static void bfa_lps_start(struct bfa_s *bfa);
-static void bfa_lps_stop(struct bfa_s *bfa);
-static void bfa_lps_iocdisable(struct bfa_s *bfa);
static void bfa_lps_login_rsp(struct bfa_s *bfa,
struct bfi_lps_login_rsp_s *rsp);
static void bfa_lps_no_res(struct bfa_lps_s *first_lps, u8 count);
@@ -484,7 +468,7 @@ claim_fcxps_mem(struct bfa_fcxp_mod_s *mod)
bfa_mem_kva_curp(mod) = (void *)fcxp;
}
-static void
+void
bfa_fcxp_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
struct bfa_s *bfa)
{
@@ -522,7 +506,7 @@ bfa_fcxp_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
cfg->fwcfg.num_fcxp_reqs * sizeof(struct bfa_fcxp_s));
}
-static void
+void
bfa_fcxp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
struct bfa_pcidev_s *pcidev)
{
@@ -544,22 +528,7 @@ bfa_fcxp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
claim_fcxps_mem(mod);
}
-static void
-bfa_fcxp_detach(struct bfa_s *bfa)
-{
-}
-
-static void
-bfa_fcxp_start(struct bfa_s *bfa)
-{
-}
-
-static void
-bfa_fcxp_stop(struct bfa_s *bfa)
-{
-}
-
-static void
+void
bfa_fcxp_iocdisable(struct bfa_s *bfa)
{
struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
@@ -1510,7 +1479,7 @@ bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event event)
/*
* return memory requirement
*/
-static void
+void
bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
struct bfa_s *bfa)
{
@@ -1527,7 +1496,7 @@ bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
/*
* bfa module attach at initialization time
*/
-static void
+void
bfa_lps_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
struct bfa_pcidev_s *pcidev)
{
@@ -1557,25 +1526,10 @@ bfa_lps_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
}
}
-static void
-bfa_lps_detach(struct bfa_s *bfa)
-{
-}
-
-static void
-bfa_lps_start(struct bfa_s *bfa)
-{
-}
-
-static void
-bfa_lps_stop(struct bfa_s *bfa)
-{
-}
-
/*
* IOC in disabled state -- consider all lps offline
*/
-static void
+void
bfa_lps_iocdisable(struct bfa_s *bfa)
{
struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
@@ -3055,7 +3009,7 @@ bfa_fcport_queue_cb(struct bfa_fcport_ln_s *ln, enum bfa_port_linkstate event)
#define FCPORT_STATS_DMA_SZ (BFA_ROUNDUP(sizeof(union bfa_fcport_stats_u), \
BFA_CACHELINE_SZ))
-static void
+void
bfa_fcport_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
struct bfa_s *bfa)
{
@@ -3086,7 +3040,7 @@ bfa_fcport_mem_claim(struct bfa_fcport_s *fcport)
/*
* Memory initialization.
*/
-static void
+void
bfa_fcport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
struct bfa_pcidev_s *pcidev)
{
@@ -3131,34 +3085,16 @@ bfa_fcport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
bfa_reqq_winit(&fcport->reqq_wait, bfa_fcport_qresume, fcport);
}
-static void
-bfa_fcport_detach(struct bfa_s *bfa)
-{
-}
-
-/*
- * Called when IOC is ready.
- */
-static void
+void
bfa_fcport_start(struct bfa_s *bfa)
{
bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_START);
}
/*
- * Called before IOC is stopped.
- */
-static void
-bfa_fcport_stop(struct bfa_s *bfa)
-{
- bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_STOP);
- bfa_trunk_iocdisable(bfa);
-}
-
-/*
* Called when IOC failure is detected.
*/
-static void
+void
bfa_fcport_iocdisable(struct bfa_s *bfa)
{
struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
@@ -4886,7 +4822,7 @@ bfa_rport_qresume(void *cbarg)
bfa_sm_send_event(rp, BFA_RPORT_SM_QRESUME);
}
-static void
+void
bfa_rport_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
struct bfa_s *bfa)
{
@@ -4900,7 +4836,7 @@ bfa_rport_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
cfg->fwcfg.num_rports * sizeof(struct bfa_rport_s));
}
-static void
+void
bfa_rport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
struct bfa_pcidev_s *pcidev)
{
@@ -4940,22 +4876,7 @@ bfa_rport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
bfa_mem_kva_curp(mod) = (u8 *) rp;
}
-static void
-bfa_rport_detach(struct bfa_s *bfa)
-{
-}
-
-static void
-bfa_rport_start(struct bfa_s *bfa)
-{
-}
-
-static void
-bfa_rport_stop(struct bfa_s *bfa)
-{
-}
-
-static void
+void
bfa_rport_iocdisable(struct bfa_s *bfa)
{
struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa);
@@ -5246,7 +5167,7 @@ bfa_rport_unset_lunmask(struct bfa_s *bfa, struct bfa_rport_s *rp)
/*
* Compute and return memory needed by FCP(im) module.
*/
-static void
+void
bfa_sgpg_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
struct bfa_s *bfa)
{
@@ -5281,7 +5202,7 @@ bfa_sgpg_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
cfg->drvcfg.num_sgpgs * sizeof(struct bfa_sgpg_s));
}
-static void
+void
bfa_sgpg_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
struct bfa_pcidev_s *pcidev)
{
@@ -5344,26 +5265,6 @@ bfa_sgpg_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
bfa_mem_kva_curp(mod) = (u8 *) hsgpg;
}
-static void
-bfa_sgpg_detach(struct bfa_s *bfa)
-{
-}
-
-static void
-bfa_sgpg_start(struct bfa_s *bfa)
-{
-}
-
-static void
-bfa_sgpg_stop(struct bfa_s *bfa)
-{
-}
-
-static void
-bfa_sgpg_iocdisable(struct bfa_s *bfa)
-{
-}
-
bfa_status_t
bfa_sgpg_malloc(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpgs)
{
@@ -5547,7 +5448,7 @@ uf_mem_claim(struct bfa_uf_mod_s *ufm)
claim_uf_post_msgs(ufm);
}
-static void
+void
bfa_uf_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
struct bfa_s *bfa)
{
@@ -5575,7 +5476,7 @@ bfa_uf_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
(sizeof(struct bfa_uf_s) + sizeof(struct bfi_uf_buf_post_s)));
}
-static void
+void
bfa_uf_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
struct bfa_pcidev_s *pcidev)
{
@@ -5590,11 +5491,6 @@ bfa_uf_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
uf_mem_claim(ufm);
}
-static void
-bfa_uf_detach(struct bfa_s *bfa)
-{
-}
-
static struct bfa_uf_s *
bfa_uf_get(struct bfa_uf_mod_s *uf_mod)
{
@@ -5682,12 +5578,7 @@ uf_recv(struct bfa_s *bfa, struct bfi_uf_frm_rcvd_s *m)
bfa_cb_queue(bfa, &uf->hcb_qe, __bfa_cb_uf_recv, uf);
}
-static void
-bfa_uf_stop(struct bfa_s *bfa)
-{
-}
-
-static void
+void
bfa_uf_iocdisable(struct bfa_s *bfa)
{
struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
@@ -5704,7 +5595,7 @@ bfa_uf_iocdisable(struct bfa_s *bfa)
}
}
-static void
+void
bfa_uf_start(struct bfa_s *bfa)
{
bfa_uf_post_all(BFA_UF_MOD(bfa));
@@ -5845,13 +5736,7 @@ bfa_fcdiag_set_busy_status(struct bfa_fcdiag_s *fcdiag)
fcport->diag_busy = BFA_FALSE;
}
-static void
-bfa_fcdiag_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
- struct bfa_s *bfa)
-{
-}
-
-static void
+void
bfa_fcdiag_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
struct bfa_pcidev_s *pcidev)
{
@@ -5870,7 +5755,7 @@ bfa_fcdiag_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
memset(&dport->result, 0, sizeof(struct bfa_diag_dport_result_s));
}
-static void
+void
bfa_fcdiag_iocdisable(struct bfa_s *bfa)
{
struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
@@ -5888,21 +5773,6 @@ bfa_fcdiag_iocdisable(struct bfa_s *bfa)
}
static void
-bfa_fcdiag_detach(struct bfa_s *bfa)
-{
-}
-
-static void
-bfa_fcdiag_start(struct bfa_s *bfa)
-{
-}
-
-static void
-bfa_fcdiag_stop(struct bfa_s *bfa)
-{
-}
-
-static void
bfa_fcdiag_queuetest_timeout(void *cbarg)
{
struct bfa_fcdiag_s *fcdiag = cbarg;
diff --git a/drivers/scsi/csiostor/csio_hw.h b/drivers/scsi/csiostor/csio_hw.h
index 029bef82c057..62758e830d3b 100644
--- a/drivers/scsi/csiostor/csio_hw.h
+++ b/drivers/scsi/csiostor/csio_hw.h
@@ -95,7 +95,6 @@ enum {
};
struct csio_msix_entries {
- unsigned short vector; /* Assigned MSI-X vector */
void *dev_id; /* Priv object associated w/ this msix*/
char desc[24]; /* Description of this vector */
};
diff --git a/drivers/scsi/csiostor/csio_isr.c b/drivers/scsi/csiostor/csio_isr.c
index 2fb71c6c3b37..7c8814715711 100644
--- a/drivers/scsi/csiostor/csio_isr.c
+++ b/drivers/scsi/csiostor/csio_isr.c
@@ -383,17 +383,15 @@ csio_request_irqs(struct csio_hw *hw)
int rv, i, j, k = 0;
struct csio_msix_entries *entryp = &hw->msix_entries[0];
struct csio_scsi_cpu_info *info;
+ struct pci_dev *pdev = hw->pdev;
if (hw->intr_mode != CSIO_IM_MSIX) {
- rv = request_irq(hw->pdev->irq, csio_fcoe_isr,
- (hw->intr_mode == CSIO_IM_MSI) ?
- 0 : IRQF_SHARED,
- KBUILD_MODNAME, hw);
+ rv = request_irq(pci_irq_vector(pdev, 0), csio_fcoe_isr,
+ hw->intr_mode == CSIO_IM_MSI ? 0 : IRQF_SHARED,
+ KBUILD_MODNAME, hw);
if (rv) {
- if (hw->intr_mode == CSIO_IM_MSI)
- pci_disable_msi(hw->pdev);
csio_err(hw, "Failed to allocate interrupt line.\n");
- return -EINVAL;
+ goto out_free_irqs;
}
goto out;
@@ -402,22 +400,22 @@ csio_request_irqs(struct csio_hw *hw)
/* Add the MSIX vector descriptions */
csio_add_msix_desc(hw);
- rv = request_irq(entryp[k].vector, csio_nondata_isr, 0,
+ rv = request_irq(pci_irq_vector(pdev, k), csio_nondata_isr, 0,
entryp[k].desc, hw);
if (rv) {
csio_err(hw, "IRQ request failed for vec %d err:%d\n",
- entryp[k].vector, rv);
- goto err;
+ pci_irq_vector(pdev, k), rv);
+ goto out_free_irqs;
}
- entryp[k++].dev_id = (void *)hw;
+ entryp[k++].dev_id = hw;
- rv = request_irq(entryp[k].vector, csio_fwevt_isr, 0,
+ rv = request_irq(pci_irq_vector(pdev, k), csio_fwevt_isr, 0,
entryp[k].desc, hw);
if (rv) {
csio_err(hw, "IRQ request failed for vec %d err:%d\n",
- entryp[k].vector, rv);
- goto err;
+ pci_irq_vector(pdev, k), rv);
+ goto out_free_irqs;
}
entryp[k++].dev_id = (void *)hw;
@@ -429,51 +427,31 @@ csio_request_irqs(struct csio_hw *hw)
struct csio_scsi_qset *sqset = &hw->sqset[i][j];
struct csio_q *q = hw->wrm.q_arr[sqset->iq_idx];
- rv = request_irq(entryp[k].vector, csio_scsi_isr, 0,
+ rv = request_irq(pci_irq_vector(pdev, k), csio_scsi_isr, 0,
entryp[k].desc, q);
if (rv) {
csio_err(hw,
"IRQ request failed for vec %d err:%d\n",
- entryp[k].vector, rv);
- goto err;
+ pci_irq_vector(pdev, k), rv);
+ goto out_free_irqs;
}
- entryp[k].dev_id = (void *)q;
+ entryp[k].dev_id = q;
} /* for all scsi cpus */
} /* for all ports */
out:
hw->flags |= CSIO_HWF_HOST_INTR_ENABLED;
-
return 0;
-err:
- for (i = 0; i < k; i++) {
- entryp = &hw->msix_entries[i];
- free_irq(entryp->vector, entryp->dev_id);
- }
- pci_disable_msix(hw->pdev);
-
+out_free_irqs:
+ for (i = 0; i < k; i++)
+ free_irq(pci_irq_vector(pdev, i), hw->msix_entries[i].dev_id);
+ pci_free_irq_vectors(hw->pdev);
return -EINVAL;
}
-static void
-csio_disable_msix(struct csio_hw *hw, bool free)
-{
- int i;
- struct csio_msix_entries *entryp;
- int cnt = hw->num_sqsets + CSIO_EXTRA_VECS;
-
- if (free) {
- for (i = 0; i < cnt; i++) {
- entryp = &hw->msix_entries[i];
- free_irq(entryp->vector, entryp->dev_id);
- }
- }
- pci_disable_msix(hw->pdev);
-}
-
/* Reduce per-port max possible CPUs */
static void
csio_reduce_sqsets(struct csio_hw *hw, int cnt)
@@ -500,10 +478,9 @@ static int
csio_enable_msix(struct csio_hw *hw)
{
int i, j, k, n, min, cnt;
- struct csio_msix_entries *entryp;
- struct msix_entry *entries;
int extra = CSIO_EXTRA_VECS;
struct csio_scsi_cpu_info *info;
+ struct irq_affinity desc = { .pre_vectors = 2 };
min = hw->num_pports + extra;
cnt = hw->num_sqsets + extra;
@@ -512,50 +489,35 @@ csio_enable_msix(struct csio_hw *hw)
if (hw->flags & CSIO_HWF_USING_SOFT_PARAMS || !csio_is_hw_master(hw))
cnt = min_t(uint8_t, hw->cfg_niq, cnt);
- entries = kzalloc(sizeof(struct msix_entry) * cnt, GFP_KERNEL);
- if (!entries)
- return -ENOMEM;
-
- for (i = 0; i < cnt; i++)
- entries[i].entry = (uint16_t)i;
-
csio_dbg(hw, "FW supp #niq:%d, trying %d msix's\n", hw->cfg_niq, cnt);
- cnt = pci_enable_msix_range(hw->pdev, entries, min, cnt);
- if (cnt < 0) {
- kfree(entries);
+ cnt = pci_alloc_irq_vectors_affinity(hw->pdev, min, cnt,
+ PCI_IRQ_MSIX | PCI_IRQ_AFFINITY, &desc);
+ if (cnt < 0)
return cnt;
- }
if (cnt < (hw->num_sqsets + extra)) {
csio_dbg(hw, "Reducing sqsets to %d\n", cnt - extra);
csio_reduce_sqsets(hw, cnt - extra);
}
- /* Save off vectors */
- for (i = 0; i < cnt; i++) {
- entryp = &hw->msix_entries[i];
- entryp->vector = entries[i].vector;
- }
-
/* Distribute vectors */
k = 0;
- csio_set_nondata_intr_idx(hw, entries[k].entry);
- csio_set_mb_intr_idx(csio_hw_to_mbm(hw), entries[k++].entry);
- csio_set_fwevt_intr_idx(hw, entries[k++].entry);
+ csio_set_nondata_intr_idx(hw, k);
+ csio_set_mb_intr_idx(csio_hw_to_mbm(hw), k++);
+ csio_set_fwevt_intr_idx(hw, k++);
for (i = 0; i < hw->num_pports; i++) {
info = &hw->scsi_cpu_info[i];
for (j = 0; j < hw->num_scsi_msix_cpus; j++) {
n = (j % info->max_cpus) + k;
- hw->sqset[i][j].intr_idx = entries[n].entry;
+ hw->sqset[i][j].intr_idx = n;
}
k += info->max_cpus;
}
- kfree(entries);
return 0;
}
@@ -597,22 +559,26 @@ csio_intr_disable(struct csio_hw *hw, bool free)
{
csio_hw_intr_disable(hw);
- switch (hw->intr_mode) {
- case CSIO_IM_MSIX:
- csio_disable_msix(hw, free);
- break;
- case CSIO_IM_MSI:
- if (free)
- free_irq(hw->pdev->irq, hw);
- pci_disable_msi(hw->pdev);
- break;
- case CSIO_IM_INTX:
- if (free)
- free_irq(hw->pdev->irq, hw);
- break;
- default:
- break;
+ if (free) {
+ int i;
+
+ switch (hw->intr_mode) {
+ case CSIO_IM_MSIX:
+ for (i = 0; i < hw->num_sqsets + CSIO_EXTRA_VECS; i++) {
+ free_irq(pci_irq_vector(hw->pdev, i),
+ hw->msix_entries[i].dev_id);
+ }
+ break;
+ case CSIO_IM_MSI:
+ case CSIO_IM_INTX:
+ free_irq(pci_irq_vector(hw->pdev, 0), hw);
+ break;
+ default:
+ break;
+ }
}
+
+ pci_free_irq_vectors(hw->pdev);
hw->intr_mode = CSIO_IM_NONE;
hw->flags &= ~CSIO_HWF_HOST_INTR_ENABLED;
}
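
[Editor's note] The csio_isr.c changes above, together with dropping the cached vector field from struct csio_msix_entries earlier, move the driver from pci_enable_msix_range() plus hand-rolled struct msix_entry bookkeeping to the managed pci_alloc_irq_vectors_affinity()/pci_irq_vector() interface. A condensed sketch of that general pattern, with foo_irq_handler, foo_dev and the vector counts as illustrative placeholders rather than csiostor code:

#include <linux/interrupt.h>
#include <linux/pci.h>

/* foo_irq_handler() and foo_dev are placeholders for the driver's handler
 * and per-device cookie.
 */
static irqreturn_t foo_irq_handler(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int foo_setup_irqs(struct pci_dev *pdev, void *foo_dev,
			  int min_vecs, int max_vecs)
{
	/* Keep the first two vectors out of the affinity spread, as the
	 * driver does for its non-data and firmware-event interrupts.
	 */
	struct irq_affinity desc = { .pre_vectors = 2 };
	int nvecs, i, rc;

	nvecs = pci_alloc_irq_vectors_affinity(pdev, min_vecs, max_vecs,
					       PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
					       &desc);
	if (nvecs < 0)
		return nvecs;

	for (i = 0; i < nvecs; i++) {
		/* No cached msix_entry needed; look the Linux IRQ up. */
		rc = request_irq(pci_irq_vector(pdev, i), foo_irq_handler, 0,
				 "foo", foo_dev);
		if (rc)
			goto out_free;
	}
	return 0;

out_free:
	while (--i >= 0)
		free_irq(pci_irq_vector(pdev, i), foo_dev);
	pci_free_irq_vectors(pdev);
	return rc;
}
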
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index 3fb3f5708ff7..1076c1578322 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -36,7 +36,7 @@ static unsigned int dbg_level;
#include "../libcxgbi.h"
#define DRV_MODULE_NAME "cxgb4i"
-#define DRV_MODULE_DESC "Chelsio T4/T5 iSCSI Driver"
+#define DRV_MODULE_DESC "Chelsio T4-T6 iSCSI Driver"
#define DRV_MODULE_VERSION "0.9.5-ko"
#define DRV_MODULE_RELDATE "Apr. 2015"
diff --git a/drivers/scsi/cxlflash/common.h b/drivers/scsi/cxlflash/common.h
index d11dcc59ff46..256af819377d 100644
--- a/drivers/scsi/cxlflash/common.h
+++ b/drivers/scsi/cxlflash/common.h
@@ -15,6 +15,7 @@
#ifndef _CXLFLASH_COMMON_H
#define _CXLFLASH_COMMON_H
+#include <linux/irq_poll.h>
#include <linux/list.h>
#include <linux/rwsem.h>
#include <linux/types.h>
@@ -24,30 +25,32 @@
extern const struct file_operations cxlflash_cxl_fops;
-#define MAX_CONTEXT CXLFLASH_MAX_CONTEXT /* num contexts per afu */
+#define MAX_CONTEXT CXLFLASH_MAX_CONTEXT /* num contexts per afu */
+#define MAX_FC_PORTS CXLFLASH_MAX_FC_PORTS /* max ports per AFU */
+#define LEGACY_FC_PORTS 2 /* legacy ports per AFU */
-#define CXLFLASH_BLOCK_SIZE 4096 /* 4K blocks */
+#define CHAN2PORTBANK(_x) ((_x) >> ilog2(CXLFLASH_NUM_FC_PORTS_PER_BANK))
+#define CHAN2BANKPORT(_x) ((_x) & (CXLFLASH_NUM_FC_PORTS_PER_BANK - 1))
+
+#define CHAN2PORTMASK(_x) (1 << (_x)) /* channel to port mask */
+#define PORTMASK2CHAN(_x) (ilog2((_x))) /* port mask to channel */
+#define PORTNUM2CHAN(_x) ((_x) - 1) /* port number to channel */
+
+#define CXLFLASH_BLOCK_SIZE 4096 /* 4K blocks */
#define CXLFLASH_MAX_XFER_SIZE 16777216 /* 16MB transfer */
#define CXLFLASH_MAX_SECTORS (CXLFLASH_MAX_XFER_SIZE/512) /* SCSI wants
- max_sectors
- in units of
- 512 byte
- sectors
- */
+ * max_sectors
+ * in units of
+ * 512 byte
+ * sectors
+ */
#define MAX_RHT_PER_CONTEXT (PAGE_SIZE / sizeof(struct sisl_rht_entry))
/* AFU command retry limit */
-#define MC_RETRY_CNT 5 /* sufficient for SCSI check and
- certain AFU errors */
+#define MC_RETRY_CNT 5 /* Sufficient for SCSI and certain AFU errors */
/* Command management definitions */
-#define CXLFLASH_NUM_CMDS (2 * CXLFLASH_MAX_CMDS) /* Must be a pow2 for
- alignment and more
- efficient array
- index derivation
- */
-
#define CXLFLASH_MAX_CMDS 256
#define CXLFLASH_MAX_CMDS_PER_LUN CXLFLASH_MAX_CMDS
@@ -57,10 +60,16 @@ extern const struct file_operations cxlflash_cxl_fops;
/* SQ for master issued cmds */
#define NUM_SQ_ENTRY CXLFLASH_MAX_CMDS
+/* Hardware queue definitions */
+#define CXLFLASH_DEF_HWQS 1
+#define CXLFLASH_MAX_HWQS 8
+#define PRIMARY_HWQ 0
+
static inline void check_sizes(void)
{
- BUILD_BUG_ON_NOT_POWER_OF_2(CXLFLASH_NUM_CMDS);
+ BUILD_BUG_ON_NOT_POWER_OF_2(CXLFLASH_NUM_FC_PORTS_PER_BANK);
+ BUILD_BUG_ON_NOT_POWER_OF_2(CXLFLASH_MAX_CMDS);
}
/* AFU defines a fixed size of 4K for command buffers (borrow 4K page define) */
@@ -80,11 +89,20 @@ enum cxlflash_init_state {
};
enum cxlflash_state {
+ STATE_PROBING, /* Initial state during probe */
+ STATE_PROBED, /* Temporary state, probe completed but EEH occurred */
STATE_NORMAL, /* Normal running state, everything good */
STATE_RESET, /* Reset state, trying to reset/recover */
STATE_FAILTERM /* Failed/terminating state, error out users/threads */
};
+enum cxlflash_hwq_mode {
+ HWQ_MODE_RR, /* Roundrobin (default) */
+ HWQ_MODE_TAG, /* Distribute based on block MQ tag */
+ HWQ_MODE_CPU, /* CPU affinity */
+ MAX_HWQ_MODE
+};
+
/*
* Each context has its own set of resource handles that is visible
* only from that context.
@@ -92,11 +110,11 @@ enum cxlflash_state {
struct cxlflash_cfg {
struct afu *afu;
- struct cxl_context *mcctx;
struct pci_dev *dev;
struct pci_device_id *dev_id;
struct Scsi_Host *host;
+ int num_fc_ports;
ulong cxlflash_regs_pci;
@@ -117,7 +135,7 @@ struct cxlflash_cfg {
struct file_operations cxl_fops;
/* Parameters that are LUN table related */
- int last_lun_index[CXLFLASH_NUM_FC_PORTS];
+ int last_lun_index[MAX_FC_PORTS];
int promote_lun_index;
struct list_head lluns; /* list of llun_info structs */
@@ -134,6 +152,8 @@ struct afu_cmd {
struct afu *parent;
struct scsi_cmnd *scp;
struct completion cevent;
+ struct list_head queue;
+ u32 hwq_index;
u8 cmd_tmf:1;
@@ -156,7 +176,7 @@ static inline struct afu_cmd *sc_to_afucz(struct scsi_cmnd *sc)
return afuc;
}
-struct afu {
+struct hwq {
/* Stuff requiring alignment go first. */
struct sisl_ioarcb sq[NUM_SQ_ENTRY]; /* 16K SQ */
u64 rrq_entry[NUM_RRQ_ENTRY]; /* 2K RRQ */
@@ -164,40 +184,67 @@ struct afu {
/* Beware of alignment till here. Preferably introduce new
* fields after this point
*/
-
- int (*send_cmd)(struct afu *, struct afu_cmd *);
- void (*context_reset)(struct afu_cmd *);
-
- /* AFU HW */
+ struct afu *afu;
+ struct cxl_context *ctx;
struct cxl_ioctl_start_work work;
- struct cxlflash_afu_map __iomem *afu_map; /* entire MMIO map */
struct sisl_host_map __iomem *host_map; /* MC host map */
struct sisl_ctrl_map __iomem *ctrl_map; /* MC control map */
-
ctx_hndl_t ctx_hndl; /* master's context handle */
+ u32 index; /* Index of this hwq */
atomic_t hsq_credits;
spinlock_t hsq_slock;
struct sisl_ioarcb *hsq_start;
struct sisl_ioarcb *hsq_end;
struct sisl_ioarcb *hsq_curr;
+ spinlock_t hrrq_slock;
u64 *hrrq_start;
u64 *hrrq_end;
u64 *hrrq_curr;
bool toggle;
- atomic_t cmds_active; /* Number of currently active AFU commands */
+
s64 room;
spinlock_t rrin_slock; /* Lock to rrin queuing and cmd_room updates */
+
+ struct irq_poll irqpoll;
+} __aligned(cache_line_size());
+
+struct afu {
+ struct hwq hwqs[CXLFLASH_MAX_HWQS];
+ int (*send_cmd)(struct afu *, struct afu_cmd *);
+ void (*context_reset)(struct afu_cmd *);
+
+ /* AFU HW */
+ struct cxlflash_afu_map __iomem *afu_map; /* entire MMIO map */
+
+ atomic_t cmds_active; /* Number of currently active AFU commands */
u64 hb;
u32 internal_lun; /* User-desired LUN mode for this AFU */
+ u32 num_hwqs; /* Number of hardware queues */
+ u32 desired_hwqs; /* Desired h/w queues, effective on AFU reset */
+ enum cxlflash_hwq_mode hwq_mode; /* Steering mode for h/w queues */
+ u32 hwq_rr_count; /* Count to distribute traffic for roundrobin */
+
char version[16];
u64 interface_version;
+ u32 irqpoll_weight;
struct cxlflash_cfg *parent; /* Pointer back to parent cxlflash_cfg */
-
};
+static inline struct hwq *get_hwq(struct afu *afu, u32 index)
+{
+ WARN_ON(index >= CXLFLASH_MAX_HWQS);
+
+ return &afu->hwqs[index];
+}
+
+static inline bool afu_is_irqpoll_enabled(struct afu *afu)
+{
+ return !!afu->irqpoll_weight;
+}
+
static inline bool afu_is_cmd_mode(struct afu *afu, u64 cmd_mode)
{
u64 afu_cap = afu->interface_version >> SISL_INTVER_CAP_SHIFT;
@@ -223,14 +270,36 @@ static inline u64 lun_to_lunid(u64 lun)
return be64_to_cpu(lun_id);
}
-int cxlflash_afu_sync(struct afu *, ctx_hndl_t, res_hndl_t, u8);
+static inline struct fc_port_bank __iomem *get_fc_port_bank(
+ struct cxlflash_cfg *cfg, int i)
+{
+ struct afu *afu = cfg->afu;
+
+ return &afu->afu_map->global.bank[CHAN2PORTBANK(i)];
+}
+
+static inline __be64 __iomem *get_fc_port_regs(struct cxlflash_cfg *cfg, int i)
+{
+ struct fc_port_bank __iomem *fcpb = get_fc_port_bank(cfg, i);
+
+ return &fcpb->fc_port_regs[CHAN2BANKPORT(i)][0];
+}
+
+static inline __be64 __iomem *get_fc_port_luns(struct cxlflash_cfg *cfg, int i)
+{
+ struct fc_port_bank __iomem *fcpb = get_fc_port_bank(cfg, i);
+
+ return &fcpb->fc_port_luns[CHAN2BANKPORT(i)][0];
+}
+
+int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t c, res_hndl_t r, u8 mode);
void cxlflash_list_init(void);
void cxlflash_term_global_luns(void);
void cxlflash_free_errpage(void);
-int cxlflash_ioctl(struct scsi_device *, int, void __user *);
-void cxlflash_stop_term_user_contexts(struct cxlflash_cfg *);
-int cxlflash_mark_contexts_error(struct cxlflash_cfg *);
-void cxlflash_term_local_luns(struct cxlflash_cfg *);
-void cxlflash_restore_luntable(struct cxlflash_cfg *);
+int cxlflash_ioctl(struct scsi_device *sdev, int cmd, void __user *arg);
+void cxlflash_stop_term_user_contexts(struct cxlflash_cfg *cfg);
+int cxlflash_mark_contexts_error(struct cxlflash_cfg *cfg);
+void cxlflash_term_local_luns(struct cxlflash_cfg *cfg);
+void cxlflash_restore_luntable(struct cxlflash_cfg *cfg);
#endif /* ifndef _CXLFLASH_COMMON_H */
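
[Editor's note] The common.h hunk above introduces the channel/bank/port index macros (CHAN2PORTBANK, CHAN2BANKPORT, CHAN2PORTMASK, PORTMASK2CHAN) used throughout the rest of the series. A tiny standalone sketch of the arithmetic they encode; the 2-ports-per-bank / 4-port geometry is an assumption (those constants live in other driver headers), division stands in for the kernel's shift-by-ilog2, and __builtin_ctz stands in for ilog2():

#include <stdio.h>

#define PORTS_PER_BANK	2                                /* assumed geometry */
#define MAX_PORTS	4                                /* assumed geometry */

#define CHAN2PORTBANK(x)  ((x) / PORTS_PER_BANK)         /* which register bank */
#define CHAN2BANKPORT(x)  ((x) & (PORTS_PER_BANK - 1))   /* port within the bank */
#define CHAN2PORTMASK(x)  (1u << (x))                    /* channel -> port mask */
#define PORTMASK2CHAN(m)  (__builtin_ctz(m))             /* stand-in for ilog2() */

int main(void)
{
	for (int chan = 0; chan < MAX_PORTS; chan++)
		printf("chan %d -> bank %d, port %d, mask 0x%x, back to chan %d\n",
		       chan, CHAN2PORTBANK(chan), CHAN2BANKPORT(chan),
		       CHAN2PORTMASK(chan),
		       PORTMASK2CHAN(CHAN2PORTMASK(chan)));
	return 0;
}
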
diff --git a/drivers/scsi/cxlflash/lunmgt.c b/drivers/scsi/cxlflash/lunmgt.c
index 0efed177cc8b..4d232e271af6 100644
--- a/drivers/scsi/cxlflash/lunmgt.c
+++ b/drivers/scsi/cxlflash/lunmgt.c
@@ -252,7 +252,7 @@ int cxlflash_manage_lun(struct scsi_device *sdev,
* in unpacked, AFU-friendly format, and hang LUN reference in
* the sdev.
*/
- lli->port_sel |= CHAN2PORT(chan);
+ lli->port_sel |= CHAN2PORTMASK(chan);
lli->lun_id[chan] = lun_to_lunid(sdev->lun);
sdev->hostdata = lli;
} else if (flags & DK_CXLFLASH_MANAGE_LUN_DISABLE_SUPERPIPE) {
@@ -264,7 +264,7 @@ int cxlflash_manage_lun(struct scsi_device *sdev,
* tracking when no more references exist.
*/
sdev->hostdata = NULL;
- lli->port_sel &= ~CHAN2PORT(chan);
+ lli->port_sel &= ~CHAN2PORTMASK(chan);
if (lli->port_sel == 0U)
lli->in_table = false;
}
diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c
index 3061d8045382..a7d57c343492 100644
--- a/drivers/scsi/cxlflash/main.c
+++ b/drivers/scsi/cxlflash/main.c
@@ -176,7 +176,6 @@ static void cmd_complete(struct afu_cmd *cmd)
dev_dbg_ratelimited(dev, "%s:scp=%p result=%08x ioasc=%08x\n",
__func__, scp, scp->result, cmd->sa.ioasc);
- scsi_dma_unmap(scp);
scp->scsi_done(scp);
if (cmd_is_tmf) {
@@ -224,8 +223,9 @@ static void context_reset(struct afu_cmd *cmd, __be64 __iomem *reset_reg)
static void context_reset_ioarrin(struct afu_cmd *cmd)
{
struct afu *afu = cmd->parent;
+ struct hwq *hwq = get_hwq(afu, cmd->hwq_index);
- context_reset(cmd, &afu->host_map->ioarrin);
+ context_reset(cmd, &hwq->host_map->ioarrin);
}
/**
@@ -235,8 +235,9 @@ static void context_reset_ioarrin(struct afu_cmd *cmd)
static void context_reset_sq(struct afu_cmd *cmd)
{
struct afu *afu = cmd->parent;
+ struct hwq *hwq = get_hwq(afu, cmd->hwq_index);
- context_reset(cmd, &afu->host_map->sq_ctx_reset);
+ context_reset(cmd, &hwq->host_map->sq_ctx_reset);
}
/**
@@ -251,6 +252,7 @@ static int send_cmd_ioarrin(struct afu *afu, struct afu_cmd *cmd)
{
struct cxlflash_cfg *cfg = afu->parent;
struct device *dev = &cfg->dev->dev;
+ struct hwq *hwq = get_hwq(afu, cmd->hwq_index);
int rc = 0;
s64 room;
ulong lock_flags;
@@ -259,23 +261,23 @@ static int send_cmd_ioarrin(struct afu *afu, struct afu_cmd *cmd)
* To avoid the performance penalty of MMIO, spread the update of
* 'room' over multiple commands.
*/
- spin_lock_irqsave(&afu->rrin_slock, lock_flags);
- if (--afu->room < 0) {
- room = readq_be(&afu->host_map->cmd_room);
+ spin_lock_irqsave(&hwq->rrin_slock, lock_flags);
+ if (--hwq->room < 0) {
+ room = readq_be(&hwq->host_map->cmd_room);
if (room <= 0) {
dev_dbg_ratelimited(dev, "%s: no cmd_room to send "
"0x%02X, room=0x%016llX\n",
__func__, cmd->rcb.cdb[0], room);
- afu->room = 0;
+ hwq->room = 0;
rc = SCSI_MLQUEUE_HOST_BUSY;
goto out;
}
- afu->room = room - 1;
+ hwq->room = room - 1;
}
- writeq_be((u64)&cmd->rcb, &afu->host_map->ioarrin);
+ writeq_be((u64)&cmd->rcb, &hwq->host_map->ioarrin);
out:
- spin_unlock_irqrestore(&afu->rrin_slock, lock_flags);
+ spin_unlock_irqrestore(&hwq->rrin_slock, lock_flags);
dev_dbg(dev, "%s: cmd=%p len=%u ea=%016llx rc=%d\n", __func__,
cmd, cmd->rcb.data_len, cmd->rcb.data_ea, rc);
return rc;
@@ -293,11 +295,12 @@ static int send_cmd_sq(struct afu *afu, struct afu_cmd *cmd)
{
struct cxlflash_cfg *cfg = afu->parent;
struct device *dev = &cfg->dev->dev;
+ struct hwq *hwq = get_hwq(afu, cmd->hwq_index);
int rc = 0;
int newval;
ulong lock_flags;
- newval = atomic_dec_if_positive(&afu->hsq_credits);
+ newval = atomic_dec_if_positive(&hwq->hsq_credits);
if (newval <= 0) {
rc = SCSI_MLQUEUE_HOST_BUSY;
goto out;
@@ -305,22 +308,22 @@ static int send_cmd_sq(struct afu *afu, struct afu_cmd *cmd)
cmd->rcb.ioasa = &cmd->sa;
- spin_lock_irqsave(&afu->hsq_slock, lock_flags);
+ spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
- *afu->hsq_curr = cmd->rcb;
- if (afu->hsq_curr < afu->hsq_end)
- afu->hsq_curr++;
+ *hwq->hsq_curr = cmd->rcb;
+ if (hwq->hsq_curr < hwq->hsq_end)
+ hwq->hsq_curr++;
else
- afu->hsq_curr = afu->hsq_start;
- writeq_be((u64)afu->hsq_curr, &afu->host_map->sq_tail);
+ hwq->hsq_curr = hwq->hsq_start;
+ writeq_be((u64)hwq->hsq_curr, &hwq->host_map->sq_tail);
- spin_unlock_irqrestore(&afu->hsq_slock, lock_flags);
+ spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
out:
dev_dbg(dev, "%s: cmd=%p len=%u ea=%016llx ioasa=%p rc=%d curr=%p "
"head=%016llx tail=%016llx\n", __func__, cmd, cmd->rcb.data_len,
- cmd->rcb.data_ea, cmd->rcb.ioasa, rc, afu->hsq_curr,
- readq_be(&afu->host_map->sq_head),
- readq_be(&afu->host_map->sq_tail));
+ cmd->rcb.data_ea, cmd->rcb.ioasa, rc, hwq->hsq_curr,
+ readq_be(&hwq->host_map->sq_head),
+ readq_be(&hwq->host_map->sq_tail));
return rc;
}
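
[Editor's note] In the send_cmd_sq() hunk above, the submission-queue state (hsq_credits, hsq_curr, hsq_slock and the sq_tail doorbell) now lives per hardware queue rather than per AFU. The mechanics are a credit-counted circular ring; a small illustrative userland sketch follows, using a plain int credit count in place of the driver's atomic_t, spinlock and MMIO doorbell:

#include <stdio.h>

#define SQ_DEPTH 8

struct sq {
	unsigned long entries[SQ_DEPTH];       /* stand-in for sisl_ioarcb slots */
	unsigned long *curr, *start, *end;
	int credits;                           /* free slots (depth - 1) */
};

static void sq_init(struct sq *q)
{
	q->start = &q->entries[0];
	q->end = &q->entries[SQ_DEPTH - 1];
	q->curr = q->start;
	q->credits = SQ_DEPTH - 1;
}

/* Mirrors the shape of send_cmd_sq(): reserve a credit, fill the current
 * slot, then advance or wrap; the real driver then rings sq_tail via MMIO.
 */
static int sq_submit(struct sq *q, unsigned long cmd)
{
	if (q->credits <= 0)
		return -1;                     /* SCSI_MLQUEUE_HOST_BUSY analogue */
	q->credits--;

	*q->curr = cmd;
	if (q->curr < q->end)
		q->curr++;
	else
		q->curr = q->start;            /* wrap */
	return 0;
}

int main(void)
{
	struct sq q;

	sq_init(&q);
	for (unsigned long i = 0; i < 10; i++)
		printf("cmd %lu -> %s\n", i, sq_submit(&q, i) ? "busy" : "queued");
	/* Completion processing would hand credits back, one per entry
	 * retired from the response queue.
	 */
	return 0;
}
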
@@ -355,6 +358,43 @@ static int wait_resp(struct afu *afu, struct afu_cmd *cmd)
}
/**
+ * cmd_to_target_hwq() - selects a target hardware queue for a SCSI command
+ * @host: SCSI host associated with device.
+ * @scp: SCSI command to send.
+ * @afu:	AFU associated with the host.
+ *
+ * Hashes a command based upon the hardware queue mode.
+ *
+ * Return: Trusted index of target hardware queue
+ */
+static u32 cmd_to_target_hwq(struct Scsi_Host *host, struct scsi_cmnd *scp,
+ struct afu *afu)
+{
+ u32 tag;
+ u32 hwq = 0;
+
+ if (afu->num_hwqs == 1)
+ return 0;
+
+ switch (afu->hwq_mode) {
+ case HWQ_MODE_RR:
+ hwq = afu->hwq_rr_count++ % afu->num_hwqs;
+ break;
+ case HWQ_MODE_TAG:
+ tag = blk_mq_unique_tag(scp->request);
+ hwq = blk_mq_unique_tag_to_hwq(tag);
+ break;
+ case HWQ_MODE_CPU:
+ hwq = smp_processor_id() % afu->num_hwqs;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ }
+
+ return hwq;
+}
+
+/**
* send_tmf() - sends a Task Management Function (TMF)
* @afu: AFU to checkout from.
* @scp: SCSI command from stack.
@@ -365,10 +405,12 @@ static int wait_resp(struct afu *afu, struct afu_cmd *cmd)
*/
static int send_tmf(struct afu *afu, struct scsi_cmnd *scp, u64 tmfcmd)
{
- u32 port_sel = scp->device->channel + 1;
- struct cxlflash_cfg *cfg = shost_priv(scp->device->host);
+ struct Scsi_Host *host = scp->device->host;
+ struct cxlflash_cfg *cfg = shost_priv(host);
struct afu_cmd *cmd = sc_to_afucz(scp);
struct device *dev = &cfg->dev->dev;
+ int hwq_index = cmd_to_target_hwq(host, scp, afu);
+ struct hwq *hwq = get_hwq(afu, hwq_index);
ulong lock_flags;
int rc = 0;
ulong to;
@@ -385,10 +427,11 @@ static int send_tmf(struct afu *afu, struct scsi_cmnd *scp, u64 tmfcmd)
cmd->scp = scp;
cmd->parent = afu;
cmd->cmd_tmf = true;
+ cmd->hwq_index = hwq_index;
- cmd->rcb.ctx_id = afu->ctx_hndl;
+ cmd->rcb.ctx_id = hwq->ctx_hndl;
cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
- cmd->rcb.port_sel = port_sel;
+ cmd->rcb.port_sel = CHAN2PORTMASK(scp->device->channel);
cmd->rcb.lun_id = lun_to_lunid(scp->device->lun);
cmd->rcb.req_flags = (SISL_REQ_FLAGS_PORT_LUN_ID |
SISL_REQ_FLAGS_SUP_UNDERRUN |
@@ -444,10 +487,10 @@ static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
struct device *dev = &cfg->dev->dev;
struct afu_cmd *cmd = sc_to_afucz(scp);
struct scatterlist *sg = scsi_sglist(scp);
- u32 port_sel = scp->device->channel + 1;
+ int hwq_index = cmd_to_target_hwq(host, scp, afu);
+ struct hwq *hwq = get_hwq(afu, hwq_index);
u16 req_flags = SISL_REQ_FLAGS_SUP_UNDERRUN;
ulong lock_flags;
- int nseg = 0;
int rc = 0;
dev_dbg_ratelimited(dev, "%s: (scp=%p) %d/%d/%d/%llu "
@@ -472,6 +515,8 @@ static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
switch (cfg->state) {
+ case STATE_PROBING:
+ case STATE_PROBED:
case STATE_RESET:
dev_dbg_ratelimited(dev, "%s: device is in reset\n", __func__);
rc = SCSI_MLQUEUE_HOST_BUSY;
@@ -487,23 +532,17 @@ static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
}
if (likely(sg)) {
- nseg = scsi_dma_map(scp);
- if (unlikely(nseg < 0)) {
- dev_err(dev, "%s: Fail DMA map\n", __func__);
- rc = SCSI_MLQUEUE_HOST_BUSY;
- goto out;
- }
-
- cmd->rcb.data_len = sg_dma_len(sg);
- cmd->rcb.data_ea = sg_dma_address(sg);
+ cmd->rcb.data_len = sg->length;
+ cmd->rcb.data_ea = (uintptr_t)sg_virt(sg);
}
cmd->scp = scp;
cmd->parent = afu;
+ cmd->hwq_index = hwq_index;
- cmd->rcb.ctx_id = afu->ctx_hndl;
+ cmd->rcb.ctx_id = hwq->ctx_hndl;
cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
- cmd->rcb.port_sel = port_sel;
+ cmd->rcb.port_sel = CHAN2PORTMASK(scp->device->channel);
cmd->rcb.lun_id = lun_to_lunid(scp->device->lun);
if (scp->sc_data_direction == DMA_TO_DEVICE)
@@ -513,8 +552,6 @@ static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
memcpy(cmd->rcb.cdb, scp->cmnd, sizeof(cmd->rcb.cdb));
rc = afu->send_cmd(afu, cmd);
- if (unlikely(rc))
- scsi_dma_unmap(scp);
out:
return rc;
}
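
[Editor's note] cxlflash_queuecommand() above now steers each command to a hardware queue via cmd_to_target_hwq() and derives port_sel from CHAN2PORTMASK() instead of channel + 1. A toy sketch of the three steering policies follows; pick_hwq() and its tag/cpu parameters are stand-ins for blk_mq_unique_tag_to_hwq() and smp_processor_id(), and the tag case here uses a simple modulo rather than the block layer's real tag-to-hctx mapping:

#include <stdio.h>

#define NUM_HWQS 4

enum hwq_mode { MODE_RR, MODE_TAG, MODE_CPU };

/* Toy steering function mirroring the shape of cmd_to_target_hwq(). */
static unsigned int pick_hwq(enum hwq_mode mode, unsigned int *rr_count,
			     unsigned int tag, unsigned int cpu)
{
	switch (mode) {
	case MODE_RR:
		return (*rr_count)++ % NUM_HWQS;   /* round-robin (default) */
	case MODE_TAG:
		return tag % NUM_HWQS;             /* placeholder for blk-mq tag */
	case MODE_CPU:
		return cpu % NUM_HWQS;             /* submitting CPU affinity */
	}
	return 0;
}

int main(void)
{
	unsigned int rr = 0;

	for (unsigned int i = 0; i < 8; i++)
		printf("cmd %u: rr->hwq %u, tag->hwq %u, cpu->hwq %u\n", i,
		       pick_hwq(MODE_RR, &rr, i, 3),
		       pick_hwq(MODE_TAG, &rr, i, 3),
		       pick_hwq(MODE_CPU, &rr, i, 3));
	return 0;
}
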
@@ -554,17 +591,28 @@ static void free_mem(struct cxlflash_cfg *cfg)
* Safe to call with AFU in a partially allocated/initialized state.
*
* Cancels scheduled worker threads, waits for any active internal AFU
- * commands to timeout and then unmaps the MMIO space.
+ * commands to timeout, disables IRQ polling and then unmaps the MMIO space.
*/
static void stop_afu(struct cxlflash_cfg *cfg)
{
struct afu *afu = cfg->afu;
+ struct hwq *hwq;
+ int i;
cancel_work_sync(&cfg->work_q);
if (likely(afu)) {
while (atomic_read(&afu->cmds_active))
ssleep(1);
+
+ if (afu_is_irqpoll_enabled(afu)) {
+ for (i = 0; i < afu->num_hwqs; i++) {
+ hwq = get_hwq(afu, i);
+
+ irq_poll_disable(&hwq->irqpoll);
+ }
+ }
+
if (likely(afu->afu_map)) {
cxl_psa_unmap((void __iomem *)afu->afu_map);
afu->afu_map = NULL;
@@ -576,28 +624,40 @@ static void stop_afu(struct cxlflash_cfg *cfg)
* term_intr() - disables all AFU interrupts
* @cfg: Internal structure associated with the host.
* @level: Depth of allocation, where to begin waterfall tear down.
+ * @index: Index of the hardware queue.
*
* Safe to call with AFU/MC in partially allocated/initialized state.
*/
-static void term_intr(struct cxlflash_cfg *cfg, enum undo_level level)
+static void term_intr(struct cxlflash_cfg *cfg, enum undo_level level,
+ u32 index)
{
struct afu *afu = cfg->afu;
struct device *dev = &cfg->dev->dev;
+ struct hwq *hwq;
- if (!afu || !cfg->mcctx) {
- dev_err(dev, "%s: returning with NULL afu or MC\n", __func__);
+ if (!afu) {
+ dev_err(dev, "%s: returning with NULL afu\n", __func__);
+ return;
+ }
+
+ hwq = get_hwq(afu, index);
+
+ if (!hwq->ctx) {
+ dev_err(dev, "%s: returning with NULL MC\n", __func__);
return;
}
switch (level) {
case UNMAP_THREE:
- cxl_unmap_afu_irq(cfg->mcctx, 3, afu);
+ /* SISL_MSI_ASYNC_ERROR is setup only for the primary HWQ */
+ if (index == PRIMARY_HWQ)
+ cxl_unmap_afu_irq(hwq->ctx, 3, hwq);
case UNMAP_TWO:
- cxl_unmap_afu_irq(cfg->mcctx, 2, afu);
+ cxl_unmap_afu_irq(hwq->ctx, 2, hwq);
case UNMAP_ONE:
- cxl_unmap_afu_irq(cfg->mcctx, 1, afu);
+ cxl_unmap_afu_irq(hwq->ctx, 1, hwq);
case FREE_IRQ:
- cxl_free_afu_irqs(cfg->mcctx);
+ cxl_free_afu_irqs(hwq->ctx);
/* fall through */
case UNDO_NOOP:
/* No action required */
@@ -608,24 +668,32 @@ static void term_intr(struct cxlflash_cfg *cfg, enum undo_level level)
/**
* term_mc() - terminates the master context
* @cfg: Internal structure associated with the host.
- * @level: Depth of allocation, where to begin waterfall tear down.
+ * @index: Index of the hardware queue.
*
* Safe to call with AFU/MC in partially allocated/initialized state.
*/
-static void term_mc(struct cxlflash_cfg *cfg)
+static void term_mc(struct cxlflash_cfg *cfg, u32 index)
{
- int rc = 0;
struct afu *afu = cfg->afu;
struct device *dev = &cfg->dev->dev;
+ struct hwq *hwq;
- if (!afu || !cfg->mcctx) {
- dev_err(dev, "%s: returning with NULL afu or MC\n", __func__);
+ if (!afu) {
+ dev_err(dev, "%s: returning with NULL afu\n", __func__);
return;
}
- rc = cxl_stop_context(cfg->mcctx);
- WARN_ON(rc);
- cfg->mcctx = NULL;
+ hwq = get_hwq(afu, index);
+
+ if (!hwq->ctx) {
+ dev_err(dev, "%s: returning with NULL MC\n", __func__);
+ return;
+ }
+
+ WARN_ON(cxl_stop_context(hwq->ctx));
+ if (index != PRIMARY_HWQ)
+ WARN_ON(cxl_release_context(hwq->ctx));
+ hwq->ctx = NULL;
}
/**
@@ -637,21 +705,25 @@ static void term_mc(struct cxlflash_cfg *cfg)
static void term_afu(struct cxlflash_cfg *cfg)
{
struct device *dev = &cfg->dev->dev;
+ int k;
/*
* Tear down is carefully orchestrated to ensure
* no interrupts can come in when the problem state
* area is unmapped.
*
- * 1) Disable all AFU interrupts
+ * 1) Disable all AFU interrupts for each master
* 2) Unmap the problem state area
- * 3) Stop the master context
+ * 3) Stop each master context
*/
- term_intr(cfg, UNMAP_THREE);
+ for (k = cfg->afu->num_hwqs - 1; k >= 0; k--)
+ term_intr(cfg, UNMAP_THREE, k);
+
if (cfg->afu)
stop_afu(cfg);
- term_mc(cfg);
+ for (k = cfg->afu->num_hwqs - 1; k >= 0; k--)
+ term_mc(cfg, k);
dev_dbg(dev, "%s: returning\n", __func__);
}
@@ -670,8 +742,8 @@ static void notify_shutdown(struct cxlflash_cfg *cfg, bool wait)
{
struct afu *afu = cfg->afu;
struct device *dev = &cfg->dev->dev;
- struct sisl_global_map __iomem *global;
struct dev_dependent_vals *ddv;
+ __be64 __iomem *fc_port_regs;
u64 reg, status;
int i, retry_cnt = 0;
@@ -684,23 +756,25 @@ static void notify_shutdown(struct cxlflash_cfg *cfg, bool wait)
return;
}
- global = &afu->afu_map->global;
-
/* Notify AFU */
- for (i = 0; i < NUM_FC_PORTS; i++) {
- reg = readq_be(&global->fc_regs[i][FC_CONFIG2 / 8]);
+ for (i = 0; i < cfg->num_fc_ports; i++) {
+ fc_port_regs = get_fc_port_regs(cfg, i);
+
+ reg = readq_be(&fc_port_regs[FC_CONFIG2 / 8]);
reg |= SISL_FC_SHUTDOWN_NORMAL;
- writeq_be(reg, &global->fc_regs[i][FC_CONFIG2 / 8]);
+ writeq_be(reg, &fc_port_regs[FC_CONFIG2 / 8]);
}
if (!wait)
return;
/* Wait up to 1.5 seconds for shutdown processing to complete */
- for (i = 0; i < NUM_FC_PORTS; i++) {
+ for (i = 0; i < cfg->num_fc_ports; i++) {
+ fc_port_regs = get_fc_port_regs(cfg, i);
retry_cnt = 0;
+
while (true) {
- status = readq_be(&global->fc_regs[i][FC_STATUS / 8]);
+ status = readq_be(&fc_port_regs[FC_STATUS / 8]);
if (status & SISL_STATUS_SHUTDOWN_COMPLETE)
break;
if (++retry_cnt >= MC_RETRY_CNT) {
@@ -717,7 +791,8 @@ static void notify_shutdown(struct cxlflash_cfg *cfg, bool wait)
* cxlflash_remove() - PCI entry point to tear down host
* @pdev: PCI device associated with the host.
*
- * Safe to use as a cleanup in partially allocated/initialized state.
+ * Safe to use as a cleanup in partially allocated/initialized state. Note that
+ * the reset_waitq is flushed as part of the stop/termination of user contexts.
*/
static void cxlflash_remove(struct pci_dev *pdev)
{
@@ -750,7 +825,6 @@ static void cxlflash_remove(struct pci_dev *pdev)
case INIT_STATE_SCSI:
cxlflash_term_local_luns(cfg);
scsi_remove_host(cfg->host);
- /* fall through */
case INIT_STATE_AFU:
term_afu(cfg);
case INIT_STATE_PCI:
@@ -789,6 +863,7 @@ static int alloc_mem(struct cxlflash_cfg *cfg)
goto out;
}
cfg->afu->parent = cfg;
+ cfg->afu->desired_hwqs = CXLFLASH_DEF_HWQS;
cfg->afu->afu_map = NULL;
out:
return rc;
@@ -1024,53 +1099,16 @@ static void afu_link_reset(struct afu *afu, int port, __be64 __iomem *fc_regs)
dev_dbg(dev, "%s: returning port_sel=%016llx\n", __func__, port_sel);
}
-/*
- * Asynchronous interrupt information table
- */
-static const struct asyc_intr_info ainfo[] = {
- {SISL_ASTATUS_FC0_OTHER, "other error", 0, CLR_FC_ERROR | LINK_RESET},
- {SISL_ASTATUS_FC0_LOGO, "target initiated LOGO", 0, 0},
- {SISL_ASTATUS_FC0_CRC_T, "CRC threshold exceeded", 0, LINK_RESET},
- {SISL_ASTATUS_FC0_LOGI_R, "login timed out, retrying", 0, LINK_RESET},
- {SISL_ASTATUS_FC0_LOGI_F, "login failed", 0, CLR_FC_ERROR},
- {SISL_ASTATUS_FC0_LOGI_S, "login succeeded", 0, SCAN_HOST},
- {SISL_ASTATUS_FC0_LINK_DN, "link down", 0, 0},
- {SISL_ASTATUS_FC0_LINK_UP, "link up", 0, 0},
- {SISL_ASTATUS_FC1_OTHER, "other error", 1, CLR_FC_ERROR | LINK_RESET},
- {SISL_ASTATUS_FC1_LOGO, "target initiated LOGO", 1, 0},
- {SISL_ASTATUS_FC1_CRC_T, "CRC threshold exceeded", 1, LINK_RESET},
- {SISL_ASTATUS_FC1_LOGI_R, "login timed out, retrying", 1, LINK_RESET},
- {SISL_ASTATUS_FC1_LOGI_F, "login failed", 1, CLR_FC_ERROR},
- {SISL_ASTATUS_FC1_LOGI_S, "login succeeded", 1, SCAN_HOST},
- {SISL_ASTATUS_FC1_LINK_DN, "link down", 1, 0},
- {SISL_ASTATUS_FC1_LINK_UP, "link up", 1, 0},
- {0x0, "", 0, 0} /* terminator */
-};
-
-/**
- * find_ainfo() - locates and returns asynchronous interrupt information
- * @status: Status code set by AFU on error.
- *
- * Return: The located information or NULL when the status code is invalid.
- */
-static const struct asyc_intr_info *find_ainfo(u64 status)
-{
- const struct asyc_intr_info *info;
-
- for (info = &ainfo[0]; info->status; info++)
- if (info->status == status)
- return info;
-
- return NULL;
-}
-
/**
* afu_err_intr_init() - clears and initializes the AFU for error interrupts
* @afu: AFU associated with the host.
*/
static void afu_err_intr_init(struct afu *afu)
{
+ struct cxlflash_cfg *cfg = afu->parent;
+ __be64 __iomem *fc_port_regs;
int i;
+ struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
u64 reg;
/* global async interrupts: AFU clears afu_ctrl on context exit
@@ -1082,8 +1120,8 @@ static void afu_err_intr_init(struct afu *afu)
/* mask all */
writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_mask);
- /* set LISN# to send and point to master context */
- reg = ((u64) (((afu->ctx_hndl << 8) | SISL_MSI_ASYNC_ERROR)) << 40);
+ /* set LISN# to send and point to primary master context */
+ reg = ((u64) (((hwq->ctx_hndl << 8) | SISL_MSI_ASYNC_ERROR)) << 40);
if (afu->internal_lun)
reg |= 1; /* Bit 63 indicates local lun */
@@ -1098,17 +1136,19 @@ static void afu_err_intr_init(struct afu *afu)
writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear);
/* Clear/Set internal lun bits */
- reg = readq_be(&afu->afu_map->global.fc_regs[0][FC_CONFIG2 / 8]);
+ fc_port_regs = get_fc_port_regs(cfg, 0);
+ reg = readq_be(&fc_port_regs[FC_CONFIG2 / 8]);
reg &= SISL_FC_INTERNAL_MASK;
if (afu->internal_lun)
reg |= ((u64)(afu->internal_lun - 1) << SISL_FC_INTERNAL_SHIFT);
- writeq_be(reg, &afu->afu_map->global.fc_regs[0][FC_CONFIG2 / 8]);
+ writeq_be(reg, &fc_port_regs[FC_CONFIG2 / 8]);
/* now clear FC errors */
- for (i = 0; i < NUM_FC_PORTS; i++) {
- writeq_be(0xFFFFFFFFU,
- &afu->afu_map->global.fc_regs[i][FC_ERROR / 8]);
- writeq_be(0, &afu->afu_map->global.fc_regs[i][FC_ERRCAP / 8]);
+ for (i = 0; i < cfg->num_fc_ports; i++) {
+ fc_port_regs = get_fc_port_regs(cfg, i);
+
+ writeq_be(0xFFFFFFFFU, &fc_port_regs[FC_ERROR / 8]);
+ writeq_be(0, &fc_port_regs[FC_ERRCAP / 8]);
}
/* sync interrupts for master's IOARRIN write */
@@ -1117,8 +1157,12 @@ static void afu_err_intr_init(struct afu *afu)
/* IOARRIN yet), so there is nothing to clear. */
/* set LISN#, it is always sent to the context that wrote IOARRIN */
- writeq_be(SISL_MSI_SYNC_ERROR, &afu->host_map->ctx_ctrl);
- writeq_be(SISL_ISTATUS_MASK, &afu->host_map->intr_mask);
+ for (i = 0; i < afu->num_hwqs; i++) {
+ hwq = get_hwq(afu, i);
+
+ writeq_be(SISL_MSI_SYNC_ERROR, &hwq->host_map->ctx_ctrl);
+ writeq_be(SISL_ISTATUS_MASK, &hwq->host_map->intr_mask);
+ }
}
/**
@@ -1130,13 +1174,13 @@ static void afu_err_intr_init(struct afu *afu)
*/
static irqreturn_t cxlflash_sync_err_irq(int irq, void *data)
{
- struct afu *afu = (struct afu *)data;
- struct cxlflash_cfg *cfg = afu->parent;
+ struct hwq *hwq = (struct hwq *)data;
+ struct cxlflash_cfg *cfg = hwq->afu->parent;
struct device *dev = &cfg->dev->dev;
u64 reg;
u64 reg_unmasked;
- reg = readq_be(&afu->host_map->intr_status);
+ reg = readq_be(&hwq->host_map->intr_status);
reg_unmasked = (reg & SISL_ISTATUS_UNMASK);
if (reg_unmasked == 0UL) {
@@ -1148,32 +1192,36 @@ static irqreturn_t cxlflash_sync_err_irq(int irq, void *data)
dev_err(dev, "%s: unexpected interrupt, intr_status=%016llx\n",
__func__, reg);
- writeq_be(reg_unmasked, &afu->host_map->intr_clear);
+ writeq_be(reg_unmasked, &hwq->host_map->intr_clear);
cxlflash_sync_err_irq_exit:
return IRQ_HANDLED;
}
/**
- * cxlflash_rrq_irq() - interrupt handler for read-response queue (normal path)
- * @irq: Interrupt number.
- * @data: Private data provided at interrupt registration, the AFU.
+ * process_hrrq() - process the read-response queue
+ * @afu: AFU associated with the host.
+ * @doneq: Queue of commands harvested from the RRQ.
+ * @budget: Threshold of RRQ entries to process.
*
- * Return: Always return IRQ_HANDLED.
+ * This routine must be called holding the disabled RRQ spin lock.
+ *
+ * Return: The number of entries processed.
*/
-static irqreturn_t cxlflash_rrq_irq(int irq, void *data)
+static int process_hrrq(struct hwq *hwq, struct list_head *doneq, int budget)
{
- struct afu *afu = (struct afu *)data;
+ struct afu *afu = hwq->afu;
struct afu_cmd *cmd;
struct sisl_ioasa *ioasa;
struct sisl_ioarcb *ioarcb;
- bool toggle = afu->toggle;
+ bool toggle = hwq->toggle;
+ int num_hrrq = 0;
u64 entry,
- *hrrq_start = afu->hrrq_start,
- *hrrq_end = afu->hrrq_end,
- *hrrq_curr = afu->hrrq_curr;
+ *hrrq_start = hwq->hrrq_start,
+ *hrrq_end = hwq->hrrq_end,
+ *hrrq_curr = hwq->hrrq_curr;
- /* Process however many RRQ entries that are ready */
+ /* Process ready RRQ entries up to the specified budget (if any) */
while (true) {
entry = *hrrq_curr;
@@ -1190,7 +1238,7 @@ static irqreturn_t cxlflash_rrq_irq(int irq, void *data)
cmd = container_of(ioarcb, struct afu_cmd, rcb);
}
- cmd_complete(cmd);
+ list_add_tail(&cmd->queue, doneq);
/* Advance to next entry or wrap and flip the toggle bit */
if (hrrq_curr < hrrq_end)
@@ -1200,15 +1248,123 @@ static irqreturn_t cxlflash_rrq_irq(int irq, void *data)
toggle ^= SISL_RESP_HANDLE_T_BIT;
}
- atomic_inc(&afu->hsq_credits);
+ atomic_inc(&hwq->hsq_credits);
+ num_hrrq++;
+
+ if (budget > 0 && num_hrrq >= budget)
+ break;
+ }
+
+ hwq->hrrq_curr = hrrq_curr;
+ hwq->toggle = toggle;
+
+ return num_hrrq;
+}
+
+/**
+ * process_cmd_doneq() - process a queue of harvested RRQ commands
+ * @doneq: Queue of completed commands.
+ *
+ * Note that upon return the queue can no longer be trusted.
+ */
+static void process_cmd_doneq(struct list_head *doneq)
+{
+ struct afu_cmd *cmd, *tmp;
+
+ WARN_ON(list_empty(doneq));
+
+ list_for_each_entry_safe(cmd, tmp, doneq, queue)
+ cmd_complete(cmd);
+}
+
+/**
+ * cxlflash_irqpoll() - process a queue of harvested RRQ commands
+ * @irqpoll: IRQ poll structure associated with queue to poll.
+ * @budget: Threshold of RRQ entries to process per poll.
+ *
+ * Return: The number of entries processed.
+ */
+static int cxlflash_irqpoll(struct irq_poll *irqpoll, int budget)
+{
+ struct hwq *hwq = container_of(irqpoll, struct hwq, irqpoll);
+ unsigned long hrrq_flags;
+ LIST_HEAD(doneq);
+ int num_entries = 0;
+
+ spin_lock_irqsave(&hwq->hrrq_slock, hrrq_flags);
+
+ num_entries = process_hrrq(hwq, &doneq, budget);
+ if (num_entries < budget)
+ irq_poll_complete(irqpoll);
+
+ spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags);
+
+ process_cmd_doneq(&doneq);
+ return num_entries;
+}
+
+/**
+ * cxlflash_rrq_irq() - interrupt handler for read-response queue (normal path)
+ * @irq: Interrupt number.
+ * @data: Private data provided at interrupt registration, the AFU.
+ *
+ * Return: IRQ_HANDLED or IRQ_NONE when no ready entries found.
+ */
+static irqreturn_t cxlflash_rrq_irq(int irq, void *data)
+{
+ struct hwq *hwq = (struct hwq *)data;
+ struct afu *afu = hwq->afu;
+ unsigned long hrrq_flags;
+ LIST_HEAD(doneq);
+ int num_entries = 0;
+
+ spin_lock_irqsave(&hwq->hrrq_slock, hrrq_flags);
+
+ if (afu_is_irqpoll_enabled(afu)) {
+ irq_poll_sched(&hwq->irqpoll);
+ spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags);
+ return IRQ_HANDLED;
}
- afu->hrrq_curr = hrrq_curr;
- afu->toggle = toggle;
+ num_entries = process_hrrq(hwq, &doneq, -1);
+ spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags);
+
+ if (num_entries == 0)
+ return IRQ_NONE;
+ process_cmd_doneq(&doneq);
return IRQ_HANDLED;
}
+/*
+ * Asynchronous interrupt information table
+ *
+ * NOTE:
+ * - Order matters here as this array is indexed by bit position.
+ *
+ * - The checkpatch script considers the BUILD_SISL_ASTATUS_FC_PORT macro
+ * as complex and complains due to a lack of parentheses/braces.
+ */
+#define ASTATUS_FC(_a, _b, _c, _d) \
+ { SISL_ASTATUS_FC##_a##_##_b, _c, _a, (_d) }
+
+#define BUILD_SISL_ASTATUS_FC_PORT(_a) \
+ ASTATUS_FC(_a, LINK_UP, "link up", 0), \
+ ASTATUS_FC(_a, LINK_DN, "link down", 0), \
+ ASTATUS_FC(_a, LOGI_S, "login succeeded", SCAN_HOST), \
+ ASTATUS_FC(_a, LOGI_F, "login failed", CLR_FC_ERROR), \
+ ASTATUS_FC(_a, LOGI_R, "login timed out, retrying", LINK_RESET), \
+ ASTATUS_FC(_a, CRC_T, "CRC threshold exceeded", LINK_RESET), \
+ ASTATUS_FC(_a, LOGO, "target initiated LOGO", 0), \
+ ASTATUS_FC(_a, OTHER, "other error", CLR_FC_ERROR | LINK_RESET)
+
+static const struct asyc_intr_info ainfo[] = {
+ BUILD_SISL_ASTATUS_FC_PORT(1),
+ BUILD_SISL_ASTATUS_FC_PORT(0),
+ BUILD_SISL_ASTATUS_FC_PORT(3),
+ BUILD_SISL_ASTATUS_FC_PORT(2)
+};
+
/**
* cxlflash_async_err_irq() - interrupt handler for asynchronous errors
* @irq: Interrupt number.
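
[Editor's note] The large hunk above converts RRQ completion handling to the kernel's irq_poll API: the hard interrupt either drains the response queue inline or, when a polling weight is configured, schedules an irq_poll instance whose callback harvests up to the given budget and calls irq_poll_complete() once it falls short. A hedged sketch of that pattern; the foo_* names and the empty foo_drain() body are placeholders, and only the irq_poll/interrupt calls are the real kernel API:

#include <linux/interrupt.h>
#include <linux/irq_poll.h>
#include <linux/kernel.h>
#include <linux/types.h>

struct foo_queue {
	struct irq_poll irqpoll;
	bool use_polling;
};

/* Placeholder: process up to @budget completions (budget < 0 = no limit)
 * and return how many were found.
 */
static int foo_drain(struct foo_queue *q, int budget)
{
	return 0;
}

static int foo_poll(struct irq_poll *iop, int budget)
{
	struct foo_queue *q = container_of(iop, struct foo_queue, irqpoll);
	int done = foo_drain(q, budget);

	if (done < budget)                 /* queue drained: re-arm the IRQ */
		irq_poll_complete(iop);
	return done;
}

static irqreturn_t foo_irq(int irq, void *data)
{
	struct foo_queue *q = data;

	if (q->use_polling) {
		irq_poll_sched(&q->irqpoll);   /* defer the bulk of the work */
		return IRQ_HANDLED;
	}
	return foo_drain(q, -1) ? IRQ_HANDLED : IRQ_NONE;
}

static void foo_queue_init(struct foo_queue *q, int weight)
{
	q->use_polling = weight > 0;
	if (q->use_polling)
		irq_poll_init(&q->irqpoll, weight, foo_poll);
	/* Teardown would call irq_poll_disable(&q->irqpoll), as stop_afu()
	 * does per hardware queue in the hunks earlier in this file.
	 */
}
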
@@ -1218,20 +1374,22 @@ static irqreturn_t cxlflash_rrq_irq(int irq, void *data)
*/
static irqreturn_t cxlflash_async_err_irq(int irq, void *data)
{
- struct afu *afu = (struct afu *)data;
+ struct hwq *hwq = (struct hwq *)data;
+ struct afu *afu = hwq->afu;
struct cxlflash_cfg *cfg = afu->parent;
struct device *dev = &cfg->dev->dev;
- u64 reg_unmasked;
const struct asyc_intr_info *info;
struct sisl_global_map __iomem *global = &afu->afu_map->global;
+ __be64 __iomem *fc_port_regs;
+ u64 reg_unmasked;
u64 reg;
+ u64 bit;
u8 port;
- int i;
reg = readq_be(&global->regs.aintr_status);
reg_unmasked = (reg & SISL_ASTATUS_UNMASK);
- if (reg_unmasked == 0) {
+ if (unlikely(reg_unmasked == 0)) {
dev_err(dev, "%s: spurious interrupt, aintr_status=%016llx\n",
__func__, reg);
goto out;
@@ -1241,16 +1399,24 @@ static irqreturn_t cxlflash_async_err_irq(int irq, void *data)
writeq_be(reg_unmasked, &global->regs.aintr_clear);
/* Check each bit that is on */
- for (i = 0; reg_unmasked; i++, reg_unmasked = (reg_unmasked >> 1)) {
- info = find_ainfo(1ULL << i);
- if (((reg_unmasked & 0x1) == 0) || !info)
+ for_each_set_bit(bit, (ulong *)&reg_unmasked, BITS_PER_LONG) {
+ if (unlikely(bit >= ARRAY_SIZE(ainfo))) {
+ WARN_ON_ONCE(1);
continue;
+ }
+
+ info = &ainfo[bit];
+ if (unlikely(info->status != 1ULL << bit)) {
+ WARN_ON_ONCE(1);
+ continue;
+ }
port = info->port;
+ fc_port_regs = get_fc_port_regs(cfg, port);
dev_err(dev, "%s: FC Port %d -> %s, fc_status=%016llx\n",
__func__, port, info->desc,
- readq_be(&global->fc_regs[port][FC_STATUS / 8]));
+ readq_be(&fc_port_regs[FC_STATUS / 8]));
/*
* Do link reset first, some OTHER errors will set FC_ERROR
@@ -1265,7 +1431,7 @@ static irqreturn_t cxlflash_async_err_irq(int irq, void *data)
}
if (info->action & CLR_FC_ERROR) {
- reg = readq_be(&global->fc_regs[port][FC_ERROR / 8]);
+ reg = readq_be(&fc_port_regs[FC_ERROR / 8]);
/*
* Since all errors are unmasked, FC_ERROR and FC_ERRCAP
@@ -1275,8 +1441,8 @@ static irqreturn_t cxlflash_async_err_irq(int irq, void *data)
dev_err(dev, "%s: fc %d: clearing fc_error=%016llx\n",
__func__, port, reg);
- writeq_be(reg, &global->fc_regs[port][FC_ERROR / 8]);
- writeq_be(0, &global->fc_regs[port][FC_ERRCAP / 8]);
+ writeq_be(reg, &fc_port_regs[FC_ERROR / 8]);
+ writeq_be(0, &fc_port_regs[FC_ERRCAP / 8]);
}
if (info->action & SCAN_HOST) {
@@ -1292,16 +1458,18 @@ out:
/**
* start_context() - starts the master context
* @cfg: Internal structure associated with the host.
+ * @index: Index of the hardware queue.
*
* Return: A success or failure value from CXL services.
*/
-static int start_context(struct cxlflash_cfg *cfg)
+static int start_context(struct cxlflash_cfg *cfg, u32 index)
{
struct device *dev = &cfg->dev->dev;
+ struct hwq *hwq = get_hwq(cfg->afu, index);
int rc = 0;
- rc = cxl_start_context(cfg->mcctx,
- cfg->afu->work.work_element_descriptor,
+ rc = cxl_start_context(hwq->ctx,
+ hwq->work.work_element_descriptor,
NULL);
dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
@@ -1311,7 +1479,7 @@ static int start_context(struct cxlflash_cfg *cfg)
/**
* read_vpd() - obtains the WWPNs from VPD
* @cfg: Internal structure associated with the host.
- * @wwpn: Array of size NUM_FC_PORTS to pass back WWPNs
+ * @wwpn: Array of size MAX_FC_PORTS to pass back WWPNs
*
* Return: 0 on success, -errno on failure
*/
@@ -1324,7 +1492,7 @@ static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
ssize_t vpd_size;
char vpd_data[CXLFLASH_VPD_LEN];
char tmp_buf[WWPN_BUF_LEN] = { 0 };
- char *wwpn_vpd_tags[NUM_FC_PORTS] = { "V5", "V6" };
+ char *wwpn_vpd_tags[MAX_FC_PORTS] = { "V5", "V6", "V7", "V8" };
/* Get the VPD data from the device */
vpd_size = cxl_read_adapter_vpd(pdev, vpd_data, sizeof(vpd_data));
@@ -1362,7 +1530,7 @@ static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
* because the conversion service requires that the ASCII
* string be terminated.
*/
- for (k = 0; k < NUM_FC_PORTS; k++) {
+ for (k = 0; k < cfg->num_fc_ports; k++) {
j = ro_size;
i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
@@ -1391,6 +1559,8 @@ static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
rc = -ENODEV;
goto out;
}
+
+ dev_dbg(dev, "%s: wwpn%d=%016llx\n", __func__, k, wwpn[k]);
}
out:
@@ -1409,6 +1579,7 @@ static void init_pcr(struct cxlflash_cfg *cfg)
{
struct afu *afu = cfg->afu;
struct sisl_ctrl_map __iomem *ctrl_map;
+ struct hwq *hwq;
int i;
for (i = 0; i < MAX_CONTEXT; i++) {
@@ -1420,13 +1591,17 @@ static void init_pcr(struct cxlflash_cfg *cfg)
writeq_be(0, &ctrl_map->ctx_cap);
}
- /* Copy frequently used fields into afu */
- afu->ctx_hndl = (u16) cxl_process_element(cfg->mcctx);
- afu->host_map = &afu->afu_map->hosts[afu->ctx_hndl].host;
- afu->ctrl_map = &afu->afu_map->ctrls[afu->ctx_hndl].ctrl;
+ /* Copy frequently used fields into hwq */
+ for (i = 0; i < afu->num_hwqs; i++) {
+ hwq = get_hwq(afu, i);
+
+ hwq->ctx_hndl = (u16) cxl_process_element(hwq->ctx);
+ hwq->host_map = &afu->afu_map->hosts[hwq->ctx_hndl].host;
+ hwq->ctrl_map = &afu->afu_map->ctrls[hwq->ctx_hndl].ctrl;
- /* Program the Endian Control for the master context */
- writeq_be(SISL_ENDIAN_CTRL, &afu->host_map->endian_ctrl);
+ /* Program the Endian Control for the master context */
+ writeq_be(SISL_ENDIAN_CTRL, &hwq->host_map->endian_ctrl);
+ }
}
/**
@@ -1437,7 +1612,10 @@ static int init_global(struct cxlflash_cfg *cfg)
{
struct afu *afu = cfg->afu;
struct device *dev = &cfg->dev->dev;
- u64 wwpn[NUM_FC_PORTS]; /* wwpn of AFU ports */
+ struct hwq *hwq;
+ struct sisl_host_map __iomem *hmap;
+ __be64 __iomem *fc_port_regs;
+ u64 wwpn[MAX_FC_PORTS]; /* wwpn of AFU ports */
int i = 0, num_ports = 0;
int rc = 0;
u64 reg;
@@ -1448,16 +1626,18 @@ static int init_global(struct cxlflash_cfg *cfg)
goto out;
}
- dev_dbg(dev, "%s: wwpn0=%016llx wwpn1=%016llx\n",
- __func__, wwpn[0], wwpn[1]);
+ /* Set up RRQ and SQ in HWQ for master issued cmds */
+ for (i = 0; i < afu->num_hwqs; i++) {
+ hwq = get_hwq(afu, i);
+ hmap = hwq->host_map;
- /* Set up RRQ and SQ in AFU for master issued cmds */
- writeq_be((u64) afu->hrrq_start, &afu->host_map->rrq_start);
- writeq_be((u64) afu->hrrq_end, &afu->host_map->rrq_end);
+ writeq_be((u64) hwq->hrrq_start, &hmap->rrq_start);
+ writeq_be((u64) hwq->hrrq_end, &hmap->rrq_end);
- if (afu_is_sq_cmd_mode(afu)) {
- writeq_be((u64)afu->hsq_start, &afu->host_map->sq_start);
- writeq_be((u64)afu->hsq_end, &afu->host_map->sq_end);
+ if (afu_is_sq_cmd_mode(afu)) {
+ writeq_be((u64)hwq->hsq_start, &hmap->sq_start);
+ writeq_be((u64)hwq->hsq_end, &hmap->sq_end);
+ }
}
/* AFU configuration */
@@ -1473,26 +1653,25 @@ static int init_global(struct cxlflash_cfg *cfg)
if (afu->internal_lun) {
/* Only use port 0 */
writeq_be(PORT0, &afu->afu_map->global.regs.afu_port_sel);
- num_ports = NUM_FC_PORTS - 1;
+ num_ports = 0;
} else {
- writeq_be(BOTH_PORTS, &afu->afu_map->global.regs.afu_port_sel);
- num_ports = NUM_FC_PORTS;
+ writeq_be(PORT_MASK(cfg->num_fc_ports),
+ &afu->afu_map->global.regs.afu_port_sel);
+ num_ports = cfg->num_fc_ports;
}
for (i = 0; i < num_ports; i++) {
+ fc_port_regs = get_fc_port_regs(cfg, i);
+
/* Unmask all errors (but they are still masked at AFU) */
- writeq_be(0, &afu->afu_map->global.fc_regs[i][FC_ERRMSK / 8]);
+ writeq_be(0, &fc_port_regs[FC_ERRMSK / 8]);
/* Clear CRC error cnt & set a threshold */
- (void)readq_be(&afu->afu_map->global.
- fc_regs[i][FC_CNT_CRCERR / 8]);
- writeq_be(MC_CRC_THRESH, &afu->afu_map->global.fc_regs[i]
- [FC_CRC_THRESH / 8]);
+ (void)readq_be(&fc_port_regs[FC_CNT_CRCERR / 8]);
+ writeq_be(MC_CRC_THRESH, &fc_port_regs[FC_CRC_THRESH / 8]);
/* Set WWPNs. If already programmed, wwpn[i] is 0 */
if (wwpn[i] != 0)
- afu_set_wwpn(afu, i,
- &afu->afu_map->global.fc_regs[i][0],
- wwpn[i]);
+ afu_set_wwpn(afu, i, &fc_port_regs[0], wwpn[i]);
/* Programming WWPN back to back causes additional
* offline/online transitions and a PLOGI
*/
@@ -1502,11 +1681,15 @@ static int init_global(struct cxlflash_cfg *cfg)
/* Set up master's own CTX_CAP to allow real mode, host translation */
/* tables, afu cmds and read/write GSCSI cmds. */
/* First, unlock ctx_cap write by reading mbox */
- (void)readq_be(&afu->ctrl_map->mbox_r); /* unlock ctx_cap */
- writeq_be((SISL_CTX_CAP_REAL_MODE | SISL_CTX_CAP_HOST_XLATE |
- SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD |
- SISL_CTX_CAP_AFU_CMD | SISL_CTX_CAP_GSCSI_CMD),
- &afu->ctrl_map->ctx_cap);
+ for (i = 0; i < afu->num_hwqs; i++) {
+ hwq = get_hwq(afu, i);
+
+ (void)readq_be(&hwq->ctrl_map->mbox_r); /* unlock ctx_cap */
+ writeq_be((SISL_CTX_CAP_REAL_MODE | SISL_CTX_CAP_HOST_XLATE |
+ SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD |
+ SISL_CTX_CAP_AFU_CMD | SISL_CTX_CAP_GSCSI_CMD),
+ &hwq->ctrl_map->ctx_cap);
+ }
/* Initialize heartbeat */
afu->hb = readq_be(&afu->afu_map->global.regs.afu_hb);
out:
@@ -1521,28 +1704,42 @@ static int start_afu(struct cxlflash_cfg *cfg)
{
struct afu *afu = cfg->afu;
struct device *dev = &cfg->dev->dev;
+ struct hwq *hwq;
int rc = 0;
+ int i;
init_pcr(cfg);
- /* After an AFU reset, RRQ entries are stale, clear them */
- memset(&afu->rrq_entry, 0, sizeof(afu->rrq_entry));
+ /* Initialize each HWQ */
+ for (i = 0; i < afu->num_hwqs; i++) {
+ hwq = get_hwq(afu, i);
- /* Initialize RRQ pointers */
- afu->hrrq_start = &afu->rrq_entry[0];
- afu->hrrq_end = &afu->rrq_entry[NUM_RRQ_ENTRY - 1];
- afu->hrrq_curr = afu->hrrq_start;
- afu->toggle = 1;
+ /* After an AFU reset, RRQ entries are stale, clear them */
+ memset(&hwq->rrq_entry, 0, sizeof(hwq->rrq_entry));
- /* Initialize SQ */
- if (afu_is_sq_cmd_mode(afu)) {
- memset(&afu->sq, 0, sizeof(afu->sq));
- afu->hsq_start = &afu->sq[0];
- afu->hsq_end = &afu->sq[NUM_SQ_ENTRY - 1];
- afu->hsq_curr = afu->hsq_start;
+ /* Initialize RRQ pointers */
+ hwq->hrrq_start = &hwq->rrq_entry[0];
+ hwq->hrrq_end = &hwq->rrq_entry[NUM_RRQ_ENTRY - 1];
+ hwq->hrrq_curr = hwq->hrrq_start;
+ hwq->toggle = 1;
+ spin_lock_init(&hwq->hrrq_slock);
+
+ /* Initialize SQ */
+ if (afu_is_sq_cmd_mode(afu)) {
+ memset(&hwq->sq, 0, sizeof(hwq->sq));
+ hwq->hsq_start = &hwq->sq[0];
+ hwq->hsq_end = &hwq->sq[NUM_SQ_ENTRY - 1];
+ hwq->hsq_curr = hwq->hsq_start;
+
+ spin_lock_init(&hwq->hsq_slock);
+ atomic_set(&hwq->hsq_credits, NUM_SQ_ENTRY - 1);
+ }
+
+ /* Initialize IRQ poll */
+ if (afu_is_irqpoll_enabled(afu))
+ irq_poll_init(&hwq->irqpoll, afu->irqpoll_weight,
+ cxlflash_irqpoll);
- spin_lock_init(&afu->hsq_slock);
- atomic_set(&afu->hsq_credits, NUM_SQ_ENTRY - 1);
}
rc = init_global(cfg);
@@ -1554,18 +1751,21 @@ static int start_afu(struct cxlflash_cfg *cfg)
/**
* init_intr() - setup interrupt handlers for the master context
* @cfg: Internal structure associated with the host.
+ * @hwq: Hardware queue to initialize.
*
* Return: 0 on success, -errno on failure
*/
static enum undo_level init_intr(struct cxlflash_cfg *cfg,
- struct cxl_context *ctx)
+ struct hwq *hwq)
{
- struct afu *afu = cfg->afu;
struct device *dev = &cfg->dev->dev;
+ struct cxl_context *ctx = hwq->ctx;
int rc = 0;
enum undo_level level = UNDO_NOOP;
+ bool is_primary_hwq = (hwq->index == PRIMARY_HWQ);
+ int num_irqs = is_primary_hwq ? 3 : 2;
- rc = cxl_allocate_afu_irqs(ctx, 3);
+ rc = cxl_allocate_afu_irqs(ctx, num_irqs);
if (unlikely(rc)) {
dev_err(dev, "%s: allocate_afu_irqs failed rc=%d\n",
__func__, rc);
@@ -1573,7 +1773,7 @@ static enum undo_level init_intr(struct cxlflash_cfg *cfg,
goto out;
}
- rc = cxl_map_afu_irq(ctx, 1, cxlflash_sync_err_irq, afu,
+ rc = cxl_map_afu_irq(ctx, 1, cxlflash_sync_err_irq, hwq,
"SISL_MSI_SYNC_ERROR");
if (unlikely(rc <= 0)) {
dev_err(dev, "%s: SISL_MSI_SYNC_ERROR map failed\n", __func__);
@@ -1581,7 +1781,7 @@ static enum undo_level init_intr(struct cxlflash_cfg *cfg,
goto out;
}
- rc = cxl_map_afu_irq(ctx, 2, cxlflash_rrq_irq, afu,
+ rc = cxl_map_afu_irq(ctx, 2, cxlflash_rrq_irq, hwq,
"SISL_MSI_RRQ_UPDATED");
if (unlikely(rc <= 0)) {
dev_err(dev, "%s: SISL_MSI_RRQ_UPDATED map failed\n", __func__);
@@ -1589,7 +1789,11 @@ static enum undo_level init_intr(struct cxlflash_cfg *cfg,
goto out;
}
- rc = cxl_map_afu_irq(ctx, 3, cxlflash_async_err_irq, afu,
+ /* SISL_MSI_ASYNC_ERROR is setup only for the primary HWQ */
+ if (!is_primary_hwq)
+ goto out;
+
+ rc = cxl_map_afu_irq(ctx, 3, cxlflash_async_err_irq, hwq,
"SISL_MSI_ASYNC_ERROR");
if (unlikely(rc <= 0)) {
dev_err(dev, "%s: SISL_MSI_ASYNC_ERROR map failed\n", __func__);
@@ -1603,55 +1807,106 @@ out:
/**
* init_mc() - create and register as the master context
* @cfg: Internal structure associated with the host.
+ * @index: HWQ index of the master context.
*
* Return: 0 on success, -errno on failure
*/
-static int init_mc(struct cxlflash_cfg *cfg)
+static int init_mc(struct cxlflash_cfg *cfg, u32 index)
{
struct cxl_context *ctx;
struct device *dev = &cfg->dev->dev;
+ struct hwq *hwq = get_hwq(cfg->afu, index);
int rc = 0;
enum undo_level level;
- ctx = cxl_get_context(cfg->dev);
+ hwq->afu = cfg->afu;
+ hwq->index = index;
+
+ if (index == PRIMARY_HWQ)
+ ctx = cxl_get_context(cfg->dev);
+ else
+ ctx = cxl_dev_context_init(cfg->dev);
if (unlikely(!ctx)) {
rc = -ENOMEM;
- goto ret;
+ goto err1;
}
- cfg->mcctx = ctx;
+
+ WARN_ON(hwq->ctx);
+ hwq->ctx = ctx;
/* Set it up as a master with the CXL */
cxl_set_master(ctx);
- /* During initialization reset the AFU to start from a clean slate */
- rc = cxl_afu_reset(cfg->mcctx);
- if (unlikely(rc)) {
- dev_err(dev, "%s: AFU reset failed rc=%d\n", __func__, rc);
- goto ret;
+ /* Reset AFU when initializing primary context */
+ if (index == PRIMARY_HWQ) {
+ rc = cxl_afu_reset(ctx);
+ if (unlikely(rc)) {
+ dev_err(dev, "%s: AFU reset failed rc=%d\n",
+ __func__, rc);
+ goto err1;
+ }
}
- level = init_intr(cfg, ctx);
+ level = init_intr(cfg, hwq);
if (unlikely(level)) {
dev_err(dev, "%s: interrupt init failed rc=%d\n", __func__, rc);
- goto out;
+ goto err2;
}
/* This performs the equivalent of the CXL_IOCTL_START_WORK.
* The CXL_IOCTL_GET_PROCESS_ELEMENT is implicit in the process
* element (pe) that is embedded in the context (ctx)
*/
- rc = start_context(cfg);
+ rc = start_context(cfg, index);
if (unlikely(rc)) {
dev_err(dev, "%s: start context failed rc=%d\n", __func__, rc);
level = UNMAP_THREE;
- goto out;
+ goto err2;
}
-ret:
+
+out:
dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
return rc;
-out:
- term_intr(cfg, level);
- goto ret;
+err2:
+ term_intr(cfg, level, index);
+ if (index != PRIMARY_HWQ)
+ cxl_release_context(ctx);
+err1:
+ hwq->ctx = NULL;
+ goto out;
+}
+
+/**
+ * get_num_afu_ports() - determines and configures the number of AFU ports
+ * @cfg: Internal structure associated with the host.
+ *
+ * This routine determines the number of AFU ports by converting the global
+ * port selection mask. The converted value is only valid following an AFU
+ * reset (explicit or power-on). This routine must be invoked shortly after
+ * mapping as other routines are dependent on the number of ports during the
+ * initialization sequence.
+ *
+ * To support legacy AFUs that might not have reflected an initial global
+ * port mask (value read is 0), default to the number of ports originally
+ * supported by the cxlflash driver (2) before hardware with other port
+ * offerings was introduced.
+ */
+static void get_num_afu_ports(struct cxlflash_cfg *cfg)
+{
+ struct afu *afu = cfg->afu;
+ struct device *dev = &cfg->dev->dev;
+ u64 port_mask;
+ int num_fc_ports = LEGACY_FC_PORTS;
+
+ port_mask = readq_be(&afu->afu_map->global.regs.afu_port_sel);
+ if (port_mask != 0ULL)
+ num_fc_ports = min(ilog2(port_mask) + 1, MAX_FC_PORTS);
+
+ dev_dbg(dev, "%s: port_mask=%016llx num_fc_ports=%d\n",
+ __func__, port_mask, num_fc_ports);
+
+ cfg->num_fc_ports = num_fc_ports;
+ cfg->host->max_channel = PORTNUM2CHAN(num_fc_ports);
}
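For readers skimming the hunk: the conversion in get_num_afu_ports() is just "highest set bit of the mask, plus one, clamped". A minimal userspace sketch of that arithmetic, with the constants assumed here (LEGACY_FC_PORTS = 2, MAX_FC_PORTS = 4) and a plain loop standing in for the kernel's ilog2():

#include <stdio.h>
#include <stdint.h>

#define LEGACY_FC_PORTS 2   /* assumed: default for legacy AFUs */
#define MAX_FC_PORTS    4   /* assumed: 2 ports per bank, 2 banks */

/* Index of the highest set bit, i.e. what ilog2() returns in the kernel. */
static int highest_bit(uint64_t v)
{
        int i = -1;

        while (v) {
                v >>= 1;
                i++;
        }
        return i;
}

static int ports_from_mask(uint64_t port_mask)
{
        int n = LEGACY_FC_PORTS;

        if (port_mask) {
                n = highest_bit(port_mask) + 1;
                if (n > MAX_FC_PORTS)
                        n = MAX_FC_PORTS;
        }
        return n;
}

int main(void)
{
        printf("%d %d %d\n",
               ports_from_mask(0x0),  /* legacy AFU, no mask -> 2 */
               ports_from_mask(0x3),  /* ports 0-1           -> 2 */
               ports_from_mask(0xf)); /* ports 0-3           -> 4 */
        return 0;
}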
/**
@@ -1669,18 +1924,24 @@ static int init_afu(struct cxlflash_cfg *cfg)
int rc = 0;
struct afu *afu = cfg->afu;
struct device *dev = &cfg->dev->dev;
+ struct hwq *hwq;
+ int i;
cxl_perst_reloads_same_image(cfg->cxl_afu, true);
- rc = init_mc(cfg);
- if (rc) {
- dev_err(dev, "%s: init_mc failed rc=%d\n",
- __func__, rc);
- goto out;
+ afu->num_hwqs = afu->desired_hwqs;
+ for (i = 0; i < afu->num_hwqs; i++) {
+ rc = init_mc(cfg, i);
+ if (rc) {
+ dev_err(dev, "%s: init_mc failed rc=%d index=%d\n",
+ __func__, rc, i);
+ goto err1;
+ }
}
- /* Map the entire MMIO space of the AFU */
- afu->afu_map = cxl_psa_map(cfg->mcctx);
+ /* Map the entire MMIO space of the AFU using the first context */
+ hwq = get_hwq(afu, PRIMARY_HWQ);
+ afu->afu_map = cxl_psa_map(hwq->ctx);
if (!afu->afu_map) {
dev_err(dev, "%s: cxl_psa_map failed\n", __func__);
rc = -ENOMEM;
@@ -1711,6 +1972,8 @@ static int init_afu(struct cxlflash_cfg *cfg)
dev_dbg(dev, "%s: afu_ver=%s interface_ver=%016llx\n", __func__,
afu->version, afu->interface_version);
+ get_num_afu_ports(cfg);
+
rc = start_afu(cfg);
if (rc) {
dev_err(dev, "%s: start_afu failed, rc=%d\n", __func__, rc);
@@ -1718,8 +1981,12 @@ static int init_afu(struct cxlflash_cfg *cfg)
}
afu_err_intr_init(cfg->afu);
- spin_lock_init(&afu->rrin_slock);
- afu->room = readq_be(&afu->host_map->cmd_room);
+ for (i = 0; i < afu->num_hwqs; i++) {
+ hwq = get_hwq(afu, i);
+
+ spin_lock_init(&hwq->rrin_slock);
+ hwq->room = readq_be(&hwq->host_map->cmd_room);
+ }
/* Restore the LUN mappings */
cxlflash_restore_luntable(cfg);
@@ -1728,8 +1995,10 @@ out:
return rc;
err1:
- term_intr(cfg, UNMAP_THREE);
- term_mc(cfg);
+ for (i = afu->num_hwqs - 1; i >= 0; i--) {
+ term_intr(cfg, UNMAP_THREE, i);
+ term_mc(cfg, i);
+ }
goto out;
}
@@ -1761,6 +2030,7 @@ int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t ctx_hndl_u,
struct cxlflash_cfg *cfg = afu->parent;
struct device *dev = &cfg->dev->dev;
struct afu_cmd *cmd = NULL;
+ struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
char *buf = NULL;
int rc = 0;
static DEFINE_MUTEX(sync_active);
@@ -1783,11 +2053,12 @@ int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t ctx_hndl_u,
cmd = (struct afu_cmd *)PTR_ALIGN(buf, __alignof__(*cmd));
init_completion(&cmd->cevent);
cmd->parent = afu;
+ cmd->hwq_index = hwq->index;
dev_dbg(dev, "%s: afu=%p cmd=%p %d\n", __func__, afu, cmd, ctx_hndl_u);
cmd->rcb.req_flags = SISL_REQ_FLAGS_AFU_CMD;
- cmd->rcb.ctx_id = afu->ctx_hndl;
+ cmd->rcb.ctx_id = hwq->ctx_hndl;
cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
cmd->rcb.timeout = MC_AFU_SYNC_TIMEOUT;
@@ -1971,22 +2242,30 @@ static int cxlflash_change_queue_depth(struct scsi_device *sdev, int qdepth)
/**
* cxlflash_show_port_status() - queries and presents the current port status
* @port: Desired port for status reporting.
- * @afu: AFU owning the specified port.
+ * @cfg: Internal structure associated with the host.
* @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
*
- * Return: The size of the ASCII string returned in @buf.
+ * Return: The size of the ASCII string returned in @buf or -EINVAL.
*/
-static ssize_t cxlflash_show_port_status(u32 port, struct afu *afu, char *buf)
+static ssize_t cxlflash_show_port_status(u32 port,
+ struct cxlflash_cfg *cfg,
+ char *buf)
{
+ struct device *dev = &cfg->dev->dev;
char *disp_status;
u64 status;
- __be64 __iomem *fc_regs;
+ __be64 __iomem *fc_port_regs;
- if (port >= NUM_FC_PORTS)
- return 0;
+ WARN_ON(port >= MAX_FC_PORTS);
+
+ if (port >= cfg->num_fc_ports) {
+ dev_info(dev, "%s: Port %d not supported on this card.\n",
+ __func__, port);
+ return -EINVAL;
+ }
- fc_regs = &afu->afu_map->global.fc_regs[port][0];
- status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
+ fc_port_regs = get_fc_port_regs(cfg, port);
+ status = readq_be(&fc_port_regs[FC_MTIP_STATUS / 8]);
status &= FC_MTIP_STATUS_MASK;
if (status == FC_MTIP_STATUS_ONLINE)
@@ -2012,9 +2291,8 @@ static ssize_t port0_show(struct device *dev,
char *buf)
{
struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
- struct afu *afu = cfg->afu;
- return cxlflash_show_port_status(0, afu, buf);
+ return cxlflash_show_port_status(0, cfg, buf);
}
/**
@@ -2030,9 +2308,42 @@ static ssize_t port1_show(struct device *dev,
char *buf)
{
struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
- struct afu *afu = cfg->afu;
- return cxlflash_show_port_status(1, afu, buf);
+ return cxlflash_show_port_status(1, cfg, buf);
+}
+
+/**
+ * port2_show() - queries and presents the current status of port 2
+ * @dev: Generic device associated with the host owning the port.
+ * @attr: Device attribute representing the port.
+ * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
+ *
+ * Return: The size of the ASCII string returned in @buf or -EINVAL.
+ */
+static ssize_t port2_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
+
+ return cxlflash_show_port_status(2, cfg, buf);
+}
+
+/**
+ * port3_show() - queries and presents the current status of port 3
+ * @dev: Generic device associated with the host owning the port.
+ * @attr: Device attribute representing the port.
+ * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
+ *
+ * Return: The size of the ASCII string returned in @buf or -EINVAL.
+ */
+static ssize_t port3_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
+
+ return cxlflash_show_port_status(3, cfg, buf);
}
/**
@@ -2090,12 +2401,13 @@ static ssize_t lun_mode_store(struct device *dev,
/*
* When configured for internal LUN, there is only one channel,
- * channel number 0, else there will be 2 (default).
+ * channel number 0, else there will be one less than the number
+ * of fc ports for this card.
*/
if (afu->internal_lun)
shost->max_channel = 0;
else
- shost->max_channel = NUM_FC_PORTS - 1;
+ shost->max_channel = PORTNUM2CHAN(cfg->num_fc_ports);
afu_reset(cfg);
scsi_scan_host(cfg->host);
@@ -2121,27 +2433,34 @@ static ssize_t ioctl_version_show(struct device *dev,
/**
* cxlflash_show_port_lun_table() - queries and presents the port LUN table
* @port: Desired port for status reporting.
- * @afu: AFU owning the specified port.
+ * @cfg: Internal structure associated with the host.
* @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
*
- * Return: The size of the ASCII string returned in @buf.
+ * Return: The size of the ASCII string returned in @buf or -EINVAL.
*/
static ssize_t cxlflash_show_port_lun_table(u32 port,
- struct afu *afu,
+ struct cxlflash_cfg *cfg,
char *buf)
{
+ struct device *dev = &cfg->dev->dev;
+ __be64 __iomem *fc_port_luns;
int i;
ssize_t bytes = 0;
- __be64 __iomem *fc_port;
- if (port >= NUM_FC_PORTS)
- return 0;
+ WARN_ON(port >= MAX_FC_PORTS);
- fc_port = &afu->afu_map->global.fc_port[port][0];
+ if (port >= cfg->num_fc_ports) {
+ dev_info(dev, "%s: Port %d not supported on this card.\n",
+ __func__, port);
+ return -EINVAL;
+ }
+
+ fc_port_luns = get_fc_port_luns(cfg, port);
for (i = 0; i < CXLFLASH_NUM_VLUNS; i++)
bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
- "%03d: %016llx\n", i, readq_be(&fc_port[i]));
+ "%03d: %016llx\n",
+ i, readq_be(&fc_port_luns[i]));
return bytes;
}
@@ -2158,9 +2477,8 @@ static ssize_t port0_lun_table_show(struct device *dev,
char *buf)
{
struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
- struct afu *afu = cfg->afu;
- return cxlflash_show_port_lun_table(0, afu, buf);
+ return cxlflash_show_port_lun_table(0, cfg, buf);
}
/**
@@ -2176,9 +2494,272 @@ static ssize_t port1_lun_table_show(struct device *dev,
char *buf)
{
struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
+
+ return cxlflash_show_port_lun_table(1, cfg, buf);
+}
+
+/**
+ * port2_lun_table_show() - presents the current LUN table of port 2
+ * @dev: Generic device associated with the host owning the port.
+ * @attr: Device attribute representing the port.
+ * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
+ *
+ * Return: The size of the ASCII string returned in @buf or -EINVAL.
+ */
+static ssize_t port2_lun_table_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
+
+ return cxlflash_show_port_lun_table(2, cfg, buf);
+}
+
+/**
+ * port3_lun_table_show() - presents the current LUN table of port 3
+ * @dev: Generic device associated with the host owning the port.
+ * @attr: Device attribute representing the port.
+ * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
+ *
+ * Return: The size of the ASCII string returned in @buf or -EINVAL.
+ */
+static ssize_t port3_lun_table_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
+
+ return cxlflash_show_port_lun_table(3, cfg, buf);
+}
+
+/**
+ * irqpoll_weight_show() - presents the current IRQ poll weight for the host
+ * @dev: Generic device associated with the host.
+ * @attr: Device attribute representing the IRQ poll weight.
+ * @buf: Buffer of length PAGE_SIZE to report back the current IRQ poll
+ * weight in ASCII.
+ *
+ * An IRQ poll weight of 0 indicates polling is disabled.
+ *
+ * Return: The size of the ASCII string returned in @buf.
+ */
+static ssize_t irqpoll_weight_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
+ struct afu *afu = cfg->afu;
+
+ return scnprintf(buf, PAGE_SIZE, "%u\n", afu->irqpoll_weight);
+}
+
+/**
+ * irqpoll_weight_store() - sets the current IRQ poll weight for the host
+ * @dev: Generic device associated with the host.
+ * @attr: Device attribute representing the IRQ poll weight.
+ * @buf: Buffer of length PAGE_SIZE containing the desired IRQ poll
+ * weight in ASCII.
+ * @count: Length of data residing in @buf.
+ *
+ * An IRQ poll weight of 0 indicates polling is disabled.
+ *
+ * Return: @count on success, -EINVAL on error.
+ */
+static ssize_t irqpoll_weight_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
+ struct device *cfgdev = &cfg->dev->dev;
+ struct afu *afu = cfg->afu;
+ struct hwq *hwq;
+ u32 weight;
+ int rc, i;
+
+ rc = kstrtouint(buf, 10, &weight);
+ if (rc)
+ return -EINVAL;
+
+ if (weight > 256) {
+ dev_info(cfgdev,
+ "Invalid IRQ poll weight. It must be 256 or less.\n");
+ return -EINVAL;
+ }
+
+ if (weight == afu->irqpoll_weight) {
+ dev_info(cfgdev,
+ "Current IRQ poll weight has the same weight.\n");
+ return -EINVAL;
+ }
+
+ if (afu_is_irqpoll_enabled(afu)) {
+ for (i = 0; i < afu->num_hwqs; i++) {
+ hwq = get_hwq(afu, i);
+
+ irq_poll_disable(&hwq->irqpoll);
+ }
+ }
+
+ afu->irqpoll_weight = weight;
+
+ if (weight > 0) {
+ for (i = 0; i < afu->num_hwqs; i++) {
+ hwq = get_hwq(afu, i);
+
+ irq_poll_init(&hwq->irqpoll, weight, cxlflash_irqpoll);
+ }
+ }
+
+ return count;
+}
+
+/**
+ * num_hwqs_show() - presents the number of hardware queues for the host
+ * @dev: Generic device associated with the host.
+ * @attr: Device attribute representing the number of hardware queues.
+ * @buf: Buffer of length PAGE_SIZE to report back the number of hardware
+ * queues in ASCII.
+ *
+ * Return: The size of the ASCII string returned in @buf.
+ */
+static ssize_t num_hwqs_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
struct afu *afu = cfg->afu;
- return cxlflash_show_port_lun_table(1, afu, buf);
+ return scnprintf(buf, PAGE_SIZE, "%u\n", afu->num_hwqs);
+}
+
+/**
+ * num_hwqs_store() - sets the number of hardware queues for the host
+ * @dev: Generic device associated with the host.
+ * @attr: Device attribute representing the number of hardware queues.
+ * @buf: Buffer of length PAGE_SIZE containing the number of hardware
+ * queues in ASCII.
+ * @count: Length of data residing in @buf.
+ *
+ * n > 0: num_hwqs = n
+ * n = 0: num_hwqs = num_online_cpus()
+ * n < 0: num_hwqs = num_online_cpus() / abs(n)
+ *
+ * Return: @count on success, -EINVAL on error.
+ */
+static ssize_t num_hwqs_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
+ struct afu *afu = cfg->afu;
+ int rc;
+ int nhwqs, num_hwqs;
+
+ rc = kstrtoint(buf, 10, &nhwqs);
+ if (rc)
+ return -EINVAL;
+
+ if (nhwqs >= 1)
+ num_hwqs = nhwqs;
+ else if (nhwqs == 0)
+ num_hwqs = num_online_cpus();
+ else
+ num_hwqs = num_online_cpus() / abs(nhwqs);
+
+ afu->desired_hwqs = min(num_hwqs, CXLFLASH_MAX_HWQS);
+ WARN_ON_ONCE(afu->desired_hwqs == 0);
+
+retry:
+ switch (cfg->state) {
+ case STATE_NORMAL:
+ cfg->state = STATE_RESET;
+ drain_ioctls(cfg);
+ cxlflash_mark_contexts_error(cfg);
+ rc = afu_reset(cfg);
+ if (rc)
+ cfg->state = STATE_FAILTERM;
+ else
+ cfg->state = STATE_NORMAL;
+ wake_up_all(&cfg->reset_waitq);
+ break;
+ case STATE_RESET:
+ wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
+ if (cfg->state == STATE_NORMAL)
+ goto retry;
+ default:
+ /* Ideally should not happen */
+ dev_err(dev, "%s: Device is not ready, state=%d\n",
+ __func__, cfg->state);
+ break;
+ }
+
+ return count;
+}
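The n-to-queue mapping documented for num_hwqs_store() is plain arithmetic; a small sketch of just that mapping, with num_online_cpus() and the CXLFLASH_MAX_HWQS cap stubbed out as assumed values:

#include <stdio.h>

#define NUM_ONLINE_CPUS   16   /* stand-in for num_online_cpus() */
#define CXLFLASH_MAX_HWQS 8    /* assumed cap; the driver clamps to its own limit */

static int desired_hwqs(int n)
{
        int num;

        if (n >= 1)
                num = n;                        /* explicit queue count */
        else if (n == 0)
                num = NUM_ONLINE_CPUS;          /* one queue per CPU */
        else
                num = NUM_ONLINE_CPUS / (-n);   /* a fraction of the CPUs */

        if (num > CXLFLASH_MAX_HWQS)
                num = CXLFLASH_MAX_HWQS;
        return num;
}

int main(void)
{
        /* 3 -> 3, 0 -> capped at 8, -4 -> 16/4 = 4 */
        printf("%d %d %d\n", desired_hwqs(3), desired_hwqs(0), desired_hwqs(-4));
        return 0;
}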
+
+static const char *hwq_mode_name[MAX_HWQ_MODE] = { "rr", "tag", "cpu" };
+
+/**
+ * hwq_mode_show() - presents the HWQ steering mode for the host
+ * @dev: Generic device associated with the host.
+ * @attr: Device attribute representing the HWQ steering mode.
+ * @buf: Buffer of length PAGE_SIZE to report back the HWQ steering mode
+ * as a character string.
+ *
+ * Return: The size of the ASCII string returned in @buf.
+ */
+static ssize_t hwq_mode_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
+ struct afu *afu = cfg->afu;
+
+ return scnprintf(buf, PAGE_SIZE, "%s\n", hwq_mode_name[afu->hwq_mode]);
+}
+
+/**
+ * hwq_mode_store() - sets the HWQ steering mode for the host
+ * @dev: Generic device associated with the host.
+ * @attr: Device attribute representing the HWQ steering mode.
+ * @buf: Buffer of length PAGE_SIZE containing the HWQ steering mode
+ * as a character string.
+ * @count: Length of data residing in @buf.
+ *
+ * rr = Round-Robin
+ * tag = Block MQ Tagging
+ * cpu = CPU Affinity
+ *
+ * Return: @count on success, -EINVAL on error.
+ */
+static ssize_t hwq_mode_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct cxlflash_cfg *cfg = shost_priv(shost);
+ struct device *cfgdev = &cfg->dev->dev;
+ struct afu *afu = cfg->afu;
+ int i;
+ u32 mode = MAX_HWQ_MODE;
+
+ for (i = 0; i < MAX_HWQ_MODE; i++) {
+ if (!strncmp(hwq_mode_name[i], buf, strlen(hwq_mode_name[i]))) {
+ mode = i;
+ break;
+ }
+ }
+
+ if (mode >= MAX_HWQ_MODE) {
+ dev_info(cfgdev, "Invalid HWQ steering mode.\n");
+ return -EINVAL;
+ }
+
+ if ((mode == HWQ_MODE_TAG) && !shost_use_blk_mq(shost)) {
+ dev_info(cfgdev, "SCSI-MQ is not enabled, use a different "
+ "HWQ steering mode.\n");
+ return -EINVAL;
+ }
+
+ afu->hwq_mode = mode;
+
+ return count;
}
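The queue-selection code driven by hwq_mode is not part of this hunk; purely as an illustration of what the three policies imply, here is a hypothetical sketch (pick_hwq(), tag and cpu below are placeholders, not driver API):

#include <stdio.h>

enum hwq_mode { HWQ_MODE_RR, HWQ_MODE_TAG, HWQ_MODE_CPU };

/* Hypothetical: map a command to a queue index under each steering policy. */
static int pick_hwq(enum hwq_mode mode, int num_hwqs, unsigned int *rr_count,
                    unsigned int tag, unsigned int cpu)
{
        switch (mode) {
        case HWQ_MODE_RR:       /* rotate across queues */
                return (*rr_count)++ % num_hwqs;
        case HWQ_MODE_TAG:      /* derive from the block-MQ tag */
                return tag % num_hwqs;
        case HWQ_MODE_CPU:      /* bind to the submitting CPU */
        default:
                return cpu % num_hwqs;
        }
}

int main(void)
{
        unsigned int rr = 0;

        printf("%d %d %d\n",
               pick_hwq(HWQ_MODE_RR, 4, &rr, 0, 0),
               pick_hwq(HWQ_MODE_TAG, 4, &rr, 9, 0),
               pick_hwq(HWQ_MODE_CPU, 4, &rr, 0, 6));
        return 0;
}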
/**
@@ -2203,18 +2784,32 @@ static ssize_t mode_show(struct device *dev,
*/
static DEVICE_ATTR_RO(port0);
static DEVICE_ATTR_RO(port1);
+static DEVICE_ATTR_RO(port2);
+static DEVICE_ATTR_RO(port3);
static DEVICE_ATTR_RW(lun_mode);
static DEVICE_ATTR_RO(ioctl_version);
static DEVICE_ATTR_RO(port0_lun_table);
static DEVICE_ATTR_RO(port1_lun_table);
+static DEVICE_ATTR_RO(port2_lun_table);
+static DEVICE_ATTR_RO(port3_lun_table);
+static DEVICE_ATTR_RW(irqpoll_weight);
+static DEVICE_ATTR_RW(num_hwqs);
+static DEVICE_ATTR_RW(hwq_mode);
static struct device_attribute *cxlflash_host_attrs[] = {
&dev_attr_port0,
&dev_attr_port1,
+ &dev_attr_port2,
+ &dev_attr_port3,
&dev_attr_lun_mode,
&dev_attr_ioctl_version,
&dev_attr_port0_lun_table,
&dev_attr_port1_lun_table,
+ &dev_attr_port2_lun_table,
+ &dev_attr_port3_lun_table,
+ &dev_attr_irqpoll_weight,
+ &dev_attr_num_hwqs,
+ &dev_attr_hwq_mode,
NULL
};
@@ -2292,6 +2887,7 @@ static void cxlflash_worker_thread(struct work_struct *work)
work_q);
struct afu *afu = cfg->afu;
struct device *dev = &cfg->dev->dev;
+ __be64 __iomem *fc_port_regs;
int port;
ulong lock_flags;
@@ -2312,8 +2908,8 @@ static void cxlflash_worker_thread(struct work_struct *work)
lock_flags);
/* The reset can block... */
- afu_link_reset(afu, port,
- &afu->afu_map->global.fc_regs[port][0]);
+ fc_port_regs = get_fc_port_regs(cfg, port);
+ afu_link_reset(afu, port, fc_port_regs);
spin_lock_irqsave(cfg->host->host_lock, lock_flags);
}
@@ -2331,6 +2927,15 @@ static void cxlflash_worker_thread(struct work_struct *work)
* @pdev: PCI device associated with the host.
* @dev_id: PCI device id associated with device.
*
+ * The device will initially start out in a 'probing' state and
+ * transition to the 'normal' state at the end of a successful
+ * probe. Should an EEH event occur during probe, the notification
+ * thread (error_detected()) will wait until the probe handler
+ * is nearly complete. At that time, the device will be moved to
+ * a 'probed' state and the EEH thread woken up to drive the slot
+ * reset and recovery (device moves to 'normal' state). Meanwhile,
+ * the probe will be allowed to exit successfully.
+ *
* Return: 0 on success, -errno on failure
*/
static int cxlflash_probe(struct pci_dev *pdev,
@@ -2341,6 +2946,7 @@ static int cxlflash_probe(struct pci_dev *pdev,
struct device *dev = &pdev->dev;
struct dev_dependent_vals *ddv;
int rc = 0;
+ int k;
dev_dbg(&pdev->dev, "%s: Found CXLFLASH with IRQ: %d\n",
__func__, pdev->irq);
@@ -2357,7 +2963,6 @@ static int cxlflash_probe(struct pci_dev *pdev,
host->max_id = CXLFLASH_MAX_NUM_TARGETS_PER_BUS;
host->max_lun = CXLFLASH_MAX_NUM_LUNS_PER_TARGET;
- host->max_channel = NUM_FC_PORTS - 1;
host->unique_id = host->host_no;
host->max_cmd_len = CXLFLASH_MAX_CDB_LEN;
@@ -2376,14 +2981,16 @@ static int cxlflash_probe(struct pci_dev *pdev,
cfg->cxl_fops = cxlflash_cxl_fops;
/*
- * The promoted LUNs move to the top of the LUN table. The rest stay
- * on the bottom half. The bottom half grows from the end
- * (index = 255), whereas the top half grows from the beginning
- * (index = 0).
+ * Promoted LUNs move to the top of the LUN table. The rest stay on
+ * the bottom half. The bottom half grows from the end (index = 255),
+ * whereas the top half grows from the beginning (index = 0).
+ *
+ * Initialize the last LUN index for all possible ports.
*/
- cfg->promote_lun_index = 0;
- cfg->last_lun_index[0] = CXLFLASH_NUM_VLUNS/2 - 1;
- cfg->last_lun_index[1] = CXLFLASH_NUM_VLUNS/2 - 1;
+ cfg->promote_lun_index = 0;
+
+ for (k = 0; k < MAX_FC_PORTS; k++)
+ cfg->last_lun_index[k] = CXLFLASH_NUM_VLUNS/2 - 1;
cfg->dev_id = (struct pci_device_id *)dev_id;
@@ -2412,7 +3019,7 @@ static int cxlflash_probe(struct pci_dev *pdev,
cfg->init_state = INIT_STATE_PCI;
rc = init_afu(cfg);
- if (rc) {
+ if (rc && !wq_has_sleeper(&cfg->reset_waitq)) {
dev_err(dev, "%s: init_afu failed rc=%d\n", __func__, rc);
goto out_remove;
}
@@ -2425,6 +3032,11 @@ static int cxlflash_probe(struct pci_dev *pdev,
}
cfg->init_state = INIT_STATE_SCSI;
+ if (wq_has_sleeper(&cfg->reset_waitq)) {
+ cfg->state = STATE_PROBED;
+ wake_up_all(&cfg->reset_waitq);
+ } else
+ cfg->state = STATE_NORMAL;
out:
dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
return rc;
@@ -2455,7 +3067,8 @@ static pci_ers_result_t cxlflash_pci_error_detected(struct pci_dev *pdev,
switch (state) {
case pci_channel_io_frozen:
- wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
+ wait_event(cfg->reset_waitq, cfg->state != STATE_RESET &&
+ cfg->state != STATE_PROBING);
if (cfg->state == STATE_FAILTERM)
return PCI_ERS_RESULT_DISCONNECT;
@@ -2546,6 +3159,7 @@ static struct pci_driver cxlflash_driver = {
*/
static int __init init_cxlflash(void)
{
+ check_sizes();
cxlflash_list_init();
return pci_register_driver(&cxlflash_driver);
diff --git a/drivers/scsi/cxlflash/main.h b/drivers/scsi/cxlflash/main.h
index 0be2261e6312..49657f1f409e 100644
--- a/drivers/scsi/cxlflash/main.h
+++ b/drivers/scsi/cxlflash/main.h
@@ -37,8 +37,6 @@
#define CXLFLASH_PCI_ERROR_RECOVERY_TIMEOUT (120 * HZ)
-#define NUM_FC_PORTS CXLFLASH_NUM_FC_PORTS /* ports per AFU */
-
/* FC defines */
#define FC_MTIP_CMDCONFIG 0x010
#define FC_MTIP_STATUS 0x018
diff --git a/drivers/scsi/cxlflash/sislite.h b/drivers/scsi/cxlflash/sislite.h
index a6e48a893fef..a768360d2fa6 100644
--- a/drivers/scsi/cxlflash/sislite.h
+++ b/drivers/scsi/cxlflash/sislite.h
@@ -90,15 +90,15 @@ struct sisl_rc {
#define SISL_AFU_RC_RHT_UNALIGNED 0x02U /* should never happen */
#define SISL_AFU_RC_RHT_OUT_OF_BOUNDS 0x03u /* user error */
#define SISL_AFU_RC_RHT_DMA_ERR 0x04u /* see afu_extra
- may retry if afu_retry is off
- possible on master exit
+ * may retry if afu_retry is off
+ * possible on master exit
*/
#define SISL_AFU_RC_RHT_RW_PERM 0x05u /* no RW perms, user error */
#define SISL_AFU_RC_LXT_UNALIGNED 0x12U /* should never happen */
#define SISL_AFU_RC_LXT_OUT_OF_BOUNDS 0x13u /* user error */
#define SISL_AFU_RC_LXT_DMA_ERR 0x14u /* see afu_extra
- may retry if afu_retry is off
- possible on master exit
+ * may retry if afu_retry is off
+ * possible on master exit
*/
#define SISL_AFU_RC_LXT_RW_PERM 0x15u /* no RW perms, user error */
@@ -111,11 +111,11 @@ struct sisl_rc {
*/
#define SISL_AFU_RC_NO_CHANNELS 0x20U /* see afu_extra, may retry */
#define SISL_AFU_RC_CAP_VIOLATION 0x21U /* either user error or
- afu reset/master restart
+ * afu reset/master restart
*/
#define SISL_AFU_RC_OUT_OF_DATA_BUFS 0x30U /* always retry */
#define SISL_AFU_RC_DATA_DMA_ERR 0x31U /* see afu_extra
- may retry if afu_retry is off
+ * may retry if afu_retry is off
*/
u8 scsi_rc; /* SCSI status byte, retry as appropriate */
@@ -149,8 +149,9 @@ struct sisl_rc {
#define SISL_FC_RC_ABORTFAIL 0x59 /* pending abort completed w/fail */
#define SISL_FC_RC_RESID 0x5A /* ioasa underrun/overrun flags set */
#define SISL_FC_RC_RESIDERR 0x5B /* actual data len does not match SCSI
- reported len, possibly due to dropped
- frames */
+ * reported len, possibly due to dropped
+ * frames
+ */
#define SISL_FC_RC_TGTABORT 0x5C /* command aborted by target */
};
@@ -227,10 +228,10 @@ struct sisl_ioasa {
/* per context host transport MMIO */
struct sisl_host_map {
- __be64 endian_ctrl; /* Per context Endian Control. The AFU will
- * operate on whatever the context is of the
- * host application.
- */
+ __be64 endian_ctrl; /* Per context Endian Control. The AFU will
+ * operate on whatever the context is of the
+ * host application.
+ */
__be64 intr_status; /* this sends LISN# programmed in ctx_ctrl.
* Only recovery in a PERM_ERR is a context
@@ -292,28 +293,54 @@ struct sisl_ctrl_map {
/* single copy global regs */
struct sisl_global_regs {
__be64 aintr_status;
- /* In cxlflash, each FC port/link gets a byte of status */
-#define SISL_ASTATUS_FC0_OTHER 0x8000ULL /* b48, other err,
- FC_ERRCAP[31:20] */
-#define SISL_ASTATUS_FC0_LOGO 0x4000ULL /* b49, target sent FLOGI/PLOGI/LOGO
- while logged in */
-#define SISL_ASTATUS_FC0_CRC_T 0x2000ULL /* b50, CRC threshold exceeded */
-#define SISL_ASTATUS_FC0_LOGI_R 0x1000ULL /* b51, login state machine timed out
- and retrying */
-#define SISL_ASTATUS_FC0_LOGI_F 0x0800ULL /* b52, login failed,
- FC_ERROR[19:0] */
-#define SISL_ASTATUS_FC0_LOGI_S 0x0400ULL /* b53, login succeeded */
-#define SISL_ASTATUS_FC0_LINK_DN 0x0200ULL /* b54, link online to offline */
-#define SISL_ASTATUS_FC0_LINK_UP 0x0100ULL /* b55, link offline to online */
-
-#define SISL_ASTATUS_FC1_OTHER 0x0080ULL /* b56 */
-#define SISL_ASTATUS_FC1_LOGO 0x0040ULL /* b57 */
-#define SISL_ASTATUS_FC1_CRC_T 0x0020ULL /* b58 */
-#define SISL_ASTATUS_FC1_LOGI_R 0x0010ULL /* b59 */
-#define SISL_ASTATUS_FC1_LOGI_F 0x0008ULL /* b60 */
-#define SISL_ASTATUS_FC1_LOGI_S 0x0004ULL /* b61 */
-#define SISL_ASTATUS_FC1_LINK_DN 0x0002ULL /* b62 */
-#define SISL_ASTATUS_FC1_LINK_UP 0x0001ULL /* b63 */
+ /*
+ * In cxlflash, FC port/link are arranged in port pairs, each
+ * gets a byte of status:
+ *
+ * *_OTHER: other err, FC_ERRCAP[31:20]
+ * *_LOGO: target sent FLOGI/PLOGI/LOGO while logged in
+ * *_CRC_T: CRC threshold exceeded
+ * *_LOGI_R: login state machine timed out and retrying
+ * *_LOGI_F: login failed, FC_ERROR[19:0]
+ * *_LOGI_S: login succeeded
+ * *_LINK_DN: link online to offline
+ * *_LINK_UP: link offline to online
+ */
+#define SISL_ASTATUS_FC2_OTHER 0x80000000ULL /* b32 */
+#define SISL_ASTATUS_FC2_LOGO 0x40000000ULL /* b33 */
+#define SISL_ASTATUS_FC2_CRC_T 0x20000000ULL /* b34 */
+#define SISL_ASTATUS_FC2_LOGI_R 0x10000000ULL /* b35 */
+#define SISL_ASTATUS_FC2_LOGI_F 0x08000000ULL /* b36 */
+#define SISL_ASTATUS_FC2_LOGI_S 0x04000000ULL /* b37 */
+#define SISL_ASTATUS_FC2_LINK_DN 0x02000000ULL /* b38 */
+#define SISL_ASTATUS_FC2_LINK_UP 0x01000000ULL /* b39 */
+
+#define SISL_ASTATUS_FC3_OTHER 0x00800000ULL /* b40 */
+#define SISL_ASTATUS_FC3_LOGO 0x00400000ULL /* b41 */
+#define SISL_ASTATUS_FC3_CRC_T 0x00200000ULL /* b42 */
+#define SISL_ASTATUS_FC3_LOGI_R 0x00100000ULL /* b43 */
+#define SISL_ASTATUS_FC3_LOGI_F 0x00080000ULL /* b44 */
+#define SISL_ASTATUS_FC3_LOGI_S 0x00040000ULL /* b45 */
+#define SISL_ASTATUS_FC3_LINK_DN 0x00020000ULL /* b46 */
+#define SISL_ASTATUS_FC3_LINK_UP 0x00010000ULL /* b47 */
+
+#define SISL_ASTATUS_FC0_OTHER 0x00008000ULL /* b48 */
+#define SISL_ASTATUS_FC0_LOGO 0x00004000ULL /* b49 */
+#define SISL_ASTATUS_FC0_CRC_T 0x00002000ULL /* b50 */
+#define SISL_ASTATUS_FC0_LOGI_R 0x00001000ULL /* b51 */
+#define SISL_ASTATUS_FC0_LOGI_F 0x00000800ULL /* b52 */
+#define SISL_ASTATUS_FC0_LOGI_S 0x00000400ULL /* b53 */
+#define SISL_ASTATUS_FC0_LINK_DN 0x00000200ULL /* b54 */
+#define SISL_ASTATUS_FC0_LINK_UP 0x00000100ULL /* b55 */
+
+#define SISL_ASTATUS_FC1_OTHER 0x00000080ULL /* b56 */
+#define SISL_ASTATUS_FC1_LOGO 0x00000040ULL /* b57 */
+#define SISL_ASTATUS_FC1_CRC_T 0x00000020ULL /* b58 */
+#define SISL_ASTATUS_FC1_LOGI_R 0x00000010ULL /* b59 */
+#define SISL_ASTATUS_FC1_LOGI_F 0x00000008ULL /* b60 */
+#define SISL_ASTATUS_FC1_LOGI_S 0x00000004ULL /* b61 */
+#define SISL_ASTATUS_FC1_LINK_DN 0x00000002ULL /* b62 */
+#define SISL_ASTATUS_FC1_LINK_UP 0x00000001ULL /* b63 */
#define SISL_FC_INTERNAL_UNMASK 0x0000000300000000ULL /* 1 means unmasked */
#define SISL_FC_INTERNAL_MASK ~(SISL_FC_INTERNAL_UNMASK)
@@ -325,7 +352,7 @@ struct sisl_global_regs {
#define SISL_STATUS_SHUTDOWN_ACTIVE 0x0000000000000010ULL
#define SISL_STATUS_SHUTDOWN_COMPLETE 0x0000000000000020ULL
-#define SISL_ASTATUS_UNMASK 0xFFFFULL /* 1 means unmasked */
+#define SISL_ASTATUS_UNMASK 0xFFFFFFFFULL /* 1 means unmasked */
#define SISL_ASTATUS_MASK ~(SISL_ASTATUS_UNMASK) /* 1 means masked */
__be64 aintr_clear;
@@ -367,9 +394,18 @@ struct sisl_global_regs {
#define SISL_INTVER_CAP_RESERVED_CMD_MODE_B 0x100000000000ULL
};
-#define CXLFLASH_NUM_FC_PORTS 2
-#define CXLFLASH_MAX_CONTEXT 512 /* how many contexts per afu */
-#define CXLFLASH_NUM_VLUNS 512
+#define CXLFLASH_NUM_FC_PORTS_PER_BANK 2 /* fixed # of ports per bank */
+#define CXLFLASH_MAX_FC_BANKS 2 /* max # of banks supported */
+#define CXLFLASH_MAX_FC_PORTS (CXLFLASH_NUM_FC_PORTS_PER_BANK * \
+ CXLFLASH_MAX_FC_BANKS)
+#define CXLFLASH_MAX_CONTEXT 512 /* number of contexts per AFU */
+#define CXLFLASH_NUM_VLUNS 512 /* number of vluns per AFU/port */
+#define CXLFLASH_NUM_REGS 512 /* number of registers per port */
+
+struct fc_port_bank {
+ __be64 fc_port_regs[CXLFLASH_NUM_FC_PORTS_PER_BANK][CXLFLASH_NUM_REGS];
+ __be64 fc_port_luns[CXLFLASH_NUM_FC_PORTS_PER_BANK][CXLFLASH_NUM_VLUNS];
+};
struct sisl_global_map {
union {
@@ -379,11 +415,9 @@ struct sisl_global_map {
char page1[SIZE_4K]; /* page 1 */
- /* pages 2 & 3 */
- __be64 fc_regs[CXLFLASH_NUM_FC_PORTS][CXLFLASH_NUM_VLUNS];
+ struct fc_port_bank bank[CXLFLASH_MAX_FC_BANKS]; /* pages 2 - 9 */
- /* pages 4 & 5 (lun tbl) */
- __be64 fc_port[CXLFLASH_NUM_FC_PORTS][CXLFLASH_NUM_VLUNS];
+ /* pages 10 - 15 are reserved */
};
@@ -402,7 +436,7 @@ struct sisl_global_map {
* | 64 KB Global |
* | Trusted Process accessible |
* +-------------------------------+
-*/
+ */
struct cxlflash_afu_map {
union {
struct sisl_host_map host;
@@ -478,7 +512,9 @@ struct sisl_rht_entry_f1 {
#define PORT0 0x01U
#define PORT1 0x02U
-#define BOTH_PORTS (PORT0 | PORT1)
+#define PORT2 0x04U
+#define PORT3 0x08U
+#define PORT_MASK(_n) ((1 << (_n)) - 1)
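PORT_MASK(n) selects the low n port bits, so the removed BOTH_PORTS value is simply PORT_MASK(2). A one-file check of that identity using the defines above:

#include <assert.h>

#define PORT0 0x01U
#define PORT1 0x02U
#define PORT2 0x04U
#define PORT3 0x08U
#define PORT_MASK(_n) ((1 << (_n)) - 1)

int main(void)
{
        assert(PORT_MASK(2) == (PORT0 | PORT1));                  /* old BOTH_PORTS */
        assert(PORT_MASK(4) == (PORT0 | PORT1 | PORT2 | PORT3));  /* all four ports */
        return 0;
}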
/* AFU Sync Mode byte */
#define AFU_LW_SYNC 0x0U
diff --git a/drivers/scsi/cxlflash/superpipe.c b/drivers/scsi/cxlflash/superpipe.c
index b46fd2f45628..fe9f17a6268b 100644
--- a/drivers/scsi/cxlflash/superpipe.c
+++ b/drivers/scsi/cxlflash/superpipe.c
@@ -78,17 +78,18 @@ void cxlflash_free_errpage(void)
* memory freed. This is accomplished by putting the contexts in error
* state which will notify the user and let them 'drive' the tear down.
* Meanwhile, this routine camps until all user contexts have been removed.
+ *
+ * Note that the main loop in this routine will always execute at least once
+ * to flush the reset_waitq.
*/
void cxlflash_stop_term_user_contexts(struct cxlflash_cfg *cfg)
{
struct device *dev = &cfg->dev->dev;
- int i, found;
+ int i, found = true;
cxlflash_mark_contexts_error(cfg);
while (true) {
- found = false;
-
for (i = 0; i < MAX_CONTEXT; i++)
if (cfg->ctx_tbl[i]) {
found = true;
@@ -102,6 +103,7 @@ void cxlflash_stop_term_user_contexts(struct cxlflash_cfg *cfg)
__func__);
wake_up_all(&cfg->reset_waitq);
ssleep(1);
+ found = false;
}
}
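The found = true initialization above is what forces the wake-up/sleep path to run at least once even when the context table is already empty. A stripped-down sketch of that control flow, with the table scan and the wake_up_all()/ssleep() pair reduced to stubs:

#include <stdbool.h>
#include <stdio.h>

#define MAX_CONTEXT 4

static void *ctx_tbl[MAX_CONTEXT];      /* empty table for the demo */

static void wake_and_wait(void)         /* stand-in for wake_up_all() + ssleep() */
{
        puts("flush waiters");
}

int main(void)
{
        bool found = true;              /* forces at least one wake-up pass */
        int i;

        while (true) {
                for (i = 0; i < MAX_CONTEXT; i++)
                        if (ctx_tbl[i]) {
                                found = true;
                                break;
                        }

                if (!found)             /* nothing left after a full pass: done */
                        break;

                wake_and_wait();
                found = false;          /* re-arm: exit unless the next pass finds one */
        }
        return 0;
}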
@@ -252,6 +254,7 @@ static int afu_attach(struct cxlflash_cfg *cfg, struct ctx_info *ctxi)
struct afu *afu = cfg->afu;
struct sisl_ctrl_map __iomem *ctrl_map = ctxi->ctrl_map;
int rc = 0;
+ struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
u64 val;
/* Unlock cap and restrict user to read/write cmds in translated mode */
@@ -268,7 +271,7 @@ static int afu_attach(struct cxlflash_cfg *cfg, struct ctx_info *ctxi)
/* Set up MMIO registers pointing to the RHT */
writeq_be((u64)ctxi->rht_start, &ctrl_map->rht_start);
- val = SISL_RHT_CNT_ID((u64)MAX_RHT_PER_CONTEXT, (u64)(afu->ctx_hndl));
+ val = SISL_RHT_CNT_ID((u64)MAX_RHT_PER_CONTEXT, (u64)(hwq->ctx_hndl));
writeq_be(val, &ctrl_map->rht_cnt_id);
out:
dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
@@ -1624,6 +1627,7 @@ static int cxlflash_afu_recover(struct scsi_device *sdev,
struct afu *afu = cfg->afu;
struct ctx_info *ctxi = NULL;
struct mutex *mutex = &cfg->ctx_recovery_mutex;
+ struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
u64 flags;
u64 ctxid = DECODE_CTXID(recover->context_id),
rctxid = recover->context_id;
@@ -1694,7 +1698,7 @@ retry_recover:
}
/* Test if in error state */
- reg = readq_be(&afu->ctrl_map->mbox_r);
+ reg = readq_be(&hwq->ctrl_map->mbox_r);
if (reg == -1) {
dev_dbg(dev, "%s: MMIO fail, wait for recovery.\n", __func__);
@@ -1933,7 +1937,7 @@ static int cxlflash_disk_direct_open(struct scsi_device *sdev, void *arg)
u64 lun_size = 0;
u64 last_lba = 0;
u64 rsrc_handle = -1;
- u32 port = CHAN2PORT(sdev->channel);
+ u32 port = CHAN2PORTMASK(sdev->channel);
int rc = 0;
diff --git a/drivers/scsi/cxlflash/superpipe.h b/drivers/scsi/cxlflash/superpipe.h
index 9e62ff304e4b..0b5976829913 100644
--- a/drivers/scsi/cxlflash/superpipe.h
+++ b/drivers/scsi/cxlflash/superpipe.h
@@ -24,8 +24,8 @@ extern struct cxlflash_global global;
*/
/* Chunk size parms: note sislite minimum chunk size is
- 0x10000 LBAs corresponding to a NMASK or 16.
-*/
+ * 0x10000 LBAs corresponding to a NMASK or 16.
+ */
#define MC_CHUNK_SIZE (1 << MC_RHT_NMASK) /* in LBAs */
#define CMD_TIMEOUT 30 /* 30 secs */
@@ -33,9 +33,6 @@ extern struct cxlflash_global global;
#define MAX_SECTOR_UNIT 512 /* max_sector is in 512 byte multiples */
-#define CHAN2PORT(_x) ((_x) + 1)
-#define PORT2CHAN(_x) ((_x) - 1)
-
enum lun_mode {
MODE_NONE = 0,
MODE_VIRTUAL,
@@ -59,7 +56,7 @@ struct glun_info {
/* Local (per-adapter) lun_info structure */
struct llun_info {
- u64 lun_id[CXLFLASH_NUM_FC_PORTS]; /* from REPORT_LUNS */
+ u64 lun_id[MAX_FC_PORTS]; /* from REPORT_LUNS */
u32 lun_index; /* Index in the LUN table */
u32 host_no; /* host_no from Scsi_host */
u32 port_sel; /* What port to use for this LUN */
@@ -92,7 +89,8 @@ enum ctx_ctrl {
struct ctx_info {
struct sisl_ctrl_map __iomem *ctrl_map; /* initialized at startup */
struct sisl_rht_entry *rht_start; /* 1 page (req'd for alignment),
- alloc/free on attach/detach */
+ * alloc/free on attach/detach
+ */
u32 rht_out; /* Number of checked out RHT entries */
u32 rht_perms; /* User-defined permissions for RHT entries */
struct llun_info **rht_lun; /* Mapping of RHT entries to LUNs */
@@ -120,34 +118,40 @@ struct cxlflash_global {
struct page *err_page; /* One page of all 0xF for error notification */
};
-int cxlflash_vlun_resize(struct scsi_device *, struct dk_cxlflash_resize *);
-int _cxlflash_vlun_resize(struct scsi_device *, struct ctx_info *,
- struct dk_cxlflash_resize *);
+int cxlflash_vlun_resize(struct scsi_device *sdev,
+ struct dk_cxlflash_resize *resize);
+int _cxlflash_vlun_resize(struct scsi_device *sdev, struct ctx_info *ctxi,
+ struct dk_cxlflash_resize *resize);
-int cxlflash_disk_release(struct scsi_device *, struct dk_cxlflash_release *);
-int _cxlflash_disk_release(struct scsi_device *, struct ctx_info *,
- struct dk_cxlflash_release *);
+int cxlflash_disk_release(struct scsi_device *sdev,
+ struct dk_cxlflash_release *release);
+int _cxlflash_disk_release(struct scsi_device *sdev, struct ctx_info *ctxi,
+ struct dk_cxlflash_release *release);
-int cxlflash_disk_clone(struct scsi_device *, struct dk_cxlflash_clone *);
+int cxlflash_disk_clone(struct scsi_device *sdev,
+ struct dk_cxlflash_clone *clone);
-int cxlflash_disk_virtual_open(struct scsi_device *, void *);
+int cxlflash_disk_virtual_open(struct scsi_device *sdev, void *arg);
-int cxlflash_lun_attach(struct glun_info *, enum lun_mode, bool);
-void cxlflash_lun_detach(struct glun_info *);
+int cxlflash_lun_attach(struct glun_info *gli, enum lun_mode mode, bool locked);
+void cxlflash_lun_detach(struct glun_info *gli);
-struct ctx_info *get_context(struct cxlflash_cfg *, u64, void *, enum ctx_ctrl);
-void put_context(struct ctx_info *);
+struct ctx_info *get_context(struct cxlflash_cfg *cfg, u64 rctxit, void *arg,
+ enum ctx_ctrl ctrl);
+void put_context(struct ctx_info *ctxi);
-struct sisl_rht_entry *get_rhte(struct ctx_info *, res_hndl_t,
- struct llun_info *);
+struct sisl_rht_entry *get_rhte(struct ctx_info *ctxi, res_hndl_t rhndl,
+ struct llun_info *lli);
-struct sisl_rht_entry *rhte_checkout(struct ctx_info *, struct llun_info *);
-void rhte_checkin(struct ctx_info *, struct sisl_rht_entry *);
+struct sisl_rht_entry *rhte_checkout(struct ctx_info *ctxi,
+ struct llun_info *lli);
+void rhte_checkin(struct ctx_info *ctxi, struct sisl_rht_entry *rhte);
-void cxlflash_ba_terminate(struct ba_lun *);
+void cxlflash_ba_terminate(struct ba_lun *ba_lun);
-int cxlflash_manage_lun(struct scsi_device *, struct dk_cxlflash_manage_lun *);
+int cxlflash_manage_lun(struct scsi_device *sdev,
+ struct dk_cxlflash_manage_lun *manage);
-int check_state(struct cxlflash_cfg *);
+int check_state(struct cxlflash_cfg *cfg);
#endif /* ifndef _CXLFLASH_SUPERPIPE_H */
diff --git a/drivers/scsi/cxlflash/vlun.c b/drivers/scsi/cxlflash/vlun.c
index 7aa06ef229fd..90b5c19f81f0 100644
--- a/drivers/scsi/cxlflash/vlun.c
+++ b/drivers/scsi/cxlflash/vlun.c
@@ -819,11 +819,10 @@ int cxlflash_vlun_resize(struct scsi_device *sdev,
void cxlflash_restore_luntable(struct cxlflash_cfg *cfg)
{
struct llun_info *lli, *temp;
- u32 chan;
u32 lind;
- struct afu *afu = cfg->afu;
+ int k;
struct device *dev = &cfg->dev->dev;
- struct sisl_global_map __iomem *agm = &afu->afu_map->global;
+ __be64 __iomem *fc_port_luns;
mutex_lock(&global.mutex);
@@ -832,33 +831,41 @@ void cxlflash_restore_luntable(struct cxlflash_cfg *cfg)
continue;
lind = lli->lun_index;
+ dev_dbg(dev, "%s: Virtual LUNs on slot %d:\n", __func__, lind);
- if (lli->port_sel == BOTH_PORTS) {
- writeq_be(lli->lun_id[0], &agm->fc_port[0][lind]);
- writeq_be(lli->lun_id[1], &agm->fc_port[1][lind]);
- dev_dbg(dev, "%s: Virtual LUN on slot %d id0=%llx "
- "id1=%llx\n", __func__, lind,
- lli->lun_id[0], lli->lun_id[1]);
- } else {
- chan = PORT2CHAN(lli->port_sel);
- writeq_be(lli->lun_id[chan], &agm->fc_port[chan][lind]);
- dev_dbg(dev, "%s: Virtual LUN on slot %d chan=%d "
- "id=%llx\n", __func__, lind, chan,
- lli->lun_id[chan]);
- }
+ for (k = 0; k < cfg->num_fc_ports; k++)
+ if (lli->port_sel & (1 << k)) {
+ fc_port_luns = get_fc_port_luns(cfg, k);
+ writeq_be(lli->lun_id[k], &fc_port_luns[lind]);
+ dev_dbg(dev, "\t%d=%llx\n", k, lli->lun_id[k]);
+ }
}
mutex_unlock(&global.mutex);
}
/**
+ * get_num_ports() - compute number of ports from port selection mask
+ * @psm: Port selection mask.
+ *
+ * Return: Population count of port selection mask
+ */
+static inline u8 get_num_ports(u32 psm)
+{
+ static const u8 bits[16] = { 0, 1, 1, 2, 1, 2, 2, 3,
+ 1, 2, 2, 3, 2, 3, 3, 4 };
+
+ return bits[psm & 0xf];
+}
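The 16-entry nibble table in get_num_ports() is a population count over at most four port bits; the following standalone check confirms the table against a bit-counting loop and shows a typical mask:

#include <assert.h>
#include <stdint.h>

static const uint8_t bits[16] = { 0, 1, 1, 2, 1, 2, 2, 3,
                                  1, 2, 2, 3, 2, 3, 3, 4 };

static int popcount4(uint32_t psm)
{
        int n = 0;

        for (psm &= 0xf; psm; psm >>= 1)
                n += psm & 1;
        return n;
}

int main(void)
{
        uint32_t psm;

        for (psm = 0; psm < 16; psm++)
                assert(bits[psm] == popcount4(psm));

        /* e.g. a LUN visible on ports 0 and 2: psm = 0x5 -> 2 ports */
        assert(bits[0x5] == 2);
        return 0;
}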
+
+/**
* init_luntable() - write an entry in the LUN table
* @cfg: Internal structure associated with the host.
* @lli: Per adapter LUN information structure.
*
- * On successful return, a LUN table entry is created.
- * At the top for LUNs visible on both ports.
- * At the bottom for LUNs visible only on one port.
+ * On successful return, a LUN table entry is created:
+ * - at the top for LUNs visible on multiple ports.
+ * - at the bottom for LUNs visible only on one port.
*
* Return: 0 on success, -errno on failure
*/
@@ -866,48 +873,68 @@ static int init_luntable(struct cxlflash_cfg *cfg, struct llun_info *lli)
{
u32 chan;
u32 lind;
+ u32 nports;
int rc = 0;
- struct afu *afu = cfg->afu;
+ int k;
struct device *dev = &cfg->dev->dev;
- struct sisl_global_map __iomem *agm = &afu->afu_map->global;
+ __be64 __iomem *fc_port_luns;
mutex_lock(&global.mutex);
if (lli->in_table)
goto out;
- if (lli->port_sel == BOTH_PORTS) {
+ nports = get_num_ports(lli->port_sel);
+ if (nports == 0 || nports > cfg->num_fc_ports) {
+ WARN(1, "Unsupported port configuration nports=%u", nports);
+ rc = -EIO;
+ goto out;
+ }
+
+ if (nports > 1) {
/*
- * If this LUN is visible from both ports, we will put
+ * When LUN is visible from multiple ports, we will put
* it in the top half of the LUN table.
*/
- if ((cfg->promote_lun_index == cfg->last_lun_index[0]) ||
- (cfg->promote_lun_index == cfg->last_lun_index[1])) {
- rc = -ENOSPC;
- goto out;
+ for (k = 0; k < cfg->num_fc_ports; k++) {
+ if (!(lli->port_sel & (1 << k)))
+ continue;
+
+ if (cfg->promote_lun_index == cfg->last_lun_index[k]) {
+ rc = -ENOSPC;
+ goto out;
+ }
}
lind = lli->lun_index = cfg->promote_lun_index;
- writeq_be(lli->lun_id[0], &agm->fc_port[0][lind]);
- writeq_be(lli->lun_id[1], &agm->fc_port[1][lind]);
+ dev_dbg(dev, "%s: Virtual LUNs on slot %d:\n", __func__, lind);
+
+ for (k = 0; k < cfg->num_fc_ports; k++) {
+ if (!(lli->port_sel & (1 << k)))
+ continue;
+
+ fc_port_luns = get_fc_port_luns(cfg, k);
+ writeq_be(lli->lun_id[k], &fc_port_luns[lind]);
+ dev_dbg(dev, "\t%d=%llx\n", k, lli->lun_id[k]);
+ }
+
cfg->promote_lun_index++;
- dev_dbg(dev, "%s: Virtual LUN on slot %d id0=%llx id1=%llx\n",
- __func__, lind, lli->lun_id[0], lli->lun_id[1]);
} else {
/*
- * If this LUN is visible only from one port, we will put
+ * When LUN is visible only from one port, we will put
* it in the bottom half of the LUN table.
*/
- chan = PORT2CHAN(lli->port_sel);
+ chan = PORTMASK2CHAN(lli->port_sel);
if (cfg->promote_lun_index == cfg->last_lun_index[chan]) {
rc = -ENOSPC;
goto out;
}
lind = lli->lun_index = cfg->last_lun_index[chan];
- writeq_be(lli->lun_id[chan], &agm->fc_port[chan][lind]);
+ fc_port_luns = get_fc_port_luns(cfg, chan);
+ writeq_be(lli->lun_id[chan], &fc_port_luns[lind]);
cfg->last_lun_index[chan]--;
- dev_dbg(dev, "%s: Virtual LUN on slot %d chan=%d id=%llx\n",
+ dev_dbg(dev, "%s: Virtual LUNs on slot %d:\n\t%d=%llx\n",
__func__, lind, chan, lli->lun_id[chan]);
}
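To make the two-ended allocation in init_luntable() explicit: multi-port LUNs take slots from promote_lun_index upward, single-port LUNs take slots from that port's last_lun_index downward, and the two cursors meeting means -ENOSPC. A compact sketch of just that index bookkeeping (table size and port count are illustrative, and the per-port visibility mask is omitted for brevity):

#include <stdio.h>

#define NUM_SLOTS 8     /* illustrative; the driver sizes each half from CXLFLASH_NUM_VLUNS */
#define NUM_PORTS 2

static int promote_idx;                                 /* grows from the top (0) */
static int last_idx[NUM_PORTS] = { NUM_SLOTS - 1,
                                   NUM_SLOTS - 1 };     /* grow from the bottom */

/* Returns the allocated slot index, or -1 when the two cursors would collide. */
static int alloc_slot(int nports, int chan)
{
        if (nports > 1) {
                int k;

                for (k = 0; k < NUM_PORTS; k++)
                        if (promote_idx == last_idx[k])
                                return -1;              /* -ENOSPC */
                return promote_idx++;
        }

        if (promote_idx == last_idx[chan])
                return -1;                              /* -ENOSPC */
        return last_idx[chan]--;
}

int main(void)
{
        printf("%d %d %d\n",
               alloc_slot(2, 0),        /* multi-port  -> 0 */
               alloc_slot(1, 1),        /* port 1 only -> 7 */
               alloc_slot(2, 0));       /* multi-port  -> 1 */
        return 0;
}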
@@ -1016,7 +1043,7 @@ int cxlflash_disk_virtual_open(struct scsi_device *sdev, void *arg)
virt->last_lba = last_lba;
virt->rsrc_handle = rsrc_handle;
- if (lli->port_sel == BOTH_PORTS)
+ if (get_num_ports(lli->port_sel) > 1)
virt->hdr.return_flags |= DK_CXLFLASH_ALL_PORTS_ACTIVE;
out:
if (likely(ctxi))
diff --git a/drivers/scsi/cxlflash/vlun.h b/drivers/scsi/cxlflash/vlun.h
index 8b29a74946e4..27a63a0155ce 100644
--- a/drivers/scsi/cxlflash/vlun.h
+++ b/drivers/scsi/cxlflash/vlun.h
@@ -47,7 +47,7 @@
* not stored anywhere.
*
* The LXT table is re-allocated whenever it needs to cross into another group.
-*/
+ */
#define LXT_GROUP_SIZE 8
#define LXT_NUM_GROUPS(lxt_cnt) (((lxt_cnt) + 7)/8) /* alloc'ed groups */
#define LXT_LUNIDX_SHIFT 8 /* LXT entry, shift for LUN index */
diff --git a/drivers/scsi/esas2r/esas2r_log.c b/drivers/scsi/esas2r/esas2r_log.c
index a82030aa8577..65fdf22b0ba9 100644
--- a/drivers/scsi/esas2r/esas2r_log.c
+++ b/drivers/scsi/esas2r/esas2r_log.c
@@ -130,11 +130,6 @@ static int esas2r_log_master(const long level,
spin_lock_irqsave(&event_buffer_lock, flags);
- if (buffer == NULL) {
- spin_unlock_irqrestore(&event_buffer_lock, flags);
- return -1;
- }
-
memset(buffer, 0, buflen);
/*
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index ab7bc1505e0b..90939f66bc0d 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -63,11 +63,11 @@ unsigned int fcoe_debug_logging;
module_param_named(debug_logging, fcoe_debug_logging, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels");
-unsigned int fcoe_e_d_tov = 2 * 1000;
+static unsigned int fcoe_e_d_tov = 2 * 1000;
module_param_named(e_d_tov, fcoe_e_d_tov, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(e_d_tov, "E_D_TOV in ms, default 2000");
-unsigned int fcoe_r_a_tov = 2 * 2 * 1000;
+static unsigned int fcoe_r_a_tov = 2 * 2 * 1000;
module_param_named(r_a_tov, fcoe_r_a_tov, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(r_a_tov, "R_A_TOV in ms, default 4000");
diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h
index 9e4b7709043e..67aab965c0f4 100644
--- a/drivers/scsi/fnic/fnic.h
+++ b/drivers/scsi/fnic/fnic.h
@@ -39,7 +39,7 @@
#define DRV_NAME "fnic"
#define DRV_DESCRIPTION "Cisco FCoE HBA Driver"
-#define DRV_VERSION "1.6.0.21"
+#define DRV_VERSION "1.6.0.34"
#define PFX DRV_NAME ": "
#define DFX DRV_NAME "%d: "
@@ -217,7 +217,6 @@ struct fnic {
struct fcoe_ctlr ctlr; /* FIP FCoE controller structure */
struct vnic_dev_bar bar0;
- struct msix_entry msix_entry[FNIC_MSIX_INTR_MAX];
struct fnic_msix_entry msix[FNIC_MSIX_INTR_MAX];
struct vnic_stats *stats;
diff --git a/drivers/scsi/fnic/fnic_fcs.c b/drivers/scsi/fnic/fnic_fcs.c
index 3b7da66e2771..245dcd95e11f 100644
--- a/drivers/scsi/fnic/fnic_fcs.c
+++ b/drivers/scsi/fnic/fnic_fcs.c
@@ -342,8 +342,11 @@ static void fnic_fcoe_send_vlan_req(struct fnic *fnic)
fnic_fcoe_reset_vlans(fnic);
fnic->set_vlan(fnic, 0);
- FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
- "Sending VLAN request...\n");
+
+ if (printk_ratelimit())
+ FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
+ "Sending VLAN request...\n");
+
skb = dev_alloc_skb(sizeof(struct fip_vlan));
if (!skb)
return;
@@ -359,7 +362,7 @@ static void fnic_fcoe_send_vlan_req(struct fnic *fnic)
vlan->fip.fip_ver = FIP_VER_ENCAPS(FIP_VER);
vlan->fip.fip_op = htons(FIP_OP_VLAN);
- vlan->fip.fip_subcode = FIP_SC_VL_NOTE;
+ vlan->fip.fip_subcode = FIP_SC_VL_REQ;
vlan->fip.fip_dl_len = htons(sizeof(vlan->desc) / FIP_BPW);
vlan->desc.mac.fd_desc.fip_dtype = FIP_DT_MAC;
@@ -1313,10 +1316,11 @@ void fnic_handle_fip_timer(struct fnic *fnic)
spin_lock_irqsave(&fnic->vlans_lock, flags);
if (list_empty(&fnic->vlans)) {
- /* no vlans available, try again */
- FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
- "Start VLAN Discovery\n");
spin_unlock_irqrestore(&fnic->vlans_lock, flags);
+ /* no vlans available, try again */
+ if (printk_ratelimit())
+ FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
+ "Start VLAN Discovery\n");
fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
return;
}
@@ -1332,10 +1336,11 @@ void fnic_handle_fip_timer(struct fnic *fnic)
spin_unlock_irqrestore(&fnic->vlans_lock, flags);
break;
case FIP_VLAN_FAILED:
- /* if all vlans are in failed state, restart vlan disc */
- FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
- "Start VLAN Discovery\n");
spin_unlock_irqrestore(&fnic->vlans_lock, flags);
+ /* if all vlans are in failed state, restart vlan disc */
+ if (printk_ratelimit())
+ FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
+ "Start VLAN Discovery\n");
fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
break;
case FIP_VLAN_SENT:
diff --git a/drivers/scsi/fnic/fnic_isr.c b/drivers/scsi/fnic/fnic_isr.c
index a0dd1b67a467..4e3a50202e8c 100644
--- a/drivers/scsi/fnic/fnic_isr.c
+++ b/drivers/scsi/fnic/fnic_isr.c
@@ -154,13 +154,13 @@ void fnic_free_intr(struct fnic *fnic)
switch (vnic_dev_get_intr_mode(fnic->vdev)) {
case VNIC_DEV_INTR_MODE_INTX:
case VNIC_DEV_INTR_MODE_MSI:
- free_irq(fnic->pdev->irq, fnic);
+ free_irq(pci_irq_vector(fnic->pdev, 0), fnic);
break;
case VNIC_DEV_INTR_MODE_MSIX:
for (i = 0; i < ARRAY_SIZE(fnic->msix); i++)
if (fnic->msix[i].requested)
- free_irq(fnic->msix_entry[i].vector,
+ free_irq(pci_irq_vector(fnic->pdev, i),
fnic->msix[i].devid);
break;
@@ -177,12 +177,12 @@ int fnic_request_intr(struct fnic *fnic)
switch (vnic_dev_get_intr_mode(fnic->vdev)) {
case VNIC_DEV_INTR_MODE_INTX:
- err = request_irq(fnic->pdev->irq, &fnic_isr_legacy,
- IRQF_SHARED, DRV_NAME, fnic);
+ err = request_irq(pci_irq_vector(fnic->pdev, 0),
+ &fnic_isr_legacy, IRQF_SHARED, DRV_NAME, fnic);
break;
case VNIC_DEV_INTR_MODE_MSI:
- err = request_irq(fnic->pdev->irq, &fnic_isr_msi,
+ err = request_irq(pci_irq_vector(fnic->pdev, 0), &fnic_isr_msi,
0, fnic->name, fnic);
break;
@@ -210,7 +210,7 @@ int fnic_request_intr(struct fnic *fnic)
fnic->msix[FNIC_MSIX_ERR_NOTIFY].devid = fnic;
for (i = 0; i < ARRAY_SIZE(fnic->msix); i++) {
- err = request_irq(fnic->msix_entry[i].vector,
+ err = request_irq(pci_irq_vector(fnic->pdev, i),
fnic->msix[i].isr, 0,
fnic->msix[i].devname,
fnic->msix[i].devid);
@@ -237,7 +237,6 @@ int fnic_set_intr_mode(struct fnic *fnic)
unsigned int n = ARRAY_SIZE(fnic->rq);
unsigned int m = ARRAY_SIZE(fnic->wq);
unsigned int o = ARRAY_SIZE(fnic->wq_copy);
- unsigned int i;
/*
* Set interrupt mode (INTx, MSI, MSI-X) depending
@@ -248,23 +247,20 @@ int fnic_set_intr_mode(struct fnic *fnic)
* We need n RQs, m WQs, o Copy WQs, n+m+o CQs, and n+m+o+1 INTRs
* (last INTR is used for WQ/RQ errors and notification area)
*/
-
- BUG_ON(ARRAY_SIZE(fnic->msix_entry) < n + m + o + 1);
- for (i = 0; i < n + m + o + 1; i++)
- fnic->msix_entry[i].entry = i;
-
if (fnic->rq_count >= n &&
fnic->raw_wq_count >= m &&
fnic->wq_copy_count >= o &&
fnic->cq_count >= n + m + o) {
- if (!pci_enable_msix_exact(fnic->pdev, fnic->msix_entry,
- n + m + o + 1)) {
+ int vecs = n + m + o + 1;
+
+ if (pci_alloc_irq_vectors(fnic->pdev, vecs, vecs,
+ PCI_IRQ_MSIX) < 0) {
fnic->rq_count = n;
fnic->raw_wq_count = m;
fnic->wq_copy_count = o;
fnic->wq_count = m + o;
fnic->cq_count = n + m + o;
- fnic->intr_count = n + m + o + 1;
+ fnic->intr_count = vecs;
fnic->err_intr_offset = FNIC_MSIX_ERR_NOTIFY;
FNIC_ISR_DBG(KERN_DEBUG, fnic->lport->host,
@@ -284,8 +280,7 @@ int fnic_set_intr_mode(struct fnic *fnic)
fnic->wq_copy_count >= 1 &&
fnic->cq_count >= 3 &&
fnic->intr_count >= 1 &&
- !pci_enable_msi(fnic->pdev)) {
-
+ pci_alloc_irq_vectors(fnic->pdev, 1, 1, PCI_IRQ_MSI) < 0) {
fnic->rq_count = 1;
fnic->raw_wq_count = 1;
fnic->wq_copy_count = 1;
@@ -334,17 +329,7 @@ int fnic_set_intr_mode(struct fnic *fnic)
void fnic_clear_intr_mode(struct fnic *fnic)
{
- switch (vnic_dev_get_intr_mode(fnic->vdev)) {
- case VNIC_DEV_INTR_MODE_MSIX:
- pci_disable_msix(fnic->pdev);
- break;
- case VNIC_DEV_INTR_MODE_MSI:
- pci_disable_msi(fnic->pdev);
- break;
- default:
- break;
- }
-
+ pci_free_irq_vectors(fnic->pdev);
vnic_dev_set_intr_mode(fnic->vdev, VNIC_DEV_INTR_MODE_INTX);
}
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index adb3d5871e74..d048f3b5006f 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -823,6 +823,7 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
spinlock_t *io_lock;
u64 cmd_trace;
unsigned long start_time;
+ unsigned long io_duration_time;
/* Decode the cmpl description to get the io_req id */
fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
@@ -876,32 +877,28 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
/*
* if SCSI-ML has already issued abort on this command,
- * ignore completion of the IO. The abts path will clean it up
+ * mark the IO as done. The abts path will clean it up
*/
if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
- spin_unlock_irqrestore(io_lock, flags);
+
+ /*
+ * set the FNIC_IO_DONE so that this doesn't get
+ * flagged as 'out of order' if it was not aborted
+ */
+ CMD_FLAGS(sc) |= FNIC_IO_DONE;
CMD_FLAGS(sc) |= FNIC_IO_ABTS_PENDING;
- switch (hdr_status) {
- case FCPIO_SUCCESS:
- CMD_FLAGS(sc) |= FNIC_IO_DONE;
- FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
- "icmnd_cmpl ABTS pending hdr status = %s "
- "sc 0x%p scsi_status %x residual %d\n",
- fnic_fcpio_status_to_str(hdr_status), sc,
- icmnd_cmpl->scsi_status,
- icmnd_cmpl->residual);
- break;
- case FCPIO_ABORTED:
+ spin_unlock_irqrestore(io_lock, flags);
+ if (hdr_status == FCPIO_ABORTED)
CMD_FLAGS(sc) |= FNIC_IO_ABORTED;
- break;
- default:
- FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
- "icmnd_cmpl abts pending "
- "hdr status = %s tag = 0x%x sc = 0x%p\n",
- fnic_fcpio_status_to_str(hdr_status),
- id, sc);
- break;
- }
+
+ FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
+ "icmnd_cmpl abts pending "
+ "hdr status = %s tag = 0x%x sc = 0x%p"
+ "scsi_status = %x residual = %d\n",
+ fnic_fcpio_status_to_str(hdr_status),
+ id, sc,
+ icmnd_cmpl->scsi_status,
+ icmnd_cmpl->residual);
return;
}
@@ -919,6 +916,9 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
if (icmnd_cmpl->flags & FCPIO_ICMND_CMPL_RESID_UNDER)
xfer_len -= icmnd_cmpl->residual;
+ if (icmnd_cmpl->scsi_status == SAM_STAT_CHECK_CONDITION)
+ atomic64_inc(&fnic_stats->misc_stats.check_condition);
+
if (icmnd_cmpl->scsi_status == SAM_STAT_TASK_SET_FULL)
atomic64_inc(&fnic_stats->misc_stats.queue_fulls);
break;
@@ -1017,6 +1017,28 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
else
atomic64_inc(&fnic_stats->io_stats.io_completions);
+
+ io_duration_time = jiffies_to_msecs(jiffies) - jiffies_to_msecs(io_req->start_time);
+
+ if (io_duration_time <= 10)
+ atomic64_inc(&fnic_stats->io_stats.io_btw_0_to_10_msec);
+ else if (io_duration_time <= 100)
+ atomic64_inc(&fnic_stats->io_stats.io_btw_10_to_100_msec);
+ else if (io_duration_time <= 500)
+ atomic64_inc(&fnic_stats->io_stats.io_btw_100_to_500_msec);
+ else if (io_duration_time <= 5000)
+ atomic64_inc(&fnic_stats->io_stats.io_btw_500_to_5000_msec);
+ else if (io_duration_time <= 10000)
+ atomic64_inc(&fnic_stats->io_stats.io_btw_5000_to_10000_msec);
+ else if (io_duration_time <= 30000)
+ atomic64_inc(&fnic_stats->io_stats.io_btw_10000_to_30000_msec);
+ else {
+ atomic64_inc(&fnic_stats->io_stats.io_greater_than_30000_msec);
+
+ if (io_duration_time > atomic64_read(&fnic_stats->io_stats.current_max_io_time))
+ atomic64_set(&fnic_stats->io_stats.current_max_io_time, io_duration_time);
+ }
+
/* Call SCSI completion function to complete the IO */
if (sc->scsi_done)
sc->scsi_done(sc);
@@ -1128,18 +1150,11 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
}
CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_DONE;
+ CMD_ABTS_STATUS(sc) = hdr_status;
/* If the status is IO not found consider it as success */
if (hdr_status == FCPIO_IO_NOT_FOUND)
CMD_ABTS_STATUS(sc) = FCPIO_SUCCESS;
- else
- CMD_ABTS_STATUS(sc) = hdr_status;
-
- atomic64_dec(&fnic_stats->io_stats.active_ios);
- if (atomic64_read(&fnic->io_cmpl_skip))
- atomic64_dec(&fnic->io_cmpl_skip);
- else
- atomic64_inc(&fnic_stats->io_stats.io_completions);
if (!(CMD_FLAGS(sc) & (FNIC_IO_ABORTED | FNIC_IO_DONE)))
atomic64_inc(&misc_stats->no_icmnd_itmf_cmpls);
@@ -1181,6 +1196,11 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
(((u64)CMD_FLAGS(sc) << 32) |
CMD_STATE(sc)));
sc->scsi_done(sc);
+ atomic64_dec(&fnic_stats->io_stats.active_ios);
+ if (atomic64_read(&fnic->io_cmpl_skip))
+ atomic64_dec(&fnic->io_cmpl_skip);
+ else
+ atomic64_inc(&fnic_stats->io_stats.io_completions);
}
}
@@ -1793,6 +1813,7 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
struct terminate_stats *term_stats;
enum fnic_ioreq_state old_ioreq_state;
int tag;
+ unsigned long abt_issued_time;
DECLARE_COMPLETION_ONSTACK(tm_done);
/* Wait for rport to unblock */
@@ -1846,6 +1867,25 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
spin_unlock_irqrestore(io_lock, flags);
goto wait_pending;
}
+
+ abt_issued_time = jiffies_to_msecs(jiffies) - jiffies_to_msecs(io_req->start_time);
+ if (abt_issued_time <= 6000)
+ atomic64_inc(&abts_stats->abort_issued_btw_0_to_6_sec);
+ else if (abt_issued_time > 6000 && abt_issued_time <= 20000)
+ atomic64_inc(&abts_stats->abort_issued_btw_6_to_20_sec);
+ else if (abt_issued_time > 20000 && abt_issued_time <= 30000)
+ atomic64_inc(&abts_stats->abort_issued_btw_20_to_30_sec);
+ else if (abt_issued_time > 30000 && abt_issued_time <= 40000)
+ atomic64_inc(&abts_stats->abort_issued_btw_30_to_40_sec);
+ else if (abt_issued_time > 40000 && abt_issued_time <= 50000)
+ atomic64_inc(&abts_stats->abort_issued_btw_40_to_50_sec);
+ else if (abt_issued_time > 50000 && abt_issued_time <= 60000)
+ atomic64_inc(&abts_stats->abort_issued_btw_50_to_60_sec);
+ else
+ atomic64_inc(&abts_stats->abort_issued_greater_than_60_sec);
+
+ FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
+ "CBD Opcode: %02x Abort issued time: %lu msec\n", sc->cmnd[0], abt_issued_time);
/*
* Command is still pending, need to abort it
* If the firmware completes the command after this point,
@@ -1970,6 +2010,11 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
/* Call SCSI completion function to complete the IO */
sc->result = (DID_ABORT << 16);
sc->scsi_done(sc);
+ atomic64_dec(&fnic_stats->io_stats.active_ios);
+ if (atomic64_read(&fnic->io_cmpl_skip))
+ atomic64_dec(&fnic->io_cmpl_skip);
+ else
+ atomic64_inc(&fnic_stats->io_stats.io_completions);
}
fnic_abort_cmd_end:
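
The fnic_fcpio_icmnd_cmpl_handler() and fnic_abort_cmd() hunks above both compute an elapsed time as jiffies_to_msecs(jiffies) - jiffies_to_msecs(io_req->start_time) and bucket it into atomic64 counters with an if/else ladder. A table-driven sketch of the same bucketing idea, under hypothetical foo_* names (the driver additionally tracks current_max_io_time with an atomic64_read()/atomic64_set() pair; that read-then-set is not atomic as a whole, which is normally acceptable for a debug statistic):

#include <linux/atomic.h>
#include <linux/jiffies.h>

/* Hypothetical latency histogram: upper bounds in msec, plus a catch-all. */
static const unsigned long foo_bucket_ms[6] = { 10, 100, 500, 5000, 10000, 30000 };
static atomic64_t foo_buckets[7];

static void foo_account_latency(unsigned long start_jiffies)
{
        unsigned long ms = jiffies_to_msecs(jiffies) -
                           jiffies_to_msecs(start_jiffies);
        int i;

        for (i = 0; i < 6; i++)
                if (ms <= foo_bucket_ms[i])
                        break;
        /* i == 6 means the sample exceeded the largest bound (> 30 sec). */
        atomic64_inc(&foo_buckets[i]);
}
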
diff --git a/drivers/scsi/fnic/fnic_stats.h b/drivers/scsi/fnic/fnic_stats.h
index 540cceb843cd..88c73cccb015 100644
--- a/drivers/scsi/fnic/fnic_stats.h
+++ b/drivers/scsi/fnic/fnic_stats.h
@@ -26,6 +26,14 @@ struct io_path_stats {
atomic64_t sc_null;
atomic64_t io_not_found;
atomic64_t num_ios;
+ atomic64_t io_btw_0_to_10_msec;
+ atomic64_t io_btw_10_to_100_msec;
+ atomic64_t io_btw_100_to_500_msec;
+ atomic64_t io_btw_500_to_5000_msec;
+ atomic64_t io_btw_5000_to_10000_msec;
+ atomic64_t io_btw_10000_to_30000_msec;
+ atomic64_t io_greater_than_30000_msec;
+ atomic64_t current_max_io_time;
};
struct abort_stats {
@@ -34,6 +42,13 @@ struct abort_stats {
atomic64_t abort_drv_timeouts;
atomic64_t abort_fw_timeouts;
atomic64_t abort_io_not_found;
+ atomic64_t abort_issued_btw_0_to_6_sec;
+ atomic64_t abort_issued_btw_6_to_20_sec;
+ atomic64_t abort_issued_btw_20_to_30_sec;
+ atomic64_t abort_issued_btw_30_to_40_sec;
+ atomic64_t abort_issued_btw_40_to_50_sec;
+ atomic64_t abort_issued_btw_50_to_60_sec;
+ atomic64_t abort_issued_greater_than_60_sec;
};
struct terminate_stats {
@@ -88,6 +103,7 @@ struct misc_stats {
atomic64_t devrst_cpwq_alloc_failures;
atomic64_t io_cpwq_alloc_failures;
atomic64_t no_icmnd_itmf_cmpls;
+ atomic64_t check_condition;
atomic64_t queue_fulls;
atomic64_t rport_not_ready;
atomic64_t frame_errors;
diff --git a/drivers/scsi/fnic/fnic_trace.c b/drivers/scsi/fnic/fnic_trace.c
index 5a5fa01576b7..b5ac5381a0d7 100644
--- a/drivers/scsi/fnic/fnic_trace.c
+++ b/drivers/scsi/fnic/fnic_trace.c
@@ -229,7 +229,16 @@ int fnic_get_stats_data(struct stats_debug_info *debug,
"Number of IO Failures: %lld\nNumber of IO NOT Found: %lld\n"
"Number of Memory alloc Failures: %lld\n"
"Number of IOREQ Null: %lld\n"
- "Number of SCSI cmd pointer Null: %lld\n",
+ "Number of SCSI cmd pointer Null: %lld\n"
+
+ "\nIO completion times: \n"
+ " < 10 ms : %lld\n"
+ " 10 ms - 100 ms : %lld\n"
+ " 100 ms - 500 ms : %lld\n"
+ " 500 ms - 5 sec: %lld\n"
+ " 5 sec - 10 sec: %lld\n"
+ " 10 sec - 30 sec: %lld\n"
+ " > 30 sec: %lld\n",
(u64)atomic64_read(&stats->io_stats.active_ios),
(u64)atomic64_read(&stats->io_stats.max_active_ios),
(u64)atomic64_read(&stats->io_stats.num_ios),
@@ -238,28 +247,58 @@ int fnic_get_stats_data(struct stats_debug_info *debug,
(u64)atomic64_read(&stats->io_stats.io_not_found),
(u64)atomic64_read(&stats->io_stats.alloc_failures),
(u64)atomic64_read(&stats->io_stats.ioreq_null),
- (u64)atomic64_read(&stats->io_stats.sc_null));
+ (u64)atomic64_read(&stats->io_stats.sc_null),
+ (u64)atomic64_read(&stats->io_stats.io_btw_0_to_10_msec),
+ (u64)atomic64_read(&stats->io_stats.io_btw_10_to_100_msec),
+ (u64)atomic64_read(&stats->io_stats.io_btw_100_to_500_msec),
+ (u64)atomic64_read(&stats->io_stats.io_btw_500_to_5000_msec),
+ (u64)atomic64_read(&stats->io_stats.io_btw_5000_to_10000_msec),
+ (u64)atomic64_read(&stats->io_stats.io_btw_10000_to_30000_msec),
+ (u64)atomic64_read(&stats->io_stats.io_greater_than_30000_msec));
+
+ len += snprintf(debug->debug_buffer + len, buf_size - len,
+ "\nCurrent Max IO time : %lld\n",
+ (u64)atomic64_read(&stats->io_stats.current_max_io_time));
len += snprintf(debug->debug_buffer + len, buf_size - len,
"\n------------------------------------------\n"
"\t\tAbort Statistics\n"
"------------------------------------------\n");
+
len += snprintf(debug->debug_buffer + len, buf_size - len,
"Number of Aborts: %lld\n"
"Number of Abort Failures: %lld\n"
"Number of Abort Driver Timeouts: %lld\n"
"Number of Abort FW Timeouts: %lld\n"
- "Number of Abort IO NOT Found: %lld\n",
+ "Number of Abort IO NOT Found: %lld\n"
+
+ "Abord issued times: \n"
+ " < 6 sec : %lld\n"
+ " 6 sec - 20 sec : %lld\n"
+ " 20 sec - 30 sec : %lld\n"
+ " 30 sec - 40 sec : %lld\n"
+ " 40 sec - 50 sec : %lld\n"
+ " 50 sec - 60 sec : %lld\n"
+ " > 60 sec: %lld\n",
+
(u64)atomic64_read(&stats->abts_stats.aborts),
(u64)atomic64_read(&stats->abts_stats.abort_failures),
(u64)atomic64_read(&stats->abts_stats.abort_drv_timeouts),
(u64)atomic64_read(&stats->abts_stats.abort_fw_timeouts),
- (u64)atomic64_read(&stats->abts_stats.abort_io_not_found));
+ (u64)atomic64_read(&stats->abts_stats.abort_io_not_found),
+ (u64)atomic64_read(&stats->abts_stats.abort_issued_btw_0_to_6_sec),
+ (u64)atomic64_read(&stats->abts_stats.abort_issued_btw_6_to_20_sec),
+ (u64)atomic64_read(&stats->abts_stats.abort_issued_btw_20_to_30_sec),
+ (u64)atomic64_read(&stats->abts_stats.abort_issued_btw_30_to_40_sec),
+ (u64)atomic64_read(&stats->abts_stats.abort_issued_btw_40_to_50_sec),
+ (u64)atomic64_read(&stats->abts_stats.abort_issued_btw_50_to_60_sec),
+ (u64)atomic64_read(&stats->abts_stats.abort_issued_greater_than_60_sec));
len += snprintf(debug->debug_buffer + len, buf_size - len,
"\n------------------------------------------\n"
"\t\tTerminate Statistics\n"
"------------------------------------------\n");
+
len += snprintf(debug->debug_buffer + len, buf_size - len,
"Number of Terminates: %lld\n"
"Maximum Terminates: %lld\n"
@@ -357,6 +396,7 @@ int fnic_get_stats_data(struct stats_debug_info *debug,
"Number of Copy WQ Alloc Failures for Device Reset: %lld\n"
"Number of Copy WQ Alloc Failures for IOs: %lld\n"
"Number of no icmnd itmf Completions: %lld\n"
+ "Number of Check Conditions encountered: %lld\n"
"Number of QUEUE Fulls: %lld\n"
"Number of rport not ready: %lld\n"
"Number of receive frame errors: %lld\n",
@@ -377,6 +417,7 @@ int fnic_get_stats_data(struct stats_debug_info *debug,
&stats->misc_stats.devrst_cpwq_alloc_failures),
(u64)atomic64_read(&stats->misc_stats.io_cpwq_alloc_failures),
(u64)atomic64_read(&stats->misc_stats.no_icmnd_itmf_cmpls),
+ (u64)atomic64_read(&stats->misc_stats.check_condition),
(u64)atomic64_read(&stats->misc_stats.queue_fulls),
(u64)atomic64_read(&stats->misc_stats.rport_not_ready),
(u64)atomic64_read(&stats->misc_stats.frame_errors));
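
fnic_get_stats_data() above keeps appending to one preallocated debugfs buffer with len += snprintf(debug->debug_buffer + len, buf_size - len, ...). A condensed sketch of that accumulation style with hypothetical names; scnprintf() is used here because it returns the number of bytes actually written, whereas snprintf() returns the length that would have been written and can push len past the buffer size once it fills up:

#include <linux/kernel.h>
#include <linux/types.h>

/* Hypothetical stats dump: append sections into one buffer, return length. */
static int foo_dump_stats(char *buf, size_t size, u64 ios, u64 aborts)
{
        int len = 0;

        len += scnprintf(buf + len, size - len,
                         "Number of IOs: %llu\n", ios);
        len += scnprintf(buf + len, size - len,
                         "Number of Aborts: %llu\n", aborts);
        return len;     /* bytes written, excluding the trailing NUL */
}
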
diff --git a/drivers/scsi/hisi_sas/Kconfig b/drivers/scsi/hisi_sas/Kconfig
index d1dd1616f983..374a329b91fc 100644
--- a/drivers/scsi/hisi_sas/Kconfig
+++ b/drivers/scsi/hisi_sas/Kconfig
@@ -4,5 +4,6 @@ config SCSI_HISI_SAS
depends on ARM64 || COMPILE_TEST
select SCSI_SAS_LIBSAS
select BLK_DEV_INTEGRITY
+ depends on ATA
help
This driver supports HiSilicon's SAS HBA
diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h
index 9216deaa3ff5..4e28f32e90b0 100644
--- a/drivers/scsi/hisi_sas/hisi_sas.h
+++ b/drivers/scsi/hisi_sas/hisi_sas.h
@@ -31,6 +31,7 @@
#define HISI_SAS_QUEUE_SLOTS 512
#define HISI_SAS_MAX_ITCT_ENTRIES 2048
#define HISI_SAS_MAX_DEVICES HISI_SAS_MAX_ITCT_ENTRIES
+#define HISI_SAS_RESET_BIT 0
#define HISI_SAS_STATUS_BUF_SZ \
(sizeof(struct hisi_sas_err_record) + 1024)
@@ -90,7 +91,6 @@ struct hisi_sas_port {
struct asd_sas_port sas_port;
u8 port_attached;
u8 id; /* from hw */
- struct list_head list;
};
struct hisi_sas_cq {
@@ -113,7 +113,9 @@ struct hisi_sas_device {
u64 attached_phy;
u64 device_id;
atomic64_t running_req;
+ struct list_head list;
u8 dev_status;
+ int sata_idx;
};
struct hisi_sas_slot {
@@ -136,6 +138,7 @@ struct hisi_sas_slot {
struct hisi_sas_sge_page *sge_page;
dma_addr_t sge_page_dma;
struct work_struct abort_slot;
+ struct timer_list internal_abort_timer;
};
struct hisi_sas_tmf_task {
@@ -165,7 +168,8 @@ struct hisi_sas_hw {
struct hisi_sas_slot *slot,
int device_id, int abort_flag, int tag_to_abort);
int (*slot_complete)(struct hisi_hba *hisi_hba,
- struct hisi_sas_slot *slot, int abort);
+ struct hisi_sas_slot *slot);
+ void (*phys_init)(struct hisi_hba *hisi_hba);
void (*phy_enable)(struct hisi_hba *hisi_hba, int phy_no);
void (*phy_disable)(struct hisi_hba *hisi_hba, int phy_no);
void (*phy_hard_reset)(struct hisi_hba *hisi_hba, int phy_no);
@@ -175,6 +179,7 @@ struct hisi_sas_hw {
void (*free_device)(struct hisi_hba *hisi_hba,
struct hisi_sas_device *dev);
int (*get_wideport_bitmap)(struct hisi_hba *hisi_hba, int port_id);
+ int (*soft_reset)(struct hisi_hba *hisi_hba);
int max_command_entries;
int complete_hdr_size;
};
@@ -193,7 +198,6 @@ struct hisi_hba {
u8 sas_addr[SAS_ADDR_SIZE];
int n_phy;
- int scan_finished;
spinlock_t lock;
struct timer_list timer;
@@ -201,6 +205,7 @@ struct hisi_hba {
int slot_index_count;
unsigned long *slot_index_tags;
+ unsigned long reject_stp_links_msk;
/* SCSI/SAS glue */
struct sas_ha_struct sha;
@@ -233,7 +238,10 @@ struct hisi_hba {
struct hisi_sas_breakpoint *sata_breakpoint;
dma_addr_t sata_breakpoint_dma;
struct hisi_sas_slot *slot_info;
+ unsigned long flags;
const struct hisi_sas_hw *hw; /* Low level hw interface */
+ unsigned long sata_dev_bitmap[BITS_TO_LONGS(HISI_SAS_MAX_DEVICES)];
+ struct work_struct rst_work;
};
/* Generic HW DMA host memory structures */
@@ -346,6 +354,8 @@ union hisi_sas_command_table {
struct hisi_sas_command_table_smp smp;
struct hisi_sas_command_table_stp stp;
};
+
+extern struct hisi_sas_port *to_hisi_sas_port(struct asd_sas_port *sas_port);
extern int hisi_sas_probe(struct platform_device *pdev,
const struct hisi_sas_hw *ops);
extern int hisi_sas_remove(struct platform_device *pdev);
@@ -354,4 +364,7 @@ extern void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy);
extern void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba,
struct sas_task *task,
struct hisi_sas_slot *slot);
+extern void hisi_sas_init_mem(struct hisi_hba *hisi_hba);
+extern void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 old_state,
+ u32 state);
#endif
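
Two structural changes in this header drive much of the hisi_sas_main.c rework below: the per-port slot list becomes a per-device list (struct list_head list in hisi_sas_device), and to_hisi_sas_port() recovers the driver's port wrapper from the embedded asd_sas_port via container_of(). The accessor pattern in isolation, with hypothetical foo/bar types:

#include <linux/kernel.h>
#include <linux/list.h>

struct foo_port {                       /* "libsas-style" embedded object */
        int id;
};

struct bar_port {                       /* driver wrapper around foo_port */
        struct foo_port port;
        struct list_head slot_list;
};

static inline struct bar_port *to_bar_port(struct foo_port *port)
{
        /* Recover the wrapper from a pointer to its embedded member. */
        return container_of(port, struct bar_port, port);
}
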
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index 53637a941b94..d622db502ec9 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -21,12 +21,19 @@ static int
hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
struct domain_device *device,
int abort_flag, int tag);
+static int hisi_sas_softreset_ata_disk(struct domain_device *device);
static struct hisi_hba *dev_to_hisi_hba(struct domain_device *device)
{
return device->port->ha->lldd_ha;
}
+struct hisi_sas_port *to_hisi_sas_port(struct asd_sas_port *sas_port)
+{
+ return container_of(sas_port, struct hisi_sas_port, sas_port);
+}
+EXPORT_SYMBOL_GPL(to_hisi_sas_port);
+
static void hisi_sas_slot_index_clear(struct hisi_hba *hisi_hba, int slot_idx)
{
void *bitmap = hisi_hba->slot_index_tags;
@@ -70,17 +77,22 @@ static void hisi_sas_slot_index_init(struct hisi_hba *hisi_hba)
void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
struct hisi_sas_slot *slot)
{
- struct device *dev = &hisi_hba->pdev->dev;
- struct domain_device *device = task->dev;
- struct hisi_sas_device *sas_dev = device->lldd_dev;
- if (!slot->task)
- return;
+ if (task) {
+ struct device *dev = &hisi_hba->pdev->dev;
+ struct domain_device *device = task->dev;
+ struct hisi_sas_device *sas_dev = device->lldd_dev;
- if (!sas_protocol_ata(task->task_proto))
- if (slot->n_elem)
- dma_unmap_sg(dev, task->scatter, slot->n_elem,
- task->data_dir);
+ if (!sas_protocol_ata(task->task_proto))
+ if (slot->n_elem)
+ dma_unmap_sg(dev, task->scatter, slot->n_elem,
+ task->data_dir);
+
+ task->lldd_task = NULL;
+
+ if (sas_dev)
+ atomic64_dec(&sas_dev->running_req);
+ }
if (slot->command_table)
dma_pool_free(hisi_hba->command_table_pool,
@@ -95,12 +107,10 @@ void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
slot->sge_page_dma);
list_del_init(&slot->entry);
- task->lldd_task = NULL;
slot->task = NULL;
slot->port = NULL;
hisi_sas_slot_index_free(hisi_hba, slot->idx);
- if (sas_dev)
- atomic64_dec(&sas_dev->running_req);
+
/* slot memory is fully zeroed when it is reused */
}
EXPORT_SYMBOL_GPL(hisi_sas_slot_task_free);
@@ -178,10 +188,12 @@ static int hisi_sas_task_prep(struct sas_task *task, struct hisi_hba *hisi_hba,
struct hisi_sas_port *port;
struct hisi_sas_slot *slot;
struct hisi_sas_cmd_hdr *cmd_hdr_base;
+ struct asd_sas_port *sas_port = device->port;
struct device *dev = &hisi_hba->pdev->dev;
int dlvry_queue_slot, dlvry_queue, n_elem = 0, rc, slot_idx;
+ unsigned long flags;
- if (!device->port) {
+ if (!sas_port) {
struct task_status_struct *ts = &task->task_status;
ts->resp = SAS_TASK_UNDELIVERED;
@@ -192,7 +204,7 @@ static int hisi_sas_task_prep(struct sas_task *task, struct hisi_hba *hisi_hba,
*/
if (device->dev_type != SAS_SATA_DEV)
task->task_done(task);
- return 0;
+ return SAS_PHY_DOWN;
}
if (DEV_IS_GONE(sas_dev)) {
@@ -203,13 +215,13 @@ static int hisi_sas_task_prep(struct sas_task *task, struct hisi_hba *hisi_hba,
dev_info(dev, "task prep: device %016llx not ready\n",
SAS_ADDR(device->sas_addr));
- rc = SAS_PHY_DOWN;
- return rc;
+ return SAS_PHY_DOWN;
}
- port = device->port->lldd_port;
+
+ port = to_hisi_sas_port(sas_port);
if (port && !port->port_attached) {
dev_info(dev, "task prep: %s port%d not attach device\n",
- (sas_protocol_ata(task->task_proto)) ?
+ (dev_is_sata(device)) ?
"SATA/STP" : "SAS",
device->port->id);
@@ -299,10 +311,10 @@ static int hisi_sas_task_prep(struct sas_task *task, struct hisi_hba *hisi_hba,
goto err_out_command_table;
}
- list_add_tail(&slot->entry, &port->list);
- spin_lock(&task->task_state_lock);
+ list_add_tail(&slot->entry, &sas_dev->list);
+ spin_lock_irqsave(&task->task_state_lock, flags);
task->task_state_flags |= SAS_TASK_AT_INITIATOR;
- spin_unlock(&task->task_state_lock);
+ spin_unlock_irqrestore(&task->task_state_lock, flags);
hisi_hba->slot_prep = slot;
@@ -343,6 +355,9 @@ static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags,
struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev);
struct device *dev = &hisi_hba->pdev->dev;
+ if (unlikely(test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)))
+ return -EINVAL;
+
/* protect task_prep and start_delivery sequence */
spin_lock_irqsave(&hisi_hba->lock, flags);
rc = hisi_sas_task_prep(task, hisi_hba, is_tmf, tmf, &pass);
@@ -412,6 +427,7 @@ static struct hisi_sas_device *hisi_sas_alloc_dev(struct domain_device *device)
sas_dev->dev_type = device->dev_type;
sas_dev->hisi_hba = hisi_hba;
sas_dev->sas_device = device;
+ INIT_LIST_HEAD(&hisi_hba->devices[i].list);
break;
}
}
@@ -482,12 +498,8 @@ static int hisi_sas_slave_configure(struct scsi_device *sdev)
static void hisi_sas_scan_start(struct Scsi_Host *shost)
{
struct hisi_hba *hisi_hba = shost_priv(shost);
- int i;
-
- for (i = 0; i < hisi_hba->n_phy; ++i)
- hisi_sas_bytes_dmaed(hisi_hba, i);
- hisi_hba->scan_finished = 1;
+ hisi_hba->hw->phys_init(hisi_hba);
}
static int hisi_sas_scan_finished(struct Scsi_Host *shost, unsigned long time)
@@ -495,7 +507,8 @@ static int hisi_sas_scan_finished(struct Scsi_Host *shost, unsigned long time)
struct hisi_hba *hisi_hba = shost_priv(shost);
struct sas_ha_struct *sha = &hisi_hba->sha;
- if (hisi_hba->scan_finished == 0)
+ /* Wait for PHY up interrupt to occur */
+ if (time < HZ)
return 0;
sas_drain_work(sha);
@@ -545,7 +558,7 @@ static void hisi_sas_port_notify_formed(struct asd_sas_phy *sas_phy)
struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
struct hisi_sas_phy *phy = sas_phy->lldd_phy;
struct asd_sas_port *sas_port = sas_phy->port;
- struct hisi_sas_port *port = &hisi_hba->port[phy->port_id];
+ struct hisi_sas_port *port = to_hisi_sas_port(sas_port);
unsigned long flags;
if (!sas_port)
@@ -559,50 +572,54 @@ static void hisi_sas_port_notify_formed(struct asd_sas_phy *sas_phy)
spin_unlock_irqrestore(&hisi_hba->lock, flags);
}
-static void hisi_sas_do_release_task(struct hisi_hba *hisi_hba, int phy_no,
- struct domain_device *device)
+static void hisi_sas_do_release_task(struct hisi_hba *hisi_hba, struct sas_task *task,
+ struct hisi_sas_slot *slot)
{
- struct hisi_sas_phy *phy;
- struct hisi_sas_port *port;
- struct hisi_sas_slot *slot, *slot2;
- struct device *dev = &hisi_hba->pdev->dev;
-
- phy = &hisi_hba->phy[phy_no];
- port = phy->port;
- if (!port)
- return;
+ if (task) {
+ unsigned long flags;
+ struct task_status_struct *ts;
+
+ ts = &task->task_status;
+
+ ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_ABORTED_TASK;
+ spin_lock_irqsave(&task->task_state_lock, flags);
+ task->task_state_flags &=
+ ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
+ task->task_state_flags |= SAS_TASK_STATE_DONE;
+ spin_unlock_irqrestore(&task->task_state_lock, flags);
+ }
- list_for_each_entry_safe(slot, slot2, &port->list, entry) {
- struct sas_task *task;
+ hisi_sas_slot_task_free(hisi_hba, task, slot);
+}
- task = slot->task;
- if (device && task->dev != device)
- continue;
+/* hisi_hba.lock should be locked */
+static void hisi_sas_release_task(struct hisi_hba *hisi_hba,
+ struct domain_device *device)
+{
+ struct hisi_sas_slot *slot, *slot2;
+ struct hisi_sas_device *sas_dev = device->lldd_dev;
- dev_info(dev, "Release slot [%d:%d], task [%p]:\n",
- slot->dlvry_queue, slot->dlvry_queue_slot, task);
- hisi_hba->hw->slot_complete(hisi_hba, slot, 1);
- }
+ list_for_each_entry_safe(slot, slot2, &sas_dev->list, entry)
+ hisi_sas_do_release_task(hisi_hba, slot->task, slot);
}
-static void hisi_sas_port_notify_deformed(struct asd_sas_phy *sas_phy)
+static void hisi_sas_release_tasks(struct hisi_hba *hisi_hba)
{
+ struct hisi_sas_device *sas_dev;
struct domain_device *device;
- struct hisi_sas_phy *phy = sas_phy->lldd_phy;
- struct asd_sas_port *sas_port = sas_phy->port;
+ int i;
- list_for_each_entry(device, &sas_port->dev_list, dev_list_node)
- hisi_sas_do_release_task(phy->hisi_hba, sas_phy->id, device);
-}
+ for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
+ sas_dev = &hisi_hba->devices[i];
+ device = sas_dev->sas_device;
-static void hisi_sas_release_task(struct hisi_hba *hisi_hba,
- struct domain_device *device)
-{
- struct asd_sas_port *port = device->port;
- struct asd_sas_phy *sas_phy;
+ if ((sas_dev->dev_type == SAS_PHY_UNUSED) ||
+ !device)
+ continue;
- list_for_each_entry(sas_phy, &port->phy_list, port_phy_el)
- hisi_sas_do_release_task(hisi_hba, sas_phy->id, device);
+ hisi_sas_release_task(hisi_hba, device);
+ }
}
static void hisi_sas_dev_gone(struct domain_device *device)
@@ -644,8 +661,9 @@ static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
break;
case PHY_FUNC_LINK_RESET:
+ hisi_hba->hw->phy_disable(hisi_hba, phy_no);
+ msleep(100);
hisi_hba->hw->phy_enable(hisi_hba, phy_no);
- hisi_hba->hw->phy_hard_reset(hisi_hba, phy_no);
break;
case PHY_FUNC_DISABLE:
@@ -698,7 +716,12 @@ static int hisi_sas_exec_internal_tmf_task(struct domain_device *device,
task->dev = device;
task->task_proto = device->tproto;
- memcpy(&task->ssp_task, parameter, para_len);
+ if (dev_is_sata(device)) {
+ task->ata_task.device_control_reg_update = 1;
+ memcpy(&task->ata_task.fis, parameter, para_len);
+ } else {
+ memcpy(&task->ssp_task, parameter, para_len);
+ }
task->task_done = hisi_sas_task_done;
task->slow_task->timer.data = (unsigned long) task;
@@ -720,15 +743,11 @@ static int hisi_sas_exec_internal_tmf_task(struct domain_device *device,
/* Even TMF timed out, return direct. */
if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
- dev_err(dev, "abort tmf: TMF task[%d] timeout\n",
- tmf->tag_of_task_to_be_managed);
- if (task->lldd_task) {
- struct hisi_sas_slot *slot =
- task->lldd_task;
+ struct hisi_sas_slot *slot = task->lldd_task;
- hisi_sas_slot_task_free(hisi_hba,
- task, slot);
- }
+ dev_err(dev, "abort tmf: TMF task timeout\n");
+ if (slot)
+ slot->task = NULL;
goto ex_err;
}
@@ -781,6 +800,63 @@ ex_err:
return res;
}
+static void hisi_sas_fill_ata_reset_cmd(struct ata_device *dev,
+ bool reset, int pmp, u8 *fis)
+{
+ struct ata_taskfile tf;
+
+ ata_tf_init(dev, &tf);
+ if (reset)
+ tf.ctl |= ATA_SRST;
+ else
+ tf.ctl &= ~ATA_SRST;
+ tf.command = ATA_CMD_DEV_RESET;
+ ata_tf_to_fis(&tf, pmp, 0, fis);
+}
+
+static int hisi_sas_softreset_ata_disk(struct domain_device *device)
+{
+ u8 fis[20] = {0};
+ struct ata_port *ap = device->sata_dev.ap;
+ struct ata_link *link;
+ int rc = TMF_RESP_FUNC_FAILED;
+ struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
+ struct device *dev = &hisi_hba->pdev->dev;
+ int s = sizeof(struct host_to_dev_fis);
+ unsigned long flags;
+
+ ata_for_each_link(link, ap, EDGE) {
+ int pmp = sata_srst_pmp(link);
+
+ hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis);
+ rc = hisi_sas_exec_internal_tmf_task(device, fis, s, NULL);
+ if (rc != TMF_RESP_FUNC_COMPLETE)
+ break;
+ }
+
+ if (rc == TMF_RESP_FUNC_COMPLETE) {
+ ata_for_each_link(link, ap, EDGE) {
+ int pmp = sata_srst_pmp(link);
+
+ hisi_sas_fill_ata_reset_cmd(link->device, 0, pmp, fis);
+ rc = hisi_sas_exec_internal_tmf_task(device, fis,
+ s, NULL);
+ if (rc != TMF_RESP_FUNC_COMPLETE)
+ dev_err(dev, "ata disk de-reset failed\n");
+ }
+ } else {
+ dev_err(dev, "ata disk reset failed\n");
+ }
+
+ if (rc == TMF_RESP_FUNC_COMPLETE) {
+ spin_lock_irqsave(&hisi_hba->lock, flags);
+ hisi_sas_release_task(hisi_hba, device);
+ spin_unlock_irqrestore(&hisi_hba->lock, flags);
+ }
+
+ return rc;
+}
+
static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device,
u8 *lun, struct hisi_sas_tmf_task *tmf)
{
@@ -795,6 +871,40 @@ static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device,
sizeof(ssp_task), tmf);
}
+static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
+{
+ int rc;
+
+ if (!hisi_hba->hw->soft_reset)
+ return -1;
+
+ if (!test_and_set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)) {
+ struct device *dev = &hisi_hba->pdev->dev;
+ struct sas_ha_struct *sas_ha = &hisi_hba->sha;
+ unsigned long flags;
+
+ dev_dbg(dev, "controller reset begins!\n");
+ scsi_block_requests(hisi_hba->shost);
+ rc = hisi_hba->hw->soft_reset(hisi_hba);
+ if (rc) {
+ dev_warn(dev, "controller reset failed (%d)\n", rc);
+ goto out;
+ }
+ spin_lock_irqsave(&hisi_hba->lock, flags);
+ hisi_sas_release_tasks(hisi_hba);
+ spin_unlock_irqrestore(&hisi_hba->lock, flags);
+
+ sas_ha->notify_ha_event(sas_ha, HAE_RESET);
+ dev_dbg(dev, "controller reset successful!\n");
+ } else
+ return -1;
+
+out:
+ scsi_unblock_requests(hisi_hba->shost);
+ clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
+ return rc;
+}
+
static int hisi_sas_abort_task(struct sas_task *task)
{
struct scsi_lun lun;
@@ -811,19 +921,17 @@ static int hisi_sas_abort_task(struct sas_task *task)
return TMF_RESP_FUNC_FAILED;
}
- spin_lock_irqsave(&task->task_state_lock, flags);
if (task->task_state_flags & SAS_TASK_STATE_DONE) {
- spin_unlock_irqrestore(&task->task_state_lock, flags);
rc = TMF_RESP_FUNC_COMPLETE;
goto out;
}
- spin_unlock_irqrestore(&task->task_state_lock, flags);
sas_dev->dev_status = HISI_SAS_DEV_EH;
if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
struct scsi_cmnd *cmnd = task->uldd_task;
struct hisi_sas_slot *slot = task->lldd_task;
u32 tag = slot->idx;
+ int rc2;
int_to_scsilun(cmnd->device->lun, &lun);
tmf_task.tmf = TMF_ABORT_TASK;
@@ -832,35 +940,41 @@ static int hisi_sas_abort_task(struct sas_task *task)
rc = hisi_sas_debug_issue_ssp_tmf(task->dev, lun.scsi_lun,
&tmf_task);
- /* if successful, clear the task and callback forwards.*/
- if (rc == TMF_RESP_FUNC_COMPLETE) {
+ rc2 = hisi_sas_internal_task_abort(hisi_hba, device,
+ HISI_SAS_INT_ABT_CMD, tag);
+ /*
+ * If the TMF finds that the IO is not in the device and also
+ * the internal abort does not succeed, then it is safe to
+ * free the slot.
+ * Note: if the internal abort succeeds then the slot
+ * will have already been completed
+ */
+ if (rc == TMF_RESP_FUNC_COMPLETE && rc2 != TMF_RESP_FUNC_SUCC) {
if (task->lldd_task) {
- struct hisi_sas_slot *slot;
-
- slot = &hisi_hba->slot_info
- [tmf_task.tag_of_task_to_be_managed];
spin_lock_irqsave(&hisi_hba->lock, flags);
- hisi_hba->hw->slot_complete(hisi_hba, slot, 1);
+ hisi_sas_do_release_task(hisi_hba, task, slot);
spin_unlock_irqrestore(&hisi_hba->lock, flags);
}
}
-
- hisi_sas_internal_task_abort(hisi_hba, device,
- HISI_SAS_INT_ABT_CMD, tag);
} else if (task->task_proto & SAS_PROTOCOL_SATA ||
task->task_proto & SAS_PROTOCOL_STP) {
if (task->dev->dev_type == SAS_SATA_DEV) {
hisi_sas_internal_task_abort(hisi_hba, device,
HISI_SAS_INT_ABT_DEV, 0);
- rc = TMF_RESP_FUNC_COMPLETE;
+ rc = hisi_sas_softreset_ata_disk(device);
}
} else if (task->task_proto & SAS_PROTOCOL_SMP) {
/* SMP */
struct hisi_sas_slot *slot = task->lldd_task;
u32 tag = slot->idx;
- hisi_sas_internal_task_abort(hisi_hba, device,
- HISI_SAS_INT_ABT_CMD, tag);
+ rc = hisi_sas_internal_task_abort(hisi_hba, device,
+ HISI_SAS_INT_ABT_CMD, tag);
+ if (rc == TMF_RESP_FUNC_FAILED) {
+ spin_lock_irqsave(&hisi_hba->lock, flags);
+ hisi_sas_do_release_task(hisi_hba, task, slot);
+ spin_unlock_irqrestore(&hisi_hba->lock, flags);
+ }
}
out:
@@ -915,37 +1029,66 @@ static int hisi_sas_I_T_nexus_reset(struct domain_device *device)
rc = hisi_sas_debug_I_T_nexus_reset(device);
- spin_lock_irqsave(&hisi_hba->lock, flags);
- hisi_sas_release_task(hisi_hba, device);
- spin_unlock_irqrestore(&hisi_hba->lock, flags);
-
- return 0;
+ if (rc == TMF_RESP_FUNC_COMPLETE) {
+ spin_lock_irqsave(&hisi_hba->lock, flags);
+ hisi_sas_release_task(hisi_hba, device);
+ spin_unlock_irqrestore(&hisi_hba->lock, flags);
+ }
+ return rc;
}
static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun)
{
- struct hisi_sas_tmf_task tmf_task;
struct hisi_sas_device *sas_dev = device->lldd_dev;
struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
struct device *dev = &hisi_hba->pdev->dev;
unsigned long flags;
int rc = TMF_RESP_FUNC_FAILED;
- tmf_task.tmf = TMF_LU_RESET;
sas_dev->dev_status = HISI_SAS_DEV_EH;
- rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);
- if (rc == TMF_RESP_FUNC_COMPLETE) {
- spin_lock_irqsave(&hisi_hba->lock, flags);
- hisi_sas_release_task(hisi_hba, device);
- spin_unlock_irqrestore(&hisi_hba->lock, flags);
- }
+ if (dev_is_sata(device)) {
+ struct sas_phy *phy;
+
+ /* Clear internal IO and then hardreset */
+ rc = hisi_sas_internal_task_abort(hisi_hba, device,
+ HISI_SAS_INT_ABT_DEV, 0);
+ if (rc == TMF_RESP_FUNC_FAILED)
+ goto out;
+
+ phy = sas_get_local_phy(device);
- /* If failed, fall-through I_T_Nexus reset */
- dev_err(dev, "lu_reset: for device[%llx]:rc= %d\n",
- sas_dev->device_id, rc);
+ rc = sas_phy_reset(phy, 1);
+
+ if (rc == 0) {
+ spin_lock_irqsave(&hisi_hba->lock, flags);
+ hisi_sas_release_task(hisi_hba, device);
+ spin_unlock_irqrestore(&hisi_hba->lock, flags);
+ }
+ sas_put_local_phy(phy);
+ } else {
+ struct hisi_sas_tmf_task tmf_task = { .tmf = TMF_LU_RESET };
+
+ rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);
+ if (rc == TMF_RESP_FUNC_COMPLETE) {
+ spin_lock_irqsave(&hisi_hba->lock, flags);
+ hisi_sas_release_task(hisi_hba, device);
+ spin_unlock_irqrestore(&hisi_hba->lock, flags);
+ }
+ }
+out:
+ if (rc != TMF_RESP_FUNC_COMPLETE)
+ dev_err(dev, "lu_reset: for device[%llx]:rc= %d\n",
+ sas_dev->device_id, rc);
return rc;
}
+static int hisi_sas_clear_nexus_ha(struct sas_ha_struct *sas_ha)
+{
+ struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
+
+ return hisi_sas_controller_reset(hisi_hba);
+}
+
static int hisi_sas_query_task(struct sas_task *task)
{
struct scsi_lun lun;
@@ -990,13 +1133,18 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, u64 device_id,
struct device *dev = &hisi_hba->pdev->dev;
struct hisi_sas_port *port;
struct hisi_sas_slot *slot;
+ struct asd_sas_port *sas_port = device->port;
struct hisi_sas_cmd_hdr *cmd_hdr_base;
int dlvry_queue_slot, dlvry_queue, n_elem = 0, rc, slot_idx;
+ unsigned long flags;
+
+ if (unlikely(test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)))
+ return -EINVAL;
if (!device->port)
return -1;
- port = device->port->lldd_port;
+ port = to_hisi_sas_port(sas_port);
/* simply get a slot and send abort command */
rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx);
@@ -1027,14 +1175,11 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, u64 device_id,
if (rc)
goto err_out_tag;
- /* Port structure is static for the HBA, so
- * even if the port is deformed it is ok
- * to reference.
- */
- list_add_tail(&slot->entry, &port->list);
- spin_lock(&task->task_state_lock);
+
+ list_add_tail(&slot->entry, &sas_dev->list);
+ spin_lock_irqsave(&task->task_state_lock, flags);
task->task_state_flags |= SAS_TASK_AT_INITIATOR;
- spin_unlock(&task->task_state_lock);
+ spin_unlock_irqrestore(&task->task_state_lock, flags);
hisi_hba->slot_prep = slot;
@@ -1085,7 +1230,7 @@ hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
task->task_done = hisi_sas_task_done;
task->slow_task->timer.data = (unsigned long)task;
task->slow_task->timer.function = hisi_sas_tmf_timedout;
- task->slow_task->timer.expires = jiffies + 20*HZ;
+ task->slow_task->timer.expires = jiffies + msecs_to_jiffies(110);
add_timer(&task->slow_task->timer);
/* Lock as we are alloc'ing a slot, which cannot be interrupted */
@@ -1108,15 +1253,16 @@ hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
goto exit;
}
- /* TMF timed out, return direct. */
+ if (task->task_status.resp == SAS_TASK_COMPLETE &&
+ task->task_status.stat == TMF_RESP_FUNC_SUCC) {
+ res = TMF_RESP_FUNC_SUCC;
+ goto exit;
+ }
+
+ /* Internal abort timed out */
if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
dev_err(dev, "internal task abort: timeout.\n");
- if (task->lldd_task) {
- struct hisi_sas_slot *slot = task->lldd_task;
-
- hisi_sas_slot_task_free(hisi_hba, task, slot);
- }
}
}
@@ -1137,11 +1283,6 @@ static void hisi_sas_port_formed(struct asd_sas_phy *sas_phy)
hisi_sas_port_notify_formed(sas_phy);
}
-static void hisi_sas_port_deformed(struct asd_sas_phy *sas_phy)
-{
- hisi_sas_port_notify_deformed(sas_phy);
-}
-
static void hisi_sas_phy_disconnected(struct hisi_sas_phy *phy)
{
phy->phy_attached = 0;
@@ -1181,6 +1322,37 @@ void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy)
}
EXPORT_SYMBOL_GPL(hisi_sas_phy_down);
+void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 old_state,
+ u32 state)
+{
+ struct sas_ha_struct *sas_ha = &hisi_hba->sha;
+ int phy_no;
+
+ for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) {
+ struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
+ struct asd_sas_phy *sas_phy = &phy->sas_phy;
+ struct asd_sas_port *sas_port = sas_phy->port;
+ struct domain_device *dev;
+
+ if (sas_phy->enabled) {
+ /* Report PHY state change to libsas */
+ if (state & (1 << phy_no))
+ continue;
+
+ if (old_state & (1 << phy_no))
+ /* PHY down but was up before */
+ hisi_sas_phy_down(hisi_hba, phy_no, 0);
+ }
+ if (!sas_port)
+ continue;
+ dev = sas_port->port_dev;
+
+ if (DEV_IS_EXPANDER(dev->dev_type))
+ sas_ha->notify_phy_event(sas_phy, PORTE_BROADCAST_RCVD);
+ }
+}
+EXPORT_SYMBOL_GPL(hisi_sas_rescan_topology);
+
static struct scsi_transport_template *hisi_sas_stt;
static struct scsi_host_template hisi_sas_sht = {
@@ -1215,10 +1387,41 @@ static struct sas_domain_function_template hisi_sas_transport_ops = {
.lldd_I_T_nexus_reset = hisi_sas_I_T_nexus_reset,
.lldd_lu_reset = hisi_sas_lu_reset,
.lldd_query_task = hisi_sas_query_task,
+ .lldd_clear_nexus_ha = hisi_sas_clear_nexus_ha,
.lldd_port_formed = hisi_sas_port_formed,
- .lldd_port_deformed = hisi_sas_port_deformed,
};
+void hisi_sas_init_mem(struct hisi_hba *hisi_hba)
+{
+ int i, s, max_command_entries = hisi_hba->hw->max_command_entries;
+
+ for (i = 0; i < hisi_hba->queue_count; i++) {
+ struct hisi_sas_cq *cq = &hisi_hba->cq[i];
+ struct hisi_sas_dq *dq = &hisi_hba->dq[i];
+
+ s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
+ memset(hisi_hba->cmd_hdr[i], 0, s);
+ dq->wr_point = 0;
+
+ s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
+ memset(hisi_hba->complete_hdr[i], 0, s);
+ cq->rd_point = 0;
+ }
+
+ s = sizeof(struct hisi_sas_initial_fis) * hisi_hba->n_phy;
+ memset(hisi_hba->initial_fis, 0, s);
+
+ s = max_command_entries * sizeof(struct hisi_sas_iost);
+ memset(hisi_hba->iost, 0, s);
+
+ s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
+ memset(hisi_hba->breakpoint, 0, s);
+
+ s = max_command_entries * sizeof(struct hisi_sas_breakpoint) * 2;
+ memset(hisi_hba->sata_breakpoint, 0, s);
+}
+EXPORT_SYMBOL_GPL(hisi_sas_init_mem);
+
static int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
{
struct platform_device *pdev = hisi_hba->pdev;
@@ -1230,7 +1433,6 @@ static int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
hisi_sas_phy_init(hisi_hba, i);
hisi_hba->port[i].port_attached = 0;
hisi_hba->port[i].id = -1;
- INIT_LIST_HEAD(&hisi_hba->port[i].list);
}
for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
@@ -1257,7 +1459,6 @@ static int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
&hisi_hba->cmd_hdr_dma[i], GFP_KERNEL);
if (!hisi_hba->cmd_hdr[i])
goto err_out;
- memset(hisi_hba->cmd_hdr[i], 0, s);
/* Completion queue */
s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
@@ -1265,7 +1466,6 @@ static int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
&hisi_hba->complete_hdr_dma[i], GFP_KERNEL);
if (!hisi_hba->complete_hdr[i])
goto err_out;
- memset(hisi_hba->complete_hdr[i], 0, s);
}
s = HISI_SAS_STATUS_BUF_SZ;
@@ -1300,16 +1500,12 @@ static int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
if (!hisi_hba->iost)
goto err_out;
- memset(hisi_hba->iost, 0, s);
-
s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
hisi_hba->breakpoint = dma_alloc_coherent(dev, s,
&hisi_hba->breakpoint_dma, GFP_KERNEL);
if (!hisi_hba->breakpoint)
goto err_out;
- memset(hisi_hba->breakpoint, 0, s);
-
hisi_hba->slot_index_count = max_command_entries;
s = hisi_hba->slot_index_count / BITS_PER_BYTE;
hisi_hba->slot_index_tags = devm_kzalloc(dev, s, GFP_KERNEL);
@@ -1326,14 +1522,13 @@ static int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
&hisi_hba->initial_fis_dma, GFP_KERNEL);
if (!hisi_hba->initial_fis)
goto err_out;
- memset(hisi_hba->initial_fis, 0, s);
s = max_command_entries * sizeof(struct hisi_sas_breakpoint) * 2;
hisi_hba->sata_breakpoint = dma_alloc_coherent(dev, s,
&hisi_hba->sata_breakpoint_dma, GFP_KERNEL);
if (!hisi_hba->sata_breakpoint)
goto err_out;
- memset(hisi_hba->sata_breakpoint, 0, s);
+ hisi_sas_init_mem(hisi_hba);
hisi_sas_slot_index_init(hisi_hba);
@@ -1404,6 +1599,14 @@ static void hisi_sas_free(struct hisi_hba *hisi_hba)
destroy_workqueue(hisi_hba->wq);
}
+static void hisi_sas_rst_work_handler(struct work_struct *work)
+{
+ struct hisi_hba *hisi_hba =
+ container_of(work, struct hisi_hba, rst_work);
+
+ hisi_sas_controller_reset(hisi_hba);
+}
+
static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
const struct hisi_sas_hw *hw)
{
@@ -1421,6 +1624,7 @@ static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
}
hisi_hba = shost_priv(shost);
+ INIT_WORK(&hisi_hba->rst_work, hisi_sas_rst_work_handler);
hisi_hba->hw = hw;
hisi_hba->pdev = pdev;
hisi_hba->shost = shost;
@@ -1583,7 +1787,6 @@ int hisi_sas_remove(struct platform_device *pdev)
struct hisi_hba *hisi_hba = sha->lldd_ha;
struct Scsi_Host *shost = sha->core.shost;
- scsi_remove_host(sha->core.shost);
sas_unregister_ha(sha);
sas_remove_host(sha->core.shost);
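
hisi_sas_controller_reset(), added in this file, makes a host reset a one-shot operation: a flag bit rejects concurrent resets (and new I/O submission, via the HISI_SAS_RESET_BIT checks in the task-exec paths), and the SCSI midlayer is blocked while the hardware hook runs. A stripped-down sketch of that guard, assuming a hypothetical foo_hba with a flags word, a Scsi_Host and a soft-reset hook:

#include <linux/bitops.h>
#include <scsi/scsi_host.h>

#define FOO_RESET_BIT   0

struct foo_hba {
        unsigned long flags;
        struct Scsi_Host *shost;
        int (*hw_soft_reset)(struct foo_hba *hba);
};

static int foo_controller_reset(struct foo_hba *hba)
{
        int rc;

        /* Only one reset at a time; a second caller just fails. */
        if (test_and_set_bit(FOO_RESET_BIT, &hba->flags))
                return -1;

        scsi_block_requests(hba->shost);
        rc = hba->hw_soft_reset(hba);
        /* On success the real driver also completes all outstanding slots here. */
        scsi_unblock_requests(hba->shost);
        clear_bit(FOO_RESET_BIT, &hba->flags);
        return rc;
}
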
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
index 854fbeaade3e..fc1c1b2c1a19 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
@@ -508,6 +508,8 @@ static void setup_itct_v1_hw(struct hisi_hba *hisi_hba,
struct device *dev = &hisi_hba->pdev->dev;
u64 qw0, device_id = sas_dev->device_id;
struct hisi_sas_itct *itct = &hisi_hba->itct[device_id];
+ struct asd_sas_port *sas_port = device->port;
+ struct hisi_sas_port *port = to_hisi_sas_port(sas_port);
memset(itct, 0, sizeof(*itct));
@@ -528,7 +530,7 @@ static void setup_itct_v1_hw(struct hisi_hba *hisi_hba,
(1 << ITCT_HDR_AWT_CONTROL_OFF) |
(device->max_linkrate << ITCT_HDR_MAX_CONN_RATE_OFF) |
(1 << ITCT_HDR_VALID_LINK_NUM_OFF) |
- (device->port->id << ITCT_HDR_PORT_ID_OFF));
+ (port->id << ITCT_HDR_PORT_ID_OFF));
itct->qw0 = cpu_to_le64(qw0);
/* qw1 */
@@ -1275,7 +1277,7 @@ static void slot_err_v1_hw(struct hisi_hba *hisi_hba,
}
static int slot_complete_v1_hw(struct hisi_hba *hisi_hba,
- struct hisi_sas_slot *slot, int abort)
+ struct hisi_sas_slot *slot)
{
struct sas_task *task = slot->task;
struct hisi_sas_device *sas_dev;
@@ -1286,6 +1288,7 @@ static int slot_complete_v1_hw(struct hisi_hba *hisi_hba,
struct hisi_sas_complete_v1_hdr *complete_queue =
hisi_hba->complete_hdr[slot->cmplt_queue];
struct hisi_sas_complete_v1_hdr *complete_hdr;
+ unsigned long flags;
u32 cmplt_hdr_data;
complete_hdr = &complete_queue[slot->cmplt_queue_slot];
@@ -1298,16 +1301,17 @@ static int slot_complete_v1_hw(struct hisi_hba *hisi_hba,
device = task->dev;
sas_dev = device->lldd_dev;
+ spin_lock_irqsave(&task->task_state_lock, flags);
task->task_state_flags &=
~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
task->task_state_flags |= SAS_TASK_STATE_DONE;
+ spin_unlock_irqrestore(&task->task_state_lock, flags);
memset(ts, 0, sizeof(*ts));
ts->resp = SAS_TASK_COMPLETE;
- if (unlikely(!sas_dev || abort)) {
- if (!sas_dev)
- dev_dbg(dev, "slot complete: port has not device\n");
+ if (unlikely(!sas_dev)) {
+ dev_dbg(dev, "slot complete: port has no device\n");
ts->stat = SAS_PHY_DOWN;
goto out;
}
@@ -1620,7 +1624,7 @@ static irqreturn_t cq_interrupt_v1_hw(int irq, void *p)
*/
slot->cmplt_queue_slot = rd_point;
slot->cmplt_queue = queue;
- slot_complete_v1_hw(hisi_hba, slot, 0);
+ slot_complete_v1_hw(hisi_hba, slot);
if (++rd_point >= HISI_SAS_QUEUE_SLOTS)
rd_point = 0;
@@ -1845,8 +1849,6 @@ static int hisi_sas_v1_init(struct hisi_hba *hisi_hba)
if (rc)
return rc;
- phys_init_v1_hw(hisi_hba);
-
return 0;
}
@@ -1860,6 +1862,7 @@ static const struct hisi_sas_hw hisi_sas_v1_hw = {
.get_free_slot = get_free_slot_v1_hw,
.start_delivery = start_delivery_v1_hw,
.slot_complete = slot_complete_v1_hw,
+ .phys_init = phys_init_v1_hw,
.phy_enable = enable_phy_v1_hw,
.phy_disable = disable_phy_v1_hw,
.phy_hard_reset = phy_hard_reset_v1_hw,
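
A recurring detail in the hunks above (hisi_sas_task_prep(), hisi_sas_internal_abort_task_exec(), slot_complete_v1_hw()) is that task_state_flags updates now take the task_state_lock with spin_lock_irqsave() rather than plain spin_lock(), because the same flags are touched from completion paths that may run in hard-IRQ context. The general shape, with a hypothetical task structure:

#include <linux/bitops.h>
#include <linux/spinlock.h>

struct foo_task {
        spinlock_t state_lock;
        unsigned long state_flags;
};

#define FOO_TASK_DONE   BIT(0)

static void foo_mark_done(struct foo_task *t)
{
        unsigned long flags;

        /* The irqsave form is correct from both process and interrupt context. */
        spin_lock_irqsave(&t->state_lock, flags);
        t->state_flags |= FOO_TASK_DONE;
        spin_unlock_irqrestore(&t->state_lock, flags);
}
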
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
index 1b214450dcb5..e241921bee10 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
@@ -194,9 +194,9 @@
#define SL_CONTROL_NOTIFY_EN_MSK (0x1 << SL_CONTROL_NOTIFY_EN_OFF)
#define SL_CONTROL_CTA_OFF 17
#define SL_CONTROL_CTA_MSK (0x1 << SL_CONTROL_CTA_OFF)
-#define RX_PRIMS_STATUS (PORT_BASE + 0x98)
-#define RX_BCAST_CHG_OFF 1
-#define RX_BCAST_CHG_MSK (0x1 << RX_BCAST_CHG_OFF)
+#define RX_PRIMS_STATUS (PORT_BASE + 0x98)
+#define RX_BCAST_CHG_OFF 1
+#define RX_BCAST_CHG_MSK (0x1 << RX_BCAST_CHG_OFF)
#define TX_ID_DWORD0 (PORT_BASE + 0x9c)
#define TX_ID_DWORD1 (PORT_BASE + 0xa0)
#define TX_ID_DWORD2 (PORT_BASE + 0xa4)
@@ -207,8 +207,10 @@
#define TXID_AUTO (PORT_BASE + 0xb8)
#define TXID_AUTO_CT3_OFF 1
#define TXID_AUTO_CT3_MSK (0x1 << TXID_AUTO_CT3_OFF)
-#define TX_HARDRST_OFF 2
-#define TX_HARDRST_MSK (0x1 << TX_HARDRST_OFF)
+#define TXID_AUTO_CTB_OFF 11
+#define TXID_AUTO_CTB_MSK (0x1 << TXID_AUTO_CTB_OFF)
+#define TX_HARDRST_OFF 2
+#define TX_HARDRST_MSK (0x1 << TX_HARDRST_OFF)
#define RX_IDAF_DWORD0 (PORT_BASE + 0xc4)
#define RX_IDAF_DWORD1 (PORT_BASE + 0xc8)
#define RX_IDAF_DWORD2 (PORT_BASE + 0xcc)
@@ -218,6 +220,9 @@
#define RX_IDAF_DWORD6 (PORT_BASE + 0xdc)
#define RXOP_CHECK_CFG_H (PORT_BASE + 0xfc)
#define CON_CONTROL (PORT_BASE + 0x118)
+#define CON_CONTROL_CFG_OPEN_ACC_STP_OFF 0
+#define CON_CONTROL_CFG_OPEN_ACC_STP_MSK \
+ (0x01 << CON_CONTROL_CFG_OPEN_ACC_STP_OFF)
#define DONE_RECEIVED_TIME (PORT_BASE + 0x11c)
#define CHL_INT0 (PORT_BASE + 0x1b4)
#define CHL_INT0_HOTPLUG_TOUT_OFF 0
@@ -240,6 +245,17 @@
#define CHL_INT1_MSK (PORT_BASE + 0x1c4)
#define CHL_INT2_MSK (PORT_BASE + 0x1c8)
#define CHL_INT_COAL_EN (PORT_BASE + 0x1d0)
+#define DMA_TX_DFX0 (PORT_BASE + 0x200)
+#define DMA_TX_DFX1 (PORT_BASE + 0x204)
+#define DMA_TX_DFX1_IPTT_OFF 0
+#define DMA_TX_DFX1_IPTT_MSK (0xffff << DMA_TX_DFX1_IPTT_OFF)
+#define DMA_TX_FIFO_DFX0 (PORT_BASE + 0x240)
+#define PORT_DFX0 (PORT_BASE + 0x258)
+#define LINK_DFX2 (PORT_BASE + 0x264)
+#define LINK_DFX2_RCVR_HOLD_STS_OFF 9
+#define LINK_DFX2_RCVR_HOLD_STS_MSK (0x1 << LINK_DFX2_RCVR_HOLD_STS_OFF)
+#define LINK_DFX2_SEND_HOLD_STS_OFF 10
+#define LINK_DFX2_SEND_HOLD_STS_MSK (0x1 << LINK_DFX2_SEND_HOLD_STS_OFF)
#define PHY_CTRL_RDY_MSK (PORT_BASE + 0x2b0)
#define PHYCTRL_NOT_RDY_MSK (PORT_BASE + 0x2b4)
#define PHYCTRL_DWS_RESET_MSK (PORT_BASE + 0x2b8)
@@ -257,6 +273,10 @@
#define AM_CFG_MAX_TRANS (0x5010)
#define AM_CFG_SINGLE_PORT_MAX_TRANS (0x5014)
+#define AXI_MASTER_CFG_BASE (0x5000)
+#define AM_CTRL_GLOBAL (0x0)
+#define AM_CURR_TRANS_RETURN (0x150)
+
/* HW dma structures */
/* Delivery queue header */
/* dw0 */
@@ -309,6 +329,8 @@
/* Completion header */
/* dw0 */
+#define CMPLT_HDR_ERR_PHASE_OFF 2
+#define CMPLT_HDR_ERR_PHASE_MSK (0xff << CMPLT_HDR_ERR_PHASE_OFF)
#define CMPLT_HDR_RSPNS_XFRD_OFF 10
#define CMPLT_HDR_RSPNS_XFRD_MSK (0x1 << CMPLT_HDR_RSPNS_XFRD_OFF)
#define CMPLT_HDR_ERX_OFF 12
@@ -385,10 +407,10 @@ enum {
enum {
TRANS_TX_FAIL_BASE = 0x0, /* dw0 */
- TRANS_RX_FAIL_BASE = 0x100, /* dw1 */
- DMA_TX_ERR_BASE = 0x200, /* dw2 bit 15-0 */
- SIPC_RX_ERR_BASE = 0x300, /* dw2 bit 31-16*/
- DMA_RX_ERR_BASE = 0x400, /* dw3 */
+ TRANS_RX_FAIL_BASE = 0x20, /* dw1 */
+ DMA_TX_ERR_BASE = 0x40, /* dw2 bit 15-0 */
+ SIPC_RX_ERR_BASE = 0x50, /* dw2 bit 31-16*/
+ DMA_RX_ERR_BASE = 0x60, /* dw3 */
/* trans tx*/
TRANS_TX_OPEN_FAIL_WITH_IT_NEXUS_LOSS = TRANS_TX_FAIL_BASE, /* 0x0 */
@@ -428,100 +450,104 @@ enum {
TRANS_TX_ERR_WITH_WAIT_RECV_TIMEOUT, /* 0x1f for sata/stp */
/* trans rx */
- TRANS_RX_ERR_WITH_RXFRAME_CRC_ERR = TRANS_RX_FAIL_BASE, /* 0x100 */
- TRANS_RX_ERR_WITH_RXFIS_8B10B_DISP_ERR, /* 0x101 for sata/stp */
- TRANS_RX_ERR_WITH_RXFRAME_HAVE_ERRPRM, /* 0x102 for ssp/smp */
- /*IO_ERR_WITH_RXFIS_8B10B_CODE_ERR, [> 0x102 <] for sata/stp */
- TRANS_RX_ERR_WITH_RXFIS_DECODE_ERROR, /* 0x103 for sata/stp */
- TRANS_RX_ERR_WITH_RXFIS_CRC_ERR, /* 0x104 for sata/stp */
- TRANS_RX_ERR_WITH_RXFRAME_LENGTH_OVERRUN, /* 0x105 for smp */
- /*IO_ERR_WITH_RXFIS_TX SYNCP, [> 0x105 <] for sata/stp */
- TRANS_RX_ERR_WITH_RXFIS_RX_SYNCP, /* 0x106 for sata/stp*/
- TRANS_RX_ERR_WITH_LINK_BUF_OVERRUN, /* 0x107 */
- TRANS_RX_ERR_WITH_BREAK_TIMEOUT, /* 0x108 */
- TRANS_RX_ERR_WITH_BREAK_REQUEST, /* 0x109 */
- TRANS_RX_ERR_WITH_BREAK_RECEVIED, /* 0x10a */
- RESERVED1, /* 0x10b */
- TRANS_RX_ERR_WITH_CLOSE_NORMAL, /* 0x10c */
- TRANS_RX_ERR_WITH_CLOSE_PHY_DISABLE, /* 0x10d */
- TRANS_RX_ERR_WITH_CLOSE_DWS_TIMEOUT, /* 0x10e */
- TRANS_RX_ERR_WITH_CLOSE_COMINIT, /* 0x10f */
- TRANS_RX_ERR_WITH_DATA_LEN0, /* 0x110 for ssp/smp */
- TRANS_RX_ERR_WITH_BAD_HASH, /* 0x111 for ssp */
- /*IO_RX_ERR_WITH_FIS_TOO_SHORT, [> 0x111 <] for sata/stp */
- TRANS_RX_XRDY_WLEN_ZERO_ERR, /* 0x112 for ssp*/
- /*IO_RX_ERR_WITH_FIS_TOO_LONG, [> 0x112 <] for sata/stp */
- TRANS_RX_SSP_FRM_LEN_ERR, /* 0x113 for ssp */
- /*IO_RX_ERR_WITH_SATA_DEVICE_LOST, [> 0x113 <] for sata */
- RESERVED2, /* 0x114 */
- RESERVED3, /* 0x115 */
- RESERVED4, /* 0x116 */
- RESERVED5, /* 0x117 */
- TRANS_RX_ERR_WITH_BAD_FRM_TYPE, /* 0x118 */
- TRANS_RX_SMP_FRM_LEN_ERR, /* 0x119 */
- TRANS_RX_SMP_RESP_TIMEOUT_ERR, /* 0x11a */
- RESERVED6, /* 0x11b */
- RESERVED7, /* 0x11c */
- RESERVED8, /* 0x11d */
- RESERVED9, /* 0x11e */
- TRANS_RX_R_ERR, /* 0x11f */
+ TRANS_RX_ERR_WITH_RXFRAME_CRC_ERR = TRANS_RX_FAIL_BASE, /* 0x20 */
+ TRANS_RX_ERR_WITH_RXFIS_8B10B_DISP_ERR, /* 0x21 for sata/stp */
+ TRANS_RX_ERR_WITH_RXFRAME_HAVE_ERRPRM, /* 0x22 for ssp/smp */
+ /*IO_ERR_WITH_RXFIS_8B10B_CODE_ERR, [> 0x22 <] for sata/stp */
+ TRANS_RX_ERR_WITH_RXFIS_DECODE_ERROR, /* 0x23 for sata/stp */
+ TRANS_RX_ERR_WITH_RXFIS_CRC_ERR, /* 0x24 for sata/stp */
+ TRANS_RX_ERR_WITH_RXFRAME_LENGTH_OVERRUN, /* 0x25 for smp */
+ /*IO_ERR_WITH_RXFIS_TX SYNCP, [> 0x25 <] for sata/stp */
+ TRANS_RX_ERR_WITH_RXFIS_RX_SYNCP, /* 0x26 for sata/stp*/
+ TRANS_RX_ERR_WITH_LINK_BUF_OVERRUN, /* 0x27 */
+ TRANS_RX_ERR_WITH_BREAK_TIMEOUT, /* 0x28 */
+ TRANS_RX_ERR_WITH_BREAK_REQUEST, /* 0x29 */
+ TRANS_RX_ERR_WITH_BREAK_RECEVIED, /* 0x2a */
+ RESERVED1, /* 0x2b */
+ TRANS_RX_ERR_WITH_CLOSE_NORMAL, /* 0x2c */
+ TRANS_RX_ERR_WITH_CLOSE_PHY_DISABLE, /* 0x2d */
+ TRANS_RX_ERR_WITH_CLOSE_DWS_TIMEOUT, /* 0x2e */
+ TRANS_RX_ERR_WITH_CLOSE_COMINIT, /* 0x2f */
+ TRANS_RX_ERR_WITH_DATA_LEN0, /* 0x30 for ssp/smp */
+ TRANS_RX_ERR_WITH_BAD_HASH, /* 0x31 for ssp */
+ /*IO_RX_ERR_WITH_FIS_TOO_SHORT, [> 0x31 <] for sata/stp */
+ TRANS_RX_XRDY_WLEN_ZERO_ERR, /* 0x32 for ssp*/
+ /*IO_RX_ERR_WITH_FIS_TOO_LONG, [> 0x32 <] for sata/stp */
+ TRANS_RX_SSP_FRM_LEN_ERR, /* 0x33 for ssp */
+ /*IO_RX_ERR_WITH_SATA_DEVICE_LOST, [> 0x33 <] for sata */
+ RESERVED2, /* 0x34 */
+ RESERVED3, /* 0x35 */
+ RESERVED4, /* 0x36 */
+ RESERVED5, /* 0x37 */
+ TRANS_RX_ERR_WITH_BAD_FRM_TYPE, /* 0x38 */
+ TRANS_RX_SMP_FRM_LEN_ERR, /* 0x39 */
+ TRANS_RX_SMP_RESP_TIMEOUT_ERR, /* 0x3a */
+ RESERVED6, /* 0x3b */
+ RESERVED7, /* 0x3c */
+ RESERVED8, /* 0x3d */
+ RESERVED9, /* 0x3e */
+ TRANS_RX_R_ERR, /* 0x3f */
/* dma tx */
- DMA_TX_DIF_CRC_ERR = DMA_TX_ERR_BASE, /* 0x200 */
- DMA_TX_DIF_APP_ERR, /* 0x201 */
- DMA_TX_DIF_RPP_ERR, /* 0x202 */
- DMA_TX_DATA_SGL_OVERFLOW, /* 0x203 */
- DMA_TX_DIF_SGL_OVERFLOW, /* 0x204 */
- DMA_TX_UNEXP_XFER_ERR, /* 0x205 */
- DMA_TX_UNEXP_RETRANS_ERR, /* 0x206 */
- DMA_TX_XFER_LEN_OVERFLOW, /* 0x207 */
- DMA_TX_XFER_OFFSET_ERR, /* 0x208 */
- DMA_TX_RAM_ECC_ERR, /* 0x209 */
- DMA_TX_DIF_LEN_ALIGN_ERR, /* 0x20a */
+ DMA_TX_DIF_CRC_ERR = DMA_TX_ERR_BASE, /* 0x40 */
+ DMA_TX_DIF_APP_ERR, /* 0x41 */
+ DMA_TX_DIF_RPP_ERR, /* 0x42 */
+ DMA_TX_DATA_SGL_OVERFLOW, /* 0x43 */
+ DMA_TX_DIF_SGL_OVERFLOW, /* 0x44 */
+ DMA_TX_UNEXP_XFER_ERR, /* 0x45 */
+ DMA_TX_UNEXP_RETRANS_ERR, /* 0x46 */
+ DMA_TX_XFER_LEN_OVERFLOW, /* 0x47 */
+ DMA_TX_XFER_OFFSET_ERR, /* 0x48 */
+ DMA_TX_RAM_ECC_ERR, /* 0x49 */
+ DMA_TX_DIF_LEN_ALIGN_ERR, /* 0x4a */
+ DMA_TX_MAX_ERR_CODE,
/* sipc rx */
- SIPC_RX_FIS_STATUS_ERR_BIT_VLD = SIPC_RX_ERR_BASE, /* 0x300 */
- SIPC_RX_PIO_WRSETUP_STATUS_DRQ_ERR, /* 0x301 */
- SIPC_RX_FIS_STATUS_BSY_BIT_ERR, /* 0x302 */
- SIPC_RX_WRSETUP_LEN_ODD_ERR, /* 0x303 */
- SIPC_RX_WRSETUP_LEN_ZERO_ERR, /* 0x304 */
- SIPC_RX_WRDATA_LEN_NOT_MATCH_ERR, /* 0x305 */
- SIPC_RX_NCQ_WRSETUP_OFFSET_ERR, /* 0x306 */
- SIPC_RX_NCQ_WRSETUP_AUTO_ACTIVE_ERR, /* 0x307 */
- SIPC_RX_SATA_UNEXP_FIS_ERR, /* 0x308 */
- SIPC_RX_WRSETUP_ESTATUS_ERR, /* 0x309 */
- SIPC_RX_DATA_UNDERFLOW_ERR, /* 0x30a */
+ SIPC_RX_FIS_STATUS_ERR_BIT_VLD = SIPC_RX_ERR_BASE, /* 0x50 */
+ SIPC_RX_PIO_WRSETUP_STATUS_DRQ_ERR, /* 0x51 */
+ SIPC_RX_FIS_STATUS_BSY_BIT_ERR, /* 0x52 */
+ SIPC_RX_WRSETUP_LEN_ODD_ERR, /* 0x53 */
+ SIPC_RX_WRSETUP_LEN_ZERO_ERR, /* 0x54 */
+ SIPC_RX_WRDATA_LEN_NOT_MATCH_ERR, /* 0x55 */
+ SIPC_RX_NCQ_WRSETUP_OFFSET_ERR, /* 0x56 */
+ SIPC_RX_NCQ_WRSETUP_AUTO_ACTIVE_ERR, /* 0x57 */
+ SIPC_RX_SATA_UNEXP_FIS_ERR, /* 0x58 */
+ SIPC_RX_WRSETUP_ESTATUS_ERR, /* 0x59 */
+ SIPC_RX_DATA_UNDERFLOW_ERR, /* 0x5a */
+ SIPC_RX_MAX_ERR_CODE,
/* dma rx */
- DMA_RX_DIF_CRC_ERR = DMA_RX_ERR_BASE, /* 0x400 */
- DMA_RX_DIF_APP_ERR, /* 0x401 */
- DMA_RX_DIF_RPP_ERR, /* 0x402 */
- DMA_RX_DATA_SGL_OVERFLOW, /* 0x403 */
- DMA_RX_DIF_SGL_OVERFLOW, /* 0x404 */
- DMA_RX_DATA_LEN_OVERFLOW, /* 0x405 */
- DMA_RX_DATA_LEN_UNDERFLOW, /* 0x406 */
- DMA_RX_DATA_OFFSET_ERR, /* 0x407 */
- RESERVED10, /* 0x408 */
- DMA_RX_SATA_FRAME_TYPE_ERR, /* 0x409 */
- DMA_RX_RESP_BUF_OVERFLOW, /* 0x40a */
- DMA_RX_UNEXP_RETRANS_RESP_ERR, /* 0x40b */
- DMA_RX_UNEXP_NORM_RESP_ERR, /* 0x40c */
- DMA_RX_UNEXP_RDFRAME_ERR, /* 0x40d */
- DMA_RX_PIO_DATA_LEN_ERR, /* 0x40e */
- DMA_RX_RDSETUP_STATUS_ERR, /* 0x40f */
- DMA_RX_RDSETUP_STATUS_DRQ_ERR, /* 0x410 */
- DMA_RX_RDSETUP_STATUS_BSY_ERR, /* 0x411 */
- DMA_RX_RDSETUP_LEN_ODD_ERR, /* 0x412 */
- DMA_RX_RDSETUP_LEN_ZERO_ERR, /* 0x413 */
- DMA_RX_RDSETUP_LEN_OVER_ERR, /* 0x414 */
- DMA_RX_RDSETUP_OFFSET_ERR, /* 0x415 */
- DMA_RX_RDSETUP_ACTIVE_ERR, /* 0x416 */
- DMA_RX_RDSETUP_ESTATUS_ERR, /* 0x417 */
- DMA_RX_RAM_ECC_ERR, /* 0x418 */
- DMA_RX_UNKNOWN_FRM_ERR, /* 0x419 */
+ DMA_RX_DIF_CRC_ERR = DMA_RX_ERR_BASE, /* 0x60 */
+ DMA_RX_DIF_APP_ERR, /* 0x61 */
+ DMA_RX_DIF_RPP_ERR, /* 0x62 */
+ DMA_RX_DATA_SGL_OVERFLOW, /* 0x63 */
+ DMA_RX_DIF_SGL_OVERFLOW, /* 0x64 */
+ DMA_RX_DATA_LEN_OVERFLOW, /* 0x65 */
+ DMA_RX_DATA_LEN_UNDERFLOW, /* 0x66 */
+ DMA_RX_DATA_OFFSET_ERR, /* 0x67 */
+ RESERVED10, /* 0x68 */
+ DMA_RX_SATA_FRAME_TYPE_ERR, /* 0x69 */
+ DMA_RX_RESP_BUF_OVERFLOW, /* 0x6a */
+ DMA_RX_UNEXP_RETRANS_RESP_ERR, /* 0x6b */
+ DMA_RX_UNEXP_NORM_RESP_ERR, /* 0x6c */
+ DMA_RX_UNEXP_RDFRAME_ERR, /* 0x6d */
+ DMA_RX_PIO_DATA_LEN_ERR, /* 0x6e */
+ DMA_RX_RDSETUP_STATUS_ERR, /* 0x6f */
+ DMA_RX_RDSETUP_STATUS_DRQ_ERR, /* 0x70 */
+ DMA_RX_RDSETUP_STATUS_BSY_ERR, /* 0x71 */
+ DMA_RX_RDSETUP_LEN_ODD_ERR, /* 0x72 */
+ DMA_RX_RDSETUP_LEN_ZERO_ERR, /* 0x73 */
+ DMA_RX_RDSETUP_LEN_OVER_ERR, /* 0x74 */
+ DMA_RX_RDSETUP_OFFSET_ERR, /* 0x75 */
+ DMA_RX_RDSETUP_ACTIVE_ERR, /* 0x76 */
+ DMA_RX_RDSETUP_ESTATUS_ERR, /* 0x77 */
+ DMA_RX_RAM_ECC_ERR, /* 0x78 */
+ DMA_RX_UNKNOWN_FRM_ERR, /* 0x79 */
+ DMA_RX_MAX_ERR_CODE,
};
#define HISI_SAS_COMMAND_ENTRIES_V2_HW 4096
+#define HISI_MAX_SATA_SUPPORT_V2_HW (HISI_SAS_COMMAND_ENTRIES_V2_HW/64 - 1)
#define DIR_NO_DATA 0
#define DIR_TO_INI 1
@@ -534,7 +560,13 @@ enum {
#define SATA_PROTOCOL_FPDMA 0x8
#define SATA_PROTOCOL_ATAPI 0x10
-static void hisi_sas_link_timeout_disable_link(unsigned long data);
+#define ERR_ON_TX_PHASE(err_phase) (err_phase == 0x2 || \
+ err_phase == 0x4 || err_phase == 0x8 ||\
+ err_phase == 0x6 || err_phase == 0xa)
+#define ERR_ON_RX_PHASE(err_phase) (err_phase == 0x10 || \
+ err_phase == 0x20 || err_phase == 0x40)
+
+static void link_timeout_disable_link(unsigned long data);
static u32 hisi_sas_read32(struct hisi_hba *hisi_hba, u32 off)
{
@@ -576,38 +608,86 @@ static u32 hisi_sas_phy_read32(struct hisi_hba *hisi_hba,
/* This function needs to be protected from pre-emption. */
static int
slot_index_alloc_quirk_v2_hw(struct hisi_hba *hisi_hba, int *slot_idx,
- struct domain_device *device)
+ struct domain_device *device)
{
- unsigned int index = 0;
- void *bitmap = hisi_hba->slot_index_tags;
int sata_dev = dev_is_sata(device);
+ void *bitmap = hisi_hba->slot_index_tags;
+ struct hisi_sas_device *sas_dev = device->lldd_dev;
+ int sata_idx = sas_dev->sata_idx;
+ int start, end;
+
+ if (!sata_dev) {
+ /*
+ * STP link SoC bug workaround: index starts from 1.
+ * Additionally, we can only allocate odd IPTTs (1~4095)
+ * for SAS/SMP devices.
+ */
+ start = 1;
+ end = hisi_hba->slot_index_count;
+ } else {
+ if (sata_idx >= HISI_MAX_SATA_SUPPORT_V2_HW)
+ return -EINVAL;
+
+ /*
+ * For a SATA device: allocate even IPTTs in the interval
+ * [64*(sata_idx+1), 64*(sata_idx+2)], so each SATA device
+ * owns 32 IPTTs. IPTT 0 shall not be used due to the STP link
+ * SoC bug workaround. So we ignore the first 32 even IPTTs.
+ */
+ start = 64 * (sata_idx + 1);
+ end = 64 * (sata_idx + 2);
+ }
while (1) {
- index = find_next_zero_bit(bitmap, hisi_hba->slot_index_count,
- index);
- if (index >= hisi_hba->slot_index_count)
+ start = find_next_zero_bit(bitmap,
+ hisi_hba->slot_index_count, start);
+ if (start >= end)
return -SAS_QUEUE_FULL;
/*
- * SAS IPTT bit0 should be 1
- */
- if (sata_dev || (index & 1))
+ * SAS IPTT bit0 should be 1, and SATA IPTT bit0 should be 0.
+ */
+ if (sata_dev ^ (start & 1))
break;
- index++;
+ start++;
}
- set_bit(index, bitmap);
- *slot_idx = index;
+ set_bit(start, bitmap);
+ *slot_idx = start;
return 0;
}
+static bool sata_index_alloc_v2_hw(struct hisi_hba *hisi_hba, int *idx)
+{
+ unsigned int index;
+ struct device *dev = &hisi_hba->pdev->dev;
+ void *bitmap = hisi_hba->sata_dev_bitmap;
+
+ index = find_first_zero_bit(bitmap, HISI_MAX_SATA_SUPPORT_V2_HW);
+ if (index >= HISI_MAX_SATA_SUPPORT_V2_HW) {
+ dev_warn(dev, "alloc sata index failed, index=%d\n", index);
+ return false;
+ }
+
+ set_bit(index, bitmap);
+ *idx = index;
+ return true;
+}
+
+
static struct
hisi_sas_device *alloc_dev_quirk_v2_hw(struct domain_device *device)
{
struct hisi_hba *hisi_hba = device->port->ha->lldd_ha;
struct hisi_sas_device *sas_dev = NULL;
int i, sata_dev = dev_is_sata(device);
+ int sata_idx = -1;
spin_lock(&hisi_hba->lock);
+
+ if (sata_dev)
+ if (!sata_index_alloc_v2_hw(hisi_hba, &sata_idx))
+ goto out;
+
for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
/*
* SATA device id bit0 should be 0
@@ -621,9 +701,13 @@ hisi_sas_device *alloc_dev_quirk_v2_hw(struct domain_device *device)
sas_dev->dev_type = device->dev_type;
sas_dev->hisi_hba = hisi_hba;
sas_dev->sas_device = device;
+ sas_dev->sata_idx = sata_idx;
+ INIT_LIST_HEAD(&hisi_hba->devices[i].list);
break;
}
}
+
+out:
spin_unlock(&hisi_hba->lock);
return sas_dev;
@@ -676,7 +760,8 @@ static void setup_itct_v2_hw(struct hisi_hba *hisi_hba,
u64 qw0, device_id = sas_dev->device_id;
struct hisi_sas_itct *itct = &hisi_hba->itct[device_id];
struct domain_device *parent_dev = device->parent;
- struct hisi_sas_port *port = device->port->lldd_port;
+ struct asd_sas_port *sas_port = device->port;
+ struct hisi_sas_port *port = to_hisi_sas_port(sas_port);
memset(itct, 0, sizeof(*itct));
@@ -729,6 +814,10 @@ static void free_device_v2_hw(struct hisi_hba *hisi_hba,
u32 reg_val = hisi_sas_read32(hisi_hba, ENT_INT_SRC3);
int i;
+ /* SoC bug workaround */
+ if (dev_is_sata(sas_dev->sas_device))
+ clear_bit(sas_dev->sata_idx, hisi_hba->sata_dev_bitmap);
+
/* clear the itct interrupt state */
if (ENT_INT_SRC3_ITC_INT_MSK & reg_val)
hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
@@ -858,6 +947,46 @@ static int reset_hw_v2_hw(struct hisi_hba *hisi_hba)
return 0;
}
+/* This function needs to be called after resetting the SAS controller. */
+static void phys_reject_stp_links_v2_hw(struct hisi_hba *hisi_hba)
+{
+ u32 cfg;
+ int phy_no;
+
+ hisi_hba->reject_stp_links_msk = (1 << hisi_hba->n_phy) - 1;
+ for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) {
+ cfg = hisi_sas_phy_read32(hisi_hba, phy_no, CON_CONTROL);
+ if (!(cfg & CON_CONTROL_CFG_OPEN_ACC_STP_MSK))
+ continue;
+
+ cfg &= ~CON_CONTROL_CFG_OPEN_ACC_STP_MSK;
+ hisi_sas_phy_write32(hisi_hba, phy_no, CON_CONTROL, cfg);
+ }
+}
+
+static void phys_try_accept_stp_links_v2_hw(struct hisi_hba *hisi_hba)
+{
+ int phy_no;
+ u32 dma_tx_dfx1;
+
+ for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) {
+ if (!(hisi_hba->reject_stp_links_msk & BIT(phy_no)))
+ continue;
+
+ dma_tx_dfx1 = hisi_sas_phy_read32(hisi_hba, phy_no,
+ DMA_TX_DFX1);
+ if (dma_tx_dfx1 & DMA_TX_DFX1_IPTT_MSK) {
+ u32 cfg = hisi_sas_phy_read32(hisi_hba,
+ phy_no, CON_CONTROL);
+
+ cfg |= CON_CONTROL_CFG_OPEN_ACC_STP_MSK;
+ hisi_sas_phy_write32(hisi_hba, phy_no,
+ CON_CONTROL, cfg);
+ clear_bit(phy_no, &hisi_hba->reject_stp_links_msk);
+ }
+ }
+}
+
static void init_reg_v2_hw(struct hisi_hba *hisi_hba)
{
struct device *dev = &hisi_hba->pdev->dev;
@@ -876,7 +1005,7 @@ static void init_reg_v2_hw(struct hisi_hba *hisi_hba)
(u32)((1ULL << hisi_hba->queue_count) - 1));
hisi_sas_write32(hisi_hba, AXI_USER1, 0xc0000000);
hisi_sas_write32(hisi_hba, AXI_USER2, 0x10000);
- hisi_sas_write32(hisi_hba, HGC_SAS_TXFAIL_RETRY_CTRL, 0x108);
+ hisi_sas_write32(hisi_hba, HGC_SAS_TXFAIL_RETRY_CTRL, 0x0);
hisi_sas_write32(hisi_hba, HGC_SAS_TX_OPEN_FAIL_RETRY_CTRL, 0x7FF);
hisi_sas_write32(hisi_hba, OPENA_WT_CONTI_TIME, 0x1);
hisi_sas_write32(hisi_hba, I_T_NEXUS_LOSS_TIME, 0x1F4);
@@ -885,9 +1014,9 @@ static void init_reg_v2_hw(struct hisi_hba *hisi_hba)
hisi_sas_write32(hisi_hba, CFG_AGING_TIME, 0x1);
hisi_sas_write32(hisi_hba, HGC_ERR_STAT_EN, 0x1);
hisi_sas_write32(hisi_hba, HGC_GET_ITV_TIME, 0x1);
- hisi_sas_write32(hisi_hba, INT_COAL_EN, 0x1);
- hisi_sas_write32(hisi_hba, OQ_INT_COAL_TIME, 0x1);
- hisi_sas_write32(hisi_hba, OQ_INT_COAL_CNT, 0x1);
+ hisi_sas_write32(hisi_hba, INT_COAL_EN, 0xc);
+ hisi_sas_write32(hisi_hba, OQ_INT_COAL_TIME, 0x60);
+ hisi_sas_write32(hisi_hba, OQ_INT_COAL_CNT, 0x3);
hisi_sas_write32(hisi_hba, ENT_INT_COAL_TIME, 0x1);
hisi_sas_write32(hisi_hba, ENT_INT_COAL_CNT, 0x1);
hisi_sas_write32(hisi_hba, OQ_INT_SRC, 0x0);
@@ -910,14 +1039,14 @@ static void init_reg_v2_hw(struct hisi_hba *hisi_hba)
hisi_sas_phy_write32(hisi_hba, i, SL_TOUT_CFG, 0x7d7d7d7d);
hisi_sas_phy_write32(hisi_hba, i, SL_CONTROL, 0x0);
hisi_sas_phy_write32(hisi_hba, i, TXID_AUTO, 0x2);
- hisi_sas_phy_write32(hisi_hba, i, DONE_RECEIVED_TIME, 0x10);
+ hisi_sas_phy_write32(hisi_hba, i, DONE_RECEIVED_TIME, 0x8);
hisi_sas_phy_write32(hisi_hba, i, CHL_INT0, 0xffffffff);
hisi_sas_phy_write32(hisi_hba, i, CHL_INT1, 0xffffffff);
hisi_sas_phy_write32(hisi_hba, i, CHL_INT2, 0xfff87fff);
hisi_sas_phy_write32(hisi_hba, i, RXOP_CHECK_CFG_H, 0x1000);
hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK, 0xffffffff);
hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0x8ffffbff);
- hisi_sas_phy_write32(hisi_hba, i, SL_CFG, 0x23f801fc);
+ hisi_sas_phy_write32(hisi_hba, i, SL_CFG, 0x13f801fc);
hisi_sas_phy_write32(hisi_hba, i, PHY_CTRL_RDY_MSK, 0x0);
hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_NOT_RDY_MSK, 0x0);
hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_DWS_RESET_MSK, 0x0);
@@ -989,12 +1118,15 @@ static void init_reg_v2_hw(struct hisi_hba *hisi_hba)
upper_32_bits(hisi_hba->initial_fis_dma));
}
-static void hisi_sas_link_timeout_enable_link(unsigned long data)
+static void link_timeout_enable_link(unsigned long data)
{
struct hisi_hba *hisi_hba = (struct hisi_hba *)data;
int i, reg_val;
for (i = 0; i < hisi_hba->n_phy; i++) {
+ if (hisi_hba->reject_stp_links_msk & BIT(i))
+ continue;
+
reg_val = hisi_sas_phy_read32(hisi_hba, i, CON_CONTROL);
if (!(reg_val & BIT(0))) {
hisi_sas_phy_write32(hisi_hba, i,
@@ -1003,17 +1135,20 @@ static void hisi_sas_link_timeout_enable_link(unsigned long data)
}
}
- hisi_hba->timer.function = hisi_sas_link_timeout_disable_link;
+ hisi_hba->timer.function = link_timeout_disable_link;
mod_timer(&hisi_hba->timer, jiffies + msecs_to_jiffies(900));
}
-static void hisi_sas_link_timeout_disable_link(unsigned long data)
+static void link_timeout_disable_link(unsigned long data)
{
struct hisi_hba *hisi_hba = (struct hisi_hba *)data;
int i, reg_val;
reg_val = hisi_sas_read32(hisi_hba, PHY_STATE);
for (i = 0; i < hisi_hba->n_phy && reg_val; i++) {
+ if (hisi_hba->reject_stp_links_msk & BIT(i))
+ continue;
+
if (reg_val & BIT(i)) {
hisi_sas_phy_write32(hisi_hba, i,
CON_CONTROL, 0x6);
@@ -1021,14 +1156,14 @@ static void hisi_sas_link_timeout_disable_link(unsigned long data)
}
}
- hisi_hba->timer.function = hisi_sas_link_timeout_enable_link;
+ hisi_hba->timer.function = link_timeout_enable_link;
mod_timer(&hisi_hba->timer, jiffies + msecs_to_jiffies(100));
}
static void set_link_timer_quirk(struct hisi_hba *hisi_hba)
{
hisi_hba->timer.data = (unsigned long)hisi_hba;
- hisi_hba->timer.function = hisi_sas_link_timeout_disable_link;
+ hisi_hba->timer.function = link_timeout_disable_link;
hisi_hba->timer.expires = jiffies + msecs_to_jiffies(1000);
add_timer(&hisi_hba->timer);
}
@@ -1058,12 +1193,138 @@ static void enable_phy_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg);
}
+static bool is_sata_phy_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
+{
+ u32 context;
+
+ context = hisi_sas_read32(hisi_hba, PHY_CONTEXT);
+ if (context & (1 << phy_no))
+ return true;
+
+ return false;
+}
+
+static bool tx_fifo_is_empty_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
+{
+ u32 dfx_val;
+
+ dfx_val = hisi_sas_phy_read32(hisi_hba, phy_no, DMA_TX_DFX1);
+
+ if (dfx_val & BIT(16))
+ return false;
+
+ return true;
+}
+
+static bool axi_bus_is_idle_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
+{
+ int i, max_loop = 1000;
+ struct device *dev = &hisi_hba->pdev->dev;
+ u32 status, axi_status, dfx_val, dfx_tx_val;
+
+ for (i = 0; i < max_loop; i++) {
+ status = hisi_sas_read32_relaxed(hisi_hba,
+ AXI_MASTER_CFG_BASE + AM_CURR_TRANS_RETURN);
+
+ axi_status = hisi_sas_read32(hisi_hba, AXI_CFG);
+ dfx_val = hisi_sas_phy_read32(hisi_hba, phy_no, DMA_TX_DFX1);
+ dfx_tx_val = hisi_sas_phy_read32(hisi_hba,
+ phy_no, DMA_TX_FIFO_DFX0);
+
+ if ((status == 0x3) && (axi_status == 0x0) &&
+ (dfx_val & BIT(20)) && (dfx_tx_val & BIT(10)))
+ return true;
+ udelay(10);
+ }
+ dev_err(dev, "bus is not idle phy%d, axi150:0x%x axi100:0x%x port204:0x%x port240:0x%x\n",
+ phy_no, status, axi_status,
+ dfx_val, dfx_tx_val);
+ return false;
+}
+
+static bool wait_io_done_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
+{
+ int i, max_loop = 1000;
+ struct device *dev = &hisi_hba->pdev->dev;
+ u32 status, tx_dfx0;
+
+ for (i = 0; i < max_loop; i++) {
+ status = hisi_sas_phy_read32(hisi_hba, phy_no, LINK_DFX2);
+ status = (status & 0x3fc0) >> 6;
+
+ if (status != 0x1)
+ return true;
+
+ tx_dfx0 = hisi_sas_phy_read32(hisi_hba, phy_no, DMA_TX_DFX0);
+ if ((tx_dfx0 & 0x1ff) == 0x2)
+ return true;
+ udelay(10);
+ }
+ dev_err(dev, "IO not done phy%d, port264:0x%x port200:0x%x\n",
+ phy_no, status, tx_dfx0);
+ return false;
+}
+
+static bool allowed_disable_phy_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
+{
+ if (tx_fifo_is_empty_v2_hw(hisi_hba, phy_no))
+ return true;
+
+ if (!axi_bus_is_idle_v2_hw(hisi_hba, phy_no))
+ return false;
+
+ if (!wait_io_done_v2_hw(hisi_hba, phy_no))
+ return false;
+
+ return true;
+}
+
static void disable_phy_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
{
- u32 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG);
+ u32 cfg, axi_val, dfx0_val, txid_auto;
+ struct device *dev = &hisi_hba->pdev->dev;
+
+ /* Close axi bus. */
+ axi_val = hisi_sas_read32(hisi_hba, AXI_MASTER_CFG_BASE +
+ AM_CTRL_GLOBAL);
+ axi_val |= 0x1;
+ hisi_sas_write32(hisi_hba, AXI_MASTER_CFG_BASE +
+ AM_CTRL_GLOBAL, axi_val);
+
+ if (is_sata_phy_v2_hw(hisi_hba, phy_no)) {
+ if (allowed_disable_phy_v2_hw(hisi_hba, phy_no))
+ goto do_disable;
+
+ /* Reset host controller. */
+ queue_work(hisi_hba->wq, &hisi_hba->rst_work);
+ return;
+ }
+
+ dfx0_val = hisi_sas_phy_read32(hisi_hba, phy_no, PORT_DFX0);
+ dfx0_val = (dfx0_val & 0x1fc0) >> 6;
+ if (dfx0_val != 0x4)
+ goto do_disable;
+ if (!tx_fifo_is_empty_v2_hw(hisi_hba, phy_no)) {
+ dev_warn(dev, "phy%d, wait tx fifo need send break\n",
+ phy_no);
+ txid_auto = hisi_sas_phy_read32(hisi_hba, phy_no,
+ TXID_AUTO);
+ txid_auto |= TXID_AUTO_CTB_MSK;
+ hisi_sas_phy_write32(hisi_hba, phy_no, TXID_AUTO,
+ txid_auto);
+ }
+
+do_disable:
+ cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG);
cfg &= ~PHY_CFG_ENA_MSK;
hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg);
+
+ /* Open axi bus. */
+ axi_val &= ~0x1;
+ hisi_sas_write32(hisi_hba, AXI_MASTER_CFG_BASE +
+ AM_CTRL_GLOBAL, axi_val);
}
static void start_phy_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
@@ -1078,6 +1339,14 @@ static void stop_phy_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
disable_phy_v2_hw(hisi_hba, phy_no);
}
+static void stop_phys_v2_hw(struct hisi_hba *hisi_hba)
+{
+ int i;
+
+ for (i = 0; i < hisi_hba->n_phy; i++)
+ stop_phy_v2_hw(hisi_hba, i);
+}
+
static void phy_hard_reset_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
{
struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
@@ -1437,10 +1706,205 @@ static void sata_done_v2_hw(struct hisi_hba *hisi_hba, struct sas_task *task,
ts->buf_valid_size = sizeof(*resp);
}
+#define TRANS_TX_ERR 0
+#define TRANS_RX_ERR 1
+#define DMA_TX_ERR 2
+#define SIPC_RX_ERR 3
+#define DMA_RX_ERR 4
+
+#define DMA_TX_ERR_OFF 0
+#define DMA_TX_ERR_MSK (0xffff << DMA_TX_ERR_OFF)
+#define SIPC_RX_ERR_OFF 16
+#define SIPC_RX_ERR_MSK (0xffff << SIPC_RX_ERR_OFF)
+
+static int parse_trans_tx_err_code_v2_hw(u32 err_msk)
+{
+ const u8 trans_tx_err_code_prio[] = {
+ TRANS_TX_OPEN_FAIL_WITH_IT_NEXUS_LOSS,
+ TRANS_TX_ERR_PHY_NOT_ENABLE,
+ TRANS_TX_OPEN_CNX_ERR_WRONG_DESTINATION,
+ TRANS_TX_OPEN_CNX_ERR_ZONE_VIOLATION,
+ TRANS_TX_OPEN_CNX_ERR_BY_OTHER,
+ RESERVED0,
+ TRANS_TX_OPEN_CNX_ERR_AIP_TIMEOUT,
+ TRANS_TX_OPEN_CNX_ERR_STP_RESOURCES_BUSY,
+ TRANS_TX_OPEN_CNX_ERR_PROTOCOL_NOT_SUPPORTED,
+ TRANS_TX_OPEN_CNX_ERR_CONNECTION_RATE_NOT_SUPPORTED,
+ TRANS_TX_OPEN_CNX_ERR_BAD_DESTINATION,
+ TRANS_TX_OPEN_CNX_ERR_BREAK_RCVD,
+ TRANS_TX_OPEN_CNX_ERR_LOW_PHY_POWER,
+ TRANS_TX_OPEN_CNX_ERR_PATHWAY_BLOCKED,
+ TRANS_TX_OPEN_CNX_ERR_OPEN_TIMEOUT,
+ TRANS_TX_OPEN_CNX_ERR_NO_DESTINATION,
+ TRANS_TX_OPEN_RETRY_ERR_THRESHOLD_REACHED,
+ TRANS_TX_ERR_WITH_CLOSE_PHYDISALE,
+ TRANS_TX_ERR_WITH_CLOSE_DWS_TIMEOUT,
+ TRANS_TX_ERR_WITH_CLOSE_COMINIT,
+ TRANS_TX_ERR_WITH_BREAK_TIMEOUT,
+ TRANS_TX_ERR_WITH_BREAK_REQUEST,
+ TRANS_TX_ERR_WITH_BREAK_RECEVIED,
+ TRANS_TX_ERR_WITH_CLOSE_TIMEOUT,
+ TRANS_TX_ERR_WITH_CLOSE_NORMAL,
+ TRANS_TX_ERR_WITH_NAK_RECEVIED,
+ TRANS_TX_ERR_WITH_ACK_NAK_TIMEOUT,
+ TRANS_TX_ERR_WITH_CREDIT_TIMEOUT,
+ TRANS_TX_ERR_WITH_IPTT_CONFLICT,
+ TRANS_TX_ERR_WITH_OPEN_BY_DES_OR_OTHERS,
+ TRANS_TX_ERR_WITH_WAIT_RECV_TIMEOUT,
+ };
+ int index, i;
+
+ for (i = 0; i < ARRAY_SIZE(trans_tx_err_code_prio); i++) {
+ index = trans_tx_err_code_prio[i] - TRANS_TX_FAIL_BASE;
+ if (err_msk & (1 << index))
+ return trans_tx_err_code_prio[i];
+ }
+ return -1;
+}
+
+static int parse_trans_rx_err_code_v2_hw(u32 err_msk)
+{
+ const u8 trans_rx_err_code_prio[] = {
+ TRANS_RX_ERR_WITH_RXFRAME_CRC_ERR,
+ TRANS_RX_ERR_WITH_RXFIS_8B10B_DISP_ERR,
+ TRANS_RX_ERR_WITH_RXFRAME_HAVE_ERRPRM,
+ TRANS_RX_ERR_WITH_RXFIS_DECODE_ERROR,
+ TRANS_RX_ERR_WITH_RXFIS_CRC_ERR,
+ TRANS_RX_ERR_WITH_RXFRAME_LENGTH_OVERRUN,
+ TRANS_RX_ERR_WITH_RXFIS_RX_SYNCP,
+ TRANS_RX_ERR_WITH_LINK_BUF_OVERRUN,
+ TRANS_RX_ERR_WITH_CLOSE_PHY_DISABLE,
+ TRANS_RX_ERR_WITH_CLOSE_DWS_TIMEOUT,
+ TRANS_RX_ERR_WITH_CLOSE_COMINIT,
+ TRANS_RX_ERR_WITH_BREAK_TIMEOUT,
+ TRANS_RX_ERR_WITH_BREAK_REQUEST,
+ TRANS_RX_ERR_WITH_BREAK_RECEVIED,
+ RESERVED1,
+ TRANS_RX_ERR_WITH_CLOSE_NORMAL,
+ TRANS_RX_ERR_WITH_DATA_LEN0,
+ TRANS_RX_ERR_WITH_BAD_HASH,
+ TRANS_RX_XRDY_WLEN_ZERO_ERR,
+ TRANS_RX_SSP_FRM_LEN_ERR,
+ RESERVED2,
+ RESERVED3,
+ RESERVED4,
+ RESERVED5,
+ TRANS_RX_ERR_WITH_BAD_FRM_TYPE,
+ TRANS_RX_SMP_FRM_LEN_ERR,
+ TRANS_RX_SMP_RESP_TIMEOUT_ERR,
+ RESERVED6,
+ RESERVED7,
+ RESERVED8,
+ RESERVED9,
+ TRANS_RX_R_ERR,
+ };
+ int index, i;
+
+ for (i = 0; i < ARRAY_SIZE(trans_rx_err_code_prio); i++) {
+ index = trans_rx_err_code_prio[i] - TRANS_RX_FAIL_BASE;
+ if (err_msk & (1 << index))
+ return trans_rx_err_code_prio[i];
+ }
+ return -1;
+}
+
+static int parse_dma_tx_err_code_v2_hw(u32 err_msk)
+{
+ const u8 dma_tx_err_code_prio[] = {
+ DMA_TX_UNEXP_XFER_ERR,
+ DMA_TX_UNEXP_RETRANS_ERR,
+ DMA_TX_XFER_LEN_OVERFLOW,
+ DMA_TX_XFER_OFFSET_ERR,
+ DMA_TX_RAM_ECC_ERR,
+ DMA_TX_DIF_LEN_ALIGN_ERR,
+ DMA_TX_DIF_CRC_ERR,
+ DMA_TX_DIF_APP_ERR,
+ DMA_TX_DIF_RPP_ERR,
+ DMA_TX_DATA_SGL_OVERFLOW,
+ DMA_TX_DIF_SGL_OVERFLOW,
+ };
+ int index, i;
+
+ for (i = 0; i < ARRAY_SIZE(dma_tx_err_code_prio); i++) {
+ index = dma_tx_err_code_prio[i] - DMA_TX_ERR_BASE;
+ err_msk = err_msk & DMA_TX_ERR_MSK;
+ if (err_msk & (1 << index))
+ return dma_tx_err_code_prio[i];
+ }
+ return -1;
+}
+
+static int parse_sipc_rx_err_code_v2_hw(u32 err_msk)
+{
+ const u8 sipc_rx_err_code_prio[] = {
+ SIPC_RX_FIS_STATUS_ERR_BIT_VLD,
+ SIPC_RX_PIO_WRSETUP_STATUS_DRQ_ERR,
+ SIPC_RX_FIS_STATUS_BSY_BIT_ERR,
+ SIPC_RX_WRSETUP_LEN_ODD_ERR,
+ SIPC_RX_WRSETUP_LEN_ZERO_ERR,
+ SIPC_RX_WRDATA_LEN_NOT_MATCH_ERR,
+ SIPC_RX_NCQ_WRSETUP_OFFSET_ERR,
+ SIPC_RX_NCQ_WRSETUP_AUTO_ACTIVE_ERR,
+ SIPC_RX_SATA_UNEXP_FIS_ERR,
+ SIPC_RX_WRSETUP_ESTATUS_ERR,
+ SIPC_RX_DATA_UNDERFLOW_ERR,
+ };
+ int index, i;
+
+ for (i = 0; i < ARRAY_SIZE(sipc_rx_err_code_prio); i++) {
+ index = sipc_rx_err_code_prio[i] - SIPC_RX_ERR_BASE;
+ err_msk = err_msk & SIPC_RX_ERR_MSK;
+ if (err_msk & (1 << (index + 0x10)))
+ return sipc_rx_err_code_prio[i];
+ }
+ return -1;
+}
+
+static int parse_dma_rx_err_code_v2_hw(u32 err_msk)
+{
+ const u8 dma_rx_err_code_prio[] = {
+ DMA_RX_UNKNOWN_FRM_ERR,
+ DMA_RX_DATA_LEN_OVERFLOW,
+ DMA_RX_DATA_LEN_UNDERFLOW,
+ DMA_RX_DATA_OFFSET_ERR,
+ RESERVED10,
+ DMA_RX_SATA_FRAME_TYPE_ERR,
+ DMA_RX_RESP_BUF_OVERFLOW,
+ DMA_RX_UNEXP_RETRANS_RESP_ERR,
+ DMA_RX_UNEXP_NORM_RESP_ERR,
+ DMA_RX_UNEXP_RDFRAME_ERR,
+ DMA_RX_PIO_DATA_LEN_ERR,
+ DMA_RX_RDSETUP_STATUS_ERR,
+ DMA_RX_RDSETUP_STATUS_DRQ_ERR,
+ DMA_RX_RDSETUP_STATUS_BSY_ERR,
+ DMA_RX_RDSETUP_LEN_ODD_ERR,
+ DMA_RX_RDSETUP_LEN_ZERO_ERR,
+ DMA_RX_RDSETUP_LEN_OVER_ERR,
+ DMA_RX_RDSETUP_OFFSET_ERR,
+ DMA_RX_RDSETUP_ACTIVE_ERR,
+ DMA_RX_RDSETUP_ESTATUS_ERR,
+ DMA_RX_RAM_ECC_ERR,
+ DMA_RX_DIF_CRC_ERR,
+ DMA_RX_DIF_APP_ERR,
+ DMA_RX_DIF_RPP_ERR,
+ DMA_RX_DATA_SGL_OVERFLOW,
+ DMA_RX_DIF_SGL_OVERFLOW,
+ };
+ int index, i;
+
+ for (i = 0; i < ARRAY_SIZE(dma_rx_err_code_prio); i++) {
+ index = dma_rx_err_code_prio[i] - DMA_RX_ERR_BASE;
+ if (err_msk & (1 << index))
+ return dma_rx_err_code_prio[i];
+ }
+ return -1;
+}
+
/* by default, task resp is complete */
static void slot_err_v2_hw(struct hisi_hba *hisi_hba,
struct sas_task *task,
- struct hisi_sas_slot *slot)
+ struct hisi_sas_slot *slot,
+ int err_phase)
{
struct task_status_struct *ts = &task->task_status;
struct hisi_sas_err_record_v2 *err_record = slot->status_buffer;
@@ -1451,21 +1915,23 @@ static void slot_err_v2_hw(struct hisi_hba *hisi_hba,
u32 dma_rx_err_type = cpu_to_le32(err_record->dma_rx_err_type);
int error = -1;
- if (dma_rx_err_type) {
- error = ffs(dma_rx_err_type)
- - 1 + DMA_RX_ERR_BASE;
- } else if (sipc_rx_err_type) {
- error = ffs(sipc_rx_err_type)
- - 1 + SIPC_RX_ERR_BASE;
- } else if (dma_tx_err_type) {
- error = ffs(dma_tx_err_type)
- - 1 + DMA_TX_ERR_BASE;
- } else if (trans_rx_fail_type) {
- error = ffs(trans_rx_fail_type)
- - 1 + TRANS_RX_FAIL_BASE;
- } else if (trans_tx_fail_type) {
- error = ffs(trans_tx_fail_type)
- - 1 + TRANS_TX_FAIL_BASE;
+ if (err_phase == 1) {
+ /* error in TX phase, the priority of error is: DW2 > DW0 */
+ error = parse_dma_tx_err_code_v2_hw(dma_tx_err_type);
+ if (error == -1)
+ error = parse_trans_tx_err_code_v2_hw(
+ trans_tx_fail_type);
+ } else if (err_phase == 2) {
+ /* error in RX phase, the priority is: DW1 > DW3 > DW2 */
+ error = parse_trans_rx_err_code_v2_hw(
+ trans_rx_fail_type);
+ if (error == -1) {
+ error = parse_dma_rx_err_code_v2_hw(
+ dma_rx_err_type);
+ if (error == -1)
+ error = parse_sipc_rx_err_code_v2_hw(
+ sipc_rx_err_type);
+ }
}
switch (task->task_proto) {
@@ -1478,12 +1944,6 @@ static void slot_err_v2_hw(struct hisi_hba *hisi_hba,
ts->open_rej_reason = SAS_OREJ_NO_DEST;
break;
}
- case TRANS_TX_OPEN_CNX_ERR_PATHWAY_BLOCKED:
- {
- ts->stat = SAS_OPEN_REJECT;
- ts->open_rej_reason = SAS_OREJ_PATH_BLOCKED;
- break;
- }
case TRANS_TX_OPEN_CNX_ERR_PROTOCOL_NOT_SUPPORTED:
{
ts->stat = SAS_OPEN_REJECT;
@@ -1502,19 +1962,15 @@ static void slot_err_v2_hw(struct hisi_hba *hisi_hba,
ts->open_rej_reason = SAS_OREJ_BAD_DEST;
break;
}
- case TRANS_TX_OPEN_CNX_ERR_BREAK_RCVD:
- {
- ts->stat = SAS_OPEN_REJECT;
- ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
- break;
- }
case TRANS_TX_OPEN_CNX_ERR_WRONG_DESTINATION:
{
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_WRONG_DEST;
break;
}
+ case DMA_RX_UNEXP_NORM_RESP_ERR:
case TRANS_TX_OPEN_CNX_ERR_ZONE_VIOLATION:
+ case DMA_RX_RESP_BUF_OVERFLOW:
{
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_UNKNOWN;
@@ -1526,16 +1982,6 @@ static void slot_err_v2_hw(struct hisi_hba *hisi_hba,
ts->stat = SAS_DEV_NO_RESPONSE;
break;
}
- case TRANS_RX_ERR_WITH_CLOSE_PHY_DISABLE:
- {
- ts->stat = SAS_PHY_DOWN;
- break;
- }
- case TRANS_TX_OPEN_CNX_ERR_OPEN_TIMEOUT:
- {
- ts->stat = SAS_OPEN_TO;
- break;
- }
case DMA_RX_DATA_LEN_OVERFLOW:
{
ts->stat = SAS_DATA_OVERRUN;
@@ -1543,60 +1989,65 @@ static void slot_err_v2_hw(struct hisi_hba *hisi_hba,
break;
}
case DMA_RX_DATA_LEN_UNDERFLOW:
- case SIPC_RX_DATA_UNDERFLOW_ERR:
{
- ts->residual = trans_tx_fail_type;
+ ts->residual = dma_rx_err_type;
ts->stat = SAS_DATA_UNDERRUN;
break;
}
- case TRANS_TX_ERR_FRAME_TXED:
- {
- /* This will request a retry */
- ts->stat = SAS_QUEUE_FULL;
- slot->abort = 1;
- break;
- }
case TRANS_TX_OPEN_FAIL_WITH_IT_NEXUS_LOSS:
case TRANS_TX_ERR_PHY_NOT_ENABLE:
case TRANS_TX_OPEN_CNX_ERR_BY_OTHER:
case TRANS_TX_OPEN_CNX_ERR_AIP_TIMEOUT:
+ case TRANS_TX_OPEN_CNX_ERR_BREAK_RCVD:
+ case TRANS_TX_OPEN_CNX_ERR_PATHWAY_BLOCKED:
+ case TRANS_TX_OPEN_CNX_ERR_OPEN_TIMEOUT:
case TRANS_TX_OPEN_RETRY_ERR_THRESHOLD_REACHED:
case TRANS_TX_ERR_WITH_BREAK_TIMEOUT:
case TRANS_TX_ERR_WITH_BREAK_REQUEST:
case TRANS_TX_ERR_WITH_BREAK_RECEVIED:
case TRANS_TX_ERR_WITH_CLOSE_TIMEOUT:
case TRANS_TX_ERR_WITH_CLOSE_NORMAL:
+ case TRANS_TX_ERR_WITH_CLOSE_PHYDISALE:
case TRANS_TX_ERR_WITH_CLOSE_DWS_TIMEOUT:
case TRANS_TX_ERR_WITH_CLOSE_COMINIT:
case TRANS_TX_ERR_WITH_NAK_RECEVIED:
case TRANS_TX_ERR_WITH_ACK_NAK_TIMEOUT:
- case TRANS_TX_ERR_WITH_IPTT_CONFLICT:
case TRANS_TX_ERR_WITH_CREDIT_TIMEOUT:
+ case TRANS_TX_ERR_WITH_IPTT_CONFLICT:
case TRANS_RX_ERR_WITH_RXFRAME_CRC_ERR:
case TRANS_RX_ERR_WITH_RXFIS_8B10B_DISP_ERR:
case TRANS_RX_ERR_WITH_RXFRAME_HAVE_ERRPRM:
+ case TRANS_RX_ERR_WITH_LINK_BUF_OVERRUN:
case TRANS_RX_ERR_WITH_BREAK_TIMEOUT:
case TRANS_RX_ERR_WITH_BREAK_REQUEST:
case TRANS_RX_ERR_WITH_BREAK_RECEVIED:
case TRANS_RX_ERR_WITH_CLOSE_NORMAL:
case TRANS_RX_ERR_WITH_CLOSE_DWS_TIMEOUT:
case TRANS_RX_ERR_WITH_CLOSE_COMINIT:
+ case TRANS_TX_ERR_FRAME_TXED:
+ case TRANS_RX_ERR_WITH_CLOSE_PHY_DISABLE:
case TRANS_RX_ERR_WITH_DATA_LEN0:
case TRANS_RX_ERR_WITH_BAD_HASH:
case TRANS_RX_XRDY_WLEN_ZERO_ERR:
case TRANS_RX_SSP_FRM_LEN_ERR:
case TRANS_RX_ERR_WITH_BAD_FRM_TYPE:
+ case DMA_TX_DATA_SGL_OVERFLOW:
case DMA_TX_UNEXP_XFER_ERR:
case DMA_TX_UNEXP_RETRANS_ERR:
case DMA_TX_XFER_LEN_OVERFLOW:
case DMA_TX_XFER_OFFSET_ERR:
+ case SIPC_RX_DATA_UNDERFLOW_ERR:
+ case DMA_RX_DATA_SGL_OVERFLOW:
case DMA_RX_DATA_OFFSET_ERR:
- case DMA_RX_UNEXP_NORM_RESP_ERR:
- case DMA_RX_UNEXP_RDFRAME_ERR:
+ case DMA_RX_RDSETUP_LEN_ODD_ERR:
+ case DMA_RX_RDSETUP_LEN_ZERO_ERR:
+ case DMA_RX_RDSETUP_LEN_OVER_ERR:
+ case DMA_RX_SATA_FRAME_TYPE_ERR:
case DMA_RX_UNKNOWN_FRM_ERR:
{
- ts->stat = SAS_OPEN_REJECT;
- ts->open_rej_reason = SAS_OREJ_UNKNOWN;
+ /* This will request a retry */
+ ts->stat = SAS_QUEUE_FULL;
+ slot->abort = 1;
break;
}
default:
@@ -1613,57 +2064,92 @@ static void slot_err_v2_hw(struct hisi_hba *hisi_hba,
case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
{
switch (error) {
- case TRANS_TX_OPEN_CNX_ERR_LOW_PHY_POWER:
- case TRANS_TX_OPEN_CNX_ERR_PATHWAY_BLOCKED:
case TRANS_TX_OPEN_CNX_ERR_NO_DESTINATION:
{
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_NO_DEST;
+ break;
+ }
+ case TRANS_TX_OPEN_CNX_ERR_LOW_PHY_POWER:
+ {
ts->resp = SAS_TASK_UNDELIVERED;
ts->stat = SAS_DEV_NO_RESPONSE;
break;
}
case TRANS_TX_OPEN_CNX_ERR_PROTOCOL_NOT_SUPPORTED:
+ {
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_EPROTO;
+ break;
+ }
case TRANS_TX_OPEN_CNX_ERR_CONNECTION_RATE_NOT_SUPPORTED:
+ {
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_CONN_RATE;
+ break;
+ }
case TRANS_TX_OPEN_CNX_ERR_BAD_DESTINATION:
- case TRANS_TX_OPEN_CNX_ERR_BREAK_RCVD:
+ {
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_CONN_RATE;
+ break;
+ }
case TRANS_TX_OPEN_CNX_ERR_WRONG_DESTINATION:
- case TRANS_TX_OPEN_CNX_ERR_ZONE_VIOLATION:
- case TRANS_TX_OPEN_CNX_ERR_STP_RESOURCES_BUSY:
{
ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_WRONG_DEST;
break;
}
- case TRANS_TX_OPEN_CNX_ERR_OPEN_TIMEOUT:
+ case DMA_RX_RESP_BUF_OVERFLOW:
+ case DMA_RX_UNEXP_NORM_RESP_ERR:
+ case TRANS_TX_OPEN_CNX_ERR_ZONE_VIOLATION:
{
- ts->stat = SAS_OPEN_TO;
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_UNKNOWN;
break;
}
case DMA_RX_DATA_LEN_OVERFLOW:
{
ts->stat = SAS_DATA_OVERRUN;
+ ts->residual = 0;
+ break;
+ }
+ case DMA_RX_DATA_LEN_UNDERFLOW:
+ {
+ ts->residual = dma_rx_err_type;
+ ts->stat = SAS_DATA_UNDERRUN;
break;
}
case TRANS_TX_OPEN_FAIL_WITH_IT_NEXUS_LOSS:
case TRANS_TX_ERR_PHY_NOT_ENABLE:
case TRANS_TX_OPEN_CNX_ERR_BY_OTHER:
case TRANS_TX_OPEN_CNX_ERR_AIP_TIMEOUT:
+ case TRANS_TX_OPEN_CNX_ERR_BREAK_RCVD:
+ case TRANS_TX_OPEN_CNX_ERR_PATHWAY_BLOCKED:
+ case TRANS_TX_OPEN_CNX_ERR_OPEN_TIMEOUT:
case TRANS_TX_OPEN_RETRY_ERR_THRESHOLD_REACHED:
case TRANS_TX_ERR_WITH_BREAK_TIMEOUT:
case TRANS_TX_ERR_WITH_BREAK_REQUEST:
case TRANS_TX_ERR_WITH_BREAK_RECEVIED:
case TRANS_TX_ERR_WITH_CLOSE_TIMEOUT:
case TRANS_TX_ERR_WITH_CLOSE_NORMAL:
+ case TRANS_TX_ERR_WITH_CLOSE_PHYDISALE:
case TRANS_TX_ERR_WITH_CLOSE_DWS_TIMEOUT:
case TRANS_TX_ERR_WITH_CLOSE_COMINIT:
- case TRANS_TX_ERR_WITH_NAK_RECEVIED:
case TRANS_TX_ERR_WITH_ACK_NAK_TIMEOUT:
case TRANS_TX_ERR_WITH_CREDIT_TIMEOUT:
+ case TRANS_TX_ERR_WITH_OPEN_BY_DES_OR_OTHERS:
case TRANS_TX_ERR_WITH_WAIT_RECV_TIMEOUT:
- case TRANS_RX_ERR_WITH_RXFIS_8B10B_DISP_ERR:
case TRANS_RX_ERR_WITH_RXFRAME_HAVE_ERRPRM:
+ case TRANS_RX_ERR_WITH_RXFIS_8B10B_DISP_ERR:
case TRANS_RX_ERR_WITH_RXFIS_DECODE_ERROR:
case TRANS_RX_ERR_WITH_RXFIS_CRC_ERR:
case TRANS_RX_ERR_WITH_RXFRAME_LENGTH_OVERRUN:
case TRANS_RX_ERR_WITH_RXFIS_RX_SYNCP:
+ case TRANS_RX_ERR_WITH_LINK_BUF_OVERRUN:
+ case TRANS_RX_ERR_WITH_BREAK_TIMEOUT:
+ case TRANS_RX_ERR_WITH_BREAK_REQUEST:
+ case TRANS_RX_ERR_WITH_BREAK_RECEVIED:
case TRANS_RX_ERR_WITH_CLOSE_NORMAL:
case TRANS_RX_ERR_WITH_CLOSE_PHY_DISABLE:
case TRANS_RX_ERR_WITH_CLOSE_DWS_TIMEOUT:
@@ -1671,7 +2157,12 @@ static void slot_err_v2_hw(struct hisi_hba *hisi_hba,
case TRANS_RX_ERR_WITH_DATA_LEN0:
case TRANS_RX_ERR_WITH_BAD_HASH:
case TRANS_RX_XRDY_WLEN_ZERO_ERR:
- case TRANS_RX_SSP_FRM_LEN_ERR:
+ case TRANS_RX_ERR_WITH_BAD_FRM_TYPE:
+ case DMA_TX_DATA_SGL_OVERFLOW:
+ case DMA_TX_UNEXP_XFER_ERR:
+ case DMA_TX_UNEXP_RETRANS_ERR:
+ case DMA_TX_XFER_LEN_OVERFLOW:
+ case DMA_TX_XFER_OFFSET_ERR:
case SIPC_RX_FIS_STATUS_ERR_BIT_VLD:
case SIPC_RX_PIO_WRSETUP_STATUS_DRQ_ERR:
case SIPC_RX_FIS_STATUS_BSY_BIT_ERR:
@@ -1679,6 +2170,8 @@ static void slot_err_v2_hw(struct hisi_hba *hisi_hba,
case SIPC_RX_WRSETUP_LEN_ZERO_ERR:
case SIPC_RX_WRDATA_LEN_NOT_MATCH_ERR:
case SIPC_RX_SATA_UNEXP_FIS_ERR:
+ case DMA_RX_DATA_SGL_OVERFLOW:
+ case DMA_RX_DATA_OFFSET_ERR:
case DMA_RX_SATA_FRAME_TYPE_ERR:
case DMA_RX_UNEXP_RDFRAME_ERR:
case DMA_RX_PIO_DATA_LEN_ERR:
@@ -1692,8 +2185,11 @@ static void slot_err_v2_hw(struct hisi_hba *hisi_hba,
case DMA_RX_RDSETUP_ACTIVE_ERR:
case DMA_RX_RDSETUP_ESTATUS_ERR:
case DMA_RX_UNKNOWN_FRM_ERR:
+ case TRANS_RX_SSP_FRM_LEN_ERR:
+ case TRANS_TX_OPEN_CNX_ERR_STP_RESOURCES_BUSY:
{
- ts->stat = SAS_OPEN_REJECT;
+ slot->abort = 1;
+ ts->stat = SAS_PHY_DOWN;
break;
}
default:
@@ -1711,8 +2207,7 @@ static void slot_err_v2_hw(struct hisi_hba *hisi_hba,
}
static int
-slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot,
- int abort)
+slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
{
struct sas_task *task = slot->task;
struct hisi_sas_device *sas_dev;
@@ -1724,6 +2219,8 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot,
hisi_hba->complete_hdr[slot->cmplt_queue];
struct hisi_sas_complete_v2_hdr *complete_hdr =
&complete_queue[slot->cmplt_queue_slot];
+ unsigned long flags;
+ int aborted;
if (unlikely(!task || !task->lldd_task || !task->dev))
return -EINVAL;
@@ -1732,16 +2229,23 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot,
device = task->dev;
sas_dev = device->lldd_dev;
+ spin_lock_irqsave(&task->task_state_lock, flags);
+ aborted = task->task_state_flags & SAS_TASK_STATE_ABORTED;
task->task_state_flags &=
~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
- task->task_state_flags |= SAS_TASK_STATE_DONE;
+ spin_unlock_irqrestore(&task->task_state_lock, flags);
memset(ts, 0, sizeof(*ts));
ts->resp = SAS_TASK_COMPLETE;
- if (unlikely(!sas_dev || abort)) {
- if (!sas_dev)
- dev_dbg(dev, "slot complete: port has not device\n");
+ if (unlikely(aborted)) {
+ ts->stat = SAS_ABORTED_TASK;
+ hisi_sas_slot_task_free(hisi_hba, task, slot);
+ return -1;
+ }
+
+ if (unlikely(!sas_dev)) {
+ dev_dbg(dev, "slot complete: port has no device\n");
ts->stat = SAS_PHY_DOWN;
goto out;
}
@@ -1755,16 +2259,19 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot,
goto out;
case STAT_IO_COMPLETE:
/* internal abort command complete */
- ts->stat = TMF_RESP_FUNC_COMPLETE;
+ ts->stat = TMF_RESP_FUNC_SUCC;
+ del_timer(&slot->internal_abort_timer);
goto out;
case STAT_IO_NO_DEVICE:
ts->stat = TMF_RESP_FUNC_COMPLETE;
+ del_timer(&slot->internal_abort_timer);
goto out;
case STAT_IO_NOT_VALID:
/* abort single io, controller don't find
* the io need to abort
*/
ts->stat = TMF_RESP_FUNC_FAILED;
+ del_timer(&slot->internal_abort_timer);
goto out;
default:
break;
@@ -1772,13 +2279,17 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot,
if ((complete_hdr->dw0 & CMPLT_HDR_ERX_MSK) &&
(!(complete_hdr->dw0 & CMPLT_HDR_RSPNS_XFRD_MSK))) {
+ u32 err_phase = (complete_hdr->dw0 & CMPLT_HDR_ERR_PHASE_MSK)
+ >> CMPLT_HDR_ERR_PHASE_OFF;
+
+		/* Analyse in which phase (TX or RX) the error happened */
+ if (ERR_ON_TX_PHASE(err_phase))
+ slot_err_v2_hw(hisi_hba, task, slot, 1);
+ else if (ERR_ON_RX_PHASE(err_phase))
+ slot_err_v2_hw(hisi_hba, task, slot, 2);
- slot_err_v2_hw(hisi_hba, task, slot);
- if (unlikely(slot->abort)) {
- queue_work(hisi_hba->wq, &slot->abort_slot);
- /* immediately return and do not complete */
+ if (unlikely(slot->abort))
return ts->stat;
- }
goto out;
}
@@ -1830,7 +2341,9 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot,
}
out:
-
+ spin_lock_irqsave(&task->task_state_lock, flags);
+ task->task_state_flags |= SAS_TASK_STATE_DONE;
+ spin_unlock_irqrestore(&task->task_state_lock, flags);
hisi_sas_slot_task_free(hisi_hba, task, slot);
sts = ts->stat;
@@ -1920,7 +2433,8 @@ static int prep_ata_v2_hw(struct hisi_hba *hisi_hba,
struct domain_device *parent_dev = device->parent;
struct hisi_sas_device *sas_dev = device->lldd_dev;
struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
- struct hisi_sas_port *port = device->port->lldd_port;
+ struct asd_sas_port *sas_port = device->port;
+ struct hisi_sas_port *port = to_hisi_sas_port(sas_port);
u8 *buf_cmd;
int has_data = 0, rc = 0, hdr_tag = 0;
u32 dw1 = 0, dw2 = 0;
@@ -1947,7 +2461,8 @@ static int prep_ata_v2_hw(struct hisi_hba *hisi_hba,
dw1 &= ~CMD_HDR_DIR_MSK;
}
- if (0 == task->ata_task.fis.command)
+ if ((task->ata_task.fis.command == ATA_CMD_DEV_RESET) &&
+ (task->ata_task.fis.control & ATA_SRST))
dw1 |= 1 << CMD_HDR_RESET_OFF;
dw1 |= (get_ata_protocol(task->ata_task.fis.command, task->data_dir))
@@ -1990,6 +2505,40 @@ static int prep_ata_v2_hw(struct hisi_hba *hisi_hba,
return 0;
}
+static void hisi_sas_internal_abort_quirk_timeout(unsigned long data)
+{
+ struct hisi_sas_slot *slot = (struct hisi_sas_slot *)data;
+ struct hisi_sas_port *port = slot->port;
+ struct asd_sas_port *asd_sas_port;
+ struct asd_sas_phy *sas_phy;
+
+ if (!port)
+ return;
+
+ asd_sas_port = &port->sas_port;
+
+ /* Kick the hardware - send break command */
+ list_for_each_entry(sas_phy, &asd_sas_port->phy_list, port_phy_el) {
+ struct hisi_sas_phy *phy = sas_phy->lldd_phy;
+ struct hisi_hba *hisi_hba = phy->hisi_hba;
+ int phy_no = sas_phy->id;
+ u32 link_dfx2;
+
+ link_dfx2 = hisi_sas_phy_read32(hisi_hba, phy_no, LINK_DFX2);
+ if ((link_dfx2 == LINK_DFX2_RCVR_HOLD_STS_MSK) ||
+ (link_dfx2 & LINK_DFX2_SEND_HOLD_STS_MSK)) {
+ u32 txid_auto;
+
+ txid_auto = hisi_sas_phy_read32(hisi_hba, phy_no,
+ TXID_AUTO);
+ txid_auto |= TXID_AUTO_CTB_MSK;
+ hisi_sas_phy_write32(hisi_hba, phy_no, TXID_AUTO,
+ txid_auto);
+ return;
+ }
+ }
+}
+
static int prep_abort_v2_hw(struct hisi_hba *hisi_hba,
struct hisi_sas_slot *slot,
int device_id, int abort_flag, int tag_to_abort)
@@ -1998,6 +2547,13 @@ static int prep_abort_v2_hw(struct hisi_hba *hisi_hba,
struct domain_device *dev = task->dev;
struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
struct hisi_sas_port *port = slot->port;
+ struct timer_list *timer = &slot->internal_abort_timer;
+
+ /* setup the quirk timer */
+ setup_timer(timer, hisi_sas_internal_abort_quirk_timeout,
+ (unsigned long)slot);
+ /* Set the timeout to 10ms less than internal abort timeout */
+ mod_timer(timer, jiffies + msecs_to_jiffies(100));
/* dw0 */
hdr->dw0 = cpu_to_le32((5 << CMD_HDR_CMD_OFF) | /*abort*/
@@ -2018,8 +2574,8 @@ static int prep_abort_v2_hw(struct hisi_hba *hisi_hba,
static int phy_up_v2_hw(int phy_no, struct hisi_hba *hisi_hba)
{
- int i, res = 0;
- u32 context, port_id, link_rate, hard_phy_linkrate;
+ int i, res = IRQ_HANDLED;
+ u32 port_id, link_rate, hard_phy_linkrate;
struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
struct asd_sas_phy *sas_phy = &phy->sas_phy;
struct device *dev = &hisi_hba->pdev->dev;
@@ -2028,9 +2584,7 @@ static int phy_up_v2_hw(int phy_no, struct hisi_hba *hisi_hba)
hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_PHY_ENA_MSK, 1);
- /* Check for SATA dev */
- context = hisi_sas_read32(hisi_hba, PHY_CONTEXT);
- if (context & (1 << phy_no))
+ if (is_sata_phy_v2_hw(hisi_hba, phy_no))
goto end;
if (phy_no == 8) {
@@ -2106,7 +2660,6 @@ static bool check_any_wideports_v2_hw(struct hisi_hba *hisi_hba)
static int phy_down_v2_hw(int phy_no, struct hisi_hba *hisi_hba)
{
- int res = 0;
u32 phy_state, sl_ctrl, txid_auto;
struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
struct hisi_sas_port *port = phy->port;
@@ -2131,7 +2684,7 @@ static int phy_down_v2_hw(int phy_no, struct hisi_hba *hisi_hba)
hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0, CHL_INT0_NOT_RDY_MSK);
hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_NOT_RDY_MSK, 0);
- return res;
+ return IRQ_HANDLED;
}
static irqreturn_t int_phy_updown_v2_hw(int irq_no, void *p)
@@ -2139,35 +2692,58 @@ static irqreturn_t int_phy_updown_v2_hw(int irq_no, void *p)
struct hisi_hba *hisi_hba = p;
u32 irq_msk;
int phy_no = 0;
- irqreturn_t res = IRQ_HANDLED;
irq_msk = (hisi_sas_read32(hisi_hba, HGC_INVLD_DQE_INFO)
>> HGC_INVLD_DQE_INFO_FB_CH0_OFF) & 0x1ff;
while (irq_msk) {
if (irq_msk & 1) {
- u32 irq_value = hisi_sas_phy_read32(hisi_hba, phy_no,
- CHL_INT0);
+ u32 reg_value = hisi_sas_phy_read32(hisi_hba, phy_no,
+ CHL_INT0);
- if (irq_value & CHL_INT0_SL_PHY_ENABLE_MSK)
+ switch (reg_value & (CHL_INT0_NOT_RDY_MSK |
+ CHL_INT0_SL_PHY_ENABLE_MSK)) {
+
+ case CHL_INT0_SL_PHY_ENABLE_MSK:
/* phy up */
- if (phy_up_v2_hw(phy_no, hisi_hba)) {
- res = IRQ_NONE;
- goto end;
- }
+ if (phy_up_v2_hw(phy_no, hisi_hba) ==
+ IRQ_NONE)
+ return IRQ_NONE;
+ break;
- if (irq_value & CHL_INT0_NOT_RDY_MSK)
+ case CHL_INT0_NOT_RDY_MSK:
/* phy down */
- if (phy_down_v2_hw(phy_no, hisi_hba)) {
- res = IRQ_NONE;
- goto end;
+ if (phy_down_v2_hw(phy_no, hisi_hba) ==
+ IRQ_NONE)
+ return IRQ_NONE;
+ break;
+
+ case (CHL_INT0_NOT_RDY_MSK |
+ CHL_INT0_SL_PHY_ENABLE_MSK):
+ reg_value = hisi_sas_read32(hisi_hba,
+ PHY_STATE);
+ if (reg_value & BIT(phy_no)) {
+ /* phy up */
+ if (phy_up_v2_hw(phy_no, hisi_hba) ==
+ IRQ_NONE)
+ return IRQ_NONE;
+ } else {
+ /* phy down */
+ if (phy_down_v2_hw(phy_no, hisi_hba) ==
+ IRQ_NONE)
+ return IRQ_NONE;
}
+ break;
+
+ default:
+ break;
+ }
+
}
irq_msk >>= 1;
phy_no++;
}
-end:
- return res;
+ return IRQ_HANDLED;
}
static void phy_bcast_v2_hw(int phy_no, struct hisi_hba *hisi_hba)
@@ -2342,94 +2918,105 @@ static void multi_bit_ecc_error_process_v2_hw(struct hisi_hba *hisi_hba,
if (irq_value & BIT(SAS_ECC_INTR_DQE_ECC_MB_OFF)) {
reg_val = hisi_sas_read32(hisi_hba, HGC_DQE_ECC_ADDR);
- panic("%s: hgc_dqe_accbad_intr (0x%x) found: \
+ dev_warn(dev, "hgc_dqe_accbad_intr (0x%x) found: \
Ram address is 0x%08X\n",
- dev_name(dev), irq_value,
+ irq_value,
(reg_val & HGC_DQE_ECC_MB_ADDR_MSK) >>
HGC_DQE_ECC_MB_ADDR_OFF);
+ queue_work(hisi_hba->wq, &hisi_hba->rst_work);
}
if (irq_value & BIT(SAS_ECC_INTR_IOST_ECC_MB_OFF)) {
reg_val = hisi_sas_read32(hisi_hba, HGC_IOST_ECC_ADDR);
- panic("%s: hgc_iost_accbad_intr (0x%x) found: \
+ dev_warn(dev, "hgc_iost_accbad_intr (0x%x) found: \
Ram address is 0x%08X\n",
- dev_name(dev), irq_value,
+ irq_value,
(reg_val & HGC_IOST_ECC_MB_ADDR_MSK) >>
HGC_IOST_ECC_MB_ADDR_OFF);
+ queue_work(hisi_hba->wq, &hisi_hba->rst_work);
}
if (irq_value & BIT(SAS_ECC_INTR_ITCT_ECC_MB_OFF)) {
reg_val = hisi_sas_read32(hisi_hba, HGC_ITCT_ECC_ADDR);
- panic("%s: hgc_itct_accbad_intr (0x%x) found: \
+ dev_warn(dev,"hgc_itct_accbad_intr (0x%x) found: \
Ram address is 0x%08X\n",
- dev_name(dev), irq_value,
+ irq_value,
(reg_val & HGC_ITCT_ECC_MB_ADDR_MSK) >>
HGC_ITCT_ECC_MB_ADDR_OFF);
+ queue_work(hisi_hba->wq, &hisi_hba->rst_work);
}
if (irq_value & BIT(SAS_ECC_INTR_IOSTLIST_ECC_MB_OFF)) {
reg_val = hisi_sas_read32(hisi_hba, HGC_LM_DFX_STATUS2);
- panic("%s: hgc_iostl_accbad_intr (0x%x) found: \
+ dev_warn(dev, "hgc_iostl_accbad_intr (0x%x) found: \
memory address is 0x%08X\n",
- dev_name(dev), irq_value,
+ irq_value,
(reg_val & HGC_LM_DFX_STATUS2_IOSTLIST_MSK) >>
HGC_LM_DFX_STATUS2_IOSTLIST_OFF);
+ queue_work(hisi_hba->wq, &hisi_hba->rst_work);
}
if (irq_value & BIT(SAS_ECC_INTR_ITCTLIST_ECC_MB_OFF)) {
reg_val = hisi_sas_read32(hisi_hba, HGC_LM_DFX_STATUS2);
- panic("%s: hgc_itctl_accbad_intr (0x%x) found: \
+ dev_warn(dev, "hgc_itctl_accbad_intr (0x%x) found: \
memory address is 0x%08X\n",
- dev_name(dev), irq_value,
+ irq_value,
(reg_val & HGC_LM_DFX_STATUS2_ITCTLIST_MSK) >>
HGC_LM_DFX_STATUS2_ITCTLIST_OFF);
+ queue_work(hisi_hba->wq, &hisi_hba->rst_work);
}
if (irq_value & BIT(SAS_ECC_INTR_CQE_ECC_MB_OFF)) {
reg_val = hisi_sas_read32(hisi_hba, HGC_CQE_ECC_ADDR);
- panic("%s: hgc_cqe_accbad_intr (0x%x) found: \
+ dev_warn(dev, "hgc_cqe_accbad_intr (0x%x) found: \
Ram address is 0x%08X\n",
- dev_name(dev), irq_value,
+ irq_value,
(reg_val & HGC_CQE_ECC_MB_ADDR_MSK) >>
HGC_CQE_ECC_MB_ADDR_OFF);
+ queue_work(hisi_hba->wq, &hisi_hba->rst_work);
}
if (irq_value & BIT(SAS_ECC_INTR_NCQ_MEM0_ECC_MB_OFF)) {
reg_val = hisi_sas_read32(hisi_hba, HGC_RXM_DFX_STATUS14);
- panic("%s: rxm_mem0_accbad_intr (0x%x) found: \
+ dev_warn(dev, "rxm_mem0_accbad_intr (0x%x) found: \
memory address is 0x%08X\n",
- dev_name(dev), irq_value,
+ irq_value,
(reg_val & HGC_RXM_DFX_STATUS14_MEM0_MSK) >>
HGC_RXM_DFX_STATUS14_MEM0_OFF);
+ queue_work(hisi_hba->wq, &hisi_hba->rst_work);
}
if (irq_value & BIT(SAS_ECC_INTR_NCQ_MEM1_ECC_MB_OFF)) {
reg_val = hisi_sas_read32(hisi_hba, HGC_RXM_DFX_STATUS14);
- panic("%s: rxm_mem1_accbad_intr (0x%x) found: \
+ dev_warn(dev, "rxm_mem1_accbad_intr (0x%x) found: \
memory address is 0x%08X\n",
- dev_name(dev), irq_value,
+ irq_value,
(reg_val & HGC_RXM_DFX_STATUS14_MEM1_MSK) >>
HGC_RXM_DFX_STATUS14_MEM1_OFF);
+ queue_work(hisi_hba->wq, &hisi_hba->rst_work);
}
if (irq_value & BIT(SAS_ECC_INTR_NCQ_MEM2_ECC_MB_OFF)) {
reg_val = hisi_sas_read32(hisi_hba, HGC_RXM_DFX_STATUS14);
- panic("%s: rxm_mem2_accbad_intr (0x%x) found: \
+ dev_warn(dev, "rxm_mem2_accbad_intr (0x%x) found: \
memory address is 0x%08X\n",
- dev_name(dev), irq_value,
+ irq_value,
(reg_val & HGC_RXM_DFX_STATUS14_MEM2_MSK) >>
HGC_RXM_DFX_STATUS14_MEM2_OFF);
+ queue_work(hisi_hba->wq, &hisi_hba->rst_work);
}
if (irq_value & BIT(SAS_ECC_INTR_NCQ_MEM3_ECC_MB_OFF)) {
reg_val = hisi_sas_read32(hisi_hba, HGC_RXM_DFX_STATUS15);
- panic("%s: rxm_mem3_accbad_intr (0x%x) found: \
+ dev_warn(dev, "rxm_mem3_accbad_intr (0x%x) found: \
memory address is 0x%08X\n",
- dev_name(dev), irq_value,
+ irq_value,
(reg_val & HGC_RXM_DFX_STATUS15_MEM3_MSK) >>
HGC_RXM_DFX_STATUS15_MEM3_OFF);
+ queue_work(hisi_hba->wq, &hisi_hba->rst_work);
}
+ return;
}
static irqreturn_t fatal_ecc_int_v2_hw(int irq_no, void *p)
@@ -2487,23 +3074,27 @@ static irqreturn_t fatal_axi_int_v2_hw(int irq_no, void *p)
if (irq_value & BIT(ENT_INT_SRC3_WP_DEPTH_OFF)) {
hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
1 << ENT_INT_SRC3_WP_DEPTH_OFF);
- panic("%s: write pointer and depth error (0x%x) \
+ dev_warn(dev, "write pointer and depth error (0x%x) \
found!\n",
- dev_name(dev), irq_value);
+ irq_value);
+ queue_work(hisi_hba->wq, &hisi_hba->rst_work);
}
if (irq_value & BIT(ENT_INT_SRC3_IPTT_SLOT_NOMATCH_OFF)) {
hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
1 <<
ENT_INT_SRC3_IPTT_SLOT_NOMATCH_OFF);
- panic("%s: iptt no match slot error (0x%x) found!\n",
- dev_name(dev), irq_value);
+ dev_warn(dev, "iptt no match slot error (0x%x) found!\n",
+ irq_value);
+ queue_work(hisi_hba->wq, &hisi_hba->rst_work);
}
- if (irq_value & BIT(ENT_INT_SRC3_RP_DEPTH_OFF))
- panic("%s: read pointer and depth error (0x%x) \
+ if (irq_value & BIT(ENT_INT_SRC3_RP_DEPTH_OFF)) {
+ dev_warn(dev, "read pointer and depth error (0x%x) \
found!\n",
- dev_name(dev), irq_value);
+ irq_value);
+ queue_work(hisi_hba->wq, &hisi_hba->rst_work);
+ }
if (irq_value & BIT(ENT_INT_SRC3_AXI_OFF)) {
int i;
@@ -2514,10 +3105,11 @@ static irqreturn_t fatal_axi_int_v2_hw(int irq_no, void *p)
HGC_AXI_FIFO_ERR_INFO);
for (i = 0; i < AXI_ERR_NR; i++) {
- if (err_value & BIT(i))
- panic("%s: %s (0x%x) found!\n",
- dev_name(dev),
+ if (err_value & BIT(i)) {
+ dev_warn(dev, "%s (0x%x) found!\n",
axi_err_info[i], irq_value);
+ queue_work(hisi_hba->wq, &hisi_hba->rst_work);
+ }
}
}
@@ -2530,10 +3122,11 @@ static irqreturn_t fatal_axi_int_v2_hw(int irq_no, void *p)
HGC_AXI_FIFO_ERR_INFO);
for (i = 0; i < FIFO_ERR_NR; i++) {
- if (err_value & BIT(AXI_ERR_NR + i))
- panic("%s: %s (0x%x) found!\n",
- dev_name(dev),
+ if (err_value & BIT(AXI_ERR_NR + i)) {
+ dev_warn(dev, "%s (0x%x) found!\n",
fifo_err_info[i], irq_value);
+ queue_work(hisi_hba->wq, &hisi_hba->rst_work);
+ }
}
}
@@ -2541,15 +3134,17 @@ static irqreturn_t fatal_axi_int_v2_hw(int irq_no, void *p)
if (irq_value & BIT(ENT_INT_SRC3_LM_OFF)) {
hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
1 << ENT_INT_SRC3_LM_OFF);
- panic("%s: LM add/fetch list error (0x%x) found!\n",
- dev_name(dev), irq_value);
+ dev_warn(dev, "LM add/fetch list error (0x%x) found!\n",
+ irq_value);
+ queue_work(hisi_hba->wq, &hisi_hba->rst_work);
}
if (irq_value & BIT(ENT_INT_SRC3_ABT_OFF)) {
hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
1 << ENT_INT_SRC3_ABT_OFF);
- panic("%s: SAS_HGC_ABT fetch LM list error (0x%x) found!\n",
- dev_name(dev), irq_value);
+ dev_warn(dev, "SAS_HGC_ABT fetch LM list error (0x%x) found!\n",
+ irq_value);
+ queue_work(hisi_hba->wq, &hisi_hba->rst_work);
}
}
@@ -2568,6 +3163,9 @@ static void cq_tasklet_v2_hw(unsigned long val)
u32 rd_point = cq->rd_point, wr_point, dev_id;
int queue = cq->id;
+ if (unlikely(hisi_hba->reject_stp_links_msk))
+ phys_try_accept_stp_links_v2_hw(hisi_hba);
+
complete_queue = hisi_hba->complete_hdr[queue];
spin_lock(&hisi_hba->lock);
@@ -2600,7 +3198,7 @@ static void cq_tasklet_v2_hw(unsigned long val)
slot = &hisi_hba->slot_info[iptt];
slot->cmplt_queue_slot = rd_point;
slot->cmplt_queue = queue;
- slot_complete_v2_hw(hisi_hba, slot, 0);
+ slot_complete_v2_hw(hisi_hba, slot);
act_tmp &= ~(1 << ncq_tag_count);
ncq_tag_count = ffs(act_tmp);
@@ -2610,7 +3208,7 @@ static void cq_tasklet_v2_hw(unsigned long val)
slot = &hisi_hba->slot_info[iptt];
slot->cmplt_queue_slot = rd_point;
slot->cmplt_queue = queue;
- slot_complete_v2_hw(hisi_hba, slot, 0);
+ slot_complete_v2_hw(hisi_hba, slot);
}
if (++rd_point >= HISI_SAS_QUEUE_SLOTS)
@@ -2842,6 +3440,8 @@ static int hisi_sas_v2_init(struct hisi_hba *hisi_hba)
{
int rc;
+ memset(hisi_hba->sata_dev_bitmap, 0, sizeof(hisi_hba->sata_dev_bitmap));
+
rc = hw_init_v2_hw(hisi_hba);
if (rc)
return rc;
@@ -2850,7 +3450,88 @@ static int hisi_sas_v2_init(struct hisi_hba *hisi_hba)
if (rc)
return rc;
- phys_init_v2_hw(hisi_hba);
+ return 0;
+}
+
+static void interrupt_disable_v2_hw(struct hisi_hba *hisi_hba)
+{
+ struct platform_device *pdev = hisi_hba->pdev;
+ int i;
+
+ for (i = 0; i < hisi_hba->queue_count; i++)
+ hisi_sas_write32(hisi_hba, OQ0_INT_SRC_MSK + 0x4 * i, 0x1);
+
+ hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, 0xffffffff);
+ hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK2, 0xffffffff);
+ hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0xffffffff);
+ hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, 0xffffffff);
+
+ for (i = 0; i < hisi_hba->n_phy; i++) {
+ hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK, 0xffffffff);
+ hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0xffffffff);
+ }
+
+ for (i = 0; i < 128; i++)
+ synchronize_irq(platform_get_irq(pdev, i));
+}
+
+static int soft_reset_v2_hw(struct hisi_hba *hisi_hba)
+{
+ struct device *dev = &hisi_hba->pdev->dev;
+ u32 old_state, state;
+ int rc, cnt;
+ int phy_no;
+
+ old_state = hisi_sas_read32(hisi_hba, PHY_STATE);
+
+ interrupt_disable_v2_hw(hisi_hba);
+ hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0x0);
+
+ stop_phys_v2_hw(hisi_hba);
+
+ mdelay(10);
+
+ hisi_sas_write32(hisi_hba, AXI_MASTER_CFG_BASE + AM_CTRL_GLOBAL, 0x1);
+
+ /* wait until bus idle */
+ cnt = 0;
+ while (1) {
+ u32 status = hisi_sas_read32_relaxed(hisi_hba,
+ AXI_MASTER_CFG_BASE + AM_CURR_TRANS_RETURN);
+
+ if (status == 0x3)
+ break;
+
+ udelay(10);
+ if (cnt++ > 10) {
+ dev_info(dev, "wait axi bus state to idle timeout!\n");
+ return -1;
+ }
+ }
+
+ hisi_sas_init_mem(hisi_hba);
+
+ rc = hw_init_v2_hw(hisi_hba);
+ if (rc)
+ return rc;
+
+ phys_reject_stp_links_v2_hw(hisi_hba);
+
+ /* Re-enable the PHYs */
+ for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) {
+ struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
+ struct asd_sas_phy *sas_phy = &phy->sas_phy;
+
+ if (sas_phy->enabled)
+ start_phy_v2_hw(hisi_hba, phy_no);
+ }
+
+ /* Wait for the PHYs to come up and read the PHY state */
+ msleep(1000);
+
+ state = hisi_sas_read32(hisi_hba, PHY_STATE);
+
+ hisi_sas_rescan_topology(hisi_hba, old_state, state);
return 0;
}
@@ -2870,6 +3551,7 @@ static const struct hisi_sas_hw hisi_sas_v2_hw = {
.get_free_slot = get_free_slot_v2_hw,
.start_delivery = start_delivery_v2_hw,
.slot_complete = slot_complete_v2_hw,
+ .phys_init = phys_init_v2_hw,
.phy_enable = enable_phy_v2_hw,
.phy_disable = disable_phy_v2_hw,
.phy_hard_reset = phy_hard_reset_v2_hw,
@@ -2877,6 +3559,7 @@ static const struct hisi_sas_hw hisi_sas_v2_hw = {
.phy_get_max_linkrate = phy_get_max_linkrate_v2_hw,
.max_command_entries = HISI_SAS_COMMAND_ENTRIES_V2_HW,
.complete_hdr_size = sizeof(struct hisi_sas_complete_v2_hdr),
+ .soft_reset = soft_reset_v2_hw,
};
static int hisi_sas_v2_probe(struct platform_device *pdev)
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 9d659aaace15..73daace478cb 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -60,7 +60,7 @@
* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.'
* with an optional trailing '-' followed by a byte value (0-255).
*/
-#define HPSA_DRIVER_VERSION "3.4.16-0"
+#define HPSA_DRIVER_VERSION "3.4.18-0"
#define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
#define HPSA "hpsa"
@@ -108,10 +108,12 @@ static const struct pci_device_id hpsa_pci_device_id[] = {
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3354},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3355},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3356},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103c, 0x1920},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1921},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1922},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1923},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1924},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103c, 0x1925},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1926},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1928},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1929},
@@ -171,10 +173,12 @@ static struct board_type products[] = {
{0x3354103C, "Smart Array P420i", &SA5_access},
{0x3355103C, "Smart Array P220i", &SA5_access},
{0x3356103C, "Smart Array P721m", &SA5_access},
+ {0x1920103C, "Smart Array P430i", &SA5_access},
{0x1921103C, "Smart Array P830i", &SA5_access},
{0x1922103C, "Smart Array P430", &SA5_access},
{0x1923103C, "Smart Array P431", &SA5_access},
{0x1924103C, "Smart Array P830", &SA5_access},
+ {0x1925103C, "Smart Array P831", &SA5_access},
{0x1926103C, "Smart Array P731m", &SA5_access},
{0x1928103C, "Smart Array P230i", &SA5_access},
{0x1929103C, "Smart Array P530", &SA5_access},
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index 2c92dabb55f6..26cd3c28186a 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -3910,12 +3910,6 @@ static int ibmvfc_alloc_target(struct ibmvfc_host *vhost, u64 scsi_id)
spin_unlock_irqrestore(vhost->host->host_lock, flags);
tgt = mempool_alloc(vhost->tgt_pool, GFP_NOIO);
- if (!tgt) {
- dev_err(vhost->dev, "Target allocation failure for scsi id %08llx\n",
- scsi_id);
- return -ENOMEM;
- }
-
memset(tgt, 0, sizeof(*tgt));
tgt->scsi_id = scsi_id;
tgt->new_scsi_id = scsi_id;
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 5d5e272fd815..b0c68d24db01 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -820,7 +820,7 @@ static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
}
/**
- * ipr_sata_eh_done - done function for aborted SATA commands
+ * __ipr_sata_eh_done - done function for aborted SATA commands
* @ipr_cmd: ipr command struct
*
* This function is invoked for ops generated to SATA
@@ -829,19 +829,41 @@ static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
* Return value:
* none
**/
-static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
+static void __ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
{
struct ata_queued_cmd *qc = ipr_cmd->qc;
struct ipr_sata_port *sata_port = qc->ap->private_data;
qc->err_mask |= AC_ERR_OTHER;
sata_port->ioasa.status |= ATA_BUSY;
- list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
ata_qc_complete(qc);
+ if (ipr_cmd->eh_comp)
+ complete(ipr_cmd->eh_comp);
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}
/**
- * ipr_scsi_eh_done - mid-layer done function for aborted ops
+ * ipr_sata_eh_done - done function for aborted SATA commands
+ * @ipr_cmd: ipr command struct
+ *
+ * This function is invoked for ops generated to SATA
+ * devices which are being aborted.
+ *
+ * Return value:
+ * none
+ **/
+static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
+{
+ struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
+ unsigned long hrrq_flags;
+
+ spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
+ __ipr_sata_eh_done(ipr_cmd);
+ spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
+}
+
+/**
+ * __ipr_scsi_eh_done - mid-layer done function for aborted ops
* @ipr_cmd: ipr command struct
*
* This function is invoked by the interrupt handler for
@@ -850,7 +872,7 @@ static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
* Return value:
* none
**/
-static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
+static void __ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
@@ -864,6 +886,26 @@ static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
}
/**
+ * ipr_scsi_eh_done - mid-layer done function for aborted ops
+ * @ipr_cmd: ipr command struct
+ *
+ * This function is invoked by the interrupt handler for
+ * ops generated by the SCSI mid-layer which are being aborted.
+ *
+ * Return value:
+ * none
+ **/
+static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
+{
+ unsigned long hrrq_flags;
+ struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
+
+ spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
+ __ipr_scsi_eh_done(ipr_cmd);
+ spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
+}
+
+/**
* ipr_fail_all_ops - Fails all outstanding ops.
* @ioa_cfg: ioa config struct
*
@@ -890,9 +932,9 @@ static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
cpu_to_be32(IPR_DRIVER_ILID);
if (ipr_cmd->scsi_cmd)
- ipr_cmd->done = ipr_scsi_eh_done;
+ ipr_cmd->done = __ipr_scsi_eh_done;
else if (ipr_cmd->qc)
- ipr_cmd->done = ipr_sata_eh_done;
+ ipr_cmd->done = __ipr_sata_eh_done;
ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH,
IPR_IOASC_IOA_WAS_RESET);
@@ -5006,6 +5048,42 @@ static int ipr_match_lun(struct ipr_cmnd *ipr_cmd, void *device)
}
/**
+ * ipr_cmnd_is_free - Check if a command is free or not
+ * @ipr_cmd:	ipr command struct
+ *
+ * Returns:
+ * true / false
+ **/
+static bool ipr_cmnd_is_free(struct ipr_cmnd *ipr_cmd)
+{
+ struct ipr_cmnd *loop_cmd;
+
+ list_for_each_entry(loop_cmd, &ipr_cmd->hrrq->hrrq_free_q, queue) {
+ if (loop_cmd == ipr_cmd)
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * ipr_match_res - Match function for specified resource entry
+ * @ipr_cmd: ipr command struct
+ * @resource: resource entry to match
+ *
+ * Returns:
+ *	1 if the command matches the resource entry / 0 if it does not
+ **/
+static int ipr_match_res(struct ipr_cmnd *ipr_cmd, void *resource)
+{
+ struct ipr_resource_entry *res = resource;
+
+ if (res && ipr_cmd->ioarcb.res_handle == res->res_handle)
+ return 1;
+ return 0;
+}
+
+/**
* ipr_wait_for_ops - Wait for matching commands to complete
* @ipr_cmd: ipr command struct
* @device: device to match (sdev)
@@ -5018,7 +5096,7 @@ static int ipr_wait_for_ops(struct ipr_ioa_cfg *ioa_cfg, void *device,
int (*match)(struct ipr_cmnd *, void *))
{
struct ipr_cmnd *ipr_cmd;
- int wait;
+ int wait, i;
unsigned long flags;
struct ipr_hrr_queue *hrrq;
signed long timeout = IPR_ABORT_TASK_TIMEOUT;
@@ -5030,10 +5108,13 @@ static int ipr_wait_for_ops(struct ipr_ioa_cfg *ioa_cfg, void *device,
for_each_hrrq(hrrq, ioa_cfg) {
spin_lock_irqsave(hrrq->lock, flags);
- list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
- if (match(ipr_cmd, device)) {
- ipr_cmd->eh_comp = &comp;
- wait++;
+ for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
+ ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
+ if (!ipr_cmnd_is_free(ipr_cmd)) {
+ if (match(ipr_cmd, device)) {
+ ipr_cmd->eh_comp = &comp;
+ wait++;
+ }
}
}
spin_unlock_irqrestore(hrrq->lock, flags);
@@ -5047,10 +5128,13 @@ static int ipr_wait_for_ops(struct ipr_ioa_cfg *ioa_cfg, void *device,
for_each_hrrq(hrrq, ioa_cfg) {
spin_lock_irqsave(hrrq->lock, flags);
- list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
- if (match(ipr_cmd, device)) {
- ipr_cmd->eh_comp = NULL;
- wait++;
+ for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
+ ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
+ if (!ipr_cmnd_is_free(ipr_cmd)) {
+ if (match(ipr_cmd, device)) {
+ ipr_cmd->eh_comp = NULL;
+ wait++;
+ }
}
}
spin_unlock_irqrestore(hrrq->lock, flags);
@@ -5179,7 +5263,7 @@ static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
struct ipr_resource_entry *res;
unsigned long lock_flags = 0;
- int rc = -ENXIO;
+ int rc = -ENXIO, ret;
ENTER;
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
@@ -5193,9 +5277,19 @@ static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
if (res) {
rc = ipr_device_reset(ioa_cfg, res);
*classes = res->ata_class;
- }
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+
+ ret = ipr_wait_for_ops(ioa_cfg, res, ipr_match_res);
+ if (ret != SUCCESS) {
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+ ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+
+ wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
+ }
+ } else
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
- spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
LEAVE;
return rc;
}
@@ -5217,16 +5311,13 @@ static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
struct ipr_ioa_cfg *ioa_cfg;
struct ipr_resource_entry *res;
struct ata_port *ap;
- int rc = 0;
+ int rc = 0, i;
struct ipr_hrr_queue *hrrq;
ENTER;
ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
res = scsi_cmd->device->hostdata;
- if (!res)
- return FAILED;
-
/*
* If we are currently going through reset/reload, return failed. This will force the
* mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
@@ -5239,14 +5330,17 @@ static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
for_each_hrrq(hrrq, ioa_cfg) {
spin_lock(&hrrq->_lock);
- list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
+ for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
+ ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
+
if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
- if (ipr_cmd->scsi_cmd)
- ipr_cmd->done = ipr_scsi_eh_done;
- if (ipr_cmd->qc)
- ipr_cmd->done = ipr_sata_eh_done;
- if (ipr_cmd->qc &&
- !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
+ if (!ipr_cmd->qc)
+ continue;
+ if (ipr_cmnd_is_free(ipr_cmd))
+ continue;
+
+ ipr_cmd->done = ipr_sata_eh_done;
+ if (!(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
}
@@ -5262,19 +5356,6 @@ static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
spin_unlock_irq(scsi_cmd->device->host->host_lock);
ata_std_error_handler(ap);
spin_lock_irq(scsi_cmd->device->host->host_lock);
-
- for_each_hrrq(hrrq, ioa_cfg) {
- spin_lock(&hrrq->_lock);
- list_for_each_entry(ipr_cmd,
- &hrrq->hrrq_pending_q, queue) {
- if (ipr_cmd->ioarcb.res_handle ==
- res->res_handle) {
- rc = -EIO;
- break;
- }
- }
- spin_unlock(&hrrq->_lock);
- }
} else
rc = ipr_device_reset(ioa_cfg, res);
res->resetting_device = 0;
@@ -5288,15 +5369,24 @@ static int ipr_eh_dev_reset(struct scsi_cmnd *cmd)
{
int rc;
struct ipr_ioa_cfg *ioa_cfg;
+ struct ipr_resource_entry *res;
ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
+ res = cmd->device->hostdata;
+
+ if (!res)
+ return FAILED;
spin_lock_irq(cmd->device->host->host_lock);
rc = __ipr_eh_dev_reset(cmd);
spin_unlock_irq(cmd->device->host->host_lock);
- if (rc == SUCCESS)
- rc = ipr_wait_for_ops(ioa_cfg, cmd->device, ipr_match_lun);
+ if (rc == SUCCESS) {
+ if (ipr_is_gata(res) && res->sata_port)
+ rc = ipr_wait_for_ops(ioa_cfg, res, ipr_match_res);
+ else
+ rc = ipr_wait_for_ops(ioa_cfg, cmd->device, ipr_match_lun);
+ }
return rc;
}
@@ -5393,7 +5483,7 @@ static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
struct ipr_resource_entry *res;
struct ipr_cmd_pkt *cmd_pkt;
u32 ioasc, int_reg;
- int op_found = 0;
+ int i, op_found = 0;
struct ipr_hrr_queue *hrrq;
ENTER;
@@ -5422,11 +5512,12 @@ static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
for_each_hrrq(hrrq, ioa_cfg) {
spin_lock(&hrrq->_lock);
- list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
- if (ipr_cmd->scsi_cmd == scsi_cmd) {
- ipr_cmd->done = ipr_scsi_eh_done;
- op_found = 1;
- break;
+ for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
+ if (ioa_cfg->ipr_cmnd_list[i]->scsi_cmd == scsi_cmd) {
+ if (!ipr_cmnd_is_free(ioa_cfg->ipr_cmnd_list[i])) {
+ op_found = 1;
+ break;
+ }
}
}
spin_unlock(&hrrq->_lock);
@@ -5917,7 +6008,7 @@ static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
}
/**
- * ipr_erp_done - Process completion of ERP for a device
+ * __ipr_erp_done - Process completion of ERP for a device
* @ipr_cmd: ipr command struct
*
* This function copies the sense buffer into the scsi_cmd
@@ -5926,7 +6017,7 @@ static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
* Return value:
* nothing
**/
-static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
+static void __ipr_erp_done(struct ipr_cmnd *ipr_cmd)
{
struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
@@ -5947,8 +6038,30 @@ static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
res->in_erp = 0;
}
scsi_dma_unmap(ipr_cmd->scsi_cmd);
- list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
scsi_cmd->scsi_done(scsi_cmd);
+ if (ipr_cmd->eh_comp)
+ complete(ipr_cmd->eh_comp);
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
+}
+
+/**
+ * ipr_erp_done - Process completion of ERP for a device
+ * @ipr_cmd: ipr command struct
+ *
+ * This function copies the sense buffer into the scsi_cmd
+ * struct and pushes the scsi_done function.
+ *
+ * Return value:
+ * nothing
+ **/
+static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
+{
+ struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
+ unsigned long hrrq_flags;
+
+ spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
+ __ipr_erp_done(ipr_cmd);
+ spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
}
/**
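The __ipr_erp_done()/ipr_erp_done() split above follows the usual locked/unlocked helper convention: the double-underscore variant assumes the caller already holds the hrrq lock, and the plain-named wrapper takes it. A minimal sketch of that pattern, using hypothetical names (cmd_queue, finish_cmd) rather than the driver's own:

	#include <linux/spinlock.h>

	struct cmd_queue {
		spinlock_t _lock;
		/* ... per-queue state ... */
	};

	/* Caller must already hold q->_lock. */
	static void __finish_cmd(struct cmd_queue *q)
	{
		/* complete the command, move it back to the free list, ... */
	}

	/* Lock-taking wrapper, safe to call without the queue lock held. */
	static void finish_cmd(struct cmd_queue *q)
	{
		unsigned long flags;

		spin_lock_irqsave(&q->_lock, flags);
		__finish_cmd(q);
		spin_unlock_irqrestore(&q->_lock, flags);
	}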
@@ -5983,7 +6096,7 @@ static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
}
/**
- * ipr_erp_request_sense - Send request sense to a device
+ * __ipr_erp_request_sense - Send request sense to a device
* @ipr_cmd: ipr command struct
*
* This function sends a request sense to a device as a result
@@ -5992,13 +6105,13 @@ static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
* Return value:
* nothing
**/
-static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
+static void __ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
{
struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
- ipr_erp_done(ipr_cmd);
+ __ipr_erp_done(ipr_cmd);
return;
}
@@ -6019,6 +6132,26 @@ static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
}
/**
+ * ipr_erp_request_sense - Send request sense to a device
+ * @ipr_cmd: ipr command struct
+ *
+ * This function sends a request sense to a device as a result
+ * of a check condition.
+ *
+ * Return value:
+ * nothing
+ **/
+static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
+{
+ struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
+ unsigned long hrrq_flags;
+
+ spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
+ __ipr_erp_request_sense(ipr_cmd);
+ spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
+}
+
+/**
* ipr_erp_cancel_all - Send cancel all to a device
* @ipr_cmd: ipr command struct
*
@@ -6041,7 +6174,7 @@ static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
if (!scsi_cmd->device->simple_tags) {
- ipr_erp_request_sense(ipr_cmd);
+ __ipr_erp_request_sense(ipr_cmd);
return;
}
@@ -6261,7 +6394,7 @@ static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;
if (!res) {
- ipr_scsi_eh_done(ipr_cmd);
+ __ipr_scsi_eh_done(ipr_cmd);
return;
}
@@ -6343,8 +6476,10 @@ static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
}
scsi_dma_unmap(ipr_cmd->scsi_cmd);
- list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
scsi_cmd->scsi_done(scsi_cmd);
+ if (ipr_cmd->eh_comp)
+ complete(ipr_cmd->eh_comp);
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}
/**
@@ -6370,8 +6505,10 @@ static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
scsi_dma_unmap(scsi_cmd);
spin_lock_irqsave(ipr_cmd->hrrq->lock, lock_flags);
- list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
scsi_cmd->scsi_done(scsi_cmd);
+ if (ipr_cmd->eh_comp)
+ complete(ipr_cmd->eh_comp);
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
spin_unlock_irqrestore(ipr_cmd->hrrq->lock, lock_flags);
} else {
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index b7d2e98eb45b..e98a87a65335 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -39,8 +39,8 @@
/*
* Literals
*/
-#define IPR_DRIVER_VERSION "2.6.3"
-#define IPR_DRIVER_DATE "(October 17, 2015)"
+#define IPR_DRIVER_VERSION "2.6.4"
+#define IPR_DRIVER_DATE "(March 14, 2017)"
/*
* IPR_MAX_CMD_PER_LUN: This defines the maximum number of outstanding
diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
index 0b5b5db0d0f8..45371179ab87 100644
--- a/drivers/scsi/isci/init.c
+++ b/drivers/scsi/isci/init.c
@@ -272,7 +272,6 @@ static void isci_unregister(struct isci_host *isci_host)
return;
shost = to_shost(isci_host);
- scsi_remove_host(shost);
sas_unregister_ha(&isci_host->sas_ha);
sas_remove_host(shost);
diff --git a/drivers/scsi/isci/registers.h b/drivers/scsi/isci/registers.h
index 97f3ceb8d724..63468cfe3e4a 100644
--- a/drivers/scsi/isci/registers.h
+++ b/drivers/scsi/isci/registers.h
@@ -652,7 +652,7 @@ struct scu_iit_entry {
/*
- * TODO: Where is the SAS_LNKTOV regsiter?
+ * TODO: Where is the SAS_LNKTOV register?
* TODO: Where is the SAS_PHYTOV register? */
#define SCU_SAS_TRANSMIT_IDENTIFICATION_SMP_TARGET_SHIFT (1)
@@ -1827,7 +1827,7 @@ struct scu_peg_registers {
};
/**
- * struct scu_registers - SCU regsiters including both PEG registers if we turn
+ * struct scu_registers - SCU registers including both PEG registers if we turn
* on that compile option. All of these registers are in the memory mapped
* space returned from BAR1.
*
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index bbea8eac9abb..4842fc0e809d 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -30,6 +30,7 @@
#include <linux/types.h>
#include <linux/inet.h>
#include <linux/slab.h>
+#include <linux/sched/mm.h>
#include <linux/file.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
@@ -371,10 +372,10 @@ static inline int iscsi_sw_tcp_xmit_qlen(struct iscsi_conn *conn)
static int iscsi_sw_tcp_pdu_xmit(struct iscsi_task *task)
{
struct iscsi_conn *conn = task->conn;
- unsigned long pflags = current->flags;
+ unsigned int noreclaim_flag;
int rc = 0;
- current->flags |= PF_MEMALLOC;
+ noreclaim_flag = memalloc_noreclaim_save();
while (iscsi_sw_tcp_xmit_qlen(conn)) {
rc = iscsi_sw_tcp_xmit(conn);
@@ -387,7 +388,7 @@ static int iscsi_sw_tcp_pdu_xmit(struct iscsi_task *task)
rc = 0;
}
- current_restore_flags(pflags, PF_MEMALLOC);
+ memalloc_noreclaim_restore(noreclaim_flag);
return rc;
}
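The iscsi_tcp change above replaces open-coded PF_MEMALLOC juggling with the scoped helpers from <linux/sched/mm.h>. A minimal sketch of the save/restore idiom (the surrounding function is hypothetical, not part of the patch):

	#include <linux/sched/mm.h>

	static int xmit_without_reclaim(int (*xmit)(void *), void *arg)
	{
		unsigned int noreclaim_flag;
		int rc;

		noreclaim_flag = memalloc_noreclaim_save();	/* sets PF_MEMALLOC */
		rc = xmit(arg);
		memalloc_noreclaim_restore(noreclaim_flag);	/* restores the prior state */

		return rc;
	}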
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index 0e67621477a8..a808e8ef1d08 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -154,7 +154,7 @@ static struct fc_fcp_pkt *fc_fcp_pkt_alloc(struct fc_lport *lport, gfp_t gfp)
memset(fsp, 0, sizeof(*fsp));
fsp->lp = lport;
fsp->xfer_ddp = FC_XID_UNKNOWN;
- atomic_set(&fsp->ref_cnt, 1);
+ refcount_set(&fsp->ref_cnt, 1);
init_timer(&fsp->timer);
fsp->timer.data = (unsigned long)fsp;
INIT_LIST_HEAD(&fsp->list);
@@ -175,7 +175,7 @@ static struct fc_fcp_pkt *fc_fcp_pkt_alloc(struct fc_lport *lport, gfp_t gfp)
*/
static void fc_fcp_pkt_release(struct fc_fcp_pkt *fsp)
{
- if (atomic_dec_and_test(&fsp->ref_cnt)) {
+ if (refcount_dec_and_test(&fsp->ref_cnt)) {
struct fc_fcp_internal *si = fc_get_scsi_internal(fsp->lp);
mempool_free(fsp, si->scsi_pkt_pool);
@@ -188,7 +188,7 @@ static void fc_fcp_pkt_release(struct fc_fcp_pkt *fsp)
*/
static void fc_fcp_pkt_hold(struct fc_fcp_pkt *fsp)
{
- atomic_inc(&fsp->ref_cnt);
+ refcount_inc(&fsp->ref_cnt);
}
/**
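The fc_fcp conversion above swaps a bare atomic_t reference count for refcount_t, which saturates instead of wrapping and warns on misuse. A minimal sketch of the same idiom on a hypothetical object:

	#include <linux/refcount.h>
	#include <linux/slab.h>

	struct pkt {
		refcount_t ref_cnt;
		/* ... payload ... */
	};

	static struct pkt *pkt_alloc(gfp_t gfp)
	{
		struct pkt *p = kzalloc(sizeof(*p), gfp);

		if (p)
			refcount_set(&p->ref_cnt, 1);	/* caller owns the first reference */
		return p;
	}

	static void pkt_hold(struct pkt *p)
	{
		refcount_inc(&p->ref_cnt);		/* WARNs on 0 -> 1 and on saturation */
	}

	static void pkt_release(struct pkt *p)
	{
		if (refcount_dec_and_test(&p->ref_cnt))	/* true when the last reference drops */
			kfree(p);
	}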
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
index aa76f36abe03..2fd0ec651170 100644
--- a/drivers/scsi/libfc/fc_lport.c
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -887,8 +887,6 @@ out:
static void fc_lport_recv_els_req(struct fc_lport *lport,
struct fc_frame *fp)
{
- void (*recv)(struct fc_lport *, struct fc_frame *);
-
mutex_lock(&lport->lp_mutex);
/*
@@ -902,31 +900,31 @@ static void fc_lport_recv_els_req(struct fc_lport *lport,
/*
* Check opcode.
*/
- recv = fc_rport_recv_req;
switch (fc_frame_payload_op(fp)) {
case ELS_FLOGI:
if (!lport->point_to_multipoint)
- recv = fc_lport_recv_flogi_req;
+ fc_lport_recv_flogi_req(lport, fp);
break;
case ELS_LOGO:
if (fc_frame_sid(fp) == FC_FID_FLOGI)
- recv = fc_lport_recv_logo_req;
+ fc_lport_recv_logo_req(lport, fp);
break;
case ELS_RSCN:
- recv = lport->tt.disc_recv_req;
+ lport->tt.disc_recv_req(lport, fp);
break;
case ELS_ECHO:
- recv = fc_lport_recv_echo_req;
+ fc_lport_recv_echo_req(lport, fp);
break;
case ELS_RLIR:
- recv = fc_lport_recv_rlir_req;
+ fc_lport_recv_rlir_req(lport, fp);
break;
case ELS_RNID:
- recv = fc_lport_recv_rnid_req;
+ fc_lport_recv_rnid_req(lport, fp);
+ break;
+ default:
+ fc_rport_recv_req(lport, fp);
break;
}
-
- recv(lport, fp);
}
mutex_unlock(&lport->lp_mutex);
}
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 894b1e3ebd56..dd6828f7f772 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -517,13 +517,13 @@ static void iscsi_free_task(struct iscsi_task *task)
void __iscsi_get_task(struct iscsi_task *task)
{
- atomic_inc(&task->refcount);
+ refcount_inc(&task->refcount);
}
EXPORT_SYMBOL_GPL(__iscsi_get_task);
void __iscsi_put_task(struct iscsi_task *task)
{
- if (atomic_dec_and_test(&task->refcount))
+ if (refcount_dec_and_test(&task->refcount))
iscsi_free_task(task);
}
EXPORT_SYMBOL_GPL(__iscsi_put_task);
@@ -749,7 +749,7 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
* released by the lld when it has transmitted the task for
* pdus we do not expect a response for.
*/
- atomic_set(&task->refcount, 1);
+ refcount_set(&task->refcount, 1);
task->conn = conn;
task->sc = NULL;
INIT_LIST_HEAD(&task->running);
@@ -1638,7 +1638,7 @@ static inline struct iscsi_task *iscsi_alloc_task(struct iscsi_conn *conn,
sc->SCp.phase = conn->session->age;
sc->SCp.ptr = (char *) task;
- atomic_set(&task->refcount, 1);
+ refcount_set(&task->refcount, 1);
task->state = ISCSI_TASK_PENDING;
task->conn = conn;
task->sc = sc;
diff --git a/drivers/scsi/libsas/sas_init.c b/drivers/scsi/libsas/sas_init.c
index 15ef8e2e685c..64e9cdda1c3c 100644
--- a/drivers/scsi/libsas/sas_init.c
+++ b/drivers/scsi/libsas/sas_init.c
@@ -566,13 +566,6 @@ sas_domain_attach_transport(struct sas_domain_function_template *dft)
}
EXPORT_SYMBOL_GPL(sas_domain_attach_transport);
-
-void sas_domain_release_transport(struct scsi_transport_template *stt)
-{
- sas_release_transport(stt);
-}
-EXPORT_SYMBOL_GPL(sas_domain_release_transport);
-
/* ---------- SAS Class register/unregister ---------- */
static int __init sas_class_init(void)
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index 9bd55bce83af..87e5079d816b 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -491,9 +491,6 @@ int sas_eh_abort_handler(struct scsi_cmnd *cmd)
struct Scsi_Host *host = cmd->device->host;
struct sas_internal *i = to_sas_internal(host->transportt);
- if (current != host->ehandler)
- return FAILED;
-
if (!i->dft->lldd_abort_task)
return FAILED;
@@ -616,8 +613,6 @@ static void sas_eh_handle_sas_errors(struct Scsi_Host *shost, struct list_head *
SAS_DPRINTK("trying to find task 0x%p\n", task);
res = sas_scsi_find_task(task);
- cmd->eh_eflags = 0;
-
switch (res) {
case TASK_IS_DONE:
SAS_DPRINTK("%s: task 0x%p is done\n", __func__,
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 513fd07715cd..4830370bfab1 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -181,7 +181,7 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
wwn_to_u64(vport->fc_nodename.u.wwn),
phba->targetport->port_id);
- len += snprintf(buf + len, PAGE_SIZE,
+ len += snprintf(buf + len, PAGE_SIZE - len,
"\nNVME Target: Statistics\n");
tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
len += snprintf(buf+len, PAGE_SIZE-len,
@@ -326,7 +326,7 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
}
spin_unlock_irq(shost->host_lock);
- len += snprintf(buf + len, PAGE_SIZE, "\nNVME Statistics\n");
+ len += snprintf(buf + len, PAGE_SIZE - len, "\nNVME Statistics\n");
len += snprintf(buf+len, PAGE_SIZE-len,
"LS: Xmt %016llx Cmpl %016llx\n",
phba->fc4NvmeLsRequests,
diff --git a/drivers/scsi/mac_esp.c b/drivers/scsi/mac_esp.c
index bb567d3b0693..cdb61eaa2d1f 100644
--- a/drivers/scsi/mac_esp.c
+++ b/drivers/scsi/mac_esp.c
@@ -55,6 +55,7 @@ struct mac_esp_priv {
int error;
};
static struct esp *esp_chips[2];
+static DEFINE_SPINLOCK(esp_chips_lock);
#define MAC_ESP_GET_PRIV(esp) ((struct mac_esp_priv *) \
platform_get_drvdata((struct platform_device *) \
@@ -562,15 +563,18 @@ static int esp_mac_probe(struct platform_device *dev)
}
host->irq = IRQ_MAC_SCSI;
- esp_chips[dev->id] = esp;
- mb();
- if (esp_chips[!dev->id] == NULL) {
- err = request_irq(host->irq, mac_scsi_esp_intr, 0, "ESP", NULL);
- if (err < 0) {
- esp_chips[dev->id] = NULL;
- goto fail_free_priv;
- }
+
+ /* The request_irq() call is intended to succeed for the first device
+ * and fail for the second device.
+ */
+ err = request_irq(host->irq, mac_scsi_esp_intr, 0, "ESP", NULL);
+ spin_lock(&esp_chips_lock);
+ if (err < 0 && esp_chips[!dev->id] == NULL) {
+ spin_unlock(&esp_chips_lock);
+ goto fail_free_priv;
}
+ esp_chips[dev->id] = esp;
+ spin_unlock(&esp_chips_lock);
err = scsi_esp_register(esp, &dev->dev);
if (err)
@@ -579,8 +583,13 @@ static int esp_mac_probe(struct platform_device *dev)
return 0;
fail_free_irq:
- if (esp_chips[!dev->id] == NULL)
- free_irq(host->irq, esp);
+ spin_lock(&esp_chips_lock);
+ esp_chips[dev->id] = NULL;
+ if (esp_chips[!dev->id] == NULL) {
+ spin_unlock(&esp_chips_lock);
+ free_irq(host->irq, NULL);
+ } else
+ spin_unlock(&esp_chips_lock);
fail_free_priv:
kfree(mep);
fail_free_command_block:
@@ -599,9 +608,13 @@ static int esp_mac_remove(struct platform_device *dev)
scsi_esp_unregister(esp);
+ spin_lock(&esp_chips_lock);
esp_chips[dev->id] = NULL;
- if (!(esp_chips[0] || esp_chips[1]))
+ if (esp_chips[!dev->id] == NULL) {
+ spin_unlock(&esp_chips_lock);
free_irq(irq, NULL);
+ } else
+ spin_unlock(&esp_chips_lock);
kfree(mep);
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 0016f12cc563..316c3df0c3fd 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -244,7 +244,7 @@ struct megasas_cmd *megasas_get_cmd(struct megasas_instance
* @instance: Adapter soft state
* @cmd: Command packet to be returned to free command pool
*/
-inline void
+void
megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
{
unsigned long flags;
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 5b7aec5d575a..18039bba26c4 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -1025,7 +1025,6 @@ _base_interrupt(int irq, void *bus_id)
0 : ioc->reply_free_host_index + 1;
ioc->reply_free[ioc->reply_free_host_index] =
cpu_to_le32(reply);
- wmb();
writel(ioc->reply_free_host_index,
&ioc->chip->ReplyFreeHostIndex);
}
@@ -1074,7 +1073,6 @@ _base_interrupt(int irq, void *bus_id)
return IRQ_NONE;
}
- wmb();
if (ioc->is_warpdrive) {
writel(reply_q->reply_post_host_index,
ioc->reply_post_host_index[msix_index]);
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index 8981806fb13f..099ab4ca7edf 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -1421,7 +1421,7 @@ void mpt3sas_ctl_add_to_event_log(struct MPT3SAS_ADAPTER *ioc,
Mpi2EventNotificationReply_t *mpi_reply);
void mpt3sas_enable_diag_buffer(struct MPT3SAS_ADAPTER *ioc,
- u8 bits_to_regsiter);
+ u8 bits_to_register);
int mpt3sas_send_diag_release(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type,
u8 *issue_reset);
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 919ba2bb15f1..a5d872664257 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -8283,7 +8283,6 @@ static void scsih_remove(struct pci_dev *pdev)
}
sas_remove_host(shost);
- scsi_remove_host(shost);
mpt3sas_base_detach(ioc);
spin_lock(&gioc_lock);
list_del(&ioc->list);
diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c
index 8280046fd1f0..4e047b5001a6 100644
--- a/drivers/scsi/mvsas/mv_init.c
+++ b/drivers/scsi/mvsas/mv_init.c
@@ -642,7 +642,6 @@ static void mvs_pci_remove(struct pci_dev *pdev)
tasklet_kill(&((struct mvs_prv_info *)sha->lldd_ha)->mv_tasklet);
#endif
- scsi_remove_host(mvi->shost);
sas_unregister_ha(sha);
sas_remove_host(mvi->shost);
diff --git a/drivers/scsi/mvumi.c b/drivers/scsi/mvumi.c
index 247df5e79b71..fe97401ad192 100644
--- a/drivers/scsi/mvumi.c
+++ b/drivers/scsi/mvumi.c
@@ -210,39 +210,27 @@ static int mvumi_make_sgl(struct mvumi_hba *mhba, struct scsi_cmnd *scmd,
unsigned int sgnum = scsi_sg_count(scmd);
dma_addr_t busaddr;
- if (sgnum) {
- sg = scsi_sglist(scmd);
- *sg_count = pci_map_sg(mhba->pdev, sg, sgnum,
- (int) scmd->sc_data_direction);
- if (*sg_count > mhba->max_sge) {
- dev_err(&mhba->pdev->dev, "sg count[0x%x] is bigger "
- "than max sg[0x%x].\n",
- *sg_count, mhba->max_sge);
- return -1;
- }
- for (i = 0; i < *sg_count; i++) {
- busaddr = sg_dma_address(&sg[i]);
- m_sg->baseaddr_l = cpu_to_le32(lower_32_bits(busaddr));
- m_sg->baseaddr_h = cpu_to_le32(upper_32_bits(busaddr));
- m_sg->flags = 0;
- sgd_setsz(mhba, m_sg, cpu_to_le32(sg_dma_len(&sg[i])));
- if ((i + 1) == *sg_count)
- m_sg->flags |= 1U << mhba->eot_flag;
-
- sgd_inc(mhba, m_sg);
- }
- } else {
- scmd->SCp.dma_handle = scsi_bufflen(scmd) ?
- pci_map_single(mhba->pdev, scsi_sglist(scmd),
- scsi_bufflen(scmd),
- (int) scmd->sc_data_direction)
- : 0;
- busaddr = scmd->SCp.dma_handle;
+ sg = scsi_sglist(scmd);
+ *sg_count = pci_map_sg(mhba->pdev, sg, sgnum,
+ (int) scmd->sc_data_direction);
+ if (*sg_count > mhba->max_sge) {
+ dev_err(&mhba->pdev->dev,
+ "sg count[0x%x] is bigger than max sg[0x%x].\n",
+ *sg_count, mhba->max_sge);
+ pci_unmap_sg(mhba->pdev, sg, sgnum,
+ (int) scmd->sc_data_direction);
+ return -1;
+ }
+ for (i = 0; i < *sg_count; i++) {
+ busaddr = sg_dma_address(&sg[i]);
m_sg->baseaddr_l = cpu_to_le32(lower_32_bits(busaddr));
m_sg->baseaddr_h = cpu_to_le32(upper_32_bits(busaddr));
- m_sg->flags = 1U << mhba->eot_flag;
- sgd_setsz(mhba, m_sg, cpu_to_le32(scsi_bufflen(scmd)));
- *sg_count = 1;
+ m_sg->flags = 0;
+ sgd_setsz(mhba, m_sg, cpu_to_le32(sg_dma_len(&sg[i])));
+ if ((i + 1) == *sg_count)
+ m_sg->flags |= 1U << mhba->eot_flag;
+
+ sgd_inc(mhba, m_sg);
}
return 0;
@@ -1350,21 +1338,10 @@ static void mvumi_complete_cmd(struct mvumi_hba *mhba, struct mvumi_cmd *cmd,
break;
}
- if (scsi_bufflen(scmd)) {
- if (scsi_sg_count(scmd)) {
- pci_unmap_sg(mhba->pdev,
- scsi_sglist(scmd),
- scsi_sg_count(scmd),
- (int) scmd->sc_data_direction);
- } else {
- pci_unmap_single(mhba->pdev,
- scmd->SCp.dma_handle,
- scsi_bufflen(scmd),
- (int) scmd->sc_data_direction);
-
- scmd->SCp.dma_handle = 0;
- }
- }
+ if (scsi_bufflen(scmd))
+ pci_unmap_sg(mhba->pdev, scsi_sglist(scmd),
+ scsi_sg_count(scmd),
+ (int) scmd->sc_data_direction);
cmd->scmd->scsi_done(scmd);
mvumi_return_cmd(mhba, cmd);
}
@@ -2171,19 +2148,9 @@ static enum blk_eh_timer_return mvumi_timed_out(struct scsi_cmnd *scmd)
scmd->result = (DRIVER_INVALID << 24) | (DID_ABORT << 16);
scmd->SCp.ptr = NULL;
if (scsi_bufflen(scmd)) {
- if (scsi_sg_count(scmd)) {
- pci_unmap_sg(mhba->pdev,
- scsi_sglist(scmd),
- scsi_sg_count(scmd),
- (int)scmd->sc_data_direction);
- } else {
- pci_unmap_single(mhba->pdev,
- scmd->SCp.dma_handle,
- scsi_bufflen(scmd),
- (int)scmd->sc_data_direction);
-
- scmd->SCp.dma_handle = 0;
- }
+ pci_unmap_sg(mhba->pdev, scsi_sglist(scmd),
+ scsi_sg_count(scmd),
+ (int)scmd->sc_data_direction);
}
mvumi_return_cmd(mhba, cmd);
spin_unlock_irqrestore(mhba->shost->host_lock, flags);
diff --git a/drivers/scsi/osd/osd_uld.c b/drivers/scsi/osd/osd_uld.c
index e0ce5d2fd14d..0e56f1eb05dc 100644
--- a/drivers/scsi/osd/osd_uld.c
+++ b/drivers/scsi/osd/osd_uld.c
@@ -400,9 +400,6 @@ static void __remove(struct device *dev)
kfree(oud->odi.osdname);
- if (oud->cdev.owner)
- cdev_del(&oud->cdev);
-
osd_dev_fini(&oud->od);
scsi_device_put(scsi_device);
@@ -411,7 +408,6 @@ static void __remove(struct device *dev)
if (oud->disk)
put_disk(oud->disk);
- ida_remove(&osd_minor_ida, oud->minor);
kfree(oud);
}
@@ -446,8 +442,21 @@ static int osd_probe(struct device *dev)
if (NULL == oud)
goto err_retract_minor;
+ /* class device member */
+ device_initialize(&oud->class_dev);
dev_set_drvdata(dev, oud);
oud->minor = minor;
+ oud->class_dev.devt = MKDEV(SCSI_OSD_MAJOR, oud->minor);
+ oud->class_dev.class = &osd_uld_class;
+ oud->class_dev.parent = dev;
+ oud->class_dev.release = __remove;
+
+ /* hold one more reference to the scsi_device that will get released
+ * in __release, in case a logout is happening while fs is mounted
+ */
+ if (scsi_device_get(scsi_device))
+ goto err_retract_minor;
+ osd_dev_init(&oud->od, scsi_device);
/* allocate a disk and set it up */
/* FIXME: do we need this since sg has already done that */
@@ -461,59 +470,34 @@ static int osd_probe(struct device *dev)
sprintf(disk->disk_name, "osd%d", oud->minor);
oud->disk = disk;
- /* hold one more reference to the scsi_device that will get released
- * in __release, in case a logout is happening while fs is mounted
- */
- scsi_device_get(scsi_device);
- osd_dev_init(&oud->od, scsi_device);
-
/* Detect the OSD Version */
error = __detect_osd(oud);
if (error) {
OSD_ERR("osd detection failed, non-compatible OSD device\n");
- goto err_put_disk;
+ goto err_free_osd;
}
/* init the char-device for communication with user-mode */
cdev_init(&oud->cdev, &osd_fops);
oud->cdev.owner = THIS_MODULE;
- error = cdev_add(&oud->cdev,
- MKDEV(SCSI_OSD_MAJOR, oud->minor), 1);
- if (error) {
- OSD_ERR("cdev_add failed\n");
- goto err_put_disk;
- }
- /* class device member */
- oud->class_dev.devt = oud->cdev.dev;
- oud->class_dev.class = &osd_uld_class;
- oud->class_dev.parent = dev;
- oud->class_dev.release = __remove;
error = dev_set_name(&oud->class_dev, "%s", disk->disk_name);
if (error) {
OSD_ERR("dev_set_name failed => %d\n", error);
- goto err_put_cdev;
+ goto err_free_osd;
}
- error = device_register(&oud->class_dev);
+ error = cdev_device_add(&oud->cdev, &oud->class_dev);
if (error) {
OSD_ERR("device_register failed => %d\n", error);
- goto err_put_cdev;
+ goto err_free_osd;
}
- get_device(&oud->class_dev);
-
OSD_INFO("osd_probe %s\n", disk->disk_name);
return 0;
-err_put_cdev:
- cdev_del(&oud->cdev);
-err_put_disk:
- scsi_device_put(scsi_device);
- put_disk(disk);
err_free_osd:
- dev_set_drvdata(dev, NULL);
- kfree(oud);
+ put_device(&oud->class_dev);
err_retract_minor:
ida_remove(&osd_minor_ida, minor);
return error;
@@ -524,15 +508,15 @@ static int osd_remove(struct device *dev)
struct scsi_device *scsi_device = to_scsi_device(dev);
struct osd_uld_device *oud = dev_get_drvdata(dev);
- if (!oud || (oud->od.scsi_device != scsi_device)) {
- OSD_ERR("Half cooked osd-device %p,%p || %p!=%p",
- dev, oud, oud ? oud->od.scsi_device : NULL,
- scsi_device);
+ if (oud->od.scsi_device != scsi_device) {
+ OSD_ERR("Half cooked osd-device %p, || %p!=%p",
+ dev, oud->od.scsi_device, scsi_device);
}
- device_unregister(&oud->class_dev);
-
+ cdev_device_del(&oud->cdev, &oud->class_dev);
+ ida_remove(&osd_minor_ida, oud->minor);
put_device(&oud->class_dev);
+
return 0;
}
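The osd_uld rework above moves from separate cdev_add()/device_register() calls to device_initialize() plus cdev_device_add(), so a single put_device() cleans up every error path. A condensed sketch of that registration sequence, with hypothetical names (my_uld, my_uld_add) standing in for the driver's structures:

	#include <linux/cdev.h>
	#include <linux/device.h>
	#include <linux/fs.h>
	#include <linux/module.h>
	#include <linux/slab.h>

	struct my_uld {
		struct device	class_dev;
		struct cdev	cdev;
	};

	static void my_uld_release(struct device *dev)
	{
		kfree(container_of(dev, struct my_uld, class_dev));
	}

	static int my_uld_add(struct my_uld *u, const struct file_operations *fops,
			      struct class *cls, dev_t devt, struct device *parent)
	{
		int err;

		device_initialize(&u->class_dev);	/* makes put_device() valid from here on */
		u->class_dev.devt = devt;
		u->class_dev.class = cls;
		u->class_dev.parent = parent;
		u->class_dev.release = my_uld_release;

		err = dev_set_name(&u->class_dev, "myuld%d", MINOR(devt));
		if (err)
			goto err_put;

		cdev_init(&u->cdev, fops);
		u->cdev.owner = THIS_MODULE;

		err = cdev_device_add(&u->cdev, &u->class_dev);	/* cdev + device in one step */
		if (err)
			goto err_put;

		return 0;

	err_put:
		put_device(&u->class_dev);	/* drops the ref; ->release() frees u */
		return err;
	}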
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index 417368ccb686..034b2f7d1135 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -1088,7 +1088,6 @@ static void pm8001_pci_remove(struct pci_dev *pdev)
struct pm8001_hba_info *pm8001_ha;
int i, j;
pm8001_ha = sha->lldd_ha;
- scsi_remove_host(pm8001_ha->shost);
sas_unregister_ha(sha);
sas_remove_host(pm8001_ha->shost);
list_del(&pm8001_ha->list);
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index 49e70a383afa..a4aadf5f4dc6 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -77,7 +77,7 @@ static atomic_t pmcraid_adapter_count = ATOMIC_INIT(0);
*/
static unsigned int pmcraid_major;
static struct class *pmcraid_class;
-DECLARE_BITMAP(pmcraid_minor, PMCRAID_MAX_ADAPTERS);
+static DECLARE_BITMAP(pmcraid_minor, PMCRAID_MAX_ADAPTERS);
/*
* Module parameters
@@ -175,7 +175,7 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
if (fw_version <= PMCRAID_FW_VERSION_1)
target = temp->cfg_entry.unique_flags1;
else
- target = temp->cfg_entry.array_id & 0xFF;
+ target = le16_to_cpu(temp->cfg_entry.array_id) & 0xFF;
if (target > PMCRAID_MAX_VSET_TARGETS)
continue;
@@ -330,7 +330,7 @@ static void pmcraid_init_cmdblk(struct pmcraid_cmd *cmd, int index)
ioarcb->request_flags0 = 0;
ioarcb->request_flags1 = 0;
ioarcb->cmd_timeout = 0;
- ioarcb->ioarcb_bus_addr &= (~0x1FULL);
+ ioarcb->ioarcb_bus_addr &= cpu_to_le64(~0x1FULL);
ioarcb->ioadl_bus_addr = 0;
ioarcb->ioadl_length = 0;
ioarcb->data_transfer_length = 0;
@@ -345,7 +345,7 @@ static void pmcraid_init_cmdblk(struct pmcraid_cmd *cmd, int index)
cmd->scsi_cmd = NULL;
cmd->release = 0;
cmd->completion_req = 0;
- cmd->sense_buffer = 0;
+ cmd->sense_buffer = NULL;
cmd->sense_buffer_dma = 0;
cmd->dma_handle = 0;
init_timer(&cmd->timer);
@@ -898,8 +898,7 @@ static void _pmcraid_fire_command(struct pmcraid_cmd *cmd)
/* driver writes lower 32-bit value of IOARCB address only */
mb();
- iowrite32(le32_to_cpu(cmd->ioa_cb->ioarcb.ioarcb_bus_addr),
- pinstance->ioarrin);
+ iowrite32(le64_to_cpu(cmd->ioa_cb->ioarcb.ioarcb_bus_addr), pinstance->ioarrin);
}
/**
@@ -1051,7 +1050,7 @@ static void pmcraid_get_fwversion(struct pmcraid_cmd *cmd)
offsetof(struct pmcraid_ioarcb,
add_data.u.ioadl[0]));
ioarcb->ioadl_length = cpu_to_le32(sizeof(struct pmcraid_ioadl_desc));
- ioarcb->ioarcb_bus_addr &= ~(0x1FULL);
+ ioarcb->ioarcb_bus_addr &= cpu_to_le64(~(0x1FULL));
ioarcb->request_flags0 |= NO_LINK_DESCS;
ioarcb->data_transfer_length = cpu_to_le32(data_size);
@@ -1077,7 +1076,7 @@ static void pmcraid_identify_hrrq(struct pmcraid_cmd *cmd)
struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
int index = cmd->hrrq_index;
__be64 hrrq_addr = cpu_to_be64(pinstance->hrrq_start_bus_addr[index]);
- u32 hrrq_size = cpu_to_be32(sizeof(u32) * PMCRAID_MAX_CMD);
+ __be32 hrrq_size = cpu_to_be32(sizeof(u32) * PMCRAID_MAX_CMD);
void (*done_function)(struct pmcraid_cmd *);
pmcraid_reinit_cmdblk(cmd);
@@ -1202,7 +1201,7 @@ static struct pmcraid_cmd *pmcraid_init_hcam
ioadl[0].flags |= IOADL_FLAGS_READ_LAST;
ioadl[0].data_len = cpu_to_le32(rcb_size);
- ioadl[0].address = cpu_to_le32(dma);
+ ioadl[0].address = cpu_to_le64(dma);
cmd->cmd_done = cmd_done;
return cmd;
@@ -1237,7 +1236,13 @@ static void pmcraid_prepare_cancel_cmd(
)
{
struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
- __be64 ioarcb_addr = cmd_to_cancel->ioa_cb->ioarcb.ioarcb_bus_addr;
+ __be64 ioarcb_addr;
+
+ /* IOARCB address of the command to be cancelled is given in
+ * cdb[2]..cdb[9] in Big-Endian format. Note that length bits in
+ * IOARCB address are not masked.
+ */
+ ioarcb_addr = cpu_to_be64(le64_to_cpu(cmd_to_cancel->ioa_cb->ioarcb.ioarcb_bus_addr));
/* Get the resource handle to where the command to be aborted has been
* sent.
@@ -1247,11 +1252,6 @@ static void pmcraid_prepare_cancel_cmd(
memset(ioarcb->cdb, 0, PMCRAID_MAX_CDB_LEN);
ioarcb->cdb[0] = PMCRAID_ABORT_CMD;
- /* IOARCB address of the command to be cancelled is given in
- * cdb[2]..cdb[9] is Big-Endian format. Note that length bits in
- * IOARCB address are not masked.
- */
- ioarcb_addr = cpu_to_be64(ioarcb_addr);
memcpy(&(ioarcb->cdb[2]), &ioarcb_addr, sizeof(ioarcb_addr));
}
@@ -1493,7 +1493,7 @@ static int pmcraid_notify_ccn(struct pmcraid_instance *pinstance)
{
return pmcraid_notify_aen(pinstance,
pinstance->ccn.msg,
- pinstance->ccn.hcam->data_len +
+ le32_to_cpu(pinstance->ccn.hcam->data_len) +
sizeof(struct pmcraid_hcam_hdr));
}
@@ -1508,7 +1508,7 @@ static int pmcraid_notify_ldn(struct pmcraid_instance *pinstance)
{
return pmcraid_notify_aen(pinstance,
pinstance->ldn.msg,
- pinstance->ldn.hcam->data_len +
+ le32_to_cpu(pinstance->ldn.hcam->data_len) +
sizeof(struct pmcraid_hcam_hdr));
}
@@ -1556,10 +1556,10 @@ static void pmcraid_handle_config_change(struct pmcraid_instance *pinstance)
pmcraid_info("CCN(%x): %x timestamp: %llx type: %x lost: %x flags: %x \
res: %x:%x:%x:%x\n",
- pinstance->ccn.hcam->ilid,
+ le32_to_cpu(pinstance->ccn.hcam->ilid),
pinstance->ccn.hcam->op_code,
- ((pinstance->ccn.hcam->timestamp1) |
- ((pinstance->ccn.hcam->timestamp2 & 0xffffffffLL) << 32)),
+ (le32_to_cpu(pinstance->ccn.hcam->timestamp1) |
+ ((le32_to_cpu(pinstance->ccn.hcam->timestamp2) & 0xffffffffLL) << 32)),
pinstance->ccn.hcam->notification_type,
pinstance->ccn.hcam->notification_lost,
pinstance->ccn.hcam->flags,
@@ -1570,7 +1570,7 @@ static void pmcraid_handle_config_change(struct pmcraid_instance *pinstance)
RES_IS_VSET(*cfg_entry) ?
(fw_version <= PMCRAID_FW_VERSION_1 ?
cfg_entry->unique_flags1 :
- cfg_entry->array_id & 0xFF) :
+ le16_to_cpu(cfg_entry->array_id) & 0xFF) :
RES_TARGET(cfg_entry->resource_address),
RES_LUN(cfg_entry->resource_address));
@@ -1658,7 +1658,7 @@ static void pmcraid_handle_config_change(struct pmcraid_instance *pinstance)
if (fw_version <= PMCRAID_FW_VERSION_1)
res->cfg_entry.unique_flags1 &= 0x7F;
else
- res->cfg_entry.array_id &= 0xFF;
+ res->cfg_entry.array_id &= cpu_to_le16(0xFF);
res->change_detected = RES_CHANGE_DEL;
res->cfg_entry.resource_handle =
PMCRAID_INVALID_RES_HANDLE;
@@ -1716,8 +1716,8 @@ static void pmcraid_ioasc_logger(u32 ioasc, struct pmcraid_cmd *cmd)
/* log the error string */
pmcraid_err("cmd [%x] for resource %x failed with %x(%s)\n",
cmd->ioa_cb->ioarcb.cdb[0],
- cmd->ioa_cb->ioarcb.resource_handle,
- le32_to_cpu(ioasc), error_info->error_string);
+ le32_to_cpu(cmd->ioa_cb->ioarcb.resource_handle),
+ ioasc, error_info->error_string);
}
/**
@@ -2034,7 +2034,7 @@ static void pmcraid_fail_outstanding_cmds(struct pmcraid_instance *pinstance)
cmd->ioa_cb->ioasa.ioasc =
cpu_to_le32(PMCRAID_IOASC_IOA_WAS_RESET);
cmd->ioa_cb->ioasa.ilid =
- cpu_to_be32(PMCRAID_DRIVER_ILID);
+ cpu_to_le32(PMCRAID_DRIVER_ILID);
/* In case the command timer is still running */
del_timer(&cmd->timer);
@@ -2373,46 +2373,43 @@ static int pmcraid_reset_reload(
spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
if (pinstance->ioa_state == IOA_STATE_DEAD) {
- spin_unlock_irqrestore(pinstance->host->host_lock,
- lock_flags);
pmcraid_info("reset_reload: IOA is dead\n");
- return reset;
- } else if (pinstance->ioa_state == target_state) {
+ goto out_unlock;
+ }
+
+ if (pinstance->ioa_state == target_state) {
reset = 0;
+ goto out_unlock;
}
}
- if (reset) {
- pmcraid_info("reset_reload: proceeding with reset\n");
- scsi_block_requests(pinstance->host);
- reset_cmd = pmcraid_get_free_cmd(pinstance);
-
- if (reset_cmd == NULL) {
- pmcraid_err("no free cmnd for reset_reload\n");
- spin_unlock_irqrestore(pinstance->host->host_lock,
- lock_flags);
- return reset;
- }
+ pmcraid_info("reset_reload: proceeding with reset\n");
+ scsi_block_requests(pinstance->host);
+ reset_cmd = pmcraid_get_free_cmd(pinstance);
+ if (reset_cmd == NULL) {
+ pmcraid_err("no free cmnd for reset_reload\n");
+ goto out_unlock;
+ }
- if (shutdown_type == SHUTDOWN_NORMAL)
- pinstance->ioa_bringdown = 1;
+ if (shutdown_type == SHUTDOWN_NORMAL)
+ pinstance->ioa_bringdown = 1;
- pinstance->ioa_shutdown_type = shutdown_type;
- pinstance->reset_cmd = reset_cmd;
- pinstance->force_ioa_reset = reset;
- pmcraid_info("reset_reload: initiating reset\n");
- pmcraid_ioa_reset(reset_cmd);
- spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
- pmcraid_info("reset_reload: waiting for reset to complete\n");
- wait_event(pinstance->reset_wait_q,
- !pinstance->ioa_reset_in_progress);
+ pinstance->ioa_shutdown_type = shutdown_type;
+ pinstance->reset_cmd = reset_cmd;
+ pinstance->force_ioa_reset = reset;
+ pmcraid_info("reset_reload: initiating reset\n");
+ pmcraid_ioa_reset(reset_cmd);
+ spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
+ pmcraid_info("reset_reload: waiting for reset to complete\n");
+ wait_event(pinstance->reset_wait_q,
+ !pinstance->ioa_reset_in_progress);
- pmcraid_info("reset_reload: reset is complete !!\n");
- scsi_unblock_requests(pinstance->host);
- if (pinstance->ioa_state == target_state)
- reset = 0;
- }
+ pmcraid_info("reset_reload: reset is complete !!\n");
+ scsi_unblock_requests(pinstance->host);
+ return pinstance->ioa_state != target_state;
+out_unlock:
+ spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
return reset;
}
@@ -2529,7 +2526,7 @@ static void pmcraid_cancel_all(struct pmcraid_cmd *cmd, u32 sense)
ioarcb->ioadl_bus_addr = 0;
ioarcb->ioadl_length = 0;
ioarcb->data_transfer_length = 0;
- ioarcb->ioarcb_bus_addr &= (~0x1FULL);
+ ioarcb->ioarcb_bus_addr &= cpu_to_le64((~0x1FULL));
/* writing to IOARRIN must be protected by host_lock, as mid-layer
* schedule queuecommand while we are doing this
@@ -2692,8 +2689,8 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
* mid-layer
*/
if (ioasa->auto_sense_length != 0) {
- short sense_len = ioasa->auto_sense_length;
- int data_size = min_t(u16, le16_to_cpu(sense_len),
+ short sense_len = le16_to_cpu(ioasa->auto_sense_length);
+ int data_size = min_t(u16, sense_len,
SCSI_SENSE_BUFFERSIZE);
memcpy(scsi_cmd->sense_buffer,
@@ -2915,7 +2912,7 @@ static struct pmcraid_cmd *pmcraid_abort_cmd(struct pmcraid_cmd *cmd)
pmcraid_info("aborting command CDB[0]= %x with index = %d\n",
cmd->ioa_cb->ioarcb.cdb[0],
- cmd->ioa_cb->ioarcb.response_handle >> 2);
+ le32_to_cpu(cmd->ioa_cb->ioarcb.response_handle) >> 2);
init_completion(&cancel_cmd->wait_for_completion);
cancel_cmd->completion_req = 1;
@@ -3140,9 +3137,8 @@ pmcraid_init_ioadls(struct pmcraid_cmd *cmd, int sgcount)
int ioadl_count = 0;
if (ioarcb->add_cmd_param_length)
- ioadl_count = DIV_ROUND_UP(ioarcb->add_cmd_param_length, 16);
- ioarcb->ioadl_length =
- sizeof(struct pmcraid_ioadl_desc) * sgcount;
+ ioadl_count = DIV_ROUND_UP(le16_to_cpu(ioarcb->add_cmd_param_length), 16);
+ ioarcb->ioadl_length = cpu_to_le32(sizeof(struct pmcraid_ioadl_desc) * sgcount);
if ((sgcount + ioadl_count) > (ARRAY_SIZE(ioarcb->add_data.u.ioadl))) {
/* external ioadls start at offset 0x80 from control_block
@@ -3150,7 +3146,7 @@ pmcraid_init_ioadls(struct pmcraid_cmd *cmd, int sgcount)
* It is necessary to indicate to firmware that driver is
* using ioadls to be treated as external to IOARCB.
*/
- ioarcb->ioarcb_bus_addr &= ~(0x1FULL);
+ ioarcb->ioarcb_bus_addr &= cpu_to_le64(~(0x1FULL));
ioarcb->ioadl_bus_addr =
cpu_to_le64((cmd->ioa_cb_bus_addr) +
offsetof(struct pmcraid_ioarcb,
@@ -3164,7 +3160,7 @@ pmcraid_init_ioadls(struct pmcraid_cmd *cmd, int sgcount)
ioadl = &ioarcb->add_data.u.ioadl[ioadl_count];
ioarcb->ioarcb_bus_addr |=
- DIV_ROUND_CLOSEST(sgcount + ioadl_count, 8);
+ cpu_to_le64(DIV_ROUND_CLOSEST(sgcount + ioadl_count, 8));
}
return ioadl;
@@ -3325,7 +3321,7 @@ static struct pmcraid_sglist *pmcraid_alloc_sglist(int buflen)
*/
static int pmcraid_copy_sglist(
struct pmcraid_sglist *sglist,
- unsigned long buffer,
+ void __user *buffer,
u32 len,
int direction
)
@@ -3346,11 +3342,9 @@ static int pmcraid_copy_sglist(
kaddr = kmap(page);
if (direction == DMA_TO_DEVICE)
- rc = __copy_from_user(kaddr,
- (void *)buffer,
- bsize_elem);
+ rc = copy_from_user(kaddr, buffer, bsize_elem);
else
- rc = __copy_to_user((void *)buffer, kaddr, bsize_elem);
+ rc = copy_to_user(buffer, kaddr, bsize_elem);
kunmap(page);
@@ -3368,13 +3362,9 @@ static int pmcraid_copy_sglist(
kaddr = kmap(page);
if (direction == DMA_TO_DEVICE)
- rc = __copy_from_user(kaddr,
- (void *)buffer,
- len % bsize_elem);
+ rc = copy_from_user(kaddr, buffer, len % bsize_elem);
else
- rc = __copy_to_user((void *)buffer,
- kaddr,
- len % bsize_elem);
+ rc = copy_to_user(buffer, kaddr, len % bsize_elem);
kunmap(page);
@@ -3496,7 +3486,7 @@ static int pmcraid_queuecommand_lck(
RES_IS_VSET(res->cfg_entry) ?
(fw_version <= PMCRAID_FW_VERSION_1 ?
res->cfg_entry.unique_flags1 :
- res->cfg_entry.array_id & 0xFF) :
+ le16_to_cpu(res->cfg_entry.array_id) & 0xFF) :
RES_TARGET(res->cfg_entry.resource_address),
RES_LUN(res->cfg_entry.resource_address));
@@ -3652,17 +3642,17 @@ static long pmcraid_ioctl_passthrough(
struct pmcraid_instance *pinstance,
unsigned int ioctl_cmd,
unsigned int buflen,
- unsigned long arg
+ void __user *arg
)
{
struct pmcraid_passthrough_ioctl_buffer *buffer;
struct pmcraid_ioarcb *ioarcb;
struct pmcraid_cmd *cmd;
struct pmcraid_cmd *cancel_cmd;
- unsigned long request_buffer;
+ void __user *request_buffer;
unsigned long request_offset;
unsigned long lock_flags;
- void *ioasa;
+ void __user *ioasa;
u32 ioasc;
int request_size;
int buffer_size;
@@ -3701,13 +3691,10 @@ static long pmcraid_ioctl_passthrough(
request_buffer = arg + request_offset;
- rc = __copy_from_user(buffer,
- (struct pmcraid_passthrough_ioctl_buffer *) arg,
+ rc = copy_from_user(buffer, arg,
sizeof(struct pmcraid_passthrough_ioctl_buffer));
- ioasa =
- (void *)(arg +
- offsetof(struct pmcraid_passthrough_ioctl_buffer, ioasa));
+ ioasa = arg + offsetof(struct pmcraid_passthrough_ioctl_buffer, ioasa);
if (rc) {
pmcraid_err("ioctl: can't copy passthrough buffer\n");
@@ -3715,7 +3702,7 @@ static long pmcraid_ioctl_passthrough(
goto out_free_buffer;
}
- request_size = buffer->ioarcb.data_transfer_length;
+ request_size = le32_to_cpu(buffer->ioarcb.data_transfer_length);
if (buffer->ioarcb.request_flags0 & TRANSFER_DIR_WRITE) {
access = VERIFY_READ;
@@ -3725,20 +3712,14 @@ static long pmcraid_ioctl_passthrough(
direction = DMA_FROM_DEVICE;
}
- if (request_size > 0) {
- rc = access_ok(access, arg, request_offset + request_size);
-
- if (!rc) {
- rc = -EFAULT;
- goto out_free_buffer;
- }
- } else if (request_size < 0) {
+ if (request_size < 0) {
rc = -EINVAL;
goto out_free_buffer;
}
/* check if we have any additional command parameters */
- if (buffer->ioarcb.add_cmd_param_length > PMCRAID_ADD_CMD_PARAM_LEN) {
+ if (le16_to_cpu(buffer->ioarcb.add_cmd_param_length)
+ > PMCRAID_ADD_CMD_PARAM_LEN) {
rc = -EINVAL;
goto out_free_buffer;
}
@@ -3770,7 +3751,7 @@ static long pmcraid_ioctl_passthrough(
buffer->ioarcb.add_cmd_param_offset;
memcpy(ioarcb->add_data.u.add_cmd_params,
buffer->ioarcb.add_data.u.add_cmd_params,
- buffer->ioarcb.add_cmd_param_length);
+ le16_to_cpu(buffer->ioarcb.add_cmd_param_length));
}
/* set hrrq number where the IOA should respond to. Note that all cmds
@@ -3840,10 +3821,10 @@ static long pmcraid_ioctl_passthrough(
wait_for_completion(&cmd->wait_for_completion);
} else if (!wait_for_completion_timeout(
&cmd->wait_for_completion,
- msecs_to_jiffies(buffer->ioarcb.cmd_timeout * 1000))) {
+ msecs_to_jiffies(le16_to_cpu(buffer->ioarcb.cmd_timeout) * 1000))) {
pmcraid_info("aborting cmd %d (CDB[0] = %x) due to timeout\n",
- le32_to_cpu(cmd->ioa_cb->ioarcb.response_handle >> 2),
+ le32_to_cpu(cmd->ioa_cb->ioarcb.response_handle) >> 2,
cmd->ioa_cb->ioarcb.cdb[0]);
spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
@@ -3852,7 +3833,7 @@ static long pmcraid_ioctl_passthrough(
if (cancel_cmd) {
wait_for_completion(&cancel_cmd->wait_for_completion);
- ioasc = cancel_cmd->ioa_cb->ioasa.ioasc;
+ ioasc = le32_to_cpu(cancel_cmd->ioa_cb->ioasa.ioasc);
pmcraid_return_cmd(cancel_cmd);
/* if abort task couldn't find the command i.e it got
@@ -3941,11 +3922,6 @@ static long pmcraid_ioctl_driver(
{
int rc = -ENOSYS;
- if (!access_ok(VERIFY_READ, user_buffer, _IOC_SIZE(cmd))) {
- pmcraid_err("ioctl_driver: access fault in request buffer\n");
- return -EFAULT;
- }
-
switch (cmd) {
case PMCRAID_IOCTL_RESET_ADAPTER:
pmcraid_reset_bringup(pinstance);
@@ -3977,8 +3953,7 @@ static int pmcraid_check_ioctl_buffer(
struct pmcraid_ioctl_header *hdr
)
{
- int rc = 0;
- int access = VERIFY_READ;
+ int rc;
if (copy_from_user(hdr, arg, sizeof(struct pmcraid_ioctl_header))) {
pmcraid_err("couldn't copy ioctl header from user buffer\n");
@@ -3994,19 +3969,6 @@ static int pmcraid_check_ioctl_buffer(
return -EINVAL;
}
- /* check for appropriate buffer access */
- if ((_IOC_DIR(cmd) & _IOC_READ) == _IOC_READ)
- access = VERIFY_WRITE;
-
- rc = access_ok(access,
- (arg + sizeof(struct pmcraid_ioctl_header)),
- hdr->buffer_length);
- if (!rc) {
- pmcraid_err("access failed for user buffer of size %d\n",
- hdr->buffer_length);
- return -EFAULT;
- }
-
return 0;
}
@@ -4021,6 +3983,7 @@ static long pmcraid_chr_ioctl(
{
struct pmcraid_instance *pinstance = NULL;
struct pmcraid_ioctl_header *hdr = NULL;
+ void __user *argp = (void __user *)arg;
int retval = -ENOTTY;
hdr = kmalloc(sizeof(struct pmcraid_ioctl_header), GFP_KERNEL);
@@ -4030,7 +3993,7 @@ static long pmcraid_chr_ioctl(
return -ENOMEM;
}
- retval = pmcraid_check_ioctl_buffer(cmd, (void *)arg, hdr);
+ retval = pmcraid_check_ioctl_buffer(cmd, argp, hdr);
if (retval) {
pmcraid_info("chr_ioctl: header check failed\n");
@@ -4055,10 +4018,8 @@ static long pmcraid_chr_ioctl(
if (cmd == PMCRAID_IOCTL_DOWNLOAD_MICROCODE)
scsi_block_requests(pinstance->host);
- retval = pmcraid_ioctl_passthrough(pinstance,
- cmd,
- hdr->buffer_length,
- arg);
+ retval = pmcraid_ioctl_passthrough(pinstance, cmd,
+ hdr->buffer_length, argp);
if (cmd == PMCRAID_IOCTL_DOWNLOAD_MICROCODE)
scsi_unblock_requests(pinstance->host);
@@ -4066,10 +4027,8 @@ static long pmcraid_chr_ioctl(
case PMCRAID_DRIVER_IOCTL:
arg += sizeof(struct pmcraid_ioctl_header);
- retval = pmcraid_ioctl_driver(pinstance,
- cmd,
- hdr->buffer_length,
- (void __user *)arg);
+ retval = pmcraid_ioctl_driver(pinstance, cmd,
+ hdr->buffer_length, argp);
break;
default:
@@ -4470,7 +4429,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
if (fw_version <= PMCRAID_FW_VERSION_1)
target = res->cfg_entry.unique_flags1;
else
- target = res->cfg_entry.array_id & 0xFF;
+ target = le16_to_cpu(res->cfg_entry.array_id) & 0xFF;
lun = PMCRAID_VSET_LUN_ID;
} else {
bus = PMCRAID_PHYS_BUS_ID;
@@ -4509,7 +4468,7 @@ static void pmcraid_tasklet_function(unsigned long instance)
unsigned long host_lock_flags;
spinlock_t *lockp; /* hrrq buffer lock */
int id;
- __le32 resp;
+ u32 resp;
hrrq_vector = (struct pmcraid_isr_param *)instance;
pinstance = hrrq_vector->drv_inst;
@@ -4833,7 +4792,7 @@ static int pmcraid_allocate_host_rrqs(struct pmcraid_instance *pinstance)
buffer_size,
&(pinstance->hrrq_start_bus_addr[i]));
- if (pinstance->hrrq_start[i] == 0) {
+ if (!pinstance->hrrq_start[i]) {
pmcraid_err("pci_alloc failed for hrrq vector : %d\n",
i);
pmcraid_release_host_rrqs(pinstance, i);
@@ -5549,8 +5508,7 @@ static void pmcraid_set_timestamp(struct pmcraid_cmd *cmd)
struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
__be32 time_stamp_len = cpu_to_be32(PMCRAID_TIMESTAMP_LEN);
struct pmcraid_ioadl_desc *ioadl = ioarcb->add_data.u.ioadl;
-
- __le64 timestamp;
+ u64 timestamp;
timestamp = ktime_get_real_seconds() * 1000;
@@ -5572,7 +5530,7 @@ static void pmcraid_set_timestamp(struct pmcraid_cmd *cmd)
offsetof(struct pmcraid_ioarcb,
add_data.u.ioadl[0]));
ioarcb->ioadl_length = cpu_to_le32(sizeof(struct pmcraid_ioadl_desc));
- ioarcb->ioarcb_bus_addr &= ~(0x1FULL);
+ ioarcb->ioarcb_bus_addr &= cpu_to_le64(~(0x1FULL));
ioarcb->request_flags0 |= NO_LINK_DESCS;
ioarcb->request_flags0 |= TRANSFER_DIR_WRITE;
@@ -5631,7 +5589,7 @@ static void pmcraid_init_res_table(struct pmcraid_cmd *cmd)
list_for_each_entry_safe(res, temp, &pinstance->used_res_q, queue)
list_move_tail(&res->queue, &old_res);
- for (i = 0; i < pinstance->cfg_table->num_entries; i++) {
+ for (i = 0; i < le16_to_cpu(pinstance->cfg_table->num_entries); i++) {
if (be16_to_cpu(pinstance->inq_data->fw_version) <=
PMCRAID_FW_VERSION_1)
cfgte = &pinstance->cfg_table->entries[i];
@@ -5686,7 +5644,7 @@ static void pmcraid_init_res_table(struct pmcraid_cmd *cmd)
res->cfg_entry.resource_type,
(fw_version <= PMCRAID_FW_VERSION_1 ?
res->cfg_entry.unique_flags1 :
- res->cfg_entry.array_id & 0xFF),
+ le16_to_cpu(res->cfg_entry.array_id) & 0xFF),
le32_to_cpu(res->cfg_entry.resource_address));
}
}
@@ -5724,7 +5682,7 @@ static void pmcraid_querycfg(struct pmcraid_cmd *cmd)
struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
struct pmcraid_ioadl_desc *ioadl = ioarcb->add_data.u.ioadl;
struct pmcraid_instance *pinstance = cmd->drv_inst;
- int cfg_table_size = cpu_to_be32(sizeof(struct pmcraid_config_table));
+ __be32 cfg_table_size = cpu_to_be32(sizeof(struct pmcraid_config_table));
if (be16_to_cpu(pinstance->inq_data->fw_version) <=
PMCRAID_FW_VERSION_1)
@@ -5749,7 +5707,7 @@ static void pmcraid_querycfg(struct pmcraid_cmd *cmd)
offsetof(struct pmcraid_ioarcb,
add_data.u.ioadl[0]));
ioarcb->ioadl_length = cpu_to_le32(sizeof(struct pmcraid_ioadl_desc));
- ioarcb->ioarcb_bus_addr &= ~(0x1FULL);
+ ioarcb->ioarcb_bus_addr &= cpu_to_le64(~0x1FULL);
ioarcb->request_flags0 |= NO_LINK_DESCS;
ioarcb->data_transfer_length =
diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
index 568b18a2f47d..01eb2bc16dc1 100644
--- a/drivers/scsi/pmcraid.h
+++ b/drivers/scsi/pmcraid.h
@@ -554,7 +554,7 @@ struct pmcraid_inquiry_data {
__u8 add_page_len;
__u8 length;
__u8 reserved2;
- __le16 fw_version;
+ __be16 fw_version;
__u8 reserved3[16];
};
@@ -697,13 +697,13 @@ struct pmcraid_instance {
dma_addr_t hrrq_start_bus_addr[PMCRAID_NUM_MSIX_VECTORS];
/* Pointer to 1st entry of HRRQ */
- __be32 *hrrq_start[PMCRAID_NUM_MSIX_VECTORS];
+ __le32 *hrrq_start[PMCRAID_NUM_MSIX_VECTORS];
/* Pointer to last entry of HRRQ */
- __be32 *hrrq_end[PMCRAID_NUM_MSIX_VECTORS];
+ __le32 *hrrq_end[PMCRAID_NUM_MSIX_VECTORS];
/* Pointer to current pointer of hrrq */
- __be32 *hrrq_curr[PMCRAID_NUM_MSIX_VECTORS];
+ __le32 *hrrq_curr[PMCRAID_NUM_MSIX_VECTORS];
/* Lock for HRRQ access */
spinlock_t hrrq_lock[PMCRAID_NUM_MSIX_VECTORS];
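The pmcraid annotations above make the endianness of on-wire fields explicit: masks are applied in little-endian form with cpu_to_le64(), and values are converted with le16_to_cpu()/le32_to_cpu() before any arithmetic. A small sketch of the idiom on a hypothetical descriptor:

	#include <linux/types.h>
	#include <asm/byteorder.h>

	struct desc {
		__le64 bus_addr;	/* device-visible, little-endian */
		__le16 array_id;
	};

	static void desc_clear_len_bits(struct desc *d)
	{
		/* Mask in wire order; never mix a __le64 field with a plain u64 constant. */
		d->bus_addr &= cpu_to_le64(~0x1FULL);
	}

	static u8 desc_target(const struct desc *d)
	{
		/* Convert to CPU order before doing arithmetic or comparisons. */
		return le16_to_cpu(d->array_id) & 0xFF;
	}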
diff --git a/drivers/scsi/qedf/qedf_debugfs.c b/drivers/scsi/qedf/qedf_debugfs.c
index cb08b625c594..00a1d6405ebe 100644
--- a/drivers/scsi/qedf/qedf_debugfs.c
+++ b/drivers/scsi/qedf/qedf_debugfs.c
@@ -449,7 +449,7 @@ const struct file_operations qedf_dbg_fops[] = {
qedf_dbg_fileops(qedf, clear_stats),
qedf_dbg_fileops_seq(qedf, offload_stats),
/* This must be last */
- { NULL, NULL },
+ { },
};
#else /* CONFIG_DEBUG_FS */
diff --git a/drivers/scsi/qedi/qedi_debugfs.c b/drivers/scsi/qedi/qedi_debugfs.c
index 59417199bf36..39d77818a677 100644
--- a/drivers/scsi/qedi/qedi_debugfs.c
+++ b/drivers/scsi/qedi/qedi_debugfs.c
@@ -240,5 +240,5 @@ const struct file_operations qedi_dbg_fops[] = {
qedi_dbg_fileops_seq(qedi, gbl_ctx),
qedi_dbg_fileops(qedi, do_not_recover),
qedi_dbg_fileops_seq(qedi, io_trace),
- { NULL, NULL },
+ { },
};
diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c
index d1de172bebac..3548d46f9b27 100644
--- a/drivers/scsi/qedi/qedi_iscsi.c
+++ b/drivers/scsi/qedi/qedi_iscsi.c
@@ -1370,7 +1370,7 @@ static void qedi_cleanup_task(struct iscsi_task *task)
{
if (!task->sc || task->state == ISCSI_TASK_PENDING) {
QEDI_INFO(NULL, QEDI_LOG_IO, "Returning ref_cnt=%d\n",
- atomic_read(&task->refcount));
+ refcount_read(&task->refcount));
return;
}
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 435ff7fd6384..7c8d6c54ab70 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -695,7 +695,7 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
case 0x2025e:
if (!IS_P3P_TYPE(ha) || vha != base_vha) {
ql_log(ql_log_info, vha, 0x7071,
- "FCoE ctx reset no supported.\n");
+ "FCoE ctx reset not supported.\n");
return -EPERM;
}
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index b6e40fd4c3c1..16d1cd50feed 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -1822,7 +1822,7 @@ qla24xx_process_bidir_cmd(struct bsg_job *bsg_job)
/* Check if operating mode is P2P */
if (ha->operating_mode != P2P) {
ql_log(ql_log_warn, vha, 0x70a4,
- "Host is operating mode is not P2p\n");
+ "Host operating mode is not P2p\n");
rval = EXT_STATUS_INVALID_CFG;
goto done;
}
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index ab0f873fd6a1..9bc9aa9e164a 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -144,7 +144,7 @@ qla2x00_chk_ms_status(scsi_qla_host_t *vha, ms_iocb_entry_t *ms_pkt,
if (ct_rsp->header.response !=
cpu_to_be16(CT_ACCEPT_RESPONSE)) {
ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2077,
- "%s failed rejected request on port_id: %02x%02x%02x Compeltion status 0x%x, response 0x%x\n",
+ "%s failed rejected request on port_id: %02x%02x%02x Completion status 0x%x, response 0x%x\n",
routine, vha->d_id.b.domain,
vha->d_id.b.area, vha->d_id.b.al_pa,
comp_status, ct_rsp->header.response);
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index f9d2fe7b1ade..034743309ada 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -2289,7 +2289,7 @@ qla2x00_chip_diag(scsi_qla_host_t *vha)
goto chip_diag_failed;
/* Check product ID of chip */
- ql_dbg(ql_dbg_init, vha, 0x007d, "Checking product Id of chip.\n");
+ ql_dbg(ql_dbg_init, vha, 0x007d, "Checking product ID of chip.\n");
mb[1] = RD_MAILBOX_REG(ha, reg, 1);
mb[2] = RD_MAILBOX_REG(ha, reg, 2);
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 3203367a4f42..aac03504d9a3 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -2100,14 +2100,14 @@ qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt,
case CS_DATA_OVERRUN:
ql_dbg(ql_dbg_user, vha, 0x70b1,
- "Command completed with date overrun thread_id=%d\n",
+ "Command completed with data overrun thread_id=%d\n",
thread_id);
rval = EXT_STATUS_DATA_OVERRUN;
break;
case CS_DATA_UNDERRUN:
ql_dbg(ql_dbg_user, vha, 0x70b2,
- "Command completed with date underrun thread_id=%d\n",
+ "Command completed with data underrun thread_id=%d\n",
thread_id);
rval = EXT_STATUS_DATA_UNDERRUN;
break;
@@ -2134,7 +2134,7 @@ qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt,
case CS_BIDIR_RD_UNDERRUN:
ql_dbg(ql_dbg_user, vha, 0x70b6,
- "Command completed with read data data underrun "
+ "Command completed with read data underrun "
"thread_id=%d\n", thread_id);
rval = EXT_STATUS_DATA_UNDERRUN;
break;
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 83d61d2142e9..1c7957903283 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -423,7 +423,6 @@ static void qla2x00_free_req_que(struct qla_hw_data *ha, struct req_que *req)
kfree(req->outstanding_cmds);
kfree(req);
- req = NULL;
}
static void qla2x00_free_rsp_que(struct qla_hw_data *ha, struct rsp_que *rsp)
@@ -439,7 +438,6 @@ static void qla2x00_free_rsp_que(struct qla_hw_data *ha, struct rsp_que *rsp)
rsp->ring, rsp->dma);
}
kfree(rsp);
- rsp = NULL;
}
static void qla2x00_free_queues(struct qla_hw_data *ha)
@@ -653,7 +651,6 @@ qla2x00_sp_free_dma(void *ptr)
ha->gbl_dsd_inuse -= ctx1->dsd_use_cnt;
ha->gbl_dsd_avail += ctx1->dsd_use_cnt;
mempool_free(ctx1, ha->ctx_mempool);
- ctx1 = NULL;
}
CMD_SP(cmd) = NULL;
@@ -3256,7 +3253,6 @@ iospace_config_failed:
}
pci_release_selected_regions(ha->pdev, ha->bars);
kfree(ha);
- ha = NULL;
probe_out:
pci_disable_device(pdev);
@@ -3504,7 +3500,6 @@ qla2x00_remove_one(struct pci_dev *pdev)
pci_release_selected_regions(ha->pdev, ha->bars);
kfree(ha);
- ha = NULL;
pci_disable_pcie_error_reporting(pdev);
@@ -3568,7 +3563,6 @@ void qla2x00_free_fcports(struct scsi_qla_host *vha)
list_del(&fcport->list);
qla2x00_clear_loop_id(fcport);
kfree(fcport);
- fcport = NULL;
}
}
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
index 4180d6d9fe78..5d6d158bbfd6 100644
--- a/drivers/scsi/qla4xxx/ql4_init.c
+++ b/drivers/scsi/qla4xxx/ql4_init.c
@@ -389,7 +389,7 @@ void qla4xxx_alloc_fw_dump(struct scsi_qla_host *ha)
goto alloc_cleanup;
DEBUG2(ql4_printk(KERN_INFO, ha,
- "Minidump Tempalate Size = 0x%x KB\n",
+ "Minidump Template Size = 0x%x KB\n",
ha->fw_dump_tmplt_size));
DEBUG2(ql4_printk(KERN_INFO, ha,
"Total Minidump size = 0x%x KB\n", ha->fw_dump_size));
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index ac52150d1569..64c6fa563fdb 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -8664,7 +8664,6 @@ static int qla4xxx_probe_adapter(struct pci_dev *pdev,
init_completion(&ha->disable_acb_comp);
init_completion(&ha->idc_comp);
init_completion(&ha->link_up_comp);
- init_completion(&ha->disable_acb_comp);
spin_lock_init(&ha->hardware_lock);
spin_lock_init(&ha->work_lock);
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 2db412dd4b44..ecc07dab893d 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -46,6 +46,8 @@
#include <trace/events/scsi.h>
+#include <asm/unaligned.h>
+
static void scsi_eh_done(struct scsi_cmnd *scmd);
/*
@@ -162,13 +164,7 @@ scmd_eh_abort_handler(struct work_struct *work)
}
}
- if (!scsi_eh_scmd_add(scmd, 0)) {
- SCSI_LOG_ERROR_RECOVERY(3,
- scmd_printk(KERN_WARNING, scmd,
- "terminate aborted command\n"));
- set_host_byte(scmd, DID_TIME_OUT);
- scsi_finish_command(scmd);
- }
+ scsi_eh_scmd_add(scmd);
}
/**
@@ -188,7 +184,6 @@ scsi_abort_command(struct scsi_cmnd *scmd)
/*
* Retry after abort failed, escalate to next level.
*/
- scmd->eh_eflags &= ~SCSI_EH_ABORT_SCHEDULED;
SCSI_LOG_ERROR_RECOVERY(3,
scmd_printk(KERN_INFO, scmd,
"previous abort failed\n"));
@@ -196,19 +191,7 @@ scsi_abort_command(struct scsi_cmnd *scmd)
return FAILED;
}
- /*
- * Do not try a command abort if
- * SCSI EH has already started.
- */
spin_lock_irqsave(shost->host_lock, flags);
- if (scsi_host_in_recovery(shost)) {
- spin_unlock_irqrestore(shost->host_lock, flags);
- SCSI_LOG_ERROR_RECOVERY(3,
- scmd_printk(KERN_INFO, scmd,
- "not aborting, host in recovery\n"));
- return FAILED;
- }
-
if (shost->eh_deadline != -1 && !shost->last_reset)
shost->last_reset = jiffies;
spin_unlock_irqrestore(shost->host_lock, flags);
@@ -221,40 +204,47 @@ scsi_abort_command(struct scsi_cmnd *scmd)
}
/**
- * scsi_eh_scmd_add - add scsi cmd to error handling.
+ * scsi_eh_reset - call into ->eh_action to reset internal counters
* @scmd: scmd to run eh on.
- * @eh_flag: optional SCSI_EH flag.
*
- * Return value:
- * 0 on failure.
+ * The scsi driver might be carrying internal state about the
+ * devices, so we need to call into the driver to reset the
+ * internal state once the error handler is started.
*/
-int scsi_eh_scmd_add(struct scsi_cmnd *scmd, int eh_flag)
+static void scsi_eh_reset(struct scsi_cmnd *scmd)
+{
+ if (!blk_rq_is_passthrough(scmd->request)) {
+ struct scsi_driver *sdrv = scsi_cmd_to_driver(scmd);
+ if (sdrv->eh_reset)
+ sdrv->eh_reset(scmd);
+ }
+}
+
+/**
+ * scsi_eh_scmd_add - add scsi cmd to error handling.
+ * @scmd: scmd to run eh on.
+ */
+void scsi_eh_scmd_add(struct scsi_cmnd *scmd)
{
struct Scsi_Host *shost = scmd->device->host;
unsigned long flags;
- int ret = 0;
+ int ret;
- if (!shost->ehandler)
- return 0;
+ WARN_ON_ONCE(!shost->ehandler);
spin_lock_irqsave(shost->host_lock, flags);
- if (scsi_host_set_state(shost, SHOST_RECOVERY))
- if (scsi_host_set_state(shost, SHOST_CANCEL_RECOVERY))
- goto out_unlock;
-
+ if (scsi_host_set_state(shost, SHOST_RECOVERY)) {
+ ret = scsi_host_set_state(shost, SHOST_CANCEL_RECOVERY);
+ WARN_ON_ONCE(ret);
+ }
if (shost->eh_deadline != -1 && !shost->last_reset)
shost->last_reset = jiffies;
- ret = 1;
- if (scmd->eh_eflags & SCSI_EH_ABORT_SCHEDULED)
- eh_flag &= ~SCSI_EH_CANCEL_CMD;
- scmd->eh_eflags |= eh_flag;
+ scsi_eh_reset(scmd);
list_add_tail(&scmd->eh_entry, &shost->eh_cmd_q);
shost->host_failed++;
scsi_eh_wakeup(shost);
- out_unlock:
spin_unlock_irqrestore(shost->host_lock, flags);
- return ret;
}
/**
@@ -283,13 +273,10 @@ enum blk_eh_timer_return scsi_times_out(struct request *req)
rtn = host->hostt->eh_timed_out(scmd);
if (rtn == BLK_EH_NOT_HANDLED) {
- if (!host->hostt->no_async_abort &&
- scsi_abort_command(scmd) == SUCCESS)
- return BLK_EH_NOT_HANDLED;
-
- set_host_byte(scmd, DID_TIME_OUT);
- if (!scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD))
- rtn = BLK_EH_HANDLED;
+ if (scsi_abort_command(scmd) != SUCCESS) {
+ set_host_byte(scmd, DID_TIME_OUT);
+ scsi_eh_scmd_add(scmd);
+ }
}
return rtn;
@@ -341,7 +328,7 @@ static inline void scsi_eh_prt_fail_stats(struct Scsi_Host *shost,
list_for_each_entry(scmd, work_q, eh_entry) {
if (scmd->device == sdev) {
++total_failures;
- if (scmd->eh_eflags & SCSI_EH_CANCEL_CMD)
+ if (scmd->eh_eflags & SCSI_EH_ABORT_SCHEDULED)
++cmd_cancel;
else
++cmd_failed;
@@ -931,6 +918,7 @@ void scsi_eh_prep_cmnd(struct scsi_cmnd *scmd, struct scsi_eh_save *ses,
ses->result = scmd->result;
ses->underflow = scmd->underflow;
ses->prot_op = scmd->prot_op;
+ ses->eh_eflags = scmd->eh_eflags;
scmd->prot_op = SCSI_PROT_NORMAL;
scmd->eh_eflags = 0;
@@ -994,6 +982,7 @@ void scsi_eh_restore_cmnd(struct scsi_cmnd* scmd, struct scsi_eh_save *ses)
scmd->result = ses->result;
scmd->underflow = ses->underflow;
scmd->prot_op = ses->prot_op;
+ scmd->eh_eflags = ses->eh_eflags;
}
EXPORT_SYMBOL(scsi_eh_restore_cmnd);
@@ -1126,7 +1115,6 @@ static int scsi_eh_action(struct scsi_cmnd *scmd, int rtn)
*/
void scsi_eh_finish_cmd(struct scsi_cmnd *scmd, struct list_head *done_q)
{
- scmd->eh_eflags = 0;
list_move_tail(&scmd->eh_entry, done_q);
}
EXPORT_SYMBOL(scsi_eh_finish_cmd);
@@ -1163,8 +1151,7 @@ int scsi_eh_get_sense(struct list_head *work_q,
* should not get sense.
*/
list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
- if ((scmd->eh_eflags & SCSI_EH_CANCEL_CMD) ||
- (scmd->eh_eflags & SCSI_EH_ABORT_SCHEDULED) ||
+ if ((scmd->eh_eflags & SCSI_EH_ABORT_SCHEDULED) ||
SCSI_SENSE_VALID(scmd))
continue;
@@ -1304,61 +1291,6 @@ static int scsi_eh_test_devices(struct list_head *cmd_list,
return list_empty(work_q);
}
-
-/**
- * scsi_eh_abort_cmds - abort pending commands.
- * @work_q: &list_head for pending commands.
- * @done_q: &list_head for processed commands.
- *
- * Decription:
- * Try and see whether or not it makes sense to try and abort the
- * running command. This only works out to be the case if we have one
- * command that has timed out. If the command simply failed, it makes
- * no sense to try and abort the command, since as far as the shost
- * adapter is concerned, it isn't running.
- */
-static int scsi_eh_abort_cmds(struct list_head *work_q,
- struct list_head *done_q)
-{
- struct scsi_cmnd *scmd, *next;
- LIST_HEAD(check_list);
- int rtn;
- struct Scsi_Host *shost;
-
- list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
- if (!(scmd->eh_eflags & SCSI_EH_CANCEL_CMD))
- continue;
- shost = scmd->device->host;
- if (scsi_host_eh_past_deadline(shost)) {
- list_splice_init(&check_list, work_q);
- SCSI_LOG_ERROR_RECOVERY(3,
- scmd_printk(KERN_INFO, scmd,
- "%s: skip aborting cmd, past eh deadline\n",
- current->comm));
- return list_empty(work_q);
- }
- SCSI_LOG_ERROR_RECOVERY(3,
- scmd_printk(KERN_INFO, scmd,
- "%s: aborting cmd\n", current->comm));
- rtn = scsi_try_to_abort_cmd(shost->hostt, scmd);
- if (rtn == FAILED) {
- SCSI_LOG_ERROR_RECOVERY(3,
- scmd_printk(KERN_INFO, scmd,
- "%s: aborting cmd failed\n",
- current->comm));
- list_splice_init(&check_list, work_q);
- return list_empty(work_q);
- }
- scmd->eh_eflags &= ~SCSI_EH_CANCEL_CMD;
- if (rtn == FAST_IO_FAIL)
- scsi_eh_finish_cmd(scmd, done_q);
- else
- list_move_tail(&scmd->eh_entry, &check_list);
- }
-
- return scsi_eh_test_devices(&check_list, work_q, done_q, 0);
-}
-
/**
* scsi_eh_try_stu - Send START_UNIT to device.
* @scmd: &scsi_cmnd to send START_UNIT
@@ -1701,11 +1633,6 @@ static void scsi_eh_offline_sdevs(struct list_head *work_q,
sdev_printk(KERN_INFO, scmd->device, "Device offlined - "
"not ready after error recovery\n");
scsi_device_set_state(scmd->device, SDEV_OFFLINE);
- if (scmd->eh_eflags & SCSI_EH_CANCEL_CMD) {
- /*
- * FIXME: Handle lost cmds.
- */
- }
scsi_eh_finish_cmd(scmd, done_q);
}
return;
@@ -2149,8 +2076,7 @@ static void scsi_unjam_host(struct Scsi_Host *shost)
SCSI_LOG_ERROR_RECOVERY(1, scsi_eh_prt_fail_stats(shost, &eh_work_q));
if (!scsi_eh_get_sense(&eh_work_q, &eh_done_q))
- if (!scsi_eh_abort_cmds(&eh_work_q, &eh_done_q))
- scsi_eh_ready_devs(shost, &eh_work_q, &eh_done_q);
+ scsi_eh_ready_devs(shost, &eh_work_q, &eh_done_q);
spin_lock_irqsave(shost->host_lock, flags);
if (shost->eh_deadline != -1)
@@ -2437,44 +2363,34 @@ EXPORT_SYMBOL(scsi_command_normalize_sense);
* field will be placed if found.
*
* Return value:
- * 1 if information field found, 0 if not found.
+ * true if information field found, false if not found.
*/
-int scsi_get_sense_info_fld(const u8 * sense_buffer, int sb_len,
- u64 * info_out)
+bool scsi_get_sense_info_fld(const u8 *sense_buffer, int sb_len,
+ u64 *info_out)
{
- int j;
const u8 * ucp;
- u64 ull;
if (sb_len < 7)
- return 0;
+ return false;
switch (sense_buffer[0] & 0x7f) {
case 0x70:
case 0x71:
if (sense_buffer[0] & 0x80) {
- *info_out = (sense_buffer[3] << 24) +
- (sense_buffer[4] << 16) +
- (sense_buffer[5] << 8) + sense_buffer[6];
- return 1;
- } else
- return 0;
+ *info_out = get_unaligned_be32(&sense_buffer[3]);
+ return true;
+ }
+ return false;
case 0x72:
case 0x73:
ucp = scsi_sense_desc_find(sense_buffer, sb_len,
0 /* info desc */);
if (ucp && (0xa == ucp[1])) {
- ull = 0;
- for (j = 0; j < 8; ++j) {
- if (j > 0)
- ull <<= 8;
- ull |= ucp[4 + j];
- }
- *info_out = ull;
- return 1;
- } else
- return 0;
+ *info_out = get_unaligned_be64(&ucp[4]);
+ return true;
+ }
+ return false;
default:
- return 0;
+ return false;
}
}
EXPORT_SYMBOL(scsi_get_sense_info_fld);
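[Illustrative sketch, not part of the patch] The scsi_get_sense_info_fld() rewrite above replaces open-coded byte shifting with get_unaligned_be32()/get_unaligned_be64() when pulling the INFORMATION field out of fixed-format (0x70/0x71) sense data or a descriptor-format (0x72/0x73) information descriptor. A minimal userspace sketch of the fixed-format half of that decoding; be32() and sense_info_fixed() are hypothetical stand-ins, not kernel helpers:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Stand-in for the kernel's get_unaligned_be32(). */
static uint32_t be32(const uint8_t *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
}

/*
 * Fixed-format sense: the INFORMATION field is bytes 3..6, valid only
 * when bit 7 of byte 0 (the VALID bit) is set.  Descriptor-format sense
 * carries it in an 8-byte information descriptor instead.
 */
static bool sense_info_fixed(const uint8_t *sb, size_t len, uint64_t *info)
{
	if (len < 7)
		return false;

	switch (sb[0] & 0x7f) {
	case 0x70:
	case 0x71:
		if (!(sb[0] & 0x80))
			return false;
		*info = be32(&sb[3]);
		return true;
	default:
		return false;
	}
}
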
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 1c3e87d6c48f..814a4bd8405d 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1593,8 +1593,8 @@ static void scsi_softirq_done(struct request *rq)
scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);
break;
default:
- if (!scsi_eh_scmd_add(cmd, 0))
- scsi_finish_command(cmd);
+ scsi_eh_scmd_add(cmd);
+ break;
}
}
@@ -1999,11 +1999,10 @@ static enum blk_eh_timer_return scsi_timeout(struct request *req,
return scsi_times_out(req);
}
-static int scsi_init_request(void *data, struct request *rq,
- unsigned int hctx_idx, unsigned int request_idx,
- unsigned int numa_node)
+static int scsi_init_request(struct blk_mq_tag_set *set, struct request *rq,
+ unsigned int hctx_idx, unsigned int numa_node)
{
- struct Scsi_Host *shost = data;
+ struct Scsi_Host *shost = set->driver_data;
struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
cmd->sense_buffer =
@@ -2014,10 +2013,10 @@ static int scsi_init_request(void *data, struct request *rq,
return 0;
}
-static void scsi_exit_request(void *data, struct request *rq,
- unsigned int hctx_idx, unsigned int request_idx)
+static void scsi_exit_request(struct blk_mq_tag_set *set, struct request *rq,
+ unsigned int hctx_idx)
{
- struct Scsi_Host *shost = data;
+ struct Scsi_Host *shost = set->driver_data;
struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
scsi_free_sense_buffer(shost, cmd->sense_buffer);
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index f11bd102d6d5..59ebc1795bb3 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -18,7 +18,6 @@ struct scsi_nl_hdr;
/*
* Scsi Error Handler Flags
*/
-#define SCSI_EH_CANCEL_CMD 0x0001 /* Cancel this cmd */
#define SCSI_EH_ABORT_SCHEDULED 0x0002 /* Abort has been scheduled */
#define SCSI_SENSE_VALID(scmd) \
@@ -72,7 +71,7 @@ extern enum blk_eh_timer_return scsi_times_out(struct request *req);
extern int scsi_error_handler(void *host);
extern int scsi_decide_disposition(struct scsi_cmnd *cmd);
extern void scsi_eh_wakeup(struct Scsi_Host *shost);
-extern int scsi_eh_scmd_add(struct scsi_cmnd *, int);
+extern void scsi_eh_scmd_add(struct scsi_cmnd *);
void scsi_eh_ready_devs(struct Scsi_Host *shost,
struct list_head *work_q,
struct list_head *done_q);
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 2d753c93e07a..d4cf32d55546 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -289,9 +289,10 @@ static const struct {
u32 value;
char *name;
} fc_port_role_names[] = {
- { FC_PORT_ROLE_FCP_TARGET, "FCP Target" },
- { FC_PORT_ROLE_FCP_INITIATOR, "FCP Initiator" },
- { FC_PORT_ROLE_IP_PORT, "IP Port" },
+ { FC_PORT_ROLE_FCP_TARGET, "FCP Target" },
+ { FC_PORT_ROLE_FCP_INITIATOR, "FCP Initiator" },
+ { FC_PORT_ROLE_IP_PORT, "IP Port" },
+ { FC_PORT_ROLE_FCP_DUMMY_INITIATOR, "FCP Dummy Initiator" },
};
fc_bitfield_name_search(port_roles, fc_port_role_names)
@@ -850,7 +851,7 @@ static int fc_str_to_dev_loss(const char *buf, unsigned long *val)
char *cp;
*val = simple_strtoul(buf, &cp, 0);
- if ((*cp && (*cp != '\n')) || (*val < 0))
+ if (*cp && (*cp != '\n'))
return -EINVAL;
/*
* Check for overflow; dev_loss_tmo is u32
@@ -2628,7 +2629,8 @@ fc_remote_port_create(struct Scsi_Host *shost, int channel,
spin_lock_irqsave(shost->host_lock, flags);
rport->number = fc_host->next_rport_number++;
- if (rport->roles & FC_PORT_ROLE_FCP_TARGET)
+ if ((rport->roles & FC_PORT_ROLE_FCP_TARGET) ||
+ (rport->roles & FC_PORT_ROLE_FCP_DUMMY_INITIATOR))
rport->scsi_target_id = fc_host->next_target_id++;
else
rport->scsi_target_id = -1;
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 568c9f26a561..a424eaeafeb0 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -2158,7 +2158,6 @@ static int iscsi_iter_destroy_conn_fn(struct device *dev, void *data)
void iscsi_remove_session(struct iscsi_cls_session *session)
{
- struct Scsi_Host *shost = iscsi_session_to_shost(session);
unsigned long flags;
int err;
@@ -2185,7 +2184,7 @@ void iscsi_remove_session(struct iscsi_cls_session *session)
scsi_target_unblock(&session->dev, SDEV_TRANSPORT_OFFLINE);
/* flush running scans then delete devices */
- scsi_flush_work(shost);
+ flush_work(&session->scan_work);
__iscsi_unbind_session(&session->unbind_work);
/* hw iscsi may not have removed all connections from session */
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index 9fdbd50c31b4..0ebe2f1bb908 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -370,12 +370,16 @@ EXPORT_SYMBOL(sas_remove_children);
* sas_remove_host - tear down a Scsi_Host's SAS data structures
* @shost: Scsi Host that is torn down
*
- * Removes all SAS PHYs and remote PHYs for a given Scsi_Host.
- * Must be called just before scsi_remove_host for SAS HBAs.
+ * Removes all SAS PHYs and remote PHYs for a given Scsi_Host and removes the
+ * Scsi_Host as well.
+ *
+ * Note: Do not call scsi_remove_host() on the Scsi_Host any more, as it is
+ * already removed.
*/
void sas_remove_host(struct Scsi_Host *shost)
{
sas_remove_children(&shost->shost_gendev);
+ scsi_remove_host(shost);
}
EXPORT_SYMBOL(sas_remove_host);
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 0dc95e102e69..f9d1432d7cc5 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -115,6 +115,7 @@ static void sd_rescan(struct device *);
static int sd_init_command(struct scsi_cmnd *SCpnt);
static void sd_uninit_command(struct scsi_cmnd *SCpnt);
static int sd_done(struct scsi_cmnd *);
+static void sd_eh_reset(struct scsi_cmnd *);
static int sd_eh_action(struct scsi_cmnd *, int);
static void sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer);
static void scsi_disk_release(struct device *cdev);
@@ -573,6 +574,7 @@ static struct scsi_driver sd_template = {
.uninit_command = sd_uninit_command,
.done = sd_done,
.eh_action = sd_eh_action,
+ .eh_reset = sd_eh_reset,
};
/*
@@ -888,8 +890,8 @@ out:
* sd_setup_write_same_cmnd - write the same data to multiple blocks
* @cmd: command to prepare
*
- * Will issue either WRITE SAME(10) or WRITE SAME(16) depending on
- * preference indicated by target device.
+ * Will set up either WRITE SAME(10) or WRITE SAME(16) depending on
+ * the preference indicated by the target device.
**/
static int sd_setup_write_same_cmnd(struct scsi_cmnd *cmd)
{
@@ -908,7 +910,7 @@ static int sd_setup_write_same_cmnd(struct scsi_cmnd *cmd)
BUG_ON(bio_offset(bio) || bio_iovec(bio).bv_len != sdp->sector_size);
if (sd_is_zoned(sdkp)) {
- ret = sd_zbc_setup_write_cmnd(cmd);
+ ret = sd_zbc_write_lock_zone(cmd);
if (ret != BLKPREP_OK)
return ret;
}
@@ -980,7 +982,7 @@ static int sd_setup_read_write_cmnd(struct scsi_cmnd *SCpnt)
unsigned char protect;
if (zoned_write) {
- ret = sd_zbc_setup_write_cmnd(SCpnt);
+ ret = sd_zbc_write_lock_zone(SCpnt);
if (ret != BLKPREP_OK)
return ret;
}
@@ -1207,7 +1209,7 @@ static int sd_setup_read_write_cmnd(struct scsi_cmnd *SCpnt)
ret = BLKPREP_OK;
out:
if (zoned_write && ret != BLKPREP_OK)
- sd_zbc_cancel_write_cmnd(SCpnt);
+ sd_zbc_write_unlock_zone(SCpnt);
return ret;
}
@@ -1264,8 +1266,8 @@ static void sd_uninit_command(struct scsi_cmnd *SCpnt)
/**
* sd_open - open a scsi disk device
- * @inode: only i_rdev member may be used
- * @filp: only f_mode and f_flags may be used
+ * @bdev: Block device of the scsi disk to open
+ * @mode: FMODE_* mask
*
* Returns 0 if successful. Returns a negated errno value in case
* of error.
@@ -1341,8 +1343,8 @@ error_out:
/**
* sd_release - invoked when the (last) close(2) is called on this
* scsi disk.
- * @inode: only i_rdev member may be used
- * @filp: only f_mode and f_flags may be used
+ * @disk: disk to release
+ * @mode: FMODE_* mask
*
* Returns 0.
*
@@ -1398,8 +1400,8 @@ static int sd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
/**
* sd_ioctl - process an ioctl
- * @inode: only i_rdev/i_bdev members may be used
- * @filp: only f_mode and f_flags may be used
+ * @bdev: target block device
+ * @mode: FMODE_* mask
* @cmd: ioctl command number
* @arg: this is third argument given to ioctl(2) system call.
* Often contains a pointer.
@@ -1762,6 +1764,26 @@ static const struct block_device_operations sd_fops = {
};
/**
+ * sd_eh_reset - reset error handling callback
+ * @scmd: sd-issued command that has failed
+ *
+ * This function is called by the SCSI midlayer before starting
+ * SCSI EH. When counting medium access failures we have to be
+ * careful to register it only once per device and SCSI EH run;
+ * there might be several timed out commands which will cause the
+ * 'max_medium_access_timeouts' counter to trigger after the first
+ * SCSI EH run already and take the device offline.
+ * So this function resets the internal counter before starting SCSI EH.
+ **/
+static void sd_eh_reset(struct scsi_cmnd *scmd)
+{
+ struct scsi_disk *sdkp = scsi_disk(scmd->request->rq_disk);
+
+ /* New SCSI EH run, reset gate variable */
+ sdkp->ignore_medium_access_errors = false;
+}
+
+/**
* sd_eh_action - error handling callback
* @scmd: sd-issued command that has failed
* @eh_disp: The recovery disposition suggested by the midlayer
@@ -1790,7 +1812,10 @@ static int sd_eh_action(struct scsi_cmnd *scmd, int eh_disp)
* process of recovering or has it suffered an internal failure
* that prevents access to the storage medium.
*/
- sdkp->medium_access_timed_out++;
+ if (!sdkp->ignore_medium_access_errors) {
+ sdkp->medium_access_timed_out++;
+ sdkp->ignore_medium_access_errors = true;
+ }
/*
* If the device keeps failing read/write commands but TEST UNIT
@@ -1802,7 +1827,7 @@ static int sd_eh_action(struct scsi_cmnd *scmd, int eh_disp)
"Medium access timeout failure. Offlining disk!\n");
scsi_device_set_state(scmd->device, SDEV_OFFLINE);
- return FAILED;
+ return SUCCESS;
}
return eh_disp;
@@ -1810,41 +1835,44 @@ static int sd_eh_action(struct scsi_cmnd *scmd, int eh_disp)
static unsigned int sd_completed_bytes(struct scsi_cmnd *scmd)
{
- u64 start_lba = blk_rq_pos(scmd->request);
- u64 end_lba = blk_rq_pos(scmd->request) + (scsi_bufflen(scmd) / 512);
- u64 factor = scmd->device->sector_size / 512;
- u64 bad_lba;
- int info_valid;
+ struct request *req = scmd->request;
+ struct scsi_device *sdev = scmd->device;
+ unsigned int transferred, good_bytes;
+ u64 start_lba, end_lba, bad_lba;
+
/*
- * resid is optional but mostly filled in. When it's unused,
- * its value is zero, so we assume the whole buffer transferred
+ * Some commands have a payload smaller than the device logical
+ * block size (e.g. INQUIRY on a 4K disk).
*/
- unsigned int transferred = scsi_bufflen(scmd) - scsi_get_resid(scmd);
- unsigned int good_bytes;
-
- info_valid = scsi_get_sense_info_fld(scmd->sense_buffer,
- SCSI_SENSE_BUFFERSIZE,
- &bad_lba);
- if (!info_valid)
+ if (scsi_bufflen(scmd) <= sdev->sector_size)
return 0;
- if (scsi_bufflen(scmd) <= scmd->device->sector_size)
+ /* Check if we have 'bad_lba' information */
+ if (!scsi_get_sense_info_fld(scmd->sense_buffer,
+ SCSI_SENSE_BUFFERSIZE,
+ &bad_lba))
return 0;
- /* be careful ... don't want any overflows */
- do_div(start_lba, factor);
- do_div(end_lba, factor);
-
- /* The bad lba was reported incorrectly, we have no idea where
+ /*
+ * If the bad lba was reported incorrectly, we have no idea where
* the error is.
*/
- if (bad_lba < start_lba || bad_lba >= end_lba)
+ start_lba = sectors_to_logical(sdev, blk_rq_pos(req));
+ end_lba = start_lba + bytes_to_logical(sdev, scsi_bufflen(scmd));
+ if (bad_lba < start_lba || bad_lba >= end_lba)
return 0;
- /* This computation should always be done in terms of
- * the resolution of the device's medium.
+ /*
+ * resid is optional but mostly filled in. When it's unused,
+ * its value is zero, so we assume the whole buffer transferred
+ */
+ transferred = scsi_bufflen(scmd) - scsi_get_resid(scmd);
+
+ /* This computation should always be done in terms of the
+ * resolution of the device's medium.
*/
- good_bytes = (bad_lba - start_lba) * scmd->device->sector_size;
+ good_bytes = logical_to_bytes(sdev, bad_lba - start_lba);
+
return min(good_bytes, transferred);
}
@@ -1866,8 +1894,6 @@ static int sd_done(struct scsi_cmnd *SCpnt)
struct request *req = SCpnt->request;
int sense_valid = 0;
int sense_deferred = 0;
- unsigned char op = SCpnt->cmnd[0];
- unsigned char unmap = SCpnt->cmnd[1] & 8;
switch (req_op(req)) {
case REQ_OP_DISCARD:
@@ -1941,26 +1967,27 @@ static int sd_done(struct scsi_cmnd *SCpnt)
good_bytes = sd_completed_bytes(SCpnt);
break;
case ILLEGAL_REQUEST:
- if (sshdr.asc == 0x10) /* DIX: Host detected corruption */
+ switch (sshdr.asc) {
+ case 0x10: /* DIX: Host detected corruption */
good_bytes = sd_completed_bytes(SCpnt);
- /* INVALID COMMAND OPCODE or INVALID FIELD IN CDB */
- if (sshdr.asc == 0x20 || sshdr.asc == 0x24) {
- switch (op) {
+ break;
+ case 0x20: /* INVALID COMMAND OPCODE */
+ case 0x24: /* INVALID FIELD IN CDB */
+ switch (SCpnt->cmnd[0]) {
case UNMAP:
sd_config_discard(sdkp, SD_LBP_DISABLE);
break;
case WRITE_SAME_16:
case WRITE_SAME:
- if (unmap)
+ if (SCpnt->cmnd[1] & 8) { /* UNMAP */
sd_config_discard(sdkp, SD_LBP_DISABLE);
- else {
+ } else {
sdkp->device->no_write_same = 1;
sd_config_write_same(sdkp);
-
- good_bytes = 0;
req->__data_len = blk_rq_bytes(req);
req->rq_flags |= RQF_QUIET;
}
+ break;
}
}
break;
@@ -2798,7 +2825,7 @@ static void sd_read_app_tag_own(struct scsi_disk *sdkp, unsigned char *buffer)
/**
* sd_read_block_limits - Query disk device for preferred I/O sizes.
- * @disk: disk to query
+ * @sdkp: disk to query
*/
static void sd_read_block_limits(struct scsi_disk *sdkp)
{
@@ -2864,7 +2891,7 @@ static void sd_read_block_limits(struct scsi_disk *sdkp)
/**
* sd_read_block_characteristics - Query block dev. characteristics
- * @disk: disk to query
+ * @sdkp: disk to query
*/
static void sd_read_block_characteristics(struct scsi_disk *sdkp)
{
@@ -2912,7 +2939,7 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp)
/**
* sd_read_block_provisioning - Query provisioning VPD page
- * @disk: disk to query
+ * @sdkp: disk to query
*/
static void sd_read_block_provisioning(struct scsi_disk *sdkp)
{
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index a2c4b5c35379..61d02efd366c 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -114,6 +114,7 @@ struct scsi_disk {
unsigned rc_basis: 2;
unsigned zoned: 2;
unsigned urswrz : 1;
+ unsigned ignore_medium_access_errors : 1;
};
#define to_scsi_disk(obj) container_of(obj,struct scsi_disk,dev)
@@ -176,6 +177,11 @@ static inline unsigned int logical_to_bytes(struct scsi_device *sdev, sector_t b
return blocks * sdev->sector_size;
}
+static inline sector_t bytes_to_logical(struct scsi_device *sdev, unsigned int bytes)
+{
+ return bytes >> ilog2(sdev->sector_size);
+}
+
static inline sector_t sectors_to_logical(struct scsi_device *sdev, sector_t sector)
{
return sector >> (ilog2(sdev->sector_size) - 9);
@@ -274,8 +280,8 @@ static inline int sd_is_zoned(struct scsi_disk *sdkp)
extern int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buffer);
extern void sd_zbc_remove(struct scsi_disk *sdkp);
extern void sd_zbc_print_zones(struct scsi_disk *sdkp);
-extern int sd_zbc_setup_write_cmnd(struct scsi_cmnd *cmd);
-extern void sd_zbc_cancel_write_cmnd(struct scsi_cmnd *cmd);
+extern int sd_zbc_write_lock_zone(struct scsi_cmnd *cmd);
+extern void sd_zbc_write_unlock_zone(struct scsi_cmnd *cmd);
extern int sd_zbc_setup_report_cmnd(struct scsi_cmnd *cmd);
extern int sd_zbc_setup_reset_cmnd(struct scsi_cmnd *cmd);
extern void sd_zbc_complete(struct scsi_cmnd *cmd, unsigned int good_bytes,
@@ -293,13 +299,13 @@ static inline void sd_zbc_remove(struct scsi_disk *sdkp) {}
static inline void sd_zbc_print_zones(struct scsi_disk *sdkp) {}
-static inline int sd_zbc_setup_write_cmnd(struct scsi_cmnd *cmd)
+static inline int sd_zbc_write_lock_zone(struct scsi_cmnd *cmd)
{
/* Let the drive fail requests */
return BLKPREP_OK;
}
-static inline void sd_zbc_cancel_write_cmnd(struct scsi_cmnd *cmd) {}
+static inline void sd_zbc_write_unlock_zone(struct scsi_cmnd *cmd) {}
static inline int sd_zbc_setup_report_cmnd(struct scsi_cmnd *cmd)
{
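[Illustrative sketch, not part of the patch] The sd_completed_bytes() rework above and the bytes_to_logical() helper added here convert between 512-byte sectors, device logical blocks and bytes before comparing the request range against the bad LBA reported in the sense data. A self-contained example of the same arithmetic on a 4096-byte-block disk; sectors_to_lba() and lba_to_bytes() below are local stand-ins for the sd.h helpers:

#include <stdint.h>
#include <stdio.h>

/* sector_size is a power of two, so the shift is log2(sector_size) - 9. */
static uint64_t sectors_to_lba(uint64_t sect512, unsigned int sector_size)
{
	return sect512 >> (__builtin_ctz(sector_size) - 9);
}

static uint64_t lba_to_bytes(uint64_t lba, unsigned int sector_size)
{
	return lba * sector_size;
}

int main(void)
{
	unsigned int sector_size = 4096;  /* device logical block size */
	uint64_t req_pos = 80;            /* request start, in 512-byte sectors */
	uint64_t bad_lba = 12;            /* from the sense INFORMATION field */

	uint64_t start_lba = sectors_to_lba(req_pos, sector_size);        /* 10 */
	uint64_t good_bytes = lba_to_bytes(bad_lba - start_lba, sector_size);

	printf("good_bytes = %llu\n", (unsigned long long)good_bytes);    /* 8192 */
	return 0;
}
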
diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c
index 1994f7799fce..96855df9f49d 100644
--- a/drivers/scsi/sd_zbc.c
+++ b/drivers/scsi/sd_zbc.c
@@ -237,7 +237,6 @@ int sd_zbc_setup_reset_cmnd(struct scsi_cmnd *cmd)
struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
sector_t sector = blk_rq_pos(rq);
sector_t block = sectors_to_logical(sdkp->device, sector);
- unsigned int zno = block >> sdkp->zone_shift;
if (!sd_is_zoned(sdkp))
/* Not a zoned device */
@@ -250,11 +249,6 @@ int sd_zbc_setup_reset_cmnd(struct scsi_cmnd *cmd)
/* Unaligned request */
return BLKPREP_KILL;
- /* Do not allow concurrent reset and writes */
- if (sdkp->zones_wlock &&
- test_and_set_bit(zno, sdkp->zones_wlock))
- return BLKPREP_DEFER;
-
cmd->cmd_len = 16;
memset(cmd->cmnd, 0, cmd->cmd_len);
cmd->cmnd[0] = ZBC_OUT;
@@ -269,7 +263,7 @@ int sd_zbc_setup_reset_cmnd(struct scsi_cmnd *cmd)
return BLKPREP_OK;
}
-int sd_zbc_setup_write_cmnd(struct scsi_cmnd *cmd)
+int sd_zbc_write_lock_zone(struct scsi_cmnd *cmd)
{
struct request *rq = cmd->request;
struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
@@ -303,8 +297,9 @@ int sd_zbc_setup_write_cmnd(struct scsi_cmnd *cmd)
return BLKPREP_OK;
}
-static void sd_zbc_unlock_zone(struct request *rq)
+void sd_zbc_write_unlock_zone(struct scsi_cmnd *cmd)
{
+ struct request *rq = cmd->request;
struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
if (sdkp->zones_wlock) {
@@ -315,11 +310,6 @@ static void sd_zbc_unlock_zone(struct request *rq)
}
}
-void sd_zbc_cancel_write_cmnd(struct scsi_cmnd *cmd)
-{
- sd_zbc_unlock_zone(cmd->request);
-}
-
void sd_zbc_complete(struct scsi_cmnd *cmd,
unsigned int good_bytes,
struct scsi_sense_hdr *sshdr)
@@ -328,39 +318,35 @@ void sd_zbc_complete(struct scsi_cmnd *cmd,
struct request *rq = cmd->request;
switch (req_op(rq)) {
+ case REQ_OP_ZONE_RESET:
+
+ if (result &&
+ sshdr->sense_key == ILLEGAL_REQUEST &&
+ sshdr->asc == 0x24)
+ /*
+ * INVALID FIELD IN CDB error: reset of a conventional
+ * zone was attempted. Nothing to worry about, so be
+ * quiet about the error.
+ */
+ rq->rq_flags |= RQF_QUIET;
+ break;
+
case REQ_OP_WRITE:
case REQ_OP_WRITE_ZEROES:
case REQ_OP_WRITE_SAME:
- case REQ_OP_ZONE_RESET:
/* Unlock the zone */
- sd_zbc_unlock_zone(rq);
+ sd_zbc_write_unlock_zone(cmd);
- if (!result ||
- sshdr->sense_key != ILLEGAL_REQUEST)
- break;
-
- switch (sshdr->asc) {
- case 0x24:
- /*
- * INVALID FIELD IN CDB error: For a zone reset,
- * this means that a reset of a conventional
- * zone was attempted. Nothing to worry about in
- * this case, so be quiet about the error.
- */
- if (req_op(rq) == REQ_OP_ZONE_RESET)
- rq->rq_flags |= RQF_QUIET;
- break;
- case 0x21:
+ if (result &&
+ sshdr->sense_key == ILLEGAL_REQUEST &&
+ sshdr->asc == 0x21)
/*
* INVALID ADDRESS FOR WRITE error: It is unlikely that
* retrying write requests failed with any kind of
* alignement error will result in success. So don't.
*/
cmd->allowed = 0;
- break;
- }
-
break;
case REQ_OP_ZONE_REPORT:
@@ -565,8 +551,7 @@ static int sd_zbc_setup(struct scsi_disk *sdkp)
int sd_zbc_read_zones(struct scsi_disk *sdkp,
unsigned char *buf)
{
- sector_t capacity;
- int ret = 0;
+ int ret;
if (!sd_is_zoned(sdkp))
/*
@@ -598,7 +583,6 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp,
ret = sd_zbc_check_capacity(sdkp, buf);
if (ret)
goto err;
- capacity = logical_to_sectors(sdkp->device, sdkp->capacity);
/*
* Check zone size: only devices with a constant zone size (except
diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c
index 50adabbb5808..f1cdf32d7514 100644
--- a/drivers/scsi/ses.c
+++ b/drivers/scsi/ses.c
@@ -548,7 +548,6 @@ static void ses_enclosure_data_process(struct enclosure_device *edev,
ecomp = &edev->component[components++];
if (!IS_ERR(ecomp)) {
- ses_get_power_status(edev, ecomp);
if (addl_desc_ptr)
ses_process_descriptor(
ecomp,
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 0b60245bd740..0a38ba01b7b4 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -122,7 +122,7 @@ struct sg_device; /* forward declarations */
struct sg_fd;
typedef struct sg_request { /* SG_MAX_QUEUE requests outstanding per file */
- struct sg_request *nextrp; /* NULL -> tail request (slist) */
+ struct list_head entry; /* list entry */
struct sg_fd *parentfp; /* NULL -> not in use */
Sg_scatter_hold data; /* hold buffer, perhaps scatter list */
sg_io_hdr_t header; /* scsi command+info, see <scsi/sg.h> */
@@ -142,19 +142,19 @@ typedef struct sg_fd { /* holds the state of a file descriptor */
struct sg_device *parentdp; /* owning device */
wait_queue_head_t read_wait; /* queue read until command done */
rwlock_t rq_list_lock; /* protect access to list in req_arr */
+ struct mutex f_mutex; /* protect against changes in this fd */
int timeout; /* defaults to SG_DEFAULT_TIMEOUT */
int timeout_user; /* defaults to SG_DEFAULT_TIMEOUT_USER */
Sg_scatter_hold reserve; /* buffer held for this file descriptor */
- unsigned save_scat_len; /* original length of trunc. scat. element */
- Sg_request *headrp; /* head of request slist, NULL->empty */
+ struct list_head rq_list; /* head of request list */
struct fasync_struct *async_qp; /* used by asynchronous notification */
Sg_request req_arr[SG_MAX_QUEUE]; /* used as singly-linked list */
- char low_dma; /* as in parent but possibly overridden to 1 */
char force_packid; /* 1 -> pack_id input to read(), 0 -> ignored */
char cmd_q; /* 1 -> allow command queuing, 0 -> don't */
unsigned char next_cmd_len; /* 0: automatic, >0: use on next write() */
char keep_orphan; /* 0 -> drop orphan (def), 1 -> keep for read() */
char mmap_called; /* 0 -> mmap() never called on this fd */
+ char res_in_use; /* 1 -> 'reserve' array in use */
struct kref f_ref;
struct execute_work ew;
} Sg_fd;
@@ -198,7 +198,6 @@ static void sg_remove_sfp(struct kref *);
static Sg_request *sg_get_rq_mark(Sg_fd * sfp, int pack_id);
static Sg_request *sg_add_request(Sg_fd * sfp);
static int sg_remove_request(Sg_fd * sfp, Sg_request * srp);
-static int sg_res_in_use(Sg_fd * sfp);
static Sg_device *sg_get_dev(int dev);
static void sg_device_destroy(struct kref *kref);
@@ -525,6 +524,7 @@ sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos)
} else
count = (old_hdr->result == 0) ? 0 : -EIO;
sg_finish_rem_req(srp);
+ sg_remove_request(sfp, srp);
retval = count;
free_old_hdr:
kfree(old_hdr);
@@ -565,6 +565,7 @@ sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, Sg_request * srp)
}
err_out:
err2 = sg_finish_rem_req(srp);
+ sg_remove_request(sfp, srp);
return err ? : err2 ? : count;
}
@@ -614,6 +615,7 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
}
buf += SZ_SG_HEADER;
__get_user(opcode, buf);
+ mutex_lock(&sfp->f_mutex);
if (sfp->next_cmd_len > 0) {
cmd_size = sfp->next_cmd_len;
sfp->next_cmd_len = 0; /* reset so only this write() effected */
@@ -622,6 +624,7 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
if ((opcode >= 0xc0) && old_hdr.twelve_byte)
cmd_size = 12;
}
+ mutex_unlock(&sfp->f_mutex);
SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sdp,
"sg_write: scsi opcode=0x%02x, cmd_size=%d\n", (int) opcode, cmd_size));
/* Determine buffer size. */
@@ -662,18 +665,14 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
* is a non-zero input_size, so emit a warning.
*/
if (hp->dxfer_direction == SG_DXFER_TO_FROM_DEV) {
- static char cmd[TASK_COMM_LEN];
- if (strcmp(current->comm, cmd)) {
- printk_ratelimited(KERN_WARNING
- "sg_write: data in/out %d/%d bytes "
- "for SCSI command 0x%x-- guessing "
- "data in;\n program %s not setting "
- "count and/or reply_len properly\n",
- old_hdr.reply_len - (int)SZ_SG_HEADER,
- input_size, (unsigned int) cmnd[0],
- current->comm);
- strcpy(cmd, current->comm);
- }
+ printk_ratelimited(KERN_WARNING
+ "sg_write: data in/out %d/%d bytes "
+ "for SCSI command 0x%x-- guessing "
+ "data in;\n program %s not setting "
+ "count and/or reply_len properly\n",
+ old_hdr.reply_len - (int)SZ_SG_HEADER,
+ input_size, (unsigned int) cmnd[0],
+ current->comm);
}
k = sg_common_write(sfp, srp, cmnd, sfp->timeout, blocking);
return (k < 0) ? k : count;
@@ -721,7 +720,7 @@ sg_new_write(Sg_fd *sfp, struct file *file, const char __user *buf,
sg_remove_request(sfp, srp);
return -EINVAL; /* either MMAP_IO or DIRECT_IO (not both) */
}
- if (sg_res_in_use(sfp)) {
+ if (sfp->res_in_use) {
sg_remove_request(sfp, srp);
return -EBUSY; /* reserve buffer already being used */
}
@@ -752,6 +751,29 @@ sg_new_write(Sg_fd *sfp, struct file *file, const char __user *buf,
return count;
}
+static bool sg_is_valid_dxfer(sg_io_hdr_t *hp)
+{
+ switch (hp->dxfer_direction) {
+ case SG_DXFER_NONE:
+ if (hp->dxferp || hp->dxfer_len > 0)
+ return false;
+ return true;
+ case SG_DXFER_TO_DEV:
+ case SG_DXFER_FROM_DEV:
+ case SG_DXFER_TO_FROM_DEV:
+ if (!hp->dxferp || hp->dxfer_len == 0)
+ return false;
+ return true;
+ case SG_DXFER_UNKNOWN:
+ if ((!hp->dxferp && hp->dxfer_len) ||
+ (hp->dxferp && hp->dxfer_len == 0))
+ return false;
+ return true;
+ default:
+ return false;
+ }
+}
+
static int
sg_common_write(Sg_fd * sfp, Sg_request * srp,
unsigned char *cmnd, int timeout, int blocking)
@@ -772,11 +794,15 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp,
"sg_common_write: scsi opcode=0x%02x, cmd_size=%d\n",
(int) cmnd[0], (int) hp->cmd_len));
+ if (!sg_is_valid_dxfer(hp))
+ return -EINVAL;
+
k = sg_start_req(srp, cmnd);
if (k) {
SCSI_LOG_TIMEOUT(1, sg_printk(KERN_INFO, sfp->parentdp,
"sg_common_write: start_req err=%d\n", k));
sg_finish_rem_req(srp);
+ sg_remove_request(sfp, srp);
return k; /* probably out of space --> ENOMEM */
}
if (atomic_read(&sdp->detaching)) {
@@ -787,6 +813,7 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp,
}
sg_finish_rem_req(srp);
+ sg_remove_request(sfp, srp);
return -ENODEV;
}
@@ -885,24 +912,14 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
/* strange ..., for backward compatibility */
return sfp->timeout_user;
case SG_SET_FORCE_LOW_DMA:
- result = get_user(val, ip);
- if (result)
- return result;
- if (val) {
- sfp->low_dma = 1;
- if ((0 == sfp->low_dma) && (0 == sg_res_in_use(sfp))) {
- val = (int) sfp->reserve.bufflen;
- sg_remove_scat(sfp, &sfp->reserve);
- sg_build_reserve(sfp, val);
- }
- } else {
- if (atomic_read(&sdp->detaching))
- return -ENODEV;
- sfp->low_dma = sdp->device->host->unchecked_isa_dma;
- }
+ /*
+ * N.B. This ioctl never worked properly, but failed to
+ * return an error value. So returning '0' to keep compatibility
+ * with legacy applications.
+ */
return 0;
case SG_GET_LOW_DMA:
- return put_user((int) sfp->low_dma, ip);
+ return put_user((int) sdp->device->host->unchecked_isa_dma, ip);
case SG_GET_SCSI_ID:
if (!access_ok(VERIFY_WRITE, p, sizeof (sg_scsi_id_t)))
return -EFAULT;
@@ -936,7 +953,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
if (!access_ok(VERIFY_WRITE, ip, sizeof (int)))
return -EFAULT;
read_lock_irqsave(&sfp->rq_list_lock, iflags);
- for (srp = sfp->headrp; srp; srp = srp->nextrp) {
+ list_for_each_entry(srp, &sfp->rq_list, entry) {
if ((1 == srp->done) && (!srp->sg_io_owned)) {
read_unlock_irqrestore(&sfp->rq_list_lock,
iflags);
@@ -949,7 +966,8 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
return 0;
case SG_GET_NUM_WAITING:
read_lock_irqsave(&sfp->rq_list_lock, iflags);
- for (val = 0, srp = sfp->headrp; srp; srp = srp->nextrp) {
+ val = 0;
+ list_for_each_entry(srp, &sfp->rq_list, entry) {
if ((1 == srp->done) && (!srp->sg_io_owned))
++val;
}
@@ -965,12 +983,18 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
return -EINVAL;
val = min_t(int, val,
max_sectors_bytes(sdp->device->request_queue));
+ mutex_lock(&sfp->f_mutex);
if (val != sfp->reserve.bufflen) {
- if (sg_res_in_use(sfp) || sfp->mmap_called)
+ if (sfp->mmap_called ||
+ sfp->res_in_use) {
+ mutex_unlock(&sfp->f_mutex);
return -EBUSY;
+ }
+
sg_remove_scat(sfp, &sfp->reserve);
sg_build_reserve(sfp, val);
}
+ mutex_unlock(&sfp->f_mutex);
return 0;
case SG_GET_RESERVED_SIZE:
val = min_t(int, sfp->reserve.bufflen,
@@ -1018,35 +1042,33 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
if (!rinfo)
return -ENOMEM;
read_lock_irqsave(&sfp->rq_list_lock, iflags);
- for (srp = sfp->headrp, val = 0; val < SG_MAX_QUEUE;
- ++val, srp = srp ? srp->nextrp : srp) {
+ val = 0;
+ list_for_each_entry(srp, &sfp->rq_list, entry) {
+ if (val > SG_MAX_QUEUE)
+ break;
memset(&rinfo[val], 0, SZ_SG_REQ_INFO);
- if (srp) {
- rinfo[val].req_state = srp->done + 1;
- rinfo[val].problem =
- srp->header.masked_status &
- srp->header.host_status &
- srp->header.driver_status;
- if (srp->done)
- rinfo[val].duration =
- srp->header.duration;
- else {
- ms = jiffies_to_msecs(jiffies);
- rinfo[val].duration =
- (ms > srp->header.duration) ?
- (ms - srp->header.duration) : 0;
- }
- rinfo[val].orphan = srp->orphan;
- rinfo[val].sg_io_owned =
- srp->sg_io_owned;
- rinfo[val].pack_id =
- srp->header.pack_id;
- rinfo[val].usr_ptr =
- srp->header.usr_ptr;
+ rinfo[val].req_state = srp->done + 1;
+ rinfo[val].problem =
+ srp->header.masked_status &
+ srp->header.host_status &
+ srp->header.driver_status;
+ if (srp->done)
+ rinfo[val].duration =
+ srp->header.duration;
+ else {
+ ms = jiffies_to_msecs(jiffies);
+ rinfo[val].duration =
+ (ms > srp->header.duration) ?
+ (ms - srp->header.duration) : 0;
}
+ rinfo[val].orphan = srp->orphan;
+ rinfo[val].sg_io_owned = srp->sg_io_owned;
+ rinfo[val].pack_id = srp->header.pack_id;
+ rinfo[val].usr_ptr = srp->header.usr_ptr;
+ val++;
}
read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
- result = __copy_to_user(p, rinfo,
+ result = __copy_to_user(p, rinfo,
SZ_SG_REQ_INFO * SG_MAX_QUEUE);
result = result ? -EFAULT : 0;
kfree(rinfo);
@@ -1152,7 +1174,7 @@ sg_poll(struct file *filp, poll_table * wait)
return POLLERR;
poll_wait(filp, &sfp->read_wait, wait);
read_lock_irqsave(&sfp->rq_list_lock, iflags);
- for (srp = sfp->headrp; srp; srp = srp->nextrp) {
+ list_for_each_entry(srp, &sfp->rq_list, entry) {
/* if any read waiting, flag it */
if ((0 == res) && (1 == srp->done) && (!srp->sg_io_owned))
res = POLLIN | POLLRDNORM;
@@ -1269,6 +1291,7 @@ sg_rq_end_io_usercontext(struct work_struct *work)
struct sg_fd *sfp = srp->parentfp;
sg_finish_rem_req(srp);
+ sg_remove_request(sfp, srp);
kref_put(&sfp->f_ref, sg_remove_sfp);
}
@@ -1732,13 +1755,22 @@ sg_start_req(Sg_request *srp, unsigned char *cmd)
md = &map_data;
if (md) {
- if (!sg_res_in_use(sfp) && dxfer_len <= rsv_schp->bufflen)
+ mutex_lock(&sfp->f_mutex);
+ if (dxfer_len <= rsv_schp->bufflen &&
+ !sfp->res_in_use) {
+ sfp->res_in_use = 1;
sg_link_reserve(sfp, srp, dxfer_len);
- else {
+ } else if ((hp->flags & SG_FLAG_MMAP_IO) && sfp->res_in_use) {
+ mutex_unlock(&sfp->f_mutex);
+ return -EBUSY;
+ } else {
res = sg_build_indirect(req_schp, sfp, dxfer_len);
- if (res)
+ if (res) {
+ mutex_unlock(&sfp->f_mutex);
return res;
+ }
}
+ mutex_unlock(&sfp->f_mutex);
md->pages = req_schp->pages;
md->page_order = req_schp->page_order;
@@ -1806,8 +1838,6 @@ sg_finish_rem_req(Sg_request *srp)
else
sg_remove_scat(sfp, req_schp);
- sg_remove_request(sfp, srp);
-
return ret;
}
@@ -1831,6 +1861,7 @@ sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size)
int sg_tablesize = sfp->parentdp->sg_tablesize;
int blk_size = buff_size, order;
gfp_t gfp_mask = GFP_ATOMIC | __GFP_COMP | __GFP_NOWARN;
+ struct sg_device *sdp = sfp->parentdp;
if (blk_size < 0)
return -EFAULT;
@@ -1856,7 +1887,7 @@ sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size)
scatter_elem_sz_prev = num;
}
- if (sfp->low_dma)
+ if (sdp->device->host->unchecked_isa_dma)
gfp_mask |= GFP_DMA;
if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
@@ -2026,8 +2057,9 @@ sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp)
req_schp->pages = NULL;
req_schp->page_order = 0;
req_schp->sglist_len = 0;
- sfp->save_scat_len = 0;
srp->res_used = 0;
+ /* Called without mutex lock to avoid deadlock */
+ sfp->res_in_use = 0;
}
static Sg_request *
@@ -2037,7 +2069,7 @@ sg_get_rq_mark(Sg_fd * sfp, int pack_id)
unsigned long iflags;
write_lock_irqsave(&sfp->rq_list_lock, iflags);
- for (resp = sfp->headrp; resp; resp = resp->nextrp) {
+ list_for_each_entry(resp, &sfp->rq_list, entry) {
/* look for requests that are ready + not SG_IO owned */
if ((1 == resp->done) && (!resp->sg_io_owned) &&
((-1 == pack_id) || (resp->header.pack_id == pack_id))) {
@@ -2055,70 +2087,45 @@ sg_add_request(Sg_fd * sfp)
{
int k;
unsigned long iflags;
- Sg_request *resp;
Sg_request *rp = sfp->req_arr;
write_lock_irqsave(&sfp->rq_list_lock, iflags);
- resp = sfp->headrp;
- if (!resp) {
- memset(rp, 0, sizeof (Sg_request));
- rp->parentfp = sfp;
- resp = rp;
- sfp->headrp = resp;
- } else {
- if (0 == sfp->cmd_q)
- resp = NULL; /* command queuing disallowed */
- else {
- for (k = 0; k < SG_MAX_QUEUE; ++k, ++rp) {
- if (!rp->parentfp)
- break;
- }
- if (k < SG_MAX_QUEUE) {
- memset(rp, 0, sizeof (Sg_request));
- rp->parentfp = sfp;
- while (resp->nextrp)
- resp = resp->nextrp;
- resp->nextrp = rp;
- resp = rp;
- } else
- resp = NULL;
+ if (!list_empty(&sfp->rq_list)) {
+ if (!sfp->cmd_q)
+ goto out_unlock;
+
+ for (k = 0; k < SG_MAX_QUEUE; ++k, ++rp) {
+ if (!rp->parentfp)
+ break;
}
+ if (k >= SG_MAX_QUEUE)
+ goto out_unlock;
}
- if (resp) {
- resp->nextrp = NULL;
- resp->header.duration = jiffies_to_msecs(jiffies);
- }
+ memset(rp, 0, sizeof (Sg_request));
+ rp->parentfp = sfp;
+ rp->header.duration = jiffies_to_msecs(jiffies);
+ list_add_tail(&rp->entry, &sfp->rq_list);
write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
- return resp;
+ return rp;
+out_unlock:
+ write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
+ return NULL;
}
/* Return of 1 for found; 0 for not found */
static int
sg_remove_request(Sg_fd * sfp, Sg_request * srp)
{
- Sg_request *prev_rp;
- Sg_request *rp;
unsigned long iflags;
int res = 0;
- if ((!sfp) || (!srp) || (!sfp->headrp))
+ if (!sfp || !srp || list_empty(&sfp->rq_list))
return res;
write_lock_irqsave(&sfp->rq_list_lock, iflags);
- prev_rp = sfp->headrp;
- if (srp == prev_rp) {
- sfp->headrp = prev_rp->nextrp;
- prev_rp->parentfp = NULL;
+ if (!list_empty(&srp->entry)) {
+ list_del(&srp->entry);
+ srp->parentfp = NULL;
res = 1;
- } else {
- while ((rp = prev_rp->nextrp)) {
- if (srp == rp) {
- prev_rp->nextrp = rp->nextrp;
- rp->parentfp = NULL;
- res = 1;
- break;
- }
- prev_rp = rp;
- }
}
write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
return res;
@@ -2137,13 +2144,12 @@ sg_add_sfp(Sg_device * sdp)
init_waitqueue_head(&sfp->read_wait);
rwlock_init(&sfp->rq_list_lock);
-
+ INIT_LIST_HEAD(&sfp->rq_list);
kref_init(&sfp->f_ref);
+ mutex_init(&sfp->f_mutex);
sfp->timeout = SG_DEFAULT_TIMEOUT;
sfp->timeout_user = SG_DEFAULT_TIMEOUT_USER;
sfp->force_packid = SG_DEF_FORCE_PACK_ID;
- sfp->low_dma = (SG_DEF_FORCE_LOW_DMA == 0) ?
- sdp->device->host->unchecked_isa_dma : 1;
sfp->cmd_q = SG_DEF_COMMAND_Q;
sfp->keep_orphan = SG_DEF_KEEP_ORPHAN;
sfp->parentdp = sdp;
@@ -2177,10 +2183,18 @@ sg_remove_sfp_usercontext(struct work_struct *work)
{
struct sg_fd *sfp = container_of(work, struct sg_fd, ew.work);
struct sg_device *sdp = sfp->parentdp;
+ Sg_request *srp;
+ unsigned long iflags;
/* Cleanup any responses which were never read(). */
- while (sfp->headrp)
- sg_finish_rem_req(sfp->headrp);
+ write_lock_irqsave(&sfp->rq_list_lock, iflags);
+ while (!list_empty(&sfp->rq_list)) {
+ srp = list_first_entry(&sfp->rq_list, Sg_request, entry);
+ sg_finish_rem_req(srp);
+ list_del(&srp->entry);
+ srp->parentfp = NULL;
+ }
+ write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
if (sfp->reserve.bufflen > 0) {
SCSI_LOG_TIMEOUT(6, sg_printk(KERN_INFO, sdp,
@@ -2214,20 +2228,6 @@ sg_remove_sfp(struct kref *kref)
schedule_work(&sfp->ew.work);
}
-static int
-sg_res_in_use(Sg_fd * sfp)
-{
- const Sg_request *srp;
- unsigned long iflags;
-
- read_lock_irqsave(&sfp->rq_list_lock, iflags);
- for (srp = sfp->headrp; srp; srp = srp->nextrp)
- if (srp->res_used)
- break;
- read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
- return srp ? 1 : 0;
-}
-
#ifdef CONFIG_SCSI_PROC_FS
static int
sg_idr_max_id(int id, void *p, void *data)
@@ -2597,7 +2597,7 @@ static int sg_proc_seq_show_devstrs(struct seq_file *s, void *v)
/* must be called while holding sg_index_lock */
static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp)
{
- int k, m, new_interface, blen, usg;
+ int k, new_interface, blen, usg;
Sg_request *srp;
Sg_fd *fp;
const sg_io_hdr_t *hp;
@@ -2613,17 +2613,15 @@ static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp)
jiffies_to_msecs(fp->timeout),
fp->reserve.bufflen,
(int) fp->reserve.k_use_sg,
- (int) fp->low_dma);
+ (int) sdp->device->host->unchecked_isa_dma);
seq_printf(s, " cmd_q=%d f_packid=%d k_orphan=%d closed=0\n",
(int) fp->cmd_q, (int) fp->force_packid,
(int) fp->keep_orphan);
- for (m = 0, srp = fp->headrp;
- srp != NULL;
- ++m, srp = srp->nextrp) {
+ list_for_each_entry(srp, &fp->rq_list, entry) {
hp = &srp->header;
new_interface = (hp->interface_id == '\0') ? 0 : 1;
if (srp->res_used) {
- if (new_interface &&
+ if (new_interface &&
(SG_FLAG_MMAP_IO & hp->flags))
cp = " mmap>> ";
else
@@ -2654,7 +2652,7 @@ static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp)
seq_printf(s, "ms sgat=%d op=0x%02x\n", usg,
(int) srp->data.cmd_opcode);
}
- if (0 == m)
+ if (list_empty(&fp->rq_list))
seq_puts(s, " No requests active\n");
read_unlock(&fp->rq_list_lock);
}
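[Illustrative sketch, not part of the patch] The sg.c changes above replace the hand-rolled 'nextrp' pointer chain (headrp plus tail walks) with a standard struct list_head protected by the existing rq_list_lock. A minimal kernel-style sketch of that idiom; demo_req, demo_fd and the demo_* functions are hypothetical, only the list and rwlock APIs are real:

#include <linux/list.h>
#include <linux/spinlock.h>

struct demo_req {
	struct list_head entry;   /* links this request into the fd's list */
	int pack_id;
};

struct demo_fd {
	rwlock_t lock;            /* plays the role of rq_list_lock */
	struct list_head rq_list; /* replaces the old 'headrp' pointer */
};

static void demo_fd_init(struct demo_fd *fd)
{
	rwlock_init(&fd->lock);
	INIT_LIST_HEAD(&fd->rq_list);
}

static void demo_add(struct demo_fd *fd, struct demo_req *req)
{
	unsigned long flags;

	write_lock_irqsave(&fd->lock, flags);
	list_add_tail(&req->entry, &fd->rq_list);  /* O(1), no tail walk */
	write_unlock_irqrestore(&fd->lock, flags);
}

static struct demo_req *demo_find(struct demo_fd *fd, int pack_id)
{
	struct demo_req *req;
	unsigned long flags;

	read_lock_irqsave(&fd->lock, flags);
	list_for_each_entry(req, &fd->rq_list, entry) {
		if (req->pack_id == pack_id) {
			read_unlock_irqrestore(&fd->lock, flags);
			return req;
		}
	}
	read_unlock_irqrestore(&fd->lock, flags);
	return NULL;
}
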
diff --git a/drivers/scsi/sgiwd93.c b/drivers/scsi/sgiwd93.c
index 6d215e2fb46d..71b4b91d2215 100644
--- a/drivers/scsi/sgiwd93.c
+++ b/drivers/scsi/sgiwd93.c
@@ -297,7 +297,7 @@ out:
return err;
}
-static int __exit sgiwd93_remove(struct platform_device *pdev)
+static int sgiwd93_remove(struct platform_device *pdev)
{
struct Scsi_Host *host = platform_get_drvdata(pdev);
struct ip22_hostdata *hdata = (struct ip22_hostdata *) host->hostdata;
diff --git a/drivers/scsi/sni_53c710.c b/drivers/scsi/sni_53c710.c
index 76278072147e..1f9a087daf69 100644
--- a/drivers/scsi/sni_53c710.c
+++ b/drivers/scsi/sni_53c710.c
@@ -117,7 +117,7 @@ static int snirm710_probe(struct platform_device *dev)
return -ENODEV;
}
-static int __exit snirm710_driver_remove(struct platform_device *dev)
+static int snirm710_driver_remove(struct platform_device *dev)
{
struct Scsi_Host *host = dev_get_drvdata(&dev->dev);
struct NCR_700_Host_Parameters *hostdata =
diff --git a/drivers/scsi/snic/snic_debugfs.c b/drivers/scsi/snic/snic_debugfs.c
index d30280326bde..269ddf791a73 100644
--- a/drivers/scsi/snic/snic_debugfs.c
+++ b/drivers/scsi/snic/snic_debugfs.c
@@ -548,7 +548,7 @@ snic_trc_debugfs_init(void)
&snic_trc_fops);
if (!de) {
- SNIC_ERR("Cann't create trace file.\n");
+ SNIC_ERR("Cannot create trace file.\n");
return ret;
}
diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c
index 5b23175a584c..9b20643ab49d 100644
--- a/drivers/scsi/stex.c
+++ b/drivers/scsi/stex.c
@@ -26,6 +26,7 @@
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/ktime.h>
+#include <linux/reboot.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/byteorder.h>
@@ -38,9 +39,9 @@
#include <scsi/scsi_eh.h>
#define DRV_NAME "stex"
-#define ST_DRIVER_VERSION "5.00.0000.01"
-#define ST_VER_MAJOR 5
-#define ST_VER_MINOR 00
+#define ST_DRIVER_VERSION "6.02.0000.01"
+#define ST_VER_MAJOR 6
+#define ST_VER_MINOR 02
#define ST_OEM 0000
#define ST_BUILD_VER 01
@@ -64,6 +65,13 @@ enum {
YI2H_INT_C = 0xa0,
YH2I_REQ = 0xc0,
YH2I_REQ_HI = 0xc4,
+ PSCRATCH0 = 0xb0,
+ PSCRATCH1 = 0xb4,
+ PSCRATCH2 = 0xb8,
+ PSCRATCH3 = 0xbc,
+ PSCRATCH4 = 0xc8,
+ MAILBOX_BASE = 0x1000,
+ MAILBOX_HNDSHK_STS = 0x0,
/* MU register value */
MU_INBOUND_DOORBELL_HANDSHAKE = (1 << 0),
@@ -87,7 +95,7 @@ enum {
MU_STATE_STOP = 5,
MU_STATE_NOCONNECT = 6,
- MU_MAX_DELAY = 120,
+ MU_MAX_DELAY = 50,
MU_HANDSHAKE_SIGNATURE = 0x55aaaa55,
MU_HANDSHAKE_SIGNATURE_HALF = 0x5a5a0000,
MU_HARD_RESET_WAIT = 30000,
@@ -135,6 +143,7 @@ enum {
st_yosemite = 2,
st_seq = 3,
st_yel = 4,
+ st_P3 = 5,
PASSTHRU_REQ_TYPE = 0x00000001,
PASSTHRU_REQ_NO_WAKEUP = 0x00000100,
@@ -339,6 +348,7 @@ struct st_hba {
u16 rq_size;
u16 sts_count;
u8 supports_pm;
+ int msi_lock;
};
struct st_card_info {
@@ -353,6 +363,12 @@ struct st_card_info {
u16 sts_count;
};
+static int S6flag;
+static int stex_halt(struct notifier_block *nb, ulong event, void *buf);
+static struct notifier_block stex_notifier = {
+ stex_halt, NULL, 0
+};
+
static int msi;
module_param(msi, int, 0);
MODULE_PARM_DESC(msi, "Enable Message Signaled Interrupts(0=off, 1=on)");
@@ -540,11 +556,15 @@ stex_ss_send_cmd(struct st_hba *hba, struct req_msg *req, u16 tag)
++hba->req_head;
hba->req_head %= hba->rq_count+1;
-
- writel((addr >> 16) >> 16, hba->mmio_base + YH2I_REQ_HI);
- readl(hba->mmio_base + YH2I_REQ_HI); /* flush */
- writel(addr, hba->mmio_base + YH2I_REQ);
- readl(hba->mmio_base + YH2I_REQ); /* flush */
+ if (hba->cardtype == st_P3) {
+ writel((addr >> 16) >> 16, hba->mmio_base + YH2I_REQ_HI);
+ writel(addr, hba->mmio_base + YH2I_REQ);
+ } else {
+ writel((addr >> 16) >> 16, hba->mmio_base + YH2I_REQ_HI);
+ readl(hba->mmio_base + YH2I_REQ_HI); /* flush */
+ writel(addr, hba->mmio_base + YH2I_REQ);
+ readl(hba->mmio_base + YH2I_REQ); /* flush */
+ }
}
static void return_abnormal_state(struct st_hba *hba, int status)
@@ -974,15 +994,31 @@ static irqreturn_t stex_ss_intr(int irq, void *__hba)
spin_lock_irqsave(hba->host->host_lock, flags);
- data = readl(base + YI2H_INT);
- if (data && data != 0xffffffff) {
- /* clear the interrupt */
- writel(data, base + YI2H_INT_C);
- stex_ss_mu_intr(hba);
- spin_unlock_irqrestore(hba->host->host_lock, flags);
- if (unlikely(data & SS_I2H_REQUEST_RESET))
- queue_work(hba->work_q, &hba->reset_work);
- return IRQ_HANDLED;
+ if (hba->cardtype == st_yel) {
+ data = readl(base + YI2H_INT);
+ if (data && data != 0xffffffff) {
+ /* clear the interrupt */
+ writel(data, base + YI2H_INT_C);
+ stex_ss_mu_intr(hba);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ if (unlikely(data & SS_I2H_REQUEST_RESET))
+ queue_work(hba->work_q, &hba->reset_work);
+ return IRQ_HANDLED;
+ }
+ } else {
+ data = readl(base + PSCRATCH4);
+ if (data != 0xffffffff) {
+ if (data != 0) {
+ /* clear the interrupt */
+ writel(data, base + PSCRATCH1);
+ writel((1 << 22), base + YH2I_INT);
+ }
+ stex_ss_mu_intr(hba);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ if (unlikely(data & SS_I2H_REQUEST_RESET))
+ queue_work(hba->work_q, &hba->reset_work);
+ return IRQ_HANDLED;
+ }
}
spin_unlock_irqrestore(hba->host->host_lock, flags);
@@ -1080,19 +1116,36 @@ static int stex_ss_handshake(struct st_hba *hba)
struct st_msg_header *msg_h;
struct handshake_frame *h;
__le32 *scratch;
- u32 data, scratch_size;
+ u32 data, scratch_size, mailboxdata, operationaldata;
unsigned long before;
int ret = 0;
before = jiffies;
- while ((readl(base + YIOA_STATUS) & SS_MU_OPERATIONAL) == 0) {
- if (time_after(jiffies, before + MU_MAX_DELAY * HZ)) {
- printk(KERN_ERR DRV_NAME
- "(%s): firmware not operational\n",
- pci_name(hba->pdev));
- return -1;
+
+ if (hba->cardtype == st_yel) {
+ operationaldata = readl(base + YIOA_STATUS);
+ while (operationaldata != SS_MU_OPERATIONAL) {
+ if (time_after(jiffies, before + MU_MAX_DELAY * HZ)) {
+ printk(KERN_ERR DRV_NAME
+ "(%s): firmware not operational\n",
+ pci_name(hba->pdev));
+ return -1;
+ }
+ msleep(1);
+ operationaldata = readl(base + YIOA_STATUS);
+ }
+ } else {
+ operationaldata = readl(base + PSCRATCH3);
+ while (operationaldata != SS_MU_OPERATIONAL) {
+ if (time_after(jiffies, before + MU_MAX_DELAY * HZ)) {
+ printk(KERN_ERR DRV_NAME
+ "(%s): firmware not operational\n",
+ pci_name(hba->pdev));
+ return -1;
+ }
+ msleep(1);
+ operationaldata = readl(base + PSCRATCH3);
}
- msleep(1);
}
msg_h = (struct st_msg_header *)hba->dma_mem;
@@ -1111,30 +1164,60 @@ static int stex_ss_handshake(struct st_hba *hba)
scratch_size = (hba->sts_count+1)*sizeof(u32);
h->scratch_size = cpu_to_le32(scratch_size);
- data = readl(base + YINT_EN);
- data &= ~4;
- writel(data, base + YINT_EN);
- writel((hba->dma_handle >> 16) >> 16, base + YH2I_REQ_HI);
- readl(base + YH2I_REQ_HI);
- writel(hba->dma_handle, base + YH2I_REQ);
- readl(base + YH2I_REQ); /* flush */
+ if (hba->cardtype == st_yel) {
+ data = readl(base + YINT_EN);
+ data &= ~4;
+ writel(data, base + YINT_EN);
+ writel((hba->dma_handle >> 16) >> 16, base + YH2I_REQ_HI);
+ readl(base + YH2I_REQ_HI);
+ writel(hba->dma_handle, base + YH2I_REQ);
+ readl(base + YH2I_REQ); /* flush */
+ } else {
+ data = readl(base + YINT_EN);
+ data &= ~(1 << 0);
+ data &= ~(1 << 2);
+ writel(data, base + YINT_EN);
+ if (hba->msi_lock == 0) {
+ /* The P3 MSI register cannot be accessed twice */
+ writel((1 << 6), base + YH2I_INT);
+ hba->msi_lock = 1;
+ }
+ writel((hba->dma_handle >> 16) >> 16, base + YH2I_REQ_HI);
+ writel(hba->dma_handle, base + YH2I_REQ);
+ }
- scratch = hba->scratch;
before = jiffies;
- while (!(le32_to_cpu(*scratch) & SS_STS_HANDSHAKE)) {
- if (time_after(jiffies, before + MU_MAX_DELAY * HZ)) {
- printk(KERN_ERR DRV_NAME
- "(%s): no signature after handshake frame\n",
- pci_name(hba->pdev));
- ret = -1;
- break;
+ scratch = hba->scratch;
+ if (hba->cardtype == st_yel) {
+ while (!(le32_to_cpu(*scratch) & SS_STS_HANDSHAKE)) {
+ if (time_after(jiffies, before + MU_MAX_DELAY * HZ)) {
+ printk(KERN_ERR DRV_NAME
+ "(%s): no signature after handshake frame\n",
+ pci_name(hba->pdev));
+ ret = -1;
+ break;
+ }
+ rmb();
+ msleep(1);
+ }
+ } else {
+ mailboxdata = readl(base + MAILBOX_BASE + MAILBOX_HNDSHK_STS);
+ while (mailboxdata != SS_STS_HANDSHAKE) {
+ if (time_after(jiffies, before + MU_MAX_DELAY * HZ)) {
+ printk(KERN_ERR DRV_NAME
+ "(%s): no signature after handshake frame\n",
+ pci_name(hba->pdev));
+ ret = -1;
+ break;
+ }
+ rmb();
+ msleep(1);
+ mailboxdata = readl(base + MAILBOX_BASE + MAILBOX_HNDSHK_STS);
}
- rmb();
- msleep(1);
}
-
memset(scratch, 0, scratch_size);
msg_h->flag = 0;
+
return ret;
}
@@ -1144,8 +1227,10 @@ static int stex_handshake(struct st_hba *hba)
unsigned long flags;
unsigned int mu_status;
- err = (hba->cardtype == st_yel) ?
- stex_ss_handshake(hba) : stex_common_handshake(hba);
+ if (hba->cardtype == st_yel || hba->cardtype == st_P3)
+ err = stex_ss_handshake(hba);
+ else
+ err = stex_common_handshake(hba);
spin_lock_irqsave(hba->host->host_lock, flags);
mu_status = hba->mu_status;
if (err == 0) {
@@ -1190,6 +1275,15 @@ static int stex_abort(struct scsi_cmnd *cmd)
writel(data, base + YI2H_INT_C);
stex_ss_mu_intr(hba);
+ } else if (hba->cardtype == st_P3) {
+ data = readl(base + PSCRATCH4);
+ if (data == 0xffffffff)
+ goto fail_out;
+ if (data != 0) {
+ writel(data, base + PSCRATCH1);
+ writel((1 << 22), base + YH2I_INT);
+ }
+ stex_ss_mu_intr(hba);
} else {
data = readl(base + ODBL);
if (data == 0 || data == 0xffffffff)
@@ -1197,7 +1291,6 @@ static int stex_abort(struct scsi_cmnd *cmd)
writel(data, base + ODBL);
readl(base + ODBL); /* flush */
-
stex_mu_intr(hba, data);
}
if (hba->wait_ccb == NULL) {
@@ -1293,6 +1386,12 @@ static void stex_ss_reset(struct st_hba *hba)
ssleep(5);
}
+static void stex_p3_reset(struct st_hba *hba)
+{
+ writel(SS_H2I_INT_RESET, hba->mmio_base + YH2I_INT);
+ ssleep(5);
+}
+
static int stex_do_reset(struct st_hba *hba)
{
unsigned long flags;
@@ -1329,7 +1428,8 @@ static int stex_do_reset(struct st_hba *hba)
stex_hard_reset(hba);
else if (hba->cardtype == st_yel)
stex_ss_reset(hba);
-
+ else if (hba->cardtype == st_P3)
+ stex_p3_reset(hba);
return_abnormal_state(hba, DID_RESET);
@@ -1414,6 +1514,26 @@ static struct pci_device_id stex_pci_tbl[] = {
/* st_yel */
{ 0x105a, 0x8650, 0x1033, PCI_ANY_ID, 0, 0, st_yel },
{ 0x105a, 0x8760, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_yel },
+
+ /* st_P3, pluto */
+ { PCI_VENDOR_ID_PROMISE, 0x8870, PCI_VENDOR_ID_PROMISE,
+ 0x8870, 0, 0, st_P3 },
+ /* st_P3, p3 */
+ { PCI_VENDOR_ID_PROMISE, 0x8870, PCI_VENDOR_ID_PROMISE,
+ 0x4300, 0, 0, st_P3 },
+
+ /* st_P3, SymplyStor4E */
+ { PCI_VENDOR_ID_PROMISE, 0x8871, PCI_VENDOR_ID_PROMISE,
+ 0x4311, 0, 0, st_P3 },
+ /* st_P3, SymplyStor8E */
+ { PCI_VENDOR_ID_PROMISE, 0x8871, PCI_VENDOR_ID_PROMISE,
+ 0x4312, 0, 0, st_P3 },
+ /* st_P3, SymplyStor4 */
+ { PCI_VENDOR_ID_PROMISE, 0x8871, PCI_VENDOR_ID_PROMISE,
+ 0x4321, 0, 0, st_P3 },
+ /* st_P3, SymplyStor8 */
+ { PCI_VENDOR_ID_PROMISE, 0x8871, PCI_VENDOR_ID_PROMISE,
+ 0x4322, 0, 0, st_P3 },
{ } /* terminate list */
};
@@ -1482,6 +1602,19 @@ static struct st_card_info stex_card_info[] = {
.map_sg = stex_ss_map_sg,
.send = stex_ss_send_cmd,
},
+
+ /* st_P3 */
+ {
+ .max_id = 129,
+ .max_lun = 256,
+ .max_channel = 0,
+ .rq_count = 801,
+ .rq_size = 512,
+ .sts_count = 801,
+ .alloc_rq = stex_ss_alloc_req,
+ .map_sg = stex_ss_map_sg,
+ .send = stex_ss_send_cmd,
+ },
};
static int stex_set_dma_mask(struct pci_dev * pdev)
@@ -1502,7 +1635,7 @@ static int stex_request_irq(struct st_hba *hba)
struct pci_dev *pdev = hba->pdev;
int status;
- if (msi) {
+ if (msi || hba->cardtype == st_P3) {
status = pci_enable_msi(pdev);
if (status != 0)
printk(KERN_ERR DRV_NAME
@@ -1513,7 +1646,8 @@ static int stex_request_irq(struct st_hba *hba)
} else
hba->msi_enabled = 0;
- status = request_irq(pdev->irq, hba->cardtype == st_yel ?
+ status = request_irq(pdev->irq,
+ (hba->cardtype == st_yel || hba->cardtype == st_P3) ?
stex_ss_intr : stex_intr, IRQF_SHARED, DRV_NAME, hba);
if (status != 0) {
@@ -1546,6 +1680,9 @@ static int stex_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pci_set_master(pdev);
+ S6flag = 0;
+ register_reboot_notifier(&stex_notifier);
+
host = scsi_host_alloc(&driver_template, sizeof(struct st_hba));
if (!host) {
@@ -1597,12 +1734,12 @@ static int stex_probe(struct pci_dev *pdev, const struct pci_device_id *id)
case 0x4265:
break;
default:
- if (hba->cardtype == st_yel)
+ if (hba->cardtype == st_yel || hba->cardtype == st_P3)
hba->supports_pm = 1;
}
sts_offset = scratch_offset = (ci->rq_count+1) * ci->rq_size;
- if (hba->cardtype == st_yel)
+ if (hba->cardtype == st_yel || hba->cardtype == st_P3)
sts_offset += (ci->sts_count+1) * sizeof(u32);
cp_offset = sts_offset + (ci->sts_count+1) * sizeof(struct status_msg);
hba->dma_size = cp_offset + sizeof(struct st_frame);
@@ -1642,7 +1779,7 @@ static int stex_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto out_pci_free;
}
- if (hba->cardtype == st_yel)
+ if (hba->cardtype == st_yel || hba->cardtype == st_P3)
hba->scratch = (__le32 *)(hba->dma_mem + scratch_offset);
hba->status_buffer = (struct status_msg *)(hba->dma_mem + sts_offset);
hba->copy_buffer = hba->dma_mem + cp_offset;
@@ -1653,8 +1790,9 @@ static int stex_probe(struct pci_dev *pdev, const struct pci_device_id *id)
hba->map_sg = ci->map_sg;
hba->send = ci->send;
hba->mu_status = MU_STATE_STARTING;
+ hba->msi_lock = 0;
- if (hba->cardtype == st_yel)
+ if (hba->cardtype == st_yel || hba->cardtype == st_P3)
host->sg_tablesize = 38;
else
host->sg_tablesize = 32;
@@ -1736,28 +1874,29 @@ static void stex_hba_stop(struct st_hba *hba, int st_sleep_mic)
spin_lock_irqsave(hba->host->host_lock, flags);
- if (hba->cardtype == st_yel && hba->supports_pm == 1)
- {
- if(st_sleep_mic == ST_NOTHANDLED)
- {
+ if ((hba->cardtype == st_yel || hba->cardtype == st_P3) &&
+ hba->supports_pm == 1) {
+ if (st_sleep_mic == ST_NOTHANDLED) {
spin_unlock_irqrestore(hba->host->host_lock, flags);
return;
}
}
req = hba->alloc_rq(hba);
- if (hba->cardtype == st_yel) {
+ if (hba->cardtype == st_yel || hba->cardtype == st_P3) {
msg_h = (struct st_msg_header *)req - 1;
memset(msg_h, 0, hba->rq_size);
} else
memset(req, 0, hba->rq_size);
- if ((hba->cardtype == st_yosemite || hba->cardtype == st_yel)
+ if ((hba->cardtype == st_yosemite || hba->cardtype == st_yel
+ || hba->cardtype == st_P3)
&& st_sleep_mic == ST_IGNORED) {
req->cdb[0] = MGT_CMD;
req->cdb[1] = MGT_CMD_SIGNATURE;
req->cdb[2] = CTLR_CONFIG_CMD;
req->cdb[3] = CTLR_SHUTDOWN;
- } else if (hba->cardtype == st_yel && st_sleep_mic != ST_IGNORED) {
+ } else if ((hba->cardtype == st_yel || hba->cardtype == st_P3)
+ && st_sleep_mic != ST_IGNORED) {
req->cdb[0] = MGT_CMD;
req->cdb[1] = MGT_CMD_SIGNATURE;
req->cdb[2] = CTLR_CONFIG_CMD;
@@ -1768,16 +1907,13 @@ static void stex_hba_stop(struct st_hba *hba, int st_sleep_mic)
req->cdb[1] = CTLR_POWER_STATE_CHANGE;
req->cdb[2] = CTLR_POWER_SAVING;
}
-
hba->ccb[tag].cmd = NULL;
hba->ccb[tag].sg_count = 0;
hba->ccb[tag].sense_bufflen = 0;
hba->ccb[tag].sense_buffer = NULL;
hba->ccb[tag].req_type = PASSTHRU_REQ_TYPE;
-
hba->send(hba, req, tag);
spin_unlock_irqrestore(hba->host->host_lock, flags);
-
before = jiffies;
while (hba->ccb[tag].req_type & PASSTHRU_REQ_TYPE) {
if (time_after(jiffies, before + ST_INTERNAL_TIMEOUT * HZ)) {
@@ -1821,24 +1957,30 @@ static void stex_remove(struct pci_dev *pdev)
scsi_host_put(hba->host);
pci_disable_device(pdev);
+
+ unregister_reboot_notifier(&stex_notifier);
}
static void stex_shutdown(struct pci_dev *pdev)
{
struct st_hba *hba = pci_get_drvdata(pdev);
- if (hba->supports_pm == 0)
+ if (hba->supports_pm == 0) {
stex_hba_stop(hba, ST_IGNORED);
- else
+ } else if (hba->supports_pm == 1 && S6flag) {
+ unregister_reboot_notifier(&stex_notifier);
+ stex_hba_stop(hba, ST_S6);
+ } else
stex_hba_stop(hba, ST_S5);
}
-static int stex_choice_sleep_mic(pm_message_t state)
+static int stex_choice_sleep_mic(struct st_hba *hba, pm_message_t state)
{
switch (state.event) {
case PM_EVENT_SUSPEND:
return ST_S3;
case PM_EVENT_HIBERNATE:
+ hba->msi_lock = 0;
return ST_S4;
default:
return ST_NOTHANDLED;
@@ -1849,8 +1991,9 @@ static int stex_suspend(struct pci_dev *pdev, pm_message_t state)
{
struct st_hba *hba = pci_get_drvdata(pdev);
- if (hba->cardtype == st_yel && hba->supports_pm == 1)
- stex_hba_stop(hba, stex_choice_sleep_mic(state));
+ if ((hba->cardtype == st_yel || hba->cardtype == st_P3)
+ && hba->supports_pm == 1)
+ stex_hba_stop(hba, stex_choice_sleep_mic(hba, state));
else
stex_hba_stop(hba, ST_IGNORED);
return 0;
@@ -1864,6 +2007,12 @@ static int stex_resume(struct pci_dev *pdev)
stex_handshake(hba);
return 0;
}
+
+static int stex_halt(struct notifier_block *nb, unsigned long event, void *buf)
+{
+ S6flag = 1;
+ return NOTIFY_OK;
+}
MODULE_DEVICE_TABLE(pci, stex_pci_tbl);
static struct pci_driver stex_pci_driver = {
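The stex changes above hinge on a reboot notifier: stex_halt() sets S6flag when the system is halting or restarting, so stex_shutdown() can put the controller into the S6 state instead of S5. A minimal sketch of that notifier pattern follows; the flag and callback names are placeholders, not the driver's own symbols.

#include <linux/notifier.h>
#include <linux/reboot.h>

static int example_halt_flag;

static int example_halt(struct notifier_block *nb, unsigned long event,
			void *buf)
{
	/* event is SYS_RESTART, SYS_HALT or SYS_POWER_OFF */
	example_halt_flag = 1;
	return NOTIFY_OK;
}

static struct notifier_block example_notifier = {
	.notifier_call = example_halt,
};

/* probe:            register_reboot_notifier(&example_notifier);   */
/* remove/shutdown:  unregister_reboot_notifier(&example_notifier); */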
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 016639d7fef1..ae966dc3bbc5 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -476,6 +476,9 @@ struct storvsc_device {
*/
u64 node_name;
u64 port_name;
+#if IS_ENABLED(CONFIG_SCSI_FC_ATTRS)
+ struct fc_rport *rport;
+#endif
};
struct hv_host_device {
@@ -864,7 +867,7 @@ static int storvsc_channel_init(struct hv_device *device, bool is_fc)
* We will however populate all the slots to evenly distribute
* the load.
*/
- stor_device->stor_chns = kzalloc(sizeof(void *) * num_possible_cpus(),
+ stor_device->stor_chns = kcalloc(num_possible_cpus(), sizeof(void *),
GFP_KERNEL);
if (stor_device->stor_chns == NULL)
return -ENOMEM;
@@ -1189,8 +1192,6 @@ static void storvsc_on_channel_callback(void *context)
break;
}
} while (1);
-
- return;
}
static int storvsc_connect_to_vsp(struct hv_device *device, u32 ring_size,
@@ -1823,19 +1824,27 @@ static int storvsc_probe(struct hv_device *device,
target = (device->dev_instance.b[5] << 8 |
device->dev_instance.b[4]);
ret = scsi_add_device(host, 0, target, 0);
- if (ret) {
- scsi_remove_host(host);
- goto err_out2;
- }
+ if (ret)
+ goto err_out3;
}
#if IS_ENABLED(CONFIG_SCSI_FC_ATTRS)
if (host->transportt == fc_transport_template) {
+ struct fc_rport_identifiers ids = {
+ .roles = FC_PORT_ROLE_FCP_DUMMY_INITIATOR,
+ };
+
fc_host_node_name(host) = stor_device->node_name;
fc_host_port_name(host) = stor_device->port_name;
+ stor_device->rport = fc_remote_port_add(host, 0, &ids);
+ if (!stor_device->rport)
+ goto err_out3;
}
#endif
return 0;
+err_out3:
+ scsi_remove_host(host);
+
err_out2:
/*
* Once we have connected with the host, we would need to
@@ -1861,8 +1870,10 @@ static int storvsc_remove(struct hv_device *dev)
struct Scsi_Host *host = stor_device->host;
#if IS_ENABLED(CONFIG_SCSI_FC_ATTRS)
- if (host->transportt == fc_transport_template)
+ if (host->transportt == fc_transport_template) {
+ fc_remote_port_delete(stor_device->rport);
fc_remove_host(host);
+ }
#endif
scsi_remove_host(host);
storvsc_dev_remove(dev);
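For reference, the FC transport calls used in the storvsc hunks above follow this general shape; the wrapper below is only an illustration, not storvsc code. The key points are that fc_remote_port_add() returns NULL on failure, and that storvsc_remove() deletes the rport before calling fc_remove_host().

#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>

static struct fc_rport *example_add_dummy_rport(struct Scsi_Host *shost)
{
	struct fc_rport_identifiers ids = {
		.roles = FC_PORT_ROLE_FCP_DUMMY_INITIATOR,
	};

	/* channel 0; NULL on failure. Tear down with
	 * fc_remote_port_delete() before fc_remove_host(). */
	return fc_remote_port_add(shost, 0, &ids);
}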
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 096e95b911bd..abc7e87937cc 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -130,19 +130,12 @@ enum {
UFSHCD_UIC_DME_ERROR = (1 << 5), /* DME error */
};
-/* Interrupt configuration options */
-enum {
- UFSHCD_INT_DISABLE,
- UFSHCD_INT_ENABLE,
- UFSHCD_INT_CLEAR,
-};
-
#define ufshcd_set_eh_in_progress(h) \
- (h->eh_flags |= UFSHCD_EH_IN_PROGRESS)
+ ((h)->eh_flags |= UFSHCD_EH_IN_PROGRESS)
#define ufshcd_eh_in_progress(h) \
- (h->eh_flags & UFSHCD_EH_IN_PROGRESS)
+ ((h)->eh_flags & UFSHCD_EH_IN_PROGRESS)
#define ufshcd_clear_eh_in_progress(h) \
- (h->eh_flags &= ~UFSHCD_EH_IN_PROGRESS)
+ ((h)->eh_flags &= ~UFSHCD_EH_IN_PROGRESS)
#define ufshcd_set_ufs_dev_active(h) \
((h)->curr_dev_pwr_mode = UFS_ACTIVE_PWR_MODE)
@@ -540,15 +533,14 @@ static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
case UFSHCI_VERSION_10:
intr_mask = INTERRUPT_MASK_ALL_VER_10;
break;
- /* allow fall through */
case UFSHCI_VERSION_11:
case UFSHCI_VERSION_20:
intr_mask = INTERRUPT_MASK_ALL_VER_11;
break;
- /* allow fall through */
case UFSHCI_VERSION_21:
default:
intr_mask = INTERRUPT_MASK_ALL_VER_21;
+ break;
}
return intr_mask;
@@ -573,12 +565,12 @@ static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
* the host controller
* @hba: pointer to adapter instance
*
- * Returns 1 if device present, 0 if no device detected
+ * Returns true if device present, false if no device detected
*/
-static inline int ufshcd_is_device_present(struct ufs_hba *hba)
+static inline bool ufshcd_is_device_present(struct ufs_hba *hba)
{
return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) &
- DEVICE_PRESENT) ? 1 : 0;
+ DEVICE_PRESENT) ? true : false;
}
/**
@@ -668,16 +660,7 @@ static inline void ufshcd_outstanding_req_clear(struct ufs_hba *hba, int tag)
*/
static inline int ufshcd_get_lists_status(u32 reg)
{
- /*
- * The mask 0xFF is for the following HCS register bits
- * Bit Description
- * 0 Device Present
- * 1 UTRLRDY
- * 2 UTMRLRDY
- * 3 UCRDY
- * 4-7 reserved
- */
- return ((reg & 0xFF) >> 1) ^ 0x07;
+ return !((reg & UFSHCD_STATUS_READY) == UFSHCD_STATUS_READY);
}
/**
@@ -820,11 +803,12 @@ static inline void ufshcd_hba_start(struct ufs_hba *hba)
* ufshcd_is_hba_active - Get controller state
* @hba: per adapter instance
*
- * Returns zero if controller is active, 1 otherwise
+ * Returns false if controller is active, true otherwise
*/
-static inline int ufshcd_is_hba_active(struct ufs_hba *hba)
+static inline bool ufshcd_is_hba_active(struct ufs_hba *hba)
{
- return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & 0x1) ? 0 : 1;
+ return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & CONTROLLER_ENABLE)
+ ? false : true;
}
static const char *ufschd_uic_link_state_to_string(
@@ -1478,7 +1462,7 @@ start:
break;
}
/*
- * If we here, it means gating work is either done or
+ * If we are here, it means gating work is either done or
* currently running. Hence, fall through to cancel gating
* work and to enable clocks.
*/
@@ -3103,18 +3087,7 @@ static inline int ufshcd_read_power_desc(struct ufs_hba *hba,
u8 *buf,
u32 size)
{
- int err = 0;
- int retries;
-
- for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
- /* Read descriptor*/
- err = ufshcd_read_desc(hba, QUERY_DESC_IDN_POWER, 0, buf, size);
- if (!err)
- break;
- dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
- }
-
- return err;
+ return ufshcd_read_desc(hba, QUERY_DESC_IDN_POWER, 0, buf, size);
}
static int ufshcd_read_device_desc(struct ufs_hba *hba, u8 *buf, u32 size)
@@ -4272,24 +4245,16 @@ static void ufshcd_set_queue_depth(struct scsi_device *sdev)
{
int ret = 0;
u8 lun_qdepth;
- int retries;
struct ufs_hba *hba;
hba = shost_priv(sdev->host);
lun_qdepth = hba->nutrs;
- for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
- /* Read descriptor*/
- ret = ufshcd_read_unit_desc_param(hba,
- ufshcd_scsi_to_upiu_lun(sdev->lun),
- UNIT_DESC_PARAM_LU_Q_DEPTH,
- &lun_qdepth,
- sizeof(lun_qdepth));
- if (!ret || ret == -ENOTSUPP)
- break;
-
- dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, ret);
- }
+ ret = ufshcd_read_unit_desc_param(hba,
+ ufshcd_scsi_to_upiu_lun(sdev->lun),
+ UNIT_DESC_PARAM_LU_Q_DEPTH,
+ &lun_qdepth,
+ sizeof(lun_qdepth));
/* Some WLUN doesn't support unit descriptor */
if (ret == -EOPNOTSUPP)
@@ -4717,7 +4682,7 @@ static int ufshcd_disable_ee(struct ufs_hba *hba, u16 mask)
goto out;
val = hba->ee_ctrl_mask & ~mask;
- val &= 0xFFFF; /* 2 bytes */
+ val &= MASK_EE_STATUS;
err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
if (!err)
@@ -4745,7 +4710,7 @@ static int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask)
goto out;
val = hba->ee_ctrl_mask | mask;
- val &= 0xFFFF; /* 2 bytes */
+ val &= MASK_EE_STATUS;
err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
if (!err)
@@ -5960,24 +5925,6 @@ out:
return icc_level;
}
-static int ufshcd_set_icc_levels_attr(struct ufs_hba *hba, u32 icc_level)
-{
- int ret = 0;
- int retries;
-
- for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
- /* write attribute */
- ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
- QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0, &icc_level);
- if (!ret)
- break;
-
- dev_dbg(hba->dev, "%s: failed with error %d\n", __func__, ret);
- }
-
- return ret;
-}
-
static void ufshcd_init_icc_levels(struct ufs_hba *hba)
{
int ret;
@@ -5998,8 +5945,9 @@ static void ufshcd_init_icc_levels(struct ufs_hba *hba)
dev_dbg(hba->dev, "%s: setting icc_level 0x%x",
__func__, hba->init_prefetch_data.icc_level);
- ret = ufshcd_set_icc_levels_attr(hba,
- hba->init_prefetch_data.icc_level);
+ ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
+ QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0,
+ &hba->init_prefetch_data.icc_level);
if (ret)
dev_err(hba->dev,
@@ -8000,7 +7948,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
INIT_WORK(&hba->clk_scaling.resume_work,
ufshcd_clk_scaling_resume_work);
- snprintf(wq_name, ARRAY_SIZE(wq_name), "ufs_clkscaling_%d",
+ snprintf(wq_name, sizeof(wq_name), "ufs_clkscaling_%d",
host->host_no);
hba->clk_scaling.workq = create_singlethread_workqueue(wq_name);
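The new ufshcd_get_lists_status() relies on the UFSHCD_STATUS_READY mask added to ufshci.h further below. The standalone check here shows the intended equivalence with the removed bit arithmetic: it returns 0 only when UTRLRDY, UTMRLRDY and UCRDY (HCS bits 1-3) are all set. The bit values are repeated purely for illustration.

#define EX_UTRLRDY	(1U << 1)	/* UTP transfer request list ready */
#define EX_UTMRLRDY	(1U << 2)	/* UTP task management list ready  */
#define EX_UCRDY	(1U << 3)	/* UIC command ready               */
#define EX_STATUS_READY	(EX_UTRLRDY | EX_UTMRLRDY | EX_UCRDY)

static inline int example_lists_ready(unsigned int reg)
{
	/* 0 when all three ready bits are set, non-zero otherwise;
	 * matches the old ((reg & 0xFF) >> 1) ^ 0x07 result when the
	 * reserved HCS bits 4-7 read as zero */
	return !((reg & EX_STATUS_READY) == EX_STATUS_READY);
}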
diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h
index d14e9b965d1e..f60145d4a66e 100644
--- a/drivers/scsi/ufs/ufshci.h
+++ b/drivers/scsi/ufs/ufshci.h
@@ -48,6 +48,7 @@ enum {
REG_UFS_VERSION = 0x08,
REG_CONTROLLER_DEV_ID = 0x10,
REG_CONTROLLER_PROD_ID = 0x14,
+ REG_AUTO_HIBERNATE_IDLE_TIMER = 0x18,
REG_INTERRUPT_STATUS = 0x20,
REG_INTERRUPT_ENABLE = 0x24,
REG_CONTROLLER_STATUS = 0x30,
@@ -159,6 +160,10 @@ enum {
#define DEVICE_ERROR_INDICATOR UFS_BIT(5)
#define UIC_POWER_MODE_CHANGE_REQ_STATUS_MASK UFS_MASK(0x7, 8)
+#define UFSHCD_STATUS_READY (UTP_TRANSFER_REQ_LIST_READY |\
+ UTP_TASK_REQ_LIST_READY |\
+ UIC_COMMAND_READY)
+
enum {
PWR_OK = 0x0,
PWR_LOCAL = 0x01,
@@ -171,6 +176,7 @@ enum {
/* HCE - Host Controller Enable 34h */
#define CONTROLLER_ENABLE UFS_BIT(0)
#define CONTROLLER_DISABLE 0x0
+#define CRYPTO_GENERAL_ENABLE UFS_BIT(1)
/* UECPA - Host UIC Error Code PHY Adapter Layer 38h */
#define UIC_PHY_ADAPTER_LAYER_ERROR UFS_BIT(31)
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index 939c47df73fa..a29d068b7696 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -29,6 +29,7 @@
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_tcq.h>
+#include <scsi/scsi_devinfo.h>
#include <linux/seqlock.h>
#include <linux/blk-mq-virtio.h>
@@ -705,6 +706,28 @@ static int virtscsi_device_reset(struct scsi_cmnd *sc)
return virtscsi_tmf(vscsi, cmd);
}
+static int virtscsi_device_alloc(struct scsi_device *sdevice)
+{
+ /*
+ * Passed through SCSI targets (e.g. with qemu's 'scsi-block')
+ * may have transfer limits which come from the host SCSI
+ * controller or something on the host side other than the
+ * target itself.
+ *
+ * To make this work properly, the hypervisor can adjust the
+ * target's VPD information to advertise these limits. But
+ * for that to work, the guest has to look at the VPD pages,
+ * which we won't do by default if it is an SPC-2 device, even
+ * if it does actually support it.
+ *
+ * So, set the blist to always try to read the VPD pages.
+ */
+ sdevice->sdev_bflags = BLIST_TRY_VPD_PAGES;
+
+ return 0;
+}
+
+
/**
* virtscsi_change_queue_depth() - Change a virtscsi target's queue depth
* @sdev: Virtscsi target whose queue depth to change
@@ -783,6 +806,7 @@ static struct scsi_host_template virtscsi_host_template_single = {
.change_queue_depth = virtscsi_change_queue_depth,
.eh_abort_handler = virtscsi_abort,
.eh_device_reset_handler = virtscsi_device_reset,
+ .slave_alloc = virtscsi_device_alloc,
.can_queue = 1024,
.dma_boundary = UINT_MAX,
diff --git a/drivers/scsi/xen-scsifront.c b/drivers/scsi/xen-scsifront.c
index 9aa1fe1fc939..a6a8b60d4902 100644
--- a/drivers/scsi/xen-scsifront.c
+++ b/drivers/scsi/xen-scsifront.c
@@ -434,7 +434,7 @@ static int map_data_for_request(struct vscsifrnt_info *info,
if (seg_grants) {
page = virt_to_page(seg);
- off = (unsigned long)seg & ~PAGE_MASK;
+ off = offset_in_page(seg);
len = sizeof(struct scsiif_request_segment) * data_grants;
while (len > 0) {
bytes = min_t(unsigned int, len, PAGE_SIZE - off);
diff --git a/drivers/scsi/zalon.c b/drivers/scsi/zalon.c
index 97ccb0383539..b2cf1faa819d 100644
--- a/drivers/scsi/zalon.c
+++ b/drivers/scsi/zalon.c
@@ -167,7 +167,7 @@ static struct parisc_device_id zalon_tbl[] = {
MODULE_DEVICE_TABLE(parisc, zalon_tbl);
-static int __exit zalon_remove(struct parisc_device *dev)
+static int zalon_remove(struct parisc_device *dev)
{
struct Scsi_Host *host = dev_get_drvdata(&dev->dev);
diff --git a/drivers/soc/Kconfig b/drivers/soc/Kconfig
index f09023f7ab11..309643fe35f9 100644
--- a/drivers/soc/Kconfig
+++ b/drivers/soc/Kconfig
@@ -1,7 +1,9 @@
menu "SOC (System On Chip) specific Drivers"
+source "drivers/soc/atmel/Kconfig"
source "drivers/soc/bcm/Kconfig"
source "drivers/soc/fsl/Kconfig"
+source "drivers/soc/imx/Kconfig"
source "drivers/soc/mediatek/Kconfig"
source "drivers/soc/qcom/Kconfig"
source "drivers/soc/rockchip/Kconfig"
diff --git a/drivers/soc/Makefile b/drivers/soc/Makefile
index 05eae52a30b4..824b44281efa 100644
--- a/drivers/soc/Makefile
+++ b/drivers/soc/Makefile
@@ -2,10 +2,12 @@
# Makefile for the Linux Kernel SOC specific device drivers.
#
+obj-$(CONFIG_ARCH_AT91) += atmel/
obj-y += bcm/
obj-$(CONFIG_ARCH_DOVE) += dove/
obj-$(CONFIG_MACH_DOVE) += dove/
obj-y += fsl/
+obj-$(CONFIG_ARCH_MXC) += imx/
obj-$(CONFIG_ARCH_MEDIATEK) += mediatek/
obj-$(CONFIG_ARCH_QCOM) += qcom/
obj-$(CONFIG_ARCH_RENESAS) += renesas/
diff --git a/drivers/soc/atmel/Kconfig b/drivers/soc/atmel/Kconfig
new file mode 100644
index 000000000000..6242ebb41abb
--- /dev/null
+++ b/drivers/soc/atmel/Kconfig
@@ -0,0 +1,6 @@
+config AT91_SOC_ID
+ bool "SoC bus for Atmel ARM SoCs"
+ depends on ARCH_AT91 || COMPILE_TEST
+ default ARCH_AT91
+ help
+ Include support for the SoC bus on the Atmel ARM SoCs.
diff --git a/drivers/soc/atmel/Makefile b/drivers/soc/atmel/Makefile
new file mode 100644
index 000000000000..2d92f32e4ea5
--- /dev/null
+++ b/drivers/soc/atmel/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_AT91_SOC_ID) += soc.o
diff --git a/drivers/soc/atmel/soc.c b/drivers/soc/atmel/soc.c
new file mode 100644
index 000000000000..4790094b498e
--- /dev/null
+++ b/drivers/soc/atmel/soc.c
@@ -0,0 +1,231 @@
+/*
+ * Copyright (C) 2015 Atmel
+ *
+ * Alexandre Belloni <alexandre.belloni@free-electrons.com>
+ * Boris Brezillon <boris.brezillon@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ *
+ */
+
+#define pr_fmt(fmt) "AT91: " fmt
+
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/slab.h>
+#include <linux/sys_soc.h>
+
+#include "soc.h"
+
+#define AT91_DBGU_CIDR 0x40
+#define AT91_DBGU_EXID 0x44
+#define AT91_CHIPID_CIDR 0x00
+#define AT91_CHIPID_EXID 0x04
+#define AT91_CIDR_VERSION(x) ((x) & 0x1f)
+#define AT91_CIDR_EXT BIT(31)
+#define AT91_CIDR_MATCH_MASK 0x7fffffe0
+
+static const struct at91_soc __initconst socs[] = {
+#ifdef CONFIG_SOC_AT91RM9200
+ AT91_SOC(AT91RM9200_CIDR_MATCH, 0, "at91rm9200 BGA", "at91rm9200"),
+#endif
+#ifdef CONFIG_SOC_AT91SAM9
+ AT91_SOC(AT91SAM9260_CIDR_MATCH, 0, "at91sam9260", NULL),
+ AT91_SOC(AT91SAM9261_CIDR_MATCH, 0, "at91sam9261", NULL),
+ AT91_SOC(AT91SAM9263_CIDR_MATCH, 0, "at91sam9263", NULL),
+ AT91_SOC(AT91SAM9G20_CIDR_MATCH, 0, "at91sam9g20", NULL),
+ AT91_SOC(AT91SAM9RL64_CIDR_MATCH, 0, "at91sam9rl64", NULL),
+ AT91_SOC(AT91SAM9G45_CIDR_MATCH, AT91SAM9M11_EXID_MATCH,
+ "at91sam9m11", "at91sam9g45"),
+ AT91_SOC(AT91SAM9G45_CIDR_MATCH, AT91SAM9M10_EXID_MATCH,
+ "at91sam9m10", "at91sam9g45"),
+ AT91_SOC(AT91SAM9G45_CIDR_MATCH, AT91SAM9G46_EXID_MATCH,
+ "at91sam9g46", "at91sam9g45"),
+ AT91_SOC(AT91SAM9G45_CIDR_MATCH, AT91SAM9G45_EXID_MATCH,
+ "at91sam9g45", "at91sam9g45"),
+ AT91_SOC(AT91SAM9X5_CIDR_MATCH, AT91SAM9G15_EXID_MATCH,
+ "at91sam9g15", "at91sam9x5"),
+ AT91_SOC(AT91SAM9X5_CIDR_MATCH, AT91SAM9G35_EXID_MATCH,
+ "at91sam9g35", "at91sam9x5"),
+ AT91_SOC(AT91SAM9X5_CIDR_MATCH, AT91SAM9X35_EXID_MATCH,
+ "at91sam9x35", "at91sam9x5"),
+ AT91_SOC(AT91SAM9X5_CIDR_MATCH, AT91SAM9G25_EXID_MATCH,
+ "at91sam9g25", "at91sam9x5"),
+ AT91_SOC(AT91SAM9X5_CIDR_MATCH, AT91SAM9X25_EXID_MATCH,
+ "at91sam9x25", "at91sam9x5"),
+ AT91_SOC(AT91SAM9N12_CIDR_MATCH, AT91SAM9CN12_EXID_MATCH,
+ "at91sam9cn12", "at91sam9n12"),
+ AT91_SOC(AT91SAM9N12_CIDR_MATCH, AT91SAM9N12_EXID_MATCH,
+ "at91sam9n12", "at91sam9n12"),
+ AT91_SOC(AT91SAM9N12_CIDR_MATCH, AT91SAM9CN11_EXID_MATCH,
+ "at91sam9cn11", "at91sam9n12"),
+ AT91_SOC(AT91SAM9XE128_CIDR_MATCH, 0, "at91sam9xe128", "at91sam9xe128"),
+ AT91_SOC(AT91SAM9XE256_CIDR_MATCH, 0, "at91sam9xe256", "at91sam9xe256"),
+ AT91_SOC(AT91SAM9XE512_CIDR_MATCH, 0, "at91sam9xe512", "at91sam9xe512"),
+#endif
+#ifdef CONFIG_SOC_SAMA5
+ AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D21CU_EXID_MATCH,
+ "sama5d21", "sama5d2"),
+ AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D22CU_EXID_MATCH,
+ "sama5d22", "sama5d2"),
+ AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D23CU_EXID_MATCH,
+ "sama5d23", "sama5d2"),
+ AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D24CX_EXID_MATCH,
+ "sama5d24", "sama5d2"),
+ AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D24CU_EXID_MATCH,
+ "sama5d24", "sama5d2"),
+ AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D26CU_EXID_MATCH,
+ "sama5d26", "sama5d2"),
+ AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D27CU_EXID_MATCH,
+ "sama5d27", "sama5d2"),
+ AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D27CN_EXID_MATCH,
+ "sama5d27", "sama5d2"),
+ AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D28CU_EXID_MATCH,
+ "sama5d28", "sama5d2"),
+ AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D28CN_EXID_MATCH,
+ "sama5d28", "sama5d2"),
+ AT91_SOC(SAMA5D3_CIDR_MATCH, SAMA5D31_EXID_MATCH,
+ "sama5d31", "sama5d3"),
+ AT91_SOC(SAMA5D3_CIDR_MATCH, SAMA5D33_EXID_MATCH,
+ "sama5d33", "sama5d3"),
+ AT91_SOC(SAMA5D3_CIDR_MATCH, SAMA5D34_EXID_MATCH,
+ "sama5d34", "sama5d3"),
+ AT91_SOC(SAMA5D3_CIDR_MATCH, SAMA5D35_EXID_MATCH,
+ "sama5d35", "sama5d3"),
+ AT91_SOC(SAMA5D3_CIDR_MATCH, SAMA5D36_EXID_MATCH,
+ "sama5d36", "sama5d3"),
+ AT91_SOC(SAMA5D4_CIDR_MATCH, SAMA5D41_EXID_MATCH,
+ "sama5d41", "sama5d4"),
+ AT91_SOC(SAMA5D4_CIDR_MATCH, SAMA5D42_EXID_MATCH,
+ "sama5d42", "sama5d4"),
+ AT91_SOC(SAMA5D4_CIDR_MATCH, SAMA5D43_EXID_MATCH,
+ "sama5d43", "sama5d4"),
+ AT91_SOC(SAMA5D4_CIDR_MATCH, SAMA5D44_EXID_MATCH,
+ "sama5d44", "sama5d4"),
+#endif
+ { /* sentinel */ },
+};
+
+static int __init at91_get_cidr_exid_from_dbgu(u32 *cidr, u32 *exid)
+{
+ struct device_node *np;
+ void __iomem *regs;
+
+ np = of_find_compatible_node(NULL, NULL, "atmel,at91rm9200-dbgu");
+ if (!np)
+ np = of_find_compatible_node(NULL, NULL,
+ "atmel,at91sam9260-dbgu");
+ if (!np)
+ return -ENODEV;
+
+ regs = of_iomap(np, 0);
+ of_node_put(np);
+
+ if (!regs) {
+ pr_warn("Could not map DBGU iomem range");
+ return -ENXIO;
+ }
+
+ *cidr = readl(regs + AT91_DBGU_CIDR);
+ *exid = readl(regs + AT91_DBGU_EXID);
+
+ iounmap(regs);
+
+ return 0;
+}
+
+static int __init at91_get_cidr_exid_from_chipid(u32 *cidr, u32 *exid)
+{
+ struct device_node *np;
+ void __iomem *regs;
+
+ np = of_find_compatible_node(NULL, NULL, "atmel,sama5d2-chipid");
+ if (!np)
+ return -ENODEV;
+
+ regs = of_iomap(np, 0);
+ of_node_put(np);
+
+ if (!regs) {
+ pr_warn("Could not map DBGU iomem range");
+ return -ENXIO;
+ }
+
+ *cidr = readl(regs + AT91_CHIPID_CIDR);
+ *exid = readl(regs + AT91_CHIPID_EXID);
+
+ iounmap(regs);
+
+ return 0;
+}
+
+struct soc_device * __init at91_soc_init(const struct at91_soc *socs)
+{
+ struct soc_device_attribute *soc_dev_attr;
+ const struct at91_soc *soc;
+ struct soc_device *soc_dev;
+ u32 cidr, exid;
+ int ret;
+
+ /*
+ * With SAMA5D2 and later SoCs, the CIDR and EXID registers are no
+ * longer in the dbgu device but in the chipid device, whose only
+ * purpose is to expose these two registers.
+ */
+ ret = at91_get_cidr_exid_from_dbgu(&cidr, &exid);
+ if (ret)
+ ret = at91_get_cidr_exid_from_chipid(&cidr, &exid);
+ if (ret) {
+ if (ret == -ENODEV)
+ pr_warn("Could not find identification node");
+ return NULL;
+ }
+
+ for (soc = socs; soc->name; soc++) {
+ if (soc->cidr_match != (cidr & AT91_CIDR_MATCH_MASK))
+ continue;
+
+ if (!(cidr & AT91_CIDR_EXT) || soc->exid_match == exid)
+ break;
+ }
+
+ if (!soc->name) {
+ pr_warn("Could not find matching SoC description\n");
+ return NULL;
+ }
+
+ soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
+ if (!soc_dev_attr)
+ return NULL;
+
+ soc_dev_attr->family = soc->family;
+ soc_dev_attr->soc_id = soc->name;
+ soc_dev_attr->revision = kasprintf(GFP_KERNEL, "%X",
+ AT91_CIDR_VERSION(cidr));
+ soc_dev = soc_device_register(soc_dev_attr);
+ if (IS_ERR(soc_dev)) {
+ kfree(soc_dev_attr->revision);
+ kfree(soc_dev_attr);
+ pr_warn("Could not register SoC device\n");
+ return NULL;
+ }
+
+ if (soc->family)
+ pr_info("Detected SoC family: %s\n", soc->family);
+ pr_info("Detected SoC: %s, revision %X\n", soc->name,
+ AT91_CIDR_VERSION(cidr));
+
+ return soc_dev;
+}
+
+static int __init atmel_soc_device_init(void)
+{
+ at91_soc_init(socs);
+
+ return 0;
+}
+subsys_initcall(atmel_soc_device_init);
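To make the matching rules in at91_soc_init() explicit: the CIDR is compared with its 5-bit version field masked off, and the EXID is only consulted when the CIDR's EXT bit indicates an extension ID is present. A small stand-alone predicate capturing those two rules (a hypothetical helper, reusing the masks defined in soc.c above):

#include <linux/types.h>

static bool example_soc_matches(u32 cidr, u32 exid,
				u32 cidr_match, u32 exid_match)
{
	/* ignore the version field when comparing the CIDR */
	if (cidr_match != (cidr & AT91_CIDR_MATCH_MASK))
		return false;

	/* the EXID only matters when the EXT bit flags it as valid */
	return !(cidr & AT91_CIDR_EXT) || exid == exid_match;
}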
diff --git a/arch/arm/mach-at91/soc.h b/drivers/soc/atmel/soc.h
index 228efded5085..228efded5085 100644
--- a/arch/arm/mach-at91/soc.h
+++ b/drivers/soc/atmel/soc.h
diff --git a/drivers/soc/bcm/brcmstb/common.c b/drivers/soc/bcm/brcmstb/common.c
index 94e7335553f4..b6195fdf0d00 100644
--- a/drivers/soc/bcm/brcmstb/common.c
+++ b/drivers/soc/bcm/brcmstb/common.c
@@ -41,6 +41,15 @@ bool soc_is_brcmstb(void)
}
static const struct of_device_id sun_top_ctrl_match[] = {
+ { .compatible = "brcm,bcm7125-sun-top-ctrl", },
+ { .compatible = "brcm,bcm7346-sun-top-ctrl", },
+ { .compatible = "brcm,bcm7358-sun-top-ctrl", },
+ { .compatible = "brcm,bcm7360-sun-top-ctrl", },
+ { .compatible = "brcm,bcm7362-sun-top-ctrl", },
+ { .compatible = "brcm,bcm7420-sun-top-ctrl", },
+ { .compatible = "brcm,bcm7425-sun-top-ctrl", },
+ { .compatible = "brcm,bcm7429-sun-top-ctrl", },
+ { .compatible = "brcm,bcm7425-sun-top-ctrl", },
{ .compatible = "brcm,brcmstb-sun-top-ctrl", },
{ }
};
diff --git a/drivers/soc/fsl/qbman/qman_priv.h b/drivers/soc/fsl/qbman/qman_priv.h
index 22725bdc6f15..5fe9faf6232e 100644
--- a/drivers/soc/fsl/qbman/qman_priv.h
+++ b/drivers/soc/fsl/qbman/qman_priv.h
@@ -33,6 +33,7 @@
#include "dpaa_sys.h"
#include <soc/fsl/qman.h>
+#include <linux/dma-mapping.h>
#include <linux/iommu.h>
#if defined(CONFIG_FSL_PAMU)
diff --git a/drivers/soc/imx/Kconfig b/drivers/soc/imx/Kconfig
new file mode 100644
index 000000000000..357a5d8f8da0
--- /dev/null
+++ b/drivers/soc/imx/Kconfig
@@ -0,0 +1,9 @@
+menu "i.MX SoC drivers"
+
+config IMX7_PM_DOMAINS
+ bool "i.MX7 PM domains"
+ select PM_GENERIC_DOMAINS
+ depends on SOC_IMX7D || (COMPILE_TEST && OF)
+ default y if SOC_IMX7D
+
+endmenu
diff --git a/drivers/soc/imx/Makefile b/drivers/soc/imx/Makefile
new file mode 100644
index 000000000000..5b6e396c1121
--- /dev/null
+++ b/drivers/soc/imx/Makefile
@@ -0,0 +1,2 @@
+obj-y += gpc.o
+obj-$(CONFIG_IMX7_PM_DOMAINS) += gpcv2.o
diff --git a/drivers/soc/imx/gpc.c b/drivers/soc/imx/gpc.c
new file mode 100644
index 000000000000..47e7aa963dbb
--- /dev/null
+++ b/drivers/soc/imx/gpc.c
@@ -0,0 +1,489 @@
+/*
+ * Copyright 2015-2017 Pengutronix, Lucas Stach <kernel@pengutronix.de>
+ * Copyright 2011-2013 Freescale Semiconductor, Inc.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+
+#define GPC_CNTR 0x000
+
+#define GPC_PGC_CTRL_OFFS 0x0
+#define GPC_PGC_PUPSCR_OFFS 0x4
+#define GPC_PGC_PDNSCR_OFFS 0x8
+#define GPC_PGC_SW2ISO_SHIFT 0x8
+#define GPC_PGC_SW_SHIFT 0x0
+
+#define GPC_PGC_GPU_PDN 0x260
+#define GPC_PGC_GPU_PUPSCR 0x264
+#define GPC_PGC_GPU_PDNSCR 0x268
+
+#define GPU_VPU_PUP_REQ BIT(1)
+#define GPU_VPU_PDN_REQ BIT(0)
+
+#define GPC_CLK_MAX 6
+
+#define PGC_DOMAIN_FLAG_NO_PD BIT(0)
+
+struct imx_pm_domain {
+ struct generic_pm_domain base;
+ struct regmap *regmap;
+ struct regulator *supply;
+ struct clk *clk[GPC_CLK_MAX];
+ int num_clks;
+ unsigned int reg_offs;
+ signed char cntr_pdn_bit;
+ unsigned int ipg_rate_mhz;
+ unsigned int flags;
+};
+
+static inline struct imx_pm_domain *
+to_imx_pm_domain(struct generic_pm_domain *genpd)
+{
+ return container_of(genpd, struct imx_pm_domain, base);
+}
+
+static int imx6_pm_domain_power_off(struct generic_pm_domain *genpd)
+{
+ struct imx_pm_domain *pd = to_imx_pm_domain(genpd);
+ int iso, iso2sw;
+ u32 val;
+
+ if (pd->flags & PGC_DOMAIN_FLAG_NO_PD)
+ return -EBUSY;
+
+ /* Read ISO and ISO2SW power down delays */
+ regmap_read(pd->regmap, pd->reg_offs + GPC_PGC_PUPSCR_OFFS, &val);
+ iso = val & 0x3f;
+ iso2sw = (val >> 8) & 0x3f;
+
+ /* Gate off domain when powered down */
+ regmap_update_bits(pd->regmap, pd->reg_offs + GPC_PGC_CTRL_OFFS,
+ 0x1, 0x1);
+
+ /* Request GPC to power down domain */
+ val = BIT(pd->cntr_pdn_bit);
+ regmap_update_bits(pd->regmap, GPC_CNTR, val, val);
+
+ /* Wait ISO + ISO2SW IPG clock cycles */
+ udelay(DIV_ROUND_UP(iso + iso2sw, pd->ipg_rate_mhz));
+
+ if (pd->supply)
+ regulator_disable(pd->supply);
+
+ return 0;
+}
+
+static int imx6_pm_domain_power_on(struct generic_pm_domain *genpd)
+{
+ struct imx_pm_domain *pd = to_imx_pm_domain(genpd);
+ int i, ret, sw, sw2iso;
+ u32 val;
+
+ if (pd->supply) {
+ ret = regulator_enable(pd->supply);
+ if (ret) {
+ pr_err("%s: failed to enable regulator: %d\n",
+ __func__, ret);
+ return ret;
+ }
+ }
+
+ /* Enable reset clocks for all devices in the domain */
+ for (i = 0; i < pd->num_clks; i++)
+ clk_prepare_enable(pd->clk[i]);
+
+ /* Gate off domain when powered down */
+ regmap_update_bits(pd->regmap, pd->reg_offs + GPC_PGC_CTRL_OFFS,
+ 0x1, 0x1);
+
+ /* Read ISO and ISO2SW power up delays */
+ regmap_read(pd->regmap, pd->reg_offs + GPC_PGC_PUPSCR_OFFS, &val);
+ sw = val & 0x3f;
+ sw2iso = (val >> 8) & 0x3f;
+
+ /* Request GPC to power up domain */
+ val = BIT(pd->cntr_pdn_bit + 1);
+ regmap_update_bits(pd->regmap, GPC_CNTR, val, val);
+
+ /* Wait ISO + ISO2SW IPG clock cycles */
+ udelay(DIV_ROUND_UP(sw + sw2iso, pd->ipg_rate_mhz));
+
+ /* Disable reset clocks for all devices in the domain */
+ for (i = 0; i < pd->num_clks; i++)
+ clk_disable_unprepare(pd->clk[i]);
+
+ return 0;
+}
+
+static int imx_pgc_get_clocks(struct device *dev, struct imx_pm_domain *domain)
+{
+ int i, ret;
+
+ for (i = 0; ; i++) {
+ struct clk *clk = of_clk_get(dev->of_node, i);
+ if (IS_ERR(clk))
+ break;
+ if (i >= GPC_CLK_MAX) {
+ dev_err(dev, "more than %d clocks\n", GPC_CLK_MAX);
+ ret = -EINVAL;
+ goto clk_err;
+ }
+ domain->clk[i] = clk;
+ }
+ domain->num_clks = i;
+
+ return 0;
+
+clk_err:
+ while (i--)
+ clk_put(domain->clk[i]);
+
+ return ret;
+}
+
+static void imx_pgc_put_clocks(struct imx_pm_domain *domain)
+{
+ int i;
+
+ for (i = domain->num_clks - 1; i >= 0; i--)
+ clk_put(domain->clk[i]);
+}
+
+static int imx_pgc_parse_dt(struct device *dev, struct imx_pm_domain *domain)
+{
+ /* try to get the domain supply regulator */
+ domain->supply = devm_regulator_get_optional(dev, "power");
+ if (IS_ERR(domain->supply)) {
+ if (PTR_ERR(domain->supply) == -ENODEV)
+ domain->supply = NULL;
+ else
+ return PTR_ERR(domain->supply);
+ }
+
+ /* try to get all clocks needed for reset propagation */
+ return imx_pgc_get_clocks(dev, domain);
+}
+
+static int imx_pgc_power_domain_probe(struct platform_device *pdev)
+{
+ struct imx_pm_domain *domain = pdev->dev.platform_data;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ /* if this PD is associated with a DT node try to parse it */
+ if (dev->of_node) {
+ ret = imx_pgc_parse_dt(dev, domain);
+ if (ret)
+ return ret;
+ }
+
+ /* initially power on the domain */
+ if (domain->base.power_on)
+ domain->base.power_on(&domain->base);
+
+ if (IS_ENABLED(CONFIG_PM_GENERIC_DOMAINS)) {
+ pm_genpd_init(&domain->base, NULL, false);
+ ret = of_genpd_add_provider_simple(dev->of_node, &domain->base);
+ if (ret)
+ goto genpd_err;
+ }
+
+ device_link_add(dev, dev->parent, DL_FLAG_AUTOREMOVE);
+
+ return 0;
+
+genpd_err:
+ pm_genpd_remove(&domain->base);
+ imx_pgc_put_clocks(domain);
+
+ return ret;
+}
+
+static int imx_pgc_power_domain_remove(struct platform_device *pdev)
+{
+ struct imx_pm_domain *domain = pdev->dev.platform_data;
+
+ if (IS_ENABLED(CONFIG_PM_GENERIC_DOMAINS)) {
+ of_genpd_del_provider(pdev->dev.of_node);
+ pm_genpd_remove(&domain->base);
+ imx_pgc_put_clocks(domain);
+ }
+
+ return 0;
+}
+
+static const struct platform_device_id imx_pgc_power_domain_id[] = {
+ { "imx-pgc-power-domain"},
+ { },
+};
+
+static struct platform_driver imx_pgc_power_domain_driver = {
+ .driver = {
+ .name = "imx-pgc-pd",
+ },
+ .probe = imx_pgc_power_domain_probe,
+ .remove = imx_pgc_power_domain_remove,
+ .id_table = imx_pgc_power_domain_id,
+};
+builtin_platform_driver(imx_pgc_power_domain_driver)
+
+#define GPC_PGC_DOMAIN_ARM 0
+#define GPC_PGC_DOMAIN_PU 1
+#define GPC_PGC_DOMAIN_DISPLAY 2
+
+static struct genpd_power_state imx6_pm_domain_pu_state = {
+ .power_off_latency_ns = 25000,
+ .power_on_latency_ns = 2000000,
+};
+
+static struct imx_pm_domain imx_gpc_domains[] = {
+ {
+ .base = {
+ .name = "ARM",
+ },
+ }, {
+ .base = {
+ .name = "PU",
+ .power_off = imx6_pm_domain_power_off,
+ .power_on = imx6_pm_domain_power_on,
+ .states = &imx6_pm_domain_pu_state,
+ .state_count = 1,
+ },
+ .reg_offs = 0x260,
+ .cntr_pdn_bit = 0,
+ }, {
+ .base = {
+ .name = "DISPLAY",
+ .power_off = imx6_pm_domain_power_off,
+ .power_on = imx6_pm_domain_power_on,
+ },
+ .reg_offs = 0x240,
+ .cntr_pdn_bit = 4,
+ }
+};
+
+struct imx_gpc_dt_data {
+ int num_domains;
+ bool err009619_present;
+};
+
+static const struct imx_gpc_dt_data imx6q_dt_data = {
+ .num_domains = 2,
+ .err009619_present = false,
+};
+
+static const struct imx_gpc_dt_data imx6qp_dt_data = {
+ .num_domains = 2,
+ .err009619_present = true,
+};
+
+static const struct imx_gpc_dt_data imx6sl_dt_data = {
+ .num_domains = 3,
+ .err009619_present = false,
+};
+
+static const struct of_device_id imx_gpc_dt_ids[] = {
+ { .compatible = "fsl,imx6q-gpc", .data = &imx6q_dt_data },
+ { .compatible = "fsl,imx6qp-gpc", .data = &imx6qp_dt_data },
+ { .compatible = "fsl,imx6sl-gpc", .data = &imx6sl_dt_data },
+ { }
+};
+
+static const struct regmap_config imx_gpc_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = 0x2ac,
+};
+
+static struct generic_pm_domain *imx_gpc_onecell_domains[] = {
+ &imx_gpc_domains[0].base,
+ &imx_gpc_domains[1].base,
+};
+
+static struct genpd_onecell_data imx_gpc_onecell_data = {
+ .domains = imx_gpc_onecell_domains,
+ .num_domains = 2,
+};
+
+static int imx_gpc_old_dt_init(struct device *dev, struct regmap *regmap,
+ unsigned int num_domains)
+{
+ struct imx_pm_domain *domain;
+ int i, ret;
+
+ for (i = 0; i < num_domains; i++) {
+ domain = &imx_gpc_domains[i];
+ domain->regmap = regmap;
+ domain->ipg_rate_mhz = 66;
+
+ if (i == 1) {
+ domain->supply = devm_regulator_get(dev, "pu");
+ if (IS_ERR(domain->supply))
+ return PTR_ERR(domain->supply);
+
+ ret = imx_pgc_get_clocks(dev, domain);
+ if (ret)
+ goto clk_err;
+
+ domain->base.power_on(&domain->base);
+ }
+ }
+
+ for (i = 0; i < num_domains; i++)
+ pm_genpd_init(&imx_gpc_domains[i].base, NULL, false);
+
+ if (IS_ENABLED(CONFIG_PM_GENERIC_DOMAINS)) {
+ ret = of_genpd_add_provider_onecell(dev->of_node,
+ &imx_gpc_onecell_data);
+ if (ret)
+ goto genpd_err;
+ }
+
+ return 0;
+
+genpd_err:
+ for (i = 0; i < num_domains; i++)
+ pm_genpd_remove(&imx_gpc_domains[i].base);
+ imx_pgc_put_clocks(&imx_gpc_domains[GPC_PGC_DOMAIN_PU]);
+clk_err:
+ return ret;
+}
+
+static int imx_gpc_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *of_id =
+ of_match_device(imx_gpc_dt_ids, &pdev->dev);
+ const struct imx_gpc_dt_data *of_id_data = of_id->data;
+ struct device_node *pgc_node;
+ struct regmap *regmap;
+ struct resource *res;
+ void __iomem *base;
+ int ret;
+
+ pgc_node = of_get_child_by_name(pdev->dev.of_node, "pgc");
+
+ /* bail out if DT too old and doesn't provide the necessary info */
+ if (!of_property_read_bool(pdev->dev.of_node, "#power-domain-cells") &&
+ !pgc_node)
+ return 0;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ regmap = devm_regmap_init_mmio_clk(&pdev->dev, NULL, base,
+ &imx_gpc_regmap_config);
+ if (IS_ERR(regmap)) {
+ ret = PTR_ERR(regmap);
+ dev_err(&pdev->dev, "failed to init regmap: %d\n",
+ ret);
+ return ret;
+ }
+
+ /* Disable PU power down in normal operation if ERR009619 is present */
+ if (of_id_data->err009619_present)
+ imx_gpc_domains[GPC_PGC_DOMAIN_PU].flags |=
+ PGC_DOMAIN_FLAG_NO_PD;
+
+ if (!pgc_node) {
+ ret = imx_gpc_old_dt_init(&pdev->dev, regmap,
+ of_id_data->num_domains);
+ if (ret)
+ return ret;
+ } else {
+ struct imx_pm_domain *domain;
+ struct platform_device *pd_pdev;
+ struct device_node *np;
+ struct clk *ipg_clk;
+ unsigned int ipg_rate_mhz;
+ int domain_index;
+
+ ipg_clk = devm_clk_get(&pdev->dev, "ipg");
+ if (IS_ERR(ipg_clk))
+ return PTR_ERR(ipg_clk);
+ ipg_rate_mhz = clk_get_rate(ipg_clk) / 1000000;
+
+ for_each_child_of_node(pgc_node, np) {
+ ret = of_property_read_u32(np, "reg", &domain_index);
+ if (ret) {
+ of_node_put(np);
+ return ret;
+ }
+ if (domain_index >= of_id_data->num_domains)
+ continue;
+
+ domain = &imx_gpc_domains[domain_index];
+ domain->regmap = regmap;
+ domain->ipg_rate_mhz = ipg_rate_mhz;
+
+ pd_pdev = platform_device_alloc("imx-pgc-power-domain",
+ domain_index);
+ if (!pd_pdev) {
+ of_node_put(np);
+ return -ENOMEM;
+ }
+ pd_pdev->dev.platform_data = domain;
+ pd_pdev->dev.parent = &pdev->dev;
+ pd_pdev->dev.of_node = np;
+
+ ret = platform_device_add(pd_pdev);
+ if (ret) {
+ platform_device_put(pd_pdev);
+ of_node_put(np);
+ return ret;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int imx_gpc_remove(struct platform_device *pdev)
+{
+ int ret;
+
+ /*
+ * If the old DT binding is used, the top-level driver needs to
+ * de-register the power domains
+ */
+ if (!of_get_child_by_name(pdev->dev.of_node, "pgc")) {
+ of_genpd_del_provider(pdev->dev.of_node);
+
+ ret = pm_genpd_remove(&imx_gpc_domains[GPC_PGC_DOMAIN_PU].base);
+ if (ret)
+ return ret;
+ imx_pgc_put_clocks(&imx_gpc_domains[GPC_PGC_DOMAIN_PU]);
+
+ ret = pm_genpd_remove(&imx_gpc_domains[GPC_PGC_DOMAIN_ARM].base);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static struct platform_driver imx_gpc_driver = {
+ .driver = {
+ .name = "imx-gpc",
+ .of_match_table = imx_gpc_dt_ids,
+ },
+ .probe = imx_gpc_probe,
+ .remove = imx_gpc_remove,
+};
+builtin_platform_driver(imx_gpc_driver)
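Both this driver and the GPCv2 driver that follows register their domains through the same two-step genpd pattern: initialize the domain with pm_genpd_init(), then expose it to DT consumers with of_genpd_add_provider_simple(), undoing the init if provider registration fails. A stripped-down sketch of that pattern, with hypothetical names:

#include <linux/platform_device.h>
#include <linux/pm_domain.h>

static int example_power_on(struct generic_pm_domain *genpd)  { return 0; }
static int example_power_off(struct generic_pm_domain *genpd) { return 0; }

static struct generic_pm_domain example_pd = {
	.name      = "example",
	.power_on  = example_power_on,
	.power_off = example_power_off,
};

static int example_pd_probe(struct platform_device *pdev)
{
	int ret;

	ret = pm_genpd_init(&example_pd, NULL, false); /* start powered on */
	if (ret)
		return ret;

	ret = of_genpd_add_provider_simple(pdev->dev.of_node, &example_pd);
	if (ret)
		pm_genpd_remove(&example_pd);

	return ret;
}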
diff --git a/drivers/soc/imx/gpcv2.c b/drivers/soc/imx/gpcv2.c
new file mode 100644
index 000000000000..3039072911a5
--- /dev/null
+++ b/drivers/soc/imx/gpcv2.c
@@ -0,0 +1,363 @@
+/*
+ * Copyright 2017 Impinj, Inc
+ * Author: Andrey Smirnov <andrew.smirnov@gmail.com>
+ *
+ * Based on the code of the analogous driver:
+ *
+ * Copyright 2015-2017 Pengutronix, Lucas Stach <kernel@pengutronix.de>
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <dt-bindings/power/imx7-power.h>
+
+#define GPC_LPCR_A7_BSC 0x000
+
+#define GPC_PGC_CPU_MAPPING 0x0ec
+#define USB_HSIC_PHY_A7_DOMAIN BIT(6)
+#define USB_OTG2_PHY_A7_DOMAIN BIT(5)
+#define USB_OTG1_PHY_A7_DOMAIN BIT(4)
+#define PCIE_PHY_A7_DOMAIN BIT(3)
+#define MIPI_PHY_A7_DOMAIN BIT(2)
+
+#define GPC_PU_PGC_SW_PUP_REQ 0x0f8
+#define GPC_PU_PGC_SW_PDN_REQ 0x104
+#define USB_HSIC_PHY_SW_Pxx_REQ BIT(4)
+#define USB_OTG2_PHY_SW_Pxx_REQ BIT(3)
+#define USB_OTG1_PHY_SW_Pxx_REQ BIT(2)
+#define PCIE_PHY_SW_Pxx_REQ BIT(1)
+#define MIPI_PHY_SW_Pxx_REQ BIT(0)
+
+#define GPC_M4_PU_PDN_FLG 0x1bc
+
+
+#define PGC_MIPI 4
+#define PGC_PCIE 5
+#define PGC_USB_HSIC 8
+#define GPC_PGC_CTRL(n) (0x800 + (n) * 0x40)
+#define GPC_PGC_SR(n) (GPC_PGC_CTRL(n) + 0xc)
+
+#define GPC_PGC_CTRL_PCR BIT(0)
+
+struct imx7_pgc_domain {
+ struct generic_pm_domain genpd;
+ struct regmap *regmap;
+ struct regulator *regulator;
+
+ unsigned int pgc;
+
+ const struct {
+ u32 pxx;
+ u32 map;
+ } bits;
+
+ const int voltage;
+ struct device *dev;
+};
+
+static int imx7_gpc_pu_pgc_sw_pxx_req(struct generic_pm_domain *genpd,
+ bool on)
+{
+ struct imx7_pgc_domain *domain = container_of(genpd,
+ struct imx7_pgc_domain,
+ genpd);
+ unsigned int offset = on ?
+ GPC_PU_PGC_SW_PUP_REQ : GPC_PU_PGC_SW_PDN_REQ;
+ const bool enable_power_control = !on;
+ const bool has_regulator = !IS_ERR(domain->regulator);
+ unsigned long deadline;
+ int ret = 0;
+
+ regmap_update_bits(domain->regmap, GPC_PGC_CPU_MAPPING,
+ domain->bits.map, domain->bits.map);
+
+ if (has_regulator && on) {
+ ret = regulator_enable(domain->regulator);
+ if (ret) {
+ dev_err(domain->dev, "failed to enable regulator\n");
+ goto unmap;
+ }
+ }
+
+ if (enable_power_control)
+ regmap_update_bits(domain->regmap, GPC_PGC_CTRL(domain->pgc),
+ GPC_PGC_CTRL_PCR, GPC_PGC_CTRL_PCR);
+
+ regmap_update_bits(domain->regmap, offset,
+ domain->bits.pxx, domain->bits.pxx);
+
+ /*
+ * As per "5.5.9.4 Example Code 4" in IMX7DRM.pdf wait
+ * for PUP_REQ/PDN_REQ bit to be cleared
+ */
+ deadline = jiffies + msecs_to_jiffies(1);
+ while (true) {
+ u32 pxx_req;
+
+ regmap_read(domain->regmap, offset, &pxx_req);
+
+ if (!(pxx_req & domain->bits.pxx))
+ break;
+
+ if (time_after(jiffies, deadline)) {
+ dev_err(domain->dev, "falied to command PGC\n");
+ ret = -ETIMEDOUT;
+ /*
+ * If we were in the process of enabling a
+ * domain and failed, we might as well disable
+ * the regulator we just enabled. And if it
+ * was the opposite situation and we failed to
+ * power down -- keep the regulator on.
+ */
+ on = !on;
+ break;
+ }
+
+ cpu_relax();
+ }
+
+ if (enable_power_control)
+ regmap_update_bits(domain->regmap, GPC_PGC_CTRL(domain->pgc),
+ GPC_PGC_CTRL_PCR, 0);
+
+ if (has_regulator && !on) {
+ int err;
+
+ err = regulator_disable(domain->regulator);
+ if (err)
+ dev_err(domain->dev,
+ "failed to disable regulator: %d\n", ret);
+ /* Preserve earlier error code */
+ ret = ret ?: err;
+ }
+unmap:
+ regmap_update_bits(domain->regmap, GPC_PGC_CPU_MAPPING,
+ domain->bits.map, 0);
+ return ret;
+}
+
+static int imx7_gpc_pu_pgc_sw_pup_req(struct generic_pm_domain *genpd)
+{
+ return imx7_gpc_pu_pgc_sw_pxx_req(genpd, true);
+}
+
+static int imx7_gpc_pu_pgc_sw_pdn_req(struct generic_pm_domain *genpd)
+{
+ return imx7_gpc_pu_pgc_sw_pxx_req(genpd, false);
+}
+
+static struct imx7_pgc_domain imx7_pgc_domains[] = {
+ [IMX7_POWER_DOMAIN_MIPI_PHY] = {
+ .genpd = {
+ .name = "mipi-phy",
+ },
+ .bits = {
+ .pxx = MIPI_PHY_SW_Pxx_REQ,
+ .map = MIPI_PHY_A7_DOMAIN,
+ },
+ .voltage = 1000000,
+ .pgc = PGC_MIPI,
+ },
+
+ [IMX7_POWER_DOMAIN_PCIE_PHY] = {
+ .genpd = {
+ .name = "pcie-phy",
+ },
+ .bits = {
+ .pxx = PCIE_PHY_SW_Pxx_REQ,
+ .map = PCIE_PHY_A7_DOMAIN,
+ },
+ .voltage = 1000000,
+ .pgc = PGC_PCIE,
+ },
+
+ [IMX7_POWER_DOMAIN_USB_HSIC_PHY] = {
+ .genpd = {
+ .name = "usb-hsic-phy",
+ },
+ .bits = {
+ .pxx = USB_HSIC_PHY_SW_Pxx_REQ,
+ .map = USB_HSIC_PHY_A7_DOMAIN,
+ },
+ .voltage = 1200000,
+ .pgc = PGC_USB_HSIC,
+ },
+};
+
+static int imx7_pgc_domain_probe(struct platform_device *pdev)
+{
+ struct imx7_pgc_domain *domain = pdev->dev.platform_data;
+ int ret;
+
+ domain->dev = &pdev->dev;
+
+ ret = pm_genpd_init(&domain->genpd, NULL, true);
+ if (ret) {
+ dev_err(domain->dev, "Failed to init power domain\n");
+ return ret;
+ }
+
+ domain->regulator = devm_regulator_get_optional(domain->dev, "power");
+ if (IS_ERR(domain->regulator)) {
+ if (PTR_ERR(domain->regulator) != -ENODEV) {
+ dev_err(domain->dev, "Failed to get domain's regulator\n");
+ return PTR_ERR(domain->regulator);
+ }
+ } else {
+ regulator_set_voltage(domain->regulator,
+ domain->voltage, domain->voltage);
+ }
+
+ ret = of_genpd_add_provider_simple(domain->dev->of_node,
+ &domain->genpd);
+ if (ret) {
+ dev_err(domain->dev, "Failed to add genpd provider\n");
+ pm_genpd_remove(&domain->genpd);
+ }
+
+ return ret;
+}
+
+static int imx7_pgc_domain_remove(struct platform_device *pdev)
+{
+ struct imx7_pgc_domain *domain = pdev->dev.platform_data;
+
+ of_genpd_del_provider(domain->dev->of_node);
+ pm_genpd_remove(&domain->genpd);
+
+ return 0;
+}
+
+static const struct platform_device_id imx7_pgc_domain_id[] = {
+ { "imx7-pgc-domain", },
+ { },
+};
+
+static struct platform_driver imx7_pgc_domain_driver = {
+ .driver = {
+ .name = "imx7-pgc",
+ },
+ .probe = imx7_pgc_domain_probe,
+ .remove = imx7_pgc_domain_remove,
+ .id_table = imx7_pgc_domain_id,
+};
+builtin_platform_driver(imx7_pgc_domain_driver)
+
+static int imx_gpcv2_probe(struct platform_device *pdev)
+{
+ static const struct regmap_range yes_ranges[] = {
+ regmap_reg_range(GPC_LPCR_A7_BSC,
+ GPC_M4_PU_PDN_FLG),
+ regmap_reg_range(GPC_PGC_CTRL(PGC_MIPI),
+ GPC_PGC_SR(PGC_MIPI)),
+ regmap_reg_range(GPC_PGC_CTRL(PGC_PCIE),
+ GPC_PGC_SR(PGC_PCIE)),
+ regmap_reg_range(GPC_PGC_CTRL(PGC_USB_HSIC),
+ GPC_PGC_SR(PGC_USB_HSIC)),
+ };
+ static const struct regmap_access_table access_table = {
+ .yes_ranges = yes_ranges,
+ .n_yes_ranges = ARRAY_SIZE(yes_ranges),
+ };
+ static const struct regmap_config regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .rd_table = &access_table,
+ .wr_table = &access_table,
+ .max_register = SZ_4K,
+ };
+ struct device *dev = &pdev->dev;
+ struct device_node *pgc_np, *np;
+ struct regmap *regmap;
+ struct resource *res;
+ void __iomem *base;
+ int ret;
+
+ pgc_np = of_get_child_by_name(dev->of_node, "pgc");
+ if (!pgc_np) {
+ dev_err(dev, "No power domains specified in DT\n");
+ return -EINVAL;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ regmap = devm_regmap_init_mmio(dev, base, &regmap_config);
+ if (IS_ERR(regmap)) {
+ ret = PTR_ERR(regmap);
+ dev_err(dev, "failed to init regmap (%d)\n", ret);
+ return ret;
+ }
+
+ for_each_child_of_node(pgc_np, np) {
+ struct platform_device *pd_pdev;
+ struct imx7_pgc_domain *domain;
+ u32 domain_index;
+
+ ret = of_property_read_u32(np, "reg", &domain_index);
+ if (ret) {
+ dev_err(dev, "Failed to read 'reg' property\n");
+ of_node_put(np);
+ return ret;
+ }
+
+ if (domain_index >= ARRAY_SIZE(imx7_pgc_domains)) {
+ dev_warn(dev,
+ "Domain index %d is out of bounds\n",
+ domain_index);
+ continue;
+ }
+
+ domain = &imx7_pgc_domains[domain_index];
+ domain->regmap = regmap;
+ domain->genpd.power_on = imx7_gpc_pu_pgc_sw_pup_req;
+ domain->genpd.power_off = imx7_gpc_pu_pgc_sw_pdn_req;
+
+ pd_pdev = platform_device_alloc("imx7-pgc-domain",
+ domain_index);
+ if (!pd_pdev) {
+ dev_err(dev, "Failed to allocate platform device\n");
+ of_node_put(np);
+ return -ENOMEM;
+ }
+
+ pd_pdev->dev.platform_data = domain;
+ pd_pdev->dev.parent = dev;
+ pd_pdev->dev.of_node = np;
+
+ ret = platform_device_add(pd_pdev);
+ if (ret) {
+ platform_device_put(pd_pdev);
+ of_node_put(np);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static const struct of_device_id imx_gpcv2_dt_ids[] = {
+ { .compatible = "fsl,imx7d-gpc" },
+ { }
+};
+
+static struct platform_driver imx_gpc_driver = {
+ .driver = {
+ .name = "imx-gpcv2",
+ .of_match_table = imx_gpcv2_dt_ids,
+ },
+ .probe = imx_gpcv2_probe,
+};
+builtin_platform_driver(imx_gpc_driver)
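The PGC handshake above polls the PUP_REQ/PDN_REQ bit with a 1 ms jiffies deadline rather than sleeping. In isolation, that bounded-poll idiom looks like the sketch below; the register and bit arguments are placeholders and the usual kernel headers are assumed.

#include <linux/jiffies.h>
#include <linux/regmap.h>

static int example_wait_req_cleared(struct regmap *map, unsigned int reg,
				    unsigned int bit)
{
	unsigned long deadline = jiffies + msecs_to_jiffies(1);
	unsigned int val;

	do {
		regmap_read(map, reg, &val);
		if (!(val & bit))
			return 0;	/* request acknowledged */
		cpu_relax();
	} while (!time_after(jiffies, deadline));

	return -ETIMEDOUT;
}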
diff --git a/drivers/soc/renesas/r8a7795-sysc.c b/drivers/soc/renesas/r8a7795-sysc.c
index 5e7537c96f7b..7412666187b3 100644
--- a/drivers/soc/renesas/r8a7795-sysc.c
+++ b/drivers/soc/renesas/r8a7795-sysc.c
@@ -1,7 +1,7 @@
/*
* Renesas R-Car H3 System Controller
*
- * Copyright (C) 2016 Glider bvba
+ * Copyright (C) 2016-2017 Glider bvba
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -10,12 +10,13 @@
#include <linux/bug.h>
#include <linux/kernel.h>
+#include <linux/sys_soc.h>
#include <dt-bindings/power/r8a7795-sysc.h>
#include "rcar-sysc.h"
-static const struct rcar_sysc_area r8a7795_areas[] __initconst = {
+static struct rcar_sysc_area r8a7795_areas[] __initdata = {
{ "always-on", 0, 0, R8A7795_PD_ALWAYS_ON, -1, PD_ALWAYS_ON },
{ "ca57-scu", 0x1c0, 0, R8A7795_PD_CA57_SCU, R8A7795_PD_ALWAYS_ON,
PD_SCU },
@@ -40,6 +41,7 @@ static const struct rcar_sysc_area r8a7795_areas[] __initconst = {
{ "a3vp", 0x340, 0, R8A7795_PD_A3VP, R8A7795_PD_ALWAYS_ON },
{ "cr7", 0x240, 0, R8A7795_PD_CR7, R8A7795_PD_ALWAYS_ON },
{ "a3vc", 0x380, 0, R8A7795_PD_A3VC, R8A7795_PD_ALWAYS_ON },
+ /* A2VC0 exists on ES1.x only */
{ "a2vc0", 0x3c0, 0, R8A7795_PD_A2VC0, R8A7795_PD_A3VC },
{ "a2vc1", 0x3c0, 1, R8A7795_PD_A2VC1, R8A7795_PD_A3VC },
{ "3dg-a", 0x100, 0, R8A7795_PD_3DG_A, R8A7795_PD_ALWAYS_ON },
@@ -50,7 +52,27 @@ static const struct rcar_sysc_area r8a7795_areas[] __initconst = {
{ "a3ir", 0x180, 0, R8A7795_PD_A3IR, R8A7795_PD_ALWAYS_ON },
};
+
+ /*
+ * Fixups for R-Car H3 revisions after ES1.x
+ */
+
+static const struct soc_device_attribute r8a7795es1[] __initconst = {
+ { .soc_id = "r8a7795", .revision = "ES1.*" },
+ { /* sentinel */ }
+};
+
+static int __init r8a7795_sysc_init(void)
+{
+ if (!soc_device_match(r8a7795es1))
+ rcar_sysc_nullify(r8a7795_areas, ARRAY_SIZE(r8a7795_areas),
+ R8A7795_PD_A2VC0);
+
+ return 0;
+}
+
const struct rcar_sysc_info r8a7795_sysc_info __initconst = {
+ .init = r8a7795_sysc_init,
.areas = r8a7795_areas,
.num_areas = ARRAY_SIZE(r8a7795_areas),
};
diff --git a/drivers/soc/renesas/rcar-sysc.c b/drivers/soc/renesas/rcar-sysc.c
index 225c35c79d9a..528a13742aeb 100644
--- a/drivers/soc/renesas/rcar-sysc.c
+++ b/drivers/soc/renesas/rcar-sysc.c
@@ -2,7 +2,7 @@
* R-Car SYSC Power management support
*
* Copyright (C) 2014 Magnus Damm
- * Copyright (C) 2015-2016 Glider bvba
+ * Copyright (C) 2015-2017 Glider bvba
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
@@ -334,6 +334,12 @@ static int __init rcar_sysc_pd_init(void)
info = match->data;
+ if (info->init) {
+ error = info->init();
+ if (error)
+ return error;
+ }
+
has_cpg_mstp = of_find_compatible_node(NULL, NULL,
"renesas,cpg-mstp-clocks");
@@ -377,6 +383,11 @@ static int __init rcar_sysc_pd_init(void)
const struct rcar_sysc_area *area = &info->areas[i];
struct rcar_sysc_pd *pd;
+ if (!area->name) {
+ /* Skip NULLified area */
+ continue;
+ }
+
pd = kzalloc(sizeof(*pd) + strlen(area->name) + 1, GFP_KERNEL);
if (!pd) {
error = -ENOMEM;
@@ -406,6 +417,18 @@ out_put:
}
early_initcall(rcar_sysc_pd_init);
+void __init rcar_sysc_nullify(struct rcar_sysc_area *areas,
+ unsigned int num_areas, u8 id)
+{
+ unsigned int i;
+
+ for (i = 0; i < num_areas; i++)
+ if (areas[i].isr_bit == id) {
+ areas[i].name = NULL;
+ return;
+ }
+}
+
void __init rcar_sysc_init(phys_addr_t base, u32 syscier)
{
u32 syscimr;
diff --git a/drivers/soc/renesas/rcar-sysc.h b/drivers/soc/renesas/rcar-sysc.h
index f6e842e2976e..07edb049a401 100644
--- a/drivers/soc/renesas/rcar-sysc.h
+++ b/drivers/soc/renesas/rcar-sysc.h
@@ -46,6 +46,7 @@ struct rcar_sysc_area {
*/
struct rcar_sysc_info {
+ int (*init)(void); /* Optional */
const struct rcar_sysc_area *areas;
unsigned int num_areas;
};
@@ -59,4 +60,13 @@ extern const struct rcar_sysc_info r8a7792_sysc_info;
extern const struct rcar_sysc_info r8a7794_sysc_info;
extern const struct rcar_sysc_info r8a7795_sysc_info;
extern const struct rcar_sysc_info r8a7796_sysc_info;
+
+
+ /*
+ * Helpers for fixing up power area tables depending on SoC revision
+ */
+
+extern void rcar_sysc_nullify(struct rcar_sysc_area *areas,
+ unsigned int num_areas, u8 id);
+
#endif /* __SOC_RENESAS_RCAR_SYSC_H__ */
diff --git a/drivers/soc/renesas/renesas-soc.c b/drivers/soc/renesas/renesas-soc.c
index 330960312296..ca26f13d399c 100644
--- a/drivers/soc/renesas/renesas-soc.c
+++ b/drivers/soc/renesas/renesas-soc.c
@@ -80,11 +80,21 @@ static const struct renesas_soc soc_rmobile_a1 __initconst __maybe_unused = {
.id = 0x40,
};
+static const struct renesas_soc soc_rz_g1h __initconst __maybe_unused = {
+ .family = &fam_rzg,
+ .id = 0x45,
+};
+
static const struct renesas_soc soc_rz_g1m __initconst __maybe_unused = {
.family = &fam_rzg,
.id = 0x47,
};
+static const struct renesas_soc soc_rz_g1n __initconst __maybe_unused = {
+ .family = &fam_rzg,
+ .id = 0x4b,
+};
+
static const struct renesas_soc soc_rz_g1e __initconst __maybe_unused = {
.family = &fam_rzg,
.id = 0x4c,
@@ -150,9 +160,15 @@ static const struct of_device_id renesas_socs[] __initconst = {
#ifdef CONFIG_ARCH_R8A7740
{ .compatible = "renesas,r8a7740", .data = &soc_rmobile_a1 },
#endif
+#ifdef CONFIG_ARCH_R8A7742
+ { .compatible = "renesas,r8a7742", .data = &soc_rz_g1h },
+#endif
#ifdef CONFIG_ARCH_R8A7743
{ .compatible = "renesas,r8a7743", .data = &soc_rz_g1m },
#endif
+#ifdef CONFIG_ARCH_R8A7744
+ { .compatible = "renesas,r8a7744", .data = &soc_rz_g1n },
+#endif
#ifdef CONFIG_ARCH_R8A7745
{ .compatible = "renesas,r8a7745", .data = &soc_rz_g1e },
#endif
@@ -254,4 +270,4 @@ static int __init renesas_soc_init(void)
return 0;
}
-core_initcall(renesas_soc_init);
+early_initcall(renesas_soc_init);
diff --git a/drivers/soc/samsung/Kconfig b/drivers/soc/samsung/Kconfig
index 245533907d1b..8b25bd55e648 100644
--- a/drivers/soc/samsung/Kconfig
+++ b/drivers/soc/samsung/Kconfig
@@ -8,7 +8,13 @@ if SOC_SAMSUNG
config EXYNOS_PMU
bool "Exynos PMU controller driver" if COMPILE_TEST
- depends on (ARM && ARCH_EXYNOS) || ((ARM || ARM64) && COMPILE_TEST)
+ depends on ARCH_EXYNOS || ((ARM || ARM64) && COMPILE_TEST)
+ select EXYNOS_PMU_ARM_DRIVERS if ARM && ARCH_EXYNOS
+
+# There is no need to enable these drivers for ARMv8
+config EXYNOS_PMU_ARM_DRIVERS
+ bool "Exynos PMU ARMv7-specific driver extensions" if COMPILE_TEST
+ depends on EXYNOS_PMU
config EXYNOS_PM_DOMAINS
bool "Exynos PM domains" if COMPILE_TEST
diff --git a/drivers/soc/samsung/Makefile b/drivers/soc/samsung/Makefile
index 3619f2ecddaa..4d7694a4e7a4 100644
--- a/drivers/soc/samsung/Makefile
+++ b/drivers/soc/samsung/Makefile
@@ -1,3 +1,5 @@
-obj-$(CONFIG_EXYNOS_PMU) += exynos-pmu.o exynos3250-pmu.o exynos4-pmu.o \
+obj-$(CONFIG_EXYNOS_PMU) += exynos-pmu.o
+
+obj-$(CONFIG_EXYNOS_PMU_ARM_DRIVERS) += exynos3250-pmu.o exynos4-pmu.o \
exynos5250-pmu.o exynos5420-pmu.o
obj-$(CONFIG_EXYNOS_PM_DOMAINS) += pm_domains.o
diff --git a/drivers/soc/samsung/exynos-pmu.c b/drivers/soc/samsung/exynos-pmu.c
index 56d9244ff981..bd4a76f27bc2 100644
--- a/drivers/soc/samsung/exynos-pmu.c
+++ b/drivers/soc/samsung/exynos-pmu.c
@@ -69,27 +69,37 @@ void exynos_sys_powerdown_conf(enum sys_powerdown mode)
}
/*
+ * Split the data between ARM architectures because it is relatively big
+ * and useless on other architectures.
+ */
+#ifdef CONFIG_EXYNOS_PMU_ARM_DRIVERS
+#define exynos_pmu_data_arm_ptr(data) (&data)
+#else
+#define exynos_pmu_data_arm_ptr(data) NULL
+#endif
+
+/*
* PMU platform driver and devicetree bindings.
*/
static const struct of_device_id exynos_pmu_of_device_ids[] = {
{
.compatible = "samsung,exynos3250-pmu",
- .data = &exynos3250_pmu_data,
+ .data = exynos_pmu_data_arm_ptr(exynos3250_pmu_data),
}, {
.compatible = "samsung,exynos4210-pmu",
- .data = &exynos4210_pmu_data,
+ .data = exynos_pmu_data_arm_ptr(exynos4210_pmu_data),
}, {
.compatible = "samsung,exynos4212-pmu",
- .data = &exynos4212_pmu_data,
+ .data = exynos_pmu_data_arm_ptr(exynos4212_pmu_data),
}, {
.compatible = "samsung,exynos4412-pmu",
- .data = &exynos4412_pmu_data,
+ .data = exynos_pmu_data_arm_ptr(exynos4412_pmu_data),
}, {
.compatible = "samsung,exynos5250-pmu",
- .data = &exynos5250_pmu_data,
+ .data = exynos_pmu_data_arm_ptr(exynos5250_pmu_data),
}, {
.compatible = "samsung,exynos5420-pmu",
- .data = &exynos5420_pmu_data,
+ .data = exynos_pmu_data_arm_ptr(exynos5420_pmu_data),
}, {
.compatible = "samsung,exynos5433-pmu",
},
diff --git a/drivers/soc/samsung/exynos-pmu.h b/drivers/soc/samsung/exynos-pmu.h
index a469e366fead..40d4229abfb5 100644
--- a/drivers/soc/samsung/exynos-pmu.h
+++ b/drivers/soc/samsung/exynos-pmu.h
@@ -31,6 +31,8 @@ struct exynos_pmu_data {
};
extern void __iomem *pmu_base_addr;
+
+#ifdef CONFIG_EXYNOS_PMU_ARM_DRIVERS
/* list of all exported SoC specific data */
extern const struct exynos_pmu_data exynos3250_pmu_data;
extern const struct exynos_pmu_data exynos4210_pmu_data;
@@ -38,6 +40,7 @@ extern const struct exynos_pmu_data exynos4212_pmu_data;
extern const struct exynos_pmu_data exynos4412_pmu_data;
extern const struct exynos_pmu_data exynos5250_pmu_data;
extern const struct exynos_pmu_data exynos5420_pmu_data;
+#endif
extern void pmu_raw_writel(u32 val, u32 offset);
extern u32 pmu_raw_readl(u32 offset);
diff --git a/drivers/soc/tegra/Kconfig b/drivers/soc/tegra/Kconfig
index e5e124c07066..dcf088db40b6 100644
--- a/drivers/soc/tegra/Kconfig
+++ b/drivers/soc/tegra/Kconfig
@@ -12,6 +12,8 @@ config ARCH_TEGRA_2x_SOC
select PINCTRL_TEGRA20
select PL310_ERRATA_727915 if CACHE_L2X0
select PL310_ERRATA_769419 if CACHE_L2X0
+ select SOC_TEGRA_FLOWCTRL
+ select SOC_TEGRA_PMC
select TEGRA_TIMER
help
Support for NVIDIA Tegra AP20 and T20 processors, based on the
@@ -23,6 +25,8 @@ config ARCH_TEGRA_3x_SOC
select ARM_ERRATA_764369 if SMP
select PINCTRL_TEGRA30
select PL310_ERRATA_769419 if CACHE_L2X0
+ select SOC_TEGRA_FLOWCTRL
+ select SOC_TEGRA_PMC
select TEGRA_TIMER
help
Support for NVIDIA Tegra T30 processor family, based on the
@@ -33,6 +37,8 @@ config ARCH_TEGRA_114_SOC
select ARM_ERRATA_798181 if SMP
select HAVE_ARM_ARCH_TIMER
select PINCTRL_TEGRA114
+ select SOC_TEGRA_FLOWCTRL
+ select SOC_TEGRA_PMC
select TEGRA_TIMER
help
Support for NVIDIA Tegra T114 processor family, based on the
@@ -42,6 +48,8 @@ config ARCH_TEGRA_124_SOC
bool "Enable support for Tegra124 family"
select HAVE_ARM_ARCH_TIMER
select PINCTRL_TEGRA124
+ select SOC_TEGRA_FLOWCTRL
+ select SOC_TEGRA_PMC
select TEGRA_TIMER
help
Support for NVIDIA Tegra T124 processor family, based on the
@@ -55,6 +63,8 @@ if ARM64
config ARCH_TEGRA_132_SOC
bool "NVIDIA Tegra132 SoC"
select PINCTRL_TEGRA124
+ select SOC_TEGRA_FLOWCTRL
+ select SOC_TEGRA_PMC
help
Enable support for NVIDIA Tegra132 SoC, based on the Denver
ARMv8 CPU. The Tegra132 SoC is similar to the Tegra124 SoC,
@@ -64,6 +74,8 @@ config ARCH_TEGRA_132_SOC
config ARCH_TEGRA_210_SOC
bool "NVIDIA Tegra210 SoC"
select PINCTRL_TEGRA210
+ select SOC_TEGRA_FLOWCTRL
+ select SOC_TEGRA_PMC
help
Enable support for the NVIDIA Tegra210 SoC. Also known as Tegra X1,
the Tegra210 has four Cortex-A57 cores paired with four Cortex-A53
@@ -83,6 +95,7 @@ config ARCH_TEGRA_186_SOC
select TEGRA_BPMP
select TEGRA_HSP_MBOX
select TEGRA_IVC
+ select SOC_TEGRA_PMC_TEGRA186
help
Enable support for the NVIDIA Tegra186 SoC. The Tegra186 features a
combination of Denver and Cortex-A57 CPU cores and a GPU based on
@@ -93,3 +106,12 @@ config ARCH_TEGRA_186_SOC
endif
endif
+
+config SOC_TEGRA_FLOWCTRL
+ bool
+
+config SOC_TEGRA_PMC
+ bool
+
+config SOC_TEGRA_PMC_TEGRA186
+ bool
diff --git a/drivers/soc/tegra/Makefile b/drivers/soc/tegra/Makefile
index ae857ff7d53d..4f81dd55e5d1 100644
--- a/drivers/soc/tegra/Makefile
+++ b/drivers/soc/tegra/Makefile
@@ -1,4 +1,6 @@
obj-y += fuse/
obj-y += common.o
-obj-y += pmc.o
+obj-$(CONFIG_SOC_TEGRA_FLOWCTRL) += flowctrl.o
+obj-$(CONFIG_SOC_TEGRA_PMC) += pmc.o
+obj-$(CONFIG_SOC_TEGRA_PMC_TEGRA186) += pmc-tegra186.o
diff --git a/drivers/soc/tegra/flowctrl.c b/drivers/soc/tegra/flowctrl.c
new file mode 100644
index 000000000000..0e345c05fc65
--- /dev/null
+++ b/drivers/soc/tegra/flowctrl.c
@@ -0,0 +1,224 @@
+/*
+ * drivers/soc/tegra/flowctrl.c
+ *
+ * Functions and macros to control the flowcontroller
+ *
+ * Copyright (c) 2010-2012, NVIDIA Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/cpumask.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+
+#include <soc/tegra/common.h>
+#include <soc/tegra/flowctrl.h>
+#include <soc/tegra/fuse.h>
+
+static u8 flowctrl_offset_halt_cpu[] = {
+ FLOW_CTRL_HALT_CPU0_EVENTS,
+ FLOW_CTRL_HALT_CPU1_EVENTS,
+ FLOW_CTRL_HALT_CPU1_EVENTS + 8,
+ FLOW_CTRL_HALT_CPU1_EVENTS + 16,
+};
+
+static u8 flowctrl_offset_cpu_csr[] = {
+ FLOW_CTRL_CPU0_CSR,
+ FLOW_CTRL_CPU1_CSR,
+ FLOW_CTRL_CPU1_CSR + 8,
+ FLOW_CTRL_CPU1_CSR + 16,
+};
+
+static void __iomem *tegra_flowctrl_base;
+
+static void flowctrl_update(u8 offset, u32 value)
+{
+ if (WARN_ONCE(IS_ERR_OR_NULL(tegra_flowctrl_base),
+ "Tegra flowctrl not initialised!\n"))
+ return;
+
+ writel(value, tegra_flowctrl_base + offset);
+
+ /* ensure the update has reached the flow controller */
+ wmb();
+ readl_relaxed(tegra_flowctrl_base + offset);
+}
+
+u32 flowctrl_read_cpu_csr(unsigned int cpuid)
+{
+ u8 offset = flowctrl_offset_cpu_csr[cpuid];
+
+ if (WARN_ONCE(IS_ERR_OR_NULL(tegra_flowctrl_base),
+ "Tegra flowctrl not initialised!\n"))
+ return 0;
+
+ return readl(tegra_flowctrl_base + offset);
+}
+
+void flowctrl_write_cpu_csr(unsigned int cpuid, u32 value)
+{
+ return flowctrl_update(flowctrl_offset_cpu_csr[cpuid], value);
+}
+
+void flowctrl_write_cpu_halt(unsigned int cpuid, u32 value)
+{
+ return flowctrl_update(flowctrl_offset_halt_cpu[cpuid], value);
+}
+
+void flowctrl_cpu_suspend_enter(unsigned int cpuid)
+{
+ unsigned int reg;
+ int i;
+
+ reg = flowctrl_read_cpu_csr(cpuid);
+ switch (tegra_get_chip_id()) {
+ case TEGRA20:
+ /* clear wfe bitmap */
+ reg &= ~TEGRA20_FLOW_CTRL_CSR_WFE_BITMAP;
+ /* clear wfi bitmap */
+ reg &= ~TEGRA20_FLOW_CTRL_CSR_WFI_BITMAP;
+ /* pwr gating on wfe */
+ reg |= TEGRA20_FLOW_CTRL_CSR_WFE_CPU0 << cpuid;
+ break;
+ case TEGRA30:
+ case TEGRA114:
+ case TEGRA124:
+ /* clear wfe bitmap */
+ reg &= ~TEGRA30_FLOW_CTRL_CSR_WFE_BITMAP;
+ /* clear wfi bitmap */
+ reg &= ~TEGRA30_FLOW_CTRL_CSR_WFI_BITMAP;
+ /* pwr gating on wfi */
+ reg |= TEGRA30_FLOW_CTRL_CSR_WFI_CPU0 << cpuid;
+ break;
+ }
+ reg |= FLOW_CTRL_CSR_INTR_FLAG; /* clear intr flag */
+ reg |= FLOW_CTRL_CSR_EVENT_FLAG; /* clear event flag */
+ reg |= FLOW_CTRL_CSR_ENABLE; /* pwr gating */
+ flowctrl_write_cpu_csr(cpuid, reg);
+
+ for (i = 0; i < num_possible_cpus(); i++) {
+ if (i == cpuid)
+ continue;
+ reg = flowctrl_read_cpu_csr(i);
+ reg |= FLOW_CTRL_CSR_EVENT_FLAG;
+ reg |= FLOW_CTRL_CSR_INTR_FLAG;
+ flowctrl_write_cpu_csr(i, reg);
+ }
+}
+
+void flowctrl_cpu_suspend_exit(unsigned int cpuid)
+{
+ unsigned int reg;
+
+ /* Disable powergating via flow controller for CPU0 */
+ reg = flowctrl_read_cpu_csr(cpuid);
+ switch (tegra_get_chip_id()) {
+ case TEGRA20:
+ /* clear wfe bitmap */
+ reg &= ~TEGRA20_FLOW_CTRL_CSR_WFE_BITMAP;
+ /* clear wfi bitmap */
+ reg &= ~TEGRA20_FLOW_CTRL_CSR_WFI_BITMAP;
+ break;
+ case TEGRA30:
+ case TEGRA114:
+ case TEGRA124:
+ /* clear wfe bitmap */
+ reg &= ~TEGRA30_FLOW_CTRL_CSR_WFE_BITMAP;
+ /* clear wfi bitmap */
+ reg &= ~TEGRA30_FLOW_CTRL_CSR_WFI_BITMAP;
+ break;
+ }
+ reg &= ~FLOW_CTRL_CSR_ENABLE; /* clear enable */
+ reg |= FLOW_CTRL_CSR_INTR_FLAG; /* clear intr */
+ reg |= FLOW_CTRL_CSR_EVENT_FLAG; /* clear event */
+ flowctrl_write_cpu_csr(cpuid, reg);
+}
+
+static int tegra_flowctrl_probe(struct platform_device *pdev)
+{
+ void __iomem *base = tegra_flowctrl_base;
+ struct resource *res;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ tegra_flowctrl_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(tegra_flowctrl_base))
+ return PTR_ERR(tegra_flowctrl_base);
+
+ iounmap(base);
+
+ return 0;
+}
+
+static const struct of_device_id tegra_flowctrl_match[] = {
+ { .compatible = "nvidia,tegra210-flowctrl" },
+ { .compatible = "nvidia,tegra124-flowctrl" },
+ { .compatible = "nvidia,tegra114-flowctrl" },
+ { .compatible = "nvidia,tegra30-flowctrl" },
+ { .compatible = "nvidia,tegra20-flowctrl" },
+ { }
+};
+
+static struct platform_driver tegra_flowctrl_driver = {
+ .driver = {
+ .name = "tegra-flowctrl",
+ .suppress_bind_attrs = true,
+ .of_match_table = tegra_flowctrl_match,
+ },
+ .probe = tegra_flowctrl_probe,
+};
+builtin_platform_driver(tegra_flowctrl_driver);
+
+static int __init tegra_flowctrl_init(void)
+{
+ struct resource res;
+ struct device_node *np;
+
+ if (!soc_is_tegra())
+ return 0;
+
+ np = of_find_matching_node(NULL, tegra_flowctrl_match);
+ if (np) {
+ if (of_address_to_resource(np, 0, &res) < 0) {
+ pr_err("failed to get flowctrl register\n");
+ return -ENXIO;
+ }
+ of_node_put(np);
+ } else if (IS_ENABLED(CONFIG_ARM)) {
+ /*
+ * Hardcoded fallback for 32-bit Tegra
+ * devices if device tree node is missing.
+ */
+ res.start = 0x60007000;
+ res.end = 0x60007fff;
+ res.flags = IORESOURCE_MEM;
+ } else {
+ /*
+ * At this point we're running on a Tegra
+ * that doesn't support the flow controller
+ * (e.g. Tegra186), so just return.
+ */
+ return 0;
+ }
+
+ tegra_flowctrl_base = ioremap_nocache(res.start, resource_size(&res));
+ if (!tegra_flowctrl_base)
+ return -ENXIO;
+
+ return 0;
+}
+early_initcall(tegra_flowctrl_init);
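
flowctrl_update() above follows each register write with wmb() and a dummy read-back of the same register, so the write is known to have reached the flow controller before the CPU goes on to execute WFI/WFE. The same write-then-read-back pattern in isolation, as a hedged sketch with made-up names (struct demo_dev and DEMO_CTRL are not part of this patch):

#include <linux/io.h>
#include <linux/types.h>

struct demo_dev {
	void __iomem *base;	/* MMIO mapping of the device registers */
};

#define DEMO_CTRL	0x0

static void demo_write_ctrl(struct demo_dev *dev, u32 value)
{
	writel(value, dev->base + DEMO_CTRL);

	/*
	 * Order the write against later instructions and force it out of
	 * any write buffers by reading the register straight back.
	 */
	wmb();
	readl_relaxed(dev->base + DEMO_CTRL);
}
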
diff --git a/drivers/soc/tegra/fuse/fuse-tegra.c b/drivers/soc/tegra/fuse/fuse-tegra.c
index de2c1bfe28b5..7413f60fa855 100644
--- a/drivers/soc/tegra/fuse/fuse-tegra.c
+++ b/drivers/soc/tegra/fuse/fuse-tegra.c
@@ -18,7 +18,7 @@
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/kobject.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_address.h>
@@ -168,7 +168,7 @@ static struct platform_driver tegra_fuse_driver = {
},
.probe = tegra_fuse_probe,
};
-module_platform_driver(tegra_fuse_driver);
+builtin_platform_driver(tegra_fuse_driver);
bool __init tegra_fuse_read_spare(unsigned int spare)
{
diff --git a/drivers/soc/tegra/pmc-tegra186.c b/drivers/soc/tegra/pmc-tegra186.c
new file mode 100644
index 000000000000..6f5c6f98ba92
--- /dev/null
+++ b/drivers/soc/tegra/pmc-tegra186.c
@@ -0,0 +1,169 @@
+/*
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#define pr_fmt(fmt) "tegra-pmc: " fmt
+
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/reboot.h>
+
+#include <asm/system_misc.h>
+
+#define PMC_CNTRL 0x000
+#define PMC_CNTRL_MAIN_RST BIT(4)
+
+#define PMC_RST_STATUS 0x070
+
+#define WAKE_AOWAKE_CTRL 0x4f4
+#define WAKE_AOWAKE_CTRL_INTR_POLARITY BIT(0)
+
+#define SCRATCH_SCRATCH0 0x2000
+#define SCRATCH_SCRATCH0_MODE_RECOVERY BIT(31)
+#define SCRATCH_SCRATCH0_MODE_BOOTLOADER BIT(30)
+#define SCRATCH_SCRATCH0_MODE_RCM BIT(1)
+#define SCRATCH_SCRATCH0_MODE_MASK (SCRATCH_SCRATCH0_MODE_RECOVERY | \
+ SCRATCH_SCRATCH0_MODE_BOOTLOADER | \
+ SCRATCH_SCRATCH0_MODE_RCM)
+
+struct tegra_pmc {
+ struct device *dev;
+ void __iomem *regs;
+ void __iomem *wake;
+ void __iomem *aotag;
+ void __iomem *scratch;
+
+ void (*system_restart)(enum reboot_mode mode, const char *cmd);
+ struct notifier_block restart;
+};
+
+static int tegra186_pmc_restart_notify(struct notifier_block *nb,
+ unsigned long action,
+ void *data)
+{
+ struct tegra_pmc *pmc = container_of(nb, struct tegra_pmc, restart);
+ const char *cmd = data;
+ u32 value;
+
+ value = readl(pmc->scratch + SCRATCH_SCRATCH0);
+ value &= ~SCRATCH_SCRATCH0_MODE_MASK;
+
+ if (cmd) {
+ if (strcmp(cmd, "recovery") == 0)
+ value |= SCRATCH_SCRATCH0_MODE_RECOVERY;
+
+ if (strcmp(cmd, "bootloader") == 0)
+ value |= SCRATCH_SCRATCH0_MODE_BOOTLOADER;
+
+ if (strcmp(cmd, "forced-recovery") == 0)
+ value |= SCRATCH_SCRATCH0_MODE_RCM;
+ }
+
+ writel(value, pmc->scratch + SCRATCH_SCRATCH0);
+
+ /*
+ * If available, call the system restart implementation that was
+ * registered earlier (typically PSCI).
+ */
+ if (pmc->system_restart) {
+ pmc->system_restart(reboot_mode, cmd);
+ return NOTIFY_DONE;
+ }
+
+ /* reset everything but SCRATCH_SCRATCH0 and PMC_RST_STATUS */
+ value = readl(pmc->regs + PMC_CNTRL);
+ value |= PMC_CNTRL_MAIN_RST;
+ writel(value, pmc->regs + PMC_CNTRL);
+
+ return NOTIFY_DONE;
+}
+
+static int tegra186_pmc_setup(struct tegra_pmc *pmc)
+{
+ struct device_node *np = pmc->dev->of_node;
+ bool invert;
+ u32 value;
+
+ invert = of_property_read_bool(np, "nvidia,invert-interrupt");
+
+ value = readl(pmc->wake + WAKE_AOWAKE_CTRL);
+
+ if (invert)
+ value |= WAKE_AOWAKE_CTRL_INTR_POLARITY;
+ else
+ value &= ~WAKE_AOWAKE_CTRL_INTR_POLARITY;
+
+ writel(value, pmc->wake + WAKE_AOWAKE_CTRL);
+
+ /*
+ * We need to hook any system restart implementation registered
+ * previously so we can write SCRATCH_SCRATCH0 before reset.
+ */
+ pmc->system_restart = arm_pm_restart;
+ arm_pm_restart = NULL;
+
+ pmc->restart.notifier_call = tegra186_pmc_restart_notify;
+ pmc->restart.priority = 128;
+
+ return register_restart_handler(&pmc->restart);
+}
+
+static int tegra186_pmc_probe(struct platform_device *pdev)
+{
+ struct tegra_pmc *pmc;
+ struct resource *res;
+
+ pmc = devm_kzalloc(&pdev->dev, sizeof(*pmc), GFP_KERNEL);
+ if (!pmc)
+ return -ENOMEM;
+
+ pmc->dev = &pdev->dev;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pmc");
+ pmc->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(pmc->regs))
+ return PTR_ERR(pmc->regs);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "wake");
+ pmc->wake = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(pmc->wake))
+ return PTR_ERR(pmc->wake);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "aotag");
+ pmc->aotag = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(pmc->aotag))
+ return PTR_ERR(pmc->aotag);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "scratch");
+ pmc->scratch = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(pmc->scratch))
+ return PTR_ERR(pmc->scratch);
+
+ return tegra186_pmc_setup(pmc);
+}
+
+static const struct of_device_id tegra186_pmc_of_match[] = {
+ { .compatible = "nvidia,tegra186-pmc" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, tegra186_pmc_of_match);
+
+static struct platform_driver tegra186_pmc_driver = {
+ .driver = {
+ .name = "tegra186-pmc",
+ .of_match_table = tegra186_pmc_of_match,
+ },
+ .probe = tegra186_pmc_probe,
+};
+builtin_platform_driver(tegra186_pmc_driver);
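
tegra186_pmc_setup() above saves the firmware-registered arm_pm_restart hook, clears it, and installs a restart notifier so the driver can stash the requested reboot mode in SCRATCH_SCRATCH0 before the actual reset happens (via the saved hook, or PMC_CNTRL_MAIN_RST as a fallback). The notifier-registration half of that pattern, reduced to a hedged sketch with hypothetical names:

#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/reboot.h>

struct demo_pmc {
	struct notifier_block restart;
	/* register mappings elided */
};

static int demo_restart_notify(struct notifier_block *nb,
			       unsigned long action, void *data)
{
	struct demo_pmc *pmc = container_of(nb, struct demo_pmc, restart);
	const char *cmd = data;		/* e.g. "recovery", may be NULL */

	/*
	 * Record the requested reboot mode somewhere the boot firmware can
	 * read it back (a scratch register, for instance), then let the
	 * remaining handlers in the chain perform the reset itself.
	 */
	(void)pmc;
	(void)cmd;

	return NOTIFY_DONE;
}

static int demo_pmc_register_restart(struct demo_pmc *pmc)
{
	pmc->restart.notifier_call = demo_restart_notify;
	pmc->restart.priority = 128;	/* ahead of the default handlers (0) */

	return register_restart_handler(&pmc->restart);
}
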
diff --git a/drivers/soc/ti/Kconfig b/drivers/soc/ti/Kconfig
index 3557c5e32a93..39e152abe6b9 100644
--- a/drivers/soc/ti/Kconfig
+++ b/drivers/soc/ti/Kconfig
@@ -38,4 +38,16 @@ config WKUP_M3_IPC
to communicate and use the Wakeup M3 for PM features like suspend
resume and boots it using wkup_m3_rproc driver.
+config TI_SCI_PM_DOMAINS
+ tristate "TI SCI PM Domains Driver"
+ depends on TI_SCI_PROTOCOL
+ depends on PM_GENERIC_DOMAINS
+ help
+ Generic power domain implementation for TI devices implementing
+ the TI SCI protocol.
+
+ To compile this as a module, choose M here. The module will be
+ called ti_sci_pm_domains. Note that this is needed early in boot,
+ before the rootfs may be available.
+
endif # SOC_TI
diff --git a/drivers/soc/ti/Makefile b/drivers/soc/ti/Makefile
index 48ff3a79634f..7d572736c86e 100644
--- a/drivers/soc/ti/Makefile
+++ b/drivers/soc/ti/Makefile
@@ -5,3 +5,4 @@ obj-$(CONFIG_KEYSTONE_NAVIGATOR_QMSS) += knav_qmss.o
knav_qmss-y := knav_qmss_queue.o knav_qmss_acc.o
obj-$(CONFIG_KEYSTONE_NAVIGATOR_DMA) += knav_dma.o
obj-$(CONFIG_WKUP_M3_IPC) += wkup_m3_ipc.o
+obj-$(CONFIG_TI_SCI_PM_DOMAINS) += ti_sci_pm_domains.o
diff --git a/drivers/soc/ti/ti_sci_pm_domains.c b/drivers/soc/ti/ti_sci_pm_domains.c
new file mode 100644
index 000000000000..b0b283810e72
--- /dev/null
+++ b/drivers/soc/ti/ti_sci_pm_domains.c
@@ -0,0 +1,202 @@
+/*
+ * TI SCI Generic Power Domain Driver
+ *
+ * Copyright (C) 2015-2017 Texas Instruments Incorporated - http://www.ti.com/
+ * J Keerthy <j-keerthy@ti.com>
+ * Dave Gerlach <d-gerlach@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/slab.h>
+#include <linux/soc/ti/ti_sci_protocol.h>
+
+/**
+ * struct ti_sci_genpd_dev_data: holds data needed for every device attached
+ * to this genpd
+ * @idx: index of the device that identifies it with the system
+ * control processor.
+ */
+struct ti_sci_genpd_dev_data {
+ int idx;
+};
+
+/**
+ * struct ti_sci_pm_domain: TI specific data needed for power domain
+ * @ti_sci: handle to TI SCI protocol driver that provides ops to
+ * communicate with system control processor.
+ * @dev: pointer to dev for the driver for devm allocs
+ * @pd: generic_pm_domain for use with the genpd framework
+ */
+struct ti_sci_pm_domain {
+ const struct ti_sci_handle *ti_sci;
+ struct device *dev;
+ struct generic_pm_domain pd;
+};
+
+#define genpd_to_ti_sci_pd(gpd) container_of(gpd, struct ti_sci_pm_domain, pd)
+
+/**
+ * ti_sci_dev_id(): get prepopulated ti_sci id from struct dev
+ * @dev: pointer to device associated with this genpd
+ *
+ * Returns device_id stored from ti,sci_id property
+ */
+static int ti_sci_dev_id(struct device *dev)
+{
+ struct generic_pm_domain_data *genpd_data = dev_gpd_data(dev);
+ struct ti_sci_genpd_dev_data *sci_dev_data = genpd_data->data;
+
+ return sci_dev_data->idx;
+}
+
+/**
+ * ti_sci_dev_to_sci_handle(): get pointer to ti_sci_handle
+ * @dev: pointer to device associated with this genpd
+ *
+ * Returns ti_sci_handle to be used to communicate with system
+ * control processor.
+ */
+static const struct ti_sci_handle *ti_sci_dev_to_sci_handle(struct device *dev)
+{
+ struct generic_pm_domain *pd = pd_to_genpd(dev->pm_domain);
+ struct ti_sci_pm_domain *ti_sci_genpd = genpd_to_ti_sci_pd(pd);
+
+ return ti_sci_genpd->ti_sci;
+}
+
+/**
+ * ti_sci_dev_start(): genpd device start hook called to turn device on
+ * @dev: pointer to device associated with this genpd to be powered on
+ */
+static int ti_sci_dev_start(struct device *dev)
+{
+ const struct ti_sci_handle *ti_sci = ti_sci_dev_to_sci_handle(dev);
+ int idx = ti_sci_dev_id(dev);
+
+ return ti_sci->ops.dev_ops.get_device(ti_sci, idx);
+}
+
+/**
+ * ti_sci_dev_stop(): genpd device stop hook called to turn device off
+ * @dev: pointer to device associated with this genpd to be powered off
+ */
+static int ti_sci_dev_stop(struct device *dev)
+{
+ const struct ti_sci_handle *ti_sci = ti_sci_dev_to_sci_handle(dev);
+ int idx = ti_sci_dev_id(dev);
+
+ return ti_sci->ops.dev_ops.put_device(ti_sci, idx);
+}
+
+static int ti_sci_pd_attach_dev(struct generic_pm_domain *domain,
+ struct device *dev)
+{
+ struct device_node *np = dev->of_node;
+ struct of_phandle_args pd_args;
+ struct ti_sci_pm_domain *ti_sci_genpd = genpd_to_ti_sci_pd(domain);
+ const struct ti_sci_handle *ti_sci = ti_sci_genpd->ti_sci;
+ struct ti_sci_genpd_dev_data *sci_dev_data;
+ struct generic_pm_domain_data *genpd_data;
+ int idx, ret = 0;
+
+ ret = of_parse_phandle_with_args(np, "power-domains",
+ "#power-domain-cells", 0, &pd_args);
+ if (ret < 0)
+ return ret;
+
+ if (pd_args.args_count != 1)
+ return -EINVAL;
+
+ idx = pd_args.args[0];
+
+ /*
+ * Check the validity of the requested idx; if the index is not valid,
+ * the PMMC will return a NAK here and we will not allocate it.
+ */
+ ret = ti_sci->ops.dev_ops.is_valid(ti_sci, idx);
+ if (ret)
+ return -EINVAL;
+
+ sci_dev_data = kzalloc(sizeof(*sci_dev_data), GFP_KERNEL);
+ if (!sci_dev_data)
+ return -ENOMEM;
+
+ sci_dev_data->idx = idx;
+
+ genpd_data = dev_gpd_data(dev);
+ genpd_data->data = sci_dev_data;
+
+ return 0;
+}
+
+static void ti_sci_pd_detach_dev(struct generic_pm_domain *domain,
+ struct device *dev)
+{
+ struct generic_pm_domain_data *genpd_data = dev_gpd_data(dev);
+ struct ti_sci_genpd_dev_data *sci_dev_data = genpd_data->data;
+
+ kfree(sci_dev_data);
+ genpd_data->data = NULL;
+}
+
+static const struct of_device_id ti_sci_pm_domain_matches[] = {
+ { .compatible = "ti,sci-pm-domain", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, ti_sci_pm_domain_matches);
+
+static int ti_sci_pm_domain_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct ti_sci_pm_domain *ti_sci_pd;
+ int ret;
+
+ ti_sci_pd = devm_kzalloc(dev, sizeof(*ti_sci_pd), GFP_KERNEL);
+ if (!ti_sci_pd)
+ return -ENOMEM;
+
+ ti_sci_pd->ti_sci = devm_ti_sci_get_handle(dev);
+ if (IS_ERR(ti_sci_pd->ti_sci))
+ return PTR_ERR(ti_sci_pd->ti_sci);
+
+ ti_sci_pd->dev = dev;
+
+ ti_sci_pd->pd.attach_dev = ti_sci_pd_attach_dev;
+ ti_sci_pd->pd.detach_dev = ti_sci_pd_detach_dev;
+
+ ti_sci_pd->pd.dev_ops.start = ti_sci_dev_start;
+ ti_sci_pd->pd.dev_ops.stop = ti_sci_dev_stop;
+
+ pm_genpd_init(&ti_sci_pd->pd, NULL, true);
+
+ ret = of_genpd_add_provider_simple(np, &ti_sci_pd->pd);
+
+ return ret;
+}
+
+static struct platform_driver ti_sci_pm_domains_driver = {
+ .probe = ti_sci_pm_domain_probe,
+ .driver = {
+ .name = "ti_sci_pm_domains",
+ .of_match_table = ti_sci_pm_domain_matches,
+ },
+};
+module_platform_driver(ti_sci_pm_domains_driver);
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("TI System Control Interface (SCI) Power Domain driver");
+MODULE_AUTHOR("Dave Gerlach");
diff --git a/drivers/soc/zte/zx296718_pm_domains.c b/drivers/soc/zte/zx296718_pm_domains.c
index 5ed924fee855..4dc5d62ee81b 100644
--- a/drivers/soc/zte/zx296718_pm_domains.c
+++ b/drivers/soc/zte/zx296718_pm_domains.c
@@ -169,7 +169,6 @@ static const struct of_device_id zx296718_pm_domain_matches[] = {
static struct platform_driver zx296718_pd_driver = {
.driver = {
.name = "zx296718-powerdomain",
- .owner = THIS_MODULE,
.of_match_table = zx296718_pm_domain_matches,
},
.probe = zx296718_pd_probe,
diff --git a/drivers/soc/zte/zx2967_pm_domains.c b/drivers/soc/zte/zx2967_pm_domains.c
index 61c8d84bf315..c42aeaaa34ba 100644
--- a/drivers/soc/zte/zx2967_pm_domains.c
+++ b/drivers/soc/zte/zx2967_pm_domains.c
@@ -125,10 +125,8 @@ int zx2967_pd_probe(struct platform_device *pdev,
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
pcubase = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(pcubase)) {
- dev_err(&pdev->dev, "ioremap fail.\n");
+ if (IS_ERR(pcubase))
return PTR_ERR(pcubase);
- }
for (i = 0; i < domain_num; ++i) {
zx_pm_domains[i]->power_on = zx2967_power_on;
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 4c360f8071a8..268d4e6ef48a 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -34,6 +34,8 @@ source "drivers/staging/rtl8192u/Kconfig"
source "drivers/staging/rtl8192e/Kconfig"
+source "drivers/staging/rtl8723bs/Kconfig"
+
source "drivers/staging/rtl8712/Kconfig"
source "drivers/staging/rtl8188eu/Kconfig"
@@ -92,6 +94,8 @@ source "drivers/staging/fbtft/Kconfig"
source "drivers/staging/fsl-mc/Kconfig"
+source "drivers/staging/fsl-dpaa2/Kconfig"
+
source "drivers/staging/wilc1000/Kconfig"
source "drivers/staging/most/Kconfig"
@@ -102,6 +106,8 @@ source "drivers/staging/greybus/Kconfig"
source "drivers/staging/vc04_services/Kconfig"
-source "drivers/staging/bcm2835-audio/Kconfig"
+source "drivers/staging/ccree/Kconfig"
+
+source "drivers/staging/typec/Kconfig"
endif # STAGING
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index 29cec5aa2945..b93e6f5f0f6e 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -1,11 +1,13 @@
# Makefile for staging directory
obj-y += media/
+obj-y += typec/
obj-$(CONFIG_PRISM2_USB) += wlan-ng/
obj-$(CONFIG_COMEDI) += comedi/
obj-$(CONFIG_FB_OLPC_DCON) += olpc_dcon/
obj-$(CONFIG_RTL8192U) += rtl8192u/
obj-$(CONFIG_RTL8192E) += rtl8192e/
+obj-$(CONFIG_RTL8723BS) += rtl8723bs/
obj-$(CONFIG_R8712U) += rtl8712/
obj-$(CONFIG_R8188EU) += rtl8188eu/
obj-$(CONFIG_RTS5208) += rts5208/
@@ -35,10 +37,10 @@ obj-$(CONFIG_UNISYSSPAR) += unisys/
obj-$(CONFIG_COMMON_CLK_XLNX_CLKWZRD) += clocking-wizard/
obj-$(CONFIG_FB_TFT) += fbtft/
obj-$(CONFIG_FSL_MC_BUS) += fsl-mc/
+obj-$(CONFIG_FSL_DPAA2) += fsl-dpaa2/
obj-$(CONFIG_WILC1000) += wilc1000/
obj-$(CONFIG_MOST) += most/
obj-$(CONFIG_KS7010) += ks7010/
obj-$(CONFIG_GREYBUS) += greybus/
obj-$(CONFIG_BCM2835_VCHIQ) += vc04_services/
-obj-$(CONFIG_SND_BCM2835) += bcm2835-audio/
-
+obj-$(CONFIG_CRYPTO_DEV_CCREE) += ccree/
diff --git a/drivers/staging/android/Kconfig b/drivers/staging/android/Kconfig
index 6c00d6f765c6..71a50b99caff 100644
--- a/drivers/staging/android/Kconfig
+++ b/drivers/staging/android/Kconfig
@@ -14,16 +14,6 @@ config ASHMEM
It is, in theory, a good memory allocator for low-memory devices,
because it can discard shared memory units when under memory pressure.
-config ANDROID_LOW_MEMORY_KILLER
- bool "Android Low Memory Killer"
- ---help---
- Registers processes to be killed when low memory conditions, this is useful
- as there is no particular swap space on android.
-
- The registered process will kill according to the priorities in android init
- scripts (/init.rc), and it defines priority values with minimum free memory size
- for each priority.
-
source "drivers/staging/android/ion/Kconfig"
endif # if ANDROID
diff --git a/drivers/staging/android/Makefile b/drivers/staging/android/Makefile
index 7ed1be798909..7cf1564a49a5 100644
--- a/drivers/staging/android/Makefile
+++ b/drivers/staging/android/Makefile
@@ -3,4 +3,3 @@ ccflags-y += -I$(src) # needed for trace events
obj-y += ion/
obj-$(CONFIG_ASHMEM) += ashmem.o
-obj-$(CONFIG_ANDROID_LOW_MEMORY_KILLER) += lowmemorykiller.o
diff --git a/drivers/staging/android/TODO b/drivers/staging/android/TODO
index 8f3ac37bfe12..5f14247392bf 100644
--- a/drivers/staging/android/TODO
+++ b/drivers/staging/android/TODO
@@ -7,23 +7,10 @@ TODO:
ion/
- - Remove ION_IOC_SYNC: Flushing for devices should be purely a kernel internal
- interface on top of dma-buf. flush_for_device needs to be added to dma-buf
- first.
- - Remove ION_IOC_CUSTOM: Atm used for cache flushing for cpu access in some
- vendor trees. Should be replaced with an ioctl on the dma-buf to expose the
- begin/end_cpu_access hooks to userspace.
- - Clarify the tricks ion plays with explicitly managing coherency behind the
- dma api's back (this is absolutely needed for high-perf gpu drivers): Add an
- explicit coherency management mode to flush_for_device to be used by drivers
- which want to manage caches themselves and which indicates whether cpu caches
- need flushing.
- - With those removed there's probably no use for ION_IOC_IMPORT anymore either
- since ion would just be the central allocator for shared buffers.
- - Add dt-binding to expose cma regions as ion heaps, with the rule that any
- such cma regions must already be used by some device for dma. I.e. ion only
- exposes existing cma regions and doesn't reserve unecessarily memory when
- booting a system which doesn't use ion.
+ - Add dt-bindings for remaining heaps (chunk and carveout heaps). This would
+ involve putting appropriate bindings in a memory node for Ion to find.
+ - Split /dev/ion up into multiple nodes (e.g. /dev/ion/heap0)
+ - Better test framework (integration with VGEM was suggested)
Please send patches to Greg Kroah-Hartman <greg@kroah.com> and Cc:
Arve Hjønnevåg <arve@android.com> and Riley Andrews <riandrews@android.com>
diff --git a/drivers/staging/android/ion/Kconfig b/drivers/staging/android/ion/Kconfig
index c8fb4134c55d..a517b2d29f1b 100644
--- a/drivers/staging/android/ion/Kconfig
+++ b/drivers/staging/android/ion/Kconfig
@@ -10,45 +10,35 @@ menuconfig ION
If you're not using Android it's probably safe to
say N here.
-config ION_TEST
- tristate "Ion Test Device"
+config ION_SYSTEM_HEAP
+ bool "Ion system heap"
depends on ION
help
- Choose this option to create a device that can be used to test the
- kernel and device side ION functions.
+ Choose this option to enable the Ion system heap. The system heap
+ is backed by pages from the buddy allocator. If in doubt, say Y.
-config ION_DUMMY
- bool "Dummy Ion driver"
+config ION_CARVEOUT_HEAP
+ bool "Ion carveout heap support"
depends on ION
help
- Provides a dummy ION driver that registers the
- /dev/ion device and some basic heaps. This can
- be used for testing the ION infrastructure if
- one doesn't have access to hardware drivers that
- use ION.
+ Choose this option to enable carveout heaps with Ion. Carveout heaps
+ are backed by memory reserved from the system. Allocation times are
+ typically faster, at the cost of reserving memory that may otherwise
+ go unused. Unless you know your system has these regions, you should
+ say N here.
-config ION_TEGRA
- tristate "Ion for Tegra"
- depends on ARCH_TEGRA && ION
+config ION_CHUNK_HEAP
+ bool "Ion chunk heap support"
+ depends on ION
help
- Choose this option if you wish to use ion on an nVidia Tegra.
+ Choose this option to enable chunk heaps with Ion. This heap is
+ similar in function to the carveout heap but memory is broken down
+ into smaller chunk sizes, typically corresponding to a TLB size.
+ Unless you know your system has these regions, you should say N here.
-config ION_HISI
- tristate "Ion for Hisilicon"
- depends on ARCH_HISI && ION
- select ION_OF
+config ION_CMA_HEAP
+ bool "Ion CMA heap support"
+ depends on ION && CMA
help
- Choose this option if you wish to use ion on Hisilicon Platform.
-
-source "drivers/staging/android/ion/hisilicon/Kconfig"
-
-config ION_OF
- bool "Devicetree support for Ion"
- depends on ION && OF_ADDRESS
- help
- Provides base support for defining Ion heaps in devicetree
- and setting them up. Also includes functions for platforms
- to parse the devicetree and expand for their own custom
- extensions
-
- If using Ion and devicetree, you should say Y here
+ Choose this option to enable CMA heaps with Ion. This heap is backed
+ by the Contiguous Memory Allocator (CMA). If your system has these
+ regions, you should say Y here.
diff --git a/drivers/staging/android/ion/Makefile b/drivers/staging/android/ion/Makefile
index 5d630a088381..eb7eeed6ae40 100644
--- a/drivers/staging/android/ion/Makefile
+++ b/drivers/staging/android/ion/Makefile
@@ -1,13 +1,5 @@
-obj-$(CONFIG_ION) += ion.o ion-ioctl.o ion_heap.o \
- ion_page_pool.o ion_system_heap.o \
- ion_carveout_heap.o ion_chunk_heap.o ion_cma_heap.o
-obj-$(CONFIG_ION_TEST) += ion_test.o
-ifdef CONFIG_COMPAT
-obj-$(CONFIG_ION) += compat_ion.o
-endif
-
-obj-$(CONFIG_ION_DUMMY) += ion_dummy_driver.o
-obj-$(CONFIG_ION_TEGRA) += tegra/
-obj-$(CONFIG_ION_HISI) += hisilicon/
-obj-$(CONFIG_ION_OF) += ion_of.o
-
+obj-$(CONFIG_ION) += ion.o ion-ioctl.o ion_heap.o
+obj-$(CONFIG_ION_SYSTEM_HEAP) += ion_system_heap.o ion_page_pool.o
+obj-$(CONFIG_ION_CARVEOUT_HEAP) += ion_carveout_heap.o
+obj-$(CONFIG_ION_CHUNK_HEAP) += ion_chunk_heap.o
+obj-$(CONFIG_ION_CMA_HEAP) += ion_cma_heap.o
diff --git a/drivers/staging/android/ion/compat_ion.c b/drivers/staging/android/ion/compat_ion.c
deleted file mode 100644
index 9a978d21785e..000000000000
--- a/drivers/staging/android/ion/compat_ion.c
+++ /dev/null
@@ -1,195 +0,0 @@
-/*
- * drivers/staging/android/ion/compat_ion.c
- *
- * Copyright (C) 2013 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/compat.h>
-#include <linux/fs.h>
-#include <linux/uaccess.h>
-
-#include "ion.h"
-#include "compat_ion.h"
-
-/* See drivers/staging/android/uapi/ion.h for the definition of these structs */
-struct compat_ion_allocation_data {
- compat_size_t len;
- compat_size_t align;
- compat_uint_t heap_id_mask;
- compat_uint_t flags;
- compat_int_t handle;
-};
-
-struct compat_ion_custom_data {
- compat_uint_t cmd;
- compat_ulong_t arg;
-};
-
-struct compat_ion_handle_data {
- compat_int_t handle;
-};
-
-#define COMPAT_ION_IOC_ALLOC _IOWR(ION_IOC_MAGIC, 0, \
- struct compat_ion_allocation_data)
-#define COMPAT_ION_IOC_FREE _IOWR(ION_IOC_MAGIC, 1, \
- struct compat_ion_handle_data)
-#define COMPAT_ION_IOC_CUSTOM _IOWR(ION_IOC_MAGIC, 6, \
- struct compat_ion_custom_data)
-
-static int compat_get_ion_allocation_data(
- struct compat_ion_allocation_data __user *data32,
- struct ion_allocation_data __user *data)
-{
- compat_size_t s;
- compat_uint_t u;
- compat_int_t i;
- int err;
-
- err = get_user(s, &data32->len);
- err |= put_user(s, &data->len);
- err |= get_user(s, &data32->align);
- err |= put_user(s, &data->align);
- err |= get_user(u, &data32->heap_id_mask);
- err |= put_user(u, &data->heap_id_mask);
- err |= get_user(u, &data32->flags);
- err |= put_user(u, &data->flags);
- err |= get_user(i, &data32->handle);
- err |= put_user(i, &data->handle);
-
- return err;
-}
-
-static int compat_get_ion_handle_data(
- struct compat_ion_handle_data __user *data32,
- struct ion_handle_data __user *data)
-{
- compat_int_t i;
- int err;
-
- err = get_user(i, &data32->handle);
- err |= put_user(i, &data->handle);
-
- return err;
-}
-
-static int compat_put_ion_allocation_data(
- struct compat_ion_allocation_data __user *data32,
- struct ion_allocation_data __user *data)
-{
- compat_size_t s;
- compat_uint_t u;
- compat_int_t i;
- int err;
-
- err = get_user(s, &data->len);
- err |= put_user(s, &data32->len);
- err |= get_user(s, &data->align);
- err |= put_user(s, &data32->align);
- err |= get_user(u, &data->heap_id_mask);
- err |= put_user(u, &data32->heap_id_mask);
- err |= get_user(u, &data->flags);
- err |= put_user(u, &data32->flags);
- err |= get_user(i, &data->handle);
- err |= put_user(i, &data32->handle);
-
- return err;
-}
-
-static int compat_get_ion_custom_data(
- struct compat_ion_custom_data __user *data32,
- struct ion_custom_data __user *data)
-{
- compat_uint_t cmd;
- compat_ulong_t arg;
- int err;
-
- err = get_user(cmd, &data32->cmd);
- err |= put_user(cmd, &data->cmd);
- err |= get_user(arg, &data32->arg);
- err |= put_user(arg, &data->arg);
-
- return err;
-};
-
-long compat_ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
-{
- long ret;
-
- if (!filp->f_op->unlocked_ioctl)
- return -ENOTTY;
-
- switch (cmd) {
- case COMPAT_ION_IOC_ALLOC:
- {
- struct compat_ion_allocation_data __user *data32;
- struct ion_allocation_data __user *data;
- int err;
-
- data32 = compat_ptr(arg);
- data = compat_alloc_user_space(sizeof(*data));
- if (!data)
- return -EFAULT;
-
- err = compat_get_ion_allocation_data(data32, data);
- if (err)
- return err;
- ret = filp->f_op->unlocked_ioctl(filp, ION_IOC_ALLOC,
- (unsigned long)data);
- err = compat_put_ion_allocation_data(data32, data);
- return ret ? ret : err;
- }
- case COMPAT_ION_IOC_FREE:
- {
- struct compat_ion_handle_data __user *data32;
- struct ion_handle_data __user *data;
- int err;
-
- data32 = compat_ptr(arg);
- data = compat_alloc_user_space(sizeof(*data));
- if (!data)
- return -EFAULT;
-
- err = compat_get_ion_handle_data(data32, data);
- if (err)
- return err;
-
- return filp->f_op->unlocked_ioctl(filp, ION_IOC_FREE,
- (unsigned long)data);
- }
- case COMPAT_ION_IOC_CUSTOM: {
- struct compat_ion_custom_data __user *data32;
- struct ion_custom_data __user *data;
- int err;
-
- data32 = compat_ptr(arg);
- data = compat_alloc_user_space(sizeof(*data));
- if (!data)
- return -EFAULT;
-
- err = compat_get_ion_custom_data(data32, data);
- if (err)
- return err;
-
- return filp->f_op->unlocked_ioctl(filp, ION_IOC_CUSTOM,
- (unsigned long)data);
- }
- case ION_IOC_SHARE:
- case ION_IOC_MAP:
- case ION_IOC_IMPORT:
- case ION_IOC_SYNC:
- return filp->f_op->unlocked_ioctl(filp, cmd,
- (unsigned long)compat_ptr(arg));
- default:
- return -ENOIOCTLCMD;
- }
-}
diff --git a/drivers/staging/android/ion/compat_ion.h b/drivers/staging/android/ion/compat_ion.h
deleted file mode 100644
index 9da8f917670b..000000000000
--- a/drivers/staging/android/ion/compat_ion.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * drivers/staging/android/ion/compat_ion.h
- *
- * Copyright (C) 2013 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#ifndef _LINUX_COMPAT_ION_H
-#define _LINUX_COMPAT_ION_H
-
-#if IS_ENABLED(CONFIG_COMPAT)
-
-long compat_ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
-
-#else
-
-#define compat_ion_ioctl NULL
-
-#endif /* CONFIG_COMPAT */
-#endif /* _LINUX_COMPAT_ION_H */
diff --git a/drivers/staging/android/ion/hisilicon/Kconfig b/drivers/staging/android/ion/hisilicon/Kconfig
deleted file mode 100644
index 2b4bd0798290..000000000000
--- a/drivers/staging/android/ion/hisilicon/Kconfig
+++ /dev/null
@@ -1,5 +0,0 @@
-config HI6220_ION
- bool "Hi6220 ION Driver"
- depends on ARCH_HISI && ION
- help
- Build the Hisilicon Hi6220 ion driver.
diff --git a/drivers/staging/android/ion/hisilicon/Makefile b/drivers/staging/android/ion/hisilicon/Makefile
deleted file mode 100644
index 2a89414280ac..000000000000
--- a/drivers/staging/android/ion/hisilicon/Makefile
+++ /dev/null
@@ -1 +0,0 @@
-obj-$(CONFIG_HI6220_ION) += hi6220_ion.o
diff --git a/drivers/staging/android/ion/hisilicon/hi6220_ion.c b/drivers/staging/android/ion/hisilicon/hi6220_ion.c
deleted file mode 100644
index 0de7897fd4bf..000000000000
--- a/drivers/staging/android/ion/hisilicon/hi6220_ion.c
+++ /dev/null
@@ -1,113 +0,0 @@
-/*
- * Hisilicon Hi6220 ION Driver
- *
- * Copyright (c) 2015 Hisilicon Limited.
- *
- * Author: Chen Feng <puck.chen@hisilicon.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#define pr_fmt(fmt) "Ion: " fmt
-
-#include <linux/err.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-#include <linux/of.h>
-#include <linux/mm.h>
-#include "../ion_priv.h"
-#include "../ion.h"
-#include "../ion_of.h"
-
-struct hisi_ion_dev {
- struct ion_heap **heaps;
- struct ion_device *idev;
- struct ion_platform_data *data;
-};
-
-static struct ion_of_heap hisi_heaps[] = {
- PLATFORM_HEAP("hisilicon,sys_user", 0,
- ION_HEAP_TYPE_SYSTEM, "sys_user"),
- PLATFORM_HEAP("hisilicon,sys_contig", 1,
- ION_HEAP_TYPE_SYSTEM_CONTIG, "sys_contig"),
- PLATFORM_HEAP("hisilicon,cma", ION_HEAP_TYPE_DMA, ION_HEAP_TYPE_DMA,
- "cma"),
- {}
-};
-
-static int hi6220_ion_probe(struct platform_device *pdev)
-{
- struct hisi_ion_dev *ipdev;
- int i;
-
- ipdev = devm_kzalloc(&pdev->dev, sizeof(*ipdev), GFP_KERNEL);
- if (!ipdev)
- return -ENOMEM;
-
- platform_set_drvdata(pdev, ipdev);
-
- ipdev->idev = ion_device_create(NULL);
- if (IS_ERR(ipdev->idev))
- return PTR_ERR(ipdev->idev);
-
- ipdev->data = ion_parse_dt(pdev, hisi_heaps);
- if (IS_ERR(ipdev->data))
- return PTR_ERR(ipdev->data);
-
- ipdev->heaps = devm_kzalloc(&pdev->dev,
- sizeof(struct ion_heap) * ipdev->data->nr,
- GFP_KERNEL);
- if (!ipdev->heaps) {
- ion_destroy_platform_data(ipdev->data);
- return -ENOMEM;
- }
-
- for (i = 0; i < ipdev->data->nr; i++) {
- ipdev->heaps[i] = ion_heap_create(&ipdev->data->heaps[i]);
- if (!ipdev->heaps) {
- ion_destroy_platform_data(ipdev->data);
- return -ENOMEM;
- }
- ion_device_add_heap(ipdev->idev, ipdev->heaps[i]);
- }
- return 0;
-}
-
-static int hi6220_ion_remove(struct platform_device *pdev)
-{
- struct hisi_ion_dev *ipdev;
- int i;
-
- ipdev = platform_get_drvdata(pdev);
-
- for (i = 0; i < ipdev->data->nr; i++)
- ion_heap_destroy(ipdev->heaps[i]);
-
- ion_destroy_platform_data(ipdev->data);
- ion_device_destroy(ipdev->idev);
-
- return 0;
-}
-
-static const struct of_device_id hi6220_ion_match_table[] = {
- {.compatible = "hisilicon,hi6220-ion"},
- {},
-};
-
-static struct platform_driver hi6220_ion_driver = {
- .probe = hi6220_ion_probe,
- .remove = hi6220_ion_remove,
- .driver = {
- .name = "ion-hi6220",
- .of_match_table = hi6220_ion_match_table,
- },
-};
-
-static int __init hi6220_ion_init(void)
-{
- return platform_driver_register(&hi6220_ion_driver);
-}
-
-subsys_initcall(hi6220_ion_init);
diff --git a/drivers/staging/android/ion/ion-ioctl.c b/drivers/staging/android/ion/ion-ioctl.c
index 9ff815ad1cb1..76427e4773a8 100644
--- a/drivers/staging/android/ion/ion-ioctl.c
+++ b/drivers/staging/android/ion/ion-ioctl.c
@@ -19,14 +19,9 @@
#include <linux/uaccess.h>
#include "ion.h"
-#include "ion_priv.h"
-#include "compat_ion.h"
union ion_ioctl_arg {
- struct ion_fd_data fd;
struct ion_allocation_data allocation;
- struct ion_handle_data handle;
- struct ion_custom_data custom;
struct ion_heap_query query;
};
@@ -51,10 +46,6 @@ static int validate_ioctl_arg(unsigned int cmd, union ion_ioctl_arg *arg)
static unsigned int ion_ioctl_dir(unsigned int cmd)
{
switch (cmd) {
- case ION_IOC_SYNC:
- case ION_IOC_FREE:
- case ION_IOC_CUSTOM:
- return _IOC_WRITE;
default:
return _IOC_DIR(cmd);
}
@@ -62,9 +53,6 @@ static unsigned int ion_ioctl_dir(unsigned int cmd)
long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
- struct ion_client *client = filp->private_data;
- struct ion_device *dev = client->dev;
- struct ion_handle *cleanup_handle = NULL;
int ret = 0;
unsigned int dir;
union ion_ioctl_arg data;
@@ -92,87 +80,28 @@ long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
switch (cmd) {
case ION_IOC_ALLOC:
{
- struct ion_handle *handle;
+ int fd;
- handle = ion_alloc(client, data.allocation.len,
- data.allocation.align,
+ fd = ion_alloc(data.allocation.len,
data.allocation.heap_id_mask,
data.allocation.flags);
- if (IS_ERR(handle))
- return PTR_ERR(handle);
+ if (fd < 0)
+ return fd;
- data.allocation.handle = handle->id;
+ data.allocation.fd = fd;
- cleanup_handle = handle;
- break;
- }
- case ION_IOC_FREE:
- {
- struct ion_handle *handle;
-
- mutex_lock(&client->lock);
- handle = ion_handle_get_by_id_nolock(client,
- data.handle.handle);
- if (IS_ERR(handle)) {
- mutex_unlock(&client->lock);
- return PTR_ERR(handle);
- }
- ion_free_nolock(client, handle);
- ion_handle_put_nolock(handle);
- mutex_unlock(&client->lock);
- break;
- }
- case ION_IOC_SHARE:
- case ION_IOC_MAP:
- {
- struct ion_handle *handle;
-
- handle = ion_handle_get_by_id(client, data.handle.handle);
- if (IS_ERR(handle))
- return PTR_ERR(handle);
- data.fd.fd = ion_share_dma_buf_fd(client, handle);
- ion_handle_put(handle);
- if (data.fd.fd < 0)
- ret = data.fd.fd;
- break;
- }
- case ION_IOC_IMPORT:
- {
- struct ion_handle *handle;
-
- handle = ion_import_dma_buf_fd(client, data.fd.fd);
- if (IS_ERR(handle))
- ret = PTR_ERR(handle);
- else
- data.handle.handle = handle->id;
- break;
- }
- case ION_IOC_SYNC:
- {
- ret = ion_sync_for_device(client, data.fd.fd);
- break;
- }
- case ION_IOC_CUSTOM:
- {
- if (!dev->custom_ioctl)
- return -ENOTTY;
- ret = dev->custom_ioctl(client, data.custom.cmd,
- data.custom.arg);
break;
}
case ION_IOC_HEAP_QUERY:
- ret = ion_query_heaps(client, &data.query);
+ ret = ion_query_heaps(&data.query);
break;
default:
return -ENOTTY;
}
if (dir & _IOC_READ) {
- if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd))) {
- if (cleanup_handle)
- ion_free(client, cleanup_handle);
+ if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd)))
return -EFAULT;
- }
}
return ret;
}
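
With the handle-based paths deleted, ION_IOC_ALLOC now hands userspace a dma-buf file descriptor directly in ion_allocation_data.fd, and ION_IOC_HEAP_QUERY is the only other ioctl left in the switch above. A rough userspace sketch of the new allocation flow; it assumes a copy of the staging uapi header is available locally as ion.h and that heap id 0 exists on the running system:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

#include "ion.h"	/* struct ion_allocation_data, ION_IOC_ALLOC */

int main(void)
{
	struct ion_allocation_data alloc = {
		.len = 4096,
		.heap_id_mask = 1 << 0,	/* heap ids come from ION_IOC_HEAP_QUERY */
		.flags = 0,
	};
	int ion_fd;

	ion_fd = open("/dev/ion", O_RDWR);
	if (ion_fd < 0)
		return 1;

	if (ioctl(ion_fd, ION_IOC_ALLOC, &alloc) < 0) {
		close(ion_fd);
		return 1;
	}

	/* alloc.fd is a dma-buf fd: mmap() it or pass it to another driver. */
	printf("allocated dma-buf fd %d\n", (int)alloc.fd);

	close(alloc.fd);
	close(ion_fd);
	return 0;
}
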
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index 95a7f1648c00..03d3a4fce0e2 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -39,40 +39,15 @@
#include <linux/sched/task.h>
#include "ion.h"
-#include "ion_priv.h"
-#include "compat_ion.h"
-bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
-{
- return (buffer->flags & ION_FLAG_CACHED) &&
- !(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC);
-}
+static struct ion_device *internal_dev;
+static int heap_id;
bool ion_buffer_cached(struct ion_buffer *buffer)
{
return !!(buffer->flags & ION_FLAG_CACHED);
}
-static inline struct page *ion_buffer_page(struct page *page)
-{
- return (struct page *)((unsigned long)page & ~(1UL));
-}
-
-static inline bool ion_buffer_page_is_dirty(struct page *page)
-{
- return !!((unsigned long)page & 1UL);
-}
-
-static inline void ion_buffer_page_dirty(struct page **page)
-{
- *page = (struct page *)((unsigned long)(*page) | 1UL);
-}
-
-static inline void ion_buffer_page_clean(struct page **page)
-{
- *page = (struct page *)((unsigned long)(*page) & ~(1UL));
-}
-
/* this function should only be called while dev->lock is held */
static void ion_buffer_add(struct ion_device *dev,
struct ion_buffer *buffer)
@@ -103,13 +78,11 @@ static void ion_buffer_add(struct ion_device *dev,
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
struct ion_device *dev,
unsigned long len,
- unsigned long align,
unsigned long flags)
{
struct ion_buffer *buffer;
struct sg_table *table;
- struct scatterlist *sg;
- int i, ret;
+ int ret;
buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
if (!buffer)
@@ -117,17 +90,15 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
buffer->heap = heap;
buffer->flags = flags;
- kref_init(&buffer->ref);
- ret = heap->ops->allocate(heap, buffer, len, align, flags);
+ ret = heap->ops->allocate(heap, buffer, len, flags);
if (ret) {
if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
goto err2;
ion_heap_freelist_drain(heap, 0);
- ret = heap->ops->allocate(heap, buffer, len, align,
- flags);
+ ret = heap->ops->allocate(heap, buffer, len, flags);
if (ret)
goto err2;
}
@@ -142,43 +113,11 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
buffer->dev = dev;
buffer->size = len;
- if (ion_buffer_fault_user_mappings(buffer)) {
- int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
- struct scatterlist *sg;
- int i, j, k = 0;
-
- buffer->pages = vmalloc(sizeof(struct page *) * num_pages);
- if (!buffer->pages) {
- ret = -ENOMEM;
- goto err1;
- }
-
- for_each_sg(table->sgl, sg, table->nents, i) {
- struct page *page = sg_page(sg);
-
- for (j = 0; j < sg->length / PAGE_SIZE; j++)
- buffer->pages[k++] = page++;
- }
- }
-
buffer->dev = dev;
buffer->size = len;
INIT_LIST_HEAD(&buffer->vmas);
+ INIT_LIST_HEAD(&buffer->attachments);
mutex_init(&buffer->lock);
- /*
- * this will set up dma addresses for the sglist -- it is not
- * technically correct as per the dma api -- a specific
- * device isn't really taking ownership here. However, in practice on
- * our systems the only dma_address space is physical addresses.
- * Additionally, we can't afford the overhead of invalidating every
- * allocation via dma_map_sg. The implicit contract here is that
- * memory coming from the heaps is ready for dma, ie if it has a
- * cached mapping that mapping has been invalidated
- */
- for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
- sg_dma_address(sg) = sg_phys(sg);
- sg_dma_len(sg) = sg->length;
- }
mutex_lock(&dev->buffer_lock);
ion_buffer_add(dev, buffer);
mutex_unlock(&dev->buffer_lock);
@@ -200,9 +139,8 @@ void ion_buffer_destroy(struct ion_buffer *buffer)
kfree(buffer);
}
-static void _ion_buffer_destroy(struct kref *kref)
+static void _ion_buffer_destroy(struct ion_buffer *buffer)
{
- struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
struct ion_heap *heap = buffer->heap;
struct ion_device *dev = buffer->dev;
@@ -216,273 +154,6 @@ static void _ion_buffer_destroy(struct kref *kref)
ion_buffer_destroy(buffer);
}
-static void ion_buffer_get(struct ion_buffer *buffer)
-{
- kref_get(&buffer->ref);
-}
-
-static int ion_buffer_put(struct ion_buffer *buffer)
-{
- return kref_put(&buffer->ref, _ion_buffer_destroy);
-}
-
-static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
-{
- mutex_lock(&buffer->lock);
- buffer->handle_count++;
- mutex_unlock(&buffer->lock);
-}
-
-static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
-{
- /*
- * when a buffer is removed from a handle, if it is not in
- * any other handles, copy the taskcomm and the pid of the
- * process it's being removed from into the buffer. At this
- * point there will be no way to track what processes this buffer is
- * being used by, it only exists as a dma_buf file descriptor.
- * The taskcomm and pid can provide a debug hint as to where this fd
- * is in the system
- */
- mutex_lock(&buffer->lock);
- buffer->handle_count--;
- BUG_ON(buffer->handle_count < 0);
- if (!buffer->handle_count) {
- struct task_struct *task;
-
- task = current->group_leader;
- get_task_comm(buffer->task_comm, task);
- buffer->pid = task_pid_nr(task);
- }
- mutex_unlock(&buffer->lock);
-}
-
-static struct ion_handle *ion_handle_create(struct ion_client *client,
- struct ion_buffer *buffer)
-{
- struct ion_handle *handle;
-
- handle = kzalloc(sizeof(*handle), GFP_KERNEL);
- if (!handle)
- return ERR_PTR(-ENOMEM);
- kref_init(&handle->ref);
- RB_CLEAR_NODE(&handle->node);
- handle->client = client;
- ion_buffer_get(buffer);
- ion_buffer_add_to_handle(buffer);
- handle->buffer = buffer;
-
- return handle;
-}
-
-static void ion_handle_kmap_put(struct ion_handle *);
-
-static void ion_handle_destroy(struct kref *kref)
-{
- struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
- struct ion_client *client = handle->client;
- struct ion_buffer *buffer = handle->buffer;
-
- mutex_lock(&buffer->lock);
- while (handle->kmap_cnt)
- ion_handle_kmap_put(handle);
- mutex_unlock(&buffer->lock);
-
- idr_remove(&client->idr, handle->id);
- if (!RB_EMPTY_NODE(&handle->node))
- rb_erase(&handle->node, &client->handles);
-
- ion_buffer_remove_from_handle(buffer);
- ion_buffer_put(buffer);
-
- kfree(handle);
-}
-
-static void ion_handle_get(struct ion_handle *handle)
-{
- kref_get(&handle->ref);
-}
-
-int ion_handle_put_nolock(struct ion_handle *handle)
-{
- return kref_put(&handle->ref, ion_handle_destroy);
-}
-
-int ion_handle_put(struct ion_handle *handle)
-{
- struct ion_client *client = handle->client;
- int ret;
-
- mutex_lock(&client->lock);
- ret = ion_handle_put_nolock(handle);
- mutex_unlock(&client->lock);
-
- return ret;
-}
-
-static struct ion_handle *ion_handle_lookup(struct ion_client *client,
- struct ion_buffer *buffer)
-{
- struct rb_node *n = client->handles.rb_node;
-
- while (n) {
- struct ion_handle *entry = rb_entry(n, struct ion_handle, node);
-
- if (buffer < entry->buffer)
- n = n->rb_left;
- else if (buffer > entry->buffer)
- n = n->rb_right;
- else
- return entry;
- }
- return ERR_PTR(-EINVAL);
-}
-
-struct ion_handle *ion_handle_get_by_id_nolock(struct ion_client *client,
- int id)
-{
- struct ion_handle *handle;
-
- handle = idr_find(&client->idr, id);
- if (handle)
- ion_handle_get(handle);
-
- return handle ? handle : ERR_PTR(-EINVAL);
-}
-
-struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
- int id)
-{
- struct ion_handle *handle;
-
- mutex_lock(&client->lock);
- handle = ion_handle_get_by_id_nolock(client, id);
- mutex_unlock(&client->lock);
-
- return handle;
-}
-
-static bool ion_handle_validate(struct ion_client *client,
- struct ion_handle *handle)
-{
- WARN_ON(!mutex_is_locked(&client->lock));
- return idr_find(&client->idr, handle->id) == handle;
-}
-
-static int ion_handle_add(struct ion_client *client, struct ion_handle *handle)
-{
- int id;
- struct rb_node **p = &client->handles.rb_node;
- struct rb_node *parent = NULL;
- struct ion_handle *entry;
-
- id = idr_alloc(&client->idr, handle, 1, 0, GFP_KERNEL);
- if (id < 0)
- return id;
-
- handle->id = id;
-
- while (*p) {
- parent = *p;
- entry = rb_entry(parent, struct ion_handle, node);
-
- if (handle->buffer < entry->buffer)
- p = &(*p)->rb_left;
- else if (handle->buffer > entry->buffer)
- p = &(*p)->rb_right;
- else
- WARN(1, "%s: buffer already found.", __func__);
- }
-
- rb_link_node(&handle->node, parent, p);
- rb_insert_color(&handle->node, &client->handles);
-
- return 0;
-}
-
-struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
- size_t align, unsigned int heap_id_mask,
- unsigned int flags)
-{
- struct ion_handle *handle;
- struct ion_device *dev = client->dev;
- struct ion_buffer *buffer = NULL;
- struct ion_heap *heap;
- int ret;
-
- pr_debug("%s: len %zu align %zu heap_id_mask %u flags %x\n", __func__,
- len, align, heap_id_mask, flags);
- /*
- * traverse the list of heaps available in this system in priority
- * order. If the heap type is supported by the client, and matches the
- * request of the caller allocate from it. Repeat until allocate has
- * succeeded or all heaps have been tried
- */
- len = PAGE_ALIGN(len);
-
- if (!len)
- return ERR_PTR(-EINVAL);
-
- down_read(&dev->lock);
- plist_for_each_entry(heap, &dev->heaps, node) {
- /* if the caller didn't specify this heap id */
- if (!((1 << heap->id) & heap_id_mask))
- continue;
- buffer = ion_buffer_create(heap, dev, len, align, flags);
- if (!IS_ERR(buffer))
- break;
- }
- up_read(&dev->lock);
-
- if (buffer == NULL)
- return ERR_PTR(-ENODEV);
-
- if (IS_ERR(buffer))
- return ERR_CAST(buffer);
-
- handle = ion_handle_create(client, buffer);
-
- /*
- * ion_buffer_create will create a buffer with a ref_cnt of 1,
- * and ion_handle_create will take a second reference, drop one here
- */
- ion_buffer_put(buffer);
-
- if (IS_ERR(handle))
- return handle;
-
- mutex_lock(&client->lock);
- ret = ion_handle_add(client, handle);
- mutex_unlock(&client->lock);
- if (ret) {
- ion_handle_put(handle);
- handle = ERR_PTR(ret);
- }
-
- return handle;
-}
-EXPORT_SYMBOL(ion_alloc);
-
-void ion_free_nolock(struct ion_client *client,
- struct ion_handle *handle)
-{
- if (!ion_handle_validate(client, handle)) {
- WARN(1, "%s: invalid handle passed to free.\n", __func__);
- return;
- }
- ion_handle_put_nolock(handle);
-}
-
-void ion_free(struct ion_client *client, struct ion_handle *handle)
-{
- BUG_ON(client != handle->client);
-
- mutex_lock(&client->lock);
- ion_free_nolock(client, handle);
- mutex_unlock(&client->lock);
-}
-EXPORT_SYMBOL(ion_free);
-
static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
{
void *vaddr;
@@ -502,22 +173,6 @@ static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
return vaddr;
}
-static void *ion_handle_kmap_get(struct ion_handle *handle)
-{
- struct ion_buffer *buffer = handle->buffer;
- void *vaddr;
-
- if (handle->kmap_cnt) {
- handle->kmap_cnt++;
- return buffer->vaddr;
- }
- vaddr = ion_buffer_kmap_get(buffer);
- if (IS_ERR(vaddr))
- return vaddr;
- handle->kmap_cnt++;
- return vaddr;
-}
-
static void ion_buffer_kmap_put(struct ion_buffer *buffer)
{
buffer->kmap_cnt--;
@@ -527,408 +182,117 @@ static void ion_buffer_kmap_put(struct ion_buffer *buffer)
}
}
-static void ion_handle_kmap_put(struct ion_handle *handle)
+static struct sg_table *dup_sg_table(struct sg_table *table)
{
- struct ion_buffer *buffer = handle->buffer;
+ struct sg_table *new_table;
+ int ret, i;
+ struct scatterlist *sg, *new_sg;
- if (!handle->kmap_cnt) {
- WARN(1, "%s: Double unmap detected! bailing...\n", __func__);
- return;
- }
- handle->kmap_cnt--;
- if (!handle->kmap_cnt)
- ion_buffer_kmap_put(buffer);
-}
-
-void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
-{
- struct ion_buffer *buffer;
- void *vaddr;
-
- mutex_lock(&client->lock);
- if (!ion_handle_validate(client, handle)) {
- pr_err("%s: invalid handle passed to map_kernel.\n",
- __func__);
- mutex_unlock(&client->lock);
- return ERR_PTR(-EINVAL);
- }
-
- buffer = handle->buffer;
-
- if (!handle->buffer->heap->ops->map_kernel) {
- pr_err("%s: map_kernel is not implemented by this heap.\n",
- __func__);
- mutex_unlock(&client->lock);
- return ERR_PTR(-ENODEV);
- }
-
- mutex_lock(&buffer->lock);
- vaddr = ion_handle_kmap_get(handle);
- mutex_unlock(&buffer->lock);
- mutex_unlock(&client->lock);
- return vaddr;
-}
-EXPORT_SYMBOL(ion_map_kernel);
-
-void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
-{
- struct ion_buffer *buffer;
-
- mutex_lock(&client->lock);
- buffer = handle->buffer;
- mutex_lock(&buffer->lock);
- ion_handle_kmap_put(handle);
- mutex_unlock(&buffer->lock);
- mutex_unlock(&client->lock);
-}
-EXPORT_SYMBOL(ion_unmap_kernel);
-
-static struct mutex debugfs_mutex;
-static struct rb_root *ion_root_client;
-static int is_client_alive(struct ion_client *client)
-{
- struct rb_node *node;
- struct ion_client *tmp;
- struct ion_device *dev;
-
- node = ion_root_client->rb_node;
- dev = container_of(ion_root_client, struct ion_device, clients);
-
- down_read(&dev->lock);
- while (node) {
- tmp = rb_entry(node, struct ion_client, node);
- if (client < tmp) {
- node = node->rb_left;
- } else if (client > tmp) {
- node = node->rb_right;
- } else {
- up_read(&dev->lock);
- return 1;
- }
- }
-
- up_read(&dev->lock);
- return 0;
-}
+ new_table = kzalloc(sizeof(*new_table), GFP_KERNEL);
+ if (!new_table)
+ return ERR_PTR(-ENOMEM);
-static int ion_debug_client_show(struct seq_file *s, void *unused)
-{
- struct ion_client *client = s->private;
- struct rb_node *n;
- size_t sizes[ION_NUM_HEAP_IDS] = {0};
- const char *names[ION_NUM_HEAP_IDS] = {NULL};
- int i;
-
- mutex_lock(&debugfs_mutex);
- if (!is_client_alive(client)) {
- seq_printf(s, "ion_client 0x%p dead, can't dump its buffers\n",
- client);
- mutex_unlock(&debugfs_mutex);
- return 0;
+ ret = sg_alloc_table(new_table, table->nents, GFP_KERNEL);
+ if (ret) {
+ kfree(new_table);
+ return ERR_PTR(-ENOMEM);
}
- mutex_lock(&client->lock);
- for (n = rb_first(&client->handles); n; n = rb_next(n)) {
- struct ion_handle *handle = rb_entry(n, struct ion_handle,
- node);
- unsigned int id = handle->buffer->heap->id;
-
- if (!names[id])
- names[id] = handle->buffer->heap->name;
- sizes[id] += handle->buffer->size;
+ new_sg = new_table->sgl;
+ for_each_sg(table->sgl, sg, table->nents, i) {
+ memcpy(new_sg, sg, sizeof(*sg));
+ sg->dma_address = 0;
+ new_sg = sg_next(new_sg);
}
- mutex_unlock(&client->lock);
- mutex_unlock(&debugfs_mutex);
- seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
- for (i = 0; i < ION_NUM_HEAP_IDS; i++) {
- if (!names[i])
- continue;
- seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]);
- }
- return 0;
+ return new_table;
}
-static int ion_debug_client_open(struct inode *inode, struct file *file)
+static void free_duped_table(struct sg_table *table)
{
- return single_open(file, ion_debug_client_show, inode->i_private);
+ sg_free_table(table);
+ kfree(table);
}
-static const struct file_operations debug_client_fops = {
- .open = ion_debug_client_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
+struct ion_dma_buf_attachment {
+ struct device *dev;
+ struct sg_table *table;
+ struct list_head list;
};
-static int ion_get_client_serial(const struct rb_root *root,
- const unsigned char *name)
-{
- int serial = -1;
- struct rb_node *node;
-
- for (node = rb_first(root); node; node = rb_next(node)) {
- struct ion_client *client = rb_entry(node, struct ion_client,
- node);
-
- if (strcmp(client->name, name))
- continue;
- serial = max(serial, client->display_serial);
- }
- return serial + 1;
-}
-
-struct ion_client *ion_client_create(struct ion_device *dev,
- const char *name)
+static int ion_dma_buf_attach(struct dma_buf *dmabuf, struct device *dev,
+ struct dma_buf_attachment *attachment)
{
- struct ion_client *client;
- struct task_struct *task;
- struct rb_node **p;
- struct rb_node *parent = NULL;
- struct ion_client *entry;
- pid_t pid;
-
- if (!name) {
- pr_err("%s: Name cannot be null\n", __func__);
- return ERR_PTR(-EINVAL);
- }
+ struct ion_dma_buf_attachment *a;
+ struct sg_table *table;
+ struct ion_buffer *buffer = dmabuf->priv;
- get_task_struct(current->group_leader);
- task_lock(current->group_leader);
- pid = task_pid_nr(current->group_leader);
- /*
- * don't bother to store task struct for kernel threads,
- * they can't be killed anyway
- */
- if (current->group_leader->flags & PF_KTHREAD) {
- put_task_struct(current->group_leader);
- task = NULL;
- } else {
- task = current->group_leader;
- }
- task_unlock(current->group_leader);
-
- client = kzalloc(sizeof(*client), GFP_KERNEL);
- if (!client)
- goto err_put_task_struct;
-
- client->dev = dev;
- client->handles = RB_ROOT;
- idr_init(&client->idr);
- mutex_init(&client->lock);
- client->task = task;
- client->pid = pid;
- client->name = kstrdup(name, GFP_KERNEL);
- if (!client->name)
- goto err_free_client;
+ a = kzalloc(sizeof(*a), GFP_KERNEL);
+ if (!a)
+ return -ENOMEM;
- down_write(&dev->lock);
- client->display_serial = ion_get_client_serial(&dev->clients, name);
- client->display_name = kasprintf(
- GFP_KERNEL, "%s-%d", name, client->display_serial);
- if (!client->display_name) {
- up_write(&dev->lock);
- goto err_free_client_name;
+ table = dup_sg_table(buffer->sg_table);
+ if (IS_ERR(table)) {
+ kfree(a);
+ return -ENOMEM;
}
- p = &dev->clients.rb_node;
- while (*p) {
- parent = *p;
- entry = rb_entry(parent, struct ion_client, node);
- if (client < entry)
- p = &(*p)->rb_left;
- else if (client > entry)
- p = &(*p)->rb_right;
- }
- rb_link_node(&client->node, parent, p);
- rb_insert_color(&client->node, &dev->clients);
-
- client->debug_root = debugfs_create_file(client->display_name, 0664,
- dev->clients_debug_root,
- client, &debug_client_fops);
- if (!client->debug_root) {
- char buf[256], *path;
-
- path = dentry_path(dev->clients_debug_root, buf, 256);
- pr_err("Failed to create client debugfs at %s/%s\n",
- path, client->display_name);
- }
+ a->table = table;
+ a->dev = dev;
+ INIT_LIST_HEAD(&a->list);
- up_write(&dev->lock);
+ attachment->priv = a;
- return client;
+ mutex_lock(&buffer->lock);
+ list_add(&a->list, &buffer->attachments);
+ mutex_unlock(&buffer->lock);
-err_free_client_name:
- kfree(client->name);
-err_free_client:
- kfree(client);
-err_put_task_struct:
- if (task)
- put_task_struct(current->group_leader);
- return ERR_PTR(-ENOMEM);
+ return 0;
}
-EXPORT_SYMBOL(ion_client_create);
-void ion_client_destroy(struct ion_client *client)
+static void ion_dma_buf_detatch(struct dma_buf *dmabuf,
+ struct dma_buf_attachment *attachment)
{
- struct ion_device *dev = client->dev;
- struct rb_node *n;
-
- pr_debug("%s: %d\n", __func__, __LINE__);
- mutex_lock(&debugfs_mutex);
- while ((n = rb_first(&client->handles))) {
- struct ion_handle *handle = rb_entry(n, struct ion_handle,
- node);
- ion_handle_destroy(&handle->ref);
- }
-
- idr_destroy(&client->idr);
+ struct ion_dma_buf_attachment *a = attachment->priv;
+ struct ion_buffer *buffer = dmabuf->priv;
- down_write(&dev->lock);
- if (client->task)
- put_task_struct(client->task);
- rb_erase(&client->node, &dev->clients);
- debugfs_remove_recursive(client->debug_root);
- up_write(&dev->lock);
+ free_duped_table(a->table);
+ mutex_lock(&buffer->lock);
+ list_del(&a->list);
+ mutex_unlock(&buffer->lock);
- kfree(client->display_name);
- kfree(client->name);
- kfree(client);
- mutex_unlock(&debugfs_mutex);
+ kfree(a);
}
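
For context, a minimal importer sketch (not part of this patch; the device pointer and fd are placeholders supplied by the caller): this is how a kernel driver would reach the new attach/detach callbacks through the standard dma-buf API. Each attachment gets its own copy of the buffer's scatterlist via dup_sg_table(), so concurrent importers do not share dma_address fields; note, as an aside, that the sg->dma_address = 0 in dup_sg_table() looks as though it was meant for the copied entry (new_sg) rather than the source.

#include <linux/dma-buf.h>
#include <linux/err.h>

static int example_import(struct device *dev, int fd)
{
        struct dma_buf *dmabuf;
        struct dma_buf_attachment *attach;
        struct sg_table *sgt;

        dmabuf = dma_buf_get(fd);
        if (IS_ERR(dmabuf))
                return PTR_ERR(dmabuf);

        attach = dma_buf_attach(dmabuf, dev);   /* -> ion_dma_buf_attach() */
        if (IS_ERR(attach)) {
                dma_buf_put(dmabuf);
                return PTR_ERR(attach);
        }

        sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
        if (IS_ERR(sgt)) {
                dma_buf_detach(dmabuf, attach); /* -> ion_dma_buf_detatch() */
                dma_buf_put(dmabuf);
                return PTR_ERR(sgt);
        }

        /* ... program the device with the mapped scatterlist ... */

        dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
        dma_buf_detach(dmabuf, attach);
        dma_buf_put(dmabuf);
        return 0;
}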
-EXPORT_SYMBOL(ion_client_destroy);
-static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
- struct device *dev,
- enum dma_data_direction direction);
static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
enum dma_data_direction direction)
{
- struct dma_buf *dmabuf = attachment->dmabuf;
- struct ion_buffer *buffer = dmabuf->priv;
-
- ion_buffer_sync_for_device(buffer, attachment->dev, direction);
- return buffer->sg_table;
-}
-
-static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
- struct sg_table *table,
- enum dma_data_direction direction)
-{
-}
-
-void ion_pages_sync_for_device(struct device *dev, struct page *page,
- size_t size, enum dma_data_direction dir)
-{
- struct scatterlist sg;
-
- sg_init_table(&sg, 1);
- sg_set_page(&sg, page, size, 0);
- /*
- * This is not correct - sg_dma_address needs a dma_addr_t that is valid
- * for the targeted device, but this works on the currently targeted
- * hardware.
- */
- sg_dma_address(&sg) = page_to_phys(page);
- dma_sync_sg_for_device(dev, &sg, 1, dir);
-}
-
-struct ion_vma_list {
- struct list_head list;
- struct vm_area_struct *vma;
-};
-
-static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
- struct device *dev,
- enum dma_data_direction dir)
-{
- struct ion_vma_list *vma_list;
- int pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
- int i;
-
- pr_debug("%s: syncing for device %s\n", __func__,
- dev ? dev_name(dev) : "null");
-
- if (!ion_buffer_fault_user_mappings(buffer))
- return;
-
- mutex_lock(&buffer->lock);
- for (i = 0; i < pages; i++) {
- struct page *page = buffer->pages[i];
-
- if (ion_buffer_page_is_dirty(page))
- ion_pages_sync_for_device(dev, ion_buffer_page(page),
- PAGE_SIZE, dir);
-
- ion_buffer_page_clean(buffer->pages + i);
- }
- list_for_each_entry(vma_list, &buffer->vmas, list) {
- struct vm_area_struct *vma = vma_list->vma;
-
- zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start);
- }
- mutex_unlock(&buffer->lock);
-}
-
-static int ion_vm_fault(struct vm_fault *vmf)
-{
- struct ion_buffer *buffer = vmf->vma->vm_private_data;
- unsigned long pfn;
+ struct ion_dma_buf_attachment *a = attachment->priv;
+ struct sg_table *table;
int ret;
- mutex_lock(&buffer->lock);
- ion_buffer_page_dirty(buffer->pages + vmf->pgoff);
- BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]);
-
- pfn = page_to_pfn(ion_buffer_page(buffer->pages[vmf->pgoff]));
- ret = vm_insert_pfn(vmf->vma, vmf->address, pfn);
- mutex_unlock(&buffer->lock);
- if (ret)
- return VM_FAULT_ERROR;
+ table = a->table;
- return VM_FAULT_NOPAGE;
-}
-
-static void ion_vm_open(struct vm_area_struct *vma)
-{
- struct ion_buffer *buffer = vma->vm_private_data;
- struct ion_vma_list *vma_list;
+ if (!dma_map_sg(attachment->dev, table->sgl, table->nents,
+ direction)){
+ ret = -ENOMEM;
+ goto err;
+ }
+ return table;
- vma_list = kmalloc(sizeof(*vma_list), GFP_KERNEL);
- if (!vma_list)
- return;
- vma_list->vma = vma;
- mutex_lock(&buffer->lock);
- list_add(&vma_list->list, &buffer->vmas);
- mutex_unlock(&buffer->lock);
- pr_debug("%s: adding %p\n", __func__, vma);
+err:
+ free_duped_table(table);
+ return ERR_PTR(ret);
}
-static void ion_vm_close(struct vm_area_struct *vma)
+static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
+ struct sg_table *table,
+ enum dma_data_direction direction)
{
- struct ion_buffer *buffer = vma->vm_private_data;
- struct ion_vma_list *vma_list, *tmp;
-
- pr_debug("%s\n", __func__);
- mutex_lock(&buffer->lock);
- list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
- if (vma_list->vma != vma)
- continue;
- list_del(&vma_list->list);
- kfree(vma_list);
- pr_debug("%s: deleting %p\n", __func__, vma);
- break;
- }
- mutex_unlock(&buffer->lock);
+ dma_unmap_sg(attachment->dev, table->sgl, table->nents, direction);
}
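
ion_map_dma_buf() now performs a real dma_map_sg() on the attachment's private table instead of handing back the buffer's own table and syncing by hand. A minimal sketch of the importer side once dma_buf_map_attachment() has returned that table (for simplicity it assumes the mapped count equals nents):

#include <linux/scatterlist.h>

static void example_program_dma(struct sg_table *sgt)
{
        struct scatterlist *sg;
        int i;

        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                dma_addr_t addr = sg_dma_address(sg);
                unsigned int len = sg_dma_len(sg);

                /* hand (addr, len) to the device's DMA engine */
                (void)addr;
                (void)len;
        }
}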
-static const struct vm_operations_struct ion_vma_ops = {
- .open = ion_vm_open,
- .close = ion_vm_close,
- .fault = ion_vm_fault,
-};
-
static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
struct ion_buffer *buffer = dmabuf->priv;
@@ -940,15 +304,6 @@ static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
return -EINVAL;
}
- if (ion_buffer_fault_user_mappings(buffer)) {
- vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND |
- VM_DONTDUMP;
- vma->vm_private_data = buffer;
- vma->vm_ops = &ion_vma_ops;
- ion_vm_open(vma);
- return 0;
- }
-
if (!(buffer->flags & ION_FLAG_CACHED))
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
@@ -968,7 +323,7 @@ static void ion_dma_buf_release(struct dma_buf *dmabuf)
{
struct ion_buffer *buffer = dmabuf->priv;
- ion_buffer_put(buffer);
+ _ion_buffer_destroy(buffer);
}
static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
@@ -988,26 +343,45 @@ static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
{
struct ion_buffer *buffer = dmabuf->priv;
void *vaddr;
+ struct ion_dma_buf_attachment *a;
- if (!buffer->heap->ops->map_kernel) {
- pr_err("%s: map kernel is not implemented by this heap.\n",
- __func__);
- return -ENODEV;
+ /*
+ * TODO: Move this elsewhere because we don't always need a vaddr
+ */
+ if (buffer->heap->ops->map_kernel) {
+ mutex_lock(&buffer->lock);
+ vaddr = ion_buffer_kmap_get(buffer);
+ mutex_unlock(&buffer->lock);
}
+
mutex_lock(&buffer->lock);
- vaddr = ion_buffer_kmap_get(buffer);
+ list_for_each_entry(a, &buffer->attachments, list) {
+ dma_sync_sg_for_cpu(a->dev, a->table->sgl, a->table->nents,
+ DMA_BIDIRECTIONAL);
+ }
mutex_unlock(&buffer->lock);
- return PTR_ERR_OR_ZERO(vaddr);
+
+ return 0;
}
static int ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
enum dma_data_direction direction)
{
struct ion_buffer *buffer = dmabuf->priv;
+ struct ion_dma_buf_attachment *a;
+
+ if (buffer->heap->ops->map_kernel) {
+ mutex_lock(&buffer->lock);
+ ion_buffer_kmap_put(buffer);
+ mutex_unlock(&buffer->lock);
+ }
mutex_lock(&buffer->lock);
- ion_buffer_kmap_put(buffer);
+ list_for_each_entry(a, &buffer->attachments, list) {
+ dma_sync_sg_for_device(a->dev, a->table->sgl, a->table->nents,
+ DMA_BIDIRECTIONAL);
+ }
mutex_unlock(&buffer->lock);
return 0;
@@ -1018,6 +392,8 @@ static const struct dma_buf_ops dma_buf_ops = {
.unmap_dma_buf = ion_unmap_dma_buf,
.mmap = ion_mmap,
.release = ion_dma_buf_release,
+ .attach = ion_dma_buf_attach,
+ .detach = ion_dma_buf_detatch,
.begin_cpu_access = ion_dma_buf_begin_cpu_access,
.end_cpu_access = ion_dma_buf_end_cpu_access,
.map_atomic = ion_dma_buf_kmap,
@@ -1026,24 +402,44 @@ static const struct dma_buf_ops dma_buf_ops = {
.unmap = ion_dma_buf_kunmap,
};
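
With begin/end_cpu_access now walking the attachment list and syncing every device mapping, a userspace client is expected to bracket CPU access with the standard dma-buf sync ioctl on the buffer fd. A minimal sketch, error handling omitted:

#include <stddef.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/dma-buf.h>

static void touch_buffer(int buf_fd, size_t len)
{
        struct dma_buf_sync sync = { 0 };
        void *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
                       buf_fd, 0);

        sync.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_RW;
        ioctl(buf_fd, DMA_BUF_IOCTL_SYNC, &sync);   /* -> begin_cpu_access */

        memset(p, 0, len);                          /* CPU writes */

        sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_RW;
        ioctl(buf_fd, DMA_BUF_IOCTL_SYNC, &sync);   /* -> end_cpu_access */

        munmap(p, len);
}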
-struct dma_buf *ion_share_dma_buf(struct ion_client *client,
- struct ion_handle *handle)
+int ion_alloc(size_t len, unsigned int heap_id_mask, unsigned int flags)
{
+ struct ion_device *dev = internal_dev;
+ struct ion_buffer *buffer = NULL;
+ struct ion_heap *heap;
DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
- struct ion_buffer *buffer;
+ int fd;
struct dma_buf *dmabuf;
- bool valid_handle;
- mutex_lock(&client->lock);
- valid_handle = ion_handle_validate(client, handle);
- if (!valid_handle) {
- WARN(1, "%s: invalid handle passed to share.\n", __func__);
- mutex_unlock(&client->lock);
- return ERR_PTR(-EINVAL);
+ pr_debug("%s: len %zu heap_id_mask %u flags %x\n", __func__,
+ len, heap_id_mask, flags);
+ /*
+ * traverse the list of heaps available in this system in priority
+ * order. If the heap type is supported by the client, and matches the
+ * request of the caller allocate from it. Repeat until allocate has
+ * succeeded or all heaps have been tried
+ */
+ len = PAGE_ALIGN(len);
+
+ if (!len)
+ return -EINVAL;
+
+ down_read(&dev->lock);
+ plist_for_each_entry(heap, &dev->heaps, node) {
+ /* if the caller didn't specify this heap id */
+ if (!((1 << heap->id) & heap_id_mask))
+ continue;
+ buffer = ion_buffer_create(heap, dev, len, flags);
+ if (!IS_ERR(buffer))
+ break;
}
- buffer = handle->buffer;
- ion_buffer_get(buffer);
- mutex_unlock(&client->lock);
+ up_read(&dev->lock);
+
+ if (buffer == NULL)
+ return -ENODEV;
+
+ if (IS_ERR(buffer))
+ return PTR_ERR(buffer);
exp_info.ops = &dma_buf_ops;
exp_info.size = buffer->size;
@@ -1052,22 +448,9 @@ struct dma_buf *ion_share_dma_buf(struct ion_client *client,
dmabuf = dma_buf_export(&exp_info);
if (IS_ERR(dmabuf)) {
- ion_buffer_put(buffer);
- return dmabuf;
- }
-
- return dmabuf;
-}
-EXPORT_SYMBOL(ion_share_dma_buf);
-
-int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
-{
- struct dma_buf *dmabuf;
- int fd;
-
- dmabuf = ion_share_dma_buf(client, handle);
- if (IS_ERR(dmabuf))
+ _ion_buffer_destroy(buffer);
return PTR_ERR(dmabuf);
+ }
fd = dma_buf_fd(dmabuf, O_CLOEXEC);
if (fd < 0)
@@ -1075,93 +458,10 @@ int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
return fd;
}
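
ion_alloc() now allocates the buffer and immediately wraps it in a dma-buf, returning only an fd; the client/handle API is gone. A minimal userspace sketch, assuming the reworked struct ion_allocation_data (len, heap_id_mask, flags, fd) from the updated uapi header; heap id 0 is purely illustrative and would normally come from the heap query ioctl below:

#include <stddef.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include "uapi/ion.h"   /* staging uapi header; install path varies */

static int example_alloc(size_t len)
{
        struct ion_allocation_data data = {
                .len = len,
                .heap_id_mask = 1 << 0, /* heap id 0, illustrative only */
                .flags = 0,
        };
        int ion_fd = open("/dev/ion", O_RDWR);

        if (ion_fd < 0)
                return -1;
        if (ioctl(ion_fd, ION_IOC_ALLOC, &data) < 0) {
                close(ion_fd);
                return -1;
        }
        close(ion_fd);
        return data.fd; /* a plain dma-buf fd: mmap it, pass it around */
}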
-EXPORT_SYMBOL(ion_share_dma_buf_fd);
-
-struct ion_handle *ion_import_dma_buf(struct ion_client *client,
- struct dma_buf *dmabuf)
-{
- struct ion_buffer *buffer;
- struct ion_handle *handle;
- int ret;
-
- /* if this memory came from ion */
-
- if (dmabuf->ops != &dma_buf_ops) {
- pr_err("%s: can not import dmabuf from another exporter\n",
- __func__);
- return ERR_PTR(-EINVAL);
- }
- buffer = dmabuf->priv;
-
- mutex_lock(&client->lock);
- /* if a handle exists for this buffer just take a reference to it */
- handle = ion_handle_lookup(client, buffer);
- if (!IS_ERR(handle)) {
- ion_handle_get(handle);
- mutex_unlock(&client->lock);
- goto end;
- }
-
- handle = ion_handle_create(client, buffer);
- if (IS_ERR(handle)) {
- mutex_unlock(&client->lock);
- goto end;
- }
-
- ret = ion_handle_add(client, handle);
- mutex_unlock(&client->lock);
- if (ret) {
- ion_handle_put(handle);
- handle = ERR_PTR(ret);
- }
-
-end:
- return handle;
-}
-EXPORT_SYMBOL(ion_import_dma_buf);
-
-struct ion_handle *ion_import_dma_buf_fd(struct ion_client *client, int fd)
-{
- struct dma_buf *dmabuf;
- struct ion_handle *handle;
-
- dmabuf = dma_buf_get(fd);
- if (IS_ERR(dmabuf))
- return ERR_CAST(dmabuf);
-
- handle = ion_import_dma_buf(client, dmabuf);
- dma_buf_put(dmabuf);
- return handle;
-}
-EXPORT_SYMBOL(ion_import_dma_buf_fd);
-
-int ion_sync_for_device(struct ion_client *client, int fd)
-{
- struct dma_buf *dmabuf;
- struct ion_buffer *buffer;
-
- dmabuf = dma_buf_get(fd);
- if (IS_ERR(dmabuf))
- return PTR_ERR(dmabuf);
-
- /* if this memory came from ion */
- if (dmabuf->ops != &dma_buf_ops) {
- pr_err("%s: can not sync dmabuf from another exporter\n",
- __func__);
- dma_buf_put(dmabuf);
- return -EINVAL;
- }
- buffer = dmabuf->priv;
-
- dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
- buffer->sg_table->nents, DMA_BIDIRECTIONAL);
- dma_buf_put(dmabuf);
- return 0;
-}
-int ion_query_heaps(struct ion_client *client, struct ion_heap_query *query)
+int ion_query_heaps(struct ion_heap_query *query)
{
- struct ion_device *dev = client->dev;
+ struct ion_device *dev = internal_dev;
struct ion_heap_data __user *buffer = u64_to_user_ptr(query->heaps);
int ret = -EINVAL, cnt = 0, max_cnt;
struct ion_heap *heap;
@@ -1198,138 +498,18 @@ int ion_query_heaps(struct ion_client *client, struct ion_heap_query *query)
}
query->cnt = cnt;
+ ret = 0;
out:
up_read(&dev->lock);
return ret;
}
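
Since heap ids are now assigned by the core at registration time, userspace discovers them through this query ioctl. A minimal sketch of the usual two-call pattern, assuming struct ion_heap_query and struct ion_heap_data from the staging uapi header:

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include "uapi/ion.h"   /* staging uapi header; install path varies */

static int example_query(int ion_fd)
{
        struct ion_heap_query query = { 0 };
        struct ion_heap_data *heaps;
        unsigned int i;

        /* first call: heaps pointer left NULL, kernel fills in cnt */
        if (ioctl(ion_fd, ION_IOC_HEAP_QUERY, &query) < 0)
                return -1;

        heaps = calloc(query.cnt, sizeof(*heaps));
        if (!heaps)
                return -1;
        query.heaps = (uintptr_t)heaps;

        /* second call: fetch the descriptors */
        if (ioctl(ion_fd, ION_IOC_HEAP_QUERY, &query) < 0) {
                free(heaps);
                return -1;
        }
        for (i = 0; i < query.cnt; i++)
                printf("heap %u: %s (type %u)\n",
                       heaps[i].heap_id, heaps[i].name, heaps[i].type);
        free(heaps);
        return 0;
}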
-static int ion_release(struct inode *inode, struct file *file)
-{
- struct ion_client *client = file->private_data;
-
- pr_debug("%s: %d\n", __func__, __LINE__);
- ion_client_destroy(client);
- return 0;
-}
-
-static int ion_open(struct inode *inode, struct file *file)
-{
- struct miscdevice *miscdev = file->private_data;
- struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
- struct ion_client *client;
- char debug_name[64];
-
- pr_debug("%s: %d\n", __func__, __LINE__);
- snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader));
- client = ion_client_create(dev, debug_name);
- if (IS_ERR(client))
- return PTR_ERR(client);
- file->private_data = client;
-
- return 0;
-}
-
static const struct file_operations ion_fops = {
.owner = THIS_MODULE,
- .open = ion_open,
- .release = ion_release,
.unlocked_ioctl = ion_ioctl,
- .compat_ioctl = compat_ion_ioctl,
-};
-
-static size_t ion_debug_heap_total(struct ion_client *client,
- unsigned int id)
-{
- size_t size = 0;
- struct rb_node *n;
-
- mutex_lock(&client->lock);
- for (n = rb_first(&client->handles); n; n = rb_next(n)) {
- struct ion_handle *handle = rb_entry(n,
- struct ion_handle,
- node);
- if (handle->buffer->heap->id == id)
- size += handle->buffer->size;
- }
- mutex_unlock(&client->lock);
- return size;
-}
-
-static int ion_debug_heap_show(struct seq_file *s, void *unused)
-{
- struct ion_heap *heap = s->private;
- struct ion_device *dev = heap->dev;
- struct rb_node *n;
- size_t total_size = 0;
- size_t total_orphaned_size = 0;
-
- seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");
- seq_puts(s, "----------------------------------------------------\n");
-
- mutex_lock(&debugfs_mutex);
- for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
- struct ion_client *client = rb_entry(n, struct ion_client,
- node);
- size_t size = ion_debug_heap_total(client, heap->id);
-
- if (!size)
- continue;
- if (client->task) {
- char task_comm[TASK_COMM_LEN];
-
- get_task_comm(task_comm, client->task);
- seq_printf(s, "%16s %16u %16zu\n", task_comm,
- client->pid, size);
- } else {
- seq_printf(s, "%16s %16u %16zu\n", client->name,
- client->pid, size);
- }
- }
- mutex_unlock(&debugfs_mutex);
-
- seq_puts(s, "----------------------------------------------------\n");
- seq_puts(s, "orphaned allocations (info is from last known client):\n");
- mutex_lock(&dev->buffer_lock);
- for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
- struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
- node);
- if (buffer->heap->id != heap->id)
- continue;
- total_size += buffer->size;
- if (!buffer->handle_count) {
- seq_printf(s, "%16s %16u %16zu %d %d\n",
- buffer->task_comm, buffer->pid,
- buffer->size, buffer->kmap_cnt,
- kref_read(&buffer->ref));
- total_orphaned_size += buffer->size;
- }
- }
- mutex_unlock(&dev->buffer_lock);
- seq_puts(s, "----------------------------------------------------\n");
- seq_printf(s, "%16s %16zu\n", "total orphaned",
- total_orphaned_size);
- seq_printf(s, "%16s %16zu\n", "total ", total_size);
- if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
- seq_printf(s, "%16s %16zu\n", "deferred free",
- heap->free_list_size);
- seq_puts(s, "----------------------------------------------------\n");
-
- if (heap->debug_show)
- heap->debug_show(heap, s, unused);
-
- return 0;
-}
-
-static int ion_debug_heap_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ion_debug_heap_show, inode->i_private);
-}
-
-static const struct file_operations debug_heap_fops = {
- .open = ion_debug_heap_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = ion_ioctl,
+#endif
};
static int debug_shrink_set(void *data, u64 val)
@@ -1367,9 +547,10 @@ static int debug_shrink_get(void *data, u64 *val)
DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
debug_shrink_set, "%llu\n");
-void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
+void ion_device_add_heap(struct ion_heap *heap)
{
struct dentry *debug_file;
+ struct ion_device *dev = internal_dev;
if (!heap->ops->allocate || !heap->ops->free)
pr_err("%s: can not add heap with invalid ops struct.\n",
@@ -1386,35 +567,25 @@ void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
heap->dev = dev;
down_write(&dev->lock);
+ heap->id = heap_id++;
/*
* use negative heap->id to reverse the priority -- when traversing
* the list later attempt higher id numbers first
*/
plist_node_init(&heap->node, -heap->id);
plist_add(&heap->node, &dev->heaps);
- debug_file = debugfs_create_file(heap->name, 0664,
- dev->heaps_debug_root, heap,
- &debug_heap_fops);
-
- if (!debug_file) {
- char buf[256], *path;
-
- path = dentry_path(dev->heaps_debug_root, buf, 256);
- pr_err("Failed to create heap debugfs at %s/%s\n",
- path, heap->name);
- }
if (heap->shrinker.count_objects && heap->shrinker.scan_objects) {
char debug_name[64];
snprintf(debug_name, 64, "%s_shrink", heap->name);
debug_file = debugfs_create_file(
- debug_name, 0644, dev->heaps_debug_root, heap,
+ debug_name, 0644, dev->debug_root, heap,
&debug_shrink_fops);
if (!debug_file) {
char buf[256], *path;
- path = dentry_path(dev->heaps_debug_root, buf, 256);
+ path = dentry_path(dev->debug_root, buf, 256);
pr_err("Failed to create heap shrinker debugfs at %s/%s\n",
path, debug_name);
}
@@ -1425,17 +596,14 @@ void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
}
EXPORT_SYMBOL(ion_device_add_heap);
-struct ion_device *ion_device_create(long (*custom_ioctl)
- (struct ion_client *client,
- unsigned int cmd,
- unsigned long arg))
+int ion_device_create(void)
{
struct ion_device *idev;
int ret;
idev = kzalloc(sizeof(*idev), GFP_KERNEL);
if (!idev)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
idev->dev.minor = MISC_DYNAMIC_MINOR;
idev->dev.name = "ion";
@@ -1445,7 +613,7 @@ struct ion_device *ion_device_create(long (*custom_ioctl)
if (ret) {
pr_err("ion: failed to register misc device.\n");
kfree(idev);
- return ERR_PTR(ret);
+ return ret;
}
idev->debug_root = debugfs_create_dir("ion", NULL);
@@ -1453,35 +621,13 @@ struct ion_device *ion_device_create(long (*custom_ioctl)
pr_err("ion: failed to create debugfs root directory.\n");
goto debugfs_done;
}
- idev->heaps_debug_root = debugfs_create_dir("heaps", idev->debug_root);
- if (!idev->heaps_debug_root) {
- pr_err("ion: failed to create debugfs heaps directory.\n");
- goto debugfs_done;
- }
- idev->clients_debug_root = debugfs_create_dir("clients",
- idev->debug_root);
- if (!idev->clients_debug_root)
- pr_err("ion: failed to create debugfs clients directory.\n");
debugfs_done:
-
- idev->custom_ioctl = custom_ioctl;
idev->buffers = RB_ROOT;
mutex_init(&idev->buffer_lock);
init_rwsem(&idev->lock);
plist_head_init(&idev->heaps);
- idev->clients = RB_ROOT;
- ion_root_client = &idev->clients;
- mutex_init(&debugfs_mutex);
- return idev;
-}
-EXPORT_SYMBOL(ion_device_create);
-
-void ion_device_destroy(struct ion_device *dev)
-{
- misc_deregister(&dev->dev);
- debugfs_remove_recursive(dev->debug_root);
- /* XXX need to free the heaps and clients ? */
- kfree(dev);
+ internal_dev = idev;
+ return 0;
}
-EXPORT_SYMBOL(ion_device_destroy);
+subsys_initcall(ion_device_create);
diff --git a/drivers/staging/android/ion/ion.h b/drivers/staging/android/ion/ion.h
index 93dafb4586e4..ace8416bd509 100644
--- a/drivers/staging/android/ion/ion.h
+++ b/drivers/staging/android/ion/ion.h
@@ -14,38 +14,31 @@
*
*/
-#ifndef _LINUX_ION_H
-#define _LINUX_ION_H
+#ifndef _ION_H
+#define _ION_H
+#include <linux/device.h>
+#include <linux/dma-direction.h>
+#include <linux/kref.h>
+#include <linux/mm_types.h>
+#include <linux/mutex.h>
+#include <linux/rbtree.h>
+#include <linux/sched.h>
+#include <linux/shrinker.h>
#include <linux/types.h>
+#include <linux/miscdevice.h>
#include "../uapi/ion.h"
-struct ion_handle;
-struct ion_device;
-struct ion_heap;
-struct ion_mapper;
-struct ion_client;
-struct ion_buffer;
-
-/*
- * This should be removed some day when phys_addr_t's are fully
- * plumbed in the kernel, and all instances of ion_phys_addr_t should
- * be converted to phys_addr_t. For the time being many kernel interfaces
- * do not accept phys_addr_t's that would have to
- */
-#define ion_phys_addr_t unsigned long
-
/**
* struct ion_platform_heap - defines a heap in the given platform
* @type: type of the heap from ion_heap_type enum
- * @id: unique identifier for heap. When allocating higher numbers
+ * @id: unique identifier for heap. When allocating higher numbers
* will be allocated from first. At allocation these are passed
* as a bit mask and therefore can not exceed ION_NUM_HEAP_IDS.
* @name: used for debug purposes
* @base: base address of heap in physical memory if applicable
* @size: size of the heap in bytes if applicable
- * @align: required alignment in physical memory if applicable
* @priv: private info passed from the board file
*
* Provided by the board file.
@@ -54,123 +47,329 @@ struct ion_platform_heap {
enum ion_heap_type type;
unsigned int id;
const char *name;
- ion_phys_addr_t base;
+ phys_addr_t base;
size_t size;
- ion_phys_addr_t align;
+ phys_addr_t align;
void *priv;
};
/**
- * struct ion_platform_data - array of platform heaps passed from board file
- * @nr: number of structures in the array
- * @heaps: array of platform_heap structions
+ * struct ion_buffer - metadata for a particular buffer
+ * @ref: reference count
+ * @node: node in the ion_device buffers tree
+ * @dev: back pointer to the ion_device
+ * @heap: back pointer to the heap the buffer came from
+ * @flags: buffer specific flags
+ * @private_flags: internal buffer specific flags
+ * @size: size of the buffer
+ * @priv_virt: private data to the buffer representable as
+ * a void *
+ * @lock: protects the buffers cnt fields
+ * @kmap_cnt: number of times the buffer is mapped to the kernel
+ * @vaddr: the kernel mapping if kmap_cnt is not zero
+ * @sg_table: the sg table for the buffer if dmap_cnt is not zero
+ * @pages: flat array of pages in the buffer -- used by fault
+ * handler and only valid for buffers that are faulted in
+ * @vmas: list of vma's mapping this buffer
+ * @handle_count: count of handles referencing this buffer
+ * @task_comm: taskcomm of last client to reference this buffer in a
+ * handle, used for debugging
+ * @pid: pid of last client to reference this buffer in a
+ * handle, used for debugging
+ */
+struct ion_buffer {
+ union {
+ struct rb_node node;
+ struct list_head list;
+ };
+ struct ion_device *dev;
+ struct ion_heap *heap;
+ unsigned long flags;
+ unsigned long private_flags;
+ size_t size;
+ void *priv_virt;
+ struct mutex lock;
+ int kmap_cnt;
+ void *vaddr;
+ struct sg_table *sg_table;
+ struct page **pages;
+ struct list_head vmas;
+ struct list_head attachments;
+ /* used to track orphaned buffers */
+ int handle_count;
+ char task_comm[TASK_COMM_LEN];
+ pid_t pid;
+};
+void ion_buffer_destroy(struct ion_buffer *buffer);
+
+/**
+ * struct ion_device - the metadata of the ion device node
+ * @dev: the actual misc device
+ * @buffers: an rb tree of all the existing buffers
+ * @buffer_lock: lock protecting the tree of buffers
+ * @lock: rwsem protecting the tree of heaps and clients
+ */
+struct ion_device {
+ struct miscdevice dev;
+ struct rb_root buffers;
+ struct mutex buffer_lock;
+ struct rw_semaphore lock;
+ struct plist_head heaps;
+ struct dentry *debug_root;
+ int heap_cnt;
+};
+
+/**
+ * struct ion_heap_ops - ops to operate on a given heap
+ * @allocate: allocate memory
+ * @free: free memory
+ * @map_kernel map memory to the kernel
+ * @unmap_kernel unmap memory to the kernel
+ * @map_user map memory to userspace
*
- * Provided by the board file in the form of platform data to a platform device.
+ * allocate, phys, and map_user return 0 on success, -errno on error.
+ * map_dma and map_kernel return pointer on success, ERR_PTR on
+ * error. @free will be called with ION_PRIV_FLAG_SHRINKER_FREE set in
+ * the buffer's private_flags when called from a shrinker. In that
+ * case, the pages being free'd must be truly free'd back to the
+ * system, not put in a page pool or otherwise cached.
*/
-struct ion_platform_data {
- int nr;
- struct ion_platform_heap *heaps;
+struct ion_heap_ops {
+ int (*allocate)(struct ion_heap *heap,
+ struct ion_buffer *buffer, unsigned long len,
+ unsigned long flags);
+ void (*free)(struct ion_buffer *buffer);
+ void * (*map_kernel)(struct ion_heap *heap, struct ion_buffer *buffer);
+ void (*unmap_kernel)(struct ion_heap *heap, struct ion_buffer *buffer);
+ int (*map_user)(struct ion_heap *mapper, struct ion_buffer *buffer,
+ struct vm_area_struct *vma);
+ int (*shrink)(struct ion_heap *heap, gfp_t gfp_mask, int nr_to_scan);
};
/**
- * ion_client_create() - allocate a client and returns it
- * @dev: the global ion device
- * @name: used for debugging
+ * heap flags - flags between the heaps and core ion code
*/
-struct ion_client *ion_client_create(struct ion_device *dev,
- const char *name);
+#define ION_HEAP_FLAG_DEFER_FREE (1 << 0)
/**
- * ion_client_destroy() - free's a client and all it's handles
- * @client: the client
+ * private flags - flags internal to ion
+ */
+/*
+ * Buffer is being freed from a shrinker function. Skip any possible
+ * heap-specific caching mechanism (e.g. page pools). Guarantees that
+ * any buffer storage that came from the system allocator will be
+ * returned to the system allocator.
+ */
+#define ION_PRIV_FLAG_SHRINKER_FREE (1 << 0)
+
+/**
+ * struct ion_heap - represents a heap in the system
+ * @node: rb node to put the heap on the device's tree of heaps
+ * @dev: back pointer to the ion_device
+ * @type: type of heap
+ * @ops: ops struct as above
+ * @flags: flags
+ * @id: id of heap, also indicates priority of this heap when
+ * allocating. These are specified by platform data and
+ * MUST be unique
+ * @name: used for debugging
+ * @shrinker: a shrinker for the heap
+ * @free_list: free list head if deferred free is used
+ * @free_list_size size of the deferred free list in bytes
+ * @lock: protects the free list
+ * @waitqueue: queue to wait on from deferred free thread
+ * @task: task struct of deferred free thread
+ * @debug_show: called when heap debug file is read to add any
+ * heap specific debug info to output
*
- * Free the provided client and all it's resources including
- * any handles it is holding.
+ * Represents a pool of memory from which buffers can be made. In some
+ * systems the only heap is regular system memory allocated via vmalloc.
+ * On others, some blocks might require large physically contiguous buffers
+ * that are allocated from a specially reserved heap.
*/
-void ion_client_destroy(struct ion_client *client);
+struct ion_heap {
+ struct plist_node node;
+ struct ion_device *dev;
+ enum ion_heap_type type;
+ struct ion_heap_ops *ops;
+ unsigned long flags;
+ unsigned int id;
+ const char *name;
+ struct shrinker shrinker;
+ struct list_head free_list;
+ size_t free_list_size;
+ spinlock_t free_lock;
+ wait_queue_head_t waitqueue;
+ struct task_struct *task;
+
+ int (*debug_show)(struct ion_heap *heap, struct seq_file *, void *);
+};
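
With clients and handles gone, a heap implementation reduces to an ion_heap_ops table plus an ion_heap handed to ion_device_add_heap(); the core now assigns the id and the debugfs entries. A minimal sketch reusing the generic helpers declared below, with the allocation details elided and all names illustrative:

#include <linux/init.h>
#include "ion.h"

static int example_heap_allocate(struct ion_heap *heap,
                                 struct ion_buffer *buffer,
                                 unsigned long len, unsigned long flags)
{
        /* fill buffer->sg_table (and optionally buffer->priv_virt) here */
        return -ENOMEM;
}

static void example_heap_free(struct ion_buffer *buffer)
{
        /* undo whatever allocate did */
}

static struct ion_heap_ops example_heap_ops = {
        .allocate       = example_heap_allocate,
        .free           = example_heap_free,
        .map_kernel     = ion_heap_map_kernel,
        .unmap_kernel   = ion_heap_unmap_kernel,
        .map_user       = ion_heap_map_user,
};

static struct ion_heap example_heap = {
        .ops    = &example_heap_ops,
        .type   = ION_HEAP_TYPE_SYSTEM,
        .name   = "example",
};

static int __init example_heap_init(void)
{
        /* the core assigns heap->id and hooks the heap into /dev/ion */
        ion_device_add_heap(&example_heap);
        return 0;
}
device_initcall(example_heap_init);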
/**
- * ion_alloc - allocate ion memory
- * @client: the client
- * @len: size of the allocation
- * @align: requested allocation alignment, lots of hardware blocks
- * have alignment requirements of some kind
- * @heap_id_mask: mask of heaps to allocate from, if multiple bits are set
- * heaps will be tried in order from highest to lowest
- * id
- * @flags: heap flags, the low 16 bits are consumed by ion, the
- * high 16 bits are passed on to the respective heap and
- * can be heap custom
+ * ion_buffer_cached - this ion buffer is cached
+ * @buffer: buffer
*
- * Allocate memory in one of the heaps provided in heap mask and return
- * an opaque handle to it.
+ * indicates whether this ion buffer is cached
*/
-struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
- size_t align, unsigned int heap_id_mask,
- unsigned int flags);
+bool ion_buffer_cached(struct ion_buffer *buffer);
/**
- * ion_free - free a handle
- * @client: the client
- * @handle: the handle to free
+ * ion_buffer_fault_user_mappings - fault in user mappings of this buffer
+ * @buffer: buffer
*
- * Free the provided handle.
+ * indicates whether userspace mappings of this buffer will be faulted
+ * in, this can affect how buffers are allocated from the heap.
*/
-void ion_free(struct ion_client *client, struct ion_handle *handle);
+bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer);
/**
- * ion_map_kernel - create mapping for the given handle
- * @client: the client
- * @handle: handle to map
+ * ion_device_add_heap - adds a heap to the ion device
+ * @heap: the heap to add
+ */
+void ion_device_add_heap(struct ion_heap *heap);
+
+/**
+ * some helpers for common operations on buffers using the sg_table
+ * and vaddr fields
+ */
+void *ion_heap_map_kernel(struct ion_heap *heap, struct ion_buffer *buffer);
+void ion_heap_unmap_kernel(struct ion_heap *heap, struct ion_buffer *buffer);
+int ion_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
+ struct vm_area_struct *vma);
+int ion_heap_buffer_zero(struct ion_buffer *buffer);
+int ion_heap_pages_zero(struct page *page, size_t size, pgprot_t pgprot);
+
+int ion_alloc(size_t len,
+ unsigned int heap_id_mask,
+ unsigned int flags);
+
+/**
+ * ion_heap_init_shrinker
+ * @heap: the heap
*
- * Map the given handle into the kernel and return a kernel address that
- * can be used to access this address.
+ * If a heap sets the ION_HEAP_FLAG_DEFER_FREE flag or defines the shrink op
+ * this function will be called to setup a shrinker to shrink the freelists
+ * and call the heap's shrink op.
*/
-void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle);
+void ion_heap_init_shrinker(struct ion_heap *heap);
/**
- * ion_unmap_kernel() - destroy a kernel mapping for a handle
- * @client: the client
- * @handle: handle to unmap
+ * ion_heap_init_deferred_free -- initialize deferred free functionality
+ * @heap: the heap
+ *
+ * If a heap sets the ION_HEAP_FLAG_DEFER_FREE flag this function will
+ * be called to setup deferred frees. Calls to free the buffer will
+ * return immediately and the actual free will occur some time later
*/
-void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle);
+int ion_heap_init_deferred_free(struct ion_heap *heap);
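
A heap opts into deferred freeing by setting ION_HEAP_FLAG_DEFER_FREE and calling the two init helpers when it is created. A minimal sketch; example_heap_ops is the hypothetical ops table from the earlier sketch, not something defined by this patch:

#include <linux/err.h>
#include <linux/slab.h>
#include "ion.h"

static struct ion_heap *example_deferred_heap_create(void)
{
        struct ion_heap *heap = kzalloc(sizeof(*heap), GFP_KERNEL);

        if (!heap)
                return ERR_PTR(-ENOMEM);

        heap->ops = &example_heap_ops;
        heap->type = ION_HEAP_TYPE_SYSTEM;
        heap->flags = ION_HEAP_FLAG_DEFER_FREE;

        if (ion_heap_init_deferred_free(heap)) {
                kfree(heap);
                return ERR_PTR(-ENOMEM);
        }
        /* shrinker drains the freelist honoring ION_PRIV_FLAG_SHRINKER_FREE */
        ion_heap_init_shrinker(heap);

        return heap;
}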
/**
- * ion_share_dma_buf() - share buffer as dma-buf
- * @client: the client
- * @handle: the handle
+ * ion_heap_freelist_add - add a buffer to the deferred free list
+ * @heap: the heap
+ * @buffer: the buffer
+ *
+ * Adds an item to the deferred freelist.
*/
-struct dma_buf *ion_share_dma_buf(struct ion_client *client,
- struct ion_handle *handle);
+void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer *buffer);
/**
- * ion_share_dma_buf_fd() - given an ion client, create a dma-buf fd
- * @client: the client
- * @handle: the handle
+ * ion_heap_freelist_drain - drain the deferred free list
+ * @heap: the heap
+ * @size: amount of memory to drain in bytes
+ *
+ * Drains the indicated amount of memory from the deferred freelist immediately.
+ * Returns the total amount freed. The total freed may be higher depending
+ * on the size of the items in the list, or lower if there is insufficient
+ * total memory on the freelist.
*/
-int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle);
+size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size);
/**
- * ion_import_dma_buf() - get ion_handle from dma-buf
- * @client: the client
- * @dmabuf: the dma-buf
+ * ion_heap_freelist_shrink - drain the deferred free
+ * list, skipping any heap-specific
+ * pooling or caching mechanisms
+ *
+ * @heap: the heap
+ * @size: amount of memory to drain in bytes
*
- * Get the ion_buffer associated with the dma-buf and return the ion_handle.
- * If no ion_handle exists for this buffer, return newly created ion_handle.
- * If dma-buf from another exporter is passed, return ERR_PTR(-EINVAL)
+ * Drains the indicated amount of memory from the deferred freelist immediately.
+ * Returns the total amount freed. The total freed may be higher depending
+ * on the size of the items in the list, or lower if there is insufficient
+ * total memory on the freelist.
+ *
+ * Unlike with @ion_heap_freelist_drain, don't put any pages back into
+ * page pools or otherwise cache the pages. Everything must be
+ * genuinely free'd back to the system. If you're free'ing from a
+ * shrinker you probably want to use this. Note that this relies on
+ * the heap.ops.free callback honoring the ION_PRIV_FLAG_SHRINKER_FREE
+ * flag.
*/
-struct ion_handle *ion_import_dma_buf(struct ion_client *client,
- struct dma_buf *dmabuf);
+size_t ion_heap_freelist_shrink(struct ion_heap *heap,
+ size_t size);
/**
- * ion_import_dma_buf_fd() - given a dma-buf fd from the ion exporter get handle
- * @client: the client
- * @fd: the dma-buf fd
+ * ion_heap_freelist_size - returns the size of the freelist in bytes
+ * @heap: the heap
+ */
+size_t ion_heap_freelist_size(struct ion_heap *heap);
+
+
+/**
+ * functions for creating and destroying a heap pool -- allows you
+ * to keep a pool of pre allocated memory to use from your heap. Keeping
+ * a pool of memory that is ready for dma, ie any cached mapping have been
+ * invalidated from the cache, provides a significant performance benefit on
+ * many systems
+ */
+
+/**
+ * struct ion_page_pool - pagepool struct
+ * @high_count: number of highmem items in the pool
+ * @low_count: number of lowmem items in the pool
+ * @high_items: list of highmem items
+ * @low_items: list of lowmem items
+ * @mutex: lock protecting this struct and especially the count
+ * item list
+ * @gfp_mask: gfp_mask to use from alloc
+ * @order: order of pages in the pool
+ * @list: plist node for list of pools
+ * @cached: it's cached pool or not
+ *
+ * Allows you to keep a pool of pre allocated pages to use from your heap.
+ * Keeping a pool of pages that is ready for dma, ie any cached mapping have
+ * been invalidated from the cache, provides a significant performance benefit
+ * on many systems
+ */
+struct ion_page_pool {
+ int high_count;
+ int low_count;
+ bool cached;
+ struct list_head high_items;
+ struct list_head low_items;
+ struct mutex mutex;
+ gfp_t gfp_mask;
+ unsigned int order;
+ struct plist_node list;
+};
+
+struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order,
+ bool cached);
+void ion_page_pool_destroy(struct ion_page_pool *pool);
+struct page *ion_page_pool_alloc(struct ion_page_pool *pool);
+void ion_page_pool_free(struct ion_page_pool *pool, struct page *page);
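
A minimal sketch of the heap-side fast path these pool helpers are meant for, using a single pool of uncached order-0 pages; the names are illustrative:

#include "ion.h"

static struct ion_page_pool *example_pool;

static int example_pool_init(void)
{
        example_pool = ion_page_pool_create(GFP_KERNEL | __GFP_ZERO, 0, false);
        return example_pool ? 0 : -ENOMEM;
}

static struct page *example_get_page(void)
{
        /* falls back to the page allocator when the pool is empty */
        return ion_page_pool_alloc(example_pool);
}

static void example_put_page(struct page *page)
{
        /* returns the page to the pool instead of freeing it */
        ion_page_pool_free(example_pool, page);
}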
+
+/** ion_page_pool_shrink - shrinks the size of the memory cached in the pool
+ * @pool: the pool
+ * @gfp_mask: the memory type to reclaim
+ * @nr_to_scan: number of items to shrink in pages
*
- * Given an dma-buf fd that was allocated through ion via ion_share_dma_buf_fd,
- * import that fd and return a handle representing it. If a dma-buf from
- * another exporter is passed in this function will return ERR_PTR(-EINVAL)
+ * returns the number of items freed in pages
*/
-struct ion_handle *ion_import_dma_buf_fd(struct ion_client *client, int fd);
+int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
+ int nr_to_scan);
+
+long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
+
+int ion_query_heaps(struct ion_heap_query *query);
-#endif /* _LINUX_ION_H */
+#endif /* _ION_H */
diff --git a/drivers/staging/android/ion/ion_carveout_heap.c b/drivers/staging/android/ion/ion_carveout_heap.c
index a8ea97391c40..5fdc1f328f61 100644
--- a/drivers/staging/android/ion/ion_carveout_heap.c
+++ b/drivers/staging/android/ion/ion_carveout_heap.c
@@ -23,19 +23,17 @@
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "ion.h"
-#include "ion_priv.h"
#define ION_CARVEOUT_ALLOCATE_FAIL -1
struct ion_carveout_heap {
struct ion_heap heap;
struct gen_pool *pool;
- ion_phys_addr_t base;
+ phys_addr_t base;
};
-static ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap,
- unsigned long size,
- unsigned long align)
+static phys_addr_t ion_carveout_allocate(struct ion_heap *heap,
+ unsigned long size)
{
struct ion_carveout_heap *carveout_heap =
container_of(heap, struct ion_carveout_heap, heap);
@@ -47,7 +45,7 @@ static ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap,
return offset;
}
-static void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
+static void ion_carveout_free(struct ion_heap *heap, phys_addr_t addr,
unsigned long size)
{
struct ion_carveout_heap *carveout_heap =
@@ -60,16 +58,13 @@ static void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
static int ion_carveout_heap_allocate(struct ion_heap *heap,
struct ion_buffer *buffer,
- unsigned long size, unsigned long align,
+ unsigned long size,
unsigned long flags)
{
struct sg_table *table;
- ion_phys_addr_t paddr;
+ phys_addr_t paddr;
int ret;
- if (align > PAGE_SIZE)
- return -EINVAL;
-
table = kmalloc(sizeof(*table), GFP_KERNEL);
if (!table)
return -ENOMEM;
@@ -77,7 +72,7 @@ static int ion_carveout_heap_allocate(struct ion_heap *heap,
if (ret)
goto err_free;
- paddr = ion_carveout_allocate(heap, size, align);
+ paddr = ion_carveout_allocate(heap, size);
if (paddr == ION_CARVEOUT_ALLOCATE_FAIL) {
ret = -ENOMEM;
goto err_free_table;
@@ -100,14 +95,10 @@ static void ion_carveout_heap_free(struct ion_buffer *buffer)
struct ion_heap *heap = buffer->heap;
struct sg_table *table = buffer->sg_table;
struct page *page = sg_page(table->sgl);
- ion_phys_addr_t paddr = PFN_PHYS(page_to_pfn(page));
+ phys_addr_t paddr = PFN_PHYS(page_to_pfn(page));
ion_heap_buffer_zero(buffer);
- if (ion_buffer_cached(buffer))
- dma_sync_sg_for_device(NULL, table->sgl, table->nents,
- DMA_BIDIRECTIONAL);
-
ion_carveout_free(heap, paddr, buffer->size);
sg_free_table(table);
kfree(table);
@@ -132,8 +123,6 @@ struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *heap_data)
page = pfn_to_page(PFN_DOWN(heap_data->base));
size = heap_data->size;
- ion_pages_sync_for_device(NULL, page, size, DMA_BIDIRECTIONAL);
-
ret = ion_heap_pages_zero(page, size, pgprot_writecombine(PAGE_KERNEL));
if (ret)
return ERR_PTR(ret);
@@ -156,13 +145,3 @@ struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *heap_data)
return &carveout_heap->heap;
}
-
-void ion_carveout_heap_destroy(struct ion_heap *heap)
-{
- struct ion_carveout_heap *carveout_heap =
- container_of(heap, struct ion_carveout_heap, heap);
-
- gen_pool_destroy(carveout_heap->pool);
- kfree(carveout_heap);
- carveout_heap = NULL;
-}
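
For context (not part of this patch), the carveout heap sits on top of genalloc; roughly, the pool it hands physical ranges out of is built like this, with base and size describing the reserved region:

#include <linux/genalloc.h>

static struct gen_pool *example_carveout_pool(phys_addr_t base, size_t size)
{
        struct gen_pool *pool = gen_pool_create(PAGE_SHIFT, -1);

        if (!pool)
                return NULL;
        if (gen_pool_add(pool, base, size, -1)) {
                gen_pool_destroy(pool);
                return NULL;
        }
        return pool;
}

/* later: paddr = gen_pool_alloc(pool, size); gen_pool_free(pool, paddr, size); */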
diff --git a/drivers/staging/android/ion/ion_chunk_heap.c b/drivers/staging/android/ion/ion_chunk_heap.c
index 70495dc645ea..102c09398317 100644
--- a/drivers/staging/android/ion/ion_chunk_heap.c
+++ b/drivers/staging/android/ion/ion_chunk_heap.c
@@ -22,12 +22,11 @@
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "ion.h"
-#include "ion_priv.h"
struct ion_chunk_heap {
struct ion_heap heap;
struct gen_pool *pool;
- ion_phys_addr_t base;
+ phys_addr_t base;
unsigned long chunk_size;
unsigned long size;
unsigned long allocated;
@@ -35,7 +34,7 @@ struct ion_chunk_heap {
static int ion_chunk_heap_allocate(struct ion_heap *heap,
struct ion_buffer *buffer,
- unsigned long size, unsigned long align,
+ unsigned long size,
unsigned long flags)
{
struct ion_chunk_heap *chunk_heap =
@@ -46,9 +45,6 @@ static int ion_chunk_heap_allocate(struct ion_heap *heap,
unsigned long num_chunks;
unsigned long allocated_size;
- if (align > chunk_heap->chunk_size)
- return -EINVAL;
-
allocated_size = ALIGN(size, chunk_heap->chunk_size);
num_chunks = allocated_size / chunk_heap->chunk_size;
@@ -104,10 +100,6 @@ static void ion_chunk_heap_free(struct ion_buffer *buffer)
ion_heap_buffer_zero(buffer);
- if (ion_buffer_cached(buffer))
- dma_sync_sg_for_device(NULL, table->sgl, table->nents,
- DMA_BIDIRECTIONAL);
-
for_each_sg(table->sgl, sg, table->nents, i) {
gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)),
sg->length);
@@ -135,8 +127,6 @@ struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *heap_data)
page = pfn_to_page(PFN_DOWN(heap_data->base));
size = heap_data->size;
- ion_pages_sync_for_device(NULL, page, size, DMA_BIDIRECTIONAL);
-
ret = ion_heap_pages_zero(page, size, pgprot_writecombine(PAGE_KERNEL));
if (ret)
return ERR_PTR(ret);
@@ -160,8 +150,8 @@ struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *heap_data)
chunk_heap->heap.ops = &chunk_heap_ops;
chunk_heap->heap.type = ION_HEAP_TYPE_CHUNK;
chunk_heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;
- pr_debug("%s: base %lu size %zu align %ld\n", __func__,
- chunk_heap->base, heap_data->size, heap_data->align);
+ pr_debug("%s: base %pa size %zu\n", __func__,
+ &chunk_heap->base, heap_data->size);
return &chunk_heap->heap;
@@ -170,12 +160,3 @@ error_gen_pool_create:
return ERR_PTR(ret);
}
-void ion_chunk_heap_destroy(struct ion_heap *heap)
-{
- struct ion_chunk_heap *chunk_heap =
- container_of(heap, struct ion_chunk_heap, heap);
-
- gen_pool_destroy(chunk_heap->pool);
- kfree(chunk_heap);
- chunk_heap = NULL;
-}
diff --git a/drivers/staging/android/ion/ion_cma_heap.c b/drivers/staging/android/ion/ion_cma_heap.c
index 6c4068523781..a0949bc0dcf4 100644
--- a/drivers/staging/android/ion/ion_cma_heap.c
+++ b/drivers/staging/android/ion/ion_cma_heap.c
@@ -19,124 +19,75 @@
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/err.h>
-#include <linux/dma-mapping.h>
+#include <linux/cma.h>
+#include <linux/scatterlist.h>
#include "ion.h"
-#include "ion_priv.h"
struct ion_cma_heap {
struct ion_heap heap;
- struct device *dev;
+ struct cma *cma;
};
#define to_cma_heap(x) container_of(x, struct ion_cma_heap, heap)
-struct ion_cma_buffer_info {
- void *cpu_addr;
- dma_addr_t handle;
- struct sg_table *table;
-};
-
/* ION CMA heap operations functions */
static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
- unsigned long len, unsigned long align,
+ unsigned long len,
unsigned long flags)
{
struct ion_cma_heap *cma_heap = to_cma_heap(heap);
- struct device *dev = cma_heap->dev;
- struct ion_cma_buffer_info *info;
-
- dev_dbg(dev, "Request buffer allocation len %ld\n", len);
-
- if (buffer->flags & ION_FLAG_CACHED)
- return -EINVAL;
-
- if (align > PAGE_SIZE)
- return -EINVAL;
+ struct sg_table *table;
+ struct page *pages;
+ int ret;
- info = kzalloc(sizeof(*info), GFP_KERNEL);
- if (!info)
+ pages = cma_alloc(cma_heap->cma, len, 0, GFP_KERNEL);
+ if (!pages)
return -ENOMEM;
- info->cpu_addr = dma_alloc_coherent(dev, len, &(info->handle),
- GFP_HIGHUSER | __GFP_ZERO);
-
- if (!info->cpu_addr) {
- dev_err(dev, "Fail to allocate buffer\n");
+ table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
+ if (!table)
goto err;
- }
- info->table = kmalloc(sizeof(*info->table), GFP_KERNEL);
- if (!info->table)
+ ret = sg_alloc_table(table, 1, GFP_KERNEL);
+ if (ret)
goto free_mem;
- if (dma_get_sgtable(dev, info->table, info->cpu_addr, info->handle,
- len))
- goto free_table;
- /* keep this for memory release */
- buffer->priv_virt = info;
- buffer->sg_table = info->table;
- dev_dbg(dev, "Allocate buffer %p\n", buffer);
+ sg_set_page(table->sgl, pages, len, 0);
+
+ buffer->priv_virt = pages;
+ buffer->sg_table = table;
return 0;
-free_table:
- kfree(info->table);
free_mem:
- dma_free_coherent(dev, len, info->cpu_addr, info->handle);
+ kfree(table);
err:
- kfree(info);
+ cma_release(cma_heap->cma, pages, buffer->size);
return -ENOMEM;
}
static void ion_cma_free(struct ion_buffer *buffer)
{
struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap);
- struct device *dev = cma_heap->dev;
- struct ion_cma_buffer_info *info = buffer->priv_virt;
+ struct page *pages = buffer->priv_virt;
- dev_dbg(dev, "Release buffer %p\n", buffer);
/* release memory */
- dma_free_coherent(dev, buffer->size, info->cpu_addr, info->handle);
+ cma_release(cma_heap->cma, pages, buffer->size);
/* release sg table */
- sg_free_table(info->table);
- kfree(info->table);
- kfree(info);
-}
-
-static int ion_cma_mmap(struct ion_heap *mapper, struct ion_buffer *buffer,
- struct vm_area_struct *vma)
-{
- struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap);
- struct device *dev = cma_heap->dev;
- struct ion_cma_buffer_info *info = buffer->priv_virt;
-
- return dma_mmap_coherent(dev, vma, info->cpu_addr, info->handle,
- buffer->size);
-}
-
-static void *ion_cma_map_kernel(struct ion_heap *heap,
- struct ion_buffer *buffer)
-{
- struct ion_cma_buffer_info *info = buffer->priv_virt;
- /* kernel memory mapping has been done at allocation time */
- return info->cpu_addr;
-}
-
-static void ion_cma_unmap_kernel(struct ion_heap *heap,
- struct ion_buffer *buffer)
-{
+ sg_free_table(buffer->sg_table);
+ kfree(buffer->sg_table);
}
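
One editorial observation, offered tentatively: cma_alloc() and cma_release() take a count in pages rather than bytes, so a byte length would normally be converted before the call, roughly as below, whereas the hunk above passes len and buffer->size through directly.

#include <linux/cma.h>
#include <linux/mm.h>

static struct page *example_cma_alloc(struct cma *cma, unsigned long len)
{
        unsigned long nr_pages = PAGE_ALIGN(len) >> PAGE_SHIFT;

        return cma_alloc(cma, nr_pages, 0, GFP_KERNEL);
}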
static struct ion_heap_ops ion_cma_ops = {
.allocate = ion_cma_allocate,
.free = ion_cma_free,
- .map_user = ion_cma_mmap,
- .map_kernel = ion_cma_map_kernel,
- .unmap_kernel = ion_cma_unmap_kernel,
+ .map_user = ion_heap_map_user,
+ .map_kernel = ion_heap_map_kernel,
+ .unmap_kernel = ion_heap_unmap_kernel,
};
-struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *data)
+static struct ion_heap *__ion_cma_heap_create(struct cma *cma)
{
struct ion_cma_heap *cma_heap;
@@ -150,14 +101,28 @@ struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *data)
* get device from private heaps data, later it will be
* used to make the link with reserved CMA memory
*/
- cma_heap->dev = data->priv;
+ cma_heap->cma = cma;
cma_heap->heap.type = ION_HEAP_TYPE_DMA;
return &cma_heap->heap;
}
-void ion_cma_heap_destroy(struct ion_heap *heap)
+int __ion_add_cma_heaps(struct cma *cma, void *data)
{
- struct ion_cma_heap *cma_heap = to_cma_heap(heap);
+ struct ion_heap *heap;
- kfree(cma_heap);
+ heap = __ion_cma_heap_create(cma);
+ if (IS_ERR(heap))
+ return PTR_ERR(heap);
+
+ heap->name = cma_get_name(cma);
+
+ ion_device_add_heap(heap);
+ return 0;
+}
+
+static int ion_add_cma_heaps(void)
+{
+ cma_for_each_area(__ion_add_cma_heaps, NULL);
+ return 0;
}
+device_initcall(ion_add_cma_heaps);
diff --git a/drivers/staging/android/ion/ion_dummy_driver.c b/drivers/staging/android/ion/ion_dummy_driver.c
deleted file mode 100644
index cf5c010d32bc..000000000000
--- a/drivers/staging/android/ion/ion_dummy_driver.c
+++ /dev/null
@@ -1,156 +0,0 @@
-/*
- * drivers/gpu/ion/ion_dummy_driver.c
- *
- * Copyright (C) 2013 Linaro, Inc
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/err.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/bootmem.h>
-#include <linux/memblock.h>
-#include <linux/sizes.h>
-#include <linux/io.h>
-#include "ion.h"
-#include "ion_priv.h"
-
-static struct ion_device *idev;
-static struct ion_heap **heaps;
-
-static void *carveout_ptr;
-static void *chunk_ptr;
-
-static struct ion_platform_heap dummy_heaps[] = {
- {
- .id = ION_HEAP_TYPE_SYSTEM,
- .type = ION_HEAP_TYPE_SYSTEM,
- .name = "system",
- },
- {
- .id = ION_HEAP_TYPE_SYSTEM_CONTIG,
- .type = ION_HEAP_TYPE_SYSTEM_CONTIG,
- .name = "system contig",
- },
- {
- .id = ION_HEAP_TYPE_CARVEOUT,
- .type = ION_HEAP_TYPE_CARVEOUT,
- .name = "carveout",
- .size = SZ_4M,
- },
- {
- .id = ION_HEAP_TYPE_CHUNK,
- .type = ION_HEAP_TYPE_CHUNK,
- .name = "chunk",
- .size = SZ_4M,
- .align = SZ_16K,
- .priv = (void *)(SZ_16K),
- },
-};
-
-static const struct ion_platform_data dummy_ion_pdata = {
- .nr = ARRAY_SIZE(dummy_heaps),
- .heaps = dummy_heaps,
-};
-
-static int __init ion_dummy_init(void)
-{
- int i, err;
-
- idev = ion_device_create(NULL);
- if (IS_ERR(idev))
- return PTR_ERR(idev);
- heaps = kcalloc(dummy_ion_pdata.nr, sizeof(struct ion_heap *),
- GFP_KERNEL);
- if (!heaps)
- return -ENOMEM;
-
-
- /* Allocate a dummy carveout heap */
- carveout_ptr = alloc_pages_exact(
- dummy_heaps[ION_HEAP_TYPE_CARVEOUT].size,
- GFP_KERNEL);
- if (carveout_ptr)
- dummy_heaps[ION_HEAP_TYPE_CARVEOUT].base =
- virt_to_phys(carveout_ptr);
- else
- pr_err("ion_dummy: Could not allocate carveout\n");
-
- /* Allocate a dummy chunk heap */
- chunk_ptr = alloc_pages_exact(
- dummy_heaps[ION_HEAP_TYPE_CHUNK].size,
- GFP_KERNEL);
- if (chunk_ptr)
- dummy_heaps[ION_HEAP_TYPE_CHUNK].base = virt_to_phys(chunk_ptr);
- else
- pr_err("ion_dummy: Could not allocate chunk\n");
-
- for (i = 0; i < dummy_ion_pdata.nr; i++) {
- struct ion_platform_heap *heap_data = &dummy_ion_pdata.heaps[i];
-
- if (heap_data->type == ION_HEAP_TYPE_CARVEOUT &&
- !heap_data->base)
- continue;
-
- if (heap_data->type == ION_HEAP_TYPE_CHUNK && !heap_data->base)
- continue;
-
- heaps[i] = ion_heap_create(heap_data);
- if (IS_ERR_OR_NULL(heaps[i])) {
- err = PTR_ERR(heaps[i]);
- goto err;
- }
- ion_device_add_heap(idev, heaps[i]);
- }
- return 0;
-err:
- for (i = 0; i < dummy_ion_pdata.nr; ++i)
- ion_heap_destroy(heaps[i]);
- kfree(heaps);
-
- if (carveout_ptr) {
- free_pages_exact(carveout_ptr,
- dummy_heaps[ION_HEAP_TYPE_CARVEOUT].size);
- carveout_ptr = NULL;
- }
- if (chunk_ptr) {
- free_pages_exact(chunk_ptr,
- dummy_heaps[ION_HEAP_TYPE_CHUNK].size);
- chunk_ptr = NULL;
- }
- return err;
-}
-device_initcall(ion_dummy_init);
-
-static void __exit ion_dummy_exit(void)
-{
- int i;
-
- ion_device_destroy(idev);
-
- for (i = 0; i < dummy_ion_pdata.nr; i++)
- ion_heap_destroy(heaps[i]);
- kfree(heaps);
-
- if (carveout_ptr) {
- free_pages_exact(carveout_ptr,
- dummy_heaps[ION_HEAP_TYPE_CARVEOUT].size);
- carveout_ptr = NULL;
- }
- if (chunk_ptr) {
- free_pages_exact(chunk_ptr,
- dummy_heaps[ION_HEAP_TYPE_CHUNK].size);
- chunk_ptr = NULL;
- }
-}
-__exitcall(ion_dummy_exit);
diff --git a/drivers/staging/android/ion/ion_heap.c b/drivers/staging/android/ion/ion_heap.c
index c69d0bd53693..91faa7f035b9 100644
--- a/drivers/staging/android/ion/ion_heap.c
+++ b/drivers/staging/android/ion/ion_heap.c
@@ -24,7 +24,6 @@
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>
#include "ion.h"
-#include "ion_priv.h"
void *ion_heap_map_kernel(struct ion_heap *heap,
struct ion_buffer *buffer)
@@ -315,70 +314,3 @@ void ion_heap_init_shrinker(struct ion_heap *heap)
heap->shrinker.batch = 0;
register_shrinker(&heap->shrinker);
}
-
-struct ion_heap *ion_heap_create(struct ion_platform_heap *heap_data)
-{
- struct ion_heap *heap = NULL;
-
- switch (heap_data->type) {
- case ION_HEAP_TYPE_SYSTEM_CONTIG:
- heap = ion_system_contig_heap_create(heap_data);
- break;
- case ION_HEAP_TYPE_SYSTEM:
- heap = ion_system_heap_create(heap_data);
- break;
- case ION_HEAP_TYPE_CARVEOUT:
- heap = ion_carveout_heap_create(heap_data);
- break;
- case ION_HEAP_TYPE_CHUNK:
- heap = ion_chunk_heap_create(heap_data);
- break;
- case ION_HEAP_TYPE_DMA:
- heap = ion_cma_heap_create(heap_data);
- break;
- default:
- pr_err("%s: Invalid heap type %d\n", __func__,
- heap_data->type);
- return ERR_PTR(-EINVAL);
- }
-
- if (IS_ERR_OR_NULL(heap)) {
- pr_err("%s: error creating heap %s type %d base %lu size %zu\n",
- __func__, heap_data->name, heap_data->type,
- heap_data->base, heap_data->size);
- return ERR_PTR(-EINVAL);
- }
-
- heap->name = heap_data->name;
- heap->id = heap_data->id;
- return heap;
-}
-EXPORT_SYMBOL(ion_heap_create);
-
-void ion_heap_destroy(struct ion_heap *heap)
-{
- if (!heap)
- return;
-
- switch (heap->type) {
- case ION_HEAP_TYPE_SYSTEM_CONTIG:
- ion_system_contig_heap_destroy(heap);
- break;
- case ION_HEAP_TYPE_SYSTEM:
- ion_system_heap_destroy(heap);
- break;
- case ION_HEAP_TYPE_CARVEOUT:
- ion_carveout_heap_destroy(heap);
- break;
- case ION_HEAP_TYPE_CHUNK:
- ion_chunk_heap_destroy(heap);
- break;
- case ION_HEAP_TYPE_DMA:
- ion_cma_heap_destroy(heap);
- break;
- default:
- pr_err("%s: Invalid heap type %d\n", __func__,
- heap->type);
- }
-}
-EXPORT_SYMBOL(ion_heap_destroy);
diff --git a/drivers/staging/android/ion/ion_of.c b/drivers/staging/android/ion/ion_of.c
deleted file mode 100644
index 7791c705ea31..000000000000
--- a/drivers/staging/android/ion/ion_of.c
+++ /dev/null
@@ -1,184 +0,0 @@
-/*
- * Based on work from:
- * Andrew Andrianov <andrew@ncrmnt.org>
- * Google
- * The Linux Foundation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-#include <linux/of.h>
-#include <linux/of_platform.h>
-#include <linux/of_address.h>
-#include <linux/clk.h>
-#include <linux/dma-mapping.h>
-#include <linux/cma.h>
-#include <linux/dma-contiguous.h>
-#include <linux/io.h>
-#include <linux/of_reserved_mem.h>
-#include "ion.h"
-#include "ion_priv.h"
-#include "ion_of.h"
-
-static int ion_parse_dt_heap_common(struct device_node *heap_node,
- struct ion_platform_heap *heap,
- struct ion_of_heap *compatible)
-{
- int i;
-
- for (i = 0; compatible[i].name; i++) {
- if (of_device_is_compatible(heap_node, compatible[i].compat))
- break;
- }
-
- if (!compatible[i].name)
- return -ENODEV;
-
- heap->id = compatible[i].heap_id;
- heap->type = compatible[i].type;
- heap->name = compatible[i].name;
- heap->align = compatible[i].align;
-
- /* Some kind of callback function pointer? */
-
- pr_info("%s: id %d type %d name %s align %lx\n", __func__,
- heap->id, heap->type, heap->name, heap->align);
- return 0;
-}
-
-static int ion_setup_heap_common(struct platform_device *parent,
- struct device_node *heap_node,
- struct ion_platform_heap *heap)
-{
- int ret = 0;
-
- switch (heap->type) {
- case ION_HEAP_TYPE_CARVEOUT:
- case ION_HEAP_TYPE_CHUNK:
- if (heap->base && heap->size)
- return 0;
-
- ret = of_reserved_mem_device_init(heap->priv);
- break;
- default:
- break;
- }
-
- return ret;
-}
-
-struct ion_platform_data *ion_parse_dt(struct platform_device *pdev,
- struct ion_of_heap *compatible)
-{
- int num_heaps, ret;
- const struct device_node *dt_node = pdev->dev.of_node;
- struct device_node *node;
- struct ion_platform_heap *heaps;
- struct ion_platform_data *data;
- int i = 0;
-
- num_heaps = of_get_available_child_count(dt_node);
-
- if (!num_heaps)
- return ERR_PTR(-EINVAL);
-
- heaps = devm_kzalloc(&pdev->dev,
- sizeof(struct ion_platform_heap) * num_heaps,
- GFP_KERNEL);
- if (!heaps)
- return ERR_PTR(-ENOMEM);
-
- data = devm_kzalloc(&pdev->dev, sizeof(struct ion_platform_data),
- GFP_KERNEL);
- if (!data)
- return ERR_PTR(-ENOMEM);
-
- for_each_available_child_of_node(dt_node, node) {
- struct platform_device *heap_pdev;
-
- ret = ion_parse_dt_heap_common(node, &heaps[i], compatible);
- if (ret)
- return ERR_PTR(ret);
-
- heap_pdev = of_platform_device_create(node, heaps[i].name,
- &pdev->dev);
- if (!heap_pdev)
- return ERR_PTR(-ENOMEM);
- heap_pdev->dev.platform_data = &heaps[i];
-
- heaps[i].priv = &heap_pdev->dev;
-
- ret = ion_setup_heap_common(pdev, node, &heaps[i]);
- if (ret)
- goto out_err;
- i++;
- }
-
- data->heaps = heaps;
- data->nr = num_heaps;
- return data;
-
-out_err:
- for ( ; i >= 0; i--)
- if (heaps[i].priv)
- of_device_unregister(to_platform_device(heaps[i].priv));
-
- return ERR_PTR(ret);
-}
-
-void ion_destroy_platform_data(struct ion_platform_data *data)
-{
- int i;
-
- for (i = 0; i < data->nr; i++)
- if (data->heaps[i].priv)
- of_device_unregister(to_platform_device(
- data->heaps[i].priv));
-}
-
-#ifdef CONFIG_OF_RESERVED_MEM
-#include <linux/of.h>
-#include <linux/of_fdt.h>
-#include <linux/of_reserved_mem.h>
-
-static int rmem_ion_device_init(struct reserved_mem *rmem, struct device *dev)
-{
- struct platform_device *pdev = to_platform_device(dev);
- struct ion_platform_heap *heap = pdev->dev.platform_data;
-
- heap->base = rmem->base;
- heap->size = rmem->size;
- pr_debug("%s: heap %s base %pa size %pa dev %p\n", __func__,
- heap->name, &rmem->base, &rmem->size, dev);
- return 0;
-}
-
-static void rmem_ion_device_release(struct reserved_mem *rmem,
- struct device *dev)
-{
-}
-
-static const struct reserved_mem_ops rmem_dma_ops = {
- .device_init = rmem_ion_device_init,
- .device_release = rmem_ion_device_release,
-};
-
-static int __init rmem_ion_setup(struct reserved_mem *rmem)
-{
- phys_addr_t size = rmem->size;
-
- size = size / 1024;
-
- pr_info("Ion memory setup at %pa size %pa MiB\n",
- &rmem->base, &size);
- rmem->ops = &rmem_dma_ops;
- return 0;
-}
-
-RESERVEDMEM_OF_DECLARE(ion, "ion-region", rmem_ion_setup);
-#endif
diff --git a/drivers/staging/android/ion/ion_of.h b/drivers/staging/android/ion/ion_of.h
deleted file mode 100644
index 8241a1770f0a..000000000000
--- a/drivers/staging/android/ion/ion_of.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Based on work from:
- * Andrew Andrianov <andrew@ncrmnt.org>
- * Google
- * The Linux Foundation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef _ION_OF_H
-#define _ION_OF_H
-
-struct ion_of_heap {
- const char *compat;
- int heap_id;
- int type;
- const char *name;
- int align;
-};
-
-#define PLATFORM_HEAP(_compat, _id, _type, _name) \
-{ \
- .compat = _compat, \
- .heap_id = _id, \
- .type = _type, \
- .name = _name, \
- .align = PAGE_SIZE, \
-}
-
-struct ion_platform_data *ion_parse_dt(struct platform_device *pdev,
- struct ion_of_heap *compatible);
-
-void ion_destroy_platform_data(struct ion_platform_data *data);
-
-#endif
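For orientation, the interface removed in ion_of.c and ion_of.h above was meant to be consumed by platform drivers roughly as in the sketch below. Everything named example_* is hypothetical; only PLATFORM_HEAP(), struct ion_of_heap and ion_parse_dt() come from the header just deleted, and the compatible strings and heap ids are invented.

/* Hypothetical consumer of the removed ion_of interface (sketch only). */
static struct ion_of_heap example_heaps[] = {
	PLATFORM_HEAP("example,sys-heap", 0, ION_HEAP_TYPE_SYSTEM, "sys"),
	PLATFORM_HEAP("example,carveout-heap", 1, ION_HEAP_TYPE_CARVEOUT,
		      "carveout"),
	{ /* sentinel: ion_parse_dt() stops at a NULL .name */ }
};

static int example_ion_probe(struct platform_device *pdev)
{
	struct ion_platform_data *pdata;

	pdata = ion_parse_dt(pdev, example_heaps);
	if (IS_ERR(pdata))
		return PTR_ERR(pdata);

	/* create one heap per entry in pdata->heaps[0..pdata->nr - 1],
	 * and call ion_destroy_platform_data(pdata) on the remove path
	 */
	return 0;
}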
diff --git a/drivers/staging/android/ion/ion_page_pool.c b/drivers/staging/android/ion/ion_page_pool.c
index aea89c1ec345..817849df9de3 100644
--- a/drivers/staging/android/ion/ion_page_pool.c
+++ b/drivers/staging/android/ion/ion_page_pool.c
@@ -22,7 +22,8 @@
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/swap.h>
-#include "ion_priv.h"
+
+#include "ion.h"
static void *ion_page_pool_alloc_pages(struct ion_page_pool *pool)
{
@@ -30,9 +31,6 @@ static void *ion_page_pool_alloc_pages(struct ion_page_pool *pool)
if (!page)
return NULL;
- if (!pool->cached)
- ion_pages_sync_for_device(NULL, page, PAGE_SIZE << pool->order,
- DMA_BIDIRECTIONAL);
return page;
}
diff --git a/drivers/staging/android/ion/ion_priv.h b/drivers/staging/android/ion/ion_priv.h
deleted file mode 100644
index 5b3059c78431..000000000000
--- a/drivers/staging/android/ion/ion_priv.h
+++ /dev/null
@@ -1,473 +0,0 @@
-/*
- * drivers/staging/android/ion/ion_priv.h
- *
- * Copyright (C) 2011 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#ifndef _ION_PRIV_H
-#define _ION_PRIV_H
-
-#include <linux/device.h>
-#include <linux/dma-direction.h>
-#include <linux/kref.h>
-#include <linux/mm_types.h>
-#include <linux/mutex.h>
-#include <linux/rbtree.h>
-#include <linux/sched.h>
-#include <linux/shrinker.h>
-#include <linux/types.h>
-#include <linux/miscdevice.h>
-
-#include "ion.h"
-
-/**
- * struct ion_buffer - metadata for a particular buffer
- * @ref: reference count
- * @node: node in the ion_device buffers tree
- * @dev: back pointer to the ion_device
- * @heap: back pointer to the heap the buffer came from
- * @flags: buffer specific flags
- * @private_flags: internal buffer specific flags
- * @size: size of the buffer
- * @priv_virt: private data to the buffer representable as
- * a void *
- * @lock: protects the buffer's cnt fields
- * @kmap_cnt: number of times the buffer is mapped to the kernel
- * @vaddr: the kernel mapping if kmap_cnt is not zero
- * @dmap_cnt: number of times the buffer is mapped for dma
- * @sg_table: the sg table for the buffer if dmap_cnt is not zero
- * @pages: flat array of pages in the buffer -- used by fault
- * handler and only valid for buffers that are faulted in
- * @vmas: list of vma's mapping this buffer
- * @handle_count: count of handles referencing this buffer
- * @task_comm: taskcomm of last client to reference this buffer in a
- * handle, used for debugging
- * @pid: pid of last client to reference this buffer in a
- * handle, used for debugging
- */
-struct ion_buffer {
- struct kref ref;
- union {
- struct rb_node node;
- struct list_head list;
- };
- struct ion_device *dev;
- struct ion_heap *heap;
- unsigned long flags;
- unsigned long private_flags;
- size_t size;
- void *priv_virt;
- struct mutex lock;
- int kmap_cnt;
- void *vaddr;
- int dmap_cnt;
- struct sg_table *sg_table;
- struct page **pages;
- struct list_head vmas;
- /* used to track orphaned buffers */
- int handle_count;
- char task_comm[TASK_COMM_LEN];
- pid_t pid;
-};
-void ion_buffer_destroy(struct ion_buffer *buffer);
-
-/**
- * struct ion_device - the metadata of the ion device node
- * @dev: the actual misc device
- * @buffers: an rb tree of all the existing buffers
- * @buffer_lock: lock protecting the tree of buffers
- * @lock: rwsem protecting the tree of heaps and clients
- * @heaps: list of all the heaps in the system
- * @clients: rb tree of all the clients of the device
- */
-struct ion_device {
- struct miscdevice dev;
- struct rb_root buffers;
- struct mutex buffer_lock;
- struct rw_semaphore lock;
- struct plist_head heaps;
- long (*custom_ioctl)(struct ion_client *client, unsigned int cmd,
- unsigned long arg);
- struct rb_root clients;
- struct dentry *debug_root;
- struct dentry *heaps_debug_root;
- struct dentry *clients_debug_root;
- int heap_cnt;
-};
-
-/**
- * struct ion_client - a process/hw block local address space
- * @node: node in the tree of all clients
- * @dev: backpointer to ion device
- * @handles: an rb tree of all the handles in this client
- * @idr: an idr space for allocating handle ids
- * @lock: lock protecting the tree of handles
- * @name: used for debugging
- * @display_name: used for debugging (unique version of @name)
- * @display_serial: used for debugging (to make display_name unique)
- * @task: used for debugging
- *
- * A client represents a list of buffers this client may access.
- * The mutex stored here is used to protect both the tree of handles
- * and the handles themselves, and should be held while modifying either.
- */
-struct ion_client {
- struct rb_node node;
- struct ion_device *dev;
- struct rb_root handles;
- struct idr idr;
- struct mutex lock;
- const char *name;
- char *display_name;
- int display_serial;
- struct task_struct *task;
- pid_t pid;
- struct dentry *debug_root;
-};
-
-/**
- * ion_handle - a client local reference to a buffer
- * @ref: reference count
- * @client: back pointer to the client the buffer resides in
- * @buffer: pointer to the buffer
- * @node: node in the client's handle rbtree
- * @kmap_cnt: count of times this client has mapped to kernel
- * @id: client-unique id allocated by client->idr
- *
- * Modifications to node and kmap_cnt should be protected by the
- * lock in the client. Other fields are never changed after initialization.
- */
-struct ion_handle {
- struct kref ref;
- struct ion_client *client;
- struct ion_buffer *buffer;
- struct rb_node node;
- unsigned int kmap_cnt;
- int id;
-};
-
-/**
- * struct ion_heap_ops - ops to operate on a given heap
- * @allocate: allocate memory
- * @free: free memory
- * @map_kernel: map memory into the kernel
- * @unmap_kernel: unmap memory from the kernel
- * @map_user: map memory to userspace
- * @shrink: shrink the heap's caches/freelist, called from a shrinker
- *
- * allocate and map_user return 0 on success, -errno on error.
- * map_kernel returns a pointer on success, ERR_PTR on
- * error. @free will be called with ION_PRIV_FLAG_SHRINKER_FREE set in
- * the buffer's private_flags when called from a shrinker. In that
- * case, the pages being free'd must be truly free'd back to the
- * system, not put in a page pool or otherwise cached.
- */
-struct ion_heap_ops {
- int (*allocate)(struct ion_heap *heap,
- struct ion_buffer *buffer, unsigned long len,
- unsigned long align, unsigned long flags);
- void (*free)(struct ion_buffer *buffer);
- void * (*map_kernel)(struct ion_heap *heap, struct ion_buffer *buffer);
- void (*unmap_kernel)(struct ion_heap *heap, struct ion_buffer *buffer);
- int (*map_user)(struct ion_heap *mapper, struct ion_buffer *buffer,
- struct vm_area_struct *vma);
- int (*shrink)(struct ion_heap *heap, gfp_t gfp_mask, int nr_to_scan);
-};
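To make the ops contract documented above concrete, here is a minimal, hypothetical backend in the style of the contig heap. The example_* names are invented; the allocate()/free() signatures follow this (removed) version of the struct, and the map helpers are the generic ion_heap_map_* routines declared later in this header.

/* Sketch only: a single-allocation heap backend implementing ion_heap_ops. */
static int example_heap_allocate(struct ion_heap *heap,
				 struct ion_buffer *buffer,
				 unsigned long len, unsigned long align,
				 unsigned long flags)
{
	struct sg_table *table;
	struct page *page;
	int ret;

	page = alloc_pages(GFP_KERNEL, get_order(len));
	if (!page)
		return -ENOMEM;

	table = kmalloc(sizeof(*table), GFP_KERNEL);
	if (!table) {
		ret = -ENOMEM;
		goto free_pages;
	}
	ret = sg_alloc_table(table, 1, GFP_KERNEL);
	if (ret)
		goto free_table;

	sg_set_page(table->sgl, page, PAGE_ALIGN(len), 0);
	buffer->sg_table = table;
	return 0;

free_table:
	kfree(table);
free_pages:
	__free_pages(page, get_order(len));
	return ret;
}

static void example_heap_free(struct ion_buffer *buffer)
{
	struct page *page = sg_page(buffer->sg_table->sgl);

	__free_pages(page, get_order(buffer->size));
	sg_free_table(buffer->sg_table);
	kfree(buffer->sg_table);
}

static struct ion_heap_ops example_heap_ops = {
	.allocate	= example_heap_allocate,
	.free		= example_heap_free,
	.map_kernel	= ion_heap_map_kernel,
	.unmap_kernel	= ion_heap_unmap_kernel,
	.map_user	= ion_heap_map_user,
};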
-
-/**
- * heap flags - flags between the heaps and core ion code
- */
-#define ION_HEAP_FLAG_DEFER_FREE (1 << 0)
-
-/**
- * private flags - flags internal to ion
- */
-/*
- * Buffer is being freed from a shrinker function. Skip any possible
- * heap-specific caching mechanism (e.g. page pools). Guarantees that
- * any buffer storage that came from the system allocator will be
- * returned to the system allocator.
- */
-#define ION_PRIV_FLAG_SHRINKER_FREE (1 << 0)
-
-/**
- * struct ion_heap - represents a heap in the system
- * @node: rb node to put the heap on the device's tree of heaps
- * @dev: back pointer to the ion_device
- * @type: type of heap
- * @ops: ops struct as above
- * @flags: flags
- * @id: id of heap, also indicates priority of this heap when
- * allocating. These are specified by platform data and
- * MUST be unique
- * @name: used for debugging
- * @shrinker: a shrinker for the heap
- * @free_list: free list head if deferred free is used
- * @free_list_size: size of the deferred free list in bytes
- * @free_lock: protects the free list
- * @waitqueue: queue to wait on from deferred free thread
- * @task: task struct of deferred free thread
- * @debug_show: called when heap debug file is read to add any
- * heap specific debug info to output
- *
- * Represents a pool of memory from which buffers can be made. In some
- * systems the only heap is regular system memory allocated via vmalloc.
- * On others, some blocks might require large physically contiguous buffers
- * that are allocated from a specially reserved heap.
- */
-struct ion_heap {
- struct plist_node node;
- struct ion_device *dev;
- enum ion_heap_type type;
- struct ion_heap_ops *ops;
- unsigned long flags;
- unsigned int id;
- const char *name;
- struct shrinker shrinker;
- struct list_head free_list;
- size_t free_list_size;
- spinlock_t free_lock;
- wait_queue_head_t waitqueue;
- struct task_struct *task;
-
- int (*debug_show)(struct ion_heap *heap, struct seq_file *, void *);
-};
-
-/**
- * ion_buffer_cached - this ion buffer is cached
- * @buffer: buffer
- *
- * indicates whether this ion buffer is cached
- */
-bool ion_buffer_cached(struct ion_buffer *buffer);
-
-/**
- * ion_buffer_fault_user_mappings - fault in user mappings of this buffer
- * @buffer: buffer
- *
- * indicates whether userspace mappings of this buffer will be faulted
- * in, this can affect how buffers are allocated from the heap.
- */
-bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer);
-
-/**
- * ion_device_create - allocates and returns an ion device
- * @custom_ioctl: arch specific ioctl function if applicable
- *
- * returns a valid device or an ERR_PTR() encoded error on failure
- */
-struct ion_device *ion_device_create(long (*custom_ioctl)
- (struct ion_client *client,
- unsigned int cmd,
- unsigned long arg));
-
-/**
- * ion_device_destroy - frees the device and its resources
- * @dev: the device
- */
-void ion_device_destroy(struct ion_device *dev);
-
-/**
- * ion_device_add_heap - adds a heap to the ion device
- * @dev: the device
- * @heap: the heap to add
- */
-void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap);
-
-/**
- * some helpers for common operations on buffers using the sg_table
- * and vaddr fields
- */
-void *ion_heap_map_kernel(struct ion_heap *heap, struct ion_buffer *buffer);
-void ion_heap_unmap_kernel(struct ion_heap *heap, struct ion_buffer *buffer);
-int ion_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
- struct vm_area_struct *vma);
-int ion_heap_buffer_zero(struct ion_buffer *buffer);
-int ion_heap_pages_zero(struct page *page, size_t size, pgprot_t pgprot);
-
-/**
- * ion_heap_init_shrinker
- * @heap: the heap
- *
- * If a heap sets the ION_HEAP_FLAG_DEFER_FREE flag or defines the shrink op
- * this function will be called to setup a shrinker to shrink the freelists
- * and call the heap's shrink op.
- */
-void ion_heap_init_shrinker(struct ion_heap *heap);
-
-/**
- * ion_heap_init_deferred_free -- initialize deferred free functionality
- * @heap: the heap
- *
- * If a heap sets the ION_HEAP_FLAG_DEFER_FREE flag this function will
- * be called to setup deferred frees. Calls to free the buffer will
- * return immediately and the actual free will occur some time later
- */
-int ion_heap_init_deferred_free(struct ion_heap *heap);
-
-/**
- * ion_heap_freelist_add - add a buffer to the deferred free list
- * @heap: the heap
- * @buffer: the buffer
- *
- * Adds an item to the deferred freelist.
- */
-void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer *buffer);
-
-/**
- * ion_heap_freelist_drain - drain the deferred free list
- * @heap: the heap
- * @size: amount of memory to drain in bytes
- *
- * Drains the indicated amount of memory from the deferred freelist immediately.
- * Returns the total amount freed. The total freed may be higher depending
- * on the size of the items in the list, or lower if there is insufficient
- * total memory on the freelist.
- */
-size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size);
-
-/**
- * ion_heap_freelist_shrink - drain the deferred free
- * list, skipping any heap-specific
- * pooling or caching mechanisms
- *
- * @heap: the heap
- * @size: amount of memory to drain in bytes
- *
- * Drains the indicated amount of memory from the deferred freelist immediately.
- * Returns the total amount freed. The total freed may be higher depending
- * on the size of the items in the list, or lower if there is insufficient
- * total memory on the freelist.
- *
- * Unlike with @ion_heap_freelist_drain, don't put any pages back into
- * page pools or otherwise cache the pages. Everything must be
- * genuinely free'd back to the system. If you're free'ing from a
- * shrinker you probably want to use this. Note that this relies on
- * the heap.ops.free callback honoring the ION_PRIV_FLAG_SHRINKER_FREE
- * flag.
- */
-size_t ion_heap_freelist_shrink(struct ion_heap *heap,
- size_t size);
-
-/**
- * ion_heap_freelist_size - returns the size of the freelist in bytes
- * @heap: the heap
- */
-size_t ion_heap_freelist_size(struct ion_heap *heap);
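As a usage note for the deferred-free machinery documented above: a heap that wants deferred freeing typically flags itself and calls the two init helpers right after creation. A hedged sketch (the helper name is invented):

/* Sketch only: enabling deferred free and the freelist shrinker on a heap. */
static int example_enable_deferred_free(struct ion_heap *heap)
{
	int ret;

	heap->flags |= ION_HEAP_FLAG_DEFER_FREE;

	ret = ion_heap_init_deferred_free(heap);
	if (ret)
		return ret;

	/* needed when ION_HEAP_FLAG_DEFER_FREE is set or ops->shrink exists */
	ion_heap_init_shrinker(heap);
	return 0;
}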
-
-
-/**
- * functions for creating and destroying the built in ion heaps.
- * architectures can add their own custom architecture specific
- * heaps as appropriate.
- */
-
-struct ion_heap *ion_heap_create(struct ion_platform_heap *heap_data);
-void ion_heap_destroy(struct ion_heap *heap);
-struct ion_heap *ion_system_heap_create(struct ion_platform_heap *unused);
-void ion_system_heap_destroy(struct ion_heap *heap);
-
-struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *heap);
-void ion_system_contig_heap_destroy(struct ion_heap *heap);
-
-struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *heap_data);
-void ion_carveout_heap_destroy(struct ion_heap *heap);
-
-struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *heap_data);
-void ion_chunk_heap_destroy(struct ion_heap *heap);
-struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *data);
-void ion_cma_heap_destroy(struct ion_heap *heap);
-
-/**
- * functions for creating and destroying a heap pool -- allows you
- * to keep a pool of pre allocated memory to use from your heap. Keeping
- * a pool of memory that is ready for DMA, i.e. any cached mappings have been
- * invalidated from the cache, provides a significant performance benefit on
- * many systems
- */
-
-/**
- * struct ion_page_pool - pagepool struct
- * @high_count: number of highmem items in the pool
- * @low_count: number of lowmem items in the pool
- * @high_items: list of highmem items
- * @low_items: list of lowmem items
- * @mutex: lock protecting this struct, in particular the counts
- * and item lists
- * @gfp_mask: gfp_mask to use for allocations
- * @order: order of pages in the pool
- * @list: plist node for list of pools
- * @cached: whether this is a pool of cached pages
- *
- * Allows you to keep a pool of pre allocated pages to use from your heap.
- * Keeping a pool of pages that is ready for DMA, i.e. any cached mappings have
- * been invalidated from the cache, provides a significant performance benefit
- * on many systems
- */
-struct ion_page_pool {
- int high_count;
- int low_count;
- bool cached;
- struct list_head high_items;
- struct list_head low_items;
- struct mutex mutex;
- gfp_t gfp_mask;
- unsigned int order;
- struct plist_node list;
-};
-
-struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order,
- bool cached);
-void ion_page_pool_destroy(struct ion_page_pool *pool);
-struct page *ion_page_pool_alloc(struct ion_page_pool *pool);
-void ion_page_pool_free(struct ion_page_pool *pool, struct page *page);
-
-/**
- * ion_page_pool_shrink - shrinks the size of the memory cached in the pool
- * @pool: the pool
- * @gfp_mask: the memory type to reclaim
- * @nr_to_scan: number of pages to attempt to free
- *
- * returns the number of pages freed
- */
-int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
- int nr_to_scan);
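A short, hedged usage sketch of the page-pool API declared above (order-0 uncached pool, arbitrary gfp flags; the function name is invented):

/* Sketch only: basic lifecycle of an ion_page_pool. */
static int example_pool_demo(void)
{
	struct ion_page_pool *pool;
	struct page *page;

	pool = ion_page_pool_create(GFP_KERNEL | __GFP_ZERO, 0, false);
	if (!pool)
		return -ENOMEM;

	page = ion_page_pool_alloc(pool);	/* falls back to the page allocator if empty */
	if (page)
		ion_page_pool_free(pool, page);	/* returns the page to the pool */

	ion_page_pool_destroy(pool);
	return 0;
}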
-
-/**
- * ion_pages_sync_for_device - cache flush pages for use with the specified
- * device
- * @dev: the device the pages will be used with
- * @page: the first page to be flushed
- * @size: size in bytes of region to be flushed
- * @dir: direction of dma transfer
- */
-void ion_pages_sync_for_device(struct device *dev, struct page *page,
- size_t size, enum dma_data_direction dir);
-
-long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
-
-int ion_sync_for_device(struct ion_client *client, int fd);
-
-struct ion_handle *ion_handle_get_by_id_nolock(struct ion_client *client,
- int id);
-
-void ion_free_nolock(struct ion_client *client, struct ion_handle *handle);
-
-int ion_handle_put_nolock(struct ion_handle *handle);
-
-struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
- int id);
-
-int ion_handle_put(struct ion_handle *handle);
-
-int ion_query_heaps(struct ion_client *client, struct ion_heap_query *query);
-
-#endif /* _ION_PRIV_H */
diff --git a/drivers/staging/android/ion/ion_system_heap.c b/drivers/staging/android/ion/ion_system_heap.c
index 3ebbb75746e8..c50f2d9fc58c 100644
--- a/drivers/staging/android/ion/ion_system_heap.c
+++ b/drivers/staging/android/ion/ion_system_heap.c
@@ -24,7 +24,6 @@
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "ion.h"
-#include "ion_priv.h"
#define NUM_ORDERS ARRAY_SIZE(orders)
@@ -75,9 +74,6 @@ static struct page *alloc_buffer_page(struct ion_system_heap *heap,
page = ion_page_pool_alloc(pool);
- if (cached)
- ion_pages_sync_for_device(NULL, page, PAGE_SIZE << order,
- DMA_BIDIRECTIONAL);
return page;
}
@@ -129,7 +125,7 @@ static struct page *alloc_largest_available(struct ion_system_heap *heap,
static int ion_system_heap_allocate(struct ion_heap *heap,
struct ion_buffer *buffer,
- unsigned long size, unsigned long align,
+ unsigned long size,
unsigned long flags)
{
struct ion_system_heap *sys_heap = container_of(heap,
@@ -143,9 +139,6 @@ static int ion_system_heap_allocate(struct ion_heap *heap,
unsigned long size_remaining = PAGE_ALIGN(size);
unsigned int max_order = orders[0];
- if (align > PAGE_SIZE)
- return -EINVAL;
-
if (size / PAGE_SIZE > totalram_pages / 2)
return -ENOMEM;
@@ -327,7 +320,7 @@ err_create_pool:
return -ENOMEM;
}
-struct ion_heap *ion_system_heap_create(struct ion_platform_heap *unused)
+static struct ion_heap *__ion_system_heap_create(void)
{
struct ion_system_heap *heap;
@@ -355,24 +348,23 @@ free_heap:
return ERR_PTR(-ENOMEM);
}
-void ion_system_heap_destroy(struct ion_heap *heap)
+static int ion_system_heap_create(void)
{
- struct ion_system_heap *sys_heap = container_of(heap,
- struct ion_system_heap,
- heap);
- int i;
+ struct ion_heap *heap;
- for (i = 0; i < NUM_ORDERS; i++) {
- ion_page_pool_destroy(sys_heap->uncached_pools[i]);
- ion_page_pool_destroy(sys_heap->cached_pools[i]);
- }
- kfree(sys_heap);
+ heap = __ion_system_heap_create();
+ if (IS_ERR(heap))
+ return PTR_ERR(heap);
+ heap->name = "ion_system_heap";
+
+ ion_device_add_heap(heap);
+ return 0;
}
+device_initcall(ion_system_heap_create);
static int ion_system_contig_heap_allocate(struct ion_heap *heap,
struct ion_buffer *buffer,
unsigned long len,
- unsigned long align,
unsigned long flags)
{
int order = get_order(len);
@@ -381,9 +373,6 @@ static int ion_system_contig_heap_allocate(struct ion_heap *heap,
unsigned long i;
int ret;
- if (align > (PAGE_SIZE << order))
- return -EINVAL;
-
page = alloc_pages(low_order_gfp_flags, order);
if (!page)
return -ENOMEM;
@@ -408,8 +397,6 @@ static int ion_system_contig_heap_allocate(struct ion_heap *heap,
buffer->sg_table = table;
- ion_pages_sync_for_device(NULL, page, len, DMA_BIDIRECTIONAL);
-
return 0;
free_table:
@@ -442,7 +429,7 @@ static struct ion_heap_ops kmalloc_ops = {
.map_user = ion_heap_map_user,
};
-struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *unused)
+static struct ion_heap *__ion_system_contig_heap_create(void)
{
struct ion_heap *heap;
@@ -451,10 +438,20 @@ struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *unused)
return ERR_PTR(-ENOMEM);
heap->ops = &kmalloc_ops;
heap->type = ION_HEAP_TYPE_SYSTEM_CONTIG;
+ heap->name = "ion_system_contig_heap";
return heap;
}
-void ion_system_contig_heap_destroy(struct ion_heap *heap)
+static int ion_system_contig_heap_create(void)
{
- kfree(heap);
+ struct ion_heap *heap;
+
+ heap = __ion_system_contig_heap_create();
+ if (IS_ERR(heap))
+ return PTR_ERR(heap);
+
+ ion_device_add_heap(heap);
+ return 0;
}
+device_initcall(ion_system_contig_heap_create);
+
diff --git a/drivers/staging/android/ion/ion_test.c b/drivers/staging/android/ion/ion_test.c
deleted file mode 100644
index 5abf8320a96a..000000000000
--- a/drivers/staging/android/ion/ion_test.c
+++ /dev/null
@@ -1,305 +0,0 @@
-/*
- *
- * Copyright (C) 2013 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#define pr_fmt(fmt) "ion-test: " fmt
-
-#include <linux/dma-buf.h>
-#include <linux/dma-direction.h>
-#include <linux/fs.h>
-#include <linux/miscdevice.h>
-#include <linux/mm.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/uaccess.h>
-#include <linux/vmalloc.h>
-
-#include "ion.h"
-#include "../uapi/ion_test.h"
-
-#define u64_to_uptr(x) ((void __user *)(unsigned long)(x))
-
-struct ion_test_device {
- struct miscdevice misc;
-};
-
-struct ion_test_data {
- struct dma_buf *dma_buf;
- struct device *dev;
-};
-
-static int ion_handle_test_dma(struct device *dev, struct dma_buf *dma_buf,
- void __user *ptr, size_t offset, size_t size,
- bool write)
-{
- int ret = 0;
- struct dma_buf_attachment *attach;
- struct sg_table *table;
- pgprot_t pgprot = pgprot_writecombine(PAGE_KERNEL);
- enum dma_data_direction dir = write ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
- struct sg_page_iter sg_iter;
- unsigned long offset_page;
-
- attach = dma_buf_attach(dma_buf, dev);
- if (IS_ERR(attach))
- return PTR_ERR(attach);
-
- table = dma_buf_map_attachment(attach, dir);
- if (IS_ERR(table))
- return PTR_ERR(table);
-
- offset_page = offset >> PAGE_SHIFT;
- offset %= PAGE_SIZE;
-
- for_each_sg_page(table->sgl, &sg_iter, table->nents, offset_page) {
- struct page *page = sg_page_iter_page(&sg_iter);
- void *vaddr = vmap(&page, 1, VM_MAP, pgprot);
- size_t to_copy = PAGE_SIZE - offset;
-
- to_copy = min(to_copy, size);
- if (!vaddr) {
- ret = -ENOMEM;
- goto err;
- }
-
- if (write)
- ret = copy_from_user(vaddr + offset, ptr, to_copy);
- else
- ret = copy_to_user(ptr, vaddr + offset, to_copy);
-
- vunmap(vaddr);
- if (ret) {
- ret = -EFAULT;
- goto err;
- }
- size -= to_copy;
- if (!size)
- break;
- ptr += to_copy;
- offset = 0;
- }
-
-err:
- dma_buf_unmap_attachment(attach, table, dir);
- dma_buf_detach(dma_buf, attach);
- return ret;
-}
-
-static int ion_handle_test_kernel(struct dma_buf *dma_buf, void __user *ptr,
- size_t offset, size_t size, bool write)
-{
- int ret;
- unsigned long page_offset = offset >> PAGE_SHIFT;
- size_t copy_offset = offset % PAGE_SIZE;
- size_t copy_size = size;
- enum dma_data_direction dir = write ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
-
- if (offset > dma_buf->size || size > dma_buf->size - offset)
- return -EINVAL;
-
- ret = dma_buf_begin_cpu_access(dma_buf, dir);
- if (ret)
- return ret;
-
- while (copy_size > 0) {
- size_t to_copy;
- void *vaddr = dma_buf_kmap(dma_buf, page_offset);
-
- if (!vaddr) {
- ret = -ENOMEM;
- goto err;
- }
-
- to_copy = min_t(size_t, PAGE_SIZE - copy_offset, copy_size);
-
- if (write)
- ret = copy_from_user(vaddr + copy_offset, ptr, to_copy);
- else
- ret = copy_to_user(ptr, vaddr + copy_offset, to_copy);
-
- dma_buf_kunmap(dma_buf, page_offset, vaddr);
- if (ret) {
- ret = -EFAULT;
- goto err;
- }
-
- copy_size -= to_copy;
- ptr += to_copy;
- page_offset++;
- copy_offset = 0;
- }
-err:
- dma_buf_end_cpu_access(dma_buf, dir);
- return ret;
-}
-
-static long ion_test_ioctl(struct file *filp, unsigned int cmd,
- unsigned long arg)
-{
- struct ion_test_data *test_data = filp->private_data;
- int ret = 0;
-
- union {
- struct ion_test_rw_data test_rw;
- } data;
-
- if (_IOC_SIZE(cmd) > sizeof(data))
- return -EINVAL;
-
- if (_IOC_DIR(cmd) & _IOC_WRITE)
- if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd)))
- return -EFAULT;
-
- switch (cmd) {
- case ION_IOC_TEST_SET_FD:
- {
- struct dma_buf *dma_buf = NULL;
- int fd = arg;
-
- if (fd >= 0) {
- dma_buf = dma_buf_get((int)arg);
- if (IS_ERR(dma_buf))
- return PTR_ERR(dma_buf);
- }
- if (test_data->dma_buf)
- dma_buf_put(test_data->dma_buf);
- test_data->dma_buf = dma_buf;
- break;
- }
- case ION_IOC_TEST_DMA_MAPPING:
- {
- ret = ion_handle_test_dma(test_data->dev, test_data->dma_buf,
- u64_to_uptr(data.test_rw.ptr),
- data.test_rw.offset,
- data.test_rw.size,
- data.test_rw.write);
- break;
- }
- case ION_IOC_TEST_KERNEL_MAPPING:
- {
- ret = ion_handle_test_kernel(test_data->dma_buf,
- u64_to_uptr(data.test_rw.ptr),
- data.test_rw.offset,
- data.test_rw.size,
- data.test_rw.write);
- break;
- }
- default:
- return -ENOTTY;
- }
-
- if (_IOC_DIR(cmd) & _IOC_READ) {
- if (copy_to_user((void __user *)arg, &data, sizeof(data)))
- return -EFAULT;
- }
- return ret;
-}
-
-static int ion_test_open(struct inode *inode, struct file *file)
-{
- struct ion_test_data *data;
- struct miscdevice *miscdev = file->private_data;
-
- data = kzalloc(sizeof(*data), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- data->dev = miscdev->parent;
-
- file->private_data = data;
-
- return 0;
-}
-
-static int ion_test_release(struct inode *inode, struct file *file)
-{
- struct ion_test_data *data = file->private_data;
-
- kfree(data);
-
- return 0;
-}
-
-static const struct file_operations ion_test_fops = {
- .owner = THIS_MODULE,
- .unlocked_ioctl = ion_test_ioctl,
- .compat_ioctl = ion_test_ioctl,
- .open = ion_test_open,
- .release = ion_test_release,
-};
-
-static int __init ion_test_probe(struct platform_device *pdev)
-{
- int ret;
- struct ion_test_device *testdev;
-
- testdev = devm_kzalloc(&pdev->dev, sizeof(struct ion_test_device),
- GFP_KERNEL);
- if (!testdev)
- return -ENOMEM;
-
- testdev->misc.minor = MISC_DYNAMIC_MINOR;
- testdev->misc.name = "ion-test";
- testdev->misc.fops = &ion_test_fops;
- testdev->misc.parent = &pdev->dev;
- ret = misc_register(&testdev->misc);
- if (ret) {
- pr_err("failed to register misc device.\n");
- return ret;
- }
-
- platform_set_drvdata(pdev, testdev);
-
- return 0;
-}
-
-static int ion_test_remove(struct platform_device *pdev)
-{
- struct ion_test_device *testdev;
-
- testdev = platform_get_drvdata(pdev);
- if (!testdev)
- return -ENODATA;
-
- misc_deregister(&testdev->misc);
- return 0;
-}
-
-static struct platform_device *ion_test_pdev;
-static struct platform_driver ion_test_platform_driver = {
- .remove = ion_test_remove,
- .driver = {
- .name = "ion-test",
- },
-};
-
-static int __init ion_test_init(void)
-{
- ion_test_pdev = platform_device_register_simple("ion-test",
- -1, NULL, 0);
- if (IS_ERR(ion_test_pdev))
- return PTR_ERR(ion_test_pdev);
-
- return platform_driver_probe(&ion_test_platform_driver, ion_test_probe);
-}
-
-static void __exit ion_test_exit(void)
-{
- platform_driver_unregister(&ion_test_platform_driver);
- platform_device_unregister(ion_test_pdev);
-}
-
-module_init(ion_test_init);
-module_exit(ion_test_exit);
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/android/ion/tegra/Makefile b/drivers/staging/android/ion/tegra/Makefile
deleted file mode 100644
index 808f1f53f11a..000000000000
--- a/drivers/staging/android/ion/tegra/Makefile
+++ /dev/null
@@ -1 +0,0 @@
-obj-$(CONFIG_ION_TEGRA) += tegra_ion.o
diff --git a/drivers/staging/android/ion/tegra/tegra_ion.c b/drivers/staging/android/ion/tegra/tegra_ion.c
deleted file mode 100644
index 49e55e5acead..000000000000
--- a/drivers/staging/android/ion/tegra/tegra_ion.c
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- * drivers/staging/android/ion/tegra/tegra_ion.c
- *
- * Copyright (C) 2011 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/err.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-#include "../ion.h"
-#include "../ion_priv.h"
-
-static struct ion_device *idev;
-static int num_heaps;
-static struct ion_heap **heaps;
-
-static int tegra_ion_probe(struct platform_device *pdev)
-{
- struct ion_platform_data *pdata = pdev->dev.platform_data;
- int err;
- int i;
-
- num_heaps = pdata->nr;
-
- heaps = devm_kcalloc(&pdev->dev, pdata->nr,
- sizeof(struct ion_heap *), GFP_KERNEL);
-
- idev = ion_device_create(NULL);
- if (IS_ERR(idev))
- return PTR_ERR(idev);
-
- /* create the heaps as specified in the board file */
- for (i = 0; i < num_heaps; i++) {
- struct ion_platform_heap *heap_data = &pdata->heaps[i];
-
- heaps[i] = ion_heap_create(heap_data);
- if (IS_ERR_OR_NULL(heaps[i])) {
- err = PTR_ERR(heaps[i]);
- goto err;
- }
- ion_device_add_heap(idev, heaps[i]);
- }
- platform_set_drvdata(pdev, idev);
- return 0;
-err:
- for (i = 0; i < num_heaps; ++i)
- ion_heap_destroy(heaps[i]);
- return err;
-}
-
-static int tegra_ion_remove(struct platform_device *pdev)
-{
- struct ion_device *idev = platform_get_drvdata(pdev);
- int i;
-
- ion_device_destroy(idev);
- for (i = 0; i < num_heaps; i++)
- ion_heap_destroy(heaps[i]);
- return 0;
-}
-
-static struct platform_driver ion_driver = {
- .probe = tegra_ion_probe,
- .remove = tegra_ion_remove,
- .driver = { .name = "ion-tegra" }
-};
-
-module_platform_driver(ion_driver);
-
diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c
deleted file mode 100644
index 054660049395..000000000000
--- a/drivers/staging/android/lowmemorykiller.c
+++ /dev/null
@@ -1,212 +0,0 @@
-/* drivers/misc/lowmemorykiller.c
- *
- * The lowmemorykiller driver lets user-space specify a set of memory thresholds
- * where processes with a range of oom_score_adj values will get killed. Specify
- * the minimum oom_score_adj values in
- * /sys/module/lowmemorykiller/parameters/adj and the number of free pages in
- * /sys/module/lowmemorykiller/parameters/minfree. Both files take a comma
- * separated list of numbers in ascending order.
- *
- * For example, write "0,8" to /sys/module/lowmemorykiller/parameters/adj and
- * "1024,4096" to /sys/module/lowmemorykiller/parameters/minfree to kill
- * processes with a oom_score_adj value of 8 or higher when the free memory
- * drops below 4096 pages and kill processes with a oom_score_adj value of 0 or
- * higher when the free memory drops below 1024 pages.
- *
- * The driver considers memory used for caches to be free, but if a large
- * percentage of the cached memory is locked this can be very inaccurate
- * and processes may not get killed until the normal oom killer is triggered.
- *
- * Copyright (C) 2007-2008 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
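To illustrate the adj/minfree pairing described in the comment above, here is a simplified, hypothetical version of the threshold walk that lowmem_scan() below performs. It reuses the "0,8" / "1024,4096" example and, unlike the real code, collapses the free-page and file-page counters into a single value for brevity:

/* Sketch only: how the adj/minfree pairs select a kill threshold. */
static short example_pick_min_score_adj(int free_pages)
{
	static const short adj[] = { 0, 8 };		/* parameters/adj */
	static const int minfree[] = { 1024, 4096 };	/* parameters/minfree */
	int i;

	for (i = 0; i < 2; i++) {
		if (free_pages < minfree[i])
			return adj[i];	/* kill tasks at or above this adj */
	}
	return OOM_SCORE_ADJ_MAX + 1;	/* enough memory: kill nothing */
}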
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/init.h>
-#include <linux/moduleparam.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/oom.h>
-#include <linux/sched/signal.h>
-#include <linux/swap.h>
-#include <linux/rcupdate.h>
-#include <linux/profile.h>
-#include <linux/notifier.h>
-
-static u32 lowmem_debug_level = 1;
-static short lowmem_adj[6] = {
- 0,
- 1,
- 6,
- 12,
-};
-
-static int lowmem_adj_size = 4;
-static int lowmem_minfree[6] = {
- 3 * 512, /* 6MB */
- 2 * 1024, /* 8MB */
- 4 * 1024, /* 16MB */
- 16 * 1024, /* 64MB */
-};
-
-static int lowmem_minfree_size = 4;
-
-static unsigned long lowmem_deathpending_timeout;
-
-#define lowmem_print(level, x...) \
- do { \
- if (lowmem_debug_level >= (level)) \
- pr_info(x); \
- } while (0)
-
-static unsigned long lowmem_count(struct shrinker *s,
- struct shrink_control *sc)
-{
- return global_node_page_state(NR_ACTIVE_ANON) +
- global_node_page_state(NR_ACTIVE_FILE) +
- global_node_page_state(NR_INACTIVE_ANON) +
- global_node_page_state(NR_INACTIVE_FILE);
-}
-
-static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc)
-{
- struct task_struct *tsk;
- struct task_struct *selected = NULL;
- unsigned long rem = 0;
- int tasksize;
- int i;
- short min_score_adj = OOM_SCORE_ADJ_MAX + 1;
- int minfree = 0;
- int selected_tasksize = 0;
- short selected_oom_score_adj;
- int array_size = ARRAY_SIZE(lowmem_adj);
- int other_free = global_page_state(NR_FREE_PAGES) - totalreserve_pages;
- int other_file = global_node_page_state(NR_FILE_PAGES) -
- global_node_page_state(NR_SHMEM) -
- total_swapcache_pages();
-
- if (lowmem_adj_size < array_size)
- array_size = lowmem_adj_size;
- if (lowmem_minfree_size < array_size)
- array_size = lowmem_minfree_size;
- for (i = 0; i < array_size; i++) {
- minfree = lowmem_minfree[i];
- if (other_free < minfree && other_file < minfree) {
- min_score_adj = lowmem_adj[i];
- break;
- }
- }
-
- lowmem_print(3, "lowmem_scan %lu, %x, ofree %d %d, ma %hd\n",
- sc->nr_to_scan, sc->gfp_mask, other_free,
- other_file, min_score_adj);
-
- if (min_score_adj == OOM_SCORE_ADJ_MAX + 1) {
- lowmem_print(5, "lowmem_scan %lu, %x, return 0\n",
- sc->nr_to_scan, sc->gfp_mask);
- return 0;
- }
-
- selected_oom_score_adj = min_score_adj;
-
- rcu_read_lock();
- for_each_process(tsk) {
- struct task_struct *p;
- short oom_score_adj;
-
- if (tsk->flags & PF_KTHREAD)
- continue;
-
- p = find_lock_task_mm(tsk);
- if (!p)
- continue;
-
- if (task_lmk_waiting(p) &&
- time_before_eq(jiffies, lowmem_deathpending_timeout)) {
- task_unlock(p);
- rcu_read_unlock();
- return 0;
- }
- oom_score_adj = p->signal->oom_score_adj;
- if (oom_score_adj < min_score_adj) {
- task_unlock(p);
- continue;
- }
- tasksize = get_mm_rss(p->mm);
- task_unlock(p);
- if (tasksize <= 0)
- continue;
- if (selected) {
- if (oom_score_adj < selected_oom_score_adj)
- continue;
- if (oom_score_adj == selected_oom_score_adj &&
- tasksize <= selected_tasksize)
- continue;
- }
- selected = p;
- selected_tasksize = tasksize;
- selected_oom_score_adj = oom_score_adj;
- lowmem_print(2, "select '%s' (%d), adj %hd, size %d, to kill\n",
- p->comm, p->pid, oom_score_adj, tasksize);
- }
- if (selected) {
- task_lock(selected);
- send_sig(SIGKILL, selected, 0);
- if (selected->mm)
- task_set_lmk_waiting(selected);
- task_unlock(selected);
- lowmem_print(1, "Killing '%s' (%d), adj %hd,\n"
- " to free %ldkB on behalf of '%s' (%d) because\n"
- " cache %ldkB is below limit %ldkB for oom_score_adj %hd\n"
- " Free memory is %ldkB above reserved\n",
- selected->comm, selected->pid,
- selected_oom_score_adj,
- selected_tasksize * (long)(PAGE_SIZE / 1024),
- current->comm, current->pid,
- other_file * (long)(PAGE_SIZE / 1024),
- minfree * (long)(PAGE_SIZE / 1024),
- min_score_adj,
- other_free * (long)(PAGE_SIZE / 1024));
- lowmem_deathpending_timeout = jiffies + HZ;
- rem += selected_tasksize;
- }
-
- lowmem_print(4, "lowmem_scan %lu, %x, return %lu\n",
- sc->nr_to_scan, sc->gfp_mask, rem);
- rcu_read_unlock();
- return rem;
-}
-
-static struct shrinker lowmem_shrinker = {
- .scan_objects = lowmem_scan,
- .count_objects = lowmem_count,
- .seeks = DEFAULT_SEEKS * 16
-};
-
-static int __init lowmem_init(void)
-{
- register_shrinker(&lowmem_shrinker);
- return 0;
-}
-device_initcall(lowmem_init);
-
-/*
- * not really modular, but the easiest way to keep compat with existing
- * bootargs behaviour is to continue using module_param here.
- */
-module_param_named(cost, lowmem_shrinker.seeks, int, 0644);
-module_param_array_named(adj, lowmem_adj, short, &lowmem_adj_size, 0644);
-module_param_array_named(minfree, lowmem_minfree, uint, &lowmem_minfree_size,
- 0644);
-module_param_named(debug_level, lowmem_debug_level, uint, 0644);
-
diff --git a/drivers/staging/android/uapi/ion.h b/drivers/staging/android/uapi/ion.h
index 14cd8738ecfc..b76db1b2e197 100644
--- a/drivers/staging/android/uapi/ion.h
+++ b/drivers/staging/android/uapi/ion.h
@@ -20,8 +20,6 @@
#include <linux/ioctl.h>
#include <linux/types.h>
-typedef int ion_user_handle_t;
-
/**
* enum ion_heap_types - list of all possible types of heaps
* @ION_HEAP_TYPE_SYSTEM: memory allocated via vmalloc
@@ -76,7 +74,6 @@ enum ion_heap_type {
/**
* struct ion_allocation_data - metadata passed from userspace for allocations
* @len: size of the allocation
- * @align: required alignment of the allocation
* @heap_id_mask: mask of heap ids to allocate from
* @flags: flags passed to heap
* @handle: pointer that will be populated with a cookie to use to
@@ -85,47 +82,11 @@ enum ion_heap_type {
* Provided by userspace as an argument to the ioctl
*/
struct ion_allocation_data {
- size_t len;
- size_t align;
- unsigned int heap_id_mask;
- unsigned int flags;
- ion_user_handle_t handle;
-};
-
-/**
- * struct ion_fd_data - metadata passed to/from userspace for a handle/fd pair
- * @handle: a handle
- * @fd: a file descriptor representing that handle
- *
- * For ION_IOC_SHARE or ION_IOC_MAP userspace populates the handle field with
- * the handle returned from ion alloc, and the kernel returns the file
- * descriptor to share or map in the fd field. For ION_IOC_IMPORT, userspace
- * provides the file descriptor and the kernel returns the handle.
- */
-struct ion_fd_data {
- ion_user_handle_t handle;
- int fd;
-};
-
-/**
- * struct ion_handle_data - a handle passed to/from the kernel
- * @handle: a handle
- */
-struct ion_handle_data {
- ion_user_handle_t handle;
-};
-
-/**
- * struct ion_custom_data - metadata passed to/from userspace for a custom ioctl
- * @cmd: the custom ioctl function to call
- * @arg: additional data to pass to the custom ioctl, typically a user
- * pointer to a predefined structure
- *
- * This works just like the regular cmd and arg fields of an ioctl.
- */
-struct ion_custom_data {
- unsigned int cmd;
- unsigned long arg;
+ __u64 len;
+ __u32 heap_id_mask;
+ __u32 flags;
+ __u32 fd;
+ __u32 unused;
};
#define MAX_HEAP_NAME 32
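For context on the reworked ABI in the hunk above: an allocation now comes back as a dma-buf file descriptor in the fd field, with no opaque handles involved. The userspace sketch below is illustrative only; neither ION_IOC_ALLOC nor the /dev/ion node appears in this hunk, so both are assumptions here.

/* Sketch only: userspace allocation against the reworked ion ABI. */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "ion.h"	/* the uapi header patched above */

#ifndef ION_IOC_ALLOC	/* assumed request number, not shown in this hunk */
#define ION_IOC_ALLOC	_IOWR(ION_IOC_MAGIC, 0, struct ion_allocation_data)
#endif

static int example_ion_alloc(unsigned int heap_id_mask, unsigned long long len)
{
	struct ion_allocation_data data = {
		.len = len,
		.heap_id_mask = heap_id_mask,
		.flags = 0,
	};
	int ret, ion_fd = open("/dev/ion", O_RDWR);

	if (ion_fd < 0)
		return -1;
	ret = ioctl(ion_fd, ION_IOC_ALLOC, &data);
	close(ion_fd);
	return ret < 0 ? -1 : (int)data.fd;	/* a dma-buf fd, ready to mmap/share */
}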
@@ -177,16 +138,6 @@ struct ion_heap_query {
#define ION_IOC_FREE _IOWR(ION_IOC_MAGIC, 1, struct ion_handle_data)
/**
- * DOC: ION_IOC_MAP - get a file descriptor to mmap
- *
- * Takes an ion_fd_data struct with the handle field populated with a valid
- * opaque handle. Returns the struct with the fd field set to a file
- * descriptor open in the current address space. This file descriptor
- * can then be used as an argument to mmap.
- */
-#define ION_IOC_MAP _IOWR(ION_IOC_MAGIC, 2, struct ion_fd_data)
-
-/**
* DOC: ION_IOC_SHARE - creates a file descriptor to use to share an allocation
*
* Takes an ion_fd_data struct with the handle field populated with a valid
@@ -198,33 +149,6 @@ struct ion_heap_query {
#define ION_IOC_SHARE _IOWR(ION_IOC_MAGIC, 4, struct ion_fd_data)
/**
- * DOC: ION_IOC_IMPORT - imports a shared file descriptor
- *
- * Takes an ion_fd_data struct with the fd field populated with a valid file
- * descriptor obtained from ION_IOC_SHARE and returns the struct with the handle
- * field set to the corresponding opaque handle.
- */
-#define ION_IOC_IMPORT _IOWR(ION_IOC_MAGIC, 5, struct ion_fd_data)
-
-/**
- * DOC: ION_IOC_SYNC - syncs a shared file descriptor to memory
- *
- * Deprecated in favor of using the dma_buf APIs correctly (syncing
- * will happen automatically when the buffer is mapped to a device).
- * If necessary, it should be used after touching a cached buffer from the CPU;
- * this will make the buffer in memory coherent.
- */
-#define ION_IOC_SYNC _IOWR(ION_IOC_MAGIC, 7, struct ion_fd_data)
-
-/**
- * DOC: ION_IOC_CUSTOM - call architecture specific ion ioctl
- *
- * Takes the argument of the architecture specific ioctl to call and
- * passes appropriate userdata for that ioctl
- */
-#define ION_IOC_CUSTOM _IOWR(ION_IOC_MAGIC, 6, struct ion_custom_data)
-
-/**
* DOC: ION_IOC_HEAP_QUERY - information about available heaps
*
* Takes an ion_heap_query structure and populates information about
diff --git a/drivers/staging/android/uapi/ion_test.h b/drivers/staging/android/uapi/ion_test.h
deleted file mode 100644
index 480242e02f8d..000000000000
--- a/drivers/staging/android/uapi/ion_test.h
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * drivers/staging/android/uapi/ion_test.h
- *
- * Copyright (C) 2011 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#ifndef _UAPI_LINUX_ION_TEST_H
-#define _UAPI_LINUX_ION_TEST_H
-
-#include <linux/ioctl.h>
-#include <linux/types.h>
-
-/**
- * struct ion_test_rw_data - metadata passed to the kernel to read handle
- * @ptr: a pointer to an area at least as large as size
- * @offset: offset into the ion buffer to start reading
- * @size: size to read or write
- * @write: 1 to write, 0 to read
- */
-struct ion_test_rw_data {
- __u64 ptr;
- __u64 offset;
- __u64 size;
- int write;
- int __padding;
-};
-
-#define ION_IOC_MAGIC 'I'
-
-/**
- * DOC: ION_IOC_TEST_SET_FD - attach a dma buf to the test driver
- *
- * Attaches a dma buf fd to the test driver. Passing a second fd or -1 will
- * release the first fd.
- */
-#define ION_IOC_TEST_SET_FD \
- _IO(ION_IOC_MAGIC, 0xf0)
-
-/**
- * DOC: ION_IOC_TEST_DMA_MAPPING - read or write memory from a handle as DMA
- *
- * Reads or writes the memory from a handle using an uncached mapping. Can be
- * used by unit tests to emulate a DMA engine as close as possible. Only
- * expected to be used for debugging and testing, may not always be available.
- */
-#define ION_IOC_TEST_DMA_MAPPING \
- _IOW(ION_IOC_MAGIC, 0xf1, struct ion_test_rw_data)
-
-/**
- * DOC: ION_IOC_TEST_KERNEL_MAPPING - read or write memory from a handle
- *
- * Reads or writes the memory from a handle using a kernel mapping. Can be
- * used by unit tests to test heap map_kernel functions. Only expected to be
- * used for debugging and testing, may not always be available.
- */
-#define ION_IOC_TEST_KERNEL_MAPPING \
- _IOW(ION_IOC_MAGIC, 0xf2, struct ion_test_rw_data)
-
-#endif /* _UAPI_LINUX_ION_TEST_H */
diff --git a/drivers/staging/bcm2835-audio/Kconfig b/drivers/staging/bcm2835-audio/Kconfig
deleted file mode 100644
index 32a2ff9ef9b2..000000000000
--- a/drivers/staging/bcm2835-audio/Kconfig
+++ /dev/null
@@ -1,7 +0,0 @@
-config SND_BCM2835
- tristate "BCM2835 ALSA driver"
- depends on ARCH_BCM2835 && BCM2835_VCHIQ && SND
- select SND_PCM
- help
- Say Y or M if you want to support the BCM2835 ALSA PCM card driver
-
diff --git a/drivers/staging/bcm2835-audio/bcm2835-ctl.c b/drivers/staging/bcm2835-audio/bcm2835-ctl.c
deleted file mode 100644
index a4ffa1bf53e5..000000000000
--- a/drivers/staging/bcm2835-audio/bcm2835-ctl.c
+++ /dev/null
@@ -1,345 +0,0 @@
-/*****************************************************************************
- * Copyright 2011 Broadcom Corporation. All rights reserved.
- *
- * Unless you and Broadcom execute a separate written software license
- * agreement governing use of this software, this software is licensed to you
- * under the terms of the GNU General Public License version 2, available at
- * http://www.broadcom.com/licenses/GPLv2.php (the "GPL").
- *
- * Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a
- * license other than the GPL, without Broadcom's express prior written
- * consent.
- *****************************************************************************/
-
-#include <linux/platform_device.h>
-#include <linux/init.h>
-#include <linux/io.h>
-#include <linux/jiffies.h>
-#include <linux/slab.h>
-#include <linux/time.h>
-#include <linux/wait.h>
-#include <linux/delay.h>
-#include <linux/moduleparam.h>
-#include <linux/sched.h>
-
-#include <sound/core.h>
-#include <sound/control.h>
-#include <sound/pcm.h>
-#include <sound/pcm_params.h>
-#include <sound/rawmidi.h>
-#include <sound/initval.h>
-#include <sound/tlv.h>
-#include <sound/asoundef.h>
-
-#include "bcm2835.h"
-
-/* volume maximum and minimum in terms of 0.01dB */
-#define CTRL_VOL_MAX 400
-#define CTRL_VOL_MIN -10239 /* originally -10240 */
-
-static int snd_bcm2835_ctl_info(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_info *uinfo)
-{
- audio_info(" ... IN\n");
- if (kcontrol->private_value == PCM_PLAYBACK_VOLUME) {
- uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
- uinfo->count = 1;
- uinfo->value.integer.min = CTRL_VOL_MIN;
- uinfo->value.integer.max = CTRL_VOL_MAX; /* 2303 */
- } else if (kcontrol->private_value == PCM_PLAYBACK_MUTE) {
- uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN;
- uinfo->count = 1;
- uinfo->value.integer.min = 0;
- uinfo->value.integer.max = 1;
- } else if (kcontrol->private_value == PCM_PLAYBACK_DEVICE) {
- uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
- uinfo->count = 1;
- uinfo->value.integer.min = 0;
- uinfo->value.integer.max = AUDIO_DEST_MAX - 1;
- }
- audio_info(" ... OUT\n");
- return 0;
-}
-
-/* toggles mute on or off depending on the value of nmute, and returns
- * 1 if the mute value was changed, otherwise 0
- */
-static int toggle_mute(struct bcm2835_chip *chip, int nmute)
-{
- /* if settings are ok, just return 0 */
- if (chip->mute == nmute)
- return 0;
-
- /* if the sound is muted then we need to unmute */
- if (chip->mute == CTRL_VOL_MUTE) {
- chip->volume = chip->old_volume; /* copy the old volume back */
- audio_info("Unmuting, old_volume = %d, volume = %d ...\n", chip->old_volume, chip->volume);
- } else /* otherwise we mute */ {
- chip->old_volume = chip->volume;
- chip->volume = 26214; /* set volume to minimum level AKA mute */
- audio_info("Muting, old_volume = %d, volume = %d ...\n", chip->old_volume, chip->volume);
- }
-
- chip->mute = nmute;
- return 1;
-}
-
-static int snd_bcm2835_ctl_get(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct bcm2835_chip *chip = snd_kcontrol_chip(kcontrol);
-
- if (mutex_lock_interruptible(&chip->audio_mutex))
- return -EINTR;
-
- BUG_ON(!chip && !(chip->avail_substreams & AVAIL_SUBSTREAMS_MASK));
-
- if (kcontrol->private_value == PCM_PLAYBACK_VOLUME)
- ucontrol->value.integer.value[0] = chip2alsa(chip->volume);
- else if (kcontrol->private_value == PCM_PLAYBACK_MUTE)
- ucontrol->value.integer.value[0] = chip->mute;
- else if (kcontrol->private_value == PCM_PLAYBACK_DEVICE)
- ucontrol->value.integer.value[0] = chip->dest;
-
- mutex_unlock(&chip->audio_mutex);
- return 0;
-}
-
-static int snd_bcm2835_ctl_put(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct bcm2835_chip *chip = snd_kcontrol_chip(kcontrol);
- int changed = 0;
-
- if (mutex_lock_interruptible(&chip->audio_mutex))
- return -EINTR;
-
- if (kcontrol->private_value == PCM_PLAYBACK_VOLUME) {
- audio_info("Volume change attempted.. volume = %d new_volume = %d\n", chip->volume, (int) ucontrol->value.integer.value[0]);
- if (chip->mute == CTRL_VOL_MUTE) {
- /* changed = toggle_mute(chip, CTRL_VOL_UNMUTE); */
- changed = 1; /* should return 0 to signify no change but the mixer takes this as the opposite sign (no idea why) */
- goto unlock;
- }
- if (changed
- || (ucontrol->value.integer.value[0] != chip2alsa(chip->volume))) {
-
- chip->volume = alsa2chip(ucontrol->value.integer.value[0]);
- changed = 1;
- }
-
- } else if (kcontrol->private_value == PCM_PLAYBACK_MUTE) {
- /* Now implemented */
- audio_info(" Mute attempted\n");
- changed = toggle_mute(chip, ucontrol->value.integer.value[0]);
-
- } else if (kcontrol->private_value == PCM_PLAYBACK_DEVICE) {
- if (ucontrol->value.integer.value[0] != chip->dest) {
- chip->dest = ucontrol->value.integer.value[0];
- changed = 1;
- }
- }
-
- if (changed) {
- if (bcm2835_audio_set_ctls(chip))
- printk(KERN_ERR "Failed to set ALSA controls..\n");
- }
-
-unlock:
- mutex_unlock(&chip->audio_mutex);
- return changed;
-}
-
-static DECLARE_TLV_DB_SCALE(snd_bcm2835_db_scale, CTRL_VOL_MIN, 1, 1);
-
-static struct snd_kcontrol_new snd_bcm2835_ctl[] = {
- {
- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- .name = "PCM Playback Volume",
- .index = 0,
- .access = SNDRV_CTL_ELEM_ACCESS_READWRITE | SNDRV_CTL_ELEM_ACCESS_TLV_READ,
- .private_value = PCM_PLAYBACK_VOLUME,
- .info = snd_bcm2835_ctl_info,
- .get = snd_bcm2835_ctl_get,
- .put = snd_bcm2835_ctl_put,
- .count = 1,
- .tlv = {.p = snd_bcm2835_db_scale}
- },
- {
- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- .name = "PCM Playback Switch",
- .index = 0,
- .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
- .private_value = PCM_PLAYBACK_MUTE,
- .info = snd_bcm2835_ctl_info,
- .get = snd_bcm2835_ctl_get,
- .put = snd_bcm2835_ctl_put,
- .count = 1,
- },
- {
- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- .name = "PCM Playback Route",
- .index = 0,
- .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
- .private_value = PCM_PLAYBACK_DEVICE,
- .info = snd_bcm2835_ctl_info,
- .get = snd_bcm2835_ctl_get,
- .put = snd_bcm2835_ctl_put,
- .count = 1,
- },
-};
-
-static int snd_bcm2835_spdif_default_info(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_info *uinfo)
-{
- uinfo->type = SNDRV_CTL_ELEM_TYPE_IEC958;
- uinfo->count = 1;
- return 0;
-}
-
-static int snd_bcm2835_spdif_default_get(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct bcm2835_chip *chip = snd_kcontrol_chip(kcontrol);
- int i;
-
- if (mutex_lock_interruptible(&chip->audio_mutex))
- return -EINTR;
-
- for (i = 0; i < 4; i++)
- ucontrol->value.iec958.status[i] =
- (chip->spdif_status >> (i * 8)) & 0xff;
-
- mutex_unlock(&chip->audio_mutex);
- return 0;
-}
-
-static int snd_bcm2835_spdif_default_put(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct bcm2835_chip *chip = snd_kcontrol_chip(kcontrol);
- unsigned int val = 0;
- int i, change;
-
- if (mutex_lock_interruptible(&chip->audio_mutex))
- return -EINTR;
-
- for (i = 0; i < 4; i++)
- val |= (unsigned int) ucontrol->value.iec958.status[i] << (i * 8);
-
- change = val != chip->spdif_status;
- chip->spdif_status = val;
-
- mutex_unlock(&chip->audio_mutex);
- return change;
-}
-
-static int snd_bcm2835_spdif_mask_info(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_info *uinfo)
-{
- uinfo->type = SNDRV_CTL_ELEM_TYPE_IEC958;
- uinfo->count = 1;
- return 0;
-}
-
-static int snd_bcm2835_spdif_mask_get(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- /* bcm2835 supports only consumer mode and sets all other format flags
- * automatically. So the only thing left is signalling non-audio
- * content */
- ucontrol->value.iec958.status[0] = IEC958_AES0_NONAUDIO;
- return 0;
-}
-
-static int snd_bcm2835_spdif_stream_info(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_info *uinfo)
-{
- uinfo->type = SNDRV_CTL_ELEM_TYPE_IEC958;
- uinfo->count = 1;
- return 0;
-}
-
-static int snd_bcm2835_spdif_stream_get(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct bcm2835_chip *chip = snd_kcontrol_chip(kcontrol);
- int i;
-
- if (mutex_lock_interruptible(&chip->audio_mutex))
- return -EINTR;
-
- for (i = 0; i < 4; i++)
- ucontrol->value.iec958.status[i] =
- (chip->spdif_status >> (i * 8)) & 0xff;
-
- mutex_unlock(&chip->audio_mutex);
- return 0;
-}
-
-static int snd_bcm2835_spdif_stream_put(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct bcm2835_chip *chip = snd_kcontrol_chip(kcontrol);
- unsigned int val = 0;
- int i, change;
-
- if (mutex_lock_interruptible(&chip->audio_mutex))
- return -EINTR;
-
- for (i = 0; i < 4; i++)
- val |= (unsigned int) ucontrol->value.iec958.status[i] << (i * 8);
- change = val != chip->spdif_status;
- chip->spdif_status = val;
-
- mutex_unlock(&chip->audio_mutex);
- return change;
-}
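/*
 * Editorial sketch: the get/put pairs above just (un)pack the first four
 * IEC958 channel-status bytes into the 32-bit chip->spdif_status word,
 * least-significant byte first.  Standalone demo (the example bytes are
 * arbitrary, not taken from the driver):
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t status[4] = { 0x04, 0x82, 0x00, 0x02 };
	uint32_t packed = 0;
	int i;

	for (i = 0; i < 4; i++)
		packed |= (uint32_t)status[i] << (i * 8);
	printf("spdif_status = 0x%08x\n", (unsigned int)packed);	/* 0x02008204 */

	for (i = 0; i < 4; i++)
		printf("status[%d] = 0x%02x\n", i,
		       (unsigned int)((packed >> (i * 8)) & 0xff));
	return 0;
}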
-
-static struct snd_kcontrol_new snd_bcm2835_spdif[] = {
- {
- .iface = SNDRV_CTL_ELEM_IFACE_PCM,
- .name = SNDRV_CTL_NAME_IEC958("", PLAYBACK, DEFAULT),
- .info = snd_bcm2835_spdif_default_info,
- .get = snd_bcm2835_spdif_default_get,
- .put = snd_bcm2835_spdif_default_put
- },
- {
- .access = SNDRV_CTL_ELEM_ACCESS_READ,
- .iface = SNDRV_CTL_ELEM_IFACE_PCM,
- .name = SNDRV_CTL_NAME_IEC958("", PLAYBACK, CON_MASK),
- .info = snd_bcm2835_spdif_mask_info,
- .get = snd_bcm2835_spdif_mask_get,
- },
- {
- .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
- SNDRV_CTL_ELEM_ACCESS_INACTIVE,
- .iface = SNDRV_CTL_ELEM_IFACE_PCM,
- .name = SNDRV_CTL_NAME_IEC958("", PLAYBACK, PCM_STREAM),
- .info = snd_bcm2835_spdif_stream_info,
- .get = snd_bcm2835_spdif_stream_get,
- .put = snd_bcm2835_spdif_stream_put,
- },
-};
-
-int snd_bcm2835_new_ctl(struct bcm2835_chip *chip)
-{
- int err;
- unsigned int idx;
-
- strcpy(chip->card->mixername, "Broadcom Mixer");
- for (idx = 0; idx < ARRAY_SIZE(snd_bcm2835_ctl); idx++) {
- err = snd_ctl_add(chip->card,
- snd_ctl_new1(&snd_bcm2835_ctl[idx], chip));
- if (err < 0)
- return err;
- }
- for (idx = 0; idx < ARRAY_SIZE(snd_bcm2835_spdif); idx++) {
- err = snd_ctl_add(chip->card,
- snd_ctl_new1(&snd_bcm2835_spdif[idx], chip));
- if (err < 0)
- return err;
- }
- return 0;
-}
diff --git a/drivers/staging/bcm2835-audio/bcm2835-pcm.c b/drivers/staging/bcm2835-audio/bcm2835-pcm.c
deleted file mode 100644
index 16127e062661..000000000000
--- a/drivers/staging/bcm2835-audio/bcm2835-pcm.c
+++ /dev/null
@@ -1,554 +0,0 @@
-/*****************************************************************************
- * Copyright 2011 Broadcom Corporation. All rights reserved.
- *
- * Unless you and Broadcom execute a separate written software license
- * agreement governing use of this software, this software is licensed to you
- * under the terms of the GNU General Public License version 2, available at
- * http://www.broadcom.com/licenses/GPLv2.php (the "GPL").
- *
- * Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a
- * license other than the GPL, without Broadcom's express prior written
- * consent.
- *****************************************************************************/
-
-#include <linux/interrupt.h>
-#include <linux/slab.h>
-
-#include <sound/asoundef.h>
-
-#include "bcm2835.h"
-
-/* hardware definition */
-static struct snd_pcm_hardware snd_bcm2835_playback_hw = {
- .info = (SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER |
- SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID),
- .formats = SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S16_LE,
- .rates = SNDRV_PCM_RATE_CONTINUOUS | SNDRV_PCM_RATE_8000_48000,
- .rate_min = 8000,
- .rate_max = 48000,
- .channels_min = 1,
- .channels_max = 2,
- .buffer_bytes_max = 128 * 1024,
- .period_bytes_min = 1 * 1024,
- .period_bytes_max = 128 * 1024,
- .periods_min = 1,
- .periods_max = 128,
-};
-
-static struct snd_pcm_hardware snd_bcm2835_playback_spdif_hw = {
- .info = (SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER |
- SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID),
- .formats = SNDRV_PCM_FMTBIT_S16_LE,
- .rates = SNDRV_PCM_RATE_CONTINUOUS | SNDRV_PCM_RATE_44100 |
- SNDRV_PCM_RATE_48000,
- .rate_min = 44100,
- .rate_max = 48000,
- .channels_min = 2,
- .channels_max = 2,
- .buffer_bytes_max = 128 * 1024,
- .period_bytes_min = 1 * 1024,
- .period_bytes_max = 128 * 1024,
- .periods_min = 1,
- .periods_max = 128,
-};
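/*
 * Editorial sketch: with the limits above, the 128 KiB buffer ceiling
 * corresponds to roughly two thirds of a second of audio at the highest
 * supported rate.  Quick standalone check (48 kHz, stereo, 16-bit):
 */
#include <stdio.h>

int main(void)
{
	unsigned int buffer_bytes_max = 128 * 1024;
	unsigned int rate = 48000, channels = 2, bytes_per_sample = 2;
	double seconds = (double)buffer_bytes_max /
			 (rate * channels * bytes_per_sample);

	printf("max buffering: %.3f s\n", seconds);	/* ~0.683 s */
	return 0;
}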
-
-static void snd_bcm2835_playback_free(struct snd_pcm_runtime *runtime)
-{
- audio_info("Freeing up alsa stream here ..\n");
- if (runtime->private_data)
- kfree(runtime->private_data);
- runtime->private_data = NULL;
-}
-
-void bcm2835_playback_fifo(struct bcm2835_alsa_stream *alsa_stream)
-{
- unsigned int consumed = 0;
- int new_period = 0;
-
- audio_info(" .. IN\n");
-
- audio_info("alsa_stream=%p substream=%p\n", alsa_stream,
- alsa_stream ? alsa_stream->substream : 0);
-
- if (alsa_stream->open)
- consumed = bcm2835_audio_retrieve_buffers(alsa_stream);
-
-	/* We only get called if playback was triggered, so the buffers we retrieve
-	 * in each iteration are those that have already been played out
-	 */
-
- if (alsa_stream->period_size) {
- if ((alsa_stream->pos / alsa_stream->period_size) !=
- ((alsa_stream->pos + consumed) / alsa_stream->period_size))
- new_period = 1;
- }
- audio_debug("updating pos cur: %d + %d max:%d period_bytes:%d, hw_ptr: %d new_period:%d\n",
- alsa_stream->pos,
- consumed,
- alsa_stream->buffer_size,
- (int) (alsa_stream->period_size * alsa_stream->substream->runtime->periods),
- frames_to_bytes(alsa_stream->substream->runtime, alsa_stream->substream->runtime->status->hw_ptr),
- new_period);
- if (alsa_stream->buffer_size) {
-		alsa_stream->pos += consumed & ~(1 << 30);
- alsa_stream->pos %= alsa_stream->buffer_size;
- }
-
- if (alsa_stream->substream) {
- if (new_period)
- snd_pcm_period_elapsed(alsa_stream->substream);
- } else {
- audio_warning(" unexpected NULL substream\n");
- }
- audio_info(" .. OUT\n");
-}
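/*
 * Editorial sketch of the position bookkeeping done above: pos advances by
 * the number of bytes the firmware reports as consumed, wraps at
 * buffer_size, and a period-elapsed event fires whenever the advance
 * crosses a period boundary.  The sizes below are hypothetical.
 */
#include <stdio.h>

int main(void)
{
	unsigned int buffer_size = 8 * 1024;
	unsigned int period_size = 2 * 1024;
	unsigned int pos = 7 * 1024;
	unsigned int consumed = 1536;
	int new_period;

	new_period = (pos / period_size) !=
		     ((pos + consumed) / period_size);
	pos = (pos + consumed) % buffer_size;

	printf("new_period=%d pos=%u\n", new_period, pos);	/* 1, 512 */
	return 0;
}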
-
-/* open callback */
-static int snd_bcm2835_playback_open_generic(
- struct snd_pcm_substream *substream, int spdif)
-{
- struct bcm2835_chip *chip = snd_pcm_substream_chip(substream);
- struct snd_pcm_runtime *runtime = substream->runtime;
- struct bcm2835_alsa_stream *alsa_stream;
- int idx;
- int err;
-
- audio_info(" .. IN (%d)\n", substream->number);
-
- if (mutex_lock_interruptible(&chip->audio_mutex)) {
- audio_error("Interrupted whilst waiting for lock\n");
- return -EINTR;
- }
- audio_info("Alsa open (%d)\n", substream->number);
- idx = substream->number;
-
- if (spdif && chip->opened) {
- err = -EBUSY;
- goto out;
- } else if (!spdif && (chip->opened & (1 << idx))) {
- err = -EBUSY;
- goto out;
- }
- if (idx >= MAX_SUBSTREAMS) {
- audio_error
- ("substream(%d) device doesn't exist max(%d) substreams allowed\n",
- idx, MAX_SUBSTREAMS);
- err = -ENODEV;
- goto out;
- }
-
- /* Check if we are ready */
- if (!(chip->avail_substreams & (1 << idx))) {
- /* We are not ready yet */
- audio_error("substream(%d) device is not ready yet\n", idx);
- err = -EAGAIN;
- goto out;
- }
-
- alsa_stream = kzalloc(sizeof(*alsa_stream), GFP_KERNEL);
- if (!alsa_stream) {
- err = -ENOMEM;
- goto out;
- }
-
- /* Initialise alsa_stream */
- alsa_stream->chip = chip;
- alsa_stream->substream = substream;
- alsa_stream->idx = idx;
-
- sema_init(&alsa_stream->buffers_update_sem, 0);
- sema_init(&alsa_stream->control_sem, 0);
- spin_lock_init(&alsa_stream->lock);
-
- err = bcm2835_audio_open(alsa_stream);
- if (err) {
- kfree(alsa_stream);
- goto out;
- }
- runtime->private_data = alsa_stream;
- runtime->private_free = snd_bcm2835_playback_free;
- if (spdif) {
- runtime->hw = snd_bcm2835_playback_spdif_hw;
- } else {
- /* clear spdif status, as we are not in spdif mode */
- chip->spdif_status = 0;
- runtime->hw = snd_bcm2835_playback_hw;
- }
- /* minimum 16 bytes alignment (for vchiq bulk transfers) */
- snd_pcm_hw_constraint_step(runtime,
- 0,
- SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
- 16);
-
- chip->alsa_stream[idx] = alsa_stream;
-
- chip->opened |= (1 << idx);
- alsa_stream->open = 1;
- alsa_stream->draining = 1;
-
-out:
- mutex_unlock(&chip->audio_mutex);
-
- audio_info(" .. OUT =%d\n", err);
-
- return err;
-}
-
-static int snd_bcm2835_playback_open(struct snd_pcm_substream *substream)
-{
- return snd_bcm2835_playback_open_generic(substream, 0);
-}
-
-static int snd_bcm2835_playback_spdif_open(struct snd_pcm_substream *substream)
-{
- return snd_bcm2835_playback_open_generic(substream, 1);
-}
-
-/* close callback */
-static int snd_bcm2835_playback_close(struct snd_pcm_substream *substream)
-{
- /* the hardware-specific codes will be here */
-
- struct bcm2835_chip *chip;
- struct snd_pcm_runtime *runtime;
- struct bcm2835_alsa_stream *alsa_stream;
-
- audio_info(" .. IN\n");
-
- chip = snd_pcm_substream_chip(substream);
- if (mutex_lock_interruptible(&chip->audio_mutex)) {
- audio_error("Interrupted whilst waiting for lock\n");
- return -EINTR;
- }
- runtime = substream->runtime;
- alsa_stream = runtime->private_data;
-
- audio_info("Alsa close\n");
-
- /*
- * Call stop if it's still running. This happens when app
- * is force killed and we don't get a stop trigger.
- */
- if (alsa_stream->running) {
- int err;
- err = bcm2835_audio_stop(alsa_stream);
- alsa_stream->running = 0;
- if (err)
- audio_error(" Failed to STOP alsa device\n");
- }
-
- alsa_stream->period_size = 0;
- alsa_stream->buffer_size = 0;
-
- if (alsa_stream->open) {
- alsa_stream->open = 0;
- bcm2835_audio_close(alsa_stream);
- }
- if (alsa_stream->chip)
- alsa_stream->chip->alsa_stream[alsa_stream->idx] = NULL;
- /*
- * Do not free up alsa_stream here, it will be freed up by
- * runtime->private_free callback we registered in *_open above
- */
-
- chip->opened &= ~(1 << substream->number);
-
- mutex_unlock(&chip->audio_mutex);
- audio_info(" .. OUT\n");
-
- return 0;
-}
-
-/* hw_params callback */
-static int snd_bcm2835_pcm_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *params)
-{
- struct snd_pcm_runtime *runtime = substream->runtime;
- struct bcm2835_alsa_stream *alsa_stream = runtime->private_data;
- int err;
-
- audio_info(" .. IN\n");
-
- err = snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(params));
- if (err < 0) {
- audio_error
- (" pcm_lib_malloc failed to allocated pages for buffers\n");
- return err;
- }
-
- alsa_stream->channels = params_channels(params);
- alsa_stream->params_rate = params_rate(params);
- alsa_stream->pcm_format_width = snd_pcm_format_width(params_format(params));
- audio_info(" .. OUT\n");
-
- return err;
-}
-
-/* hw_free callback */
-static int snd_bcm2835_pcm_hw_free(struct snd_pcm_substream *substream)
-{
- audio_info(" .. IN\n");
- return snd_pcm_lib_free_pages(substream);
-}
-
-/* prepare callback */
-static int snd_bcm2835_pcm_prepare(struct snd_pcm_substream *substream)
-{
- struct bcm2835_chip *chip = snd_pcm_substream_chip(substream);
- struct snd_pcm_runtime *runtime = substream->runtime;
- struct bcm2835_alsa_stream *alsa_stream = runtime->private_data;
- int channels;
- int err;
-
- audio_info(" .. IN\n");
-
- if (mutex_lock_interruptible(&chip->audio_mutex))
- return -EINTR;
-
- /* notify the vchiq that it should enter spdif passthrough mode by
- * setting channels=0 (see
- * https://github.com/raspberrypi/linux/issues/528) */
- if (chip->spdif_status & IEC958_AES0_NONAUDIO)
- channels = 0;
- else
- channels = alsa_stream->channels;
-
- err = bcm2835_audio_set_params(alsa_stream, channels,
- alsa_stream->params_rate,
- alsa_stream->pcm_format_width);
- if (err < 0) {
- audio_error(" error setting hw params\n");
- }
-
- bcm2835_audio_setup(alsa_stream);
-
-	/* while preparing the stream, set its controls (volume level) */
- bcm2835_audio_set_ctls(alsa_stream->chip);
-
-
- memset(&alsa_stream->pcm_indirect, 0, sizeof(alsa_stream->pcm_indirect));
-
- alsa_stream->pcm_indirect.hw_buffer_size =
- alsa_stream->pcm_indirect.sw_buffer_size =
- snd_pcm_lib_buffer_bytes(substream);
-
- alsa_stream->buffer_size = snd_pcm_lib_buffer_bytes(substream);
- alsa_stream->period_size = snd_pcm_lib_period_bytes(substream);
- alsa_stream->pos = 0;
-
- audio_debug("buffer_size=%d, period_size=%d pos=%d frame_bits=%d\n",
- alsa_stream->buffer_size, alsa_stream->period_size,
- alsa_stream->pos, runtime->frame_bits);
-
- mutex_unlock(&chip->audio_mutex);
- audio_info(" .. OUT\n");
- return 0;
-}
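/*
 * Editorial sketch of the passthrough decision made in the prepare callback
 * above: a channel count of 0 tells the firmware to pass the S/PDIF payload
 * through untouched.  IEC958_AES0_NONAUDIO is the 0x02 bit from
 * <sound/asoundef.h>; the status value here is just an example.
 */
#include <stdio.h>

#define IEC958_AES0_NONAUDIO 0x02

int main(void)
{
	unsigned int spdif_status = IEC958_AES0_NONAUDIO;
	int stream_channels = 2;
	int channels;

	channels = (spdif_status & IEC958_AES0_NONAUDIO) ? 0 : stream_channels;
	printf("channels sent to firmware: %d\n", channels);	/* 0 */
	return 0;
}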
-
-static void snd_bcm2835_pcm_transfer(struct snd_pcm_substream *substream,
- struct snd_pcm_indirect *rec, size_t bytes)
-{
- struct snd_pcm_runtime *runtime = substream->runtime;
- struct bcm2835_alsa_stream *alsa_stream = runtime->private_data;
- void *src = (void *) (substream->runtime->dma_area + rec->sw_data);
- int err;
-
- err = bcm2835_audio_write(alsa_stream, bytes, src);
- if (err)
- audio_error(" Failed to transfer to alsa device (%d)\n", err);
-
-}
-
-static int snd_bcm2835_pcm_ack(struct snd_pcm_substream *substream)
-{
- struct snd_pcm_runtime *runtime = substream->runtime;
- struct bcm2835_alsa_stream *alsa_stream = runtime->private_data;
- struct snd_pcm_indirect *pcm_indirect = &alsa_stream->pcm_indirect;
-
- pcm_indirect->hw_queue_size = runtime->hw.buffer_bytes_max;
- snd_pcm_indirect_playback_transfer(substream, pcm_indirect,
- snd_bcm2835_pcm_transfer);
- return 0;
-}
-
-/* trigger callback */
-static int snd_bcm2835_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
-{
- struct snd_pcm_runtime *runtime = substream->runtime;
- struct bcm2835_alsa_stream *alsa_stream = runtime->private_data;
- int err = 0;
-
- audio_info(" .. IN\n");
-
- switch (cmd) {
- case SNDRV_PCM_TRIGGER_START:
- audio_debug("bcm2835_AUDIO_TRIGGER_START running=%d\n",
- alsa_stream->running);
- if (!alsa_stream->running) {
- err = bcm2835_audio_start(alsa_stream);
- if (!err) {
- alsa_stream->pcm_indirect.hw_io =
- alsa_stream->pcm_indirect.hw_data =
- bytes_to_frames(runtime,
- alsa_stream->pos);
- substream->ops->ack(substream);
- alsa_stream->running = 1;
- alsa_stream->draining = 1;
- } else {
- audio_error(" Failed to START alsa device (%d)\n", err);
- }
- }
- break;
- case SNDRV_PCM_TRIGGER_STOP:
- audio_debug
- ("bcm2835_AUDIO_TRIGGER_STOP running=%d draining=%d\n",
- alsa_stream->running, runtime->status->state == SNDRV_PCM_STATE_DRAINING);
- if (runtime->status->state == SNDRV_PCM_STATE_DRAINING) {
- audio_info("DRAINING\n");
- alsa_stream->draining = 1;
- } else {
- audio_info("DROPPING\n");
- alsa_stream->draining = 0;
- }
- if (alsa_stream->running) {
- err = bcm2835_audio_stop(alsa_stream);
- if (err != 0)
- audio_error(" Failed to STOP alsa device (%d)\n", err);
- alsa_stream->running = 0;
- }
- break;
- default:
- err = -EINVAL;
- }
-
- audio_info(" .. OUT\n");
- return err;
-}
-
-/* pointer callback */
-static snd_pcm_uframes_t
-snd_bcm2835_pcm_pointer(struct snd_pcm_substream *substream)
-{
- struct snd_pcm_runtime *runtime = substream->runtime;
- struct bcm2835_alsa_stream *alsa_stream = runtime->private_data;
-
- audio_info(" .. IN\n");
-
- audio_debug("pcm_pointer... (%d) hwptr=%d appl=%d pos=%d\n", 0,
- frames_to_bytes(runtime, runtime->status->hw_ptr),
- frames_to_bytes(runtime, runtime->control->appl_ptr),
- alsa_stream->pos);
-
- audio_info(" .. OUT\n");
- return snd_pcm_indirect_playback_pointer(substream,
- &alsa_stream->pcm_indirect,
- alsa_stream->pos);
-}
-
-static int snd_bcm2835_pcm_lib_ioctl(struct snd_pcm_substream *substream,
- unsigned int cmd, void *arg)
-{
- int ret = snd_pcm_lib_ioctl(substream, cmd, arg);
-
- audio_info(" .. substream=%p, cmd=%d, arg=%p (%x) ret=%d\n", substream,
- cmd, arg, arg ? *(unsigned *) arg : 0, ret);
- return ret;
-}
-
-/* operators */
-static struct snd_pcm_ops snd_bcm2835_playback_ops = {
- .open = snd_bcm2835_playback_open,
- .close = snd_bcm2835_playback_close,
- .ioctl = snd_bcm2835_pcm_lib_ioctl,
- .hw_params = snd_bcm2835_pcm_hw_params,
- .hw_free = snd_bcm2835_pcm_hw_free,
- .prepare = snd_bcm2835_pcm_prepare,
- .trigger = snd_bcm2835_pcm_trigger,
- .pointer = snd_bcm2835_pcm_pointer,
- .ack = snd_bcm2835_pcm_ack,
-};
-
-static struct snd_pcm_ops snd_bcm2835_playback_spdif_ops = {
- .open = snd_bcm2835_playback_spdif_open,
- .close = snd_bcm2835_playback_close,
- .ioctl = snd_bcm2835_pcm_lib_ioctl,
- .hw_params = snd_bcm2835_pcm_hw_params,
- .hw_free = snd_bcm2835_pcm_hw_free,
- .prepare = snd_bcm2835_pcm_prepare,
- .trigger = snd_bcm2835_pcm_trigger,
- .pointer = snd_bcm2835_pcm_pointer,
- .ack = snd_bcm2835_pcm_ack,
-};
-
-/* create a pcm device */
-int snd_bcm2835_new_pcm(struct bcm2835_chip *chip)
-{
- struct snd_pcm *pcm;
- int err;
-
- audio_info(" .. IN\n");
- mutex_init(&chip->audio_mutex);
- if (mutex_lock_interruptible(&chip->audio_mutex)) {
- audio_error("Interrupted whilst waiting for lock\n");
- return -EINTR;
- }
- err = snd_pcm_new(chip->card, "bcm2835 ALSA", 0, MAX_SUBSTREAMS, 0, &pcm);
- if (err < 0)
- goto out;
- pcm->private_data = chip;
- strcpy(pcm->name, "bcm2835 ALSA");
- chip->pcm = pcm;
- chip->dest = AUDIO_DEST_AUTO;
- chip->volume = alsa2chip(0);
-	chip->mute = CTRL_VOL_UNMUTE; /* disable mute on startup */
- /* set operators */
- snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK,
- &snd_bcm2835_playback_ops);
-
- /* pre-allocation of buffers */
- /* NOTE: this may fail */
- snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_CONTINUOUS,
- snd_dma_continuous_data(GFP_KERNEL),
- snd_bcm2835_playback_hw.buffer_bytes_max,
- snd_bcm2835_playback_hw.buffer_bytes_max);
-
-
-out:
- mutex_unlock(&chip->audio_mutex);
- audio_info(" .. OUT\n");
-
- return 0;
-}
-
-int snd_bcm2835_new_spdif_pcm(struct bcm2835_chip *chip)
-{
- struct snd_pcm *pcm;
- int err;
-
- audio_info(" .. IN\n");
- if (mutex_lock_interruptible(&chip->audio_mutex)) {
- audio_error("Interrupted whilst waiting for lock\n");
- return -EINTR;
- }
- err = snd_pcm_new(chip->card, "bcm2835 ALSA", 1, 1, 0, &pcm);
- if (err < 0)
- goto out;
-
- pcm->private_data = chip;
- strcpy(pcm->name, "bcm2835 IEC958/HDMI");
- chip->pcm_spdif = pcm;
- snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK,
- &snd_bcm2835_playback_spdif_ops);
-
- /* pre-allocation of buffers */
- /* NOTE: this may fail */
- snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_CONTINUOUS,
- snd_dma_continuous_data(GFP_KERNEL),
- snd_bcm2835_playback_spdif_hw.buffer_bytes_max, snd_bcm2835_playback_spdif_hw.buffer_bytes_max);
-out:
- mutex_unlock(&chip->audio_mutex);
- audio_info(" .. OUT\n");
-
- return 0;
-}
diff --git a/drivers/staging/bcm2835-audio/bcm2835-vchiq.c b/drivers/staging/bcm2835-audio/bcm2835-vchiq.c
deleted file mode 100644
index fa23a13f8d95..000000000000
--- a/drivers/staging/bcm2835-audio/bcm2835-vchiq.c
+++ /dev/null
@@ -1,912 +0,0 @@
-/*****************************************************************************
- * Copyright 2011 Broadcom Corporation. All rights reserved.
- *
- * Unless you and Broadcom execute a separate written software license
- * agreement governing use of this software, this software is licensed to you
- * under the terms of the GNU General Public License version 2, available at
- * http://www.broadcom.com/licenses/GPLv2.php (the "GPL").
- *
- * Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a
- * license other than the GPL, without Broadcom's express prior written
- * consent.
- *****************************************************************************/
-
-#include <linux/device.h>
-#include <sound/core.h>
-#include <sound/initval.h>
-#include <sound/pcm.h>
-#include <linux/io.h>
-#include <linux/interrupt.h>
-#include <linux/fs.h>
-#include <linux/file.h>
-#include <linux/mm.h>
-#include <linux/syscalls.h>
-#include <linux/uaccess.h>
-#include <linux/slab.h>
-#include <linux/delay.h>
-#include <linux/atomic.h>
-#include <linux/module.h>
-#include <linux/completion.h>
-
-#include "bcm2835.h"
-
-/* ---- Include Files -------------------------------------------------------- */
-
-#include "interface/vchi/vchi.h"
-#include "vc_vchi_audioserv_defs.h"
-
-/* ---- Private Constants and Types ------------------------------------------ */
-
-#define BCM2835_AUDIO_STOP 0
-#define BCM2835_AUDIO_START 1
-#define BCM2835_AUDIO_WRITE 2
-
-/* Logging macros (for remapping to other logging mechanisms, i.e., printf) */
-#ifdef AUDIO_DEBUG_ENABLE
-#define LOG_ERR(fmt, arg...) pr_err("%s:%d " fmt, __func__, __LINE__, ##arg)
-#define LOG_WARN(fmt, arg...) pr_info("%s:%d " fmt, __func__, __LINE__, ##arg)
-#define LOG_INFO(fmt, arg...) pr_info("%s:%d " fmt, __func__, __LINE__, ##arg)
-#define LOG_DBG(fmt, arg...) pr_info("%s:%d " fmt, __func__, __LINE__, ##arg)
-#else
-#define LOG_ERR(fmt, arg...) pr_err("%s:%d " fmt, __func__, __LINE__, ##arg)
-#define LOG_WARN(fmt, arg...) no_printk(fmt, ##arg)
-#define LOG_INFO(fmt, arg...) no_printk(fmt, ##arg)
-#define LOG_DBG(fmt, arg...) no_printk(fmt, ##arg)
-#endif
-
-struct bcm2835_audio_instance {
- unsigned int num_connections;
- VCHI_SERVICE_HANDLE_T vchi_handle[VCHI_MAX_NUM_CONNECTIONS];
- struct completion msg_avail_comp;
- struct mutex vchi_mutex;
- struct bcm2835_alsa_stream *alsa_stream;
- int result;
- short peer_version;
-};
-
-static bool force_bulk;
-
-/* ---- Private Variables ---------------------------------------------------- */
-
-/* ---- Private Function Prototypes ------------------------------------------ */
-
-/* ---- Private Functions ---------------------------------------------------- */
-
-static int bcm2835_audio_stop_worker(struct bcm2835_alsa_stream *alsa_stream);
-static int bcm2835_audio_start_worker(struct bcm2835_alsa_stream *alsa_stream);
-static int bcm2835_audio_write_worker(struct bcm2835_alsa_stream *alsa_stream,
- unsigned int count, void *src);
-
-// Routine to send a message across a service
-
-static int
-bcm2835_vchi_msg_queue(VCHI_SERVICE_HANDLE_T handle,
- void *data,
- unsigned int size)
-{
- return vchi_queue_kernel_message(handle,
- data,
- size);
-}
-
-static const u32 BCM2835_AUDIO_WRITE_COOKIE1 = ('B' << 24 | 'C' << 16 |
- 'M' << 8 | 'A');
-static const u32 BCM2835_AUDIO_WRITE_COOKIE2 = ('D' << 24 | 'A' << 16 |
- 'T' << 8 | 'A');
-
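/*
 * Editorial note in code form: the two cookies are just the ASCII strings
 * "BCMA" and "DATA" packed into 32-bit words, used to sanity-check
 * completion messages coming back from the firmware.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint32_t cookie1 = 'B' << 24 | 'C' << 16 | 'M' << 8 | 'A';
	const uint32_t cookie2 = 'D' << 24 | 'A' << 16 | 'T' << 8 | 'A';

	printf("cookie1=0x%08x cookie2=0x%08x\n",
	       (unsigned int)cookie1, (unsigned int)cookie2);
	/* prints cookie1=0x42434d41 cookie2=0x44415441 */
	return 0;
}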
-struct bcm2835_audio_work {
- struct work_struct my_work;
- struct bcm2835_alsa_stream *alsa_stream;
- int cmd;
- void *src;
- unsigned int count;
-};
-
-static void my_wq_function(struct work_struct *work)
-{
- struct bcm2835_audio_work *w =
- container_of(work, struct bcm2835_audio_work, my_work);
- int ret = -9;
-
- LOG_DBG(" .. IN %p:%d\n", w->alsa_stream, w->cmd);
- switch (w->cmd) {
- case BCM2835_AUDIO_START:
- ret = bcm2835_audio_start_worker(w->alsa_stream);
- break;
- case BCM2835_AUDIO_STOP:
- ret = bcm2835_audio_stop_worker(w->alsa_stream);
- break;
- case BCM2835_AUDIO_WRITE:
- ret = bcm2835_audio_write_worker(w->alsa_stream, w->count,
- w->src);
- break;
- default:
- LOG_ERR(" Unexpected work: %p:%d\n", w->alsa_stream, w->cmd);
- break;
- }
- kfree((void *) work);
- LOG_DBG(" .. OUT %d\n", ret);
-}
-
-int bcm2835_audio_start(struct bcm2835_alsa_stream *alsa_stream)
-{
- int ret = -1;
-
- LOG_DBG(" .. IN\n");
- if (alsa_stream->my_wq) {
- struct bcm2835_audio_work *work;
-
- work = kmalloc(sizeof(*work), GFP_ATOMIC);
- /*--- Queue some work (item 1) ---*/
- if (work) {
- INIT_WORK(&work->my_work, my_wq_function);
- work->alsa_stream = alsa_stream;
- work->cmd = BCM2835_AUDIO_START;
- if (queue_work(alsa_stream->my_wq, &work->my_work))
- ret = 0;
- } else
- LOG_ERR(" .. Error: NULL work kmalloc\n");
- }
- LOG_DBG(" .. OUT %d\n", ret);
- return ret;
-}
-
-int bcm2835_audio_stop(struct bcm2835_alsa_stream *alsa_stream)
-{
- int ret = -1;
-
- LOG_DBG(" .. IN\n");
- if (alsa_stream->my_wq) {
- struct bcm2835_audio_work *work;
-
- work = kmalloc(sizeof(*work), GFP_ATOMIC);
- /*--- Queue some work (item 1) ---*/
- if (work) {
- INIT_WORK(&work->my_work, my_wq_function);
- work->alsa_stream = alsa_stream;
- work->cmd = BCM2835_AUDIO_STOP;
- if (queue_work(alsa_stream->my_wq, &work->my_work))
- ret = 0;
- } else
- LOG_ERR(" .. Error: NULL work kmalloc\n");
- }
- LOG_DBG(" .. OUT %d\n", ret);
- return ret;
-}
-
-int bcm2835_audio_write(struct bcm2835_alsa_stream *alsa_stream,
- unsigned int count, void *src)
-{
- int ret = -1;
-
- LOG_DBG(" .. IN\n");
- if (alsa_stream->my_wq) {
- struct bcm2835_audio_work *work;
-
- work = kmalloc(sizeof(*work), GFP_ATOMIC);
- /*--- Queue some work (item 1) ---*/
- if (work) {
- INIT_WORK(&work->my_work, my_wq_function);
- work->alsa_stream = alsa_stream;
- work->cmd = BCM2835_AUDIO_WRITE;
- work->src = src;
- work->count = count;
- if (queue_work(alsa_stream->my_wq, &work->my_work))
- ret = 0;
- } else
- LOG_ERR(" .. Error: NULL work kmalloc\n");
- }
- LOG_DBG(" .. OUT %d\n", ret);
- return ret;
-}
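/*
 * Editorial refactoring sketch (not part of the original patch): the three
 * wrappers above differ only in the command and the optional data pointer,
 * so they could share a single queuing helper.  Types, fields and helpers
 * are the driver's own.
 */
static int bcm2835_audio_queue_cmd(struct bcm2835_alsa_stream *alsa_stream,
				   int cmd, void *src, unsigned int count)
{
	struct bcm2835_audio_work *work;

	if (!alsa_stream->my_wq)
		return -1;

	work = kmalloc(sizeof(*work), GFP_ATOMIC);
	if (!work) {
		LOG_ERR(" .. Error: NULL work kmalloc\n");
		return -1;
	}

	INIT_WORK(&work->my_work, my_wq_function);
	work->alsa_stream = alsa_stream;
	work->cmd = cmd;
	work->src = src;
	work->count = count;

	return queue_work(alsa_stream->my_wq, &work->my_work) ? 0 : -1;
}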
-
-static void my_workqueue_init(struct bcm2835_alsa_stream *alsa_stream)
-{
- alsa_stream->my_wq = alloc_workqueue("my_queue", WQ_HIGHPRI, 1);
- return;
-}
-
-static void my_workqueue_quit(struct bcm2835_alsa_stream *alsa_stream)
-{
- if (alsa_stream->my_wq) {
- flush_workqueue(alsa_stream->my_wq);
- destroy_workqueue(alsa_stream->my_wq);
- alsa_stream->my_wq = NULL;
- }
- return;
-}
-
-static void audio_vchi_callback(void *param,
- const VCHI_CALLBACK_REASON_T reason,
- void *msg_handle)
-{
- struct bcm2835_audio_instance *instance = param;
- int status;
- int msg_len;
- struct vc_audio_msg m;
-
- LOG_DBG(" .. IN instance=%p, handle=%p, alsa=%p, reason=%d, handle=%p\n",
- instance, instance ? instance->vchi_handle[0] : NULL, instance ? instance->alsa_stream : NULL, reason, msg_handle);
-
- if (reason != VCHI_CALLBACK_MSG_AVAILABLE) {
- return;
- }
- if (!instance) {
- LOG_ERR(" .. instance is null\n");
- BUG();
- return;
- }
- if (!instance->vchi_handle[0]) {
- LOG_ERR(" .. instance->vchi_handle[0] is null\n");
- BUG();
- return;
- }
- status = vchi_msg_dequeue(instance->vchi_handle[0],
- &m, sizeof(m), &msg_len, VCHI_FLAGS_NONE);
- if (m.type == VC_AUDIO_MSG_TYPE_RESULT) {
- LOG_DBG(" .. instance=%p, m.type=VC_AUDIO_MSG_TYPE_RESULT, success=%d\n",
- instance, m.u.result.success);
- instance->result = m.u.result.success;
- complete(&instance->msg_avail_comp);
- } else if (m.type == VC_AUDIO_MSG_TYPE_COMPLETE) {
- struct bcm2835_alsa_stream *alsa_stream = instance->alsa_stream;
-
- LOG_DBG(" .. instance=%p, m.type=VC_AUDIO_MSG_TYPE_COMPLETE, complete=%d\n",
- instance, m.u.complete.count);
- if (m.u.complete.cookie1 != BCM2835_AUDIO_WRITE_COOKIE1 ||
- m.u.complete.cookie2 != BCM2835_AUDIO_WRITE_COOKIE2)
- LOG_ERR(" .. response is corrupt\n");
- else if (alsa_stream) {
- atomic_add(m.u.complete.count,
- &alsa_stream->retrieved);
- bcm2835_playback_fifo(alsa_stream);
- } else {
- LOG_ERR(" .. unexpected alsa_stream=%p\n",
- alsa_stream);
- }
- } else {
- LOG_ERR(" .. unexpected m.type=%d\n", m.type);
- }
- LOG_DBG(" .. OUT\n");
-}
-
-static struct bcm2835_audio_instance *
-vc_vchi_audio_init(VCHI_INSTANCE_T vchi_instance,
- VCHI_CONNECTION_T **vchi_connections,
- unsigned int num_connections)
-{
- unsigned int i;
- struct bcm2835_audio_instance *instance;
- int status;
-
- LOG_DBG("%s: start", __func__);
-
- if (num_connections > VCHI_MAX_NUM_CONNECTIONS) {
- LOG_ERR("%s: unsupported number of connections %u (max=%u)\n",
- __func__, num_connections, VCHI_MAX_NUM_CONNECTIONS);
-
- return NULL;
- }
- /* Allocate memory for this instance */
- instance = kmalloc(sizeof(*instance), GFP_KERNEL);
- if (!instance)
- return NULL;
-
- memset(instance, 0, sizeof(*instance));
- instance->num_connections = num_connections;
-
- /* Create a lock for exclusive, serialized VCHI connection access */
- mutex_init(&instance->vchi_mutex);
- /* Open the VCHI service connections */
- for (i = 0; i < num_connections; i++) {
- SERVICE_CREATION_T params = {
- VCHI_VERSION_EX(VC_AUDIOSERV_VER, VC_AUDIOSERV_MIN_VER),
- VC_AUDIO_SERVER_NAME, // 4cc service code
- vchi_connections[i], // passed in fn pointers
- 0, // rx fifo size (unused)
- 0, // tx fifo size (unused)
- audio_vchi_callback, // service callback
- instance, // service callback parameter
-			1, //TODO: remove VCOS_FALSE, // unaligned bulk receives
- 1, //TODO: remove VCOS_FALSE, // unaligned bulk transmits
- 0 // want crc check on bulk transfers
- };
-
- LOG_DBG("%s: about to open %i\n", __func__, i);
- status = vchi_service_open(vchi_instance, &params,
- &instance->vchi_handle[i]);
- LOG_DBG("%s: opened %i: %p=%d\n", __func__, i, instance->vchi_handle[i], status);
- if (status) {
- LOG_ERR("%s: failed to open VCHI service connection (status=%d)\n",
- __func__, status);
-
- goto err_close_services;
- }
- /* Finished with the service for now */
- vchi_service_release(instance->vchi_handle[i]);
- }
-
- LOG_DBG("%s: okay\n", __func__);
- return instance;
-
-err_close_services:
- for (i = 0; i < instance->num_connections; i++) {
- LOG_ERR("%s: closing %i: %p\n", __func__, i, instance->vchi_handle[i]);
- if (instance->vchi_handle[i])
- vchi_service_close(instance->vchi_handle[i]);
- }
-
- kfree(instance);
- LOG_ERR("%s: error\n", __func__);
-
- return NULL;
-}
-
-static int vc_vchi_audio_deinit(struct bcm2835_audio_instance *instance)
-{
- unsigned int i;
-
- LOG_DBG(" .. IN\n");
-
- if (!instance) {
- LOG_ERR("%s: invalid handle %p\n", __func__, instance);
-
- return -1;
- }
-
- LOG_DBG(" .. about to lock (%d)\n", instance->num_connections);
- if (mutex_lock_interruptible(&instance->vchi_mutex)) {
- LOG_DBG("Interrupted whilst waiting for lock on (%d)\n",
- instance->num_connections);
- return -EINTR;
- }
-
- /* Close all VCHI service connections */
- for (i = 0; i < instance->num_connections; i++) {
- int status;
-
- LOG_DBG(" .. %i:closing %p\n", i, instance->vchi_handle[i]);
- vchi_service_use(instance->vchi_handle[i]);
-
- status = vchi_service_close(instance->vchi_handle[i]);
- if (status) {
- LOG_DBG("%s: failed to close VCHI service connection (status=%d)\n",
- __func__, status);
- }
- }
-
- mutex_unlock(&instance->vchi_mutex);
-
- kfree(instance);
-
- LOG_DBG(" .. OUT\n");
-
- return 0;
-}
-
-static int bcm2835_audio_open_connection(struct bcm2835_alsa_stream *alsa_stream)
-{
- static VCHI_INSTANCE_T vchi_instance;
- static VCHI_CONNECTION_T *vchi_connection;
- static int initted;
- struct bcm2835_audio_instance *instance =
- (struct bcm2835_audio_instance *)alsa_stream->instance;
- int ret;
-
- LOG_DBG(" .. IN\n");
-
- LOG_INFO("%s: start\n", __func__);
- BUG_ON(instance);
- if (instance) {
- LOG_ERR("%s: VCHI instance already open (%p)\n",
- __func__, instance);
- instance->alsa_stream = alsa_stream;
- alsa_stream->instance = instance;
- ret = 0; // xxx todo -1;
- goto err_free_mem;
- }
-
- /* Initialize and create a VCHI connection */
- if (!initted) {
- ret = vchi_initialise(&vchi_instance);
- if (ret) {
- LOG_ERR("%s: failed to initialise VCHI instance (ret=%d)\n",
- __func__, ret);
-
- ret = -EIO;
- goto err_free_mem;
- }
- ret = vchi_connect(NULL, 0, vchi_instance);
- if (ret) {
- LOG_ERR("%s: failed to connect VCHI instance (ret=%d)\n",
- __func__, ret);
-
- ret = -EIO;
- goto err_free_mem;
- }
- initted = 1;
- }
-
- /* Initialize an instance of the audio service */
- instance = vc_vchi_audio_init(vchi_instance, &vchi_connection, 1);
-
- if (!instance) {
- LOG_ERR("%s: failed to initialize audio service\n", __func__);
-
- ret = -EPERM;
- goto err_free_mem;
- }
-
- instance->alsa_stream = alsa_stream;
- alsa_stream->instance = instance;
-
- LOG_DBG(" success !\n");
- ret = 0;
-err_free_mem:
- LOG_DBG(" .. OUT\n");
-
- return ret;
-}
-
-int bcm2835_audio_open(struct bcm2835_alsa_stream *alsa_stream)
-{
- struct bcm2835_audio_instance *instance;
- struct vc_audio_msg m;
- int status;
- int ret;
-
- LOG_DBG(" .. IN\n");
-
- my_workqueue_init(alsa_stream);
-
- ret = bcm2835_audio_open_connection(alsa_stream);
- if (ret) {
- ret = -1;
- goto exit;
- }
- instance = alsa_stream->instance;
- LOG_DBG(" instance (%p)\n", instance);
-
- if (mutex_lock_interruptible(&instance->vchi_mutex)) {
- LOG_DBG("Interrupted whilst waiting for lock on (%d)\n", instance->num_connections);
- return -EINTR;
- }
- vchi_service_use(instance->vchi_handle[0]);
-
- m.type = VC_AUDIO_MSG_TYPE_OPEN;
-
- /* Send the message to the videocore */
- status = bcm2835_vchi_msg_queue(instance->vchi_handle[0],
- &m, sizeof(m));
-
- if (status) {
- LOG_ERR("%s: failed on vchi_msg_queue (status=%d)\n",
- __func__, status);
-
- ret = -1;
- goto unlock;
- }
-
- ret = 0;
-
-unlock:
- vchi_service_release(instance->vchi_handle[0]);
- mutex_unlock(&instance->vchi_mutex);
-exit:
- LOG_DBG(" .. OUT\n");
- return ret;
-}
-
-static int bcm2835_audio_set_ctls_chan(struct bcm2835_alsa_stream *alsa_stream,
- struct bcm2835_chip *chip)
-{
- struct vc_audio_msg m;
- struct bcm2835_audio_instance *instance = alsa_stream->instance;
- int status;
- int ret;
-
- LOG_DBG(" .. IN\n");
-
- LOG_INFO(" Setting ALSA dest(%d), volume(%d)\n",
- chip->dest, chip->volume);
-
- if (mutex_lock_interruptible(&instance->vchi_mutex)) {
- LOG_DBG("Interrupted whilst waiting for lock on (%d)\n",
- instance->num_connections);
- return -EINTR;
- }
- vchi_service_use(instance->vchi_handle[0]);
-
- instance->result = -1;
-
- m.type = VC_AUDIO_MSG_TYPE_CONTROL;
- m.u.control.dest = chip->dest;
- m.u.control.volume = chip->volume;
-
- /* Create the message available completion */
- init_completion(&instance->msg_avail_comp);
-
- /* Send the message to the videocore */
- status = bcm2835_vchi_msg_queue(instance->vchi_handle[0],
- &m, sizeof(m));
-
- if (status) {
- LOG_ERR("%s: failed on vchi_msg_queue (status=%d)\n",
- __func__, status);
-
- ret = -1;
- goto unlock;
- }
-
- /* We are expecting a reply from the videocore */
- wait_for_completion(&instance->msg_avail_comp);
-
- if (instance->result) {
- LOG_ERR("%s: result=%d\n", __func__, instance->result);
-
- ret = -1;
- goto unlock;
- }
-
- ret = 0;
-
-unlock:
- vchi_service_release(instance->vchi_handle[0]);
- mutex_unlock(&instance->vchi_mutex);
-
- LOG_DBG(" .. OUT\n");
- return ret;
-}
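/*
 * Editorial refactoring sketch: the CONTROL, CONFIG and CLOSE paths all
 * queue one message and then block on msg_avail_comp until the VCHI
 * callback stores the result, so they could share a helper along these
 * lines (reusing the driver's own types and locking scheme).
 */
static int bcm2835_audio_send_msg_sync(struct bcm2835_audio_instance *instance,
				       struct vc_audio_msg *m)
{
	int status;

	if (mutex_lock_interruptible(&instance->vchi_mutex))
		return -EINTR;
	vchi_service_use(instance->vchi_handle[0]);

	instance->result = -1;
	init_completion(&instance->msg_avail_comp);

	status = bcm2835_vchi_msg_queue(instance->vchi_handle[0],
					m, sizeof(*m));
	if (status) {
		LOG_ERR("%s: failed on vchi_msg_queue (status=%d)\n",
			__func__, status);
		status = -1;
	} else {
		wait_for_completion(&instance->msg_avail_comp);
		status = instance->result ? -1 : 0;
	}

	vchi_service_release(instance->vchi_handle[0]);
	mutex_unlock(&instance->vchi_mutex);
	return status;
}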
-
-int bcm2835_audio_set_ctls(struct bcm2835_chip *chip)
-{
- int i;
- int ret = 0;
-
- LOG_DBG(" .. IN\n");
- LOG_DBG(" Setting ALSA dest(%d), volume(%d)\n", chip->dest, chip->volume);
-
- /* change ctls for all substreams */
- for (i = 0; i < MAX_SUBSTREAMS; i++) {
- if (chip->avail_substreams & (1 << i)) {
- if (!chip->alsa_stream[i]) {
- LOG_DBG(" No ALSA stream available?! %i:%p (%x)\n", i, chip->alsa_stream[i], chip->avail_substreams);
- ret = 0;
- } else if (bcm2835_audio_set_ctls_chan(chip->alsa_stream[i], chip) != 0) {
- LOG_ERR("Couldn't set the controls for stream %d\n", i);
- ret = -1;
- } else {
- LOG_DBG(" Controls set for stream %d\n", i);
- }
- }
- }
- LOG_DBG(" .. OUT ret=%d\n", ret);
- return ret;
-}
-
-int bcm2835_audio_set_params(struct bcm2835_alsa_stream *alsa_stream,
- unsigned int channels, unsigned int samplerate,
- unsigned int bps)
-{
- struct vc_audio_msg m;
- struct bcm2835_audio_instance *instance = alsa_stream->instance;
- int status;
- int ret;
-
- LOG_DBG(" .. IN\n");
-
- LOG_INFO(" Setting ALSA channels(%d), samplerate(%d), bits-per-sample(%d)\n",
- channels, samplerate, bps);
-
- /* resend ctls - alsa_stream may not have been open when first send */
- ret = bcm2835_audio_set_ctls_chan(alsa_stream, alsa_stream->chip);
- if (ret) {
- LOG_ERR(" Alsa controls not supported\n");
- return -EINVAL;
- }
-
- if (mutex_lock_interruptible(&instance->vchi_mutex)) {
- LOG_DBG("Interrupted whilst waiting for lock on (%d)\n", instance->num_connections);
- return -EINTR;
- }
- vchi_service_use(instance->vchi_handle[0]);
-
- instance->result = -1;
-
- m.type = VC_AUDIO_MSG_TYPE_CONFIG;
- m.u.config.channels = channels;
- m.u.config.samplerate = samplerate;
- m.u.config.bps = bps;
-
- /* Create the message available completion */
- init_completion(&instance->msg_avail_comp);
-
- /* Send the message to the videocore */
- status = bcm2835_vchi_msg_queue(instance->vchi_handle[0],
- &m, sizeof(m));
-
- if (status) {
- LOG_ERR("%s: failed on vchi_msg_queue (status=%d)\n",
- __func__, status);
-
- ret = -1;
- goto unlock;
- }
-
- /* We are expecting a reply from the videocore */
- wait_for_completion(&instance->msg_avail_comp);
-
- if (instance->result) {
- LOG_ERR("%s: result=%d", __func__, instance->result);
-
- ret = -1;
- goto unlock;
- }
-
- ret = 0;
-
-unlock:
- vchi_service_release(instance->vchi_handle[0]);
- mutex_unlock(&instance->vchi_mutex);
-
- LOG_DBG(" .. OUT\n");
- return ret;
-}
-
-int bcm2835_audio_setup(struct bcm2835_alsa_stream *alsa_stream)
-{
- LOG_DBG(" .. IN\n");
-
- LOG_DBG(" .. OUT\n");
-
- return 0;
-}
-
-static int bcm2835_audio_start_worker(struct bcm2835_alsa_stream *alsa_stream)
-{
- struct vc_audio_msg m;
- struct bcm2835_audio_instance *instance = alsa_stream->instance;
- int status;
- int ret;
-
- LOG_DBG(" .. IN\n");
-
- if (mutex_lock_interruptible(&instance->vchi_mutex)) {
- LOG_DBG("Interrupted whilst waiting for lock on (%d)\n",
- instance->num_connections);
- return -EINTR;
- }
- vchi_service_use(instance->vchi_handle[0]);
-
- m.type = VC_AUDIO_MSG_TYPE_START;
-
- /* Send the message to the videocore */
- status = bcm2835_vchi_msg_queue(instance->vchi_handle[0],
- &m, sizeof(m));
-
- if (status) {
- LOG_ERR("%s: failed on vchi_msg_queue (status=%d)\n",
- __func__, status);
-
- ret = -1;
- goto unlock;
- }
-
- ret = 0;
-
-unlock:
- vchi_service_release(instance->vchi_handle[0]);
- mutex_unlock(&instance->vchi_mutex);
- LOG_DBG(" .. OUT\n");
- return ret;
-}
-
-static int bcm2835_audio_stop_worker(struct bcm2835_alsa_stream *alsa_stream)
-{
- struct vc_audio_msg m;
- struct bcm2835_audio_instance *instance = alsa_stream->instance;
- int status;
- int ret;
-
- LOG_DBG(" .. IN\n");
-
- if (mutex_lock_interruptible(&instance->vchi_mutex)) {
- LOG_DBG("Interrupted whilst waiting for lock on (%d)\n",
- instance->num_connections);
- return -EINTR;
- }
- vchi_service_use(instance->vchi_handle[0]);
-
- m.type = VC_AUDIO_MSG_TYPE_STOP;
- m.u.stop.draining = alsa_stream->draining;
-
- /* Send the message to the videocore */
- status = bcm2835_vchi_msg_queue(instance->vchi_handle[0],
- &m, sizeof(m));
-
- if (status) {
- LOG_ERR("%s: failed on vchi_msg_queue (status=%d)\n",
- __func__, status);
-
- ret = -1;
- goto unlock;
- }
-
- ret = 0;
-
-unlock:
- vchi_service_release(instance->vchi_handle[0]);
- mutex_unlock(&instance->vchi_mutex);
- LOG_DBG(" .. OUT\n");
- return ret;
-}
-
-int bcm2835_audio_close(struct bcm2835_alsa_stream *alsa_stream)
-{
- struct vc_audio_msg m;
- struct bcm2835_audio_instance *instance = alsa_stream->instance;
- int status;
- int ret;
-
- LOG_DBG(" .. IN\n");
-
- my_workqueue_quit(alsa_stream);
-
- if (mutex_lock_interruptible(&instance->vchi_mutex)) {
- LOG_DBG("Interrupted whilst waiting for lock on (%d)\n",
- instance->num_connections);
- return -EINTR;
- }
- vchi_service_use(instance->vchi_handle[0]);
-
- m.type = VC_AUDIO_MSG_TYPE_CLOSE;
-
- /* Create the message available completion */
- init_completion(&instance->msg_avail_comp);
-
- /* Send the message to the videocore */
- status = bcm2835_vchi_msg_queue(instance->vchi_handle[0],
- &m, sizeof(m));
-
- if (status) {
- LOG_ERR("%s: failed on vchi_msg_queue (status=%d)\n",
- __func__, status);
- ret = -1;
- goto unlock;
- }
-
- /* We are expecting a reply from the videocore */
- wait_for_completion(&instance->msg_avail_comp);
-
- if (instance->result) {
- LOG_ERR("%s: failed result (result=%d)\n",
- __func__, instance->result);
-
- ret = -1;
- goto unlock;
- }
-
- ret = 0;
-
-unlock:
- vchi_service_release(instance->vchi_handle[0]);
- mutex_unlock(&instance->vchi_mutex);
-
- /* Stop the audio service */
- vc_vchi_audio_deinit(instance);
- alsa_stream->instance = NULL;
-
- LOG_DBG(" .. OUT\n");
- return ret;
-}
-
-static int bcm2835_audio_write_worker(struct bcm2835_alsa_stream *alsa_stream,
- unsigned int count, void *src)
-{
- struct vc_audio_msg m;
- struct bcm2835_audio_instance *instance = alsa_stream->instance;
- int status;
- int ret;
-
- LOG_DBG(" .. IN\n");
-
- LOG_INFO(" Writing %d bytes from %p\n", count, src);
-
- if (mutex_lock_interruptible(&instance->vchi_mutex)) {
- LOG_DBG("Interrupted whilst waiting for lock on (%d)\n",
- instance->num_connections);
- return -EINTR;
- }
- vchi_service_use(instance->vchi_handle[0]);
-
- if (instance->peer_version == 0 && vchi_get_peer_version(instance->vchi_handle[0], &instance->peer_version) == 0) {
- LOG_DBG("%s: client version %d connected\n", __func__, instance->peer_version);
- }
- m.type = VC_AUDIO_MSG_TYPE_WRITE;
- m.u.write.count = count;
- // old version uses bulk, new version uses control
- m.u.write.max_packet = instance->peer_version < 2 || force_bulk ? 0 : 4000;
- m.u.write.cookie1 = BCM2835_AUDIO_WRITE_COOKIE1;
- m.u.write.cookie2 = BCM2835_AUDIO_WRITE_COOKIE2;
- m.u.write.silence = src == NULL;
-
- /* Send the message to the videocore */
- status = bcm2835_vchi_msg_queue(instance->vchi_handle[0],
- &m, sizeof(m));
-
- if (status) {
- LOG_ERR("%s: failed on vchi_msg_queue (status=%d)\n",
- __func__, status);
-
- ret = -1;
- goto unlock;
- }
- if (!m.u.write.silence) {
- if (!m.u.write.max_packet) {
- /* Send the message to the videocore */
- status = vchi_bulk_queue_transmit(instance->vchi_handle[0],
- src, count,
- 0 *
- VCHI_FLAGS_BLOCK_UNTIL_QUEUED
- +
- 1 *
- VCHI_FLAGS_BLOCK_UNTIL_DATA_READ,
- NULL);
- } else {
- while (count > 0) {
- int bytes = min((int) m.u.write.max_packet, (int) count);
-
- status = bcm2835_vchi_msg_queue(instance->vchi_handle[0],
- src, bytes);
- src = (char *)src + bytes;
- count -= bytes;
- }
- }
- if (status) {
- LOG_ERR("%s: failed on vchi_bulk_queue_transmit (status=%d)\n",
- __func__, status);
-
- ret = -1;
- goto unlock;
- }
- }
- ret = 0;
-
-unlock:
- vchi_service_release(instance->vchi_handle[0]);
- mutex_unlock(&instance->vchi_mutex);
- LOG_DBG(" .. OUT\n");
- return ret;
-}
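/*
 * Editorial sketch of the max_packet path above: when the peer supports
 * control-message writes, the payload is simply sent in chunks of at most
 * max_packet bytes.  Standalone demo with made-up sizes:
 */
#include <stdio.h>

int main(void)
{
	unsigned int count = 10000;
	const unsigned int max_packet = 4000;
	unsigned int offset = 0;

	while (count > 0) {
		unsigned int bytes = count < max_packet ? count : max_packet;

		printf("send %u bytes at offset %u\n", bytes, offset);
		offset += bytes;
		count -= bytes;
	}
	return 0;	/* sends 4000, 4000, 2000 */
}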
-
-/**
- * Returns all buffers from arm->vc
- */
-void bcm2835_audio_flush_buffers(struct bcm2835_alsa_stream *alsa_stream)
-{
- LOG_DBG(" .. IN\n");
- LOG_DBG(" .. OUT\n");
- return;
-}
-
-/**
- * Forces VC to flush (drop) its filled playback buffers and
- * return them to us. (VC->ARM)
- */
-void bcm2835_audio_flush_playback_buffers(struct bcm2835_alsa_stream *alsa_stream)
-{
- LOG_DBG(" .. IN\n");
- LOG_DBG(" .. OUT\n");
-}
-
-unsigned int bcm2835_audio_retrieve_buffers(struct bcm2835_alsa_stream *alsa_stream)
-{
- unsigned int count = atomic_read(&alsa_stream->retrieved);
-
- atomic_sub(count, &alsa_stream->retrieved);
- return count;
-}
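/*
 * Editorial sketch of the retrieved-bytes accounting above, using C11
 * atomics in place of the kernel's atomic_t: the VCHI callback adds
 * completed byte counts, and retrieve_buffers() consumes whatever has
 * accumulated since the last call.
 */
#include <stdio.h>
#include <stdatomic.h>

static atomic_uint retrieved;

int main(void)
{
	unsigned int count;

	/* callback side: firmware reports two completed writes */
	atomic_fetch_add(&retrieved, 4096);
	atomic_fetch_add(&retrieved, 2048);

	/* reader side, as in bcm2835_audio_retrieve_buffers() */
	count = atomic_load(&retrieved);
	atomic_fetch_sub(&retrieved, count);

	printf("consumed %u, remaining %u\n", count, atomic_load(&retrieved));
	return 0;	/* consumed 6144, remaining 0 */
}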
-
-module_param(force_bulk, bool, 0444);
-MODULE_PARM_DESC(force_bulk, "Force use of vchiq bulk for audio");
diff --git a/drivers/staging/bcm2835-audio/bcm2835.c b/drivers/staging/bcm2835-audio/bcm2835.c
deleted file mode 100644
index 3a5e528e0ec6..000000000000
--- a/drivers/staging/bcm2835-audio/bcm2835.c
+++ /dev/null
@@ -1,250 +0,0 @@
-/*****************************************************************************
- * Copyright 2011 Broadcom Corporation. All rights reserved.
- *
- * Unless you and Broadcom execute a separate written software license
- * agreement governing use of this software, this software is licensed to you
- * under the terms of the GNU General Public License version 2, available at
- * http://www.broadcom.com/licenses/GPLv2.php (the "GPL").
- *
- * Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a
- * license other than the GPL, without Broadcom's express prior written
- * consent.
- *****************************************************************************/
-
-#include <linux/platform_device.h>
-
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/of.h>
-
-#include "bcm2835.h"
-
-/* HACKY global pointers needed for successive probes to work : ssp
- * But compared with the changes we would have to make in the VC audio_ipc
- * code to export 8 audio_ipc devices as a single IPC device and then monitor
- * all of those devices in a thread, this gets things done quickly and should
- * be easier to debug if we run into issues.
- */
-
-static struct snd_card *g_card;
-static struct bcm2835_chip *g_chip;
-
-static int snd_bcm2835_free(struct bcm2835_chip *chip)
-{
- kfree(chip);
- return 0;
-}
-
-/* component-destructor
- * (see "Management of Cards and Components")
- */
-static int snd_bcm2835_dev_free(struct snd_device *device)
-{
- return snd_bcm2835_free(device->device_data);
-}
-
-/* chip-specific constructor
- * (see "Management of Cards and Components")
- */
-static int snd_bcm2835_create(struct snd_card *card,
- struct platform_device *pdev,
- struct bcm2835_chip **rchip)
-{
- struct bcm2835_chip *chip;
- int err;
- static struct snd_device_ops ops = {
- .dev_free = snd_bcm2835_dev_free,
- };
-
- *rchip = NULL;
-
- chip = kzalloc(sizeof(*chip), GFP_KERNEL);
- if (!chip)
- return -ENOMEM;
-
- chip->card = card;
-
- err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops);
- if (err < 0) {
- snd_bcm2835_free(chip);
- return err;
- }
-
- *rchip = chip;
- return 0;
-}
-
-static int snd_bcm2835_alsa_probe_dt(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct bcm2835_chip *chip;
- struct snd_card *card;
- u32 numchans;
- int err, i;
-
- err = of_property_read_u32(dev->of_node, "brcm,pwm-channels",
- &numchans);
- if (err) {
- dev_err(dev, "Failed to get DT property 'brcm,pwm-channels'");
- return err;
- }
-
- if (numchans == 0 || numchans > MAX_SUBSTREAMS) {
- numchans = MAX_SUBSTREAMS;
- dev_warn(dev, "Illegal 'brcm,pwm-channels' value, will use %u\n",
- numchans);
- }
-
- err = snd_card_new(&pdev->dev, -1, NULL, THIS_MODULE, 0, &card);
- if (err) {
- dev_err(dev, "Failed to create soundcard structure\n");
- return err;
- }
-
- snd_card_set_dev(card, dev);
- strcpy(card->driver, "bcm2835");
- strcpy(card->shortname, "bcm2835 ALSA");
- sprintf(card->longname, "%s", card->shortname);
-
- err = snd_bcm2835_create(card, pdev, &chip);
- if (err < 0) {
- dev_err(dev, "Failed to create bcm2835 chip\n");
- goto err_free;
- }
-
- err = snd_bcm2835_new_pcm(chip);
- if (err < 0) {
- dev_err(dev, "Failed to create new bcm2835 pcm device\n");
- goto err_free;
- }
-
- err = snd_bcm2835_new_spdif_pcm(chip);
- if (err < 0) {
- dev_err(dev, "Failed to create new bcm2835 spdif pcm device\n");
- goto err_free;
- }
-
- err = snd_bcm2835_new_ctl(chip);
- if (err < 0) {
- dev_err(dev, "Failed to create new bcm2835 ctl\n");
- goto err_free;
- }
-
- for (i = 0; i < numchans; i++) {
- chip->avail_substreams |= (1 << i);
- chip->pdev[i] = pdev;
- }
-
- err = snd_card_register(card);
- if (err) {
- dev_err(dev, "Failed to register bcm2835 ALSA card\n");
- goto err_free;
- }
-
- g_card = card;
- g_chip = chip;
- platform_set_drvdata(pdev, card);
- audio_info("bcm2835 ALSA card created with %u channels\n", numchans);
-
- return 0;
-
-err_free:
- snd_card_free(card);
-
- return err;
-}
-
-static int snd_bcm2835_alsa_remove(struct platform_device *pdev)
-{
- int idx;
- void *drv_data;
-
- drv_data = platform_get_drvdata(pdev);
-
- if (drv_data == (void *)g_card) {
- /* This is the card device */
- snd_card_free((struct snd_card *)drv_data);
- g_card = NULL;
- g_chip = NULL;
- } else {
- idx = (int)(long)drv_data;
- if (g_card) {
- BUG_ON(!g_chip);
- /* We pass chip device numbers in audio ipc devices
- * other than the one we registered our card with
- */
- idx = (int)(long)drv_data;
- BUG_ON(!idx || idx > MAX_SUBSTREAMS);
- g_chip->avail_substreams &= ~(1 << idx);
-			/* There should be at least one substream registered
-			 * after we are done here, as it will be removed when
-			 * the *remove* is called for the card device
- */
- BUG_ON(!g_chip->avail_substreams);
- }
- }
-
- platform_set_drvdata(pdev, NULL);
-
- return 0;
-}
-
-#ifdef CONFIG_PM
-
-static int snd_bcm2835_alsa_suspend(struct platform_device *pdev,
- pm_message_t state)
-{
- return 0;
-}
-
-static int snd_bcm2835_alsa_resume(struct platform_device *pdev)
-{
- return 0;
-}
-
-#endif
-
-static const struct of_device_id snd_bcm2835_of_match_table[] = {
- { .compatible = "brcm,bcm2835-audio",},
- {},
-};
-MODULE_DEVICE_TABLE(of, snd_bcm2835_of_match_table);
-
-static struct platform_driver bcm2835_alsa0_driver = {
- .probe = snd_bcm2835_alsa_probe_dt,
- .remove = snd_bcm2835_alsa_remove,
-#ifdef CONFIG_PM
- .suspend = snd_bcm2835_alsa_suspend,
- .resume = snd_bcm2835_alsa_resume,
-#endif
- .driver = {
- .name = "bcm2835_AUD0",
- .owner = THIS_MODULE,
- .of_match_table = snd_bcm2835_of_match_table,
- },
-};
-
-static int bcm2835_alsa_device_init(void)
-{
- int retval;
-
- retval = platform_driver_register(&bcm2835_alsa0_driver);
- if (retval)
- pr_err("Error registering bcm2835_alsa0_driver %d .\n", retval);
-
- return retval;
-}
-
-static void bcm2835_alsa_device_exit(void)
-{
- platform_driver_unregister(&bcm2835_alsa0_driver);
-}
-
-late_initcall(bcm2835_alsa_device_init);
-module_exit(bcm2835_alsa_device_exit);
-
-MODULE_AUTHOR("Dom Cobley");
-MODULE_DESCRIPTION("Alsa driver for BCM2835 chip");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/bcm2835-audio/bcm2835.h b/drivers/staging/bcm2835-audio/bcm2835.h
deleted file mode 100644
index 36e3ef80e60c..000000000000
--- a/drivers/staging/bcm2835-audio/bcm2835.h
+++ /dev/null
@@ -1,167 +0,0 @@
-/*****************************************************************************
- * Copyright 2011 Broadcom Corporation. All rights reserved.
- *
- * Unless you and Broadcom execute a separate written software license
- * agreement governing use of this software, this software is licensed to you
- * under the terms of the GNU General Public License version 2, available at
- * http://www.broadcom.com/licenses/GPLv2.php (the "GPL").
- *
- * Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a
- * license other than the GPL, without Broadcom's express prior written
- * consent.
- *****************************************************************************/
-
-#ifndef __SOUND_ARM_BCM2835_H
-#define __SOUND_ARM_BCM2835_H
-
-#include <linux/device.h>
-#include <linux/list.h>
-#include <linux/interrupt.h>
-#include <linux/wait.h>
-#include <sound/core.h>
-#include <sound/initval.h>
-#include <sound/pcm.h>
-#include <sound/pcm_params.h>
-#include <sound/pcm-indirect.h>
-#include <linux/workqueue.h>
-
-/*
-#define AUDIO_DEBUG_ENABLE
-#define AUDIO_VERBOSE_DEBUG_ENABLE
- */
-
-/* Debug macros */
-
-#ifdef AUDIO_DEBUG_ENABLE
-#ifdef AUDIO_VERBOSE_DEBUG_ENABLE
-
-#define audio_debug(fmt, arg...) \
- printk(KERN_INFO"%s:%d " fmt, __func__, __LINE__, ##arg)
-
-#define audio_info(fmt, arg...) \
- printk(KERN_INFO"%s:%d " fmt, __func__, __LINE__, ##arg)
-
-#else
-
-#define audio_debug(fmt, arg...)
-
-#define audio_info(fmt, arg...)
-
-#endif /* AUDIO_VERBOSE_DEBUG_ENABLE */
-
-#else
-
-#define audio_debug(fmt, arg...)
-
-#define audio_info(fmt, arg...)
-
-#endif /* AUDIO_DEBUG_ENABLE */
-
-#define audio_error(fmt, arg...) \
- printk(KERN_ERR"%s:%d " fmt, __func__, __LINE__, ##arg)
-
-#define audio_warning(fmt, arg...) \
- printk(KERN_WARNING"%s:%d " fmt, __func__, __LINE__, ##arg)
-
-#define audio_alert(fmt, arg...) \
- printk(KERN_ALERT"%s:%d " fmt, __func__, __LINE__, ##arg)
-
-#define MAX_SUBSTREAMS (8)
-#define AVAIL_SUBSTREAMS_MASK (0xff)
-
-enum {
- CTRL_VOL_MUTE,
- CTRL_VOL_UNMUTE
-};
-
-/* macros for alsa2chip and chip2alsa, instead of functions */
-
-#define alsa2chip(vol) (uint)(-((vol << 8) / 100)) /* convert alsa to chip volume (defined as macro rather than function call) */
-#define chip2alsa(vol) -((vol * 100) >> 8) /* convert chip to alsa volume */
-
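/*
 * Editorial sketch of the two macros above: the ALSA side is attenuation in
 * 1/100 dB (0 = full scale, negative = quieter), the firmware side is the
 * same attenuation as a positive 8.8 fixed-point value.  The multiply and
 * divide mirror the macros' "<< 8" and ">> 8"; the values below are
 * examples, and -10240 (-102.40 dB) matches the 26214 the control code
 * writes for mute.
 */
#include <stdio.h>

static int alsa2chip(int vol) { return -(vol * 256 / 100); }
static int chip2alsa(int vol) { return -(vol * 100 / 256); }

int main(void)
{
	int alsa_vals[] = { 0, -100, -600, -10240 };
	unsigned int i;

	for (i = 0; i < sizeof(alsa_vals) / sizeof(alsa_vals[0]); i++) {
		int chip = alsa2chip(alsa_vals[i]);

		printf("alsa %6d -> chip %6d -> alsa %6d\n",
		       alsa_vals[i], chip, chip2alsa(chip));
	}
	return 0;
}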
-/* Audio output routing destinations */
-enum snd_bcm2835_route {
- AUDIO_DEST_AUTO = 0,
- AUDIO_DEST_HEADPHONES = 1,
- AUDIO_DEST_HDMI = 2,
- AUDIO_DEST_MAX,
-};
-
-enum snd_bcm2835_ctrl {
- PCM_PLAYBACK_VOLUME,
- PCM_PLAYBACK_MUTE,
- PCM_PLAYBACK_DEVICE,
-};
-
-/* definition of the chip-specific record */
-struct bcm2835_chip {
- struct snd_card *card;
- struct snd_pcm *pcm;
- struct snd_pcm *pcm_spdif;
-	/* Bitmap of valid reg_base and irq numbers */
- unsigned int avail_substreams;
- struct platform_device *pdev[MAX_SUBSTREAMS];
- struct bcm2835_alsa_stream *alsa_stream[MAX_SUBSTREAMS];
-
- int volume;
-	int old_volume; /* stores the volume value whilst muted */
- int dest;
- int mute;
-
- unsigned int opened;
- unsigned int spdif_status;
- struct mutex audio_mutex;
-};
-
-struct bcm2835_alsa_stream {
- struct bcm2835_chip *chip;
- struct snd_pcm_substream *substream;
- struct snd_pcm_indirect pcm_indirect;
-
- struct semaphore buffers_update_sem;
- struct semaphore control_sem;
- spinlock_t lock;
- volatile unsigned int control;
- volatile unsigned int status;
-
- int open;
- int running;
- int draining;
-
- int channels;
- int params_rate;
- int pcm_format_width;
-
- unsigned int pos;
- unsigned int buffer_size;
- unsigned int period_size;
-
- atomic_t retrieved;
- struct bcm2835_audio_instance *instance;
- struct workqueue_struct *my_wq;
- int idx;
-};
-
-int snd_bcm2835_new_ctl(struct bcm2835_chip *chip);
-int snd_bcm2835_new_pcm(struct bcm2835_chip *chip);
-int snd_bcm2835_new_spdif_pcm(struct bcm2835_chip *chip);
-
-int bcm2835_audio_open(struct bcm2835_alsa_stream *alsa_stream);
-int bcm2835_audio_close(struct bcm2835_alsa_stream *alsa_stream);
-int bcm2835_audio_set_params(struct bcm2835_alsa_stream *alsa_stream,
- unsigned int channels, unsigned int samplerate,
- unsigned int bps);
-int bcm2835_audio_setup(struct bcm2835_alsa_stream *alsa_stream);
-int bcm2835_audio_start(struct bcm2835_alsa_stream *alsa_stream);
-int bcm2835_audio_stop(struct bcm2835_alsa_stream *alsa_stream);
-int bcm2835_audio_set_ctls(struct bcm2835_chip *chip);
-int bcm2835_audio_write(struct bcm2835_alsa_stream *alsa_stream,
- unsigned int count,
- void *src);
-void bcm2835_playback_fifo(struct bcm2835_alsa_stream *alsa_stream);
-unsigned int bcm2835_audio_retrieve_buffers(struct bcm2835_alsa_stream *alsa_stream);
-void bcm2835_audio_flush_buffers(struct bcm2835_alsa_stream *alsa_stream);
-void bcm2835_audio_flush_playback_buffers(struct bcm2835_alsa_stream *alsa_stream);
-
-#endif /* __SOUND_ARM_BCM2835_H */
diff --git a/drivers/staging/ccree/Documentation/devicetree/bindings/crypto/arm-cryptocell.txt b/drivers/staging/ccree/Documentation/devicetree/bindings/crypto/arm-cryptocell.txt
new file mode 100644
index 000000000000..2ea65175c032
--- /dev/null
+++ b/drivers/staging/ccree/Documentation/devicetree/bindings/crypto/arm-cryptocell.txt
@@ -0,0 +1,27 @@
+Arm TrustZone CryptoCell cryptographic accelerators
+
+Required properties:
+- compatible: must be "arm,cryptocell-712-ree".
+- reg: shall contain base register location and length.
+ Typically length is 0x10000.
+- interrupts: shall contain the interrupt for the device.
+
+Optional properties:
+- interrupt-parent: can designate the interrupt controller the
+ device interrupt is connected to, if needed.
+- clocks: may contain the clock handling the device, if needed.
+- power-domains: may contain a reference to the PM domain, if applicable.
+
+
+Examples:
+
+Zynq FPGA device
+----------------
+
+ arm_cc7x: arm_cc7x@80000000 {
+ compatible = "arm,cryptocell-712-ree";
+ interrupt-parent = <&intc>;
+ interrupts = < 0 30 4 >;
+ reg = < 0x80000000 0x10000 >;
+ };
+
diff --git a/drivers/staging/ccree/Kconfig b/drivers/staging/ccree/Kconfig
new file mode 100644
index 000000000000..ae627049c499
--- /dev/null
+++ b/drivers/staging/ccree/Kconfig
@@ -0,0 +1,43 @@
+config CRYPTO_DEV_CCREE
+ tristate "Support for ARM TrustZone CryptoCell C7XX family of Crypto accelerators"
+ depends on CRYPTO_HW && OF && HAS_DMA
+ default n
+ select CRYPTO_HASH
+ select CRYPTO_BLKCIPHER
+ select CRYPTO_DES
+ select CRYPTO_AEAD
+ select CRYPTO_AUTHENC
+ select CRYPTO_SHA1
+ select CRYPTO_MD5
+ select CRYPTO_SHA256
+ select CRYPTO_SHA512
+ select CRYPTO_HMAC
+ select CRYPTO_AES
+ select CRYPTO_CBC
+ select CRYPTO_ECB
+ select CRYPTO_CTR
+ select CRYPTO_XTS
+ help
+ Say 'Y' to enable a driver for the Arm TrustZone CryptoCell
+ C7xx. Currently only the CryptoCell 712 REE is supported.
+ Choose this if you wish to use hardware acceleration of
+ cryptographic operations on the system REE.
+ If unsure say Y.
+
+config CCREE_FIPS_SUPPORT
+ bool "Turn on CryptoCell 7XX REE FIPS mode support"
+ depends on CRYPTO_DEV_CCREE
+ default n
+ help
+ Say 'Y' to enable support for FIPS compliant mode by the
+ CCREE driver.
+ If unsure say N.
+
+config CCREE_DISABLE_COHERENT_DMA_OPS
+ bool "Disable Coherent DMA operations for the CCREE driver"
+ depends on CRYPTO_DEV_CCREE
+ default n
+ help
+ Say 'Y' to disable the use of coherent DMA operations by the
+ CCREE driver for debugging purposes.
+ If unsure say N.
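+
+# Illustrative sketch, not part of the original Kconfig: a .config fragment
+# that would build this driver as a module with FIPS support enabled, assuming
+# the CRYPTO_HW, OF and HAS_DMA dependencies are already satisfied:
+#
+#	CONFIG_CRYPTO_DEV_CCREE=m
+#	CONFIG_CCREE_FIPS_SUPPORT=y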
diff --git a/drivers/staging/ccree/Makefile b/drivers/staging/ccree/Makefile
new file mode 100644
index 000000000000..44f3e3e33989
--- /dev/null
+++ b/drivers/staging/ccree/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_CRYPTO_DEV_CCREE) := ccree.o
+ccree-y := ssi_driver.o ssi_sysfs.o ssi_buffer_mgr.o ssi_request_mgr.o ssi_cipher.o ssi_hash.o ssi_aead.o ssi_ivgen.o ssi_sram_mgr.o ssi_pm.o ssi_pm_ext.o
+ccree-$(CONFIG_CCREE_FIPS_SUPPORT) += ssi_fips.o ssi_fips_ll.o ssi_fips_ext.o ssi_fips_local.o
diff --git a/drivers/staging/ccree/TODO b/drivers/staging/ccree/TODO
new file mode 100644
index 000000000000..c9f5754d062d
--- /dev/null
+++ b/drivers/staging/ccree/TODO
@@ -0,0 +1,30 @@
+
+
+*************************************************************************
+* *
+* Arm Trust Zone CryptoCell REE Linux driver upstreaming TODO items *
+* *
+*************************************************************************
+
+ccree specific items
+a.k.a. things that need fixing for this driver to move out of staging
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+1. Move to using Crypto Engine to handle backlog queueing.
+2. Remove synchronous algorithm support leftovers.
+3. Split platform-specific code for FIPS and power management into separate platform modules.
+4. Drop legacy kernel support code.
+5. Move most (all?) #ifdef CONFIG into inline functions.
+6. Remove all unused definitions.
+7. Refactor to accommodate newer/older HW revisions besides the 712.
+8. Handle the many checkpatch errors.
+9. Implement ahash import/export correctly.
+10. Go through a proper review of DT bindings and sysfs ABI.
+11. Sort out FIPS mode: bake tests into testmgr, sort out behaviour on error,
+    figure out whether a 3DES weak key check is needed.
+
+Kernel infrastructure items
+a.k.a. things we either need to fix in the kernel or need to understand what we're doing wrong
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+1. ahash import/export context has a PAGE_SIZE/8 size limit. We need more.
+2. Crypto Engine seems to be built for hardware with a queue depth of 1; ours is 600+.
diff --git a/drivers/staging/ccree/cc_bitops.h b/drivers/staging/ccree/cc_bitops.h
new file mode 100644
index 000000000000..3a39565ef73b
--- /dev/null
+++ b/drivers/staging/ccree/cc_bitops.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (C) 2012-2017 ARM Limited or its affiliates.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*!
+ * \file cc_bitops.h
+ * Bit-field operation macros.
+ */
+#ifndef _CC_BITOPS_H_
+#define _CC_BITOPS_H_
+
+#define BITMASK(mask_size) (((mask_size) < 32) ? \
+ ((1UL << (mask_size)) - 1) : 0xFFFFFFFFUL)
+#define BITMASK_AT(mask_size, mask_offset) (BITMASK(mask_size) << (mask_offset))
+
+#define BITFIELD_GET(word, bit_offset, bit_size) \
+ (((word) >> (bit_offset)) & BITMASK(bit_size))
+#define BITFIELD_SET(word, bit_offset, bit_size, new_val) do { \
+ word = ((word) & ~BITMASK_AT(bit_size, bit_offset)) | \
+ (((new_val) & BITMASK(bit_size)) << (bit_offset)); \
+} while (0)
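+
+/* Usage sketch (illustrative only, not from the original sources): read and
+ * update a 4-bit field starting at bit offset 8 of a shadow register word.
+ * With reg = 0xABCD1234, BITFIELD_GET() below yields 0x2 and BITFIELD_SET()
+ * leaves reg == 0xABCD1F34.
+ *
+ *	uint32_t reg = 0xABCD1234;
+ *	uint32_t fld = BITFIELD_GET(reg, 8, 4);
+ *	BITFIELD_SET(reg, 8, 4, 0xF);
+ */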
+
+/* Is val aligned to "align" ("align" must be power of 2) */
+#ifndef IS_ALIGNED
+#define IS_ALIGNED(val, align) \
+ (((uintptr_t)(val) & ((align) - 1)) == 0)
+#endif
+
+#define SWAP_ENDIAN(word) \
+ (((word) >> 24) | (((word) & 0x00FF0000) >> 8) | \
+ (((word) & 0x0000FF00) << 8) | (((word) & 0x000000FF) << 24))
+
+#ifdef BIG__ENDIAN
+#define SWAP_TO_LE(word) SWAP_ENDIAN(word)
+#define SWAP_TO_BE(word) word
+#else
+#define SWAP_TO_LE(word) word
+#define SWAP_TO_BE(word) SWAP_ENDIAN(word)
+#endif
+
+
+
+/* Is val a multiple of "mult" ("mult" must be power of 2) */
+#define IS_MULT(val, mult) \
+ (((val) & ((mult) - 1)) == 0)
+
+#define IS_NULL_ADDR(adr) \
+ (!(adr))
+
+#endif /*_CC_BITOPS_H_*/
diff --git a/drivers/staging/ccree/cc_crypto_ctx.h b/drivers/staging/ccree/cc_crypto_ctx.h
new file mode 100644
index 000000000000..9e10b2670313
--- /dev/null
+++ b/drivers/staging/ccree/cc_crypto_ctx.h
@@ -0,0 +1,299 @@
+/*
+ * Copyright (C) 2012-2017 ARM Limited or its affiliates.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+#ifndef _CC_CRYPTO_CTX_H_
+#define _CC_CRYPTO_CTX_H_
+
+#ifdef __KERNEL__
+#include <linux/types.h>
+#define INT32_MAX 0x7FFFFFFFL
+#else
+#include <stdint.h>
+#endif
+
+
+#ifndef max
+#define max(a, b) ((a) > (b) ? (a) : (b))
+#define min(a, b) ((a) < (b) ? (a) : (b))
+#endif
+
+/* context size */
+#ifndef CC_CTX_SIZE_LOG2
+#if (CC_SUPPORT_SHA > 256)
+#define CC_CTX_SIZE_LOG2 8
+#else
+#define CC_CTX_SIZE_LOG2 7
+#endif
+#endif
+#define CC_CTX_SIZE (1<<CC_CTX_SIZE_LOG2)
+#define CC_DRV_CTX_SIZE_WORDS (CC_CTX_SIZE >> 2)
+
+#define CC_DRV_DES_IV_SIZE 8
+#define CC_DRV_DES_BLOCK_SIZE 8
+
+#define CC_DRV_DES_ONE_KEY_SIZE 8
+#define CC_DRV_DES_DOUBLE_KEY_SIZE 16
+#define CC_DRV_DES_TRIPLE_KEY_SIZE 24
+#define CC_DRV_DES_KEY_SIZE_MAX CC_DRV_DES_TRIPLE_KEY_SIZE
+
+#define CC_AES_IV_SIZE 16
+#define CC_AES_IV_SIZE_WORDS (CC_AES_IV_SIZE >> 2)
+
+#define CC_AES_BLOCK_SIZE 16
+#define CC_AES_BLOCK_SIZE_WORDS 4
+
+#define CC_AES_128_BIT_KEY_SIZE 16
+#define CC_AES_128_BIT_KEY_SIZE_WORDS (CC_AES_128_BIT_KEY_SIZE >> 2)
+#define CC_AES_192_BIT_KEY_SIZE 24
+#define CC_AES_192_BIT_KEY_SIZE_WORDS (CC_AES_192_BIT_KEY_SIZE >> 2)
+#define CC_AES_256_BIT_KEY_SIZE 32
+#define CC_AES_256_BIT_KEY_SIZE_WORDS (CC_AES_256_BIT_KEY_SIZE >> 2)
+#define CC_AES_KEY_SIZE_MAX CC_AES_256_BIT_KEY_SIZE
+#define CC_AES_KEY_SIZE_WORDS_MAX (CC_AES_KEY_SIZE_MAX >> 2)
+
+#define CC_MD5_DIGEST_SIZE 16
+#define CC_SHA1_DIGEST_SIZE 20
+#define CC_SHA224_DIGEST_SIZE 28
+#define CC_SHA256_DIGEST_SIZE 32
+#define CC_SHA256_DIGEST_SIZE_IN_WORDS 8
+#define CC_SHA384_DIGEST_SIZE 48
+#define CC_SHA512_DIGEST_SIZE 64
+
+#define CC_SHA1_BLOCK_SIZE 64
+#define CC_SHA1_BLOCK_SIZE_IN_WORDS 16
+#define CC_MD5_BLOCK_SIZE 64
+#define CC_MD5_BLOCK_SIZE_IN_WORDS 16
+#define CC_SHA224_BLOCK_SIZE 64
+#define CC_SHA256_BLOCK_SIZE 64
+#define CC_SHA256_BLOCK_SIZE_IN_WORDS 16
+#define CC_SHA1_224_256_BLOCK_SIZE 64
+#define CC_SHA384_BLOCK_SIZE 128
+#define CC_SHA512_BLOCK_SIZE 128
+
+#if (CC_SUPPORT_SHA > 256)
+#define CC_DIGEST_SIZE_MAX CC_SHA512_DIGEST_SIZE
+#define CC_HASH_BLOCK_SIZE_MAX CC_SHA512_BLOCK_SIZE /*1024b*/
+#else /* Only up to SHA256 */
+#define CC_DIGEST_SIZE_MAX CC_SHA256_DIGEST_SIZE
+#define CC_HASH_BLOCK_SIZE_MAX CC_SHA256_BLOCK_SIZE /*512b*/
+#endif
+
+#define CC_HMAC_BLOCK_SIZE_MAX CC_HASH_BLOCK_SIZE_MAX
+
+#define CC_MULTI2_SYSTEM_KEY_SIZE 32
+#define CC_MULTI2_DATA_KEY_SIZE 8
+#define CC_MULTI2_SYSTEM_N_DATA_KEY_SIZE (CC_MULTI2_SYSTEM_KEY_SIZE + CC_MULTI2_DATA_KEY_SIZE)
+#define CC_MULTI2_BLOCK_SIZE 8
+#define CC_MULTI2_IV_SIZE 8
+#define CC_MULTI2_MIN_NUM_ROUNDS 8
+#define CC_MULTI2_MAX_NUM_ROUNDS 128
+
+
+#define CC_DRV_ALG_MAX_BLOCK_SIZE CC_HASH_BLOCK_SIZE_MAX
+
+
+enum drv_engine_type {
+ DRV_ENGINE_NULL = 0,
+ DRV_ENGINE_AES = 1,
+ DRV_ENGINE_DES = 2,
+ DRV_ENGINE_HASH = 3,
+ DRV_ENGINE_RC4 = 4,
+ DRV_ENGINE_DOUT = 5,
+ DRV_ENGINE_RESERVE32B = INT32_MAX,
+};
+
+enum drv_crypto_alg {
+ DRV_CRYPTO_ALG_NULL = -1,
+ DRV_CRYPTO_ALG_AES = 0,
+ DRV_CRYPTO_ALG_DES = 1,
+ DRV_CRYPTO_ALG_HASH = 2,
+ DRV_CRYPTO_ALG_C2 = 3,
+ DRV_CRYPTO_ALG_HMAC = 4,
+ DRV_CRYPTO_ALG_AEAD = 5,
+ DRV_CRYPTO_ALG_BYPASS = 6,
+ DRV_CRYPTO_ALG_NUM = 7,
+ DRV_CRYPTO_ALG_RESERVE32B = INT32_MAX
+};
+
+enum drv_crypto_direction {
+ DRV_CRYPTO_DIRECTION_NULL = -1,
+ DRV_CRYPTO_DIRECTION_ENCRYPT = 0,
+ DRV_CRYPTO_DIRECTION_DECRYPT = 1,
+ DRV_CRYPTO_DIRECTION_DECRYPT_ENCRYPT = 3,
+ DRV_CRYPTO_DIRECTION_RESERVE32B = INT32_MAX
+};
+
+enum drv_cipher_mode {
+ DRV_CIPHER_NULL_MODE = -1,
+ DRV_CIPHER_ECB = 0,
+ DRV_CIPHER_CBC = 1,
+ DRV_CIPHER_CTR = 2,
+ DRV_CIPHER_CBC_MAC = 3,
+ DRV_CIPHER_XTS = 4,
+ DRV_CIPHER_XCBC_MAC = 5,
+ DRV_CIPHER_OFB = 6,
+ DRV_CIPHER_CMAC = 7,
+ DRV_CIPHER_CCM = 8,
+ DRV_CIPHER_CBC_CTS = 11,
+ DRV_CIPHER_GCTR = 12,
+ DRV_CIPHER_ESSIV = 13,
+ DRV_CIPHER_BITLOCKER = 14,
+ DRV_CIPHER_RESERVE32B = INT32_MAX
+};
+
+enum drv_hash_mode {
+ DRV_HASH_NULL = -1,
+ DRV_HASH_SHA1 = 0,
+ DRV_HASH_SHA256 = 1,
+ DRV_HASH_SHA224 = 2,
+ DRV_HASH_SHA512 = 3,
+ DRV_HASH_SHA384 = 4,
+ DRV_HASH_MD5 = 5,
+ DRV_HASH_CBC_MAC = 6,
+ DRV_HASH_XCBC_MAC = 7,
+ DRV_HASH_CMAC = 8,
+ DRV_HASH_MODE_NUM = 9,
+ DRV_HASH_RESERVE32B = INT32_MAX
+};
+
+enum drv_hash_hw_mode {
+ DRV_HASH_HW_MD5 = 0,
+ DRV_HASH_HW_SHA1 = 1,
+ DRV_HASH_HW_SHA256 = 2,
+ DRV_HASH_HW_SHA224 = 10,
+ DRV_HASH_HW_SHA512 = 4,
+ DRV_HASH_HW_SHA384 = 12,
+ DRV_HASH_HW_GHASH = 6,
+ DRV_HASH_HW_RESERVE32B = INT32_MAX
+};
+
+enum drv_multi2_mode {
+ DRV_MULTI2_NULL = -1,
+ DRV_MULTI2_ECB = 0,
+ DRV_MULTI2_CBC = 1,
+ DRV_MULTI2_OFB = 2,
+ DRV_MULTI2_RESERVE32B = INT32_MAX
+};
+
+
+/* drv_crypto_key_type[1:0] is mapped to cipher_do[1:0] */
+/* drv_crypto_key_type[2] is mapped to cipher_config2 */
+enum drv_crypto_key_type {
+ DRV_NULL_KEY = -1,
+ DRV_USER_KEY = 0, /* 0x000 */
+ DRV_ROOT_KEY = 1, /* 0x001 */
+ DRV_PROVISIONING_KEY = 2, /* 0x010 */
+ DRV_SESSION_KEY = 3, /* 0x011 */
+ DRV_APPLET_KEY = 4, /* NA */
+ DRV_PLATFORM_KEY = 5, /* 0x101 */
+ DRV_CUSTOMER_KEY = 6, /* 0x110 */
+ DRV_END_OF_KEYS = INT32_MAX,
+};
+
+enum drv_crypto_padding_type {
+ DRV_PADDING_NONE = 0,
+ DRV_PADDING_PKCS7 = 1,
+ DRV_PADDING_RESERVE32B = INT32_MAX
+};
+
+/*******************************************************************/
+/***************** DESCRIPTOR BASED CONTEXTS ***********************/
+/*******************************************************************/
+
+ /* Generic context ("super-class") */
+struct drv_ctx_generic {
+ enum drv_crypto_alg alg;
+} __attribute__((__may_alias__));
+
+
+struct drv_ctx_hash {
+ enum drv_crypto_alg alg; /* DRV_CRYPTO_ALG_HASH */
+ enum drv_hash_mode mode;
+ uint8_t digest[CC_DIGEST_SIZE_MAX];
+ /* reserve to end of allocated context size */
+ uint8_t reserved[CC_CTX_SIZE - 2 * sizeof(uint32_t) -
+ CC_DIGEST_SIZE_MAX];
+};
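+
+/* Sanity-check sketch (illustrative only, not from the original sources): the
+ * reserved tail is sized so that each descriptor-based context occupies
+ * exactly CC_CTX_SIZE bytes. A compile-time check such as the following would
+ * catch a layout mismatch, assuming BUILD_BUG_ON() is available to the
+ * including C file:
+ *
+ *	BUILD_BUG_ON(sizeof(struct drv_ctx_hash) != CC_CTX_SIZE);
+ */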
+
+/* Note: drv_ctx_hmac must have the same layout as drv_ctx_hash except for
+ * the k0 and k0_size fields */
+struct drv_ctx_hmac {
+ enum drv_crypto_alg alg; /* DRV_CRYPTO_ALG_HMAC */
+ enum drv_hash_mode mode;
+ uint8_t digest[CC_DIGEST_SIZE_MAX];
+ uint32_t k0[CC_HMAC_BLOCK_SIZE_MAX/sizeof(uint32_t)];
+ uint32_t k0_size;
+ /* reserve to end of allocated context size */
+ uint8_t reserved[CC_CTX_SIZE - 3 * sizeof(uint32_t) -
+ CC_DIGEST_SIZE_MAX - CC_HMAC_BLOCK_SIZE_MAX];
+};
+
+struct drv_ctx_cipher {
+ enum drv_crypto_alg alg; /* DRV_CRYPTO_ALG_AES */
+ enum drv_cipher_mode mode;
+ enum drv_crypto_direction direction;
+ enum drv_crypto_key_type crypto_key_type;
+ enum drv_crypto_padding_type padding_type;
+ uint32_t key_size; /* numeric value in bytes */
+ uint32_t data_unit_size; /* required for XTS */
+ /* block_state is the AES engine block state.
+ * It is used by the host to pass IV or counter at initialization.
+ * It is used by SeP for intermediate block chaining state and for
+ * returning MAC algorithms results. */
+ uint8_t block_state[CC_AES_BLOCK_SIZE];
+ uint8_t key[CC_AES_KEY_SIZE_MAX];
+ uint8_t xex_key[CC_AES_KEY_SIZE_MAX];
+ /* reserve to end of allocated context size */
+ uint32_t reserved[CC_DRV_CTX_SIZE_WORDS - 7 -
+ CC_AES_BLOCK_SIZE/sizeof(uint32_t) - 2 *
+ (CC_AES_KEY_SIZE_MAX/sizeof(uint32_t))];
+};
+
+/* authentication and encryption with associated data class */
+struct drv_ctx_aead {
+ enum drv_crypto_alg alg; /* DRV_CRYPTO_ALG_AES */
+ enum drv_cipher_mode mode;
+ enum drv_crypto_direction direction;
+ uint32_t key_size; /* numeric value in bytes */
+ uint32_t nonce_size; /* nonce size (octets) */
+ uint32_t header_size; /* finite additional data size (octets) */
+ uint32_t text_size; /* finite text data size (octets) */
+ uint32_t tag_size; /* mac size, element of {4, 6, 8, 10, 12, 14, 16} */
+ /* block_state1/2 is the AES engine block state */
+ uint8_t block_state[CC_AES_BLOCK_SIZE];
+ uint8_t mac_state[CC_AES_BLOCK_SIZE]; /* MAC result */
+ uint8_t nonce[CC_AES_BLOCK_SIZE]; /* nonce buffer */
+ uint8_t key[CC_AES_KEY_SIZE_MAX];
+ /* reserve to end of allocated context size */
+ uint32_t reserved[CC_DRV_CTX_SIZE_WORDS - 8 -
+ 3 * (CC_AES_BLOCK_SIZE/sizeof(uint32_t)) -
+ CC_AES_KEY_SIZE_MAX/sizeof(uint32_t)];
+};
+
+/*******************************************************************/
+/***************** MESSAGE BASED CONTEXTS **************************/
+/*******************************************************************/
+
+
+/* Get the address of a @member within a given @ctx address
+ @ctx: The context address
+ @type: Type of context structure
+ @member: Associated context field */
+#define GET_CTX_FIELD_ADDR(ctx, type, member) (ctx + offsetof(type, member))
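+
+/* Usage sketch (illustrative only, not from the original sources): compute the
+ * address of the digest field inside a hash context, where ctx_addr is a
+ * hypothetical unsigned long holding the context address:
+ *
+ *	unsigned long digest_addr =
+ *		GET_CTX_FIELD_ADDR(ctx_addr, struct drv_ctx_hash, digest);
+ */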
+
+#endif /* _CC_CRYPTO_CTX_H_ */
+
diff --git a/drivers/staging/ccree/cc_hal.h b/drivers/staging/ccree/cc_hal.h
new file mode 100644
index 000000000000..75a0ce3e80d6
--- /dev/null
+++ b/drivers/staging/ccree/cc_hal.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2012-2017 ARM Limited or its affiliates.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/* pseudo cc_hal.h for cc7x_perf_test_driver (to be able to include code from CC drivers) */
+
+#ifndef __CC_HAL_H__
+#define __CC_HAL_H__
+
+#include <linux/io.h>
+
+#define READ_REGISTER(_addr) ioread32((_addr))
+#define WRITE_REGISTER(_addr, _data) iowrite32((_data), (_addr))
+
+#define CC_HAL_WRITE_REGISTER(offset, val) WRITE_REGISTER(cc_base + offset, val)
+#define CC_HAL_READ_REGISTER(offset) READ_REGISTER(cc_base + offset)
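+
+/* Usage sketch (illustrative only, not from the original sources): both macros
+ * assume a 'cc_base' __iomem pointer (the ioremap()ed register base) is in
+ * scope; the register offset below is a hypothetical placeholder:
+ *
+ *	u32 val = CC_HAL_READ_REGISTER(some_reg_offset);
+ *	CC_HAL_WRITE_REGISTER(some_reg_offset, val | 0x1);
+ */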
+
+#endif
diff --git a/drivers/staging/ccree/cc_hw_queue_defs.h b/drivers/staging/ccree/cc_hw_queue_defs.h
new file mode 100644
index 000000000000..fbaf1b6fcd90
--- /dev/null
+++ b/drivers/staging/ccree/cc_hw_queue_defs.h
@@ -0,0 +1,603 @@
+/*
+ * Copyright (C) 2012-2017 ARM Limited or its affiliates.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __CC_HW_QUEUE_DEFS_H__
+#define __CC_HW_QUEUE_DEFS_H__
+
+#include "cc_pal_log.h"
+#include "cc_regs.h"
+#include "dx_crys_kernel.h"
+
+#ifdef __KERNEL__
+#include <linux/types.h>
+#define UINT32_MAX 0xFFFFFFFFL
+#define INT32_MAX 0x7FFFFFFFL
+#define UINT16_MAX 0xFFFFL
+#else
+#include <stdint.h>
+#endif
+
+/******************************************************************************
+* DEFINITIONS
+******************************************************************************/
+
+
+/* Dma AXI Secure bit */
+#define AXI_SECURE 0
+#define AXI_NOT_SECURE 1
+
+#define HW_DESC_SIZE_WORDS 6
+#define HW_QUEUE_SLOTS_MAX 15 /* Max. available slots in HW queue */
+
+#define _HW_DESC_MONITOR_KICK 0x7FFFC00
+
+/******************************************************************************
+* TYPE DEFINITIONS
+******************************************************************************/
+
+typedef struct HwDesc {
+ uint32_t word[HW_DESC_SIZE_WORDS];
+} HwDesc_s;
+
+typedef enum DescDirection {
+ DESC_DIRECTION_ILLEGAL = -1,
+ DESC_DIRECTION_ENCRYPT_ENCRYPT = 0,
+ DESC_DIRECTION_DECRYPT_DECRYPT = 1,
+ DESC_DIRECTION_DECRYPT_ENCRYPT = 3,
+ DESC_DIRECTION_END = INT32_MAX,
+}DescDirection_t;
+
+typedef enum DmaMode {
+ DMA_MODE_NULL = -1,
+ NO_DMA = 0,
+ DMA_SRAM = 1,
+ DMA_DLLI = 2,
+ DMA_MLLI = 3,
+ DmaMode_OPTIONTS,
+ DmaMode_END = INT32_MAX,
+}DmaMode_t;
+
+typedef enum FlowMode {
+ FLOW_MODE_NULL = -1,
+ /* data flows */
+ BYPASS = 0,
+ DIN_AES_DOUT = 1,
+ AES_to_HASH = 2,
+ AES_and_HASH = 3,
+ DIN_DES_DOUT = 4,
+ DES_to_HASH = 5,
+ DES_and_HASH = 6,
+ DIN_HASH = 7,
+ DIN_HASH_and_BYPASS = 8,
+ AESMAC_and_BYPASS = 9,
+ AES_to_HASH_and_DOUT = 10,
+ DIN_RC4_DOUT = 11,
+ DES_to_HASH_and_DOUT = 12,
+ AES_to_AES_to_HASH_and_DOUT = 13,
+ AES_to_AES_to_HASH = 14,
+ AES_to_HASH_and_AES = 15,
+ DIN_MULTI2_DOUT = 16,
+ DIN_AES_AESMAC = 17,
+ HASH_to_DOUT = 18,
+ /* setup flows */
+ S_DIN_to_AES = 32,
+ S_DIN_to_AES2 = 33,
+ S_DIN_to_DES = 34,
+ S_DIN_to_RC4 = 35,
+ S_DIN_to_MULTI2 = 36,
+ S_DIN_to_HASH = 37,
+ S_AES_to_DOUT = 38,
+ S_AES2_to_DOUT = 39,
+ S_RC4_to_DOUT = 41,
+ S_DES_to_DOUT = 42,
+ S_HASH_to_DOUT = 43,
+ SET_FLOW_ID = 44,
+ FlowMode_OPTIONTS,
+ FlowMode_END = INT32_MAX,
+}FlowMode_t;
+
+typedef enum TunnelOp {
+ TUNNEL_OP_INVALID = -1,
+ TUNNEL_OFF = 0,
+ TUNNEL_ON = 1,
+ TunnelOp_OPTIONS,
+ TunnelOp_END = INT32_MAX,
+} TunnelOp_t;
+
+typedef enum SetupOp {
+ SETUP_LOAD_NOP = 0,
+ SETUP_LOAD_STATE0 = 1,
+ SETUP_LOAD_STATE1 = 2,
+ SETUP_LOAD_STATE2 = 3,
+ SETUP_LOAD_KEY0 = 4,
+ SETUP_LOAD_XEX_KEY = 5,
+ SETUP_WRITE_STATE0 = 8,
+ SETUP_WRITE_STATE1 = 9,
+ SETUP_WRITE_STATE2 = 10,
+ SETUP_WRITE_STATE3 = 11,
+ setupOp_OPTIONTS,
+ setupOp_END = INT32_MAX,
+}SetupOp_t;
+
+enum AesMacSelector {
+ AES_SK = 1,
+ AES_CMAC_INIT = 2,
+ AES_CMAC_SIZE0 = 3,
+ AesMacEnd = INT32_MAX,
+};
+
+#define HW_KEY_MASK_CIPHER_DO 0x3
+#define HW_KEY_SHIFT_CIPHER_CFG2 2
+
+
+/* HwCryptoKey[1:0] is mapped to cipher_do[1:0] */
+/* HwCryptoKey[2:3] is mapped to cipher_config2[1:0] */
+typedef enum HwCryptoKey {
+ USER_KEY = 0, /* 0x0000 */
+ ROOT_KEY = 1, /* 0x0001 */
+ PROVISIONING_KEY = 2, /* 0x0010 */ /* ==KCP */
+ SESSION_KEY = 3, /* 0x0011 */
+ RESERVED_KEY = 4, /* NA */
+ PLATFORM_KEY = 5, /* 0x0101 */
+ CUSTOMER_KEY = 6, /* 0x0110 */
+ KFDE0_KEY = 7, /* 0x0111 */
+ KFDE1_KEY = 9, /* 0x1001 */
+ KFDE2_KEY = 10, /* 0x1010 */
+ KFDE3_KEY = 11, /* 0x1011 */
+ END_OF_KEYS = INT32_MAX,
+}HwCryptoKey_t;
+
+typedef enum HwAesKeySize {
+ AES_128_KEY = 0,
+ AES_192_KEY = 1,
+ AES_256_KEY = 2,
+ END_OF_AES_KEYS = INT32_MAX,
+}HwAesKeySize_t;
+
+typedef enum HwDesKeySize {
+ DES_ONE_KEY = 0,
+ DES_TWO_KEYS = 1,
+ DES_THREE_KEYS = 2,
+ END_OF_DES_KEYS = INT32_MAX,
+}HwDesKeySize_t;
+
+/*****************************/
+/* Descriptor packing macros */
+/*****************************/
+
+#define GET_HW_Q_DESC_WORD_IDX(descWordIdx) (CC_REG_OFFSET(CRY_KERNEL, DSCRPTR_QUEUE_WORD ## descWordIdx) )
+
+#define HW_DESC_INIT(pDesc) do { \
+ (pDesc)->word[0] = 0; \
+ (pDesc)->word[1] = 0; \
+ (pDesc)->word[2] = 0; \
+ (pDesc)->word[3] = 0; \
+ (pDesc)->word[4] = 0; \
+ (pDesc)->word[5] = 0; \
+} while (0)
+
+/* HW descriptor debug functions */
+int createDetailedDump(HwDesc_s *pDesc);
+void descriptor_log(HwDesc_s *desc);
+
+#if defined(HW_DESCRIPTOR_LOG) || defined(HW_DESC_DUMP_HOST_BUF)
+#define LOG_HW_DESC(pDesc) descriptor_log(pDesc)
+#else
+#define LOG_HW_DESC(pDesc)
+#endif
+
+#if (CC_PAL_MAX_LOG_LEVEL >= CC_PAL_LOG_LEVEL_TRACE) || defined(OEMFW_LOG)
+
+#ifdef UART_PRINTF
+#define CREATE_DETAILED_DUMP(pDesc) createDetailedDump(pDesc)
+#else
+#define CREATE_DETAILED_DUMP(pDesc)
+#endif
+
+#define HW_DESC_DUMP(pDesc) do { \
+ CC_PAL_LOG_TRACE("\n---------------------------------------------------\n"); \
+ CREATE_DETAILED_DUMP(pDesc); \
+ CC_PAL_LOG_TRACE("0x%08X, ", (unsigned int)(pDesc)->word[0]); \
+ CC_PAL_LOG_TRACE("0x%08X, ", (unsigned int)(pDesc)->word[1]); \
+ CC_PAL_LOG_TRACE("0x%08X, ", (unsigned int)(pDesc)->word[2]); \
+ CC_PAL_LOG_TRACE("0x%08X, ", (unsigned int)(pDesc)->word[3]); \
+ CC_PAL_LOG_TRACE("0x%08X, ", (unsigned int)(pDesc)->word[4]); \
+ CC_PAL_LOG_TRACE("0x%08X\n", (unsigned int)(pDesc)->word[5]); \
+ CC_PAL_LOG_TRACE("---------------------------------------------------\n\n"); \
+} while (0)
+
+#else
+#define HW_DESC_DUMP(pDesc) do {} while (0)
+#endif
+
+
+/*!
+ * This macro indicates the end of the current HW descriptor flow and releases the HW engines.
+ *
+ * \param pDesc pointer HW descriptor struct
+ */
+#define HW_DESC_SET_QUEUE_LAST_IND(pDesc) \
+ do { \
+ CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD3, QUEUE_LAST_IND, (pDesc)->word[3], 1); \
+ } while (0)
+
+/*!
+ * This macro signals the end of the HW descriptor flow by requesting a completion ack, and releases the HW engines.
+ *
+ * \param pDesc pointer HW descriptor struct
+ */
+#define HW_DESC_SET_ACK_LAST(pDesc) \
+ do { \
+ CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD3, QUEUE_LAST_IND, (pDesc)->word[3], 1); \
+ CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD4, ACK_NEEDED, (pDesc)->word[4], 1); \
+ } while (0)
+
+
+#define MSB64(_addr) (sizeof(_addr) == 4 ? 0 : ((_addr) >> 32)&UINT16_MAX)
+
+/*!
+ * This macro sets the DIN field of a HW descriptor.
+ *
+ * \param pDesc pointer HW descriptor struct
+ * \param dmaMode The DMA mode: NO_DMA, SRAM, DLLI, MLLI, CONSTANT
+ * \param dinAdr DIN address
+ * \param dinSize Data size in bytes
+ * \param axiNs AXI secure bit
+ */
+#define HW_DESC_SET_DIN_TYPE(pDesc, dmaMode, dinAdr, dinSize, axiNs) \
+ do { \
+ CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD0, VALUE, (pDesc)->word[0], (dinAdr)&UINT32_MAX ); \
+ CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD5, DIN_ADDR_HIGH, (pDesc)->word[5], MSB64(dinAdr) ); \
+ CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD1, DIN_DMA_MODE, (pDesc)->word[1], (dmaMode)); \
+ CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD1, DIN_SIZE, (pDesc)->word[1], (dinSize)); \
+ CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD1, NS_BIT, (pDesc)->word[1], (axiNs)); \
+ } while (0)
+
+
+/*!
+ * This macro sets the DIN field of a HW descriptor to NO DMA mode. Used for NOP descriptors, register patches and
+ * other special modes.
+ *
+ * \param pDesc pointer HW descriptor struct
+ * \param dinAdr DIN address
+ * \param dinSize Data size in bytes
+ */
+#define HW_DESC_SET_DIN_NO_DMA(pDesc, dinAdr, dinSize) \
+ do { \
+ CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD0, VALUE, (pDesc)->word[0], (uint32_t)(dinAdr)); \
+ CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD1, DIN_SIZE, (pDesc)->word[1], (dinSize)); \
+ } while (0)
+
+/*!
+ * This macro sets the DIN field of a HW descriptor to SRAM mode.
+ * Note: there is no need to check SRAM alignment since host requests do not use SRAM and
+ * the adaptor will enforce the alignment check.
+ *
+ * \param pDesc pointer HW descriptor struct
+ * \param dinAdr DIN address
+ * \param dinSize Data size in bytes
+ */
+#define HW_DESC_SET_DIN_SRAM(pDesc, dinAdr, dinSize) \
+ do { \
+ CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD0, VALUE, (pDesc)->word[0], (uint32_t)(dinAdr)); \
+ CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD1, DIN_DMA_MODE, (pDesc)->word[1], DMA_SRAM); \
+ CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD1, DIN_SIZE, (pDesc)->word[1], (dinSize)); \
+ } while (0)
+
+/*! This macro sets the DIN field of a HW descriptor to CONST mode.
+ *
+ * \param pDesc pointer HW descriptor struct
+ * \param val DIN const value
+ * \param dinSize Data size in bytes
+ */
+#define HW_DESC_SET_DIN_CONST(pDesc, val, dinSize) \
+ do { \
+ CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD0, VALUE, (pDesc)->word[0], (uint32_t)(val)); \
+ CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD1, DIN_CONST_VALUE, (pDesc)->word[1], 1); \
+ CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD1, DIN_DMA_MODE, (pDesc)->word[1], DMA_SRAM); \
+ CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD1, DIN_SIZE, (pDesc)->word[1], (dinSize)); \
+ } while (0)
+
+/*!
+ * This macro sets the DIN "not last input data" indicator.
+ *
+ * \param pDesc pointer HW descriptor struct
+ */
+#define HW_DESC_SET_DIN_NOT_LAST_INDICATION(pDesc) \
+ do { \
+ CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD1, NOT_LAST, (pDesc)->word[1], 1); \
+ } while (0)
+
+/*!
+ * This macro sets the DOUT field of a HW descriptor.
+ *
+ * \param pDesc pointer HW descriptor struct
+ * \param dmaMode The DMA mode: NO_DMA, SRAM, DLLI, MLLI, CONSTANT
+ * \param doutAdr DOUT address
+ * \param doutSize Data size in bytes
+ * \param axiNs AXI secure bit
+ */
+#define HW_DESC_SET_DOUT_TYPE(pDesc, dmaMode, doutAdr, doutSize, axiNs) \
+ do { \
+ CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD2, VALUE, (pDesc)->word[2], (doutAdr)&UINT32_MAX ); \
+ CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD5, DOUT_ADDR_HIGH, (pDesc)->word[5], MSB64(doutAdr) ); \
+ CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD3, DOUT_DMA_MODE, (pDesc)->word[3], (dmaMode)); \
+ CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD3, DOUT_SIZE, (pDesc)->word[3], (doutSize)); \
+ CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD3, NS_BIT, (pDesc)->word[3], (axiNs)); \
+ } while (0)
+
+/*!
+ * This macro sets the DOUT field of a HW descriptor to DLLI type.
+ * The LAST INDICATION is provided by the user.
+ *
+ * \param pDesc pointer HW descriptor struct
+ * \param doutAdr DOUT address
+ * \param doutSize Data size in bytes
+ * \param lastInd The last indication bit
+ * \param axiNs AXI secure bit
+ */
+#define HW_DESC_SET_DOUT_DLLI(pDesc, doutAdr, doutSize, axiNs ,lastInd) \
+ do { \
+ CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD2, VALUE, (pDesc)->word[2], (doutAdr)&UINT32_MAX ); \
+ CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD5, DOUT_ADDR_HIGH, (pDesc)->word[5], MSB64(doutAdr) ); \
+ CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD3, DOUT_DMA_MODE, (pDesc)->word[3], DMA_DLLI); \
+ CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD3, DOUT_SIZE, (pDesc)->word[3], (doutSize)); \
+ CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD3, DOUT_LAST_IND, (pDesc)->word[3], lastInd); \
+ CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD3, NS_BIT, (pDesc)->word[3], (axiNs)); \
+ } while (0)
+
+/*!
+ * This macro sets the DOUT field of a HW descriptor to MLLI type.
+ * The LAST INDICATION is provided by the user.
+ *
+ * \param pDesc pointer HW descriptor struct
+ * \param doutAdr DOUT address
+ * \param doutSize Data size in bytes
+ * \param lastInd The last indication bit
+ * \param axiNs AXI secure bit
+ */
+#define HW_DESC_SET_DOUT_MLLI(pDesc, doutAdr, doutSize, axiNs ,lastInd) \
+ do { \
+ CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD2, VALUE, (pDesc)->word[2], (doutAdr)&UINT32_MAX ); \
+ CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD5, DOUT_ADDR_HIGH, (pDesc)->word[5], MSB64(doutAdr) ); \
+ CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD3, DOUT_DMA_MODE, (pDesc)->word[3], DMA_MLLI); \
+ CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD3, DOUT_SIZE, (pDesc)->word[3], (doutSize)); \
+ CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD3, DOUT_LAST_IND, (pDesc)->word[3], lastInd); \
+ CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD3, NS_BIT, (pDesc)->word[3], (axiNs)); \
+ } while (0)
+
+/*!
+ * This macro sets the DOUT field of a HW descriptor to NO DMA mode. Used for NOP descriptors, register patches and
+ * other special modes.
+ *
+ * \param pDesc pointer HW descriptor struct
+ * \param doutAdr DOUT address
+ * \param doutSize Data size in bytes
+ * \param registerWriteEnable Enables a write operation to a register
+ */
+#define HW_DESC_SET_DOUT_NO_DMA(pDesc, doutAdr, doutSize, registerWriteEnable) \
+ do { \
+ CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD2, VALUE, (pDesc)->word[2], (uint32_t)(doutAdr)); \
+ CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD3, DOUT_SIZE, (pDesc)->word[3], (doutSize)); \
+ CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD3, DOUT_LAST_IND, (pDesc)->word[3], (registerWriteEnable)); \
+ } while (0)
+
+/*!
+ * This macro sets the word for the XOR operation.
+ *
+ * \param pDesc pointer HW descriptor struct
+ * \param xorVal xor data value
+ */
+#define HW_DESC_SET_XOR_VAL(pDesc, xorVal) \
+ do { \
+ CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD2, VALUE, (pDesc)->word[2], (uint32_t)(xorVal)); \
+ } while (0)
+
+/*!
+ * This macro sets the XOR indicator bit in the descriptor
+ *
+ * \param pDesc pointer HW descriptor struct
+ */
+#define HW_DESC_SET_XOR_ACTIVE(pDesc) \
+ do { \
+ CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD3, HASH_XOR_BIT, (pDesc)->word[3], 1); \
+ } while (0)
+
+/*!
+ * This macro selects the AES engine instead of the HASH engine when setting up a combined mode with AES XCBC MAC.
+ *
+ * \param pDesc pointer HW descriptor struct
+ */
+#define HW_DESC_SET_AES_NOT_HASH_MODE(pDesc) \
+ do { \
+ CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD4, AES_SEL_N_HASH, (pDesc)->word[4], 1); \
+ } while (0)
+
+/*!
+ * This macro sets the DOUT field of a HW descriptor to SRAM mode.
+ * Note: there is no need to check SRAM alignment since host requests do not use SRAM and
+ * the adaptor will enforce the alignment check.
+ *
+ * \param pDesc pointer HW descriptor struct
+ * \param doutAdr DOUT address
+ * \param doutSize Data size in bytes
+ */
+#define HW_DESC_SET_DOUT_SRAM(pDesc, doutAdr, doutSize) \
+ do { \
+ CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD2, VALUE, (pDesc)->word[2], (uint32_t)(doutAdr)); \
+ CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD3, DOUT_DMA_MODE, (pDesc)->word[3], DMA_SRAM); \
+ CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD3, DOUT_SIZE, (pDesc)->word[3], (doutSize)); \
+ } while (0)
+
+
+/*!
+ * This macro sets the data unit size for XEX mode in data_out_addr[15:0]
+ *
+ * \param pDesc pointer HW descriptor struct
+ * \param dataUnitSize data unit size for XEX mode
+ */
+#define HW_DESC_SET_XEX_DATA_UNIT_SIZE(pDesc, dataUnitSize) \
+ do { \
+ CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD2, VALUE, (pDesc)->word[2], (uint32_t)(dataUnitSize)); \
+ } while (0)
+
+/*!
+ * This macro sets the number of rounds for Multi2 in data_out_addr[15:0]
+ *
+ * \param pDesc pointer HW descriptor struct
+ * \param numRounds number of rounds for Multi2
+*/
+#define HW_DESC_SET_MULTI2_NUM_ROUNDS(pDesc, numRounds) \
+ do { \
+ CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD2, VALUE, (pDesc)->word[2], (uint32_t)(numRounds)); \
+ } while (0)
+
+/*!
+ * This macro sets the flow mode.
+ *
+ * \param pDesc pointer HW descriptor struct
+ * \param flowMode Any one of the modes defined in [CC7x-DESC]
+*/
+
+#define HW_DESC_SET_FLOW_MODE(pDesc, flowMode) \
+ do { \
+ CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD4, DATA_FLOW_MODE, (pDesc)->word[4], (flowMode)); \
+ } while (0)
+
+/*!
+ * This macro sets the cipher mode.
+ *
+ * \param pDesc pointer HW descriptor struct
+ * \param cipherMode Any one of the modes defined in [CC7x-DESC]
+*/
+#define HW_DESC_SET_CIPHER_MODE(pDesc, cipherMode) \
+ do { \
+ CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD4, CIPHER_MODE, (pDesc)->word[4], (cipherMode)); \
+ } while (0)
+
+/*!
+ * This macro sets the cipher configuration fields.
+ *
+ * \param pDesc pointer HW descriptor struct
+ * \param cipherConfig Any one of the modes defined in [CC7x-DESC]
+*/
+#define HW_DESC_SET_CIPHER_CONFIG0(pDesc, cipherConfig) \
+ do { \
+ CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD4, CIPHER_CONF0, (pDesc)->word[4], (cipherConfig)); \
+ } while (0)
+
+/*!
+ * This macro sets the cipher configuration fields.
+ *
+ * \param pDesc pointer HW descriptor struct
+ * \param cipherConfig Any one of the modes defined in [CC7x-DESC]
+*/
+#define HW_DESC_SET_CIPHER_CONFIG1(pDesc, cipherConfig) \
+ do { \
+ CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD4, CIPHER_CONF1, (pDesc)->word[4], (cipherConfig)); \
+ } while (0)
+
+/*!
+ * This macro sets HW key configuration fields.
+ *
+ * \param pDesc pointer HW descriptor struct
+ * \param hwKey The HW key number as in enum HwCryptoKey
+*/
+#define HW_DESC_SET_HW_CRYPTO_KEY(pDesc, hwKey) \
+ do { \
+ CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD4, CIPHER_DO, (pDesc)->word[4], (hwKey)&HW_KEY_MASK_CIPHER_DO); \
+ CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD4, CIPHER_CONF2, (pDesc)->word[4], (hwKey>>HW_KEY_SHIFT_CIPHER_CFG2)); \
+ } while (0)
+
+/*!
+ * This macro changes the byte order of all setup/finalize descriptors.
+ *
+ * \param pDesc pointer HW descriptor struct
+ * \param swapConfig Any one of the modes defined in [CC7x-DESC]
+*/
+#define HW_DESC_SET_BYTES_SWAP(pDesc, swapConfig) \
+ do { \
+ CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD4, BYTES_SWAP, (pDesc)->word[4], (swapConfig)); \
+ } while (0)
+
+/*!
+ * This macro sets the CMAC_SIZE0 mode.
+ *
+ * \param pDesc pointer HW descriptor struct
+*/
+#define HW_DESC_SET_CMAC_SIZE0_MODE(pDesc) \
+ do { \
+ CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD4, CMAC_SIZE0, (pDesc)->word[4], 0x1); \
+ } while (0)
+
+/*!
+ * This macro sets the key size for AES engine.
+ *
+ * \param pDesc pointer HW descriptor struct
+ * \param keySize key size in bytes (NOT size code)
+*/
+#define HW_DESC_SET_KEY_SIZE_AES(pDesc, keySize) \
+ do { \
+ CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD4, KEY_SIZE, (pDesc)->word[4], ((keySize) >> 3) - 2); \
+ } while (0)
+
+/*!
+ * This macro sets the key size for DES engine.
+ *
+ * \param pDesc pointer HW descriptor struct
+ * \param keySize key size in bytes (NOT size code)
+*/
+#define HW_DESC_SET_KEY_SIZE_DES(pDesc, keySize) \
+ do { \
+ CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD4, KEY_SIZE, (pDesc)->word[4], ((keySize) >> 3) - 1); \
+ } while (0)
+
+/*!
+ * This macro sets the descriptor's setup mode
+ *
+ * \param pDesc pointer HW descriptor struct
+ * \param setupMode Any one of the setup modes defined in [CC7x-DESC]
+*/
+#define HW_DESC_SET_SETUP_MODE(pDesc, setupMode) \
+ do { \
+ CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD4, SETUP_OPERATION, (pDesc)->word[4], (setupMode)); \
+ } while (0)
+
+/*!
+ * This macro sets the descriptor's cipher do
+ *
+ * \param pDesc pointer HW descriptor struct
+ * \param cipherDo Any one of the cipher do defined in [CC7x-DESC]
+*/
+#define HW_DESC_SET_CIPHER_DO(pDesc, cipherDo) \
+ do { \
+ CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD4, CIPHER_DO, (pDesc)->word[4], (cipherDo)&HW_KEY_MASK_CIPHER_DO); \
+ } while (0)
+
+/*!
+ * This macro sets the DIN field of a HW descriptor to a start/stop monitor descriptor.
+ * Used for performance measurements and debug purposes.
+ *
+ * \param pDesc pointer HW descriptor struct
+ */
+#define HW_DESC_SET_DIN_MONITOR_CNTR(pDesc) \
+ do { \
+ CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_MEASURE_CNTR, VALUE, (pDesc)->word[1], _HW_DESC_MONITOR_KICK); \
+ } while (0)
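+
+/* Usage sketch (illustrative only, not from the original sources): build a
+ * single data-flow descriptor that pushes a DLLI input buffer through the
+ * HASH engine; din_dma_addr, nbytes and ns_bit are hypothetical caller-side
+ * values:
+ *
+ *	HwDesc_s desc;
+ *
+ *	HW_DESC_INIT(&desc);
+ *	HW_DESC_SET_DIN_TYPE(&desc, DMA_DLLI, din_dma_addr, nbytes, ns_bit);
+ *	HW_DESC_SET_FLOW_MODE(&desc, DIN_HASH);
+ *	HW_DESC_SET_QUEUE_LAST_IND(&desc);
+ */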
+
+
+
+#endif /*__CC_HW_QUEUE_DEFS_H__*/
diff --git a/drivers/staging/ccree/cc_lli_defs.h b/drivers/staging/ccree/cc_lli_defs.h
new file mode 100644
index 000000000000..697f1ed181e0
--- /dev/null
+++ b/drivers/staging/ccree/cc_lli_defs.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (C) 2012-2017 ARM Limited or its affiliates.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+#ifndef _CC_LLI_DEFS_H_
+#define _CC_LLI_DEFS_H_
+#ifdef __KERNEL__
+#include <linux/types.h>
+#else
+#include <stdint.h>
+#endif
+#include "cc_bitops.h"
+
+/* Max DLLI size */
+#define DLLI_SIZE_BIT_SIZE 0x18 // DX_DSCRPTR_QUEUE_WORD1_DIN_SIZE_BIT_SIZE
+
+#define CC_MAX_MLLI_ENTRY_SIZE 0x10000
+
+#define MSB64(_addr) (sizeof(_addr) == 4 ? 0 : ((_addr) >> 32)&UINT16_MAX)
+
+#define LLI_SET_ADDR(lli_p, addr) \
+ BITFIELD_SET(((uint32_t *)(lli_p))[LLI_WORD0_OFFSET], LLI_LADDR_BIT_OFFSET, LLI_LADDR_BIT_SIZE, (addr & UINT32_MAX)); \
+ BITFIELD_SET(((uint32_t *)(lli_p))[LLI_WORD1_OFFSET], LLI_HADDR_BIT_OFFSET, LLI_HADDR_BIT_SIZE, MSB64(addr));
+
+#define LLI_SET_SIZE(lli_p, size) \
+ BITFIELD_SET(((uint32_t *)(lli_p))[LLI_WORD1_OFFSET], LLI_SIZE_BIT_OFFSET, LLI_SIZE_BIT_SIZE, size)
+
+/* Size of entry */
+#define LLI_ENTRY_WORD_SIZE 2
+#define LLI_ENTRY_BYTE_SIZE (LLI_ENTRY_WORD_SIZE * sizeof(uint32_t))
+
+/* Word0[31:0] = ADDR[31:0] */
+#define LLI_WORD0_OFFSET 0
+#define LLI_LADDR_BIT_OFFSET 0
+#define LLI_LADDR_BIT_SIZE 32
+/* Word1[31:16] = ADDR[47:32]; Word1[15:0] = SIZE */
+#define LLI_WORD1_OFFSET 1
+#define LLI_SIZE_BIT_OFFSET 0
+#define LLI_SIZE_BIT_SIZE 16
+#define LLI_HADDR_BIT_OFFSET 16
+#define LLI_HADDR_BIT_SIZE 16
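+
+/* Usage sketch (illustrative only, not from the original sources): fill one
+ * two-word LLI entry, where lli_p points at LLI_ENTRY_WORD_SIZE free 32-bit
+ * words and dma_addr/len are hypothetical caller-provided values:
+ *
+ *	LLI_SET_ADDR(lli_p, dma_addr);
+ *	LLI_SET_SIZE(lli_p, len);
+ */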
+
+
+#endif /*_CC_LLI_DEFS_H_*/
diff --git a/drivers/staging/ccree/cc_pal_log.h b/drivers/staging/ccree/cc_pal_log.h
new file mode 100644
index 000000000000..e5f5a8737833
--- /dev/null
+++ b/drivers/staging/ccree/cc_pal_log.h
@@ -0,0 +1,188 @@
+/*
+ * Copyright (C) 2012-2017 ARM Limited or its affiliates.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _CC_PAL_LOG_H_
+#define _CC_PAL_LOG_H_
+
+#include "cc_pal_types.h"
+#include "cc_pal_log_plat.h"
+
+/*!
+@file
+@brief This file contains the PAL layer log definitions; by default, logging is disabled.
+@defgroup cc_pal_log CryptoCell PAL logging APIs and definitions
+@{
+@ingroup cc_pal
+*/
+
+/* PAL log levels (to be used in CC_PAL_logLevel) */
+/*! PAL log level - disabled. */
+#define CC_PAL_LOG_LEVEL_NULL (-1) /*!< \internal Disable logging */
+/*! PAL log level - error. */
+#define CC_PAL_LOG_LEVEL_ERR 0
+/*! PAL log level - warning. */
+#define CC_PAL_LOG_LEVEL_WARN 1
+/*! PAL log level - info. */
+#define CC_PAL_LOG_LEVEL_INFO 2
+/*! PAL log level - debug. */
+#define CC_PAL_LOG_LEVEL_DEBUG 3
+/*! PAL log level - trace. */
+#define CC_PAL_LOG_LEVEL_TRACE 4
+/*! PAL log level - data. */
+#define CC_PAL_LOG_LEVEL_DATA 5
+
+#ifndef CC_PAL_LOG_CUR_COMPONENT
+/* Set the default component mask in case the caller did not define one */
+/* (a mask that is always on for every log mask value except full masking) */
+/*! Default log debugged component.*/
+#define CC_PAL_LOG_CUR_COMPONENT 0xFFFFFFFF
+#endif
+#ifndef CC_PAL_LOG_CUR_COMPONENT_NAME
+/*! Default log debugged component.*/
+#define CC_PAL_LOG_CUR_COMPONENT_NAME "CC"
+#endif
+
+/* Select compile time log level (default if not explicitly specified by caller) */
+#ifndef CC_PAL_MAX_LOG_LEVEL /* Can be overridden by an external definition of this constant */
+#ifdef DEBUG
+/*! Default debug log level (when debug is set to on).*/
+#define CC_PAL_MAX_LOG_LEVEL CC_PAL_LOG_LEVEL_ERR /*CC_PAL_LOG_LEVEL_DEBUG*/
+#else /* Disable logging */
+/*! Default log level when DEBUG is not set (logging disabled).*/
+#define CC_PAL_MAX_LOG_LEVEL CC_PAL_LOG_LEVEL_NULL
+#endif
+#endif /*CC_PAL_MAX_LOG_LEVEL*/
+/*! Evaluate CC_PAL_MAX_LOG_LEVEL in case provided by caller */
+#define __CC_PAL_LOG_LEVEL_EVAL(level) level
+/*! Maximal log level definition.*/
+#define _CC_PAL_MAX_LOG_LEVEL __CC_PAL_LOG_LEVEL_EVAL(CC_PAL_MAX_LOG_LEVEL)
+
+
+#ifdef ARM_DSM
+/*! Log init function. */
+#define CC_PalLogInit() do {} while (0)
+/*! Log set level function - sets the level of logging in case of debug. */
+#define CC_PalLogLevelSet(setLevel) do {} while (0)
+/*! Log set mask function - sets the component masking in case of debug. */
+#define CC_PalLogMaskSet(setMask) do {} while (0)
+#else
+#if _CC_PAL_MAX_LOG_LEVEL > CC_PAL_LOG_LEVEL_NULL
+/*! Log init function. */
+void CC_PalLogInit(void);
+/*! Log set level function - sets the level of logging in case of debug. */
+void CC_PalLogLevelSet(int setLevel);
+/*! Log set mask function - sets the component masking in case of debug. */
+void CC_PalLogMaskSet(uint32_t setMask);
+/*! Global variable for log level */
+extern int CC_PAL_logLevel;
+/*! Global variable for log mask */
+extern uint32_t CC_PAL_logMask;
+#else /* No log */
+/*! Log init function. */
+static inline void CC_PalLogInit(void) {}
+/*! Log set level function - sets the level of logging in case of debug. */
+static inline void CC_PalLogLevelSet(int setLevel) {CC_UNUSED_PARAM(setLevel);}
+/*! Log set mask function - sets the component masking in case of debug. */
+static inline void CC_PalLogMaskSet(uint32_t setMask) {CC_UNUSED_PARAM(setMask);}
+#endif
+#endif
+
+/*! Filter logging based on logMask and dispatch to platform specific logging mechanism. */
+#define _CC_PAL_LOG(level, format, ...) \
+ if (CC_PAL_logMask & CC_PAL_LOG_CUR_COMPONENT) \
+ __CC_PAL_LOG_PLAT(CC_PAL_LOG_LEVEL_ ## level, "%s:%s: " format, CC_PAL_LOG_CUR_COMPONENT_NAME, __func__, ##__VA_ARGS__)
+
+#if (_CC_PAL_MAX_LOG_LEVEL >= CC_PAL_LOG_LEVEL_ERR)
+/*! Log messages according to log level.*/
+#define CC_PAL_LOG_ERR(format, ... ) \
+ _CC_PAL_LOG(ERR, format, ##__VA_ARGS__)
+#else
+/*! Log messages according to log level.*/
+#define CC_PAL_LOG_ERR( ... ) do {} while (0)
+#endif
+
+#if (_CC_PAL_MAX_LOG_LEVEL >= CC_PAL_LOG_LEVEL_WARN)
+/*! Log messages according to log level.*/
+#define CC_PAL_LOG_WARN(format, ... ) \
+ if (CC_PAL_logLevel >= CC_PAL_LOG_LEVEL_WARN) \
+ _CC_PAL_LOG(WARN, format, ##__VA_ARGS__)
+#else
+/*! Log messages according to log level.*/
+#define CC_PAL_LOG_WARN( ... ) do {} while (0)
+#endif
+
+#if (_CC_PAL_MAX_LOG_LEVEL >= CC_PAL_LOG_LEVEL_INFO)
+/*! Log messages according to log level.*/
+#define CC_PAL_LOG_INFO(format, ... ) \
+ if (CC_PAL_logLevel >= CC_PAL_LOG_LEVEL_INFO) \
+ _CC_PAL_LOG(INFO, format, ##__VA_ARGS__)
+#else
+/*! Log messages according to log level.*/
+#define CC_PAL_LOG_INFO( ... ) do {} while (0)
+#endif
+
+#if (_CC_PAL_MAX_LOG_LEVEL >= CC_PAL_LOG_LEVEL_DEBUG)
+/*! Log messages according to log level.*/
+#define CC_PAL_LOG_DEBUG(format, ... ) \
+ if (CC_PAL_logLevel >= CC_PAL_LOG_LEVEL_DEBUG) \
+ _CC_PAL_LOG(DEBUG, format, ##__VA_ARGS__)
+
+/*! Log message buffer.*/
+#define CC_PAL_LOG_DUMP_BUF(msg, buf, size) \
+ do { \
+ int i; \
+ uint8_t *pData = (uint8_t*)buf; \
+ \
+ PRINTF("%s (%d):\n", msg, size); \
+ for (i = 0; i < size; i++) { \
+ PRINTF("0x%02X ", pData[i]); \
+ if ((i & 0xF) == 0xF) { \
+ PRINTF("\n"); \
+ } \
+ } \
+ PRINTF("\n"); \
+ } while (0)
+#else
+/*! Log debug messages.*/
+#define CC_PAL_LOG_DEBUG( ... ) do {} while (0)
+/*! Log debug buffer.*/
+#define CC_PAL_LOG_DUMP_BUF(msg, buf, size) do {} while (0)
+#endif
+
+#if (_CC_PAL_MAX_LOG_LEVEL >= CC_PAL_LOG_LEVEL_TRACE)
+/*! Log debug trace.*/
+#define CC_PAL_LOG_TRACE(format, ... ) \
+ if (CC_PAL_logLevel >= CC_PAL_LOG_LEVEL_TRACE) \
+ _CC_PAL_LOG(TRACE, format, ##__VA_ARGS__)
+#else
+/*! Log debug trace.*/
+#define CC_PAL_LOG_TRACE(...) do {} while (0)
+#endif
+
+#if (_CC_PAL_MAX_LOG_LEVEL >= CC_PAL_LOG_LEVEL_TRACE)
+/*! Log debug data.*/
+#define CC_PAL_LOG_DATA(format, ...) \
+ if (CC_PAL_logLevel >= CC_PAL_LOG_LEVEL_TRACE) \
+ _CC_PAL_LOG(DATA, format, ##__VA_ARGS__)
+#else
+/*! Log debug data.*/
+#define CC_PAL_LOG_DATA( ...) do {} while (0)
+#endif
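+
+/* Usage sketch (illustrative only, not from the original sources): with DEBUG
+ * defined and CC_PAL_MAX_LOG_LEVEL raised to CC_PAL_LOG_LEVEL_DEBUG, a caller
+ * could log a message and dump a buffer; buf and len are placeholders:
+ *
+ *	CC_PAL_LOG_DEBUG("mapped %u bytes\n", (unsigned int)len);
+ *	CC_PAL_LOG_DUMP_BUF("payload", buf, len);
+ */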
+/**
+@}
+ */
+
+#endif /*_CC_PAL_LOG_H_*/
diff --git a/drivers/staging/ccree/cc_pal_log_plat.h b/drivers/staging/ccree/cc_pal_log_plat.h
new file mode 100644
index 000000000000..a05a200cf6eb
--- /dev/null
+++ b/drivers/staging/ccree/cc_pal_log_plat.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2012-2017 ARM Limited or its affiliates.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/* Dummy pal_log_plat for test driver in kernel */
+
+#ifndef _SSI_PAL_LOG_PLAT_H_
+#define _SSI_PAL_LOG_PLAT_H_
+
+#if defined(DEBUG)
+
+#define __CC_PAL_LOG_PLAT(level, format, ...) printk(level "cc7x_test::" format , ##__VA_ARGS__)
+
+#else /* Disable all prints */
+
+#define __CC_PAL_LOG_PLAT(...) do {} while (0)
+
+#endif
+
+#endif /*_SSI_PAL_LOG_PLAT_H_*/
+
diff --git a/drivers/staging/ccree/cc_pal_types.h b/drivers/staging/ccree/cc_pal_types.h
new file mode 100644
index 000000000000..9b59bbb34515
--- /dev/null
+++ b/drivers/staging/ccree/cc_pal_types.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright (C) 2012-2017 ARM Limited or its affiliates.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef CC_PAL_TYPES_H
+#define CC_PAL_TYPES_H
+
+/*!
+@file
+@brief This file contains platform-dependent definitions and types.
+@defgroup cc_pal_types CryptoCell PAL platform-dependent types
+@{
+@ingroup cc_pal
+
+*/
+
+#include "cc_pal_types_plat.h"
+
+/*! Boolean definition.*/
+typedef enum {
+ /*! Boolean false definition.*/
+ CC_FALSE = 0,
+ /*! Boolean true definition.*/
+ CC_TRUE = 1
+} CCBool;
+
+/*! Success definition. */
+#define CC_SUCCESS 0UL
+/*! Failure definition. */
+#define CC_FAIL 1UL
+
+/*! Definition of 1KB in bytes. */
+#define CC_1K_SIZE_IN_BYTES 1024
+/*! Definition of the number of bits in a byte. */
+#define CC_BITS_IN_BYTE 8
+/*! Definition of the number of bits in a 32-bit word. */
+#define CC_BITS_IN_32BIT_WORD 32
+/*! Definition of the number of bytes in a 32-bit word. */
+#define CC_32BIT_WORD_SIZE (sizeof(uint32_t))
+
+/*! Success (OK) definition. */
+#define CC_OK 0
+
+/*! Macro that handles unused parameters in the code (to avoid compilation warnings). */
+#define CC_UNUSED_PARAM(prm) ((void)prm)
+
+/*! Maximal uint32 value.*/
+#define CC_MAX_UINT32_VAL (0xFFFFFFFF)
+
+
+/* Minimum and Maximum macros */
+#ifdef min
+/*! Definition for minimum. */
+#define CC_MIN(a,b) min( a , b )
+#else
+/*! Definition for minimum. */
+#define CC_MIN( a , b ) ( ( (a) < (b) ) ? (a) : (b) )
+#endif
+
+#ifdef max
+/*! Definition for maximum. */
+#define CC_MAX(a,b) max( a , b )
+#else
+/*! Definition for maximum. */
+#define CC_MAX( a , b ) ( ( (a) > (b) ) ? (a) : (b) )
+#endif
+
+/*! Macro that calculates number of full bytes from bits (i.e. 7 bits are 1 byte). */
+#define CALC_FULL_BYTES(numBits) ((numBits)/CC_BITS_IN_BYTE + (((numBits) & (CC_BITS_IN_BYTE-1)) > 0))
+/*! Macro that calculates number of full 32bits words from bits (i.e. 31 bits are 1 word). */
+#define CALC_FULL_32BIT_WORDS(numBits) ((numBits)/CC_BITS_IN_32BIT_WORD + (((numBits) & (CC_BITS_IN_32BIT_WORD-1)) > 0))
+/*! Macro that calculates number of full 32bits words from bytes (i.e. 3 bytes are 1 word). */
+#define CALC_32BIT_WORDS_FROM_BYTES(sizeBytes) ((sizeBytes)/CC_32BIT_WORD_SIZE + (((sizeBytes) & (CC_32BIT_WORD_SIZE-1)) > 0))
+/*! Macro that rounds up bits to 32-bit words. */
+#define ROUNDUP_BITS_TO_32BIT_WORD(numBits) (CALC_FULL_32BIT_WORDS(numBits) * CC_BITS_IN_32BIT_WORD)
+/*! Macro that rounds up bits to bytes. */
+#define ROUNDUP_BITS_TO_BYTES(numBits) (CALC_FULL_BYTES(numBits) * CC_BITS_IN_BYTE)
+/*! Macro that rounds up bytes to 32-bit words. */
+#define ROUNDUP_BYTES_TO_32BIT_WORD(sizeBytes) (CALC_32BIT_WORDS_FROM_BYTES(sizeBytes) * CC_32BIT_WORD_SIZE)
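+
+/* Worked examples (illustrative only, not from the original sources):
+ * CALC_FULL_BYTES(13) == 2 (13 bits need two bytes), and
+ * ROUNDUP_BITS_TO_32BIT_WORD(33) == 64 (33 bits round up to two 32-bit words).
+ */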
+
+
+/**
+@}
+ */
+#endif
diff --git a/drivers/staging/ccree/cc_pal_types_plat.h b/drivers/staging/ccree/cc_pal_types_plat.h
new file mode 100644
index 000000000000..6e4211262231
--- /dev/null
+++ b/drivers/staging/ccree/cc_pal_types_plat.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2012-2017 ARM Limited or its affiliates.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+#ifndef SSI_PAL_TYPES_PLAT_H
+#define SSI_PAL_TYPES_PLAT_H
+/* Linux kernel types */
+
+#include <linux/types.h>
+
+#ifndef NULL /* Missing in Linux kernel */
+#define NULL (0x0L)
+#endif
+
+
+#endif /*SSI_PAL_TYPES_PLAT_H*/
diff --git a/drivers/staging/ccree/cc_regs.h b/drivers/staging/ccree/cc_regs.h
new file mode 100644
index 000000000000..963f8148cd28
--- /dev/null
+++ b/drivers/staging/ccree/cc_regs.h
@@ -0,0 +1,106 @@
+/*
+ * Copyright (C) 2012-2017 ARM Limited or its affiliates.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+/*!
+ * @file
+ * @brief This file contains macro definitions for accessing ARM TrustZone CryptoCell register space.
+ */
+
+#ifndef _CC_REGS_H_
+#define _CC_REGS_H_
+
+#include "cc_bitops.h"
+
+/* Register Offset macro */
+#define CC_REG_OFFSET(unit_name, reg_name) \
+ (DX_BASE_ ## unit_name + DX_ ## reg_name ## _REG_OFFSET)
+
+#define CC_REG_BIT_SHIFT(reg_name, field_name) \
+ (DX_ ## reg_name ## _ ## field_name ## _BIT_SHIFT)
+
+/* Register Offset macros (from registers base address in host) */
+#include "dx_reg_base_host.h"
+
+/* Read-Modify-Write a field of a register */
+#define MODIFY_REGISTER_FLD(unitName, regName, fldName, fldVal) \
+do { \
+ uint32_t regVal; \
+ regVal = READ_REGISTER(CC_REG_ADDR(unitName, regName)); \
+ CC_REG_FLD_SET(unitName, regName, fldName, regVal, fldVal); \
+ WRITE_REGISTER(CC_REG_ADDR(unitName, regName), regVal); \
+} while (0)
+
+/* Registers address macros for ENV registers (development FPGA only) */
+#ifdef DX_BASE_ENV_REGS
+
+/* This offset should be added to mapping address of DX_BASE_ENV_REGS */
+#define CC_ENV_REG_OFFSET(reg_name) (DX_ENV_ ## reg_name ## _REG_OFFSET)
+
+#endif /*DX_BASE_ENV_REGS*/
+
+/*! Bit fields get */
+#define CC_REG_FLD_GET(unit_name, reg_name, fld_name, reg_val) \
+ (DX_ ## reg_name ## _ ## fld_name ## _BIT_SIZE == 0x20 ? \
+ reg_val /*!< \internal Optimization for 32b fields */ : \
+ BITFIELD_GET(reg_val, DX_ ## reg_name ## _ ## fld_name ## _BIT_SHIFT, \
+ DX_ ## reg_name ## _ ## fld_name ## _BIT_SIZE))
+
+/*! Bit fields access */
+#define CC_REG_FLD_GET2(unit_name, reg_name, fld_name, reg_val) \
+ (CC_ ## reg_name ## _ ## fld_name ## _BIT_SIZE == 0x20 ? \
+ reg_val /*!< \internal Optimization for 32b fields */ : \
+ BITFIELD_GET(reg_val, CC_ ## reg_name ## _ ## fld_name ## _BIT_SHIFT, \
+ CC_ ## reg_name ## _ ## fld_name ## _BIT_SIZE))
+
+/* TODO: all HW includes should start with CC_ and not DX_ */
+
+
+/*! Bit fields set */
+#define CC_REG_FLD_SET( \
+ unit_name, reg_name, fld_name, reg_shadow_var, new_fld_val) \
+do { \
+ if (DX_ ## reg_name ## _ ## fld_name ## _BIT_SIZE == 0x20) \
+ reg_shadow_var = new_fld_val; /*!< \internal Optimization for 32b fields */\
+ else \
+ BITFIELD_SET(reg_shadow_var, \
+ DX_ ## reg_name ## _ ## fld_name ## _BIT_SHIFT, \
+ DX_ ## reg_name ## _ ## fld_name ## _BIT_SIZE, \
+ new_fld_val); \
+} while (0)
+
+/*! Bit fields set */
+#define CC_REG_FLD_SET2( \
+ unit_name, reg_name, fld_name, reg_shadow_var, new_fld_val) \
+do { \
+ if (CC_ ## reg_name ## _ ## fld_name ## _BIT_SIZE == 0x20) \
+ reg_shadow_var = new_fld_val; /*!< \internal Optimization for 32b fields */\
+ else \
+ BITFIELD_SET(reg_shadow_var, \
+ CC_ ## reg_name ## _ ## fld_name ## _BIT_SHIFT, \
+ CC_ ## reg_name ## _ ## fld_name ## _BIT_SIZE, \
+ new_fld_val); \
+} while (0)
+
+/* Usage example:
+ uint32_t reg_shadow = READ_REGISTER(CC_REG_ADDR(CRY_KERNEL,AES_CONTROL));
+ CC_REG_FLD_SET(CRY_KERNEL,AES_CONTROL,NK_KEY0,reg_shadow, 3);
+ CC_REG_FLD_SET(CRY_KERNEL,AES_CONTROL,NK_KEY1,reg_shadow, 1);
+ WRITE_REGISTER(CC_REG_ADDR(CRY_KERNEL,AES_CONTROL), reg_shadow);
+ */
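+
+/* A companion read sketch (illustrative only, not from the original sources):
+ * pull a single field out of a register value read as in the example above:
+ *
+ *	uint32_t reg_val = READ_REGISTER(CC_REG_ADDR(CRY_KERNEL, AES_CONTROL));
+ *	uint32_t nk_key0 = CC_REG_FLD_GET(CRY_KERNEL, AES_CONTROL, NK_KEY0, reg_val);
+ */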
+
+#endif /*_CC_REGS_H_*/
diff --git a/drivers/staging/ccree/dx_crys_kernel.h b/drivers/staging/ccree/dx_crys_kernel.h
new file mode 100644
index 000000000000..703469c4a828
--- /dev/null
+++ b/drivers/staging/ccree/dx_crys_kernel.h
@@ -0,0 +1,180 @@
+/*
+ * Copyright (C) 2012-2017 ARM Limited or its affiliates.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __DX_CRYS_KERNEL_H__
+#define __DX_CRYS_KERNEL_H__
+
+// --------------------------------------
+// BLOCK: DSCRPTR
+// --------------------------------------
+#define DX_DSCRPTR_COMPLETION_COUNTER_REG_OFFSET 0xE00UL
+#define DX_DSCRPTR_COMPLETION_COUNTER_COMPLETION_COUNTER_BIT_SHIFT 0x0UL
+#define DX_DSCRPTR_COMPLETION_COUNTER_COMPLETION_COUNTER_BIT_SIZE 0x6UL
+#define DX_DSCRPTR_COMPLETION_COUNTER_OVERFLOW_COUNTER_BIT_SHIFT 0x6UL
+#define DX_DSCRPTR_COMPLETION_COUNTER_OVERFLOW_COUNTER_BIT_SIZE 0x1UL
+#define DX_DSCRPTR_SW_RESET_REG_OFFSET 0xE40UL
+#define DX_DSCRPTR_SW_RESET_VALUE_BIT_SHIFT 0x0UL
+#define DX_DSCRPTR_SW_RESET_VALUE_BIT_SIZE 0x1UL
+#define DX_DSCRPTR_QUEUE_SRAM_SIZE_REG_OFFSET 0xE60UL
+#define DX_DSCRPTR_QUEUE_SRAM_SIZE_NUM_OF_DSCRPTR_BIT_SHIFT 0x0UL
+#define DX_DSCRPTR_QUEUE_SRAM_SIZE_NUM_OF_DSCRPTR_BIT_SIZE 0xAUL
+#define DX_DSCRPTR_QUEUE_SRAM_SIZE_DSCRPTR_SRAM_SIZE_BIT_SHIFT 0xAUL
+#define DX_DSCRPTR_QUEUE_SRAM_SIZE_DSCRPTR_SRAM_SIZE_BIT_SIZE 0xCUL
+#define DX_DSCRPTR_QUEUE_SRAM_SIZE_SRAM_SIZE_BIT_SHIFT 0x16UL
+#define DX_DSCRPTR_QUEUE_SRAM_SIZE_SRAM_SIZE_BIT_SIZE 0x3UL
+#define DX_DSCRPTR_SINGLE_ADDR_EN_REG_OFFSET 0xE64UL
+#define DX_DSCRPTR_SINGLE_ADDR_EN_VALUE_BIT_SHIFT 0x0UL
+#define DX_DSCRPTR_SINGLE_ADDR_EN_VALUE_BIT_SIZE 0x1UL
+#define DX_DSCRPTR_MEASURE_CNTR_REG_OFFSET 0xE68UL
+#define DX_DSCRPTR_MEASURE_CNTR_VALUE_BIT_SHIFT 0x0UL
+#define DX_DSCRPTR_MEASURE_CNTR_VALUE_BIT_SIZE 0x20UL
+#define DX_DSCRPTR_QUEUE_WORD0_REG_OFFSET 0xE80UL
+#define DX_DSCRPTR_QUEUE_WORD0_VALUE_BIT_SHIFT 0x0UL
+#define DX_DSCRPTR_QUEUE_WORD0_VALUE_BIT_SIZE 0x20UL
+#define DX_DSCRPTR_QUEUE_WORD1_REG_OFFSET 0xE84UL
+#define DX_DSCRPTR_QUEUE_WORD1_DIN_DMA_MODE_BIT_SHIFT 0x0UL
+#define DX_DSCRPTR_QUEUE_WORD1_DIN_DMA_MODE_BIT_SIZE 0x2UL
+#define DX_DSCRPTR_QUEUE_WORD1_DIN_SIZE_BIT_SHIFT 0x2UL
+#define DX_DSCRPTR_QUEUE_WORD1_DIN_SIZE_BIT_SIZE 0x18UL
+#define DX_DSCRPTR_QUEUE_WORD1_NS_BIT_BIT_SHIFT 0x1AUL
+#define DX_DSCRPTR_QUEUE_WORD1_NS_BIT_BIT_SIZE 0x1UL
+#define DX_DSCRPTR_QUEUE_WORD1_DIN_CONST_VALUE_BIT_SHIFT 0x1BUL
+#define DX_DSCRPTR_QUEUE_WORD1_DIN_CONST_VALUE_BIT_SIZE 0x1UL
+#define DX_DSCRPTR_QUEUE_WORD1_NOT_LAST_BIT_SHIFT 0x1CUL
+#define DX_DSCRPTR_QUEUE_WORD1_NOT_LAST_BIT_SIZE 0x1UL
+#define DX_DSCRPTR_QUEUE_WORD1_LOCK_QUEUE_BIT_SHIFT 0x1DUL
+#define DX_DSCRPTR_QUEUE_WORD1_LOCK_QUEUE_BIT_SIZE 0x1UL
+#define DX_DSCRPTR_QUEUE_WORD1_NOT_USED_BIT_SHIFT 0x1EUL
+#define DX_DSCRPTR_QUEUE_WORD1_NOT_USED_BIT_SIZE 0x2UL
+#define DX_DSCRPTR_QUEUE_WORD2_REG_OFFSET 0xE88UL
+#define DX_DSCRPTR_QUEUE_WORD2_VALUE_BIT_SHIFT 0x0UL
+#define DX_DSCRPTR_QUEUE_WORD2_VALUE_BIT_SIZE 0x20UL
+#define DX_DSCRPTR_QUEUE_WORD3_REG_OFFSET 0xE8CUL
+#define DX_DSCRPTR_QUEUE_WORD3_DOUT_DMA_MODE_BIT_SHIFT 0x0UL
+#define DX_DSCRPTR_QUEUE_WORD3_DOUT_DMA_MODE_BIT_SIZE 0x2UL
+#define DX_DSCRPTR_QUEUE_WORD3_DOUT_SIZE_BIT_SHIFT 0x2UL
+#define DX_DSCRPTR_QUEUE_WORD3_DOUT_SIZE_BIT_SIZE 0x18UL
+#define DX_DSCRPTR_QUEUE_WORD3_NS_BIT_BIT_SHIFT 0x1AUL
+#define DX_DSCRPTR_QUEUE_WORD3_NS_BIT_BIT_SIZE 0x1UL
+#define DX_DSCRPTR_QUEUE_WORD3_DOUT_LAST_IND_BIT_SHIFT 0x1BUL
+#define DX_DSCRPTR_QUEUE_WORD3_DOUT_LAST_IND_BIT_SIZE 0x1UL
+#define DX_DSCRPTR_QUEUE_WORD3_HASH_XOR_BIT_BIT_SHIFT 0x1DUL
+#define DX_DSCRPTR_QUEUE_WORD3_HASH_XOR_BIT_BIT_SIZE 0x1UL
+#define DX_DSCRPTR_QUEUE_WORD3_NOT_USED_BIT_SHIFT 0x1EUL
+#define DX_DSCRPTR_QUEUE_WORD3_NOT_USED_BIT_SIZE 0x1UL
+#define DX_DSCRPTR_QUEUE_WORD3_QUEUE_LAST_IND_BIT_SHIFT 0x1FUL
+#define DX_DSCRPTR_QUEUE_WORD3_QUEUE_LAST_IND_BIT_SIZE 0x1UL
+#define DX_DSCRPTR_QUEUE_WORD4_REG_OFFSET 0xE90UL
+#define DX_DSCRPTR_QUEUE_WORD4_DATA_FLOW_MODE_BIT_SHIFT 0x0UL
+#define DX_DSCRPTR_QUEUE_WORD4_DATA_FLOW_MODE_BIT_SIZE 0x6UL
+#define DX_DSCRPTR_QUEUE_WORD4_AES_SEL_N_HASH_BIT_SHIFT 0x6UL
+#define DX_DSCRPTR_QUEUE_WORD4_AES_SEL_N_HASH_BIT_SIZE 0x1UL
+#define DX_DSCRPTR_QUEUE_WORD4_AES_XOR_CRYPTO_KEY_BIT_SHIFT 0x7UL
+#define DX_DSCRPTR_QUEUE_WORD4_AES_XOR_CRYPTO_KEY_BIT_SIZE 0x1UL
+#define DX_DSCRPTR_QUEUE_WORD4_ACK_NEEDED_BIT_SHIFT 0x8UL
+#define DX_DSCRPTR_QUEUE_WORD4_ACK_NEEDED_BIT_SIZE 0x2UL
+#define DX_DSCRPTR_QUEUE_WORD4_CIPHER_MODE_BIT_SHIFT 0xAUL
+#define DX_DSCRPTR_QUEUE_WORD4_CIPHER_MODE_BIT_SIZE 0x4UL
+#define DX_DSCRPTR_QUEUE_WORD4_CMAC_SIZE0_BIT_SHIFT 0xEUL
+#define DX_DSCRPTR_QUEUE_WORD4_CMAC_SIZE0_BIT_SIZE 0x1UL
+#define DX_DSCRPTR_QUEUE_WORD4_CIPHER_DO_BIT_SHIFT 0xFUL
+#define DX_DSCRPTR_QUEUE_WORD4_CIPHER_DO_BIT_SIZE 0x2UL
+#define DX_DSCRPTR_QUEUE_WORD4_CIPHER_CONF0_BIT_SHIFT 0x11UL
+#define DX_DSCRPTR_QUEUE_WORD4_CIPHER_CONF0_BIT_SIZE 0x2UL
+#define DX_DSCRPTR_QUEUE_WORD4_CIPHER_CONF1_BIT_SHIFT 0x13UL
+#define DX_DSCRPTR_QUEUE_WORD4_CIPHER_CONF1_BIT_SIZE 0x1UL
+#define DX_DSCRPTR_QUEUE_WORD4_CIPHER_CONF2_BIT_SHIFT 0x14UL
+#define DX_DSCRPTR_QUEUE_WORD4_CIPHER_CONF2_BIT_SIZE 0x2UL
+#define DX_DSCRPTR_QUEUE_WORD4_KEY_SIZE_BIT_SHIFT 0x16UL
+#define DX_DSCRPTR_QUEUE_WORD4_KEY_SIZE_BIT_SIZE 0x2UL
+#define DX_DSCRPTR_QUEUE_WORD4_SETUP_OPERATION_BIT_SHIFT 0x18UL
+#define DX_DSCRPTR_QUEUE_WORD4_SETUP_OPERATION_BIT_SIZE 0x4UL
+#define DX_DSCRPTR_QUEUE_WORD4_DIN_SRAM_ENDIANNESS_BIT_SHIFT 0x1CUL
+#define DX_DSCRPTR_QUEUE_WORD4_DIN_SRAM_ENDIANNESS_BIT_SIZE 0x1UL
+#define DX_DSCRPTR_QUEUE_WORD4_DOUT_SRAM_ENDIANNESS_BIT_SHIFT 0x1DUL
+#define DX_DSCRPTR_QUEUE_WORD4_DOUT_SRAM_ENDIANNESS_BIT_SIZE 0x1UL
+#define DX_DSCRPTR_QUEUE_WORD4_WORD_SWAP_BIT_SHIFT 0x1EUL
+#define DX_DSCRPTR_QUEUE_WORD4_WORD_SWAP_BIT_SIZE 0x1UL
+#define DX_DSCRPTR_QUEUE_WORD4_BYTES_SWAP_BIT_SHIFT 0x1FUL
+#define DX_DSCRPTR_QUEUE_WORD4_BYTES_SWAP_BIT_SIZE 0x1UL
+#define DX_DSCRPTR_QUEUE_WORD5_REG_OFFSET 0xE94UL
+#define DX_DSCRPTR_QUEUE_WORD5_DIN_ADDR_HIGH_BIT_SHIFT 0x0UL
+#define DX_DSCRPTR_QUEUE_WORD5_DIN_ADDR_HIGH_BIT_SIZE 0x10UL
+#define DX_DSCRPTR_QUEUE_WORD5_DOUT_ADDR_HIGH_BIT_SHIFT 0x10UL
+#define DX_DSCRPTR_QUEUE_WORD5_DOUT_ADDR_HIGH_BIT_SIZE 0x10UL
+#define DX_DSCRPTR_QUEUE_WATERMARK_REG_OFFSET 0xE98UL
+#define DX_DSCRPTR_QUEUE_WATERMARK_VALUE_BIT_SHIFT 0x0UL
+#define DX_DSCRPTR_QUEUE_WATERMARK_VALUE_BIT_SIZE 0xAUL
+#define DX_DSCRPTR_QUEUE_CONTENT_REG_OFFSET 0xE9CUL
+#define DX_DSCRPTR_QUEUE_CONTENT_VALUE_BIT_SHIFT 0x0UL
+#define DX_DSCRPTR_QUEUE_CONTENT_VALUE_BIT_SIZE 0xAUL
+// --------------------------------------
+// BLOCK: AXI_P
+// --------------------------------------
+#define DX_AXIM_MON_INFLIGHT_REG_OFFSET 0xB00UL
+#define DX_AXIM_MON_INFLIGHT_VALUE_BIT_SHIFT 0x0UL
+#define DX_AXIM_MON_INFLIGHT_VALUE_BIT_SIZE 0x8UL
+#define DX_AXIM_MON_INFLIGHTLAST_REG_OFFSET 0xB40UL
+#define DX_AXIM_MON_INFLIGHTLAST_VALUE_BIT_SHIFT 0x0UL
+#define DX_AXIM_MON_INFLIGHTLAST_VALUE_BIT_SIZE 0x8UL
+#define DX_AXIM_MON_COMP_REG_OFFSET 0xB80UL
+#define DX_AXIM_MON_COMP_VALUE_BIT_SHIFT 0x0UL
+#define DX_AXIM_MON_COMP_VALUE_BIT_SIZE 0x10UL
+#define DX_AXIM_MON_ERR_REG_OFFSET 0xBC4UL
+#define DX_AXIM_MON_ERR_BRESP_BIT_SHIFT 0x0UL
+#define DX_AXIM_MON_ERR_BRESP_BIT_SIZE 0x2UL
+#define DX_AXIM_MON_ERR_BID_BIT_SHIFT 0x2UL
+#define DX_AXIM_MON_ERR_BID_BIT_SIZE 0x4UL
+#define DX_AXIM_MON_ERR_RRESP_BIT_SHIFT 0x10UL
+#define DX_AXIM_MON_ERR_RRESP_BIT_SIZE 0x2UL
+#define DX_AXIM_MON_ERR_RID_BIT_SHIFT 0x12UL
+#define DX_AXIM_MON_ERR_RID_BIT_SIZE 0x4UL
+#define DX_AXIM_CFG_REG_OFFSET 0xBE8UL
+#define DX_AXIM_CFG_BRESPMASK_BIT_SHIFT 0x4UL
+#define DX_AXIM_CFG_BRESPMASK_BIT_SIZE 0x1UL
+#define DX_AXIM_CFG_RRESPMASK_BIT_SHIFT 0x5UL
+#define DX_AXIM_CFG_RRESPMASK_BIT_SIZE 0x1UL
+#define DX_AXIM_CFG_INFLTMASK_BIT_SHIFT 0x6UL
+#define DX_AXIM_CFG_INFLTMASK_BIT_SIZE 0x1UL
+#define DX_AXIM_CFG_COMPMASK_BIT_SHIFT 0x7UL
+#define DX_AXIM_CFG_COMPMASK_BIT_SIZE 0x1UL
+#define DX_AXIM_ACE_CONST_REG_OFFSET 0xBECUL
+#define DX_AXIM_ACE_CONST_ARDOMAIN_BIT_SHIFT 0x0UL
+#define DX_AXIM_ACE_CONST_ARDOMAIN_BIT_SIZE 0x2UL
+#define DX_AXIM_ACE_CONST_AWDOMAIN_BIT_SHIFT 0x2UL
+#define DX_AXIM_ACE_CONST_AWDOMAIN_BIT_SIZE 0x2UL
+#define DX_AXIM_ACE_CONST_ARBAR_BIT_SHIFT 0x4UL
+#define DX_AXIM_ACE_CONST_ARBAR_BIT_SIZE 0x2UL
+#define DX_AXIM_ACE_CONST_AWBAR_BIT_SHIFT 0x6UL
+#define DX_AXIM_ACE_CONST_AWBAR_BIT_SIZE 0x2UL
+#define DX_AXIM_ACE_CONST_ARSNOOP_BIT_SHIFT 0x8UL
+#define DX_AXIM_ACE_CONST_ARSNOOP_BIT_SIZE 0x4UL
+#define DX_AXIM_ACE_CONST_AWSNOOP_NOT_ALIGNED_BIT_SHIFT 0xCUL
+#define DX_AXIM_ACE_CONST_AWSNOOP_NOT_ALIGNED_BIT_SIZE 0x3UL
+#define DX_AXIM_ACE_CONST_AWSNOOP_ALIGNED_BIT_SHIFT 0xFUL
+#define DX_AXIM_ACE_CONST_AWSNOOP_ALIGNED_BIT_SIZE 0x3UL
+#define DX_AXIM_ACE_CONST_AWADDR_NOT_MASKED_BIT_SHIFT 0x12UL
+#define DX_AXIM_ACE_CONST_AWADDR_NOT_MASKED_BIT_SIZE 0x7UL
+#define DX_AXIM_ACE_CONST_AWLEN_VAL_BIT_SHIFT 0x19UL
+#define DX_AXIM_ACE_CONST_AWLEN_VAL_BIT_SIZE 0x4UL
+#define DX_AXIM_CACHE_PARAMS_REG_OFFSET 0xBF0UL
+#define DX_AXIM_CACHE_PARAMS_AWCACHE_LAST_BIT_SHIFT 0x0UL
+#define DX_AXIM_CACHE_PARAMS_AWCACHE_LAST_BIT_SIZE 0x4UL
+#define DX_AXIM_CACHE_PARAMS_AWCACHE_BIT_SHIFT 0x4UL
+#define DX_AXIM_CACHE_PARAMS_AWCACHE_BIT_SIZE 0x4UL
+#define DX_AXIM_CACHE_PARAMS_ARCACHE_BIT_SHIFT 0x8UL
+#define DX_AXIM_CACHE_PARAMS_ARCACHE_BIT_SIZE 0x4UL
+#endif // __DX_CRYS_KERNEL_H__
diff --git a/drivers/staging/ccree/dx_env.h b/drivers/staging/ccree/dx_env.h
new file mode 100644
index 000000000000..08040604b6da
--- /dev/null
+++ b/drivers/staging/ccree/dx_env.h
@@ -0,0 +1,224 @@
+/*
+ * Copyright (C) 2012-2017 ARM Limited or its affiliates.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __DX_ENV_H__
+#define __DX_ENV_H__
+
+// --------------------------------------
+// BLOCK: FPGA_ENV_REGS
+// --------------------------------------
+#define DX_ENV_PKA_DEBUG_MODE_REG_OFFSET 0x024UL
+#define DX_ENV_PKA_DEBUG_MODE_VALUE_BIT_SHIFT 0x0UL
+#define DX_ENV_PKA_DEBUG_MODE_VALUE_BIT_SIZE 0x1UL
+#define DX_ENV_SCAN_MODE_REG_OFFSET 0x030UL
+#define DX_ENV_SCAN_MODE_VALUE_BIT_SHIFT 0x0UL
+#define DX_ENV_SCAN_MODE_VALUE_BIT_SIZE 0x1UL
+#define DX_ENV_CC_ALLOW_SCAN_REG_OFFSET 0x034UL
+#define DX_ENV_CC_ALLOW_SCAN_VALUE_BIT_SHIFT 0x0UL
+#define DX_ENV_CC_ALLOW_SCAN_VALUE_BIT_SIZE 0x1UL
+#define DX_ENV_CC_HOST_INT_REG_OFFSET 0x0A0UL
+#define DX_ENV_CC_HOST_INT_VALUE_BIT_SHIFT 0x0UL
+#define DX_ENV_CC_HOST_INT_VALUE_BIT_SIZE 0x1UL
+#define DX_ENV_CC_PUB_HOST_INT_REG_OFFSET 0x0A4UL
+#define DX_ENV_CC_PUB_HOST_INT_VALUE_BIT_SHIFT 0x0UL
+#define DX_ENV_CC_PUB_HOST_INT_VALUE_BIT_SIZE 0x1UL
+#define DX_ENV_CC_RST_N_REG_OFFSET 0x0A8UL
+#define DX_ENV_CC_RST_N_VALUE_BIT_SHIFT 0x0UL
+#define DX_ENV_CC_RST_N_VALUE_BIT_SIZE 0x1UL
+#define DX_ENV_RST_OVERRIDE_REG_OFFSET 0x0ACUL
+#define DX_ENV_RST_OVERRIDE_VALUE_BIT_SHIFT 0x0UL
+#define DX_ENV_RST_OVERRIDE_VALUE_BIT_SIZE 0x1UL
+#define DX_ENV_CC_POR_N_ADDR_REG_OFFSET 0x0E0UL
+#define DX_ENV_CC_POR_N_ADDR_VALUE_BIT_SHIFT 0x0UL
+#define DX_ENV_CC_POR_N_ADDR_VALUE_BIT_SIZE 0x1UL
+#define DX_ENV_CC_COLD_RST_REG_OFFSET 0x0FCUL
+#define DX_ENV_CC_COLD_RST_VALUE_BIT_SHIFT 0x0UL
+#define DX_ENV_CC_COLD_RST_VALUE_BIT_SIZE 0x1UL
+#define DX_ENV_DUMMY_ADDR_REG_OFFSET 0x108UL
+#define DX_ENV_DUMMY_ADDR_VALUE_BIT_SHIFT 0x0UL
+#define DX_ENV_DUMMY_ADDR_VALUE_BIT_SIZE 0x20UL
+#define DX_ENV_COUNTER_CLR_REG_OFFSET 0x118UL
+#define DX_ENV_COUNTER_CLR_VALUE_BIT_SHIFT 0x0UL
+#define DX_ENV_COUNTER_CLR_VALUE_BIT_SIZE 0x1UL
+#define DX_ENV_COUNTER_RD_REG_OFFSET 0x11CUL
+#define DX_ENV_COUNTER_RD_VALUE_BIT_SHIFT 0x0UL
+#define DX_ENV_COUNTER_RD_VALUE_BIT_SIZE 0x20UL
+#define DX_ENV_RNG_DEBUG_ENABLE_REG_OFFSET 0x430UL
+#define DX_ENV_RNG_DEBUG_ENABLE_VALUE_BIT_SHIFT 0x0UL
+#define DX_ENV_RNG_DEBUG_ENABLE_VALUE_BIT_SIZE 0x1UL
+#define DX_ENV_CC_LCS_REG_OFFSET 0x43CUL
+#define DX_ENV_CC_LCS_VALUE_BIT_SHIFT 0x0UL
+#define DX_ENV_CC_LCS_VALUE_BIT_SIZE 0x8UL
+#define DX_ENV_CC_IS_CM_DM_SECURE_RMA_REG_OFFSET 0x440UL
+#define DX_ENV_CC_IS_CM_DM_SECURE_RMA_IS_CM_BIT_SHIFT 0x0UL
+#define DX_ENV_CC_IS_CM_DM_SECURE_RMA_IS_CM_BIT_SIZE 0x1UL
+#define DX_ENV_CC_IS_CM_DM_SECURE_RMA_IS_DM_BIT_SHIFT 0x1UL
+#define DX_ENV_CC_IS_CM_DM_SECURE_RMA_IS_DM_BIT_SIZE 0x1UL
+#define DX_ENV_CC_IS_CM_DM_SECURE_RMA_IS_SECURE_BIT_SHIFT 0x2UL
+#define DX_ENV_CC_IS_CM_DM_SECURE_RMA_IS_SECURE_BIT_SIZE 0x1UL
+#define DX_ENV_CC_IS_CM_DM_SECURE_RMA_IS_RMA_BIT_SHIFT 0x3UL
+#define DX_ENV_CC_IS_CM_DM_SECURE_RMA_IS_RMA_BIT_SIZE 0x1UL
+#define DX_ENV_DCU_EN_REG_OFFSET 0x444UL
+#define DX_ENV_DCU_EN_VALUE_BIT_SHIFT 0x0UL
+#define DX_ENV_DCU_EN_VALUE_BIT_SIZE 0x20UL
+#define DX_ENV_CC_LCS_IS_VALID_REG_OFFSET 0x448UL
+#define DX_ENV_CC_LCS_IS_VALID_VALUE_BIT_SHIFT 0x0UL
+#define DX_ENV_CC_LCS_IS_VALID_VALUE_BIT_SIZE 0x1UL
+#define DX_ENV_POWER_DOWN_REG_OFFSET 0x478UL
+#define DX_ENV_POWER_DOWN_VALUE_BIT_SHIFT 0x0UL
+#define DX_ENV_POWER_DOWN_VALUE_BIT_SIZE 0x20UL
+#define DX_ENV_DCU_H_EN_REG_OFFSET 0x484UL
+#define DX_ENV_DCU_H_EN_VALUE_BIT_SHIFT 0x0UL
+#define DX_ENV_DCU_H_EN_VALUE_BIT_SIZE 0x20UL
+#define DX_ENV_VERSION_REG_OFFSET 0x488UL
+#define DX_ENV_VERSION_VALUE_BIT_SHIFT 0x0UL
+#define DX_ENV_VERSION_VALUE_BIT_SIZE 0x20UL
+#define DX_ENV_ROSC_WRITE_REG_OFFSET 0x48CUL
+#define DX_ENV_ROSC_WRITE_VALUE_BIT_SHIFT 0x0UL
+#define DX_ENV_ROSC_WRITE_VALUE_BIT_SIZE 0x1UL
+#define DX_ENV_ROSC_ADDR_REG_OFFSET 0x490UL
+#define DX_ENV_ROSC_ADDR_VALUE_BIT_SHIFT 0x0UL
+#define DX_ENV_ROSC_ADDR_VALUE_BIT_SIZE 0x8UL
+#define DX_ENV_RESET_SESSION_KEY_REG_OFFSET 0x494UL
+#define DX_ENV_RESET_SESSION_KEY_VALUE_BIT_SHIFT 0x0UL
+#define DX_ENV_RESET_SESSION_KEY_VALUE_BIT_SIZE 0x1UL
+#define DX_ENV_SESSION_KEY_0_REG_OFFSET 0x4A0UL
+#define DX_ENV_SESSION_KEY_0_VALUE_BIT_SHIFT 0x0UL
+#define DX_ENV_SESSION_KEY_0_VALUE_BIT_SIZE 0x20UL
+#define DX_ENV_SESSION_KEY_1_REG_OFFSET 0x4A4UL
+#define DX_ENV_SESSION_KEY_1_VALUE_BIT_SHIFT 0x0UL
+#define DX_ENV_SESSION_KEY_1_VALUE_BIT_SIZE 0x20UL
+#define DX_ENV_SESSION_KEY_2_REG_OFFSET 0x4A8UL
+#define DX_ENV_SESSION_KEY_2_VALUE_BIT_SHIFT 0x0UL
+#define DX_ENV_SESSION_KEY_2_VALUE_BIT_SIZE 0x20UL
+#define DX_ENV_SESSION_KEY_3_REG_OFFSET 0x4ACUL
+#define DX_ENV_SESSION_KEY_3_VALUE_BIT_SHIFT 0x0UL
+#define DX_ENV_SESSION_KEY_3_VALUE_BIT_SIZE 0x20UL
+#define DX_ENV_SESSION_KEY_VALID_REG_OFFSET 0x4B0UL
+#define DX_ENV_SESSION_KEY_VALID_VALUE_BIT_SHIFT 0x0UL
+#define DX_ENV_SESSION_KEY_VALID_VALUE_BIT_SIZE 0x1UL
+#define DX_ENV_SPIDEN_REG_OFFSET 0x4D0UL
+#define DX_ENV_SPIDEN_VALUE_BIT_SHIFT 0x0UL
+#define DX_ENV_SPIDEN_VALUE_BIT_SIZE 0x1UL
+#define DX_ENV_AXIM_USER_PARAMS_REG_OFFSET 0x600UL
+#define DX_ENV_AXIM_USER_PARAMS_ARUSER_BIT_SHIFT 0x0UL
+#define DX_ENV_AXIM_USER_PARAMS_ARUSER_BIT_SIZE 0x5UL
+#define DX_ENV_AXIM_USER_PARAMS_AWUSER_BIT_SHIFT 0x5UL
+#define DX_ENV_AXIM_USER_PARAMS_AWUSER_BIT_SIZE 0x5UL
+#define DX_ENV_SECURITY_MODE_OVERRIDE_REG_OFFSET 0x604UL
+#define DX_ENV_SECURITY_MODE_OVERRIDE_AWPROT_NS_BIT_BIT_SHIFT 0x0UL
+#define DX_ENV_SECURITY_MODE_OVERRIDE_AWPROT_NS_BIT_BIT_SIZE 0x1UL
+#define DX_ENV_SECURITY_MODE_OVERRIDE_AWPROT_NS_OVERRIDE_BIT_SHIFT 0x1UL
+#define DX_ENV_SECURITY_MODE_OVERRIDE_AWPROT_NS_OVERRIDE_BIT_SIZE 0x1UL
+#define DX_ENV_SECURITY_MODE_OVERRIDE_ARPROT_NS_BIT_BIT_SHIFT 0x2UL
+#define DX_ENV_SECURITY_MODE_OVERRIDE_ARPROT_NS_BIT_BIT_SIZE 0x1UL
+#define DX_ENV_SECURITY_MODE_OVERRIDE_ARPROT_NS_OVERRIDE_BIT_SHIFT 0x3UL
+#define DX_ENV_SECURITY_MODE_OVERRIDE_ARPROT_NS_OVERRIDE_BIT_SIZE 0x1UL
+#define DX_ENV_AO_CC_KPLT_0_REG_OFFSET 0x620UL
+#define DX_ENV_AO_CC_KPLT_0_VALUE_BIT_SHIFT 0x0UL
+#define DX_ENV_AO_CC_KPLT_0_VALUE_BIT_SIZE 0x20UL
+#define DX_ENV_AO_CC_KPLT_1_REG_OFFSET 0x624UL
+#define DX_ENV_AO_CC_KPLT_1_VALUE_BIT_SHIFT 0x0UL
+#define DX_ENV_AO_CC_KPLT_1_VALUE_BIT_SIZE 0x20UL
+#define DX_ENV_AO_CC_KPLT_2_REG_OFFSET 0x628UL
+#define DX_ENV_AO_CC_KPLT_2_VALUE_BIT_SHIFT 0x0UL
+#define DX_ENV_AO_CC_KPLT_2_VALUE_BIT_SIZE 0x20UL
+#define DX_ENV_AO_CC_KPLT_3_REG_OFFSET 0x62CUL
+#define DX_ENV_AO_CC_KPLT_3_VALUE_BIT_SHIFT 0x0UL
+#define DX_ENV_AO_CC_KPLT_3_VALUE_BIT_SIZE 0x20UL
+#define DX_ENV_AO_CC_KCST_0_REG_OFFSET 0x630UL
+#define DX_ENV_AO_CC_KCST_0_VALUE_BIT_SHIFT 0x0UL
+#define DX_ENV_AO_CC_KCST_0_VALUE_BIT_SIZE 0x20UL
+#define DX_ENV_AO_CC_KCST_1_REG_OFFSET 0x634UL
+#define DX_ENV_AO_CC_KCST_1_VALUE_BIT_SHIFT 0x0UL
+#define DX_ENV_AO_CC_KCST_1_VALUE_BIT_SIZE 0x20UL
+#define DX_ENV_AO_CC_KCST_2_REG_OFFSET 0x638UL
+#define DX_ENV_AO_CC_KCST_2_VALUE_BIT_SHIFT 0x0UL
+#define DX_ENV_AO_CC_KCST_2_VALUE_BIT_SIZE 0x20UL
+#define DX_ENV_AO_CC_KCST_3_REG_OFFSET 0x63CUL
+#define DX_ENV_AO_CC_KCST_3_VALUE_BIT_SHIFT 0x0UL
+#define DX_ENV_AO_CC_KCST_3_VALUE_BIT_SIZE 0x20UL
+#define DX_ENV_APB_FIPS_ADDR_REG_OFFSET 0x650UL
+#define DX_ENV_APB_FIPS_ADDR_VALUE_BIT_SHIFT 0x0UL
+#define DX_ENV_APB_FIPS_ADDR_VALUE_BIT_SIZE 0xCUL
+#define DX_ENV_APB_FIPS_VAL_REG_OFFSET 0x654UL
+#define DX_ENV_APB_FIPS_VAL_VALUE_BIT_SHIFT 0x0UL
+#define DX_ENV_APB_FIPS_VAL_VALUE_BIT_SIZE 0x20UL
+#define DX_ENV_APB_FIPS_MASK_REG_OFFSET 0x658UL
+#define DX_ENV_APB_FIPS_MASK_VALUE_BIT_SHIFT 0x0UL
+#define DX_ENV_APB_FIPS_MASK_VALUE_BIT_SIZE 0x20UL
+#define DX_ENV_APB_FIPS_CNT_REG_OFFSET 0x65CUL
+#define DX_ENV_APB_FIPS_CNT_VALUE_BIT_SHIFT 0x0UL
+#define DX_ENV_APB_FIPS_CNT_VALUE_BIT_SIZE 0x20UL
+#define DX_ENV_APB_FIPS_NEW_ADDR_REG_OFFSET 0x660UL
+#define DX_ENV_APB_FIPS_NEW_ADDR_VALUE_BIT_SHIFT 0x0UL
+#define DX_ENV_APB_FIPS_NEW_ADDR_VALUE_BIT_SIZE 0xCUL
+#define DX_ENV_APB_FIPS_NEW_VAL_REG_OFFSET 0x664UL
+#define DX_ENV_APB_FIPS_NEW_VAL_VALUE_BIT_SHIFT 0x0UL
+#define DX_ENV_APB_FIPS_NEW_VAL_VALUE_BIT_SIZE 0x20UL
+#define DX_ENV_APBP_FIPS_ADDR_REG_OFFSET 0x670UL
+#define DX_ENV_APBP_FIPS_ADDR_VALUE_BIT_SHIFT 0x0UL
+#define DX_ENV_APBP_FIPS_ADDR_VALUE_BIT_SIZE 0xCUL
+#define DX_ENV_APBP_FIPS_VAL_REG_OFFSET 0x674UL
+#define DX_ENV_APBP_FIPS_VAL_VALUE_BIT_SHIFT 0x0UL
+#define DX_ENV_APBP_FIPS_VAL_VALUE_BIT_SIZE 0x20UL
+#define DX_ENV_APBP_FIPS_MASK_REG_OFFSET 0x678UL
+#define DX_ENV_APBP_FIPS_MASK_VALUE_BIT_SHIFT 0x0UL
+#define DX_ENV_APBP_FIPS_MASK_VALUE_BIT_SIZE 0x20UL
+#define DX_ENV_APBP_FIPS_CNT_REG_OFFSET 0x67CUL
+#define DX_ENV_APBP_FIPS_CNT_VALUE_BIT_SHIFT 0x0UL
+#define DX_ENV_APBP_FIPS_CNT_VALUE_BIT_SIZE 0x20UL
+#define DX_ENV_APBP_FIPS_NEW_ADDR_REG_OFFSET 0x680UL
+#define DX_ENV_APBP_FIPS_NEW_ADDR_VALUE_BIT_SHIFT 0x0UL
+#define DX_ENV_APBP_FIPS_NEW_ADDR_VALUE_BIT_SIZE 0xCUL
+#define DX_ENV_APBP_FIPS_NEW_VAL_REG_OFFSET 0x684UL
+#define DX_ENV_APBP_FIPS_NEW_VAL_VALUE_BIT_SHIFT 0x0UL
+#define DX_ENV_APBP_FIPS_NEW_VAL_VALUE_BIT_SIZE 0x20UL
+#define DX_ENV_CC_POWERDOWN_EN_REG_OFFSET 0x690UL
+#define DX_ENV_CC_POWERDOWN_EN_VALUE_BIT_SHIFT 0x0UL
+#define DX_ENV_CC_POWERDOWN_EN_VALUE_BIT_SIZE 0x1UL
+#define DX_ENV_CC_POWERDOWN_RST_EN_REG_OFFSET 0x694UL
+#define DX_ENV_CC_POWERDOWN_RST_EN_VALUE_BIT_SHIFT 0x0UL
+#define DX_ENV_CC_POWERDOWN_RST_EN_VALUE_BIT_SIZE 0x1UL
+#define DX_ENV_POWERDOWN_RST_CNTR_REG_OFFSET 0x698UL
+#define DX_ENV_POWERDOWN_RST_CNTR_VALUE_BIT_SHIFT 0x0UL
+#define DX_ENV_POWERDOWN_RST_CNTR_VALUE_BIT_SIZE 0x20UL
+#define DX_ENV_POWERDOWN_EN_DEBUG_REG_OFFSET 0x69CUL
+#define DX_ENV_POWERDOWN_EN_DEBUG_VALUE_BIT_SHIFT 0x0UL
+#define DX_ENV_POWERDOWN_EN_DEBUG_VALUE_BIT_SIZE 0x1UL
+// --------------------------------------
+// BLOCK: ENV_CC_MEMORIES
+// --------------------------------------
+#define DX_ENV_FUSE_READY_REG_OFFSET 0x000UL
+#define DX_ENV_FUSE_READY_VALUE_BIT_SHIFT 0x0UL
+#define DX_ENV_FUSE_READY_VALUE_BIT_SIZE 0x1UL
+#define DX_ENV_PERF_RAM_MASTER_REG_OFFSET 0x0ECUL
+#define DX_ENV_PERF_RAM_MASTER_VALUE_BIT_SHIFT 0x0UL
+#define DX_ENV_PERF_RAM_MASTER_VALUE_BIT_SIZE 0x1UL
+#define DX_ENV_PERF_RAM_ADDR_HIGH4_REG_OFFSET 0x0F0UL
+#define DX_ENV_PERF_RAM_ADDR_HIGH4_VALUE_BIT_SHIFT 0x0UL
+#define DX_ENV_PERF_RAM_ADDR_HIGH4_VALUE_BIT_SIZE 0x2UL
+#define DX_ENV_FUSES_RAM_REG_OFFSET 0x3ECUL
+#define DX_ENV_FUSES_RAM_VALUE_BIT_SHIFT 0x0UL
+#define DX_ENV_FUSES_RAM_VALUE_BIT_SIZE 0x20UL
+// --------------------------------------
+// BLOCK: ENV_PERF_RAM_BASE
+// --------------------------------------
+#define DX_ENV_PERF_RAM_BASE_REG_OFFSET 0x000UL
+#define DX_ENV_PERF_RAM_BASE_VALUE_BIT_SHIFT 0x0UL
+#define DX_ENV_PERF_RAM_BASE_VALUE_BIT_SIZE 0x20UL
+
+#endif /*__DX_ENV_H__*/
diff --git a/drivers/staging/ccree/dx_host.h b/drivers/staging/ccree/dx_host.h
new file mode 100644
index 000000000000..4e42e748dc5f
--- /dev/null
+++ b/drivers/staging/ccree/dx_host.h
@@ -0,0 +1,155 @@
+/*
+ * Copyright (C) 2012-2017 ARM Limited or its affiliates.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __DX_HOST_H__
+#define __DX_HOST_H__
+
+// --------------------------------------
+// BLOCK: HOST_P
+// --------------------------------------
+#define DX_HOST_IRR_REG_OFFSET 0xA00UL
+#define DX_HOST_IRR_DSCRPTR_COMPLETION_LOW_INT_BIT_SHIFT 0x2UL
+#define DX_HOST_IRR_DSCRPTR_COMPLETION_LOW_INT_BIT_SIZE 0x1UL
+#define DX_HOST_IRR_AXI_ERR_INT_BIT_SHIFT 0x8UL
+#define DX_HOST_IRR_AXI_ERR_INT_BIT_SIZE 0x1UL
+#define DX_HOST_IRR_GPR0_BIT_SHIFT 0xBUL
+#define DX_HOST_IRR_GPR0_BIT_SIZE 0x1UL
+#define DX_HOST_IRR_DSCRPTR_WATERMARK_INT_BIT_SHIFT 0x13UL
+#define DX_HOST_IRR_DSCRPTR_WATERMARK_INT_BIT_SIZE 0x1UL
+#define DX_HOST_IRR_AXIM_COMP_INT_BIT_SHIFT 0x17UL
+#define DX_HOST_IRR_AXIM_COMP_INT_BIT_SIZE 0x1UL
+#define DX_HOST_IMR_REG_OFFSET 0xA04UL
+#define DX_HOST_IMR_NOT_USED_MASK_BIT_SHIFT 0x1UL
+#define DX_HOST_IMR_NOT_USED_MASK_BIT_SIZE 0x1UL
+#define DX_HOST_IMR_DSCRPTR_COMPLETION_MASK_BIT_SHIFT 0x2UL
+#define DX_HOST_IMR_DSCRPTR_COMPLETION_MASK_BIT_SIZE 0x1UL
+#define DX_HOST_IMR_AXI_ERR_MASK_BIT_SHIFT 0x8UL
+#define DX_HOST_IMR_AXI_ERR_MASK_BIT_SIZE 0x1UL
+#define DX_HOST_IMR_GPR0_BIT_SHIFT 0xBUL
+#define DX_HOST_IMR_GPR0_BIT_SIZE 0x1UL
+#define DX_HOST_IMR_DSCRPTR_WATERMARK_MASK0_BIT_SHIFT 0x13UL
+#define DX_HOST_IMR_DSCRPTR_WATERMARK_MASK0_BIT_SIZE 0x1UL
+#define DX_HOST_IMR_AXIM_COMP_INT_MASK_BIT_SHIFT 0x17UL
+#define DX_HOST_IMR_AXIM_COMP_INT_MASK_BIT_SIZE 0x1UL
+#define DX_HOST_ICR_REG_OFFSET 0xA08UL
+#define DX_HOST_ICR_DSCRPTR_COMPLETION_BIT_SHIFT 0x2UL
+#define DX_HOST_ICR_DSCRPTR_COMPLETION_BIT_SIZE 0x1UL
+#define DX_HOST_ICR_AXI_ERR_CLEAR_BIT_SHIFT 0x8UL
+#define DX_HOST_ICR_AXI_ERR_CLEAR_BIT_SIZE 0x1UL
+#define DX_HOST_ICR_GPR_INT_CLEAR_BIT_SHIFT 0xBUL
+#define DX_HOST_ICR_GPR_INT_CLEAR_BIT_SIZE 0x1UL
+#define DX_HOST_ICR_DSCRPTR_WATERMARK_QUEUE0_CLEAR_BIT_SHIFT 0x13UL
+#define DX_HOST_ICR_DSCRPTR_WATERMARK_QUEUE0_CLEAR_BIT_SIZE 0x1UL
+#define DX_HOST_ICR_AXIM_COMP_INT_CLEAR_BIT_SHIFT 0x17UL
+#define DX_HOST_ICR_AXIM_COMP_INT_CLEAR_BIT_SIZE 0x1UL
+#define DX_HOST_SIGNATURE_REG_OFFSET 0xA24UL
+#define DX_HOST_SIGNATURE_VALUE_BIT_SHIFT 0x0UL
+#define DX_HOST_SIGNATURE_VALUE_BIT_SIZE 0x20UL
+#define DX_HOST_BOOT_REG_OFFSET 0xA28UL
+#define DX_HOST_BOOT_SYNTHESIS_CONFIG_BIT_SHIFT 0x0UL
+#define DX_HOST_BOOT_SYNTHESIS_CONFIG_BIT_SIZE 0x1UL
+#define DX_HOST_BOOT_LARGE_RKEK_LOCAL_BIT_SHIFT 0x1UL
+#define DX_HOST_BOOT_LARGE_RKEK_LOCAL_BIT_SIZE 0x1UL
+#define DX_HOST_BOOT_HASH_IN_FUSES_LOCAL_BIT_SHIFT 0x2UL
+#define DX_HOST_BOOT_HASH_IN_FUSES_LOCAL_BIT_SIZE 0x1UL
+#define DX_HOST_BOOT_EXT_MEM_SECURED_LOCAL_BIT_SHIFT 0x3UL
+#define DX_HOST_BOOT_EXT_MEM_SECURED_LOCAL_BIT_SIZE 0x1UL
+#define DX_HOST_BOOT_RKEK_ECC_EXISTS_LOCAL_N_BIT_SHIFT 0x5UL
+#define DX_HOST_BOOT_RKEK_ECC_EXISTS_LOCAL_N_BIT_SIZE 0x1UL
+#define DX_HOST_BOOT_SRAM_SIZE_LOCAL_BIT_SHIFT 0x6UL
+#define DX_HOST_BOOT_SRAM_SIZE_LOCAL_BIT_SIZE 0x3UL
+#define DX_HOST_BOOT_DSCRPTR_EXISTS_LOCAL_BIT_SHIFT 0x9UL
+#define DX_HOST_BOOT_DSCRPTR_EXISTS_LOCAL_BIT_SIZE 0x1UL
+#define DX_HOST_BOOT_PAU_EXISTS_LOCAL_BIT_SHIFT 0xAUL
+#define DX_HOST_BOOT_PAU_EXISTS_LOCAL_BIT_SIZE 0x1UL
+#define DX_HOST_BOOT_RNG_EXISTS_LOCAL_BIT_SHIFT 0xBUL
+#define DX_HOST_BOOT_RNG_EXISTS_LOCAL_BIT_SIZE 0x1UL
+#define DX_HOST_BOOT_PKA_EXISTS_LOCAL_BIT_SHIFT 0xCUL
+#define DX_HOST_BOOT_PKA_EXISTS_LOCAL_BIT_SIZE 0x1UL
+#define DX_HOST_BOOT_RC4_EXISTS_LOCAL_BIT_SHIFT 0xDUL
+#define DX_HOST_BOOT_RC4_EXISTS_LOCAL_BIT_SIZE 0x1UL
+#define DX_HOST_BOOT_SHA_512_PRSNT_LOCAL_BIT_SHIFT 0xEUL
+#define DX_HOST_BOOT_SHA_512_PRSNT_LOCAL_BIT_SIZE 0x1UL
+#define DX_HOST_BOOT_SHA_256_PRSNT_LOCAL_BIT_SHIFT 0xFUL
+#define DX_HOST_BOOT_SHA_256_PRSNT_LOCAL_BIT_SIZE 0x1UL
+#define DX_HOST_BOOT_MD5_PRSNT_LOCAL_BIT_SHIFT 0x10UL
+#define DX_HOST_BOOT_MD5_PRSNT_LOCAL_BIT_SIZE 0x1UL
+#define DX_HOST_BOOT_HASH_EXISTS_LOCAL_BIT_SHIFT 0x11UL
+#define DX_HOST_BOOT_HASH_EXISTS_LOCAL_BIT_SIZE 0x1UL
+#define DX_HOST_BOOT_C2_EXISTS_LOCAL_BIT_SHIFT 0x12UL
+#define DX_HOST_BOOT_C2_EXISTS_LOCAL_BIT_SIZE 0x1UL
+#define DX_HOST_BOOT_DES_EXISTS_LOCAL_BIT_SHIFT 0x13UL
+#define DX_HOST_BOOT_DES_EXISTS_LOCAL_BIT_SIZE 0x1UL
+#define DX_HOST_BOOT_AES_XCBC_MAC_EXISTS_LOCAL_BIT_SHIFT 0x14UL
+#define DX_HOST_BOOT_AES_XCBC_MAC_EXISTS_LOCAL_BIT_SIZE 0x1UL
+#define DX_HOST_BOOT_AES_CMAC_EXISTS_LOCAL_BIT_SHIFT 0x15UL
+#define DX_HOST_BOOT_AES_CMAC_EXISTS_LOCAL_BIT_SIZE 0x1UL
+#define DX_HOST_BOOT_AES_CCM_EXISTS_LOCAL_BIT_SHIFT 0x16UL
+#define DX_HOST_BOOT_AES_CCM_EXISTS_LOCAL_BIT_SIZE 0x1UL
+#define DX_HOST_BOOT_AES_XEX_HW_T_CALC_LOCAL_BIT_SHIFT 0x17UL
+#define DX_HOST_BOOT_AES_XEX_HW_T_CALC_LOCAL_BIT_SIZE 0x1UL
+#define DX_HOST_BOOT_AES_XEX_EXISTS_LOCAL_BIT_SHIFT 0x18UL
+#define DX_HOST_BOOT_AES_XEX_EXISTS_LOCAL_BIT_SIZE 0x1UL
+#define DX_HOST_BOOT_CTR_EXISTS_LOCAL_BIT_SHIFT 0x19UL
+#define DX_HOST_BOOT_CTR_EXISTS_LOCAL_BIT_SIZE 0x1UL
+#define DX_HOST_BOOT_AES_DIN_BYTE_RESOLUTION_LOCAL_BIT_SHIFT 0x1AUL
+#define DX_HOST_BOOT_AES_DIN_BYTE_RESOLUTION_LOCAL_BIT_SIZE 0x1UL
+#define DX_HOST_BOOT_TUNNELING_ENB_LOCAL_BIT_SHIFT 0x1BUL
+#define DX_HOST_BOOT_TUNNELING_ENB_LOCAL_BIT_SIZE 0x1UL
+#define DX_HOST_BOOT_SUPPORT_256_192_KEY_LOCAL_BIT_SHIFT 0x1CUL
+#define DX_HOST_BOOT_SUPPORT_256_192_KEY_LOCAL_BIT_SIZE 0x1UL
+#define DX_HOST_BOOT_ONLY_ENCRYPT_LOCAL_BIT_SHIFT 0x1DUL
+#define DX_HOST_BOOT_ONLY_ENCRYPT_LOCAL_BIT_SIZE 0x1UL
+#define DX_HOST_BOOT_AES_EXISTS_LOCAL_BIT_SHIFT 0x1EUL
+#define DX_HOST_BOOT_AES_EXISTS_LOCAL_BIT_SIZE 0x1UL
+#define DX_HOST_VERSION_REG_OFFSET 0xA40UL
+#define DX_HOST_VERSION_VALUE_BIT_SHIFT 0x0UL
+#define DX_HOST_VERSION_VALUE_BIT_SIZE 0x20UL
+#define DX_HOST_KFDE0_VALID_REG_OFFSET 0xA60UL
+#define DX_HOST_KFDE0_VALID_VALUE_BIT_SHIFT 0x0UL
+#define DX_HOST_KFDE0_VALID_VALUE_BIT_SIZE 0x1UL
+#define DX_HOST_KFDE1_VALID_REG_OFFSET 0xA64UL
+#define DX_HOST_KFDE1_VALID_VALUE_BIT_SHIFT 0x0UL
+#define DX_HOST_KFDE1_VALID_VALUE_BIT_SIZE 0x1UL
+#define DX_HOST_KFDE2_VALID_REG_OFFSET 0xA68UL
+#define DX_HOST_KFDE2_VALID_VALUE_BIT_SHIFT 0x0UL
+#define DX_HOST_KFDE2_VALID_VALUE_BIT_SIZE 0x1UL
+#define DX_HOST_KFDE3_VALID_REG_OFFSET 0xA6CUL
+#define DX_HOST_KFDE3_VALID_VALUE_BIT_SHIFT 0x0UL
+#define DX_HOST_KFDE3_VALID_VALUE_BIT_SIZE 0x1UL
+#define DX_HOST_GPR0_REG_OFFSET 0xA70UL
+#define DX_HOST_GPR0_VALUE_BIT_SHIFT 0x0UL
+#define DX_HOST_GPR0_VALUE_BIT_SIZE 0x20UL
+#define DX_GPR_HOST_REG_OFFSET 0xA74UL
+#define DX_GPR_HOST_VALUE_BIT_SHIFT 0x0UL
+#define DX_GPR_HOST_VALUE_BIT_SIZE 0x20UL
+#define DX_HOST_POWER_DOWN_EN_REG_OFFSET 0xA78UL
+#define DX_HOST_POWER_DOWN_EN_VALUE_BIT_SHIFT 0x0UL
+#define DX_HOST_POWER_DOWN_EN_VALUE_BIT_SIZE 0x1UL
+// --------------------------------------
+// BLOCK: HOST_SRAM
+// --------------------------------------
+#define DX_SRAM_DATA_REG_OFFSET 0xF00UL
+#define DX_SRAM_DATA_VALUE_BIT_SHIFT 0x0UL
+#define DX_SRAM_DATA_VALUE_BIT_SIZE 0x20UL
+#define DX_SRAM_ADDR_REG_OFFSET 0xF04UL
+#define DX_SRAM_ADDR_VALUE_BIT_SHIFT 0x0UL
+#define DX_SRAM_ADDR_VALUE_BIT_SIZE 0xFUL
+#define DX_SRAM_DATA_READY_REG_OFFSET 0xF08UL
+#define DX_SRAM_DATA_READY_VALUE_BIT_SHIFT 0x0UL
+#define DX_SRAM_DATA_READY_VALUE_BIT_SIZE 0x1UL
+
+#endif //__DX_HOST_H__
diff --git a/drivers/staging/ccree/dx_reg_base_host.h b/drivers/staging/ccree/dx_reg_base_host.h
new file mode 100644
index 000000000000..58dafe05fbeb
--- /dev/null
+++ b/drivers/staging/ccree/dx_reg_base_host.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2012-2017 ARM Limited or its affiliates.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __DX_REG_BASE_HOST_H__
+#define __DX_REG_BASE_HOST_H__
+
+/* Identify platform: Xilinx Zynq7000 ZC706 */
+#define DX_PLAT_ZYNQ7000 1
+#define DX_PLAT_ZYNQ7000_ZC706 1
+
+#define DX_BASE_CC 0x80000000
+
+#define DX_BASE_ENV_REGS 0x40008000
+#define DX_BASE_ENV_CC_MEMORIES 0x40008000
+#define DX_BASE_ENV_PERF_RAM 0x40009000
+
+#define DX_BASE_HOST_RGF 0x0UL
+#define DX_BASE_CRY_KERNEL 0x0UL
+#define DX_BASE_ROM 0x40000000
+
+#endif /*__DX_REG_BASE_HOST_H__*/
diff --git a/drivers/staging/ccree/dx_reg_common.h b/drivers/staging/ccree/dx_reg_common.h
new file mode 100644
index 000000000000..4ffed386521c
--- /dev/null
+++ b/drivers/staging/ccree/dx_reg_common.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2012-2017 ARM Limited or its affiliates.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __DX_REG_COMMON_H__
+#define __DX_REG_COMMON_H__
+
+#define DX_DEV_SIGNATURE 0xDCC71200UL
+
+#define CC_HW_VERSION 0xef840015UL
+
+#define DX_DEV_SHA_MAX 512
+
+#endif /*__DX_REG_COMMON_H__*/
diff --git a/drivers/staging/ccree/hash_defs.h b/drivers/staging/ccree/hash_defs.h
new file mode 100644
index 000000000000..5ab0861fd1bb
--- /dev/null
+++ b/drivers/staging/ccree/hash_defs.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright (C) 2012-2017 ARM Limited or its affiliates.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _HASH_DEFS_H__
+#define _HASH_DEFS_H__
+
+#include "cc_crypto_ctx.h"
+
+/* This file provides definitions required by the hash engine driver. */
+#ifndef CC_CONFIG_HASH_SHA_512_SUPPORTED
+#define SEP_HASH_LENGTH_WORDS 2
+#else
+#define SEP_HASH_LENGTH_WORDS 4
+#endif
+
+#ifdef BIG__ENDIAN
+#define OPAD_CURRENT_LENGTH 0x40000000, 0x00000000 , 0x00000000, 0x00000000
+#define HASH_LARVAL_MD5 0x76543210, 0xFEDCBA98, 0x89ABCDEF, 0x01234567
+#define HASH_LARVAL_SHA1 0xF0E1D2C3, 0x76543210, 0xFEDCBA98, 0x89ABCDEF, 0x01234567
+#define HASH_LARVAL_SHA224 0XA44FFABE, 0XA78FF964, 0X11155868, 0X310BC0FF, 0X39590EF7, 0X17DD7030, 0X07D57C36, 0XD89E05C1
+#define HASH_LARVAL_SHA256 0X19CDE05B, 0XABD9831F, 0X8C68059B, 0X7F520E51, 0X3AF54FA5, 0X72F36E3C, 0X85AE67BB, 0X67E6096A
+#define HASH_LARVAL_SHA384 0X1D48B547, 0XA44FFABE, 0X0D2E0CDB, 0XA78FF964, 0X874AB48E, 0X11155868, 0X67263367, 0X310BC0FF, 0XD8EC2F15, 0X39590EF7, 0X5A015991, 0X17DD7030, 0X2A299A62, 0X07D57C36, 0X5D9DBBCB, 0XD89E05C1
+#define HASH_LARVAL_SHA512 0X19CDE05B, 0X79217E13, 0XABD9831F, 0X6BBD41FB, 0X8C68059B, 0X1F6C3E2B, 0X7F520E51, 0XD182E6AD, 0X3AF54FA5, 0XF1361D5F, 0X72F36E3C, 0X2BF894FE, 0X85AE67BB, 0X3BA7CA84, 0X67E6096A, 0X08C9BCF3
+#else
+#define OPAD_CURRENT_LENGTH 0x00000040, 0x00000000, 0x00000000, 0x00000000
+#define HASH_LARVAL_MD5 0x10325476, 0x98BADCFE, 0xEFCDAB89, 0x67452301
+#define HASH_LARVAL_SHA1 0xC3D2E1F0, 0x10325476, 0x98BADCFE, 0xEFCDAB89, 0x67452301
+#define HASH_LARVAL_SHA224 0xbefa4fa4, 0x64f98fa7, 0x68581511, 0xffc00b31, 0xf70e5939, 0x3070dd17, 0x367cd507, 0xc1059ed8
+#define HASH_LARVAL_SHA256 0x5be0cd19, 0x1f83d9ab, 0x9b05688c, 0x510e527f, 0xa54ff53a, 0x3c6ef372, 0xbb67ae85, 0x6a09e667
+#define HASH_LARVAL_SHA384 0X47B5481D, 0XBEFA4FA4, 0XDB0C2E0D, 0X64F98FA7, 0X8EB44A87, 0X68581511, 0X67332667, 0XFFC00B31, 0X152FECD8, 0XF70E5939, 0X9159015A, 0X3070DD17, 0X629A292A, 0X367CD507, 0XCBBB9D5D, 0XC1059ED8
+#define HASH_LARVAL_SHA512 0x5be0cd19, 0x137e2179, 0x1f83d9ab, 0xfb41bd6b, 0x9b05688c, 0x2b3e6c1f, 0x510e527f, 0xade682d1, 0xa54ff53a, 0x5f1d36f1, 0x3c6ef372, 0xfe94f82b, 0xbb67ae85, 0x84caa73b, 0x6a09e667, 0xf3bcc908
+#endif
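+/* The HASH_LARVAL_* values above are the standard MD5/SHA initial hash values
+ * (H0..Hn), apparently stored in reverse word order for the hash hardware;
+ * OPAD_CURRENT_LENGTH presumably pre-loads a hashed length of 64 bytes (one
+ * block), accounting for the opad block during HMAC finalization. */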
+
+enum HashConfig1Padding {
+ HASH_PADDING_DISABLED = 0,
+ HASH_PADDING_ENABLED = 1,
+ HASH_DIGEST_RESULT_LITTLE_ENDIAN = 2,
+ HASH_CONFIG1_PADDING_RESERVE32 = INT32_MAX,
+};
+
+enum HashCipherDoPadding {
+ DO_NOT_PAD = 0,
+ DO_PAD = 1,
+ HASH_CIPHER_DO_PADDING_RESERVE32 = INT32_MAX,
+};
+
+typedef struct SepHashPrivateContext {
+	/* The current length is placed at the end of the context buffer
+	 * because the hash context is also used for all HMAC operations.
+	 * The HMAC context includes a 64-byte K0 field. The reserved field of
+	 * struct drv_ctx_hash is 88 or 184 bytes, depending on whether
+	 * SHA-512 is supported (in which case the context size is 256 bytes).
+	 * The reserved field of struct drv_ctx_hash is 20 or 52 bytes, again
+	 * depending on SHA-512 support. This means that the size of this
+	 * structure without the reserved field is 20 bytes when SHA-512 is
+	 * not supported (SEP_HASH_LENGTH_WORDS defined as 2) and 28 bytes
+	 * otherwise (SEP_HASH_LENGTH_WORDS defined as 4). */
+ uint32_t reserved[(sizeof(struct drv_ctx_hash)/sizeof(uint32_t)) - SEP_HASH_LENGTH_WORDS - 3];
+ uint32_t CurrentDigestedLength[SEP_HASH_LENGTH_WORDS];
+ uint32_t KeyType;
+ uint32_t dataCompleted;
+ uint32_t hmacFinalization;
+ /* no space left */
+} SepHashPrivateContext_s;
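+/* Layout sanity sketch (illustrative; struct drv_ctx_hash comes from
+ * cc_crypto_ctx.h): the reserved[] sizing above is meant to make this private
+ * context exactly fill the shared hash context, e.g.:
+ *   BUILD_BUG_ON(sizeof(struct SepHashPrivateContext) !=
+ *		  sizeof(struct drv_ctx_hash));
+ */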
+
+#endif /*_HASH_DEFS_H__*/
+
diff --git a/drivers/staging/ccree/hw_queue_defs_plat.h b/drivers/staging/ccree/hw_queue_defs_plat.h
new file mode 100644
index 000000000000..aee02cc7588a
--- /dev/null
+++ b/drivers/staging/ccree/hw_queue_defs_plat.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2012-2017 ARM Limited or its affiliates.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __HW_QUEUE_DEFS_PLAT_H__
+#define __HW_QUEUE_DEFS_PLAT_H__
+
+
+/*****************************/
+/* Descriptor packing macros */
+/*****************************/
+
+#define HW_QUEUE_FREE_SLOTS_GET() (CC_HAL_READ_REGISTER(CC_REG_OFFSET(CRY_KERNEL, DSCRPTR_QUEUE_CONTENT)) & HW_QUEUE_SLOTS_MAX)
+
+#define HW_QUEUE_POLL_QUEUE_UNTIL_FREE_SLOTS(seqLen) \
+ do { \
+ } while (HW_QUEUE_FREE_SLOTS_GET() < (seqLen))
+
+#define HW_DESC_PUSH_TO_QUEUE(pDesc) do { \
+ LOG_HW_DESC(pDesc); \
+ HW_DESC_DUMP(pDesc); \
+ CC_HAL_WRITE_REGISTER(GET_HW_Q_DESC_WORD_IDX(0), (pDesc)->word[0]); \
+ CC_HAL_WRITE_REGISTER(GET_HW_Q_DESC_WORD_IDX(1), (pDesc)->word[1]); \
+ CC_HAL_WRITE_REGISTER(GET_HW_Q_DESC_WORD_IDX(2), (pDesc)->word[2]); \
+ CC_HAL_WRITE_REGISTER(GET_HW_Q_DESC_WORD_IDX(3), (pDesc)->word[3]); \
+ CC_HAL_WRITE_REGISTER(GET_HW_Q_DESC_WORD_IDX(4), (pDesc)->word[4]); \
+ wmb(); \
+ CC_HAL_WRITE_REGISTER(GET_HW_Q_DESC_WORD_IDX(5), (pDesc)->word[5]); \
+} while (0)
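+/* Illustrative push sequence (sketch; HwDesc_s, HW_QUEUE_SLOTS_MAX and the
+ * CC_HAL_* register accessors are assumed to be defined elsewhere in this
+ * driver):
+ *   HW_QUEUE_POLL_QUEUE_UNTIL_FREE_SLOTS(1);
+ *   HW_DESC_PUSH_TO_QUEUE(&desc);
+ */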
+
+#endif /*__HW_QUEUE_DEFS_PLAT_H__*/
diff --git a/drivers/staging/ccree/ssi_aead.c b/drivers/staging/ccree/ssi_aead.c
new file mode 100644
index 000000000000..038291773b59
--- /dev/null
+++ b/drivers/staging/ccree/ssi_aead.c
@@ -0,0 +1,2832 @@
+/*
+ * Copyright (C) 2012-2017 ARM Limited or its affiliates.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <crypto/algapi.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/internal/hash.h>
+#include <crypto/internal/aead.h>
+#include <crypto/sha.h>
+#include <crypto/ctr.h>
+#include <crypto/authenc.h>
+#include <crypto/aes.h>
+#include <crypto/des.h>
+#include <linux/rtnetlink.h>
+#include <linux/version.h>
+#include "ssi_config.h"
+#include "ssi_driver.h"
+#include "ssi_buffer_mgr.h"
+#include "ssi_aead.h"
+#include "ssi_request_mgr.h"
+#include "ssi_hash.h"
+#include "ssi_sysfs.h"
+#include "ssi_sram_mgr.h"
+#include "ssi_fips_local.h"
+
+#define template_aead template_u.aead
+
+#define MAX_AEAD_SETKEY_SEQ 12
+#define MAX_AEAD_PROCESS_SEQ 23
+
+#define MAX_HMAC_DIGEST_SIZE (SHA256_DIGEST_SIZE)
+#define MAX_HMAC_BLOCK_SIZE (SHA256_BLOCK_SIZE)
+
+#define AES_CCM_RFC4309_NONCE_SIZE 3
+#define MAX_NONCE_SIZE CTR_RFC3686_NONCE_SIZE
+
+
+/* Value of each ICV_CMP byte (of 8) in case of success */
+#define ICV_VERIF_OK 0x01
+
+struct ssi_aead_handle {
+ ssi_sram_addr_t sram_workspace_addr;
+ struct list_head aead_list;
+};
+
+struct ssi_aead_ctx {
+ struct ssi_drvdata *drvdata;
+	uint8_t ctr_nonce[MAX_NONCE_SIZE]; /* used for the RFC3686 CTR IV and AES-CCM */
+ uint8_t *enckey;
+ dma_addr_t enckey_dma_addr;
+ union {
+ struct {
+ uint8_t *padded_authkey;
+ uint8_t *ipad_opad; /* IPAD, OPAD*/
+ dma_addr_t padded_authkey_dma_addr;
+ dma_addr_t ipad_opad_dma_addr;
+ } hmac;
+ struct {
+ uint8_t *xcbc_keys; /* K1,K2,K3 */
+ dma_addr_t xcbc_keys_dma_addr;
+ } xcbc;
+ } auth_state;
+ unsigned int enc_keylen;
+ unsigned int auth_keylen;
+	unsigned int authsize; /* Actual (possibly reduced) size of the MAC/ICV */
+ enum drv_cipher_mode cipher_mode;
+ enum FlowMode flow_mode;
+ enum drv_hash_mode auth_mode;
+};
+
+static inline bool valid_assoclen(struct aead_request *req)
+{
+ return ((req->assoclen == 16) || (req->assoclen == 20));
+}
+
+static void ssi_aead_exit(struct crypto_aead *tfm)
+{
+ struct device *dev = NULL;
+ struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+
+ SSI_LOG_DEBUG("Clearing context @%p for %s\n",
+ crypto_aead_ctx(tfm), crypto_tfm_alg_name(&(tfm->base)));
+
+ dev = &ctx->drvdata->plat_dev->dev;
+ /* Unmap enckey buffer */
+ if (ctx->enckey != NULL) {
+ SSI_RESTORE_DMA_ADDR_TO_48BIT(ctx->enckey_dma_addr);
+ dma_free_coherent(dev, AES_MAX_KEY_SIZE, ctx->enckey, ctx->enckey_dma_addr);
+ SSI_LOG_DEBUG("Freed enckey DMA buffer enckey_dma_addr=0x%llX\n",
+ (unsigned long long)ctx->enckey_dma_addr);
+ ctx->enckey_dma_addr = 0;
+ ctx->enckey = NULL;
+ }
+
+	if (ctx->auth_mode == DRV_HASH_XCBC_MAC) { /* XCBC authentication */
+ if (ctx->auth_state.xcbc.xcbc_keys != NULL) {
+ SSI_RESTORE_DMA_ADDR_TO_48BIT(
+ ctx->auth_state.xcbc.xcbc_keys_dma_addr);
+ dma_free_coherent(dev, CC_AES_128_BIT_KEY_SIZE * 3,
+ ctx->auth_state.xcbc.xcbc_keys,
+ ctx->auth_state.xcbc.xcbc_keys_dma_addr);
+ }
+ SSI_LOG_DEBUG("Freed xcbc_keys DMA buffer xcbc_keys_dma_addr=0x%llX\n",
+ (unsigned long long)ctx->auth_state.xcbc.xcbc_keys_dma_addr);
+ ctx->auth_state.xcbc.xcbc_keys_dma_addr = 0;
+ ctx->auth_state.xcbc.xcbc_keys = NULL;
+ } else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC auth. */
+ if (ctx->auth_state.hmac.ipad_opad != NULL) {
+ SSI_RESTORE_DMA_ADDR_TO_48BIT(
+ ctx->auth_state.hmac.ipad_opad_dma_addr);
+ dma_free_coherent(dev, 2 * MAX_HMAC_DIGEST_SIZE,
+ ctx->auth_state.hmac.ipad_opad,
+ ctx->auth_state.hmac.ipad_opad_dma_addr);
+ SSI_LOG_DEBUG("Freed ipad_opad DMA buffer ipad_opad_dma_addr=0x%llX\n",
+ (unsigned long long)ctx->auth_state.hmac.ipad_opad_dma_addr);
+ ctx->auth_state.hmac.ipad_opad_dma_addr = 0;
+ ctx->auth_state.hmac.ipad_opad = NULL;
+ }
+ if (ctx->auth_state.hmac.padded_authkey != NULL) {
+ SSI_RESTORE_DMA_ADDR_TO_48BIT(
+ ctx->auth_state.hmac.padded_authkey_dma_addr);
+ dma_free_coherent(dev, MAX_HMAC_BLOCK_SIZE,
+ ctx->auth_state.hmac.padded_authkey,
+ ctx->auth_state.hmac.padded_authkey_dma_addr);
+ SSI_LOG_DEBUG("Freed padded_authkey DMA buffer padded_authkey_dma_addr=0x%llX\n",
+ (unsigned long long)ctx->auth_state.hmac.padded_authkey_dma_addr);
+ ctx->auth_state.hmac.padded_authkey_dma_addr = 0;
+ ctx->auth_state.hmac.padded_authkey = NULL;
+ }
+ }
+}
+
+static int ssi_aead_init(struct crypto_aead *tfm)
+{
+ struct device *dev;
+ struct aead_alg *alg = crypto_aead_alg(tfm);
+ struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct ssi_crypto_alg *ssi_alg =
+ container_of(alg, struct ssi_crypto_alg, aead_alg);
+ SSI_LOG_DEBUG("Initializing context @%p for %s\n", ctx, crypto_tfm_alg_name(&(tfm->base)));
+
+ CHECK_AND_RETURN_UPON_FIPS_ERROR();
+
+ /* Initialize modes in instance */
+ ctx->cipher_mode = ssi_alg->cipher_mode;
+ ctx->flow_mode = ssi_alg->flow_mode;
+ ctx->auth_mode = ssi_alg->auth_mode;
+ ctx->drvdata = ssi_alg->drvdata;
+ dev = &ctx->drvdata->plat_dev->dev;
+	crypto_aead_set_reqsize(tfm, sizeof(struct aead_req_ctx));
+
+ /* Allocate key buffer, cache line aligned */
+ ctx->enckey = dma_alloc_coherent(dev, AES_MAX_KEY_SIZE,
+ &ctx->enckey_dma_addr, GFP_KERNEL);
+ if (ctx->enckey == NULL) {
+ SSI_LOG_ERR("Failed allocating key buffer\n");
+ goto init_failed;
+ }
+ SSI_UPDATE_DMA_ADDR_TO_48BIT(ctx->enckey_dma_addr, AES_MAX_KEY_SIZE);
+ SSI_LOG_DEBUG("Allocated enckey buffer in context ctx->enckey=@%p\n", ctx->enckey);
+
+ /* Set default authlen value */
+
+	if (ctx->auth_mode == DRV_HASH_XCBC_MAC) { /* XCBC authentication */
+ /* Allocate dma-coherent buffer for XCBC's K1+K2+K3 */
+ /* (and temporary for user key - up to 256b) */
+ ctx->auth_state.xcbc.xcbc_keys = dma_alloc_coherent(dev,
+ CC_AES_128_BIT_KEY_SIZE * 3,
+ &ctx->auth_state.xcbc.xcbc_keys_dma_addr, GFP_KERNEL);
+ if (ctx->auth_state.xcbc.xcbc_keys == NULL) {
+ SSI_LOG_ERR("Failed allocating buffer for XCBC keys\n");
+ goto init_failed;
+ }
+ SSI_UPDATE_DMA_ADDR_TO_48BIT(
+ ctx->auth_state.xcbc.xcbc_keys_dma_addr,
+ CC_AES_128_BIT_KEY_SIZE * 3);
+ } else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC authentication */
+ /* Allocate dma-coherent buffer for IPAD + OPAD */
+ ctx->auth_state.hmac.ipad_opad = dma_alloc_coherent(dev,
+ 2 * MAX_HMAC_DIGEST_SIZE,
+ &ctx->auth_state.hmac.ipad_opad_dma_addr, GFP_KERNEL);
+ if (ctx->auth_state.hmac.ipad_opad == NULL) {
+ SSI_LOG_ERR("Failed allocating IPAD/OPAD buffer\n");
+ goto init_failed;
+ }
+ SSI_UPDATE_DMA_ADDR_TO_48BIT(
+ ctx->auth_state.hmac.ipad_opad_dma_addr,
+ 2 * MAX_HMAC_DIGEST_SIZE);
+ SSI_LOG_DEBUG("Allocated authkey buffer in context ctx->authkey=@%p\n",
+ ctx->auth_state.hmac.ipad_opad);
+
+ ctx->auth_state.hmac.padded_authkey = dma_alloc_coherent(dev,
+ MAX_HMAC_BLOCK_SIZE,
+ &ctx->auth_state.hmac.padded_authkey_dma_addr, GFP_KERNEL);
+ if (ctx->auth_state.hmac.padded_authkey == NULL) {
+ SSI_LOG_ERR("failed to allocate padded_authkey\n");
+ goto init_failed;
+ }
+ SSI_UPDATE_DMA_ADDR_TO_48BIT(
+ ctx->auth_state.hmac.padded_authkey_dma_addr,
+ MAX_HMAC_BLOCK_SIZE);
+ } else {
+ ctx->auth_state.hmac.ipad_opad = NULL;
+ ctx->auth_state.hmac.padded_authkey = NULL;
+ }
+
+ return 0;
+
+init_failed:
+ ssi_aead_exit(tfm);
+ return -ENOMEM;
+}
+
+
+static void ssi_aead_complete(struct device *dev, void *ssi_req, void __iomem *cc_base)
+{
+ struct aead_request *areq = (struct aead_request *)ssi_req;
+ struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
+ struct crypto_aead *tfm = crypto_aead_reqtfm(ssi_req);
+ struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ int err = 0;
+ DECL_CYCLE_COUNT_RESOURCES;
+
+ START_CYCLE_COUNT();
+
+ ssi_buffer_mgr_unmap_aead_request(dev, areq);
+
+ /* Restore ordinary iv pointer */
+ areq->iv = areq_ctx->backup_iv;
+
+ if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
+ if (memcmp(areq_ctx->mac_buf, areq_ctx->icv_virt_addr,
+ ctx->authsize) != 0) {
+ SSI_LOG_DEBUG("Payload authentication failure, "
+ "(auth-size=%d, cipher=%d).\n",
+ ctx->authsize, ctx->cipher_mode);
+			/* On payload authentication failure the decrypted
+			 * message must not be revealed, so zero its memory. */
+ ssi_buffer_mgr_zero_sgl(areq->dst, areq_ctx->cryptlen);
+ err = -EBADMSG;
+ }
+ } else { /*ENCRYPT*/
+ if (unlikely(areq_ctx->is_icv_fragmented == true))
+ ssi_buffer_mgr_copy_scatterlist_portion(
+ areq_ctx->mac_buf, areq_ctx->dstSgl, areq->cryptlen+areq_ctx->dstOffset,
+ areq->cryptlen+areq_ctx->dstOffset + ctx->authsize, SSI_SG_FROM_BUF);
+
+ /* If an IV was generated, copy it back to the user provided buffer. */
+ if (areq_ctx->backup_giv != NULL) {
+ if (ctx->cipher_mode == DRV_CIPHER_CTR) {
+ memcpy(areq_ctx->backup_giv, areq_ctx->ctr_iv + CTR_RFC3686_NONCE_SIZE, CTR_RFC3686_IV_SIZE);
+ } else if (ctx->cipher_mode == DRV_CIPHER_CCM) {
+ memcpy(areq_ctx->backup_giv, areq_ctx->ctr_iv + CCM_BLOCK_IV_OFFSET, CCM_BLOCK_IV_SIZE);
+ }
+ }
+ }
+
+ END_CYCLE_COUNT(STAT_OP_TYPE_GENERIC, STAT_PHASE_4);
+ aead_request_complete(areq, err);
+}
+
+static int xcbc_setkey(HwDesc_s *desc, struct ssi_aead_ctx *ctx)
+{
+ /* Load the AES key */
+ HW_DESC_INIT(&desc[0]);
+	/* The source/user key shares this buffer with the derived output
+	 * keys, since it is not needed anymore once it has been loaded. */
+ HW_DESC_SET_DIN_TYPE(&desc[0], DMA_DLLI, ctx->auth_state.xcbc.xcbc_keys_dma_addr, ctx->auth_keylen, NS_BIT);
+ HW_DESC_SET_CIPHER_MODE(&desc[0], DRV_CIPHER_ECB);
+ HW_DESC_SET_CIPHER_CONFIG0(&desc[0], DRV_CRYPTO_DIRECTION_ENCRYPT);
+ HW_DESC_SET_KEY_SIZE_AES(&desc[0], ctx->auth_keylen);
+ HW_DESC_SET_FLOW_MODE(&desc[0], S_DIN_to_AES);
+ HW_DESC_SET_SETUP_MODE(&desc[0], SETUP_LOAD_KEY0);
+
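+	/* Derive K1, K2 and K3 as in AES-XCBC-MAC (RFC 3566) by encrypting
+	 * the constants 0x01..01, 0x02..02 and 0x03..03 with the user key
+	 * loaded above; the results are written back into the xcbc_keys
+	 * buffer. */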
+ HW_DESC_INIT(&desc[1]);
+ HW_DESC_SET_DIN_CONST(&desc[1], 0x01010101, CC_AES_128_BIT_KEY_SIZE);
+ HW_DESC_SET_FLOW_MODE(&desc[1], DIN_AES_DOUT);
+ HW_DESC_SET_DOUT_DLLI(&desc[1], ctx->auth_state.xcbc.xcbc_keys_dma_addr, AES_KEYSIZE_128, NS_BIT, 0);
+
+ HW_DESC_INIT(&desc[2]);
+ HW_DESC_SET_DIN_CONST(&desc[2], 0x02020202, CC_AES_128_BIT_KEY_SIZE);
+ HW_DESC_SET_FLOW_MODE(&desc[2], DIN_AES_DOUT);
+ HW_DESC_SET_DOUT_DLLI(&desc[2], (ctx->auth_state.xcbc.xcbc_keys_dma_addr
+ + AES_KEYSIZE_128),
+ AES_KEYSIZE_128, NS_BIT, 0);
+
+ HW_DESC_INIT(&desc[3]);
+ HW_DESC_SET_DIN_CONST(&desc[3], 0x03030303, CC_AES_128_BIT_KEY_SIZE);
+ HW_DESC_SET_FLOW_MODE(&desc[3], DIN_AES_DOUT);
+ HW_DESC_SET_DOUT_DLLI(&desc[3], (ctx->auth_state.xcbc.xcbc_keys_dma_addr
+ + 2 * AES_KEYSIZE_128),
+ AES_KEYSIZE_128, NS_BIT, 0);
+
+ return 4;
+}
+
+static int hmac_setkey(HwDesc_s *desc, struct ssi_aead_ctx *ctx)
+{
+ unsigned int hmacPadConst[2] = { HMAC_IPAD_CONST, HMAC_OPAD_CONST };
+ unsigned int digest_ofs = 0;
+ unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
+ DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
+ unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ?
+ CC_SHA1_DIGEST_SIZE : CC_SHA256_DIGEST_SIZE;
+
+ int idx = 0;
+ int i;
+
+	/* Derive the HMAC inner/outer pad keys: for each pad constant, hash
+	 * one block of (padded_authkey XOR constant) and store the resulting
+	 * intermediate digest into the ipad_opad buffer. */
+ for (i = 0; i < 2; i++) {
+ /* Load hash initial state */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_CIPHER_MODE(&desc[idx], hash_mode);
+ HW_DESC_SET_DIN_SRAM(&desc[idx],
+ ssi_ahash_get_larval_digest_sram_addr(
+ ctx->drvdata, ctx->auth_mode),
+ digest_size);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
+ HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE0);
+ idx++;
+
+		/* Load the hash current length */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_CIPHER_MODE(&desc[idx], hash_mode);
+ HW_DESC_SET_DIN_CONST(&desc[idx], 0, HASH_LEN_SIZE);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
+ HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
+ idx++;
+
+ /* Prepare ipad key */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_XOR_VAL(&desc[idx], hmacPadConst[i]);
+ HW_DESC_SET_CIPHER_MODE(&desc[idx], hash_mode);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
+ HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE1);
+ idx++;
+
+ /* Perform HASH update */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
+ ctx->auth_state.hmac.padded_authkey_dma_addr,
+ SHA256_BLOCK_SIZE, NS_BIT);
+ HW_DESC_SET_CIPHER_MODE(&desc[idx], hash_mode);
+ HW_DESC_SET_XOR_ACTIVE(&desc[idx]);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], DIN_HASH);
+ idx++;
+
+		/* Get the digest */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_CIPHER_MODE(&desc[idx], hash_mode);
+ HW_DESC_SET_DOUT_DLLI(&desc[idx],
+ (ctx->auth_state.hmac.ipad_opad_dma_addr +
+ digest_ofs),
+ digest_size, NS_BIT, 0);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], S_HASH_to_DOUT);
+ HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE0);
+ HW_DESC_SET_CIPHER_CONFIG1(&desc[idx], HASH_PADDING_DISABLED);
+ idx++;
+
+ digest_ofs += digest_size;
+ }
+
+ return idx;
+}
+
+static int validate_keys_sizes(struct ssi_aead_ctx *ctx)
+{
+ SSI_LOG_DEBUG("enc_keylen=%u authkeylen=%u\n",
+ ctx->enc_keylen, ctx->auth_keylen);
+
+ switch (ctx->auth_mode) {
+ case DRV_HASH_SHA1:
+ case DRV_HASH_SHA256:
+ break;
+ case DRV_HASH_XCBC_MAC:
+ if ((ctx->auth_keylen != AES_KEYSIZE_128) &&
+ (ctx->auth_keylen != AES_KEYSIZE_192) &&
+ (ctx->auth_keylen != AES_KEYSIZE_256))
+ return -ENOTSUPP;
+ break;
+	case DRV_HASH_NULL: /* Not authenc (e.g., CCM) - no auth_key */
+ if (ctx->auth_keylen > 0)
+ return -EINVAL;
+ break;
+ default:
+ SSI_LOG_ERR("Invalid auth_mode=%d\n", ctx->auth_mode);
+ return -EINVAL;
+ }
+ /* Check cipher key size */
+ if (unlikely(ctx->flow_mode == S_DIN_to_DES)) {
+ if (ctx->enc_keylen != DES3_EDE_KEY_SIZE) {
+ SSI_LOG_ERR("Invalid cipher(3DES) key size: %u\n",
+ ctx->enc_keylen);
+ return -EINVAL;
+ }
+ } else { /* Default assumed to be AES ciphers */
+ if ((ctx->enc_keylen != AES_KEYSIZE_128) &&
+ (ctx->enc_keylen != AES_KEYSIZE_192) &&
+ (ctx->enc_keylen != AES_KEYSIZE_256)) {
+ SSI_LOG_ERR("Invalid cipher(AES) key size: %u\n",
+ ctx->enc_keylen);
+ return -EINVAL;
+ }
+ }
+
+	return 0; /* All key size checks passed */
+}
+/* This function prepares the user key so it can be passed to the HMAC
+ * processing: it is copied to an internal buffer, or hashed first if the
+ * key is longer than the block size. */
+static int
+ssi_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
+{
+ dma_addr_t key_dma_addr = 0;
+ struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct device *dev = &ctx->drvdata->plat_dev->dev;
+ uint32_t larval_addr = ssi_ahash_get_larval_digest_sram_addr(
+ ctx->drvdata, ctx->auth_mode);
+ struct ssi_crypto_req ssi_req = {};
+ unsigned int blocksize;
+ unsigned int digestsize;
+ unsigned int hashmode;
+ unsigned int idx = 0;
+ int rc = 0;
+ HwDesc_s desc[MAX_AEAD_SETKEY_SEQ];
+ dma_addr_t padded_authkey_dma_addr =
+ ctx->auth_state.hmac.padded_authkey_dma_addr;
+
+ switch (ctx->auth_mode) { /* auth_key required and >0 */
+ case DRV_HASH_SHA1:
+ blocksize = SHA1_BLOCK_SIZE;
+ digestsize = SHA1_DIGEST_SIZE;
+ hashmode = DRV_HASH_HW_SHA1;
+ break;
+ case DRV_HASH_SHA256:
+ default:
+ blocksize = SHA256_BLOCK_SIZE;
+ digestsize = SHA256_DIGEST_SIZE;
+ hashmode = DRV_HASH_HW_SHA256;
+ }
+
+ if (likely(keylen != 0)) {
+ key_dma_addr = dma_map_single(dev, (void *)key, keylen, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev, key_dma_addr))) {
+ SSI_LOG_ERR("Mapping key va=0x%p len=%u for"
+ " DMA failed\n", key, keylen);
+ return -ENOMEM;
+ }
+ SSI_UPDATE_DMA_ADDR_TO_48BIT(key_dma_addr, keylen);
+ if (keylen > blocksize) {
+ /* Load hash initial state */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_CIPHER_MODE(&desc[idx], hashmode);
+ HW_DESC_SET_DIN_SRAM(&desc[idx], larval_addr, digestsize);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
+ HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE0);
+ idx++;
+
+			/* Load the hash current length */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_CIPHER_MODE(&desc[idx], hashmode);
+ HW_DESC_SET_DIN_CONST(&desc[idx], 0, HASH_LEN_SIZE);
+ HW_DESC_SET_CIPHER_CONFIG1(&desc[idx], HASH_PADDING_ENABLED);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
+ HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
+ idx++;
+
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
+ key_dma_addr,
+ keylen, NS_BIT);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], DIN_HASH);
+ idx++;
+
+ /* Get hashed key */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_CIPHER_MODE(&desc[idx], hashmode);
+ HW_DESC_SET_DOUT_DLLI(&desc[idx],
+ padded_authkey_dma_addr,
+ digestsize,
+ NS_BIT, 0);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], S_HASH_to_DOUT);
+ HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE0);
+ HW_DESC_SET_CIPHER_CONFIG1(&desc[idx],
+ HASH_PADDING_DISABLED);
+ HW_DESC_SET_CIPHER_CONFIG0(&desc[idx],
+ HASH_DIGEST_RESULT_LITTLE_ENDIAN);
+ idx++;
+
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_DIN_CONST(&desc[idx], 0, (blocksize - digestsize));
+ HW_DESC_SET_FLOW_MODE(&desc[idx], BYPASS);
+ HW_DESC_SET_DOUT_DLLI(&desc[idx],
+ (padded_authkey_dma_addr + digestsize),
+ (blocksize - digestsize),
+ NS_BIT, 0);
+ idx++;
+ } else {
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
+ key_dma_addr,
+ keylen, NS_BIT);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], BYPASS);
+ HW_DESC_SET_DOUT_DLLI(&desc[idx],
+ (padded_authkey_dma_addr),
+ keylen, NS_BIT, 0);
+ idx++;
+
+ if ((blocksize - keylen) != 0) {
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_DIN_CONST(&desc[idx], 0,
+ (blocksize - keylen));
+ HW_DESC_SET_FLOW_MODE(&desc[idx], BYPASS);
+ HW_DESC_SET_DOUT_DLLI(&desc[idx],
+ (padded_authkey_dma_addr + keylen),
+ (blocksize - keylen),
+ NS_BIT, 0);
+ idx++;
+ }
+ }
+ } else {
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_DIN_CONST(&desc[idx], 0,
+ (blocksize - keylen));
+ HW_DESC_SET_FLOW_MODE(&desc[idx], BYPASS);
+ HW_DESC_SET_DOUT_DLLI(&desc[idx],
+ padded_authkey_dma_addr,
+ blocksize,
+ NS_BIT, 0);
+ idx++;
+ }
+
+#ifdef ENABLE_CYCLE_COUNT
+ ssi_req.op_type = STAT_OP_TYPE_SETKEY;
+#endif
+
+ rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);
+ if (unlikely(rc != 0))
+ SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
+
+ if (likely(key_dma_addr != 0)) {
+ SSI_RESTORE_DMA_ADDR_TO_48BIT(key_dma_addr);
+ dma_unmap_single(dev, key_dma_addr, keylen, DMA_TO_DEVICE);
+ }
+
+ return rc;
+}
+
+
+static int
+ssi_aead_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
+{
+ struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct rtattr *rta = (struct rtattr *)key;
+ struct ssi_crypto_req ssi_req = {};
+ struct crypto_authenc_key_param *param;
+ HwDesc_s desc[MAX_AEAD_SETKEY_SEQ];
+ int seq_len = 0, rc = -EINVAL;
+ DECL_CYCLE_COUNT_RESOURCES;
+
+ SSI_LOG_DEBUG("Setting key in context @%p for %s. key=%p keylen=%u\n",
+ ctx, crypto_tfm_alg_name(crypto_aead_tfm(tfm)), key, keylen);
+
+ CHECK_AND_RETURN_UPON_FIPS_ERROR();
+ /* STAT_PHASE_0: Init and sanity checks */
+ START_CYCLE_COUNT();
+
+ if (ctx->auth_mode != DRV_HASH_NULL) { /* authenc() alg. */
+ if (!RTA_OK(rta, keylen))
+ goto badkey;
+ if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
+ goto badkey;
+ if (RTA_PAYLOAD(rta) < sizeof(*param))
+ goto badkey;
+ param = RTA_DATA(rta);
+ ctx->enc_keylen = be32_to_cpu(param->enckeylen);
+ key += RTA_ALIGN(rta->rta_len);
+ keylen -= RTA_ALIGN(rta->rta_len);
+ if (keylen < ctx->enc_keylen)
+ goto badkey;
+ ctx->auth_keylen = keylen - ctx->enc_keylen;
+
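+		/* For rfc3686(ctr(aes)) the key blob at this point is assumed
+		 * to be laid out as [ auth_key | aes_key | 4-byte nonce ];
+		 * the nonce is split off below into ctx->ctr_nonce. */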
+ if (ctx->cipher_mode == DRV_CIPHER_CTR) {
+			/* the nonce is stored in the last bytes of the key */
+ if (ctx->enc_keylen <
+ (AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE))
+ goto badkey;
+ /* Copy nonce from last 4 bytes in CTR key to
+ * first 4 bytes in CTR IV */
+ memcpy(ctx->ctr_nonce, key + ctx->auth_keylen + ctx->enc_keylen -
+ CTR_RFC3686_NONCE_SIZE, CTR_RFC3686_NONCE_SIZE);
+ /* Set CTR key size */
+ ctx->enc_keylen -= CTR_RFC3686_NONCE_SIZE;
+ }
+ } else { /* non-authenc - has just one key */
+ ctx->enc_keylen = keylen;
+ ctx->auth_keylen = 0;
+ }
+
+ rc = validate_keys_sizes(ctx);
+ if (unlikely(rc != 0))
+ goto badkey;
+
+ END_CYCLE_COUNT(STAT_OP_TYPE_SETKEY, STAT_PHASE_0);
+ /* STAT_PHASE_1: Copy key to ctx */
+ START_CYCLE_COUNT();
+
+ /* Get key material */
+ memcpy(ctx->enckey, key + ctx->auth_keylen, ctx->enc_keylen);
+ if (ctx->enc_keylen == 24)
+ memset(ctx->enckey + 24, 0, CC_AES_KEY_SIZE_MAX - 24);
+ if (ctx->auth_mode == DRV_HASH_XCBC_MAC) {
+ memcpy(ctx->auth_state.xcbc.xcbc_keys, key, ctx->auth_keylen);
+ } else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC */
+ rc = ssi_get_plain_hmac_key(tfm, key, ctx->auth_keylen);
+ if (rc != 0)
+ goto badkey;
+ }
+
+ END_CYCLE_COUNT(STAT_OP_TYPE_SETKEY, STAT_PHASE_1);
+
+ /* STAT_PHASE_2: Create sequence */
+ START_CYCLE_COUNT();
+
+ switch (ctx->auth_mode) {
+ case DRV_HASH_SHA1:
+ case DRV_HASH_SHA256:
+ seq_len = hmac_setkey(desc, ctx);
+ break;
+ case DRV_HASH_XCBC_MAC:
+ seq_len = xcbc_setkey(desc, ctx);
+ break;
+ case DRV_HASH_NULL: /* non-authenc modes, e.g., CCM */
+ break; /* No auth. key setup */
+ default:
+ SSI_LOG_ERR("Unsupported authenc (%d)\n", ctx->auth_mode);
+ rc = -ENOTSUPP;
+ goto badkey;
+ }
+
+ END_CYCLE_COUNT(STAT_OP_TYPE_SETKEY, STAT_PHASE_2);
+
+ /* STAT_PHASE_3: Submit sequence to HW */
+ START_CYCLE_COUNT();
+
+	if (seq_len > 0) { /* For CCM there is no sequence to set up the key */
+#ifdef ENABLE_CYCLE_COUNT
+ ssi_req.op_type = STAT_OP_TYPE_SETKEY;
+#endif
+ rc = send_request(ctx->drvdata, &ssi_req, desc, seq_len, 0);
+ if (unlikely(rc != 0)) {
+ SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
+ goto setkey_error;
+ }
+ }
+
+ /* Update STAT_PHASE_3 */
+ END_CYCLE_COUNT(STAT_OP_TYPE_SETKEY, STAT_PHASE_3);
+ return rc;
+
+badkey:
+ crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+
+setkey_error:
+ return rc;
+}
+
+#if SSI_CC_HAS_AES_CCM
+static int ssi_rfc4309_ccm_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
+{
+ struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ int rc = 0;
+
+ if (keylen < 3)
+ return -EINVAL;
+
+ keylen -= 3;
+ memcpy(ctx->ctr_nonce, key + keylen, 3);
+
+ rc = ssi_aead_setkey(tfm, key, keylen);
+
+ return rc;
+}
+#endif /*SSI_CC_HAS_AES_CCM*/
+
+static int ssi_aead_setauthsize(
+ struct crypto_aead *authenc,
+ unsigned int authsize)
+{
+ struct ssi_aead_ctx *ctx = crypto_aead_ctx(authenc);
+
+ CHECK_AND_RETURN_UPON_FIPS_ERROR();
+ /* Unsupported auth. sizes */
+	if ((authsize == 0) ||
+	    (authsize > crypto_aead_maxauthsize(authenc))) {
+		return -ENOTSUPP;
+	}
+
+ ctx->authsize = authsize;
+ SSI_LOG_DEBUG("authlen=%d\n", ctx->authsize);
+
+ return 0;
+}
+
+#if SSI_CC_HAS_AES_CCM
+static int ssi_rfc4309_ccm_setauthsize(struct crypto_aead *authenc,
+ unsigned int authsize)
+{
+ switch (authsize) {
+ case 8:
+ case 12:
+ case 16:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return ssi_aead_setauthsize(authenc, authsize);
+}
+
+static int ssi_ccm_setauthsize(struct crypto_aead *authenc,
+ unsigned int authsize)
+{
+ switch (authsize) {
+ case 4:
+ case 6:
+ case 8:
+ case 10:
+ case 12:
+ case 14:
+ case 16:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return ssi_aead_setauthsize(authenc, authsize);
+}
+#endif /*SSI_CC_HAS_AES_CCM*/
+
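+/*
+ * Build the descriptor that feeds the associated data to the hash engine.
+ * The data comes either from a single contiguous DMA buffer (DLLI) or from
+ * an MLLI table previously copied to SRAM.
+ */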
+static inline void
+ssi_aead_create_assoc_desc(
+ struct aead_request *areq,
+ unsigned int flow_mode,
+ HwDesc_s desc[],
+ unsigned int *seq_size)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(areq);
+ struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
+ enum ssi_req_dma_buf_type assoc_dma_type = areq_ctx->assoc_buff_type;
+ unsigned int idx = *seq_size;
+
+ switch (assoc_dma_type) {
+ case SSI_DMA_BUF_DLLI:
+ SSI_LOG_DEBUG("ASSOC buffer type DLLI\n");
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
+ sg_dma_address(areq->src),
+ areq->assoclen, NS_BIT);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], flow_mode);
+		if (ctx->auth_mode == DRV_HASH_XCBC_MAC && areq_ctx->cryptlen > 0)
+			HW_DESC_SET_DIN_NOT_LAST_INDICATION(&desc[idx]);
+ break;
+ case SSI_DMA_BUF_MLLI:
+ SSI_LOG_DEBUG("ASSOC buffer type MLLI\n");
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_MLLI,
+ areq_ctx->assoc.sram_addr,
+ areq_ctx->assoc.mlli_nents,
+ NS_BIT);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], flow_mode);
+		if (ctx->auth_mode == DRV_HASH_XCBC_MAC && areq_ctx->cryptlen > 0)
+			HW_DESC_SET_DIN_NOT_LAST_INDICATION(&desc[idx]);
+ break;
+ case SSI_DMA_BUF_NULL:
+ default:
+ SSI_LOG_ERR("Invalid ASSOC buffer type\n");
+ }
+
+ *seq_size = (++idx);
+}
+
+static inline void
+ssi_aead_process_authenc_data_desc(
+ struct aead_request *areq,
+ unsigned int flow_mode,
+ HwDesc_s desc[],
+ unsigned int *seq_size,
+ int direct)
+{
+ struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
+ enum ssi_req_dma_buf_type data_dma_type = areq_ctx->data_buff_type;
+ unsigned int idx = *seq_size;
+
+ switch (data_dma_type) {
+ case SSI_DMA_BUF_DLLI:
+ {
+ struct scatterlist *cipher =
+ (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
+ areq_ctx->dstSgl : areq_ctx->srcSgl;
+
+ unsigned int offset =
+ (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
+ areq_ctx->dstOffset : areq_ctx->srcOffset;
+ SSI_LOG_DEBUG("AUTHENC: SRC/DST buffer type DLLI\n");
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
+			(sg_dma_address(cipher) + offset), areq_ctx->cryptlen,
+ NS_BIT);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], flow_mode);
+ break;
+ }
+ case SSI_DMA_BUF_MLLI:
+ {
+		/*
+		 * Double-pass flow (default):
+		 * assoc. data + IV + data are compacted in one MLLI table.
+		 * If assoclen is zero, only the IV is processed.
+		 */
+ ssi_sram_addr_t mlli_addr = areq_ctx->assoc.sram_addr;
+ uint32_t mlli_nents = areq_ctx->assoc.mlli_nents;
+
+		if (likely(areq_ctx->is_single_pass)) {
+			if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
+ mlli_addr = areq_ctx->dst.sram_addr;
+ mlli_nents = areq_ctx->dst.mlli_nents;
+ } else {
+ mlli_addr = areq_ctx->src.sram_addr;
+ mlli_nents = areq_ctx->src.mlli_nents;
+ }
+ }
+
+ SSI_LOG_DEBUG("AUTHENC: SRC/DST buffer type MLLI\n");
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_MLLI,
+ mlli_addr, mlli_nents, NS_BIT);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], flow_mode);
+ break;
+ }
+ case SSI_DMA_BUF_NULL:
+ default:
+ SSI_LOG_ERR("AUTHENC: Invalid SRC/DST buffer type\n");
+ }
+
+ *seq_size = (++idx);
+}
+
+static inline void
+ssi_aead_process_cipher_data_desc(
+ struct aead_request *areq,
+ unsigned int flow_mode,
+ HwDesc_s desc[],
+ unsigned int *seq_size)
+{
+ unsigned int idx = *seq_size;
+ struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
+ enum ssi_req_dma_buf_type data_dma_type = areq_ctx->data_buff_type;
+
+ if (areq_ctx->cryptlen == 0)
+ return; /*null processing*/
+
+ switch (data_dma_type) {
+ case SSI_DMA_BUF_DLLI:
+ SSI_LOG_DEBUG("CIPHER: SRC/DST buffer type DLLI\n");
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
+ (sg_dma_address(areq_ctx->srcSgl)+areq_ctx->srcOffset),
+ areq_ctx->cryptlen, NS_BIT);
+ HW_DESC_SET_DOUT_DLLI(&desc[idx],
+ (sg_dma_address(areq_ctx->dstSgl)+areq_ctx->dstOffset),
+ areq_ctx->cryptlen, NS_BIT, 0);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], flow_mode);
+ break;
+ case SSI_DMA_BUF_MLLI:
+ SSI_LOG_DEBUG("CIPHER: SRC/DST buffer type MLLI\n");
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_MLLI,
+ areq_ctx->src.sram_addr,
+ areq_ctx->src.mlli_nents, NS_BIT);
+ HW_DESC_SET_DOUT_MLLI(&desc[idx],
+ areq_ctx->dst.sram_addr,
+ areq_ctx->dst.mlli_nents, NS_BIT, 0);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], flow_mode);
+ break;
+ case SSI_DMA_BUF_NULL:
+ default:
+ SSI_LOG_ERR("CIPHER: Invalid SRC/DST buffer type\n");
+ }
+
+ *seq_size = (++idx);
+}
+
+static inline void ssi_aead_process_digest_result_desc(
+ struct aead_request *req,
+ HwDesc_s desc[],
+ unsigned int *seq_size)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct aead_req_ctx *req_ctx = aead_request_ctx(req);
+ unsigned int idx = *seq_size;
+ unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
+ DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
+ int direct = req_ctx->gen_ctx.op_type;
+
+ /* Get final ICV result */
+ if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], S_HASH_to_DOUT);
+ HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE0);
+ HW_DESC_SET_DOUT_DLLI(&desc[idx], req_ctx->icv_dma_addr,
+ ctx->authsize, NS_BIT, 1);
+ HW_DESC_SET_QUEUE_LAST_IND(&desc[idx]);
+ if (ctx->auth_mode == DRV_HASH_XCBC_MAC) {
+ HW_DESC_SET_AES_NOT_HASH_MODE(&desc[idx]);
+ HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_XCBC_MAC);
+ } else {
+ HW_DESC_SET_CIPHER_CONFIG0(&desc[idx],
+ HASH_DIGEST_RESULT_LITTLE_ENDIAN);
+ HW_DESC_SET_CIPHER_MODE(&desc[idx], hash_mode);
+ }
+ } else { /*Decrypt*/
+ /* Get ICV out from hardware */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE0);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], S_HASH_to_DOUT);
+ HW_DESC_SET_DOUT_DLLI(&desc[idx], req_ctx->mac_buf_dma_addr,
+ ctx->authsize, NS_BIT, 1);
+ HW_DESC_SET_QUEUE_LAST_IND(&desc[idx]);
+ HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], HASH_DIGEST_RESULT_LITTLE_ENDIAN);
+ HW_DESC_SET_CIPHER_CONFIG1(&desc[idx], HASH_PADDING_DISABLED);
+ if (ctx->auth_mode == DRV_HASH_XCBC_MAC) {
+ HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_XCBC_MAC);
+ HW_DESC_SET_AES_NOT_HASH_MODE(&desc[idx]);
+ } else {
+ HW_DESC_SET_CIPHER_MODE(&desc[idx], hash_mode);
+ }
+ }
+
+ *seq_size = (++idx);
+}
+
+static inline void ssi_aead_setup_cipher_desc(
+ struct aead_request *req,
+ HwDesc_s desc[],
+ unsigned int *seq_size)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct aead_req_ctx *req_ctx = aead_request_ctx(req);
+ unsigned int hw_iv_size = req_ctx->hw_iv_size;
+ unsigned int idx = *seq_size;
+ int direct = req_ctx->gen_ctx.op_type;
+
+ /* Setup cipher state */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], direct);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], ctx->flow_mode);
+ HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
+ req_ctx->gen_ctx.iv_dma_addr, hw_iv_size, NS_BIT);
+ if (ctx->cipher_mode == DRV_CIPHER_CTR) {
+ HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE1);
+ } else {
+ HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE0);
+ }
+ HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->cipher_mode);
+ idx++;
+
+ /* Setup enc. key */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], direct);
+ HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], ctx->flow_mode);
+ if (ctx->flow_mode == S_DIN_to_AES) {
+ HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
+ ((ctx->enc_keylen == 24) ?
+ CC_AES_KEY_SIZE_MAX : ctx->enc_keylen), NS_BIT);
+ HW_DESC_SET_KEY_SIZE_AES(&desc[idx], ctx->enc_keylen);
+ } else {
+ HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
+ ctx->enc_keylen, NS_BIT);
+ HW_DESC_SET_KEY_SIZE_DES(&desc[idx], ctx->enc_keylen);
+ }
+ HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->cipher_mode);
+ idx++;
+
+ *seq_size = idx;
+}
+
+static inline void ssi_aead_process_cipher(
+ struct aead_request *req,
+ HwDesc_s desc[],
+ unsigned int *seq_size,
+ unsigned int data_flow_mode)
+{
+ struct aead_req_ctx *req_ctx = aead_request_ctx(req);
+ int direct = req_ctx->gen_ctx.op_type;
+ unsigned int idx = *seq_size;
+
+ if (req_ctx->cryptlen == 0)
+ return; /*null processing*/
+
+ ssi_aead_setup_cipher_desc(req, desc, &idx);
+ ssi_aead_process_cipher_data_desc(req, data_flow_mode, desc, &idx);
+ if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
+		/* We must wait for the DMA to finish writing all the ciphertext */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_DIN_NO_DMA(&desc[idx], 0, 0xfffff0);
+ HW_DESC_SET_DOUT_NO_DMA(&desc[idx], 0, 0, 1);
+ idx++;
+ }
+
+ *seq_size = idx;
+}
+
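+/*
+ * Load the HMAC inner hash state (the digest of the ipad-XORed key,
+ * prepared during setkey) and the corresponding initial digest length,
+ * so that the following data is hashed as the HMAC inner hash.
+ */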
+static inline void ssi_aead_hmac_setup_digest_desc(
+ struct aead_request *req,
+ HwDesc_s desc[],
+ unsigned int *seq_size)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
+ DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
+ unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ?
+ CC_SHA1_DIGEST_SIZE : CC_SHA256_DIGEST_SIZE;
+ unsigned int idx = *seq_size;
+
+ /* Loading hash ipad xor key state */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_CIPHER_MODE(&desc[idx], hash_mode);
+ HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
+ ctx->auth_state.hmac.ipad_opad_dma_addr,
+ digest_size, NS_BIT);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
+ HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE0);
+ idx++;
+
+ /* Load init. digest len (64 bytes) */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_CIPHER_MODE(&desc[idx], hash_mode);
+ HW_DESC_SET_DIN_SRAM(&desc[idx],
+ ssi_ahash_get_initial_digest_len_sram_addr(ctx->drvdata, hash_mode),
+ HASH_LEN_SIZE);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
+ HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
+ idx++;
+
+ *seq_size = idx;
+}
+
+static inline void ssi_aead_xcbc_setup_digest_desc(
+ struct aead_request *req,
+ HwDesc_s desc[],
+ unsigned int *seq_size)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ unsigned int idx = *seq_size;
+
+ /* Loading MAC state */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_DIN_CONST(&desc[idx], 0, CC_AES_BLOCK_SIZE);
+ HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE0);
+ HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_XCBC_MAC);
+ HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+ HW_DESC_SET_KEY_SIZE_AES(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
+ HW_DESC_SET_AES_NOT_HASH_MODE(&desc[idx]);
+ idx++;
+
+ /* Setup XCBC MAC K1 */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
+ ctx->auth_state.xcbc.xcbc_keys_dma_addr,
+ AES_KEYSIZE_128, NS_BIT);
+ HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
+ HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_XCBC_MAC);
+ HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+ HW_DESC_SET_KEY_SIZE_AES(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
+ HW_DESC_SET_AES_NOT_HASH_MODE(&desc[idx]);
+ idx++;
+
+ /* Setup XCBC MAC K2 */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
+ (ctx->auth_state.xcbc.xcbc_keys_dma_addr +
+ AES_KEYSIZE_128),
+ AES_KEYSIZE_128, NS_BIT);
+ HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE1);
+ HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_XCBC_MAC);
+ HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+ HW_DESC_SET_KEY_SIZE_AES(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
+ HW_DESC_SET_AES_NOT_HASH_MODE(&desc[idx]);
+ idx++;
+
+ /* Setup XCBC MAC K3 */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
+ (ctx->auth_state.xcbc.xcbc_keys_dma_addr +
+ 2 * AES_KEYSIZE_128),
+ AES_KEYSIZE_128, NS_BIT);
+ HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE2);
+ HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_XCBC_MAC);
+ HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+ HW_DESC_SET_KEY_SIZE_AES(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
+ HW_DESC_SET_AES_NOT_HASH_MODE(&desc[idx]);
+ idx++;
+
+ *seq_size = idx;
+}
+
+static inline void ssi_aead_process_digest_header_desc(
+ struct aead_request *req,
+ HwDesc_s desc[],
+ unsigned int *seq_size)
+{
+ unsigned int idx = *seq_size;
+ /* Hash associated data */
+ if (req->assoclen > 0)
+ ssi_aead_create_assoc_desc(req, DIN_HASH, desc, &idx);
+
+ /* Hash IV */
+ *seq_size = idx;
+}
+
+static inline void ssi_aead_process_digest_scheme_desc(
+ struct aead_request *req,
+ HwDesc_s desc[],
+ unsigned int *seq_size)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct ssi_aead_handle *aead_handle = ctx->drvdata->aead_handle;
+ unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
+ DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
+ unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ?
+ CC_SHA1_DIGEST_SIZE : CC_SHA256_DIGEST_SIZE;
+ unsigned int idx = *seq_size;
+
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_CIPHER_MODE(&desc[idx], hash_mode);
+ HW_DESC_SET_DOUT_SRAM(&desc[idx], aead_handle->sram_workspace_addr,
+ HASH_LEN_SIZE);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], S_HASH_to_DOUT);
+ HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE1);
+ HW_DESC_SET_CIPHER_DO(&desc[idx], DO_PAD);
+ idx++;
+
+ /* Get final ICV result */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_DOUT_SRAM(&desc[idx], aead_handle->sram_workspace_addr,
+ digest_size);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], S_HASH_to_DOUT);
+ HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE0);
+ HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], HASH_DIGEST_RESULT_LITTLE_ENDIAN);
+ HW_DESC_SET_CIPHER_MODE(&desc[idx], hash_mode);
+ idx++;
+
+ /* Loading hash opad xor key state */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_CIPHER_MODE(&desc[idx], hash_mode);
+ HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
+ (ctx->auth_state.hmac.ipad_opad_dma_addr + digest_size),
+ digest_size, NS_BIT);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
+ HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE0);
+ idx++;
+
+ /* Load init. digest len (64 bytes) */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_CIPHER_MODE(&desc[idx], hash_mode);
+ HW_DESC_SET_DIN_SRAM(&desc[idx],
+ ssi_ahash_get_initial_digest_len_sram_addr(ctx->drvdata, hash_mode),
+ HASH_LEN_SIZE);
+ HW_DESC_SET_CIPHER_CONFIG1(&desc[idx], HASH_PADDING_ENABLED);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
+ HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
+ idx++;
+
+ /* Perform HASH update */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_DIN_SRAM(&desc[idx], aead_handle->sram_workspace_addr,
+ digest_size);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], DIN_HASH);
+ idx++;
+
+ *seq_size = idx;
+}
+
+static inline void ssi_aead_load_mlli_to_sram(
+ struct aead_request *req,
+ HwDesc_s desc[],
+ unsigned int *seq_size)
+{
+ struct aead_req_ctx *req_ctx = aead_request_ctx(req);
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+
+ if (unlikely(
+ (req_ctx->assoc_buff_type == SSI_DMA_BUF_MLLI) ||
+ (req_ctx->data_buff_type == SSI_DMA_BUF_MLLI) ||
+ (req_ctx->is_single_pass == false))) {
+ SSI_LOG_DEBUG("Copy-to-sram: mlli_dma=%08x, mlli_size=%u\n",
+ (unsigned int)ctx->drvdata->mlli_sram_addr,
+ req_ctx->mlli_params.mlli_len);
+ /* Copy MLLI table host-to-sram */
+ HW_DESC_INIT(&desc[*seq_size]);
+ HW_DESC_SET_DIN_TYPE(&desc[*seq_size], DMA_DLLI,
+ req_ctx->mlli_params.mlli_dma_addr,
+ req_ctx->mlli_params.mlli_len, NS_BIT);
+ HW_DESC_SET_DOUT_SRAM(&desc[*seq_size],
+ ctx->drvdata->mlli_sram_addr,
+ req_ctx->mlli_params.mlli_len);
+ HW_DESC_SET_FLOW_MODE(&desc[*seq_size], BYPASS);
+ (*seq_size)++;
+ }
+}
+
+static inline enum FlowMode ssi_aead_get_data_flow_mode(
+ enum drv_crypto_direction direct,
+ enum FlowMode setup_flow_mode,
+ bool is_single_pass)
+{
+ enum FlowMode data_flow_mode;
+
+ if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
+ if (setup_flow_mode == S_DIN_to_AES)
+ data_flow_mode = likely(is_single_pass) ?
+ AES_to_HASH_and_DOUT : DIN_AES_DOUT;
+ else
+ data_flow_mode = likely(is_single_pass) ?
+ DES_to_HASH_and_DOUT : DIN_DES_DOUT;
+ } else { /* Decrypt */
+ if (setup_flow_mode == S_DIN_to_AES)
+ data_flow_mode = likely(is_single_pass) ?
+ AES_and_HASH : DIN_AES_DOUT;
+ else
+ data_flow_mode = likely(is_single_pass) ?
+ DES_and_HASH : DIN_DES_DOUT;
+ }
+
+ return data_flow_mode;
+}
+
+static inline void ssi_aead_hmac_authenc(
+ struct aead_request *req,
+ HwDesc_s desc[],
+ unsigned int *seq_size)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct aead_req_ctx *req_ctx = aead_request_ctx(req);
+ int direct = req_ctx->gen_ctx.op_type;
+ unsigned int data_flow_mode = ssi_aead_get_data_flow_mode(
+ direct, ctx->flow_mode, req_ctx->is_single_pass);
+
+ if (req_ctx->is_single_pass == true) {
+ /**
+ * Single-pass flow
+ */
+ ssi_aead_hmac_setup_digest_desc(req, desc, seq_size);
+ ssi_aead_setup_cipher_desc(req, desc, seq_size);
+ ssi_aead_process_digest_header_desc(req, desc, seq_size);
+ ssi_aead_process_cipher_data_desc(req, data_flow_mode, desc, seq_size);
+ ssi_aead_process_digest_scheme_desc(req, desc, seq_size);
+ ssi_aead_process_digest_result_desc(req, desc, seq_size);
+ return;
+ }
+
+	/*
+	 * Double-pass flow
+	 * Fallback for unsupported single-pass modes,
+	 * i.e. when the associated data length is not a multiple of a word.
+	 */
+ if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
+ /* encrypt first.. */
+ ssi_aead_process_cipher(req, desc, seq_size, data_flow_mode);
+ /* authenc after..*/
+ ssi_aead_hmac_setup_digest_desc(req, desc, seq_size);
+ ssi_aead_process_authenc_data_desc(req, DIN_HASH, desc, seq_size, direct);
+ ssi_aead_process_digest_scheme_desc(req, desc, seq_size);
+ ssi_aead_process_digest_result_desc(req, desc, seq_size);
+
+ } else { /*DECRYPT*/
+ /* authenc first..*/
+ ssi_aead_hmac_setup_digest_desc(req, desc, seq_size);
+ ssi_aead_process_authenc_data_desc(req, DIN_HASH, desc, seq_size, direct);
+ ssi_aead_process_digest_scheme_desc(req, desc, seq_size);
+ /* decrypt after.. */
+ ssi_aead_process_cipher(req, desc, seq_size, data_flow_mode);
+		/*
+		 * Read the digest result and set the completion bit;
+		 * this must come after the cipher operation.
+		 */
+ ssi_aead_process_digest_result_desc(req, desc, seq_size);
+ }
+}
+
+static inline void
+ssi_aead_xcbc_authenc(
+ struct aead_request *req,
+ HwDesc_s desc[],
+ unsigned int *seq_size)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct aead_req_ctx *req_ctx = aead_request_ctx(req);
+ int direct = req_ctx->gen_ctx.op_type;
+ unsigned int data_flow_mode = ssi_aead_get_data_flow_mode(
+ direct, ctx->flow_mode, req_ctx->is_single_pass);
+
+ if (req_ctx->is_single_pass == true) {
+ /**
+ * Single-pass flow
+ */
+ ssi_aead_xcbc_setup_digest_desc(req, desc, seq_size);
+ ssi_aead_setup_cipher_desc(req, desc, seq_size);
+ ssi_aead_process_digest_header_desc(req, desc, seq_size);
+ ssi_aead_process_cipher_data_desc(req, data_flow_mode, desc, seq_size);
+ ssi_aead_process_digest_result_desc(req, desc, seq_size);
+ return;
+ }
+
+	/*
+	 * Double-pass flow
+	 * Fallback for unsupported single-pass modes,
+	 * i.e. when the associated data length is not a multiple of a word.
+	 */
+ if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
+ /* encrypt first.. */
+ ssi_aead_process_cipher(req, desc, seq_size, data_flow_mode);
+ /* authenc after.. */
+ ssi_aead_xcbc_setup_digest_desc(req, desc, seq_size);
+ ssi_aead_process_authenc_data_desc(req, DIN_HASH, desc, seq_size, direct);
+ ssi_aead_process_digest_result_desc(req, desc, seq_size);
+ } else { /*DECRYPT*/
+ /* authenc first.. */
+ ssi_aead_xcbc_setup_digest_desc(req, desc, seq_size);
+ ssi_aead_process_authenc_data_desc(req, DIN_HASH, desc, seq_size, direct);
+ /* decrypt after..*/
+ ssi_aead_process_cipher(req, desc, seq_size, data_flow_mode);
+		/*
+		 * Read the digest result and set the completion bit;
+		 * this must come after the cipher operation.
+		 */
+ ssi_aead_process_digest_result_desc(req, desc, seq_size);
+ }
+}
+
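+/*
+ * Check the request sizes against the cipher/auth mode constraints and
+ * decide between the single-pass and the double-pass (fallback) flow:
+ * CBC requires block-aligned ciphertext, while a non-word-aligned assoclen
+ * (or cryptlen for CTR) forces the double-pass flow.
+ */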
+static int validate_data_size(struct ssi_aead_ctx *ctx,
+ enum drv_crypto_direction direct, struct aead_request *req)
+{
+ struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+ unsigned int assoclen = req->assoclen;
+ unsigned int cipherlen = (direct == DRV_CRYPTO_DIRECTION_DECRYPT) ?
+ (req->cryptlen - ctx->authsize) : req->cryptlen;
+
+ if (unlikely((direct == DRV_CRYPTO_DIRECTION_DECRYPT) &&
+ (req->cryptlen < ctx->authsize)))
+ goto data_size_err;
+
+ areq_ctx->is_single_pass = true; /*defaulted to fast flow*/
+
+ switch (ctx->flow_mode) {
+ case S_DIN_to_AES:
+ if (unlikely((ctx->cipher_mode == DRV_CIPHER_CBC) &&
+ !IS_ALIGNED(cipherlen, AES_BLOCK_SIZE)))
+ goto data_size_err;
+ if (ctx->cipher_mode == DRV_CIPHER_CCM)
+ break;
+		if (ctx->cipher_mode == DRV_CIPHER_GCTR) {
+			if (areq_ctx->plaintext_authenticate_only)
+				areq_ctx->is_single_pass = false;
+			break;
+		}
+
+ if (!IS_ALIGNED(assoclen, sizeof(uint32_t)))
+ areq_ctx->is_single_pass = false;
+
+ if ((ctx->cipher_mode == DRV_CIPHER_CTR) &&
+ !IS_ALIGNED(cipherlen, sizeof(uint32_t)))
+ areq_ctx->is_single_pass = false;
+
+ break;
+ case S_DIN_to_DES:
+ if (unlikely(!IS_ALIGNED(cipherlen, DES_BLOCK_SIZE)))
+ goto data_size_err;
+ if (unlikely(!IS_ALIGNED(assoclen, DES_BLOCK_SIZE)))
+ areq_ctx->is_single_pass = false;
+ break;
+ default:
+ SSI_LOG_ERR("Unexpected flow mode (%d)\n", ctx->flow_mode);
+ goto data_size_err;
+ }
+
+ return 0;
+
+data_size_err:
+ return -EINVAL;
+}
+
+#if SSI_CC_HAS_AES_CCM
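+/*
+ * Encode the CCM associated-data length field (the start of block A0) as
+ * defined by RFC 3610 / NIST SP 800-38C: lengths below 2^16 - 2^8 use a
+ * 2-byte encoding, larger lengths use the 0xFF 0xFE marker followed by a
+ * 4-byte big-endian length.
+ */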
+static unsigned int format_ccm_a0(uint8_t *pA0Buff, uint32_t headerSize)
+{
+	unsigned int len = 0;
+
+	if (headerSize == 0)
+		return 0;
+
+	if (headerSize < ((1UL << 16) - (1UL << 8))) {
+ len = 2;
+
+ pA0Buff[0] = (headerSize >> 8) & 0xFF;
+ pA0Buff[1] = headerSize & 0xFF;
+ } else {
+ len = 6;
+
+ pA0Buff[0] = 0xFF;
+ pA0Buff[1] = 0xFE;
+ pA0Buff[2] = (headerSize >> 24) & 0xFF;
+ pA0Buff[3] = (headerSize >> 16) & 0xFF;
+ pA0Buff[4] = (headerSize >> 8) & 0xFF;
+ pA0Buff[5] = headerSize & 0xFF;
+ }
+
+ return len;
+}
+
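+/*
+ * Write the message length into the last csize bytes of the CCM B0 block,
+ * big-endian, as done in crypto/ccm.c; fail if the length does not fit.
+ */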
+static int set_msg_len(u8 *block, unsigned int msglen, unsigned int csize)
+{
+ __be32 data;
+
+ memset(block, 0, csize);
+ block += csize;
+
+ if (csize >= 4)
+ csize = 4;
+ else if (msglen > (1 << (8 * csize)))
+ return -EOVERFLOW;
+
+ data = cpu_to_be32(msglen);
+ memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
+
+ return 0;
+}
+
+static inline int ssi_aead_ccm(
+ struct aead_request *req,
+ HwDesc_s desc[],
+ unsigned int *seq_size)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct aead_req_ctx *req_ctx = aead_request_ctx(req);
+ unsigned int idx = *seq_size;
+ unsigned int cipher_flow_mode;
+ dma_addr_t mac_result;
+
+
+ if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
+ cipher_flow_mode = AES_to_HASH_and_DOUT;
+ mac_result = req_ctx->mac_buf_dma_addr;
+ } else { /* Encrypt */
+ cipher_flow_mode = AES_and_HASH;
+ mac_result = req_ctx->icv_dma_addr;
+ }
+
+ /* load key */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_CTR);
+ HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
+ ((ctx->enc_keylen == 24) ?
+ CC_AES_KEY_SIZE_MAX : ctx->enc_keylen),
+ NS_BIT);
+ HW_DESC_SET_KEY_SIZE_AES(&desc[idx], ctx->enc_keylen);
+ HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
+ HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_AES);
+ idx++;
+
+ /* load ctr state */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_CTR);
+ HW_DESC_SET_KEY_SIZE_AES(&desc[idx], ctx->enc_keylen);
+ HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
+ req_ctx->gen_ctx.iv_dma_addr,
+ AES_BLOCK_SIZE, NS_BIT);
+ HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+ HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE1);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_AES);
+ idx++;
+
+ /* load MAC key */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_CBC_MAC);
+ HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
+ ((ctx->enc_keylen == 24) ?
+ CC_AES_KEY_SIZE_MAX : ctx->enc_keylen),
+ NS_BIT);
+ HW_DESC_SET_KEY_SIZE_AES(&desc[idx], ctx->enc_keylen);
+ HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
+ HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
+ HW_DESC_SET_AES_NOT_HASH_MODE(&desc[idx]);
+ idx++;
+
+ /* load MAC state */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_CBC_MAC);
+ HW_DESC_SET_KEY_SIZE_AES(&desc[idx], ctx->enc_keylen);
+ HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
+ req_ctx->mac_buf_dma_addr,
+ AES_BLOCK_SIZE, NS_BIT);
+ HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+ HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE0);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
+ HW_DESC_SET_AES_NOT_HASH_MODE(&desc[idx]);
+ idx++;
+
+
+ /* process assoc data */
+ if (req->assoclen > 0) {
+ ssi_aead_create_assoc_desc(req, DIN_HASH, desc, &idx);
+ } else {
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
+ sg_dma_address(&req_ctx->ccm_adata_sg),
+ AES_BLOCK_SIZE + req_ctx->ccm_hdr_size,
+ NS_BIT);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], DIN_HASH);
+ idx++;
+ }
+
+ /* process the cipher */
+ if (req_ctx->cryptlen != 0) {
+ ssi_aead_process_cipher_data_desc(req, cipher_flow_mode, desc, &idx);
+ }
+
+ /* Read temporal MAC */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_CBC_MAC);
+ HW_DESC_SET_DOUT_DLLI(&desc[idx], req_ctx->mac_buf_dma_addr,
+ ctx->authsize, NS_BIT, 0);
+ HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE0);
+ HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], HASH_DIGEST_RESULT_LITTLE_ENDIAN);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], S_HASH_to_DOUT);
+ HW_DESC_SET_AES_NOT_HASH_MODE(&desc[idx]);
+ idx++;
+
+ /* load AES-CTR state (for last MAC calculation)*/
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_CTR);
+ HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
+ HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
+		req_ctx->ccm_iv0_dma_addr,
+ AES_BLOCK_SIZE, NS_BIT);
+ HW_DESC_SET_KEY_SIZE_AES(&desc[idx], ctx->enc_keylen);
+ HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE1);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_AES);
+ idx++;
+
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_DIN_NO_DMA(&desc[idx], 0, 0xfffff0);
+ HW_DESC_SET_DOUT_NO_DMA(&desc[idx], 0, 0, 1);
+ idx++;
+
+ /* encrypt the "T" value and store MAC in mac_state */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
+		req_ctx->mac_buf_dma_addr, ctx->authsize, NS_BIT);
+	HW_DESC_SET_DOUT_DLLI(&desc[idx], mac_result, ctx->authsize, NS_BIT, 1);
+ HW_DESC_SET_QUEUE_LAST_IND(&desc[idx]);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], DIN_AES_DOUT);
+ idx++;
+
+ *seq_size = idx;
+ return 0;
+}
+
+static int config_ccm_adata(struct aead_request *req)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct aead_req_ctx *req_ctx = aead_request_ctx(req);
+	unsigned int lp = req->iv[0];
+	/* Note: the code assumes that req->iv[0] already contains the value of L' of RFC 3610 */
+	unsigned int l = lp + 1; /* This is L of RFC 3610 (L = L' + 1). */
+	unsigned int m = ctx->authsize; /* This is M of RFC 3610 (the tag length). */
+ uint8_t *b0 = req_ctx->ccm_config + CCM_B0_OFFSET;
+ uint8_t *a0 = req_ctx->ccm_config + CCM_A0_OFFSET;
+ uint8_t *ctr_count_0 = req_ctx->ccm_config + CCM_CTR_COUNT_0_OFFSET;
+ unsigned int cryptlen = (req_ctx->gen_ctx.op_type ==
+ DRV_CRYPTO_DIRECTION_ENCRYPT) ?
+ req->cryptlen :
+ (req->cryptlen - ctx->authsize);
+ int rc;
+ memset(req_ctx->mac_buf, 0, AES_BLOCK_SIZE);
+ memset(req_ctx->ccm_config, 0, AES_BLOCK_SIZE*3);
+
+ /* taken from crypto/ccm.c */
+ /* 2 <= L <= 8, so 1 <= L' <= 7. */
+	if (l < 2 || l > 8) {
+		SSI_LOG_ERR("illegal iv value %X\n", req->iv[0]);
+		return -EINVAL;
+	}
+ memcpy(b0, req->iv, AES_BLOCK_SIZE);
+
+ /* format control info per RFC 3610 and
+ * NIST Special Publication 800-38C
+ */
+ *b0 |= (8 * ((m - 2) / 2));
+ if (req->assoclen > 0)
+ *b0 |= 64; /* Enable bit 6 if Adata exists. */
+
+	rc = set_msg_len(b0 + 16 - l, cryptlen, l); /* Write the l-byte message length field. */
+	if (rc != 0)
+		return rc;
+ /* END of "taken from crypto/ccm.c" */
+
+	/* l(a) - size of associated data. */
+	req_ctx->ccm_hdr_size = format_ccm_a0(a0, req->assoclen);
+
+	memset(req->iv + 15 - req->iv[0], 0, req->iv[0] + 1);
+	req->iv[15] = 1;
+
+	memcpy(ctr_count_0, req->iv, AES_BLOCK_SIZE);
+	ctr_count_0[15] = 0;
+
+ return 0;
+}
+
+static void ssi_rfc4309_ccm_process(struct aead_request *req)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+
+ /* L' */
+ memset(areq_ctx->ctr_iv, 0, AES_BLOCK_SIZE);
+ areq_ctx->ctr_iv[0] = 3; /* For RFC 4309, always use 4 bytes for message length (at most 2^32-1 bytes). */
+
+	/* In RFC 4309 there is an 11-byte nonce + IV part, which we build here. */
+ memcpy(areq_ctx->ctr_iv + CCM_BLOCK_NONCE_OFFSET, ctx->ctr_nonce, CCM_BLOCK_NONCE_SIZE);
+ memcpy(areq_ctx->ctr_iv + CCM_BLOCK_IV_OFFSET, req->iv, CCM_BLOCK_IV_SIZE);
+ req->iv = areq_ctx->ctr_iv;
+ req->assoclen -= CCM_BLOCK_IV_SIZE;
+}
+#endif /*SSI_CC_HAS_AES_CCM*/
+
+#if SSI_CC_HAS_AES_GCM
+
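+/*
+ * Derive the GHASH subkey H by encrypting an all-zero block with the AES
+ * key (H = E_K(0^128)), then load it and a zeroed GHASH state into the
+ * hash engine.
+ */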
+static inline void ssi_aead_gcm_setup_ghash_desc(
+ struct aead_request *req,
+ HwDesc_s desc[],
+ unsigned int *seq_size)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct aead_req_ctx *req_ctx = aead_request_ctx(req);
+ unsigned int idx = *seq_size;
+
+ /* load key to AES*/
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_ECB);
+ HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
+ HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
+ ctx->enc_keylen, NS_BIT);
+ HW_DESC_SET_KEY_SIZE_AES(&desc[idx], ctx->enc_keylen);
+ HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_AES);
+ idx++;
+
+ /* process one zero block to generate hkey */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_DIN_CONST(&desc[idx], 0x0, AES_BLOCK_SIZE);
+ HW_DESC_SET_DOUT_DLLI(&desc[idx],
+ req_ctx->hkey_dma_addr,
+ AES_BLOCK_SIZE,
+ NS_BIT, 0);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], DIN_AES_DOUT);
+ idx++;
+
+ /* Memory Barrier */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_DIN_NO_DMA(&desc[idx], 0, 0xfffff0);
+ HW_DESC_SET_DOUT_NO_DMA(&desc[idx], 0, 0, 1);
+ idx++;
+
+ /* Load GHASH subkey */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
+ req_ctx->hkey_dma_addr,
+ AES_BLOCK_SIZE, NS_BIT);
+ HW_DESC_SET_DOUT_NO_DMA(&desc[idx], 0, 0, 1);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
+ HW_DESC_SET_AES_NOT_HASH_MODE(&desc[idx]);
+ HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_HASH_HW_GHASH);
+ HW_DESC_SET_CIPHER_CONFIG1(&desc[idx], HASH_PADDING_ENABLED);
+ HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
+ idx++;
+
+	/*
+	 * Configure the hash engine for GHASH. Since it was not possible to
+	 * extend the HASH submodes to add GHASH, the following command is
+	 * needed to select GHASH (according to the HW designers).
+	 */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_DIN_NO_DMA(&desc[idx], 0, 0xfffff0);
+ HW_DESC_SET_DOUT_NO_DMA(&desc[idx], 0, 0, 1);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
+ HW_DESC_SET_AES_NOT_HASH_MODE(&desc[idx]);
+ HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_HASH_HW_GHASH);
+ HW_DESC_SET_CIPHER_DO(&desc[idx], 1); //1=AES_SK RKEK
+ HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
+ HW_DESC_SET_CIPHER_CONFIG1(&desc[idx], HASH_PADDING_ENABLED);
+ HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
+ idx++;
+
+	/* Load the GHASH initial state (all zeros); any hash has an initial state. */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_DIN_CONST(&desc[idx], 0x0, AES_BLOCK_SIZE);
+ HW_DESC_SET_DOUT_NO_DMA(&desc[idx], 0, 0, 1);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
+ HW_DESC_SET_AES_NOT_HASH_MODE(&desc[idx]);
+ HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_HASH_HW_GHASH);
+ HW_DESC_SET_CIPHER_CONFIG1(&desc[idx], HASH_PADDING_ENABLED);
+ HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE0);
+ idx++;
+
+ *seq_size = idx;
+}
+
+static inline void ssi_aead_gcm_setup_gctr_desc(
+ struct aead_request *req,
+ HwDesc_s desc[],
+ unsigned int *seq_size)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct aead_req_ctx *req_ctx = aead_request_ctx(req);
+ unsigned int idx = *seq_size;
+
+ /* load key to AES*/
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_GCTR);
+ HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
+ HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
+ ctx->enc_keylen, NS_BIT);
+ HW_DESC_SET_KEY_SIZE_AES(&desc[idx], ctx->enc_keylen);
+ HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_AES);
+ idx++;
+
+	if (req_ctx->cryptlen != 0 && !req_ctx->plaintext_authenticate_only) {
+ /* load AES/CTR initial CTR value inc by 2*/
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_GCTR);
+ HW_DESC_SET_KEY_SIZE_AES(&desc[idx], ctx->enc_keylen);
+ HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
+ req_ctx->gcm_iv_inc2_dma_addr,
+ AES_BLOCK_SIZE, NS_BIT);
+ HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
+ HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE1);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_AES);
+ idx++;
+ }
+
+ *seq_size = idx;
+}
+
+static inline void ssi_aead_process_gcm_result_desc(
+ struct aead_request *req,
+ HwDesc_s desc[],
+ unsigned int *seq_size)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct aead_req_ctx *req_ctx = aead_request_ctx(req);
+ dma_addr_t mac_result;
+ unsigned int idx = *seq_size;
+
+ if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
+ mac_result = req_ctx->mac_buf_dma_addr;
+ } else { /* Encrypt */
+ mac_result = req_ctx->icv_dma_addr;
+ }
+
+ /* process(ghash) gcm_block_len */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
+ req_ctx->gcm_block_len_dma_addr,
+ AES_BLOCK_SIZE, NS_BIT);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], DIN_HASH);
+ idx++;
+
+ /* Store GHASH state after GHASH(Associated Data + Cipher +LenBlock) */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_HASH_HW_GHASH);
+ HW_DESC_SET_DIN_NO_DMA(&desc[idx], 0, 0xfffff0);
+ HW_DESC_SET_DOUT_DLLI(&desc[idx], req_ctx->mac_buf_dma_addr,
+ AES_BLOCK_SIZE, NS_BIT, 0);
+ HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE0);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], S_HASH_to_DOUT);
+ HW_DESC_SET_AES_NOT_HASH_MODE(&desc[idx]);
+
+ idx++;
+
+ /* load AES/CTR initial CTR value inc by 1*/
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_GCTR);
+ HW_DESC_SET_KEY_SIZE_AES(&desc[idx], ctx->enc_keylen);
+ HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
+ req_ctx->gcm_iv_inc1_dma_addr,
+ AES_BLOCK_SIZE, NS_BIT);
+ HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
+ HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE1);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_AES);
+ idx++;
+
+ /* Memory Barrier */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_DIN_NO_DMA(&desc[idx], 0, 0xfffff0);
+ HW_DESC_SET_DOUT_NO_DMA(&desc[idx], 0, 0, 1);
+ idx++;
+
+ /* process GCTR on stored GHASH and store MAC in mac_state*/
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_GCTR);
+ HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
+ req_ctx->mac_buf_dma_addr,
+ AES_BLOCK_SIZE, NS_BIT);
+ HW_DESC_SET_DOUT_DLLI(&desc[idx], mac_result, ctx->authsize, NS_BIT, 1);
+ HW_DESC_SET_QUEUE_LAST_IND(&desc[idx]);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], DIN_AES_DOUT);
+ idx++;
+
+ *seq_size = idx;
+}
+
+static inline int ssi_aead_gcm(
+ struct aead_request *req,
+ HwDesc_s desc[],
+ unsigned int *seq_size)
+{
+ struct aead_req_ctx *req_ctx = aead_request_ctx(req);
+ unsigned int idx = *seq_size;
+ unsigned int cipher_flow_mode;
+
+ if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
+ cipher_flow_mode = AES_and_HASH;
+ } else { /* Encrypt */
+ cipher_flow_mode = AES_to_HASH_and_DOUT;
+ }
+
+
+	/* In RFC 4543 there is no data to encrypt; just copy data from src to dst. */
+	if (req_ctx->plaintext_authenticate_only) {
+ ssi_aead_process_cipher_data_desc(req, BYPASS, desc, seq_size);
+ ssi_aead_gcm_setup_ghash_desc(req, desc, seq_size);
+ /* process(ghash) assoc data */
+ ssi_aead_create_assoc_desc(req, DIN_HASH, desc, seq_size);
+ ssi_aead_gcm_setup_gctr_desc(req, desc, seq_size);
+ ssi_aead_process_gcm_result_desc(req, desc, seq_size);
+ idx = *seq_size;
+ return 0;
+ }
+
+	/* For GCM and RFC 4106. */
+ ssi_aead_gcm_setup_ghash_desc(req, desc, seq_size);
+ /* process(ghash) assoc data */
+ if (req->assoclen > 0)
+ ssi_aead_create_assoc_desc(req, DIN_HASH, desc, seq_size);
+ ssi_aead_gcm_setup_gctr_desc(req, desc, seq_size);
+ /* process(gctr+ghash) */
+ if (req_ctx->cryptlen != 0)
+ ssi_aead_process_cipher_data_desc(req, cipher_flow_mode, desc, seq_size);
+ ssi_aead_process_gcm_result_desc(req, desc, seq_size);
+
+ idx = *seq_size;
+ return 0;
+}
+
+#ifdef CC_DEBUG
+static inline void ssi_aead_dump_gcm(
+ const char* title,
+ struct aead_request *req)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct aead_req_ctx *req_ctx = aead_request_ctx(req);
+
+ if (ctx->cipher_mode != DRV_CIPHER_GCTR)
+ return;
+
+	if (title) {
+ SSI_LOG_DEBUG("----------------------------------------------------------------------------------");
+ SSI_LOG_DEBUG("%s\n", title);
+ }
+
+	SSI_LOG_DEBUG("cipher_mode %d, authsize %d, enc_keylen %d, assoclen %d, cryptlen %d\n",
+		      ctx->cipher_mode, ctx->authsize, ctx->enc_keylen,
+		      req->assoclen, req_ctx->cryptlen);
+
+	if (ctx->enckey)
+		dump_byte_array("mac key", ctx->enckey, 16);
+
+	dump_byte_array("req->iv", req->iv, AES_BLOCK_SIZE);
+	dump_byte_array("gcm_iv_inc1", req_ctx->gcm_iv_inc1, AES_BLOCK_SIZE);
+	dump_byte_array("gcm_iv_inc2", req_ctx->gcm_iv_inc2, AES_BLOCK_SIZE);
+	dump_byte_array("hkey", req_ctx->hkey, AES_BLOCK_SIZE);
+	dump_byte_array("mac_buf", req_ctx->mac_buf, AES_BLOCK_SIZE);
+	dump_byte_array("gcm_len_block", req_ctx->gcm_len_block.lenA, AES_BLOCK_SIZE);
+
+	if (req->src && req->cryptlen)
+		dump_byte_array("req->src", sg_virt(req->src),
+				req->cryptlen + req->assoclen);
+
+	if (req->dst)
+		dump_byte_array("req->dst", sg_virt(req->dst),
+				req->cryptlen + ctx->authsize + req->assoclen);
+}
+#endif
+
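+/*
+ * Pre-compute the per-request GCM material: two copies of the IV with the
+ * 32-bit counter set to 1 (used when encrypting the final tag) and to 2
+ * (the initial payload counter), plus the GHASH length block holding
+ * len(A) || len(C) in bits (for RFC 4543 everything counts as A and
+ * len(C) is zero).
+ */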
+static int config_gcm_context(struct aead_request *req)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct aead_req_ctx *req_ctx = aead_request_ctx(req);
+
+ unsigned int cryptlen = (req_ctx->gen_ctx.op_type ==
+ DRV_CRYPTO_DIRECTION_ENCRYPT) ?
+ req->cryptlen :
+ (req->cryptlen - ctx->authsize);
+ __be32 counter = cpu_to_be32(2);
+
+	SSI_LOG_DEBUG("config_gcm_context() cryptlen = %d, req->assoclen = %d, ctx->authsize = %d\n",
+		      cryptlen, req->assoclen, ctx->authsize);
+
+ memset(req_ctx->hkey, 0, AES_BLOCK_SIZE);
+
+ memset(req_ctx->mac_buf, 0, AES_BLOCK_SIZE);
+
+ memcpy(req->iv + 12, &counter, 4);
+ memcpy(req_ctx->gcm_iv_inc2, req->iv, 16);
+
+ counter = cpu_to_be32(1);
+ memcpy(req->iv + 12, &counter, 4);
+ memcpy(req_ctx->gcm_iv_inc1, req->iv, 16);
+
+
+	if (!req_ctx->plaintext_authenticate_only) {
+		__be64 temp64;
+
+		temp64 = cpu_to_be64(req->assoclen * 8);
+		memcpy(&req_ctx->gcm_len_block.lenA, &temp64, sizeof(temp64));
+		temp64 = cpu_to_be64(cryptlen * 8);
+		memcpy(&req_ctx->gcm_len_block.lenC, &temp64, 8);
+	} else {
+		/*
+		 * RFC 4543: all data (AAD, IV, plaintext) is treated as
+		 * additional authenticated data, i.e. nothing is encrypted.
+		 */
+		__be64 temp64;
+
+		temp64 = cpu_to_be64((req->assoclen + GCM_BLOCK_RFC4_IV_SIZE + cryptlen) * 8);
+		memcpy(&req_ctx->gcm_len_block.lenA, &temp64, sizeof(temp64));
+		temp64 = 0;
+		memcpy(&req_ctx->gcm_len_block.lenC, &temp64, 8);
+	}
+
+ return 0;
+}
+
+static void ssi_rfc4_gcm_process(struct aead_request *req)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+
+ memcpy(areq_ctx->ctr_iv + GCM_BLOCK_RFC4_NONCE_OFFSET, ctx->ctr_nonce, GCM_BLOCK_RFC4_NONCE_SIZE);
+ memcpy(areq_ctx->ctr_iv + GCM_BLOCK_RFC4_IV_OFFSET, req->iv, GCM_BLOCK_RFC4_IV_SIZE);
+ req->iv = areq_ctx->ctr_iv;
+ req->assoclen -= GCM_BLOCK_RFC4_IV_SIZE;
+}
+
+
+#endif /*SSI_CC_HAS_AES_GCM*/
+
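+/*
+ * Common AEAD request path: validate the data sizes, build the HW IV for
+ * the selected cipher mode (CTR/CCM/GCTR), map the request buffers for
+ * DMA, construct the descriptor sequence according to the auth mode and
+ * submit it to the HW queue.
+ */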
+static int ssi_aead_process(struct aead_request *req, enum drv_crypto_direction direct)
+{
+ int rc = 0;
+ int seq_len = 0;
+ HwDesc_s desc[MAX_AEAD_PROCESS_SEQ];
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+ struct device *dev = &ctx->drvdata->plat_dev->dev;
+ struct ssi_crypto_req ssi_req = {};
+
+ DECL_CYCLE_COUNT_RESOURCES;
+
+ SSI_LOG_DEBUG("%s context=%p req=%p iv=%p src=%p src_ofs=%d dst=%p dst_ofs=%d cryptolen=%d\n",
+ ((direct==DRV_CRYPTO_DIRECTION_ENCRYPT)?"Encrypt":"Decrypt"), ctx, req, req->iv,
+ sg_virt(req->src), req->src->offset, sg_virt(req->dst), req->dst->offset, req->cryptlen);
+ CHECK_AND_RETURN_UPON_FIPS_ERROR();
+
+ /* STAT_PHASE_0: Init and sanity checks */
+ START_CYCLE_COUNT();
+
+ /* Check data length according to mode */
+ if (unlikely(validate_data_size(ctx, direct, req) != 0)) {
+ SSI_LOG_ERR("Unsupported crypt/assoc len %d/%d.\n",
+ req->cryptlen, req->assoclen);
+ crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_BLOCK_LEN);
+ return -EINVAL;
+ }
+
+ /* Setup DX request structure */
+ ssi_req.user_cb = (void *)ssi_aead_complete;
+ ssi_req.user_arg = (void *)req;
+
+#ifdef ENABLE_CYCLE_COUNT
+ ssi_req.op_type = (direct == DRV_CRYPTO_DIRECTION_DECRYPT) ?
+ STAT_OP_TYPE_DECODE : STAT_OP_TYPE_ENCODE;
+#endif
+ /* Setup request context */
+ areq_ctx->gen_ctx.op_type = direct;
+ areq_ctx->req_authsize = ctx->authsize;
+ areq_ctx->cipher_mode = ctx->cipher_mode;
+
+ END_CYCLE_COUNT(ssi_req.op_type, STAT_PHASE_0);
+
+ /* STAT_PHASE_1: Map buffers */
+ START_CYCLE_COUNT();
+
+ if (ctx->cipher_mode == DRV_CIPHER_CTR) {
+ /* Build CTR IV - Copy nonce from last 4 bytes in
+ * CTR key to first 4 bytes in CTR IV */
+ memcpy(areq_ctx->ctr_iv, ctx->ctr_nonce, CTR_RFC3686_NONCE_SIZE);
+		if (areq_ctx->backup_giv == NULL) /* User-provided IV (not generated) */
+ memcpy(areq_ctx->ctr_iv + CTR_RFC3686_NONCE_SIZE,
+ req->iv, CTR_RFC3686_IV_SIZE);
+ /* Initialize counter portion of counter block */
+ *(__be32 *)(areq_ctx->ctr_iv + CTR_RFC3686_NONCE_SIZE +
+ CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
+
+ /* Replace with counter iv */
+ req->iv = areq_ctx->ctr_iv;
+ areq_ctx->hw_iv_size = CTR_RFC3686_BLOCK_SIZE;
+	} else if ((ctx->cipher_mode == DRV_CIPHER_CCM) ||
+		   (ctx->cipher_mode == DRV_CIPHER_GCTR)) {
+ areq_ctx->hw_iv_size = AES_BLOCK_SIZE;
+ if (areq_ctx->ctr_iv != req->iv) {
+ memcpy(areq_ctx->ctr_iv, req->iv, crypto_aead_ivsize(tfm));
+ req->iv = areq_ctx->ctr_iv;
+ }
+ } else {
+ areq_ctx->hw_iv_size = crypto_aead_ivsize(tfm);
+ }
+
+#if SSI_CC_HAS_AES_CCM
+ if (ctx->cipher_mode == DRV_CIPHER_CCM) {
+ rc = config_ccm_adata(req);
+ if (unlikely(rc != 0)) {
+			SSI_LOG_ERR("config_ccm_adata() failed (rc=%d)\n", rc);
+ goto exit;
+ }
+ } else {
+ areq_ctx->ccm_hdr_size = ccm_header_size_null;
+ }
+#else
+ areq_ctx->ccm_hdr_size = ccm_header_size_null;
+#endif /*SSI_CC_HAS_AES_CCM*/
+
+#if SSI_CC_HAS_AES_GCM
+ if (ctx->cipher_mode == DRV_CIPHER_GCTR) {
+ rc = config_gcm_context(req);
+ if (unlikely(rc != 0)) {
+			SSI_LOG_ERR("config_gcm_context() failed (rc=%d)\n", rc);
+ goto exit;
+ }
+ }
+#endif /*SSI_CC_HAS_AES_GCM*/
+
+ rc = ssi_buffer_mgr_map_aead_request(ctx->drvdata, req);
+ if (unlikely(rc != 0)) {
+ SSI_LOG_ERR("map_request() failed\n");
+ goto exit;
+ }
+
+ /* do we need to generate IV? */
+ if (areq_ctx->backup_giv != NULL) {
+
+ /* set the DMA mapped IV address*/
+ if (ctx->cipher_mode == DRV_CIPHER_CTR) {
+ ssi_req.ivgen_dma_addr[0] = areq_ctx->gen_ctx.iv_dma_addr + CTR_RFC3686_NONCE_SIZE;
+ ssi_req.ivgen_dma_addr_len = 1;
+ } else if (ctx->cipher_mode == DRV_CIPHER_CCM) {
+			/*
+			 * In CCM the IV needs to exist both inside B0 and
+			 * inside the counter. It is also copied to
+			 * iv_dma_addr for other reasons (such as returning
+			 * it to the user), so three identical IV outputs
+			 * are used.
+			 */
+ ssi_req.ivgen_dma_addr[0] = areq_ctx->gen_ctx.iv_dma_addr + CCM_BLOCK_IV_OFFSET;
+ ssi_req.ivgen_dma_addr[1] = sg_dma_address(&areq_ctx->ccm_adata_sg) + CCM_B0_OFFSET + CCM_BLOCK_IV_OFFSET;
+ ssi_req.ivgen_dma_addr[2] = sg_dma_address(&areq_ctx->ccm_adata_sg) + CCM_CTR_COUNT_0_OFFSET + CCM_BLOCK_IV_OFFSET;
+ ssi_req.ivgen_dma_addr_len = 3;
+ } else {
+ ssi_req.ivgen_dma_addr[0] = areq_ctx->gen_ctx.iv_dma_addr;
+ ssi_req.ivgen_dma_addr_len = 1;
+ }
+
+ /* set the IV size (8/16 B long)*/
+ ssi_req.ivgen_size = crypto_aead_ivsize(tfm);
+ }
+
+ END_CYCLE_COUNT(ssi_req.op_type, STAT_PHASE_1);
+
+ /* STAT_PHASE_2: Create sequence */
+ START_CYCLE_COUNT();
+
+ /* Load MLLI tables to SRAM if necessary */
+ ssi_aead_load_mlli_to_sram(req, desc, &seq_len);
+
+ /*TODO: move seq len by reference */
+ switch (ctx->auth_mode) {
+ case DRV_HASH_SHA1:
+ case DRV_HASH_SHA256:
+ ssi_aead_hmac_authenc(req, desc, &seq_len);
+ break;
+ case DRV_HASH_XCBC_MAC:
+ ssi_aead_xcbc_authenc(req, desc, &seq_len);
+ break;
+#if ( SSI_CC_HAS_AES_CCM || SSI_CC_HAS_AES_GCM )
+ case DRV_HASH_NULL:
+#if SSI_CC_HAS_AES_CCM
+ if (ctx->cipher_mode == DRV_CIPHER_CCM) {
+ ssi_aead_ccm(req, desc, &seq_len);
+ }
+#endif /*SSI_CC_HAS_AES_CCM*/
+#if SSI_CC_HAS_AES_GCM
+ if (ctx->cipher_mode == DRV_CIPHER_GCTR) {
+ ssi_aead_gcm(req, desc, &seq_len);
+ }
+#endif /*SSI_CC_HAS_AES_GCM*/
+ break;
+#endif
+ default:
+ SSI_LOG_ERR("Unsupported authenc (%d)\n", ctx->auth_mode);
+ ssi_buffer_mgr_unmap_aead_request(dev, req);
+ rc = -ENOTSUPP;
+ goto exit;
+ }
+
+ END_CYCLE_COUNT(ssi_req.op_type, STAT_PHASE_2);
+
+ /* STAT_PHASE_3: Lock HW and push sequence */
+ START_CYCLE_COUNT();
+
+ rc = send_request(ctx->drvdata, &ssi_req, desc, seq_len, 1);
+
+ if (unlikely(rc != -EINPROGRESS)) {
+ SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
+ ssi_buffer_mgr_unmap_aead_request(dev, req);
+ }
+
+
+ END_CYCLE_COUNT(ssi_req.op_type, STAT_PHASE_3);
+exit:
+ return rc;
+}
+
+static int ssi_aead_encrypt(struct aead_request *req)
+{
+ struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+ int rc;
+
+ /* No generated IV required */
+ areq_ctx->backup_iv = req->iv;
+ areq_ctx->backup_giv = NULL;
+ areq_ctx->is_gcm4543 = false;
+
+ areq_ctx->plaintext_authenticate_only = false;
+
+ rc = ssi_aead_process(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
+ if (rc != -EINPROGRESS)
+ req->iv = areq_ctx->backup_iv;
+
+ return rc;
+}
+
+#if SSI_CC_HAS_AES_CCM
+static int ssi_rfc4309_ccm_encrypt(struct aead_request *req)
+{
+ /* Very similar to ssi_aead_encrypt() above. */
+
+ struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+ int rc = -EINVAL;
+
+ if (!valid_assoclen(req)) {
+		SSI_LOG_ERR("invalid Assoclen:%u\n", req->assoclen);
+ goto out;
+ }
+
+ /* No generated IV required */
+ areq_ctx->backup_iv = req->iv;
+ areq_ctx->backup_giv = NULL;
+ areq_ctx->is_gcm4543 = true;
+
+ ssi_rfc4309_ccm_process(req);
+
+ rc = ssi_aead_process(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
+ if (rc != -EINPROGRESS)
+ req->iv = areq_ctx->backup_iv;
+out:
+ return rc;
+}
+#endif /* SSI_CC_HAS_AES_CCM */
+
+static int ssi_aead_decrypt(struct aead_request *req)
+{
+ struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+ int rc;
+
+ /* No generated IV required */
+ areq_ctx->backup_iv = req->iv;
+ areq_ctx->backup_giv = NULL;
+ areq_ctx->is_gcm4543 = false;
+
+ areq_ctx->plaintext_authenticate_only = false;
+
+ rc = ssi_aead_process(req, DRV_CRYPTO_DIRECTION_DECRYPT);
+ if (rc != -EINPROGRESS)
+ req->iv = areq_ctx->backup_iv;
+
+ return rc;
+
+}
+
+#if SSI_CC_HAS_AES_CCM
+static int ssi_rfc4309_ccm_decrypt(struct aead_request *req)
+{
+ /* Very similar to ssi_aead_decrypt() above. */
+
+ struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+ int rc = -EINVAL;
+
+ if (!valid_assoclen(req)) {
+ SSI_LOG_ERR("invalid Assoclen:%u\n", req->assoclen);
+ goto out;
+ }
+
+ /* No generated IV required */
+ areq_ctx->backup_iv = req->iv;
+ areq_ctx->backup_giv = NULL;
+
+ areq_ctx->is_gcm4543 = true;
+ ssi_rfc4309_ccm_process(req);
+
+ rc = ssi_aead_process(req, DRV_CRYPTO_DIRECTION_DECRYPT);
+ if (rc != -EINPROGRESS)
+ req->iv = areq_ctx->backup_iv;
+
+out:
+ return rc;
+}
+#endif /* SSI_CC_HAS_AES_CCM */
+
+#if SSI_CC_HAS_AES_GCM
+
+static int ssi_rfc4106_gcm_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
+{
+ struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ int rc = 0;
+
+	SSI_LOG_DEBUG("ssi_rfc4106_gcm_setkey() keylen %d, key %p\n", keylen, key);
+
+ if (keylen < 4)
+ return -EINVAL;
+
+ keylen -= 4;
+ memcpy(ctx->ctr_nonce, key + keylen, 4);
+
+ rc = ssi_aead_setkey(tfm, key, keylen);
+
+ return rc;
+}
+
+static int ssi_rfc4543_gcm_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
+{
+ struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ int rc = 0;
+
+	SSI_LOG_DEBUG("ssi_rfc4543_gcm_setkey() keylen %d, key %p\n", keylen, key);
+
+ if (keylen < 4)
+ return -EINVAL;
+
+ keylen -= 4;
+ memcpy(ctx->ctr_nonce, key + keylen, 4);
+
+ rc = ssi_aead_setkey(tfm, key, keylen);
+
+ return rc;
+}
+
+static int ssi_gcm_setauthsize(struct crypto_aead *authenc,
+ unsigned int authsize)
+{
+ switch (authsize) {
+ case 4:
+ case 8:
+ case 12:
+ case 13:
+ case 14:
+ case 15:
+ case 16:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return ssi_aead_setauthsize(authenc, authsize);
+}
+
+static int ssi_rfc4106_gcm_setauthsize(struct crypto_aead *authenc,
+ unsigned int authsize)
+{
+	SSI_LOG_DEBUG("ssi_rfc4106_gcm_setauthsize() authsize %d\n", authsize);
+
+ switch (authsize) {
+ case 8:
+ case 12:
+ case 16:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return ssi_aead_setauthsize(authenc, authsize);
+}
+
+static int ssi_rfc4543_gcm_setauthsize(struct crypto_aead *authenc,
+ unsigned int authsize)
+{
+	SSI_LOG_DEBUG("ssi_rfc4543_gcm_setauthsize() authsize %d\n", authsize);
+
+ if (authsize != 16)
+ return -EINVAL;
+
+ return ssi_aead_setauthsize(authenc, authsize);
+}
+
+static int ssi_rfc4106_gcm_encrypt(struct aead_request *req)
+{
+ /* Very similar to ssi_aead_encrypt() above. */
+
+ struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+ int rc = -EINVAL;
+
+ if (!valid_assoclen(req)) {
+ SSI_LOG_ERR("invalid Assoclen:%u\n", req->assoclen);
+ goto out;
+ }
+
+ /* No generated IV required */
+ areq_ctx->backup_iv = req->iv;
+ areq_ctx->backup_giv = NULL;
+
+ areq_ctx->plaintext_authenticate_only = false;
+
+ ssi_rfc4_gcm_process(req);
+ areq_ctx->is_gcm4543 = true;
+
+ rc = ssi_aead_process(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
+ if (rc != -EINPROGRESS)
+ req->iv = areq_ctx->backup_iv;
+out:
+ return rc;
+}
+
+static int ssi_rfc4543_gcm_encrypt(struct aead_request *req)
+{
+ /* Very similar to ssi_aead_encrypt() above. */
+
+ struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+ int rc;
+
+	/* Plaintext is not encrypted with RFC 4543 */
+ areq_ctx->plaintext_authenticate_only = true;
+
+ /* No generated IV required */
+ areq_ctx->backup_iv = req->iv;
+ areq_ctx->backup_giv = NULL;
+
+ ssi_rfc4_gcm_process(req);
+ areq_ctx->is_gcm4543 = true;
+
+ rc = ssi_aead_process(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
+ if (rc != -EINPROGRESS)
+ req->iv = areq_ctx->backup_iv;
+
+ return rc;
+}
+
+static int ssi_rfc4106_gcm_decrypt(struct aead_request *req)
+{
+ /* Very similar to ssi_aead_decrypt() above. */
+
+ struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+ int rc = -EINVAL;
+
+ if (!valid_assoclen(req)) {
+ SSI_LOG_ERR("invalid Assoclen:%u\n", req->assoclen);
+ goto out;
+ }
+
+ /* No generated IV required */
+ areq_ctx->backup_iv = req->iv;
+ areq_ctx->backup_giv = NULL;
+
+ areq_ctx->plaintext_authenticate_only = false;
+
+ ssi_rfc4_gcm_process(req);
+ areq_ctx->is_gcm4543 = true;
+
+ rc = ssi_aead_process(req, DRV_CRYPTO_DIRECTION_DECRYPT);
+ if (rc != -EINPROGRESS)
+ req->iv = areq_ctx->backup_iv;
+out:
+ return rc;
+}
+
+static int ssi_rfc4543_gcm_decrypt(struct aead_request *req)
+{
+ /* Very similar to ssi_aead_decrypt() above. */
+
+ struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+ int rc;
+
+	/* Plaintext is not decrypted with RFC 4543 */
+ areq_ctx->plaintext_authenticate_only = true;
+
+ /* No generated IV required */
+ areq_ctx->backup_iv = req->iv;
+ areq_ctx->backup_giv = NULL;
+
+ ssi_rfc4_gcm_process(req);
+ areq_ctx->is_gcm4543 = true;
+
+ rc = ssi_aead_process(req, DRV_CRYPTO_DIRECTION_DECRYPT);
+ if (rc != -EINPROGRESS)
+ req->iv = areq_ctx->backup_iv;
+
+ return rc;
+}
+#endif /* SSI_CC_HAS_AES_GCM */
+
+/* DX Block aead alg */
+static struct ssi_alg_template aead_algs[] = {
+ {
+ .name = "authenc(hmac(sha1),cbc(aes))",
+ .driver_name = "authenc-hmac-sha1-cbc-aes-dx",
+ .blocksize = AES_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = ssi_aead_setkey,
+ .setauthsize = ssi_aead_setauthsize,
+ .encrypt = ssi_aead_encrypt,
+ .decrypt = ssi_aead_decrypt,
+ .init = ssi_aead_init,
+ .exit = ssi_aead_exit,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_CBC,
+ .flow_mode = S_DIN_to_AES,
+ .auth_mode = DRV_HASH_SHA1,
+ },
+ {
+ .name = "authenc(hmac(sha1),cbc(des3_ede))",
+ .driver_name = "authenc-hmac-sha1-cbc-des3-dx",
+ .blocksize = DES3_EDE_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = ssi_aead_setkey,
+ .setauthsize = ssi_aead_setauthsize,
+ .encrypt = ssi_aead_encrypt,
+ .decrypt = ssi_aead_decrypt,
+ .init = ssi_aead_init,
+ .exit = ssi_aead_exit,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_CBC,
+ .flow_mode = S_DIN_to_DES,
+ .auth_mode = DRV_HASH_SHA1,
+ },
+ {
+ .name = "authenc(hmac(sha256),cbc(aes))",
+ .driver_name = "authenc-hmac-sha256-cbc-aes-dx",
+ .blocksize = AES_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = ssi_aead_setkey,
+ .setauthsize = ssi_aead_setauthsize,
+ .encrypt = ssi_aead_encrypt,
+ .decrypt = ssi_aead_decrypt,
+ .init = ssi_aead_init,
+ .exit = ssi_aead_exit,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_CBC,
+ .flow_mode = S_DIN_to_AES,
+ .auth_mode = DRV_HASH_SHA256,
+ },
+ {
+ .name = "authenc(hmac(sha256),cbc(des3_ede))",
+ .driver_name = "authenc-hmac-sha256-cbc-des3-dx",
+ .blocksize = DES3_EDE_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = ssi_aead_setkey,
+ .setauthsize = ssi_aead_setauthsize,
+ .encrypt = ssi_aead_encrypt,
+ .decrypt = ssi_aead_decrypt,
+ .init = ssi_aead_init,
+ .exit = ssi_aead_exit,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_CBC,
+ .flow_mode = S_DIN_to_DES,
+ .auth_mode = DRV_HASH_SHA256,
+ },
+ {
+ .name = "authenc(xcbc(aes),cbc(aes))",
+ .driver_name = "authenc-xcbc-aes-cbc-aes-dx",
+ .blocksize = AES_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = ssi_aead_setkey,
+ .setauthsize = ssi_aead_setauthsize,
+ .encrypt = ssi_aead_encrypt,
+ .decrypt = ssi_aead_decrypt,
+ .init = ssi_aead_init,
+ .exit = ssi_aead_exit,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = AES_BLOCK_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_CBC,
+ .flow_mode = S_DIN_to_AES,
+ .auth_mode = DRV_HASH_XCBC_MAC,
+ },
+ {
+ .name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
+ .driver_name = "authenc-hmac-sha1-rfc3686-ctr-aes-dx",
+ .blocksize = 1,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = ssi_aead_setkey,
+ .setauthsize = ssi_aead_setauthsize,
+ .encrypt = ssi_aead_encrypt,
+ .decrypt = ssi_aead_decrypt,
+ .init = ssi_aead_init,
+ .exit = ssi_aead_exit,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_CTR,
+ .flow_mode = S_DIN_to_AES,
+ .auth_mode = DRV_HASH_SHA1,
+ },
+ {
+ .name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
+ .driver_name = "authenc-hmac-sha256-rfc3686-ctr-aes-dx",
+ .blocksize = 1,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = ssi_aead_setkey,
+ .setauthsize = ssi_aead_setauthsize,
+ .encrypt = ssi_aead_encrypt,
+ .decrypt = ssi_aead_decrypt,
+ .init = ssi_aead_init,
+ .exit = ssi_aead_exit,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_CTR,
+ .flow_mode = S_DIN_to_AES,
+ .auth_mode = DRV_HASH_SHA256,
+ },
+ {
+ .name = "authenc(xcbc(aes),rfc3686(ctr(aes)))",
+ .driver_name = "authenc-xcbc-aes-rfc3686-ctr-aes-dx",
+ .blocksize = 1,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = ssi_aead_setkey,
+ .setauthsize = ssi_aead_setauthsize,
+ .encrypt = ssi_aead_encrypt,
+ .decrypt = ssi_aead_decrypt,
+ .init = ssi_aead_init,
+ .exit = ssi_aead_exit,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = AES_BLOCK_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_CTR,
+ .flow_mode = S_DIN_to_AES,
+ .auth_mode = DRV_HASH_XCBC_MAC,
+ },
+#if SSI_CC_HAS_AES_CCM
+ {
+ .name = "ccm(aes)",
+ .driver_name = "ccm-aes-dx",
+ .blocksize = 1,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = ssi_aead_setkey,
+ .setauthsize = ssi_ccm_setauthsize,
+ .encrypt = ssi_aead_encrypt,
+ .decrypt = ssi_aead_decrypt,
+ .init = ssi_aead_init,
+ .exit = ssi_aead_exit,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = AES_BLOCK_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_CCM,
+ .flow_mode = S_DIN_to_AES,
+ .auth_mode = DRV_HASH_NULL,
+ },
+ {
+ .name = "rfc4309(ccm(aes))",
+ .driver_name = "rfc4309-ccm-aes-dx",
+ .blocksize = 1,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = ssi_rfc4309_ccm_setkey,
+ .setauthsize = ssi_rfc4309_ccm_setauthsize,
+ .encrypt = ssi_rfc4309_ccm_encrypt,
+ .decrypt = ssi_rfc4309_ccm_decrypt,
+ .init = ssi_aead_init,
+ .exit = ssi_aead_exit,
+ .ivsize = CCM_BLOCK_IV_SIZE,
+ .maxauthsize = AES_BLOCK_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_CCM,
+ .flow_mode = S_DIN_to_AES,
+ .auth_mode = DRV_HASH_NULL,
+ },
+#endif /*SSI_CC_HAS_AES_CCM*/
+#if SSI_CC_HAS_AES_GCM
+ {
+ .name = "gcm(aes)",
+ .driver_name = "gcm-aes-dx",
+ .blocksize = 1,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = ssi_aead_setkey,
+ .setauthsize = ssi_gcm_setauthsize,
+ .encrypt = ssi_aead_encrypt,
+ .decrypt = ssi_aead_decrypt,
+ .init = ssi_aead_init,
+ .exit = ssi_aead_exit,
+ .ivsize = 12,
+ .maxauthsize = AES_BLOCK_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_GCTR,
+ .flow_mode = S_DIN_to_AES,
+ .auth_mode = DRV_HASH_NULL,
+ },
+ {
+ .name = "rfc4106(gcm(aes))",
+ .driver_name = "rfc4106-gcm-aes-dx",
+ .blocksize = 1,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = ssi_rfc4106_gcm_setkey,
+ .setauthsize = ssi_rfc4106_gcm_setauthsize,
+ .encrypt = ssi_rfc4106_gcm_encrypt,
+ .decrypt = ssi_rfc4106_gcm_decrypt,
+ .init = ssi_aead_init,
+ .exit = ssi_aead_exit,
+ .ivsize = GCM_BLOCK_RFC4_IV_SIZE,
+ .maxauthsize = AES_BLOCK_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_GCTR,
+ .flow_mode = S_DIN_to_AES,
+ .auth_mode = DRV_HASH_NULL,
+ },
+ {
+ .name = "rfc4543(gcm(aes))",
+ .driver_name = "rfc4543-gcm-aes-dx",
+ .blocksize = 1,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = ssi_rfc4543_gcm_setkey,
+ .setauthsize = ssi_rfc4543_gcm_setauthsize,
+ .encrypt = ssi_rfc4543_gcm_encrypt,
+ .decrypt = ssi_rfc4543_gcm_decrypt,
+ .init = ssi_aead_init,
+ .exit = ssi_aead_exit,
+ .ivsize = GCM_BLOCK_RFC4_IV_SIZE,
+ .maxauthsize = AES_BLOCK_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_GCTR,
+ .flow_mode = S_DIN_to_AES,
+ .auth_mode = DRV_HASH_NULL,
+ },
+#endif /*SSI_CC_HAS_AES_GCM*/
+};
+
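+/*
+ * Build a ssi_crypto_alg instance from a driver template: copy the
+ * aead_alg callbacks, fill in the generic crypto_alg fields (names,
+ * module, priority, flags, ctx size) and record the HW modes.
+ */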
+static struct ssi_crypto_alg *ssi_aead_create_alg(struct ssi_alg_template *template)
+{
+ struct ssi_crypto_alg *t_alg;
+ struct aead_alg *alg;
+
+ t_alg = kzalloc(sizeof(struct ssi_crypto_alg), GFP_KERNEL);
+ if (!t_alg) {
+ SSI_LOG_ERR("failed to allocate t_alg\n");
+ return ERR_PTR(-ENOMEM);
+ }
+ alg = &template->template_aead;
+
+ snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
+ snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+ template->driver_name);
+ alg->base.cra_module = THIS_MODULE;
+ alg->base.cra_priority = SSI_CRA_PRIO;
+
+ alg->base.cra_ctxsize = sizeof(struct ssi_aead_ctx);
+ alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
+ template->type;
+ alg->init = ssi_aead_init;
+ alg->exit = ssi_aead_exit;
+
+ t_alg->aead_alg = *alg;
+
+ t_alg->cipher_mode = template->cipher_mode;
+ t_alg->flow_mode = template->flow_mode;
+ t_alg->auth_mode = template->auth_mode;
+
+ return t_alg;
+}
+
+int ssi_aead_free(struct ssi_drvdata *drvdata)
+{
+ struct ssi_crypto_alg *t_alg, *n;
+ struct ssi_aead_handle *aead_handle =
+ (struct ssi_aead_handle *)drvdata->aead_handle;
+
+ if (aead_handle != NULL) {
+ /* Remove registered algs */
+ list_for_each_entry_safe(t_alg, n, &aead_handle->aead_list, entry) {
+ crypto_unregister_aead(&t_alg->aead_alg);
+ list_del(&t_alg->entry);
+ kfree(t_alg);
+ }
+ kfree(aead_handle);
+ drvdata->aead_handle = NULL;
+ }
+
+ return 0;
+}
+
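+/*
+ * Allocate the AEAD handle, reserve SRAM workspace for the HMAC digest
+ * and register every algorithm in aead_algs[] with the crypto API,
+ * unwinding all previously registered entries on failure.
+ */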
+int ssi_aead_alloc(struct ssi_drvdata *drvdata)
+{
+ struct ssi_aead_handle *aead_handle;
+ struct ssi_crypto_alg *t_alg;
+ int rc = -ENOMEM;
+ int alg;
+
+ aead_handle = kmalloc(sizeof(struct ssi_aead_handle), GFP_KERNEL);
+ if (aead_handle == NULL) {
+ rc = -ENOMEM;
+ goto fail0;
+ }
+
+ drvdata->aead_handle = aead_handle;
+
+ aead_handle->sram_workspace_addr = ssi_sram_mgr_alloc(
+ drvdata, MAX_HMAC_DIGEST_SIZE);
+ if (aead_handle->sram_workspace_addr == NULL_SRAM_ADDR) {
+ SSI_LOG_ERR("SRAM pool exhausted\n");
+ rc = -ENOMEM;
+ goto fail1;
+ }
+
+ INIT_LIST_HEAD(&aead_handle->aead_list);
+
+ /* Linux crypto */
+ for (alg = 0; alg < ARRAY_SIZE(aead_algs); alg++) {
+ t_alg = ssi_aead_create_alg(&aead_algs[alg]);
+ if (IS_ERR(t_alg)) {
+ rc = PTR_ERR(t_alg);
+ SSI_LOG_ERR("%s alg allocation failed\n",
+ aead_algs[alg].driver_name);
+ goto fail1;
+ }
+ t_alg->drvdata = drvdata;
+ rc = crypto_register_aead(&t_alg->aead_alg);
+ if (unlikely(rc != 0)) {
+ SSI_LOG_ERR("%s alg registration failed\n",
+ t_alg->aead_alg.base.cra_driver_name);
+ goto fail2;
+ } else {
+ list_add_tail(&t_alg->entry, &aead_handle->aead_list);
+ SSI_LOG_DEBUG("Registered %s\n", t_alg->aead_alg.base.cra_driver_name);
+ }
+ }
+
+ return 0;
+
+fail2:
+ kfree(t_alg);
+fail1:
+ ssi_aead_free(drvdata);
+fail0:
+ return rc;
+}
+
+
+
diff --git a/drivers/staging/ccree/ssi_aead.h b/drivers/staging/ccree/ssi_aead.h
new file mode 100644
index 000000000000..fe88c9e04f88
--- /dev/null
+++ b/drivers/staging/ccree/ssi_aead.h
@@ -0,0 +1,120 @@
+/*
+ * Copyright (C) 2012-2017 ARM Limited or its affiliates.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/* \file ssi_aead.h
+ ARM CryptoCell AEAD Crypto API
+ */
+
+#ifndef __SSI_AEAD_H__
+#define __SSI_AEAD_H__
+
+#include <linux/kernel.h>
+#include <crypto/algapi.h>
+#include <crypto/ctr.h>
+
+
+/* mac_cmp - HW writes 8 B but all bytes hold the same value */
+#define ICV_CMP_SIZE 8
+#define CCM_CONFIG_BUF_SIZE (AES_BLOCK_SIZE*3)
+#define MAX_MAC_SIZE MAX(SHA256_DIGEST_SIZE, AES_BLOCK_SIZE)
+
+
+/* defines for AES GCM configuration buffer */
+#define GCM_BLOCK_LEN_SIZE 8
+
+#define GCM_BLOCK_RFC4_IV_OFFSET 4
+#define GCM_BLOCK_RFC4_IV_SIZE 8 /* IV size for rfc's */
+#define GCM_BLOCK_RFC4_NONCE_OFFSET 0
+#define GCM_BLOCK_RFC4_NONCE_SIZE 4
+
+
+
+/* Offsets into AES CCM configuration buffer */
+#define CCM_B0_OFFSET 0
+#define CCM_A0_OFFSET 16
+#define CCM_CTR_COUNT_0_OFFSET 32
+/* CCM B0 and CTR_COUNT constants. */
+#define CCM_BLOCK_NONCE_OFFSET 1 /* Nonce offset inside B0 and CTR_COUNT */
+#define CCM_BLOCK_NONCE_SIZE 3 /* Nonce size inside B0 and CTR_COUNT */
+#define CCM_BLOCK_IV_OFFSET 4 /* IV offset inside B0 and CTR_COUNT */
+#define CCM_BLOCK_IV_SIZE 8 /* IV size inside B0 and CTR_COUNT */
+
+enum aead_ccm_header_size {
+ ccm_header_size_null = -1,
+ ccm_header_size_zero = 0,
+ ccm_header_size_2 = 2,
+ ccm_header_size_6 = 6,
+ ccm_header_size_max = INT32_MAX
+};
+
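+/*
+ * Per-request context. The DMA-able scratch buffers (MAC, CTR IV and
+ * the GCM/CCM configuration blocks) are cache line aligned; they are
+ * followed by the DMA addresses and the bookkeeping needed to build
+ * the MLLI tables for the source/destination scatterlists.
+ */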
+struct aead_req_ctx {
+ /* Allocate a full cache line even though only 4 bytes are needed, so
+ * that the next field starts on a cache line boundary.
+ * Used for both the digest HW compare and the CCM/GCM MAC value */
+ uint8_t mac_buf[MAX_MAC_SIZE] ____cacheline_aligned;
+ uint8_t ctr_iv[AES_BLOCK_SIZE] ____cacheline_aligned;
+
+ //used in gcm
+ uint8_t gcm_iv_inc1[AES_BLOCK_SIZE] ____cacheline_aligned;
+ uint8_t gcm_iv_inc2[AES_BLOCK_SIZE] ____cacheline_aligned;
+ uint8_t hkey[AES_BLOCK_SIZE] ____cacheline_aligned;
+ struct {
+ uint8_t lenA[GCM_BLOCK_LEN_SIZE] ____cacheline_aligned;
+ uint8_t lenC[GCM_BLOCK_LEN_SIZE];
+ } gcm_len_block;
+
+ uint8_t ccm_config[CCM_CONFIG_BUF_SIZE] ____cacheline_aligned;
+ unsigned int hw_iv_size ____cacheline_aligned; /*HW actual size input*/
+ uint8_t backup_mac[MAX_MAC_SIZE]; /*used to prevent cache coherence problem*/
+ uint8_t *backup_iv; /*store iv for generated IV flow*/
+ uint8_t *backup_giv; /*store iv for rfc3686(ctr) flow*/
+ dma_addr_t mac_buf_dma_addr; /* internal ICV DMA buffer */
+ dma_addr_t ccm_iv0_dma_addr; /* buffer for internal ccm configurations */
+ dma_addr_t icv_dma_addr; /* Phys. address of ICV */
+
+ //used in gcm
+ dma_addr_t gcm_iv_inc1_dma_addr; /* buffer for internal gcm configurations */
+ dma_addr_t gcm_iv_inc2_dma_addr; /* buffer for internal gcm configurations */
+ dma_addr_t hkey_dma_addr; /* Phys. address of hkey */
+ dma_addr_t gcm_block_len_dma_addr; /* Phys. address of gcm block len */
+ bool is_gcm4543;
+
+ uint8_t *icv_virt_addr; /* Virt. address of ICV */
+ struct async_gen_req_ctx gen_ctx;
+ struct ssi_mlli assoc;
+ struct ssi_mlli src;
+ struct ssi_mlli dst;
+ struct scatterlist* srcSgl;
+ struct scatterlist* dstSgl;
+ unsigned int srcOffset;
+ unsigned int dstOffset;
+ enum ssi_req_dma_buf_type assoc_buff_type;
+ enum ssi_req_dma_buf_type data_buff_type;
+ struct mlli_params mlli_params;
+ unsigned int cryptlen;
+ struct scatterlist ccm_adata_sg;
+ enum aead_ccm_header_size ccm_hdr_size;
+ unsigned int req_authsize;
+ enum drv_cipher_mode cipher_mode;
+ bool is_icv_fragmented;
+ bool is_single_pass;
+ bool plaintext_authenticate_only; //for gcm_rfc4543
+};
+
+int ssi_aead_alloc(struct ssi_drvdata *drvdata);
+int ssi_aead_free(struct ssi_drvdata *drvdata);
+
+#endif /*__SSI_AEAD_H__*/
diff --git a/drivers/staging/ccree/ssi_buffer_mgr.c b/drivers/staging/ccree/ssi_buffer_mgr.c
new file mode 100644
index 000000000000..038e2ff5e545
--- /dev/null
+++ b/drivers/staging/ccree/ssi_buffer_mgr.c
@@ -0,0 +1,1873 @@
+/*
+ * Copyright (C) 2012-2017 ARM Limited or its affiliates.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/crypto.h>
+#include <linux/version.h>
+#include <crypto/algapi.h>
+#include <crypto/internal/aead.h>
+#include <crypto/hash.h>
+#include <crypto/authenc.h>
+#include <crypto/scatterwalk.h>
+#include <linux/dmapool.h>
+#include <linux/dma-mapping.h>
+#include <linux/crypto.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include "ssi_buffer_mgr.h"
+#include "cc_lli_defs.h"
+#include "ssi_cipher.h"
+#include "ssi_hash.h"
+#include "ssi_aead.h"
+
+#define LLI_MAX_NUM_OF_DATA_ENTRIES 128
+#define LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES 4
+#define MLLI_TABLE_MIN_ALIGNMENT 4 /* Force the MLLI table to be aligned to uint32 */
+#define MAX_NUM_OF_BUFFERS_IN_MLLI 4
+#define MAX_NUM_OF_TOTAL_MLLI_ENTRIES (2 * LLI_MAX_NUM_OF_DATA_ENTRIES + \
+ LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES)
+
+#ifdef CC_DEBUG
+#define DUMP_SGL(sg) \
+ while (sg) { \
+ SSI_LOG_DEBUG("page=%lu offset=%u length=%u (dma_len=%u) " \
+ "dma_addr=%08x\n", (sg)->page_link, (sg)->offset, \
+ (sg)->length, sg_dma_len(sg), (sg)->dma_address); \
+ (sg) = sg_next(sg); \
+ }
+#define DUMP_MLLI_TABLE(mlli_p, nents) \
+ do { \
+ SSI_LOG_DEBUG("mlli=%pK nents=%u\n", (mlli_p), (nents)); \
+ while((nents)--) { \
+ SSI_LOG_DEBUG("addr=0x%08X size=0x%08X\n", \
+ (mlli_p)[LLI_WORD0_OFFSET], \
+ (mlli_p)[LLI_WORD1_OFFSET]); \
+ (mlli_p) += LLI_ENTRY_WORD_SIZE; \
+ } \
+ } while (0)
+#define GET_DMA_BUFFER_TYPE(buff_type) ( \
+ ((buff_type) == SSI_DMA_BUF_NULL) ? "BUF_NULL" : \
+ ((buff_type) == SSI_DMA_BUF_DLLI) ? "BUF_DLLI" : \
+ ((buff_type) == SSI_DMA_BUF_MLLI) ? "BUF_MLLI" : "BUF_INVALID")
+#else
+#define DUMP_SGL(sg)
+#define DUMP_MLLI_TABLE(mlli_p, nents)
+#define GET_DMA_BUFFER_TYPE(buff_type)
+#endif
+
+
+enum dma_buffer_type {
+ DMA_NULL_TYPE = -1,
+ DMA_SGL_TYPE = 1,
+ DMA_BUFF_TYPE = 2,
+};
+
+struct buff_mgr_handle {
+ struct dma_pool *mlli_buffs_pool;
+};
+
+union buffer_array_entry {
+ struct scatterlist *sgl;
+ dma_addr_t buffer_dma;
+};
+
+struct buffer_array {
+ unsigned int num_of_buffers;
+ union buffer_array_entry entry[MAX_NUM_OF_BUFFERS_IN_MLLI];
+ unsigned int offset[MAX_NUM_OF_BUFFERS_IN_MLLI];
+ int nents[MAX_NUM_OF_BUFFERS_IN_MLLI];
+ int total_data_len[MAX_NUM_OF_BUFFERS_IN_MLLI];
+ enum dma_buffer_type type[MAX_NUM_OF_BUFFERS_IN_MLLI];
+ bool is_last[MAX_NUM_OF_BUFFERS_IN_MLLI];
+ uint32_t * mlli_nents[MAX_NUM_OF_BUFFERS_IN_MLLI];
+};
+
+#ifdef CC_DMA_48BIT_SIM
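+/*
+ * Compile-time simulation of 48-bit DMA addressing: on map the original
+ * address is expanded (high part shifted up, a 0xFFFF marker inserted
+ * above the preserved low 16 bits) and on unmap the original address is
+ * reconstructed from the expanded form.
+ */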
+dma_addr_t ssi_buff_mgr_update_dma_addr(dma_addr_t orig_addr, uint32_t data_len)
+{
+ dma_addr_t tmp_dma_addr;
+#ifdef CC_DMA_48BIT_SIM_FULL
+ /* With this code all addresses will be switched to 48 bits. */
+ /* The if condition protects from double expansion */
+ if((((orig_addr >> 16) & 0xFFFF) != 0xFFFF) &&
+ (data_len <= CC_MAX_MLLI_ENTRY_SIZE)) {
+#else
+ if((!(((orig_addr >> 16) & 0xFF) % 2)) &&
+ (data_len <= CC_MAX_MLLI_ENTRY_SIZE)) {
+#endif
+ tmp_dma_addr = ((orig_addr<<16) | 0xFFFF0000 |
+ (orig_addr & UINT16_MAX));
+ SSI_LOG_DEBUG("MAP DMA: orig address=0x%llX "
+ "dma_address=0x%llX\n",
+ orig_addr, tmp_dma_addr);
+ return tmp_dma_addr;
+ }
+ return orig_addr;
+}
+
+dma_addr_t ssi_buff_mgr_restore_dma_addr(dma_addr_t orig_addr)
+{
+ dma_addr_t tmp_dma_addr;
+#ifdef CC_DMA_48BIT_SIM_FULL
+ /* With this code all addresses will be restored from 48 bits. */
+ /* The if condition protects from double restoring */
+ if((orig_addr >> 32) & 0xFFFF ) {
+#else
+ if(((orig_addr >> 32) & 0xFFFF) &&
+ !(((orig_addr >> 32) & 0xFF) % 2) ) {
+#endif
+ /*return high 16 bits*/
+ tmp_dma_addr = ((orig_addr >> 16));
+ /*clean the 0xFFFF in the lower bits (set in the add expansion)*/
+ tmp_dma_addr &= 0xFFFF0000;
+ /* Set the original 16 bits */
+ tmp_dma_addr |= (orig_addr & UINT16_MAX);
+ SSI_LOG_DEBUG("Release DMA: orig address=0x%llX "
+ "dma_address=0x%llX\n",
+ orig_addr, tmp_dma_addr);
+ return tmp_dma_addr;
+ }
+ return orig_addr;
+}
+#endif
+/**
+ * ssi_buffer_mgr_get_sgl_nents() - Get scatterlist number of entries.
+ *
+ * @sg_list: SG list
+ * @nbytes: [IN] Total SGL data bytes.
+ * @lbytes: [OUT] Returns the amount of bytes at the last entry
+ * @is_chained: [OUT] Set to true if the SG list contains chained entries
+ */
+static unsigned int ssi_buffer_mgr_get_sgl_nents(
+ struct scatterlist *sg_list, unsigned int nbytes, uint32_t *lbytes, bool *is_chained)
+{
+ unsigned int nents = 0;
+ while (nbytes != 0) {
+ if (sg_is_chain(sg_list)) {
+ SSI_LOG_ERR("Unexpected chanined entry "
+ "in sg (entry =0x%X) \n", nents);
+ BUG();
+ }
+ if (sg_list->length != 0) {
+ nents++;
+ /* get the number of bytes in the last entry */
+ *lbytes = nbytes;
+ nbytes -= ( sg_list->length > nbytes ) ? nbytes : sg_list->length;
+ sg_list = sg_next(sg_list);
+ } else {
+ sg_list = (struct scatterlist *)sg_page(sg_list);
+ if (is_chained != NULL) {
+ *is_chained = true;
+ }
+ }
+ }
+ SSI_LOG_DEBUG("nents %d last bytes %d\n",nents, *lbytes);
+ return nents;
+}
+
+/**
+ * ssi_buffer_mgr_zero_sgl() - Zero scatter list data.
+ *
+ * @sgl: SG list to zero
+ * @data_len: Number of data bytes to zero
+ */
+void ssi_buffer_mgr_zero_sgl(struct scatterlist *sgl, uint32_t data_len)
+{
+ struct scatterlist *current_sg = sgl;
+ int sg_index = 0;
+
+ while (sg_index <= data_len) {
+ if (current_sg == NULL) {
+ /* reached the end of the sgl --> just return */
+ return;
+ }
+ memset(sg_virt(current_sg), 0, current_sg->length);
+ sg_index += current_sg->length;
+ current_sg = sg_next(current_sg);
+ }
+}
+
+/**
+ * ssi_buffer_mgr_copy_scatterlist_portion() - Copy scatter list data,
+ * from to_skip to end, to dest and vice versa
+ *
+ * @dest: Buffer to copy to/from
+ * @sg: SG list to copy from/to
+ * @to_skip: Offset (in bytes) in the SG data where the copy starts
+ * @end: Offset (in bytes) in the SG data where the copy ends
+ * @direct: Copy direction (SSI_SG_TO_BUF or SSI_SG_FROM_BUF)
+ */
+void ssi_buffer_mgr_copy_scatterlist_portion(
+ u8 *dest, struct scatterlist *sg,
+ uint32_t to_skip, uint32_t end,
+ enum ssi_sg_cpy_direct direct)
+{
+ uint32_t nents, lbytes;
+
+ nents = ssi_buffer_mgr_get_sgl_nents(sg, end, &lbytes, NULL);
+ sg_copy_buffer(sg, nents, (void *)dest, (end - to_skip), 0, (direct == SSI_SG_TO_BUF));
+}
+
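+/*
+ * Render one contiguous DMA buffer into MLLI entries, splitting buffers
+ * larger than CC_MAX_MLLI_ENTRY_SIZE into several entries and failing
+ * if the table would exceed MAX_NUM_OF_TOTAL_MLLI_ENTRIES.
+ */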
+static inline int ssi_buffer_mgr_render_buff_to_mlli(
+ dma_addr_t buff_dma, uint32_t buff_size, uint32_t *curr_nents,
+ uint32_t **mlli_entry_pp)
+{
+ uint32_t *mlli_entry_p = *mlli_entry_pp;
+ uint32_t new_nents;
+
+ /* Verify there is no memory overflow*/
+ new_nents = (*curr_nents + buff_size/CC_MAX_MLLI_ENTRY_SIZE + 1);
+ if (new_nents > MAX_NUM_OF_TOTAL_MLLI_ENTRIES ) {
+ return -ENOMEM;
+ }
+
+ /*handle buffer longer than 64 kbytes */
+ while (buff_size > CC_MAX_MLLI_ENTRY_SIZE ) {
+ SSI_UPDATE_DMA_ADDR_TO_48BIT(buff_dma, CC_MAX_MLLI_ENTRY_SIZE);
+ LLI_SET_ADDR(mlli_entry_p,buff_dma);
+ LLI_SET_SIZE(mlli_entry_p, CC_MAX_MLLI_ENTRY_SIZE);
+ SSI_LOG_DEBUG("entry[%d]: single_buff=0x%08X size=%08X\n",*curr_nents,
+ mlli_entry_p[LLI_WORD0_OFFSET],
+ mlli_entry_p[LLI_WORD1_OFFSET]);
+ SSI_RESTORE_DMA_ADDR_TO_48BIT(buff_dma);
+ buff_dma += CC_MAX_MLLI_ENTRY_SIZE;
+ buff_size -= CC_MAX_MLLI_ENTRY_SIZE;
+ mlli_entry_p = mlli_entry_p + 2;
+ (*curr_nents)++;
+ }
+ /*Last entry */
+ SSI_UPDATE_DMA_ADDR_TO_48BIT(buff_dma, buff_size);
+ LLI_SET_ADDR(mlli_entry_p,buff_dma);
+ LLI_SET_SIZE(mlli_entry_p, buff_size);
+ SSI_LOG_DEBUG("entry[%d]: single_buff=0x%08X size=%08X\n",*curr_nents,
+ mlli_entry_p[LLI_WORD0_OFFSET],
+ mlli_entry_p[LLI_WORD1_OFFSET]);
+ mlli_entry_p = mlli_entry_p + 2;
+ *mlli_entry_pp = mlli_entry_p;
+ (*curr_nents)++;
+ return 0;
+}
+
+
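+/*
+ * Walk a mapped scatterlist and render each segment into MLLI entries,
+ * honouring an initial byte offset into the first segment.
+ */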
+static inline int ssi_buffer_mgr_render_scatterlist_to_mlli(
+ struct scatterlist *sgl, uint32_t sgl_data_len, uint32_t sglOffset, uint32_t *curr_nents,
+ uint32_t **mlli_entry_pp)
+{
+ struct scatterlist *curr_sgl = sgl;
+ uint32_t *mlli_entry_p = *mlli_entry_pp;
+ int32_t rc = 0;
+
+ for ( ; (curr_sgl != NULL) && (sgl_data_len != 0);
+ curr_sgl = sg_next(curr_sgl)) {
+ uint32_t entry_data_len =
+ (sgl_data_len > sg_dma_len(curr_sgl) - sglOffset) ?
+ sg_dma_len(curr_sgl) - sglOffset : sgl_data_len ;
+ sgl_data_len -= entry_data_len;
+ rc = ssi_buffer_mgr_render_buff_to_mlli(
+ sg_dma_address(curr_sgl) + sglOffset, entry_data_len, curr_nents,
+ &mlli_entry_p);
+ if(rc != 0) {
+ return rc;
+ }
+ sglOffset=0;
+ }
+ *mlli_entry_pp = mlli_entry_p;
+ return 0;
+}
+
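+/*
+ * Allocate an MLLI table from the DMA pool and render every buffer
+ * collected in sg_data into it, accumulating the per-buffer entry
+ * counts for callers that track them.
+ */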
+static int ssi_buffer_mgr_generate_mlli(
+ struct device *dev,
+ struct buffer_array *sg_data,
+ struct mlli_params *mlli_params)
+{
+ uint32_t *mlli_p;
+ uint32_t total_nents = 0, prev_total_nents = 0;
+ int rc = 0, i;
+
+ SSI_LOG_DEBUG("NUM of SG's = %d\n", sg_data->num_of_buffers);
+
+ /* Allocate memory from the pointed pool */
+ mlli_params->mlli_virt_addr = dma_pool_alloc(
+ mlli_params->curr_pool, GFP_KERNEL,
+ &(mlli_params->mlli_dma_addr));
+ if (unlikely(mlli_params->mlli_virt_addr == NULL)) {
+ SSI_LOG_ERR("dma_pool_alloc() failed\n");
+ rc = -ENOMEM;
+ goto build_mlli_exit;
+ }
+ SSI_UPDATE_DMA_ADDR_TO_48BIT(mlli_params->mlli_dma_addr,
+ (MAX_NUM_OF_TOTAL_MLLI_ENTRIES*
+ LLI_ENTRY_BYTE_SIZE));
+ /* Point to start of MLLI */
+ mlli_p = (uint32_t *)mlli_params->mlli_virt_addr;
+ /* go over all SG's and link them into one MLLI table */
+ for (i = 0; i < sg_data->num_of_buffers; i++) {
+ if (sg_data->type[i] == DMA_SGL_TYPE)
+ rc = ssi_buffer_mgr_render_scatterlist_to_mlli(
+ sg_data->entry[i].sgl,
+ sg_data->total_data_len[i], sg_data->offset[i], &total_nents,
+ &mlli_p);
+ else /*DMA_BUFF_TYPE*/
+ rc = ssi_buffer_mgr_render_buff_to_mlli(
+ sg_data->entry[i].buffer_dma,
+ sg_data->total_data_len[i], &total_nents,
+ &mlli_p);
+ if(rc != 0) {
+ return rc;
+ }
+
+ /* set last bit in the current table */
+ if (sg_data->mlli_nents[i] != NULL) {
+ /*Calculate the current MLLI table length for the
+ length field in the descriptor*/
+ *(sg_data->mlli_nents[i]) +=
+ (total_nents - prev_total_nents);
+ prev_total_nents = total_nents;
+ }
+ }
+
+ /* Set MLLI size for the bypass operation */
+ mlli_params->mlli_len = (total_nents * LLI_ENTRY_BYTE_SIZE);
+
+ SSI_LOG_DEBUG("MLLI params: "
+ "virt_addr=%pK dma_addr=0x%llX mlli_len=0x%X\n",
+ mlli_params->mlli_virt_addr,
+ (unsigned long long)mlli_params->mlli_dma_addr,
+ mlli_params->mlli_len);
+
+build_mlli_exit:
+ return rc;
+}
+
+static inline void ssi_buffer_mgr_add_buffer_entry(
+ struct buffer_array *sgl_data,
+ dma_addr_t buffer_dma, unsigned int buffer_len,
+ bool is_last_entry, uint32_t *mlli_nents)
+{
+ unsigned int index = sgl_data->num_of_buffers;
+
+ SSI_LOG_DEBUG("index=%u single_buff=0x%llX "
+ "buffer_len=0x%08X is_last=%d\n",
+ index, (unsigned long long)buffer_dma, buffer_len, is_last_entry);
+ sgl_data->nents[index] = 1;
+ sgl_data->entry[index].buffer_dma = buffer_dma;
+ sgl_data->offset[index] = 0;
+ sgl_data->total_data_len[index] = buffer_len;
+ sgl_data->type[index] = DMA_BUFF_TYPE;
+ sgl_data->is_last[index] = is_last_entry;
+ sgl_data->mlli_nents[index] = mlli_nents;
+ if (sgl_data->mlli_nents[index] != NULL)
+ *sgl_data->mlli_nents[index] = 0;
+ sgl_data->num_of_buffers++;
+}
+
+static inline void ssi_buffer_mgr_add_scatterlist_entry(
+ struct buffer_array *sgl_data,
+ unsigned int nents,
+ struct scatterlist *sgl,
+ unsigned int data_len,
+ unsigned int data_offset,
+ bool is_last_table,
+ uint32_t *mlli_nents)
+{
+ unsigned int index = sgl_data->num_of_buffers;
+
+ SSI_LOG_DEBUG("index=%u nents=%u sgl=%pK data_len=0x%08X is_last=%d\n",
+ index, nents, sgl, data_len, is_last_table);
+ sgl_data->nents[index] = nents;
+ sgl_data->entry[index].sgl = sgl;
+ sgl_data->offset[index] = data_offset;
+ sgl_data->total_data_len[index] = data_len;
+ sgl_data->type[index] = DMA_SGL_TYPE;
+ sgl_data->is_last[index] = is_last_table;
+ sgl_data->mlli_nents[index] = mlli_nents;
+ if (sgl_data->mlli_nents[index] != NULL)
+ *sgl_data->mlli_nents[index] = 0;
+ sgl_data->num_of_buffers++;
+}
+
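+/*
+ * Map a (possibly chained) scatterlist entry by entry, unmapping the
+ * entries already mapped if any of them fails.
+ */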
+static int
+ssi_buffer_mgr_dma_map_sg(struct device *dev, struct scatterlist *sg, uint32_t nents,
+ enum dma_data_direction direction)
+{
+ uint32_t i , j;
+ struct scatterlist *l_sg = sg;
+ for (i = 0; i < nents; i++) {
+ if (l_sg == NULL) {
+ break;
+ }
+ if (unlikely(dma_map_sg(dev, l_sg, 1, direction) != 1)){
+ SSI_LOG_ERR("dma_map_page() sg buffer failed\n");
+ goto err;
+ }
+ l_sg = sg_next(l_sg);
+ }
+ return nents;
+
+err:
+ /* Restore mapped parts */
+ for (j = 0; j < i; j++) {
+ if (sg == NULL) {
+ break;
+ }
+ dma_unmap_sg(dev,sg,1,direction);
+ sg = sg_next(sg);
+ }
+ return 0;
+}
+
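+/*
+ * Map an SG list for DMA: a single-entry list is mapped directly (DLLI
+ * case); otherwise the entry count is validated against max_sg_nents
+ * and the list is mapped as a whole or, when chained, entry by entry.
+ */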
+static int ssi_buffer_mgr_map_scatterlist(
+ struct device *dev, struct scatterlist *sg,
+ unsigned int nbytes, int direction,
+ uint32_t *nents, uint32_t max_sg_nents,
+ uint32_t *lbytes, uint32_t *mapped_nents)
+{
+ bool is_chained = false;
+
+ if (sg_is_last(sg)) {
+ /* One entry only case -set to DLLI */
+ if (unlikely(dma_map_sg(dev, sg, 1, direction) != 1)) {
+ SSI_LOG_ERR("dma_map_sg() single buffer failed\n");
+ return -ENOMEM;
+ }
+ SSI_LOG_DEBUG("Mapped sg: dma_address=0x%llX "
+ "page_link=0x%08lX addr=%pK offset=%u "
+ "length=%u\n",
+ (unsigned long long)sg_dma_address(sg),
+ sg->page_link,
+ sg_virt(sg),
+ sg->offset, sg->length);
+ *lbytes = nbytes;
+ *nents = 1;
+ *mapped_nents = 1;
+ SSI_UPDATE_DMA_ADDR_TO_48BIT(sg_dma_address(sg), sg_dma_len(sg));
+ } else { /*sg_is_last*/
+ *nents = ssi_buffer_mgr_get_sgl_nents(sg, nbytes, lbytes,
+ &is_chained);
+ if (*nents > max_sg_nents) {
+ *nents = 0;
+ SSI_LOG_ERR("Too many fragments. current %d max %d\n",
+ *nents, max_sg_nents);
+ return -ENOMEM;
+ }
+ if (!is_chained) {
+ /* In case of mmu the number of mapped nents might
+ be changed from the original sgl nents */
+ *mapped_nents = dma_map_sg(dev, sg, *nents, direction);
+ if (unlikely(*mapped_nents == 0)){
+ *nents = 0;
+ SSI_LOG_ERR("dma_map_sg() sg buffer failed\n");
+ return -ENOMEM;
+ }
+ } else {
+ /*In this case the driver maps entry by entry so it
+ must have the same nents before and after map */
+ *mapped_nents = ssi_buffer_mgr_dma_map_sg(dev,
+ sg,
+ *nents,
+ direction);
+ if (unlikely(*mapped_nents != *nents)){
+ *nents = *mapped_nents;
+ SSI_LOG_ERR("dma_map_sg() sg buffer failed\n");
+ return -ENOMEM;
+ }
+ }
+ }
+
+ return 0;
+}
+
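+/*
+ * Map the CCM configuration block as a single SG entry and, when there
+ * is associated data, queue it for the assoc MLLI table.
+ */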
+static inline int
+ssi_aead_handle_config_buf(struct device *dev,
+ struct aead_req_ctx *areq_ctx,
+ uint8_t* config_data,
+ struct buffer_array *sg_data,
+ unsigned int assoclen)
+{
+ SSI_LOG_DEBUG(" handle additional data config set to DLLI \n");
+ /* create sg for the current buffer */
+ sg_init_one(&areq_ctx->ccm_adata_sg, config_data, AES_BLOCK_SIZE + areq_ctx->ccm_hdr_size);
+ if (unlikely(dma_map_sg(dev, &areq_ctx->ccm_adata_sg, 1,
+ DMA_TO_DEVICE) != 1)) {
+ SSI_LOG_ERR("dma_map_sg() "
+ "config buffer failed\n");
+ return -ENOMEM;
+ }
+ SSI_LOG_DEBUG("Mapped curr_buff: dma_address=0x%llX "
+ "page_link=0x%08lX addr=%pK "
+ "offset=%u length=%u\n",
+ (unsigned long long)sg_dma_address(&areq_ctx->ccm_adata_sg),
+ areq_ctx->ccm_adata_sg.page_link,
+ sg_virt(&areq_ctx->ccm_adata_sg),
+ areq_ctx->ccm_adata_sg.offset,
+ areq_ctx->ccm_adata_sg.length);
+ /* prepare for case of MLLI */
+ if (assoclen > 0) {
+ ssi_buffer_mgr_add_scatterlist_entry(sg_data, 1,
+ &areq_ctx->ccm_adata_sg,
+ (AES_BLOCK_SIZE +
+ areq_ctx->ccm_hdr_size), 0,
+ false, NULL);
+ }
+ return 0;
+}
+
+
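+/*
+ * Map the hash request's current data buffer as a single DLLI entry and
+ * queue it in sg_data in case an MLLI table has to be built.
+ */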
+static inline int ssi_ahash_handle_curr_buf(struct device *dev,
+ struct ahash_req_ctx *areq_ctx,
+ uint8_t* curr_buff,
+ uint32_t curr_buff_cnt,
+ struct buffer_array *sg_data)
+{
+ SSI_LOG_DEBUG(" handle curr buff %x set to DLLI \n", curr_buff_cnt);
+ /* create sg for the current buffer */
+ sg_init_one(areq_ctx->buff_sg,curr_buff, curr_buff_cnt);
+ if (unlikely(dma_map_sg(dev, areq_ctx->buff_sg, 1,
+ DMA_TO_DEVICE) != 1)) {
+ SSI_LOG_ERR("dma_map_sg() "
+ "src buffer failed\n");
+ return -ENOMEM;
+ }
+ SSI_LOG_DEBUG("Mapped curr_buff: dma_address=0x%llX "
+ "page_link=0x%08lX addr=%pK "
+ "offset=%u length=%u\n",
+ (unsigned long long)sg_dma_address(areq_ctx->buff_sg),
+ areq_ctx->buff_sg->page_link,
+ sg_virt(areq_ctx->buff_sg),
+ areq_ctx->buff_sg->offset,
+ areq_ctx->buff_sg->length);
+ areq_ctx->data_dma_buf_type = SSI_DMA_BUF_DLLI;
+ areq_ctx->curr_sg = areq_ctx->buff_sg;
+ areq_ctx->in_nents = 0;
+ /* prepare for case of MLLI */
+ ssi_buffer_mgr_add_scatterlist_entry(sg_data, 1, areq_ctx->buff_sg,
+ curr_buff_cnt, 0, false, NULL);
+ return 0;
+}
+
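+/*
+ * Undo ssi_buffer_mgr_map_blkcipher_request(): unmap the IV, release
+ * the MLLI table if one was allocated and unmap the src/dst lists.
+ */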
+void ssi_buffer_mgr_unmap_blkcipher_request(
+ struct device *dev,
+ void *ctx,
+ unsigned int ivsize,
+ struct scatterlist *src,
+ struct scatterlist *dst)
+{
+ struct blkcipher_req_ctx *req_ctx = (struct blkcipher_req_ctx *)ctx;
+
+ if (likely(req_ctx->gen_ctx.iv_dma_addr != 0)) {
+ SSI_LOG_DEBUG("Unmapped iv: iv_dma_addr=0x%llX iv_size=%u\n",
+ (unsigned long long)req_ctx->gen_ctx.iv_dma_addr,
+ ivsize);
+ SSI_RESTORE_DMA_ADDR_TO_48BIT(req_ctx->gen_ctx.iv_dma_addr);
+ dma_unmap_single(dev, req_ctx->gen_ctx.iv_dma_addr,
+ ivsize,
+ req_ctx->is_giv ? DMA_BIDIRECTIONAL :
+ DMA_TO_DEVICE);
+ }
+ /* Release pool */
+ if (req_ctx->dma_buf_type == SSI_DMA_BUF_MLLI) {
+ SSI_RESTORE_DMA_ADDR_TO_48BIT(req_ctx->mlli_params.mlli_dma_addr);
+ dma_pool_free(req_ctx->mlli_params.curr_pool,
+ req_ctx->mlli_params.mlli_virt_addr,
+ req_ctx->mlli_params.mlli_dma_addr);
+ }
+
+ SSI_RESTORE_DMA_ADDR_TO_48BIT(sg_dma_address(src));
+ dma_unmap_sg(dev, src, req_ctx->in_nents,
+ DMA_BIDIRECTIONAL);
+ SSI_LOG_DEBUG("Unmapped req->src=%pK\n",
+ sg_virt(src));
+
+ if (src != dst) {
+ SSI_RESTORE_DMA_ADDR_TO_48BIT(sg_dma_address(dst));
+ dma_unmap_sg(dev, dst, req_ctx->out_nents,
+ DMA_BIDIRECTIONAL);
+ SSI_LOG_DEBUG("Unmapped req->dst=%pK\n",
+ sg_virt(dst));
+ }
+}
+
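+/*
+ * Map everything a block cipher request needs for DMA: the IV (if any),
+ * the source scatterlist and, for out-of-place operation, the
+ * destination scatterlist; an MLLI table is generated whenever more
+ * than one entry is mapped.
+ */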
+int ssi_buffer_mgr_map_blkcipher_request(
+ struct ssi_drvdata *drvdata,
+ void *ctx,
+ unsigned int ivsize,
+ unsigned int nbytes,
+ void *info,
+ struct scatterlist *src,
+ struct scatterlist *dst)
+{
+ struct blkcipher_req_ctx *req_ctx = (struct blkcipher_req_ctx *)ctx;
+ struct mlli_params *mlli_params = &req_ctx->mlli_params;
+ struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
+ struct device *dev = &drvdata->plat_dev->dev;
+ struct buffer_array sg_data;
+ uint32_t dummy = 0;
+ int rc = 0;
+ uint32_t mapped_nents = 0;
+
+ req_ctx->dma_buf_type = SSI_DMA_BUF_DLLI;
+ mlli_params->curr_pool = NULL;
+ sg_data.num_of_buffers = 0;
+
+ /* Map IV buffer */
+ if (likely(ivsize != 0) ) {
+ dump_byte_array("iv", (uint8_t *)info, ivsize);
+ req_ctx->gen_ctx.iv_dma_addr =
+ dma_map_single(dev, (void *)info,
+ ivsize,
+ req_ctx->is_giv ? DMA_BIDIRECTIONAL:
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev,
+ req_ctx->gen_ctx.iv_dma_addr))) {
+ SSI_LOG_ERR("Mapping iv %u B at va=%pK "
+ "for DMA failed\n", ivsize, info);
+ return -ENOMEM;
+ }
+ SSI_UPDATE_DMA_ADDR_TO_48BIT(req_ctx->gen_ctx.iv_dma_addr,
+ ivsize);
+ SSI_LOG_DEBUG("Mapped iv %u B at va=%pK to dma=0x%llX\n",
+ ivsize, info,
+ (unsigned long long)req_ctx->gen_ctx.iv_dma_addr);
+ } else
+ req_ctx->gen_ctx.iv_dma_addr = 0;
+
+ /* Map the src SGL */
+ rc = ssi_buffer_mgr_map_scatterlist(dev, src,
+ nbytes, DMA_BIDIRECTIONAL, &req_ctx->in_nents,
+ LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy, &mapped_nents);
+ if (unlikely(rc != 0)) {
+ rc = -ENOMEM;
+ goto ablkcipher_exit;
+ }
+ if (mapped_nents > 1)
+ req_ctx->dma_buf_type = SSI_DMA_BUF_MLLI;
+
+ if (unlikely(src == dst)) {
+ /* Handle inplace operation */
+ if (unlikely(req_ctx->dma_buf_type == SSI_DMA_BUF_MLLI)) {
+ req_ctx->out_nents = 0;
+ ssi_buffer_mgr_add_scatterlist_entry(&sg_data,
+ req_ctx->in_nents, src,
+ nbytes, 0, true, &req_ctx->in_mlli_nents);
+ }
+ } else {
+ /* Map the dst sg */
+ if (unlikely(ssi_buffer_mgr_map_scatterlist(
+ dev,dst, nbytes,
+ DMA_BIDIRECTIONAL, &req_ctx->out_nents,
+ LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy,
+ &mapped_nents))){
+ rc = -ENOMEM;
+ goto ablkcipher_exit;
+ }
+ if (mapped_nents > 1)
+ req_ctx->dma_buf_type = SSI_DMA_BUF_MLLI;
+
+ if (unlikely((req_ctx->dma_buf_type == SSI_DMA_BUF_MLLI))) {
+ ssi_buffer_mgr_add_scatterlist_entry(&sg_data,
+ req_ctx->in_nents, src,
+ nbytes, 0, true,
+ &req_ctx->in_mlli_nents);
+ ssi_buffer_mgr_add_scatterlist_entry(&sg_data,
+ req_ctx->out_nents, dst,
+ nbytes, 0, true,
+ &req_ctx->out_mlli_nents);
+ }
+ }
+
+ if (unlikely(req_ctx->dma_buf_type == SSI_DMA_BUF_MLLI)) {
+ mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
+ rc = ssi_buffer_mgr_generate_mlli(dev, &sg_data, mlli_params);
+ if (unlikely(rc!= 0))
+ goto ablkcipher_exit;
+
+ }
+
+ SSI_LOG_DEBUG("areq_ctx->dma_buf_type = %s\n",
+ GET_DMA_BUFFER_TYPE(req_ctx->dma_buf_type));
+
+ return 0;
+
+ablkcipher_exit:
+ ssi_buffer_mgr_unmap_blkcipher_request(dev, req_ctx, ivsize, src, dst);
+ return rc;
+}
+
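+/*
+ * Release all DMA mappings taken for an AEAD request: MAC buffer,
+ * GCM/CCM configuration blocks, IV, MLLI table and the src/dst
+ * scatterlists.
+ */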
+void ssi_buffer_mgr_unmap_aead_request(
+ struct device *dev, struct aead_request *req)
+{
+ struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+ unsigned int hw_iv_size = areq_ctx->hw_iv_size;
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ uint32_t dummy;
+ bool chained;
+ uint32_t size_to_unmap = 0;
+
+ if (areq_ctx->mac_buf_dma_addr != 0) {
+ SSI_RESTORE_DMA_ADDR_TO_48BIT(areq_ctx->mac_buf_dma_addr);
+ dma_unmap_single(dev, areq_ctx->mac_buf_dma_addr,
+ MAX_MAC_SIZE, DMA_BIDIRECTIONAL);
+ }
+
+#if SSI_CC_HAS_AES_GCM
+ if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) {
+ if (areq_ctx->hkey_dma_addr != 0) {
+ SSI_RESTORE_DMA_ADDR_TO_48BIT(areq_ctx->hkey_dma_addr);
+ dma_unmap_single(dev, areq_ctx->hkey_dma_addr,
+ AES_BLOCK_SIZE, DMA_BIDIRECTIONAL);
+ }
+
+ if (areq_ctx->gcm_block_len_dma_addr != 0) {
+ SSI_RESTORE_DMA_ADDR_TO_48BIT(areq_ctx->gcm_block_len_dma_addr);
+ dma_unmap_single(dev, areq_ctx->gcm_block_len_dma_addr,
+ AES_BLOCK_SIZE, DMA_TO_DEVICE);
+ }
+
+ if (areq_ctx->gcm_iv_inc1_dma_addr != 0) {
+ SSI_RESTORE_DMA_ADDR_TO_48BIT(areq_ctx->gcm_iv_inc1_dma_addr);
+ dma_unmap_single(dev, areq_ctx->gcm_iv_inc1_dma_addr,
+ AES_BLOCK_SIZE, DMA_TO_DEVICE);
+ }
+
+ if (areq_ctx->gcm_iv_inc2_dma_addr != 0) {
+ SSI_RESTORE_DMA_ADDR_TO_48BIT(areq_ctx->gcm_iv_inc2_dma_addr);
+ dma_unmap_single(dev, areq_ctx->gcm_iv_inc2_dma_addr,
+ AES_BLOCK_SIZE, DMA_TO_DEVICE);
+ }
+ }
+#endif
+
+ if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
+ if (areq_ctx->ccm_iv0_dma_addr != 0) {
+ SSI_RESTORE_DMA_ADDR_TO_48BIT(areq_ctx->ccm_iv0_dma_addr);
+ dma_unmap_single(dev, areq_ctx->ccm_iv0_dma_addr,
+ AES_BLOCK_SIZE, DMA_TO_DEVICE);
+ }
+
+ dma_unmap_sg(dev, &areq_ctx->ccm_adata_sg, 1, DMA_TO_DEVICE);
+ }
+ if (areq_ctx->gen_ctx.iv_dma_addr != 0) {
+ SSI_RESTORE_DMA_ADDR_TO_48BIT(areq_ctx->gen_ctx.iv_dma_addr);
+ dma_unmap_single(dev, areq_ctx->gen_ctx.iv_dma_addr,
+ hw_iv_size, DMA_BIDIRECTIONAL);
+ }
+
+ /*In case a pool was set, a table was
+ allocated and should be released */
+ if (areq_ctx->mlli_params.curr_pool != NULL) {
+ SSI_LOG_DEBUG("free MLLI buffer: dma=0x%08llX virt=%pK\n",
+ (unsigned long long)areq_ctx->mlli_params.mlli_dma_addr,
+ areq_ctx->mlli_params.mlli_virt_addr);
+ SSI_RESTORE_DMA_ADDR_TO_48BIT(areq_ctx->mlli_params.mlli_dma_addr);
+ dma_pool_free(areq_ctx->mlli_params.curr_pool,
+ areq_ctx->mlli_params.mlli_virt_addr,
+ areq_ctx->mlli_params.mlli_dma_addr);
+ }
+
+ SSI_LOG_DEBUG("Unmapping src sgl: req->src=%pK areq_ctx->src.nents=%u areq_ctx->assoc.nents=%u assoclen:%u cryptlen=%u\n", sg_virt(req->src),areq_ctx->src.nents,areq_ctx->assoc.nents,req->assoclen,req->cryptlen);
+ SSI_RESTORE_DMA_ADDR_TO_48BIT(sg_dma_address(req->src));
+ size_to_unmap = req->assoclen+req->cryptlen;
+ if(areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT){
+ size_to_unmap += areq_ctx->req_authsize;
+ }
+ if (areq_ctx->is_gcm4543)
+ size_to_unmap += crypto_aead_ivsize(tfm);
+
+ dma_unmap_sg(dev, req->src, ssi_buffer_mgr_get_sgl_nents(req->src,size_to_unmap,&dummy,&chained) , DMA_BIDIRECTIONAL);
+ if (unlikely(req->src != req->dst)) {
+ SSI_LOG_DEBUG("Unmapping dst sgl: req->dst=%pK\n",
+ sg_virt(req->dst));
+ SSI_RESTORE_DMA_ADDR_TO_48BIT(sg_dma_address(req->dst));
+ dma_unmap_sg(dev, req->dst, ssi_buffer_mgr_get_sgl_nents(req->dst,size_to_unmap,&dummy,&chained),
+ DMA_BIDIRECTIONAL);
+ }
+#if DX_HAS_ACP
+ if ((areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) &&
+ likely(req->src == req->dst))
+ {
+ uint32_t size_to_skip = req->assoclen;
+ if (areq_ctx->is_gcm4543) {
+ size_to_skip += crypto_aead_ivsize(tfm);
+ }
+ /* copy the MAC to a temporary location to deal with a possible
+ data overwrite caused by a cache coherence problem. */
+ ssi_buffer_mgr_copy_scatterlist_portion(
+ areq_ctx->backup_mac, req->src,
+ size_to_skip+ req->cryptlen - areq_ctx->req_authsize,
+ size_to_skip+ req->cryptlen, SSI_SG_FROM_BUF);
+ }
+#endif
+}
+
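+/*
+ * Work out how many SG entries hold the ICV and whether it is split
+ * across entries; a fragmented ICV is later handled with a CPU-side
+ * copy and compare.
+ */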
+static inline int ssi_buffer_mgr_get_aead_icv_nents(
+ struct scatterlist *sgl,
+ unsigned int sgl_nents,
+ unsigned int authsize,
+ uint32_t last_entry_data_size,
+ bool *is_icv_fragmented)
+{
+ unsigned int icv_max_size = 0;
+ unsigned int icv_required_size = authsize > last_entry_data_size ? (authsize - last_entry_data_size) : authsize;
+ unsigned int nents;
+ unsigned int i;
+
+ if (sgl_nents < MAX_ICV_NENTS_SUPPORTED) {
+ *is_icv_fragmented = false;
+ return 0;
+ }
+
+ for( i = 0 ; i < (sgl_nents - MAX_ICV_NENTS_SUPPORTED) ; i++) {
+ if (sgl == NULL) {
+ break;
+ }
+ sgl = sg_next(sgl);
+ }
+
+ if (sgl != NULL) {
+ icv_max_size = sgl->length;
+ }
+
+ if (last_entry_data_size > authsize) {
+ nents = 0; /* ICV attached to data in last entry (not fragmented!) */
+ *is_icv_fragmented = false;
+ } else if (last_entry_data_size == authsize) {
+ nents = 1; /* ICV placed in whole last entry (not fragmented!) */
+ *is_icv_fragmented = false;
+ } else if (icv_max_size > icv_required_size) {
+ nents = 1;
+ *is_icv_fragmented = true;
+ } else if (icv_max_size == icv_required_size) {
+ nents = 2;
+ *is_icv_fragmented = true;
+ } else {
+ SSI_LOG_ERR("Unsupported num. of ICV fragments (> %d)\n",
+ MAX_ICV_NENTS_SUPPORTED);
+ nents = -1; /*unsupported*/
+ }
+ SSI_LOG_DEBUG("is_frag=%s icv_nents=%u\n",
+ (*is_icv_fragmented ? "true" : "false"), nents);
+
+ return nents;
+}
+
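+/*
+ * Map the request IV for DMA; for plaintext-authenticate-only (rfc4543)
+ * flows the IV is also chained into the assoc MLLI list.
+ */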
+static inline int ssi_buffer_mgr_aead_chain_iv(
+ struct ssi_drvdata *drvdata,
+ struct aead_request *req,
+ struct buffer_array *sg_data,
+ bool is_last, bool do_chain)
+{
+ struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+ unsigned int hw_iv_size = areq_ctx->hw_iv_size;
+ struct device *dev = &drvdata->plat_dev->dev;
+ int rc = 0;
+
+ if (unlikely(req->iv == NULL)) {
+ areq_ctx->gen_ctx.iv_dma_addr = 0;
+ goto chain_iv_exit;
+ }
+
+ areq_ctx->gen_ctx.iv_dma_addr = dma_map_single(dev, req->iv,
+ hw_iv_size, DMA_BIDIRECTIONAL);
+ if (unlikely(dma_mapping_error(dev, areq_ctx->gen_ctx.iv_dma_addr))) {
+ SSI_LOG_ERR("Mapping iv %u B at va=%pK for DMA failed\n",
+ hw_iv_size, req->iv);
+ rc = -ENOMEM;
+ goto chain_iv_exit;
+ }
+ SSI_UPDATE_DMA_ADDR_TO_48BIT(areq_ctx->gen_ctx.iv_dma_addr, hw_iv_size);
+
+ SSI_LOG_DEBUG("Mapped iv %u B at va=%pK to dma=0x%llX\n",
+ hw_iv_size, req->iv,
+ (unsigned long long)areq_ctx->gen_ctx.iv_dma_addr);
+ if (do_chain == true && areq_ctx->plaintext_authenticate_only == true){ // TODO: what about CTR?? ask Ron
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ unsigned int iv_size_to_authenc = crypto_aead_ivsize(tfm);
+ unsigned int iv_ofs = GCM_BLOCK_RFC4_IV_OFFSET;
+ /* Chain to given list */
+ ssi_buffer_mgr_add_buffer_entry(
+ sg_data, areq_ctx->gen_ctx.iv_dma_addr + iv_ofs,
+ iv_size_to_authenc, is_last,
+ &areq_ctx->assoc.mlli_nents);
+ areq_ctx->assoc_buff_type = SSI_DMA_BUF_MLLI;
+ }
+
+chain_iv_exit:
+ return rc;
+}
+
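+/*
+ * Count how many already-mapped SG entries cover the associated data
+ * and decide whether it can be passed as DLLI or needs an MLLI table
+ * (CCM adds an extra entry for its header configuration block).
+ */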
+static inline int ssi_buffer_mgr_aead_chain_assoc(
+ struct ssi_drvdata *drvdata,
+ struct aead_request *req,
+ struct buffer_array *sg_data,
+ bool is_last, bool do_chain)
+{
+ struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+ int rc = 0;
+ uint32_t mapped_nents = 0;
+ struct scatterlist *current_sg = req->src;
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ unsigned int sg_index = 0;
+ uint32_t size_of_assoc = req->assoclen;
+
+ if (areq_ctx->is_gcm4543) {
+ size_of_assoc += crypto_aead_ivsize(tfm);
+ }
+
+ if (sg_data == NULL) {
+ rc = -EINVAL;
+ goto chain_assoc_exit;
+ }
+
+ if (unlikely(req->assoclen == 0)) {
+ areq_ctx->assoc_buff_type = SSI_DMA_BUF_NULL;
+ areq_ctx->assoc.nents = 0;
+ areq_ctx->assoc.mlli_nents = 0;
+ SSI_LOG_DEBUG("Chain assoc of length 0: buff_type=%s nents=%u\n",
+ GET_DMA_BUFFER_TYPE(areq_ctx->assoc_buff_type),
+ areq_ctx->assoc.nents);
+ goto chain_assoc_exit;
+ }
+
+ //iterate over the sgl to see how many entries hold the associated data
+ //it is assumed that if we reach here, the sgl is already mapped
+ sg_index = current_sg->length;
+ if (sg_index > size_of_assoc) { //the first entry in the scatter list contains all the associated data
+ mapped_nents++;
+ } else {
+ while (sg_index <= size_of_assoc) {
+ current_sg = sg_next(current_sg);
+ //if we have reached the end of the sgl, this is unexpected
+ if (current_sg == NULL) {
+ SSI_LOG_ERR("reached end of sg list. unexpected\n");
+ BUG();
+ }
+ sg_index += current_sg->length;
+ mapped_nents++;
+ }
+ }
+ if (unlikely(mapped_nents > LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES)) {
+ SSI_LOG_ERR("Too many fragments. current %d max %d\n",
+ mapped_nents, LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES);
+ return -ENOMEM;
+ }
+ areq_ctx->assoc.nents = mapped_nents;
+
+ /* in CCM case we have additional entry for
+ * ccm header configurations */
+ if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
+ if (unlikely((mapped_nents + 1) >
+ LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES)) {
+
+ SSI_LOG_ERR("CCM case.Too many fragments. "
+ "Current %d max %d\n",
+ (areq_ctx->assoc.nents + 1),
+ LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES);
+ rc = -ENOMEM;
+ goto chain_assoc_exit;
+ }
+ }
+
+ if (likely(mapped_nents == 1) &&
+ (areq_ctx->ccm_hdr_size == ccm_header_size_null))
+ areq_ctx->assoc_buff_type = SSI_DMA_BUF_DLLI;
+ else
+ areq_ctx->assoc_buff_type = SSI_DMA_BUF_MLLI;
+
+ if (unlikely((do_chain == true) ||
+ (areq_ctx->assoc_buff_type == SSI_DMA_BUF_MLLI))) {
+
+ SSI_LOG_DEBUG("Chain assoc: buff_type=%s nents=%u\n",
+ GET_DMA_BUFFER_TYPE(areq_ctx->assoc_buff_type),
+ areq_ctx->assoc.nents);
+ ssi_buffer_mgr_add_scatterlist_entry(
+ sg_data, areq_ctx->assoc.nents,
+ req->src, req->assoclen, 0, is_last,
+ &areq_ctx->assoc.mlli_nents);
+ areq_ctx->assoc_buff_type = SSI_DMA_BUF_MLLI;
+ }
+
+chain_assoc_exit:
+ return rc;
+}
+
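+/*
+ * DLLI (single entry) data path: just record where the ICV lives in the
+ * source or destination scatterlist, depending on the direction.
+ */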
+static inline void ssi_buffer_mgr_prepare_aead_data_dlli(
+ struct aead_request *req,
+ uint32_t *src_last_bytes, uint32_t *dst_last_bytes)
+{
+ struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+ enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
+ unsigned int authsize = areq_ctx->req_authsize;
+
+ areq_ctx->is_icv_fragmented = false;
+ if (likely(req->src == req->dst)) {
+ /*INPLACE*/
+ areq_ctx->icv_dma_addr = sg_dma_address(
+ areq_ctx->srcSgl)+
+ (*src_last_bytes - authsize);
+ areq_ctx->icv_virt_addr = sg_virt(
+ areq_ctx->srcSgl) +
+ (*src_last_bytes - authsize);
+ } else if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
+ /*NON-INPLACE and DECRYPT*/
+ areq_ctx->icv_dma_addr = sg_dma_address(
+ areq_ctx->srcSgl) +
+ (*src_last_bytes - authsize);
+ areq_ctx->icv_virt_addr = sg_virt(
+ areq_ctx->srcSgl) +
+ (*src_last_bytes - authsize);
+ } else {
+ /*NON-INPLACE and ENCRYPT*/
+ areq_ctx->icv_dma_addr = sg_dma_address(
+ areq_ctx->dstSgl) +
+ (*dst_last_bytes - authsize);
+ areq_ctx->icv_virt_addr = sg_virt(
+ areq_ctx->dstSgl)+
+ (*dst_last_bytes - authsize);
+ }
+}
+
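+/*
+ * MLLI data path: add the src/dst scatterlists to the MLLI buffer list
+ * and locate the ICV, falling back to the backup MAC buffer whenever
+ * the ICV is fragmented across SG entries.
+ */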
+static inline int ssi_buffer_mgr_prepare_aead_data_mlli(
+ struct ssi_drvdata *drvdata,
+ struct aead_request *req,
+ struct buffer_array *sg_data,
+ uint32_t *src_last_bytes, uint32_t *dst_last_bytes,
+ bool is_last_table)
+{
+ struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+ enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
+ unsigned int authsize = areq_ctx->req_authsize;
+ int rc = 0, icv_nents;
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+
+ if (likely(req->src == req->dst)) {
+ /*INPLACE*/
+ ssi_buffer_mgr_add_scatterlist_entry(sg_data,
+ areq_ctx->src.nents, areq_ctx->srcSgl,
+ areq_ctx->cryptlen,areq_ctx->srcOffset, is_last_table,
+ &areq_ctx->src.mlli_nents);
+
+ icv_nents = ssi_buffer_mgr_get_aead_icv_nents(areq_ctx->srcSgl,
+ areq_ctx->src.nents, authsize, *src_last_bytes,
+ &areq_ctx->is_icv_fragmented);
+ if (unlikely(icv_nents < 0)) {
+ rc = -ENOTSUPP;
+ goto prepare_data_mlli_exit;
+ }
+
+ if (unlikely(areq_ctx->is_icv_fragmented == true)) {
+ /* The backup is taken only when the ICV is fragmented; ICV
+ verification is then done by a CPU compare in order to simplify
+ MAC verification upon request completion */
+ if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
+#if !DX_HAS_ACP
+ /* On ACP platforms the ICV is already copied
+ for any INPLACE-DECRYPT operation, hence
+ this code must be skipped there. */
+ uint32_t size_to_skip = req->assoclen;
+ if (areq_ctx->is_gcm4543) {
+ size_to_skip += crypto_aead_ivsize(tfm);
+ }
+ ssi_buffer_mgr_copy_scatterlist_portion(
+ areq_ctx->backup_mac, req->src,
+ size_to_skip+ req->cryptlen - areq_ctx->req_authsize,
+ size_to_skip+ req->cryptlen, SSI_SG_TO_BUF);
+#endif
+ areq_ctx->icv_virt_addr = areq_ctx->backup_mac;
+ } else {
+ areq_ctx->icv_virt_addr = areq_ctx->mac_buf;
+ areq_ctx->icv_dma_addr = areq_ctx->mac_buf_dma_addr;
+ }
+ } else { /* Contig. ICV */
+ /* Should handle the case where the sg is not contiguous */
+ areq_ctx->icv_dma_addr = sg_dma_address(
+ &areq_ctx->srcSgl[areq_ctx->src.nents - 1]) +
+ (*src_last_bytes - authsize);
+ areq_ctx->icv_virt_addr = sg_virt(
+ &areq_ctx->srcSgl[areq_ctx->src.nents - 1]) +
+ (*src_last_bytes - authsize);
+ }
+
+ } else if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
+ /*NON-INPLACE and DECRYPT*/
+ ssi_buffer_mgr_add_scatterlist_entry(sg_data,
+ areq_ctx->src.nents, areq_ctx->srcSgl,
+ areq_ctx->cryptlen, areq_ctx->srcOffset,is_last_table,
+ &areq_ctx->src.mlli_nents);
+ ssi_buffer_mgr_add_scatterlist_entry(sg_data,
+ areq_ctx->dst.nents, areq_ctx->dstSgl,
+ areq_ctx->cryptlen,areq_ctx->dstOffset, is_last_table,
+ &areq_ctx->dst.mlli_nents);
+
+ icv_nents = ssi_buffer_mgr_get_aead_icv_nents(areq_ctx->srcSgl,
+ areq_ctx->src.nents, authsize, *src_last_bytes,
+ &areq_ctx->is_icv_fragmented);
+ if (unlikely(icv_nents < 0)) {
+ rc = -ENOTSUPP;
+ goto prepare_data_mlli_exit;
+ }
+
+ if (unlikely(areq_ctx->is_icv_fragmented == true)) {
+ /* Backup happens only when ICV is fragmented, ICV
+ verification is made by CPU compare in order to simplify
+ MAC verification upon request completion */
+ uint32_t size_to_skip = req->assoclen;
+ if (areq_ctx->is_gcm4543) {
+ size_to_skip += crypto_aead_ivsize(tfm);
+ }
+ ssi_buffer_mgr_copy_scatterlist_portion(
+ areq_ctx->backup_mac, req->src,
+ size_to_skip+ req->cryptlen - areq_ctx->req_authsize,
+ size_to_skip+ req->cryptlen, SSI_SG_TO_BUF);
+ areq_ctx->icv_virt_addr = areq_ctx->backup_mac;
+ } else { /* Contig. ICV */
+ /* Should handle the case where the sg is not contiguous */
+ areq_ctx->icv_dma_addr = sg_dma_address(
+ &areq_ctx->srcSgl[areq_ctx->src.nents - 1]) +
+ (*src_last_bytes - authsize);
+ areq_ctx->icv_virt_addr = sg_virt(
+ &areq_ctx->srcSgl[areq_ctx->src.nents - 1]) +
+ (*src_last_bytes - authsize);
+ }
+
+ } else {
+ /*NON-INPLACE and ENCRYPT*/
+ ssi_buffer_mgr_add_scatterlist_entry(sg_data,
+ areq_ctx->dst.nents, areq_ctx->dstSgl,
+ areq_ctx->cryptlen,areq_ctx->dstOffset, is_last_table,
+ &areq_ctx->dst.mlli_nents);
+ ssi_buffer_mgr_add_scatterlist_entry(sg_data,
+ areq_ctx->src.nents, areq_ctx->srcSgl,
+ areq_ctx->cryptlen, areq_ctx->srcOffset,is_last_table,
+ &areq_ctx->src.mlli_nents);
+
+ icv_nents = ssi_buffer_mgr_get_aead_icv_nents(areq_ctx->dstSgl,
+ areq_ctx->dst.nents, authsize, *dst_last_bytes,
+ &areq_ctx->is_icv_fragmented);
+ if (unlikely(icv_nents < 0)) {
+ rc = -ENOTSUPP;
+ goto prepare_data_mlli_exit;
+ }
+
+ if (likely(areq_ctx->is_icv_fragmented == false)) {
+ /* Contig. ICV */
+ areq_ctx->icv_dma_addr = sg_dma_address(
+ &areq_ctx->dstSgl[areq_ctx->dst.nents - 1]) +
+ (*dst_last_bytes - authsize);
+ areq_ctx->icv_virt_addr = sg_virt(
+ &areq_ctx->dstSgl[areq_ctx->dst.nents - 1]) +
+ (*dst_last_bytes - authsize);
+ } else {
+ areq_ctx->icv_dma_addr = areq_ctx->mac_buf_dma_addr;
+ areq_ctx->icv_virt_addr = areq_ctx->mac_buf;
+ }
+ }
+
+prepare_data_mlli_exit:
+ return rc;
+}
+
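+/*
+ * Map and chain the data part of the request: skip the associated data
+ * (and the rfc4543 IV) at the head of the scatterlists, map the
+ * destination for out-of-place requests and choose DLLI or MLLI.
+ */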
+static inline int ssi_buffer_mgr_aead_chain_data(
+ struct ssi_drvdata *drvdata,
+ struct aead_request *req,
+ struct buffer_array *sg_data,
+ bool is_last_table, bool do_chain)
+{
+ struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+ struct device *dev = &drvdata->plat_dev->dev;
+ enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
+ unsigned int authsize = areq_ctx->req_authsize;
+ int src_last_bytes = 0, dst_last_bytes = 0;
+ int rc = 0;
+ uint32_t src_mapped_nents = 0, dst_mapped_nents = 0;
+ uint32_t offset = 0;
+ unsigned int size_for_map = req->assoclen + req->cryptlen; /* non-inplace mode */
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ uint32_t sg_index = 0;
+ bool chained = false;
+ bool is_gcm4543 = areq_ctx->is_gcm4543;
+ uint32_t size_to_skip = req->assoclen;
+ if (is_gcm4543) {
+ size_to_skip += crypto_aead_ivsize(tfm);
+ }
+ offset = size_to_skip;
+
+ if (sg_data == NULL) {
+ rc = -EINVAL;
+ goto chain_data_exit;
+ }
+ areq_ctx->srcSgl = req->src;
+ areq_ctx->dstSgl = req->dst;
+
+ if (is_gcm4543) {
+ size_for_map += crypto_aead_ivsize(tfm);
+ }
+
+ size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ? authsize:0;
+ src_mapped_nents = ssi_buffer_mgr_get_sgl_nents(req->src,size_for_map,&src_last_bytes, &chained);
+ sg_index = areq_ctx->srcSgl->length;
+ //check where the data starts
+ while (sg_index <= size_to_skip) {
+ offset -= areq_ctx->srcSgl->length;
+ areq_ctx->srcSgl = sg_next(areq_ctx->srcSgl);
+ //if we have reached the end of the sgl, this is unexpected
+ if (areq_ctx->srcSgl == NULL) {
+ SSI_LOG_ERR("reached end of sg list. unexpected\n");
+ BUG();
+ }
+ sg_index += areq_ctx->srcSgl->length;
+ src_mapped_nents--;
+ }
+ if (unlikely(src_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES)) {
+ SSI_LOG_ERR("Too many fragments. current %d max %d\n",
+ src_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES);
+ return -ENOMEM;
+ }
+
+ areq_ctx->src.nents = src_mapped_nents;
+
+ areq_ctx->srcOffset = offset;
+
+ if (req->src != req->dst) {
+ size_for_map = req->assoclen + req->cryptlen;
+ size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ? authsize : 0;
+ if (is_gcm4543) {
+ size_for_map += crypto_aead_ivsize(tfm);
+ }
+
+ rc = ssi_buffer_mgr_map_scatterlist(dev, req->dst, size_for_map,
+ DMA_BIDIRECTIONAL, &(areq_ctx->dst.nents),
+ LLI_MAX_NUM_OF_DATA_ENTRIES, &dst_last_bytes,
+ &dst_mapped_nents);
+ if (unlikely(rc != 0)) {
+ rc = -ENOMEM;
+ goto chain_data_exit;
+ }
+ }
+
+ dst_mapped_nents = ssi_buffer_mgr_get_sgl_nents(req->dst,size_for_map,&dst_last_bytes, &chained);
+ sg_index = areq_ctx->dstSgl->length;
+ offset = size_to_skip;
+
+ //check where the data starts
+ while (sg_index <= size_to_skip) {
+
+ offset -= areq_ctx->dstSgl->length;
+ areq_ctx->dstSgl = sg_next(areq_ctx->dstSgl);
+ //if we have reached the end of the sgl, this is unexpected
+ if (areq_ctx->dstSgl == NULL) {
+ SSI_LOG_ERR("reached end of sg list. unexpected\n");
+ BUG();
+ }
+ sg_index += areq_ctx->dstSgl->length;
+ dst_mapped_nents--;
+ }
+ if (unlikely(dst_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES)) {
+ SSI_LOG_ERR("Too many fragments. current %d max %d\n",
+ dst_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES);
+ return -ENOMEM;
+ }
+ areq_ctx->dst.nents = dst_mapped_nents;
+ areq_ctx->dstOffset = offset;
+ if ((src_mapped_nents > 1) ||
+ (dst_mapped_nents > 1) ||
+ (do_chain == true)) {
+ areq_ctx->data_buff_type = SSI_DMA_BUF_MLLI;
+ rc = ssi_buffer_mgr_prepare_aead_data_mlli(drvdata, req, sg_data,
+ &src_last_bytes, &dst_last_bytes, is_last_table);
+ } else {
+ areq_ctx->data_buff_type = SSI_DMA_BUF_DLLI;
+ ssi_buffer_mgr_prepare_aead_data_dlli(
+ req, &src_last_bytes, &dst_last_bytes);
+ }
+
+chain_data_exit:
+ return rc;
+}
+
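+/*
+ * Lay the assoc/src/dst MLLI tables out in SRAM one after another and,
+ * for non-single-pass flows, fold the data entry counts into the assoc
+ * table length.
+ */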
+static void ssi_buffer_mgr_update_aead_mlli_nents( struct ssi_drvdata *drvdata,
+ struct aead_request *req)
+{
+ struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+ uint32_t curr_mlli_size = 0;
+
+ if (areq_ctx->assoc_buff_type == SSI_DMA_BUF_MLLI) {
+ areq_ctx->assoc.sram_addr = drvdata->mlli_sram_addr;
+ curr_mlli_size = areq_ctx->assoc.mlli_nents *
+ LLI_ENTRY_BYTE_SIZE;
+ }
+
+ if (areq_ctx->data_buff_type == SSI_DMA_BUF_MLLI) {
+ /*Inplace case dst nents equal to src nents*/
+ if (req->src == req->dst) {
+ areq_ctx->dst.mlli_nents = areq_ctx->src.mlli_nents;
+ areq_ctx->src.sram_addr = drvdata->mlli_sram_addr +
+ curr_mlli_size;
+ areq_ctx->dst.sram_addr = areq_ctx->src.sram_addr;
+ if (areq_ctx->is_single_pass == false)
+ areq_ctx->assoc.mlli_nents +=
+ areq_ctx->src.mlli_nents;
+ } else {
+ if (areq_ctx->gen_ctx.op_type ==
+ DRV_CRYPTO_DIRECTION_DECRYPT) {
+ areq_ctx->src.sram_addr =
+ drvdata->mlli_sram_addr +
+ curr_mlli_size;
+ areq_ctx->dst.sram_addr =
+ areq_ctx->src.sram_addr +
+ areq_ctx->src.mlli_nents *
+ LLI_ENTRY_BYTE_SIZE;
+ if (areq_ctx->is_single_pass == false)
+ areq_ctx->assoc.mlli_nents +=
+ areq_ctx->src.mlli_nents;
+ } else {
+ areq_ctx->dst.sram_addr =
+ drvdata->mlli_sram_addr +
+ curr_mlli_size;
+ areq_ctx->src.sram_addr =
+ areq_ctx->dst.sram_addr +
+ areq_ctx->dst.mlli_nents *
+ LLI_ENTRY_BYTE_SIZE;
+ if (areq_ctx->is_single_pass == false)
+ areq_ctx->assoc.mlli_nents +=
+ areq_ctx->dst.mlli_nents;
+ }
+ }
+ }
+}
+
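+/*
+ * Top-level AEAD DMA mapping: back up the MAC when required (ACP), map
+ * the MAC buffer and the CCM/GCM configuration blocks, then map and
+ * chain the IV, associated data and payload.
+ */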
+int ssi_buffer_mgr_map_aead_request(
+ struct ssi_drvdata *drvdata, struct aead_request *req)
+{
+ struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+ struct mlli_params *mlli_params = &areq_ctx->mlli_params;
+ struct device *dev = &drvdata->plat_dev->dev;
+ struct buffer_array sg_data;
+ unsigned int authsize = areq_ctx->req_authsize;
+ struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
+ int rc = 0;
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ bool is_gcm4543 = areq_ctx->is_gcm4543;
+
+ uint32_t mapped_nents = 0;
+ uint32_t dummy = 0; /*used for the assoc data fragments */
+ uint32_t size_to_map = 0;
+
+ mlli_params->curr_pool = NULL;
+ sg_data.num_of_buffers = 0;
+
+#if DX_HAS_ACP
+ if ((areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) &&
+ likely(req->src == req->dst))
+ {
+ uint32_t size_to_skip = req->assoclen;
+ if (is_gcm4543) {
+ size_to_skip += crypto_aead_ivsize(tfm);
+ }
+ /* copy the MAC to a temporary location to deal with a possible
+ data overwrite caused by a cache coherence problem. */
+ ssi_buffer_mgr_copy_scatterlist_portion(
+ areq_ctx->backup_mac, req->src,
+ size_to_skip+ req->cryptlen - areq_ctx->req_authsize,
+ size_to_skip+ req->cryptlen, SSI_SG_TO_BUF);
+ }
+#endif
+
+ /* Calculate the cipher data size; the ICV is excluded on decrypt */
+ areq_ctx->cryptlen = (areq_ctx->gen_ctx.op_type ==
+ DRV_CRYPTO_DIRECTION_ENCRYPT) ?
+ req->cryptlen :
+ (req->cryptlen - authsize);
+
+ areq_ctx->mac_buf_dma_addr = dma_map_single(dev,
+ areq_ctx->mac_buf, MAX_MAC_SIZE, DMA_BIDIRECTIONAL);
+ if (unlikely(dma_mapping_error(dev, areq_ctx->mac_buf_dma_addr))) {
+ SSI_LOG_ERR("Mapping mac_buf %u B at va=%pK for DMA failed\n",
+ MAX_MAC_SIZE, areq_ctx->mac_buf);
+ rc = -ENOMEM;
+ goto aead_map_failure;
+ }
+ SSI_UPDATE_DMA_ADDR_TO_48BIT(areq_ctx->mac_buf_dma_addr, MAX_MAC_SIZE);
+
+ if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
+ areq_ctx->ccm_iv0_dma_addr = dma_map_single(dev,
+ (areq_ctx->ccm_config + CCM_CTR_COUNT_0_OFFSET),
+ AES_BLOCK_SIZE, DMA_TO_DEVICE);
+
+ if (unlikely(dma_mapping_error(dev, areq_ctx->ccm_iv0_dma_addr))) {
+ SSI_LOG_ERR("Mapping mac_buf %u B at va=%pK "
+ "for DMA failed\n", AES_BLOCK_SIZE,
+ (areq_ctx->ccm_config + CCM_CTR_COUNT_0_OFFSET));
+ areq_ctx->ccm_iv0_dma_addr = 0;
+ rc = -ENOMEM;
+ goto aead_map_failure;
+ }
+ SSI_UPDATE_DMA_ADDR_TO_48BIT(areq_ctx->ccm_iv0_dma_addr,
+ AES_BLOCK_SIZE);
+ if (ssi_aead_handle_config_buf(dev, areq_ctx,
+ areq_ctx->ccm_config, &sg_data, req->assoclen) != 0) {
+ rc = -ENOMEM;
+ goto aead_map_failure;
+ }
+ }
+
+#if SSI_CC_HAS_AES_GCM
+ if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) {
+ areq_ctx->hkey_dma_addr = dma_map_single(dev,
+ areq_ctx->hkey, AES_BLOCK_SIZE, DMA_BIDIRECTIONAL);
+ if (unlikely(dma_mapping_error(dev, areq_ctx->hkey_dma_addr))) {
+ SSI_LOG_ERR("Mapping hkey %u B at va=%pK for DMA failed\n",
+ AES_BLOCK_SIZE, areq_ctx->hkey);
+ rc = -ENOMEM;
+ goto aead_map_failure;
+ }
+ SSI_UPDATE_DMA_ADDR_TO_48BIT(areq_ctx->hkey_dma_addr, AES_BLOCK_SIZE);
+
+ areq_ctx->gcm_block_len_dma_addr = dma_map_single(dev,
+ &areq_ctx->gcm_len_block, AES_BLOCK_SIZE, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev, areq_ctx->gcm_block_len_dma_addr))) {
+ SSI_LOG_ERR("Mapping gcm_len_block %u B at va=%pK for DMA failed\n",
+ AES_BLOCK_SIZE, &areq_ctx->gcm_len_block);
+ rc = -ENOMEM;
+ goto aead_map_failure;
+ }
+ SSI_UPDATE_DMA_ADDR_TO_48BIT(areq_ctx->gcm_block_len_dma_addr, AES_BLOCK_SIZE);
+
+ areq_ctx->gcm_iv_inc1_dma_addr = dma_map_single(dev,
+ areq_ctx->gcm_iv_inc1,
+ AES_BLOCK_SIZE, DMA_TO_DEVICE);
+
+ if (unlikely(dma_mapping_error(dev, areq_ctx->gcm_iv_inc1_dma_addr))) {
+ SSI_LOG_ERR("Mapping gcm_iv_inc1 %u B at va=%pK "
+ "for DMA failed\n", AES_BLOCK_SIZE,
+ (areq_ctx->gcm_iv_inc1));
+ areq_ctx->gcm_iv_inc1_dma_addr = 0;
+ rc = -ENOMEM;
+ goto aead_map_failure;
+ }
+ SSI_UPDATE_DMA_ADDR_TO_48BIT(areq_ctx->gcm_iv_inc1_dma_addr,
+ AES_BLOCK_SIZE);
+
+ areq_ctx->gcm_iv_inc2_dma_addr = dma_map_single(dev,
+ areq_ctx->gcm_iv_inc2,
+ AES_BLOCK_SIZE, DMA_TO_DEVICE);
+
+ if (unlikely(dma_mapping_error(dev, areq_ctx->gcm_iv_inc2_dma_addr))) {
+ SSI_LOG_ERR("Mapping gcm_iv_inc2 %u B at va=%pK "
+ "for DMA failed\n", AES_BLOCK_SIZE,
+ (areq_ctx->gcm_iv_inc2));
+ areq_ctx->gcm_iv_inc2_dma_addr = 0;
+ rc = -ENOMEM;
+ goto aead_map_failure;
+ }
+ SSI_UPDATE_DMA_ADDR_TO_48BIT(areq_ctx->gcm_iv_inc2_dma_addr,
+ AES_BLOCK_SIZE);
+ }
+#endif /*SSI_CC_HAS_AES_GCM*/
+
+ size_to_map = req->cryptlen + req->assoclen;
+ if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT) {
+ size_to_map += authsize;
+ }
+ if (is_gcm4543)
+ size_to_map += crypto_aead_ivsize(tfm);
+ rc = ssi_buffer_mgr_map_scatterlist(dev, req->src,
+ size_to_map, DMA_BIDIRECTIONAL, &(areq_ctx->src.nents),
+ LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES+LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy, &mapped_nents);
+ if (unlikely(rc != 0)) {
+ rc = -ENOMEM;
+ goto aead_map_failure;
+ }
+
+ if (likely(areq_ctx->is_single_pass == true)) {
+ /*
+ * Create MLLI table for:
+ * (1) Assoc. data
+ * (2) Src/Dst SGLs
+ * Note: the IV is a contiguous buffer (not an SGL)
+ */
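+ /*
+ * Illustrative example: with a single-entry src SGL operating in
+ * place (req->src == req->dst), the chaining below typically ends
+ * up as DLLI and no MLLI table has to be generated.
+ */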
+ rc = ssi_buffer_mgr_aead_chain_assoc(drvdata, req, &sg_data, true, false);
+ if (unlikely(rc != 0))
+ goto aead_map_failure;
+ rc = ssi_buffer_mgr_aead_chain_iv(drvdata, req, &sg_data, true, false);
+ if (unlikely(rc != 0))
+ goto aead_map_failure;
+ rc = ssi_buffer_mgr_aead_chain_data(drvdata, req, &sg_data, true, false);
+ if (unlikely(rc != 0))
+ goto aead_map_failure;
+ } else { /* DOUBLE-PASS flow */
+ /*
+ * Prepare MLLI table(s) in this order:
+ *
+ * If ENCRYPT/DECRYPT (inplace):
+ * (1) MLLI table for assoc
+ * (2) IV entry (chained right after end of assoc)
+ * (3) MLLI for src/dst (inplace operation)
+ *
+ * If ENCRYPT (non-inplace)
+ * (1) MLLI table for assoc
+ * (2) IV entry (chained right after end of assoc)
+ * (3) MLLI for dst
+ * (4) MLLI for src
+ *
+ * If DECRYPT (non-inplace)
+ * (1) MLLI table for assoc
+ * (2) IV entry (chained right after end of assoc)
+ * (3) MLLI for src
+ * (4) MLLI for dst
+ */
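+ /*
+ * Illustrative SRAM layout for the non-inplace ENCRYPT case, as
+ * laid out by ssi_buffer_mgr_update_aead_mlli_nents():
+ *
+ *   mlli_sram_addr -> [assoc entries][dst entries][src entries]
+ *
+ * where each entry is LLI_ENTRY_BYTE_SIZE bytes.
+ */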
+ rc = ssi_buffer_mgr_aead_chain_assoc(drvdata, req, &sg_data, false, true);
+ if (unlikely(rc != 0))
+ goto aead_map_failure;
+ rc = ssi_buffer_mgr_aead_chain_iv(drvdata, req, &sg_data, false, true);
+ if (unlikely(rc != 0))
+ goto aead_map_failure;
+ rc = ssi_buffer_mgr_aead_chain_data(drvdata, req, &sg_data, true, true);
+ if (unlikely(rc != 0))
+ goto aead_map_failure;
+ }
+
+ /* MLLI support - start building the MLLI according to the above results */
+ if (unlikely(
+ (areq_ctx->assoc_buff_type == SSI_DMA_BUF_MLLI) ||
+ (areq_ctx->data_buff_type == SSI_DMA_BUF_MLLI))) {
+
+ mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
+ rc = ssi_buffer_mgr_generate_mlli(dev, &sg_data, mlli_params);
+ if (unlikely(rc != 0)) {
+ goto aead_map_failure;
+ }
+
+ ssi_buffer_mgr_update_aead_mlli_nents(drvdata, req);
+ SSI_LOG_DEBUG("assoc params mn %d\n",areq_ctx->assoc.mlli_nents);
+ SSI_LOG_DEBUG("src params mn %d\n",areq_ctx->src.mlli_nents);
+ SSI_LOG_DEBUG("dst params mn %d\n",areq_ctx->dst.mlli_nents);
+ }
+ return 0;
+
+aead_map_failure:
+ ssi_buffer_mgr_unmap_aead_request(dev, req);
+ return rc;
+}
+
+int ssi_buffer_mgr_map_hash_request_final(
+ struct ssi_drvdata *drvdata, void *ctx, struct scatterlist *src, unsigned int nbytes, bool do_update)
+{
+ struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
+ struct device *dev = &drvdata->plat_dev->dev;
+ uint8_t* curr_buff = areq_ctx->buff_index ? areq_ctx->buff1 :
+ areq_ctx->buff0;
+ uint32_t *curr_buff_cnt = areq_ctx->buff_index ? &areq_ctx->buff1_cnt :
+ &areq_ctx->buff0_cnt;
+ struct mlli_params *mlli_params = &areq_ctx->mlli_params;
+ struct buffer_array sg_data;
+ struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
+ uint32_t dummy = 0;
+ uint32_t mapped_nents = 0;
+
+ SSI_LOG_DEBUG(" final params : curr_buff=%pK "
+ "curr_buff_cnt=0x%X nbytes = 0x%X "
+ "src=%pK curr_index=%u\n",
+ curr_buff, *curr_buff_cnt, nbytes,
+ src, areq_ctx->buff_index);
+ /* Init the type of the dma buffer */
+ areq_ctx->data_dma_buf_type = SSI_DMA_BUF_NULL;
+ mlli_params->curr_pool = NULL;
+ sg_data.num_of_buffers = 0;
+ areq_ctx->in_nents = 0;
+
+ if (unlikely(nbytes == 0 && *curr_buff_cnt == 0)) {
+ /* nothing to do */
+ return 0;
+ }
+
+ /* TODO: copy the data directly in case the buffer is big enough for the operation */
+ /* map the previous buffer */
+ if (*curr_buff_cnt != 0 ) {
+ if (ssi_ahash_handle_curr_buf(dev, areq_ctx, curr_buff,
+ *curr_buff_cnt, &sg_data) != 0) {
+ return -ENOMEM;
+ }
+ }
+
+ if (src && (nbytes > 0) && do_update) {
+ if ( unlikely( ssi_buffer_mgr_map_scatterlist( dev,src,
+ nbytes,
+ DMA_TO_DEVICE,
+ &areq_ctx->in_nents,
+ LLI_MAX_NUM_OF_DATA_ENTRIES,
+ &dummy, &mapped_nents))){
+ goto unmap_curr_buff;
+ }
+ if ( src && (mapped_nents == 1)
+ && (areq_ctx->data_dma_buf_type == SSI_DMA_BUF_NULL) ) {
+ memcpy(areq_ctx->buff_sg,src,
+ sizeof(struct scatterlist));
+ areq_ctx->buff_sg->length = nbytes;
+ areq_ctx->curr_sg = areq_ctx->buff_sg;
+ areq_ctx->data_dma_buf_type = SSI_DMA_BUF_DLLI;
+ } else {
+ areq_ctx->data_dma_buf_type = SSI_DMA_BUF_MLLI;
+ }
+
+ }
+
+ /* build MLLI */
+ if (unlikely(areq_ctx->data_dma_buf_type == SSI_DMA_BUF_MLLI)) {
+ mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
+ /* add the src data to the sg_data */
+ ssi_buffer_mgr_add_scatterlist_entry(&sg_data,
+ areq_ctx->in_nents,
+ src,
+ nbytes, 0,
+ true, &areq_ctx->mlli_nents);
+ if (unlikely(ssi_buffer_mgr_generate_mlli(dev, &sg_data,
+ mlli_params) != 0)) {
+ goto fail_unmap_din;
+ }
+ }
+ /* change the buffer index for the unmap function */
+ areq_ctx->buff_index = (areq_ctx->buff_index^1);
+ SSI_LOG_DEBUG("areq_ctx->data_dma_buf_type = %s\n",
+ GET_DMA_BUFFER_TYPE(areq_ctx->data_dma_buf_type));
+ return 0;
+
+fail_unmap_din:
+ dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE);
+
+unmap_curr_buff:
+ if (*curr_buff_cnt != 0 ) {
+ dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
+ }
+ return -ENOMEM;
+}
+
+int ssi_buffer_mgr_map_hash_request_update(
+ struct ssi_drvdata *drvdata, void *ctx, struct scatterlist *src, unsigned int nbytes, unsigned int block_size)
+{
+ struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
+ struct device *dev = &drvdata->plat_dev->dev;
+ uint8_t* curr_buff = areq_ctx->buff_index ? areq_ctx->buff1 :
+ areq_ctx->buff0;
+ uint32_t *curr_buff_cnt = areq_ctx->buff_index ? &areq_ctx->buff1_cnt :
+ &areq_ctx->buff0_cnt;
+ uint8_t* next_buff = areq_ctx->buff_index ? areq_ctx->buff0 :
+ areq_ctx->buff1;
+ uint32_t *next_buff_cnt = areq_ctx->buff_index ? &areq_ctx->buff0_cnt :
+ &areq_ctx->buff1_cnt;
+ struct mlli_params *mlli_params = &areq_ctx->mlli_params;
+ unsigned int update_data_len;
+ uint32_t total_in_len = nbytes + *curr_buff_cnt;
+ struct buffer_array sg_data;
+ struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
+ unsigned int swap_index = 0;
+ uint32_t dummy = 0;
+ uint32_t mapped_nents = 0;
+
+ SSI_LOG_DEBUG(" update params : curr_buff=%pK "
+ "curr_buff_cnt=0x%X nbytes=0x%X "
+ "src=%pK curr_index=%u \n",
+ curr_buff, *curr_buff_cnt, nbytes,
+ src, areq_ctx->buff_index);
+ /* Init the type of the dma buffer */
+ areq_ctx->data_dma_buf_type = SSI_DMA_BUF_NULL;
+ mlli_params->curr_pool = NULL;
+ areq_ctx->curr_sg = NULL;
+ sg_data.num_of_buffers = 0;
+ areq_ctx->in_nents = 0;
+
+ if (unlikely(total_in_len < block_size)) {
+ SSI_LOG_DEBUG(" less than one block: curr_buff=%pK "
+ "*curr_buff_cnt=0x%X copy_to=%pK\n",
+ curr_buff, *curr_buff_cnt,
+ &curr_buff[*curr_buff_cnt]);
+ areq_ctx->in_nents =
+ ssi_buffer_mgr_get_sgl_nents(src,
+ nbytes,
+ &dummy, NULL);
+ sg_copy_to_buffer(src, areq_ctx->in_nents,
+ &curr_buff[*curr_buff_cnt], nbytes);
+ *curr_buff_cnt += nbytes;
+ return 1;
+ }
+
+ /* Calculate the residue size */
+ *next_buff_cnt = total_in_len & (block_size - 1);
+ /* update data len */
+ update_data_len = total_in_len - *next_buff_cnt;
+
+ SSI_LOG_DEBUG(" temp length : *next_buff_cnt=0x%X "
+ "update_data_len=0x%X\n",
+ *next_buff_cnt, update_data_len);
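+ /*
+ * Worked example (block_size is assumed to be a power of two): with
+ * block_size = 64, *curr_buff_cnt = 10 and nbytes = 100,
+ * total_in_len = 110, so *next_buff_cnt = 110 & 63 = 46 and
+ * update_data_len = 64.
+ */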
+
+ /* Copy the new residue to next buffer */
+ if (*next_buff_cnt != 0) {
+ SSI_LOG_DEBUG(" handle residue: next buff %pK skip data %u"
+ " residue %u \n", next_buff,
+ (update_data_len - *curr_buff_cnt),
+ *next_buff_cnt);
+ ssi_buffer_mgr_copy_scatterlist_portion(next_buff, src,
+ (update_data_len -*curr_buff_cnt),
+ nbytes,SSI_SG_TO_BUF);
+ /* change the buffer index for next operation */
+ swap_index = 1;
+ }
+
+ if (*curr_buff_cnt != 0) {
+ if (ssi_ahash_handle_curr_buf(dev, areq_ctx, curr_buff,
+ *curr_buff_cnt, &sg_data) != 0) {
+ return -ENOMEM;
+ }
+ /* change the buffer index for next operation */
+ swap_index = 1;
+ }
+
+ if ( update_data_len > *curr_buff_cnt ) {
+ if ( unlikely( ssi_buffer_mgr_map_scatterlist( dev,src,
+ (update_data_len -*curr_buff_cnt),
+ DMA_TO_DEVICE,
+ &areq_ctx->in_nents,
+ LLI_MAX_NUM_OF_DATA_ENTRIES,
+ &dummy, &mapped_nents))){
+ goto unmap_curr_buff;
+ }
+ if ( (mapped_nents == 1)
+ && (areq_ctx->data_dma_buf_type == SSI_DMA_BUF_NULL) ) {
+ /* only one entry in the SG and no previous data */
+ memcpy(areq_ctx->buff_sg,src,
+ sizeof(struct scatterlist));
+ areq_ctx->buff_sg->length = update_data_len;
+ areq_ctx->data_dma_buf_type = SSI_DMA_BUF_DLLI;
+ areq_ctx->curr_sg = areq_ctx->buff_sg;
+ } else {
+ areq_ctx->data_dma_buf_type = SSI_DMA_BUF_MLLI;
+ }
+ }
+
+ if (unlikely(areq_ctx->data_dma_buf_type == SSI_DMA_BUF_MLLI)) {
+ mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
+ /* add the src data to the sg_data */
+ ssi_buffer_mgr_add_scatterlist_entry(&sg_data,
+ areq_ctx->in_nents,
+ src,
+ (update_data_len - *curr_buff_cnt), 0,
+ true, &areq_ctx->mlli_nents);
+ if (unlikely(ssi_buffer_mgr_generate_mlli(dev, &sg_data,
+ mlli_params) != 0)) {
+ goto fail_unmap_din;
+ }
+
+ }
+ areq_ctx->buff_index = (areq_ctx->buff_index^swap_index);
+
+ return 0;
+
+fail_unmap_din:
+ dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE);
+
+unmap_curr_buff:
+ if (*curr_buff_cnt != 0 ) {
+ dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
+ }
+ return -ENOMEM;
+}
+
+void ssi_buffer_mgr_unmap_hash_request(
+ struct device *dev, void *ctx, struct scatterlist *src, bool do_revert)
+{
+ struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
+ uint32_t *prev_len = areq_ctx->buff_index ? &areq_ctx->buff0_cnt :
+ &areq_ctx->buff1_cnt;
+
+ /* In case a pool was set, a table was
+ * allocated and should be released
+ */
+ if (areq_ctx->mlli_params.curr_pool != NULL) {
+ SSI_LOG_DEBUG("free MLLI buffer: dma=0x%llX virt=%pK\n",
+ (unsigned long long)areq_ctx->mlli_params.mlli_dma_addr,
+ areq_ctx->mlli_params.mlli_virt_addr);
+ SSI_RESTORE_DMA_ADDR_TO_48BIT(areq_ctx->mlli_params.mlli_dma_addr);
+ dma_pool_free(areq_ctx->mlli_params.curr_pool,
+ areq_ctx->mlli_params.mlli_virt_addr,
+ areq_ctx->mlli_params.mlli_dma_addr);
+ }
+
+ if ((src) && likely(areq_ctx->in_nents != 0)) {
+ SSI_LOG_DEBUG("Unmapped sg src: virt=%pK dma=0x%llX len=0x%X\n",
+ sg_virt(src),
+ (unsigned long long)sg_dma_address(src),
+ sg_dma_len(src));
+ SSI_RESTORE_DMA_ADDR_TO_48BIT(sg_dma_address(src));
+ dma_unmap_sg(dev, src,
+ areq_ctx->in_nents, DMA_TO_DEVICE);
+ }
+
+ if (*prev_len != 0) {
+ SSI_LOG_DEBUG("Unmapped buffer: areq_ctx->buff_sg=%pK"
+ "dma=0x%llX len 0x%X\n",
+ sg_virt(areq_ctx->buff_sg),
+ (unsigned long long)sg_dma_address(areq_ctx->buff_sg),
+ sg_dma_len(areq_ctx->buff_sg));
+ dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
+ if (!do_revert) {
+ /* clean the previous data length for update operation */
+ *prev_len = 0;
+ } else {
+ areq_ctx->buff_index ^= 1;
+ }
+ }
+}
+
+int ssi_buffer_mgr_init(struct ssi_drvdata *drvdata)
+{
+ struct buff_mgr_handle *buff_mgr_handle;
+ struct device *dev = &drvdata->plat_dev->dev;
+
+ buff_mgr_handle = (struct buff_mgr_handle *)
+ kmalloc(sizeof(struct buff_mgr_handle), GFP_KERNEL);
+ if (buff_mgr_handle == NULL)
+ return -ENOMEM;
+
+ drvdata->buff_mgr_handle = buff_mgr_handle;
+
+ buff_mgr_handle->mlli_buffs_pool = dma_pool_create(
+ "dx_single_mlli_tables", dev,
+ MAX_NUM_OF_TOTAL_MLLI_ENTRIES *
+ LLI_ENTRY_BYTE_SIZE,
+ MLLI_TABLE_MIN_ALIGNMENT, 0);
+
+ if (unlikely(buff_mgr_handle->mlli_buffs_pool == NULL))
+ goto error;
+
+ return 0;
+
+error:
+ ssi_buffer_mgr_fini(drvdata);
+ return -ENOMEM;
+}
+
+int ssi_buffer_mgr_fini(struct ssi_drvdata *drvdata)
+{
+ struct buff_mgr_handle *buff_mgr_handle = drvdata->buff_mgr_handle;
+
+ if (buff_mgr_handle != NULL) {
+ dma_pool_destroy(buff_mgr_handle->mlli_buffs_pool);
+ kfree(drvdata->buff_mgr_handle);
+ drvdata->buff_mgr_handle = NULL;
+
+ }
+ return 0;
+}
+
diff --git a/drivers/staging/ccree/ssi_buffer_mgr.h b/drivers/staging/ccree/ssi_buffer_mgr.h
new file mode 100644
index 000000000000..5f4b032389f1
--- /dev/null
+++ b/drivers/staging/ccree/ssi_buffer_mgr.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright (C) 2012-2017 ARM Limited or its affiliates.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/* \file ssi_buffer_mgr.h
+ * Buffer Manager
+ */
+
+#ifndef __SSI_BUFFER_MGR_H__
+#define __SSI_BUFFER_MGR_H__
+
+#include <crypto/algapi.h>
+
+#include "ssi_config.h"
+#include "ssi_driver.h"
+
+
+enum ssi_req_dma_buf_type {
+ SSI_DMA_BUF_NULL = 0,
+ SSI_DMA_BUF_DLLI,
+ SSI_DMA_BUF_MLLI
+};
+
+enum ssi_sg_cpy_direct {
+ SSI_SG_TO_BUF = 0,
+ SSI_SG_FROM_BUF = 1
+};
+
+struct ssi_mlli {
+ ssi_sram_addr_t sram_addr;
+ unsigned int nents; /* sg nents */
+ unsigned int mlli_nents; /* mlli nents may differ from the above */
+};
+
+struct mlli_params {
+ struct dma_pool *curr_pool;
+ uint8_t *mlli_virt_addr;
+ dma_addr_t mlli_dma_addr;
+ uint32_t mlli_len;
+};
+
+int ssi_buffer_mgr_init(struct ssi_drvdata *drvdata);
+
+int ssi_buffer_mgr_fini(struct ssi_drvdata *drvdata);
+
+int ssi_buffer_mgr_map_blkcipher_request(
+ struct ssi_drvdata *drvdata,
+ void *ctx,
+ unsigned int ivsize,
+ unsigned int nbytes,
+ void *info,
+ struct scatterlist *src,
+ struct scatterlist *dst);
+
+void ssi_buffer_mgr_unmap_blkcipher_request(
+ struct device *dev,
+ void *ctx,
+ unsigned int ivsize,
+ struct scatterlist *src,
+ struct scatterlist *dst);
+
+int ssi_buffer_mgr_map_aead_request(struct ssi_drvdata *drvdata, struct aead_request *req);
+
+void ssi_buffer_mgr_unmap_aead_request(struct device *dev, struct aead_request *req);
+
+int ssi_buffer_mgr_map_hash_request_final(struct ssi_drvdata *drvdata, void *ctx, struct scatterlist *src, unsigned int nbytes, bool do_update);
+
+int ssi_buffer_mgr_map_hash_request_update(struct ssi_drvdata *drvdata, void *ctx, struct scatterlist *src, unsigned int nbytes, unsigned int block_size);
+
+void ssi_buffer_mgr_unmap_hash_request(struct device *dev, void *ctx, struct scatterlist *src, bool do_revert);
+
+void ssi_buffer_mgr_copy_scatterlist_portion(u8 *dest, struct scatterlist *sg, uint32_t to_skip, uint32_t end, enum ssi_sg_cpy_direct direct);
+
+void ssi_buffer_mgr_zero_sgl(struct scatterlist *sgl, uint32_t data_len);
+
+
+#ifdef CC_DMA_48BIT_SIM
+dma_addr_t ssi_buff_mgr_update_dma_addr(dma_addr_t orig_addr, uint32_t data_len);
+dma_addr_t ssi_buff_mgr_restore_dma_addr(dma_addr_t orig_addr);
+
+#define SSI_UPDATE_DMA_ADDR_TO_48BIT(addr,size) addr = \
+ ssi_buff_mgr_update_dma_addr(addr,size)
+#define SSI_RESTORE_DMA_ADDR_TO_48BIT(addr) addr = \
+ ssi_buff_mgr_restore_dma_addr(addr)
+#else
+
+#define SSI_UPDATE_DMA_ADDR_TO_48BIT(addr,size) addr = addr
+#define SSI_RESTORE_DMA_ADDR_TO_48BIT(addr) addr = addr
+
+#endif
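+
+/*
+ * Typical usage pattern (illustrative): wrap every DMA address right after
+ * mapping and restore it right before unmapping, e.g.
+ *
+ *	addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
+ *	SSI_UPDATE_DMA_ADDR_TO_48BIT(addr, len);
+ *	...
+ *	SSI_RESTORE_DMA_ADDR_TO_48BIT(addr);
+ *	dma_unmap_single(dev, addr, len, DMA_TO_DEVICE);
+ */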
+
+#endif /* __SSI_BUFFER_MGR_H__ */
+
diff --git a/drivers/staging/ccree/ssi_cipher.c b/drivers/staging/ccree/ssi_cipher.c
new file mode 100644
index 000000000000..664ed7e52cf2
--- /dev/null
+++ b/drivers/staging/ccree/ssi_cipher.c
@@ -0,0 +1,1503 @@
+/*
+ * Copyright (C) 2012-2017 ARM Limited or its affiliates.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/semaphore.h>
+#include <crypto/algapi.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/aes.h>
+#include <crypto/ctr.h>
+#include <crypto/des.h>
+
+#include "ssi_config.h"
+#include "ssi_driver.h"
+#include "cc_lli_defs.h"
+#include "ssi_buffer_mgr.h"
+#include "ssi_cipher.h"
+#include "ssi_request_mgr.h"
+#include "ssi_sysfs.h"
+#include "ssi_fips_local.h"
+
+#define MAX_ABLKCIPHER_SEQ_LEN 6
+
+#define template_ablkcipher template_u.ablkcipher
+#define template_sblkcipher template_u.blkcipher
+
+#define SSI_MIN_AES_XTS_SIZE 0x10
+#define SSI_MAX_AES_XTS_SIZE 0x2000
+struct ssi_blkcipher_handle {
+ struct list_head blkcipher_alg_list;
+};
+
+struct cc_user_key_info {
+ uint8_t *key;
+ dma_addr_t key_dma_addr;
+};
+struct cc_hw_key_info {
+ enum HwCryptoKey key1_slot;
+ enum HwCryptoKey key2_slot;
+};
+
+struct ssi_ablkcipher_ctx {
+ struct ssi_drvdata *drvdata;
+ int keylen;
+ int key_round_number;
+ int cipher_mode;
+ int flow_mode;
+ unsigned int flags;
+ struct blkcipher_req_ctx *sync_ctx;
+ struct cc_user_key_info user;
+ struct cc_hw_key_info hw;
+ struct crypto_shash *shash_tfm;
+};
+
+static void ssi_ablkcipher_complete(struct device *dev, void *ssi_req, void __iomem *cc_base);
+
+
+static int validate_keys_sizes(struct ssi_ablkcipher_ctx *ctx_p, uint32_t size) {
+ switch (ctx_p->flow_mode){
+ case S_DIN_to_AES:
+ switch (size){
+ case CC_AES_128_BIT_KEY_SIZE:
+ case CC_AES_192_BIT_KEY_SIZE:
+ if (likely((ctx_p->cipher_mode != DRV_CIPHER_XTS) &&
+ (ctx_p->cipher_mode != DRV_CIPHER_ESSIV) &&
+ (ctx_p->cipher_mode != DRV_CIPHER_BITLOCKER)))
+ return 0;
+ break;
+ case CC_AES_256_BIT_KEY_SIZE:
+ return 0;
+ case (CC_AES_192_BIT_KEY_SIZE*2):
+ case (CC_AES_256_BIT_KEY_SIZE*2):
+ if (likely((ctx_p->cipher_mode == DRV_CIPHER_XTS) ||
+ (ctx_p->cipher_mode == DRV_CIPHER_ESSIV) ||
+ (ctx_p->cipher_mode == DRV_CIPHER_BITLOCKER)))
+ return 0;
+ break;
+ default:
+ break;
+ }
+ break;
+ case S_DIN_to_DES:
+ if (likely(size == DES3_EDE_KEY_SIZE ||
+ size == DES_KEY_SIZE))
+ return 0;
+ break;
+#if SSI_CC_HAS_MULTI2
+ case S_DIN_to_MULTI2:
+ if (likely(size == CC_MULTI2_SYSTEM_N_DATA_KEY_SIZE))
+ return 0;
+ break;
+#endif
+ default:
+ break;
+
+ }
+ return -EINVAL;
+}
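+
+/*
+ * For example, xts(aes) with 256-bit keys passes keylen = 64 bytes
+ * (CC_AES_256_BIT_KEY_SIZE * 2), whereas plain cbc(aes) passes 16, 24 or
+ * 32 bytes.
+ */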
+
+
+static int validate_data_size(struct ssi_ablkcipher_ctx *ctx_p, unsigned int size) {
+ switch (ctx_p->flow_mode){
+ case S_DIN_to_AES:
+ switch (ctx_p->cipher_mode){
+ case DRV_CIPHER_XTS:
+ if ((size >= SSI_MIN_AES_XTS_SIZE) &&
+ (size <= SSI_MAX_AES_XTS_SIZE) &&
+ IS_ALIGNED(size, AES_BLOCK_SIZE))
+ return 0;
+ break;
+ case DRV_CIPHER_CBC_CTS:
+ if (likely(size >= AES_BLOCK_SIZE))
+ return 0;
+ break;
+ case DRV_CIPHER_OFB:
+ case DRV_CIPHER_CTR:
+ return 0;
+ case DRV_CIPHER_ECB:
+ case DRV_CIPHER_CBC:
+ case DRV_CIPHER_ESSIV:
+ case DRV_CIPHER_BITLOCKER:
+ if (likely(IS_ALIGNED(size, AES_BLOCK_SIZE)))
+ return 0;
+ break;
+ default:
+ break;
+ }
+ break;
+ case S_DIN_to_DES:
+ if (likely(IS_ALIGNED(size, DES_BLOCK_SIZE)))
+ return 0;
+ break;
+#if SSI_CC_HAS_MULTI2
+ case S_DIN_to_MULTI2:
+ switch (ctx_p->cipher_mode) {
+ case DRV_MULTI2_CBC:
+ if (likely(IS_ALIGNED(size, CC_MULTI2_BLOCK_SIZE)))
+ return 0;
+ break;
+ case DRV_MULTI2_OFB:
+ return 0;
+ default:
+ break;
+ }
+ break;
+#endif /*SSI_CC_HAS_MULTI2*/
+ default:
+ break;
+
+ }
+ return -EINVAL;
+}
+
+static unsigned int get_max_keysize(struct crypto_tfm *tfm)
+{
+ struct ssi_crypto_alg *ssi_alg = container_of(tfm->__crt_alg, struct ssi_crypto_alg, crypto_alg);
+
+ if ((ssi_alg->crypto_alg.cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_ABLKCIPHER) {
+ return ssi_alg->crypto_alg.cra_ablkcipher.max_keysize;
+ }
+
+ if ((ssi_alg->crypto_alg.cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_BLKCIPHER) {
+ return ssi_alg->crypto_alg.cra_blkcipher.max_keysize;
+ }
+
+ return 0;
+}
+
+static int ssi_blkcipher_init(struct crypto_tfm *tfm)
+{
+ struct ssi_ablkcipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+ struct crypto_alg *alg = tfm->__crt_alg;
+ struct ssi_crypto_alg *ssi_alg =
+ container_of(alg, struct ssi_crypto_alg, crypto_alg);
+ struct device *dev;
+ int rc = 0;
+ unsigned int max_key_buf_size = get_max_keysize(tfm);
+
+ SSI_LOG_DEBUG("Initializing context @%p for %s\n", ctx_p,
+ crypto_tfm_alg_name(tfm));
+
+ CHECK_AND_RETURN_UPON_FIPS_ERROR();
+ ctx_p->cipher_mode = ssi_alg->cipher_mode;
+ ctx_p->flow_mode = ssi_alg->flow_mode;
+ ctx_p->drvdata = ssi_alg->drvdata;
+ dev = &ctx_p->drvdata->plat_dev->dev;
+
+ /* Allocate key buffer, cache line aligned */
+ ctx_p->user.key = kmalloc(max_key_buf_size, GFP_KERNEL|GFP_DMA);
+ if (!ctx_p->user.key) {
+ SSI_LOG_ERR("Allocating key buffer in context failed\n");
+ return -ENOMEM;
+ }
+ SSI_LOG_DEBUG("Allocated key buffer in context. key=@%p\n",
+ ctx_p->user.key);
+
+ /* Map key buffer */
+ ctx_p->user.key_dma_addr = dma_map_single(dev, (void *)ctx_p->user.key,
+ max_key_buf_size, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, ctx_p->user.key_dma_addr)) {
+ SSI_LOG_ERR("Mapping Key %u B at va=%pK for DMA failed\n",
+ max_key_buf_size, ctx_p->user.key);
+ return -ENOMEM;
+ }
+ SSI_UPDATE_DMA_ADDR_TO_48BIT(ctx_p->user.key_dma_addr, max_key_buf_size);
+ SSI_LOG_DEBUG("Mapped key %u B at va=%pK to dma=0x%llX\n",
+ max_key_buf_size, ctx_p->user.key,
+ (unsigned long long)ctx_p->user.key_dma_addr);
+
+ if (ctx_p->cipher_mode == DRV_CIPHER_ESSIV) {
+ /* Alloc hash tfm for essiv */
+ ctx_p->shash_tfm = crypto_alloc_shash("sha256-generic", 0, 0);
+ if (IS_ERR(ctx_p->shash_tfm)) {
+ SSI_LOG_ERR("Error allocating hash tfm for ESSIV.\n");
+ return PTR_ERR(ctx_p->shash_tfm);
+ }
+ }
+
+ return rc;
+}
+
+static void ssi_blkcipher_exit(struct crypto_tfm *tfm)
+{
+ struct ssi_ablkcipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+ struct device *dev = &ctx_p->drvdata->plat_dev->dev;
+ unsigned int max_key_buf_size = get_max_keysize(tfm);
+
+ SSI_LOG_DEBUG("Clearing context @%p for %s\n",
+ crypto_tfm_ctx(tfm), crypto_tfm_alg_name(tfm));
+
+ if (ctx_p->cipher_mode == DRV_CIPHER_ESSIV) {
+ /* Free hash tfm for essiv */
+ crypto_free_shash(ctx_p->shash_tfm);
+ ctx_p->shash_tfm = NULL;
+ }
+
+ /* Unmap key buffer */
+ SSI_RESTORE_DMA_ADDR_TO_48BIT(ctx_p->user.key_dma_addr);
+ dma_unmap_single(dev, ctx_p->user.key_dma_addr, max_key_buf_size,
+ DMA_TO_DEVICE);
+ SSI_LOG_DEBUG("Unmapped key buffer key_dma_addr=0x%llX\n",
+ (unsigned long long)ctx_p->user.key_dma_addr);
+
+ /* Free key buffer in context */
+ kfree(ctx_p->user.key);
+ SSI_LOG_DEBUG("Free key buffer in context. key=@%p\n", ctx_p->user.key);
+}
+
+
+typedef struct tdes_keys{
+ u8 key1[DES_KEY_SIZE];
+ u8 key2[DES_KEY_SIZE];
+ u8 key3[DES_KEY_SIZE];
+}tdes_keys_t;
+
+static const u8 zero_buff[] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0};
+
+/* Verify that the 3DES keys are not weak. */
+static int ssi_fips_verify_3des_keys(const u8 *key, unsigned int keylen)
+{
+#ifdef CCREE_FIPS_SUPPORT
+ tdes_keys_t *tdes_key = (tdes_keys_t*)key;
+
+ /* verify key1 != key2 and key3 != key2*/
+ if (unlikely( (memcmp((u8*)tdes_key->key1, (u8*)tdes_key->key2, sizeof(tdes_key->key1)) == 0) ||
+ (memcmp((u8*)tdes_key->key3, (u8*)tdes_key->key2, sizeof(tdes_key->key3)) == 0) )) {
+ return -ENOEXEC;
+ }
+#endif /* CCREE_FIPS_SUPPORT */
+
+ return 0;
+}
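+
+/*
+ * Rationale (illustrative): if K1 == K2 (or K2 == K3), EDE degenerates to a
+ * single DES operation, which is why such keys are rejected when FIPS
+ * support is enabled.
+ */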
+
+/* Verify that the XTS key is not weak. */
+static int ssi_fips_verify_xts_keys(const u8 *key, unsigned int keylen)
+{
+#ifdef CCREE_FIPS_SUPPORT
+ /* A weak key is defined as a key whose first half (128/256 LSB) equals its second half (128/256 MSB) */
+ int singleKeySize = keylen >> 1;
+
+ if (unlikely(memcmp(key, &key[singleKeySize], singleKeySize) == 0)) {
+ return -ENOEXEC;
+ }
+#endif /* CCREE_FIPS_SUPPORT */
+
+ return 0;
+}
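+
+/*
+ * Example: for a 256-bit xts(aes) key (keylen = 32), singleKeySize = 16 and
+ * the key is rejected if its first 16 bytes equal its last 16 bytes.
+ */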
+
+static enum HwCryptoKey hw_key_to_cc_hw_key(int slot_num)
+{
+ switch (slot_num) {
+ case 0:
+ return KFDE0_KEY;
+ case 1:
+ return KFDE1_KEY;
+ case 2:
+ return KFDE2_KEY;
+ case 3:
+ return KFDE3_KEY;
+ }
+ return END_OF_KEYS;
+}
+
+static int ssi_blkcipher_setkey(struct crypto_tfm *tfm,
+ const u8 *key,
+ unsigned int keylen)
+{
+ struct ssi_ablkcipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+ struct device *dev = &ctx_p->drvdata->plat_dev->dev;
+ u32 tmp[DES_EXPKEY_WORDS];
+ unsigned int max_key_buf_size = get_max_keysize(tfm);
+ DECL_CYCLE_COUNT_RESOURCES;
+
+ SSI_LOG_DEBUG("Setting key in context @%p for %s. keylen=%u\n",
+ ctx_p, crypto_tfm_alg_name(tfm), keylen);
+ dump_byte_array("key", (uint8_t *)key, keylen);
+
+ CHECK_AND_RETURN_UPON_FIPS_ERROR();
+
+ SSI_LOG_DEBUG("ssi_blkcipher_setkey: after FIPS check");
+
+ /* STAT_PHASE_0: Init and sanity checks */
+ START_CYCLE_COUNT();
+
+#if SSI_CC_HAS_MULTI2
+ /* The last byte of the key buffer is the round number and is not counted as part of the key size */
+ if (ctx_p->flow_mode == S_DIN_to_MULTI2) {
+ keylen -=1;
+ }
+#endif /*SSI_CC_HAS_MULTI2*/
+
+ if (unlikely(validate_keys_sizes(ctx_p,keylen) != 0)) {
+ SSI_LOG_ERR("Unsupported key size %d.\n", keylen);
+ crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+
+ if (ssi_is_hw_key(tfm)) {
+ /* setting HW key slots */
+ struct arm_hw_key_info *hki = (struct arm_hw_key_info*)key;
+
+ if (unlikely(ctx_p->flow_mode != S_DIN_to_AES)) {
+ SSI_LOG_ERR("HW key not supported for non-AES flows\n");
+ return -EINVAL;
+ }
+
+ ctx_p->hw.key1_slot = hw_key_to_cc_hw_key(hki->hw_key1);
+ if (unlikely(ctx_p->hw.key1_slot == END_OF_KEYS)) {
+ SSI_LOG_ERR("Unsupported hw key1 number (%d)\n", hki->hw_key1);
+ return -EINVAL;
+ }
+
+ if ((ctx_p->cipher_mode == DRV_CIPHER_XTS) ||
+ (ctx_p->cipher_mode == DRV_CIPHER_ESSIV) ||
+ (ctx_p->cipher_mode == DRV_CIPHER_BITLOCKER)) {
+ if (unlikely(hki->hw_key1 == hki->hw_key2)) {
+ SSI_LOG_ERR("Illegal hw key numbers (%d,%d)\n", hki->hw_key1, hki->hw_key2);
+ return -EINVAL;
+ }
+ ctx_p->hw.key2_slot = hw_key_to_cc_hw_key(hki->hw_key2);
+ if (unlikely(ctx_p->hw.key2_slot == END_OF_KEYS)) {
+ SSI_LOG_ERR("Unsupported hw key2 number (%d)\n", hki->hw_key2);
+ return -EINVAL;
+ }
+ }
+
+ ctx_p->keylen = keylen;
+ END_CYCLE_COUNT(STAT_OP_TYPE_SETKEY, STAT_PHASE_0);
+ SSI_LOG_DEBUG("ssi_blkcipher_setkey: ssi_is_hw_key ret 0");
+
+ return 0;
+ }
+
+ // verify weak keys
+ if (ctx_p->flow_mode == S_DIN_to_DES) {
+ if (unlikely(!des_ekey(tmp, key)) &&
+ (crypto_tfm_get_flags(tfm) & CRYPTO_TFM_REQ_WEAK_KEY)) {
+ tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
+ SSI_LOG_DEBUG("ssi_blkcipher_setkey: weak DES key");
+ return -EINVAL;
+ }
+ }
+ if ((ctx_p->cipher_mode == DRV_CIPHER_XTS) &&
+ ssi_fips_verify_xts_keys(key, keylen) != 0) {
+ SSI_LOG_DEBUG("ssi_blkcipher_setkey: weak XTS key");
+ return -EINVAL;
+ }
+ if ((ctx_p->flow_mode == S_DIN_to_DES) &&
+ (keylen == DES3_EDE_KEY_SIZE) &&
+ ssi_fips_verify_3des_keys(key, keylen) != 0) {
+ SSI_LOG_DEBUG("ssi_blkcipher_setkey: weak 3DES key");
+ return -EINVAL;
+ }
+
+
+ END_CYCLE_COUNT(STAT_OP_TYPE_SETKEY, STAT_PHASE_0);
+
+ /* STAT_PHASE_1: Copy key to ctx */
+ START_CYCLE_COUNT();
+ SSI_RESTORE_DMA_ADDR_TO_48BIT(ctx_p->user.key_dma_addr);
+ dma_sync_single_for_cpu(dev, ctx_p->user.key_dma_addr,
+ max_key_buf_size, DMA_TO_DEVICE);
+#if SSI_CC_HAS_MULTI2
+ if (ctx_p->flow_mode == S_DIN_to_MULTI2) {
+ memcpy(ctx_p->user.key, key, CC_MULTI2_SYSTEM_N_DATA_KEY_SIZE);
+ ctx_p->key_round_number = key[CC_MULTI2_SYSTEM_N_DATA_KEY_SIZE];
+ if (ctx_p->key_round_number < CC_MULTI2_MIN_NUM_ROUNDS ||
+ ctx_p->key_round_number > CC_MULTI2_MAX_NUM_ROUNDS) {
+ crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ SSI_LOG_DEBUG("ssi_blkcipher_setkey: SSI_CC_HAS_MULTI2 einval");
+ return -EINVAL;
+ }
+ } else
+#endif /*SSI_CC_HAS_MULTI2*/
+ {
+ memcpy(ctx_p->user.key, key, keylen);
+ if (keylen == 24)
+ memset(ctx_p->user.key + 24, 0, CC_AES_KEY_SIZE_MAX - 24);
+
+ if (ctx_p->cipher_mode == DRV_CIPHER_ESSIV) {
+ /* sha256 for key2 - use sw implementation */
+ int key_len = keylen >> 1;
+ int err;
+ SHASH_DESC_ON_STACK(desc, ctx_p->shash_tfm);
+ desc->tfm = ctx_p->shash_tfm;
+
+ err = crypto_shash_digest(desc, ctx_p->user.key, key_len, ctx_p->user.key + key_len);
+ if (err) {
+ SSI_LOG_ERR("Failed to hash ESSIV key.\n");
+ return err;
+ }
+ }
+ }
+ dma_sync_single_for_device(dev, ctx_p->user.key_dma_addr,
+ max_key_buf_size, DMA_TO_DEVICE);
+ SSI_UPDATE_DMA_ADDR_TO_48BIT(ctx_p->user.key_dma_addr ,max_key_buf_size);
+ ctx_p->keylen = keylen;
+
+ END_CYCLE_COUNT(STAT_OP_TYPE_SETKEY, STAT_PHASE_1);
+
+ SSI_LOG_DEBUG("ssi_blkcipher_setkey: return safely");
+ return 0;
+}
+
+static inline void
+ssi_blkcipher_create_setup_desc(
+ struct crypto_tfm *tfm,
+ struct blkcipher_req_ctx *req_ctx,
+ unsigned int ivsize,
+ unsigned int nbytes,
+ HwDesc_s desc[],
+ unsigned int *seq_size)
+{
+ struct ssi_ablkcipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+ int cipher_mode = ctx_p->cipher_mode;
+ int flow_mode = ctx_p->flow_mode;
+ int direction = req_ctx->gen_ctx.op_type;
+ dma_addr_t key_dma_addr = ctx_p->user.key_dma_addr;
+ unsigned int key_len = ctx_p->keylen;
+ dma_addr_t iv_dma_addr = req_ctx->gen_ctx.iv_dma_addr;
+ unsigned int du_size = nbytes;
+
+ struct ssi_crypto_alg *ssi_alg = container_of(tfm->__crt_alg, struct ssi_crypto_alg, crypto_alg);
+
+ if ((ssi_alg->crypto_alg.cra_flags & CRYPTO_ALG_BULK_MASK) == CRYPTO_ALG_BULK_DU_512)
+ du_size = 512;
+ if ((ssi_alg->crypto_alg.cra_flags & CRYPTO_ALG_BULK_MASK) == CRYPTO_ALG_BULK_DU_4096)
+ du_size = 4096;
+
+ switch (cipher_mode) {
+ case DRV_CIPHER_CBC:
+ case DRV_CIPHER_CBC_CTS:
+ case DRV_CIPHER_CTR:
+ case DRV_CIPHER_OFB:
+ /* Load cipher state */
+ HW_DESC_INIT(&desc[*seq_size]);
+ HW_DESC_SET_DIN_TYPE(&desc[*seq_size], DMA_DLLI,
+ iv_dma_addr, ivsize,
+ NS_BIT);
+ HW_DESC_SET_CIPHER_CONFIG0(&desc[*seq_size], direction);
+ HW_DESC_SET_FLOW_MODE(&desc[*seq_size], flow_mode);
+ HW_DESC_SET_CIPHER_MODE(&desc[*seq_size], cipher_mode);
+ if ((cipher_mode == DRV_CIPHER_CTR) ||
+ (cipher_mode == DRV_CIPHER_OFB) ) {
+ HW_DESC_SET_SETUP_MODE(&desc[*seq_size],
+ SETUP_LOAD_STATE1);
+ } else {
+ HW_DESC_SET_SETUP_MODE(&desc[*seq_size],
+ SETUP_LOAD_STATE0);
+ }
+ (*seq_size)++;
+ /*FALLTHROUGH*/
+ case DRV_CIPHER_ECB:
+ /* Load key */
+ HW_DESC_INIT(&desc[*seq_size]);
+ HW_DESC_SET_CIPHER_MODE(&desc[*seq_size], cipher_mode);
+ HW_DESC_SET_CIPHER_CONFIG0(&desc[*seq_size], direction);
+ if (flow_mode == S_DIN_to_AES) {
+
+ if (ssi_is_hw_key(tfm)) {
+ HW_DESC_SET_HW_CRYPTO_KEY(&desc[*seq_size], ctx_p->hw.key1_slot);
+ } else {
+ HW_DESC_SET_DIN_TYPE(&desc[*seq_size], DMA_DLLI,
+ key_dma_addr,
+ ((key_len == 24) ? AES_MAX_KEY_SIZE : key_len),
+ NS_BIT);
+ }
+ HW_DESC_SET_KEY_SIZE_AES(&desc[*seq_size], key_len);
+ } else {
+ /*des*/
+ HW_DESC_SET_DIN_TYPE(&desc[*seq_size], DMA_DLLI,
+ key_dma_addr, key_len,
+ NS_BIT);
+ HW_DESC_SET_KEY_SIZE_DES(&desc[*seq_size], key_len);
+ }
+ HW_DESC_SET_FLOW_MODE(&desc[*seq_size], flow_mode);
+ HW_DESC_SET_SETUP_MODE(&desc[*seq_size], SETUP_LOAD_KEY0);
+ (*seq_size)++;
+ break;
+ case DRV_CIPHER_XTS:
+ case DRV_CIPHER_ESSIV:
+ case DRV_CIPHER_BITLOCKER:
+ /* Load AES key */
+ HW_DESC_INIT(&desc[*seq_size]);
+ HW_DESC_SET_CIPHER_MODE(&desc[*seq_size], cipher_mode);
+ HW_DESC_SET_CIPHER_CONFIG0(&desc[*seq_size], direction);
+ if (ssi_is_hw_key(tfm)) {
+ HW_DESC_SET_HW_CRYPTO_KEY(&desc[*seq_size], ctx_p->hw.key1_slot);
+ } else {
+ HW_DESC_SET_DIN_TYPE(&desc[*seq_size], DMA_DLLI,
+ key_dma_addr, key_len/2,
+ NS_BIT);
+ }
+ HW_DESC_SET_KEY_SIZE_AES(&desc[*seq_size], key_len/2);
+ HW_DESC_SET_FLOW_MODE(&desc[*seq_size], flow_mode);
+ HW_DESC_SET_SETUP_MODE(&desc[*seq_size], SETUP_LOAD_KEY0);
+ (*seq_size)++;
+
+ /* load XEX key */
+ HW_DESC_INIT(&desc[*seq_size]);
+ HW_DESC_SET_CIPHER_MODE(&desc[*seq_size], cipher_mode);
+ HW_DESC_SET_CIPHER_CONFIG0(&desc[*seq_size], direction);
+ if (ssi_is_hw_key(tfm)) {
+ HW_DESC_SET_HW_CRYPTO_KEY(&desc[*seq_size], ctx_p->hw.key2_slot);
+ } else {
+ HW_DESC_SET_DIN_TYPE(&desc[*seq_size], DMA_DLLI,
+ (key_dma_addr+key_len/2), key_len/2,
+ NS_BIT);
+ }
+ HW_DESC_SET_XEX_DATA_UNIT_SIZE(&desc[*seq_size], du_size);
+ HW_DESC_SET_FLOW_MODE(&desc[*seq_size], S_DIN_to_AES2);
+ HW_DESC_SET_KEY_SIZE_AES(&desc[*seq_size], key_len/2);
+ HW_DESC_SET_SETUP_MODE(&desc[*seq_size], SETUP_LOAD_XEX_KEY);
+ (*seq_size)++;
+
+ /* Set state */
+ HW_DESC_INIT(&desc[*seq_size]);
+ HW_DESC_SET_SETUP_MODE(&desc[*seq_size], SETUP_LOAD_STATE1);
+ HW_DESC_SET_CIPHER_MODE(&desc[*seq_size], cipher_mode);
+ HW_DESC_SET_CIPHER_CONFIG0(&desc[*seq_size], direction);
+ HW_DESC_SET_KEY_SIZE_AES(&desc[*seq_size], key_len/2);
+ HW_DESC_SET_FLOW_MODE(&desc[*seq_size], flow_mode);
+ HW_DESC_SET_DIN_TYPE(&desc[*seq_size], DMA_DLLI,
+ iv_dma_addr, CC_AES_BLOCK_SIZE,
+ NS_BIT);
+ (*seq_size)++;
+ break;
+ default:
+ SSI_LOG_ERR("Unsupported cipher mode (%d)\n", cipher_mode);
+ BUG();
+ }
+}
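+
+/*
+ * Resulting setup sequences (illustrative summary of the switch above):
+ *   CBC/CTS/CTR/OFB:      load cipher state (IV), then load key
+ *   ECB:                  load key only
+ *   XTS/ESSIV/BITLOCKER:  load key1, load XEX key2 (+ DU size), load tweak
+ */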
+
+#if SSI_CC_HAS_MULTI2
+static inline void ssi_blkcipher_create_multi2_setup_desc(
+ struct crypto_tfm *tfm,
+ struct blkcipher_req_ctx *req_ctx,
+ unsigned int ivsize,
+ HwDesc_s desc[],
+ unsigned int *seq_size)
+{
+ struct ssi_ablkcipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+
+ int direction = req_ctx->gen_ctx.op_type;
+ /* Load system key */
+ HW_DESC_INIT(&desc[*seq_size]);
+ HW_DESC_SET_CIPHER_MODE(&desc[*seq_size], ctx_p->cipher_mode);
+ HW_DESC_SET_CIPHER_CONFIG0(&desc[*seq_size], direction);
+ HW_DESC_SET_DIN_TYPE(&desc[*seq_size], DMA_DLLI, ctx_p->user.key_dma_addr,
+ CC_MULTI2_SYSTEM_KEY_SIZE,
+ NS_BIT);
+ HW_DESC_SET_FLOW_MODE(&desc[*seq_size], ctx_p->flow_mode);
+ HW_DESC_SET_SETUP_MODE(&desc[*seq_size], SETUP_LOAD_KEY0);
+ (*seq_size)++;
+
+ /* load data key */
+ HW_DESC_INIT(&desc[*seq_size]);
+ HW_DESC_SET_DIN_TYPE(&desc[*seq_size], DMA_DLLI,
+ (ctx_p->user.key_dma_addr +
+ CC_MULTI2_SYSTEM_KEY_SIZE),
+ CC_MULTI2_DATA_KEY_SIZE, NS_BIT);
+ HW_DESC_SET_MULTI2_NUM_ROUNDS(&desc[*seq_size],
+ ctx_p->key_round_number);
+ HW_DESC_SET_FLOW_MODE(&desc[*seq_size], ctx_p->flow_mode);
+ HW_DESC_SET_CIPHER_MODE(&desc[*seq_size], ctx_p->cipher_mode);
+ HW_DESC_SET_CIPHER_CONFIG0(&desc[*seq_size], direction);
+ HW_DESC_SET_SETUP_MODE(&desc[*seq_size], SETUP_LOAD_STATE0 );
+ (*seq_size)++;
+
+
+ /* Set state */
+ HW_DESC_INIT(&desc[*seq_size]);
+ HW_DESC_SET_DIN_TYPE(&desc[*seq_size], DMA_DLLI,
+ req_ctx->gen_ctx.iv_dma_addr,
+ ivsize, NS_BIT);
+ HW_DESC_SET_CIPHER_CONFIG0(&desc[*seq_size], direction);
+ HW_DESC_SET_FLOW_MODE(&desc[*seq_size], ctx_p->flow_mode);
+ HW_DESC_SET_CIPHER_MODE(&desc[*seq_size], ctx_p->cipher_mode);
+ HW_DESC_SET_SETUP_MODE(&desc[*seq_size], SETUP_LOAD_STATE1);
+ (*seq_size)++;
+
+}
+#endif /*SSI_CC_HAS_MULTI2*/
+
+static inline void
+ssi_blkcipher_create_data_desc(
+ struct crypto_tfm *tfm,
+ struct blkcipher_req_ctx *req_ctx,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes,
+ void *areq,
+ HwDesc_s desc[],
+ unsigned int *seq_size)
+{
+ struct ssi_ablkcipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+ unsigned int flow_mode = ctx_p->flow_mode;
+
+ switch (ctx_p->flow_mode) {
+ case S_DIN_to_AES:
+ flow_mode = DIN_AES_DOUT;
+ break;
+ case S_DIN_to_DES:
+ flow_mode = DIN_DES_DOUT;
+ break;
+#if SSI_CC_HAS_MULTI2
+ case S_DIN_to_MULTI2:
+ flow_mode = DIN_MULTI2_DOUT;
+ break;
+#endif /*SSI_CC_HAS_MULTI2*/
+ default:
+ SSI_LOG_ERR("invalid flow mode, flow_mode = %d \n", flow_mode);
+ return;
+ }
+ /* Process */
+ if (likely(req_ctx->dma_buf_type == SSI_DMA_BUF_DLLI)){
+ SSI_LOG_DEBUG(" data params addr 0x%llX length 0x%X \n",
+ (unsigned long long)sg_dma_address(src),
+ nbytes);
+ SSI_LOG_DEBUG(" data params addr 0x%llX length 0x%X \n",
+ (unsigned long long)sg_dma_address(dst),
+ nbytes);
+ HW_DESC_INIT(&desc[*seq_size]);
+ HW_DESC_SET_DIN_TYPE(&desc[*seq_size], DMA_DLLI,
+ sg_dma_address(src),
+ nbytes, NS_BIT);
+ HW_DESC_SET_DOUT_DLLI(&desc[*seq_size],
+ sg_dma_address(dst),
+ nbytes,
+ NS_BIT, (areq == NULL)? 0:1);
+ if (areq != NULL) {
+ HW_DESC_SET_QUEUE_LAST_IND(&desc[*seq_size]);
+ }
+ HW_DESC_SET_FLOW_MODE(&desc[*seq_size], flow_mode);
+ (*seq_size)++;
+ } else {
+ /* bypass */
+ SSI_LOG_DEBUG(" bypass params addr 0x%llX "
+ "length 0x%X addr 0x%08X\n",
+ (unsigned long long)req_ctx->mlli_params.mlli_dma_addr,
+ req_ctx->mlli_params.mlli_len,
+ (unsigned int)ctx_p->drvdata->mlli_sram_addr);
+ HW_DESC_INIT(&desc[*seq_size]);
+ HW_DESC_SET_DIN_TYPE(&desc[*seq_size], DMA_DLLI,
+ req_ctx->mlli_params.mlli_dma_addr,
+ req_ctx->mlli_params.mlli_len,
+ NS_BIT);
+ HW_DESC_SET_DOUT_SRAM(&desc[*seq_size],
+ ctx_p->drvdata->mlli_sram_addr,
+ req_ctx->mlli_params.mlli_len);
+ HW_DESC_SET_FLOW_MODE(&desc[*seq_size], BYPASS);
+ (*seq_size)++;
+
+ HW_DESC_INIT(&desc[*seq_size]);
+ HW_DESC_SET_DIN_TYPE(&desc[*seq_size], DMA_MLLI,
+ ctx_p->drvdata->mlli_sram_addr,
+ req_ctx->in_mlli_nents, NS_BIT);
+ if (req_ctx->out_nents == 0) {
+ SSI_LOG_DEBUG(" din/dout params addr 0x%08X "
+ "addr 0x%08X\n",
+ (unsigned int)ctx_p->drvdata->mlli_sram_addr,
+ (unsigned int)ctx_p->drvdata->mlli_sram_addr);
+ HW_DESC_SET_DOUT_MLLI(&desc[*seq_size],
+ ctx_p->drvdata->mlli_sram_addr,
+ req_ctx->in_mlli_nents,
+ NS_BIT,(areq == NULL)? 0:1);
+ } else {
+ SSI_LOG_DEBUG(" din/dout params "
+ "addr 0x%08X addr 0x%08X\n",
+ (unsigned int)ctx_p->drvdata->mlli_sram_addr,
+ (unsigned int)ctx_p->drvdata->mlli_sram_addr +
+ (uint32_t)LLI_ENTRY_BYTE_SIZE *
+ req_ctx->in_nents);
+ HW_DESC_SET_DOUT_MLLI(&desc[*seq_size],
+ (ctx_p->drvdata->mlli_sram_addr +
+ LLI_ENTRY_BYTE_SIZE *
+ req_ctx->in_mlli_nents),
+ req_ctx->out_mlli_nents, NS_BIT,(areq == NULL)? 0:1);
+ }
+ if (areq != NULL) {
+ HW_DESC_SET_QUEUE_LAST_IND(&desc[*seq_size]);
+ }
+ HW_DESC_SET_FLOW_MODE(&desc[*seq_size], flow_mode);
+ (*seq_size)++;
+ }
+}
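+
+/*
+ * Note (illustrative): in the DLLI case a single descriptor moves the data
+ * directly between the mapped src and dst. In the MLLI case a BYPASS
+ * descriptor first copies the MLLI table from host memory into SRAM, and the
+ * data descriptor then references that SRAM-resident table.
+ */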
+
+static int ssi_blkcipher_complete(struct device *dev,
+ struct ssi_ablkcipher_ctx *ctx_p,
+ struct blkcipher_req_ctx *req_ctx,
+ struct scatterlist *dst, struct scatterlist *src,
+ void *info, //req info
+ unsigned int ivsize,
+ void *areq,
+ void __iomem *cc_base)
+{
+ int completion_error = 0;
+ uint32_t inflight_counter;
+ DECL_CYCLE_COUNT_RESOURCES;
+
+ START_CYCLE_COUNT();
+ ssi_buffer_mgr_unmap_blkcipher_request(dev, req_ctx, ivsize, src, dst);
+ info = req_ctx->backup_info;
+ END_CYCLE_COUNT(STAT_OP_TYPE_GENERIC, STAT_PHASE_4);
+
+
+ /* Save the inflight counter value to a local variable */
+ inflight_counter = ctx_p->drvdata->inflight_counter;
+ /*Decrease the inflight counter*/
+ if(ctx_p->flow_mode == BYPASS && ctx_p->drvdata->inflight_counter > 0)
+ ctx_p->drvdata->inflight_counter--;
+
+ if(areq){
+ ablkcipher_request_complete(areq, completion_error);
+ return 0;
+ }
+ return completion_error;
+}
+
+static int ssi_blkcipher_process(
+ struct crypto_tfm *tfm,
+ struct blkcipher_req_ctx *req_ctx,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes,
+ void *info, //req info
+ unsigned int ivsize,
+ void *areq,
+ enum drv_crypto_direction direction)
+{
+ struct ssi_ablkcipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+ struct device *dev = &ctx_p->drvdata->plat_dev->dev;
+ HwDesc_s desc[MAX_ABLKCIPHER_SEQ_LEN];
+ struct ssi_crypto_req ssi_req = {};
+ int rc, seq_len = 0,cts_restore_flag = 0;
+ DECL_CYCLE_COUNT_RESOURCES;
+
+ SSI_LOG_DEBUG("%s areq=%p info=%p nbytes=%d\n",
+ ((direction==DRV_CRYPTO_DIRECTION_ENCRYPT)?"Encrypt":"Decrypt"),
+ areq, info, nbytes);
+
+ CHECK_AND_RETURN_UPON_FIPS_ERROR();
+ /* STAT_PHASE_0: Init and sanity checks */
+ START_CYCLE_COUNT();
+
+ /* TODO: check data length according to mode */
+ if (unlikely(validate_data_size(ctx_p, nbytes))) {
+ SSI_LOG_ERR("Unsupported data size %d.\n", nbytes);
+ crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_BLOCK_LEN);
+ return -EINVAL;
+ }
+ if (nbytes == 0) {
+ /* A zero-length request is valid; there is nothing to do */
+ return 0;
+ }
+ /* For CTS, if the data size is a multiple of the AES block size, use plain CBC mode */
+ if (((nbytes % AES_BLOCK_SIZE) == 0) && (ctx_p->cipher_mode == DRV_CIPHER_CBC_CTS)){
+
+ ctx_p->cipher_mode = DRV_CIPHER_CBC;
+ cts_restore_flag = 1;
+ }
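+ /*
+ * Example: a 32-byte cts1(cbc(aes)) request is block aligned, so
+ * ciphertext stealing is a no-op; it is processed as plain CBC and
+ * cts_restore_flag restores the original mode on exit.
+ */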
+
+ /* Setup DX request structure */
+ ssi_req.user_cb = (void *)ssi_ablkcipher_complete;
+ ssi_req.user_arg = (void *)areq;
+
+#ifdef ENABLE_CYCLE_COUNT
+ ssi_req.op_type = (direction == DRV_CRYPTO_DIRECTION_DECRYPT) ?
+ STAT_OP_TYPE_DECODE : STAT_OP_TYPE_ENCODE;
+
+#endif
+
+ /* Setup request context */
+ req_ctx->gen_ctx.op_type = direction;
+
+ END_CYCLE_COUNT(ssi_req.op_type, STAT_PHASE_0);
+
+ /* STAT_PHASE_1: Map buffers */
+ START_CYCLE_COUNT();
+
+ rc = ssi_buffer_mgr_map_blkcipher_request(ctx_p->drvdata, req_ctx, ivsize, nbytes, info, src, dst);
+ if (unlikely(rc != 0)) {
+ SSI_LOG_ERR("map_request() failed\n");
+ goto exit_process;
+ }
+
+ END_CYCLE_COUNT(ssi_req.op_type, STAT_PHASE_1);
+
+ /* STAT_PHASE_2: Create sequence */
+ START_CYCLE_COUNT();
+
+ /* Setup processing */
+#if SSI_CC_HAS_MULTI2
+ if (ctx_p->flow_mode == S_DIN_to_MULTI2) {
+ ssi_blkcipher_create_multi2_setup_desc(tfm,
+ req_ctx,
+ ivsize,
+ desc,
+ &seq_len);
+ } else
+#endif /*SSI_CC_HAS_MULTI2*/
+ {
+ ssi_blkcipher_create_setup_desc(tfm,
+ req_ctx,
+ ivsize,
+ nbytes,
+ desc,
+ &seq_len);
+ }
+ /* Data processing */
+ ssi_blkcipher_create_data_desc(tfm,
+ req_ctx,
+ dst, src,
+ nbytes,
+ areq,
+ desc, &seq_len);
+
+ /* do we need to generate IV? */
+ if (req_ctx->is_giv == true) {
+ ssi_req.ivgen_dma_addr[0] = req_ctx->gen_ctx.iv_dma_addr;
+ ssi_req.ivgen_dma_addr_len = 1;
+ /* set the IV size (8/16 B long)*/
+ ssi_req.ivgen_size = ivsize;
+ }
+ END_CYCLE_COUNT(ssi_req.op_type, STAT_PHASE_2);
+
+ /* STAT_PHASE_3: Lock HW and push sequence */
+ START_CYCLE_COUNT();
+
+ rc = send_request(ctx_p->drvdata, &ssi_req, desc, seq_len, (areq == NULL)? 0:1);
+ if(areq != NULL) {
+ if (unlikely(rc != -EINPROGRESS)) {
+ /* Failed to send the request or request completed synchronously */
+ ssi_buffer_mgr_unmap_blkcipher_request(dev, req_ctx, ivsize, src, dst);
+ }
+
+ END_CYCLE_COUNT(ssi_req.op_type, STAT_PHASE_3);
+ } else {
+ if (rc != 0) {
+ ssi_buffer_mgr_unmap_blkcipher_request(dev, req_ctx, ivsize, src, dst);
+ END_CYCLE_COUNT(ssi_req.op_type, STAT_PHASE_3);
+ } else {
+ END_CYCLE_COUNT(ssi_req.op_type, STAT_PHASE_3);
+ rc = ssi_blkcipher_complete(dev, ctx_p, req_ctx, dst, src, info, ivsize, NULL, ctx_p->drvdata->cc_base);
+ }
+ }
+
+exit_process:
+ if (cts_restore_flag != 0)
+ ctx_p->cipher_mode = DRV_CIPHER_CBC_CTS;
+
+ return rc;
+}
+
+static void ssi_ablkcipher_complete(struct device *dev, void *ssi_req, void __iomem *cc_base)
+{
+ struct ablkcipher_request *areq = (struct ablkcipher_request *)ssi_req;
+ struct blkcipher_req_ctx *req_ctx = ablkcipher_request_ctx(areq);
+ struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
+ struct ssi_ablkcipher_ctx *ctx_p = crypto_ablkcipher_ctx(tfm);
+ unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
+
+ CHECK_AND_RETURN_VOID_UPON_FIPS_ERROR();
+
+ ssi_blkcipher_complete(dev, ctx_p, req_ctx, areq->dst, areq->src, areq->info, ivsize, areq, cc_base);
+}
+
+
+
+static int ssi_sblkcipher_init(struct crypto_tfm *tfm)
+{
+ struct ssi_ablkcipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+
+ /* Allocate sync ctx buffer */
+ ctx_p->sync_ctx = kmalloc(sizeof(struct blkcipher_req_ctx), GFP_KERNEL|GFP_DMA);
+ if (!ctx_p->sync_ctx) {
+ SSI_LOG_ERR("Allocating sync ctx buffer in context failed\n");
+ return -ENOMEM;
+ }
+ SSI_LOG_DEBUG("Allocated sync ctx buffer in context ctx_p->sync_ctx=@%p\n",
+ ctx_p->sync_ctx);
+
+ return ssi_blkcipher_init(tfm);
+}
+
+
+static void ssi_sblkcipher_exit(struct crypto_tfm *tfm)
+{
+ struct ssi_ablkcipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+
+ kfree(ctx_p->sync_ctx);
+ SSI_LOG_DEBUG("Free sync ctx buffer in context ctx_p->sync_ctx=@%p\n", ctx_p->sync_ctx);
+
+ ssi_blkcipher_exit(tfm);
+}
+
+#ifdef SYNC_ALGS
+static int ssi_sblkcipher_encrypt(struct blkcipher_desc *desc,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes)
+{
+ struct crypto_blkcipher *blk_tfm = desc->tfm;
+ struct crypto_tfm *tfm = crypto_blkcipher_tfm(blk_tfm);
+ struct ssi_ablkcipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+ struct blkcipher_req_ctx *req_ctx = ctx_p->sync_ctx;
+ unsigned int ivsize = crypto_blkcipher_ivsize(blk_tfm);
+
+ req_ctx->backup_info = desc->info;
+ req_ctx->is_giv = false;
+
+ return ssi_blkcipher_process(tfm, req_ctx, dst, src, nbytes, desc->info, ivsize, NULL, DRV_CRYPTO_DIRECTION_ENCRYPT);
+}
+
+static int ssi_sblkcipher_decrypt(struct blkcipher_desc *desc,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes)
+{
+ struct crypto_blkcipher *blk_tfm = desc->tfm;
+ struct crypto_tfm *tfm = crypto_blkcipher_tfm(blk_tfm);
+ struct ssi_ablkcipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+ struct blkcipher_req_ctx *req_ctx = ctx_p->sync_ctx;
+ unsigned int ivsize = crypto_blkcipher_ivsize(blk_tfm);
+
+ req_ctx->backup_info = desc->info;
+ req_ctx->is_giv = false;
+
+ return ssi_blkcipher_process(tfm, req_ctx, dst, src, nbytes, desc->info, ivsize, NULL, DRV_CRYPTO_DIRECTION_DECRYPT);
+}
+#endif
+
+/* Async wrap functions */
+
+static int ssi_ablkcipher_init(struct crypto_tfm *tfm)
+{
+ struct ablkcipher_tfm *ablktfm = &tfm->crt_ablkcipher;
+
+ ablktfm->reqsize = sizeof(struct blkcipher_req_ctx);
+
+ return ssi_blkcipher_init(tfm);
+}
+
+
+static int ssi_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
+ const u8 *key,
+ unsigned int keylen)
+{
+ return ssi_blkcipher_setkey(crypto_ablkcipher_tfm(tfm), key, keylen);
+}
+
+static int ssi_ablkcipher_encrypt(struct ablkcipher_request *req)
+{
+ struct crypto_ablkcipher *ablk_tfm = crypto_ablkcipher_reqtfm(req);
+ struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablk_tfm);
+ struct blkcipher_req_ctx *req_ctx = ablkcipher_request_ctx(req);
+ unsigned int ivsize = crypto_ablkcipher_ivsize(ablk_tfm);
+
+ req_ctx->backup_info = req->info;
+ req_ctx->is_giv = false;
+
+ return ssi_blkcipher_process(tfm, req_ctx, req->dst, req->src, req->nbytes, req->info, ivsize, (void *)req, DRV_CRYPTO_DIRECTION_ENCRYPT);
+}
+
+static int ssi_ablkcipher_decrypt(struct ablkcipher_request *req)
+{
+ struct crypto_ablkcipher *ablk_tfm = crypto_ablkcipher_reqtfm(req);
+ struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablk_tfm);
+ struct blkcipher_req_ctx *req_ctx = ablkcipher_request_ctx(req);
+ unsigned int ivsize = crypto_ablkcipher_ivsize(ablk_tfm);
+
+ req_ctx->backup_info = req->info;
+ req_ctx->is_giv = false;
+ return ssi_blkcipher_process(tfm, req_ctx, req->dst, req->src, req->nbytes, req->info, ivsize, (void *)req, DRV_CRYPTO_DIRECTION_DECRYPT);
+}
+
+
+/* DX Block cipher alg */
+static struct ssi_alg_template blkcipher_algs[] = {
+/* Async template */
+#if SSI_CC_HAS_AES_XTS
+ {
+ .name = "xts(aes)",
+ .driver_name = "xts-aes-dx",
+ .blocksize = AES_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .template_ablkcipher = {
+ .setkey = ssi_ablkcipher_setkey,
+ .encrypt = ssi_ablkcipher_encrypt,
+ .decrypt = ssi_ablkcipher_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE * 2,
+ .max_keysize = AES_MAX_KEY_SIZE * 2,
+ .ivsize = AES_BLOCK_SIZE,
+ .geniv = "eseqiv",
+ },
+ .cipher_mode = DRV_CIPHER_XTS,
+ .flow_mode = S_DIN_to_AES,
+ .synchronous = false,
+ },
+ {
+ .name = "xts(aes)",
+ .driver_name = "xts-aes-du512-dx",
+ .blocksize = AES_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_BULK_DU_512,
+ .template_ablkcipher = {
+ .setkey = ssi_ablkcipher_setkey,
+ .encrypt = ssi_ablkcipher_encrypt,
+ .decrypt = ssi_ablkcipher_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE * 2,
+ .max_keysize = AES_MAX_KEY_SIZE * 2,
+ .ivsize = AES_BLOCK_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_XTS,
+ .flow_mode = S_DIN_to_AES,
+ .synchronous = false,
+ },
+ {
+ .name = "xts(aes)",
+ .driver_name = "xts-aes-du4096-dx",
+ .blocksize = AES_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_BULK_DU_4096,
+ .template_ablkcipher = {
+ .setkey = ssi_ablkcipher_setkey,
+ .encrypt = ssi_ablkcipher_encrypt,
+ .decrypt = ssi_ablkcipher_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE * 2,
+ .max_keysize = AES_MAX_KEY_SIZE * 2,
+ .ivsize = AES_BLOCK_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_XTS,
+ .flow_mode = S_DIN_to_AES,
+ .synchronous = false,
+ },
+#endif /*SSI_CC_HAS_AES_XTS*/
+#if SSI_CC_HAS_AES_ESSIV
+ {
+ .name = "essiv(aes)",
+ .driver_name = "essiv-aes-dx",
+ .blocksize = AES_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .template_ablkcipher = {
+ .setkey = ssi_ablkcipher_setkey,
+ .encrypt = ssi_ablkcipher_encrypt,
+ .decrypt = ssi_ablkcipher_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE * 2,
+ .max_keysize = AES_MAX_KEY_SIZE * 2,
+ .ivsize = AES_BLOCK_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_ESSIV,
+ .flow_mode = S_DIN_to_AES,
+ .synchronous = false,
+ },
+ {
+ .name = "essiv(aes)",
+ .driver_name = "essiv-aes-du512-dx",
+ .blocksize = AES_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_BULK_DU_512,
+ .template_ablkcipher = {
+ .setkey = ssi_ablkcipher_setkey,
+ .encrypt = ssi_ablkcipher_encrypt,
+ .decrypt = ssi_ablkcipher_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE * 2,
+ .max_keysize = AES_MAX_KEY_SIZE * 2,
+ .ivsize = AES_BLOCK_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_ESSIV,
+ .flow_mode = S_DIN_to_AES,
+ .synchronous = false,
+ },
+ {
+ .name = "essiv(aes)",
+ .driver_name = "essiv-aes-du4096-dx",
+ .blocksize = AES_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_BULK_DU_4096,
+ .template_ablkcipher = {
+ .setkey = ssi_ablkcipher_setkey,
+ .encrypt = ssi_ablkcipher_encrypt,
+ .decrypt = ssi_ablkcipher_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE * 2,
+ .max_keysize = AES_MAX_KEY_SIZE * 2,
+ .ivsize = AES_BLOCK_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_ESSIV,
+ .flow_mode = S_DIN_to_AES,
+ .synchronous = false,
+ },
+#endif /*SSI_CC_HAS_AES_ESSIV*/
+#if SSI_CC_HAS_AES_BITLOCKER
+ {
+ .name = "bitlocker(aes)",
+ .driver_name = "bitlocker-aes-dx",
+ .blocksize = AES_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .template_ablkcipher = {
+ .setkey = ssi_ablkcipher_setkey,
+ .encrypt = ssi_ablkcipher_encrypt,
+ .decrypt = ssi_ablkcipher_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE * 2,
+ .max_keysize = AES_MAX_KEY_SIZE * 2,
+ .ivsize = AES_BLOCK_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_BITLOCKER,
+ .flow_mode = S_DIN_to_AES,
+ .synchronous = false,
+ },
+ {
+ .name = "bitlocker(aes)",
+ .driver_name = "bitlocker-aes-du512-dx",
+ .blocksize = AES_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_BULK_DU_512,
+ .template_ablkcipher = {
+ .setkey = ssi_ablkcipher_setkey,
+ .encrypt = ssi_ablkcipher_encrypt,
+ .decrypt = ssi_ablkcipher_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE * 2,
+ .max_keysize = AES_MAX_KEY_SIZE * 2,
+ .ivsize = AES_BLOCK_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_BITLOCKER,
+ .flow_mode = S_DIN_to_AES,
+ .synchronous = false,
+ },
+ {
+ .name = "bitlocker(aes)",
+ .driver_name = "bitlocker-aes-du4096-dx",
+ .blocksize = AES_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_BULK_DU_4096,
+ .template_ablkcipher = {
+ .setkey = ssi_ablkcipher_setkey,
+ .encrypt = ssi_ablkcipher_encrypt,
+ .decrypt = ssi_ablkcipher_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE * 2,
+ .max_keysize = AES_MAX_KEY_SIZE * 2,
+ .ivsize = AES_BLOCK_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_BITLOCKER,
+ .flow_mode = S_DIN_to_AES,
+ .synchronous = false,
+ },
+#endif /*SSI_CC_HAS_AES_BITLOCKER*/
+ {
+ .name = "ecb(aes)",
+ .driver_name = "ecb-aes-dx",
+ .blocksize = AES_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .template_ablkcipher = {
+ .setkey = ssi_ablkcipher_setkey,
+ .encrypt = ssi_ablkcipher_encrypt,
+ .decrypt = ssi_ablkcipher_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = 0,
+ },
+ .cipher_mode = DRV_CIPHER_ECB,
+ .flow_mode = S_DIN_to_AES,
+ .synchronous = false,
+ },
+ {
+ .name = "cbc(aes)",
+ .driver_name = "cbc-aes-dx",
+ .blocksize = AES_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .template_ablkcipher = {
+ .setkey = ssi_ablkcipher_setkey,
+ .encrypt = ssi_ablkcipher_encrypt,
+ .decrypt = ssi_ablkcipher_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_CBC,
+ .flow_mode = S_DIN_to_AES,
+ .synchronous = false,
+ },
+ {
+ .name = "ofb(aes)",
+ .driver_name = "ofb-aes-dx",
+ .blocksize = AES_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .template_ablkcipher = {
+ .setkey = ssi_ablkcipher_setkey,
+ .encrypt = ssi_ablkcipher_encrypt,
+ .decrypt = ssi_ablkcipher_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_OFB,
+ .flow_mode = S_DIN_to_AES,
+ .synchronous = false,
+ },
+#if SSI_CC_HAS_AES_CTS
+ {
+ .name = "cts1(cbc(aes))",
+ .driver_name = "cts1-cbc-aes-dx",
+ .blocksize = AES_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .template_ablkcipher = {
+ .setkey = ssi_ablkcipher_setkey,
+ .encrypt = ssi_ablkcipher_encrypt,
+ .decrypt = ssi_ablkcipher_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_CBC_CTS,
+ .flow_mode = S_DIN_to_AES,
+ .synchronous = false,
+ },
+#endif
+ {
+ .name = "ctr(aes)",
+ .driver_name = "ctr-aes-dx",
+ .blocksize = 1,
+ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .template_ablkcipher = {
+ .setkey = ssi_ablkcipher_setkey,
+ .encrypt = ssi_ablkcipher_encrypt,
+ .decrypt = ssi_ablkcipher_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_CTR,
+ .flow_mode = S_DIN_to_AES,
+ .synchronous = false,
+ },
+ {
+ .name = "cbc(des3_ede)",
+ .driver_name = "cbc-3des-dx",
+ .blocksize = DES3_EDE_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .template_ablkcipher = {
+ .setkey = ssi_ablkcipher_setkey,
+ .encrypt = ssi_ablkcipher_encrypt,
+ .decrypt = ssi_ablkcipher_decrypt,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_CBC,
+ .flow_mode = S_DIN_to_DES,
+ .synchronous = false,
+ },
+ {
+ .name = "ecb(des3_ede)",
+ .driver_name = "ecb-3des-dx",
+ .blocksize = DES3_EDE_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .template_ablkcipher = {
+ .setkey = ssi_ablkcipher_setkey,
+ .encrypt = ssi_ablkcipher_encrypt,
+ .decrypt = ssi_ablkcipher_decrypt,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = 0,
+ },
+ .cipher_mode = DRV_CIPHER_ECB,
+ .flow_mode = S_DIN_to_DES,
+ .synchronous = false,
+ },
+ {
+ .name = "cbc(des)",
+ .driver_name = "cbc-des-dx",
+ .blocksize = DES_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .template_ablkcipher = {
+ .setkey = ssi_ablkcipher_setkey,
+ .encrypt = ssi_ablkcipher_encrypt,
+ .decrypt = ssi_ablkcipher_decrypt,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_CBC,
+ .flow_mode = S_DIN_to_DES,
+ .synchronous = false,
+ },
+ {
+ .name = "ecb(des)",
+ .driver_name = "ecb-des-dx",
+ .blocksize = DES_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .template_ablkcipher = {
+ .setkey = ssi_ablkcipher_setkey,
+ .encrypt = ssi_ablkcipher_encrypt,
+ .decrypt = ssi_ablkcipher_decrypt,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = 0,
+ },
+ .cipher_mode = DRV_CIPHER_ECB,
+ .flow_mode = S_DIN_to_DES,
+ .synchronous = false,
+ },
+#if SSI_CC_HAS_MULTI2
+ {
+ .name = "cbc(multi2)",
+ .driver_name = "cbc-multi2-dx",
+ .blocksize = CC_MULTI2_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .template_ablkcipher = {
+ .setkey = ssi_ablkcipher_setkey,
+ .encrypt = ssi_ablkcipher_encrypt,
+ .decrypt = ssi_ablkcipher_decrypt,
+ .min_keysize = CC_MULTI2_SYSTEM_N_DATA_KEY_SIZE + 1,
+ .max_keysize = CC_MULTI2_SYSTEM_N_DATA_KEY_SIZE + 1,
+ .ivsize = CC_MULTI2_IV_SIZE,
+ },
+ .cipher_mode = DRV_MULTI2_CBC,
+ .flow_mode = S_DIN_to_MULTI2,
+ .synchronous = false,
+ },
+ {
+ .name = "ofb(multi2)",
+ .driver_name = "ofb-multi2-dx",
+ .blocksize = 1,
+ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .template_ablkcipher = {
+ .setkey = ssi_ablkcipher_setkey,
+ .encrypt = ssi_ablkcipher_encrypt,
+ .decrypt = ssi_ablkcipher_encrypt, /* OFB decryption is the same operation as encryption */
+ .min_keysize = CC_MULTI2_SYSTEM_N_DATA_KEY_SIZE + 1,
+ .max_keysize = CC_MULTI2_SYSTEM_N_DATA_KEY_SIZE + 1,
+ .ivsize = CC_MULTI2_IV_SIZE,
+ },
+ .cipher_mode = DRV_MULTI2_OFB,
+ .flow_mode = S_DIN_to_MULTI2,
+ .synchronous = false,
+ },
+#endif /*SSI_CC_HAS_MULTI2*/
+};
+
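+/* Build a ssi_crypto_alg instance from one blkcipher_algs[] template entry.
+ * Synchronous templates are exposed through the blkcipher interface, while
+ * asynchronous ones use the ablkcipher interface with CRYPTO_ALG_ASYNC set.
+ */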
+static
+struct ssi_crypto_alg *ssi_ablkcipher_create_alg(struct ssi_alg_template *template)
+{
+ struct ssi_crypto_alg *t_alg;
+ struct crypto_alg *alg;
+
+ t_alg = kzalloc(sizeof(struct ssi_crypto_alg), GFP_KERNEL);
+ if (!t_alg) {
+ SSI_LOG_ERR("failed to allocate t_alg\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ alg = &t_alg->crypto_alg;
+
+ snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
+ snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+ template->driver_name);
+ alg->cra_module = THIS_MODULE;
+ alg->cra_priority = SSI_CRA_PRIO;
+ alg->cra_blocksize = template->blocksize;
+ alg->cra_alignmask = 0;
+ alg->cra_ctxsize = sizeof(struct ssi_ablkcipher_ctx);
+
+ alg->cra_init = template->synchronous ? ssi_sblkcipher_init : ssi_ablkcipher_init;
+ alg->cra_exit = template->synchronous ? ssi_sblkcipher_exit : ssi_blkcipher_exit;
+ alg->cra_type = template->synchronous ? &crypto_blkcipher_type : &crypto_ablkcipher_type;
+ if (template->synchronous) {
+ alg->cra_blkcipher = template->template_sblkcipher;
+ alg->cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ template->type;
+ } else {
+ alg->cra_ablkcipher = template->template_ablkcipher;
+ alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
+ template->type;
+ }
+
+ t_alg->cipher_mode = template->cipher_mode;
+ t_alg->flow_mode = template->flow_mode;
+
+ return t_alg;
+}
+
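+/* Unregister and free every block cipher algorithm previously registered by
+ * ssi_ablkcipher_alloc(), then release the handle itself.
+ */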
+int ssi_ablkcipher_free(struct ssi_drvdata *drvdata)
+{
+ struct ssi_crypto_alg *t_alg, *n;
+ struct ssi_blkcipher_handle *blkcipher_handle =
+ drvdata->blkcipher_handle;
+
+ if (blkcipher_handle != NULL) {
+ /* Remove registered algs */
+ list_for_each_entry_safe(t_alg, n,
+ &blkcipher_handle->blkcipher_alg_list,
+ entry) {
+ crypto_unregister_alg(&t_alg->crypto_alg);
+ list_del(&t_alg->entry);
+ kfree(t_alg);
+ }
+ kfree(blkcipher_handle);
+ drvdata->blkcipher_handle = NULL;
+ }
+ return 0;
+}
+
+
+
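+/* Allocate the block cipher handle and register all blkcipher_algs[] entries
+ * with the kernel crypto API; on any failure, already-registered algorithms
+ * are rolled back through ssi_ablkcipher_free().
+ */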
+int ssi_ablkcipher_alloc(struct ssi_drvdata *drvdata)
+{
+ struct ssi_blkcipher_handle *ablkcipher_handle;
+ struct ssi_crypto_alg *t_alg;
+ int rc = -ENOMEM;
+ int alg;
+
+ ablkcipher_handle = kmalloc(sizeof(*ablkcipher_handle), GFP_KERNEL);
+ if (!ablkcipher_handle)
+ return -ENOMEM;
+
+ drvdata->blkcipher_handle = ablkcipher_handle;
+
+ INIT_LIST_HEAD(&ablkcipher_handle->blkcipher_alg_list);
+
+ /* Linux crypto */
+ SSI_LOG_DEBUG("Number of algorithms = %zu\n", ARRAY_SIZE(blkcipher_algs));
+ for (alg = 0; alg < ARRAY_SIZE(blkcipher_algs); alg++) {
+ SSI_LOG_DEBUG("creating %s\n", blkcipher_algs[alg].driver_name);
+ t_alg = ssi_ablkcipher_create_alg(&blkcipher_algs[alg]);
+ if (IS_ERR(t_alg)) {
+ rc = PTR_ERR(t_alg);
+ SSI_LOG_ERR("%s alg allocation failed\n",
+ blkcipher_algs[alg].driver_name);
+ goto fail0;
+ }
+ t_alg->drvdata = drvdata;
+
+ SSI_LOG_DEBUG("registering %s\n", blkcipher_algs[alg].driver_name);
+ rc = crypto_register_alg(&t_alg->crypto_alg);
+ SSI_LOG_DEBUG("%s alg registration rc = %x\n",
+ t_alg->crypto_alg.cra_driver_name, rc);
+ if (unlikely(rc != 0)) {
+ SSI_LOG_ERR("%s alg registration failed\n",
+ t_alg->crypto_alg.cra_driver_name);
+ kfree(t_alg);
+ goto fail0;
+ } else {
+ list_add_tail(&t_alg->entry,
+ &ablkcipher_handle->blkcipher_alg_list);
+ SSI_LOG_DEBUG("Registered %s\n",
+ t_alg->crypto_alg.cra_driver_name);
+ }
+ }
+ return 0;
+
+fail0:
+ ssi_ablkcipher_free(drvdata);
+ return rc;
+}
diff --git a/drivers/staging/ccree/ssi_cipher.h b/drivers/staging/ccree/ssi_cipher.h
new file mode 100644
index 000000000000..ba4eb7c4893f
--- /dev/null
+++ b/drivers/staging/ccree/ssi_cipher.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright (C) 2012-2017 ARM Limited or its affiliates.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/* \file ssi_cipher.h
+ * ARM CryptoCell Cipher Crypto API
+ */
+
+#ifndef __SSI_CIPHER_H__
+#define __SSI_CIPHER_H__
+
+#include <linux/kernel.h>
+#include <crypto/algapi.h>
+#include "ssi_driver.h"
+#include "ssi_buffer_mgr.h"
+
+
+/* Crypto cipher flags */
+#define CC_CRYPTO_CIPHER_KEY_KFDE0 (1 << 0)
+#define CC_CRYPTO_CIPHER_KEY_KFDE1 (1 << 1)
+#define CC_CRYPTO_CIPHER_KEY_KFDE2 (1 << 2)
+#define CC_CRYPTO_CIPHER_KEY_KFDE3 (1 << 3)
+#define CC_CRYPTO_CIPHER_DU_SIZE_512B (1 << 4)
+
+#define CC_CRYPTO_CIPHER_KEY_KFDE_MASK (CC_CRYPTO_CIPHER_KEY_KFDE0 | CC_CRYPTO_CIPHER_KEY_KFDE1 | CC_CRYPTO_CIPHER_KEY_KFDE2 | CC_CRYPTO_CIPHER_KEY_KFDE3)
+
+
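+/* Per-request context for block cipher requests: DMA mapping state (nents and
+ * MLLI descriptors) for the in/out scatterlists, plus the backed-up IV used in
+ * the generated-IV flow.
+ */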
+struct blkcipher_req_ctx {
+ struct async_gen_req_ctx gen_ctx;
+ enum ssi_req_dma_buf_type dma_buf_type;
+ uint32_t in_nents;
+ uint32_t in_mlli_nents;
+ uint32_t out_nents;
+ uint32_t out_mlli_nents;
+ uint8_t *backup_info; /*store iv for generated IV flow*/
+ bool is_giv;
+ struct mlli_params mlli_params;
+};
+
+
+
+int ssi_ablkcipher_alloc(struct ssi_drvdata *drvdata);
+
+int ssi_ablkcipher_free(struct ssi_drvdata *drvdata);
+
+#ifndef CRYPTO_ALG_BULK_MASK
+
+#define CRYPTO_ALG_BULK_DU_512 0x00002000
+#define CRYPTO_ALG_BULK_DU_4096 0x00004000
+#define CRYPTO_ALG_BULK_MASK (CRYPTO_ALG_BULK_DU_512 |\
+ CRYPTO_ALG_BULK_DU_4096)
+#endif /* CRYPTO_ALG_BULK_MASK */
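+
+/* These data-unit (DU) flags are OR-ed into the .type field of the
+ * bitlocker(aes) template entries in ssi_cipher.c (e.g.
+ * CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_BULK_DU_512); the definitions above
+ * are fallbacks for kernels that do not already provide CRYPTO_ALG_BULK_MASK.
+ */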
+
+
+#ifdef CRYPTO_TFM_REQ_HW_KEY
+
+static inline bool ssi_is_hw_key(struct crypto_tfm *tfm)
+{
+ return (crypto_tfm_get_flags(tfm) & CRYPTO_TFM_REQ_HW_KEY);
+}
+
+#else
+
+struct arm_hw_key_info {
+ int hw_key1;
+ int hw_key2;
+};
+
+static inline bool ssi_is_hw_key(struct crypto_tfm *tfm)
+{
+ return 0;
+}
+
+#endif /* CRYPTO_TFM_REQ_HW_KEY */
+
+
+#endif /*__SSI_CIPHER_H__*/
diff --git a/drivers/staging/ccree/ssi_config.h b/drivers/staging/ccree/ssi_config.h
new file mode 100644
index 000000000000..d96a5436f6d7
--- /dev/null
+++ b/drivers/staging/ccree/ssi_config.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2012-2017 ARM Limited or its affiliates.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/* \file ssi_config.h
+ * Definitions for ARM CryptoCell Linux Crypto Driver
+ */
+
+#ifndef __SSI_CONFIG_H__
+#define __SSI_CONFIG_H__
+
+#include <linux/version.h>
+
+#define DISABLE_COHERENT_DMA_OPS
+//#define FLUSH_CACHE_ALL
+//#define COMPLETION_DELAY
+//#define DX_DUMP_DESCS
+// #define DX_DUMP_BYTES
+// #define CC_DEBUG
+#define ENABLE_CC_SYSFS /* Enable sysfs interface for debugging REE driver */
+//#define ENABLE_CC_CYCLE_COUNT
+//#define DX_IRQ_DELAY 100000
+#define DMA_BIT_MASK_LEN 48 /* was 32 bits; enlarged to 48 bits to support the Juno platform */
+
+#if defined ENABLE_CC_CYCLE_COUNT && defined ENABLE_CC_SYSFS
+#define CC_CYCLE_COUNT
+#endif
+
+
+#if defined(CONFIG_ARM64) // TODO: currently only this mode was tested on Juno (which is ARM64); coherent DMA ops still need to be enabled.
+#define DISABLE_COHERENT_DMA_OPS
+#endif
+
+/* Define the CryptoCell DMA cache coherency signals configuration */
+#if defined (DISABLE_COHERENT_DMA_OPS)
+ /* Software Controlled Cache Coherency (SCCC) */
+ #define SSI_CACHE_PARAMS (0x000)
+ /* CC attached to a non-ACP interconnect such as HPP/ACE/AMBA4.
+ * The customer is responsible for enabling/disabling this feature
+ * according to the platform type.
+ */
+ #define DX_HAS_ACP 0
+#else
+ #define SSI_CACHE_PARAMS (0xEEE)
+ /* CC attached to ACP */
+ #define DX_HAS_ACP 1
+#endif
+
+#endif /*__SSI_CONFIG_H__*/
+
diff --git a/drivers/staging/ccree/ssi_driver.c b/drivers/staging/ccree/ssi_driver.c
new file mode 100644
index 000000000000..bc19adce6dee
--- /dev/null
+++ b/drivers/staging/ccree/ssi_driver.c
@@ -0,0 +1,556 @@
+/*
+ * Copyright (C) 2012-2017 ARM Limited or its affiliates.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include <linux/crypto.h>
+#include <crypto/algapi.h>
+#include <crypto/aes.h>
+#include <crypto/sha.h>
+#include <crypto/aead.h>
+#include <crypto/authenc.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/internal/skcipher.h>
+
+#include <linux/init.h>
+#include <linux/moduleparam.h>
+#include <linux/types.h>
+#include <linux/random.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/fcntl.h>
+#include <linux/poll.h>
+#include <linux/proc_fs.h>
+#include <linux/mutex.h>
+#include <linux/sysctl.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/platform_device.h>
+#include <linux/mm.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/pm.h>
+
+/* cache.h required for L1_CACHE_ALIGN() and cache_line_size() */
+#include <linux/cache.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
+#include <linux/pagemap.h>
+#include <linux/sched.h>
+#include <linux/random.h>
+#include <linux/of.h>
+
+#include "ssi_config.h"
+#include "ssi_driver.h"
+#include "ssi_request_mgr.h"
+#include "ssi_buffer_mgr.h"
+#include "ssi_sysfs.h"
+#include "ssi_cipher.h"
+#include "ssi_aead.h"
+#include "ssi_hash.h"
+#include "ssi_ivgen.h"
+#include "ssi_sram_mgr.h"
+#include "ssi_pm.h"
+#include "ssi_fips_local.h"
+
+
+#ifdef DX_DUMP_BYTES
+void dump_byte_array(const char *name, const uint8_t *the_array, unsigned long size)
+{
+ int i, line_offset = 0, ret = 0;
+ const uint8_t *cur_byte;
+ char line_buf[80];
+
+ if (the_array == NULL) {
+ SSI_LOG_ERR("cannot dump_byte_array - NULL pointer\n");
+ return;
+ }
+
+ ret = snprintf(line_buf, sizeof(line_buf), "%s[%lu]: ",
+ name, size);
+ if (ret < 0) {
+ SSI_LOG_ERR("snprintf returned %d . aborting buffer array dump\n",ret);
+ return;
+ }
+ line_offset = ret;
+ for (i = 0, cur_byte = the_array;
+ (i < size) && (line_offset < sizeof(line_buf)); i++, cur_byte++) {
+ ret = snprintf(line_buf + line_offset,
+ sizeof(line_buf) - line_offset,
+ "0x%02X ", *cur_byte);
+ if (ret < 0) {
+ SSI_LOG_ERR("snprintf returned %d . aborting buffer array dump\n",ret);
+ return;
+ }
+ line_offset += ret;
+ if (line_offset > 75) { /* Cut before line end */
+ SSI_LOG_DEBUG("%s\n", line_buf);
+ line_offset = 0;
+ }
+ }
+
+ if (line_offset > 0) /* Dump remaining line */
+ SSI_LOG_DEBUG("%s\n", line_buf);
+}
+#endif
+
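+/* Top-half interrupt handler: read and acknowledge the host IRR, then handle
+ * descriptor-completion, TEE FIPS (GPR0) and AXI-error causes. Completion and
+ * FIPS causes are masked here and unmasked again by their deferred handlers.
+ */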
+static irqreturn_t cc_isr(int irq, void *dev_id)
+{
+ struct ssi_drvdata *drvdata = (struct ssi_drvdata *)dev_id;
+ void __iomem *cc_base = drvdata->cc_base;
+ uint32_t irr;
+ uint32_t imr;
+ DECL_CYCLE_COUNT_RESOURCES;
+
+ /* STAT_OP_TYPE_GENERIC STAT_PHASE_0: Interrupt */
+ START_CYCLE_COUNT();
+
+ /* read the interrupt status */
+ irr = CC_HAL_READ_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_IRR));
+ SSI_LOG_DEBUG("Got IRR=0x%08X\n", irr);
+ if (unlikely(irr == 0)) { /* Probably shared interrupt line */
+ SSI_LOG_ERR("Got interrupt with empty IRR\n");
+ return IRQ_NONE;
+ }
+ imr = CC_HAL_READ_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_IMR));
+
+ /* clear interrupt - must be before processing events */
+ CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_ICR), irr);
+
+ drvdata->irq = irr;
+ /* Completion interrupt - most probable */
+ if (likely((irr & SSI_COMP_IRQ_MASK) != 0)) {
+ /* Mask AXI completion interrupt - will be unmasked in Deferred service handler */
+ CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_IMR), imr | SSI_COMP_IRQ_MASK);
+ irr &= ~SSI_COMP_IRQ_MASK;
+ complete_request(drvdata);
+ }
+#ifdef CC_SUPPORT_FIPS
+ /* TEE FIPS interrupt */
+ if (likely((irr & SSI_GPR0_IRQ_MASK) != 0)) {
+ /* Mask interrupt - will be unmasked in Deferred service handler */
+ CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_IMR), imr | SSI_GPR0_IRQ_MASK);
+ irr &= ~SSI_GPR0_IRQ_MASK;
+ fips_handler(drvdata);
+ }
+#endif
+ /* AXI error interrupt */
+ if (unlikely((irr & SSI_AXI_ERR_IRQ_MASK) != 0)) {
+ uint32_t axi_err;
+
+ /* Read the AXI error ID */
+ axi_err = CC_HAL_READ_REGISTER(CC_REG_OFFSET(CRY_KERNEL, AXIM_MON_ERR));
+ SSI_LOG_DEBUG("AXI completion error: axim_mon_err=0x%08X\n", axi_err);
+
+ irr &= ~SSI_AXI_ERR_IRQ_MASK;
+ }
+
+ if (unlikely(irr != 0)) {
+ SSI_LOG_DEBUG("IRR includes unknown cause bits (0x%08X)\n", irr);
+ /* Just warning */
+ }
+
+ END_CYCLE_COUNT(STAT_OP_TYPE_GENERIC, STAT_PHASE_0);
+ START_CYCLE_COUNT_AT(drvdata->isr_exit_cycles);
+
+ return IRQ_HANDLED;
+}
+
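+/* Program the AXI and host interrupt masks, clear pending interrupts and set
+ * the DMA cache-coherency parameters (SSI_CACHE_PARAMS). The is_probe flag
+ * only enables the informational logging of the cache parameters.
+ */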
+int init_cc_regs(struct ssi_drvdata *drvdata, bool is_probe)
+{
+ unsigned int val;
+ void __iomem *cc_base = drvdata->cc_base;
+
+ /* Unmask all AXI interrupt sources AXI_CFG1 register */
+ val = CC_HAL_READ_REGISTER(CC_REG_OFFSET(CRY_KERNEL, AXIM_CFG));
+ CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(CRY_KERNEL, AXIM_CFG), val & ~SSI_AXI_IRQ_MASK);
+ SSI_LOG_DEBUG("AXIM_CFG=0x%08X\n", CC_HAL_READ_REGISTER(CC_REG_OFFSET(CRY_KERNEL, AXIM_CFG)));
+
+ /* Clear all pending interrupts */
+ val = CC_HAL_READ_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_IRR));
+ SSI_LOG_DEBUG("IRR=0x%08X\n", val);
+ CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_ICR), val);
+
+ /* Unmask relevant interrupt cause */
+ val = (~(SSI_COMP_IRQ_MASK | SSI_AXI_ERR_IRQ_MASK | SSI_GPR0_IRQ_MASK));
+ CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_IMR), val);
+
+#ifdef DX_HOST_IRQ_TIMER_INIT_VAL_REG_OFFSET
+#ifdef DX_IRQ_DELAY
+ /* Set CC IRQ delay */
+ CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_IRQ_TIMER_INIT_VAL),
+ DX_IRQ_DELAY);
+#endif
+ if (CC_HAL_READ_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_IRQ_TIMER_INIT_VAL)) > 0) {
+ SSI_LOG_DEBUG("irq_delay=%d CC cycles\n",
+ CC_HAL_READ_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_IRQ_TIMER_INIT_VAL)));
+ }
+#endif
+
+ val = CC_HAL_READ_REGISTER(CC_REG_OFFSET(CRY_KERNEL, AXIM_CACHE_PARAMS));
+ if (is_probe)
+ SSI_LOG_INFO("Cache params previous: 0x%08X\n", val);
+ CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(CRY_KERNEL, AXIM_CACHE_PARAMS), SSI_CACHE_PARAMS);
+ val = CC_HAL_READ_REGISTER(CC_REG_OFFSET(CRY_KERNEL, AXIM_CACHE_PARAMS));
+ if (is_probe)
+ SSI_LOG_INFO("Cache params current: 0x%08X (expected: 0x%08X)\n", val, SSI_CACHE_PARAMS);
+
+ return 0;
+}
+
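+/* Acquire and map the CC register space and IRQ, verify the hardware
+ * signature, and initialize all driver sub-modules (sysfs, SRAM, request,
+ * buffer, power, FIPS and IV-generation managers) before registering the
+ * cipher, hash and AEAD algorithms.
+ */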
+static int init_cc_resources(struct platform_device *plat_dev)
+{
+ struct resource *req_mem_cc_regs = NULL;
+ void __iomem *cc_base = NULL;
+ bool irq_registered = false;
+ struct ssi_drvdata *new_drvdata = kzalloc(sizeof(struct ssi_drvdata), GFP_KERNEL);
+ uint32_t signature_val;
+ int rc = 0;
+
+ if (unlikely(new_drvdata == NULL)) {
+ SSI_LOG_ERR("Failed to allocate drvdata");
+ rc = -ENOMEM;
+ goto init_cc_res_err;
+ }
+
+ /* Initialize the inflight counter used in dx_ablkcipher_secure_complete
+ * to count BYPASS block operations.
+ */
+ new_drvdata->inflight_counter = 0;
+
+ dev_set_drvdata(&plat_dev->dev, new_drvdata);
+ /* Get device resources */
+ /* First CC registers space */
+ new_drvdata->res_mem = platform_get_resource(plat_dev, IORESOURCE_MEM, 0);
+ if (unlikely(new_drvdata->res_mem == NULL)) {
+ SSI_LOG_ERR("Failed getting IO memory resource\n");
+ rc = -ENODEV;
+ goto init_cc_res_err;
+ }
+ SSI_LOG_DEBUG("Got MEM resource (%s): start=0x%llX end=0x%llX\n",
+ new_drvdata->res_mem->name,
+ (unsigned long long)new_drvdata->res_mem->start,
+ (unsigned long long)new_drvdata->res_mem->end);
+ /* Map registers space */
+ req_mem_cc_regs = request_mem_region(new_drvdata->res_mem->start, resource_size(new_drvdata->res_mem), "arm_cc7x_regs");
+ if (unlikely(req_mem_cc_regs == NULL)) {
+ SSI_LOG_ERR("Couldn't allocate registers memory region at "
+ "0x%08X\n", (unsigned int)new_drvdata->res_mem->start);
+ rc = -EBUSY;
+ goto init_cc_res_err;
+ }
+ cc_base = ioremap(new_drvdata->res_mem->start, resource_size(new_drvdata->res_mem));
+ if (unlikely(cc_base == NULL)) {
+ SSI_LOG_ERR("ioremap[CC](0x%08X,0x%08X) failed\n",
+ (unsigned int)new_drvdata->res_mem->start, (unsigned int)resource_size(new_drvdata->res_mem));
+ rc = -ENOMEM;
+ goto init_cc_res_err;
+ }
+ SSI_LOG_DEBUG("CC registers mapped from %pa to 0x%p\n", &new_drvdata->res_mem->start, cc_base);
+ new_drvdata->cc_base = cc_base;
+
+
+ /* Then IRQ */
+ new_drvdata->res_irq = platform_get_resource(plat_dev, IORESOURCE_IRQ, 0);
+ if (unlikely(new_drvdata->res_irq == NULL)) {
+ SSI_LOG_ERR("Failed getting IRQ resource\n");
+ rc = -ENODEV;
+ goto init_cc_res_err;
+ }
+ rc = request_irq(new_drvdata->res_irq->start, cc_isr,
+ IRQF_SHARED, "arm_cc7x", new_drvdata);
+ if (unlikely(rc != 0)) {
+ SSI_LOG_ERR("Could not register to interrupt %llu\n",
+ (unsigned long long)new_drvdata->res_irq->start);
+ goto init_cc_res_err;
+ }
+ init_completion(&new_drvdata->icache_setup_completion);
+
+ irq_registered = true;
+ SSI_LOG_DEBUG("Registered to IRQ (%s) %llu\n",
+ new_drvdata->res_irq->name,
+ (unsigned long long)new_drvdata->res_irq->start);
+
+ new_drvdata->plat_dev = plat_dev;
+
+ if (!new_drvdata->plat_dev->dev.dma_mask)
+ new_drvdata->plat_dev->dev.dma_mask = &new_drvdata->plat_dev->dev.coherent_dma_mask;
+
+ if (!new_drvdata->plat_dev->dev.coherent_dma_mask)
+ new_drvdata->plat_dev->dev.coherent_dma_mask = DMA_BIT_MASK(DMA_BIT_MASK_LEN);
+
+ /* Verify correct mapping */
+ signature_val = CC_HAL_READ_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_SIGNATURE));
+ if (signature_val != DX_DEV_SIGNATURE) {
+ SSI_LOG_ERR("Invalid CC signature: SIGNATURE=0x%08X != expected=0x%08X\n",
+ signature_val, (uint32_t)DX_DEV_SIGNATURE);
+ rc = -EINVAL;
+ goto init_cc_res_err;
+ }
+ SSI_LOG_DEBUG("CC SIGNATURE=0x%08X\n", signature_val);
+
+ /* Display HW versions */
+ SSI_LOG(KERN_INFO, "ARM CryptoCell %s Driver: HW version 0x%08X, Driver version %s\n", SSI_DEV_NAME_STR,
+ CC_HAL_READ_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_VERSION)), DRV_MODULE_VERSION);
+
+ rc = init_cc_regs(new_drvdata, true);
+ if (unlikely(rc != 0)) {
+ SSI_LOG_ERR("init_cc_regs failed\n");
+ goto init_cc_res_err;
+ }
+
+#ifdef ENABLE_CC_SYSFS
+ rc = ssi_sysfs_init(&(plat_dev->dev.kobj), new_drvdata);
+ if (unlikely(rc != 0)) {
+ SSI_LOG_ERR("init_stat_db failed\n");
+ goto init_cc_res_err;
+ }
+#endif
+
+ rc = ssi_sram_mgr_init(new_drvdata);
+ if (unlikely(rc != 0)) {
+ SSI_LOG_ERR("ssi_sram_mgr_init failed\n");
+ goto init_cc_res_err;
+ }
+
+ new_drvdata->mlli_sram_addr =
+ ssi_sram_mgr_alloc(new_drvdata, MAX_MLLI_BUFF_SIZE);
+ if (unlikely(new_drvdata->mlli_sram_addr == NULL_SRAM_ADDR)) {
+ SSI_LOG_ERR("Failed to alloc MLLI Sram buffer\n");
+ rc = -ENOMEM;
+ goto init_cc_res_err;
+ }
+
+ rc = request_mgr_init(new_drvdata);
+ if (unlikely(rc != 0)) {
+ SSI_LOG_ERR("request_mgr_init failed\n");
+ goto init_cc_res_err;
+ }
+
+ rc = ssi_buffer_mgr_init(new_drvdata);
+ if (unlikely(rc != 0)) {
+ SSI_LOG_ERR("buffer_mgr_init failed\n");
+ goto init_cc_res_err;
+ }
+
+ rc = ssi_power_mgr_init(new_drvdata);
+ if (unlikely(rc != 0)) {
+ SSI_LOG_ERR("ssi_power_mgr_init failed\n");
+ goto init_cc_res_err;
+ }
+
+ rc = ssi_fips_init(new_drvdata);
+ if (unlikely(rc != 0)) {
+ SSI_LOG_ERR("SSI_FIPS_INIT failed 0x%x\n", rc);
+ goto init_cc_res_err;
+ }
+
+ rc = ssi_ivgen_init(new_drvdata);
+ if (unlikely(rc != 0)) {
+ SSI_LOG_ERR("ssi_ivgen_init failed\n");
+ goto init_cc_res_err;
+ }
+
+ /* Allocate crypto algs */
+ rc = ssi_ablkcipher_alloc(new_drvdata);
+ if (unlikely(rc != 0)) {
+ SSI_LOG_ERR("ssi_ablkcipher_alloc failed\n");
+ goto init_cc_res_err;
+ }
+
+ /* hash must be allocated before aead since hash exports APIs */
+ rc = ssi_hash_alloc(new_drvdata);
+ if (unlikely(rc != 0)) {
+ SSI_LOG_ERR("ssi_hash_alloc failed\n");
+ goto init_cc_res_err;
+ }
+
+ rc = ssi_aead_alloc(new_drvdata);
+ if (unlikely(rc != 0)) {
+ SSI_LOG_ERR("ssi_aead_alloc failed\n");
+ goto init_cc_res_err;
+ }
+
+ return 0;
+
+init_cc_res_err:
+ SSI_LOG_ERR("Freeing CC HW resources!\n");
+
+ if (new_drvdata != NULL) {
+ ssi_aead_free(new_drvdata);
+ ssi_hash_free(new_drvdata);
+ ssi_ablkcipher_free(new_drvdata);
+ ssi_ivgen_fini(new_drvdata);
+ ssi_power_mgr_fini(new_drvdata);
+ ssi_buffer_mgr_fini(new_drvdata);
+ request_mgr_fini(new_drvdata);
+ ssi_sram_mgr_fini(new_drvdata);
+ ssi_fips_fini(new_drvdata);
+#ifdef ENABLE_CC_SYSFS
+ ssi_sysfs_fini();
+#endif
+
+ if (req_mem_cc_regs != NULL) {
+ if (irq_registered) {
+ free_irq(new_drvdata->res_irq->start, new_drvdata);
+ new_drvdata->res_irq = NULL;
+ iounmap(cc_base);
+ new_drvdata->cc_base = NULL;
+ }
+ release_mem_region(new_drvdata->res_mem->start,
+ resource_size(new_drvdata->res_mem));
+ new_drvdata->res_mem = NULL;
+ }
+ kfree(new_drvdata);
+ dev_set_drvdata(&plat_dev->dev, NULL);
+ }
+
+ return rc;
+}
+
+void fini_cc_regs(struct ssi_drvdata *drvdata)
+{
+ /* Mask all interrupts */
+ WRITE_REGISTER(drvdata->cc_base +
+ CC_REG_OFFSET(HOST_RGF, HOST_IMR), 0xFFFFFFFF);
+
+}
+
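+/* Reverse of init_cc_resources(): free the registered algorithms, shut down
+ * the driver sub-modules, mask all interrupts and release the IRQ and the
+ * mapped register region.
+ */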
+static void cleanup_cc_resources(struct platform_device *plat_dev)
+{
+ struct ssi_drvdata *drvdata =
+ (struct ssi_drvdata *)dev_get_drvdata(&plat_dev->dev);
+
+ ssi_aead_free(drvdata);
+ ssi_hash_free(drvdata);
+ ssi_ablkcipher_free(drvdata);
+ ssi_ivgen_fini(drvdata);
+ ssi_power_mgr_fini(drvdata);
+ ssi_buffer_mgr_fini(drvdata);
+ request_mgr_fini(drvdata);
+ ssi_sram_mgr_fini(drvdata);
+ ssi_fips_fini(drvdata);
+#ifdef ENABLE_CC_SYSFS
+ ssi_sysfs_fini();
+#endif
+
+ /* Mask all interrupts */
+ WRITE_REGISTER(drvdata->cc_base + CC_REG_OFFSET(HOST_RGF, HOST_IMR),
+ 0xFFFFFFFF);
+ free_irq(drvdata->res_irq->start, drvdata);
+ drvdata->res_irq = NULL;
+
+ fini_cc_regs(drvdata);
+
+ if (drvdata->cc_base != NULL) {
+ iounmap(drvdata->cc_base);
+ release_mem_region(drvdata->res_mem->start,
+ resource_size(drvdata->res_mem));
+ drvdata->cc_base = NULL;
+ drvdata->res_mem = NULL;
+ }
+
+ kfree(drvdata);
+ dev_set_drvdata(&plat_dev->dev, NULL);
+}
+
+static int cc7x_probe(struct platform_device *plat_dev)
+{
+ int rc;
+#if defined(CONFIG_ARM) && defined(CC_DEBUG)
+ uint32_t ctr, cacheline_size;
+
+ asm volatile("mrc p15, 0, %0, c0, c0, 1" : "=r" (ctr));
+ cacheline_size = 4 << ((ctr >> 16) & 0xf);
+ SSI_LOG_DEBUG("CP15(L1_CACHE_BYTES) = %u , Kconfig(L1_CACHE_BYTES) = %u\n",
+ cacheline_size, L1_CACHE_BYTES);
+
+ asm volatile("mrc p15, 0, %0, c0, c0, 0" : "=r" (ctr));
+ SSI_LOG_DEBUG("Main ID register (MIDR): Implementer 0x%02X, Arch 0x%01X,"
+ " Part 0x%03X, Rev r%dp%d\n",
+ (ctr>>24), (ctr>>16)&0xF, (ctr>>4)&0xFFF, (ctr>>20)&0xF, ctr&0xF);
+#endif
+
+ /* Map registers space */
+ rc = init_cc_resources(plat_dev);
+ if (rc != 0)
+ return rc;
+
+ SSI_LOG(KERN_INFO, "ARM cc7x_ree device initialized\n");
+
+ return 0;
+}
+
+static int cc7x_remove(struct platform_device *plat_dev)
+{
+ SSI_LOG_DEBUG("Releasing cc7x resources...\n");
+
+ cleanup_cc_resources(plat_dev);
+
+ SSI_LOG(KERN_INFO, "ARM cc7x_ree device terminated\n");
+#ifdef ENABLE_CYCLE_COUNT
+ display_all_stat_db();
+#endif
+
+ return 0;
+}
+#if defined (CONFIG_PM_RUNTIME) || defined (CONFIG_PM_SLEEP)
+static struct dev_pm_ops arm_cc7x_driver_pm = {
+ SET_RUNTIME_PM_OPS(ssi_power_mgr_runtime_suspend, ssi_power_mgr_runtime_resume, NULL)
+};
+#endif
+
+#if defined (CONFIG_PM_RUNTIME) || defined (CONFIG_PM_SLEEP)
+#define DX_DRIVER_RUNTIME_PM (&arm_cc7x_driver_pm)
+#else
+#define DX_DRIVER_RUNTIME_PM NULL
+#endif
+
+
+#ifdef CONFIG_OF
+static const struct of_device_id arm_cc7x_dev_of_match[] = {
+ {.compatible = "arm,cryptocell-712-ree"},
+ {}
+};
+MODULE_DEVICE_TABLE(of, arm_cc7x_dev_of_match);
+#endif
+
+static struct platform_driver cc7x_driver = {
+ .driver = {
+ .name = "cc7xree",
+#ifdef CONFIG_OF
+ .of_match_table = arm_cc7x_dev_of_match,
+#endif
+ .pm = DX_DRIVER_RUNTIME_PM,
+ },
+ .probe = cc7x_probe,
+ .remove = cc7x_remove,
+};
+module_platform_driver(cc7x_driver);
+
+/* Module description */
+MODULE_DESCRIPTION("ARM TrustZone CryptoCell REE Driver");
+MODULE_VERSION(DRV_MODULE_VERSION);
+MODULE_AUTHOR("ARM");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/ccree/ssi_driver.h b/drivers/staging/ccree/ssi_driver.h
new file mode 100644
index 000000000000..891958b99634
--- /dev/null
+++ b/drivers/staging/ccree/ssi_driver.h
@@ -0,0 +1,228 @@
+/*
+ * Copyright (C) 2012-2017 ARM Limited or its affiliates.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/* \file ssi_driver.h
+ * ARM CryptoCell Linux Crypto Driver
+ */
+
+#ifndef __SSI_DRIVER_H__
+#define __SSI_DRIVER_H__
+
+#include "ssi_config.h"
+#ifdef COMP_IN_WQ
+#include <linux/workqueue.h>
+#else
+#include <linux/interrupt.h>
+#endif
+#include <linux/dma-mapping.h>
+#include <crypto/algapi.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/aes.h>
+#include <crypto/sha.h>
+#include <crypto/aead.h>
+#include <crypto/authenc.h>
+#include <crypto/hash.h>
+#include <linux/version.h>
+
+#ifndef INT32_MAX /* Missing in Linux kernel */
+#define INT32_MAX 0x7FFFFFFFL
+#endif
+
+/* Registers definitions from shared/hw/ree_include */
+#include "dx_reg_base_host.h"
+#include "dx_host.h"
+#define DX_CC_HOST_VIRT /* must be defined before including dx_cc_regs.h */
+#include "cc_hw_queue_defs.h"
+#include "cc_regs.h"
+#include "dx_reg_common.h"
+#include "cc_hal.h"
+#include "ssi_sram_mgr.h"
+#define CC_SUPPORT_SHA DX_DEV_SHA_MAX
+#include "cc_crypto_ctx.h"
+#include "ssi_sysfs.h"
+#include "hash_defs.h"
+#include "ssi_fips_local.h"
+
+#define DRV_MODULE_VERSION "3.0"
+
+#define SSI_DEV_NAME_STR "cc715ree"
+#define SSI_CC_HAS_AES_CCM 1
+#define SSI_CC_HAS_AES_GCM 1
+#define SSI_CC_HAS_AES_XTS 1
+#define SSI_CC_HAS_AES_ESSIV 1
+#define SSI_CC_HAS_AES_BITLOCKER 1
+#define SSI_CC_HAS_AES_CTS 1
+#define SSI_CC_HAS_MULTI2 0
+#define SSI_CC_HAS_CMAC 1
+
+#define SSI_AXI_IRQ_MASK ((1 << DX_AXIM_CFG_BRESPMASK_BIT_SHIFT) | (1 << DX_AXIM_CFG_RRESPMASK_BIT_SHIFT) | \
+ (1 << DX_AXIM_CFG_INFLTMASK_BIT_SHIFT) | (1 << DX_AXIM_CFG_COMPMASK_BIT_SHIFT))
+
+#define SSI_AXI_ERR_IRQ_MASK (1 << DX_HOST_IRR_AXI_ERR_INT_BIT_SHIFT)
+
+#define SSI_COMP_IRQ_MASK (1 << DX_HOST_IRR_AXIM_COMP_INT_BIT_SHIFT)
+
+/* TEE FIPS status interrupt */
+#define SSI_GPR0_IRQ_MASK (1 << DX_HOST_IRR_GPR0_BIT_SHIFT)
+
+#define SSI_CRA_PRIO 3000
+
+#define MIN_HW_QUEUE_SIZE 50 /* Minimum size required for proper function */
+
+#define MAX_REQUEST_QUEUE_SIZE 4096
+#define MAX_MLLI_BUFF_SIZE 2080
+#define MAX_ICV_NENTS_SUPPORTED 2
+
+/* Definitions for HW descriptors DIN/DOUT fields */
+#define NS_BIT 1
+#define AXI_ID 0
+/* AXI_ID is not the actual AXI ID of the transaction but the value of the
+ * AXI_ID field in the HW descriptor; the DMA engine adds 8 to that value.
+ */
+
+/* Logging macros */
+#define SSI_LOG(level, format, ...) \
+ printk(level "cc715ree::%s: " format , __func__, ##__VA_ARGS__)
+#define SSI_LOG_ERR(format, ...) SSI_LOG(KERN_ERR, format, ##__VA_ARGS__)
+#define SSI_LOG_WARNING(format, ...) SSI_LOG(KERN_WARNING, format, ##__VA_ARGS__)
+#define SSI_LOG_NOTICE(format, ...) SSI_LOG(KERN_NOTICE, format, ##__VA_ARGS__)
+#define SSI_LOG_INFO(format, ...) SSI_LOG(KERN_INFO, format, ##__VA_ARGS__)
+#ifdef CC_DEBUG
+#define SSI_LOG_DEBUG(format, ...) SSI_LOG(KERN_DEBUG, format, ##__VA_ARGS__)
+#else /* Debug log messages are removed at compile time for non-DEBUG config. */
+#define SSI_LOG_DEBUG(format, ...) do {} while (0)
+#endif
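+
+/* Example (illustrative): SSI_LOG_DEBUG("IRR=0x%08X\n", irr) expands to
+ * printk(KERN_DEBUG "cc715ree::%s: IRR=0x%08X\n", __func__, irr) when
+ * CC_DEBUG is defined, and compiles away otherwise.
+ */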
+
+#define MIN(a, b) (((a) < (b)) ? (a) : (b))
+#define MAX(a, b) (((a) > (b)) ? (a) : (b))
+
+#define SSI_MAX_IVGEN_DMA_ADDRESSES 3
+struct ssi_crypto_req {
+ void (*user_cb)(struct device *dev, void *req, void __iomem *cc_base);
+ void *user_arg;
+ dma_addr_t ivgen_dma_addr[SSI_MAX_IVGEN_DMA_ADDRESSES];
+ /* For the first 'ivgen_dma_addr_len' addresses of this array,
+ * send_request() places the generated IV. The same generated IV is
+ * used for all of these addresses.
+ */
+ unsigned int ivgen_dma_addr_len; /* Number of 'ivgen_dma_addr' elements to be filled. */
+ unsigned int ivgen_size; /* Required size of the generated IV; 8 or 16 bytes are allowed. */
+ struct completion seq_compl; /* request completion */
+#ifdef ENABLE_CYCLE_COUNT
+ enum stat_op op_type;
+ cycles_t submit_cycle;
+ bool is_monitored_p;
+#endif
+};
+
+/**
+ * struct ssi_drvdata - driver private data context
+ * @cc_base: virt address of the CC registers
+ * @irq: device IRQ number
+ * @irq_mask: Interrupt mask shadow (1 for masked interrupts)
+ * @fw_ver: SeP loaded firmware version
+ */
+struct ssi_drvdata {
+ struct resource *res_mem;
+ struct resource *res_irq;
+ void __iomem *cc_base;
+#ifdef DX_BASE_ENV_REGS
+ void __iomem *env_base; /* ARM CryptoCell development FPGAs only */
+#endif
+ unsigned int irq;
+ uint32_t irq_mask;
+ uint32_t fw_ver;
+ /* Calibration time of start/stop
+ * monitor descriptors */
+ uint32_t monitor_null_cycles;
+ struct platform_device *plat_dev;
+ ssi_sram_addr_t mlli_sram_addr;
+ struct completion icache_setup_completion;
+ void *buff_mgr_handle;
+ void *hash_handle;
+ void *aead_handle;
+ void *blkcipher_handle;
+ void *request_mgr_handle;
+ void *fips_handle;
+ void *ivgen_handle;
+ void *sram_mgr_handle;
+
+#ifdef ENABLE_CYCLE_COUNT
+ cycles_t isr_exit_cycles; /* Save for isr-to-tasklet latency */
+#endif
+ uint32_t inflight_counter;
+
+};
+
+struct ssi_crypto_alg {
+ struct list_head entry;
+ int cipher_mode;
+ int flow_mode; /* Note: currently, refers to the cipher mode only. */
+ int auth_mode;
+ struct ssi_drvdata *drvdata;
+ struct crypto_alg crypto_alg;
+ struct aead_alg aead_alg;
+};
+
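+/* Static template describing one algorithm exposed by the driver; the cipher
+ * instances live in blkcipher_algs[] in ssi_cipher.c.
+ */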
+struct ssi_alg_template {
+ char name[CRYPTO_MAX_ALG_NAME];
+ char driver_name[CRYPTO_MAX_ALG_NAME];
+ unsigned int blocksize;
+ u32 type;
+ union {
+ struct ablkcipher_alg ablkcipher;
+ struct aead_alg aead;
+ struct blkcipher_alg blkcipher;
+ struct cipher_alg cipher;
+ struct compress_alg compress;
+ } template_u;
+ int cipher_mode;
+ int flow_mode; /* Note: currently, refers to the cipher mode only. */
+ int auth_mode;
+ bool synchronous;
+ struct ssi_drvdata *drvdata;
+};
+
+struct async_gen_req_ctx {
+ dma_addr_t iv_dma_addr;
+ enum drv_crypto_direction op_type;
+};
+
+#ifdef DX_DUMP_BYTES
+void dump_byte_array(const char *name, const uint8_t *the_array, unsigned long size);
+#else
+#define dump_byte_array(name, array, size) do { \
+} while (0)
+#endif
+
+#ifdef ENABLE_CYCLE_COUNT
+#define DECL_CYCLE_COUNT_RESOURCES cycles_t _last_cycles_read
+#define START_CYCLE_COUNT() do { _last_cycles_read = get_cycles(); } while (0)
+#define END_CYCLE_COUNT(_stat_op_type, _stat_phase) update_host_stat(_stat_op_type, _stat_phase, get_cycles() - _last_cycles_read)
+#define GET_START_CYCLE_COUNT() _last_cycles_read
+#define START_CYCLE_COUNT_AT(_var) do { _var = get_cycles(); } while (0)
+#define END_CYCLE_COUNT_AT(_var, _stat_op_type, _stat_phase) update_host_stat(_stat_op_type, _stat_phase, get_cycles() - _var)
+#else
+#define DECL_CYCLE_COUNT_RESOURCES
+#define START_CYCLE_COUNT() do { } while (0)
+#define END_CYCLE_COUNT(_stat_op_type, _stat_phase) do { } while (0)
+#define GET_START_CYCLE_COUNT() 0
+#define START_CYCLE_COUNT_AT(_var) do { } while (0)
+#define END_CYCLE_COUNT_AT(_var, _stat_op_type, _stat_phase) do { } while (0)
+#endif /*ENABLE_CYCLE_COUNT*/
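+
+/* Typical usage of the cycle-count macros (see cc_isr() in ssi_driver.c):
+ *
+ *	DECL_CYCLE_COUNT_RESOURCES;
+ *	START_CYCLE_COUNT();
+ *	...measured section...
+ *	END_CYCLE_COUNT(STAT_OP_TYPE_GENERIC, STAT_PHASE_0);
+ *
+ * All of these compile to no-ops when ENABLE_CYCLE_COUNT is not defined.
+ */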
+
+int init_cc_regs(struct ssi_drvdata *drvdata, bool is_probe);
+void fini_cc_regs(struct ssi_drvdata *drvdata);
+
+#endif /*__SSI_DRIVER_H__*/
+
diff --git a/drivers/staging/ccree/ssi_fips.c b/drivers/staging/ccree/ssi_fips.c
new file mode 100644
index 000000000000..50f748511979
--- /dev/null
+++ b/drivers/staging/ccree/ssi_fips.c
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2012-2017 ARM Limited or its affiliates.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+/***************************************************************
+ * This file defines the driver FIPS APIs.                      *
+ ***************************************************************/
+
+#include <linux/module.h>
+#include "ssi_fips.h"
+
+
+extern int ssi_fips_ext_get_state(ssi_fips_state_t *p_state);
+extern int ssi_fips_ext_get_error(ssi_fips_error_t *p_err);
+
+/*
+ * This function returns the REE FIPS state.
+ * It should be called by a kernel module.
+ */
+int ssi_fips_get_state(ssi_fips_state_t *p_state)
+{
+ int rc = 0;
+
+ if (p_state == NULL) {
+ return -EINVAL;
+ }
+
+ rc = ssi_fips_ext_get_state(p_state);
+
+ return rc;
+}
+
+EXPORT_SYMBOL(ssi_fips_get_state);
+
+/*
+ * This function returns the REE FIPS error.
+ * It should be called by a kernel module.
+ */
+int ssi_fips_get_error(ssi_fips_error_t *p_err)
+{
+ int rc = 0;
+
+ if (p_err == NULL) {
+ return -EINVAL;
+ }
+
+ rc = ssi_fips_ext_get_error(p_err);
+
+ return rc;
+}
+
+EXPORT_SYMBOL(ssi_fips_get_error);
diff --git a/drivers/staging/ccree/ssi_fips.h b/drivers/staging/ccree/ssi_fips.h
new file mode 100644
index 000000000000..19bcdeb34308
--- /dev/null
+++ b/drivers/staging/ccree/ssi_fips.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright (C) 2012-2017 ARM Limited or its affiliates.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __SSI_FIPS_H__
+#define __SSI_FIPS_H__
+
+
+#ifndef INT32_MAX /* Missing in Linux kernel */
+#define INT32_MAX 0x7FFFFFFFL
+#endif
+
+
+/*!
+ * @file
+ * @brief This file contains FIPS-related definitions and APIs.
+ */
+
+typedef enum ssi_fips_state {
+ CC_FIPS_STATE_NOT_SUPPORTED = 0,
+ CC_FIPS_STATE_SUPPORTED,
+ CC_FIPS_STATE_ERROR,
+ CC_FIPS_STATE_RESERVE32B = INT32_MAX
+} ssi_fips_state_t;
+
+
+typedef enum ssi_fips_error {
+ CC_REE_FIPS_ERROR_OK = 0,
+ CC_REE_FIPS_ERROR_GENERAL,
+ CC_REE_FIPS_ERROR_FROM_TEE,
+ CC_REE_FIPS_ERROR_AES_ECB_PUT,
+ CC_REE_FIPS_ERROR_AES_CBC_PUT,
+ CC_REE_FIPS_ERROR_AES_OFB_PUT,
+ CC_REE_FIPS_ERROR_AES_CTR_PUT,
+ CC_REE_FIPS_ERROR_AES_CBC_CTS_PUT,
+ CC_REE_FIPS_ERROR_AES_XTS_PUT,
+ CC_REE_FIPS_ERROR_AES_CMAC_PUT,
+ CC_REE_FIPS_ERROR_AESCCM_PUT,
+ CC_REE_FIPS_ERROR_AESGCM_PUT,
+ CC_REE_FIPS_ERROR_DES_ECB_PUT,
+ CC_REE_FIPS_ERROR_DES_CBC_PUT,
+ CC_REE_FIPS_ERROR_SHA1_PUT,
+ CC_REE_FIPS_ERROR_SHA256_PUT,
+ CC_REE_FIPS_ERROR_SHA512_PUT,
+ CC_REE_FIPS_ERROR_HMAC_SHA1_PUT,
+ CC_REE_FIPS_ERROR_HMAC_SHA256_PUT,
+ CC_REE_FIPS_ERROR_HMAC_SHA512_PUT,
+ CC_REE_FIPS_ERROR_ROM_CHECKSUM,
+ CC_REE_FIPS_ERROR_RESERVE32B = INT32_MAX
+} ssi_fips_error_t;
+
+
+
+int ssi_fips_get_state(ssi_fips_state_t *p_state);
+int ssi_fips_get_error(ssi_fips_error_t *p_err);
+
+#endif /*__SSI_FIPS_H__*/
+
diff --git a/drivers/staging/ccree/ssi_fips_data.h b/drivers/staging/ccree/ssi_fips_data.h
new file mode 100644
index 000000000000..3fddd8f74e07
--- /dev/null
+++ b/drivers/staging/ccree/ssi_fips_data.h
@@ -0,0 +1,315 @@
+/*
+ * Copyright (C) 2012-2017 ARM Limited or its affiliates.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+The test vectors were taken from:
+
+* AES
+NIST Special Publication 800-38A 2001 Edition
+Recommendation for Block Cipher Modes of Operation
+http://csrc.nist.gov/publications/nistpubs/800-38a/sp800-38a.pdf
+Appendix F: Example Vectors for Modes of Operation of the AES
+
+* AES CTS
+Advanced Encryption Standard (AES) Encryption for Kerberos 5
+February 2005
+https://tools.ietf.org/html/rfc3962#appendix-B
+B. Sample Test Vectors
+
+* AES XTS
+http://csrc.nist.gov/groups/STM/cavp/#08
+http://csrc.nist.gov/groups/STM/cavp/documents/aes/XTSTestVectors.zip
+
+* AES CMAC
+http://csrc.nist.gov/groups/STM/cavp/index.html#07
+http://csrc.nist.gov/groups/STM/cavp/documents/mac/cmactestvectors.zip
+
+* AES-CCM
+http://csrc.nist.gov/groups/STM/cavp/#07
+http://csrc.nist.gov/groups/STM/cavp/documents/mac/ccmtestvectors.zip
+
+* AES-GCM
+http://csrc.nist.gov/groups/STM/cavp/documents/mac/gcmtestvectors.zip
+
+* Triple-DES
+NIST Special Publication 800-67 January 2012
+Recommendation for the Triple Data Encryption Algorithm (TDEA) Block Cipher
+http://csrc.nist.gov/publications/nistpubs/800-67-Rev1/SP-800-67-Rev1.pdf
+APPENDIX B: EXAMPLE OF TDEA FORWARD AND INVERSE CIPHER OPERATIONS
+and
+http://csrc.nist.gov/groups/STM/cavp/#01
+http://csrc.nist.gov/groups/STM/cavp/documents/des/tdesmct_intermediate.zip
+
+* HASH
+http://csrc.nist.gov/groups/STM/cavp/#03
+http://csrc.nist.gov/groups/STM/cavp/documents/shs/shabytetestvectors.zip
+
+* HMAC
+http://csrc.nist.gov/groups/STM/cavp/#07
+http://csrc.nist.gov/groups/STM/cavp/documents/mac/hmactestvectors.zip
+
+*/
+
+/* NIST AES */
+#define AES_128_BIT_KEY_SIZE 16
+#define AES_192_BIT_KEY_SIZE 24
+#define AES_256_BIT_KEY_SIZE 32
+#define AES_512_BIT_KEY_SIZE 64
+
+#define NIST_AES_IV_SIZE 16
+
+#define NIST_AES_128_KEY { 0x2b, 0x7e, 0x15, 0x16, 0x28, 0xae, 0xd2, 0xa6, 0xab, 0xf7, 0x15, 0x88, 0x09, 0xcf, 0x4f, 0x3c }
+#define NIST_AES_192_KEY { 0x8e, 0x73, 0xb0, 0xf7, 0xda, 0x0e, 0x64, 0x52, 0xc8, 0x10, 0xf3, 0x2b, 0x80, 0x90, 0x79, 0xe5, \
+ 0x62, 0xf8, 0xea, 0xd2, 0x52, 0x2c, 0x6b, 0x7b }
+#define NIST_AES_256_KEY { 0x60, 0x3d, 0xeb, 0x10, 0x15, 0xca, 0x71, 0xbe, 0x2b, 0x73, 0xae, 0xf0, 0x85, 0x7d, 0x77, 0x81, \
+ 0x1f, 0x35, 0x2c, 0x07, 0x3b, 0x61, 0x08, 0xd7, 0x2d, 0x98, 0x10, 0xa3, 0x09, 0x14, 0xdf, 0xf4 }
+#define NIST_AES_VECTOR_SIZE 16
+#define NIST_AES_PLAIN_DATA { 0x6b, 0xc1, 0xbe, 0xe2, 0x2e, 0x40, 0x9f, 0x96, 0xe9, 0x3d, 0x7e, 0x11, 0x73, 0x93, 0x17, 0x2a }
+
+#define NIST_AES_ECB_IV { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }
+#define NIST_AES_128_ECB_CIPHER { 0x3a, 0xd7, 0x7b, 0xb4, 0x0d, 0x7a, 0x36, 0x60, 0xa8, 0x9e, 0xca, 0xf3, 0x24, 0x66, 0xef, 0x97 }
+#define NIST_AES_192_ECB_CIPHER { 0xbd, 0x33, 0x4f, 0x1d, 0x6e, 0x45, 0xf2, 0x5f, 0xf7, 0x12, 0xa2, 0x14, 0x57, 0x1f, 0xa5, 0xcc }
+#define NIST_AES_256_ECB_CIPHER { 0xf3, 0xee, 0xd1, 0xbd, 0xb5, 0xd2, 0xa0, 0x3c, 0x06, 0x4b, 0x5a, 0x7e, 0x3d, 0xb1, 0x81, 0xf8 }
+
+#define NIST_AES_CBC_IV { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f }
+#define NIST_AES_128_CBC_CIPHER { 0x76, 0x49, 0xab, 0xac, 0x81, 0x19, 0xb2, 0x46, 0xce, 0xe9, 0x8e, 0x9b, 0x12, 0xe9, 0x19, 0x7d }
+#define NIST_AES_192_CBC_CIPHER { 0x4f, 0x02, 0x1d, 0xb2, 0x43, 0xbc, 0x63, 0x3d, 0x71, 0x78, 0x18, 0x3a, 0x9f, 0xa0, 0x71, 0xe8 }
+#define NIST_AES_256_CBC_CIPHER { 0xf5, 0x8c, 0x4c, 0x04, 0xd6, 0xe5, 0xf1, 0xba, 0x77, 0x9e, 0xab, 0xfb, 0x5f, 0x7b, 0xfb, 0xd6 }
+
+#define NIST_AES_OFB_IV { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f }
+#define NIST_AES_128_OFB_CIPHER { 0x3b, 0x3f, 0xd9, 0x2e, 0xb7, 0x2d, 0xad, 0x20, 0x33, 0x34, 0x49, 0xf8, 0xe8, 0x3c, 0xfb, 0x4a }
+#define NIST_AES_192_OFB_CIPHER { 0xcd, 0xc8, 0x0d, 0x6f, 0xdd, 0xf1, 0x8c, 0xab, 0x34, 0xc2, 0x59, 0x09, 0xc9, 0x9a, 0x41, 0x74 }
+#define NIST_AES_256_OFB_CIPHER { 0xdc, 0x7e, 0x84, 0xbf, 0xda, 0x79, 0x16, 0x4b, 0x7e, 0xcd, 0x84, 0x86, 0x98, 0x5d, 0x38, 0x60 }
+
+#define NIST_AES_CTR_IV { 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff }
+#define NIST_AES_128_CTR_CIPHER { 0x87, 0x4d, 0x61, 0x91, 0xb6, 0x20, 0xe3, 0x26, 0x1b, 0xef, 0x68, 0x64, 0x99, 0x0d, 0xb6, 0xce }
+#define NIST_AES_192_CTR_CIPHER { 0x1a, 0xbc, 0x93, 0x24, 0x17, 0x52, 0x1c, 0xa2, 0x4f, 0x2b, 0x04, 0x59, 0xfe, 0x7e, 0x6e, 0x0b }
+#define NIST_AES_256_CTR_CIPHER { 0x60, 0x1e, 0xc3, 0x13, 0x77, 0x57, 0x89, 0xa5, 0xb7, 0xa7, 0xf5, 0x04, 0xbb, 0xf3, 0xd2, 0x28 }
+
+
+#define RFC3962_AES_128_KEY { 0x63, 0x68, 0x69, 0x63, 0x6b, 0x65, 0x6e, 0x20, 0x74, 0x65, 0x72, 0x69, 0x79, 0x61, 0x6b, 0x69 }
+#define RFC3962_AES_VECTOR_SIZE 17
+#define RFC3962_AES_PLAIN_DATA { 0x49, 0x20, 0x77, 0x6f, 0x75, 0x6c, 0x64, 0x20, 0x6c, 0x69, 0x6b, 0x65, 0x20, 0x74, 0x68, 0x65, 0x20 }
+#define RFC3962_AES_CBC_CTS_IV { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }
+#define RFC3962_AES_128_CBC_CTS_CIPHER { 0xc6, 0x35, 0x35, 0x68, 0xf2, 0xbf, 0x8c, 0xb4, 0xd8, 0xa5, 0x80, 0x36, 0x2d, 0xa7, 0xff, 0x7f, 0x97 }
+
+
+#define NIST_AES_256_XTS_KEY { 0xa1, 0xb9, 0x0c, 0xba, 0x3f, 0x06, 0xac, 0x35, 0x3b, 0x2c, 0x34, 0x38, 0x76, 0x08, 0x17, 0x62, \
+ 0x09, 0x09, 0x23, 0x02, 0x6e, 0x91, 0x77, 0x18, 0x15, 0xf2, 0x9d, 0xab, 0x01, 0x93, 0x2f, 0x2f }
+#define NIST_AES_256_XTS_IV { 0x4f, 0xae, 0xf7, 0x11, 0x7c, 0xda, 0x59, 0xc6, 0x6e, 0x4b, 0x92, 0x01, 0x3e, 0x76, 0x8a, 0xd5 }
+#define NIST_AES_256_XTS_VECTOR_SIZE 16
+#define NIST_AES_256_XTS_PLAIN { 0xeb, 0xab, 0xce, 0x95, 0xb1, 0x4d, 0x3c, 0x8d, 0x6f, 0xb3, 0x50, 0x39, 0x07, 0x90, 0x31, 0x1c }
+#define NIST_AES_256_XTS_CIPHER { 0x77, 0x8a, 0xe8, 0xb4, 0x3c, 0xb9, 0x8d, 0x5a, 0x82, 0x50, 0x81, 0xd5, 0xbe, 0x47, 0x1c, 0x63 }
+
+#define NIST_AES_512_XTS_KEY { 0x1e, 0xa6, 0x61, 0xc5, 0x8d, 0x94, 0x3a, 0x0e, 0x48, 0x01, 0xe4, 0x2f, 0x4b, 0x09, 0x47, 0x14, \
+ 0x9e, 0x7f, 0x9f, 0x8e, 0x3e, 0x68, 0xd0, 0xc7, 0x50, 0x52, 0x10, 0xbd, 0x31, 0x1a, 0x0e, 0x7c, \
+ 0xd6, 0xe1, 0x3f, 0xfd, 0xf2, 0x41, 0x8d, 0x8d, 0x19, 0x11, 0xc0, 0x04, 0xcd, 0xa5, 0x8d, 0xa3, \
+ 0xd6, 0x19, 0xb7, 0xe2, 0xb9, 0x14, 0x1e, 0x58, 0x31, 0x8e, 0xea, 0x39, 0x2c, 0xf4, 0x1b, 0x08 }
+#define NIST_AES_512_XTS_IV { 0xad, 0xf8, 0xd9, 0x26, 0x27, 0x46, 0x4a, 0xd2, 0xf0, 0x42, 0x8e, 0x84, 0xa9, 0xf8, 0x75, 0x64, }
+#define NIST_AES_512_XTS_VECTOR_SIZE 32
+#define NIST_AES_512_XTS_PLAIN { 0x2e, 0xed, 0xea, 0x52, 0xcd, 0x82, 0x15, 0xe1, 0xac, 0xc6, 0x47, 0xe8, 0x10, 0xbb, 0xc3, 0x64, \
+ 0x2e, 0x87, 0x28, 0x7f, 0x8d, 0x2e, 0x57, 0xe3, 0x6c, 0x0a, 0x24, 0xfb, 0xc1, 0x2a, 0x20, 0x2e }
+#define NIST_AES_512_XTS_CIPHER { 0xcb, 0xaa, 0xd0, 0xe2, 0xf6, 0xce, 0xa3, 0xf5, 0x0b, 0x37, 0xf9, 0x34, 0xd4, 0x6a, 0x9b, 0x13, \
+ 0x0b, 0x9d, 0x54, 0xf0, 0x7e, 0x34, 0xf3, 0x6a, 0xf7, 0x93, 0xe8, 0x6f, 0x73, 0xc6, 0xd7, 0xdb }
+
+
+/* NIST AES-CMAC */
+#define NIST_AES_128_CMAC_KEY { 0x67, 0x08, 0xc9, 0x88, 0x7b, 0x84, 0x70, 0x84, 0xf1, 0x23, 0xd3, 0xdd, 0x9c, 0x3a, 0x81, 0x36 }
+#define NIST_AES_128_CMAC_PLAIN_DATA { 0xa8, 0xde, 0x55, 0x17, 0x0c, 0x6d, 0xc0, 0xd8, 0x0d, 0xe3, 0x2f, 0x50, 0x8b, 0xf4, 0x9b, 0x70 }
+#define NIST_AES_128_CMAC_MAC { 0xcf, 0xef, 0x9b, 0x78, 0x39, 0x84, 0x1f, 0xdb, 0xcc, 0xbb, 0x6c, 0x2c, 0xf2, 0x38, 0xf7 }
+#define NIST_AES_128_CMAC_VECTOR_SIZE 16
+#define NIST_AES_128_CMAC_OUTPUT_SIZE 15
+
+#define NIST_AES_192_CMAC_KEY { 0x20, 0x51, 0xaf, 0x34, 0x76, 0x2e, 0xbe, 0x55, 0x6f, 0x72, 0xa5, 0xc6, 0xed, 0xc7, 0x77, 0x1e, \
+ 0xb9, 0x24, 0x5f, 0xad, 0x76, 0xf0, 0x34, 0xbe }
+#define NIST_AES_192_CMAC_PLAIN_DATA { 0xae, 0x8e, 0x93, 0xc9, 0xc9, 0x91, 0xcf, 0x89, 0x6a, 0x49, 0x1a, 0x89, 0x07, 0xdf, 0x4e, 0x4b, \
+ 0xe5, 0x18, 0x6a, 0xe4, 0x96, 0xcd, 0x34, 0x0d, 0xc1, 0x9b, 0x23, 0x78, 0x21, 0xdb, 0x7b, 0x60 }
+#define NIST_AES_192_CMAC_MAC { 0x74, 0xf7, 0x46, 0x08, 0xc0, 0x4f, 0x0f, 0x4e, 0x47, 0xfa, 0x64, 0x04, 0x33, 0xb6, 0xe6, 0xfb }
+#define NIST_AES_192_CMAC_VECTOR_SIZE 32
+#define NIST_AES_192_CMAC_OUTPUT_SIZE 16
+
+#define NIST_AES_256_CMAC_KEY { 0x3a, 0x75, 0xa9, 0xd2, 0xbd, 0xb8, 0xc8, 0x04, 0xba, 0x4a, 0xb4, 0x98, 0x35, 0x73, 0xa6, 0xb2, \
+ 0x53, 0x16, 0x0d, 0xd9, 0x0f, 0x8e, 0xdd, 0xfb, 0x2f, 0xdc, 0x2a, 0xb1, 0x76, 0x04, 0xf5, 0xc5 }
+#define NIST_AES_256_CMAC_PLAIN_DATA { 0x42, 0xf3, 0x5d, 0x5a, 0xa5, 0x33, 0xa7, 0xa0, 0xa5, 0xf7, 0x4e, 0x14, 0x4f, 0x2a, 0x5f, 0x20 }
+#define NIST_AES_256_CMAC_MAC { 0xf1, 0x53, 0x2f, 0x87, 0x32, 0xd9, 0xf5, 0x90, 0x30, 0x07 }
+#define NIST_AES_256_CMAC_VECTOR_SIZE 16
+#define NIST_AES_256_CMAC_OUTPUT_SIZE 10
+
+
+/* NIST TDES */
+#define TDES_NUM_OF_KEYS 3
+#define NIST_TDES_VECTOR_SIZE 8
+#define NIST_TDES_IV_SIZE 8
+
+#define NIST_TDES_ECB_IV { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }
+
+#define NIST_TDES_ECB3_KEY { 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef, \
+ 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef, 0x01, \
+ 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef, 0x01, 0x23 }
+#define NIST_TDES_ECB3_PLAIN_DATA { 0x54, 0x68, 0x65, 0x20, 0x71, 0x75, 0x66, 0x63 }
+#define NIST_TDES_ECB3_CIPHER { 0xa8, 0x26, 0xfd, 0x8c, 0xe5, 0x3b, 0x85, 0x5f }
+
+#define NIST_TDES_CBC3_IV { 0xf8, 0xee, 0xe1, 0x35, 0x9c, 0x6e, 0x54, 0x40 }
+#define NIST_TDES_CBC3_KEY { 0xe9, 0xda, 0x37, 0xf8, 0xdc, 0x97, 0x6d, 0x5b, \
+ 0xb6, 0x8c, 0x04, 0xe3, 0xec, 0x98, 0x20, 0x15, \
+ 0xf4, 0x0e, 0x08, 0xb5, 0x97, 0x29, 0xf2, 0x8f }
+#define NIST_TDES_CBC3_PLAIN_DATA { 0x3b, 0xb7, 0xa7, 0xdb, 0xa3, 0xd5, 0x92, 0x91 }
+#define NIST_TDES_CBC3_CIPHER { 0x5b, 0x84, 0x24, 0xd2, 0x39, 0x3e, 0x55, 0xa2 }
+
+
+/* NIST AES-CCM */
+#define NIST_AESCCM_128_BIT_KEY_SIZE 16
+#define NIST_AESCCM_192_BIT_KEY_SIZE 24
+#define NIST_AESCCM_256_BIT_KEY_SIZE 32
+
+#define NIST_AESCCM_B0_VAL 0x79 /* L'[0:2]=1 , M'[3-5]=7 , Adata[6]=1, reserved[7]=0 */
+#define NIST_AESCCM_NONCE_SIZE 13
+#define NIST_AESCCM_IV_SIZE 16
+#define NIST_AESCCM_ADATA_SIZE 32
+#define NIST_AESCCM_TEXT_SIZE 16
+#define NIST_AESCCM_TAG_SIZE 16
+
+#define NIST_AESCCM_128_KEY { 0x70, 0x01, 0x0e, 0xd9, 0x0e, 0x61, 0x86, 0xec, 0xad, 0x41, 0xf0, 0xd3, 0xc7, 0xc4, 0x2f, 0xf8 }
+#define NIST_AESCCM_128_NONCE { 0xa5, 0xf4, 0xf4, 0x98, 0x6e, 0x98, 0x47, 0x29, 0x65, 0xf5, 0xab, 0xcc, 0x4b }
+#define NIST_AESCCM_128_ADATA { 0x3f, 0xec, 0x0e, 0x5c, 0xc2, 0x4d, 0x67, 0x13, 0x94, 0x37, 0xcb, 0xc8, 0x11, 0x24, 0x14, 0xfc, \
+ 0x8d, 0xac, 0xcd, 0x1a, 0x94, 0xb4, 0x9a, 0x4c, 0x76, 0xe2, 0xd3, 0x93, 0x03, 0x54, 0x73, 0x17 }
+#define NIST_AESCCM_128_PLAIN_TEXT { 0xbe, 0x32, 0x2f, 0x58, 0xef, 0xa7, 0xf8, 0xc6, 0x8a, 0x63, 0x5e, 0x0b, 0x9c, 0xce, 0x77, 0xf2 }
+#define NIST_AESCCM_128_CIPHER { 0x8e, 0x44, 0x25, 0xae, 0x57, 0x39, 0x74, 0xf0, 0xf0, 0x69, 0x3a, 0x18, 0x8b, 0x52, 0x58, 0x12 }
+#define NIST_AESCCM_128_MAC { 0xee, 0xf0, 0x8e, 0x3f, 0xb1, 0x5f, 0x42, 0x27, 0xe0, 0xd9, 0x89, 0xa4, 0xd5, 0x87, 0xa8, 0xcf }
+
+#define NIST_AESCCM_192_KEY { 0x68, 0x73, 0xf1, 0xc6, 0xc3, 0x09, 0x75, 0xaf, 0xf6, 0xf0, 0x84, 0x70, 0x26, 0x43, 0x21, 0x13, \
+ 0x0a, 0x6e, 0x59, 0x84, 0xad, 0xe3, 0x24, 0xe9 }
+#define NIST_AESCCM_192_NONCE { 0x7c, 0x4d, 0x2f, 0x7c, 0xec, 0x04, 0x36, 0x1f, 0x18, 0x7f, 0x07, 0x26, 0xd5 }
+#define NIST_AESCCM_192_ADATA { 0x77, 0x74, 0x3b, 0x5d, 0x83, 0xa0, 0x0d, 0x2c, 0x8d, 0x5f, 0x7e, 0x10, 0x78, 0x15, 0x31, 0xb4, \
+ 0x96, 0xe0, 0x9f, 0x3b, 0xc9, 0x29, 0x5d, 0x7a, 0xe9, 0x79, 0x9e, 0x64, 0x66, 0x8e, 0xf8, 0xc5 }
+#define NIST_AESCCM_192_PLAIN_TEXT { 0x50, 0x51, 0xa0, 0xb0, 0xb6, 0x76, 0x6c, 0xd6, 0xea, 0x29, 0xa6, 0x72, 0x76, 0x9d, 0x40, 0xfe }
+#define NIST_AESCCM_192_CIPHER { 0x0c, 0xe5, 0xac, 0x8d, 0x6b, 0x25, 0x6f, 0xb7, 0x58, 0x0b, 0xf6, 0xac, 0xc7, 0x64, 0x26, 0xaf }
+#define NIST_AESCCM_192_MAC { 0x40, 0xbc, 0xe5, 0x8f, 0xd4, 0xcd, 0x65, 0x48, 0xdf, 0x90, 0xa0, 0x33, 0x7c, 0x84, 0x20, 0x04 }
+
+#define NIST_AESCCM_256_KEY { 0xee, 0x8c, 0xe1, 0x87, 0x16, 0x97, 0x79, 0xd1, 0x3e, 0x44, 0x3d, 0x64, 0x28, 0xe3, 0x8b, 0x38, \
+ 0xb5, 0x5d, 0xfb, 0x90, 0xf0, 0x22, 0x8a, 0x8a, 0x4e, 0x62, 0xf8, 0xf5, 0x35, 0x80, 0x6e, 0x62 }
+#define NIST_AESCCM_256_NONCE { 0x12, 0x16, 0x42, 0xc4, 0x21, 0x8b, 0x39, 0x1c, 0x98, 0xe6, 0x26, 0x9c, 0x8a }
+#define NIST_AESCCM_256_ADATA { 0x71, 0x8d, 0x13, 0xe4, 0x75, 0x22, 0xac, 0x4c, 0xdf, 0x3f, 0x82, 0x80, 0x63, 0x98, 0x0b, 0x6d, \
+ 0x45, 0x2f, 0xcd, 0xcd, 0x6e, 0x1a, 0x19, 0x04, 0xbf, 0x87, 0xf5, 0x48, 0xa5, 0xfd, 0x5a, 0x05 }
+#define NIST_AESCCM_256_PLAIN_TEXT { 0xd1, 0x5f, 0x98, 0xf2, 0xc6, 0xd6, 0x70, 0xf5, 0x5c, 0x78, 0xa0, 0x66, 0x48, 0x33, 0x2b, 0xc9 }
+#define NIST_AESCCM_256_CIPHER { 0xcc, 0x17, 0xbf, 0x87, 0x94, 0xc8, 0x43, 0x45, 0x7d, 0x89, 0x93, 0x91, 0x89, 0x8e, 0xd2, 0x2a }
+#define NIST_AESCCM_256_MAC { 0x6f, 0x9d, 0x28, 0xfc, 0xb6, 0x42, 0x34, 0xe1, 0xcd, 0x79, 0x3c, 0x41, 0x44, 0xf1, 0xda, 0x50 }
+
+
+/* NIST AES-GCM */
+#define NIST_AESGCM_128_BIT_KEY_SIZE 16
+#define NIST_AESGCM_192_BIT_KEY_SIZE 24
+#define NIST_AESGCM_256_BIT_KEY_SIZE 32
+
+#define NIST_AESGCM_IV_SIZE 12
+#define NIST_AESGCM_ADATA_SIZE 16
+#define NIST_AESGCM_TEXT_SIZE 16
+#define NIST_AESGCM_TAG_SIZE 16
+
+#define NIST_AESGCM_128_KEY { 0x81, 0x6e, 0x39, 0x07, 0x04, 0x10, 0xcf, 0x21, 0x84, 0x90, 0x4d, 0xa0, 0x3e, 0xa5, 0x07, 0x5a }
+#define NIST_AESGCM_128_IV { 0x32, 0xc3, 0x67, 0xa3, 0x36, 0x26, 0x13, 0xb2, 0x7f, 0xc3, 0xe6, 0x7e }
+#define NIST_AESGCM_128_ADATA { 0xf2, 0xa3, 0x07, 0x28, 0xed, 0x87, 0x4e, 0xe0, 0x29, 0x83, 0xc2, 0x94, 0x43, 0x5d, 0x3c, 0x16 }
+#define NIST_AESGCM_128_PLAIN_TEXT { 0xec, 0xaf, 0xe9, 0x6c, 0x67, 0xa1, 0x64, 0x67, 0x44, 0xf1, 0xc8, 0x91, 0xf5, 0xe6, 0x94, 0x27 }
+#define NIST_AESGCM_128_CIPHER { 0x55, 0x2e, 0xbe, 0x01, 0x2e, 0x7b, 0xcf, 0x90, 0xfc, 0xef, 0x71, 0x2f, 0x83, 0x44, 0xe8, 0xf1 }
+#define NIST_AESGCM_128_MAC { 0xec, 0xaa, 0xe9, 0xfc, 0x68, 0x27, 0x6a, 0x45, 0xab, 0x0c, 0xa3, 0xcb, 0x9d, 0xd9, 0x53, 0x9f }
+
+#define NIST_AESGCM_192_KEY { 0x0c, 0x44, 0xd6, 0xc9, 0x28, 0xee, 0x11, 0x2c, 0xe6, 0x65, 0xfe, 0x54, 0x7e, 0xbd, 0x38, 0x72, \
+ 0x98, 0xa9, 0x54, 0xb4, 0x62, 0xf6, 0x95, 0xd8 }
+#define NIST_AESGCM_192_IV { 0x18, 0xb8, 0xf3, 0x20, 0xfe, 0xf4, 0xae, 0x8c, 0xcb, 0xe8, 0xf9, 0x52 }
+#define NIST_AESGCM_192_ADATA { 0x73, 0x41, 0xd4, 0x3f, 0x98, 0xcf, 0x38, 0x82, 0x21, 0x18, 0x09, 0x41, 0x97, 0x03, 0x76, 0xe8 }
+#define NIST_AESGCM_192_PLAIN_TEXT { 0x96, 0xad, 0x07, 0xf9, 0xb6, 0x28, 0xb6, 0x52, 0xcf, 0x86, 0xcb, 0x73, 0x17, 0x88, 0x6f, 0x51 }
+#define NIST_AESGCM_192_CIPHER { 0xa6, 0x64, 0x07, 0x81, 0x33, 0x40, 0x5e, 0xb9, 0x09, 0x4d, 0x36, 0xf7, 0xe0, 0x70, 0x19, 0x1f }
+#define NIST_AESGCM_192_MAC { 0xe8, 0xf9, 0xc3, 0x17, 0x84, 0x7c, 0xe3, 0xf3, 0xc2, 0x39, 0x94, 0xa4, 0x02, 0xf0, 0x65, 0x81 }
+
+#define NIST_AESGCM_256_KEY { 0x54, 0xe3, 0x52, 0xea, 0x1d, 0x84, 0xbf, 0xe6, 0x4a, 0x10, 0x11, 0x09, 0x61, 0x11, 0xfb, 0xe7, \
+ 0x66, 0x8a, 0xd2, 0x20, 0x3d, 0x90, 0x2a, 0x01, 0x45, 0x8c, 0x3b, 0xbd, 0x85, 0xbf, 0xce, 0x14 }
+#define NIST_AESGCM_256_IV { 0xdf, 0x7c, 0x3b, 0xca, 0x00, 0x39, 0x6d, 0x0c, 0x01, 0x84, 0x95, 0xd9 }
+#define NIST_AESGCM_256_ADATA { 0x7e, 0x96, 0x8d, 0x71, 0xb5, 0x0c, 0x1f, 0x11, 0xfd, 0x00, 0x1f, 0x3f, 0xef, 0x49, 0xd0, 0x45 }
+#define NIST_AESGCM_256_PLAIN_TEXT { 0x85, 0xfc, 0x3d, 0xfa, 0xd9, 0xb5, 0xa8, 0xd3, 0x25, 0x8e, 0x4f, 0xc4, 0x45, 0x71, 0xbd, 0x3b }
+#define NIST_AESGCM_256_CIPHER { 0x42, 0x6e, 0x0e, 0xfc, 0x69, 0x3b, 0x7b, 0xe1, 0xf3, 0x01, 0x8d, 0xb7, 0xdd, 0xbb, 0x7e, 0x4d }
+#define NIST_AESGCM_256_MAC { 0xee, 0x82, 0x57, 0x79, 0x5b, 0xe6, 0xa1, 0x16, 0x4d, 0x7e, 0x1d, 0x2d, 0x6c, 0xac, 0x77, 0xa7 }
+
+
+/* NIST HASH */
+#define NIST_SHA_MSG_SIZE 16
+
+#define NIST_SHA_1_MSG { 0x35, 0x52, 0x69, 0x4c, 0xdf, 0x66, 0x3f, 0xd9, 0x4b, 0x22, 0x47, 0x47, 0xac, 0x40, 0x6a, 0xaf }
+#define NIST_SHA_1_MD { 0xa1, 0x50, 0xde, 0x92, 0x74, 0x54, 0x20, 0x2d, 0x94, 0xe6, 0x56, 0xde, 0x4c, 0x7c, 0x0c, 0xa6, \
+ 0x91, 0xde, 0x95, 0x5d }
+
+#define NIST_SHA_256_MSG { 0x0a, 0x27, 0x84, 0x7c, 0xdc, 0x98, 0xbd, 0x6f, 0x62, 0x22, 0x0b, 0x04, 0x6e, 0xdd, 0x76, 0x2b }
+#define NIST_SHA_256_MD { 0x80, 0xc2, 0x5e, 0xc1, 0x60, 0x05, 0x87, 0xe7, 0xf2, 0x8b, 0x18, 0xb1, 0xb1, 0x8e, 0x3c, 0xdc, \
+ 0x89, 0x92, 0x8e, 0x39, 0xca, 0xb3, 0xbc, 0x25, 0xe4, 0xd4, 0xa4, 0xc1, 0x39, 0xbc, 0xed, 0xc4 }
+
+#define NIST_SHA_512_MSG { 0xcd, 0x67, 0xbd, 0x40, 0x54, 0xaa, 0xa3, 0xba, 0xa0, 0xdb, 0x17, 0x8c, 0xe2, 0x32, 0xfd, 0x5a }
+#define NIST_SHA_512_MD { 0x0d, 0x85, 0x21, 0xf8, 0xf2, 0xf3, 0x90, 0x03, 0x32, 0xd1, 0xa1, 0xa5, 0x5c, 0x60, 0xba, 0x81, \
+ 0xd0, 0x4d, 0x28, 0xdf, 0xe8, 0xc5, 0x04, 0xb6, 0x32, 0x8a, 0xe7, 0x87, 0x92, 0x5f, 0xe0, 0x18, \
+ 0x8f, 0x2b, 0xa9, 0x1c, 0x3a, 0x9f, 0x0c, 0x16, 0x53, 0xc4, 0xbf, 0x0a, 0xda, 0x35, 0x64, 0x55, \
+ 0xea, 0x36, 0xfd, 0x31, 0xf8, 0xe7, 0x3e, 0x39, 0x51, 0xca, 0xd4, 0xeb, 0xba, 0x8c, 0x6e, 0x04 }
+
+
+/* NIST HMAC */
+#define NIST_HMAC_MSG_SIZE 128
+
+#define NIST_HMAC_SHA1_KEY_SIZE 10
+#define NIST_HMAC_SHA1_KEY { 0x59, 0x78, 0x59, 0x28, 0xd7, 0x25, 0x16, 0xe3, 0x12, 0x72 }
+#define NIST_HMAC_SHA1_MSG { 0xa3, 0xce, 0x88, 0x99, 0xdf, 0x10, 0x22, 0xe8, 0xd2, 0xd5, 0x39, 0xb4, 0x7b, 0xf0, 0xe3, 0x09, \
+ 0xc6, 0x6f, 0x84, 0x09, 0x5e, 0x21, 0x43, 0x8e, 0xc3, 0x55, 0xbf, 0x11, 0x9c, 0xe5, 0xfd, 0xcb, \
+ 0x4e, 0x73, 0xa6, 0x19, 0xcd, 0xf3, 0x6f, 0x25, 0xb3, 0x69, 0xd8, 0xc3, 0x8f, 0xf4, 0x19, 0x99, \
+ 0x7f, 0x0c, 0x59, 0x83, 0x01, 0x08, 0x22, 0x36, 0x06, 0xe3, 0x12, 0x23, 0x48, 0x3f, 0xd3, 0x9e, \
+ 0xde, 0xaa, 0x4d, 0x3f, 0x0d, 0x21, 0x19, 0x88, 0x62, 0xd2, 0x39, 0xc9, 0xfd, 0x26, 0x07, 0x41, \
+ 0x30, 0xff, 0x6c, 0x86, 0x49, 0x3f, 0x52, 0x27, 0xab, 0x89, 0x5c, 0x8f, 0x24, 0x4b, 0xd4, 0x2c, \
+ 0x7a, 0xfc, 0xe5, 0xd1, 0x47, 0xa2, 0x0a, 0x59, 0x07, 0x98, 0xc6, 0x8e, 0x70, 0x8e, 0x96, 0x49, \
+ 0x02, 0xd1, 0x24, 0xda, 0xde, 0xcd, 0xbd, 0xa9, 0xdb, 0xd0, 0x05, 0x1e, 0xd7, 0x10, 0xe9, 0xbf }
+#define NIST_HMAC_SHA1_MD { 0x3c, 0x81, 0x62, 0x58, 0x9a, 0xaf, 0xae, 0xe0, 0x24, 0xfc, 0x9a, 0x5c, 0xa5, 0x0d, 0xd2, 0x33, \
+ 0x6f, 0xe3, 0xeb, 0x28 }
+
+#define NIST_HMAC_SHA256_KEY_SIZE 40
+#define NIST_HMAC_SHA256_KEY { 0x97, 0x79, 0xd9, 0x12, 0x06, 0x42, 0x79, 0x7f, 0x17, 0x47, 0x02, 0x5d, 0x5b, 0x22, 0xb7, 0xac, \
+ 0x60, 0x7c, 0xab, 0x08, 0xe1, 0x75, 0x8f, 0x2f, 0x3a, 0x46, 0xc8, 0xbe, 0x1e, 0x25, 0xc5, 0x3b, \
+ 0x8c, 0x6a, 0x8f, 0x58, 0xff, 0xef, 0xa1, 0x76 }
+#define NIST_HMAC_SHA256_MSG { 0xb1, 0x68, 0x9c, 0x25, 0x91, 0xea, 0xf3, 0xc9, 0xe6, 0x60, 0x70, 0xf8, 0xa7, 0x79, 0x54, 0xff, \
+ 0xb8, 0x17, 0x49, 0xf1, 0xb0, 0x03, 0x46, 0xf9, 0xdf, 0xe0, 0xb2, 0xee, 0x90, 0x5d, 0xcc, 0x28, \
+ 0x8b, 0xaf, 0x4a, 0x92, 0xde, 0x3f, 0x40, 0x01, 0xdd, 0x9f, 0x44, 0xc4, 0x68, 0xc3, 0xd0, 0x7d, \
+ 0x6c, 0x6e, 0xe8, 0x2f, 0xac, 0xea, 0xfc, 0x97, 0xc2, 0xfc, 0x0f, 0xc0, 0x60, 0x17, 0x19, 0xd2, \
+ 0xdc, 0xd0, 0xaa, 0x2a, 0xec, 0x92, 0xd1, 0xb0, 0xae, 0x93, 0x3c, 0x65, 0xeb, 0x06, 0xa0, 0x3c, \
+ 0x9c, 0x93, 0x5c, 0x2b, 0xad, 0x04, 0x59, 0x81, 0x02, 0x41, 0x34, 0x7a, 0xb8, 0x7e, 0x9f, 0x11, \
+ 0xad, 0xb3, 0x04, 0x15, 0x42, 0x4c, 0x6c, 0x7f, 0x5f, 0x22, 0xa0, 0x03, 0xb8, 0xab, 0x8d, 0xe5, \
+ 0x4f, 0x6d, 0xed, 0x0e, 0x3a, 0xb9, 0x24, 0x5f, 0xa7, 0x95, 0x68, 0x45, 0x1d, 0xfa, 0x25, 0x8e }
+#define NIST_HMAC_SHA256_MD { 0x76, 0x9f, 0x00, 0xd3, 0xe6, 0xa6, 0xcc, 0x1f, 0xb4, 0x26, 0xa1, 0x4a, 0x4f, 0x76, 0xc6, 0x46, \
+ 0x2e, 0x61, 0x49, 0x72, 0x6e, 0x0d, 0xee, 0x0e, 0xc0, 0xcf, 0x97, 0xa1, 0x66, 0x05, 0xac, 0x8b }
+
+#define NIST_HMAC_SHA512_KEY_SIZE 100
+#define NIST_HMAC_SHA512_KEY { 0x57, 0xc2, 0xeb, 0x67, 0x7b, 0x50, 0x93, 0xb9, 0xe8, 0x29, 0xea, 0x4b, 0xab, 0xb5, 0x0b, 0xde, \
+ 0x55, 0xd0, 0xad, 0x59, 0xfe, 0xc3, 0x4a, 0x61, 0x89, 0x73, 0x80, 0x2b, 0x2a, 0xd9, 0xb7, 0x8e, \
+ 0x26, 0xb2, 0x04, 0x5d, 0xda, 0x78, 0x4d, 0xf3, 0xff, 0x90, 0xae, 0x0f, 0x2c, 0xc5, 0x1c, 0xe3, \
+ 0x9c, 0xf5, 0x48, 0x67, 0x32, 0x0a, 0xc6, 0xf3, 0xba, 0x2c, 0x6f, 0x0d, 0x72, 0x36, 0x04, 0x80, \
+ 0xc9, 0x66, 0x14, 0xae, 0x66, 0x58, 0x1f, 0x26, 0x6c, 0x35, 0xfb, 0x79, 0xfd, 0x28, 0x77, 0x4a, \
+ 0xfd, 0x11, 0x3f, 0xa5, 0x18, 0x7e, 0xff, 0x92, 0x06, 0xd7, 0xcb, 0xe9, 0x0d, 0xd8, 0xbf, 0x67, \
+ 0xc8, 0x44, 0xe2, 0x02 }
+#define NIST_HMAC_SHA512_MSG { 0x24, 0x23, 0xdf, 0xf4, 0x8b, 0x31, 0x2b, 0xe8, 0x64, 0xcb, 0x34, 0x90, 0x64, 0x1f, 0x79, 0x3d, \
+ 0x2b, 0x9f, 0xb6, 0x8a, 0x77, 0x63, 0xb8, 0xe2, 0x98, 0xc8, 0x6f, 0x42, 0x24, 0x5e, 0x45, 0x40, \
+ 0xeb, 0x01, 0xae, 0x4d, 0x2d, 0x45, 0x00, 0x37, 0x0b, 0x18, 0x86, 0xf2, 0x3c, 0xa2, 0xcf, 0x97, \
+ 0x01, 0x70, 0x4c, 0xad, 0x5b, 0xd2, 0x1b, 0xa8, 0x7b, 0x81, 0x1d, 0xaf, 0x7a, 0x85, 0x4e, 0xa2, \
+ 0x4a, 0x56, 0x56, 0x5c, 0xed, 0x42, 0x5b, 0x35, 0xe4, 0x0e, 0x1a, 0xcb, 0xeb, 0xe0, 0x36, 0x03, \
+ 0xe3, 0x5d, 0xcf, 0x4a, 0x10, 0x0e, 0x57, 0x21, 0x84, 0x08, 0xa1, 0xd8, 0xdb, 0xcc, 0x3b, 0x99, \
+ 0x29, 0x6c, 0xfe, 0xa9, 0x31, 0xef, 0xe3, 0xeb, 0xd8, 0xf7, 0x19, 0xa6, 0xd9, 0xa1, 0x54, 0x87, \
+ 0xb9, 0xad, 0x67, 0xea, 0xfe, 0xdf, 0x15, 0x55, 0x9c, 0xa4, 0x24, 0x45, 0xb0, 0xf9, 0xb4, 0x2e }
+#define NIST_HMAC_SHA512_MD { 0x33, 0xc5, 0x11, 0xe9, 0xbc, 0x23, 0x07, 0xc6, 0x27, 0x58, 0xdf, 0x61, 0x12, 0x5a, 0x98, 0x0e, \
+ 0xe6, 0x4c, 0xef, 0xeb, 0xd9, 0x09, 0x31, 0xcb, 0x91, 0xc1, 0x37, 0x42, 0xd4, 0x71, 0x4c, 0x06, \
+ 0xde, 0x40, 0x03, 0xfa, 0xf3, 0xc4, 0x1c, 0x06, 0xae, 0xfc, 0x63, 0x8a, 0xd4, 0x7b, 0x21, 0x90, \
+ 0x6e, 0x6b, 0x10, 0x48, 0x16, 0xb7, 0x2d, 0xe6, 0x26, 0x9e, 0x04, 0x5a, 0x1f, 0x44, 0x29, 0xd4 }
+
diff --git a/drivers/staging/ccree/ssi_fips_ext.c b/drivers/staging/ccree/ssi_fips_ext.c
new file mode 100644
index 000000000000..2ac432fe1233
--- /dev/null
+++ b/drivers/staging/ccree/ssi_fips_ext.c
@@ -0,0 +1,96 @@
+/*
+ * Copyright (C) 2012-2017 ARM Limited or its affiliates.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/**************************************************************
+This file defines the driver FIPS functions that should be
+implemented by the driver user. The current implementation is sample code only.
+***************************************************************/
+
+#include <linux/module.h>
+#include "ssi_fips_local.h"
+#include "ssi_driver.h"
+
+
+static bool tee_error;
+module_param(tee_error, bool, 0644);
+MODULE_PARM_DESC(tee_error, "Simulate TEE library failure flag: 0 - no error (default), 1 - TEE error occurred");
+
+static ssi_fips_state_t fips_state = CC_FIPS_STATE_NOT_SUPPORTED;
+static ssi_fips_error_t fips_error = CC_REE_FIPS_ERROR_OK;
+
+/*
+This function returns the FIPS REE state.
+The function should be implemented by the driver user, depending on where
+the state value is stored.
+The reference code uses a global variable.
+*/
+int ssi_fips_ext_get_state(ssi_fips_state_t *p_state)
+{
+ int rc = 0;
+
+ if (p_state == NULL) {
+ return -EINVAL;
+ }
+
+ *p_state = fips_state;
+
+ return rc;
+}
+
+/*
+This function returns the FIPS REE error.
+The function should be implemented by the driver user, depending on where
+the error value is stored.
+The reference code uses a global variable.
+*/
+int ssi_fips_ext_get_error(ssi_fips_error_t *p_err)
+{
+ int rc = 0;
+
+ if (p_err == NULL) {
+ return -EINVAL;
+ }
+
+ *p_err = fips_error;
+
+ return rc;
+}
+
+/*
+This function sets the FIPS REE state.
+The function should be implemented by the driver user, depending on where
+the state value is stored.
+The reference code uses a global variable.
+*/
+int ssi_fips_ext_set_state(ssi_fips_state_t state)
+{
+ fips_state = state;
+ return 0;
+}
+
+/*
+This function sets the FIPS REE error.
+The function should be implemented by the driver user, depending on where
+the error value is stored.
+The reference code uses a global variable.
+*/
+int ssi_fips_ext_set_error(ssi_fips_error_t err)
+{
+ fips_error = err;
+ return 0;
+}
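+
+/*
+ * Illustrative only: a production integration might call
+ * ssi_fips_ext_set_state() once its power-up self-tests complete and
+ * ssi_fips_ext_set_error() from whatever path reports a TEE or hardware
+ * failure, replacing the static variables above with its own storage.
+ */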
+
+
diff --git a/drivers/staging/ccree/ssi_fips_ll.c b/drivers/staging/ccree/ssi_fips_ll.c
new file mode 100644
index 000000000000..d573574bbb98
--- /dev/null
+++ b/drivers/staging/ccree/ssi_fips_ll.c
@@ -0,0 +1,1681 @@
+/*
+ * Copyright (C) 2012-2017 ARM Limited or its affiliates.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/**************************************************************
+This file defines the driver FIPS low-level implementation functions
+that execute the KATs (known-answer tests).
+***************************************************************/
+#include <linux/kernel.h>
+
+#include "ssi_driver.h"
+#include "ssi_fips_local.h"
+#include "ssi_fips_data.h"
+#include "cc_crypto_ctx.h"
+#include "ssi_hash.h"
+#include "ssi_request_mgr.h"
+
+
+static const uint32_t digest_len_init[] = {
+ 0x00000040, 0x00000000, 0x00000000, 0x00000000 };
+static const uint32_t sha1_init[] = {
+ SHA1_H4, SHA1_H3, SHA1_H2, SHA1_H1, SHA1_H0 };
+static const uint32_t sha256_init[] = {
+ SHA256_H7, SHA256_H6, SHA256_H5, SHA256_H4,
+ SHA256_H3, SHA256_H2, SHA256_H1, SHA256_H0 };
+#if (CC_SUPPORT_SHA > 256)
+static const uint32_t digest_len_sha512_init[] = {
+ 0x00000080, 0x00000000, 0x00000000, 0x00000000 };
+static const uint64_t sha512_init[] = {
+ SHA512_H7, SHA512_H6, SHA512_H5, SHA512_H4,
+ SHA512_H3, SHA512_H2, SHA512_H1, SHA512_H0 };
+#endif
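+
+/*
+ * digest_len_init/digest_len_sha512_init preload the hash engine's
+ * "bytes hashed so far" counter with one block (64 bytes for SHA-1/SHA-256,
+ * 128 bytes for SHA-512): when the outer HMAC hash resumes from the stored
+ * H(K0 ^ opad) state, the opad block has already been consumed.
+ */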
+
+
+#define NIST_CIPHER_AES_MAX_VECTOR_SIZE 32
+
+struct fips_cipher_ctx {
+ uint8_t iv[CC_AES_IV_SIZE];
+ uint8_t key[AES_512_BIT_KEY_SIZE];
+ uint8_t din[NIST_CIPHER_AES_MAX_VECTOR_SIZE];
+ uint8_t dout[NIST_CIPHER_AES_MAX_VECTOR_SIZE];
+};
+
+typedef struct _FipsCipherData {
+ uint8_t isAes;
+ uint8_t key[AES_512_BIT_KEY_SIZE];
+ size_t keySize;
+ uint8_t iv[CC_AES_IV_SIZE];
+ enum drv_crypto_direction direction;
+ enum drv_cipher_mode oprMode;
+ uint8_t dataIn[NIST_CIPHER_AES_MAX_VECTOR_SIZE];
+ uint8_t dataOut[NIST_CIPHER_AES_MAX_VECTOR_SIZE];
+ size_t dataInSize;
+} FipsCipherData;
+
+
+struct fips_cmac_ctx {
+ uint8_t key[AES_256_BIT_KEY_SIZE];
+ uint8_t din[NIST_CIPHER_AES_MAX_VECTOR_SIZE];
+ uint8_t mac_res[CC_DIGEST_SIZE_MAX];
+};
+
+typedef struct _FipsCmacData {
+ enum drv_crypto_direction direction;
+ uint8_t key[AES_256_BIT_KEY_SIZE];
+ size_t key_size;
+ uint8_t data_in[NIST_CIPHER_AES_MAX_VECTOR_SIZE];
+ size_t data_in_size;
+ uint8_t mac_res[CC_DIGEST_SIZE_MAX];
+ size_t mac_res_size;
+} FipsCmacData;
+
+
+struct fips_hash_ctx {
+ uint8_t initial_digest[CC_DIGEST_SIZE_MAX];
+ uint8_t din[NIST_SHA_MSG_SIZE];
+ uint8_t mac_res[CC_DIGEST_SIZE_MAX];
+};
+
+typedef struct _FipsHashData {
+ enum drv_hash_mode hash_mode;
+ uint8_t data_in[NIST_SHA_MSG_SIZE];
+ size_t data_in_size;
+ uint8_t mac_res[CC_DIGEST_SIZE_MAX];
+} FipsHashData;
+
+
+/* note that the HMAC key length must be less than or equal to the block size (64 bytes up to SHA-256, 128 bytes for SHA-384/512) */
+struct fips_hmac_ctx {
+ uint8_t initial_digest[CC_DIGEST_SIZE_MAX];
+ uint8_t key[CC_HMAC_BLOCK_SIZE_MAX];
+ uint8_t k0[CC_HMAC_BLOCK_SIZE_MAX];
+ uint8_t digest_bytes_len[HASH_LEN_SIZE];
+ uint8_t tmp_digest[CC_DIGEST_SIZE_MAX];
+ uint8_t din[NIST_HMAC_MSG_SIZE];
+ uint8_t mac_res[CC_DIGEST_SIZE_MAX];
+};
+
+typedef struct _FipsHmacData {
+ enum drv_hash_mode hash_mode;
+ uint8_t key[CC_HMAC_BLOCK_SIZE_MAX];
+ size_t key_size;
+ uint8_t data_in[NIST_HMAC_MSG_SIZE];
+ size_t data_in_size;
+ uint8_t mac_res[CC_DIGEST_SIZE_MAX];
+} FipsHmacData;
+
+
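+/*
+ * Scratch layout for the CCM KAT: the 16-byte B0 block (flags || nonce ||
+ * l(m)) followed by the associated data prefixed with its 2-byte big-endian
+ * length, as assembled in ssi_ccm_fips_power_up_tests() below.
+ */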
+#define FIPS_CCM_B0_A0_ADATA_SIZE (NIST_AESCCM_IV_SIZE + NIST_AESCCM_IV_SIZE + NIST_AESCCM_ADATA_SIZE)
+
+struct fips_ccm_ctx {
+ uint8_t b0_a0_adata[FIPS_CCM_B0_A0_ADATA_SIZE];
+ uint8_t iv[NIST_AESCCM_IV_SIZE];
+ uint8_t ctr_cnt_0[NIST_AESCCM_IV_SIZE];
+ uint8_t key[CC_AES_KEY_SIZE_MAX];
+ uint8_t din[NIST_AESCCM_TEXT_SIZE];
+ uint8_t dout[NIST_AESCCM_TEXT_SIZE];
+ uint8_t mac_res[NIST_AESCCM_TAG_SIZE];
+};
+
+typedef struct _FipsCcmData {
+ enum drv_crypto_direction direction;
+ uint8_t key[CC_AES_KEY_SIZE_MAX];
+ size_t keySize;
+ uint8_t nonce[NIST_AESCCM_NONCE_SIZE];
+ uint8_t adata[NIST_AESCCM_ADATA_SIZE];
+ size_t adataSize;
+ uint8_t dataIn[NIST_AESCCM_TEXT_SIZE];
+ size_t dataInSize;
+ uint8_t dataOut[NIST_AESCCM_TEXT_SIZE];
+ uint8_t tagSize;
+ uint8_t macResOut[NIST_AESCCM_TAG_SIZE];
+} FipsCcmData;
+
+
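+/*
+ * Scratch area for the GCM KAT (NIST SP 800-38D terms): hkey holds the
+ * GHASH subkey H = AES-K(0^128), len_block the len(A) || len(C) block, and
+ * iv_inc1/iv_inc2 the pre-computed counter blocks derived from the 96-bit
+ * IV (J0 and inc32(J0)).
+ */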
+struct fips_gcm_ctx {
+ uint8_t adata[NIST_AESGCM_ADATA_SIZE];
+ uint8_t key[CC_AES_KEY_SIZE_MAX];
+ uint8_t hkey[CC_AES_KEY_SIZE_MAX];
+ uint8_t din[NIST_AESGCM_TEXT_SIZE];
+ uint8_t dout[NIST_AESGCM_TEXT_SIZE];
+ uint8_t mac_res[NIST_AESGCM_TAG_SIZE];
+ uint8_t len_block[AES_BLOCK_SIZE];
+ uint8_t iv_inc1[AES_BLOCK_SIZE];
+ uint8_t iv_inc2[AES_BLOCK_SIZE];
+};
+
+typedef struct _FipsGcmData {
+ enum drv_crypto_direction direction;
+ uint8_t key[CC_AES_KEY_SIZE_MAX];
+ size_t keySize;
+ uint8_t iv[NIST_AESGCM_IV_SIZE];
+ uint8_t adata[NIST_AESGCM_ADATA_SIZE];
+ size_t adataSize;
+ uint8_t dataIn[NIST_AESGCM_TEXT_SIZE];
+ size_t dataInSize;
+ uint8_t dataOut[NIST_AESGCM_TEXT_SIZE];
+ uint8_t tagSize;
+ uint8_t macResOut[NIST_AESGCM_TAG_SIZE];
+} FipsGcmData;
+
+
+typedef union _fips_ctx {
+ struct fips_cipher_ctx cipher;
+ struct fips_cmac_ctx cmac;
+ struct fips_hash_ctx hash;
+ struct fips_hmac_ctx hmac;
+ struct fips_ccm_ctx ccm;
+ struct fips_gcm_ctx gcm;
+} fips_ctx;
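+
+/*
+ * All KAT flavours share a single DMA-coherent scratch buffer; the union
+ * above sizes that buffer to the largest per-algorithm context.
+ */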
+
+
+/* test data tables */
+static const FipsCipherData FipsCipherDataTable[] = {
+ /* AES */
+ { 1, NIST_AES_128_KEY, CC_AES_128_BIT_KEY_SIZE, NIST_AES_ECB_IV, DRV_CRYPTO_DIRECTION_ENCRYPT, DRV_CIPHER_ECB, NIST_AES_PLAIN_DATA, NIST_AES_128_ECB_CIPHER, NIST_AES_VECTOR_SIZE },
+ { 1, NIST_AES_128_KEY, CC_AES_128_BIT_KEY_SIZE, NIST_AES_ECB_IV, DRV_CRYPTO_DIRECTION_DECRYPT, DRV_CIPHER_ECB, NIST_AES_128_ECB_CIPHER, NIST_AES_PLAIN_DATA, NIST_AES_VECTOR_SIZE },
+ { 1, NIST_AES_192_KEY, CC_AES_192_BIT_KEY_SIZE, NIST_AES_ECB_IV, DRV_CRYPTO_DIRECTION_ENCRYPT, DRV_CIPHER_ECB, NIST_AES_PLAIN_DATA, NIST_AES_192_ECB_CIPHER, NIST_AES_VECTOR_SIZE },
+ { 1, NIST_AES_192_KEY, CC_AES_192_BIT_KEY_SIZE, NIST_AES_ECB_IV, DRV_CRYPTO_DIRECTION_DECRYPT, DRV_CIPHER_ECB, NIST_AES_192_ECB_CIPHER, NIST_AES_PLAIN_DATA, NIST_AES_VECTOR_SIZE },
+ { 1, NIST_AES_256_KEY, CC_AES_256_BIT_KEY_SIZE, NIST_AES_ECB_IV, DRV_CRYPTO_DIRECTION_ENCRYPT, DRV_CIPHER_ECB, NIST_AES_PLAIN_DATA, NIST_AES_256_ECB_CIPHER, NIST_AES_VECTOR_SIZE },
+ { 1, NIST_AES_256_KEY, CC_AES_256_BIT_KEY_SIZE, NIST_AES_ECB_IV, DRV_CRYPTO_DIRECTION_DECRYPT, DRV_CIPHER_ECB, NIST_AES_256_ECB_CIPHER, NIST_AES_PLAIN_DATA, NIST_AES_VECTOR_SIZE },
+ { 1, NIST_AES_128_KEY, CC_AES_128_BIT_KEY_SIZE, NIST_AES_CBC_IV, DRV_CRYPTO_DIRECTION_ENCRYPT, DRV_CIPHER_CBC, NIST_AES_PLAIN_DATA, NIST_AES_128_CBC_CIPHER, NIST_AES_VECTOR_SIZE },
+ { 1, NIST_AES_128_KEY, CC_AES_128_BIT_KEY_SIZE, NIST_AES_CBC_IV, DRV_CRYPTO_DIRECTION_DECRYPT, DRV_CIPHER_CBC, NIST_AES_128_CBC_CIPHER, NIST_AES_PLAIN_DATA, NIST_AES_VECTOR_SIZE },
+ { 1, NIST_AES_192_KEY, CC_AES_192_BIT_KEY_SIZE, NIST_AES_CBC_IV, DRV_CRYPTO_DIRECTION_ENCRYPT, DRV_CIPHER_CBC, NIST_AES_PLAIN_DATA, NIST_AES_192_CBC_CIPHER, NIST_AES_VECTOR_SIZE },
+ { 1, NIST_AES_192_KEY, CC_AES_192_BIT_KEY_SIZE, NIST_AES_CBC_IV, DRV_CRYPTO_DIRECTION_DECRYPT, DRV_CIPHER_CBC, NIST_AES_192_CBC_CIPHER, NIST_AES_PLAIN_DATA, NIST_AES_VECTOR_SIZE },
+ { 1, NIST_AES_256_KEY, CC_AES_256_BIT_KEY_SIZE, NIST_AES_CBC_IV, DRV_CRYPTO_DIRECTION_ENCRYPT, DRV_CIPHER_CBC, NIST_AES_PLAIN_DATA, NIST_AES_256_CBC_CIPHER, NIST_AES_VECTOR_SIZE },
+ { 1, NIST_AES_256_KEY, CC_AES_256_BIT_KEY_SIZE, NIST_AES_CBC_IV, DRV_CRYPTO_DIRECTION_DECRYPT, DRV_CIPHER_CBC, NIST_AES_256_CBC_CIPHER, NIST_AES_PLAIN_DATA, NIST_AES_VECTOR_SIZE },
+ { 1, NIST_AES_128_KEY, CC_AES_128_BIT_KEY_SIZE, NIST_AES_OFB_IV, DRV_CRYPTO_DIRECTION_ENCRYPT, DRV_CIPHER_OFB, NIST_AES_PLAIN_DATA, NIST_AES_128_OFB_CIPHER, NIST_AES_VECTOR_SIZE },
+ { 1, NIST_AES_128_KEY, CC_AES_128_BIT_KEY_SIZE, NIST_AES_OFB_IV, DRV_CRYPTO_DIRECTION_ENCRYPT, DRV_CIPHER_OFB, NIST_AES_128_OFB_CIPHER, NIST_AES_PLAIN_DATA, NIST_AES_VECTOR_SIZE },
+ { 1, NIST_AES_192_KEY, CC_AES_192_BIT_KEY_SIZE, NIST_AES_OFB_IV, DRV_CRYPTO_DIRECTION_ENCRYPT, DRV_CIPHER_OFB, NIST_AES_PLAIN_DATA, NIST_AES_192_OFB_CIPHER, NIST_AES_VECTOR_SIZE },
+ { 1, NIST_AES_192_KEY, CC_AES_192_BIT_KEY_SIZE, NIST_AES_OFB_IV, DRV_CRYPTO_DIRECTION_DECRYPT, DRV_CIPHER_OFB, NIST_AES_192_OFB_CIPHER, NIST_AES_PLAIN_DATA, NIST_AES_VECTOR_SIZE },
+ { 1, NIST_AES_256_KEY, CC_AES_256_BIT_KEY_SIZE, NIST_AES_OFB_IV, DRV_CRYPTO_DIRECTION_ENCRYPT, DRV_CIPHER_OFB, NIST_AES_PLAIN_DATA, NIST_AES_256_OFB_CIPHER, NIST_AES_VECTOR_SIZE },
+ { 1, NIST_AES_256_KEY, CC_AES_256_BIT_KEY_SIZE, NIST_AES_OFB_IV, DRV_CRYPTO_DIRECTION_DECRYPT, DRV_CIPHER_OFB, NIST_AES_256_OFB_CIPHER, NIST_AES_PLAIN_DATA, NIST_AES_VECTOR_SIZE },
+ { 1, NIST_AES_128_KEY, CC_AES_128_BIT_KEY_SIZE, NIST_AES_CTR_IV, DRV_CRYPTO_DIRECTION_ENCRYPT, DRV_CIPHER_CTR, NIST_AES_PLAIN_DATA, NIST_AES_128_CTR_CIPHER, NIST_AES_VECTOR_SIZE },
+ { 1, NIST_AES_128_KEY, CC_AES_128_BIT_KEY_SIZE, NIST_AES_CTR_IV, DRV_CRYPTO_DIRECTION_DECRYPT, DRV_CIPHER_CTR, NIST_AES_128_CTR_CIPHER, NIST_AES_PLAIN_DATA, NIST_AES_VECTOR_SIZE },
+ { 1, NIST_AES_192_KEY, CC_AES_192_BIT_KEY_SIZE, NIST_AES_CTR_IV, DRV_CRYPTO_DIRECTION_ENCRYPT, DRV_CIPHER_CTR, NIST_AES_PLAIN_DATA, NIST_AES_192_CTR_CIPHER, NIST_AES_VECTOR_SIZE },
+ { 1, NIST_AES_192_KEY, CC_AES_192_BIT_KEY_SIZE, NIST_AES_CTR_IV, DRV_CRYPTO_DIRECTION_DECRYPT, DRV_CIPHER_CTR, NIST_AES_192_CTR_CIPHER, NIST_AES_PLAIN_DATA, NIST_AES_VECTOR_SIZE },
+ { 1, NIST_AES_256_KEY, CC_AES_256_BIT_KEY_SIZE, NIST_AES_CTR_IV, DRV_CRYPTO_DIRECTION_ENCRYPT, DRV_CIPHER_CTR, NIST_AES_PLAIN_DATA, NIST_AES_256_CTR_CIPHER, NIST_AES_VECTOR_SIZE },
+ { 1, NIST_AES_256_KEY, CC_AES_256_BIT_KEY_SIZE, NIST_AES_CTR_IV, DRV_CRYPTO_DIRECTION_DECRYPT, DRV_CIPHER_CTR, NIST_AES_256_CTR_CIPHER, NIST_AES_PLAIN_DATA, NIST_AES_VECTOR_SIZE },
+ { 1, RFC3962_AES_128_KEY, CC_AES_128_BIT_KEY_SIZE, RFC3962_AES_CBC_CTS_IV, DRV_CRYPTO_DIRECTION_ENCRYPT, DRV_CIPHER_CBC_CTS, RFC3962_AES_PLAIN_DATA, RFC3962_AES_128_CBC_CTS_CIPHER, RFC3962_AES_VECTOR_SIZE },
+ { 1, RFC3962_AES_128_KEY, CC_AES_128_BIT_KEY_SIZE, RFC3962_AES_CBC_CTS_IV, DRV_CRYPTO_DIRECTION_DECRYPT, DRV_CIPHER_CBC_CTS, RFC3962_AES_128_CBC_CTS_CIPHER, RFC3962_AES_PLAIN_DATA, RFC3962_AES_VECTOR_SIZE },
+ { 1, NIST_AES_256_XTS_KEY, CC_AES_256_BIT_KEY_SIZE, NIST_AES_256_XTS_IV, DRV_CRYPTO_DIRECTION_ENCRYPT, DRV_CIPHER_XTS, NIST_AES_256_XTS_PLAIN, NIST_AES_256_XTS_CIPHER, NIST_AES_256_XTS_VECTOR_SIZE },
+ { 1, NIST_AES_256_XTS_KEY, CC_AES_256_BIT_KEY_SIZE, NIST_AES_256_XTS_IV, DRV_CRYPTO_DIRECTION_DECRYPT, DRV_CIPHER_XTS, NIST_AES_256_XTS_CIPHER, NIST_AES_256_XTS_PLAIN, NIST_AES_256_XTS_VECTOR_SIZE },
+#if (CC_SUPPORT_SHA > 256)
+ { 1, NIST_AES_512_XTS_KEY, 2*CC_AES_256_BIT_KEY_SIZE, NIST_AES_512_XTS_IV, DRV_CRYPTO_DIRECTION_ENCRYPT, DRV_CIPHER_XTS, NIST_AES_512_XTS_PLAIN, NIST_AES_512_XTS_CIPHER, NIST_AES_512_XTS_VECTOR_SIZE },
+ { 1, NIST_AES_512_XTS_KEY, 2*CC_AES_256_BIT_KEY_SIZE, NIST_AES_512_XTS_IV, DRV_CRYPTO_DIRECTION_DECRYPT, DRV_CIPHER_XTS, NIST_AES_512_XTS_CIPHER, NIST_AES_512_XTS_PLAIN, NIST_AES_512_XTS_VECTOR_SIZE },
+#endif
+ /* DES */
+ { 0, NIST_TDES_ECB3_KEY, CC_DRV_DES_TRIPLE_KEY_SIZE, NIST_TDES_ECB_IV, DRV_CRYPTO_DIRECTION_ENCRYPT, DRV_CIPHER_ECB, NIST_TDES_ECB3_PLAIN_DATA, NIST_TDES_ECB3_CIPHER, NIST_TDES_VECTOR_SIZE },
+ { 0, NIST_TDES_ECB3_KEY, CC_DRV_DES_TRIPLE_KEY_SIZE, NIST_TDES_ECB_IV, DRV_CRYPTO_DIRECTION_DECRYPT, DRV_CIPHER_ECB, NIST_TDES_ECB3_CIPHER, NIST_TDES_ECB3_PLAIN_DATA, NIST_TDES_VECTOR_SIZE },
+ { 0, NIST_TDES_CBC3_KEY, CC_DRV_DES_TRIPLE_KEY_SIZE, NIST_TDES_CBC3_IV, DRV_CRYPTO_DIRECTION_ENCRYPT, DRV_CIPHER_CBC, NIST_TDES_CBC3_PLAIN_DATA, NIST_TDES_CBC3_CIPHER, NIST_TDES_VECTOR_SIZE },
+ { 0, NIST_TDES_CBC3_KEY, CC_DRV_DES_TRIPLE_KEY_SIZE, NIST_TDES_CBC3_IV, DRV_CRYPTO_DIRECTION_DECRYPT, DRV_CIPHER_CBC, NIST_TDES_CBC3_CIPHER, NIST_TDES_CBC3_PLAIN_DATA, NIST_TDES_VECTOR_SIZE },
+};
+#define FIPS_CIPHER_NUM_OF_TESTS (sizeof(FipsCipherDataTable) / sizeof(FipsCipherData))
+
+static const FipsCmacData FipsCmacDataTable[] = {
+ { DRV_CRYPTO_DIRECTION_ENCRYPT, NIST_AES_128_CMAC_KEY, AES_128_BIT_KEY_SIZE, NIST_AES_128_CMAC_PLAIN_DATA, NIST_AES_128_CMAC_VECTOR_SIZE, NIST_AES_128_CMAC_MAC, NIST_AES_128_CMAC_OUTPUT_SIZE },
+ { DRV_CRYPTO_DIRECTION_ENCRYPT, NIST_AES_192_CMAC_KEY, AES_192_BIT_KEY_SIZE, NIST_AES_192_CMAC_PLAIN_DATA, NIST_AES_192_CMAC_VECTOR_SIZE, NIST_AES_192_CMAC_MAC, NIST_AES_192_CMAC_OUTPUT_SIZE },
+ { DRV_CRYPTO_DIRECTION_ENCRYPT, NIST_AES_256_CMAC_KEY, AES_256_BIT_KEY_SIZE, NIST_AES_256_CMAC_PLAIN_DATA, NIST_AES_256_CMAC_VECTOR_SIZE, NIST_AES_256_CMAC_MAC, NIST_AES_256_CMAC_OUTPUT_SIZE },
+};
+#define FIPS_CMAC_NUM_OF_TESTS (sizeof(FipsCmacDataTable) / sizeof(FipsCmacData))
+
+static const FipsHashData FipsHashDataTable[] = {
+ { DRV_HASH_SHA1, NIST_SHA_1_MSG, NIST_SHA_MSG_SIZE, NIST_SHA_1_MD },
+ { DRV_HASH_SHA256, NIST_SHA_256_MSG, NIST_SHA_MSG_SIZE, NIST_SHA_256_MD },
+#if (CC_SUPPORT_SHA > 256)
+// { DRV_HASH_SHA512, NIST_SHA_512_MSG, NIST_SHA_MSG_SIZE, NIST_SHA_512_MD },
+#endif
+};
+#define FIPS_HASH_NUM_OF_TESTS (sizeof(FipsHashDataTable) / sizeof(FipsHashData))
+
+static const FipsHmacData FipsHmacDataTable[] = {
+ { DRV_HASH_SHA1, NIST_HMAC_SHA1_KEY, NIST_HMAC_SHA1_KEY_SIZE, NIST_HMAC_SHA1_MSG, NIST_HMAC_MSG_SIZE, NIST_HMAC_SHA1_MD },
+ { DRV_HASH_SHA256, NIST_HMAC_SHA256_KEY, NIST_HMAC_SHA256_KEY_SIZE, NIST_HMAC_SHA256_MSG, NIST_HMAC_MSG_SIZE, NIST_HMAC_SHA256_MD },
+#if (CC_SUPPORT_SHA > 256)
+// { DRV_HASH_SHA512, NIST_HMAC_SHA512_KEY, NIST_HMAC_SHA512_KEY_SIZE, NIST_HMAC_SHA512_MSG, NIST_HMAC_MSG_SIZE, NIST_HMAC_SHA512_MD },
+#endif
+};
+#define FIPS_HMAC_NUM_OF_TESTS (sizeof(FipsHmacDataTable) / sizeof(FipsHmacData))
+
+static const FipsCcmData FipsCcmDataTable[] = {
+ { DRV_CRYPTO_DIRECTION_ENCRYPT, NIST_AESCCM_128_KEY, NIST_AESCCM_128_BIT_KEY_SIZE, NIST_AESCCM_128_NONCE, NIST_AESCCM_128_ADATA, NIST_AESCCM_ADATA_SIZE, NIST_AESCCM_128_PLAIN_TEXT, NIST_AESCCM_TEXT_SIZE, NIST_AESCCM_128_CIPHER, NIST_AESCCM_TAG_SIZE, NIST_AESCCM_128_MAC },
+ { DRV_CRYPTO_DIRECTION_DECRYPT, NIST_AESCCM_128_KEY, NIST_AESCCM_128_BIT_KEY_SIZE, NIST_AESCCM_128_NONCE, NIST_AESCCM_128_ADATA, NIST_AESCCM_ADATA_SIZE, NIST_AESCCM_128_CIPHER, NIST_AESCCM_TEXT_SIZE, NIST_AESCCM_128_PLAIN_TEXT, NIST_AESCCM_TAG_SIZE, NIST_AESCCM_128_MAC },
+ { DRV_CRYPTO_DIRECTION_ENCRYPT, NIST_AESCCM_192_KEY, NIST_AESCCM_192_BIT_KEY_SIZE, NIST_AESCCM_192_NONCE, NIST_AESCCM_192_ADATA, NIST_AESCCM_ADATA_SIZE, NIST_AESCCM_192_PLAIN_TEXT, NIST_AESCCM_TEXT_SIZE, NIST_AESCCM_192_CIPHER, NIST_AESCCM_TAG_SIZE, NIST_AESCCM_192_MAC },
+ { DRV_CRYPTO_DIRECTION_DECRYPT, NIST_AESCCM_192_KEY, NIST_AESCCM_192_BIT_KEY_SIZE, NIST_AESCCM_192_NONCE, NIST_AESCCM_192_ADATA, NIST_AESCCM_ADATA_SIZE, NIST_AESCCM_192_CIPHER, NIST_AESCCM_TEXT_SIZE, NIST_AESCCM_192_PLAIN_TEXT, NIST_AESCCM_TAG_SIZE, NIST_AESCCM_192_MAC },
+ { DRV_CRYPTO_DIRECTION_ENCRYPT, NIST_AESCCM_256_KEY, NIST_AESCCM_256_BIT_KEY_SIZE, NIST_AESCCM_256_NONCE, NIST_AESCCM_256_ADATA, NIST_AESCCM_ADATA_SIZE, NIST_AESCCM_256_PLAIN_TEXT, NIST_AESCCM_TEXT_SIZE, NIST_AESCCM_256_CIPHER, NIST_AESCCM_TAG_SIZE, NIST_AESCCM_256_MAC },
+ { DRV_CRYPTO_DIRECTION_DECRYPT, NIST_AESCCM_256_KEY, NIST_AESCCM_256_BIT_KEY_SIZE, NIST_AESCCM_256_NONCE, NIST_AESCCM_256_ADATA, NIST_AESCCM_ADATA_SIZE, NIST_AESCCM_256_CIPHER, NIST_AESCCM_TEXT_SIZE, NIST_AESCCM_256_PLAIN_TEXT, NIST_AESCCM_TAG_SIZE, NIST_AESCCM_256_MAC },
+};
+#define FIPS_CCM_NUM_OF_TESTS (sizeof(FipsCcmDataTable) / sizeof(FipsCcmData))
+
+static const FipsGcmData FipsGcmDataTable[] = {
+ { DRV_CRYPTO_DIRECTION_ENCRYPT, NIST_AESGCM_128_KEY, NIST_AESGCM_128_BIT_KEY_SIZE, NIST_AESGCM_128_IV, NIST_AESGCM_128_ADATA, NIST_AESGCM_ADATA_SIZE, NIST_AESGCM_128_PLAIN_TEXT, NIST_AESGCM_TEXT_SIZE, NIST_AESGCM_128_CIPHER, NIST_AESGCM_TAG_SIZE, NIST_AESGCM_128_MAC },
+ { DRV_CRYPTO_DIRECTION_DECRYPT, NIST_AESGCM_128_KEY, NIST_AESGCM_128_BIT_KEY_SIZE, NIST_AESGCM_128_IV, NIST_AESGCM_128_ADATA, NIST_AESGCM_ADATA_SIZE, NIST_AESGCM_128_CIPHER, NIST_AESGCM_TEXT_SIZE, NIST_AESGCM_128_PLAIN_TEXT, NIST_AESGCM_TAG_SIZE, NIST_AESGCM_128_MAC },
+ { DRV_CRYPTO_DIRECTION_ENCRYPT, NIST_AESGCM_192_KEY, NIST_AESGCM_192_BIT_KEY_SIZE, NIST_AESGCM_192_IV, NIST_AESGCM_192_ADATA, NIST_AESGCM_ADATA_SIZE, NIST_AESGCM_192_PLAIN_TEXT, NIST_AESGCM_TEXT_SIZE, NIST_AESGCM_192_CIPHER, NIST_AESGCM_TAG_SIZE, NIST_AESGCM_192_MAC },
+ { DRV_CRYPTO_DIRECTION_DECRYPT, NIST_AESGCM_192_KEY, NIST_AESGCM_192_BIT_KEY_SIZE, NIST_AESGCM_192_IV, NIST_AESGCM_192_ADATA, NIST_AESGCM_ADATA_SIZE, NIST_AESGCM_192_CIPHER, NIST_AESGCM_TEXT_SIZE, NIST_AESGCM_192_PLAIN_TEXT, NIST_AESGCM_TAG_SIZE, NIST_AESGCM_192_MAC },
+ { DRV_CRYPTO_DIRECTION_ENCRYPT, NIST_AESGCM_256_KEY, NIST_AESGCM_256_BIT_KEY_SIZE, NIST_AESGCM_256_IV, NIST_AESGCM_256_ADATA, NIST_AESGCM_ADATA_SIZE, NIST_AESGCM_256_PLAIN_TEXT, NIST_AESGCM_TEXT_SIZE, NIST_AESGCM_256_CIPHER, NIST_AESGCM_TAG_SIZE, NIST_AESGCM_256_MAC },
+ { DRV_CRYPTO_DIRECTION_DECRYPT, NIST_AESGCM_256_KEY, NIST_AESGCM_256_BIT_KEY_SIZE, NIST_AESGCM_256_IV, NIST_AESGCM_256_ADATA, NIST_AESGCM_ADATA_SIZE, NIST_AESGCM_256_CIPHER, NIST_AESGCM_TEXT_SIZE, NIST_AESGCM_256_PLAIN_TEXT, NIST_AESGCM_TAG_SIZE, NIST_AESGCM_256_MAC },
+};
+#define FIPS_GCM_NUM_OF_TESTS (sizeof(FipsGcmDataTable) / sizeof(FipsGcmData))
+
+
+static inline ssi_fips_error_t
+FIPS_CipherToFipsError(enum drv_cipher_mode mode, bool is_aes)
+{
+ switch (mode)
+ {
+ case DRV_CIPHER_ECB:
+ return is_aes ? CC_REE_FIPS_ERROR_AES_ECB_PUT : CC_REE_FIPS_ERROR_DES_ECB_PUT ;
+ case DRV_CIPHER_CBC:
+ return is_aes ? CC_REE_FIPS_ERROR_AES_CBC_PUT : CC_REE_FIPS_ERROR_DES_CBC_PUT ;
+ case DRV_CIPHER_OFB:
+ return CC_REE_FIPS_ERROR_AES_OFB_PUT;
+ case DRV_CIPHER_CTR:
+ return CC_REE_FIPS_ERROR_AES_CTR_PUT;
+ case DRV_CIPHER_CBC_CTS:
+ return CC_REE_FIPS_ERROR_AES_CBC_CTS_PUT;
+ case DRV_CIPHER_XTS:
+ return CC_REE_FIPS_ERROR_AES_XTS_PUT;
+ default:
+ return CC_REE_FIPS_ERROR_GENERAL;
+ }
+
+ return CC_REE_FIPS_ERROR_GENERAL;
+}
+
+
+static inline int
+ssi_cipher_fips_run_test(struct ssi_drvdata *drvdata,
+ bool is_aes,
+ int cipher_mode,
+ int direction,
+ dma_addr_t key_dma_addr,
+ size_t key_len,
+ dma_addr_t iv_dma_addr,
+ size_t iv_len,
+ dma_addr_t din_dma_addr,
+ dma_addr_t dout_dma_addr,
+ size_t data_size)
+{
+ /* max number of descriptors used for the flow */
+ #define FIPS_CIPHER_MAX_SEQ_LEN 6
+
+ int rc;
+ struct ssi_crypto_req ssi_req = {0};
+ HwDesc_s desc[FIPS_CIPHER_MAX_SEQ_LEN];
+ int idx = 0;
+ int s_flow_mode = is_aes ? S_DIN_to_AES : S_DIN_to_DES;
+
+ /* create setup descriptors */
+ switch (cipher_mode) {
+ case DRV_CIPHER_CBC:
+ case DRV_CIPHER_CBC_CTS:
+ case DRV_CIPHER_CTR:
+ case DRV_CIPHER_OFB:
+ /* Load cipher state */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
+ iv_dma_addr, iv_len, NS_BIT);
+ HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], direction);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], s_flow_mode);
+ HW_DESC_SET_CIPHER_MODE(&desc[idx], cipher_mode);
+ if ((cipher_mode == DRV_CIPHER_CTR) ||
+ (cipher_mode == DRV_CIPHER_OFB) ) {
+ HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE1);
+ } else {
+ HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE0);
+ }
+ idx++;
+ /*FALLTHROUGH*/
+ case DRV_CIPHER_ECB:
+ /* Load key */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_CIPHER_MODE(&desc[idx], cipher_mode);
+ HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], direction);
+ if (is_aes) {
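+ /*
+ * A 192-bit key is DMA-loaded as a full AES_MAX_KEY_SIZE read; the
+ * scratch key buffer was zeroed, so the extra bytes are zero padding.
+ * The real key length is programmed via SET_KEY_SIZE_AES below.
+ */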
+ HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
+ key_dma_addr,
+ ((key_len == 24) ? AES_MAX_KEY_SIZE : key_len),
+ NS_BIT);
+ HW_DESC_SET_KEY_SIZE_AES(&desc[idx], key_len);
+ } else {/*des*/
+ HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
+ key_dma_addr, key_len,
+ NS_BIT);
+ HW_DESC_SET_KEY_SIZE_DES(&desc[idx], key_len);
+ }
+ HW_DESC_SET_FLOW_MODE(&desc[idx], s_flow_mode);
+ HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
+ idx++;
+ break;
+ case DRV_CIPHER_XTS:
+ /* Load AES key */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_CIPHER_MODE(&desc[idx], cipher_mode);
+ HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], direction);
+ HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
+ key_dma_addr, key_len/2, NS_BIT);
+ HW_DESC_SET_KEY_SIZE_AES(&desc[idx], key_len/2);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], s_flow_mode);
+ HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
+ idx++;
+
+ /* load XEX key */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_CIPHER_MODE(&desc[idx], cipher_mode);
+ HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], direction);
+ HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
+ (key_dma_addr+key_len/2), key_len/2, NS_BIT);
+ HW_DESC_SET_XEX_DATA_UNIT_SIZE(&desc[idx], data_size);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], s_flow_mode);
+ HW_DESC_SET_KEY_SIZE_AES(&desc[idx], key_len/2);
+ HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_XEX_KEY);
+ idx++;
+
+ /* Set state */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE1);
+ HW_DESC_SET_CIPHER_MODE(&desc[idx], cipher_mode);
+ HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], direction);
+ HW_DESC_SET_KEY_SIZE_AES(&desc[idx], key_len/2);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], s_flow_mode);
+ HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
+ iv_dma_addr, CC_AES_BLOCK_SIZE, NS_BIT);
+ idx++;
+ break;
+ default:
+ FIPS_LOG("Unsupported cipher mode (%d)\n", cipher_mode);
+ BUG();
+ }
+
+ /* create data descriptor */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, din_dma_addr, data_size, NS_BIT);
+ HW_DESC_SET_DOUT_DLLI(&desc[idx], dout_dma_addr, data_size, NS_BIT, 0);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], is_aes ? DIN_AES_DOUT : DIN_DES_DOUT);
+ idx++;
+
+ /* perform the operation - Lock HW and push sequence */
+ BUG_ON(idx > FIPS_CIPHER_MAX_SEQ_LEN);
+ rc = send_request(drvdata, &ssi_req, desc, idx, false);
+
+ // send_request() returns an error only in corner cases that should not occur in this flow.
+ return rc;
+}
+
+
+ssi_fips_error_t
+ssi_cipher_fips_power_up_tests(struct ssi_drvdata *drvdata, void *cpu_addr_buffer, dma_addr_t dma_coherent_buffer)
+{
+ ssi_fips_error_t error = CC_REE_FIPS_ERROR_OK;
+ size_t i;
+ struct fips_cipher_ctx *virt_ctx = (struct fips_cipher_ctx *)cpu_addr_buffer;
+
+ /* set the physical (DMA) pointers for iv, key, din, dout */
+ dma_addr_t iv_dma_addr = dma_coherent_buffer + offsetof(struct fips_cipher_ctx, iv);
+ dma_addr_t key_dma_addr = dma_coherent_buffer + offsetof(struct fips_cipher_ctx, key);
+ dma_addr_t din_dma_addr = dma_coherent_buffer + offsetof(struct fips_cipher_ctx, din);
+ dma_addr_t dout_dma_addr = dma_coherent_buffer + offsetof(struct fips_cipher_ctx, dout);
+
+ for (i = 0; i < FIPS_CIPHER_NUM_OF_TESTS; ++i)
+ {
+ FipsCipherData *cipherData = (FipsCipherData*)&FipsCipherDataTable[i];
+ int rc = 0;
+ size_t iv_size = cipherData->isAes ? NIST_AES_IV_SIZE : NIST_TDES_IV_SIZE ;
+
+ memset(cpu_addr_buffer, 0, sizeof(struct fips_cipher_ctx));
+
+ /* copy into the allocated buffer */
+ memcpy(virt_ctx->iv, cipherData->iv, iv_size);
+ memcpy(virt_ctx->key, cipherData->key, cipherData->keySize);
+ memcpy(virt_ctx->din, cipherData->dataIn, cipherData->dataInSize);
+
+ FIPS_DBG("ssi_cipher_fips_run_test - (i = %d) \n", i);
+ rc = ssi_cipher_fips_run_test(drvdata,
+ cipherData->isAes,
+ cipherData->oprMode,
+ cipherData->direction,
+ key_dma_addr,
+ cipherData->keySize,
+ iv_dma_addr,
+ iv_size,
+ din_dma_addr,
+ dout_dma_addr,
+ cipherData->dataInSize);
+ if (rc != 0)
+ {
+ FIPS_LOG("ssi_cipher_fips_run_test %d returned error - rc = %d \n", i, rc);
+ error = FIPS_CipherToFipsError(cipherData->oprMode, cipherData->isAes);
+ break;
+ }
+
+ /* compare actual dout to expected */
+ if (memcmp(virt_ctx->dout, cipherData->dataOut, cipherData->dataInSize) != 0)
+ {
+ FIPS_LOG("dout comparison error %d - oprMode=%d, isAes=%d\n", i, cipherData->oprMode, cipherData->isAes);
+ FIPS_LOG(" i expected received \n");
+ FIPS_LOG(" i 0x%08x 0x%08x (size=%d) \n", (size_t)cipherData->dataOut, (size_t)virt_ctx->dout, cipherData->dataInSize);
+ for (i = 0; i < cipherData->dataInSize; ++i)
+ {
+ FIPS_LOG(" %d 0x%02x 0x%02x \n", i, cipherData->dataOut[i], virt_ctx->dout[i]);
+ }
+
+ error = FIPS_CipherToFipsError(cipherData->oprMode, cipherData->isAes);
+ break;
+ }
+ }
+
+ return error;
+}
+
+
+static inline int
+ssi_cmac_fips_run_test(struct ssi_drvdata *drvdata,
+ dma_addr_t key_dma_addr,
+ size_t key_len,
+ dma_addr_t din_dma_addr,
+ size_t din_len,
+ dma_addr_t digest_dma_addr,
+ size_t digest_len)
+{
+ /* max number of descriptors used for the flow */
+ #define FIPS_CMAC_MAX_SEQ_LEN 4
+
+ int rc;
+ struct ssi_crypto_req ssi_req = {0};
+ HwDesc_s desc[FIPS_CMAC_MAX_SEQ_LEN];
+ int idx = 0;
+
+ /* Setup CMAC Key */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, key_dma_addr,
+ ((key_len == 24) ? AES_MAX_KEY_SIZE : key_len), NS_BIT);
+ HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
+ HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_CMAC);
+ HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+ HW_DESC_SET_KEY_SIZE_AES(&desc[idx], key_len);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_AES);
+ idx++;
+
+ /* Load MAC state */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, digest_dma_addr, CC_AES_BLOCK_SIZE, NS_BIT);
+ HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE0);
+ HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_CMAC);
+ HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+ HW_DESC_SET_KEY_SIZE_AES(&desc[idx], key_len);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_AES);
+ idx++;
+
+
+ //ssi_hash_create_data_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
+ din_dma_addr,
+ din_len, NS_BIT);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], DIN_AES_DOUT);
+ idx++;
+
+ /* Get final MAC result */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_DOUT_DLLI(&desc[idx], digest_dma_addr, CC_AES_BLOCK_SIZE, NS_BIT, 0);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], S_AES_to_DOUT);
+ HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE0);
+ HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+ HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_CMAC);
+ idx++;
+
+ /* perform the operation - Lock HW and push sequence */
+ BUG_ON(idx > FIPS_CMAC_MAX_SEQ_LEN);
+ rc = send_request(drvdata, &ssi_req, desc, idx, false);
+
+ // send_request() returns an error only in corner cases that should not occur in this flow.
+ return rc;
+}
+
+ssi_fips_error_t
+ssi_cmac_fips_power_up_tests(struct ssi_drvdata *drvdata, void *cpu_addr_buffer, dma_addr_t dma_coherent_buffer)
+{
+ ssi_fips_error_t error = CC_REE_FIPS_ERROR_OK;
+ size_t i;
+ struct fips_cmac_ctx *virt_ctx = (struct fips_cmac_ctx *)cpu_addr_buffer;
+
+ /* set the physical (DMA) pointers for key, din, dout */
+ dma_addr_t key_dma_addr = dma_coherent_buffer + offsetof(struct fips_cmac_ctx, key);
+ dma_addr_t din_dma_addr = dma_coherent_buffer + offsetof(struct fips_cmac_ctx, din);
+ dma_addr_t mac_res_dma_addr = dma_coherent_buffer + offsetof(struct fips_cmac_ctx, mac_res);
+
+ for (i = 0; i < FIPS_CMAC_NUM_OF_TESTS; ++i)
+ {
+ FipsCmacData *cmac_data = (FipsCmacData*)&FipsCmacDataTable[i];
+ int rc = 0;
+
+ memset(cpu_addr_buffer, 0, sizeof(struct fips_cmac_ctx));
+
+ /* copy into the allocated buffer */
+ memcpy(virt_ctx->key, cmac_data->key, cmac_data->key_size);
+ memcpy(virt_ctx->din, cmac_data->data_in, cmac_data->data_in_size);
+
+ BUG_ON(cmac_data->direction != DRV_CRYPTO_DIRECTION_ENCRYPT);
+
+ FIPS_DBG("ssi_cmac_fips_run_test - (i = %d) \n", i);
+ rc = ssi_cmac_fips_run_test(drvdata,
+ key_dma_addr,
+ cmac_data->key_size,
+ din_dma_addr,
+ cmac_data->data_in_size,
+ mac_res_dma_addr,
+ cmac_data->mac_res_size);
+ if (rc != 0)
+ {
+ FIPS_LOG("ssi_cmac_fips_run_test %d returned error - rc = %d \n", i, rc);
+ error = CC_REE_FIPS_ERROR_AES_CMAC_PUT;
+ break;
+ }
+
+ /* compare actual mac result to expected */
+ if (memcmp(virt_ctx->mac_res, cmac_data->mac_res, cmac_data->mac_res_size) != 0)
+ {
+ FIPS_LOG("comparison error %d - digest_size=%d \n", i, cmac_data->mac_res_size);
+ FIPS_LOG(" i expected received \n");
+ FIPS_LOG(" i 0x%08x 0x%08x \n", (size_t)cmac_data->mac_res, (size_t)virt_ctx->mac_res);
+ for (i = 0; i < cmac_data->mac_res_size; ++i)
+ {
+ FIPS_LOG(" %d 0x%02x 0x%02x \n", i, cmac_data->mac_res[i], virt_ctx->mac_res[i]);
+ }
+
+ error = CC_REE_FIPS_ERROR_AES_CMAC_PUT;
+ break;
+ }
+ }
+
+ return error;
+}
+
+
+static inline ssi_fips_error_t
+FIPS_HashToFipsError(enum drv_hash_mode hash_mode)
+{
+ switch (hash_mode) {
+ case DRV_HASH_SHA1:
+ return CC_REE_FIPS_ERROR_SHA1_PUT;
+ case DRV_HASH_SHA256:
+ return CC_REE_FIPS_ERROR_SHA256_PUT;
+#if (CC_SUPPORT_SHA > 256)
+ case DRV_HASH_SHA512:
+ return CC_REE_FIPS_ERROR_SHA512_PUT;
+#endif
+ default:
+ return CC_REE_FIPS_ERROR_GENERAL;
+ }
+
+ return CC_REE_FIPS_ERROR_GENERAL;
+}
+
+static inline int
+ssi_hash_fips_run_test(struct ssi_drvdata *drvdata,
+ dma_addr_t initial_digest_dma_addr,
+ dma_addr_t din_dma_addr,
+ size_t data_in_size,
+ dma_addr_t mac_res_dma_addr,
+ enum drv_hash_mode hash_mode,
+ enum drv_hash_hw_mode hw_mode,
+ int digest_size,
+ int inter_digestsize)
+{
+ /* max number of descriptors used for the flow */
+ #define FIPS_HASH_MAX_SEQ_LEN 4
+
+ int rc;
+ struct ssi_crypto_req ssi_req = {0};
+ HwDesc_s desc[FIPS_HASH_MAX_SEQ_LEN];
+ int idx = 0;
+
+ /* Load initial digest */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_CIPHER_MODE(&desc[idx], hw_mode);
+ HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, initial_digest_dma_addr, inter_digestsize, NS_BIT);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
+ HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE0);
+ idx++;
+
+ /* Load the hash current length */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_CIPHER_MODE(&desc[idx], hw_mode);
+ HW_DESC_SET_DIN_CONST(&desc[idx], 0, HASH_LEN_SIZE);
+ HW_DESC_SET_CIPHER_CONFIG1(&desc[idx], HASH_PADDING_ENABLED);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
+ HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
+ idx++;
+
+ /* data descriptor */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, din_dma_addr, data_in_size, NS_BIT);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], DIN_HASH);
+ idx++;
+
+ /* Get final MAC result */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_CIPHER_MODE(&desc[idx], hw_mode);
+ HW_DESC_SET_DOUT_DLLI(&desc[idx], mac_res_dma_addr, digest_size, NS_BIT, 0);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], S_HASH_to_DOUT);
+ HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE0);
+ HW_DESC_SET_CIPHER_CONFIG1(&desc[idx], HASH_PADDING_DISABLED);
+ if (unlikely((hash_mode == DRV_HASH_MD5) ||
+ (hash_mode == DRV_HASH_SHA384) ||
+ (hash_mode == DRV_HASH_SHA512))) {
+ HW_DESC_SET_BYTES_SWAP(&desc[idx], 1);
+ } else {
+ HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], HASH_DIGEST_RESULT_LITTLE_ENDIAN);
+ }
+ idx++;
+
+ /* perform the operation - Lock HW and push sequence */
+ BUG_ON(idx > FIPS_HASH_MAX_SEQ_LEN);
+ rc = send_request(drvdata, &ssi_req, desc, idx, false);
+
+ return rc;
+}
+
+ssi_fips_error_t
+ssi_hash_fips_power_up_tests(struct ssi_drvdata *drvdata, void *cpu_addr_buffer, dma_addr_t dma_coherent_buffer)
+{
+ ssi_fips_error_t error = CC_REE_FIPS_ERROR_OK;
+ size_t i;
+ struct fips_hash_ctx *virt_ctx = (struct fips_hash_ctx *)cpu_addr_buffer;
+
+ /* set the physical (DMA) pointers for initial_digest, din, mac_res */
+ dma_addr_t initial_digest_dma_addr = dma_coherent_buffer + offsetof(struct fips_hash_ctx, initial_digest);
+ dma_addr_t din_dma_addr = dma_coherent_buffer + offsetof(struct fips_hash_ctx, din);
+ dma_addr_t mac_res_dma_addr = dma_coherent_buffer + offsetof(struct fips_hash_ctx, mac_res);
+
+ for (i = 0; i < FIPS_HASH_NUM_OF_TESTS; ++i)
+ {
+ FipsHashData *hash_data = (FipsHashData*)&FipsHashDataTable[i];
+ int rc = 0;
+ enum drv_hash_hw_mode hw_mode = 0;
+ int digest_size = 0;
+ int inter_digestsize = 0;
+
+ memset(cpu_addr_buffer, 0, sizeof(struct fips_hash_ctx));
+
+ switch (hash_data->hash_mode) {
+ case DRV_HASH_SHA1:
+ hw_mode = DRV_HASH_HW_SHA1;
+ digest_size = CC_SHA1_DIGEST_SIZE;
+ inter_digestsize = CC_SHA1_DIGEST_SIZE;
+ /* copy the initial digest into the allocated cache coherent buffer */
+ memcpy(virt_ctx->initial_digest, (void*)sha1_init, CC_SHA1_DIGEST_SIZE);
+ break;
+ case DRV_HASH_SHA256:
+ hw_mode = DRV_HASH_HW_SHA256;
+ digest_size = CC_SHA256_DIGEST_SIZE;
+ inter_digestsize = CC_SHA256_DIGEST_SIZE;
+ memcpy(virt_ctx->initial_digest, (void*)sha256_init, CC_SHA256_DIGEST_SIZE);
+ break;
+#if (CC_SUPPORT_SHA > 256)
+ case DRV_HASH_SHA512:
+ hw_mode = DRV_HASH_HW_SHA512;
+ digest_size = CC_SHA512_DIGEST_SIZE;
+ inter_digestsize = CC_SHA512_DIGEST_SIZE;
+ memcpy(virt_ctx->initial_digest, (void*)sha512_init, CC_SHA512_DIGEST_SIZE);
+ break;
+#endif
+ default:
+ error = FIPS_HashToFipsError(hash_data->hash_mode);
+ break;
+ }
+
+ /* copy the din data into the allocated buffer */
+ memcpy(virt_ctx->din, hash_data->data_in, hash_data->data_in_size);
+
+ /* run the test on HW */
+ FIPS_DBG("ssi_hash_fips_run_test - (i = %d) \n", i);
+ rc = ssi_hash_fips_run_test(drvdata,
+ initial_digest_dma_addr,
+ din_dma_addr,
+ hash_data->data_in_size,
+ mac_res_dma_addr,
+ hash_data->hash_mode,
+ hw_mode,
+ digest_size,
+ inter_digestsize);
+ if (rc != 0)
+ {
+ FIPS_LOG("ssi_hash_fips_run_test %d returned error - rc = %d \n", i, rc);
+ error = FIPS_HashToFipsError(hash_data->hash_mode);
+ break;
+ }
+
+ /* compare actual mac result to expected */
+ if (memcmp(virt_ctx->mac_res, hash_data->mac_res, digest_size) != 0)
+ {
+ FIPS_LOG("comparison error %d - hash_mode=%d digest_size=%d \n", i, hash_data->hash_mode, digest_size);
+ FIPS_LOG(" i expected received \n");
+ FIPS_LOG(" i 0x%08x 0x%08x \n", (size_t)hash_data->mac_res, (size_t)virt_ctx->mac_res);
+ for (i = 0; i < digest_size; ++i)
+ {
+ FIPS_LOG(" %d 0x%02x 0x%02x \n", i, hash_data->mac_res[i], virt_ctx->mac_res[i]);
+ }
+
+ error = FIPS_HashToFipsError(hash_data->hash_mode);
+ break;
+ }
+ }
+
+ return error;
+}
+
+
+static inline ssi_fips_error_t
+FIPS_HmacToFipsError(enum drv_hash_mode hash_mode)
+{
+ switch (hash_mode) {
+ case DRV_HASH_SHA1:
+ return CC_REE_FIPS_ERROR_HMAC_SHA1_PUT;
+ case DRV_HASH_SHA256:
+ return CC_REE_FIPS_ERROR_HMAC_SHA256_PUT;
+#if (CC_SUPPORT_SHA > 256)
+ case DRV_HASH_SHA512:
+ return CC_REE_FIPS_ERROR_HMAC_SHA512_PUT;
+#endif
+ default:
+ return CC_REE_FIPS_ERROR_GENERAL;
+ }
+
+ return CC_REE_FIPS_ERROR_GENERAL;
+}
+
+static inline int
+ssi_hmac_fips_run_test(struct ssi_drvdata *drvdata,
+ dma_addr_t initial_digest_dma_addr,
+ dma_addr_t key_dma_addr,
+ size_t key_size,
+ dma_addr_t din_dma_addr,
+ size_t data_in_size,
+ dma_addr_t mac_res_dma_addr,
+ enum drv_hash_mode hash_mode,
+ enum drv_hash_hw_mode hw_mode,
+ size_t digest_size,
+ size_t inter_digestsize,
+ size_t block_size,
+ dma_addr_t k0_dma_addr,
+ dma_addr_t tmp_digest_dma_addr,
+ dma_addr_t digest_bytes_len_dma_addr)
+{
+ /* The implemented flow is not the same as the one in ssi_hash.c (setkey + digest flows).
+ In this flow, there is no need to store and reload some of the intermediate results. */
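+ /* Reference (RFC 2104 / FIPS 198-1):
+ HMAC(K, m) = H((K0 ^ opad) || H((K0 ^ ipad) || m))
+ where K0 is the key zero-padded to the hash block size. The sequence
+ below builds K0, derives the opad and ipad states in hardware, hashes
+ the message under the ipad state and finishes under the opad state. */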
+
+ /* max number of descriptors used for the flow */
+ #define FIPS_HMAC_MAX_SEQ_LEN 12
+
+ int rc;
+ struct ssi_crypto_req ssi_req = {0};
+ HwDesc_s desc[FIPS_HMAC_MAX_SEQ_LEN];
+ int idx = 0;
+ int i;
+ /* calc the hash opad first and ipad only afterwards (unlike the flow in ssi_hash.c) */
+ unsigned int hmacPadConst[2] = { HMAC_OPAD_CONST, HMAC_IPAD_CONST };
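+ /*
+ * HMAC_OPAD_CONST/HMAC_IPAD_CONST are the RFC 2104 pad words (0x5c and
+ * 0x36 repeated); the engine XORs the selected constant into K0 while
+ * hashing it (see the XOR_VAL/XOR_ACTIVE descriptors below).
+ */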
+
+ // assume (key_size <= block_size)
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, key_dma_addr, key_size, NS_BIT);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], BYPASS);
+ HW_DESC_SET_DOUT_DLLI(&desc[idx], k0_dma_addr, key_size, NS_BIT, 0);
+ idx++;
+
+ // if needed, pad the key with zeros to create K0
+ if ((block_size - key_size) != 0) {
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_DIN_CONST(&desc[idx], 0, (block_size - key_size));
+ HW_DESC_SET_FLOW_MODE(&desc[idx], BYPASS);
+ HW_DESC_SET_DOUT_DLLI(&desc[idx],
+ (k0_dma_addr + key_size), (block_size - key_size),
+ NS_BIT, 0);
+ idx++;
+ }
+
+ BUG_ON(idx > FIPS_HMAC_MAX_SEQ_LEN);
+ rc = send_request(drvdata, &ssi_req, desc, idx, 0);
+ if (unlikely(rc != 0)) {
+ SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
+ return rc;
+ }
+ idx = 0;
+
+ /* calc derived HMAC key */
+ for (i = 0; i < 2; i++) {
+ /* Load hash initial state */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_CIPHER_MODE(&desc[idx], hw_mode);
+ HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, initial_digest_dma_addr, inter_digestsize, NS_BIT);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
+ HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE0);
+ idx++;
+
+
+ /* Load the hash current length */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_CIPHER_MODE(&desc[idx], hw_mode);
+ HW_DESC_SET_DIN_CONST(&desc[idx], 0, HASH_LEN_SIZE);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
+ HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
+ idx++;
+
+ /* Prepare opad/ipad key */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_XOR_VAL(&desc[idx], hmacPadConst[i]);
+ HW_DESC_SET_CIPHER_MODE(&desc[idx], hw_mode);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
+ HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE1);
+ idx++;
+
+ /* Perform HASH update */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
+ k0_dma_addr,
+ block_size, NS_BIT);
+ HW_DESC_SET_CIPHER_MODE(&desc[idx],hw_mode);
+ HW_DESC_SET_XOR_ACTIVE(&desc[idx]);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], DIN_HASH);
+ idx++;
+
+ if (i == 0) {
+ /* First iteration - calc H(K0^opad) into tmp_digest_dma_addr */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_CIPHER_MODE(&desc[idx], hw_mode);
+ HW_DESC_SET_DOUT_DLLI(&desc[idx],
+ tmp_digest_dma_addr,
+ inter_digestsize,
+ NS_BIT, 0);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], S_HASH_to_DOUT);
+ HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE0);
+ idx++;
+
+ // TODO: is a separate send_request() needed here, or could we keep appending to the current descriptor sequence?
+ BUG_ON(idx > FIPS_HMAC_MAX_SEQ_LEN);
+ rc = send_request(drvdata, &ssi_req, desc, idx, 0);
+ if (unlikely(rc != 0)) {
+ SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
+ return rc;
+ }
+ idx = 0;
+ }
+ }
+
+ /* data descriptor */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
+ din_dma_addr, data_in_size,
+ NS_BIT);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], DIN_HASH);
+ idx++;
+
+ /* HW last hash block padding (aka. "DO_PAD") */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_CIPHER_MODE(&desc[idx], hw_mode);
+ HW_DESC_SET_DOUT_DLLI(&desc[idx], k0_dma_addr, HASH_LEN_SIZE, NS_BIT, 0);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], S_HASH_to_DOUT);
+ HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE1);
+ HW_DESC_SET_CIPHER_DO(&desc[idx], DO_PAD);
+ idx++;
+
+ /* store the hash digest result in the context */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_CIPHER_MODE(&desc[idx], hw_mode);
+ HW_DESC_SET_DOUT_DLLI(&desc[idx], k0_dma_addr, digest_size, NS_BIT, 0);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], S_HASH_to_DOUT);
+ if (unlikely((hash_mode == DRV_HASH_MD5) ||
+ (hash_mode == DRV_HASH_SHA384) ||
+ (hash_mode == DRV_HASH_SHA512))) {
+ HW_DESC_SET_BYTES_SWAP(&desc[idx], 1);
+ } else {
+ HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], HASH_DIGEST_RESULT_LITTLE_ENDIAN);
+ }
+ HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE0);
+ idx++;
+
+ /* at this point:
+ tmp_digest = H(o_key_pad)
+ k0 = H(i_key_pad || m)
+ */
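+ /* The remaining descriptors compute H((K0 ^ opad) || H((K0 ^ ipad) || m)),
+ i.e. the final HMAC value: the stored opad digest is reloaded as the
+ hash state and k0 (holding the inner digest) is hashed into it. */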
+
+ /* Loading hash opad xor key state */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_CIPHER_MODE(&desc[idx], hw_mode);
+ HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, tmp_digest_dma_addr, inter_digestsize, NS_BIT);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
+ HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE0);
+ idx++;
+
+ /* Load the hash current length */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_CIPHER_MODE(&desc[idx], hw_mode);
+ HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, digest_bytes_len_dma_addr, HASH_LEN_SIZE, NS_BIT);
+ HW_DESC_SET_CIPHER_CONFIG1(&desc[idx], HASH_PADDING_ENABLED);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
+ HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
+ idx++;
+
+ /* Memory Barrier: wait for IPAD/OPAD axi write to complete */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_DIN_NO_DMA(&desc[idx], 0, 0xfffff0);
+ HW_DESC_SET_DOUT_NO_DMA(&desc[idx], 0, 0, 1);
+ idx++;
+
+ /* Perform HASH update */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, k0_dma_addr, digest_size, NS_BIT);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], DIN_HASH);
+ idx++;
+
+
+ /* Get final MAC result */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_CIPHER_MODE(&desc[idx], hw_mode);
+ HW_DESC_SET_DOUT_DLLI(&desc[idx], mac_res_dma_addr, digest_size, NS_BIT, 0);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], S_HASH_to_DOUT);
+ HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE0);
+ HW_DESC_SET_CIPHER_CONFIG1(&desc[idx], HASH_PADDING_DISABLED);
+ if (unlikely((hash_mode == DRV_HASH_MD5) ||
+ (hash_mode == DRV_HASH_SHA384) ||
+ (hash_mode == DRV_HASH_SHA512))) {
+ HW_DESC_SET_BYTES_SWAP(&desc[idx], 1);
+ } else {
+ HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], HASH_DIGEST_RESULT_LITTLE_ENDIAN);
+ }
+ idx++;
+
+ /* perform the operation - Lock HW and push sequence */
+ BUG_ON(idx > FIPS_HMAC_MAX_SEQ_LEN);
+ rc = send_request(drvdata, &ssi_req, desc, idx, false);
+
+ return rc;
+}
+
+ssi_fips_error_t
+ssi_hmac_fips_power_up_tests(struct ssi_drvdata *drvdata, void *cpu_addr_buffer, dma_addr_t dma_coherent_buffer)
+{
+ ssi_fips_error_t error = CC_REE_FIPS_ERROR_OK;
+ size_t i;
+ struct fips_hmac_ctx *virt_ctx = (struct fips_hmac_ctx *)cpu_addr_buffer;
+
+ /* set the physical (DMA) pointers */
+ dma_addr_t initial_digest_dma_addr = dma_coherent_buffer + offsetof(struct fips_hmac_ctx, initial_digest);
+ dma_addr_t key_dma_addr = dma_coherent_buffer + offsetof(struct fips_hmac_ctx, key);
+ dma_addr_t k0_dma_addr = dma_coherent_buffer + offsetof(struct fips_hmac_ctx, k0);
+ dma_addr_t tmp_digest_dma_addr = dma_coherent_buffer + offsetof(struct fips_hmac_ctx, tmp_digest);
+ dma_addr_t digest_bytes_len_dma_addr = dma_coherent_buffer + offsetof(struct fips_hmac_ctx, digest_bytes_len);
+ dma_addr_t din_dma_addr = dma_coherent_buffer + offsetof(struct fips_hmac_ctx, din);
+ dma_addr_t mac_res_dma_addr = dma_coherent_buffer + offsetof(struct fips_hmac_ctx, mac_res);
+
+ for (i = 0; i < FIPS_HMAC_NUM_OF_TESTS; ++i)
+ {
+ FipsHmacData *hmac_data = (FipsHmacData*)&FipsHmacDataTable[i];
+ int rc = 0;
+ enum drv_hash_hw_mode hw_mode = 0;
+ int digest_size = 0;
+ int block_size = 0;
+ int inter_digestsize = 0;
+
+ memset(cpu_addr_buffer, 0, sizeof(struct fips_hmac_ctx));
+
+ switch (hmac_data->hash_mode) {
+ case DRV_HASH_SHA1:
+ hw_mode = DRV_HASH_HW_SHA1;
+ digest_size = CC_SHA1_DIGEST_SIZE;
+ block_size = CC_SHA1_BLOCK_SIZE;
+ inter_digestsize = CC_SHA1_DIGEST_SIZE;
+ memcpy(virt_ctx->initial_digest, (void*)sha1_init, CC_SHA1_DIGEST_SIZE);
+ memcpy(virt_ctx->digest_bytes_len, digest_len_init, HASH_LEN_SIZE);
+ break;
+ case DRV_HASH_SHA256:
+ hw_mode = DRV_HASH_HW_SHA256;
+ digest_size = CC_SHA256_DIGEST_SIZE;
+ block_size = CC_SHA256_BLOCK_SIZE;
+ inter_digestsize = CC_SHA256_DIGEST_SIZE;
+ memcpy(virt_ctx->initial_digest, (void*)sha256_init, CC_SHA256_DIGEST_SIZE);
+ memcpy(virt_ctx->digest_bytes_len, digest_len_init, HASH_LEN_SIZE);
+ break;
+#if (CC_SUPPORT_SHA > 256)
+ case DRV_HASH_SHA512:
+ hw_mode = DRV_HASH_HW_SHA512;
+ digest_size = CC_SHA512_DIGEST_SIZE;
+ block_size = CC_SHA512_BLOCK_SIZE;
+ inter_digestsize = CC_SHA512_DIGEST_SIZE;
+ memcpy(virt_ctx->initial_digest, (void*)sha512_init, CC_SHA512_DIGEST_SIZE);
+ memcpy(virt_ctx->digest_bytes_len, digest_len_sha512_init, HASH_LEN_SIZE);
+ break;
+#endif
+ default:
+ error = FIPS_HmacToFipsError(hmac_data->hash_mode);
+ break;
+ }
+
+ /* copy into the allocated buffer */
+ memcpy(virt_ctx->key, hmac_data->key, hmac_data->key_size);
+ memcpy(virt_ctx->din, hmac_data->data_in, hmac_data->data_in_size);
+
+ /* run the test on HW */
+ FIPS_DBG("ssi_hmac_fips_run_test - (i = %d) \n", i);
+ rc = ssi_hmac_fips_run_test(drvdata,
+ initial_digest_dma_addr,
+ key_dma_addr,
+ hmac_data->key_size,
+ din_dma_addr,
+ hmac_data->data_in_size,
+ mac_res_dma_addr,
+ hmac_data->hash_mode,
+ hw_mode,
+ digest_size,
+ inter_digestsize,
+ block_size,
+ k0_dma_addr,
+ tmp_digest_dma_addr,
+ digest_bytes_len_dma_addr);
+ if (rc != 0)
+ {
+ FIPS_LOG("ssi_hmac_fips_run_test %d returned error - rc = %d \n", i, rc);
+ error = FIPS_HmacToFipsError(hmac_data->hash_mode);
+ break;
+ }
+
+ /* compare actual mac result to expected */
+ if (memcmp(virt_ctx->mac_res, hmac_data->mac_res, digest_size) != 0)
+ {
+ FIPS_LOG("comparison error %d - hash_mode=%d digest_size=%d \n", i, hmac_data->hash_mode, digest_size);
+ FIPS_LOG(" i expected received \n");
+ FIPS_LOG(" i 0x%08x 0x%08x \n", (size_t)hmac_data->mac_res, (size_t)virt_ctx->mac_res);
+ for (i = 0; i < digest_size; ++i)
+ {
+ FIPS_LOG(" %d 0x%02x 0x%02x \n", i, hmac_data->mac_res[i], virt_ctx->mac_res[i]);
+ }
+
+ error = FIPS_HmacToFipsError(hmac_data->hash_mode);
+ break;
+ }
+ }
+
+ return error;
+}
+
+
+static inline int
+ssi_ccm_fips_run_test(struct ssi_drvdata *drvdata,
+ enum drv_crypto_direction direction,
+ dma_addr_t key_dma_addr,
+ size_t key_size,
+ dma_addr_t iv_dma_addr,
+ dma_addr_t ctr_cnt_0_dma_addr,
+ dma_addr_t b0_a0_adata_dma_addr,
+ size_t b0_a0_adata_size,
+ dma_addr_t din_dma_addr,
+ size_t din_size,
+ dma_addr_t dout_dma_addr,
+ dma_addr_t mac_res_dma_addr)
+{
+ /* max number of descriptors used for the flow */
+ #define FIPS_CCM_MAX_SEQ_LEN 10
+
+ int rc;
+ struct ssi_crypto_req ssi_req = {0};
+ HwDesc_s desc[FIPS_CCM_MAX_SEQ_LEN];
+ unsigned int idx = 0;
+ unsigned int cipher_flow_mode;
+
+ if (direction == DRV_CRYPTO_DIRECTION_DECRYPT) {
+ cipher_flow_mode = AES_to_HASH_and_DOUT;
+ } else { /* Encrypt */
+ cipher_flow_mode = AES_and_HASH;
+ }
+
+ /* load key */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_CTR);
+ HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, key_dma_addr,
+ ((key_size == NIST_AESCCM_192_BIT_KEY_SIZE) ? CC_AES_KEY_SIZE_MAX : key_size),
+ NS_BIT);
+ HW_DESC_SET_KEY_SIZE_AES(&desc[idx], key_size);
+ HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
+ HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_AES);
+ idx++;
+
+ /* load ctr state */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_CTR);
+ HW_DESC_SET_KEY_SIZE_AES(&desc[idx], key_size);
+ HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
+ iv_dma_addr, AES_BLOCK_SIZE,
+ NS_BIT);
+ HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+ HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE1);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_AES);
+ idx++;
+
+ /* load MAC key */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_CBC_MAC);
+ HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, key_dma_addr,
+ ((key_size == NIST_AESCCM_192_BIT_KEY_SIZE) ? CC_AES_KEY_SIZE_MAX : key_size),
+ NS_BIT);
+ HW_DESC_SET_KEY_SIZE_AES(&desc[idx], key_size);
+ HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
+ HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
+ HW_DESC_SET_AES_NOT_HASH_MODE(&desc[idx]);
+ idx++;
+
+ /* load MAC state */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_CBC_MAC);
+ HW_DESC_SET_KEY_SIZE_AES(&desc[idx], key_size);
+ HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, mac_res_dma_addr, NIST_AESCCM_TAG_SIZE, NS_BIT);
+ HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+ HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE0);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
+ HW_DESC_SET_AES_NOT_HASH_MODE(&desc[idx]);
+ idx++;
+
+ /* process the assoc data */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, b0_a0_adata_dma_addr, b0_a0_adata_size, NS_BIT);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], DIN_HASH);
+ idx++;
+
+ /* process the cipher */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, din_dma_addr, din_size, NS_BIT);
+ HW_DESC_SET_DOUT_DLLI(&desc[idx], dout_dma_addr, din_size, NS_BIT, 0);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], cipher_flow_mode);
+ idx++;
+
+ /* Read temporal MAC */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_CBC_MAC);
+ HW_DESC_SET_DOUT_DLLI(&desc[idx], mac_res_dma_addr, NIST_AESCCM_TAG_SIZE, NS_BIT, 0);
+ HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE0);
+ HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], HASH_DIGEST_RESULT_LITTLE_ENDIAN);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], S_HASH_to_DOUT);
+ HW_DESC_SET_AES_NOT_HASH_MODE(&desc[idx]);
+ idx++;
+
+ /* load AES-CTR state (for last MAC calculation) */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_CTR);
+ HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
+ HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
+ ctr_cnt_0_dma_addr,
+ AES_BLOCK_SIZE, NS_BIT);
+ HW_DESC_SET_KEY_SIZE_AES(&desc[idx], key_size);
+ HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE1);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_AES);
+ idx++;
+
+ /* Memory Barrier */
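+ /* (a dummy no-DMA descriptor pair acts as a completion fence, so the MAC
+  * written out above has landed in memory before it is re-read below) */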
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_DIN_NO_DMA(&desc[idx], 0, 0xfffff0);
+ HW_DESC_SET_DOUT_NO_DMA(&desc[idx], 0, 0, 1);
+ idx++;
+
+ /* encrypt the "T" value and store the MAC in place */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, mac_res_dma_addr, NIST_AESCCM_TAG_SIZE, NS_BIT);
+ HW_DESC_SET_DOUT_DLLI(&desc[idx], mac_res_dma_addr, NIST_AESCCM_TAG_SIZE, NS_BIT, 0);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], DIN_AES_DOUT);
+ idx++;
+
+ /* perform the operation - Lock HW and push sequence */
+ BUG_ON(idx > FIPS_CCM_MAX_SEQ_LEN);
+ rc = send_request(drvdata, &ssi_req, desc, idx, false);
+
+ return rc;
+}
+
+ssi_fips_error_t
+ssi_ccm_fips_power_up_tests(struct ssi_drvdata *drvdata, void *cpu_addr_buffer, dma_addr_t dma_coherent_buffer)
+{
+ ssi_fips_error_t error = CC_REE_FIPS_ERROR_OK;
+ size_t i;
+ struct fips_ccm_ctx *virt_ctx = (struct fips_ccm_ctx *)cpu_addr_buffer;
+
+ /* set the physical pointers */
+ dma_addr_t b0_a0_adata_dma_addr = dma_coherent_buffer + offsetof(struct fips_ccm_ctx, b0_a0_adata);
+ dma_addr_t iv_dma_addr = dma_coherent_buffer + offsetof(struct fips_ccm_ctx, iv);
+ dma_addr_t ctr_cnt_0_dma_addr = dma_coherent_buffer + offsetof(struct fips_ccm_ctx, ctr_cnt_0);
+ dma_addr_t key_dma_addr = dma_coherent_buffer + offsetof(struct fips_ccm_ctx, key);
+ dma_addr_t din_dma_addr = dma_coherent_buffer + offsetof(struct fips_ccm_ctx, din);
+ dma_addr_t dout_dma_addr = dma_coherent_buffer + offsetof(struct fips_ccm_ctx, dout);
+ dma_addr_t mac_res_dma_addr = dma_coherent_buffer + offsetof(struct fips_ccm_ctx, mac_res);
+
+ for (i = 0; i < FIPS_CCM_NUM_OF_TESTS; ++i)
+ {
+ FipsCcmData *ccmData = (FipsCcmData*)&FipsCcmDataTable[i];
+ int rc = 0;
+
+ memset(cpu_addr_buffer, 0, sizeof(struct fips_ccm_ctx));
+
+ /* copy the nonce, key, adata, din data into the allocated buffer */
+ memcpy(virt_ctx->key, ccmData->key, ccmData->keySize);
+ memcpy(virt_ctx->din, ccmData->dataIn, ccmData->dataInSize);
+ {
+ /* build B0 -- B0, nonce, l(m) */
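+ /* B0 = flags || nonce || l(m): flags byte, nonce, and the 2-byte
+  * big-endian message length in the last two bytes of the block */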
+ __be16 data = cpu_to_be16(NIST_AESCCM_TEXT_SIZE);
+ virt_ctx->b0_a0_adata[0] = NIST_AESCCM_B0_VAL;
+ memcpy(virt_ctx->b0_a0_adata + 1, ccmData->nonce, NIST_AESCCM_NONCE_SIZE);
+ memcpy(virt_ctx->b0_a0_adata + 14, (u8 *)&data, sizeof(__be16));
+ /* build A0+ADATA */
+ virt_ctx->b0_a0_adata[NIST_AESCCM_IV_SIZE + 0] = (ccmData->adataSize >> 8) & 0xFF;
+ virt_ctx->b0_a0_adata[NIST_AESCCM_IV_SIZE + 1] = ccmData->adataSize & 0xFF;
+ memcpy(virt_ctx->b0_a0_adata + NIST_AESCCM_IV_SIZE + 2, ccmData->adata, ccmData->adataSize);
+ /* iv */
+ virt_ctx->iv[0] = 1; /* L' */
+ memcpy(virt_ctx->iv + 1, ccmData->nonce, NIST_AESCCM_NONCE_SIZE);
+ virt_ctx->iv[15] = 1;
+ /* ctr_count_0 */
+ memcpy(virt_ctx->ctr_cnt_0, virt_ctx->iv, NIST_AESCCM_IV_SIZE);
+ virt_ctx->ctr_cnt_0[15] = 0;
+ }
+
+ FIPS_DBG("ssi_ccm_fips_run_test - (i = %d) \n", i);
+ rc = ssi_ccm_fips_run_test(drvdata,
+ ccmData->direction,
+ key_dma_addr,
+ ccmData->keySize,
+ iv_dma_addr,
+ ctr_cnt_0_dma_addr,
+ b0_a0_adata_dma_addr,
+ FIPS_CCM_B0_A0_ADATA_SIZE,
+ din_dma_addr,
+ ccmData->dataInSize,
+ dout_dma_addr,
+ mac_res_dma_addr);
+ if (rc != 0)
+ {
+ FIPS_LOG("ssi_ccm_fips_run_test %d returned error - rc = %d \n", i, rc);
+ error = CC_REE_FIPS_ERROR_AESCCM_PUT;
+ break;
+ }
+
+ /* compare actual dout to expected */
+ if (memcmp(virt_ctx->dout, ccmData->dataOut, ccmData->dataInSize) != 0)
+ {
+ FIPS_LOG("dout comparison error %d - size=%d \n", i, ccmData->dataInSize);
+ error = CC_REE_FIPS_ERROR_AESCCM_PUT;
+ break;
+ }
+
+ /* compare actual mac result to expected */
+ if (memcmp(virt_ctx->mac_res, ccmData->macResOut, ccmData->tagSize) != 0)
+ {
+ FIPS_LOG("mac_res comparison error %d - mac_size=%d \n", i, ccmData->tagSize);
+ FIPS_LOG(" i expected received \n");
+ FIPS_LOG(" i 0x%08x 0x%08x \n", (size_t)ccmData->macResOut, (size_t)virt_ctx->mac_res);
+ for (i = 0; i < ccmData->tagSize; ++i)
+ {
+ FIPS_LOG(" %d 0x%02x 0x%02x \n", i, ccmData->macResOut[i], virt_ctx->mac_res[i]);
+ }
+
+ error = CC_REE_FIPS_ERROR_AESCCM_PUT;
+ break;
+ }
+ }
+
+ return error;
+}
+
+
+static inline int
+ssi_gcm_fips_run_test(struct ssi_drvdata *drvdata,
+ enum drv_crypto_direction direction,
+ dma_addr_t key_dma_addr,
+ size_t key_size,
+ dma_addr_t hkey_dma_addr,
+ dma_addr_t block_len_dma_addr,
+ dma_addr_t iv_inc1_dma_addr,
+ dma_addr_t iv_inc2_dma_addr,
+ dma_addr_t adata_dma_addr,
+ size_t adata_size,
+ dma_addr_t din_dma_addr,
+ size_t din_size,
+ dma_addr_t dout_dma_addr,
+ dma_addr_t mac_res_dma_addr)
+{
+ /* max number of descriptors used for the flow */
+ #define FIPS_GCM_MAX_SEQ_LEN 15
+
+ int rc;
+ struct ssi_crypto_req ssi_req = {0};
+ HwDesc_s desc[FIPS_GCM_MAX_SEQ_LEN];
+ unsigned int idx = 0;
+ unsigned int cipher_flow_mode;
+
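+ /*
+  * In GCM the GHASH is computed over the ciphertext: on encrypt the GCTR
+  * output is forwarded to the hash engine (AES_to_HASH_and_DOUT), while on
+  * decrypt the incoming ciphertext feeds both engines in parallel
+  * (AES_and_HASH).
+  */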
+ if (direction == DRV_CRYPTO_DIRECTION_DECRYPT) {
+ cipher_flow_mode = AES_and_HASH;
+ } else { /* Encrypt */
+ cipher_flow_mode = AES_to_HASH_and_DOUT;
+ }
+
+///////////////////////////////// 1 ////////////////////////////////////
+// ssi_aead_gcm_setup_ghash_desc(req, desc, seq_size);
+///////////////////////////////// 1 ////////////////////////////////////
+
+ /* load key to AES*/
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_ECB);
+ HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
+ HW_DESC_SET_DIN_TYPE(&desc[idx],
+ DMA_DLLI, key_dma_addr, key_size,
+ NS_BIT);
+ HW_DESC_SET_KEY_SIZE_AES(&desc[idx], key_size);
+ HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_AES);
+ idx++;
+
+ /* process one zero block to generate hkey */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_DIN_CONST(&desc[idx], 0x0, AES_BLOCK_SIZE);
+ HW_DESC_SET_DOUT_DLLI(&desc[idx],
+ hkey_dma_addr, AES_BLOCK_SIZE,
+ NS_BIT, 0);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], DIN_AES_DOUT);
+ idx++;
+
+ /* Memory Barrier */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_DIN_NO_DMA(&desc[idx], 0, 0xfffff0);
+ HW_DESC_SET_DOUT_NO_DMA(&desc[idx], 0, 0, 1);
+ idx++;
+
+ /* Load GHASH subkey */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
+ hkey_dma_addr, AES_BLOCK_SIZE,
+ NS_BIT);
+ HW_DESC_SET_DOUT_NO_DMA(&desc[idx], 0, 0, 1);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
+ HW_DESC_SET_AES_NOT_HASH_MODE(&desc[idx]);
+ HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_HASH_HW_GHASH);
+ HW_DESC_SET_CIPHER_CONFIG1(&desc[idx], HASH_PADDING_ENABLED);
+ HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
+ idx++;
+
+ /* Configure the hash engine to work with GHASH.
+  * Since it was not possible to extend the HASH submodes to add GHASH,
+  * the following command is necessary in order to select GHASH (according to the HW designers). */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_DIN_NO_DMA(&desc[idx], 0, 0xfffff0);
+ HW_DESC_SET_DOUT_NO_DMA(&desc[idx], 0, 0, 1);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
+ HW_DESC_SET_AES_NOT_HASH_MODE(&desc[idx]);
+ HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_HASH_HW_GHASH);
+ HW_DESC_SET_CIPHER_DO(&desc[idx], 1); //1=AES_SK RKEK
+ HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
+ HW_DESC_SET_CIPHER_CONFIG1(&desc[idx], HASH_PADDING_ENABLED);
+ HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
+ idx++;
+
+ /* Load the GHASH initial state (which is 0; every hash has an initial state) */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_DIN_CONST(&desc[idx], 0x0, AES_BLOCK_SIZE);
+ HW_DESC_SET_DOUT_NO_DMA(&desc[idx], 0, 0, 1);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
+ HW_DESC_SET_AES_NOT_HASH_MODE(&desc[idx]);
+ HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_HASH_HW_GHASH);
+ HW_DESC_SET_CIPHER_CONFIG1(&desc[idx], HASH_PADDING_ENABLED);
+ HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE0);
+ idx++;
+
+
+
+///////////////////////////////// 2 ////////////////////////////////////
+ /* process(ghash) assoc data */
+// if (req->assoclen > 0)
+// ssi_aead_create_assoc_desc(req, DIN_HASH, desc, seq_size);
+///////////////////////////////// 2 ////////////////////////////////////
+
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
+ adata_dma_addr, adata_size,
+ NS_BIT);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], DIN_HASH);
+ idx++;
+
+
+///////////////////////////////// 3 ////////////////////////////////////
+// ssi_aead_gcm_setup_gctr_desc(req, desc, seq_size);
+///////////////////////////////// 3 ////////////////////////////////////
+
+ /* load key to AES*/
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_GCTR);
+ HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
+ HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
+ key_dma_addr, key_size,
+ NS_BIT);
+ HW_DESC_SET_KEY_SIZE_AES(&desc[idx], key_size);
+ HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_AES);
+ idx++;
+
+ /* load AES/CTR initial CTR value inc by 2*/
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_GCTR);
+ HW_DESC_SET_KEY_SIZE_AES(&desc[idx], key_size);
+ HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
+ iv_inc2_dma_addr, AES_BLOCK_SIZE,
+ NS_BIT);
+ HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
+ HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE1);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_AES);
+ idx++;
+
+
+///////////////////////////////// 4 ////////////////////////////////////
+ /* process(gctr+ghash) */
+// if (req_ctx->cryptlen != 0)
+// ssi_aead_process_cipher_data_desc(req, cipher_flow_mode, desc, seq_size);
+///////////////////////////////// 4 ////////////////////////////////////
+
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
+ din_dma_addr, din_size,
+ NS_BIT);
+ HW_DESC_SET_DOUT_DLLI(&desc[idx],
+ dout_dma_addr, din_size,
+ NS_BIT, 0);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], cipher_flow_mode);
+ idx++;
+
+
+///////////////////////////////// 5 ////////////////////////////////////
+// ssi_aead_process_gcm_result_desc(req, desc, seq_size);
+///////////////////////////////// 5 ////////////////////////////////////
+
+ /* process(ghash) gcm_block_len */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
+ block_len_dma_addr, AES_BLOCK_SIZE,
+ NS_BIT);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], DIN_HASH);
+ idx++;
+
+ /* Store GHASH state after GHASH(Associated Data + Cipher +LenBlock) */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_HASH_HW_GHASH);
+ HW_DESC_SET_DIN_NO_DMA(&desc[idx], 0, 0xfffff0);
+ HW_DESC_SET_DOUT_DLLI(&desc[idx],
+ mac_res_dma_addr, AES_BLOCK_SIZE,
+ NS_BIT, 0);
+ HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE0);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], S_HASH_to_DOUT);
+ HW_DESC_SET_AES_NOT_HASH_MODE(&desc[idx]);
+ idx++;
+
+ /* load AES/CTR initial CTR value inc by 1*/
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_GCTR);
+ HW_DESC_SET_KEY_SIZE_AES(&desc[idx], key_size);
+ HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
+ iv_inc1_dma_addr, AES_BLOCK_SIZE,
+ NS_BIT);
+ HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
+ HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE1);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_AES);
+ idx++;
+
+ /* Memory Barrier */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_DIN_NO_DMA(&desc[idx], 0, 0xfffff0);
+ HW_DESC_SET_DOUT_NO_DMA(&desc[idx], 0, 0, 1);
+ idx++;
+
+ /* process GCTR on the stored GHASH and store the MAC in place */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_GCTR);
+ HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
+ mac_res_dma_addr, AES_BLOCK_SIZE,
+ NS_BIT);
+ HW_DESC_SET_DOUT_DLLI(&desc[idx],
+ mac_res_dma_addr, AES_BLOCK_SIZE,
+ NS_BIT, 0);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], DIN_AES_DOUT);
+ idx++;
+
+ /* perform the operation - Lock HW and push sequence */
+ BUG_ON(idx > FIPS_GCM_MAX_SEQ_LEN);
+ rc = send_request(drvdata, &ssi_req, desc, idx, false);
+
+ return rc;
+}
+
+ssi_fips_error_t
+ssi_gcm_fips_power_up_tests(struct ssi_drvdata *drvdata, void *cpu_addr_buffer, dma_addr_t dma_coherent_buffer)
+{
+ ssi_fips_error_t error = CC_REE_FIPS_ERROR_OK;
+ size_t i;
+ struct fips_gcm_ctx *virt_ctx = (struct fips_gcm_ctx *)cpu_addr_buffer;
+
+ /* set the physical pointers */
+ dma_addr_t adata_dma_addr = dma_coherent_buffer + offsetof(struct fips_gcm_ctx, adata);
+ dma_addr_t key_dma_addr = dma_coherent_buffer + offsetof(struct fips_gcm_ctx, key);
+ dma_addr_t hkey_dma_addr = dma_coherent_buffer + offsetof(struct fips_gcm_ctx, hkey);
+ dma_addr_t din_dma_addr = dma_coherent_buffer + offsetof(struct fips_gcm_ctx, din);
+ dma_addr_t dout_dma_addr = dma_coherent_buffer + offsetof(struct fips_gcm_ctx, dout);
+ dma_addr_t mac_res_dma_addr = dma_coherent_buffer + offsetof(struct fips_gcm_ctx, mac_res);
+ dma_addr_t len_block_dma_addr = dma_coherent_buffer + offsetof(struct fips_gcm_ctx, len_block);
+ dma_addr_t iv_inc1_dma_addr = dma_coherent_buffer + offsetof(struct fips_gcm_ctx, iv_inc1);
+ dma_addr_t iv_inc2_dma_addr = dma_coherent_buffer + offsetof(struct fips_gcm_ctx, iv_inc2);
+
+ for (i = 0; i < FIPS_GCM_NUM_OF_TESTS; ++i)
+ {
+ FipsGcmData *gcmData = (FipsGcmData*)&FipsGcmDataTable[i];
+ int rc = 0;
+
+ memset(cpu_addr_buffer, 0, sizeof(struct fips_gcm_ctx));
+
+ /* copy the key, adata and din data into the allocated buffer */
+ memcpy(virt_ctx->key, gcmData->key, gcmData->keySize);
+ memcpy(virt_ctx->adata, gcmData->adata, gcmData->adataSize);
+ memcpy(virt_ctx->din, gcmData->dataIn, gcmData->dataInSize);
+
+ /* len_block */
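+ /* the GHASH length block holds len(A) and len(C) as two 64-bit
+  * big-endian bit counts */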
+ {
+ __be64 len_bits;
+ len_bits = cpu_to_be64(gcmData->adataSize * 8);
+ memcpy(virt_ctx->len_block, &len_bits, sizeof(len_bits));
+ len_bits = cpu_to_be64(gcmData->dataInSize * 8);
+ memcpy(virt_ctx->len_block + 8, &len_bits, sizeof(len_bits));
+ }
+ /* iv_inc1, iv_inc2 */
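+ /* with a 96-bit IV, J0 = IV || 0^31 || 1: counter value 1 (iv_inc1) is
+  * used to encrypt the tag, and payload encryption starts at counter
+  * value 2 (iv_inc2) */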
+ {
+ __be32 counter = cpu_to_be32(1);
+ memcpy(virt_ctx->iv_inc1, gcmData->iv, NIST_AESGCM_IV_SIZE);
+ memcpy(virt_ctx->iv_inc1 + NIST_AESGCM_IV_SIZE, &counter, sizeof(counter));
+ counter = cpu_to_be32(2);
+ memcpy(virt_ctx->iv_inc2, gcmData->iv, NIST_AESGCM_IV_SIZE);
+ memcpy(virt_ctx->iv_inc2 + NIST_AESGCM_IV_SIZE, &counter, sizeof(counter));
+ }
+
+ FIPS_DBG("ssi_gcm_fips_run_test - (i = %d) \n", i);
+ rc = ssi_gcm_fips_run_test(drvdata,
+ gcmData->direction,
+ key_dma_addr,
+ gcmData->keySize,
+ hkey_dma_addr,
+ len_block_dma_addr,
+ iv_inc1_dma_addr,
+ iv_inc2_dma_addr,
+ adata_dma_addr,
+ gcmData->adataSize,
+ din_dma_addr,
+ gcmData->dataInSize,
+ dout_dma_addr,
+ mac_res_dma_addr);
+ if (rc != 0)
+ {
+ FIPS_LOG("ssi_gcm_fips_run_test %d returned error - rc = %d \n", i, rc);
+ error = CC_REE_FIPS_ERROR_AESGCM_PUT;
+ break;
+ }
+
+ if (gcmData->direction == DRV_CRYPTO_DIRECTION_ENCRYPT) {
+ /* compare actual dout to expected */
+ if (memcmp(virt_ctx->dout, gcmData->dataOut, gcmData->dataInSize) != 0)
+ {
+ FIPS_LOG("dout comparison error %d - size=%d \n", i, gcmData->dataInSize);
+ FIPS_LOG(" i expected received \n");
+ FIPS_LOG(" i 0x%08x 0x%08x \n", (size_t)gcmData->dataOut, (size_t)virt_ctx->dout);
+ for (i = 0; i < gcmData->dataInSize; ++i)
+ {
+ FIPS_LOG(" %d 0x%02x 0x%02x \n", i, gcmData->dataOut[i], virt_ctx->dout[i]);
+ }
+
+ error = CC_REE_FIPS_ERROR_AESGCM_PUT;
+ break;
+ }
+ }
+
+ /* compare actual mac result to expected */
+ if (memcmp(virt_ctx->mac_res, gcmData->macResOut, gcmData->tagSize) != 0)
+ {
+ FIPS_LOG("mac_res comparison error %d - mac_size=%d \n", i, gcmData->tagSize);
+ FIPS_LOG(" i expected received \n");
+ FIPS_LOG(" i 0x%08x 0x%08x \n", (size_t)gcmData->macResOut, (size_t)virt_ctx->mac_res);
+ for (i = 0; i < gcmData->tagSize; ++i)
+ {
+ FIPS_LOG(" %d 0x%02x 0x%02x \n", i, gcmData->macResOut[i], virt_ctx->mac_res[i]);
+ }
+
+ error = CC_REE_FIPS_ERROR_AESGCM_PUT;
+ break;
+ }
+ }
+ return error;
+}
+
+
+size_t ssi_fips_max_mem_alloc_size(void)
+{
+ FIPS_DBG("sizeof(struct fips_cipher_ctx) %d \n", sizeof(struct fips_cipher_ctx));
+ FIPS_DBG("sizeof(struct fips_cmac_ctx) %d \n", sizeof(struct fips_cmac_ctx));
+ FIPS_DBG("sizeof(struct fips_hash_ctx) %d \n", sizeof(struct fips_hash_ctx));
+ FIPS_DBG("sizeof(struct fips_hmac_ctx) %d \n", sizeof(struct fips_hmac_ctx));
+ FIPS_DBG("sizeof(struct fips_ccm_ctx) %d \n", sizeof(struct fips_ccm_ctx));
+ FIPS_DBG("sizeof(struct fips_gcm_ctx) %d \n", sizeof(struct fips_gcm_ctx));
+
+ return sizeof(fips_ctx);
+}
+
diff --git a/drivers/staging/ccree/ssi_fips_local.c b/drivers/staging/ccree/ssi_fips_local.c
new file mode 100644
index 000000000000..51b535a2a09a
--- /dev/null
+++ b/drivers/staging/ccree/ssi_fips_local.c
@@ -0,0 +1,369 @@
+/*
+ * Copyright (C) 2012-2017 ARM Limited or its affiliates.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/**************************************************************
+This file defines the driver's internal FIPS functions, used by the driver itself.
+***************************************************************/
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <crypto/des.h>
+
+#include "ssi_config.h"
+#include "ssi_driver.h"
+#include "cc_hal.h"
+
+
+#define FIPS_POWER_UP_TEST_CIPHER 1
+#define FIPS_POWER_UP_TEST_CMAC 1
+#define FIPS_POWER_UP_TEST_HASH 1
+#define FIPS_POWER_UP_TEST_HMAC 1
+#define FIPS_POWER_UP_TEST_CCM 1
+#define FIPS_POWER_UP_TEST_GCM 1
+
+static bool ssi_fips_support = 1;
+module_param(ssi_fips_support, bool, 0644);
+MODULE_PARM_DESC(ssi_fips_support, "FIPS supported flag: 0 - off , 1 - on (default)");
+
+static void fips_dsr(unsigned long devarg);
+
+struct ssi_fips_handle {
+#ifdef COMP_IN_WQ
+ struct workqueue_struct *workq;
+ struct delayed_work fipswork;
+#else
+ struct tasklet_struct fipstask;
+#endif
+};
+
+
+extern int ssi_fips_get_state(ssi_fips_state_t *p_state);
+extern int ssi_fips_get_error(ssi_fips_error_t *p_err);
+extern int ssi_fips_ext_set_state(ssi_fips_state_t state);
+extern int ssi_fips_ext_set_error(ssi_fips_error_t err);
+
+/* FIPS power-up tests */
+extern ssi_fips_error_t ssi_cipher_fips_power_up_tests(struct ssi_drvdata *drvdata, void *cpu_addr_buffer, dma_addr_t dma_coherent_buffer);
+extern ssi_fips_error_t ssi_cmac_fips_power_up_tests(struct ssi_drvdata *drvdata, void *cpu_addr_buffer, dma_addr_t dma_coherent_buffer);
+extern ssi_fips_error_t ssi_hash_fips_power_up_tests(struct ssi_drvdata *drvdata, void *cpu_addr_buffer, dma_addr_t dma_coherent_buffer);
+extern ssi_fips_error_t ssi_hmac_fips_power_up_tests(struct ssi_drvdata *drvdata, void *cpu_addr_buffer, dma_addr_t dma_coherent_buffer);
+extern ssi_fips_error_t ssi_ccm_fips_power_up_tests(struct ssi_drvdata *drvdata, void *cpu_addr_buffer, dma_addr_t dma_coherent_buffer);
+extern ssi_fips_error_t ssi_gcm_fips_power_up_tests(struct ssi_drvdata *drvdata, void *cpu_addr_buffer, dma_addr_t dma_coherent_buffer);
+extern size_t ssi_fips_max_mem_alloc_size(void);
+
+
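+/*
+ * REE/TEE FIPS synchronization is done through general-purpose registers:
+ * the TEE reports its status in GPR_HOST (read below), and the REE reports
+ * its own status back through HOST_GPR0 using the CC_FIPS_SYNC_* flags.
+ */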
+/* The function is called once at the driver entry point to check whether a TEE FIPS error occurred. */
+static enum ssi_fips_error ssi_fips_get_tee_error(struct ssi_drvdata *drvdata)
+{
+ uint32_t regVal;
+ void __iomem *cc_base = drvdata->cc_base;
+
+ regVal = CC_HAL_READ_REGISTER(CC_REG_OFFSET(HOST_RGF, GPR_HOST));
+ if (regVal == (CC_FIPS_SYNC_TEE_STATUS | CC_FIPS_SYNC_MODULE_OK)) {
+ return CC_REE_FIPS_ERROR_OK;
+ }
+ return CC_REE_FIPS_ERROR_FROM_TEE;
+}
+
+
+/*
+ * This function pushes the FIPS REE library status towards the TEE library
+ * by writing the error state to the HOST_GPR0 register. It is called from
+ * the driver entry point, so no mutex protection is needed.
+ */
+static void ssi_fips_update_tee_upon_ree_status(struct ssi_drvdata *drvdata, ssi_fips_error_t err)
+{
+ void __iomem *cc_base = drvdata->cc_base;
+ if (err == CC_REE_FIPS_ERROR_OK) {
+ CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_GPR0), (CC_FIPS_SYNC_REE_STATUS|CC_FIPS_SYNC_MODULE_OK));
+ } else {
+ CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_GPR0), (CC_FIPS_SYNC_REE_STATUS|CC_FIPS_SYNC_MODULE_ERROR));
+ }
+}
+
+
+
+void ssi_fips_fini(struct ssi_drvdata *drvdata)
+{
+ struct ssi_fips_handle *fips_h = drvdata->fips_handle;
+
+ if (fips_h == NULL)
+ return; /* Not allocated */
+
+#ifdef COMP_IN_WQ
+ if (fips_h->workq != NULL) {
+ flush_workqueue(fips_h->workq);
+ destroy_workqueue(fips_h->workq);
+ }
+#else
+ /* Kill tasklet */
+ tasklet_kill(&fips_h->fipstask);
+#endif
+ memset(fips_h, 0, sizeof(struct ssi_fips_handle));
+ kfree(fips_h);
+ drvdata->fips_handle = NULL;
+}
+
+void fips_handler(struct ssi_drvdata *drvdata)
+{
+ struct ssi_fips_handle *fips_handle_ptr =
+ drvdata->fips_handle;
+#ifdef COMP_IN_WQ
+ queue_delayed_work(fips_handle_ptr->workq, &fips_handle_ptr->fipswork, 0);
+#else
+ tasklet_schedule(&fips_handle_ptr->fipstask);
+#endif
+}
+
+
+
+#ifdef COMP_IN_WQ
+static void fips_wq_handler(struct work_struct *work)
+{
+ struct ssi_drvdata *drvdata =
+ container_of(work, struct ssi_drvdata, fipswork.work);
+
+ fips_dsr((unsigned long)drvdata);
+}
+#endif
+
+/* Deferred service handler, run as interrupt-fired tasklet */
+static void fips_dsr(unsigned long devarg)
+{
+ struct ssi_drvdata *drvdata = (struct ssi_drvdata *)devarg;
+ void __iomem *cc_base = drvdata->cc_base;
+ uint32_t irq;
+ uint32_t teeFipsError = 0;
+
+ irq = (drvdata->irq & (SSI_GPR0_IRQ_MASK));
+
+ if (irq & SSI_GPR0_IRQ_MASK) {
+ teeFipsError = CC_HAL_READ_REGISTER(CC_REG_OFFSET(HOST_RGF, GPR_HOST));
+ if (teeFipsError != (CC_FIPS_SYNC_TEE_STATUS | CC_FIPS_SYNC_MODULE_OK)) {
+ ssi_fips_set_error(drvdata, CC_REE_FIPS_ERROR_FROM_TEE);
+ }
+ }
+
+ /* after verifying that there is nothing to do, unmask the AXI completion interrupt */
+ CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_IMR),
+ CC_HAL_READ_REGISTER(
+ CC_REG_OFFSET(HOST_RGF, HOST_IMR)) & ~irq);
+}
+
+
+ssi_fips_error_t cc_fips_run_power_up_tests(struct ssi_drvdata *drvdata)
+{
+ ssi_fips_error_t fips_error = CC_REE_FIPS_ERROR_OK;
+ void * cpu_addr_buffer = NULL;
+ dma_addr_t dma_handle;
+ size_t alloc_buff_size = ssi_fips_max_mem_alloc_size();
+ struct device *dev = &drvdata->plat_dev->dev;
+
+ // allocate memory using dma_alloc_coherent - for a physical, contiguous and cache-coherent buffer (memory map is not needed)
+ // the return value is the virtual address - use it to copy data into the buffer
+ // the dma_handle is the returned physical address - use it in the HW descriptor
+ FIPS_DBG("dma_alloc_coherent \n");
+ cpu_addr_buffer = dma_alloc_coherent(dev, alloc_buff_size, &dma_handle, GFP_KERNEL);
+ if (cpu_addr_buffer == NULL) {
+ return CC_REE_FIPS_ERROR_GENERAL;
+ }
+ FIPS_DBG("allocated coherent buffer - addr 0x%08X , size = %d \n", (size_t)cpu_addr_buffer, alloc_buff_size);
+
+#if FIPS_POWER_UP_TEST_CIPHER
+ FIPS_DBG("ssi_cipher_fips_power_up_tests ...\n");
+ fips_error = ssi_cipher_fips_power_up_tests(drvdata, cpu_addr_buffer, dma_handle);
+ FIPS_DBG("ssi_cipher_fips_power_up_tests - done. (fips_error = %d) \n", fips_error);
+#endif
+#if FIPS_POWER_UP_TEST_CMAC
+ if (likely(fips_error == CC_REE_FIPS_ERROR_OK)) {
+ FIPS_DBG("ssi_cmac_fips_power_up_tests ...\n");
+ fips_error = ssi_cmac_fips_power_up_tests(drvdata, cpu_addr_buffer, dma_handle);
+ FIPS_DBG("ssi_cmac_fips_power_up_tests - done. (fips_error = %d) \n", fips_error);
+ }
+#endif
+#if FIPS_POWER_UP_TEST_HASH
+ if (likely(fips_error == CC_REE_FIPS_ERROR_OK)) {
+ FIPS_DBG("ssi_hash_fips_power_up_tests ...\n");
+ fips_error = ssi_hash_fips_power_up_tests(drvdata, cpu_addr_buffer, dma_handle);
+ FIPS_DBG("ssi_hash_fips_power_up_tests - done. (fips_error = %d) \n", fips_error);
+ }
+#endif
+#if FIPS_POWER_UP_TEST_HMAC
+ if (likely(fips_error == CC_REE_FIPS_ERROR_OK)) {
+ FIPS_DBG("ssi_hmac_fips_power_up_tests ...\n");
+ fips_error = ssi_hmac_fips_power_up_tests(drvdata, cpu_addr_buffer, dma_handle);
+ FIPS_DBG("ssi_hmac_fips_power_up_tests - done. (fips_error = %d) \n", fips_error);
+ }
+#endif
+#if FIPS_POWER_UP_TEST_CCM
+ if (likely(fips_error == CC_REE_FIPS_ERROR_OK)) {
+ FIPS_DBG("ssi_ccm_fips_power_up_tests ...\n");
+ fips_error = ssi_ccm_fips_power_up_tests(drvdata, cpu_addr_buffer, dma_handle);
+ FIPS_DBG("ssi_ccm_fips_power_up_tests - done. (fips_error = %d) \n", fips_error);
+ }
+#endif
+#if FIPS_POWER_UP_TEST_GCM
+ if (likely(fips_error == CC_REE_FIPS_ERROR_OK)) {
+ FIPS_DBG("ssi_gcm_fips_power_up_tests ...\n");
+ fips_error = ssi_gcm_fips_power_up_tests(drvdata, cpu_addr_buffer, dma_handle);
+ FIPS_DBG("ssi_gcm_fips_power_up_tests - done. (fips_error = %d) \n", fips_error);
+ }
+#endif
+ /* deallocate the buffer when all tests are done... */
+ FIPS_DBG("dma_free_coherent \n");
+ dma_free_coherent(dev, alloc_buff_size, cpu_addr_buffer, dma_handle);
+
+ return fips_error;
+}
+
+
+
+/* The function checks whether FIPS is supported and whether a FIPS error exists.
+ * It should be used in every driver API. */
+int ssi_fips_check_fips_error(void)
+{
+ ssi_fips_state_t fips_state;
+
+ if (ssi_fips_get_state(&fips_state) != 0) {
+ FIPS_LOG("ssi_fips_get_state FAILED, returning.. \n");
+ return -ENOEXEC;
+ }
+ if (fips_state == CC_FIPS_STATE_ERROR) {
+ FIPS_LOG("ssi_fips_get_state: fips_state is %d, returning.. \n", fips_state);
+ return -ENOEXEC;
+ }
+ return 0;
+}
+
+
+/* The function sets the REE FIPS state.
+ * It should be used while the driver is being loaded. */
+int ssi_fips_set_state(ssi_fips_state_t state)
+{
+ return ssi_fips_ext_set_state(state);
+}
+
+/* The function sets the REE FIPS error and pushes the error to the TEE library.
+ * It should be used when any of the KAT tests fails. */
+int ssi_fips_set_error(struct ssi_drvdata *p_drvdata, ssi_fips_error_t err)
+{
+ int rc = 0;
+ ssi_fips_error_t current_err;
+
+ FIPS_LOG("ssi_fips_set_error - fips_error = %d \n", err);
+
+ // setting no error is not allowed
+ if (err == CC_REE_FIPS_ERROR_OK) {
+ return -ENOEXEC;
+ }
+ // If error exists, do not set new error
+ if (ssi_fips_get_error(&current_err) != 0) {
+ return -ENOEXEC;
+ }
+ if (current_err != CC_REE_FIPS_ERROR_OK) {
+ return -ENOEXEC;
+ }
+ // set REE internal error and state
+ rc = ssi_fips_ext_set_error(err);
+ if (rc != 0) {
+ return -ENOEXEC;
+ }
+ rc = ssi_fips_ext_set_state(CC_FIPS_STATE_ERROR);
+ if (rc != 0) {
+ return -ENOEXEC;
+ }
+
+ // push the error towards the TEE library, if it is not a TEE error
+ if (err != CC_REE_FIPS_ERROR_FROM_TEE) {
+ ssi_fips_update_tee_upon_ree_status(p_drvdata, err);
+ }
+ return rc;
+}
+
+
+/* The function is called once at the driver entry point. */
+int ssi_fips_init(struct ssi_drvdata *p_drvdata)
+{
+ ssi_fips_error_t rc = CC_REE_FIPS_ERROR_OK;
+ struct ssi_fips_handle *fips_h;
+
+ FIPS_DBG("CC FIPS code .. (fips=%d) \n", ssi_fips_support);
+
+ fips_h = kzalloc(sizeof(struct ssi_fips_handle),GFP_KERNEL);
+ if (fips_h == NULL) {
+ ssi_fips_set_error(p_drvdata, CC_REE_FIPS_ERROR_GENERAL);
+ return -ENOMEM;
+ }
+
+ p_drvdata->fips_handle = fips_h;
+
+#ifdef COMP_IN_WQ
+ SSI_LOG_DEBUG("Initializing fips workqueue\n");
+ fips_h->workq = create_singlethread_workqueue("arm_cc7x_fips_wq");
+ if (unlikely(fips_h->workq == NULL)) {
+ SSI_LOG_ERR("Failed creating fips work queue\n");
+ ssi_fips_set_error(p_drvdata, CC_REE_FIPS_ERROR_GENERAL);
+ rc = -ENOMEM;
+ goto ssi_fips_init_err;
+ }
+ INIT_DELAYED_WORK(&fips_h->fipswork, fips_wq_handler);
+#else
+ SSI_LOG_DEBUG("Initializing fips tasklet\n");
+ tasklet_init(&fips_h->fipstask, fips_dsr, (unsigned long)p_drvdata);
+#endif
+
+ /* init fips driver data */
+ rc = ssi_fips_set_state((ssi_fips_support == 0)? CC_FIPS_STATE_NOT_SUPPORTED : CC_FIPS_STATE_SUPPORTED);
+ if (unlikely(rc != 0)) {
+ ssi_fips_set_error(p_drvdata, CC_REE_FIPS_ERROR_GENERAL);
+ rc = -EAGAIN;
+ goto ssi_fips_init_err;
+ }
+
+ /* Run power up tests (before registration and operating the HW engines) */
+ FIPS_DBG("ssi_fips_get_tee_error \n");
+ rc = ssi_fips_get_tee_error(p_drvdata);
+ if (unlikely(rc != CC_REE_FIPS_ERROR_OK)) {
+ ssi_fips_set_error(p_drvdata, CC_REE_FIPS_ERROR_FROM_TEE);
+ rc = -EAGAIN;
+ goto ssi_fips_init_err;
+ }
+
+ FIPS_DBG("cc_fips_run_power_up_tests \n");
+ rc = cc_fips_run_power_up_tests(p_drvdata);
+ if (unlikely(rc != CC_REE_FIPS_ERROR_OK)) {
+ ssi_fips_set_error(p_drvdata, rc);
+ rc = -EAGAIN;
+ goto ssi_fips_init_err;
+ }
+ FIPS_LOG("cc_fips_run_power_up_tests - done ... fips_error = %d \n", rc);
+
+ /* when all tests passed, update TEE with fips OK status after power up tests */
+ ssi_fips_update_tee_upon_ree_status(p_drvdata, CC_REE_FIPS_ERROR_OK);
+
+ if (unlikely(rc != 0)) {
+ rc = -EAGAIN;
+ ssi_fips_set_error(p_drvdata, CC_REE_FIPS_ERROR_GENERAL);
+ goto ssi_fips_init_err;
+ }
+
+ return 0;
+
+ssi_fips_init_err:
+ ssi_fips_fini(p_drvdata);
+ return rc;
+}
+
diff --git a/drivers/staging/ccree/ssi_fips_local.h b/drivers/staging/ccree/ssi_fips_local.h
new file mode 100644
index 000000000000..65997c15a20e
--- /dev/null
+++ b/drivers/staging/ccree/ssi_fips_local.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2012-2017 ARM Limited or its affiliates.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __SSI_FIPS_LOCAL_H__
+#define __SSI_FIPS_LOCAL_H__
+
+
+#ifdef CONFIG_CCX7REE_FIPS_SUPPORT
+
+#include "ssi_fips.h"
+struct ssi_drvdata;
+
+// IG - how to make 1 file for TEE and REE
+typedef enum CC_FipsSyncStatus{
+ CC_FIPS_SYNC_MODULE_OK = 0x0,
+ CC_FIPS_SYNC_MODULE_ERROR = 0x1,
+ CC_FIPS_SYNC_REE_STATUS = 0x4,
+ CC_FIPS_SYNC_TEE_STATUS = 0x8,
+ CC_FIPS_SYNC_STATUS_RESERVE32B = INT32_MAX
+}CCFipsSyncStatus_t;
+
+
+#define CHECK_AND_RETURN_UPON_FIPS_ERROR() {\
+ if (ssi_fips_check_fips_error() != 0) {\
+ return -ENOEXEC;\
+ }\
+}
+#define CHECK_AND_RETURN_VOID_UPON_FIPS_ERROR() {\
+ if (ssi_fips_check_fips_error() != 0) {\
+ return;\
+ }\
+}
+#define SSI_FIPS_INIT(p_drvData) (ssi_fips_init(p_drvData))
+#define SSI_FIPS_FINI(p_drvData) (ssi_fips_fini(p_drvData))
+
+#define FIPS_LOG(...) SSI_LOG(KERN_INFO, __VA_ARGS__)
+#define FIPS_DBG(...) //SSI_LOG(KERN_INFO, __VA_ARGS__)
+
+/* FIPS functions */
+int ssi_fips_init(struct ssi_drvdata *p_drvdata);
+void ssi_fips_fini(struct ssi_drvdata *drvdata);
+int ssi_fips_check_fips_error(void);
+int ssi_fips_set_error(struct ssi_drvdata *p_drvdata, ssi_fips_error_t err);
+void fips_handler(struct ssi_drvdata *drvdata);
+
+#else /* CONFIG_CCX7REE_FIPS_SUPPORT */
+
+#define CHECK_AND_RETURN_UPON_FIPS_ERROR()
+#define CHECK_AND_RETURN_VOID_UPON_FIPS_ERROR()
+
+static inline int ssi_fips_init(struct ssi_drvdata *p_drvdata)
+{
+ return 0;
+}
+
+static inline void ssi_fips_fini(struct ssi_drvdata *drvdata) {}
+
+void fips_handler(struct ssi_drvdata *drvdata);
+
+#endif /* CONFIG_CCX7REE_FIPS_SUPPORT */
+
+
+#endif /*__SSI_FIPS_LOCAL_H__*/
+
diff --git a/drivers/staging/ccree/ssi_hash.c b/drivers/staging/ccree/ssi_hash.c
new file mode 100644
index 000000000000..f99d4219b01e
--- /dev/null
+++ b/drivers/staging/ccree/ssi_hash.c
@@ -0,0 +1,2758 @@
+/*
+ * Copyright (C) 2012-2017 ARM Limited or its affiliates.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <crypto/algapi.h>
+#include <crypto/hash.h>
+#include <crypto/sha.h>
+#include <crypto/md5.h>
+#include <crypto/internal/hash.h>
+
+#include "ssi_config.h"
+#include "ssi_driver.h"
+#include "ssi_request_mgr.h"
+#include "ssi_buffer_mgr.h"
+#include "ssi_sysfs.h"
+#include "ssi_hash.h"
+#include "ssi_sram_mgr.h"
+#include "ssi_fips_local.h"
+
+#define SSI_MAX_AHASH_SEQ_LEN 12
+#define SSI_MAX_HASH_OPAD_TMP_KEYS_SIZE MAX(SSI_MAX_HASH_BLCK_SIZE, 3 * AES_BLOCK_SIZE)
+
+struct ssi_hash_handle {
+ ssi_sram_addr_t digest_len_sram_addr; /* const value in SRAM*/
+ ssi_sram_addr_t larval_digest_sram_addr; /* const value in SRAM */
+ struct list_head hash_list;
+ struct completion init_comp;
+};
+
+static const uint32_t digest_len_init[] = {
+ 0x00000040, 0x00000000, 0x00000000, 0x00000000 };
+static const uint32_t md5_init[] = {
+ SHA1_H3, SHA1_H2, SHA1_H1, SHA1_H0 };
+static const uint32_t sha1_init[] = {
+ SHA1_H4, SHA1_H3, SHA1_H2, SHA1_H1, SHA1_H0 };
+static const uint32_t sha224_init[] = {
+ SHA224_H7, SHA224_H6, SHA224_H5, SHA224_H4,
+ SHA224_H3, SHA224_H2, SHA224_H1, SHA224_H0 };
+static const uint32_t sha256_init[] = {
+ SHA256_H7, SHA256_H6, SHA256_H5, SHA256_H4,
+ SHA256_H3, SHA256_H2, SHA256_H1, SHA256_H0 };
+#if (DX_DEV_SHA_MAX > 256)
+static const uint32_t digest_len_sha512_init[] = {
+ 0x00000080, 0x00000000, 0x00000000, 0x00000000 };
+static const uint64_t sha384_init[] = {
+ SHA384_H7, SHA384_H6, SHA384_H5, SHA384_H4,
+ SHA384_H3, SHA384_H2, SHA384_H1, SHA384_H0 };
+static const uint64_t sha512_init[] = {
+ SHA512_H7, SHA512_H6, SHA512_H5, SHA512_H4,
+ SHA512_H3, SHA512_H2, SHA512_H1, SHA512_H0 };
+#endif
+
+static void ssi_hash_create_xcbc_setup(
+ struct ahash_request *areq,
+ HwDesc_s desc[],
+ unsigned int *seq_size);
+
+static void ssi_hash_create_cmac_setup(struct ahash_request *areq,
+ HwDesc_s desc[],
+ unsigned int *seq_size);
+
+struct ssi_hash_alg {
+ struct list_head entry;
+ bool synchronize;
+ int hash_mode;
+ int hw_mode;
+ int inter_digestsize;
+ struct ssi_drvdata *drvdata;
+ union {
+ struct ahash_alg ahash_alg;
+ struct shash_alg shash_alg;
+ };
+};
+
+
+struct hash_key_req_ctx {
+ uint32_t keylen;
+ dma_addr_t key_dma_addr;
+};
+
+/* hash per-session context */
+struct ssi_hash_ctx {
+ struct ssi_drvdata *drvdata;
+ /* holds the original digest: the digest after "setkey" if HMAC,
+  * the initial digest if HASH. */
+ uint8_t digest_buff[SSI_MAX_HASH_DIGEST_SIZE] ____cacheline_aligned;
+ uint8_t opad_tmp_keys_buff[SSI_MAX_HASH_OPAD_TMP_KEYS_SIZE] ____cacheline_aligned;
+ dma_addr_t opad_tmp_keys_dma_addr ____cacheline_aligned;
+ dma_addr_t digest_buff_dma_addr;
+ /* used for HMAC with a key larger than the mode block size */
+ struct hash_key_req_ctx key_params;
+ int hash_mode;
+ int hw_mode;
+ int inter_digestsize;
+ struct completion setkey_comp;
+ bool is_hmac;
+};
+
+static const struct crypto_type crypto_shash_type;
+
+static void ssi_hash_create_data_desc(
+ struct ahash_req_ctx *areq_ctx,
+ struct ssi_hash_ctx *ctx,
+ unsigned int flow_mode,HwDesc_s desc[],
+ bool is_not_last_data,
+ unsigned int *seq_size);
+
+static inline void ssi_set_hash_endianity(uint32_t mode, HwDesc_s *desc)
+{
+ if (unlikely((mode == DRV_HASH_MD5) ||
+ (mode == DRV_HASH_SHA384) ||
+ (mode == DRV_HASH_SHA512))) {
+ HW_DESC_SET_BYTES_SWAP(desc, 1);
+ } else {
+ HW_DESC_SET_CIPHER_CONFIG0(desc, HASH_DIGEST_RESULT_LITTLE_ENDIAN);
+ }
+}
+
+static int ssi_hash_map_result(struct device *dev,
+ struct ahash_req_ctx *state,
+ unsigned int digestsize)
+{
+ state->digest_result_dma_addr =
+ dma_map_single(dev, (void *)state->digest_result_buff,
+ digestsize,
+ DMA_BIDIRECTIONAL);
+ if (unlikely(dma_mapping_error(dev, state->digest_result_dma_addr))) {
+ SSI_LOG_ERR("Mapping digest result buffer %u B for DMA failed\n",
+ digestsize);
+ return -ENOMEM;
+ }
+ SSI_UPDATE_DMA_ADDR_TO_48BIT(state->digest_result_dma_addr,
+ digestsize);
+ SSI_LOG_DEBUG("Mapped digest result buffer %u B "
+ "at va=%pK to dma=0x%llX\n",
+ digestsize, state->digest_result_buff,
+ (unsigned long long)state->digest_result_dma_addr);
+
+ return 0;
+}
+
+static int ssi_hash_map_request(struct device *dev,
+ struct ahash_req_ctx *state,
+ struct ssi_hash_ctx *ctx)
+{
+ bool is_hmac = ctx->is_hmac;
+ ssi_sram_addr_t larval_digest_addr = ssi_ahash_get_larval_digest_sram_addr(
+ ctx->drvdata, ctx->hash_mode);
+ struct ssi_crypto_req ssi_req = {};
+ HwDesc_s desc;
+ int rc = -ENOMEM;
+
+ state->buff0 = kzalloc(SSI_MAX_HASH_BLCK_SIZE ,GFP_KERNEL|GFP_DMA);
+ if (!state->buff0) {
+ SSI_LOG_ERR("Allocating buff0 in context failed\n");
+ goto fail0;
+ }
+ state->buff1 = kzalloc(SSI_MAX_HASH_BLCK_SIZE ,GFP_KERNEL|GFP_DMA);
+ if (!state->buff1) {
+ SSI_LOG_ERR("Allocating buff1 in context failed\n");
+ goto fail_buff0;
+ }
+ state->digest_result_buff = kzalloc(SSI_MAX_HASH_DIGEST_SIZE ,GFP_KERNEL|GFP_DMA);
+ if (!state->digest_result_buff) {
+ SSI_LOG_ERR("Allocating digest_result_buff in context failed\n");
+ goto fail_buff1;
+ }
+ state->digest_buff = kzalloc(ctx->inter_digestsize, GFP_KERNEL|GFP_DMA);
+ if (!state->digest_buff) {
+ SSI_LOG_ERR("Allocating digest-buffer in context failed\n");
+ goto fail_digest_result_buff;
+ }
+
+ SSI_LOG_DEBUG("Allocated digest-buffer in context ctx->digest_buff=@%p\n", state->digest_buff);
+ if (ctx->hw_mode != DRV_CIPHER_XCBC_MAC) {
+ state->digest_bytes_len = kzalloc(HASH_LEN_SIZE, GFP_KERNEL|GFP_DMA);
+ if (!state->digest_bytes_len) {
+ SSI_LOG_ERR("Allocating digest-bytes-len in context failed\n");
+ goto fail1;
+ }
+ SSI_LOG_DEBUG("Allocated digest-bytes-len in context state->digest_bytes_len=@%p\n", state->digest_bytes_len);
+ } else {
+ state->digest_bytes_len = NULL;
+ }
+
+ state->opad_digest_buff = kzalloc(ctx->inter_digestsize, GFP_KERNEL|GFP_DMA);
+ if (!state->opad_digest_buff) {
+ SSI_LOG_ERR("Allocating opad-digest-buffer in context failed\n");
+ goto fail2;
+ }
+ SSI_LOG_DEBUG("Allocated opad-digest-buffer in context state->opad_digest_buff=@%p\n", state->opad_digest_buff);
+
+ state->digest_buff_dma_addr = dma_map_single(dev, (void *)state->digest_buff, ctx->inter_digestsize, DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev, state->digest_buff_dma_addr)) {
+ SSI_LOG_ERR("Mapping digest len %d B at va=%pK for DMA failed\n",
+ ctx->inter_digestsize, state->digest_buff);
+ goto fail3;
+ }
+ SSI_UPDATE_DMA_ADDR_TO_48BIT(state->digest_buff_dma_addr,
+ ctx->inter_digestsize);
+ SSI_LOG_DEBUG("Mapped digest %d B at va=%pK to dma=0x%llX\n",
+ ctx->inter_digestsize, state->digest_buff,
+ (unsigned long long)state->digest_buff_dma_addr);
+
+ if (is_hmac) {
+ SSI_RESTORE_DMA_ADDR_TO_48BIT(ctx->digest_buff_dma_addr);
+ dma_sync_single_for_cpu(dev, ctx->digest_buff_dma_addr, ctx->inter_digestsize, DMA_BIDIRECTIONAL);
+ SSI_UPDATE_DMA_ADDR_TO_48BIT(ctx->digest_buff_dma_addr,
+ ctx->inter_digestsize);
+ if ((ctx->hw_mode == DRV_CIPHER_XCBC_MAC) || (ctx->hw_mode == DRV_CIPHER_CMAC)) {
+ memset(state->digest_buff, 0, ctx->inter_digestsize);
+ } else { /*sha*/
+ memcpy(state->digest_buff, ctx->digest_buff, ctx->inter_digestsize);
+#if (DX_DEV_SHA_MAX > 256)
+ if (unlikely((ctx->hash_mode == DRV_HASH_SHA512) || (ctx->hash_mode == DRV_HASH_SHA384))) {
+ memcpy(state->digest_bytes_len, digest_len_sha512_init, HASH_LEN_SIZE);
+ } else {
+ memcpy(state->digest_bytes_len, digest_len_init, HASH_LEN_SIZE);
+ }
+#else
+ memcpy(state->digest_bytes_len, digest_len_init, HASH_LEN_SIZE);
+#endif
+ }
+ SSI_RESTORE_DMA_ADDR_TO_48BIT(state->digest_buff_dma_addr);
+ dma_sync_single_for_device(dev, state->digest_buff_dma_addr, ctx->inter_digestsize, DMA_BIDIRECTIONAL);
+ SSI_UPDATE_DMA_ADDR_TO_48BIT(state->digest_buff_dma_addr,
+ ctx->inter_digestsize);
+
+ if (ctx->hash_mode != DRV_HASH_NULL) {
+ SSI_RESTORE_DMA_ADDR_TO_48BIT(ctx->opad_tmp_keys_dma_addr);
+ dma_sync_single_for_cpu(dev, ctx->opad_tmp_keys_dma_addr, ctx->inter_digestsize, DMA_BIDIRECTIONAL);
+ memcpy(state->opad_digest_buff, ctx->opad_tmp_keys_buff, ctx->inter_digestsize);
+ SSI_UPDATE_DMA_ADDR_TO_48BIT(ctx->opad_tmp_keys_dma_addr,
+ ctx->inter_digestsize);
+ }
+ } else { /*hash*/
+ /* Copy the initial digests if hash flow. The SRAM contains the
+  * initial digests in the expected order for all SHA*. */
+ HW_DESC_INIT(&desc);
+ HW_DESC_SET_DIN_SRAM(&desc, larval_digest_addr, ctx->inter_digestsize);
+ HW_DESC_SET_DOUT_DLLI(&desc, state->digest_buff_dma_addr, ctx->inter_digestsize, NS_BIT, 0);
+ HW_DESC_SET_FLOW_MODE(&desc, BYPASS);
+
+ rc = send_request(ctx->drvdata, &ssi_req, &desc, 1, 0);
+ if (unlikely(rc != 0)) {
+ SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
+ goto fail4;
+ }
+ }
+
+ if (ctx->hw_mode != DRV_CIPHER_XCBC_MAC) {
+ state->digest_bytes_len_dma_addr = dma_map_single(dev, (void *)state->digest_bytes_len, HASH_LEN_SIZE, DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev, state->digest_bytes_len_dma_addr)) {
+ SSI_LOG_ERR("Mapping digest len %u B at va=%pK for DMA failed\n",
+ HASH_LEN_SIZE, state->digest_bytes_len);
+ goto fail4;
+ }
+ SSI_UPDATE_DMA_ADDR_TO_48BIT(state->digest_bytes_len_dma_addr,
+ HASH_LEN_SIZE);
+ SSI_LOG_DEBUG("Mapped digest len %u B at va=%pK to dma=0x%llX\n",
+ HASH_LEN_SIZE, state->digest_bytes_len,
+ (unsigned long long)state->digest_bytes_len_dma_addr);
+ } else {
+ state->digest_bytes_len_dma_addr = 0;
+ }
+
+ if (is_hmac && ctx->hash_mode != DRV_HASH_NULL) {
+ state->opad_digest_dma_addr = dma_map_single(dev, (void *)state->opad_digest_buff, ctx->inter_digestsize, DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev, state->opad_digest_dma_addr)) {
+ SSI_LOG_ERR("Mapping opad digest %d B at va=%pK for DMA failed\n",
+ ctx->inter_digestsize, state->opad_digest_buff);
+ goto fail5;
+ }
+ SSI_UPDATE_DMA_ADDR_TO_48BIT(state->opad_digest_dma_addr,
+ ctx->inter_digestsize);
+ SSI_LOG_DEBUG("Mapped opad digest %d B at va=%pK to dma=0x%llX\n",
+ ctx->inter_digestsize, state->opad_digest_buff,
+ (unsigned long long)state->opad_digest_dma_addr);
+ } else {
+ state->opad_digest_dma_addr = 0;
+ }
+ state->buff0_cnt = 0;
+ state->buff1_cnt = 0;
+ state->buff_index = 0;
+ state->mlli_params.curr_pool = NULL;
+
+ return 0;
+
+fail5:
+ if (state->digest_bytes_len_dma_addr != 0) {
+ SSI_RESTORE_DMA_ADDR_TO_48BIT(state->digest_bytes_len_dma_addr);
+ dma_unmap_single(dev, state->digest_bytes_len_dma_addr, HASH_LEN_SIZE, DMA_BIDIRECTIONAL);
+ state->digest_bytes_len_dma_addr = 0;
+ }
+fail4:
+ if (state->digest_buff_dma_addr != 0) {
+ SSI_RESTORE_DMA_ADDR_TO_48BIT(state->digest_buff_dma_addr);
+ dma_unmap_single(dev, state->digest_buff_dma_addr, ctx->inter_digestsize, DMA_BIDIRECTIONAL);
+ state->digest_buff_dma_addr = 0;
+ }
+fail3:
+ kfree(state->opad_digest_buff);
+fail2:
+ kfree(state->digest_bytes_len);
+fail1:
+ kfree(state->digest_buff);
+fail_digest_result_buff:
+ if (state->digest_result_buff != NULL) {
+ kfree(state->digest_result_buff);
+ state->digest_result_buff = NULL;
+ }
+fail_buff1:
+ if (state->buff1 != NULL) {
+ kfree(state->buff1);
+ state->buff1 = NULL;
+ }
+fail_buff0:
+ if (state->buff0 != NULL) {
+ kfree(state->buff0);
+ state->buff0 = NULL;
+ }
+fail0:
+ return rc;
+}
+
+static void ssi_hash_unmap_request(struct device *dev,
+ struct ahash_req_ctx *state,
+ struct ssi_hash_ctx *ctx)
+{
+ if (state->digest_buff_dma_addr != 0) {
+ SSI_RESTORE_DMA_ADDR_TO_48BIT(state->digest_buff_dma_addr);
+ dma_unmap_single(dev, state->digest_buff_dma_addr,
+ ctx->inter_digestsize, DMA_BIDIRECTIONAL);
+ SSI_LOG_DEBUG("Unmapped digest-buffer: digest_buff_dma_addr=0x%llX\n",
+ (unsigned long long)state->digest_buff_dma_addr);
+ state->digest_buff_dma_addr = 0;
+ }
+ if (state->digest_bytes_len_dma_addr != 0) {
+ SSI_RESTORE_DMA_ADDR_TO_48BIT(state->digest_bytes_len_dma_addr);
+ dma_unmap_single(dev, state->digest_bytes_len_dma_addr,
+ HASH_LEN_SIZE, DMA_BIDIRECTIONAL);
+ SSI_LOG_DEBUG("Unmapped digest-bytes-len buffer: digest_bytes_len_dma_addr=0x%llX\n",
+ (unsigned long long)state->digest_bytes_len_dma_addr);
+ state->digest_bytes_len_dma_addr = 0;
+ }
+ if (state->opad_digest_dma_addr != 0) {
+ SSI_RESTORE_DMA_ADDR_TO_48BIT(state->opad_digest_dma_addr);
+ dma_unmap_single(dev, state->opad_digest_dma_addr,
+ ctx->inter_digestsize, DMA_BIDIRECTIONAL);
+ SSI_LOG_DEBUG("Unmapped opad-digest: opad_digest_dma_addr=0x%llX\n",
+ (unsigned long long)state->opad_digest_dma_addr);
+ state->opad_digest_dma_addr = 0;
+ }
+
+ kfree(state->opad_digest_buff);
+ kfree(state->digest_bytes_len);
+ kfree(state->digest_buff);
+ kfree(state->digest_result_buff);
+ kfree(state->buff1);
+ kfree(state->buff0);
+}
+
+static void ssi_hash_unmap_result(struct device *dev,
+ struct ahash_req_ctx *state,
+ unsigned int digestsize, u8 *result)
+{
+ if (state->digest_result_dma_addr != 0) {
+ SSI_RESTORE_DMA_ADDR_TO_48BIT(state->digest_result_dma_addr);
+ dma_unmap_single(dev,
+ state->digest_result_dma_addr,
+ digestsize,
+ DMA_BIDIRECTIONAL);
+ SSI_LOG_DEBUG("unmap digest result buffer "
+ "va (%pK) pa (%llx) len %u\n",
+ state->digest_result_buff,
+ (unsigned long long)state->digest_result_dma_addr,
+ digestsize);
+ memcpy(result,
+ state->digest_result_buff,
+ digestsize);
+ }
+ state->digest_result_dma_addr = 0;
+}
+
+static void ssi_hash_update_complete(struct device *dev, void *ssi_req, void __iomem *cc_base)
+{
+ struct ahash_request *req = (struct ahash_request *)ssi_req;
+ struct ahash_req_ctx *state = ahash_request_ctx(req);
+
+ SSI_LOG_DEBUG("req=%pK\n", req);
+
+ ssi_buffer_mgr_unmap_hash_request(dev, state, req->src, false);
+ req->base.complete(&req->base, 0);
+}
+
+static void ssi_hash_digest_complete(struct device *dev, void *ssi_req, void __iomem *cc_base)
+{
+ struct ahash_request *req = (struct ahash_request *)ssi_req;
+ struct ahash_req_ctx *state = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ uint32_t digestsize = crypto_ahash_digestsize(tfm);
+
+ SSI_LOG_DEBUG("req=%pK\n", req);
+
+ ssi_buffer_mgr_unmap_hash_request(dev, state, req->src, false);
+ ssi_hash_unmap_result(dev, state, digestsize, req->result);
+ ssi_hash_unmap_request(dev, state, ctx);
+ req->base.complete(&req->base, 0);
+}
+
+static void ssi_hash_complete(struct device *dev, void *ssi_req, void __iomem *cc_base)
+{
+ struct ahash_request *req = (struct ahash_request *)ssi_req;
+ struct ahash_req_ctx *state = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ uint32_t digestsize = crypto_ahash_digestsize(tfm);
+
+ SSI_LOG_DEBUG("req=%pK\n", req);
+
+ ssi_buffer_mgr_unmap_hash_request(dev, state, req->src, false);
+ ssi_hash_unmap_result(dev, state, digestsize, req->result);
+ ssi_hash_unmap_request(dev, state, ctx);
+ req->base.complete(&req->base, 0);
+}
+
+static int ssi_hash_digest(struct ahash_req_ctx *state,
+ struct ssi_hash_ctx *ctx,
+ unsigned int digestsize,
+ struct scatterlist *src,
+ unsigned int nbytes, u8 *result,
+ void *async_req)
+{
+ struct device *dev = &ctx->drvdata->plat_dev->dev;
+ bool is_hmac = ctx->is_hmac;
+ struct ssi_crypto_req ssi_req = {};
+ HwDesc_s desc[SSI_MAX_AHASH_SEQ_LEN];
+ ssi_sram_addr_t larval_digest_addr = ssi_ahash_get_larval_digest_sram_addr(
+ ctx->drvdata, ctx->hash_mode);
+ int idx = 0;
+ int rc = 0;
+
+
+ SSI_LOG_DEBUG("===== %s-digest (%d) ====\n", is_hmac?"hmac":"hash", nbytes);
+
+ CHECK_AND_RETURN_UPON_FIPS_ERROR();
+
+ if (unlikely(ssi_hash_map_request(dev, state, ctx) != 0)) {
+ SSI_LOG_ERR("map_ahash_source() failed\n");
+ return -ENOMEM;
+ }
+
+ if (unlikely(ssi_hash_map_result(dev, state, digestsize) != 0)) {
+ SSI_LOG_ERR("map_ahash_digest() failed\n");
+ return -ENOMEM;
+ }
+
+ if (unlikely(ssi_buffer_mgr_map_hash_request_final(ctx->drvdata, state, src, nbytes, 1) != 0)) {
+ SSI_LOG_ERR("map_ahash_request_final() failed\n");
+ return -ENOMEM;
+ }
+
+ if (async_req) {
+ /* Setup DX request structure */
+ ssi_req.user_cb = (void *)ssi_hash_digest_complete;
+ ssi_req.user_arg = (void *)async_req;
+#ifdef ENABLE_CYCLE_COUNT
+ ssi_req.op_type = STAT_OP_TYPE_ENCODE; /* Use "Encode" stats */
+#endif
+ }
+
+ /* If HMAC then load hash IPAD xor key, if HASH then load initial digest */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
+ if (is_hmac) {
+ HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr, ctx->inter_digestsize, NS_BIT);
+ } else {
+ HW_DESC_SET_DIN_SRAM(&desc[idx], larval_digest_addr, ctx->inter_digestsize);
+ }
+ HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
+ HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE0);
+ idx++;
+
+ /* Load the hash current length */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
+
+ if (is_hmac) {
+ HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, state->digest_bytes_len_dma_addr, HASH_LEN_SIZE, NS_BIT);
+ } else {
+ HW_DESC_SET_DIN_CONST(&desc[idx], 0, HASH_LEN_SIZE);
+ if (likely(nbytes != 0)) {
+ HW_DESC_SET_CIPHER_CONFIG1(&desc[idx], HASH_PADDING_ENABLED);
+ } else {
+ HW_DESC_SET_CIPHER_DO(&desc[idx], DO_PAD);
+ }
+ }
+ HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
+ HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
+ idx++;
+
+ ssi_hash_create_data_desc(state, ctx, DIN_HASH, desc, false, &idx);
+
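+ /* For HMAC a second pass follows: the inner digest just computed is
+  * hashed again under the opad-xored key state to produce the final MAC. */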
+ if (is_hmac) {
+ /* HW last hash block padding (aka. "DO_PAD") */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
+ HW_DESC_SET_DOUT_DLLI(&desc[idx], state->digest_buff_dma_addr, HASH_LEN_SIZE, NS_BIT, 0);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], S_HASH_to_DOUT);
+ HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE1);
+ HW_DESC_SET_CIPHER_DO(&desc[idx], DO_PAD);
+ idx++;
+
+ /* store the hash digest result in the context */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
+ HW_DESC_SET_DOUT_DLLI(&desc[idx], state->digest_buff_dma_addr, digestsize, NS_BIT, 0);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], S_HASH_to_DOUT);
+ ssi_set_hash_endianity(ctx->hash_mode, &desc[idx]);
+ HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE0);
+ idx++;
+
+ /* Loading hash opad xor key state */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
+ HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, state->opad_digest_dma_addr, ctx->inter_digestsize, NS_BIT);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
+ HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE0);
+ idx++;
+
+ /* Load the hash current length */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
+ HW_DESC_SET_DIN_SRAM(&desc[idx], ssi_ahash_get_initial_digest_len_sram_addr(ctx->drvdata, ctx->hash_mode), HASH_LEN_SIZE);
+ HW_DESC_SET_CIPHER_CONFIG1(&desc[idx], HASH_PADDING_ENABLED);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
+ HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
+ idx++;
+
+ /* Memory Barrier: wait for IPAD/OPAD axi write to complete */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_DIN_NO_DMA(&desc[idx], 0, 0xfffff0);
+ HW_DESC_SET_DOUT_NO_DMA(&desc[idx], 0, 0, 1);
+ idx++;
+
+ /* Perform HASH update */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr, digestsize, NS_BIT);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], DIN_HASH);
+ idx++;
+ }
+
+ /* Get final MAC result */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
+ HW_DESC_SET_DOUT_DLLI(&desc[idx], state->digest_result_dma_addr, digestsize, NS_BIT, async_req? 1:0); /*TODO*/
+ if (async_req) {
+ HW_DESC_SET_QUEUE_LAST_IND(&desc[idx]);
+ }
+ HW_DESC_SET_FLOW_MODE(&desc[idx], S_HASH_to_DOUT);
+ HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE0);
+ HW_DESC_SET_CIPHER_CONFIG1(&desc[idx], HASH_PADDING_DISABLED);
+ ssi_set_hash_endianity(ctx->hash_mode, &desc[idx]);
+ idx++;
+
+ if (async_req) {
+ rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
+ if (unlikely(rc != -EINPROGRESS)) {
+ SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
+ ssi_buffer_mgr_unmap_hash_request(dev, state, src, true);
+ ssi_hash_unmap_result(dev, state, digestsize, result);
+ ssi_hash_unmap_request(dev, state, ctx);
+ }
+ } else {
+ rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);
+ if (rc != 0) {
+ SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
+ ssi_buffer_mgr_unmap_hash_request(dev, state, src, true);
+ } else {
+ ssi_buffer_mgr_unmap_hash_request(dev, state, src, false);
+ }
+ ssi_hash_unmap_result(dev, state, digestsize, result);
+ ssi_hash_unmap_request(dev, state, ctx);
+ }
+ return rc;
+}
+
+static int ssi_hash_update(struct ahash_req_ctx *state,
+ struct ssi_hash_ctx *ctx,
+ unsigned int block_size,
+ struct scatterlist *src,
+ unsigned int nbytes,
+ void *async_req)
+{
+ struct device *dev = &ctx->drvdata->plat_dev->dev;
+ struct ssi_crypto_req ssi_req = {};
+ HwDesc_s desc[SSI_MAX_AHASH_SEQ_LEN];
+ uint32_t idx = 0;
+ int rc;
+
+ SSI_LOG_DEBUG("===== %s-update (%d) ====\n", ctx->is_hmac ?
+ "hmac":"hash", nbytes);
+
+ CHECK_AND_RETURN_UPON_FIPS_ERROR();
+ if (nbytes == 0) {
+ /* no real updates required */
+ return 0;
+ }
+
+ if (unlikely(rc = ssi_buffer_mgr_map_hash_request_update(ctx->drvdata, state, src, nbytes, block_size))) {
+ if (rc == 1) {
+ SSI_LOG_DEBUG(" data size does not require HW update %x\n",
+ nbytes);
+ /* No hardware updates are required */
+ return 0;
+ }
+ SSI_LOG_ERR("map_ahash_request_update() failed\n");
+ return -ENOMEM;
+ }
+
+ if (async_req) {
+ /* Setup DX request structure */
+ ssi_req.user_cb = (void *)ssi_hash_update_complete;
+ ssi_req.user_arg = async_req;
+#ifdef ENABLE_CYCLE_COUNT
+ ssi_req.op_type = STAT_OP_TYPE_ENCODE; /* Use "Encode" stats */
+#endif
+ }
+
+ /* Restore hash digest */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
+ HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr, ctx->inter_digestsize, NS_BIT);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
+ HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE0);
+ idx++;
+ /* Restore hash current length */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
+ HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, state->digest_bytes_len_dma_addr, HASH_LEN_SIZE, NS_BIT);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
+ HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
+ idx++;
+
+ ssi_hash_create_data_desc(state, ctx, DIN_HASH, desc, false, &idx);
+
+ /* store the hash digest result in context */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
+ HW_DESC_SET_DOUT_DLLI(&desc[idx], state->digest_buff_dma_addr, ctx->inter_digestsize, NS_BIT, 0);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], S_HASH_to_DOUT);
+ HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE0);
+ idx++;
+
+ /* store current hash length in context */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
+ HW_DESC_SET_DOUT_DLLI(&desc[idx], state->digest_bytes_len_dma_addr, HASH_LEN_SIZE, NS_BIT, async_req? 1:0);
+ if (async_req) {
+ HW_DESC_SET_QUEUE_LAST_IND(&desc[idx]);
+ }
+ HW_DESC_SET_FLOW_MODE(&desc[idx], S_HASH_to_DOUT);
+ HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE1);
+ idx++;
+
+ if (async_req) {
+ rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
+ if (unlikely(rc != -EINPROGRESS)) {
+ SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
+ ssi_buffer_mgr_unmap_hash_request(dev, state, src, true);
+ }
+ } else {
+ rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);
+ if (rc != 0) {
+ SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
+ ssi_buffer_mgr_unmap_hash_request(dev, state, src, true);
+ } else {
+ ssi_buffer_mgr_unmap_hash_request(dev, state, src, false);
+ }
+ }
+ return rc;
+}
+
+static int ssi_hash_finup(struct ahash_req_ctx *state,
+ struct ssi_hash_ctx *ctx,
+ unsigned int digestsize,
+ struct scatterlist *src,
+ unsigned int nbytes,
+ u8 *result,
+ void *async_req)
+{
+ struct device *dev = &ctx->drvdata->plat_dev->dev;
+ bool is_hmac = ctx->is_hmac;
+ struct ssi_crypto_req ssi_req = {};
+ HwDesc_s desc[SSI_MAX_AHASH_SEQ_LEN];
+ int idx = 0;
+ int rc;
+
+ SSI_LOG_DEBUG("===== %s-finup (%d) ====\n", is_hmac?"hmac":"hash", nbytes);
+
+ CHECK_AND_RETURN_UPON_FIPS_ERROR();
+
+ if (unlikely(ssi_buffer_mgr_map_hash_request_final(ctx->drvdata, state, src , nbytes, 1) != 0)) {
+ SSI_LOG_ERR("map_ahash_request_final() failed\n");
+ return -ENOMEM;
+ }
+ if (unlikely(ssi_hash_map_result(dev, state, digestsize) != 0)) {
+ SSI_LOG_ERR("map_ahash_digest() failed\n");
+ return -ENOMEM;
+ }
+
+ if (async_req) {
+ /* Setup DX request structure */
+ ssi_req.user_cb = (void *)ssi_hash_complete;
+ ssi_req.user_arg = async_req;
+#ifdef ENABLE_CYCLE_COUNT
+ ssi_req.op_type = STAT_OP_TYPE_ENCODE; /* Use "Encode" stats */
+#endif
+ }
+
+ /* Restore hash digest */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
+ HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr, ctx->inter_digestsize, NS_BIT);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
+ HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE0);
+ idx++;
+
+ /* Restore hash current length */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
+ HW_DESC_SET_CIPHER_CONFIG1(&desc[idx], HASH_PADDING_ENABLED);
+ HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, state->digest_bytes_len_dma_addr, HASH_LEN_SIZE, NS_BIT);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
+ HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
+ idx++;
+
+ ssi_hash_create_data_desc(state, ctx, DIN_HASH, desc, false, &idx);
+
+ if (is_hmac) {
+ /* Store the hash digest result in the context */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
+ HW_DESC_SET_DOUT_DLLI(&desc[idx], state->digest_buff_dma_addr, digestsize, NS_BIT, 0);
+		ssi_set_hash_endianity(ctx->hash_mode, &desc[idx]);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], S_HASH_to_DOUT);
+ HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE0);
+ idx++;
+
+ /* Loading hash OPAD xor key state */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
+ HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, state->opad_digest_dma_addr, ctx->inter_digestsize, NS_BIT);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
+ HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE0);
+ idx++;
+
+ /* Load the hash current length */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
+ HW_DESC_SET_DIN_SRAM(&desc[idx], ssi_ahash_get_initial_digest_len_sram_addr(ctx->drvdata, ctx->hash_mode), HASH_LEN_SIZE);
+ HW_DESC_SET_CIPHER_CONFIG1(&desc[idx], HASH_PADDING_ENABLED);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
+ HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
+ idx++;
+
+ /* Memory Barrier: wait for IPAD/OPAD axi write to complete */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_DIN_NO_DMA(&desc[idx], 0, 0xfffff0);
+ HW_DESC_SET_DOUT_NO_DMA(&desc[idx], 0, 0, 1);
+ idx++;
+
+ /* Perform HASH update on last digest */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr, digestsize, NS_BIT);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], DIN_HASH);
+ idx++;
+ }
+
+ /* Get final MAC result */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_DOUT_DLLI(&desc[idx], state->digest_result_dma_addr, digestsize, NS_BIT, async_req? 1:0); /*TODO*/
+ if (async_req) {
+ HW_DESC_SET_QUEUE_LAST_IND(&desc[idx]);
+ }
+ HW_DESC_SET_FLOW_MODE(&desc[idx], S_HASH_to_DOUT);
+ HW_DESC_SET_CIPHER_CONFIG1(&desc[idx], HASH_PADDING_DISABLED);
+ HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE0);
+	ssi_set_hash_endianity(ctx->hash_mode, &desc[idx]);
+ HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
+ idx++;
+
+ if (async_req) {
+ rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
+ if (unlikely(rc != -EINPROGRESS)) {
+ SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
+ ssi_buffer_mgr_unmap_hash_request(dev, state, src, true);
+ ssi_hash_unmap_result(dev, state, digestsize, result);
+ }
+ } else {
+ rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);
+ if (rc != 0) {
+ SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
+ ssi_buffer_mgr_unmap_hash_request(dev, state, src, true);
+ ssi_hash_unmap_result(dev, state, digestsize, result);
+ } else {
+ ssi_buffer_mgr_unmap_hash_request(dev, state, src, false);
+ ssi_hash_unmap_result(dev, state, digestsize, result);
+ ssi_hash_unmap_request(dev, state, ctx);
+ }
+ }
+ return rc;
+}
+
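+/*
+ * ssi_hash_final - same flow as ssi_hash_finup(), except the current length
+ * is restored with padding disabled and written back through an extra
+ * DO_PAD descriptor, since only previously buffered data is hashed here.
+ */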
+static int ssi_hash_final(struct ahash_req_ctx *state,
+ struct ssi_hash_ctx *ctx,
+ unsigned int digestsize,
+ struct scatterlist *src,
+ unsigned int nbytes,
+ u8 *result,
+ void *async_req)
+{
+ struct device *dev = &ctx->drvdata->plat_dev->dev;
+ bool is_hmac = ctx->is_hmac;
+ struct ssi_crypto_req ssi_req = {};
+ HwDesc_s desc[SSI_MAX_AHASH_SEQ_LEN];
+ int idx = 0;
+ int rc;
+
+ SSI_LOG_DEBUG("===== %s-final (%d) ====\n", is_hmac?"hmac":"hash", nbytes);
+
+ CHECK_AND_RETURN_UPON_FIPS_ERROR();
+
+ if (unlikely(ssi_buffer_mgr_map_hash_request_final(ctx->drvdata, state, src, nbytes, 0) != 0)) {
+ SSI_LOG_ERR("map_ahash_request_final() failed\n");
+ return -ENOMEM;
+ }
+
+ if (unlikely(ssi_hash_map_result(dev, state, digestsize) != 0)) {
+ SSI_LOG_ERR("map_ahash_digest() failed\n");
+ return -ENOMEM;
+ }
+
+ if (async_req) {
+ /* Setup DX request structure */
+ ssi_req.user_cb = (void *)ssi_hash_complete;
+ ssi_req.user_arg = async_req;
+#ifdef ENABLE_CYCLE_COUNT
+ ssi_req.op_type = STAT_OP_TYPE_ENCODE; /* Use "Encode" stats */
+#endif
+ }
+
+ /* Restore hash digest */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
+ HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr, ctx->inter_digestsize, NS_BIT);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
+ HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE0);
+ idx++;
+
+ /* Restore hash current length */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
+ HW_DESC_SET_CIPHER_CONFIG1(&desc[idx], HASH_PADDING_DISABLED);
+ HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, state->digest_bytes_len_dma_addr, HASH_LEN_SIZE, NS_BIT);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
+ HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
+ idx++;
+
+ ssi_hash_create_data_desc(state, ctx, DIN_HASH, desc, false, &idx);
+
+ /* "DO-PAD" must be enabled only when writing current length to HW */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_CIPHER_DO(&desc[idx], DO_PAD);
+ HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
+ HW_DESC_SET_DOUT_DLLI(&desc[idx], state->digest_bytes_len_dma_addr, HASH_LEN_SIZE, NS_BIT, 0);
+ HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE1);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], S_HASH_to_DOUT);
+ idx++;
+
+ if (is_hmac) {
+ /* Store the hash digest result in the context */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
+ HW_DESC_SET_DOUT_DLLI(&desc[idx], state->digest_buff_dma_addr, digestsize, NS_BIT, 0);
+		ssi_set_hash_endianity(ctx->hash_mode, &desc[idx]);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], S_HASH_to_DOUT);
+ HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE0);
+ idx++;
+
+ /* Loading hash OPAD xor key state */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
+ HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, state->opad_digest_dma_addr, ctx->inter_digestsize, NS_BIT);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
+ HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE0);
+ idx++;
+
+ /* Load the hash current length */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
+ HW_DESC_SET_DIN_SRAM(&desc[idx], ssi_ahash_get_initial_digest_len_sram_addr(ctx->drvdata, ctx->hash_mode), HASH_LEN_SIZE);
+ HW_DESC_SET_CIPHER_CONFIG1(&desc[idx], HASH_PADDING_ENABLED);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
+ HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
+ idx++;
+
+ /* Memory Barrier: wait for IPAD/OPAD axi write to complete */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_DIN_NO_DMA(&desc[idx], 0, 0xfffff0);
+ HW_DESC_SET_DOUT_NO_DMA(&desc[idx], 0, 0, 1);
+ idx++;
+
+ /* Perform HASH update on last digest */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr, digestsize, NS_BIT);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], DIN_HASH);
+ idx++;
+ }
+
+ /* Get final MAC result */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_DOUT_DLLI(&desc[idx], state->digest_result_dma_addr, digestsize, NS_BIT, async_req? 1:0);
+ if (async_req) {
+ HW_DESC_SET_QUEUE_LAST_IND(&desc[idx]);
+ }
+ HW_DESC_SET_FLOW_MODE(&desc[idx], S_HASH_to_DOUT);
+ HW_DESC_SET_CIPHER_CONFIG1(&desc[idx], HASH_PADDING_DISABLED);
+ HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE0);
+	ssi_set_hash_endianity(ctx->hash_mode, &desc[idx]);
+ HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
+ idx++;
+
+ if (async_req) {
+ rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
+ if (unlikely(rc != -EINPROGRESS)) {
+ SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
+ ssi_buffer_mgr_unmap_hash_request(dev, state, src, true);
+ ssi_hash_unmap_result(dev, state, digestsize, result);
+ }
+ } else {
+ rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);
+ if (rc != 0) {
+ SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
+ ssi_buffer_mgr_unmap_hash_request(dev, state, src, true);
+ ssi_hash_unmap_result(dev, state, digestsize, result);
+ } else {
+ ssi_buffer_mgr_unmap_hash_request(dev, state, src, false);
+ ssi_hash_unmap_result(dev, state, digestsize, result);
+ ssi_hash_unmap_request(dev, state, ctx);
+ }
+ }
+ return rc;
+}
+
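+/* Reset the request state and map the per-request DMA buffers. */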
+static int ssi_hash_init(struct ahash_req_ctx *state, struct ssi_hash_ctx *ctx)
+{
+ struct device *dev = &ctx->drvdata->plat_dev->dev;
+ state->xcbc_count = 0;
+
+ CHECK_AND_RETURN_UPON_FIPS_ERROR();
+ ssi_hash_map_request(dev, state, ctx);
+
+ return 0;
+}
+
+#ifdef EXPORT_FIXED
+static int ssi_hash_export(struct ssi_hash_ctx *ctx, void *out)
+{
+ CHECK_AND_RETURN_UPON_FIPS_ERROR();
+ memcpy(out, ctx, sizeof(struct ssi_hash_ctx));
+ return 0;
+}
+
+static int ssi_hash_import(struct ssi_hash_ctx *ctx, const void *in)
+{
+ CHECK_AND_RETURN_UPON_FIPS_ERROR();
+ memcpy(ctx, in, sizeof(struct ssi_hash_ctx));
+ return 0;
+}
+#endif
+
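+/*
+ * ssi_hash_setkey - HMAC setkey for both shash and ahash transforms.
+ * Keys longer than the block size are hashed down to the digest size and
+ * zero padded; shorter keys are copied and zero padded to a full block.
+ * The ipad/opad intermediate digests are then derived by hashing the
+ * padded key XORed with the HMAC ipad/opad constants.
+ */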
+static int ssi_hash_setkey(void *hash,
+ const u8 *key,
+ unsigned int keylen,
+ bool synchronize)
+{
+ unsigned int hmacPadConst[2] = { HMAC_IPAD_CONST, HMAC_OPAD_CONST };
+ struct ssi_crypto_req ssi_req = {};
+ struct ssi_hash_ctx *ctx = NULL;
+ int blocksize = 0;
+ int digestsize = 0;
+ int i, idx = 0, rc = 0;
+ HwDesc_s desc[SSI_MAX_AHASH_SEQ_LEN];
+ ssi_sram_addr_t larval_addr;
+
+ SSI_LOG_DEBUG("ssi_hash_setkey: start keylen: %d", keylen);
+
+ CHECK_AND_RETURN_UPON_FIPS_ERROR();
+ if (synchronize) {
+ ctx = crypto_shash_ctx(((struct crypto_shash *)hash));
+ blocksize = crypto_tfm_alg_blocksize(&((struct crypto_shash *)hash)->base);
+ digestsize = crypto_shash_digestsize(((struct crypto_shash *)hash));
+ } else {
+ ctx = crypto_ahash_ctx(((struct crypto_ahash *)hash));
+ blocksize = crypto_tfm_alg_blocksize(&((struct crypto_ahash *)hash)->base);
+ digestsize = crypto_ahash_digestsize(((struct crypto_ahash *)hash));
+ }
+
+ larval_addr = ssi_ahash_get_larval_digest_sram_addr(
+ ctx->drvdata, ctx->hash_mode);
+
+	/* A keylen of zero selects the plain HASH flow; any non-zero
+	 * keylen selects the HMAC flow.
+	 */
+ ctx->key_params.keylen = keylen;
+ ctx->key_params.key_dma_addr = 0;
+ ctx->is_hmac = true;
+
+ if (keylen != 0) {
+ ctx->key_params.key_dma_addr = dma_map_single(
+ &ctx->drvdata->plat_dev->dev,
+ (void *)key,
+ keylen, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(&ctx->drvdata->plat_dev->dev,
+ ctx->key_params.key_dma_addr))) {
+ SSI_LOG_ERR("Mapping key va=0x%p len=%u for"
+ " DMA failed\n", key, keylen);
+ return -ENOMEM;
+ }
+ SSI_UPDATE_DMA_ADDR_TO_48BIT(ctx->key_params.key_dma_addr, keylen);
+ SSI_LOG_DEBUG("mapping key-buffer: key_dma_addr=0x%llX "
+ "keylen=%u\n",
+ (unsigned long long)ctx->key_params.key_dma_addr,
+ ctx->key_params.keylen);
+
+ if (keylen > blocksize) {
+ /* Load hash initial state */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
+ HW_DESC_SET_DIN_SRAM(&desc[idx], larval_addr,
+ ctx->inter_digestsize);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
+ HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE0);
+ idx++;
+
+ /* Load the hash current length*/
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
+ HW_DESC_SET_DIN_CONST(&desc[idx], 0, HASH_LEN_SIZE);
+ HW_DESC_SET_CIPHER_CONFIG1(&desc[idx], HASH_PADDING_ENABLED);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
+ HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
+ idx++;
+
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
+ ctx->key_params.key_dma_addr,
+ keylen, NS_BIT);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], DIN_HASH);
+ idx++;
+
+ /* Get hashed key */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
+ HW_DESC_SET_DOUT_DLLI(&desc[idx], ctx->opad_tmp_keys_dma_addr,
+ digestsize, NS_BIT, 0);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], S_HASH_to_DOUT);
+ HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE0);
+ HW_DESC_SET_CIPHER_CONFIG1(&desc[idx], HASH_PADDING_DISABLED);
+			ssi_set_hash_endianity(ctx->hash_mode, &desc[idx]);
+ idx++;
+
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_DIN_CONST(&desc[idx], 0, (blocksize - digestsize));
+ HW_DESC_SET_FLOW_MODE(&desc[idx], BYPASS);
+ HW_DESC_SET_DOUT_DLLI(&desc[idx],
+ (ctx->opad_tmp_keys_dma_addr + digestsize),
+ (blocksize - digestsize),
+ NS_BIT, 0);
+ idx++;
+ } else {
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
+ ctx->key_params.key_dma_addr,
+ keylen, NS_BIT);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], BYPASS);
+ HW_DESC_SET_DOUT_DLLI(&desc[idx],
+ (ctx->opad_tmp_keys_dma_addr),
+ keylen, NS_BIT, 0);
+ idx++;
+
+ if ((blocksize - keylen) != 0) {
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_DIN_CONST(&desc[idx], 0, (blocksize - keylen));
+ HW_DESC_SET_FLOW_MODE(&desc[idx], BYPASS);
+ HW_DESC_SET_DOUT_DLLI(&desc[idx],
+ (ctx->opad_tmp_keys_dma_addr + keylen),
+ (blocksize - keylen),
+ NS_BIT, 0);
+ idx++;
+ }
+ }
+ } else {
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_DIN_CONST(&desc[idx], 0, blocksize);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], BYPASS);
+ HW_DESC_SET_DOUT_DLLI(&desc[idx],
+ (ctx->opad_tmp_keys_dma_addr),
+ blocksize,
+ NS_BIT, 0);
+ idx++;
+ }
+
+ rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);
+ if (unlikely(rc != 0)) {
+ SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
+ goto out;
+ }
+
+ /* calc derived HMAC key */
+ for (idx = 0, i = 0; i < 2; i++) {
+ /* Load hash initial state */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
+ HW_DESC_SET_DIN_SRAM(&desc[idx], larval_addr,
+ ctx->inter_digestsize);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
+ HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE0);
+ idx++;
+
+ /* Load the hash current length*/
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
+ HW_DESC_SET_DIN_CONST(&desc[idx], 0, HASH_LEN_SIZE);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
+ HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
+ idx++;
+
+		/* Prepare ipad/opad key (XOR the padded key with the pad constant) */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_XOR_VAL(&desc[idx], hmacPadConst[i]);
+ HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH);
+ HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE1);
+ idx++;
+
+ /* Perform HASH update */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
+ ctx->opad_tmp_keys_dma_addr,
+ blocksize, NS_BIT);
+ HW_DESC_SET_CIPHER_MODE(&desc[idx],ctx->hw_mode);
+ HW_DESC_SET_XOR_ACTIVE(&desc[idx]);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], DIN_HASH);
+ idx++;
+
+ /* Get the IPAD/OPAD xor key (Note, IPAD is the initial digest of the first HASH "update" state) */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
+ if (i > 0) /* Not first iteration */
+ HW_DESC_SET_DOUT_DLLI(&desc[idx],
+ ctx->opad_tmp_keys_dma_addr,
+ ctx->inter_digestsize,
+ NS_BIT, 0);
+ else /* First iteration */
+ HW_DESC_SET_DOUT_DLLI(&desc[idx],
+ ctx->digest_buff_dma_addr,
+ ctx->inter_digestsize,
+ NS_BIT, 0);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], S_HASH_to_DOUT);
+ HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE0);
+ idx++;
+ }
+
+ rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);
+
+out:
+ if (rc != 0) {
+ if (synchronize) {
+ crypto_shash_set_flags((struct crypto_shash *)hash, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ } else {
+ crypto_ahash_set_flags((struct crypto_ahash *)hash, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ }
+ }
+
+ if (ctx->key_params.key_dma_addr) {
+ SSI_RESTORE_DMA_ADDR_TO_48BIT(ctx->key_params.key_dma_addr);
+ dma_unmap_single(&ctx->drvdata->plat_dev->dev,
+ ctx->key_params.key_dma_addr,
+ ctx->key_params.keylen, DMA_TO_DEVICE);
+ SSI_LOG_DEBUG("Unmapped key-buffer: key_dma_addr=0x%llX keylen=%u\n",
+ (unsigned long long)ctx->key_params.key_dma_addr,
+ ctx->key_params.keylen);
+ }
+ return rc;
+}
+
+
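+/*
+ * ssi_xcbc_setkey - derive the AES-XCBC-MAC subkeys K1/K2/K3 by
+ * ECB-encrypting the constants 0x01.., 0x02.. and 0x03.. with the user key
+ * and storing them in the opad_tmp_keys buffer.
+ */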
+static int ssi_xcbc_setkey(struct crypto_ahash *ahash,
+ const u8 *key, unsigned int keylen)
+{
+ struct ssi_crypto_req ssi_req = {};
+ struct ssi_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ int idx = 0, rc = 0;
+ HwDesc_s desc[SSI_MAX_AHASH_SEQ_LEN];
+
+ SSI_LOG_DEBUG("===== setkey (%d) ====\n", keylen);
+ CHECK_AND_RETURN_UPON_FIPS_ERROR();
+
+ switch (keylen) {
+ case AES_KEYSIZE_128:
+ case AES_KEYSIZE_192:
+ case AES_KEYSIZE_256:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ctx->key_params.keylen = keylen;
+
+ ctx->key_params.key_dma_addr = dma_map_single(
+ &ctx->drvdata->plat_dev->dev,
+ (void *)key,
+ keylen, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(&ctx->drvdata->plat_dev->dev,
+ ctx->key_params.key_dma_addr))) {
+ SSI_LOG_ERR("Mapping key va=0x%p len=%u for"
+ " DMA failed\n", key, keylen);
+ return -ENOMEM;
+ }
+ SSI_UPDATE_DMA_ADDR_TO_48BIT(ctx->key_params.key_dma_addr, keylen);
+ SSI_LOG_DEBUG("mapping key-buffer: key_dma_addr=0x%llX "
+ "keylen=%u\n",
+ (unsigned long long)ctx->key_params.key_dma_addr,
+ ctx->key_params.keylen);
+
+ ctx->is_hmac = true;
+ /* 1. Load the AES key */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, ctx->key_params.key_dma_addr, keylen, NS_BIT);
+ HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_ECB);
+ HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
+ HW_DESC_SET_KEY_SIZE_AES(&desc[idx], keylen);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_AES);
+ HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
+ idx++;
+
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_DIN_CONST(&desc[idx], 0x01010101, CC_AES_128_BIT_KEY_SIZE);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], DIN_AES_DOUT);
+ HW_DESC_SET_DOUT_DLLI(&desc[idx], (ctx->opad_tmp_keys_dma_addr +
+ XCBC_MAC_K1_OFFSET),
+ CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
+ idx++;
+
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_DIN_CONST(&desc[idx], 0x02020202, CC_AES_128_BIT_KEY_SIZE);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], DIN_AES_DOUT);
+ HW_DESC_SET_DOUT_DLLI(&desc[idx], (ctx->opad_tmp_keys_dma_addr +
+ XCBC_MAC_K2_OFFSET),
+ CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
+ idx++;
+
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_DIN_CONST(&desc[idx], 0x03030303, CC_AES_128_BIT_KEY_SIZE);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], DIN_AES_DOUT);
+ HW_DESC_SET_DOUT_DLLI(&desc[idx], (ctx->opad_tmp_keys_dma_addr +
+ XCBC_MAC_K3_OFFSET),
+ CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
+ idx++;
+
+ rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);
+
+ if (rc != 0)
+ crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
+
+ SSI_RESTORE_DMA_ADDR_TO_48BIT(ctx->key_params.key_dma_addr);
+ dma_unmap_single(&ctx->drvdata->plat_dev->dev,
+ ctx->key_params.key_dma_addr,
+ ctx->key_params.keylen, DMA_TO_DEVICE);
+ SSI_LOG_DEBUG("Unmapped key-buffer: key_dma_addr=0x%llX keylen=%u\n",
+ (unsigned long long)ctx->key_params.key_dma_addr,
+ ctx->key_params.keylen);
+
+ return rc;
+}
+#if SSI_CC_HAS_CMAC
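+/*
+ * ssi_cmac_setkey - the CMAC key is used directly by the engine, so it is
+ * only copied into the pre-mapped opad_tmp_keys buffer (with CPU/device
+ * syncs around the copy); 192-bit keys are zero padded to the maximum
+ * AES key size.
+ */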
+static int ssi_cmac_setkey(struct crypto_ahash *ahash,
+ const u8 *key, unsigned int keylen)
+{
+ struct ssi_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ DECL_CYCLE_COUNT_RESOURCES;
+ SSI_LOG_DEBUG("===== setkey (%d) ====\n", keylen);
+ CHECK_AND_RETURN_UPON_FIPS_ERROR();
+
+ ctx->is_hmac = true;
+
+ switch (keylen) {
+ case AES_KEYSIZE_128:
+ case AES_KEYSIZE_192:
+ case AES_KEYSIZE_256:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ctx->key_params.keylen = keylen;
+
+ /* STAT_PHASE_1: Copy key to ctx */
+ START_CYCLE_COUNT();
+
+ SSI_RESTORE_DMA_ADDR_TO_48BIT(ctx->opad_tmp_keys_dma_addr);
+ dma_sync_single_for_cpu(&ctx->drvdata->plat_dev->dev,
+ ctx->opad_tmp_keys_dma_addr,
+ keylen, DMA_TO_DEVICE);
+
+ memcpy(ctx->opad_tmp_keys_buff, key, keylen);
+ if (keylen == 24)
+ memset(ctx->opad_tmp_keys_buff + 24, 0, CC_AES_KEY_SIZE_MAX - 24);
+
+ dma_sync_single_for_device(&ctx->drvdata->plat_dev->dev,
+ ctx->opad_tmp_keys_dma_addr,
+ keylen, DMA_TO_DEVICE);
+ SSI_UPDATE_DMA_ADDR_TO_48BIT(ctx->opad_tmp_keys_dma_addr, keylen);
+
+ ctx->key_params.keylen = keylen;
+
+ END_CYCLE_COUNT(STAT_OP_TYPE_SETKEY, STAT_PHASE_1);
+
+ return 0;
+}
+#endif
+
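+/* Unmap and clear the per-tfm DMA buffers set up by ssi_hash_alloc_ctx(). */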
+static void ssi_hash_free_ctx(struct ssi_hash_ctx *ctx)
+{
+ struct device *dev = &ctx->drvdata->plat_dev->dev;
+
+ if (ctx->digest_buff_dma_addr != 0) {
+ SSI_RESTORE_DMA_ADDR_TO_48BIT(ctx->digest_buff_dma_addr);
+ dma_unmap_single(dev, ctx->digest_buff_dma_addr,
+ sizeof(ctx->digest_buff), DMA_BIDIRECTIONAL);
+ SSI_LOG_DEBUG("Unmapped digest-buffer: "
+ "digest_buff_dma_addr=0x%llX\n",
+ (unsigned long long)ctx->digest_buff_dma_addr);
+ ctx->digest_buff_dma_addr = 0;
+ }
+ if (ctx->opad_tmp_keys_dma_addr != 0) {
+ SSI_RESTORE_DMA_ADDR_TO_48BIT(ctx->opad_tmp_keys_dma_addr);
+ dma_unmap_single(dev, ctx->opad_tmp_keys_dma_addr,
+ sizeof(ctx->opad_tmp_keys_buff),
+ DMA_BIDIRECTIONAL);
+ SSI_LOG_DEBUG("Unmapped opad-digest: "
+ "opad_tmp_keys_dma_addr=0x%llX\n",
+ (unsigned long long)ctx->opad_tmp_keys_dma_addr);
+ ctx->opad_tmp_keys_dma_addr = 0;
+ }
+
+ ctx->key_params.keylen = 0;
+
+}
+
+
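+/* Map the per-tfm digest and opad/temporary-key buffers for DMA. */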
+static int ssi_hash_alloc_ctx(struct ssi_hash_ctx *ctx)
+{
+ struct device *dev = &ctx->drvdata->plat_dev->dev;
+
+ ctx->key_params.keylen = 0;
+
+ ctx->digest_buff_dma_addr = dma_map_single(dev, (void *)ctx->digest_buff, sizeof(ctx->digest_buff), DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev, ctx->digest_buff_dma_addr)) {
+ SSI_LOG_ERR("Mapping digest len %zu B at va=%pK for DMA failed\n",
+ sizeof(ctx->digest_buff), ctx->digest_buff);
+ goto fail;
+ }
+ SSI_UPDATE_DMA_ADDR_TO_48BIT(ctx->digest_buff_dma_addr,
+ sizeof(ctx->digest_buff));
+ SSI_LOG_DEBUG("Mapped digest %zu B at va=%pK to dma=0x%llX\n",
+ sizeof(ctx->digest_buff), ctx->digest_buff,
+ (unsigned long long)ctx->digest_buff_dma_addr);
+
+ ctx->opad_tmp_keys_dma_addr = dma_map_single(dev, (void *)ctx->opad_tmp_keys_buff, sizeof(ctx->opad_tmp_keys_buff), DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev, ctx->opad_tmp_keys_dma_addr)) {
+ SSI_LOG_ERR("Mapping opad digest %zu B at va=%pK for DMA failed\n",
+ sizeof(ctx->opad_tmp_keys_buff),
+ ctx->opad_tmp_keys_buff);
+ goto fail;
+ }
+ SSI_UPDATE_DMA_ADDR_TO_48BIT(ctx->opad_tmp_keys_dma_addr,
+ sizeof(ctx->opad_tmp_keys_buff));
+ SSI_LOG_DEBUG("Mapped opad_tmp_keys %zu B at va=%pK to dma=0x%llX\n",
+ sizeof(ctx->opad_tmp_keys_buff), ctx->opad_tmp_keys_buff,
+ (unsigned long long)ctx->opad_tmp_keys_dma_addr);
+
+ ctx->is_hmac = false;
+ return 0;
+
+fail:
+ ssi_hash_free_ctx(ctx);
+ return -ENOMEM;
+}
+
+static int ssi_shash_cra_init(struct crypto_tfm *tfm)
+{
+ struct ssi_hash_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct shash_alg * shash_alg =
+ container_of(tfm->__crt_alg, struct shash_alg, base);
+ struct ssi_hash_alg *ssi_alg =
+ container_of(shash_alg, struct ssi_hash_alg, shash_alg);
+
+ CHECK_AND_RETURN_UPON_FIPS_ERROR();
+ ctx->hash_mode = ssi_alg->hash_mode;
+ ctx->hw_mode = ssi_alg->hw_mode;
+ ctx->inter_digestsize = ssi_alg->inter_digestsize;
+ ctx->drvdata = ssi_alg->drvdata;
+
+ return ssi_hash_alloc_ctx(ctx);
+}
+
+static int ssi_ahash_cra_init(struct crypto_tfm *tfm)
+{
+ struct ssi_hash_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct hash_alg_common * hash_alg_common =
+ container_of(tfm->__crt_alg, struct hash_alg_common, base);
+ struct ahash_alg *ahash_alg =
+ container_of(hash_alg_common, struct ahash_alg, halg);
+ struct ssi_hash_alg *ssi_alg =
+ container_of(ahash_alg, struct ssi_hash_alg, ahash_alg);
+
+
+ CHECK_AND_RETURN_UPON_FIPS_ERROR();
+ crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+ sizeof(struct ahash_req_ctx));
+
+ ctx->hash_mode = ssi_alg->hash_mode;
+ ctx->hw_mode = ssi_alg->hw_mode;
+ ctx->inter_digestsize = ssi_alg->inter_digestsize;
+ ctx->drvdata = ssi_alg->drvdata;
+
+ return ssi_hash_alloc_ctx(ctx);
+}
+
+static void ssi_hash_cra_exit(struct crypto_tfm *tfm)
+{
+ struct ssi_hash_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ SSI_LOG_DEBUG("ssi_hash_cra_exit");
+ ssi_hash_free_ctx(ctx);
+}
+
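+/*
+ * ssi_mac_update - XCBC/CMAC update: sub-block data is buffered by the
+ * buffer manager; once at least a block is available an AES MAC pass is
+ * run over it and the running MAC state is written back to the request
+ * context digest buffer.
+ */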
+static int ssi_mac_update(struct ahash_request *req)
+{
+ struct ahash_req_ctx *state = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct device *dev = &ctx->drvdata->plat_dev->dev;
+ unsigned int block_size = crypto_tfm_alg_blocksize(&tfm->base);
+ struct ssi_crypto_req ssi_req = {};
+ HwDesc_s desc[SSI_MAX_AHASH_SEQ_LEN];
+ int rc;
+ uint32_t idx = 0;
+
+ CHECK_AND_RETURN_UPON_FIPS_ERROR();
+ if (req->nbytes == 0) {
+ /* no real updates required */
+ return 0;
+ }
+
+ state->xcbc_count++;
+
+	rc = ssi_buffer_mgr_map_hash_request_update(ctx->drvdata, state,
+						     req->src, req->nbytes, block_size);
+	if (unlikely(rc)) {
+ if (rc == 1) {
+ SSI_LOG_DEBUG(" data size not require HW update %x\n",
+ req->nbytes);
+ /* No hardware updates are required */
+ return 0;
+ }
+ SSI_LOG_ERR("map_ahash_request_update() failed\n");
+ return -ENOMEM;
+ }
+
+ if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
+ ssi_hash_create_xcbc_setup(req, desc, &idx);
+ } else {
+ ssi_hash_create_cmac_setup(req, desc, &idx);
+ }
+
+ ssi_hash_create_data_desc(state, ctx, DIN_AES_DOUT, desc, true, &idx);
+
+ /* store the hash digest result in context */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
+ HW_DESC_SET_DOUT_DLLI(&desc[idx], state->digest_buff_dma_addr, ctx->inter_digestsize, NS_BIT, 1);
+ HW_DESC_SET_QUEUE_LAST_IND(&desc[idx]);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], S_AES_to_DOUT);
+ HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE0);
+ idx++;
+
+ /* Setup DX request structure */
+ ssi_req.user_cb = (void *)ssi_hash_update_complete;
+ ssi_req.user_arg = (void *)req;
+#ifdef ENABLE_CYCLE_COUNT
+ ssi_req.op_type = STAT_OP_TYPE_ENCODE; /* Use "Encode" stats */
+#endif
+
+ rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
+ if (unlikely(rc != -EINPROGRESS)) {
+ SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
+ ssi_buffer_mgr_unmap_hash_request(dev, state, req->src, true);
+ }
+ return rc;
+}
+
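+/*
+ * ssi_mac_final - produce the final XCBC/CMAC value. If the data ended on
+ * a block boundary, the stored block state is first ECB-decrypted back to
+ * block_state XOR M[n] so the engine can reprocess it as the final block;
+ * otherwise the buffered remainder is fed in, or a zero-length MAC is
+ * requested when no data was ever supplied.
+ */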
+static int ssi_mac_final(struct ahash_request *req)
+{
+ struct ahash_req_ctx *state = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct device *dev = &ctx->drvdata->plat_dev->dev;
+ struct ssi_crypto_req ssi_req = {};
+ HwDesc_s desc[SSI_MAX_AHASH_SEQ_LEN];
+ int idx = 0;
+ int rc = 0;
+ uint32_t keySize, keyLen;
+ uint32_t digestsize = crypto_ahash_digestsize(tfm);
+
+ uint32_t rem_cnt = state->buff_index ? state->buff1_cnt :
+ state->buff0_cnt;
+
+
+ CHECK_AND_RETURN_UPON_FIPS_ERROR();
+ if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
+ keySize = CC_AES_128_BIT_KEY_SIZE;
+ keyLen = CC_AES_128_BIT_KEY_SIZE;
+ } else {
+ keySize = (ctx->key_params.keylen == 24) ? AES_MAX_KEY_SIZE : ctx->key_params.keylen;
+ keyLen = ctx->key_params.keylen;
+ }
+
+ SSI_LOG_DEBUG("===== final xcbc reminder (%d) ====\n", rem_cnt);
+
+ if (unlikely(ssi_buffer_mgr_map_hash_request_final(ctx->drvdata, state, req->src, req->nbytes, 0) != 0)) {
+ SSI_LOG_ERR("map_ahash_request_final() failed\n");
+ return -ENOMEM;
+ }
+
+ if (unlikely(ssi_hash_map_result(dev, state, digestsize) != 0)) {
+ SSI_LOG_ERR("map_ahash_digest() failed\n");
+ return -ENOMEM;
+ }
+
+ /* Setup DX request structure */
+ ssi_req.user_cb = (void *)ssi_hash_complete;
+ ssi_req.user_arg = (void *)req;
+#ifdef ENABLE_CYCLE_COUNT
+ ssi_req.op_type = STAT_OP_TYPE_ENCODE; /* Use "Encode" stats */
+#endif
+
+ if (state->xcbc_count && (rem_cnt == 0)) {
+ /* Load key for ECB decryption */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_ECB);
+ HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DRV_CRYPTO_DIRECTION_DECRYPT);
+ HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
+ (ctx->opad_tmp_keys_dma_addr +
+ XCBC_MAC_K1_OFFSET),
+ keySize, NS_BIT);
+ HW_DESC_SET_KEY_SIZE_AES(&desc[idx], keyLen);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_AES);
+ HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
+ idx++;
+
+
+ /* Initiate decryption of block state to previous block_state-XOR-M[n] */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr, CC_AES_BLOCK_SIZE, NS_BIT);
+ HW_DESC_SET_DOUT_DLLI(&desc[idx], state->digest_buff_dma_addr, CC_AES_BLOCK_SIZE, NS_BIT,0);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], DIN_AES_DOUT);
+ idx++;
+
+ /* Memory Barrier: wait for axi write to complete */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_DIN_NO_DMA(&desc[idx], 0, 0xfffff0);
+ HW_DESC_SET_DOUT_NO_DMA(&desc[idx], 0, 0, 1);
+ idx++;
+ }
+
+ if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
+ ssi_hash_create_xcbc_setup(req, desc, &idx);
+ } else {
+ ssi_hash_create_cmac_setup(req, desc, &idx);
+ }
+
+ if (state->xcbc_count == 0) {
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
+ HW_DESC_SET_KEY_SIZE_AES(&desc[idx], keyLen);
+ HW_DESC_SET_CMAC_SIZE0_MODE(&desc[idx]);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_AES);
+ idx++;
+ } else if (rem_cnt > 0) {
+ ssi_hash_create_data_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
+ } else {
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_DIN_CONST(&desc[idx], 0x00, CC_AES_BLOCK_SIZE);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], DIN_AES_DOUT);
+ idx++;
+ }
+
+ /* Get final MAC result */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_DOUT_DLLI(&desc[idx], state->digest_result_dma_addr, digestsize, NS_BIT, 1); /*TODO*/
+ HW_DESC_SET_QUEUE_LAST_IND(&desc[idx]);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], S_AES_to_DOUT);
+ HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE0);
+ HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
+ idx++;
+
+ rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
+ if (unlikely(rc != -EINPROGRESS)) {
+ SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
+ ssi_buffer_mgr_unmap_hash_request(dev, state, req->src, true);
+ ssi_hash_unmap_result(dev, state, digestsize, req->result);
+ }
+ return rc;
+}
+
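+/*
+ * ssi_mac_finup - one-shot update+final for XCBC/CMAC; falls back to
+ * ssi_mac_final() when data was already buffered and no new bytes arrive.
+ */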
+static int ssi_mac_finup(struct ahash_request *req)
+{
+ struct ahash_req_ctx *state = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct device *dev = &ctx->drvdata->plat_dev->dev;
+ struct ssi_crypto_req ssi_req = {};
+ HwDesc_s desc[SSI_MAX_AHASH_SEQ_LEN];
+ int idx = 0;
+ int rc = 0;
+ uint32_t key_len = 0;
+ uint32_t digestsize = crypto_ahash_digestsize(tfm);
+
+ SSI_LOG_DEBUG("===== finup xcbc(%d) ====\n", req->nbytes);
+ CHECK_AND_RETURN_UPON_FIPS_ERROR();
+ if (state->xcbc_count > 0 && req->nbytes == 0) {
+ SSI_LOG_DEBUG("No data to update. Call to fdx_mac_final \n");
+ return ssi_mac_final(req);
+ }
+
+ if (unlikely(ssi_buffer_mgr_map_hash_request_final(ctx->drvdata, state, req->src, req->nbytes, 1) != 0)) {
+ SSI_LOG_ERR("map_ahash_request_final() failed\n");
+ return -ENOMEM;
+ }
+ if (unlikely(ssi_hash_map_result(dev, state, digestsize) != 0)) {
+ SSI_LOG_ERR("map_ahash_digest() failed\n");
+ return -ENOMEM;
+ }
+
+ /* Setup DX request structure */
+ ssi_req.user_cb = (void *)ssi_hash_complete;
+ ssi_req.user_arg = (void *)req;
+#ifdef ENABLE_CYCLE_COUNT
+ ssi_req.op_type = STAT_OP_TYPE_ENCODE; /* Use "Encode" stats */
+#endif
+
+ if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
+ key_len = CC_AES_128_BIT_KEY_SIZE;
+ ssi_hash_create_xcbc_setup(req, desc, &idx);
+ } else {
+ key_len = ctx->key_params.keylen;
+ ssi_hash_create_cmac_setup(req, desc, &idx);
+ }
+
+ if (req->nbytes == 0) {
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
+ HW_DESC_SET_KEY_SIZE_AES(&desc[idx], key_len);
+ HW_DESC_SET_CMAC_SIZE0_MODE(&desc[idx]);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_AES);
+ idx++;
+ } else {
+ ssi_hash_create_data_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
+ }
+
+ /* Get final MAC result */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_DOUT_DLLI(&desc[idx], state->digest_result_dma_addr, digestsize, NS_BIT, 1); /*TODO*/
+ HW_DESC_SET_QUEUE_LAST_IND(&desc[idx]);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], S_AES_to_DOUT);
+ HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE0);
+ HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
+ idx++;
+
+ rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
+ if (unlikely(rc != -EINPROGRESS)) {
+ SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
+ ssi_buffer_mgr_unmap_hash_request(dev, state, req->src, true);
+ ssi_hash_unmap_result(dev, state, digestsize, req->result);
+ }
+ return rc;
+}
+
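+/* ssi_mac_digest - compute an XCBC/CMAC over the whole request in one go. */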
+static int ssi_mac_digest(struct ahash_request *req)
+{
+ struct ahash_req_ctx *state = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct device *dev = &ctx->drvdata->plat_dev->dev;
+ uint32_t digestsize = crypto_ahash_digestsize(tfm);
+ struct ssi_crypto_req ssi_req = {};
+ HwDesc_s desc[SSI_MAX_AHASH_SEQ_LEN];
+ uint32_t keyLen;
+ int idx = 0;
+ int rc;
+
+ SSI_LOG_DEBUG("===== -digest mac (%d) ====\n", req->nbytes);
+ CHECK_AND_RETURN_UPON_FIPS_ERROR();
+
+ if (unlikely(ssi_hash_map_request(dev, state, ctx) != 0)) {
+ SSI_LOG_ERR("map_ahash_source() failed\n");
+ return -ENOMEM;
+ }
+ if (unlikely(ssi_hash_map_result(dev, state, digestsize) != 0)) {
+ SSI_LOG_ERR("map_ahash_digest() failed\n");
+ return -ENOMEM;
+ }
+
+ if (unlikely(ssi_buffer_mgr_map_hash_request_final(ctx->drvdata, state, req->src, req->nbytes, 1) != 0)) {
+ SSI_LOG_ERR("map_ahash_request_final() failed\n");
+ return -ENOMEM;
+ }
+
+ /* Setup DX request structure */
+ ssi_req.user_cb = (void *)ssi_hash_digest_complete;
+ ssi_req.user_arg = (void *)req;
+#ifdef ENABLE_CYCLE_COUNT
+ ssi_req.op_type = STAT_OP_TYPE_ENCODE; /* Use "Encode" stats */
+#endif
+
+
+ if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
+ keyLen = CC_AES_128_BIT_KEY_SIZE;
+ ssi_hash_create_xcbc_setup(req, desc, &idx);
+ } else {
+ keyLen = ctx->key_params.keylen;
+ ssi_hash_create_cmac_setup(req, desc, &idx);
+ }
+
+ if (req->nbytes == 0) {
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
+ HW_DESC_SET_KEY_SIZE_AES(&desc[idx], keyLen);
+ HW_DESC_SET_CMAC_SIZE0_MODE(&desc[idx]);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_AES);
+ idx++;
+ } else {
+ ssi_hash_create_data_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
+ }
+
+ /* Get final MAC result */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_DOUT_DLLI(&desc[idx], state->digest_result_dma_addr, CC_AES_BLOCK_SIZE, NS_BIT,1);
+ HW_DESC_SET_QUEUE_LAST_IND(&desc[idx]);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], S_AES_to_DOUT);
+ HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE0);
+ HW_DESC_SET_CIPHER_CONFIG0(&desc[idx],DESC_DIRECTION_ENCRYPT_ENCRYPT);
+ HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode);
+ idx++;
+
+ rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
+ if (unlikely(rc != -EINPROGRESS)) {
+ SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
+ ssi_buffer_mgr_unmap_hash_request(dev, state, req->src, true);
+ ssi_hash_unmap_result(dev, state, digestsize, req->result);
+ ssi_hash_unmap_request(dev, state, ctx);
+ }
+ return rc;
+}
+
+//shash wrapper functions
+#ifdef SYNC_ALGS
+static int ssi_shash_digest(struct shash_desc *desc,
+ const u8 *data, unsigned int len, u8 *out)
+{
+ struct ahash_req_ctx *state = shash_desc_ctx(desc);
+ struct crypto_shash *tfm = desc->tfm;
+ struct ssi_hash_ctx *ctx = crypto_shash_ctx(tfm);
+ uint32_t digestsize = crypto_shash_digestsize(tfm);
+ struct scatterlist src;
+
+ if (len == 0) {
+ return ssi_hash_digest(state, ctx, digestsize, NULL, 0, out, NULL);
+ }
+
+ /* sg_init_one may crash when len is 0 (depends on kernel configuration) */
+ sg_init_one(&src, (const void *)data, len);
+
+ return ssi_hash_digest(state, ctx, digestsize, &src, len, out, NULL);
+}
+
+static int ssi_shash_update(struct shash_desc *desc,
+ const u8 *data, unsigned int len)
+{
+ struct ahash_req_ctx *state = shash_desc_ctx(desc);
+ struct crypto_shash *tfm = desc->tfm;
+ struct ssi_hash_ctx *ctx = crypto_shash_ctx(tfm);
+ uint32_t blocksize = crypto_tfm_alg_blocksize(&tfm->base);
+ struct scatterlist src;
+
+ sg_init_one(&src, (const void *)data, len);
+
+ return ssi_hash_update(state, ctx, blocksize, &src, len, NULL);
+}
+
+static int ssi_shash_finup(struct shash_desc *desc,
+ const u8 *data, unsigned int len, u8 *out)
+{
+ struct ahash_req_ctx *state = shash_desc_ctx(desc);
+ struct crypto_shash *tfm = desc->tfm;
+ struct ssi_hash_ctx *ctx = crypto_shash_ctx(tfm);
+ uint32_t digestsize = crypto_shash_digestsize(tfm);
+ struct scatterlist src;
+
+ sg_init_one(&src, (const void *)data, len);
+
+ return ssi_hash_finup(state, ctx, digestsize, &src, len, out, NULL);
+}
+
+static int ssi_shash_final(struct shash_desc *desc, u8 *out)
+{
+ struct ahash_req_ctx *state = shash_desc_ctx(desc);
+ struct crypto_shash *tfm = desc->tfm;
+ struct ssi_hash_ctx *ctx = crypto_shash_ctx(tfm);
+ uint32_t digestsize = crypto_shash_digestsize(tfm);
+
+ return ssi_hash_final(state, ctx, digestsize, NULL, 0, out, NULL);
+}
+
+static int ssi_shash_init(struct shash_desc *desc)
+{
+ struct ahash_req_ctx *state = shash_desc_ctx(desc);
+ struct crypto_shash *tfm = desc->tfm;
+ struct ssi_hash_ctx *ctx = crypto_shash_ctx(tfm);
+
+ return ssi_hash_init(state, ctx);
+}
+
+#ifdef EXPORT_FIXED
+static int ssi_shash_export(struct shash_desc *desc, void *out)
+{
+ struct crypto_shash *tfm = desc->tfm;
+ struct ssi_hash_ctx *ctx = crypto_shash_ctx(tfm);
+
+ return ssi_hash_export(ctx, out);
+}
+
+static int ssi_shash_import(struct shash_desc *desc, const void *in)
+{
+ struct crypto_shash *tfm = desc->tfm;
+ struct ssi_hash_ctx *ctx = crypto_shash_ctx(tfm);
+
+ return ssi_hash_import(ctx, in);
+}
+#endif
+
+static int ssi_shash_setkey(struct crypto_shash *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ return ssi_hash_setkey((void *) tfm, key, keylen, true);
+}
+
+#endif /* SYNC_ALGS */
+
+//ahash wrapper functions
+static int ssi_ahash_digest(struct ahash_request *req)
+{
+ struct ahash_req_ctx *state = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ uint32_t digestsize = crypto_ahash_digestsize(tfm);
+
+ return ssi_hash_digest(state, ctx, digestsize, req->src, req->nbytes, req->result, (void *)req);
+}
+
+static int ssi_ahash_update(struct ahash_request *req)
+{
+ struct ahash_req_ctx *state = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ unsigned int block_size = crypto_tfm_alg_blocksize(&tfm->base);
+
+ return ssi_hash_update(state, ctx, block_size, req->src, req->nbytes, (void *)req);
+}
+
+static int ssi_ahash_finup(struct ahash_request *req)
+{
+ struct ahash_req_ctx *state = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ uint32_t digestsize = crypto_ahash_digestsize(tfm);
+
+ return ssi_hash_finup(state, ctx, digestsize, req->src, req->nbytes, req->result, (void *)req);
+}
+
+static int ssi_ahash_final(struct ahash_request *req)
+{
+ struct ahash_req_ctx *state = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ uint32_t digestsize = crypto_ahash_digestsize(tfm);
+
+ return ssi_hash_final(state, ctx, digestsize, req->src, req->nbytes, req->result, (void *)req);
+}
+
+static int ssi_ahash_init(struct ahash_request *req)
+{
+ struct ahash_req_ctx *state = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ SSI_LOG_DEBUG("===== init (%d) ====\n", req->nbytes);
+
+ return ssi_hash_init(state, ctx);
+}
+
+#ifdef EXPORT_FIXED
+static int ssi_ahash_export(struct ahash_request *req, void *out)
+{
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct ssi_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+
+ return ssi_hash_export(ctx, out);
+}
+
+static int ssi_ahash_import(struct ahash_request *req, const void *in)
+{
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct ssi_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+
+ return ssi_hash_import(ctx, in);
+}
+#endif
+
+static int ssi_ahash_setkey(struct crypto_ahash *ahash,
+ const u8 *key, unsigned int keylen)
+{
+ return ssi_hash_setkey((void *) ahash, key, keylen, false);
+}
+
+struct ssi_hash_template {
+ char name[CRYPTO_MAX_ALG_NAME];
+ char driver_name[CRYPTO_MAX_ALG_NAME];
+ char hmac_name[CRYPTO_MAX_ALG_NAME];
+ char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
+ unsigned int blocksize;
+ bool synchronize;
+ union {
+ struct ahash_alg template_ahash;
+ struct shash_alg template_shash;
+ };
+ int hash_mode;
+ int hw_mode;
+ int inter_digestsize;
+ struct ssi_drvdata *drvdata;
+};
+
+/* hash algorithm templates */
+static struct ssi_hash_template driver_hash[] = {
+	//Asynchronous hash templates
+ {
+ .name = "sha1",
+ .driver_name = "sha1-dx",
+ .hmac_name = "hmac(sha1)",
+ .hmac_driver_name = "hmac-sha1-dx",
+ .blocksize = SHA1_BLOCK_SIZE,
+ .synchronize = false,
+ {
+ .template_ahash = {
+ .init = ssi_ahash_init,
+ .update = ssi_ahash_update,
+ .final = ssi_ahash_final,
+ .finup = ssi_ahash_finup,
+ .digest = ssi_ahash_digest,
+#ifdef EXPORT_FIXED
+ .export = ssi_ahash_export,
+ .import = ssi_ahash_import,
+#endif
+ .setkey = ssi_ahash_setkey,
+ .halg = {
+ .digestsize = SHA1_DIGEST_SIZE,
+ .statesize = sizeof(struct sha1_state),
+ },
+ },
+ },
+ .hash_mode = DRV_HASH_SHA1,
+ .hw_mode = DRV_HASH_HW_SHA1,
+ .inter_digestsize = SHA1_DIGEST_SIZE,
+ },
+ {
+ .name = "sha256",
+ .driver_name = "sha256-dx",
+ .hmac_name = "hmac(sha256)",
+ .hmac_driver_name = "hmac-sha256-dx",
+ .blocksize = SHA256_BLOCK_SIZE,
+ .synchronize = false,
+ {
+ .template_ahash = {
+ .init = ssi_ahash_init,
+ .update = ssi_ahash_update,
+ .final = ssi_ahash_final,
+ .finup = ssi_ahash_finup,
+ .digest = ssi_ahash_digest,
+#ifdef EXPORT_FIXED
+ .export = ssi_ahash_export,
+ .import = ssi_ahash_import,
+#endif
+ .setkey = ssi_ahash_setkey,
+ .halg = {
+ .digestsize = SHA256_DIGEST_SIZE,
+ .statesize = sizeof(struct sha256_state),
+ },
+ },
+ },
+ .hash_mode = DRV_HASH_SHA256,
+ .hw_mode = DRV_HASH_HW_SHA256,
+ .inter_digestsize = SHA256_DIGEST_SIZE,
+ },
+ {
+ .name = "sha224",
+ .driver_name = "sha224-dx",
+ .hmac_name = "hmac(sha224)",
+ .hmac_driver_name = "hmac-sha224-dx",
+ .blocksize = SHA224_BLOCK_SIZE,
+ .synchronize = false,
+ {
+ .template_ahash = {
+ .init = ssi_ahash_init,
+ .update = ssi_ahash_update,
+ .final = ssi_ahash_final,
+ .finup = ssi_ahash_finup,
+ .digest = ssi_ahash_digest,
+#ifdef EXPORT_FIXED
+ .export = ssi_ahash_export,
+ .import = ssi_ahash_import,
+#endif
+ .setkey = ssi_ahash_setkey,
+ .halg = {
+ .digestsize = SHA224_DIGEST_SIZE,
+ .statesize = sizeof(struct sha256_state),
+ },
+ },
+ },
+ .hash_mode = DRV_HASH_SHA224,
+ .hw_mode = DRV_HASH_HW_SHA256,
+ .inter_digestsize = SHA256_DIGEST_SIZE,
+ },
+#if (DX_DEV_SHA_MAX > 256)
+ {
+ .name = "sha384",
+ .driver_name = "sha384-dx",
+ .hmac_name = "hmac(sha384)",
+ .hmac_driver_name = "hmac-sha384-dx",
+ .blocksize = SHA384_BLOCK_SIZE,
+ .synchronize = false,
+ {
+ .template_ahash = {
+ .init = ssi_ahash_init,
+ .update = ssi_ahash_update,
+ .final = ssi_ahash_final,
+ .finup = ssi_ahash_finup,
+ .digest = ssi_ahash_digest,
+#ifdef EXPORT_FIXED
+ .export = ssi_ahash_export,
+ .import = ssi_ahash_import,
+#endif
+ .setkey = ssi_ahash_setkey,
+ .halg = {
+ .digestsize = SHA384_DIGEST_SIZE,
+ .statesize = sizeof(struct sha512_state),
+ },
+ },
+ },
+ .hash_mode = DRV_HASH_SHA384,
+ .hw_mode = DRV_HASH_HW_SHA512,
+ .inter_digestsize = SHA512_DIGEST_SIZE,
+ },
+ {
+ .name = "sha512",
+ .driver_name = "sha512-dx",
+ .hmac_name = "hmac(sha512)",
+ .hmac_driver_name = "hmac-sha512-dx",
+ .blocksize = SHA512_BLOCK_SIZE,
+ .synchronize = false,
+ {
+ .template_ahash = {
+ .init = ssi_ahash_init,
+ .update = ssi_ahash_update,
+ .final = ssi_ahash_final,
+ .finup = ssi_ahash_finup,
+ .digest = ssi_ahash_digest,
+#ifdef EXPORT_FIXED
+ .export = ssi_ahash_export,
+ .import = ssi_ahash_import,
+#endif
+ .setkey = ssi_ahash_setkey,
+ .halg = {
+ .digestsize = SHA512_DIGEST_SIZE,
+ .statesize = sizeof(struct sha512_state),
+ },
+ },
+ },
+ .hash_mode = DRV_HASH_SHA512,
+ .hw_mode = DRV_HASH_HW_SHA512,
+ .inter_digestsize = SHA512_DIGEST_SIZE,
+ },
+#endif
+ {
+ .name = "md5",
+ .driver_name = "md5-dx",
+ .hmac_name = "hmac(md5)",
+ .hmac_driver_name = "hmac-md5-dx",
+ .blocksize = MD5_HMAC_BLOCK_SIZE,
+ .synchronize = false,
+ {
+ .template_ahash = {
+ .init = ssi_ahash_init,
+ .update = ssi_ahash_update,
+ .final = ssi_ahash_final,
+ .finup = ssi_ahash_finup,
+ .digest = ssi_ahash_digest,
+#ifdef EXPORT_FIXED
+ .export = ssi_ahash_export,
+ .import = ssi_ahash_import,
+#endif
+ .setkey = ssi_ahash_setkey,
+ .halg = {
+ .digestsize = MD5_DIGEST_SIZE,
+ .statesize = sizeof(struct md5_state),
+ },
+ },
+ },
+ .hash_mode = DRV_HASH_MD5,
+ .hw_mode = DRV_HASH_HW_MD5,
+ .inter_digestsize = MD5_DIGEST_SIZE,
+ },
+ {
+ .name = "xcbc(aes)",
+ .driver_name = "xcbc-aes-dx",
+ .blocksize = AES_BLOCK_SIZE,
+ .synchronize = false,
+ {
+ .template_ahash = {
+ .init = ssi_ahash_init,
+ .update = ssi_mac_update,
+ .final = ssi_mac_final,
+ .finup = ssi_mac_finup,
+ .digest = ssi_mac_digest,
+ .setkey = ssi_xcbc_setkey,
+#ifdef EXPORT_FIXED
+ .export = ssi_ahash_export,
+ .import = ssi_ahash_import,
+#endif
+ .halg = {
+ .digestsize = AES_BLOCK_SIZE,
+ .statesize = sizeof(struct aeshash_state),
+ },
+ },
+ },
+ .hash_mode = DRV_HASH_NULL,
+ .hw_mode = DRV_CIPHER_XCBC_MAC,
+ .inter_digestsize = AES_BLOCK_SIZE,
+ },
+#if SSI_CC_HAS_CMAC
+ {
+ .name = "cmac(aes)",
+ .driver_name = "cmac-aes-dx",
+ .blocksize = AES_BLOCK_SIZE,
+ .synchronize = false,
+ {
+ .template_ahash = {
+ .init = ssi_ahash_init,
+ .update = ssi_mac_update,
+ .final = ssi_mac_final,
+ .finup = ssi_mac_finup,
+ .digest = ssi_mac_digest,
+ .setkey = ssi_cmac_setkey,
+#ifdef EXPORT_FIXED
+ .export = ssi_ahash_export,
+ .import = ssi_ahash_import,
+#endif
+ .halg = {
+ .digestsize = AES_BLOCK_SIZE,
+ .statesize = sizeof(struct aeshash_state),
+ },
+ },
+ },
+ .hash_mode = DRV_HASH_NULL,
+ .hw_mode = DRV_CIPHER_CMAC,
+ .inter_digestsize = AES_BLOCK_SIZE,
+ },
+#endif
+
+};
+
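+/*
+ * Allocate and populate a crypto alg descriptor for the plain hash
+ * (keyed == false) or keyed (HMAC/MAC) variant of the given template.
+ */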
+static struct ssi_hash_alg *
+ssi_hash_create_alg(struct ssi_hash_template *template, bool keyed)
+{
+ struct ssi_hash_alg *t_crypto_alg;
+ struct crypto_alg *alg;
+
+ t_crypto_alg = kzalloc(sizeof(struct ssi_hash_alg), GFP_KERNEL);
+ if (!t_crypto_alg) {
+ SSI_LOG_ERR("failed to allocate t_alg\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ t_crypto_alg->synchronize = template->synchronize;
+ if (template->synchronize) {
+ struct shash_alg *halg;
+ t_crypto_alg->shash_alg = template->template_shash;
+ halg = &t_crypto_alg->shash_alg;
+ alg = &halg->base;
+		if (!keyed)
+			halg->setkey = NULL;
+ } else {
+ struct ahash_alg *halg;
+ t_crypto_alg->ahash_alg = template->template_ahash;
+ halg = &t_crypto_alg->ahash_alg;
+ alg = &halg->halg.base;
+		if (!keyed)
+			halg->setkey = NULL;
+ }
+
+ if (keyed) {
+ snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
+ template->hmac_name);
+ snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+ template->hmac_driver_name);
+ } else {
+ snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
+ template->name);
+ snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+ template->driver_name);
+ }
+ alg->cra_module = THIS_MODULE;
+ alg->cra_ctxsize = sizeof(struct ssi_hash_ctx);
+ alg->cra_priority = SSI_CRA_PRIO;
+ alg->cra_blocksize = template->blocksize;
+ alg->cra_alignmask = 0;
+ alg->cra_exit = ssi_hash_cra_exit;
+
+ if (template->synchronize) {
+ alg->cra_init = ssi_shash_cra_init;
+ alg->cra_flags = CRYPTO_ALG_TYPE_SHASH |
+ CRYPTO_ALG_KERN_DRIVER_ONLY;
+ alg->cra_type = &crypto_shash_type;
+ } else {
+ alg->cra_init = ssi_ahash_cra_init;
+ alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_KERN_DRIVER_ONLY;
+ alg->cra_type = &crypto_ahash_type;
+ }
+
+ t_crypto_alg->hash_mode = template->hash_mode;
+ t_crypto_alg->hw_mode = template->hw_mode;
+ t_crypto_alg->inter_digestsize = template->inter_digestsize;
+
+ return t_crypto_alg;
+}
+
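+/*
+ * Copy the digest-length words and the initial (larval) digests of all
+ * supported hash modes into SRAM so that init/digest flows can load a
+ * fresh state directly from there.
+ */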
+int ssi_hash_init_sram_digest_consts(struct ssi_drvdata *drvdata)
+{
+ struct ssi_hash_handle *hash_handle = drvdata->hash_handle;
+ ssi_sram_addr_t sram_buff_ofs = hash_handle->digest_len_sram_addr;
+ unsigned int larval_seq_len = 0;
+ HwDesc_s larval_seq[CC_DIGEST_SIZE_MAX/sizeof(uint32_t)];
+ int rc = 0;
+#if (DX_DEV_SHA_MAX > 256)
+ int i;
+#endif
+
+ /* Copy-to-sram digest-len */
+ ssi_sram_mgr_const2sram_desc(digest_len_init, sram_buff_ofs,
+ ARRAY_SIZE(digest_len_init), larval_seq, &larval_seq_len);
+ rc = send_request_init(drvdata, larval_seq, larval_seq_len);
+ if (unlikely(rc != 0))
+ goto init_digest_const_err;
+
+ sram_buff_ofs += sizeof(digest_len_init);
+ larval_seq_len = 0;
+
+#if (DX_DEV_SHA_MAX > 256)
+ /* Copy-to-sram digest-len for sha384/512 */
+ ssi_sram_mgr_const2sram_desc(digest_len_sha512_init, sram_buff_ofs,
+ ARRAY_SIZE(digest_len_sha512_init), larval_seq, &larval_seq_len);
+ rc = send_request_init(drvdata, larval_seq, larval_seq_len);
+ if (unlikely(rc != 0))
+ goto init_digest_const_err;
+
+ sram_buff_ofs += sizeof(digest_len_sha512_init);
+ larval_seq_len = 0;
+#endif
+
+ /* The initial digests offset */
+ hash_handle->larval_digest_sram_addr = sram_buff_ofs;
+
+ /* Copy-to-sram initial SHA* digests */
+ ssi_sram_mgr_const2sram_desc(md5_init, sram_buff_ofs,
+ ARRAY_SIZE(md5_init), larval_seq, &larval_seq_len);
+ rc = send_request_init(drvdata, larval_seq, larval_seq_len);
+ if (unlikely(rc != 0))
+ goto init_digest_const_err;
+ sram_buff_ofs += sizeof(md5_init);
+ larval_seq_len = 0;
+
+ ssi_sram_mgr_const2sram_desc(sha1_init, sram_buff_ofs,
+ ARRAY_SIZE(sha1_init), larval_seq, &larval_seq_len);
+ rc = send_request_init(drvdata, larval_seq, larval_seq_len);
+ if (unlikely(rc != 0))
+ goto init_digest_const_err;
+ sram_buff_ofs += sizeof(sha1_init);
+ larval_seq_len = 0;
+
+ ssi_sram_mgr_const2sram_desc(sha224_init, sram_buff_ofs,
+ ARRAY_SIZE(sha224_init), larval_seq, &larval_seq_len);
+ rc = send_request_init(drvdata, larval_seq, larval_seq_len);
+ if (unlikely(rc != 0))
+ goto init_digest_const_err;
+ sram_buff_ofs += sizeof(sha224_init);
+ larval_seq_len = 0;
+
+ ssi_sram_mgr_const2sram_desc(sha256_init, sram_buff_ofs,
+ ARRAY_SIZE(sha256_init), larval_seq, &larval_seq_len);
+ rc = send_request_init(drvdata, larval_seq, larval_seq_len);
+ if (unlikely(rc != 0))
+ goto init_digest_const_err;
+ sram_buff_ofs += sizeof(sha256_init);
+ larval_seq_len = 0;
+
+#if (DX_DEV_SHA_MAX > 256)
+	/* Swap the two 32-bit halves of each 64-bit larval word before copying to SRAM */
+ for (i = 0; i < ARRAY_SIZE(sha384_init); i++) {
+ const uint32_t const0 = ((uint32_t *)((uint64_t *)&sha384_init[i]))[1];
+ const uint32_t const1 = ((uint32_t *)((uint64_t *)&sha384_init[i]))[0];
+
+ ssi_sram_mgr_const2sram_desc(&const0, sram_buff_ofs, 1,
+ larval_seq, &larval_seq_len);
+ sram_buff_ofs += sizeof(uint32_t);
+ ssi_sram_mgr_const2sram_desc(&const1, sram_buff_ofs, 1,
+ larval_seq, &larval_seq_len);
+ sram_buff_ofs += sizeof(uint32_t);
+ }
+ rc = send_request_init(drvdata, larval_seq, larval_seq_len);
+ if (unlikely(rc != 0)) {
+ SSI_LOG_ERR("send_request() failed (rc = %d)\n", rc);
+ goto init_digest_const_err;
+ }
+ larval_seq_len = 0;
+
+ for (i = 0; i < ARRAY_SIZE(sha512_init); i++) {
+ const uint32_t const0 = ((uint32_t *)((uint64_t *)&sha512_init[i]))[1];
+ const uint32_t const1 = ((uint32_t *)((uint64_t *)&sha512_init[i]))[0];
+
+ ssi_sram_mgr_const2sram_desc(&const0, sram_buff_ofs, 1,
+ larval_seq, &larval_seq_len);
+ sram_buff_ofs += sizeof(uint32_t);
+ ssi_sram_mgr_const2sram_desc(&const1, sram_buff_ofs, 1,
+ larval_seq, &larval_seq_len);
+ sram_buff_ofs += sizeof(uint32_t);
+ }
+ rc = send_request_init(drvdata, larval_seq, larval_seq_len);
+ if (unlikely(rc != 0)) {
+ SSI_LOG_ERR("send_request() failed (rc = %d)\n", rc);
+ goto init_digest_const_err;
+ }
+#endif
+
+init_digest_const_err:
+ return rc;
+}
+
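+/*
+ * ssi_hash_alloc - allocate the hash handle, reserve SRAM for the digest
+ * constants and register all hash/HMAC/MAC algorithms with the crypto API.
+ */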
+int ssi_hash_alloc(struct ssi_drvdata *drvdata)
+{
+ struct ssi_hash_handle *hash_handle;
+ ssi_sram_addr_t sram_buff;
+ uint32_t sram_size_to_alloc;
+ int rc = 0;
+ int alg;
+
+ hash_handle = kzalloc(sizeof(struct ssi_hash_handle), GFP_KERNEL);
+ if (hash_handle == NULL) {
+ SSI_LOG_ERR("kzalloc failed to allocate %zu B\n",
+ sizeof(struct ssi_hash_handle));
+ rc = -ENOMEM;
+ goto fail;
+ }
+
+ drvdata->hash_handle = hash_handle;
+
+ sram_size_to_alloc = sizeof(digest_len_init) +
+#if (DX_DEV_SHA_MAX > 256)
+ sizeof(digest_len_sha512_init) +
+ sizeof(sha384_init) +
+ sizeof(sha512_init) +
+#endif
+ sizeof(md5_init) +
+ sizeof(sha1_init) +
+ sizeof(sha224_init) +
+ sizeof(sha256_init);
+
+ sram_buff = ssi_sram_mgr_alloc(drvdata, sram_size_to_alloc);
+ if (sram_buff == NULL_SRAM_ADDR) {
+ SSI_LOG_ERR("SRAM pool exhausted\n");
+ rc = -ENOMEM;
+ goto fail;
+ }
+
+ /* The initial digest-len offset */
+ hash_handle->digest_len_sram_addr = sram_buff;
+
+ /*must be set before the alg registration as it is being used there*/
+ rc = ssi_hash_init_sram_digest_consts(drvdata);
+ if (unlikely(rc != 0)) {
+ SSI_LOG_ERR("Init digest CONST failed (rc=%d)\n", rc);
+ goto fail;
+ }
+
+ INIT_LIST_HEAD(&hash_handle->hash_list);
+
+ /* ahash registration */
+ for (alg = 0; alg < ARRAY_SIZE(driver_hash); alg++) {
+ struct ssi_hash_alg *t_alg;
+
+ /* register hmac version */
+
+		if ((driver_hash[alg].hw_mode != DRV_CIPHER_XCBC_MAC) &&
+		    (driver_hash[alg].hw_mode != DRV_CIPHER_CMAC)) {
+ t_alg = ssi_hash_create_alg(&driver_hash[alg], true);
+ if (IS_ERR(t_alg)) {
+ rc = PTR_ERR(t_alg);
+ SSI_LOG_ERR("%s alg allocation failed\n",
+ driver_hash[alg].driver_name);
+ goto fail;
+ }
+ t_alg->drvdata = drvdata;
+
+ if (t_alg->synchronize) {
+ rc = crypto_register_shash(&t_alg->shash_alg);
+ if (unlikely(rc != 0)) {
+ SSI_LOG_ERR("%s alg registration failed\n",
+ t_alg->shash_alg.base.cra_driver_name);
+ kfree(t_alg);
+ goto fail;
+ } else
+ list_add_tail(&t_alg->entry, &hash_handle->hash_list);
+ } else {
+ rc = crypto_register_ahash(&t_alg->ahash_alg);
+ if (unlikely(rc != 0)) {
+ SSI_LOG_ERR("%s alg registration failed\n",
+ t_alg->ahash_alg.halg.base.cra_driver_name);
+ kfree(t_alg);
+ goto fail;
+ } else
+ list_add_tail(&t_alg->entry, &hash_handle->hash_list);
+ }
+ }
+
+ /* register hash version */
+ t_alg = ssi_hash_create_alg(&driver_hash[alg], false);
+ if (IS_ERR(t_alg)) {
+ rc = PTR_ERR(t_alg);
+ SSI_LOG_ERR("%s alg allocation failed\n",
+ driver_hash[alg].driver_name);
+ goto fail;
+ }
+ t_alg->drvdata = drvdata;
+
+ if (t_alg->synchronize) {
+ rc = crypto_register_shash(&t_alg->shash_alg);
+ if (unlikely(rc != 0)) {
+ SSI_LOG_ERR("%s alg registration failed\n",
+ t_alg->shash_alg.base.cra_driver_name);
+ kfree(t_alg);
+ goto fail;
+ } else
+ list_add_tail(&t_alg->entry, &hash_handle->hash_list);
+
+ } else {
+ rc = crypto_register_ahash(&t_alg->ahash_alg);
+ if (unlikely(rc != 0)) {
+ SSI_LOG_ERR("%s alg registration failed\n",
+ t_alg->ahash_alg.halg.base.cra_driver_name);
+ kfree(t_alg);
+ goto fail;
+ } else
+ list_add_tail(&t_alg->entry, &hash_handle->hash_list);
+ }
+ }
+
+ return 0;
+
+fail:
+
+ if (drvdata->hash_handle != NULL) {
+ kfree(drvdata->hash_handle);
+ drvdata->hash_handle = NULL;
+ }
+ return rc;
+}
+
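+/* Unregister all previously registered algorithms and free the hash handle. */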
+int ssi_hash_free(struct ssi_drvdata *drvdata)
+{
+ struct ssi_hash_alg *t_hash_alg, *hash_n;
+ struct ssi_hash_handle *hash_handle = drvdata->hash_handle;
+
+ if (hash_handle != NULL) {
+
+ list_for_each_entry_safe(t_hash_alg, hash_n, &hash_handle->hash_list, entry) {
+ if (t_hash_alg->synchronize) {
+ crypto_unregister_shash(&t_hash_alg->shash_alg);
+ } else {
+ crypto_unregister_ahash(&t_hash_alg->ahash_alg);
+ }
+ list_del(&t_hash_alg->entry);
+ kfree(t_hash_alg);
+ }
+
+ kfree(hash_handle);
+ drvdata->hash_handle = NULL;
+ }
+ return 0;
+}
+
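+/* Build the descriptors that load the XCBC K1/K2/K3 subkeys and the MAC state. */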
+static void ssi_hash_create_xcbc_setup(struct ahash_request *areq,
+ HwDesc_s desc[],
+ unsigned int *seq_size) {
+ unsigned int idx = *seq_size;
+ struct ahash_req_ctx *state = ahash_request_ctx(areq);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ /* Setup XCBC MAC K1 */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, (ctx->opad_tmp_keys_dma_addr
+ + XCBC_MAC_K1_OFFSET),
+ CC_AES_128_BIT_KEY_SIZE, NS_BIT);
+ HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
+ HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_XCBC_MAC);
+ HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+ HW_DESC_SET_KEY_SIZE_AES(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_AES);
+ idx++;
+
+ /* Setup XCBC MAC K2 */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, (ctx->opad_tmp_keys_dma_addr
+ + XCBC_MAC_K2_OFFSET),
+ CC_AES_128_BIT_KEY_SIZE, NS_BIT);
+ HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE1);
+ HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_XCBC_MAC);
+ HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+ HW_DESC_SET_KEY_SIZE_AES(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_AES);
+ idx++;
+
+ /* Setup XCBC MAC K3 */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, (ctx->opad_tmp_keys_dma_addr
+ + XCBC_MAC_K3_OFFSET),
+ CC_AES_128_BIT_KEY_SIZE, NS_BIT);
+ HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE2);
+ HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_XCBC_MAC);
+ HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+ HW_DESC_SET_KEY_SIZE_AES(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_AES);
+ idx++;
+
+ /* Loading MAC state */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr, CC_AES_BLOCK_SIZE, NS_BIT);
+ HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE0);
+ HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_XCBC_MAC);
+ HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+ HW_DESC_SET_KEY_SIZE_AES(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_AES);
+ idx++;
+ *seq_size = idx;
+}
+
+static void ssi_hash_create_cmac_setup(struct ahash_request *areq,
+ HwDesc_s desc[],
+ unsigned int *seq_size)
+{
+ unsigned int idx = *seq_size;
+ struct ahash_req_ctx *state = ahash_request_ctx(areq);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ /* Setup CMAC Key */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, ctx->opad_tmp_keys_dma_addr,
+ ((ctx->key_params.keylen == 24) ? AES_MAX_KEY_SIZE : ctx->key_params.keylen), NS_BIT);
+ HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
+ HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_CMAC);
+ HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+ HW_DESC_SET_KEY_SIZE_AES(&desc[idx], ctx->key_params.keylen);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_AES);
+ idx++;
+
+ /* Load MAC state */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr, CC_AES_BLOCK_SIZE, NS_BIT);
+ HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE0);
+ HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_CMAC);
+ HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+ HW_DESC_SET_KEY_SIZE_AES(&desc[idx], ctx->key_params.keylen);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_AES);
+ idx++;
+ *seq_size = idx;
+}
+
+static void ssi_hash_create_data_desc(struct ahash_req_ctx *areq_ctx,
+ struct ssi_hash_ctx *ctx,
+ unsigned int flow_mode,
+ HwDesc_s desc[],
+ bool is_not_last_data,
+ unsigned int *seq_size)
+{
+ unsigned int idx = *seq_size;
+
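+ /* DLLI: the data is contiguous, so it is programmed directly; otherwise
+ * the MLLI table is first copied into SRAM via a BYPASS descriptor and
+ * the data is then processed through it */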
+ if (likely(areq_ctx->data_dma_buf_type == SSI_DMA_BUF_DLLI)) {
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
+ sg_dma_address(areq_ctx->curr_sg),
+ areq_ctx->curr_sg->length, NS_BIT);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], flow_mode);
+ idx++;
+ } else {
+ if (areq_ctx->data_dma_buf_type == SSI_DMA_BUF_NULL) {
+ SSI_LOG_DEBUG(" NULL mode\n");
+ /* nothing to build */
+ return;
+ }
+ /* bypass */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
+ areq_ctx->mlli_params.mlli_dma_addr,
+ areq_ctx->mlli_params.mlli_len,
+ NS_BIT);
+ HW_DESC_SET_DOUT_SRAM(&desc[idx],
+ ctx->drvdata->mlli_sram_addr,
+ areq_ctx->mlli_params.mlli_len);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], BYPASS);
+ idx++;
+ /* process */
+ HW_DESC_INIT(&desc[idx]);
+ HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_MLLI,
+ ctx->drvdata->mlli_sram_addr,
+ areq_ctx->mlli_nents,
+ NS_BIT);
+ HW_DESC_SET_FLOW_MODE(&desc[idx], flow_mode);
+ idx++;
+ }
+ if (is_not_last_data)
+ HW_DESC_SET_DIN_NOT_LAST_INDICATION(&desc[idx - 1]);
+ /* return updated desc sequence size */
+ *seq_size = idx;
+}
+
+/*!
+ * Gets the address of the initial digest in SRAM
+ * according to the given hash mode
+ *
+ * \param drvdata
+ * \param mode The Hash mode. Supported modes: MD5/SHA1/SHA224/SHA256
+ *
+ * \return uint32_t The address of the initial digest in SRAM
+ */
+ssi_sram_addr_t ssi_ahash_get_larval_digest_sram_addr(void *drvdata, uint32_t mode)
+{
+ struct ssi_drvdata *_drvdata = (struct ssi_drvdata *)drvdata;
+ struct ssi_hash_handle *hash_handle = _drvdata->hash_handle;
+
+ switch (mode) {
+ case DRV_HASH_NULL:
+ break; /*Ignore*/
+ case DRV_HASH_MD5:
+ return (hash_handle->larval_digest_sram_addr);
+ case DRV_HASH_SHA1:
+ return (hash_handle->larval_digest_sram_addr +
+ sizeof(md5_init));
+ case DRV_HASH_SHA224:
+ return (hash_handle->larval_digest_sram_addr +
+ sizeof(md5_init) +
+ sizeof(sha1_init));
+ case DRV_HASH_SHA256:
+ return (hash_handle->larval_digest_sram_addr +
+ sizeof(md5_init) +
+ sizeof(sha1_init) +
+ sizeof(sha224_init));
+#if (DX_DEV_SHA_MAX > 256)
+ case DRV_HASH_SHA384:
+ return (hash_handle->larval_digest_sram_addr +
+ sizeof(md5_init) +
+ sizeof(sha1_init) +
+ sizeof(sha224_init) +
+ sizeof(sha256_init));
+ case DRV_HASH_SHA512:
+ return (hash_handle->larval_digest_sram_addr +
+ sizeof(md5_init) +
+ sizeof(sha1_init) +
+ sizeof(sha224_init) +
+ sizeof(sha256_init) +
+ sizeof(sha384_init));
+#endif
+ default:
+ SSI_LOG_ERR("Invalid hash mode (%d)\n", mode);
+ }
+
+ /* Return a valid, though incorrect, address to avoid a kernel crash */
+ return hash_handle->larval_digest_sram_addr;
+}
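+
+/*
+ * Usage sketch (illustrative only): a caller would typically feed the
+ * returned SRAM address into a setup descriptor that loads the initial
+ * digest, e.g. for SHA-256 (desc, idx and ctx are hypothetical locals):
+ *
+ *	ssi_sram_addr_t larval = ssi_ahash_get_larval_digest_sram_addr(
+ *					ctx->drvdata, DRV_HASH_SHA256);
+ *	HW_DESC_INIT(&desc[idx]);
+ *	HW_DESC_SET_DIN_SRAM(&desc[idx], larval, SHA256_DIGEST_SIZE);
+ *	HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE0);
+ *	idx++;
+ */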
+
+ssi_sram_addr_t
+ssi_ahash_get_initial_digest_len_sram_addr(void *drvdata, uint32_t mode)
+{
+ struct ssi_drvdata *_drvdata = (struct ssi_drvdata *)drvdata;
+ struct ssi_hash_handle *hash_handle = _drvdata->hash_handle;
+ ssi_sram_addr_t digest_len_addr = hash_handle->digest_len_sram_addr;
+
+ switch (mode) {
+ case DRV_HASH_SHA1:
+ case DRV_HASH_SHA224:
+ case DRV_HASH_SHA256:
+ case DRV_HASH_MD5:
+ return digest_len_addr;
+#if (DX_DEV_SHA_MAX > 256)
+ case DRV_HASH_SHA384:
+ case DRV_HASH_SHA512:
+ return digest_len_addr + sizeof(digest_len_init);
+#endif
+ default:
+ return digest_len_addr; /* return a valid address to avoid a kernel crash */
+ }
+}
+
diff --git a/drivers/staging/ccree/ssi_hash.h b/drivers/staging/ccree/ssi_hash.h
new file mode 100644
index 000000000000..a2b076d3af72
--- /dev/null
+++ b/drivers/staging/ccree/ssi_hash.h
@@ -0,0 +1,101 @@
+/*
+ * Copyright (C) 2012-2017 ARM Limited or its affiliates.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/* \file ssi_hash.h
+ ARM CryptoCell Hash Crypto API
+ */
+
+#ifndef __SSI_HASH_H__
+#define __SSI_HASH_H__
+
+#include "ssi_buffer_mgr.h"
+
+#define HMAC_IPAD_CONST 0x36363636
+#define HMAC_OPAD_CONST 0x5C5C5C5C
+#if (DX_DEV_SHA_MAX > 256)
+#define HASH_LEN_SIZE 16
+#define SSI_MAX_HASH_DIGEST_SIZE SHA512_DIGEST_SIZE
+#define SSI_MAX_HASH_BLCK_SIZE SHA512_BLOCK_SIZE
+#else
+#define HASH_LEN_SIZE 8
+#define SSI_MAX_HASH_DIGEST_SIZE SHA256_DIGEST_SIZE
+#define SSI_MAX_HASH_BLCK_SIZE SHA256_BLOCK_SIZE
+#endif
+
+#define XCBC_MAC_K1_OFFSET 0
+#define XCBC_MAC_K2_OFFSET 16
+#define XCBC_MAC_K3_OFFSET 32
+
+/* This struct was copied from drivers/crypto/nx/nx-aes-xcbc.c; it is used for the xcbc/cmac statesize */
+struct aeshash_state {
+ u8 state[AES_BLOCK_SIZE];
+ unsigned int count;
+ u8 buffer[AES_BLOCK_SIZE];
+};
+
+/* ahash state */
+struct ahash_req_ctx {
+ uint8_t *buff0;
+ uint8_t *buff1;
+ uint8_t *digest_result_buff;
+ struct async_gen_req_ctx gen_ctx;
+ enum ssi_req_dma_buf_type data_dma_buf_type;
+ uint8_t *digest_buff;
+ uint8_t *opad_digest_buff;
+ uint8_t *digest_bytes_len;
+ dma_addr_t opad_digest_dma_addr;
+ dma_addr_t digest_buff_dma_addr;
+ dma_addr_t digest_bytes_len_dma_addr;
+ dma_addr_t digest_result_dma_addr;
+ uint32_t buff0_cnt;
+ uint32_t buff1_cnt;
+ uint32_t buff_index;
+ uint32_t xcbc_count; /* count xcbc update operations */
+ struct scatterlist buff_sg[2];
+ struct scatterlist *curr_sg;
+ uint32_t in_nents;
+ uint32_t mlli_nents;
+ struct mlli_params mlli_params;
+};
+
+int ssi_hash_alloc(struct ssi_drvdata *drvdata);
+int ssi_hash_init_sram_digest_consts(struct ssi_drvdata *drvdata);
+int ssi_hash_free(struct ssi_drvdata *drvdata);
+
+/*!
+ * Gets the initial digest length
+ *
+ * \param drvdata
+ * \param mode The Hash mode. Supported modes: MD5/SHA1/SHA224/SHA256/SHA384/SHA512
+ *
+ * \return uint32_t The address of the initial digest length in SRAM
+ */
+ssi_sram_addr_t
+ssi_ahash_get_initial_digest_len_sram_addr(void *drvdata, uint32_t mode);
+
+/*!
+ * Gets the address of the initial digest in SRAM
+ * according to the given hash mode
+ *
+ * \param drvdata
+ * \param mode The Hash mode. Supported modes: MD5/SHA1/SHA224/SHA256/SHA384/SHA512
+ *
+ * \return uint32_t The address of the initial digest in SRAM
+ */
+ssi_sram_addr_t ssi_ahash_get_larval_digest_sram_addr(void *drvdata, uint32_t mode);
+
+#endif /*__SSI_HASH_H__*/
+
diff --git a/drivers/staging/ccree/ssi_ivgen.c b/drivers/staging/ccree/ssi_ivgen.c
new file mode 100644
index 000000000000..f16f4692f404
--- /dev/null
+++ b/drivers/staging/ccree/ssi_ivgen.c
@@ -0,0 +1,301 @@
+/*
+ * Copyright (C) 2012-2017 ARM Limited or its affiliates.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/platform_device.h>
+#include <crypto/ctr.h>
+#include "ssi_config.h"
+#include "ssi_driver.h"
+#include "ssi_ivgen.h"
+#include "ssi_request_mgr.h"
+#include "ssi_sram_mgr.h"
+#include "ssi_buffer_mgr.h"
+
+/* The max. size of pool *MUST* be <= SRAM total size */
+#define SSI_IVPOOL_SIZE 1024
+/* The first 32 bytes of the pool are dedicated to the next
+ * encryption key and IV used for pool regeneration */
+#define SSI_IVPOOL_META_SIZE (CC_AES_IV_SIZE + AES_KEYSIZE_128)
+#define SSI_IVPOOL_GEN_SEQ_LEN 4
+
+/**
+ * struct ssi_ivgen_ctx - IV pool generation context
+ * @pool: the start address of the IV pool in internal RAM
+ * @ctr_key: address of the pool's encryption key material in internal RAM
+ * @ctr_iv: address of the pool's counter IV in internal RAM
+ * @next_iv_ofs: the offset to the next available IV in the pool
+ * @pool_meta: virtual address of the initial encryption key/IV
+ * @pool_meta_dma: physical address of the initial encryption key/IV
+ */
+struct ssi_ivgen_ctx {
+ ssi_sram_addr_t pool;
+ ssi_sram_addr_t ctr_key;
+ ssi_sram_addr_t ctr_iv;
+ uint32_t next_iv_ofs;
+ uint8_t *pool_meta;
+ dma_addr_t pool_meta_dma;
+};
+
+/*!
+ * Generates SSI_IVPOOL_SIZE random bytes by
+ * encrypting zeros using AES128-CTR.
+ *
+ * \param ivgen_ctx iv-pool context
+ * \param iv_seq IN/OUT array of the descriptors sequence
+ * \param iv_seq_len IN/OUT pointer to the sequence length
+ *
+ * \return int Zero for success, negative value otherwise.
+ */
+static int ssi_ivgen_generate_pool(
+ struct ssi_ivgen_ctx *ivgen_ctx,
+ HwDesc_s iv_seq[],
+ unsigned int *iv_seq_len)
+{
+ unsigned int idx = *iv_seq_len;
+
+ if ((*iv_seq_len + SSI_IVPOOL_GEN_SEQ_LEN) > SSI_IVPOOL_SEQ_LEN) {
+ /* The sequence will be longer than allowed */
+ return -EINVAL;
+ }
+ /* Setup key */
+ HW_DESC_INIT(&iv_seq[idx]);
+ HW_DESC_SET_DIN_SRAM(&iv_seq[idx], ivgen_ctx->ctr_key, AES_KEYSIZE_128);
+ HW_DESC_SET_SETUP_MODE(&iv_seq[idx], SETUP_LOAD_KEY0);
+ HW_DESC_SET_CIPHER_CONFIG0(&iv_seq[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+ HW_DESC_SET_FLOW_MODE(&iv_seq[idx], S_DIN_to_AES);
+ HW_DESC_SET_KEY_SIZE_AES(&iv_seq[idx], CC_AES_128_BIT_KEY_SIZE);
+ HW_DESC_SET_CIPHER_MODE(&iv_seq[idx], DRV_CIPHER_CTR);
+ idx++;
+
+ /* Setup cipher state */
+ HW_DESC_INIT(&iv_seq[idx]);
+ HW_DESC_SET_DIN_SRAM(&iv_seq[idx], ivgen_ctx->ctr_iv, CC_AES_IV_SIZE);
+ HW_DESC_SET_CIPHER_CONFIG0(&iv_seq[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
+ HW_DESC_SET_FLOW_MODE(&iv_seq[idx], S_DIN_to_AES);
+ HW_DESC_SET_SETUP_MODE(&iv_seq[idx], SETUP_LOAD_STATE1);
+ HW_DESC_SET_KEY_SIZE_AES(&iv_seq[idx], CC_AES_128_BIT_KEY_SIZE);
+ HW_DESC_SET_CIPHER_MODE(&iv_seq[idx], DRV_CIPHER_CTR);
+ idx++;
+
+ /* Perform dummy encrypt to skip first block */
+ HW_DESC_INIT(&iv_seq[idx]);
+ HW_DESC_SET_DIN_CONST(&iv_seq[idx], 0, CC_AES_IV_SIZE);
+ HW_DESC_SET_DOUT_SRAM(&iv_seq[idx], ivgen_ctx->pool, CC_AES_IV_SIZE);
+ HW_DESC_SET_FLOW_MODE(&iv_seq[idx], DIN_AES_DOUT);
+ idx++;
+
+ /* Generate IV pool */
+ HW_DESC_INIT(&iv_seq[idx]);
+ HW_DESC_SET_DIN_CONST(&iv_seq[idx], 0, SSI_IVPOOL_SIZE);
+ HW_DESC_SET_DOUT_SRAM(&iv_seq[idx], ivgen_ctx->pool, SSI_IVPOOL_SIZE);
+ HW_DESC_SET_FLOW_MODE(&iv_seq[idx], DIN_AES_DOUT);
+ idx++;
+
+ *iv_seq_len = idx; /* Update sequence length */
+
+ /* queue ordering assures pool readiness */
+ ivgen_ctx->next_iv_ofs = SSI_IVPOOL_META_SIZE;
+
+ return 0;
+}
+
+/*!
+ * Generates the initial pool in SRAM.
+ * This function should be invoked when resuming DX driver.
+ *
+ * \param drvdata
+ *
+ * \return int Zero for success, negative value otherwise.
+ */
+int ssi_ivgen_init_sram_pool(struct ssi_drvdata *drvdata)
+{
+ struct ssi_ivgen_ctx *ivgen_ctx = drvdata->ivgen_handle;
+ HwDesc_s iv_seq[SSI_IVPOOL_SEQ_LEN];
+ unsigned int iv_seq_len = 0;
+ int rc;
+
+ /* Generate initial enc. key/iv */
+ get_random_bytes(ivgen_ctx->pool_meta, SSI_IVPOOL_META_SIZE);
+
+ /* The first 32 bytes are reserved for the encryption key/IV */
+ ivgen_ctx->ctr_key = ivgen_ctx->pool;
+ ivgen_ctx->ctr_iv = ivgen_ctx->pool + AES_KEYSIZE_128;
+
+ /* Copy the initial encryption key and IV to SRAM with a single descriptor */
+ HW_DESC_INIT(&iv_seq[iv_seq_len]);
+ HW_DESC_SET_DIN_TYPE(&iv_seq[iv_seq_len], DMA_DLLI,
+ ivgen_ctx->pool_meta_dma, SSI_IVPOOL_META_SIZE,
+ NS_BIT);
+ HW_DESC_SET_DOUT_SRAM(&iv_seq[iv_seq_len], ivgen_ctx->pool,
+ SSI_IVPOOL_META_SIZE);
+ HW_DESC_SET_FLOW_MODE(&iv_seq[iv_seq_len], BYPASS);
+ iv_seq_len++;
+
+ /* Generate initial pool */
+ rc = ssi_ivgen_generate_pool(ivgen_ctx, iv_seq, &iv_seq_len);
+ if (unlikely(rc != 0)) {
+ return rc;
+ }
+ /* Fire-and-forget */
+ return send_request_init(drvdata, iv_seq, iv_seq_len);
+}
+
+/*!
+ * Free iv-pool and ivgen context.
+ *
+ * \param drvdata
+ */
+void ssi_ivgen_fini(struct ssi_drvdata *drvdata)
+{
+ struct ssi_ivgen_ctx *ivgen_ctx = drvdata->ivgen_handle;
+ struct device *device = &(drvdata->plat_dev->dev);
+
+ if (ivgen_ctx == NULL)
+ return;
+
+ if (ivgen_ctx->pool_meta != NULL) {
+ memset(ivgen_ctx->pool_meta, 0, SSI_IVPOOL_META_SIZE);
+ SSI_RESTORE_DMA_ADDR_TO_48BIT(ivgen_ctx->pool_meta_dma);
+ dma_free_coherent(device, SSI_IVPOOL_META_SIZE,
+ ivgen_ctx->pool_meta, ivgen_ctx->pool_meta_dma);
+ }
+
+ ivgen_ctx->pool = NULL_SRAM_ADDR;
+
+ /* release "this" context */
+ kfree(ivgen_ctx);
+}
+
+/*!
+ * Allocates iv-pool and maps resources.
+ * This function generates the first IV pool.
+ *
+ * \param drvdata Driver's private context
+ *
+ * \return int Zero for success, negative value otherwise.
+ */
+int ssi_ivgen_init(struct ssi_drvdata *drvdata)
+{
+ struct ssi_ivgen_ctx *ivgen_ctx;
+ struct device *device = &drvdata->plat_dev->dev;
+ int rc;
+
+ /* Allocate "this" context */
+ drvdata->ivgen_handle = kzalloc(sizeof(struct ssi_ivgen_ctx), GFP_KERNEL);
+ if (!drvdata->ivgen_handle) {
+ SSI_LOG_ERR("Not enough memory to allocate IVGEN context "
+ "(%zu B)\n", sizeof(struct ssi_ivgen_ctx));
+ rc = -ENOMEM;
+ goto out;
+ }
+ ivgen_ctx = drvdata->ivgen_handle;
+
+ /* Allocate the pool's header for the initial encryption key/IV */
+ ivgen_ctx->pool_meta = dma_alloc_coherent(device, SSI_IVPOOL_META_SIZE,
+ &ivgen_ctx->pool_meta_dma, GFP_KERNEL);
+ if (!ivgen_ctx->pool_meta) {
+ SSI_LOG_ERR("Not enough memory to allocate DMA of pool_meta "
+ "(%u B)\n", SSI_IVPOOL_META_SIZE);
+ rc = -ENOMEM;
+ goto out;
+ }
+ SSI_UPDATE_DMA_ADDR_TO_48BIT(ivgen_ctx->pool_meta_dma,
+ SSI_IVPOOL_META_SIZE);
+ /* Allocate IV pool in SRAM */
+ ivgen_ctx->pool = ssi_sram_mgr_alloc(drvdata, SSI_IVPOOL_SIZE);
+ if (ivgen_ctx->pool == NULL_SRAM_ADDR) {
+ SSI_LOG_ERR("SRAM pool exhausted\n");
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ return ssi_ivgen_init_sram_pool(drvdata);
+
+out:
+ ssi_ivgen_fini(drvdata);
+ return rc;
+}
+
+/*!
+ * Acquires 16 Bytes IV from the iv-pool
+ *
+ * \param drvdata Driver private context
+ * \param iv_out_dma Array of physical IV out addresses
+ * \param iv_out_dma_len Length of iv_out_dma array (additional elements of iv_out_dma array are ignored)
+ * \param iv_out_size May be 8 or 16 bytes long
+ * \param iv_seq IN/OUT array to the descriptors sequence
+ * \param iv_seq_len IN/OUT pointer to the sequence length
+ *
+ * \return int Zero for success, negative value otherwise.
+ */
+int ssi_ivgen_getiv(
+ struct ssi_drvdata *drvdata,
+ dma_addr_t iv_out_dma[],
+ unsigned int iv_out_dma_len,
+ unsigned int iv_out_size,
+ HwDesc_s iv_seq[],
+ unsigned int *iv_seq_len)
+{
+ struct ssi_ivgen_ctx *ivgen_ctx = drvdata->ivgen_handle;
+ unsigned int idx = *iv_seq_len;
+ unsigned int t;
+
+ if ((iv_out_size != CC_AES_IV_SIZE) &&
+ (iv_out_size != CTR_RFC3686_IV_SIZE)) {
+ return -EINVAL;
+ }
+ if ((iv_out_dma_len + 1) > SSI_IVPOOL_SEQ_LEN) {
+ /* The sequence will be longer than allowed */
+ return -EINVAL;
+ }
+
+ /* Check that the number of generated IVs is limited to the max. number of DMA IV output addresses */
+ if (iv_out_dma_len > SSI_MAX_IVGEN_DMA_ADDRESSES) {
+ /* The sequence will be longer than allowed */
+ return -EINVAL;
+ }
+
+ for (t = 0; t < iv_out_dma_len; t++) {
+ /* Acquire IV from pool */
+ HW_DESC_INIT(&iv_seq[idx]);
+ HW_DESC_SET_DIN_SRAM(&iv_seq[idx],
+ ivgen_ctx->pool + ivgen_ctx->next_iv_ofs,
+ iv_out_size);
+ HW_DESC_SET_DOUT_DLLI(&iv_seq[idx], iv_out_dma[t],
+ iv_out_size, NS_BIT, 0);
+ HW_DESC_SET_FLOW_MODE(&iv_seq[idx], BYPASS);
+ idx++;
+ }
+
+ /* The bypass operation is followed by the crypto sequence, hence a memory
+ * barrier descriptor must ensure the bypass write transaction completes */
+ HW_DESC_INIT(&iv_seq[idx]);
+ HW_DESC_SET_DIN_NO_DMA(&iv_seq[idx], 0, 0xfffff0);
+ HW_DESC_SET_DOUT_NO_DMA(&iv_seq[idx], 0, 0, 1);
+ idx++;
+
+ *iv_seq_len = idx; /* update seq length */
+
+ /* Update iv index */
+ ivgen_ctx->next_iv_ofs += iv_out_size;
+
+ if ((SSI_IVPOOL_SIZE - ivgen_ctx->next_iv_ofs) < CC_AES_IV_SIZE) {
+ SSI_LOG_DEBUG("Pool exhausted, regenerating iv-pool\n");
+ /* Pool is drained - regenerate it */
+ return ssi_ivgen_generate_pool(ivgen_ctx, iv_seq, iv_seq_len);
+ }
+
+ return 0;
+}
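+
+/*
+ * Usage sketch (illustrative only): a cipher request path that needs fresh
+ * IVs can prepend the IV-fetch descriptors to its own sequence (req is a
+ * hypothetical request holding the ivgen DMA addresses):
+ *
+ *	HwDesc_s iv_seq[SSI_IVPOOL_SEQ_LEN];
+ *	unsigned int iv_seq_len = 0;
+ *
+ *	rc = ssi_ivgen_getiv(drvdata, req->ivgen_dma_addr,
+ *			     req->ivgen_dma_addr_len, CC_AES_IV_SIZE,
+ *			     iv_seq, &iv_seq_len);
+ *	if (unlikely(rc != 0))
+ *		return rc;
+ */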
+
+
diff --git a/drivers/staging/ccree/ssi_ivgen.h b/drivers/staging/ccree/ssi_ivgen.h
new file mode 100644
index 000000000000..bc69cd8ca418
--- /dev/null
+++ b/drivers/staging/ccree/ssi_ivgen.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2012-2017 ARM Limited or its affiliates.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __SSI_IVGEN_H__
+#define __SSI_IVGEN_H__
+
+#include "cc_hw_queue_defs.h"
+
+
+#define SSI_IVPOOL_SEQ_LEN 8
+
+/*!
+ * Allocates iv-pool and maps resources.
+ * This function generates the first IV pool.
+ *
+ * \param drvdata Driver's private context
+ *
+ * \return int Zero for success, negative value otherwise.
+ */
+int ssi_ivgen_init(struct ssi_drvdata *drvdata);
+
+/*!
+ * Free iv-pool and ivgen context.
+ *
+ * \param drvdata
+ */
+void ssi_ivgen_fini(struct ssi_drvdata *drvdata);
+
+/*!
+ * Generates the initial pool in SRAM.
+ * This function should be invoked when resuming DX driver.
+ *
+ * \param drvdata
+ *
+ * \return int Zero for success, negative value otherwise.
+ */
+int ssi_ivgen_init_sram_pool(struct ssi_drvdata *drvdata);
+
+/*!
+ * Acquires 16 Bytes IV from the iv-pool
+ *
+ * \param drvdata Driver private context
+ * \param iv_out_dma Array of physical IV out addresses
+ * \param iv_out_dma_len Length of iv_out_dma array (additional elements of iv_out_dma array are ignored)
+ * \param iv_out_size May be 8 or 16 bytes long
+ * \param iv_seq IN/OUT array to the descriptors sequence
+ * \param iv_seq_len IN/OUT pointer to the sequence length
+ *
+ * \return int Zero for success, negative value otherwise.
+ */
+int ssi_ivgen_getiv(
+ struct ssi_drvdata *drvdata,
+ dma_addr_t iv_out_dma[],
+ unsigned int iv_out_dma_len,
+ unsigned int iv_out_size,
+ HwDesc_s iv_seq[],
+ unsigned int *iv_seq_len);
+
+#endif /*__SSI_IVGEN_H__*/
diff --git a/drivers/staging/ccree/ssi_pm.c b/drivers/staging/ccree/ssi_pm.c
new file mode 100644
index 000000000000..dd399f28a68c
--- /dev/null
+++ b/drivers/staging/ccree/ssi_pm.c
@@ -0,0 +1,150 @@
+/*
+ * Copyright (C) 2012-2017 ARM Limited or its affiliates.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+#include "ssi_config.h"
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <crypto/ctr.h>
+#include <linux/pm_runtime.h>
+#include "ssi_driver.h"
+#include "ssi_buffer_mgr.h"
+#include "ssi_request_mgr.h"
+#include "ssi_sram_mgr.h"
+#include "ssi_sysfs.h"
+#include "ssi_ivgen.h"
+#include "ssi_hash.h"
+#include "ssi_pm.h"
+#include "ssi_pm_ext.h"
+
+
+#if defined (CONFIG_PM_RUNTIME) || defined (CONFIG_PM_SLEEP)
+
+#define POWER_DOWN_ENABLE 0x01
+#define POWER_DOWN_DISABLE 0x00
+
+
+int ssi_power_mgr_runtime_suspend(struct device *dev)
+{
+ struct ssi_drvdata *drvdata =
+ (struct ssi_drvdata *)dev_get_drvdata(dev);
+ int rc;
+
+ SSI_LOG_DEBUG("ssi_power_mgr_runtime_suspend: set HOST_POWER_DOWN_EN\n");
+ WRITE_REGISTER(drvdata->cc_base + CC_REG_OFFSET(HOST_RGF, HOST_POWER_DOWN_EN), POWER_DOWN_ENABLE);
+ rc = ssi_request_mgr_runtime_suspend_queue(drvdata);
+ if (rc != 0) {
+ SSI_LOG_ERR("ssi_request_mgr_runtime_suspend_queue (%x)\n", rc);
+ return rc;
+ }
+ fini_cc_regs(drvdata);
+
+ /* Specific HW suspend code */
+ ssi_pm_ext_hw_suspend(dev);
+ return 0;
+}
+
+int ssi_power_mgr_runtime_resume(struct device *dev)
+{
+ int rc;
+ struct ssi_drvdata *drvdata =
+ (struct ssi_drvdata *)dev_get_drvdata(dev);
+
+ SSI_LOG_DEBUG("ssi_power_mgr_runtime_resume , unset HOST_POWER_DOWN_EN\n");
+ WRITE_REGISTER(drvdata->cc_base + CC_REG_OFFSET(HOST_RGF, HOST_POWER_DOWN_EN), POWER_DOWN_DISABLE);
+ /* Specific HW resume code */
+ ssi_pm_ext_hw_resume(dev);
+
+ rc = init_cc_regs(drvdata, false);
+ if (rc !=0) {
+ SSI_LOG_ERR("init_cc_regs (%x)\n",rc);
+ return rc;
+ }
+
+ rc = ssi_request_mgr_runtime_resume_queue(drvdata);
+ if (rc !=0) {
+ SSI_LOG_ERR("ssi_request_mgr_runtime_resume_queue (%x)\n",rc);
+ return rc;
+ }
+
+ /* Must be done after the queue is resumed, as it uses the HW queue */
+ ssi_hash_init_sram_digest_consts(drvdata);
+
+ ssi_ivgen_init_sram_pool(drvdata);
+ return 0;
+}
+
+int ssi_power_mgr_runtime_get(struct device *dev)
+{
+ int rc = 0;
+
+ if (ssi_request_mgr_is_queue_runtime_suspend(
+ (struct ssi_drvdata *)dev_get_drvdata(dev))) {
+ rc = pm_runtime_get_sync(dev);
+ } else {
+ pm_runtime_get_noresume(dev);
+ }
+ return rc;
+}
+
+int ssi_power_mgr_runtime_put_suspend(struct device *dev)
+{
+ int rc = 0;
+
+ if (!ssi_request_mgr_is_queue_runtime_suspend(
+ (struct ssi_drvdata *)dev_get_drvdata(dev))) {
+ pm_runtime_mark_last_busy(dev);
+ rc = pm_runtime_put_autosuspend(dev);
+ } else {
+ /* Something went wrong */
+ BUG();
+ }
+ return rc;
+
+}
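+
+/*
+ * Usage sketch (illustrative only): callers that touch the HW queue are
+ * expected to bracket the operation with the runtime-PM helpers above
+ * (do_hw_operation() is a hypothetical placeholder):
+ *
+ *	rc = ssi_power_mgr_runtime_get(dev);
+ *	if (rc != 0)
+ *		return rc;
+ *	do_hw_operation();
+ *	ssi_power_mgr_runtime_put_suspend(dev);
+ */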
+
+#endif
+
+
+
+int ssi_power_mgr_init(struct ssi_drvdata *drvdata)
+{
+ int rc = 0;
+#if defined (CONFIG_PM_RUNTIME) || defined (CONFIG_PM_SLEEP)
+ struct platform_device *plat_dev = drvdata->plat_dev;
+ /* Must be done before enabling PM to avoid redundant suspends */
+ pm_runtime_set_autosuspend_delay(&plat_dev->dev, SSI_SUSPEND_TIMEOUT);
+ pm_runtime_use_autosuspend(&plat_dev->dev);
+ /* activate the PM module */
+ rc = pm_runtime_set_active(&plat_dev->dev);
+ if (rc != 0)
+ return rc;
+ /* enable the PM module */
+ pm_runtime_enable(&plat_dev->dev);
+#endif
+ return rc;
+}
+
+void ssi_power_mgr_fini(struct ssi_drvdata *drvdata)
+{
+#if defined (CONFIG_PM_RUNTIME) || defined (CONFIG_PM_SLEEP)
+ struct platform_device *plat_dev = drvdata->plat_dev;
+
+ pm_runtime_disable(&plat_dev->dev);
+#endif
+}
diff --git a/drivers/staging/ccree/ssi_pm.h b/drivers/staging/ccree/ssi_pm.h
new file mode 100644
index 000000000000..516fc3f445a8
--- /dev/null
+++ b/drivers/staging/ccree/ssi_pm.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2012-2017 ARM Limited or its affiliates.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/* \file ssi_pm.h
+ */
+
+#ifndef __SSI_POWER_MGR_H__
+#define __SSI_POWER_MGR_H__
+
+
+#include "ssi_config.h"
+#include "ssi_driver.h"
+
+
+#define SSI_SUSPEND_TIMEOUT 3000
+
+
+int ssi_power_mgr_init(struct ssi_drvdata *drvdata);
+
+void ssi_power_mgr_fini(struct ssi_drvdata *drvdata);
+
+#if defined (CONFIG_PM_RUNTIME) || defined (CONFIG_PM_SLEEP)
+int ssi_power_mgr_runtime_suspend(struct device *dev);
+
+int ssi_power_mgr_runtime_resume(struct device *dev);
+
+int ssi_power_mgr_runtime_get(struct device *dev);
+
+int ssi_power_mgr_runtime_put_suspend(struct device *dev);
+#endif
+
+#endif /*__SSI_POWER_MGR_H__*/
+
diff --git a/drivers/staging/ccree/ssi_pm_ext.c b/drivers/staging/ccree/ssi_pm_ext.c
new file mode 100644
index 000000000000..f86bbab22073
--- /dev/null
+++ b/drivers/staging/ccree/ssi_pm_ext.c
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2012-2017 ARM Limited or its affiliates.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+#include "ssi_config.h"
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <crypto/ctr.h>
+#include <linux/pm_runtime.h>
+#include "ssi_driver.h"
+#include "ssi_sram_mgr.h"
+#include "ssi_pm_ext.h"
+
+/*
+ * This function should suspend the HW (if possible); it should be implemented
+ * by the driver user.
+ * The reference code clears the internal SRAM to imitate loss of state.
+ */
+void ssi_pm_ext_hw_suspend(struct device *dev)
+{
+ struct ssi_drvdata *drvdata =
+ (struct ssi_drvdata *)dev_get_drvdata(dev);
+ unsigned int val;
+ void __iomem *cc_base = drvdata->cc_base;
+ unsigned int sram_addr = 0;
+
+ CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, SRAM_ADDR), sram_addr);
+
+ for (; sram_addr < SSI_CC_SRAM_SIZE; sram_addr += 4) {
+ CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, SRAM_DATA), 0x0);
+
+ do {
+ val = CC_HAL_READ_REGISTER(CC_REG_OFFSET(HOST_RGF, SRAM_DATA_READY));
+ } while (!(val & 0x1));
+ }
+}
+
+/*
+ * This function should resume the HW (if possible); it should be implemented
+ * by the driver user.
+ */
+void ssi_pm_ext_hw_resume(struct device *dev)
+{
+ return;
+}
+
diff --git a/drivers/staging/ccree/ssi_pm_ext.h b/drivers/staging/ccree/ssi_pm_ext.h
new file mode 100644
index 000000000000..b4e2795de29e
--- /dev/null
+++ b/drivers/staging/ccree/ssi_pm_ext.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2012-2017 ARM Limited or its affiliates.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/* \file ssi_pm_ext.h
+ */
+
+#ifndef __PM_EXT_H__
+#define __PM_EXT_H__
+
+
+#include "ssi_config.h"
+#include "ssi_driver.h"
+
+void ssi_pm_ext_hw_suspend(struct device *dev);
+
+void ssi_pm_ext_hw_resume(struct device *dev);
+
+
+#endif /*__PM_EXT_H__*/
+
diff --git a/drivers/staging/ccree/ssi_request_mgr.c b/drivers/staging/ccree/ssi_request_mgr.c
new file mode 100644
index 000000000000..522bd62c102e
--- /dev/null
+++ b/drivers/staging/ccree/ssi_request_mgr.c
@@ -0,0 +1,713 @@
+/*
+ * Copyright (C) 2012-2017 ARM Limited or its affiliates.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "ssi_config.h"
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <crypto/ctr.h>
+#ifdef FLUSH_CACHE_ALL
+#include <asm/cacheflush.h>
+#endif
+#include <linux/pm_runtime.h>
+#include "ssi_driver.h"
+#include "ssi_buffer_mgr.h"
+#include "ssi_request_mgr.h"
+#include "ssi_sysfs.h"
+#include "ssi_ivgen.h"
+#include "ssi_pm.h"
+#include "ssi_fips.h"
+#include "ssi_fips_local.h"
+
+#define SSI_MAX_POLL_ITER 10
+
+#define AXIM_MON_BASE_OFFSET CC_REG_OFFSET(CRY_KERNEL, AXIM_MON_COMP)
+
+#ifdef CC_CYCLE_COUNT
+
+#define MONITOR_CNTR_BIT 0
+
+/**
+ * Monitor descriptor.
+ * Used to measure CC performance.
+ */
+#define INIT_CC_MONITOR_DESC(desc_p) \
+do { \
+ HW_DESC_INIT(desc_p); \
+ HW_DESC_SET_DIN_MONITOR_CNTR(desc_p); \
+} while (0)
+
+/**
+ * Try adding monitor descriptor BEFORE enqueuing sequence.
+ */
+#define CC_CYCLE_DESC_HEAD(cc_base_addr, desc_p, lock_p, is_monitored_p) \
+do { \
+ if (!test_and_set_bit(MONITOR_CNTR_BIT, (lock_p))) { \
+ enqueue_seq((cc_base_addr), (desc_p), 1); \
+ *(is_monitored_p) = true; \
+ } else { \
+ *(is_monitored_p) = false; \
+ } \
+} while (0)
+
+/**
+ * If CC_CYCLE_DESC_HEAD was successfully added:
+ * 1. Add memory barrier descriptor to ensure last AXI transaction.
+ * 2. Add monitor descriptor to sequence tail AFTER enqueuing sequence.
+ */
+#define CC_CYCLE_DESC_TAIL(cc_base_addr, desc_p, is_monitored) \
+do { \
+ if ((is_monitored) == true) { \
+ HwDesc_s barrier_desc; \
+ HW_DESC_INIT(&barrier_desc); \
+ HW_DESC_SET_DIN_NO_DMA(&barrier_desc, 0, 0xfffff0); \
+ HW_DESC_SET_DOUT_NO_DMA(&barrier_desc, 0, 0, 1); \
+ enqueue_seq((cc_base_addr), &barrier_desc, 1); \
+ enqueue_seq((cc_base_addr), (desc_p), 1); \
+ } \
+} while (0)
+
+/**
+ * Try reading CC monitor counter value upon sequence complete.
+ * Can only succeed if the lock_p is taken by the owner of the given request.
+ */
+#define END_CC_MONITOR_COUNT(cc_base_addr, stat_op_type, stat_phase, monitor_null_cycles, lock_p, is_monitored) \
+do { \
+ uint32_t elapsed_cycles; \
+ if ((is_monitored) == true) { \
+ elapsed_cycles = READ_REGISTER((cc_base_addr) + CC_REG_OFFSET(CRY_KERNEL, DSCRPTR_MEASURE_CNTR)); \
+ clear_bit(MONITOR_CNTR_BIT, (lock_p)); \
+ if (elapsed_cycles > 0) \
+ update_cc_stat(stat_op_type, stat_phase, (elapsed_cycles - monitor_null_cycles)); \
+ } \
+} while (0)
+
+#else /*CC_CYCLE_COUNT*/
+
+#define INIT_CC_MONITOR_DESC(desc_p) do { } while (0)
+#define CC_CYCLE_DESC_HEAD(cc_base_addr, desc_p, lock_p, is_monitored_p) do { } while (0)
+#define CC_CYCLE_DESC_TAIL(cc_base_addr, desc_p, is_monitored) do { } while (0)
+#define END_CC_MONITOR_COUNT(cc_base_addr, stat_op_type, stat_phase, monitor_null_cycles, lock_p, is_monitored) do { } while (0)
+#endif /*CC_CYCLE_COUNT*/
+
+
+struct ssi_request_mgr_handle {
+ /* Request manager resources */
+ unsigned int hw_queue_size; /* HW capability */
+ unsigned int min_free_hw_slots;
+ unsigned int max_used_sw_slots;
+ struct ssi_crypto_req req_queue[MAX_REQUEST_QUEUE_SIZE];
+ uint32_t req_queue_head;
+ uint32_t req_queue_tail;
+ uint32_t axi_completed;
+ uint32_t q_free_slots;
+ spinlock_t hw_lock;
+ HwDesc_s compl_desc;
+ uint8_t *dummy_comp_buff;
+ dma_addr_t dummy_comp_buff_dma;
+ HwDesc_s monitor_desc;
+ volatile unsigned long monitor_lock;
+#ifdef COMP_IN_WQ
+ struct workqueue_struct *workq;
+ struct delayed_work compwork;
+#else
+ struct tasklet_struct comptask;
+#endif
+#if defined (CONFIG_PM_RUNTIME) || defined (CONFIG_PM_SLEEP)
+ bool is_runtime_suspended;
+#endif
+};
+
+static void comp_handler(unsigned long devarg);
+#ifdef COMP_IN_WQ
+static void comp_work_handler(struct work_struct *work);
+#endif
+
+void request_mgr_fini(struct ssi_drvdata *drvdata)
+{
+ struct ssi_request_mgr_handle *req_mgr_h = drvdata->request_mgr_handle;
+
+ if (req_mgr_h == NULL)
+ return; /* Not allocated */
+
+ if (req_mgr_h->dummy_comp_buff_dma != 0) {
+ SSI_RESTORE_DMA_ADDR_TO_48BIT(req_mgr_h->dummy_comp_buff_dma);
+ dma_free_coherent(&drvdata->plat_dev->dev,
+ sizeof(uint32_t), req_mgr_h->dummy_comp_buff,
+ req_mgr_h->dummy_comp_buff_dma);
+ }
+
+ SSI_LOG_DEBUG("max_used_hw_slots=%d\n", (req_mgr_h->hw_queue_size -
+ req_mgr_h->min_free_hw_slots) );
+ SSI_LOG_DEBUG("max_used_sw_slots=%d\n", req_mgr_h->max_used_sw_slots);
+
+#ifdef COMP_IN_WQ
+ flush_workqueue(req_mgr_h->workq);
+ destroy_workqueue(req_mgr_h->workq);
+#else
+ /* Kill tasklet */
+ tasklet_kill(&req_mgr_h->comptask);
+#endif
+ memset(req_mgr_h, 0, sizeof(struct ssi_request_mgr_handle));
+ kfree(req_mgr_h);
+ drvdata->request_mgr_handle = NULL;
+}
+
+int request_mgr_init(struct ssi_drvdata *drvdata)
+{
+#ifdef CC_CYCLE_COUNT
+ HwDesc_s monitor_desc[2];
+ struct ssi_crypto_req monitor_req = {0};
+#endif
+ struct ssi_request_mgr_handle *req_mgr_h;
+ int rc = 0;
+
+ req_mgr_h = kzalloc(sizeof(struct ssi_request_mgr_handle), GFP_KERNEL);
+ if (req_mgr_h == NULL) {
+ rc = -ENOMEM;
+ goto req_mgr_init_err;
+ }
+
+ drvdata->request_mgr_handle = req_mgr_h;
+
+ spin_lock_init(&req_mgr_h->hw_lock);
+#ifdef COMP_IN_WQ
+ SSI_LOG_DEBUG("Initializing completion workqueue\n");
+ req_mgr_h->workq = create_singlethread_workqueue("arm_cc7x_wq");
+ if (unlikely(req_mgr_h->workq == NULL)) {
+ SSI_LOG_ERR("Failed creating work queue\n");
+ rc = -ENOMEM;
+ goto req_mgr_init_err;
+ }
+ INIT_DELAYED_WORK(&req_mgr_h->compwork, comp_work_handler);
+#else
+ SSI_LOG_DEBUG("Initializing completion tasklet\n");
+ tasklet_init(&req_mgr_h->comptask, comp_handler, (unsigned long)drvdata);
+#endif
+ req_mgr_h->hw_queue_size = READ_REGISTER(drvdata->cc_base +
+ CC_REG_OFFSET(CRY_KERNEL, DSCRPTR_QUEUE_SRAM_SIZE));
+ SSI_LOG_DEBUG("hw_queue_size=0x%08X\n", req_mgr_h->hw_queue_size);
+ if (req_mgr_h->hw_queue_size < MIN_HW_QUEUE_SIZE) {
+ SSI_LOG_ERR("Invalid HW queue size = %u (Min. required is %u)\n",
+ req_mgr_h->hw_queue_size, MIN_HW_QUEUE_SIZE);
+ rc = -ENOMEM;
+ goto req_mgr_init_err;
+ }
+ req_mgr_h->min_free_hw_slots = req_mgr_h->hw_queue_size;
+ req_mgr_h->max_used_sw_slots = 0;
+
+
+ /* Allocate DMA word for "dummy" completion descriptor use */
+ req_mgr_h->dummy_comp_buff = dma_alloc_coherent(&drvdata->plat_dev->dev,
+ sizeof(uint32_t), &req_mgr_h->dummy_comp_buff_dma, GFP_KERNEL);
+ if (!req_mgr_h->dummy_comp_buff) {
+ SSI_LOG_ERR("Not enough memory to allocate DMA (%zu) dropped "
+ "buffer\n", sizeof(uint32_t));
+ rc = -ENOMEM;
+ goto req_mgr_init_err;
+ }
+ SSI_UPDATE_DMA_ADDR_TO_48BIT(req_mgr_h->dummy_comp_buff_dma,
+ sizeof(uint32_t));
+
+ /* Init. "dummy" completion descriptor */
+ HW_DESC_INIT(&req_mgr_h->compl_desc);
+ HW_DESC_SET_DIN_CONST(&req_mgr_h->compl_desc, 0, sizeof(uint32_t));
+ HW_DESC_SET_DOUT_DLLI(&req_mgr_h->compl_desc,
+ req_mgr_h->dummy_comp_buff_dma,
+ sizeof(uint32_t), NS_BIT, 1);
+ HW_DESC_SET_FLOW_MODE(&req_mgr_h->compl_desc, BYPASS);
+ HW_DESC_SET_QUEUE_LAST_IND(&req_mgr_h->compl_desc);
+
+#ifdef CC_CYCLE_COUNT
+ /* For CC-HW cycle performance trace */
+ INIT_CC_MONITOR_DESC(&req_mgr_h->monitor_desc);
+ set_bit(MONITOR_CNTR_BIT, &req_mgr_h->monitor_lock);
+ monitor_desc[0] = req_mgr_h->monitor_desc;
+ monitor_desc[1] = req_mgr_h->monitor_desc;
+
+ rc = send_request(drvdata, &monitor_req, monitor_desc, 2, 0);
+ if (unlikely(rc != 0))
+ goto req_mgr_init_err;
+
+ drvdata->monitor_null_cycles = READ_REGISTER(drvdata->cc_base +
+ CC_REG_OFFSET(CRY_KERNEL, DSCRPTR_MEASURE_CNTR));
+ SSI_LOG_ERR("Calibration time=0x%08x\n", drvdata->monitor_null_cycles);
+
+ clear_bit(MONITOR_CNTR_BIT, &req_mgr_h->monitor_lock);
+#endif
+
+ return 0;
+
+req_mgr_init_err:
+ request_mgr_fini(drvdata);
+ return rc;
+}
+
+static inline void enqueue_seq(
+ void __iomem *cc_base,
+ HwDesc_s seq[], unsigned int seq_len)
+{
+ int i;
+
+ for (i = 0; i < seq_len; i++) {
+ writel_relaxed(seq[i].word[0], (volatile void __iomem *)(cc_base+CC_REG_OFFSET(CRY_KERNEL, DSCRPTR_QUEUE_WORD0)));
+ writel_relaxed(seq[i].word[1], (volatile void __iomem *)(cc_base+CC_REG_OFFSET(CRY_KERNEL, DSCRPTR_QUEUE_WORD0)));
+ writel_relaxed(seq[i].word[2], (volatile void __iomem *)(cc_base+CC_REG_OFFSET(CRY_KERNEL, DSCRPTR_QUEUE_WORD0)));
+ writel_relaxed(seq[i].word[3], (volatile void __iomem *)(cc_base+CC_REG_OFFSET(CRY_KERNEL, DSCRPTR_QUEUE_WORD0)));
+ writel_relaxed(seq[i].word[4], (volatile void __iomem *)(cc_base+CC_REG_OFFSET(CRY_KERNEL, DSCRPTR_QUEUE_WORD0)));
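+ /* The write of word[5] below completes the descriptor; the barrier
+ * ensures the preceding relaxed writes of words 0-4 are ordered
+ * before it */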
+ wmb();
+ writel_relaxed(seq[i].word[5], (volatile void __iomem *)(cc_base+CC_REG_OFFSET(CRY_KERNEL, DSCRPTR_QUEUE_WORD0)));
+#ifdef DX_DUMP_DESCS
+ SSI_LOG_DEBUG("desc[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", i,
+ seq[i].word[0], seq[i].word[1], seq[i].word[2], seq[i].word[3], seq[i].word[4], seq[i].word[5]);
+#endif
+ }
+}
+
+/*!
+ * Completion will take place if and only if user requested completion
+ * by setting "is_dout = 0" in send_request().
+ *
+ * \param dev
+ * \param dx_compl_h The completion event to signal
+ */
+static void request_mgr_complete(struct device *dev, void *dx_compl_h, void __iomem *cc_base)
+{
+ struct completion *this_compl = dx_compl_h;
+ complete(this_compl);
+}
+
+
+static inline int request_mgr_queues_status_check(
+ struct ssi_request_mgr_handle *req_mgr_h,
+ void __iomem *cc_base,
+ unsigned int total_seq_len)
+{
+ unsigned long poll_queue;
+
+ /* The SW queue is checked only once as it will not
+ * be changed during the poll because the spinlock_bh
+ * is held by the thread */
+ if (unlikely(((req_mgr_h->req_queue_head + 1) &
+ (MAX_REQUEST_QUEUE_SIZE - 1)) ==
+ req_mgr_h->req_queue_tail)) {
+ SSI_LOG_ERR("SW FIFO is full. req_queue_head=%d sw_fifo_len=%d\n",
+ req_mgr_h->req_queue_head, MAX_REQUEST_QUEUE_SIZE);
+ return -EBUSY;
+ }
+
+ if (likely(req_mgr_h->q_free_slots >= total_seq_len)) {
+ return 0;
+ }
+ /* Wait for space in HW queue. Poll constant num of iterations. */
+ for (poll_queue = 0; poll_queue < SSI_MAX_POLL_ITER; poll_queue++) {
+ req_mgr_h->q_free_slots =
+ CC_HAL_READ_REGISTER(
+ CC_REG_OFFSET(CRY_KERNEL,
+ DSCRPTR_QUEUE_CONTENT));
+ if (unlikely(req_mgr_h->q_free_slots <
+ req_mgr_h->min_free_hw_slots)) {
+ req_mgr_h->min_free_hw_slots = req_mgr_h->q_free_slots;
+ }
+
+ if (likely(req_mgr_h->q_free_slots >= total_seq_len)) {
+ /* If there is enough space, return */
+ return 0;
+ }
+
+ SSI_LOG_DEBUG("HW FIFO is full. q_free_slots=%d total_seq_len=%d\n",
+ req_mgr_h->q_free_slots, total_seq_len);
+ }
+ /* No room in the HW queue; try again later */
+ SSI_LOG_DEBUG("HW FIFO full, timeout. req_queue_head=%d "
+ "sw_fifo_len=%d q_free_slots=%d total_seq_len=%d\n",
+ req_mgr_h->req_queue_head,
+ MAX_REQUEST_QUEUE_SIZE,
+ req_mgr_h->q_free_slots,
+ total_seq_len);
+ return -EAGAIN;
+}
+
+/*!
+ * Enqueue caller request to crypto hardware.
+ *
+ * \param drvdata
+ * \param ssi_req The request to enqueue
+ * \param desc The crypto sequence
+ * \param len The crypto sequence length
+ * \param is_dout If "true": completion is handled by the caller
+ * If "false": this function adds a dummy descriptor completion
+ * and waits upon completion signal.
+ *
+ * \return int Returns -EINPROGRESS if "is_dout=true"; "0" if "is_dout=false"
+ */
+int send_request(
+ struct ssi_drvdata *drvdata, struct ssi_crypto_req *ssi_req,
+ HwDesc_s *desc, unsigned int len, bool is_dout)
+{
+ void __iomem *cc_base = drvdata->cc_base;
+ struct ssi_request_mgr_handle *req_mgr_h = drvdata->request_mgr_handle;
+ unsigned int used_sw_slots;
+ unsigned int iv_seq_len = 0;
+ unsigned int total_seq_len = len; /*initial sequence length*/
+ HwDesc_s iv_seq[SSI_IVPOOL_SEQ_LEN];
+ int rc;
+ unsigned int max_required_seq_len = (total_seq_len +
+ ((ssi_req->ivgen_dma_addr_len == 0) ? 0 :
+ SSI_IVPOOL_SEQ_LEN) +
+ ((is_dout == 0) ? 1 : 0));
+ DECL_CYCLE_COUNT_RESOURCES;
+
+#if defined (CONFIG_PM_RUNTIME) || defined (CONFIG_PM_SLEEP)
+ rc = ssi_power_mgr_runtime_get(&drvdata->plat_dev->dev);
+ if (rc != 0) {
+ SSI_LOG_ERR("ssi_power_mgr_runtime_get returned %x\n",rc);
+ spin_unlock_bh(&req_mgr_h->hw_lock);
+ return rc;
+ }
+#endif
+
+ do {
+ spin_lock_bh(&req_mgr_h->hw_lock);
+
+ /* Check if there is enough space in the SW/HW queues:
+ * in case of IV generation add the maximum size, and in case
+ * of no DOUT add 1 for the internal completion descriptor */
+ rc = request_mgr_queues_status_check(req_mgr_h,
+ cc_base,
+ max_required_seq_len);
+ if (likely(rc == 0))
+ /* There is enough space in the queue */
+ break;
+ /* Something went wrong - release the spinlock */
+ spin_unlock_bh(&req_mgr_h->hw_lock);
+
+ if (rc != -EAGAIN) {
+ /* Any error other than HW queue full
+ (SW queue is full) */
+#if defined (CONFIG_PM_RUNTIME) || defined (CONFIG_PM_SLEEP)
+ ssi_power_mgr_runtime_put_suspend(&drvdata->plat_dev->dev);
+#endif
+ return rc;
+ }
+
+ /* HW queue is full - short sleep */
+ msleep(1);
+ } while (1);
+
+ /* An additional completion descriptor is needed in case the caller did
+ * not enable any DLLI/MLLI DOUT bit in the given sequence */
+ if (!is_dout) {
+ init_completion(&ssi_req->seq_compl);
+ ssi_req->user_cb = request_mgr_complete;
+ ssi_req->user_arg = &(ssi_req->seq_compl);
+ total_seq_len++;
+ }
+
+ if (ssi_req->ivgen_dma_addr_len > 0) {
+ SSI_LOG_DEBUG("Acquire IV from pool into %d DMA addresses 0x%llX, 0x%llX, 0x%llX, IV-size=%u\n",
+ ssi_req->ivgen_dma_addr_len,
+ (unsigned long long)ssi_req->ivgen_dma_addr[0],
+ (unsigned long long)ssi_req->ivgen_dma_addr[1],
+ (unsigned long long)ssi_req->ivgen_dma_addr[2],
+ ssi_req->ivgen_size);
+
+ /* Acquire IV from pool */
+ rc = ssi_ivgen_getiv(drvdata, ssi_req->ivgen_dma_addr, ssi_req->ivgen_dma_addr_len,
+ ssi_req->ivgen_size, iv_seq, &iv_seq_len);
+
+ if (unlikely(rc != 0)) {
+ SSI_LOG_ERR("Failed to generate IV (rc=%d)\n", rc);
+ spin_unlock_bh(&req_mgr_h->hw_lock);
+#if defined (CONFIG_PM_RUNTIME) || defined (CONFIG_PM_SLEEP)
+ ssi_power_mgr_runtime_put_suspend(&drvdata->plat_dev->dev);
+#endif
+ return rc;
+ }
+
+ total_seq_len += iv_seq_len;
+ }
+
+ used_sw_slots = ((req_mgr_h->req_queue_head - req_mgr_h->req_queue_tail) & (MAX_REQUEST_QUEUE_SIZE-1));
+ if (unlikely(used_sw_slots > req_mgr_h->max_used_sw_slots)) {
+ req_mgr_h->max_used_sw_slots = used_sw_slots;
+ }
+
+ CC_CYCLE_DESC_HEAD(cc_base, &req_mgr_h->monitor_desc,
+ &req_mgr_h->monitor_lock, &ssi_req->is_monitored_p);
+
+ /* Enqueue request - must be locked with HW lock*/
+ req_mgr_h->req_queue[req_mgr_h->req_queue_head] = *ssi_req;
+ START_CYCLE_COUNT_AT(req_mgr_h->req_queue[req_mgr_h->req_queue_head].submit_cycle);
+ req_mgr_h->req_queue_head = (req_mgr_h->req_queue_head + 1) & (MAX_REQUEST_QUEUE_SIZE - 1);
+ /* TODO: Use circ_buf.h ? */
+
+ SSI_LOG_DEBUG("Enqueue request head=%u\n", req_mgr_h->req_queue_head);
+
+#ifdef FLUSH_CACHE_ALL
+ flush_cache_all();
+#endif
+
+ /* STAT_PHASE_4: Push sequence */
+ START_CYCLE_COUNT();
+ enqueue_seq(cc_base, iv_seq, iv_seq_len);
+ enqueue_seq(cc_base, desc, len);
+ enqueue_seq(cc_base, &req_mgr_h->compl_desc, (is_dout ? 0 : 1));
+ END_CYCLE_COUNT(ssi_req->op_type, STAT_PHASE_4);
+
+ CC_CYCLE_DESC_TAIL(cc_base, &req_mgr_h->monitor_desc, ssi_req->is_monitored_p);
+
+ if (unlikely(req_mgr_h->q_free_slots < total_seq_len)) {
+ /* This means that there was a problem with the resume */
+ BUG();
+ }
+ /* Update the free slots in HW queue */
+ req_mgr_h->q_free_slots -= total_seq_len;
+
+ spin_unlock_bh(&req_mgr_h->hw_lock);
+
+ if (!is_dout) {
+ /* Wait upon sequence completion.
+ * Return "0" - operation completed successfully. */
+ return wait_for_completion_interruptible(&ssi_req->seq_compl);
+ } else {
+ /* Operation still in process */
+ return -EINPROGRESS;
+ }
+}
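+
+/*
+ * Usage sketch (illustrative only): a synchronous caller builds a descriptor
+ * sequence and lets send_request() append the completion descriptor and wait
+ * (build_my_sequence() and MAX_SEQ are hypothetical placeholders):
+ *
+ *	struct ssi_crypto_req ssi_req = {0};
+ *	HwDesc_s desc[MAX_SEQ];
+ *	unsigned int seq_len = 0;
+ *
+ *	build_my_sequence(desc, &seq_len);
+ *	rc = send_request(drvdata, &ssi_req, desc, seq_len, false);
+ *
+ * rc is 0 on success; with is_dout=true the call instead returns -EINPROGRESS
+ * and the caller's completion callback in ssi_req handles the result.
+ */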
+
+
+/*!
+ * Enqueue caller request to crypto hardware during init process.
+ * Assumes this function is not called in the middle of a flow,
+ * since we set the QUEUE_LAST_IND flag in the last descriptor.
+ *
+ * \param drvdata
+ * \param desc The crypto sequence
+ * \param len The crypto sequence length
+ *
+ * \return int Returns "0" upon success
+ */
+int send_request_init(
+ struct ssi_drvdata *drvdata, HwDesc_s *desc, unsigned int len)
+{
+ void __iomem *cc_base = drvdata->cc_base;
+ struct ssi_request_mgr_handle *req_mgr_h = drvdata->request_mgr_handle;
+ unsigned int total_seq_len = len; /*initial sequence length*/
+ int rc = 0;
+
+ /* Wait for space in HW and SW FIFO. Poll for as much as FIFO_TIMEOUT. */
+ rc = request_mgr_queues_status_check(req_mgr_h, cc_base, total_seq_len);
+ if (unlikely(rc != 0)) {
+ return rc;
+ }
+ HW_DESC_SET_QUEUE_LAST_IND(&desc[len-1]);
+
+ enqueue_seq(cc_base, desc, len);
+
+ /* Update the free slots in HW queue */
+ req_mgr_h->q_free_slots = CC_HAL_READ_REGISTER(
+ CC_REG_OFFSET(CRY_KERNEL,
+ DSCRPTR_QUEUE_CONTENT));
+
+ return 0;
+}
+
+
+void complete_request(struct ssi_drvdata *drvdata)
+{
+ struct ssi_request_mgr_handle *request_mgr_handle =
+ drvdata->request_mgr_handle;
+#ifdef COMP_IN_WQ
+ queue_delayed_work(request_mgr_handle->workq, &request_mgr_handle->compwork, 0);
+#else
+ tasklet_schedule(&request_mgr_handle->comptask);
+#endif
+}
+
+#ifdef COMP_IN_WQ
+static void comp_work_handler(struct work_struct *work)
+{
+ struct ssi_drvdata *drvdata =
+ container_of(work, struct ssi_drvdata, compwork.work);
+
+ comp_handler((unsigned long)drvdata);
+}
+#endif
+
+static void proc_completions(struct ssi_drvdata *drvdata)
+{
+ struct ssi_crypto_req *ssi_req;
+ struct platform_device *plat_dev = drvdata->plat_dev;
+ struct ssi_request_mgr_handle * request_mgr_handle =
+ drvdata->request_mgr_handle;
+#if defined (CONFIG_PM_RUNTIME) || defined (CONFIG_PM_SLEEP)
+ int rc = 0;
+#endif
+ DECL_CYCLE_COUNT_RESOURCES;
+
+ while (request_mgr_handle->axi_completed) {
+ request_mgr_handle->axi_completed--;
+
+ /* Dequeue request */
+ if (unlikely(request_mgr_handle->req_queue_head == request_mgr_handle->req_queue_tail)) {
+ SSI_LOG_ERR("Request queue is empty req_queue_head==req_queue_tail==%u\n", request_mgr_handle->req_queue_head);
+ BUG();
+ }
+
+ ssi_req = &request_mgr_handle->req_queue[request_mgr_handle->req_queue_tail];
+ END_CYCLE_COUNT_AT(ssi_req->submit_cycle, ssi_req->op_type, STAT_PHASE_5); /* Seq. Comp. */
+ END_CC_MONITOR_COUNT(drvdata->cc_base, ssi_req->op_type, STAT_PHASE_6,
+ drvdata->monitor_null_cycles, &request_mgr_handle->monitor_lock, ssi_req->is_monitored_p);
+
+#ifdef FLUSH_CACHE_ALL
+ flush_cache_all();
+#endif
+
+#ifdef COMPLETION_DELAY
+ /* Delay */
+ {
+ uint32_t axi_err;
+ int i;
+ SSI_LOG_INFO("Delay\n");
+ for (i = 0; i < 1000000; i++) {
+ axi_err = READ_REGISTER(drvdata->cc_base + CC_REG_OFFSET(CRY_KERNEL, AXIM_MON_ERR));
+ }
+ }
+#endif /* COMPLETION_DELAY */
+
+ if (likely(ssi_req->user_cb != NULL)) {
+ START_CYCLE_COUNT();
+ ssi_req->user_cb(&plat_dev->dev, ssi_req->user_arg, drvdata->cc_base);
+ END_CYCLE_COUNT(STAT_OP_TYPE_GENERIC, STAT_PHASE_3);
+ }
+ request_mgr_handle->req_queue_tail = (request_mgr_handle->req_queue_tail + 1) & (MAX_REQUEST_QUEUE_SIZE - 1);
+ SSI_LOG_DEBUG("Dequeue request tail=%u\n", request_mgr_handle->req_queue_tail);
+ SSI_LOG_DEBUG("Request completed. axi_completed=%d\n", request_mgr_handle->axi_completed);
+#if defined (CONFIG_PM_RUNTIME) || defined (CONFIG_PM_SLEEP)
+ rc = ssi_power_mgr_runtime_put_suspend(&plat_dev->dev);
+ if (rc != 0) {
+ SSI_LOG_ERR("Failed to set runtime suspension %d\n",rc);
+ }
+#endif
+ }
+}
+
+/* Deferred service handler, run as interrupt-fired tasklet */
+static void comp_handler(unsigned long devarg)
+{
+ struct ssi_drvdata *drvdata = (struct ssi_drvdata *)devarg;
+ void __iomem *cc_base = drvdata->cc_base;
+ struct ssi_request_mgr_handle * request_mgr_handle =
+ drvdata->request_mgr_handle;
+
+ uint32_t irq;
+
+ DECL_CYCLE_COUNT_RESOURCES;
+
+ START_CYCLE_COUNT();
+
+ irq = (drvdata->irq & SSI_COMP_IRQ_MASK);
+
+ if (irq & SSI_COMP_IRQ_MASK) {
+ /* To avoid the interrupt from firing as we unmask it, we clear it now */
+ CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_ICR), SSI_COMP_IRQ_MASK);
+
+ /* Avoid race with above clear: Test completion counter once more */
+ request_mgr_handle->axi_completed += CC_REG_FLD_GET(CRY_KERNEL, AXIM_MON_COMP, VALUE,
+ CC_HAL_READ_REGISTER(AXIM_MON_BASE_OFFSET));
+
+ /* ISR-to-Tasklet latency */
+ if (request_mgr_handle->axi_completed) {
+ /* Only count if this actually reflects ISR-to-completion-handling latency, i.e.,
+ it is not a duplicate caused by an interrupt arriving after the AXIM_MON_ERR clear, before the end of the loop */
+ END_CYCLE_COUNT_AT(drvdata->isr_exit_cycles, STAT_OP_TYPE_GENERIC, STAT_PHASE_1);
+ }
+
+ while (request_mgr_handle->axi_completed) {
+ do {
+ proc_completions(drvdata);
+ /* At this point (after proc_completions()), request_mgr_handle->axi_completed is always 0.
+ The following assignment was changed to "=" (previously "+=") to conform to KW restrictions. */
+ request_mgr_handle->axi_completed = CC_REG_FLD_GET(CRY_KERNEL, AXIM_MON_COMP, VALUE,
+ CC_HAL_READ_REGISTER(AXIM_MON_BASE_OFFSET));
+ } while (request_mgr_handle->axi_completed > 0);
+
+ /* To avoid the interrupt from firing as we unmask it, we clear it now */
+ CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_ICR), SSI_COMP_IRQ_MASK);
+
+ /* Avoid race with above clear: Test completion counter once more */
+ request_mgr_handle->axi_completed += CC_REG_FLD_GET(CRY_KERNEL, AXIM_MON_COMP, VALUE,
+ CC_HAL_READ_REGISTER(AXIM_MON_BASE_OFFSET));
+ }
+
+ }
+ /* After verifying that there is nothing to do, unmask the AXI completion interrupt */
+ CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_IMR),
+ CC_HAL_READ_REGISTER(
+ CC_REG_OFFSET(HOST_RGF, HOST_IMR)) & ~irq);
+ END_CYCLE_COUNT(STAT_OP_TYPE_GENERIC, STAT_PHASE_2);
+}
+
+/*
+ * Resume the queue configuration - no need for the caller to take the lock,
+ * as it is taken inside under spinlock protection.
+ */
+#if defined (CONFIG_PM_RUNTIME) || defined (CONFIG_PM_SLEEP)
+int ssi_request_mgr_runtime_resume_queue(struct ssi_drvdata *drvdata)
+{
+ struct ssi_request_mgr_handle * request_mgr_handle = drvdata->request_mgr_handle;
+
+ spin_lock_bh(&request_mgr_handle->hw_lock);
+ request_mgr_handle->is_runtime_suspended = false;
+ spin_unlock_bh(&request_mgr_handle->hw_lock);
+
+ return 0 ;
+}
+
+/*
+ * Suspend the queue configuration. Since it is used only for runtime suspend,
+ * just verify that the queue can be suspended.
+ */
+int ssi_request_mgr_runtime_suspend_queue(struct ssi_drvdata *drvdata)
+{
+ struct ssi_request_mgr_handle * request_mgr_handle =
+ drvdata->request_mgr_handle;
+
+ /* lock the send_request */
+ spin_lock_bh(&request_mgr_handle->hw_lock);
+ if (request_mgr_handle->req_queue_head !=
+ request_mgr_handle->req_queue_tail) {
+ spin_unlock_bh(&request_mgr_handle->hw_lock);
+ return -EBUSY;
+ }
+ request_mgr_handle->is_runtime_suspended = true;
+ spin_unlock_bh(&request_mgr_handle->hw_lock);
+
+ return 0;
+}
+
+bool ssi_request_mgr_is_queue_runtime_suspend(struct ssi_drvdata *drvdata)
+{
+ struct ssi_request_mgr_handle * request_mgr_handle =
+ drvdata->request_mgr_handle;
+
+ return request_mgr_handle->is_runtime_suspended;
+}
+
+#endif
+
diff --git a/drivers/staging/ccree/ssi_request_mgr.h b/drivers/staging/ccree/ssi_request_mgr.h
new file mode 100644
index 000000000000..c09339b566d0
--- /dev/null
+++ b/drivers/staging/ccree/ssi_request_mgr.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2012-2017 ARM Limited or its affiliates.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/* \file request_mgr.h
+ Request Manager
+ */
+
+#ifndef __REQUEST_MGR_H__
+#define __REQUEST_MGR_H__
+
+#include "cc_hw_queue_defs.h"
+
+int request_mgr_init(struct ssi_drvdata *drvdata);
+
+/*!
+ * Enqueue caller request to crypto hardware.
+ *
+ * \param drvdata
+ * \param ssi_req The request to enqueue
+ * \param desc The crypto sequence
+ * \param len The crypto sequence length
+ * \param is_dout If "true": completion is handled by the caller
+ * If "false": this function adds a dummy descriptor completion
+ * and waits upon completion signal.
+ *
+ * \return int Returns -EINPROGRESS if "is_dout=ture"; "0" if "is_dout=false"
+ */
+int send_request(
+ struct ssi_drvdata *drvdata, struct ssi_crypto_req *ssi_req,
+ HwDesc_s *desc, unsigned int len, bool is_dout);
+
+int send_request_init(
+ struct ssi_drvdata *drvdata, HwDesc_s *desc, unsigned int len);
+
+void complete_request(struct ssi_drvdata *drvdata);
+
+void request_mgr_fini(struct ssi_drvdata *drvdata);
+
+#if defined (CONFIG_PM_RUNTIME) || defined (CONFIG_PM_SLEEP)
+int ssi_request_mgr_runtime_resume_queue(struct ssi_drvdata *drvdata);
+
+int ssi_request_mgr_runtime_suspend_queue(struct ssi_drvdata *drvdata);
+
+bool ssi_request_mgr_is_queue_runtime_suspend(struct ssi_drvdata *drvdata);
+#endif
+
+#endif /*__REQUEST_MGR_H__*/
diff --git a/drivers/staging/ccree/ssi_sram_mgr.c b/drivers/staging/ccree/ssi_sram_mgr.c
new file mode 100644
index 000000000000..50066e17d1d3
--- /dev/null
+++ b/drivers/staging/ccree/ssi_sram_mgr.c
@@ -0,0 +1,138 @@
+/*
+ * Copyright (C) 2012-2017 ARM Limited or its affiliates.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "ssi_driver.h"
+#include "ssi_sram_mgr.h"
+
+
+/**
+ * struct ssi_sram_mgr_ctx - Internal RAM context manager
+ * @sram_free_offset: the offset to the non-allocated area
+ */
+struct ssi_sram_mgr_ctx {
+ ssi_sram_addr_t sram_free_offset;
+};
+
+
+/**
+ * ssi_sram_mgr_fini() - Cleanup SRAM pool.
+ *
+ * @drvdata: Associated device driver context
+ */
+void ssi_sram_mgr_fini(struct ssi_drvdata *drvdata)
+{
+ struct ssi_sram_mgr_ctx *smgr_ctx = drvdata->sram_mgr_handle;
+
+ /* Free "this" context */
+ if (smgr_ctx != NULL) {
+ memset(smgr_ctx, 0, sizeof(struct ssi_sram_mgr_ctx));
+ kfree(smgr_ctx);
+ }
+}
+
+/**
+ * ssi_sram_mgr_init() - Initializes SRAM pool.
+ * The pool starts right at the beginning of SRAM.
+ * Returns zero for success, negative value otherwise.
+ *
+ * @drvdata: Associated device driver context
+ */
+int ssi_sram_mgr_init(struct ssi_drvdata *drvdata)
+{
+ struct ssi_sram_mgr_ctx *smgr_ctx;
+ int rc;
+
+ /* Allocate "this" context */
+ drvdata->sram_mgr_handle = kzalloc(
+ sizeof(struct ssi_sram_mgr_ctx), GFP_KERNEL);
+ if (!drvdata->sram_mgr_handle) {
+ SSI_LOG_ERR("Not enough memory to allocate SRAM_MGR ctx (%zu)\n",
+ sizeof(struct ssi_sram_mgr_ctx));
+ rc = -ENOMEM;
+ goto out;
+ }
+ smgr_ctx = drvdata->sram_mgr_handle;
+
+ /* Pool starts at start of SRAM */
+ smgr_ctx->sram_free_offset = 0;
+
+ return 0;
+
+out:
+ ssi_sram_mgr_fini(drvdata);
+ return rc;
+}
+
+/*!
+ * Allocate a buffer from the SRAM pool.
+ * Note: The caller is responsible for freeing the LAST allocated buffer.
+ * This function does not take care of any fragmentation that may occur
+ * due to the order of alloc/free calls.
+ *
+ * \param drvdata
+ * \param size The requested bytes to allocate
+ */
+ssi_sram_addr_t ssi_sram_mgr_alloc(struct ssi_drvdata *drvdata, uint32_t size)
+{
+ struct ssi_sram_mgr_ctx *smgr_ctx = drvdata->sram_mgr_handle;
+ ssi_sram_addr_t p;
+
+ if (unlikely((size & 0x3) != 0)) {
+ SSI_LOG_ERR("Requested buffer size (%u) is not multiple of 4",
+ size);
+ return NULL_SRAM_ADDR;
+ }
+ if (unlikely(size > (SSI_CC_SRAM_SIZE - smgr_ctx->sram_free_offset))) {
+ SSI_LOG_ERR("Not enough space to allocate %u B (at offset %llu)\n",
+ size, smgr_ctx->sram_free_offset);
+ return NULL_SRAM_ADDR;
+ }
+
+ p = smgr_ctx->sram_free_offset;
+ smgr_ctx->sram_free_offset += size;
+ SSI_LOG_DEBUG("Allocated %u B @ %u\n", size, (unsigned int)p);
+ return p;
+}
+
+/**
+ * ssi_sram_mgr_const2sram_desc() - Create a sequence of constant-value
+ * descriptors that writes the given array of words into SRAM.
+ * Note: each constant value can't exceed the word size.
+ *
+ * @src: A pointer to the array of words to write as constants.
+ * @dst: The target SRAM buffer to write into
+ * @nelement: The number of words in the "src" array
+ * @seq: A pointer to the given IN/OUT descriptor sequence
+ * @seq_len: A pointer to the given IN/OUT sequence length
+ */
+void ssi_sram_mgr_const2sram_desc(
+ const uint32_t *src, ssi_sram_addr_t dst,
+ unsigned int nelement,
+ HwDesc_s *seq, unsigned int *seq_len)
+{
+ uint32_t i;
+ unsigned int idx = *seq_len;
+
+ for (i = 0; i < nelement; i++, idx++) {
+ HW_DESC_INIT(&seq[idx]);
+ HW_DESC_SET_DIN_CONST(&seq[idx], src[i], sizeof(uint32_t));
+ HW_DESC_SET_DOUT_SRAM(&seq[idx], dst + (i * sizeof(uint32_t)), sizeof(uint32_t));
+ HW_DESC_SET_FLOW_MODE(&seq[idx], BYPASS);
+ }
+
+ *seq_len = idx;
+}
+
diff --git a/drivers/staging/ccree/ssi_sram_mgr.h b/drivers/staging/ccree/ssi_sram_mgr.h
new file mode 100644
index 000000000000..d71fbaf9ac44
--- /dev/null
+++ b/drivers/staging/ccree/ssi_sram_mgr.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright (C) 2012-2017 ARM Limited or its affiliates.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __SSI_SRAM_MGR_H__
+#define __SSI_SRAM_MGR_H__
+
+
+#ifndef SSI_CC_SRAM_SIZE
+#define SSI_CC_SRAM_SIZE 4096
+#endif
+
+struct ssi_drvdata;
+
+/**
+ * Address (offset) within CC internal SRAM
+ */
+
+typedef uint64_t ssi_sram_addr_t;
+
+#define NULL_SRAM_ADDR ((ssi_sram_addr_t)-1)
+
+/*!
+ * Initializes SRAM pool.
+ * The first X bytes of SRAM are reserved for ROM usage, hence the pool
+ * starts right after those X bytes.
+ *
+ * \param drvdata
+ *
+ * \return int Zero for success, negative value otherwise.
+ */
+int ssi_sram_mgr_init(struct ssi_drvdata *drvdata);
+
+/*!
+ * Uninitializes the SRAM pool.
+ *
+ * \param drvdata
+ */
+void ssi_sram_mgr_fini(struct ssi_drvdata *drvdata);
+
+/*!
+ * Allocate a buffer from the SRAM pool.
+ * Note: The caller is responsible for freeing the LAST allocated buffer.
+ * This function does not take care of any fragmentation that may occur
+ * due to the order of alloc/free calls.
+ *
+ * \param drvdata
+ * \param size The requested bytes to allocate
+ */
+ssi_sram_addr_t ssi_sram_mgr_alloc(struct ssi_drvdata *drvdata, uint32_t size);
+
+/**
+ * ssi_sram_mgr_const2sram_desc() - Create a sequence of constant-value
+ * descriptors that writes the given array of words into SRAM.
+ * Note: each constant value can't exceed the word size.
+ *
+ * @src: A pointer to the array of words to write as constants.
+ * @dst: The target SRAM buffer to write into
+ * @nelement: The number of words in the "src" array
+ * @seq: A pointer to the given IN/OUT descriptor sequence
+ * @seq_len: A pointer to the given IN/OUT sequence length
+ */
+void ssi_sram_mgr_const2sram_desc(
+ const uint32_t *src, ssi_sram_addr_t dst,
+ unsigned int nelement,
+ HwDesc_s *seq, unsigned int *seq_len);
+
+#endif /*__SSI_SRAM_MGR_H__*/
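Taken together, ssi_sram_mgr_alloc() hands out word-aligned offsets from a simple bump allocator and ssi_sram_mgr_const2sram_desc() appends one BYPASS descriptor per word to seed the allocation. A sketch under those assumptions (the wrapper name is hypothetical, and the resulting sequence still has to be dispatched through send_request()):

#include "ssi_driver.h"
#include "ssi_request_mgr.h"
#include "ssi_sram_mgr.h"

static ssi_sram_addr_t ccree_seed_sram_sketch(struct ssi_drvdata *drvdata,
					      const uint32_t *vals,
					      unsigned int nwords,
					      HwDesc_s *seq,
					      unsigned int *seq_len)
{
	ssi_sram_addr_t sram_buf;

	/* Size must be a multiple of 4; NULL_SRAM_ADDR signals exhaustion. */
	sram_buf = ssi_sram_mgr_alloc(drvdata, nwords * sizeof(uint32_t));
	if (sram_buf == NULL_SRAM_ADDR)
		return NULL_SRAM_ADDR;

	/* Appends nwords BYPASS descriptors and advances *seq_len. */
	ssi_sram_mgr_const2sram_desc(vals, sram_buf, nwords, seq, seq_len);
	return sram_buf;
}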
diff --git a/drivers/staging/ccree/ssi_sysfs.c b/drivers/staging/ccree/ssi_sysfs.c
new file mode 100644
index 000000000000..7c514c1072a9
--- /dev/null
+++ b/drivers/staging/ccree/ssi_sysfs.c
@@ -0,0 +1,439 @@
+/*
+ * Copyright (C) 2012-2017 ARM Limited or its affiliates.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/kernel.h>
+#include "ssi_config.h"
+#include "ssi_driver.h"
+#include "cc_crypto_ctx.h"
+#include "ssi_sysfs.h"
+
+#ifdef ENABLE_CC_SYSFS
+
+static struct ssi_drvdata *sys_get_drvdata(void);
+
+#ifdef CC_CYCLE_COUNT
+
+#include <asm/timex.h>
+
+struct stat_item {
+ unsigned int min;
+ unsigned int max;
+ cycles_t sum;
+ unsigned int count;
+};
+
+struct stat_name {
+ const char *op_type_name;
+ const char *stat_phase_name[MAX_STAT_PHASES];
+};
+
+static struct stat_name stat_name_db[MAX_STAT_OP_TYPES] =
+{
+ {
+ /* STAT_OP_TYPE_NULL */
+ .op_type_name = "NULL",
+ .stat_phase_name = {NULL},
+ },
+ {
+ .op_type_name = "Encode",
+ .stat_phase_name[STAT_PHASE_0] = "Init and sanity checks",
+ .stat_phase_name[STAT_PHASE_1] = "Map buffers",
+ .stat_phase_name[STAT_PHASE_2] = "Create sequence",
+ .stat_phase_name[STAT_PHASE_3] = "Send Request",
+ .stat_phase_name[STAT_PHASE_4] = "HW-Q push",
+ .stat_phase_name[STAT_PHASE_5] = "Sequence completion",
+ .stat_phase_name[STAT_PHASE_6] = "HW cycles",
+ },
+ { .op_type_name = "Decode",
+ .stat_phase_name[STAT_PHASE_0] = "Init and sanity checks",
+ .stat_phase_name[STAT_PHASE_1] = "Map buffers",
+ .stat_phase_name[STAT_PHASE_2] = "Create sequence",
+ .stat_phase_name[STAT_PHASE_3] = "Send Request",
+ .stat_phase_name[STAT_PHASE_4] = "HW-Q push",
+ .stat_phase_name[STAT_PHASE_5] = "Sequence completion",
+ .stat_phase_name[STAT_PHASE_6] = "HW cycles",
+ },
+ { .op_type_name = "Setkey",
+ .stat_phase_name[STAT_PHASE_0] = "Init and sanity checks",
+ .stat_phase_name[STAT_PHASE_1] = "Copy key to ctx",
+ .stat_phase_name[STAT_PHASE_2] = "Create sequence",
+ .stat_phase_name[STAT_PHASE_3] = "Send Request",
+ .stat_phase_name[STAT_PHASE_4] = "HW-Q push",
+ .stat_phase_name[STAT_PHASE_5] = "Sequence completion",
+ .stat_phase_name[STAT_PHASE_6] = "HW cycles",
+ },
+ {
+ .op_type_name = "Generic",
+ .stat_phase_name[STAT_PHASE_0] = "Interrupt",
+ .stat_phase_name[STAT_PHASE_1] = "ISR-to-Tasklet",
+ .stat_phase_name[STAT_PHASE_2] = "Tasklet start-to-end",
+ .stat_phase_name[STAT_PHASE_3] = "Tasklet:user_cb()",
+ .stat_phase_name[STAT_PHASE_4] = "Tasklet:dx_X_complete() - w/o X_complete()",
+ .stat_phase_name[STAT_PHASE_5] = "",
+ .stat_phase_name[STAT_PHASE_6] = "HW cycles",
+ }
+};
+
+static DEFINE_SPINLOCK(stat_lock);
+
+/* List of DBs */
+static struct stat_item stat_host_db[MAX_STAT_OP_TYPES][MAX_STAT_PHASES];
+static struct stat_item stat_cc_db[MAX_STAT_OP_TYPES][MAX_STAT_PHASES];
+
+
+static void init_db(struct stat_item item[MAX_STAT_OP_TYPES][MAX_STAT_PHASES])
+{
+ unsigned int i, j;
+
+ /* Clear db */
+ for (i=0; i<MAX_STAT_OP_TYPES; i++) {
+ for (j=0; j<MAX_STAT_PHASES; j++) {
+ item[i][j].min = 0xFFFFFFFF;
+ item[i][j].max = 0;
+ item[i][j].sum = 0;
+ item[i][j].count = 0;
+ }
+ }
+}
+
+static void update_db(struct stat_item *item, unsigned int result)
+{
+ item->count++;
+ item->sum += result;
+ if (result < item->min)
+ item->min = result;
+ if (result > item->max )
+ item->max = result;
+}
+
+static void display_db(struct stat_item item[MAX_STAT_OP_TYPES][MAX_STAT_PHASES])
+{
+ unsigned int i, j;
+ uint64_t avg;
+
+ for (i=STAT_OP_TYPE_ENCODE; i<MAX_STAT_OP_TYPES; i++) {
+ for (j=0; j<MAX_STAT_PHASES; j++) {
+ if (item[i][j].count > 0) {
+ avg = (uint64_t)item[i][j].sum;
+ do_div(avg, item[i][j].count);
+ SSI_LOG_ERR("%s, %s: min=%d avg=%d max=%d sum=%lld count=%d\n",
+ stat_name_db[i].op_type_name, stat_name_db[i].stat_phase_name[j],
+ item[i][j].min, (int)avg, item[i][j].max, (long long)item[i][j].sum, item[i][j].count);
+ }
+ }
+ }
+}
+
+
+/**************************************
+ * Attributes show functions section *
+ **************************************/
+
+static ssize_t ssi_sys_stats_host_db_clear(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buf, size_t count)
+{
+ init_db(stat_host_db);
+ return count;
+}
+
+static ssize_t ssi_sys_stats_cc_db_clear(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buf, size_t count)
+{
+ init_db(stat_cc_db);
+ return count;
+}
+
+static ssize_t ssi_sys_stat_host_db_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ int i, j ;
+ char line[512];
+ uint32_t min_cyc, max_cyc;
+ uint64_t avg;
+ ssize_t buf_len, tmp_len=0;
+
+ buf_len = scnprintf(buf,PAGE_SIZE,
+ "phase\t\t\t\t\t\t\tmin[cy]\tavg[cy]\tmax[cy]\t#samples\n");
+ if ( buf_len <0 )/* scnprintf shouldn't return negative value according to its implementation*/
+ return buf_len;
+ for (i=STAT_OP_TYPE_ENCODE; i<MAX_STAT_OP_TYPES; i++) {
+ for (j=0; j<MAX_STAT_PHASES-1; j++) {
+ if (stat_host_db[i][j].count > 0) {
+ avg = (uint64_t)stat_host_db[i][j].sum;
+ do_div(avg, stat_host_db[i][j].count);
+ min_cyc = stat_host_db[i][j].min;
+ max_cyc = stat_host_db[i][j].max;
+ } else {
+ avg = min_cyc = max_cyc = 0;
+ }
+ tmp_len = scnprintf(line,512,
+ "%s::%s\t\t\t\t\t%6u\t%6u\t%6u\t%7u\n",
+ stat_name_db[i].op_type_name,
+ stat_name_db[i].stat_phase_name[j],
+ min_cyc, (unsigned int)avg, max_cyc,
+ stat_host_db[i][j].count);
+ if ( tmp_len <0 )/* scnprintf shouldn't return negative value according to its implementation*/
+ return buf_len;
+ if ( buf_len + tmp_len >= PAGE_SIZE)
+ return buf_len;
+ buf_len += tmp_len;
+ strncat(buf, line,512);
+ }
+ }
+ return buf_len;
+}
+
+static ssize_t ssi_sys_stat_cc_db_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ int i;
+ char line[256];
+ uint32_t min_cyc, max_cyc;
+ uint64_t avg;
+ ssize_t buf_len,tmp_len=0;
+
+ buf_len = scnprintf(buf,PAGE_SIZE,
+ "phase\tmin[cy]\tavg[cy]\tmax[cy]\t#samples\n");
+ if ( buf_len <0 )/* scnprintf shouldn't return negative value according to its implementation*/
+ return buf_len;
+ for (i=STAT_OP_TYPE_ENCODE; i<MAX_STAT_OP_TYPES; i++) {
+ if (stat_cc_db[i][STAT_PHASE_6].count > 0) {
+ avg = (uint64_t)stat_cc_db[i][STAT_PHASE_6].sum;
+ do_div(avg, stat_cc_db[i][STAT_PHASE_6].count);
+ min_cyc = stat_cc_db[i][STAT_PHASE_6].min;
+ max_cyc = stat_cc_db[i][STAT_PHASE_6].max;
+ } else {
+ avg = min_cyc = max_cyc = 0;
+ }
+ tmp_len = scnprintf(line,256,
+ "%s\t%6u\t%6u\t%6u\t%7u\n",
+ stat_name_db[i].op_type_name,
+ min_cyc,
+ (unsigned int)avg,
+ max_cyc,
+ stat_cc_db[i][STAT_PHASE_6].count);
+
+ if ( tmp_len < 0 )/* scnprintf shouldn't return negative value according to its implementation*/
+ return buf_len;
+
+ if ( buf_len + tmp_len >= PAGE_SIZE)
+ return buf_len;
+ buf_len += tmp_len;
+ strncat(buf, line,256);
+ }
+ return buf_len;
+}
+
+void update_host_stat(unsigned int op_type, unsigned int phase, cycles_t result)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&stat_lock, flags);
+ update_db(&(stat_host_db[op_type][phase]), (unsigned int)result);
+ spin_unlock_irqrestore(&stat_lock, flags);
+}
+
+void update_cc_stat(
+ unsigned int op_type,
+ unsigned int phase,
+ unsigned int elapsed_cycles)
+{
+ update_db(&(stat_cc_db[op_type][phase]), elapsed_cycles);
+}
+
+void display_all_stat_db(void)
+{
+ SSI_LOG_ERR("\n======= CYCLE COUNT STATS =======\n");
+ display_db(stat_host_db);
+ SSI_LOG_ERR("\n======= CC HW CYCLE COUNT STATS =======\n");
+ display_db(stat_cc_db);
+}
+#endif /*CC_CYCLE_COUNT*/
+
+
+
+static ssize_t ssi_sys_regdump_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ struct ssi_drvdata *drvdata = sys_get_drvdata();
+ uint32_t register_value;
+ void __iomem* cc_base = drvdata->cc_base;
+ int offset = 0;
+
+ register_value = CC_HAL_READ_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_SIGNATURE));
+ offset += scnprintf(buf + offset, PAGE_SIZE - offset, "%s \t(0x%lX)\t 0x%08X \n", "HOST_SIGNATURE ", DX_HOST_SIGNATURE_REG_OFFSET, register_value);
+ register_value = CC_HAL_READ_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_IRR));
+ offset += scnprintf(buf + offset, PAGE_SIZE - offset, "%s \t(0x%lX)\t 0x%08X \n", "HOST_IRR ", DX_HOST_IRR_REG_OFFSET, register_value);
+ register_value = CC_HAL_READ_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_POWER_DOWN_EN));
+ offset += scnprintf(buf + offset, PAGE_SIZE - offset, "%s \t(0x%lX)\t 0x%08X \n", "HOST_POWER_DOWN_EN ", DX_HOST_POWER_DOWN_EN_REG_OFFSET, register_value);
+ register_value = CC_HAL_READ_REGISTER(CC_REG_OFFSET(CRY_KERNEL, AXIM_MON_ERR));
+ offset += scnprintf(buf + offset, PAGE_SIZE - offset, "%s \t(0x%lX)\t 0x%08X \n", "AXIM_MON_ERR ", DX_AXIM_MON_ERR_REG_OFFSET, register_value);
+ register_value = CC_HAL_READ_REGISTER(CC_REG_OFFSET(CRY_KERNEL, DSCRPTR_QUEUE_CONTENT));
+ offset += scnprintf(buf + offset, PAGE_SIZE - offset, "%s \t(0x%lX)\t 0x%08X \n", "DSCRPTR_QUEUE_CONTENT", DX_DSCRPTR_QUEUE_CONTENT_REG_OFFSET, register_value);
+ return offset;
+}
+
+static ssize_t ssi_sys_help_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ char* help_str[]={
+ "cat reg_dump ", "Print several of CC register values",
+ #if defined CC_CYCLE_COUNT
+ "cat stats_host ", "Print host statistics",
+ "echo <number> > stats_host", "Clear host statistics database",
+ "cat stats_cc ", "Print CC statistics",
+ "echo <number> > stats_cc ", "Clear CC statistics database",
+ #endif
+ };
+ int i=0, offset = 0;
+
+ offset += scnprintf(buf + offset, PAGE_SIZE - offset, "Usage:\n");
+ for ( i = 0; i < ARRAY_SIZE(help_str); i+=2) {
+ offset += scnprintf(buf + offset, PAGE_SIZE - offset, "%s\t\t%s\n", help_str[i], help_str[i+1]);
+ }
+ return offset;
+}
+
+/********************************************************
+ * SYSFS objects *
+ ********************************************************/
+/*
+ * Structure used to create a directory
+ * and its attributes in sysfs.
+ */
+struct sys_dir {
+ struct kobject *sys_dir_kobj;
+ struct attribute_group sys_dir_attr_group;
+ struct attribute **sys_dir_attr_list;
+ uint32_t num_of_attrs;
+ struct ssi_drvdata *drvdata; /* Associated driver context */
+};
+
+/* top level directory structures */
+static struct sys_dir sys_top_dir;
+
+/* TOP LEVEL ATTRIBUTES */
+static struct kobj_attribute ssi_sys_top_level_attrs[] = {
+ __ATTR(dump_regs, 0444, ssi_sys_regdump_show, NULL),
+ __ATTR(help, 0444, ssi_sys_help_show, NULL),
+#if defined CC_CYCLE_COUNT
+ __ATTR(stats_host, 0664, ssi_sys_stat_host_db_show, ssi_sys_stats_host_db_clear),
+ __ATTR(stats_cc, 0664, ssi_sys_stat_cc_db_show, ssi_sys_stats_cc_db_clear),
+#endif
+
+};
+
+static struct ssi_drvdata *sys_get_drvdata(void)
+{
+ /* TODO: supporting multiple SeP devices would require avoiding
+ * global "top_dir" and finding associated "top_dir" by traversing
+ * up the tree to the kobject which matches one of the top_dir's */
+ return sys_top_dir.drvdata;
+}
+
+static int sys_init_dir(struct sys_dir *sys_dir, struct ssi_drvdata *drvdata,
+ struct kobject *parent_dir_kobj, const char *dir_name,
+ struct kobj_attribute *attrs, uint32_t num_of_attrs)
+{
+ int i;
+
+ memset(sys_dir, 0, sizeof(struct sys_dir));
+
+ sys_dir->drvdata = drvdata;
+
+ /* initialize directory kobject */
+ sys_dir->sys_dir_kobj =
+ kobject_create_and_add(dir_name, parent_dir_kobj);
+
+ if (!(sys_dir->sys_dir_kobj))
+ return -ENOMEM;
+ /* allocate memory for directory's attributes list */
+ sys_dir->sys_dir_attr_list =
+ kzalloc(sizeof(struct attribute *) * (num_of_attrs + 1),
+ GFP_KERNEL);
+
+ if (!(sys_dir->sys_dir_attr_list)) {
+ kobject_put(sys_dir->sys_dir_kobj);
+ return -ENOMEM;
+ }
+
+ sys_dir->num_of_attrs = num_of_attrs;
+
+ /* initialize attributes list */
+ for (i = 0; i < num_of_attrs; ++i)
+ sys_dir->sys_dir_attr_list[i] = &(attrs[i].attr);
+
+ /* last list entry should be NULL */
+ sys_dir->sys_dir_attr_list[num_of_attrs] = NULL;
+
+ sys_dir->sys_dir_attr_group.attrs = sys_dir->sys_dir_attr_list;
+
+ return sysfs_create_group(sys_dir->sys_dir_kobj,
+ &(sys_dir->sys_dir_attr_group));
+}
+
+static void sys_free_dir(struct sys_dir *sys_dir)
+{
+ if (!sys_dir)
+ return;
+
+ kfree(sys_dir->sys_dir_attr_list);
+
+ if (sys_dir->sys_dir_kobj != NULL)
+ kobject_put(sys_dir->sys_dir_kobj);
+}
+
+int ssi_sysfs_init(struct kobject *sys_dev_obj, struct ssi_drvdata *drvdata)
+{
+ int retval;
+
+#if defined CC_CYCLE_COUNT
+ /* Init. statistics */
+ init_db(stat_host_db);
+ init_db(stat_cc_db);
+#endif
+
+ SSI_LOG_ERR("setup sysfs under %s\n", sys_dev_obj->name);
+
+ /* Initialize top directory */
+ retval = sys_init_dir(&sys_top_dir, drvdata, sys_dev_obj,
+ "cc_info", ssi_sys_top_level_attrs,
+ ARRAY_SIZE(ssi_sys_top_level_attrs));
+ return retval;
+}
+
+void ssi_sysfs_fini(void)
+{
+ sys_free_dir(&sys_top_dir);
+}
+
+#endif /*ENABLE_CC_SYSFS*/
+
diff --git a/drivers/staging/ccree/ssi_sysfs.h b/drivers/staging/ccree/ssi_sysfs.h
new file mode 100644
index 000000000000..baeac1d99c07
--- /dev/null
+++ b/drivers/staging/ccree/ssi_sysfs.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2012-2017 ARM Limited or its affiliates.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/* \file ssi_sysfs.h
+ ARM CryptoCell sysfs APIs
+ */
+
+#ifndef __SSI_SYSFS_H__
+#define __SSI_SYSFS_H__
+
+#include <asm/timex.h>
+
+/* forward declaration */
+struct ssi_drvdata;
+
+enum stat_phase {
+ STAT_PHASE_0 = 0,
+ STAT_PHASE_1,
+ STAT_PHASE_2,
+ STAT_PHASE_3,
+ STAT_PHASE_4,
+ STAT_PHASE_5,
+ STAT_PHASE_6,
+ MAX_STAT_PHASES,
+};
+enum stat_op {
+ STAT_OP_TYPE_NULL = 0,
+ STAT_OP_TYPE_ENCODE,
+ STAT_OP_TYPE_DECODE,
+ STAT_OP_TYPE_SETKEY,
+ STAT_OP_TYPE_GENERIC,
+ MAX_STAT_OP_TYPES,
+};
+
+int ssi_sysfs_init(struct kobject *sys_dev_obj, struct ssi_drvdata *drvdata);
+void ssi_sysfs_fini(void);
+void update_host_stat(unsigned int op_type, unsigned int phase, cycles_t result);
+void update_cc_stat(unsigned int op_type, unsigned int phase, unsigned int elapsed_cycles);
+void display_all_stat_db(void);
+
+#endif /*__SSI_SYSFS_H__*/
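update_host_stat()/update_cc_stat() and the stat_op/stat_phase enums only have an effect when the driver is built with CC_CYCLE_COUNT; each sample is folded into the min/avg/max tables exposed through the stats_host and stats_cc attributes. A sketch of recording one host-side sample around a synchronous request (the wrapper name and the choice of STAT_PHASE_3 are arbitrary assumptions):

#include <asm/timex.h>
#include "ssi_driver.h"
#include "ssi_request_mgr.h"
#include "ssi_sysfs.h"

static void ccree_time_request_sketch(struct ssi_drvdata *drvdata,
				      struct ssi_crypto_req *req,
				      HwDesc_s *desc, unsigned int len)
{
	cycles_t start = get_cycles();
	int rc;

	rc = send_request(drvdata, req, desc, len, false);
	if (rc)
		return;

	/* Fold the elapsed cycles into the host-side "Send Request" phase. */
	update_host_stat(STAT_OP_TYPE_ENCODE, STAT_PHASE_3,
			 get_cycles() - start);
}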
diff --git a/drivers/staging/comedi/Kconfig b/drivers/staging/comedi/Kconfig
index 942507754cab..7a655ed071a3 100644
--- a/drivers/staging/comedi/Kconfig
+++ b/drivers/staging/comedi/Kconfig
@@ -1,6 +1,5 @@
config COMEDI
tristate "Data acquisition support (comedi)"
- depends on m
---help---
Enable support for a wide range of data acquisition devices
for Linux.
@@ -506,7 +505,6 @@ config COMEDI_NI_ATMIO16D
config COMEDI_NI_LABPC_ISA
tristate "NI Lab-PC and compatibles ISA support"
select COMEDI_NI_LABPC
- select COMEDI_NI_LABPC_ISADMA if ISA_DMA_API
---help---
Enable support for National Instruments Lab-PC and compatibles
Lab-PC-1200, Lab-PC-1200AI, Lab-PC+.
@@ -1316,6 +1314,9 @@ config COMEDI_NI_LABPC
config COMEDI_NI_LABPC_ISADMA
tristate
+ default COMEDI_NI_LABPC
+ depends on COMEDI_NI_LABPC_ISA != n
+ depends on ISA_DMA_API
select COMEDI_ISADMA
config COMEDI_NI_TIO
diff --git a/drivers/staging/comedi/comedi_buf.c b/drivers/staging/comedi/comedi_buf.c
index 1e1df89b5018..8e9b30b26810 100644
--- a/drivers/staging/comedi/comedi_buf.c
+++ b/drivers/staging/comedi/comedi_buf.c
@@ -161,6 +161,30 @@ int comedi_buf_map_put(struct comedi_buf_map *bm)
return 1;
}
+/* helper for "access" vm operation */
+int comedi_buf_map_access(struct comedi_buf_map *bm, unsigned long offset,
+ void *buf, int len, int write)
+{
+ unsigned int pgoff = offset & ~PAGE_MASK;
+ unsigned long pg = offset >> PAGE_SHIFT;
+ int done = 0;
+
+ while (done < len && pg < bm->n_pages) {
+ int l = min_t(int, len - done, PAGE_SIZE - pgoff);
+ void *b = bm->page_list[pg].virt_addr + pgoff;
+
+ if (write)
+ memcpy(b, buf, l);
+ else
+ memcpy(buf, b, l);
+ buf += l;
+ done += l;
+ pg++;
+ pgoff = 0;
+ }
+ return done;
+}
+
/* returns s->async->buf_map and increments its kref refcount */
struct comedi_buf_map *
comedi_buf_map_from_subdev_get(struct comedi_subdevice *s)
diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
index 8deac8d9225d..f191c2a75732 100644
--- a/drivers/staging/comedi/comedi_fops.c
+++ b/drivers/staging/comedi/comedi_fops.c
@@ -76,8 +76,8 @@ struct comedi_file {
#define COMEDI_NUM_SUBDEVICE_MINORS \
(COMEDI_NUM_MINORS - COMEDI_NUM_BOARD_MINORS)
-static int comedi_num_legacy_minors;
-module_param(comedi_num_legacy_minors, int, 0444);
+static unsigned short comedi_num_legacy_minors;
+module_param(comedi_num_legacy_minors, ushort, 0444);
MODULE_PARM_DESC(comedi_num_legacy_minors,
"number of comedi minor devices to reserve for non-auto-configured devices (default 0)"
);
@@ -2165,9 +2165,24 @@ static void comedi_vm_close(struct vm_area_struct *area)
comedi_buf_map_put(bm);
}
+static int comedi_vm_access(struct vm_area_struct *vma, unsigned long addr,
+ void *buf, int len, int write)
+{
+ struct comedi_buf_map *bm = vma->vm_private_data;
+ unsigned long offset =
+ addr - vma->vm_start + (vma->vm_pgoff << PAGE_SHIFT);
+
+ if (len < 0)
+ return -EINVAL;
+ if (len > vma->vm_end - addr)
+ len = vma->vm_end - addr;
+ return comedi_buf_map_access(bm, offset, buf, len, write);
+}
+
static const struct vm_operations_struct comedi_vm_ops = {
.open = comedi_vm_open,
.close = comedi_vm_close,
+ .access = comedi_vm_access,
};
static int comedi_mmap(struct file *file, struct vm_area_struct *vma)
@@ -2857,8 +2872,7 @@ static int __init comedi_init(void)
pr_info("version " COMEDI_RELEASE " - http://www.comedi.org\n");
- if (comedi_num_legacy_minors < 0 ||
- comedi_num_legacy_minors > COMEDI_NUM_BOARD_MINORS) {
+ if (comedi_num_legacy_minors > COMEDI_NUM_BOARD_MINORS) {
pr_err("invalid value for module parameter \"comedi_num_legacy_minors\". Valid values are 0 through %i.\n",
COMEDI_NUM_BOARD_MINORS);
return -EINVAL;
diff --git a/drivers/staging/comedi/comedi_internal.h b/drivers/staging/comedi/comedi_internal.h
index 534415e331b6..6246f4a78ca6 100644
--- a/drivers/staging/comedi/comedi_internal.h
+++ b/drivers/staging/comedi/comedi_internal.h
@@ -29,6 +29,8 @@ void comedi_buf_reset(struct comedi_subdevice *s);
bool comedi_buf_is_mmapped(struct comedi_subdevice *s);
void comedi_buf_map_get(struct comedi_buf_map *bm);
int comedi_buf_map_put(struct comedi_buf_map *bm);
+int comedi_buf_map_access(struct comedi_buf_map *bm, unsigned long offset,
+ void *buf, int len, int write);
struct comedi_buf_map *comedi_buf_map_from_subdev_get(
struct comedi_subdevice *s);
unsigned int comedi_buf_write_n_available(struct comedi_subdevice *s);
diff --git a/drivers/staging/comedi/drivers/addi_apci_3xxx.c b/drivers/staging/comedi/drivers/addi_apci_3xxx.c
index b6af3eba91fd..be1f6133ff1c 100644
--- a/drivers/staging/comedi/drivers/addi_apci_3xxx.c
+++ b/drivers/staging/comedi/drivers/addi_apci_3xxx.c
@@ -502,7 +502,7 @@ static int apci3xxx_ai_ns_to_timer(struct comedi_device *dev,
timer = *ns / base;
break;
case CMDF_ROUND_UP:
- timer = (*ns + base - 1) / base;
+ timer = DIV_ROUND_UP(*ns, base);
break;
}
@@ -787,6 +787,8 @@ static int apci3xxx_auto_attach(struct comedi_device *dev,
dev->iobase = pci_resource_start(pcidev, 2);
dev->mmio = pci_ioremap_bar(pcidev, 3);
+ if (!dev->mmio)
+ return -ENOMEM;
if (pcidev->irq > 0) {
ret = request_irq(pcidev->irq, apci3xxx_irq_handler,
diff --git a/drivers/staging/comedi/drivers/amplc_pci224.c b/drivers/staging/comedi/drivers/amplc_pci224.c
index 2e6decf1b69d..4e554944bc71 100644
--- a/drivers/staging/comedi/drivers/amplc_pci224.c
+++ b/drivers/staging/comedi/drivers/amplc_pci224.c
@@ -206,8 +206,11 @@
#define CLK_1KHZ 5 /* internal 1 kHz clock */
#define CLK_OUTNM1 6 /* output of channel-1 modulo total */
#define CLK_EXT 7 /* external clock */
-/* Macro to construct clock input configuration register value. */
-#define CLK_CONFIG(chan, src) ((((chan) & 3) << 3) | ((src) & 7))
+
+static unsigned int pci224_clk_config(unsigned int chan, unsigned int src)
+{
+ return ((chan & 3) << 3) | (src & 7);
+}
/*
* Counter/timer gate input configuration sources.
@@ -216,8 +219,11 @@
#define GAT_GND 1 /* GND (i.e. disabled) */
#define GAT_EXT 2 /* reserved (external gate input) */
#define GAT_NOUTNM2 3 /* inverted output of channel-2 modulo total */
-/* Macro to construct gate input configuration register value. */
-#define GAT_CONFIG(chan, src) ((((chan) & 3) << 3) | ((src) & 7))
+
+static unsigned int pci224_gat_config(unsigned int chan, unsigned int src)
+{
+ return ((chan & 3) << 3) | (src & 7);
+}
/*
* Summary of CLK_OUTNM1 and GAT_NOUTNM2 connections for PCI224 and PCI234:
@@ -817,14 +823,16 @@ static void pci224_ao_start_pacer(struct comedi_device *dev,
* source.
*/
/* Make sure Z2-0 is gated on. */
- outb(GAT_CONFIG(0, GAT_VCC), devpriv->iobase1 + PCI224_ZGAT_SCE);
+ outb(pci224_gat_config(0, GAT_VCC), devpriv->iobase1 + PCI224_ZGAT_SCE);
/* Cascading with Z2-2. */
/* Make sure Z2-2 is gated on. */
- outb(GAT_CONFIG(2, GAT_VCC), devpriv->iobase1 + PCI224_ZGAT_SCE);
+ outb(pci224_gat_config(2, GAT_VCC), devpriv->iobase1 + PCI224_ZGAT_SCE);
/* Z2-2 needs 10 MHz clock. */
- outb(CLK_CONFIG(2, CLK_10MHZ), devpriv->iobase1 + PCI224_ZCLK_SCE);
+ outb(pci224_clk_config(2, CLK_10MHZ),
+ devpriv->iobase1 + PCI224_ZCLK_SCE);
/* Z2-0 is clocked from Z2-2's output. */
- outb(CLK_CONFIG(0, CLK_OUTNM1), devpriv->iobase1 + PCI224_ZCLK_SCE);
+ outb(pci224_clk_config(0, CLK_OUTNM1),
+ devpriv->iobase1 + PCI224_ZCLK_SCE);
comedi_8254_pacer_enable(dev->pacer, 2, 0, false);
}
diff --git a/drivers/staging/comedi/drivers/amplc_pci230.c b/drivers/staging/comedi/drivers/amplc_pci230.c
index 42945de31fe2..48c7890c3007 100644
--- a/drivers/staging/comedi/drivers/amplc_pci230.c
+++ b/drivers/staging/comedi/drivers/amplc_pci230.c
@@ -376,8 +376,11 @@
#define CLK_1KHZ 5 /* internal 1 kHz clock */
#define CLK_OUTNM1 6 /* output of channel-1 modulo total */
#define CLK_EXT 7 /* external clock */
-/* Macro to construct clock input configuration register value. */
-#define CLK_CONFIG(chan, src) ((((chan) & 3) << 3) | ((src) & 7))
+
+static unsigned int pci230_clk_config(unsigned int chan, unsigned int src)
+{
+ return ((chan & 3) << 3) | (src & 7);
+}
/*
* Counter/timer gate input configuration sources.
@@ -387,8 +390,7 @@
#define GAT_EXT 2 /* external gate input (PPCn on PCI230) */
#define GAT_NOUTNM2 3 /* inverted output of channel-2 modulo total */
-static inline unsigned int pci230_gat_config(unsigned int chan,
- unsigned int src)
+static unsigned int pci230_gat_config(unsigned int chan, unsigned int src)
{
return ((chan & 3) << 3) | (src & 7);
}
@@ -698,7 +700,7 @@ static void pci230_ct_setup_ns_mode(struct comedi_device *dev, unsigned int ct,
/* Determine clock source and count. */
clk_src = pci230_choose_clk_count(ns, &count, flags);
/* Program clock source. */
- outb(CLK_CONFIG(ct, clk_src), dev->iobase + PCI230_ZCLK_SCE);
+ outb(pci230_clk_config(ct, clk_src), dev->iobase + PCI230_ZCLK_SCE);
/* Set initial count. */
if (count >= 65536)
count = 0;
diff --git a/drivers/staging/comedi/drivers/cb_pcidas64.c b/drivers/staging/comedi/drivers/cb_pcidas64.c
index efbf27730d71..b761f000c1dc 100644
--- a/drivers/staging/comedi/drivers/cb_pcidas64.c
+++ b/drivers/staging/comedi/drivers/cb_pcidas64.c
@@ -1417,9 +1417,7 @@ static int set_ai_fifo_size(struct comedi_device *dev, unsigned int num_samples)
if (retval < 0)
return retval;
- num_samples = retval * fifo->num_segments * fifo->sample_packing_ratio;
-
- return num_samples;
+ return retval * fifo->num_segments * fifo->sample_packing_ratio;
}
/* query length of fifo */
@@ -2007,7 +2005,7 @@ static unsigned int get_divisor(unsigned int ns, unsigned int flags)
switch (flags & CMDF_ROUND_MASK) {
case CMDF_ROUND_UP:
- divisor = (ns + TIMER_BASE - 1) / TIMER_BASE;
+ divisor = DIV_ROUND_UP(ns, TIMER_BASE);
break;
case CMDF_ROUND_DOWN:
divisor = ns / TIMER_BASE;
diff --git a/drivers/staging/comedi/drivers/comedi_test.c b/drivers/staging/comedi/drivers/comedi_test.c
index 2a063f07fe7b..ccfd642998be 100644
--- a/drivers/staging/comedi/drivers/comedi_test.c
+++ b/drivers/staging/comedi/drivers/comedi_test.c
@@ -480,11 +480,11 @@ static void waveform_ao_timer(unsigned long arg)
/* output the last scan */
for (i = 0; i < cmd->scan_end_arg; i++) {
unsigned int chan = CR_CHAN(cmd->chanlist[i]);
+ unsigned short *pd;
- if (comedi_buf_read_samples(s,
- &devpriv->
- ao_loopbacks[chan],
- 1) == 0) {
+ pd = &devpriv->ao_loopbacks[chan];
+
+ if (!comedi_buf_read_samples(s, pd, 1)) {
/* unexpected underrun! (cancelled?) */
async->events |= COMEDI_CB_OVERFLOW;
goto underrun;
diff --git a/drivers/staging/comedi/drivers/jr3_pci.c b/drivers/staging/comedi/drivers/jr3_pci.c
index 70390de66e0e..f1c2a20a7d4d 100644
--- a/drivers/staging/comedi/drivers/jr3_pci.c
+++ b/drivers/staging/comedi/drivers/jr3_pci.c
@@ -95,27 +95,30 @@ struct jr3_pci_poll_delay {
};
struct jr3_pci_dev_private {
- struct jr3_t __iomem *iobase;
struct timer_list timer;
};
+union jr3_pci_single_range {
+ struct comedi_lrange l;
+ char _reserved[offsetof(struct comedi_lrange, range[1])];
+};
+
+enum jr3_pci_poll_state {
+ state_jr3_poll,
+ state_jr3_init_wait_for_offset,
+ state_jr3_init_transform_complete,
+ state_jr3_init_set_full_scale_complete,
+ state_jr3_init_use_offset_complete,
+ state_jr3_done
+};
+
struct jr3_pci_subdev_private {
- struct jr3_channel __iomem *channel;
+ struct jr3_sensor __iomem *sensor;
unsigned long next_time_min;
- unsigned long next_time_max;
- enum { state_jr3_poll,
- state_jr3_init_wait_for_offset,
- state_jr3_init_transform_complete,
- state_jr3_init_set_full_scale_complete,
- state_jr3_init_use_offset_complete,
- state_jr3_done
- } state;
+ enum jr3_pci_poll_state state;
int serial_no;
int model_no;
- struct {
- int length;
- struct comedi_krange range;
- } range[9];
+ union jr3_pci_single_range range[9];
const struct comedi_lrange *range_table_list[8 * 7 + 2];
unsigned int maxdata_list[8 * 7 + 2];
u16 errors;
@@ -131,43 +134,43 @@ static struct jr3_pci_poll_delay poll_delay_min_max(int min, int max)
return result;
}
-static int is_complete(struct jr3_channel __iomem *channel)
+static int is_complete(struct jr3_sensor __iomem *sensor)
{
- return get_s16(&channel->command_word0) == 0;
+ return get_s16(&sensor->command_word0) == 0;
}
-static void set_transforms(struct jr3_channel __iomem *channel,
- struct jr3_pci_transform transf, short num)
+static void set_transforms(struct jr3_sensor __iomem *sensor,
+ const struct jr3_pci_transform *transf, short num)
{
int i;
num &= 0x000f; /* Make sure that 0 <= num <= 15 */
for (i = 0; i < 8; i++) {
- set_u16(&channel->transforms[num].link[i].link_type,
- transf.link[i].link_type);
+ set_u16(&sensor->transforms[num].link[i].link_type,
+ transf->link[i].link_type);
udelay(1);
- set_s16(&channel->transforms[num].link[i].link_amount,
- transf.link[i].link_amount);
+ set_s16(&sensor->transforms[num].link[i].link_amount,
+ transf->link[i].link_amount);
udelay(1);
- if (transf.link[i].link_type == end_x_form)
+ if (transf->link[i].link_type == end_x_form)
break;
}
}
-static void use_transform(struct jr3_channel __iomem *channel,
+static void use_transform(struct jr3_sensor __iomem *sensor,
short transf_num)
{
- set_s16(&channel->command_word0, 0x0500 + (transf_num & 0x000f));
+ set_s16(&sensor->command_word0, 0x0500 + (transf_num & 0x000f));
}
-static void use_offset(struct jr3_channel __iomem *channel, short offset_num)
+static void use_offset(struct jr3_sensor __iomem *sensor, short offset_num)
{
- set_s16(&channel->command_word0, 0x0600 + (offset_num & 0x000f));
+ set_s16(&sensor->command_word0, 0x0600 + (offset_num & 0x000f));
}
-static void set_offset(struct jr3_channel __iomem *channel)
+static void set_offset(struct jr3_sensor __iomem *sensor)
{
- set_s16(&channel->command_word0, 0x0700);
+ set_s16(&sensor->command_word0, 0x0700);
}
struct six_axis_t {
@@ -179,43 +182,41 @@ struct six_axis_t {
s16 mz;
};
-static void set_full_scales(struct jr3_channel __iomem *channel,
+static void set_full_scales(struct jr3_sensor __iomem *sensor,
struct six_axis_t full_scale)
{
- set_s16(&channel->full_scale.fx, full_scale.fx);
- set_s16(&channel->full_scale.fy, full_scale.fy);
- set_s16(&channel->full_scale.fz, full_scale.fz);
- set_s16(&channel->full_scale.mx, full_scale.mx);
- set_s16(&channel->full_scale.my, full_scale.my);
- set_s16(&channel->full_scale.mz, full_scale.mz);
- set_s16(&channel->command_word0, 0x0a00);
+ set_s16(&sensor->full_scale.fx, full_scale.fx);
+ set_s16(&sensor->full_scale.fy, full_scale.fy);
+ set_s16(&sensor->full_scale.fz, full_scale.fz);
+ set_s16(&sensor->full_scale.mx, full_scale.mx);
+ set_s16(&sensor->full_scale.my, full_scale.my);
+ set_s16(&sensor->full_scale.mz, full_scale.mz);
+ set_s16(&sensor->command_word0, 0x0a00);
}
-static struct six_axis_t get_min_full_scales(struct jr3_channel __iomem
- *channel)
+static struct six_axis_t get_min_full_scales(struct jr3_sensor __iomem *sensor)
{
struct six_axis_t result;
- result.fx = get_s16(&channel->min_full_scale.fx);
- result.fy = get_s16(&channel->min_full_scale.fy);
- result.fz = get_s16(&channel->min_full_scale.fz);
- result.mx = get_s16(&channel->min_full_scale.mx);
- result.my = get_s16(&channel->min_full_scale.my);
- result.mz = get_s16(&channel->min_full_scale.mz);
+ result.fx = get_s16(&sensor->min_full_scale.fx);
+ result.fy = get_s16(&sensor->min_full_scale.fy);
+ result.fz = get_s16(&sensor->min_full_scale.fz);
+ result.mx = get_s16(&sensor->min_full_scale.mx);
+ result.my = get_s16(&sensor->min_full_scale.my);
+ result.mz = get_s16(&sensor->min_full_scale.mz);
return result;
}
-static struct six_axis_t get_max_full_scales(struct jr3_channel __iomem
- *channel)
+static struct six_axis_t get_max_full_scales(struct jr3_sensor __iomem *sensor)
{
struct six_axis_t result;
- result.fx = get_s16(&channel->max_full_scale.fx);
- result.fy = get_s16(&channel->max_full_scale.fy);
- result.fz = get_s16(&channel->max_full_scale.fz);
- result.mx = get_s16(&channel->max_full_scale.mx);
- result.my = get_s16(&channel->max_full_scale.my);
- result.mz = get_s16(&channel->max_full_scale.mz);
+ result.fx = get_s16(&sensor->max_full_scale.fx);
+ result.fy = get_s16(&sensor->max_full_scale.fy);
+ result.fz = get_s16(&sensor->max_full_scale.fz);
+ result.mx = get_s16(&sensor->max_full_scale.mx);
+ result.my = get_s16(&sensor->max_full_scale.my);
+ result.mz = get_s16(&sensor->max_full_scale.mz);
return result;
}
@@ -235,35 +236,35 @@ static unsigned int jr3_pci_ai_read_chan(struct comedi_device *dev,
switch (axis) {
case 0:
- val = get_s16(&spriv->channel->filter[filter].fx);
+ val = get_s16(&spriv->sensor->filter[filter].fx);
break;
case 1:
- val = get_s16(&spriv->channel->filter[filter].fy);
+ val = get_s16(&spriv->sensor->filter[filter].fy);
break;
case 2:
- val = get_s16(&spriv->channel->filter[filter].fz);
+ val = get_s16(&spriv->sensor->filter[filter].fz);
break;
case 3:
- val = get_s16(&spriv->channel->filter[filter].mx);
+ val = get_s16(&spriv->sensor->filter[filter].mx);
break;
case 4:
- val = get_s16(&spriv->channel->filter[filter].my);
+ val = get_s16(&spriv->sensor->filter[filter].my);
break;
case 5:
- val = get_s16(&spriv->channel->filter[filter].mz);
+ val = get_s16(&spriv->sensor->filter[filter].mz);
break;
case 6:
- val = get_s16(&spriv->channel->filter[filter].v1);
+ val = get_s16(&spriv->sensor->filter[filter].v1);
break;
case 7:
- val = get_s16(&spriv->channel->filter[filter].v2);
+ val = get_s16(&spriv->sensor->filter[filter].v2);
break;
}
val += 0x4000;
} else if (chan == 56) {
- val = get_u16(&spriv->channel->model_no);
+ val = get_u16(&spriv->sensor->model_no);
} else if (chan == 57) {
- val = get_u16(&spriv->channel->serial_no);
+ val = get_u16(&spriv->sensor->serial_no);
}
return val;
@@ -279,10 +280,7 @@ static int jr3_pci_ai_insn_read(struct comedi_device *dev,
u16 errors;
int i;
- if (!spriv)
- return -EINVAL;
-
- errors = get_u16(&spriv->channel->errors);
+ errors = get_u16(&spriv->sensor->errors);
if (spriv->state != state_jr3_done ||
(errors & (watch_dog | watch_dog2 | sensor_change))) {
/* No sensor or sensor changed */
@@ -309,9 +307,8 @@ static int jr3_pci_open(struct comedi_device *dev)
for (i = 0; i < dev->n_subdevices; i++) {
s = &dev->subdevices[i];
spriv = s->private;
- if (spriv)
- dev_dbg(dev->class_dev, "serial: %p %d (%d)\n",
- spriv, spriv->serial_no, s->index);
+ dev_dbg(dev->class_dev, "serial[%d]: %d\n", s->index,
+ spriv->serial_no);
}
return 0;
}
@@ -375,8 +372,7 @@ static int jr3_check_firmware(struct comedi_device *dev,
static void jr3_write_firmware(struct comedi_device *dev,
int subdev, const u8 *data, size_t size)
{
- struct jr3_pci_dev_private *devpriv = dev->private;
- struct jr3_t __iomem *iobase = devpriv->iobase;
+ struct jr3_block __iomem *block = dev->mmio;
u32 __iomem *lo;
u32 __iomem *hi;
int more = 1;
@@ -409,8 +405,8 @@ static void jr3_write_firmware(struct comedi_device *dev,
unsigned int data1 = 0;
unsigned int data2 = 0;
- lo = &iobase->channel[subdev].program_lo[addr];
- hi = &iobase->channel[subdev].program_hi[addr];
+ lo = &block[subdev].program_lo[addr];
+ hi = &block[subdev].program_hi[addr];
more = more &&
read_idm_word(data, size, &pos, &data1);
@@ -453,17 +449,14 @@ jr3_pci_poll_subdevice(struct comedi_subdevice *s)
{
struct jr3_pci_subdev_private *spriv = s->private;
struct jr3_pci_poll_delay result = poll_delay_min_max(1000, 2000);
- struct jr3_channel __iomem *channel;
+ struct jr3_sensor __iomem *sensor;
u16 model_no;
u16 serial_no;
int errors;
int i;
- if (!spriv)
- return result;
-
- channel = spriv->channel;
- errors = get_u16(&channel->errors);
+ sensor = spriv->sensor;
+ errors = get_u16(&sensor->errors);
if (errors != spriv->errors)
spriv->errors = errors;
@@ -474,8 +467,8 @@ jr3_pci_poll_subdevice(struct comedi_subdevice *s)
switch (spriv->state) {
case state_jr3_poll:
- model_no = get_u16(&channel->model_no);
- serial_no = get_u16(&channel->serial_no);
+ model_no = get_u16(&sensor->model_no);
+ serial_no = get_u16(&sensor->serial_no);
if ((errors & (watch_dog | watch_dog2)) ||
model_no == 0 || serial_no == 0) {
@@ -499,8 +492,8 @@ jr3_pci_poll_subdevice(struct comedi_subdevice *s)
} else {
struct jr3_pci_transform transf;
- spriv->model_no = get_u16(&channel->model_no);
- spriv->serial_no = get_u16(&channel->serial_no);
+ spriv->model_no = get_u16(&sensor->model_no);
+ spriv->serial_no = get_u16(&sensor->serial_no);
/* Transformation all zeros */
for (i = 0; i < ARRAY_SIZE(transf.link); i++) {
@@ -508,24 +501,24 @@ jr3_pci_poll_subdevice(struct comedi_subdevice *s)
transf.link[i].link_amount = 0;
}
- set_transforms(channel, transf, 0);
- use_transform(channel, 0);
+ set_transforms(sensor, &transf, 0);
+ use_transform(sensor, 0);
spriv->state = state_jr3_init_transform_complete;
/* Allow 20 ms for completion */
result = poll_delay_min_max(20, 100);
}
break;
case state_jr3_init_transform_complete:
- if (!is_complete(channel)) {
+ if (!is_complete(sensor)) {
result = poll_delay_min_max(20, 100);
} else {
/* Set full scale */
struct six_axis_t min_full_scale;
struct six_axis_t max_full_scale;
- min_full_scale = get_min_full_scales(channel);
- max_full_scale = get_max_full_scales(channel);
- set_full_scales(channel, max_full_scale);
+ min_full_scale = get_min_full_scales(sensor);
+ max_full_scale = get_max_full_scales(sensor);
+ set_full_scales(sensor, max_full_scale);
spriv->state = state_jr3_init_set_full_scale_complete;
/* Allow 20 ms for completion */
@@ -533,50 +526,51 @@ jr3_pci_poll_subdevice(struct comedi_subdevice *s)
}
break;
case state_jr3_init_set_full_scale_complete:
- if (!is_complete(channel)) {
+ if (!is_complete(sensor)) {
result = poll_delay_min_max(20, 100);
} else {
- struct force_array __iomem *fs = &channel->full_scale;
+ struct force_array __iomem *fs = &sensor->full_scale;
+ union jr3_pci_single_range *r = spriv->range;
/* Use ranges in kN or we will overflow around 2000N! */
- spriv->range[0].range.min = -get_s16(&fs->fx) * 1000;
- spriv->range[0].range.max = get_s16(&fs->fx) * 1000;
- spriv->range[1].range.min = -get_s16(&fs->fy) * 1000;
- spriv->range[1].range.max = get_s16(&fs->fy) * 1000;
- spriv->range[2].range.min = -get_s16(&fs->fz) * 1000;
- spriv->range[2].range.max = get_s16(&fs->fz) * 1000;
- spriv->range[3].range.min = -get_s16(&fs->mx) * 100;
- spriv->range[3].range.max = get_s16(&fs->mx) * 100;
- spriv->range[4].range.min = -get_s16(&fs->my) * 100;
- spriv->range[4].range.max = get_s16(&fs->my) * 100;
- spriv->range[5].range.min = -get_s16(&fs->mz) * 100;
+ r[0].l.range[0].min = -get_s16(&fs->fx) * 1000;
+ r[0].l.range[0].max = get_s16(&fs->fx) * 1000;
+ r[1].l.range[0].min = -get_s16(&fs->fy) * 1000;
+ r[1].l.range[0].max = get_s16(&fs->fy) * 1000;
+ r[2].l.range[0].min = -get_s16(&fs->fz) * 1000;
+ r[2].l.range[0].max = get_s16(&fs->fz) * 1000;
+ r[3].l.range[0].min = -get_s16(&fs->mx) * 100;
+ r[3].l.range[0].max = get_s16(&fs->mx) * 100;
+ r[4].l.range[0].min = -get_s16(&fs->my) * 100;
+ r[4].l.range[0].max = get_s16(&fs->my) * 100;
+ r[5].l.range[0].min = -get_s16(&fs->mz) * 100;
/* the next five are questionable */
- spriv->range[5].range.max = get_s16(&fs->mz) * 100;
- spriv->range[6].range.min = -get_s16(&fs->v1) * 100;
- spriv->range[6].range.max = get_s16(&fs->v1) * 100;
- spriv->range[7].range.min = -get_s16(&fs->v2) * 100;
- spriv->range[7].range.max = get_s16(&fs->v2) * 100;
- spriv->range[8].range.min = 0;
- spriv->range[8].range.max = 65535;
-
- use_offset(channel, 0);
+ r[5].l.range[0].max = get_s16(&fs->mz) * 100;
+ r[6].l.range[0].min = -get_s16(&fs->v1) * 100;
+ r[6].l.range[0].max = get_s16(&fs->v1) * 100;
+ r[7].l.range[0].min = -get_s16(&fs->v2) * 100;
+ r[7].l.range[0].max = get_s16(&fs->v2) * 100;
+ r[8].l.range[0].min = 0;
+ r[8].l.range[0].max = 65535;
+
+ use_offset(sensor, 0);
spriv->state = state_jr3_init_use_offset_complete;
/* Allow 40 ms for completion */
result = poll_delay_min_max(40, 100);
}
break;
case state_jr3_init_use_offset_complete:
- if (!is_complete(channel)) {
+ if (!is_complete(sensor)) {
result = poll_delay_min_max(20, 100);
} else {
- set_s16(&channel->offsets.fx, 0);
- set_s16(&channel->offsets.fy, 0);
- set_s16(&channel->offsets.fz, 0);
- set_s16(&channel->offsets.mx, 0);
- set_s16(&channel->offsets.my, 0);
- set_s16(&channel->offsets.mz, 0);
+ set_s16(&sensor->offsets.fx, 0);
+ set_s16(&sensor->offsets.fy, 0);
+ set_s16(&sensor->offsets.fz, 0);
+ set_s16(&sensor->offsets.mx, 0);
+ set_s16(&sensor->offsets.my, 0);
+ set_s16(&sensor->offsets.mz, 0);
- set_offset(channel);
+ set_offset(sensor);
spriv->state = state_jr3_done;
}
@@ -606,25 +600,23 @@ static void jr3_pci_poll_dev(unsigned long data)
delay = 1000;
now = jiffies;
- /* Poll all channels that are ready to be polled */
+ /* Poll all sensors that are ready to be polled */
for (i = 0; i < dev->n_subdevices; i++) {
s = &dev->subdevices[i];
spriv = s->private;
- if (now > spriv->next_time_min) {
+ if (time_after_eq(now, spriv->next_time_min)) {
struct jr3_pci_poll_delay sub_delay;
sub_delay = jr3_pci_poll_subdevice(s);
spriv->next_time_min = jiffies +
msecs_to_jiffies(sub_delay.min);
- spriv->next_time_max = jiffies +
- msecs_to_jiffies(sub_delay.max);
if (sub_delay.max && sub_delay.max < delay)
/*
* Wake up as late as possible ->
- * poll as many channels as possible at once.
+ * poll as many sensors as possible at once.
*/
delay = sub_delay.max;
}
@@ -638,7 +630,7 @@ static void jr3_pci_poll_dev(unsigned long data)
static struct jr3_pci_subdev_private *
jr3_pci_alloc_spriv(struct comedi_device *dev, struct comedi_subdevice *s)
{
- struct jr3_pci_dev_private *devpriv = dev->private;
+ struct jr3_block __iomem *block = dev->mmio;
struct jr3_pci_subdev_private *spriv;
int j;
int k;
@@ -647,36 +639,43 @@ jr3_pci_alloc_spriv(struct comedi_device *dev, struct comedi_subdevice *s)
if (!spriv)
return NULL;
- spriv->channel = &devpriv->iobase->channel[s->index].data;
+ spriv->sensor = &block[s->index].sensor;
for (j = 0; j < 8; j++) {
- spriv->range[j].length = 1;
- spriv->range[j].range.min = -1000000;
- spriv->range[j].range.max = 1000000;
+ spriv->range[j].l.length = 1;
+ spriv->range[j].l.range[0].min = -1000000;
+ spriv->range[j].l.range[0].max = 1000000;
for (k = 0; k < 7; k++) {
- spriv->range_table_list[j + k * 8] =
- (struct comedi_lrange *)&spriv->range[j];
+ spriv->range_table_list[j + k * 8] = &spriv->range[j].l;
spriv->maxdata_list[j + k * 8] = 0x7fff;
}
}
- spriv->range[8].length = 1;
- spriv->range[8].range.min = 0;
- spriv->range[8].range.max = 65536;
+ spriv->range[8].l.length = 1;
+ spriv->range[8].l.range[0].min = 0;
+ spriv->range[8].l.range[0].max = 65535;
- spriv->range_table_list[56] = (struct comedi_lrange *)&spriv->range[8];
- spriv->range_table_list[57] = (struct comedi_lrange *)&spriv->range[8];
+ spriv->range_table_list[56] = &spriv->range[8].l;
+ spriv->range_table_list[57] = &spriv->range[8].l;
spriv->maxdata_list[56] = 0xffff;
spriv->maxdata_list[57] = 0xffff;
- dev_dbg(dev->class_dev, "p->channel %p %p (%tx)\n",
- spriv->channel, devpriv->iobase,
- ((char __iomem *)spriv->channel -
- (char __iomem *)devpriv->iobase));
-
return spriv;
}
+static void jr3_pci_show_copyright(struct comedi_device *dev)
+{
+ struct jr3_block __iomem *block = dev->mmio;
+ struct jr3_sensor __iomem *sensor0 = &block[0].sensor;
+ char copy[ARRAY_SIZE(sensor0->copyright) + 1];
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sensor0->copyright); i++)
+ copy[i] = (char)(get_u16(&sensor0->copyright[i]) >> 8);
+ copy[i] = '\0';
+ dev_dbg(dev->class_dev, "Firmware copyright: %s\n", copy);
+}
+
static int jr3_pci_auto_attach(struct comedi_device *dev,
unsigned long context)
{
@@ -684,16 +683,12 @@ static int jr3_pci_auto_attach(struct comedi_device *dev,
static const struct jr3_pci_board *board;
struct jr3_pci_dev_private *devpriv;
struct jr3_pci_subdev_private *spriv;
+ struct jr3_block __iomem *block;
struct comedi_subdevice *s;
int ret;
int i;
- if (sizeof(struct jr3_channel) != 0xc00) {
- dev_err(dev->class_dev,
- "sizeof(struct jr3_channel) = %x [expected %x]\n",
- (unsigned int)sizeof(struct jr3_channel), 0xc00);
- return -EINVAL;
- }
+ BUILD_BUG_ON(sizeof(struct jr3_block) != 0x80000);
if (context < ARRAY_SIZE(jr3_pci_boards))
board = &jr3_pci_boards[context];
@@ -710,10 +705,15 @@ static int jr3_pci_auto_attach(struct comedi_device *dev,
if (ret)
return ret;
- devpriv->iobase = pci_ioremap_bar(pcidev, 0);
- if (!devpriv->iobase)
+ if (pci_resource_len(pcidev, 0) < board->n_subdevs * sizeof(*block))
+ return -ENXIO;
+
+ dev->mmio = pci_ioremap_bar(pcidev, 0);
+ if (!dev->mmio)
return -ENOMEM;
+ block = dev->mmio;
+
ret = comedi_alloc_subdevices(dev, board->n_subdevs);
if (ret)
return ret;
@@ -727,15 +727,17 @@ static int jr3_pci_auto_attach(struct comedi_device *dev,
s->insn_read = jr3_pci_ai_insn_read;
spriv = jr3_pci_alloc_spriv(dev, s);
- if (spriv) {
- /* Channel specific range and maxdata */
- s->range_table_list = spriv->range_table_list;
- s->maxdata_list = spriv->maxdata_list;
- }
+ if (!spriv)
+ return -ENOMEM;
+
+ /* Channel specific range and maxdata */
+ s->range_table_list = spriv->range_table_list;
+ s->maxdata_list = spriv->maxdata_list;
}
/* Reset DSP card */
- writel(0, &devpriv->iobase->channel[0].reset);
+ for (i = 0; i < dev->n_subdevices; i++)
+ writel(0, &block[i].reset);
ret = comedi_load_firmware(dev, &comedi_to_pci_dev(dev)->dev,
"comedi/jr3pci.idm",
@@ -758,11 +760,7 @@ static int jr3_pci_auto_attach(struct comedi_device *dev,
* can read firmware version
*/
msleep_interruptible(25);
- for (i = 0; i < 0x18; i++) {
- dev_dbg(dev->class_dev, "%c\n",
- get_u16(&devpriv->iobase->channel[0].
- data.copyright[i]) >> 8);
- }
+ jr3_pci_show_copyright(dev);
/* Start card timer */
for (i = 0; i < dev->n_subdevices; i++) {
@@ -770,7 +768,6 @@ static int jr3_pci_auto_attach(struct comedi_device *dev,
spriv = s->private;
spriv->next_time_min = jiffies + msecs_to_jiffies(500);
- spriv->next_time_max = jiffies + msecs_to_jiffies(2000);
}
setup_timer(&devpriv->timer, jr3_pci_poll_dev, (unsigned long)dev);
@@ -784,13 +781,10 @@ static void jr3_pci_detach(struct comedi_device *dev)
{
struct jr3_pci_dev_private *devpriv = dev->private;
- if (devpriv) {
+ if (devpriv)
del_timer_sync(&devpriv->timer);
- if (devpriv->iobase)
- iounmap(devpriv->iobase);
- }
- comedi_pci_disable(dev);
+ comedi_pci_detach(dev);
}
static struct comedi_driver jr3_pci_driver = {
@@ -825,6 +819,6 @@ static struct pci_driver jr3_pci_pci_driver = {
module_comedi_pci_driver(jr3_pci_driver, jr3_pci_pci_driver);
MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_DESCRIPTION("Comedi driver for JR3/PCI force sensor board");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE("comedi/jr3pci.idm");
diff --git a/drivers/staging/comedi/drivers/jr3_pci.h b/drivers/staging/comedi/drivers/jr3_pci.h
index f10a84fb6c14..28ff0c2aa3b8 100644
--- a/drivers/staging/comedi/drivers/jr3_pci.h
+++ b/drivers/staging/comedi/drivers/jr3_pci.h
@@ -280,7 +280,7 @@ struct intern_transform {
* and hardware manuals.
*/
-struct jr3_channel {
+struct jr3_sensor {
/*
* Raw_channels is the area used to store the raw data coming from
* the sensor.
@@ -724,13 +724,11 @@ struct jr3_channel {
struct intern_transform transforms[0x10]; /* offset 0x0200 */
};
-struct jr3_t {
- struct {
- u32 program_lo[0x4000]; /* 0x00000 - 0x10000 */
- struct jr3_channel data; /* 0x10000 - 0x10c00 */
- char pad2[0x30000 - 0x00c00]; /* 0x10c00 - 0x40000 */
- u32 program_hi[0x8000]; /* 0x40000 - 0x60000 */
- u32 reset; /* 0x60000 - 0x60004 */
- char pad3[0x20000 - 0x00004]; /* 0x60004 - 0x80000 */
- } channel[4];
+struct jr3_block {
+ u32 program_lo[0x4000]; /* 0x00000 - 0x10000 */
+ struct jr3_sensor sensor; /* 0x10000 - 0x10c00 */
+ char pad2[0x30000 - 0x00c00]; /* 0x10c00 - 0x40000 */
+ u32 program_hi[0x8000]; /* 0x40000 - 0x60000 */
+ u32 reset; /* 0x60000 - 0x60004 */
+ char pad3[0x20000 - 0x00004]; /* 0x60004 - 0x80000 */
};
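The new struct jr3_block makes the per-sensor register window explicit: 0x80000 bytes per block, with the sensor data at offset 0x10000 and the reset register at 0x60000, and the attach routine now asserts only the total size with BUILD_BUG_ON. A sketch of the extra compile-time layout checks implied by the offset comments; these are assumptions layered on the header, not part of the patch:

#include <linux/bug.h>
#include <linux/stddef.h>
#include "jr3_pci.h"

static inline void jr3_block_layout_checks(void)
{
	/* Each check mirrors one of the offset comments in struct jr3_block. */
	BUILD_BUG_ON(offsetof(struct jr3_block, sensor) != 0x10000);
	BUILD_BUG_ON(offsetof(struct jr3_block, program_hi) != 0x40000);
	BUILD_BUG_ON(offsetof(struct jr3_block, reset) != 0x60000);
	BUILD_BUG_ON(sizeof(struct jr3_block) != 0x80000);
}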
diff --git a/drivers/staging/comedi/drivers/ni_atmio.c b/drivers/staging/comedi/drivers/ni_atmio.c
index ffcf7afce684..2d62a8c57332 100644
--- a/drivers/staging/comedi/drivers/ni_atmio.c
+++ b/drivers/staging/comedi/drivers/ni_atmio.c
@@ -216,7 +216,7 @@ static const int ni_irqpin[] = {
#include "ni_mio_common.c"
-static struct pnp_device_id device_ids[] = {
+static const struct pnp_device_id device_ids[] = {
{.id = "NIC1900", .driver_data = 0},
{.id = "NIC2400", .driver_data = 0},
{.id = "NIC2500", .driver_data = 0},
diff --git a/drivers/staging/comedi/drivers/ni_usb6501.c b/drivers/staging/comedi/drivers/ni_usb6501.c
index 5036eebb9162..9a0a96329a55 100644
--- a/drivers/staging/comedi/drivers/ni_usb6501.c
+++ b/drivers/staging/comedi/drivers/ni_usb6501.c
@@ -45,7 +45,7 @@
* byte 3 is the total packet length
*
* byte 4 is always 00
- * byte 5 is is the total packet length - 4
+ * byte 5 is the total packet length - 4
* byte 6 is always 01
* byte 7 is the command
*
diff --git a/drivers/staging/comedi/drivers/s626.c b/drivers/staging/comedi/drivers/s626.c
index 97939b42cc00..4b9c22665748 100644
--- a/drivers/staging/comedi/drivers/s626.c
+++ b/drivers/staging/comedi/drivers/s626.c
@@ -74,26 +74,34 @@ struct s626_buffer_dma {
void *logical_base;
};
+/**
+ * struct s626_private - Working data for s626 driver.
+ * @ai_cmd_running: non-zero if ai_cmd is running.
+ * @ai_sample_timer: time between samples in units of the timer.
+ * @ai_convert_count: conversion counter.
+ * @ai_convert_timer: time between conversion in units of the timer.
+ * @counter_int_enabs: counter interrupt enable mask for MISC2 register.
+ * @adc_items: number of items in ADC poll list.
+ * @rps_buf: DMA buffer used to hold ADC (RPS1) program.
+ * @ana_buf: DMA buffer used to receive ADC data and hold DAC data.
+ * @dac_wbuf: pointer to logical adrs of DMA buffer used to hold DAC data.
+ * @dacpol: image of DAC polarity register.
+ * @trim_setpoint: images of TrimDAC setpoints.
+ * @i2c_adrs: I2C device address for onboard EEPROM (board rev dependent)
+ */
struct s626_private {
- u8 ai_cmd_running; /* ai_cmd is running */
- unsigned int ai_sample_timer; /* time between samples in
- * units of the timer */
- int ai_convert_count; /* conversion counter */
- unsigned int ai_convert_timer; /* time between conversion in
- * units of the timer */
- u16 counter_int_enabs; /* counter interrupt enable mask
- * for MISC2 register */
- u8 adc_items; /* number of items in ADC poll list */
- struct s626_buffer_dma rps_buf; /* DMA buffer used to hold ADC (RPS1)
- * program */
- struct s626_buffer_dma ana_buf; /* DMA buffer used to receive ADC data
- * and hold DAC data */
- u32 *dac_wbuf; /* pointer to logical adrs of DMA buffer
- * used to hold DAC data */
- u16 dacpol; /* image of DAC polarity register */
- u8 trim_setpoint[12]; /* images of TrimDAC setpoints */
- u32 i2c_adrs; /* I2C device address for onboard EEPROM
- * (board rev dependent) */
+ u8 ai_cmd_running;
+ unsigned int ai_sample_timer;
+ int ai_convert_count;
+ unsigned int ai_convert_timer;
+ u16 counter_int_enabs;
+ u8 adc_items;
+ struct s626_buffer_dma rps_buf;
+ struct s626_buffer_dma ana_buf;
+ u32 *dac_wbuf;
+ u16 dacpol;
+ u8 trim_setpoint[12];
+ u32 i2c_adrs;
};
/* Counter overflow/index event flag masks for RDMISC2. */
@@ -591,7 +599,7 @@ static int s626_write_trim_dac(struct comedi_device *dev,
* Save the new setpoint in case the application needs to read it back
* later.
*/
- devpriv->trim_setpoint[logical_chan] = (u8)dac_data;
+ devpriv->trim_setpoint[logical_chan] = dac_data;
/* Map logical channel number to physical channel number. */
chan = s626_trimchan[logical_chan];
@@ -1928,7 +1936,7 @@ static int s626_ao_insn_write(struct comedi_device *dev,
int i;
for (i = 0; i < insn->n; i++) {
- int16_t dacdata = (int16_t)data[i];
+ s16 dacdata = (s16)data[i];
int ret;
dacdata -= (0x1fff);
diff --git a/drivers/staging/dgnc/TODO b/drivers/staging/dgnc/TODO
index e26d1d6a5941..d4cc65770513 100644
--- a/drivers/staging/dgnc/TODO
+++ b/drivers/staging/dgnc/TODO
@@ -1,7 +1,4 @@
* remove unnecessary comments
-* remove unnecessary error messages. Example kzalloc() has its
- own error message. Adding an extra one is useless.
-* use goto statements for error handling when appropriate
* there is a lot of unnecessary code in the driver. It was
originally a standalone driver. Remove unneeded code.
diff --git a/drivers/staging/dgnc/dgnc_cls.c b/drivers/staging/dgnc/dgnc_cls.c
index c20ffdd254d8..9639035fddd1 100644
--- a/drivers/staging/dgnc/dgnc_cls.c
+++ b/drivers/staging/dgnc/dgnc_cls.c
@@ -14,15 +14,15 @@
*/
#include <linux/kernel.h>
-#include <linux/sched.h> /* For jiffies, task states */
-#include <linux/interrupt.h> /* For tasklet and interrupt structs/defines */
-#include <linux/delay.h> /* For udelay */
-#include <linux/io.h> /* For read[bwl]/write[bwl] */
-#include <linux/serial.h> /* For struct async_serial */
-#include <linux/serial_reg.h> /* For the various UART offsets */
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/serial.h>
+#include <linux/serial_reg.h>
#include <linux/pci.h>
-#include "dgnc_driver.h" /* Driver main header file */
+#include "dgnc_driver.h"
#include "dgnc_cls.h"
#include "dgnc_tty.h"
@@ -273,7 +273,6 @@ static inline void cls_set_no_input_flow_control(struct channel_t *ch)
}
/*
- * cls_clear_break.
* Determines whether its time to shut off break condition.
*
* No locks are assumed to be held when calling this function.
@@ -283,12 +282,11 @@ static inline void cls_clear_break(struct channel_t *ch, int force)
{
unsigned long flags;
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ if (!ch)
return;
spin_lock_irqsave(&ch->ch_lock, flags);
- /* Bail if we aren't currently sending a break. */
if (!ch->ch_stop_sending_break) {
spin_unlock_irqrestore(&ch->ch_lock, flags);
return;
@@ -316,16 +314,14 @@ static void cls_copy_data_from_uart_to_queue(struct channel_t *ch)
ushort tail;
unsigned long flags;
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ if (!ch)
return;
spin_lock_irqsave(&ch->ch_lock, flags);
- /* cache head and tail of queue */
head = ch->ch_r_head;
tail = ch->ch_r_tail;
- /* Store how much space we have left in the queue */
qleft = tail - head - 1;
if (qleft < 0)
qleft += RQUEUEMASK + 1;
@@ -343,9 +339,7 @@ static void cls_copy_data_from_uart_to_queue(struct channel_t *ch)
if (!(linestatus & (UART_LSR_DR)))
break;
- /*
- * Discard character if we are ignoring the error mask.
- */
+ /* Discard character if we are ignoring the error mask. */
if (linestatus & error_mask) {
linestatus = 0;
readb(&ch->ch_cls_uart->txrx);
@@ -356,9 +350,6 @@ static void cls_copy_data_from_uart_to_queue(struct channel_t *ch)
* If our queue is full, we have no choice but to drop some
* data. The assumption is that HWFLOW or SWFLOW should have
* stopped things way way before we got to this point.
- *
- * I decided that I wanted to ditch the oldest data first,
- * I hope thats okay with everyone? Yes? Good.
*/
while (qleft < 1) {
tail = (tail + 1) & RQUEUEMASK;
@@ -380,13 +371,10 @@ static void cls_copy_data_from_uart_to_queue(struct channel_t *ch)
if (ch->ch_equeue[head] & UART_LSR_FE)
ch->ch_err_frame++;
- /* Add to, and flip head if needed */
head = (head + 1) & RQUEUEMASK;
ch->ch_rxcount++;
}
- /* Write new final heads to channel structure. */
-
ch->ch_r_head = head & RQUEUEMASK;
ch->ch_e_head = head & EQUEUEMASK;
@@ -398,7 +386,7 @@ static void cls_assert_modem_signals(struct channel_t *ch)
{
unsigned char out;
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ if (!ch)
return;
out = ch->ch_mostat;
@@ -421,12 +409,11 @@ static void cls_copy_data_from_queue_to_uart(struct channel_t *ch)
uint len_written = 0;
unsigned long flags;
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ if (!ch)
return;
spin_lock_irqsave(&ch->ch_lock, flags);
- /* No data to write to the UART */
if (ch->ch_w_tail == ch->ch_w_head)
goto exit_unlock;
@@ -440,12 +427,10 @@ static void cls_copy_data_from_queue_to_uart(struct channel_t *ch)
n = 32;
- /* cache head and tail of queue */
head = ch->ch_w_head & WQUEUEMASK;
tail = ch->ch_w_tail & WQUEUEMASK;
qlen = (head - tail) & WQUEUEMASK;
- /* Find minimum of the FIFO space, versus queue length */
n = min(n, qlen);
while (n > 0) {
@@ -494,7 +479,7 @@ static void cls_parse_modem(struct channel_t *ch, unsigned char signals)
unsigned char msignals = signals;
unsigned long flags;
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ if (!ch)
return;
/*
@@ -524,10 +509,7 @@ static void cls_parse_modem(struct channel_t *ch, unsigned char signals)
}
spin_unlock_irqrestore(&ch->ch_lock, flags);
- /*
- * Scrub off lower bits. They signify delta's, which I don't
- * care about
- */
+ /* Scrub off lower bits. They signify delta's */
signals &= 0xf0;
spin_lock_irqsave(&ch->ch_lock, flags);
@@ -569,34 +551,28 @@ static inline void cls_parse_isr(struct dgnc_board *brd, uint port)
return;
ch = brd->channels[port];
- if (ch->magic != DGNC_CHANNEL_MAGIC)
- return;
/* Here we try to figure out what caused the interrupt to happen */
while (1) {
isr = readb(&ch->ch_cls_uart->isr_fcr);
- /* Bail if no pending interrupt on port */
if (isr & UART_IIR_NO_INT)
break;
/* Receive Interrupt pending */
if (isr & (UART_IIR_RDI | UART_IIR_RDI_TIMEOUT)) {
- /* Read data from uart -> queue */
cls_copy_data_from_uart_to_queue(ch);
dgnc_check_queue_flow_control(ch);
}
/* Transmit Hold register empty pending */
if (isr & UART_IIR_THRI) {
- /* Transfer data (if any) from Write Queue -> UART. */
spin_lock_irqsave(&ch->ch_lock, flags);
ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM);
spin_unlock_irqrestore(&ch->ch_lock, flags);
cls_copy_data_from_queue_to_uart(ch);
}
- /* Parse any modem signal changes */
cls_parse_modem(ch, readb(&ch->ch_cls_uart->msr));
}
}
@@ -604,12 +580,14 @@ static inline void cls_parse_isr(struct dgnc_board *brd, uint port)
/* Channel lock MUST be held before calling this function! */
static void cls_flush_uart_write(struct channel_t *ch)
{
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ if (!ch)
return;
writeb((UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_XMIT),
&ch->ch_cls_uart->isr_fcr);
- usleep_range(10, 20);
+
+ /* Must use *delay family functions in atomic context */
+ udelay(10);
ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM);
}
@@ -617,7 +595,7 @@ static void cls_flush_uart_write(struct channel_t *ch)
/* Channel lock MUST be held before calling this function! */
static void cls_flush_uart_read(struct channel_t *ch)
{
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ if (!ch)
return;
/*
@@ -634,10 +612,7 @@ static void cls_flush_uart_read(struct channel_t *ch)
udelay(10);
}
-/*
- * cls_param()
- * Send any/all changes to the line to the UART.
- */
+/* Send any/all changes to the line to the UART. */
static void cls_param(struct tty_struct *tty)
{
unsigned char lcr = 0;
@@ -650,23 +625,22 @@ static void cls_param(struct tty_struct *tty)
struct channel_t *ch;
struct un_t *un;
- if (!tty || tty->magic != TTY_MAGIC)
+ if (!tty)
return;
un = (struct un_t *)tty->driver_data;
- if (!un || un->magic != DGNC_UNIT_MAGIC)
+ if (!un)
return;
ch = un->un_ch;
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ if (!ch)
return;
bd = ch->ch_bd;
- if (!bd || bd->magic != DGNC_BOARD_MAGIC)
+ if (!bd)
return;
/* If baud rate is zero, flush queues, and set mval to drop DTR. */
-
if ((ch->ch_c_cflag & (CBAUD)) == 0) {
ch->ch_r_head = 0;
ch->ch_r_tail = 0;
@@ -776,10 +750,6 @@ static void cls_param(struct tty_struct *tty)
if (!(ch->ch_c_cflag & PARODD))
lcr |= UART_LCR_EPAR;
- /*
- * Not all platforms support mark/space parity,
- * so this will hide behind an ifdef.
- */
#ifdef CMSPAR
if (ch->ch_c_cflag & CMSPAR)
lcr |= UART_LCR_SPAR;
@@ -850,10 +820,6 @@ static void cls_param(struct tty_struct *tty)
if (ch->ch_digi.digi_flags & CTSPACE || ch->ch_c_cflag & CRTSCTS) {
cls_set_cts_flow_control(ch);
} else if (ch->ch_c_iflag & IXON) {
- /*
- * If start/stop is set to disable, then we should
- * disable flow control
- */
if ((ch->ch_startc == _POSIX_VDISABLE) ||
(ch->ch_stopc == _POSIX_VDISABLE))
cls_set_no_output_flow_control(ch);
@@ -866,10 +832,6 @@ static void cls_param(struct tty_struct *tty)
if (ch->ch_digi.digi_flags & RTSPACE || ch->ch_c_cflag & CRTSCTS) {
cls_set_rts_flow_control(ch);
} else if (ch->ch_c_iflag & IXOFF) {
- /*
- * If start/stop is set to disable, then we should disable
- * flow control
- */
if ((ch->ch_startc == _POSIX_VDISABLE) ||
(ch->ch_stopc == _POSIX_VDISABLE))
cls_set_no_input_flow_control(ch);
@@ -881,12 +843,10 @@ static void cls_param(struct tty_struct *tty)
cls_assert_modem_signals(ch);
- /* Get current status of the modem signals now */
cls_parse_modem(ch, readb(&ch->ch_cls_uart->msr));
}
-/* Our board poller function. */
-
+/* Board poller function. */
static void cls_tasklet(unsigned long data)
{
struct dgnc_board *bd = (struct dgnc_board *)data;
@@ -896,10 +856,9 @@ static void cls_tasklet(unsigned long data)
int state = 0;
int ports = 0;
- if (!bd || bd->magic != DGNC_BOARD_MAGIC)
+ if (!bd)
return;
- /* Cache a couple board values */
spin_lock_irqsave(&bd->bd_lock, flags);
state = bd->state;
ports = bd->nasync;
@@ -911,10 +870,7 @@ static void cls_tasklet(unsigned long data)
*/
spin_lock_irqsave(&bd->bd_intr_lock, flags);
- /* If board is ready, parse deeper to see if there is anything to do. */
-
if ((state == BOARD_READY) && (ports > 0)) {
- /* Loop on each port */
for (i = 0; i < ports; i++) {
ch = bd->channels[i];
@@ -934,8 +890,6 @@ static void cls_tasklet(unsigned long data)
cls_copy_data_from_queue_to_uart(ch);
dgnc_wakeup_writes(ch);
- /* Check carrier function. */
-
dgnc_carrier(ch);
/*
@@ -950,11 +904,7 @@ static void cls_tasklet(unsigned long data)
spin_unlock_irqrestore(&bd->bd_intr_lock, flags);
}
-/*
- * cls_intr()
- *
- * Classic specific interrupt handler.
- */
+/* Classic specific interrupt handler. */
static irqreturn_t cls_intr(int irq, void *voidbrd)
{
struct dgnc_board *brd = voidbrd;
@@ -962,33 +912,20 @@ static irqreturn_t cls_intr(int irq, void *voidbrd)
unsigned char poll_reg;
unsigned long flags;
- /*
- * Check to make sure it didn't receive interrupt with a null board
- * associated or a board pointer that wasn't ours.
- */
- if (!brd || brd->magic != DGNC_BOARD_MAGIC)
+ if (!brd)
return IRQ_NONE;
spin_lock_irqsave(&brd->bd_intr_lock, flags);
- /*
- * Check the board's global interrupt offset to see if we
- * we actually do have an interrupt pending for us.
- */
poll_reg = readb(brd->re_map_membase + UART_CLASSIC_POLL_ADDR_OFFSET);
-
- /* If 0, no interrupts pending */
if (!poll_reg) {
spin_unlock_irqrestore(&brd->bd_intr_lock, flags);
return IRQ_NONE;
}
- /* Parse each port to find out what caused the interrupt */
for (i = 0; i < brd->nasync; i++)
cls_parse_isr(brd, i);
- /* Schedule tasklet to more in-depth servicing at a better time. */
-
tasklet_schedule(&brd->helper_tasklet);
spin_unlock_irqrestore(&brd->bd_intr_lock, flags);
@@ -1013,7 +950,7 @@ static void cls_enable_receiver(struct channel_t *ch)
}
/*
- * This function basically goes to sleep for secs, or until
+ * This function basically goes to sleep for seconds, or until
* it gets signalled that the port has fully drained.
*/
static int cls_drain(struct tty_struct *tty, uint seconds)
@@ -1022,15 +959,15 @@ static int cls_drain(struct tty_struct *tty, uint seconds)
struct channel_t *ch;
struct un_t *un;
- if (!tty || tty->magic != TTY_MAGIC)
+ if (!tty)
return -ENXIO;
un = (struct un_t *)tty->driver_data;
- if (!un || un->magic != DGNC_UNIT_MAGIC)
+ if (!un)
return -ENXIO;
ch = un->un_ch;
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ if (!ch)
return -ENXIO;
spin_lock_irqsave(&ch->ch_lock, flags);
@@ -1047,7 +984,7 @@ static int cls_drain(struct tty_struct *tty, uint seconds)
static void cls_send_start_character(struct channel_t *ch)
{
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ if (!ch)
return;
if (ch->ch_startc != _POSIX_VDISABLE) {
@@ -1058,7 +995,7 @@ static void cls_send_start_character(struct channel_t *ch)
static void cls_send_stop_character(struct channel_t *ch)
{
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ if (!ch)
return;
if (ch->ch_stopc != _POSIX_VDISABLE) {
@@ -1067,7 +1004,6 @@ static void cls_send_stop_character(struct channel_t *ch)
}
}
-/* Inits UART */
static void cls_uart_init(struct channel_t *ch)
{
unsigned char lcrb = readb(&ch->ch_cls_uart->lcr);
@@ -1096,7 +1032,7 @@ static void cls_uart_init(struct channel_t *ch)
writeb(UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT,
&ch->ch_cls_uart->isr_fcr);
- udelay(10);
+ usleep_range(10, 20);
ch->ch_flags |= (CH_FIFO_ENABLED | CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM);
@@ -1104,25 +1040,21 @@ static void cls_uart_init(struct channel_t *ch)
readb(&ch->ch_cls_uart->msr);
}
-/* Turns off UART. */
-
static void cls_uart_off(struct channel_t *ch)
{
writeb(0, &ch->ch_cls_uart->ier);
}
/*
- * cls_get_uarts_bytes_left.
- * Returns 0 is nothing left in the FIFO, returns 1 otherwise.
- *
* The channel lock MUST be held by the calling function.
+ * Returns 0 if nothing is left in the FIFO, returns 1 otherwise.
*/
static uint cls_get_uart_bytes_left(struct channel_t *ch)
{
unsigned char left = 0;
unsigned char lsr = 0;
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ if (!ch)
return 0;
lsr = readb(&ch->ch_cls_uart->lsr);
@@ -1141,20 +1073,16 @@ static uint cls_get_uart_bytes_left(struct channel_t *ch)
}
/*
- * cls_send_break.
* Starts sending a break thru the UART.
- *
* The channel lock MUST be held by the calling function.
*/
static void cls_send_break(struct channel_t *ch, int msecs)
{
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ if (!ch)
return;
/* If we receive a time of 0, this means turn off the break. */
-
if (msecs == 0) {
- /* Turn break off, and unset some variables */
if (ch->ch_flags & CH_BREAK_SENDING) {
unsigned char temp = readb(&ch->ch_cls_uart->lcr);
@@ -1182,7 +1110,6 @@ static void cls_send_break(struct channel_t *ch, int msecs)
}
/*
- * cls_send_immediate_char.
* Sends a specific character as soon as possible to the UART,
* jumping over any bytes that might be in the write queue.
*
@@ -1190,7 +1117,7 @@ static void cls_send_break(struct channel_t *ch, int msecs)
*/
static void cls_send_immediate_char(struct channel_t *ch, unsigned char c)
{
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ if (!ch)
return;
writeb(c, &ch->ch_cls_uart->txrx);
@@ -1203,8 +1130,6 @@ static void cls_vpd(struct dgnc_board *brd)
int i = 0;
vpdbase = pci_resource_start(brd->pdev, 3);
-
- /* No VPD */
if (!vpdbase)
return;
@@ -1213,7 +1138,6 @@ static void cls_vpd(struct dgnc_board *brd)
if (!re_map_vpdbase)
return;
- /* Store the VPD into our buffer */
for (i = 0; i < 0x40; i++) {
brd->vpd[i] = readb(re_map_vpdbase + i);
pr_info("%x ", brd->vpd[i]);
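Two of the hunks above swap delay primitives in opposite directions: cls_flush_uart_write() is called with the channel lock held, so it moves from usleep_range() to udelay(), while cls_uart_init() runs where sleeping is allowed and moves the other way. A minimal illustration of that rule, using made-up helper names rather than the driver's own functions:

#include <linux/delay.h>
#include <linux/io.h>
#include <linux/spinlock.h>

/* Hypothetical example: caller holds a spinlock, so sleeping is forbidden. */
static void poke_hw_atomic(spinlock_t *lock, void __iomem *reg)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	writeb(0x01, reg);
	udelay(10);		/* busy-wait; must not schedule here */
	spin_unlock_irqrestore(lock, flags);
}

/* Hypothetical example: plain process context, so a real sleep is preferred. */
static void poke_hw_sleepable(void __iomem *reg)
{
	writeb(0x01, reg);
	usleep_range(10, 20);	/* lets the scheduler run something else */
}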
diff --git a/drivers/staging/dgnc/dgnc_cls.h b/drivers/staging/dgnc/dgnc_cls.h
index 463ad30efb3b..9dfa9682a897 100644
--- a/drivers/staging/dgnc/dgnc_cls.h
+++ b/drivers/staging/dgnc/dgnc_cls.h
@@ -13,27 +13,24 @@
* PURPOSE. See the GNU General Public License for more details.
*/
-#ifndef __DGNC_CLS_H
-#define __DGNC_CLS_H
+#ifndef _DGNC_CLS_H
+#define _DGNC_CLS_H
-/************************************************************************
- * Per channel/port Classic UART structure *
- ************************************************************************
- * Base Structure Entries Usage Meanings to Host *
- * *
- * W = read write R = read only *
- * U = Unused. *
- ************************************************************************/
-
-/*
- * txrx : WR RHR/THR - Holding reg
- * ier : WR IER - Interrupt Enable Reg
- * isr_fcr : WR ISR/FCR - Interrupt Status Reg/Fifo Control Reg
- * lcr : WR LCR - Line Control Reg
- * mcr : WR MCR - Modem Control Reg
- * lsr : WR LSR - Line Status Reg
- * msr : WR MSG - Modem Status Reg
- * spr : WR SPR - Scratch pad Reg
+/**
+ * struct cls_uart_struct - Per channel/port Classic UART.
+ *
+ * key - W = read write
+ * - R = read only
+ * - U = unused
+ *
+ * @txrx: (WR) Holding Register.
+ * @ier: (WR) Interrupt Enable Register.
+ * @isr_fcr: (WR) Interrupt Status Register/Fifo Control Register.
+ * @lcr: (WR) Line Control Register.
+ * @mcr: (WR) Modem Control Register.
+ * @lsr: (WR) Line Status Register.
+ * @msr: (WR) Modem Status Register.
+ * @spr: (WR) Scratch Pad Register.
*/
struct cls_uart_struct {
u8 txrx;
@@ -74,9 +71,6 @@ struct cls_uart_struct {
#define UART_EXAR654_IER_RTSDTR 0x40 /* Output Interrupt Enable */
#define UART_EXAR654_IER_CTSDSR 0x80 /* Input Interrupt Enable */
-/*
- * Our Global Variables
- */
extern struct board_ops dgnc_cls_ops;
-#endif
+#endif /* _DGNC_CLS_H */
diff --git a/drivers/staging/dgnc/dgnc_driver.c b/drivers/staging/dgnc/dgnc_driver.c
index 5381dbddd8bb..253f38b25a54 100644
--- a/drivers/staging/dgnc/dgnc_driver.c
+++ b/drivers/staging/dgnc/dgnc_driver.c
@@ -30,8 +30,6 @@ MODULE_AUTHOR("Digi International, http://www.digi.com");
MODULE_DESCRIPTION("Driver for the Digi International Neo and Classic PCI based product line");
MODULE_SUPPORTED_DEVICE("dgnc");
-/* File operations permitted on Control/Management major. */
-
static const struct file_operations dgnc_board_fops = {
.owner = THIS_MODULE,
.unlocked_ioctl = dgnc_mgmt_ioctl,
@@ -39,8 +37,6 @@ static const struct file_operations dgnc_board_fops = {
.release = dgnc_mgmt_close
};
-/* Globals */
-
uint dgnc_num_boards;
struct dgnc_board *dgnc_board[MAXBOARDS];
DEFINE_SPINLOCK(dgnc_global_lock);
@@ -48,12 +44,8 @@ DEFINE_SPINLOCK(dgnc_poll_lock); /* Poll scheduling lock */
uint dgnc_major;
int dgnc_poll_tick = 20; /* Poll interval - 20 ms */
-/* Static vars. */
-
static struct class *dgnc_class;
-/* Poller stuff */
-
static ulong dgnc_poll_time; /* Time of next poll */
static uint dgnc_poll_stop; /* Used to tell poller to stop */
static struct timer_list dgnc_poll_timer;
@@ -95,23 +87,17 @@ static const struct board_id dgnc_ids[] = {
};
/* Remap PCI memory. */
-
static int dgnc_do_remap(struct dgnc_board *brd)
{
- int rc = 0;
-
brd->re_map_membase = ioremap(brd->membase, 0x1000);
if (!brd->re_map_membase)
- rc = -ENOMEM;
+ return -ENOMEM;
- return rc;
+ return 0;
}
-/*
- * dgnc_found_board()
- *
- * A board has been found, init it.
- */
+
+/* A board has been found, initialize it. */
static struct dgnc_board *dgnc_found_board(struct pci_dev *pdev, int id)
{
struct dgnc_board *brd;
@@ -119,13 +105,11 @@ static struct dgnc_board *dgnc_found_board(struct pci_dev *pdev, int id)
int i = 0;
int rc = 0;
- /* get the board structure and prep it */
brd = kzalloc(sizeof(*brd), GFP_KERNEL);
if (!brd)
return ERR_PTR(-ENOMEM);
/* store the info for the board we've found */
- brd->magic = DGNC_BOARD_MAGIC;
brd->boardnum = dgnc_num_boards;
brd->vendor = dgnc_pci_tbl[id].vendor;
brd->device = dgnc_pci_tbl[id].device;
@@ -170,7 +154,6 @@ static struct dgnc_board *dgnc_found_board(struct pci_dev *pdev, int id)
* 4 Memory Mapped UARTs and Status
*/
- /* get the PCI Base Address Registers */
brd->membase = pci_resource_start(pdev, 4);
if (!brd->membase) {
@@ -191,14 +174,12 @@ static struct dgnc_board *dgnc_found_board(struct pci_dev *pdev, int id)
brd->iobase_end = pci_resource_end(pdev, 1);
brd->iobase = ((unsigned int)(brd->iobase)) & 0xFFFE;
- /* Assign the board_ops struct */
brd->bd_ops = &dgnc_cls_ops;
brd->bd_uart_offset = 0x8;
brd->bd_dividend = 921600;
rc = dgnc_do_remap(brd);
-
if (rc < 0)
goto failed;
@@ -237,7 +218,6 @@ static struct dgnc_board *dgnc_found_board(struct pci_dev *pdev, int id)
else
brd->dpatype = T_NEO | T_PCIBUS;
- /* get the PCI Base Address Registers */
brd->membase = pci_resource_start(pdev, 0);
brd->membase_end = pci_resource_end(pdev, 0);
@@ -246,7 +226,6 @@ static struct dgnc_board *dgnc_found_board(struct pci_dev *pdev, int id)
else
brd->membase &= ~15;
- /* Assign the board_ops struct */
brd->bd_ops = &dgnc_neo_ops;
brd->bd_uart_offset = 0x200;
@@ -272,7 +251,6 @@ static struct dgnc_board *dgnc_found_board(struct pci_dev *pdev, int id)
goto failed;
}
- /* init our poll helper tasklet */
tasklet_init(&brd->helper_tasklet,
brd->bd_ops->tasklet,
(unsigned long)brd);
@@ -289,21 +267,18 @@ failed:
static int dgnc_request_irq(struct dgnc_board *brd)
{
- int rc = 0;
-
if (brd->irq) {
- rc = request_irq(brd->irq, brd->bd_ops->intr,
+ int rc = request_irq(brd->irq, brd->bd_ops->intr,
IRQF_SHARED, "DGNC", brd);
-
if (rc) {
dev_err(&brd->pdev->dev,
"Failed to hook IRQ %d\n", brd->irq);
brd->state = BOARD_FAILED;
brd->dpastatus = BD_NOFEP;
- rc = -ENODEV;
+ return -ENODEV;
}
}
- return rc;
+ return 0;
}
static void dgnc_free_irq(struct dgnc_board *brd)
@@ -312,30 +287,12 @@ static void dgnc_free_irq(struct dgnc_board *brd)
free_irq(brd->irq, brd);
}
-/*
- * Function:
- *
- * dgnc_poll_handler
- *
- * Author:
- *
- * Scott H Kilau
- *
- * Parameters:
- *
- * dummy -- ignored
- *
- * Return Values:
- *
- * none
- *
- * Description:
- *
- * As each timer expires, it determines (a) whether the "transmit"
- * waiter needs to be woken up, and (b) whether the poller needs to
- * be rescheduled.
- */
+/*
+ * As each timer expires, it determines (a) whether the "transmit"
+ * waiter needs to be woken up, and (b) whether the poller needs to
+ * be rescheduled.
+ */
static void dgnc_poll_handler(ulong dummy)
{
struct dgnc_board *brd;
@@ -343,19 +300,16 @@ static void dgnc_poll_handler(ulong dummy)
int i;
unsigned long new_time;
- /* Go thru each board, kicking off a tasklet for each if needed */
for (i = 0; i < dgnc_num_boards; i++) {
brd = dgnc_board[i];
spin_lock_irqsave(&brd->bd_lock, flags);
- /* If board is in a failed state don't schedule a tasklet */
if (brd->state == BOARD_FAILED) {
spin_unlock_irqrestore(&brd->bd_lock, flags);
continue;
}
- /* Schedule a poll helper task */
tasklet_schedule(&brd->helper_tasklet);
spin_unlock_irqrestore(&brd->bd_lock, flags);
@@ -385,9 +339,7 @@ static int dgnc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
int rc;
struct dgnc_board *brd;
- /* wake up and enable device */
rc = pci_enable_device(pdev);
-
if (rc)
return -EIO;
@@ -395,8 +347,6 @@ static int dgnc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (IS_ERR(brd))
return PTR_ERR(brd);
- /* Do tty device initialization. */
-
rc = dgnc_tty_register(brd);
if (rc < 0) {
pr_err(DRVSTR ": Can't register tty devices (%d)\n", rc);
@@ -426,7 +376,6 @@ free_irq:
dgnc_free_irq(brd);
unregister_tty:
dgnc_tty_unregister(brd);
-
failed:
kfree(brd);
@@ -439,24 +388,14 @@ static struct pci_driver dgnc_driver = {
.id_table = dgnc_pci_tbl,
};
-/* Start of driver. */
-
static int dgnc_start(void)
{
int rc = 0;
unsigned long flags;
struct device *dev;
- /* make sure timer is initialized before we do anything else */
init_timer(&dgnc_poll_timer);
- /*
- * Register our base character device into the kernel.
- * This allows the download daemon to connect to the downld device
- * before any of the boards are init'ed.
- *
- * Register management/dpa devices
- */
rc = register_chrdev(0, "dgnc", &dgnc_board_fops);
if (rc < 0) {
pr_err(DRVSTR ": Can't register dgnc driver device (%d)\n", rc);
@@ -495,19 +434,16 @@ failed_device:
class_destroy(dgnc_class);
failed_class:
unregister_chrdev(dgnc_major, "dgnc");
+
return rc;
}
-/*
- * dgnc_cleanup_board()
- *
- * Free all the memory associated with a board
- */
+/* Free all the memory associated with a board */
static void dgnc_cleanup_board(struct dgnc_board *brd)
{
int i = 0;
- if (!brd || brd->magic != DGNC_BOARD_MAGIC)
+ if (!brd)
return;
switch (brd->device) {
@@ -534,7 +470,6 @@ static void dgnc_cleanup_board(struct dgnc_board *brd)
brd->re_map_membase = NULL;
}
- /* Free all allocated channels structs */
for (i = 0; i < MAXPORTS ; i++) {
if (brd->channels[i]) {
kfree(brd->channels[i]->ch_rqueue);
@@ -574,42 +509,28 @@ static void cleanup(void)
}
}
-/*
- * dgnc_cleanup_module()
- *
- * Module unload. This is where it all ends.
- */
static void __exit dgnc_cleanup_module(void)
{
cleanup();
pci_unregister_driver(&dgnc_driver);
}
-/*
- * init_module()
- *
- * Module load. This is where it all starts.
- */
static int __init dgnc_init_module(void)
{
int rc;
/* Initialize global stuff */
-
rc = dgnc_start();
-
if (rc < 0)
return rc;
/* Find and configure all the cards */
-
rc = pci_register_driver(&dgnc_driver);
if (rc) {
pr_warn("WARNING: dgnc driver load failed. No Digi Neo or Classic boards found.\n");
cleanup();
return rc;
}
-
return 0;
}
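dgnc_poll_handler() above is driven by the legacy timer API that init_timer() sets up and, as its comment says, it re-arms itself each tick after kicking the per-board tasklets. The general shape of that pattern (a generic sketch with invented names, not the driver's exact code) is:

#include <linux/timer.h>
#include <linux/jiffies.h>

static struct timer_list example_poll_timer;
static int example_poll_tick = 20;	/* poll interval in ms */

static void example_poll_handler(unsigned long dummy)
{
	/* ... do the per-tick work, e.g. schedule helper tasklets ... */

	/* Re-arm for the next poll interval. */
	mod_timer(&example_poll_timer,
		  jiffies + msecs_to_jiffies(example_poll_tick));
}

static void example_poll_start(void)
{
	init_timer(&example_poll_timer);
	example_poll_timer.function = example_poll_handler;
	example_poll_timer.data = 0;
	example_poll_timer.expires = jiffies + msecs_to_jiffies(example_poll_tick);
	add_timer(&example_poll_timer);
}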
diff --git a/drivers/staging/dgnc/dgnc_driver.h b/drivers/staging/dgnc/dgnc_driver.h
index c8119f2fe881..980410fc4801 100644
--- a/drivers/staging/dgnc/dgnc_driver.h
+++ b/drivers/staging/dgnc/dgnc_driver.h
@@ -11,12 +11,10 @@
* but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
* implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
* PURPOSE. See the GNU General Public License for more details.
- *
- * Driver includes
*/
-#ifndef __DGNC_DRIVER_H
-#define __DGNC_DRIVER_H
+#ifndef _DGNC_DRIVER_H
+#define _DGNC_DRIVER_H
#include <linux/types.h>
#include <linux/tty.h>
@@ -24,8 +22,6 @@
#include "digi.h" /* Digi specific ioctl header */
-/* Driver defines */
-
/* Driver identification and error statements */
#define PROCSTR "dgnc" /* /proc entries */
#define DEVSTR "/dev/dg/dgnc" /* /dev entries */
@@ -39,11 +35,6 @@
#define MAXPORTS 8
#define MAXTTYNAMELEN 200
-/* Our 3 magic numbers for our board, channel and unit structs */
-#define DGNC_BOARD_MAGIC 0x5c6df104
-#define DGNC_CHANNEL_MAGIC 0x6c6df104
-#define DGNC_UNIT_MAGIC 0x7c6df104
-
/* Serial port types */
#define DGNC_SERIAL 0
#define DGNC_PRINT 1
@@ -53,10 +44,7 @@
#define PORT_NUM(dev) ((dev) & 0x7f)
#define IS_PRINT(dev) (((dev) & 0xff) >= 0x80)
-/*
- *MAX number of stop characters we will send
- * when our read queue is getting full
- */
+/* MAX number of stop characters sent when our read queue is getting full */
#define MAX_STOPS_SENT 5
/* 4 extra for alignment play space */
@@ -82,27 +70,24 @@
#endif
/* All the possible states the driver can be while being loaded. */
-
enum {
DRIVER_INITIALIZED = 0,
DRIVER_READY
};
/* All the possible states the board can be while booting up. */
-
enum {
BOARD_FAILED = 0,
BOARD_FOUND,
BOARD_READY
};
-/* Structures and closely related defines. */
-
struct dgnc_board;
struct channel_t;
-/* Per board operations structure */
-
+/**
+ * struct board_ops - Per board operations.
+ */
struct board_ops {
void (*tasklet)(unsigned long data);
irqreturn_t (*intr)(int irq, void *voidbrd);
@@ -128,77 +113,107 @@ struct board_ops {
#define BD_IS_PCI_EXPRESS 0x0001 /* Is a PCI Express board */
-/* Per-board information */
-
+/**
+ * struct dgnc_board - Per board information.
+ * @boardnum: Board number (0 - 32).
+ *
+ * @type: Type of board.
+ * @name: Product name.
+ * @pdev: Pointer to the pci_dev structure.
+ * @bd_flags: Board flags.
+ * @vendor: PCI vendor ID.
+ * @device: PCI device ID.
+ * @subvendor: PCI subsystem vendor ID.
+ * @subdevice: PCI subsystem device ID.
+ * @rev: PCI revision ID.
+ * @pci_bus: PCI bus value.
+ * @pci_slot: PCI slot value.
+ * @maxports: Maximum ports this board can handle.
+ * @dvid: Board specific device ID.
+ * @vpd: VPD of this board, if found.
+ * @serial_num: Serial number of this board, if found in VPD.
+ * @bd_lock: Used to protect board.
+ * @bd_intr_lock: Protect poller tasklet and interrupt routine from each other.
+ * @state: State of the card.
+ * @state_wait: Queue to sleep on for state change.
+ * @helper_tasklet: Poll helper tasklet.
+ * @nasync: Number of ports on card.
+ * @irq: Interrupt request number.
+ * @membase: Start of base memory of the card.
+ * @membase_end: End of base memory of the card.
+ * @iobase: Start of IO base of the card.
+ * @iobase_end: End of IO base of the card.
+ * @bd_uart_offset: Space between each UART.
+ * @channels: array of pointers to our channels.
+ * @serial_driver: Pointer to the serial driver.
+ * @serial_name: Serial driver name.
+ * @print_driver: Pointer to the print driver.
+ * @print_name: Print driver name.
+ * @dpatype: Board type as defined by DPA.
+ * @dpastatus: Board status as defined by DPA.
+ * @bd_dividend: Board/UART's specific dividend.
+ * @bd_ops: Pointer to board operations structure.
+ * @proc_entry_pointer: /proc/<board> entry.
+ * @dgnc_board_table: /proc/<board> entry.
+ */
struct dgnc_board {
- int magic; /* Board Magic number. */
- int boardnum; /* Board number: 0-32 */
+ int boardnum;
- int type; /* Type of board */
- char *name; /* Product Name */
- struct pci_dev *pdev; /* Pointer to the pci_dev struct */
- unsigned long bd_flags; /* Board flags */
- u16 vendor; /* PCI vendor ID */
- u16 device; /* PCI device ID */
- u16 subvendor; /* PCI subsystem vendor ID */
- u16 subdevice; /* PCI subsystem device ID */
- unsigned char rev; /* PCI revision ID */
- uint pci_bus; /* PCI bus value */
- uint pci_slot; /* PCI slot value */
- uint maxports; /* MAX ports this board can handle */
- unsigned char dvid; /* Board specific device id */
- unsigned char vpd[128]; /* VPD of board, if found */
- unsigned char serial_num[20]; /* Serial number of board,
- * if found in VPD
- */
+ int type;
+ char *name;
+ struct pci_dev *pdev;
+ unsigned long bd_flags;
+ u16 vendor;
+ u16 device;
+ u16 subvendor;
+ u16 subdevice;
+ unsigned char rev;
+ uint pci_bus;
+ uint pci_slot;
+ uint maxports;
+ unsigned char dvid;
+ unsigned char vpd[128];
+ unsigned char serial_num[20];
- spinlock_t bd_lock; /* Used to protect board */
+ /* used to protect the board */
+ spinlock_t bd_lock;
- spinlock_t bd_intr_lock; /* Used to protect the poller tasklet
- * and the interrupt routine from each
- * other.
- */
+ /* Protect poller tasklet and interrupt routine from each other. */
+ spinlock_t bd_intr_lock;
- uint state; /* State of card. */
- wait_queue_head_t state_wait; /* Place to sleep on for state change */
+ uint state;
+ wait_queue_head_t state_wait;
- struct tasklet_struct helper_tasklet; /* Poll helper tasklet */
+ struct tasklet_struct helper_tasklet;
- uint nasync; /* Number of ports on card */
+ uint nasync;
- uint irq; /* Interrupt request number */
+ uint irq;
- ulong membase; /* Start of base memory of the card */
- ulong membase_end; /* End of base memory of the card */
+ ulong membase;
+ ulong membase_end;
- u8 __iomem *re_map_membase; /* Remapped memory of the card */
+ u8 __iomem *re_map_membase;
- ulong iobase; /* Start of io base of the card */
- ulong iobase_end; /* End of io base of the card */
+ ulong iobase;
+ ulong iobase_end;
- uint bd_uart_offset; /* Space between each UART */
+ uint bd_uart_offset;
- struct channel_t *channels[MAXPORTS]; /* array of pointers
- * to our channels.
- */
+ struct channel_t *channels[MAXPORTS];
struct tty_driver *serial_driver;
char serial_name[200];
struct tty_driver *print_driver;
char print_name[200];
- u16 dpatype; /* The board "type",
- * as defined by DPA
- */
- u16 dpastatus; /* The board "status",
- * as defined by DPA
- */
+ u16 dpatype;
+ u16 dpastatus;
- uint bd_dividend; /* Board/UARTs specific dividend */
+ uint bd_dividend;
struct board_ops *bd_ops;
- /* /proc/<board> entries */
struct proc_dir_entry *proc_entry_pointer;
struct dgnc_proc_entry *dgnc_board_table;
@@ -221,17 +236,23 @@ struct dgnc_board {
struct device;
-/* Structure for terminal or printer unit. */
+/**
+ * struct un_t - terminal or printer unit
+ * @un_open_count: Counter of opens to port.
+ * @un_tty: Pointer to unit tty structure.
+ * @un_flags: Unit flags.
+ * @un_flags_wait: Place to sleep to wait on unit.
+ * @un_dev: Minor device number.
+ */
struct un_t {
- int magic; /* Unit Magic Number. */
struct channel_t *un_ch;
ulong un_time;
uint un_type;
- uint un_open_count; /* Counter of opens to port */
- struct tty_struct *un_tty; /* Pointer to unit tty structure */
- uint un_flags; /* Unit flags */
- wait_queue_head_t un_flags_wait; /* Place to sleep to wait on unit */
- uint un_dev; /* Minor device number */
+ uint un_open_count;
+ struct tty_struct *un_tty;
+ uint un_flags;
+ wait_queue_head_t un_flags_wait;
+ uint un_dev;
struct device *un_sysfs;
};
@@ -263,102 +284,137 @@ struct un_t {
#define EQUEUESIZE RQUEUESIZE
#define WQUEUESIZE (WQUEUEMASK + 1)
-/* Channel information structure. */
+/**
+ * struct channel_t - Channel information.
+ * @ch_bd: Pointer to board structure.
+ * @ch_digi: Transparent print structure.
+ * @ch_tun: Terminal unit information.
+ * @ch_pun: Printer unit information.
+ * @ch_lock: Provide for serialization.
+ * @ch_flags_wait: Channel flags wait queue.
+ * @ch_portnum: Port number, 0 offset.
+ * @ch_open_count: Open count.
+ * @ch_flags: Channel flags.
+ * @ch_close_delay: How long we should drop RTS/DTR for.
+ * @ch_cpstime: Time for CPS calculations.
+ * @ch_c_iflag: Channel iflags.
+ * @ch_c_cflag: Channel cflags.
+ * @ch_c_oflag: Channel oflags.
+ * @ch_c_lflag: Channel lflags.
+ * @ch_stopc: Stop character.
+ * @ch_startc: Start character.
+ * @ch_old_baud: Cache of the current baud rate.
+ * @ch_custom_speed: Custom baud rate, if set.
+ * @ch_wopen: Waiting for open process count.
+ * @ch_mostat: FEP output modem status.
+ * @ch_mistat: FEP input modem status.
+ * @ch_neo_uart: Pointer to the mapped neo UART struct.
+ * @ch_cls_uart: Pointer to the mapped cls UART struct.
+ * @ch_cached_lsr: Cached value of the LSR register.
+ * @ch_rqueue: Read queue buffer, malloc'ed.
+ * @ch_r_head: Head location of the read queue.
+ * @ch_r_tail: Tail location of the read queue.
+ * @ch_equeue: Error queue buffer, malloc'ed.
+ * @ch_e_head: Head location of the error queue.
+ * @ch_e_tail: Tail location of the error queue.
+ * @ch_wqueue: Write queue buffer, malloc'ed.
+ * @ch_w_head: Head location of the write queue.
+ * @ch_w_tail: Tail location of the write queue.
+ * @ch_rxcount: Total of data received so far.
+ * @ch_txcount: Total of data transmitted so far.
+ * @ch_r_tlevel: Receive trigger level.
+ * @ch_t_tlevel: Transmit trigger level.
+ * @ch_r_watermark: Receive water mark.
+ * @ch_stop_sending_break: Time we should STOP sending a break.
+ * @ch_stops_sent: How many times a stop character has been sent to try
+ * to stop the other side from sending.
+ * @ch_err_parity: Count of parity errors on channel.
+ * @ch_err_frame: Count of framing errors on channel.
+ * @ch_err_break: Count of breaks on channel.
+ * @ch_err_overrun: Count of overruns on channel.
+ * @ch_xon_sends: Count of xons transmitted.
+ * @ch_xoff_sends: Count of xoffs transmitted.
+ * @proc_entry_pointer: /proc/<board>/<channel> entry.
+ * @dgnc_channel_table: /proc/<board>/<channel> entry.
+ */
struct channel_t {
- int magic; /* Channel Magic Number */
- struct dgnc_board *ch_bd; /* Board structure pointer */
- struct digi_t ch_digi; /* Transparent Print structure */
- struct un_t ch_tun; /* Terminal unit info */
- struct un_t ch_pun; /* Printer unit info */
+ struct dgnc_board *ch_bd;
+ struct digi_t ch_digi;
+ struct un_t ch_tun;
+ struct un_t ch_pun;
- spinlock_t ch_lock; /* provide for serialization */
+ spinlock_t ch_lock; /* provide for serialization */
wait_queue_head_t ch_flags_wait;
- uint ch_portnum; /* Port number, 0 offset. */
- uint ch_open_count; /* open count */
- uint ch_flags; /* Channel flags */
+ uint ch_portnum;
+ uint ch_open_count;
+ uint ch_flags;
- ulong ch_close_delay; /* How long we should
- * drop RTS/DTR for
- */
+ ulong ch_close_delay;
- ulong ch_cpstime; /* Time for CPS calculations */
+ ulong ch_cpstime;
- tcflag_t ch_c_iflag; /* channel iflags */
- tcflag_t ch_c_cflag; /* channel cflags */
- tcflag_t ch_c_oflag; /* channel oflags */
- tcflag_t ch_c_lflag; /* channel lflags */
- unsigned char ch_stopc; /* Stop character */
- unsigned char ch_startc; /* Start character */
+ tcflag_t ch_c_iflag;
+ tcflag_t ch_c_cflag;
+ tcflag_t ch_c_oflag;
+ tcflag_t ch_c_lflag;
+ unsigned char ch_stopc;
+ unsigned char ch_startc;
- uint ch_old_baud; /* Cache of the current baud */
- uint ch_custom_speed;/* Custom baud, if set */
+ uint ch_old_baud;
+ uint ch_custom_speed;
- uint ch_wopen; /* Waiting for open process cnt */
+ uint ch_wopen;
- unsigned char ch_mostat; /* FEP output modem status */
- unsigned char ch_mistat; /* FEP input modem status */
+ unsigned char ch_mostat;
+ unsigned char ch_mistat;
- struct neo_uart_struct __iomem *ch_neo_uart; /* Pointer to the
- * "mapped" UART struct
- */
- struct cls_uart_struct __iomem *ch_cls_uart; /* Pointer to the
- * "mapped" UART struct
- */
+ struct neo_uart_struct __iomem *ch_neo_uart;
+ struct cls_uart_struct __iomem *ch_cls_uart;
- unsigned char ch_cached_lsr; /* Cached value of the LSR register */
+ unsigned char ch_cached_lsr;
- unsigned char *ch_rqueue; /* Our read queue buffer - malloc'ed */
- ushort ch_r_head; /* Head location of the read queue */
- ushort ch_r_tail; /* Tail location of the read queue */
+ unsigned char *ch_rqueue;
+ ushort ch_r_head;
+ ushort ch_r_tail;
- unsigned char *ch_equeue; /* Our error queue buffer - malloc'ed */
- ushort ch_e_head; /* Head location of the error queue */
- ushort ch_e_tail; /* Tail location of the error queue */
+ unsigned char *ch_equeue;
+ ushort ch_e_head;
+ ushort ch_e_tail;
- unsigned char *ch_wqueue; /* Our write queue buffer - malloc'ed */
- ushort ch_w_head; /* Head location of the write queue */
- ushort ch_w_tail; /* Tail location of the write queue */
+ unsigned char *ch_wqueue;
+ ushort ch_w_head;
+ ushort ch_w_tail;
- ulong ch_rxcount; /* total of data received so far */
- ulong ch_txcount; /* total of data transmitted so far */
+ ulong ch_rxcount;
+ ulong ch_txcount;
- unsigned char ch_r_tlevel; /* Receive Trigger level */
- unsigned char ch_t_tlevel; /* Transmit Trigger level */
+ unsigned char ch_r_tlevel;
+ unsigned char ch_t_tlevel;
- unsigned char ch_r_watermark; /* Receive Watermark */
+ unsigned char ch_r_watermark;
- ulong ch_stop_sending_break; /* Time we should STOP
- * sending a break
- */
+ ulong ch_stop_sending_break;
+ uint ch_stops_sent;
- uint ch_stops_sent; /* How many times I have sent a stop
- * character to try to stop the other
- * guy sending.
- */
- ulong ch_err_parity; /* Count of parity errors on channel */
- ulong ch_err_frame; /* Count of framing errors on channel */
- ulong ch_err_break; /* Count of breaks on channel */
- ulong ch_err_overrun; /* Count of overruns on channel */
+ ulong ch_err_parity;
+ ulong ch_err_frame;
+ ulong ch_err_break;
+ ulong ch_err_overrun;
- ulong ch_xon_sends; /* Count of xons transmitted */
- ulong ch_xoff_sends; /* Count of xoffs transmitted */
+ ulong ch_xon_sends;
+ ulong ch_xoff_sends;
- /* /proc/<board>/<channel> entries */
struct proc_dir_entry *proc_entry_pointer;
struct dgnc_proc_entry *dgnc_channel_table;
};
-/* Our Global Variables. */
-
extern uint dgnc_major; /* Our driver/mgmt major */
extern int dgnc_poll_tick; /* Poll interval - 20 ms */
extern spinlock_t dgnc_global_lock; /* Driver global spinlock */
extern spinlock_t dgnc_poll_lock; /* Poll scheduling lock */
extern uint dgnc_num_boards; /* Total number of boards */
-extern struct dgnc_board *dgnc_board[MAXBOARDS];/* Array of board
- * structs
- */
+extern struct dgnc_board *dgnc_board[MAXBOARDS];/* Array of boards */
-#endif
+#endif /* _DGNC_DRIVER_H */
diff --git a/drivers/staging/dgnc/dgnc_mgmt.c b/drivers/staging/dgnc/dgnc_mgmt.c
index 9d9b15d6358a..3ca473b47daf 100644
--- a/drivers/staging/dgnc/dgnc_mgmt.c
+++ b/drivers/staging/dgnc/dgnc_mgmt.c
@@ -20,11 +20,11 @@
#include <linux/kernel.h>
#include <linux/ctype.h>
-#include <linux/sched.h> /* For jiffies, task states */
-#include <linux/interrupt.h> /* For tasklet and interrupt structs/defines */
+#include <linux/sched.h>
+#include <linux/interrupt.h>
#include <linux/serial_reg.h>
#include <linux/termios.h>
-#include <linux/uaccess.h> /* For copy_from_user/copy_to_user */
+#include <linux/uaccess.h>
#include "dgnc_driver.h"
#include "dgnc_pci.h"
@@ -33,64 +33,60 @@
/* Our "in use" variables, to enforce 1 open only */
static int dgnc_mgmt_in_use[MAXMGMTDEVICES];
-/*
- * dgnc_mgmt_open()
- *
- * Open the mgmt/downld/dpa device
+/**
+ * dgnc_mgmt_open() - Open the mgmt/downld/dpa device.
*/
int dgnc_mgmt_open(struct inode *inode, struct file *file)
{
unsigned long flags;
unsigned int minor = iminor(inode);
+ int rc = 0;
spin_lock_irqsave(&dgnc_global_lock, flags);
- /* mgmt device */
- if (minor < MAXMGMTDEVICES) {
- /* Only allow 1 open at a time on mgmt device */
- if (dgnc_mgmt_in_use[minor]) {
- spin_unlock_irqrestore(&dgnc_global_lock, flags);
- return -EBUSY;
- }
- dgnc_mgmt_in_use[minor]++;
- } else {
- spin_unlock_irqrestore(&dgnc_global_lock, flags);
- return -ENXIO;
+ if (minor >= MAXMGMTDEVICES) {
+ rc = -ENXIO;
+ goto out;
}
+ /* Only allow 1 open at a time on mgmt device */
+ if (dgnc_mgmt_in_use[minor]) {
+ rc = -EBUSY;
+ goto out;
+ }
+ dgnc_mgmt_in_use[minor]++;
+out:
spin_unlock_irqrestore(&dgnc_global_lock, flags);
- return 0;
+ return rc;
}
-/*
- * dgnc_mgmt_close()
- *
- * Open the mgmt/dpa device
+/**
+ * dgnc_mgmt_close() - Close the mgmt/dpa device.
*/
int dgnc_mgmt_close(struct inode *inode, struct file *file)
{
unsigned long flags;
unsigned int minor = iminor(inode);
+ int rc = 0;
spin_lock_irqsave(&dgnc_global_lock, flags);
- /* mgmt device */
- if (minor < MAXMGMTDEVICES) {
- if (dgnc_mgmt_in_use[minor])
- dgnc_mgmt_in_use[minor] = 0;
+ if (minor >= MAXMGMTDEVICES) {
+ rc = -ENXIO;
+ goto out;
}
+ dgnc_mgmt_in_use[minor] = 0;
+
+out:
spin_unlock_irqrestore(&dgnc_global_lock, flags);
- return 0;
+ return rc;
}
-/*
- * dgnc_mgmt_ioctl()
- *
- * ioctl the mgmt/dpa device
+/**
+ * dgnc_mgmt_ioctl() - Ioctl the mgmt/dpa device.
*/
-
long dgnc_mgmt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
unsigned long flags;
@@ -171,17 +167,15 @@ long dgnc_mgmt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
board = ni.board;
channel = ni.channel;
- /* Verify boundaries on board */
if (board >= dgnc_num_boards)
return -ENODEV;
- /* Verify boundaries on channel */
if (channel >= dgnc_board[board]->nasync)
return -ENODEV;
ch = dgnc_board[board]->channels[channel];
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ if (!ch)
return -ENODEV;
memset(&ni, 0, sizeof(ni));
@@ -250,6 +244,5 @@ long dgnc_mgmt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
break;
}
}
-
return 0;
}
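The open/close rework above funnels every exit through a single unlock site instead of unlocking on each error branch. In isolation the pattern looks roughly like this (a generic sketch, not the driver's code):

#include <linux/spinlock.h>
#include <linux/errno.h>

static DEFINE_SPINLOCK(example_lock);
static int example_in_use;

/* Hypothetical open helper: every path leaves through the same unlock. */
static int example_open(unsigned int minor, unsigned int max_minors)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&example_lock, flags);

	if (minor >= max_minors) {
		rc = -ENXIO;
		goto out;
	}
	if (example_in_use) {
		rc = -EBUSY;
		goto out;
	}
	example_in_use++;
out:
	spin_unlock_irqrestore(&example_lock, flags);
	return rc;
}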
diff --git a/drivers/staging/dgnc/dgnc_mgmt.h b/drivers/staging/dgnc/dgnc_mgmt.h
index 708abe9594d4..a7a5770d7630 100644
--- a/drivers/staging/dgnc/dgnc_mgmt.h
+++ b/drivers/staging/dgnc/dgnc_mgmt.h
@@ -13,13 +13,14 @@
* PURPOSE. See the GNU General Public License for more details.
*/
-#ifndef __DGNC_MGMT_H
-#define __DGNC_MGMT_H
+#ifndef _DGNC_MGMT_H
+#define _DGNC_MGMT_H
#define MAXMGMTDEVICES 8
int dgnc_mgmt_open(struct inode *inode, struct file *file);
int dgnc_mgmt_close(struct inode *inode, struct file *file);
long dgnc_mgmt_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
-#endif
+
+#endif /* _DGNC_MGMT_H */
diff --git a/drivers/staging/dgnc/dgnc_neo.c b/drivers/staging/dgnc/dgnc_neo.c
index 3eefefe53174..1943e66fec57 100644
--- a/drivers/staging/dgnc/dgnc_neo.c
+++ b/drivers/staging/dgnc/dgnc_neo.c
@@ -14,15 +14,15 @@
*/
#include <linux/kernel.h>
-#include <linux/sched.h> /* For jiffies, task states */
-#include <linux/interrupt.h> /* For tasklet and interrupt structs/defines */
-#include <linux/delay.h> /* For udelay */
-#include <linux/io.h> /* For read[bwl]/write[bwl] */
-#include <linux/serial.h> /* For struct async_serial */
-#include <linux/serial_reg.h> /* For the various UART offsets */
-
-#include "dgnc_driver.h" /* Driver main header file */
-#include "dgnc_neo.h" /* Our header file */
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/serial.h>
+#include <linux/serial_reg.h>
+
+#include "dgnc_driver.h"
+#include "dgnc_neo.h"
#include "dgnc_tty.h"
static inline void neo_parse_lsr(struct dgnc_board *brd, uint port);
@@ -96,12 +96,7 @@ static inline void neo_set_cts_flow_control(struct channel_t *ch)
unsigned char efr = readb(&ch->ch_neo_uart->efr);
/* Turn on auto CTS flow control */
-#if 1
ier |= UART_17158_IER_CTSDSR;
-#else
- ier &= ~(UART_17158_IER_CTSDSR);
-#endif
-
efr |= (UART_17158_EFR_ECB | UART_17158_EFR_CTSDSR);
/* Turn off auto Xon flow control */
@@ -135,11 +130,7 @@ static inline void neo_set_rts_flow_control(struct channel_t *ch)
unsigned char efr = readb(&ch->ch_neo_uart->efr);
/* Turn on auto RTS flow control */
-#if 1
ier |= UART_17158_IER_RTSDTR;
-#else
- ier &= ~(UART_17158_IER_RTSDTR);
-#endif
efr |= (UART_17158_EFR_ECB | UART_17158_EFR_RTSDTR);
/* Turn off auto Xoff flow control */
@@ -358,20 +349,17 @@ static inline void neo_set_new_start_stop_chars(struct channel_t *ch)
}
/* No locks are assumed to be held when calling this function. */
-
static inline void neo_clear_break(struct channel_t *ch, int force)
{
unsigned long flags;
spin_lock_irqsave(&ch->ch_lock, flags);
- /* Bail if we aren't currently sending a break. */
if (!ch->ch_stop_sending_break) {
spin_unlock_irqrestore(&ch->ch_lock, flags);
return;
}
- /* Turn break off, and unset some variables */
if (ch->ch_flags & CH_BREAK_SENDING) {
if (force ||
time_after_eq(jiffies, ch->ch_stop_sending_break)) {
@@ -387,7 +375,6 @@ static inline void neo_clear_break(struct channel_t *ch, int force)
}
/* Parse the ISR register. */
-
static inline void neo_parse_isr(struct dgnc_board *brd, uint port)
{
struct channel_t *ch;
@@ -396,14 +383,13 @@ static inline void neo_parse_isr(struct dgnc_board *brd, uint port)
unsigned long flags;
ch = brd->channels[port];
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ if (!ch)
return;
/* Here we try to figure out what caused the interrupt to happen */
while (1) {
isr = readb(&ch->ch_neo_uart->isr_fcr);
- /* Bail if no pending interrupt */
if (isr & UART_IIR_NO_INT)
break;
@@ -426,7 +412,6 @@ static inline void neo_parse_isr(struct dgnc_board *brd, uint port)
}
if (isr & UART_IIR_THRI) {
- /* Transfer data (if any) from Write Queue -> UART. */
spin_lock_irqsave(&ch->ch_lock, flags);
ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM);
spin_unlock_irqrestore(&ch->ch_lock, flags);
@@ -442,10 +427,7 @@ static inline void neo_parse_isr(struct dgnc_board *brd, uint port)
* one it was, so we can suspend or resume data flow.
*/
if (cause == UART_17158_XON_DETECT) {
- /*
- * Is output stopped right now, if so,
- * resume it
- */
+ /* resume output if stopped */
if (brd->channels[port]->ch_flags & CH_STOP) {
spin_lock_irqsave(&ch->ch_lock,
flags);
@@ -504,7 +486,6 @@ static inline void neo_parse_isr(struct dgnc_board *brd, uint port)
}
}
- /* Parse any modem signal changes */
neo_parse_modem(ch, readb(&ch->ch_neo_uart->msr));
}
}
@@ -515,18 +496,14 @@ static inline void neo_parse_lsr(struct dgnc_board *brd, uint port)
int linestatus;
unsigned long flags;
- /*
- * Check to make sure it didn't receive interrupt with a null board
- * associated or a board pointer that wasn't ours.
- */
- if (!brd || brd->magic != DGNC_BOARD_MAGIC)
+ if (!brd)
return;
if (port >= brd->maxports)
return;
ch = brd->channels[port];
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ if (!ch)
return;
linestatus = readb(&ch->ch_neo_uart->lsr);
@@ -534,7 +511,6 @@ static inline void neo_parse_lsr(struct dgnc_board *brd, uint port)
ch->ch_cached_lsr |= linestatus;
if (ch->ch_cached_lsr & UART_LSR_DR) {
- /* Read data from uart -> queue */
neo_copy_data_from_uart_to_queue(ch);
spin_lock_irqsave(&ch->ch_lock, flags);
dgnc_check_queue_flow_control(ch);
@@ -571,22 +547,17 @@ static inline void neo_parse_lsr(struct dgnc_board *brd, uint port)
ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM);
spin_unlock_irqrestore(&ch->ch_lock, flags);
- /* Transfer data (if any) from Write Queue -> UART. */
neo_copy_data_from_queue_to_uart(ch);
} else if (linestatus & UART_17158_TX_AND_FIFO_CLR) {
spin_lock_irqsave(&ch->ch_lock, flags);
ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM);
spin_unlock_irqrestore(&ch->ch_lock, flags);
- /* Transfer data (if any) from Write Queue -> UART. */
neo_copy_data_from_queue_to_uart(ch);
}
}
-/*
- * neo_param()
- * Send any/all changes to the line to the UART.
- */
+/* Send any/all changes to the line to the UART. */
static void neo_param(struct tty_struct *tty)
{
unsigned char lcr = 0;
@@ -599,23 +570,22 @@ static void neo_param(struct tty_struct *tty)
struct channel_t *ch;
struct un_t *un;
- if (!tty || tty->magic != TTY_MAGIC)
+ if (!tty)
return;
un = (struct un_t *)tty->driver_data;
- if (!un || un->magic != DGNC_UNIT_MAGIC)
+ if (!un)
return;
ch = un->un_ch;
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ if (!ch)
return;
bd = ch->ch_bd;
- if (!bd || bd->magic != DGNC_BOARD_MAGIC)
+ if (!bd)
return;
/* If baud rate is zero, flush queues, and set mval to drop DTR. */
-
if ((ch->ch_c_cflag & (CBAUD)) == 0) {
ch->ch_r_head = 0;
ch->ch_r_tail = 0;
@@ -724,10 +694,6 @@ static void neo_param(struct tty_struct *tty)
if (!(ch->ch_c_cflag & PARODD))
lcr |= UART_LCR_EPAR;
- /*
- * Not all platforms support mark/space parity,
- * so this will hide behind an ifdef.
- */
#ifdef CMSPAR
if (ch->ch_c_cflag & CMSPAR)
lcr |= UART_LCR_SPAR;
@@ -796,16 +762,11 @@ static void neo_param(struct tty_struct *tty)
if (ier != uart_ier)
writeb(ier, &ch->ch_neo_uart->ier);
- /* Set new start/stop chars */
neo_set_new_start_stop_chars(ch);
if (ch->ch_digi.digi_flags & CTSPACE || ch->ch_c_cflag & CRTSCTS) {
neo_set_cts_flow_control(ch);
} else if (ch->ch_c_iflag & IXON) {
- /*
- * If start/stop is set to disable, then we should
- * disable flow control
- */
if ((ch->ch_startc == _POSIX_VDISABLE) ||
(ch->ch_stopc == _POSIX_VDISABLE))
neo_set_no_output_flow_control(ch);
@@ -818,10 +779,6 @@ static void neo_param(struct tty_struct *tty)
if (ch->ch_digi.digi_flags & RTSPACE || ch->ch_c_cflag & CRTSCTS) {
neo_set_rts_flow_control(ch);
} else if (ch->ch_c_iflag & IXOFF) {
- /*
- * If start/stop is set to disable, then we should
- * disable flow control
- */
if ((ch->ch_startc == _POSIX_VDISABLE) ||
(ch->ch_stopc == _POSIX_VDISABLE))
neo_set_no_input_flow_control(ch);
@@ -843,12 +800,10 @@ static void neo_param(struct tty_struct *tty)
neo_assert_modem_signals(ch);
- /* Get current status of the modem signals now */
neo_parse_modem(ch, readb(&ch->ch_neo_uart->msr));
}
-/* Our board poller function. */
-
+/* Board poller function. */
static void neo_tasklet(unsigned long data)
{
struct dgnc_board *bd = (struct dgnc_board *)data;
@@ -858,10 +813,9 @@ static void neo_tasklet(unsigned long data)
int state = 0;
int ports = 0;
- if (!bd || bd->magic != DGNC_BOARD_MAGIC)
+ if (!bd)
return;
- /* Cache a couple board values */
spin_lock_irqsave(&bd->bd_lock, flags);
state = bd->state;
ports = bd->nasync;
@@ -873,15 +827,10 @@ static void neo_tasklet(unsigned long data)
*/
spin_lock_irqsave(&bd->bd_intr_lock, flags);
- /* If board is ready, parse deeper to see if there is anything to do. */
-
if ((state == BOARD_READY) && (ports > 0)) {
- /* Loop on each port */
for (i = 0; i < ports; i++) {
ch = bd->channels[i];
-
- /* Just being careful... */
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ if (!ch)
continue;
/*
@@ -903,10 +852,6 @@ static void neo_tasklet(unsigned long data)
neo_copy_data_from_queue_to_uart(ch);
dgnc_wakeup_writes(ch);
- /*
- * Call carrier carrier function, in case something
- * has changed.
- */
dgnc_carrier(ch);
/*
@@ -918,15 +863,10 @@ static void neo_tasklet(unsigned long data)
}
}
- /* Allow interrupt routine to access the interrupt register again */
spin_unlock_irqrestore(&bd->bd_intr_lock, flags);
}
-/*
- * dgnc_neo_intr()
- *
- * Neo specific interrupt handler.
- */
+/* Neo specific interrupt handler. */
static irqreturn_t neo_intr(int irq, void *voidbrd)
{
struct dgnc_board *brd = voidbrd;
@@ -937,11 +877,7 @@ static irqreturn_t neo_intr(int irq, void *voidbrd)
unsigned long flags;
unsigned long flags2;
- /*
- * Check to make sure it didn't receive interrupt with a null board
- * associated or a board pointer that wasn't ours.
- */
- if (!brd || brd->magic != DGNC_BOARD_MAGIC)
+ if (!brd)
return IRQ_NONE;
/* Lock out the slow poller from running on this board. */
@@ -964,19 +900,12 @@ static irqreturn_t neo_intr(int irq, void *voidbrd)
return IRQ_NONE;
}
- /*
- * At this point, we have at least SOMETHING to service, dig
- * further...
- */
-
- /* Loop on each port */
while ((uart_poll & 0xff) != 0) {
type = uart_poll >> (8 + (port * 3));
type &= 0x7;
uart_poll &= ~(0x01 << port);
- /* Switch on type of interrupt we have */
switch (type) {
case UART_17158_RXRDY_TIMEOUT:
/*
@@ -984,7 +913,6 @@ static irqreturn_t neo_intr(int irq, void *voidbrd)
* RX FIFO until it falls below the trigger level.
*/
- /* Verify the port is in range. */
if (port >= brd->nasync)
break;
@@ -1027,27 +955,17 @@ static irqreturn_t neo_intr(int irq, void *voidbrd)
break;
case UART_17158_MSR:
-
/* MSR or flow control was seen. */
-
neo_parse_isr(brd, port);
break;
default:
- /*
- * The UART triggered us with a bogus interrupt type.
- * It appears the Exar chip, when REALLY bogged down,
- * will throw these once and awhile.
- * Its harmless, just ignore it and move on.
- */
break;
}
port++;
}
- /* Schedule tasklet to more in-depth servicing at a better time. */
-
tasklet_schedule(&brd->helper_tasklet);
spin_unlock_irqrestore(&brd->bd_intr_lock, flags);
@@ -1094,32 +1012,23 @@ static void neo_copy_data_from_uart_to_queue(struct channel_t *ch)
ushort tail;
unsigned long flags;
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ if (!ch)
return;
spin_lock_irqsave(&ch->ch_lock, flags);
- /* cache head and tail of queue */
head = ch->ch_r_head & RQUEUEMASK;
tail = ch->ch_r_tail & RQUEUEMASK;
- /* Get our cached LSR */
linestatus = ch->ch_cached_lsr;
ch->ch_cached_lsr = 0;
- /* Store how much space we have left in the queue */
qleft = tail - head - 1;
if (qleft < 0)
qleft += RQUEUEMASK + 1;
- /*
- * If the UART is not in FIFO mode, force the FIFO copy to
- * NOT be run, by setting total to 0.
- *
- * On the other hand, if the UART IS in FIFO mode, then ask
- * the UART to give us an approximation of data it has RX'ed.
- */
if (!(ch->ch_flags & CH_FIFO_ENABLED)) {
+ /* force the FIFO copy to NOT be run */
total = 0;
} else {
total = readb(&ch->ch_neo_uart->rfifo);
@@ -1138,26 +1047,11 @@ static void neo_copy_data_from_uart_to_queue(struct channel_t *ch)
total -= 3;
}
- /*
- * Finally, bound the copy to make sure we don't overflow
- * our own queue...
- * The byte by byte copy loop below this loop this will
- * deal with the queue overflow possibility.
- */
total = min(total, qleft);
while (total > 0) {
- /*
- * Grab the linestatus register, we need to check
- * to see if there are any errors in the FIFO.
- */
linestatus = readb(&ch->ch_neo_uart->lsr);
- /*
- * Break out if there is a FIFO error somewhere.
- * This will allow us to go byte by byte down below,
- * finding the exact location of the error.
- */
if (linestatus & UART_17158_RX_FIFO_DATA_ERROR)
break;
@@ -1182,7 +1076,6 @@ static void neo_copy_data_from_uart_to_queue(struct channel_t *ch)
linestatus = 0;
- /* Copy data from uart to the queue */
memcpy_fromio(ch->ch_rqueue + head,
&ch->ch_neo_uart->txrxburst, n);
@@ -1193,7 +1086,6 @@ static void neo_copy_data_from_uart_to_queue(struct channel_t *ch)
*/
memset(ch->ch_equeue + head, 0, n);
- /* Add to and flip head if needed */
head = (head + n) & RQUEUEMASK;
total -= n;
qleft -= n;
@@ -1242,8 +1134,6 @@ static void neo_copy_data_from_uart_to_queue(struct channel_t *ch)
ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM);
}
- /* Discard character if we are ignoring the error mask. */
-
if (linestatus & error_mask) {
unsigned char discard;
@@ -1253,13 +1143,10 @@ static void neo_copy_data_from_uart_to_queue(struct channel_t *ch)
}
/*
- * If our queue is full, we have no choice but to drop some
- * data.
- * The assumption is that HWFLOW or SWFLOW should have stopped
- * things way way before we got to this point.
- *
- * I decided that I wanted to ditch the oldest data first,
- * I hope thats okay with everyone? Yes? Good.
+ * If our queue is full, we have no choice but to drop
+ * some data. The assumption is that HWFLOW or SWFLOW
+ * should have stopped things way way before we got to
+ * this point.
*/
while (qleft < 1) {
tail = (tail + 1) & RQUEUEMASK;
@@ -1272,18 +1159,14 @@ static void neo_copy_data_from_uart_to_queue(struct channel_t *ch)
&ch->ch_neo_uart->txrxburst, 1);
ch->ch_equeue[head] = (unsigned char)linestatus;
- /* Ditch any remaining linestatus value. */
linestatus = 0;
- /* Add to and flip head if needed */
head = (head + 1) & RQUEUEMASK;
qleft--;
ch->ch_rxcount++;
}
- /* Write new final heads to channel structure. */
-
ch->ch_r_head = head & RQUEUEMASK;
ch->ch_e_head = head & EQUEUEMASK;
@@ -1301,25 +1184,21 @@ static int neo_drain(struct tty_struct *tty, uint seconds)
struct un_t *un;
int rc = 0;
- if (!tty || tty->magic != TTY_MAGIC)
+ if (!tty)
return -ENXIO;
un = (struct un_t *)tty->driver_data;
- if (!un || un->magic != DGNC_UNIT_MAGIC)
+ if (!un)
return -ENXIO;
ch = un->un_ch;
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ if (!ch)
return -ENXIO;
spin_lock_irqsave(&ch->ch_lock, flags);
un->un_flags |= UN_EMPTY;
spin_unlock_irqrestore(&ch->ch_lock, flags);
- /*
- * Go to sleep waiting for the tty layer to wake me back up when
- * the empty flag goes away.
- */
rc = wait_event_interruptible_timeout(un->un_flags_wait,
((un->un_flags & UN_EMPTY) == 0),
msecs_to_jiffies(seconds * 1000));
@@ -1330,15 +1209,14 @@ static int neo_drain(struct tty_struct *tty, uint seconds)
/*
* Flush the WRITE FIFO on the Neo.
- *
- * NOTE: Channel lock MUST be held before calling this function!
+ * Channel lock MUST be held before calling this function!
*/
static void neo_flush_uart_write(struct channel_t *ch)
{
unsigned char tmp = 0;
int i = 0;
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ if (!ch)
return;
writeb((UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_XMIT),
@@ -1347,7 +1225,7 @@ static void neo_flush_uart_write(struct channel_t *ch)
for (i = 0; i < 10; i++) {
/*
- * Check to see if the UART feels it completely flushed the
+		 * Check to see if the UART completely flushed the
* FIFO.
*/
tmp = readb(&ch->ch_neo_uart->isr_fcr);
@@ -1362,15 +1240,14 @@ static void neo_flush_uart_write(struct channel_t *ch)
/*
* Flush the READ FIFO on the Neo.
- *
- * NOTE: Channel lock MUST be held before calling this function!
+ * Channel lock MUST be held before calling this function!
*/
static void neo_flush_uart_read(struct channel_t *ch)
{
unsigned char tmp = 0;
int i = 0;
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ if (!ch)
return;
writeb(UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_RCVR,
@@ -1400,12 +1277,11 @@ static void neo_copy_data_from_queue_to_uart(struct channel_t *ch)
uint len_written = 0;
unsigned long flags;
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ if (!ch)
return;
spin_lock_irqsave(&ch->ch_lock, flags);
- /* No data to write to the UART */
if (ch->ch_w_tail == ch->ch_w_head)
goto exit_unlock;
@@ -1414,12 +1290,10 @@ static void neo_copy_data_from_queue_to_uart(struct channel_t *ch)
(ch->ch_flags & CH_BREAK_SENDING))
goto exit_unlock;
- /* If FIFOs are disabled. Send data directly to txrx register */
-
if (!(ch->ch_flags & CH_FIFO_ENABLED)) {
+ /* Send data directly to txrx register */
unsigned char lsrbits = readb(&ch->ch_neo_uart->lsr);
- /* Cache the LSR bits for later parsing */
ch->ch_cached_lsr |= lsrbits;
if (ch->ch_cached_lsr & UART_LSR_THRE) {
ch->ch_cached_lsr &= ~(UART_LSR_THRE);
@@ -1477,12 +1351,10 @@ static void neo_copy_data_from_queue_to_uart(struct channel_t *ch)
n = UART_17158_TX_FIFOSIZE - readb(&ch->ch_neo_uart->tfifo);
}
- /* cache head and tail of queue */
head = ch->ch_w_head & WQUEUEMASK;
tail = ch->ch_w_tail & WQUEUEMASK;
qlen = (head - tail) & WQUEUEMASK;
- /* Find minimum of the FIFO space, versus queue length */
n = min(n, qlen);
while (n > 0) {
@@ -1521,14 +1393,12 @@ static void neo_copy_data_from_queue_to_uart(struct channel_t *ch)
memcpy_toio(&ch->ch_neo_uart->txrxburst,
ch->ch_wqueue + tail, s);
- /* Add and flip queue if needed */
tail = (tail + s) & WQUEUEMASK;
n -= s;
ch->ch_txcount += s;
len_written += s;
}
- /* Update the final tail */
ch->ch_w_tail = tail & WQUEUEMASK;
if (len_written > 0) {
@@ -1544,7 +1414,7 @@ static void neo_parse_modem(struct channel_t *ch, unsigned char signals)
{
unsigned char msignals = signals;
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ if (!ch)
return;
/*
@@ -1572,10 +1442,7 @@ static void neo_parse_modem(struct channel_t *ch, unsigned char signals)
}
}
- /*
- * Scrub off lower bits. They signify delta's, which I don't care
- * about
- */
+	/* Scrub off lower bits. They signify deltas. */
msignals &= 0xf0;
if (msignals & UART_MSR_DCD)
@@ -1604,7 +1471,7 @@ static void neo_assert_modem_signals(struct channel_t *ch)
{
unsigned char out;
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ if (!ch)
return;
out = ch->ch_mostat;
@@ -1621,7 +1488,7 @@ static void neo_assert_modem_signals(struct channel_t *ch)
static void neo_send_start_character(struct channel_t *ch)
{
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ if (!ch)
return;
if (ch->ch_startc != _POSIX_VDISABLE) {
@@ -1634,7 +1501,7 @@ static void neo_send_start_character(struct channel_t *ch)
static void neo_send_stop_character(struct channel_t *ch)
{
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ if (!ch)
return;
if (ch->ch_stopc != _POSIX_VDISABLE) {
@@ -1668,7 +1535,6 @@ static void neo_uart_init(struct channel_t *ch)
}
/* Make the UART completely turn off. */
-
static void neo_uart_off(struct channel_t *ch)
{
/* Turn off UART enhanced bits */
@@ -1735,8 +1601,6 @@ static void neo_send_break(struct channel_t *ch, int msecs)
}
/*
- * neo_send_immediate_char.
- *
* Sends a specific character as soon as possible to the UART,
* jumping over any bytes that might be in the write queue.
*
@@ -1744,7 +1608,7 @@ static void neo_send_break(struct channel_t *ch, int msecs)
*/
static void neo_send_immediate_char(struct channel_t *ch, unsigned char c)
{
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ if (!ch)
return;
writeb(c, &ch->ch_neo_uart->txrx);
@@ -1797,7 +1661,7 @@ static void neo_vpd(struct dgnc_board *brd)
unsigned int i = 0;
unsigned int a;
- if (!brd || brd->magic != DGNC_BOARD_MAGIC)
+ if (!brd)
return;
if (!brd->re_map_membase)
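A quick aside on the hunks above: neo_copy_data_from_uart_to_queue() computes the free space in the receive queue from masked head/tail indices and, when the queue fills, drops the oldest byte first. The user-space sketch below only illustrates that masked ring-buffer arithmetic; QUEUE_SIZE, QUEUE_MASK and queue_free() are hypothetical names standing in for the driver's RQUEUEMASK handling, not driver symbols.

/* Minimal sketch of the masked ring-buffer arithmetic (illustration only). */
#include <stdio.h>

#define QUEUE_SIZE 16                 /* assumed power-of-two queue size */
#define QUEUE_MASK (QUEUE_SIZE - 1)

static int queue_free(unsigned short head, unsigned short tail)
{
	int qleft = (tail & QUEUE_MASK) - (head & QUEUE_MASK) - 1;

	if (qleft < 0)
		qleft += QUEUE_MASK + 1;  /* wrap around, as the driver does */

	return qleft;
}

int main(void)
{
	printf("%d\n", queue_free(15, 0)); /* queue full: 0 slots free */
	printf("%d\n", queue_free(3, 10)); /* 6 slots free */
	return 0;
}

When the free count reaches zero the driver advances the tail by one slot (discarding the oldest byte) before storing the next character, which is exactly the "drop some data" behaviour described in the comment retained above.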
diff --git a/drivers/staging/dgnc/dgnc_neo.h b/drivers/staging/dgnc/dgnc_neo.h
index 77ecd9baae45..c30a2c2b5eea 100644
--- a/drivers/staging/dgnc/dgnc_neo.h
+++ b/drivers/staging/dgnc/dgnc_neo.h
@@ -13,43 +13,62 @@
* PURPOSE. See the GNU General Public License for more details.
*/
-#ifndef __DGNC_NEO_H
-#define __DGNC_NEO_H
+#ifndef _DGNC_NEO_H
+#define _DGNC_NEO_H
#include "dgnc_driver.h"
-/*
- * Per channel/port NEO UART structure
- * Base Structure Entries Usage Meanings to Host
+/**
+ * struct neo_uart_struct - Per channel/port NEO UART structure
+ *
+ * key - RW = read write
+ * - R = read only
+ * - U = unused
*
- * W = read write R = read only
- * U = Unused.
+ * @txrx: (RW) Holding Register.
+ * @ier: (RW) Interrupt Enable Register.
+ * @isr_fcr: (RW) Interrupt Status Reg/Fifo Control Register.
+ * @lcr: (RW) Line Control Register.
+ * @mcr: (RW) Modem Control Register.
+ * @lsr: (RW) Line Status Register.
+ * @msr: (RW) Modem Status Register.
+ * @spr: (RW) Scratch Pad Register.
+ * @fctr: (RW) Feature Control Register.
+ * @efr: (RW) Enhanced Function Register.
+ * @tfifo: (RW) Transmit FIFO Register.
+ * @rfifo: (RW) Receive FIFO Register.
+ * @xoffchar1: (RW) XOff Character 1 Register.
+ * @xoffchar2: (RW) XOff Character 2 Register.
+ * @xonchar1: (RW) Xon Character 1 Register.
+ * @xonchar2: (RW) XOn Character 2 Register.
+ * @reserved1: (U) Reserved by Exar.
+ * @txrxburst: (RW) 64 bytes of RX/TX FIFO Data.
+ * @reserved2: (U) Reserved by Exar.
+ * @rxburst_with_errors: (R) 64 bytes of RX FIFO Data + LSR.
*/
-
struct neo_uart_struct {
- u8 txrx; /* WR RHR/THR - Holding Reg */
- u8 ier; /* WR IER - Interrupt Enable Reg */
- u8 isr_fcr; /* WR ISR/FCR - Interrupt Status Reg/Fifo
- * Control Reg
- */
- u8 lcr; /* WR LCR - Line Control Reg */
- u8 mcr; /* WR MCR - Modem Control Reg */
- u8 lsr; /* WR LSR - Line Status Reg */
- u8 msr; /* WR MSR - Modem Status Reg */
- u8 spr; /* WR SPR - Scratch Pad Reg */
- u8 fctr; /* WR FCTR - Feature Control Reg */
- u8 efr; /* WR EFR - Enhanced Function Reg */
- u8 tfifo; /* WR TXCNT/TXTRG - Transmit FIFO Reg */
- u8 rfifo; /* WR RXCNT/RXTRG - Receive FIFO Reg */
- u8 xoffchar1; /* WR XOFF 1 - XOff Character 1 Reg */
- u8 xoffchar2; /* WR XOFF 2 - XOff Character 2 Reg */
- u8 xonchar1; /* WR XON 1 - Xon Character 1 Reg */
- u8 xonchar2; /* WR XON 2 - XOn Character 2 Reg */
-
- u8 reserved1[0x2ff - 0x200]; /* U Reserved by Exar */
- u8 txrxburst[64]; /* RW 64 bytes of RX/TX FIFO Data */
- u8 reserved2[0x37f - 0x340]; /* U Reserved by Exar */
- u8 rxburst_with_errors[64]; /* R 64 bytes of RX FIFO Data + LSR */
+ u8 txrx;
+ u8 ier;
+ u8 isr_fcr;
+
+ u8 lcr;
+ u8 mcr;
+ u8 lsr;
+ u8 msr;
+ u8 spr;
+ u8 fctr;
+ u8 efr;
+ u8 tfifo;
+ u8 rfifo;
+ u8 xoffchar1;
+ u8 xoffchar2;
+ u8 xonchar1;
+ u8 xonchar2;
+
+ u8 reserved1[0x2ff - 0x200];
+ u8 txrxburst[64];
+ u8 reserved2[0x37f - 0x340];
+ u8 rxburst_with_errors[64];
};
/* Where to read the extended interrupt register (32bits instead of 8bits) */
@@ -151,8 +170,6 @@ struct neo_uart_struct {
#define UART_17158_IER_RTSDTR 0x40 /* Output Interrupt Enable */
#define UART_17158_IER_CTSDSR 0x80 /* Input Interrupt Enable */
-/* Our Global Variables */
-
extern struct board_ops dgnc_neo_ops;
-#endif
+#endif /* _DGNC_NEO_H */
diff --git a/drivers/staging/dgnc/dgnc_pci.h b/drivers/staging/dgnc/dgnc_pci.h
index 4e170c47f4a3..59845916d90c 100644
--- a/drivers/staging/dgnc/dgnc_pci.h
+++ b/drivers/staging/dgnc/dgnc_pci.h
@@ -13,10 +13,11 @@
* PURPOSE. See the GNU General Public License for more details.
*/
-#ifndef __DGNC_PCI_H
-#define __DGNC_PCI_H
+#ifndef _DGNC_PCI_H
+#define _DGNC_PCI_H
-#define PCIMAX 32 /* maximum number of PCI boards */
+/* Maximum number of PCI boards */
+#define PCIMAX 32
#define DIGI_VID 0x114F
@@ -59,10 +60,10 @@
#define PCI_DEVICE_NEO_EXPRESS_8RJ45_PCI_NAME "Neo 8 PCI Express RJ45"
#define PCI_DEVICE_NEO_EXPRESS_4_IBM_PCI_NAME "Neo 4 PCI Express IBM"
-/* Size of Memory and I/O for PCI (4 K) */
+/* Size of memory and I/O for PCI (4 K) */
#define PCI_RAM_SIZE 0x1000
-/* Size of Memory (2MB) */
+/* Size of memory (2MB) */
#define PCI_MEM_SIZE 0x1000
-#endif
+#endif /* _DGNC_PCI_H */
diff --git a/drivers/staging/dgnc/dgnc_tty.c b/drivers/staging/dgnc/dgnc_tty.c
index c3b8fc54883d..9e98781ca6fe 100644
--- a/drivers/staging/dgnc/dgnc_tty.c
+++ b/drivers/staging/dgnc/dgnc_tty.c
@@ -58,16 +58,15 @@ static const struct digi_t dgnc_digi_init = {
* This defines a raw port at 9600 baud, 8 data bits, no parity,
* 1 stop bit.
*/
-static struct ktermios default_termios = {
- .c_iflag = (DEFAULT_IFLAGS), /* iflags */
- .c_oflag = (DEFAULT_OFLAGS), /* oflags */
- .c_cflag = (DEFAULT_CFLAGS), /* cflags */
- .c_lflag = (DEFAULT_LFLAGS), /* lflags */
+static const struct ktermios default_termios = {
+ .c_iflag = (DEFAULT_IFLAGS),
+ .c_oflag = (DEFAULT_OFLAGS),
+ .c_cflag = (DEFAULT_CFLAGS),
+ .c_lflag = (DEFAULT_LFLAGS),
.c_cc = INIT_C_CC,
.c_line = 0,
};
-/* Our function prototypes */
static int dgnc_tty_open(struct tty_struct *tty, struct file *file);
static void dgnc_tty_close(struct tty_struct *tty, struct file *file);
static int dgnc_block_til_ready(struct tty_struct *tty, struct file *file,
@@ -130,10 +129,8 @@ static const struct tty_operations dgnc_tty_ops = {
/* TTY Initialization/Cleanup Functions */
-/*
- * dgnc_tty_register()
- *
- * Init the tty subsystem for this board.
+/**
+ * dgnc_tty_register() - Init the tty subsystem for this board.
*/
int dgnc_tty_register(struct dgnc_board *brd)
{
@@ -143,7 +140,6 @@ int dgnc_tty_register(struct dgnc_board *brd)
TTY_DRIVER_REAL_RAW |
TTY_DRIVER_DYNAMIC_DEV |
TTY_DRIVER_HARDWARE_BREAK);
-
if (IS_ERR(brd->serial_driver))
return PTR_ERR(brd->serial_driver);
@@ -181,7 +177,6 @@ int dgnc_tty_register(struct dgnc_board *brd)
TTY_DRIVER_REAL_RAW |
TTY_DRIVER_DYNAMIC_DEV |
TTY_DRIVER_HARDWARE_BREAK);
-
if (IS_ERR(brd->print_driver)) {
rc = PTR_ERR(brd->print_driver);
goto unregister_serial_driver;
@@ -232,15 +227,15 @@ void dgnc_tty_unregister(struct dgnc_board *brd)
put_tty_driver(brd->serial_driver);
}
-/*
- * dgnc_tty_init()
+/**
+ * dgnc_tty_init() - Initialize the tty subsystem.
*
- * Init the tty subsystem. Called once per board after board has been
- * downloaded and init'ed.
+ * Called once per board after board has been downloaded and initialized.
*/
int dgnc_tty_init(struct dgnc_board *brd)
{
int i;
+ int rc;
void __iomem *vaddr;
struct channel_t *ch;
@@ -254,14 +249,12 @@ int dgnc_tty_init(struct dgnc_board *brd)
brd->nasync = brd->maxports;
for (i = 0; i < brd->nasync; i++) {
- /*
- * Okay to malloc with GFP_KERNEL, we are not at
- * interrupt context, and there are no locks held.
- */
brd->channels[i] = kzalloc(sizeof(*brd->channels[i]),
GFP_KERNEL);
- if (!brd->channels[i])
+ if (!brd->channels[i]) {
+ rc = -ENOMEM;
goto err_free_channels;
+ }
}
ch = brd->channels[0];
@@ -271,14 +264,10 @@ int dgnc_tty_init(struct dgnc_board *brd)
for (i = 0; i < brd->nasync; i++, ch = brd->channels[i]) {
spin_lock_init(&ch->ch_lock);
- /* Store all our magic numbers */
- ch->magic = DGNC_CHANNEL_MAGIC;
- ch->ch_tun.magic = DGNC_UNIT_MAGIC;
ch->ch_tun.un_ch = ch;
ch->ch_tun.un_type = DGNC_SERIAL;
ch->ch_tun.un_dev = i;
- ch->ch_pun.magic = DGNC_UNIT_MAGIC;
ch->ch_pun.un_ch = ch;
ch->ch_pun.un_type = DGNC_PRINT;
ch->ch_pun.un_dev = i + 128;
@@ -319,11 +308,12 @@ err_free_channels:
kfree(brd->channels[i]);
brd->channels[i] = NULL;
}
- return -ENOMEM;
+
+ return rc;
}
-/*
- * dgnc_cleanup_tty()
+/**
+ * dgnc_cleanup_tty() - Cleanup driver.
*
* Uninitialize the TTY portion of this driver. Free all memory and
* resources.
@@ -346,19 +336,18 @@ void dgnc_cleanup_tty(struct dgnc_board *brd)
put_tty_driver(brd->print_driver);
}
-/*
- * dgnc_wmove - Write data to transmit queue.
- *
- * ch - Pointer to channel structure.
- * buf - Pointer to characters to be moved.
- * n - Number of characters to move.
+/**
+ * dgnc_wmove() - Write data to transmit queue.
+ * @ch: Pointer to channel structure.
+ * @buf: Pointer to characters to be moved.
+ * @n: Number of characters to move.
*/
static void dgnc_wmove(struct channel_t *ch, char *buf, uint n)
{
int remain;
uint head;
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ if (!ch)
return;
head = ch->ch_w_head & WQUEUEMASK;
@@ -388,10 +377,9 @@ static void dgnc_wmove(struct channel_t *ch, char *buf, uint n)
ch->ch_w_head = head;
}
-/*
- * dgnc_input - Process received data.
- *
- * ch - Pointer to channel structure.
+/**
+ * dgnc_input() - Process received data.
+ * @ch: Pointer to channel structure.
*/
void dgnc_input(struct channel_t *ch)
{
@@ -409,21 +397,17 @@ void dgnc_input(struct channel_t *ch)
int s = 0;
int i = 0;
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ if (!ch)
return;
tp = ch->ch_tun.un_tty;
bd = ch->ch_bd;
- if (!bd || bd->magic != DGNC_BOARD_MAGIC)
+ if (!bd)
return;
spin_lock_irqsave(&ch->ch_lock, flags);
- /*
- * Figure the number of characters in the buffer.
- * Exit immediately if none.
- */
rmask = RQUEUEMASK;
head = ch->ch_r_head & rmask;
tail = ch->ch_r_tail & rmask;
@@ -436,7 +420,7 @@ void dgnc_input(struct channel_t *ch)
* If the device is not open, or CREAD is off,
* flush input data and return immediately.
*/
- if (!tp || (tp->magic != TTY_MAGIC) ||
+ if (!tp ||
!(ch->ch_tun.un_flags & UN_ISOPEN) ||
!C_CREAD(tp) ||
(ch->ch_tun.un_flags & UN_CLOSING)) {
@@ -448,32 +432,18 @@ void dgnc_input(struct channel_t *ch)
goto exit_unlock;
}
- /* If we are throttled, simply don't read any data. */
-
if (ch->ch_flags & CH_FORCED_STOPI)
goto exit_unlock;
flip_len = TTY_FLIPBUF_SIZE;
- /* Chop down the length, if needed */
len = min(data_len, flip_len);
len = min(len, (N_TTY_BUF_SIZE - 1));
ld = tty_ldisc_ref(tp);
-
- /*
- * If we were unable to get a reference to the ld,
- * don't flush our buffer, and act like the ld doesn't
- * have any space to put the data right now.
- */
if (!ld) {
len = 0;
} else {
- /*
- * If ld doesn't have a pointer to a receive_buf function,
- * flush the data, then act like the ld doesn't have any
- * space to put the data right now.
- */
if (!ld->ops->receive_buf) {
ch->ch_r_head = ch->ch_r_tail;
len = 0;
@@ -562,7 +532,9 @@ exit_unlock:
tty_ldisc_deref(ld);
}
-/*
+/**
+ * dgnc_carrier()
+ *
* Determines when CARRIER changes state and takes appropriate
* action.
*/
@@ -571,7 +543,7 @@ void dgnc_carrier(struct channel_t *ch)
int virt_carrier = 0;
int phys_carrier = 0;
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ if (!ch)
return;
if (ch->ch_mistat & UART_MSR_DCD)
@@ -652,7 +624,6 @@ void dgnc_carrier(struct channel_t *ch)
}
/* Assign the custom baud rate to the channel structure */
-
static void dgnc_set_custom_speed(struct channel_t *ch, uint newrate)
{
int testdiv;
@@ -716,7 +687,6 @@ void dgnc_check_queue_flow_control(struct channel_t *ch)
{
int qleft;
- /* Store how much space we have left in the queue */
qleft = ch->ch_r_tail - ch->ch_r_head - 1;
if (qleft < 0)
qleft += RQUEUEMASK + 1;
@@ -797,7 +767,7 @@ void dgnc_wakeup_writes(struct channel_t *ch)
int qlen = 0;
unsigned long flags;
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ if (!ch)
return;
spin_lock_irqsave(&ch->ch_lock, flags);
@@ -883,8 +853,6 @@ static struct dgnc_board *find_board_by_major(unsigned int major)
/* TTY Entry points and helper functions */
-/* dgnc_tty_open() */
-
static int dgnc_tty_open(struct tty_struct *tty, struct file *file)
{
struct dgnc_board *brd;
@@ -903,39 +871,30 @@ static int dgnc_tty_open(struct tty_struct *tty, struct file *file)
if (major > 255)
return -ENXIO;
- /* Get board pointer from our array of majors we have allocated */
brd = find_board_by_major(major);
if (!brd)
return -ENXIO;
- /*
- * If board is not yet up to a state of READY, go to
- * sleep waiting for it to happen or they cancel the open.
- */
rc = wait_event_interruptible(brd->state_wait,
(brd->state & BOARD_READY));
-
if (rc)
return rc;
spin_lock_irqsave(&brd->bd_lock, flags);
- /* If opened device is greater than our number of ports, bail. */
if (PORT_NUM(minor) >= brd->nasync) {
- spin_unlock_irqrestore(&brd->bd_lock, flags);
- return -ENXIO;
+ rc = -ENXIO;
+ goto err_brd_unlock;
}
ch = brd->channels[PORT_NUM(minor)];
if (!ch) {
- spin_unlock_irqrestore(&brd->bd_lock, flags);
- return -ENXIO;
+ rc = -ENXIO;
+ goto err_brd_unlock;
}
- /* Drop board lock */
spin_unlock_irqrestore(&brd->bd_lock, flags);
- /* Grab channel lock */
spin_lock_irqsave(&ch->ch_lock, flags);
/* Figure out our type */
@@ -946,8 +905,8 @@ static int dgnc_tty_open(struct tty_struct *tty, struct file *file)
un = &brd->channels[PORT_NUM(minor)]->ch_pun;
un->un_type = DGNC_PRINT;
} else {
- spin_unlock_irqrestore(&ch->ch_lock, flags);
- return -ENXIO;
+ rc = -ENXIO;
+ goto err_ch_unlock;
}
/*
@@ -959,7 +918,6 @@ static int dgnc_tty_open(struct tty_struct *tty, struct file *file)
rc = wait_event_interruptible(ch->ch_flags_wait,
((ch->ch_flags & CH_OPENING) == 0));
-
/* If ret is non-zero, user ctrl-c'ed us */
if (rc)
return -EINTR;
@@ -975,21 +933,18 @@ static int dgnc_tty_open(struct tty_struct *tty, struct file *file)
ch->ch_flags_wait,
(((ch->ch_tun.un_flags |
ch->ch_pun.un_flags) & UN_CLOSING) == 0));
-
/* If ret is non-zero, user ctrl-c'ed us */
if (rc)
return -EINTR;
spin_lock_irqsave(&ch->ch_lock, flags);
- /* Store our unit into driver_data, so we always have it available. */
tty->driver_data = un;
/* Initialize tty's */
if (!(un->un_flags & UN_ISOPEN)) {
- /* Store important variables. */
- un->un_tty = tty;
+ un->un_tty = tty;
/* Maybe do something here to the TTY struct as well? */
}
@@ -1000,7 +955,6 @@ static int dgnc_tty_open(struct tty_struct *tty, struct file *file)
*/
ch->ch_flags |= (CH_OPENING);
- /* Drop locks, as malloc with GFP_KERNEL can sleep */
spin_unlock_irqrestore(&ch->ch_lock, flags);
if (!ch->ch_rqueue)
@@ -1014,7 +968,6 @@ static int dgnc_tty_open(struct tty_struct *tty, struct file *file)
kfree(ch->ch_rqueue);
kfree(ch->ch_equeue);
kfree(ch->ch_wqueue);
-
return -ENOMEM;
}
@@ -1062,19 +1015,14 @@ static int dgnc_tty_open(struct tty_struct *tty, struct file *file)
brd->bd_ops->uart_init(ch);
}
- /* Run param in case we changed anything */
-
brd->bd_ops->param(tty);
dgnc_carrier(ch);
- /* follow protocol for opening port */
-
spin_unlock_irqrestore(&ch->ch_lock, flags);
rc = dgnc_block_til_ready(tty, file, ch);
- /* No going back now, increment our unit and channel counters */
spin_lock_irqsave(&ch->ch_lock, flags);
ch->ch_open_count++;
un->un_open_count++;
@@ -1082,18 +1030,23 @@ static int dgnc_tty_open(struct tty_struct *tty, struct file *file)
spin_unlock_irqrestore(&ch->ch_lock, flags);
return rc;
+
+err_brd_unlock:
+ spin_unlock_irqrestore(&brd->bd_lock, flags);
+
+ return rc;
+err_ch_unlock:
+ spin_unlock_irqrestore(&ch->ch_lock, flags);
+
+ return rc;
}
-/*
- * dgnc_block_til_ready()
- *
- * Wait for DCD, if needed.
- */
+/* Wait for DCD, if needed. */
static int dgnc_block_til_ready(struct tty_struct *tty,
struct file *file,
struct channel_t *ch)
{
- int retval = 0;
+ int rc = 0;
struct un_t *un = tty->driver_data;
unsigned long flags;
uint old_flags = 0;
@@ -1106,22 +1059,16 @@ static int dgnc_block_til_ready(struct tty_struct *tty,
ch->ch_wopen++;
- /* Loop forever */
while (1) {
sleep_on_un_flags = 0;
- /*
- * If board has failed somehow during our sleep,
- * bail with error.
- */
if (ch->ch_bd->state == BOARD_FAILED) {
- retval = -ENXIO;
+ rc = -ENXIO;
break;
}
- /* If tty was hung up, break out of loop and set error. */
if (tty_hung_up_p(file)) {
- retval = -EAGAIN;
+ rc = -EAGAIN;
break;
}
@@ -1146,7 +1093,7 @@ static int dgnc_block_til_ready(struct tty_struct *tty,
break;
if (tty_io_error(tty)) {
- retval = -EIO;
+ rc = -EIO;
break;
}
@@ -1162,15 +1109,12 @@ static int dgnc_block_til_ready(struct tty_struct *tty,
/*
* If there is a signal pending, the user probably
* interrupted (ctrl-c) us.
- * Leave loop with error set.
*/
if (signal_pending(current)) {
- retval = -ERESTARTSYS;
+ rc = -ERESTARTSYS;
break;
}
- /* Store the flags before we let go of channel lock */
-
if (sleep_on_un_flags)
old_flags = ch->ch_tun.un_flags | ch->ch_pun.un_flags;
else
@@ -1189,12 +1133,12 @@ static int dgnc_block_til_ready(struct tty_struct *tty,
* from the current value.
*/
if (sleep_on_un_flags)
- retval = wait_event_interruptible
+ rc = wait_event_interruptible
(un->un_flags_wait,
(old_flags != (ch->ch_tun.un_flags |
ch->ch_pun.un_flags)));
else
- retval = wait_event_interruptible(
+ rc = wait_event_interruptible(
ch->ch_flags_wait,
(old_flags != ch->ch_flags));
@@ -1209,25 +1153,19 @@ static int dgnc_block_til_ready(struct tty_struct *tty,
spin_unlock_irqrestore(&ch->ch_lock, flags);
- return retval;
+ return rc;
}
-/*
- * dgnc_tty_hangup()
- *
- * Hangup the port. Like a close, but don't wait for output to drain.
- */
+/* Hangup the port. Like a close, but don't wait for output to drain. */
static void dgnc_tty_hangup(struct tty_struct *tty)
{
- if (!tty || tty->magic != TTY_MAGIC)
+ if (!tty)
return;
/* flush the transmit queues */
dgnc_tty_flush_buffer(tty);
}
-/* dgnc_tty_close() */
-
static void dgnc_tty_close(struct tty_struct *tty, struct file *file)
{
struct dgnc_board *bd;
@@ -1235,19 +1173,19 @@ static void dgnc_tty_close(struct tty_struct *tty, struct file *file)
struct un_t *un;
unsigned long flags;
- if (!tty || tty->magic != TTY_MAGIC)
+ if (!tty)
return;
un = tty->driver_data;
- if (!un || un->magic != DGNC_UNIT_MAGIC)
+ if (!un)
return;
ch = un->un_ch;
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ if (!ch)
return;
bd = ch->ch_bd;
- if (!bd || bd->magic != DGNC_BOARD_MAGIC)
+ if (!bd)
return;
spin_lock_irqsave(&ch->ch_lock, flags);
@@ -1361,8 +1299,6 @@ static void dgnc_tty_close(struct tty_struct *tty, struct file *file)
}
/*
- * dgnc_tty_chars_in_buffer()
- *
* Return number of characters that have not been transmitted yet.
*
* This routine is used by the line discipline to determine if there
@@ -1375,18 +1311,18 @@ static int dgnc_tty_chars_in_buffer(struct tty_struct *tty)
ushort thead;
ushort ttail;
uint tmask;
- uint chars = 0;
+ uint chars;
unsigned long flags;
if (!tty)
return 0;
un = tty->driver_data;
- if (!un || un->magic != DGNC_UNIT_MAGIC)
+ if (!un)
return 0;
ch = un->un_ch;
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ if (!ch)
return 0;
spin_lock_irqsave(&ch->ch_lock, flags);
@@ -1397,21 +1333,17 @@ static int dgnc_tty_chars_in_buffer(struct tty_struct *tty)
spin_unlock_irqrestore(&ch->ch_lock, flags);
- if (ttail == thead) {
+ if (ttail == thead)
chars = 0;
- } else {
- if (thead >= ttail)
- chars = thead - ttail;
- else
- chars = thead - ttail + WQUEUESIZE;
- }
+ else if (thead > ttail)
+ chars = thead - ttail;
+ else
+ chars = thead - ttail + WQUEUESIZE;
return chars;
}
/*
- * dgnc_maxcps_room
- *
* Reduces bytes_available to the max number of characters
* that can be sent currently given the maxcps value, and
* returns the new bytes_available. This only affects printer
@@ -1419,6 +1351,8 @@ static int dgnc_tty_chars_in_buffer(struct tty_struct *tty)
*/
static int dgnc_maxcps_room(struct channel_t *ch, int bytes_available)
{
+ int rc = bytes_available;
+
if (ch->ch_digi.digi_maxcps > 0 && ch->ch_digi.digi_bufsize > 0) {
int cps_limit = 0;
unsigned long current_time = jiffies;
@@ -1439,17 +1373,13 @@ static int dgnc_maxcps_room(struct channel_t *ch, int bytes_available)
cps_limit = 0;
}
- bytes_available = min(cps_limit, bytes_available);
+ rc = min(cps_limit, bytes_available);
}
- return bytes_available;
+ return rc;
}
-/*
- * dgnc_tty_write_room()
- *
- * Return space available in Tx buffer
- */
+/* Return room available in Tx buffer */
static int dgnc_tty_write_room(struct tty_struct *tty)
{
struct channel_t *ch = NULL;
@@ -1457,18 +1387,18 @@ static int dgnc_tty_write_room(struct tty_struct *tty)
ushort head;
ushort tail;
ushort tmask;
- int ret = 0;
+ int room = 0;
unsigned long flags;
if (!tty)
return 0;
un = tty->driver_data;
- if (!un || un->magic != DGNC_UNIT_MAGIC)
+ if (!un)
return 0;
ch = un->un_ch;
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ if (!ch)
return 0;
spin_lock_irqsave(&ch->ch_lock, flags);
@@ -1477,53 +1407,45 @@ static int dgnc_tty_write_room(struct tty_struct *tty)
head = (ch->ch_w_head) & tmask;
tail = (ch->ch_w_tail) & tmask;
- ret = tail - head - 1;
- if (ret < 0)
- ret += WQUEUESIZE;
+ room = tail - head - 1;
+ if (room < 0)
+ room += WQUEUESIZE;
/* Limit printer to maxcps */
if (un->un_type != DGNC_PRINT)
- ret = dgnc_maxcps_room(ch, ret);
+ room = dgnc_maxcps_room(ch, room);
/*
- * If we are printer device, leave space for
+	 * If we are a printer device, leave room for
* possibly both the on and off strings.
*/
if (un->un_type == DGNC_PRINT) {
if (!(ch->ch_flags & CH_PRON))
- ret -= ch->ch_digi.digi_onlen;
- ret -= ch->ch_digi.digi_offlen;
+ room -= ch->ch_digi.digi_onlen;
+ room -= ch->ch_digi.digi_offlen;
} else {
if (ch->ch_flags & CH_PRON)
- ret -= ch->ch_digi.digi_offlen;
+ room -= ch->ch_digi.digi_offlen;
}
- if (ret < 0)
- ret = 0;
+ if (room < 0)
+ room = 0;
spin_unlock_irqrestore(&ch->ch_lock, flags);
-
- return ret;
+ return room;
}
/*
- * dgnc_tty_put_char()
- *
* Put a character into ch->ch_buf
- *
- * - used by the line discipline for OPOST processing
+ * Used by the line discipline for OPOST processing
*/
static int dgnc_tty_put_char(struct tty_struct *tty, unsigned char c)
{
- /* Simply call tty_write. */
-
dgnc_tty_write(tty, &c, 1);
return 1;
}
/*
- * dgnc_tty_write()
- *
* Take data from the user or kernel and send it out to the FEP.
* In here exists all the Transparent Print magic as well.
*/
@@ -1543,11 +1465,11 @@ static int dgnc_tty_write(struct tty_struct *tty,
return 0;
un = tty->driver_data;
- if (!un || un->magic != DGNC_UNIT_MAGIC)
+ if (!un)
return 0;
ch = un->un_ch;
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ if (!ch)
return 0;
if (!count)
@@ -1561,7 +1483,6 @@ static int dgnc_tty_write(struct tty_struct *tty,
spin_lock_irqsave(&ch->ch_lock, flags);
- /* Get our space available for the channel from the board */
tmask = WQUEUEMASK;
head = (ch->ch_w_head) & tmask;
tail = (ch->ch_w_tail) & tmask;
@@ -1577,14 +1498,7 @@ static int dgnc_tty_write(struct tty_struct *tty,
if (un->un_type != DGNC_PRINT)
bufcount = dgnc_maxcps_room(ch, bufcount);
- /*
- * Take minimum of what the user wants to send, and the
- * space available in the FEP buffer.
- */
count = min(count, bufcount);
-
- /* Bail if no space left. */
-
if (count <= 0)
goto exit_retry;
@@ -1646,42 +1560,36 @@ static int dgnc_tty_write(struct tty_struct *tty,
spin_unlock_irqrestore(&ch->ch_lock, flags);
- if (count) {
- /*
- * Channel lock is grabbed and then released
- * inside this routine.
- */
+ if (count)
ch->ch_bd->bd_ops->copy_data_from_queue_to_uart(ch);
- }
return count;
exit_retry:
-
spin_unlock_irqrestore(&ch->ch_lock, flags);
+
return 0;
}
/* Return modem signals to ld. */
-
static int dgnc_tty_tiocmget(struct tty_struct *tty)
{
struct channel_t *ch;
struct un_t *un;
- int result = -EIO;
+ int rc;
unsigned char mstat = 0;
unsigned long flags;
- if (!tty || tty->magic != TTY_MAGIC)
- return result;
+ if (!tty)
+ return -EIO;
un = tty->driver_data;
- if (!un || un->magic != DGNC_UNIT_MAGIC)
- return result;
+ if (!un)
+ return -EIO;
ch = un->un_ch;
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return result;
+ if (!ch)
+ return -EIO;
spin_lock_irqsave(&ch->ch_lock, flags);
@@ -1689,53 +1597,47 @@ static int dgnc_tty_tiocmget(struct tty_struct *tty)
spin_unlock_irqrestore(&ch->ch_lock, flags);
- result = 0;
+ rc = 0;
if (mstat & UART_MCR_DTR)
- result |= TIOCM_DTR;
+ rc |= TIOCM_DTR;
if (mstat & UART_MCR_RTS)
- result |= TIOCM_RTS;
+ rc |= TIOCM_RTS;
if (mstat & UART_MSR_CTS)
- result |= TIOCM_CTS;
+ rc |= TIOCM_CTS;
if (mstat & UART_MSR_DSR)
- result |= TIOCM_DSR;
+ rc |= TIOCM_DSR;
if (mstat & UART_MSR_RI)
- result |= TIOCM_RI;
+ rc |= TIOCM_RI;
if (mstat & UART_MSR_DCD)
- result |= TIOCM_CD;
+ rc |= TIOCM_CD;
- return result;
+ return rc;
}
-/*
- * dgnc_tty_tiocmset()
- *
- * Set modem signals, called by ld.
- */
-
+/* Set modem signals, called by ld. */
static int dgnc_tty_tiocmset(struct tty_struct *tty,
unsigned int set, unsigned int clear)
{
struct dgnc_board *bd;
struct channel_t *ch;
struct un_t *un;
- int ret = -EIO;
unsigned long flags;
- if (!tty || tty->magic != TTY_MAGIC)
- return ret;
+ if (!tty)
+ return -EIO;
un = tty->driver_data;
- if (!un || un->magic != DGNC_UNIT_MAGIC)
- return ret;
+ if (!un)
+ return -EIO;
ch = un->un_ch;
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return ret;
+ if (!ch)
+ return -EIO;
bd = ch->ch_bd;
- if (!bd || bd->magic != DGNC_BOARD_MAGIC)
- return ret;
+ if (!bd)
+ return -EIO;
spin_lock_irqsave(&ch->ch_lock, flags);
@@ -1751,95 +1653,74 @@ static int dgnc_tty_tiocmset(struct tty_struct *tty,
if (clear & TIOCM_DTR)
ch->ch_mostat &= ~(UART_MCR_DTR);
- ch->ch_bd->bd_ops->assert_modem_signals(ch);
+ bd->bd_ops->assert_modem_signals(ch);
spin_unlock_irqrestore(&ch->ch_lock, flags);
return 0;
}
-/*
- * dgnc_tty_send_break()
- *
- * Send a Break, called by ld.
- */
+/* Send a Break, called by ld. */
static int dgnc_tty_send_break(struct tty_struct *tty, int msec)
{
struct dgnc_board *bd;
struct channel_t *ch;
struct un_t *un;
- int ret = -EIO;
unsigned long flags;
- if (!tty || tty->magic != TTY_MAGIC)
- return ret;
+ if (!tty)
+ return -EIO;
un = tty->driver_data;
- if (!un || un->magic != DGNC_UNIT_MAGIC)
- return ret;
+ if (!un)
+ return -EIO;
ch = un->un_ch;
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return ret;
+ if (!ch)
+ return -EIO;
bd = ch->ch_bd;
- if (!bd || bd->magic != DGNC_BOARD_MAGIC)
- return ret;
+ if (!bd)
+ return -EIO;
- switch (msec) {
- case -1:
+ if (msec < 0)
msec = 0xFFFF;
- break;
- case 0:
- msec = 0;
- break;
- default:
- break;
- }
spin_lock_irqsave(&ch->ch_lock, flags);
- ch->ch_bd->bd_ops->send_break(ch, msec);
+ bd->bd_ops->send_break(ch, msec);
spin_unlock_irqrestore(&ch->ch_lock, flags);
return 0;
}
-/*
- * dgnc_tty_wait_until_sent()
- *
- * wait until data has been transmitted, called by ld.
- */
+/* Wait until data has been transmitted, called by ld. */
static void dgnc_tty_wait_until_sent(struct tty_struct *tty, int timeout)
{
struct dgnc_board *bd;
struct channel_t *ch;
struct un_t *un;
- if (!tty || tty->magic != TTY_MAGIC)
+ if (!tty)
return;
un = tty->driver_data;
- if (!un || un->magic != DGNC_UNIT_MAGIC)
+ if (!un)
return;
ch = un->un_ch;
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ if (!ch)
return;
bd = ch->ch_bd;
- if (!bd || bd->magic != DGNC_BOARD_MAGIC)
+ if (!bd)
return;
bd->bd_ops->drain(tty, 0);
}
-/*
- * dgnc_send_xchar()
- *
- * send a high priority character, called by ld.
- */
+/* Send a high-priority character, called by ld. */
static void dgnc_tty_send_xchar(struct tty_struct *tty, char c)
{
struct dgnc_board *bd;
@@ -1847,39 +1728,34 @@ static void dgnc_tty_send_xchar(struct tty_struct *tty, char c)
struct un_t *un;
unsigned long flags;
- if (!tty || tty->magic != TTY_MAGIC)
+ if (!tty)
return;
un = tty->driver_data;
- if (!un || un->magic != DGNC_UNIT_MAGIC)
+ if (!un)
return;
ch = un->un_ch;
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ if (!ch)
return;
bd = ch->ch_bd;
- if (!bd || bd->magic != DGNC_BOARD_MAGIC)
+ if (!bd)
return;
- dev_dbg(tty->dev, "dgnc_tty_send_xchar start\n");
-
spin_lock_irqsave(&ch->ch_lock, flags);
bd->bd_ops->send_immediate_char(ch, c);
spin_unlock_irqrestore(&ch->ch_lock, flags);
-
- dev_dbg(tty->dev, "dgnc_tty_send_xchar finish\n");
}
/* Return modem signals to ld. */
-
static inline int dgnc_get_mstat(struct channel_t *ch)
{
unsigned char mstat;
- int result = 0;
unsigned long flags;
+ int rc;
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ if (!ch)
return -ENXIO;
spin_lock_irqsave(&ch->ch_lock, flags);
@@ -1888,46 +1764,43 @@ static inline int dgnc_get_mstat(struct channel_t *ch)
spin_unlock_irqrestore(&ch->ch_lock, flags);
+ rc = 0;
+
if (mstat & UART_MCR_DTR)
- result |= TIOCM_DTR;
+ rc |= TIOCM_DTR;
if (mstat & UART_MCR_RTS)
- result |= TIOCM_RTS;
+ rc |= TIOCM_RTS;
if (mstat & UART_MSR_CTS)
- result |= TIOCM_CTS;
+ rc |= TIOCM_CTS;
if (mstat & UART_MSR_DSR)
- result |= TIOCM_DSR;
+ rc |= TIOCM_DSR;
if (mstat & UART_MSR_RI)
- result |= TIOCM_RI;
+ rc |= TIOCM_RI;
if (mstat & UART_MSR_DCD)
- result |= TIOCM_CD;
+ rc |= TIOCM_CD;
- return result;
+ return rc;
}
/* Return modem signals to ld. */
-
static int dgnc_get_modem_info(struct channel_t *ch,
unsigned int __user *value)
{
return put_user(dgnc_get_mstat(ch), value);
}
-/*
- * dgnc_set_modem_info()
- *
- * Set modem signals, called by ld.
- */
+/* Set modem signals, called by ld. */
static int dgnc_set_modem_info(struct channel_t *ch,
unsigned int command,
unsigned int __user *value)
{
- int ret = -ENXIO;
+ int rc;
unsigned int arg = 0;
unsigned long flags;
- ret = get_user(arg, value);
- if (ret)
- return ret;
+ rc = get_user(arg, value);
+ if (rc)
+ return rc;
switch (command) {
case TIOCMBIS:
@@ -1975,11 +1848,7 @@ static int dgnc_set_modem_info(struct channel_t *ch,
return 0;
}
-/*
- * dgnc_tty_digigeta()
- *
- * Ioctl to get the information for ditty.
- */
+/* Ioctl to get the information for ditty. */
static int dgnc_tty_digigeta(struct tty_struct *tty,
struct digi_t __user *retinfo)
{
@@ -1991,15 +1860,15 @@ static int dgnc_tty_digigeta(struct tty_struct *tty,
if (!retinfo)
return -EFAULT;
- if (!tty || tty->magic != TTY_MAGIC)
+ if (!tty)
return -EFAULT;
un = tty->driver_data;
- if (!un || un->magic != DGNC_UNIT_MAGIC)
+ if (!un)
return -EFAULT;
ch = un->un_ch;
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ if (!ch)
return -EFAULT;
memset(&tmp, 0, sizeof(tmp));
@@ -2014,11 +1883,7 @@ static int dgnc_tty_digigeta(struct tty_struct *tty,
return 0;
}
-/*
- * dgnc_tty_digiseta()
- *
- * Ioctl to set the information for ditty.
- */
+/* Ioctl to set the information for ditty. */
static int dgnc_tty_digiseta(struct tty_struct *tty,
struct digi_t __user *new_info)
{
@@ -2028,19 +1893,19 @@ static int dgnc_tty_digiseta(struct tty_struct *tty,
struct digi_t new_digi;
unsigned long flags;
- if (!tty || tty->magic != TTY_MAGIC)
+ if (!tty)
return -EFAULT;
un = tty->driver_data;
- if (!un || un->magic != DGNC_UNIT_MAGIC)
+ if (!un)
return -EFAULT;
ch = un->un_ch;
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ if (!ch)
return -EFAULT;
bd = ch->ch_bd;
- if (!bd || bd->magic != DGNC_BOARD_MAGIC)
+ if (!bd)
return -EFAULT;
if (copy_from_user(&new_digi, new_info, sizeof(new_digi)))
@@ -2089,15 +1954,13 @@ static int dgnc_tty_digiseta(struct tty_struct *tty,
if (ch->ch_digi.digi_offlen > DIGI_PLEN)
ch->ch_digi.digi_offlen = DIGI_PLEN;
- ch->ch_bd->bd_ops->param(tty);
+ bd->bd_ops->param(tty);
spin_unlock_irqrestore(&ch->ch_lock, flags);
return 0;
}
-/* dgnc_set_termios() */
-
static void dgnc_tty_set_termios(struct tty_struct *tty,
struct ktermios *old_termios)
{
@@ -2106,19 +1969,19 @@ static void dgnc_tty_set_termios(struct tty_struct *tty,
struct un_t *un;
unsigned long flags;
- if (!tty || tty->magic != TTY_MAGIC)
+ if (!tty)
return;
un = tty->driver_data;
- if (!un || un->magic != DGNC_UNIT_MAGIC)
+ if (!un)
return;
ch = un->un_ch;
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ if (!ch)
return;
bd = ch->ch_bd;
- if (!bd || bd->magic != DGNC_BOARD_MAGIC)
+ if (!bd)
return;
spin_lock_irqsave(&ch->ch_lock, flags);
@@ -2130,7 +1993,7 @@ static void dgnc_tty_set_termios(struct tty_struct *tty,
ch->ch_startc = tty->termios.c_cc[VSTART];
ch->ch_stopc = tty->termios.c_cc[VSTOP];
- ch->ch_bd->bd_ops->param(tty);
+ bd->bd_ops->param(tty);
dgnc_carrier(ch);
spin_unlock_irqrestore(&ch->ch_lock, flags);
@@ -2142,15 +2005,15 @@ static void dgnc_tty_throttle(struct tty_struct *tty)
struct un_t *un;
unsigned long flags;
- if (!tty || tty->magic != TTY_MAGIC)
+ if (!tty)
return;
un = tty->driver_data;
- if (!un || un->magic != DGNC_UNIT_MAGIC)
+ if (!un)
return;
ch = un->un_ch;
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ if (!ch)
return;
spin_lock_irqsave(&ch->ch_lock, flags);
@@ -2166,15 +2029,15 @@ static void dgnc_tty_unthrottle(struct tty_struct *tty)
struct un_t *un;
unsigned long flags;
- if (!tty || tty->magic != TTY_MAGIC)
+ if (!tty)
return;
un = tty->driver_data;
- if (!un || un->magic != DGNC_UNIT_MAGIC)
+ if (!un)
return;
ch = un->un_ch;
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ if (!ch)
return;
spin_lock_irqsave(&ch->ch_lock, flags);
@@ -2191,19 +2054,19 @@ static void dgnc_tty_start(struct tty_struct *tty)
struct un_t *un;
unsigned long flags;
- if (!tty || tty->magic != TTY_MAGIC)
+ if (!tty)
return;
un = tty->driver_data;
- if (!un || un->magic != DGNC_UNIT_MAGIC)
+ if (!un)
return;
ch = un->un_ch;
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ if (!ch)
return;
bd = ch->ch_bd;
- if (!bd || bd->magic != DGNC_BOARD_MAGIC)
+ if (!bd)
return;
spin_lock_irqsave(&ch->ch_lock, flags);
@@ -2220,19 +2083,19 @@ static void dgnc_tty_stop(struct tty_struct *tty)
struct un_t *un;
unsigned long flags;
- if (!tty || tty->magic != TTY_MAGIC)
+ if (!tty)
return;
un = tty->driver_data;
- if (!un || un->magic != DGNC_UNIT_MAGIC)
+ if (!un)
return;
ch = un->un_ch;
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ if (!ch)
return;
bd = ch->ch_bd;
- if (!bd || bd->magic != DGNC_BOARD_MAGIC)
+ if (!bd)
return;
spin_lock_irqsave(&ch->ch_lock, flags);
@@ -2243,8 +2106,6 @@ static void dgnc_tty_stop(struct tty_struct *tty)
}
/*
- * dgnc_tty_flush_chars()
- *
* Flush the cook buffer
*
* Note to self, and any other poor souls who venture here:
@@ -2262,19 +2123,19 @@ static void dgnc_tty_flush_chars(struct tty_struct *tty)
struct un_t *un;
unsigned long flags;
- if (!tty || tty->magic != TTY_MAGIC)
+ if (!tty)
return;
un = tty->driver_data;
- if (!un || un->magic != DGNC_UNIT_MAGIC)
+ if (!un)
return;
ch = un->un_ch;
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ if (!ch)
return;
bd = ch->ch_bd;
- if (!bd || bd->magic != DGNC_BOARD_MAGIC)
+ if (!bd)
return;
spin_lock_irqsave(&ch->ch_lock, flags);
@@ -2284,26 +2145,22 @@ static void dgnc_tty_flush_chars(struct tty_struct *tty)
spin_unlock_irqrestore(&ch->ch_lock, flags);
}
-/*
- * dgnc_tty_flush_buffer()
- *
- * Flush Tx buffer (make in == out)
- */
+/* Flush Tx buffer (make in == out) */
static void dgnc_tty_flush_buffer(struct tty_struct *tty)
{
struct channel_t *ch;
struct un_t *un;
unsigned long flags;
- if (!tty || tty->magic != TTY_MAGIC)
+ if (!tty)
return;
un = tty->driver_data;
- if (!un || un->magic != DGNC_UNIT_MAGIC)
+ if (!un)
return;
ch = un->un_ch;
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ if (!ch)
return;
spin_lock_irqsave(&ch->ch_lock, flags);
@@ -2328,11 +2185,7 @@ static void dgnc_tty_flush_buffer(struct tty_struct *tty)
spin_unlock_irqrestore(&ch->ch_lock, flags);
}
-/*
- * dgnc_wake_up_unit()
- *
- * Wakes up processes waiting in the unit's (teminal/printer) wait queue
- */
+/* Wakes up processes waiting in the unit's (terminal/printer) wait queue */
static void dgnc_wake_up_unit(struct un_t *unit)
{
unit->un_flags &= ~(UN_LOW | UN_EMPTY);
@@ -2341,11 +2194,7 @@ static void dgnc_wake_up_unit(struct un_t *unit)
/* The IOCTL function and all of its helpers */
-/*
- * dgnc_tty_ioctl()
- *
- * The usual assortment of ioctl's
- */
+/* The usual assortment of ioctls */
static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
unsigned long arg)
{
@@ -2357,19 +2206,19 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
unsigned long flags;
void __user *uarg = (void __user *)arg;
- if (!tty || tty->magic != TTY_MAGIC)
+ if (!tty)
return -ENODEV;
un = tty->driver_data;
- if (!un || un->magic != DGNC_UNIT_MAGIC)
+ if (!un)
return -ENODEV;
ch = un->un_ch;
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ if (!ch)
return -ENODEV;
bd = ch->ch_bd;
- if (!bd || bd->magic != DGNC_BOARD_MAGIC)
+ if (!bd)
return -ENODEV;
ch_bd_ops = bd->bd_ops;
@@ -2377,8 +2226,8 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
spin_lock_irqsave(&ch->ch_lock, flags);
if (un->un_open_count <= 0) {
- spin_unlock_irqrestore(&ch->ch_lock, flags);
- return -EIO;
+ rc = -EIO;
+ goto err_unlock;
}
switch (cmd) {
@@ -2399,7 +2248,6 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
return rc;
rc = ch_bd_ops->drain(tty, 0);
-
if (rc)
return -EINTR;
@@ -2504,10 +2352,8 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
* also.
*/
rc = tty_check_change(tty);
- if (rc) {
- spin_unlock_irqrestore(&ch->ch_lock, flags);
- return rc;
- }
+ if (rc)
+ goto err_unlock;
if ((arg == TCIFLUSH) || (arg == TCIOFLUSH)) {
ch->ch_r_head = ch->ch_r_tail;
@@ -2588,7 +2434,6 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
if (cmd == (DIGI_SETAW)) {
spin_unlock_irqrestore(&ch->ch_lock, flags);
rc = ch_bd_ops->drain(tty, 0);
-
if (rc)
return -EINTR;
@@ -2634,7 +2479,7 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
case DIGI_SETCUSTOMBAUD:
{
int new_rate;
- /* Let go of locks when accessing user space, could sleep */
+
spin_unlock_irqrestore(&ch->ch_lock, flags);
rc = get_user(new_rate, (int __user *)arg);
if (rc)
@@ -2732,8 +2577,6 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
spin_unlock_irqrestore(&ch->ch_lock, flags);
- /* Get data from user first. */
-
if (copy_from_user(&buf, uarg, sizeof(buf)))
return -EFAULT;
@@ -2787,4 +2630,8 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
return -ENOIOCTLCMD;
}
+err_unlock:
+ spin_unlock_irqrestore(&ch->ch_lock, flags);
+
+ return rc;
}
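One more aside, on the dgnc_tty_write_room() change above: the available room is the masked distance from write head to write tail minus one, and printer units additionally reserve space for the transparent-print on/off strings. The user-space sketch below only mirrors that computation; tx_room(), WQUEUE_SIZE and the example lengths are hypothetical, not driver symbols.

/* Sketch of the Tx-room computation (illustration only). */
#include <stdbool.h>
#include <stdio.h>

#define WQUEUE_SIZE 64                /* assumed power-of-two queue size */
#define WQUEUE_MASK (WQUEUE_SIZE - 1)

static int tx_room(unsigned short head, unsigned short tail,
		   bool is_printer, bool print_on, int onlen, int offlen)
{
	int room = (tail & WQUEUE_MASK) - (head & WQUEUE_MASK) - 1;

	if (room < 0)
		room += WQUEUE_SIZE;

	if (is_printer) {
		if (!print_on)
			room -= onlen;    /* ON string may still need to go out */
		room -= offlen;           /* always keep room for the OFF string */
	} else if (print_on) {
		room -= offlen;           /* tty side must be able to end printing */
	}

	return room < 0 ? 0 : room;
}

int main(void)
{
	/* head=10, tail=4: 57 raw slots, minus 4+4 reserved = 49 */
	printf("%d\n", tx_room(10, 4, true, false, 4, 4));
	return 0;
}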
diff --git a/drivers/staging/dgnc/dgnc_tty.h b/drivers/staging/dgnc/dgnc_tty.h
index 1ee0eeeb4730..6c58f1b3461a 100644
--- a/drivers/staging/dgnc/dgnc_tty.h
+++ b/drivers/staging/dgnc/dgnc_tty.h
@@ -13,8 +13,8 @@
* PURPOSE. See the GNU General Public License for more details.
*/
-#ifndef __DGNC_TTY_H
-#define __DGNC_TTY_H
+#ifndef _DGNC_TTY_H
+#define _DGNC_TTY_H
#include "dgnc_driver.h"
@@ -30,4 +30,4 @@ void dgnc_carrier(struct channel_t *ch);
void dgnc_wakeup_writes(struct channel_t *ch);
void dgnc_check_queue_flow_control(struct channel_t *ch);
-#endif
+#endif /* _DGNC_TTY_H */
diff --git a/drivers/staging/dgnc/dgnc_utils.c b/drivers/staging/dgnc/dgnc_utils.c
index 6f59240024d1..e07ff8d2f972 100644
--- a/drivers/staging/dgnc/dgnc_utils.c
+++ b/drivers/staging/dgnc/dgnc_utils.c
@@ -2,12 +2,11 @@
#include <linux/sched/signal.h>
#include "dgnc_utils.h"
-/*
- * dgnc_ms_sleep()
+/**
+ * dgnc_ms_sleep() - Put the driver to sleep.
+ * @ms: Milliseconds to sleep.
*
- * Put the driver to sleep for x ms's
- *
- * Returns 0 if timed out, !0 (showing signal) if interrupted by a signal.
+ * Return: 0 if timed out, non-zero if interrupted by a signal.
*/
int dgnc_ms_sleep(ulong ms)
{
diff --git a/drivers/staging/dgnc/dgnc_utils.h b/drivers/staging/dgnc/dgnc_utils.h
index 1164c3a09c6b..d1f07a5735c6 100644
--- a/drivers/staging/dgnc/dgnc_utils.h
+++ b/drivers/staging/dgnc/dgnc_utils.h
@@ -1,6 +1,6 @@
-#ifndef __DGNC_UTILS_H
-#define __DGNC_UTILS_H
+#ifndef _DGNC_UTILS_H
+#define _DGNC_UTILS_H
int dgnc_ms_sleep(ulong ms);
-#endif
+#endif /* _DGNC_UTILS_H */
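Given the Return convention now documented for dgnc_ms_sleep() (0 when the full delay elapsed, non-zero when a signal interrupted it), a caller would typically map a non-zero result to -EINTR. The fragment below is only a hypothetical usage sketch; wait_a_bit() is not part of the driver.

/* Hypothetical caller, shown to illustrate the return convention. */
#include <linux/errno.h>

#include "dgnc_utils.h"

static int wait_a_bit(void)
{
	if (dgnc_ms_sleep(100))
		return -EINTR;  /* sleep was cut short by a signal */

	return 0;               /* the full 100 ms elapsed */
}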
diff --git a/drivers/staging/dgnc/digi.h b/drivers/staging/dgnc/digi.h
index ec2e3dda6119..46b06b07bbf2 100644
--- a/drivers/staging/dgnc/digi.h
+++ b/drivers/staging/dgnc/digi.h
@@ -13,8 +13,8 @@
* PURPOSE. See the GNU General Public License for more details.
*/
-#ifndef __DIGI_H
-#define __DIGI_H
+#ifndef _DIGI_H
+#define _DIGI_H
#ifndef TIOCM_LE
#define TIOCM_LE 0x01 /* line enable */
@@ -45,8 +45,7 @@
#define DIGI_SETAW (('e' << 8) | 96) /* Drain & set params */
#define DIGI_SETAF (('e' << 8) | 97) /* Drain, flush & set params */
#define DIGI_GET_NI_INFO (('d' << 8) | 250) /* Non-intelligent state info */
-#define DIGI_LOOPBACK (('d' << 8) | 252) /*
- * Enable/disable UART
+#define DIGI_LOOPBACK (('d' << 8) | 252) /* Enable/disable UART
* internal loopback
*/
#define DIGI_FAST 0x0002 /* Fast baud rates */
@@ -64,50 +63,77 @@
/*
* Structure used with ioctl commands for DIGI parameters.
*/
+/**
+ * struct digi_t - Ioctl commands for DIGI parameters.
+ * @digi_flags: Flags.
+ * @digi_maxcps: Maximum printer CPS.
+ * @digi_maxchar: Maximum characters in the print queue.
+ * @digi_bufsize: Buffer size.
+ * @digi_onlen: Length of ON string.
+ * @digi_offlen: Length of OFF string.
+ * @digi_onstr: Printer ON string.
+ * @digi_offstr: Printer OFF string.
+ * @digi_term: Terminal string.
+ */
struct digi_t {
- unsigned short digi_flags; /* Flags (see above) */
- unsigned short digi_maxcps; /* Max printer CPS */
- unsigned short digi_maxchar; /* Max chars in print queue */
- unsigned short digi_bufsize; /* Buffer size */
- unsigned char digi_onlen; /* Length of ON string */
- unsigned char digi_offlen; /* Length of OFF string */
- char digi_onstr[DIGI_PLEN]; /* Printer on string */
- char digi_offstr[DIGI_PLEN]; /* Printer off string */
- char digi_term[DIGI_TSIZ]; /* terminal string */
+ unsigned short digi_flags;
+ unsigned short digi_maxcps;
+ unsigned short digi_maxchar;
+ unsigned short digi_bufsize;
+ unsigned char digi_onlen;
+ unsigned char digi_offlen;
+ char digi_onstr[DIGI_PLEN];
+ char digi_offstr[DIGI_PLEN];
+ char digi_term[DIGI_TSIZ];
};
-/* Structure to get driver status information */
-
+/**
+ * struct digi_dinfo - Driver status information.
+ * @dinfo_nboards: Number of boards configured.
+ * @dinfo_reserved: Not used, for future expansion.
+ * @dinfo_version: Driver version.
+ */
struct digi_dinfo {
- unsigned int dinfo_nboards; /* # boards configured */
- char dinfo_reserved[12]; /* for future expansion */
- char dinfo_version[16]; /* driver version */
+ unsigned int dinfo_nboards;
+ char dinfo_reserved[12];
+ char dinfo_version[16];
};
#define DIGI_GETDD (('d' << 8) | 248) /* get driver info */
-/*
- * Structure used with ioctl commands for per-board information
+/**
+ * struct digi_info - Ioctl commands for per board information.
+ *
+ * Physsize and memsize differ when board has "windowed" memory.
*
- * physsize and memsize differ when board has "windowed" memory
+ * @info_bdnum: Board number (0 based).
+ * @info_ioport: IO port address.
+ * @info_physaddr: Memory address.
+ * @info_physsize: Size of host memory window.
+ * @info_memsize: Amount of dual-port memory on board.
+ * @info_bdtype: Board type.
+ * @info_nports: Number of ports.
+ * @info_bdstate: Board state.
+ * @info_reserved: Not used, for future expansion.
*/
struct digi_info {
- unsigned int info_bdnum; /* Board number (0 based) */
- unsigned int info_ioport; /* io port address */
- unsigned int info_physaddr; /* memory address */
- unsigned int info_physsize; /* Size of host mem window */
- unsigned int info_memsize; /* Amount of dual-port mem */
- /* on board */
- unsigned short info_bdtype; /* Board type */
- unsigned short info_nports; /* number of ports */
- char info_bdstate; /* board state */
- char info_reserved[7]; /* for future expansion */
+ unsigned int info_bdnum;
+ unsigned int info_ioport;
+ unsigned int info_physaddr;
+ unsigned int info_physsize;
+ unsigned int info_memsize;
+ unsigned short info_bdtype;
+ unsigned short info_nports;
+ char info_bdstate;
+ char info_reserved[7];
};
#define DIGI_GETBD (('d' << 8) | 249) /* get board info */
-struct digi_getbuffer /* Struct for holding buffer use counts */
-{
+/**
+ * struct digi_getbuffer - Holds buffer use counts.
+ */
+struct digi_getbuffer {
unsigned long tx_in;
unsigned long tx_out;
unsigned long rxbuf;
@@ -115,14 +141,24 @@ struct digi_getbuffer /* Struct for holding buffer use counts */
unsigned long txdone;
};
+/**
+ * struct digi_getcounter - Port error and byte counters.
+ * @norun: Number of UART overrun errors.
+ * @noflow: Number of buffer overflow errors.
+ * @nframe: Number of framing errors.
+ * @nparity: Number of parity errors.
+ * @nbreak: Number of breaks received.
+ * @rbytes: Number of received bytes.
+ * @tbytes: Number of transmitted bytes.
+ */
struct digi_getcounter {
- unsigned long norun; /* number of UART overrun errors */
- unsigned long noflow; /* number of buffer overflow errors */
- unsigned long nframe; /* number of framing errors */
- unsigned long nparity; /* number of parity errors */
- unsigned long nbreak; /* number of breaks received */
- unsigned long rbytes; /* number of received bytes */
- unsigned long tbytes; /* number of bytes transmitted fully */
+ unsigned long norun;
+ unsigned long noflow;
+ unsigned long nframe;
+ unsigned long nparity;
+ unsigned long nbreak;
+ unsigned long rbytes;
+ unsigned long tbytes;
};
/* Board State Definitions */
@@ -137,15 +173,14 @@ struct digi_getcounter {
#define DIGI_REALPORT_GETCOUNTERS (('e' << 8) | 110)
#define DIGI_REALPORT_GETEVENTS (('e' << 8) | 111)
-#define EV_OPU 0x0001 /* !<Output paused by client */
-#define EV_OPS 0x0002 /* !<Output paused by regular sw flowctrl */
-#define EV_IPU 0x0010 /* !<Input paused unconditionally by user */
-#define EV_IPS 0x0020 /* !<Input paused by high/low water marks */
-#define EV_TXB 0x0040 /* !<Transmit break pending */
+#define EV_OPU 0x0001 /* Output paused by client */
+#define EV_OPS 0x0002 /* Output paused by regular sw flowctrl */
+#define EV_IPU 0x0010 /* Input paused unconditionally by user */
+#define EV_IPS 0x0020 /* Input paused by high/low water marks */
+#define EV_TXB 0x0040 /* Transmit break pending */
-/*
- * This structure holds data needed for the intelligent <--> nonintelligent
- * DPA translation
+/**
+ * struct ni_info - Intelligent <--> non-intelligent DPA translation.
*/
struct ni_info {
int board;
@@ -175,4 +210,5 @@ struct ni_info {
#define T_NEO 0000
#define TTY_FLIPBUF_SIZE 512
-#endif /* DIGI_H */
+
+#endif /* _DIGI_H */
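The ioctl numbers kept in this header all follow one pattern: an ASCII letter in the high byte and a command index in the low byte. The user-space check below only demonstrates that encoding; DIGI_CMD() is an illustrative macro, not part of digi.h.

#include <stdio.h>

/* Same encoding as the DIGI_* defines: letter in the high byte, index low. */
#define DIGI_CMD(letter, idx) (((letter) << 8) | (idx))

int main(void)
{
	printf("DIGI_SETAW = 0x%04x\n", DIGI_CMD('e', 96));  /* 0x6560 */
	printf("DIGI_GETBD = 0x%04x\n", DIGI_CMD('d', 249)); /* 0x64f9 */
	return 0;
}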
diff --git a/drivers/staging/emxx_udc/emxx_udc.h b/drivers/staging/emxx_udc/emxx_udc.h
index 789bfb97143c..78c08e15a1f9 100644
--- a/drivers/staging/emxx_udc/emxx_udc.h
+++ b/drivers/staging/emxx_udc/emxx_udc.h
@@ -567,7 +567,7 @@ struct nbu2ss_udc {
struct usb_gadget_driver *driver;
struct platform_device *pdev;
struct device *dev;
- spinlock_t lock;
+ spinlock_t lock; /* Protects nbu2ss_udc structure fields */
struct completion *pdone;
enum ep0_state ep0state;
diff --git a/drivers/staging/fbtft/Kconfig b/drivers/staging/fbtft/Kconfig
index 6f5e82464d78..dba676761d72 100644
--- a/drivers/staging/fbtft/Kconfig
+++ b/drivers/staging/fbtft/Kconfig
@@ -111,6 +111,12 @@ config FB_TFT_S6D1121
help
Generic Framebuffer support for S6D1121
+config FB_TFT_SH1106
+ tristate "FB driver for the SH1106 OLED Controller"
+ depends on FB_TFT
+ help
+ Framebuffer support for SH1106
+
config FB_TFT_SSD1289
tristate "FB driver for the SSD1289 LCD Controller"
depends on FB_TFT
diff --git a/drivers/staging/fbtft/Makefile b/drivers/staging/fbtft/Makefile
index 2725ea9a4afc..05ae9fbf906e 100644
--- a/drivers/staging/fbtft/Makefile
+++ b/drivers/staging/fbtft/Makefile
@@ -20,6 +20,7 @@ obj-$(CONFIG_FB_TFT_PCD8544) += fb_pcd8544.o
obj-$(CONFIG_FB_TFT_RA8875) += fb_ra8875.o
obj-$(CONFIG_FB_TFT_S6D02A1) += fb_s6d02a1.o
obj-$(CONFIG_FB_TFT_S6D1121) += fb_s6d1121.o
+obj-$(CONFIG_FB_TFT_SH1106) += fb_sh1106.o
obj-$(CONFIG_FB_TFT_SSD1289) += fb_ssd1289.o
obj-$(CONFIG_FB_TFT_SSD1305) += fb_ssd1305.o
obj-$(CONFIG_FB_TFT_SSD1306) += fb_ssd1306.o
diff --git a/drivers/staging/fbtft/fb_agm1264k-fl.c b/drivers/staging/fbtft/fb_agm1264k-fl.c
index 4ee76dbd30b5..489151a6bf80 100644
--- a/drivers/staging/fbtft/fb_agm1264k-fl.c
+++ b/drivers/staging/fbtft/fb_agm1264k-fl.c
@@ -369,7 +369,7 @@ static int write_vmem(struct fbtft_par *par, size_t offset, size_t len)
/* select left side (sc0)
* set addr
*/
- write_reg(par, 0x00, (1 << 6) | (u8)addr_win.xs);
+ write_reg(par, 0x00, BIT(6) | (u8)addr_win.xs);
write_reg(par, 0x00, (0x17 << 3) | (u8)y);
/* write bitmap */
diff --git a/drivers/staging/fbtft/fb_ili9163.c b/drivers/staging/fbtft/fb_ili9163.c
index 579e17734612..045cadc3bc65 100644
--- a/drivers/staging/fbtft/fb_ili9163.c
+++ b/drivers/staging/fbtft/fb_ili9163.c
@@ -194,7 +194,7 @@ static int set_var(struct fbtft_par *par)
/* Colorspcae */
if (par->bgr)
- mactrl_data |= (1 << 2);
+ mactrl_data |= BIT(2);
write_reg(par, MIPI_DCS_SET_ADDRESS_MODE, mactrl_data);
write_reg(par, MIPI_DCS_WRITE_MEMORY_START);
return 0;
diff --git a/drivers/staging/fbtft/fb_ili9325.c b/drivers/staging/fbtft/fb_ili9325.c
index 7189de5ae4b3..7f9e9b25490e 100644
--- a/drivers/staging/fbtft/fb_ili9325.c
+++ b/drivers/staging/fbtft/fb_ili9325.c
@@ -126,7 +126,7 @@ static int init_display(struct fbtft_par *par)
write_reg(par, 0x0013, 0x0000); /* VDV[4:0] for VCOM amplitude */
mdelay(200); /* Dis-charge capacitor power voltage */
write_reg(par, 0x0010, /* SAP, BT[3:0], AP, DSTB, SLP, STB */
- (1 << 12) | (bt << 8) | (1 << 7) | (0x01 << 4));
+ BIT(12) | (bt << 8) | BIT(7) | BIT(4));
write_reg(par, 0x0011, 0x220 | vc); /* DC1[2:0], DC0[2:0], VC[2:0] */
mdelay(50); /* Delay 50ms */
write_reg(par, 0x0012, vrh); /* Internal reference voltage= Vci; */
diff --git a/drivers/staging/fbtft/fb_ili9481.c b/drivers/staging/fbtft/fb_ili9481.c
index 4e75f5abe2f9..7f182a1c084c 100644
--- a/drivers/staging/fbtft/fb_ili9481.c
+++ b/drivers/staging/fbtft/fb_ili9481.c
@@ -27,7 +27,7 @@
#define WIDTH 320
#define HEIGHT 480
-static s16 default_init_sequence[] = {
+static const s16 default_init_sequence[] = {
/* SLP_OUT - Sleep out */
-1, MIPI_DCS_EXIT_SLEEP_MODE,
-2, 50,
diff --git a/drivers/staging/fbtft/fb_ili9486.c b/drivers/staging/fbtft/fb_ili9486.c
index f4b314265f9e..ddd07a64c48a 100644
--- a/drivers/staging/fbtft/fb_ili9486.c
+++ b/drivers/staging/fbtft/fb_ili9486.c
@@ -26,7 +26,7 @@
#define HEIGHT 480
/* this init sequence matches PiScreen */
-static s16 default_init_sequence[] = {
+static const s16 default_init_sequence[] = {
/* Interface Mode Control */
-1, 0xb0, 0x0,
-1, MIPI_DCS_EXIT_SLEEP_MODE,
diff --git a/drivers/staging/fbtft/fb_ra8875.c b/drivers/staging/fbtft/fb_ra8875.c
index 89d36d6d0cd3..a899614ce829 100644
--- a/drivers/staging/fbtft/fb_ra8875.c
+++ b/drivers/staging/fbtft/fb_ra8875.c
@@ -253,7 +253,7 @@ static void write_reg8_bus8(struct fbtft_par *par, int len, ...)
static int write_vmem16_bus8(struct fbtft_par *par, size_t offset, size_t len)
{
u16 *vmem16;
- u16 *txbuf16 = par->txbuf.buf;
+ __be16 *txbuf16 = par->txbuf.buf;
size_t remain;
size_t to_copy;
size_t tx_array_size;
diff --git a/drivers/staging/fbtft/fb_s6d02a1.c b/drivers/staging/fbtft/fb_s6d02a1.c
index eb712aa0d692..c12855bb6891 100644
--- a/drivers/staging/fbtft/fb_s6d02a1.c
+++ b/drivers/staging/fbtft/fb_s6d02a1.c
@@ -24,7 +24,7 @@
#define DRVNAME "fb_s6d02a1"
-static s16 default_init_sequence[] = {
+static const s16 default_init_sequence[] = {
-1, 0xf0, 0x5a, 0x5a,
diff --git a/drivers/staging/fbtft/fb_sh1106.c b/drivers/staging/fbtft/fb_sh1106.c
new file mode 100644
index 000000000000..89c27a440305
--- /dev/null
+++ b/drivers/staging/fbtft/fb_sh1106.c
@@ -0,0 +1,195 @@
+/*
+ * FB driver for the SH1106 OLED Controller
+ * Based on the SSD1306 driver by Noralf Tronnes
+ *
+ * Copyright (C) 2017 Heiner Kallweit
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+
+#include "fbtft.h"
+
+#define DRVNAME "fb_sh1106"
+#define WIDTH 128
+#define HEIGHT 64
+
+/* Init sequence based on the Adafruit SSD1306 Arduino library */
+static int init_display(struct fbtft_par *par)
+{
+ if (!par->info->var.xres || par->info->var.xres > WIDTH ||
+ !par->info->var.yres || par->info->var.yres > HEIGHT ||
+ par->info->var.yres % 8) {
+ dev_err(par->info->device, "Invalid screen size\n");
+ return -EINVAL;
+ }
+
+ if (par->info->var.rotate) {
+ dev_err(par->info->device, "Display rotation not supported\n");
+ return -EINVAL;
+ }
+
+ par->fbtftops.reset(par);
+
+ /* Set Display OFF */
+ write_reg(par, 0xAE);
+
+ /* Set Display Clock Divide Ratio/ Oscillator Frequency */
+ write_reg(par, 0xD5, 0x80);
+
+ /* Set Multiplex Ratio */
+ write_reg(par, 0xA8, par->info->var.yres - 1);
+
+ /* Set Display Offset */
+ write_reg(par, 0xD3, 0x00);
+
+ /* Set Display Start Line */
+ write_reg(par, 0x40 | 0x0);
+
+ /* Set Segment Re-map */
+ /* column address 127 is mapped to SEG0 */
+ write_reg(par, 0xA0 | 0x1);
+
+ /* Set COM Output Scan Direction */
+ /* remapped mode. Scan from COM[N-1] to COM0 */
+ write_reg(par, 0xC8);
+
+ /* Set COM Pins Hardware Configuration */
+ if (par->info->var.yres == 64)
+ /* A[4]=1b, Alternative COM pin configuration */
+ write_reg(par, 0xDA, 0x12);
+ else if (par->info->var.yres == 48)
+ /* A[4]=1b, Alternative COM pin configuration */
+ write_reg(par, 0xDA, 0x12);
+ else
+ /* A[4]=0b, Sequential COM pin configuration */
+ write_reg(par, 0xDA, 0x02);
+
+ /* Set Pre-charge Period */
+ write_reg(par, 0xD9, 0xF1);
+
+ /* Set VCOMH Deselect Level */
+ write_reg(par, 0xDB, 0x40);
+
+ /* Set Display ON */
+ write_reg(par, 0xAF);
+
+ msleep(150);
+
+ return 0;
+}
+
+static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
+{
+}
+
+static int blank(struct fbtft_par *par, bool on)
+{
+ fbtft_par_dbg(DEBUG_BLANK, par, "%s(blank=%s)\n",
+ __func__, on ? "true" : "false");
+
+ write_reg(par, on ? 0xAE : 0xAF);
+
+ return 0;
+}
+
+/* Gamma is used to control Contrast */
+static int set_gamma(struct fbtft_par *par, u32 *curves)
+{
+ /* apply mask */
+ curves[0] &= 0xFF;
+
+ /* Set Contrast Control for BANK0 */
+ write_reg(par, 0x81, curves[0]);
+
+ return 0;
+}
+
+static int write_vmem(struct fbtft_par *par, size_t offset, size_t len)
+{
+ u16 *vmem16 = (u16 *)par->info->screen_buffer;
+ u32 xres = par->info->var.xres;
+ int page, page_start, page_end, x, i, ret;
+ u8 *buf = par->txbuf.buf;
+
+	/* offset refers to vmem with a 2-byte element size */
+ page_start = offset / (8 * 2 * xres);
+ page_end = DIV_ROUND_UP(offset + len, 8 * 2 * xres);
+
+ for (page = page_start; page < page_end; page++) {
+		/* set page; column starts at 2 (controller RAM is 132 wide) */
+ write_reg(par, 0xb0 | page, 0x00 | 2, 0x10 | 0);
+
+ memset(buf, 0, xres);
+ for (x = 0; x < xres; x++)
+ for (i = 0; i < 8; i++)
+ if (vmem16[(page * 8 + i) * xres + x])
+ buf[x] |= BIT(i);
+
+ /* Write data */
+ ret = fbtft_write_buf_dc(par, buf, xres, 1);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void write_register(struct fbtft_par *par, int len, ...)
+{
+ va_list args;
+ int i;
+
+ va_start(args, len);
+
+ for (i = 0; i < len; i++)
+ par->buf[i] = va_arg(args, unsigned int);
+
+ /* keep DC low for all command bytes to transfer */
+ fbtft_write_buf_dc(par, par->buf, len, 0);
+
+ va_end(args);
+}
+
+static struct fbtft_display display = {
+ .regwidth = 8,
+ .width = WIDTH,
+ .height = HEIGHT,
+ .txbuflen = WIDTH,
+ .gamma_num = 1,
+ .gamma_len = 1,
+ /* set default contrast to 0xcd = 80% */
+ .gamma = "cd",
+ .fbtftops = {
+ .write_vmem = write_vmem,
+ .write_register = write_register,
+ .init_display = init_display,
+ .set_addr_win = set_addr_win,
+ .blank = blank,
+ .set_gamma = set_gamma,
+ },
+};
+
+FBTFT_REGISTER_DRIVER(DRVNAME, "sinowealth,sh1106", &display);
+
+MODULE_ALIAS("spi:" DRVNAME);
+MODULE_ALIAS("platform:" DRVNAME);
+MODULE_ALIAS("spi:sh1106");
+MODULE_ALIAS("platform:sh1106");
+
+MODULE_DESCRIPTION("SH1106 OLED Driver");
+MODULE_AUTHOR("Heiner Kallweit");
+MODULE_LICENSE("GPL");
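For readers of the new driver, the page packing done in write_vmem() above can be
read in isolation as the following minimal sketch (the standalone form and function
name are illustrative only); bit i of each output byte is row (page * 8 + i) of that
column, with any non-zero 16bpp pixel treated as lit:

static void sh1106_pack_page(const u16 *vmem16, u8 *out, u32 xres, int page)
{
	u32 x;
	int i;

	for (x = 0; x < xres; x++) {
		out[x] = 0;
		for (i = 0; i < 8; i++)
			if (vmem16[(page * 8 + i) * xres + x])
				out[x] |= BIT(i);	/* row (page * 8 + i) -> bit i */
	}
}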
diff --git a/drivers/staging/fbtft/fb_ssd1289.c b/drivers/staging/fbtft/fb_ssd1289.c
index c603e1516e64..129e175fcd7e 100644
--- a/drivers/staging/fbtft/fb_ssd1289.c
+++ b/drivers/staging/fbtft/fb_ssd1289.c
@@ -47,7 +47,7 @@ static int init_display(struct fbtft_par *par)
write_reg(par, 0x0E, 0x2B00);
write_reg(par, 0x1E, 0x00B7);
write_reg(par, 0x01,
- (1 << 13) | (par->bgr << 11) | (1 << 9) | (HEIGHT - 1));
+ BIT(13) | (par->bgr << 11) | BIT(9) | (HEIGHT - 1));
write_reg(par, 0x02, 0x0600);
write_reg(par, 0x10, 0x0000);
write_reg(par, 0x05, 0x0000);
diff --git a/drivers/staging/fbtft/fb_ssd1331.c b/drivers/staging/fbtft/fb_ssd1331.c
index 26f24e32d979..9aa9864fcf30 100644
--- a/drivers/staging/fbtft/fb_ssd1331.c
+++ b/drivers/staging/fbtft/fb_ssd1331.c
@@ -130,16 +130,16 @@ static int set_gamma(struct fbtft_par *par, u32 *curves)
for (i = 0; i < 63; i++) {
if (i > 0 && curves[i] < 2) {
dev_err(par->info->device,
- "Illegal value in Grayscale Lookup Table at index %d. " \
- "Must be greater than 1\n", i);
+ "Illegal value in Grayscale Lookup Table at index %d. Must be greater than 1\n",
+ i);
return -EINVAL;
}
acc += curves[i];
tmp[i] = acc;
if (acc > 180) {
dev_err(par->info->device,
- "Illegal value(s) in Grayscale Lookup Table. " \
- "At index=%d, the accumulated value has exceeded 180\n", i);
+ "Illegal value(s) in Grayscale Lookup Table. At index=%d, the accumulated value has exceeded 180\n",
+ i);
return -EINVAL;
}
}
diff --git a/drivers/staging/fbtft/fb_st7735r.c b/drivers/staging/fbtft/fb_st7735r.c
index 24d17cdc71ab..d98522a39344 100644
--- a/drivers/staging/fbtft/fb_st7735r.c
+++ b/drivers/staging/fbtft/fb_st7735r.c
@@ -25,7 +25,7 @@
#define DEFAULT_GAMMA "0F 1A 0F 18 2F 28 20 22 1F 1B 23 37 00 07 02 10\n" \
"0F 1B 0F 17 33 2C 29 2E 30 30 39 3F 00 07 03 10"
-static s16 default_init_sequence[] = {
+static const s16 default_init_sequence[] = {
-1, MIPI_DCS_SOFT_RESET,
-2, 150, /* delay */
diff --git a/drivers/staging/fbtft/fb_watterott.c b/drivers/staging/fbtft/fb_watterott.c
index 429304546b44..180e5be6fa4f 100644
--- a/drivers/staging/fbtft/fb_watterott.c
+++ b/drivers/staging/fbtft/fb_watterott.c
@@ -69,8 +69,8 @@ static int write_vmem(struct fbtft_par *par, size_t offset, size_t len)
{
unsigned int start_line, end_line;
u16 *vmem16 = (u16 *)(par->info->screen_buffer + offset);
- u16 *pos = par->txbuf.buf + 1;
- u16 *buf16 = par->txbuf.buf + 10;
+ __be16 *pos = par->txbuf.buf + 1;
+ __be16 *buf16 = par->txbuf.buf + 10;
int i, j;
int ret = 0;
@@ -106,7 +106,7 @@ static int write_vmem_8bit(struct fbtft_par *par, size_t offset, size_t len)
{
unsigned int start_line, end_line;
u16 *vmem16 = (u16 *)(par->info->screen_buffer + offset);
- u16 *pos = par->txbuf.buf + 1;
+ __be16 *pos = par->txbuf.buf + 1;
u8 *buf8 = par->txbuf.buf + 10;
int i, j;
int ret = 0;
diff --git a/drivers/staging/fbtft/fbtft-bus.c b/drivers/staging/fbtft/fbtft-bus.c
index ec45043c0830..a80b5d115ff8 100644
--- a/drivers/staging/fbtft/fbtft-bus.c
+++ b/drivers/staging/fbtft/fbtft-bus.c
@@ -36,14 +36,9 @@ void func(struct fbtft_par *par, int len, ...) \
} \
\
*buf = modifier((type)va_arg(args, unsigned int)); \
- if (par->gpio.dc != -1) \
- gpio_set_value(par->gpio.dc, 0); \
- ret = par->fbtftops.write(par, par->buf, sizeof(type) + offset); \
- if (ret < 0) { \
- va_end(args); \
- dev_err(par->info->device, "%s: write() failed and returned %d\n", __func__, ret); \
- return; \
- } \
+ ret = fbtft_write_buf_dc(par, par->buf, sizeof(type) + offset, 0); \
+ if (ret < 0) \
+ goto out; \
len--; \
\
if (par->startbyte) \
@@ -51,19 +46,12 @@ void func(struct fbtft_par *par, int len, ...) \
\
if (len) { \
i = len; \
- while (i--) { \
+ while (i--) \
*buf++ = modifier((type)va_arg(args, unsigned int)); \
- } \
- if (par->gpio.dc != -1) \
- gpio_set_value(par->gpio.dc, 1); \
- ret = par->fbtftops.write(par, par->buf, \
- len * (sizeof(type) + offset)); \
- if (ret < 0) { \
- va_end(args); \
- dev_err(par->info->device, "%s: write() failed and returned %d\n", __func__, ret); \
- return; \
- } \
+ fbtft_write_buf_dc(par, par->buf, \
+ len * (sizeof(type) + offset), 1); \
} \
+out: \
va_end(args); \
} \
EXPORT_SYMBOL(func);
@@ -126,7 +114,7 @@ EXPORT_SYMBOL(fbtft_write_reg8_bus9);
int fbtft_write_vmem16_bus8(struct fbtft_par *par, size_t offset, size_t len)
{
u16 *vmem16;
- u16 *txbuf16 = par->txbuf.buf;
+ __be16 *txbuf16 = par->txbuf.buf;
size_t remain;
size_t to_copy;
size_t tx_array_size;
@@ -243,10 +231,7 @@ int fbtft_write_vmem16_bus16(struct fbtft_par *par, size_t offset, size_t len)
vmem16 = (u16 *)(par->info->screen_buffer + offset);
- if (par->gpio.dc != -1)
- gpio_set_value(par->gpio.dc, 1);
-
/* no need for buffered write with 16-bit bus */
- return par->fbtftops.write(par, vmem16, len);
+ return fbtft_write_buf_dc(par, vmem16, len, 1);
}
EXPORT_SYMBOL(fbtft_write_vmem16_bus16);
diff --git a/drivers/staging/fbtft/fbtft-core.c b/drivers/staging/fbtft/fbtft-core.c
index 7c8af29cdb75..b742ee786615 100644
--- a/drivers/staging/fbtft/fbtft-core.c
+++ b/drivers/staging/fbtft/fbtft-core.c
@@ -43,8 +43,23 @@ static unsigned long debug;
module_param(debug, ulong, 0000);
MODULE_PARM_DESC(debug, "override device debug level");
+int fbtft_write_buf_dc(struct fbtft_par *par, void *buf, size_t len, int dc)
+{
+ int ret;
+
+ if (gpio_is_valid(par->gpio.dc))
+ gpio_set_value(par->gpio.dc, dc);
+
+ ret = par->fbtftops.write(par, buf, len);
+ if (ret < 0)
+ dev_err(par->info->device,
+ "write() failed and returned %d\n", ret);
+ return ret;
+}
+EXPORT_SYMBOL(fbtft_write_buf_dc);
+
void fbtft_dbg_hex(const struct device *dev, int groupsize,
- void *buf, size_t len, const char *fmt, ...)
+ void *buf, size_t len, const char *fmt, ...)
{
va_list args;
static char textbuf[512];
@@ -56,7 +71,7 @@ void fbtft_dbg_hex(const struct device *dev, int groupsize,
va_end(args);
hex_dump_to_buffer(buf, len, 32, groupsize, text + text_len,
- 512 - text_len, false);
+ 512 - text_len, false);
if (len > 32)
dev_info(dev, "%s ...\n", text);
@@ -66,13 +81,13 @@ void fbtft_dbg_hex(const struct device *dev, int groupsize,
EXPORT_SYMBOL(fbtft_dbg_hex);
static unsigned long fbtft_request_gpios_match(struct fbtft_par *par,
- const struct fbtft_gpio *gpio)
+ const struct fbtft_gpio *gpio)
{
int ret;
long val;
fbtft_par_dbg(DEBUG_REQUEST_GPIOS_MATCH, par, "%s('%s')\n",
- __func__, gpio->name);
+ __func__, gpio->name);
if (strcasecmp(gpio->name, "reset") == 0) {
par->gpio.reset = gpio->gpio;
@@ -141,8 +156,8 @@ static int fbtft_request_gpios(struct fbtft_par *par)
return ret;
}
fbtft_par_dbg(DEBUG_REQUEST_GPIOS, par,
- "%s: '%s' = GPIO%d\n",
- __func__, gpio->name, gpio->gpio);
+ "%s: '%s' = GPIO%d\n",
+ __func__, gpio->name, gpio->gpio);
}
gpio++;
}
@@ -175,7 +190,7 @@ static int fbtft_request_one_gpio(struct fbtft_par *par,
flags = (of_flags & OF_GPIO_ACTIVE_LOW) ? GPIOF_OUT_INIT_LOW :
GPIOF_OUT_INIT_HIGH;
ret = devm_gpio_request_one(dev, gpio, flags,
- dev->driver->name);
+ dev->driver->name);
if (ret) {
dev_err(dev,
"gpio_request_one('%s'=%d) failed with %d\n",
@@ -185,7 +200,7 @@ static int fbtft_request_one_gpio(struct fbtft_par *par,
if (gpiop)
*gpiop = gpio;
fbtft_par_dbg(DEBUG_REQUEST_GPIOS, par, "%s: '%s' = GPIO%d\n",
- __func__, name, gpio);
+ __func__, name, gpio);
}
return ret;
@@ -219,15 +234,15 @@ static int fbtft_request_gpios_dt(struct fbtft_par *par)
return ret;
for (i = 0; i < 16; i++) {
ret = fbtft_request_one_gpio(par, "db-gpios", i,
- &par->gpio.db[i]);
+ &par->gpio.db[i]);
if (ret)
return ret;
ret = fbtft_request_one_gpio(par, "led-gpios", i,
- &par->gpio.led[i]);
+ &par->gpio.led[i]);
if (ret)
return ret;
ret = fbtft_request_one_gpio(par, "aux-gpios", i,
- &par->gpio.aux[i]);
+ &par->gpio.aux[i]);
if (ret)
return ret;
}
@@ -282,7 +297,7 @@ void fbtft_register_backlight(struct fbtft_par *par)
if (par->gpio.led[0] == -1) {
fbtft_par_dbg(DEBUG_BACKLIGHT, par,
- "%s(): led pin not set, exiting.\n", __func__);
+ "%s(): led pin not set, exiting.\n", __func__);
return;
}
@@ -348,8 +363,8 @@ static void fbtft_update_display(struct fbtft_par *par, unsigned int start_line,
if (unlikely(par->debug & (DEBUG_TIME_FIRST_UPDATE |
DEBUG_TIME_EACH_UPDATE))) {
if ((par->debug & DEBUG_TIME_EACH_UPDATE) ||
- ((par->debug & DEBUG_TIME_FIRST_UPDATE) &&
- !par->first_update_done)) {
+ ((par->debug & DEBUG_TIME_FIRST_UPDATE) &&
+ !par->first_update_done)) {
ts_start = ktime_get();
timeit = true;
}
@@ -374,7 +389,7 @@ static void fbtft_update_display(struct fbtft_par *par, unsigned int start_line,
}
fbtft_par_dbg(DEBUG_UPDATE_DISPLAY, par, "%s(start_line=%u, end_line=%u)\n",
- __func__, start_line, end_line);
+ __func__, start_line, end_line);
if (par->fbtftops.set_addr_win)
par->fbtftops.set_addr_win(par, 0, start_line,
@@ -402,8 +417,8 @@ static void fbtft_update_display(struct fbtft_par *par, unsigned int start_line,
throughput = throughput * 1000 / 1024;
dev_info(par->info->device,
- "Display update: %ld kB/s, fps=%ld\n",
- throughput, fps);
+ "Display update: %ld kB/s, fps=%ld\n",
+ throughput, fps);
par->first_update_done = true;
}
}
@@ -556,7 +571,6 @@ static int fbtft_fb_setcolreg(unsigned int regno, unsigned int red, unsigned int
ret = 0;
}
break;
-
}
return ret;
}
@@ -659,7 +673,7 @@ struct fb_info *fbtft_framebuffer_alloc(struct fbtft_display *display,
unsigned int bpp = display->bpp;
unsigned int fps = display->fps;
int vmem_size, i;
- s16 *init_sequence = display->init_sequence;
+ const s16 *init_sequence = display->init_sequence;
char *gamma = display->gamma;
u32 *gamma_curves = NULL;
@@ -957,16 +971,16 @@ int fbtft_register_framebuffer(struct fb_info *fb_info)
fbtft_sysfs_init(par);
- if (par->txbuf.buf)
+ if (par->txbuf.buf && par->txbuf.len >= 1024)
sprintf(text1, ", %zu KiB buffer memory", par->txbuf.len >> 10);
if (spi)
sprintf(text2, ", spi%d.%d at %d MHz", spi->master->bus_num,
spi->chip_select, spi->max_speed_hz / 1000000);
dev_info(fb_info->dev,
- "%s frame buffer, %dx%d, %d KiB video memory%s, fps=%lu%s\n",
- fb_info->fix.id, fb_info->var.xres, fb_info->var.yres,
- fb_info->fix.smem_len >> 10, text1,
- HZ / fb_info->fbdefio->delay, text2);
+ "%s frame buffer, %dx%d, %d KiB video memory%s, fps=%lu%s\n",
+ fb_info->fix.id, fb_info->var.xres, fb_info->var.yres,
+ fb_info->fix.smem_len >> 10, text1,
+ HZ / fb_info->fbdefio->delay, text2);
#ifdef CONFIG_FB_BACKLIGHT
/* Turn on backlight if available */
@@ -1049,7 +1063,7 @@ static int fbtft_init_display_dt(struct fbtft_par *par)
}
/* make debug message */
fbtft_par_dbg(DEBUG_INIT_DISPLAY, par,
- "init: write_register:\n");
+ "init: write_register:\n");
for (j = 0; j < i; j++)
fbtft_par_dbg(DEBUG_INIT_DISPLAY, par,
"buf[%d] = %02X\n", j, buf[j]);
@@ -1073,12 +1087,12 @@ static int fbtft_init_display_dt(struct fbtft_par *par)
buf[60], buf[61], buf[62], buf[63]);
} else if (val & FBTFT_OF_INIT_DELAY) {
fbtft_par_dbg(DEBUG_INIT_DISPLAY, par,
- "init: msleep(%u)\n", val & 0xFFFF);
+ "init: msleep(%u)\n", val & 0xFFFF);
msleep(val & 0xFFFF);
p = of_prop_next_u32(prop, p, &val);
} else {
dev_err(par->info->device, "illegal init value 0x%X\n",
- val);
+ val);
return -EINVAL;
}
}
@@ -1153,8 +1167,8 @@ int fbtft_init_display(struct fbtft_par *par)
j++;
}
fbtft_par_dbg(DEBUG_INIT_DISPLAY, par,
- "init: write(0x%02X) %s\n",
- par->init_sequence[i], msg);
+ "init: write(0x%02X) %s\n",
+ par->init_sequence[i], msg);
/* Write */
j = 0;
@@ -1447,7 +1461,7 @@ int fbtft_remove_common(struct device *dev, struct fb_info *info)
par = info->par;
if (par)
fbtft_par_dbg(DEBUG_DRIVER_INIT_FUNCTIONS, par,
- "%s()\n", __func__);
+ "%s()\n", __func__);
fbtft_unregister_framebuffer(info);
fbtft_framebuffer_release(info);
diff --git a/drivers/staging/fbtft/fbtft-sysfs.c b/drivers/staging/fbtft/fbtft-sysfs.c
index 6b6fbaa794f4..5bfd67b526b5 100644
--- a/drivers/staging/fbtft/fbtft-sysfs.c
+++ b/drivers/staging/fbtft/fbtft-sysfs.c
@@ -17,7 +17,7 @@ static int get_next_ulong(char **str_p, unsigned long *val, char *sep, int base)
}
int fbtft_gamma_parse_str(struct fbtft_par *par, u32 *curves,
- const char *str, int size)
+ const char *str, int size)
{
char *str_p, *curve_p = NULL;
char *tmp;
@@ -107,8 +107,8 @@ sprintf_gamma(struct fbtft_par *par, u32 *curves, char *buf)
}
static ssize_t store_gamma_curve(struct device *device,
- struct device_attribute *attr,
- const char *buf, size_t count)
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct fb_info *fb_info = dev_get_drvdata(device);
struct fbtft_par *par = fb_info->par;
@@ -125,7 +125,7 @@ static ssize_t store_gamma_curve(struct device *device,
mutex_lock(&par->gamma.lock);
memcpy(par->gamma.curves, tmp_curves,
- par->gamma.num_curves * par->gamma.num_values * sizeof(tmp_curves[0]));
+ par->gamma.num_curves * par->gamma.num_values * sizeof(tmp_curves[0]));
mutex_unlock(&par->gamma.lock);
return count;
@@ -172,8 +172,8 @@ void fbtft_expand_debug_value(unsigned long *debug)
}
static ssize_t store_debug(struct device *device,
- struct device_attribute *attr,
- const char *buf, size_t count)
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct fb_info *fb_info = dev_get_drvdata(device);
struct fbtft_par *par = fb_info->par;
@@ -188,7 +188,7 @@ static ssize_t store_debug(struct device *device,
}
static ssize_t show_debug(struct device *device,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
struct fb_info *fb_info = dev_get_drvdata(device);
struct fbtft_par *par = fb_info->par;
@@ -196,7 +196,7 @@ static ssize_t show_debug(struct device *device,
return snprintf(buf, PAGE_SIZE, "%lu\n", par->debug);
}
-static struct device_attribute debug_device_attr = \
+static struct device_attribute debug_device_attr =
__ATTR(debug, 0660, show_debug, store_debug);
void fbtft_sysfs_init(struct fbtft_par *par)
diff --git a/drivers/staging/fbtft/fbtft.h b/drivers/staging/fbtft/fbtft.h
index 44cf94d160d4..488ab788138e 100644
--- a/drivers/staging/fbtft/fbtft.h
+++ b/drivers/staging/fbtft/fbtft.h
@@ -124,7 +124,7 @@ struct fbtft_display {
unsigned int bpp;
unsigned int fps;
int txbuflen;
- s16 *init_sequence;
+ const s16 *init_sequence;
char *gamma;
int gamma_num;
int gamma_len;
@@ -228,7 +228,7 @@ struct fbtft_par {
int led[16];
int aux[16];
} gpio;
- s16 *init_sequence;
+ const s16 *init_sequence;
struct {
struct mutex lock;
u32 *curves;
@@ -248,6 +248,7 @@ struct fbtft_par {
par->fbtftops.write_register(par, NUMARGS(__VA_ARGS__), __VA_ARGS__)
/* fbtft-core.c */
+int fbtft_write_buf_dc(struct fbtft_par *par, void *buf, size_t len, int dc);
void fbtft_dbg_hex(const struct device *dev, int groupsize,
void *buf, size_t len, const char *fmt, ...);
struct fb_info *fbtft_framebuffer_alloc(struct fbtft_display *display,
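The fbtft_write_buf_dc() helper declared above replaces the open-coded DC gpio
handling removed from fbtft-bus.c. A minimal usage sketch, mirroring how the drivers
in this series call it (dc=0 for command bytes, dc=1 for data); the wrapper function
itself is illustrative:

static int example_send(struct fbtft_par *par, u8 cmd, void *data, size_t len)
{
	int ret;

	par->buf[0] = cmd;
	ret = fbtft_write_buf_dc(par, par->buf, 1, 0);	/* DC low: command */
	if (ret < 0)
		return ret;

	return fbtft_write_buf_dc(par, data, len, 1);	/* DC high: data */
}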
diff --git a/drivers/staging/fbtft/fbtft_device.c b/drivers/staging/fbtft/fbtft_device.c
index 9ffb9cecc465..0d974738c1c4 100644
--- a/drivers/staging/fbtft/fbtft_device.c
+++ b/drivers/staging/fbtft/fbtft_device.c
@@ -131,7 +131,7 @@ static void adafruit18_green_tab_set_addr_win(struct fbtft_par *par,
"D0 00 14 15 13 2C 42 43 4E 09 16 14 18 21\n" \
"D0 00 14 15 13 0B 43 55 53 0C 17 14 23 20"
-static s16 cberry28_init_sequence[] = {
+static const s16 cberry28_init_sequence[] = {
/* turn off sleep mode */
-1, MIPI_DCS_EXIT_SLEEP_MODE,
-2, 120,
@@ -180,7 +180,7 @@ static s16 cberry28_init_sequence[] = {
-3,
};
-static s16 hy28b_init_sequence[] = {
+static const s16 hy28b_init_sequence[] = {
-1, 0x00e7, 0x0010, -1, 0x0000, 0x0001,
-1, 0x0001, 0x0100, -1, 0x0002, 0x0700,
-1, 0x0003, 0x1030, -1, 0x0004, 0x0000,
@@ -211,7 +211,7 @@ static s16 hy28b_init_sequence[] = {
"04 1F 4 7 7 0 7 7 6 0\n" \
"0F 00 1 7 4 0 0 0 6 7"
-static s16 pitft_init_sequence[] = {
+static const s16 pitft_init_sequence[] = {
-1, MIPI_DCS_SOFT_RESET,
-2, 5,
-1, MIPI_DCS_SET_DISPLAY_OFF,
@@ -242,7 +242,7 @@ static s16 pitft_init_sequence[] = {
-3
};
-static s16 waveshare32b_init_sequence[] = {
+static const s16 waveshare32b_init_sequence[] = {
-1, 0xCB, 0x39, 0x2C, 0x00, 0x34, 0x02,
-1, 0xCF, 0x00, 0xC1, 0x30,
-1, 0xE8, 0x85, 0x00, 0x78,
@@ -1448,11 +1448,10 @@ static int __init fbtft_device_init(void)
if (fbtft_device_param_gpios[0].name[0])
gpio = fbtft_device_param_gpios;
- if (verbose > 2)
+ if (verbose > 2) {
pr_spi_devices(); /* print list of registered SPI devices */
-
- if (verbose > 2)
pr_p_devices(); /* print list of 'fb' platform devices */
+ }
pr_debug("name='%s', busnum=%d, cs=%d\n", name, busnum, cs);
@@ -1483,13 +1482,19 @@ static int __init fbtft_device_init(void)
displays[i].pdev->name = name;
displays[i].spi = NULL;
} else {
- strncpy(displays[i].spi->modalias, name, SPI_NAME_SIZE);
+ size_t len;
+
+ len = strlcpy(displays[i].spi->modalias, name,
+ SPI_NAME_SIZE);
+ if (len >= SPI_NAME_SIZE)
+ pr_warn("modalias (name) truncated to: %s\n",
+ displays[i].spi->modalias);
displays[i].pdev = NULL;
}
}
for (i = 0; i < ARRAY_SIZE(displays); i++) {
- if (strncmp(name, displays[i].name, 32) == 0) {
+ if (strncmp(name, displays[i].name, SPI_NAME_SIZE) == 0) {
if (displays[i].spi) {
spi = displays[i].spi;
spi->chip_select = cs;
diff --git a/drivers/staging/fbtft/flexfb.c b/drivers/staging/fbtft/flexfb.c
index af8422e18780..7134624a16c2 100644
--- a/drivers/staging/fbtft/flexfb.c
+++ b/drivers/staging/fbtft/flexfb.c
@@ -63,11 +63,11 @@ static bool latched;
module_param(latched, bool, 0000);
MODULE_PARM_DESC(latched, "Use with latched 16-bit databus");
-static s16 *initp;
+static const s16 *initp;
static int initp_num;
/* default init sequences */
-static s16 st7735r_init[] = {
+static const s16 st7735r_init[] = {
-1, 0x01,
-2, 150,
-1, 0x11,
@@ -96,7 +96,7 @@ static s16 st7735r_init[] = {
-3
};
-static s16 ssd1289_init[] = {
+static const s16 ssd1289_init[] = {
-1, 0x00, 0x0001,
-1, 0x03, 0xA8A4,
-1, 0x0C, 0x0000,
@@ -142,7 +142,7 @@ static s16 ssd1289_init[] = {
-3
};
-static s16 hx8340bn_init[] = {
+static const s16 hx8340bn_init[] = {
-1, 0xC1, 0xFF, 0x83, 0x40,
-1, 0x11,
-2, 150,
@@ -162,7 +162,7 @@ static s16 hx8340bn_init[] = {
-3
};
-static s16 ili9225_init[] = {
+static const s16 ili9225_init[] = {
-1, 0x0001, 0x011C,
-1, 0x0002, 0x0100,
-1, 0x0003, 0x1030,
@@ -204,7 +204,7 @@ static s16 ili9225_init[] = {
-3
};
-static s16 ili9320_init[] = {
+static const s16 ili9320_init[] = {
-1, 0x00E5, 0x8000,
-1, 0x0000, 0x0001,
-1, 0x0001, 0x0100,
@@ -265,7 +265,7 @@ static s16 ili9320_init[] = {
-3
};
-static s16 ili9325_init[] = {
+static const s16 ili9325_init[] = {
-1, 0x00E3, 0x3008,
-1, 0x00E7, 0x0012,
-1, 0x00EF, 0x1231,
@@ -324,7 +324,7 @@ static s16 ili9325_init[] = {
-3
};
-static s16 ili9341_init[] = {
+static const s16 ili9341_init[] = {
-1, 0x28,
-2, 20,
-1, 0xCF, 0x00, 0x83, 0x30,
@@ -349,7 +349,7 @@ static s16 ili9341_init[] = {
-3
};
-static s16 ssd1351_init[] = {
+static const s16 ssd1351_init[] = {
-1, 0xfd, 0x12,
-1, 0xfd, 0xb1,
-1, 0xae,
@@ -390,7 +390,7 @@ struct flexfb_lcd_controller {
unsigned int height;
unsigned int setaddrwin;
unsigned int regwidth;
- s16 *init_seq;
+ const s16 *init_seq;
int init_seq_sz;
};
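The init tables made const above all use the same fbtft encoding, which can be
inferred from the entries and their comments: -1 introduces a command and its
parameters, -2 introduces a delay (in milliseconds, judging by the sleep-out delays),
and -3 terminates the sequence. A minimal illustrative sequence in that format:

static const s16 example_init_sequence[] = {
	-1, 0x01,	/* command: software reset */
	-2, 150,	/* delay 150 ms */
	-1, 0x11,	/* command: sleep out */
	-2, 500,	/* delay 500 ms */
	-1, 0x29,	/* command: display on */
	-3		/* end of sequence */
};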
diff --git a/drivers/staging/fsl-dpaa2/Kconfig b/drivers/staging/fsl-dpaa2/Kconfig
new file mode 100644
index 000000000000..2e325cb747ae
--- /dev/null
+++ b/drivers/staging/fsl-dpaa2/Kconfig
@@ -0,0 +1,17 @@
+#
+# Freescale DataPath Acceleration Architecture Gen2 (DPAA2) drivers
+#
+
+config FSL_DPAA2
+ bool "Freescale DPAA2 devices"
+ depends on FSL_MC_BUS
+ ---help---
+ Build drivers for Freescale DataPath Acceleration
+ Architecture (DPAA2) family of SoCs.
+
+config FSL_DPAA2_ETH
+ tristate "Freescale DPAA2 Ethernet"
+ depends on FSL_DPAA2 && FSL_MC_DPIO
+ ---help---
+ Ethernet driver for Freescale DPAA2 SoCs, using the
+ Freescale MC bus driver
diff --git a/drivers/staging/fsl-dpaa2/Makefile b/drivers/staging/fsl-dpaa2/Makefile
new file mode 100644
index 000000000000..0836ba8977b1
--- /dev/null
+++ b/drivers/staging/fsl-dpaa2/Makefile
@@ -0,0 +1,5 @@
+#
+# Freescale DataPath Acceleration Architecture Gen2 (DPAA2) drivers
+#
+
+obj-$(CONFIG_FSL_DPAA2_ETH) += ethernet/
diff --git a/drivers/staging/fsl-dpaa2/ethernet/Makefile b/drivers/staging/fsl-dpaa2/ethernet/Makefile
new file mode 100644
index 000000000000..77b0b74f835a
--- /dev/null
+++ b/drivers/staging/fsl-dpaa2/ethernet/Makefile
@@ -0,0 +1,10 @@
+#
+# Makefile for the Freescale DPAA2 Ethernet controller
+#
+
+obj-$(CONFIG_FSL_DPAA2_ETH) += fsl-dpaa2-eth.o
+
+fsl-dpaa2-eth-objs := dpaa2-eth.o dpaa2-ethtool.o dpni.o
+
+# Needed by the tracing framework
+CFLAGS_dpaa2-eth.o := -I$(src)
diff --git a/drivers/staging/fsl-dpaa2/ethernet/README b/drivers/staging/fsl-dpaa2/ethernet/README
new file mode 100644
index 000000000000..410952ecf657
--- /dev/null
+++ b/drivers/staging/fsl-dpaa2/ethernet/README
@@ -0,0 +1,186 @@
+Freescale DPAA2 Ethernet driver
+===============================
+
+This file provides documentation for the Freescale DPAA2 Ethernet driver.
+
+
+Contents
+========
+ Supported Platforms
+ Architecture Overview
+ Creating a Network Interface
+ Features & Offloads
+
+
+Supported Platforms
+===================
+This driver provides networking support for Freescale DPAA2 SoCs, e.g.
+LS2080A, LS2088A, LS1088A.
+
+
+Architecture Overview
+=====================
+Unlike regular NICs, in the DPAA2 architecture there is no single hardware block
+representing network interfaces; instead, several separate hardware resources
+work together to provide the networking functionality:
+ - network interfaces
+ - queues, channels
+ - buffer pools
+ - MAC/PHY
+
+All hardware resources are allocated and configured through the Management
+Complex (MC) portals. MC abstracts most of these resources as DPAA2 objects
+and exposes ABIs through which they can be configured and controlled. A few
+hardware resources, like queues, do not have a corresponding MC object and
+are treated as internal resources of other objects.
+
+For a more detailed description of the DPAA2 architecture and its object
+abstractions see:
+ drivers/staging/fsl-mc/README.txt
+
+Each Linux net device is built on top of a Datapath Network Interface (DPNI)
+object and uses Buffer Pools (DPBPs), I/O Portals (DPIOs) and Concentrators
+(DPCONs).
+
+Configuration interface:
+
+ -----------------------
+ | DPAA2 Ethernet Driver |
+ -----------------------
+ . . .
+ . . .
+ . . . . . . . . . . . .
+ . . .
+ . . .
+ ---------- ---------- -----------
+ | DPBP API | | DPNI API | | DPCON API |
+ ---------- ---------- -----------
+ . . . software
+=========== . ========== . ============ . ===================
+ . . . hardware
+ ------------------------------------------
+ | MC hardware portals |
+ ------------------------------------------
+ . . .
+ . . .
+ ------ ------ -------
+ | DPBP | | DPNI | | DPCON |
+ ------ ------ -------
+
+The DPNIs are network interfaces without a direct one-to-one mapping to PHYs.
+DPBPs represent hardware buffer pools. Packet I/O is performed in the context
+of DPCON objects, using DPIO portals for managing and communicating with the
+hardware resources.
+
+Datapath (I/O) interface:
+
+ -----------------------------------------------
+ | DPAA2 Ethernet Driver |
+ -----------------------------------------------
+ | ^ ^ | |
+ | | | | |
+ enqueue| dequeue| data | dequeue| seed |
+ (Tx) | (Rx, TxC)| avail.| request| buffers|
+ | | notify| | |
+ | | | | |
+ V | | V V
+ -----------------------------------------------
+ | DPIO Driver |
+ -----------------------------------------------
+ | | | | | software
+ | | | | | ================
+ | | | | | hardware
+ -----------------------------------------------
+ | I/O hardware portals |
+ -----------------------------------------------
+ | ^ ^ | |
+ | | | | |
+ | | | V |
+ V | ================ V
+ ---------------------- | -------------
+ queues ---------------------- | | Buffer pool |
+ ---------------------- | -------------
+ =======================
+ Channel
+
+Datapath I/O (DPIO) portals provide enqueue and dequeue services, data
+availability notifications and buffer pool management. DPIOs are shared between
+all DPAA2 objects (and implicitly all DPAA2 kernel drivers) that work with data
+frames, but must be affine to the CPUs for the purpose of traffic distribution.
+
+Frames are transmitted and received through hardware frame queues, which can be
+grouped in channels for the purpose of hardware scheduling. The Ethernet driver
+enqueues TX frames on egress queues and, once transmission completes, a TX
+confirmation frame is sent back to the CPU.
+
+When frames are available on ingress queues, a data availability notification
+is sent to the CPU; notifications are raised per channel, so even if multiple
+queues in the same channel have available frames, only one notification is sent.
+After a channel fires a notification, it must be explicitly rearmed.
+
+Each network interface can have multiple Rx, Tx and confirmation queues affined
+to CPUs, and one channel (DPCON) for each CPU that services at least one queue.
+DPCONs are used to distribute ingress traffic to different CPUs via the cores'
+affine DPIOs.
+
+Hardware buffer pools store ingress frame data. Each network interface has a
+privately owned buffer pool, which it seeds with kernel-allocated buffers.
+
+
+DPNIs are decoupled from PHYs; a DPNI can be connected to a PHY through a DPMAC
+object or to another DPNI through an internal link, but the connection is
+managed by MC and completely transparent to the Ethernet driver.
+
+ --------- --------- ---------
+ | eth if1 | | eth if2 | | eth ifn |
+ --------- --------- ---------
+ . . .
+ . . .
+ . . .
+ ---------------------------
+ | DPAA2 Ethernet Driver |
+ ---------------------------
+ . . .
+ . . .
+ . . .
+ ------ ------ ------ -------
+ | DPNI | | DPNI | | DPNI | | DPMAC |----+
+ ------ ------ ------ ------- |
+ | | | | |
+ | | | | -----
+ =========== ================== | PHY |
+ -----
+
+Creating a Network Interface
+============================
+A net device is created for each DPNI object probed on the MC bus. Each DPNI has
+a number of properties which determine the network interface configuration
+options and associated hardware resources.
+
+DPNI objects (and the other DPAA2 objects needed for a network interface) can be
+added to a container on the MC bus in one of two ways: statically, through a
+Datapath Layout Binary file (DPL) that is parsed by MC at boot time, or
+dynamically at runtime, via the DPAA2 object APIs.
+
+
+Features & Offloads
+===================
+Hardware checksum offloading is supported for TCP and UDP over IPv4/6 frames.
+The checksum offloads can be independently configured on RX and TX through
+ethtool.
+
+Hardware offload of unicast and multicast MAC filtering is supported on the
+ingress path and permanently enabled.
+
+Scatter-gather frames are supported on both RX and TX paths. On TX, SG support
+is configurable via ethtool; on RX it is always enabled.
+
+The DPAA2 hardware can process jumbo Ethernet frames of up to 10K bytes.
+
+The Ethernet driver defines a static flow hashing scheme that distributes
+traffic based on a 5-tuple key: src IP, dst IP, IP proto, L4 src port,
+L4 dst port. No user configuration is supported for now.
+
+Hardware-specific statistics for the network interface, as well as some
+non-standard driver stats, can be consulted through the ethtool -S option.
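The notify/consume/rearm cycle described above maps onto a NAPI poll roughly as in
the sketch below; consume_frames() is the routine added later in this patch, while
the nctx field and the dpaa2_io_service_rearm() call are assumptions about the DPIO
driver API and may not match it exactly:

/* Simplified sketch only; error handling and the pull-dequeue step omitted */
static int example_poll(struct napi_struct *napi, int budget)
{
	struct dpaa2_eth_channel *ch;
	int cleaned;

	ch = container_of(napi, struct dpaa2_eth_channel, napi);
	cleaned = consume_frames(ch);

	if (cleaned < budget) {
		napi_complete(napi);
		/* the channel must be explicitly rearmed (assumed DPIO API) */
		dpaa2_io_service_rearm(NULL, &ch->nctx);
	}

	return cleaned;
}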
diff --git a/drivers/staging/fsl-dpaa2/ethernet/TODO b/drivers/staging/fsl-dpaa2/ethernet/TODO
new file mode 100644
index 000000000000..e400a5e427a5
--- /dev/null
+++ b/drivers/staging/fsl-dpaa2/ethernet/TODO
@@ -0,0 +1,18 @@
+* Add a DPAA2 MAC kernel driver in order to allow PHY management; currently
+ the DPMAC objects and their link to DPNIs are handled by MC internally
+ and all PHYs are seen as fixed-link
+* add more debug support: decide how to expose detailed debug statistics,
+ add ingress error queue support
+* MC firmware uprev; the DPAA2 objects used by the Ethernet driver need to
+ be kept in sync with binary interface changes in MC
+* refine README file
+* cleanup
+
+NOTE: None of the above is a must-have before getting the DPAA2 Ethernet driver
+out of staging. The main requirement for that is to have the drivers it
+depends on, fsl-mc bus and DPIO driver, moved to drivers/bus and drivers/soc
+respectively.
+
+ Please send any patches to Greg Kroah-Hartman <gregkh@linuxfoundation.org>,
+ ruxandra.radulescu@nxp.com, devel@driverdev.osuosl.org,
+ linux-kernel@vger.kernel.org
diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-trace.h b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-trace.h
new file mode 100644
index 000000000000..3b040e8d6a4e
--- /dev/null
+++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-trace.h
@@ -0,0 +1,185 @@
+/* Copyright 2014-2015 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM dpaa2_eth
+
+#if !defined(_DPAA2_ETH_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _DPAA2_ETH_TRACE_H
+
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include "dpaa2-eth.h"
+#include <linux/tracepoint.h>
+
+#define TR_FMT "[%s] fd: addr=0x%llx, len=%u, off=%u"
+/* trace_printk format for raw buffer event class */
+#define TR_BUF_FMT "[%s] vaddr=%p size=%zu dma_addr=%pad map_size=%zu bpid=%d"
+
+/* This is used to declare a class of events.
+ * Individual events of this type will be defined below.
+ */
+
+/* Store details about a frame descriptor */
+DECLARE_EVENT_CLASS(dpaa2_eth_fd,
+ /* Trace function prototype */
+ TP_PROTO(struct net_device *netdev,
+ const struct dpaa2_fd *fd),
+
+ /* Repeat argument list here */
+ TP_ARGS(netdev, fd),
+
+ /* A structure containing the relevant information we want
+ * to record. Declare name and type for each normal element,
+ * name, type and size for arrays. Use __string for variable
+ * length strings.
+ */
+ TP_STRUCT__entry(
+ __field(u64, fd_addr)
+ __field(u32, fd_len)
+ __field(u16, fd_offset)
+ __string(name, netdev->name)
+ ),
+
+ /* The function that assigns values to the above declared
+ * fields
+ */
+ TP_fast_assign(
+ __entry->fd_addr = dpaa2_fd_get_addr(fd);
+ __entry->fd_len = dpaa2_fd_get_len(fd);
+ __entry->fd_offset = dpaa2_fd_get_offset(fd);
+ __assign_str(name, netdev->name);
+ ),
+
+ /* This is what gets printed when the trace event is
+ * triggered.
+ */
+ TP_printk(TR_FMT,
+ __get_str(name),
+ __entry->fd_addr,
+ __entry->fd_len,
+ __entry->fd_offset)
+);
+
+/* Now declare events of the above type. Format is:
+ * DEFINE_EVENT(class, name, proto, args), with proto and args same as for class
+ */
+
+/* Tx (egress) fd */
+DEFINE_EVENT(dpaa2_eth_fd, dpaa2_tx_fd,
+ TP_PROTO(struct net_device *netdev,
+ const struct dpaa2_fd *fd),
+
+ TP_ARGS(netdev, fd)
+);
+
+/* Rx fd */
+DEFINE_EVENT(dpaa2_eth_fd, dpaa2_rx_fd,
+ TP_PROTO(struct net_device *netdev,
+ const struct dpaa2_fd *fd),
+
+ TP_ARGS(netdev, fd)
+);
+
+/* Tx confirmation fd */
+DEFINE_EVENT(dpaa2_eth_fd, dpaa2_tx_conf_fd,
+ TP_PROTO(struct net_device *netdev,
+ const struct dpaa2_fd *fd),
+
+ TP_ARGS(netdev, fd)
+);
+
+/* Log data about raw buffers. Useful for tracing DPBP content. */
+TRACE_EVENT(dpaa2_eth_buf_seed,
+ /* Trace function prototype */
+ TP_PROTO(struct net_device *netdev,
+ /* virtual address and size */
+ void *vaddr,
+ size_t size,
+ /* dma map address and size */
+ dma_addr_t dma_addr,
+ size_t map_size,
+ /* buffer pool id, if relevant */
+ u16 bpid),
+
+ /* Repeat argument list here */
+ TP_ARGS(netdev, vaddr, size, dma_addr, map_size, bpid),
+
+ /* A structure containing the relevant information we want
+ * to record. Declare name and type for each normal element,
+ * name, type and size for arrays. Use __string for variable
+ * length strings.
+ */
+ TP_STRUCT__entry(
+ __field(void *, vaddr)
+ __field(size_t, size)
+ __field(dma_addr_t, dma_addr)
+ __field(size_t, map_size)
+ __field(u16, bpid)
+ __string(name, netdev->name)
+ ),
+
+ /* The function that assigns values to the above declared
+ * fields
+ */
+ TP_fast_assign(
+ __entry->vaddr = vaddr;
+ __entry->size = size;
+ __entry->dma_addr = dma_addr;
+ __entry->map_size = map_size;
+ __entry->bpid = bpid;
+ __assign_str(name, netdev->name);
+ ),
+
+ /* This is what gets printed when the trace event is
+ * triggered.
+ */
+ TP_printk(TR_BUF_FMT,
+ __get_str(name),
+ __entry->vaddr,
+ __entry->size,
+ &__entry->dma_addr,
+ __entry->map_size,
+ __entry->bpid)
+);
+
+/* If only one event of a certain type needs to be declared, use TRACE_EVENT().
+ * The syntax is the same as for DECLARE_EVENT_CLASS().
+ */
+
+#endif /* _DPAA2_ETH_TRACE_H */
+
+/* This must be outside ifdef _DPAA2_ETH_TRACE_H */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE dpaa2-eth-trace
+#include <trace/define_trace.h>
diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c
new file mode 100644
index 000000000000..6f9eed66c64d
--- /dev/null
+++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c
@@ -0,0 +1,2520 @@
+/* Copyright 2014-2016 Freescale Semiconductor Inc.
+ * Copyright 2016-2017 NXP
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/etherdevice.h>
+#include <linux/of_net.h>
+#include <linux/interrupt.h>
+#include <linux/msi.h>
+#include <linux/kthread.h>
+
+#include "../../fsl-mc/include/mc.h"
+#include "../../fsl-mc/include/mc-sys.h"
+#include "dpaa2-eth.h"
+
+/* CREATE_TRACE_POINTS only needs to be defined once. Other dpaa2 files
+ * using these trace events only need to include the trace header.
+ */
+#define CREATE_TRACE_POINTS
+#include "dpaa2-eth-trace.h"
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Freescale Semiconductor, Inc");
+MODULE_DESCRIPTION("Freescale DPAA2 Ethernet Driver");
+
+const char dpaa2_eth_drv_version[] = "0.1";
+
+static void validate_rx_csum(struct dpaa2_eth_priv *priv,
+ u32 fd_status,
+ struct sk_buff *skb)
+{
+ skb_checksum_none_assert(skb);
+
+ /* HW checksum validation is disabled, nothing to do here */
+ if (!(priv->net_dev->features & NETIF_F_RXCSUM))
+ return;
+
+ /* Read checksum validation bits */
+ if (!((fd_status & DPAA2_FAS_L3CV) &&
+ (fd_status & DPAA2_FAS_L4CV)))
+ return;
+
+ /* Inform the stack there's no need to compute L3/L4 csum anymore */
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+}
+
+/* Free a received FD.
+ * Not to be used for Tx conf FDs or on any other paths.
+ */
+static void free_rx_fd(struct dpaa2_eth_priv *priv,
+ const struct dpaa2_fd *fd,
+ void *vaddr)
+{
+ struct device *dev = priv->net_dev->dev.parent;
+ dma_addr_t addr = dpaa2_fd_get_addr(fd);
+ u8 fd_format = dpaa2_fd_get_format(fd);
+ struct dpaa2_sg_entry *sgt;
+ void *sg_vaddr;
+ int i;
+
+ /* If single buffer frame, just free the data buffer */
+ if (fd_format == dpaa2_fd_single)
+ goto free_buf;
+ else if (fd_format != dpaa2_fd_sg)
+ /* We don't support any other format */
+ return;
+
+ /* For S/G frames, we first need to free all SG entries */
+ sgt = vaddr + dpaa2_fd_get_offset(fd);
+ for (i = 0; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
+ addr = dpaa2_sg_get_addr(&sgt[i]);
+ dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
+ DMA_FROM_DEVICE);
+
+ sg_vaddr = phys_to_virt(addr);
+ skb_free_frag(sg_vaddr);
+
+ if (dpaa2_sg_is_final(&sgt[i]))
+ break;
+ }
+
+free_buf:
+ skb_free_frag(vaddr);
+}
+
+/* Build a linear skb based on a single-buffer frame descriptor */
+static struct sk_buff *build_linear_skb(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ const struct dpaa2_fd *fd,
+ void *fd_vaddr)
+{
+ struct sk_buff *skb = NULL;
+ u16 fd_offset = dpaa2_fd_get_offset(fd);
+ u32 fd_length = dpaa2_fd_get_len(fd);
+
+ skb = build_skb(fd_vaddr, DPAA2_ETH_RX_BUF_SIZE +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
+ if (unlikely(!skb))
+ return NULL;
+
+ skb_reserve(skb, fd_offset);
+ skb_put(skb, fd_length);
+
+ ch->buf_count--;
+
+ return skb;
+}
+
+/* Build a non linear (fragmented) skb based on a S/G table */
+static struct sk_buff *build_frag_skb(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ struct dpaa2_sg_entry *sgt)
+{
+ struct sk_buff *skb = NULL;
+ struct device *dev = priv->net_dev->dev.parent;
+ void *sg_vaddr;
+ dma_addr_t sg_addr;
+ u16 sg_offset;
+ u32 sg_length;
+ struct page *page, *head_page;
+ int page_offset;
+ int i;
+
+ for (i = 0; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
+ struct dpaa2_sg_entry *sge = &sgt[i];
+
+ /* NOTE: We only support SG entries in dpaa2_sg_single format,
+ * but this is the only format we may receive from HW anyway
+ */
+
+ /* Get the address and length from the S/G entry */
+ sg_addr = dpaa2_sg_get_addr(sge);
+ dma_unmap_single(dev, sg_addr, DPAA2_ETH_RX_BUF_SIZE,
+ DMA_FROM_DEVICE);
+
+ sg_vaddr = phys_to_virt(sg_addr);
+ sg_length = dpaa2_sg_get_len(sge);
+
+ if (i == 0) {
+ /* We build the skb around the first data buffer */
+ skb = build_skb(sg_vaddr, DPAA2_ETH_RX_BUF_SIZE +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
+ if (unlikely(!skb))
+ return NULL;
+
+ sg_offset = dpaa2_sg_get_offset(sge);
+ skb_reserve(skb, sg_offset);
+ skb_put(skb, sg_length);
+ } else {
+ /* Rest of the data buffers are stored as skb frags */
+ page = virt_to_page(sg_vaddr);
+ head_page = virt_to_head_page(sg_vaddr);
+
+ /* Offset in page (which may be compound).
+ * Data in subsequent SG entries is stored from the
+ * beginning of the buffer, so we don't need to add the
+ * sg_offset.
+ */
+ page_offset = ((unsigned long)sg_vaddr &
+ (PAGE_SIZE - 1)) +
+ (page_address(page) - page_address(head_page));
+
+ skb_add_rx_frag(skb, i - 1, head_page, page_offset,
+ sg_length, DPAA2_ETH_RX_BUF_SIZE);
+ }
+
+ if (dpaa2_sg_is_final(sge))
+ break;
+ }
+
+ /* Count all data buffers + SG table buffer */
+ ch->buf_count -= i + 2;
+
+ return skb;
+}
+
+/* Main Rx frame processing routine */
+static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ const struct dpaa2_fd *fd,
+ struct napi_struct *napi)
+{
+ dma_addr_t addr = dpaa2_fd_get_addr(fd);
+ u8 fd_format = dpaa2_fd_get_format(fd);
+ void *vaddr;
+ struct sk_buff *skb;
+ struct rtnl_link_stats64 *percpu_stats;
+ struct dpaa2_eth_drv_stats *percpu_extras;
+ struct device *dev = priv->net_dev->dev.parent;
+ struct dpaa2_fas *fas;
+ u32 status = 0;
+
+ /* Tracing point */
+ trace_dpaa2_rx_fd(priv->net_dev, fd);
+
+ dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE, DMA_FROM_DEVICE);
+ vaddr = phys_to_virt(addr);
+
+ prefetch(vaddr + priv->buf_layout.private_data_size);
+ prefetch(vaddr + dpaa2_fd_get_offset(fd));
+
+ percpu_stats = this_cpu_ptr(priv->percpu_stats);
+ percpu_extras = this_cpu_ptr(priv->percpu_extras);
+
+ if (fd_format == dpaa2_fd_single) {
+ skb = build_linear_skb(priv, ch, fd, vaddr);
+ } else if (fd_format == dpaa2_fd_sg) {
+ struct dpaa2_sg_entry *sgt =
+ vaddr + dpaa2_fd_get_offset(fd);
+ skb = build_frag_skb(priv, ch, sgt);
+ skb_free_frag(vaddr);
+ percpu_extras->rx_sg_frames++;
+ percpu_extras->rx_sg_bytes += dpaa2_fd_get_len(fd);
+ } else {
+ /* We don't support any other format */
+ goto err_frame_format;
+ }
+
+ if (unlikely(!skb))
+ goto err_build_skb;
+
+ prefetch(skb->data);
+
+ /* Check if we need to validate the L4 csum */
+ if (likely(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV)) {
+ fas = (struct dpaa2_fas *)
+ (vaddr + priv->buf_layout.private_data_size);
+ status = le32_to_cpu(fas->status);
+ validate_rx_csum(priv, status, skb);
+ }
+
+ skb->protocol = eth_type_trans(skb, priv->net_dev);
+
+ percpu_stats->rx_packets++;
+ percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
+
+ if (priv->net_dev->features & NETIF_F_GRO)
+ napi_gro_receive(napi, skb);
+ else
+ netif_receive_skb(skb);
+
+ return;
+
+err_build_skb:
+ free_rx_fd(priv, fd, vaddr);
+err_frame_format:
+ percpu_stats->rx_dropped++;
+}
+
+/* Consume all frames pull-dequeued into the store. This is the simplest way to
+ * make sure we don't accidentally issue another volatile dequeue which would
+ * overwrite (leak) frames already in the store.
+ *
+ * Observance of NAPI budget is not our concern, leaving that to the caller.
+ */
+static int consume_frames(struct dpaa2_eth_channel *ch)
+{
+ struct dpaa2_eth_priv *priv = ch->priv;
+ struct dpaa2_eth_fq *fq;
+ struct dpaa2_dq *dq;
+ const struct dpaa2_fd *fd;
+ int cleaned = 0;
+ int is_last;
+
+ do {
+ dq = dpaa2_io_store_next(ch->store, &is_last);
+ if (unlikely(!dq)) {
+ /* If we're here, we *must* have placed a
+			 * volatile dequeue command, so keep reading through
+ * the store until we get some sort of valid response
+ * token (either a valid frame or an "empty dequeue")
+ */
+ continue;
+ }
+
+ fd = dpaa2_dq_fd(dq);
+ fq = (struct dpaa2_eth_fq *)dpaa2_dq_fqd_ctx(dq);
+ fq->stats.frames++;
+
+ fq->consume(priv, ch, fd, &ch->napi);
+ cleaned++;
+ } while (!is_last);
+
+ return cleaned;
+}
+
+/* Create a frame descriptor based on a fragmented skb */
+static int build_sg_fd(struct dpaa2_eth_priv *priv,
+ struct sk_buff *skb,
+ struct dpaa2_fd *fd)
+{
+ struct device *dev = priv->net_dev->dev.parent;
+ void *sgt_buf = NULL;
+ void *hwa;
+ dma_addr_t addr;
+ int nr_frags = skb_shinfo(skb)->nr_frags;
+ struct dpaa2_sg_entry *sgt;
+ int i, err;
+ int sgt_buf_size;
+ struct scatterlist *scl, *crt_scl;
+ int num_sg;
+ int num_dma_bufs;
+ struct dpaa2_eth_swa *swa;
+
+ /* Create and map scatterlist.
+ * We don't advertise NETIF_F_FRAGLIST, so skb_to_sgvec() will not have
+ * to go beyond nr_frags+1.
+ * Note: We don't support chained scatterlists
+ */
+ if (unlikely(PAGE_SIZE / sizeof(struct scatterlist) < nr_frags + 1))
+ return -EINVAL;
+
+ scl = kcalloc(nr_frags + 1, sizeof(struct scatterlist), GFP_ATOMIC);
+ if (unlikely(!scl))
+ return -ENOMEM;
+
+ sg_init_table(scl, nr_frags + 1);
+ num_sg = skb_to_sgvec(skb, scl, 0, skb->len);
+ num_dma_bufs = dma_map_sg(dev, scl, num_sg, DMA_TO_DEVICE);
+ if (unlikely(!num_dma_bufs)) {
+ err = -ENOMEM;
+ goto dma_map_sg_failed;
+ }
+
+ /* Prepare the HW SGT structure */
+ sgt_buf_size = priv->tx_data_offset +
+ sizeof(struct dpaa2_sg_entry) * (1 + num_dma_bufs);
+ sgt_buf = kzalloc(sgt_buf_size + DPAA2_ETH_TX_BUF_ALIGN, GFP_ATOMIC);
+ if (unlikely(!sgt_buf)) {
+ err = -ENOMEM;
+ goto sgt_buf_alloc_failed;
+ }
+ sgt_buf = PTR_ALIGN(sgt_buf, DPAA2_ETH_TX_BUF_ALIGN);
+
+ /* PTA from egress side is passed as is to the confirmation side so
+ * we need to clear some fields here in order to find consistent values
+ * on TX confirmation. We are clearing FAS (Frame Annotation Status)
+ * field from the hardware annotation area
+ */
+ hwa = sgt_buf + priv->buf_layout.private_data_size;
+ memset(hwa + DPAA2_FAS_OFFSET, 0, DPAA2_FAS_SIZE);
+
+ sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);
+
+ /* Fill in the HW SGT structure.
+ *
+ * sgt_buf is zeroed out, so the following fields are implicit
+ * in all sgt entries:
+ * - offset is 0
+ * - format is 'dpaa2_sg_single'
+ */
+ for_each_sg(scl, crt_scl, num_dma_bufs, i) {
+ dpaa2_sg_set_addr(&sgt[i], sg_dma_address(crt_scl));
+ dpaa2_sg_set_len(&sgt[i], sg_dma_len(crt_scl));
+ }
+ dpaa2_sg_set_final(&sgt[i - 1], true);
+
+ /* Store the skb backpointer in the SGT buffer.
+ * Fit the scatterlist and the number of buffers alongside the
+ * skb backpointer in the software annotation area. We'll need
+ * all of them on Tx Conf.
+ */
+ swa = (struct dpaa2_eth_swa *)sgt_buf;
+ swa->skb = skb;
+ swa->scl = scl;
+ swa->num_sg = num_sg;
+ swa->num_dma_bufs = num_dma_bufs;
+
+ /* Separately map the SGT buffer */
+ addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev, addr))) {
+ err = -ENOMEM;
+ goto dma_map_single_failed;
+ }
+ dpaa2_fd_set_offset(fd, priv->tx_data_offset);
+ dpaa2_fd_set_format(fd, dpaa2_fd_sg);
+ dpaa2_fd_set_addr(fd, addr);
+ dpaa2_fd_set_len(fd, skb->len);
+ dpaa2_fd_set_ctrl(fd, DPAA2_FD_CTRL_ASAL | DPAA2_FD_CTRL_PTA |
+ DPAA2_FD_CTRL_PTV1);
+
+ return 0;
+
+dma_map_single_failed:
+ kfree(sgt_buf);
+sgt_buf_alloc_failed:
+ dma_unmap_sg(dev, scl, num_sg, DMA_TO_DEVICE);
+dma_map_sg_failed:
+ kfree(scl);
+ return err;
+}
+
+/* Create a frame descriptor based on a linear skb */
+static int build_single_fd(struct dpaa2_eth_priv *priv,
+ struct sk_buff *skb,
+ struct dpaa2_fd *fd)
+{
+ struct device *dev = priv->net_dev->dev.parent;
+ u8 *buffer_start;
+ void *hwa;
+ struct sk_buff **skbh;
+ dma_addr_t addr;
+
+ buffer_start = PTR_ALIGN(skb->data - priv->tx_data_offset -
+ DPAA2_ETH_TX_BUF_ALIGN,
+ DPAA2_ETH_TX_BUF_ALIGN);
+
+ /* PTA from egress side is passed as is to the confirmation side so
+ * we need to clear some fields here in order to find consistent values
+ * on TX confirmation. We are clearing FAS (Frame Annotation Status)
+ * field from the hardware annotation area
+ */
+ hwa = buffer_start + priv->buf_layout.private_data_size;
+ memset(hwa + DPAA2_FAS_OFFSET, 0, DPAA2_FAS_SIZE);
+
+ /* Store a backpointer to the skb at the beginning of the buffer
+ * (in the private data area) such that we can release it
+ * on Tx confirm
+ */
+ skbh = (struct sk_buff **)buffer_start;
+ *skbh = skb;
+
+ addr = dma_map_single(dev, buffer_start,
+ skb_tail_pointer(skb) - buffer_start,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev, addr)))
+ return -ENOMEM;
+
+ dpaa2_fd_set_addr(fd, addr);
+ dpaa2_fd_set_offset(fd, (u16)(skb->data - buffer_start));
+ dpaa2_fd_set_len(fd, skb->len);
+ dpaa2_fd_set_format(fd, dpaa2_fd_single);
+ dpaa2_fd_set_ctrl(fd, DPAA2_FD_CTRL_ASAL | DPAA2_FD_CTRL_PTA |
+ DPAA2_FD_CTRL_PTV1);
+
+ return 0;
+}
+
+/* FD freeing routine on the Tx path
+ *
+ * DMA-unmap and free FD and possibly SGT buffer allocated on Tx. The skb
+ * back-pointed to is also freed.
+ * This can be called either from dpaa2_eth_tx_conf() or on the error path of
+ * dpaa2_eth_tx().
+ * Optionally, return the frame annotation status word (FAS), which needs
+ * to be checked if we're on the confirmation path.
+ */
+static void free_tx_fd(const struct dpaa2_eth_priv *priv,
+ const struct dpaa2_fd *fd,
+ u32 *status)
+{
+ struct device *dev = priv->net_dev->dev.parent;
+ dma_addr_t fd_addr;
+ struct sk_buff **skbh, *skb;
+ unsigned char *buffer_start;
+ int unmap_size;
+ struct scatterlist *scl;
+ int num_sg, num_dma_bufs;
+ struct dpaa2_eth_swa *swa;
+ u8 fd_format = dpaa2_fd_get_format(fd);
+ struct dpaa2_fas *fas;
+
+ fd_addr = dpaa2_fd_get_addr(fd);
+ skbh = phys_to_virt(fd_addr);
+
+ if (fd_format == dpaa2_fd_single) {
+ skb = *skbh;
+ buffer_start = (unsigned char *)skbh;
+ /* Accessing the skb buffer is safe before dma unmap, because
+ * we didn't map the actual skb shell.
+ */
+ dma_unmap_single(dev, fd_addr,
+ skb_tail_pointer(skb) - buffer_start,
+ DMA_TO_DEVICE);
+ } else if (fd_format == dpaa2_fd_sg) {
+ swa = (struct dpaa2_eth_swa *)skbh;
+ skb = swa->skb;
+ scl = swa->scl;
+ num_sg = swa->num_sg;
+ num_dma_bufs = swa->num_dma_bufs;
+
+ /* Unmap the scatterlist */
+ dma_unmap_sg(dev, scl, num_sg, DMA_TO_DEVICE);
+ kfree(scl);
+
+ /* Unmap the SGT buffer */
+ unmap_size = priv->tx_data_offset +
+ sizeof(struct dpaa2_sg_entry) * (1 + num_dma_bufs);
+ dma_unmap_single(dev, fd_addr, unmap_size, DMA_TO_DEVICE);
+ } else {
+ /* Unsupported format, mark it as errored and give up */
+ if (status)
+ *status = ~0;
+ return;
+ }
+
+ /* Read the status from the Frame Annotation after we unmap the first
+ * buffer but before we free it. The caller function is responsible
+ * for checking the status value.
+ */
+ if (status && (dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV)) {
+ fas = (struct dpaa2_fas *)
+ ((void *)skbh + priv->buf_layout.private_data_size);
+ *status = le32_to_cpu(fas->status);
+ }
+
+ /* Free SGT buffer kmalloc'ed on tx */
+ if (fd_format != dpaa2_fd_single)
+ kfree(skbh);
+
+ /* Move on with skb release */
+ dev_kfree_skb(skb);
+}
+
+static int dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ struct dpaa2_fd fd;
+ struct rtnl_link_stats64 *percpu_stats;
+ struct dpaa2_eth_drv_stats *percpu_extras;
+ struct dpaa2_eth_fq *fq;
+ u16 queue_mapping;
+ int err, i;
+
+ percpu_stats = this_cpu_ptr(priv->percpu_stats);
+ percpu_extras = this_cpu_ptr(priv->percpu_extras);
+
+ if (unlikely(skb_headroom(skb) < DPAA2_ETH_NEEDED_HEADROOM(priv))) {
+ struct sk_buff *ns;
+
+ ns = skb_realloc_headroom(skb, DPAA2_ETH_NEEDED_HEADROOM(priv));
+ if (unlikely(!ns)) {
+ percpu_stats->tx_dropped++;
+ goto err_alloc_headroom;
+ }
+ dev_kfree_skb(skb);
+ skb = ns;
+ }
+
+ /* We'll be holding a back-reference to the skb until Tx Confirmation;
+ * we don't want that overwritten by a concurrent Tx with a cloned skb.
+ */
+ skb = skb_unshare(skb, GFP_ATOMIC);
+ if (unlikely(!skb)) {
+ /* skb_unshare() has already freed the skb */
+ percpu_stats->tx_dropped++;
+ return NETDEV_TX_OK;
+ }
+
+ /* Setup the FD fields */
+ memset(&fd, 0, sizeof(fd));
+
+ if (skb_is_nonlinear(skb)) {
+ err = build_sg_fd(priv, skb, &fd);
+ percpu_extras->tx_sg_frames++;
+ percpu_extras->tx_sg_bytes += skb->len;
+ } else {
+ err = build_single_fd(priv, skb, &fd);
+ }
+
+ if (unlikely(err)) {
+ percpu_stats->tx_dropped++;
+ goto err_build_fd;
+ }
+
+ /* Tracing point */
+ trace_dpaa2_tx_fd(net_dev, &fd);
+
+ /* TxConf FQ selection is primarily based on cpu affinity; this is a
+ * non-migratable context, so it's safe to call smp_processor_id().
+ */
+ queue_mapping = smp_processor_id() % dpaa2_eth_queue_count(priv);
+ fq = &priv->fq[queue_mapping];
+ for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
+ err = dpaa2_io_service_enqueue_qd(NULL, priv->tx_qdid, 0,
+ fq->tx_qdbin, &fd);
+ if (err != -EBUSY)
+ break;
+ }
+ percpu_extras->tx_portal_busy += i;
+ if (unlikely(err < 0)) {
+ percpu_stats->tx_errors++;
+ /* Clean up everything, including freeing the skb */
+ free_tx_fd(priv, &fd, NULL);
+ } else {
+ percpu_stats->tx_packets++;
+ percpu_stats->tx_bytes += skb->len;
+ }
+
+ return NETDEV_TX_OK;
+
+err_build_fd:
+err_alloc_headroom:
+ dev_kfree_skb(skb);
+
+ return NETDEV_TX_OK;
+}
+
+/* Tx confirmation frame processing routine */
+static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ const struct dpaa2_fd *fd,
+ struct napi_struct *napi __always_unused)
+{
+ struct rtnl_link_stats64 *percpu_stats;
+ struct dpaa2_eth_drv_stats *percpu_extras;
+ u32 status = 0;
+
+ /* Tracing point */
+ trace_dpaa2_tx_conf_fd(priv->net_dev, fd);
+
+ percpu_extras = this_cpu_ptr(priv->percpu_extras);
+ percpu_extras->tx_conf_frames++;
+ percpu_extras->tx_conf_bytes += dpaa2_fd_get_len(fd);
+
+ free_tx_fd(priv, fd, &status);
+
+ if (unlikely(status & DPAA2_ETH_TXCONF_ERR_MASK)) {
+ percpu_stats = this_cpu_ptr(priv->percpu_stats);
+ /* Tx-conf logically pertains to the egress path. */
+ percpu_stats->tx_errors++;
+ }
+}
+
+static int set_rx_csum(struct dpaa2_eth_priv *priv, bool enable)
+{
+ int err;
+
+ err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
+ DPNI_OFF_RX_L3_CSUM, enable);
+ if (err) {
+ netdev_err(priv->net_dev,
+ "dpni_set_offload(RX_L3_CSUM) failed\n");
+ return err;
+ }
+
+ err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
+ DPNI_OFF_RX_L4_CSUM, enable);
+ if (err) {
+ netdev_err(priv->net_dev,
+ "dpni_set_offload(RX_L4_CSUM) failed\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static int set_tx_csum(struct dpaa2_eth_priv *priv, bool enable)
+{
+ int err;
+
+ err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
+ DPNI_OFF_TX_L3_CSUM, enable);
+ if (err) {
+ netdev_err(priv->net_dev, "dpni_set_offload(TX_L3_CSUM) failed\n");
+ return err;
+ }
+
+ err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
+ DPNI_OFF_TX_L4_CSUM, enable);
+ if (err) {
+ netdev_err(priv->net_dev, "dpni_set_offload(TX_L4_CSUM) failed\n");
+ return err;
+ }
+
+ return 0;
+}
+
+/* Perform a single release command to add buffers
+ * to the specified buffer pool
+ */
+static int add_bufs(struct dpaa2_eth_priv *priv, u16 bpid)
+{
+ struct device *dev = priv->net_dev->dev.parent;
+ u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
+ void *buf;
+ dma_addr_t addr;
+ int i;
+
+ for (i = 0; i < DPAA2_ETH_BUFS_PER_CMD; i++) {
+ /* Allocate buffer visible to WRIOP + skb shared info +
+ * alignment padding
+ */
+ buf = napi_alloc_frag(DPAA2_ETH_BUF_RAW_SIZE);
+ if (unlikely(!buf))
+ goto err_alloc;
+
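+ /* Align the buffer address as required by WRIOP; the raw allocation
+ * size already accounts for the worst-case alignment skew.
+ */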
+ buf = PTR_ALIGN(buf, DPAA2_ETH_RX_BUF_ALIGN);
+
+ addr = dma_map_single(dev, buf, DPAA2_ETH_RX_BUF_SIZE,
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(dev, addr)))
+ goto err_map;
+
+ buf_array[i] = addr;
+
+ /* tracing point */
+ trace_dpaa2_eth_buf_seed(priv->net_dev,
+ buf, DPAA2_ETH_BUF_RAW_SIZE,
+ addr, DPAA2_ETH_RX_BUF_SIZE,
+ bpid);
+ }
+
+release_bufs:
+ /* In case the portal is busy, retry until successful.
+ * The buffer release function would only fail if the QBMan portal
+ * was busy, which implies portal contention (i.e. more CPUs than
+ * portals, i.e. GPPs w/o affine DPIOs). For all practical purposes,
+ * there is little we can realistically do, short of giving up -
+ * in which case we'd risk depleting the buffer pool and never again
+ * receiving the Rx interrupt which would kick-start the refill logic.
+ * So just keep retrying, at the risk of being moved to ksoftirqd.
+ */
+ while (dpaa2_io_service_release(NULL, bpid, buf_array, i))
+ cpu_relax();
+ return i;
+
+err_map:
+ skb_free_frag(buf);
+err_alloc:
+ if (i)
+ goto release_bufs;
+
+ return 0;
+}
+
+static int seed_pool(struct dpaa2_eth_priv *priv, u16 bpid)
+{
+ int i, j;
+ int new_count;
+
+ /* This is the lazy seeding of Rx buffer pools.
+ * add_bufs() is also used on the Rx hotpath and calls napi_alloc_frag(),
+ * which in turn ends up calling this_cpu_ptr() and therefore requires
+ * atomic context.
+ * Rather than splitting up the code, do a one-off preempt disable.
+ */
+ preempt_disable();
+ for (j = 0; j < priv->num_channels; j++) {
+ for (i = 0; i < DPAA2_ETH_NUM_BUFS;
+ i += DPAA2_ETH_BUFS_PER_CMD) {
+ new_count = add_bufs(priv, bpid);
+ priv->channel[j]->buf_count += new_count;
+
+ if (new_count < DPAA2_ETH_BUFS_PER_CMD) {
+ preempt_enable();
+ return -ENOMEM;
+ }
+ }
+ }
+ preempt_enable();
+
+ return 0;
+}
+
+/*
+ * Drain the specified number of buffers from the DPNI's private buffer pool.
+ * @count must not exceed DPAA2_ETH_BUFS_PER_CMD
+ */
+static void drain_bufs(struct dpaa2_eth_priv *priv, int count)
+{
+ struct device *dev = priv->net_dev->dev.parent;
+ u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
+ void *vaddr;
+ int ret, i;
+
+ do {
+ ret = dpaa2_io_service_acquire(NULL, priv->dpbp_attrs.bpid,
+ buf_array, count);
+ if (ret < 0) {
+ netdev_err(priv->net_dev, "dpaa2_io_service_acquire() failed\n");
+ return;
+ }
+ for (i = 0; i < ret; i++) {
+ /* Same logic as on regular Rx path */
+ dma_unmap_single(dev, buf_array[i],
+ DPAA2_ETH_RX_BUF_SIZE,
+ DMA_FROM_DEVICE);
+ vaddr = phys_to_virt(buf_array[i]);
+ skb_free_frag(vaddr);
+ }
+ } while (ret);
+}
+
+static void drain_pool(struct dpaa2_eth_priv *priv)
+{
+ int i;
+
+ drain_bufs(priv, DPAA2_ETH_BUFS_PER_CMD);
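+ /* A second, one-buffer-at-a-time pass picks up any stragglers, in
+ * case a full batch-sized acquire can no longer be satisfied.
+ */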
+ drain_bufs(priv, 1);
+
+ for (i = 0; i < priv->num_channels; i++)
+ priv->channel[i]->buf_count = 0;
+}
+
+/* Function is called from softirq context only, so we don't need to guard
+ * the access to percpu count
+ */
+static int refill_pool(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ u16 bpid)
+{
+ int new_count;
+
+ if (likely(ch->buf_count >= DPAA2_ETH_REFILL_THRESH))
+ return 0;
+
+ do {
+ new_count = add_bufs(priv, bpid);
+ if (unlikely(!new_count)) {
+ /* Out of memory; abort for now, we'll try later on */
+ break;
+ }
+ ch->buf_count += new_count;
+ } while (ch->buf_count < DPAA2_ETH_NUM_BUFS);
+
+ if (unlikely(ch->buf_count < DPAA2_ETH_NUM_BUFS))
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int pull_channel(struct dpaa2_eth_channel *ch)
+{
+ int err;
+ int dequeues = -1;
+
+ /* Retry while portal is busy */
+ do {
+ err = dpaa2_io_service_pull_channel(NULL, ch->ch_id, ch->store);
+ dequeues++;
+ cpu_relax();
+ } while (err == -EBUSY);
+
+ ch->stats.dequeue_portal_busy += dequeues;
+ if (unlikely(err))
+ ch->stats.pull_err++;
+
+ return err;
+}
+
+/* NAPI poll routine
+ *
+ * Frames are dequeued from the QMan channel associated with this NAPI context.
+ * Rx, Tx confirmation and (if configured) Rx error frames all count
+ * towards the NAPI budget.
+ */
+static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
+{
+ struct dpaa2_eth_channel *ch;
+ int cleaned = 0, store_cleaned;
+ struct dpaa2_eth_priv *priv;
+ int err;
+
+ ch = container_of(napi, struct dpaa2_eth_channel, napi);
+ priv = ch->priv;
+
+ while (cleaned < budget) {
+ err = pull_channel(ch);
+ if (unlikely(err))
+ break;
+
+ /* Refill pool if appropriate */
+ refill_pool(priv, ch, priv->dpbp_attrs.bpid);
+
+ store_cleaned = consume_frames(ch);
+ cleaned += store_cleaned;
+
+ /* If we have enough budget left for a full store,
+ * try a new pull dequeue, otherwise we're done here
+ */
+ if (store_cleaned == 0 ||
+ cleaned > budget - DPAA2_ETH_STORE_SIZE)
+ break;
+ }
+
+ if (cleaned < budget) {
+ napi_complete_done(napi, cleaned);
+ /* Re-enable data available notifications */
+ do {
+ err = dpaa2_io_service_rearm(NULL, &ch->nctx);
+ cpu_relax();
+ } while (err == -EBUSY);
+ }
+
+ ch->stats.frames += cleaned;
+
+ return cleaned;
+}
+
+static void enable_ch_napi(struct dpaa2_eth_priv *priv)
+{
+ struct dpaa2_eth_channel *ch;
+ int i;
+
+ for (i = 0; i < priv->num_channels; i++) {
+ ch = priv->channel[i];
+ napi_enable(&ch->napi);
+ }
+}
+
+static void disable_ch_napi(struct dpaa2_eth_priv *priv)
+{
+ struct dpaa2_eth_channel *ch;
+ int i;
+
+ for (i = 0; i < priv->num_channels; i++) {
+ ch = priv->channel[i];
+ napi_disable(&ch->napi);
+ }
+}
+
+static int link_state_update(struct dpaa2_eth_priv *priv)
+{
+ struct dpni_link_state state;
+ int err;
+
+ err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
+ if (unlikely(err)) {
+ netdev_err(priv->net_dev,
+ "dpni_get_link_state() failed\n");
+ return err;
+ }
+
+ /* Check link state; speed / duplex changes are not treated yet */
+ if (priv->link_state.up == state.up)
+ return 0;
+
+ priv->link_state = state;
+ if (state.up) {
+ netif_carrier_on(priv->net_dev);
+ netif_tx_start_all_queues(priv->net_dev);
+ } else {
+ netif_tx_stop_all_queues(priv->net_dev);
+ netif_carrier_off(priv->net_dev);
+ }
+
+ netdev_info(priv->net_dev, "Link Event: state %s",
+ state.up ? "up" : "down");
+
+ return 0;
+}
+
+static int dpaa2_eth_open(struct net_device *net_dev)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ int err;
+
+ err = seed_pool(priv, priv->dpbp_attrs.bpid);
+ if (err) {
+ /* Not much to do; the buffer pool, though not filled up,
+ * may still contain some buffers which would enable us
+ * to limp on.
+ */
+ netdev_err(net_dev, "Buffer seeding failed for DPBP %d (bpid=%d)\n",
+ priv->dpbp_dev->obj_desc.id, priv->dpbp_attrs.bpid);
+ }
+
+ /* We'll only start the txqs when the link is actually ready; make sure
+ * we don't race against the link up notification, which may come
+ * immediately after dpni_enable().
+ */
+ netif_tx_stop_all_queues(net_dev);
+ enable_ch_napi(priv);
+ /* Also, explicitly set carrier off, otherwise netif_carrier_ok() will
+ * return true and cause 'ip link show' to report the LOWER_UP flag,
+ * even though the link notification wasn't even received.
+ */
+ netif_carrier_off(net_dev);
+
+ err = dpni_enable(priv->mc_io, 0, priv->mc_token);
+ if (err < 0) {
+ netdev_err(net_dev, "dpni_enable() failed\n");
+ goto enable_err;
+ }
+
+ /* If the DPMAC object has already processed the link up interrupt,
+ * we have to learn the link state ourselves.
+ */
+ err = link_state_update(priv);
+ if (err < 0) {
+ netdev_err(net_dev, "Can't update link state\n");
+ goto link_state_err;
+ }
+
+ return 0;
+
+link_state_err:
+enable_err:
+ disable_ch_napi(priv);
+ drain_pool(priv);
+ return err;
+}
+
+/* The DPIO store must be empty when we call this,
+ * at the end of every NAPI cycle.
+ */
+static u32 drain_channel(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch)
+{
+ u32 drained = 0, total = 0;
+
+ do {
+ pull_channel(ch);
+ drained = consume_frames(ch);
+ total += drained;
+ } while (drained);
+
+ return total;
+}
+
+static u32 drain_ingress_frames(struct dpaa2_eth_priv *priv)
+{
+ struct dpaa2_eth_channel *ch;
+ int i;
+ u32 drained = 0;
+
+ for (i = 0; i < priv->num_channels; i++) {
+ ch = priv->channel[i];
+ drained += drain_channel(priv, ch);
+ }
+
+ return drained;
+}
+
+static int dpaa2_eth_stop(struct net_device *net_dev)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ int dpni_enabled;
+ int retries = 10;
+ u32 drained;
+
+ netif_tx_stop_all_queues(net_dev);
+ netif_carrier_off(net_dev);
+
+ /* Loop while dpni_disable() attempts to drain the egress FQs
+ * and confirm them back to us.
+ */
+ do {
+ dpni_disable(priv->mc_io, 0, priv->mc_token);
+ dpni_is_enabled(priv->mc_io, 0, priv->mc_token, &dpni_enabled);
+ if (dpni_enabled)
+ /* Allow the hardware some slack */
+ msleep(100);
+ } while (dpni_enabled && --retries);
+ if (!retries) {
+ netdev_warn(net_dev, "Retry count exceeded disabling DPNI\n");
+ /* Must go on and disable NAPI nonetheless, so we don't crash at
+ * the next "ifconfig up"
+ */
+ }
+
+ /* Wait for NAPI to complete on every core and disable it.
+ * In particular, this will also prevent NAPI from being rescheduled if
+ * a new CDAN is serviced, effectively discarding the CDAN. We therefore
+ * don't even need to disarm the channels, except perhaps for the case
+ * of a huge coalescing value.
+ */
+ disable_ch_napi(priv);
+
+ /* Manually drain the Rx and TxConf queues */
+ drained = drain_ingress_frames(priv);
+ if (drained)
+ netdev_dbg(net_dev, "Drained %d frames.\n", drained);
+
+ /* Empty the buffer pool */
+ drain_pool(priv);
+
+ return 0;
+}
+
+static int dpaa2_eth_init(struct net_device *net_dev)
+{
+ u64 supported = 0;
+ u64 not_supported = 0;
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ u32 options = priv->dpni_attrs.options;
+
+ /* Capabilities listing */
+ supported |= IFF_LIVE_ADDR_CHANGE;
+
+ if (options & DPNI_OPT_NO_MAC_FILTER)
+ not_supported |= IFF_UNICAST_FLT;
+ else
+ supported |= IFF_UNICAST_FLT;
+
+ net_dev->priv_flags |= supported;
+ net_dev->priv_flags &= ~not_supported;
+
+ /* Features */
+ net_dev->features = NETIF_F_RXCSUM |
+ NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_SG | NETIF_F_HIGHDMA |
+ NETIF_F_LLTX;
+ net_dev->hw_features = net_dev->features;
+
+ return 0;
+}
+
+static int dpaa2_eth_set_addr(struct net_device *net_dev, void *addr)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ struct device *dev = net_dev->dev.parent;
+ int err;
+
+ err = eth_mac_addr(net_dev, addr);
+ if (err < 0) {
+ dev_err(dev, "eth_mac_addr() failed (%d)\n", err);
+ return err;
+ }
+
+ err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
+ net_dev->dev_addr);
+ if (err) {
+ dev_err(dev, "dpni_set_primary_mac_addr() failed (%d)\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+/* Fill in counters maintained by the GPP driver. These may be different from
+ * the hardware counters obtained by ethtool.
+ */
+void dpaa2_eth_get_stats(struct net_device *net_dev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ struct rtnl_link_stats64 *percpu_stats;
+ u64 *cpustats;
+ u64 *netstats = (u64 *)stats;
+ int i, j;
+ int num = sizeof(struct rtnl_link_stats64) / sizeof(u64);
+
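+ /* struct rtnl_link_stats64 is a flat array of u64 counters, so the
+ * per-cpu copies can be aggregated field by field through u64 views.
+ */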
+ for_each_possible_cpu(i) {
+ percpu_stats = per_cpu_ptr(priv->percpu_stats, i);
+ cpustats = (u64 *)percpu_stats;
+ for (j = 0; j < num; j++)
+ netstats[j] += cpustats[j];
+ }
+}
+
+static int dpaa2_eth_change_mtu(struct net_device *net_dev, int mtu)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ int err;
+
+ /* Set the maximum Rx frame length to match the transmit side;
+ * account for L2 headers when computing the MFL
+ */
+ err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token,
+ (u16)DPAA2_ETH_L2_MAX_FRM(mtu));
+ if (err) {
+ netdev_err(net_dev, "dpni_set_max_frame_length() failed\n");
+ return err;
+ }
+
+ net_dev->mtu = mtu;
+ return 0;
+}
+
+/* Copy MAC unicast addresses from @net_dev to @priv.
+ * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable.
+ */
+static void add_uc_hw_addr(const struct net_device *net_dev,
+ struct dpaa2_eth_priv *priv)
+{
+ struct netdev_hw_addr *ha;
+ int err;
+
+ netdev_for_each_uc_addr(ha, net_dev) {
+ err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token,
+ ha->addr);
+ if (err)
+ netdev_warn(priv->net_dev,
+ "Could not add ucast MAC %pM to the filtering table (err %d)\n",
+ ha->addr, err);
+ }
+}
+
+/* Copy MAC multicast addresses from @net_dev to @priv.
+ * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable.
+ */
+static void add_mc_hw_addr(const struct net_device *net_dev,
+ struct dpaa2_eth_priv *priv)
+{
+ struct netdev_hw_addr *ha;
+ int err;
+
+ netdev_for_each_mc_addr(ha, net_dev) {
+ err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token,
+ ha->addr);
+ if (err)
+ netdev_warn(priv->net_dev,
+ "Could not add mcast MAC %pM to the filtering table (err %d)\n",
+ ha->addr, err);
+ }
+}
+
+static void dpaa2_eth_set_rx_mode(struct net_device *net_dev)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ int uc_count = netdev_uc_count(net_dev);
+ int mc_count = netdev_mc_count(net_dev);
+ u8 max_mac = priv->dpni_attrs.mac_filter_entries;
+ u32 options = priv->dpni_attrs.options;
+ u16 mc_token = priv->mc_token;
+ struct fsl_mc_io *mc_io = priv->mc_io;
+ int err;
+
+ /* Basic sanity checks; these probably indicate a misconfiguration */
+ if (options & DPNI_OPT_NO_MAC_FILTER && max_mac != 0)
+ netdev_info(net_dev,
+ "mac_filter_entries=%d, DPNI_OPT_NO_MAC_FILTER option must be disabled\n",
+ max_mac);
+
+ /* Force promiscuous if the uc or mc counts exceed our capabilities. */
+ if (uc_count > max_mac) {
+ netdev_info(net_dev,
+ "Unicast addr count reached %d, max allowed is %d; forcing promisc\n",
+ uc_count, max_mac);
+ goto force_promisc;
+ }
+ if (mc_count + uc_count > max_mac) {
+ netdev_info(net_dev,
+ "Unicast + multicast addr count reached %d, max allowed is %d; forcing promisc\n",
+ uc_count + mc_count, max_mac);
+ goto force_mc_promisc;
+ }
+
+ /* Adjust promisc settings due to flag combinations */
+ if (net_dev->flags & IFF_PROMISC)
+ goto force_promisc;
+ if (net_dev->flags & IFF_ALLMULTI) {
+ /* First, rebuild unicast filtering table. This should be done
+ * in promisc mode, in order to avoid frame loss while we
+ * progressively add entries to the table.
+ * We don't know whether we had been in promisc already, and
+ * making an MC call to find out is expensive; so set uc promisc
+ * nonetheless.
+ */
+ err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
+ if (err)
+ netdev_warn(net_dev, "Can't set uc promisc\n");
+
+ /* Actual uc table reconstruction. */
+ err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 0);
+ if (err)
+ netdev_warn(net_dev, "Can't clear uc filters\n");
+ add_uc_hw_addr(net_dev, priv);
+
+ /* Finally, clear uc promisc and set mc promisc as requested. */
+ err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0);
+ if (err)
+ netdev_warn(net_dev, "Can't clear uc promisc\n");
+ goto force_mc_promisc;
+ }
+
+ /* Neither unicast nor multicast promisc will be on... eventually.
+ * For now, rebuild mac filtering tables while forcing both of them on.
+ */
+ err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
+ if (err)
+ netdev_warn(net_dev, "Can't set uc promisc (%d)\n", err);
+ err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1);
+ if (err)
+ netdev_warn(net_dev, "Can't set mc promisc (%d)\n", err);
+
+ /* Actual mac filtering tables reconstruction */
+ err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 1);
+ if (err)
+ netdev_warn(net_dev, "Can't clear mac filters\n");
+ add_mc_hw_addr(net_dev, priv);
+ add_uc_hw_addr(net_dev, priv);
+
+ /* Now we can clear both ucast and mcast promisc, without risking
+ * to drop legitimate frames anymore.
+ */
+ err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0);
+ if (err)
+ netdev_warn(net_dev, "Can't clear ucast promisc\n");
+ err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 0);
+ if (err)
+ netdev_warn(net_dev, "Can't clear mcast promisc\n");
+
+ return;
+
+force_promisc:
+ err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
+ if (err)
+ netdev_warn(net_dev, "Can't set ucast promisc\n");
+force_mc_promisc:
+ err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1);
+ if (err)
+ netdev_warn(net_dev, "Can't set mcast promisc\n");
+}
+
+static int dpaa2_eth_set_features(struct net_device *net_dev,
+ netdev_features_t features)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ netdev_features_t changed = features ^ net_dev->features;
+ bool enable;
+ int err;
+
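+ /* Rx L3 and L4 checksum validation are toggled together, and so is
+ * Tx checksum generation, hence IP_CSUM and IPV6_CSUM are treated
+ * as a single knob here.
+ */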
+ if (changed & NETIF_F_RXCSUM) {
+ enable = !!(features & NETIF_F_RXCSUM);
+ err = set_rx_csum(priv, enable);
+ if (err)
+ return err;
+ }
+
+ if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
+ enable = !!(features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));
+ err = set_tx_csum(priv, enable);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static const struct net_device_ops dpaa2_eth_ops = {
+ .ndo_open = dpaa2_eth_open,
+ .ndo_start_xmit = dpaa2_eth_tx,
+ .ndo_stop = dpaa2_eth_stop,
+ .ndo_init = dpaa2_eth_init,
+ .ndo_set_mac_address = dpaa2_eth_set_addr,
+ .ndo_get_stats64 = dpaa2_eth_get_stats,
+ .ndo_change_mtu = dpaa2_eth_change_mtu,
+ .ndo_set_rx_mode = dpaa2_eth_set_rx_mode,
+ .ndo_set_features = dpaa2_eth_set_features,
+};
+
+static void cdan_cb(struct dpaa2_io_notification_ctx *ctx)
+{
+ struct dpaa2_eth_channel *ch;
+
+ ch = container_of(ctx, struct dpaa2_eth_channel, nctx);
+
+ /* Update NAPI statistics */
+ ch->stats.cdan++;
+
+ napi_schedule_irqoff(&ch->napi);
+}
+
+/* Allocate and configure a DPCON object */
+static struct fsl_mc_device *setup_dpcon(struct dpaa2_eth_priv *priv)
+{
+ struct fsl_mc_device *dpcon;
+ struct device *dev = priv->net_dev->dev.parent;
+ struct dpcon_attr attrs;
+ int err;
+
+ err = fsl_mc_object_allocate(to_fsl_mc_device(dev),
+ FSL_MC_POOL_DPCON, &dpcon);
+ if (err) {
+ dev_info(dev, "Not enough DPCONs, will go on as-is\n");
+ return NULL;
+ }
+
+ err = dpcon_open(priv->mc_io, 0, dpcon->obj_desc.id, &dpcon->mc_handle);
+ if (err) {
+ dev_err(dev, "dpcon_open() failed\n");
+ goto err_open;
+ }
+
+ err = dpcon_reset(priv->mc_io, 0, dpcon->mc_handle);
+ if (err) {
+ dev_err(dev, "dpcon_reset() failed\n");
+ goto err_reset;
+ }
+
+ err = dpcon_get_attributes(priv->mc_io, 0, dpcon->mc_handle, &attrs);
+ if (err) {
+ dev_err(dev, "dpcon_get_attributes() failed\n");
+ goto err_get_attr;
+ }
+
+ err = dpcon_enable(priv->mc_io, 0, dpcon->mc_handle);
+ if (err) {
+ dev_err(dev, "dpcon_enable() failed\n");
+ goto err_enable;
+ }
+
+ return dpcon;
+
+err_enable:
+err_get_attr:
+err_reset:
+ dpcon_close(priv->mc_io, 0, dpcon->mc_handle);
+err_open:
+ fsl_mc_object_free(dpcon);
+
+ return NULL;
+}
+
+static void free_dpcon(struct dpaa2_eth_priv *priv,
+ struct fsl_mc_device *dpcon)
+{
+ dpcon_disable(priv->mc_io, 0, dpcon->mc_handle);
+ dpcon_close(priv->mc_io, 0, dpcon->mc_handle);
+ fsl_mc_object_free(dpcon);
+}
+
+static struct dpaa2_eth_channel *
+alloc_channel(struct dpaa2_eth_priv *priv)
+{
+ struct dpaa2_eth_channel *channel;
+ struct dpcon_attr attr;
+ struct device *dev = priv->net_dev->dev.parent;
+ int err;
+
+ channel = kzalloc(sizeof(*channel), GFP_KERNEL);
+ if (!channel)
+ return NULL;
+
+ channel->dpcon = setup_dpcon(priv);
+ if (!channel->dpcon)
+ goto err_setup;
+
+ err = dpcon_get_attributes(priv->mc_io, 0, channel->dpcon->mc_handle,
+ &attr);
+ if (err) {
+ dev_err(dev, "dpcon_get_attributes() failed\n");
+ goto err_get_attr;
+ }
+
+ channel->dpcon_id = attr.id;
+ channel->ch_id = attr.qbman_ch_id;
+ channel->priv = priv;
+
+ return channel;
+
+err_get_attr:
+ free_dpcon(priv, channel->dpcon);
+err_setup:
+ kfree(channel);
+ return NULL;
+}
+
+static void free_channel(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *channel)
+{
+ free_dpcon(priv, channel->dpcon);
+ kfree(channel);
+}
+
+/* DPIO setup: allocate and configure QBMan channels, setup core affinity
+ * and register data availability notifications
+ */
+static int setup_dpio(struct dpaa2_eth_priv *priv)
+{
+ struct dpaa2_io_notification_ctx *nctx;
+ struct dpaa2_eth_channel *channel;
+ struct dpcon_notification_cfg dpcon_notif_cfg;
+ struct device *dev = priv->net_dev->dev.parent;
+ int i, err;
+
+ /* We want the ability to spread ingress traffic (RX, TX conf) to as
+ * many cores as possible, so we need one channel for each core
+ * (unless there are fewer queues than cores, in which case the extra
+ * channels would be wasted).
+ * Allocate one channel per core and register it to the core's
+ * affine DPIO. If not enough channels are available for all cores
+ * or if some cores don't have an affine DPIO, there will be no
+ * ingress frame processing on those cores.
+ */
+ cpumask_clear(&priv->dpio_cpumask);
+ for_each_online_cpu(i) {
+ /* Try to allocate a channel */
+ channel = alloc_channel(priv);
+ if (!channel) {
+ dev_info(dev,
+ "No affine channel for cpu %d and above\n", i);
+ goto err_alloc_ch;
+ }
+
+ priv->channel[priv->num_channels] = channel;
+
+ nctx = &channel->nctx;
+ nctx->is_cdan = 1;
+ nctx->cb = cdan_cb;
+ nctx->id = channel->ch_id;
+ nctx->desired_cpu = i;
+
+ /* Register the new context */
+ err = dpaa2_io_service_register(NULL, nctx);
+ if (err) {
+ dev_info(dev, "No affine DPIO for cpu %d\n", i);
+ /* If no affine DPIO for this core, there's probably
+ * none available for next cores either.
+ */
+ goto err_service_reg;
+ }
+
+ /* Register DPCON notification with MC */
+ dpcon_notif_cfg.dpio_id = nctx->dpio_id;
+ dpcon_notif_cfg.priority = 0;
+ dpcon_notif_cfg.user_ctx = nctx->qman64;
+ err = dpcon_set_notification(priv->mc_io, 0,
+ channel->dpcon->mc_handle,
+ &dpcon_notif_cfg);
+ if (err) {
+ dev_err(dev, "dpcon_set_notification failed()\n");
+ goto err_set_cdan;
+ }
+
+ /* If we managed to allocate a channel and also found an affine
+ * DPIO for this core, add it to the final mask
+ */
+ cpumask_set_cpu(i, &priv->dpio_cpumask);
+ priv->num_channels++;
+
+ /* Stop if we already have enough channels to accommodate all
+ * RX and TX conf queues
+ */
+ if (priv->num_channels == dpaa2_eth_queue_count(priv))
+ break;
+ }
+
+ return 0;
+
+err_set_cdan:
+ dpaa2_io_service_deregister(NULL, nctx);
+err_service_reg:
+ free_channel(priv, channel);
+err_alloc_ch:
+ if (cpumask_empty(&priv->dpio_cpumask)) {
+ dev_err(dev, "No cpu with an affine DPIO/DPCON\n");
+ return -ENODEV;
+ }
+
+ dev_info(dev, "Cores %*pbl available for processing ingress traffic\n",
+ cpumask_pr_args(&priv->dpio_cpumask));
+
+ return 0;
+}
+
+static void free_dpio(struct dpaa2_eth_priv *priv)
+{
+ int i;
+ struct dpaa2_eth_channel *ch;
+
+ /* deregister CDAN notifications and free channels */
+ for (i = 0; i < priv->num_channels; i++) {
+ ch = priv->channel[i];
+ dpaa2_io_service_deregister(NULL, &ch->nctx);
+ free_channel(priv, ch);
+ }
+}
+
+static struct dpaa2_eth_channel *get_affine_channel(struct dpaa2_eth_priv *priv,
+ int cpu)
+{
+ struct device *dev = priv->net_dev->dev.parent;
+ int i;
+
+ for (i = 0; i < priv->num_channels; i++)
+ if (priv->channel[i]->nctx.desired_cpu == cpu)
+ return priv->channel[i];
+
+ /* We should never get here. Issue a warning and return
+ * the first channel, because it's still better than nothing
+ */
+ dev_warn(dev, "No affine channel found for cpu %d\n", cpu);
+
+ return priv->channel[0];
+}
+
+static void set_fq_affinity(struct dpaa2_eth_priv *priv)
+{
+ struct device *dev = priv->net_dev->dev.parent;
+ struct dpaa2_eth_fq *fq;
+ int rx_cpu, txc_cpu;
+ int i;
+
+ /* For each FQ, pick one channel/CPU to deliver frames to.
+ * This may well change at runtime, either through irqbalance or
+ * through direct user intervention.
+ */
+ rx_cpu = txc_cpu = cpumask_first(&priv->dpio_cpumask);
+
+ for (i = 0; i < priv->num_fqs; i++) {
+ fq = &priv->fq[i];
+ switch (fq->type) {
+ case DPAA2_RX_FQ:
+ fq->target_cpu = rx_cpu;
+ rx_cpu = cpumask_next(rx_cpu, &priv->dpio_cpumask);
+ if (rx_cpu >= nr_cpu_ids)
+ rx_cpu = cpumask_first(&priv->dpio_cpumask);
+ break;
+ case DPAA2_TX_CONF_FQ:
+ fq->target_cpu = txc_cpu;
+ txc_cpu = cpumask_next(txc_cpu, &priv->dpio_cpumask);
+ if (txc_cpu >= nr_cpu_ids)
+ txc_cpu = cpumask_first(&priv->dpio_cpumask);
+ break;
+ default:
+ dev_err(dev, "Unknown FQ type: %d\n", fq->type);
+ }
+ fq->channel = get_affine_channel(priv, fq->target_cpu);
+ }
+}
+
+static void setup_fqs(struct dpaa2_eth_priv *priv)
+{
+ int i;
+
+ /* We have one TxConf FQ per Tx flow.
+ * The number of Tx and Rx queues is the same.
+ * Tx queues come first in the fq array.
+ */
+ for (i = 0; i < dpaa2_eth_queue_count(priv); i++) {
+ priv->fq[priv->num_fqs].type = DPAA2_TX_CONF_FQ;
+ priv->fq[priv->num_fqs].consume = dpaa2_eth_tx_conf;
+ priv->fq[priv->num_fqs++].flowid = (u16)i;
+ }
+
+ for (i = 0; i < dpaa2_eth_queue_count(priv); i++) {
+ priv->fq[priv->num_fqs].type = DPAA2_RX_FQ;
+ priv->fq[priv->num_fqs].consume = dpaa2_eth_rx;
+ priv->fq[priv->num_fqs++].flowid = (u16)i;
+ }
+
+ /* For each FQ, decide on which core to process incoming frames */
+ set_fq_affinity(priv);
+}
+
+/* Allocate and configure one buffer pool for each interface */
+static int setup_dpbp(struct dpaa2_eth_priv *priv)
+{
+ int err;
+ struct fsl_mc_device *dpbp_dev;
+ struct device *dev = priv->net_dev->dev.parent;
+
+ err = fsl_mc_object_allocate(to_fsl_mc_device(dev), FSL_MC_POOL_DPBP,
+ &dpbp_dev);
+ if (err) {
+ dev_err(dev, "DPBP device allocation failed\n");
+ return err;
+ }
+
+ priv->dpbp_dev = dpbp_dev;
+
+ err = dpbp_open(priv->mc_io, 0, priv->dpbp_dev->obj_desc.id,
+ &dpbp_dev->mc_handle);
+ if (err) {
+ dev_err(dev, "dpbp_open() failed\n");
+ goto err_open;
+ }
+
+ err = dpbp_enable(priv->mc_io, 0, dpbp_dev->mc_handle);
+ if (err) {
+ dev_err(dev, "dpbp_enable() failed\n");
+ goto err_enable;
+ }
+
+ err = dpbp_get_attributes(priv->mc_io, 0, dpbp_dev->mc_handle,
+ &priv->dpbp_attrs);
+ if (err) {
+ dev_err(dev, "dpbp_get_attributes() failed\n");
+ goto err_get_attr;
+ }
+
+ return 0;
+
+err_get_attr:
+ dpbp_disable(priv->mc_io, 0, dpbp_dev->mc_handle);
+err_enable:
+ dpbp_close(priv->mc_io, 0, dpbp_dev->mc_handle);
+err_open:
+ fsl_mc_object_free(dpbp_dev);
+
+ return err;
+}
+
+static void free_dpbp(struct dpaa2_eth_priv *priv)
+{
+ drain_pool(priv);
+ dpbp_disable(priv->mc_io, 0, priv->dpbp_dev->mc_handle);
+ dpbp_close(priv->mc_io, 0, priv->dpbp_dev->mc_handle);
+ fsl_mc_object_free(priv->dpbp_dev);
+}
+
+/* Configure the DPNI object this interface is associated with */
+static int setup_dpni(struct fsl_mc_device *ls_dev)
+{
+ struct device *dev = &ls_dev->dev;
+ struct dpaa2_eth_priv *priv;
+ struct net_device *net_dev;
+ int err;
+
+ net_dev = dev_get_drvdata(dev);
+ priv = netdev_priv(net_dev);
+
+ priv->dpni_id = ls_dev->obj_desc.id;
+
+ /* get a handle for the DPNI object */
+ err = dpni_open(priv->mc_io, 0, priv->dpni_id, &priv->mc_token);
+ if (err) {
+ dev_err(dev, "dpni_open() failed\n");
+ goto err_open;
+ }
+
+ ls_dev->mc_io = priv->mc_io;
+ ls_dev->mc_handle = priv->mc_token;
+
+ err = dpni_reset(priv->mc_io, 0, priv->mc_token);
+ if (err) {
+ dev_err(dev, "dpni_reset() failed\n");
+ goto err_reset;
+ }
+
+ err = dpni_get_attributes(priv->mc_io, 0, priv->mc_token,
+ &priv->dpni_attrs);
+ if (err) {
+ dev_err(dev, "dpni_get_attributes() failed (err=%d)\n", err);
+ goto err_get_attr;
+ }
+
+ /* Configure buffer layouts */
+ /* rx buffer */
+ priv->buf_layout.pass_parser_result = true;
+ priv->buf_layout.pass_frame_status = true;
+ priv->buf_layout.private_data_size = DPAA2_ETH_SWA_SIZE;
+ priv->buf_layout.data_align = DPAA2_ETH_RX_BUF_ALIGN;
+ priv->buf_layout.options = DPNI_BUF_LAYOUT_OPT_PARSER_RESULT |
+ DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
+ DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE |
+ DPNI_BUF_LAYOUT_OPT_DATA_ALIGN;
+ err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
+ DPNI_QUEUE_RX, &priv->buf_layout);
+ if (err) {
+ dev_err(dev, "dpni_set_buffer_layout(RX) failed\n");
+ goto err_buf_layout;
+ }
+
+ /* tx buffer */
+ priv->buf_layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
+ DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE;
+ err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
+ DPNI_QUEUE_TX, &priv->buf_layout);
+ if (err) {
+ dev_err(dev, "dpni_set_buffer_layout(TX) failed\n");
+ goto err_buf_layout;
+ }
+
+ /* tx-confirm buffer */
+ priv->buf_layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
+ err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
+ DPNI_QUEUE_TX_CONFIRM, &priv->buf_layout);
+ if (err) {
+ dev_err(dev, "dpni_set_buffer_layout(TX_CONF) failed\n");
+ goto err_buf_layout;
+ }
+
+ /* Now that we've set our tx buffer layout, retrieve the minimum
+ * required tx data offset.
+ */
+ err = dpni_get_tx_data_offset(priv->mc_io, 0, priv->mc_token,
+ &priv->tx_data_offset);
+ if (err) {
+ dev_err(dev, "dpni_get_tx_data_offset() failed\n");
+ goto err_data_offset;
+ }
+
+ if ((priv->tx_data_offset % 64) != 0)
+ dev_warn(dev, "Tx data offset (%d) not a multiple of 64B",
+ priv->tx_data_offset);
+
+ /* Accommodate software annotation space (SWA) */
+ priv->tx_data_offset += DPAA2_ETH_SWA_SIZE;
+
+ return 0;
+
+err_data_offset:
+err_buf_layout:
+err_get_attr:
+err_reset:
+ dpni_close(priv->mc_io, 0, priv->mc_token);
+err_open:
+ return err;
+}
+
+static void free_dpni(struct dpaa2_eth_priv *priv)
+{
+ int err;
+
+ err = dpni_reset(priv->mc_io, 0, priv->mc_token);
+ if (err)
+ netdev_warn(priv->net_dev, "dpni_reset() failed (err %d)\n",
+ err);
+
+ dpni_close(priv->mc_io, 0, priv->mc_token);
+}
+
+static int setup_rx_flow(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_fq *fq)
+{
+ struct device *dev = priv->net_dev->dev.parent;
+ struct dpni_queue queue;
+ struct dpni_queue_id qid;
+ struct dpni_taildrop td;
+ int err;
+
+ err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
+ DPNI_QUEUE_RX, 0, fq->flowid, &queue, &qid);
+ if (err) {
+ dev_err(dev, "dpni_get_queue(RX) failed\n");
+ return err;
+ }
+
+ fq->fqid = qid.fqid;
+
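+ /* Deliver Rx frames to the affine channel's DPCON, on work queue
+ * priority 1; Tx confirmations use priority 0 (see setup_tx_flow()).
+ */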
+ queue.destination.id = fq->channel->dpcon_id;
+ queue.destination.type = DPNI_DEST_DPCON;
+ queue.destination.priority = 1;
+ queue.user_context = (u64)fq;
+ err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
+ DPNI_QUEUE_RX, 0, fq->flowid,
+ DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST,
+ &queue);
+ if (err) {
+ dev_err(dev, "dpni_set_queue(RX) failed\n");
+ return err;
+ }
+
+ td.enable = 1;
+ td.threshold = DPAA2_ETH_TAILDROP_THRESH;
+ err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token, DPNI_CP_QUEUE,
+ DPNI_QUEUE_RX, 0, fq->flowid, &td);
+ if (err) {
+ dev_err(dev, "dpni_set_threshold() failed\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static int setup_tx_flow(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_fq *fq)
+{
+ struct device *dev = priv->net_dev->dev.parent;
+ struct dpni_queue queue;
+ struct dpni_queue_id qid;
+ int err;
+
+ err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
+ DPNI_QUEUE_TX, 0, fq->flowid, &queue, &qid);
+ if (err) {
+ dev_err(dev, "dpni_get_queue(TX) failed\n");
+ return err;
+ }
+
+ fq->tx_qdbin = qid.qdbin;
+
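+ /* Tx frames are enqueued by QDID/QDBIN (see dpaa2_eth_tx()), so only
+ * the Tx confirmation queue needs its FQID and a DPCON destination.
+ */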
+ err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
+ DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid,
+ &queue, &qid);
+ if (err) {
+ dev_err(dev, "dpni_get_queue(TX_CONF) failed\n");
+ return err;
+ }
+
+ fq->fqid = qid.fqid;
+
+ queue.destination.id = fq->channel->dpcon_id;
+ queue.destination.type = DPNI_DEST_DPCON;
+ queue.destination.priority = 0;
+ queue.user_context = (u64)fq;
+ err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
+ DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid,
+ DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST,
+ &queue);
+ if (err) {
+ dev_err(dev, "dpni_set_queue(TX_CONF) failed\n");
+ return err;
+ }
+
+ return 0;
+}
+
+/* Hash key is a 5-tuple: IPsrc, IPdst, IPnextproto, L4src, L4dst */
+static const struct dpaa2_eth_hash_fields hash_fields[] = {
+ {
+ /* IP header */
+ .rxnfc_field = RXH_IP_SRC,
+ .cls_prot = NET_PROT_IP,
+ .cls_field = NH_FLD_IP_SRC,
+ .size = 4,
+ }, {
+ .rxnfc_field = RXH_IP_DST,
+ .cls_prot = NET_PROT_IP,
+ .cls_field = NH_FLD_IP_DST,
+ .size = 4,
+ }, {
+ .rxnfc_field = RXH_L3_PROTO,
+ .cls_prot = NET_PROT_IP,
+ .cls_field = NH_FLD_IP_PROTO,
+ .size = 1,
+ }, {
+ /* Using UDP ports, this is functionally equivalent to raw
+ * byte pairs from L4 header.
+ */
+ .rxnfc_field = RXH_L4_B_0_1,
+ .cls_prot = NET_PROT_UDP,
+ .cls_field = NH_FLD_UDP_PORT_SRC,
+ .size = 2,
+ }, {
+ .rxnfc_field = RXH_L4_B_2_3,
+ .cls_prot = NET_PROT_UDP,
+ .cls_field = NH_FLD_UDP_PORT_DST,
+ .size = 2,
+ },
+};
+
+/* Set RX hash options
+ * flags is a combination of RXH_ bits
+ */
+int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags)
+{
+ struct device *dev = net_dev->dev.parent;
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ struct dpkg_profile_cfg cls_cfg;
+ struct dpni_rx_tc_dist_cfg dist_cfg;
+ u8 *dma_mem;
+ int i;
+ int err = 0;
+
+ if (!dpaa2_eth_hash_enabled(priv)) {
+ dev_err(dev, "Hashing support is not enabled\n");
+ return -EOPNOTSUPP;
+ }
+
+ memset(&cls_cfg, 0, sizeof(cls_cfg));
+
+ for (i = 0; i < ARRAY_SIZE(hash_fields); i++) {
+ struct dpkg_extract *key =
+ &cls_cfg.extracts[cls_cfg.num_extracts];
+
+ if (!(flags & hash_fields[i].rxnfc_field))
+ continue;
+
+ if (cls_cfg.num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) {
+ dev_err(dev, "error adding key extraction rule, too many rules?\n");
+ return -E2BIG;
+ }
+
+ key->type = DPKG_EXTRACT_FROM_HDR;
+ key->extract.from_hdr.prot = hash_fields[i].cls_prot;
+ key->extract.from_hdr.type = DPKG_FULL_FIELD;
+ key->extract.from_hdr.field = hash_fields[i].cls_field;
+ cls_cfg.num_extracts++;
+
+ priv->rx_hash_fields |= hash_fields[i].rxnfc_field;
+ }
+
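+ /* The key extraction profile must be serialized into DMA-able memory;
+ * the MC reads it through key_cfg_iova.
+ */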
+ dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_DMA | GFP_KERNEL);
+ if (!dma_mem)
+ return -ENOMEM;
+
+ err = dpni_prepare_key_cfg(&cls_cfg, dma_mem);
+ if (err) {
+ dev_err(dev, "dpni_prepare_key_cfg error %d", err);
+ goto err_prep_key;
+ }
+
+ memset(&dist_cfg, 0, sizeof(dist_cfg));
+
+ /* Prepare for setting the rx dist */
+ dist_cfg.key_cfg_iova = dma_map_single(net_dev->dev.parent, dma_mem,
+ DPAA2_CLASSIFIER_DMA_SIZE,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(net_dev->dev.parent, dist_cfg.key_cfg_iova)) {
+ dev_err(dev, "DMA mapping failed\n");
+ err = -ENOMEM;
+ goto err_dma_map;
+ }
+
+ dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
+ dist_cfg.dist_mode = DPNI_DIST_MODE_HASH;
+
+ err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token, 0, &dist_cfg);
+ dma_unmap_single(net_dev->dev.parent, dist_cfg.key_cfg_iova,
+ DPAA2_CLASSIFIER_DMA_SIZE, DMA_TO_DEVICE);
+ if (err)
+ dev_err(dev, "dpni_set_rx_tc_dist() error %d\n", err);
+
+err_dma_map:
+err_prep_key:
+ kfree(dma_mem);
+ return err;
+}
+
+/* Bind the DPNI to its needed objects and resources: buffer pool, DPIOs,
+ * frame queues and channels
+ */
+static int bind_dpni(struct dpaa2_eth_priv *priv)
+{
+ struct net_device *net_dev = priv->net_dev;
+ struct device *dev = net_dev->dev.parent;
+ struct dpni_pools_cfg pools_params;
+ struct dpni_error_cfg err_cfg;
+ int err = 0;
+ int i;
+
+ pools_params.num_dpbp = 1;
+ pools_params.pools[0].dpbp_id = priv->dpbp_dev->obj_desc.id;
+ pools_params.pools[0].backup_pool = 0;
+ pools_params.pools[0].buffer_size = DPAA2_ETH_RX_BUF_SIZE;
+ err = dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params);
+ if (err) {
+ dev_err(dev, "dpni_set_pools() failed\n");
+ return err;
+ }
+
+ /* have the interface implicitly distribute traffic based on supported
+ * header fields
+ */
+ err = dpaa2_eth_set_hash(net_dev, DPAA2_RXH_SUPPORTED);
+ if (err)
+ netdev_err(net_dev, "Failed to configure hashing\n");
+
+ /* Configure handling of error frames */
+ err_cfg.errors = DPAA2_ETH_RX_ERR_MASK;
+ err_cfg.set_frame_annotation = 1;
+ err_cfg.error_action = DPNI_ERROR_ACTION_DISCARD;
+ err = dpni_set_errors_behavior(priv->mc_io, 0, priv->mc_token,
+ &err_cfg);
+ if (err) {
+ dev_err(dev, "dpni_set_errors_behavior failed\n");
+ return err;
+ }
+
+ /* Configure Rx and Tx conf queues to generate CDANs */
+ for (i = 0; i < priv->num_fqs; i++) {
+ switch (priv->fq[i].type) {
+ case DPAA2_RX_FQ:
+ err = setup_rx_flow(priv, &priv->fq[i]);
+ break;
+ case DPAA2_TX_CONF_FQ:
+ err = setup_tx_flow(priv, &priv->fq[i]);
+ break;
+ default:
+ dev_err(dev, "Invalid FQ type %d\n", priv->fq[i].type);
+ return -EINVAL;
+ }
+ if (err)
+ return err;
+ }
+
+ err = dpni_get_qdid(priv->mc_io, 0, priv->mc_token,
+ DPNI_QUEUE_TX, &priv->tx_qdid);
+ if (err) {
+ dev_err(dev, "dpni_get_qdid() failed\n");
+ return err;
+ }
+
+ return 0;
+}
+
+/* Allocate rings for storing incoming frame descriptors */
+static int alloc_rings(struct dpaa2_eth_priv *priv)
+{
+ struct net_device *net_dev = priv->net_dev;
+ struct device *dev = net_dev->dev.parent;
+ int i;
+
+ for (i = 0; i < priv->num_channels; i++) {
+ priv->channel[i]->store =
+ dpaa2_io_store_create(DPAA2_ETH_STORE_SIZE, dev);
+ if (!priv->channel[i]->store) {
+ netdev_err(net_dev, "dpaa2_io_store_create() failed\n");
+ goto err_ring;
+ }
+ }
+
+ return 0;
+
+err_ring:
+ for (i = 0; i < priv->num_channels; i++) {
+ if (!priv->channel[i]->store)
+ break;
+ dpaa2_io_store_destroy(priv->channel[i]->store);
+ }
+
+ return -ENOMEM;
+}
+
+static void free_rings(struct dpaa2_eth_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->num_channels; i++)
+ dpaa2_io_store_destroy(priv->channel[i]->store);
+}
+
+static int netdev_init(struct net_device *net_dev)
+{
+ int err;
+ struct device *dev = net_dev->dev.parent;
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ u8 mac_addr[ETH_ALEN], dpni_mac_addr[ETH_ALEN];
+ u8 bcast_addr[ETH_ALEN];
+
+ net_dev->netdev_ops = &dpaa2_eth_ops;
+
+ /* Get firmware address, if any */
+ err = dpni_get_port_mac_addr(priv->mc_io, 0, priv->mc_token, mac_addr);
+ if (err) {
+ dev_err(dev, "dpni_get_port_mac_addr() failed\n");
+ return err;
+ }
+
+ /* Get DPNI attributes address, if any */
+ err = dpni_get_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
+ dpni_mac_addr);
+ if (err) {
+ dev_err(dev, "dpni_get_primary_mac_addr() failed (%d)\n", err);
+ return err;
+ }
+
+ /* First check if firmware has any address configured by bootloader */
+ if (!is_zero_ether_addr(mac_addr)) {
+ /* If the DPMAC addr != DPNI addr, update it */
+ if (!ether_addr_equal(mac_addr, dpni_mac_addr)) {
+ err = dpni_set_primary_mac_addr(priv->mc_io, 0,
+ priv->mc_token,
+ mac_addr);
+ if (err) {
+ dev_err(dev, "dpni_set_primary_mac_addr() failed\n");
+ return err;
+ }
+ }
+ memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len);
+ } else if (is_zero_ether_addr(dpni_mac_addr)) {
+ /* Fills in net_dev->dev_addr, as required by
+ * register_netdevice()
+ */
+ eth_hw_addr_random(net_dev);
+ /* Make the user aware, without cluttering the boot log */
+ dev_dbg_once(dev, " device(s) have all-zero hwaddr, replaced with random\n");
+ err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
+ net_dev->dev_addr);
+ if (err) {
+ dev_err(dev, "dpni_set_primary_mac_addr(): %d\n", err);
+ return err;
+ }
+ /* Override NET_ADDR_RANDOM set by eth_hw_addr_random(); for all
+ * practical purposes, this will be our "permanent" mac address,
+ * at least until the next reboot. This move will also permit
+ * register_netdevice() to properly fill up net_dev->perm_addr.
+ */
+ net_dev->addr_assign_type = NET_ADDR_PERM;
+ } else {
+ /* NET_ADDR_PERM is default, all we have to do is
+ * fill in the device addr.
+ */
+ memcpy(net_dev->dev_addr, dpni_mac_addr, net_dev->addr_len);
+ }
+
+ /* Explicitly add the broadcast address to the MAC filtering table;
+ * the MC won't do that for us.
+ */
+ eth_broadcast_addr(bcast_addr);
+ err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token, bcast_addr);
+ if (err) {
+ dev_warn(dev, "dpni_add_mac_addr() failed (%d)\n", err);
+ /* Won't return an error; at least, we'd have egress traffic */
+ }
+
+ /* Reserve enough space to align buffer as per hardware requirement;
+ * NOTE: priv->tx_data_offset MUST be initialized at this point.
+ */
+ net_dev->needed_headroom = DPAA2_ETH_NEEDED_HEADROOM(priv);
+
+ /* Set MTU limits */
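+ /* 68 bytes is the minimum MTU required by IPv4 */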
+ net_dev->min_mtu = 68;
+ net_dev->max_mtu = DPAA2_ETH_MAX_MTU;
+
+ /* Our .ndo_init will be called herein */
+ err = register_netdev(net_dev);
+ if (err < 0) {
+ dev_err(dev, "register_netdev() failed\n");
+ return err;
+ }
+
+ return 0;
+}
+
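+/* Fallback when link change interrupts could not be set up (see
+ * dpaa2_eth_probe()): periodically poll the link state from a kthread.
+ */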
+static int poll_link_state(void *arg)
+{
+ struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)arg;
+ int err;
+
+ while (!kthread_should_stop()) {
+ err = link_state_update(priv);
+ if (unlikely(err))
+ return err;
+
+ msleep(DPAA2_ETH_LINK_STATE_REFRESH);
+ }
+
+ return 0;
+}
+
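+/* The hard IRQ handler only wakes the IRQ thread; reading and clearing the
+ * DPNI interrupt status and updating the link state are done in thread
+ * context.
+ */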
+static irqreturn_t dpni_irq0_handler(int irq_num, void *arg)
+{
+ return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg)
+{
+ u32 status, clear = 0;
+ struct device *dev = (struct device *)arg;
+ struct fsl_mc_device *dpni_dev = to_fsl_mc_device(dev);
+ struct net_device *net_dev = dev_get_drvdata(dev);
+ int err;
+
+ err = dpni_get_irq_status(dpni_dev->mc_io, 0, dpni_dev->mc_handle,
+ DPNI_IRQ_INDEX, &status);
+ if (unlikely(err)) {
+ netdev_err(net_dev, "Can't get irq status (err %d)", err);
+ clear = 0xffffffff;
+ goto out;
+ }
+
+ if (status & DPNI_IRQ_EVENT_LINK_CHANGED) {
+ clear |= DPNI_IRQ_EVENT_LINK_CHANGED;
+ link_state_update(netdev_priv(net_dev));
+ }
+
+out:
+ dpni_clear_irq_status(dpni_dev->mc_io, 0, dpni_dev->mc_handle,
+ DPNI_IRQ_INDEX, clear);
+ return IRQ_HANDLED;
+}
+
+static int setup_irqs(struct fsl_mc_device *ls_dev)
+{
+ int err = 0;
+ struct fsl_mc_device_irq *irq;
+
+ err = fsl_mc_allocate_irqs(ls_dev);
+ if (err) {
+ dev_err(&ls_dev->dev, "MC irqs allocation failed\n");
+ return err;
+ }
+
+ irq = ls_dev->irqs[0];
+ err = devm_request_threaded_irq(&ls_dev->dev, irq->msi_desc->irq,
+ dpni_irq0_handler,
+ dpni_irq0_handler_thread,
+ IRQF_NO_SUSPEND | IRQF_ONESHOT,
+ dev_name(&ls_dev->dev), &ls_dev->dev);
+ if (err < 0) {
+ dev_err(&ls_dev->dev, "devm_request_threaded_irq(): %d", err);
+ goto free_mc_irq;
+ }
+
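+ /* Only the link state change event is of interest: mask in just that
+ * bit, then enable interrupt generation at the DPNI.
+ */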
+ err = dpni_set_irq_mask(ls_dev->mc_io, 0, ls_dev->mc_handle,
+ DPNI_IRQ_INDEX, DPNI_IRQ_EVENT_LINK_CHANGED);
+ if (err < 0) {
+ dev_err(&ls_dev->dev, "dpni_set_irq_mask(): %d", err);
+ goto free_irq;
+ }
+
+ err = dpni_set_irq_enable(ls_dev->mc_io, 0, ls_dev->mc_handle,
+ DPNI_IRQ_INDEX, 1);
+ if (err < 0) {
+ dev_err(&ls_dev->dev, "dpni_set_irq_enable(): %d", err);
+ goto free_irq;
+ }
+
+ return 0;
+
+free_irq:
+ devm_free_irq(&ls_dev->dev, irq->msi_desc->irq, &ls_dev->dev);
+free_mc_irq:
+ fsl_mc_free_irqs(ls_dev);
+
+ return err;
+}
+
+static void add_ch_napi(struct dpaa2_eth_priv *priv)
+{
+ int i;
+ struct dpaa2_eth_channel *ch;
+
+ for (i = 0; i < priv->num_channels; i++) {
+ ch = priv->channel[i];
+ /* NAPI weight *MUST* be a multiple of DPAA2_ETH_STORE_SIZE */
+ netif_napi_add(priv->net_dev, &ch->napi, dpaa2_eth_poll,
+ NAPI_POLL_WEIGHT);
+ }
+}
+
+static void del_ch_napi(struct dpaa2_eth_priv *priv)
+{
+ int i;
+ struct dpaa2_eth_channel *ch;
+
+ for (i = 0; i < priv->num_channels; i++) {
+ ch = priv->channel[i];
+ netif_napi_del(&ch->napi);
+ }
+}
+
+static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
+{
+ struct device *dev;
+ struct net_device *net_dev = NULL;
+ struct dpaa2_eth_priv *priv = NULL;
+ int err = 0;
+
+ dev = &dpni_dev->dev;
+
+ /* Net device */
+ net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA2_ETH_MAX_TX_QUEUES);
+ if (!net_dev) {
+ dev_err(dev, "alloc_etherdev_mq() failed\n");
+ return -ENOMEM;
+ }
+
+ SET_NETDEV_DEV(net_dev, dev);
+ dev_set_drvdata(dev, net_dev);
+
+ priv = netdev_priv(net_dev);
+ priv->net_dev = net_dev;
+
+ /* Obtain a MC portal */
+ err = fsl_mc_portal_allocate(dpni_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
+ &priv->mc_io);
+ if (err) {
+ dev_err(dev, "MC portal allocation failed\n");
+ goto err_portal_alloc;
+ }
+
+ /* MC objects initialization and configuration */
+ err = setup_dpni(dpni_dev);
+ if (err)
+ goto err_dpni_setup;
+
+ err = setup_dpio(priv);
+ if (err)
+ goto err_dpio_setup;
+
+ setup_fqs(priv);
+
+ err = setup_dpbp(priv);
+ if (err)
+ goto err_dpbp_setup;
+
+ err = bind_dpni(priv);
+ if (err)
+ goto err_bind;
+
+ /* Add a NAPI context for each channel */
+ add_ch_napi(priv);
+
+ /* Percpu statistics */
+ priv->percpu_stats = alloc_percpu(*priv->percpu_stats);
+ if (!priv->percpu_stats) {
+ dev_err(dev, "alloc_percpu(percpu_stats) failed\n");
+ err = -ENOMEM;
+ goto err_alloc_percpu_stats;
+ }
+ priv->percpu_extras = alloc_percpu(*priv->percpu_extras);
+ if (!priv->percpu_extras) {
+ dev_err(dev, "alloc_percpu(percpu_extras) failed\n");
+ err = -ENOMEM;
+ goto err_alloc_percpu_extras;
+ }
+
+ err = netdev_init(net_dev);
+ if (err)
+ goto err_netdev_init;
+
+ /* Configure checksum offload based on current interface flags */
+ err = set_rx_csum(priv, !!(net_dev->features & NETIF_F_RXCSUM));
+ if (err)
+ goto err_csum;
+
+ err = set_tx_csum(priv, !!(net_dev->features &
+ (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)));
+ if (err)
+ goto err_csum;
+
+ err = alloc_rings(priv);
+ if (err)
+ goto err_alloc_rings;
+
+ net_dev->ethtool_ops = &dpaa2_ethtool_ops;
+
+ err = setup_irqs(dpni_dev);
+ if (err) {
+ netdev_warn(net_dev, "Failed to set link interrupt, fall back to polling\n");
+ priv->poll_thread = kthread_run(poll_link_state, priv,
+ "%s_poll_link", net_dev->name);
+ if (IS_ERR(priv->poll_thread)) {
+ netdev_err(net_dev, "Error starting polling thread\n");
+ goto err_poll_thread;
+ }
+ priv->do_link_poll = true;
+ }
+
+ dev_info(dev, "Probed interface %s\n", net_dev->name);
+ return 0;
+
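+ /* Error path: undo the setup steps in reverse order */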
+err_poll_thread:
+ free_rings(priv);
+err_alloc_rings:
+err_csum:
+ unregister_netdev(net_dev);
+err_netdev_init:
+ free_percpu(priv->percpu_extras);
+err_alloc_percpu_extras:
+ free_percpu(priv->percpu_stats);
+err_alloc_percpu_stats:
+ del_ch_napi(priv);
+err_bind:
+ free_dpbp(priv);
+err_dpbp_setup:
+ free_dpio(priv);
+err_dpio_setup:
+ free_dpni(priv);
+err_dpni_setup:
+ fsl_mc_portal_free(priv->mc_io);
+err_portal_alloc:
+ dev_set_drvdata(dev, NULL);
+ free_netdev(net_dev);
+
+ return err;
+}
+
+static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev)
+{
+ struct device *dev;
+ struct net_device *net_dev;
+ struct dpaa2_eth_priv *priv;
+
+ dev = &ls_dev->dev;
+ net_dev = dev_get_drvdata(dev);
+ priv = netdev_priv(net_dev);
+
+ unregister_netdev(net_dev);
+ dev_info(net_dev->dev.parent, "Removed interface %s\n", net_dev->name);
+
+ if (priv->do_link_poll)
+ kthread_stop(priv->poll_thread);
+ else
+ fsl_mc_free_irqs(ls_dev);
+
+ free_rings(priv);
+ free_percpu(priv->percpu_stats);
+ free_percpu(priv->percpu_extras);
+
+ del_ch_napi(priv);
+ free_dpbp(priv);
+ free_dpio(priv);
+ free_dpni(priv);
+
+ fsl_mc_portal_free(priv->mc_io);
+
+ dev_set_drvdata(dev, NULL);
+ free_netdev(net_dev);
+
+ return 0;
+}
+
+static const struct fsl_mc_device_id dpaa2_eth_match_id_table[] = {
+ {
+ .vendor = FSL_MC_VENDOR_FREESCALE,
+ .obj_type = "dpni",
+ },
+ { .vendor = 0x0 }
+};
+MODULE_DEVICE_TABLE(fslmc, dpaa2_eth_match_id_table);
+
+static struct fsl_mc_driver dpaa2_eth_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = dpaa2_eth_probe,
+ .remove = dpaa2_eth_remove,
+ .match_id_table = dpaa2_eth_match_id_table
+};
+
+module_fsl_mc_driver(dpaa2_eth_driver);
diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h
new file mode 100644
index 000000000000..c67cced55b72
--- /dev/null
+++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h
@@ -0,0 +1,348 @@
+/* Copyright 2014-2016 Freescale Semiconductor Inc.
+ * Copyright 2016 NXP
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __DPAA2_ETH_H
+#define __DPAA2_ETH_H
+
+#include <linux/netdevice.h>
+#include <linux/if_vlan.h>
+
+#include "../../fsl-mc/include/dpaa2-io.h"
+#include "../../fsl-mc/include/dpaa2-fd.h"
+#include "../../fsl-mc/include/dpbp.h"
+#include "../../fsl-mc/include/dpcon.h"
+#include "dpni.h"
+#include "dpni-cmd.h"
+
+#include "dpaa2-eth-trace.h"
+
+#define DPAA2_ETH_STORE_SIZE 16
+
+/* Maximum number of scatter-gather entries in an ingress frame,
+ * considering the maximum receive frame size is 64K
+ */
+#define DPAA2_ETH_MAX_SG_ENTRIES ((64 * 1024) / DPAA2_ETH_RX_BUF_SIZE)
+
+/* Maximum acceptable MTU value. It is directly related to the
+ * hardware-enforced Max Frame Length (currently 10k).
+ */
+#define DPAA2_ETH_MFL (10 * 1024)
+#define DPAA2_ETH_MAX_MTU (DPAA2_ETH_MFL - VLAN_ETH_HLEN)
+/* Convert L3 MTU to L2 MFL */
+#define DPAA2_ETH_L2_MAX_FRM(mtu) ((mtu) + VLAN_ETH_HLEN)
+
+/* Set the taildrop threshold (in bytes) to allow the enqueue of several jumbo
+ * frames in the Rx queues (length of the current frame is not
+ * taken into account when making the taildrop decision)
+ */
+#define DPAA2_ETH_TAILDROP_THRESH (64 * 1024)
+
+/* Buffer quota per queue. Must be large enough such that for minimum sized
+ * frames taildrop kicks in before the bpool gets depleted, so we compute
+ * how many 64B frames fit inside the taildrop threshold and add a margin
+ * to accommodate the buffer refill delay.
+ */
+#define DPAA2_ETH_MAX_FRAMES_PER_QUEUE (DPAA2_ETH_TAILDROP_THRESH / 64)
+#define DPAA2_ETH_NUM_BUFS (DPAA2_ETH_MAX_FRAMES_PER_QUEUE + 256)
+#define DPAA2_ETH_REFILL_THRESH DPAA2_ETH_MAX_FRAMES_PER_QUEUE
+
+/* Maximum number of buffers that can be acquired/released through a single
+ * QBMan command
+ */
+#define DPAA2_ETH_BUFS_PER_CMD 7
+
+/* Hardware requires alignment for ingress/egress buffer addresses
+ * and ingress buffer lengths.
+ */
+#define DPAA2_ETH_RX_BUF_SIZE 2048
+#define DPAA2_ETH_TX_BUF_ALIGN 64
+#define DPAA2_ETH_RX_BUF_ALIGN 256
+#define DPAA2_ETH_NEEDED_HEADROOM(p_priv) \
+ ((p_priv)->tx_data_offset + DPAA2_ETH_TX_BUF_ALIGN)
+
+/* Hardware only sees DPAA2_ETH_RX_BUF_SIZE, but we need to allocate ingress
+ * buffers large enough to allow building an skb around them and also account
+ * for alignment restrictions
+ */
+#define DPAA2_ETH_BUF_RAW_SIZE \
+ (DPAA2_ETH_RX_BUF_SIZE + \
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + \
+ DPAA2_ETH_RX_BUF_ALIGN)
+
+/* We are accommodating an skb backpointer and some S/G info
+ * in the frame's software annotation. The hardware
+ * options are either 0 or 64, so we choose the latter.
+ */
+#define DPAA2_ETH_SWA_SIZE 64
+
+/* Must keep this struct smaller than DPAA2_ETH_SWA_SIZE */
+struct dpaa2_eth_swa {
+ struct sk_buff *skb;
+ struct scatterlist *scl;
+ int num_sg;
+ int num_dma_bufs;
+};
+
+/* Annotation valid bits in FD FRC */
+#define DPAA2_FD_FRC_FASV 0x8000
+#define DPAA2_FD_FRC_FAEADV 0x4000
+#define DPAA2_FD_FRC_FAPRV 0x2000
+#define DPAA2_FD_FRC_FAIADV 0x1000
+#define DPAA2_FD_FRC_FASWOV 0x0800
+#define DPAA2_FD_FRC_FAICFDV 0x0400
+
+/* Annotation bits in FD CTRL */
+#define DPAA2_FD_CTRL_ASAL 0x00020000 /* ASAL = 128 */
+#define DPAA2_FD_CTRL_PTA 0x00800000
+#define DPAA2_FD_CTRL_PTV1 0x00400000
+
+/* Frame annotation status */
+struct dpaa2_fas {
+ u8 reserved;
+ u8 ppid;
+ __le16 ifpid;
+ __le32 status;
+} __packed;
+
+/* Frame annotation status word is located in the first 8 bytes
+ * of the buffer's hardware annotation area
+ */
+#define DPAA2_FAS_OFFSET 0
+#define DPAA2_FAS_SIZE (sizeof(struct dpaa2_fas))
+
+/* Error and status bits in the frame annotation status word */
+/* Debug frame, otherwise supposed to be discarded */
+#define DPAA2_FAS_DISC 0x80000000
+/* MACSEC frame */
+#define DPAA2_FAS_MS 0x40000000
+#define DPAA2_FAS_PTP 0x08000000
+/* Ethernet multicast frame */
+#define DPAA2_FAS_MC 0x04000000
+/* Ethernet broadcast frame */
+#define DPAA2_FAS_BC 0x02000000
+#define DPAA2_FAS_KSE 0x00040000
+#define DPAA2_FAS_EOFHE 0x00020000
+#define DPAA2_FAS_MNLE 0x00010000
+#define DPAA2_FAS_TIDE 0x00008000
+#define DPAA2_FAS_PIEE 0x00004000
+/* Frame length error */
+#define DPAA2_FAS_FLE 0x00002000
+/* Frame physical error */
+#define DPAA2_FAS_FPE 0x00001000
+#define DPAA2_FAS_PTE 0x00000080
+#define DPAA2_FAS_ISP 0x00000040
+#define DPAA2_FAS_PHE 0x00000020
+#define DPAA2_FAS_BLE 0x00000010
+/* L3 csum validation performed */
+#define DPAA2_FAS_L3CV 0x00000008
+/* L3 csum error */
+#define DPAA2_FAS_L3CE 0x00000004
+/* L4 csum validation performed */
+#define DPAA2_FAS_L4CV 0x00000002
+/* L4 csum error */
+#define DPAA2_FAS_L4CE 0x00000001
+/* Possible errors on the ingress path */
+#define DPAA2_ETH_RX_ERR_MASK (DPAA2_FAS_KSE | \
+ DPAA2_FAS_EOFHE | \
+ DPAA2_FAS_MNLE | \
+ DPAA2_FAS_TIDE | \
+ DPAA2_FAS_PIEE | \
+ DPAA2_FAS_FLE | \
+ DPAA2_FAS_FPE | \
+ DPAA2_FAS_PTE | \
+ DPAA2_FAS_ISP | \
+ DPAA2_FAS_PHE | \
+ DPAA2_FAS_BLE | \
+ DPAA2_FAS_L3CE | \
+ DPAA2_FAS_L4CE)
+/* Tx errors */
+#define DPAA2_ETH_TXCONF_ERR_MASK (DPAA2_FAS_KSE | \
+ DPAA2_FAS_EOFHE | \
+ DPAA2_FAS_MNLE | \
+ DPAA2_FAS_TIDE)
+
+/* Time in milliseconds between link state updates */
+#define DPAA2_ETH_LINK_STATE_REFRESH 1000
+
+/* Number of times to retry a frame enqueue before giving up.
+ * Value determined empirically, in order to minimize the number
+ * of frames dropped on Tx
+ */
+#define DPAA2_ETH_ENQUEUE_RETRIES 10
+
+/* Driver statistics, other than those in struct rtnl_link_stats64.
+ * These are usually collected per-CPU and aggregated by ethtool.
+ */
+struct dpaa2_eth_drv_stats {
+ __u64 tx_conf_frames;
+ __u64 tx_conf_bytes;
+ __u64 tx_sg_frames;
+ __u64 tx_sg_bytes;
+ __u64 rx_sg_frames;
+ __u64 rx_sg_bytes;
+ /* Enqueues retried due to portal busy */
+ __u64 tx_portal_busy;
+};
+
+/* Per-FQ statistics */
+struct dpaa2_eth_fq_stats {
+ /* Number of frames received on this queue */
+ __u64 frames;
+};
+
+/* Per-channel statistics */
+struct dpaa2_eth_ch_stats {
+ /* Volatile dequeues retried due to portal busy */
+ __u64 dequeue_portal_busy;
+ /* Number of CDANs; useful to estimate avg NAPI len */
+ __u64 cdan;
+ /* Number of frames received on queues from this channel */
+ __u64 frames;
+ /* Pull errors */
+ __u64 pull_err;
+};
+
+/* Maximum number of queues associated with a DPNI */
+#define DPAA2_ETH_MAX_RX_QUEUES 16
+#define DPAA2_ETH_MAX_TX_QUEUES NR_CPUS
+#define DPAA2_ETH_MAX_QUEUES (DPAA2_ETH_MAX_RX_QUEUES + \
+ DPAA2_ETH_MAX_TX_QUEUES)
+
+#define DPAA2_ETH_MAX_DPCONS NR_CPUS
+
+enum dpaa2_eth_fq_type {
+ DPAA2_RX_FQ = 0,
+ DPAA2_TX_CONF_FQ,
+};
+
+struct dpaa2_eth_priv;
+
+struct dpaa2_eth_fq {
+ u32 fqid;
+ u32 tx_qdbin;
+ u16 flowid;
+ int target_cpu;
+ struct dpaa2_eth_channel *channel;
+ enum dpaa2_eth_fq_type type;
+
+ void (*consume)(struct dpaa2_eth_priv *,
+ struct dpaa2_eth_channel *,
+ const struct dpaa2_fd *,
+ struct napi_struct *);
+ struct dpaa2_eth_fq_stats stats;
+};
+
+struct dpaa2_eth_channel {
+ struct dpaa2_io_notification_ctx nctx;
+ struct fsl_mc_device *dpcon;
+ int dpcon_id;
+ int ch_id;
+ int dpio_id;
+ struct napi_struct napi;
+ struct dpaa2_io_store *store;
+ struct dpaa2_eth_priv *priv;
+ int buf_count;
+ struct dpaa2_eth_ch_stats stats;
+};
+
+struct dpaa2_eth_hash_fields {
+ u64 rxnfc_field;
+ enum net_prot cls_prot;
+ int cls_field;
+ int size;
+};
+
+/* Driver private data */
+struct dpaa2_eth_priv {
+ struct net_device *net_dev;
+
+ u8 num_fqs;
+ struct dpaa2_eth_fq fq[DPAA2_ETH_MAX_QUEUES];
+
+ u8 num_channels;
+ struct dpaa2_eth_channel *channel[DPAA2_ETH_MAX_DPCONS];
+
+ int dpni_id;
+ struct dpni_attr dpni_attrs;
+ /* Insofar as the MC is concerned, we're using one layout on all 3 types
+ * of buffers (Rx, Tx, Tx-Conf).
+ */
+ struct dpni_buffer_layout buf_layout;
+ u16 tx_data_offset;
+
+ struct fsl_mc_device *dpbp_dev;
+ struct dpbp_attr dpbp_attrs;
+
+ u16 tx_qdid;
+ struct fsl_mc_io *mc_io;
+ /* Cores which have an affine DPIO/DPCON.
+ * This is the cpu set on which Rx and Tx conf frames are processed
+ */
+ struct cpumask dpio_cpumask;
+
+ /* Standard statistics */
+ struct rtnl_link_stats64 __percpu *percpu_stats;
+ /* Extra stats, in addition to the ones known by the kernel */
+ struct dpaa2_eth_drv_stats __percpu *percpu_extras;
+
+ u16 mc_token;
+
+ struct dpni_link_state link_state;
+ bool do_link_poll;
+ struct task_struct *poll_thread;
+
+ /* enabled ethtool hashing bits */
+ u64 rx_hash_fields;
+};
+
+/* default Rx hash options, set during probing */
+#define DPAA2_RXH_SUPPORTED (RXH_L2DA | RXH_VLAN | RXH_L3_PROTO \
+ | RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 \
+ | RXH_L4_B_2_3)
+
+#define dpaa2_eth_hash_enabled(priv) \
+ ((priv)->dpni_attrs.num_queues > 1)
+
+/* Required by struct dpni_rx_tc_dist_cfg::key_cfg_iova */
+#define DPAA2_CLASSIFIER_DMA_SIZE 256
+
+extern const struct ethtool_ops dpaa2_ethtool_ops;
+extern const char dpaa2_eth_drv_version[];
+
+int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags);
+
+static int dpaa2_eth_queue_count(struct dpaa2_eth_priv *priv)
+{
+ return priv->dpni_attrs.num_queues;
+}
+
+#endif /* __DPAA2_H */
diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c
new file mode 100644
index 000000000000..dd0cffa908ef
--- /dev/null
+++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c
@@ -0,0 +1,279 @@
+/* Copyright 2014-2016 Freescale Semiconductor Inc.
+ * Copyright 2016 NXP
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "dpni.h" /* DPNI_LINK_OPT_* */
+#include "dpaa2-eth.h"
+
+/* To be kept in sync with DPNI statistics */
+char dpaa2_ethtool_stats[][ETH_GSTRING_LEN] = {
+ "rx frames",
+ "rx bytes",
+ "rx mcast frames",
+ "rx mcast bytes",
+ "rx bcast frames",
+ "rx bcast bytes",
+ "tx frames",
+ "tx bytes",
+ "tx mcast frames",
+ "tx mcast bytes",
+ "tx bcast frames",
+ "tx bcast bytes",
+ "rx filtered frames",
+ "rx discarded frames",
+ "rx nobuffer discards",
+ "tx discarded frames",
+ "tx confirmed frames",
+};
+
+#define DPAA2_ETH_NUM_STATS ARRAY_SIZE(dpaa2_ethtool_stats)
+
+char dpaa2_ethtool_extras[][ETH_GSTRING_LEN] = {
+ /* per-cpu stats */
+ "tx conf frames",
+ "tx conf bytes",
+ "tx sg frames",
+ "tx sg bytes",
+ "rx sg frames",
+ "rx sg bytes",
+ "enqueue portal busy",
+ /* Channel stats */
+ "dequeue portal busy",
+ "channel pull errors",
+ "cdan",
+};
+
+#define DPAA2_ETH_NUM_EXTRA_STATS ARRAY_SIZE(dpaa2_ethtool_extras)
+
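+/* Note: the ordering of the strings above is a contract with
+ * dpaa2_eth_get_ethtool_stats() below, which fills in the per-cpu extras
+ * first (in struct dpaa2_eth_drv_stats field order), followed by the
+ * aggregated channel counters: dequeue portal busy, pull errors, cdan.
+ */
+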
+static void dpaa2_eth_get_drvinfo(struct net_device *net_dev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, dpaa2_eth_drv_version,
+ sizeof(drvinfo->version));
+ strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
+ strlcpy(drvinfo->bus_info, dev_name(net_dev->dev.parent->parent),
+ sizeof(drvinfo->bus_info));
+}
+
+static int
+dpaa2_eth_get_link_ksettings(struct net_device *net_dev,
+ struct ethtool_link_ksettings *link_settings)
+{
+ struct dpni_link_state state = {0};
+ int err = 0;
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+
+ err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
+ if (err) {
+ netdev_err(net_dev, "ERROR %d getting link state", err);
+ goto out;
+ }
+
+ /* At the moment, we have no way of interrogating the DPMAC
+ * from the DPNI side - and for that matter there may exist
+ * no DPMAC at all. So for now we just don't report anything
+ * beyond the DPNI attributes.
+ */
+ if (state.options & DPNI_LINK_OPT_AUTONEG)
+ link_settings->base.autoneg = AUTONEG_ENABLE;
+ if (!(state.options & DPNI_LINK_OPT_HALF_DUPLEX))
+ link_settings->base.duplex = DUPLEX_FULL;
+ link_settings->base.speed = state.rate;
+
+out:
+ return err;
+}
+
+static int
+dpaa2_eth_set_link_ksettings(struct net_device *net_dev,
+ const struct ethtool_link_ksettings *link_settings)
+{
+ struct dpni_link_cfg cfg = {0};
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ int err = 0;
+
+ netdev_dbg(net_dev, "Setting link parameters...");
+
+ /* Due to a temporary MC limitation, the DPNI must be down in order
+ * to change link settings, so let the user know about it.
+ */
+ if (netif_running(net_dev)) {
+ netdev_info(net_dev, "Sorry, interface must be brought down first.\n");
+ return -EACCES;
+ }
+
+ cfg.rate = link_settings->base.speed;
+ if (link_settings->base.autoneg == AUTONEG_ENABLE)
+ cfg.options |= DPNI_LINK_OPT_AUTONEG;
+ else
+ cfg.options &= ~DPNI_LINK_OPT_AUTONEG;
+ if (link_settings->base.duplex == DUPLEX_HALF)
+ cfg.options |= DPNI_LINK_OPT_HALF_DUPLEX;
+ else
+ cfg.options &= ~DPNI_LINK_OPT_HALF_DUPLEX;
+
+ err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &cfg);
+ if (err)
+ /* ethtool will be loud enough if we return an error; no point
+ * in putting our own error message on the console by default
+ */
+ netdev_dbg(net_dev, "ERROR %d setting link cfg", err);
+
+ return err;
+}
+
+static void dpaa2_eth_get_strings(struct net_device *netdev, u32 stringset,
+ u8 *data)
+{
+ u8 *p = data;
+ int i;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < DPAA2_ETH_NUM_STATS; i++) {
+ strlcpy(p, dpaa2_ethtool_stats[i], ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < DPAA2_ETH_NUM_EXTRA_STATS; i++) {
+ strlcpy(p, dpaa2_ethtool_extras[i], ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ break;
+ }
+}
+
+static int dpaa2_eth_get_sset_count(struct net_device *net_dev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS: /* ethtool_get_stats(), ethtool_get_drvinfo() */
+ return DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+/** Fill in hardware counters, as returned by MC.
+ */
+static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev,
+ struct ethtool_stats *stats,
+ u64 *data)
+{
+ int i = 0;
+ int j, k, err;
+ int num_cnt;
+ union dpni_statistics dpni_stats;
+ u64 cdan = 0;
+ u64 portal_busy = 0, pull_err = 0;
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ struct dpaa2_eth_drv_stats *extras;
+ struct dpaa2_eth_ch_stats *ch_stats;
+
+ memset(data, 0,
+ sizeof(u64) * (DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS));
+
+ /* Print standard counters, from DPNI statistics */
+ for (j = 0; j <= 2; j++) {
+ err = dpni_get_statistics(priv->mc_io, 0, priv->mc_token,
+ j, &dpni_stats);
+ if (err != 0)
+ netdev_warn(net_dev, "dpni_get_stats(%d) failed", j);
+ switch (j) {
+ case 0:
+ num_cnt = sizeof(dpni_stats.page_0) / sizeof(u64);
+ break;
+ case 1:
+ num_cnt = sizeof(dpni_stats.page_1) / sizeof(u64);
+ break;
+ case 2:
+ num_cnt = sizeof(dpni_stats.page_2) / sizeof(u64);
+ break;
+ default:
+ break;
+ }
+ for (k = 0; k < num_cnt; k++)
+ *(data + i++) = dpni_stats.raw.counter[k];
+ }
+
+ /* Print per-cpu extra stats */
+ for_each_online_cpu(k) {
+ extras = per_cpu_ptr(priv->percpu_extras, k);
+ for (j = 0; j < sizeof(*extras) / sizeof(__u64); j++)
+ *((__u64 *)data + i + j) += *((__u64 *)extras + j);
+ }
+ i += j;
+
+ for (j = 0; j < priv->num_channels; j++) {
+ ch_stats = &priv->channel[j]->stats;
+ cdan += ch_stats->cdan;
+ portal_busy += ch_stats->dequeue_portal_busy;
+ pull_err += ch_stats->pull_err;
+ }
+
+ *(data + i++) = portal_busy;
+ *(data + i++) = pull_err;
+ *(data + i++) = cdan;
+}
+
+static int dpaa2_eth_get_rxnfc(struct net_device *net_dev,
+ struct ethtool_rxnfc *rxnfc, u32 *rule_locs)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+
+ switch (rxnfc->cmd) {
+ case ETHTOOL_GRXFH:
+ /* we purposely ignore cmd->flow_type for now, because the
+ * classifier only supports a single set of fields for all
+ * protocols
+ */
+ rxnfc->data = priv->rx_hash_fields;
+ break;
+ case ETHTOOL_GRXRINGS:
+ rxnfc->data = dpaa2_eth_queue_count(priv);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+const struct ethtool_ops dpaa2_ethtool_ops = {
+ .get_drvinfo = dpaa2_eth_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_link_ksettings = dpaa2_eth_get_link_ksettings,
+ .set_link_ksettings = dpaa2_eth_set_link_ksettings,
+ .get_sset_count = dpaa2_eth_get_sset_count,
+ .get_ethtool_stats = dpaa2_eth_get_ethtool_stats,
+ .get_strings = dpaa2_eth_get_strings,
+ .get_rxnfc = dpaa2_eth_get_rxnfc,
+};
diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpkg.h b/drivers/staging/fsl-dpaa2/ethernet/dpkg.h
new file mode 100644
index 000000000000..02290a088391
--- /dev/null
+++ b/drivers/staging/fsl-dpaa2/ethernet/dpkg.h
@@ -0,0 +1,176 @@
+/* Copyright 2013-2015 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef __FSL_DPKG_H_
+#define __FSL_DPKG_H_
+
+#include <linux/types.h>
+#include "net.h"
+
+/* Data Path Key Generator API
+ * Contains initialization APIs and runtime APIs for the Key Generator
+ */
+
+/** Key Generator properties */
+
+/**
+ * Number of masks per key extraction
+ */
+#define DPKG_NUM_OF_MASKS 4
+/**
+ * Number of extractions per key profile
+ */
+#define DPKG_MAX_NUM_OF_EXTRACTS 10
+
+/**
+ * enum dpkg_extract_from_hdr_type - Selecting extraction by header types
+ * @DPKG_FROM_HDR: Extract selected bytes from header, by offset
+ * @DPKG_FROM_FIELD: Extract selected bytes from header, by offset from field
+ * @DPKG_FULL_FIELD: Extract a full field
+ */
+enum dpkg_extract_from_hdr_type {
+ DPKG_FROM_HDR = 0,
+ DPKG_FROM_FIELD = 1,
+ DPKG_FULL_FIELD = 2
+};
+
+/**
+ * enum dpkg_extract_type - Enumeration for selecting extraction type
+ * @DPKG_EXTRACT_FROM_HDR: Extract from the header
+ * @DPKG_EXTRACT_FROM_DATA: Extract from data not in specific header
+ * @DPKG_EXTRACT_FROM_PARSE: Extract from parser-result;
+ * e.g. can be used to extract header existence;
+ * please refer to 'Parse Result definition' section in the parser BG
+ */
+enum dpkg_extract_type {
+ DPKG_EXTRACT_FROM_HDR = 0,
+ DPKG_EXTRACT_FROM_DATA = 1,
+ DPKG_EXTRACT_FROM_PARSE = 3
+};
+
+/**
+ * struct dpkg_mask - A structure for defining a single extraction mask
+ * @mask: Byte mask for the extracted content
+ * @offset: Offset within the extracted content
+ */
+struct dpkg_mask {
+ u8 mask;
+ u8 offset;
+};
+
+/**
+ * struct dpkg_extract - A structure for defining a single extraction
+ * @type: Determines how the union below is interpreted:
+ * DPKG_EXTRACT_FROM_HDR: selects 'from_hdr';
+ * DPKG_EXTRACT_FROM_DATA: selects 'from_data';
+ * DPKG_EXTRACT_FROM_PARSE: selects 'from_parse'
+ * @extract: Selects extraction method
+ * @num_of_byte_masks: Defines the number of valid entries in the array below;
+ * This is also the number of bytes to be used as masks
+ * @masks: Masks parameters
+ */
+struct dpkg_extract {
+ enum dpkg_extract_type type;
+ /**
+ * union extract - Selects extraction method
+ * @from_hdr - Used when 'type = DPKG_EXTRACT_FROM_HDR'
+ * @from_data - Used when 'type = DPKG_EXTRACT_FROM_DATA'
+ * @from_parse - Used when 'type = DPKG_EXTRACT_FROM_PARSE'
+ */
+ union {
+ /**
+ * struct from_hdr - Used when 'type = DPKG_EXTRACT_FROM_HDR'
+ * @prot: Any of the supported headers
+ * @type: Defines the type of header extraction:
+ * DPKG_FROM_HDR: use size & offset below;
+ * DPKG_FROM_FIELD: use field, size and offset below;
+ * DPKG_FULL_FIELD: use field below
+ * @field: One of the supported fields (NH_FLD_)
+ *
+ * @size: Size in bytes
+ * @offset: Byte offset
+ * @hdr_index: Clear for cases not listed below;
+ * Used for protocols that may have more than a single
+ * header, 0 indicates an outer header;
+ * Supported protocols (possible values):
+ * NET_PROT_VLAN (0, HDR_INDEX_LAST);
+ * NET_PROT_MPLS (0, 1, HDR_INDEX_LAST);
+ * NET_PROT_IP(0, HDR_INDEX_LAST);
+ * NET_PROT_IPv4(0, HDR_INDEX_LAST);
+ * NET_PROT_IPv6(0, HDR_INDEX_LAST);
+ */
+
+ struct {
+ enum net_prot prot;
+ enum dpkg_extract_from_hdr_type type;
+ u32 field;
+ u8 size;
+ u8 offset;
+ u8 hdr_index;
+ } from_hdr;
+ /**
+ * struct from_data - Used when 'type = DPKG_EXTRACT_FROM_DATA'
+ * @size: Size in bytes
+ * @offset: Byte offset
+ */
+ struct {
+ u8 size;
+ u8 offset;
+ } from_data;
+
+ /**
+ * struct from_parse - Used when
+ * 'type = DPKG_EXTRACT_FROM_PARSE'
+ * @size: Size in bytes
+ * @offset: Byte offset
+ */
+ struct {
+ u8 size;
+ u8 offset;
+ } from_parse;
+ } extract;
+
+ u8 num_of_byte_masks;
+ struct dpkg_mask masks[DPKG_NUM_OF_MASKS];
+};
+
+/**
+ * struct dpkg_profile_cfg - A structure for defining a full Key Generation
+ * profile (rule)
+ * @num_extracts: Defines the number of valid entries in the array below
+ * @extracts: Array of required extractions
+ */
+struct dpkg_profile_cfg {
+ u8 num_extracts;
+ struct dpkg_extract extracts[DPKG_MAX_NUM_OF_EXTRACTS];
+};
+
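+/* Illustrative sketch (not part of this patch): filling in a minimal profile
+ * that extracts the full IPv4 source and destination address fields, assuming
+ * the NET_PROT_* / NH_FLD_* identifiers provided by net.h:
+ *
+ *     struct dpkg_profile_cfg cfg = { 0 };
+ *
+ *     cfg.extracts[0].type = DPKG_EXTRACT_FROM_HDR;
+ *     cfg.extracts[0].extract.from_hdr.prot = NET_PROT_IP;
+ *     cfg.extracts[0].extract.from_hdr.type = DPKG_FULL_FIELD;
+ *     cfg.extracts[0].extract.from_hdr.field = NH_FLD_IP_SRC;
+ *
+ *     cfg.extracts[1].type = DPKG_EXTRACT_FROM_HDR;
+ *     cfg.extracts[1].extract.from_hdr.prot = NET_PROT_IP;
+ *     cfg.extracts[1].extract.from_hdr.type = DPKG_FULL_FIELD;
+ *     cfg.extracts[1].extract.from_hdr.field = NH_FLD_IP_DST;
+ *
+ *     cfg.num_extracts = 2;
+ *
+ * The resulting profile is then serialized with dpni_prepare_key_cfg() before
+ * being handed to the MC.
+ */
+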
+#endif /* __FSL_DPKG_H_ */
diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h b/drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h
new file mode 100644
index 000000000000..57df22292233
--- /dev/null
+++ b/drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h
@@ -0,0 +1,541 @@
+/* Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2016 NXP
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _FSL_DPNI_CMD_H
+#define _FSL_DPNI_CMD_H
+
+#include "dpni.h"
+
+/* DPNI Version */
+#define DPNI_VER_MAJOR 7
+#define DPNI_VER_MINOR 0
+#define DPNI_CMD_BASE_VERSION 1
+#define DPNI_CMD_ID_OFFSET 4
+
+#define DPNI_CMD(id) (((id) << DPNI_CMD_ID_OFFSET) | DPNI_CMD_BASE_VERSION)
+
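+/* For example, DPNI_CMDID_OPEN below expands to ((0x801 << 4) | 1) = 0x8011:
+ * the upper bits carry the command ID while the low nibble carries the
+ * command base version expected by the MC firmware.
+ */
+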
+#define DPNI_CMDID_OPEN DPNI_CMD(0x801)
+#define DPNI_CMDID_CLOSE DPNI_CMD(0x800)
+#define DPNI_CMDID_CREATE DPNI_CMD(0x901)
+#define DPNI_CMDID_DESTROY DPNI_CMD(0x900)
+#define DPNI_CMDID_GET_API_VERSION DPNI_CMD(0xa01)
+
+#define DPNI_CMDID_ENABLE DPNI_CMD(0x002)
+#define DPNI_CMDID_DISABLE DPNI_CMD(0x003)
+#define DPNI_CMDID_GET_ATTR DPNI_CMD(0x004)
+#define DPNI_CMDID_RESET DPNI_CMD(0x005)
+#define DPNI_CMDID_IS_ENABLED DPNI_CMD(0x006)
+
+#define DPNI_CMDID_SET_IRQ DPNI_CMD(0x010)
+#define DPNI_CMDID_GET_IRQ DPNI_CMD(0x011)
+#define DPNI_CMDID_SET_IRQ_ENABLE DPNI_CMD(0x012)
+#define DPNI_CMDID_GET_IRQ_ENABLE DPNI_CMD(0x013)
+#define DPNI_CMDID_SET_IRQ_MASK DPNI_CMD(0x014)
+#define DPNI_CMDID_GET_IRQ_MASK DPNI_CMD(0x015)
+#define DPNI_CMDID_GET_IRQ_STATUS DPNI_CMD(0x016)
+#define DPNI_CMDID_CLEAR_IRQ_STATUS DPNI_CMD(0x017)
+
+#define DPNI_CMDID_SET_POOLS DPNI_CMD(0x200)
+#define DPNI_CMDID_SET_ERRORS_BEHAVIOR DPNI_CMD(0x20B)
+
+#define DPNI_CMDID_GET_QDID DPNI_CMD(0x210)
+#define DPNI_CMDID_GET_TX_DATA_OFFSET DPNI_CMD(0x212)
+#define DPNI_CMDID_GET_LINK_STATE DPNI_CMD(0x215)
+#define DPNI_CMDID_SET_MAX_FRAME_LENGTH DPNI_CMD(0x216)
+#define DPNI_CMDID_GET_MAX_FRAME_LENGTH DPNI_CMD(0x217)
+#define DPNI_CMDID_SET_LINK_CFG DPNI_CMD(0x21A)
+#define DPNI_CMDID_SET_TX_SHAPING DPNI_CMD(0x21B)
+
+#define DPNI_CMDID_SET_MCAST_PROMISC DPNI_CMD(0x220)
+#define DPNI_CMDID_GET_MCAST_PROMISC DPNI_CMD(0x221)
+#define DPNI_CMDID_SET_UNICAST_PROMISC DPNI_CMD(0x222)
+#define DPNI_CMDID_GET_UNICAST_PROMISC DPNI_CMD(0x223)
+#define DPNI_CMDID_SET_PRIM_MAC DPNI_CMD(0x224)
+#define DPNI_CMDID_GET_PRIM_MAC DPNI_CMD(0x225)
+#define DPNI_CMDID_ADD_MAC_ADDR DPNI_CMD(0x226)
+#define DPNI_CMDID_REMOVE_MAC_ADDR DPNI_CMD(0x227)
+#define DPNI_CMDID_CLR_MAC_FILTERS DPNI_CMD(0x228)
+
+#define DPNI_CMDID_SET_RX_TC_DIST DPNI_CMD(0x235)
+
+#define DPNI_CMDID_ADD_FS_ENT DPNI_CMD(0x244)
+#define DPNI_CMDID_REMOVE_FS_ENT DPNI_CMD(0x245)
+#define DPNI_CMDID_CLR_FS_ENT DPNI_CMD(0x246)
+
+#define DPNI_CMDID_GET_STATISTICS DPNI_CMD(0x25D)
+#define DPNI_CMDID_GET_QUEUE DPNI_CMD(0x25F)
+#define DPNI_CMDID_SET_QUEUE DPNI_CMD(0x260)
+#define DPNI_CMDID_GET_TAILDROP DPNI_CMD(0x261)
+#define DPNI_CMDID_SET_TAILDROP DPNI_CMD(0x262)
+
+#define DPNI_CMDID_GET_PORT_MAC_ADDR DPNI_CMD(0x263)
+
+#define DPNI_CMDID_GET_BUFFER_LAYOUT DPNI_CMD(0x264)
+#define DPNI_CMDID_SET_BUFFER_LAYOUT DPNI_CMD(0x265)
+
+#define DPNI_CMDID_SET_TX_CONFIRMATION_MODE DPNI_CMD(0x266)
+#define DPNI_CMDID_SET_CONGESTION_NOTIFICATION DPNI_CMD(0x267)
+#define DPNI_CMDID_GET_CONGESTION_NOTIFICATION DPNI_CMD(0x268)
+#define DPNI_CMDID_SET_EARLY_DROP DPNI_CMD(0x269)
+#define DPNI_CMDID_GET_EARLY_DROP DPNI_CMD(0x26A)
+#define DPNI_CMDID_GET_OFFLOAD DPNI_CMD(0x26B)
+#define DPNI_CMDID_SET_OFFLOAD DPNI_CMD(0x26C)
+
+/* Macros for accessing command fields smaller than 1 byte */
+#define DPNI_MASK(field) \
+ GENMASK(DPNI_##field##_SHIFT + DPNI_##field##_SIZE - 1, \
+ DPNI_##field##_SHIFT)
+
+#define dpni_set_field(var, field, val) \
+ ((var) |= (((val) << DPNI_##field##_SHIFT) & DPNI_MASK(field)))
+#define dpni_get_field(var, field) \
+ (((var) & DPNI_MASK(field)) >> DPNI_##field##_SHIFT)
+
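+/* A minimal usage sketch of the accessors above (not driver code): with
+ * DPNI_ENABLE_SHIFT/SIZE defined as 0/1 further down,
+ *
+ *     u8 flags = 0;
+ *
+ *     dpni_set_field(flags, ENABLE, 1);       // flags is now 0x01
+ *     if (dpni_get_field(flags, ENABLE))      // evaluates to 1
+ *             ...;
+ */
+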
+struct dpni_cmd_open {
+ __le32 dpni_id;
+};
+
+#define DPNI_BACKUP_POOL(val, order) (((val) & 0x1) << (order))
+struct dpni_cmd_set_pools {
+ /* cmd word 0 */
+ u8 num_dpbp;
+ u8 backup_pool_mask;
+ __le16 pad;
+ /* cmd word 0..4 */
+ __le32 dpbp_id[DPNI_MAX_DPBP];
+ /* cmd word 4..6 */
+ __le16 buffer_size[DPNI_MAX_DPBP];
+};
+
+/* The enable indication is always the least significant bit */
+#define DPNI_ENABLE_SHIFT 0
+#define DPNI_ENABLE_SIZE 1
+
+struct dpni_rsp_is_enabled {
+ u8 enabled;
+};
+
+struct dpni_rsp_get_irq {
+ /* response word 0 */
+ __le32 irq_val;
+ __le32 pad;
+ /* response word 1 */
+ __le64 irq_addr;
+ /* response word 2 */
+ __le32 irq_num;
+ __le32 type;
+};
+
+struct dpni_cmd_set_irq_enable {
+ u8 enable;
+ u8 pad[3];
+ u8 irq_index;
+};
+
+struct dpni_cmd_get_irq_enable {
+ __le32 pad;
+ u8 irq_index;
+};
+
+struct dpni_rsp_get_irq_enable {
+ u8 enabled;
+};
+
+struct dpni_cmd_set_irq_mask {
+ __le32 mask;
+ u8 irq_index;
+};
+
+struct dpni_cmd_get_irq_mask {
+ __le32 pad;
+ u8 irq_index;
+};
+
+struct dpni_rsp_get_irq_mask {
+ __le32 mask;
+};
+
+struct dpni_cmd_get_irq_status {
+ __le32 status;
+ u8 irq_index;
+};
+
+struct dpni_rsp_get_irq_status {
+ __le32 status;
+};
+
+struct dpni_cmd_clear_irq_status {
+ __le32 status;
+ u8 irq_index;
+};
+
+struct dpni_rsp_get_attr {
+ /* response word 0 */
+ __le32 options;
+ u8 num_queues;
+ u8 num_tcs;
+ u8 mac_filter_entries;
+ u8 pad0;
+ /* response word 1 */
+ u8 vlan_filter_entries;
+ u8 pad1;
+ u8 qos_entries;
+ u8 pad2;
+ __le16 fs_entries;
+ __le16 pad3;
+ /* response word 2 */
+ u8 qos_key_size;
+ u8 fs_key_size;
+ __le16 wriop_version;
+};
+
+#define DPNI_ERROR_ACTION_SHIFT 0
+#define DPNI_ERROR_ACTION_SIZE 4
+#define DPNI_FRAME_ANN_SHIFT 4
+#define DPNI_FRAME_ANN_SIZE 1
+
+struct dpni_cmd_set_errors_behavior {
+ __le32 errors;
+ /* from least significant bit: error_action:4, set_frame_annotation:1 */
+ u8 flags;
+};
+
+/* There are 3 separate commands for configuring Rx, Tx and Tx confirmation
+ * buffer layouts, but they all share the same parameters.
+ * If one of the commands changes, the structure below needs to be split.
+ */
+
+#define DPNI_PASS_TS_SHIFT 0
+#define DPNI_PASS_TS_SIZE 1
+#define DPNI_PASS_PR_SHIFT 1
+#define DPNI_PASS_PR_SIZE 1
+#define DPNI_PASS_FS_SHIFT 2
+#define DPNI_PASS_FS_SIZE 1
+
+struct dpni_cmd_get_buffer_layout {
+ u8 qtype;
+};
+
+struct dpni_rsp_get_buffer_layout {
+ /* response word 0 */
+ u8 pad0[6];
+ /* from LSB: pass_timestamp:1, parser_result:1, frame_status:1 */
+ u8 flags;
+ u8 pad1;
+ /* response word 1 */
+ __le16 private_data_size;
+ __le16 data_align;
+ __le16 head_room;
+ __le16 tail_room;
+};
+
+struct dpni_cmd_set_buffer_layout {
+ /* cmd word 0 */
+ u8 qtype;
+ u8 pad0[3];
+ __le16 options;
+ /* from LSB: pass_timestamp:1, parser_result:1, frame_status:1 */
+ u8 flags;
+ u8 pad1;
+ /* cmd word 1 */
+ __le16 private_data_size;
+ __le16 data_align;
+ __le16 head_room;
+ __le16 tail_room;
+};
+
+struct dpni_cmd_set_offload {
+ u8 pad[3];
+ u8 dpni_offload;
+ __le32 config;
+};
+
+struct dpni_cmd_get_offload {
+ u8 pad[3];
+ u8 dpni_offload;
+};
+
+struct dpni_rsp_get_offload {
+ __le32 pad;
+ __le32 config;
+};
+
+struct dpni_cmd_get_qdid {
+ u8 qtype;
+};
+
+struct dpni_rsp_get_qdid {
+ __le16 qdid;
+};
+
+struct dpni_rsp_get_tx_data_offset {
+ __le16 data_offset;
+};
+
+struct dpni_cmd_get_statistics {
+ u8 page_number;
+};
+
+struct dpni_rsp_get_statistics {
+ __le64 counter[DPNI_STATISTICS_CNT];
+};
+
+struct dpni_cmd_set_link_cfg {
+ /* cmd word 0 */
+ __le64 pad0;
+ /* cmd word 1 */
+ __le32 rate;
+ __le32 pad1;
+ /* cmd word 2 */
+ __le64 options;
+};
+
+#define DPNI_LINK_STATE_SHIFT 0
+#define DPNI_LINK_STATE_SIZE 1
+
+struct dpni_rsp_get_link_state {
+ /* response word 0 */
+ __le32 pad0;
+ /* from LSB: up:1 */
+ u8 flags;
+ u8 pad1[3];
+ /* response word 1 */
+ __le32 rate;
+ __le32 pad2;
+ /* response word 2 */
+ __le64 options;
+};
+
+struct dpni_cmd_set_max_frame_length {
+ __le16 max_frame_length;
+};
+
+struct dpni_rsp_get_max_frame_length {
+ __le16 max_frame_length;
+};
+
+struct dpni_cmd_set_multicast_promisc {
+ u8 enable;
+};
+
+struct dpni_rsp_get_multicast_promisc {
+ u8 enabled;
+};
+
+struct dpni_cmd_set_unicast_promisc {
+ u8 enable;
+};
+
+struct dpni_rsp_get_unicast_promisc {
+ u8 enabled;
+};
+
+struct dpni_cmd_set_primary_mac_addr {
+ __le16 pad;
+ u8 mac_addr[6];
+};
+
+struct dpni_rsp_get_primary_mac_addr {
+ __le16 pad;
+ u8 mac_addr[6];
+};
+
+struct dpni_rsp_get_port_mac_addr {
+ __le16 pad;
+ u8 mac_addr[6];
+};
+
+struct dpni_cmd_add_mac_addr {
+ __le16 pad;
+ u8 mac_addr[6];
+};
+
+struct dpni_cmd_remove_mac_addr {
+ __le16 pad;
+ u8 mac_addr[6];
+};
+
+#define DPNI_UNICAST_FILTERS_SHIFT 0
+#define DPNI_UNICAST_FILTERS_SIZE 1
+#define DPNI_MULTICAST_FILTERS_SHIFT 1
+#define DPNI_MULTICAST_FILTERS_SIZE 1
+
+struct dpni_cmd_clear_mac_filters {
+ /* from LSB: unicast:1, multicast:1 */
+ u8 flags;
+};
+
+#define DPNI_DIST_MODE_SHIFT 0
+#define DPNI_DIST_MODE_SIZE 4
+#define DPNI_MISS_ACTION_SHIFT 4
+#define DPNI_MISS_ACTION_SIZE 4
+
+struct dpni_cmd_set_rx_tc_dist {
+ /* cmd word 0 */
+ __le16 dist_size;
+ u8 tc_id;
+ /* from LSB: dist_mode:4, miss_action:4 */
+ u8 flags;
+ __le16 pad0;
+ __le16 default_flow_id;
+ /* cmd word 1..5 */
+ __le64 pad1[5];
+ /* cmd word 6 */
+ __le64 key_cfg_iova;
+};
+
+/* dpni_set_rx_tc_dist extension (structure of the DMA-able memory at
+ * key_cfg_iova)
+ */
+struct dpni_mask_cfg {
+ u8 mask;
+ u8 offset;
+};
+
+#define DPNI_EFH_TYPE_SHIFT 0
+#define DPNI_EFH_TYPE_SIZE 4
+#define DPNI_EXTRACT_TYPE_SHIFT 0
+#define DPNI_EXTRACT_TYPE_SIZE 4
+
+struct dpni_dist_extract {
+ /* word 0 */
+ u8 prot;
+ /* EFH type stored in the 4 least significant bits */
+ u8 efh_type;
+ u8 size;
+ u8 offset;
+ __le32 field;
+ /* word 1 */
+ u8 hdr_index;
+ u8 constant;
+ u8 num_of_repeats;
+ u8 num_of_byte_masks;
+ /* Extraction type is stored in the 4 LSBs */
+ u8 extract_type;
+ u8 pad[3];
+ /* word 2 */
+ struct dpni_mask_cfg masks[4];
+};
+
+struct dpni_ext_set_rx_tc_dist {
+ /* extension word 0 */
+ u8 num_extracts;
+ u8 pad[7];
+ /* words 1..25 */
+ struct dpni_dist_extract extracts[DPKG_MAX_NUM_OF_EXTRACTS];
+};
+
+struct dpni_cmd_get_queue {
+ u8 qtype;
+ u8 tc;
+ u8 index;
+};
+
+#define DPNI_DEST_TYPE_SHIFT 0
+#define DPNI_DEST_TYPE_SIZE 4
+#define DPNI_STASH_CTRL_SHIFT 6
+#define DPNI_STASH_CTRL_SIZE 1
+#define DPNI_HOLD_ACTIVE_SHIFT 7
+#define DPNI_HOLD_ACTIVE_SIZE 1
+
+struct dpni_rsp_get_queue {
+ /* response word 0 */
+ __le64 pad0;
+ /* response word 1 */
+ __le32 dest_id;
+ __le16 pad1;
+ u8 dest_prio;
+ /* From LSB: dest_type:4, pad:2, flc_stash_ctrl:1, hold_active:1 */
+ u8 flags;
+ /* response word 2 */
+ __le64 flc;
+ /* response word 3 */
+ __le64 user_context;
+ /* response word 4 */
+ __le32 fqid;
+ __le16 qdbin;
+};
+
+struct dpni_cmd_set_queue {
+ /* cmd word 0 */
+ u8 qtype;
+ u8 tc;
+ u8 index;
+ u8 options;
+ __le32 pad0;
+ /* cmd word 1 */
+ __le32 dest_id;
+ __le16 pad1;
+ u8 dest_prio;
+ u8 flags;
+ /* cmd word 2 */
+ __le64 flc;
+ /* cmd word 3 */
+ __le64 user_context;
+};
+
+struct dpni_cmd_set_taildrop {
+ /* cmd word 0 */
+ u8 congestion_point;
+ u8 qtype;
+ u8 tc;
+ u8 index;
+ __le32 pad0;
+ /* cmd word 1 */
+ /* Only least significant bit is relevant */
+ u8 enable;
+ u8 pad1;
+ u8 units;
+ u8 pad2;
+ __le32 threshold;
+};
+
+struct dpni_cmd_get_taildrop {
+ u8 congestion_point;
+ u8 qtype;
+ u8 tc;
+ u8 index;
+};
+
+struct dpni_rsp_get_taildrop {
+ /* cmd word 0 */
+ __le64 pad0;
+ /* cmd word 1 */
+ /* only least significant bit is relevant */
+ u8 enable;
+ u8 pad1;
+ u8 units;
+ u8 pad2;
+ __le32 threshold;
+};
+
+#endif /* _FSL_DPNI_CMD_H */
diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpni.c b/drivers/staging/fsl-dpaa2/ethernet/dpni.c
new file mode 100644
index 000000000000..cea46edec6a2
--- /dev/null
+++ b/drivers/staging/fsl-dpaa2/ethernet/dpni.c
@@ -0,0 +1,1595 @@
+/* Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2016 NXP
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#include "../../fsl-mc/include/mc-sys.h"
+#include "../../fsl-mc/include/mc-cmd.h"
+#include "dpni.h"
+#include "dpni-cmd.h"
+
+/**
+ * dpni_prepare_key_cfg() - prepare the extract parameters
+ * @cfg: Key Generation profile (rule) to be serialized
+ * @key_cfg_buf: Zeroed 256-byte buffer to be filled in and then mapped for DMA
+ *
+ * This function has to be called before the following functions:
+ * - dpni_set_rx_tc_dist()
+ * - dpni_set_qos_table()
+ */
+int dpni_prepare_key_cfg(const struct dpkg_profile_cfg *cfg, u8 *key_cfg_buf)
+{
+ int i, j;
+ struct dpni_ext_set_rx_tc_dist *dpni_ext;
+ struct dpni_dist_extract *extr;
+
+ if (cfg->num_extracts > DPKG_MAX_NUM_OF_EXTRACTS)
+ return -EINVAL;
+
+ dpni_ext = (struct dpni_ext_set_rx_tc_dist *)key_cfg_buf;
+ dpni_ext->num_extracts = cfg->num_extracts;
+
+ for (i = 0; i < cfg->num_extracts; i++) {
+ extr = &dpni_ext->extracts[i];
+
+ switch (cfg->extracts[i].type) {
+ case DPKG_EXTRACT_FROM_HDR:
+ extr->prot = cfg->extracts[i].extract.from_hdr.prot;
+ dpni_set_field(extr->efh_type, EFH_TYPE,
+ cfg->extracts[i].extract.from_hdr.type);
+ extr->size = cfg->extracts[i].extract.from_hdr.size;
+ extr->offset = cfg->extracts[i].extract.from_hdr.offset;
+ extr->field = cpu_to_le32(
+ cfg->extracts[i].extract.from_hdr.field);
+ extr->hdr_index =
+ cfg->extracts[i].extract.from_hdr.hdr_index;
+ break;
+ case DPKG_EXTRACT_FROM_DATA:
+ extr->size = cfg->extracts[i].extract.from_data.size;
+ extr->offset =
+ cfg->extracts[i].extract.from_data.offset;
+ break;
+ case DPKG_EXTRACT_FROM_PARSE:
+ extr->size = cfg->extracts[i].extract.from_parse.size;
+ extr->offset =
+ cfg->extracts[i].extract.from_parse.offset;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ extr->num_of_byte_masks = cfg->extracts[i].num_of_byte_masks;
+ dpni_set_field(extr->extract_type, EXTRACT_TYPE,
+ cfg->extracts[i].type);
+
+ for (j = 0; j < DPKG_NUM_OF_MASKS; j++) {
+ extr->masks[j].mask = cfg->extracts[i].masks[j].mask;
+ extr->masks[j].offset =
+ cfg->extracts[i].masks[j].offset;
+ }
+ }
+
+ return 0;
+}
+
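+/* Illustrative call sequence (a sketch under assumptions, not driver code;
+ * kg_cfg, dist_cfg, dev, mc_io, token and tc are assumed to exist in the
+ * caller): the serialized key goes into a DMA-able buffer whose IOVA is then
+ * passed to the MC via struct dpni_rx_tc_dist_cfg::key_cfg_iova:
+ *
+ *     u8 *key_buf = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_KERNEL);
+ *     dma_addr_t iova;
+ *     int err;
+ *
+ *     err = dpni_prepare_key_cfg(&kg_cfg, key_buf);
+ *     if (!err) {
+ *             iova = dma_map_single(dev, key_buf,
+ *                                   DPAA2_CLASSIFIER_DMA_SIZE, DMA_TO_DEVICE);
+ *             dist_cfg.key_cfg_iova = iova;
+ *             // ... then call dpni_set_rx_tc_dist() with &dist_cfg ...
+ *     }
+ */
+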
+/**
+ * dpni_open() - Open a control session for the specified object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @dpni_id: DPNI unique ID
+ * @token: Returned token; use in subsequent API calls
+ *
+ * This function can be used to open a control session for an
+ * already created object; an object may have been declared in
+ * the DPL or by calling the dpni_create() function.
+ * This function returns a unique authentication token,
+ * associated with the specific object ID and the specific MC
+ * portal; this token must be used in all subsequent commands for
+ * this specific object.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_open(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ int dpni_id,
+ u16 *token)
+{
+ struct mc_command cmd = { 0 };
+ struct dpni_cmd_open *cmd_params;
+
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_OPEN,
+ cmd_flags,
+ 0);
+ cmd_params = (struct dpni_cmd_open *)cmd.params;
+ cmd_params->dpni_id = cpu_to_le32(dpni_id);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ *token = mc_cmd_hdr_read_token(&cmd);
+
+ return 0;
+}
+
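+/* Typical session handling (illustrative sketch only): open, use the returned
+ * token for subsequent commands, then close.
+ *
+ *     u16 token;
+ *     int err = dpni_open(mc_io, 0, dpni_id, &token);
+ *
+ *     if (!err) {
+ *             err = dpni_enable(mc_io, 0, token);
+ *             // ... further dpni_*() calls using 'token' ...
+ *             dpni_close(mc_io, 0, token);
+ *     }
+ */
+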
+/**
+ * dpni_close() - Close the control session of the object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ *
+ * After this function is called, no further operations are
+ * allowed on the object without opening a new control session.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_close(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLOSE,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_set_pools() - Set buffer pools configuration
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @cfg: Buffer pools configuration
+ *
+ * Mandatory for DPNI operation.
+ * Warning: Allowed only when DPNI is disabled
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_set_pools(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ const struct dpni_pools_cfg *cfg)
+{
+ struct mc_command cmd = { 0 };
+ struct dpni_cmd_set_pools *cmd_params;
+ int i;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_POOLS,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_set_pools *)cmd.params;
+ cmd_params->num_dpbp = cfg->num_dpbp;
+ for (i = 0; i < DPNI_MAX_DPBP; i++) {
+ cmd_params->dpbp_id[i] = cpu_to_le32(cfg->pools[i].dpbp_id);
+ cmd_params->buffer_size[i] =
+ cpu_to_le16(cfg->pools[i].buffer_size);
+ cmd_params->backup_pool_mask |=
+ DPNI_BACKUP_POOL(cfg->pools[i].backup_pool, i);
+ }
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_enable() - Enable the DPNI, allow sending and receiving frames.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_enable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ENABLE,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_disable() - Disable the DPNI, stop sending and receiving frames.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_disable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_DISABLE,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_is_enabled() - Check if the DPNI is enabled.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @en: Returns '1' if object is enabled; '0' otherwise
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_is_enabled(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ int *en)
+{
+ struct mc_command cmd = { 0 };
+ struct dpni_rsp_is_enabled *rsp_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_IS_ENABLED,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpni_rsp_is_enabled *)cmd.params;
+ *en = dpni_get_field(rsp_params->enabled, ENABLE);
+
+ return 0;
+}
+
+/**
+ * dpni_reset() - Reset the DPNI, returns the object to initial state.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_reset(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_RESET,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_set_irq_enable() - Set overall interrupt state.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @irq_index: The interrupt index to configure
+ * @en: Interrupt state: - enable = 1, disable = 0
+ *
+ * Allows GPP software to control when interrupts are generated.
+ * Each interrupt can have up to 32 causes. The enable/disable controls the
+ * overall interrupt state. If the interrupt is disabled, no causes will
+ * result in an interrupt.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_set_irq_enable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u8 en)
+{
+ struct mc_command cmd = { 0 };
+ struct dpni_cmd_set_irq_enable *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_IRQ_ENABLE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_set_irq_enable *)cmd.params;
+ dpni_set_field(cmd_params->enable, ENABLE, en);
+ cmd_params->irq_index = irq_index;
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_get_irq_enable() - Get overall interrupt state
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @irq_index: The interrupt index to configure
+ * @en: Returned interrupt state - enable = 1, disable = 0
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_irq_enable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u8 *en)
+{
+ struct mc_command cmd = { 0 };
+ struct dpni_cmd_get_irq_enable *cmd_params;
+ struct dpni_rsp_get_irq_enable *rsp_params;
+
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_IRQ_ENABLE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_get_irq_enable *)cmd.params;
+ cmd_params->irq_index = irq_index;
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpni_rsp_get_irq_enable *)cmd.params;
+ *en = dpni_get_field(rsp_params->enabled, ENABLE);
+
+ return 0;
+}
+
+/**
+ * dpni_set_irq_mask() - Set interrupt mask.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @irq_index: The interrupt index to configure
+ * @mask: event mask to trigger interrupt;
+ * each bit:
+ * 0 = ignore event
+ * 1 = consider event for asserting IRQ
+ *
+ * Every interrupt can have up to 32 causes and the interrupt model supports
+ * masking/unmasking each cause independently
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_set_irq_mask(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 mask)
+{
+ struct mc_command cmd = { 0 };
+ struct dpni_cmd_set_irq_mask *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_IRQ_MASK,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_set_irq_mask *)cmd.params;
+ cmd_params->mask = cpu_to_le32(mask);
+ cmd_params->irq_index = irq_index;
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_get_irq_mask() - Get interrupt mask.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @irq_index: The interrupt index to configure
+ * @mask: Returned event mask to trigger interrupt
+ *
+ * Every interrupt can have up to 32 causes and the interrupt model supports
+ * masking/unmasking each cause independently
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_irq_mask(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 *mask)
+{
+ struct mc_command cmd = { 0 };
+ struct dpni_cmd_get_irq_mask *cmd_params;
+ struct dpni_rsp_get_irq_mask *rsp_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_IRQ_MASK,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_get_irq_mask *)cmd.params;
+ cmd_params->irq_index = irq_index;
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpni_rsp_get_irq_mask *)cmd.params;
+ *mask = le32_to_cpu(rsp_params->mask);
+
+ return 0;
+}
+
+/**
+ * dpni_get_irq_status() - Get the current status of any pending interrupts.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @irq_index: The interrupt index to configure
+ * @status: Returned interrupts status - one bit per cause:
+ * 0 = no interrupt pending
+ * 1 = interrupt pending
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_irq_status(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 *status)
+{
+ struct mc_command cmd = { 0 };
+ struct dpni_cmd_get_irq_status *cmd_params;
+ struct dpni_rsp_get_irq_status *rsp_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_IRQ_STATUS,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_get_irq_status *)cmd.params;
+ cmd_params->status = cpu_to_le32(*status);
+ cmd_params->irq_index = irq_index;
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpni_rsp_get_irq_status *)cmd.params;
+ *status = le32_to_cpu(rsp_params->status);
+
+ return 0;
+}
+
+/**
+ * dpni_clear_irq_status() - Clear a pending interrupt's status
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @irq_index: The interrupt index to configure
+ * @status: bits to clear (W1C) - one bit per cause:
+ * 0 = don't change
+ * 1 = clear status bit
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_clear_irq_status(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 status)
+{
+ struct mc_command cmd = { 0 };
+ struct dpni_cmd_clear_irq_status *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLEAR_IRQ_STATUS,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_clear_irq_status *)cmd.params;
+ cmd_params->irq_index = irq_index;
+ cmd_params->status = cpu_to_le32(status);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_get_attributes() - Retrieve DPNI attributes.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @attr: Object's attributes
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_attributes(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpni_attr *attr)
+{
+ struct mc_command cmd = { 0 };
+ struct dpni_rsp_get_attr *rsp_params;
+
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_ATTR,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpni_rsp_get_attr *)cmd.params;
+ attr->options = le32_to_cpu(rsp_params->options);
+ attr->num_queues = rsp_params->num_queues;
+ attr->num_tcs = rsp_params->num_tcs;
+ attr->mac_filter_entries = rsp_params->mac_filter_entries;
+ attr->vlan_filter_entries = rsp_params->vlan_filter_entries;
+ attr->qos_entries = rsp_params->qos_entries;
+ attr->fs_entries = le16_to_cpu(rsp_params->fs_entries);
+ attr->qos_key_size = rsp_params->qos_key_size;
+ attr->fs_key_size = rsp_params->fs_key_size;
+ attr->wriop_version = le16_to_cpu(rsp_params->wriop_version);
+
+ return 0;
+}
+
+/**
+ * dpni_set_errors_behavior() - Set errors behavior
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @cfg: Errors configuration
+ *
+ * This function may be called numerous times with different
+ * error masks.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_set_errors_behavior(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpni_error_cfg *cfg)
+{
+ struct mc_command cmd = { 0 };
+ struct dpni_cmd_set_errors_behavior *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_ERRORS_BEHAVIOR,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_set_errors_behavior *)cmd.params;
+ cmd_params->errors = cpu_to_le32(cfg->errors);
+ dpni_set_field(cmd_params->flags, ERROR_ACTION, cfg->error_action);
+ dpni_set_field(cmd_params->flags, FRAME_ANN, cfg->set_frame_annotation);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_get_buffer_layout() - Retrieve buffer layout attributes.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @qtype: Type of queue to retrieve configuration for
+ * @layout: Returns buffer layout attributes
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_buffer_layout(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ enum dpni_queue_type qtype,
+ struct dpni_buffer_layout *layout)
+{
+ struct mc_command cmd = { 0 };
+ struct dpni_cmd_get_buffer_layout *cmd_params;
+ struct dpni_rsp_get_buffer_layout *rsp_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_BUFFER_LAYOUT,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_get_buffer_layout *)cmd.params;
+ cmd_params->qtype = qtype;
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpni_rsp_get_buffer_layout *)cmd.params;
+ layout->pass_timestamp = dpni_get_field(rsp_params->flags, PASS_TS);
+ layout->pass_parser_result = dpni_get_field(rsp_params->flags, PASS_PR);
+ layout->pass_frame_status = dpni_get_field(rsp_params->flags, PASS_FS);
+ layout->private_data_size = le16_to_cpu(rsp_params->private_data_size);
+ layout->data_align = le16_to_cpu(rsp_params->data_align);
+ layout->data_head_room = le16_to_cpu(rsp_params->head_room);
+ layout->data_tail_room = le16_to_cpu(rsp_params->tail_room);
+
+ return 0;
+}
+
+/**
+ * dpni_set_buffer_layout() - Set buffer layout configuration.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @qtype: Type of queue this configuration applies to
+ * @layout: Buffer layout configuration
+ *
+ * Return: '0' on Success; Error code otherwise.
+ *
+ * @warning Allowed only when DPNI is disabled
+ */
+int dpni_set_buffer_layout(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ enum dpni_queue_type qtype,
+ const struct dpni_buffer_layout *layout)
+{
+ struct mc_command cmd = { 0 };
+ struct dpni_cmd_set_buffer_layout *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_BUFFER_LAYOUT,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_set_buffer_layout *)cmd.params;
+ cmd_params->qtype = qtype;
+ cmd_params->options = cpu_to_le16(layout->options);
+ dpni_set_field(cmd_params->flags, PASS_TS, layout->pass_timestamp);
+ dpni_set_field(cmd_params->flags, PASS_PR, layout->pass_parser_result);
+ dpni_set_field(cmd_params->flags, PASS_FS, layout->pass_frame_status);
+ cmd_params->private_data_size = cpu_to_le16(layout->private_data_size);
+ cmd_params->data_align = cpu_to_le16(layout->data_align);
+ cmd_params->head_room = cpu_to_le16(layout->data_head_room);
+ cmd_params->tail_room = cpu_to_le16(layout->data_tail_room);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_set_offload() - Set DPNI offload configuration.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @type: Type of DPNI offload
+ * @config: Offload configuration.
+ * For checksum offloads, non-zero value enables the offload
+ *
+ * Return: '0' on Success; Error code otherwise.
+ *
+ * @warning Allowed only when DPNI is disabled
+ */
+int dpni_set_offload(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ enum dpni_offload type,
+ u32 config)
+{
+ struct mc_command cmd = { 0 };
+ struct dpni_cmd_set_offload *cmd_params;
+
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_OFFLOAD,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_set_offload *)cmd.params;
+ cmd_params->dpni_offload = type;
+ cmd_params->config = cpu_to_le32(config);
+
+ return mc_send_command(mc_io, &cmd);
+}
+
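+/**
+ * dpni_get_offload() - Get DPNI offload configuration.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @type: Type of DPNI offload
+ * @config: Returned offload configuration.
+ * For checksum offloads, a non-zero value indicates the offload is enabled.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */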
+int dpni_get_offload(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ enum dpni_offload type,
+ u32 *config)
+{
+ struct mc_command cmd = { 0 };
+ struct dpni_cmd_get_offload *cmd_params;
+ struct dpni_rsp_get_offload *rsp_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_OFFLOAD,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_get_offload *)cmd.params;
+ cmd_params->dpni_offload = type;
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpni_rsp_get_offload *)cmd.params;
+ *config = le32_to_cpu(rsp_params->config);
+
+ return 0;
+}
+
+/**
+ * dpni_get_qdid() - Get the Queuing Destination ID (QDID) that should be used
+ * for enqueue operations
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @qtype: Type of queue to receive QDID for
+ * @qdid: Returned virtual QDID value that should be used as an argument
+ * in all enqueue operations
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_qdid(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ enum dpni_queue_type qtype,
+ u16 *qdid)
+{
+ struct mc_command cmd = { 0 };
+ struct dpni_cmd_get_qdid *cmd_params;
+ struct dpni_rsp_get_qdid *rsp_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_QDID,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_get_qdid *)cmd.params;
+ cmd_params->qtype = qtype;
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpni_rsp_get_qdid *)cmd.params;
+ *qdid = le16_to_cpu(rsp_params->qdid);
+
+ return 0;
+}
+
+/**
+ * dpni_get_tx_data_offset() - Get the Tx data offset (from start of buffer)
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @data_offset: Tx data offset (from start of buffer)
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_tx_data_offset(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u16 *data_offset)
+{
+ struct mc_command cmd = { 0 };
+ struct dpni_rsp_get_tx_data_offset *rsp_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TX_DATA_OFFSET,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpni_rsp_get_tx_data_offset *)cmd.params;
+ *data_offset = le16_to_cpu(rsp_params->data_offset);
+
+ return 0;
+}
+
+/**
+ * dpni_set_link_cfg() - set the link configuration.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @cfg: Link configuration
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_set_link_cfg(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ const struct dpni_link_cfg *cfg)
+{
+ struct mc_command cmd = { 0 };
+ struct dpni_cmd_set_link_cfg *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_LINK_CFG,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_set_link_cfg *)cmd.params;
+ cmd_params->rate = cpu_to_le32(cfg->rate);
+ cmd_params->options = cpu_to_le64(cfg->options);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_get_link_state() - Return the link state (either up or down)
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @state:	Returned link state
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_link_state(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpni_link_state *state)
+{
+ struct mc_command cmd = { 0 };
+ struct dpni_rsp_get_link_state *rsp_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_LINK_STATE,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpni_rsp_get_link_state *)cmd.params;
+ state->up = dpni_get_field(rsp_params->flags, LINK_STATE);
+ state->rate = le32_to_cpu(rsp_params->rate);
+ state->options = le64_to_cpu(rsp_params->options);
+
+ return 0;
+}
+
+/**
+ * dpni_set_max_frame_length() - Set the maximum received frame length.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @max_frame_length: Maximum received frame length (in
+ * bytes); frame is discarded if its
+ * length exceeds this value
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_set_max_frame_length(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u16 max_frame_length)
+{
+ struct mc_command cmd = { 0 };
+ struct dpni_cmd_set_max_frame_length *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_MAX_FRAME_LENGTH,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_set_max_frame_length *)cmd.params;
+ cmd_params->max_frame_length = cpu_to_le16(max_frame_length);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_get_max_frame_length() - Get the maximum received frame length.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @max_frame_length: Maximum received frame length (in
+ * bytes); frame is discarded if its
+ * length exceeds this value
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_max_frame_length(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u16 *max_frame_length)
+{
+ struct mc_command cmd = { 0 };
+ struct dpni_rsp_get_max_frame_length *rsp_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_MAX_FRAME_LENGTH,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpni_rsp_get_max_frame_length *)cmd.params;
+ *max_frame_length = le16_to_cpu(rsp_params->max_frame_length);
+
+ return 0;
+}
+
+/**
+ * dpni_set_multicast_promisc() - Enable/disable multicast promiscuous mode
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @en: Set to '1' to enable; '0' to disable
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_set_multicast_promisc(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ int en)
+{
+ struct mc_command cmd = { 0 };
+ struct dpni_cmd_set_multicast_promisc *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_MCAST_PROMISC,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_set_multicast_promisc *)cmd.params;
+ dpni_set_field(cmd_params->enable, ENABLE, en);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_get_multicast_promisc() - Get multicast promiscuous mode
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @en: Returns '1' if enabled; '0' otherwise
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_multicast_promisc(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ int *en)
+{
+ struct mc_command cmd = { 0 };
+ struct dpni_rsp_get_multicast_promisc *rsp_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_MCAST_PROMISC,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpni_rsp_get_multicast_promisc *)cmd.params;
+ *en = dpni_get_field(rsp_params->enabled, ENABLE);
+
+ return 0;
+}
+
+/**
+ * dpni_set_unicast_promisc() - Enable/disable unicast promiscuous mode
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @en: Set to '1' to enable; '0' to disable
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_set_unicast_promisc(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ int en)
+{
+ struct mc_command cmd = { 0 };
+ struct dpni_cmd_set_unicast_promisc *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_UNICAST_PROMISC,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_set_unicast_promisc *)cmd.params;
+ dpni_set_field(cmd_params->enable, ENABLE, en);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_get_unicast_promisc() - Get unicast promiscuous mode
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @en: Returns '1' if enabled; '0' otherwise
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_unicast_promisc(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ int *en)
+{
+ struct mc_command cmd = { 0 };
+ struct dpni_rsp_get_unicast_promisc *rsp_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_UNICAST_PROMISC,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpni_rsp_get_unicast_promisc *)cmd.params;
+ *en = dpni_get_field(rsp_params->enabled, ENABLE);
+
+ return 0;
+}
+
+/**
+ * dpni_set_primary_mac_addr() - Set the primary MAC address
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @mac_addr: MAC address to set as primary address
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_set_primary_mac_addr(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ const u8 mac_addr[6])
+{
+ struct mc_command cmd = { 0 };
+ struct dpni_cmd_set_primary_mac_addr *cmd_params;
+ int i;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_PRIM_MAC,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_set_primary_mac_addr *)cmd.params;
+ for (i = 0; i < 6; i++)
+ cmd_params->mac_addr[i] = mac_addr[5 - i];
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_get_primary_mac_addr() - Get the primary MAC address
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @mac_addr: Returned MAC address
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_primary_mac_addr(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 mac_addr[6])
+{
+ struct mc_command cmd = { 0 };
+ struct dpni_rsp_get_primary_mac_addr *rsp_params;
+ int i, err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_PRIM_MAC,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpni_rsp_get_primary_mac_addr *)cmd.params;
+ for (i = 0; i < 6; i++)
+ mac_addr[5 - i] = rsp_params->mac_addr[i];
+
+ return 0;
+}
+
+/**
+ * dpni_get_port_mac_addr() - Retrieve MAC address associated to the physical
+ * port the DPNI is attached to
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @mac_addr: MAC address of the physical port, if any, otherwise 0
+ *
+ * The primary MAC address is not cleared by this operation.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_port_mac_addr(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 mac_addr[6])
+{
+ struct mc_command cmd = { 0 };
+ struct dpni_rsp_get_port_mac_addr *rsp_params;
+ int i, err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_PORT_MAC_ADDR,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpni_rsp_get_port_mac_addr *)cmd.params;
+ for (i = 0; i < 6; i++)
+ mac_addr[5 - i] = rsp_params->mac_addr[i];
+
+ return 0;
+}
+
+/**
+ * dpni_add_mac_addr() - Add MAC address filter
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @mac_addr: MAC address to add
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_add_mac_addr(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ const u8 mac_addr[6])
+{
+ struct mc_command cmd = { 0 };
+ struct dpni_cmd_add_mac_addr *cmd_params;
+ int i;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_MAC_ADDR,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_add_mac_addr *)cmd.params;
+ for (i = 0; i < 6; i++)
+ cmd_params->mac_addr[i] = mac_addr[5 - i];
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_remove_mac_addr() - Remove MAC address filter
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @mac_addr: MAC address to remove
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_remove_mac_addr(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ const u8 mac_addr[6])
+{
+ struct mc_command cmd = { 0 };
+ struct dpni_cmd_remove_mac_addr *cmd_params;
+ int i;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_MAC_ADDR,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_remove_mac_addr *)cmd.params;
+ for (i = 0; i < 6; i++)
+ cmd_params->mac_addr[i] = mac_addr[5 - i];
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_clear_mac_filters() - Clear all unicast and/or multicast MAC filters
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @unicast: Set to '1' to clear unicast addresses
+ * @multicast: Set to '1' to clear multicast addresses
+ *
+ * The primary MAC address is not cleared by this operation.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_clear_mac_filters(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ int unicast,
+ int multicast)
+{
+ struct mc_command cmd = { 0 };
+ struct dpni_cmd_clear_mac_filters *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLR_MAC_FILTERS,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_clear_mac_filters *)cmd.params;
+ dpni_set_field(cmd_params->flags, UNICAST_FILTERS, unicast);
+ dpni_set_field(cmd_params->flags, MULTICAST_FILTERS, multicast);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_set_rx_tc_dist() - Set Rx traffic class distribution configuration
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @tc_id: Traffic class selection (0-7)
+ * @cfg: Traffic class distribution configuration
+ *
+ * Warning: if 'dist_mode != DPNI_DIST_MODE_NONE', call dpni_prepare_key_cfg()
+ * first to prepare the key_cfg_iova parameter
+ *
+ * Return: '0' on Success; error code otherwise.
+ */
+int dpni_set_rx_tc_dist(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 tc_id,
+ const struct dpni_rx_tc_dist_cfg *cfg)
+{
+ struct mc_command cmd = { 0 };
+ struct dpni_cmd_set_rx_tc_dist *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_TC_DIST,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_set_rx_tc_dist *)cmd.params;
+ cmd_params->dist_size = cpu_to_le16(cfg->dist_size);
+ cmd_params->tc_id = tc_id;
+ dpni_set_field(cmd_params->flags, DIST_MODE, cfg->dist_mode);
+ dpni_set_field(cmd_params->flags, MISS_ACTION, cfg->fs_cfg.miss_action);
+ cmd_params->default_flow_id = cpu_to_le16(cfg->fs_cfg.default_flow_id);
+ cmd_params->key_cfg_iova = cpu_to_le64(cfg->key_cfg_iova);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_set_queue() - Set queue parameters
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @qtype: Type of queue - all queue types are supported, although
+ * the command is ignored for Tx
+ * @tc: Traffic class, in range 0 to NUM_TCS - 1
+ * @index: Selects the specific queue out of the set allocated for the
+ * same TC. Value must be in range 0 to NUM_QUEUES - 1
+ * @options: A combination of DPNI_QUEUE_OPT_ values that control what
+ * configuration options are set on the queue
+ * @queue: Queue structure
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_set_queue(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ enum dpni_queue_type qtype,
+ u8 tc,
+ u8 index,
+ u8 options,
+ const struct dpni_queue *queue)
+{
+ struct mc_command cmd = { 0 };
+ struct dpni_cmd_set_queue *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_QUEUE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_set_queue *)cmd.params;
+ cmd_params->qtype = qtype;
+ cmd_params->tc = tc;
+ cmd_params->index = index;
+ cmd_params->options = options;
+ cmd_params->dest_id = cpu_to_le32(queue->destination.id);
+ cmd_params->dest_prio = queue->destination.priority;
+ dpni_set_field(cmd_params->flags, DEST_TYPE, queue->destination.type);
+ dpni_set_field(cmd_params->flags, STASH_CTRL, queue->flc.stash_control);
+ dpni_set_field(cmd_params->flags, HOLD_ACTIVE,
+ queue->destination.hold_active);
+ cmd_params->flc = cpu_to_le64(queue->flc.value);
+ cmd_params->user_context = cpu_to_le64(queue->user_context);
+
+ /* send command to mc */
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_get_queue() - Get queue parameters
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @qtype: Type of queue - all queue types are supported
+ * @tc: Traffic class, in range 0 to NUM_TCS - 1
+ * @index: Selects the specific queue out of the set allocated for the
+ * same TC. Value must be in range 0 to NUM_QUEUES - 1
+ * @queue: Queue configuration structure
+ * @qid: Queue identification
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_queue(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ enum dpni_queue_type qtype,
+ u8 tc,
+ u8 index,
+ struct dpni_queue *queue,
+ struct dpni_queue_id *qid)
+{
+ struct mc_command cmd = { 0 };
+ struct dpni_cmd_get_queue *cmd_params;
+ struct dpni_rsp_get_queue *rsp_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_QUEUE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_get_queue *)cmd.params;
+ cmd_params->qtype = qtype;
+ cmd_params->tc = tc;
+ cmd_params->index = index;
+
+ /* send command to mc */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpni_rsp_get_queue *)cmd.params;
+ queue->destination.id = le32_to_cpu(rsp_params->dest_id);
+ queue->destination.priority = rsp_params->dest_prio;
+ queue->destination.type = dpni_get_field(rsp_params->flags,
+ DEST_TYPE);
+ queue->flc.stash_control = dpni_get_field(rsp_params->flags,
+ STASH_CTRL);
+ queue->destination.hold_active = dpni_get_field(rsp_params->flags,
+ HOLD_ACTIVE);
+ queue->flc.value = le64_to_cpu(rsp_params->flc);
+ queue->user_context = le64_to_cpu(rsp_params->user_context);
+ qid->fqid = le32_to_cpu(rsp_params->fqid);
+ qid->qdbin = le16_to_cpu(rsp_params->qdbin);
+
+ return 0;
+}
+
+/**
+ * dpni_get_statistics() - Get DPNI statistics
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @page: Selects the statistics page to retrieve, see
+ * DPNI_GET_STATISTICS output. Pages are numbered 0 to 2.
+ * @stat: Structure containing the statistics
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_statistics(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 page,
+ union dpni_statistics *stat)
+{
+ struct mc_command cmd = { 0 };
+ struct dpni_cmd_get_statistics *cmd_params;
+ struct dpni_rsp_get_statistics *rsp_params;
+ int i, err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_STATISTICS,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_get_statistics *)cmd.params;
+ cmd_params->page_number = page;
+
+ /* send command to mc */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpni_rsp_get_statistics *)cmd.params;
+ for (i = 0; i < DPNI_STATISTICS_CNT; i++)
+ stat->raw.counter[i] = le64_to_cpu(rsp_params->counter[i]);
+
+ return 0;
+}
+
+/**
+ * dpni_set_taildrop() - Set taildrop per queue or TC
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @cg_point: Congestion point
+ * @qtype:	Queue type on which the taildrop is configured.
+ * Only Rx queues are supported for now
+ * @tc: Traffic class to apply this taildrop to
+ * @index:	Index of the queue if the DPNI supports multiple queues for
+ * traffic distribution. Ignored if CONGESTION_POINT is not 0.
+ * @taildrop: Taildrop structure
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_set_taildrop(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ enum dpni_congestion_point cg_point,
+ enum dpni_queue_type qtype,
+ u8 tc,
+ u8 index,
+ struct dpni_taildrop *taildrop)
+{
+ struct mc_command cmd = { 0 };
+ struct dpni_cmd_set_taildrop *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TAILDROP,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_set_taildrop *)cmd.params;
+ cmd_params->congestion_point = cg_point;
+ cmd_params->qtype = qtype;
+ cmd_params->tc = tc;
+ cmd_params->index = index;
+ dpni_set_field(cmd_params->enable, ENABLE, taildrop->enable);
+ cmd_params->units = taildrop->units;
+ cmd_params->threshold = cpu_to_le32(taildrop->threshold);
+
+ /* send command to mc */
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_get_taildrop() - Get taildrop information
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @cg_point: Congestion point
+ * @qtype:	Queue type on which the taildrop is configured.
+ * Only Rx queues are supported for now
+ * @tc: Traffic class to apply this taildrop to
+ * @index:	Index of the queue if the DPNI supports multiple queues for
+ * traffic distribution. Ignored if CONGESTION_POINT is not 0.
+ * @taildrop: Taildrop structure
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_taildrop(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ enum dpni_congestion_point cg_point,
+ enum dpni_queue_type qtype,
+ u8 tc,
+ u8 index,
+ struct dpni_taildrop *taildrop)
+{
+ struct mc_command cmd = { 0 };
+ struct dpni_cmd_get_taildrop *cmd_params;
+ struct dpni_rsp_get_taildrop *rsp_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TAILDROP,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_get_taildrop *)cmd.params;
+ cmd_params->congestion_point = cg_point;
+ cmd_params->qtype = qtype;
+ cmd_params->tc = tc;
+ cmd_params->index = index;
+
+ /* send command to mc */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpni_rsp_get_taildrop *)cmd.params;
+ taildrop->enable = dpni_get_field(rsp_params->enable, ENABLE);
+ taildrop->units = rsp_params->units;
+ taildrop->threshold = le32_to_cpu(rsp_params->threshold);
+
+ return 0;
+}
diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpni.h b/drivers/staging/fsl-dpaa2/ethernet/dpni.h
new file mode 100644
index 000000000000..282e5e85ffa7
--- /dev/null
+++ b/drivers/staging/fsl-dpaa2/ethernet/dpni.h
@@ -0,0 +1,832 @@
+/* Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2016 NXP
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef __FSL_DPNI_H
+#define __FSL_DPNI_H
+
+#include "dpkg.h"
+
+struct fsl_mc_io;
+
+/**
+ * Data Path Network Interface API
+ * Contains initialization APIs and runtime control APIs for DPNI
+ */
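+
+/*
+ * Typical call sequence (illustrative sketch only, not part of the API;
+ * error handling is omitted and 'mc_io' / 'dpni_id' are placeholders
+ * supplied by the caller):
+ *
+ *	u16 token;
+ *	struct dpni_attr attr;
+ *
+ *	dpni_open(mc_io, 0, dpni_id, &token);
+ *	dpni_get_attributes(mc_io, 0, token, &attr);
+ *	... configure pools, buffer layout and queues while disabled ...
+ *	dpni_enable(mc_io, 0, token);
+ *	... runtime traffic and control operations ...
+ *	dpni_disable(mc_io, 0, token);
+ *	dpni_close(mc_io, 0, token);
+ */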
+
+/** General DPNI macros */
+
+/**
+ * Maximum number of traffic classes
+ */
+#define DPNI_MAX_TC 8
+/**
+ * Maximum number of buffer pools per DPNI
+ */
+#define DPNI_MAX_DPBP 8
+
+/**
+ * All traffic classes considered; see dpni_set_queue()
+ */
+#define DPNI_ALL_TCS (u8)(-1)
+/**
+ * All flows within traffic class considered; see dpni_set_queue()
+ */
+#define DPNI_ALL_TC_FLOWS (u16)(-1)
+/**
+ * Generate new flow ID; see dpni_set_queue()
+ */
+#define DPNI_NEW_FLOW_ID (u16)(-1)
+
+/**
+ * Tx traffic is always released to a buffer pool on transmit; there are no
+ * resources allocated to have the frames confirmed back to the source after
+ * transmission.
+ */
+#define DPNI_OPT_TX_FRM_RELEASE 0x000001
+/**
+ * Disables support for MAC address filtering for addresses other than primary
+ * MAC address. This affects both unicast and multicast. Promiscuous mode can
+ * still be enabled/disabled for both unicast and multicast. If promiscuous mode
+ * is disabled, only traffic matching the primary MAC address will be accepted.
+ */
+#define DPNI_OPT_NO_MAC_FILTER 0x000002
+/**
+ * Allocate policers for this DPNI. They can be used to rate-limit traffic on
+ * a per traffic class (TC) basis.
+ */
+#define DPNI_OPT_HAS_POLICING 0x000004
+/**
+ * Congestion can be managed in several ways, allowing the buffer pool to
+ * deplete on ingress, applying taildrop on each queue or using congestion
+ * groups for sets of queues. If set, a single congestion group is used across
+ * all TCs. If reset, a congestion group is allocated for each TC. Only
+ * relevant if the DPNI has multiple traffic classes.
+ */
+#define DPNI_OPT_SHARED_CONGESTION 0x000008
+/**
+ * Enables TCAM for Flow Steering and QoS look-ups. If not specified, all
+ * look-ups are exact match. Note that TCAM is not available on LS1088 and its
+ * variants. Setting this bit on these SoCs will trigger an error.
+ */
+#define DPNI_OPT_HAS_KEY_MASKING 0x000010
+/**
+ * Disables the flow steering table.
+ */
+#define DPNI_OPT_NO_FS 0x000020
+
+int dpni_open(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ int dpni_id,
+ u16 *token);
+
+int dpni_close(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
+
+/**
+ * struct dpni_pools_cfg - Structure representing buffer pools configuration
+ * @num_dpbp: Number of DPBPs
+ * @pools: Array of buffer pools parameters; The number of valid entries
+ * must match 'num_dpbp' value
+ */
+struct dpni_pools_cfg {
+ u8 num_dpbp;
+ /**
+ * struct pools - Buffer pools parameters
+ * @dpbp_id: DPBP object ID
+ * @buffer_size: Buffer size
+ * @backup_pool: Backup pool
+ */
+ struct {
+ int dpbp_id;
+ u16 buffer_size;
+ int backup_pool;
+ } pools[DPNI_MAX_DPBP];
+};
+
+int dpni_set_pools(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ const struct dpni_pools_cfg *cfg);
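+
+/*
+ * Example (illustrative sketch; 'bp_id' and 'buf_size' are placeholders
+ * for values obtained from the associated DPBP object):
+ *
+ *	struct dpni_pools_cfg pools_params = { 0 };
+ *	int err;
+ *
+ *	pools_params.num_dpbp = 1;
+ *	pools_params.pools[0].dpbp_id = bp_id;
+ *	pools_params.pools[0].buffer_size = buf_size;
+ *	pools_params.pools[0].backup_pool = 0;
+ *	err = dpni_set_pools(mc_io, 0, token, &pools_params);
+ */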
+
+int dpni_enable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
+
+int dpni_disable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
+
+int dpni_is_enabled(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ int *en);
+
+int dpni_reset(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
+
+/**
+ * DPNI IRQ Index and Events
+ */
+
+/**
+ * IRQ index
+ */
+#define DPNI_IRQ_INDEX 0
+/**
+ * IRQ event - indicates a change in link state
+ */
+#define DPNI_IRQ_EVENT_LINK_CHANGED 0x00000001
+
+int dpni_set_irq_enable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u8 en);
+
+int dpni_get_irq_enable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u8 *en);
+
+int dpni_set_irq_mask(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 mask);
+
+int dpni_get_irq_mask(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 *mask);
+
+int dpni_get_irq_status(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 *status);
+
+int dpni_clear_irq_status(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 status);
+
+/**
+ * struct dpni_attr - Structure representing DPNI attributes
+ * @options: Any combination of the following options:
+ * DPNI_OPT_TX_FRM_RELEASE
+ * DPNI_OPT_NO_MAC_FILTER
+ * DPNI_OPT_HAS_POLICING
+ * DPNI_OPT_SHARED_CONGESTION
+ * DPNI_OPT_HAS_KEY_MASKING
+ * DPNI_OPT_NO_FS
+ * @num_queues: Number of Tx and Rx queues used for traffic distribution.
+ * @num_tcs: Number of traffic classes (TCs), reserved for the DPNI.
+ * @mac_filter_entries: Number of entries in the MAC address filtering table.
+ * @vlan_filter_entries: Number of entries in the VLAN address filtering table.
+ * @qos_entries: Number of entries in the QoS classification table.
+ * @fs_entries: Number of entries in the flow steering table.
+ * @qos_key_size: Size, in bytes, of the QoS look-up key. Defining a key larger
+ * than this when adding QoS entries will result in an error.
+ * @fs_key_size: Size, in bytes, of the flow steering look-up key. Defining a
+ * key larger than this when composing the hash + FS key will
+ * result in an error.
+ * @wriop_version: Version of WRIOP HW block. The 3 version values are stored
+ *		in 6, 5 and 5 bits, respectively.
+ */
+struct dpni_attr {
+ u32 options;
+ u8 num_queues;
+ u8 num_tcs;
+ u8 mac_filter_entries;
+ u8 vlan_filter_entries;
+ u8 qos_entries;
+ u16 fs_entries;
+ u8 qos_key_size;
+ u8 fs_key_size;
+ u16 wriop_version;
+};
+
+int dpni_get_attributes(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpni_attr *attr);
+
+/**
+ * DPNI errors
+ */
+
+/**
+ * Extract out of frame header error
+ */
+#define DPNI_ERROR_EOFHE 0x00020000
+/**
+ * Frame length error
+ */
+#define DPNI_ERROR_FLE 0x00002000
+/**
+ * Frame physical error
+ */
+#define DPNI_ERROR_FPE 0x00001000
+/**
+ * Parsing header error
+ */
+#define DPNI_ERROR_PHE 0x00000020
+/**
+ * Parser L3 checksum error
+ */
+#define DPNI_ERROR_L3CE 0x00000004
+/**
+ * Parser L4 checksum error
+ */
+#define DPNI_ERROR_L4CE 0x00000001
+
+/**
+ * enum dpni_error_action - Defines DPNI behavior for errors
+ * @DPNI_ERROR_ACTION_DISCARD: Discard the frame
+ * @DPNI_ERROR_ACTION_CONTINUE: Continue with the normal flow
+ * @DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE: Send the frame to the error queue
+ */
+enum dpni_error_action {
+ DPNI_ERROR_ACTION_DISCARD = 0,
+ DPNI_ERROR_ACTION_CONTINUE = 1,
+ DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE = 2
+};
+
+/**
+ * struct dpni_error_cfg - Structure representing DPNI errors treatment
+ * @errors: Errors mask; use 'DPNI_ERROR_<X>'
+ * @error_action: The desired action for the errors mask
+ * @set_frame_annotation: Set to '1' to mark the errors in frame annotation
+ * status (FAS); relevant only for the non-discard action
+ */
+struct dpni_error_cfg {
+ u32 errors;
+ enum dpni_error_action error_action;
+ int set_frame_annotation;
+};
+
+int dpni_set_errors_behavior(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpni_error_cfg *cfg);
+
+/**
+ * DPNI buffer layout modification options
+ */
+
+/**
+ * Select to modify the time-stamp setting
+ */
+#define DPNI_BUF_LAYOUT_OPT_TIMESTAMP 0x00000001
+/**
+ * Select to modify the parser-result setting; not applicable for Tx
+ */
+#define DPNI_BUF_LAYOUT_OPT_PARSER_RESULT 0x00000002
+/**
+ * Select to modify the frame-status setting
+ */
+#define DPNI_BUF_LAYOUT_OPT_FRAME_STATUS 0x00000004
+/**
+ * Select to modify the private-data-size setting
+ */
+#define DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE 0x00000008
+/**
+ * Select to modify the data-alignment setting
+ */
+#define DPNI_BUF_LAYOUT_OPT_DATA_ALIGN 0x00000010
+/**
+ * Select to modify the data-head-room setting
+ */
+#define DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM 0x00000020
+/**
+ * Select to modify the data-tail-room setting
+ */
+#define DPNI_BUF_LAYOUT_OPT_DATA_TAIL_ROOM 0x00000040
+
+/**
+ * struct dpni_buffer_layout - Structure representing DPNI buffer layout
+ * @options: Flags representing the suggested modifications to the buffer
+ * layout; Use any combination of 'DPNI_BUF_LAYOUT_OPT_<X>' flags
+ * @pass_timestamp: Pass timestamp value
+ * @pass_parser_result: Pass parser results
+ * @pass_frame_status: Pass frame status
+ * @private_data_size: Size kept for private data (in bytes)
+ * @data_align: Data alignment
+ * @data_head_room: Data head room
+ * @data_tail_room: Data tail room
+ */
+struct dpni_buffer_layout {
+ u32 options;
+ int pass_timestamp;
+ int pass_parser_result;
+ int pass_frame_status;
+ u16 private_data_size;
+ u16 data_align;
+ u16 data_head_room;
+ u16 data_tail_room;
+};
+
+/**
+ * enum dpni_queue_type - Identifies a type of queue targeted by the command
+ * @DPNI_QUEUE_RX: Rx queue
+ * @DPNI_QUEUE_TX: Tx queue
+ * @DPNI_QUEUE_TX_CONFIRM: Tx confirmation queue
+ * @DPNI_QUEUE_RX_ERR: Rx error queue
+ */
+enum dpni_queue_type {
+ DPNI_QUEUE_RX,
+ DPNI_QUEUE_TX,
+ DPNI_QUEUE_TX_CONFIRM,
+ DPNI_QUEUE_RX_ERR,
+};
+
+int dpni_get_buffer_layout(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ enum dpni_queue_type qtype,
+ struct dpni_buffer_layout *layout);
+
+int dpni_set_buffer_layout(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ enum dpni_queue_type qtype,
+ const struct dpni_buffer_layout *layout);
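+
+/*
+ * Example (illustrative sketch; the alignment and headroom values shown
+ * are placeholders chosen by the caller):
+ *
+ *	struct dpni_buffer_layout layout = { 0 };
+ *	int err;
+ *
+ *	layout.options = DPNI_BUF_LAYOUT_OPT_DATA_ALIGN |
+ *			 DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM;
+ *	layout.data_align = 64;
+ *	layout.data_head_room = 128;
+ *	err = dpni_set_buffer_layout(mc_io, 0, token, DPNI_QUEUE_RX, &layout);
+ */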
+
+/**
+ * enum dpni_offload - Identifies a type of offload targeted by the command
+ * @DPNI_OFF_RX_L3_CSUM: Rx L3 checksum validation
+ * @DPNI_OFF_RX_L4_CSUM: Rx L4 checksum validation
+ * @DPNI_OFF_TX_L3_CSUM: Tx L3 checksum generation
+ * @DPNI_OFF_TX_L4_CSUM: Tx L4 checksum generation
+ */
+enum dpni_offload {
+ DPNI_OFF_RX_L3_CSUM,
+ DPNI_OFF_RX_L4_CSUM,
+ DPNI_OFF_TX_L3_CSUM,
+ DPNI_OFF_TX_L4_CSUM,
+};
+
+int dpni_set_offload(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ enum dpni_offload type,
+ u32 config);
+
+int dpni_get_offload(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ enum dpni_offload type,
+ u32 *config);
+
+int dpni_get_qdid(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ enum dpni_queue_type qtype,
+ u16 *qdid);
+
+int dpni_get_tx_data_offset(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u16 *data_offset);
+
+#define DPNI_STATISTICS_CNT 7
+
+union dpni_statistics {
+ /**
+ * struct page_0 - Page_0 statistics structure
+ * @ingress_all_frames: Ingress frame count
+ * @ingress_all_bytes: Ingress byte count
+ * @ingress_multicast_frames: Ingress multicast frame count
+ * @ingress_multicast_bytes: Ingress multicast byte count
+ * @ingress_broadcast_frames: Ingress broadcast frame count
+ * @ingress_broadcast_bytes: Ingress broadcast byte count
+ */
+ struct {
+ u64 ingress_all_frames;
+ u64 ingress_all_bytes;
+ u64 ingress_multicast_frames;
+ u64 ingress_multicast_bytes;
+ u64 ingress_broadcast_frames;
+ u64 ingress_broadcast_bytes;
+ } page_0;
+ /**
+ * struct page_1 - Page_1 statistics structure
+ * @egress_all_frames: Egress frame count
+ * @egress_all_bytes: Egress byte count
+ * @egress_multicast_frames: Egress multicast frame count
+ * @egress_multicast_bytes: Egress multicast byte count
+ * @egress_broadcast_frames: Egress broadcast frame count
+ * @egress_broadcast_bytes: Egress broadcast byte count
+ */
+ struct {
+ u64 egress_all_frames;
+ u64 egress_all_bytes;
+ u64 egress_multicast_frames;
+ u64 egress_multicast_bytes;
+ u64 egress_broadcast_frames;
+ u64 egress_broadcast_bytes;
+ } page_1;
+ /**
+ * struct page_2 - Page_2 statistics structure
+ * @ingress_filtered_frames: Ingress filtered frame count
+ * @ingress_discarded_frames: Ingress discarded frame count
+ * @ingress_nobuffer_discards: Ingress discarded frame count
+ * due to lack of buffers
+ * @egress_discarded_frames: Egress discarded frame count
+ * @egress_confirmed_frames: Egress confirmed frame count
+ */
+ struct {
+ u64 ingress_filtered_frames;
+ u64 ingress_discarded_frames;
+ u64 ingress_nobuffer_discards;
+ u64 egress_discarded_frames;
+ u64 egress_confirmed_frames;
+ } page_2;
+ /**
+ * struct raw - raw statistics structure
+ */
+ struct {
+ u64 counter[DPNI_STATISTICS_CNT];
+ } raw;
+};
+
+int dpni_get_statistics(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 page,
+ union dpni_statistics *stat);
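+
+/*
+ * Example (illustrative sketch; 'rx_frames' is a caller-side variable):
+ *
+ *	union dpni_statistics stats;
+ *	u64 rx_frames;
+ *	int err;
+ *
+ *	err = dpni_get_statistics(mc_io, 0, token, 0, &stats);
+ *	if (!err)
+ *		rx_frames = stats.page_0.ingress_all_frames;
+ */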
+
+/**
+ * Enable auto-negotiation
+ */
+#define DPNI_LINK_OPT_AUTONEG 0x0000000000000001ULL
+/**
+ * Enable half-duplex mode
+ */
+#define DPNI_LINK_OPT_HALF_DUPLEX 0x0000000000000002ULL
+/**
+ * Enable pause frames
+ */
+#define DPNI_LINK_OPT_PAUSE 0x0000000000000004ULL
+/**
+ * Enable a-symmetric pause frames
+ */
+#define DPNI_LINK_OPT_ASYM_PAUSE 0x0000000000000008ULL
+
+/**
+ * struct dpni_link_cfg - Structure representing DPNI link configuration
+ * @rate: Rate
+ * @options: Mask of available options; use 'DPNI_LINK_OPT_<X>' values
+ */
+struct dpni_link_cfg {
+ u32 rate;
+ u64 options;
+};
+
+int dpni_set_link_cfg(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ const struct dpni_link_cfg *cfg);
+
+/**
+ * struct dpni_link_state - Structure representing DPNI link state
+ * @rate: Rate
+ * @options: Mask of available options; use 'DPNI_LINK_OPT_<X>' values
+ * @up: Link state; '0' for down, '1' for up
+ */
+struct dpni_link_state {
+ u32 rate;
+ u64 options;
+ int up;
+};
+
+int dpni_get_link_state(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpni_link_state *state);
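+
+/*
+ * Example (illustrative sketch; 'net_dev' is a placeholder for the
+ * net_device associated with this DPNI):
+ *
+ *	struct dpni_link_state state = { 0 };
+ *	int err;
+ *
+ *	err = dpni_get_link_state(mc_io, 0, token, &state);
+ *	if (!err && state.up)
+ *		netif_carrier_on(net_dev);
+ */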
+
+int dpni_set_max_frame_length(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u16 max_frame_length);
+
+int dpni_get_max_frame_length(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u16 *max_frame_length);
+
+int dpni_set_multicast_promisc(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ int en);
+
+int dpni_get_multicast_promisc(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ int *en);
+
+int dpni_set_unicast_promisc(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ int en);
+
+int dpni_get_unicast_promisc(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ int *en);
+
+int dpni_set_primary_mac_addr(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ const u8 mac_addr[6]);
+
+int dpni_get_primary_mac_addr(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 mac_addr[6]);
+
+int dpni_get_port_mac_addr(struct fsl_mc_io *mc_io,
+			   u32 cmd_flags,
+ u16 token,
+ u8 mac_addr[6]);
+
+int dpni_add_mac_addr(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ const u8 mac_addr[6]);
+
+int dpni_remove_mac_addr(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ const u8 mac_addr[6]);
+
+int dpni_clear_mac_filters(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ int unicast,
+ int multicast);
+
+/**
+ * enum dpni_dist_mode - DPNI distribution mode
+ * @DPNI_DIST_MODE_NONE: No distribution
+ * @DPNI_DIST_MODE_HASH: Use hash distribution; only relevant if
+ * the 'DPNI_OPT_DIST_HASH' option was set at DPNI creation
+ * @DPNI_DIST_MODE_FS: Use explicit flow steering; only relevant if
+ * the 'DPNI_OPT_DIST_FS' option was set at DPNI creation
+ */
+enum dpni_dist_mode {
+ DPNI_DIST_MODE_NONE = 0,
+ DPNI_DIST_MODE_HASH = 1,
+ DPNI_DIST_MODE_FS = 2
+};
+
+/**
+ * enum dpni_fs_miss_action - DPNI Flow Steering miss action
+ * @DPNI_FS_MISS_DROP: In case of no-match, drop the frame
+ * @DPNI_FS_MISS_EXPLICIT_FLOWID: In case of no-match, use explicit flow-id
+ * @DPNI_FS_MISS_HASH: In case of no-match, distribute using hash
+ */
+enum dpni_fs_miss_action {
+ DPNI_FS_MISS_DROP = 0,
+ DPNI_FS_MISS_EXPLICIT_FLOWID = 1,
+ DPNI_FS_MISS_HASH = 2
+};
+
+/**
+ * struct dpni_fs_tbl_cfg - Flow Steering table configuration
+ * @miss_action: Miss action selection
+ * @default_flow_id: Used when 'miss_action = DPNI_FS_MISS_EXPLICIT_FLOWID'
+ */
+struct dpni_fs_tbl_cfg {
+ enum dpni_fs_miss_action miss_action;
+ u16 default_flow_id;
+};
+
+int dpni_prepare_key_cfg(const struct dpkg_profile_cfg *cfg,
+ u8 *key_cfg_buf);
+
+/**
+ * struct dpni_rx_tc_dist_cfg - Rx traffic class distribution configuration
+ * @dist_size: Set the distribution size;
+ * supported values: 1,2,3,4,6,7,8,12,14,16,24,28,32,48,56,64,96,
+ * 112,128,192,224,256,384,448,512,768,896,1024
+ * @dist_mode: Distribution mode
+ * @key_cfg_iova: I/O virtual address of 256 bytes DMA-able memory filled with
+ * the extractions to be used for the distribution key by calling
+ *		dpni_prepare_key_cfg(); relevant only when
+ * 'dist_mode != DPNI_DIST_MODE_NONE', otherwise it can be '0'
+ * @fs_cfg: Flow Steering table configuration; only relevant if
+ * 'dist_mode = DPNI_DIST_MODE_FS'
+ */
+struct dpni_rx_tc_dist_cfg {
+ u16 dist_size;
+ enum dpni_dist_mode dist_mode;
+ u64 key_cfg_iova;
+ struct dpni_fs_tbl_cfg fs_cfg;
+};
+
+int dpni_set_rx_tc_dist(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 tc_id,
+ const struct dpni_rx_tc_dist_cfg *cfg);
+
+/**
+ * enum dpni_dest - DPNI destination types
+ * @DPNI_DEST_NONE: Unassigned destination; The queue is set in parked mode and
+ * does not generate FQDAN notifications; user is expected to
+ * dequeue from the queue based on polling or other user-defined
+ * method
+ * @DPNI_DEST_DPIO: The queue is set in schedule mode and generates FQDAN
+ * notifications to the specified DPIO; user is expected to dequeue
+ * from the queue only after notification is received
+ * @DPNI_DEST_DPCON: The queue is set in schedule mode and does not generate
+ * FQDAN notifications, but is connected to the specified DPCON
+ * object; user is expected to dequeue from the DPCON channel
+ */
+enum dpni_dest {
+ DPNI_DEST_NONE = 0,
+ DPNI_DEST_DPIO = 1,
+ DPNI_DEST_DPCON = 2
+};
+
+/**
+ * struct dpni_queue - Queue structure
+ * @user_context: User data, presented to the user along with any frames from
+ * this queue. Not relevant for Tx queues.
+ */
+struct dpni_queue {
+/**
+ * struct destination - Destination structure
+ * @id: ID of the destination, only relevant if DEST_TYPE is > 0.
+ * Identifies either a DPIO or a DPCON object. Not relevant for
+ * Tx queues.
+ * @type: May be one of the following:
+ * 0 - No destination, queue can be manually queried, but will not
+ * push traffic or notifications to a DPIO;
+ * 1 - The destination is a DPIO. When traffic becomes available in
+ * the queue a FQDAN (FQ data available notification) will be
+ * generated to selected DPIO;
+ * 2 - The destination is a DPCON. The queue is associated with a
+ * DPCON object for the purpose of scheduling between multiple
+ * queues. The DPCON may be independently configured to
+ * generate notifications. Not relevant for Tx queues.
+ * @hold_active: Hold active, maintains a queue scheduled for longer
+ * in a DPIO during dequeue to reduce spread of traffic.
+ * Only relevant if queues are not affined to a single DPIO.
+ */
+ struct {
+ u16 id;
+ enum dpni_dest type;
+ char hold_active;
+ u8 priority;
+ } destination;
+ u64 user_context;
+ struct {
+ u64 value;
+ char stash_control;
+ } flc;
+};
+
+/**
+ * struct dpni_queue_id - Queue identification, used for enqueue commands
+ * or queue control
+ * @fqid: FQID used for enqueueing to and/or configuration of this specific FQ
+ * @qdbin: Queueing bin, used to enqueue using QDID, DQBIN, QPRI. Only relevant
+ * for Tx queues.
+ */
+struct dpni_queue_id {
+ u32 fqid;
+ u16 qdbin;
+};
+
+/**
+ * Set User Context
+ */
+#define DPNI_QUEUE_OPT_USER_CTX 0x00000001
+#define DPNI_QUEUE_OPT_DEST 0x00000002
+#define DPNI_QUEUE_OPT_FLC 0x00000004
+#define DPNI_QUEUE_OPT_HOLD_ACTIVE 0x00000008
+
+int dpni_set_queue(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ enum dpni_queue_type qtype,
+ u8 tc,
+ u8 index,
+ u8 options,
+ const struct dpni_queue *queue);
+
+int dpni_get_queue(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ enum dpni_queue_type qtype,
+ u8 tc,
+ u8 index,
+ struct dpni_queue *queue,
+ struct dpni_queue_id *qid);
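+
+/*
+ * Example (illustrative sketch): steering Rx queue 0 of traffic class 0 to
+ * a DPCON channel. 'dpcon_id', 'prio' and 'my_ctx' are placeholders chosen
+ * by the caller.
+ *
+ *	struct dpni_queue queue = { 0 };
+ *	int err;
+ *
+ *	queue.destination.id = dpcon_id;
+ *	queue.destination.type = DPNI_DEST_DPCON;
+ *	queue.destination.priority = prio;
+ *	queue.user_context = (u64)(unsigned long)my_ctx;
+ *	err = dpni_set_queue(mc_io, 0, token, DPNI_QUEUE_RX, 0, 0,
+ *			     DPNI_QUEUE_OPT_DEST | DPNI_QUEUE_OPT_USER_CTX,
+ *			     &queue);
+ */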
+
+/**
+ * enum dpni_congestion_unit - DPNI congestion units
+ * @DPNI_CONGESTION_UNIT_BYTES: bytes units
+ * @DPNI_CONGESTION_UNIT_FRAMES: frames units
+ */
+enum dpni_congestion_unit {
+ DPNI_CONGESTION_UNIT_BYTES = 0,
+ DPNI_CONGESTION_UNIT_FRAMES
+};
+
+/**
+ * enum dpni_congestion_point - Structure representing congestion point
+ * @DPNI_CP_QUEUE: Set taildrop per queue, identified by QUEUE_TYPE, TC and
+ * QUEUE_INDEX
+ * @DPNI_CP_GROUP: Set taildrop per queue group. Depending on options used to
+ * define the DPNI this can be either per TC (default) or per
+ * interface (DPNI_OPT_SHARED_CONGESTION set at DPNI create).
+ * QUEUE_INDEX is ignored if this type is used.
+ */
+enum dpni_congestion_point {
+ DPNI_CP_QUEUE,
+ DPNI_CP_GROUP,
+};
+
+/**
+ * struct dpni_taildrop - Structure representing the taildrop
+ * @enable: Indicates whether the taildrop is active or not.
+ * @units: Indicates the unit of THRESHOLD. Queue taildrop only supports
+ *	byte units; this field is ignored and assumed to be 0 if
+ * CONGESTION_POINT is 0.
+ * @threshold: Threshold value, in units identified by UNITS field. Value 0
+ * cannot be used as a valid taildrop threshold, THRESHOLD must
+ * be > 0 if the taildrop is enabled.
+ */
+struct dpni_taildrop {
+ char enable;
+ enum dpni_congestion_unit units;
+ u32 threshold;
+};
+
+int dpni_set_taildrop(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ enum dpni_congestion_point cg_point,
+ enum dpni_queue_type q_type,
+ u8 tc,
+ u8 q_index,
+ struct dpni_taildrop *taildrop);
+
+int dpni_get_taildrop(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ enum dpni_congestion_point cg_point,
+ enum dpni_queue_type q_type,
+ u8 tc,
+ u8 q_index,
+ struct dpni_taildrop *taildrop);
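+
+/*
+ * Example (illustrative sketch): enabling a byte-unit taildrop on Rx queue 0
+ * of traffic class 0; the threshold value is a placeholder.
+ *
+ *	struct dpni_taildrop td = { 0 };
+ *	int err;
+ *
+ *	td.enable = 1;
+ *	td.units = DPNI_CONGESTION_UNIT_BYTES;
+ *	td.threshold = 64 * 1024;
+ *	err = dpni_set_taildrop(mc_io, 0, token, DPNI_CP_QUEUE,
+ *				DPNI_QUEUE_RX, 0, 0, &td);
+ */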
+
+/**
+ * struct dpni_rule_cfg - Rule configuration for table lookup
+ * @key_iova: I/O virtual address of the key (must be in DMA-able memory)
+ * @mask_iova: I/O virtual address of the mask (must be in DMA-able memory)
+ * @key_size: key and mask size (in bytes)
+ */
+struct dpni_rule_cfg {
+ u64 key_iova;
+ u64 mask_iova;
+ u8 key_size;
+};
+
+#endif /* __FSL_DPNI_H */
diff --git a/drivers/staging/fsl-dpaa2/ethernet/net.h b/drivers/staging/fsl-dpaa2/ethernet/net.h
new file mode 100644
index 000000000000..5020dee1730c
--- /dev/null
+++ b/drivers/staging/fsl-dpaa2/ethernet/net.h
@@ -0,0 +1,480 @@
+/* Copyright 2013-2015 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef __FSL_NET_H
+#define __FSL_NET_H
+
+#define LAST_HDR_INDEX 0xFFFFFFFF
+
+/*****************************************************************************/
+/* Protocol fields */
+/*****************************************************************************/
+
+/************************* Ethernet fields *********************************/
+#define NH_FLD_ETH_DA (1)
+#define NH_FLD_ETH_SA (NH_FLD_ETH_DA << 1)
+#define NH_FLD_ETH_LENGTH (NH_FLD_ETH_DA << 2)
+#define NH_FLD_ETH_TYPE (NH_FLD_ETH_DA << 3)
+#define NH_FLD_ETH_FINAL_CKSUM (NH_FLD_ETH_DA << 4)
+#define NH_FLD_ETH_PADDING (NH_FLD_ETH_DA << 5)
+#define NH_FLD_ETH_ALL_FIELDS ((NH_FLD_ETH_DA << 6) - 1)
+
+#define NH_FLD_ETH_ADDR_SIZE 6
+
+/*************************** VLAN fields ***********************************/
+#define NH_FLD_VLAN_VPRI (1)
+#define NH_FLD_VLAN_CFI (NH_FLD_VLAN_VPRI << 1)
+#define NH_FLD_VLAN_VID (NH_FLD_VLAN_VPRI << 2)
+#define NH_FLD_VLAN_LENGTH (NH_FLD_VLAN_VPRI << 3)
+#define NH_FLD_VLAN_TYPE (NH_FLD_VLAN_VPRI << 4)
+#define NH_FLD_VLAN_ALL_FIELDS ((NH_FLD_VLAN_VPRI << 5) - 1)
+
+#define NH_FLD_VLAN_TCI (NH_FLD_VLAN_VPRI | \
+ NH_FLD_VLAN_CFI | \
+ NH_FLD_VLAN_VID)
+
+/************************ IP (generic) fields ******************************/
+#define NH_FLD_IP_VER (1)
+#define NH_FLD_IP_DSCP (NH_FLD_IP_VER << 2)
+#define NH_FLD_IP_ECN (NH_FLD_IP_VER << 3)
+#define NH_FLD_IP_PROTO (NH_FLD_IP_VER << 4)
+#define NH_FLD_IP_SRC (NH_FLD_IP_VER << 5)
+#define NH_FLD_IP_DST (NH_FLD_IP_VER << 6)
+#define NH_FLD_IP_TOS_TC (NH_FLD_IP_VER << 7)
+#define NH_FLD_IP_ID (NH_FLD_IP_VER << 8)
+#define NH_FLD_IP_ALL_FIELDS ((NH_FLD_IP_VER << 9) - 1)
+
+#define NH_FLD_IP_PROTO_SIZE 1
+
+/***************************** IPV4 fields *********************************/
+#define NH_FLD_IPV4_VER (1)
+#define NH_FLD_IPV4_HDR_LEN (NH_FLD_IPV4_VER << 1)
+#define NH_FLD_IPV4_TOS (NH_FLD_IPV4_VER << 2)
+#define NH_FLD_IPV4_TOTAL_LEN (NH_FLD_IPV4_VER << 3)
+#define NH_FLD_IPV4_ID (NH_FLD_IPV4_VER << 4)
+#define NH_FLD_IPV4_FLAG_D (NH_FLD_IPV4_VER << 5)
+#define NH_FLD_IPV4_FLAG_M (NH_FLD_IPV4_VER << 6)
+#define NH_FLD_IPV4_OFFSET (NH_FLD_IPV4_VER << 7)
+#define NH_FLD_IPV4_TTL (NH_FLD_IPV4_VER << 8)
+#define NH_FLD_IPV4_PROTO (NH_FLD_IPV4_VER << 9)
+#define NH_FLD_IPV4_CKSUM (NH_FLD_IPV4_VER << 10)
+#define NH_FLD_IPV4_SRC_IP (NH_FLD_IPV4_VER << 11)
+#define NH_FLD_IPV4_DST_IP (NH_FLD_IPV4_VER << 12)
+#define NH_FLD_IPV4_OPTS (NH_FLD_IPV4_VER << 13)
+#define NH_FLD_IPV4_OPTS_COUNT (NH_FLD_IPV4_VER << 14)
+#define NH_FLD_IPV4_ALL_FIELDS ((NH_FLD_IPV4_VER << 15) - 1)
+
+#define NH_FLD_IPV4_ADDR_SIZE 4
+#define NH_FLD_IPV4_PROTO_SIZE 1
+
+/***************************** IPV6 fields *********************************/
+#define NH_FLD_IPV6_VER (1)
+#define NH_FLD_IPV6_TC (NH_FLD_IPV6_VER << 1)
+#define NH_FLD_IPV6_SRC_IP (NH_FLD_IPV6_VER << 2)
+#define NH_FLD_IPV6_DST_IP (NH_FLD_IPV6_VER << 3)
+#define NH_FLD_IPV6_NEXT_HDR (NH_FLD_IPV6_VER << 4)
+#define NH_FLD_IPV6_FL (NH_FLD_IPV6_VER << 5)
+#define NH_FLD_IPV6_HOP_LIMIT (NH_FLD_IPV6_VER << 6)
+#define NH_FLD_IPV6_ID (NH_FLD_IPV6_VER << 7)
+#define NH_FLD_IPV6_ALL_FIELDS ((NH_FLD_IPV6_VER << 8) - 1)
+
+#define NH_FLD_IPV6_ADDR_SIZE 16
+#define NH_FLD_IPV6_NEXT_HDR_SIZE 1
+
+/***************************** ICMP fields *********************************/
+#define NH_FLD_ICMP_TYPE (1)
+#define NH_FLD_ICMP_CODE (NH_FLD_ICMP_TYPE << 1)
+#define NH_FLD_ICMP_CKSUM (NH_FLD_ICMP_TYPE << 2)
+#define NH_FLD_ICMP_ID (NH_FLD_ICMP_TYPE << 3)
+#define NH_FLD_ICMP_SQ_NUM (NH_FLD_ICMP_TYPE << 4)
+#define NH_FLD_ICMP_ALL_FIELDS ((NH_FLD_ICMP_TYPE << 5) - 1)
+
+#define NH_FLD_ICMP_CODE_SIZE 1
+#define NH_FLD_ICMP_TYPE_SIZE 1
+
+/***************************** IGMP fields *********************************/
+#define NH_FLD_IGMP_VERSION (1)
+#define NH_FLD_IGMP_TYPE (NH_FLD_IGMP_VERSION << 1)
+#define NH_FLD_IGMP_CKSUM (NH_FLD_IGMP_VERSION << 2)
+#define NH_FLD_IGMP_DATA (NH_FLD_IGMP_VERSION << 3)
+#define NH_FLD_IGMP_ALL_FIELDS ((NH_FLD_IGMP_VERSION << 4) - 1)
+
+/***************************** TCP fields **********************************/
+#define NH_FLD_TCP_PORT_SRC (1)
+#define NH_FLD_TCP_PORT_DST (NH_FLD_TCP_PORT_SRC << 1)
+#define NH_FLD_TCP_SEQ (NH_FLD_TCP_PORT_SRC << 2)
+#define NH_FLD_TCP_ACK (NH_FLD_TCP_PORT_SRC << 3)
+#define NH_FLD_TCP_OFFSET (NH_FLD_TCP_PORT_SRC << 4)
+#define NH_FLD_TCP_FLAGS (NH_FLD_TCP_PORT_SRC << 5)
+#define NH_FLD_TCP_WINDOW (NH_FLD_TCP_PORT_SRC << 6)
+#define NH_FLD_TCP_CKSUM (NH_FLD_TCP_PORT_SRC << 7)
+#define NH_FLD_TCP_URGPTR (NH_FLD_TCP_PORT_SRC << 8)
+#define NH_FLD_TCP_OPTS (NH_FLD_TCP_PORT_SRC << 9)
+#define NH_FLD_TCP_OPTS_COUNT (NH_FLD_TCP_PORT_SRC << 10)
+#define NH_FLD_TCP_ALL_FIELDS ((NH_FLD_TCP_PORT_SRC << 11) - 1)
+
+#define NH_FLD_TCP_PORT_SIZE 2
+
+/***************************** UDP fields **********************************/
+#define NH_FLD_UDP_PORT_SRC (1)
+#define NH_FLD_UDP_PORT_DST (NH_FLD_UDP_PORT_SRC << 1)
+#define NH_FLD_UDP_LEN (NH_FLD_UDP_PORT_SRC << 2)
+#define NH_FLD_UDP_CKSUM (NH_FLD_UDP_PORT_SRC << 3)
+#define NH_FLD_UDP_ALL_FIELDS ((NH_FLD_UDP_PORT_SRC << 4) - 1)
+
+#define NH_FLD_UDP_PORT_SIZE 2
+
+/*************************** UDP-lite fields *******************************/
+#define NH_FLD_UDP_LITE_PORT_SRC (1)
+#define NH_FLD_UDP_LITE_PORT_DST (NH_FLD_UDP_LITE_PORT_SRC << 1)
+#define NH_FLD_UDP_LITE_ALL_FIELDS \
+ ((NH_FLD_UDP_LITE_PORT_SRC << 2) - 1)
+
+#define NH_FLD_UDP_LITE_PORT_SIZE 2
+
+/*************************** UDP-encap-ESP fields **************************/
+#define NH_FLD_UDP_ENC_ESP_PORT_SRC (1)
+#define NH_FLD_UDP_ENC_ESP_PORT_DST (NH_FLD_UDP_ENC_ESP_PORT_SRC << 1)
+#define NH_FLD_UDP_ENC_ESP_LEN (NH_FLD_UDP_ENC_ESP_PORT_SRC << 2)
+#define NH_FLD_UDP_ENC_ESP_CKSUM (NH_FLD_UDP_ENC_ESP_PORT_SRC << 3)
+#define NH_FLD_UDP_ENC_ESP_SPI (NH_FLD_UDP_ENC_ESP_PORT_SRC << 4)
+#define NH_FLD_UDP_ENC_ESP_SEQUENCE_NUM (NH_FLD_UDP_ENC_ESP_PORT_SRC << 5)
+#define NH_FLD_UDP_ENC_ESP_ALL_FIELDS \
+ ((NH_FLD_UDP_ENC_ESP_PORT_SRC << 6) - 1)
+
+#define NH_FLD_UDP_ENC_ESP_PORT_SIZE 2
+#define NH_FLD_UDP_ENC_ESP_SPI_SIZE 4
+
+/***************************** SCTP fields *********************************/
+#define NH_FLD_SCTP_PORT_SRC (1)
+#define NH_FLD_SCTP_PORT_DST (NH_FLD_SCTP_PORT_SRC << 1)
+#define NH_FLD_SCTP_VER_TAG (NH_FLD_SCTP_PORT_SRC << 2)
+#define NH_FLD_SCTP_CKSUM (NH_FLD_SCTP_PORT_SRC << 3)
+#define NH_FLD_SCTP_ALL_FIELDS ((NH_FLD_SCTP_PORT_SRC << 4) - 1)
+
+#define NH_FLD_SCTP_PORT_SIZE 2
+
+/***************************** DCCP fields *********************************/
+#define NH_FLD_DCCP_PORT_SRC (1)
+#define NH_FLD_DCCP_PORT_DST (NH_FLD_DCCP_PORT_SRC << 1)
+#define NH_FLD_DCCP_ALL_FIELDS ((NH_FLD_DCCP_PORT_SRC << 2) - 1)
+
+#define NH_FLD_DCCP_PORT_SIZE 2
+
+/***************************** IPHC fields *********************************/
+#define NH_FLD_IPHC_CID (1)
+#define NH_FLD_IPHC_CID_TYPE (NH_FLD_IPHC_CID << 1)
+#define NH_FLD_IPHC_HCINDEX (NH_FLD_IPHC_CID << 2)
+#define NH_FLD_IPHC_GEN (NH_FLD_IPHC_CID << 3)
+#define NH_FLD_IPHC_D_BIT (NH_FLD_IPHC_CID << 4)
+#define NH_FLD_IPHC_ALL_FIELDS ((NH_FLD_IPHC_CID << 5) - 1)
+
+/************************ SCTP chunk data fields ***************************/
+#define NH_FLD_SCTP_CHUNK_DATA_TYPE (1)
+#define NH_FLD_SCTP_CHUNK_DATA_FLAGS (NH_FLD_SCTP_CHUNK_DATA_TYPE << 1)
+#define NH_FLD_SCTP_CHUNK_DATA_LENGTH (NH_FLD_SCTP_CHUNK_DATA_TYPE << 2)
+#define NH_FLD_SCTP_CHUNK_DATA_TSN (NH_FLD_SCTP_CHUNK_DATA_TYPE << 3)
+#define NH_FLD_SCTP_CHUNK_DATA_STREAM_ID (NH_FLD_SCTP_CHUNK_DATA_TYPE << 4)
+#define NH_FLD_SCTP_CHUNK_DATA_STREAM_SQN (NH_FLD_SCTP_CHUNK_DATA_TYPE << 5)
+#define NH_FLD_SCTP_CHUNK_DATA_PAYLOAD_PID (NH_FLD_SCTP_CHUNK_DATA_TYPE << 6)
+#define NH_FLD_SCTP_CHUNK_DATA_UNORDERED (NH_FLD_SCTP_CHUNK_DATA_TYPE << 7)
+#define NH_FLD_SCTP_CHUNK_DATA_BEGGINING (NH_FLD_SCTP_CHUNK_DATA_TYPE << 8)
+#define NH_FLD_SCTP_CHUNK_DATA_END (NH_FLD_SCTP_CHUNK_DATA_TYPE << 9)
+#define NH_FLD_SCTP_CHUNK_DATA_ALL_FIELDS \
+ ((NH_FLD_SCTP_CHUNK_DATA_TYPE << 10) - 1)
+
+/*************************** L2TPV2 fields *********************************/
+#define NH_FLD_L2TPV2_TYPE_BIT (1)
+#define NH_FLD_L2TPV2_LENGTH_BIT (NH_FLD_L2TPV2_TYPE_BIT << 1)
+#define NH_FLD_L2TPV2_SEQUENCE_BIT (NH_FLD_L2TPV2_TYPE_BIT << 2)
+#define NH_FLD_L2TPV2_OFFSET_BIT (NH_FLD_L2TPV2_TYPE_BIT << 3)
+#define NH_FLD_L2TPV2_PRIORITY_BIT (NH_FLD_L2TPV2_TYPE_BIT << 4)
+#define NH_FLD_L2TPV2_VERSION (NH_FLD_L2TPV2_TYPE_BIT << 5)
+#define NH_FLD_L2TPV2_LEN (NH_FLD_L2TPV2_TYPE_BIT << 6)
+#define NH_FLD_L2TPV2_TUNNEL_ID (NH_FLD_L2TPV2_TYPE_BIT << 7)
+#define NH_FLD_L2TPV2_SESSION_ID (NH_FLD_L2TPV2_TYPE_BIT << 8)
+#define NH_FLD_L2TPV2_NS (NH_FLD_L2TPV2_TYPE_BIT << 9)
+#define NH_FLD_L2TPV2_NR (NH_FLD_L2TPV2_TYPE_BIT << 10)
+#define NH_FLD_L2TPV2_OFFSET_SIZE (NH_FLD_L2TPV2_TYPE_BIT << 11)
+#define NH_FLD_L2TPV2_FIRST_BYTE (NH_FLD_L2TPV2_TYPE_BIT << 12)
+#define NH_FLD_L2TPV2_ALL_FIELDS \
+ ((NH_FLD_L2TPV2_TYPE_BIT << 13) - 1)
+
+/*************************** L2TPV3 fields *********************************/
+#define NH_FLD_L2TPV3_CTRL_TYPE_BIT (1)
+#define NH_FLD_L2TPV3_CTRL_LENGTH_BIT (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 1)
+#define NH_FLD_L2TPV3_CTRL_SEQUENCE_BIT (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 2)
+#define NH_FLD_L2TPV3_CTRL_VERSION (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 3)
+#define NH_FLD_L2TPV3_CTRL_LENGTH (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 4)
+#define NH_FLD_L2TPV3_CTRL_CONTROL (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 5)
+#define NH_FLD_L2TPV3_CTRL_SENT (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 6)
+#define NH_FLD_L2TPV3_CTRL_RECV (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 7)
+#define NH_FLD_L2TPV3_CTRL_FIRST_BYTE (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 8)
+#define NH_FLD_L2TPV3_CTRL_ALL_FIELDS \
+ ((NH_FLD_L2TPV3_CTRL_TYPE_BIT << 9) - 1)
+
+#define NH_FLD_L2TPV3_SESS_TYPE_BIT (1)
+#define NH_FLD_L2TPV3_SESS_VERSION (NH_FLD_L2TPV3_SESS_TYPE_BIT << 1)
+#define NH_FLD_L2TPV3_SESS_ID (NH_FLD_L2TPV3_SESS_TYPE_BIT << 2)
+#define NH_FLD_L2TPV3_SESS_COOKIE (NH_FLD_L2TPV3_SESS_TYPE_BIT << 3)
+#define NH_FLD_L2TPV3_SESS_ALL_FIELDS \
+ ((NH_FLD_L2TPV3_SESS_TYPE_BIT << 4) - 1)
+
+/**************************** PPP fields ***********************************/
+#define NH_FLD_PPP_PID (1)
+#define NH_FLD_PPP_COMPRESSED (NH_FLD_PPP_PID << 1)
+#define NH_FLD_PPP_ALL_FIELDS ((NH_FLD_PPP_PID << 2) - 1)
+
+/************************** PPPoE fields ***********************************/
+#define NH_FLD_PPPOE_VER (1)
+#define NH_FLD_PPPOE_TYPE (NH_FLD_PPPOE_VER << 1)
+#define NH_FLD_PPPOE_CODE (NH_FLD_PPPOE_VER << 2)
+#define NH_FLD_PPPOE_SID (NH_FLD_PPPOE_VER << 3)
+#define NH_FLD_PPPOE_LEN (NH_FLD_PPPOE_VER << 4)
+#define NH_FLD_PPPOE_SESSION (NH_FLD_PPPOE_VER << 5)
+#define NH_FLD_PPPOE_PID (NH_FLD_PPPOE_VER << 6)
+#define NH_FLD_PPPOE_ALL_FIELDS ((NH_FLD_PPPOE_VER << 7) - 1)
+
+/************************* PPP-Mux fields **********************************/
+#define NH_FLD_PPPMUX_PID (1)
+#define NH_FLD_PPPMUX_CKSUM (NH_FLD_PPPMUX_PID << 1)
+#define NH_FLD_PPPMUX_COMPRESSED (NH_FLD_PPPMUX_PID << 2)
+#define NH_FLD_PPPMUX_ALL_FIELDS ((NH_FLD_PPPMUX_PID << 3) - 1)
+
+/*********************** PPP-Mux sub-frame fields **************************/
+#define NH_FLD_PPPMUX_SUBFRM_PFF (1)
+#define NH_FLD_PPPMUX_SUBFRM_LXT (NH_FLD_PPPMUX_SUBFRM_PFF << 1)
+#define NH_FLD_PPPMUX_SUBFRM_LEN (NH_FLD_PPPMUX_SUBFRM_PFF << 2)
+#define NH_FLD_PPPMUX_SUBFRM_PID (NH_FLD_PPPMUX_SUBFRM_PFF << 3)
+#define NH_FLD_PPPMUX_SUBFRM_USE_PID (NH_FLD_PPPMUX_SUBFRM_PFF << 4)
+#define NH_FLD_PPPMUX_SUBFRM_ALL_FIELDS \
+ ((NH_FLD_PPPMUX_SUBFRM_PFF << 5) - 1)
+
+/*************************** LLC fields ************************************/
+#define NH_FLD_LLC_DSAP (1)
+#define NH_FLD_LLC_SSAP (NH_FLD_LLC_DSAP << 1)
+#define NH_FLD_LLC_CTRL (NH_FLD_LLC_DSAP << 2)
+#define NH_FLD_LLC_ALL_FIELDS ((NH_FLD_LLC_DSAP << 3) - 1)
+
+/*************************** NLPID fields **********************************/
+#define NH_FLD_NLPID_NLPID (1)
+#define NH_FLD_NLPID_ALL_FIELDS ((NH_FLD_NLPID_NLPID << 1) - 1)
+
+/*************************** SNAP fields ***********************************/
+#define NH_FLD_SNAP_OUI (1)
+#define NH_FLD_SNAP_PID (NH_FLD_SNAP_OUI << 1)
+#define NH_FLD_SNAP_ALL_FIELDS ((NH_FLD_SNAP_OUI << 2) - 1)
+
+/*************************** LLC SNAP fields *******************************/
+#define NH_FLD_LLC_SNAP_TYPE (1)
+#define NH_FLD_LLC_SNAP_ALL_FIELDS ((NH_FLD_LLC_SNAP_TYPE << 1) - 1)
+
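+/***************************** ARP fields **********************************/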
+#define NH_FLD_ARP_HTYPE (1)
+#define NH_FLD_ARP_PTYPE (NH_FLD_ARP_HTYPE << 1)
+#define NH_FLD_ARP_HLEN (NH_FLD_ARP_HTYPE << 2)
+#define NH_FLD_ARP_PLEN (NH_FLD_ARP_HTYPE << 3)
+#define NH_FLD_ARP_OPER (NH_FLD_ARP_HTYPE << 4)
+#define NH_FLD_ARP_SHA (NH_FLD_ARP_HTYPE << 5)
+#define NH_FLD_ARP_SPA (NH_FLD_ARP_HTYPE << 6)
+#define NH_FLD_ARP_THA (NH_FLD_ARP_HTYPE << 7)
+#define NH_FLD_ARP_TPA (NH_FLD_ARP_HTYPE << 8)
+#define NH_FLD_ARP_ALL_FIELDS ((NH_FLD_ARP_HTYPE << 9) - 1)
+
+/*************************** RFC2684 fields ********************************/
+#define NH_FLD_RFC2684_LLC (1)
+#define NH_FLD_RFC2684_NLPID (NH_FLD_RFC2684_LLC << 1)
+#define NH_FLD_RFC2684_OUI (NH_FLD_RFC2684_LLC << 2)
+#define NH_FLD_RFC2684_PID (NH_FLD_RFC2684_LLC << 3)
+#define NH_FLD_RFC2684_VPN_OUI (NH_FLD_RFC2684_LLC << 4)
+#define NH_FLD_RFC2684_VPN_IDX (NH_FLD_RFC2684_LLC << 5)
+#define NH_FLD_RFC2684_ALL_FIELDS ((NH_FLD_RFC2684_LLC << 6) - 1)
+
+/*************************** User defined fields ***************************/
+#define NH_FLD_USER_DEFINED_SRCPORT (1)
+#define NH_FLD_USER_DEFINED_PCDID (NH_FLD_USER_DEFINED_SRCPORT << 1)
+#define NH_FLD_USER_DEFINED_ALL_FIELDS \
+ ((NH_FLD_USER_DEFINED_SRCPORT << 2) - 1)
+
+/*************************** Payload fields ********************************/
+#define NH_FLD_PAYLOAD_BUFFER (1)
+#define NH_FLD_PAYLOAD_SIZE (NH_FLD_PAYLOAD_BUFFER << 1)
+#define NH_FLD_MAX_FRM_SIZE (NH_FLD_PAYLOAD_BUFFER << 2)
+#define NH_FLD_MIN_FRM_SIZE (NH_FLD_PAYLOAD_BUFFER << 3)
+#define NH_FLD_PAYLOAD_TYPE (NH_FLD_PAYLOAD_BUFFER << 4)
+#define NH_FLD_FRAME_SIZE (NH_FLD_PAYLOAD_BUFFER << 5)
+#define NH_FLD_PAYLOAD_ALL_FIELDS ((NH_FLD_PAYLOAD_BUFFER << 6) - 1)
+
+/*************************** GRE fields ************************************/
+#define NH_FLD_GRE_TYPE (1)
+#define NH_FLD_GRE_ALL_FIELDS ((NH_FLD_GRE_TYPE << 1) - 1)
+
+/*************************** MINENCAP fields *******************************/
+#define NH_FLD_MINENCAP_SRC_IP (1)
+#define NH_FLD_MINENCAP_DST_IP (NH_FLD_MINENCAP_SRC_IP << 1)
+#define NH_FLD_MINENCAP_TYPE (NH_FLD_MINENCAP_SRC_IP << 2)
+#define NH_FLD_MINENCAP_ALL_FIELDS \
+ ((NH_FLD_MINENCAP_SRC_IP << 3) - 1)
+
+/*************************** IPSEC AH fields *******************************/
+#define NH_FLD_IPSEC_AH_SPI (1)
+#define NH_FLD_IPSEC_AH_NH (NH_FLD_IPSEC_AH_SPI << 1)
+#define NH_FLD_IPSEC_AH_ALL_FIELDS ((NH_FLD_IPSEC_AH_SPI << 2) - 1)
+
+/*************************** IPSEC ESP fields ******************************/
+#define NH_FLD_IPSEC_ESP_SPI (1)
+#define NH_FLD_IPSEC_ESP_SEQUENCE_NUM (NH_FLD_IPSEC_ESP_SPI << 1)
+#define NH_FLD_IPSEC_ESP_ALL_FIELDS ((NH_FLD_IPSEC_ESP_SPI << 2) - 1)
+
+#define NH_FLD_IPSEC_ESP_SPI_SIZE 4
+
+/*************************** MPLS fields ***********************************/
+#define NH_FLD_MPLS_LABEL_STACK (1)
+#define NH_FLD_MPLS_LABEL_STACK_ALL_FIELDS \
+ ((NH_FLD_MPLS_LABEL_STACK << 1) - 1)
+
+/*************************** MACSEC fields *********************************/
+#define NH_FLD_MACSEC_SECTAG (1)
+#define NH_FLD_MACSEC_ALL_FIELDS ((NH_FLD_MACSEC_SECTAG << 1) - 1)
+
+/*************************** GTP fields ************************************/
+#define NH_FLD_GTP_TEID (1)
+
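+/*
+ * The NH_FLD_* values above are single-bit masks; masks belonging to the
+ * same protocol may be OR'ed together to select several header fields at
+ * once, e.g. (NH_FLD_UDP_PORT_SRC | NH_FLD_UDP_PORT_DST) selects both
+ * UDP ports.
+ */
+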
+/* Protocol options */
+
+/* Ethernet options */
+#define NH_OPT_ETH_BROADCAST 1
+#define NH_OPT_ETH_MULTICAST 2
+#define NH_OPT_ETH_UNICAST 3
+#define NH_OPT_ETH_BPDU 4
+
+/* also applicable for broadcast */
+#define NH_ETH_IS_MULTICAST_ADDR(addr) ((addr)[0] & 0x01)
+
+/* VLAN options */
+#define NH_OPT_VLAN_CFI 1
+
+/* IPV4 options */
+#define NH_OPT_IPV4_UNICAST 1
+#define NH_OPT_IPV4_MULTICAST 2
+#define NH_OPT_IPV4_BROADCAST 3
+#define NH_OPT_IPV4_OPTION 4
+#define NH_OPT_IPV4_FRAG 5
+#define NH_OPT_IPV4_INITIAL_FRAG 6
+
+/* IPV6 options */
+#define NH_OPT_IPV6_UNICAST 1
+#define NH_OPT_IPV6_MULTICAST 2
+#define NH_OPT_IPV6_OPTION 3
+#define NH_OPT_IPV6_FRAG 4
+#define NH_OPT_IPV6_INITIAL_FRAG 5
+
+/* General IP options (may be used for any version) */
+#define NH_OPT_IP_FRAG 1
+#define NH_OPT_IP_INITIAL_FRAG 2
+#define NH_OPT_IP_OPTION 3
+
+/* Minenc. options */
+#define NH_OPT_MINENCAP_SRC_ADDR_PRESENT 1
+
+/* GRE options */
+#define NH_OPT_GRE_ROUTING_PRESENT 1
+
+/* TCP options */
+#define NH_OPT_TCP_OPTIONS 1
+#define NH_OPT_TCP_CONTROL_HIGH_BITS 2
+#define NH_OPT_TCP_CONTROL_LOW_BITS 3
+
+/* CAPWAP options */
+#define NH_OPT_CAPWAP_DTLS 1
+
+enum net_prot {
+ NET_PROT_NONE = 0,
+ NET_PROT_PAYLOAD,
+ NET_PROT_ETH,
+ NET_PROT_VLAN,
+ NET_PROT_IPV4,
+ NET_PROT_IPV6,
+ NET_PROT_IP,
+ NET_PROT_TCP,
+ NET_PROT_UDP,
+ NET_PROT_UDP_LITE,
+ NET_PROT_IPHC,
+ NET_PROT_SCTP,
+ NET_PROT_SCTP_CHUNK_DATA,
+ NET_PROT_PPPOE,
+ NET_PROT_PPP,
+ NET_PROT_PPPMUX,
+ NET_PROT_PPPMUX_SUBFRM,
+ NET_PROT_L2TPV2,
+ NET_PROT_L2TPV3_CTRL,
+ NET_PROT_L2TPV3_SESS,
+ NET_PROT_LLC,
+ NET_PROT_LLC_SNAP,
+ NET_PROT_NLPID,
+ NET_PROT_SNAP,
+ NET_PROT_MPLS,
+ NET_PROT_IPSEC_AH,
+ NET_PROT_IPSEC_ESP,
+ NET_PROT_UDP_ENC_ESP, /* RFC 3948 */
+ NET_PROT_MACSEC,
+ NET_PROT_GRE,
+ NET_PROT_MINENCAP,
+ NET_PROT_DCCP,
+ NET_PROT_ICMP,
+ NET_PROT_IGMP,
+ NET_PROT_ARP,
+ NET_PROT_CAPWAP_DATA,
+ NET_PROT_CAPWAP_CTRL,
+ NET_PROT_RFC2684,
+ NET_PROT_ICMPV6,
+ NET_PROT_FCOE,
+ NET_PROT_FIP,
+ NET_PROT_ISCSI,
+ NET_PROT_GTP,
+ NET_PROT_USER_DEFINED_L2,
+ NET_PROT_USER_DEFINED_L3,
+ NET_PROT_USER_DEFINED_L4,
+ NET_PROT_USER_DEFINED_L5,
+ NET_PROT_USER_DEFINED_SHIM1,
+ NET_PROT_USER_DEFINED_SHIM2,
+
+ NET_PROT_DUMMY_LAST
+};
+
+/* IEEE 802.1Q */
+#define NH_IEEE8021Q_ETYPE 0x8100
+#define NH_IEEE8021Q_HDR(etype, pcp, dei, vlan_id) \
+ ((((u32)((etype) & 0xFFFF)) << 16) | \
+ (((u32)((pcp) & 0x07)) << 13) | \
+ (((u32)((dei) & 0x01)) << 12) | \
+ (((u32)((vlan_id) & 0xFFF))))
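+
+/*
+ * Illustrative use of the helper above: a tag word for VLAN ID 100 with
+ * PCP 3 and DEI 0 could be built as
+ * NH_IEEE8021Q_HDR(NH_IEEE8021Q_ETYPE, 3, 0, 100).
+ */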
+
+#endif /* __FSL_NET_H */
diff --git a/drivers/staging/fsl-mc/bus/Kconfig b/drivers/staging/fsl-mc/bus/Kconfig
index 5c009ab48f00..a10aaf03f314 100644
--- a/drivers/staging/fsl-mc/bus/Kconfig
+++ b/drivers/staging/fsl-mc/bus/Kconfig
@@ -15,3 +15,13 @@ config FSL_MC_BUS
architecture. The fsl-mc bus driver handles discovery of
DPAA2 objects (which are represented as Linux devices) and
binding objects to drivers.
+
+config FSL_MC_DPIO
+ tristate "QorIQ DPAA2 DPIO driver"
+ depends on FSL_MC_BUS
+ help
+ Driver for the DPAA2 DPIO object. A DPIO provides queue and
+ buffer management facilities for software to interact with
+ other DPAA2 objects. This driver does not expose the DPIO
+ objects individually, but groups them under a service layer
+ API.
diff --git a/drivers/staging/fsl-mc/bus/Makefile b/drivers/staging/fsl-mc/bus/Makefile
index 38716fd5cb58..659eccf52a4f 100644
--- a/drivers/staging/fsl-mc/bus/Makefile
+++ b/drivers/staging/fsl-mc/bus/Makefile
@@ -17,4 +17,8 @@ mc-bus-driver-objs := fsl-mc-bus.o \
fsl-mc-msi.o \
irq-gic-v3-its-fsl-mc-msi.o \
dpmcp.o \
- dpbp.o
+ dpbp.o \
+ dpcon.o
+
+# MC DPIO driver
+obj-$(CONFIG_FSL_MC_DPIO) += dpio/
diff --git a/drivers/staging/fsl-mc/bus/dpcon-cmd.h b/drivers/staging/fsl-mc/bus/dpcon-cmd.h
index d0a5e194c5e1..2bb66988ecf6 100644
--- a/drivers/staging/fsl-mc/bus/dpcon-cmd.h
+++ b/drivers/staging/fsl-mc/bus/dpcon-cmd.h
@@ -33,30 +33,53 @@
#define _FSL_DPCON_CMD_H
/* DPCON Version */
-#define DPCON_VER_MAJOR 2
-#define DPCON_VER_MINOR 1
+#define DPCON_VER_MAJOR 3
+#define DPCON_VER_MINOR 2
+
+/* Command versioning */
+#define DPCON_CMD_BASE_VERSION 1
+#define DPCON_CMD_ID_OFFSET 4
+
+#define DPCON_CMD(id) (((id) << DPCON_CMD_ID_OFFSET) | DPCON_CMD_BASE_VERSION)
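+
+/*
+ * For example, DPCON_CMD(0x002), used for DPCON_CMDID_ENABLE below,
+ * expands to (0x002 << 4) | 1, i.e. command ID 0x002 encoded at base
+ * command version 1.
+ */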
/* Command IDs */
-#define DPCON_CMDID_CLOSE 0x800
-#define DPCON_CMDID_OPEN 0x808
-#define DPCON_CMDID_CREATE 0x908
-#define DPCON_CMDID_DESTROY 0x900
-
-#define DPCON_CMDID_ENABLE 0x002
-#define DPCON_CMDID_DISABLE 0x003
-#define DPCON_CMDID_GET_ATTR 0x004
-#define DPCON_CMDID_RESET 0x005
-#define DPCON_CMDID_IS_ENABLED 0x006
-
-#define DPCON_CMDID_SET_IRQ 0x010
-#define DPCON_CMDID_GET_IRQ 0x011
-#define DPCON_CMDID_SET_IRQ_ENABLE 0x012
-#define DPCON_CMDID_GET_IRQ_ENABLE 0x013
-#define DPCON_CMDID_SET_IRQ_MASK 0x014
-#define DPCON_CMDID_GET_IRQ_MASK 0x015
-#define DPCON_CMDID_GET_IRQ_STATUS 0x016
-#define DPCON_CMDID_CLEAR_IRQ_STATUS 0x017
-
-#define DPCON_CMDID_SET_NOTIFICATION 0x100
+#define DPCON_CMDID_CLOSE DPCON_CMD(0x800)
+#define DPCON_CMDID_OPEN DPCON_CMD(0x808)
+#define DPCON_CMDID_GET_API_VERSION DPCON_CMD(0xa08)
+
+#define DPCON_CMDID_ENABLE DPCON_CMD(0x002)
+#define DPCON_CMDID_DISABLE DPCON_CMD(0x003)
+#define DPCON_CMDID_GET_ATTR DPCON_CMD(0x004)
+#define DPCON_CMDID_RESET DPCON_CMD(0x005)
+#define DPCON_CMDID_IS_ENABLED DPCON_CMD(0x006)
+
+#define DPCON_CMDID_SET_NOTIFICATION DPCON_CMD(0x100)
+
+struct dpcon_cmd_open {
+ __le32 dpcon_id;
+};
+
+#define DPCON_ENABLE 1
+
+struct dpcon_rsp_is_enabled {
+ u8 enabled;
+};
+
+struct dpcon_rsp_get_attr {
+ /* response word 0 */
+ __le32 id;
+ __le16 qbman_ch_id;
+ u8 num_priorities;
+ u8 pad;
+};
+
+struct dpcon_cmd_set_notification {
+ /* cmd word 0 */
+ __le32 dpio_id;
+ u8 priority;
+ u8 pad[3];
+ /* cmd word 1 */
+ __le64 user_ctx;
+};
#endif /* _FSL_DPCON_CMD_H */
diff --git a/drivers/staging/fsl-mc/bus/dpcon.c b/drivers/staging/fsl-mc/bus/dpcon.c
new file mode 100644
index 000000000000..eb713578b817
--- /dev/null
+++ b/drivers/staging/fsl-mc/bus/dpcon.c
@@ -0,0 +1,317 @@
+/* Copyright 2013-2016 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#include "../include/mc-sys.h"
+#include "../include/mc-cmd.h"
+#include "../include/dpcon.h"
+
+#include "dpcon-cmd.h"
+
+/**
+ * dpcon_open() - Open a control session for the specified object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @dpcon_id: DPCON unique ID
+ * @token: Returned token; use in subsequent API calls
+ *
+ * This function can be used to open a control session for an
+ * already created object; an object may have been declared in
+ * the DPL or by calling the dpcon_create() function.
+ * This function returns a unique authentication token,
+ * associated with the specific object ID and the specific MC
+ * portal; this token must be used in all subsequent commands for
+ * this specific object.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpcon_open(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ int dpcon_id,
+ u16 *token)
+{
+ struct mc_command cmd = { 0 };
+ struct dpcon_cmd_open *dpcon_cmd;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_OPEN,
+ cmd_flags,
+ 0);
+ dpcon_cmd = (struct dpcon_cmd_open *)cmd.params;
+ dpcon_cmd->dpcon_id = cpu_to_le32(dpcon_id);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ *token = mc_cmd_hdr_read_token(&cmd);
+
+ return 0;
+}
+EXPORT_SYMBOL(dpcon_open);
+
+/**
+ * dpcon_close() - Close the control session of the object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPCON object
+ *
+ * After this function is called, no further operations are
+ * allowed on the object without opening a new control session.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpcon_close(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_CLOSE,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+EXPORT_SYMBOL(dpcon_close);
+
+/**
+ * dpcon_enable() - Enable the DPCON
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPCON object
+ *
+ * Return: '0' on Success; Error code otherwise
+ */
+int dpcon_enable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_ENABLE,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+EXPORT_SYMBOL(dpcon_enable);
+
+/**
+ * dpcon_disable() - Disable the DPCON
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPCON object
+ *
+ * Return: '0' on Success; Error code otherwise
+ */
+int dpcon_disable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_DISABLE,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+EXPORT_SYMBOL(dpcon_disable);
+
+/**
+ * dpcon_is_enabled() - Check if the DPCON is enabled.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPCON object
+ * @en: Returns '1' if object is enabled; '0' otherwise
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpcon_is_enabled(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ int *en)
+{
+ struct mc_command cmd = { 0 };
+ struct dpcon_rsp_is_enabled *dpcon_rsp;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_IS_ENABLED,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ dpcon_rsp = (struct dpcon_rsp_is_enabled *)cmd.params;
+ *en = dpcon_rsp->enabled & DPCON_ENABLE;
+
+ return 0;
+}
+EXPORT_SYMBOL(dpcon_is_enabled);
+
+/**
+ * dpcon_reset() - Reset the DPCON, returns the object to initial state.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPCON object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpcon_reset(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_RESET,
+ cmd_flags, token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+EXPORT_SYMBOL(dpcon_reset);
+
+/**
+ * dpcon_get_attributes() - Retrieve DPCON attributes.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPCON object
+ * @attr: Object's attributes
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpcon_get_attributes(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpcon_attr *attr)
+{
+ struct mc_command cmd = { 0 };
+ struct dpcon_rsp_get_attr *dpcon_rsp;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_GET_ATTR,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ dpcon_rsp = (struct dpcon_rsp_get_attr *)cmd.params;
+ attr->id = le32_to_cpu(dpcon_rsp->id);
+ attr->qbman_ch_id = le16_to_cpu(dpcon_rsp->qbman_ch_id);
+ attr->num_priorities = dpcon_rsp->num_priorities;
+
+ return 0;
+}
+EXPORT_SYMBOL(dpcon_get_attributes);
+
+/**
+ * dpcon_set_notification() - Set DPCON notification destination
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPCON object
+ * @cfg: Notification parameters
+ *
+ * Return: '0' on Success; Error code otherwise
+ */
+int dpcon_set_notification(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpcon_notification_cfg *cfg)
+{
+ struct mc_command cmd = { 0 };
+ struct dpcon_cmd_set_notification *dpcon_cmd;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_SET_NOTIFICATION,
+ cmd_flags,
+ token);
+ dpcon_cmd = (struct dpcon_cmd_set_notification *)cmd.params;
+ dpcon_cmd->dpio_id = cpu_to_le32(cfg->dpio_id);
+ dpcon_cmd->priority = cfg->priority;
+ dpcon_cmd->user_ctx = cpu_to_le64(cfg->user_ctx);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+EXPORT_SYMBOL(dpcon_set_notification);
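+
+/*
+ * Rough usage sketch (error handling omitted) for a consumer of this API.
+ * "mc_io", "dpcon_id", "my_dpio_id" and "my_ctx" are placeholders for
+ * values the caller obtains elsewhere:
+ *
+ *	u16 token;
+ *	struct dpcon_attr attr;
+ *	struct dpcon_notification_cfg cfg = {
+ *		.dpio_id = my_dpio_id,
+ *		.priority = 0,
+ *		.user_ctx = my_ctx,
+ *	};
+ *
+ *	dpcon_open(mc_io, 0, dpcon_id, &token);
+ *	dpcon_get_attributes(mc_io, 0, token, &attr);
+ *	dpcon_set_notification(mc_io, 0, token, &cfg);
+ *	dpcon_enable(mc_io, 0, token);
+ *	...
+ *	dpcon_disable(mc_io, 0, token);
+ *	dpcon_close(mc_io, 0, token);
+ */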
+
+/**
+ * dpcon_get_api_version() - Get Data Path Concentrator API version
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @major_ver: Major version of DPCON API
+ * @minor_ver: Minor version of DPCON API
+ *
+ * Return: '0' on Success; Error code otherwise
+ */
+int dpcon_get_api_version(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 *major_ver,
+ u16 *minor_ver)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_GET_API_VERSION,
+ cmd_flags, 0);
+
+ /* send command to mc */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ mc_cmd_read_api_version(&cmd, major_ver, minor_ver);
+
+ return 0;
+}
+EXPORT_SYMBOL(dpcon_get_api_version);
diff --git a/drivers/staging/fsl-mc/bus/dpio/Makefile b/drivers/staging/fsl-mc/bus/dpio/Makefile
new file mode 100644
index 000000000000..837d3303e11d
--- /dev/null
+++ b/drivers/staging/fsl-mc/bus/dpio/Makefile
@@ -0,0 +1,9 @@
+#
+# QorIQ DPAA2 DPIO driver
+#
+
+subdir-ccflags-y := -Werror
+
+obj-$(CONFIG_FSL_MC_DPIO) += fsl-mc-dpio.o
+
+fsl-mc-dpio-objs := dpio.o qbman-portal.o dpio-service.o dpio-driver.o
diff --git a/drivers/staging/fsl-mc/bus/dpio/dpio-cmd.h b/drivers/staging/fsl-mc/bus/dpio/dpio-cmd.h
new file mode 100644
index 000000000000..b2dc6e766f09
--- /dev/null
+++ b/drivers/staging/fsl-mc/bus/dpio/dpio-cmd.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2016 NXP
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _FSL_DPIO_CMD_H
+#define _FSL_DPIO_CMD_H
+
+/* DPIO Version */
+#define DPIO_VER_MAJOR 4
+#define DPIO_VER_MINOR 2
+
+/* Command Versioning */
+
+#define DPIO_CMD_ID_OFFSET 4
+#define DPIO_CMD_BASE_VERSION 1
+
+#define DPIO_CMD(id) (((id) << DPIO_CMD_ID_OFFSET) | DPIO_CMD_BASE_VERSION)
+
+/* Command IDs */
+#define DPIO_CMDID_CLOSE DPIO_CMD(0x800)
+#define DPIO_CMDID_OPEN DPIO_CMD(0x803)
+#define DPIO_CMDID_GET_API_VERSION DPIO_CMD(0xa03)
+#define DPIO_CMDID_ENABLE DPIO_CMD(0x002)
+#define DPIO_CMDID_DISABLE DPIO_CMD(0x003)
+#define DPIO_CMDID_GET_ATTR DPIO_CMD(0x004)
+
+struct dpio_cmd_open {
+ __le32 dpio_id;
+};
+
+#define DPIO_CHANNEL_MODE_MASK 0x3
+
+struct dpio_rsp_get_attr {
+ /* cmd word 0 */
+ __le32 id;
+ __le16 qbman_portal_id;
+ u8 num_priorities;
+ u8 channel_mode;
+ /* cmd word 1 */
+ __le64 qbman_portal_ce_addr;
+ /* cmd word 2 */
+ __le64 qbman_portal_ci_addr;
+ /* cmd word 3 */
+ __le32 qbman_version;
+};
+
+#endif /* _FSL_DPIO_CMD_H */
diff --git a/drivers/staging/fsl-mc/bus/dpio/dpio-driver.c b/drivers/staging/fsl-mc/bus/dpio/dpio-driver.c
new file mode 100644
index 000000000000..e36da20a2796
--- /dev/null
+++ b/drivers/staging/fsl-mc/bus/dpio/dpio-driver.c
@@ -0,0 +1,296 @@
+/*
+ * Copyright 2014-2016 Freescale Semiconductor Inc.
+ * Copyright NXP 2016
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/msi.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+
+#include "../../include/mc.h"
+#include "../../include/dpaa2-io.h"
+
+#include "qbman-portal.h"
+#include "dpio.h"
+#include "dpio-cmd.h"
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Freescale Semiconductor, Inc");
+MODULE_DESCRIPTION("DPIO Driver");
+
+struct dpio_priv {
+ struct dpaa2_io *io;
+};
+
+static irqreturn_t dpio_irq_handler(int irq_num, void *arg)
+{
+ struct device *dev = (struct device *)arg;
+ struct dpio_priv *priv = dev_get_drvdata(dev);
+
+ return dpaa2_io_irq(priv->io);
+}
+
+static void unregister_dpio_irq_handlers(struct fsl_mc_device *dpio_dev)
+{
+ struct fsl_mc_device_irq *irq;
+
+ irq = dpio_dev->irqs[0];
+
+ /* clear the affinity hint */
+ irq_set_affinity_hint(irq->msi_desc->irq, NULL);
+}
+
+static int register_dpio_irq_handlers(struct fsl_mc_device *dpio_dev, int cpu)
+{
+ struct dpio_priv *priv;
+ int error;
+ struct fsl_mc_device_irq *irq;
+ cpumask_t mask;
+
+ priv = dev_get_drvdata(&dpio_dev->dev);
+
+ irq = dpio_dev->irqs[0];
+ error = devm_request_irq(&dpio_dev->dev,
+ irq->msi_desc->irq,
+ dpio_irq_handler,
+ 0,
+ dev_name(&dpio_dev->dev),
+ &dpio_dev->dev);
+ if (error < 0) {
+ dev_err(&dpio_dev->dev,
+ "devm_request_irq() failed: %d\n",
+ error);
+ return error;
+ }
+
+ /* set the affinity hint */
+ cpumask_clear(&mask);
+ cpumask_set_cpu(cpu, &mask);
+ if (irq_set_affinity_hint(irq->msi_desc->irq, &mask))
+ dev_err(&dpio_dev->dev,
+ "irq_set_affinity failed irq %d cpu %d\n",
+ irq->msi_desc->irq, cpu);
+
+ return 0;
+}
+
+static int dpaa2_dpio_probe(struct fsl_mc_device *dpio_dev)
+{
+ struct dpio_attr dpio_attrs;
+ struct dpaa2_io_desc desc;
+ struct dpio_priv *priv;
+ int err = -ENOMEM;
+ struct device *dev = &dpio_dev->dev;
+ static int next_cpu = -1;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ goto err_priv_alloc;
+
+ dev_set_drvdata(dev, priv);
+
+ err = fsl_mc_portal_allocate(dpio_dev, 0, &dpio_dev->mc_io);
+ if (err) {
+ dev_dbg(dev, "MC portal allocation failed\n");
+ err = -EPROBE_DEFER;
+ goto err_mcportal;
+ }
+
+ err = dpio_open(dpio_dev->mc_io, 0, dpio_dev->obj_desc.id,
+ &dpio_dev->mc_handle);
+ if (err) {
+ dev_err(dev, "dpio_open() failed\n");
+ goto err_open;
+ }
+
+ err = dpio_get_attributes(dpio_dev->mc_io, 0, dpio_dev->mc_handle,
+ &dpio_attrs);
+ if (err) {
+ dev_err(dev, "dpio_get_attributes() failed %d\n", err);
+ goto err_get_attr;
+ }
+ desc.qman_version = dpio_attrs.qbman_version;
+
+ err = dpio_enable(dpio_dev->mc_io, 0, dpio_dev->mc_handle);
+ if (err) {
+ dev_err(dev, "dpio_enable() failed %d\n", err);
+ goto err_get_attr;
+ }
+
+ /* initialize DPIO descriptor */
+ desc.receives_notifications = dpio_attrs.num_priorities ? 1 : 0;
+ desc.has_8prio = dpio_attrs.num_priorities == 8 ? 1 : 0;
+ desc.dpio_id = dpio_dev->obj_desc.id;
+
+ /* get the cpu to use for the affinity hint */
+ if (next_cpu == -1)
+ next_cpu = cpumask_first(cpu_online_mask);
+ else
+ next_cpu = cpumask_next(next_cpu, cpu_online_mask);
+
+ if (!cpu_possible(next_cpu)) {
+ dev_err(dev, "probe failed. Number of DPIOs exceeds NR_CPUS.\n");
+ err = -ERANGE;
+ goto err_allocate_irqs;
+ }
+ desc.cpu = next_cpu;
+
+ /*
+ * Set the CENA regs to be the cache inhibited area of the portal to
+ * avoid coherency issues if a user migrates to another core.
+ */
+ desc.regs_cena = ioremap_wc(dpio_dev->regions[1].start,
+ resource_size(&dpio_dev->regions[1]));
+ desc.regs_cinh = ioremap(dpio_dev->regions[1].start,
+ resource_size(&dpio_dev->regions[1]));
+
+ err = fsl_mc_allocate_irqs(dpio_dev);
+ if (err) {
+ dev_err(dev, "fsl_mc_allocate_irqs failed. err=%d\n", err);
+ goto err_allocate_irqs;
+ }
+
+ err = register_dpio_irq_handlers(dpio_dev, desc.cpu);
+ if (err)
+ goto err_register_dpio_irq;
+
+ priv->io = dpaa2_io_create(&desc);
+ if (!priv->io) {
+ dev_err(dev, "dpaa2_io_create failed\n");
+ goto err_dpaa2_io_create;
+ }
+
+ dev_info(dev, "probed\n");
+ dev_dbg(dev, " receives_notifications = %d\n",
+ desc.receives_notifications);
+ dpio_close(dpio_dev->mc_io, 0, dpio_dev->mc_handle);
+ fsl_mc_portal_free(dpio_dev->mc_io);
+
+ return 0;
+
+err_dpaa2_io_create:
+ unregister_dpio_irq_handlers(dpio_dev);
+err_register_dpio_irq:
+ fsl_mc_free_irqs(dpio_dev);
+err_allocate_irqs:
+ dpio_disable(dpio_dev->mc_io, 0, dpio_dev->mc_handle);
+err_get_attr:
+ dpio_close(dpio_dev->mc_io, 0, dpio_dev->mc_handle);
+err_open:
+ fsl_mc_portal_free(dpio_dev->mc_io);
+err_mcportal:
+ dev_set_drvdata(dev, NULL);
+err_priv_alloc:
+ return err;
+}
+
+/* Tear down interrupts for a given DPIO object */
+static void dpio_teardown_irqs(struct fsl_mc_device *dpio_dev)
+{
+ unregister_dpio_irq_handlers(dpio_dev);
+ fsl_mc_free_irqs(dpio_dev);
+}
+
+static int dpaa2_dpio_remove(struct fsl_mc_device *dpio_dev)
+{
+ struct device *dev;
+ struct dpio_priv *priv;
+ int err;
+
+ dev = &dpio_dev->dev;
+ priv = dev_get_drvdata(dev);
+
+ dpaa2_io_down(priv->io);
+
+ dpio_teardown_irqs(dpio_dev);
+
+ err = fsl_mc_portal_allocate(dpio_dev, 0, &dpio_dev->mc_io);
+ if (err) {
+ dev_err(dev, "MC portal allocation failed\n");
+ goto err_mcportal;
+ }
+
+ err = dpio_open(dpio_dev->mc_io, 0, dpio_dev->obj_desc.id,
+ &dpio_dev->mc_handle);
+ if (err) {
+ dev_err(dev, "dpio_open() failed\n");
+ goto err_open;
+ }
+
+ dpio_disable(dpio_dev->mc_io, 0, dpio_dev->mc_handle);
+
+ dpio_close(dpio_dev->mc_io, 0, dpio_dev->mc_handle);
+
+ fsl_mc_portal_free(dpio_dev->mc_io);
+
+ dev_set_drvdata(dev, NULL);
+
+ return 0;
+
+err_open:
+ fsl_mc_portal_free(dpio_dev->mc_io);
+err_mcportal:
+ return err;
+}
+
+static const struct fsl_mc_device_id dpaa2_dpio_match_id_table[] = {
+ {
+ .vendor = FSL_MC_VENDOR_FREESCALE,
+ .obj_type = "dpio",
+ },
+ { .vendor = 0x0 }
+};
+
+static struct fsl_mc_driver dpaa2_dpio_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = dpaa2_dpio_probe,
+ .remove = dpaa2_dpio_remove,
+ .match_id_table = dpaa2_dpio_match_id_table
+};
+
+static int dpio_driver_init(void)
+{
+ return fsl_mc_driver_register(&dpaa2_dpio_driver);
+}
+
+static void dpio_driver_exit(void)
+{
+ fsl_mc_driver_unregister(&dpaa2_dpio_driver);
+}
+module_init(dpio_driver_init);
+module_exit(dpio_driver_exit);
diff --git a/drivers/staging/fsl-mc/bus/dpio/dpio-driver.txt b/drivers/staging/fsl-mc/bus/dpio/dpio-driver.txt
new file mode 100644
index 000000000000..0ba6771654f7
--- /dev/null
+++ b/drivers/staging/fsl-mc/bus/dpio/dpio-driver.txt
@@ -0,0 +1,135 @@
+Copyright 2016 NXP
+
+Introduction
+------------
+
+A DPAA2 DPIO (Data Path I/O) is a hardware object that provides
+interfaces to enqueue and dequeue frames to/from network interfaces
+and other accelerators. A DPIO also provides hardware buffer
+pool management for network interfaces.
+
+This document provides an overview of the Linux DPIO driver, its
+subcomponents, and its APIs.
+
+See Documentation/dpaa2/overview.txt for a general overview of DPAA2
+and the general DPAA2 driver architecture in Linux.
+
+Driver Overview
+---------------
+
+The DPIO driver is bound to DPIO objects discovered on the fsl-mc bus and
+provides services that:
+ A) allow other drivers, such as the Ethernet driver, to enqueue and dequeue
+ frames for their respective objects
+ B) allow drivers to register callbacks for data availability notifications
+ when data becomes available on a queue or channel
+ C) allow drivers to manage hardware buffer pools
+
+The Linux DPIO driver consists of 3 primary components--
+ DPIO object driver-- fsl-mc driver that manages the DPIO object
+ DPIO service-- provides APIs to other Linux drivers for services
+ QBman portal interface-- sends portal commands, gets responses
+
+ fsl-mc other
+ bus drivers
+ | |
+ +---+----+ +------+-----+
+ |DPIO obj| |DPIO service|
+ | driver |---| (DPIO) |
+ +--------+ +------+-----+
+ |
+ +------+-----+
+ | QBman |
+ | portal i/f |
+ +------------+
+ |
+ hardware
+
+The diagram below shows how the DPIO driver components fit with the other
+DPAA2 Linux driver components:
+ +------------+
+ | OS Network |
+ | Stack |
+ +------------+ +------------+
+ | Allocator |. . . . . . . | Ethernet |
+ |(DPMCP,DPBP)| | (DPNI) |
+ +-.----------+ +---+---+----+
+ . . ^ |
+ . . <data avail, | |<enqueue,
+ . . tx confirm> | | dequeue>
+ +-------------+ . | |
+ | DPRC driver | . +--------+ +------------+
+ | (DPRC) | . . |DPIO obj| |DPIO service|
+ +----------+--+ | driver |-| (DPIO) |
+ | +--------+ +------+-----+
+ |<dev add/remove> +------|-----+
+ | | QBman |
+ +----+--------------+ | portal i/f |
+ | MC-bus driver | +------------+
+ | | |
+ | /soc/fsl-mc | |
+ +-------------------+ |
+ |
+ =========================================|=========|========================
+ +-+--DPIO---|-----------+
+ | | |
+ | QBman Portal |
+ +-----------------------+
+
+ ============================================================================
+
+
+DPIO Object Driver (dpio-driver.c)
+----------------------------------
+
+ The dpio-driver component registers with the fsl-mc bus to handle objects of
+ type "dpio". The implementation of probe() handles basic initialization
+ of the DPIO, including mapping of the DPIO regions (the QBman SW portal),
+ initializing interrupts, and registering irq handlers. The dpio-driver
+ registers the probed DPIO with dpio-service.
+
+DPIO service (dpio-service.c, dpaa2-io.h)
+------------------------------------------
+
+ The dpio service component provides queuing, notification, and buffer
+ management services to DPAA2 drivers, such as the Ethernet driver. A system
+ will typically allocate one DPIO object per CPU to allow queuing operations
+ to happen simultaneously across all CPUs. An illustrative usage sketch
+ follows the API lists below.
+
+ Notification handling
+ dpaa2_io_service_register()
+ dpaa2_io_service_deregister()
+ dpaa2_io_service_rearm()
+
+ Queuing
+ dpaa2_io_service_pull_fq()
+ dpaa2_io_service_pull_channel()
+ dpaa2_io_service_enqueue_fq()
+ dpaa2_io_service_enqueue_qd()
+ dpaa2_io_store_create()
+ dpaa2_io_store_destroy()
+ dpaa2_io_store_next()
+
+ Buffer pool management
+ dpaa2_io_service_release()
+ dpaa2_io_service_acquire()
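+
+ As an illustration only (this sketch is not part of the driver), an object
+ driver that has prepared a frame descriptor might enqueue it and later pull
+ frames back roughly as follows. "fqid", "dev" (the device used for DMA
+ mapping), consume() and the dpaa2_fd contents are placeholders supplied by
+ the caller, and error handling is omitted:
+
+	struct dpaa2_fd fd;		/* filled in by the caller */
+	struct dpaa2_io_store *s;
+	struct dpaa2_dq *dq;
+	int last = 0;
+
+	/* passing NULL selects any DPIO service instance */
+	dpaa2_io_service_enqueue_fq(NULL, fqid, &fd);
+
+	s = dpaa2_io_store_create(8, dev);
+	dpaa2_io_service_pull_fq(NULL, fqid, s);
+	do {
+		/* NULL here means the pull is still in flight */
+		dq = dpaa2_io_store_next(s, &last);
+		if (dq)
+			consume(dq);	/* caller-defined processing */
+	} while (!last);
+	dpaa2_io_store_destroy(s);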
+
+QBman portal interface (qbman-portal.c)
+---------------------------------------
+
+ The qbman-portal component provides APIs to do the low level hardware
+ bit twiddling for operations such as:
+ -initializing Qman software portals
+ -building and sending portal commands
+ -portal interrupt configuration and processing
+
+ The qbman-portal APIs are not public to other drivers, and are
+ only used by dpio-service.
+
+Other (dpaa2-fd.h, dpaa2-global.h)
+----------------------------------
+
+ Frame descriptor and scatter-gather definitions and the APIs used to
+ manipulate them are defined in dpaa2-fd.h.
+
+ Dequeue result struct and parsing APIs are defined in dpaa2-global.h.
diff --git a/drivers/staging/fsl-mc/bus/dpio/dpio-service.c b/drivers/staging/fsl-mc/bus/dpio/dpio-service.c
new file mode 100644
index 000000000000..e5d66749614c
--- /dev/null
+++ b/drivers/staging/fsl-mc/bus/dpio/dpio-service.c
@@ -0,0 +1,618 @@
+/*
+ * Copyright 2014-2016 Freescale Semiconductor Inc.
+ * Copyright 2016 NXP
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <linux/types.h>
+#include "../../include/mc.h"
+#include "../../include/dpaa2-io.h"
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+
+#include "dpio.h"
+#include "qbman-portal.h"
+
+struct dpaa2_io {
+ atomic_t refs;
+ struct dpaa2_io_desc dpio_desc;
+ struct qbman_swp_desc swp_desc;
+ struct qbman_swp *swp;
+ struct list_head node;
+ /* protect against multiple management commands */
+ spinlock_t lock_mgmt_cmd;
+ /* protect notifications list */
+ spinlock_t lock_notifications;
+ struct list_head notifications;
+};
+
+struct dpaa2_io_store {
+ unsigned int max;
+ dma_addr_t paddr;
+ struct dpaa2_dq *vaddr;
+ void *alloced_addr; /* unaligned value from kmalloc() */
+ unsigned int idx; /* position of the next-to-be-returned entry */
+ struct qbman_swp *swp; /* portal used to issue VDQCR */
+ struct device *dev; /* device used for DMA mapping */
+};
+
+/* keep a per cpu array of DPIOs for fast access */
+static struct dpaa2_io *dpio_by_cpu[NR_CPUS];
+static struct list_head dpio_list = LIST_HEAD_INIT(dpio_list);
+static DEFINE_SPINLOCK(dpio_list_lock);
+
+static inline struct dpaa2_io *service_select_by_cpu(struct dpaa2_io *d,
+ int cpu)
+{
+ if (d)
+ return d;
+
+ if (unlikely(cpu >= num_possible_cpus()))
+ return NULL;
+
+ /*
+ * If cpu == -1, choose the current cpu, with no guarantees about
+ * potentially being migrated away.
+ */
+ if (unlikely(cpu < 0))
+ cpu = smp_processor_id();
+
+ /* If a specific cpu was requested, pick it up immediately */
+ return dpio_by_cpu[cpu];
+}
+
+static inline struct dpaa2_io *service_select(struct dpaa2_io *d)
+{
+ if (d)
+ return d;
+
+ spin_lock(&dpio_list_lock);
+ d = list_entry(dpio_list.next, struct dpaa2_io, node);
+ list_del(&d->node);
+ list_add_tail(&d->node, &dpio_list);
+ spin_unlock(&dpio_list_lock);
+
+ return d;
+}
+
+/**
+ * dpaa2_io_create() - create a dpaa2_io object.
+ * @desc: the dpaa2_io descriptor
+ *
+ * Activates a "struct dpaa2_io" corresponding to the given config of an actual
+ * DPIO object.
+ *
+ * Return a valid dpaa2_io object for success, or NULL for failure.
+ */
+struct dpaa2_io *dpaa2_io_create(const struct dpaa2_io_desc *desc)
+{
+ struct dpaa2_io *obj = kmalloc(sizeof(*obj), GFP_KERNEL);
+
+ if (!obj)
+ return NULL;
+
+ /* check if CPU is out of range (-1 means any cpu) */
+ if (desc->cpu >= num_possible_cpus()) {
+ kfree(obj);
+ return NULL;
+ }
+
+ atomic_set(&obj->refs, 1);
+ obj->dpio_desc = *desc;
+ obj->swp_desc.cena_bar = obj->dpio_desc.regs_cena;
+ obj->swp_desc.cinh_bar = obj->dpio_desc.regs_cinh;
+ obj->swp_desc.qman_version = obj->dpio_desc.qman_version;
+ obj->swp = qbman_swp_init(&obj->swp_desc);
+
+ if (!obj->swp) {
+ kfree(obj);
+ return NULL;
+ }
+
+ INIT_LIST_HEAD(&obj->node);
+ spin_lock_init(&obj->lock_mgmt_cmd);
+ spin_lock_init(&obj->lock_notifications);
+ INIT_LIST_HEAD(&obj->notifications);
+
+ /* For now only enable DQRR interrupts */
+ qbman_swp_interrupt_set_trigger(obj->swp,
+ QBMAN_SWP_INTERRUPT_DQRI);
+ qbman_swp_interrupt_clear_status(obj->swp, 0xffffffff);
+ if (obj->dpio_desc.receives_notifications)
+ qbman_swp_push_set(obj->swp, 0, 1);
+
+ spin_lock(&dpio_list_lock);
+ list_add_tail(&obj->node, &dpio_list);
+ if (desc->cpu >= 0 && !dpio_by_cpu[desc->cpu])
+ dpio_by_cpu[desc->cpu] = obj;
+ spin_unlock(&dpio_list_lock);
+
+ return obj;
+}
+EXPORT_SYMBOL(dpaa2_io_create);
+
+/**
+ * dpaa2_io_down() - release the dpaa2_io object.
+ * @d: the dpaa2_io object to be released.
+ *
+ * The "struct dpaa2_io" type can represent an individual DPIO object (as
+ * described by "struct dpaa2_io_desc") or an instance of a "DPIO service",
+ * which can be used to group/encapsulate multiple DPIO objects. In all cases,
+ * each handle obtained should be released using this function.
+ */
+void dpaa2_io_down(struct dpaa2_io *d)
+{
+ if (!atomic_dec_and_test(&d->refs))
+ return;
+ kfree(d);
+}
+EXPORT_SYMBOL(dpaa2_io_down);
+
+#define DPAA_POLL_MAX 32
+
+/**
+ * dpaa2_io_irq() - ISR for DPIO interrupts
+ *
+ * @obj: the given DPIO object.
+ *
+ * Return IRQ_HANDLED for success or IRQ_NONE if there
+ * were no pending interrupts.
+ */
+irqreturn_t dpaa2_io_irq(struct dpaa2_io *obj)
+{
+ const struct dpaa2_dq *dq;
+ int max = 0;
+ struct qbman_swp *swp;
+ u32 status;
+
+ swp = obj->swp;
+ status = qbman_swp_interrupt_read_status(swp);
+ if (!status)
+ return IRQ_NONE;
+
+ dq = qbman_swp_dqrr_next(swp);
+ while (dq) {
+ if (qbman_result_is_SCN(dq)) {
+ struct dpaa2_io_notification_ctx *ctx;
+ u64 q64;
+
+ q64 = qbman_result_SCN_ctx(dq);
+ ctx = (void *)q64;
+ ctx->cb(ctx);
+ } else {
+ pr_crit("fsl-mc-dpio: Unrecognised/ignored DQRR entry\n");
+ }
+ qbman_swp_dqrr_consume(swp, dq);
+ ++max;
+ if (max > DPAA_POLL_MAX)
+ goto done;
+ dq = qbman_swp_dqrr_next(swp);
+ }
+done:
+ qbman_swp_interrupt_clear_status(swp, status);
+ qbman_swp_interrupt_set_inhibit(swp, 0);
+ return IRQ_HANDLED;
+}
+EXPORT_SYMBOL(dpaa2_io_irq);
+
+/**
+ * dpaa2_io_service_register() - Prepare for servicing of FQDAN or CDAN
+ * notifications on the given DPIO service.
+ * @d: the given DPIO service.
+ * @ctx: the notification context.
+ *
+ * The caller should make the MC command to attach a DPAA2 object to
+ * a DPIO after this function completes successfully. In that way:
+ * (a) The DPIO service is "ready" to handle a notification arrival
+ * (which might happen before the "attach" command to MC has
+ * returned control of execution back to the caller)
+ * (b) The DPIO service can provide back to the caller the 'dpio_id' and
+ * 'qman64' parameters that it should pass along in the MC command
+ * in order for the object to be configured to produce the right
+ * notification fields to the DPIO service.
+ *
+ * Return 0 for success, or -ENODEV for failure.
+ */
+int dpaa2_io_service_register(struct dpaa2_io *d,
+ struct dpaa2_io_notification_ctx *ctx)
+{
+ unsigned long irqflags;
+
+ d = service_select_by_cpu(d, ctx->desired_cpu);
+ if (!d)
+ return -ENODEV;
+
+ ctx->dpio_id = d->dpio_desc.dpio_id;
+ ctx->qman64 = (u64)ctx;
+ ctx->dpio_private = d;
+ spin_lock_irqsave(&d->lock_notifications, irqflags);
+ list_add(&ctx->node, &d->notifications);
+ spin_unlock_irqrestore(&d->lock_notifications, irqflags);
+
+ /* Enable the generation of CDAN notifications */
+ if (ctx->is_cdan)
+ qbman_swp_CDAN_set_context_enable(d->swp,
+ (u16)ctx->id,
+ ctx->qman64);
+ return 0;
+}
+EXPORT_SYMBOL(dpaa2_io_service_register);
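+
+/*
+ * Caller-side sketch (illustrative; "nctx", "fqid" and "my_fqdan_cb" are
+ * owned by the caller) for arming FQDAN notifications on a frame queue:
+ *
+ *	nctx->is_cdan = 0;
+ *	nctx->id = fqid;
+ *	nctx->desired_cpu = -1;
+ *	nctx->cb = my_fqdan_cb;
+ *	err = dpaa2_io_service_register(NULL, nctx);
+ *
+ * followed by the MC command that attaches the frame queue to
+ * nctx->dpio_id / nctx->qman64, as described above.
+ */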
+
+/**
+ * dpaa2_io_service_deregister() - The opposite of 'register'.
+ * @service: the given DPIO service.
+ * @ctx: the notification context.
+ *
+ * This function should be called only after sending the MC command to
+ * detach the notification-producing device from the DPIO.
+ */
+void dpaa2_io_service_deregister(struct dpaa2_io *service,
+ struct dpaa2_io_notification_ctx *ctx)
+{
+ struct dpaa2_io *d = ctx->dpio_private;
+ unsigned long irqflags;
+
+ if (ctx->is_cdan)
+ qbman_swp_CDAN_disable(d->swp, (u16)ctx->id);
+
+ spin_lock_irqsave(&d->lock_notifications, irqflags);
+ list_del(&ctx->node);
+ spin_unlock_irqrestore(&d->lock_notifications, irqflags);
+}
+EXPORT_SYMBOL(dpaa2_io_service_deregister);
+
+/**
+ * dpaa2_io_service_rearm() - Rearm the notification for the given DPIO service.
+ * @d: the given DPIO service.
+ * @ctx: the notification context.
+ *
+ * Once a FQDAN/CDAN has been produced, the corresponding FQ/channel is
+ * considered "disarmed". That is, the user can issue pull dequeue operations
+ * on that traffic source for as long as it likes. Eventually it may wish to
+ * "rearm" that source to allow it to produce another FQDAN/CDAN, which is
+ * what this function achieves.
+ *
+ * Return 0 for success.
+ */
+int dpaa2_io_service_rearm(struct dpaa2_io *d,
+ struct dpaa2_io_notification_ctx *ctx)
+{
+ unsigned long irqflags;
+ int err;
+
+ d = service_select_by_cpu(d, ctx->desired_cpu);
+ if (unlikely(!d))
+ return -ENODEV;
+
+ spin_lock_irqsave(&d->lock_mgmt_cmd, irqflags);
+ if (ctx->is_cdan)
+ err = qbman_swp_CDAN_enable(d->swp, (u16)ctx->id);
+ else
+ err = qbman_swp_fq_schedule(d->swp, ctx->id);
+ spin_unlock_irqrestore(&d->lock_mgmt_cmd, irqflags);
+
+ return err;
+}
+EXPORT_SYMBOL(dpaa2_io_service_rearm);
+
+/**
+ * dpaa2_io_service_pull_fq() - pull dequeue frames from a frame queue.
+ * @d: the given DPIO service.
+ * @fqid: the given frame queue id.
+ * @s: the dpaa2_io_store object for the result.
+ *
+ * Return 0 for success, or error code for failure.
+ */
+int dpaa2_io_service_pull_fq(struct dpaa2_io *d, u32 fqid,
+ struct dpaa2_io_store *s)
+{
+ struct qbman_pull_desc pd;
+ int err;
+
+ qbman_pull_desc_clear(&pd);
+ qbman_pull_desc_set_storage(&pd, s->vaddr, s->paddr, 1);
+ qbman_pull_desc_set_numframes(&pd, (u8)s->max);
+ qbman_pull_desc_set_fq(&pd, fqid);
+
+ d = service_select(d);
+ if (!d)
+ return -ENODEV;
+ s->swp = d->swp;
+ err = qbman_swp_pull(d->swp, &pd);
+ if (err)
+ s->swp = NULL;
+
+ return err;
+}
+EXPORT_SYMBOL(dpaa2_io_service_pull_fq);
+
+/**
+ * dpaa2_io_service_pull_channel() - pull dequeue frames from a channel.
+ * @d: the given DPIO service.
+ * @channelid: the given channel id.
+ * @s: the dpaa2_io_store object for the result.
+ *
+ * Return 0 for success, or error code for failure.
+ */
+int dpaa2_io_service_pull_channel(struct dpaa2_io *d, u32 channelid,
+ struct dpaa2_io_store *s)
+{
+ struct qbman_pull_desc pd;
+ int err;
+
+ qbman_pull_desc_clear(&pd);
+ qbman_pull_desc_set_storage(&pd, s->vaddr, s->paddr, 1);
+ qbman_pull_desc_set_numframes(&pd, (u8)s->max);
+ qbman_pull_desc_set_channel(&pd, channelid, qbman_pull_type_prio);
+
+ d = service_select(d);
+ if (!d)
+ return -ENODEV;
+
+ s->swp = d->swp;
+ err = qbman_swp_pull(d->swp, &pd);
+ if (err)
+ s->swp = NULL;
+
+ return err;
+}
+EXPORT_SYMBOL(dpaa2_io_service_pull_channel);
+
+/**
+ * dpaa2_io_service_enqueue_fq() - Enqueue a frame to a frame queue.
+ * @d: the given DPIO service.
+ * @fqid: the given frame queue id.
+ * @fd: the frame descriptor which is enqueued.
+ *
+ * Return 0 for successful enqueue, -EBUSY if the enqueue ring is not ready,
+ * or -ENODEV if there is no dpio service.
+ */
+int dpaa2_io_service_enqueue_fq(struct dpaa2_io *d,
+ u32 fqid,
+ const struct dpaa2_fd *fd)
+{
+ struct qbman_eq_desc ed;
+
+ d = service_select(d);
+ if (!d)
+ return -ENODEV;
+
+ qbman_eq_desc_clear(&ed);
+ qbman_eq_desc_set_no_orp(&ed, 0);
+ qbman_eq_desc_set_fq(&ed, fqid);
+
+ return qbman_swp_enqueue(d->swp, &ed, fd);
+}
+EXPORT_SYMBOL(dpaa2_io_service_enqueue_fq);
+
+/**
+ * dpaa2_io_service_enqueue_qd() - Enqueue a frame to a QD.
+ * @d: the given DPIO service.
+ * @qdid: the given queuing destination id.
+ * @prio: the given queuing priority.
+ * @qdbin: the given queuing destination bin.
+ * @fd: the frame descriptor which is enqueued.
+ *
+ * Return 0 for successful enqueue, or -EBUSY if the enqueue ring is not ready,
+ * or -ENODEV if there is no dpio service.
+ */
+int dpaa2_io_service_enqueue_qd(struct dpaa2_io *d,
+ u32 qdid, u8 prio, u16 qdbin,
+ const struct dpaa2_fd *fd)
+{
+ struct qbman_eq_desc ed;
+
+ d = service_select(d);
+ if (!d)
+ return -ENODEV;
+
+ qbman_eq_desc_clear(&ed);
+ qbman_eq_desc_set_no_orp(&ed, 0);
+ qbman_eq_desc_set_qd(&ed, qdid, qdbin, prio);
+
+ return qbman_swp_enqueue(d->swp, &ed, fd);
+}
+EXPORT_SYMBOL(dpaa2_io_service_enqueue_qd);
+
+/**
+ * dpaa2_io_service_release() - Release buffers to a buffer pool.
+ * @d: the given DPIO object.
+ * @bpid: the buffer pool id.
+ * @buffers: the buffers to be released.
+ * @num_buffers: the number of the buffers to be released.
+ *
+ * Return 0 for success, and negative error code for failure.
+ */
+int dpaa2_io_service_release(struct dpaa2_io *d,
+ u32 bpid,
+ const u64 *buffers,
+ unsigned int num_buffers)
+{
+ struct qbman_release_desc rd;
+
+ d = service_select(d);
+ if (!d)
+ return -ENODEV;
+
+ qbman_release_desc_clear(&rd);
+ qbman_release_desc_set_bpid(&rd, bpid);
+
+ return qbman_swp_release(d->swp, &rd, buffers, num_buffers);
+}
+EXPORT_SYMBOL(dpaa2_io_service_release);
+
+/**
+ * dpaa2_io_service_acquire() - Acquire buffers from a buffer pool.
+ * @d: the given DPIO object.
+ * @bpid: the buffer pool id.
+ * @buffers: the buffer addresses for acquired buffers.
+ * @num_buffers: the expected number of the buffers to acquire.
+ *
+ * Return a negative error code if the command failed, otherwise it returns
+ * the number of buffers acquired, which may be less than the number requested.
+ * E.g. if the buffer pool is empty, this will return zero.
+ */
+int dpaa2_io_service_acquire(struct dpaa2_io *d,
+ u32 bpid,
+ u64 *buffers,
+ unsigned int num_buffers)
+{
+ unsigned long irqflags;
+ int err;
+
+ d = service_select(d);
+ if (!d)
+ return -ENODEV;
+
+ spin_lock_irqsave(&d->lock_mgmt_cmd, irqflags);
+ err = qbman_swp_acquire(d->swp, bpid, buffers, num_buffers);
+ spin_unlock_irqrestore(&d->lock_mgmt_cmd, irqflags);
+
+ return err;
+}
+EXPORT_SYMBOL(dpaa2_io_service_acquire);
+
+/*
+ * 'Stores' are reusable memory blocks for holding dequeue results, and to
+ * assist with parsing those results.
+ */
+
+/**
+ * dpaa2_io_store_create() - Create the dma memory storage for dequeue result.
+ * @max_frames: the maximum number of dequeue results to store; must be <= 16.
+ * @dev: the device to allow mapping/unmapping the DMAable region.
+ *
+ * The size of the storage is "max_frames*sizeof(struct dpaa2_dq)".
+ * The 'dpaa2_io_store' returned is a DPIO service managed object.
+ *
+ * Return pointer to dpaa2_io_store struct for successfully created storage
+ * memory, or NULL on error.
+ */
+struct dpaa2_io_store *dpaa2_io_store_create(unsigned int max_frames,
+ struct device *dev)
+{
+ struct dpaa2_io_store *ret;
+ size_t size;
+
+ if (!max_frames || (max_frames > 16))
+ return NULL;
+
+ ret = kmalloc(sizeof(*ret), GFP_KERNEL);
+ if (!ret)
+ return NULL;
+
+ ret->max = max_frames;
+ size = max_frames * sizeof(struct dpaa2_dq) + 64;
+ ret->alloced_addr = kzalloc(size, GFP_KERNEL);
+ if (!ret->alloced_addr) {
+ kfree(ret);
+ return NULL;
+ }
+
+ ret->vaddr = PTR_ALIGN(ret->alloced_addr, 64);
+ ret->paddr = dma_map_single(dev, ret->vaddr,
+ sizeof(struct dpaa2_dq) * max_frames,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev, ret->paddr)) {
+ kfree(ret->alloced_addr);
+ kfree(ret);
+ return NULL;
+ }
+
+ ret->idx = 0;
+ ret->dev = dev;
+
+ return ret;
+}
+EXPORT_SYMBOL(dpaa2_io_store_create);
+
+/**
+ * dpaa2_io_store_destroy() - Frees the dma memory storage for dequeue
+ * result.
+ * @s: the storage memory to be destroyed.
+ */
+void dpaa2_io_store_destroy(struct dpaa2_io_store *s)
+{
+ dma_unmap_single(s->dev, s->paddr, sizeof(struct dpaa2_dq) * s->max,
+ DMA_FROM_DEVICE);
+ kfree(s->alloced_addr);
+ kfree(s);
+}
+EXPORT_SYMBOL(dpaa2_io_store_destroy);
+
+/**
+ * dpaa2_io_store_next() - Determine when the next dequeue result is available.
+ * @s: the dpaa2_io_store object.
+ * @is_last: indicate whether this is the last frame in the pull command.
+ *
+ * When an object driver performs dequeues to a dpaa2_io_store, this function
+ * can be used to determine when the next frame result is available. Once
+ * this function returns non-NULL, a subsequent call to it will try to find
+ * the next dequeue result.
+ *
+ * Note that if a pull-dequeue has a NULL result because the target FQ/channel
+ * was empty, then this function will also return NULL (rather than expecting
+ * the caller to always check for this). As such, "is_last" can be used to
+ * differentiate between "end-of-empty-dequeue" and "still-waiting".
+ *
+ * Return dequeue result for a valid dequeue result, or NULL for empty dequeue.
+ */
+struct dpaa2_dq *dpaa2_io_store_next(struct dpaa2_io_store *s, int *is_last)
+{
+ int match;
+ struct dpaa2_dq *ret = &s->vaddr[s->idx];
+
+ match = qbman_result_has_new_result(s->swp, ret);
+ if (!match) {
+ *is_last = 0;
+ return NULL;
+ }
+
+ s->idx++;
+
+ if (dpaa2_dq_is_pull_complete(ret)) {
+ *is_last = 1;
+ s->idx = 0;
+ /*
+ * If we get an empty dequeue result to terminate a zero-results
+ * vdqcr, return NULL to the caller rather than expecting them to
+ * check for non-NULL results every time.
+ */
+ if (!(dpaa2_dq_flags(ret) & DPAA2_DQ_STAT_VALIDFRAME))
+ ret = NULL;
+ } else {
+ *is_last = 0;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(dpaa2_io_store_next);
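+
+/*
+ * Illustrative sketch, not part of this patch: consuming the results that a
+ * pull dequeue deposits into a dpaa2_io_store. The example_consume_store()
+ * helper is hypothetical and assumes the pull command targeting this store has
+ * already been issued through the DPIO service; a real caller would back off
+ * while waiting rather than spinning.
+ */
+static void example_consume_store(struct dpaa2_io_store *store)
+{
+	struct dpaa2_dq *dq;
+	int is_last = 0;
+
+	do {
+		/* NULL with !is_last means the result has not landed yet */
+		dq = dpaa2_io_store_next(store, &is_last);
+		if (dq) {
+			/* ... process the frame described by dq ... */
+		}
+	} while (!is_last);
+}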
diff --git a/drivers/staging/fsl-mc/bus/dpio/dpio.c b/drivers/staging/fsl-mc/bus/dpio/dpio.c
new file mode 100644
index 000000000000..d81e0232f6c1
--- /dev/null
+++ b/drivers/staging/fsl-mc/bus/dpio/dpio.c
@@ -0,0 +1,224 @@
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2016 NXP
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#include "../../include/mc-sys.h"
+#include "../../include/mc-cmd.h"
+
+#include "dpio.h"
+#include "dpio-cmd.h"
+
+/*
+ * Data Path I/O Portal API
+ * Contains initialization APIs and runtime control APIs for DPIO
+ */
+
+/**
+ * dpio_open() - Open a control session for the specified object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @dpio_id: DPIO unique ID
+ * @token: Returned token; use in subsequent API calls
+ *
+ * This function can be used to open a control session for an
+ * already created object; an object may have been declared in
+ * the DPL or by calling the dpio_create() function.
+ * This function returns a unique authentication token,
+ * associated with the specific object ID and the specific MC
+ * portal; this token must be used in all subsequent commands for
+ * this specific object.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpio_open(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ int dpio_id,
+ u16 *token)
+{
+ struct mc_command cmd = { 0 };
+ struct dpio_cmd_open *dpio_cmd;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_OPEN,
+ cmd_flags,
+ 0);
+ dpio_cmd = (struct dpio_cmd_open *)cmd.params;
+ dpio_cmd->dpio_id = cpu_to_le32(dpio_id);
+
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ *token = mc_cmd_hdr_read_token(&cmd);
+
+ return 0;
+}
+
+/**
+ * dpio_close() - Close the control session of the object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPIO object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpio_close(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_CLOSE,
+ cmd_flags,
+ token);
+
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpio_enable() - Enable the DPIO, allow I/O portal operations.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPIO object
+ *
+ * Return: '0' on Success; Error code otherwise
+ */
+int dpio_enable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_ENABLE,
+ cmd_flags,
+ token);
+
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpio_disable() - Disable the DPIO, stop any I/O portal operation.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPIO object
+ *
+ * Return: '0' on Success; Error code otherwise
+ */
+int dpio_disable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_DISABLE,
+ cmd_flags,
+ token);
+
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpio_get_attributes() - Retrieve DPIO attributes
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPIO object
+ * @attr: Returned object's attributes
+ *
+ * Return: '0' on Success; Error code otherwise
+ */
+int dpio_get_attributes(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpio_attr *attr)
+{
+ struct mc_command cmd = { 0 };
+ struct dpio_rsp_get_attr *dpio_rsp;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_GET_ATTR,
+ cmd_flags,
+ token);
+
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ dpio_rsp = (struct dpio_rsp_get_attr *)cmd.params;
+ attr->id = le32_to_cpu(dpio_rsp->id);
+ attr->qbman_portal_id = le16_to_cpu(dpio_rsp->qbman_portal_id);
+ attr->num_priorities = dpio_rsp->num_priorities;
+ attr->channel_mode = dpio_rsp->channel_mode & DPIO_CHANNEL_MODE_MASK;
+ attr->qbman_portal_ce_offset =
+ le64_to_cpu(dpio_rsp->qbman_portal_ce_addr);
+ attr->qbman_portal_ci_offset =
+ le64_to_cpu(dpio_rsp->qbman_portal_ci_addr);
+ attr->qbman_version = le32_to_cpu(dpio_rsp->qbman_version);
+
+ return 0;
+}
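+
+/*
+ * Illustrative sketch, not part of this patch: the usual control-session flow
+ * built from the commands above. The example_setup_dpio() helper is
+ * hypothetical; the MC portal and DPIO id are assumed to come from the fsl-mc
+ * bus probe path, and a real driver would keep the token open until teardown
+ * rather than closing it here.
+ */
+static int example_setup_dpio(struct fsl_mc_io *mc_io, int dpio_id)
+{
+	struct dpio_attr attr;
+	u16 token;
+	int err;
+
+	err = dpio_open(mc_io, 0, dpio_id, &token);
+	if (err)
+		return err;
+
+	err = dpio_get_attributes(mc_io, 0, token, &attr);
+	if (err)
+		goto out_close;
+
+	/* ... map the portal using attr.qbman_portal_c{e,i}_offset ... */
+
+	err = dpio_enable(mc_io, 0, token);
+
+out_close:
+	dpio_close(mc_io, 0, token);
+	return err;
+}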
+
+/**
+ * dpio_get_api_version() - Get Data Path I/O API version
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @major_ver: Major version of DPIO API
+ * @minor_ver: Minor version of DPIO API
+ *
+ * Return: '0' on Success; Error code otherwise
+ */
+int dpio_get_api_version(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 *major_ver,
+ u16 *minor_ver)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_GET_API_VERSION,
+ cmd_flags, 0);
+
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ mc_cmd_read_api_version(&cmd, major_ver, minor_ver);
+
+ return 0;
+}
diff --git a/drivers/staging/fsl-mc/bus/dpio/dpio.h b/drivers/staging/fsl-mc/bus/dpio/dpio.h
new file mode 100644
index 000000000000..ced1103d157c
--- /dev/null
+++ b/drivers/staging/fsl-mc/bus/dpio/dpio.h
@@ -0,0 +1,109 @@
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2016 NXP
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef __FSL_DPIO_H
+#define __FSL_DPIO_H
+
+struct fsl_mc_io;
+
+int dpio_open(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ int dpio_id,
+ u16 *token);
+
+int dpio_close(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
+
+/**
+ * enum dpio_channel_mode - DPIO notification channel mode
+ * @DPIO_NO_CHANNEL: No support for notification channel
+ * @DPIO_LOCAL_CHANNEL: Notifications on data availability can be received by a
+ * dedicated channel in the DPIO; user should point the queue's
+ * destination in the relevant interface to this DPIO
+ */
+enum dpio_channel_mode {
+ DPIO_NO_CHANNEL = 0,
+ DPIO_LOCAL_CHANNEL = 1,
+};
+
+/**
+ * struct dpio_cfg - Structure representing DPIO configuration
+ * @channel_mode: Notification channel mode
+ * @num_priorities: Number of priorities for the notification channel (1-8);
+ * relevant only if 'channel_mode = DPIO_LOCAL_CHANNEL'
+ */
+struct dpio_cfg {
+ enum dpio_channel_mode channel_mode;
+ u8 num_priorities;
+};
+
+int dpio_enable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
+
+int dpio_disable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
+
+/**
+ * struct dpio_attr - Structure representing DPIO attributes
+ * @id: DPIO object ID
+ * @qbman_portal_ce_offset: offset of the software portal cache-enabled area
+ * @qbman_portal_ci_offset: offset of the software portal cache-inhibited area
+ * @qbman_portal_id: Software portal ID
+ * @channel_mode: Notification channel mode
+ * @num_priorities: Number of priorities for the notification channel (1-8);
+ * relevant only if 'channel_mode = DPIO_LOCAL_CHANNEL'
+ * @qbman_version: QBMAN version
+ */
+struct dpio_attr {
+ int id;
+ u64 qbman_portal_ce_offset;
+ u64 qbman_portal_ci_offset;
+ u16 qbman_portal_id;
+ enum dpio_channel_mode channel_mode;
+ u8 num_priorities;
+ u32 qbman_version;
+};
+
+int dpio_get_attributes(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpio_attr *attr);
+
+int dpio_get_api_version(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 *major_ver,
+ u16 *minor_ver);
+
+#endif /* __FSL_DPIO_H */
diff --git a/drivers/staging/fsl-mc/bus/dpio/qbman-portal.c b/drivers/staging/fsl-mc/bus/dpio/qbman-portal.c
new file mode 100644
index 000000000000..7988612aaecf
--- /dev/null
+++ b/drivers/staging/fsl-mc/bus/dpio/qbman-portal.c
@@ -0,0 +1,1035 @@
+/*
+ * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
+ * Copyright 2016 NXP
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <asm/cacheflush.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include "../../include/dpaa2-global.h"
+
+#include "qbman-portal.h"
+
+#define QMAN_REV_4000 0x04000000
+#define QMAN_REV_4100 0x04010000
+#define QMAN_REV_4101 0x04010001
+#define QMAN_REV_MASK 0xffff0000
+
+/* All QBMan command and result structures use this "valid bit" encoding */
+#define QB_VALID_BIT ((u32)0x80)
+
+/* QBMan portal management command codes */
+#define QBMAN_MC_ACQUIRE 0x30
+#define QBMAN_WQCHAN_CONFIGURE 0x46
+
+/* CINH register offsets */
+#define QBMAN_CINH_SWP_EQAR 0x8c0
+#define QBMAN_CINH_SWP_DQPI 0xa00
+#define QBMAN_CINH_SWP_DCAP 0xac0
+#define QBMAN_CINH_SWP_SDQCR 0xb00
+#define QBMAN_CINH_SWP_RAR 0xcc0
+#define QBMAN_CINH_SWP_ISR 0xe00
+#define QBMAN_CINH_SWP_IER 0xe40
+#define QBMAN_CINH_SWP_ISDR 0xe80
+#define QBMAN_CINH_SWP_IIR 0xec0
+
+/* CENA register offsets */
+#define QBMAN_CENA_SWP_EQCR(n) (0x000 + ((u32)(n) << 6))
+#define QBMAN_CENA_SWP_DQRR(n) (0x200 + ((u32)(n) << 6))
+#define QBMAN_CENA_SWP_RCR(n) (0x400 + ((u32)(n) << 6))
+#define QBMAN_CENA_SWP_CR 0x600
+#define QBMAN_CENA_SWP_RR(vb) (0x700 + ((u32)(vb) >> 1))
+#define QBMAN_CENA_SWP_VDQCR 0x780
+
+/* Reverse mapping of QBMAN_CENA_SWP_DQRR() */
+#define QBMAN_IDX_FROM_DQRR(p) (((unsigned long)(p) & 0x1ff) >> 6)
+
+/* Define token used to determine if response written to memory is valid */
+#define QMAN_DQ_TOKEN_VALID 1
+
+/* SDQCR attribute codes */
+#define QB_SDQCR_FC_SHIFT 29
+#define QB_SDQCR_FC_MASK 0x1
+#define QB_SDQCR_DCT_SHIFT 24
+#define QB_SDQCR_DCT_MASK 0x3
+#define QB_SDQCR_TOK_SHIFT 16
+#define QB_SDQCR_TOK_MASK 0xff
+#define QB_SDQCR_SRC_SHIFT 0
+#define QB_SDQCR_SRC_MASK 0xffff
+
+/* opaque token for static dequeues */
+#define QMAN_SDQCR_TOKEN 0xbb
+
+enum qbman_sdqcr_dct {
+ qbman_sdqcr_dct_null = 0,
+ qbman_sdqcr_dct_prio_ics,
+ qbman_sdqcr_dct_active_ics,
+ qbman_sdqcr_dct_active
+};
+
+enum qbman_sdqcr_fc {
+ qbman_sdqcr_fc_one = 0,
+ qbman_sdqcr_fc_up_to_3 = 1
+};
+
+/* Portal Access */
+
+static inline u32 qbman_read_register(struct qbman_swp *p, u32 offset)
+{
+ return readl_relaxed(p->addr_cinh + offset);
+}
+
+static inline void qbman_write_register(struct qbman_swp *p, u32 offset,
+ u32 value)
+{
+ writel_relaxed(value, p->addr_cinh + offset);
+}
+
+static inline void *qbman_get_cmd(struct qbman_swp *p, u32 offset)
+{
+ return p->addr_cena + offset;
+}
+
+#define QBMAN_CINH_SWP_CFG 0xd00
+
+#define SWP_CFG_DQRR_MF_SHIFT 20
+#define SWP_CFG_EST_SHIFT 16
+#define SWP_CFG_WN_SHIFT 14
+#define SWP_CFG_RPM_SHIFT 12
+#define SWP_CFG_DCM_SHIFT 10
+#define SWP_CFG_EPM_SHIFT 8
+#define SWP_CFG_SD_SHIFT 5
+#define SWP_CFG_SP_SHIFT 4
+#define SWP_CFG_SE_SHIFT 3
+#define SWP_CFG_DP_SHIFT 2
+#define SWP_CFG_DE_SHIFT 1
+#define SWP_CFG_EP_SHIFT 0
+
+static inline u32 qbman_set_swp_cfg(u8 max_fill, u8 wn, u8 est, u8 rpm, u8 dcm,
+ u8 epm, int sd, int sp, int se,
+ int dp, int de, int ep)
+{
+ return cpu_to_le32(max_fill << SWP_CFG_DQRR_MF_SHIFT |
+ est << SWP_CFG_EST_SHIFT |
+ wn << SWP_CFG_WN_SHIFT |
+ rpm << SWP_CFG_RPM_SHIFT |
+ dcm << SWP_CFG_DCM_SHIFT |
+ epm << SWP_CFG_EPM_SHIFT |
+ sd << SWP_CFG_SD_SHIFT |
+ sp << SWP_CFG_SP_SHIFT |
+ se << SWP_CFG_SE_SHIFT |
+ dp << SWP_CFG_DP_SHIFT |
+ de << SWP_CFG_DE_SHIFT |
+ ep << SWP_CFG_EP_SHIFT);
+}
+
+/**
+ * qbman_swp_init() - Create a functional object representing the given
+ * QBMan portal descriptor.
+ * @d: the given qbman swp descriptor
+ *
+ * Return qbman_swp portal for success, NULL if the object cannot
+ * be created.
+ */
+struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
+{
+ struct qbman_swp *p = kmalloc(sizeof(*p), GFP_KERNEL);
+ u32 reg;
+
+ if (!p)
+ return NULL;
+ p->desc = d;
+ p->mc.valid_bit = QB_VALID_BIT;
+ p->sdq = 0;
+ p->sdq |= qbman_sdqcr_dct_prio_ics << QB_SDQCR_DCT_SHIFT;
+ p->sdq |= qbman_sdqcr_fc_up_to_3 << QB_SDQCR_FC_SHIFT;
+ p->sdq |= QMAN_SDQCR_TOKEN << QB_SDQCR_TOK_SHIFT;
+
+ atomic_set(&p->vdq.available, 1);
+ p->vdq.valid_bit = QB_VALID_BIT;
+ p->dqrr.next_idx = 0;
+ p->dqrr.valid_bit = QB_VALID_BIT;
+
+ if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_4100) {
+ p->dqrr.dqrr_size = 4;
+ p->dqrr.reset_bug = 1;
+ } else {
+ p->dqrr.dqrr_size = 8;
+ p->dqrr.reset_bug = 0;
+ }
+
+ p->addr_cena = d->cena_bar;
+ p->addr_cinh = d->cinh_bar;
+
+ reg = qbman_set_swp_cfg(p->dqrr.dqrr_size,
+ 1, /* Writes Non-cacheable */
+ 0, /* EQCR_CI stashing threshold */
+ 3, /* RPM: Valid bit mode, RCR in array mode */
+ 2, /* DCM: Discrete consumption ack mode */
+ 3, /* EPM: Valid bit mode, EQCR in array mode */
+ 0, /* mem stashing drop enable == FALSE */
+ 1, /* mem stashing priority == TRUE */
+ 0, /* mem stashing enable == FALSE */
+ 1, /* dequeue stashing priority == TRUE */
+ 0, /* dequeue stashing enable == FALSE */
+ 0); /* EQCR_CI stashing priority == FALSE */
+
+ qbman_write_register(p, QBMAN_CINH_SWP_CFG, reg);
+ reg = qbman_read_register(p, QBMAN_CINH_SWP_CFG);
+ if (!reg) {
+ pr_err("qbman: the portal is not enabled!\n");
+ kfree(p);
+ return NULL;
+ }
+
+ /*
+ * SDQCR needs to be initialized to 0 when no channels are
+ * being dequeued from or else the QMan HW will indicate an
+ * error. The values that were calculated above will be
+ * applied when dequeues from a specific channel are enabled.
+ */
+ qbman_write_register(p, QBMAN_CINH_SWP_SDQCR, 0);
+ return p;
+}
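+
+/*
+ * Illustrative sketch, not part of this patch: building a software portal from
+ * already-mapped cache-enabled and cache-inhibited regions. The
+ * example_create_portal() helper is hypothetical; the mappings and the QMan
+ * version are assumed to come from the DPIO attributes and device resources.
+ * Note that qbman_swp_init() keeps the descriptor pointer, so the descriptor
+ * must outlive the portal (hence the static here).
+ */
+static struct qbman_swp *example_create_portal(void *cena, void *cinh,
+						u32 qman_version)
+{
+	static struct qbman_swp_desc desc;
+
+	desc.cena_bar = cena;
+	desc.cinh_bar = cinh;
+	desc.qman_version = qman_version;
+
+	return qbman_swp_init(&desc);
+}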
+
+/**
+ * qbman_swp_finish() - Destroy the functional object representing the given
+ * QBMan portal descriptor.
+ * @p: the qbman_swp object to be destroyed
+ */
+void qbman_swp_finish(struct qbman_swp *p)
+{
+ kfree(p);
+}
+
+/**
+ * qbman_swp_interrupt_read_status()
+ * @p: the given software portal
+ *
+ * Return the value in the SWP_ISR register.
+ */
+u32 qbman_swp_interrupt_read_status(struct qbman_swp *p)
+{
+ return qbman_read_register(p, QBMAN_CINH_SWP_ISR);
+}
+
+/**
+ * qbman_swp_interrupt_clear_status()
+ * @p: the given software portal
+ * @mask: The mask to clear in SWP_ISR register
+ */
+void qbman_swp_interrupt_clear_status(struct qbman_swp *p, u32 mask)
+{
+ qbman_write_register(p, QBMAN_CINH_SWP_ISR, mask);
+}
+
+/**
+ * qbman_swp_interrupt_get_trigger() - read interrupt enable register
+ * @p: the given software portal
+ *
+ * Return the value in the SWP_IER register.
+ */
+u32 qbman_swp_interrupt_get_trigger(struct qbman_swp *p)
+{
+ return qbman_read_register(p, QBMAN_CINH_SWP_IER);
+}
+
+/**
+ * qbman_swp_interrupt_set_trigger() - enable interrupts for a swp
+ * @p: the given software portal
+ * @mask: The mask of bits to enable in SWP_IER
+ */
+void qbman_swp_interrupt_set_trigger(struct qbman_swp *p, u32 mask)
+{
+ qbman_write_register(p, QBMAN_CINH_SWP_IER, mask);
+}
+
+/**
+ * qbman_swp_interrupt_get_inhibit() - read interrupt mask register
+ * @p: the given software portal object
+ *
+ * Return the value in the SWP_IIR register.
+ */
+int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p)
+{
+ return qbman_read_register(p, QBMAN_CINH_SWP_IIR);
+}
+
+/**
+ * qbman_swp_interrupt_set_inhibit() - write interrupt mask register
+ * @p: the given software portal object
+ * @inhibit: whether to inhibit the IRQs
+ */
+void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit)
+{
+ qbman_write_register(p, QBMAN_CINH_SWP_IIR, inhibit ? 0xffffffff : 0);
+}
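+
+/*
+ * Illustrative sketch, not part of this patch: the typical pattern for
+ * handling a portal interrupt with the accessors above. The
+ * example_handle_portal_irq() helper is hypothetical.
+ */
+static void example_handle_portal_irq(struct qbman_swp *swp)
+{
+	u32 status = qbman_swp_interrupt_read_status(swp);
+
+	if (!status)
+		return;		/* not ours */
+
+	/* Mask further portal interrupts until the work has been polled */
+	qbman_swp_interrupt_set_inhibit(swp, 1);
+	qbman_swp_interrupt_clear_status(swp, status);
+
+	/* ... poll DQRR/stores, then set_inhibit(swp, 0) to re-enable ... */
+}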
+
+/*
+ * Different management commands all use this common base layer of code to issue
+ * commands and poll for results.
+ */
+
+/*
+ * Returns a pointer to where the caller should fill in their management command
+ * (caller should ignore the verb byte)
+ */
+void *qbman_swp_mc_start(struct qbman_swp *p)
+{
+ return qbman_get_cmd(p, QBMAN_CENA_SWP_CR);
+}
+
+/*
+ * Commits the command: merges in the caller-supplied command verb (which
+ * should not include the valid-bit) and submits the command to hardware.
+ */
+void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, u8 cmd_verb)
+{
+ u8 *v = cmd;
+
+ dma_wmb();
+ *v = cmd_verb | p->mc.valid_bit;
+}
+
+/*
+ * Checks for a completed response (returns non-NULL only if the response
+ * is complete).
+ */
+void *qbman_swp_mc_result(struct qbman_swp *p)
+{
+ u32 *ret, verb;
+
+ ret = qbman_get_cmd(p, QBMAN_CENA_SWP_RR(p->mc.valid_bit));
+
+ /* Remove the valid-bit - command completed if the rest is non-zero */
+ verb = ret[0] & ~QB_VALID_BIT;
+ if (!verb)
+ return NULL;
+ p->mc.valid_bit ^= QB_VALID_BIT;
+ return ret;
+}
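+
+/*
+ * Illustrative sketch, not part of this patch: how the three helpers above
+ * combine for one management command. The example_mc_command() helper is
+ * hypothetical; real callers use the qbman_swp_mc_complete() wrapper in
+ * qbman-portal.h, which performs the same submit-then-poll sequence.
+ */
+static void *example_mc_command(struct qbman_swp *s, u8 verb)
+{
+	void *cmd = qbman_swp_mc_start(s);
+	void *rslt;
+	int loops = 1000;
+
+	if (!cmd)
+		return NULL;
+
+	/* ... fill in the command body, leaving the verb byte alone ... */
+
+	qbman_swp_mc_submit(s, cmd, verb);
+
+	/* Poll until the response appears with the expected valid-bit */
+	do {
+		rslt = qbman_swp_mc_result(s);
+	} while (!rslt && --loops);
+
+	return rslt;
+}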
+
+#define QB_ENQUEUE_CMD_OPTIONS_SHIFT 0
+enum qb_enqueue_commands {
+ enqueue_empty = 0,
+ enqueue_response_always = 1,
+ enqueue_rejects_to_fq = 2
+};
+
+#define QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT 2
+#define QB_ENQUEUE_CMD_IRQ_ON_DISPATCH_SHIFT 3
+#define QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT 4
+
+/**
+ * qbman_eq_desc_clear() - Clear the contents of a descriptor to
+ * default/starting state.
+ * @d: the enqueue descriptor to be cleared
+ */
+void qbman_eq_desc_clear(struct qbman_eq_desc *d)
+{
+ memset(d, 0, sizeof(*d));
+}
+
+/**
+ * qbman_eq_desc_set_no_orp() - Set enqueue descriptor without orp
+ * @d: the enqueue descriptor.
+ * @respond_success: 1 = enqueue with response always; 0 = enqueue with
+ * rejections returned on a FQ.
+ */
+void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success)
+{
+ d->verb &= ~(1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT);
+ if (respond_success)
+ d->verb |= enqueue_response_always;
+ else
+ d->verb |= enqueue_rejects_to_fq;
+}
+
+/*
+ * Exactly one of the following descriptor "targets" should be set. (Calling any
+ * one of these will replace the effect of any prior call to one of these.)
+ * -enqueue to a frame queue
+ * -enqueue to a queuing destination
+ */
+
+/**
+ * qbman_eq_desc_set_fq() - set the FQ for the enqueue command
+ * @d: the enqueue descriptor
+ * @fqid: the id of the frame queue to be enqueued
+ */
+void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, u32 fqid)
+{
+ d->verb &= ~(1 << QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT);
+ d->tgtid = cpu_to_le32(fqid);
+}
+
+/**
+ * qbman_eq_desc_set_qd() - Set Queuing Destination for the enqueue command
+ * @d: the enqueue descriptor
+ * @qdid: the id of the queuing destination to be enqueued
+ * @qd_bin: the queuing destination bin
+ * @qd_prio: the queuing destination priority
+ */
+void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, u32 qdid,
+ u32 qd_bin, u32 qd_prio)
+{
+ d->verb |= 1 << QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT;
+ d->tgtid = cpu_to_le32(qdid);
+ d->qdbin = cpu_to_le16(qd_bin);
+ d->qpri = qd_prio;
+}
+
+#define EQAR_IDX(eqar) ((eqar) & 0x7)
+#define EQAR_VB(eqar) ((eqar) & 0x80)
+#define EQAR_SUCCESS(eqar) ((eqar) & 0x100)
+
+/**
+ * qbman_swp_enqueue() - Issue an enqueue command
+ * @s: the software portal used for enqueue
+ * @d: the enqueue descriptor
+ * @fd: the frame descriptor to be enqueued
+ *
+ * Please note that 'fd' should only be NULL if the "action" of the
+ * descriptor is "orp_hole" or "orp_nesn".
+ *
+ * Return 0 for successful enqueue, -EBUSY if the EQCR is not ready.
+ */
+int qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d,
+ const struct dpaa2_fd *fd)
+{
+ struct qbman_eq_desc *p;
+ u32 eqar = qbman_read_register(s, QBMAN_CINH_SWP_EQAR);
+
+ if (!EQAR_SUCCESS(eqar))
+ return -EBUSY;
+
+ p = qbman_get_cmd(s, QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)));
+ memcpy(&p->dca, &d->dca, 31);
+ memcpy(&p->fd, fd, sizeof(*fd));
+
+ /* Set the verb byte, have to substitute in the valid-bit */
+ dma_wmb();
+ p->verb = d->verb | EQAR_VB(eqar);
+
+ return 0;
+}
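+
+/*
+ * Illustrative sketch, not part of this patch: enqueueing one frame to a frame
+ * queue, retrying while the enqueue ring is busy. The example_enqueue_fd()
+ * helper is hypothetical; the FQ id and the prepared frame descriptor are
+ * assumed to come from the caller.
+ */
+static int example_enqueue_fd(struct qbman_swp *s, u32 fqid,
+			      const struct dpaa2_fd *fd)
+{
+	struct qbman_eq_desc ed;
+	int err;
+
+	qbman_eq_desc_clear(&ed);
+	qbman_eq_desc_set_no_orp(&ed, 0);	/* rejections go back to the FQ */
+	qbman_eq_desc_set_fq(&ed, fqid);
+
+	do {
+		err = qbman_swp_enqueue(s, &ed, fd);
+	} while (err == -EBUSY);
+
+	return err;
+}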
+
+/* Static (push) dequeue */
+
+/**
+ * qbman_swp_push_get() - Get the push dequeue setup
+ * @s: the software portal object
+ * @channel_idx: the channel index to query
+ * @enabled: returned boolean to show whether the push dequeue is enabled
+ * for the given channel
+ */
+void qbman_swp_push_get(struct qbman_swp *s, u8 channel_idx, int *enabled)
+{
+ u16 src = (s->sdq >> QB_SDQCR_SRC_SHIFT) & QB_SDQCR_SRC_MASK;
+
+ WARN_ON(channel_idx > 15);
+ *enabled = !!(src & (1 << channel_idx));
+}
+
+/**
+ * qbman_swp_push_set() - Enable or disable push dequeue
+ * @s: the software portal object
+ * @channel_idx: the channel index (0 to 15)
+ * @enable: enable or disable push dequeue
+ */
+void qbman_swp_push_set(struct qbman_swp *s, u8 channel_idx, int enable)
+{
+ u16 dqsrc;
+
+ WARN_ON(channel_idx > 15);
+ if (enable)
+ s->sdq |= 1 << channel_idx;
+ else
+ s->sdq &= ~(1 << channel_idx);
+
+ /*
+ * Rebuild the complete src map. If no channels are enabled
+ * the SDQCR must be 0 or else QMan will assert errors
+ */
+ dqsrc = (s->sdq >> QB_SDQCR_SRC_SHIFT) & QB_SDQCR_SRC_MASK;
+ if (dqsrc != 0)
+ qbman_write_register(s, QBMAN_CINH_SWP_SDQCR, s->sdq);
+ else
+ qbman_write_register(s, QBMAN_CINH_SWP_SDQCR, 0);
+}
+
+#define QB_VDQCR_VERB_DCT_SHIFT 0
+#define QB_VDQCR_VERB_DT_SHIFT 2
+#define QB_VDQCR_VERB_RLS_SHIFT 4
+#define QB_VDQCR_VERB_WAE_SHIFT 5
+
+enum qb_pull_dt_e {
+ qb_pull_dt_channel,
+ qb_pull_dt_workqueue,
+ qb_pull_dt_framequeue
+};
+
+/**
+ * qbman_pull_desc_clear() - Clear the contents of a descriptor to
+ * default/starting state
+ * @d: the pull dequeue descriptor to be cleared
+ */
+void qbman_pull_desc_clear(struct qbman_pull_desc *d)
+{
+ memset(d, 0, sizeof(*d));
+}
+
+/**
+ * qbman_pull_desc_set_storage() - Set the pull dequeue storage
+ * @d: the pull dequeue descriptor to be set
+ * @storage: the pointer of the memory to store the dequeue result
+ * @storage_phys: the physical address of the storage memory
+ * @stash: to indicate whether write allocate is enabled
+ *
+ * If not called, or if called with 'storage' as NULL, then pull dequeues
+ * will produce results to DQRR. If 'storage' is non-NULL, then results are
+ * produced to the given memory location (using the DMA address which
+ * the caller provides in 'storage_phys'), and 'stash' controls whether or not
+ * those writes to main-memory express a cache-warming attribute.
+ */
+void qbman_pull_desc_set_storage(struct qbman_pull_desc *d,
+ struct dpaa2_dq *storage,
+ dma_addr_t storage_phys,
+ int stash)
+{
+ /* save the virtual address */
+ d->rsp_addr_virt = (u64)storage;
+
+ if (!storage) {
+ d->verb &= ~(1 << QB_VDQCR_VERB_RLS_SHIFT);
+ return;
+ }
+ d->verb |= 1 << QB_VDQCR_VERB_RLS_SHIFT;
+ if (stash)
+ d->verb |= 1 << QB_VDQCR_VERB_WAE_SHIFT;
+ else
+ d->verb &= ~(1 << QB_VDQCR_VERB_WAE_SHIFT);
+
+ d->rsp_addr = cpu_to_le64(storage_phys);
+}
+
+/**
+ * qbman_pull_desc_set_numframes() - Set the number of frames to be dequeued
+ * @d: the pull dequeue descriptor to be set
+ * @numframes: number of frames to be set, must be between 1 and 16, inclusive
+ */
+void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d, u8 numframes)
+{
+ d->numf = numframes - 1;
+}
+
+void qbman_pull_desc_set_token(struct qbman_pull_desc *d, u8 token)
+{
+ d->tok = token;
+}
+
+/*
+ * Exactly one of the following descriptor "actions" should be set. (Calling any
+ * one of these will replace the effect of any prior call to one of these.)
+ * - pull dequeue from the given frame queue (FQ)
+ * - pull dequeue from any FQ in the given work queue (WQ)
+ * - pull dequeue from any FQ in any WQ in the given channel
+ */
+
+/**
+ * qbman_pull_desc_set_fq() - Set fqid from which the dequeue command dequeues
+ * @d: the pull dequeue descriptor to be set
+ * @fqid: the frame queue index of the given FQ
+ */
+void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, u32 fqid)
+{
+ d->verb |= 1 << QB_VDQCR_VERB_DCT_SHIFT;
+ d->verb |= qb_pull_dt_framequeue << QB_VDQCR_VERB_DT_SHIFT;
+ d->dq_src = cpu_to_le32(fqid);
+}
+
+/**
+ * qbman_pull_desc_set_wq() - Set wqid from which the dequeue command dequeues
+ * @d: the pull dequeue descriptor to be set
+ * @wqid: composed of channel id and wqid within the channel
+ * @dct: the dequeue command type
+ */
+void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, u32 wqid,
+ enum qbman_pull_type_e dct)
+{
+ d->verb |= dct << QB_VDQCR_VERB_DCT_SHIFT;
+ d->verb |= qb_pull_dt_workqueue << QB_VDQCR_VERB_DT_SHIFT;
+ d->dq_src = cpu_to_le32(wqid);
+}
+
+/**
+ * qbman_pull_desc_set_channel() - Set channelid from which the dequeue command
+ * dequeues
+ * @d: the pull dequeue descriptor to be set
+ * @chid: the channel id to be dequeued
+ * @dct: the dequeue command type
+ */
+void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, u32 chid,
+ enum qbman_pull_type_e dct)
+{
+ d->verb |= dct << QB_VDQCR_VERB_DCT_SHIFT;
+ d->verb |= qb_pull_dt_channel << QB_VDQCR_VERB_DT_SHIFT;
+ d->dq_src = cpu_to_le32(chid);
+}
+
+/**
+ * qbman_swp_pull() - Issue the pull dequeue command
+ * @s: the software portal object
+ * @d: the software portal descriptor which has been configured with
+ * the set of qbman_pull_desc_set_*() calls
+ *
+ * Return 0 for success, and -EBUSY if the software portal is not ready
+ * to do pull dequeue.
+ */
+int qbman_swp_pull(struct qbman_swp *s, struct qbman_pull_desc *d)
+{
+ struct qbman_pull_desc *p;
+
+ if (!atomic_dec_and_test(&s->vdq.available)) {
+ atomic_inc(&s->vdq.available);
+ return -EBUSY;
+ }
+ s->vdq.storage = (void *)d->rsp_addr_virt;
+ p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR);
+ p->numf = d->numf;
+ p->tok = QMAN_DQ_TOKEN_VALID;
+ p->dq_src = d->dq_src;
+ p->rsp_addr = d->rsp_addr;
+ p->rsp_addr_virt = d->rsp_addr_virt;
+ dma_wmb();
+
+ /* Set the verb byte, have to substitute in the valid-bit */
+ p->verb = d->verb | s->vdq.valid_bit;
+ s->vdq.valid_bit ^= QB_VALID_BIT;
+
+ return 0;
+}
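+
+/*
+ * Illustrative sketch, not part of this patch: issuing a volatile (pull)
+ * dequeue of up to 16 frames from one FQ into caller-provided storage. The
+ * example_pull_fq() helper is hypothetical; the storage and its DMA address
+ * are assumed to have been prepared as dpaa2_io_store_create() does.
+ */
+static int example_pull_fq(struct qbman_swp *s, u32 fqid,
+			   struct dpaa2_dq *storage, dma_addr_t storage_phys)
+{
+	struct qbman_pull_desc pd;
+
+	qbman_pull_desc_clear(&pd);
+	qbman_pull_desc_set_numframes(&pd, 16);
+	qbman_pull_desc_set_fq(&pd, fqid);
+	qbman_pull_desc_set_storage(&pd, storage, storage_phys, 1);
+
+	/* -EBUSY means an earlier volatile dequeue is still outstanding */
+	return qbman_swp_pull(s, &pd);
+}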
+
+#define QMAN_DQRR_PI_MASK 0xf
+
+/**
+ * qbman_swp_dqrr_next() - Get a valid DQRR entry
+ * @s: the software portal object
+ *
+ * Return NULL if there are no unconsumed DQRR entries. Return a DQRR entry
+ * only once, so repeated calls can return a sequence of DQRR entries, without
+ * requiring they be consumed immediately or in any particular order.
+ */
+const struct dpaa2_dq *qbman_swp_dqrr_next(struct qbman_swp *s)
+{
+ u32 verb;
+ u32 response_verb;
+ u32 flags;
+ struct dpaa2_dq *p;
+
+ /* Before using valid-bit to detect if something is there, we have to
+ * handle the case of the DQRR reset bug...
+ */
+ if (unlikely(s->dqrr.reset_bug)) {
+ /*
+ * We pick up new entries by cache-inhibited producer index,
+ * which means that a non-coherent mapping would require us to
+ * invalidate and read *only* once that PI has indicated that
+ * there's an entry here. The first trip around the DQRR ring
+ * will be much less efficient than all subsequent trips around
+ * it...
+ */
+ u8 pi = qbman_read_register(s, QBMAN_CINH_SWP_DQPI) &
+ QMAN_DQRR_PI_MASK;
+
+ /* there are new entries if pi != next_idx */
+ if (pi == s->dqrr.next_idx)
+ return NULL;
+
+ /*
+ * if next_idx is/was the last ring index, and 'pi' is
+ * different, we can disable the workaround as all the ring
+ * entries have now been DMA'd to so valid-bit checking is
+ * repaired. Note: this logic needs to be based on next_idx
+ * (which increments one at a time), rather than on pi (which
+ * can burst and wrap-around between our snapshots of it).
+ */
+ if (s->dqrr.next_idx == (s->dqrr.dqrr_size - 1)) {
+ pr_debug("next_idx=%d, pi=%d, clear reset bug\n",
+ s->dqrr.next_idx, pi);
+ s->dqrr.reset_bug = 0;
+ }
+ prefetch(qbman_get_cmd(s,
+ QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
+ }
+
+ p = qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
+ verb = p->dq.verb;
+
+ /*
+ * If the valid-bit isn't of the expected polarity, nothing there. Note,
+ * in the DQRR reset bug workaround, we shouldn't need to skip this
+ * check, because we've already determined that a new entry is available
+ * and we've invalidated the cacheline before reading it, so the
+ * valid-bit behaviour is repaired and should tell us what we already
+ * knew from reading PI.
+ */
+ if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit) {
+ prefetch(qbman_get_cmd(s,
+ QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
+ return NULL;
+ }
+ /*
+ * There's something there. Move "next_idx" attention to the next ring
+ * entry (and prefetch it) before returning what we found.
+ */
+ s->dqrr.next_idx++;
+ s->dqrr.next_idx &= s->dqrr.dqrr_size - 1; /* Wrap around */
+ if (!s->dqrr.next_idx)
+ s->dqrr.valid_bit ^= QB_VALID_BIT;
+
+ /*
+ * If this is the final response to a volatile dequeue command
+ * indicate that the vdq is available
+ */
+ flags = p->dq.stat;
+ response_verb = verb & QBMAN_RESULT_MASK;
+ if ((response_verb == QBMAN_RESULT_DQ) &&
+ (flags & DPAA2_DQ_STAT_VOLATILE) &&
+ (flags & DPAA2_DQ_STAT_EXPIRED))
+ atomic_inc(&s->vdq.available);
+
+ prefetch(qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
+
+ return p;
+}
+
+/**
+ * qbman_swp_dqrr_consume() - Consume DQRR entries previously returned from
+ * qbman_swp_dqrr_next().
+ * @s: the software portal object
+ * @dq: the DQRR entry to be consumed
+ */
+void qbman_swp_dqrr_consume(struct qbman_swp *s, const struct dpaa2_dq *dq)
+{
+ qbman_write_register(s, QBMAN_CINH_SWP_DCAP, QBMAN_IDX_FROM_DQRR(dq));
+}
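+
+/*
+ * Illustrative sketch, not part of this patch: draining the DQRR ring with the
+ * two helpers above. The example_poll_dqrr() helper is hypothetical. Every
+ * entry handed out by qbman_swp_dqrr_next() must eventually be handed back
+ * with qbman_swp_dqrr_consume().
+ */
+static void example_poll_dqrr(struct qbman_swp *s)
+{
+	const struct dpaa2_dq *dq;
+
+	while ((dq = qbman_swp_dqrr_next(s)) != NULL) {
+		if (qbman_result_is_DQ(dq)) {
+			/* ... process the dequeued frame ... */
+		} else {
+			/* ... handle FQDAN/CDAN/state-change notification ... */
+		}
+		qbman_swp_dqrr_consume(s, dq);
+	}
+}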
+
+/**
+ * qbman_result_has_new_result() - Check and get the dequeue response from the
+ * dq storage memory set in pull dequeue command
+ * @s: the software portal object
+ * @dq: the dequeue result read from the memory
+ *
+ * Return 1 for getting a valid dequeue result, or 0 for not getting a valid
+ * dequeue result.
+ *
+ * Only used for user-provided storage of dequeue results, not DQRR. For
+ * efficiency purposes, the driver will perform any required endianness
+ * conversion to ensure that the user's dequeue result storage is in host-endian
+ * format. As such, once the user has called qbman_result_has_new_result() and
+ * been returned a valid dequeue result, they should not call it again on
+ * the same memory location (except of course if another dequeue command has
+ * been executed to produce a new result to that location).
+ */
+int qbman_result_has_new_result(struct qbman_swp *s, const struct dpaa2_dq *dq)
+{
+ if (dq->dq.tok != QMAN_DQ_TOKEN_VALID)
+ return 0;
+
+ /*
+ * Set token to be 0 so we will detect change back to 1
+ * next time the looping is traversed. Const is cast away here
+ * as we want users to treat the dequeue responses as read only.
+ */
+ ((struct dpaa2_dq *)dq)->dq.tok = 0;
+
+ /*
+ * Determine whether VDQCR is available based on whether the
+ * current result is sitting in the first storage location of
+ * the busy command.
+ */
+ if (s->vdq.storage == dq) {
+ s->vdq.storage = NULL;
+ atomic_inc(&s->vdq.available);
+ }
+
+ return 1;
+}
+
+/**
+ * qbman_release_desc_clear() - Clear the contents of a descriptor to
+ * default/starting state.
+ * @d: the release descriptor to be cleared
+ */
+void qbman_release_desc_clear(struct qbman_release_desc *d)
+{
+ memset(d, 0, sizeof(*d));
+ d->verb = 1 << 5; /* Release Command Valid */
+}
+
+/**
+ * qbman_release_desc_set_bpid() - Set the ID of the buffer pool to release to
+ * @d: the release descriptor
+ * @bpid: the id of the buffer pool
+ */
+void qbman_release_desc_set_bpid(struct qbman_release_desc *d, u16 bpid)
+{
+ d->bpid = cpu_to_le16(bpid);
+}
+
+/**
+ * qbman_release_desc_set_rcdi() - Determines whether or not the portal's RCDI
+ * interrupt source should be asserted after the release command is completed.
+ */
+void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable)
+{
+ if (enable)
+ d->verb |= 1 << 6;
+ else
+ d->verb &= ~(1 << 6);
+}
+
+#define RAR_IDX(rar) ((rar) & 0x7)
+#define RAR_VB(rar) ((rar) & 0x80)
+#define RAR_SUCCESS(rar) ((rar) & 0x100)
+
+/**
+ * qbman_swp_release() - Issue a buffer release command
+ * @s: the software portal object
+ * @d: the release descriptor
+ * @buffers: a pointer pointing to the buffer address to be released
+ * @num_buffers: number of buffers to be released, must be less than 8
+ *
+ * Return 0 for success, -EBUSY if the release command ring is not ready.
+ */
+int qbman_swp_release(struct qbman_swp *s, const struct qbman_release_desc *d,
+ const u64 *buffers, unsigned int num_buffers)
+{
+ int i;
+ struct qbman_release_desc *p;
+ u32 rar;
+
+ if (!num_buffers || (num_buffers > 7))
+ return -EINVAL;
+
+ rar = qbman_read_register(s, QBMAN_CINH_SWP_RAR);
+ if (!RAR_SUCCESS(rar))
+ return -EBUSY;
+
+ /* Start the release command */
+ p = qbman_get_cmd(s, QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));
+ /* Copy the caller's buffer pointers to the command */
+ for (i = 0; i < num_buffers; i++)
+ p->buf[i] = cpu_to_le64(buffers[i]);
+ p->bpid = d->bpid;
+
+ /*
+ * Set the verb byte, have to substitute in the valid-bit and the number
+ * of buffers.
+ */
+ dma_wmb();
+ p->verb = d->verb | RAR_VB(rar) | num_buffers;
+
+ return 0;
+}
+
+struct qbman_acquire_desc {
+ u8 verb;
+ u8 reserved;
+ u16 bpid;
+ u8 num;
+ u8 reserved2[59];
+};
+
+struct qbman_acquire_rslt {
+ u8 verb;
+ u8 rslt;
+ u16 reserved;
+ u8 num;
+ u8 reserved2[3];
+ u64 buf[7];
+};
+
+/**
+ * qbman_swp_acquire() - Issue a buffer acquire command
+ * @s: the software portal object
+ * @bpid: the buffer pool index
+ * @buffers: the array into which the acquired buffer addresses will be written
+ * @num_buffers: number of buffers to be acquired, must be less than 8
+ *
+ * Return the number of buffers acquired, or a negative error code if the
+ * acquire command fails.
+ */
+int qbman_swp_acquire(struct qbman_swp *s, u16 bpid, u64 *buffers,
+ unsigned int num_buffers)
+{
+ struct qbman_acquire_desc *p;
+ struct qbman_acquire_rslt *r;
+ int i;
+
+ if (!num_buffers || (num_buffers > 7))
+ return -EINVAL;
+
+ /* Start the management command */
+ p = qbman_swp_mc_start(s);
+
+ if (!p)
+ return -EBUSY;
+
+ /* Encode the caller-provided attributes */
+ p->bpid = cpu_to_le16(bpid);
+ p->num = num_buffers;
+
+ /* Complete the management command */
+ r = qbman_swp_mc_complete(s, p, QBMAN_MC_ACQUIRE);
+ if (unlikely(!r)) {
+ pr_err("qbman: acquire from BPID %d failed, no response\n",
+ bpid);
+ return -EIO;
+ }
+
+ /* Decode the outcome */
+ WARN_ON((r->verb & 0x7f) != QBMAN_MC_ACQUIRE);
+
+ /* Determine success or failure */
+ if (unlikely(r->rslt != QBMAN_MC_RSLT_OK)) {
+ pr_err("qbman: acquire from BPID 0x%x failed, code=0x%02x\n",
+ bpid, r->rslt);
+ return -EIO;
+ }
+
+ WARN_ON(r->num > num_buffers);
+
+ /* Copy the acquired buffers to the caller's array */
+ for (i = 0; i < r->num; i++)
+ buffers[i] = le64_to_cpu(r->buf[i]);
+
+ return (int)r->num;
+}
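+
+/*
+ * Illustrative sketch, not part of this patch: seeding a buffer pool and then
+ * drawing the buffers straight back through the raw portal interface. The
+ * example_cycle_buffers() helper is hypothetical; 'bufs' must hold 1-7
+ * QBMan-visible DMA addresses.
+ */
+static int example_cycle_buffers(struct qbman_swp *s, u16 bpid,
+				 u64 *bufs, unsigned int num)
+{
+	struct qbman_release_desc rd;
+	int err;
+
+	qbman_release_desc_clear(&rd);
+	qbman_release_desc_set_bpid(&rd, bpid);
+
+	do {
+		err = qbman_swp_release(s, &rd, bufs, num);
+	} while (err == -EBUSY);
+	if (err)
+		return err;
+
+	/* Returns how many buffers were actually drawn back out */
+	return qbman_swp_acquire(s, bpid, bufs, num);
+}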
+
+struct qbman_alt_fq_state_desc {
+ u8 verb;
+ u8 reserved[3];
+ u32 fqid;
+ u8 reserved2[56];
+};
+
+struct qbman_alt_fq_state_rslt {
+ u8 verb;
+ u8 rslt;
+ u8 reserved[62];
+};
+
+#define ALT_FQ_FQID_MASK 0x00FFFFFF
+
+int qbman_swp_alt_fq_state(struct qbman_swp *s, u32 fqid,
+ u8 alt_fq_verb)
+{
+ struct qbman_alt_fq_state_desc *p;
+ struct qbman_alt_fq_state_rslt *r;
+
+ /* Start the management command */
+ p = qbman_swp_mc_start(s);
+ if (!p)
+ return -EBUSY;
+
+ p->fqid = cpu_to_le32(fqid) & ALT_FQ_FQID_MASK;
+
+ /* Complete the management command */
+ r = qbman_swp_mc_complete(s, p, alt_fq_verb);
+ if (unlikely(!r)) {
+ pr_err("qbman: mgmt cmd failed, no response (verb=0x%x)\n",
+ alt_fq_verb);
+ return -EIO;
+ }
+
+ /* Decode the outcome */
+ WARN_ON((r->verb & QBMAN_RESULT_MASK) != alt_fq_verb);
+
+ /* Determine success or failure */
+ if (unlikely(r->rslt != QBMAN_MC_RSLT_OK)) {
+ pr_err("qbman: ALT FQID %d failed: verb = 0x%08x code = 0x%02x\n",
+ fqid, r->verb, r->rslt);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+struct qbman_cdan_ctrl_desc {
+ u8 verb;
+ u8 reserved;
+ u16 ch;
+ u8 we;
+ u8 ctrl;
+ u16 reserved2;
+ u64 cdan_ctx;
+ u8 reserved3[48];
+
+};
+
+struct qbman_cdan_ctrl_rslt {
+ u8 verb;
+ u8 rslt;
+ u16 ch;
+ u8 reserved[60];
+};
+
+int qbman_swp_CDAN_set(struct qbman_swp *s, u16 channelid,
+ u8 we_mask, u8 cdan_en,
+ u64 ctx)
+{
+ struct qbman_cdan_ctrl_desc *p = NULL;
+ struct qbman_cdan_ctrl_rslt *r = NULL;
+
+ /* Start the management command */
+ p = qbman_swp_mc_start(s);
+ if (!p)
+ return -EBUSY;
+
+ /* Encode the caller-provided attributes */
+ p->ch = cpu_to_le16(channelid);
+ p->we = we_mask;
+ if (cdan_en)
+ p->ctrl = 1;
+ else
+ p->ctrl = 0;
+ p->cdan_ctx = cpu_to_le64(ctx);
+
+ /* Complete the management command */
+ r = qbman_swp_mc_complete(s, p, QBMAN_WQCHAN_CONFIGURE);
+ if (unlikely(!r)) {
+ pr_err("qbman: wqchan config failed, no response\n");
+ return -EIO;
+ }
+
+ WARN_ON((r->verb & 0x7f) != QBMAN_WQCHAN_CONFIGURE);
+
+ /* Determine success or failure */
+ if (unlikely(r->rslt != QBMAN_MC_RSLT_OK)) {
+ pr_err("qbman: CDAN cQID %d failed: code = 0x%02x\n",
+ channelid, r->rslt);
+ return -EIO;
+ }
+
+ return 0;
+}
diff --git a/drivers/staging/fsl-mc/bus/dpio/qbman-portal.h b/drivers/staging/fsl-mc/bus/dpio/qbman-portal.h
new file mode 100644
index 000000000000..842855971f34
--- /dev/null
+++ b/drivers/staging/fsl-mc/bus/dpio/qbman-portal.h
@@ -0,0 +1,469 @@
+/*
+ * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
+ * Copyright 2016 NXP
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef __FSL_QBMAN_PORTAL_H
+#define __FSL_QBMAN_PORTAL_H
+
+#include "../../include/dpaa2-fd.h"
+
+struct dpaa2_dq;
+struct qbman_swp;
+
+/* qbman software portal descriptor structure */
+struct qbman_swp_desc {
+ void *cena_bar; /* Cache-enabled portal base address */
+ void *cinh_bar; /* Cache-inhibited portal base address */
+ u32 qman_version;
+};
+
+#define QBMAN_SWP_INTERRUPT_EQRI 0x01
+#define QBMAN_SWP_INTERRUPT_EQDI 0x02
+#define QBMAN_SWP_INTERRUPT_DQRI 0x04
+#define QBMAN_SWP_INTERRUPT_RCRI 0x08
+#define QBMAN_SWP_INTERRUPT_RCDI 0x10
+#define QBMAN_SWP_INTERRUPT_VDCI 0x20
+
+/* the structure for pull dequeue descriptor */
+struct qbman_pull_desc {
+ u8 verb;
+ u8 numf;
+ u8 tok;
+ u8 reserved;
+ u32 dq_src;
+ u64 rsp_addr;
+ u64 rsp_addr_virt;
+ u8 padding[40];
+};
+
+enum qbman_pull_type_e {
+ /* dequeue with priority precedence, respect intra-class scheduling */
+ qbman_pull_type_prio = 1,
+ /* dequeue with active FQ precedence, respect ICS */
+ qbman_pull_type_active,
+ /* dequeue with active FQ precedence, no ICS */
+ qbman_pull_type_active_noics
+};
+
+/* Definitions for parsing dequeue entries */
+#define QBMAN_RESULT_MASK 0x7f
+#define QBMAN_RESULT_DQ 0x60
+#define QBMAN_RESULT_FQRN 0x21
+#define QBMAN_RESULT_FQRNI 0x22
+#define QBMAN_RESULT_FQPN 0x24
+#define QBMAN_RESULT_FQDAN 0x25
+#define QBMAN_RESULT_CDAN 0x26
+#define QBMAN_RESULT_CSCN_MEM 0x27
+#define QBMAN_RESULT_CGCU 0x28
+#define QBMAN_RESULT_BPSCN 0x29
+#define QBMAN_RESULT_CSCN_WQ 0x2a
+
+/* QBMan FQ management command codes */
+#define QBMAN_FQ_SCHEDULE 0x48
+#define QBMAN_FQ_FORCE 0x49
+#define QBMAN_FQ_XON 0x4d
+#define QBMAN_FQ_XOFF 0x4e
+
+/* structure of enqueue descriptor */
+struct qbman_eq_desc {
+ u8 verb;
+ u8 dca;
+ u16 seqnum;
+ u16 orpid;
+ u16 reserved1;
+ u32 tgtid;
+ u32 tag;
+ u16 qdbin;
+ u8 qpri;
+ u8 reserved[3];
+ u8 wae;
+ u8 rspid;
+ u64 rsp_addr;
+ u8 fd[32];
+};
+
+/* buffer release descriptor */
+struct qbman_release_desc {
+ u8 verb;
+ u8 reserved;
+ u16 bpid;
+ u32 reserved2;
+ u64 buf[7];
+};
+
+/* Management command result codes */
+#define QBMAN_MC_RSLT_OK 0xf0
+
+#define CODE_CDAN_WE_EN 0x1
+#define CODE_CDAN_WE_CTX 0x4
+
+/* portal data structure */
+struct qbman_swp {
+ const struct qbman_swp_desc *desc;
+ void __iomem *addr_cena;
+ void __iomem *addr_cinh;
+
+ /* Management commands */
+ struct {
+ u32 valid_bit; /* 0x00 or 0x80 */
+ } mc;
+
+ /* Push dequeues */
+ u32 sdq;
+
+ /* Volatile dequeues */
+ struct {
+ atomic_t available; /* indicates if a command can be sent */
+ u32 valid_bit; /* 0x00 or 0x80 */
+ struct dpaa2_dq *storage; /* NULL if DQRR */
+ } vdq;
+
+ /* DQRR */
+ struct {
+ u32 next_idx;
+ u32 valid_bit;
+ u8 dqrr_size;
+ int reset_bug; /* indicates dqrr reset workaround is needed */
+ } dqrr;
+};
+
+struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d);
+void qbman_swp_finish(struct qbman_swp *p);
+u32 qbman_swp_interrupt_read_status(struct qbman_swp *p);
+void qbman_swp_interrupt_clear_status(struct qbman_swp *p, u32 mask);
+u32 qbman_swp_interrupt_get_trigger(struct qbman_swp *p);
+void qbman_swp_interrupt_set_trigger(struct qbman_swp *p, u32 mask);
+int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p);
+void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit);
+
+void qbman_swp_push_get(struct qbman_swp *p, u8 channel_idx, int *enabled);
+void qbman_swp_push_set(struct qbman_swp *p, u8 channel_idx, int enable);
+
+void qbman_pull_desc_clear(struct qbman_pull_desc *d);
+void qbman_pull_desc_set_storage(struct qbman_pull_desc *d,
+ struct dpaa2_dq *storage,
+ dma_addr_t storage_phys,
+ int stash);
+void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d, u8 numframes);
+void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, u32 fqid);
+void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, u32 wqid,
+ enum qbman_pull_type_e dct);
+void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, u32 chid,
+ enum qbman_pull_type_e dct);
+
+int qbman_swp_pull(struct qbman_swp *p, struct qbman_pull_desc *d);
+
+const struct dpaa2_dq *qbman_swp_dqrr_next(struct qbman_swp *s);
+void qbman_swp_dqrr_consume(struct qbman_swp *s, const struct dpaa2_dq *dq);
+
+int qbman_result_has_new_result(struct qbman_swp *p, const struct dpaa2_dq *dq);
+
+void qbman_eq_desc_clear(struct qbman_eq_desc *d);
+void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success);
+void qbman_eq_desc_set_token(struct qbman_eq_desc *d, u8 token);
+void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, u32 fqid);
+void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, u32 qdid,
+ u32 qd_bin, u32 qd_prio);
+
+int qbman_swp_enqueue(struct qbman_swp *p, const struct qbman_eq_desc *d,
+ const struct dpaa2_fd *fd);
+
+void qbman_release_desc_clear(struct qbman_release_desc *d);
+void qbman_release_desc_set_bpid(struct qbman_release_desc *d, u16 bpid);
+void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable);
+
+int qbman_swp_release(struct qbman_swp *s, const struct qbman_release_desc *d,
+ const u64 *buffers, unsigned int num_buffers);
+int qbman_swp_acquire(struct qbman_swp *s, u16 bpid, u64 *buffers,
+ unsigned int num_buffers);
+int qbman_swp_alt_fq_state(struct qbman_swp *s, u32 fqid,
+ u8 alt_fq_verb);
+int qbman_swp_CDAN_set(struct qbman_swp *s, u16 channelid,
+ u8 we_mask, u8 cdan_en,
+ u64 ctx);
+
+void *qbman_swp_mc_start(struct qbman_swp *p);
+void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, u8 cmd_verb);
+void *qbman_swp_mc_result(struct qbman_swp *p);
+
+/**
+ * qbman_result_is_DQ() - check if the dequeue result is a dequeue response
+ * @dq: the dequeue result to be checked
+ *
+ * DQRR entries may contain non-dequeue results, i.e. notifications.
+ */
+static inline int qbman_result_is_DQ(const struct dpaa2_dq *dq)
+{
+ return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_DQ);
+}
+
+/**
+ * qbman_result_is_SCN() - Check whether the dequeue result is a notification
+ * @dq: the dequeue result to be checked
+ */
+static inline int qbman_result_is_SCN(const struct dpaa2_dq *dq)
+{
+ return !qbman_result_is_DQ(dq);
+}
+
+/* FQ Data Availability */
+static inline int qbman_result_is_FQDAN(const struct dpaa2_dq *dq)
+{
+ return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_FQDAN);
+}
+
+/* Channel Data Availability */
+static inline int qbman_result_is_CDAN(const struct dpaa2_dq *dq)
+{
+ return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_CDAN);
+}
+
+/* Congestion State Change */
+static inline int qbman_result_is_CSCN(const struct dpaa2_dq *dq)
+{
+ return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_CSCN_WQ);
+}
+
+/* Buffer Pool State Change */
+static inline int qbman_result_is_BPSCN(const struct dpaa2_dq *dq)
+{
+ return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_BPSCN);
+}
+
+/* Congestion Group Count Update */
+static inline int qbman_result_is_CGCU(const struct dpaa2_dq *dq)
+{
+ return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_CGCU);
+}
+
+/* Retirement */
+static inline int qbman_result_is_FQRN(const struct dpaa2_dq *dq)
+{
+ return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_FQRN);
+}
+
+/* Retirement Immediate */
+static inline int qbman_result_is_FQRNI(const struct dpaa2_dq *dq)
+{
+ return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_FQRNI);
+}
+
+/* Park */
+static inline int qbman_result_is_FQPN(const struct dpaa2_dq *dq)
+{
+ return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_FQPN);
+}
+
+/**
+ * qbman_result_SCN_state() - Get the state field in State-change notification
+ */
+static inline u8 qbman_result_SCN_state(const struct dpaa2_dq *scn)
+{
+ return scn->scn.state;
+}
+
+#define SCN_RID_MASK 0x00FFFFFF
+
+/**
+ * qbman_result_SCN_rid() - Get the resource id in State-change notification
+ */
+static inline u32 qbman_result_SCN_rid(const struct dpaa2_dq *scn)
+{
+ return le32_to_cpu(scn->scn.rid_tok) & SCN_RID_MASK;
+}
+
+/**
+ * qbman_result_SCN_ctx() - Get the context data in State-change notification
+ */
+static inline u64 qbman_result_SCN_ctx(const struct dpaa2_dq *scn)
+{
+ return le64_to_cpu(scn->scn.ctx);
+}
+
+/**
+ * qbman_swp_fq_schedule() - Move the fq to the scheduled state
+ * @s: the software portal object
+ * @fqid: the index of frame queue to be scheduled
+ *
+ * There are a couple of different ways that a FQ can end up in the parked
+ * state; this schedules it.
+ *
+ * Return 0 for success, or negative error code for failure.
+ */
+static inline int qbman_swp_fq_schedule(struct qbman_swp *s, u32 fqid)
+{
+ return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_SCHEDULE);
+}
+
+/**
+ * qbman_swp_fq_force() - Force the FQ to fully scheduled state
+ * @s: the software portal object
+ * @fqid: the index of frame queue to be forced
+ *
+ * Force eligible will force a tentatively-scheduled FQ to be fully-scheduled
+ * and thus be available for selection by any channel-dequeuing behaviour (push
+ * or pull). If the FQ is subsequently "dequeued" from the channel and is still
+ * empty at the time this happens, the resulting dq_entry will have no FD.
+ * (qbman_result_DQ_fd() will return NULL.)
+ *
+ * Return 0 for success, or negative error code for failure.
+ */
+static inline int qbman_swp_fq_force(struct qbman_swp *s, u32 fqid)
+{
+ return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_FORCE);
+}
+
+/**
+ * qbman_swp_fq_xon() - sets FQ flow-control to XON
+ * @s: the software portal object
+ * @fqid: the index of frame queue
+ *
+ * This setting doesn't affect enqueues to the FQ, just dequeues.
+ *
+ * Return 0 for success, or negative error code for failure.
+ */
+static inline int qbman_swp_fq_xon(struct qbman_swp *s, u32 fqid)
+{
+ return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XON);
+}
+
+/**
+ * qbman_swp_fq_xoff() - sets FQ flow-control to XOFF
+ * @s: the software portal object
+ * @fqid: the index of frame queue
+ *
+ * This setting doesn't affect enqueues to the FQ, just dequeues.
+ * XOFF FQs will remain in the tentatively-scheduled state, even when
+ * non-empty, meaning they won't be selected for scheduled dequeuing.
+ * If a FQ is changed to XOFF after it had already become truly-scheduled
+ * to a channel, and a pull dequeue of that channel occurs that selects
+ * that FQ for dequeuing, then the resulting dq_entry will have no FD.
+ * (qbman_result_DQ_fd() will return NULL.)
+ *
+ * Return 0 for success, or negative error code for failure.
+ */
+static inline int qbman_swp_fq_xoff(struct qbman_swp *s, u32 fqid)
+{
+ return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XOFF);
+}
+
+/* If the user has been allocated a channel object that is going to generate
+ * CDANs to another channel, then the qbman_swp_CDAN* functions will be
+ * necessary.
+ *
+ * CDAN-enabled channels only generate a single CDAN notification, after which
+ * they need to be reenabled before they'll generate another. The idea is
+ * that pull dequeuing will occur in reaction to the CDAN, followed by a
+ * reenable step. Each function generates a distinct command to hardware, so a
+ * combination function is provided if the user wishes to modify the "context"
+ * (which shows up in each CDAN message) each time they reenable, as a single
+ * command to hardware.
+ */
+
+/**
+ * qbman_swp_CDAN_set_context() - Set CDAN context
+ * @s: the software portal object
+ * @channelid: the channel index
+ * @ctx: the context to be set in CDAN
+ *
+ * Return 0 for success, or negative error code for failure.
+ */
+static inline int qbman_swp_CDAN_set_context(struct qbman_swp *s, u16 channelid,
+ u64 ctx)
+{
+ return qbman_swp_CDAN_set(s, channelid,
+ CODE_CDAN_WE_CTX,
+ 0, ctx);
+}
+
+/**
+ * qbman_swp_CDAN_enable() - Enable CDAN for the channel
+ * @s: the software portal object
+ * @channelid: the index of the channel to generate CDAN
+ *
+ * Return 0 for success, or negative error code for failure.
+ */
+static inline int qbman_swp_CDAN_enable(struct qbman_swp *s, u16 channelid)
+{
+ return qbman_swp_CDAN_set(s, channelid,
+ CODE_CDAN_WE_EN,
+ 1, 0);
+}
+
+/**
+ * qbman_swp_CDAN_disable() - disable CDAN for the channel
+ * @s: the software portal object
+ * @channelid: the index of the channel to generate CDAN
+ *
+ * Return 0 for success, or negative error code for failure.
+ */
+static inline int qbman_swp_CDAN_disable(struct qbman_swp *s, u16 channelid)
+{
+ return qbman_swp_CDAN_set(s, channelid,
+ CODE_CDAN_WE_EN,
+ 0, 0);
+}
+
+/**
+ * qbman_swp_CDAN_set_context_enable() - Set CDAN context and enable CDAN
+ * @s: the software portal object
+ * @channelid: the index of the channel to generate CDAN
+ * @ctx: the context set in CDAN
+ *
+ * Return 0 for success, or negative error code for failure.
+ */
+static inline int qbman_swp_CDAN_set_context_enable(struct qbman_swp *s,
+ u16 channelid,
+ u64 ctx)
+{
+ return qbman_swp_CDAN_set(s, channelid,
+ CODE_CDAN_WE_EN | CODE_CDAN_WE_CTX,
+ 1, ctx);
+}
+
+/* Wraps up submit + poll-for-result */
+static inline void *qbman_swp_mc_complete(struct qbman_swp *swp, void *cmd,
+ u8 cmd_verb)
+{
+ int loopvar = 1000;
+
+ qbman_swp_mc_submit(swp, cmd, cmd_verb);
+
+ do {
+ cmd = qbman_swp_mc_result(swp);
+ } while (!cmd && loopvar--);
+
+ WARN_ON(!loopvar);
+
+ return cmd;
+}
+
+#endif /* __FSL_QBMAN_PORTAL_H */
diff --git a/drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c b/drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c
index 87e44712b56c..49127acb85b2 100644
--- a/drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c
+++ b/drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c
@@ -95,8 +95,8 @@ int __init its_fsl_mc_msi_init(void)
continue;
}
- WARN_ON(mc_msi_domain->
- host_data != &its_fsl_mc_msi_domain_info);
+ WARN_ON(mc_msi_domain->host_data !=
+ &its_fsl_mc_msi_domain_info);
pr_info("fsl-mc MSI: %s domain created\n", np->full_name);
}
diff --git a/drivers/staging/fsl-mc/include/dpaa2-fd.h b/drivers/staging/fsl-mc/include/dpaa2-fd.h
new file mode 100644
index 000000000000..cf7857f00a5c
--- /dev/null
+++ b/drivers/staging/fsl-mc/include/dpaa2-fd.h
@@ -0,0 +1,451 @@
+/*
+ * Copyright 2014-2016 Freescale Semiconductor Inc.
+ * Copyright 2016 NXP
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef __FSL_DPAA2_FD_H
+#define __FSL_DPAA2_FD_H
+
+#include <linux/kernel.h>
+
+/**
+ * DOC: DPAA2 FD - Frame Descriptor APIs for DPAA2
+ *
+ * Frame Descriptors (FDs) are used to describe frame data in the DPAA2.
+ * Frames can be enqueued and dequeued to Frame Queues (FQs) which are consumed
+ * by the various DPAA accelerators (WRIOP, SEC, PME, DCE).
+ *
+ * There are three types of frames: single, scatter gather, and frame lists.
+ *
+ * The set of APIs in this file must be used to create, manipulate and
+ * query Frame Descriptors.
+ */
+
+/**
+ * struct dpaa2_fd - Struct describing FDs
+ * @words: for easier/faster copying the whole FD structure
+ * @addr: address in the FD
+ * @len: length in the FD
+ * @bpid: buffer pool ID
+ * @format_offset: format, offset, and short-length fields
+ * @frc: frame context
+ * @ctrl: control bits, including dd, sc, va, err, etc.
+ * @flc: flow context address
+ *
+ * This structure represents the basic Frame Descriptor used in the system.
+ */
+struct dpaa2_fd {
+ union {
+ u32 words[8];
+ struct dpaa2_fd_simple {
+ __le64 addr;
+ __le32 len;
+ __le16 bpid;
+ __le16 format_offset;
+ __le32 frc;
+ __le32 ctrl;
+ __le64 flc;
+ } simple;
+ };
+};
+
+#define FD_SHORT_LEN_FLAG_MASK 0x1
+#define FD_SHORT_LEN_FLAG_SHIFT 14
+#define FD_SHORT_LEN_MASK 0x3FFFF
+#define FD_OFFSET_MASK 0x0FFF
+#define FD_FORMAT_MASK 0x3
+#define FD_FORMAT_SHIFT 12
+#define FD_BPID_MASK 0x3FFF
+#define SG_SHORT_LEN_FLAG_MASK 0x1
+#define SG_SHORT_LEN_FLAG_SHIFT 14
+#define SG_SHORT_LEN_MASK 0x1FFFF
+#define SG_OFFSET_MASK 0x0FFF
+#define SG_FORMAT_MASK 0x3
+#define SG_FORMAT_SHIFT 12
+#define SG_BPID_MASK 0x3FFF
+#define SG_FINAL_FLAG_MASK 0x1
+#define SG_FINAL_FLAG_SHIFT 15
+
+enum dpaa2_fd_format {
+ dpaa2_fd_single = 0,
+ dpaa2_fd_list,
+ dpaa2_fd_sg
+};
+
+/**
+ * dpaa2_fd_get_addr() - get the addr field of frame descriptor
+ * @fd: the given frame descriptor
+ *
+ * Return the address in the frame descriptor.
+ */
+static inline dma_addr_t dpaa2_fd_get_addr(const struct dpaa2_fd *fd)
+{
+ return (dma_addr_t)le64_to_cpu(fd->simple.addr);
+}
+
+/**
+ * dpaa2_fd_set_addr() - Set the addr field of frame descriptor
+ * @fd: the given frame descriptor
+ * @addr: the address to be set in the frame descriptor
+ */
+static inline void dpaa2_fd_set_addr(struct dpaa2_fd *fd, dma_addr_t addr)
+{
+ fd->simple.addr = cpu_to_le64(addr);
+}
+
+/**
+ * dpaa2_fd_get_frc() - Get the frame context in the frame descriptor
+ * @fd: the given frame descriptor
+ *
+ * Return the frame context field in the frame descriptor.
+ */
+static inline u32 dpaa2_fd_get_frc(const struct dpaa2_fd *fd)
+{
+ return le32_to_cpu(fd->simple.frc);
+}
+
+/**
+ * dpaa2_fd_set_frc() - Set the frame context in the frame descriptor
+ * @fd: the given frame descriptor
+ * @frc: the frame context to be set in the frame descriptor
+ */
+static inline void dpaa2_fd_set_frc(struct dpaa2_fd *fd, u32 frc)
+{
+ fd->simple.frc = cpu_to_le32(frc);
+}
+
+/**
+ * dpaa2_fd_get_ctrl() - Get the control bits in the frame descriptor
+ * @fd: the given frame descriptor
+ *
+ * Return the control bits field in the frame descriptor.
+ */
+static inline u32 dpaa2_fd_get_ctrl(const struct dpaa2_fd *fd)
+{
+ return le32_to_cpu(fd->simple.ctrl);
+}
+
+/**
+ * dpaa2_fd_set_ctrl() - Set the control bits in the frame descriptor
+ * @fd: the given frame descriptor
+ * @ctrl: the control bits to be set in the frame descriptor
+ */
+static inline void dpaa2_fd_set_ctrl(struct dpaa2_fd *fd, u32 ctrl)
+{
+ fd->simple.ctrl = cpu_to_le32(ctrl);
+}
+
+/**
+ * dpaa2_fd_get_flc() - Get the flow context in the frame descriptor
+ * @fd: the given frame descriptor
+ *
+ * Return the flow context in the frame descriptor.
+ */
+static inline dma_addr_t dpaa2_fd_get_flc(const struct dpaa2_fd *fd)
+{
+ return (dma_addr_t)le64_to_cpu(fd->simple.flc);
+}
+
+/**
+ * dpaa2_fd_set_flc() - Set the flow context field of frame descriptor
+ * @fd: the given frame descriptor
+ * @flc_addr: the flow context address to be set in the frame descriptor
+ */
+static inline void dpaa2_fd_set_flc(struct dpaa2_fd *fd, dma_addr_t flc_addr)
+{
+ fd->simple.flc = cpu_to_le64(flc_addr);
+}
+
+static inline bool dpaa2_fd_short_len(const struct dpaa2_fd *fd)
+{
+ return !!((le16_to_cpu(fd->simple.format_offset) >>
+ FD_SHORT_LEN_FLAG_SHIFT) & FD_SHORT_LEN_FLAG_MASK);
+}
+
+/**
+ * dpaa2_fd_get_len() - Get the length in the frame descriptor
+ * @fd: the given frame descriptor
+ *
+ * Return the length field in the frame descriptor.
+ */
+static inline u32 dpaa2_fd_get_len(const struct dpaa2_fd *fd)
+{
+ if (dpaa2_fd_short_len(fd))
+ return le32_to_cpu(fd->simple.len) & FD_SHORT_LEN_MASK;
+
+ return le32_to_cpu(fd->simple.len);
+}
+
+/**
+ * dpaa2_fd_set_len() - Set the length field of frame descriptor
+ * @fd: the given frame descriptor
+ * @len: the length to be set in the frame descriptor
+ */
+static inline void dpaa2_fd_set_len(struct dpaa2_fd *fd, u32 len)
+{
+ fd->simple.len = cpu_to_le32(len);
+}
+
+/**
+ * dpaa2_fd_get_offset() - Get the offset field in the frame descriptor
+ * @fd: the given frame descriptor
+ *
+ * Return the offset.
+ */
+static inline uint16_t dpaa2_fd_get_offset(const struct dpaa2_fd *fd)
+{
+ return le16_to_cpu(fd->simple.format_offset) & FD_OFFSET_MASK;
+}
+
+/**
+ * dpaa2_fd_set_offset() - Set the offset field of frame descriptor
+ * @fd: the given frame descriptor
+ * @offset: the offset to be set in the frame descriptor
+ */
+static inline void dpaa2_fd_set_offset(struct dpaa2_fd *fd, uint16_t offset)
+{
+ fd->simple.format_offset &= cpu_to_le16(~FD_OFFSET_MASK);
+ fd->simple.format_offset |= cpu_to_le16(offset);
+}
+
+/**
+ * dpaa2_fd_get_format() - Get the format field in the frame descriptor
+ * @fd: the given frame descriptor
+ *
+ * Return the format.
+ */
+static inline enum dpaa2_fd_format dpaa2_fd_get_format(
+ const struct dpaa2_fd *fd)
+{
+ return (enum dpaa2_fd_format)((le16_to_cpu(fd->simple.format_offset)
+ >> FD_FORMAT_SHIFT) & FD_FORMAT_MASK);
+}
+
+/**
+ * dpaa2_fd_set_format() - Set the format field of frame descriptor
+ * @fd: the given frame descriptor
+ * @format: the format to be set in the frame descriptor
+ */
+static inline void dpaa2_fd_set_format(struct dpaa2_fd *fd,
+ enum dpaa2_fd_format format)
+{
+ fd->simple.format_offset &=
+ cpu_to_le16(~(FD_FORMAT_MASK << FD_FORMAT_SHIFT));
+ fd->simple.format_offset |= cpu_to_le16(format << FD_FORMAT_SHIFT);
+}
+
+/**
+ * dpaa2_fd_get_bpid() - Get the bpid field in the frame descriptor
+ * @fd: the given frame descriptor
+ *
+ * Return the buffer pool id.
+ */
+static inline uint16_t dpaa2_fd_get_bpid(const struct dpaa2_fd *fd)
+{
+ return le16_to_cpu(fd->simple.bpid) & FD_BPID_MASK;
+}
+
+/**
+ * dpaa2_fd_set_bpid() - Set the bpid field of frame descriptor
+ * @fd: the given frame descriptor
+ * @bpid: buffer pool id to be set
+ */
+static inline void dpaa2_fd_set_bpid(struct dpaa2_fd *fd, uint16_t bpid)
+{
+ fd->simple.bpid &= cpu_to_le16(~(FD_BPID_MASK));
+ fd->simple.bpid |= cpu_to_le16(bpid);
+}
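
Taken together, the setters above are what a driver would use to describe a single contiguous buffer before enqueueing it. A minimal sketch (illustrative only; the DMA address, length and buffer pool id are placeholders supplied by the caller):

static void example_build_single_fd(struct dpaa2_fd *fd, dma_addr_t buf_iova,
				    u32 buf_len, u16 bpid)
{
	memset(fd, 0, sizeof(*fd));		/* start from a clean descriptor */
	dpaa2_fd_set_addr(fd, buf_iova);	/* where the frame data lives */
	dpaa2_fd_set_offset(fd, 0);		/* data starts at the buffer head */
	dpaa2_fd_set_len(fd, buf_len);
	dpaa2_fd_set_format(fd, dpaa2_fd_single);
	dpaa2_fd_set_bpid(fd, bpid);		/* pool the buffer is released to */
}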
+
+/**
+ * struct dpaa2_sg_entry - the scatter-gathering structure
+ * @addr: address of the sg entry
+ * @len: length in this sg entry
+ * @bpid: buffer pool id
+ * @format_offset: format and offset fields
+ */
+struct dpaa2_sg_entry {
+ __le64 addr;
+ __le32 len;
+ __le16 bpid;
+ __le16 format_offset;
+};
+
+enum dpaa2_sg_format {
+ dpaa2_sg_single = 0,
+ dpaa2_sg_frame_data,
+ dpaa2_sg_sgt_ext
+};
+
+/* Accessors for SG entry fields */
+
+/**
+ * dpaa2_sg_get_addr() - Get the address from SG entry
+ * @sg: the given scatter-gathering object
+ *
+ * Return the address.
+ */
+static inline dma_addr_t dpaa2_sg_get_addr(const struct dpaa2_sg_entry *sg)
+{
+ return (dma_addr_t)le64_to_cpu(sg->addr);
+}
+
+/**
+ * dpaa2_sg_set_addr() - Set the address in SG entry
+ * @sg: the given scatter-gathering object
+ * @addr: the address to be set
+ */
+static inline void dpaa2_sg_set_addr(struct dpaa2_sg_entry *sg, dma_addr_t addr)
+{
+ sg->addr = cpu_to_le64(addr);
+}
+
+static inline bool dpaa2_sg_short_len(const struct dpaa2_sg_entry *sg)
+{
+ return !!((le16_to_cpu(sg->format_offset) >> SG_SHORT_LEN_FLAG_SHIFT)
+ & SG_SHORT_LEN_FLAG_MASK);
+}
+
+/**
+ * dpaa2_sg_get_len() - Get the length in SG entry
+ * @sg: the given scatter-gathering object
+ *
+ * Return the length.
+ */
+static inline u32 dpaa2_sg_get_len(const struct dpaa2_sg_entry *sg)
+{
+ if (dpaa2_sg_short_len(sg))
+ return le32_to_cpu(sg->len) & SG_SHORT_LEN_MASK;
+
+ return le32_to_cpu(sg->len);
+}
+
+/**
+ * dpaa2_sg_set_len() - Set the length in SG entry
+ * @sg: the given scatter-gathering object
+ * @len: the length to be set
+ */
+static inline void dpaa2_sg_set_len(struct dpaa2_sg_entry *sg, u32 len)
+{
+ sg->len = cpu_to_le32(len);
+}
+
+/**
+ * dpaa2_sg_get_offset() - Get the offset in SG entry
+ * @sg: the given scatter-gathering object
+ *
+ * Return the offset.
+ */
+static inline u16 dpaa2_sg_get_offset(const struct dpaa2_sg_entry *sg)
+{
+ return le16_to_cpu(sg->format_offset) & SG_OFFSET_MASK;
+}
+
+/**
+ * dpaa2_sg_set_offset() - Set the offset in SG entry
+ * @sg: the given scatter-gathering object
+ * @offset: the offset to be set
+ */
+static inline void dpaa2_sg_set_offset(struct dpaa2_sg_entry *sg,
+ u16 offset)
+{
+ sg->format_offset &= cpu_to_le16(~SG_OFFSET_MASK);
+ sg->format_offset |= cpu_to_le16(offset);
+}
+
+/**
+ * dpaa2_sg_get_format() - Get the SG format in SG entry
+ * @sg: the given scatter-gathering object
+ *
+ * Return the format.
+ */
+static inline enum dpaa2_sg_format
+ dpaa2_sg_get_format(const struct dpaa2_sg_entry *sg)
+{
+ return (enum dpaa2_sg_format)((le16_to_cpu(sg->format_offset)
+ >> SG_FORMAT_SHIFT) & SG_FORMAT_MASK);
+}
+
+/**
+ * dpaa2_sg_set_format() - Set the SG format in SG entry
+ * @sg: the given scatter-gathering object
+ * @format: the format to be set
+ */
+static inline void dpaa2_sg_set_format(struct dpaa2_sg_entry *sg,
+ enum dpaa2_sg_format format)
+{
+ sg->format_offset &= cpu_to_le16(~(SG_FORMAT_MASK << SG_FORMAT_SHIFT));
+ sg->format_offset |= cpu_to_le16(format << SG_FORMAT_SHIFT);
+}
+
+/**
+ * dpaa2_sg_get_bpid() - Get the buffer pool id in SG entry
+ * @sg: the given scatter-gathering object
+ *
+ * Return the bpid.
+ */
+static inline u16 dpaa2_sg_get_bpid(const struct dpaa2_sg_entry *sg)
+{
+ return le16_to_cpu(sg->bpid) & SG_BPID_MASK;
+}
+
+/**
+ * dpaa2_sg_set_bpid() - Set the buffer pool id in SG entry
+ * @sg: the given scatter-gathering object
+ * @bpid: the bpid to be set
+ */
+static inline void dpaa2_sg_set_bpid(struct dpaa2_sg_entry *sg, u16 bpid)
+{
+ sg->bpid &= cpu_to_le16(~(SG_BPID_MASK));
+ sg->bpid |= cpu_to_le16(bpid);
+}
+
+/**
+ * dpaa2_sg_is_final() - Check final bit in SG entry
+ * @sg: the given scatter-gathering object
+ *
+ * Return true if this is the final SG entry in the frame, false otherwise.
+ */
+static inline bool dpaa2_sg_is_final(const struct dpaa2_sg_entry *sg)
+{
+ return !!(le16_to_cpu(sg->format_offset) >> SG_FINAL_FLAG_SHIFT);
+}
+
+/**
+ * dpaa2_sg_set_final() - Set the final bit in SG entry
+ * @sg: the given scatter-gathering object
+ * @final: the final boolean to be set
+ */
+static inline void dpaa2_sg_set_final(struct dpaa2_sg_entry *sg, bool final)
+{
+ sg->format_offset &= cpu_to_le16(~(SG_FINAL_FLAG_MASK
+ << SG_FINAL_FLAG_SHIFT));
+ sg->format_offset |= cpu_to_le16(final << SG_FINAL_FLAG_SHIFT);
+}
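
For a scatter-gather frame the same pattern applies per entry, with the final bit set only on the last entry of the table. A brief sketch (illustrative only; addresses and lengths are placeholders):

static void example_fill_sg_pair(struct dpaa2_sg_entry sgt[2],
				 dma_addr_t addr0, u32 len0,
				 dma_addr_t addr1, u32 len1)
{
	dpaa2_sg_set_addr(&sgt[0], addr0);
	dpaa2_sg_set_len(&sgt[0], len0);
	dpaa2_sg_set_final(&sgt[0], false);	/* more entries follow */

	dpaa2_sg_set_addr(&sgt[1], addr1);
	dpaa2_sg_set_len(&sgt[1], len1);
	dpaa2_sg_set_final(&sgt[1], true);	/* last entry in the table */
}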
+
+#endif /* __FSL_DPAA2_FD_H */
diff --git a/drivers/staging/fsl-mc/include/dpaa2-global.h b/drivers/staging/fsl-mc/include/dpaa2-global.h
new file mode 100644
index 000000000000..0326447fde4e
--- /dev/null
+++ b/drivers/staging/fsl-mc/include/dpaa2-global.h
@@ -0,0 +1,202 @@
+/*
+ * Copyright 2014-2016 Freescale Semiconductor Inc.
+ * Copyright 2016 NXP
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef __FSL_DPAA2_GLOBAL_H
+#define __FSL_DPAA2_GLOBAL_H
+
+#include <linux/types.h>
+#include <linux/cpumask.h>
+#include "dpaa2-fd.h"
+
+struct dpaa2_dq {
+ union {
+ struct common {
+ u8 verb;
+ u8 reserved[63];
+ } common;
+ struct dq {
+ u8 verb;
+ u8 stat;
+ __le16 seqnum;
+ __le16 oprid;
+ u8 reserved;
+ u8 tok;
+ __le32 fqid;
+ u32 reserved2;
+ __le32 fq_byte_cnt;
+ __le32 fq_frm_cnt;
+ __le64 fqd_ctx;
+ u8 fd[32];
+ } dq;
+ struct scn {
+ u8 verb;
+ u8 stat;
+ u8 state;
+ u8 reserved;
+ __le32 rid_tok;
+ __le64 ctx;
+ } scn;
+ };
+};
+
+/* Parsing frame dequeue results */
+/* FQ empty */
+#define DPAA2_DQ_STAT_FQEMPTY 0x80
+/* FQ held active */
+#define DPAA2_DQ_STAT_HELDACTIVE 0x40
+/* FQ force eligible */
+#define DPAA2_DQ_STAT_FORCEELIGIBLE 0x20
+/* valid frame */
+#define DPAA2_DQ_STAT_VALIDFRAME 0x10
+/* FQ ODP enable */
+#define DPAA2_DQ_STAT_ODPVALID 0x04
+/* volatile dequeue */
+#define DPAA2_DQ_STAT_VOLATILE 0x02
+/* volatile dequeue command is expired */
+#define DPAA2_DQ_STAT_EXPIRED 0x01
+
+#define DQ_FQID_MASK 0x00FFFFFF
+#define DQ_FRAME_COUNT_MASK 0x00FFFFFF
+
+/**
+ * dpaa2_dq_flags() - Get the stat field of dequeue response
+ * @dq: the dequeue result.
+ */
+static inline u32 dpaa2_dq_flags(const struct dpaa2_dq *dq)
+{
+ return dq->dq.stat;
+}
+
+/**
+ * dpaa2_dq_is_pull() - Check whether the dq response is from a pull
+ * command.
+ * @dq: the dequeue result
+ *
+ * Return 1 for a volatile (pull) dequeue, 0 for a static dequeue.
+ */
+static inline int dpaa2_dq_is_pull(const struct dpaa2_dq *dq)
+{
+ return (int)(dpaa2_dq_flags(dq) & DPAA2_DQ_STAT_VOLATILE);
+}
+
+/**
+ * dpaa2_dq_is_pull_complete() - Check whether the pull command is completed.
+ * @dq: the dequeue result
+ *
+ * Return true if the pull command has completed (expired), false otherwise.
+ */
+static inline bool dpaa2_dq_is_pull_complete(const struct dpaa2_dq *dq)
+{
+ return !!(dpaa2_dq_flags(dq) & DPAA2_DQ_STAT_EXPIRED);
+}
+
+/**
+ * dpaa2_dq_seqnum() - Get the seqnum field in dequeue response
+ * @dq: the dequeue result
+ *
+ * seqnum is valid only if VALIDFRAME flag is TRUE
+ *
+ * Return seqnum.
+ */
+static inline u16 dpaa2_dq_seqnum(const struct dpaa2_dq *dq)
+{
+ return le16_to_cpu(dq->dq.seqnum);
+}
+
+/**
+ * dpaa2_dq_odpid() - Get the odpid field in dequeue response
+ * @dq: the dequeue result
+ *
+ * odpid is valid only if ODPVALID flag is TRUE.
+ *
+ * Return odpid.
+ */
+static inline u16 dpaa2_dq_odpid(const struct dpaa2_dq *dq)
+{
+ return le16_to_cpu(dq->dq.oprid);
+}
+
+/**
+ * dpaa2_dq_fqid() - Get the fqid in dequeue response
+ * @dq: the dequeue result
+ *
+ * Return fqid.
+ */
+static inline u32 dpaa2_dq_fqid(const struct dpaa2_dq *dq)
+{
+ return le32_to_cpu(dq->dq.fqid) & DQ_FQID_MASK;
+}
+
+/**
+ * dpaa2_dq_byte_count() - Get the byte count in dequeue response
+ * @dq: the dequeue result
+ *
+ * Return the byte count remaining in the FQ.
+ */
+static inline u32 dpaa2_dq_byte_count(const struct dpaa2_dq *dq)
+{
+ return le32_to_cpu(dq->dq.fq_byte_cnt);
+}
+
+/**
+ * dpaa2_dq_frame_count() - Get the frame count in dequeue response
+ * @dq: the dequeue result
+ *
+ * Return the frame count remaining in the FQ.
+ */
+static inline u32 dpaa2_dq_frame_count(const struct dpaa2_dq *dq)
+{
+ return le32_to_cpu(dq->dq.fq_frm_cnt) & DQ_FRAME_COUNT_MASK;
+}
+
+/**
+ * dpaa2_dq_fqd_ctx() - Get the frame queue context in dequeue response
+ * @dq: the dequeue result
+ *
+ * Return the frame queue context.
+ */
+static inline u64 dpaa2_dq_fqd_ctx(const struct dpaa2_dq *dq)
+{
+ return le64_to_cpu(dq->dq.fqd_ctx);
+}
+
+/**
+ * dpaa2_dq_fd() - Get the frame descriptor in dequeue response
+ * @dq: the dequeue result
+ *
+ * Return the frame descriptor.
+ */
+static inline const struct dpaa2_fd *dpaa2_dq_fd(const struct dpaa2_dq *dq)
+{
+ return (const struct dpaa2_fd *)&dq->dq.fd[0];
+}
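
A consumer of dequeue results would normally combine the helpers above as in the sketch below (illustrative only): the VALIDFRAME flag is checked before the embedded frame descriptor is used.

static const struct dpaa2_fd *example_dq_to_fd(const struct dpaa2_dq *dq)
{
	/* Entries without a valid frame (e.g. FQ-empty results) carry no FD. */
	if (!(dpaa2_dq_flags(dq) & DPAA2_DQ_STAT_VALIDFRAME))
		return NULL;

	return dpaa2_dq_fd(dq);
}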
+
+#endif /* __FSL_DPAA2_GLOBAL_H */
diff --git a/drivers/staging/fsl-mc/include/dpaa2-io.h b/drivers/staging/fsl-mc/include/dpaa2-io.h
new file mode 100644
index 000000000000..002829cecd75
--- /dev/null
+++ b/drivers/staging/fsl-mc/include/dpaa2-io.h
@@ -0,0 +1,139 @@
+/*
+ * Copyright 2014-2016 Freescale Semiconductor Inc.
+ * Copyright NXP
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef __FSL_DPAA2_IO_H
+#define __FSL_DPAA2_IO_H
+
+#include <linux/types.h>
+#include <linux/cpumask.h>
+
+#include "dpaa2-fd.h"
+#include "dpaa2-global.h"
+
+struct dpaa2_io;
+struct dpaa2_io_store;
+struct device;
+
+/**
+ * DOC: DPIO Service
+ *
+ * The DPIO service provides APIs for users to interact with the datapath
+ * by enqueueing and dequeuing frame descriptors.
+ *
+ * The following set of APIs can be used to enqueue and dequeue frames
+ * as well as producing notification callbacks when data is available
+ * for dequeue.
+ */
+
+/**
+ * struct dpaa2_io_desc - The DPIO descriptor
+ * @receives_notifications: Use notification mode. Non-zero if the DPIO
+ * has a channel.
+ * @has_8prio: Set to non-zero for a channel with 8 priority WQs. Ignored
+ * unless receives_notifications is TRUE.
+ * @cpu: The cpu index on which, at a minimum, the interrupt
+ * handlers will execute.
+ * @stash_affinity: The stash affinity for this portal, favouring 'cpu'
+ * @regs_cena: The cache enabled regs.
+ * @regs_cinh: The cache inhibited regs
+ * @dpio_id: The dpio index
+ * @qman_version: The qman version
+ *
+ * Describes the attributes and features of the DPIO object.
+ */
+struct dpaa2_io_desc {
+ int receives_notifications;
+ int has_8prio;
+ int cpu;
+ void *regs_cena;
+ void *regs_cinh;
+ int dpio_id;
+ u32 qman_version;
+};
+
+struct dpaa2_io *dpaa2_io_create(const struct dpaa2_io_desc *desc);
+
+void dpaa2_io_down(struct dpaa2_io *d);
+
+irqreturn_t dpaa2_io_irq(struct dpaa2_io *obj);
+
+/**
+ * struct dpaa2_io_notification_ctx - The DPIO notification context structure
+ * @cb: The callback to be invoked when the notification arrives
+ * @is_cdan: Zero for FQDAN, non-zero for CDAN
+ * @id: FQID or channel ID, needed for rearm
+ * @desired_cpu: The cpu on which the notifications will show up. -1 means
+ * any CPU.
+ * @dpio_id: The dpio index
+ * @qman64: The 64-bit context value that shows up in the FQDAN/CDAN.
+ * @node: The list node
+ * @dpio_private: The dpio object internal to dpio_service
+ *
+ * Used when a FQDAN/CDAN registration is made by drivers.
+ */
+struct dpaa2_io_notification_ctx {
+ void (*cb)(struct dpaa2_io_notification_ctx *);
+ int is_cdan;
+ u32 id;
+ int desired_cpu;
+ int dpio_id;
+ u64 qman64;
+ struct list_head node;
+ void *dpio_private;
+};
+
+int dpaa2_io_service_register(struct dpaa2_io *service,
+ struct dpaa2_io_notification_ctx *ctx);
+void dpaa2_io_service_deregister(struct dpaa2_io *service,
+ struct dpaa2_io_notification_ctx *ctx);
+int dpaa2_io_service_rearm(struct dpaa2_io *service,
+ struct dpaa2_io_notification_ctx *ctx);
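
A driver that wants CDAN (or FQDAN) notifications registers a context with a callback, then rearms after servicing each notification. A rough sketch (illustrative only; the dpaa2_io instance and channel id are assumptions supplied by the calling driver, and the callback body is elided):

static void example_cdan_cb(struct dpaa2_io_notification_ctx *ctx)
{
	/*
	 * Schedule the pull-dequeue work here, then rearm the channel with
	 * dpaa2_io_service_rearm() once that work has drained it.
	 */
}

static int example_register_cdan(struct dpaa2_io *io, u32 channel_id, u64 ctx64)
{
	/* The context must stay allocated while registered, hence static. */
	static struct dpaa2_io_notification_ctx nctx;

	nctx.cb = example_cdan_cb;
	nctx.is_cdan = 1;
	nctx.id = channel_id;
	nctx.desired_cpu = -1;		/* no CPU preference */
	nctx.qman64 = ctx64;

	return dpaa2_io_service_register(io, &nctx);
}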
+
+int dpaa2_io_service_pull_fq(struct dpaa2_io *d, u32 fqid,
+ struct dpaa2_io_store *s);
+int dpaa2_io_service_pull_channel(struct dpaa2_io *d, u32 channelid,
+ struct dpaa2_io_store *s);
+
+int dpaa2_io_service_enqueue_fq(struct dpaa2_io *d, u32 fqid,
+ const struct dpaa2_fd *fd);
+int dpaa2_io_service_enqueue_qd(struct dpaa2_io *d, u32 qdid, u8 prio,
+ u16 qdbin, const struct dpaa2_fd *fd);
+int dpaa2_io_service_release(struct dpaa2_io *d, u32 bpid,
+ const u64 *buffers, unsigned int num_buffers);
+int dpaa2_io_service_acquire(struct dpaa2_io *d, u32 bpid,
+ u64 *buffers, unsigned int num_buffers);
+
+struct dpaa2_io_store *dpaa2_io_store_create(unsigned int max_frames,
+ struct device *dev);
+void dpaa2_io_store_destroy(struct dpaa2_io_store *s);
+struct dpaa2_dq *dpaa2_io_store_next(struct dpaa2_io_store *s, int *is_last);
+
+#endif /* __FSL_DPAA2_IO_H */
diff --git a/drivers/staging/fsl-mc/include/dpcon.h b/drivers/staging/fsl-mc/include/dpcon.h
new file mode 100644
index 000000000000..efa23906b364
--- /dev/null
+++ b/drivers/staging/fsl-mc/include/dpcon.h
@@ -0,0 +1,115 @@
+/* Copyright 2013-2016 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef __FSL_DPCON_H
+#define __FSL_DPCON_H
+
+/* Data Path Concentrator API
+ * Contains initialization APIs and runtime control APIs for DPCON
+ */
+
+struct fsl_mc_io;
+
+/** General DPCON macros */
+
+/**
+ * Use it to disable notifications; see dpcon_set_notification()
+ */
+#define DPCON_INVALID_DPIO_ID (int)(-1)
+
+int dpcon_open(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ int dpcon_id,
+ u16 *token);
+
+int dpcon_close(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
+
+int dpcon_enable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
+
+int dpcon_disable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
+
+int dpcon_is_enabled(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ int *en);
+
+int dpcon_reset(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
+
+/**
+ * struct dpcon_attr - Structure representing DPCON attributes
+ * @id: DPCON object ID
+ * @qbman_ch_id: Channel ID to be used by dequeue operation
+ * @num_priorities: Number of priorities for the DPCON channel (1-8)
+ */
+struct dpcon_attr {
+ int id;
+ u16 qbman_ch_id;
+ u8 num_priorities;
+};
+
+int dpcon_get_attributes(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpcon_attr *attr);
+
+/**
+ * struct dpcon_notification_cfg - Structure representing notification params
+ * @dpio_id: DPIO object ID; must be configured with a notification channel;
+ * to disable notifications set it to 'DPCON_INVALID_DPIO_ID';
+ * @priority: Priority selection within the DPIO channel; valid values
+ * are 0-7, depending on the number of priorities in that channel
+ * @user_ctx: User context value provided with each CDAN message
+ */
+struct dpcon_notification_cfg {
+ int dpio_id;
+ u8 priority;
+ u64 user_ctx;
+};
+
+int dpcon_set_notification(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpcon_notification_cfg *cfg);
+
+int dpcon_get_api_version(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 *major_ver,
+ u16 *minor_ver);
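
Typical use of this API is open, configure notifications, enable. A rough sketch (illustrative only; the MC I/O portal and DPCON object id come from the fsl-mc bus, and command flags are left at zero):

static int example_dpcon_setup(struct fsl_mc_io *mc_io, int dpcon_id,
			       int dpio_id, u64 user_ctx)
{
	struct dpcon_notification_cfg cfg = {
		.dpio_id  = dpio_id,	/* DPCON_INVALID_DPIO_ID would disable CDANs */
		.priority = 0,		/* 0-7, within the DPIO channel */
		.user_ctx = user_ctx,	/* echoed back in every CDAN */
	};
	u16 token;
	int err;

	err = dpcon_open(mc_io, 0, dpcon_id, &token);
	if (err)
		return err;

	err = dpcon_set_notification(mc_io, 0, token, &cfg);
	if (!err)
		err = dpcon_enable(mc_io, 0, token);

	/* A real driver would keep the token for later disable/teardown. */
	dpcon_close(mc_io, 0, token);
	return err;
}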
+
+#endif /* __FSL_DPCON_H */
diff --git a/drivers/staging/gdm724x/gdm_lte.c b/drivers/staging/gdm724x/gdm_lte.c
index a3e046c3f65c..cf809987f79f 100644
--- a/drivers/staging/gdm724x/gdm_lte.c
+++ b/drivers/staging/gdm724x/gdm_lte.c
@@ -178,10 +178,10 @@ static int gdm_lte_emulate_arp(struct sk_buff *skb_in, u32 nic_type)
return 0;
}
-static int icmp6_checksum(struct ipv6hdr *ipv6, u16 *ptr, int len)
+static __sum16 icmp6_checksum(struct ipv6hdr *ipv6, u16 *ptr, int len)
{
unsigned short *w = ptr;
- int sum = 0;
+ __wsum sum = 0;
int i;
union {
@@ -203,19 +203,16 @@ static int icmp6_checksum(struct ipv6hdr *ipv6, u16 *ptr, int len)
w = (u16 *)&pseudo_header;
for (i = 0; i < ARRAY_SIZE(pseudo_header.pa); i++)
- sum += pseudo_header.pa[i];
+ sum = csum_add(sum, csum_unfold(
+ (__force __sum16)pseudo_header.pa[i]));
w = ptr;
while (len > 1) {
- sum += *w++;
+ sum = csum_add(sum, csum_unfold((__force __sum16)*w++));
len -= 2;
}
- sum = (sum >> 16) + (sum & 0xFFFF);
- sum += (sum >> 16);
- sum = ~sum & 0xffff;
-
- return sum;
+ return csum_fold(sum);
}
static int gdm_lte_emulate_ndp(struct sk_buff *skb_in, u32 nic_type)
@@ -353,7 +350,7 @@ static s32 gdm_lte_tx_nic_type(struct net_device *dev, struct sk_buff *skb)
struct ipv6hdr *ipv6;
int mac_proto;
void *network_data;
- u32 nic_type = 0;
+ u32 nic_type;
/* NIC TYPE is based on the nic_id of this net_device */
nic_type = 0x00000010 | nic->nic_id;
diff --git a/drivers/staging/gdm724x/gdm_lte.h b/drivers/staging/gdm724x/gdm_lte.h
index 7ddeabc0e50a..3ecaff1a40cb 100644
--- a/drivers/staging/gdm724x/gdm_lte.h
+++ b/drivers/staging/gdm724x/gdm_lte.h
@@ -49,7 +49,7 @@ struct phy_dev {
int (*send_hci_func)(void *priv_dev, void *data, int len,
void (*cb)(void *cb_data), void *cb_data);
int (*send_sdu_func)(void *priv_dev, void *data, int len,
- unsigned int dftEpsId, unsigned int epsId,
+ unsigned int dft_eps_id, unsigned int eps_id,
void (*cb)(void *cb_data), void *cb_data,
int dev_idx, int nic_type);
int (*rcv_func)(void *priv_dev,
diff --git a/drivers/staging/gdm724x/gdm_mux.c b/drivers/staging/gdm724x/gdm_mux.c
index 400969170d1c..996b1f538aae 100644
--- a/drivers/staging/gdm724x/gdm_mux.c
+++ b/drivers/staging/gdm724x/gdm_mux.c
@@ -62,7 +62,7 @@ static int packet_type_to_index(u16 packetType)
static struct mux_tx *alloc_mux_tx(int len)
{
- struct mux_tx *t = NULL;
+ struct mux_tx *t;
t = kzalloc(sizeof(*t), GFP_ATOMIC);
if (!t)
@@ -91,7 +91,7 @@ static void free_mux_tx(struct mux_tx *t)
static struct mux_rx *alloc_mux_rx(void)
{
- struct mux_rx *r = NULL;
+ struct mux_rx *r;
r = kzalloc(sizeof(*r), GFP_KERNEL);
if (!r)
@@ -664,9 +664,8 @@ static int __init gdm_usb_mux_init(void)
static void __exit gdm_usb_mux_exit(void)
{
- unregister_lte_tty_driver();
-
usb_deregister(&gdm_mux_driver);
+ unregister_lte_tty_driver();
}
module_init(gdm_usb_mux_init);
diff --git a/drivers/staging/gdm724x/gdm_tty.c b/drivers/staging/gdm724x/gdm_tty.c
index ae396638f897..fc7682c18f20 100644
--- a/drivers/staging/gdm724x/gdm_tty.c
+++ b/drivers/staging/gdm724x/gdm_tty.c
@@ -190,8 +190,7 @@ static int gdm_tty_write(struct tty_struct *tty, const unsigned char *buf,
return 0;
while (1) {
- sending_len = remain > MUX_TX_MAX_SIZE ? MUX_TX_MAX_SIZE :
- remain;
+ sending_len = min(MUX_TX_MAX_SIZE, remain);
gdm_tty_send(gdm,
(void *)(buf + sent_len),
sending_len,
diff --git a/drivers/staging/goldfish/goldfish_nand.c b/drivers/staging/goldfish/goldfish_nand.c
index 76d60eed1490..8f92ff4ba4b8 100644
--- a/drivers/staging/goldfish/goldfish_nand.c
+++ b/drivers/staging/goldfish/goldfish_nand.c
@@ -114,8 +114,8 @@ static int goldfish_nand_erase(struct mtd_info *mtd, struct erase_info *instr)
len = len / mtd->writesize * (mtd->writesize + mtd->oobsize);
if (goldfish_nand_cmd(mtd, NAND_CMD_ERASE, ofs, len, NULL) != len) {
- pr_err("goldfish_nand_erase: erase failed, start %llx, len %x, dev_size %llx, erase_size %x\n",
- ofs, len, mtd->size, mtd->erasesize);
+ pr_err("%s: erase failed, start %llx, len %x, dev_size %llx, erase_size %x\n",
+ __func__, ofs, len, mtd->size, mtd->erasesize);
return -EIO;
}
@@ -125,8 +125,8 @@ static int goldfish_nand_erase(struct mtd_info *mtd, struct erase_info *instr)
return 0;
invalid_arg:
- pr_err("goldfish_nand_erase: invalid erase, start %llx, len %x, dev_size %llx, erase_size %x\n",
- ofs, len, mtd->size, mtd->erasesize);
+ pr_err("%s: invalid erase, start %llx, len %x, dev_size %llx, erase_size %x\n",
+ __func__, ofs, len, mtd->size, mtd->erasesize);
return -EINVAL;
}
@@ -254,8 +254,8 @@ static int goldfish_nand_block_isbad(struct mtd_info *mtd, loff_t ofs)
return goldfish_nand_cmd(mtd, NAND_CMD_BLOCK_BAD_GET, ofs, 0, NULL);
invalid_arg:
- pr_err("goldfish_nand_block_isbad: invalid arg, ofs %llx, dev_size %llx, write_size %x\n",
- ofs, mtd->size, mtd->writesize);
+ pr_err("%s: invalid arg, ofs %llx, dev_size %llx, write_size %x\n",
+ __func__, ofs, mtd->size, mtd->writesize);
return -EINVAL;
}
@@ -277,8 +277,8 @@ static int goldfish_nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
return 0;
invalid_arg:
- pr_err("goldfish_nand_block_markbad: invalid arg, ofs %llx, dev_size %llx, write_size %x\n",
- ofs, mtd->size, mtd->writesize);
+ pr_err("%s: invalid arg, ofs %llx, dev_size %llx, write_size %x\n",
+ __func__, ofs, mtd->size, mtd->writesize);
return -EINVAL;
}
diff --git a/drivers/staging/greybus/Documentation/firmware/authenticate.c b/drivers/staging/greybus/Documentation/firmware/authenticate.c
index ab0688ad1e37..b836f0a20c36 100644
--- a/drivers/staging/greybus/Documentation/firmware/authenticate.c
+++ b/drivers/staging/greybus/Documentation/firmware/authenticate.c
@@ -103,7 +103,7 @@ int main(int argc, char *argv[])
goto close_fd;
}
- printf("UID received: 0x%llx\n", *(long long unsigned int *)(uid.uid));
+ printf("UID received: 0x%llx\n", *(unsigned long long int *)(uid.uid));
/* Get certificate */
printf("Get IMS certificate\n");
diff --git a/drivers/staging/greybus/Documentation/firmware/firmware.c b/drivers/staging/greybus/Documentation/firmware/firmware.c
index ff9382401030..c73dee9d13c1 100644
--- a/drivers/staging/greybus/Documentation/firmware/firmware.c
+++ b/drivers/staging/greybus/Documentation/firmware/firmware.c
@@ -52,6 +52,7 @@
*/
#include <stdio.h>
+#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
@@ -64,12 +65,12 @@
#define FW_TAG_INT_DEFAULT "s3f"
#define FW_TAG_BCND_DEFAULT "bf_01"
#define FW_UPDATE_TYPE_DEFAULT 0
-#define FW_TIMEOUT_DEFAULT 10000;
+#define FW_TIMEOUT_DEFAULT 10000
static const char *firmware_tag;
static const char *fwdev = FW_DEV_DEFAULT;
-static int fw_update_type = FW_UPDATE_TYPE_DEFAULT;
-static int fw_timeout = FW_TIMEOUT_DEFAULT;
+static unsigned int fw_update_type = FW_UPDATE_TYPE_DEFAULT;
+static unsigned int fw_timeout = FW_TIMEOUT_DEFAULT;
static struct fw_mgmt_ioc_get_intf_version intf_fw_info;
static struct fw_mgmt_ioc_get_backend_version backend_fw_info;
@@ -204,6 +205,7 @@ retry_fw_update:
int main(int argc, char *argv[])
{
int fd, ret;
+ char *endptr;
if (argc > 1 &&
(!strcmp(argv[1], "-h") || !strcmp(argv[1], "--help"))) {
@@ -215,20 +217,19 @@ int main(int argc, char *argv[])
fwdev = argv[1];
if (argc > 2)
- sscanf(argv[2], "%u", &fw_update_type);
+ fw_update_type = strtoul(argv[2], &endptr, 10);
- if (argc > 3) {
+ if (argc > 3)
firmware_tag = argv[3];
- } else if (!fw_update_type) {
+ else if (!fw_update_type)
firmware_tag = FW_TAG_INT_DEFAULT;
- } else {
+ else
firmware_tag = FW_TAG_BCND_DEFAULT;
- }
if (argc > 4)
- sscanf(argv[4], "%u", &fw_timeout);
+ fw_timeout = strtoul(argv[4], &endptr, 10);
- printf("Trying Firmware update: fwdev: %s, type: %s, tag: %s, timeout: %d\n",
+ printf("Trying Firmware update: fwdev: %s, type: %s, tag: %s, timeout: %u\n",
fwdev, fw_update_type == 0 ? "interface" : "backend",
firmware_tag, fw_timeout);
diff --git a/drivers/staging/greybus/connection.c b/drivers/staging/greybus/connection.c
index 1bf0ee403106..2cf64640e8ec 100644
--- a/drivers/staging/greybus/connection.c
+++ b/drivers/staging/greybus/connection.c
@@ -366,6 +366,9 @@ static int gb_connection_hd_cport_quiesce(struct gb_connection *connection)
if (connection->mode_switch)
peer_space += sizeof(struct gb_operation_msg_hdr);
+ if (!hd->driver->cport_quiesce)
+ return 0;
+
ret = hd->driver->cport_quiesce(hd, connection->hd_cport_id,
peer_space,
GB_CONNECTION_CPORT_QUIESCE_TIMEOUT);
diff --git a/drivers/staging/greybus/gbphy.c b/drivers/staging/greybus/gbphy.c
index 64a1eb93ec96..603de6f21fd1 100644
--- a/drivers/staging/greybus/gbphy.c
+++ b/drivers/staging/greybus/gbphy.c
@@ -11,7 +11,6 @@
#include <linux/types.h>
#include <linux/module.h>
-#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/device.h>
diff --git a/drivers/staging/greybus/light.c b/drivers/staging/greybus/light.c
index 8dffd8a7e762..16813628eda1 100644
--- a/drivers/staging/greybus/light.c
+++ b/drivers/staging/greybus/light.c
@@ -12,7 +12,6 @@
#include <linux/led-class-flash.h>
#include <linux/module.h>
#include <linux/slab.h>
-#include <linux/version.h>
#include <media/v4l2-flash-led-class.h>
#include "greybus.h"
diff --git a/drivers/staging/greybus/loopback.c b/drivers/staging/greybus/loopback.c
index aaf29a5fac83..08e255884206 100644
--- a/drivers/staging/greybus/loopback.c
+++ b/drivers/staging/greybus/loopback.c
@@ -365,11 +365,8 @@ static void gb_loopback_calculate_stats(struct gb_loopback *gb, bool error);
static u32 gb_loopback_nsec_to_usec_latency(u64 elapsed_nsecs)
{
- u32 lat;
-
do_div(elapsed_nsecs, NSEC_PER_USEC);
- lat = elapsed_nsecs;
- return lat;
+ return elapsed_nsecs;
}
static u64 __gb_loopback_calc_latency(u64 t1, u64 t2)
diff --git a/drivers/staging/greybus/tools/loopback_test.c b/drivers/staging/greybus/tools/loopback_test.c
index 18d7a3d1f3c7..32a43693181c 100644
--- a/drivers/staging/greybus/tools/loopback_test.c
+++ b/drivers/staging/greybus/tools/loopback_test.c
@@ -476,7 +476,7 @@ int format_output(struct loopback_test *t,
r->gbphy_firmware_latency_jitter);
} else {
- len += snprintf(&buf[len], buf_len- len, ",%s,%s,%u,%u,%u",
+ len += snprintf(&buf[len], buf_len - len, ",%s,%s,%u,%u,%u",
t->test_name, dev_name, t->size, t->iteration_max,
r->error);
@@ -636,7 +636,7 @@ int find_loopback_devices(struct loopback_test *t)
ret = 0;
done:
for (i = 0; i < n; i++)
- free(namelist[n]);
+ free(namelist[i]);
free(namelist);
baddir:
return ret;
@@ -674,7 +674,7 @@ static int open_poll_files(struct loopback_test *t)
err:
for (i = 0; i < fds_idx; i++)
- close(t->fds[fds_idx].fd);
+ close(t->fds[i].fd);
return -1;
}
diff --git a/drivers/staging/greybus/uart.c b/drivers/staging/greybus/uart.c
index 43255e2e9276..c6d01b800d3c 100644
--- a/drivers/staging/greybus/uart.c
+++ b/drivers/staging/greybus/uart.c
@@ -23,7 +23,6 @@
#include <linux/serial.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
-#include <linux/serial.h>
#include <linux/idr.h>
#include <linux/fs.h>
#include <linux/kdev_t.h>
@@ -34,7 +33,7 @@
#include "greybus.h"
#include "gbphy.h"
-#define GB_NUM_MINORS 16 /* 16 is is more than enough */
+#define GB_NUM_MINORS 16 /* 16 is more than enough */
#define GB_NAME "ttyGB"
#define GB_UART_WRITE_FIFO_SIZE PAGE_SIZE
diff --git a/drivers/staging/iio/accel/Makefile b/drivers/staging/iio/accel/Makefile
index febb137b60c4..5d8ad21a0dae 100644
--- a/drivers/staging/iio/accel/Makefile
+++ b/drivers/staging/iio/accel/Makefile
@@ -2,14 +2,7 @@
# Makefile for industrial I/O accelerometer drivers
#
-adis16201-y := adis16201_core.o
obj-$(CONFIG_ADIS16201) += adis16201.o
-
-adis16203-y := adis16203_core.o
obj-$(CONFIG_ADIS16203) += adis16203.o
-
-adis16209-y := adis16209_core.o
obj-$(CONFIG_ADIS16209) += adis16209.o
-
-adis16240-y := adis16240_core.o
obj-$(CONFIG_ADIS16240) += adis16240.o
diff --git a/drivers/staging/iio/accel/adis16201.c b/drivers/staging/iio/accel/adis16201.c
new file mode 100644
index 000000000000..fbc240663621
--- /dev/null
+++ b/drivers/staging/iio/accel/adis16201.c
@@ -0,0 +1,382 @@
+/*
+ * ADIS16201 Dual-Axis Digital Inclinometer and Accelerometer
+ *
+ * Copyright 2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/spi/spi.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/module.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/imu/adis.h>
+
+#define ADIS16201_STARTUP_DELAY 220 /* ms */
+
+/* Flash memory write count */
+#define ADIS16201_FLASH_CNT 0x00
+
+/* Output, power supply */
+#define ADIS16201_SUPPLY_OUT 0x02
+
+/* Output, x-axis accelerometer */
+#define ADIS16201_XACCL_OUT 0x04
+
+/* Output, y-axis accelerometer */
+#define ADIS16201_YACCL_OUT 0x06
+
+/* Output, auxiliary ADC input */
+#define ADIS16201_AUX_ADC 0x08
+
+/* Output, temperature */
+#define ADIS16201_TEMP_OUT 0x0A
+
+/* Output, x-axis inclination */
+#define ADIS16201_XINCL_OUT 0x0C
+
+/* Output, y-axis inclination */
+#define ADIS16201_YINCL_OUT 0x0E
+
+/* Calibration, x-axis acceleration offset */
+#define ADIS16201_XACCL_OFFS 0x10
+
+/* Calibration, y-axis acceleration offset */
+#define ADIS16201_YACCL_OFFS 0x12
+
+/* x-axis acceleration scale factor */
+#define ADIS16201_XACCL_SCALE 0x14
+
+/* y-axis acceleration scale factor */
+#define ADIS16201_YACCL_SCALE 0x16
+
+/* Calibration, x-axis inclination offset */
+#define ADIS16201_XINCL_OFFS 0x18
+
+/* Calibration, y-axis inclination offset */
+#define ADIS16201_YINCL_OFFS 0x1A
+
+/* x-axis inclination scale factor */
+#define ADIS16201_XINCL_SCALE 0x1C
+
+/* y-axis inclination scale factor */
+#define ADIS16201_YINCL_SCALE 0x1E
+
+/* Alarm 1 amplitude threshold */
+#define ADIS16201_ALM_MAG1 0x20
+
+/* Alarm 2 amplitude threshold */
+#define ADIS16201_ALM_MAG2 0x22
+
+/* Alarm 1, sample period */
+#define ADIS16201_ALM_SMPL1 0x24
+
+/* Alarm 2, sample period */
+#define ADIS16201_ALM_SMPL2 0x26
+
+/* Alarm control */
+#define ADIS16201_ALM_CTRL 0x28
+
+/* Auxiliary DAC data */
+#define ADIS16201_AUX_DAC 0x30
+
+/* General-purpose digital input/output control */
+#define ADIS16201_GPIO_CTRL 0x32
+
+/* Miscellaneous control */
+#define ADIS16201_MSC_CTRL 0x34
+
+/* Internal sample period (rate) control */
+#define ADIS16201_SMPL_PRD 0x36
+
+/* Operation, filter configuration */
+#define ADIS16201_AVG_CNT 0x38
+
+/* Operation, sleep mode control */
+#define ADIS16201_SLP_CNT 0x3A
+
+/* Diagnostics, system status register */
+#define ADIS16201_DIAG_STAT 0x3C
+
+/* Operation, system command register */
+#define ADIS16201_GLOB_CMD 0x3E
+
+/* MSC_CTRL */
+
+/* Self-test enable */
+#define ADIS16201_MSC_CTRL_SELF_TEST_EN BIT(8)
+
+/* Data-ready enable: 1 = enabled, 0 = disabled */
+#define ADIS16201_MSC_CTRL_DATA_RDY_EN BIT(2)
+
+/* Data-ready polarity: 1 = active high, 0 = active low */
+#define ADIS16201_MSC_CTRL_ACTIVE_HIGH BIT(1)
+
+/* Data-ready line selection: 1 = DIO1, 0 = DIO0 */
+#define ADIS16201_MSC_CTRL_DATA_RDY_DIO1 BIT(0)
+
+/* DIAG_STAT */
+
+/* Alarm 2 status: 1 = alarm active, 0 = alarm inactive */
+#define ADIS16201_DIAG_STAT_ALARM2 BIT(9)
+
+/* Alarm 1 status: 1 = alarm active, 0 = alarm inactive */
+#define ADIS16201_DIAG_STAT_ALARM1 BIT(8)
+
+/* SPI communications failure */
+#define ADIS16201_DIAG_STAT_SPI_FAIL_BIT 3
+
+/* Flash update failure */
+#define ADIS16201_DIAG_STAT_FLASH_UPT_BIT 2
+
+/* Power supply above 3.625 V */
+#define ADIS16201_DIAG_STAT_POWER_HIGH_BIT 1
+
+/* Power supply below 3.15 V */
+#define ADIS16201_DIAG_STAT_POWER_LOW_BIT 0
+
+/* GLOB_CMD */
+
+#define ADIS16201_GLOB_CMD_SW_RESET BIT(7)
+#define ADIS16201_GLOB_CMD_FACTORY_CAL BIT(1)
+
+#define ADIS16201_ERROR_ACTIVE BIT(14)
+
+enum adis16201_scan {
+ ADIS16201_SCAN_ACC_X,
+ ADIS16201_SCAN_ACC_Y,
+ ADIS16201_SCAN_INCLI_X,
+ ADIS16201_SCAN_INCLI_Y,
+ ADIS16201_SCAN_SUPPLY,
+ ADIS16201_SCAN_AUX_ADC,
+ ADIS16201_SCAN_TEMP,
+};
+
+static const u8 adis16201_addresses[] = {
+ [ADIS16201_SCAN_ACC_X] = ADIS16201_XACCL_OFFS,
+ [ADIS16201_SCAN_ACC_Y] = ADIS16201_YACCL_OFFS,
+ [ADIS16201_SCAN_INCLI_X] = ADIS16201_XINCL_OFFS,
+ [ADIS16201_SCAN_INCLI_Y] = ADIS16201_YINCL_OFFS,
+};
+
+static int adis16201_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2,
+ long mask)
+{
+ struct adis *st = iio_priv(indio_dev);
+ int ret;
+ int bits;
+ u8 addr;
+ s16 val16;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ return adis_single_conversion(indio_dev, chan,
+ ADIS16201_ERROR_ACTIVE, val);
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_VOLTAGE:
+ if (chan->channel == 0) {
+ *val = 1;
+ *val2 = 220000; /* 1.22 mV */
+ } else {
+ *val = 0;
+ *val2 = 610000; /* 0.610 mV */
+ }
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_TEMP:
+ *val = -470; /* 0.47 C */
+ *val2 = 0;
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_ACCEL:
+ *val = 0;
+ *val2 = IIO_G_TO_M_S_2(462400); /* 0.4624 mg */
+ return IIO_VAL_INT_PLUS_NANO;
+ case IIO_INCLI:
+ *val = 0;
+ *val2 = 100000; /* 0.1 degree */
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case IIO_CHAN_INFO_OFFSET:
+ *val = 25000 / -470 - 1278; /* 25 C = 1278 */
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_CALIBBIAS:
+ switch (chan->type) {
+ case IIO_ACCEL:
+ bits = 12;
+ break;
+ case IIO_INCLI:
+ bits = 9;
+ break;
+ default:
+ return -EINVAL;
+ }
+ addr = adis16201_addresses[chan->scan_index];
+ ret = adis_read_reg_16(st, addr, &val16);
+ if (ret)
+ return ret;
+ val16 &= (1 << bits) - 1;
+ val16 = (s16)(val16 << (16 - bits)) >> (16 - bits);
+ *val = val16;
+ return IIO_VAL_INT;
+ }
+ return -EINVAL;
+}
+
+static int adis16201_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val,
+ int val2,
+ long mask)
+{
+ struct adis *st = iio_priv(indio_dev);
+ int bits;
+ s16 val16;
+ u8 addr;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_CALIBBIAS:
+ switch (chan->type) {
+ case IIO_ACCEL:
+ bits = 12;
+ break;
+ case IIO_INCLI:
+ bits = 9;
+ break;
+ default:
+ return -EINVAL;
+ }
+ val16 = val & ((1 << bits) - 1);
+ addr = adis16201_addresses[chan->scan_index];
+ return adis_write_reg_16(st, addr, val16);
+ }
+ return -EINVAL;
+}
+
+static const struct iio_chan_spec adis16201_channels[] = {
+ ADIS_SUPPLY_CHAN(ADIS16201_SUPPLY_OUT, ADIS16201_SCAN_SUPPLY, 0, 12),
+ ADIS_TEMP_CHAN(ADIS16201_TEMP_OUT, ADIS16201_SCAN_TEMP, 0, 12),
+ ADIS_ACCEL_CHAN(X, ADIS16201_XACCL_OUT, ADIS16201_SCAN_ACC_X,
+ BIT(IIO_CHAN_INFO_CALIBBIAS), 0, 14),
+ ADIS_ACCEL_CHAN(Y, ADIS16201_YACCL_OUT, ADIS16201_SCAN_ACC_Y,
+ BIT(IIO_CHAN_INFO_CALIBBIAS), 0, 14),
+ ADIS_AUX_ADC_CHAN(ADIS16201_AUX_ADC, ADIS16201_SCAN_AUX_ADC, 0, 12),
+ ADIS_INCLI_CHAN(X, ADIS16201_XINCL_OUT, ADIS16201_SCAN_INCLI_X,
+ BIT(IIO_CHAN_INFO_CALIBBIAS), 0, 14),
+ ADIS_INCLI_CHAN(X, ADIS16201_YINCL_OUT, ADIS16201_SCAN_INCLI_Y,
+ BIT(IIO_CHAN_INFO_CALIBBIAS), 0, 14),
+ IIO_CHAN_SOFT_TIMESTAMP(7)
+};
+
+static const struct iio_info adis16201_info = {
+ .read_raw = adis16201_read_raw,
+ .write_raw = adis16201_write_raw,
+ .update_scan_mode = adis_update_scan_mode,
+ .driver_module = THIS_MODULE,
+};
+
+static const char * const adis16201_status_error_msgs[] = {
+ [ADIS16201_DIAG_STAT_SPI_FAIL_BIT] = "SPI failure",
+ [ADIS16201_DIAG_STAT_FLASH_UPT_BIT] = "Flash update failed",
+ [ADIS16201_DIAG_STAT_POWER_HIGH_BIT] = "Power supply above 3.625V",
+ [ADIS16201_DIAG_STAT_POWER_LOW_BIT] = "Power supply below 3.15V",
+};
+
+static const struct adis_data adis16201_data = {
+ .read_delay = 20,
+ .msc_ctrl_reg = ADIS16201_MSC_CTRL,
+ .glob_cmd_reg = ADIS16201_GLOB_CMD,
+ .diag_stat_reg = ADIS16201_DIAG_STAT,
+
+ .self_test_mask = ADIS16201_MSC_CTRL_SELF_TEST_EN,
+ .self_test_no_autoclear = true,
+ .startup_delay = ADIS16201_STARTUP_DELAY,
+
+ .status_error_msgs = adis16201_status_error_msgs,
+ .status_error_mask = BIT(ADIS16201_DIAG_STAT_SPI_FAIL_BIT) |
+ BIT(ADIS16201_DIAG_STAT_FLASH_UPT_BIT) |
+ BIT(ADIS16201_DIAG_STAT_POWER_HIGH_BIT) |
+ BIT(ADIS16201_DIAG_STAT_POWER_LOW_BIT),
+};
+
+static int adis16201_probe(struct spi_device *spi)
+{
+ int ret;
+ struct adis *st;
+ struct iio_dev *indio_dev;
+
+ /* setup the industrialio driver allocated elements */
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ st = iio_priv(indio_dev);
+ /* this is only used for removal purposes */
+ spi_set_drvdata(spi, indio_dev);
+
+ indio_dev->name = spi->dev.driver->name;
+ indio_dev->dev.parent = &spi->dev;
+ indio_dev->info = &adis16201_info;
+
+ indio_dev->channels = adis16201_channels;
+ indio_dev->num_channels = ARRAY_SIZE(adis16201_channels);
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ ret = adis_init(st, indio_dev, spi, &adis16201_data);
+ if (ret)
+ return ret;
+ ret = adis_setup_buffer_and_trigger(st, indio_dev, NULL);
+ if (ret)
+ return ret;
+
+ /* Get the device into a sane initial state */
+ ret = adis_initial_startup(st);
+ if (ret)
+ goto error_cleanup_buffer_trigger;
+
+ ret = iio_device_register(indio_dev);
+ if (ret < 0)
+ goto error_cleanup_buffer_trigger;
+ return 0;
+
+error_cleanup_buffer_trigger:
+ adis_cleanup_buffer_and_trigger(st, indio_dev);
+ return ret;
+}
+
+static int adis16201_remove(struct spi_device *spi)
+{
+ struct iio_dev *indio_dev = spi_get_drvdata(spi);
+ struct adis *st = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+ adis_cleanup_buffer_and_trigger(st, indio_dev);
+
+ return 0;
+}
+
+static struct spi_driver adis16201_driver = {
+ .driver = {
+ .name = "adis16201",
+ },
+ .probe = adis16201_probe,
+ .remove = adis16201_remove,
+};
+module_spi_driver(adis16201_driver);
+
+MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
+MODULE_DESCRIPTION("Analog Devices ADIS16201 Dual-Axis Digital Inclinometer and Accelerometer");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("spi:adis16201");
diff --git a/drivers/staging/iio/accel/adis16201.h b/drivers/staging/iio/accel/adis16201.h
deleted file mode 100644
index 64844adcaacd..000000000000
--- a/drivers/staging/iio/accel/adis16201.h
+++ /dev/null
@@ -1,144 +0,0 @@
-#ifndef SPI_ADIS16201_H_
-#define SPI_ADIS16201_H_
-
-#define ADIS16201_STARTUP_DELAY 220 /* ms */
-
-/* Flash memory write count */
-#define ADIS16201_FLASH_CNT 0x00
-
-/* Output, power supply */
-#define ADIS16201_SUPPLY_OUT 0x02
-
-/* Output, x-axis accelerometer */
-#define ADIS16201_XACCL_OUT 0x04
-
-/* Output, y-axis accelerometer */
-#define ADIS16201_YACCL_OUT 0x06
-
-/* Output, auxiliary ADC input */
-#define ADIS16201_AUX_ADC 0x08
-
-/* Output, temperature */
-#define ADIS16201_TEMP_OUT 0x0A
-
-/* Output, x-axis inclination */
-#define ADIS16201_XINCL_OUT 0x0C
-
-/* Output, y-axis inclination */
-#define ADIS16201_YINCL_OUT 0x0E
-
-/* Calibration, x-axis acceleration offset */
-#define ADIS16201_XACCL_OFFS 0x10
-
-/* Calibration, y-axis acceleration offset */
-#define ADIS16201_YACCL_OFFS 0x12
-
-/* x-axis acceleration scale factor */
-#define ADIS16201_XACCL_SCALE 0x14
-
-/* y-axis acceleration scale factor */
-#define ADIS16201_YACCL_SCALE 0x16
-
-/* Calibration, x-axis inclination offset */
-#define ADIS16201_XINCL_OFFS 0x18
-
-/* Calibration, y-axis inclination offset */
-#define ADIS16201_YINCL_OFFS 0x1A
-
-/* x-axis inclination scale factor */
-#define ADIS16201_XINCL_SCALE 0x1C
-
-/* y-axis inclination scale factor */
-#define ADIS16201_YINCL_SCALE 0x1E
-
-/* Alarm 1 amplitude threshold */
-#define ADIS16201_ALM_MAG1 0x20
-
-/* Alarm 2 amplitude threshold */
-#define ADIS16201_ALM_MAG2 0x22
-
-/* Alarm 1, sample period */
-#define ADIS16201_ALM_SMPL1 0x24
-
-/* Alarm 2, sample period */
-#define ADIS16201_ALM_SMPL2 0x26
-
-/* Alarm control */
-#define ADIS16201_ALM_CTRL 0x28
-
-/* Auxiliary DAC data */
-#define ADIS16201_AUX_DAC 0x30
-
-/* General-purpose digital input/output control */
-#define ADIS16201_GPIO_CTRL 0x32
-
-/* Miscellaneous control */
-#define ADIS16201_MSC_CTRL 0x34
-
-/* Internal sample period (rate) control */
-#define ADIS16201_SMPL_PRD 0x36
-
-/* Operation, filter configuration */
-#define ADIS16201_AVG_CNT 0x38
-
-/* Operation, sleep mode control */
-#define ADIS16201_SLP_CNT 0x3A
-
-/* Diagnostics, system status register */
-#define ADIS16201_DIAG_STAT 0x3C
-
-/* Operation, system command register */
-#define ADIS16201_GLOB_CMD 0x3E
-
-/* MSC_CTRL */
-
-/* Self-test enable */
-#define ADIS16201_MSC_CTRL_SELF_TEST_EN BIT(8)
-
-/* Data-ready enable: 1 = enabled, 0 = disabled */
-#define ADIS16201_MSC_CTRL_DATA_RDY_EN BIT(2)
-
-/* Data-ready polarity: 1 = active high, 0 = active low */
-#define ADIS16201_MSC_CTRL_ACTIVE_HIGH BIT(1)
-
-/* Data-ready line selection: 1 = DIO1, 0 = DIO0 */
-#define ADIS16201_MSC_CTRL_DATA_RDY_DIO1 BIT(0)
-
-/* DIAG_STAT */
-
-/* Alarm 2 status: 1 = alarm active, 0 = alarm inactive */
-#define ADIS16201_DIAG_STAT_ALARM2 BIT(9)
-
-/* Alarm 1 status: 1 = alarm active, 0 = alarm inactive */
-#define ADIS16201_DIAG_STAT_ALARM1 BIT(8)
-
-/* SPI communications failure */
-#define ADIS16201_DIAG_STAT_SPI_FAIL_BIT 3
-
-/* Flash update failure */
-#define ADIS16201_DIAG_STAT_FLASH_UPT_BIT 2
-
-/* Power supply above 3.625 V */
-#define ADIS16201_DIAG_STAT_POWER_HIGH_BIT 1
-
-/* Power supply below 3.15 V */
-#define ADIS16201_DIAG_STAT_POWER_LOW_BIT 0
-
-/* GLOB_CMD */
-
-#define ADIS16201_GLOB_CMD_SW_RESET BIT(7)
-#define ADIS16201_GLOB_CMD_FACTORY_CAL BIT(1)
-
-#define ADIS16201_ERROR_ACTIVE BIT(14)
-
-enum adis16201_scan {
- ADIS16201_SCAN_ACC_X,
- ADIS16201_SCAN_ACC_Y,
- ADIS16201_SCAN_INCLI_X,
- ADIS16201_SCAN_INCLI_Y,
- ADIS16201_SCAN_SUPPLY,
- ADIS16201_SCAN_AUX_ADC,
- ADIS16201_SCAN_TEMP,
-};
-
-#endif /* SPI_ADIS16201_H_ */
diff --git a/drivers/staging/iio/accel/adis16201_core.c b/drivers/staging/iio/accel/adis16201_core.c
deleted file mode 100644
index 7963d4a83f84..000000000000
--- a/drivers/staging/iio/accel/adis16201_core.c
+++ /dev/null
@@ -1,248 +0,0 @@
-/*
- * ADIS16201 Dual-Axis Digital Inclinometer and Accelerometer
- *
- * Copyright 2010 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later.
- */
-
-#include <linux/delay.h>
-#include <linux/mutex.h>
-#include <linux/device.h>
-#include <linux/kernel.h>
-#include <linux/spi/spi.h>
-#include <linux/slab.h>
-#include <linux/sysfs.h>
-#include <linux/module.h>
-
-#include <linux/iio/iio.h>
-#include <linux/iio/sysfs.h>
-#include <linux/iio/buffer.h>
-#include <linux/iio/imu/adis.h>
-
-#include "adis16201.h"
-
-static const u8 adis16201_addresses[] = {
- [ADIS16201_SCAN_ACC_X] = ADIS16201_XACCL_OFFS,
- [ADIS16201_SCAN_ACC_Y] = ADIS16201_YACCL_OFFS,
- [ADIS16201_SCAN_INCLI_X] = ADIS16201_XINCL_OFFS,
- [ADIS16201_SCAN_INCLI_Y] = ADIS16201_YINCL_OFFS,
-};
-
-static int adis16201_read_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int *val, int *val2,
- long mask)
-{
- struct adis *st = iio_priv(indio_dev);
- int ret;
- int bits;
- u8 addr;
- s16 val16;
-
- switch (mask) {
- case IIO_CHAN_INFO_RAW:
- return adis_single_conversion(indio_dev, chan,
- ADIS16201_ERROR_ACTIVE, val);
- case IIO_CHAN_INFO_SCALE:
- switch (chan->type) {
- case IIO_VOLTAGE:
- if (chan->channel == 0) {
- *val = 1;
- *val2 = 220000; /* 1.22 mV */
- } else {
- *val = 0;
- *val2 = 610000; /* 0.610 mV */
- }
- return IIO_VAL_INT_PLUS_MICRO;
- case IIO_TEMP:
-   *val = -470; /* -0.47 C */
- *val2 = 0;
- return IIO_VAL_INT_PLUS_MICRO;
- case IIO_ACCEL:
- *val = 0;
- *val2 = IIO_G_TO_M_S_2(462400); /* 0.4624 mg */
- return IIO_VAL_INT_PLUS_NANO;
- case IIO_INCLI:
- *val = 0;
- *val2 = 100000; /* 0.1 degree */
- return IIO_VAL_INT_PLUS_MICRO;
- default:
- return -EINVAL;
- }
- break;
- case IIO_CHAN_INFO_OFFSET:
- *val = 25000 / -470 - 1278; /* 25 C = 1278 */
- return IIO_VAL_INT;
- case IIO_CHAN_INFO_CALIBBIAS:
- switch (chan->type) {
- case IIO_ACCEL:
- bits = 12;
- break;
- case IIO_INCLI:
- bits = 9;
- break;
- default:
- return -EINVAL;
- }
- mutex_lock(&indio_dev->mlock);
- addr = adis16201_addresses[chan->scan_index];
- ret = adis_read_reg_16(st, addr, &val16);
- if (ret) {
- mutex_unlock(&indio_dev->mlock);
- return ret;
- }
- val16 &= (1 << bits) - 1;
- val16 = (s16)(val16 << (16 - bits)) >> (16 - bits);
- *val = val16;
- mutex_unlock(&indio_dev->mlock);
- return IIO_VAL_INT;
- }
- return -EINVAL;
-}
-
-static int adis16201_write_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int val,
- int val2,
- long mask)
-{
- struct adis *st = iio_priv(indio_dev);
- int bits;
- s16 val16;
- u8 addr;
-
- switch (mask) {
- case IIO_CHAN_INFO_CALIBBIAS:
- switch (chan->type) {
- case IIO_ACCEL:
- bits = 12;
- break;
- case IIO_INCLI:
- bits = 9;
- break;
- default:
- return -EINVAL;
- }
- val16 = val & ((1 << bits) - 1);
- addr = adis16201_addresses[chan->scan_index];
- return adis_write_reg_16(st, addr, val16);
- }
- return -EINVAL;
-}
-
-static const struct iio_chan_spec adis16201_channels[] = {
- ADIS_SUPPLY_CHAN(ADIS16201_SUPPLY_OUT, ADIS16201_SCAN_SUPPLY, 0, 12),
- ADIS_TEMP_CHAN(ADIS16201_TEMP_OUT, ADIS16201_SCAN_TEMP, 0, 12),
- ADIS_ACCEL_CHAN(X, ADIS16201_XACCL_OUT, ADIS16201_SCAN_ACC_X,
- BIT(IIO_CHAN_INFO_CALIBBIAS), 0, 14),
- ADIS_ACCEL_CHAN(Y, ADIS16201_YACCL_OUT, ADIS16201_SCAN_ACC_Y,
- BIT(IIO_CHAN_INFO_CALIBBIAS), 0, 14),
- ADIS_AUX_ADC_CHAN(ADIS16201_AUX_ADC, ADIS16201_SCAN_AUX_ADC, 0, 12),
- ADIS_INCLI_CHAN(X, ADIS16201_XINCL_OUT, ADIS16201_SCAN_INCLI_X,
- BIT(IIO_CHAN_INFO_CALIBBIAS), 0, 14),
- ADIS_INCLI_CHAN(Y, ADIS16201_YINCL_OUT, ADIS16201_SCAN_INCLI_Y,
- BIT(IIO_CHAN_INFO_CALIBBIAS), 0, 14),
- IIO_CHAN_SOFT_TIMESTAMP(7)
-};
-
-static const struct iio_info adis16201_info = {
- .read_raw = &adis16201_read_raw,
- .write_raw = &adis16201_write_raw,
- .update_scan_mode = adis_update_scan_mode,
- .driver_module = THIS_MODULE,
-};
-
-static const char * const adis16201_status_error_msgs[] = {
- [ADIS16201_DIAG_STAT_SPI_FAIL_BIT] = "SPI failure",
- [ADIS16201_DIAG_STAT_FLASH_UPT_BIT] = "Flash update failed",
- [ADIS16201_DIAG_STAT_POWER_HIGH_BIT] = "Power supply above 3.625V",
- [ADIS16201_DIAG_STAT_POWER_LOW_BIT] = "Power supply below 3.15V",
-};
-
-static const struct adis_data adis16201_data = {
- .read_delay = 20,
- .msc_ctrl_reg = ADIS16201_MSC_CTRL,
- .glob_cmd_reg = ADIS16201_GLOB_CMD,
- .diag_stat_reg = ADIS16201_DIAG_STAT,
-
- .self_test_mask = ADIS16201_MSC_CTRL_SELF_TEST_EN,
- .self_test_no_autoclear = true,
- .startup_delay = ADIS16201_STARTUP_DELAY,
-
- .status_error_msgs = adis16201_status_error_msgs,
- .status_error_mask = BIT(ADIS16201_DIAG_STAT_SPI_FAIL_BIT) |
- BIT(ADIS16201_DIAG_STAT_FLASH_UPT_BIT) |
- BIT(ADIS16201_DIAG_STAT_POWER_HIGH_BIT) |
- BIT(ADIS16201_DIAG_STAT_POWER_LOW_BIT),
-};
-
-static int adis16201_probe(struct spi_device *spi)
-{
- int ret;
- struct adis *st;
- struct iio_dev *indio_dev;
-
- /* setup the industrialio driver allocated elements */
- indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
- if (!indio_dev)
- return -ENOMEM;
-
- st = iio_priv(indio_dev);
- /* this is only used for removal purposes */
- spi_set_drvdata(spi, indio_dev);
-
- indio_dev->name = spi->dev.driver->name;
- indio_dev->dev.parent = &spi->dev;
- indio_dev->info = &adis16201_info;
-
- indio_dev->channels = adis16201_channels;
- indio_dev->num_channels = ARRAY_SIZE(adis16201_channels);
- indio_dev->modes = INDIO_DIRECT_MODE;
-
- ret = adis_init(st, indio_dev, spi, &adis16201_data);
- if (ret)
- return ret;
- ret = adis_setup_buffer_and_trigger(st, indio_dev, NULL);
- if (ret)
- return ret;
-
- /* Get the device into a sane initial state */
- ret = adis_initial_startup(st);
- if (ret)
- goto error_cleanup_buffer_trigger;
-
- ret = iio_device_register(indio_dev);
- if (ret < 0)
- goto error_cleanup_buffer_trigger;
- return 0;
-
-error_cleanup_buffer_trigger:
- adis_cleanup_buffer_and_trigger(st, indio_dev);
- return ret;
-}
-
-static int adis16201_remove(struct spi_device *spi)
-{
- struct iio_dev *indio_dev = spi_get_drvdata(spi);
- struct adis *st = iio_priv(indio_dev);
-
- iio_device_unregister(indio_dev);
- adis_cleanup_buffer_and_trigger(st, indio_dev);
-
- return 0;
-}
-
-static struct spi_driver adis16201_driver = {
- .driver = {
- .name = "adis16201",
- },
- .probe = adis16201_probe,
- .remove = adis16201_remove,
-};
-module_spi_driver(adis16201_driver);
-
-MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
-MODULE_DESCRIPTION("Analog Devices ADIS16201 Dual-Axis Digital Inclinometer and Accelerometer");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("spi:adis16201");
diff --git a/drivers/staging/iio/accel/adis16203.c b/drivers/staging/iio/accel/adis16203.c
new file mode 100644
index 000000000000..4e3fa7592d3f
--- /dev/null
+++ b/drivers/staging/iio/accel/adis16203.c
@@ -0,0 +1,332 @@
+/*
+ * ADIS16203 Programmable 360 Degrees Inclinometer
+ *
+ * Copyright 2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+
+#include <linux/iio/buffer.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/imu/adis.h>
+#include <linux/iio/sysfs.h>
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+#include <linux/sysfs.h>
+
+#define ADIS16203_STARTUP_DELAY 220 /* ms */
+
+/* Flash memory write count */
+#define ADIS16203_FLASH_CNT 0x00
+
+/* Output, power supply */
+#define ADIS16203_SUPPLY_OUT 0x02
+
+/* Output, auxiliary ADC input */
+#define ADIS16203_AUX_ADC 0x08
+
+/* Output, temperature */
+#define ADIS16203_TEMP_OUT 0x0A
+
+/* Output, x-axis inclination */
+#define ADIS16203_XINCL_OUT 0x0C
+
+/* Output, y-axis inclination */
+#define ADIS16203_YINCL_OUT 0x0E
+
+/* Incline null calibration */
+#define ADIS16203_INCL_NULL 0x18
+
+/* Alarm 1 amplitude threshold */
+#define ADIS16203_ALM_MAG1 0x20
+
+/* Alarm 2 amplitude threshold */
+#define ADIS16203_ALM_MAG2 0x22
+
+/* Alarm 1, sample period */
+#define ADIS16203_ALM_SMPL1 0x24
+
+/* Alarm 2, sample period */
+#define ADIS16203_ALM_SMPL2 0x26
+
+/* Alarm control */
+#define ADIS16203_ALM_CTRL 0x28
+
+/* Auxiliary DAC data */
+#define ADIS16203_AUX_DAC 0x30
+
+/* General-purpose digital input/output control */
+#define ADIS16203_GPIO_CTRL 0x32
+
+/* Miscellaneous control */
+#define ADIS16203_MSC_CTRL 0x34
+
+/* Internal sample period (rate) control */
+#define ADIS16203_SMPL_PRD 0x36
+
+/* Operation, filter configuration */
+#define ADIS16203_AVG_CNT 0x38
+
+/* Operation, sleep mode control */
+#define ADIS16203_SLP_CNT 0x3A
+
+/* Diagnostics, system status register */
+#define ADIS16203_DIAG_STAT 0x3C
+
+/* Operation, system command register */
+#define ADIS16203_GLOB_CMD 0x3E
+
+/* MSC_CTRL */
+
+/* Self-test at power-on: 1 = disabled, 0 = enabled */
+#define ADIS16203_MSC_CTRL_PWRUP_SELF_TEST BIT(10)
+
+/* Reverses rotation of both inclination outputs */
+#define ADIS16203_MSC_CTRL_REVERSE_ROT_EN BIT(9)
+
+/* Self-test enable */
+#define ADIS16203_MSC_CTRL_SELF_TEST_EN BIT(8)
+
+/* Data-ready enable: 1 = enabled, 0 = disabled */
+#define ADIS16203_MSC_CTRL_DATA_RDY_EN BIT(2)
+
+/* Data-ready polarity: 1 = active high, 0 = active low */
+#define ADIS16203_MSC_CTRL_ACTIVE_HIGH BIT(1)
+
+/* Data-ready line selection: 1 = DIO1, 0 = DIO0 */
+#define ADIS16203_MSC_CTRL_DATA_RDY_DIO1 BIT(0)
+
+/* DIAG_STAT */
+
+/* Alarm 2 status: 1 = alarm active, 0 = alarm inactive */
+#define ADIS16203_DIAG_STAT_ALARM2 BIT(9)
+
+/* Alarm 1 status: 1 = alarm active, 0 = alarm inactive */
+#define ADIS16203_DIAG_STAT_ALARM1 BIT(8)
+
+/* Self-test diagnostic error flag */
+#define ADIS16203_DIAG_STAT_SELFTEST_FAIL_BIT 5
+
+/* SPI communications failure */
+#define ADIS16203_DIAG_STAT_SPI_FAIL_BIT 3
+
+/* Flash update failure */
+#define ADIS16203_DIAG_STAT_FLASH_UPT_BIT 2
+
+/* Power supply above 3.625 V */
+#define ADIS16203_DIAG_STAT_POWER_HIGH_BIT 1
+
+/* Power supply below 3.15 V */
+#define ADIS16203_DIAG_STAT_POWER_LOW_BIT 0
+
+/* GLOB_CMD */
+
+#define ADIS16203_GLOB_CMD_SW_RESET BIT(7)
+#define ADIS16203_GLOB_CMD_CLEAR_STAT BIT(4)
+#define ADIS16203_GLOB_CMD_FACTORY_CAL BIT(1)
+
+#define ADIS16203_ERROR_ACTIVE BIT(14)
+
+enum adis16203_scan {
+ ADIS16203_SCAN_INCLI_X,
+ ADIS16203_SCAN_INCLI_Y,
+ ADIS16203_SCAN_SUPPLY,
+ ADIS16203_SCAN_AUX_ADC,
+ ADIS16203_SCAN_TEMP,
+};
+
+#define DRIVER_NAME "adis16203"
+
+static const u8 adis16203_addresses[] = {
+ [ADIS16203_SCAN_INCLI_X] = ADIS16203_INCL_NULL,
+};
+
+static int adis16203_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val,
+ int val2,
+ long mask)
+{
+ struct adis *st = iio_priv(indio_dev);
+ /* currently only one writable parameter which keeps this simple */
+ u8 addr = adis16203_addresses[chan->scan_index];
+
+ return adis_write_reg_16(st, addr, val & 0x3FFF);
+}
+
+static int adis16203_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2,
+ long mask)
+{
+ struct adis *st = iio_priv(indio_dev);
+ int ret;
+ int bits;
+ u8 addr;
+ s16 val16;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ return adis_single_conversion(indio_dev, chan,
+ ADIS16203_ERROR_ACTIVE, val);
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_VOLTAGE:
+ if (chan->channel == 0) {
+ *val = 1;
+ *val2 = 220000; /* 1.22 mV */
+ } else {
+ *val = 0;
+ *val2 = 610000; /* 0.61 mV */
+ }
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_TEMP:
+ *val = -470; /* -0.47 C */
+ *val2 = 0;
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_INCLI:
+ *val = 0;
+ *val2 = 25000; /* 0.025 degree */
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_OFFSET:
+ *val = 25000 / -470 - 1278; /* 25 C = 1278 */
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_CALIBBIAS:
+ bits = 14;
+ addr = adis16203_addresses[chan->scan_index];
+ ret = adis_read_reg_16(st, addr, &val16);
+ if (ret)
+ return ret;
+ val16 &= (1 << bits) - 1;
+ val16 = (s16)(val16 << (16 - bits)) >> (16 - bits);
+ *val = val16;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct iio_chan_spec adis16203_channels[] = {
+ ADIS_SUPPLY_CHAN(ADIS16203_SUPPLY_OUT, ADIS16203_SCAN_SUPPLY, 0, 12),
+ ADIS_AUX_ADC_CHAN(ADIS16203_AUX_ADC, ADIS16203_SCAN_AUX_ADC, 0, 12),
+ ADIS_INCLI_CHAN(X, ADIS16203_XINCL_OUT, ADIS16203_SCAN_INCLI_X,
+ BIT(IIO_CHAN_INFO_CALIBBIAS), 0, 14),
+ /* Fixme: Not what it appears to be - see data sheet */
+ ADIS_INCLI_CHAN(Y, ADIS16203_YINCL_OUT, ADIS16203_SCAN_INCLI_Y,
+ 0, 0, 14),
+ ADIS_TEMP_CHAN(ADIS16203_TEMP_OUT, ADIS16203_SCAN_TEMP, 0, 12),
+ IIO_CHAN_SOFT_TIMESTAMP(5),
+};
+
+static const struct iio_info adis16203_info = {
+ .read_raw = adis16203_read_raw,
+ .write_raw = adis16203_write_raw,
+ .update_scan_mode = adis_update_scan_mode,
+ .driver_module = THIS_MODULE,
+};
+
+static const char * const adis16203_status_error_msgs[] = {
+ [ADIS16203_DIAG_STAT_SELFTEST_FAIL_BIT] = "Self test failure",
+ [ADIS16203_DIAG_STAT_SPI_FAIL_BIT] = "SPI failure",
+ [ADIS16203_DIAG_STAT_FLASH_UPT_BIT] = "Flash update failed",
+ [ADIS16203_DIAG_STAT_POWER_HIGH_BIT] = "Power supply above 3.625V",
+ [ADIS16203_DIAG_STAT_POWER_LOW_BIT] = "Power supply below 3.15V",
+};
+
+static const struct adis_data adis16203_data = {
+ .read_delay = 20,
+ .msc_ctrl_reg = ADIS16203_MSC_CTRL,
+ .glob_cmd_reg = ADIS16203_GLOB_CMD,
+ .diag_stat_reg = ADIS16203_DIAG_STAT,
+
+ .self_test_mask = ADIS16203_MSC_CTRL_SELF_TEST_EN,
+ .self_test_no_autoclear = true,
+ .startup_delay = ADIS16203_STARTUP_DELAY,
+
+ .status_error_msgs = adis16203_status_error_msgs,
+ .status_error_mask = BIT(ADIS16203_DIAG_STAT_SELFTEST_FAIL_BIT) |
+ BIT(ADIS16203_DIAG_STAT_SPI_FAIL_BIT) |
+ BIT(ADIS16203_DIAG_STAT_FLASH_UPT_BIT) |
+ BIT(ADIS16203_DIAG_STAT_POWER_HIGH_BIT) |
+ BIT(ADIS16203_DIAG_STAT_POWER_LOW_BIT),
+};
+
+static int adis16203_probe(struct spi_device *spi)
+{
+ int ret;
+ struct iio_dev *indio_dev;
+ struct adis *st;
+
+ /* setup the industrialio driver allocated elements */
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+ if (!indio_dev)
+ return -ENOMEM;
+ st = iio_priv(indio_dev);
+ /* this is only used for removal purposes */
+ spi_set_drvdata(spi, indio_dev);
+
+ indio_dev->name = spi->dev.driver->name;
+ indio_dev->dev.parent = &spi->dev;
+ indio_dev->channels = adis16203_channels;
+ indio_dev->num_channels = ARRAY_SIZE(adis16203_channels);
+ indio_dev->info = &adis16203_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ ret = adis_init(st, indio_dev, spi, &adis16203_data);
+ if (ret)
+ return ret;
+
+ ret = adis_setup_buffer_and_trigger(st, indio_dev, NULL);
+ if (ret)
+ return ret;
+
+ /* Get the device into a sane initial state */
+ ret = adis_initial_startup(st);
+ if (ret)
+ goto error_cleanup_buffer_trigger;
+
+ ret = iio_device_register(indio_dev);
+ if (ret)
+ goto error_cleanup_buffer_trigger;
+
+ return 0;
+
+error_cleanup_buffer_trigger:
+ adis_cleanup_buffer_and_trigger(st, indio_dev);
+ return ret;
+}
+
+static int adis16203_remove(struct spi_device *spi)
+{
+ struct iio_dev *indio_dev = spi_get_drvdata(spi);
+ struct adis *st = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+ adis_cleanup_buffer_and_trigger(st, indio_dev);
+
+ return 0;
+}
+
+static struct spi_driver adis16203_driver = {
+ .driver = {
+ .name = "adis16203",
+ },
+ .probe = adis16203_probe,
+ .remove = adis16203_remove,
+};
+module_spi_driver(adis16203_driver);
+
+MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
+MODULE_DESCRIPTION("Analog Devices ADIS16203 Programmable 360 Degrees Inclinometer");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("spi:adis16203");
diff --git a/drivers/staging/iio/accel/adis16203.h b/drivers/staging/iio/accel/adis16203.h
deleted file mode 100644
index b483e4e6475b..000000000000
--- a/drivers/staging/iio/accel/adis16203.h
+++ /dev/null
@@ -1,125 +0,0 @@
-#ifndef SPI_ADIS16203_H_
-#define SPI_ADIS16203_H_
-
-#define ADIS16203_STARTUP_DELAY 220 /* ms */
-
-/* Flash memory write count */
-#define ADIS16203_FLASH_CNT 0x00
-
-/* Output, power supply */
-#define ADIS16203_SUPPLY_OUT 0x02
-
-/* Output, auxiliary ADC input */
-#define ADIS16203_AUX_ADC 0x08
-
-/* Output, temperature */
-#define ADIS16203_TEMP_OUT 0x0A
-
-/* Output, x-axis inclination */
-#define ADIS16203_XINCL_OUT 0x0C
-
-/* Output, y-axis inclination */
-#define ADIS16203_YINCL_OUT 0x0E
-
-/* Incline null calibration */
-#define ADIS16203_INCL_NULL 0x18
-
-/* Alarm 1 amplitude threshold */
-#define ADIS16203_ALM_MAG1 0x20
-
-/* Alarm 2 amplitude threshold */
-#define ADIS16203_ALM_MAG2 0x22
-
-/* Alarm 1, sample period */
-#define ADIS16203_ALM_SMPL1 0x24
-
-/* Alarm 2, sample period */
-#define ADIS16203_ALM_SMPL2 0x26
-
-/* Alarm control */
-#define ADIS16203_ALM_CTRL 0x28
-
-/* Auxiliary DAC data */
-#define ADIS16203_AUX_DAC 0x30
-
-/* General-purpose digital input/output control */
-#define ADIS16203_GPIO_CTRL 0x32
-
-/* Miscellaneous control */
-#define ADIS16203_MSC_CTRL 0x34
-
-/* Internal sample period (rate) control */
-#define ADIS16203_SMPL_PRD 0x36
-
-/* Operation, filter configuration */
-#define ADIS16203_AVG_CNT 0x38
-
-/* Operation, sleep mode control */
-#define ADIS16203_SLP_CNT 0x3A
-
-/* Diagnostics, system status register */
-#define ADIS16203_DIAG_STAT 0x3C
-
-/* Operation, system command register */
-#define ADIS16203_GLOB_CMD 0x3E
-
-/* MSC_CTRL */
-
-/* Self-test at power-on: 1 = disabled, 0 = enabled */
-#define ADIS16203_MSC_CTRL_PWRUP_SELF_TEST BIT(10)
-
-/* Reverses rotation of both inclination outputs */
-#define ADIS16203_MSC_CTRL_REVERSE_ROT_EN BIT(9)
-
-/* Self-test enable */
-#define ADIS16203_MSC_CTRL_SELF_TEST_EN BIT(8)
-
-/* Data-ready enable: 1 = enabled, 0 = disabled */
-#define ADIS16203_MSC_CTRL_DATA_RDY_EN BIT(2)
-
-/* Data-ready polarity: 1 = active high, 0 = active low */
-#define ADIS16203_MSC_CTRL_ACTIVE_HIGH BIT(1)
-
-/* Data-ready line selection: 1 = DIO1, 0 = DIO0 */
-#define ADIS16203_MSC_CTRL_DATA_RDY_DIO1 BIT(0)
-
-/* DIAG_STAT */
-
-/* Alarm 2 status: 1 = alarm active, 0 = alarm inactive */
-#define ADIS16203_DIAG_STAT_ALARM2 BIT(9)
-
-/* Alarm 1 status: 1 = alarm active, 0 = alarm inactive */
-#define ADIS16203_DIAG_STAT_ALARM1 BIT(8)
-
-/* Self-test diagnostic error flag */
-#define ADIS16203_DIAG_STAT_SELFTEST_FAIL_BIT 5
-
-/* SPI communications failure */
-#define ADIS16203_DIAG_STAT_SPI_FAIL_BIT 3
-
-/* Flash update failure */
-#define ADIS16203_DIAG_STAT_FLASH_UPT_BIT 2
-
-/* Power supply above 3.625 V */
-#define ADIS16203_DIAG_STAT_POWER_HIGH_BIT 1
-
-/* Power supply below 3.15 V */
-#define ADIS16203_DIAG_STAT_POWER_LOW_BIT 0
-
-/* GLOB_CMD */
-
-#define ADIS16203_GLOB_CMD_SW_RESET BIT(7)
-#define ADIS16203_GLOB_CMD_CLEAR_STAT BIT(4)
-#define ADIS16203_GLOB_CMD_FACTORY_CAL BIT(1)
-
-#define ADIS16203_ERROR_ACTIVE BIT(14)
-
-enum adis16203_scan {
- ADIS16203_SCAN_INCLI_X,
- ADIS16203_SCAN_INCLI_Y,
- ADIS16203_SCAN_SUPPLY,
- ADIS16203_SCAN_AUX_ADC,
- ADIS16203_SCAN_TEMP,
-};
-
-#endif /* SPI_ADIS16203_H_ */
diff --git a/drivers/staging/iio/accel/adis16203_core.c b/drivers/staging/iio/accel/adis16203_core.c
deleted file mode 100644
index bd8119a23339..000000000000
--- a/drivers/staging/iio/accel/adis16203_core.c
+++ /dev/null
@@ -1,216 +0,0 @@
-/*
- * ADIS16203 Programmable 360 Degrees Inclinometer
- *
- * Copyright 2010 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later.
- */
-
-#include <linux/delay.h>
-#include <linux/mutex.h>
-#include <linux/device.h>
-#include <linux/kernel.h>
-#include <linux/spi/spi.h>
-#include <linux/slab.h>
-#include <linux/sysfs.h>
-#include <linux/module.h>
-
-#include <linux/iio/iio.h>
-#include <linux/iio/sysfs.h>
-#include <linux/iio/buffer.h>
-#include <linux/iio/imu/adis.h>
-
-#include "adis16203.h"
-
-#define DRIVER_NAME "adis16203"
-
-static const u8 adis16203_addresses[] = {
- [ADIS16203_SCAN_INCLI_X] = ADIS16203_INCL_NULL,
-};
-
-static int adis16203_write_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int val,
- int val2,
- long mask)
-{
- struct adis *st = iio_priv(indio_dev);
- /* currently only one writable parameter which keeps this simple */
- u8 addr = adis16203_addresses[chan->scan_index];
-
- return adis_write_reg_16(st, addr, val & 0x3FFF);
-}
-
-static int adis16203_read_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int *val, int *val2,
- long mask)
-{
- struct adis *st = iio_priv(indio_dev);
- int ret;
- int bits;
- u8 addr;
- s16 val16;
-
- switch (mask) {
- case IIO_CHAN_INFO_RAW:
- return adis_single_conversion(indio_dev, chan,
- ADIS16203_ERROR_ACTIVE, val);
- case IIO_CHAN_INFO_SCALE:
- switch (chan->type) {
- case IIO_VOLTAGE:
- if (chan->channel == 0) {
- *val = 1;
- *val2 = 220000; /* 1.22 mV */
- } else {
- *val = 0;
- *val2 = 610000; /* 0.61 mV */
- }
- return IIO_VAL_INT_PLUS_MICRO;
- case IIO_TEMP:
- *val = -470; /* -0.47 C */
- *val2 = 0;
- return IIO_VAL_INT_PLUS_MICRO;
- case IIO_INCLI:
- *val = 0;
- *val2 = 25000; /* 0.025 degree */
- return IIO_VAL_INT_PLUS_MICRO;
- default:
- return -EINVAL;
- }
- case IIO_CHAN_INFO_OFFSET:
- *val = 25000 / -470 - 1278; /* 25 C = 1278 */
- return IIO_VAL_INT;
- case IIO_CHAN_INFO_CALIBBIAS:
- bits = 14;
- mutex_lock(&indio_dev->mlock);
- addr = adis16203_addresses[chan->scan_index];
- ret = adis_read_reg_16(st, addr, &val16);
- if (ret) {
- mutex_unlock(&indio_dev->mlock);
- return ret;
- }
- val16 &= (1 << bits) - 1;
- val16 = (s16)(val16 << (16 - bits)) >> (16 - bits);
- *val = val16;
- mutex_unlock(&indio_dev->mlock);
- return IIO_VAL_INT;
- default:
- return -EINVAL;
- }
-}
-
-static const struct iio_chan_spec adis16203_channels[] = {
- ADIS_SUPPLY_CHAN(ADIS16203_SUPPLY_OUT, ADIS16203_SCAN_SUPPLY, 0, 12),
- ADIS_AUX_ADC_CHAN(ADIS16203_AUX_ADC, ADIS16203_SCAN_AUX_ADC, 0, 12),
- ADIS_INCLI_CHAN(X, ADIS16203_XINCL_OUT, ADIS16203_SCAN_INCLI_X,
- BIT(IIO_CHAN_INFO_CALIBBIAS), 0, 14),
- /* Fixme: Not what it appears to be - see data sheet */
- ADIS_INCLI_CHAN(Y, ADIS16203_YINCL_OUT, ADIS16203_SCAN_INCLI_Y,
- 0, 0, 14),
- ADIS_TEMP_CHAN(ADIS16203_TEMP_OUT, ADIS16203_SCAN_TEMP, 0, 12),
- IIO_CHAN_SOFT_TIMESTAMP(5),
-};
-
-static const struct iio_info adis16203_info = {
- .read_raw = &adis16203_read_raw,
- .write_raw = &adis16203_write_raw,
- .update_scan_mode = adis_update_scan_mode,
- .driver_module = THIS_MODULE,
-};
-
-static const char * const adis16203_status_error_msgs[] = {
- [ADIS16203_DIAG_STAT_SELFTEST_FAIL_BIT] = "Self test failure",
- [ADIS16203_DIAG_STAT_SPI_FAIL_BIT] = "SPI failure",
- [ADIS16203_DIAG_STAT_FLASH_UPT_BIT] = "Flash update failed",
- [ADIS16203_DIAG_STAT_POWER_HIGH_BIT] = "Power supply above 3.625V",
- [ADIS16203_DIAG_STAT_POWER_LOW_BIT] = "Power supply below 3.15V",
-};
-
-static const struct adis_data adis16203_data = {
- .read_delay = 20,
- .msc_ctrl_reg = ADIS16203_MSC_CTRL,
- .glob_cmd_reg = ADIS16203_GLOB_CMD,
- .diag_stat_reg = ADIS16203_DIAG_STAT,
-
- .self_test_mask = ADIS16203_MSC_CTRL_SELF_TEST_EN,
- .self_test_no_autoclear = true,
- .startup_delay = ADIS16203_STARTUP_DELAY,
-
- .status_error_msgs = adis16203_status_error_msgs,
- .status_error_mask = BIT(ADIS16203_DIAG_STAT_SELFTEST_FAIL_BIT) |
- BIT(ADIS16203_DIAG_STAT_SPI_FAIL_BIT) |
- BIT(ADIS16203_DIAG_STAT_FLASH_UPT_BIT) |
- BIT(ADIS16203_DIAG_STAT_POWER_HIGH_BIT) |
- BIT(ADIS16203_DIAG_STAT_POWER_LOW_BIT),
-};
-
-static int adis16203_probe(struct spi_device *spi)
-{
- int ret;
- struct iio_dev *indio_dev;
- struct adis *st;
-
- /* setup the industrialio driver allocated elements */
- indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
- if (!indio_dev)
- return -ENOMEM;
- st = iio_priv(indio_dev);
- /* this is only used for removal purposes */
- spi_set_drvdata(spi, indio_dev);
-
- indio_dev->name = spi->dev.driver->name;
- indio_dev->dev.parent = &spi->dev;
- indio_dev->channels = adis16203_channels;
- indio_dev->num_channels = ARRAY_SIZE(adis16203_channels);
- indio_dev->info = &adis16203_info;
- indio_dev->modes = INDIO_DIRECT_MODE;
-
- ret = adis_init(st, indio_dev, spi, &adis16203_data);
- if (ret)
- return ret;
-
- ret = adis_setup_buffer_and_trigger(st, indio_dev, NULL);
- if (ret)
- return ret;
-
- /* Get the device into a sane initial state */
- ret = adis_initial_startup(st);
- if (ret)
- goto error_cleanup_buffer_trigger;
-
- ret = iio_device_register(indio_dev);
- if (ret)
- goto error_cleanup_buffer_trigger;
-
- return 0;
-
-error_cleanup_buffer_trigger:
- adis_cleanup_buffer_and_trigger(st, indio_dev);
- return ret;
-}
-
-static int adis16203_remove(struct spi_device *spi)
-{
- struct iio_dev *indio_dev = spi_get_drvdata(spi);
- struct adis *st = iio_priv(indio_dev);
-
- iio_device_unregister(indio_dev);
- adis_cleanup_buffer_and_trigger(st, indio_dev);
-
- return 0;
-}
-
-static struct spi_driver adis16203_driver = {
- .driver = {
- .name = "adis16203",
- },
- .probe = adis16203_probe,
- .remove = adis16203_remove,
-};
-module_spi_driver(adis16203_driver);
-
-MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
-MODULE_DESCRIPTION("Analog Devices ADIS16203 Programmable 360 Degrees Inclinometer");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("spi:adis16203");
diff --git a/drivers/staging/iio/accel/adis16209.c b/drivers/staging/iio/accel/adis16209.c
new file mode 100644
index 000000000000..8485c024e3f5
--- /dev/null
+++ b/drivers/staging/iio/accel/adis16209.c
@@ -0,0 +1,383 @@
+/*
+ * ADIS16209 Dual-Axis Digital Inclinometer and Accelerometer
+ *
+ * Copyright 2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/spi/spi.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/list.h>
+#include <linux/module.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/imu/adis.h>
+
+#define ADIS16209_STARTUP_DELAY 220 /* ms */
+
+/* Flash memory write count */
+#define ADIS16209_FLASH_CNT 0x00
+
+/* Output, power supply */
+#define ADIS16209_SUPPLY_OUT 0x02
+
+/* Output, x-axis accelerometer */
+#define ADIS16209_XACCL_OUT 0x04
+
+/* Output, y-axis accelerometer */
+#define ADIS16209_YACCL_OUT 0x06
+
+/* Output, auxiliary ADC input */
+#define ADIS16209_AUX_ADC 0x08
+
+/* Output, temperature */
+#define ADIS16209_TEMP_OUT 0x0A
+
+/* Output, x-axis inclination */
+#define ADIS16209_XINCL_OUT 0x0C
+
+/* Output, y-axis inclination */
+#define ADIS16209_YINCL_OUT 0x0E
+
+/* Output, +/-180 vertical rotational position */
+#define ADIS16209_ROT_OUT 0x10
+
+/* Calibration, x-axis acceleration offset null */
+#define ADIS16209_XACCL_NULL 0x12
+
+/* Calibration, y-axis acceleration offset null */
+#define ADIS16209_YACCL_NULL 0x14
+
+/* Calibration, x-axis inclination offset null */
+#define ADIS16209_XINCL_NULL 0x16
+
+/* Calibration, y-axis inclination offset null */
+#define ADIS16209_YINCL_NULL 0x18
+
+/* Calibration, vertical rotation offset null */
+#define ADIS16209_ROT_NULL 0x1A
+
+/* Alarm 1 amplitude threshold */
+#define ADIS16209_ALM_MAG1 0x20
+
+/* Alarm 2 amplitude threshold */
+#define ADIS16209_ALM_MAG2 0x22
+
+/* Alarm 1, sample period */
+#define ADIS16209_ALM_SMPL1 0x24
+
+/* Alarm 2, sample period */
+#define ADIS16209_ALM_SMPL2 0x26
+
+/* Alarm control */
+#define ADIS16209_ALM_CTRL 0x28
+
+/* Auxiliary DAC data */
+#define ADIS16209_AUX_DAC 0x30
+
+/* General-purpose digital input/output control */
+#define ADIS16209_GPIO_CTRL 0x32
+
+/* Miscellaneous control */
+#define ADIS16209_MSC_CTRL 0x34
+
+/* Internal sample period (rate) control */
+#define ADIS16209_SMPL_PRD 0x36
+
+/* Operation, filter configuration */
+#define ADIS16209_AVG_CNT 0x38
+
+/* Operation, sleep mode control */
+#define ADIS16209_SLP_CNT 0x3A
+
+/* Diagnostics, system status register */
+#define ADIS16209_DIAG_STAT 0x3C
+
+/* Operation, system command register */
+#define ADIS16209_GLOB_CMD 0x3E
+
+/* MSC_CTRL */
+
+/* Self-test at power-on: 1 = disabled, 0 = enabled */
+#define ADIS16209_MSC_CTRL_PWRUP_SELF_TEST BIT(10)
+
+/* Self-test enable */
+#define ADIS16209_MSC_CTRL_SELF_TEST_EN BIT(8)
+
+/* Data-ready enable: 1 = enabled, 0 = disabled */
+#define ADIS16209_MSC_CTRL_DATA_RDY_EN BIT(2)
+
+/* Data-ready polarity: 1 = active high, 0 = active low */
+#define ADIS16209_MSC_CTRL_ACTIVE_HIGH BIT(1)
+
+/* Data-ready line selection: 1 = DIO2, 0 = DIO1 */
+#define ADIS16209_MSC_CTRL_DATA_RDY_DIO2 BIT(0)
+
+/* DIAG_STAT */
+
+/* Alarm 2 status: 1 = alarm active, 0 = alarm inactive */
+#define ADIS16209_DIAG_STAT_ALARM2 BIT(9)
+
+/* Alarm 1 status: 1 = alarm active, 0 = alarm inactive */
+#define ADIS16209_DIAG_STAT_ALARM1 BIT(8)
+
+/* Self-test diagnostic error flag: 1 = error condition, 0 = normal operation */
+#define ADIS16209_DIAG_STAT_SELFTEST_FAIL_BIT 5
+
+/* SPI communications failure */
+#define ADIS16209_DIAG_STAT_SPI_FAIL_BIT 3
+
+/* Flash update failure */
+#define ADIS16209_DIAG_STAT_FLASH_UPT_BIT 2
+
+/* Power supply above 3.625 V */
+#define ADIS16209_DIAG_STAT_POWER_HIGH_BIT 1
+
+/* Power supply below 3.15 V */
+#define ADIS16209_DIAG_STAT_POWER_LOW_BIT 0
+
+/* GLOB_CMD */
+
+#define ADIS16209_GLOB_CMD_SW_RESET BIT(7)
+#define ADIS16209_GLOB_CMD_CLEAR_STAT BIT(4)
+#define ADIS16209_GLOB_CMD_FACTORY_CAL BIT(1)
+
+#define ADIS16209_ERROR_ACTIVE BIT(14)
+
+enum adis16209_scan {
+ ADIS16209_SCAN_SUPPLY,
+ ADIS16209_SCAN_ACC_X,
+ ADIS16209_SCAN_ACC_Y,
+ ADIS16209_SCAN_AUX_ADC,
+ ADIS16209_SCAN_TEMP,
+ ADIS16209_SCAN_INCLI_X,
+ ADIS16209_SCAN_INCLI_Y,
+ ADIS16209_SCAN_ROT,
+};
+
+static const u8 adis16209_addresses[8][1] = {
+ [ADIS16209_SCAN_SUPPLY] = { },
+ [ADIS16209_SCAN_AUX_ADC] = { },
+ [ADIS16209_SCAN_ACC_X] = { ADIS16209_XACCL_NULL },
+ [ADIS16209_SCAN_ACC_Y] = { ADIS16209_YACCL_NULL },
+ [ADIS16209_SCAN_INCLI_X] = { ADIS16209_XINCL_NULL },
+ [ADIS16209_SCAN_INCLI_Y] = { ADIS16209_YINCL_NULL },
+ [ADIS16209_SCAN_ROT] = { },
+ [ADIS16209_SCAN_TEMP] = { },
+};
+
+static int adis16209_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val,
+ int val2,
+ long mask)
+{
+ struct adis *st = iio_priv(indio_dev);
+ int bits;
+ s16 val16;
+ u8 addr;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_CALIBBIAS:
+ switch (chan->type) {
+ case IIO_ACCEL:
+ case IIO_INCLI:
+ bits = 14;
+ break;
+ default:
+ return -EINVAL;
+ }
+ val16 = val & ((1 << bits) - 1);
+ addr = adis16209_addresses[chan->scan_index][0];
+ return adis_write_reg_16(st, addr, val16);
+ }
+ return -EINVAL;
+}
+
+static int adis16209_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2,
+ long mask)
+{
+ struct adis *st = iio_priv(indio_dev);
+ int ret;
+ int bits;
+ u8 addr;
+ s16 val16;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ return adis_single_conversion(indio_dev, chan,
+ ADIS16209_ERROR_ACTIVE, val);
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_VOLTAGE:
+ *val = 0;
+ if (chan->channel == 0)
+ *val2 = 305180; /* 0.30518 mV */
+ else
+ *val2 = 610500; /* 0.6105 mV */
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_TEMP:
+ *val = -470; /* -0.47 C */
+ *val2 = 0;
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_ACCEL:
+ *val = 0;
+ *val2 = IIO_G_TO_M_S_2(244140); /* 0.244140 mg */
+ return IIO_VAL_INT_PLUS_NANO;
+ case IIO_INCLI:
+ case IIO_ROT:
+ *val = 0;
+ *val2 = 25000; /* 0.025 degree */
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case IIO_CHAN_INFO_OFFSET:
+ *val = 25000 / -470 - 0x4FE; /* 25 C = 0x4FE */
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_CALIBBIAS:
+ switch (chan->type) {
+ case IIO_ACCEL:
+ bits = 14;
+ break;
+ default:
+ return -EINVAL;
+ }
+ addr = adis16209_addresses[chan->scan_index][0];
+ ret = adis_read_reg_16(st, addr, &val16);
+ if (ret)
+ return ret;
+ val16 &= (1 << bits) - 1;
+ val16 = (s16)(val16 << (16 - bits)) >> (16 - bits);
+ *val = val16;
+ return IIO_VAL_INT;
+ }
+ return -EINVAL;
+}
+
+static const struct iio_chan_spec adis16209_channels[] = {
+ ADIS_SUPPLY_CHAN(ADIS16209_SUPPLY_OUT, ADIS16209_SCAN_SUPPLY, 0, 14),
+ ADIS_TEMP_CHAN(ADIS16209_TEMP_OUT, ADIS16209_SCAN_TEMP, 0, 12),
+ ADIS_ACCEL_CHAN(X, ADIS16209_XACCL_OUT, ADIS16209_SCAN_ACC_X,
+ BIT(IIO_CHAN_INFO_CALIBBIAS), 0, 14),
+ ADIS_ACCEL_CHAN(Y, ADIS16209_YACCL_OUT, ADIS16209_SCAN_ACC_Y,
+ BIT(IIO_CHAN_INFO_CALIBBIAS), 0, 14),
+ ADIS_AUX_ADC_CHAN(ADIS16209_AUX_ADC, ADIS16209_SCAN_AUX_ADC, 0, 12),
+ ADIS_INCLI_CHAN(X, ADIS16209_XINCL_OUT, ADIS16209_SCAN_INCLI_X,
+ 0, 0, 14),
+ ADIS_INCLI_CHAN(Y, ADIS16209_YINCL_OUT, ADIS16209_SCAN_INCLI_Y,
+ 0, 0, 14),
+ ADIS_ROT_CHAN(X, ADIS16209_ROT_OUT, ADIS16209_SCAN_ROT, 0, 0, 14),
+ IIO_CHAN_SOFT_TIMESTAMP(8)
+};
+
+static const struct iio_info adis16209_info = {
+ .read_raw = adis16209_read_raw,
+ .write_raw = adis16209_write_raw,
+ .update_scan_mode = adis_update_scan_mode,
+ .driver_module = THIS_MODULE,
+};
+
+static const char * const adis16209_status_error_msgs[] = {
+ [ADIS16209_DIAG_STAT_SELFTEST_FAIL_BIT] = "Self test failure",
+ [ADIS16209_DIAG_STAT_SPI_FAIL_BIT] = "SPI failure",
+ [ADIS16209_DIAG_STAT_FLASH_UPT_BIT] = "Flash update failed",
+ [ADIS16209_DIAG_STAT_POWER_HIGH_BIT] = "Power supply above 3.625V",
+ [ADIS16209_DIAG_STAT_POWER_LOW_BIT] = "Power supply below 3.15V",
+};
+
+static const struct adis_data adis16209_data = {
+ .read_delay = 30,
+ .msc_ctrl_reg = ADIS16209_MSC_CTRL,
+ .glob_cmd_reg = ADIS16209_GLOB_CMD,
+ .diag_stat_reg = ADIS16209_DIAG_STAT,
+
+ .self_test_mask = ADIS16209_MSC_CTRL_SELF_TEST_EN,
+ .self_test_no_autoclear = true,
+ .startup_delay = ADIS16209_STARTUP_DELAY,
+
+ .status_error_msgs = adis16209_status_error_msgs,
+ .status_error_mask = BIT(ADIS16209_DIAG_STAT_SELFTEST_FAIL_BIT) |
+ BIT(ADIS16209_DIAG_STAT_SPI_FAIL_BIT) |
+ BIT(ADIS16209_DIAG_STAT_FLASH_UPT_BIT) |
+ BIT(ADIS16209_DIAG_STAT_POWER_HIGH_BIT) |
+ BIT(ADIS16209_DIAG_STAT_POWER_LOW_BIT),
+};
+
+static int adis16209_probe(struct spi_device *spi)
+{
+ int ret;
+ struct adis *st;
+ struct iio_dev *indio_dev;
+
+ /* setup the industrialio driver allocated elements */
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+ if (!indio_dev)
+ return -ENOMEM;
+ st = iio_priv(indio_dev);
+ /* this is only used for removal purposes */
+ spi_set_drvdata(spi, indio_dev);
+
+ indio_dev->name = spi->dev.driver->name;
+ indio_dev->dev.parent = &spi->dev;
+ indio_dev->info = &adis16209_info;
+ indio_dev->channels = adis16209_channels;
+ indio_dev->num_channels = ARRAY_SIZE(adis16209_channels);
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ ret = adis_init(st, indio_dev, spi, &adis16209_data);
+ if (ret)
+ return ret;
+ ret = adis_setup_buffer_and_trigger(st, indio_dev, NULL);
+ if (ret)
+ return ret;
+
+ /* Get the device into a sane initial state */
+ ret = adis_initial_startup(st);
+ if (ret)
+ goto error_cleanup_buffer_trigger;
+ ret = iio_device_register(indio_dev);
+ if (ret)
+ goto error_cleanup_buffer_trigger;
+
+ return 0;
+
+error_cleanup_buffer_trigger:
+ adis_cleanup_buffer_and_trigger(st, indio_dev);
+ return ret;
+}
+
+static int adis16209_remove(struct spi_device *spi)
+{
+ struct iio_dev *indio_dev = spi_get_drvdata(spi);
+ struct adis *st = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+ adis_cleanup_buffer_and_trigger(st, indio_dev);
+
+ return 0;
+}
+
+static struct spi_driver adis16209_driver = {
+ .driver = {
+ .name = "adis16209",
+ },
+ .probe = adis16209_probe,
+ .remove = adis16209_remove,
+};
+module_spi_driver(adis16209_driver);
+
+MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
+MODULE_DESCRIPTION("Analog Devices ADIS16209 Dual-Axis Digital Inclinometer and Accelerometer");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("spi:adis16209");
diff --git a/drivers/staging/iio/accel/adis16209.h b/drivers/staging/iio/accel/adis16209.h
deleted file mode 100644
index 315f1c0c46e8..000000000000
--- a/drivers/staging/iio/accel/adis16209.h
+++ /dev/null
@@ -1,144 +0,0 @@
-#ifndef SPI_ADIS16209_H_
-#define SPI_ADIS16209_H_
-
-#define ADIS16209_STARTUP_DELAY 220 /* ms */
-
-/* Flash memory write count */
-#define ADIS16209_FLASH_CNT 0x00
-
-/* Output, power supply */
-#define ADIS16209_SUPPLY_OUT 0x02
-
-/* Output, x-axis accelerometer */
-#define ADIS16209_XACCL_OUT 0x04
-
-/* Output, y-axis accelerometer */
-#define ADIS16209_YACCL_OUT 0x06
-
-/* Output, auxiliary ADC input */
-#define ADIS16209_AUX_ADC 0x08
-
-/* Output, temperature */
-#define ADIS16209_TEMP_OUT 0x0A
-
-/* Output, x-axis inclination */
-#define ADIS16209_XINCL_OUT 0x0C
-
-/* Output, y-axis inclination */
-#define ADIS16209_YINCL_OUT 0x0E
-
-/* Output, +/-180 vertical rotational position */
-#define ADIS16209_ROT_OUT 0x10
-
-/* Calibration, x-axis acceleration offset null */
-#define ADIS16209_XACCL_NULL 0x12
-
-/* Calibration, y-axis acceleration offset null */
-#define ADIS16209_YACCL_NULL 0x14
-
-/* Calibration, x-axis inclination offset null */
-#define ADIS16209_XINCL_NULL 0x16
-
-/* Calibration, y-axis inclination offset null */
-#define ADIS16209_YINCL_NULL 0x18
-
-/* Calibration, vertical rotation offset null */
-#define ADIS16209_ROT_NULL 0x1A
-
-/* Alarm 1 amplitude threshold */
-#define ADIS16209_ALM_MAG1 0x20
-
-/* Alarm 2 amplitude threshold */
-#define ADIS16209_ALM_MAG2 0x22
-
-/* Alarm 1, sample period */
-#define ADIS16209_ALM_SMPL1 0x24
-
-/* Alarm 2, sample period */
-#define ADIS16209_ALM_SMPL2 0x26
-
-/* Alarm control */
-#define ADIS16209_ALM_CTRL 0x28
-
-/* Auxiliary DAC data */
-#define ADIS16209_AUX_DAC 0x30
-
-/* General-purpose digital input/output control */
-#define ADIS16209_GPIO_CTRL 0x32
-
-/* Miscellaneous control */
-#define ADIS16209_MSC_CTRL 0x34
-
-/* Internal sample period (rate) control */
-#define ADIS16209_SMPL_PRD 0x36
-
-/* Operation, filter configuration */
-#define ADIS16209_AVG_CNT 0x38
-
-/* Operation, sleep mode control */
-#define ADIS16209_SLP_CNT 0x3A
-
-/* Diagnostics, system status register */
-#define ADIS16209_DIAG_STAT 0x3C
-
-/* Operation, system command register */
-#define ADIS16209_GLOB_CMD 0x3E
-
-/* MSC_CTRL */
-
-/* Self-test at power-on: 1 = disabled, 0 = enabled */
-#define ADIS16209_MSC_CTRL_PWRUP_SELF_TEST BIT(10)
-
-/* Self-test enable */
-#define ADIS16209_MSC_CTRL_SELF_TEST_EN BIT(8)
-
-/* Data-ready enable: 1 = enabled, 0 = disabled */
-#define ADIS16209_MSC_CTRL_DATA_RDY_EN BIT(2)
-
-/* Data-ready polarity: 1 = active high, 0 = active low */
-#define ADIS16209_MSC_CTRL_ACTIVE_HIGH BIT(1)
-
-/* Data-ready line selection: 1 = DIO2, 0 = DIO1 */
-#define ADIS16209_MSC_CTRL_DATA_RDY_DIO2 BIT(0)
-
-/* DIAG_STAT */
-
-/* Alarm 2 status: 1 = alarm active, 0 = alarm inactive */
-#define ADIS16209_DIAG_STAT_ALARM2 BIT(9)
-
-/* Alarm 1 status: 1 = alarm active, 0 = alarm inactive */
-#define ADIS16209_DIAG_STAT_ALARM1 BIT(8)
-
-/* Self-test diagnostic error flag: 1 = error condition, 0 = normal operation */
-#define ADIS16209_DIAG_STAT_SELFTEST_FAIL_BIT 5
-
-/* SPI communications failure */
-#define ADIS16209_DIAG_STAT_SPI_FAIL_BIT 3
-
-/* Flash update failure */
-#define ADIS16209_DIAG_STAT_FLASH_UPT_BIT 2
-
-/* Power supply above 3.625 V */
-#define ADIS16209_DIAG_STAT_POWER_HIGH_BIT 1
-
-/* Power supply below 3.15 V */
-#define ADIS16209_DIAG_STAT_POWER_LOW_BIT 0
-
-/* GLOB_CMD */
-
-#define ADIS16209_GLOB_CMD_SW_RESET BIT(7)
-#define ADIS16209_GLOB_CMD_CLEAR_STAT BIT(4)
-#define ADIS16209_GLOB_CMD_FACTORY_CAL BIT(1)
-
-#define ADIS16209_ERROR_ACTIVE BIT(14)
-
-#define ADIS16209_SCAN_SUPPLY 0
-#define ADIS16209_SCAN_ACC_X 1
-#define ADIS16209_SCAN_ACC_Y 2
-#define ADIS16209_SCAN_AUX_ADC 3
-#define ADIS16209_SCAN_TEMP 4
-#define ADIS16209_SCAN_INCLI_X 5
-#define ADIS16209_SCAN_INCLI_Y 6
-#define ADIS16209_SCAN_ROT 7
-
-#endif /* SPI_ADIS16209_H_ */
diff --git a/drivers/staging/iio/accel/adis16209_core.c b/drivers/staging/iio/accel/adis16209_core.c
deleted file mode 100644
index a599e19303d3..000000000000
--- a/drivers/staging/iio/accel/adis16209_core.c
+++ /dev/null
@@ -1,248 +0,0 @@
-/*
- * ADIS16209 Dual-Axis Digital Inclinometer and Accelerometer
- *
- * Copyright 2010 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later.
- */
-
-#include <linux/delay.h>
-#include <linux/mutex.h>
-#include <linux/device.h>
-#include <linux/kernel.h>
-#include <linux/spi/spi.h>
-#include <linux/slab.h>
-#include <linux/sysfs.h>
-#include <linux/list.h>
-#include <linux/module.h>
-
-#include <linux/iio/iio.h>
-#include <linux/iio/sysfs.h>
-#include <linux/iio/buffer.h>
-#include <linux/iio/imu/adis.h>
-
-#include "adis16209.h"
-
-static const u8 adis16209_addresses[8][1] = {
- [ADIS16209_SCAN_SUPPLY] = { },
- [ADIS16209_SCAN_AUX_ADC] = { },
- [ADIS16209_SCAN_ACC_X] = { ADIS16209_XACCL_NULL },
- [ADIS16209_SCAN_ACC_Y] = { ADIS16209_YACCL_NULL },
- [ADIS16209_SCAN_INCLI_X] = { ADIS16209_XINCL_NULL },
- [ADIS16209_SCAN_INCLI_Y] = { ADIS16209_YINCL_NULL },
- [ADIS16209_SCAN_ROT] = { },
- [ADIS16209_SCAN_TEMP] = { },
-};
-
-static int adis16209_write_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int val,
- int val2,
- long mask)
-{
- struct adis *st = iio_priv(indio_dev);
- int bits;
- s16 val16;
- u8 addr;
-
- switch (mask) {
- case IIO_CHAN_INFO_CALIBBIAS:
- switch (chan->type) {
- case IIO_ACCEL:
- case IIO_INCLI:
- bits = 14;
- break;
- default:
- return -EINVAL;
- }
- val16 = val & ((1 << bits) - 1);
- addr = adis16209_addresses[chan->scan_index][0];
- return adis_write_reg_16(st, addr, val16);
- }
- return -EINVAL;
-}
-
-static int adis16209_read_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int *val, int *val2,
- long mask)
-{
- struct adis *st = iio_priv(indio_dev);
- int ret;
- int bits;
- u8 addr;
- s16 val16;
-
- switch (mask) {
- case IIO_CHAN_INFO_RAW:
- return adis_single_conversion(indio_dev, chan,
- ADIS16209_ERROR_ACTIVE, val);
- case IIO_CHAN_INFO_SCALE:
- switch (chan->type) {
- case IIO_VOLTAGE:
- *val = 0;
- if (chan->channel == 0)
- *val2 = 305180; /* 0.30518 mV */
- else
- *val2 = 610500; /* 0.6105 mV */
- return IIO_VAL_INT_PLUS_MICRO;
- case IIO_TEMP:
- *val = -470; /* -0.47 C */
- *val2 = 0;
- return IIO_VAL_INT_PLUS_MICRO;
- case IIO_ACCEL:
- *val = 0;
- *val2 = IIO_G_TO_M_S_2(244140); /* 0.244140 mg */
- return IIO_VAL_INT_PLUS_NANO;
- case IIO_INCLI:
- case IIO_ROT:
- *val = 0;
- *val2 = 25000; /* 0.025 degree */
- return IIO_VAL_INT_PLUS_MICRO;
- default:
- return -EINVAL;
- }
- break;
- case IIO_CHAN_INFO_OFFSET:
- *val = 25000 / -470 - 0x4FE; /* 25 C = 0x4FE */
- return IIO_VAL_INT;
- case IIO_CHAN_INFO_CALIBBIAS:
- switch (chan->type) {
- case IIO_ACCEL:
- bits = 14;
- break;
- default:
- return -EINVAL;
- }
- mutex_lock(&indio_dev->mlock);
- addr = adis16209_addresses[chan->scan_index][0];
- ret = adis_read_reg_16(st, addr, &val16);
- if (ret) {
- mutex_unlock(&indio_dev->mlock);
- return ret;
- }
- val16 &= (1 << bits) - 1;
- val16 = (s16)(val16 << (16 - bits)) >> (16 - bits);
- *val = val16;
- mutex_unlock(&indio_dev->mlock);
- return IIO_VAL_INT;
- }
- return -EINVAL;
-}
-
-static const struct iio_chan_spec adis16209_channels[] = {
- ADIS_SUPPLY_CHAN(ADIS16209_SUPPLY_OUT, ADIS16209_SCAN_SUPPLY, 0, 14),
- ADIS_TEMP_CHAN(ADIS16209_TEMP_OUT, ADIS16209_SCAN_TEMP, 0, 12),
- ADIS_ACCEL_CHAN(X, ADIS16209_XACCL_OUT, ADIS16209_SCAN_ACC_X,
- BIT(IIO_CHAN_INFO_CALIBBIAS), 0, 14),
- ADIS_ACCEL_CHAN(Y, ADIS16209_YACCL_OUT, ADIS16209_SCAN_ACC_Y,
- BIT(IIO_CHAN_INFO_CALIBBIAS), 0, 14),
- ADIS_AUX_ADC_CHAN(ADIS16209_AUX_ADC, ADIS16209_SCAN_AUX_ADC, 0, 12),
- ADIS_INCLI_CHAN(X, ADIS16209_XINCL_OUT, ADIS16209_SCAN_INCLI_X,
- 0, 0, 14),
- ADIS_INCLI_CHAN(Y, ADIS16209_YINCL_OUT, ADIS16209_SCAN_INCLI_Y,
- 0, 0, 14),
- ADIS_ROT_CHAN(X, ADIS16209_ROT_OUT, ADIS16209_SCAN_ROT, 0, 0, 14),
- IIO_CHAN_SOFT_TIMESTAMP(8)
-};
-
-static const struct iio_info adis16209_info = {
- .read_raw = &adis16209_read_raw,
- .write_raw = &adis16209_write_raw,
- .update_scan_mode = adis_update_scan_mode,
- .driver_module = THIS_MODULE,
-};
-
-static const char * const adis16209_status_error_msgs[] = {
- [ADIS16209_DIAG_STAT_SELFTEST_FAIL_BIT] = "Self test failure",
- [ADIS16209_DIAG_STAT_SPI_FAIL_BIT] = "SPI failure",
- [ADIS16209_DIAG_STAT_FLASH_UPT_BIT] = "Flash update failed",
- [ADIS16209_DIAG_STAT_POWER_HIGH_BIT] = "Power supply above 3.625V",
- [ADIS16209_DIAG_STAT_POWER_LOW_BIT] = "Power supply below 3.15V",
-};
-
-static const struct adis_data adis16209_data = {
- .read_delay = 30,
- .msc_ctrl_reg = ADIS16209_MSC_CTRL,
- .glob_cmd_reg = ADIS16209_GLOB_CMD,
- .diag_stat_reg = ADIS16209_DIAG_STAT,
-
- .self_test_mask = ADIS16209_MSC_CTRL_SELF_TEST_EN,
- .self_test_no_autoclear = true,
- .startup_delay = ADIS16209_STARTUP_DELAY,
-
- .status_error_msgs = adis16209_status_error_msgs,
- .status_error_mask = BIT(ADIS16209_DIAG_STAT_SELFTEST_FAIL_BIT) |
- BIT(ADIS16209_DIAG_STAT_SPI_FAIL_BIT) |
- BIT(ADIS16209_DIAG_STAT_FLASH_UPT_BIT) |
- BIT(ADIS16209_DIAG_STAT_POWER_HIGH_BIT) |
- BIT(ADIS16209_DIAG_STAT_POWER_LOW_BIT),
-};
-
-static int adis16209_probe(struct spi_device *spi)
-{
- int ret;
- struct adis *st;
- struct iio_dev *indio_dev;
-
- /* setup the industrialio driver allocated elements */
- indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
- if (!indio_dev)
- return -ENOMEM;
- st = iio_priv(indio_dev);
- /* this is only used for removal purposes */
- spi_set_drvdata(spi, indio_dev);
-
- indio_dev->name = spi->dev.driver->name;
- indio_dev->dev.parent = &spi->dev;
- indio_dev->info = &adis16209_info;
- indio_dev->channels = adis16209_channels;
- indio_dev->num_channels = ARRAY_SIZE(adis16209_channels);
- indio_dev->modes = INDIO_DIRECT_MODE;
-
- ret = adis_init(st, indio_dev, spi, &adis16209_data);
- if (ret)
- return ret;
- ret = adis_setup_buffer_and_trigger(st, indio_dev, NULL);
- if (ret)
- return ret;
-
- /* Get the device into a sane initial state */
- ret = adis_initial_startup(st);
- if (ret)
- goto error_cleanup_buffer_trigger;
- ret = iio_device_register(indio_dev);
- if (ret)
- goto error_cleanup_buffer_trigger;
-
- return 0;
-
-error_cleanup_buffer_trigger:
- adis_cleanup_buffer_and_trigger(st, indio_dev);
- return ret;
-}
-
-static int adis16209_remove(struct spi_device *spi)
-{
- struct iio_dev *indio_dev = spi_get_drvdata(spi);
- struct adis *st = iio_priv(indio_dev);
-
- iio_device_unregister(indio_dev);
- adis_cleanup_buffer_and_trigger(st, indio_dev);
-
- return 0;
-}
-
-static struct spi_driver adis16209_driver = {
- .driver = {
- .name = "adis16209",
- },
- .probe = adis16209_probe,
- .remove = adis16209_remove,
-};
-module_spi_driver(adis16209_driver);
-
-MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
-MODULE_DESCRIPTION("Analog Devices ADIS16209 Dual-Axis Digital Inclinometer and Accelerometer");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("spi:adis16209");
diff --git a/drivers/staging/iio/accel/adis16240.c b/drivers/staging/iio/accel/adis16240.c
new file mode 100644
index 000000000000..109cd94b5ac3
--- /dev/null
+++ b/drivers/staging/iio/accel/adis16240.c
@@ -0,0 +1,459 @@
+/*
+ * ADIS16240 Programmable Impact Sensor and Recorder driver
+ *
+ * Copyright 2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/spi/spi.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/list.h>
+#include <linux/module.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/imu/adis.h>
+
+#define ADIS16240_STARTUP_DELAY 220 /* ms */
+
+/* Flash memory write count */
+#define ADIS16240_FLASH_CNT 0x00
+
+/* Output, power supply */
+#define ADIS16240_SUPPLY_OUT 0x02
+
+/* Output, x-axis accelerometer */
+#define ADIS16240_XACCL_OUT 0x04
+
+/* Output, y-axis accelerometer */
+#define ADIS16240_YACCL_OUT 0x06
+
+/* Output, z-axis accelerometer */
+#define ADIS16240_ZACCL_OUT 0x08
+
+/* Output, auxiliary ADC input */
+#define ADIS16240_AUX_ADC 0x0A
+
+/* Output, temperature */
+#define ADIS16240_TEMP_OUT 0x0C
+
+/* Output, x-axis acceleration peak */
+#define ADIS16240_XPEAK_OUT 0x0E
+
+/* Output, y-axis acceleration peak */
+#define ADIS16240_YPEAK_OUT 0x10
+
+/* Output, z-axis acceleration peak */
+#define ADIS16240_ZPEAK_OUT 0x12
+
+/* Output, sum-of-squares acceleration peak */
+#define ADIS16240_XYZPEAK_OUT 0x14
+
+/* Output, Capture Buffer 1, X and Y acceleration */
+#define ADIS16240_CAPT_BUF1 0x16
+
+/* Output, Capture Buffer 2, Z acceleration */
+#define ADIS16240_CAPT_BUF2 0x18
+
+/* Diagnostic, error flags */
+#define ADIS16240_DIAG_STAT 0x1A
+
+/* Diagnostic, event counter */
+#define ADIS16240_EVNT_CNTR 0x1C
+
+/* Diagnostic, check sum value from firmware test */
+#define ADIS16240_CHK_SUM 0x1E
+
+/* Calibration, x-axis acceleration offset adjustment */
+#define ADIS16240_XACCL_OFF 0x20
+
+/* Calibration, y-axis acceleration offset adjustment */
+#define ADIS16240_YACCL_OFF 0x22
+
+/* Calibration, z-axis acceleration offset adjustment */
+#define ADIS16240_ZACCL_OFF 0x24
+
+/* Clock, hour and minute */
+#define ADIS16240_CLK_TIME 0x2E
+
+/* Clock, month and day */
+#define ADIS16240_CLK_DATE 0x30
+
+/* Clock, year */
+#define ADIS16240_CLK_YEAR 0x32
+
+/* Wake-up setting, hour and minute */
+#define ADIS16240_WAKE_TIME 0x34
+
+/* Wake-up setting, month and day */
+#define ADIS16240_WAKE_DATE 0x36
+
+/* Alarm 1 amplitude threshold */
+#define ADIS16240_ALM_MAG1 0x38
+
+/* Alarm 2 amplitude threshold */
+#define ADIS16240_ALM_MAG2 0x3A
+
+/* Alarm control */
+#define ADIS16240_ALM_CTRL 0x3C
+
+/* Capture, external trigger control */
+#define ADIS16240_XTRIG_CTRL 0x3E
+
+/* Capture, address pointer */
+#define ADIS16240_CAPT_PNTR 0x40
+
+/* Capture, configuration and control */
+#define ADIS16240_CAPT_CTRL 0x42
+
+/* General-purpose digital input/output control */
+#define ADIS16240_GPIO_CTRL 0x44
+
+/* Miscellaneous control */
+#define ADIS16240_MSC_CTRL 0x46
+
+/* Internal sample period (rate) control */
+#define ADIS16240_SMPL_PRD 0x48
+
+/* System command */
+#define ADIS16240_GLOB_CMD 0x4A
+
+/* MSC_CTRL */
+
+/* Enables sum-of-squares output (XYZPEAK_OUT) */
+#define ADIS16240_MSC_CTRL_XYZPEAK_OUT_EN BIT(15)
+
+/* Enables peak tracking output (XPEAK_OUT, YPEAK_OUT, and ZPEAK_OUT) */
+#define ADIS16240_MSC_CTRL_X_Y_ZPEAK_OUT_EN BIT(14)
+
+/* Self-test enable: 1 = apply electrostatic force, 0 = disabled */
+#define ADIS16240_MSC_CTRL_SELF_TEST_EN BIT(8)
+
+/* Data-ready enable: 1 = enabled, 0 = disabled */
+#define ADIS16240_MSC_CTRL_DATA_RDY_EN BIT(2)
+
+/* Data-ready polarity: 1 = active high, 0 = active low */
+#define ADIS16240_MSC_CTRL_ACTIVE_HIGH BIT(1)
+
+/* Data-ready line selection: 1 = DIO2, 0 = DIO1 */
+#define ADIS16240_MSC_CTRL_DATA_RDY_DIO2 BIT(0)
+
+/* DIAG_STAT */
+
+/* Alarm 2 status: 1 = alarm active, 0 = alarm inactive */
+#define ADIS16240_DIAG_STAT_ALARM2 BIT(9)
+
+/* Alarm 1 status: 1 = alarm active, 0 = alarm inactive */
+#define ADIS16240_DIAG_STAT_ALARM1 BIT(8)
+
+/* Capture buffer full: 1 = capture buffer is full */
+#define ADIS16240_DIAG_STAT_CPT_BUF_FUL BIT(7)
+
+/* Flash test, checksum flag: 1 = mismatch, 0 = match */
+#define ADIS16240_DIAG_STAT_CHKSUM BIT(6)
+
+/* Power-on, self-test flag: 1 = failure, 0 = pass */
+#define ADIS16240_DIAG_STAT_PWRON_FAIL_BIT 5
+
+/* Power-on self-test: 1 = in-progress, 0 = complete */
+#define ADIS16240_DIAG_STAT_PWRON_BUSY BIT(4)
+
+/* SPI communications failure */
+#define ADIS16240_DIAG_STAT_SPI_FAIL_BIT 3
+
+/* Flash update failure */
+#define ADIS16240_DIAG_STAT_FLASH_UPT_BIT 2
+
+/* Power supply above 3.625 V */
+#define ADIS16240_DIAG_STAT_POWER_HIGH_BIT 1
+
+/* Power supply below 3.15 V */
+#define ADIS16240_DIAG_STAT_POWER_LOW_BIT 0
+
+/* GLOB_CMD */
+
+#define ADIS16240_GLOB_CMD_RESUME BIT(8)
+#define ADIS16240_GLOB_CMD_SW_RESET BIT(7)
+#define ADIS16240_GLOB_CMD_STANDBY BIT(2)
+
+#define ADIS16240_ERROR_ACTIVE BIT(14)
+
+/* At the moment triggers are only used for ring buffer
+ * filling. This may change!
+ */
+
+enum adis16240_scan {
+ ADIS16240_SCAN_ACC_X,
+ ADIS16240_SCAN_ACC_Y,
+ ADIS16240_SCAN_ACC_Z,
+ ADIS16240_SCAN_SUPPLY,
+ ADIS16240_SCAN_AUX_ADC,
+ ADIS16240_SCAN_TEMP,
+};
+
+static ssize_t adis16240_spi_read_signed(struct device *dev,
+ struct device_attribute *attr,
+ char *buf,
+ unsigned int bits)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct adis *st = iio_priv(indio_dev);
+ int ret;
+ s16 val = 0;
+ unsigned int shift = 16 - bits;
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+
+ ret = adis_read_reg_16(st,
+ this_attr->address, (u16 *)&val);
+ if (ret)
+ return ret;
+
+ if (val & ADIS16240_ERROR_ACTIVE)
+ adis_check_status(st);
+
+ val = (s16)(val << shift) >> shift;
+ return sprintf(buf, "%d\n", val);
+}
+
+static ssize_t adis16240_read_12bit_signed(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return adis16240_spi_read_signed(dev, attr, buf, 12);
+}
+
+static IIO_DEVICE_ATTR(in_accel_xyz_squared_peak_raw, 0444,
+ adis16240_read_12bit_signed, NULL,
+ ADIS16240_XYZPEAK_OUT);
+
+static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("4096");
+
+static const u8 adis16240_addresses[][2] = {
+ [ADIS16240_SCAN_ACC_X] = { ADIS16240_XACCL_OFF, ADIS16240_XPEAK_OUT },
+ [ADIS16240_SCAN_ACC_Y] = { ADIS16240_YACCL_OFF, ADIS16240_YPEAK_OUT },
+ [ADIS16240_SCAN_ACC_Z] = { ADIS16240_ZACCL_OFF, ADIS16240_ZPEAK_OUT },
+};
+
+static int adis16240_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2,
+ long mask)
+{
+ struct adis *st = iio_priv(indio_dev);
+ int ret;
+ int bits;
+ u8 addr;
+ s16 val16;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ return adis_single_conversion(indio_dev, chan,
+ ADIS16240_ERROR_ACTIVE, val);
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_VOLTAGE:
+ if (chan->channel == 0) {
+ *val = 4;
+ *val2 = 880000; /* 4.88 mV */
+ return IIO_VAL_INT_PLUS_MICRO;
+ }
+ return -EINVAL;
+ case IIO_TEMP:
+ *val = 244; /* 0.244 C */
+ *val2 = 0;
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_ACCEL:
+ *val = 0;
+ *val2 = IIO_G_TO_M_S_2(51400); /* 51.4 mg */
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case IIO_CHAN_INFO_PEAK_SCALE:
+ *val = 0;
+ *val2 = IIO_G_TO_M_S_2(51400); /* 51.4 mg */
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_CHAN_INFO_OFFSET:
+ *val = 25000 / 244 - 0x133; /* 25 C = 0x133 */
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_CALIBBIAS:
+ bits = 10;
+ addr = adis16240_addresses[chan->scan_index][0];
+ ret = adis_read_reg_16(st, addr, &val16);
+ if (ret)
+ return ret;
+ val16 &= (1 << bits) - 1;
+ val16 = (s16)(val16 << (16 - bits)) >> (16 - bits);
+ *val = val16;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_PEAK:
+ bits = 10;
+ addr = adis16240_addresses[chan->scan_index][1];
+ ret = adis_read_reg_16(st, addr, &val16);
+ if (ret)
+ return ret;
+ val16 &= (1 << bits) - 1;
+ val16 = (s16)(val16 << (16 - bits)) >> (16 - bits);
+ *val = val16;
+ return IIO_VAL_INT;
+ }
+ return -EINVAL;
+}
+
+static int adis16240_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val,
+ int val2,
+ long mask)
+{
+ struct adis *st = iio_priv(indio_dev);
+ int bits = 10;
+ s16 val16;
+ u8 addr;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_CALIBBIAS:
+ val16 = val & ((1 << bits) - 1);
+ addr = adis16240_addresses[chan->scan_index][0];
+ return adis_write_reg_16(st, addr, val16);
+ }
+ return -EINVAL;
+}
+
+static const struct iio_chan_spec adis16240_channels[] = {
+ ADIS_SUPPLY_CHAN(ADIS16240_SUPPLY_OUT, ADIS16240_SCAN_SUPPLY, 0, 10),
+ ADIS_AUX_ADC_CHAN(ADIS16240_AUX_ADC, ADIS16240_SCAN_AUX_ADC, 0, 10),
+ ADIS_ACCEL_CHAN(X, ADIS16240_XACCL_OUT, ADIS16240_SCAN_ACC_X,
+ BIT(IIO_CHAN_INFO_CALIBBIAS) | BIT(IIO_CHAN_INFO_PEAK),
+ 0, 10),
+ ADIS_ACCEL_CHAN(Y, ADIS16240_YACCL_OUT, ADIS16240_SCAN_ACC_Y,
+ BIT(IIO_CHAN_INFO_CALIBBIAS) | BIT(IIO_CHAN_INFO_PEAK),
+ 0, 10),
+ ADIS_ACCEL_CHAN(Z, ADIS16240_ZACCL_OUT, ADIS16240_SCAN_ACC_Z,
+ BIT(IIO_CHAN_INFO_CALIBBIAS) | BIT(IIO_CHAN_INFO_PEAK),
+ 0, 10),
+ ADIS_TEMP_CHAN(ADIS16240_TEMP_OUT, ADIS16240_SCAN_TEMP, 0, 10),
+ IIO_CHAN_SOFT_TIMESTAMP(6)
+};
+
+static struct attribute *adis16240_attributes[] = {
+ &iio_dev_attr_in_accel_xyz_squared_peak_raw.dev_attr.attr,
+ &iio_const_attr_sampling_frequency_available.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group adis16240_attribute_group = {
+ .attrs = adis16240_attributes,
+};
+
+static const struct iio_info adis16240_info = {
+ .attrs = &adis16240_attribute_group,
+ .read_raw = adis16240_read_raw,
+ .write_raw = adis16240_write_raw,
+ .update_scan_mode = adis_update_scan_mode,
+ .driver_module = THIS_MODULE,
+};
+
+static const char * const adis16240_status_error_msgs[] = {
+ [ADIS16240_DIAG_STAT_PWRON_FAIL_BIT] = "Power on, self-test failed",
+ [ADIS16240_DIAG_STAT_SPI_FAIL_BIT] = "SPI failure",
+ [ADIS16240_DIAG_STAT_FLASH_UPT_BIT] = "Flash update failed",
+ [ADIS16240_DIAG_STAT_POWER_HIGH_BIT] = "Power supply above 3.625V",
+ [ADIS16240_DIAG_STAT_POWER_LOW_BIT] = "Power supply below 2.225V",
+};
+
+static const struct adis_data adis16240_data = {
+ .write_delay = 35,
+ .read_delay = 35,
+ .msc_ctrl_reg = ADIS16240_MSC_CTRL,
+ .glob_cmd_reg = ADIS16240_GLOB_CMD,
+ .diag_stat_reg = ADIS16240_DIAG_STAT,
+
+ .self_test_mask = ADIS16240_MSC_CTRL_SELF_TEST_EN,
+ .self_test_no_autoclear = true,
+ .startup_delay = ADIS16240_STARTUP_DELAY,
+
+ .status_error_msgs = adis16240_status_error_msgs,
+ .status_error_mask = BIT(ADIS16240_DIAG_STAT_PWRON_FAIL_BIT) |
+ BIT(ADIS16240_DIAG_STAT_SPI_FAIL_BIT) |
+ BIT(ADIS16240_DIAG_STAT_FLASH_UPT_BIT) |
+ BIT(ADIS16240_DIAG_STAT_POWER_HIGH_BIT) |
+ BIT(ADIS16240_DIAG_STAT_POWER_LOW_BIT),
+};
+
+static int adis16240_probe(struct spi_device *spi)
+{
+ int ret;
+ struct adis *st;
+ struct iio_dev *indio_dev;
+
+ /* setup the industrialio driver allocated elements */
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+ if (!indio_dev)
+ return -ENOMEM;
+ st = iio_priv(indio_dev);
+ /* this is only used for removal purposes */
+ spi_set_drvdata(spi, indio_dev);
+
+ indio_dev->name = spi->dev.driver->name;
+ indio_dev->dev.parent = &spi->dev;
+ indio_dev->info = &adis16240_info;
+ indio_dev->channels = adis16240_channels;
+ indio_dev->num_channels = ARRAY_SIZE(adis16240_channels);
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ ret = adis_init(st, indio_dev, spi, &adis16240_data);
+ if (ret)
+ return ret;
+ ret = adis_setup_buffer_and_trigger(st, indio_dev, NULL);
+ if (ret)
+ return ret;
+
+ /* Get the device into a sane initial state */
+ ret = adis_initial_startup(st);
+ if (ret)
+ goto error_cleanup_buffer_trigger;
+ ret = iio_device_register(indio_dev);
+ if (ret)
+ goto error_cleanup_buffer_trigger;
+ return 0;
+
+error_cleanup_buffer_trigger:
+ adis_cleanup_buffer_and_trigger(st, indio_dev);
+ return ret;
+}
+
+static int adis16240_remove(struct spi_device *spi)
+{
+ struct iio_dev *indio_dev = spi_get_drvdata(spi);
+ struct adis *st = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+ adis_cleanup_buffer_and_trigger(st, indio_dev);
+
+ return 0;
+}
+
+static struct spi_driver adis16240_driver = {
+ .driver = {
+ .name = "adis16240",
+ },
+ .probe = adis16240_probe,
+ .remove = adis16240_remove,
+};
+module_spi_driver(adis16240_driver);
+
+MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
+MODULE_DESCRIPTION("Analog Devices Programmable Impact Sensor and Recorder");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("spi:adis16240");
diff --git a/drivers/staging/iio/accel/adis16240.h b/drivers/staging/iio/accel/adis16240.h
deleted file mode 100644
index b2cb37b95913..000000000000
--- a/drivers/staging/iio/accel/adis16240.h
+++ /dev/null
@@ -1,179 +0,0 @@
-#ifndef SPI_ADIS16240_H_
-#define SPI_ADIS16240_H_
-
-#define ADIS16240_STARTUP_DELAY 220 /* ms */
-
-/* Flash memory write count */
-#define ADIS16240_FLASH_CNT 0x00
-
-/* Output, power supply */
-#define ADIS16240_SUPPLY_OUT 0x02
-
-/* Output, x-axis accelerometer */
-#define ADIS16240_XACCL_OUT 0x04
-
-/* Output, y-axis accelerometer */
-#define ADIS16240_YACCL_OUT 0x06
-
-/* Output, z-axis accelerometer */
-#define ADIS16240_ZACCL_OUT 0x08
-
-/* Output, auxiliary ADC input */
-#define ADIS16240_AUX_ADC 0x0A
-
-/* Output, temperature */
-#define ADIS16240_TEMP_OUT 0x0C
-
-/* Output, x-axis acceleration peak */
-#define ADIS16240_XPEAK_OUT 0x0E
-
-/* Output, y-axis acceleration peak */
-#define ADIS16240_YPEAK_OUT 0x10
-
-/* Output, z-axis acceleration peak */
-#define ADIS16240_ZPEAK_OUT 0x12
-
-/* Output, sum-of-squares acceleration peak */
-#define ADIS16240_XYZPEAK_OUT 0x14
-
-/* Output, Capture Buffer 1, X and Y acceleration */
-#define ADIS16240_CAPT_BUF1 0x16
-
-/* Output, Capture Buffer 2, Z acceleration */
-#define ADIS16240_CAPT_BUF2 0x18
-
-/* Diagnostic, error flags */
-#define ADIS16240_DIAG_STAT 0x1A
-
-/* Diagnostic, event counter */
-#define ADIS16240_EVNT_CNTR 0x1C
-
-/* Diagnostic, check sum value from firmware test */
-#define ADIS16240_CHK_SUM 0x1E
-
-/* Calibration, x-axis acceleration offset adjustment */
-#define ADIS16240_XACCL_OFF 0x20
-
-/* Calibration, y-axis acceleration offset adjustment */
-#define ADIS16240_YACCL_OFF 0x22
-
-/* Calibration, z-axis acceleration offset adjustment */
-#define ADIS16240_ZACCL_OFF 0x24
-
-/* Clock, hour and minute */
-#define ADIS16240_CLK_TIME 0x2E
-
-/* Clock, month and day */
-#define ADIS16240_CLK_DATE 0x30
-
-/* Clock, year */
-#define ADIS16240_CLK_YEAR 0x32
-
-/* Wake-up setting, hour and minute */
-#define ADIS16240_WAKE_TIME 0x34
-
-/* Wake-up setting, month and day */
-#define ADIS16240_WAKE_DATE 0x36
-
-/* Alarm 1 amplitude threshold */
-#define ADIS16240_ALM_MAG1 0x38
-
-/* Alarm 2 amplitude threshold */
-#define ADIS16240_ALM_MAG2 0x3A
-
-/* Alarm control */
-#define ADIS16240_ALM_CTRL 0x3C
-
-/* Capture, external trigger control */
-#define ADIS16240_XTRIG_CTRL 0x3E
-
-/* Capture, address pointer */
-#define ADIS16240_CAPT_PNTR 0x40
-
-/* Capture, configuration and control */
-#define ADIS16240_CAPT_CTRL 0x42
-
-/* General-purpose digital input/output control */
-#define ADIS16240_GPIO_CTRL 0x44
-
-/* Miscellaneous control */
-#define ADIS16240_MSC_CTRL 0x46
-
-/* Internal sample period (rate) control */
-#define ADIS16240_SMPL_PRD 0x48
-
-/* System command */
-#define ADIS16240_GLOB_CMD 0x4A
-
-/* MSC_CTRL */
-
-/* Enables sum-of-squares output (XYZPEAK_OUT) */
-#define ADIS16240_MSC_CTRL_XYZPEAK_OUT_EN BIT(15)
-
-/* Enables peak tracking output (XPEAK_OUT, YPEAK_OUT, and ZPEAK_OUT) */
-#define ADIS16240_MSC_CTRL_X_Y_ZPEAK_OUT_EN BIT(14)
-
-/* Self-test enable: 1 = apply electrostatic force, 0 = disabled */
-#define ADIS16240_MSC_CTRL_SELF_TEST_EN BIT(8)
-
-/* Data-ready enable: 1 = enabled, 0 = disabled */
-#define ADIS16240_MSC_CTRL_DATA_RDY_EN BIT(2)
-
-/* Data-ready polarity: 1 = active high, 0 = active low */
-#define ADIS16240_MSC_CTRL_ACTIVE_HIGH BIT(1)
-
-/* Data-ready line selection: 1 = DIO2, 0 = DIO1 */
-#define ADIS16240_MSC_CTRL_DATA_RDY_DIO2 BIT(0)
-
-/* DIAG_STAT */
-
-/* Alarm 2 status: 1 = alarm active, 0 = alarm inactive */
-#define ADIS16240_DIAG_STAT_ALARM2 BIT(9)
-
-/* Alarm 1 status: 1 = alarm active, 0 = alarm inactive */
-#define ADIS16240_DIAG_STAT_ALARM1 BIT(8)
-
-/* Capture buffer full: 1 = capture buffer is full */
-#define ADIS16240_DIAG_STAT_CPT_BUF_FUL BIT(7)
-
-/* Flash test, checksum flag: 1 = mismatch, 0 = match */
-#define ADIS16240_DIAG_STAT_CHKSUM BIT(6)
-
-/* Power-on, self-test flag: 1 = failure, 0 = pass */
-#define ADIS16240_DIAG_STAT_PWRON_FAIL_BIT 5
-
-/* Power-on self-test: 1 = in-progress, 0 = complete */
-#define ADIS16240_DIAG_STAT_PWRON_BUSY BIT(4)
-
-/* SPI communications failure */
-#define ADIS16240_DIAG_STAT_SPI_FAIL_BIT 3
-
-/* Flash update failure */
-#define ADIS16240_DIAG_STAT_FLASH_UPT_BIT 2
-
-/* Power supply above 3.625 V */
-#define ADIS16240_DIAG_STAT_POWER_HIGH_BIT 1
-
- /* Power supply below 3.15 V */
-#define ADIS16240_DIAG_STAT_POWER_LOW_BIT 0
-
-/* GLOB_CMD */
-
-#define ADIS16240_GLOB_CMD_RESUME BIT(8)
-#define ADIS16240_GLOB_CMD_SW_RESET BIT(7)
-#define ADIS16240_GLOB_CMD_STANDBY BIT(2)
-
-#define ADIS16240_ERROR_ACTIVE BIT(14)
-
-/* At the moment triggers are only used for ring buffer
- * filling. This may change!
- */
-
-#define ADIS16240_SCAN_ACC_X 0
-#define ADIS16240_SCAN_ACC_Y 1
-#define ADIS16240_SCAN_ACC_Z 2
-#define ADIS16240_SCAN_SUPPLY 3
-#define ADIS16240_SCAN_AUX_ADC 4
-#define ADIS16240_SCAN_TEMP 5
-
-#endif /* SPI_ADIS16240_H_ */
diff --git a/drivers/staging/iio/accel/adis16240_core.c b/drivers/staging/iio/accel/adis16240_core.c
deleted file mode 100644
index d5b99e610d08..000000000000
--- a/drivers/staging/iio/accel/adis16240_core.c
+++ /dev/null
@@ -1,301 +0,0 @@
-/*
- * ADIS16240 Programmable Impact Sensor and Recorder driver
- *
- * Copyright 2010 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later.
- */
-
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/gpio.h>
-#include <linux/delay.h>
-#include <linux/mutex.h>
-#include <linux/device.h>
-#include <linux/kernel.h>
-#include <linux/spi/spi.h>
-#include <linux/slab.h>
-#include <linux/sysfs.h>
-#include <linux/list.h>
-#include <linux/module.h>
-
-#include <linux/iio/iio.h>
-#include <linux/iio/sysfs.h>
-#include <linux/iio/buffer.h>
-#include <linux/iio/imu/adis.h>
-
-#include "adis16240.h"
-
-static ssize_t adis16240_spi_read_signed(struct device *dev,
- struct device_attribute *attr,
- char *buf,
- unsigned int bits)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct adis *st = iio_priv(indio_dev);
- int ret;
- s16 val = 0;
- unsigned int shift = 16 - bits;
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
-
- ret = adis_read_reg_16(st,
- this_attr->address, (u16 *)&val);
- if (ret)
- return ret;
-
- if (val & ADIS16240_ERROR_ACTIVE)
- adis_check_status(st);
-
- val = (s16)(val << shift) >> shift;
- return sprintf(buf, "%d\n", val);
-}
-
-static ssize_t adis16240_read_12bit_signed(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- ssize_t ret;
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
-
- /* Take the iio_dev status lock */
- mutex_lock(&indio_dev->mlock);
- ret = adis16240_spi_read_signed(dev, attr, buf, 12);
- mutex_unlock(&indio_dev->mlock);
-
- return ret;
-}
-
-static IIO_DEVICE_ATTR(in_accel_xyz_squared_peak_raw, S_IRUGO,
- adis16240_read_12bit_signed, NULL,
- ADIS16240_XYZPEAK_OUT);
-
-static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("4096");
-
-static const u8 adis16240_addresses[][2] = {
- [ADIS16240_SCAN_ACC_X] = { ADIS16240_XACCL_OFF, ADIS16240_XPEAK_OUT },
- [ADIS16240_SCAN_ACC_Y] = { ADIS16240_YACCL_OFF, ADIS16240_YPEAK_OUT },
- [ADIS16240_SCAN_ACC_Z] = { ADIS16240_ZACCL_OFF, ADIS16240_ZPEAK_OUT },
-};
-
-static int adis16240_read_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int *val, int *val2,
- long mask)
-{
- struct adis *st = iio_priv(indio_dev);
- int ret;
- int bits;
- u8 addr;
- s16 val16;
-
- switch (mask) {
- case IIO_CHAN_INFO_RAW:
- return adis_single_conversion(indio_dev, chan,
- ADIS16240_ERROR_ACTIVE, val);
- case IIO_CHAN_INFO_SCALE:
- switch (chan->type) {
- case IIO_VOLTAGE:
- if (chan->channel == 0) {
- *val = 4;
- *val2 = 880000; /* 4.88 mV */
- return IIO_VAL_INT_PLUS_MICRO;
- }
- return -EINVAL;
- case IIO_TEMP:
- *val = 244; /* 0.244 C */
- *val2 = 0;
- return IIO_VAL_INT_PLUS_MICRO;
- case IIO_ACCEL:
- *val = 0;
- *val2 = IIO_G_TO_M_S_2(51400); /* 51.4 mg */
- return IIO_VAL_INT_PLUS_MICRO;
- default:
- return -EINVAL;
- }
- break;
- case IIO_CHAN_INFO_PEAK_SCALE:
- *val = 0;
- *val2 = IIO_G_TO_M_S_2(51400); /* 51.4 mg */
- return IIO_VAL_INT_PLUS_MICRO;
- case IIO_CHAN_INFO_OFFSET:
- *val = 25000 / 244 - 0x133; /* 25 C = 0x133 */
- return IIO_VAL_INT;
- case IIO_CHAN_INFO_CALIBBIAS:
- bits = 10;
- mutex_lock(&indio_dev->mlock);
- addr = adis16240_addresses[chan->scan_index][0];
- ret = adis_read_reg_16(st, addr, &val16);
- if (ret) {
- mutex_unlock(&indio_dev->mlock);
- return ret;
- }
- val16 &= (1 << bits) - 1;
- val16 = (s16)(val16 << (16 - bits)) >> (16 - bits);
- *val = val16;
- mutex_unlock(&indio_dev->mlock);
- return IIO_VAL_INT;
- case IIO_CHAN_INFO_PEAK:
- bits = 10;
- mutex_lock(&indio_dev->mlock);
- addr = adis16240_addresses[chan->scan_index][1];
- ret = adis_read_reg_16(st, addr, &val16);
- if (ret) {
- mutex_unlock(&indio_dev->mlock);
- return ret;
- }
- val16 &= (1 << bits) - 1;
- val16 = (s16)(val16 << (16 - bits)) >> (16 - bits);
- *val = val16;
- mutex_unlock(&indio_dev->mlock);
- return IIO_VAL_INT;
- }
- return -EINVAL;
-}
-
-static int adis16240_write_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int val,
- int val2,
- long mask)
-{
- struct adis *st = iio_priv(indio_dev);
- int bits = 10;
- s16 val16;
- u8 addr;
-
- switch (mask) {
- case IIO_CHAN_INFO_CALIBBIAS:
- val16 = val & ((1 << bits) - 1);
- addr = adis16240_addresses[chan->scan_index][0];
- return adis_write_reg_16(st, addr, val16);
- }
- return -EINVAL;
-}
-
-static const struct iio_chan_spec adis16240_channels[] = {
- ADIS_SUPPLY_CHAN(ADIS16240_SUPPLY_OUT, ADIS16240_SCAN_SUPPLY, 0, 10),
- ADIS_AUX_ADC_CHAN(ADIS16240_AUX_ADC, ADIS16240_SCAN_AUX_ADC, 0, 10),
- ADIS_ACCEL_CHAN(X, ADIS16240_XACCL_OUT, ADIS16240_SCAN_ACC_X,
- BIT(IIO_CHAN_INFO_CALIBBIAS) | BIT(IIO_CHAN_INFO_PEAK),
- 0, 10),
- ADIS_ACCEL_CHAN(Y, ADIS16240_YACCL_OUT, ADIS16240_SCAN_ACC_Y,
- BIT(IIO_CHAN_INFO_CALIBBIAS) | BIT(IIO_CHAN_INFO_PEAK),
- 0, 10),
- ADIS_ACCEL_CHAN(Z, ADIS16240_ZACCL_OUT, ADIS16240_SCAN_ACC_Z,
- BIT(IIO_CHAN_INFO_CALIBBIAS) | BIT(IIO_CHAN_INFO_PEAK),
- 0, 10),
- ADIS_TEMP_CHAN(ADIS16240_TEMP_OUT, ADIS16240_SCAN_TEMP, 0, 10),
- IIO_CHAN_SOFT_TIMESTAMP(6)
-};
-
-static struct attribute *adis16240_attributes[] = {
- &iio_dev_attr_in_accel_xyz_squared_peak_raw.dev_attr.attr,
- &iio_const_attr_sampling_frequency_available.dev_attr.attr,
- NULL
-};
-
-static const struct attribute_group adis16240_attribute_group = {
- .attrs = adis16240_attributes,
-};
-
-static const struct iio_info adis16240_info = {
- .attrs = &adis16240_attribute_group,
- .read_raw = &adis16240_read_raw,
- .write_raw = &adis16240_write_raw,
- .update_scan_mode = adis_update_scan_mode,
- .driver_module = THIS_MODULE,
-};
-
-static const char * const adis16240_status_error_msgs[] = {
- [ADIS16240_DIAG_STAT_PWRON_FAIL_BIT] = "Power on, self-test failed",
- [ADIS16240_DIAG_STAT_SPI_FAIL_BIT] = "SPI failure",
- [ADIS16240_DIAG_STAT_FLASH_UPT_BIT] = "Flash update failed",
- [ADIS16240_DIAG_STAT_POWER_HIGH_BIT] = "Power supply above 3.625V",
- [ADIS16240_DIAG_STAT_POWER_LOW_BIT] = "Power supply below 2.225V",
-};
-
-static const struct adis_data adis16240_data = {
- .write_delay = 35,
- .read_delay = 35,
- .msc_ctrl_reg = ADIS16240_MSC_CTRL,
- .glob_cmd_reg = ADIS16240_GLOB_CMD,
- .diag_stat_reg = ADIS16240_DIAG_STAT,
-
- .self_test_mask = ADIS16240_MSC_CTRL_SELF_TEST_EN,
- .self_test_no_autoclear = true,
- .startup_delay = ADIS16240_STARTUP_DELAY,
-
- .status_error_msgs = adis16240_status_error_msgs,
- .status_error_mask = BIT(ADIS16240_DIAG_STAT_PWRON_FAIL_BIT) |
- BIT(ADIS16240_DIAG_STAT_SPI_FAIL_BIT) |
- BIT(ADIS16240_DIAG_STAT_FLASH_UPT_BIT) |
- BIT(ADIS16240_DIAG_STAT_POWER_HIGH_BIT) |
- BIT(ADIS16240_DIAG_STAT_POWER_LOW_BIT),
-};
-
-static int adis16240_probe(struct spi_device *spi)
-{
- int ret;
- struct adis *st;
- struct iio_dev *indio_dev;
-
- /* setup the industrialio driver allocated elements */
- indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
- if (!indio_dev)
- return -ENOMEM;
- st = iio_priv(indio_dev);
- /* this is only used for removal purposes */
- spi_set_drvdata(spi, indio_dev);
-
- indio_dev->name = spi->dev.driver->name;
- indio_dev->dev.parent = &spi->dev;
- indio_dev->info = &adis16240_info;
- indio_dev->channels = adis16240_channels;
- indio_dev->num_channels = ARRAY_SIZE(adis16240_channels);
- indio_dev->modes = INDIO_DIRECT_MODE;
-
- ret = adis_init(st, indio_dev, spi, &adis16240_data);
- if (ret)
- return ret;
- ret = adis_setup_buffer_and_trigger(st, indio_dev, NULL);
- if (ret)
- return ret;
-
- /* Get the device into a sane initial state */
- ret = adis_initial_startup(st);
- if (ret)
- goto error_cleanup_buffer_trigger;
- ret = iio_device_register(indio_dev);
- if (ret)
- goto error_cleanup_buffer_trigger;
- return 0;
-
-error_cleanup_buffer_trigger:
- adis_cleanup_buffer_and_trigger(st, indio_dev);
- return ret;
-}
-
-static int adis16240_remove(struct spi_device *spi)
-{
- struct iio_dev *indio_dev = spi_get_drvdata(spi);
- struct adis *st = iio_priv(indio_dev);
-
- iio_device_unregister(indio_dev);
- adis_cleanup_buffer_and_trigger(st, indio_dev);
-
- return 0;
-}
-
-static struct spi_driver adis16240_driver = {
- .driver = {
- .name = "adis16240",
- },
- .probe = adis16240_probe,
- .remove = adis16240_remove,
-};
-module_spi_driver(adis16240_driver);
-
-MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
-MODULE_DESCRIPTION("Analog Devices Programmable Impact Sensor and Recorder");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("spi:adis16240");
diff --git a/drivers/staging/iio/adc/Kconfig b/drivers/staging/iio/adc/Kconfig
index deff89973d53..e17efb03bac0 100644
--- a/drivers/staging/iio/adc/Kconfig
+++ b/drivers/staging/iio/adc/Kconfig
@@ -80,26 +80,4 @@ config AD7280
To compile this driver as a module, choose M here: the
module will be called ad7280a
-config LPC32XX_ADC
- tristate "NXP LPC32XX ADC"
- depends on ARCH_LPC32XX || COMPILE_TEST
- depends on HAS_IOMEM
- help
- Say yes here to build support for the integrated ADC inside the
- LPC32XX SoC. Note that this feature uses the same hardware as the
- touchscreen driver, so you should either select only one of the two
- drivers (lpc32xx_adc or lpc32xx_ts) or, in the OpenFirmware case,
- activate only one via device tree selection. Provides direct access
- via sysfs.
-
-config SPEAR_ADC
- tristate "ST SPEAr ADC"
- depends on PLAT_SPEAR || COMPILE_TEST
- depends on HAS_IOMEM
- help
- Say yes here to build support for the integrated ADC inside the
- ST SPEAr SoC. Provides direct access via sysfs.
-
- To compile this driver as a module, choose M here: the
- module will be called spear_adc.
endmenu
diff --git a/drivers/staging/iio/adc/Makefile b/drivers/staging/iio/adc/Makefile
index ac09485923b6..bf18bdd7c99d 100644
--- a/drivers/staging/iio/adc/Makefile
+++ b/drivers/staging/iio/adc/Makefile
@@ -10,5 +10,3 @@ obj-$(CONFIG_AD7780) += ad7780.o
obj-$(CONFIG_AD7816) += ad7816.o
obj-$(CONFIG_AD7192) += ad7192.o
obj-$(CONFIG_AD7280) += ad7280a.o
-obj-$(CONFIG_LPC32XX_ADC) += lpc32xx_adc.o
-obj-$(CONFIG_SPEAR_ADC) += spear_adc.o
diff --git a/drivers/staging/iio/adc/ad7192.c b/drivers/staging/iio/adc/ad7192.c
index 1fb68c01abd5..d11c6de9c777 100644
--- a/drivers/staging/iio/adc/ad7192.c
+++ b/drivers/staging/iio/adc/ad7192.c
@@ -342,9 +342,9 @@ ad7192_show_scale_available(struct device *dev,
static IIO_DEVICE_ATTR_NAMED(in_v_m_v_scale_available,
in_voltage-voltage_scale_available,
- S_IRUGO, ad7192_show_scale_available, NULL, 0);
+ 0444, ad7192_show_scale_available, NULL, 0);
-static IIO_DEVICE_ATTR(in_voltage_scale_available, S_IRUGO,
+static IIO_DEVICE_ATTR(in_voltage_scale_available, 0444,
ad7192_show_scale_available, NULL, 0);
static ssize_t ad7192_show_ac_excitation(struct device *dev,
@@ -412,11 +412,11 @@ static ssize_t ad7192_set(struct device *dev,
return ret ? ret : len;
}
-static IIO_DEVICE_ATTR(bridge_switch_en, S_IRUGO | S_IWUSR,
+static IIO_DEVICE_ATTR(bridge_switch_en, 0644,
ad7192_show_bridge_switch, ad7192_set,
AD7192_REG_GPOCON);
-static IIO_DEVICE_ATTR(ac_excitation_en, S_IRUGO | S_IWUSR,
+static IIO_DEVICE_ATTR(ac_excitation_en, 0644,
ad7192_show_ac_excitation, ad7192_set,
AD7192_REG_MODE);
@@ -564,18 +564,18 @@ static int ad7192_write_raw_get_fmt(struct iio_dev *indio_dev,
}
static const struct iio_info ad7192_info = {
- .read_raw = &ad7192_read_raw,
- .write_raw = &ad7192_write_raw,
- .write_raw_get_fmt = &ad7192_write_raw_get_fmt,
+ .read_raw = ad7192_read_raw,
+ .write_raw = ad7192_write_raw,
+ .write_raw_get_fmt = ad7192_write_raw_get_fmt,
.attrs = &ad7192_attribute_group,
.validate_trigger = ad_sd_validate_trigger,
.driver_module = THIS_MODULE,
};
static const struct iio_info ad7195_info = {
- .read_raw = &ad7192_read_raw,
- .write_raw = &ad7192_write_raw,
- .write_raw_get_fmt = &ad7192_write_raw_get_fmt,
+ .read_raw = ad7192_read_raw,
+ .write_raw = ad7192_write_raw,
+ .write_raw_get_fmt = ad7192_write_raw_get_fmt,
.attrs = &ad7195_attribute_group,
.validate_trigger = ad_sd_validate_trigger,
.driver_module = THIS_MODULE,
diff --git a/drivers/staging/iio/adc/ad7280a.c b/drivers/staging/iio/adc/ad7280a.c
index ee679ac0368f..d5ab83f0236d 100644
--- a/drivers/staging/iio/adc/ad7280a.c
+++ b/drivers/staging/iio/adc/ad7280a.c
@@ -134,6 +134,7 @@ struct ad7280_state {
unsigned char aux_threshhigh;
unsigned char aux_threshlow;
unsigned char cb_mask[AD7280A_MAX_CHAIN];
+ struct mutex lock; /* protect sensor state */
__be32 buf[2] ____cacheline_aligned;
};
@@ -410,7 +411,7 @@ static ssize_t ad7280_store_balance_sw(struct device *dev,
devaddr = this_attr->address >> 8;
ch = this_attr->address & 0xFF;
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&st->lock);
if (readin)
st->cb_mask[devaddr] |= 1 << (ch + 2);
else
@@ -418,7 +419,7 @@ static ssize_t ad7280_store_balance_sw(struct device *dev,
ret = ad7280_write(st, devaddr, AD7280A_CELL_BALANCE,
0, st->cb_mask[devaddr]);
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
return ret ? ret : len;
}
@@ -433,10 +434,10 @@ static ssize_t ad7280_show_balance_timer(struct device *dev,
int ret;
unsigned int msecs;
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&st->lock);
ret = ad7280_read(st, this_attr->address >> 8,
this_attr->address & 0xFF);
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
if (ret < 0)
return ret;
@@ -466,11 +467,11 @@ static ssize_t ad7280_store_balance_timer(struct device *dev,
if (val > 31)
return -EINVAL;
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&st->lock);
ret = ad7280_write(st, this_attr->address >> 8,
this_attr->address & 0xFF,
0, (val & 0x1F) << 3);
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
return ret ? ret : len;
}
@@ -559,7 +560,7 @@ static int ad7280_attr_init(struct ad7280_state *st)
st->iio_attr[cnt].address =
AD7280A_DEVADDR(dev) << 8 | ch;
st->iio_attr[cnt].dev_attr.attr.mode =
- S_IWUSR | S_IRUGO;
+ 0644;
st->iio_attr[cnt].dev_attr.show =
ad7280_show_balance_sw;
st->iio_attr[cnt].dev_attr.store =
@@ -576,7 +577,7 @@ static int ad7280_attr_init(struct ad7280_state *st)
AD7280A_DEVADDR(dev) << 8 |
(AD7280A_CB1_TIMER + ch);
st->iio_attr[cnt].dev_attr.attr.mode =
- S_IWUSR | S_IRUGO;
+ 0644;
st->iio_attr[cnt].dev_attr.show =
ad7280_show_balance_timer;
st->iio_attr[cnt].dev_attr.store =
@@ -655,7 +656,7 @@ static ssize_t ad7280_write_channel_config(struct device *dev,
val = clamp(val, 0L, 0xFFL);
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&st->lock);
switch ((u32)this_attr->address) {
case AD7280A_CELL_OVERVOLTAGE:
st->cell_threshhigh = val;
@@ -674,7 +675,7 @@ static ssize_t ad7280_write_channel_config(struct device *dev,
ret = ad7280_write(st, AD7280A_DEVADDR_MASTER,
this_attr->address, 1, val);
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
return ret ? ret : len;
}
@@ -745,26 +746,26 @@ out:
static IIO_DEVICE_ATTR_NAMED(in_thresh_low_value,
in_voltage-voltage_thresh_low_value,
- S_IRUGO | S_IWUSR,
+ 0644,
ad7280_read_channel_config,
ad7280_write_channel_config,
AD7280A_CELL_UNDERVOLTAGE);
static IIO_DEVICE_ATTR_NAMED(in_thresh_high_value,
in_voltage-voltage_thresh_high_value,
- S_IRUGO | S_IWUSR,
+ 0644,
ad7280_read_channel_config,
ad7280_write_channel_config,
AD7280A_CELL_OVERVOLTAGE);
static IIO_DEVICE_ATTR(in_temp_thresh_low_value,
- S_IRUGO | S_IWUSR,
+ 0644,
ad7280_read_channel_config,
ad7280_write_channel_config,
AD7280A_AUX_ADC_UNDERVOLTAGE);
static IIO_DEVICE_ATTR(in_temp_thresh_high_value,
- S_IRUGO | S_IWUSR,
+ 0644,
ad7280_read_channel_config,
ad7280_write_channel_config,
AD7280A_AUX_ADC_OVERVOLTAGE);
@@ -792,13 +793,13 @@ static int ad7280_read_raw(struct iio_dev *indio_dev,
switch (m) {
case IIO_CHAN_INFO_RAW:
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&st->lock);
if (chan->address == AD7280A_ALL_CELLS)
ret = ad7280_read_all_channels(st, st->scan_cnt, NULL);
else
ret = ad7280_read_channel(st, chan->address >> 8,
chan->address & 0xFF);
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
if (ret < 0)
return ret;
@@ -847,6 +848,7 @@ static int ad7280_probe(struct spi_device *spi)
st = iio_priv(indio_dev);
spi_set_drvdata(spi, indio_dev);
st->spi = spi;
+ mutex_init(&st->lock);
if (!pdata)
pdata = &ad7793_default_pdata;
diff --git a/drivers/staging/iio/adc/ad7606.c b/drivers/staging/iio/adc/ad7606.c
index 9dbfa64b1e53..18f5f139117e 100644
--- a/drivers/staging/iio/adc/ad7606.c
+++ b/drivers/staging/iio/adc/ad7606.c
@@ -208,7 +208,7 @@ static int ad7606_write_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_SCALE:
ret = -EINVAL;
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&st->lock);
for (i = 0; i < ARRAY_SIZE(scale_avail); i++)
if (val2 == scale_avail[i][1]) {
gpiod_set_value(st->gpio_range, i);
@@ -217,7 +217,7 @@ static int ad7606_write_raw(struct iio_dev *indio_dev,
ret = 0;
break;
}
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
return ret;
case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
@@ -231,11 +231,11 @@ static int ad7606_write_raw(struct iio_dev *indio_dev,
values[1] = (ret >> 1) & 1;
values[2] = (ret >> 2) & 1;
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&st->lock);
gpiod_set_array_value(ARRAY_SIZE(values), st->gpio_os->desc,
values);
st->oversampling = val;
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
return 0;
default:
@@ -413,6 +413,7 @@ int ad7606_probe(struct device *dev, int irq, void __iomem *base_address,
st = iio_priv(indio_dev);
st->dev = dev;
+ mutex_init(&st->lock);
st->bops = bops;
st->base_address = base_address;
/* tied to logic low, analog input range is +/- 5V */
diff --git a/drivers/staging/iio/adc/ad7606.h b/drivers/staging/iio/adc/ad7606.h
index 746f9553d2ba..acaed8d5379c 100644
--- a/drivers/staging/iio/adc/ad7606.h
+++ b/drivers/staging/iio/adc/ad7606.h
@@ -14,6 +14,7 @@
* @name: identification string for chip
* @channels: channel specification
* @num_channels: number of channels
+ * @lock: protect sensor state
*/
struct ad7606_chip_info {
@@ -23,6 +24,7 @@ struct ad7606_chip_info {
/**
* struct ad7606_state - driver instance specific data
+ * @lock: protect sensor state
*/
struct ad7606_state {
@@ -37,6 +39,7 @@ struct ad7606_state {
bool done;
void __iomem *base_address;
+ struct mutex lock; /* protect sensor state */
struct gpio_desc *gpio_convst;
struct gpio_desc *gpio_reset;
struct gpio_desc *gpio_range;
diff --git a/drivers/staging/iio/adc/ad7780.c b/drivers/staging/iio/adc/ad7780.c
index e14960038d3e..dec3ba6eba8a 100644
--- a/drivers/staging/iio/adc/ad7780.c
+++ b/drivers/staging/iio/adc/ad7780.c
@@ -154,7 +154,7 @@ static const struct ad7780_chip_info ad7780_chip_info_tbl[] = {
};
static const struct iio_info ad7780_info = {
- .read_raw = &ad7780_read_raw,
+ .read_raw = ad7780_read_raw,
.driver_module = THIS_MODULE,
};
diff --git a/drivers/staging/iio/adc/lpc32xx_adc.c b/drivers/staging/iio/adc/lpc32xx_adc.c
deleted file mode 100644
index b51f237cd817..000000000000
--- a/drivers/staging/iio/adc/lpc32xx_adc.c
+++ /dev/null
@@ -1,215 +0,0 @@
-/*
- * lpc32xx_adc.c - Support for ADC in LPC32XX
- *
- * 3-channel, 10-bit ADC
- *
- * Copyright (C) 2011, 2012 Roland Stigge <stigge@antcom.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/interrupt.h>
-#include <linux/device.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/io.h>
-#include <linux/clk.h>
-#include <linux/err.h>
-#include <linux/completion.h>
-#include <linux/of.h>
-
-#include <linux/iio/iio.h>
-#include <linux/iio/sysfs.h>
-
-/*
- * LPC32XX registers definitions
- */
-#define LPC32XX_ADC_SELECT(x) ((x) + 0x04)
-#define LPC32XX_ADC_CTRL(x) ((x) + 0x08)
-#define LPC32XX_ADC_VALUE(x) ((x) + 0x48)
-
-/* Bit definitions for LPC32XX_ADC_SELECT: */
-#define AD_REFm 0x00000200 /* constant, always write this value! */
-#define AD_REFp 0x00000080 /* constant, always write this value! */
-#define AD_IN 0x00000010 /* multiple of this is the */
- /* channel number: 0, 1, 2 */
-#define AD_INTERNAL 0x00000004 /* constant, always write this value! */
-
-/* Bit definitions for LPC32XX_ADC_CTRL: */
-#define AD_STROBE 0x00000002
-#define AD_PDN_CTRL 0x00000004
-
-/* Bit definitions for LPC32XX_ADC_VALUE: */
-#define ADC_VALUE_MASK 0x000003FF
-
-#define MOD_NAME "lpc32xx-adc"
-
-struct lpc32xx_adc_info {
- void __iomem *adc_base;
- struct clk *clk;
- struct completion completion;
-
- u32 value;
-};
-
-static int lpc32xx_read_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int *val,
- int *val2,
- long mask)
-{
- struct lpc32xx_adc_info *info = iio_priv(indio_dev);
-
- if (mask == IIO_CHAN_INFO_RAW) {
- mutex_lock(&indio_dev->mlock);
- clk_prepare_enable(info->clk);
- /* Measurement setup */
- __raw_writel(AD_INTERNAL | (chan->address) | AD_REFp | AD_REFm,
- LPC32XX_ADC_SELECT(info->adc_base));
- /* Trigger conversion */
- __raw_writel(AD_PDN_CTRL | AD_STROBE,
- LPC32XX_ADC_CTRL(info->adc_base));
- wait_for_completion(&info->completion); /* set by ISR */
- clk_disable_unprepare(info->clk);
- *val = info->value;
- mutex_unlock(&indio_dev->mlock);
-
- return IIO_VAL_INT;
- }
-
- return -EINVAL;
-}
-
-static const struct iio_info lpc32xx_adc_iio_info = {
- .read_raw = &lpc32xx_read_raw,
- .driver_module = THIS_MODULE,
-};
-
-#define LPC32XX_ADC_CHANNEL(_index) { \
- .type = IIO_VOLTAGE, \
- .indexed = 1, \
- .channel = _index, \
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
- .address = AD_IN * _index, \
- .scan_index = _index, \
-}
-
-static const struct iio_chan_spec lpc32xx_adc_iio_channels[] = {
- LPC32XX_ADC_CHANNEL(0),
- LPC32XX_ADC_CHANNEL(1),
- LPC32XX_ADC_CHANNEL(2),
-};
-
-static irqreturn_t lpc32xx_adc_isr(int irq, void *dev_id)
-{
- struct lpc32xx_adc_info *info = dev_id;
-
- /* Read value and clear irq */
- info->value = __raw_readl(LPC32XX_ADC_VALUE(info->adc_base)) &
- ADC_VALUE_MASK;
- complete(&info->completion);
-
- return IRQ_HANDLED;
-}
-
-static int lpc32xx_adc_probe(struct platform_device *pdev)
-{
- struct lpc32xx_adc_info *info = NULL;
- struct resource *res;
- int retval = -ENODEV;
- struct iio_dev *iodev = NULL;
- int irq;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "failed to get platform I/O memory\n");
- return -ENXIO;
- }
-
- iodev = devm_iio_device_alloc(&pdev->dev, sizeof(*info));
- if (!iodev)
- return -ENOMEM;
-
- info = iio_priv(iodev);
-
- info->adc_base = devm_ioremap(&pdev->dev, res->start,
- resource_size(res));
- if (!info->adc_base) {
- dev_err(&pdev->dev, "failed mapping memory\n");
- return -EBUSY;
- }
-
- info->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(info->clk)) {
- dev_err(&pdev->dev, "failed getting clock\n");
- return PTR_ERR(info->clk);
- }
-
- irq = platform_get_irq(pdev, 0);
- if (irq <= 0) {
- dev_err(&pdev->dev, "failed getting interrupt resource\n");
- return -ENXIO;
- }
-
- retval = devm_request_irq(&pdev->dev, irq, lpc32xx_adc_isr, 0,
- MOD_NAME, info);
- if (retval < 0) {
- dev_err(&pdev->dev, "failed requesting interrupt\n");
- return retval;
- }
-
- platform_set_drvdata(pdev, iodev);
-
- init_completion(&info->completion);
-
- iodev->name = MOD_NAME;
- iodev->dev.parent = &pdev->dev;
- iodev->info = &lpc32xx_adc_iio_info;
- iodev->modes = INDIO_DIRECT_MODE;
- iodev->channels = lpc32xx_adc_iio_channels;
- iodev->num_channels = ARRAY_SIZE(lpc32xx_adc_iio_channels);
-
- retval = devm_iio_device_register(&pdev->dev, iodev);
- if (retval)
- return retval;
-
- dev_info(&pdev->dev, "LPC32XX ADC driver loaded, IRQ %d\n", irq);
-
- return 0;
-}
-
-#ifdef CONFIG_OF
-static const struct of_device_id lpc32xx_adc_match[] = {
- { .compatible = "nxp,lpc3220-adc" },
- {},
-};
-MODULE_DEVICE_TABLE(of, lpc32xx_adc_match);
-#endif
-
-static struct platform_driver lpc32xx_adc_driver = {
- .probe = lpc32xx_adc_probe,
- .driver = {
- .name = MOD_NAME,
- .of_match_table = of_match_ptr(lpc32xx_adc_match),
- },
-};
-
-module_platform_driver(lpc32xx_adc_driver);
-
-MODULE_AUTHOR("Roland Stigge <stigge@antcom.de>");
-MODULE_DESCRIPTION("LPC32XX ADC driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/iio/addac/adt7316.c b/drivers/staging/iio/addac/adt7316.c
index 6054c7298fce..b2bce26499f5 100644
--- a/drivers/staging/iio/addac/adt7316.c
+++ b/drivers/staging/iio/addac/adt7316.c
@@ -244,7 +244,6 @@ static ssize_t _adt7316_store_enabled(struct adt7316_chip_info *chip,
chip->config1 = config1;
return ret;
-
}
static ssize_t adt7316_store_enabled(struct device *dev,
@@ -267,7 +266,7 @@ static ssize_t adt7316_store_enabled(struct device *dev,
return len;
}
-static IIO_DEVICE_ATTR(enabled, S_IRUGO | S_IWUSR,
+static IIO_DEVICE_ATTR(enabled, 0644,
adt7316_show_enabled,
adt7316_store_enabled,
0);
@@ -311,7 +310,7 @@ static ssize_t adt7316_store_select_ex_temp(struct device *dev,
return len;
}
-static IIO_DEVICE_ATTR(select_ex_temp, S_IRUGO | S_IWUSR,
+static IIO_DEVICE_ATTR(select_ex_temp, 0644,
adt7316_show_select_ex_temp,
adt7316_store_select_ex_temp,
0);
@@ -352,7 +351,7 @@ static ssize_t adt7316_store_mode(struct device *dev,
return len;
}
-static IIO_DEVICE_ATTR(mode, S_IRUGO | S_IWUSR,
+static IIO_DEVICE_ATTR(mode, 0644,
adt7316_show_mode,
adt7316_store_mode,
0);
@@ -364,7 +363,7 @@ static ssize_t adt7316_show_all_modes(struct device *dev,
return sprintf(buf, "single_channel\nround_robin\n");
}
-static IIO_DEVICE_ATTR(all_modes, S_IRUGO, adt7316_show_all_modes, NULL, 0);
+static IIO_DEVICE_ATTR(all_modes, 0444, adt7316_show_all_modes, NULL, 0);
static ssize_t adt7316_show_ad_channel(struct device *dev,
struct device_attribute *attr,
@@ -434,7 +433,6 @@ static ssize_t adt7316_store_ad_channel(struct device *dev,
config2 = chip->config2 & (~ADT7316_AD_SINGLE_CH_MASK);
}
-
config2 |= data;
ret = chip->bus.write(chip->bus.client, ADT7316_CONFIG2, config2);
@@ -446,7 +444,7 @@ static ssize_t adt7316_store_ad_channel(struct device *dev,
return len;
}
-static IIO_DEVICE_ATTR(ad_channel, S_IRUGO | S_IWUSR,
+static IIO_DEVICE_ATTR(ad_channel, 0644,
adt7316_show_ad_channel,
adt7316_store_ad_channel,
0);
@@ -469,7 +467,7 @@ static ssize_t adt7316_show_all_ad_channels(struct device *dev,
"2 - External Temperature\n");
}
-static IIO_DEVICE_ATTR(all_ad_channels, S_IRUGO,
+static IIO_DEVICE_ATTR(all_ad_channels, 0444,
adt7316_show_all_ad_channels, NULL, 0);
static ssize_t adt7316_show_disable_averaging(struct device *dev,
@@ -506,7 +504,7 @@ static ssize_t adt7316_store_disable_averaging(struct device *dev,
return len;
}
-static IIO_DEVICE_ATTR(disable_averaging, S_IRUGO | S_IWUSR,
+static IIO_DEVICE_ATTR(disable_averaging, 0644,
adt7316_show_disable_averaging,
adt7316_store_disable_averaging,
0);
@@ -545,7 +543,7 @@ static ssize_t adt7316_store_enable_smbus_timeout(struct device *dev,
return len;
}
-static IIO_DEVICE_ATTR(enable_smbus_timeout, S_IRUGO | S_IWUSR,
+static IIO_DEVICE_ATTR(enable_smbus_timeout, 0644,
adt7316_show_enable_smbus_timeout,
adt7316_store_enable_smbus_timeout,
0);
@@ -583,7 +581,7 @@ static ssize_t adt7316_store_powerdown(struct device *dev,
return len;
}
-static IIO_DEVICE_ATTR(powerdown, S_IRUGO | S_IWUSR,
+static IIO_DEVICE_ATTR(powerdown, 0644,
adt7316_show_powerdown,
adt7316_store_powerdown,
0);
@@ -621,7 +619,7 @@ static ssize_t adt7316_store_fast_ad_clock(struct device *dev,
return len;
}
-static IIO_DEVICE_ATTR(fast_ad_clock, S_IRUGO | S_IWUSR,
+static IIO_DEVICE_ATTR(fast_ad_clock, 0644,
adt7316_show_fast_ad_clock,
adt7316_store_fast_ad_clock,
0);
@@ -674,7 +672,7 @@ static ssize_t adt7316_store_da_high_resolution(struct device *dev,
return len;
}
-static IIO_DEVICE_ATTR(da_high_resolution, S_IRUGO | S_IWUSR,
+static IIO_DEVICE_ATTR(da_high_resolution, 0644,
adt7316_show_da_high_resolution,
adt7316_store_da_high_resolution,
0);
@@ -720,12 +718,11 @@ static ssize_t adt7316_store_AIN_internal_Vref(struct device *dev,
return len;
}
-static IIO_DEVICE_ATTR(AIN_internal_Vref, S_IRUGO | S_IWUSR,
+static IIO_DEVICE_ATTR(AIN_internal_Vref, 0644,
adt7316_show_AIN_internal_Vref,
adt7316_store_AIN_internal_Vref,
0);
-
static ssize_t adt7316_show_enable_prop_DACA(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -760,7 +757,7 @@ static ssize_t adt7316_store_enable_prop_DACA(struct device *dev,
return len;
}
-static IIO_DEVICE_ATTR(enable_proportion_DACA, S_IRUGO | S_IWUSR,
+static IIO_DEVICE_ATTR(enable_proportion_DACA, 0644,
adt7316_show_enable_prop_DACA,
adt7316_store_enable_prop_DACA,
0);
@@ -799,7 +796,7 @@ static ssize_t adt7316_store_enable_prop_DACB(struct device *dev,
return len;
}
-static IIO_DEVICE_ATTR(enable_proportion_DACB, S_IRUGO | S_IWUSR,
+static IIO_DEVICE_ATTR(enable_proportion_DACB, 0644,
adt7316_show_enable_prop_DACB,
adt7316_store_enable_prop_DACB,
0);
@@ -842,7 +839,7 @@ static ssize_t adt7316_store_DAC_2Vref_ch_mask(struct device *dev,
return len;
}
-static IIO_DEVICE_ATTR(DAC_2Vref_channels_mask, S_IRUGO | S_IWUSR,
+static IIO_DEVICE_ATTR(DAC_2Vref_channels_mask, 0644,
adt7316_show_DAC_2Vref_ch_mask,
adt7316_store_DAC_2Vref_ch_mask,
0);
@@ -902,7 +899,7 @@ static ssize_t adt7316_store_DAC_update_mode(struct device *dev,
return len;
}
-static IIO_DEVICE_ATTR(DAC_update_mode, S_IRUGO | S_IWUSR,
+static IIO_DEVICE_ATTR(DAC_update_mode, 0644,
adt7316_show_DAC_update_mode,
adt7316_store_DAC_update_mode,
0);
@@ -922,10 +919,9 @@ static ssize_t adt7316_show_all_DAC_update_modes(struct device *dev,
return sprintf(buf, "manual\n");
}
-static IIO_DEVICE_ATTR(all_DAC_update_modes, S_IRUGO,
+static IIO_DEVICE_ATTR(all_DAC_update_modes, 0444,
adt7316_show_all_DAC_update_modes, NULL, 0);
-
static ssize_t adt7316_store_update_DAC(struct device *dev,
struct device_attribute *attr,
const char *buf,
@@ -961,7 +957,7 @@ static ssize_t adt7316_store_update_DAC(struct device *dev,
return len;
}
-static IIO_DEVICE_ATTR(update_DAC, S_IRUGO | S_IWUSR,
+static IIO_DEVICE_ATTR(update_DAC, 0644,
NULL,
adt7316_store_update_DAC,
0);
@@ -1006,7 +1002,7 @@ static ssize_t adt7316_store_DA_AB_Vref_bypass(struct device *dev,
return len;
}
-static IIO_DEVICE_ATTR(DA_AB_Vref_bypass, S_IRUGO | S_IWUSR,
+static IIO_DEVICE_ATTR(DA_AB_Vref_bypass, 0644,
adt7316_show_DA_AB_Vref_bypass,
adt7316_store_DA_AB_Vref_bypass,
0);
@@ -1051,7 +1047,7 @@ static ssize_t adt7316_store_DA_CD_Vref_bypass(struct device *dev,
return len;
}
-static IIO_DEVICE_ATTR(DA_CD_Vref_bypass, S_IRUGO | S_IWUSR,
+static IIO_DEVICE_ATTR(DA_CD_Vref_bypass, 0644,
adt7316_show_DA_CD_Vref_bypass,
adt7316_store_DA_CD_Vref_bypass,
0);
@@ -1112,7 +1108,7 @@ static ssize_t adt7316_store_DAC_internal_Vref(struct device *dev,
return len;
}
-static IIO_DEVICE_ATTR(DAC_internal_Vref, S_IRUGO | S_IWUSR,
+static IIO_DEVICE_ATTR(DAC_internal_Vref, 0644,
adt7316_show_DAC_internal_Vref,
adt7316_store_DAC_internal_Vref,
0);
@@ -1201,7 +1197,7 @@ static ssize_t adt7316_show_VDD(struct device *dev,
return adt7316_show_ad(chip, ADT7316_AD_SINGLE_CH_VDD, buf);
}
-static IIO_DEVICE_ATTR(VDD, S_IRUGO, adt7316_show_VDD, NULL, 0);
+static IIO_DEVICE_ATTR(VDD, 0444, adt7316_show_VDD, NULL, 0);
static ssize_t adt7316_show_in_temp(struct device *dev,
struct device_attribute *attr,
@@ -1213,7 +1209,7 @@ static ssize_t adt7316_show_in_temp(struct device *dev,
return adt7316_show_ad(chip, ADT7316_AD_SINGLE_CH_IN, buf);
}
-static IIO_DEVICE_ATTR(in_temp, S_IRUGO, adt7316_show_in_temp, NULL, 0);
+static IIO_DEVICE_ATTR(in_temp, 0444, adt7316_show_in_temp, NULL, 0);
static ssize_t adt7316_show_ex_temp_AIN1(struct device *dev,
struct device_attribute *attr,
@@ -1225,9 +1221,9 @@ static ssize_t adt7316_show_ex_temp_AIN1(struct device *dev,
return adt7316_show_ad(chip, ADT7316_AD_SINGLE_CH_EX, buf);
}
-static IIO_DEVICE_ATTR(ex_temp_AIN1, S_IRUGO, adt7316_show_ex_temp_AIN1,
+static IIO_DEVICE_ATTR(ex_temp_AIN1, 0444, adt7316_show_ex_temp_AIN1,
NULL, 0);
-static IIO_DEVICE_ATTR(ex_temp, S_IRUGO, adt7316_show_ex_temp_AIN1, NULL, 0);
+static IIO_DEVICE_ATTR(ex_temp, 0444, adt7316_show_ex_temp_AIN1, NULL, 0);
static ssize_t adt7316_show_AIN2(struct device *dev,
struct device_attribute *attr,
@@ -1238,7 +1234,7 @@ static ssize_t adt7316_show_AIN2(struct device *dev,
return adt7316_show_ad(chip, ADT7516_AD_SINGLE_CH_AIN2, buf);
}
-static IIO_DEVICE_ATTR(AIN2, S_IRUGO, adt7316_show_AIN2, NULL, 0);
+static IIO_DEVICE_ATTR(AIN2, 0444, adt7316_show_AIN2, NULL, 0);
static ssize_t adt7316_show_AIN3(struct device *dev,
struct device_attribute *attr,
@@ -1249,7 +1245,7 @@ static ssize_t adt7316_show_AIN3(struct device *dev,
return adt7316_show_ad(chip, ADT7516_AD_SINGLE_CH_AIN3, buf);
}
-static IIO_DEVICE_ATTR(AIN3, S_IRUGO, adt7316_show_AIN3, NULL, 0);
+static IIO_DEVICE_ATTR(AIN3, 0444, adt7316_show_AIN3, NULL, 0);
static ssize_t adt7316_show_AIN4(struct device *dev,
struct device_attribute *attr,
@@ -1260,7 +1256,7 @@ static ssize_t adt7316_show_AIN4(struct device *dev,
return adt7316_show_ad(chip, ADT7516_AD_SINGLE_CH_AIN4, buf);
}
-static IIO_DEVICE_ATTR(AIN4, S_IRUGO, adt7316_show_AIN4, NULL, 0);
+static IIO_DEVICE_ATTR(AIN4, 0444, adt7316_show_AIN4, NULL, 0);
static ssize_t adt7316_show_temp_offset(struct adt7316_chip_info *chip,
int offset_addr, char *buf)
@@ -1325,7 +1321,7 @@ static ssize_t adt7316_store_in_temp_offset(struct device *dev,
len);
}
-static IIO_DEVICE_ATTR(in_temp_offset, S_IRUGO | S_IWUSR,
+static IIO_DEVICE_ATTR(in_temp_offset, 0644,
adt7316_show_in_temp_offset,
adt7316_store_in_temp_offset, 0);
@@ -1351,7 +1347,7 @@ static ssize_t adt7316_store_ex_temp_offset(struct device *dev,
len);
}
-static IIO_DEVICE_ATTR(ex_temp_offset, S_IRUGO | S_IWUSR,
+static IIO_DEVICE_ATTR(ex_temp_offset, 0644,
adt7316_show_ex_temp_offset,
adt7316_store_ex_temp_offset, 0);
@@ -1378,7 +1374,7 @@ static ssize_t adt7316_store_in_analog_temp_offset(struct device *dev,
ADT7316_IN_ANALOG_TEMP_OFFSET, buf, len);
}
-static IIO_DEVICE_ATTR(in_analog_temp_offset, S_IRUGO | S_IWUSR,
+static IIO_DEVICE_ATTR(in_analog_temp_offset, 0644,
adt7316_show_in_analog_temp_offset,
adt7316_store_in_analog_temp_offset, 0);
@@ -1405,7 +1401,7 @@ static ssize_t adt7316_store_ex_analog_temp_offset(struct device *dev,
ADT7316_EX_ANALOG_TEMP_OFFSET, buf, len);
}
-static IIO_DEVICE_ATTR(ex_analog_temp_offset, S_IRUGO | S_IWUSR,
+static IIO_DEVICE_ATTR(ex_analog_temp_offset, 0644,
adt7316_show_ex_analog_temp_offset,
adt7316_store_ex_analog_temp_offset, 0);
@@ -1500,7 +1496,7 @@ static ssize_t adt7316_store_DAC_A(struct device *dev,
return adt7316_store_DAC(chip, 0, buf, len);
}
-static IIO_DEVICE_ATTR(DAC_A, S_IRUGO | S_IWUSR, adt7316_show_DAC_A,
+static IIO_DEVICE_ATTR(DAC_A, 0644, adt7316_show_DAC_A,
adt7316_store_DAC_A, 0);
static ssize_t adt7316_show_DAC_B(struct device *dev,
@@ -1524,7 +1520,7 @@ static ssize_t adt7316_store_DAC_B(struct device *dev,
return adt7316_store_DAC(chip, 1, buf, len);
}
-static IIO_DEVICE_ATTR(DAC_B, S_IRUGO | S_IWUSR, adt7316_show_DAC_B,
+static IIO_DEVICE_ATTR(DAC_B, 0644, adt7316_show_DAC_B,
adt7316_store_DAC_B, 0);
static ssize_t adt7316_show_DAC_C(struct device *dev,
@@ -1548,7 +1544,7 @@ static ssize_t adt7316_store_DAC_C(struct device *dev,
return adt7316_store_DAC(chip, 2, buf, len);
}
-static IIO_DEVICE_ATTR(DAC_C, S_IRUGO | S_IWUSR, adt7316_show_DAC_C,
+static IIO_DEVICE_ATTR(DAC_C, 0644, adt7316_show_DAC_C,
adt7316_store_DAC_C, 0);
static ssize_t adt7316_show_DAC_D(struct device *dev,
@@ -1572,7 +1568,7 @@ static ssize_t adt7316_store_DAC_D(struct device *dev,
return adt7316_store_DAC(chip, 3, buf, len);
}
-static IIO_DEVICE_ATTR(DAC_D, S_IRUGO | S_IWUSR, adt7316_show_DAC_D,
+static IIO_DEVICE_ATTR(DAC_D, 0644, adt7316_show_DAC_D,
adt7316_store_DAC_D, 0);
static ssize_t adt7316_show_device_id(struct device *dev,
@@ -1591,7 +1587,7 @@ static ssize_t adt7316_show_device_id(struct device *dev,
return sprintf(buf, "%d\n", id);
}
-static IIO_DEVICE_ATTR(device_id, S_IRUGO, adt7316_show_device_id, NULL, 0);
+static IIO_DEVICE_ATTR(device_id, 0444, adt7316_show_device_id, NULL, 0);
static ssize_t adt7316_show_manufactorer_id(struct device *dev,
struct device_attribute *attr,
@@ -1609,7 +1605,7 @@ static ssize_t adt7316_show_manufactorer_id(struct device *dev,
return sprintf(buf, "%d\n", id);
}
-static IIO_DEVICE_ATTR(manufactorer_id, S_IRUGO,
+static IIO_DEVICE_ATTR(manufactorer_id, 0444,
adt7316_show_manufactorer_id, NULL, 0);
static ssize_t adt7316_show_device_rev(struct device *dev,
@@ -1628,7 +1624,7 @@ static ssize_t adt7316_show_device_rev(struct device *dev,
return sprintf(buf, "%d\n", rev);
}
-static IIO_DEVICE_ATTR(device_rev, S_IRUGO, adt7316_show_device_rev, NULL, 0);
+static IIO_DEVICE_ATTR(device_rev, 0444, adt7316_show_device_rev, NULL, 0);
static ssize_t adt7316_show_bus_type(struct device *dev,
struct device_attribute *attr,
@@ -1649,7 +1645,7 @@ static ssize_t adt7316_show_bus_type(struct device *dev,
return sprintf(buf, "i2c\n");
}
-static IIO_DEVICE_ATTR(bus_type, S_IRUGO, adt7316_show_bus_type, NULL, 0);
+static IIO_DEVICE_ATTR(bus_type, 0444, adt7316_show_bus_type, NULL, 0);
static struct attribute *adt7316_attributes[] = {
&iio_dev_attr_all_modes.dev_attr.attr,
@@ -1867,6 +1863,7 @@ static ssize_t adt7316_set_int_mask(struct device *dev,
return len;
}
+
static inline ssize_t adt7316_show_ad_bound(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -1972,61 +1969,61 @@ static ssize_t adt7316_set_int_enabled(struct device *dev,
}
static IIO_DEVICE_ATTR(int_mask,
- S_IRUGO | S_IWUSR,
+ 0644,
adt7316_show_int_mask, adt7316_set_int_mask,
0);
static IIO_DEVICE_ATTR(in_temp_high_value,
- S_IRUGO | S_IWUSR,
+ 0644,
adt7316_show_ad_bound, adt7316_set_ad_bound,
ADT7316_IN_TEMP_HIGH);
static IIO_DEVICE_ATTR(in_temp_low_value,
- S_IRUGO | S_IWUSR,
+ 0644,
adt7316_show_ad_bound, adt7316_set_ad_bound,
ADT7316_IN_TEMP_LOW);
static IIO_DEVICE_ATTR(ex_temp_high_value,
- S_IRUGO | S_IWUSR,
+ 0644,
adt7316_show_ad_bound, adt7316_set_ad_bound,
ADT7316_EX_TEMP_HIGH);
static IIO_DEVICE_ATTR(ex_temp_low_value,
- S_IRUGO | S_IWUSR,
+ 0644,
adt7316_show_ad_bound, adt7316_set_ad_bound,
ADT7316_EX_TEMP_LOW);
/* NASTY duplication to be fixed */
static IIO_DEVICE_ATTR(ex_temp_ain1_high_value,
- S_IRUGO | S_IWUSR,
+ 0644,
adt7316_show_ad_bound, adt7316_set_ad_bound,
ADT7316_EX_TEMP_HIGH);
static IIO_DEVICE_ATTR(ex_temp_ain1_low_value,
- S_IRUGO | S_IWUSR,
+ 0644,
adt7316_show_ad_bound, adt7316_set_ad_bound,
ADT7316_EX_TEMP_LOW);
static IIO_DEVICE_ATTR(ain2_high_value,
- S_IRUGO | S_IWUSR,
+ 0644,
adt7316_show_ad_bound, adt7316_set_ad_bound,
ADT7516_AIN2_HIGH);
static IIO_DEVICE_ATTR(ain2_low_value,
- S_IRUGO | S_IWUSR,
+ 0644,
adt7316_show_ad_bound, adt7316_set_ad_bound,
ADT7516_AIN2_LOW);
static IIO_DEVICE_ATTR(ain3_high_value,
- S_IRUGO | S_IWUSR,
+ 0644,
adt7316_show_ad_bound, adt7316_set_ad_bound,
ADT7516_AIN3_HIGH);
static IIO_DEVICE_ATTR(ain3_low_value,
- S_IRUGO | S_IWUSR,
+ 0644,
adt7316_show_ad_bound, adt7316_set_ad_bound,
ADT7516_AIN3_LOW);
static IIO_DEVICE_ATTR(ain4_high_value,
- S_IRUGO | S_IWUSR,
+ 0644,
adt7316_show_ad_bound, adt7316_set_ad_bound,
ADT7516_AIN4_HIGH);
static IIO_DEVICE_ATTR(ain4_low_value,
- S_IRUGO | S_IWUSR,
+ 0644,
adt7316_show_ad_bound, adt7316_set_ad_bound,
ADT7516_AIN4_LOW);
static IIO_DEVICE_ATTR(int_enabled,
- S_IRUGO | S_IWUSR,
+ 0644,
adt7316_show_int_enabled,
adt7316_set_int_enabled, 0);
diff --git a/drivers/staging/iio/cdc/ad7150.c b/drivers/staging/iio/cdc/ad7150.c
index ca72af3e9d4b..a6f249e9c1e1 100644
--- a/drivers/staging/iio/cdc/ad7150.c
+++ b/drivers/staging/iio/cdc/ad7150.c
@@ -232,7 +232,7 @@ static int ad7150_write_event_config(struct iio_dev *indio_dev,
if (ret < 0)
goto error_ret;
- cfg = ret & ~((0x03 << 5) | (0x1 << 7));
+ cfg = ret & ~((0x03 << 5) | BIT(7));
switch (type) {
case IIO_EV_TYPE_MAG_ADAPTIVE:
diff --git a/drivers/staging/iio/cdc/ad7152.c b/drivers/staging/iio/cdc/ad7152.c
index b91b50f345bd..dc6ecd824365 100644
--- a/drivers/staging/iio/cdc/ad7152.c
+++ b/drivers/staging/iio/cdc/ad7152.c
@@ -41,10 +41,10 @@
#define AD7152_REG_CFG2 26
/* Status Register Bit Designations (AD7152_REG_STATUS) */
-#define AD7152_STATUS_RDY1 (1 << 0)
-#define AD7152_STATUS_RDY2 (1 << 1)
-#define AD7152_STATUS_C1C2 (1 << 2)
-#define AD7152_STATUS_PWDN (1 << 7)
+#define AD7152_STATUS_RDY1 BIT(0)
+#define AD7152_STATUS_RDY2 BIT(1)
+#define AD7152_STATUS_C1C2 BIT(2)
+#define AD7152_STATUS_PWDN BIT(7)
/* Setup Register Bit Designations (AD7152_REG_CHx_SETUP) */
#define AD7152_SETUP_CAPDIFF (1 << 5)
@@ -155,13 +155,13 @@ static ssize_t ad7152_start_gain_calib(struct device *dev,
}
static IIO_DEVICE_ATTR(in_capacitance0_calibbias_calibration,
- S_IWUSR, NULL, ad7152_start_offset_calib, 0);
+ 0200, NULL, ad7152_start_offset_calib, 0);
static IIO_DEVICE_ATTR(in_capacitance1_calibbias_calibration,
- S_IWUSR, NULL, ad7152_start_offset_calib, 1);
+ 0200, NULL, ad7152_start_offset_calib, 1);
static IIO_DEVICE_ATTR(in_capacitance0_calibscale_calibration,
- S_IWUSR, NULL, ad7152_start_gain_calib, 0);
+ 0200, NULL, ad7152_start_gain_calib, 0);
static IIO_DEVICE_ATTR(in_capacitance1_calibscale_calibration,
- S_IWUSR, NULL, ad7152_start_gain_calib, 1);
+ 0200, NULL, ad7152_start_gain_calib, 1);
/* Values are Update Rate (Hz), Conversion Time (ms) + 1*/
static const unsigned char ad7152_filter_rate_table[][2] = {
@@ -244,6 +244,7 @@ static int ad7152_write_raw_samp_freq(struct device *dev, int val)
return ret;
}
+
static int ad7152_write_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int val,
@@ -441,9 +442,9 @@ static int ad7152_write_raw_get_fmt(struct iio_dev *indio_dev,
static const struct iio_info ad7152_info = {
.attrs = &ad7152_attribute_group,
- .read_raw = &ad7152_read_raw,
- .write_raw = &ad7152_write_raw,
- .write_raw_get_fmt = &ad7152_write_raw_get_fmt,
+ .read_raw = ad7152_read_raw,
+ .write_raw = ad7152_write_raw,
+ .write_raw_get_fmt = ad7152_write_raw_get_fmt,
.driver_module = THIS_MODULE,
};
diff --git a/drivers/staging/iio/cdc/ad7746.c b/drivers/staging/iio/cdc/ad7746.c
index 81f8b9ee1120..cdcb4fccf3fe 100644
--- a/drivers/staging/iio/cdc/ad7746.c
+++ b/drivers/staging/iio/cdc/ad7746.c
@@ -45,10 +45,10 @@
#define AD7746_STATUS_RDYCAP BIT(0)
/* Capacitive Channel Setup Register Bit Designations (AD7746_REG_CAP_SETUP) */
-#define AD7746_CAPSETUP_CAPEN (1 << 7)
-#define AD7746_CAPSETUP_CIN2 (1 << 6) /* AD7746 only */
-#define AD7746_CAPSETUP_CAPDIFF (1 << 5)
-#define AD7746_CAPSETUP_CACHOP (1 << 0)
+#define AD7746_CAPSETUP_CAPEN BIT(7)
+#define AD7746_CAPSETUP_CIN2 BIT(6) /* AD7746 only */
+#define AD7746_CAPSETUP_CAPDIFF BIT(5)
+#define AD7746_CAPSETUP_CACHOP BIT(0)
/* Voltage/Temperature Setup Register Bit Designations (AD7746_REG_VT_SETUP) */
#define AD7746_VTSETUP_VTEN (1 << 7)
@@ -56,9 +56,9 @@
#define AD7746_VTSETUP_VTMD_EXT_TEMP (1 << 5)
#define AD7746_VTSETUP_VTMD_VDD_MON (2 << 5)
#define AD7746_VTSETUP_VTMD_EXT_VIN (3 << 5)
-#define AD7746_VTSETUP_EXTREF (1 << 4)
-#define AD7746_VTSETUP_VTSHORT (1 << 1)
-#define AD7746_VTSETUP_VTCHOP (1 << 0)
+#define AD7746_VTSETUP_EXTREF BIT(4)
+#define AD7746_VTSETUP_VTSHORT BIT(1)
+#define AD7746_VTSETUP_VTCHOP BIT(0)
/* Excitation Setup Register Bit Designations (AD7746_REG_EXC_SETUP) */
#define AD7746_EXCSETUP_CLKCTRL BIT(7)
@@ -82,7 +82,7 @@
#define AD7746_CONF_MODE_GAIN_CAL (6 << 0)
/* CAPDAC Register Bit Designations (AD7746_REG_CAPDACx) */
-#define AD7746_CAPDAC_DACEN (1 << 7)
+#define AD7746_CAPDAC_DACEN BIT(7)
#define AD7746_CAPDAC_DACP(x) ((x) & 0x7F)
/*
@@ -91,6 +91,7 @@
struct ad7746_chip_info {
struct i2c_client *client;
+ struct mutex lock; /* protect sensor state */
/*
* Capacitive channel digital filter setup;
* conversion time/update rate setup per channel
@@ -298,11 +299,11 @@ static inline ssize_t ad7746_start_calib(struct device *dev,
if (!doit)
return 0;
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&chip->lock);
regval |= chip->config;
ret = i2c_smbus_write_byte_data(chip->client, AD7746_REG_CFG, regval);
if (ret < 0) {
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&chip->lock);
return ret;
}
@@ -310,12 +311,12 @@ static inline ssize_t ad7746_start_calib(struct device *dev,
msleep(20);
ret = i2c_smbus_read_byte_data(chip->client, AD7746_REG_CFG);
if (ret < 0) {
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&chip->lock);
return ret;
}
} while ((ret == regval) && timeout--);
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&chip->lock);
return len;
}
@@ -351,15 +352,15 @@ static ssize_t ad7746_start_gain_calib(struct device *dev,
}
static IIO_DEVICE_ATTR(in_capacitance0_calibbias_calibration,
- S_IWUSR, NULL, ad7746_start_offset_calib, CIN1);
+ 0200, NULL, ad7746_start_offset_calib, CIN1);
static IIO_DEVICE_ATTR(in_capacitance1_calibbias_calibration,
- S_IWUSR, NULL, ad7746_start_offset_calib, CIN2);
+ 0200, NULL, ad7746_start_offset_calib, CIN2);
static IIO_DEVICE_ATTR(in_capacitance0_calibscale_calibration,
- S_IWUSR, NULL, ad7746_start_gain_calib, CIN1);
+ 0200, NULL, ad7746_start_gain_calib, CIN1);
static IIO_DEVICE_ATTR(in_capacitance1_calibscale_calibration,
- S_IWUSR, NULL, ad7746_start_gain_calib, CIN2);
+ 0200, NULL, ad7746_start_gain_calib, CIN2);
static IIO_DEVICE_ATTR(in_voltage0_calibscale_calibration,
- S_IWUSR, NULL, ad7746_start_gain_calib, VIN);
+ 0200, NULL, ad7746_start_gain_calib, VIN);
static int ad7746_store_cap_filter_rate_setup(struct ad7746_chip_info *chip,
int val)
@@ -426,7 +427,7 @@ static int ad7746_write_raw(struct iio_dev *indio_dev,
struct ad7746_chip_info *chip = iio_priv(indio_dev);
int ret, reg;
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&chip->lock);
switch (mask) {
case IIO_CHAN_INFO_CALIBSCALE:
@@ -521,7 +522,7 @@ static int ad7746_write_raw(struct iio_dev *indio_dev,
}
out:
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&chip->lock);
return ret;
}
@@ -534,7 +535,7 @@ static int ad7746_read_raw(struct iio_dev *indio_dev,
int ret, delay, idx;
u8 regval, reg;
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&chip->lock);
switch (mask) {
case IIO_CHAN_INFO_RAW:
@@ -546,7 +547,7 @@ static int ad7746_read_raw(struct iio_dev *indio_dev,
regval = chip->config | AD7746_CONF_MODE_SINGLE_CONV;
ret = i2c_smbus_write_byte_data(chip->client, AD7746_REG_CFG,
- regval);
+ regval);
if (ret < 0)
goto out;
@@ -658,14 +659,14 @@ static int ad7746_read_raw(struct iio_dev *indio_dev,
ret = -EINVAL;
}
out:
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&chip->lock);
return ret;
}
static const struct iio_info ad7746_info = {
.attrs = &ad7746_attribute_group,
- .read_raw = &ad7746_read_raw,
- .write_raw = &ad7746_write_raw,
+ .read_raw = ad7746_read_raw,
+ .write_raw = ad7746_write_raw,
.driver_module = THIS_MODULE,
};
@@ -686,6 +687,7 @@ static int ad7746_probe(struct i2c_client *client,
if (!indio_dev)
return -ENOMEM;
chip = iio_priv(indio_dev);
+ mutex_init(&chip->lock);
/* this is only used for device removal purposes */
i2c_set_clientdata(client, indio_dev);
diff --git a/drivers/staging/iio/frequency/ad9832.c b/drivers/staging/iio/frequency/ad9832.c
index a5b2f068168d..6da46ede7ee0 100644
--- a/drivers/staging/iio/frequency/ad9832.c
+++ b/drivers/staging/iio/frequency/ad9832.c
@@ -22,6 +22,100 @@
#include "ad9832.h"
+/* Registers */
+
+#define AD9832_FREQ0LL 0x0
+#define AD9832_FREQ0HL 0x1
+#define AD9832_FREQ0LM 0x2
+#define AD9832_FREQ0HM 0x3
+#define AD9832_FREQ1LL 0x4
+#define AD9832_FREQ1HL 0x5
+#define AD9832_FREQ1LM 0x6
+#define AD9832_FREQ1HM 0x7
+#define AD9832_PHASE0L 0x8
+#define AD9832_PHASE0H 0x9
+#define AD9832_PHASE1L 0xA
+#define AD9832_PHASE1H 0xB
+#define AD9832_PHASE2L 0xC
+#define AD9832_PHASE2H 0xD
+#define AD9832_PHASE3L 0xE
+#define AD9832_PHASE3H 0xF
+
+#define AD9832_PHASE_SYM 0x10
+#define AD9832_FREQ_SYM 0x11
+#define AD9832_PINCTRL_EN 0x12
+#define AD9832_OUTPUT_EN 0x13
+
+/* Command Control Bits */
+
+#define AD9832_CMD_PHA8BITSW 0x1
+#define AD9832_CMD_PHA16BITSW 0x0
+#define AD9832_CMD_FRE8BITSW 0x3
+#define AD9832_CMD_FRE16BITSW 0x2
+#define AD9832_CMD_FPSELECT 0x6
+#define AD9832_CMD_SYNCSELSRC 0x8
+#define AD9832_CMD_SLEEPRESCLR 0xC
+
+#define AD9832_FREQ BIT(11)
+#define AD9832_PHASE(x) (((x) & 3) << 9)
+#define AD9832_SYNC BIT(13)
+#define AD9832_SELSRC BIT(12)
+#define AD9832_SLEEP BIT(13)
+#define AD9832_RESET BIT(12)
+#define AD9832_CLR BIT(11)
+#define CMD_SHIFT 12
+#define ADD_SHIFT 8
+#define AD9832_FREQ_BITS 32
+#define AD9832_PHASE_BITS 12
+#define RES_MASK(bits) ((1 << (bits)) - 1)
+
+/**
+ * struct ad9832_state - driver instance specific data
+ * @spi: spi_device
+ * @avdd: supply regulator for the analog section
+ * @dvdd: supply regulator for the digital section
+ * @mclk: external master clock
+ * @ctrl_fp: cached frequency/phase control word
+ * @ctrl_ss: cached sync/selsrc control word
+ * @ctrl_src: cached sleep/reset/clr word
+ * @xfer: default spi transfer
+ * @msg: default spi message
+ * @freq_xfer: tuning word spi transfer
+ * @freq_msg: tuning word spi message
+ * @phase_xfer: tuning word spi transfer
+ * @phase_msg: tuning word spi message
+ * @lock: protect sensor state
+ * @data: spi transmit buffer
+ * @phase_data: tuning word spi transmit buffer
+ * @freq_data: tuning word spi transmit buffer
+ */
+
+struct ad9832_state {
+ struct spi_device *spi;
+ struct regulator *avdd;
+ struct regulator *dvdd;
+ unsigned long mclk;
+ unsigned short ctrl_fp;
+ unsigned short ctrl_ss;
+ unsigned short ctrl_src;
+ struct spi_transfer xfer;
+ struct spi_message msg;
+ struct spi_transfer freq_xfer[4];
+ struct spi_message freq_msg;
+ struct spi_transfer phase_xfer[2];
+ struct spi_message phase_msg;
+ struct mutex lock; /* protect sensor state */
+ /*
+ * DMA (thus cache coherency maintenance) requires the
+ * transfer buffers to live in their own cache lines.
+ */
+ union {
+ __be16 freq_data[4] ____cacheline_aligned;
+ __be16 phase_data[2];
+ __be16 data;
+ };
+};
+
static unsigned long ad9832_calc_freqreg(unsigned long mclk, unsigned long fout)
{
unsigned long long freqreg = (u64)fout *
@@ -85,7 +179,7 @@ static ssize_t ad9832_write(struct device *dev, struct device_attribute *attr,
if (ret)
goto error_ret;
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&st->lock);
switch ((u32)this_attr->address) {
case AD9832_FREQ0HM:
case AD9832_FREQ1HM:
@@ -146,7 +240,7 @@ static ssize_t ad9832_write(struct device *dev, struct device_attribute *attr,
default:
ret = -ENODEV;
}
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
error_ret:
return ret ? ret : len;
@@ -156,22 +250,22 @@ error_ret:
* see dds.h for further information
*/
-static IIO_DEV_ATTR_FREQ(0, 0, S_IWUSR, NULL, ad9832_write, AD9832_FREQ0HM);
-static IIO_DEV_ATTR_FREQ(0, 1, S_IWUSR, NULL, ad9832_write, AD9832_FREQ1HM);
-static IIO_DEV_ATTR_FREQSYMBOL(0, S_IWUSR, NULL, ad9832_write, AD9832_FREQ_SYM);
+static IIO_DEV_ATTR_FREQ(0, 0, 0200, NULL, ad9832_write, AD9832_FREQ0HM);
+static IIO_DEV_ATTR_FREQ(0, 1, 0200, NULL, ad9832_write, AD9832_FREQ1HM);
+static IIO_DEV_ATTR_FREQSYMBOL(0, 0200, NULL, ad9832_write, AD9832_FREQ_SYM);
static IIO_CONST_ATTR_FREQ_SCALE(0, "1"); /* 1Hz */
-static IIO_DEV_ATTR_PHASE(0, 0, S_IWUSR, NULL, ad9832_write, AD9832_PHASE0H);
-static IIO_DEV_ATTR_PHASE(0, 1, S_IWUSR, NULL, ad9832_write, AD9832_PHASE1H);
-static IIO_DEV_ATTR_PHASE(0, 2, S_IWUSR, NULL, ad9832_write, AD9832_PHASE2H);
-static IIO_DEV_ATTR_PHASE(0, 3, S_IWUSR, NULL, ad9832_write, AD9832_PHASE3H);
-static IIO_DEV_ATTR_PHASESYMBOL(0, S_IWUSR, NULL,
+static IIO_DEV_ATTR_PHASE(0, 0, 0200, NULL, ad9832_write, AD9832_PHASE0H);
+static IIO_DEV_ATTR_PHASE(0, 1, 0200, NULL, ad9832_write, AD9832_PHASE1H);
+static IIO_DEV_ATTR_PHASE(0, 2, 0200, NULL, ad9832_write, AD9832_PHASE2H);
+static IIO_DEV_ATTR_PHASE(0, 3, 0200, NULL, ad9832_write, AD9832_PHASE3H);
+static IIO_DEV_ATTR_PHASESYMBOL(0, 0200, NULL,
ad9832_write, AD9832_PHASE_SYM);
static IIO_CONST_ATTR_PHASE_SCALE(0, "0.0015339808"); /* 2PI/2^12 rad*/
-static IIO_DEV_ATTR_PINCONTROL_EN(0, S_IWUSR, NULL,
+static IIO_DEV_ATTR_PINCONTROL_EN(0, 0200, NULL,
ad9832_write, AD9832_PINCTRL_EN);
-static IIO_DEV_ATTR_OUT_ENABLE(0, S_IWUSR, NULL,
+static IIO_DEV_ATTR_OUT_ENABLE(0, 0200, NULL,
ad9832_write, AD9832_OUTPUT_EN);
static struct attribute *ad9832_attributes[] = {
@@ -242,6 +336,7 @@ static int ad9832_probe(struct spi_device *spi)
st->mclk = pdata->mclk;
st->spi = spi;
+ mutex_init(&st->lock);
indio_dev->dev.parent = &spi->dev;
indio_dev->name = spi_get_device_id(spi)->name;
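The ad9832 hunks above follow the common staging cleanup of dropping indio_dev->mlock in favour of a lock owned by the driver state, initialised in probe before any attribute can be written. A minimal sketch of the pattern, using hypothetical foo_* names rather than the real driver symbols:

#include <linux/mutex.h>
#include <linux/iio/iio.h>

struct foo_state {
	struct mutex lock;	/* protects the cached control words below */
	unsigned short ctrl;
};

/* sysfs store path: serialize writers with the driver-private lock */
static ssize_t foo_store(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct foo_state *st = iio_priv(indio_dev);

	mutex_lock(&st->lock);		/* previously: &indio_dev->mlock */
	st->ctrl++;			/* stand-in for programming the part */
	mutex_unlock(&st->lock);

	return len;
}

/* probe path: the lock must exist before the device is registered */
static void foo_init_state(struct iio_dev *indio_dev)
{
	struct foo_state *st = iio_priv(indio_dev);

	mutex_init(&st->lock);
}

The same shape repeats in the ad9834 and ad5933 hunks further down.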
diff --git a/drivers/staging/iio/frequency/ad9832.h b/drivers/staging/iio/frequency/ad9832.h
index 1b08b04482a4..39d326cc1af9 100644
--- a/drivers/staging/iio/frequency/ad9832.h
+++ b/drivers/staging/iio/frequency/ad9832.h
@@ -8,98 +8,6 @@
#ifndef IIO_DDS_AD9832_H_
#define IIO_DDS_AD9832_H_
-/* Registers */
-
-#define AD9832_FREQ0LL 0x0
-#define AD9832_FREQ0HL 0x1
-#define AD9832_FREQ0LM 0x2
-#define AD9832_FREQ0HM 0x3
-#define AD9832_FREQ1LL 0x4
-#define AD9832_FREQ1HL 0x5
-#define AD9832_FREQ1LM 0x6
-#define AD9832_FREQ1HM 0x7
-#define AD9832_PHASE0L 0x8
-#define AD9832_PHASE0H 0x9
-#define AD9832_PHASE1L 0xA
-#define AD9832_PHASE1H 0xB
-#define AD9832_PHASE2L 0xC
-#define AD9832_PHASE2H 0xD
-#define AD9832_PHASE3L 0xE
-#define AD9832_PHASE3H 0xF
-
-#define AD9832_PHASE_SYM 0x10
-#define AD9832_FREQ_SYM 0x11
-#define AD9832_PINCTRL_EN 0x12
-#define AD9832_OUTPUT_EN 0x13
-
-/* Command Control Bits */
-
-#define AD9832_CMD_PHA8BITSW 0x1
-#define AD9832_CMD_PHA16BITSW 0x0
-#define AD9832_CMD_FRE8BITSW 0x3
-#define AD9832_CMD_FRE16BITSW 0x2
-#define AD9832_CMD_FPSELECT 0x6
-#define AD9832_CMD_SYNCSELSRC 0x8
-#define AD9832_CMD_SLEEPRESCLR 0xC
-
-#define AD9832_FREQ BIT(11)
-#define AD9832_PHASE(x) (((x) & 3) << 9)
-#define AD9832_SYNC BIT(13)
-#define AD9832_SELSRC BIT(12)
-#define AD9832_SLEEP BIT(13)
-#define AD9832_RESET BIT(12)
-#define AD9832_CLR BIT(11)
-#define CMD_SHIFT 12
-#define ADD_SHIFT 8
-#define AD9832_FREQ_BITS 32
-#define AD9832_PHASE_BITS 12
-#define RES_MASK(bits) ((1 << (bits)) - 1)
-
-/**
- * struct ad9832_state - driver instance specific data
- * @spi: spi_device
- * @avdd: supply regulator for the analog section
- * @dvdd: supply regulator for the digital section
- * @mclk: external master clock
- * @ctrl_fp: cached frequency/phase control word
- * @ctrl_ss: cached sync/selsrc control word
- * @ctrl_src: cached sleep/reset/clr word
- * @xfer: default spi transfer
- * @msg: default spi message
- * @freq_xfer: tuning word spi transfer
- * @freq_msg: tuning word spi message
- * @phase_xfer: tuning word spi transfer
- * @phase_msg: tuning word spi message
- * @data: spi transmit buffer
- * @phase_data: tuning word spi transmit buffer
- * @freq_data: tuning word spi transmit buffer
- */
-
-struct ad9832_state {
- struct spi_device *spi;
- struct regulator *avdd;
- struct regulator *dvdd;
- unsigned long mclk;
- unsigned short ctrl_fp;
- unsigned short ctrl_ss;
- unsigned short ctrl_src;
- struct spi_transfer xfer;
- struct spi_message msg;
- struct spi_transfer freq_xfer[4];
- struct spi_message freq_msg;
- struct spi_transfer phase_xfer[2];
- struct spi_message phase_msg;
- /*
- * DMA (thus cache coherency maintenance) requires the
- * transfer buffers to live in their own cache lines.
- */
- union {
- __be16 freq_data[4]____cacheline_aligned;
- __be16 phase_data[2];
- __be16 data;
- };
-};
-
/*
* TODO: struct ad9832_platform_data needs to go into include/linux/iio
*/
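The struct comment carried over above ("DMA ... requires the transfer buffers to live in their own cache lines") is why the SPI buffers sit at the end of the state structure behind ____cacheline_aligned. A minimal illustration of that layout, with hypothetical names and assuming the buffer may be handed to DMA by the SPI core:

#include <linux/cache.h>
#include <linux/spi/spi.h>
#include <linux/types.h>

struct foo_state {
	struct spi_device *spi;
	unsigned short ctrl;		/* CPU-updated bookkeeping */
	/*
	 * Start a fresh cache line here so a DMA transfer of @data can
	 * never be corrupted by (or corrupt) writes to the fields above.
	 */
	__be16 data ____cacheline_aligned;
};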
diff --git a/drivers/staging/iio/frequency/ad9834.c b/drivers/staging/iio/frequency/ad9834.c
index 19216af1dfc9..af108e96b3ec 100644
--- a/drivers/staging/iio/frequency/ad9834.c
+++ b/drivers/staging/iio/frequency/ad9834.c
@@ -25,6 +25,80 @@
#include "ad9834.h"
+/* Registers */
+
+#define AD9834_REG_CMD 0
+#define AD9834_REG_FREQ0 BIT(14)
+#define AD9834_REG_FREQ1 BIT(15)
+#define AD9834_REG_PHASE0 (BIT(15) | BIT(14))
+#define AD9834_REG_PHASE1 (BIT(15) | BIT(14) | BIT(13))
+
+/* Command Control Bits */
+
+#define AD9834_B28 BIT(13)
+#define AD9834_HLB BIT(12)
+#define AD9834_FSEL BIT(11)
+#define AD9834_PSEL BIT(10)
+#define AD9834_PIN_SW BIT(9)
+#define AD9834_RESET BIT(8)
+#define AD9834_SLEEP1 BIT(7)
+#define AD9834_SLEEP12 BIT(6)
+#define AD9834_OPBITEN BIT(5)
+#define AD9834_SIGN_PIB BIT(4)
+#define AD9834_DIV2 BIT(3)
+#define AD9834_MODE BIT(1)
+
+#define AD9834_FREQ_BITS 28
+#define AD9834_PHASE_BITS 12
+
+#define RES_MASK(bits) (BIT(bits) - 1)
+
+/**
+ * struct ad9834_state - driver instance specific data
+ * @spi: spi_device
+ * @reg: supply regulator
+ * @mclk: external master clock
+ * @control: cached control word
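+ * @devid: device identifier taken from the SPI device-id table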
+ * @xfer: default spi transfer
+ * @msg: default spi message
+ * @freq_xfer: tuning word spi transfer
+ * @freq_msg: tuning word spi message
+ * @lock: protect sensor state
+ * @data: spi transmit buffer
+ * @freq_data: tuning word spi transmit buffer
+ */
+
+struct ad9834_state {
+ struct spi_device *spi;
+ struct regulator *reg;
+ unsigned int mclk;
+ unsigned short control;
+ unsigned short devid;
+ struct spi_transfer xfer;
+ struct spi_message msg;
+ struct spi_transfer freq_xfer[2];
+ struct spi_message freq_msg;
+ struct mutex lock; /* protect sensor state */
+
+ /*
+ * DMA (thus cache coherency maintenance) requires the
+ * transfer buffers to live in their own cache lines.
+ */
+ __be16 data ____cacheline_aligned;
+ __be16 freq_data[2];
+};
+
+/**
+ * ad9834_supported_device_ids:
+ */
+
+enum ad9834_supported_device_ids {
+ ID_AD9833,
+ ID_AD9834,
+ ID_AD9837,
+ ID_AD9838,
+};
+
static unsigned int ad9834_calc_freqreg(unsigned long mclk, unsigned long fout)
{
unsigned long long freqreg = (u64)fout * (u64)BIT(AD9834_FREQ_BITS);
@@ -75,9 +149,9 @@ static ssize_t ad9834_write(struct device *dev,
ret = kstrtoul(buf, 10, &val);
if (ret)
- goto error_ret;
+ return ret;
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&st->lock);
switch ((u32)this_attr->address) {
case AD9834_REG_FREQ0:
case AD9834_REG_FREQ1:
@@ -135,9 +209,8 @@ static ssize_t ad9834_write(struct device *dev,
default:
ret = -ENODEV;
}
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
-error_ret:
return ret ? ret : len;
}
@@ -152,7 +225,7 @@ static ssize_t ad9834_store_wavetype(struct device *dev,
int ret = 0;
bool is_ad9833_7 = (st->devid == ID_AD9833) || (st->devid == ID_AD9837);
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&st->lock);
switch ((u32)this_attr->address) {
case 0:
@@ -195,7 +268,7 @@ static ssize_t ad9834_store_wavetype(struct device *dev,
st->data = cpu_to_be16(AD9834_REG_CMD | st->control);
ret = spi_sync(st->spi, &st->msg);
}
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
return ret ? ret : len;
}
@@ -346,6 +419,7 @@ static int ad9834_probe(struct spi_device *spi)
}
spi_set_drvdata(spi, indio_dev);
st = iio_priv(indio_dev);
+ mutex_init(&st->lock);
st->mclk = pdata->mclk;
st->spi = spi;
st->devid = spi_get_device_id(spi)->driver_data;
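One further simplification in the ad9834 write path: once the only statement before the old error_ret label is a kstrtoul() failure taken before the lock, the label can go and the error can be returned directly. A short sketch under that assumption, names hypothetical:

#include <linux/kernel.h>

static ssize_t foo_store(const char *buf, size_t len)
{
	unsigned long val;
	int ret;

	ret = kstrtoul(buf, 10, &val);
	if (ret)
		return ret;	/* nothing locked or allocated yet */

	/* ... lock, program @val into the device, unlock ... */

	return len;
}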
diff --git a/drivers/staging/iio/frequency/ad9834.h b/drivers/staging/iio/frequency/ad9834.h
index 40fdd5da7bd0..ae620f38eb49 100644
--- a/drivers/staging/iio/frequency/ad9834.h
+++ b/drivers/staging/iio/frequency/ad9834.h
@@ -8,67 +8,6 @@
#ifndef IIO_DDS_AD9834_H_
#define IIO_DDS_AD9834_H_
-/* Registers */
-
-#define AD9834_REG_CMD 0
-#define AD9834_REG_FREQ0 BIT(14)
-#define AD9834_REG_FREQ1 BIT(15)
-#define AD9834_REG_PHASE0 (BIT(15) | BIT(14))
-#define AD9834_REG_PHASE1 (BIT(15) | BIT(14) | BIT(13))
-
-/* Command Control Bits */
-
-#define AD9834_B28 BIT(13)
-#define AD9834_HLB BIT(12)
-#define AD9834_FSEL BIT(11)
-#define AD9834_PSEL BIT(10)
-#define AD9834_PIN_SW BIT(9)
-#define AD9834_RESET BIT(8)
-#define AD9834_SLEEP1 BIT(7)
-#define AD9834_SLEEP12 BIT(6)
-#define AD9834_OPBITEN BIT(5)
-#define AD9834_SIGN_PIB BIT(4)
-#define AD9834_DIV2 BIT(3)
-#define AD9834_MODE BIT(1)
-
-#define AD9834_FREQ_BITS 28
-#define AD9834_PHASE_BITS 12
-
-#define RES_MASK(bits) (BIT(bits) - 1)
-
-/**
- * struct ad9834_state - driver instance specific data
- * @spi: spi_device
- * @reg: supply regulator
- * @mclk: external master clock
- * @control: cached control word
- * @xfer: default spi transfer
- * @msg: default spi message
- * @freq_xfer: tuning word spi transfer
- * @freq_msg: tuning word spi message
- * @data: spi transmit buffer
- * @freq_data: tuning word spi transmit buffer
- */
-
-struct ad9834_state {
- struct spi_device *spi;
- struct regulator *reg;
- unsigned int mclk;
- unsigned short control;
- unsigned short devid;
- struct spi_transfer xfer;
- struct spi_message msg;
- struct spi_transfer freq_xfer[2];
- struct spi_message freq_msg;
-
- /*
- * DMA (thus cache coherency maintenance) requires the
- * transfer buffers to live in their own cache lines.
- */
- __be16 data ____cacheline_aligned;
- __be16 freq_data[2];
-};
-
/*
* TODO: struct ad7887_platform_data needs to go into include/linux/iio
*/
@@ -97,15 +36,4 @@ struct ad9834_platform_data {
bool en_signbit_msb_out;
};
-/**
- * ad9834_supported_device_ids:
- */
-
-enum ad9834_supported_device_ids {
- ID_AD9833,
- ID_AD9834,
- ID_AD9837,
- ID_AD9838,
-};
-
#endif /* IIO_DDS_AD9834_H_ */
diff --git a/drivers/staging/iio/gyro/adis16060_core.c b/drivers/staging/iio/gyro/adis16060_core.c
index ab816a215eb8..967524583d8a 100644
--- a/drivers/staging/iio/gyro/adis16060_core.c
+++ b/drivers/staging/iio/gyro/adis16060_core.c
@@ -40,25 +40,20 @@ struct adis16060_state {
static struct iio_dev *adis16060_iio_dev;
-static int adis16060_spi_write(struct iio_dev *indio_dev, u8 val)
+static int adis16060_spi_write_then_read(struct iio_dev *indio_dev,
+ u8 conf, u16 *val)
{
int ret;
struct adis16060_state *st = iio_priv(indio_dev);
mutex_lock(&st->buf_lock);
- st->buf[2] = val; /* The last 8 bits clocked in are latched */
+ st->buf[2] = conf; /* The last 8 bits clocked in are latched */
ret = spi_write(st->us_w, st->buf, 3);
- mutex_unlock(&st->buf_lock);
-
- return ret;
-}
-
-static int adis16060_spi_read(struct iio_dev *indio_dev, u16 *val)
-{
- int ret;
- struct adis16060_state *st = iio_priv(indio_dev);
- mutex_lock(&st->buf_lock);
+ if (ret < 0) {
+ mutex_unlock(&st->buf_lock);
+ return ret;
+ }
ret = spi_read(st->us_r, st->buf, 3);
@@ -86,17 +81,11 @@ static int adis16060_read_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_RAW:
- /* Take the iio_dev status lock */
- mutex_lock(&indio_dev->mlock);
- ret = adis16060_spi_write(indio_dev, chan->address);
+ ret = adis16060_spi_write_then_read(indio_dev,
+ chan->address, &tval);
if (ret < 0)
- goto out_unlock;
+ return ret;
- ret = adis16060_spi_read(indio_dev, &tval);
- if (ret < 0)
- goto out_unlock;
-
- mutex_unlock(&indio_dev->mlock);
*val = tval;
return IIO_VAL_INT;
case IIO_CHAN_INFO_OFFSET:
@@ -110,14 +99,10 @@ static int adis16060_read_raw(struct iio_dev *indio_dev,
}
return -EINVAL;
-
-out_unlock:
- mutex_unlock(&indio_dev->mlock);
- return ret;
}
static const struct iio_info adis16060_info = {
- .read_raw = &adis16060_read_raw,
+ .read_raw = adis16060_read_raw,
.driver_module = THIS_MODULE,
};
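The adis16060 change merges the separate write and read helpers so that both SPI transfers run back to back under buf_lock, which is what lets read_raw() stop taking indio_dev->mlock. A condensed sketch of the combined helper; the foo_* names and the result unpacking are illustrative, not the driver's exact math:

#include <linux/mutex.h>
#include <linux/spi/spi.h>

struct foo_state {
	struct spi_device *us_w;	/* write-side interface */
	struct spi_device *us_r;	/* read-side interface */
	struct mutex buf_lock;		/* guards @buf across both transfers */
	u8 buf[3];
};

static int foo_spi_write_then_read(struct foo_state *st, u8 conf, u16 *val)
{
	int ret;

	mutex_lock(&st->buf_lock);
	st->buf[2] = conf;		/* last byte clocked in is latched */
	ret = spi_write(st->us_w, st->buf, 3);
	if (ret < 0)
		goto out_unlock;

	ret = spi_read(st->us_r, st->buf, 3);
	if (ret >= 0)
		*val = (st->buf[0] << 8) | st->buf[1];	/* illustrative unpack */

out_unlock:
	mutex_unlock(&st->buf_lock);
	return ret;
}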
diff --git a/drivers/staging/iio/impedance-analyzer/ad5933.c b/drivers/staging/iio/impedance-analyzer/ad5933.c
index 5e96352fa4ac..3d539eeb0e26 100644
--- a/drivers/staging/iio/impedance-analyzer/ad5933.c
+++ b/drivers/staging/iio/impedance-analyzer/ad5933.c
@@ -98,6 +98,7 @@ struct ad5933_state {
struct i2c_client *client;
struct regulator *reg;
struct delayed_work work;
+ struct mutex lock; /* Protect sensor state */
unsigned long mclk_hz;
unsigned char ctrl_hb;
unsigned char ctrl_lb;
@@ -306,9 +307,11 @@ static ssize_t ad5933_show_frequency(struct device *dev,
u8 d8[4];
} dat;
- mutex_lock(&indio_dev->mlock);
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
ret = ad5933_i2c_read(st->client, this_attr->address, 3, &dat.d8[1]);
- mutex_unlock(&indio_dev->mlock);
+ iio_device_release_direct_mode(indio_dev);
if (ret < 0)
return ret;
@@ -338,19 +341,21 @@ static ssize_t ad5933_store_frequency(struct device *dev,
if (val > AD5933_MAX_OUTPUT_FREQ_Hz)
return -EINVAL;
- mutex_lock(&indio_dev->mlock);
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
ret = ad5933_set_freq(st, this_attr->address, val);
- mutex_unlock(&indio_dev->mlock);
+ iio_device_release_direct_mode(indio_dev);
return ret ? ret : len;
}
-static IIO_DEVICE_ATTR(out_voltage0_freq_start, S_IRUGO | S_IWUSR,
+static IIO_DEVICE_ATTR(out_voltage0_freq_start, 0644,
ad5933_show_frequency,
ad5933_store_frequency,
AD5933_REG_FREQ_START);
-static IIO_DEVICE_ATTR(out_voltage0_freq_increment, S_IRUGO | S_IWUSR,
+static IIO_DEVICE_ATTR(out_voltage0_freq_increment, 0644,
ad5933_show_frequency,
ad5933_store_frequency,
AD5933_REG_FREQ_INC);
@@ -364,7 +369,7 @@ static ssize_t ad5933_show(struct device *dev,
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
int ret = 0, len = 0;
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&st->lock);
switch ((u32)this_attr->address) {
case AD5933_OUT_RANGE:
len = sprintf(buf, "%u\n",
@@ -393,7 +398,7 @@ static ssize_t ad5933_show(struct device *dev,
ret = -EINVAL;
}
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
return ret ? ret : len;
}
@@ -415,7 +420,10 @@ static ssize_t ad5933_store(struct device *dev,
return ret;
}
- mutex_lock(&indio_dev->mlock);
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
+ mutex_lock(&st->lock);
switch ((u32)this_attr->address) {
case AD5933_OUT_RANGE:
ret = -EINVAL;
@@ -465,36 +473,37 @@ static ssize_t ad5933_store(struct device *dev,
ret = -EINVAL;
}
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
+ iio_device_release_direct_mode(indio_dev);
return ret ? ret : len;
}
-static IIO_DEVICE_ATTR(out_voltage0_scale, S_IRUGO | S_IWUSR,
+static IIO_DEVICE_ATTR(out_voltage0_scale, 0644,
ad5933_show,
ad5933_store,
AD5933_OUT_RANGE);
-static IIO_DEVICE_ATTR(out_voltage0_scale_available, S_IRUGO,
+static IIO_DEVICE_ATTR(out_voltage0_scale_available, 0444,
ad5933_show,
NULL,
AD5933_OUT_RANGE_AVAIL);
-static IIO_DEVICE_ATTR(in_voltage0_scale, S_IRUGO | S_IWUSR,
+static IIO_DEVICE_ATTR(in_voltage0_scale, 0644,
ad5933_show,
ad5933_store,
AD5933_IN_PGA_GAIN);
-static IIO_DEVICE_ATTR(in_voltage0_scale_available, S_IRUGO,
+static IIO_DEVICE_ATTR(in_voltage0_scale_available, 0444,
ad5933_show,
NULL,
AD5933_IN_PGA_GAIN_AVAIL);
-static IIO_DEVICE_ATTR(out_voltage0_freq_points, S_IRUGO | S_IWUSR,
+static IIO_DEVICE_ATTR(out_voltage0_freq_points, 0644,
ad5933_show,
ad5933_store,
AD5933_FREQ_POINTS);
-static IIO_DEVICE_ATTR(out_voltage0_settling_cycles, S_IRUGO | S_IWUSR,
+static IIO_DEVICE_ATTR(out_voltage0_settling_cycles, 0644,
ad5933_show,
ad5933_store,
AD5933_OUT_SETTLING_CYCLES);
@@ -532,11 +541,9 @@ static int ad5933_read_raw(struct iio_dev *indio_dev,
switch (m) {
case IIO_CHAN_INFO_RAW:
- mutex_lock(&indio_dev->mlock);
- if (iio_buffer_enabled(indio_dev)) {
- ret = -EBUSY;
- goto out;
- }
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
ret = ad5933_cmd(st, AD5933_CTRL_MEASURE_TEMP);
if (ret < 0)
goto out;
@@ -549,7 +556,7 @@ static int ad5933_read_raw(struct iio_dev *indio_dev,
2, (u8 *)&dat);
if (ret < 0)
goto out;
- mutex_unlock(&indio_dev->mlock);
+ iio_device_release_direct_mode(indio_dev);
*val = sign_extend32(be16_to_cpu(dat), 13);
return IIO_VAL_INT;
@@ -561,7 +568,7 @@ static int ad5933_read_raw(struct iio_dev *indio_dev,
return -EINVAL;
out:
- mutex_unlock(&indio_dev->mlock);
+ iio_device_release_direct_mode(indio_dev);
return ret;
}
@@ -657,18 +664,17 @@ static void ad5933_work(struct work_struct *work)
unsigned char status;
int ret;
- mutex_lock(&indio_dev->mlock);
if (st->state == AD5933_CTRL_INIT_START_FREQ) {
/* start sweep */
ad5933_cmd(st, AD5933_CTRL_START_SWEEP);
st->state = AD5933_CTRL_START_SWEEP;
schedule_delayed_work(&st->work, st->poll_time_jiffies);
- goto out;
+ return;
}
ret = ad5933_i2c_read(st->client, AD5933_REG_STATUS, 1, &status);
if (ret)
- goto out;
+ return;
if (status & AD5933_STAT_DATA_VALID) {
int scan_count = bitmap_weight(indio_dev->active_scan_mask,
@@ -678,7 +684,7 @@ static void ad5933_work(struct work_struct *work)
AD5933_REG_REAL_DATA : AD5933_REG_IMAG_DATA,
scan_count * 2, (u8 *)buf);
if (ret)
- goto out;
+ return;
if (scan_count == 2) {
val[0] = be16_to_cpu(buf[0]);
@@ -690,7 +696,7 @@ static void ad5933_work(struct work_struct *work)
} else {
/* no data available - try again later */
schedule_delayed_work(&st->work, st->poll_time_jiffies);
- goto out;
+ return;
}
if (status & AD5933_STAT_SWEEP_DONE) {
@@ -703,8 +709,6 @@ static void ad5933_work(struct work_struct *work)
ad5933_cmd(st, AD5933_CTRL_INC_FREQ);
schedule_delayed_work(&st->work, st->poll_time_jiffies);
}
-out:
- mutex_unlock(&indio_dev->mlock);
}
static int ad5933_probe(struct i2c_client *client,
@@ -723,6 +727,8 @@ static int ad5933_probe(struct i2c_client *client,
i2c_set_clientdata(client, indio_dev);
st->client = client;
+ mutex_init(&st->lock);
+
if (!pdata)
pdata = &ad5933_default_pdata;
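The ad5933 conversion is the canonical use of the IIO direct-mode helpers: instead of taking indio_dev->mlock and open-coding an iio_buffer_enabled() check, a sysfs or read_raw path claims direct mode (which fails with -EBUSY while the buffer is running) and releases it when done. A minimal sketch; foo_read_hw() is a hypothetical stand-in for the real register access:

#include <linux/iio/iio.h>

static int foo_read_hw(struct iio_dev *indio_dev, int *val)
{
	*val = 0;		/* placeholder for an I2C/SPI register read */
	return 0;
}

static int foo_read_raw(struct iio_dev *indio_dev, int *val)
{
	int ret;

	ret = iio_device_claim_direct_mode(indio_dev);
	if (ret)
		return ret;	/* -EBUSY while the buffer is enabled */

	ret = foo_read_hw(indio_dev, val);

	iio_device_release_direct_mode(indio_dev);
	return ret ? ret : IIO_VAL_INT;
}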
diff --git a/drivers/staging/iio/light/isl29028.c b/drivers/staging/iio/light/isl29028.c
index 6bb6d37cc7d1..5375e7a81205 100644
--- a/drivers/staging/iio/light/isl29028.c
+++ b/drivers/staging/iio/light/isl29028.c
@@ -3,6 +3,7 @@
* ISL29028 is Concurrent Ambient Light and Proximity Sensor
*
* Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2017 Brian Masney <masneyb@onstation.org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -63,6 +64,9 @@
#define ISL29028_POWER_OFF_DELAY_MS 2000
+static const unsigned int isl29028_prox_sleep_time[] = {800, 400, 200, 100, 75,
+ 50, 12, 0};
+
enum isl29028_als_ir_mode {
ISL29028_MODE_NONE = 0,
ISL29028_MODE_ALS,
@@ -78,22 +82,29 @@ struct isl29028_chip {
enum isl29028_als_ir_mode als_ir_mode;
};
-static int isl29028_set_proxim_sampling(struct isl29028_chip *chip,
- unsigned int sampling)
+static int isl29028_find_prox_sleep_time_index(int sampling)
{
- struct device *dev = regmap_get_device(chip->regmap);
- static unsigned int prox_period[] = {800, 400, 200, 100, 75, 50, 12, 0};
unsigned int period = DIV_ROUND_UP(1000, sampling);
- int sel, ret;
+ int i;
- for (sel = 0; sel < ARRAY_SIZE(prox_period); ++sel) {
- if (period >= prox_period[sel])
+ for (i = 0; i < ARRAY_SIZE(isl29028_prox_sleep_time); ++i) {
+ if (period >= isl29028_prox_sleep_time[i])
break;
}
+ return i;
+}
+
+static int isl29028_set_proxim_sampling(struct isl29028_chip *chip,
+ unsigned int sampling)
+{
+ struct device *dev = regmap_get_device(chip->regmap);
+ int sleep_index, ret;
+
+ sleep_index = isl29028_find_prox_sleep_time_index(sampling);
ret = regmap_update_bits(chip->regmap, ISL29028_REG_CONFIGURE,
ISL29028_CONF_PROX_SLP_MASK,
- sel << ISL29028_CONF_PROX_SLP_SH);
+ sleep_index << ISL29028_CONF_PROX_SLP_SH);
if (ret < 0) {
dev_err(dev, "%s(): Error %d setting the proximity sampling\n",
@@ -108,7 +119,7 @@ static int isl29028_set_proxim_sampling(struct isl29028_chip *chip,
static int isl29028_enable_proximity(struct isl29028_chip *chip)
{
- int ret;
+ int sleep_index, ret;
ret = isl29028_set_proxim_sampling(chip, chip->prox_sampling);
if (ret < 0)
@@ -121,7 +132,8 @@ static int isl29028_enable_proximity(struct isl29028_chip *chip)
return ret;
/* Wait for conversion to be complete for first sample */
- mdelay(DIV_ROUND_UP(1000, chip->prox_sampling));
+ sleep_index = isl29028_find_prox_sleep_time_index(chip->prox_sampling);
+ msleep(isl29028_prox_sleep_time[sleep_index]);
return 0;
}
@@ -192,7 +204,7 @@ static int isl29028_set_als_ir_mode(struct isl29028_chip *chip,
return ret;
/* Need to wait for conversion time if ALS/IR mode enabled */
- mdelay(ISL29028_CONV_TIME_MS);
+ msleep(ISL29028_CONV_TIME_MS);
chip->als_ir_mode = mode;
@@ -645,7 +657,8 @@ static int __maybe_unused isl29028_resume(struct device *dev)
}
static const struct dev_pm_ops isl29028_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(isl29028_suspend, isl29028_resume)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
SET_RUNTIME_PM_OPS(isl29028_suspend, isl29028_resume, NULL)
};
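Two things to note in the isl29028 hunk: the conversion wait now uses msleep() rather than mdelay() (the caller can sleep, so busy-waiting is wasteful), and the wait time is looked up from the same sleep-time table that programmed the chip instead of being recomputed from the sampling rate. A sketch of the lookup, names hypothetical and assuming a non-zero sampling rate:

#include <linux/delay.h>
#include <linux/kernel.h>

static const unsigned int foo_sleep_ms[] = { 800, 400, 200, 100, 75, 50, 12, 0 };

static int foo_find_sleep_index(int sampling_hz)
{
	unsigned int period = DIV_ROUND_UP(1000, sampling_hz);
	int i;

	/* the trailing 0 entry guarantees the loop always breaks in range */
	for (i = 0; i < ARRAY_SIZE(foo_sleep_ms); i++)
		if (period >= foo_sleep_ms[i])
			break;

	return i;
}

static void foo_wait_first_sample(int sampling_hz)
{
	msleep(foo_sleep_ms[foo_find_sleep_index(sampling_hz)]);
}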
diff --git a/drivers/staging/iio/light/tsl2x7x_core.c b/drivers/staging/iio/light/tsl2x7x_core.c
index ea15bc1c300c..af3910bc1b4f 100644
--- a/drivers/staging/iio/light/tsl2x7x_core.c
+++ b/drivers/staging/iio/light/tsl2x7x_core.c
@@ -854,7 +854,7 @@ void tsl2x7x_prox_calculate(int *data, int length,
tmp = data[i] - statP->mean;
sample_sum += tmp * tmp;
}
- statP->stddev = int_sqrt((long)sample_sum) / length;
+ statP->stddev = int_sqrt((long)sample_sum / length);
}
/**
@@ -1676,7 +1676,7 @@ static const struct attribute_group tsl2X7X_device_attr_group_tbl[] = {
},
};
-static struct attribute_group tsl2X7X_event_attr_group_tbl[] = {
+static const struct attribute_group tsl2X7X_event_attr_group_tbl[] = {
[ALS] = {
.attrs = tsl2X7X_ALS_event_attrs,
.name = "events",
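The tsl2x7x hunk fixes real arithmetic: a standard deviation is the square root of the mean of the squared deviations, so the division by the sample count belongs inside int_sqrt(), not outside it. For samples 4 and 8 (mean 6, squared deviations summing to 8), the corrected form gives int_sqrt(8 / 2) = 2, while the old int_sqrt(8) / 2 gave 1. A one-line sketch with hypothetical naming:

#include <linux/kernel.h>	/* int_sqrt() */

static unsigned long foo_stddev(unsigned long sum_sq_dev, int length)
{
	return int_sqrt(sum_sq_dev / length);	/* was: int_sqrt(sum) / length */
}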
diff --git a/drivers/staging/iio/meter/ade7753.c b/drivers/staging/iio/meter/ade7753.c
index 671dc9971610..b71fbd313778 100644
--- a/drivers/staging/iio/meter/ade7753.c
+++ b/drivers/staging/iio/meter/ade7753.c
@@ -6,22 +6,88 @@
* Licensed under the GPL-2 or later.
*/
-#include <linux/interrupt.h>
-#include <linux/irq.h>
#include <linux/delay.h>
-#include <linux/mutex.h>
#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
#include <linux/kernel.h>
-#include <linux/spi/spi.h>
-#include <linux/slab.h>
-#include <linux/sysfs.h>
#include <linux/list.h>
#include <linux/module.h>
-
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
+#include <linux/spi/spi.h>
#include "meter.h"
-#include "ade7753.h"
+
+#define ADE7753_WAVEFORM 0x01
+#define ADE7753_AENERGY 0x02
+#define ADE7753_RAENERGY 0x03
+#define ADE7753_LAENERGY 0x04
+#define ADE7753_VAENERGY 0x05
+#define ADE7753_RVAENERGY 0x06
+#define ADE7753_LVAENERGY 0x07
+#define ADE7753_LVARENERGY 0x08
+#define ADE7753_MODE 0x09
+#define ADE7753_IRQEN 0x0A
+#define ADE7753_STATUS 0x0B
+#define ADE7753_RSTSTATUS 0x0C
+#define ADE7753_CH1OS 0x0D
+#define ADE7753_CH2OS 0x0E
+#define ADE7753_GAIN 0x0F
+#define ADE7753_PHCAL 0x10
+#define ADE7753_APOS 0x11
+#define ADE7753_WGAIN 0x12
+#define ADE7753_WDIV 0x13
+#define ADE7753_CFNUM 0x14
+#define ADE7753_CFDEN 0x15
+#define ADE7753_IRMS 0x16
+#define ADE7753_VRMS 0x17
+#define ADE7753_IRMSOS 0x18
+#define ADE7753_VRMSOS 0x19
+#define ADE7753_VAGAIN 0x1A
+#define ADE7753_VADIV 0x1B
+#define ADE7753_LINECYC 0x1C
+#define ADE7753_ZXTOUT 0x1D
+#define ADE7753_SAGCYC 0x1E
+#define ADE7753_SAGLVL 0x1F
+#define ADE7753_IPKLVL 0x20
+#define ADE7753_VPKLVL 0x21
+#define ADE7753_IPEAK 0x22
+#define ADE7753_RSTIPEAK 0x23
+#define ADE7753_VPEAK 0x24
+#define ADE7753_RSTVPEAK 0x25
+#define ADE7753_TEMP 0x26
+#define ADE7753_PERIOD 0x27
+#define ADE7753_TMODE 0x3D
+#define ADE7753_CHKSUM 0x3E
+#define ADE7753_DIEREV 0x3F
+
+#define ADE7753_READ_REG(a) a
+#define ADE7753_WRITE_REG(a) ((a) | 0x80)
+
+#define ADE7753_MAX_TX 4
+#define ADE7753_MAX_RX 4
+#define ADE7753_STARTUP_DELAY 1000
+
+#define ADE7753_SPI_SLOW (u32)(300 * 1000)
+#define ADE7753_SPI_BURST (u32)(1000 * 1000)
+#define ADE7753_SPI_FAST (u32)(2000 * 1000)
+
+/**
+ * struct ade7753_state - device instance specific data
+ * @us: actual spi_device
+ * @tx: transmit buffer
+ * @rx: receive buffer
+ * @buf_lock: mutex to protect tx and rx
+ **/
+struct ade7753_state {
+ struct spi_device *us;
+ struct mutex buf_lock;
+ u8 tx[ADE7753_MAX_TX] ____cacheline_aligned;
+ u8 rx[ADE7753_MAX_RX];
+};
static int ade7753_spi_write_reg_8(struct device *dev,
u8 reg_address,
diff --git a/drivers/staging/iio/meter/ade7753.h b/drivers/staging/iio/meter/ade7753.h
deleted file mode 100644
index bfe749156bce..000000000000
--- a/drivers/staging/iio/meter/ade7753.h
+++ /dev/null
@@ -1,72 +0,0 @@
-#ifndef _ADE7753_H
-#define _ADE7753_H
-
-#define ADE7753_WAVEFORM 0x01
-#define ADE7753_AENERGY 0x02
-#define ADE7753_RAENERGY 0x03
-#define ADE7753_LAENERGY 0x04
-#define ADE7753_VAENERGY 0x05
-#define ADE7753_RVAENERGY 0x06
-#define ADE7753_LVAENERGY 0x07
-#define ADE7753_LVARENERGY 0x08
-#define ADE7753_MODE 0x09
-#define ADE7753_IRQEN 0x0A
-#define ADE7753_STATUS 0x0B
-#define ADE7753_RSTSTATUS 0x0C
-#define ADE7753_CH1OS 0x0D
-#define ADE7753_CH2OS 0x0E
-#define ADE7753_GAIN 0x0F
-#define ADE7753_PHCAL 0x10
-#define ADE7753_APOS 0x11
-#define ADE7753_WGAIN 0x12
-#define ADE7753_WDIV 0x13
-#define ADE7753_CFNUM 0x14
-#define ADE7753_CFDEN 0x15
-#define ADE7753_IRMS 0x16
-#define ADE7753_VRMS 0x17
-#define ADE7753_IRMSOS 0x18
-#define ADE7753_VRMSOS 0x19
-#define ADE7753_VAGAIN 0x1A
-#define ADE7753_VADIV 0x1B
-#define ADE7753_LINECYC 0x1C
-#define ADE7753_ZXTOUT 0x1D
-#define ADE7753_SAGCYC 0x1E
-#define ADE7753_SAGLVL 0x1F
-#define ADE7753_IPKLVL 0x20
-#define ADE7753_VPKLVL 0x21
-#define ADE7753_IPEAK 0x22
-#define ADE7753_RSTIPEAK 0x23
-#define ADE7753_VPEAK 0x24
-#define ADE7753_RSTVPEAK 0x25
-#define ADE7753_TEMP 0x26
-#define ADE7753_PERIOD 0x27
-#define ADE7753_TMODE 0x3D
-#define ADE7753_CHKSUM 0x3E
-#define ADE7753_DIEREV 0x3F
-
-#define ADE7753_READ_REG(a) a
-#define ADE7753_WRITE_REG(a) ((a) | 0x80)
-
-#define ADE7753_MAX_TX 4
-#define ADE7753_MAX_RX 4
-#define ADE7753_STARTUP_DELAY 1000
-
-#define ADE7753_SPI_SLOW (u32)(300 * 1000)
-#define ADE7753_SPI_BURST (u32)(1000 * 1000)
-#define ADE7753_SPI_FAST (u32)(2000 * 1000)
-
-/**
- * struct ade7753_state - device instance specific data
- * @us: actual spi_device
- * @tx: transmit buffer
- * @rx: receive buffer
- * @buf_lock: mutex to protect tx and rx
- **/
-struct ade7753_state {
- struct spi_device *us;
- struct mutex buf_lock;
- u8 tx[ADE7753_MAX_TX] ____cacheline_aligned;
- u8 rx[ADE7753_MAX_RX];
-};
-
-#endif
diff --git a/drivers/staging/iio/meter/ade7754.c b/drivers/staging/iio/meter/ade7754.c
index 024463a11c47..32dc50341746 100644
--- a/drivers/staging/iio/meter/ade7754.c
+++ b/drivers/staging/iio/meter/ade7754.c
@@ -6,34 +6,126 @@
* Licensed under the GPL-2 or later.
*/
-#include <linux/interrupt.h>
-#include <linux/irq.h>
#include <linux/delay.h>
-#include <linux/mutex.h>
#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
#include <linux/spi/spi.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
-#include <linux/list.h>
-#include <linux/module.h>
-
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include "meter.h"
-#include "ade7754.h"
-static int ade7754_spi_write_reg_8(struct device *dev, u8 reg_address, u8 val)
+#define ADE7754_AENERGY 0x01
+#define ADE7754_RAENERGY 0x02
+#define ADE7754_LAENERGY 0x03
+#define ADE7754_VAENERGY 0x04
+#define ADE7754_RVAENERGY 0x05
+#define ADE7754_LVAENERGY 0x06
+#define ADE7754_PERIOD 0x07
+#define ADE7754_TEMP 0x08
+#define ADE7754_WFORM 0x09
+#define ADE7754_OPMODE 0x0A
+#define ADE7754_MMODE 0x0B
+#define ADE7754_WAVMODE 0x0C
+#define ADE7754_WATMODE 0x0D
+#define ADE7754_VAMODE 0x0E
+#define ADE7754_IRQEN 0x0F
+#define ADE7754_STATUS 0x10
+#define ADE7754_RSTATUS 0x11
+#define ADE7754_ZXTOUT 0x12
+#define ADE7754_LINCYC 0x13
+#define ADE7754_SAGCYC 0x14
+#define ADE7754_SAGLVL 0x15
+#define ADE7754_VPEAK 0x16
+#define ADE7754_IPEAK 0x17
+#define ADE7754_GAIN 0x18
+#define ADE7754_AWG 0x19
+#define ADE7754_BWG 0x1A
+#define ADE7754_CWG 0x1B
+#define ADE7754_AVAG 0x1C
+#define ADE7754_BVAG 0x1D
+#define ADE7754_CVAG 0x1E
+#define ADE7754_APHCAL 0x1F
+#define ADE7754_BPHCAL 0x20
+#define ADE7754_CPHCAL 0x21
+#define ADE7754_AAPOS 0x22
+#define ADE7754_BAPOS 0x23
+#define ADE7754_CAPOS 0x24
+#define ADE7754_CFNUM 0x25
+#define ADE7754_CFDEN 0x26
+#define ADE7754_WDIV 0x27
+#define ADE7754_VADIV 0x28
+#define ADE7754_AIRMS 0x29
+#define ADE7754_BIRMS 0x2A
+#define ADE7754_CIRMS 0x2B
+#define ADE7754_AVRMS 0x2C
+#define ADE7754_BVRMS 0x2D
+#define ADE7754_CVRMS 0x2E
+#define ADE7754_AIRMSOS 0x2F
+#define ADE7754_BIRMSOS 0x30
+#define ADE7754_CIRMSOS 0x31
+#define ADE7754_AVRMSOS 0x32
+#define ADE7754_BVRMSOS 0x33
+#define ADE7754_CVRMSOS 0x34
+#define ADE7754_AAPGAIN 0x35
+#define ADE7754_BAPGAIN 0x36
+#define ADE7754_CAPGAIN 0x37
+#define ADE7754_AVGAIN 0x38
+#define ADE7754_BVGAIN 0x39
+#define ADE7754_CVGAIN 0x3A
+#define ADE7754_CHKSUM 0x3E
+#define ADE7754_VERSION 0x3F
+
+#define ADE7754_READ_REG(a) a
+#define ADE7754_WRITE_REG(a) ((a) | 0x80)
+
+#define ADE7754_MAX_TX 4
+#define ADE7754_MAX_RX 4
+#define ADE7754_STARTUP_DELAY 1000
+
+#define ADE7754_SPI_SLOW (u32)(300 * 1000)
+#define ADE7754_SPI_BURST (u32)(1000 * 1000)
+#define ADE7754_SPI_FAST (u32)(2000 * 1000)
+
+/**
+ * struct ade7754_state - device instance specific data
+ * @us: actual spi_device
+ * @buf_lock: mutex to protect tx, rx and write frequency
+ * @tx: transmit buffer
+ * @rx: receive buffer
+ **/
+struct ade7754_state {
+ struct spi_device *us;
+ struct mutex buf_lock;
+ u8 tx[ADE7754_MAX_TX] ____cacheline_aligned;
+ u8 rx[ADE7754_MAX_RX];
+};
+
+/* Unlocked version of ade7754_spi_write_reg_8 function */
+static int __ade7754_spi_write_reg_8(struct device *dev, u8 reg_address, u8 val)
{
- int ret;
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct ade7754_state *st = iio_priv(indio_dev);
- mutex_lock(&st->buf_lock);
st->tx[0] = ADE7754_WRITE_REG(reg_address);
st->tx[1] = val;
+ return spi_write(st->us, st->tx, 2);
+}
+
+static int ade7754_spi_write_reg_8(struct device *dev, u8 reg_address, u8 val)
+{
+ int ret;
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct ade7754_state *st = iio_priv(indio_dev);
- ret = spi_write(st->us, st->tx, 2);
+ mutex_lock(&st->buf_lock);
+ ret = __ade7754_spi_write_reg_8(dev, reg_address, val);
mutex_unlock(&st->buf_lock);
return ret;
@@ -349,9 +441,7 @@ static int ade7754_set_irq(struct device *dev, bool enable)
else
irqen &= ~BIT(14);
- ret = ade7754_spi_write_reg_16(dev, ADE7754_IRQEN, irqen);
-
- return ret;
+ return ade7754_spi_write_reg_16(dev, ADE7754_IRQEN, irqen);
}
/* Power down the device */
@@ -430,7 +520,7 @@ static ssize_t ade7754_write_frequency(struct device *dev,
if (!val)
return -EINVAL;
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&st->buf_lock);
t = 26000 / val;
if (t > 0)
@@ -448,10 +538,10 @@ static ssize_t ade7754_write_frequency(struct device *dev,
reg &= ~(3 << 3);
reg |= t << 3;
- ret = ade7754_spi_write_reg_8(dev, ADE7754_WAVMODE, reg);
+ ret = __ade7754_spi_write_reg_8(dev, ADE7754_WAVMODE, reg);
out:
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->buf_lock);
return ret ? ret : len;
}
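The ade7754 write_frequency path needs to read, modify, and write WAVMODE as one unit, so the patch introduces an unlocked __ variant that assumes the caller already holds buf_lock, and keeps the locked wrapper for ordinary writes. A sketch of the split, with hypothetical foo_* names and an assumed write-flag encoding:

#include <linux/mutex.h>
#include <linux/spi/spi.h>

struct foo_state {
	struct spi_device *us;
	struct mutex buf_lock;	/* protects @tx and multi-step updates */
	u8 tx[2];
};

/* caller must hold st->buf_lock */
static int __foo_write_reg_8(struct foo_state *st, u8 reg, u8 val)
{
	st->tx[0] = reg | 0x80;	/* assumed "write" address flag */
	st->tx[1] = val;
	return spi_write(st->us, st->tx, 2);
}

static int foo_write_reg_8(struct foo_state *st, u8 reg, u8 val)
{
	int ret;

	mutex_lock(&st->buf_lock);
	ret = __foo_write_reg_8(st, reg, val);
	mutex_unlock(&st->buf_lock);

	return ret;
}

A read-modify-write caller then takes buf_lock once around the read, the bit manipulation, and a call to the __ variant, as ade7754_write_frequency() does above.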
diff --git a/drivers/staging/iio/meter/ade7754.h b/drivers/staging/iio/meter/ade7754.h
deleted file mode 100644
index 28f71c2cde0c..000000000000
--- a/drivers/staging/iio/meter/ade7754.h
+++ /dev/null
@@ -1,90 +0,0 @@
-#ifndef _ADE7754_H
-#define _ADE7754_H
-
-#define ADE7754_AENERGY 0x01
-#define ADE7754_RAENERGY 0x02
-#define ADE7754_LAENERGY 0x03
-#define ADE7754_VAENERGY 0x04
-#define ADE7754_RVAENERGY 0x05
-#define ADE7754_LVAENERGY 0x06
-#define ADE7754_PERIOD 0x07
-#define ADE7754_TEMP 0x08
-#define ADE7754_WFORM 0x09
-#define ADE7754_OPMODE 0x0A
-#define ADE7754_MMODE 0x0B
-#define ADE7754_WAVMODE 0x0C
-#define ADE7754_WATMODE 0x0D
-#define ADE7754_VAMODE 0x0E
-#define ADE7754_IRQEN 0x0F
-#define ADE7754_STATUS 0x10
-#define ADE7754_RSTATUS 0x11
-#define ADE7754_ZXTOUT 0x12
-#define ADE7754_LINCYC 0x13
-#define ADE7754_SAGCYC 0x14
-#define ADE7754_SAGLVL 0x15
-#define ADE7754_VPEAK 0x16
-#define ADE7754_IPEAK 0x17
-#define ADE7754_GAIN 0x18
-#define ADE7754_AWG 0x19
-#define ADE7754_BWG 0x1A
-#define ADE7754_CWG 0x1B
-#define ADE7754_AVAG 0x1C
-#define ADE7754_BVAG 0x1D
-#define ADE7754_CVAG 0x1E
-#define ADE7754_APHCAL 0x1F
-#define ADE7754_BPHCAL 0x20
-#define ADE7754_CPHCAL 0x21
-#define ADE7754_AAPOS 0x22
-#define ADE7754_BAPOS 0x23
-#define ADE7754_CAPOS 0x24
-#define ADE7754_CFNUM 0x25
-#define ADE7754_CFDEN 0x26
-#define ADE7754_WDIV 0x27
-#define ADE7754_VADIV 0x28
-#define ADE7754_AIRMS 0x29
-#define ADE7754_BIRMS 0x2A
-#define ADE7754_CIRMS 0x2B
-#define ADE7754_AVRMS 0x2C
-#define ADE7754_BVRMS 0x2D
-#define ADE7754_CVRMS 0x2E
-#define ADE7754_AIRMSOS 0x2F
-#define ADE7754_BIRMSOS 0x30
-#define ADE7754_CIRMSOS 0x31
-#define ADE7754_AVRMSOS 0x32
-#define ADE7754_BVRMSOS 0x33
-#define ADE7754_CVRMSOS 0x34
-#define ADE7754_AAPGAIN 0x35
-#define ADE7754_BAPGAIN 0x36
-#define ADE7754_CAPGAIN 0x37
-#define ADE7754_AVGAIN 0x38
-#define ADE7754_BVGAIN 0x39
-#define ADE7754_CVGAIN 0x3A
-#define ADE7754_CHKSUM 0x3E
-#define ADE7754_VERSION 0x3F
-
-#define ADE7754_READ_REG(a) a
-#define ADE7754_WRITE_REG(a) ((a) | 0x80)
-
-#define ADE7754_MAX_TX 4
-#define ADE7754_MAX_RX 4
-#define ADE7754_STARTUP_DELAY 1000
-
-#define ADE7754_SPI_SLOW (u32)(300 * 1000)
-#define ADE7754_SPI_BURST (u32)(1000 * 1000)
-#define ADE7754_SPI_FAST (u32)(2000 * 1000)
-
-/**
- * struct ade7754_state - device instance specific data
- * @us: actual spi_device
- * @buf_lock: mutex to protect tx and rx
- * @tx: transmit buffer
- * @rx: receive buffer
- **/
-struct ade7754_state {
- struct spi_device *us;
- struct mutex buf_lock;
- u8 tx[ADE7754_MAX_TX] ____cacheline_aligned;
- u8 rx[ADE7754_MAX_RX];
-};
-
-#endif
diff --git a/drivers/staging/iio/meter/ade7759.c b/drivers/staging/iio/meter/ade7759.c
index 944ee3401029..1691760339da 100644
--- a/drivers/staging/iio/meter/ade7759.c
+++ b/drivers/staging/iio/meter/ade7759.c
@@ -21,7 +21,55 @@
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include "meter.h"
-#include "ade7759.h"
+
+#define ADE7759_WAVEFORM 0x01
+#define ADE7759_AENERGY 0x02
+#define ADE7759_RSTENERGY 0x03
+#define ADE7759_STATUS 0x04
+#define ADE7759_RSTSTATUS 0x05
+#define ADE7759_MODE 0x06
+#define ADE7759_CFDEN 0x07
+#define ADE7759_CH1OS 0x08
+#define ADE7759_CH2OS 0x09
+#define ADE7759_GAIN 0x0A
+#define ADE7759_APGAIN 0x0B
+#define ADE7759_PHCAL 0x0C
+#define ADE7759_APOS 0x0D
+#define ADE7759_ZXTOUT 0x0E
+#define ADE7759_SAGCYC 0x0F
+#define ADE7759_IRQEN 0x10
+#define ADE7759_SAGLVL 0x11
+#define ADE7759_TEMP 0x12
+#define ADE7759_LINECYC 0x13
+#define ADE7759_LENERGY 0x14
+#define ADE7759_CFNUM 0x15
+#define ADE7759_CHKSUM 0x1E
+#define ADE7759_DIEREV 0x1F
+
+#define ADE7759_READ_REG(a) a
+#define ADE7759_WRITE_REG(a) ((a) | 0x80)
+
+#define ADE7759_MAX_TX 6
+#define ADE7759_MAX_RX 6
+#define ADE7759_STARTUP_DELAY 1000
+
+#define ADE7759_SPI_SLOW (u32)(300 * 1000)
+#define ADE7759_SPI_BURST (u32)(1000 * 1000)
+#define ADE7759_SPI_FAST (u32)(2000 * 1000)
+
+/**
+ * struct ade7759_state - device instance specific data
+ * @us: actual spi_device
+ * @buf_lock: mutex to protect tx and rx
+ * @tx: transmit buffer
+ * @rx: receive buffer
+ **/
+struct ade7759_state {
+ struct spi_device *us;
+ struct mutex buf_lock;
+ u8 tx[ADE7759_MAX_TX] ____cacheline_aligned;
+ u8 rx[ADE7759_MAX_RX];
+};
static int ade7759_spi_write_reg_8(struct device *dev,
u8 reg_address,
@@ -231,49 +279,49 @@ static int ade7759_reset(struct device *dev)
}
static IIO_DEV_ATTR_AENERGY(ade7759_read_40bit, ADE7759_AENERGY);
-static IIO_DEV_ATTR_CFDEN(S_IWUSR | S_IRUGO,
+static IIO_DEV_ATTR_CFDEN(0644,
ade7759_read_16bit,
ade7759_write_16bit,
ADE7759_CFDEN);
-static IIO_DEV_ATTR_CFNUM(S_IWUSR | S_IRUGO,
+static IIO_DEV_ATTR_CFNUM(0644,
ade7759_read_8bit,
ade7759_write_8bit,
ADE7759_CFNUM);
static IIO_DEV_ATTR_CHKSUM(ade7759_read_8bit, ADE7759_CHKSUM);
-static IIO_DEV_ATTR_PHCAL(S_IWUSR | S_IRUGO,
+static IIO_DEV_ATTR_PHCAL(0644,
ade7759_read_16bit,
ade7759_write_16bit,
ADE7759_PHCAL);
-static IIO_DEV_ATTR_APOS(S_IWUSR | S_IRUGO,
+static IIO_DEV_ATTR_APOS(0644,
ade7759_read_16bit,
ade7759_write_16bit,
ADE7759_APOS);
-static IIO_DEV_ATTR_SAGCYC(S_IWUSR | S_IRUGO,
+static IIO_DEV_ATTR_SAGCYC(0644,
ade7759_read_8bit,
ade7759_write_8bit,
ADE7759_SAGCYC);
-static IIO_DEV_ATTR_SAGLVL(S_IWUSR | S_IRUGO,
+static IIO_DEV_ATTR_SAGLVL(0644,
ade7759_read_8bit,
ade7759_write_8bit,
ADE7759_SAGLVL);
-static IIO_DEV_ATTR_LINECYC(S_IWUSR | S_IRUGO,
+static IIO_DEV_ATTR_LINECYC(0644,
ade7759_read_8bit,
ade7759_write_8bit,
ADE7759_LINECYC);
static IIO_DEV_ATTR_LENERGY(ade7759_read_40bit, ADE7759_LENERGY);
-static IIO_DEV_ATTR_PGA_GAIN(S_IWUSR | S_IRUGO,
+static IIO_DEV_ATTR_PGA_GAIN(0644,
ade7759_read_8bit,
ade7759_write_8bit,
ADE7759_GAIN);
-static IIO_DEV_ATTR_ACTIVE_POWER_GAIN(S_IWUSR | S_IRUGO,
+static IIO_DEV_ATTR_ACTIVE_POWER_GAIN(0644,
ade7759_read_16bit,
ade7759_write_16bit,
ADE7759_APGAIN);
-static IIO_DEV_ATTR_CH_OFF(1, S_IWUSR | S_IRUGO,
+static IIO_DEV_ATTR_CH_OFF(1, 0644,
ade7759_read_8bit,
ade7759_write_8bit,
ADE7759_CH1OS);
-static IIO_DEV_ATTR_CH_OFF(2, S_IWUSR | S_IRUGO,
+static IIO_DEV_ATTR_CH_OFF(2, 0644,
ade7759_read_8bit,
ade7759_write_8bit,
ADE7759_CH2OS);
@@ -410,7 +458,7 @@ static IIO_DEV_ATTR_TEMP_RAW(ade7759_read_8bit);
static IIO_CONST_ATTR(in_temp_offset, "70 C");
static IIO_CONST_ATTR(in_temp_scale, "1 C");
-static IIO_DEV_ATTR_SAMP_FREQ(S_IWUSR | S_IRUGO,
+static IIO_DEV_ATTR_SAMP_FREQ(0644,
ade7759_read_frequency,
ade7759_write_frequency);
diff --git a/drivers/staging/iio/meter/ade7759.h b/drivers/staging/iio/meter/ade7759.h
deleted file mode 100644
index f0716d2fdf8e..000000000000
--- a/drivers/staging/iio/meter/ade7759.h
+++ /dev/null
@@ -1,53 +0,0 @@
-#ifndef _ADE7759_H
-#define _ADE7759_H
-
-#define ADE7759_WAVEFORM 0x01
-#define ADE7759_AENERGY 0x02
-#define ADE7759_RSTENERGY 0x03
-#define ADE7759_STATUS 0x04
-#define ADE7759_RSTSTATUS 0x05
-#define ADE7759_MODE 0x06
-#define ADE7759_CFDEN 0x07
-#define ADE7759_CH1OS 0x08
-#define ADE7759_CH2OS 0x09
-#define ADE7759_GAIN 0x0A
-#define ADE7759_APGAIN 0x0B
-#define ADE7759_PHCAL 0x0C
-#define ADE7759_APOS 0x0D
-#define ADE7759_ZXTOUT 0x0E
-#define ADE7759_SAGCYC 0x0F
-#define ADE7759_IRQEN 0x10
-#define ADE7759_SAGLVL 0x11
-#define ADE7759_TEMP 0x12
-#define ADE7759_LINECYC 0x13
-#define ADE7759_LENERGY 0x14
-#define ADE7759_CFNUM 0x15
-#define ADE7759_CHKSUM 0x1E
-#define ADE7759_DIEREV 0x1F
-
-#define ADE7759_READ_REG(a) a
-#define ADE7759_WRITE_REG(a) ((a) | 0x80)
-
-#define ADE7759_MAX_TX 6
-#define ADE7759_MAX_RX 6
-#define ADE7759_STARTUP_DELAY 1000
-
-#define ADE7759_SPI_SLOW (u32)(300 * 1000)
-#define ADE7759_SPI_BURST (u32)(1000 * 1000)
-#define ADE7759_SPI_FAST (u32)(2000 * 1000)
-
-/**
- * struct ade7759_state - device instance specific data
- * @us: actual spi_device
- * @buf_lock: mutex to protect tx and rx
- * @tx: transmit buffer
- * @rx: receive buffer
- **/
-struct ade7759_state {
- struct spi_device *us;
- struct mutex buf_lock;
- u8 tx[ADE7759_MAX_TX] ____cacheline_aligned;
- u8 rx[ADE7759_MAX_RX];
-};
-
-#endif
diff --git a/drivers/staging/iio/meter/ade7854.c b/drivers/staging/iio/meter/ade7854.c
index e8007f0c5186..c6cffc11b0ba 100644
--- a/drivers/staging/iio/meter/ade7854.c
+++ b/drivers/staging/iio/meter/ade7854.c
@@ -426,9 +426,7 @@ static int ade7854_set_irq(struct device *dev, bool enable)
else
irqen &= ~BIT(17);
- ret = st->write_reg_32(dev, ADE7854_MASK0, irqen);
-
- return ret;
+ return st->write_reg_32(dev, ADE7854_MASK0, irqen);
}
static int ade7854_initial_setup(struct iio_dev *indio_dev)
diff --git a/drivers/staging/iio/meter/meter.h b/drivers/staging/iio/meter/meter.h
index dfba510f29be..0e37f23853f1 100644
--- a/drivers/staging/iio/meter/meter.h
+++ b/drivers/staging/iio/meter/meter.h
@@ -81,94 +81,94 @@
IIO_DEVICE_ATTR(reactive_power_c_gain, _mode, _show, _store, _addr)
#define IIO_DEV_ATTR_CURRENT_A(_show, _addr) \
- IIO_DEVICE_ATTR(current_a, S_IRUGO, _show, NULL, _addr)
+ IIO_DEVICE_ATTR(current_a, 0444, _show, NULL, _addr)
#define IIO_DEV_ATTR_CURRENT_B(_show, _addr) \
- IIO_DEVICE_ATTR(current_b, S_IRUGO, _show, NULL, _addr)
+ IIO_DEVICE_ATTR(current_b, 0444, _show, NULL, _addr)
#define IIO_DEV_ATTR_CURRENT_C(_show, _addr) \
- IIO_DEVICE_ATTR(current_c, S_IRUGO, _show, NULL, _addr)
+ IIO_DEVICE_ATTR(current_c, 0444, _show, NULL, _addr)
#define IIO_DEV_ATTR_VOLT_A(_show, _addr) \
- IIO_DEVICE_ATTR(volt_a, S_IRUGO, _show, NULL, _addr)
+ IIO_DEVICE_ATTR(volt_a, 0444, _show, NULL, _addr)
#define IIO_DEV_ATTR_VOLT_B(_show, _addr) \
- IIO_DEVICE_ATTR(volt_b, S_IRUGO, _show, NULL, _addr)
+ IIO_DEVICE_ATTR(volt_b, 0444, _show, NULL, _addr)
#define IIO_DEV_ATTR_VOLT_C(_show, _addr) \
- IIO_DEVICE_ATTR(volt_c, S_IRUGO, _show, NULL, _addr)
+ IIO_DEVICE_ATTR(volt_c, 0444, _show, NULL, _addr)
#define IIO_DEV_ATTR_AENERGY(_show, _addr) \
- IIO_DEVICE_ATTR(aenergy, S_IRUGO, _show, NULL, _addr)
+ IIO_DEVICE_ATTR(aenergy, 0444, _show, NULL, _addr)
#define IIO_DEV_ATTR_LENERGY(_show, _addr) \
- IIO_DEVICE_ATTR(lenergy, S_IRUGO, _show, NULL, _addr)
+ IIO_DEVICE_ATTR(lenergy, 0444, _show, NULL, _addr)
#define IIO_DEV_ATTR_RAENERGY(_show, _addr) \
- IIO_DEVICE_ATTR(raenergy, S_IRUGO, _show, NULL, _addr)
+ IIO_DEVICE_ATTR(raenergy, 0444, _show, NULL, _addr)
#define IIO_DEV_ATTR_LAENERGY(_show, _addr) \
- IIO_DEVICE_ATTR(laenergy, S_IRUGO, _show, NULL, _addr)
+ IIO_DEVICE_ATTR(laenergy, 0444, _show, NULL, _addr)
#define IIO_DEV_ATTR_VAENERGY(_show, _addr) \
- IIO_DEVICE_ATTR(vaenergy, S_IRUGO, _show, NULL, _addr)
+ IIO_DEVICE_ATTR(vaenergy, 0444, _show, NULL, _addr)
#define IIO_DEV_ATTR_LVAENERGY(_show, _addr) \
- IIO_DEVICE_ATTR(lvaenergy, S_IRUGO, _show, NULL, _addr)
+ IIO_DEVICE_ATTR(lvaenergy, 0444, _show, NULL, _addr)
#define IIO_DEV_ATTR_RVAENERGY(_show, _addr) \
- IIO_DEVICE_ATTR(rvaenergy, S_IRUGO, _show, NULL, _addr)
+ IIO_DEVICE_ATTR(rvaenergy, 0444, _show, NULL, _addr)
#define IIO_DEV_ATTR_LVARENERGY(_show, _addr) \
- IIO_DEVICE_ATTR(lvarenergy, S_IRUGO, _show, NULL, _addr)
+ IIO_DEVICE_ATTR(lvarenergy, 0444, _show, NULL, _addr)
#define IIO_DEV_ATTR_CHKSUM(_show, _addr) \
- IIO_DEVICE_ATTR(chksum, S_IRUGO, _show, NULL, _addr)
+ IIO_DEVICE_ATTR(chksum, 0444, _show, NULL, _addr)
#define IIO_DEV_ATTR_ANGLE0(_show, _addr) \
- IIO_DEVICE_ATTR(angle0, S_IRUGO, _show, NULL, _addr)
+ IIO_DEVICE_ATTR(angle0, 0444, _show, NULL, _addr)
#define IIO_DEV_ATTR_ANGLE1(_show, _addr) \
- IIO_DEVICE_ATTR(angle1, S_IRUGO, _show, NULL, _addr)
+ IIO_DEVICE_ATTR(angle1, 0444, _show, NULL, _addr)
#define IIO_DEV_ATTR_ANGLE2(_show, _addr) \
- IIO_DEVICE_ATTR(angle2, S_IRUGO, _show, NULL, _addr)
+ IIO_DEVICE_ATTR(angle2, 0444, _show, NULL, _addr)
#define IIO_DEV_ATTR_AWATTHR(_show, _addr) \
- IIO_DEVICE_ATTR(awatthr, S_IRUGO, _show, NULL, _addr)
+ IIO_DEVICE_ATTR(awatthr, 0444, _show, NULL, _addr)
#define IIO_DEV_ATTR_BWATTHR(_show, _addr) \
- IIO_DEVICE_ATTR(bwatthr, S_IRUGO, _show, NULL, _addr)
+ IIO_DEVICE_ATTR(bwatthr, 0444, _show, NULL, _addr)
#define IIO_DEV_ATTR_CWATTHR(_show, _addr) \
- IIO_DEVICE_ATTR(cwatthr, S_IRUGO, _show, NULL, _addr)
+ IIO_DEVICE_ATTR(cwatthr, 0444, _show, NULL, _addr)
#define IIO_DEV_ATTR_AFWATTHR(_show, _addr) \
- IIO_DEVICE_ATTR(afwatthr, S_IRUGO, _show, NULL, _addr)
+ IIO_DEVICE_ATTR(afwatthr, 0444, _show, NULL, _addr)
#define IIO_DEV_ATTR_BFWATTHR(_show, _addr) \
- IIO_DEVICE_ATTR(bfwatthr, S_IRUGO, _show, NULL, _addr)
+ IIO_DEVICE_ATTR(bfwatthr, 0444, _show, NULL, _addr)
#define IIO_DEV_ATTR_CFWATTHR(_show, _addr) \
- IIO_DEVICE_ATTR(cfwatthr, S_IRUGO, _show, NULL, _addr)
+ IIO_DEVICE_ATTR(cfwatthr, 0444, _show, NULL, _addr)
#define IIO_DEV_ATTR_AVARHR(_show, _addr) \
- IIO_DEVICE_ATTR(avarhr, S_IRUGO, _show, NULL, _addr)
+ IIO_DEVICE_ATTR(avarhr, 0444, _show, NULL, _addr)
#define IIO_DEV_ATTR_BVARHR(_show, _addr) \
- IIO_DEVICE_ATTR(bvarhr, S_IRUGO, _show, NULL, _addr)
+ IIO_DEVICE_ATTR(bvarhr, 0444, _show, NULL, _addr)
#define IIO_DEV_ATTR_CVARHR(_show, _addr) \
- IIO_DEVICE_ATTR(cvarhr, S_IRUGO, _show, NULL, _addr)
+ IIO_DEVICE_ATTR(cvarhr, 0444, _show, NULL, _addr)
#define IIO_DEV_ATTR_AVAHR(_show, _addr) \
- IIO_DEVICE_ATTR(avahr, S_IRUGO, _show, NULL, _addr)
+ IIO_DEVICE_ATTR(avahr, 0444, _show, NULL, _addr)
#define IIO_DEV_ATTR_BVAHR(_show, _addr) \
- IIO_DEVICE_ATTR(bvahr, S_IRUGO, _show, NULL, _addr)
+ IIO_DEVICE_ATTR(bvahr, 0444, _show, NULL, _addr)
#define IIO_DEV_ATTR_CVAHR(_show, _addr) \
- IIO_DEVICE_ATTR(cvahr, S_IRUGO, _show, NULL, _addr)
+ IIO_DEVICE_ATTR(cvahr, 0444, _show, NULL, _addr)
#define IIO_DEV_ATTR_IOS(_mode, _show, _store, _addr) \
IIO_DEVICE_ATTR(ios, _mode, _show, _store, _addr)
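The meter.h churn (and the matching attribute changes in the drivers above) is purely the checkpatch-preferred switch from symbolic permission macros to octal: S_IRUGO becomes 0444, S_IWUSR becomes 0200, and the combination becomes 0644. The shape of a converted attribute, with a hypothetical attribute name and placeholder show callback:

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/iio/sysfs.h>

static ssize_t foo_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	return sprintf(buf, "0\n");	/* placeholder value */
}

/* 0444 == S_IRUGO; a writable attribute would use 0644 (S_IRUGO | S_IWUSR) */
static IIO_DEVICE_ATTR(foo_value, 0444, foo_show, NULL, 0);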
diff --git a/drivers/staging/iio/resolver/ad2s1200.c b/drivers/staging/iio/resolver/ad2s1200.c
index 82b2d88ca942..a37e199225f4 100644
--- a/drivers/staging/iio/resolver/ad2s1200.c
+++ b/drivers/staging/iio/resolver/ad2s1200.c
@@ -97,7 +97,7 @@ static const struct iio_chan_spec ad2s1200_channels[] = {
};
static const struct iio_info ad2s1200_info = {
- .read_raw = &ad2s1200_read_raw,
+ .read_raw = ad2s1200_read_raw,
.driver_module = THIS_MODULE,
};
diff --git a/drivers/staging/iio/resolver/ad2s1210.c b/drivers/staging/iio/resolver/ad2s1210.c
index 6b992634f009..a6a8393d6664 100644
--- a/drivers/staging/iio/resolver/ad2s1210.c
+++ b/drivers/staging/iio/resolver/ad2s1210.c
@@ -490,8 +490,8 @@ static int ad2s1210_read_raw(struct iio_dev *indio_dev,
ad2s1210_set_mode(MOD_VEL, st);
break;
default:
- ret = -EINVAL;
- break;
+ ret = -EINVAL;
+ break;
}
if (ret < 0)
goto error_ret;
@@ -531,36 +531,36 @@ error_ret:
return ret;
}
-static IIO_DEVICE_ATTR(fclkin, S_IRUGO | S_IWUSR,
+static IIO_DEVICE_ATTR(fclkin, 0644,
ad2s1210_show_fclkin, ad2s1210_store_fclkin, 0);
-static IIO_DEVICE_ATTR(fexcit, S_IRUGO | S_IWUSR,
+static IIO_DEVICE_ATTR(fexcit, 0644,
ad2s1210_show_fexcit, ad2s1210_store_fexcit, 0);
-static IIO_DEVICE_ATTR(control, S_IRUGO | S_IWUSR,
+static IIO_DEVICE_ATTR(control, 0644,
ad2s1210_show_control, ad2s1210_store_control, 0);
-static IIO_DEVICE_ATTR(bits, S_IRUGO | S_IWUSR,
+static IIO_DEVICE_ATTR(bits, 0644,
ad2s1210_show_resolution, ad2s1210_store_resolution, 0);
-static IIO_DEVICE_ATTR(fault, S_IRUGO | S_IWUSR,
+static IIO_DEVICE_ATTR(fault, 0644,
ad2s1210_show_fault, ad2s1210_clear_fault, 0);
-static IIO_DEVICE_ATTR(los_thrd, S_IRUGO | S_IWUSR,
+static IIO_DEVICE_ATTR(los_thrd, 0644,
ad2s1210_show_reg, ad2s1210_store_reg,
AD2S1210_REG_LOS_THRD);
-static IIO_DEVICE_ATTR(dos_ovr_thrd, S_IRUGO | S_IWUSR,
+static IIO_DEVICE_ATTR(dos_ovr_thrd, 0644,
ad2s1210_show_reg, ad2s1210_store_reg,
AD2S1210_REG_DOS_OVR_THRD);
-static IIO_DEVICE_ATTR(dos_mis_thrd, S_IRUGO | S_IWUSR,
+static IIO_DEVICE_ATTR(dos_mis_thrd, 0644,
ad2s1210_show_reg, ad2s1210_store_reg,
AD2S1210_REG_DOS_MIS_THRD);
-static IIO_DEVICE_ATTR(dos_rst_max_thrd, S_IRUGO | S_IWUSR,
+static IIO_DEVICE_ATTR(dos_rst_max_thrd, 0644,
ad2s1210_show_reg, ad2s1210_store_reg,
AD2S1210_REG_DOS_RST_MAX_THRD);
-static IIO_DEVICE_ATTR(dos_rst_min_thrd, S_IRUGO | S_IWUSR,
+static IIO_DEVICE_ATTR(dos_rst_min_thrd, 0644,
ad2s1210_show_reg, ad2s1210_store_reg,
AD2S1210_REG_DOS_RST_MIN_THRD);
-static IIO_DEVICE_ATTR(lot_high_thrd, S_IRUGO | S_IWUSR,
+static IIO_DEVICE_ATTR(lot_high_thrd, 0644,
ad2s1210_show_reg, ad2s1210_store_reg,
AD2S1210_REG_LOT_HIGH_THRD);
-static IIO_DEVICE_ATTR(lot_low_thrd, S_IRUGO | S_IWUSR,
+static IIO_DEVICE_ATTR(lot_low_thrd, 0644,
ad2s1210_show_reg, ad2s1210_store_reg,
AD2S1210_REG_LOT_LOW_THRD);
diff --git a/drivers/staging/iio/resolver/ad2s90.c b/drivers/staging/iio/resolver/ad2s90.c
index 5b1c0db33e7f..b2270908f26f 100644
--- a/drivers/staging/iio/resolver/ad2s90.c
+++ b/drivers/staging/iio/resolver/ad2s90.c
@@ -47,7 +47,7 @@ error_ret:
}
static const struct iio_info ad2s90_info = {
- .read_raw = &ad2s90_read_raw,
+ .read_raw = ad2s90_read_raw,
.driver_module = THIS_MODULE,
};
diff --git a/drivers/staging/ks7010/TODO b/drivers/staging/ks7010/TODO
index 2938d35be5bb..d393ca58e231 100644
--- a/drivers/staging/ks7010/TODO
+++ b/drivers/staging/ks7010/TODO
@@ -18,8 +18,6 @@ First a few words what not to do (at least not blindly):
- don't be overly strict with the 80 char limit. Only if it REALLY makes the
code more readable
-- No '#if 0/1' removal unless the surrounding code is understood and removal is
- really OK. There might be some hints hidden there.
Now the TODOs:
@@ -29,6 +27,8 @@ Now the TODOs:
- fix the 'card removal' event when card is inserted when booting
- check what other upstream wireless mechanisms can be used instead of the
custom ones here
+- replace custom Michael MIC implementation with the kernel
+ implementation. This task is only required for a *clean* WEXT interface.
Please send any patches to:
Greg Kroah-Hartman <gregkh@linuxfoundation.org>
diff --git a/drivers/staging/ks7010/eap_packet.h b/drivers/staging/ks7010/eap_packet.h
index df7f760e4110..b2d25ef1cd6b 100644
--- a/drivers/staging/ks7010/eap_packet.h
+++ b/drivers/staging/ks7010/eap_packet.h
@@ -9,6 +9,8 @@
#define ETH_ALEN 6
#endif
+#define ETHER_HDR_SIZE 20
+
struct ether_hdr {
unsigned char h_dest[ETH_ALEN]; /* destination eth addr */
unsigned char h_source[ETH_ALEN]; /* source ether addr */
@@ -58,12 +60,15 @@ struct ieee802_1x_eapol_key {
* encrypt the Key field; 64-bit NTP timestamp MAY be used here
*/
unsigned char replay_counter[IEEE8021X_REPLAY_COUNTER_LEN];
- unsigned char key_iv[IEEE8021X_KEY_IV_LEN]; /* cryptographically random number */
+ unsigned char key_iv[IEEE8021X_KEY_IV_LEN]; /* cryptographically random
+ * number
+ */
unsigned char key_index; /*
* key flag in the most significant bit:
* 0 = broadcast (default key),
- * 1 = unicast (key mapping key); key index is in the
- * 7 least significant bits
+ * 1 = unicast (key mapping key);
+ * key index is in the 7 least
+ * significant bits
*/
/*
* HMAC-MD5 message integrity check computed with MS-MPPE-Send-Key as
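The ks7010_sdio.c rework that follows splits the old "length == 1 means CMD52" special case into dedicated single-byte and block helpers, which also removes any ambiguity about where sdio_readb() reports its error. A minimal sketch of the split, with hypothetical foo_ names; it assumes the caller has already claimed the SDIO host:

#include <linux/mmc/sdio_func.h>

/* single-byte register access (CMD52) */
static int foo_sdio_readb(struct sdio_func *func, unsigned int addr, u8 *byte)
{
	int ret;

	*byte = sdio_readb(func, addr, &ret);
	return ret;
}

static int foo_sdio_writeb(struct sdio_func *func, unsigned int addr, u8 byte)
{
	int ret;

	sdio_writeb(func, byte, addr, &ret);
	return ret;
}

/* block transfers (CMD53) map directly onto the sdio_memcpy_*io helpers */
static int foo_sdio_read(struct sdio_func *func, unsigned int addr,
			 u8 *buf, int len)
{
	return sdio_memcpy_fromio(func, buf, addr, len);
}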
diff --git a/drivers/staging/ks7010/ks7010_sdio.c b/drivers/staging/ks7010/ks7010_sdio.c
index 6f9f746a3a61..c325f4846209 100644
--- a/drivers/staging/ks7010/ks7010_sdio.c
+++ b/drivers/staging/ks7010/ks7010_sdio.c
@@ -32,8 +32,6 @@ static const struct sdio_device_id ks7010_sdio_ids[] = {
};
MODULE_DEVICE_TABLE(sdio, ks7010_sdio_ids);
-/* macro */
-
#define inc_txqhead(priv) \
(priv->tx_dev.qhead = (priv->tx_dev.qhead + 1) % TX_DEVICE_BUFF_SIZE)
#define inc_txqtail(priv) \
@@ -48,48 +46,51 @@ MODULE_DEVICE_TABLE(sdio, ks7010_sdio_ids);
#define cnt_rxqbody(priv) \
(((priv->rx_dev.qtail + RX_DEVICE_BUFF_SIZE) - (priv->rx_dev.qhead)) % RX_DEVICE_BUFF_SIZE)
+/* Read single byte from device address into byte (CMD52) */
+static int ks7010_sdio_readb(struct ks_wlan_private *priv, unsigned int address,
+ unsigned char *byte)
+{
+ struct sdio_func *func = priv->ks_sdio_card->func;
+ int ret;
+
+ *byte = sdio_readb(func, address, &ret);
+
+ return ret;
+}
+
+/* Read length bytes from device address into buffer (CMD53) */
static int ks7010_sdio_read(struct ks_wlan_private *priv, unsigned int address,
unsigned char *buffer, int length)
{
- struct ks_sdio_card *card;
- int rc;
+ struct sdio_func *func = priv->ks_sdio_card->func;
- card = priv->ks_wlan_hw.sdio_card;
+ return sdio_memcpy_fromio(func, buffer, address, length);
+}
- if (length == 1) /* CMD52 */
- *buffer = sdio_readb(card->func, address, &rc);
- else /* CMD53 multi-block transfer */
- rc = sdio_memcpy_fromio(card->func, buffer, address, length);
+/* Write single byte to device address (CMD52) */
+static int ks7010_sdio_writeb(struct ks_wlan_private *priv,
+ unsigned int address, unsigned char byte)
+{
+ struct sdio_func *func = priv->ks_sdio_card->func;
+ int ret;
- if (rc != 0)
- DPRINTK(1, "sdio error=%d size=%d\n", rc, length);
+ sdio_writeb(func, byte, address, &ret);
- return rc;
+ return ret;
}
+/* Write length bytes to device address from buffer (CMD53) */
static int ks7010_sdio_write(struct ks_wlan_private *priv, unsigned int address,
unsigned char *buffer, int length)
{
- struct ks_sdio_card *card;
- int rc;
-
- card = priv->ks_wlan_hw.sdio_card;
-
- if (length == 1) /* CMD52 */
- sdio_writeb(card->func, *buffer, address, &rc);
- else /* CMD53 */
- rc = sdio_memcpy_toio(card->func, address, buffer, length);
-
- if (rc != 0)
- DPRINTK(1, "sdio error=%d size=%d\n", rc, length);
+ struct sdio_func *func = priv->ks_sdio_card->func;
- return rc;
+ return sdio_memcpy_toio(func, address, buffer, length);
}
static void ks_wlan_hw_sleep_doze_request(struct ks_wlan_private *priv)
{
- unsigned char rw_data;
- int retval;
+ int ret;
DPRINTK(4, "\n");
@@ -97,14 +98,11 @@ static void ks_wlan_hw_sleep_doze_request(struct ks_wlan_private *priv)
atomic_set(&priv->sleepstatus.doze_request, 0);
if (atomic_read(&priv->sleepstatus.status) == 0) {
- rw_data = GCR_B_DOZE;
- retval =
- ks7010_sdio_write(priv, GCR_B, &rw_data, sizeof(rw_data));
- if (retval) {
- DPRINTK(1, " error : GCR_B=%02X\n", rw_data);
- goto out;
+ ret = ks7010_sdio_writeb(priv, GCR_B, GCR_B_DOZE);
+ if (ret) {
+ DPRINTK(1, " error : GCR_B\n");
+ goto set_sleep_mode;
}
- DPRINTK(4, "PMG SET!! : GCR_B=%02X\n", rw_data);
DPRINTK(3, "sleep_mode=SLP_SLEEP\n");
atomic_set(&priv->sleepstatus.status, 1);
priv->last_doze = jiffies;
@@ -112,14 +110,13 @@ static void ks_wlan_hw_sleep_doze_request(struct ks_wlan_private *priv)
DPRINTK(1, "sleep_mode=%d\n", priv->sleep_mode);
}
- out:
+set_sleep_mode:
priv->sleep_mode = atomic_read(&priv->sleepstatus.status);
}
static void ks_wlan_hw_sleep_wakeup_request(struct ks_wlan_private *priv)
{
- unsigned char rw_data;
- int retval;
+ int ret;
DPRINTK(4, "\n");
@@ -127,14 +124,12 @@ static void ks_wlan_hw_sleep_wakeup_request(struct ks_wlan_private *priv)
atomic_set(&priv->sleepstatus.wakeup_request, 0);
if (atomic_read(&priv->sleepstatus.status) == 1) {
- rw_data = WAKEUP_REQ;
- retval =
- ks7010_sdio_write(priv, WAKEUP, &rw_data, sizeof(rw_data));
- if (retval) {
- DPRINTK(1, " error : WAKEUP=%02X\n", rw_data);
- goto out;
+ ret = ks7010_sdio_writeb(priv, WAKEUP, WAKEUP_REQ);
+ if (ret) {
+ DPRINTK(1, " error : WAKEUP\n");
+ goto set_sleep_mode;
}
- DPRINTK(4, "wake up : WAKEUP=%02X\n", rw_data);
+ DPRINTK(4, "wake up : WAKEUP\n");
atomic_set(&priv->sleepstatus.status, 0);
priv->last_wakeup = jiffies;
++priv->wakeup_count;
@@ -142,24 +137,22 @@ static void ks_wlan_hw_sleep_wakeup_request(struct ks_wlan_private *priv)
DPRINTK(1, "sleep_mode=%d\n", priv->sleep_mode);
}
- out:
+set_sleep_mode:
priv->sleep_mode = atomic_read(&priv->sleepstatus.status);
}
void ks_wlan_hw_wakeup_request(struct ks_wlan_private *priv)
{
- unsigned char rw_data;
- int retval;
+ int ret;
DPRINTK(4, "\n");
if (atomic_read(&priv->psstatus.status) == PS_SNOOZE) {
- rw_data = WAKEUP_REQ;
- retval =
- ks7010_sdio_write(priv, WAKEUP, &rw_data, sizeof(rw_data));
- if (retval)
- DPRINTK(1, " error : WAKEUP=%02X\n", rw_data);
+ ret = ks7010_sdio_writeb(priv, WAKEUP, WAKEUP_REQ);
+ if (ret)
+ DPRINTK(1, " error : WAKEUP\n");
+ else
+ DPRINTK(4, "wake up : WAKEUP\n");
- DPRINTK(4, "wake up : WAKEUP=%02X\n", rw_data);
priv->last_wakeup = jiffies;
++priv->wakeup_count;
} else {
@@ -168,131 +161,110 @@ void ks_wlan_hw_wakeup_request(struct ks_wlan_private *priv)
}
}
-static int _ks_wlan_hw_power_save(struct ks_wlan_private *priv)
+static void _ks_wlan_hw_power_save(struct ks_wlan_private *priv)
{
- unsigned char rw_data;
- int retval;
+ unsigned char byte;
+ int ret;
- if (priv->reg.powermgt == POWMGT_ACTIVE_MODE)
- return 0;
+ if (priv->reg.power_mgmt == POWER_MGMT_ACTIVE)
+ return;
- if (priv->reg.operation_mode == MODE_INFRASTRUCTURE &&
- (priv->connect_status & CONNECT_STATUS_MASK) == CONNECT_STATUS) {
- if (priv->dev_state == DEVICE_STATE_SLEEP) {
- switch (atomic_read(&priv->psstatus.status)) {
- case PS_SNOOZE: /* 4 */
- break;
- default:
- DPRINTK(5, "\npsstatus.status=%d\npsstatus.confirm_wait=%d\npsstatus.snooze_guard=%d\ncnt_txqbody=%d\n",
- atomic_read(&priv->psstatus.status),
- atomic_read(&priv->psstatus.confirm_wait),
- atomic_read(&priv->psstatus.snooze_guard),
- cnt_txqbody(priv));
-
- if (!atomic_read(&priv->psstatus.confirm_wait)
- && !atomic_read(&priv->psstatus.snooze_guard)
- && !cnt_txqbody(priv)) {
- retval =
- ks7010_sdio_read(priv, INT_PENDING,
- &rw_data,
- sizeof(rw_data));
- if (retval) {
- DPRINTK(1,
- " error : INT_PENDING=%02X\n",
- rw_data);
- queue_delayed_work(priv->ks_wlan_hw.ks7010sdio_wq,
- &priv->ks_wlan_hw.rw_wq, 1);
- break;
- }
- if (!rw_data) {
- rw_data = GCR_B_DOZE;
- retval =
- ks7010_sdio_write(priv,
- GCR_B,
- &rw_data,
- sizeof(rw_data));
- if (retval) {
- DPRINTK(1,
- " error : GCR_B=%02X\n",
- rw_data);
- queue_delayed_work
- (priv->ks_wlan_hw.ks7010sdio_wq,
- &priv->ks_wlan_hw.rw_wq, 1);
- break;
- }
- DPRINTK(4,
- "PMG SET!! : GCR_B=%02X\n",
- rw_data);
- atomic_set(&priv->psstatus.
- status, PS_SNOOZE);
- DPRINTK(3,
- "psstatus.status=PS_SNOOZE\n");
- } else {
- queue_delayed_work(priv->ks_wlan_hw.ks7010sdio_wq,
- &priv->ks_wlan_hw.rw_wq, 1);
- }
- } else {
- queue_delayed_work(priv->ks_wlan_hw.
- ks7010sdio_wq,
- &priv->ks_wlan_hw.rw_wq,
- 0);
- }
- break;
- }
- }
+ if (priv->reg.operation_mode != MODE_INFRASTRUCTURE)
+ return;
+
+ if (!is_connect_status(priv->connect_status))
+ return;
+
+ if (priv->dev_state != DEVICE_STATE_SLEEP)
+ return;
+
+ if (atomic_read(&priv->psstatus.status) == PS_SNOOZE)
+ return;
+
+ DPRINTK(5, "\npsstatus.status=%d\npsstatus.confirm_wait=%d\npsstatus.snooze_guard=%d\ncnt_txqbody=%d\n",
+ atomic_read(&priv->psstatus.status),
+ atomic_read(&priv->psstatus.confirm_wait),
+ atomic_read(&priv->psstatus.snooze_guard),
+ cnt_txqbody(priv));
+
+ if (atomic_read(&priv->psstatus.confirm_wait) ||
+ atomic_read(&priv->psstatus.snooze_guard) ||
+ cnt_txqbody(priv)) {
+ queue_delayed_work(priv->wq, &priv->rw_dwork, 0);
+ return;
}
- return 0;
+ ret = ks7010_sdio_readb(priv, INT_PENDING, &byte);
+ if (ret) {
+ DPRINTK(1, " error : INT_PENDING\n");
+ goto queue_delayed_work;
+ }
+ if (byte)
+ goto queue_delayed_work;
+
+ ret = ks7010_sdio_writeb(priv, GCR_B, GCR_B_DOZE);
+ if (ret) {
+ DPRINTK(1, " error : GCR_B\n");
+ goto queue_delayed_work;
+ }
+ atomic_set(&priv->psstatus.status, PS_SNOOZE);
+ DPRINTK(3, "psstatus.status=PS_SNOOZE\n");
+
+ return;
+
+queue_delayed_work:
+ queue_delayed_work(priv->wq, &priv->rw_dwork, 1);
}
int ks_wlan_hw_power_save(struct ks_wlan_private *priv)
{
- queue_delayed_work(priv->ks_wlan_hw.ks7010sdio_wq,
- &priv->ks_wlan_hw.rw_wq, 1);
+ queue_delayed_work(priv->wq, &priv->rw_dwork, 1);
return 0;
}
static int enqueue_txdev(struct ks_wlan_private *priv, unsigned char *p,
unsigned long size,
- void (*complete_handler)(void *arg1, void *arg2),
- void *arg1, void *arg2)
+ void (*complete_handler)(struct ks_wlan_private *priv,
+ struct sk_buff *skb),
+ struct sk_buff *skb)
{
struct tx_device_buffer *sp;
+ int ret;
if (priv->dev_state < DEVICE_STATE_BOOT) {
- kfree(p);
- if (complete_handler)
- (*complete_handler) (arg1, arg2);
- return 1;
+ ret = -EPERM;
+ goto err_complete;
}
if ((TX_DEVICE_BUFF_SIZE - 1) <= cnt_txqbody(priv)) {
- /* in case of buffer overflow */
DPRINTK(1, "tx buffer overflow\n");
- kfree(p);
- if (complete_handler)
- (*complete_handler) (arg1, arg2);
- return 1;
+ ret = -EOVERFLOW;
+ goto err_complete;
}
sp = &priv->tx_dev.tx_dev_buff[priv->tx_dev.qtail];
sp->sendp = p;
sp->size = size;
sp->complete_handler = complete_handler;
- sp->arg1 = arg1;
- sp->arg2 = arg2;
+ sp->skb = skb;
inc_txqtail(priv);
return 0;
+
+err_complete:
+ kfree(p);
+ if (complete_handler)
+ (*complete_handler)(priv, skb);
+
+ return ret;
}
/* write data */
static int write_to_device(struct ks_wlan_private *priv, unsigned char *buffer,
unsigned long size)
{
- int retval;
- unsigned char rw_data;
struct hostif_hdr *hdr;
+ int ret;
hdr = (struct hostif_hdr *)buffer;
@@ -302,59 +274,53 @@ static int write_to_device(struct ks_wlan_private *priv, unsigned char *buffer,
return 0;
}
- retval = ks7010_sdio_write(priv, DATA_WINDOW, buffer, size);
- if (retval) {
- DPRINTK(1, " write error : retval=%d\n", retval);
- return -4;
+ ret = ks7010_sdio_write(priv, DATA_WINDOW, buffer, size);
+ if (ret) {
+ DPRINTK(1, " write error : retval=%d\n", ret);
+ return ret;
}
- rw_data = WRITE_STATUS_BUSY;
- retval =
- ks7010_sdio_write(priv, WRITE_STATUS, &rw_data, sizeof(rw_data));
- if (retval) {
- DPRINTK(1, " error : WRITE_STATUS=%02X\n", rw_data);
- return -3;
+ ret = ks7010_sdio_writeb(priv, WRITE_STATUS, REG_STATUS_BUSY);
+ if (ret) {
+ DPRINTK(1, " error : WRITE_STATUS\n");
+ return ret;
}
return 0;
}
-static void tx_device_task(void *dev)
+static void tx_device_task(struct ks_wlan_private *priv)
{
- struct ks_wlan_private *priv = (struct ks_wlan_private *)dev;
struct tx_device_buffer *sp;
- int rc = 0;
+ int ret;
DPRINTK(4, "\n");
- if (cnt_txqbody(priv) > 0
- && atomic_read(&priv->psstatus.status) != PS_SNOOZE) {
- sp = &priv->tx_dev.tx_dev_buff[priv->tx_dev.qhead];
- if (priv->dev_state >= DEVICE_STATE_BOOT) {
- rc = write_to_device(priv, sp->sendp, sp->size);
- if (rc) {
- DPRINTK(1, "write_to_device error !!(%d)\n",
- rc);
- queue_delayed_work(priv->ks_wlan_hw.
- ks7010sdio_wq,
- &priv->ks_wlan_hw.rw_wq, 1);
- return;
- }
- }
- kfree(sp->sendp); /* allocated memory free */
- if (sp->complete_handler) /* TX Complete */
- (*sp->complete_handler) (sp->arg1, sp->arg2);
- inc_txqhead(priv);
+ if (cnt_txqbody(priv) <= 0 ||
+ atomic_read(&priv->psstatus.status) == PS_SNOOZE)
+ return;
- if (cnt_txqbody(priv) > 0) {
- queue_delayed_work(priv->ks_wlan_hw.ks7010sdio_wq,
- &priv->ks_wlan_hw.rw_wq, 0);
+ sp = &priv->tx_dev.tx_dev_buff[priv->tx_dev.qhead];
+ if (priv->dev_state >= DEVICE_STATE_BOOT) {
+ ret = write_to_device(priv, sp->sendp, sp->size);
+ if (ret) {
+ DPRINTK(1, "write_to_device error !!(%d)\n", ret);
+ queue_delayed_work(priv->wq, &priv->rw_dwork, 1);
+ return;
}
}
+ kfree(sp->sendp);
+ if (sp->complete_handler) /* TX Complete */
+ (*sp->complete_handler)(priv, sp->skb);
+ inc_txqhead(priv);
+
+ if (cnt_txqbody(priv) > 0)
+ queue_delayed_work(priv->wq, &priv->rw_dwork, 0);
}
int ks_wlan_hw_tx(struct ks_wlan_private *priv, void *p, unsigned long size,
- void (*complete_handler)(void *arg1, void *arg2),
- void *arg1, void *arg2)
+ void (*complete_handler)(struct ks_wlan_private *priv,
+ struct sk_buff *skb),
+ struct sk_buff *skb)
{
int result = 0;
struct hostif_hdr *hdr;
@@ -372,13 +338,12 @@ int ks_wlan_hw_tx(struct ks_wlan_private *priv, void *p, unsigned long size,
DPRINTK(4, "event=%04X\n", hdr->event);
spin_lock(&priv->tx_dev.tx_dev_lock);
- result = enqueue_txdev(priv, p, size, complete_handler, arg1, arg2);
+ result = enqueue_txdev(priv, p, size, complete_handler, skb);
spin_unlock(&priv->tx_dev.tx_dev_lock);
- if (cnt_txqbody(priv) > 0) {
- queue_delayed_work(priv->ks_wlan_hw.ks7010sdio_wq,
- &priv->ks_wlan_hw.rw_wq, 0);
- }
+ if (cnt_txqbody(priv) > 0)
+ queue_delayed_work(priv->wq, &priv->rw_dwork, 0);
+
return result;
}
@@ -395,34 +360,30 @@ static void rx_event_task(unsigned long dev)
inc_rxqhead(priv);
if (cnt_rxqbody(priv) > 0)
- tasklet_schedule(&priv->ks_wlan_hw.rx_bh_task);
+ tasklet_schedule(&priv->rx_bh_task);
}
}
-static void ks_wlan_hw_rx(void *dev, uint16_t size)
+static void ks_wlan_hw_rx(struct ks_wlan_private *priv, uint16_t size)
{
- struct ks_wlan_private *priv = (struct ks_wlan_private *)dev;
- int retval;
+ int ret;
struct rx_device_buffer *rx_buffer;
struct hostif_hdr *hdr;
- unsigned char read_status;
unsigned short event = 0;
DPRINTK(4, "\n");
/* receive data */
if (cnt_rxqbody(priv) >= (RX_DEVICE_BUFF_SIZE - 1)) {
- /* in case of buffer overflow */
DPRINTK(1, "rx buffer overflow\n");
- goto error_out;
+ return;
}
rx_buffer = &priv->rx_dev.rx_dev_buff[priv->rx_dev.qtail];
- retval =
- ks7010_sdio_read(priv, DATA_WINDOW, &rx_buffer->data[0],
- hif_align_size(size));
- if (retval)
- goto error_out;
+ ret = ks7010_sdio_read(priv, DATA_WINDOW, &rx_buffer->data[0],
+ hif_align_size(size));
+ if (ret)
+ return;
/* length check */
if (size > 2046 || size == 0) {
@@ -432,15 +393,12 @@ static void ks_wlan_hw_rx(void *dev, uint16_t size)
DUMP_PREFIX_OFFSET,
rx_buffer->data, 32);
#endif
- /* rx_status update */
- read_status = READ_STATUS_IDLE;
- retval =
- ks7010_sdio_write(priv, READ_STATUS, &read_status,
- sizeof(read_status));
- if (retval)
- DPRINTK(1, " error : READ_STATUS=%02X\n", read_status);
-
- goto error_out;
+ ret = ks7010_sdio_writeb(priv, READ_STATUS, REG_STATUS_IDLE);
+ if (ret)
+ DPRINTK(1, " error : READ_STATUS\n");
+
+ /* length check failed */
+ return;
}
hdr = (struct hostif_hdr *)&rx_buffer->data[0];
@@ -448,15 +406,9 @@ static void ks_wlan_hw_rx(void *dev, uint16_t size)
event = hdr->event;
inc_rxqtail(priv);
- /* read status update */
- read_status = READ_STATUS_IDLE;
- retval =
- ks7010_sdio_write(priv, READ_STATUS, &read_status,
- sizeof(read_status));
- if (retval)
- DPRINTK(1, " error : READ_STATUS=%02X\n", read_status);
-
- DPRINTK(4, "READ_STATUS=%02X\n", read_status);
+ ret = ks7010_sdio_writeb(priv, READ_STATUS, REG_STATUS_IDLE);
+ if (ret)
+ DPRINTK(1, " error : READ_STATUS\n");
if (atomic_read(&priv->psstatus.confirm_wait)) {
if (IS_HIF_CONF(event)) {
@@ -465,213 +417,162 @@ static void ks_wlan_hw_rx(void *dev, uint16_t size)
}
}
- /* rx_event_task((void *)priv); */
- tasklet_schedule(&priv->ks_wlan_hw.rx_bh_task);
-
- error_out:
- return;
+ tasklet_schedule(&priv->rx_bh_task);
}
static void ks7010_rw_function(struct work_struct *work)
{
- struct hw_info_t *hw;
struct ks_wlan_private *priv;
- unsigned char rw_data;
- int retval;
+ unsigned char byte;
+ int ret;
- hw = container_of(work, struct hw_info_t, rw_wq.work);
- priv = container_of(hw, struct ks_wlan_private, ks_wlan_hw);
+ priv = container_of(work, struct ks_wlan_private, rw_dwork.work);
DPRINTK(4, "\n");
- /* wiat after DOZE */
+ /* wait after DOZE */
if (time_after(priv->last_doze + ((30 * HZ) / 1000), jiffies)) {
DPRINTK(4, "wait after DOZE\n");
- queue_delayed_work(priv->ks_wlan_hw.ks7010sdio_wq,
- &priv->ks_wlan_hw.rw_wq, 1);
+ queue_delayed_work(priv->wq, &priv->rw_dwork, 1);
return;
}
- /* wiat after WAKEUP */
+ /* wait after WAKEUP */
while (time_after(priv->last_wakeup + ((30 * HZ) / 1000), jiffies)) {
DPRINTK(4, "wait after WAKEUP\n");
-/* queue_delayed_work(priv->ks_wlan_hw.ks7010sdio_wq,&priv->ks_wlan_hw.rw_wq,
- (priv->last_wakeup + ((30*HZ)/1000) - jiffies));*/
- dev_info(&priv->ks_wlan_hw.sdio_card->func->dev,
+ dev_info(&priv->ks_sdio_card->func->dev,
"wake: %lu %lu\n",
priv->last_wakeup + (30 * HZ) / 1000,
jiffies);
msleep(30);
}
- sdio_claim_host(priv->ks_wlan_hw.sdio_card->func);
+ sdio_claim_host(priv->ks_sdio_card->func);
/* power save wakeup */
if (atomic_read(&priv->psstatus.status) == PS_SNOOZE) {
if (cnt_txqbody(priv) > 0) {
ks_wlan_hw_wakeup_request(priv);
- queue_delayed_work(priv->ks_wlan_hw.ks7010sdio_wq,
- &priv->ks_wlan_hw.rw_wq, 1);
+ queue_delayed_work(priv->wq, &priv->rw_dwork, 1);
}
- goto err_out;
+ goto release_host;
}
/* sleep mode doze */
if (atomic_read(&priv->sleepstatus.doze_request) == 1) {
ks_wlan_hw_sleep_doze_request(priv);
- goto err_out;
+ goto release_host;
}
/* sleep mode wakeup */
if (atomic_read(&priv->sleepstatus.wakeup_request) == 1) {
ks_wlan_hw_sleep_wakeup_request(priv);
- goto err_out;
+ goto release_host;
}
/* read (WriteStatus/ReadDataSize FN1:00_0014) */
- retval =
- ks7010_sdio_read(priv, WSTATUS_RSIZE, &rw_data, sizeof(rw_data));
- if (retval) {
- DPRINTK(1, " error : WSTATUS_RSIZE=%02X psstatus=%d\n", rw_data,
+ ret = ks7010_sdio_readb(priv, WSTATUS_RSIZE, &byte);
+ if (ret) {
+ DPRINTK(1, " error : WSTATUS_RSIZE psstatus=%d\n",
atomic_read(&priv->psstatus.status));
- goto err_out;
+ goto release_host;
}
- DPRINTK(4, "WSTATUS_RSIZE=%02X\n", rw_data);
+ DPRINTK(4, "WSTATUS_RSIZE=%02X\n", byte);
- if (rw_data & RSIZE_MASK) { /* Read schedule */
- ks_wlan_hw_rx((void *)priv,
- (uint16_t)((rw_data & RSIZE_MASK) << 4));
+ if (byte & RSIZE_MASK) { /* Read schedule */
+ ks_wlan_hw_rx(priv, (uint16_t)((byte & RSIZE_MASK) << 4));
}
- if ((rw_data & WSTATUS_MASK))
- tx_device_task((void *)priv);
+ if ((byte & WSTATUS_MASK))
+ tx_device_task(priv);
_ks_wlan_hw_power_save(priv);
- err_out:
- sdio_release_host(priv->ks_wlan_hw.sdio_card->func);
+release_host:
+ sdio_release_host(priv->ks_sdio_card->func);
}
static void ks_sdio_interrupt(struct sdio_func *func)
{
- int retval;
+ int ret;
struct ks_sdio_card *card;
struct ks_wlan_private *priv;
- unsigned char status, rsize, rw_data;
+ unsigned char status, rsize, byte;
card = sdio_get_drvdata(func);
priv = card->priv;
DPRINTK(4, "\n");
- if (priv->dev_state >= DEVICE_STATE_BOOT) {
- retval =
- ks7010_sdio_read(priv, INT_PENDING, &status,
- sizeof(status));
- if (retval) {
- DPRINTK(1, "read INT_PENDING Failed!!(%d)\n", retval);
- goto intr_out;
+ if (priv->dev_state < DEVICE_STATE_BOOT)
+ goto queue_delayed_work;
+
+ ret = ks7010_sdio_readb(priv, INT_PENDING, &status);
+ if (ret) {
+ DPRINTK(1, "error : INT_PENDING\n");
+ goto queue_delayed_work;
+ }
+ DPRINTK(4, "INT_PENDING=%02X\n", status);
+
+ /* schedule task for interrupt status */
+ /* bit7 -> Write General Communication B register */
+ /* read (General Communication B register) */
+ /* bit5 -> Write Status Idle */
+ /* bit2 -> Read Status Busy */
+ if (status & INT_GCR_B ||
+ atomic_read(&priv->psstatus.status) == PS_SNOOZE) {
+ ret = ks7010_sdio_readb(priv, GCR_B, &byte);
+ if (ret) {
+ DPRINTK(1, " error : GCR_B\n");
+ goto queue_delayed_work;
}
- DPRINTK(4, "INT_PENDING=%02X\n", rw_data);
-
- /* schedule task for interrupt status */
- /* bit7 -> Write General Communication B register */
- /* read (General Communication B register) */
- /* bit5 -> Write Status Idle */
- /* bit2 -> Read Status Busy */
- if (status & INT_GCR_B
- || atomic_read(&priv->psstatus.status) == PS_SNOOZE) {
- retval =
- ks7010_sdio_read(priv, GCR_B, &rw_data,
- sizeof(rw_data));
- if (retval) {
- DPRINTK(1, " error : GCR_B=%02X\n", rw_data);
- goto intr_out;
- }
- /* DPRINTK(1, "GCR_B=%02X\n", rw_data); */
- if (rw_data == GCR_B_ACTIVE) {
- if (atomic_read(&priv->psstatus.status) ==
- PS_SNOOZE) {
- atomic_set(&priv->psstatus.status,
- PS_WAKEUP);
- priv->wakeup_count = 0;
- }
- complete(&priv->psstatus.wakeup_wait);
+ if (byte == GCR_B_ACTIVE) {
+ if (atomic_read(&priv->psstatus.status) == PS_SNOOZE) {
+ atomic_set(&priv->psstatus.status, PS_WAKEUP);
+ priv->wakeup_count = 0;
}
+ complete(&priv->psstatus.wakeup_wait);
}
+ }
- do {
- /* read (WriteStatus/ReadDataSize FN1:00_0014) */
- retval =
- ks7010_sdio_read(priv, WSTATUS_RSIZE, &rw_data,
- sizeof(rw_data));
- if (retval) {
- DPRINTK(1, " error : WSTATUS_RSIZE=%02X\n",
- rw_data);
- goto intr_out;
- }
- DPRINTK(4, "WSTATUS_RSIZE=%02X\n", rw_data);
- rsize = rw_data & RSIZE_MASK;
- if (rsize) { /* Read schedule */
- ks_wlan_hw_rx((void *)priv,
- (uint16_t)(rsize << 4));
- }
- if (rw_data & WSTATUS_MASK) {
-#if 0
- if (status & INT_WRITE_STATUS
- && !cnt_txqbody(priv)) {
- /* dummy write for interrupt clear */
- rw_data = 0;
- retval =
- ks7010_sdio_write(priv, DATA_WINDOW,
- &rw_data,
- sizeof(rw_data));
- if (retval) {
- DPRINTK(1,
- "write DATA_WINDOW Failed!!(%d)\n",
- retval);
- }
- status &= ~INT_WRITE_STATUS;
- } else {
-#endif
- if (atomic_read(&priv->psstatus.status) == PS_SNOOZE) {
- if (cnt_txqbody(priv)) {
- ks_wlan_hw_wakeup_request(priv);
- queue_delayed_work
- (priv->ks_wlan_hw.
- ks7010sdio_wq,
- &priv->ks_wlan_hw.
- rw_wq, 1);
- return;
- }
- } else {
- tx_device_task((void *)priv);
- }
-#if 0
+ do {
+ /* read (WriteStatus/ReadDataSize FN1:00_0014) */
+ ret = ks7010_sdio_readb(priv, WSTATUS_RSIZE, &byte);
+ if (ret) {
+ DPRINTK(1, " error : WSTATUS_RSIZE\n");
+ goto queue_delayed_work;
+ }
+ DPRINTK(4, "WSTATUS_RSIZE=%02X\n", byte);
+ rsize = byte & RSIZE_MASK;
+ if (rsize != 0) /* Read schedule */
+ ks_wlan_hw_rx(priv, (uint16_t)(rsize << 4));
+
+ if (byte & WSTATUS_MASK) {
+ if (atomic_read(&priv->psstatus.status) == PS_SNOOZE) {
+ if (cnt_txqbody(priv)) {
+ ks_wlan_hw_wakeup_request(priv);
+ queue_delayed_work(priv->wq, &priv->rw_dwork, 1);
+ return;
}
-#endif
+ } else {
+ tx_device_task(priv);
}
- } while (rsize);
- }
+ }
+ } while (rsize);
- intr_out:
- queue_delayed_work(priv->ks_wlan_hw.ks7010sdio_wq,
- &priv->ks_wlan_hw.rw_wq, 0);
+queue_delayed_work:
+ queue_delayed_work(priv->wq, &priv->rw_dwork, 0);
}
static int trx_device_init(struct ks_wlan_private *priv)
{
- /* initialize values (tx) */
priv->tx_dev.qhead = 0;
priv->tx_dev.qtail = 0;
- /* initialize values (rx) */
priv->rx_dev.qhead = 0;
priv->rx_dev.qtail = 0;
- /* initialize spinLock (tx,rx) */
spin_lock_init(&priv->tx_dev.tx_dev_lock);
spin_lock_init(&priv->rx_dev.rx_dev_lock);
- tasklet_init(&priv->ks_wlan_hw.rx_bh_task, rx_event_task,
- (unsigned long)priv);
+ tasklet_init(&priv->rx_bh_task, rx_event_task, (unsigned long)priv);
return 0;
}
@@ -683,106 +584,100 @@ static void trx_device_exit(struct ks_wlan_private *priv)
/* tx buffer clear */
while (cnt_txqbody(priv) > 0) {
sp = &priv->tx_dev.tx_dev_buff[priv->tx_dev.qhead];
- kfree(sp->sendp); /* allocated memory free */
+ kfree(sp->sendp);
if (sp->complete_handler) /* TX Complete */
- (*sp->complete_handler) (sp->arg1, sp->arg2);
+ (*sp->complete_handler)(priv, sp->skb);
inc_txqhead(priv);
}
- tasklet_kill(&priv->ks_wlan_hw.rx_bh_task);
+ tasklet_kill(&priv->rx_bh_task);
}
static int ks7010_sdio_update_index(struct ks_wlan_private *priv, u32 index)
{
- int rc = 0;
- int retval;
+ int ret;
unsigned char *data_buf;
data_buf = kmalloc(sizeof(u32), GFP_KERNEL);
- if (!data_buf) {
- rc = 1;
- goto error_out;
- }
+ if (!data_buf)
+ return -ENOMEM;
memcpy(data_buf, &index, sizeof(index));
- retval = ks7010_sdio_write(priv, WRITE_INDEX, data_buf, sizeof(index));
- if (retval) {
- rc = 2;
- goto error_out;
- }
+ ret = ks7010_sdio_write(priv, WRITE_INDEX, data_buf, sizeof(index));
+ if (ret)
+ goto err_free_data_buf;
- retval = ks7010_sdio_write(priv, READ_INDEX, data_buf, sizeof(index));
- if (retval) {
- rc = 3;
- goto error_out;
- }
- error_out:
+ ret = ks7010_sdio_write(priv, READ_INDEX, data_buf, sizeof(index));
+ if (ret)
+ goto err_free_data_buf;
+
+ return 0;
+
+err_free_data_buf:
kfree(data_buf);
- return rc;
+
+ return ret;
}
#define ROM_BUFF_SIZE (64 * 1024)
static int ks7010_sdio_data_compare(struct ks_wlan_private *priv, u32 address,
unsigned char *data, unsigned int size)
{
- int rc = 0;
- int retval;
+ int ret;
unsigned char *read_buf;
read_buf = kmalloc(ROM_BUFF_SIZE, GFP_KERNEL);
- if (!read_buf) {
- rc = 1;
- goto error_out;
- }
- retval = ks7010_sdio_read(priv, address, read_buf, size);
- if (retval) {
- rc = 2;
- goto error_out;
- }
- retval = memcmp(data, read_buf, size);
+ if (!read_buf)
+ return -ENOMEM;
- if (retval) {
- DPRINTK(0, "data compare error (%d)\n", retval);
- rc = 3;
- goto error_out;
+ ret = ks7010_sdio_read(priv, address, read_buf, size);
+ if (ret)
+ goto err_free_read_buf;
+
+ if (memcmp(data, read_buf, size) != 0) {
+ ret = -EIO;
+ DPRINTK(0, "data compare error (%d)\n", ret);
+ goto err_free_read_buf;
}
- error_out:
+
+ return 0;
+
+err_free_read_buf:
kfree(read_buf);
- return rc;
+
+ return ret;
}
-static int ks7010_upload_firmware(struct ks_wlan_private *priv,
- struct ks_sdio_card *card)
+static int ks7010_upload_firmware(struct ks_sdio_card *card)
{
+ struct ks_wlan_private *priv = card->priv;
unsigned int size, offset, n = 0;
unsigned char *rom_buf;
- unsigned char rw_data = 0;
- int retval, rc = 0;
- int length;
+ unsigned char byte = 0;
+ int ret;
+ unsigned int length;
const struct firmware *fw_entry = NULL;
- /* buffer allocate */
rom_buf = kmalloc(ROM_BUFF_SIZE, GFP_KERNEL);
if (!rom_buf)
- return 3;
+ return -ENOMEM;
sdio_claim_host(card->func);
/* Firmware running ? */
- retval = ks7010_sdio_read(priv, GCR_A, &rw_data, sizeof(rw_data));
- if (rw_data == GCR_A_RUN) {
+ ret = ks7010_sdio_readb(priv, GCR_A, &byte);
+ if (byte == GCR_A_RUN) {
DPRINTK(0, "MAC firmware running ...\n");
- rc = 0;
- goto error_out0;
+ goto release_host_and_free;
}
- retval = request_firmware(&fw_entry, ROM_FILE, &priv->ks_wlan_hw.sdio_card->func->dev);
- if (retval)
- goto error_out0;
+ ret = request_firmware(&fw_entry, ROM_FILE,
+ &priv->ks_sdio_card->func->dev);
+ if (ret)
+ goto release_host_and_free;
length = fw_entry->size;
- /* Load Program */
n = 0;
do {
if (length >= ROM_BUFF_SIZE) {
@@ -796,77 +691,62 @@ static int ks7010_upload_firmware(struct ks_wlan_private *priv,
if (size == 0)
break;
memcpy(rom_buf, fw_entry->data + n, size);
- /* Update write index */
+
offset = n;
- retval =
- ks7010_sdio_update_index(priv,
- KS7010_IRAM_ADDRESS + offset);
- if (retval) {
- rc = 6;
- goto error_out1;
- }
+ ret = ks7010_sdio_update_index(priv, KS7010_IRAM_ADDRESS + offset);
+ if (ret)
+ goto release_firmware;
- /* Write data */
- retval = ks7010_sdio_write(priv, DATA_WINDOW, rom_buf, size);
- if (retval) {
- rc = 8;
- goto error_out1;
- }
+ ret = ks7010_sdio_write(priv, DATA_WINDOW, rom_buf, size);
+ if (ret)
+ goto release_firmware;
+
+ ret = ks7010_sdio_data_compare(priv, DATA_WINDOW, rom_buf, size);
+ if (ret)
+ goto release_firmware;
- /* compare */
- retval =
- ks7010_sdio_data_compare(priv, DATA_WINDOW, rom_buf, size);
- if (retval) {
- rc = 9;
- goto error_out1;
- }
n += size;
} while (size);
- /* Remap request */
- rw_data = GCR_A_REMAP;
- retval = ks7010_sdio_write(priv, GCR_A, &rw_data, sizeof(rw_data));
- if (retval) {
- rc = 11;
- goto error_out1;
- }
- DPRINTK(4, " REMAP Request : GCR_A=%02X\n", rw_data);
+ ret = ks7010_sdio_writeb(priv, GCR_A, GCR_A_REMAP);
+ if (ret)
+ goto release_firmware;
+
+ DPRINTK(4, " REMAP Request : GCR_A\n");
/* Firmware running check */
for (n = 0; n < 50; ++n) {
mdelay(10); /* wait_ms(10); */
- retval =
- ks7010_sdio_read(priv, GCR_A, &rw_data, sizeof(rw_data));
- if (retval) {
- rc = 11;
- goto error_out1;
- }
- if (rw_data == GCR_A_RUN)
+ ret = ks7010_sdio_readb(priv, GCR_A, &byte);
+ if (ret)
+ goto release_firmware;
+
+ if (byte == GCR_A_RUN)
break;
}
DPRINTK(4, "firmware wakeup (%d)!!!!\n", n);
if ((50) <= n) {
DPRINTK(1, "firmware can't start\n");
- rc = 12;
- goto error_out1;
+ ret = -EIO;
+ goto release_firmware;
}
- rc = 0;
+ ret = 0;
- error_out1:
+ release_firmware:
release_firmware(fw_entry);
- error_out0:
+ release_host_and_free:
sdio_release_host(card->func);
kfree(rom_buf);
- return rc;
+
+ return ret;
}
static void ks7010_card_init(struct ks_wlan_private *priv)
{
DPRINTK(5, "\ncard_init_task()\n");
- /* init_waitqueue_head(&priv->confirm_wait); */
init_completion(&priv->confirm_wait);
DPRINTK(5, "init_completion()\n");
@@ -881,7 +761,7 @@ static void ks7010_card_init(struct ks_wlan_private *priv)
DPRINTK(1, "wait time out!! SME_START\n");
}
- if (priv->mac_address_valid && priv->version_size)
+ if (priv->mac_address_valid && priv->version_size != 0)
priv->dev_state = DEVICE_STATE_PREINIT;
hostif_sme_enqueue(priv, SME_GET_EEPROM_CKSUM);
@@ -920,7 +800,7 @@ static void ks7010_init_defaults(struct ks_wlan_private *priv)
{
priv->reg.tx_rate = TX_RATE_AUTO;
priv->reg.preamble = LONG_PREAMBLE;
- priv->reg.powermgt = POWMGT_ACTIVE_MODE;
+ priv->reg.power_mgmt = POWER_MGMT_ACTIVE;
priv->reg.scan_type = ACTIVE_SCAN;
priv->reg.beacon_lost_count = 20;
priv->reg.rts = 2347UL;
@@ -949,7 +829,7 @@ static int ks7010_sdio_probe(struct sdio_func *func,
struct ks_wlan_private *priv;
struct ks_sdio_card *card;
struct net_device *netdev;
- unsigned char rw_data;
+ unsigned char byte;
int ret;
DPRINTK(5, "ks7010_sdio_probe()\n");
@@ -957,63 +837,53 @@ static int ks7010_sdio_probe(struct sdio_func *func,
priv = NULL;
netdev = NULL;
- /* initialize ks_sdio_card */
card = kzalloc(sizeof(*card), GFP_KERNEL);
if (!card)
return -ENOMEM;
card->func = func;
- spin_lock_init(&card->lock);
- /*** Initialize SDIO ***/
sdio_claim_host(func);
- /* bus setting */
- /* Issue config request to override clock rate */
-
- /* function blocksize set */
ret = sdio_set_block_size(func, KS7010_IO_BLOCK_SIZE);
DPRINTK(5, "multi_block=%d sdio_set_block_size()=%d %d\n",
func->card->cccr.multi_block, func->cur_blksize, ret);
- /* Allocate the slot current */
-
- /* function enable */
ret = sdio_enable_func(func);
DPRINTK(5, "sdio_enable_func() %d\n", ret);
if (ret)
- goto error_free_card;
+ goto err_free_card;
/* interrupt disable */
sdio_writeb(func, 0, INT_ENABLE, &ret);
if (ret)
- goto error_free_card;
+ goto err_free_card;
sdio_writeb(func, 0xff, INT_PENDING, &ret);
if (ret)
- goto error_disable_func;
+ goto err_disable_func;
/* setup interrupt handler */
ret = sdio_claim_irq(func, ks_sdio_interrupt);
if (ret)
- goto error_disable_func;
+ goto err_disable_func;
sdio_release_host(func);
sdio_set_drvdata(func, card);
- DPRINTK(5, "class = 0x%X, vendor = 0x%X, "
- "device = 0x%X\n", func->class, func->vendor, func->device);
+ DPRINTK(5, "class = 0x%X, vendor = 0x%X, device = 0x%X\n",
+ func->class, func->vendor, func->device);
/* private memory allocate */
netdev = alloc_etherdev(sizeof(*priv));
if (!netdev) {
dev_err(&card->func->dev, "ks7010 : Unable to alloc new net device\n");
- goto error_release_irq;
+ goto err_release_irq;
}
if (dev_alloc_name(netdev, "wlan%d") < 0) {
dev_err(&card->func->dev,
"ks7010 : Couldn't get name!\n");
- goto error_free_netdev;
+ goto err_free_netdev;
}
priv = netdev_priv(netdev);
@@ -1022,18 +892,13 @@ static int ks7010_sdio_probe(struct sdio_func *func,
SET_NETDEV_DEV(netdev, &card->func->dev); /* for create sysfs symlinks */
/* private memory initialize */
- priv->ks_wlan_hw.sdio_card = card;
- init_completion(&priv->ks_wlan_hw.ks7010_sdio_wait);
- priv->ks_wlan_hw.read_buf = NULL;
- priv->ks_wlan_hw.read_buf = kmalloc(RX_DATA_SIZE, GFP_KERNEL);
- if (!priv->ks_wlan_hw.read_buf)
- goto error_free_netdev;
+ priv->ks_sdio_card = card;
priv->dev_state = DEVICE_STATE_PREBOOT;
priv->net_dev = netdev;
priv->firmware_version[0] = '\0';
priv->version_size = 0;
- priv->last_doze = jiffies; /* set current jiffies */
+ priv->last_doze = jiffies;
priv->last_wakeup = jiffies;
memset(&priv->nstats, 0, sizeof(priv->nstats));
memset(&priv->wstats, 0, sizeof(priv->wstats));
@@ -1049,64 +914,57 @@ static int ks7010_sdio_probe(struct sdio_func *func,
ks7010_init_defaults(priv);
- /* Upload firmware */
- ret = ks7010_upload_firmware(priv, card); /* firmware load */
+ ret = ks7010_upload_firmware(card);
if (ret) {
dev_err(&card->func->dev,
"ks7010: firmware load failed !! return code = %d\n",
ret);
- goto error_free_read_buf;
+ goto err_free_netdev;
}
/* interrupt setting */
/* clear Interrupt status write (ARMtoSD_InterruptPending FN1:00_0024) */
- rw_data = 0xff;
sdio_claim_host(func);
- ret = ks7010_sdio_write(priv, INT_PENDING, &rw_data, sizeof(rw_data));
+ ret = ks7010_sdio_writeb(priv, INT_PENDING, 0xff);
sdio_release_host(func);
if (ret)
- DPRINTK(1, " error : INT_PENDING=%02X\n", rw_data);
+ DPRINTK(1, " error : INT_PENDING\n");
- DPRINTK(4, " clear Interrupt : INT_PENDING=%02X\n", rw_data);
-
- /* enable ks7010sdio interrupt (INT_GCR_B|INT_READ_STATUS|INT_WRITE_STATUS) */
- rw_data = (INT_GCR_B | INT_READ_STATUS | INT_WRITE_STATUS);
+ /* enable ks7010sdio interrupt */
+ byte = (INT_GCR_B | INT_READ_STATUS | INT_WRITE_STATUS);
sdio_claim_host(func);
- ret = ks7010_sdio_write(priv, INT_ENABLE, &rw_data, sizeof(rw_data));
+ ret = ks7010_sdio_writeb(priv, INT_ENABLE, byte);
sdio_release_host(func);
if (ret)
- DPRINTK(1, " error : INT_ENABLE=%02X\n", rw_data);
+ DPRINTK(1, " err : INT_ENABLE\n");
- DPRINTK(4, " enable Interrupt : INT_ENABLE=%02X\n", rw_data);
+ DPRINTK(4, " enable Interrupt : INT_ENABLE=%02X\n", byte);
priv->dev_state = DEVICE_STATE_BOOT;
- priv->ks_wlan_hw.ks7010sdio_wq = create_workqueue("ks7010sdio_wq");
- if (!priv->ks_wlan_hw.ks7010sdio_wq) {
+ priv->wq = create_workqueue("wq");
+ if (!priv->wq) {
DPRINTK(1, "create_workqueue failed !!\n");
- goto error_free_read_buf;
+ goto err_free_netdev;
}
- INIT_DELAYED_WORK(&priv->ks_wlan_hw.rw_wq, ks7010_rw_function);
+ INIT_DELAYED_WORK(&priv->rw_dwork, ks7010_rw_function);
ks7010_card_init(priv);
ret = register_netdev(priv->net_dev);
if (ret)
- goto error_free_read_buf;
+ goto err_free_netdev;
return 0;
- error_free_read_buf:
- kfree(priv->ks_wlan_hw.read_buf);
- priv->ks_wlan_hw.read_buf = NULL;
- error_free_netdev:
+ err_free_netdev:
free_netdev(priv->net_dev);
card->priv = NULL;
- error_release_irq:
+ err_release_irq:
sdio_claim_host(func);
sdio_release_irq(func);
- error_disable_func:
+ err_disable_func:
sdio_disable_func(func);
- error_free_card:
+ err_free_card:
sdio_release_host(func);
sdio_set_drvdata(func, NULL);
kfree(card);
@@ -1114,6 +972,34 @@ static int ks7010_sdio_probe(struct sdio_func *func,
return -ENODEV;
}
+/* send stop request to MAC */
+static int send_stop_request(struct sdio_func *func)
+{
+ struct hostif_stop_request_t *pp;
+ struct ks_sdio_card *card;
+ size_t size;
+
+ card = sdio_get_drvdata(func);
+
+ pp = kzalloc(hif_align_size(sizeof(*pp)), GFP_KERNEL);
+ if (!pp) {
+ DPRINTK(3, "allocate memory failed..\n");
+ return -ENOMEM;
+ }
+
+ size = sizeof(*pp) - sizeof(pp->header.size);
+ pp->header.size = cpu_to_le16((uint16_t)size);
+ pp->header.event = cpu_to_le16((uint16_t)HIF_STOP_REQ);
+
+ sdio_claim_host(func);
+ write_to_device(card->priv, (unsigned char *)pp,
+ hif_align_size(sizeof(*pp)));
+ sdio_release_host(func);
+
+ kfree(pp);
+ return 0;
+}
+
static void ks7010_sdio_remove(struct sdio_func *func)
{
int ret;
@@ -1142,35 +1028,17 @@ static void ks7010_sdio_remove(struct sdio_func *func)
sdio_release_host(func);
DPRINTK(1, "interrupt disable\n");
- /* send stop request to MAC */
- {
- struct hostif_stop_request_t *pp;
+ ret = send_stop_request(func);
+ if (ret) /* memory allocation failure */
+ return;
- pp = kzalloc(hif_align_size(sizeof(*pp)), GFP_KERNEL);
- if (!pp) {
- DPRINTK(3, "allocate memory failed..\n");
- return; /* to do goto ni suru */
- }
- pp->header.size =
- cpu_to_le16((uint16_t)
- (sizeof(*pp) -
- sizeof(pp->header.size)));
- pp->header.event = cpu_to_le16((uint16_t)HIF_STOP_REQ);
-
- sdio_claim_host(func);
- write_to_device(priv, (unsigned char *)pp,
- hif_align_size(sizeof(*pp)));
- sdio_release_host(func);
- kfree(pp);
- }
DPRINTK(1, "STOP Req\n");
- if (priv->ks_wlan_hw.ks7010sdio_wq) {
- flush_workqueue(priv->ks_wlan_hw.ks7010sdio_wq);
- destroy_workqueue(priv->ks_wlan_hw.ks7010sdio_wq);
+ if (priv->wq) {
+ flush_workqueue(priv->wq);
+ destroy_workqueue(priv->wq);
}
- DPRINTK(1,
- "destroy_workqueue(priv->ks_wlan_hw.ks7010sdio_wq);\n");
+ DPRINTK(1, "destroy_workqueue(priv->wq);\n");
hostif_exit(priv);
DPRINTK(1, "hostif_exit\n");
@@ -1178,7 +1046,6 @@ static void ks7010_sdio_remove(struct sdio_func *func)
unregister_netdev(netdev);
trx_device_exit(priv);
- kfree(priv->ks_wlan_hw.read_buf);
free_netdev(priv->net_dev);
card->priv = NULL;
}
diff --git a/drivers/staging/ks7010/ks7010_sdio.h b/drivers/staging/ks7010/ks7010_sdio.h
index d7e1523a222f..e4f56a11c888 100644
--- a/drivers/staging/ks7010/ks7010_sdio.h
+++ b/drivers/staging/ks7010/ks7010_sdio.h
@@ -22,10 +22,13 @@
/* Older sources suggest earlier versions were named 7910 or 79xx */
#define SDIO_DEVICE_ID_KS_7010 0x7910
-/* Read Status Register */
+/* Read/Write Status Register */
#define READ_STATUS 0x000000
-#define READ_STATUS_BUSY 0
-#define READ_STATUS_IDLE 1
+#define WRITE_STATUS 0x00000C
+enum reg_status_type {
+ REG_STATUS_BUSY,
+ REG_STATUS_IDLE
+};
/* Read Index Register */
#define READ_INDEX 0x000004
@@ -33,11 +36,6 @@
/* Read Data Size Register */
#define READ_DATA_SIZE 0x000008
-/* Write Status Register */
-#define WRITE_STATUS 0x00000C
-#define WRITE_STATUS_BUSY 0
-#define WRITE_STATUS_IDLE 1
-
/* Write Index Register */
#define WRITE_INDEX 0x000010
@@ -64,18 +62,20 @@
/* General Communication Register A */
#define GCR_A 0x000028
-#define GCR_A_INIT 0
-#define GCR_A_REMAP 1
-#define GCR_A_RUN 2
+enum gen_com_reg_a {
+ GCR_A_INIT,
+ GCR_A_REMAP,
+ GCR_A_RUN
+};
/* General Communication Register B */
#define GCR_B 0x00002C
-#define GCR_B_ACTIVE 0
-#define GCR_B_DOZE 1
+enum gen_com_reg_b {
+ GCR_B_ACTIVE,
+ GCR_B_DOZE
+};
/* Wakeup Register */
-/* #define WAKEUP 0x008104 */
-/* #define WAKEUP_REQ 0x00 */
#define WAKEUP 0x008018
#define WAKEUP_REQ 0x5a
@@ -85,62 +85,78 @@
#define KS7010_IRAM_ADDRESS 0x06000000
-/*
- * struct define
+/**
+ * struct ks_sdio_card - SDIO device data.
+ *
+ * This structure is used as the &struct sdio_func private data.
+ *
+ * @func: Pointer to the SDIO function device.
+ * @priv: Pointer to the &struct net_device private data.
*/
-struct hw_info_t {
- struct ks_sdio_card *sdio_card;
- struct completion ks7010_sdio_wait;
- struct workqueue_struct *ks7010sdio_wq;
- struct delayed_work rw_wq;
- unsigned char *read_buf;
- struct tasklet_struct rx_bh_task;
-};
-
-struct ks_sdio_packet {
- struct ks_sdio_packet *next;
- u16 nb;
- u8 buffer[0] __aligned(4);
-};
-
struct ks_sdio_card {
struct sdio_func *func;
struct ks_wlan_private *priv;
- spinlock_t lock;
};
/* Tx Device struct */
#define TX_DEVICE_BUFF_SIZE 1024
+/**
+ * struct tx_device_buffer - Queue item for the tx queue.
+ * @sendp: Pointer to the send request data.
+ * @size: Size of @sendp data.
+ * @complete_handler: Function called once data write to device is complete.
+ * @skb: Socket buffer passed to @complete_handler when the write completes.
+ */
struct tx_device_buffer {
- unsigned char *sendp; /* pointer of send req data */
+ unsigned char *sendp;
unsigned int size;
- void (*complete_handler) (void *arg1, void *arg2);
- void *arg1;
- void *arg2;
+ void (*complete_handler)(struct ks_wlan_private *priv,
+ struct sk_buff *skb);
+ struct sk_buff *skb;
};
+/**
+ * struct tx_device - Tx buffer queue.
+ * @tx_dev_buff: Queue buffer.
+ * @qhead: Head of tx queue.
+ * @qtail: Tail of tx queue.
+ * @tx_dev_lock: Queue lock.
+ */
struct tx_device {
struct tx_device_buffer tx_dev_buff[TX_DEVICE_BUFF_SIZE];
- unsigned int qhead; /* tx buffer queue first pointer */
- unsigned int qtail; /* tx buffer queue last pointer */
- spinlock_t tx_dev_lock;
+ unsigned int qhead;
+ unsigned int qtail;
+ spinlock_t tx_dev_lock; /* protect access to the queue */
};
/* Rx Device struct */
#define RX_DATA_SIZE (2 + 2 + 2347 + 1)
#define RX_DEVICE_BUFF_SIZE 32
+/**
+ * struct rx_device_buffer - Queue item for the rx queue.
+ * @data: rx data.
+ * @size: Size of @data.
+ */
struct rx_device_buffer {
unsigned char data[RX_DATA_SIZE];
unsigned int size;
};
+/**
+ * struct rx_device - Rx buffer queue.
+ * @rx_dev_buff: Queue buffer.
+ * @qhead: Head of rx queue.
+ * @qtail: Tail of rx queue.
+ * @rx_dev_lock: Queue lock.
+ */
struct rx_device {
struct rx_device_buffer rx_dev_buff[RX_DEVICE_BUFF_SIZE];
- unsigned int qhead; /* rx buffer queue first pointer */
- unsigned int qtail; /* rx buffer queue last pointer */
- spinlock_t rx_dev_lock;
+ unsigned int qhead;
+ unsigned int qtail;
+ spinlock_t rx_dev_lock; /* protect access to the queue */
};
#define ROM_FILE "ks7010sd.rom"
diff --git a/drivers/staging/ks7010/ks_hostif.c b/drivers/staging/ks7010/ks_hostif.c
index da7c42ef05f5..49e95426ac30 100644
--- a/drivers/staging/ks7010/ks_hostif.c
+++ b/drivers/staging/ks7010/ks_hostif.c
@@ -66,11 +66,13 @@ inline u32 get_DWORD(struct ks_wlan_private *priv)
static void ks_wlan_hw_wakeup_task(struct work_struct *work)
{
- struct ks_wlan_private *priv =
- container_of(work, struct ks_wlan_private, ks_wlan_wakeup_task);
- int ps_status = atomic_read(&priv->psstatus.status);
+ struct ks_wlan_private *priv;
+ int ps_status;
long time_left;
+ priv = container_of(work, struct ks_wlan_private, wakeup_work);
+ ps_status = atomic_read(&priv->psstatus.status);
+
if (ps_status == PS_SNOOZE) {
ks_wlan_hw_wakeup_request(priv);
time_left = wait_for_completion_interruptible_timeout(
@@ -78,7 +80,7 @@ static void ks_wlan_hw_wakeup_task(struct work_struct *work)
msecs_to_jiffies(20));
if (time_left <= 0) {
DPRINTK(1, "wake up timeout or interrupted !!!\n");
- schedule_work(&priv->ks_wlan_wakeup_task);
+ schedule_work(&priv->wakeup_work);
return;
}
} else {
@@ -97,7 +99,7 @@ int ks_wlan_do_power_save(struct ks_wlan_private *priv)
{
DPRINTK(4, "psstatus.status=%d\n", atomic_read(&priv->psstatus.status));
- if ((priv->connect_status & CONNECT_STATUS_MASK) == CONNECT_STATUS)
+ if (is_connect_status(priv->connect_status))
hostif_sme_enqueue(priv, SME_POW_MNGMT_REQUEST);
else
priv->dev_state = DEVICE_STATE_READY;
@@ -110,30 +112,29 @@ int get_current_ap(struct ks_wlan_private *priv, struct link_ap_info_t *ap_info)
struct local_ap_t *ap;
union iwreq_data wrqu;
struct net_device *netdev = priv->net_dev;
- int rc = 0;
DPRINTK(3, "\n");
- ap = &(priv->current_ap);
+ ap = &priv->current_ap;
- if ((priv->connect_status & CONNECT_STATUS_MASK) == DISCONNECT_STATUS) {
+ if (is_disconnect_status(priv->connect_status)) {
memset(ap, 0, sizeof(struct local_ap_t));
- return 1;
+ return -EPERM;
}
/* bssid */
- memcpy(&(ap->bssid[0]), &(ap_info->bssid[0]), ETH_ALEN);
+ memcpy(ap->bssid, ap_info->bssid, ETH_ALEN);
/* essid */
- memcpy(&(ap->ssid.body[0]), &(priv->reg.ssid.body[0]),
+ memcpy(ap->ssid.body, priv->reg.ssid.body,
priv->reg.ssid.size);
ap->ssid.size = priv->reg.ssid.size;
/* rate_set */
- memcpy(&(ap->rate_set.body[0]), &(ap_info->rate_set.body[0]),
+ memcpy(ap->rate_set.body, ap_info->rate_set.body,
ap_info->rate_set.size);
ap->rate_set.size = ap_info->rate_set.size;
- if (ap_info->ext_rate_set.size) {
+ if (ap_info->ext_rate_set.size != 0) {
/* rate_set */
- memcpy(&(ap->rate_set.body[ap->rate_set.size]),
- &(ap_info->ext_rate_set.body[0]),
+ memcpy(&ap->rate_set.body[ap->rate_set.size],
+ ap_info->ext_rate_set.body,
ap_info->ext_rate_set.size);
ap->rate_set.size += ap_info->ext_rate_set.size;
}
@@ -148,28 +149,28 @@ int get_current_ap(struct ks_wlan_private *priv, struct link_ap_info_t *ap_info)
/* capability */
ap->capability = ap_info->capability;
/* rsn */
- if ((ap_info->rsn_mode & RSN_MODE_WPA2)
- && (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2)) {
+ if ((ap_info->rsn_mode & RSN_MODE_WPA2) &&
+ (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2)) {
ap->rsn_ie.id = 0x30;
if (ap_info->rsn.size <= RSN_IE_BODY_MAX) {
ap->rsn_ie.size = ap_info->rsn.size;
- memcpy(&(ap->rsn_ie.body[0]), &(ap_info->rsn.body[0]),
+ memcpy(ap->rsn_ie.body, ap_info->rsn.body,
ap_info->rsn.size);
} else {
ap->rsn_ie.size = RSN_IE_BODY_MAX;
- memcpy(&(ap->rsn_ie.body[0]), &(ap_info->rsn.body[0]),
+ memcpy(ap->rsn_ie.body, ap_info->rsn.body,
RSN_IE_BODY_MAX);
}
- } else if ((ap_info->rsn_mode & RSN_MODE_WPA)
- && (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA)) {
+ } else if ((ap_info->rsn_mode & RSN_MODE_WPA) &&
+ (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA)) {
ap->wpa_ie.id = 0xdd;
if (ap_info->rsn.size <= RSN_IE_BODY_MAX) {
ap->wpa_ie.size = ap_info->rsn.size;
- memcpy(&(ap->wpa_ie.body[0]), &(ap_info->rsn.body[0]),
+ memcpy(ap->wpa_ie.body, ap_info->rsn.body,
ap_info->rsn.size);
} else {
ap->wpa_ie.size = RSN_IE_BODY_MAX;
- memcpy(&(ap->wpa_ie.body[0]), &(ap_info->rsn.body[0]),
+ memcpy(ap->wpa_ie.body, ap_info->rsn.body,
RSN_IE_BODY_MAX);
}
} else {
@@ -182,23 +183,35 @@ int get_current_ap(struct ks_wlan_private *priv, struct link_ap_info_t *ap_info)
wrqu.data.length = 0;
wrqu.data.flags = 0;
wrqu.ap_addr.sa_family = ARPHRD_ETHER;
- if ((priv->connect_status & CONNECT_STATUS_MASK) == CONNECT_STATUS) {
+ if (is_connect_status(priv->connect_status)) {
memcpy(wrqu.ap_addr.sa_data,
- &(priv->current_ap.bssid[0]), ETH_ALEN);
+ priv->current_ap.bssid, ETH_ALEN);
DPRINTK(3,
"IWEVENT: connect bssid=%pM\n", wrqu.ap_addr.sa_data);
wireless_send_event(netdev, SIOCGIWAP, &wrqu, NULL);
}
DPRINTK(4, "\n Link AP\n");
- DPRINTK(4, " bssid=%02X:%02X:%02X:%02X:%02X:%02X\n \
- essid=%s\n rate_set=%02X,%02X,%02X,%02X,%02X,%02X,%02X,%02X\n channel=%d\n \
- rssi=%d\n sq=%d\n capability=%04X\n", ap->bssid[0], ap->bssid[1], ap->bssid[2], ap->bssid[3], ap->bssid[4], ap->bssid[5], &(ap->ssid.body[0]), ap->rate_set.body[0], ap->rate_set.body[1], ap->rate_set.body[2], ap->rate_set.body[3], ap->rate_set.body[4], ap->rate_set.body[5], ap->rate_set.body[6], ap->rate_set.body[7], ap->channel, ap->rssi, ap->sq, ap->capability);
+ DPRINTK(4, " bssid=%02X:%02X:%02X:%02X:%02X:%02X\n"
+ " essid=%s\n"
+ " rate_set=%02X,%02X,%02X,%02X,%02X,%02X,%02X,%02X\n"
+ " channel=%d\n"
+ " rssi=%d\n"
+ " sq=%d\n"
+ " capability=%04X\n",
+ ap->bssid[0], ap->bssid[1], ap->bssid[2],
+ ap->bssid[3], ap->bssid[4], ap->bssid[5],
+ &(ap->ssid.body[0]),
+ ap->rate_set.body[0], ap->rate_set.body[1],
+ ap->rate_set.body[2], ap->rate_set.body[3],
+ ap->rate_set.body[4], ap->rate_set.body[5],
+ ap->rate_set.body[6], ap->rate_set.body[7],
+ ap->channel, ap->rssi, ap->sq, ap->capability);
DPRINTK(4, "\n Link AP\n rsn.mode=%d\n rsn.size=%d\n",
ap_info->rsn_mode, ap_info->rsn.size);
DPRINTK(4, "\n ext_rate_set_size=%d\n rate_set_size=%d\n",
ap_info->ext_rate_set.size, ap_info->rate_set.size);
- return rc;
+ return 0;
}
static
@@ -212,7 +225,7 @@ int get_ap_information(struct ks_wlan_private *priv, struct ap_info_t *ap_info,
memset(ap, 0, sizeof(struct local_ap_t));
/* bssid */
- memcpy(&(ap->bssid[0]), &(ap_info->bssid[0]), ETH_ALEN);
+ memcpy(ap->bssid, ap_info->bssid, ETH_ALEN);
/* rssi */
ap->rssi = ap_info->rssi;
/* sq */
@@ -224,7 +237,7 @@ int get_ap_information(struct ks_wlan_private *priv, struct ap_info_t *ap_info,
/* channel */
ap->channel = ap_info->ch_info;
- bp = &(ap_info->body[0]);
+ bp = ap_info->body;
bsize = ap_info->body_size;
offset = 0;
@@ -239,19 +252,19 @@ int get_ap_information(struct ks_wlan_private *priv, struct ap_info_t *ap_info,
*(bp + 1));
ap->ssid.size = SSID_MAX_SIZE;
}
- memcpy(&(ap->ssid.body[0]), bp + 2, ap->ssid.size);
+ memcpy(ap->ssid.body, bp + 2, ap->ssid.size);
break;
case 1: /* rate */
case 50: /* ext rate */
if ((*(bp + 1) + ap->rate_set.size) <=
RATE_SET_MAX_SIZE) {
- memcpy(&(ap->rate_set.body[ap->rate_set.size]),
+ memcpy(&ap->rate_set.body[ap->rate_set.size],
bp + 2, *(bp + 1));
ap->rate_set.size += *(bp + 1);
} else {
DPRINTK(1, "size over :: rate size=%d\n",
(*(bp + 1) + ap->rate_set.size));
- memcpy(&(ap->rate_set.body[ap->rate_set.size]),
+ memcpy(&ap->rate_set.body[ap->rate_set.size],
bp + 2,
RATE_SET_MAX_SIZE - ap->rate_set.size);
ap->rate_set.size +=
@@ -269,10 +282,10 @@ int get_ap_information(struct ks_wlan_private *priv, struct ap_info_t *ap_info,
*(bp + 1));
ap->rsn_ie.size = RSN_IE_BODY_MAX;
}
- memcpy(&(ap->rsn_ie.body[0]), bp + 2, ap->rsn_ie.size);
+ memcpy(ap->rsn_ie.body, bp + 2, ap->rsn_ie.size);
break;
case 221: /* WPA */
- if (!memcmp(bp + 2, "\x00\x50\xf2\x01", 4)) { /* WPA OUI check */
+ if (memcmp(bp + 2, "\x00\x50\xf2\x01", 4) == 0) { /* WPA OUI check */
ap->wpa_ie.id = *bp;
if (*(bp + 1) <= RSN_IE_BODY_MAX) {
ap->wpa_ie.size = *(bp + 1);
@@ -282,7 +295,7 @@ int get_ap_information(struct ks_wlan_private *priv, struct ap_info_t *ap_info,
*(bp + 1));
ap->wpa_ie.size = RSN_IE_BODY_MAX;
}
- memcpy(&(ap->wpa_ie.body[0]), bp + 2,
+ memcpy(ap->wpa_ie.body, bp + 2,
ap->wpa_ie.size);
}
break;
@@ -308,23 +321,98 @@ int get_ap_information(struct ks_wlan_private *priv, struct ap_info_t *ap_info,
}
static
+int hostif_data_indication_wpa(struct ks_wlan_private *priv,
+ unsigned short auth_type)
+{
+ struct ether_hdr *eth_hdr;
+ unsigned short eth_proto;
+ unsigned char recv_mic[8];
+ char buf[128];
+ unsigned long now;
+ struct mic_failure_t *mic_failure;
+ struct michael_mic_t michael_mic;
+ union iwreq_data wrqu;
+ unsigned int key_index = auth_type - 1;
+ struct wpa_key_t *key = &priv->wpa.key[key_index];
+
+ eth_hdr = (struct ether_hdr *)(priv->rxp);
+ eth_proto = ntohs(eth_hdr->h_proto);
+
+ if (eth_hdr->h_dest_snap != eth_hdr->h_source_snap) {
+ DPRINTK(1, "invalid data format\n");
+ priv->nstats.rx_errors++;
+ return -EINVAL;
+ }
+ if (((auth_type == TYPE_PMK1 &&
+ priv->wpa.pairwise_suite == IW_AUTH_CIPHER_TKIP) ||
+ (auth_type == TYPE_GMK1 &&
+ priv->wpa.group_suite == IW_AUTH_CIPHER_TKIP) ||
+ (auth_type == TYPE_GMK2 &&
+ priv->wpa.group_suite == IW_AUTH_CIPHER_TKIP)) &&
+ key->key_len) {
+ DPRINTK(4, "TKIP: protocol=%04X: size=%u\n",
+ eth_proto, priv->rx_size);
+ /* MIC save */
+ memcpy(&recv_mic[0], (priv->rxp) + ((priv->rx_size) - 8), 8);
+ priv->rx_size = priv->rx_size - 8;
+ if (auth_type > 0 && auth_type < 4) { /* auth_type check */
+ MichaelMICFunction(&michael_mic,
+ (uint8_t *)key->rx_mic_key,
+ (uint8_t *)priv->rxp,
+ (int)priv->rx_size,
+ (uint8_t)0, /* priority */
+ (uint8_t *)michael_mic.Result);
+ }
+ if (memcmp(michael_mic.Result, recv_mic, 8) != 0) {
+ now = jiffies;
+ mic_failure = &priv->wpa.mic_failure;
+ /* MIC FAILURE */
+ if (mic_failure->last_failure_time &&
+ (now - mic_failure->last_failure_time) / HZ >= 60) {
+ mic_failure->failure = 0;
+ }
+ DPRINTK(4, "MIC FAILURE\n");
+ if (mic_failure->failure == 0) {
+ mic_failure->failure = 1;
+ mic_failure->counter = 0;
+ } else if (mic_failure->failure == 1) {
+ mic_failure->failure = 2;
+ mic_failure->counter =
+ (uint16_t)((now - mic_failure->last_failure_time) / HZ);
+ if (!mic_failure->counter) /* range 1-60 */
+ mic_failure->counter = 1;
+ }
+ priv->wpa.mic_failure.last_failure_time = now;
+
+ /* needed parameters: count, keyid, key type, TSC */
+ sprintf(buf,
+ "MLME-MICHAELMICFAILURE.indication(keyid=%d %scast addr=%pM)",
+ key_index,
+ eth_hdr->h_dest[0] & 0x01 ? "broad" : "uni",
+ eth_hdr->h_source);
+ memset(&wrqu, 0, sizeof(wrqu));
+ wrqu.data.length = strlen(buf);
+ DPRINTK(4, "IWEVENT:MICHAELMICFAILURE\n");
+ wireless_send_event(priv->net_dev, IWEVCUSTOM, &wrqu,
+ buf);
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+static
void hostif_data_indication(struct ks_wlan_private *priv)
{
unsigned int rx_ind_size; /* indicate data size */
struct sk_buff *skb;
unsigned short auth_type;
unsigned char temp[256];
-
- unsigned char RecvMIC[8];
- char buf[128];
struct ether_hdr *eth_hdr;
unsigned short eth_proto;
- unsigned long now;
- struct mic_failure_t *mic_failure;
struct ieee802_1x_hdr *aa1x_hdr;
- struct wpa_eapol_key *eap_key;
- struct michel_mic_t michel_mic;
- union iwreq_data wrqu;
+ size_t size;
+ int ret;
DPRINTK(3, "\n");
@@ -343,7 +431,7 @@ void hostif_data_indication(struct ks_wlan_private *priv)
DPRINTK(3, "ether protocol = %04X\n", eth_proto);
/* source address check */
- if (!memcmp(&priv->eth_addr[0], eth_hdr->h_source, ETH_ALEN)) {
+ if (memcmp(&priv->eth_addr[0], eth_hdr->h_source, ETH_ALEN) == 0) {
DPRINTK(1, "invalid : source is own mac address !!\n");
DPRINTK(1,
"eth_hdrernet->h_dest=%02X:%02X:%02X:%02X:%02X:%02X\n",
@@ -356,79 +444,9 @@ void hostif_data_indication(struct ks_wlan_private *priv)
/* for WPA */
if (auth_type != TYPE_DATA && priv->wpa.rsn_enabled) {
- if (memcmp(&eth_hdr->h_source[0], &priv->eth_addr[0], ETH_ALEN)) { /* source address check */
- if (eth_hdr->h_dest_snap != eth_hdr->h_source_snap) {
- DPRINTK(1, "invalid data format\n");
- priv->nstats.rx_errors++;
- return;
- }
- if (((auth_type == TYPE_PMK1
- && priv->wpa.pairwise_suite ==
- IW_AUTH_CIPHER_TKIP) || (auth_type == TYPE_GMK1
- && priv->wpa.
- group_suite ==
- IW_AUTH_CIPHER_TKIP)
- || (auth_type == TYPE_GMK2
- && priv->wpa.group_suite ==
- IW_AUTH_CIPHER_TKIP))
- && priv->wpa.key[auth_type - 1].key_len) {
- DPRINTK(4, "TKIP: protocol=%04X: size=%u\n",
- eth_proto, priv->rx_size);
- /* MIC save */
- memcpy(&RecvMIC[0],
- (priv->rxp) + ((priv->rx_size) - 8), 8);
- priv->rx_size = priv->rx_size - 8;
- if (auth_type > 0 && auth_type < 4) { /* auth_type check */
- MichaelMICFunction(&michel_mic, (uint8_t *) priv->wpa.key[auth_type - 1].rx_mic_key, (uint8_t *) priv->rxp, (int)priv->rx_size, (uint8_t) 0, /* priority */
- (uint8_t *)
- michel_mic.Result);
- }
- if (memcmp(michel_mic.Result, RecvMIC, 8)) {
- now = jiffies;
- mic_failure = &priv->wpa.mic_failure;
- /* MIC FAILURE */
- if (mic_failure->last_failure_time &&
- (now -
- mic_failure->last_failure_time) /
- HZ >= 60) {
- mic_failure->failure = 0;
- }
- DPRINTK(4, "MIC FAILURE\n");
- if (mic_failure->failure == 0) {
- mic_failure->failure = 1;
- mic_failure->counter = 0;
- } else if (mic_failure->failure == 1) {
- mic_failure->failure = 2;
- mic_failure->counter =
- (uint16_t) ((now -
- mic_failure->
- last_failure_time)
- / HZ);
- if (!mic_failure->counter) /* mic_failure counter value range 1-60 */
- mic_failure->counter =
- 1;
- }
- priv->wpa.mic_failure.
- last_failure_time = now;
- /* needed parameters: count, keyid, key type, TSC */
- sprintf(buf,
- "MLME-MICHAELMICFAILURE.indication(keyid=%d %scast addr="
- "%pM)",
- auth_type - 1,
- eth_hdr->
- h_dest[0] & 0x01 ? "broad" :
- "uni", eth_hdr->h_source);
- memset(&wrqu, 0, sizeof(wrqu));
- wrqu.data.length = strlen(buf);
- DPRINTK(4,
- "IWEVENT:MICHAELMICFAILURE\n");
- wireless_send_event(priv->net_dev,
- IWEVCUSTOM, &wrqu,
- buf);
- return;
- }
- }
- }
+ ret = hostif_data_indication_wpa(priv, auth_type);
+ if (ret)
+ return;
}
if ((priv->connect_status & FORCE_DISCONNECT) ||
@@ -441,77 +459,68 @@ void hostif_data_indication(struct ks_wlan_private *priv)
case 0xAA: /* SNAP */
rx_ind_size = priv->rx_size - 6;
skb = dev_alloc_skb(rx_ind_size);
+ if (!skb) {
+ priv->nstats.rx_dropped++;
+ return;
+ }
DPRINTK(4, "SNAP, rx_ind_size = %d\n", rx_ind_size);
- if (skb) {
- memcpy(skb_put(skb, 12), priv->rxp, 12); /* 8802/FDDI MAC copy */
- /* (SNAP+UI..) skip */
- memcpy(skb_put(skb, rx_ind_size - 12), priv->rxp + 18, rx_ind_size - 12); /* copy after Type */
-
- aa1x_hdr = (struct ieee802_1x_hdr *)(priv->rxp + 20);
- if (aa1x_hdr->type == IEEE802_1X_TYPE_EAPOL_KEY
- && priv->wpa.rsn_enabled) {
- eap_key =
- (struct wpa_eapol_key *)(aa1x_hdr + 1);
- atomic_set(&priv->psstatus.snooze_guard, 1);
- }
+ size = ETH_ALEN * 2;
+ memcpy(skb_put(skb, size), priv->rxp, size);
- /* rx indication */
- skb->dev = priv->net_dev;
- skb->protocol = eth_type_trans(skb, skb->dev);
- priv->nstats.rx_packets++;
- priv->nstats.rx_bytes += rx_ind_size;
- netif_rx(skb);
- } else {
- priv->nstats.rx_dropped++;
- }
+ /* (SNAP+UI..) skip */
+
+ size = rx_ind_size - (ETH_ALEN * 2);
+ memcpy(skb_put(skb, size), &eth_hdr->h_proto, size);
+
+ aa1x_hdr = (struct ieee802_1x_hdr *)(priv->rxp + ETHER_HDR_SIZE);
break;
case 0xF0: /* NETBEUI/NetBIOS */
rx_ind_size = (priv->rx_size + 2);
skb = dev_alloc_skb(rx_ind_size);
+ if (!skb) {
+ priv->nstats.rx_dropped++;
+ return;
+ }
DPRINTK(3, "NETBEUI/NetBIOS rx_ind_size=%d\n", rx_ind_size);
- if (skb) {
- memcpy(skb_put(skb, 12), priv->rxp, 12); /* 8802/FDDI MAC copy */
+ memcpy(skb_put(skb, 12), priv->rxp, 12); /* 8802/FDDI MAC copy */
- temp[0] = (((rx_ind_size - 12) >> 8) & 0xff); /* NETBEUI size add */
- temp[1] = ((rx_ind_size - 12) & 0xff);
- memcpy(skb_put(skb, 2), temp, 2);
+ temp[0] = (((rx_ind_size - 12) >> 8) & 0xff); /* NETBEUI size add */
+ temp[1] = ((rx_ind_size - 12) & 0xff);
+ memcpy(skb_put(skb, 2), temp, 2);
- memcpy(skb_put(skb, rx_ind_size - 14), priv->rxp + 12, rx_ind_size - 14); /* copy after Type */
+ memcpy(skb_put(skb, rx_ind_size - 14), priv->rxp + 12,
+ rx_ind_size - 14); /* copy after Type */
- aa1x_hdr = (struct ieee802_1x_hdr *)(priv->rxp + 14);
- if (aa1x_hdr->type == IEEE802_1X_TYPE_EAPOL_KEY
- && priv->wpa.rsn_enabled) {
- eap_key =
- (struct wpa_eapol_key *)(aa1x_hdr + 1);
- atomic_set(&priv->psstatus.snooze_guard, 1);
- }
-
- /* rx indication */
- skb->dev = priv->net_dev;
- skb->protocol = eth_type_trans(skb, skb->dev);
- priv->nstats.rx_packets++;
- priv->nstats.rx_bytes += rx_ind_size;
- netif_rx(skb);
- } else {
- priv->nstats.rx_dropped++;
- }
+ aa1x_hdr = (struct ieee802_1x_hdr *)(priv->rxp + 14);
break;
default: /* other rx data */
DPRINTK(2, "invalid data format\n");
priv->nstats.rx_errors++;
+ return;
}
+
+ if (aa1x_hdr->type == IEEE802_1X_TYPE_EAPOL_KEY &&
+ priv->wpa.rsn_enabled)
+ atomic_set(&priv->psstatus.snooze_guard, 1);
+
+ /* rx indication */
+ skb->dev = priv->net_dev;
+ skb->protocol = eth_type_trans(skb, skb->dev);
+ priv->nstats.rx_packets++;
+ priv->nstats.rx_bytes += rx_ind_size;
+ netif_rx(skb);
}
static
void hostif_mib_get_confirm(struct ks_wlan_private *priv)
{
struct net_device *dev = priv->net_dev;
- uint32_t mib_status;
- uint32_t mib_attribute;
- uint16_t mib_val_size;
- uint16_t mib_val_type;
+ u32 mib_status;
+ u32 mib_attribute;
+ u16 mib_val_size;
+ u16 mib_val_type;
DPRINTK(3, "\n");
@@ -520,7 +529,7 @@ void hostif_mib_get_confirm(struct ks_wlan_private *priv)
mib_val_size = get_WORD(priv); /* MIB value size */
mib_val_type = get_WORD(priv); /* MIB value type */
- if (mib_status != 0) {
+ if (mib_status) {
/* in case of error */
DPRINTK(1, "attribute=%08X, status=%08X\n", mib_attribute,
mib_status);
@@ -588,15 +597,15 @@ void hostif_mib_get_confirm(struct ks_wlan_private *priv)
static
void hostif_mib_set_confirm(struct ks_wlan_private *priv)
{
- uint32_t mib_status; /* +04 MIB Status */
- uint32_t mib_attribute; /* +08 MIB attribute */
+ u32 mib_status; /* +04 MIB Status */
+ u32 mib_attribute; /* +08 MIB attribute */
DPRINTK(3, "\n");
mib_status = get_DWORD(priv); /* MIB Status */
mib_attribute = get_DWORD(priv); /* MIB attribute */
- if (mib_status != 0) {
+ if (mib_status) {
/* in case of error */
DPRINTK(1, "error :: attribute=%08X, status=%08X\n",
mib_attribute, mib_status);
@@ -715,11 +724,11 @@ void hostif_mib_set_confirm(struct ks_wlan_private *priv)
}
static
-void hostif_power_mngmt_confirm(struct ks_wlan_private *priv)
+void hostif_power_mgmt_confirm(struct ks_wlan_private *priv)
{
DPRINTK(3, "\n");
- if (priv->reg.powermgt > POWMGT_ACTIVE_MODE &&
+ if (priv->reg.power_mgmt > POWER_MGMT_ACTIVE &&
priv->reg.operation_mode == MODE_INFRASTRUCTURE) {
atomic_set(&priv->psstatus.confirm_wait, 0);
priv->dev_state = DEVICE_STATE_SLEEP;
@@ -735,8 +744,7 @@ void hostif_sleep_confirm(struct ks_wlan_private *priv)
DPRINTK(3, "\n");
atomic_set(&priv->sleepstatus.doze_request, 1);
- queue_delayed_work(priv->ks_wlan_hw.ks7010sdio_wq,
- &priv->ks_wlan_hw.rw_wq, 1);
+ queue_delayed_work(priv->wq, &priv->rw_dwork, 1);
}
static
@@ -748,7 +756,7 @@ void hostif_start_confirm(struct ks_wlan_private *priv)
wrqu.data.length = 0;
wrqu.data.flags = 0;
wrqu.ap_addr.sa_family = ARPHRD_ETHER;
- if ((priv->connect_status & CONNECT_STATUS_MASK) == CONNECT_STATUS) {
+ if (is_connect_status(priv->connect_status)) {
eth_zero_addr(wrqu.ap_addr.sa_data);
DPRINTK(3, "IWEVENT: disconnect\n");
wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL);
@@ -795,8 +803,8 @@ void hostif_connect_indication(struct ks_wlan_private *priv)
}
get_current_ap(priv, (struct link_ap_info_t *)priv->rxp);
- if ((priv->connect_status & CONNECT_STATUS_MASK) == CONNECT_STATUS &&
- (old_status & CONNECT_STATUS_MASK) == DISCONNECT_STATUS) {
+ if (is_connect_status(priv->connect_status) &&
+ is_disconnect_status(old_status)) {
/* for power save */
atomic_set(&priv->psstatus.snooze_guard, 0);
atomic_set(&priv->psstatus.confirm_wait, 0);
@@ -806,8 +814,8 @@ void hostif_connect_indication(struct ks_wlan_private *priv)
wrqu0.data.length = 0;
wrqu0.data.flags = 0;
wrqu0.ap_addr.sa_family = ARPHRD_ETHER;
- if ((priv->connect_status & CONNECT_STATUS_MASK) == DISCONNECT_STATUS &&
- (old_status & CONNECT_STATUS_MASK) == CONNECT_STATUS) {
+ if (is_disconnect_status(priv->connect_status) &&
+ is_connect_status(old_status)) {
eth_zero_addr(wrqu0.ap_addr.sa_data);
DPRINTK(3, "IWEVENT: disconnect\n");
DPRINTK(3, "disconnect :: scan_ind_count=%d\n",
@@ -826,18 +834,16 @@ void hostif_scan_indication(struct ks_wlan_private *priv)
DPRINTK(3, "scan_ind_count = %d\n", priv->scan_ind_count);
ap_info = (struct ap_info_t *)(priv->rxp);
- if (priv->scan_ind_count != 0) {
+ if (priv->scan_ind_count) {
for (i = 0; i < priv->aplist.size; i++) { /* bssid check */
- if (!memcmp
- (&(ap_info->bssid[0]),
- &(priv->aplist.ap[i].bssid[0]), ETH_ALEN)) {
- if (ap_info->frame_type ==
- FRAME_TYPE_PROBE_RESP)
- get_ap_information(priv, ap_info,
- &(priv->aplist.
- ap[i]));
- return;
- }
+ if (memcmp(ap_info->bssid,
+ priv->aplist.ap[i].bssid, ETH_ALEN) != 0)
+ continue;
+
+ if (ap_info->frame_type == FRAME_TYPE_PROBE_RESP)
+ get_ap_information(priv, ap_info,
+ &priv->aplist.ap[i]);
+ return;
}
}
priv->scan_ind_count++;
@@ -845,8 +851,7 @@ void hostif_scan_indication(struct ks_wlan_private *priv)
DPRINTK(4, " scan_ind_count=%d :: aplist.size=%d\n",
priv->scan_ind_count, priv->aplist.size);
get_ap_information(priv, (struct ap_info_t *)(priv->rxp),
- &(priv->aplist.
- ap[priv->scan_ind_count - 1]));
+ &(priv->aplist.ap[priv->scan_ind_count - 1]));
priv->aplist.size = priv->scan_ind_count;
} else {
DPRINTK(4, " count over :: scan_ind_count=%d\n",
@@ -867,7 +872,7 @@ void hostif_stop_confirm(struct ks_wlan_private *priv)
priv->dev_state = DEVICE_STATE_READY;
/* disconnect indication */
- if ((priv->connect_status & CONNECT_STATUS_MASK) == CONNECT_STATUS) {
+ if (is_connect_status(priv->connect_status)) {
netif_carrier_off(netdev);
tmp = FORCE_DISCONNECT & priv->connect_status;
priv->connect_status = tmp | DISCONNECT_STATUS;
@@ -876,9 +881,8 @@ void hostif_stop_confirm(struct ks_wlan_private *priv)
wrqu0.data.length = 0;
wrqu0.data.flags = 0;
wrqu0.ap_addr.sa_family = ARPHRD_ETHER;
- if ((priv->connect_status & CONNECT_STATUS_MASK) ==
- DISCONNECT_STATUS
- && (old_status & CONNECT_STATUS_MASK) == CONNECT_STATUS) {
+ if (is_disconnect_status(priv->connect_status) &&
+ is_connect_status(old_status)) {
eth_zero_addr(wrqu0.ap_addr.sa_data);
DPRINTK(3, "IWEVENT: disconnect\n");
netdev_info(netdev, "IWEVENT: disconnect\n");
@@ -903,7 +907,7 @@ void hostif_ps_adhoc_set_confirm(struct ks_wlan_private *priv)
static
void hostif_infrastructure_set_confirm(struct ks_wlan_private *priv)
{
- uint16_t result_code;
+ u16 result_code;
DPRINTK(3, "\n");
result_code = get_WORD(priv);
@@ -1011,9 +1015,15 @@ void hostif_phy_information_confirm(struct ks_wlan_private *priv)
wstats->qual.noise = 0; /* invalid noise value */
wstats->qual.updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM;
- DPRINTK(3, "\n rssi=%u\n signal=%u\n LinkSpeed=%ux500Kbps\n \
- TransmittedFrameCount=%u\n ReceivedFragmentCount=%u\n FailedCount=%u\n \
- FCSErrorCount=%u\n", rssi, signal, LinkSpeed, TransmittedFrameCount, ReceivedFragmentCount, FailedCount, FCSErrorCount);
+ DPRINTK(3, "\n rssi=%u\n"
+ " signal=%u\n"
+ " LinkSpeed=%ux500Kbps\n"
+ " TransmittedFrameCount=%u\n"
+ " ReceivedFragmentCount=%u\n"
+ " FailedCount=%u\n"
+ " FCSErrorCount=%u\n",
+ rssi, signal, LinkSpeed, TransmittedFrameCount,
+ ReceivedFragmentCount, FailedCount, FCSErrorCount);
/* wake_up_interruptible_all(&priv->confirm_wait); */
complete(&priv->confirm_wait);
@@ -1043,8 +1053,8 @@ void hostif_event_check(struct ks_wlan_private *priv)
case HIF_MIB_SET_CONF:
hostif_mib_set_confirm(priv);
break;
- case HIF_POWERMGT_CONF:
- hostif_power_mngmt_confirm(priv);
+ case HIF_POWER_MGMT_CONF:
+ hostif_power_mgmt_confirm(priv);
break;
case HIF_SLEEP_CONF:
hostif_sleep_confirm(priv);
@@ -1099,11 +1109,24 @@ void hostif_event_check(struct ks_wlan_private *priv)
priv->hostt.qtail = (priv->hostt.qtail + 1) % SME_EVENT_BUFF_SIZE;
}
-#define CHECK_ALINE(size) (size % 4 ? (size + (4 - (size % 4))) : size)
+/* allocate size bytes, set header size and event */
+static void *hostif_generic_request(size_t size, int event)
+{
+ struct hostif_hdr *p;
+
+ p = kzalloc(hif_align_size(size), KS_WLAN_MEM_FLAG);
+ if (!p)
+ return NULL;
-int hostif_data_request(struct ks_wlan_private *priv, struct sk_buff *packet)
+ p->size = cpu_to_le16((u16)(size - sizeof(p->size)));
+ p->event = cpu_to_le16(event);
+
+ return p;
+}
+
+int hostif_data_request(struct ks_wlan_private *priv, struct sk_buff *skb)
{
- unsigned int packet_len = 0;
+ unsigned int skb_len = 0;
unsigned char *buffer = NULL;
unsigned int length = 0;
@@ -1112,27 +1135,29 @@ int hostif_data_request(struct ks_wlan_private *priv, struct sk_buff *packet)
int result = 0;
unsigned short eth_proto;
struct ether_hdr *eth_hdr;
- struct michel_mic_t michel_mic;
+ struct michael_mic_t michael_mic;
unsigned short keyinfo = 0;
struct ieee802_1x_hdr *aa1x_hdr;
struct wpa_eapol_key *eap_key;
struct ethhdr *eth;
+ size_t size;
+ int ret;
- packet_len = packet->len;
- if (packet_len > ETH_FRAME_LEN) {
- DPRINTK(1, "bad length packet_len=%d\n", packet_len);
- dev_kfree_skb(packet);
- return -1;
+ skb_len = skb->len;
+ if (skb_len > ETH_FRAME_LEN) {
+ DPRINTK(1, "bad length skb_len=%d\n", skb_len);
+ ret = -EOVERFLOW;
+ goto err_kfree_skb;
}
- if (((priv->connect_status & CONNECT_STATUS_MASK) == DISCONNECT_STATUS)
- || (priv->connect_status & FORCE_DISCONNECT)
- || priv->wpa.mic_failure.stop) {
+ if (is_disconnect_status(priv->connect_status) ||
+ (priv->connect_status & FORCE_DISCONNECT) ||
+ priv->wpa.mic_failure.stop) {
DPRINTK(3, " DISCONNECT\n");
if (netif_queue_stopped(priv->net_dev))
netif_wake_queue(priv->net_dev);
- if (packet)
- dev_kfree_skb(packet);
+ if (skb)
+ dev_kfree_skb(skb);
return 0;
}
@@ -1143,36 +1168,34 @@ int hostif_data_request(struct ks_wlan_private *priv, struct sk_buff *packet)
netif_stop_queue(priv->net_dev);
}
- DPRINTK(4, "skb_buff length=%d\n", packet_len);
- pp = kmalloc(hif_align_size(sizeof(*pp) + 6 + packet_len + 8),
- KS_WLAN_MEM_FLAG);
-
+ size = sizeof(*pp) + 6 + skb_len + 8;
+ pp = kmalloc(hif_align_size(size), KS_WLAN_MEM_FLAG);
if (!pp) {
- DPRINTK(3, "allocate memory failed..\n");
- dev_kfree_skb(packet);
- return -2;
+ ret = -ENOMEM;
+ goto err_kfree_skb;
}
p = (unsigned char *)pp->data;
- buffer = packet->data;
- length = packet->len;
+ buffer = skb->data;
+ length = skb->len;
- /* packet check */
- eth = (struct ethhdr *)packet->data;
- if (memcmp(&priv->eth_addr[0], eth->h_source, ETH_ALEN)) {
+ /* skb check */
+ eth = (struct ethhdr *)skb->data;
+ if (memcmp(&priv->eth_addr[0], eth->h_source, ETH_ALEN) != 0) {
DPRINTK(1, "invalid mac address !!\n");
DPRINTK(1, "ethernet->h_source=%pM\n", eth->h_source);
- dev_kfree_skb(packet);
- kfree(pp);
- return -3;
+ ret = -ENXIO;
+ goto err_kfree;
}
- /* MAC address copy */
- memcpy(p, buffer, 12); /* DST/SRC MAC address */
- p += 12;
- buffer += 12;
- length -= 12;
+ /* dest and src MAC address copy */
+ size = ETH_ALEN * 2;
+ memcpy(p, buffer, size);
+ p += size;
+ buffer += size;
+ length -= size;
+
/* EtherType/Length check */
if (*(buffer + 1) + (*buffer << 8) > 1500) {
/* ProtocolEAP = *(buffer+1) + (*buffer << 8); */
@@ -1184,13 +1207,13 @@ int hostif_data_request(struct ks_wlan_private *priv, struct sk_buff *packet)
*p++ = 0x00; /* OUI ("000000") */
*p++ = 0x00; /* OUI ("000000") */
*p++ = 0x00; /* OUI ("000000") */
- packet_len += 6;
+ skb_len += 6;
} else {
DPRINTK(4, "DIX\n");
/* Length(2 byte) delete */
buffer += 2;
length -= 2;
- packet_len -= 2;
+ skb_len -= 2;
}
/* pp->data copy */
@@ -1203,8 +1226,8 @@ int hostif_data_request(struct ks_wlan_private *priv, struct sk_buff *packet)
eth_proto = ntohs(eth_hdr->h_proto);
/* for MIC FAILURE REPORT check */
- if (eth_proto == ETHER_PROTOCOL_TYPE_EAP
- && priv->wpa.mic_failure.failure > 0) {
+ if (eth_proto == ETHER_PROTOCOL_TYPE_EAP &&
+ priv->wpa.mic_failure.failure > 0) {
aa1x_hdr = (struct ieee802_1x_hdr *)(eth_hdr + 1);
if (aa1x_hdr->type == IEEE802_1X_TYPE_EAPOL_KEY) {
eap_key = (struct wpa_eapol_key *)(aa1x_hdr + 1);
@@ -1213,53 +1236,54 @@ int hostif_data_request(struct ks_wlan_private *priv, struct sk_buff *packet)
}
if (priv->wpa.rsn_enabled && priv->wpa.key[0].key_len) {
- if (eth_proto == ETHER_PROTOCOL_TYPE_EAP
- && !(priv->wpa.key[1].key_len)
- && !(priv->wpa.key[2].key_len)
- && !(priv->wpa.key[3].key_len)) {
- pp->auth_type = cpu_to_le16((uint16_t) TYPE_AUTH); /* no encryption */
+ if (eth_proto == ETHER_PROTOCOL_TYPE_EAP &&
+ priv->wpa.key[1].key_len == 0 &&
+ priv->wpa.key[2].key_len == 0 &&
+ priv->wpa.key[3].key_len == 0) {
+ pp->auth_type = cpu_to_le16((uint16_t)TYPE_AUTH); /* no encryption */
} else {
if (priv->wpa.pairwise_suite == IW_AUTH_CIPHER_TKIP) {
- MichaelMICFunction(&michel_mic, (uint8_t *) priv->wpa.key[0].tx_mic_key, (uint8_t *) &pp->data[0], (int)packet_len, (uint8_t) 0, /* priority */
- (uint8_t *) michel_mic.
- Result);
- memcpy(p, michel_mic.Result, 8);
+ MichaelMICFunction(&michael_mic,
+ (uint8_t *)priv->wpa.key[0].tx_mic_key,
+ (uint8_t *)&pp->data[0],
+ (int)skb_len,
+ (uint8_t)0, /* priority */
+ (uint8_t *)michael_mic.Result);
+ memcpy(p, michael_mic.Result, 8);
length += 8;
- packet_len += 8;
+ skb_len += 8;
p += 8;
pp->auth_type =
- cpu_to_le16((uint16_t) TYPE_DATA);
+ cpu_to_le16((uint16_t)TYPE_DATA);
} else if (priv->wpa.pairwise_suite ==
IW_AUTH_CIPHER_CCMP) {
pp->auth_type =
- cpu_to_le16((uint16_t) TYPE_DATA);
+ cpu_to_le16((uint16_t)TYPE_DATA);
}
}
} else {
if (eth_proto == ETHER_PROTOCOL_TYPE_EAP)
- pp->auth_type = cpu_to_le16((uint16_t) TYPE_AUTH);
+ pp->auth_type = cpu_to_le16((uint16_t)TYPE_AUTH);
else
- pp->auth_type = cpu_to_le16((uint16_t) TYPE_DATA);
+ pp->auth_type = cpu_to_le16((uint16_t)TYPE_DATA);
}
/* header value set */
pp->header.size =
cpu_to_le16((uint16_t)
- (sizeof(*pp) - sizeof(pp->header.size) + packet_len));
- pp->header.event = cpu_to_le16((uint16_t) HIF_DATA_REQ);
+ (sizeof(*pp) - sizeof(pp->header.size) + skb_len));
+ pp->header.event = cpu_to_le16((uint16_t)HIF_DATA_REQ);
/* tx request */
- result =
- ks_wlan_hw_tx(priv, pp, hif_align_size(sizeof(*pp) + packet_len),
- (void *)send_packet_complete, (void *)priv,
- (void *)packet);
+ result = ks_wlan_hw_tx(priv, pp, hif_align_size(sizeof(*pp) + skb_len),
+ send_packet_complete, skb);
/* MIC FAILURE REPORT check */
- if (eth_proto == ETHER_PROTOCOL_TYPE_EAP
- && priv->wpa.mic_failure.failure > 0) {
- if (keyinfo & WPA_KEY_INFO_ERROR
- && keyinfo & WPA_KEY_INFO_REQUEST) {
+ if (eth_proto == ETHER_PROTOCOL_TYPE_EAP &&
+ priv->wpa.mic_failure.failure > 0) {
+ if (keyinfo & WPA_KEY_INFO_ERROR &&
+ keyinfo & WPA_KEY_INFO_REQUEST) {
DPRINTK(3, " MIC ERROR Report SET : %04X\n", keyinfo);
hostif_sme_enqueue(priv, SME_MIC_FAILURE_REQUEST);
}
@@ -1268,13 +1292,20 @@ int hostif_data_request(struct ks_wlan_private *priv, struct sk_buff *packet)
}
return result;
+
+err_kfree:
+ kfree(pp);
+err_kfree_skb:
+ dev_kfree_skb(skb);
+
+ return ret;
}
-#define ps_confirm_wait_inc(priv) do { \
- if (atomic_read(&priv->psstatus.status) > PS_ACTIVE_SET) { \
- atomic_inc(&priv->psstatus.confirm_wait); \
- /* atomic_set(&priv->psstatus.status, PS_CONF_WAIT);*/ \
- } } while (0)
+#define ps_confirm_wait_inc(priv) \
+ do { \
+ if (atomic_read(&priv->psstatus.status) > PS_ACTIVE_SET) \
+ atomic_inc(&priv->psstatus.confirm_wait); \
+ } while (0)
static
void hostif_mib_get_request(struct ks_wlan_private *priv,
@@ -1284,20 +1315,15 @@ void hostif_mib_get_request(struct ks_wlan_private *priv,
DPRINTK(3, "\n");
- /* make primitive */
- pp = kmalloc(hif_align_size(sizeof(*pp)), KS_WLAN_MEM_FLAG);
- if (!pp) {
- DPRINTK(3, "allocate memory failed..\n");
+ pp = hostif_generic_request(sizeof(*pp), HIF_MIB_GET_REQ);
+ if (!pp)
return;
- }
- pp->header.size =
- cpu_to_le16((uint16_t) (sizeof(*pp) - sizeof(pp->header.size)));
- pp->header.event = cpu_to_le16((uint16_t) HIF_MIB_GET_REQ);
- pp->mib_attribute = cpu_to_le32((uint32_t) mib_attribute);
+
+ pp->mib_attribute = cpu_to_le32((uint32_t)mib_attribute);
/* send to device request */
ps_confirm_wait_inc(priv);
- ks_wlan_hw_tx(priv, pp, hif_align_size(sizeof(*pp)), NULL, NULL, NULL);
+ ks_wlan_hw_tx(priv, pp, hif_align_size(sizeof(*pp)), NULL, NULL);
}
static
@@ -1314,26 +1340,18 @@ void hostif_mib_set_request(struct ks_wlan_private *priv,
return;
}
- /* make primitive */
- pp = kmalloc(hif_align_size(sizeof(*pp) + size), KS_WLAN_MEM_FLAG);
- if (!pp) {
- DPRINTK(3, "allocate memory failed..\n");
+ pp = hostif_generic_request(sizeof(*pp), HIF_MIB_SET_REQ);
+ if (!pp)
return;
- }
- pp->header.size =
- cpu_to_le16((uint16_t)
- (sizeof(*pp) - sizeof(pp->header.size) + size));
- pp->header.event = cpu_to_le16((uint16_t) HIF_MIB_SET_REQ);
- pp->mib_attribute = cpu_to_le32((uint32_t) mib_attribute);
- pp->mib_value.size = cpu_to_le16((uint16_t) size);
- pp->mib_value.type = cpu_to_le16((uint16_t) type);
+ pp->mib_attribute = cpu_to_le32((uint32_t)mib_attribute);
+ pp->mib_value.size = cpu_to_le16((uint16_t)size);
+ pp->mib_value.type = cpu_to_le16((uint16_t)type);
memcpy(&pp->mib_value.body, vp, size);
/* send to device request */
ps_confirm_wait_inc(priv);
- ks_wlan_hw_tx(priv, pp, hif_align_size(sizeof(*pp) + size), NULL, NULL,
- NULL);
+ ks_wlan_hw_tx(priv, pp, hif_align_size(sizeof(*pp) + size), NULL, NULL);
}
static
@@ -1343,20 +1361,15 @@ void hostif_start_request(struct ks_wlan_private *priv, unsigned char mode)
DPRINTK(3, "\n");
- /* make primitive */
- pp = kmalloc(hif_align_size(sizeof(*pp)), KS_WLAN_MEM_FLAG);
- if (!pp) {
- DPRINTK(3, "allocate memory failed..\n");
+ pp = hostif_generic_request(sizeof(*pp), HIF_START_REQ);
+ if (!pp)
return;
- }
- pp->header.size =
- cpu_to_le16((uint16_t) (sizeof(*pp) - sizeof(pp->header.size)));
- pp->header.event = cpu_to_le16((uint16_t) HIF_START_REQ);
- pp->mode = cpu_to_le16((uint16_t) mode);
+
+ pp->mode = cpu_to_le16((uint16_t)mode);
/* send to device request */
ps_confirm_wait_inc(priv);
- ks_wlan_hw_tx(priv, pp, hif_align_size(sizeof(*pp)), NULL, NULL, NULL);
+ ks_wlan_hw_tx(priv, pp, hif_align_size(sizeof(*pp)), NULL, NULL);
priv->aplist.size = 0;
priv->scan_ind_count = 0;
@@ -1366,24 +1379,18 @@ static
void hostif_ps_adhoc_set_request(struct ks_wlan_private *priv)
{
struct hostif_ps_adhoc_set_request_t *pp;
- uint16_t capability;
+ u16 capability;
DPRINTK(3, "\n");
- /* make primitive */
- pp = kmalloc(hif_align_size(sizeof(*pp)), KS_WLAN_MEM_FLAG);
- if (!pp) {
- DPRINTK(3, "allocate memory failed..\n");
+ pp = hostif_generic_request(sizeof(*pp), HIF_PS_ADH_SET_REQ);
+ if (!pp)
return;
- }
- memset(pp, 0, sizeof(*pp));
- pp->header.size =
- cpu_to_le16((uint16_t) (sizeof(*pp) - sizeof(pp->header.size)));
- pp->header.event = cpu_to_le16((uint16_t) HIF_PS_ADH_SET_REQ);
- pp->phy_type = cpu_to_le16((uint16_t) (priv->reg.phy_type));
- pp->cts_mode = cpu_to_le16((uint16_t) (priv->reg.cts_mode));
- pp->scan_type = cpu_to_le16((uint16_t) (priv->reg.scan_type));
- pp->channel = cpu_to_le16((uint16_t) (priv->reg.channel));
+
+ pp->phy_type = cpu_to_le16((uint16_t)(priv->reg.phy_type));
+ pp->cts_mode = cpu_to_le16((uint16_t)(priv->reg.cts_mode));
+ pp->scan_type = cpu_to_le16((uint16_t)(priv->reg.scan_type));
+ pp->channel = cpu_to_le16((uint16_t)(priv->reg.channel));
pp->rate_set.size = priv->reg.rate_set.size;
memcpy(&pp->rate_set.body[0], &priv->reg.rate_set.body[0],
priv->reg.rate_set.size);
@@ -1398,33 +1405,28 @@ void hostif_ps_adhoc_set_request(struct ks_wlan_private *priv)
capability |= BSS_CAP_SHORT_SLOT_TIME; /* ShortSlotTime support */
capability &= ~(BSS_CAP_DSSS_OFDM); /* DSSS OFDM */
}
- pp->capability = cpu_to_le16((uint16_t) capability);
+ pp->capability = cpu_to_le16((uint16_t)capability);
/* send to device request */
ps_confirm_wait_inc(priv);
- ks_wlan_hw_tx(priv, pp, hif_align_size(sizeof(*pp)), NULL, NULL, NULL);
+ ks_wlan_hw_tx(priv, pp, hif_align_size(sizeof(*pp)), NULL, NULL);
}
static
void hostif_infrastructure_set_request(struct ks_wlan_private *priv)
{
struct hostif_infrastructure_set_request_t *pp;
- uint16_t capability;
+ u16 capability;
DPRINTK(3, "ssid.size=%d\n", priv->reg.ssid.size);
- /* make primitive */
- pp = kmalloc(hif_align_size(sizeof(*pp)), KS_WLAN_MEM_FLAG);
- if (!pp) {
- DPRINTK(3, "allocate memory failed..\n");
+ pp = hostif_generic_request(sizeof(*pp), HIF_INFRA_SET_REQ);
+ if (!pp)
return;
- }
- pp->header.size =
- cpu_to_le16((uint16_t) (sizeof(*pp) - sizeof(pp->header.size)));
- pp->header.event = cpu_to_le16((uint16_t) HIF_INFRA_SET_REQ);
- pp->phy_type = cpu_to_le16((uint16_t) (priv->reg.phy_type));
- pp->cts_mode = cpu_to_le16((uint16_t) (priv->reg.cts_mode));
- pp->scan_type = cpu_to_le16((uint16_t) (priv->reg.scan_type));
+
+ pp->phy_type = cpu_to_le16((uint16_t)(priv->reg.phy_type));
+ pp->cts_mode = cpu_to_le16((uint16_t)(priv->reg.cts_mode));
+ pp->scan_type = cpu_to_le16((uint16_t)(priv->reg.scan_type));
pp->rate_set.size = priv->reg.rate_set.size;
memcpy(&pp->rate_set.body[0], &priv->reg.rate_set.body[0],
@@ -1442,10 +1444,10 @@ void hostif_infrastructure_set_request(struct ks_wlan_private *priv)
capability |= BSS_CAP_SHORT_SLOT_TIME; /* ShortSlotTime support */
capability &= ~(BSS_CAP_DSSS_OFDM); /* DSSS OFDM not support */
}
- pp->capability = cpu_to_le16((uint16_t) capability);
+ pp->capability = cpu_to_le16((uint16_t)capability);
pp->beacon_lost_count =
- cpu_to_le16((uint16_t) (priv->reg.beacon_lost_count));
- pp->auth_type = cpu_to_le16((uint16_t) (priv->reg.authenticate_type));
+ cpu_to_le16((uint16_t)(priv->reg.beacon_lost_count));
+ pp->auth_type = cpu_to_le16((uint16_t)(priv->reg.authenticate_type));
pp->channel_list.body[0] = 1;
pp->channel_list.body[1] = 8;
@@ -1469,28 +1471,23 @@ void hostif_infrastructure_set_request(struct ks_wlan_private *priv)
/* send to device request */
ps_confirm_wait_inc(priv);
- ks_wlan_hw_tx(priv, pp, hif_align_size(sizeof(*pp)), NULL, NULL, NULL);
+ ks_wlan_hw_tx(priv, pp, hif_align_size(sizeof(*pp)), NULL, NULL);
}
static void hostif_infrastructure_set2_request(struct ks_wlan_private *priv)
{
struct hostif_infrastructure_set2_request_t *pp;
- uint16_t capability;
+ u16 capability;
DPRINTK(2, "ssid.size=%d\n", priv->reg.ssid.size);
- /* make primitive */
- pp = kmalloc(hif_align_size(sizeof(*pp)), KS_WLAN_MEM_FLAG);
- if (!pp) {
- DPRINTK(3, "allocate memory failed..\n");
+ pp = hostif_generic_request(sizeof(*pp), HIF_INFRA_SET2_REQ);
+ if (!pp)
return;
- }
- pp->header.size =
- cpu_to_le16((uint16_t) (sizeof(*pp) - sizeof(pp->header.size)));
- pp->header.event = cpu_to_le16((uint16_t) HIF_INFRA_SET2_REQ);
- pp->phy_type = cpu_to_le16((uint16_t) (priv->reg.phy_type));
- pp->cts_mode = cpu_to_le16((uint16_t) (priv->reg.cts_mode));
- pp->scan_type = cpu_to_le16((uint16_t) (priv->reg.scan_type));
+
+ pp->phy_type = cpu_to_le16((uint16_t)(priv->reg.phy_type));
+ pp->cts_mode = cpu_to_le16((uint16_t)(priv->reg.cts_mode));
+ pp->scan_type = cpu_to_le16((uint16_t)(priv->reg.scan_type));
pp->rate_set.size = priv->reg.rate_set.size;
memcpy(&pp->rate_set.body[0], &priv->reg.rate_set.body[0],
@@ -1508,10 +1505,10 @@ static void hostif_infrastructure_set2_request(struct ks_wlan_private *priv)
capability |= BSS_CAP_SHORT_SLOT_TIME; /* ShortSlotTime support */
capability &= ~(BSS_CAP_DSSS_OFDM); /* DSSS OFDM not support */
}
- pp->capability = cpu_to_le16((uint16_t) capability);
+ pp->capability = cpu_to_le16((uint16_t)capability);
pp->beacon_lost_count =
- cpu_to_le16((uint16_t) (priv->reg.beacon_lost_count));
- pp->auth_type = cpu_to_le16((uint16_t) (priv->reg.authenticate_type));
+ cpu_to_le16((uint16_t)(priv->reg.beacon_lost_count));
+ pp->auth_type = cpu_to_le16((uint16_t)(priv->reg.authenticate_type));
pp->channel_list.body[0] = 1;
pp->channel_list.body[1] = 8;
@@ -1537,31 +1534,25 @@ static void hostif_infrastructure_set2_request(struct ks_wlan_private *priv)
/* send to device request */
ps_confirm_wait_inc(priv);
- ks_wlan_hw_tx(priv, pp, hif_align_size(sizeof(*pp)), NULL, NULL, NULL);
+ ks_wlan_hw_tx(priv, pp, hif_align_size(sizeof(*pp)), NULL, NULL);
}
static
void hostif_adhoc_set_request(struct ks_wlan_private *priv)
{
struct hostif_adhoc_set_request_t *pp;
- uint16_t capability;
+ u16 capability;
DPRINTK(3, "\n");
- /* make primitive */
- pp = kmalloc(hif_align_size(sizeof(*pp)), KS_WLAN_MEM_FLAG);
- if (!pp) {
- DPRINTK(3, "allocate memory failed..\n");
+ pp = hostif_generic_request(sizeof(*pp), HIF_ADH_SET_REQ);
+ if (!pp)
return;
- }
- memset(pp, 0, sizeof(*pp));
- pp->header.size =
- cpu_to_le16((uint16_t) (sizeof(*pp) - sizeof(pp->header.size)));
- pp->header.event = cpu_to_le16((uint16_t) HIF_ADH_SET_REQ);
- pp->phy_type = cpu_to_le16((uint16_t) (priv->reg.phy_type));
- pp->cts_mode = cpu_to_le16((uint16_t) (priv->reg.cts_mode));
- pp->scan_type = cpu_to_le16((uint16_t) (priv->reg.scan_type));
- pp->channel = cpu_to_le16((uint16_t) (priv->reg.channel));
+
+ pp->phy_type = cpu_to_le16((uint16_t)(priv->reg.phy_type));
+ pp->cts_mode = cpu_to_le16((uint16_t)(priv->reg.cts_mode));
+ pp->scan_type = cpu_to_le16((uint16_t)(priv->reg.scan_type));
+ pp->channel = cpu_to_le16((uint16_t)(priv->reg.channel));
pp->rate_set.size = priv->reg.rate_set.size;
memcpy(&pp->rate_set.body[0], &priv->reg.rate_set.body[0],
priv->reg.rate_set.size);
@@ -1578,34 +1569,28 @@ void hostif_adhoc_set_request(struct ks_wlan_private *priv)
capability |= BSS_CAP_SHORT_SLOT_TIME; /* ShortSlotTime support */
capability &= ~(BSS_CAP_DSSS_OFDM); /* DSSS OFDM not support */
}
- pp->capability = cpu_to_le16((uint16_t) capability);
+ pp->capability = cpu_to_le16((uint16_t)capability);
/* send to device request */
ps_confirm_wait_inc(priv);
- ks_wlan_hw_tx(priv, pp, hif_align_size(sizeof(*pp)), NULL, NULL, NULL);
+ ks_wlan_hw_tx(priv, pp, hif_align_size(sizeof(*pp)), NULL, NULL);
}
static
void hostif_adhoc_set2_request(struct ks_wlan_private *priv)
{
struct hostif_adhoc_set2_request_t *pp;
- uint16_t capability;
+ u16 capability;
DPRINTK(3, "\n");
- /* make primitive */
- pp = kmalloc(hif_align_size(sizeof(*pp)), KS_WLAN_MEM_FLAG);
- if (!pp) {
- DPRINTK(3, "allocate memory failed..\n");
+ pp = hostif_generic_request(sizeof(*pp), HIF_ADH_SET_REQ);
+ if (!pp)
return;
- }
- memset(pp, 0, sizeof(*pp));
- pp->header.size =
- cpu_to_le16((uint16_t) (sizeof(*pp) - sizeof(pp->header.size)));
- pp->header.event = cpu_to_le16((uint16_t) HIF_ADH_SET_REQ);
- pp->phy_type = cpu_to_le16((uint16_t) (priv->reg.phy_type));
- pp->cts_mode = cpu_to_le16((uint16_t) (priv->reg.cts_mode));
- pp->scan_type = cpu_to_le16((uint16_t) (priv->reg.scan_type));
+
+ pp->phy_type = cpu_to_le16((uint16_t)(priv->reg.phy_type));
+ pp->cts_mode = cpu_to_le16((uint16_t)(priv->reg.cts_mode));
+ pp->scan_type = cpu_to_le16((uint16_t)(priv->reg.scan_type));
pp->rate_set.size = priv->reg.rate_set.size;
memcpy(&pp->rate_set.body[0], &priv->reg.rate_set.body[0],
priv->reg.rate_set.size);
@@ -1622,7 +1607,7 @@ void hostif_adhoc_set2_request(struct ks_wlan_private *priv)
capability |= BSS_CAP_SHORT_SLOT_TIME; /* ShortSlotTime support */
capability &= ~(BSS_CAP_DSSS_OFDM); /* DSSS OFDM not support */
}
- pp->capability = cpu_to_le16((uint16_t) capability);
+ pp->capability = cpu_to_le16((uint16_t)capability);
pp->channel_list.body[0] = priv->reg.channel;
pp->channel_list.size = 1;
@@ -1630,7 +1615,7 @@ void hostif_adhoc_set2_request(struct ks_wlan_private *priv)
/* send to device request */
ps_confirm_wait_inc(priv);
- ks_wlan_hw_tx(priv, pp, hif_align_size(sizeof(*pp)), NULL, NULL, NULL);
+ ks_wlan_hw_tx(priv, pp, hif_align_size(sizeof(*pp)), NULL, NULL);
}
static
@@ -1640,19 +1625,13 @@ void hostif_stop_request(struct ks_wlan_private *priv)
DPRINTK(3, "\n");
- /* make primitive */
- pp = kmalloc(hif_align_size(sizeof(*pp)), KS_WLAN_MEM_FLAG);
- if (!pp) {
- DPRINTK(3, "allocate memory failed..\n");
+ pp = hostif_generic_request(sizeof(*pp), HIF_STOP_REQ);
+ if (!pp)
return;
- }
- pp->header.size =
- cpu_to_le16((uint16_t) (sizeof(*pp) - sizeof(pp->header.size)));
- pp->header.event = cpu_to_le16((uint16_t) HIF_STOP_REQ);
/* send to device request */
ps_confirm_wait_inc(priv);
- ks_wlan_hw_tx(priv, pp, hif_align_size(sizeof(*pp)), NULL, NULL, NULL);
+ ks_wlan_hw_tx(priv, pp, hif_align_size(sizeof(*pp)), NULL, NULL);
}
static
@@ -1662,84 +1641,68 @@ void hostif_phy_information_request(struct ks_wlan_private *priv)
DPRINTK(3, "\n");
- /* make primitive */
- pp = kmalloc(hif_align_size(sizeof(*pp)), KS_WLAN_MEM_FLAG);
- if (!pp) {
- DPRINTK(3, "allocate memory failed..\n");
+ pp = hostif_generic_request(sizeof(*pp), HIF_PHY_INFO_REQ);
+ if (!pp)
return;
- }
- pp->header.size =
- cpu_to_le16((uint16_t) (sizeof(*pp) - sizeof(pp->header.size)));
- pp->header.event = cpu_to_le16((uint16_t) HIF_PHY_INFO_REQ);
+
if (priv->reg.phy_info_timer) {
- pp->type = cpu_to_le16((uint16_t) TIME_TYPE);
- pp->time = cpu_to_le16((uint16_t) (priv->reg.phy_info_timer));
+ pp->type = cpu_to_le16((uint16_t)TIME_TYPE);
+ pp->time = cpu_to_le16((uint16_t)(priv->reg.phy_info_timer));
} else {
- pp->type = cpu_to_le16((uint16_t) NORMAL_TYPE);
- pp->time = cpu_to_le16((uint16_t) 0);
+ pp->type = cpu_to_le16((uint16_t)NORMAL_TYPE);
+ pp->time = cpu_to_le16((uint16_t)0);
}
/* send to device request */
ps_confirm_wait_inc(priv);
- ks_wlan_hw_tx(priv, pp, hif_align_size(sizeof(*pp)), NULL, NULL, NULL);
+ ks_wlan_hw_tx(priv, pp, hif_align_size(sizeof(*pp)), NULL, NULL);
}
static
-void hostif_power_mngmt_request(struct ks_wlan_private *priv,
- unsigned long mode, unsigned long wake_up,
- unsigned long receiveDTIMs)
+void hostif_power_mgmt_request(struct ks_wlan_private *priv,
+ unsigned long mode, unsigned long wake_up,
+ unsigned long receiveDTIMs)
{
- struct hostif_power_mngmt_request_t *pp;
+ struct hostif_power_mgmt_request_t *pp;
DPRINTK(3, "mode=%lu wake_up=%lu receiveDTIMs=%lu\n", mode, wake_up,
receiveDTIMs);
- /* make primitive */
- pp = kmalloc(hif_align_size(sizeof(*pp)), KS_WLAN_MEM_FLAG);
- if (!pp) {
- DPRINTK(3, "allocate memory failed..\n");
+
+ pp = hostif_generic_request(sizeof(*pp), HIF_POWER_MGMT_REQ);
+ if (!pp)
return;
- }
- pp->header.size =
- cpu_to_le16((uint16_t) (sizeof(*pp) - sizeof(pp->header.size)));
- pp->header.event = cpu_to_le16((uint16_t) HIF_POWERMGT_REQ);
- pp->mode = cpu_to_le32((uint32_t) mode);
- pp->wake_up = cpu_to_le32((uint32_t) wake_up);
- pp->receiveDTIMs = cpu_to_le32((uint32_t) receiveDTIMs);
+
+ pp->mode = cpu_to_le32((uint32_t)mode);
+ pp->wake_up = cpu_to_le32((uint32_t)wake_up);
+ pp->receiveDTIMs = cpu_to_le32((uint32_t)receiveDTIMs);
/* send to device request */
ps_confirm_wait_inc(priv);
- ks_wlan_hw_tx(priv, pp, hif_align_size(sizeof(*pp)), NULL, NULL, NULL);
+ ks_wlan_hw_tx(priv, pp, hif_align_size(sizeof(*pp)), NULL, NULL);
}
static
-void hostif_sleep_request(struct ks_wlan_private *priv, unsigned long mode)
+void hostif_sleep_request(struct ks_wlan_private *priv,
+ enum sleep_mode_type mode)
{
struct hostif_sleep_request_t *pp;
- DPRINTK(3, "mode=%lu\n", mode);
+ DPRINTK(3, "mode=%lu\n", (long)mode);
if (mode == SLP_SLEEP) {
- /* make primitive */
- pp = kmalloc(hif_align_size(sizeof(*pp)), KS_WLAN_MEM_FLAG);
- if (!pp) {
- DPRINTK(3, "allocate memory failed..\n");
+ pp = hostif_generic_request(sizeof(*pp), HIF_SLEEP_REQ);
+ if (!pp)
return;
- }
- pp->header.size =
- cpu_to_le16((uint16_t)
- (sizeof(*pp) - sizeof(pp->header.size)));
- pp->header.event = cpu_to_le16((uint16_t) HIF_SLEEP_REQ);
/* send to device request */
ps_confirm_wait_inc(priv);
- ks_wlan_hw_tx(priv, pp, hif_align_size(sizeof(*pp)), NULL, NULL,
+ ks_wlan_hw_tx(priv, pp, hif_align_size(sizeof(*pp)), NULL,
NULL);
} else if (mode == SLP_ACTIVE) {
atomic_set(&priv->sleepstatus.wakeup_request, 1);
- queue_delayed_work(priv->ks_wlan_hw.ks7010sdio_wq,
- &priv->ks_wlan_hw.rw_wq, 1);
+ queue_delayed_work(priv->wq, &priv->rw_dwork, 1);
} else {
- DPRINTK(3, "invalid mode %ld\n", mode);
+ DPRINTK(3, "invalid mode %ld\n", (long)mode);
return;
}
}
@@ -1752,19 +1715,15 @@ void hostif_bss_scan_request(struct ks_wlan_private *priv,
struct hostif_bss_scan_request_t *pp;
DPRINTK(2, "\n");
- /* make primitive */
- pp = kmalloc(hif_align_size(sizeof(*pp)), KS_WLAN_MEM_FLAG);
- if (!pp) {
- DPRINTK(3, "allocate memory failed..\n");
+
+ pp = hostif_generic_request(sizeof(*pp), HIF_SCAN_REQ);
+ if (!pp)
return;
- }
- pp->header.size =
- cpu_to_le16((uint16_t) (sizeof(*pp) - sizeof(pp->header.size)));
- pp->header.event = cpu_to_le16((uint16_t) HIF_SCAN_REQ);
+
pp->scan_type = scan_type;
- pp->ch_time_min = cpu_to_le32((uint32_t) 110); /* default value */
- pp->ch_time_max = cpu_to_le32((uint32_t) 130); /* default value */
+ pp->ch_time_min = cpu_to_le32((uint32_t)110); /* default value */
+ pp->ch_time_max = cpu_to_le32((uint32_t)130); /* default value */
pp->channel_list.body[0] = 1;
pp->channel_list.body[1] = 8;
pp->channel_list.body[2] = 2;
@@ -1794,7 +1753,7 @@ void hostif_bss_scan_request(struct ks_wlan_private *priv,
/* send to device request */
ps_confirm_wait_inc(priv);
- ks_wlan_hw_tx(priv, pp, hif_align_size(sizeof(*pp)), NULL, NULL, NULL);
+ ks_wlan_hw_tx(priv, pp, hif_align_size(sizeof(*pp)), NULL, NULL);
priv->aplist.size = 0;
priv->scan_ind_count = 0;
@@ -1808,21 +1767,17 @@ void hostif_mic_failure_request(struct ks_wlan_private *priv,
struct hostif_mic_failure_request_t *pp;
DPRINTK(3, "count=%d :: timer=%d\n", failure_count, timer);
- /* make primitive */
- pp = kmalloc(hif_align_size(sizeof(*pp)), KS_WLAN_MEM_FLAG);
- if (!pp) {
- DPRINTK(3, "allocate memory failed..\n");
+
+ pp = hostif_generic_request(sizeof(*pp), HIF_MIC_FAILURE_REQ);
+ if (!pp)
return;
- }
- pp->header.size =
- cpu_to_le16((uint16_t) (sizeof(*pp) - sizeof(pp->header.size)));
- pp->header.event = cpu_to_le16((uint16_t) HIF_MIC_FAILURE_REQ);
- pp->failure_count = cpu_to_le16((uint16_t) failure_count);
- pp->timer = cpu_to_le16((uint16_t) timer);
+
+ pp->failure_count = cpu_to_le16((uint16_t)failure_count);
+ pp->timer = cpu_to_le16((uint16_t)timer);
/* send to device request */
ps_confirm_wait_inc(priv);
- ks_wlan_hw_tx(priv, pp, hif_align_size(sizeof(*pp)), NULL, NULL, NULL);
+ ks_wlan_hw_tx(priv, pp, hif_align_size(sizeof(*pp)), NULL, NULL);
}
/* Device I/O Receive indicate */
@@ -1867,11 +1822,11 @@ void hostif_receive(struct ks_wlan_private *priv, unsigned char *p,
static
void hostif_sme_set_wep(struct ks_wlan_private *priv, int type)
{
- uint32_t val;
+ u32 val;
switch (type) {
case SME_WEP_INDEX_REQUEST:
- val = cpu_to_le32((uint32_t) (priv->reg.wep_index));
+ val = cpu_to_le32((uint32_t)(priv->reg.wep_index));
hostif_mib_set_request(priv, DOT11_WEP_DEFAULT_KEY_ID,
sizeof(val), MIB_VALUE_TYPE_INT, &val);
break;
@@ -1908,7 +1863,7 @@ void hostif_sme_set_wep(struct ks_wlan_private *priv, int type)
&priv->reg.wep_key[3].val[0]);
break;
case SME_WEP_FLAG_REQUEST:
- val = cpu_to_le32((uint32_t) (priv->reg.privacy_invoked));
+ val = cpu_to_le32((uint32_t)(priv->reg.privacy_invoked));
hostif_mib_set_request(priv, DOT11_PRIVACY_INVOKED,
sizeof(val), MIB_VALUE_TYPE_BOOL, &val);
break;
@@ -1921,8 +1876,8 @@ struct wpa_suite_t {
} __packed;
struct rsn_mode_t {
- uint32_t rsn_mode;
- uint16_t rsn_capability;
+ u32 rsn_mode;
+ u16 rsn_capability;
} __packed;
static
@@ -1930,13 +1885,13 @@ void hostif_sme_set_rsn(struct ks_wlan_private *priv, int type)
{
struct wpa_suite_t wpa_suite;
struct rsn_mode_t rsn_mode;
- uint32_t val;
+ u32 val;
memset(&wpa_suite, 0, sizeof(wpa_suite));
switch (type) {
case SME_RSN_UCAST_REQUEST:
- wpa_suite.size = cpu_to_le16((uint16_t) 1);
+ wpa_suite.size = cpu_to_le16((uint16_t)1);
switch (priv->wpa.pairwise_suite) {
case IW_AUTH_CIPHER_NONE:
if (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2)
@@ -2034,7 +1989,7 @@ void hostif_sme_set_rsn(struct ks_wlan_private *priv, int type)
&wpa_suite.suite[0][0]);
break;
case SME_RSN_AUTH_REQUEST:
- wpa_suite.size = cpu_to_le16((uint16_t) 1);
+ wpa_suite.size = cpu_to_le16((uint16_t)1);
switch (priv->wpa.key_mgmt_suite) {
case IW_AUTH_KEY_MGMT_802_1X:
if (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2)
@@ -2078,23 +2033,23 @@ void hostif_sme_set_rsn(struct ks_wlan_private *priv, int type)
MIB_VALUE_TYPE_OSTRING, &wpa_suite);
break;
case SME_RSN_ENABLED_REQUEST:
- val = cpu_to_le32((uint32_t) (priv->wpa.rsn_enabled));
+ val = cpu_to_le32((uint32_t)(priv->wpa.rsn_enabled));
hostif_mib_set_request(priv, DOT11_RSN_ENABLED,
sizeof(val), MIB_VALUE_TYPE_BOOL, &val);
break;
case SME_RSN_MODE_REQUEST:
if (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2) {
rsn_mode.rsn_mode =
- cpu_to_le32((uint32_t) RSN_MODE_WPA2);
- rsn_mode.rsn_capability = cpu_to_le16((uint16_t) 0);
+ cpu_to_le32((uint32_t)RSN_MODE_WPA2);
+ rsn_mode.rsn_capability = cpu_to_le16((uint16_t)0);
} else if (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA) {
rsn_mode.rsn_mode =
- cpu_to_le32((uint32_t) RSN_MODE_WPA);
- rsn_mode.rsn_capability = cpu_to_le16((uint16_t) 0);
+ cpu_to_le32((uint32_t)RSN_MODE_WPA);
+ rsn_mode.rsn_capability = cpu_to_le16((uint16_t)0);
} else {
rsn_mode.rsn_mode =
- cpu_to_le32((uint32_t) RSN_MODE_NONE);
- rsn_mode.rsn_capability = cpu_to_le16((uint16_t) 0);
+ cpu_to_le32((uint32_t)RSN_MODE_NONE);
+ rsn_mode.rsn_capability = cpu_to_le16((uint16_t)0);
}
hostif_mib_set_request(priv, LOCAL_RSN_MODE, sizeof(rsn_mode),
MIB_VALUE_TYPE_OSTRING, &rsn_mode);
@@ -2137,32 +2092,28 @@ void hostif_sme_mode_setup(struct ks_wlan_private *priv)
/* rate mask by phy setting */
if (priv->reg.phy_type == D_11B_ONLY_MODE) {
for (i = 0; i < priv->reg.rate_set.size; i++) {
- if (IS_11B_RATE(priv->reg.rate_set.body[i])) {
- if ((priv->reg.rate_set.body[i] & RATE_MASK) >=
- TX_RATE_5M)
- rate_octet[i] =
- priv->reg.rate_set.
- body[i] & RATE_MASK;
- else
- rate_octet[i] =
- priv->reg.rate_set.body[i];
- } else
+ if (!IS_11B_RATE(priv->reg.rate_set.body[i]))
break;
+
+ if ((priv->reg.rate_set.body[i] & RATE_MASK) >= TX_RATE_5M) {
+ rate_octet[i] = priv->reg.rate_set.body[i] &
+ RATE_MASK;
+ } else {
+ rate_octet[i] = priv->reg.rate_set.body[i];
+ }
}
} else { /* D_11G_ONLY_MODE or D_11BG_COMPATIBLE_MODE */
for (i = 0; i < priv->reg.rate_set.size; i++) {
- if (IS_11BG_RATE(priv->reg.rate_set.body[i])) {
- if (IS_OFDM_EXT_RATE
- (priv->reg.rate_set.body[i]))
- rate_octet[i] =
- priv->reg.rate_set.
- body[i] & RATE_MASK;
- else
- rate_octet[i] =
- priv->reg.rate_set.body[i];
- } else
+ if (!IS_11BG_RATE(priv->reg.rate_set.body[i]))
break;
+
+ if (IS_OFDM_EXT_RATE(priv->reg.rate_set.body[i])) {
+ rate_octet[i] = priv->reg.rate_set.body[i] &
+ RATE_MASK;
+ } else {
+ rate_octet[i] = priv->reg.rate_set.body[i];
+ }
}
}
rate_size = i;
@@ -2185,7 +2136,7 @@ void hostif_sme_mode_setup(struct ks_wlan_private *priv)
break;
case MODE_INFRASTRUCTURE:
/* Infrastructure mode */
- if (!is_valid_ether_addr((u8 *) priv->reg.bssid)) {
+ if (!is_valid_ether_addr((u8 *)priv->reg.bssid)) {
hostif_infrastructure_set_request(priv);
} else {
hostif_infrastructure_set2_request(priv);
@@ -2195,7 +2146,7 @@ void hostif_sme_mode_setup(struct ks_wlan_private *priv)
break;
case MODE_ADHOC:
/* IEEE802.11 Ad-Hoc mode */
- if (!is_valid_ether_addr((u8 *) priv->reg.bssid)) {
+ if (!is_valid_ether_addr((u8 *)priv->reg.bssid)) {
hostif_adhoc_set_request(priv);
} else {
hostif_adhoc_set2_request(priv);
@@ -2225,56 +2176,58 @@ void hostif_sme_multicast_set(struct ks_wlan_private *priv)
memset(set_address, 0, NIC_MAX_MCAST_LIST * ETH_ALEN);
if (dev->flags & IFF_PROMISC) {
- filter_type = cpu_to_le32((uint32_t) MCAST_FILTER_PROMISC);
+ filter_type = cpu_to_le32((uint32_t)MCAST_FILTER_PROMISC);
hostif_mib_set_request(priv, LOCAL_MULTICAST_FILTER,
sizeof(filter_type), MIB_VALUE_TYPE_BOOL,
&filter_type);
- } else if ((netdev_mc_count(dev) > NIC_MAX_MCAST_LIST)
- || (dev->flags & IFF_ALLMULTI)) {
- filter_type = cpu_to_le32((uint32_t) MCAST_FILTER_MCASTALL);
+ goto spin_unlock;
+ }
+
+ if ((netdev_mc_count(dev) > NIC_MAX_MCAST_LIST) ||
+ (dev->flags & IFF_ALLMULTI)) {
+ filter_type = cpu_to_le32((uint32_t)MCAST_FILTER_MCASTALL);
hostif_mib_set_request(priv, LOCAL_MULTICAST_FILTER,
sizeof(filter_type), MIB_VALUE_TYPE_BOOL,
&filter_type);
- } else {
- if (priv->sme_i.sme_flag & SME_MULTICAST) {
- mc_count = netdev_mc_count(dev);
- netdev_for_each_mc_addr(ha, dev) {
- memcpy(&set_address[i * ETH_ALEN], ha->addr,
- ETH_ALEN);
- i++;
- }
- priv->sme_i.sme_flag &= ~SME_MULTICAST;
- hostif_mib_set_request(priv, LOCAL_MULTICAST_ADDRESS,
- (ETH_ALEN * mc_count),
- MIB_VALUE_TYPE_OSTRING,
- &set_address[0]);
- } else {
- filter_type =
- cpu_to_le32((uint32_t) MCAST_FILTER_MCAST);
- priv->sme_i.sme_flag |= SME_MULTICAST;
- hostif_mib_set_request(priv, LOCAL_MULTICAST_FILTER,
- sizeof(filter_type),
- MIB_VALUE_TYPE_BOOL,
- &filter_type);
+ goto spin_unlock;
+ }
+
+ if (priv->sme_i.sme_flag & SME_MULTICAST) {
+ mc_count = netdev_mc_count(dev);
+ netdev_for_each_mc_addr(ha, dev) {
+ memcpy(&set_address[i * ETH_ALEN], ha->addr, ETH_ALEN);
+ i++;
}
+ priv->sme_i.sme_flag &= ~SME_MULTICAST;
+ hostif_mib_set_request(priv, LOCAL_MULTICAST_ADDRESS,
+ ETH_ALEN * mc_count,
+ MIB_VALUE_TYPE_OSTRING,
+ &set_address[0]);
+ } else {
+ filter_type = cpu_to_le32((uint32_t)MCAST_FILTER_MCAST);
+ priv->sme_i.sme_flag |= SME_MULTICAST;
+ hostif_mib_set_request(priv, LOCAL_MULTICAST_FILTER,
+ sizeof(filter_type), MIB_VALUE_TYPE_BOOL,
+ &filter_type);
}
+spin_unlock:
spin_unlock(&priv->multicast_spin);
}
static
-void hostif_sme_powermgt_set(struct ks_wlan_private *priv)
+void hostif_sme_power_mgmt_set(struct ks_wlan_private *priv)
{
unsigned long mode, wake_up, receiveDTIMs;
DPRINTK(3, "\n");
- switch (priv->reg.powermgt) {
- case POWMGT_ACTIVE_MODE:
+ switch (priv->reg.power_mgmt) {
+ case POWER_MGMT_ACTIVE:
mode = POWER_ACTIVE;
wake_up = 0;
receiveDTIMs = 0;
break;
- case POWMGT_SAVE1_MODE:
+ case POWER_MGMT_SAVE1:
if (priv->reg.operation_mode == MODE_INFRASTRUCTURE) {
mode = POWER_SAVE;
wake_up = 0;
@@ -2285,7 +2238,7 @@ void hostif_sme_powermgt_set(struct ks_wlan_private *priv)
receiveDTIMs = 0;
}
break;
- case POWMGT_SAVE2_MODE:
+ case POWER_MGMT_SAVE2:
if (priv->reg.operation_mode == MODE_INFRASTRUCTURE) {
mode = POWER_SAVE;
wake_up = 0;
@@ -2302,7 +2255,7 @@ void hostif_sme_powermgt_set(struct ks_wlan_private *priv)
receiveDTIMs = 0;
break;
}
- hostif_power_mngmt_request(priv, mode, wake_up, receiveDTIMs);
+ hostif_power_mgmt_request(priv, mode, wake_up, receiveDTIMs);
}
static
@@ -2324,16 +2277,16 @@ void hostif_sme_sleep_set(struct ks_wlan_private *priv)
static
void hostif_sme_set_key(struct ks_wlan_private *priv, int type)
{
- uint32_t val;
+ u32 val;
switch (type) {
case SME_SET_FLAG:
- val = cpu_to_le32((uint32_t) (priv->reg.privacy_invoked));
+ val = cpu_to_le32((uint32_t)(priv->reg.privacy_invoked));
hostif_mib_set_request(priv, DOT11_PRIVACY_INVOKED,
sizeof(val), MIB_VALUE_TYPE_BOOL, &val);
break;
case SME_SET_TXKEY:
- val = cpu_to_le32((uint32_t) (priv->wpa.txkey));
+ val = cpu_to_le32((uint32_t)(priv->wpa.txkey));
hostif_mib_set_request(priv, DOT11_WEP_DEFAULT_KEY_ID,
sizeof(val), MIB_VALUE_TYPE_INT, &val);
break;
@@ -2383,10 +2336,10 @@ static
void hostif_sme_set_pmksa(struct ks_wlan_private *priv)
{
struct pmk_cache_t {
- uint16_t size;
+ u16 size;
struct {
- uint8_t bssid[ETH_ALEN];
- uint8_t pmkid[IW_PMKID_LEN];
+ u8 bssid[ETH_ALEN];
+ u8 pmkid[IW_PMKID_LEN];
} __packed list[PMK_LIST_MAX];
} __packed pmkcache;
struct pmk_t *pmk;
@@ -2402,7 +2355,7 @@ void hostif_sme_set_pmksa(struct ks_wlan_private *priv)
i++;
}
}
- pmkcache.size = cpu_to_le16((uint16_t) (priv->pmklist.size));
+ pmkcache.size = cpu_to_le16((uint16_t)(priv->pmklist.size));
hostif_mib_set_request(priv, LOCAL_PMK,
sizeof(priv->pmklist.size) + (ETH_ALEN +
IW_PMKID_LEN) *
@@ -2414,7 +2367,7 @@ void hostif_sme_set_pmksa(struct ks_wlan_private *priv)
static
void hostif_sme_execute(struct ks_wlan_private *priv, int event)
{
- uint32_t val;
+ u32 val;
DPRINTK(3, "event=%d\n", event);
switch (event) {
@@ -2435,7 +2388,7 @@ void hostif_sme_execute(struct ks_wlan_private *priv, int event)
priv->scan_ssid, priv->scan_ssid_len);
break;
case SME_POW_MNGMT_REQUEST:
- hostif_sme_powermgt_set(priv);
+ hostif_sme_power_mgmt_set(priv);
break;
case SME_PHY_INFO_REQUEST:
hostif_phy_information_request(priv);
@@ -2443,18 +2396,16 @@ void hostif_sme_execute(struct ks_wlan_private *priv, int event)
case SME_MIC_FAILURE_REQUEST:
if (priv->wpa.mic_failure.failure == 1) {
hostif_mic_failure_request(priv,
- priv->wpa.mic_failure.
- failure - 1, 0);
+ priv->wpa.mic_failure.failure - 1,
+ 0);
} else if (priv->wpa.mic_failure.failure == 2) {
hostif_mic_failure_request(priv,
- priv->wpa.mic_failure.
- failure - 1,
- priv->wpa.mic_failure.
- counter);
- } else
- DPRINTK(4,
- "SME_MIC_FAILURE_REQUEST: failure count=%u error?\n",
+ priv->wpa.mic_failure.failure - 1,
+ priv->wpa.mic_failure.counter);
+ } else {
+ DPRINTK(4, "SME_MIC_FAILURE_REQUEST: failure count=%u error?\n",
priv->wpa.mic_failure.failure);
+ }
break;
case SME_MIC_FAILURE_CONFIRM:
if (priv->wpa.mic_failure.failure == 2) {
@@ -2476,12 +2427,12 @@ void hostif_sme_execute(struct ks_wlan_private *priv, int event)
hostif_stop_request(priv);
break;
case SME_RTS_THRESHOLD_REQUEST:
- val = cpu_to_le32((uint32_t) (priv->reg.rts));
+ val = cpu_to_le32((uint32_t)(priv->reg.rts));
hostif_mib_set_request(priv, DOT11_RTS_THRESHOLD,
sizeof(val), MIB_VALUE_TYPE_INT, &val);
break;
case SME_FRAGMENTATION_THRESHOLD_REQUEST:
- val = cpu_to_le32((uint32_t) (priv->reg.fragment));
+ val = cpu_to_le32((uint32_t)(priv->reg.fragment));
hostif_mib_set_request(priv, DOT11_FRAGMENTATION_THRESHOLD,
sizeof(val), MIB_VALUE_TYPE_INT, &val);
break;
@@ -2558,7 +2509,7 @@ void hostif_sme_execute(struct ks_wlan_private *priv, int event)
hostif_sme_sleep_set(priv);
break;
case SME_SET_REGION:
- val = cpu_to_le32((uint32_t) (priv->region));
+ val = cpu_to_le32((uint32_t)(priv->region));
hostif_mib_set_request(priv, LOCAL_REGION,
sizeof(val), MIB_VALUE_TYPE_INT, &val);
break;
@@ -2595,17 +2546,16 @@ void hostif_sme_task(unsigned long dev)
DPRINTK(3, "\n");
- if (priv->dev_state >= DEVICE_STATE_BOOT) {
- if (0 < cnt_smeqbody(priv)
- && priv->dev_state >= DEVICE_STATE_BOOT) {
- hostif_sme_execute(priv,
- priv->sme_i.event_buff[priv->sme_i.
- qhead]);
- inc_smeqhead(priv);
- if (0 < cnt_smeqbody(priv))
- tasklet_schedule(&priv->sme_task);
- }
- }
+ if (priv->dev_state < DEVICE_STATE_BOOT)
+ return;
+
+ if (cnt_smeqbody(priv) <= 0)
+ return;
+
+ hostif_sme_execute(priv, priv->sme_i.event_buff[priv->sme_i.qhead]);
+ inc_smeqhead(priv);
+ if (cnt_smeqbody(priv) > 0)
+ tasklet_schedule(&priv->sme_task);
}
/* send to Station Management Entity module */
@@ -2617,14 +2567,12 @@ void hostif_sme_enqueue(struct ks_wlan_private *priv, unsigned short event)
if (cnt_smeqbody(priv) < (SME_EVENT_BUFF_SIZE - 1)) {
priv->sme_i.event_buff[priv->sme_i.qtail] = event;
inc_smeqtail(priv);
- //DPRINTK(3,"inc_smeqtail \n");
#ifdef KS_WLAN_DEBUG
if (priv->sme_i.max_event_count < cnt_smeqbody(priv))
priv->sme_i.max_event_count = cnt_smeqbody(priv);
#endif /* KS_WLAN_DEBUG */
} else {
/* in case of buffer overflow */
- //DPRINTK(2,"sme queue buffer overflow\n");
netdev_err(priv->net_dev, "sme queue buffer overflow\n");
}
@@ -2639,7 +2587,7 @@ int hostif_init(struct ks_wlan_private *priv)
priv->aplist.size = 0;
for (i = 0; i < LOCAL_APLIST_MAX; i++)
- memset(&(priv->aplist.ap[i]), 0, sizeof(struct local_ap_t));
+ memset(&priv->aplist.ap[i], 0, sizeof(struct local_ap_t));
priv->infra_status = 0;
priv->current_rate = 4;
priv->connect_status = DISCONNECT_STATUS;
@@ -2656,24 +2604,23 @@ int hostif_init(struct ks_wlan_private *priv)
atomic_set(&priv->psstatus.status, PS_NONE);
atomic_set(&priv->psstatus.confirm_wait, 0);
atomic_set(&priv->psstatus.snooze_guard, 0);
- /* init_waitqueue_head(&priv->psstatus.wakeup_wait); */
init_completion(&priv->psstatus.wakeup_wait);
- //INIT_WORK(&priv->ks_wlan_wakeup_task, ks_wlan_hw_wakeup_task, (void *)priv);
- INIT_WORK(&priv->ks_wlan_wakeup_task, ks_wlan_hw_wakeup_task);
+ INIT_WORK(&priv->wakeup_work, ks_wlan_hw_wakeup_task);
/* WPA */
- memset(&(priv->wpa), 0, sizeof(priv->wpa));
+ memset(&priv->wpa, 0, sizeof(priv->wpa));
priv->wpa.rsn_enabled = 0;
priv->wpa.mic_failure.failure = 0;
priv->wpa.mic_failure.last_failure_time = 0;
priv->wpa.mic_failure.stop = 0;
- memset(&(priv->pmklist), 0, sizeof(priv->pmklist));
+ memset(&priv->pmklist, 0, sizeof(priv->pmklist));
INIT_LIST_HEAD(&priv->pmklist.head);
for (i = 0; i < PMK_LIST_MAX; i++)
INIT_LIST_HEAD(&priv->pmklist.pmk[i].list);
priv->sme_i.sme_status = SME_IDLE;
- priv->sme_i.qhead = priv->sme_i.qtail = 0;
+ priv->sme_i.qhead = 0;
+ priv->sme_i.qtail = 0;
#ifdef KS_WLAN_DEBUG
priv->sme_i.max_event_count = 0;
#endif
diff --git a/drivers/staging/ks7010/ks_hostif.h b/drivers/staging/ks7010/ks_hostif.h
index 30c49b699d62..d758076d419d 100644
--- a/drivers/staging/ks7010/ks_hostif.h
+++ b/drivers/staging/ks7010/ks_hostif.h
@@ -1,6 +1,6 @@
/*
* Driver for KeyStream wireless LAN
- *
+ *
* Copyright (c) 2005-2008 KeyStream Corp.
* Copyright (C) 2009 Renesas Technology Corp.
*
@@ -23,8 +23,8 @@
#define HIF_MIB_GET_CONF 0xE802
#define HIF_MIB_SET_REQ 0xE003
#define HIF_MIB_SET_CONF 0xE803
-#define HIF_POWERMGT_REQ 0xE004
-#define HIF_POWERMGT_CONF 0xE804
+#define HIF_POWER_MGMT_REQ 0xE004
+#define HIF_POWER_MGMT_CONF 0xE804
#define HIF_START_REQ 0xE005
#define HIF_START_CONF 0xE805
#define HIF_CONNECT_IND 0xE806
@@ -62,35 +62,35 @@
*/
struct hostif_hdr {
- uint16_t size;
- uint16_t event;
+ u16 size;
+ u16 event;
} __packed;
struct hostif_data_request_t {
struct hostif_hdr header;
- uint16_t auth_type;
+ u16 auth_type;
#define TYPE_DATA 0x0000
#define TYPE_AUTH 0x0001
- uint16_t reserved;
- uint8_t data[0];
+ u16 reserved;
+ u8 data[0];
} __packed;
struct hostif_data_indication_t {
struct hostif_hdr header;
- uint16_t auth_type;
+ u16 auth_type;
/* #define TYPE_DATA 0x0000 */
#define TYPE_PMK1 0x0001
#define TYPE_GMK1 0x0002
#define TYPE_GMK2 0x0003
- uint16_t reserved;
- uint8_t data[0];
+ u16 reserved;
+ u8 data[0];
} __packed;
#define CHANNEL_LIST_MAX_SIZE 14
struct channel_list_t {
- uint8_t size;
- uint8_t body[CHANNEL_LIST_MAX_SIZE];
- uint8_t pad;
+ u8 size;
+ u8 body[CHANNEL_LIST_MAX_SIZE];
+ u8 pad;
} __packed;
/* MIB Attribute */
@@ -110,9 +110,9 @@ struct channel_list_t {
#define DOT11_OPERATION_RATE_SET 0x11110100 /* rate set */
#define LOCAL_AP_SEARCH_INTEAVAL 0xF1010100 /* AP search interval (R/W) */
-#define LOCAL_CURRENTADDRESS 0xF1050100 /* MAC Adress change (W) */
-#define LOCAL_MULTICAST_ADDRESS 0xF1060100 /* Multicast Adress (W) */
-#define LOCAL_MULTICAST_FILTER 0xF1060200 /* Multicast Adress Filter enable/disable (W) */
+#define LOCAL_CURRENTADDRESS 0xF1050100 /* MAC Address change (W) */
+#define LOCAL_MULTICAST_ADDRESS 0xF1060100 /* Multicast Address (W) */
+#define LOCAL_MULTICAST_FILTER 0xF1060200 /* Multicast Address Filter enable/disable (W) */
#define LOCAL_SEARCHED_AP_LIST 0xF1030100 /* AP list (R) */
#define LOCAL_LINK_AP_STATUS 0xF1040100 /* Link AP status (R) */
#define LOCAL_PACKET_STATISTICS 0xF1020100 /* tx,rx packets statistics */
@@ -128,7 +128,7 @@ struct channel_list_t {
#define DOT11_PMK_TSC 0x55010100 /* PMK_TSC (W) */
#define DOT11_GMK1_TSC 0x55010101 /* GMK1_TSC (W) */
#define DOT11_GMK2_TSC 0x55010102 /* GMK2_TSC (W) */
-#define DOT11_GMK3_TSC 0x55010103 /* GMK3_TSC */
+#define DOT11_GMK3_TSC 0x55010103 /* GMK3_TSC */
#define LOCAL_PMK 0x58010100 /* Pairwise Master Key cache (W) */
#define LOCAL_REGION 0xF10A0100 /* Region setting */
@@ -143,61 +143,60 @@ struct channel_list_t {
struct hostif_mib_get_request_t {
struct hostif_hdr header;
- uint32_t mib_attribute;
+ u32 mib_attribute;
} __packed;
struct hostif_mib_value_t {
- uint16_t size;
- uint16_t type;
+ u16 size;
+ u16 type;
#define MIB_VALUE_TYPE_NULL 0
#define MIB_VALUE_TYPE_INT 1
#define MIB_VALUE_TYPE_BOOL 2
#define MIB_VALUE_TYPE_COUNT32 3
#define MIB_VALUE_TYPE_OSTRING 4
- uint8_t body[0];
+ u8 body[0];
} __packed;
struct hostif_mib_get_confirm_t {
struct hostif_hdr header;
- uint32_t mib_status;
+ u32 mib_status;
#define MIB_SUCCESS 0
#define MIB_INVALID 1
#define MIB_READ_ONLY 2
#define MIB_WRITE_ONLY 3
- uint32_t mib_attribute;
+ u32 mib_attribute;
struct hostif_mib_value_t mib_value;
} __packed;
struct hostif_mib_set_request_t {
struct hostif_hdr header;
- uint32_t mib_attribute;
+ u32 mib_attribute;
struct hostif_mib_value_t mib_value;
} __packed;
struct hostif_mib_set_confirm_t {
struct hostif_hdr header;
- uint32_t mib_status;
- uint32_t mib_attribute;
+ u32 mib_status;
+ u32 mib_attribute;
} __packed;
-struct hostif_power_mngmt_request_t {
+struct hostif_power_mgmt_request_t {
struct hostif_hdr header;
- uint32_t mode;
+ u32 mode;
#define POWER_ACTIVE 1
#define POWER_SAVE 2
- uint32_t wake_up;
+ u32 wake_up;
#define SLEEP_FALSE 0
#define SLEEP_TRUE 1 /* not used */
- uint32_t receiveDTIMs;
+ u32 receiveDTIMs;
#define DTIM_FALSE 0
#define DTIM_TRUE 1
} __packed;
-/* power management mode */
-enum {
- POWMGT_ACTIVE_MODE = 0,
- POWMGT_SAVE1_MODE,
- POWMGT_SAVE2_MODE
+enum power_mgmt_mode_type {
+ POWER_MGMT_ACTIVE,
+ POWER_MGMT_SAVE1,
+ POWER_MGMT_SAVE2
};
#define RESULT_SUCCESS 0
@@ -206,14 +205,14 @@ enum {
/* #define RESULT_ALREADY_RUNNING 3 */
#define RESULT_ALREADY_RUNNING 7
-struct hostif_power_mngmt_confirm_t {
+struct hostif_power_mgmt_confirm_t {
struct hostif_hdr header;
- uint16_t result_code;
+ u16 result_code;
} __packed;
struct hostif_start_request_t {
struct hostif_hdr header;
- uint16_t mode;
+ u16 mode;
#define MODE_PSEUDO_ADHOC 0
#define MODE_INFRASTRUCTURE 1
#define MODE_AP 2 /* not used */
@@ -222,118 +221,118 @@ struct hostif_start_request_t {
struct hostif_start_confirm_t {
struct hostif_hdr header;
- uint16_t result_code;
+ u16 result_code;
} __packed;
#define SSID_MAX_SIZE 32
struct ssid_t {
- uint8_t size;
- uint8_t body[SSID_MAX_SIZE];
- uint8_t ssid_pad;
+ u8 size;
+ u8 body[SSID_MAX_SIZE];
+ u8 ssid_pad;
} __packed;
#define RATE_SET_MAX_SIZE 16
struct rate_set8_t {
- uint8_t size;
- uint8_t body[8];
- uint8_t rate_pad;
+ u8 size;
+ u8 body[8];
+ u8 rate_pad;
} __packed;
struct FhParms_t {
- uint16_t dwellTime;
- uint8_t hopSet;
- uint8_t hopPattern;
- uint8_t hopIndex;
+ u16 dwellTime;
+ u8 hopSet;
+ u8 hopPattern;
+ u8 hopIndex;
} __packed;
struct DsParms_t {
- uint8_t channel;
+ u8 channel;
} __packed;
struct CfParms_t {
- uint8_t count;
- uint8_t period;
- uint16_t maxDuration;
- uint16_t durRemaining;
+ u8 count;
+ u8 period;
+ u16 maxDuration;
+ u16 durRemaining;
} __packed;
struct IbssParms_t {
- uint16_t atimWindow;
+ u16 atimWindow;
} __packed;
struct rsn_t {
- uint8_t size;
+ u8 size;
#define RSN_BODY_SIZE 64
- uint8_t body[RSN_BODY_SIZE];
+ u8 body[RSN_BODY_SIZE];
} __packed;
struct ErpParams_t {
- uint8_t erp_info;
+ u8 erp_info;
} __packed;
struct rate_set16_t {
- uint8_t size;
- uint8_t body[16];
- uint8_t rate_pad;
+ u8 size;
+ u8 body[16];
+ u8 rate_pad;
} __packed;
struct ap_info_t {
- uint8_t bssid[6]; /* +00 */
- uint8_t rssi; /* +06 */
- uint8_t sq; /* +07 */
- uint8_t noise; /* +08 */
- uint8_t pad0; /* +09 */
- uint16_t beacon_period; /* +10 */
- uint16_t capability; /* +12 */
-#define BSS_CAP_ESS (1<<0)
-#define BSS_CAP_IBSS (1<<1)
-#define BSS_CAP_CF_POLABLE (1<<2)
-#define BSS_CAP_CF_POLL_REQ (1<<3)
-#define BSS_CAP_PRIVACY (1<<4)
-#define BSS_CAP_SHORT_PREAMBLE (1<<5)
-#define BSS_CAP_PBCC (1<<6)
-#define BSS_CAP_CHANNEL_AGILITY (1<<7)
-#define BSS_CAP_SHORT_SLOT_TIME (1<<10)
-#define BSS_CAP_DSSS_OFDM (1<<13)
- uint8_t frame_type; /* +14 */
- uint8_t ch_info; /* +15 */
+ u8 bssid[6]; /* +00 */
+ u8 rssi; /* +06 */
+ u8 sq; /* +07 */
+ u8 noise; /* +08 */
+ u8 pad0; /* +09 */
+ u16 beacon_period; /* +10 */
+ u16 capability; /* +12 */
+#define BSS_CAP_ESS BIT(0)
+#define BSS_CAP_IBSS BIT(1)
+#define BSS_CAP_CF_POLABLE BIT(2)
+#define BSS_CAP_CF_POLL_REQ BIT(3)
+#define BSS_CAP_PRIVACY BIT(4)
+#define BSS_CAP_SHORT_PREAMBLE BIT(5)
+#define BSS_CAP_PBCC BIT(6)
+#define BSS_CAP_CHANNEL_AGILITY BIT(7)
+#define BSS_CAP_SHORT_SLOT_TIME BIT(10)
+#define BSS_CAP_DSSS_OFDM BIT(13)
+ u8 frame_type; /* +14 */
+ u8 ch_info; /* +15 */
#define FRAME_TYPE_BEACON 0x80
#define FRAME_TYPE_PROBE_RESP 0x50
- uint16_t body_size; /* +16 */
- uint8_t body[1024]; /* +18 */
+ u16 body_size; /* +16 */
+ u8 body[1024]; /* +18 */
/* +1032 */
} __packed;
struct link_ap_info_t {
- uint8_t bssid[6]; /* +00 */
- uint8_t rssi; /* +06 */
- uint8_t sq; /* +07 */
- uint8_t noise; /* +08 */
- uint8_t pad0; /* +09 */
- uint16_t beacon_period; /* +10 */
- uint16_t capability; /* +12 */
+ u8 bssid[6]; /* +00 */
+ u8 rssi; /* +06 */
+ u8 sq; /* +07 */
+ u8 noise; /* +08 */
+ u8 pad0; /* +09 */
+ u16 beacon_period; /* +10 */
+ u16 capability; /* +12 */
struct rate_set8_t rate_set; /* +14 */
struct FhParms_t fh_parameter; /* +24 */
struct DsParms_t ds_parameter; /* +29 */
struct CfParms_t cf_parameter; /* +30 */
struct IbssParms_t ibss_parameter; /* +36 */
struct ErpParams_t erp_parameter; /* +38 */
- uint8_t pad1; /* +39 */
+ u8 pad1; /* +39 */
struct rate_set8_t ext_rate_set; /* +40 */
- uint8_t DTIM_period; /* +50 */
- uint8_t rsn_mode; /* +51 */
+ u8 DTIM_period; /* +50 */
+ u8 rsn_mode; /* +51 */
#define RSN_MODE_NONE 0
#define RSN_MODE_WPA 1
#define RSN_MODE_WPA2 2
struct {
- uint8_t size; /* +52 */
- uint8_t body[128]; /* +53 */
+ u8 size; /* +52 */
+ u8 body[128]; /* +53 */
} __packed rsn;
} __packed;
struct hostif_connect_indication_t {
struct hostif_hdr header;
- uint16_t connect_code;
+ u16 connect_code;
#define RESULT_CONNECT 0
#define RESULT_DISCONNECT 1
struct link_ap_info_t link_ap_info;
@@ -345,125 +344,155 @@ struct hostif_stop_request_t {
struct hostif_stop_confirm_t {
struct hostif_hdr header;
- uint16_t result_code;
+ u16 result_code;
} __packed;
+/**
+ * struct hostif_ps_adhoc_set_request_t - pseudo adhoc mode
+ * @capability: bit5 : preamble
+ * bit6 : pbcc - Not supported always 0
+ * bit10 : ShortSlotTime
+ * bit13 : DSSS-OFDM - Not supported always 0
+ */
struct hostif_ps_adhoc_set_request_t {
struct hostif_hdr header;
- uint16_t phy_type;
+ u16 phy_type;
#define D_11B_ONLY_MODE 0
#define D_11G_ONLY_MODE 1
#define D_11BG_COMPATIBLE_MODE 2
#define D_11A_ONLY_MODE 3
- uint16_t cts_mode;
+ u16 cts_mode;
#define CTS_MODE_FALSE 0
#define CTS_MODE_TRUE 1
- uint16_t channel;
+ u16 channel;
struct rate_set16_t rate_set;
- uint16_t capability; /* bit5:preamble bit6:pbcc pbcc not supported always 0
- * bit10:ShortSlotTime bit13:DSSS-OFDM DSSS-OFDM not supported always 0 */
- uint16_t scan_type;
+ u16 capability;
+ u16 scan_type;
} __packed;
struct hostif_ps_adhoc_set_confirm_t {
struct hostif_hdr header;
- uint16_t result_code;
+ u16 result_code;
} __packed;
+/**
+ * struct hostif_infrastructure_set_request_t
+ * @capability: bit5 : preamble
+ * bit6 : pbcc - Not supported always 0
+ * bit10 : ShortSlotTime
+ * bit13 : DSSS-OFDM - Not supported always 0
+ */
struct hostif_infrastructure_set_request_t {
struct hostif_hdr header;
- uint16_t phy_type;
- uint16_t cts_mode;
+ u16 phy_type;
+ u16 cts_mode;
struct rate_set16_t rate_set;
struct ssid_t ssid;
- uint16_t capability; /* bit5:preamble bit6:pbcc pbcc not supported always 0
- * bit10:ShortSlotTime bit13:DSSS-OFDM DSSS-OFDM not supported always 0 */
- uint16_t beacon_lost_count;
- uint16_t auth_type;
+ u16 capability;
+ u16 beacon_lost_count;
+ u16 auth_type;
#define AUTH_TYPE_OPEN_SYSTEM 0
#define AUTH_TYPE_SHARED_KEY 1
struct channel_list_t channel_list;
- uint16_t scan_type;
+ u16 scan_type;
} __packed;
+/**
+ * struct hostif_infrastructure_set2_request_t
+ * @capability: bit5 : preamble
+ * bit6 : pbcc - Not supported always 0
+ * bit10 : ShortSlotTime
+ * bit13 : DSSS-OFDM - Not supported always 0
+ */
struct hostif_infrastructure_set2_request_t {
struct hostif_hdr header;
- uint16_t phy_type;
- uint16_t cts_mode;
+ u16 phy_type;
+ u16 cts_mode;
struct rate_set16_t rate_set;
struct ssid_t ssid;
- uint16_t capability; /* bit5:preamble bit6:pbcc pbcc not supported always 0
- * bit10:ShortSlotTime bit13:DSSS-OFDM DSSS-OFDM not supported always 0 */
- uint16_t beacon_lost_count;
- uint16_t auth_type;
+ u16 capability;
+ u16 beacon_lost_count;
+ u16 auth_type;
#define AUTH_TYPE_OPEN_SYSTEM 0
#define AUTH_TYPE_SHARED_KEY 1
struct channel_list_t channel_list;
- uint16_t scan_type;
- uint8_t bssid[ETH_ALEN];
+ u16 scan_type;
+ u8 bssid[ETH_ALEN];
} __packed;
struct hostif_infrastructure_set_confirm_t {
struct hostif_hdr header;
- uint16_t result_code;
+ u16 result_code;
} __packed;
+/**
+ * struct hostif_adhoc_set_request_t
+ * @capability: bit5 : preamble
+ * bit6 : pbcc - Not supported always 0
+ * bit10 : ShortSlotTime
+ * bit13 : DSSS-OFDM - Not supported always 0
+ */
struct hostif_adhoc_set_request_t {
struct hostif_hdr header;
- uint16_t phy_type;
- uint16_t cts_mode;
- uint16_t channel;
+ u16 phy_type;
+ u16 cts_mode;
+ u16 channel;
struct rate_set16_t rate_set;
struct ssid_t ssid;
- uint16_t capability; /* bit5:preamble bit6:pbcc pbcc not supported always 0
- * bit10:ShortSlotTime bit13:DSSS-OFDM DSSS-OFDM not supported always 0 */
- uint16_t scan_type;
+ u16 capability;
+ u16 scan_type;
} __packed;
+/**
+ * struct hostif_adhoc_set2_request_t
+ * @capability: bit5 : preamble
+ * bit6 : pbcc - Not supported always 0
+ * bit10 : ShortSlotTime
+ * bit13 : DSSS-OFDM - Not supported always 0
+ */
struct hostif_adhoc_set2_request_t {
struct hostif_hdr header;
- uint16_t phy_type;
- uint16_t cts_mode;
- uint16_t reserved;
+ u16 phy_type;
+ u16 cts_mode;
+ u16 reserved;
struct rate_set16_t rate_set;
struct ssid_t ssid;
- uint16_t capability; /* bit5:preamble bit6:pbcc pbcc not supported always 0
- * bit10:ShortSlotTime bit13:DSSS-OFDM DSSS-OFDM not supported always 0 */
- uint16_t scan_type;
+ u16 capability;
+ u16 scan_type;
struct channel_list_t channel_list;
- uint8_t bssid[ETH_ALEN];
+ u8 bssid[ETH_ALEN];
} __packed;
struct hostif_adhoc_set_confirm_t {
struct hostif_hdr header;
- uint16_t result_code;
+ u16 result_code;
} __packed;
struct last_associate_t {
- uint8_t type;
- uint8_t status;
+ u8 type;
+ u8 status;
} __packed;
struct association_request_t {
- uint8_t type;
+ u8 type;
#define FRAME_TYPE_ASSOC_REQ 0x00
#define FRAME_TYPE_REASSOC_REQ 0x20
- uint8_t pad;
- uint16_t capability;
- uint16_t listen_interval;
- uint8_t ap_address[6];
- uint16_t reqIEs_size;
+ u8 pad;
+ u16 capability;
+ u16 listen_interval;
+ u8 ap_address[6];
+ u16 reqIEs_size;
} __packed;
struct association_response_t {
- uint8_t type;
+ u8 type;
#define FRAME_TYPE_ASSOC_RESP 0x10
#define FRAME_TYPE_REASSOC_RESP 0x30
- uint8_t pad;
- uint16_t capability;
- uint16_t status;
- uint16_t association_id;
- uint16_t respIEs_size;
+ u8 pad;
+ u16 capability;
+ u16 status;
+ u16 association_id;
+ u16 respIEs_size;
} __packed;
struct hostif_associate_indication_t {
@@ -476,63 +505,65 @@ struct hostif_associate_indication_t {
struct hostif_bss_scan_request_t {
struct hostif_hdr header;
- uint8_t scan_type;
+ u8 scan_type;
#define ACTIVE_SCAN 0
#define PASSIVE_SCAN 1
- uint8_t pad[3];
- uint32_t ch_time_min;
- uint32_t ch_time_max;
+ u8 pad[3];
+ u32 ch_time_min;
+ u32 ch_time_max;
struct channel_list_t channel_list;
struct ssid_t ssid;
} __packed;
struct hostif_bss_scan_confirm_t {
struct hostif_hdr header;
- uint16_t result_code;
- uint16_t reserved;
+ u16 result_code;
+ u16 reserved;
} __packed;
struct hostif_phy_information_request_t {
struct hostif_hdr header;
- uint16_t type;
+ u16 type;
#define NORMAL_TYPE 0
#define TIME_TYPE 1
- uint16_t time; /* unit 100ms */
+ u16 time; /* unit 100ms */
} __packed;
struct hostif_phy_information_confirm_t {
struct hostif_hdr header;
- uint8_t rssi;
- uint8_t sq;
- uint8_t noise;
- uint8_t link_speed;
- uint32_t tx_frame;
- uint32_t rx_frame;
- uint32_t tx_error;
- uint32_t rx_error;
-} __packed;
-
-/* sleep mode */
-#define SLP_ACTIVE 0
-#define SLP_SLEEP 1
+ u8 rssi;
+ u8 sq;
+ u8 noise;
+ u8 link_speed;
+ u32 tx_frame;
+ u32 rx_frame;
+ u32 tx_error;
+ u32 rx_error;
+} __packed;
+
+enum sleep_mode_type {
+ SLP_ACTIVE,
+ SLP_SLEEP
+};
+
struct hostif_sleep_request_t {
struct hostif_hdr header;
} __packed;
struct hostif_sleep_confirm_t {
struct hostif_hdr header;
- uint16_t result_code;
+ u16 result_code;
} __packed;
struct hostif_mic_failure_request_t {
struct hostif_hdr header;
- uint16_t failure_count;
- uint16_t timer;
+ u16 failure_count;
+ u16 timer;
} __packed;
struct hostif_mic_failure_confirm_t {
struct hostif_hdr header;
- uint16_t result_code;
+ u16 result_code;
} __packed;
#define BASIC_RATE 0x80
@@ -568,69 +599,68 @@ struct hostif_mic_failure_confirm_t {
#define TX_RATE_48M (uint8_t)(480 / 5)
#define TX_RATE_54M (uint8_t)(540 / 5)
-#define IS_11B_RATE(A) (((A & RATE_MASK) == TX_RATE_1M ) || ((A & RATE_MASK) == TX_RATE_2M) || \
- ((A & RATE_MASK) == TX_RATE_5M) || ((A & RATE_MASK) == TX_RATE_11M))
+#define IS_11B_RATE(A) (((A & RATE_MASK) == TX_RATE_1M) || ((A & RATE_MASK) == TX_RATE_2M) || \
+ ((A & RATE_MASK) == TX_RATE_5M) || ((A & RATE_MASK) == TX_RATE_11M))
#define IS_OFDM_RATE(A) (((A & RATE_MASK) == TX_RATE_6M) || ((A & RATE_MASK) == TX_RATE_12M) || \
- ((A & RATE_MASK) == TX_RATE_24M) || ((A & RATE_MASK) == TX_RATE_9M) || \
- ((A & RATE_MASK) == TX_RATE_18M) || ((A & RATE_MASK) == TX_RATE_36M) || \
- ((A & RATE_MASK) == TX_RATE_48M) || ((A & RATE_MASK) == TX_RATE_54M))
+ ((A & RATE_MASK) == TX_RATE_24M) || ((A & RATE_MASK) == TX_RATE_9M) || \
+ ((A & RATE_MASK) == TX_RATE_18M) || ((A & RATE_MASK) == TX_RATE_36M) || \
+ ((A & RATE_MASK) == TX_RATE_48M) || ((A & RATE_MASK) == TX_RATE_54M))
#define IS_11BG_RATE(A) (IS_11B_RATE(A) || IS_OFDM_RATE(A))
-#define IS_OFDM_EXT_RATE(A) (((A & RATE_MASK) == TX_RATE_9M) || ((A & RATE_MASK) == TX_RATE_18M) || \
- ((A & RATE_MASK) == TX_RATE_36M) || ((A & RATE_MASK) == TX_RATE_48M) || \
- ((A & RATE_MASK) == TX_RATE_54M))
+#define IS_OFDM_EXT_RATE(A) (((A & RATE_MASK) == TX_RATE_9M) || ((A & RATE_MASK) == TX_RATE_18M) || \
+ ((A & RATE_MASK) == TX_RATE_36M) || ((A & RATE_MASK) == TX_RATE_48M) || \
+ ((A & RATE_MASK) == TX_RATE_54M))
-enum {
- CONNECT_STATUS = 0,
+enum connect_status_type {
+ CONNECT_STATUS,
DISCONNECT_STATUS
};
-/* preamble type */
-enum {
- LONG_PREAMBLE = 0,
+enum preamble_type {
+ LONG_PREAMBLE,
SHORT_PREAMBLE
};
-/* multicast filter */
-#define MCAST_FILTER_MCAST 0
-#define MCAST_FILTER_MCASTALL 1
-#define MCAST_FILTER_PROMISC 2
+enum multicast_filter_type {
+ MCAST_FILTER_MCAST,
+ MCAST_FILTER_MCASTALL,
+ MCAST_FILTER_PROMISC,
+};
#define NIC_MAX_MCAST_LIST 32
/* macro function */
#define HIF_EVENT_MASK 0xE800
#define IS_HIF_IND(_EVENT) ((_EVENT & HIF_EVENT_MASK) == 0xE800 && \
- ((_EVENT & ~HIF_EVENT_MASK) == 0x0001 || \
- (_EVENT & ~HIF_EVENT_MASK) == 0x0006 || \
- (_EVENT & ~HIF_EVENT_MASK) == 0x000C || \
- (_EVENT & ~HIF_EVENT_MASK) == 0x0011 || \
- (_EVENT & ~HIF_EVENT_MASK) == 0x0012))
+ ((_EVENT & ~HIF_EVENT_MASK) == 0x0001 || \
+ (_EVENT & ~HIF_EVENT_MASK) == 0x0006 || \
+ (_EVENT & ~HIF_EVENT_MASK) == 0x000C || \
+ (_EVENT & ~HIF_EVENT_MASK) == 0x0011 || \
+ (_EVENT & ~HIF_EVENT_MASK) == 0x0012))
#define IS_HIF_CONF(_EVENT) ((_EVENT & HIF_EVENT_MASK) == 0xE800 && \
- (_EVENT & ~HIF_EVENT_MASK) > 0x0000 && \
- (_EVENT & ~HIF_EVENT_MASK) < 0x0012 && \
- !IS_HIF_IND(_EVENT) )
+ (_EVENT & ~HIF_EVENT_MASK) > 0x0000 && \
+ (_EVENT & ~HIF_EVENT_MASK) < 0x0012 && \
+ !IS_HIF_IND(_EVENT))
#ifdef __KERNEL__
#include "ks_wlan.h"
/* function prototype */
-int hostif_data_request(struct ks_wlan_private *priv,
- struct sk_buff *packet);
+int hostif_data_request(struct ks_wlan_private *priv, struct sk_buff *skb);
void hostif_receive(struct ks_wlan_private *priv, unsigned char *p,
- unsigned int size);
+ unsigned int size);
void hostif_sme_enqueue(struct ks_wlan_private *priv, uint16_t event);
int hostif_init(struct ks_wlan_private *priv);
void hostif_exit(struct ks_wlan_private *priv);
-int ks_wlan_hw_tx(struct ks_wlan_private *priv, void *p,
- unsigned long size,
- void (*complete_handler) (void *arg1, void *arg2),
- void *arg1, void *arg2);
-void send_packet_complete(void *, void *);
+int ks_wlan_hw_tx(struct ks_wlan_private *priv, void *p, unsigned long size,
+ void (*complete_handler)(struct ks_wlan_private *priv,
+ struct sk_buff *skb),
+ struct sk_buff *skb);
+void send_packet_complete(struct ks_wlan_private *priv, struct sk_buff *skb);
void ks_wlan_hw_wakeup_request(struct ks_wlan_private *priv);
int ks_wlan_hw_power_save(struct ks_wlan_private *priv);
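For context on the prototype change above: the completion callback of ks_wlan_hw_tx() now takes typed arguments instead of two opaque void pointers. A minimal sketch of a caller under the new signature, assuming the declarations from ks_wlan.h; the helper names tx_complete() and example_tx() are illustrative and not part of this patch.

/* Illustrative only: a completion handler matching the new typed
 * callback, and how a transmit path might hand it to ks_wlan_hw_tx().
 */
static void tx_complete(struct ks_wlan_private *priv, struct sk_buff *skb)
{
	/* the skb passed to ks_wlan_hw_tx() comes back here once sent */
	dev_kfree_skb(skb);
}

static int example_tx(struct ks_wlan_private *priv, void *buf,
		      unsigned long size, struct sk_buff *skb)
{
	return ks_wlan_hw_tx(priv, buf, size, tx_complete, skb);
}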
diff --git a/drivers/staging/ks7010/ks_wlan.h b/drivers/staging/ks7010/ks_wlan.h
index 9ab80e1f123e..cd4f56ddbea8 100644
--- a/drivers/staging/ks7010/ks_wlan.h
+++ b/drivers/staging/ks7010/ks_wlan.h
@@ -18,10 +18,10 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/spinlock.h> /* spinlock_t */
-#include <linux/sched.h> /* wait_queue_head_t */
-#include <linux/types.h> /* pid_t */
-#include <linux/netdevice.h> /* struct net_device_stats, struct sk_buff */
+#include <linux/spinlock.h> /* spinlock_t */
+#include <linux/sched.h> /* wait_queue_head_t */
+#include <linux/types.h> /* pid_t */
+#include <linux/netdevice.h> /* struct net_device_stats, struct sk_buff */
#include <linux/etherdevice.h>
#include <linux/wireless.h>
#include <linux/atomic.h> /* struct atomic_t */
@@ -36,7 +36,10 @@
#ifdef KS_WLAN_DEBUG
#define DPRINTK(n, fmt, args...) \
- if (KS_WLAN_DEBUG > (n)) printk(KERN_NOTICE "%s: "fmt, __FUNCTION__, ## args)
+ do { \
+ if (KS_WLAN_DEBUG > (n)) \
+ pr_notice("%s: "fmt, __func__, ## args); \
+ } while (0)
#else
#define DPRINTK(n, fmt, args...)
#endif
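A note on the DPRINTK() rework above: wrapping the body in do { ... } while (0) makes the macro expand to a single statement, so it can sit in an unbraced if/else without changing control flow. A hedged illustration (the variable ret is hypothetical):

/* With the old definition the macro expanded to a bare if-statement,
 * so the else below would have bound to the macro's hidden if.
 * With the do/while (0) form, both branches behave as written.
 */
if (ret < 0)
	DPRINTK(1, "tx failed: %d\n", ret);
else
	DPRINTK(4, "tx ok\n");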
@@ -55,7 +58,7 @@ struct ks_wlan_parameter {
u8 body[32 + 1];
} ssid; /* SSID */
u8 preamble; /* Preamble */
- u8 powermgt; /* PowerManagementMode */
+ u8 power_mgmt;
u32 scan_type; /* AP List Scan Type */
#define BEACON_LOST_COUNT_MIN 0
#define BEACON_LOST_COUNT_MAX 65535
@@ -85,23 +88,23 @@ enum {
};
/* SME flag */
-#define SME_MODE_SET (1<<0)
-#define SME_RTS (1<<1)
-#define SME_FRAG (1<<2)
-#define SME_WEP_FLAG (1<<3)
-#define SME_WEP_INDEX (1<<4)
-#define SME_WEP_VAL1 (1<<5)
-#define SME_WEP_VAL2 (1<<6)
-#define SME_WEP_VAL3 (1<<7)
-#define SME_WEP_VAL4 (1<<8)
+#define SME_MODE_SET BIT(0)
+#define SME_RTS BIT(1)
+#define SME_FRAG BIT(2)
+#define SME_WEP_FLAG BIT(3)
+#define SME_WEP_INDEX BIT(4)
+#define SME_WEP_VAL1 BIT(5)
+#define SME_WEP_VAL2 BIT(6)
+#define SME_WEP_VAL3 BIT(7)
+#define SME_WEP_VAL4 BIT(8)
#define SME_WEP_VAL_MASK (SME_WEP_VAL1 | SME_WEP_VAL2 | SME_WEP_VAL3 | SME_WEP_VAL4)
-#define SME_RSN (1<<9)
-#define SME_RSN_MULTICAST (1<<10)
-#define SME_RSN_UNICAST (1<<11)
-#define SME_RSN_AUTH (1<<12)
+#define SME_RSN BIT(9)
+#define SME_RSN_MULTICAST BIT(10)
+#define SME_RSN_UNICAST BIT(11)
+#define SME_RSN_AUTH BIT(12)
-#define SME_AP_SCAN (1<<13)
-#define SME_MULTICAST (1<<14)
+#define SME_AP_SCAN BIT(13)
+#define SME_MULTICAST BIT(14)
/* SME Event */
enum {
@@ -356,7 +359,8 @@ struct wpa_key_t {
u8 rx_seq[IW_ENCODE_SEQ_MAX_SIZE]; /* LSB first */
struct sockaddr addr; /* ff:ff:ff:ff:ff:ff for broadcast/multicast
* (group) keys or unicast address for
- * individual keys */
+ * individual keys
+ */
u16 alg;
u16 key_len; /* WEP: 5 or 13, TKIP: 32, CCMP: 16 */
u8 key_val[IW_ENCODING_TOKEN_MAX];
@@ -409,7 +413,11 @@ struct wps_status_t {
#endif /* WPS */
struct ks_wlan_private {
- struct hw_info_t ks_wlan_hw; /* hardware information */
+ /* hardware information */
+ struct ks_sdio_card *ks_sdio_card;
+ struct workqueue_struct *wq;
+ struct delayed_work rw_dwork;
+ struct tasklet_struct rx_bh_task;
struct net_device *net_dev;
int reg_net; /* register_netdev */
@@ -425,7 +433,7 @@ struct ks_wlan_private {
u8 *rxp;
unsigned int rx_size;
struct tasklet_struct sme_task;
- struct work_struct ks_wlan_wakeup_task;
+ struct work_struct wakeup_work;
int scan_ind_count;
unsigned char eth_addr[ETH_ALEN];
@@ -500,5 +508,7 @@ struct ks_wlan_private {
int ks_wlan_net_start(struct net_device *dev);
int ks_wlan_net_stop(struct net_device *dev);
+bool is_connect_status(u32 status);
+bool is_disconnect_status(u32 status);
#endif /* _KS_WLAN_H */
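The two helper declarations added above replace the open-coded connect-status test used by several handlers in ks_wlan_net.c (see the is_connect_status() call sites later in this diff). A sketch of definitions consistent with those call sites; the real bodies live elsewhere in the driver and may differ.

/* Sketch only: equivalents of the open-coded check being removed,
 * i.e. (priv->connect_status & CONNECT_STATUS_MASK) == CONNECT_STATUS.
 */
bool is_connect_status(u32 status)
{
	return (status & CONNECT_STATUS_MASK) == CONNECT_STATUS;
}

bool is_disconnect_status(u32 status)
{
	return (status & CONNECT_STATUS_MASK) == DISCONNECT_STATUS;
}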
diff --git a/drivers/staging/ks7010/ks_wlan_ioctl.h b/drivers/staging/ks7010/ks_wlan_ioctl.h
index 8e62b10effd6..28b381c24b42 100644
--- a/drivers/staging/ks7010/ks_wlan_ioctl.h
+++ b/drivers/staging/ks7010/ks_wlan_ioctl.h
@@ -1,6 +1,6 @@
/*
* Driver for KeyStream 11b/g wireless LAN
- *
+ *
* Copyright (c) 2005-2008 KeyStream Corp.
* Copyright (C) 2009 Renesas Technology Corp.
*
@@ -15,43 +15,43 @@
#include <linux/wireless.h>
/* The low order bit identify a SET (0) or a GET (1) ioctl. */
-/* SIOCIWFIRSTPRIV + 0 */
-/* former KS_WLAN_GET_DRIVER_VERSION SIOCIWFIRSTPRIV + 1 */
-/* SIOCIWFIRSTPRIV + 2 */
-#define KS_WLAN_GET_FIRM_VERSION SIOCIWFIRSTPRIV + 3
+/* (SIOCIWFIRSTPRIV + 0) */
+/* former KS_WLAN_GET_DRIVER_VERSION (SIOCIWFIRSTPRIV + 1) */
+/* (SIOCIWFIRSTPRIV + 2) */
+#define KS_WLAN_GET_FIRM_VERSION (SIOCIWFIRSTPRIV + 3)
#ifdef WPS
-#define KS_WLAN_SET_WPS_ENABLE SIOCIWFIRSTPRIV + 4
-#define KS_WLAN_GET_WPS_ENABLE SIOCIWFIRSTPRIV + 5
-#define KS_WLAN_SET_WPS_PROBE_REQ SIOCIWFIRSTPRIV + 6
+#define KS_WLAN_SET_WPS_ENABLE (SIOCIWFIRSTPRIV + 4)
+#define KS_WLAN_GET_WPS_ENABLE (SIOCIWFIRSTPRIV + 5)
+#define KS_WLAN_SET_WPS_PROBE_REQ (SIOCIWFIRSTPRIV + 6)
#endif
-#define KS_WLAN_GET_EEPROM_CKSUM SIOCIWFIRSTPRIV + 7
-#define KS_WLAN_SET_PREAMBLE SIOCIWFIRSTPRIV + 8
-#define KS_WLAN_GET_PREAMBLE SIOCIWFIRSTPRIV + 9
-#define KS_WLAN_SET_POWER_SAVE SIOCIWFIRSTPRIV + 10
-#define KS_WLAN_GET_POWER_SAVE SIOCIWFIRSTPRIV + 11
-#define KS_WLAN_SET_SCAN_TYPE SIOCIWFIRSTPRIV + 12
-#define KS_WLAN_GET_SCAN_TYPE SIOCIWFIRSTPRIV + 13
-#define KS_WLAN_SET_RX_GAIN SIOCIWFIRSTPRIV + 14
-#define KS_WLAN_GET_RX_GAIN SIOCIWFIRSTPRIV + 15
-#define KS_WLAN_HOSTT SIOCIWFIRSTPRIV + 16 /* unused */
-//#define KS_WLAN_SET_REGION SIOCIWFIRSTPRIV + 17
-#define KS_WLAN_SET_BEACON_LOST SIOCIWFIRSTPRIV + 18
-#define KS_WLAN_GET_BEACON_LOST SIOCIWFIRSTPRIV + 19
+#define KS_WLAN_GET_EEPROM_CKSUM (SIOCIWFIRSTPRIV + 7)
+#define KS_WLAN_SET_PREAMBLE (SIOCIWFIRSTPRIV + 8)
+#define KS_WLAN_GET_PREAMBLE (SIOCIWFIRSTPRIV + 9)
+#define KS_WLAN_SET_POWER_SAVE (SIOCIWFIRSTPRIV + 10)
+#define KS_WLAN_GET_POWER_SAVE (SIOCIWFIRSTPRIV + 11)
+#define KS_WLAN_SET_SCAN_TYPE (SIOCIWFIRSTPRIV + 12)
+#define KS_WLAN_GET_SCAN_TYPE (SIOCIWFIRSTPRIV + 13)
+#define KS_WLAN_SET_RX_GAIN (SIOCIWFIRSTPRIV + 14)
+#define KS_WLAN_GET_RX_GAIN (SIOCIWFIRSTPRIV + 15)
+#define KS_WLAN_HOSTT (SIOCIWFIRSTPRIV + 16) /* unused */
+//#define KS_WLAN_SET_REGION (SIOCIWFIRSTPRIV + 17)
+#define KS_WLAN_SET_BEACON_LOST (SIOCIWFIRSTPRIV + 18)
+#define KS_WLAN_GET_BEACON_LOST (SIOCIWFIRSTPRIV + 19)
-#define KS_WLAN_SET_TX_GAIN SIOCIWFIRSTPRIV + 20
-#define KS_WLAN_GET_TX_GAIN SIOCIWFIRSTPRIV + 21
+#define KS_WLAN_SET_TX_GAIN (SIOCIWFIRSTPRIV + 20)
+#define KS_WLAN_GET_TX_GAIN (SIOCIWFIRSTPRIV + 21)
/* for KS7010 */
-#define KS_WLAN_SET_PHY_TYPE SIOCIWFIRSTPRIV + 22
-#define KS_WLAN_GET_PHY_TYPE SIOCIWFIRSTPRIV + 23
-#define KS_WLAN_SET_CTS_MODE SIOCIWFIRSTPRIV + 24
-#define KS_WLAN_GET_CTS_MODE SIOCIWFIRSTPRIV + 25
-/* SIOCIWFIRSTPRIV + 26 */
-/* SIOCIWFIRSTPRIV + 27 */
-#define KS_WLAN_SET_SLEEP_MODE SIOCIWFIRSTPRIV + 28 /* sleep mode */
-#define KS_WLAN_GET_SLEEP_MODE SIOCIWFIRSTPRIV + 29 /* sleep mode */
-/* SIOCIWFIRSTPRIV + 30 */
-/* SIOCIWFIRSTPRIV + 31 */
+#define KS_WLAN_SET_PHY_TYPE (SIOCIWFIRSTPRIV + 22)
+#define KS_WLAN_GET_PHY_TYPE (SIOCIWFIRSTPRIV + 23)
+#define KS_WLAN_SET_CTS_MODE (SIOCIWFIRSTPRIV + 24)
+#define KS_WLAN_GET_CTS_MODE (SIOCIWFIRSTPRIV + 25)
+/* (SIOCIWFIRSTPRIV + 26) */
+/* (SIOCIWFIRSTPRIV + 27) */
+#define KS_WLAN_SET_SLEEP_MODE (SIOCIWFIRSTPRIV + 28) /* sleep mode */
+#define KS_WLAN_GET_SLEEP_MODE (SIOCIWFIRSTPRIV + 29) /* sleep mode */
+/* (SIOCIWFIRSTPRIV + 30) */
+/* (SIOCIWFIRSTPRIV + 31) */
#ifdef __KERNEL__
@@ -60,7 +60,7 @@
int ks_wlan_read_config_file(struct ks_wlan_private *priv);
int ks_wlan_setup_parameter(struct ks_wlan_private *priv,
- unsigned int commit_flag);
+ unsigned int commit_flag);
#endif /* __KERNEL__ */
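On the parenthesised ioctl numbers above: without the parentheses, a macro body like SIOCIWFIRSTPRIV + 3 can change meaning when it appears inside a larger expression. A small illustration (cmd, idx and the macro names are hypothetical):

#define KS_IOCTL_BAD	SIOCIWFIRSTPRIV + 3	/* old style */
#define KS_IOCTL_GOOD	(SIOCIWFIRSTPRIV + 3)	/* new style */

idx = cmd - KS_IOCTL_BAD;	/* expands to cmd - SIOCIWFIRSTPRIV + 3 */
idx = cmd - KS_IOCTL_GOOD;	/* expands to cmd - (SIOCIWFIRSTPRIV + 3) */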
diff --git a/drivers/staging/ks7010/ks_wlan_net.c b/drivers/staging/ks7010/ks_wlan_net.c
index 121e1530fdba..5a43f193dcc8 100644
--- a/drivers/staging/ks7010/ks_wlan_net.c
+++ b/drivers/staging/ks7010/ks_wlan_net.c
@@ -89,10 +89,10 @@ int ks_wlan_update_phy_information(struct ks_wlan_private *priv)
DPRINTK(4, "in_interrupt = %ld\n", in_interrupt());
if (priv->dev_state < DEVICE_STATE_READY)
- return -1; /* not finished initialize */
+ return -EBUSY; /* not finished initialize */
if (atomic_read(&update_phyinfo))
- return 1;
+ return -EPERM;
/* The status */
wstats->status = priv->reg.operation_mode; /* Operation mode */
@@ -173,14 +173,11 @@ int ks_wlan_setup_parameter(struct ks_wlan_private *priv,
* would not work at all... - Jean II
*/
-/*------------------------------------------------------------------*/
-/* Wireless Handler : get protocol name */
static int ks_wlan_get_name(struct net_device *dev,
struct iw_request_info *info, char *cwrq,
char *extra)
{
- struct ks_wlan_private *priv =
- (struct ks_wlan_private *)netdev_priv(dev);
+ struct ks_wlan_private *priv = netdev_priv(dev);
if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
@@ -198,15 +195,12 @@ static int ks_wlan_get_name(struct net_device *dev,
return 0;
}
-/*------------------------------------------------------------------*/
-/* Wireless Handler : set frequency */
static int ks_wlan_set_freq(struct net_device *dev,
struct iw_request_info *info, struct iw_freq *fwrq,
char *extra)
{
- struct ks_wlan_private *priv =
- (struct ks_wlan_private *)netdev_priv(dev);
- int rc = -EINPROGRESS; /* Call commit handler */
+ struct ks_wlan_private *priv = netdev_priv(dev);
+ int channel;
if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
@@ -226,58 +220,52 @@ static int ks_wlan_set_freq(struct net_device *dev,
}
/* Setting by channel number */
if ((fwrq->m > 1000) || (fwrq->e > 0))
- rc = -EOPNOTSUPP;
- else {
- int channel = fwrq->m;
- /* We should do a better check than that,
- * based on the card capability !!! */
- if ((channel < 1) || (channel > 14)) {
- netdev_dbg(dev,
- "%s: New channel value of %d is invalid!\n",
- dev->name, fwrq->m);
- rc = -EINVAL;
- } else {
- /* Yes ! We can set it !!! */
- priv->reg.channel = (u8) (channel);
- priv->need_commit |= SME_MODE_SET;
- }
+ return -EOPNOTSUPP;
+
+ channel = fwrq->m;
+ /* We should do a better check than that,
+ * based on the card capability !!!
+ */
+ if ((channel < 1) || (channel > 14)) {
+ netdev_dbg(dev, "%s: New channel value of %d is invalid!\n",
+ dev->name, fwrq->m);
+ return -EINVAL;
}
- return rc;
+ /* Yes ! We can set it !!! */
+ priv->reg.channel = (u8)(channel);
+ priv->need_commit |= SME_MODE_SET;
+
+ return -EINPROGRESS; /* Call commit handler */
}
-/*------------------------------------------------------------------*/
-/* Wireless Handler : get frequency */
static int ks_wlan_get_freq(struct net_device *dev,
struct iw_request_info *info, struct iw_freq *fwrq,
char *extra)
{
- struct ks_wlan_private *priv =
- (struct ks_wlan_private *)netdev_priv(dev);
+ struct ks_wlan_private *priv = netdev_priv(dev);
int f;
if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
/* for SLEEP MODE */
- if ((priv->connect_status & CONNECT_STATUS_MASK) == CONNECT_STATUS)
+ if (is_connect_status(priv->connect_status))
f = (int)priv->current_ap.channel;
- else
+ else
f = (int)priv->reg.channel;
+
fwrq->m = frequency_list[f - 1] * 100000;
fwrq->e = 1;
return 0;
}
-/*------------------------------------------------------------------*/
-/* Wireless Handler : set ESSID */
static int ks_wlan_set_essid(struct net_device *dev,
struct iw_request_info *info,
struct iw_point *dwrq, char *extra)
{
- struct ks_wlan_private *priv =
- (struct ks_wlan_private *)netdev_priv(dev);
+ struct ks_wlan_private *priv = netdev_priv(dev);
size_t len;
DPRINTK(2, " %d\n", dwrq->flags);
@@ -287,7 +275,7 @@ static int ks_wlan_set_essid(struct net_device *dev,
/* for SLEEP MODE */
/* Check if we asked for `any' */
- if (dwrq->flags == 0) {
+ if (!dwrq->flags) {
/* Just send an empty SSID list */
memset(priv->reg.ssid.body, 0, sizeof(priv->reg.ssid.body));
priv->reg.ssid.size = 0;
@@ -329,56 +317,40 @@ static int ks_wlan_set_essid(struct net_device *dev,
return 0;
}
-/*------------------------------------------------------------------*/
-/* Wireless Handler : get ESSID */
static int ks_wlan_get_essid(struct net_device *dev,
struct iw_request_info *info,
struct iw_point *dwrq, char *extra)
{
- struct ks_wlan_private *priv =
- (struct ks_wlan_private *)netdev_priv(dev);
+ struct ks_wlan_private *priv = netdev_priv(dev);
if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
/* for SLEEP MODE */
/* Note : if dwrq->flags != 0, we should
- * get the relevant SSID from the SSID list... */
- if (priv->reg.ssid.size) {
+ * get the relevant SSID from the SSID list...
+ */
+ if (priv->reg.ssid.size != 0) {
/* Get the current SSID */
memcpy(extra, priv->reg.ssid.body, priv->reg.ssid.size);
-#if 0
- extra[priv->reg.ssid.size] = '\0';
-#endif
+
/* If none, we may want to get the one that was set */
/* Push it out ! */
-#if 1
dwrq->length = priv->reg.ssid.size;
-#else
- dwrq->length = priv->reg.ssid.size + 1;
-#endif
dwrq->flags = 1; /* active */
} else {
-#if 1
dwrq->length = 0;
-#else
- extra[0] = '\0';
- dwrq->length = 1;
-#endif
dwrq->flags = 0; /* ANY */
}
return 0;
}
-/*------------------------------------------------------------------*/
-/* Wireless Handler : set AP address */
static int ks_wlan_set_wap(struct net_device *dev, struct iw_request_info *info,
struct sockaddr *ap_addr, char *extra)
{
- struct ks_wlan_private *priv =
- (struct ks_wlan_private *)netdev_priv(dev);
+ struct ks_wlan_private *priv = netdev_priv(dev);
DPRINTK(2, "\n");
@@ -408,20 +380,17 @@ static int ks_wlan_set_wap(struct net_device *dev, struct iw_request_info *info,
return 0;
}
-/*------------------------------------------------------------------*/
-/* Wireless Handler : get AP address */
static int ks_wlan_get_wap(struct net_device *dev, struct iw_request_info *info,
struct sockaddr *awrq, char *extra)
{
- struct ks_wlan_private *priv =
- (struct ks_wlan_private *)netdev_priv(dev);
+ struct ks_wlan_private *priv = netdev_priv(dev);
if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
/* for SLEEP MODE */
- if ((priv->connect_status & CONNECT_STATUS_MASK) == CONNECT_STATUS)
- memcpy(awrq->sa_data, &(priv->current_ap.bssid[0]), ETH_ALEN);
+ if (is_connect_status(priv->connect_status))
+ memcpy(awrq->sa_data, priv->current_ap.bssid, ETH_ALEN);
else
eth_zero_addr(awrq->sa_data);
@@ -430,14 +399,11 @@ static int ks_wlan_get_wap(struct net_device *dev, struct iw_request_info *info,
return 0;
}
-/*------------------------------------------------------------------*/
-/* Wireless Handler : set Nickname */
static int ks_wlan_set_nick(struct net_device *dev,
struct iw_request_info *info, struct iw_point *dwrq,
char *extra)
{
- struct ks_wlan_private *priv =
- (struct ks_wlan_private *)netdev_priv(dev);
+ struct ks_wlan_private *priv = netdev_priv(dev);
if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
@@ -453,14 +419,11 @@ static int ks_wlan_set_nick(struct net_device *dev,
return -EINPROGRESS; /* Call commit handler */
}
-/*------------------------------------------------------------------*/
-/* Wireless Handler : get Nickname */
static int ks_wlan_get_nick(struct net_device *dev,
struct iw_request_info *info, struct iw_point *dwrq,
char *extra)
{
- struct ks_wlan_private *priv =
- (struct ks_wlan_private *)netdev_priv(dev);
+ struct ks_wlan_private *priv = netdev_priv(dev);
if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
@@ -473,14 +436,11 @@ static int ks_wlan_get_nick(struct net_device *dev,
return 0;
}
-/*------------------------------------------------------------------*/
-/* Wireless Handler : set Bit-Rate */
static int ks_wlan_set_rate(struct net_device *dev,
struct iw_request_info *info, struct iw_param *vwrq,
char *extra)
{
- struct ks_wlan_private *priv =
- (struct ks_wlan_private *)netdev_priv(dev);
+ struct ks_wlan_private *priv = netdev_priv(dev);
int i = 0;
if (priv->sleep_mode == SLP_SLEEP)
@@ -493,12 +453,12 @@ static int ks_wlan_set_rate(struct net_device *dev,
case 11000000:
case 5500000:
priv->reg.rate_set.body[0] =
- (uint8_t) (vwrq->value / 500000);
+ (uint8_t)(vwrq->value / 500000);
break;
case 2000000:
case 1000000:
priv->reg.rate_set.body[0] =
- ((uint8_t) (vwrq->value / 500000)) |
+ ((uint8_t)(vwrq->value / 500000)) |
BASIC_RATE;
break;
default:
@@ -550,7 +510,7 @@ static int ks_wlan_set_rate(struct net_device *dev,
case 18000000:
case 9000000:
priv->reg.rate_set.body[0] =
- (uint8_t) (vwrq->value / 500000);
+ (uint8_t)(vwrq->value / 500000);
break;
case 24000000:
case 12000000:
@@ -560,7 +520,7 @@ static int ks_wlan_set_rate(struct net_device *dev,
case 2000000:
case 1000000:
priv->reg.rate_set.body[0] =
- ((uint8_t) (vwrq->value / 500000)) |
+ ((uint8_t)(vwrq->value / 500000)) |
BASIC_RATE;
break;
default:
@@ -708,14 +668,11 @@ static int ks_wlan_set_rate(struct net_device *dev,
return -EINPROGRESS; /* Call commit handler */
}
-/*------------------------------------------------------------------*/
-/* Wireless Handler : get Bit-Rate */
static int ks_wlan_get_rate(struct net_device *dev,
struct iw_request_info *info, struct iw_param *vwrq,
char *extra)
{
- struct ks_wlan_private *priv =
- (struct ks_wlan_private *)netdev_priv(dev);
+ struct ks_wlan_private *priv = netdev_priv(dev);
DPRINTK(2, "in_interrupt = %ld update_phyinfo = %d\n",
in_interrupt(), atomic_read(&update_phyinfo));
@@ -736,13 +693,10 @@ static int ks_wlan_get_rate(struct net_device *dev,
return 0;
}
-/*------------------------------------------------------------------*/
-/* Wireless Handler : set RTS threshold */
static int ks_wlan_set_rts(struct net_device *dev, struct iw_request_info *info,
struct iw_param *vwrq, char *extra)
{
- struct ks_wlan_private *priv =
- (struct ks_wlan_private *)netdev_priv(dev);
+ struct ks_wlan_private *priv = netdev_priv(dev);
int rthr = vwrq->value;
if (priv->sleep_mode == SLP_SLEEP)
@@ -760,13 +714,10 @@ static int ks_wlan_set_rts(struct net_device *dev, struct iw_request_info *info,
return -EINPROGRESS; /* Call commit handler */
}
-/*------------------------------------------------------------------*/
-/* Wireless Handler : get RTS threshold */
static int ks_wlan_get_rts(struct net_device *dev, struct iw_request_info *info,
struct iw_param *vwrq, char *extra)
{
- struct ks_wlan_private *priv =
- (struct ks_wlan_private *)netdev_priv(dev);
+ struct ks_wlan_private *priv = netdev_priv(dev);
if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
@@ -779,14 +730,11 @@ static int ks_wlan_get_rts(struct net_device *dev, struct iw_request_info *info,
return 0;
}
-/*------------------------------------------------------------------*/
-/* Wireless Handler : set Fragmentation threshold */
static int ks_wlan_set_frag(struct net_device *dev,
struct iw_request_info *info, struct iw_param *vwrq,
char *extra)
{
- struct ks_wlan_private *priv =
- (struct ks_wlan_private *)netdev_priv(dev);
+ struct ks_wlan_private *priv = netdev_priv(dev);
int fthr = vwrq->value;
if (priv->sleep_mode == SLP_SLEEP)
@@ -805,14 +753,11 @@ static int ks_wlan_set_frag(struct net_device *dev,
return -EINPROGRESS; /* Call commit handler */
}
-/*------------------------------------------------------------------*/
-/* Wireless Handler : get Fragmentation threshold */
static int ks_wlan_get_frag(struct net_device *dev,
struct iw_request_info *info, struct iw_param *vwrq,
char *extra)
{
- struct ks_wlan_private *priv =
- (struct ks_wlan_private *)netdev_priv(dev);
+ struct ks_wlan_private *priv = netdev_priv(dev);
if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
@@ -825,14 +770,11 @@ static int ks_wlan_get_frag(struct net_device *dev,
return 0;
}
-/*------------------------------------------------------------------*/
-/* Wireless Handler : set Mode of Operation */
static int ks_wlan_set_mode(struct net_device *dev,
struct iw_request_info *info, __u32 *uwrq,
char *extra)
{
- struct ks_wlan_private *priv =
- (struct ks_wlan_private *)netdev_priv(dev);
+ struct ks_wlan_private *priv = netdev_priv(dev);
DPRINTK(2, "mode=%d\n", *uwrq);
@@ -861,14 +803,11 @@ static int ks_wlan_set_mode(struct net_device *dev,
return -EINPROGRESS; /* Call commit handler */
}
-/*------------------------------------------------------------------*/
-/* Wireless Handler : get Mode of Operation */
static int ks_wlan_get_mode(struct net_device *dev,
struct iw_request_info *info, __u32 *uwrq,
char *extra)
{
- struct ks_wlan_private *priv =
- (struct ks_wlan_private *)netdev_priv(dev);
+ struct ks_wlan_private *priv = netdev_priv(dev);
if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
@@ -889,14 +828,11 @@ static int ks_wlan_get_mode(struct net_device *dev,
return 0;
}
-/*------------------------------------------------------------------*/
-/* Wireless Handler : set Encryption Key */
static int ks_wlan_set_encode(struct net_device *dev,
struct iw_request_info *info,
struct iw_point *dwrq, char *extra)
{
- struct ks_wlan_private *priv =
- (struct ks_wlan_private *)netdev_priv(dev);
+ struct ks_wlan_private *priv = netdev_priv(dev);
struct wep_key key;
int index = (dwrq->flags & IW_ENCODE_INDEX);
@@ -974,11 +910,12 @@ static int ks_wlan_set_encode(struct net_device *dev,
/* Do we want to just set the transmit key index ? */
if ((index >= 0) && (index < 4)) {
/* set_wep_key(priv, index, 0, 0, 1); xxx */
- if (priv->reg.wep_key[index].size) {
+ if (priv->reg.wep_key[index].size != 0) {
priv->reg.wep_index = index;
priv->need_commit |= SME_WEP_INDEX;
- } else
+ } else {
return -EINVAL;
+ }
}
}
}
@@ -1006,14 +943,11 @@ static int ks_wlan_set_encode(struct net_device *dev,
return 0;
}
-/*------------------------------------------------------------------*/
-/* Wireless Handler : get Encryption Key */
static int ks_wlan_get_encode(struct net_device *dev,
struct iw_request_info *info,
struct iw_point *dwrq, char *extra)
{
- struct ks_wlan_private *priv =
- (struct ks_wlan_private *)netdev_priv(dev);
+ struct ks_wlan_private *priv = netdev_priv(dev);
char zeros[16];
int index = (dwrq->flags & IW_ENCODE_INDEX) - 1;
@@ -1054,15 +988,14 @@ static int ks_wlan_get_encode(struct net_device *dev,
if ((index >= 0) && (index < 4))
memcpy(extra, priv->reg.wep_key[index].val,
dwrq->length);
- } else
+ } else {
memcpy(extra, zeros, dwrq->length);
+ }
#endif
return 0;
}
#ifndef KSC_OPNOTSUPP
-/*------------------------------------------------------------------*/
-/* Wireless Handler : set Tx-Power */
static int ks_wlan_set_txpow(struct net_device *dev,
struct iw_request_info *info,
struct iw_param *vwrq, char *extra)
@@ -1070,8 +1003,6 @@ static int ks_wlan_set_txpow(struct net_device *dev,
return -EOPNOTSUPP; /* Not Support */
}
-/*------------------------------------------------------------------*/
-/* Wireless Handler : get Tx-Power */
static int ks_wlan_get_txpow(struct net_device *dev,
struct iw_request_info *info,
struct iw_param *vwrq, char *extra)
@@ -1087,8 +1018,6 @@ static int ks_wlan_get_txpow(struct net_device *dev,
return 0;
}
-/*------------------------------------------------------------------*/
-/* Wireless Handler : set Retry limits */
static int ks_wlan_set_retry(struct net_device *dev,
struct iw_request_info *info,
struct iw_param *vwrq, char *extra)
@@ -1096,8 +1025,6 @@ static int ks_wlan_set_retry(struct net_device *dev,
return -EOPNOTSUPP; /* Not Support */
}
-/*------------------------------------------------------------------*/
-/* Wireless Handler : get Retry limits */
static int ks_wlan_get_retry(struct net_device *dev,
struct iw_request_info *info,
struct iw_param *vwrq, char *extra)
@@ -1114,14 +1041,11 @@ static int ks_wlan_get_retry(struct net_device *dev,
}
#endif /* KSC_OPNOTSUPP */
-/*------------------------------------------------------------------*/
-/* Wireless Handler : get range info */
static int ks_wlan_get_range(struct net_device *dev,
struct iw_request_info *info,
struct iw_point *dwrq, char *extra)
{
- struct ks_wlan_private *priv =
- (struct ks_wlan_private *)netdev_priv(dev);
+ struct ks_wlan_private *priv = netdev_priv(dev);
struct iw_range *range = (struct iw_range *)extra;
int i, k;
@@ -1137,7 +1061,8 @@ static int ks_wlan_get_range(struct net_device *dev,
range->max_nwid = 0x0000;
range->num_channels = 14;
/* Should be based on cap_rid.country to give only
- * what the current card support */
+ * what the current card support
+ */
k = 0;
for (i = 0; i < 13; i++) { /* channel 1 -- 13 */
range->freq[k].i = i + 1; /* List index */
@@ -1189,7 +1114,8 @@ static int ks_wlan_get_range(struct net_device *dev,
/* Set an indication of the max TCP throughput
* in bit/s that we can expect using this interface.
- * May be use for QoS stuff... Jean II */
+ * May be use for QoS stuff... Jean II
+ */
if (i > 2)
range->throughput = 5000 * 1000;
else
@@ -1223,9 +1149,11 @@ static int ks_wlan_get_range(struct net_device *dev,
range->retry_flags = IW_RETRY_ON;
range->r_time_flags = IW_RETRY_ON;
- /* Experimental measurements - boundary 11/5.5 Mb/s */
- /* Note : with or without the (local->rssi), results
- * are somewhat different. - Jean II */
+ /* Experimental measurements - boundary 11/5.5 Mb/s
+ *
+ * Note : with or without the (local->rssi), results
+ * are somewhat different. - Jean II
+ */
range->avg_qual.qual = 50;
range->avg_qual.level = 186; /* -70 dBm */
range->avg_qual.noise = 0;
@@ -1245,54 +1173,39 @@ static int ks_wlan_get_range(struct net_device *dev,
return 0;
}
-/*------------------------------------------------------------------*/
-/* Wireless Handler : set Power Management */
static int ks_wlan_set_power(struct net_device *dev,
struct iw_request_info *info,
struct iw_param *vwrq, char *extra)
{
- struct ks_wlan_private *priv =
- (struct ks_wlan_private *)netdev_priv(dev);
- short enabled;
+ struct ks_wlan_private *priv = netdev_priv(dev);
if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
- /* for SLEEP MODE */
- enabled = vwrq->disabled ? 0 : 1;
- if (enabled == 0) { /* 0 */
- priv->reg.powermgt = POWMGT_ACTIVE_MODE;
- } else if (enabled) { /* 1 */
- if (priv->reg.operation_mode == MODE_INFRASTRUCTURE)
- priv->reg.powermgt = POWMGT_SAVE1_MODE;
- else
- return -EINVAL;
- } else if (enabled) { /* 2 */
+ if (vwrq->disabled) {
+ priv->reg.power_mgmt = POWER_MGMT_ACTIVE;
+ } else {
if (priv->reg.operation_mode == MODE_INFRASTRUCTURE)
- priv->reg.powermgt = POWMGT_SAVE2_MODE;
+ priv->reg.power_mgmt = POWER_MGMT_SAVE1;
else
return -EINVAL;
- } else
- return -EINVAL;
+ }
hostif_sme_enqueue(priv, SME_POW_MNGMT_REQUEST);
return 0;
}
-/*------------------------------------------------------------------*/
-/* Wireless Handler : get Power Management */
static int ks_wlan_get_power(struct net_device *dev,
struct iw_request_info *info,
struct iw_param *vwrq, char *extra)
{
- struct ks_wlan_private *priv =
- (struct ks_wlan_private *)netdev_priv(dev);
+ struct ks_wlan_private *priv = netdev_priv(dev);
if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
/* for SLEEP MODE */
- if (priv->reg.powermgt > 0)
+ if (priv->reg.power_mgmt > 0)
vwrq->disabled = 0;
else
vwrq->disabled = 1;
@@ -1300,14 +1213,11 @@ static int ks_wlan_get_power(struct net_device *dev,
return 0;
}
-/*------------------------------------------------------------------*/
-/* Wireless Handler : get wirless statistics */
static int ks_wlan_get_iwstats(struct net_device *dev,
struct iw_request_info *info,
struct iw_quality *vwrq, char *extra)
{
- struct ks_wlan_private *priv =
- (struct ks_wlan_private *)netdev_priv(dev);
+ struct ks_wlan_private *priv = netdev_priv(dev);
if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
@@ -1321,8 +1231,7 @@ static int ks_wlan_get_iwstats(struct net_device *dev,
}
#ifndef KSC_OPNOTSUPP
-/*------------------------------------------------------------------*/
-/* Wireless Handler : set Sensitivity */
+
static int ks_wlan_set_sens(struct net_device *dev,
struct iw_request_info *info, struct iw_param *vwrq,
char *extra)
@@ -1330,8 +1239,6 @@ static int ks_wlan_set_sens(struct net_device *dev,
return -EOPNOTSUPP; /* Not Support */
}
-/*------------------------------------------------------------------*/
-/* Wireless Handler : get Sensitivity */
static int ks_wlan_get_sens(struct net_device *dev,
struct iw_request_info *info, struct iw_param *vwrq,
char *extra)
@@ -1344,15 +1251,12 @@ static int ks_wlan_get_sens(struct net_device *dev,
}
#endif /* KSC_OPNOTSUPP */
-/*------------------------------------------------------------------*/
-/* Wireless Handler : get AP List */
/* Note : this is deprecated in favor of IWSCAN */
static int ks_wlan_get_aplist(struct net_device *dev,
struct iw_request_info *info,
struct iw_point *dwrq, char *extra)
{
- struct ks_wlan_private *priv =
- (struct ks_wlan_private *)netdev_priv(dev);
+ struct ks_wlan_private *priv = netdev_priv(dev);
struct sockaddr *address = (struct sockaddr *)extra;
struct iw_quality qual[LOCAL_APLIST_MAX];
@@ -1380,14 +1284,11 @@ static int ks_wlan_get_aplist(struct net_device *dev,
return 0;
}
-/*------------------------------------------------------------------*/
-/* Wireless Handler : Initiate Scan */
static int ks_wlan_set_scan(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
- struct ks_wlan_private *priv =
- (struct ks_wlan_private *)netdev_priv(dev);
+ struct ks_wlan_private *priv = netdev_priv(dev);
struct iw_scan_req *req = NULL;
DPRINTK(2, "\n");
@@ -1397,8 +1298,8 @@ static int ks_wlan_set_scan(struct net_device *dev,
/* for SLEEP MODE */
/* specified SSID SCAN */
- if (wrqu->data.length == sizeof(struct iw_scan_req)
- && wrqu->data.flags & IW_SCAN_THIS_ESSID) {
+ if (wrqu->data.length == sizeof(struct iw_scan_req) &&
+ wrqu->data.flags & IW_SCAN_THIS_ESSID) {
req = (struct iw_scan_req *)extra;
priv->scan_ssid_len = req->essid_len;
memcpy(priv->scan_ssid, req->essid, priv->scan_ssid_len);
@@ -1414,7 +1315,6 @@ static int ks_wlan_set_scan(struct net_device *dev,
return 0;
}
-/*------------------------------------------------------------------*/
/*
* Translate scan data returned from the card to a card independent
* format that the Wireless Tools will understand - Jean II
@@ -1452,7 +1352,7 @@ static inline char *ks_wlan_translate_scan(struct net_device *dev,
iwe.u.data.flags = 1;
current_ev =
iwe_stream_add_point(info, current_ev, end_buf, &iwe,
- &(ap->ssid.body[0]));
+ ap->ssid.body);
/* Add mode */
iwe.cmd = SIOCGIWMODE;
@@ -1494,15 +1394,18 @@ static inline char *ks_wlan_translate_scan(struct net_device *dev,
iwe.u.data.length = 0;
current_ev =
iwe_stream_add_point(info, current_ev, end_buf, &iwe,
- &(ap->ssid.body[0]));
+ ap->ssid.body);
/* Rate : stuffing multiple values in a single event require a bit
- * more of magic - Jean II */
+ * more of magic - Jean II
+ */
current_val = current_ev + IW_EV_LCP_LEN;
iwe.cmd = SIOCGIWRATE;
- /* Those two flags are ignored... */
- iwe.u.bitrate.fixed = iwe.u.bitrate.disabled = 0;
+
+ /* These two flags are ignored... */
+ iwe.u.bitrate.fixed = 0;
+ iwe.u.bitrate.disabled = 0;
/* Max 16 values */
for (i = 0; i < 16; i++) {
@@ -1569,18 +1472,16 @@ static inline char *ks_wlan_translate_scan(struct net_device *dev,
}
/* The other data in the scan result are not really
- * interesting, so for now drop it - Jean II */
+ * interesting, so for now drop it - Jean II
+ */
return current_ev;
}
-/*------------------------------------------------------------------*/
-/* Wireless Handler : Read Scan Results */
static int ks_wlan_get_scan(struct net_device *dev,
struct iw_request_info *info, struct iw_point *dwrq,
char *extra)
{
- struct ks_wlan_private *priv =
- (struct ks_wlan_private *)netdev_priv(dev);
+ struct ks_wlan_private *priv = netdev_priv(dev);
int i;
char *current_ev = extra;
@@ -1596,23 +1497,12 @@ static int ks_wlan_get_scan(struct net_device *dev,
if (priv->aplist.size == 0) {
/* Client error, no scan results...
- * The caller need to restart the scan. */
+ * The caller need to restart the scan.
+ */
DPRINTK(2, "aplist 0\n");
return -ENODATA;
}
-#if 0
- /* current connect ap */
- if ((priv->connect_status & CONNECT_STATUS_MASK) == CONNECT_STATUS) {
- if ((extra + dwrq->length) - current_ev <= IW_EV_ADDR_LEN) {
- dwrq->length = 0;
- return -E2BIG;
- }
- current_ev = ks_wlan_translate_scan(dev, current_ev,
-// extra + IW_SCAN_MAX_DATA,
- extra + dwrq->length,
- &(priv->current_ap));
- }
-#endif
+
/* Read and parse all entries */
for (i = 0; i < priv->aplist.size; i++) {
if ((extra + dwrq->length) - current_ev <= IW_EV_ADDR_LEN) {
@@ -1621,9 +1511,8 @@ static int ks_wlan_get_scan(struct net_device *dev,
}
/* Translate to WE format this entry */
current_ev = ks_wlan_translate_scan(dev, info, current_ev,
-// extra + IW_SCAN_MAX_DATA,
extra + dwrq->length,
- &(priv->aplist.ap[i]));
+ &priv->aplist.ap[i]);
}
/* Length of data */
dwrq->length = (current_ev - extra);
@@ -1632,14 +1521,12 @@ static int ks_wlan_get_scan(struct net_device *dev,
return 0;
}
-/*------------------------------------------------------------------*/
-/* Commit handler : called after a bunch of SET operations */
+/* called after a bunch of SET operations */
static int ks_wlan_config_commit(struct net_device *dev,
struct iw_request_info *info, void *zwrq,
char *extra)
{
- struct ks_wlan_private *priv =
- (struct ks_wlan_private *)netdev_priv(dev);
+ struct ks_wlan_private *priv = netdev_priv(dev);
if (!priv->need_commit)
return 0;
@@ -1649,14 +1536,12 @@ static int ks_wlan_config_commit(struct net_device *dev,
return 0;
}
-/*------------------------------------------------------------------*/
-/* Wireless handler : set association ie params */
+/* set association ie params */
static int ks_wlan_set_genie(struct net_device *dev,
struct iw_request_info *info,
struct iw_point *dwrq, char *extra)
{
- struct ks_wlan_private *priv =
- (struct ks_wlan_private *)netdev_priv(dev);
+ struct ks_wlan_private *priv = netdev_priv(dev);
DPRINTK(2, "\n");
@@ -1667,14 +1552,11 @@ static int ks_wlan_set_genie(struct net_device *dev,
// return -EOPNOTSUPP;
}
-/*------------------------------------------------------------------*/
-/* Wireless handler : set authentication mode params */
static int ks_wlan_set_auth_mode(struct net_device *dev,
struct iw_request_info *info,
struct iw_param *vwrq, char *extra)
{
- struct ks_wlan_private *priv =
- (struct ks_wlan_private *)netdev_priv(dev);
+ struct ks_wlan_private *priv = netdev_priv(dev);
int index = (vwrq->flags & IW_AUTH_INDEX);
int value = vwrq->value;
@@ -1804,14 +1686,11 @@ static int ks_wlan_set_auth_mode(struct net_device *dev,
return 0;
}
-/*------------------------------------------------------------------*/
-/* Wireless handler : get authentication mode params */
static int ks_wlan_get_auth_mode(struct net_device *dev,
struct iw_request_info *info,
struct iw_param *vwrq, char *extra)
{
- struct ks_wlan_private *priv =
- (struct ks_wlan_private *)netdev_priv(dev);
+ struct ks_wlan_private *priv = netdev_priv(dev);
int index = (vwrq->flags & IW_AUTH_INDEX);
DPRINTK(2, "index=%d\n", index);
@@ -1850,19 +1729,20 @@ static int ks_wlan_get_auth_mode(struct net_device *dev,
return 0;
}
-/*------------------------------------------------------------------*/
-/* Wireless Handler : set encoding token & mode (WPA)*/
+/* set encoding token & mode (WPA)*/
static int ks_wlan_set_encode_ext(struct net_device *dev,
struct iw_request_info *info,
struct iw_point *dwrq, char *extra)
{
- struct ks_wlan_private *priv =
- (struct ks_wlan_private *)netdev_priv(dev);
+ struct ks_wlan_private *priv = netdev_priv(dev);
struct iw_encode_ext *enc;
int index = dwrq->flags & IW_ENCODE_INDEX;
unsigned int commit = 0;
+ struct wpa_key_t *key;
enc = (struct iw_encode_ext *)extra;
+ if (!enc)
+ return -EINVAL;
DPRINTK(2, "flags=%04X:: ext_flags=%08X\n", dwrq->flags,
enc->ext_flags);
@@ -1873,77 +1753,65 @@ static int ks_wlan_set_encode_ext(struct net_device *dev,
/* for SLEEP MODE */
if (index < 1 || index > 4)
return -EINVAL;
- else
- index--;
+ index--;
+ key = &priv->wpa.key[index];
if (dwrq->flags & IW_ENCODE_DISABLED)
- priv->wpa.key[index].key_len = 0;
-
- if (enc) {
- priv->wpa.key[index].ext_flags = enc->ext_flags;
- if (enc->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) {
- priv->wpa.txkey = index;
- commit |= SME_WEP_INDEX;
- } else if (enc->ext_flags & IW_ENCODE_EXT_RX_SEQ_VALID) {
- memcpy(&priv->wpa.key[index].rx_seq[0],
- enc->rx_seq, IW_ENCODE_SEQ_MAX_SIZE);
- }
+ key->key_len = 0;
+
+ key->ext_flags = enc->ext_flags;
+ if (enc->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) {
+ priv->wpa.txkey = index;
+ commit |= SME_WEP_INDEX;
+ } else if (enc->ext_flags & IW_ENCODE_EXT_RX_SEQ_VALID) {
+ memcpy(&key->rx_seq[0], &enc->rx_seq[0], IW_ENCODE_SEQ_MAX_SIZE);
+ }
- memcpy(&priv->wpa.key[index].addr.sa_data[0],
- &enc->addr.sa_data[0], ETH_ALEN);
+ memcpy(&key->addr.sa_data[0], &enc->addr.sa_data[0], ETH_ALEN);
- switch (enc->alg) {
- case IW_ENCODE_ALG_NONE:
- if (priv->reg.privacy_invoked) {
- priv->reg.privacy_invoked = 0x00;
- commit |= SME_WEP_FLAG;
- }
- priv->wpa.key[index].key_len = 0;
+ switch (enc->alg) {
+ case IW_ENCODE_ALG_NONE:
+ if (priv->reg.privacy_invoked) {
+ priv->reg.privacy_invoked = 0x00;
+ commit |= SME_WEP_FLAG;
+ }
+ key->key_len = 0;
- break;
- case IW_ENCODE_ALG_WEP:
- case IW_ENCODE_ALG_CCMP:
- if (!priv->reg.privacy_invoked) {
- priv->reg.privacy_invoked = 0x01;
- commit |= SME_WEP_FLAG;
- }
- if (enc->key_len) {
- memcpy(&priv->wpa.key[index].key_val[0],
- &enc->key[0], enc->key_len);
- priv->wpa.key[index].key_len = enc->key_len;
- commit |= (SME_WEP_VAL1 << index);
- }
- break;
- case IW_ENCODE_ALG_TKIP:
- if (!priv->reg.privacy_invoked) {
- priv->reg.privacy_invoked = 0x01;
- commit |= SME_WEP_FLAG;
- }
- if (enc->key_len == 32) {
- memcpy(&priv->wpa.key[index].key_val[0],
- &enc->key[0], enc->key_len - 16);
- priv->wpa.key[index].key_len =
- enc->key_len - 16;
- if (priv->wpa.key_mgmt_suite == 4) { /* WPA_NONE */
- memcpy(&priv->wpa.key[index].
- tx_mic_key[0], &enc->key[16], 8);
- memcpy(&priv->wpa.key[index].
- rx_mic_key[0], &enc->key[16], 8);
- } else {
- memcpy(&priv->wpa.key[index].
- tx_mic_key[0], &enc->key[16], 8);
- memcpy(&priv->wpa.key[index].
- rx_mic_key[0], &enc->key[24], 8);
- }
- commit |= (SME_WEP_VAL1 << index);
+ break;
+ case IW_ENCODE_ALG_WEP:
+ case IW_ENCODE_ALG_CCMP:
+ if (!priv->reg.privacy_invoked) {
+ priv->reg.privacy_invoked = 0x01;
+ commit |= SME_WEP_FLAG;
+ }
+ if (enc->key_len) {
+ memcpy(&key->key_val[0], &enc->key[0], enc->key_len);
+ key->key_len = enc->key_len;
+ commit |= (SME_WEP_VAL1 << index);
+ }
+ break;
+ case IW_ENCODE_ALG_TKIP:
+ if (!priv->reg.privacy_invoked) {
+ priv->reg.privacy_invoked = 0x01;
+ commit |= SME_WEP_FLAG;
+ }
+ if (enc->key_len == 32) {
+ memcpy(&key->key_val[0], &enc->key[0], enc->key_len - 16);
+ key->key_len = enc->key_len - 16;
+ if (priv->wpa.key_mgmt_suite == 4) { /* WPA_NONE */
+ memcpy(&key->tx_mic_key[0], &enc->key[16], 8);
+ memcpy(&key->rx_mic_key[0], &enc->key[16], 8);
+ } else {
+ memcpy(&key->tx_mic_key[0], &enc->key[16], 8);
+ memcpy(&key->rx_mic_key[0], &enc->key[24], 8);
}
- break;
- default:
- return -EINVAL;
+ commit |= (SME_WEP_VAL1 << index);
}
- priv->wpa.key[index].alg = enc->alg;
- } else
+ break;
+ default:
return -EINVAL;
+ }
+ key->alg = enc->alg;
if (commit) {
if (commit & SME_WEP_INDEX)
@@ -1957,36 +1825,32 @@ static int ks_wlan_set_encode_ext(struct net_device *dev,
return 0;
}
-/*------------------------------------------------------------------*/
-/* Wireless Handler : get encoding token & mode (WPA)*/
+/* get encoding token & mode (WPA)*/
static int ks_wlan_get_encode_ext(struct net_device *dev,
struct iw_request_info *info,
struct iw_point *dwrq, char *extra)
{
- struct ks_wlan_private *priv =
- (struct ks_wlan_private *)netdev_priv(dev);
+ struct ks_wlan_private *priv = netdev_priv(dev);
if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
/* for SLEEP MODE */
- /* WPA (not used ?? wpa_supplicant)
- struct ks_wlan_private *priv = (struct ks_wlan_private *)dev->priv;
- struct iw_encode_ext *enc;
- enc = (struct iw_encode_ext *)extra;
- int index = dwrq->flags & IW_ENCODE_INDEX;
- WPA (not used ?? wpa_supplicant) */
+ /* WPA (not used ?? wpa_supplicant)
+ * struct ks_wlan_private *priv = (struct ks_wlan_private *)dev->priv;
+ * struct iw_encode_ext *enc;
+ * enc = (struct iw_encode_ext *)extra;
+ * int index = dwrq->flags & IW_ENCODE_INDEX;
+ * WPA (not used ?? wpa_supplicant)
+ */
return 0;
}
-/*------------------------------------------------------------------*/
-/* Wireless Handler : PMKSA cache operation (WPA2) */
static int ks_wlan_set_pmksa(struct net_device *dev,
struct iw_request_info *info,
struct iw_point *dwrq, char *extra)
{
- struct ks_wlan_private *priv =
- (struct ks_wlan_private *)netdev_priv(dev);
+ struct ks_wlan_private *priv = netdev_priv(dev);
struct iw_pmksa *pmksa;
int i;
struct pmk_t *pmk;
@@ -2009,76 +1873,68 @@ static int ks_wlan_set_pmksa(struct net_device *dev,
if (list_empty(&priv->pmklist.head)) { /* new list */
for (i = 0; i < PMK_LIST_MAX; i++) {
pmk = &priv->pmklist.pmk[i];
- if (!memcmp
- ("\x00\x00\x00\x00\x00\x00", pmk->bssid,
- ETH_ALEN))
- break;
+ if (memcmp("\x00\x00\x00\x00\x00\x00",
+ pmk->bssid, ETH_ALEN) == 0)
+ break; /* loop */
}
memcpy(pmk->bssid, pmksa->bssid.sa_data, ETH_ALEN);
memcpy(pmk->pmkid, pmksa->pmkid, IW_PMKID_LEN);
list_add(&pmk->list, &priv->pmklist.head);
priv->pmklist.size++;
- } else { /* search cache data */
- list_for_each(ptr, &priv->pmklist.head) {
- pmk = list_entry(ptr, struct pmk_t, list);
- if (!memcmp(pmksa->bssid.sa_data, pmk->bssid, ETH_ALEN)) { /* match address! list move to head. */
- memcpy(pmk->pmkid, pmksa->pmkid,
- IW_PMKID_LEN);
- list_move(&pmk->list,
- &priv->pmklist.head);
- break;
- }
+ break; /* case */
+ }
+ /* search cache data */
+ list_for_each(ptr, &priv->pmklist.head) {
+ pmk = list_entry(ptr, struct pmk_t, list);
+ if (memcmp(pmksa->bssid.sa_data, pmk->bssid, ETH_ALEN) == 0) {
+ memcpy(pmk->pmkid, pmksa->pmkid, IW_PMKID_LEN);
+ list_move(&pmk->list, &priv->pmklist.head);
+ break; /* list_for_each */
}
- if (ptr == &priv->pmklist.head) { /* not find address. */
- if (PMK_LIST_MAX > priv->pmklist.size) { /* new cache data */
- for (i = 0; i < PMK_LIST_MAX; i++) {
- pmk = &priv->pmklist.pmk[i];
- if (!memcmp
- ("\x00\x00\x00\x00\x00\x00",
- pmk->bssid, ETH_ALEN))
- break;
- }
- memcpy(pmk->bssid, pmksa->bssid.sa_data,
- ETH_ALEN);
- memcpy(pmk->pmkid, pmksa->pmkid,
- IW_PMKID_LEN);
- list_add(&pmk->list,
- &priv->pmklist.head);
- priv->pmklist.size++;
- } else { /* overwrite old cache data */
- pmk =
- list_entry(priv->pmklist.head.prev,
- struct pmk_t, list);
- memcpy(pmk->bssid, pmksa->bssid.sa_data,
- ETH_ALEN);
- memcpy(pmk->pmkid, pmksa->pmkid,
- IW_PMKID_LEN);
- list_move(&pmk->list,
- &priv->pmklist.head);
- }
+ }
+ if (ptr != &priv->pmklist.head) /* not find address. */
+ break; /* case */
+
+ if (priv->pmklist.size < PMK_LIST_MAX) { /* new cache data */
+ for (i = 0; i < PMK_LIST_MAX; i++) {
+ pmk = &priv->pmklist.pmk[i];
+ if (memcmp("\x00\x00\x00\x00\x00\x00",
+ pmk->bssid, ETH_ALEN) == 0)
+ break; /* loop */
}
+ memcpy(pmk->bssid, pmksa->bssid.sa_data, ETH_ALEN);
+ memcpy(pmk->pmkid, pmksa->pmkid, IW_PMKID_LEN);
+ list_add(&pmk->list, &priv->pmklist.head);
+ priv->pmklist.size++;
+ } else { /* overwrite old cache data */
+ pmk = list_entry(priv->pmklist.head.prev, struct pmk_t,
+ list);
+ memcpy(pmk->bssid, pmksa->bssid.sa_data, ETH_ALEN);
+ memcpy(pmk->pmkid, pmksa->pmkid, IW_PMKID_LEN);
+ list_move(&pmk->list, &priv->pmklist.head);
}
break;
case IW_PMKSA_REMOVE:
if (list_empty(&priv->pmklist.head)) { /* list empty */
return -EINVAL;
- } else { /* search cache data */
- list_for_each(ptr, &priv->pmklist.head) {
- pmk = list_entry(ptr, struct pmk_t, list);
- if (!memcmp(pmksa->bssid.sa_data, pmk->bssid, ETH_ALEN)) { /* match address! list del. */
- eth_zero_addr(pmk->bssid);
- memset(pmk->pmkid, 0, IW_PMKID_LEN);
- list_del_init(&pmk->list);
- break;
- }
- }
- if (ptr == &priv->pmklist.head) { /* not find address. */
- return 0;
+ }
+ /* search cache data */
+ list_for_each(ptr, &priv->pmklist.head) {
+ pmk = list_entry(ptr, struct pmk_t, list);
+ if (memcmp(pmksa->bssid.sa_data, pmk->bssid, ETH_ALEN) == 0) {
+ eth_zero_addr(pmk->bssid);
+ memset(pmk->pmkid, 0, IW_PMKID_LEN);
+ list_del_init(&pmk->list);
+ break;
}
}
+ if (ptr == &priv->pmklist.head) { /* not find address. */
+ return 0;
+ }
+
break;
case IW_PMKSA_FLUSH:
- memset(&(priv->pmklist), 0, sizeof(priv->pmklist));
+ memset(&priv->pmklist, 0, sizeof(priv->pmklist));
INIT_LIST_HEAD(&priv->pmklist.head);
for (i = 0; i < PMK_LIST_MAX; i++)
INIT_LIST_HEAD(&priv->pmklist.pmk[i].list);
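One note on the flattened PMKSA handling above: when list_for_each() finishes without a break, ptr ends up back at the list head, which is what the ptr == &priv->pmklist.head test uses to detect a cache miss. A minimal sketch of that lookup idiom, using the same memcmp() convention as the patch; the local variable match is illustrative only.

/* Sketch of the "search, then test for miss" idiom used above. */
struct pmk_t *match = NULL;

list_for_each(ptr, &priv->pmklist.head) {
	pmk = list_entry(ptr, struct pmk_t, list);
	if (memcmp(pmksa->bssid.sa_data, pmk->bssid, ETH_ALEN) == 0) {
		match = pmk;
		break;
	}
}
if (!match)	/* equivalent to ptr == &priv->pmklist.head */
	return 0;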
@@ -2093,8 +1949,7 @@ static int ks_wlan_set_pmksa(struct net_device *dev,
static struct iw_statistics *ks_get_wireless_stats(struct net_device *dev)
{
- struct ks_wlan_private *priv =
- (struct ks_wlan_private *)netdev_priv(dev);
+ struct ks_wlan_private *priv = netdev_priv(dev);
struct iw_statistics *wstats = &priv->wstats;
if (!atomic_read(&update_phyinfo)) {
@@ -2105,7 +1960,8 @@ static struct iw_statistics *ks_get_wireless_stats(struct net_device *dev)
}
/* Packets discarded in the wireless adapter due to wireless
- * specific problems */
+ * specific problems
+ */
wstats->discard.nwid = 0; /* Rx invalid nwid */
wstats->discard.code = 0; /* Rx invalid crypt */
wstats->discard.fragment = 0; /* Rx invalid frag */
@@ -2116,14 +1972,12 @@ static struct iw_statistics *ks_get_wireless_stats(struct net_device *dev)
return wstats;
}
-/*------------------------------------------------------------------*/
-/* Private handler : set stop request */
static int ks_wlan_set_stop_request(struct net_device *dev,
struct iw_request_info *info, __u32 *uwrq,
char *extra)
{
- struct ks_wlan_private *priv =
- (struct ks_wlan_private *)netdev_priv(dev);
+ struct ks_wlan_private *priv = netdev_priv(dev);
+
DPRINTK(2, "\n");
if (priv->sleep_mode == SLP_SLEEP)
@@ -2137,15 +1991,12 @@ static int ks_wlan_set_stop_request(struct net_device *dev,
return 0;
}
-/*------------------------------------------------------------------*/
-/* Wireless Handler : set MLME */
#include <linux/ieee80211.h>
static int ks_wlan_set_mlme(struct net_device *dev,
struct iw_request_info *info, struct iw_point *dwrq,
char *extra)
{
- struct ks_wlan_private *priv =
- (struct ks_wlan_private *)netdev_priv(dev);
+ struct ks_wlan_private *priv = netdev_priv(dev);
struct iw_mlme *mlme = (struct iw_mlme *)extra;
__u32 mode;
@@ -2167,86 +2018,22 @@ static int ks_wlan_set_mlme(struct net_device *dev,
}
}
-/*------------------------------------------------------------------*/
-/* Private handler : get firemware version */
static int ks_wlan_get_firmware_version(struct net_device *dev,
struct iw_request_info *info,
struct iw_point *dwrq, char *extra)
{
- struct ks_wlan_private *priv =
- (struct ks_wlan_private *)netdev_priv(dev);
- strcpy(extra, &(priv->firmware_version[0]));
- dwrq->length = priv->version_size + 1;
- return 0;
-}
-
-#if 0
-/*------------------------------------------------------------------*/
-/* Private handler : set force disconnect status */
-static int ks_wlan_set_detach(struct net_device *dev,
- struct iw_request_info *info, __u32 *uwrq,
- char *extra)
-{
- struct ks_wlan_private *priv = (struct ks_wlan_private *)dev->priv;
-
- if (priv->sleep_mode == SLP_SLEEP)
- return -EPERM;
-
- /* for SLEEP MODE */
- if (*uwrq == CONNECT_STATUS) { /* 0 */
- priv->connect_status &= ~FORCE_DISCONNECT;
- if ((priv->connect_status & CONNECT_STATUS_MASK) ==
- CONNECT_STATUS)
- netif_carrier_on(dev);
- } else if (*uwrq == DISCONNECT_STATUS) { /* 1 */
- priv->connect_status |= FORCE_DISCONNECT;
- netif_carrier_off(dev);
- } else
- return -EINVAL;
- return 0;
-}
-
-/*------------------------------------------------------------------*/
-/* Private handler : get force disconnect status */
-static int ks_wlan_get_detach(struct net_device *dev,
- struct iw_request_info *info, __u32 *uwrq,
- char *extra)
-{
- struct ks_wlan_private *priv = (struct ks_wlan_private *)dev->priv;
-
- if (priv->sleep_mode == SLP_SLEEP)
- return -EPERM;
-
- /* for SLEEP MODE */
- *uwrq = ((priv->connect_status & FORCE_DISCONNECT) ? 1 : 0);
- return 0;
-}
-
-/*------------------------------------------------------------------*/
-/* Private handler : get connect status */
-static int ks_wlan_get_connect(struct net_device *dev,
- struct iw_request_info *info, __u32 *uwrq,
- char *extra)
-{
- struct ks_wlan_private *priv = (struct ks_wlan_private *)dev->priv;
-
- if (priv->sleep_mode == SLP_SLEEP)
- return -EPERM;
+ struct ks_wlan_private *priv = netdev_priv(dev);
- /* for SLEEP MODE */
- *uwrq = (priv->connect_status & CONNECT_STATUS_MASK);
+ strcpy(extra, priv->firmware_version);
+ dwrq->length = priv->version_size + 1;
return 0;
}
-#endif
-/*------------------------------------------------------------------*/
-/* Private handler : set preamble */
static int ks_wlan_set_preamble(struct net_device *dev,
struct iw_request_info *info, __u32 *uwrq,
char *extra)
{
- struct ks_wlan_private *priv =
- (struct ks_wlan_private *)netdev_priv(dev);
+ struct ks_wlan_private *priv = netdev_priv(dev);
if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
@@ -2256,21 +2043,19 @@ static int ks_wlan_set_preamble(struct net_device *dev,
priv->reg.preamble = LONG_PREAMBLE;
} else if (*uwrq == SHORT_PREAMBLE) { /* 1 */
priv->reg.preamble = SHORT_PREAMBLE;
- } else
+ } else {
return -EINVAL;
+ }
priv->need_commit |= SME_MODE_SET;
return -EINPROGRESS; /* Call commit handler */
}
-/*------------------------------------------------------------------*/
-/* Private handler : get preamble */
static int ks_wlan_get_preamble(struct net_device *dev,
struct iw_request_info *info, __u32 *uwrq,
char *extra)
{
- struct ks_wlan_private *priv =
- (struct ks_wlan_private *)netdev_priv(dev);
+ struct ks_wlan_private *priv = netdev_priv(dev);
if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
@@ -2280,64 +2065,56 @@ static int ks_wlan_get_preamble(struct net_device *dev,
return 0;
}
-/*------------------------------------------------------------------*/
-/* Private handler : set power save mode */
-static int ks_wlan_set_powermgt(struct net_device *dev,
- struct iw_request_info *info, __u32 *uwrq,
- char *extra)
+static int ks_wlan_set_power_mgmt(struct net_device *dev,
+ struct iw_request_info *info, __u32 *uwrq,
+ char *extra)
{
- struct ks_wlan_private *priv =
- (struct ks_wlan_private *)netdev_priv(dev);
+ struct ks_wlan_private *priv = netdev_priv(dev);
if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
/* for SLEEP MODE */
- if (*uwrq == POWMGT_ACTIVE_MODE) { /* 0 */
- priv->reg.powermgt = POWMGT_ACTIVE_MODE;
- } else if (*uwrq == POWMGT_SAVE1_MODE) { /* 1 */
+ if (*uwrq == POWER_MGMT_ACTIVE) { /* 0 */
+ priv->reg.power_mgmt = POWER_MGMT_ACTIVE;
+ } else if (*uwrq == POWER_MGMT_SAVE1) { /* 1 */
if (priv->reg.operation_mode == MODE_INFRASTRUCTURE)
- priv->reg.powermgt = POWMGT_SAVE1_MODE;
+ priv->reg.power_mgmt = POWER_MGMT_SAVE1;
else
return -EINVAL;
- } else if (*uwrq == POWMGT_SAVE2_MODE) { /* 2 */
+ } else if (*uwrq == POWER_MGMT_SAVE2) { /* 2 */
if (priv->reg.operation_mode == MODE_INFRASTRUCTURE)
- priv->reg.powermgt = POWMGT_SAVE2_MODE;
+ priv->reg.power_mgmt = POWER_MGMT_SAVE2;
else
return -EINVAL;
- } else
+ } else {
return -EINVAL;
+ }
hostif_sme_enqueue(priv, SME_POW_MNGMT_REQUEST);
return 0;
}
-/*------------------------------------------------------------------*/
-/* Private handler : get power save made */
-static int ks_wlan_get_powermgt(struct net_device *dev,
- struct iw_request_info *info, __u32 *uwrq,
- char *extra)
+static int ks_wlan_get_power_mgmt(struct net_device *dev,
+ struct iw_request_info *info, __u32 *uwrq,
+ char *extra)
{
- struct ks_wlan_private *priv =
- (struct ks_wlan_private *)netdev_priv(dev);
+ struct ks_wlan_private *priv = netdev_priv(dev);
if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
/* for SLEEP MODE */
- *uwrq = priv->reg.powermgt;
+ *uwrq = priv->reg.power_mgmt;
return 0;
}
-/*------------------------------------------------------------------*/
-/* Private handler : set scan type */
static int ks_wlan_set_scan_type(struct net_device *dev,
struct iw_request_info *info, __u32 *uwrq,
char *extra)
{
- struct ks_wlan_private *priv =
- (struct ks_wlan_private *)netdev_priv(dev);
+ struct ks_wlan_private *priv = netdev_priv(dev);
if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
@@ -2346,20 +2123,18 @@ static int ks_wlan_set_scan_type(struct net_device *dev,
priv->reg.scan_type = ACTIVE_SCAN;
} else if (*uwrq == PASSIVE_SCAN) { /* 1 */
priv->reg.scan_type = PASSIVE_SCAN;
- } else
+ } else {
return -EINVAL;
+ }
return 0;
}
-/*------------------------------------------------------------------*/
-/* Private handler : get scan type */
static int ks_wlan_get_scan_type(struct net_device *dev,
struct iw_request_info *info, __u32 *uwrq,
char *extra)
{
- struct ks_wlan_private *priv =
- (struct ks_wlan_private *)netdev_priv(dev);
+ struct ks_wlan_private *priv = netdev_priv(dev);
if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
@@ -2368,145 +2143,11 @@ static int ks_wlan_get_scan_type(struct net_device *dev,
return 0;
}
-#if 0
-/*------------------------------------------------------------------*/
-/* Private handler : write raw data to device */
-static int ks_wlan_data_write(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *dwrq, char *extra)
-{
- struct ks_wlan_private *priv = (struct ks_wlan_private *)dev->priv;
- unsigned char *wbuff = NULL;
-
- if (priv->sleep_mode == SLP_SLEEP)
- return -EPERM;
- /* for SLEEP MODE */
- wbuff = (unsigned char *)kmalloc(dwrq->length, GFP_ATOMIC);
- if (!wbuff)
- return -EFAULT;
- memcpy(wbuff, extra, dwrq->length);
-
- /* write to device */
- ks_wlan_hw_tx(priv, wbuff, dwrq->length, NULL, NULL, NULL);
-
- return 0;
-}
-
-/*------------------------------------------------------------------*/
-/* Private handler : read raw data form device */
-static int ks_wlan_data_read(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *dwrq, char *extra)
-{
- struct ks_wlan_private *priv = (struct ks_wlan_private *)dev->priv;
- unsigned short read_length;
-
- if (priv->sleep_mode == SLP_SLEEP)
- return -EPERM;
- /* for SLEEP MODE */
- if (!atomic_read(&priv->event_count)) {
- if (priv->dev_state < DEVICE_STATE_BOOT) { /* Remove device */
- read_length = 4;
- memset(extra, 0xff, read_length);
- dwrq->length = read_length;
- return 0;
- }
- read_length = 0;
- memset(extra, 0, 1);
- dwrq->length = 0;
- return 0;
- }
-
- if (atomic_read(&priv->event_count) > 0)
- atomic_dec(&priv->event_count);
-
- spin_lock(&priv->dev_read_lock); /* request spin lock */
-
- /* Copy length max size 0x07ff */
- if (priv->dev_size[priv->dev_count] > 2047)
- read_length = 2047;
- else
- read_length = priv->dev_size[priv->dev_count];
-
- /* Copy data */
- memcpy(extra, &(priv->dev_data[priv->dev_count][0]), read_length);
-
- spin_unlock(&priv->dev_read_lock); /* release spin lock */
-
- /* Initialize */
- priv->dev_data[priv->dev_count] = 0;
- priv->dev_size[priv->dev_count] = 0;
-
- priv->dev_count++;
- if (priv->dev_count == DEVICE_STOCK_COUNT)
- priv->dev_count = 0;
-
- /* Set read size */
- dwrq->length = read_length;
-
- return 0;
-}
-#endif
-
-#if 0
-/*------------------------------------------------------------------*/
-/* Private handler : get wep string */
-#define WEP_ASCII_BUFF_SIZE (17 + 64 * 4 + 1)
-static int ks_wlan_get_wep_ascii(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *dwrq, char *extra)
-{
- struct ks_wlan_private *priv = (struct ks_wlan_private *)dev->priv;
- int i, j, len = 0;
- char tmp[WEP_ASCII_BUFF_SIZE];
-
- if (priv->sleep_mode == SLP_SLEEP)
- return -EPERM;
- /* for SLEEP MODE */
- strcpy(tmp, " WEP keys ASCII \n");
- len += strlen(" WEP keys ASCII \n");
-
- for (i = 0; i < 4; i++) {
- strcpy(tmp + len, "\t[");
- len += strlen("\t[");
- tmp[len] = '1' + i;
- len++;
- strcpy(tmp + len, "] ");
- len += strlen("] ");
- if (priv->reg.wep_key[i].size) {
- strcpy(tmp + len,
- (priv->reg.wep_key[i].size <
- 6 ? "(40bits) [" : "(104bits) ["));
- len +=
- strlen((priv->reg.wep_key[i].size <
- 6 ? "(40bits) [" : "(104bits) ["));
- for (j = 0; j < priv->reg.wep_key[i].size; j++, len++)
- tmp[len] =
- (isprint(priv->reg.wep_key[i].val[j]) ?
- priv->reg.wep_key[i].val[j] : ' ');
-
- strcpy(tmp + len, "]\n");
- len += strlen("]\n");
- } else {
- strcpy(tmp + len, "off\n");
- len += strlen("off\n");
- }
- }
-
- memcpy(extra, tmp, len);
- dwrq->length = len + 1;
- return 0;
-}
-#endif
-
-/*------------------------------------------------------------------*/
-/* Private handler : set beacon lost count */
static int ks_wlan_set_beacon_lost(struct net_device *dev,
struct iw_request_info *info, __u32 *uwrq,
char *extra)
{
- struct ks_wlan_private *priv =
- (struct ks_wlan_private *)netdev_priv(dev);
+ struct ks_wlan_private *priv = netdev_priv(dev);
if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
@@ -2519,18 +2160,16 @@ static int ks_wlan_set_beacon_lost(struct net_device *dev,
if (priv->reg.operation_mode == MODE_INFRASTRUCTURE) {
priv->need_commit |= SME_MODE_SET;
return -EINPROGRESS; /* Call commit handler */
- } else
+ } else {
return 0;
+ }
}
-/*------------------------------------------------------------------*/
-/* Private handler : get beacon lost count */
static int ks_wlan_get_beacon_lost(struct net_device *dev,
struct iw_request_info *info, __u32 *uwrq,
char *extra)
{
- struct ks_wlan_private *priv =
- (struct ks_wlan_private *)netdev_priv(dev);
+ struct ks_wlan_private *priv = netdev_priv(dev);
if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
@@ -2539,14 +2178,11 @@ static int ks_wlan_get_beacon_lost(struct net_device *dev,
return 0;
}
-/*------------------------------------------------------------------*/
-/* Private handler : set phy type */
static int ks_wlan_set_phy_type(struct net_device *dev,
struct iw_request_info *info, __u32 *uwrq,
char *extra)
{
- struct ks_wlan_private *priv =
- (struct ks_wlan_private *)netdev_priv(dev);
+ struct ks_wlan_private *priv = netdev_priv(dev);
if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
@@ -2557,21 +2193,19 @@ static int ks_wlan_set_phy_type(struct net_device *dev,
priv->reg.phy_type = D_11G_ONLY_MODE;
} else if (*uwrq == D_11BG_COMPATIBLE_MODE) { /* 2 */
priv->reg.phy_type = D_11BG_COMPATIBLE_MODE;
- } else
+ } else {
return -EINVAL;
+ }
priv->need_commit |= SME_MODE_SET;
return -EINPROGRESS; /* Call commit handler */
}
-/*------------------------------------------------------------------*/
-/* Private handler : get phy type */
static int ks_wlan_get_phy_type(struct net_device *dev,
struct iw_request_info *info, __u32 *uwrq,
char *extra)
{
- struct ks_wlan_private *priv =
- (struct ks_wlan_private *)netdev_priv(dev);
+ struct ks_wlan_private *priv = netdev_priv(dev);
if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
@@ -2580,14 +2214,11 @@ static int ks_wlan_get_phy_type(struct net_device *dev,
return 0;
}
-/*------------------------------------------------------------------*/
-/* Private handler : set cts mode */
static int ks_wlan_set_cts_mode(struct net_device *dev,
struct iw_request_info *info, __u32 *uwrq,
char *extra)
{
- struct ks_wlan_private *priv =
- (struct ks_wlan_private *)netdev_priv(dev);
+ struct ks_wlan_private *priv = netdev_priv(dev);
if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
@@ -2596,25 +2227,24 @@ static int ks_wlan_set_cts_mode(struct net_device *dev,
priv->reg.cts_mode = CTS_MODE_FALSE;
} else if (*uwrq == CTS_MODE_TRUE) { /* 1 */
if (priv->reg.phy_type == D_11G_ONLY_MODE ||
- priv->reg.phy_type == D_11BG_COMPATIBLE_MODE)
+ priv->reg.phy_type == D_11BG_COMPATIBLE_MODE) {
priv->reg.cts_mode = CTS_MODE_TRUE;
- else
+ } else {
priv->reg.cts_mode = CTS_MODE_FALSE;
- } else
+ }
+ } else {
return -EINVAL;
+ }
priv->need_commit |= SME_MODE_SET;
return -EINPROGRESS; /* Call commit handler */
}
-/*------------------------------------------------------------------*/
-/* Private handler : get cts mode */
static int ks_wlan_get_cts_mode(struct net_device *dev,
struct iw_request_info *info, __u32 *uwrq,
char *extra)
{
- struct ks_wlan_private *priv =
- (struct ks_wlan_private *)netdev_priv(dev);
+ struct ks_wlan_private *priv = netdev_priv(dev);
if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
@@ -2623,14 +2253,11 @@ static int ks_wlan_get_cts_mode(struct net_device *dev,
return 0;
}
-/*------------------------------------------------------------------*/
-/* Private handler : set sleep mode */
static int ks_wlan_set_sleep_mode(struct net_device *dev,
struct iw_request_info *info,
__u32 *uwrq, char *extra)
{
- struct ks_wlan_private *priv =
- (struct ks_wlan_private *)netdev_priv(dev);
+ struct ks_wlan_private *priv = netdev_priv(dev);
DPRINTK(2, "\n");
@@ -2653,14 +2280,11 @@ static int ks_wlan_set_sleep_mode(struct net_device *dev,
return 0;
}
-/*------------------------------------------------------------------*/
-/* Private handler : get sleep mode */
static int ks_wlan_get_sleep_mode(struct net_device *dev,
struct iw_request_info *info,
__u32 *uwrq, char *extra)
{
- struct ks_wlan_private *priv =
- (struct ks_wlan_private *)netdev_priv(dev);
+ struct ks_wlan_private *priv = netdev_priv(dev);
DPRINTK(2, "GET_SLEEP_MODE %d\n", priv->sleep_mode);
*uwrq = priv->sleep_mode;
@@ -2668,53 +2292,14 @@ static int ks_wlan_get_sleep_mode(struct net_device *dev,
return 0;
}
-#if 0
-/*------------------------------------------------------------------*/
-/* Private handler : set phy information timer */
-static int ks_wlan_set_phy_information_timer(struct net_device *dev,
- struct iw_request_info *info,
- __u32 *uwrq, char *extra)
-{
- struct ks_wlan_private *priv = (struct ks_wlan_private *)dev->priv;
-
- if (priv->sleep_mode == SLP_SLEEP)
- return -EPERM;
- /* for SLEEP MODE */
- if (*uwrq >= 0 && *uwrq <= 0xFFFF) /* 0-65535 */
- priv->reg.phy_info_timer = (uint16_t)*uwrq;
- else
- return -EINVAL;
-
- hostif_sme_enqueue(priv, SME_PHY_INFO_REQUEST);
-
- return 0;
-}
-
-/*------------------------------------------------------------------*/
-/* Private handler : get phy information timer */
-static int ks_wlan_get_phy_information_timer(struct net_device *dev,
- struct iw_request_info *info,
- __u32 *uwrq, char *extra)
-{
- struct ks_wlan_private *priv = (struct ks_wlan_private *)dev->priv;
-
- if (priv->sleep_mode == SLP_SLEEP)
- return -EPERM;
- /* for SLEEP MODE */
- *uwrq = priv->reg.phy_info_timer;
- return 0;
-}
-#endif
-
#ifdef WPS
-/*------------------------------------------------------------------*/
-/* Private handler : set WPS enable */
+
static int ks_wlan_set_wps_enable(struct net_device *dev,
struct iw_request_info *info, __u32 *uwrq,
char *extra)
{
- struct ks_wlan_private *priv =
- (struct ks_wlan_private *)netdev_priv(dev);
+ struct ks_wlan_private *priv = netdev_priv(dev);
+
DPRINTK(2, "\n");
if (priv->sleep_mode == SLP_SLEEP)
@@ -2730,14 +2315,12 @@ static int ks_wlan_set_wps_enable(struct net_device *dev,
return 0;
}
-/*------------------------------------------------------------------*/
-/* Private handler : get WPS enable */
static int ks_wlan_get_wps_enable(struct net_device *dev,
struct iw_request_info *info, __u32 *uwrq,
char *extra)
{
- struct ks_wlan_private *priv =
- (struct ks_wlan_private *)netdev_priv(dev);
+ struct ks_wlan_private *priv = netdev_priv(dev);
+
DPRINTK(2, "\n");
if (priv->sleep_mode == SLP_SLEEP)
@@ -2749,16 +2332,13 @@ static int ks_wlan_get_wps_enable(struct net_device *dev,
return 0;
}
-/*------------------------------------------------------------------*/
-/* Private handler : set WPS probe req */
static int ks_wlan_set_wps_probe_req(struct net_device *dev,
struct iw_request_info *info,
struct iw_point *dwrq, char *extra)
{
- uint8_t *p = extra;
+ u8 *p = extra;
unsigned char len;
- struct ks_wlan_private *priv =
- (struct ks_wlan_private *)netdev_priv(dev);
+ struct ks_wlan_private *priv = netdev_priv(dev);
DPRINTK(2, "\n");
@@ -2786,34 +2366,13 @@ static int ks_wlan_set_wps_probe_req(struct net_device *dev,
return 0;
}
-
-#if 0
-/*------------------------------------------------------------------*/
-/* Private handler : get WPS probe req */
-static int ks_wlan_get_wps_probe_req(struct net_device *dev,
- struct iw_request_info *info,
- __u32 *uwrq, char *extra)
-{
- struct ks_wlan_private *priv = (struct ks_wlan_private *)dev->priv;
-
- DPRINTK(2, "\n");
-
- if (priv->sleep_mode == SLP_SLEEP)
- return -EPERM;
- /* for SLEEP MODE */
- return 0;
-}
-#endif
#endif /* WPS */
-/*------------------------------------------------------------------*/
-/* Private handler : set tx gain control value */
static int ks_wlan_set_tx_gain(struct net_device *dev,
struct iw_request_info *info, __u32 *uwrq,
char *extra)
{
- struct ks_wlan_private *priv =
- (struct ks_wlan_private *)netdev_priv(dev);
+ struct ks_wlan_private *priv = netdev_priv(dev);
if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
@@ -2832,14 +2391,11 @@ static int ks_wlan_set_tx_gain(struct net_device *dev,
return 0;
}
-/*------------------------------------------------------------------*/
-/* Private handler : get tx gain control value */
static int ks_wlan_get_tx_gain(struct net_device *dev,
struct iw_request_info *info, __u32 *uwrq,
char *extra)
{
- struct ks_wlan_private *priv =
- (struct ks_wlan_private *)netdev_priv(dev);
+ struct ks_wlan_private *priv = netdev_priv(dev);
if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
@@ -2849,14 +2405,11 @@ static int ks_wlan_get_tx_gain(struct net_device *dev,
return 0;
}
-/*------------------------------------------------------------------*/
-/* Private handler : set rx gain control value */
static int ks_wlan_set_rx_gain(struct net_device *dev,
struct iw_request_info *info, __u32 *uwrq,
char *extra)
{
- struct ks_wlan_private *priv =
- (struct ks_wlan_private *)netdev_priv(dev);
+ struct ks_wlan_private *priv = netdev_priv(dev);
if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
@@ -2875,14 +2428,11 @@ static int ks_wlan_set_rx_gain(struct net_device *dev,
return 0;
}
-/*------------------------------------------------------------------*/
-/* Private handler : get rx gain control value */
static int ks_wlan_get_rx_gain(struct net_device *dev,
struct iw_request_info *info, __u32 *uwrq,
char *extra)
{
- struct ks_wlan_private *priv =
- (struct ks_wlan_private *)netdev_priv(dev);
+ struct ks_wlan_private *priv = netdev_priv(dev);
if (priv->sleep_mode == SLP_SLEEP)
return -EPERM;
@@ -2892,36 +2442,11 @@ static int ks_wlan_get_rx_gain(struct net_device *dev,
return 0;
}
-#if 0
-/*------------------------------------------------------------------*/
-/* Private handler : set region value */
-static int ks_wlan_set_region(struct net_device *dev,
- struct iw_request_info *info, __u32 *uwrq,
- char *extra)
-{
- struct ks_wlan_private *priv = (struct ks_wlan_private *)dev->priv;
-
- if (priv->sleep_mode == SLP_SLEEP)
- return -EPERM;
- /* for SLEEP MODE */
- if (*uwrq >= 0x9 && *uwrq <= 0xF) /* 0x9-0xf */
- priv->region = (uint8_t)*uwrq;
- else
- return -EINVAL;
-
- hostif_sme_enqueue(priv, SME_SET_REGION);
- return 0;
-}
-#endif
-
-/*------------------------------------------------------------------*/
-/* Private handler : get eeprom checksum result */
static int ks_wlan_get_eeprom_cksum(struct net_device *dev,
struct iw_request_info *info, __u32 *uwrq,
char *extra)
{
- struct ks_wlan_private *priv =
- (struct ks_wlan_private *)netdev_priv(dev);
+ struct ks_wlan_private *priv = netdev_priv(dev);
*uwrq = priv->eeprom_checksum;
return 0;
@@ -2948,11 +2473,11 @@ static void print_hif_event(struct net_device *dev, int event)
case HIF_MIB_SET_CONF:
netdev_info(dev, "HIF_MIB_SET_CONF\n");
break;
- case HIF_POWERMGT_REQ:
- netdev_info(dev, "HIF_POWERMGT_REQ\n");
+ case HIF_POWER_MGMT_REQ:
+ netdev_info(dev, "HIF_POWER_MGMT_REQ\n");
break;
- case HIF_POWERMGT_CONF:
- netdev_info(dev, "HIF_POWERMGT_CONF\n");
+ case HIF_POWER_MGMT_CONF:
+ netdev_info(dev, "HIF_POWER_MGMT_CONF\n");
break;
case HIF_START_REQ:
netdev_info(dev, "HIF_START_REQ\n");
@@ -3040,14 +2565,12 @@ static void print_hif_event(struct net_device *dev, int event)
}
}
-/*------------------------------------------------------------------*/
-/* Private handler : get host command history */
+/* get host command history */
static int ks_wlan_hostt(struct net_device *dev, struct iw_request_info *info,
__u32 *uwrq, char *extra)
{
int i, event;
- struct ks_wlan_private *priv =
- (struct ks_wlan_private *)netdev_priv(dev);
+ struct ks_wlan_private *priv = netdev_priv(dev);
for (i = 63; i >= 0; i--) {
event =
@@ -3115,119 +2638,119 @@ static const struct iw_priv_args ks_wlan_private_args[] = {
};
static const iw_handler ks_wlan_handler[] = {
- (iw_handler) ks_wlan_config_commit, /* SIOCSIWCOMMIT */
- (iw_handler) ks_wlan_get_name, /* SIOCGIWNAME */
- (iw_handler) NULL, /* SIOCSIWNWID */
- (iw_handler) NULL, /* SIOCGIWNWID */
- (iw_handler) ks_wlan_set_freq, /* SIOCSIWFREQ */
- (iw_handler) ks_wlan_get_freq, /* SIOCGIWFREQ */
- (iw_handler) ks_wlan_set_mode, /* SIOCSIWMODE */
- (iw_handler) ks_wlan_get_mode, /* SIOCGIWMODE */
+ (iw_handler)ks_wlan_config_commit, /* SIOCSIWCOMMIT */
+ (iw_handler)ks_wlan_get_name, /* SIOCGIWNAME */
+ (iw_handler)NULL, /* SIOCSIWNWID */
+ (iw_handler)NULL, /* SIOCGIWNWID */
+ (iw_handler)ks_wlan_set_freq, /* SIOCSIWFREQ */
+ (iw_handler)ks_wlan_get_freq, /* SIOCGIWFREQ */
+ (iw_handler)ks_wlan_set_mode, /* SIOCSIWMODE */
+ (iw_handler)ks_wlan_get_mode, /* SIOCGIWMODE */
#ifndef KSC_OPNOTSUPP
- (iw_handler) ks_wlan_set_sens, /* SIOCSIWSENS */
- (iw_handler) ks_wlan_get_sens, /* SIOCGIWSENS */
+ (iw_handler)ks_wlan_set_sens, /* SIOCSIWSENS */
+ (iw_handler)ks_wlan_get_sens, /* SIOCGIWSENS */
#else /* KSC_OPNOTSUPP */
- (iw_handler) NULL, /* SIOCSIWSENS */
- (iw_handler) NULL, /* SIOCGIWSENS */
+ (iw_handler)NULL, /* SIOCSIWSENS */
+ (iw_handler)NULL, /* SIOCGIWSENS */
#endif /* KSC_OPNOTSUPP */
- (iw_handler) NULL, /* SIOCSIWRANGE */
- (iw_handler) ks_wlan_get_range, /* SIOCGIWRANGE */
- (iw_handler) NULL, /* SIOCSIWPRIV */
- (iw_handler) NULL, /* SIOCGIWPRIV */
- (iw_handler) NULL, /* SIOCSIWSTATS */
- (iw_handler) ks_wlan_get_iwstats, /* SIOCGIWSTATS */
- (iw_handler) NULL, /* SIOCSIWSPY */
- (iw_handler) NULL, /* SIOCGIWSPY */
- (iw_handler) NULL, /* SIOCSIWTHRSPY */
- (iw_handler) NULL, /* SIOCGIWTHRSPY */
- (iw_handler) ks_wlan_set_wap, /* SIOCSIWAP */
- (iw_handler) ks_wlan_get_wap, /* SIOCGIWAP */
-// (iw_handler) NULL, /* SIOCSIWMLME */
- (iw_handler) ks_wlan_set_mlme, /* SIOCSIWMLME */
- (iw_handler) ks_wlan_get_aplist, /* SIOCGIWAPLIST */
- (iw_handler) ks_wlan_set_scan, /* SIOCSIWSCAN */
- (iw_handler) ks_wlan_get_scan, /* SIOCGIWSCAN */
- (iw_handler) ks_wlan_set_essid, /* SIOCSIWESSID */
- (iw_handler) ks_wlan_get_essid, /* SIOCGIWESSID */
- (iw_handler) ks_wlan_set_nick, /* SIOCSIWNICKN */
- (iw_handler) ks_wlan_get_nick, /* SIOCGIWNICKN */
- (iw_handler) NULL, /* -- hole -- */
- (iw_handler) NULL, /* -- hole -- */
- (iw_handler) ks_wlan_set_rate, /* SIOCSIWRATE */
- (iw_handler) ks_wlan_get_rate, /* SIOCGIWRATE */
- (iw_handler) ks_wlan_set_rts, /* SIOCSIWRTS */
- (iw_handler) ks_wlan_get_rts, /* SIOCGIWRTS */
- (iw_handler) ks_wlan_set_frag, /* SIOCSIWFRAG */
- (iw_handler) ks_wlan_get_frag, /* SIOCGIWFRAG */
+ (iw_handler)NULL, /* SIOCSIWRANGE */
+ (iw_handler)ks_wlan_get_range, /* SIOCGIWRANGE */
+ (iw_handler)NULL, /* SIOCSIWPRIV */
+ (iw_handler)NULL, /* SIOCGIWPRIV */
+ (iw_handler)NULL, /* SIOCSIWSTATS */
+ (iw_handler)ks_wlan_get_iwstats, /* SIOCGIWSTATS */
+ (iw_handler)NULL, /* SIOCSIWSPY */
+ (iw_handler)NULL, /* SIOCGIWSPY */
+ (iw_handler)NULL, /* SIOCSIWTHRSPY */
+ (iw_handler)NULL, /* SIOCGIWTHRSPY */
+ (iw_handler)ks_wlan_set_wap, /* SIOCSIWAP */
+ (iw_handler)ks_wlan_get_wap, /* SIOCGIWAP */
+// (iw_handler)NULL, /* SIOCSIWMLME */
+ (iw_handler)ks_wlan_set_mlme, /* SIOCSIWMLME */
+ (iw_handler)ks_wlan_get_aplist, /* SIOCGIWAPLIST */
+ (iw_handler)ks_wlan_set_scan, /* SIOCSIWSCAN */
+ (iw_handler)ks_wlan_get_scan, /* SIOCGIWSCAN */
+ (iw_handler)ks_wlan_set_essid, /* SIOCSIWESSID */
+ (iw_handler)ks_wlan_get_essid, /* SIOCGIWESSID */
+ (iw_handler)ks_wlan_set_nick, /* SIOCSIWNICKN */
+ (iw_handler)ks_wlan_get_nick, /* SIOCGIWNICKN */
+ (iw_handler)NULL, /* -- hole -- */
+ (iw_handler)NULL, /* -- hole -- */
+ (iw_handler)ks_wlan_set_rate, /* SIOCSIWRATE */
+ (iw_handler)ks_wlan_get_rate, /* SIOCGIWRATE */
+ (iw_handler)ks_wlan_set_rts, /* SIOCSIWRTS */
+ (iw_handler)ks_wlan_get_rts, /* SIOCGIWRTS */
+ (iw_handler)ks_wlan_set_frag, /* SIOCSIWFRAG */
+ (iw_handler)ks_wlan_get_frag, /* SIOCGIWFRAG */
#ifndef KSC_OPNOTSUPP
- (iw_handler) ks_wlan_set_txpow, /* SIOCSIWTXPOW */
- (iw_handler) ks_wlan_get_txpow, /* SIOCGIWTXPOW */
- (iw_handler) ks_wlan_set_retry, /* SIOCSIWRETRY */
- (iw_handler) ks_wlan_get_retry, /* SIOCGIWRETRY */
+ (iw_handler)ks_wlan_set_txpow, /* SIOCSIWTXPOW */
+ (iw_handler)ks_wlan_get_txpow, /* SIOCGIWTXPOW */
+ (iw_handler)ks_wlan_set_retry, /* SIOCSIWRETRY */
+ (iw_handler)ks_wlan_get_retry, /* SIOCGIWRETRY */
#else /* KSC_OPNOTSUPP */
- (iw_handler) NULL, /* SIOCSIWTXPOW */
- (iw_handler) NULL, /* SIOCGIWTXPOW */
- (iw_handler) NULL, /* SIOCSIWRETRY */
- (iw_handler) NULL, /* SIOCGIWRETRY */
+ (iw_handler)NULL, /* SIOCSIWTXPOW */
+ (iw_handler)NULL, /* SIOCGIWTXPOW */
+ (iw_handler)NULL, /* SIOCSIWRETRY */
+ (iw_handler)NULL, /* SIOCGIWRETRY */
#endif /* KSC_OPNOTSUPP */
- (iw_handler) ks_wlan_set_encode, /* SIOCSIWENCODE */
- (iw_handler) ks_wlan_get_encode, /* SIOCGIWENCODE */
- (iw_handler) ks_wlan_set_power, /* SIOCSIWPOWER */
- (iw_handler) ks_wlan_get_power, /* SIOCGIWPOWER */
- (iw_handler) NULL, /* -- hole -- */
- (iw_handler) NULL, /* -- hole -- */
-// (iw_handler) NULL, /* SIOCSIWGENIE */
- (iw_handler) ks_wlan_set_genie, /* SIOCSIWGENIE */
- (iw_handler) NULL, /* SIOCGIWGENIE */
- (iw_handler) ks_wlan_set_auth_mode, /* SIOCSIWAUTH */
- (iw_handler) ks_wlan_get_auth_mode, /* SIOCGIWAUTH */
- (iw_handler) ks_wlan_set_encode_ext, /* SIOCSIWENCODEEXT */
- (iw_handler) ks_wlan_get_encode_ext, /* SIOCGIWENCODEEXT */
- (iw_handler) ks_wlan_set_pmksa, /* SIOCSIWPMKSA */
- (iw_handler) NULL, /* -- hole -- */
+ (iw_handler)ks_wlan_set_encode, /* SIOCSIWENCODE */
+ (iw_handler)ks_wlan_get_encode, /* SIOCGIWENCODE */
+ (iw_handler)ks_wlan_set_power, /* SIOCSIWPOWER */
+ (iw_handler)ks_wlan_get_power, /* SIOCGIWPOWER */
+ (iw_handler)NULL, /* -- hole -- */
+ (iw_handler)NULL, /* -- hole -- */
+// (iw_handler)NULL, /* SIOCSIWGENIE */
+ (iw_handler)ks_wlan_set_genie, /* SIOCSIWGENIE */
+ (iw_handler)NULL, /* SIOCGIWGENIE */
+ (iw_handler)ks_wlan_set_auth_mode, /* SIOCSIWAUTH */
+ (iw_handler)ks_wlan_get_auth_mode, /* SIOCGIWAUTH */
+ (iw_handler)ks_wlan_set_encode_ext, /* SIOCSIWENCODEEXT */
+ (iw_handler)ks_wlan_get_encode_ext, /* SIOCGIWENCODEEXT */
+ (iw_handler)ks_wlan_set_pmksa, /* SIOCSIWPMKSA */
+ (iw_handler)NULL, /* -- hole -- */
};
/* private_handler */
static const iw_handler ks_wlan_private_handler[] = {
- (iw_handler) NULL, /* 0 */
- (iw_handler) NULL, /* 1, used to be: KS_WLAN_GET_DRIVER_VERSION */
- (iw_handler) NULL, /* 2 */
- (iw_handler) ks_wlan_get_firmware_version, /* 3 KS_WLAN_GET_FIRM_VERSION */
+ (iw_handler)NULL, /* 0 */
+ (iw_handler)NULL, /* 1, used to be: KS_WLAN_GET_DRIVER_VERSION */
+ (iw_handler)NULL, /* 2 */
+ (iw_handler)ks_wlan_get_firmware_version, /* 3 KS_WLAN_GET_FIRM_VERSION */
#ifdef WPS
- (iw_handler) ks_wlan_set_wps_enable, /* 4 KS_WLAN_SET_WPS_ENABLE */
- (iw_handler) ks_wlan_get_wps_enable, /* 5 KS_WLAN_GET_WPS_ENABLE */
- (iw_handler) ks_wlan_set_wps_probe_req, /* 6 KS_WLAN_SET_WPS_PROBE_REQ */
+ (iw_handler)ks_wlan_set_wps_enable, /* 4 KS_WLAN_SET_WPS_ENABLE */
+ (iw_handler)ks_wlan_get_wps_enable, /* 5 KS_WLAN_GET_WPS_ENABLE */
+ (iw_handler)ks_wlan_set_wps_probe_req, /* 6 KS_WLAN_SET_WPS_PROBE_REQ */
#else
- (iw_handler) NULL, /* 4 */
- (iw_handler) NULL, /* 5 */
- (iw_handler) NULL, /* 6 */
+ (iw_handler)NULL, /* 4 */
+ (iw_handler)NULL, /* 5 */
+ (iw_handler)NULL, /* 6 */
#endif /* WPS */
- (iw_handler) ks_wlan_get_eeprom_cksum, /* 7 KS_WLAN_GET_CONNECT */
- (iw_handler) ks_wlan_set_preamble, /* 8 KS_WLAN_SET_PREAMBLE */
- (iw_handler) ks_wlan_get_preamble, /* 9 KS_WLAN_GET_PREAMBLE */
- (iw_handler) ks_wlan_set_powermgt, /* 10 KS_WLAN_SET_POWER_SAVE */
- (iw_handler) ks_wlan_get_powermgt, /* 11 KS_WLAN_GET_POWER_SAVE */
- (iw_handler) ks_wlan_set_scan_type, /* 12 KS_WLAN_SET_SCAN_TYPE */
- (iw_handler) ks_wlan_get_scan_type, /* 13 KS_WLAN_GET_SCAN_TYPE */
- (iw_handler) ks_wlan_set_rx_gain, /* 14 KS_WLAN_SET_RX_GAIN */
- (iw_handler) ks_wlan_get_rx_gain, /* 15 KS_WLAN_GET_RX_GAIN */
- (iw_handler) ks_wlan_hostt, /* 16 KS_WLAN_HOSTT */
- (iw_handler) NULL, /* 17 */
- (iw_handler) ks_wlan_set_beacon_lost, /* 18 KS_WLAN_SET_BECAN_LOST */
- (iw_handler) ks_wlan_get_beacon_lost, /* 19 KS_WLAN_GET_BECAN_LOST */
- (iw_handler) ks_wlan_set_tx_gain, /* 20 KS_WLAN_SET_TX_GAIN */
- (iw_handler) ks_wlan_get_tx_gain, /* 21 KS_WLAN_GET_TX_GAIN */
- (iw_handler) ks_wlan_set_phy_type, /* 22 KS_WLAN_SET_PHY_TYPE */
- (iw_handler) ks_wlan_get_phy_type, /* 23 KS_WLAN_GET_PHY_TYPE */
- (iw_handler) ks_wlan_set_cts_mode, /* 24 KS_WLAN_SET_CTS_MODE */
- (iw_handler) ks_wlan_get_cts_mode, /* 25 KS_WLAN_GET_CTS_MODE */
- (iw_handler) NULL, /* 26 */
- (iw_handler) NULL, /* 27 */
- (iw_handler) ks_wlan_set_sleep_mode, /* 28 KS_WLAN_SET_SLEEP_MODE */
- (iw_handler) ks_wlan_get_sleep_mode, /* 29 KS_WLAN_GET_SLEEP_MODE */
- (iw_handler) NULL, /* 30 */
- (iw_handler) NULL, /* 31 */
+ (iw_handler)ks_wlan_get_eeprom_cksum, /* 7 KS_WLAN_GET_CONNECT */
+ (iw_handler)ks_wlan_set_preamble, /* 8 KS_WLAN_SET_PREAMBLE */
+ (iw_handler)ks_wlan_get_preamble, /* 9 KS_WLAN_GET_PREAMBLE */
+ (iw_handler)ks_wlan_set_power_mgmt, /* 10 KS_WLAN_SET_POWER_SAVE */
+ (iw_handler)ks_wlan_get_power_mgmt, /* 11 KS_WLAN_GET_POWER_SAVE */
+ (iw_handler)ks_wlan_set_scan_type, /* 12 KS_WLAN_SET_SCAN_TYPE */
+ (iw_handler)ks_wlan_get_scan_type, /* 13 KS_WLAN_GET_SCAN_TYPE */
+ (iw_handler)ks_wlan_set_rx_gain, /* 14 KS_WLAN_SET_RX_GAIN */
+ (iw_handler)ks_wlan_get_rx_gain, /* 15 KS_WLAN_GET_RX_GAIN */
+ (iw_handler)ks_wlan_hostt, /* 16 KS_WLAN_HOSTT */
+ (iw_handler)NULL, /* 17 */
+ (iw_handler)ks_wlan_set_beacon_lost, /* 18 KS_WLAN_SET_BECAN_LOST */
+ (iw_handler)ks_wlan_get_beacon_lost, /* 19 KS_WLAN_GET_BECAN_LOST */
+ (iw_handler)ks_wlan_set_tx_gain, /* 20 KS_WLAN_SET_TX_GAIN */
+ (iw_handler)ks_wlan_get_tx_gain, /* 21 KS_WLAN_GET_TX_GAIN */
+ (iw_handler)ks_wlan_set_phy_type, /* 22 KS_WLAN_SET_PHY_TYPE */
+ (iw_handler)ks_wlan_get_phy_type, /* 23 KS_WLAN_GET_PHY_TYPE */
+ (iw_handler)ks_wlan_set_cts_mode, /* 24 KS_WLAN_SET_CTS_MODE */
+ (iw_handler)ks_wlan_get_cts_mode, /* 25 KS_WLAN_GET_CTS_MODE */
+ (iw_handler)NULL, /* 26 */
+ (iw_handler)NULL, /* 27 */
+ (iw_handler)ks_wlan_set_sleep_mode, /* 28 KS_WLAN_SET_SLEEP_MODE */
+ (iw_handler)ks_wlan_get_sleep_mode, /* 29 KS_WLAN_GET_SLEEP_MODE */
+ (iw_handler)NULL, /* 30 */
+ (iw_handler)NULL, /* 31 */
};
static const struct iw_handler_def ks_wlan_handler_def = {
@@ -3235,8 +2758,8 @@ static const struct iw_handler_def ks_wlan_handler_def = {
.num_private = sizeof(ks_wlan_private_handler) / sizeof(iw_handler),
.num_private_args =
sizeof(ks_wlan_private_args) / sizeof(struct iw_priv_args),
- .standard = (iw_handler *) ks_wlan_handler,
- .private = (iw_handler *) ks_wlan_private_handler,
+ .standard = (iw_handler *)ks_wlan_handler,
+ .private = (iw_handler *)ks_wlan_private_handler,
.private_args = (struct iw_priv_args *)ks_wlan_private_args,
.get_wireless_stats = ks_get_wireless_stats,
};
@@ -3244,20 +2767,20 @@ static const struct iw_handler_def ks_wlan_handler_def = {
static int ks_wlan_netdev_ioctl(struct net_device *dev, struct ifreq *rq,
int cmd)
{
- int rc = 0;
+ int ret;
struct iwreq *wrq = (struct iwreq *)rq;
switch (cmd) {
case SIOCIWFIRSTPRIV + 20: /* KS_WLAN_SET_STOP_REQ */
- rc = ks_wlan_set_stop_request(dev, NULL, &(wrq->u.mode), NULL);
+ ret = ks_wlan_set_stop_request(dev, NULL, &wrq->u.mode, NULL);
break;
// All other calls are currently unsupported
default:
- rc = -EOPNOTSUPP;
+ ret = -EOPNOTSUPP;
}
- DPRINTK(5, "return=%d\n", rc);
- return rc;
+ DPRINTK(5, "return=%d\n", ret);
+ return ret;
}
static
@@ -3305,7 +2828,7 @@ static
int ks_wlan_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct ks_wlan_private *priv = netdev_priv(dev);
- int rc = 0;
+ int ret;
DPRINTK(3, "in_interrupt()=%ld\n", in_interrupt());
@@ -3321,21 +2844,17 @@ int ks_wlan_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (netif_running(dev))
netif_stop_queue(dev);
- rc = hostif_data_request(priv, skb);
+ ret = hostif_data_request(priv, skb);
netif_trans_update(dev);
- DPRINTK(4, "rc=%d\n", rc);
- if (rc)
- rc = 0;
+ if (ret)
+	DPRINTK(4, "hostif_data_request error: %d\n", ret);
- return rc;
+ return 0;
}
-void send_packet_complete(void *arg1, void *arg2)
+void send_packet_complete(struct ks_wlan_private *priv, struct sk_buff *skb)
{
- struct ks_wlan_private *priv = (struct ks_wlan_private *)arg1;
- struct sk_buff *packet = (struct sk_buff *)arg2;
-
DPRINTK(3, "\n");
priv->nstats.tx_packets++;
@@ -3343,15 +2862,16 @@ void send_packet_complete(void *arg1, void *arg2)
if (netif_queue_stopped(priv->net_dev))
netif_wake_queue(priv->net_dev);
- if (packet) {
- priv->nstats.tx_bytes += packet->len;
- dev_kfree_skb(packet);
- packet = NULL;
+ if (skb) {
+ priv->nstats.tx_bytes += skb->len;
+ dev_kfree_skb(skb);
}
}
-/* Set or clear the multicast filter for this adaptor.
- This routine is not state sensitive and need not be SMP locked. */
+/*
+ * Set or clear the multicast filter for this adaptor.
+ * This routine is not state sensitive and need not be SMP locked.
+ */
static
void ks_wlan_set_multicast_list(struct net_device *dev)
{
@@ -3457,3 +2977,23 @@ int ks_wlan_net_stop(struct net_device *dev)
return 0;
}
+
+/**
+ * is_connect_status() - return true if status is 'connected'
+ * @status: high bit is used as FORCE_DISCONNECT, low bits used for
+ * connect status.
+ */
+bool is_connect_status(u32 status)
+{
+ return (status & CONNECT_STATUS_MASK) == CONNECT_STATUS;
+}
+
+/**
+ * is_disconnect_status() - return true if status is 'disconnected'
+ * @status: high bit is used as FORCE_DISCONNECT, low bits used for
+ * disconnect status.
+ */
+bool is_disconnect_status(u32 status)
+{
+ return (status & CONNECT_STATUS_MASK) == DISCONNECT_STATUS;
+}
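
The two helpers above replace the open-coded "(connect_status & CONNECT_STATUS_MASK) == ..." tests scattered through the driver. A minimal caller sketch, not taken from the patch: example_update_carrier() is an invented function, and it assumes the driver-local ks_wlan.h declares struct ks_wlan_private and the status constants, as it does for the rest of this file.

#include <linux/netdevice.h>
#include "ks_wlan.h"	/* struct ks_wlan_private, CONNECT_STATUS_* (assumed) */

/* Sketch only: an invented caller showing the new helpers in use. */
static void example_update_carrier(struct ks_wlan_private *priv)
{
	/* was: if ((priv->connect_status & CONNECT_STATUS_MASK) == CONNECT_STATUS) */
	if (is_connect_status(priv->connect_status))
		netif_carrier_on(priv->net_dev);
	else if (is_disconnect_status(priv->connect_status))
		netif_carrier_off(priv->net_dev);
}

Judging by the kernel-doc, CONNECT_STATUS_MASK strips the FORCE_DISCONNECT bit, so callers compare only the low status bits without having to remember the mask themselves.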
diff --git a/drivers/staging/ks7010/michael_mic.c b/drivers/staging/ks7010/michael_mic.c
index f6e70fa2a12f..80497ef5b1aa 100644
--- a/drivers/staging/ks7010/michael_mic.c
+++ b/drivers/staging/ks7010/michael_mic.c
@@ -38,7 +38,7 @@ do { \
} while (0)
static
-void MichaelInitializeFunction(struct michel_mic_t *Mic, uint8_t *key)
+void MichaelInitializeFunction(struct michael_mic_t *Mic, uint8_t *key)
{
// Set the key
Mic->K0 = getUInt32(key, 0);
@@ -61,7 +61,7 @@ do { \
} while (0)
static
-void MichaelAppend(struct michel_mic_t *Mic, uint8_t *src, int nBytes)
+void MichaelAppend(struct michael_mic_t *Mic, uint8_t *src, int nBytes)
{
int addlen;
@@ -96,7 +96,7 @@ void MichaelAppend(struct michel_mic_t *Mic, uint8_t *src, int nBytes)
}
static
-void MichaelGetMIC(struct michel_mic_t *Mic, uint8_t *dst)
+void MichaelGetMIC(struct michael_mic_t *Mic, uint8_t *dst)
{
u8 *data = Mic->M;
@@ -125,7 +125,7 @@ void MichaelGetMIC(struct michel_mic_t *Mic, uint8_t *dst)
MichaelClear(Mic);
}
-void MichaelMICFunction(struct michel_mic_t *Mic, u8 *Key,
+void MichaelMICFunction(struct michael_mic_t *Mic, u8 *Key,
u8 *Data, int Len, u8 priority,
u8 *Result)
{
@@ -141,8 +141,8 @@ void MichaelMICFunction(struct michel_mic_t *Mic, u8 *Key,
* +--+--+--------+--+----+--+--+--+--+--+--+--+--+
*/
MichaelInitializeFunction(Mic, Key);
- MichaelAppend(Mic, (uint8_t *) Data, 12); /* |DA|SA| */
+ MichaelAppend(Mic, (uint8_t *)Data, 12); /* |DA|SA| */
MichaelAppend(Mic, pad_data, 4); /* |Priority|0|0|0| */
- MichaelAppend(Mic, (uint8_t *) (Data + 12), Len - 12); /* |Data| */
+ MichaelAppend(Mic, (uint8_t *)(Data + 12), Len - 12); /* |Data| */
MichaelGetMIC(Mic, Result);
}
diff --git a/drivers/staging/ks7010/michael_mic.h b/drivers/staging/ks7010/michael_mic.h
index 248f849fc4a5..758e429446f1 100644
--- a/drivers/staging/ks7010/michael_mic.h
+++ b/drivers/staging/ks7010/michael_mic.h
@@ -9,8 +9,8 @@
* published by the Free Software Foundation.
*/
-/* MichelMIC routine define */
-struct michel_mic_t {
+/* MichaelMIC routine define */
+struct michael_mic_t {
u32 K0; // Key
u32 K1; // Key
u32 L; // Current state
@@ -20,6 +20,6 @@ struct michel_mic_t {
u8 Result[8];
};
-void MichaelMICFunction(struct michel_mic_t *Mic, u8 *Key,
+void MichaelMICFunction(struct michael_mic_t *Mic, u8 *Key,
u8 *Data, int Len, u8 priority,
u8 *Result);
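
Beyond the michel -> michael spelling fix, the header above is the whole public surface of this MIC helper. A usage sketch under two assumptions: the key is the 8-byte Michael key for the direction in question (MichaelInitializeFunction loads two 32-bit words from it), and the frame buffer starts with |DA|SA| (12 bytes) followed by the MSDU payload, as the layout comment in michael_mic.c describes. example_compute_mic() is invented, not part of the patch.

/* Sketch only: invented wrapper around the exported MIC entry point. */
static void example_compute_mic(u8 *key, u8 *frame, int frame_len,
				u8 priority, u8 mic_out[8])
{
	struct michael_mic_t mic;

	/* MichaelMICFunction() itself inserts the |Priority|0|0|0| pad
	 * between the 12 address bytes and the payload, so the caller
	 * only supplies |DA|SA|data|. */
	MichaelMICFunction(&mic, key, frame, frame_len, priority, mic_out);
}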
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h
index 2dae85798ec1..e774c75ecadd 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h
@@ -87,12 +87,9 @@ do { \
#define LIBCFS_VMALLOC_SIZE (2 << PAGE_SHIFT) /* 2 pages */
#endif
-#define LIBCFS_ALLOC_PRE(size, mask) \
-do { \
- LASSERT(!in_interrupt() || \
- ((size) <= LIBCFS_VMALLOC_SIZE && \
- !gfpflags_allow_blocking(mask))); \
-} while (0)
+#define LIBCFS_ALLOC_PRE(size, mask) \
+ LASSERT(!in_interrupt() || ((size) <= LIBCFS_VMALLOC_SIZE && \
+ !gfpflags_allow_blocking(mask)))
#define LIBCFS_ALLOC_POST(ptr, size) \
do { \
@@ -187,46 +184,28 @@ void cfs_array_free(void *vars);
#if LASSERT_ATOMIC_ENABLED
/** assert value of @a is equal to @v */
-#define LASSERT_ATOMIC_EQ(a, v) \
-do { \
- LASSERTF(atomic_read(a) == v, \
- "value: %d\n", atomic_read((a))); \
-} while (0)
+#define LASSERT_ATOMIC_EQ(a, v) \
+ LASSERTF(atomic_read(a) == v, "value: %d\n", atomic_read((a)))
/** assert value of @a is unequal to @v */
-#define LASSERT_ATOMIC_NE(a, v) \
-do { \
- LASSERTF(atomic_read(a) != v, \
- "value: %d\n", atomic_read((a))); \
-} while (0)
+#define LASSERT_ATOMIC_NE(a, v) \
+ LASSERTF(atomic_read(a) != v, "value: %d\n", atomic_read((a)))
/** assert value of @a is little than @v */
-#define LASSERT_ATOMIC_LT(a, v) \
-do { \
- LASSERTF(atomic_read(a) < v, \
- "value: %d\n", atomic_read((a))); \
-} while (0)
+#define LASSERT_ATOMIC_LT(a, v) \
+ LASSERTF(atomic_read(a) < v, "value: %d\n", atomic_read((a)))
/** assert value of @a is little/equal to @v */
-#define LASSERT_ATOMIC_LE(a, v) \
-do { \
- LASSERTF(atomic_read(a) <= v, \
- "value: %d\n", atomic_read((a))); \
-} while (0)
+#define LASSERT_ATOMIC_LE(a, v) \
+ LASSERTF(atomic_read(a) <= v, "value: %d\n", atomic_read((a)))
/** assert value of @a is great than @v */
-#define LASSERT_ATOMIC_GT(a, v) \
-do { \
- LASSERTF(atomic_read(a) > v, \
- "value: %d\n", atomic_read((a))); \
-} while (0)
+#define LASSERT_ATOMIC_GT(a, v) \
+ LASSERTF(atomic_read(a) > v, "value: %d\n", atomic_read((a)))
/** assert value of @a is great/equal to @v */
-#define LASSERT_ATOMIC_GE(a, v) \
-do { \
- LASSERTF(atomic_read(a) >= v, \
- "value: %d\n", atomic_read((a))); \
-} while (0)
+#define LASSERT_ATOMIC_GE(a, v) \
+ LASSERTF(atomic_read(a) >= v, "value: %d\n", atomic_read((a)))
/** assert value of @a is great than @v1 and little than @v2 */
#define LASSERT_ATOMIC_GT_LT(a, v1, v2) \
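
The rewritten LIBCFS_ALLOC_PRE and LASSERT_ATOMIC_* macros drop their do { } while (0) wrappers because each now expands to a single LASSERT()/LASSERTF() statement. A generic illustration of why the wrapper only matters for multi-statement macros; log_pair() and log_one() are invented names, not kernel APIs.

#include <linux/printk.h>

/* Two statements: without do/while(0) this would break if/else pairing. */
#define log_pair(x)				\
do {						\
	pr_info("first %d\n", (x));		\
	pr_info("second %d\n", (x));		\
} while (0)

/* One statement: already safe on its own, no wrapper needed -- the same
 * reasoning behind the simplified LASSERT_ATOMIC_* above. */
#define log_one(x)	pr_info("only %d\n", (x))

static void example_log(int v)
{
	if (v)
		log_pair(v);	/* expands to exactly one statement */
	else
		log_one(v);
}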
diff --git a/drivers/staging/lustre/include/linux/lnet/api.h b/drivers/staging/lustre/include/linux/lnet/api.h
index cb0d6b481455..f4b6de2ec0ff 100644
--- a/drivers/staging/lustre/include/linux/lnet/api.h
+++ b/drivers/staging/lustre/include/linux/lnet/api.h
@@ -74,9 +74,8 @@ int LNetNIFini(void);
* \see LNetMEAttach
* @{
*/
-int LNetGetId(unsigned int index, lnet_process_id_t *id);
+int LNetGetId(unsigned int index, struct lnet_process_id *id);
int LNetDist(lnet_nid_t nid, lnet_nid_t *srcnid, __u32 *order);
-void LNetSnprintHandle(char *str, int str_len, lnet_handle_any_t handle);
/** @} lnet_addr */
@@ -94,22 +93,22 @@ void LNetSnprintHandle(char *str, int str_len, lnet_handle_any_t handle);
* @{
*/
int LNetMEAttach(unsigned int portal,
- lnet_process_id_t match_id_in,
+ struct lnet_process_id match_id_in,
__u64 match_bits_in,
__u64 ignore_bits_in,
- lnet_unlink_t unlink_in,
- lnet_ins_pos_t pos_in,
- lnet_handle_me_t *handle_out);
+ enum lnet_unlink unlink_in,
+ enum lnet_ins_pos pos_in,
+ struct lnet_handle_me *handle_out);
-int LNetMEInsert(lnet_handle_me_t current_in,
- lnet_process_id_t match_id_in,
+int LNetMEInsert(struct lnet_handle_me current_in,
+ struct lnet_process_id match_id_in,
__u64 match_bits_in,
__u64 ignore_bits_in,
- lnet_unlink_t unlink_in,
- lnet_ins_pos_t position_in,
- lnet_handle_me_t *handle_out);
+ enum lnet_unlink unlink_in,
+ enum lnet_ins_pos position_in,
+ struct lnet_handle_me *handle_out);
-int LNetMEUnlink(lnet_handle_me_t current_in);
+int LNetMEUnlink(struct lnet_handle_me current_in);
/** @} lnet_me */
/** \defgroup lnet_md Memory descriptors
@@ -125,16 +124,16 @@ int LNetMEUnlink(lnet_handle_me_t current_in);
* associated with a MD: LNetMDUnlink().
* @{
*/
-int LNetMDAttach(lnet_handle_me_t current_in,
- lnet_md_t md_in,
- lnet_unlink_t unlink_in,
- lnet_handle_md_t *handle_out);
+int LNetMDAttach(struct lnet_handle_me current_in,
+ struct lnet_md md_in,
+ enum lnet_unlink unlink_in,
+ struct lnet_handle_md *md_handle_out);
-int LNetMDBind(lnet_md_t md_in,
- lnet_unlink_t unlink_in,
- lnet_handle_md_t *handle_out);
+int LNetMDBind(struct lnet_md md_in,
+ enum lnet_unlink unlink_in,
+ struct lnet_handle_md *md_handle_out);
-int LNetMDUnlink(lnet_handle_md_t md_in);
+int LNetMDUnlink(struct lnet_handle_md md_in);
/** @} lnet_md */
/** \defgroup lnet_eq Events and event queues
@@ -147,9 +146,9 @@ int LNetMDUnlink(lnet_handle_md_t md_in);
* associated with it. If an event handler exists, it will be run for each
* event that is deposited into the EQ.
*
- * In addition to the lnet_handle_eq_t, the LNet API defines two types
- * associated with events: The ::lnet_event_kind_t defines the kinds of events
- * that can be stored in an EQ. The lnet_event_t defines a structure that
+ * In addition to the lnet_handle_eq, the LNet API defines two types
+ * associated with events: The ::lnet_event_kind defines the kinds of events
+ * that can be stored in an EQ. The lnet_event defines a structure that
* holds the information about with an event.
*
* There are five functions for dealing with EQs: LNetEQAlloc() is used to
@@ -162,14 +161,14 @@ int LNetMDUnlink(lnet_handle_md_t md_in);
*/
int LNetEQAlloc(unsigned int count_in,
lnet_eq_handler_t handler,
- lnet_handle_eq_t *handle_out);
+ struct lnet_handle_eq *handle_out);
-int LNetEQFree(lnet_handle_eq_t eventq_in);
+int LNetEQFree(struct lnet_handle_eq eventq_in);
-int LNetEQPoll(lnet_handle_eq_t *eventqs_in,
+int LNetEQPoll(struct lnet_handle_eq *eventqs_in,
int neq_in,
int timeout_ms,
- lnet_event_t *event_out,
+ struct lnet_event *event_out,
int *which_eq_out);
/** @} lnet_eq */
@@ -180,17 +179,17 @@ int LNetEQPoll(lnet_handle_eq_t *eventqs_in,
* @{
*/
int LNetPut(lnet_nid_t self,
- lnet_handle_md_t md_in,
- lnet_ack_req_t ack_req_in,
- lnet_process_id_t target_in,
+ struct lnet_handle_md md_in,
+ enum lnet_ack_req ack_req_in,
+ struct lnet_process_id target_in,
unsigned int portal_in,
__u64 match_bits_in,
unsigned int offset_in,
__u64 hdr_data_in);
int LNetGet(lnet_nid_t self,
- lnet_handle_md_t md_in,
- lnet_process_id_t target_in,
+ struct lnet_handle_md md_in,
+ struct lnet_process_id target_in,
unsigned int portal_in,
__u64 match_bits_in,
unsigned int offset_in);
@@ -203,7 +202,7 @@ int LNetGet(lnet_nid_t self,
int LNetSetLazyPortal(int portal);
int LNetClearLazyPortal(int portal);
int LNetCtl(unsigned int cmd, void *arg);
-void LNetDebugPeer(lnet_process_id_t id);
+void LNetDebugPeer(struct lnet_process_id id);
/** @} lnet_misc */
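
The prototypes above only trade the old lnet_*_t typedefs for explicit struct and enum tags; argument order and semantics are unchanged, so callers just spell the tags out. A hypothetical caller sketch under that assumption: example_attach_wildcard_me() is invented, and LNET_NID_ANY, LNET_PID_ANY, LNET_UNLINK and LNET_INS_AFTER are taken to be the usual constants from the lnet headers.

/* Sketch only: assumes the lnet api/types headers are in scope. */
static int example_attach_wildcard_me(unsigned int portal, __u64 match_bits,
				      struct lnet_handle_me *meh)
{
	struct lnet_process_id any_id = {
		.nid = LNET_NID_ANY,
		.pid = LNET_PID_ANY,
	};

	/* lnet_process_id_t -> struct lnet_process_id,
	 * lnet_unlink_t     -> enum lnet_unlink,
	 * lnet_ins_pos_t    -> enum lnet_ins_pos */
	return LNetMEAttach(portal, any_id, match_bits, 0,
			    LNET_UNLINK, LNET_INS_AFTER, meh);
}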
diff --git a/drivers/staging/lustre/include/linux/lnet/lib-lnet.h b/drivers/staging/lustre/include/linux/lnet/lib-lnet.h
index 3d19402ba728..8ae7423b4543 100644
--- a/drivers/staging/lustre/include/linux/lnet/lib-lnet.h
+++ b/drivers/staging/lustre/include/linux/lnet/lib-lnet.h
@@ -41,7 +41,7 @@
#include "lib-types.h"
#include "lib-dlc.h"
-extern lnet_t the_lnet; /* THE network */
+extern struct lnet the_lnet; /* THE network */
#if (BITS_PER_LONG == 32)
/* 2 CPTs, allowing more CPTs might make us under memory pressure */
@@ -65,7 +65,7 @@ extern lnet_t the_lnet; /* THE network */
/** exclusive lock */
#define LNET_LOCK_EX CFS_PERCPT_LOCK_EX
-static inline int lnet_is_route_alive(lnet_route_t *route)
+static inline int lnet_is_route_alive(struct lnet_route *route)
{
/* gateway is down */
if (!route->lr_gateway->lp_alive)
@@ -84,14 +84,14 @@ static inline int lnet_is_wire_handle_none(struct lnet_handle_wire *wh)
wh->wh_object_cookie == LNET_WIRE_HANDLE_COOKIE_NONE);
}
-static inline int lnet_md_exhausted(lnet_libmd_t *md)
+static inline int lnet_md_exhausted(struct lnet_libmd *md)
{
return (!md->md_threshold ||
((md->md_options & LNET_MD_MAX_SIZE) &&
md->md_offset + md->md_max_size > md->md_length));
}
-static inline int lnet_md_unlinkable(lnet_libmd_t *md)
+static inline int lnet_md_unlinkable(struct lnet_libmd *md)
{
/*
* Should unlink md when its refcount is 0 and either:
@@ -178,34 +178,34 @@ lnet_net_lock_current(void)
#define MAX_PORTALS 64
-static inline lnet_eq_t *
+static inline struct lnet_eq *
lnet_eq_alloc(void)
{
- lnet_eq_t *eq;
+ struct lnet_eq *eq;
LIBCFS_ALLOC(eq, sizeof(*eq));
return eq;
}
static inline void
-lnet_eq_free(lnet_eq_t *eq)
+lnet_eq_free(struct lnet_eq *eq)
{
LIBCFS_FREE(eq, sizeof(*eq));
}
-static inline lnet_libmd_t *
-lnet_md_alloc(lnet_md_t *umd)
+static inline struct lnet_libmd *
+lnet_md_alloc(struct lnet_md *umd)
{
- lnet_libmd_t *md;
+ struct lnet_libmd *md;
unsigned int size;
unsigned int niov;
if (umd->options & LNET_MD_KIOV) {
niov = umd->length;
- size = offsetof(lnet_libmd_t, md_iov.kiov[niov]);
+ size = offsetof(struct lnet_libmd, md_iov.kiov[niov]);
} else {
niov = umd->options & LNET_MD_IOVEC ? umd->length : 1;
- size = offsetof(lnet_libmd_t, md_iov.iov[niov]);
+ size = offsetof(struct lnet_libmd, md_iov.iov[niov]);
}
LIBCFS_ALLOC(md, size);
@@ -221,37 +221,37 @@ lnet_md_alloc(lnet_md_t *umd)
}
static inline void
-lnet_md_free(lnet_libmd_t *md)
+lnet_md_free(struct lnet_libmd *md)
{
unsigned int size;
if (md->md_options & LNET_MD_KIOV)
- size = offsetof(lnet_libmd_t, md_iov.kiov[md->md_niov]);
+ size = offsetof(struct lnet_libmd, md_iov.kiov[md->md_niov]);
else
- size = offsetof(lnet_libmd_t, md_iov.iov[md->md_niov]);
+ size = offsetof(struct lnet_libmd, md_iov.iov[md->md_niov]);
LIBCFS_FREE(md, size);
}
-static inline lnet_me_t *
+static inline struct lnet_me *
lnet_me_alloc(void)
{
- lnet_me_t *me;
+ struct lnet_me *me;
LIBCFS_ALLOC(me, sizeof(*me));
return me;
}
static inline void
-lnet_me_free(lnet_me_t *me)
+lnet_me_free(struct lnet_me *me)
{
LIBCFS_FREE(me, sizeof(*me));
}
-static inline lnet_msg_t *
+static inline struct lnet_msg *
lnet_msg_alloc(void)
{
- lnet_msg_t *msg;
+ struct lnet_msg *msg;
LIBCFS_ALLOC(msg, sizeof(*msg));
@@ -260,57 +260,57 @@ lnet_msg_alloc(void)
}
static inline void
-lnet_msg_free(lnet_msg_t *msg)
+lnet_msg_free(struct lnet_msg *msg)
{
LASSERT(!msg->msg_onactivelist);
LIBCFS_FREE(msg, sizeof(*msg));
}
-lnet_libhandle_t *lnet_res_lh_lookup(struct lnet_res_container *rec,
- __u64 cookie);
+struct lnet_libhandle *lnet_res_lh_lookup(struct lnet_res_container *rec,
+ __u64 cookie);
void lnet_res_lh_initialize(struct lnet_res_container *rec,
- lnet_libhandle_t *lh);
+ struct lnet_libhandle *lh);
static inline void
-lnet_res_lh_invalidate(lnet_libhandle_t *lh)
+lnet_res_lh_invalidate(struct lnet_libhandle *lh)
{
/* NB: cookie is still useful, don't reset it */
list_del(&lh->lh_hash_chain);
}
static inline void
-lnet_eq2handle(lnet_handle_eq_t *handle, lnet_eq_t *eq)
+lnet_eq2handle(struct lnet_handle_eq *handle, struct lnet_eq *eq)
{
if (!eq) {
- LNetInvalidateHandle(handle);
+ LNetInvalidateEQHandle(handle);
return;
}
handle->cookie = eq->eq_lh.lh_cookie;
}
-static inline lnet_eq_t *
-lnet_handle2eq(lnet_handle_eq_t *handle)
+static inline struct lnet_eq *
+lnet_handle2eq(struct lnet_handle_eq *handle)
{
- lnet_libhandle_t *lh;
+ struct lnet_libhandle *lh;
lh = lnet_res_lh_lookup(&the_lnet.ln_eq_container, handle->cookie);
if (!lh)
return NULL;
- return lh_entry(lh, lnet_eq_t, eq_lh);
+ return lh_entry(lh, struct lnet_eq, eq_lh);
}
static inline void
-lnet_md2handle(lnet_handle_md_t *handle, lnet_libmd_t *md)
+lnet_md2handle(struct lnet_handle_md *handle, struct lnet_libmd *md)
{
handle->cookie = md->md_lh.lh_cookie;
}
-static inline lnet_libmd_t *
-lnet_handle2md(lnet_handle_md_t *handle)
+static inline struct lnet_libmd *
+lnet_handle2md(struct lnet_handle_md *handle)
{
/* ALWAYS called with resource lock held */
- lnet_libhandle_t *lh;
+ struct lnet_libhandle *lh;
int cpt;
cpt = lnet_cpt_of_cookie(handle->cookie);
@@ -319,14 +319,14 @@ lnet_handle2md(lnet_handle_md_t *handle)
if (!lh)
return NULL;
- return lh_entry(lh, lnet_libmd_t, md_lh);
+ return lh_entry(lh, struct lnet_libmd, md_lh);
}
-static inline lnet_libmd_t *
+static inline struct lnet_libmd *
lnet_wire_handle2md(struct lnet_handle_wire *wh)
{
/* ALWAYS called with resource lock held */
- lnet_libhandle_t *lh;
+ struct lnet_libhandle *lh;
int cpt;
if (wh->wh_interface_cookie != the_lnet.ln_interface_cookie)
@@ -338,20 +338,20 @@ lnet_wire_handle2md(struct lnet_handle_wire *wh)
if (!lh)
return NULL;
- return lh_entry(lh, lnet_libmd_t, md_lh);
+ return lh_entry(lh, struct lnet_libmd, md_lh);
}
static inline void
-lnet_me2handle(lnet_handle_me_t *handle, lnet_me_t *me)
+lnet_me2handle(struct lnet_handle_me *handle, struct lnet_me *me)
{
handle->cookie = me->me_lh.lh_cookie;
}
-static inline lnet_me_t *
-lnet_handle2me(lnet_handle_me_t *handle)
+static inline struct lnet_me *
+lnet_handle2me(struct lnet_handle_me *handle)
{
/* ALWAYS called with resource lock held */
- lnet_libhandle_t *lh;
+ struct lnet_libhandle *lh;
int cpt;
cpt = lnet_cpt_of_cookie(handle->cookie);
@@ -360,20 +360,20 @@ lnet_handle2me(lnet_handle_me_t *handle)
if (!lh)
return NULL;
- return lh_entry(lh, lnet_me_t, me_lh);
+ return lh_entry(lh, struct lnet_me, me_lh);
}
static inline void
-lnet_peer_addref_locked(lnet_peer_t *lp)
+lnet_peer_addref_locked(struct lnet_peer *lp)
{
LASSERT(lp->lp_refcount > 0);
lp->lp_refcount++;
}
-void lnet_destroy_peer_locked(lnet_peer_t *lp);
+void lnet_destroy_peer_locked(struct lnet_peer *lp);
static inline void
-lnet_peer_decref_locked(lnet_peer_t *lp)
+lnet_peer_decref_locked(struct lnet_peer *lp)
{
LASSERT(lp->lp_refcount > 0);
lp->lp_refcount--;
@@ -382,13 +382,13 @@ lnet_peer_decref_locked(lnet_peer_t *lp)
}
static inline int
-lnet_isrouter(lnet_peer_t *lp)
+lnet_isrouter(struct lnet_peer *lp)
{
return lp->lp_rtr_refcount ? 1 : 0;
}
static inline void
-lnet_ni_addref_locked(lnet_ni_t *ni, int cpt)
+lnet_ni_addref_locked(struct lnet_ni *ni, int cpt)
{
LASSERT(cpt >= 0 && cpt < LNET_CPT_NUMBER);
LASSERT(*ni->ni_refs[cpt] >= 0);
@@ -397,7 +397,7 @@ lnet_ni_addref_locked(lnet_ni_t *ni, int cpt)
}
static inline void
-lnet_ni_addref(lnet_ni_t *ni)
+lnet_ni_addref(struct lnet_ni *ni)
{
lnet_net_lock(0);
lnet_ni_addref_locked(ni, 0);
@@ -405,7 +405,7 @@ lnet_ni_addref(lnet_ni_t *ni)
}
static inline void
-lnet_ni_decref_locked(lnet_ni_t *ni, int cpt)
+lnet_ni_decref_locked(struct lnet_ni *ni, int cpt)
{
LASSERT(cpt >= 0 && cpt < LNET_CPT_NUMBER);
LASSERT(*ni->ni_refs[cpt] > 0);
@@ -414,15 +414,15 @@ lnet_ni_decref_locked(lnet_ni_t *ni, int cpt)
}
static inline void
-lnet_ni_decref(lnet_ni_t *ni)
+lnet_ni_decref(struct lnet_ni *ni)
{
lnet_net_lock(0);
lnet_ni_decref_locked(ni, 0);
lnet_net_unlock(0);
}
-void lnet_ni_free(lnet_ni_t *ni);
-lnet_ni_t *
+void lnet_ni_free(struct lnet_ni *ni);
+struct lnet_ni *
lnet_ni_alloc(__u32 net, struct cfs_expr_list *el, struct list_head *nilist);
static inline int
@@ -439,22 +439,22 @@ lnet_net2rnethash(__u32 net)
((1U << the_lnet.ln_remote_nets_hbits) - 1)];
}
-extern lnd_t the_lolnd;
+extern struct lnet_lnd the_lolnd;
extern int avoid_asym_router_failure;
int lnet_cpt_of_nid_locked(lnet_nid_t nid);
int lnet_cpt_of_nid(lnet_nid_t nid);
-lnet_ni_t *lnet_nid2ni_locked(lnet_nid_t nid, int cpt);
-lnet_ni_t *lnet_net2ni_locked(__u32 net, int cpt);
-lnet_ni_t *lnet_net2ni(__u32 net);
+struct lnet_ni *lnet_nid2ni_locked(lnet_nid_t nid, int cpt);
+struct lnet_ni *lnet_net2ni_locked(__u32 net, int cpt);
+struct lnet_ni *lnet_net2ni(__u32 net);
extern int portal_rotor;
int lnet_lib_init(void);
void lnet_lib_exit(void);
-int lnet_notify(lnet_ni_t *ni, lnet_nid_t peer, int alive, unsigned long when);
-void lnet_notify_locked(lnet_peer_t *lp, int notifylnd, int alive,
+int lnet_notify(struct lnet_ni *ni, lnet_nid_t peer, int alive, unsigned long when);
+void lnet_notify_locked(struct lnet_peer *lp, int notifylnd, int alive,
unsigned long when);
int lnet_add_route(__u32 net, __u32 hops, lnet_nid_t gateway_nid,
unsigned int priority);
@@ -468,12 +468,12 @@ int lnet_get_rtr_pool_cfg(int idx, struct lnet_ioctl_pool_cfg *pool_cfg);
void lnet_router_debugfs_init(void);
void lnet_router_debugfs_fini(void);
int lnet_rtrpools_alloc(int im_a_router);
-void lnet_destroy_rtrbuf(lnet_rtrbuf_t *rb, int npages);
+void lnet_destroy_rtrbuf(struct lnet_rtrbuf *rb, int npages);
int lnet_rtrpools_adjust(int tiny, int small, int large);
int lnet_rtrpools_enable(void);
void lnet_rtrpools_disable(void);
void lnet_rtrpools_free(int keep_pools);
-lnet_remotenet_t *lnet_find_net_locked(__u32 net);
+struct lnet_remotenet *lnet_find_net_locked(__u32 net);
int lnet_dyn_add_ni(lnet_pid_t requested_pid,
struct lnet_ioctl_config_data *conf);
int lnet_dyn_del_ni(__u32 net);
@@ -482,69 +482,70 @@ int lnet_clear_lazy_portal(struct lnet_ni *ni, int portal, char *reason);
int lnet_islocalnid(lnet_nid_t nid);
int lnet_islocalnet(__u32 net);
-void lnet_msg_attach_md(lnet_msg_t *msg, lnet_libmd_t *md,
+void lnet_msg_attach_md(struct lnet_msg *msg, struct lnet_libmd *md,
unsigned int offset, unsigned int mlen);
-void lnet_msg_detach_md(lnet_msg_t *msg, int status);
-void lnet_build_unlink_event(lnet_libmd_t *md, lnet_event_t *ev);
-void lnet_build_msg_event(lnet_msg_t *msg, lnet_event_kind_t ev_type);
-void lnet_msg_commit(lnet_msg_t *msg, int cpt);
-void lnet_msg_decommit(lnet_msg_t *msg, int cpt, int status);
-
-void lnet_eq_enqueue_event(lnet_eq_t *eq, lnet_event_t *ev);
-void lnet_prep_send(lnet_msg_t *msg, int type, lnet_process_id_t target,
- unsigned int offset, unsigned int len);
-int lnet_send(lnet_nid_t nid, lnet_msg_t *msg, lnet_nid_t rtr_nid);
-void lnet_return_tx_credits_locked(lnet_msg_t *msg);
-void lnet_return_rx_credits_locked(lnet_msg_t *msg);
-void lnet_schedule_blocked_locked(lnet_rtrbufpool_t *rbp);
+void lnet_msg_detach_md(struct lnet_msg *msg, int status);
+void lnet_build_unlink_event(struct lnet_libmd *md, struct lnet_event *ev);
+void lnet_build_msg_event(struct lnet_msg *msg, enum lnet_event_kind ev_type);
+void lnet_msg_commit(struct lnet_msg *msg, int cpt);
+void lnet_msg_decommit(struct lnet_msg *msg, int cpt, int status);
+
+void lnet_eq_enqueue_event(struct lnet_eq *eq, struct lnet_event *ev);
+void lnet_prep_send(struct lnet_msg *msg, int type,
+ struct lnet_process_id target, unsigned int offset,
+ unsigned int len);
+int lnet_send(lnet_nid_t nid, struct lnet_msg *msg, lnet_nid_t rtr_nid);
+void lnet_return_tx_credits_locked(struct lnet_msg *msg);
+void lnet_return_rx_credits_locked(struct lnet_msg *msg);
+void lnet_schedule_blocked_locked(struct lnet_rtrbufpool *rbp);
void lnet_drop_routed_msgs_locked(struct list_head *list, int cpt);
/* portals functions */
/* portals attributes */
static inline int
-lnet_ptl_is_lazy(lnet_portal_t *ptl)
+lnet_ptl_is_lazy(struct lnet_portal *ptl)
{
return !!(ptl->ptl_options & LNET_PTL_LAZY);
}
static inline int
-lnet_ptl_is_unique(lnet_portal_t *ptl)
+lnet_ptl_is_unique(struct lnet_portal *ptl)
{
return !!(ptl->ptl_options & LNET_PTL_MATCH_UNIQUE);
}
static inline int
-lnet_ptl_is_wildcard(lnet_portal_t *ptl)
+lnet_ptl_is_wildcard(struct lnet_portal *ptl)
{
return !!(ptl->ptl_options & LNET_PTL_MATCH_WILDCARD);
}
static inline void
-lnet_ptl_setopt(lnet_portal_t *ptl, int opt)
+lnet_ptl_setopt(struct lnet_portal *ptl, int opt)
{
ptl->ptl_options |= opt;
}
static inline void
-lnet_ptl_unsetopt(lnet_portal_t *ptl, int opt)
+lnet_ptl_unsetopt(struct lnet_portal *ptl, int opt)
{
ptl->ptl_options &= ~opt;
}
/* match-table functions */
struct list_head *lnet_mt_match_head(struct lnet_match_table *mtable,
- lnet_process_id_t id, __u64 mbits);
+ struct lnet_process_id id, __u64 mbits);
struct lnet_match_table *lnet_mt_of_attach(unsigned int index,
- lnet_process_id_t id, __u64 mbits,
- __u64 ignore_bits,
- lnet_ins_pos_t pos);
+ struct lnet_process_id id,
+ __u64 mbits, __u64 ignore_bits,
+ enum lnet_ins_pos pos);
int lnet_mt_match_md(struct lnet_match_table *mtable,
struct lnet_match_info *info, struct lnet_msg *msg);
/* portals match/attach functions */
-void lnet_ptl_attach_md(lnet_me_t *me, lnet_libmd_t *md,
+void lnet_ptl_attach_md(struct lnet_me *me, struct lnet_libmd *md,
struct list_head *matches, struct list_head *drops);
-void lnet_ptl_detach_md(lnet_me_t *me, lnet_libmd_t *md);
+void lnet_ptl_detach_md(struct lnet_me *me, struct lnet_libmd *md);
int lnet_ptl_match_md(struct lnet_match_info *info, struct lnet_msg *msg);
/* initialized and finalize portals */
@@ -552,23 +553,26 @@ int lnet_portals_create(void);
void lnet_portals_destroy(void);
/* message functions */
-int lnet_parse(lnet_ni_t *ni, struct lnet_hdr *hdr,
+int lnet_parse(struct lnet_ni *ni, struct lnet_hdr *hdr,
lnet_nid_t fromnid, void *private, int rdma_req);
-int lnet_parse_local(lnet_ni_t *ni, lnet_msg_t *msg);
-int lnet_parse_forward_locked(lnet_ni_t *ni, lnet_msg_t *msg);
+int lnet_parse_local(struct lnet_ni *ni, struct lnet_msg *msg);
+int lnet_parse_forward_locked(struct lnet_ni *ni, struct lnet_msg *msg);
-void lnet_recv(lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed,
- unsigned int offset, unsigned int mlen, unsigned int rlen);
-void lnet_ni_recv(lnet_ni_t *ni, void *private, lnet_msg_t *msg,
+void lnet_recv(struct lnet_ni *ni, void *private, struct lnet_msg *msg,
+ int delayed, unsigned int offset, unsigned int mlen,
+ unsigned int rlen);
+void lnet_ni_recv(struct lnet_ni *ni, void *private, struct lnet_msg *msg,
int delayed, unsigned int offset,
unsigned int mlen, unsigned int rlen);
-lnet_msg_t *lnet_create_reply_msg(lnet_ni_t *ni, lnet_msg_t *get_msg);
-void lnet_set_reply_msg_len(lnet_ni_t *ni, lnet_msg_t *msg, unsigned int len);
+struct lnet_msg *lnet_create_reply_msg(struct lnet_ni *ni,
+ struct lnet_msg *get_msg);
+void lnet_set_reply_msg_len(struct lnet_ni *ni, struct lnet_msg *msg,
+ unsigned int len);
-void lnet_finalize(lnet_ni_t *ni, lnet_msg_t *msg, int rc);
+void lnet_finalize(struct lnet_ni *ni, struct lnet_msg *msg, int rc);
-void lnet_drop_message(lnet_ni_t *ni, int cpt, void *private,
+void lnet_drop_message(struct lnet_ni *ni, int cpt, void *private,
unsigned int nob);
void lnet_drop_delayed_msg_list(struct list_head *head, char *reason);
void lnet_recv_delayed_msg_list(struct list_head *head);
@@ -600,7 +604,7 @@ bool lnet_delay_rule_match_locked(struct lnet_hdr *hdr, struct lnet_msg *msg);
/** @} lnet_fault_simulation */
-void lnet_counters_get(lnet_counters_t *counters);
+void lnet_counters_get(struct lnet_counters *counters);
void lnet_counters_reset(void);
unsigned int lnet_iov_nob(unsigned int niov, struct kvec *iov);
@@ -608,25 +612,25 @@ int lnet_extract_iov(int dst_niov, struct kvec *dst,
int src_niov, const struct kvec *src,
unsigned int offset, unsigned int len);
-unsigned int lnet_kiov_nob(unsigned int niov, lnet_kiov_t *iov);
-int lnet_extract_kiov(int dst_niov, lnet_kiov_t *dst,
- int src_niov, const lnet_kiov_t *src,
+unsigned int lnet_kiov_nob(unsigned int niov, struct bio_vec *iov);
+int lnet_extract_kiov(int dst_niov, struct bio_vec *dst,
+ int src_niov, const struct bio_vec *src,
unsigned int offset, unsigned int len);
void lnet_copy_iov2iter(struct iov_iter *to,
unsigned int nsiov, const struct kvec *siov,
unsigned int soffset, unsigned int nob);
void lnet_copy_kiov2iter(struct iov_iter *to,
- unsigned int nkiov, const lnet_kiov_t *kiov,
+ unsigned int nkiov, const struct bio_vec *kiov,
unsigned int kiovoffset, unsigned int nob);
-void lnet_me_unlink(lnet_me_t *me);
+void lnet_me_unlink(struct lnet_me *me);
-void lnet_md_unlink(lnet_libmd_t *md);
-void lnet_md_deconstruct(lnet_libmd_t *lmd, lnet_md_t *umd);
+void lnet_md_unlink(struct lnet_libmd *md);
+void lnet_md_deconstruct(struct lnet_libmd *lmd, struct lnet_md *umd);
-void lnet_register_lnd(lnd_t *lnd);
-void lnet_unregister_lnd(lnd_t *lnd);
+void lnet_register_lnd(struct lnet_lnd *lnd);
+void lnet_unregister_lnd(struct lnet_lnd *lnd);
int lnet_connect(struct socket **sockp, lnet_nid_t peer_nid,
__u32 local_ip, __u32 peer_ip, int peer_port);
@@ -659,11 +663,11 @@ int lnet_sock_connect(struct socket **sockp, int *fatal,
void libcfs_sock_release(struct socket *sock);
int lnet_peers_start_down(void);
-int lnet_peer_buffer_credits(lnet_ni_t *ni);
+int lnet_peer_buffer_credits(struct lnet_ni *ni);
int lnet_router_checker_start(void);
void lnet_router_checker_stop(void);
-void lnet_router_ni_update_locked(lnet_peer_t *gw, __u32 net);
+void lnet_router_ni_update_locked(struct lnet_peer *gw, __u32 net);
void lnet_swap_pinginfo(struct lnet_ping_info *info);
int lnet_parse_ip2nets(char **networksp, char *ip2nets);
@@ -671,10 +675,10 @@ int lnet_parse_routes(char *route_str, int *im_a_router);
int lnet_parse_networks(struct list_head *nilist, char *networks);
int lnet_net_unique(__u32 net, struct list_head *nilist);
-int lnet_nid2peer_locked(lnet_peer_t **lpp, lnet_nid_t nid, int cpt);
-lnet_peer_t *lnet_find_peer_locked(struct lnet_peer_table *ptable,
- lnet_nid_t nid);
-void lnet_peer_tables_cleanup(lnet_ni_t *ni);
+int lnet_nid2peer_locked(struct lnet_peer **lpp, lnet_nid_t nid, int cpt);
+struct lnet_peer *lnet_find_peer_locked(struct lnet_peer_table *ptable,
+ lnet_nid_t nid);
+void lnet_peer_tables_cleanup(struct lnet_ni *ni);
void lnet_peer_tables_destroy(void);
int lnet_peer_tables_create(void);
void lnet_debug_peer(lnet_nid_t nid);
@@ -686,7 +690,7 @@ int lnet_get_peer_info(__u32 peer_index, __u64 *nid,
__u32 *peer_tx_qnob);
static inline void
-lnet_peer_set_alive(lnet_peer_t *lp)
+lnet_peer_set_alive(struct lnet_peer *lp)
{
lp->lp_last_query = jiffies;
lp->lp_last_alive = jiffies;
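
A minimal sketch (hypothetical LND completion path, names assumed, not part of this patch) of what the prototype changes in lib-lnet.h mean for a caller: only the declared pointer types change, the arguments and semantics stay the same.

    static void mylnd_tx_done(struct lnet_ni *ni, struct lnet_msg *msg, int status)
    {
    	/* before this series the parameters were lnet_ni_t * and lnet_msg_t * */
    	lnet_finalize(ni, msg, status);
    }
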
diff --git a/drivers/staging/lustre/include/linux/lnet/lib-types.h b/drivers/staging/lustre/include/linux/lnet/lib-types.h
index 9850398bf29a..321752dfe58b 100644
--- a/drivers/staging/lustre/include/linux/lnet/lib-types.h
+++ b/drivers/staging/lustre/include/linux/lnet/lib-types.h
@@ -54,11 +54,11 @@
/* forward refs */
struct lnet_libmd;
-typedef struct lnet_msg {
+struct lnet_msg {
struct list_head msg_activelist;
struct list_head msg_list; /* Q for credits/MD */
- lnet_process_id_t msg_target;
+ struct lnet_process_id msg_target;
/* where is it from, it's only for building event */
lnet_nid_t msg_from;
__u32 msg_type;
@@ -102,47 +102,47 @@ typedef struct lnet_msg {
unsigned int msg_offset;
unsigned int msg_niov;
struct kvec *msg_iov;
- lnet_kiov_t *msg_kiov;
+ struct bio_vec *msg_kiov;
- lnet_event_t msg_ev;
+ struct lnet_event msg_ev;
struct lnet_hdr msg_hdr;
-} lnet_msg_t;
+};
-typedef struct lnet_libhandle {
+struct lnet_libhandle {
struct list_head lh_hash_chain;
__u64 lh_cookie;
-} lnet_libhandle_t;
+};
#define lh_entry(ptr, type, member) \
((type *)((char *)(ptr) - (char *)(&((type *)0)->member)))
-typedef struct lnet_eq {
+struct lnet_eq {
struct list_head eq_list;
- lnet_libhandle_t eq_lh;
- lnet_seq_t eq_enq_seq;
- lnet_seq_t eq_deq_seq;
+ struct lnet_libhandle eq_lh;
+ unsigned long eq_enq_seq;
+ unsigned long eq_deq_seq;
unsigned int eq_size;
lnet_eq_handler_t eq_callback;
- lnet_event_t *eq_events;
+ struct lnet_event *eq_events;
int **eq_refs; /* percpt refcount for EQ */
-} lnet_eq_t;
+};
-typedef struct lnet_me {
+struct lnet_me {
struct list_head me_list;
- lnet_libhandle_t me_lh;
- lnet_process_id_t me_match_id;
+ struct lnet_libhandle me_lh;
+ struct lnet_process_id me_match_id;
unsigned int me_portal;
unsigned int me_pos; /* hash offset in mt_hash */
__u64 me_match_bits;
__u64 me_ignore_bits;
- lnet_unlink_t me_unlink;
+ enum lnet_unlink me_unlink;
struct lnet_libmd *me_md;
-} lnet_me_t;
+};
-typedef struct lnet_libmd {
+struct lnet_libmd {
struct list_head md_list;
- lnet_libhandle_t md_lh;
- lnet_me_t *md_me;
+ struct lnet_libhandle md_lh;
+ struct lnet_me *md_me;
char *md_start;
unsigned int md_offset;
unsigned int md_length;
@@ -152,24 +152,24 @@ typedef struct lnet_libmd {
unsigned int md_options;
unsigned int md_flags;
void *md_user_ptr;
- lnet_eq_t *md_eq;
+ struct lnet_eq *md_eq;
unsigned int md_niov; /* # frags */
union {
struct kvec iov[LNET_MAX_IOV];
- lnet_kiov_t kiov[LNET_MAX_IOV];
+ struct bio_vec kiov[LNET_MAX_IOV];
} md_iov;
-} lnet_libmd_t;
+};
#define LNET_MD_FLAG_ZOMBIE (1 << 0)
#define LNET_MD_FLAG_AUTO_UNLINK (1 << 1)
#define LNET_MD_FLAG_ABORTED (1 << 2)
-typedef struct {
+struct lnet_test_peer {
/* info about peers we are trying to fail */
struct list_head tp_list; /* ln_test_peers */
lnet_nid_t tp_nid; /* matching nid */
unsigned int tp_threshold; /* # failures to simulate */
-} lnet_test_peer_t;
+};
#define LNET_COOKIE_TYPE_MD 1
#define LNET_COOKIE_TYPE_ME 2
@@ -179,7 +179,7 @@ typedef struct {
struct lnet_ni; /* forward ref */
-typedef struct lnet_lnd {
+struct lnet_lnd {
/* fields managed by portals */
struct list_head lnd_list; /* stash in the LND table */
int lnd_refcount; /* # active instances */
@@ -210,7 +210,8 @@ typedef struct lnet_lnd {
* non-zero for immediate failure, otherwise complete later with
* lnet_finalize()
*/
- int (*lnd_send)(struct lnet_ni *ni, void *private, lnet_msg_t *msg);
+ int (*lnd_send)(struct lnet_ni *ni, void *private,
+ struct lnet_msg *msg);
/*
* Start receiving 'mlen' bytes of payload data, skipping the following
@@ -219,7 +220,7 @@ typedef struct lnet_lnd {
* complete later with lnet_finalize(). This also gives back a receive
* credit if the LND does flow control.
*/
- int (*lnd_recv)(struct lnet_ni *ni, void *private, lnet_msg_t *msg,
+ int (*lnd_recv)(struct lnet_ni *ni, void *private, struct lnet_msg *msg,
int delayed, struct iov_iter *to, unsigned int rlen);
/*
@@ -231,7 +232,7 @@ typedef struct lnet_lnd {
* release resources; lnd_recv() will not be called.
*/
int (*lnd_eager_recv)(struct lnet_ni *ni, void *private,
- lnet_msg_t *msg, void **new_privatep);
+ struct lnet_msg *msg, void **new_privatep);
/* notification of peer health */
void (*lnd_notify)(struct lnet_ni *ni, lnet_nid_t peer, int alive);
@@ -242,7 +243,7 @@ typedef struct lnet_lnd {
/* accept a new connection */
int (*lnd_accept)(struct lnet_ni *ni, struct socket *sock);
-} lnd_t;
+};
struct lnet_tx_queue {
int tq_credits; /* # tx credits free */
@@ -251,7 +252,7 @@ struct lnet_tx_queue {
struct list_head tq_delayed; /* delayed TXs */
};
-typedef struct lnet_ni {
+struct lnet_ni {
spinlock_t ni_lock;
struct list_head ni_list; /* chain on ln_nis */
struct list_head ni_cptlist; /* chain on ln_nis_cpt */
@@ -266,7 +267,7 @@ typedef struct lnet_ni {
__u32 *ni_cpts; /* bond NI on some CPTs */
lnet_nid_t ni_nid; /* interface's NID */
void *ni_data; /* instance-specific data */
- lnd_t *ni_lnd; /* procedural interface */
+ struct lnet_lnd *ni_lnd; /* procedural interface */
struct lnet_tx_queue **ni_tx_queues; /* percpt TX queues */
int **ni_refs; /* percpt reference count */
time64_t ni_last_alive;/* when I was last alive */
@@ -277,7 +278,7 @@ typedef struct lnet_ni {
char *ni_interfaces[LNET_MAX_INTERFACES];
/* original net namespace */
struct net *ni_net_ns;
-} lnet_ni_t;
+};
#define LNET_PROTO_PING_MATCHBITS 0x8000000000000000LL
@@ -296,15 +297,15 @@ typedef struct lnet_ni {
/* router checker data, per router */
#define LNET_MAX_RTR_NIS 16
#define LNET_PINGINFO_SIZE offsetof(struct lnet_ping_info, pi_ni[LNET_MAX_RTR_NIS])
-typedef struct {
+struct lnet_rc_data {
/* chain on the_lnet.ln_zombie_rcd or ln_deathrow_rcd */
struct list_head rcd_list;
- lnet_handle_md_t rcd_mdh; /* ping buffer MD */
+ struct lnet_handle_md rcd_mdh; /* ping buffer MD */
struct lnet_peer *rcd_gateway; /* reference to gateway */
struct lnet_ping_info *rcd_pinginfo; /* ping buffer */
-} lnet_rc_data_t;
+};
-typedef struct lnet_peer {
+struct lnet_peer {
struct list_head lp_hashlist; /* chain on peer hash */
struct list_head lp_txq; /* messages blocking for
tx credits */
@@ -335,17 +336,17 @@ typedef struct lnet_peer {
unsigned long lp_last_alive; /* when I was last alive */
unsigned long lp_last_query; /* when lp_ni was queried
last time */
- lnet_ni_t *lp_ni; /* interface peer is on */
+ struct lnet_ni *lp_ni; /* interface peer is on */
lnet_nid_t lp_nid; /* peer's NID */
int lp_refcount; /* # refs */
int lp_cpt; /* CPT this peer attached on */
- /* # refs from lnet_route_t::lr_gateway */
+ /* # refs from lnet_route::lr_gateway */
int lp_rtr_refcount;
/* returned RC ping features */
unsigned int lp_ping_feats;
struct list_head lp_routes; /* routers on this peer */
- lnet_rc_data_t *lp_rcd; /* router checker state */
-} lnet_peer_t;
+ struct lnet_rc_data *lp_rcd; /* router checker state */
+};
/* peer hash size */
#define LNET_PEER_HASH_BITS 9
@@ -363,39 +364,39 @@ struct lnet_peer_table {
/*
* peer aliveness is enabled only on routers for peers in a network where the
- * lnet_ni_t::ni_peertimeout has been set to a positive value
+ * lnet_ni::ni_peertimeout has been set to a positive value
*/
#define lnet_peer_aliveness_enabled(lp) (the_lnet.ln_routing && \
(lp)->lp_ni->ni_peertimeout > 0)
-typedef struct {
+struct lnet_route {
struct list_head lr_list; /* chain on net */
struct list_head lr_gwlist; /* chain on gateway */
- lnet_peer_t *lr_gateway; /* router node */
+ struct lnet_peer *lr_gateway; /* router node */
__u32 lr_net; /* remote network number */
int lr_seq; /* sequence for round-robin */
unsigned int lr_downis; /* number of down NIs */
__u32 lr_hops; /* how far I am */
unsigned int lr_priority; /* route priority */
-} lnet_route_t;
+};
#define LNET_REMOTE_NETS_HASH_DEFAULT (1U << 7)
#define LNET_REMOTE_NETS_HASH_MAX (1U << 16)
#define LNET_REMOTE_NETS_HASH_SIZE (1 << the_lnet.ln_remote_nets_hbits)
-typedef struct {
+struct lnet_remotenet {
struct list_head lrn_list; /* chain on
ln_remote_nets_hash */
struct list_head lrn_routes; /* routes to me */
__u32 lrn_net; /* my net number */
-} lnet_remotenet_t;
+};
/** lnet message has credit and can be submitted to lnd for send/receive */
#define LNET_CREDIT_OK 0
/** lnet message is waiting for credit */
#define LNET_CREDIT_WAIT 1
-typedef struct {
+struct lnet_rtrbufpool {
struct list_head rbp_bufs; /* my free buffer pool */
struct list_head rbp_msgs; /* messages blocking
for a buffer */
@@ -407,13 +408,13 @@ typedef struct {
int rbp_credits; /* # free buffers /
blocked messages */
int rbp_mincredits; /* low water mark */
-} lnet_rtrbufpool_t;
+};
-typedef struct {
+struct lnet_rtrbuf {
struct list_head rb_list; /* chain on rbp_bufs */
- lnet_rtrbufpool_t *rb_pool; /* owning pool */
- lnet_kiov_t rb_kiov[0]; /* the buffer space */
-} lnet_rtrbuf_t;
+ struct lnet_rtrbufpool *rb_pool; /* owning pool */
+ struct bio_vec rb_kiov[0]; /* the buffer space */
+};
#define LNET_PEER_HASHSIZE 503 /* prime! */
@@ -424,7 +425,7 @@ typedef struct {
/* # different router buffer pools */
#define LNET_NRBPOOLS (LNET_LARGE_BUF_IDX + 1)
-enum {
+enum lnet_match_flags {
/* Didn't match anything */
LNET_MATCHMD_NONE = (1 << 0),
/* Matched OK */
@@ -437,7 +438,7 @@ enum {
LNET_MATCHMD_FINISH = (LNET_MATCHMD_OK | LNET_MATCHMD_DROP),
};
-/* Options for lnet_portal_t::ptl_options */
+/* Options for lnet_portal::ptl_options */
#define LNET_PTL_LAZY (1 << 0)
#define LNET_PTL_MATCH_UNIQUE (1 << 1) /* unique match, for RDMA */
#define LNET_PTL_MATCH_WILDCARD (1 << 2) /* wildcard match,
@@ -446,7 +447,7 @@ enum {
/* parameter for matching operations (GET, PUT) */
struct lnet_match_info {
__u64 mi_mbits;
- lnet_process_id_t mi_id;
+ struct lnet_process_id mi_id;
unsigned int mi_opc;
unsigned int mi_portal;
unsigned int mi_rlength;
@@ -496,7 +497,7 @@ struct lnet_match_table {
/* dispatch routed PUT message by hashing source NID for wildcard portals */
#define LNET_PTL_ROTOR_HASH_RT 3
-typedef struct lnet_portal {
+struct lnet_portal {
spinlock_t ptl_lock;
unsigned int ptl_index; /* portal ID, reserved */
/* flags on this portal: lazy, unique... */
@@ -513,7 +514,7 @@ typedef struct lnet_portal {
int ptl_mt_nmaps;
/* array of active entries' cpu-partition-id */
int ptl_mt_maps[0];
-} lnet_portal_t;
+};
#define LNET_LH_HASH_BITS 12
#define LNET_LH_HASH_SIZE (1ULL << LNET_LH_HASH_BITS)
@@ -544,7 +545,7 @@ struct lnet_msg_container {
#define LNET_RC_STATE_RUNNING 1 /* started up OK */
#define LNET_RC_STATE_STOPPING 2 /* telling thread to stop */
-typedef struct {
+struct lnet {
/* CPU partition table of LNet */
struct cfs_cpt_table *ln_cpt_table;
/* number of CPTs in ln_cpt_table */
@@ -556,7 +557,7 @@ typedef struct {
/* # portals */
int ln_nportals;
/* the vector of portals */
- lnet_portal_t **ln_portals;
+ struct lnet_portal **ln_portals;
/* percpt ME containers */
struct lnet_res_container **ln_me_containers;
/* percpt MD container */
@@ -572,7 +573,7 @@ typedef struct {
struct cfs_percpt_lock *ln_net_lock;
/* percpt message containers for active/finalizing/freed message */
struct lnet_msg_container **ln_msg_containers;
- lnet_counters_t **ln_counters;
+ struct lnet_counters **ln_counters;
struct lnet_peer_table **ln_peer_tables;
/* failure simulation */
struct list_head ln_test_peers;
@@ -584,7 +585,7 @@ typedef struct {
struct list_head ln_nis_cpt;
/* dying LND instances */
struct list_head ln_nis_zombie;
- lnet_ni_t *ln_loni; /* the loopback NI */
+ struct lnet_ni *ln_loni; /* the loopback NI */
/* remote networks with routes to them */
struct list_head *ln_remote_nets_hash;
@@ -595,16 +596,16 @@ typedef struct {
/* validity stamp */
__u64 ln_routers_version;
/* percpt router buffer pools */
- lnet_rtrbufpool_t **ln_rtrpools;
+ struct lnet_rtrbufpool **ln_rtrpools;
- lnet_handle_md_t ln_ping_target_md;
- lnet_handle_eq_t ln_ping_target_eq;
+ struct lnet_handle_md ln_ping_target_md;
+ struct lnet_handle_eq ln_ping_target_eq;
struct lnet_ping_info *ln_ping_info;
/* router checker startup/shutdown state */
int ln_rc_state;
/* router checker's event queue */
- lnet_handle_eq_t ln_rc_eqh;
+ struct lnet_handle_eq ln_rc_eqh;
/* rcd still pending on net */
struct list_head ln_rcd_deathrow;
/* rcd ready for free */
@@ -647,6 +648,6 @@ typedef struct {
*/
wait_queue_head_t ln_rc_waitq;
-} lnet_t;
+};
#endif
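
With lnd_t gone from lib-types.h, an LND now declares and registers itself with the bare struct tag. The sketch below is a hypothetical out-of-tree module (all mylnd_* names are assumptions); the in-tree users such as the_o2iblnd and the_ksocklnd later in this series are converted the same way.

    static struct lnet_lnd the_mylnd = {
    	.lnd_type     = 0,               /* hypothetical LND type value */
    	.lnd_startup  = mylnd_startup,   /* int  mylnd_startup(struct lnet_ni *ni) */
    	.lnd_shutdown = mylnd_shutdown,  /* void mylnd_shutdown(struct lnet_ni *ni) */
    	.lnd_send     = mylnd_send,      /* now takes struct lnet_msg *, see lnd_send above */
    	.lnd_recv     = mylnd_recv,
    };

    static int __init mylnd_init(void)
    {
    	lnet_register_lnd(&the_mylnd);   /* prototype now takes struct lnet_lnd * */
    	return 0;
    }

    static void __exit mylnd_exit(void)
    {
    	lnet_unregister_lnd(&the_mylnd);
    }
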
diff --git a/drivers/staging/lustre/include/linux/lnet/lnetst.h b/drivers/staging/lustre/include/linux/lnet/lnetst.h
index c81c246ef221..ea736f8d5231 100644
--- a/drivers/staging/lustre/include/linux/lnet/lnetst.h
+++ b/drivers/staging/lustre/include/linux/lnet/lnetst.h
@@ -86,7 +86,7 @@ struct lst_bid {
#define LST_NODE_UNKNOWN 0x8 /* node not in session */
struct lstcon_node_ent {
- lnet_process_id_t nde_id; /* id of node */
+ struct lnet_process_id nde_id; /* id of node */
int nde_state; /* state of node */
}; /*** node entry, for list_group command */
@@ -126,7 +126,7 @@ struct lstcon_test_batch_ent {
struct lstcon_rpc_ent {
struct list_head rpe_link; /* link chain */
- lnet_process_id_t rpe_peer; /* peer's id */
+ struct lnet_process_id rpe_peer; /* peer's id */
struct timeval rpe_stamp; /* time stamp of RPC */
int rpe_state; /* peer's state */
int rpe_rpc_errno; /* RPC errno */
@@ -287,7 +287,7 @@ struct lstio_debug_args {
group|batch */
int lstio_dbg_count; /* IN: # of test nodes
to debug */
- lnet_process_id_t __user *lstio_dbg_idsp; /* IN: id of test
+ struct lnet_process_id __user *lstio_dbg_idsp; /* IN: id of test
nodes */
struct list_head __user *lstio_dbg_resultp; /* OUT: list head of
result buffer */
@@ -317,7 +317,7 @@ struct lstio_group_update_args {
int lstio_grp_nmlen; /* IN: name length */
char __user *lstio_grp_namep; /* IN: group name */
int lstio_grp_count; /* IN: # of nodes id */
- lnet_process_id_t __user *lstio_grp_idsp; /* IN: array of nodes */
+ struct lnet_process_id __user *lstio_grp_idsp; /* IN: array of nodes */
struct list_head __user *lstio_grp_resultp; /* OUT: list head of
result buffer */
};
@@ -329,7 +329,7 @@ struct lstio_group_nodes_args {
int lstio_grp_count; /* IN: # of nodes */
/** OUT: session features */
unsigned int __user *lstio_grp_featp;
- lnet_process_id_t __user *lstio_grp_idsp; /* IN: nodes */
+ struct lnet_process_id __user *lstio_grp_idsp; /* IN: nodes */
struct list_head __user *lstio_grp_resultp; /* OUT: list head of
result buffer */
};
@@ -429,7 +429,7 @@ struct lstio_stat_args {
length */
char __user *lstio_sta_namep; /* IN: group name */
int lstio_sta_count; /* IN: # of pid */
- lnet_process_id_t __user *lstio_sta_idsp; /* IN: pid */
+ struct lnet_process_id __user *lstio_sta_idsp; /* IN: pid */
struct list_head __user *lstio_sta_resultp; /* OUT: list head of
result buffer */
};
diff --git a/drivers/staging/lustre/include/linux/lnet/nidstr.h b/drivers/staging/lustre/include/linux/lnet/nidstr.h
index 937fcc9e4a30..ecdd0db04d0a 100644
--- a/drivers/staging/lustre/include/linux/lnet/nidstr.h
+++ b/drivers/staging/lustre/include/linux/lnet/nidstr.h
@@ -88,7 +88,7 @@ static inline char *libcfs_nid2str(lnet_nid_t nid)
__u32 libcfs_str2net(const char *str);
lnet_nid_t libcfs_str2nid(const char *str);
int libcfs_str2anynid(lnet_nid_t *nid, const char *str);
-char *libcfs_id2str(lnet_process_id_t id);
+char *libcfs_id2str(struct lnet_process_id id);
void cfs_free_nidlist(struct list_head *list);
int cfs_parse_nidlist(char *str, int len, struct list_head *list);
int cfs_print_nidlist(char *buffer, int count, struct list_head *list);
diff --git a/drivers/staging/lustre/include/linux/lnet/socklnd.h b/drivers/staging/lustre/include/linux/lnet/socklnd.h
index acf20ce6f403..dd5bc0e46560 100644
--- a/drivers/staging/lustre/include/linux/lnet/socklnd.h
+++ b/drivers/staging/lustre/include/linux/lnet/socklnd.h
@@ -45,7 +45,7 @@
#define SOCKLND_CONN_ACK SOCKLND_CONN_BULK_IN
-typedef struct {
+struct ksock_hello_msg {
__u32 kshm_magic; /* magic number of socklnd message */
__u32 kshm_version; /* version of socklnd message */
lnet_nid_t kshm_src_nid; /* sender's nid */
@@ -57,9 +57,9 @@ typedef struct {
__u32 kshm_ctype; /* connection type */
__u32 kshm_nips; /* # IP addrs */
__u32 kshm_ips[0]; /* IP addrs */
-} WIRE_ATTR ksock_hello_msg_t;
+} WIRE_ATTR;
-typedef struct {
+struct ksock_lnet_msg {
struct lnet_hdr ksnm_hdr; /* lnet hdr */
/*
@@ -68,17 +68,17 @@ typedef struct {
* structure definitions. lnet payload will be stored just after
* the body of structure ksock_lnet_msg_t
*/
-} WIRE_ATTR ksock_lnet_msg_t;
+} WIRE_ATTR;
-typedef struct {
+struct ksock_msg {
__u32 ksm_type; /* type of socklnd message */
__u32 ksm_csum; /* checksum if != 0 */
__u64 ksm_zc_cookies[2]; /* Zero-Copy request/ACK cookie */
union {
- ksock_lnet_msg_t lnetmsg;/* lnet message, it's empty if
+ struct ksock_lnet_msg lnetmsg; /* lnet message, it's empty if
* it's NOOP */
} WIRE_ATTR ksm_u;
-} WIRE_ATTR ksock_msg_t;
+} WIRE_ATTR;
#define KSOCK_MSG_NOOP 0xC0 /* ksm_u empty */
#define KSOCK_MSG_LNET 0xC1 /* lnet msg */
diff --git a/drivers/staging/lustre/include/linux/lnet/types.h b/drivers/staging/lustre/include/linux/lnet/types.h
index 1c8de72e6d6b..1be9b7aa7326 100644
--- a/drivers/staging/lustre/include/linux/lnet/types.h
+++ b/drivers/staging/lustre/include/linux/lnet/types.h
@@ -64,7 +64,7 @@
typedef __u64 lnet_nid_t;
/**
* ID of a process in a node. Shortened as PID to distinguish from
- * lnet_process_id_t, the global process ID.
+ * lnet_process_id, the global process ID.
*/
typedef __u32 lnet_pid_t;
@@ -114,7 +114,7 @@ static inline __u32 LNET_MKNET(__u32 type, __u32 num)
#define WIRE_ATTR __packed
-/* Packed version of lnet_process_id_t to transfer via network */
+/* Packed version of lnet_process_id to transfer via network */
struct lnet_process_id_packed {
/* node id / process id */
lnet_nid_t nid;
@@ -132,13 +132,13 @@ struct lnet_handle_wire {
__u64 wh_object_cookie;
} WIRE_ATTR;
-typedef enum {
+enum lnet_msg_type {
LNET_MSG_ACK = 0,
LNET_MSG_PUT,
LNET_MSG_GET,
LNET_MSG_REPLY,
LNET_MSG_HELLO,
-} lnet_msg_type_t;
+};
/*
* The variant fields of the portals message header are aligned on an 8
@@ -182,7 +182,7 @@ struct lnet_hdr {
lnet_nid_t src_nid;
lnet_pid_t dest_pid;
lnet_pid_t src_pid;
- __u32 type; /* lnet_msg_type_t */
+ __u32 type; /* enum lnet_msg_type */
__u32 payload_length; /* payload data to follow */
/*<------__u64 aligned------->*/
union {
@@ -250,7 +250,7 @@ struct lnet_ping_info {
struct lnet_ni_status pi_ni[0];
} WIRE_ATTR;
-typedef struct lnet_counters {
+struct lnet_counters {
__u32 msgs_alloc;
__u32 msgs_max;
__u32 errors;
@@ -262,7 +262,7 @@ typedef struct lnet_counters {
__u64 recv_length;
__u64 route_length;
__u64 drop_length;
-} WIRE_ATTR lnet_counters_t;
+} WIRE_ATTR;
#define LNET_NI_STATUS_UP 0x15aac0de
#define LNET_NI_STATUS_DOWN 0xdeadface
@@ -272,61 +272,70 @@ typedef struct lnet_counters {
/**
* Objects maintained by the LNet are accessed through handles. Handle types
- * have names of the form lnet_handle_xx_t, where xx is one of the two letter
+ * have names of the form lnet_handle_xx, where xx is one of the two letter
* object type codes ('eq' for event queue, 'md' for memory descriptor, and
- * 'me' for match entry).
- * Each type of object is given a unique handle type to enhance type checking.
- * The type lnet_handle_any_t can be used when a generic handle is needed.
- * Every handle value can be converted into a value of type lnet_handle_any_t
- * without loss of information.
+ * 'me' for match entry). Each type of object is given a unique handle type
+ * to enhance type checking.
*/
-typedef struct {
- __u64 cookie;
-} lnet_handle_any_t;
-
-typedef lnet_handle_any_t lnet_handle_eq_t;
-typedef lnet_handle_any_t lnet_handle_md_t;
-typedef lnet_handle_any_t lnet_handle_me_t;
-
#define LNET_WIRE_HANDLE_COOKIE_NONE (-1)
+struct lnet_handle_eq {
+ u64 cookie;
+};
+
/**
- * Invalidate handle \a h.
+ * Invalidate eq handle @h.
*/
-static inline void LNetInvalidateHandle(lnet_handle_any_t *h)
+static inline void LNetInvalidateEQHandle(struct lnet_handle_eq *h)
{
h->cookie = LNET_WIRE_HANDLE_COOKIE_NONE;
}
/**
- * Compare handles \a h1 and \a h2.
+ * Check whether eq handle @h is invalid.
*
- * \return 1 if handles are equal, 0 if otherwise.
+ * @return 1 if handle is invalid, 0 if valid.
*/
-static inline int LNetHandleIsEqual(lnet_handle_any_t h1, lnet_handle_any_t h2)
+static inline int LNetEQHandleIsInvalid(struct lnet_handle_eq h)
{
- return h1.cookie == h2.cookie;
+ return (LNET_WIRE_HANDLE_COOKIE_NONE == h.cookie);
}
+struct lnet_handle_md {
+ u64 cookie;
+};
+
/**
- * Check whether handle \a h is invalid.
+ * Invalidate md handle @h.
+ */
+static inline void LNetInvalidateMDHandle(struct lnet_handle_md *h)
+{
+ h->cookie = LNET_WIRE_HANDLE_COOKIE_NONE;
+}
+
+/**
+ * Check whether md handle @h is invalid.
*
- * \return 1 if handle is invalid, 0 if valid.
+ * @return 1 if handle is invalid, 0 if valid.
*/
-static inline int LNetHandleIsInvalid(lnet_handle_any_t h)
+static inline int LNetMDHandleIsInvalid(struct lnet_handle_md h)
{
- return h.cookie == LNET_WIRE_HANDLE_COOKIE_NONE;
+ return (LNET_WIRE_HANDLE_COOKIE_NONE == h.cookie);
}
+struct lnet_handle_me {
+ u64 cookie;
+};
+
/**
* Global process ID.
*/
-typedef struct {
+struct lnet_process_id {
/** node id */
lnet_nid_t nid;
/** process id */
lnet_pid_t pid;
-} lnet_process_id_t;
+};
/** @} lnet_addr */
/** \addtogroup lnet_me
@@ -337,26 +346,26 @@ typedef struct {
* Specifies whether the match entry or memory descriptor should be unlinked
* automatically (LNET_UNLINK) or not (LNET_RETAIN).
*/
-typedef enum {
+enum lnet_unlink {
LNET_RETAIN = 0,
LNET_UNLINK
-} lnet_unlink_t;
+};
/**
- * Values of the type lnet_ins_pos_t are used to control where a new match
+ * Values of the type lnet_ins_pos are used to control where a new match
* entry is inserted. The value LNET_INS_BEFORE is used to insert the new
* entry before the current entry or before the head of the list. The value
* LNET_INS_AFTER is used to insert the new entry after the current entry
* or after the last item in the list.
*/
-typedef enum {
+enum lnet_ins_pos {
/** insert ME before current position or head of the list */
LNET_INS_BEFORE,
/** insert ME after current position or tail of the list */
LNET_INS_AFTER,
/** attach ME at tail of local CPU partition ME list */
LNET_INS_LOCAL
-} lnet_ins_pos_t;
+};
/** @} lnet_me */
@@ -368,14 +377,14 @@ typedef enum {
* Defines the visible parts of a memory descriptor. Values of this type
* are used to initialize memory descriptors.
*/
-typedef struct {
+struct lnet_md {
/**
* Specify the memory region associated with the memory descriptor.
* If the options field has:
* - LNET_MD_KIOV bit set: The start field points to the starting
- * address of an array of lnet_kiov_t and the length field specifies
+ * address of an array of struct bio_vec and the length field specifies
* the number of entries in the array. The length can't be bigger
- * than LNET_MAX_IOV. The lnet_kiov_t is used to describe page-based
+ * than LNET_MAX_IOV. The struct bio_vec is used to describe page-based
* fragments that are not necessarily mapped in virtual memory.
* - LNET_MD_IOVEC bit set: The start field points to the starting
* address of an array of struct iovec and the length field specifies
@@ -435,7 +444,7 @@ typedef struct {
* acknowledgment. Acknowledgments are never sent for GET operations.
* The data sent in the REPLY serves as an implicit acknowledgment.
* - LNET_MD_KIOV: The start and length fields specify an array of
- * lnet_kiov_t.
+ * struct bio_vec.
* - LNET_MD_IOVEC: The start and length fields specify an array of
* struct iovec.
* - LNET_MD_MAX_SIZE: The max_size field is valid.
@@ -461,8 +470,8 @@ typedef struct {
* by LNetInvalidateHandle()), operations performed on this memory
* descriptor are not logged.
*/
- lnet_handle_eq_t eq_handle;
-} lnet_md_t;
+ struct lnet_handle_eq eq_handle;
+};
/*
* Max Transfer Unit (minimum supported everywhere).
@@ -476,35 +485,31 @@ typedef struct {
#define LNET_MAX_IOV 256
/**
- * Options for the MD structure. See lnet_md_t::options.
+ * Options for the MD structure. See lnet_md::options.
*/
#define LNET_MD_OP_PUT (1 << 0)
-/** See lnet_md_t::options. */
+/** See lnet_md::options. */
#define LNET_MD_OP_GET (1 << 1)
-/** See lnet_md_t::options. */
+/** See lnet_md::options. */
#define LNET_MD_MANAGE_REMOTE (1 << 2)
/* unused (1 << 3) */
-/** See lnet_md_t::options. */
+/** See lnet_md::options. */
#define LNET_MD_TRUNCATE (1 << 4)
-/** See lnet_md_t::options. */
+/** See lnet_md::options. */
#define LNET_MD_ACK_DISABLE (1 << 5)
-/** See lnet_md_t::options. */
+/** See lnet_md::options. */
#define LNET_MD_IOVEC (1 << 6)
-/** See lnet_md_t::options. */
+/** See lnet_md::options. */
#define LNET_MD_MAX_SIZE (1 << 7)
-/** See lnet_md_t::options. */
+/** See lnet_md::options. */
#define LNET_MD_KIOV (1 << 8)
/* For compatibility with Cray Portals */
#define LNET_MD_PHYS 0
-/** Infinite threshold on MD operations. See lnet_md_t::threshold */
+/** Infinite threshold on MD operations. See lnet_md::threshold */
#define LNET_MD_THRESH_INF (-1)
-/* NB lustre portals uses struct iovec internally! */
-typedef struct iovec lnet_md_iovec_t;
-
-typedef struct bio_vec lnet_kiov_t;
/** @} lnet_md */
/** \addtogroup lnet_eq
@@ -514,7 +519,7 @@ typedef struct bio_vec lnet_kiov_t;
/**
* Six types of events can be logged in an event queue.
*/
-typedef enum {
+enum lnet_event_kind {
/** An incoming GET operation has completed on the MD. */
LNET_EVENT_GET = 1,
/**
@@ -550,20 +555,18 @@ typedef enum {
* \see LNetMDUnlink
*/
LNET_EVENT_UNLINK,
-} lnet_event_kind_t;
+};
-#define LNET_SEQ_BASETYPE long
-typedef unsigned LNET_SEQ_BASETYPE lnet_seq_t;
-#define LNET_SEQ_GT(a, b) (((signed LNET_SEQ_BASETYPE)((a) - (b))) > 0)
+#define LNET_SEQ_GT(a, b) (((signed long)((a) - (b))) > 0)
/**
* Information about an event on a MD.
*/
-typedef struct {
+struct lnet_event {
/** The identifier (nid, pid) of the target. */
- lnet_process_id_t target;
+ struct lnet_process_id target;
/** The identifier (nid, pid) of the initiator. */
- lnet_process_id_t initiator;
+ struct lnet_process_id initiator;
/**
* The NID of the immediate sender. If the request has been forwarded
* by routers, this is the NID of the last hop; otherwise it's the
@@ -571,7 +574,7 @@ typedef struct {
*/
lnet_nid_t sender;
/** Indicates the type of the event. */
- lnet_event_kind_t type;
+ enum lnet_event_kind type;
/** The portal table index specified in the request */
unsigned int pt_index;
/** A copy of the match bits specified in the request. */
@@ -582,7 +585,7 @@ typedef struct {
* The length (in bytes) of the data that was manipulated by the
* operation. For truncated operations, the manipulated length will be
* the number of bytes specified by the MD (possibly with an offset,
- * see lnet_md_t). For all other operations, the manipulated length
+ * see lnet_md). For all other operations, the manipulated length
* will be the length of the requested operation, i.e. rlength.
*/
unsigned int mlength;
@@ -590,13 +593,13 @@ typedef struct {
* The handle to the MD associated with the event. The handle may be
* invalid if the MD has been unlinked.
*/
- lnet_handle_md_t md_handle;
+ struct lnet_handle_md md_handle;
/**
* A snapshot of the state of the MD immediately after the event has
* been processed. In particular, the threshold field in md will
* reflect the value of the threshold after the operation occurred.
*/
- lnet_md_t md;
+ struct lnet_md md;
/**
* 64 bits of out-of-band user data. Only valid for LNET_EVENT_PUT.
* \see LNetPut
@@ -618,15 +621,15 @@ typedef struct {
* The displacement (in bytes) into the memory region that the
* operation used. The offset can be determined by the operation for
* a remote managed MD or by the local MD.
- * \see lnet_md_t::options
+ * \see lnet_md::options
*/
unsigned int offset;
/**
* The sequence number for this event. Sequence numbers are unique
* to each event.
*/
- volatile lnet_seq_t sequence;
-} lnet_event_t;
+ volatile unsigned long sequence;
+};
/**
* Event queue handler function type.
@@ -638,7 +641,7 @@ typedef struct {
* The handler must not block, must be reentrant, and must not call any LNet
* API functions. It should return as quickly as possible.
*/
-typedef void (*lnet_eq_handler_t)(lnet_event_t *event);
+typedef void (*lnet_eq_handler_t)(struct lnet_event *event);
#define LNET_EQ_HANDLER_NONE NULL
/** @} lnet_eq */
@@ -651,15 +654,15 @@ typedef void (*lnet_eq_handler_t)(lnet_event_t *event);
* operation completes (i.e., when the data has been written to a MD of the
* target process).
*
- * \see lnet_md_t::options for the discussion on LNET_MD_ACK_DISABLE by which
+ * \see lnet_md::options for the discussion on LNET_MD_ACK_DISABLE by which
* acknowledgments can be disabled for a MD.
*/
-typedef enum {
+enum lnet_ack_req {
/** Request an acknowledgment */
LNET_ACK_REQ,
/** Request that no acknowledgment should be generated. */
LNET_NOACK_REQ
-} lnet_ack_req_t;
+};
/** @} lnet_data */
/** @} lnet */
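
For readers tracking the handle rework in types.h: a minimal sketch (hypothetical caller, variable names assumed) of how a user of the old generic lnet_handle_any_t API maps onto the per-type handles and helpers introduced above.

    struct lnet_handle_md mdh;
    struct lnet_handle_eq eqh;

    LNetInvalidateMDHandle(&mdh);        /* was LNetInvalidateHandle(&mdh) */
    LNetInvalidateEQHandle(&eqh);        /* was LNetInvalidateHandle(&eqh) */

    if (LNetMDHandleIsInvalid(mdh)) {    /* was LNetHandleIsInvalid(mdh) */
    	/* no MD attached yet */
    }
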
diff --git a/drivers/staging/lustre/lnet/Kconfig b/drivers/staging/lustre/lnet/Kconfig
index 13b43278a38d..2b5930150cda 100644
--- a/drivers/staging/lustre/lnet/Kconfig
+++ b/drivers/staging/lustre/lnet/Kconfig
@@ -35,7 +35,6 @@ config LNET_SELFTEST
config LNET_XPRT_IB
tristate "LNET infiniband support"
depends on LNET && INFINIBAND && INFINIBAND_ADDR_TRANS
- depends on BROKEN
default LNET && INFINIBAND
help
This option allows the LNET users to use infiniband as an
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
index b1e8508f9fc7..79321e4aaf30 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
@@ -38,7 +38,7 @@
#include <asm/page.h>
#include "o2iblnd.h"
-static lnd_t the_o2iblnd;
+static struct lnet_lnd the_o2iblnd;
struct kib_data kiblnd_data;
@@ -174,7 +174,7 @@ static int kiblnd_unpack_rd(struct kib_msg *msg, int flip)
return 0;
}
-void kiblnd_pack_msg(lnet_ni_t *ni, struct kib_msg *msg, int version,
+void kiblnd_pack_msg(struct lnet_ni *ni, struct kib_msg *msg, int version,
int credits, lnet_nid_t dstnid, __u64 dststamp)
{
struct kib_net *net = ni->ni_data;
@@ -313,7 +313,8 @@ int kiblnd_unpack_msg(struct kib_msg *msg, int nob)
return 0;
}
-int kiblnd_create_peer(lnet_ni_t *ni, struct kib_peer **peerp, lnet_nid_t nid)
+int kiblnd_create_peer(struct lnet_ni *ni, struct kib_peer **peerp,
+ lnet_nid_t nid)
{
struct kib_peer *peer;
struct kib_net *net = ni->ni_data;
@@ -412,7 +413,7 @@ void kiblnd_unlink_peer_locked(struct kib_peer *peer)
kiblnd_peer_decref(peer);
}
-static int kiblnd_get_peer_info(lnet_ni_t *ni, int index,
+static int kiblnd_get_peer_info(struct lnet_ni *ni, int index,
lnet_nid_t *nidp, int *count)
{
struct kib_peer *peer;
@@ -468,7 +469,7 @@ static void kiblnd_del_peer_locked(struct kib_peer *peer)
*/
}
-static int kiblnd_del_peer(lnet_ni_t *ni, lnet_nid_t nid)
+static int kiblnd_del_peer(struct lnet_ni *ni, lnet_nid_t nid)
{
LIST_HEAD(zombies);
struct list_head *ptmp;
@@ -520,7 +521,7 @@ static int kiblnd_del_peer(lnet_ni_t *ni, lnet_nid_t nid)
return rc;
}
-static struct kib_conn *kiblnd_get_conn_by_idx(lnet_ni_t *ni, int index)
+static struct kib_conn *kiblnd_get_conn_by_idx(struct lnet_ni *ni, int index)
{
struct kib_peer *peer;
struct list_head *ptmp;
@@ -947,7 +948,7 @@ int kiblnd_close_stale_conns_locked(struct kib_peer *peer,
return count;
}
-static int kiblnd_close_matching_conns(lnet_ni_t *ni, lnet_nid_t nid)
+static int kiblnd_close_matching_conns(struct lnet_ni *ni, lnet_nid_t nid)
{
struct kib_peer *peer;
struct list_head *ptmp;
@@ -992,7 +993,7 @@ static int kiblnd_close_matching_conns(lnet_ni_t *ni, lnet_nid_t nid)
return !count ? -ENOENT : 0;
}
-static int kiblnd_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg)
+static int kiblnd_ctl(struct lnet_ni *ni, unsigned int cmd, void *arg)
{
struct libcfs_ioctl_data *data = arg;
int rc = -EINVAL;
@@ -1045,7 +1046,8 @@ static int kiblnd_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg)
return rc;
}
-static void kiblnd_query(lnet_ni_t *ni, lnet_nid_t nid, unsigned long *when)
+static void kiblnd_query(struct lnet_ni *ni, lnet_nid_t nid,
+ unsigned long *when)
{
unsigned long last_alive = 0;
unsigned long now = cfs_time_current();
@@ -1281,27 +1283,6 @@ static void kiblnd_map_tx_pool(struct kib_tx_pool *tpo)
}
}
-struct ib_mr *kiblnd_find_rd_dma_mr(struct lnet_ni *ni, struct kib_rdma_desc *rd,
- int negotiated_nfrags)
-{
- struct kib_net *net = ni->ni_data;
- struct kib_hca_dev *hdev = net->ibn_dev->ibd_hdev;
- struct lnet_ioctl_config_o2iblnd_tunables *tunables;
- __u16 nfrags;
- int mod;
-
- tunables = &ni->ni_lnd_tunables->lt_tun_u.lt_o2ib;
- mod = tunables->lnd_map_on_demand;
- nfrags = (negotiated_nfrags != -1) ? negotiated_nfrags : mod;
-
- LASSERT(hdev->ibh_mrs);
-
- if (mod > 0 && nfrags <= rd->rd_nfrags)
- return NULL;
-
- return hdev->ibh_mrs;
-}
-
static void kiblnd_destroy_fmr_pool(struct kib_fmr_pool *fpo)
{
LASSERT(!fpo->fpo_map_count);
@@ -2058,7 +2039,7 @@ static int kiblnd_create_tx_pool(struct kib_poolset *ps, int size,
tpo->tpo_tx_descs = NULL;
tpo->tpo_tx_pages = NULL;
- npg = (size * IBLND_MSG_SIZE + PAGE_SIZE - 1) / PAGE_SIZE;
+ npg = DIV_ROUND_UP(size * IBLND_MSG_SIZE, PAGE_SIZE);
if (kiblnd_alloc_pages(&tpo->tpo_tx_pages, ps->ps_cpt, npg)) {
CERROR("Can't allocate tx pages: %d\n", npg);
LIBCFS_FREE(tpo, sizeof(*tpo));
@@ -2164,25 +2145,16 @@ static void kiblnd_net_fini_pools(struct kib_net *net)
}
}
-static int kiblnd_net_init_pools(struct kib_net *net, lnet_ni_t *ni, __u32 *cpts,
- int ncpts)
+static int kiblnd_net_init_pools(struct kib_net *net, struct lnet_ni *ni,
+ __u32 *cpts, int ncpts)
{
struct lnet_ioctl_config_o2iblnd_tunables *tunables;
- unsigned long flags;
int cpt;
int rc;
int i;
tunables = &ni->ni_lnd_tunables->lt_tun_u.lt_o2ib;
- read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
- if (!tunables->lnd_map_on_demand) {
- read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
- goto create_tx_pool;
- }
-
- read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
-
if (tunables->lnd_fmr_pool_size < *kiblnd_tunables.kib_ntx / 4) {
CERROR("Can't set fmr pool size (%d) < ntx / 4(%d)\n",
tunables->lnd_fmr_pool_size,
@@ -2227,7 +2199,6 @@ static int kiblnd_net_init_pools(struct kib_net *net, lnet_ni_t *ni, __u32 *cpts
if (i > 0)
LASSERT(i == ncpts);
- create_tx_pool:
/*
* cfs_precpt_alloc is creating an array of struct kib_tx_poolset
* The number of struct kib_tx_poolsets create is equal to the
@@ -2283,20 +2254,8 @@ static int kiblnd_hdev_get_attr(struct kib_hca_dev *hdev)
return -EINVAL;
}
-static void kiblnd_hdev_cleanup_mrs(struct kib_hca_dev *hdev)
-{
- if (!hdev->ibh_mrs)
- return;
-
- ib_dereg_mr(hdev->ibh_mrs);
-
- hdev->ibh_mrs = NULL;
-}
-
void kiblnd_hdev_destroy(struct kib_hca_dev *hdev)
{
- kiblnd_hdev_cleanup_mrs(hdev);
-
if (hdev->ibh_pd)
ib_dealloc_pd(hdev->ibh_pd);
@@ -2306,28 +2265,6 @@ void kiblnd_hdev_destroy(struct kib_hca_dev *hdev)
LIBCFS_FREE(hdev, sizeof(*hdev));
}
-static int kiblnd_hdev_setup_mrs(struct kib_hca_dev *hdev)
-{
- struct ib_mr *mr;
- int rc;
- int acflags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE;
-
- rc = kiblnd_hdev_get_attr(hdev);
- if (rc)
- return rc;
-
- mr = ib_get_dma_mr(hdev->ibh_pd, acflags);
- if (IS_ERR(mr)) {
- CERROR("Failed ib_get_dma_mr : %ld\n", PTR_ERR(mr));
- kiblnd_hdev_cleanup_mrs(hdev);
- return PTR_ERR(mr);
- }
-
- hdev->ibh_mrs = mr;
-
- return 0;
-}
-
/* DUMMY */
static int kiblnd_dummy_callback(struct rdma_cm_id *cmid,
struct rdma_cm_event *event)
@@ -2482,9 +2419,9 @@ int kiblnd_dev_failover(struct kib_dev *dev)
goto out;
}
- rc = kiblnd_hdev_setup_mrs(hdev);
+ rc = kiblnd_hdev_get_attr(hdev);
if (rc) {
- CERROR("Can't setup device: %d\n", rc);
+ CERROR("Can't get device attributes: %d\n", rc);
goto out;
}
@@ -2652,7 +2589,7 @@ static void kiblnd_base_shutdown(void)
module_put(THIS_MODULE);
}
-static void kiblnd_shutdown(lnet_ni_t *ni)
+static void kiblnd_shutdown(struct lnet_ni *ni)
{
struct kib_net *net = ni->ni_data;
rwlock_t *g_lock = &kiblnd_data.kib_global_lock;
@@ -2909,7 +2846,7 @@ static struct kib_dev *kiblnd_dev_search(char *ifname)
return alias;
}
-static int kiblnd_startup(lnet_ni_t *ni)
+static int kiblnd_startup(struct lnet_ni *ni)
{
char *ifname;
struct kib_dev *ibdev = NULL;
@@ -3003,7 +2940,7 @@ net_failed:
return -ENETDOWN;
}
-static lnd_t the_o2iblnd = {
+static struct lnet_lnd the_o2iblnd = {
.lnd_type = O2IBLND,
.lnd_startup = kiblnd_startup,
.lnd_shutdown = kiblnd_shutdown,
@@ -3021,12 +2958,12 @@ static void __exit ko2iblnd_exit(void)
static int __init ko2iblnd_init(void)
{
BUILD_BUG_ON(sizeof(struct kib_msg) > IBLND_MSG_SIZE);
- BUILD_BUG_ON(!offsetof(struct kib_msg,
+ BUILD_BUG_ON(offsetof(struct kib_msg,
ibm_u.get.ibgm_rd.rd_frags[IBLND_MAX_RDMA_FRAGS])
- <= IBLND_MSG_SIZE);
- BUILD_BUG_ON(!offsetof(struct kib_msg,
+ > IBLND_MSG_SIZE);
+ BUILD_BUG_ON(offsetof(struct kib_msg,
ibm_u.putack.ibpam_rd.rd_frags[IBLND_MAX_RDMA_FRAGS])
- <= IBLND_MSG_SIZE);
+ > IBLND_MSG_SIZE);
kiblnd_tunables_init();
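
One note on the BUILD_BUG_ON change in ko2iblnd_init() above: logical NOT binds tighter than a relational operator, so the removed form never tested the intended bound. An illustration only (using the simpler ibm_u member from this file, not the full rd_frags expression):

    /* intended invariant: the message layout must fit in IBLND_MSG_SIZE */
    BUILD_BUG_ON(offsetof(struct kib_msg, ibm_u) > IBLND_MSG_SIZE);      /* clear */
    BUILD_BUG_ON(!(offsetof(struct kib_msg, ibm_u) <= IBLND_MSG_SIZE));  /* equivalent */
    /*
     * but  BUILD_BUG_ON(!offsetof(...) <= IBLND_MSG_SIZE)  parses as
     *      BUILD_BUG_ON((!offsetof(...)) <= IBLND_MSG_SIZE),
     * which for any non-zero offset is BUILD_BUG_ON(0 <= IBLND_MSG_SIZE),
     * i.e. it fires unconditionally instead of checking the bound.
     */
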
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h
index 2cb429830681..16e437b3ad1e 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h
@@ -172,7 +172,6 @@ struct kib_hca_dev {
__u64 ibh_page_mask; /* page mask of current HCA */
int ibh_mr_shift; /* bits shift of max MR size */
__u64 ibh_mr_size; /* size of MR */
- struct ib_mr *ibh_mrs; /* global MR */
struct ib_pd *ibh_pd; /* PD */
struct kib_dev *ibh_dev; /* owner */
atomic_t ibh_ref; /* refcount */
@@ -491,7 +490,7 @@ struct kib_tx { /* transmit message */
int tx_status; /* LNET completion status */
unsigned long tx_deadline; /* completion deadline */
__u64 tx_cookie; /* completion cookie */
- lnet_msg_t *tx_lntmsg[2]; /* lnet msgs to finalize on completion */
+ struct lnet_msg *tx_lntmsg[2]; /* lnet msgs to finalize on completion */
struct kib_msg *tx_msg; /* message buffer (host vaddr) */
__u64 tx_msgaddr; /* message buffer (I/O addr) */
DECLARE_PCI_UNMAP_ADDR(tx_msgunmap); /* for dma_unmap_single() */
@@ -567,7 +566,7 @@ struct kib_conn {
struct kib_peer {
struct list_head ibp_list; /* stash on global peer list */
lnet_nid_t ibp_nid; /* who's on the other end(s) */
- lnet_ni_t *ibp_ni; /* LNet interface */
+ struct lnet_ni *ibp_ni; /* LNet interface */
struct list_head ibp_conns; /* all active connections */
struct list_head ibp_tx_queue; /* msgs waiting for a conn */
__u64 ibp_incarnation; /* incarnation of peer */
@@ -764,7 +763,7 @@ static inline int
kiblnd_need_noop(struct kib_conn *conn)
{
struct lnet_ioctl_config_o2iblnd_tunables *tunables;
- lnet_ni_t *ni = conn->ibc_peer->ibp_ni;
+ struct lnet_ni *ni = conn->ibc_peer->ibp_ni;
LASSERT(conn->ibc_state >= IBLND_CONN_ESTABLISHED);
tunables = &ni->ni_lnd_tunables->lt_tun_u.lt_o2ib;
@@ -978,8 +977,6 @@ static inline unsigned int kiblnd_sg_dma_len(struct ib_device *dev,
#define KIBLND_CONN_PARAM(e) ((e)->param.conn.private_data)
#define KIBLND_CONN_PARAM_LEN(e) ((e)->param.conn.private_data_len)
-struct ib_mr *kiblnd_find_rd_dma_mr(struct lnet_ni *ni, struct kib_rdma_desc *rd,
- int negotiated_nfrags);
void kiblnd_map_rx_descs(struct kib_conn *conn);
void kiblnd_unmap_rx_descs(struct kib_conn *conn);
void kiblnd_pool_free_node(struct kib_pool *pool, struct list_head *node);
@@ -1005,7 +1002,8 @@ int kiblnd_cm_callback(struct rdma_cm_id *cmid,
int kiblnd_translate_mtu(int value);
int kiblnd_dev_failover(struct kib_dev *dev);
-int kiblnd_create_peer(lnet_ni_t *ni, struct kib_peer **peerp, lnet_nid_t nid);
+int kiblnd_create_peer(struct lnet_ni *ni, struct kib_peer **peerp,
+ lnet_nid_t nid);
void kiblnd_destroy_peer(struct kib_peer *peer);
bool kiblnd_reconnect_peer(struct kib_peer *peer);
void kiblnd_destroy_dev(struct kib_dev *dev);
@@ -1022,19 +1020,19 @@ void kiblnd_destroy_conn(struct kib_conn *conn, bool free_conn);
void kiblnd_close_conn(struct kib_conn *conn, int error);
void kiblnd_close_conn_locked(struct kib_conn *conn, int error);
-void kiblnd_launch_tx(lnet_ni_t *ni, struct kib_tx *tx, lnet_nid_t nid);
-void kiblnd_txlist_done(lnet_ni_t *ni, struct list_head *txlist,
+void kiblnd_launch_tx(struct lnet_ni *ni, struct kib_tx *tx, lnet_nid_t nid);
+void kiblnd_txlist_done(struct lnet_ni *ni, struct list_head *txlist,
int status);
void kiblnd_qp_event(struct ib_event *event, void *arg);
void kiblnd_cq_event(struct ib_event *event, void *arg);
void kiblnd_cq_completion(struct ib_cq *cq, void *arg);
-void kiblnd_pack_msg(lnet_ni_t *ni, struct kib_msg *msg, int version,
+void kiblnd_pack_msg(struct lnet_ni *ni, struct kib_msg *msg, int version,
int credits, lnet_nid_t dstnid, __u64 dststamp);
int kiblnd_unpack_msg(struct kib_msg *msg, int nob);
int kiblnd_post_rx(struct kib_rx *rx, int credit);
-int kiblnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg);
-int kiblnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed,
- struct iov_iter *to, unsigned int rlen);
+int kiblnd_send(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg);
+int kiblnd_recv(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg,
+ int delayed, struct iov_iter *to, unsigned int rlen);
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
index e2f3f7294260..0db662d6abdd 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
@@ -40,20 +40,20 @@
static void kiblnd_peer_alive(struct kib_peer *peer);
static void kiblnd_peer_connect_failed(struct kib_peer *peer, int active, int error);
-static void kiblnd_init_tx_msg(lnet_ni_t *ni, struct kib_tx *tx,
+static void kiblnd_init_tx_msg(struct lnet_ni *ni, struct kib_tx *tx,
int type, int body_nob);
static int kiblnd_init_rdma(struct kib_conn *conn, struct kib_tx *tx, int type,
int resid, struct kib_rdma_desc *dstrd,
__u64 dstcookie);
static void kiblnd_queue_tx_locked(struct kib_tx *tx, struct kib_conn *conn);
static void kiblnd_queue_tx(struct kib_tx *tx, struct kib_conn *conn);
-static void kiblnd_unmap_tx(lnet_ni_t *ni, struct kib_tx *tx);
+static void kiblnd_unmap_tx(struct lnet_ni *ni, struct kib_tx *tx);
static void kiblnd_check_sends_locked(struct kib_conn *conn);
static void
-kiblnd_tx_done(lnet_ni_t *ni, struct kib_tx *tx)
+kiblnd_tx_done(struct lnet_ni *ni, struct kib_tx *tx)
{
- lnet_msg_t *lntmsg[2];
+ struct lnet_msg *lntmsg[2];
struct kib_net *net = ni->ni_data;
int rc;
int i;
@@ -94,7 +94,7 @@ kiblnd_tx_done(lnet_ni_t *ni, struct kib_tx *tx)
}
void
-kiblnd_txlist_done(lnet_ni_t *ni, struct list_head *txlist, int status)
+kiblnd_txlist_done(struct lnet_ni *ni, struct list_head *txlist, int status)
{
struct kib_tx *tx;
@@ -110,7 +110,7 @@ kiblnd_txlist_done(lnet_ni_t *ni, struct list_head *txlist, int status)
}
static struct kib_tx *
-kiblnd_get_idle_tx(lnet_ni_t *ni, lnet_nid_t target)
+kiblnd_get_idle_tx(struct lnet_ni *ni, lnet_nid_t target)
{
struct kib_net *net = (struct kib_net *)ni->ni_data;
struct list_head *node;
@@ -157,7 +157,6 @@ kiblnd_post_rx(struct kib_rx *rx, int credit)
struct kib_conn *conn = rx->rx_conn;
struct kib_net *net = conn->ibc_peer->ibp_ni->ni_data;
struct ib_recv_wr *bad_wrq = NULL;
- struct ib_mr *mr = conn->ibc_hdev->ibh_mrs;
int rc;
LASSERT(net);
@@ -165,9 +164,8 @@ kiblnd_post_rx(struct kib_rx *rx, int credit)
LASSERT(credit == IBLND_POSTRX_NO_CREDIT ||
credit == IBLND_POSTRX_PEER_CREDIT ||
credit == IBLND_POSTRX_RSRVD_CREDIT);
- LASSERT(mr);
- rx->rx_sge.lkey = mr->lkey;
+ rx->rx_sge.lkey = conn->ibc_hdev->ibh_pd->local_dma_lkey;
rx->rx_sge.addr = rx->rx_msgaddr;
rx->rx_sge.length = IBLND_MSG_SIZE;
@@ -251,7 +249,7 @@ static void
kiblnd_handle_completion(struct kib_conn *conn, int txtype, int status, __u64 cookie)
{
struct kib_tx *tx;
- lnet_ni_t *ni = conn->ibc_peer->ibp_ni;
+ struct lnet_ni *ni = conn->ibc_peer->ibp_ni;
int idle;
spin_lock(&conn->ibc_lock);
@@ -288,7 +286,7 @@ kiblnd_handle_completion(struct kib_conn *conn, int txtype, int status, __u64 co
static void
kiblnd_send_completion(struct kib_conn *conn, int type, int status, __u64 cookie)
{
- lnet_ni_t *ni = conn->ibc_peer->ibp_ni;
+ struct lnet_ni *ni = conn->ibc_peer->ibp_ni;
struct kib_tx *tx = kiblnd_get_idle_tx(ni, conn->ibc_peer->ibp_nid);
if (!tx) {
@@ -309,7 +307,7 @@ kiblnd_handle_rx(struct kib_rx *rx)
{
struct kib_msg *msg = rx->rx_msg;
struct kib_conn *conn = rx->rx_conn;
- lnet_ni_t *ni = conn->ibc_peer->ibp_ni;
+ struct lnet_ni *ni = conn->ibc_peer->ibp_ni;
int credits = msg->ibm_credits;
struct kib_tx *tx;
int rc = 0;
@@ -470,7 +468,7 @@ kiblnd_rx_complete(struct kib_rx *rx, int status, int nob)
{
struct kib_msg *msg = rx->rx_msg;
struct kib_conn *conn = rx->rx_conn;
- lnet_ni_t *ni = conn->ibc_peer->ibp_ni;
+ struct lnet_ni *ni = conn->ibc_peer->ibp_ni;
struct kib_net *net = ni->ni_data;
int rc;
int err = -EIO;
@@ -592,7 +590,7 @@ kiblnd_fmr_map_tx(struct kib_net *net, struct kib_tx *tx, struct kib_rdma_desc *
return 0;
}
-static void kiblnd_unmap_tx(lnet_ni_t *ni, struct kib_tx *tx)
+static void kiblnd_unmap_tx(struct lnet_ni *ni, struct kib_tx *tx)
{
struct kib_net *net = ni->ni_data;
@@ -608,12 +606,11 @@ static void kiblnd_unmap_tx(lnet_ni_t *ni, struct kib_tx *tx)
}
}
-static int kiblnd_map_tx(lnet_ni_t *ni, struct kib_tx *tx, struct kib_rdma_desc *rd,
- int nfrags)
+static int kiblnd_map_tx(struct lnet_ni *ni, struct kib_tx *tx,
+ struct kib_rdma_desc *rd, int nfrags)
{
struct kib_net *net = ni->ni_data;
struct kib_hca_dev *hdev = net->ibn_dev->ibd_hdev;
- struct ib_mr *mr = NULL;
__u32 nob;
int i;
@@ -635,14 +632,6 @@ static int kiblnd_map_tx(lnet_ni_t *ni, struct kib_tx *tx, struct kib_rdma_desc
nob += rd->rd_frags[i].rf_nob;
}
- mr = kiblnd_find_rd_dma_mr(ni, rd, tx->tx_conn ?
- tx->tx_conn->ibc_max_frags : -1);
- if (mr) {
- /* found pre-mapping MR */
- rd->rd_key = (rd != tx->tx_rd) ? mr->rkey : mr->lkey;
- return 0;
- }
-
if (net->ibn_fmr_ps)
return kiblnd_fmr_map_tx(net, tx, rd, nob);
@@ -650,8 +639,9 @@ static int kiblnd_map_tx(lnet_ni_t *ni, struct kib_tx *tx, struct kib_rdma_desc
}
static int
-kiblnd_setup_rd_iov(lnet_ni_t *ni, struct kib_tx *tx, struct kib_rdma_desc *rd,
- unsigned int niov, const struct kvec *iov, int offset, int nob)
+kiblnd_setup_rd_iov(struct lnet_ni *ni, struct kib_tx *tx,
+ struct kib_rdma_desc *rd, unsigned int niov,
+ const struct kvec *iov, int offset, int nob)
{
struct kib_net *net = ni->ni_data;
struct page *page;
@@ -707,8 +697,9 @@ kiblnd_setup_rd_iov(lnet_ni_t *ni, struct kib_tx *tx, struct kib_rdma_desc *rd,
}
static int
-kiblnd_setup_rd_kiov(lnet_ni_t *ni, struct kib_tx *tx, struct kib_rdma_desc *rd,
- int nkiov, const lnet_kiov_t *kiov, int offset, int nob)
+kiblnd_setup_rd_kiov(struct lnet_ni *ni, struct kib_tx *tx,
+ struct kib_rdma_desc *rd, int nkiov,
+ const struct bio_vec *kiov, int offset, int nob)
{
struct kib_net *net = ni->ni_data;
struct scatterlist *sg;
@@ -910,7 +901,7 @@ static void
kiblnd_check_sends_locked(struct kib_conn *conn)
{
int ver = conn->ibc_version;
- lnet_ni_t *ni = conn->ibc_peer->ibp_ni;
+ struct lnet_ni *ni = conn->ibc_peer->ibp_ni;
struct kib_tx *tx;
/* Don't send anything until after the connection is established */
@@ -1022,22 +1013,21 @@ kiblnd_tx_complete(struct kib_tx *tx, int status)
}
static void
-kiblnd_init_tx_msg(lnet_ni_t *ni, struct kib_tx *tx, int type, int body_nob)
+kiblnd_init_tx_msg(struct lnet_ni *ni, struct kib_tx *tx, int type,
+ int body_nob)
{
struct kib_hca_dev *hdev = tx->tx_pool->tpo_hdev;
struct ib_sge *sge = &tx->tx_sge[tx->tx_nwrq];
struct ib_rdma_wr *wrq = &tx->tx_wrq[tx->tx_nwrq];
int nob = offsetof(struct kib_msg, ibm_u) + body_nob;
- struct ib_mr *mr = hdev->ibh_mrs;
LASSERT(tx->tx_nwrq >= 0);
LASSERT(tx->tx_nwrq < IBLND_MAX_RDMA_FRAGS + 1);
LASSERT(nob <= IBLND_MSG_SIZE);
- LASSERT(mr);
kiblnd_init_msg(tx->tx_msg, type, body_nob);
- sge->lkey = mr->lkey;
+ sge->lkey = hdev->ibh_pd->local_dma_lkey;
sge->addr = tx->tx_msgaddr;
sge->length = nob;
@@ -1103,9 +1093,9 @@ kiblnd_init_rdma(struct kib_conn *conn, struct kib_tx *tx, int type,
break;
}
- wrknob = min(min(kiblnd_rd_frag_size(srcrd, srcidx),
- kiblnd_rd_frag_size(dstrd, dstidx)),
- (__u32)resid);
+ wrknob = min3(kiblnd_rd_frag_size(srcrd, srcidx),
+ kiblnd_rd_frag_size(dstrd, dstidx),
+ (__u32)resid);
sge = &tx->tx_sge[tx->tx_nwrq];
sge->addr = kiblnd_rd_frag_addr(srcrd, srcidx);
@@ -1366,7 +1356,7 @@ no_reconnect:
}
void
-kiblnd_launch_tx(lnet_ni_t *ni, struct kib_tx *tx, lnet_nid_t nid)
+kiblnd_launch_tx(struct lnet_ni *ni, struct kib_tx *tx, lnet_nid_t nid)
{
struct kib_peer *peer;
struct kib_peer *peer2;
@@ -1488,16 +1478,16 @@ kiblnd_launch_tx(lnet_ni_t *ni, struct kib_tx *tx, lnet_nid_t nid)
}
int
-kiblnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
+kiblnd_send(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg)
{
struct lnet_hdr *hdr = &lntmsg->msg_hdr;
int type = lntmsg->msg_type;
- lnet_process_id_t target = lntmsg->msg_target;
+ struct lnet_process_id target = lntmsg->msg_target;
int target_is_router = lntmsg->msg_target_is_router;
int routing = lntmsg->msg_routing;
unsigned int payload_niov = lntmsg->msg_niov;
struct kvec *payload_iov = lntmsg->msg_iov;
- lnet_kiov_t *payload_kiov = lntmsg->msg_kiov;
+ struct bio_vec *payload_kiov = lntmsg->msg_kiov;
unsigned int payload_offset = lntmsg->msg_offset;
unsigned int payload_nob = lntmsg->msg_len;
struct iov_iter from;
@@ -1661,12 +1651,12 @@ kiblnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
}
static void
-kiblnd_reply(lnet_ni_t *ni, struct kib_rx *rx, lnet_msg_t *lntmsg)
+kiblnd_reply(struct lnet_ni *ni, struct kib_rx *rx, struct lnet_msg *lntmsg)
{
- lnet_process_id_t target = lntmsg->msg_target;
+ struct lnet_process_id target = lntmsg->msg_target;
unsigned int niov = lntmsg->msg_niov;
struct kvec *iov = lntmsg->msg_iov;
- lnet_kiov_t *kiov = lntmsg->msg_kiov;
+ struct bio_vec *kiov = lntmsg->msg_kiov;
unsigned int offset = lntmsg->msg_offset;
unsigned int nob = lntmsg->msg_len;
struct kib_tx *tx;
@@ -1722,8 +1712,8 @@ kiblnd_reply(lnet_ni_t *ni, struct kib_rx *rx, lnet_msg_t *lntmsg)
}
int
-kiblnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed,
- struct iov_iter *to, unsigned int rlen)
+kiblnd_recv(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg,
+ int delayed, struct iov_iter *to, unsigned int rlen)
{
struct kib_rx *rx = private;
struct kib_msg *rxmsg = rx->rx_msg;
@@ -2170,7 +2160,7 @@ kiblnd_connreq_done(struct kib_conn *conn, int status)
if (!kiblnd_peer_active(peer) || /* peer has been deleted */
conn->ibc_comms_error) { /* error has happened already */
- lnet_ni_t *ni = peer->ibp_ni;
+ struct lnet_ni *ni = peer->ibp_ni;
/* start to shut down connection */
kiblnd_close_conn_locked(conn, -ECONNABORTED);
@@ -2227,7 +2217,7 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
struct kib_peer *peer;
struct kib_peer *peer2;
struct kib_conn *conn;
- lnet_ni_t *ni = NULL;
+ struct lnet_ni *ni = NULL;
struct kib_net *net = NULL;
lnet_nid_t nid;
struct rdma_conn_param cp;
@@ -2789,7 +2779,7 @@ static void
kiblnd_check_connreply(struct kib_conn *conn, void *priv, int priv_nob)
{
struct kib_peer *peer = conn->ibc_peer;
- lnet_ni_t *ni = peer->ibp_ni;
+ struct lnet_ni *ni = peer->ibp_ni;
struct kib_net *net = ni->ni_data;
struct kib_msg *msg = priv;
int ver = conn->ibc_version;
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c
index 44e960f60833..3fe4d4858eba 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c
@@ -106,7 +106,8 @@ static int concurrent_sends;
module_param(concurrent_sends, int, 0444);
MODULE_PARM_DESC(concurrent_sends, "send work-queue sizing");
-static int map_on_demand;
+#define IBLND_DEFAULT_MAP_ON_DEMAND IBLND_MAX_RDMA_FRAGS
+static int map_on_demand = IBLND_DEFAULT_MAP_ON_DEMAND;
module_param(map_on_demand, int, 0444);
MODULE_PARM_DESC(map_on_demand, "map on demand");
@@ -160,7 +161,7 @@ struct kib_tunables kiblnd_tunables = {
static struct lnet_ioctl_config_o2iblnd_tunables default_tunables;
/* # messages/RDMAs in-flight */
-int kiblnd_msg_queue_size(int version, lnet_ni_t *ni)
+int kiblnd_msg_queue_size(int version, struct lnet_ni *ni)
{
if (version == IBLND_MSG_VERSION_1)
return IBLND_MSG_QUEUE_SIZE_V1;
@@ -228,10 +229,13 @@ int kiblnd_tunables_setup(struct lnet_ni *ni)
if (tunables->lnd_peercredits_hiw >= ni->ni_peertxcredits)
tunables->lnd_peercredits_hiw = ni->ni_peertxcredits - 1;
- if (tunables->lnd_map_on_demand < 0 ||
+ if (tunables->lnd_map_on_demand <= 0 ||
tunables->lnd_map_on_demand > IBLND_MAX_RDMA_FRAGS) {
- /* disable map-on-demand */
- tunables->lnd_map_on_demand = 0;
+ /* Use the default */
+ CWARN("Invalid map_on_demand (%d), expects 1 - %d. Using default of %d\n",
+ tunables->lnd_map_on_demand,
+ IBLND_MAX_RDMA_FRAGS, IBLND_DEFAULT_MAP_ON_DEMAND);
+ tunables->lnd_map_on_demand = IBLND_DEFAULT_MAP_ON_DEMAND;
}
if (tunables->lnd_map_on_demand == 1) {
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
index f25de3d7f6e8..fbbd8a5489e9 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
@@ -39,11 +39,11 @@
#include "socklnd.h"
-static lnd_t the_ksocklnd;
+static struct lnet_lnd the_ksocklnd;
struct ksock_nal_data ksocknal_data;
static struct ksock_interface *
-ksocknal_ip2iface(lnet_ni_t *ni, __u32 ip)
+ksocknal_ip2iface(struct lnet_ni *ni, __u32 ip)
{
struct ksock_net *net = ni->ni_data;
int i;
@@ -96,8 +96,8 @@ ksocknal_destroy_route(struct ksock_route *route)
}
static int
-ksocknal_create_peer(struct ksock_peer **peerp, lnet_ni_t *ni,
- lnet_process_id_t id)
+ksocknal_create_peer(struct ksock_peer **peerp, struct lnet_ni *ni,
+ struct lnet_process_id id)
{
int cpt = lnet_cpt_of_nid(id.nid);
struct ksock_net *net = ni->ni_data;
@@ -173,7 +173,7 @@ ksocknal_destroy_peer(struct ksock_peer *peer)
}
struct ksock_peer *
-ksocknal_find_peer_locked(lnet_ni_t *ni, lnet_process_id_t id)
+ksocknal_find_peer_locked(struct lnet_ni *ni, struct lnet_process_id id)
{
struct list_head *peer_list = ksocknal_nid2peerlist(id.nid);
struct list_head *tmp;
@@ -200,7 +200,7 @@ ksocknal_find_peer_locked(lnet_ni_t *ni, lnet_process_id_t id)
}
struct ksock_peer *
-ksocknal_find_peer(lnet_ni_t *ni, lnet_process_id_t id)
+ksocknal_find_peer(struct lnet_ni *ni, struct lnet_process_id id)
{
struct ksock_peer *peer;
@@ -246,8 +246,8 @@ ksocknal_unlink_peer_locked(struct ksock_peer *peer)
}
static int
-ksocknal_get_peer_info(lnet_ni_t *ni, int index,
- lnet_process_id_t *id, __u32 *myip, __u32 *peer_ip,
+ksocknal_get_peer_info(struct lnet_ni *ni, int index,
+ struct lnet_process_id *id, __u32 *myip, __u32 *peer_ip,
int *port, int *conn_count, int *share_count)
{
struct ksock_peer *peer;
@@ -450,7 +450,8 @@ ksocknal_del_route_locked(struct ksock_route *route)
}
int
-ksocknal_add_peer(lnet_ni_t *ni, lnet_process_id_t id, __u32 ipaddr, int port)
+ksocknal_add_peer(struct lnet_ni *ni, struct lnet_process_id id, __u32 ipaddr,
+ int port)
{
struct list_head *tmp;
struct ksock_peer *peer;
@@ -568,7 +569,7 @@ ksocknal_del_peer_locked(struct ksock_peer *peer, __u32 ip)
}
static int
-ksocknal_del_peer(lnet_ni_t *ni, lnet_process_id_t id, __u32 ip)
+ksocknal_del_peer(struct lnet_ni *ni, struct lnet_process_id id, __u32 ip)
{
LIST_HEAD(zombies);
struct list_head *ptmp;
@@ -627,7 +628,7 @@ ksocknal_del_peer(lnet_ni_t *ni, lnet_process_id_t id, __u32 ip)
}
static struct ksock_conn *
-ksocknal_get_conn_by_idx(lnet_ni_t *ni, int index)
+ksocknal_get_conn_by_idx(struct lnet_ni *ni, int index)
{
struct ksock_peer *peer;
struct list_head *ptmp;
@@ -687,7 +688,7 @@ ksocknal_choose_scheduler_locked(unsigned int cpt)
}
static int
-ksocknal_local_ipvec(lnet_ni_t *ni, __u32 *ipaddrs)
+ksocknal_local_ipvec(struct lnet_ni *ni, __u32 *ipaddrs)
{
struct ksock_net *net = ni->ni_data;
int i;
@@ -866,7 +867,7 @@ ksocknal_create_routes(struct ksock_peer *peer, int port,
{
struct ksock_route *newroute = NULL;
rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock;
- lnet_ni_t *ni = peer->ksnp_ni;
+ struct lnet_ni *ni = peer->ksnp_ni;
struct ksock_net *net = ni->ni_data;
struct list_head *rtmp;
struct ksock_route *route;
@@ -982,7 +983,7 @@ ksocknal_create_routes(struct ksock_peer *peer, int port,
}
int
-ksocknal_accept(lnet_ni_t *ni, struct socket *sock)
+ksocknal_accept(struct lnet_ni *ni, struct socket *sock)
{
struct ksock_connreq *cr;
int rc;
@@ -1025,12 +1026,12 @@ ksocknal_connecting(struct ksock_peer *peer, __u32 ipaddr)
}
int
-ksocknal_create_conn(lnet_ni_t *ni, struct ksock_route *route,
+ksocknal_create_conn(struct lnet_ni *ni, struct ksock_route *route,
struct socket *sock, int type)
{
rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock;
LIST_HEAD(zombies);
- lnet_process_id_t peerid;
+ struct lnet_process_id peerid;
struct list_head *tmp;
__u64 incarnation;
struct ksock_conn *conn;
@@ -1038,7 +1039,7 @@ ksocknal_create_conn(lnet_ni_t *ni, struct ksock_route *route,
struct ksock_peer *peer = NULL;
struct ksock_peer *peer2;
struct ksock_sched *sched;
- ksock_hello_msg_t *hello;
+ struct ksock_hello_msg *hello;
int cpt;
struct ksock_tx *tx;
struct ksock_tx *txtmp;
@@ -1077,7 +1078,7 @@ ksocknal_create_conn(lnet_ni_t *ni, struct ksock_route *route,
conn->ksnc_tx_carrier = NULL;
atomic_set(&conn->ksnc_tx_nob, 0);
- LIBCFS_ALLOC(hello, offsetof(ksock_hello_msg_t,
+ LIBCFS_ALLOC(hello, offsetof(struct ksock_hello_msg,
kshm_ips[LNET_MAX_INTERFACES]));
if (!hello) {
rc = -ENOMEM;
@@ -1341,7 +1342,7 @@ ksocknal_create_conn(lnet_ni_t *ni, struct ksock_route *route,
rc = ksocknal_send_hello(ni, conn, peerid.nid, hello);
}
- LIBCFS_FREE(hello, offsetof(ksock_hello_msg_t,
+ LIBCFS_FREE(hello, offsetof(struct ksock_hello_msg,
kshm_ips[LNET_MAX_INTERFACES]));
/*
@@ -1423,7 +1424,7 @@ ksocknal_create_conn(lnet_ni_t *ni, struct ksock_route *route,
failed_1:
if (hello)
- LIBCFS_FREE(hello, offsetof(ksock_hello_msg_t,
+ LIBCFS_FREE(hello, offsetof(struct ksock_hello_msg,
kshm_ips[LNET_MAX_INTERFACES]));
LIBCFS_FREE(conn, sizeof(*conn));
@@ -1763,7 +1764,7 @@ ksocknal_close_conn_and_siblings(struct ksock_conn *conn, int why)
}
int
-ksocknal_close_matching_conns(lnet_process_id_t id, __u32 ipaddr)
+ksocknal_close_matching_conns(struct lnet_process_id id, __u32 ipaddr)
{
struct ksock_peer *peer;
struct list_head *ptmp;
@@ -1810,13 +1811,13 @@ ksocknal_close_matching_conns(lnet_process_id_t id, __u32 ipaddr)
}
void
-ksocknal_notify(lnet_ni_t *ni, lnet_nid_t gw_nid, int alive)
+ksocknal_notify(struct lnet_ni *ni, lnet_nid_t gw_nid, int alive)
{
/*
* The router is telling me she's been notified of a change in
* gateway state....
*/
- lnet_process_id_t id = {0};
+ struct lnet_process_id id = {0};
id.nid = gw_nid;
id.pid = LNET_PID_ANY;
@@ -1837,14 +1838,14 @@ ksocknal_notify(lnet_ni_t *ni, lnet_nid_t gw_nid, int alive)
}
void
-ksocknal_query(lnet_ni_t *ni, lnet_nid_t nid, unsigned long *when)
+ksocknal_query(struct lnet_ni *ni, lnet_nid_t nid, unsigned long *when)
{
int connect = 1;
unsigned long last_alive = 0;
unsigned long now = cfs_time_current();
struct ksock_peer *peer = NULL;
rwlock_t *glock = &ksocknal_data.ksnd_global_lock;
- lnet_process_id_t id = {
+ struct lnet_process_id id = {
.nid = nid,
.pid = LNET_PID_LUSTRE,
};
@@ -1932,7 +1933,7 @@ ksocknal_push_peer(struct ksock_peer *peer)
}
}
-static int ksocknal_push(lnet_ni_t *ni, lnet_process_id_t id)
+static int ksocknal_push(struct lnet_ni *ni, struct lnet_process_id id)
{
struct list_head *start;
struct list_head *end;
@@ -1982,7 +1983,7 @@ static int ksocknal_push(lnet_ni_t *ni, lnet_process_id_t id)
}
static int
-ksocknal_add_interface(lnet_ni_t *ni, __u32 ipaddress, __u32 netmask)
+ksocknal_add_interface(struct lnet_ni *ni, __u32 ipaddress, __u32 netmask)
{
struct ksock_net *net = ni->ni_data;
struct ksock_interface *iface;
@@ -2086,7 +2087,7 @@ ksocknal_peer_del_interface_locked(struct ksock_peer *peer, __u32 ipaddr)
}
static int
-ksocknal_del_interface(lnet_ni_t *ni, __u32 ipaddress)
+ksocknal_del_interface(struct lnet_ni *ni, __u32 ipaddress)
{
struct ksock_net *net = ni->ni_data;
int rc = -ENOENT;
@@ -2132,9 +2133,9 @@ ksocknal_del_interface(lnet_ni_t *ni, __u32 ipaddress)
}
int
-ksocknal_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg)
+ksocknal_ctl(struct lnet_ni *ni, unsigned int cmd, void *arg)
{
- lnet_process_id_t id = {0};
+ struct lnet_process_id id = {0};
struct libcfs_ioctl_data *data = arg;
int rc;
@@ -2534,7 +2535,7 @@ ksocknal_base_startup(void)
}
static void
-ksocknal_debug_peerhash(lnet_ni_t *ni)
+ksocknal_debug_peerhash(struct lnet_ni *ni)
{
struct ksock_peer *peer = NULL;
struct list_head *tmp;
@@ -2587,11 +2588,11 @@ ksocknal_debug_peerhash(lnet_ni_t *ni)
}
void
-ksocknal_shutdown(lnet_ni_t *ni)
+ksocknal_shutdown(struct lnet_ni *ni)
{
struct ksock_net *net = ni->ni_data;
int i;
- lnet_process_id_t anyid = {0};
+ struct lnet_process_id anyid = {0};
anyid.nid = LNET_NID_ANY;
anyid.pid = LNET_PID_ANY;
@@ -2810,7 +2811,7 @@ ksocknal_net_start_threads(struct ksock_net *net, __u32 *cpts, int ncpts)
}
int
-ksocknal_startup(lnet_ni_t *ni)
+ksocknal_startup(struct lnet_ni *ni)
{
struct ksock_net *net;
int rc;
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h
index 9e86563b8c70..5540de65f9a2 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h
@@ -256,11 +256,11 @@ struct ksock_nal_data {
/*
* A packet just assembled for transmission is represented by 1 or more
* struct iovec fragments (the first frag contains the portals header),
- * followed by 0 or more lnet_kiov_t fragments.
+ * followed by 0 or more struct bio_vec fragments.
*
* On the receive side, initially 1 struct iovec fragment is posted for
* receive (the header). Once the header has been received, the payload is
- * received into either struct iovec or lnet_kiov_t fragments, depending on
+ * received into either struct iovec or struct bio_vec fragments, depending on
* what the header matched or whether the message needs forwarding.
*/
struct ksock_conn; /* forward ref */
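A standalone sketch of the layout the comment above describes: the first fragment of a transmit is a struct kvec holding the header, followed by zero or more struct bio_vec page fragments (the type that replaces lnet_kiov_t throughout this series). The structure and field names below are illustrative, not the real struct ksock_tx.

#include <linux/uio.h>
#include <linux/bvec.h>
#include <linux/mm_types.h>

struct example_tx {
	struct kvec	hdr;		/* first frag: the portals header */
	unsigned int	nkiov;		/* # of page frags that follow */
	struct bio_vec	kiov[4];	/* paged payload */
};

static inline void example_tx_fill(struct example_tx *tx,
				   void *hdr, size_t hdr_len,
				   struct page *page,
				   unsigned int len, unsigned int offset)
{
	tx->hdr.iov_base = hdr;
	tx->hdr.iov_len  = hdr_len;

	tx->kiov[0].bv_page   = page;
	tx->kiov[0].bv_len    = len;
	tx->kiov[0].bv_offset = offset;
	tx->nkiov = 1;
}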
@@ -282,17 +282,17 @@ struct ksock_tx { /* transmit packet */
unsigned short tx_zc_capable:1; /* payload is large enough for ZC */
unsigned short tx_zc_checked:1; /* Have I checked if I should ZC? */
unsigned short tx_nonblk:1; /* it's a non-blocking ACK */
- lnet_kiov_t *tx_kiov; /* packet page frags */
+ struct bio_vec *tx_kiov; /* packet page frags */
struct ksock_conn *tx_conn; /* owning conn */
- lnet_msg_t *tx_lnetmsg; /* lnet message for lnet_finalize()
+ struct lnet_msg *tx_lnetmsg; /* lnet message for lnet_finalize()
*/
unsigned long tx_deadline; /* when (in jiffies) tx times out */
- ksock_msg_t tx_msg; /* socklnd message buffer */
+ struct ksock_msg tx_msg; /* socklnd message buffer */
int tx_desc_size; /* size of this descriptor */
union {
struct {
struct kvec iov; /* virt hdr */
- lnet_kiov_t kiov[0]; /* paged payload */
+ struct bio_vec kiov[0]; /* paged payload */
} paged;
struct {
struct kvec iov[1]; /* virt hdr + payload */
@@ -310,7 +310,7 @@ struct ksock_tx { /* transmit packet */
*/
union ksock_rxiovspace {
struct kvec iov[LNET_MAX_IOV];
- lnet_kiov_t kiov[LNET_MAX_IOV];
+ struct bio_vec kiov[LNET_MAX_IOV];
};
#define SOCKNAL_RX_KSM_HEADER 1 /* reading ksock message header */
@@ -362,14 +362,14 @@ struct ksock_conn {
int ksnc_rx_niov; /* # iovec frags */
struct kvec *ksnc_rx_iov; /* the iovec frags */
int ksnc_rx_nkiov; /* # page frags */
- lnet_kiov_t *ksnc_rx_kiov; /* the page frags */
+ struct bio_vec *ksnc_rx_kiov; /* the page frags */
union ksock_rxiovspace ksnc_rx_iov_space; /* space for frag descriptors */
__u32 ksnc_rx_csum; /* partial checksum for incoming
* data
*/
void *ksnc_cookie; /* rx lnet_finalize passthru arg
*/
- ksock_msg_t ksnc_msg; /* incoming message buffer:
+ struct ksock_msg ksnc_msg; /* incoming message buffer:
* V2.x message takes the
* whole struct
* V1.x message is a bare
@@ -428,7 +428,7 @@ struct ksock_peer {
unsigned long ksnp_last_alive; /* when (in jiffies) I was last
* alive
*/
- lnet_process_id_t ksnp_id; /* who's on the other end(s) */
+ struct lnet_process_id ksnp_id; /* who's on the other end(s) */
atomic_t ksnp_refcount; /* # users */
int ksnp_sharecount; /* lconf usage counter */
int ksnp_closing; /* being closed */
@@ -447,7 +447,7 @@ struct ksock_peer {
* ACK
*/
unsigned long ksnp_send_keepalive; /* time to send keepalive */
- lnet_ni_t *ksnp_ni; /* which network */
+ struct lnet_ni *ksnp_ni; /* which network */
int ksnp_n_passive_ips; /* # of... */
/* preferred local interfaces */
@@ -456,7 +456,7 @@ struct ksock_peer {
struct ksock_connreq {
struct list_head ksncr_list; /* stash on ksnd_connd_connreqs */
- lnet_ni_t *ksncr_ni; /* chosen NI */
+ struct lnet_ni *ksncr_ni; /* chosen NI */
struct socket *ksncr_sock; /* accepted socket */
};
@@ -474,16 +474,16 @@ struct ksock_proto {
int pro_version;
/* handshake function */
- int (*pro_send_hello)(struct ksock_conn *, ksock_hello_msg_t *);
+ int (*pro_send_hello)(struct ksock_conn *, struct ksock_hello_msg *);
/* handshake function */
- int (*pro_recv_hello)(struct ksock_conn *, ksock_hello_msg_t *, int);
+ int (*pro_recv_hello)(struct ksock_conn *, struct ksock_hello_msg *, int);
/* message pack */
void (*pro_pack)(struct ksock_tx *);
/* message unpack */
- void (*pro_unpack)(ksock_msg_t *);
+ void (*pro_unpack)(struct ksock_msg *);
/* queue tx on the connection */
struct ksock_tx *(*pro_queue_tx_msg)(struct ksock_conn *, struct ksock_tx *);
@@ -603,7 +603,7 @@ ksocknal_tx_addref(struct ksock_tx *tx)
}
void ksocknal_tx_prep(struct ksock_conn *, struct ksock_tx *tx);
-void ksocknal_tx_done(lnet_ni_t *ni, struct ksock_tx *tx);
+void ksocknal_tx_done(struct lnet_ni *ni, struct ksock_tx *tx);
static inline void
ksocknal_tx_decref(struct ksock_tx *tx)
@@ -647,19 +647,22 @@ ksocknal_peer_decref(struct ksock_peer *peer)
ksocknal_destroy_peer(peer);
}
-int ksocknal_startup(lnet_ni_t *ni);
-void ksocknal_shutdown(lnet_ni_t *ni);
-int ksocknal_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg);
-int ksocknal_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg);
-int ksocknal_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg,
+int ksocknal_startup(struct lnet_ni *ni);
+void ksocknal_shutdown(struct lnet_ni *ni);
+int ksocknal_ctl(struct lnet_ni *ni, unsigned int cmd, void *arg);
+int ksocknal_send(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg);
+int ksocknal_recv(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg,
int delayed, struct iov_iter *to, unsigned int rlen);
-int ksocknal_accept(lnet_ni_t *ni, struct socket *sock);
-
-int ksocknal_add_peer(lnet_ni_t *ni, lnet_process_id_t id, __u32 ip, int port);
-struct ksock_peer *ksocknal_find_peer_locked(lnet_ni_t *ni, lnet_process_id_t id);
-struct ksock_peer *ksocknal_find_peer(lnet_ni_t *ni, lnet_process_id_t id);
+int ksocknal_accept(struct lnet_ni *ni, struct socket *sock);
+
+int ksocknal_add_peer(struct lnet_ni *ni, struct lnet_process_id id, __u32 ip,
+ int port);
+struct ksock_peer *ksocknal_find_peer_locked(struct lnet_ni *ni,
+ struct lnet_process_id id);
+struct ksock_peer *ksocknal_find_peer(struct lnet_ni *ni,
+ struct lnet_process_id id);
void ksocknal_peer_failed(struct ksock_peer *peer);
-int ksocknal_create_conn(lnet_ni_t *ni, struct ksock_route *route,
+int ksocknal_create_conn(struct lnet_ni *ni, struct ksock_route *route,
struct socket *sock, int type);
void ksocknal_close_conn_locked(struct ksock_conn *conn, int why);
void ksocknal_terminate_conn(struct ksock_conn *conn);
@@ -667,19 +670,19 @@ void ksocknal_destroy_conn(struct ksock_conn *conn);
int ksocknal_close_peer_conns_locked(struct ksock_peer *peer,
__u32 ipaddr, int why);
int ksocknal_close_conn_and_siblings(struct ksock_conn *conn, int why);
-int ksocknal_close_matching_conns(lnet_process_id_t id, __u32 ipaddr);
+int ksocknal_close_matching_conns(struct lnet_process_id id, __u32 ipaddr);
struct ksock_conn *ksocknal_find_conn_locked(struct ksock_peer *peer,
struct ksock_tx *tx, int nonblk);
-int ksocknal_launch_packet(lnet_ni_t *ni, struct ksock_tx *tx,
- lnet_process_id_t id);
+int ksocknal_launch_packet(struct lnet_ni *ni, struct ksock_tx *tx,
+ struct lnet_process_id id);
struct ksock_tx *ksocknal_alloc_tx(int type, int size);
void ksocknal_free_tx(struct ksock_tx *tx);
struct ksock_tx *ksocknal_alloc_tx_noop(__u64 cookie, int nonblk);
void ksocknal_next_tx_carrier(struct ksock_conn *conn);
void ksocknal_queue_tx_locked(struct ksock_tx *tx, struct ksock_conn *conn);
-void ksocknal_txlist_done(lnet_ni_t *ni, struct list_head *txlist, int error);
-void ksocknal_notify(lnet_ni_t *ni, lnet_nid_t gw_nid, int alive);
+void ksocknal_txlist_done(struct lnet_ni *ni, struct list_head *txlist, int error);
+void ksocknal_notify(struct lnet_ni *ni, lnet_nid_t gw_nid, int alive);
void ksocknal_query(struct lnet_ni *ni, lnet_nid_t nid, unsigned long *when);
int ksocknal_thread_start(int (*fn)(void *arg), void *arg, char *name);
void ksocknal_thread_fini(void);
@@ -690,10 +693,11 @@ int ksocknal_new_packet(struct ksock_conn *conn, int skip);
int ksocknal_scheduler(void *arg);
int ksocknal_connd(void *arg);
int ksocknal_reaper(void *arg);
-int ksocknal_send_hello(lnet_ni_t *ni, struct ksock_conn *conn,
- lnet_nid_t peer_nid, ksock_hello_msg_t *hello);
-int ksocknal_recv_hello(lnet_ni_t *ni, struct ksock_conn *conn,
- ksock_hello_msg_t *hello, lnet_process_id_t *id,
+int ksocknal_send_hello(struct lnet_ni *ni, struct ksock_conn *conn,
+ lnet_nid_t peer_nid, struct ksock_hello_msg *hello);
+int ksocknal_recv_hello(struct lnet_ni *ni, struct ksock_conn *conn,
+ struct ksock_hello_msg *hello,
+ struct lnet_process_id *id,
__u64 *incarnation);
void ksocknal_read_callback(struct ksock_conn *conn);
void ksocknal_write_callback(struct ksock_conn *conn);
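The bulk of the socklnd changes above follow one mechanical pattern: the old LNet typedefs (lnet_ni_t, lnet_process_id_t, ksock_msg_t, ...) are dropped in favour of bare struct tags, with no behavioural change. A minimal compilable illustration of the before/after shape; the struct body here is a stand-in, not the real definition.

/* stand-in definition; the real struct lnet_ni lives in the LNet headers */
struct lnet_ni { int ni_refcount; };

/* old style, removed by this series:
 *	typedef struct lnet_ni lnet_ni_t;
 *	static int ni_busy(lnet_ni_t *ni) { return ni->ni_refcount != 0; }
 */

/* new style: reference the struct tag directly, same ABI */
static int ni_busy(struct lnet_ni *ni)
{
	return ni->ni_refcount != 0;
}

int main(void)
{
	struct lnet_ni ni = { .ni_refcount = 1 };

	return ni_busy(&ni) ? 0 : 1;
}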
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
index 4c9f92725a44..3ed3b08c122c 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
@@ -145,7 +145,7 @@ ksocknal_send_iov(struct ksock_conn *conn, struct ksock_tx *tx)
static int
ksocknal_send_kiov(struct ksock_conn *conn, struct ksock_tx *tx)
{
- lnet_kiov_t *kiov = tx->tx_kiov;
+ struct bio_vec *kiov = tx->tx_kiov;
int nob;
int rc;
@@ -298,7 +298,7 @@ ksocknal_recv_iov(struct ksock_conn *conn)
static int
ksocknal_recv_kiov(struct ksock_conn *conn)
{
- lnet_kiov_t *kiov = conn->ksnc_rx_kiov;
+ struct bio_vec *kiov = conn->ksnc_rx_kiov;
int nob;
int rc;
@@ -393,9 +393,9 @@ ksocknal_receive(struct ksock_conn *conn)
}
void
-ksocknal_tx_done(lnet_ni_t *ni, struct ksock_tx *tx)
+ksocknal_tx_done(struct lnet_ni *ni, struct ksock_tx *tx)
{
- lnet_msg_t *lnetmsg = tx->tx_lnetmsg;
+ struct lnet_msg *lnetmsg = tx->tx_lnetmsg;
int rc = (!tx->tx_resid && !tx->tx_zc_aborted) ? 0 : -EIO;
LASSERT(ni || tx->tx_conn);
@@ -412,7 +412,7 @@ ksocknal_tx_done(lnet_ni_t *ni, struct ksock_tx *tx)
}
void
-ksocknal_txlist_done(lnet_ni_t *ni, struct list_head *txlist, int error)
+ksocknal_txlist_done(struct lnet_ni *ni, struct list_head *txlist, int error)
{
struct ksock_tx *tx;
@@ -695,7 +695,7 @@ void
ksocknal_queue_tx_locked(struct ksock_tx *tx, struct ksock_conn *conn)
{
struct ksock_sched *sched = conn->ksnc_scheduler;
- ksock_msg_t *msg = &tx->tx_msg;
+ struct ksock_msg *msg = &tx->tx_msg;
struct ksock_tx *ztx = NULL;
int bufnob = 0;
@@ -845,7 +845,8 @@ ksocknal_find_connecting_route_locked(struct ksock_peer *peer)
}
int
-ksocknal_launch_packet(lnet_ni_t *ni, struct ksock_tx *tx, lnet_process_id_t id)
+ksocknal_launch_packet(struct lnet_ni *ni, struct ksock_tx *tx,
+ struct lnet_process_id id)
{
struct ksock_peer *peer;
struct ksock_conn *conn;
@@ -938,14 +939,14 @@ ksocknal_launch_packet(lnet_ni_t *ni, struct ksock_tx *tx, lnet_process_id_t id)
}
int
-ksocknal_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
+ksocknal_send(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg)
{
int mpflag = 1;
int type = lntmsg->msg_type;
- lnet_process_id_t target = lntmsg->msg_target;
+ struct lnet_process_id target = lntmsg->msg_target;
unsigned int payload_niov = lntmsg->msg_niov;
struct kvec *payload_iov = lntmsg->msg_iov;
- lnet_kiov_t *payload_kiov = lntmsg->msg_kiov;
+ struct bio_vec *payload_kiov = lntmsg->msg_kiov;
unsigned int payload_offset = lntmsg->msg_offset;
unsigned int payload_nob = lntmsg->msg_len;
struct ksock_tx *tx;
@@ -1072,9 +1073,9 @@ ksocknal_new_packet(struct ksock_conn *conn, int nob_to_skip)
conn->ksnc_rx_iov = (struct kvec *)&conn->ksnc_rx_iov_space;
conn->ksnc_rx_iov[0].iov_base = &conn->ksnc_msg;
- conn->ksnc_rx_nob_wanted = offsetof(ksock_msg_t, ksm_u);
- conn->ksnc_rx_nob_left = offsetof(ksock_msg_t, ksm_u);
- conn->ksnc_rx_iov[0].iov_len = offsetof(ksock_msg_t, ksm_u);
+ conn->ksnc_rx_nob_wanted = offsetof(struct ksock_msg, ksm_u);
+ conn->ksnc_rx_nob_left = offsetof(struct ksock_msg, ksm_u);
+ conn->ksnc_rx_iov[0].iov_len = offsetof(struct ksock_msg, ksm_u);
break;
case KSOCK_PROTO_V1:
@@ -1132,7 +1133,7 @@ static int
ksocknal_process_receive(struct ksock_conn *conn)
{
struct lnet_hdr *lhdr;
- lnet_process_id_t *id;
+ struct lnet_process_id *id;
int rc;
LASSERT(atomic_read(&conn->ksnc_conn_refcount) > 0);
@@ -1232,12 +1233,12 @@ ksocknal_process_receive(struct ksock_conn *conn)
}
conn->ksnc_rx_state = SOCKNAL_RX_LNET_HEADER;
- conn->ksnc_rx_nob_wanted = sizeof(ksock_lnet_msg_t);
- conn->ksnc_rx_nob_left = sizeof(ksock_lnet_msg_t);
+ conn->ksnc_rx_nob_wanted = sizeof(struct ksock_lnet_msg);
+ conn->ksnc_rx_nob_left = sizeof(struct ksock_lnet_msg);
conn->ksnc_rx_iov = (struct kvec *)&conn->ksnc_rx_iov_space;
conn->ksnc_rx_iov[0].iov_base = &conn->ksnc_msg.ksm_u.lnetmsg;
- conn->ksnc_rx_iov[0].iov_len = sizeof(ksock_lnet_msg_t);
+ conn->ksnc_rx_iov[0].iov_len = sizeof(struct ksock_lnet_msg);
conn->ksnc_rx_niov = 1;
conn->ksnc_rx_kiov = NULL;
@@ -1333,8 +1334,8 @@ ksocknal_process_receive(struct ksock_conn *conn)
}
int
-ksocknal_recv(lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed,
- struct iov_iter *to, unsigned int rlen)
+ksocknal_recv(struct lnet_ni *ni, void *private, struct lnet_msg *msg,
+ int delayed, struct iov_iter *to, unsigned int rlen)
{
struct ksock_conn *conn = private;
struct ksock_sched *sched = conn->ksnc_scheduler;
@@ -1633,7 +1634,7 @@ void ksocknal_write_callback(struct ksock_conn *conn)
}
static struct ksock_proto *
-ksocknal_parse_proto_version(ksock_hello_msg_t *hello)
+ksocknal_parse_proto_version(struct ksock_hello_msg *hello)
{
__u32 version = 0;
@@ -1664,7 +1665,7 @@ ksocknal_parse_proto_version(ksock_hello_msg_t *hello)
struct lnet_magicversion *hmv = (struct lnet_magicversion *)hello;
BUILD_BUG_ON(sizeof(struct lnet_magicversion) !=
- offsetof(ksock_hello_msg_t, kshm_src_nid));
+ offsetof(struct ksock_hello_msg, kshm_src_nid));
if (hmv->version_major == cpu_to_le16(KSOCK_PROTO_V1_MAJOR) &&
hmv->version_minor == cpu_to_le16(KSOCK_PROTO_V1_MINOR))
@@ -1675,8 +1676,8 @@ ksocknal_parse_proto_version(ksock_hello_msg_t *hello)
}
int
-ksocknal_send_hello(lnet_ni_t *ni, struct ksock_conn *conn,
- lnet_nid_t peer_nid, ksock_hello_msg_t *hello)
+ksocknal_send_hello(struct lnet_ni *ni, struct ksock_conn *conn,
+ lnet_nid_t peer_nid, struct ksock_hello_msg *hello)
{
/* CAVEAT EMPTOR: this byte flips 'ipaddrs' */
struct ksock_net *net = (struct ksock_net *)ni->ni_data;
@@ -1713,8 +1714,9 @@ ksocknal_invert_type(int type)
}
int
-ksocknal_recv_hello(lnet_ni_t *ni, struct ksock_conn *conn,
- ksock_hello_msg_t *hello, lnet_process_id_t *peerid,
+ksocknal_recv_hello(struct lnet_ni *ni, struct ksock_conn *conn,
+ struct ksock_hello_msg *hello,
+ struct lnet_process_id *peerid,
__u64 *incarnation)
{
/* Return < 0 fatal error
@@ -1728,7 +1730,7 @@ ksocknal_recv_hello(lnet_ni_t *ni, struct ksock_conn *conn,
int proto_match;
int rc;
struct ksock_proto *proto;
- lnet_process_id_t recv_id;
+ struct lnet_process_id recv_id;
/* socket type set on active connections - not set on passive */
LASSERT(!active == !(conn->ksnc_type != SOCKLND_CONN_NONE));
@@ -1904,14 +1906,14 @@ ksocknal_connect(struct ksock_route *route)
if (retry_later) /* needs reschedule */
break;
- if (wanted & (1 << SOCKLND_CONN_ANY)) {
+ if (wanted & BIT(SOCKLND_CONN_ANY)) {
type = SOCKLND_CONN_ANY;
- } else if (wanted & (1 << SOCKLND_CONN_CONTROL)) {
+ } else if (wanted & BIT(SOCKLND_CONN_CONTROL)) {
type = SOCKLND_CONN_CONTROL;
- } else if (wanted & (1 << SOCKLND_CONN_BULK_IN)) {
+ } else if (wanted & BIT(SOCKLND_CONN_BULK_IN)) {
type = SOCKLND_CONN_BULK_IN;
} else {
- LASSERT(wanted & (1 << SOCKLND_CONN_BULK_OUT));
+ LASSERT(wanted & BIT(SOCKLND_CONN_BULK_OUT));
type = SOCKLND_CONN_BULK_OUT;
}
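The (1 << n) to BIT(n) conversions in the hunk above are purely cosmetic; BIT() is the kernel's standard single-bit mask macro. A small userspace sketch of the same connection-type selection, with BIT() redefined locally so it compiles on its own and placeholder enum values standing in for the SOCKLND_CONN_* constants:

#include <stdio.h>

#define BIT(nr)	(1UL << (nr))

enum { CONN_ANY, CONN_CONTROL, CONN_BULK_IN, CONN_BULK_OUT };

int main(void)
{
	unsigned long wanted = BIT(CONN_CONTROL) | BIT(CONN_BULK_OUT);
	int type;

	if (wanted & BIT(CONN_ANY))
		type = CONN_ANY;
	else if (wanted & BIT(CONN_CONTROL))
		type = CONN_CONTROL;	/* picked: same priority order as the diff */
	else if (wanted & BIT(CONN_BULK_IN))
		type = CONN_BULK_IN;
	else
		type = CONN_BULK_OUT;

	printf("selected connection type %d\n", type);
	return 0;
}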
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c
index 4bcab4bcc2de..8a036f4eb8d8 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c
@@ -99,7 +99,7 @@ int
ksocknal_lib_send_kiov(struct ksock_conn *conn, struct ksock_tx *tx)
{
struct socket *sock = conn->ksnc_sock;
- lnet_kiov_t *kiov = tx->tx_kiov;
+ struct bio_vec *kiov = tx->tx_kiov;
int rc;
int nob;
@@ -215,7 +215,7 @@ int
ksocknal_lib_recv_kiov(struct ksock_conn *conn)
{
unsigned int niov = conn->ksnc_rx_nkiov;
- lnet_kiov_t *kiov = conn->ksnc_rx_kiov;
+ struct bio_vec *kiov = conn->ksnc_rx_kiov;
struct msghdr msg = {
.msg_flags = 0
};
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c
index d367e74d46c2..84be9a518190 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c
@@ -287,11 +287,11 @@ ksocknal_match_tx(struct ksock_conn *conn, struct ksock_tx *tx, int nonblk)
if (!tx || !tx->tx_lnetmsg) {
/* noop packet */
- nob = offsetof(ksock_msg_t, ksm_u);
+ nob = offsetof(struct ksock_msg, ksm_u);
} else {
nob = tx->tx_lnetmsg->msg_len +
((conn->ksnc_proto == &ksocknal_protocol_v1x) ?
- sizeof(struct lnet_hdr) : sizeof(ksock_msg_t));
+ sizeof(struct lnet_hdr) : sizeof(struct ksock_msg));
}
/* default checking for typed connection */
@@ -325,9 +325,9 @@ ksocknal_match_tx_v3(struct ksock_conn *conn, struct ksock_tx *tx, int nonblk)
int nob;
if (!tx || !tx->tx_lnetmsg)
- nob = offsetof(ksock_msg_t, ksm_u);
+ nob = offsetof(struct ksock_msg, ksm_u);
else
- nob = tx->tx_lnetmsg->msg_len + sizeof(ksock_msg_t);
+ nob = tx->tx_lnetmsg->msg_len + sizeof(struct ksock_msg);
switch (conn->ksnc_type) {
default:
@@ -456,7 +456,7 @@ ksocknal_handle_zcack(struct ksock_conn *conn, __u64 cookie1, __u64 cookie2)
}
static int
-ksocknal_send_hello_v1(struct ksock_conn *conn, ksock_hello_msg_t *hello)
+ksocknal_send_hello_v1(struct ksock_conn *conn, struct ksock_hello_msg *hello)
{
struct socket *sock = conn->ksnc_sock;
struct lnet_hdr *hdr;
@@ -531,7 +531,7 @@ out:
}
static int
-ksocknal_send_hello_v2(struct ksock_conn *conn, ksock_hello_msg_t *hello)
+ksocknal_send_hello_v2(struct ksock_conn *conn, struct ksock_hello_msg *hello)
{
struct socket *sock = conn->ksnc_sock;
int rc;
@@ -549,7 +549,7 @@ ksocknal_send_hello_v2(struct ksock_conn *conn, ksock_hello_msg_t *hello)
LNET_UNLOCK();
}
- rc = lnet_sock_write(sock, hello, offsetof(ksock_hello_msg_t, kshm_ips),
+ rc = lnet_sock_write(sock, hello, offsetof(struct ksock_hello_msg, kshm_ips),
lnet_acceptor_timeout());
if (rc) {
CNETERR("Error %d sending HELLO hdr to %pI4h/%d\n",
@@ -573,7 +573,7 @@ ksocknal_send_hello_v2(struct ksock_conn *conn, ksock_hello_msg_t *hello)
}
static int
-ksocknal_recv_hello_v1(struct ksock_conn *conn, ksock_hello_msg_t *hello,
+ksocknal_recv_hello_v1(struct ksock_conn *conn, struct ksock_hello_msg *hello,
int timeout)
{
struct socket *sock = conn->ksnc_sock;
@@ -649,7 +649,7 @@ out:
}
static int
-ksocknal_recv_hello_v2(struct ksock_conn *conn, ksock_hello_msg_t *hello,
+ksocknal_recv_hello_v2(struct ksock_conn *conn, struct ksock_hello_msg *hello,
int timeout)
{
struct socket *sock = conn->ksnc_sock;
@@ -662,8 +662,8 @@ ksocknal_recv_hello_v2(struct ksock_conn *conn, ksock_hello_msg_t *hello,
conn->ksnc_flip = 1;
rc = lnet_sock_read(sock, &hello->kshm_src_nid,
- offsetof(ksock_hello_msg_t, kshm_ips) -
- offsetof(ksock_hello_msg_t, kshm_src_nid),
+ offsetof(struct ksock_hello_msg, kshm_ips) -
+ offsetof(struct ksock_hello_msg, kshm_src_nid),
timeout);
if (rc) {
CERROR("Error %d reading HELLO from %pI4h\n",
@@ -738,15 +738,15 @@ ksocknal_pack_msg_v2(struct ksock_tx *tx)
LASSERT(tx->tx_msg.ksm_type != KSOCK_MSG_NOOP);
tx->tx_msg.ksm_u.lnetmsg.ksnm_hdr = tx->tx_lnetmsg->msg_hdr;
- tx->tx_iov[0].iov_len = sizeof(ksock_msg_t);
- tx->tx_nob = sizeof(ksock_msg_t) + tx->tx_lnetmsg->msg_len;
- tx->tx_resid = sizeof(ksock_msg_t) + tx->tx_lnetmsg->msg_len;
+ tx->tx_iov[0].iov_len = sizeof(struct ksock_msg);
+ tx->tx_nob = sizeof(struct ksock_msg) + tx->tx_lnetmsg->msg_len;
+ tx->tx_resid = sizeof(struct ksock_msg) + tx->tx_lnetmsg->msg_len;
} else {
LASSERT(tx->tx_msg.ksm_type == KSOCK_MSG_NOOP);
- tx->tx_iov[0].iov_len = offsetof(ksock_msg_t, ksm_u.lnetmsg.ksnm_hdr);
- tx->tx_nob = offsetof(ksock_msg_t, ksm_u.lnetmsg.ksnm_hdr);
- tx->tx_resid = offsetof(ksock_msg_t, ksm_u.lnetmsg.ksnm_hdr);
+ tx->tx_iov[0].iov_len = offsetof(struct ksock_msg, ksm_u.lnetmsg.ksnm_hdr);
+ tx->tx_nob = offsetof(struct ksock_msg, ksm_u.lnetmsg.ksnm_hdr);
+ tx->tx_resid = offsetof(struct ksock_msg, ksm_u.lnetmsg.ksnm_hdr);
}
/*
* Don't checksum before start sending, because packet can be
@@ -755,7 +755,7 @@ ksocknal_pack_msg_v2(struct ksock_tx *tx)
}
static void
-ksocknal_unpack_msg_v1(ksock_msg_t *msg)
+ksocknal_unpack_msg_v1(struct ksock_msg *msg)
{
msg->ksm_csum = 0;
msg->ksm_type = KSOCK_MSG_LNET;
@@ -764,7 +764,7 @@ ksocknal_unpack_msg_v1(ksock_msg_t *msg)
}
static void
-ksocknal_unpack_msg_v2(ksock_msg_t *msg)
+ksocknal_unpack_msg_v2(struct ksock_msg *msg)
{
return; /* Do nothing */
}
diff --git a/drivers/staging/lustre/lnet/libcfs/debug.c b/drivers/staging/lustre/lnet/libcfs/debug.c
index 3408041355e3..c56e9922cd5b 100644
--- a/drivers/staging/lustre/lnet/libcfs/debug.c
+++ b/drivers/staging/lustre/lnet/libcfs/debug.c
@@ -80,7 +80,7 @@ static int libcfs_param_debug_mb_set(const char *val,
* it needs quite a bunch of extra processing, so we define special
* debugmb parameter type with corresponding methods to handle this case
*/
-static struct kernel_param_ops param_ops_debugmb = {
+static const struct kernel_param_ops param_ops_debugmb = {
.set = libcfs_param_debug_mb_set,
.get = param_get_uint,
};
@@ -138,7 +138,7 @@ static int param_set_console_max_delay(const char *val,
libcfs_console_min_delay, INT_MAX);
}
-static struct kernel_param_ops param_ops_console_max_delay = {
+static const struct kernel_param_ops param_ops_console_max_delay = {
.set = param_set_console_max_delay,
.get = param_get_delay,
};
@@ -156,7 +156,7 @@ static int param_set_console_min_delay(const char *val,
1, libcfs_console_max_delay);
}
-static struct kernel_param_ops param_ops_console_min_delay = {
+static const struct kernel_param_ops param_ops_console_min_delay = {
.set = param_set_console_min_delay,
.get = param_get_delay,
};
@@ -188,7 +188,7 @@ static int param_set_uintpos(const char *val, const struct kernel_param *kp)
return param_set_uint_minmax(val, kp, 1, -1);
}
-static struct kernel_param_ops param_ops_uintpos = {
+static const struct kernel_param_ops param_ops_uintpos = {
.set = param_set_uintpos,
.get = param_get_uint,
};
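Constifying the kernel_param_ops tables above is safe because the module-parameter core only reads them. For context, a minimal hypothetical module-parameter sketch using the same pattern, with a validating setter wired up through module_param_cb(); the names and the range check are illustrative, not taken from this patch.

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>

static unsigned int example_val = 64;

static int example_set(const char *val, const struct kernel_param *kp)
{
	unsigned int v;
	int rc = kstrtouint(val, 0, &v);

	if (rc)
		return rc;
	if (!v)				/* stand-in range check */
		return -EINVAL;
	*(unsigned int *)kp->arg = v;
	return 0;
}

static const struct kernel_param_ops example_ops = {
	.set = example_set,
	.get = param_get_uint,
};

module_param_cb(example_val, &example_ops, &example_val, 0644);
MODULE_PARM_DESC(example_val, "example tunable with a validating setter");
MODULE_LICENSE("GPL");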
diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-mem.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-mem.c
index a6a76a681ea9..8f638267e704 100644
--- a/drivers/staging/lustre/lnet/libcfs/linux/linux-mem.c
+++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-mem.c
@@ -45,15 +45,6 @@ EXPORT_SYMBOL(libcfs_kvzalloc);
void *libcfs_kvzalloc_cpt(struct cfs_cpt_table *cptab, int cpt, size_t size,
gfp_t flags)
{
- void *ret;
-
- ret = kzalloc_node(size, flags | __GFP_NOWARN,
- cfs_cpt_spread_node(cptab, cpt));
- if (!ret) {
- WARN_ON(!(flags & (__GFP_FS | __GFP_HIGH)));
- ret = vmalloc_node(size, cfs_cpt_spread_node(cptab, cpt));
- }
-
- return ret;
+ return kvzalloc_node(size, flags, cfs_cpt_spread_node(cptab, cpt));
}
EXPORT_SYMBOL(libcfs_kvzalloc_cpt);
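The hunk above replaces the open-coded kzalloc_node()-then-vmalloc_node() fallback with kvzalloc_node(), which implements the same try-kmalloc-then-vmalloc policy in one call. A minimal sketch of the paired allocate/free convention, assuming ordinary GFP_KERNEL context; the function names are illustrative.

#include <linux/mm.h>
#include <linux/slab.h>

void *example_alloc_big(size_t size, int node)
{
	/* zeroed memory; transparently falls back to vmalloc when a
	 * physically contiguous allocation would be too large or the
	 * system is too fragmented */
	return kvzalloc_node(size, GFP_KERNEL, node);
}

void example_free_big(void *ptr)
{
	kvfree(ptr);	/* correct for both kmalloc- and vmalloc-backed memory */
}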
diff --git a/drivers/staging/lustre/lnet/libcfs/tracefile.c b/drivers/staging/lustre/lnet/libcfs/tracefile.c
index d7b29f8997c0..9599b7441feb 100644
--- a/drivers/staging/lustre/lnet/libcfs/tracefile.c
+++ b/drivers/staging/lustre/lnet/libcfs/tracefile.c
@@ -37,6 +37,7 @@
#define DEBUG_SUBSYSTEM S_LNET
#define LUSTRE_TRACEFILE_PRIVATE
+#define pr_fmt(fmt) "Lustre: " fmt
#include "tracefile.h"
#include "../../include/linux/libcfs/libcfs.h"
@@ -192,9 +193,8 @@ cfs_trace_get_tage_try(struct cfs_trace_cpu_data *tcd, unsigned long len)
if (unlikely(!tage)) {
if ((!memory_pressure_get() ||
in_interrupt()) && printk_ratelimit())
- printk(KERN_WARNING
- "cannot allocate a tage (%ld)\n",
- tcd->tcd_cur_pages);
+ pr_warn("cannot allocate a tage (%ld)\n",
+ tcd->tcd_cur_pages);
return NULL;
}
}
@@ -230,8 +230,8 @@ static void cfs_tcd_shrink(struct cfs_trace_cpu_data *tcd)
*/
if (printk_ratelimit())
- printk(KERN_WARNING "debug daemon buffer overflowed; discarding 10%% of pages (%d of %ld)\n",
- pgcount + 1, tcd->tcd_cur_pages);
+ pr_warn("debug daemon buffer overflowed; discarding 10%% of pages (%d of %ld)\n",
+ pgcount + 1, tcd->tcd_cur_pages);
INIT_LIST_HEAD(&pc.pc_pages);
@@ -358,8 +358,7 @@ int libcfs_debug_vmsg2(struct libcfs_debug_msg_data *msgdata,
max_nob = PAGE_SIZE - tage->used - known_size;
if (max_nob <= 0) {
- printk(KERN_EMERG "negative max_nob: %d\n",
- max_nob);
+ pr_emerg("negative max_nob: %d\n", max_nob);
mask |= D_ERROR;
cfs_trace_put_tcd(tcd);
tcd = NULL;
@@ -389,8 +388,8 @@ int libcfs_debug_vmsg2(struct libcfs_debug_msg_data *msgdata,
}
if (*(string_buf + needed - 1) != '\n')
- printk(KERN_INFO "format at %s:%d:%s doesn't end in newline\n",
- file, msgdata->msg_line, msgdata->msg_fn);
+ pr_info("format at %s:%d:%s doesn't end in newline\n", file,
+ msgdata->msg_line, msgdata->msg_fn);
header.ph_len = known_size + needed;
debug_buf = (char *)page_address(tage->page) + tage->used;
@@ -739,8 +738,8 @@ int cfs_tracefile_dump_all_pages(char *filename)
kunmap(tage->page);
if (rc != (int)tage->used) {
- printk(KERN_WARNING "wanted to write %u but wrote %d\n",
- tage->used, rc);
+ pr_warn("wanted to write %u but wrote %d\n", tage->used,
+ rc);
put_pages_back(&pc);
__LASSERT(list_empty(&pc.pc_pages));
break;
@@ -894,10 +893,9 @@ int cfs_trace_daemon_command(char *str)
} else {
strcpy(cfs_tracefile, str);
- printk(KERN_INFO
- "Lustre: debug daemon will attempt to start writing to %s (%lukB max)\n",
- cfs_tracefile,
- (long)(cfs_tracefile_size >> 10));
+ pr_info("debug daemon will attempt to start writing to %s (%lukB max)\n",
+ cfs_tracefile,
+ (long)(cfs_tracefile_size >> 10));
cfs_trace_start_thread();
}
@@ -933,16 +931,14 @@ int cfs_trace_set_debug_mb(int mb)
struct cfs_trace_cpu_data *tcd;
if (mb < num_possible_cpus()) {
- printk(KERN_WARNING
- "Lustre: %d MB is too small for debug buffer size, setting it to %d MB.\n",
- mb, num_possible_cpus());
+ pr_warn("%d MB is too small for debug buffer size, setting it to %d MB.\n",
+ mb, num_possible_cpus());
mb = num_possible_cpus();
}
if (mb > limit) {
- printk(KERN_WARNING
- "Lustre: %d MB is too large for debug buffer size, setting it to %d MB.\n",
- mb, limit);
+ pr_warn("%d MB is too large for debug buffer size, setting it to %d MB.\n",
+ mb, limit);
mb = limit;
}
@@ -1010,8 +1006,8 @@ static int tracefiled(void *arg)
if (IS_ERR(filp)) {
rc = PTR_ERR(filp);
filp = NULL;
- printk(KERN_WARNING "couldn't open %s: %d\n",
- cfs_tracefile, rc);
+ pr_warn("couldn't open %s: %d\n", cfs_tracefile,
+ rc);
}
}
cfs_tracefile_read_unlock();
@@ -1039,8 +1035,8 @@ static int tracefiled(void *arg)
kunmap(tage->page);
if (rc != (int)tage->used) {
- printk(KERN_WARNING "wanted to write %u but wrote %d\n",
- tage->used, rc);
+ pr_warn("wanted to write %u but wrote %d\n",
+ tage->used, rc);
put_pages_back(&pc);
__LASSERT(list_empty(&pc.pc_pages));
break;
@@ -1053,7 +1049,7 @@ static int tracefiled(void *arg)
if (!list_empty(&pc.pc_pages)) {
int i;
- printk(KERN_ALERT "Lustre: trace pages aren't empty\n");
+ pr_alert("trace pages aren't empty\n");
pr_err("total cpus(%d): ", num_possible_cpus());
for (i = 0; i < num_possible_cpus(); i++)
if (cpu_online(i))
@@ -1123,8 +1119,7 @@ void cfs_trace_stop_thread(void)
mutex_lock(&cfs_trace_thread_mutex);
if (thread_running) {
- printk(KERN_INFO
- "Lustre: shutting down debug daemon thread...\n");
+ pr_info("shutting down debug daemon thread...\n");
atomic_set(&tctl->tctl_shutdown, 1);
wait_for_completion(&tctl->tctl_stop);
thread_running = 0;
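The printk-to-pr_*() conversion above relies on the pr_fmt() definition added near the top of the file: once pr_fmt() is defined before the printk helpers are included, every pr_warn()/pr_info() call is prefixed automatically, which is why the literal "Lustre: " strings could be dropped. A tiny sketch of the mechanism, with an illustrative function name:

#define pr_fmt(fmt) "Lustre: " fmt	/* must precede the printk includes */

#include <linux/printk.h>

void example_report(unsigned long pages)
{
	/* logs: "Lustre: cannot allocate a tage (<pages>)" */
	pr_warn("cannot allocate a tage (%ld)\n", pages);
}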
diff --git a/drivers/staging/lustre/lnet/lnet/acceptor.c b/drivers/staging/lustre/lnet/lnet/acceptor.c
index 69bbd594b9bd..a6f60c3e1184 100644
--- a/drivers/staging/lustre/lnet/lnet/acceptor.c
+++ b/drivers/staging/lustre/lnet/lnet/acceptor.c
@@ -211,7 +211,7 @@ lnet_accept(struct socket *sock, __u32 magic)
int peer_port;
int rc;
int flip;
- lnet_ni_t *ni;
+ struct lnet_ni *ni;
char *str;
LASSERT(sizeof(cr) <= 16); /* not too big for the stack */
diff --git a/drivers/staging/lustre/lnet/lnet/api-ni.c b/drivers/staging/lustre/lnet/lnet/api-ni.c
index 08b38ef67784..0b91d1809cb1 100644
--- a/drivers/staging/lustre/lnet/lnet/api-ni.c
+++ b/drivers/staging/lustre/lnet/lnet/api-ni.c
@@ -39,7 +39,7 @@
#define D_LNI D_CONSOLE
-lnet_t the_lnet; /* THE state of the network */
+struct lnet the_lnet; /* THE state of the network */
EXPORT_SYMBOL(the_lnet);
static char *ip2nets = "";
@@ -58,8 +58,8 @@ static int rnet_htable_size = LNET_REMOTE_NETS_HASH_DEFAULT;
module_param(rnet_htable_size, int, 0444);
MODULE_PARM_DESC(rnet_htable_size, "size of remote network hash table");
-static int lnet_ping(lnet_process_id_t id, int timeout_ms,
- lnet_process_id_t __user *ids, int n_ids);
+static int lnet_ping(struct lnet_process_id id, int timeout_ms,
+ struct lnet_process_id __user *ids, int n_ids);
static char *
lnet_get_routes(void)
@@ -265,15 +265,15 @@ static void lnet_assert_wire_constants(void)
BUILD_BUG_ON((int)sizeof(((struct lnet_hdr *)0)->msg.hello.type) != 4);
}
-static lnd_t *
+static struct lnet_lnd *
lnet_find_lnd_by_type(__u32 type)
{
- lnd_t *lnd;
+ struct lnet_lnd *lnd;
struct list_head *tmp;
/* holding lnd mutex */
list_for_each(tmp, &the_lnet.ln_lnds) {
- lnd = list_entry(tmp, lnd_t, lnd_list);
+ lnd = list_entry(tmp, struct lnet_lnd, lnd_list);
if (lnd->lnd_type == type)
return lnd;
@@ -283,7 +283,7 @@ lnet_find_lnd_by_type(__u32 type)
}
void
-lnet_register_lnd(lnd_t *lnd)
+lnet_register_lnd(struct lnet_lnd *lnd)
{
mutex_lock(&the_lnet.ln_lnd_mutex);
@@ -300,7 +300,7 @@ lnet_register_lnd(lnd_t *lnd)
EXPORT_SYMBOL(lnet_register_lnd);
void
-lnet_unregister_lnd(lnd_t *lnd)
+lnet_unregister_lnd(struct lnet_lnd *lnd)
{
mutex_lock(&the_lnet.ln_lnd_mutex);
@@ -315,9 +315,9 @@ lnet_unregister_lnd(lnd_t *lnd)
EXPORT_SYMBOL(lnet_unregister_lnd);
void
-lnet_counters_get(lnet_counters_t *counters)
+lnet_counters_get(struct lnet_counters *counters)
{
- lnet_counters_t *ctr;
+ struct lnet_counters *ctr;
int i;
memset(counters, 0, sizeof(*counters));
@@ -344,13 +344,13 @@ EXPORT_SYMBOL(lnet_counters_get);
void
lnet_counters_reset(void)
{
- lnet_counters_t *counters;
+ struct lnet_counters *counters;
int i;
lnet_net_lock(LNET_LOCK_EX);
cfs_percpt_for_each(counters, i, the_lnet.ln_counters)
- memset(counters, 0, sizeof(lnet_counters_t));
+ memset(counters, 0, sizeof(struct lnet_counters));
lnet_net_unlock(LNET_LOCK_EX);
}
@@ -383,10 +383,10 @@ lnet_res_container_cleanup(struct lnet_res_container *rec)
list_del_init(e);
if (rec->rec_type == LNET_COOKIE_TYPE_EQ) {
- lnet_eq_free(list_entry(e, lnet_eq_t, eq_list));
+ lnet_eq_free(list_entry(e, struct lnet_eq, eq_list));
} else if (rec->rec_type == LNET_COOKIE_TYPE_MD) {
- lnet_md_free(list_entry(e, lnet_libmd_t, md_list));
+ lnet_md_free(list_entry(e, struct lnet_libmd, md_list));
} else { /* NB: Active MEs should be attached on portals */
LBUG();
@@ -483,12 +483,12 @@ lnet_res_containers_create(int type)
return recs;
}
-lnet_libhandle_t *
+struct lnet_libhandle *
lnet_res_lh_lookup(struct lnet_res_container *rec, __u64 cookie)
{
/* ALWAYS called with lnet_res_lock held */
struct list_head *head;
- lnet_libhandle_t *lh;
+ struct lnet_libhandle *lh;
unsigned int hash;
if ((cookie & LNET_COOKIE_MASK) != rec->rec_type)
@@ -506,7 +506,8 @@ lnet_res_lh_lookup(struct lnet_res_container *rec, __u64 cookie)
}
void
-lnet_res_lh_initialize(struct lnet_res_container *rec, lnet_libhandle_t *lh)
+lnet_res_lh_initialize(struct lnet_res_container *rec,
+ struct lnet_libhandle *lh)
{
/* ALWAYS called with lnet_res_lock held */
unsigned int ibits = LNET_COOKIE_TYPE_BITS + LNET_CPT_BITS;
@@ -559,7 +560,7 @@ lnet_prepare(lnet_pid_t requested_pid)
the_lnet.ln_interface_cookie = ktime_get_ns();
the_lnet.ln_counters = cfs_percpt_alloc(lnet_cpt_table(),
- sizeof(lnet_counters_t));
+ sizeof(struct lnet_counters));
if (!the_lnet.ln_counters) {
CERROR("Failed to allocate counters for LNet\n");
rc = -ENOMEM;
@@ -652,16 +653,16 @@ lnet_unprepare(void)
return 0;
}
-lnet_ni_t *
+struct lnet_ni *
lnet_net2ni_locked(__u32 net, int cpt)
{
struct list_head *tmp;
- lnet_ni_t *ni;
+ struct lnet_ni *ni;
LASSERT(cpt != LNET_LOCK_EX);
list_for_each(tmp, &the_lnet.ln_nis) {
- ni = list_entry(tmp, lnet_ni_t, ni_list);
+ ni = list_entry(tmp, struct lnet_ni, ni_list);
if (LNET_NIDNET(ni->ni_nid) == net) {
lnet_ni_addref_locked(ni, cpt);
@@ -672,10 +673,10 @@ lnet_net2ni_locked(__u32 net, int cpt)
return NULL;
}
-lnet_ni_t *
+struct lnet_ni *
lnet_net2ni(__u32 net)
{
- lnet_ni_t *ni;
+ struct lnet_ni *ni;
lnet_net_lock(0);
ni = lnet_net2ni_locked(net, 0);
@@ -765,7 +766,7 @@ lnet_islocalnet(__u32 net)
return !!ni;
}
-lnet_ni_t *
+struct lnet_ni *
lnet_nid2ni_locked(lnet_nid_t nid, int cpt)
{
struct lnet_ni *ni;
@@ -774,7 +775,7 @@ lnet_nid2ni_locked(lnet_nid_t nid, int cpt)
LASSERT(cpt != LNET_LOCK_EX);
list_for_each(tmp, &the_lnet.ln_nis) {
- ni = list_entry(tmp, lnet_ni_t, ni_list);
+ ni = list_entry(tmp, struct lnet_ni, ni_list);
if (ni->ni_nid == nid) {
lnet_ni_addref_locked(ni, cpt);
@@ -811,7 +812,7 @@ lnet_count_acceptor_nis(void)
cpt = lnet_net_lock_current();
list_for_each(tmp, &the_lnet.ln_nis) {
- ni = list_entry(tmp, lnet_ni_t, ni_list);
+ ni = list_entry(tmp, struct lnet_ni, ni_list);
if (ni->ni_lnd->lnd_accept)
count++;
@@ -887,7 +888,7 @@ lnet_ping_info_destroy(void)
}
static void
-lnet_ping_event_handler(lnet_event_t *event)
+lnet_ping_event_handler(struct lnet_event *event)
{
struct lnet_ping_info *pinfo = event->md.user_ptr;
@@ -896,12 +897,13 @@ lnet_ping_event_handler(lnet_event_t *event)
}
static int
-lnet_ping_info_setup(struct lnet_ping_info **ppinfo, lnet_handle_md_t *md_handle,
+lnet_ping_info_setup(struct lnet_ping_info **ppinfo,
+ struct lnet_handle_md *md_handle,
int ni_count, bool set_eq)
{
- lnet_process_id_t id = {LNET_NID_ANY, LNET_PID_ANY};
- lnet_handle_me_t me_handle;
- lnet_md_t md = { NULL };
+ struct lnet_process_id id = {LNET_NID_ANY, LNET_PID_ANY};
+ struct lnet_handle_me me_handle;
+ struct lnet_md md = { NULL };
int rc, rc2;
if (set_eq) {
@@ -961,12 +963,13 @@ failed_0:
}
static void
-lnet_ping_md_unlink(struct lnet_ping_info *pinfo, lnet_handle_md_t *md_handle)
+lnet_ping_md_unlink(struct lnet_ping_info *pinfo,
+ struct lnet_handle_md *md_handle)
{
sigset_t blocked = cfs_block_allsigs();
LNetMDUnlink(*md_handle);
- LNetInvalidateHandle(md_handle);
+ LNetInvalidateMDHandle(md_handle);
/* NB md could be busy; this just starts the unlink */
while (pinfo->pi_features != LNET_PING_FEAT_INVAL) {
@@ -982,7 +985,7 @@ static void
lnet_ping_info_install_locked(struct lnet_ping_info *ping_info)
{
struct lnet_ni_status *ns;
- lnet_ni_t *ni;
+ struct lnet_ni *ni;
int i = 0;
list_for_each_entry(ni, &the_lnet.ln_nis, ni_list) {
@@ -1003,10 +1006,11 @@ lnet_ping_info_install_locked(struct lnet_ping_info *ping_info)
}
static void
-lnet_ping_target_update(struct lnet_ping_info *pinfo, lnet_handle_md_t md_handle)
+lnet_ping_target_update(struct lnet_ping_info *pinfo,
+ struct lnet_handle_md md_handle)
{
struct lnet_ping_info *old_pinfo = NULL;
- lnet_handle_md_t old_md;
+ struct lnet_handle_md old_md;
/* switch the NIs to point to the new ping info created */
lnet_net_lock(LNET_LOCK_EX);
@@ -1046,7 +1050,7 @@ lnet_ping_target_fini(void)
}
static int
-lnet_ni_tq_credits(lnet_ni_t *ni)
+lnet_ni_tq_credits(struct lnet_ni *ni)
{
int credits;
@@ -1063,7 +1067,7 @@ lnet_ni_tq_credits(lnet_ni_t *ni)
}
static void
-lnet_ni_unlink_locked(lnet_ni_t *ni)
+lnet_ni_unlink_locked(struct lnet_ni *ni)
{
if (!list_empty(&ni->ni_cptlist)) {
list_del_init(&ni->ni_cptlist);
@@ -1081,8 +1085,8 @@ lnet_clear_zombies_nis_locked(void)
{
int i;
int islo;
- lnet_ni_t *ni;
- lnet_ni_t *temp;
+ struct lnet_ni *ni;
+ struct lnet_ni *temp;
/*
* Now wait for the NI's I just nuked to show up on ln_zombie_nis
@@ -1141,8 +1145,8 @@ lnet_clear_zombies_nis_locked(void)
static void
lnet_shutdown_lndnis(void)
{
- lnet_ni_t *ni;
- lnet_ni_t *temp;
+ struct lnet_ni *ni;
+ struct lnet_ni *temp;
int i;
/* NB called holding the global mutex */
@@ -1170,7 +1174,7 @@ lnet_shutdown_lndnis(void)
/*
* Clear lazy portals and drop delayed messages which hold refs
- * on their lnet_msg_t::msg_rxpeer
+ * on their lnet_msg::msg_rxpeer
*/
for (i = 0; i < the_lnet.ln_nportals; i++)
LNetClearLazyPortal(i);
@@ -1216,7 +1220,7 @@ lnet_startup_lndni(struct lnet_ni *ni, struct lnet_ioctl_config_data *conf)
struct lnet_ioctl_config_lnd_tunables *lnd_tunables = NULL;
int rc = -EINVAL;
int lnd_type;
- lnd_t *lnd;
+ struct lnet_lnd *lnd;
struct lnet_tx_queue *tq;
int i;
@@ -1376,7 +1380,7 @@ lnet_startup_lndnis(struct list_head *nilist)
int ni_count = 0;
while (!list_empty(nilist)) {
- ni = list_entry(nilist->next, lnet_ni_t, ni_list);
+ ni = list_entry(nilist->next, struct lnet_ni, ni_list);
list_del(&ni->ni_list);
rc = lnet_startup_lndni(ni, NULL);
@@ -1433,7 +1437,7 @@ int lnet_lib_init(void)
}
the_lnet.ln_refcount = 0;
- LNetInvalidateHandle(&the_lnet.ln_rc_eqh);
+ LNetInvalidateEQHandle(&the_lnet.ln_rc_eqh);
INIT_LIST_HEAD(&the_lnet.ln_lnds);
INIT_LIST_HEAD(&the_lnet.ln_rcd_zombie);
INIT_LIST_HEAD(&the_lnet.ln_rcd_deathrow);
@@ -1471,7 +1475,7 @@ void lnet_lib_exit(void)
while (!list_empty(&the_lnet.ln_lnds))
lnet_unregister_lnd(list_entry(the_lnet.ln_lnds.next,
- lnd_t, lnd_list));
+ struct lnet_lnd, lnd_list));
lnet_destroy_locks();
}
@@ -1497,7 +1501,7 @@ LNetNIInit(lnet_pid_t requested_pid)
int rc;
int ni_count;
struct lnet_ping_info *pinfo;
- lnet_handle_md_t md_handle;
+ struct lnet_handle_md md_handle;
struct list_head net_head;
INIT_LIST_HEAD(&net_head);
@@ -1738,7 +1742,7 @@ lnet_get_net_config(struct lnet_ioctl_config_data *config)
if (i++ != idx)
continue;
- ni = list_entry(tmp, lnet_ni_t, ni_list);
+ ni = list_entry(tmp, struct lnet_ni, ni_list);
lnet_ni_lock(ni);
lnet_fill_ni_info(ni, config);
lnet_ni_unlock(ni);
@@ -1755,10 +1759,10 @@ lnet_dyn_add_ni(lnet_pid_t requested_pid, struct lnet_ioctl_config_data *conf)
{
char *nets = conf->cfg_config_u.cfg_net.net_intf;
struct lnet_ping_info *pinfo;
- lnet_handle_md_t md_handle;
+ struct lnet_handle_md md_handle;
struct lnet_ni *ni;
struct list_head net_head;
- lnet_remotenet_t *rnet;
+ struct lnet_remotenet *rnet;
int rc;
INIT_LIST_HEAD(&net_head);
@@ -1833,9 +1837,9 @@ failed0:
int
lnet_dyn_del_ni(__u32 net)
{
- lnet_ni_t *ni;
+ struct lnet_ni *ni;
struct lnet_ping_info *pinfo;
- lnet_handle_md_t md_handle;
+ struct lnet_handle_md md_handle;
int rc;
/* don't allow userspace to shutdown the LOLND */
@@ -1883,8 +1887,8 @@ LNetCtl(unsigned int cmd, void *arg)
{
struct libcfs_ioctl_data *data = arg;
struct lnet_ioctl_config_data *config;
- lnet_process_id_t id = {0};
- lnet_ni_t *ni;
+ struct lnet_process_id id = {0};
+ struct lnet_ni *ni;
int rc;
unsigned long secs_passed;
@@ -2055,7 +2059,7 @@ LNetCtl(unsigned int cmd, void *arg)
id.pid = data->ioc_u32[0];
rc = lnet_ping(id, data->ioc_u32[1], /* timeout */
data->ioc_pbuf1,
- data->ioc_plen1 / sizeof(lnet_process_id_t));
+ data->ioc_plen1 / sizeof(struct lnet_process_id));
if (rc < 0)
return rc;
data->ioc_count = rc;
@@ -2078,25 +2082,25 @@ LNetCtl(unsigned int cmd, void *arg)
}
EXPORT_SYMBOL(LNetCtl);
-void LNetDebugPeer(lnet_process_id_t id)
+void LNetDebugPeer(struct lnet_process_id id)
{
lnet_debug_peer(id.nid);
}
EXPORT_SYMBOL(LNetDebugPeer);
/**
- * Retrieve the lnet_process_id_t ID of LNet interface at \a index. Note that
+ * Retrieve the lnet_process_id ID of LNet interface at \a index. Note that
* all interfaces share a same PID, as requested by LNetNIInit().
*
* \param index Index of the interface to look up.
* \param id On successful return, this location will hold the
- * lnet_process_id_t ID of the interface.
+ * lnet_process_id ID of the interface.
*
* \retval 0 If an interface exists at \a index.
* \retval -ENOENT If no interface has been found.
*/
int
-LNetGetId(unsigned int index, lnet_process_id_t *id)
+LNetGetId(unsigned int index, struct lnet_process_id *id)
{
struct lnet_ni *ni;
struct list_head *tmp;
@@ -2111,7 +2115,7 @@ LNetGetId(unsigned int index, lnet_process_id_t *id)
if (index--)
continue;
- ni = list_entry(tmp, lnet_ni_t, ni_list);
+ ni = list_entry(tmp, struct lnet_ni, ni_list);
id->nid = ni->ni_nid;
id->pid = the_lnet.ln_pid;
@@ -2124,31 +2128,20 @@ LNetGetId(unsigned int index, lnet_process_id_t *id)
}
EXPORT_SYMBOL(LNetGetId);
-/**
- * Print a string representation of handle \a h into buffer \a str of
- * \a len bytes.
- */
-void
-LNetSnprintHandle(char *str, int len, lnet_handle_any_t h)
-{
- snprintf(str, len, "%#llx", h.cookie);
-}
-EXPORT_SYMBOL(LNetSnprintHandle);
-
-static int lnet_ping(lnet_process_id_t id, int timeout_ms,
- lnet_process_id_t __user *ids, int n_ids)
+static int lnet_ping(struct lnet_process_id id, int timeout_ms,
+ struct lnet_process_id __user *ids, int n_ids)
{
- lnet_handle_eq_t eqh;
- lnet_handle_md_t mdh;
- lnet_event_t event;
- lnet_md_t md = { NULL };
+ struct lnet_handle_eq eqh;
+ struct lnet_handle_md mdh;
+ struct lnet_event event;
+ struct lnet_md md = { NULL };
int which;
int unlinked = 0;
int replied = 0;
const int a_long_time = 60000; /* mS */
int infosz;
struct lnet_ping_info *info;
- lnet_process_id_t tmpid;
+ struct lnet_process_id tmpid;
int i;
int nob;
int rc;
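The kernel-doc for LNetGetId() earlier in this file documents its 0 / -ENOENT return convention. As a hypothetical usage sketch (the include path and the logging are assumptions, not taken from this patch), a caller can enumerate the local interfaces like so:

#include <linux/printk.h>
#include <lnet/api.h>		/* assumed include path for the LNet API */

void example_list_local_ids(void)
{
	struct lnet_process_id id;
	unsigned int i;

	/* LNetGetId() returns 0 while an interface exists at the index
	 * and -ENOENT once we run past the last one */
	for (i = 0; LNetGetId(i, &id) == 0; i++)
		pr_info("interface %u: nid %#llx pid %u\n",
			i, (unsigned long long)id.nid, id.pid);
}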
diff --git a/drivers/staging/lustre/lnet/lnet/config.c b/drivers/staging/lustre/lnet/lnet/config.c
index 9e2183ff847e..933988da7fe2 100644
--- a/drivers/staging/lustre/lnet/lnet/config.c
+++ b/drivers/staging/lustre/lnet/lnet/config.c
@@ -79,10 +79,10 @@ int
lnet_net_unique(__u32 net, struct list_head *nilist)
{
struct list_head *tmp;
- lnet_ni_t *ni;
+ struct lnet_ni *ni;
list_for_each(tmp, nilist) {
- ni = list_entry(tmp, lnet_ni_t, ni_list);
+ ni = list_entry(tmp, struct lnet_ni, ni_list);
if (LNET_NIDNET(ni->ni_nid) == net)
return 0;
@@ -120,7 +120,7 @@ lnet_ni_free(struct lnet_ni *ni)
LIBCFS_FREE(ni, sizeof(*ni));
}
-lnet_ni_t *
+struct lnet_ni *
lnet_ni_alloc(__u32 net, struct cfs_expr_list *el, struct list_head *nilist)
{
struct lnet_tx_queue *tq;
@@ -390,7 +390,7 @@ lnet_parse_networks(struct list_head *nilist, char *networks)
lnet_syntax("networks", networks, (int)(tmp - tokens), strlen(tmp));
failed:
while (!list_empty(nilist)) {
- ni = list_entry(nilist->next, lnet_ni_t, ni_list);
+ ni = list_entry(nilist->next, struct lnet_ni, ni_list);
list_del(&ni->ni_list);
lnet_ni_free(ni);
diff --git a/drivers/staging/lustre/lnet/lnet/lib-eq.c b/drivers/staging/lustre/lnet/lnet/lib-eq.c
index d05c6cc797f6..ce4b83584e17 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-eq.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-eq.c
@@ -64,9 +64,9 @@
*/
int
LNetEQAlloc(unsigned int count, lnet_eq_handler_t callback,
- lnet_handle_eq_t *handle)
+ struct lnet_handle_eq *handle)
{
- lnet_eq_t *eq;
+ struct lnet_eq *eq;
LASSERT(the_lnet.ln_refcount > 0);
@@ -93,7 +93,7 @@ LNetEQAlloc(unsigned int count, lnet_eq_handler_t callback,
return -ENOMEM;
if (count) {
- LIBCFS_ALLOC(eq->eq_events, count * sizeof(lnet_event_t));
+ LIBCFS_ALLOC(eq->eq_events, count * sizeof(struct lnet_event));
if (!eq->eq_events)
goto failed;
/*
@@ -131,7 +131,7 @@ LNetEQAlloc(unsigned int count, lnet_eq_handler_t callback,
failed:
if (eq->eq_events)
- LIBCFS_FREE(eq->eq_events, count * sizeof(lnet_event_t));
+ LIBCFS_FREE(eq->eq_events, count * sizeof(struct lnet_event));
if (eq->eq_refs)
cfs_percpt_free(eq->eq_refs);
@@ -152,10 +152,10 @@ EXPORT_SYMBOL(LNetEQAlloc);
* \retval -EBUSY If the EQ is still in use by some MDs.
*/
int
-LNetEQFree(lnet_handle_eq_t eqh)
+LNetEQFree(struct lnet_handle_eq eqh)
{
struct lnet_eq *eq;
- lnet_event_t *events = NULL;
+ struct lnet_event *events = NULL;
int **refs = NULL;
int *ref;
int rc = 0;
@@ -201,7 +201,7 @@ LNetEQFree(lnet_handle_eq_t eqh)
lnet_res_unlock(LNET_LOCK_EX);
if (events)
- LIBCFS_FREE(events, size * sizeof(lnet_event_t));
+ LIBCFS_FREE(events, size * sizeof(struct lnet_event));
if (refs)
cfs_percpt_free(refs);
@@ -210,7 +210,7 @@ LNetEQFree(lnet_handle_eq_t eqh)
EXPORT_SYMBOL(LNetEQFree);
void
-lnet_eq_enqueue_event(lnet_eq_t *eq, lnet_event_t *ev)
+lnet_eq_enqueue_event(struct lnet_eq *eq, struct lnet_event *ev)
{
/* MUST called with resource lock hold but w/o lnet_eq_wait_lock */
int index;
@@ -239,10 +239,10 @@ lnet_eq_enqueue_event(lnet_eq_t *eq, lnet_event_t *ev)
}
static int
-lnet_eq_dequeue_event(lnet_eq_t *eq, lnet_event_t *ev)
+lnet_eq_dequeue_event(struct lnet_eq *eq, struct lnet_event *ev)
{
int new_index = eq->eq_deq_seq & (eq->eq_size - 1);
- lnet_event_t *new_event = &eq->eq_events[new_index];
+ struct lnet_event *new_event = &eq->eq_events[new_index];
int rc;
/* must called with lnet_eq_wait_lock hold */
@@ -370,8 +370,8 @@ __must_hold(&the_lnet.ln_eq_wait_lock)
* \retval -ENOENT If there's an invalid handle in \a eventqs.
*/
int
-LNetEQPoll(lnet_handle_eq_t *eventqs, int neq, int timeout_ms,
- lnet_event_t *event, int *which)
+LNetEQPoll(struct lnet_handle_eq *eventqs, int neq, int timeout_ms,
+ struct lnet_event *event, int *which)
{
int wait = 1;
int rc;
@@ -386,7 +386,7 @@ LNetEQPoll(lnet_handle_eq_t *eventqs, int neq, int timeout_ms,
for (;;) {
for (i = 0; i < neq; i++) {
- lnet_eq_t *eq = lnet_handle2eq(&eventqs[i]);
+ struct lnet_eq *eq = lnet_handle2eq(&eventqs[i]);
if (!eq) {
lnet_eq_wait_unlock();
diff --git a/drivers/staging/lustre/lnet/lnet/lib-md.c b/drivers/staging/lustre/lnet/lnet/lib-md.c
index eab53cd57296..f08e944f412b 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-md.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-md.c
@@ -40,11 +40,11 @@
/* must be called with lnet_res_lock held */
void
-lnet_md_unlink(lnet_libmd_t *md)
+lnet_md_unlink(struct lnet_libmd *md)
{
if (!(md->md_flags & LNET_MD_FLAG_ZOMBIE)) {
/* first unlink attempt... */
- lnet_me_t *me = md->md_me;
+ struct lnet_me *me = md->md_me;
md->md_flags |= LNET_MD_FLAG_ZOMBIE;
@@ -84,7 +84,7 @@ lnet_md_unlink(lnet_libmd_t *md)
}
static int
-lnet_md_build(lnet_libmd_t *lmd, lnet_md_t *umd, int unlink)
+lnet_md_build(struct lnet_libmd *lmd, struct lnet_md *umd, int unlink)
{
int i;
unsigned int niov;
@@ -165,7 +165,7 @@ lnet_md_build(lnet_libmd_t *lmd, lnet_md_t *umd, int unlink)
/* must be called with resource lock held */
static int
-lnet_md_link(lnet_libmd_t *md, lnet_handle_eq_t eq_handle, int cpt)
+lnet_md_link(struct lnet_libmd *md, struct lnet_handle_eq eq_handle, int cpt)
{
struct lnet_res_container *container = the_lnet.ln_md_containers[cpt];
@@ -185,7 +185,7 @@ lnet_md_link(lnet_libmd_t *md, lnet_handle_eq_t eq_handle, int cpt)
* maybe there we shouldn't even allow LNET_EQ_NONE!)
* LASSERT(!eq);
*/
- if (!LNetHandleIsInvalid(eq_handle)) {
+ if (!LNetEQHandleIsInvalid(eq_handle)) {
md->md_eq = lnet_handle2eq(&eq_handle);
if (!md->md_eq)
@@ -204,7 +204,7 @@ lnet_md_link(lnet_libmd_t *md, lnet_handle_eq_t eq_handle, int cpt)
/* must be called with lnet_res_lock held */
void
-lnet_md_deconstruct(lnet_libmd_t *lmd, lnet_md_t *umd)
+lnet_md_deconstruct(struct lnet_libmd *lmd, struct lnet_md *umd)
{
/* NB this doesn't copy out all the iov entries so when a
* discontiguous MD is copied out, the target gets to know the
@@ -223,7 +223,7 @@ lnet_md_deconstruct(lnet_libmd_t *lmd, lnet_md_t *umd)
}
static int
-lnet_md_validate(lnet_md_t *umd)
+lnet_md_validate(struct lnet_md *umd)
{
if (!umd->start && umd->length) {
CERROR("MD start pointer can not be NULL with length %u\n",
@@ -267,8 +267,8 @@ lnet_md_validate(lnet_md_t *umd)
* a MD.
*/
int
-LNetMDAttach(lnet_handle_me_t meh, lnet_md_t umd,
- lnet_unlink_t unlink, lnet_handle_md_t *handle)
+LNetMDAttach(struct lnet_handle_me meh, struct lnet_md umd,
+ enum lnet_unlink unlink, struct lnet_handle_md *handle)
{
LIST_HEAD(matches);
LIST_HEAD(drops);
@@ -350,9 +350,10 @@ EXPORT_SYMBOL(LNetMDAttach);
* LNetInvalidateHandle() on it.
*/
int
-LNetMDBind(lnet_md_t umd, lnet_unlink_t unlink, lnet_handle_md_t *handle)
+LNetMDBind(struct lnet_md umd, enum lnet_unlink unlink,
+ struct lnet_handle_md *handle)
{
- lnet_libmd_t *md;
+ struct lnet_libmd *md;
int cpt;
int rc;
@@ -425,10 +426,10 @@ EXPORT_SYMBOL(LNetMDBind);
* \retval -ENOENT If \a mdh does not point to a valid MD object.
*/
int
-LNetMDUnlink(lnet_handle_md_t mdh)
+LNetMDUnlink(struct lnet_handle_md mdh)
{
- lnet_event_t ev;
- lnet_libmd_t *md;
+ struct lnet_event ev;
+ struct lnet_libmd *md;
int cpt;
LASSERT(the_lnet.ln_refcount > 0);
diff --git a/drivers/staging/lustre/lnet/lnet/lib-me.c b/drivers/staging/lustre/lnet/lnet/lib-me.c
index eb796a86e6ab..e9b3eedca4db 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-me.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-me.c
@@ -46,7 +46,7 @@
* \param portal The portal table index where the ME should be attached.
* \param match_id Specifies the match criteria for the process ID of
* the requester. The constants LNET_PID_ANY and LNET_NID_ANY can be
- * used to wildcard either of the identifiers in the lnet_process_id_t
+ * used to wildcard either of the identifiers in the lnet_process_id
* structure.
* \param match_bits,ignore_bits Specify the match criteria to apply
* to the match bits in the incoming request. The ignore bits are used
@@ -70,10 +70,10 @@
*/
int
LNetMEAttach(unsigned int portal,
- lnet_process_id_t match_id,
+ struct lnet_process_id match_id,
__u64 match_bits, __u64 ignore_bits,
- lnet_unlink_t unlink, lnet_ins_pos_t pos,
- lnet_handle_me_t *handle)
+ enum lnet_unlink unlink, enum lnet_ins_pos pos,
+ struct lnet_handle_me *handle)
{
struct lnet_match_table *mtable;
struct lnet_me *me;
@@ -140,11 +140,11 @@ EXPORT_SYMBOL(LNetMEAttach);
* \retval -ENOENT If \a current_meh does not point to a valid match entry.
*/
int
-LNetMEInsert(lnet_handle_me_t current_meh,
- lnet_process_id_t match_id,
+LNetMEInsert(struct lnet_handle_me current_meh,
+ struct lnet_process_id match_id,
__u64 match_bits, __u64 ignore_bits,
- lnet_unlink_t unlink, lnet_ins_pos_t pos,
- lnet_handle_me_t *handle)
+ enum lnet_unlink unlink, enum lnet_ins_pos pos,
+ struct lnet_handle_me *handle)
{
struct lnet_me *current_me;
struct lnet_me *new_me;
@@ -220,11 +220,11 @@ EXPORT_SYMBOL(LNetMEInsert);
* \see LNetMDUnlink() for the discussion on delivering unlink event.
*/
int
-LNetMEUnlink(lnet_handle_me_t meh)
+LNetMEUnlink(struct lnet_handle_me meh)
{
- lnet_me_t *me;
- lnet_libmd_t *md;
- lnet_event_t ev;
+ struct lnet_me *me;
+ struct lnet_libmd *md;
+ struct lnet_event ev;
int cpt;
LASSERT(the_lnet.ln_refcount > 0);
@@ -256,12 +256,12 @@ EXPORT_SYMBOL(LNetMEUnlink);
/* call with lnet_res_lock please */
void
-lnet_me_unlink(lnet_me_t *me)
+lnet_me_unlink(struct lnet_me *me)
{
list_del(&me->me_list);
if (me->me_md) {
- lnet_libmd_t *md = me->me_md;
+ struct lnet_libmd *md = me->me_md;
/* detach MD from portal of this ME */
lnet_ptl_detach_md(me, md);
diff --git a/drivers/staging/lustre/lnet/lnet/lib-move.c b/drivers/staging/lustre/lnet/lnet/lib-move.c
index 6b0be6c23fff..a99c5c0aa672 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-move.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-move.c
@@ -47,8 +47,8 @@ MODULE_PARM_DESC(local_nid_dist_zero, "Reserved");
int
lnet_fail_nid(lnet_nid_t nid, unsigned int threshold)
{
- lnet_test_peer_t *tp;
- lnet_test_peer_t *temp;
+ struct lnet_test_peer *tp;
+ struct lnet_test_peer *temp;
struct list_head *el;
struct list_head *next;
struct list_head cull;
@@ -75,7 +75,7 @@ lnet_fail_nid(lnet_nid_t nid, unsigned int threshold)
lnet_net_lock(0);
list_for_each_safe(el, next, &the_lnet.ln_test_peers) {
- tp = list_entry(el, lnet_test_peer_t, tp_list);
+ tp = list_entry(el, struct lnet_test_peer, tp_list);
if (!tp->tp_threshold || /* needs culling anyway */
nid == LNET_NID_ANY || /* removing all entries */
@@ -97,8 +97,8 @@ lnet_fail_nid(lnet_nid_t nid, unsigned int threshold)
static int
fail_peer(lnet_nid_t nid, int outgoing)
{
- lnet_test_peer_t *tp;
- lnet_test_peer_t *temp;
+ struct lnet_test_peer *tp;
+ struct lnet_test_peer *temp;
struct list_head *el;
struct list_head *next;
struct list_head cull;
@@ -110,7 +110,7 @@ fail_peer(lnet_nid_t nid, int outgoing)
lnet_net_lock(0);
list_for_each_safe(el, next, &the_lnet.ln_test_peers) {
- tp = list_entry(el, lnet_test_peer_t, tp_list);
+ tp = list_entry(el, struct lnet_test_peer, tp_list);
if (!tp->tp_threshold) {
/* zombie entry */
@@ -212,7 +212,7 @@ EXPORT_SYMBOL(lnet_copy_iov2iter);
void
lnet_copy_kiov2iter(struct iov_iter *to,
- unsigned int nsiov, const lnet_kiov_t *siov,
+ unsigned int nsiov, const struct bio_vec *siov,
unsigned int soffset, unsigned int nob)
{
if (!nob)
@@ -298,7 +298,7 @@ lnet_extract_iov(int dst_niov, struct kvec *dst,
EXPORT_SYMBOL(lnet_extract_iov);
unsigned int
-lnet_kiov_nob(unsigned int niov, lnet_kiov_t *kiov)
+lnet_kiov_nob(unsigned int niov, struct bio_vec *kiov)
{
unsigned int nob = 0;
@@ -311,8 +311,8 @@ lnet_kiov_nob(unsigned int niov, lnet_kiov_t *kiov)
EXPORT_SYMBOL(lnet_kiov_nob);
int
-lnet_extract_kiov(int dst_niov, lnet_kiov_t *dst,
- int src_niov, const lnet_kiov_t *src,
+lnet_extract_kiov(int dst_niov, struct bio_vec *dst,
+ int src_niov, const struct bio_vec *src,
unsigned int offset, unsigned int len)
{
/*
@@ -364,12 +364,13 @@ lnet_extract_kiov(int dst_niov, lnet_kiov_t *dst,
EXPORT_SYMBOL(lnet_extract_kiov);
void
-lnet_ni_recv(lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed,
- unsigned int offset, unsigned int mlen, unsigned int rlen)
+lnet_ni_recv(struct lnet_ni *ni, void *private, struct lnet_msg *msg,
+ int delayed, unsigned int offset, unsigned int mlen,
+ unsigned int rlen)
{
unsigned int niov = 0;
struct kvec *iov = NULL;
- lnet_kiov_t *kiov = NULL;
+ struct bio_vec *kiov = NULL;
struct iov_iter to;
int rc;
@@ -409,9 +410,9 @@ lnet_ni_recv(lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed,
}
static void
-lnet_setpayloadbuffer(lnet_msg_t *msg)
+lnet_setpayloadbuffer(struct lnet_msg *msg)
{
- lnet_libmd_t *md = msg->msg_md;
+ struct lnet_libmd *md = msg->msg_md;
LASSERT(msg->msg_len > 0);
LASSERT(!msg->msg_routing);
@@ -428,7 +429,7 @@ lnet_setpayloadbuffer(lnet_msg_t *msg)
}
void
-lnet_prep_send(lnet_msg_t *msg, int type, lnet_process_id_t target,
+lnet_prep_send(struct lnet_msg *msg, int type, struct lnet_process_id target,
unsigned int offset, unsigned int len)
{
msg->msg_type = type;
@@ -449,7 +450,7 @@ lnet_prep_send(lnet_msg_t *msg, int type, lnet_process_id_t target,
}
static void
-lnet_ni_send(lnet_ni_t *ni, lnet_msg_t *msg)
+lnet_ni_send(struct lnet_ni *ni, struct lnet_msg *msg)
{
void *priv = msg->msg_private;
int rc;
@@ -464,7 +465,7 @@ lnet_ni_send(lnet_ni_t *ni, lnet_msg_t *msg)
}
static int
-lnet_ni_eager_recv(lnet_ni_t *ni, lnet_msg_t *msg)
+lnet_ni_eager_recv(struct lnet_ni *ni, struct lnet_msg *msg)
{
int rc;
@@ -488,7 +489,7 @@ lnet_ni_eager_recv(lnet_ni_t *ni, lnet_msg_t *msg)
/* NB: caller shall hold a ref on 'lp' as I'd drop lnet_net_lock */
static void
-lnet_ni_query_locked(lnet_ni_t *ni, lnet_peer_t *lp)
+lnet_ni_query_locked(struct lnet_ni *ni, struct lnet_peer *lp)
{
unsigned long last_alive = 0;
@@ -507,7 +508,7 @@ lnet_ni_query_locked(lnet_ni_t *ni, lnet_peer_t *lp)
/* NB: always called with lnet_net_lock held */
static inline int
-lnet_peer_is_alive(lnet_peer_t *lp, unsigned long now)
+lnet_peer_is_alive(struct lnet_peer *lp, unsigned long now)
{
int alive;
unsigned long deadline;
@@ -541,7 +542,7 @@ lnet_peer_is_alive(lnet_peer_t *lp, unsigned long now)
* may drop the lnet_net_lock
*/
static int
-lnet_peer_alive_locked(lnet_peer_t *lp)
+lnet_peer_alive_locked(struct lnet_peer *lp)
{
unsigned long now = cfs_time_current();
@@ -595,10 +596,10 @@ lnet_peer_alive_locked(lnet_peer_t *lp)
* \retval -ECANCELED If the MD of the message has been unlinked.
*/
static int
-lnet_post_send_locked(lnet_msg_t *msg, int do_send)
+lnet_post_send_locked(struct lnet_msg *msg, int do_send)
{
- lnet_peer_t *lp = msg->msg_txpeer;
- lnet_ni_t *ni = lp->lp_ni;
+ struct lnet_peer *lp = msg->msg_txpeer;
+ struct lnet_ni *ni = lp->lp_ni;
int cpt = msg->msg_tx_cpt;
struct lnet_tx_queue *tq = ni->ni_tx_queues[cpt];
@@ -679,10 +680,10 @@ lnet_post_send_locked(lnet_msg_t *msg, int do_send)
return LNET_CREDIT_OK;
}
-static lnet_rtrbufpool_t *
-lnet_msg2bufpool(lnet_msg_t *msg)
+static struct lnet_rtrbufpool *
+lnet_msg2bufpool(struct lnet_msg *msg)
{
- lnet_rtrbufpool_t *rbp;
+ struct lnet_rtrbufpool *rbp;
int cpt;
LASSERT(msg->msg_rx_committed);
@@ -700,7 +701,7 @@ lnet_msg2bufpool(lnet_msg_t *msg)
}
static int
-lnet_post_routed_recv_locked(lnet_msg_t *msg, int do_recv)
+lnet_post_routed_recv_locked(struct lnet_msg *msg, int do_recv)
{
/*
* lnet_parse is going to lnet_net_unlock immediately after this, so it
@@ -708,9 +709,9 @@ lnet_post_routed_recv_locked(lnet_msg_t *msg, int do_recv)
* I return LNET_CREDIT_WAIT if msg blocked and LNET_CREDIT_OK if
* received or OK to receive
*/
- lnet_peer_t *lp = msg->msg_rxpeer;
- lnet_rtrbufpool_t *rbp;
- lnet_rtrbuf_t *rb;
+ struct lnet_peer *lp = msg->msg_rxpeer;
+ struct lnet_rtrbufpool *rbp;
+ struct lnet_rtrbuf *rb;
LASSERT(!msg->msg_iov);
LASSERT(!msg->msg_kiov);
@@ -758,7 +759,7 @@ lnet_post_routed_recv_locked(lnet_msg_t *msg, int do_recv)
}
LASSERT(!list_empty(&rbp->rbp_bufs));
- rb = list_entry(rbp->rbp_bufs.next, lnet_rtrbuf_t, rb_list);
+ rb = list_entry(rbp->rbp_bufs.next, struct lnet_rtrbuf, rb_list);
list_del(&rb->rb_list);
msg->msg_niov = rbp->rbp_npages;
@@ -776,10 +777,10 @@ lnet_post_routed_recv_locked(lnet_msg_t *msg, int do_recv)
}
void
-lnet_return_tx_credits_locked(lnet_msg_t *msg)
+lnet_return_tx_credits_locked(struct lnet_msg *msg)
{
- lnet_peer_t *txpeer = msg->msg_txpeer;
- lnet_msg_t *msg2;
+ struct lnet_peer *txpeer = msg->msg_txpeer;
+ struct lnet_msg *msg2;
if (msg->msg_txcredit) {
struct lnet_ni *ni = txpeer->lp_ni;
@@ -794,7 +795,7 @@ lnet_return_tx_credits_locked(lnet_msg_t *msg)
tq->tq_credits++;
if (tq->tq_credits <= 0) {
msg2 = list_entry(tq->tq_delayed.next,
- lnet_msg_t, msg_list);
+ struct lnet_msg, msg_list);
list_del(&msg2->msg_list);
LASSERT(msg2->msg_txpeer->lp_ni == ni);
@@ -817,7 +818,7 @@ lnet_return_tx_credits_locked(lnet_msg_t *msg)
txpeer->lp_txcredits++;
if (txpeer->lp_txcredits <= 0) {
msg2 = list_entry(txpeer->lp_txq.next,
- lnet_msg_t, msg_list);
+ struct lnet_msg, msg_list);
list_del(&msg2->msg_list);
LASSERT(msg2->msg_txpeer == txpeer);
@@ -834,14 +835,14 @@ lnet_return_tx_credits_locked(lnet_msg_t *msg)
}
void
-lnet_schedule_blocked_locked(lnet_rtrbufpool_t *rbp)
+lnet_schedule_blocked_locked(struct lnet_rtrbufpool *rbp)
{
- lnet_msg_t *msg;
+ struct lnet_msg *msg;
if (list_empty(&rbp->rbp_msgs))
return;
msg = list_entry(rbp->rbp_msgs.next,
- lnet_msg_t, msg_list);
+ struct lnet_msg, msg_list);
list_del(&msg->msg_list);
(void)lnet_post_routed_recv_locked(msg, 1);
@@ -851,8 +852,8 @@ void
lnet_drop_routed_msgs_locked(struct list_head *list, int cpt)
{
struct list_head drop;
- lnet_msg_t *msg;
- lnet_msg_t *tmp;
+ struct lnet_msg *msg;
+ struct lnet_msg *tmp;
INIT_LIST_HEAD(&drop);
@@ -871,15 +872,15 @@ lnet_drop_routed_msgs_locked(struct list_head *list, int cpt)
}
void
-lnet_return_rx_credits_locked(lnet_msg_t *msg)
+lnet_return_rx_credits_locked(struct lnet_msg *msg)
{
- lnet_peer_t *rxpeer = msg->msg_rxpeer;
- lnet_msg_t *msg2;
+ struct lnet_peer *rxpeer = msg->msg_rxpeer;
+ struct lnet_msg *msg2;
if (msg->msg_rtrcredit) {
/* give back global router credits */
- lnet_rtrbuf_t *rb;
- lnet_rtrbufpool_t *rbp;
+ struct lnet_rtrbuf *rb;
+ struct lnet_rtrbufpool *rbp;
/*
* NB If a msg ever blocks for a buffer in rbp_msgs, it stays
@@ -888,7 +889,7 @@ lnet_return_rx_credits_locked(lnet_msg_t *msg)
*/
LASSERT(msg->msg_kiov);
- rb = list_entry(msg->msg_kiov, lnet_rtrbuf_t, rb_kiov[0]);
+ rb = list_entry(msg->msg_kiov, struct lnet_rtrbuf, rb_kiov[0]);
rbp = rb->rb_pool;
msg->msg_kiov = NULL;
@@ -943,7 +944,7 @@ routing_off:
msg->msg_rx_cpt);
} else if (rxpeer->lp_rtrcredits <= 0) {
msg2 = list_entry(rxpeer->lp_rtrq.next,
- lnet_msg_t, msg_list);
+ struct lnet_msg, msg_list);
list_del(&msg2->msg_list);
(void)lnet_post_routed_recv_locked(msg2, 1);
@@ -956,10 +957,10 @@ routing_off:
}
static int
-lnet_compare_routes(lnet_route_t *r1, lnet_route_t *r2)
+lnet_compare_routes(struct lnet_route *r1, struct lnet_route *r2)
{
- lnet_peer_t *p1 = r1->lr_gateway;
- lnet_peer_t *p2 = r2->lr_gateway;
+ struct lnet_peer *p1 = r1->lr_gateway;
+ struct lnet_peer *p2 = r2->lr_gateway;
int r1_hops = (r1->lr_hops == LNET_UNDEFINED_HOPS) ? 1 : r1->lr_hops;
int r2_hops = (r2->lr_hops == LNET_UNDEFINED_HOPS) ? 1 : r2->lr_hops;
@@ -993,13 +994,14 @@ lnet_compare_routes(lnet_route_t *r1, lnet_route_t *r2)
return -ERANGE;
}
-static lnet_peer_t *
-lnet_find_route_locked(lnet_ni_t *ni, lnet_nid_t target, lnet_nid_t rtr_nid)
+static struct lnet_peer *
+lnet_find_route_locked(struct lnet_ni *ni, lnet_nid_t target,
+ lnet_nid_t rtr_nid)
{
- lnet_remotenet_t *rnet;
- lnet_route_t *route;
- lnet_route_t *best_route;
- lnet_route_t *last_route;
+ struct lnet_remotenet *rnet;
+ struct lnet_route *route;
+ struct lnet_route *best_route;
+ struct lnet_route *last_route;
struct lnet_peer *lp_best;
struct lnet_peer *lp;
int rc;
@@ -1057,7 +1059,7 @@ lnet_find_route_locked(lnet_ni_t *ni, lnet_nid_t target, lnet_nid_t rtr_nid)
}
int
-lnet_send(lnet_nid_t src_nid, lnet_msg_t *msg, lnet_nid_t rtr_nid)
+lnet_send(lnet_nid_t src_nid, struct lnet_msg *msg, lnet_nid_t rtr_nid)
{
lnet_nid_t dst_nid = msg->msg_target.nid;
struct lnet_ni *src_ni;
@@ -1232,7 +1234,7 @@ lnet_send(lnet_nid_t src_nid, lnet_msg_t *msg, lnet_nid_t rtr_nid)
}
void
-lnet_drop_message(lnet_ni_t *ni, int cpt, void *private, unsigned int nob)
+lnet_drop_message(struct lnet_ni *ni, int cpt, void *private, unsigned int nob)
{
lnet_net_lock(cpt);
the_lnet.ln_counters[cpt]->drop_count++;
@@ -1243,7 +1245,7 @@ lnet_drop_message(lnet_ni_t *ni, int cpt, void *private, unsigned int nob)
}
static void
-lnet_recv_put(lnet_ni_t *ni, lnet_msg_t *msg)
+lnet_recv_put(struct lnet_ni *ni, struct lnet_msg *msg)
{
struct lnet_hdr *hdr = &msg->msg_hdr;
@@ -1264,7 +1266,7 @@ lnet_recv_put(lnet_ni_t *ni, lnet_msg_t *msg)
}
static int
-lnet_parse_put(lnet_ni_t *ni, lnet_msg_t *msg)
+lnet_parse_put(struct lnet_ni *ni, struct lnet_msg *msg)
{
struct lnet_hdr *hdr = &msg->msg_hdr;
struct lnet_match_info info;
@@ -1322,7 +1324,7 @@ lnet_parse_put(lnet_ni_t *ni, lnet_msg_t *msg)
}
static int
-lnet_parse_get(lnet_ni_t *ni, lnet_msg_t *msg, int rdma_get)
+lnet_parse_get(struct lnet_ni *ni, struct lnet_msg *msg, int rdma_get)
{
struct lnet_match_info info;
struct lnet_hdr *hdr = &msg->msg_hdr;
@@ -1386,12 +1388,12 @@ lnet_parse_get(lnet_ni_t *ni, lnet_msg_t *msg, int rdma_get)
}
static int
-lnet_parse_reply(lnet_ni_t *ni, lnet_msg_t *msg)
+lnet_parse_reply(struct lnet_ni *ni, struct lnet_msg *msg)
{
void *private = msg->msg_private;
struct lnet_hdr *hdr = &msg->msg_hdr;
- lnet_process_id_t src = {0};
- lnet_libmd_t *md;
+ struct lnet_process_id src = {0};
+ struct lnet_libmd *md;
int rlength;
int mlength;
int cpt;
@@ -1451,11 +1453,11 @@ lnet_parse_reply(lnet_ni_t *ni, lnet_msg_t *msg)
}
static int
-lnet_parse_ack(lnet_ni_t *ni, lnet_msg_t *msg)
+lnet_parse_ack(struct lnet_ni *ni, struct lnet_msg *msg)
{
struct lnet_hdr *hdr = &msg->msg_hdr;
- lnet_process_id_t src = {0};
- lnet_libmd_t *md;
+ struct lnet_process_id src = {0};
+ struct lnet_libmd *md;
int cpt;
src.nid = hdr->src_nid;
@@ -1506,7 +1508,7 @@ lnet_parse_ack(lnet_ni_t *ni, lnet_msg_t *msg)
* \retval -ve error code
*/
int
-lnet_parse_forward_locked(lnet_ni_t *ni, lnet_msg_t *msg)
+lnet_parse_forward_locked(struct lnet_ni *ni, struct lnet_msg *msg)
{
int rc = 0;
@@ -1530,7 +1532,7 @@ lnet_parse_forward_locked(lnet_ni_t *ni, lnet_msg_t *msg)
}
int
-lnet_parse_local(lnet_ni_t *ni, lnet_msg_t *msg)
+lnet_parse_local(struct lnet_ni *ni, struct lnet_msg *msg)
{
int rc;
@@ -1578,8 +1580,8 @@ lnet_msgtyp2str(int type)
void
lnet_print_hdr(struct lnet_hdr *hdr)
{
- lnet_process_id_t src = {0};
- lnet_process_id_t dst = {0};
+ struct lnet_process_id src = {0};
+ struct lnet_process_id dst = {0};
char *type_str = lnet_msgtyp2str(hdr->type);
src.nid = hdr->src_nid;
@@ -1634,7 +1636,7 @@ lnet_print_hdr(struct lnet_hdr *hdr)
}
int
-lnet_parse(lnet_ni_t *ni, struct lnet_hdr *hdr, lnet_nid_t from_nid,
+lnet_parse(struct lnet_ni *ni, struct lnet_hdr *hdr, lnet_nid_t from_nid,
void *private, int rdma_req)
{
int rc = 0;
@@ -1873,10 +1875,10 @@ void
lnet_drop_delayed_msg_list(struct list_head *head, char *reason)
{
while (!list_empty(head)) {
- lnet_process_id_t id = {0};
- lnet_msg_t *msg;
+ struct lnet_process_id id = {0};
+ struct lnet_msg *msg;
- msg = list_entry(head->next, lnet_msg_t, msg_list);
+ msg = list_entry(head->next, struct lnet_msg, msg_list);
list_del(&msg->msg_list);
id.nid = msg->msg_hdr.src_nid;
@@ -1915,10 +1917,10 @@ void
lnet_recv_delayed_msg_list(struct list_head *head)
{
while (!list_empty(head)) {
- lnet_msg_t *msg;
- lnet_process_id_t id;
+ struct lnet_msg *msg;
+ struct lnet_process_id id;
- msg = list_entry(head->next, lnet_msg_t, msg_list);
+ msg = list_entry(head->next, struct lnet_msg, msg_list);
list_del(&msg->msg_list);
/*
@@ -1985,11 +1987,11 @@ lnet_recv_delayed_msg_list(struct list_head *head)
* \retval -ENOMEM Memory allocation failure.
* \retval -ENOENT Invalid MD object.
*
- * \see lnet_event_t::hdr_data and lnet_event_kind_t.
+ * \see lnet_event::hdr_data and lnet_event_kind.
*/
int
-LNetPut(lnet_nid_t self, lnet_handle_md_t mdh, lnet_ack_req_t ack,
- lnet_process_id_t target, unsigned int portal,
+LNetPut(lnet_nid_t self, struct lnet_handle_md mdh, enum lnet_ack_req ack,
+ struct lnet_process_id target, unsigned int portal,
__u64 match_bits, unsigned int offset,
__u64 hdr_data)
{
@@ -2009,7 +2011,7 @@ LNetPut(lnet_nid_t self, lnet_handle_md_t mdh, lnet_ack_req_t ack,
msg = lnet_msg_alloc();
if (!msg) {
- CERROR("Dropping PUT to %s: ENOMEM on lnet_msg_t\n",
+ CERROR("Dropping PUT to %s: ENOMEM on struct lnet_msg\n",
libcfs_id2str(target));
return -ENOMEM;
}
@@ -2072,8 +2074,8 @@ LNetPut(lnet_nid_t self, lnet_handle_md_t mdh, lnet_ack_req_t ack,
}
EXPORT_SYMBOL(LNetPut);
-lnet_msg_t *
-lnet_create_reply_msg(lnet_ni_t *ni, lnet_msg_t *getmsg)
+struct lnet_msg *
+lnet_create_reply_msg(struct lnet_ni *ni, struct lnet_msg *getmsg)
{
/*
* The LND can DMA direct to the GET md (i.e. no REPLY msg). This
@@ -2085,7 +2087,7 @@ lnet_create_reply_msg(lnet_ni_t *ni, lnet_msg_t *getmsg)
*/
struct lnet_msg *msg = lnet_msg_alloc();
struct lnet_libmd *getmd = getmsg->msg_md;
- lnet_process_id_t peer_id = getmsg->msg_target;
+ struct lnet_process_id peer_id = getmsg->msg_target;
int cpt;
LASSERT(!getmsg->msg_target_is_router);
@@ -2151,7 +2153,8 @@ lnet_create_reply_msg(lnet_ni_t *ni, lnet_msg_t *getmsg)
EXPORT_SYMBOL(lnet_create_reply_msg);
void
-lnet_set_reply_msg_len(lnet_ni_t *ni, lnet_msg_t *reply, unsigned int len)
+lnet_set_reply_msg_len(struct lnet_ni *ni, struct lnet_msg *reply,
+ unsigned int len)
{
/*
* Set the REPLY length, now the RDMA that elides the REPLY message has
@@ -2193,8 +2196,8 @@ EXPORT_SYMBOL(lnet_set_reply_msg_len);
* \retval -ENOENT Invalid MD object.
*/
int
-LNetGet(lnet_nid_t self, lnet_handle_md_t mdh,
- lnet_process_id_t target, unsigned int portal,
+LNetGet(lnet_nid_t self, struct lnet_handle_md mdh,
+ struct lnet_process_id target, unsigned int portal,
__u64 match_bits, unsigned int offset)
{
struct lnet_msg *msg;
@@ -2213,7 +2216,7 @@ LNetGet(lnet_nid_t self, lnet_handle_md_t mdh,
msg = lnet_msg_alloc();
if (!msg) {
- CERROR("Dropping GET to %s: ENOMEM on lnet_msg_t\n",
+ CERROR("Dropping GET to %s: ENOMEM on struct lnet_msg\n",
libcfs_id2str(target));
return -ENOMEM;
}
@@ -2288,7 +2291,7 @@ LNetDist(lnet_nid_t dstnid, lnet_nid_t *srcnidp, __u32 *orderp)
{
struct list_head *e;
struct lnet_ni *ni;
- lnet_remotenet_t *rnet;
+ struct lnet_remotenet *rnet;
__u32 dstnet = LNET_NIDNET(dstnid);
int hops;
int cpt;
@@ -2306,7 +2309,7 @@ LNetDist(lnet_nid_t dstnid, lnet_nid_t *srcnidp, __u32 *orderp)
cpt = lnet_net_lock_current();
list_for_each(e, &the_lnet.ln_nis) {
- ni = list_entry(e, lnet_ni_t, ni_list);
+ ni = list_entry(e, struct lnet_ni, ni_list);
if (ni->ni_nid == dstnid) {
if (srcnidp)
@@ -2345,11 +2348,11 @@ LNetDist(lnet_nid_t dstnid, lnet_nid_t *srcnidp, __u32 *orderp)
rn_list = lnet_net2rnethash(dstnet);
list_for_each(e, rn_list) {
- rnet = list_entry(e, lnet_remotenet_t, lrn_list);
+ rnet = list_entry(e, struct lnet_remotenet, lrn_list);
if (rnet->lrn_net == dstnet) {
- lnet_route_t *route;
- lnet_route_t *shortest = NULL;
+ struct lnet_route *route;
+ struct lnet_route *shortest = NULL;
__u32 shortest_hops = LNET_UNDEFINED_HOPS;
__u32 route_hops;
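lib-move.c also drops lnet_kiov_t in favour of the kernel's struct bio_vec (the rb_kiov[npages].bv_page use later in this patch relies on the same fields). A sketch of a byte count over such an array, assuming only the bv_len member of struct bio_vec; it mirrors what lnet_kiov_nob() computes but is not copied from this patch:

	static unsigned int kiov_bytes(unsigned int niov, struct bio_vec *kiov)
	{
		unsigned int nob = 0;

		/* sum the per-fragment lengths */
		while (niov-- > 0)
			nob += (kiov++)->bv_len;

		return nob;
	}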
diff --git a/drivers/staging/lustre/lnet/lnet/lib-msg.c b/drivers/staging/lustre/lnet/lnet/lib-msg.c
index 7ee164e67fbe..008ac503f27d 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-msg.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-msg.c
@@ -39,7 +39,7 @@
#include "../../include/linux/lnet/lib-lnet.h"
void
-lnet_build_unlink_event(lnet_libmd_t *md, lnet_event_t *ev)
+lnet_build_unlink_event(struct lnet_libmd *md, struct lnet_event *ev)
{
memset(ev, 0, sizeof(*ev));
@@ -54,10 +54,10 @@ lnet_build_unlink_event(lnet_libmd_t *md, lnet_event_t *ev)
* Don't need any lock, must be called after lnet_commit_md
*/
void
-lnet_build_msg_event(lnet_msg_t *msg, lnet_event_kind_t ev_type)
+lnet_build_msg_event(struct lnet_msg *msg, enum lnet_event_kind ev_type)
{
struct lnet_hdr *hdr = &msg->msg_hdr;
- lnet_event_t *ev = &msg->msg_ev;
+ struct lnet_event *ev = &msg->msg_ev;
LASSERT(!msg->msg_routing);
@@ -129,10 +129,10 @@ lnet_build_msg_event(lnet_msg_t *msg, lnet_event_kind_t ev_type)
}
void
-lnet_msg_commit(lnet_msg_t *msg, int cpt)
+lnet_msg_commit(struct lnet_msg *msg, int cpt)
{
struct lnet_msg_container *container = the_lnet.ln_msg_containers[cpt];
- lnet_counters_t *counters = the_lnet.ln_counters[cpt];
+ struct lnet_counters *counters = the_lnet.ln_counters[cpt];
/* routed message can be committed for both receiving and sending */
LASSERT(!msg->msg_tx_committed);
@@ -162,10 +162,10 @@ lnet_msg_commit(lnet_msg_t *msg, int cpt)
}
static void
-lnet_msg_decommit_tx(lnet_msg_t *msg, int status)
+lnet_msg_decommit_tx(struct lnet_msg *msg, int status)
{
- lnet_counters_t *counters;
- lnet_event_t *ev = &msg->msg_ev;
+ struct lnet_counters *counters;
+ struct lnet_event *ev = &msg->msg_ev;
LASSERT(msg->msg_tx_committed);
if (status)
@@ -214,10 +214,10 @@ lnet_msg_decommit_tx(lnet_msg_t *msg, int status)
}
static void
-lnet_msg_decommit_rx(lnet_msg_t *msg, int status)
+lnet_msg_decommit_rx(struct lnet_msg *msg, int status)
{
- lnet_counters_t *counters;
- lnet_event_t *ev = &msg->msg_ev;
+ struct lnet_counters *counters;
+ struct lnet_event *ev = &msg->msg_ev;
LASSERT(!msg->msg_tx_committed); /* decommitted or never committed */
LASSERT(msg->msg_rx_committed);
@@ -272,7 +272,7 @@ lnet_msg_decommit_rx(lnet_msg_t *msg, int status)
}
void
-lnet_msg_decommit(lnet_msg_t *msg, int cpt, int status)
+lnet_msg_decommit(struct lnet_msg *msg, int cpt, int status)
{
int cpt2 = cpt;
@@ -306,7 +306,7 @@ lnet_msg_decommit(lnet_msg_t *msg, int cpt, int status)
}
void
-lnet_msg_attach_md(lnet_msg_t *msg, lnet_libmd_t *md,
+lnet_msg_attach_md(struct lnet_msg *msg, struct lnet_libmd *md,
unsigned int offset, unsigned int mlen)
{
/* NB: @offset and @len are only useful for receiving */
@@ -336,9 +336,9 @@ lnet_msg_attach_md(lnet_msg_t *msg, lnet_libmd_t *md,
}
void
-lnet_msg_detach_md(lnet_msg_t *msg, int status)
+lnet_msg_detach_md(struct lnet_msg *msg, int status)
{
- lnet_libmd_t *md = msg->msg_md;
+ struct lnet_libmd *md = msg->msg_md;
int unlink;
/* Now it's safe to drop my caller's ref */
@@ -359,7 +359,7 @@ lnet_msg_detach_md(lnet_msg_t *msg, int status)
}
static int
-lnet_complete_msg_locked(lnet_msg_t *msg, int cpt)
+lnet_complete_msg_locked(struct lnet_msg *msg, int cpt)
{
struct lnet_handle_wire ack_wmd;
int rc;
@@ -437,7 +437,7 @@ lnet_complete_msg_locked(lnet_msg_t *msg, int cpt)
}
void
-lnet_finalize(lnet_ni_t *ni, lnet_msg_t *msg, int status)
+lnet_finalize(struct lnet_ni *ni, struct lnet_msg *msg, int status)
{
struct lnet_msg_container *container;
int my_slot;
@@ -502,7 +502,7 @@ lnet_finalize(lnet_ni_t *ni, lnet_msg_t *msg, int status)
while (!list_empty(&container->msc_finalizing)) {
msg = list_entry(container->msc_finalizing.next,
- lnet_msg_t, msg_list);
+ struct lnet_msg, msg_list);
list_del(&msg->msg_list);
@@ -538,9 +538,10 @@ lnet_msg_container_cleanup(struct lnet_msg_container *container)
return;
while (!list_empty(&container->msc_active)) {
- lnet_msg_t *msg = list_entry(container->msc_active.next,
- lnet_msg_t, msg_activelist);
+ struct lnet_msg *msg;
+ msg = list_entry(container->msc_active.next,
+ struct lnet_msg, msg_activelist);
LASSERT(msg->msg_onactivelist);
msg->msg_onactivelist = 0;
list_del(&msg->msg_activelist);
diff --git a/drivers/staging/lustre/lnet/lnet/lib-ptl.c b/drivers/staging/lustre/lnet/lnet/lib-ptl.c
index fa515af2ac1d..33332724ab94 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-ptl.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-ptl.c
@@ -39,7 +39,7 @@ module_param(portal_rotor, int, 0644);
MODULE_PARM_DESC(portal_rotor, "redirect PUTs to different cpu-partitions");
static int
-lnet_ptl_match_type(unsigned int index, lnet_process_id_t match_id,
+lnet_ptl_match_type(unsigned int index, struct lnet_process_id match_id,
__u64 mbits, __u64 ignore_bits)
{
struct lnet_portal *ptl = the_lnet.ln_portals[index];
@@ -131,7 +131,7 @@ lnet_ptl_disable_mt(struct lnet_portal *ptl, int cpt)
}
static int
-lnet_try_match_md(lnet_libmd_t *md,
+lnet_try_match_md(struct lnet_libmd *md,
struct lnet_match_info *info, struct lnet_msg *msg)
{
/*
@@ -140,7 +140,7 @@ lnet_try_match_md(lnet_libmd_t *md,
*/
unsigned int offset;
unsigned int mlength;
- lnet_me_t *me = md->md_me;
+ struct lnet_me *me = md->md_me;
/* MD exhausted */
if (lnet_md_exhausted(md))
@@ -212,7 +212,7 @@ lnet_try_match_md(lnet_libmd_t *md,
}
static struct lnet_match_table *
-lnet_match2mt(struct lnet_portal *ptl, lnet_process_id_t id, __u64 mbits)
+lnet_match2mt(struct lnet_portal *ptl, struct lnet_process_id id, __u64 mbits)
{
if (LNET_CPT_NUMBER == 1)
return ptl->ptl_mtables[0]; /* the only one */
@@ -223,8 +223,8 @@ lnet_match2mt(struct lnet_portal *ptl, lnet_process_id_t id, __u64 mbits)
}
struct lnet_match_table *
-lnet_mt_of_attach(unsigned int index, lnet_process_id_t id,
- __u64 mbits, __u64 ignore_bits, lnet_ins_pos_t pos)
+lnet_mt_of_attach(unsigned int index, struct lnet_process_id id,
+ __u64 mbits, __u64 ignore_bits, enum lnet_ins_pos pos)
{
struct lnet_portal *ptl;
struct lnet_match_table *mtable;
@@ -334,7 +334,7 @@ lnet_mt_test_exhausted(struct lnet_match_table *mtable, int pos)
bmap = &mtable->mt_exhausted[pos >> LNET_MT_BITS_U64];
pos &= (1 << LNET_MT_BITS_U64) - 1;
- return (*bmap & (1ULL << pos));
+ return (*bmap & BIT(pos));
}
static void
@@ -357,7 +357,7 @@ lnet_mt_set_exhausted(struct lnet_match_table *mtable, int pos, int exhausted)
struct list_head *
lnet_mt_match_head(struct lnet_match_table *mtable,
- lnet_process_id_t id, __u64 mbits)
+ struct lnet_process_id id, __u64 mbits)
{
struct lnet_portal *ptl = the_lnet.ln_portals[mtable->mt_portal];
unsigned long hash = mbits;
@@ -376,8 +376,8 @@ lnet_mt_match_md(struct lnet_match_table *mtable,
struct lnet_match_info *info, struct lnet_msg *msg)
{
struct list_head *head;
- lnet_me_t *me;
- lnet_me_t *tmp;
+ struct lnet_me *me;
+ struct lnet_me *tmp;
int exhausted = 0;
int rc;
@@ -641,7 +641,7 @@ lnet_ptl_match_md(struct lnet_match_info *info, struct lnet_msg *msg)
}
void
-lnet_ptl_detach_md(lnet_me_t *me, lnet_libmd_t *md)
+lnet_ptl_detach_md(struct lnet_me *me, struct lnet_libmd *md)
{
LASSERT(me->me_md == md && md->md_me == me);
@@ -651,14 +651,14 @@ lnet_ptl_detach_md(lnet_me_t *me, lnet_libmd_t *md)
/* called with lnet_res_lock held */
void
-lnet_ptl_attach_md(lnet_me_t *me, lnet_libmd_t *md,
+lnet_ptl_attach_md(struct lnet_me *me, struct lnet_libmd *md,
struct list_head *matches, struct list_head *drops)
{
struct lnet_portal *ptl = the_lnet.ln_portals[me->me_portal];
struct lnet_match_table *mtable;
struct list_head *head;
- lnet_msg_t *tmp;
- lnet_msg_t *msg;
+ struct lnet_msg *tmp;
+ struct lnet_msg *msg;
int exhausted = 0;
int cpt;
@@ -756,7 +756,7 @@ lnet_ptl_cleanup(struct lnet_portal *ptl)
LASSERT(list_empty(&ptl->ptl_msg_stealing));
cfs_percpt_for_each(mtable, i, ptl->ptl_mtables) {
struct list_head *mhash;
- lnet_me_t *me;
+ struct lnet_me *me;
int j;
if (!mtable->mt_mhash) /* uninitialized match-table */
@@ -767,7 +767,7 @@ lnet_ptl_cleanup(struct lnet_portal *ptl)
for (j = 0; j < LNET_MT_HASH_SIZE + 1; j++) {
while (!list_empty(&mhash[j])) {
me = list_entry(mhash[j].next,
- lnet_me_t, me_list);
+ struct lnet_me, me_list);
CERROR("Active ME %p on exit\n", me);
list_del(&me->me_list);
lnet_me_free(me);
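Besides the type renames, the lib-ptl.c hunk swaps an open-coded shift for the kernel's BIT() helper, which expands to (1UL << (nr)); on 64-bit kernels the exhaustion test is therefore the same expression as before:

	/* before */	return (*bmap & (1ULL << pos));
	/* after  */	return (*bmap & BIT(pos));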
diff --git a/drivers/staging/lustre/lnet/lnet/lo.c b/drivers/staging/lustre/lnet/lnet/lo.c
index cb213b8f51cf..a7504b8edf6d 100644
--- a/drivers/staging/lustre/lnet/lnet/lo.c
+++ b/drivers/staging/lustre/lnet/lnet/lo.c
@@ -32,7 +32,7 @@
#include "../../include/linux/lnet/lib-lnet.h"
static int
-lolnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
+lolnd_send(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg)
{
LASSERT(!lntmsg->msg_routing);
LASSERT(!lntmsg->msg_target_is_router);
@@ -41,10 +41,10 @@ lolnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
}
static int
-lolnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg,
+lolnd_recv(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg,
int delayed, struct iov_iter *to, unsigned int rlen)
{
- lnet_msg_t *sendmsg = private;
+ struct lnet_msg *sendmsg = private;
if (lntmsg) { /* not discarding */
if (sendmsg->msg_iov)
@@ -70,7 +70,7 @@ lolnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg,
static int lolnd_instanced;
static void
-lolnd_shutdown(lnet_ni_t *ni)
+lolnd_shutdown(struct lnet_ni *ni)
{
CDEBUG(D_NET, "shutdown\n");
LASSERT(lolnd_instanced);
@@ -79,7 +79,7 @@ lolnd_shutdown(lnet_ni_t *ni)
}
static int
-lolnd_startup(lnet_ni_t *ni)
+lolnd_startup(struct lnet_ni *ni)
{
LASSERT(ni->ni_lnd == &the_lolnd);
LASSERT(!lolnd_instanced);
@@ -88,7 +88,7 @@ lolnd_startup(lnet_ni_t *ni)
return 0;
}
-lnd_t the_lolnd = {
+struct lnet_lnd the_lolnd = {
/* .lnd_list = */ {&the_lolnd.lnd_list, &the_lolnd.lnd_list},
/* .lnd_refcount = */ 0,
/* .lnd_type = */ LOLND,
diff --git a/drivers/staging/lustre/lnet/lnet/nidstrings.c b/drivers/staging/lustre/lnet/lnet/nidstrings.c
index a9fe3e69daae..298533d7b04c 100644
--- a/drivers/staging/lustre/lnet/lnet/nidstrings.c
+++ b/drivers/staging/lustre/lnet/lnet/nidstrings.c
@@ -1226,7 +1226,7 @@ libcfs_str2nid(const char *str)
EXPORT_SYMBOL(libcfs_str2nid);
char *
-libcfs_id2str(lnet_process_id_t id)
+libcfs_id2str(struct lnet_process_id id)
{
char *str = libcfs_next_nidstring();
diff --git a/drivers/staging/lustre/lnet/lnet/peer.c b/drivers/staging/lustre/lnet/lnet/peer.c
index e8061916c241..e62b21f3ab4d 100644
--- a/drivers/staging/lustre/lnet/lnet/peer.c
+++ b/drivers/staging/lustre/lnet/lnet/peer.c
@@ -101,11 +101,12 @@ lnet_peer_tables_destroy(void)
}
static void
-lnet_peer_table_cleanup_locked(lnet_ni_t *ni, struct lnet_peer_table *ptable)
+lnet_peer_table_cleanup_locked(struct lnet_ni *ni,
+ struct lnet_peer_table *ptable)
{
int i;
- lnet_peer_t *lp;
- lnet_peer_t *tmp;
+ struct lnet_peer *lp;
+ struct lnet_peer *tmp;
for (i = 0; i < LNET_PEER_HASH_SIZE; i++) {
list_for_each_entry_safe(lp, tmp, &ptable->pt_hash[i],
@@ -141,11 +142,12 @@ lnet_peer_table_deathrow_wait_locked(struct lnet_peer_table *ptable,
}
static void
-lnet_peer_table_del_rtrs_locked(lnet_ni_t *ni, struct lnet_peer_table *ptable,
+lnet_peer_table_del_rtrs_locked(struct lnet_ni *ni,
+ struct lnet_peer_table *ptable,
int cpt_locked)
{
- lnet_peer_t *lp;
- lnet_peer_t *tmp;
+ struct lnet_peer *lp;
+ struct lnet_peer *tmp;
lnet_nid_t lp_nid;
int i;
@@ -168,12 +170,12 @@ lnet_peer_table_del_rtrs_locked(lnet_ni_t *ni, struct lnet_peer_table *ptable,
}
void
-lnet_peer_tables_cleanup(lnet_ni_t *ni)
+lnet_peer_tables_cleanup(struct lnet_ni *ni)
{
struct lnet_peer_table *ptable;
struct list_head deathrow;
- lnet_peer_t *lp;
- lnet_peer_t *temp;
+ struct lnet_peer *lp;
+ struct lnet_peer *temp;
int i;
INIT_LIST_HEAD(&deathrow);
@@ -214,7 +216,7 @@ lnet_peer_tables_cleanup(lnet_ni_t *ni)
}
void
-lnet_destroy_peer_locked(lnet_peer_t *lp)
+lnet_destroy_peer_locked(struct lnet_peer *lp)
{
struct lnet_peer_table *ptable;
@@ -236,11 +238,11 @@ lnet_destroy_peer_locked(lnet_peer_t *lp)
ptable->pt_zombies--;
}
-lnet_peer_t *
+struct lnet_peer *
lnet_find_peer_locked(struct lnet_peer_table *ptable, lnet_nid_t nid)
{
struct list_head *peers;
- lnet_peer_t *lp;
+ struct lnet_peer *lp;
LASSERT(!the_lnet.ln_shutdown);
@@ -256,11 +258,11 @@ lnet_find_peer_locked(struct lnet_peer_table *ptable, lnet_nid_t nid)
}
int
-lnet_nid2peer_locked(lnet_peer_t **lpp, lnet_nid_t nid, int cpt)
+lnet_nid2peer_locked(struct lnet_peer **lpp, lnet_nid_t nid, int cpt)
{
struct lnet_peer_table *ptable;
- lnet_peer_t *lp = NULL;
- lnet_peer_t *lp2;
+ struct lnet_peer *lp = NULL;
+ struct lnet_peer *lp2;
int cpt2;
int rc = 0;
@@ -280,7 +282,7 @@ lnet_nid2peer_locked(lnet_peer_t **lpp, lnet_nid_t nid, int cpt)
if (!list_empty(&ptable->pt_deathrow)) {
lp = list_entry(ptable->pt_deathrow.next,
- lnet_peer_t, lp_hashlist);
+ struct lnet_peer, lp_hashlist);
list_del(&lp->lp_hashlist);
}
@@ -362,7 +364,7 @@ void
lnet_debug_peer(lnet_nid_t nid)
{
char *aliveness = "NA";
- lnet_peer_t *lp;
+ struct lnet_peer *lp;
int rc;
int cpt;
@@ -399,7 +401,7 @@ lnet_get_peer_info(__u32 peer_index, __u64 *nid,
__u32 *peer_tx_qnob)
{
struct lnet_peer_table *peer_table;
- lnet_peer_t *lp;
+ struct lnet_peer *lp;
bool found = false;
int lncpt, j;
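The peer.c changes are again purely mechanical: list_entry() is container_of(), so only the type name passed to it changes, for example:

	/* was: list_entry(ptable->pt_deathrow.next, lnet_peer_t, lp_hashlist) */
	lp = list_entry(ptable->pt_deathrow.next, struct lnet_peer, lp_hashlist);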
diff --git a/drivers/staging/lustre/lnet/lnet/router.c b/drivers/staging/lustre/lnet/lnet/router.c
index cf22525d7129..12dd1043f9fa 100644
--- a/drivers/staging/lustre/lnet/lnet/router.c
+++ b/drivers/staging/lustre/lnet/lnet/router.c
@@ -53,7 +53,7 @@ module_param(auto_down, int, 0444);
MODULE_PARM_DESC(auto_down, "Automatically mark peers down on comms error");
int
-lnet_peer_buffer_credits(lnet_ni_t *ni)
+lnet_peer_buffer_credits(struct lnet_ni *ni)
{
/* NI option overrides LNet default */
if (ni->ni_peerrtrcredits > 0)
@@ -98,7 +98,7 @@ lnet_peers_start_down(void)
}
void
-lnet_notify_locked(lnet_peer_t *lp, int notifylnd, int alive,
+lnet_notify_locked(struct lnet_peer *lp, int notifylnd, int alive,
unsigned long when)
{
if (time_before(when, lp->lp_timestamp)) { /* out of date information */
@@ -128,7 +128,7 @@ lnet_notify_locked(lnet_peer_t *lp, int notifylnd, int alive,
}
static void
-lnet_ni_notify_locked(lnet_ni_t *ni, lnet_peer_t *lp)
+lnet_ni_notify_locked(struct lnet_ni *ni, struct lnet_peer *lp)
{
int alive;
int notifylnd;
@@ -167,7 +167,7 @@ lnet_ni_notify_locked(lnet_ni_t *ni, lnet_peer_t *lp)
}
static void
-lnet_rtr_addref_locked(lnet_peer_t *lp)
+lnet_rtr_addref_locked(struct lnet_peer *lp)
{
LASSERT(lp->lp_refcount > 0);
LASSERT(lp->lp_rtr_refcount >= 0);
@@ -179,9 +179,9 @@ lnet_rtr_addref_locked(lnet_peer_t *lp)
/* a simple insertion sort */
list_for_each_prev(pos, &the_lnet.ln_routers) {
- lnet_peer_t *rtr = list_entry(pos, lnet_peer_t,
- lp_rtr_list);
+ struct lnet_peer *rtr;
+ rtr = list_entry(pos, struct lnet_peer, lp_rtr_list);
if (rtr->lp_nid < lp->lp_nid)
break;
}
@@ -194,7 +194,7 @@ lnet_rtr_addref_locked(lnet_peer_t *lp)
}
static void
-lnet_rtr_decref_locked(lnet_peer_t *lp)
+lnet_rtr_decref_locked(struct lnet_peer *lp)
{
LASSERT(lp->lp_refcount > 0);
LASSERT(lp->lp_rtr_refcount > 0);
@@ -217,10 +217,10 @@ lnet_rtr_decref_locked(lnet_peer_t *lp)
}
}
-lnet_remotenet_t *
+struct lnet_remotenet *
lnet_find_net_locked(__u32 net)
{
- lnet_remotenet_t *rnet;
+ struct lnet_remotenet *rnet;
struct list_head *tmp;
struct list_head *rn_list;
@@ -228,7 +228,7 @@ lnet_find_net_locked(__u32 net)
rn_list = lnet_net2rnethash(net);
list_for_each(tmp, rn_list) {
- rnet = list_entry(tmp, lnet_remotenet_t, lrn_list);
+ rnet = list_entry(tmp, struct lnet_remotenet, lrn_list);
if (rnet->lrn_net == net)
return rnet;
@@ -241,7 +241,7 @@ static void lnet_shuffle_seed(void)
static int seeded;
__u32 lnd_type, seed[2];
struct timespec64 ts;
- lnet_ni_t *ni;
+ struct lnet_ni *ni;
struct list_head *tmp;
if (seeded)
@@ -254,7 +254,7 @@ static void lnet_shuffle_seed(void)
* the NID for this node gives the most entropy in the low bits
*/
list_for_each(tmp, &the_lnet.ln_nis) {
- ni = list_entry(tmp, lnet_ni_t, ni_list);
+ ni = list_entry(tmp, struct lnet_ni, ni_list);
lnd_type = LNET_NETTYP(LNET_NIDNET(ni->ni_nid));
if (lnd_type != LOLND)
@@ -268,7 +268,7 @@ static void lnet_shuffle_seed(void)
/* NB expects LNET_LOCK held */
static void
-lnet_add_route_to_rnet(lnet_remotenet_t *rnet, lnet_route_t *route)
+lnet_add_route_to_rnet(struct lnet_remotenet *rnet, struct lnet_route *route)
{
unsigned int len = 0;
unsigned int offset = 0;
@@ -299,10 +299,10 @@ lnet_add_route(__u32 net, __u32 hops, lnet_nid_t gateway,
unsigned int priority)
{
struct list_head *e;
- lnet_remotenet_t *rnet;
- lnet_remotenet_t *rnet2;
- lnet_route_t *route;
- lnet_ni_t *ni;
+ struct lnet_remotenet *rnet;
+ struct lnet_remotenet *rnet2;
+ struct lnet_route *route;
+ struct lnet_ni *ni;
int add_route;
int rc;
@@ -368,8 +368,9 @@ lnet_add_route(__u32 net, __u32 hops, lnet_nid_t gateway,
/* Search for a duplicate route (it's a NOOP if it is) */
add_route = 1;
list_for_each(e, &rnet2->lrn_routes) {
- lnet_route_t *route2 = list_entry(e, lnet_route_t, lr_list);
+ struct lnet_route *route2;
+ route2 = list_entry(e, struct lnet_route, lr_list);
if (route2->lr_gateway == route->lr_gateway) {
add_route = 0;
break;
@@ -415,9 +416,9 @@ lnet_add_route(__u32 net, __u32 hops, lnet_nid_t gateway,
int
lnet_check_routes(void)
{
- lnet_remotenet_t *rnet;
- lnet_route_t *route;
- lnet_route_t *route2;
+ struct lnet_remotenet *rnet;
+ struct lnet_route *route;
+ struct lnet_route *route2;
struct list_head *e1;
struct list_head *e2;
int cpt;
@@ -429,7 +430,7 @@ lnet_check_routes(void)
for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE; i++) {
rn_list = &the_lnet.ln_remote_nets_hash[i];
list_for_each(e1, rn_list) {
- rnet = list_entry(e1, lnet_remotenet_t, lrn_list);
+ rnet = list_entry(e1, struct lnet_remotenet, lrn_list);
route2 = NULL;
list_for_each(e2, &rnet->lrn_routes) {
@@ -437,7 +438,7 @@ lnet_check_routes(void)
lnet_nid_t nid2;
int net;
- route = list_entry(e2, lnet_route_t, lr_list);
+ route = list_entry(e2, struct lnet_route, lr_list);
if (!route2) {
route2 = route;
@@ -471,8 +472,8 @@ int
lnet_del_route(__u32 net, lnet_nid_t gw_nid)
{
struct lnet_peer *gateway;
- lnet_remotenet_t *rnet;
- lnet_route_t *route;
+ struct lnet_remotenet *rnet;
+ struct lnet_route *route;
struct list_head *e1;
struct list_head *e2;
int rc = -ENOENT;
@@ -494,14 +495,14 @@ lnet_del_route(__u32 net, lnet_nid_t gw_nid)
again:
list_for_each(e1, rn_list) {
- rnet = list_entry(e1, lnet_remotenet_t, lrn_list);
+ rnet = list_entry(e1, struct lnet_remotenet, lrn_list);
if (!(net == LNET_NIDNET(LNET_NID_ANY) ||
net == rnet->lrn_net))
continue;
list_for_each(e2, &rnet->lrn_routes) {
- route = list_entry(e2, lnet_route_t, lr_list);
+ route = list_entry(e2, struct lnet_route, lr_list);
gateway = route->lr_gateway;
if (!(gw_nid == LNET_NID_ANY ||
@@ -557,7 +558,7 @@ int lnet_get_rtr_pool_cfg(int idx, struct lnet_ioctl_pool_cfg *pool_cfg)
return rc;
for (i = 0; i < LNET_NRBPOOLS; i++) {
- lnet_rtrbufpool_t *rbp;
+ struct lnet_rtrbufpool *rbp;
lnet_net_lock(LNET_LOCK_EX);
cfs_percpt_for_each(rbp, j, the_lnet.ln_rtrpools) {
@@ -587,8 +588,8 @@ lnet_get_route(int idx, __u32 *net, __u32 *hops,
{
struct list_head *e1;
struct list_head *e2;
- lnet_remotenet_t *rnet;
- lnet_route_t *route;
+ struct lnet_remotenet *rnet;
+ struct lnet_route *route;
int cpt;
int i;
struct list_head *rn_list;
@@ -598,10 +599,11 @@ lnet_get_route(int idx, __u32 *net, __u32 *hops,
for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE; i++) {
rn_list = &the_lnet.ln_remote_nets_hash[i];
list_for_each(e1, rn_list) {
- rnet = list_entry(e1, lnet_remotenet_t, lrn_list);
+ rnet = list_entry(e1, struct lnet_remotenet, lrn_list);
list_for_each(e2, &rnet->lrn_routes) {
- route = list_entry(e2, lnet_route_t, lr_list);
+ route = list_entry(e2, struct lnet_route,
+ lr_list);
if (!idx--) {
*net = rnet->lrn_net;
@@ -642,11 +644,11 @@ lnet_swap_pinginfo(struct lnet_ping_info *info)
* networks on that router.
*/
static void
-lnet_parse_rc_info(lnet_rc_data_t *rcd)
+lnet_parse_rc_info(struct lnet_rc_data *rcd)
{
struct lnet_ping_info *info = rcd->rcd_pinginfo;
struct lnet_peer *gw = rcd->rcd_gateway;
- lnet_route_t *rte;
+ struct lnet_route *rte;
if (!gw->lp_alive)
return;
@@ -731,15 +733,15 @@ lnet_parse_rc_info(lnet_rc_data_t *rcd)
}
static void
-lnet_router_checker_event(lnet_event_t *event)
+lnet_router_checker_event(struct lnet_event *event)
{
- lnet_rc_data_t *rcd = event->md.user_ptr;
+ struct lnet_rc_data *rcd = event->md.user_ptr;
struct lnet_peer *lp;
LASSERT(rcd);
if (event->unlinked) {
- LNetInvalidateHandle(&rcd->rcd_mdh);
+ LNetInvalidateMDHandle(&rcd->rcd_mdh);
return;
}
@@ -791,7 +793,7 @@ lnet_router_checker_event(lnet_event_t *event)
static void
lnet_wait_known_routerstate(void)
{
- lnet_peer_t *rtr;
+ struct lnet_peer *rtr;
struct list_head *entry;
int all_known;
@@ -802,7 +804,7 @@ lnet_wait_known_routerstate(void)
all_known = 1;
list_for_each(entry, &the_lnet.ln_routers) {
- rtr = list_entry(entry, lnet_peer_t, lp_rtr_list);
+ rtr = list_entry(entry, struct lnet_peer, lp_rtr_list);
if (!rtr->lp_alive_count) {
all_known = 0;
@@ -821,9 +823,9 @@ lnet_wait_known_routerstate(void)
}
void
-lnet_router_ni_update_locked(lnet_peer_t *gw, __u32 net)
+lnet_router_ni_update_locked(struct lnet_peer *gw, __u32 net)
{
- lnet_route_t *rte;
+ struct lnet_route *rte;
if ((gw->lp_ping_feats & LNET_PING_FEAT_NI_STATUS)) {
list_for_each_entry(rte, &gw->lp_routes, lr_gwlist) {
@@ -838,7 +840,7 @@ lnet_router_ni_update_locked(lnet_peer_t *gw, __u32 net)
static void
lnet_update_ni_status_locked(void)
{
- lnet_ni_t *ni;
+ struct lnet_ni *ni;
time64_t now;
int timeout;
@@ -878,11 +880,11 @@ lnet_update_ni_status_locked(void)
}
static void
-lnet_destroy_rc_data(lnet_rc_data_t *rcd)
+lnet_destroy_rc_data(struct lnet_rc_data *rcd)
{
LASSERT(list_empty(&rcd->rcd_list));
/* detached from network */
- LASSERT(LNetHandleIsInvalid(rcd->rcd_mdh));
+ LASSERT(LNetMDHandleIsInvalid(rcd->rcd_mdh));
if (rcd->rcd_gateway) {
int cpt = rcd->rcd_gateway->lp_cpt;
@@ -898,12 +900,12 @@ lnet_destroy_rc_data(lnet_rc_data_t *rcd)
LIBCFS_FREE(rcd, sizeof(*rcd));
}
-static lnet_rc_data_t *
-lnet_create_rc_data_locked(lnet_peer_t *gateway)
+static struct lnet_rc_data *
+lnet_create_rc_data_locked(struct lnet_peer *gateway)
{
- lnet_rc_data_t *rcd = NULL;
+ struct lnet_rc_data *rcd = NULL;
struct lnet_ping_info *pi;
- lnet_md_t md;
+ struct lnet_md md;
int rc;
int i;
@@ -913,7 +915,7 @@ lnet_create_rc_data_locked(lnet_peer_t *gateway)
if (!rcd)
goto out;
- LNetInvalidateHandle(&rcd->rcd_mdh);
+ LNetInvalidateMDHandle(&rcd->rcd_mdh);
INIT_LIST_HEAD(&rcd->rcd_list);
LIBCFS_ALLOC(pi, LNET_PINGINFO_SIZE);
@@ -933,7 +935,7 @@ lnet_create_rc_data_locked(lnet_peer_t *gateway)
md.options = LNET_MD_TRUNCATE;
md.eq_handle = the_lnet.ln_rc_eqh;
- LASSERT(!LNetHandleIsInvalid(the_lnet.ln_rc_eqh));
+ LASSERT(!LNetEQHandleIsInvalid(the_lnet.ln_rc_eqh));
rc = LNetMDBind(md, LNET_UNLINK, &rcd->rcd_mdh);
if (rc < 0) {
CERROR("Can't bind MD: %d\n", rc);
@@ -957,7 +959,7 @@ lnet_create_rc_data_locked(lnet_peer_t *gateway)
out:
if (rcd) {
- if (!LNetHandleIsInvalid(rcd->rcd_mdh)) {
+ if (!LNetMDHandleIsInvalid(rcd->rcd_mdh)) {
rc = LNetMDUnlink(rcd->rcd_mdh);
LASSERT(!rc);
}
@@ -969,7 +971,7 @@ lnet_create_rc_data_locked(lnet_peer_t *gateway)
}
static int
-lnet_router_check_interval(lnet_peer_t *rtr)
+lnet_router_check_interval(struct lnet_peer *rtr)
{
int secs;
@@ -982,9 +984,9 @@ lnet_router_check_interval(lnet_peer_t *rtr)
}
static void
-lnet_ping_router_locked(lnet_peer_t *rtr)
+lnet_ping_router_locked(struct lnet_peer *rtr)
{
- lnet_rc_data_t *rcd = NULL;
+ struct lnet_rc_data *rcd = NULL;
unsigned long now = cfs_time_current();
int secs;
@@ -1022,8 +1024,8 @@ lnet_ping_router_locked(lnet_peer_t *rtr)
cfs_time_after(now, cfs_time_add(rtr->lp_ping_timestamp,
cfs_time_seconds(secs)))) {
int rc;
- lnet_process_id_t id;
- lnet_handle_md_t mdh;
+ struct lnet_process_id id;
+ struct lnet_handle_md mdh;
id.nid = rtr->lp_nid;
id.pid = LNET_PID_LUSTRE;
@@ -1124,9 +1126,9 @@ lnet_router_checker_stop(void)
static void
lnet_prune_rc_data(int wait_unlink)
{
- lnet_rc_data_t *rcd;
- lnet_rc_data_t *tmp;
- lnet_peer_t *lp;
+ struct lnet_rc_data *rcd;
+ struct lnet_rc_data *tmp;
+ struct lnet_peer *lp;
struct list_head head;
int i = 2;
@@ -1171,7 +1173,7 @@ lnet_prune_rc_data(int wait_unlink)
while (!list_empty(&the_lnet.ln_rcd_zombie)) {
list_for_each_entry_safe(rcd, tmp, &the_lnet.ln_rcd_zombie,
rcd_list) {
- if (LNetHandleIsInvalid(rcd->rcd_mdh))
+ if (LNetMDHandleIsInvalid(rcd->rcd_mdh))
list_move(&rcd->rcd_list, &head);
}
@@ -1182,7 +1184,7 @@ lnet_prune_rc_data(int wait_unlink)
while (!list_empty(&head)) {
rcd = list_entry(head.next,
- lnet_rc_data_t, rcd_list);
+ struct lnet_rc_data, rcd_list);
list_del_init(&rcd->rcd_list);
lnet_destroy_rc_data(rcd);
}
@@ -1232,7 +1234,7 @@ lnet_router_checker_active(void)
static int
lnet_router_checker(void *arg)
{
- lnet_peer_t *rtr;
+ struct lnet_peer *rtr;
struct list_head *entry;
cfs_block_allsigs();
@@ -1247,7 +1249,7 @@ rescan:
version = the_lnet.ln_routers_version;
list_for_each(entry, &the_lnet.ln_routers) {
- rtr = list_entry(entry, lnet_peer_t, lp_rtr_list);
+ rtr = list_entry(entry, struct lnet_peer, lp_rtr_list);
cpt2 = lnet_cpt_of_nid_locked(rtr->lp_nid);
if (cpt != cpt2) {
@@ -1303,9 +1305,9 @@ rescan:
}
void
-lnet_destroy_rtrbuf(lnet_rtrbuf_t *rb, int npages)
+lnet_destroy_rtrbuf(struct lnet_rtrbuf *rb, int npages)
{
- int sz = offsetof(lnet_rtrbuf_t, rb_kiov[npages]);
+ int sz = offsetof(struct lnet_rtrbuf, rb_kiov[npages]);
while (--npages >= 0)
__free_page(rb->rb_kiov[npages].bv_page);
@@ -1313,13 +1315,13 @@ lnet_destroy_rtrbuf(lnet_rtrbuf_t *rb, int npages)
LIBCFS_FREE(rb, sz);
}
-static lnet_rtrbuf_t *
-lnet_new_rtrbuf(lnet_rtrbufpool_t *rbp, int cpt)
+static struct lnet_rtrbuf *
+lnet_new_rtrbuf(struct lnet_rtrbufpool *rbp, int cpt)
{
int npages = rbp->rbp_npages;
- int sz = offsetof(lnet_rtrbuf_t, rb_kiov[npages]);
+ int sz = offsetof(struct lnet_rtrbuf, rb_kiov[npages]);
struct page *page;
- lnet_rtrbuf_t *rb;
+ struct lnet_rtrbuf *rb;
int i;
LIBCFS_CPT_ALLOC(rb, lnet_cpt_table(), cpt, sz);
@@ -1349,12 +1351,12 @@ lnet_new_rtrbuf(lnet_rtrbufpool_t *rbp, int cpt)
}
static void
-lnet_rtrpool_free_bufs(lnet_rtrbufpool_t *rbp, int cpt)
+lnet_rtrpool_free_bufs(struct lnet_rtrbufpool *rbp, int cpt)
{
int npages = rbp->rbp_npages;
struct list_head tmp;
- lnet_rtrbuf_t *rb;
- lnet_rtrbuf_t *temp;
+ struct lnet_rtrbuf *rb;
+ struct lnet_rtrbuf *temp;
if (!rbp->rbp_nbuffers) /* not initialized or already freed */
return;
@@ -1378,10 +1380,10 @@ lnet_rtrpool_free_bufs(lnet_rtrbufpool_t *rbp, int cpt)
}
static int
-lnet_rtrpool_adjust_bufs(lnet_rtrbufpool_t *rbp, int nbufs, int cpt)
+lnet_rtrpool_adjust_bufs(struct lnet_rtrbufpool *rbp, int nbufs, int cpt)
{
struct list_head rb_list;
- lnet_rtrbuf_t *rb;
+ struct lnet_rtrbuf *rb;
int num_rb;
int num_buffers = 0;
int old_req_nbufs;
@@ -1456,7 +1458,7 @@ lnet_rtrpool_adjust_bufs(lnet_rtrbufpool_t *rbp, int nbufs, int cpt)
failed:
while (!list_empty(&rb_list)) {
- rb = list_entry(rb_list.next, lnet_rtrbuf_t, rb_list);
+ rb = list_entry(rb_list.next, struct lnet_rtrbuf, rb_list);
list_del(&rb->rb_list);
lnet_destroy_rtrbuf(rb, npages);
}
@@ -1465,7 +1467,7 @@ failed:
}
static void
-lnet_rtrpool_init(lnet_rtrbufpool_t *rbp, int npages)
+lnet_rtrpool_init(struct lnet_rtrbufpool *rbp, int npages)
{
INIT_LIST_HEAD(&rbp->rbp_msgs);
INIT_LIST_HEAD(&rbp->rbp_bufs);
@@ -1478,7 +1480,7 @@ lnet_rtrpool_init(lnet_rtrbufpool_t *rbp, int npages)
void
lnet_rtrpools_free(int keep_pools)
{
- lnet_rtrbufpool_t *rtrp;
+ struct lnet_rtrbufpool *rtrp;
int i;
if (!the_lnet.ln_rtrpools) /* uninitialized or freed */
@@ -1556,7 +1558,7 @@ lnet_nrb_large_calculate(void)
int
lnet_rtrpools_alloc(int im_a_router)
{
- lnet_rtrbufpool_t *rtrp;
+ struct lnet_rtrbufpool *rtrp;
int nrb_tiny;
int nrb_small;
int nrb_large;
@@ -1591,7 +1593,7 @@ lnet_rtrpools_alloc(int im_a_router)
the_lnet.ln_rtrpools = cfs_percpt_alloc(lnet_cpt_table(),
LNET_NRBPOOLS *
- sizeof(lnet_rtrbufpool_t));
+ sizeof(struct lnet_rtrbufpool));
if (!the_lnet.ln_rtrpools) {
LCONSOLE_ERROR_MSG(0x10c,
"Failed to initialize router buffe pool\n");
@@ -1637,7 +1639,7 @@ lnet_rtrpools_adjust_helper(int tiny, int small, int large)
int nrb = 0;
int rc = 0;
int i;
- lnet_rtrbufpool_t *rtrp;
+ struct lnet_rtrbufpool *rtrp;
/*
* If the provided values for each buffer pool are different than the
@@ -1740,7 +1742,7 @@ lnet_rtrpools_disable(void)
}
int
-lnet_notify(lnet_ni_t *ni, lnet_nid_t nid, int alive, unsigned long when)
+lnet_notify(struct lnet_ni *ni, lnet_nid_t nid, int alive, unsigned long when)
{
struct lnet_peer *lp = NULL;
unsigned long now = cfs_time_current();
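router.c also picks up the new per-type handle helpers: LNetInvalidateHandle() and LNetHandleIsInvalid() become LNetInvalidateMDHandle(), LNetMDHandleIsInvalid() and LNetEQHandleIsInvalid(), one set per handle struct. A hedged sketch of what such helpers plausibly look like, assuming the handle keeps a single wire cookie member (the field and sentinel names below are illustrative, not taken from this patch):

	static inline void LNetInvalidateMDHandle(struct lnet_handle_md *h)
	{
		h->cookie = LNET_WIRE_HANDLE_COOKIE_NONE;	/* assumed sentinel */
	}

	static inline bool LNetMDHandleIsInvalid(struct lnet_handle_md h)
	{
		return h.cookie == LNET_WIRE_HANDLE_COOKIE_NONE;
	}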
diff --git a/drivers/staging/lustre/lnet/lnet/router_proc.c b/drivers/staging/lustre/lnet/lnet/router_proc.c
index a19e1405e3ea..72b80c594108 100644
--- a/drivers/staging/lustre/lnet/lnet/router_proc.c
+++ b/drivers/staging/lustre/lnet/lnet/router_proc.c
@@ -77,7 +77,7 @@ static int __proc_lnet_stats(void *data, int write,
loff_t pos, void __user *buffer, int nob)
{
int rc;
- lnet_counters_t *ctrs;
+ struct lnet_counters *ctrs;
int len;
char *tmpstr;
const int tmpsiz = 256; /* 7 %u and 4 %llu */
@@ -171,8 +171,8 @@ static int proc_lnet_routes(struct ctl_table *table, int write,
} else {
struct list_head *n;
struct list_head *r;
- lnet_route_t *route = NULL;
- lnet_remotenet_t *rnet = NULL;
+ struct lnet_route *route = NULL;
+ struct lnet_remotenet *rnet = NULL;
int skip = off - 1;
struct list_head *rn_list;
int i;
@@ -191,15 +191,16 @@ static int proc_lnet_routes(struct ctl_table *table, int write,
n = rn_list->next;
while (n != rn_list && !route) {
- rnet = list_entry(n, lnet_remotenet_t,
+ rnet = list_entry(n, struct lnet_remotenet,
lrn_list);
r = rnet->lrn_routes.next;
while (r != &rnet->lrn_routes) {
- lnet_route_t *re =
- list_entry(r, lnet_route_t,
- lr_list);
+ struct lnet_route *re;
+
+ re = list_entry(r, struct lnet_route,
+ lr_list);
if (!skip) {
route = re;
break;
@@ -307,9 +308,9 @@ static int proc_lnet_routers(struct ctl_table *table, int write,
r = the_lnet.ln_routers.next;
while (r != &the_lnet.ln_routers) {
- lnet_peer_t *lp = list_entry(r, lnet_peer_t,
- lp_rtr_list);
+ struct lnet_peer *lp;
+ lp = list_entry(r, struct lnet_peer, lp_rtr_list);
if (!skip) {
peer = lp;
break;
@@ -331,7 +332,7 @@ static int proc_lnet_routers(struct ctl_table *table, int write,
int last_ping = cfs_duration_sec(cfs_time_sub(now,
peer->lp_ping_timestamp));
int down_ni = 0;
- lnet_route_t *rtr;
+ struct lnet_route *rtr;
if ((peer->lp_ping_feats &
LNET_PING_FEAT_NI_STATUS)) {
@@ -454,8 +455,10 @@ static int proc_lnet_peers(struct ctl_table *table, int write,
p = ptable->pt_hash[hash].next;
while (p != &ptable->pt_hash[hash]) {
- lnet_peer_t *lp = list_entry(p, lnet_peer_t,
- lp_hashlist);
+ struct lnet_peer *lp;
+
+ lp = list_entry(p, struct lnet_peer,
+ lp_hashlist);
if (!skip) {
peer = lp;
@@ -589,7 +592,7 @@ static int __proc_lnet_buffers(void *data, int write,
goto out; /* I'm not a router */
for (idx = 0; idx < LNET_NRBPOOLS; idx++) {
- lnet_rtrbufpool_t *rbp;
+ struct lnet_rtrbufpool *rbp;
lnet_net_lock(LNET_LOCK_EX);
cfs_percpt_for_each(rbp, i, the_lnet.ln_rtrpools) {
@@ -652,7 +655,7 @@ static int proc_lnet_nis(struct ctl_table *table, int write,
LASSERT(tmpstr + tmpsiz - s > 0);
} else {
struct list_head *n;
- lnet_ni_t *ni = NULL;
+ struct lnet_ni *ni = NULL;
int skip = *ppos - 1;
lnet_net_lock(0);
@@ -660,8 +663,9 @@ static int proc_lnet_nis(struct ctl_table *table, int write,
n = the_lnet.ln_nis.next;
while (n != &the_lnet.ln_nis) {
- lnet_ni_t *a_ni = list_entry(n, lnet_ni_t, ni_list);
+ struct lnet_ni *a_ni;
+ a_ni = list_entry(n, struct lnet_ni, ni_list);
if (!skip) {
ni = a_ni;
break;
diff --git a/drivers/staging/lustre/lnet/selftest/brw_test.c b/drivers/staging/lustre/lnet/selftest/brw_test.c
index b9ac34ecbd53..f8b9175f08d4 100644
--- a/drivers/staging/lustre/lnet/selftest/brw_test.c
+++ b/drivers/staging/lustre/lnet/selftest/brw_test.c
@@ -267,8 +267,8 @@ brw_check_bulk(struct srpc_bulk *bk, int pattern, __u64 magic)
}
static int
-brw_client_prep_rpc(struct sfw_test_unit *tsu,
- lnet_process_id_t dest, struct srpc_client_rpc **rpcpp)
+brw_client_prep_rpc(struct sfw_test_unit *tsu, struct lnet_process_id dest,
+ struct srpc_client_rpc **rpcpp)
{
struct srpc_bulk *bulk = tsu->tsu_private;
struct sfw_test_instance *tsi = tsu->tsu_instance;
diff --git a/drivers/staging/lustre/lnet/selftest/conrpc.c b/drivers/staging/lustre/lnet/selftest/conrpc.c
index c6a683bda75e..da36c55b86d3 100644
--- a/drivers/staging/lustre/lnet/selftest/conrpc.c
+++ b/drivers/staging/lustre/lnet/selftest/conrpc.c
@@ -505,7 +505,7 @@ lstcon_rpc_trans_interpreter(struct lstcon_rpc_trans *trans,
jiffies_to_timeval(dur, &tv);
if (copy_to_user(&ent->rpe_peer, &nd->nd_id,
- sizeof(lnet_process_id_t)) ||
+ sizeof(struct lnet_process_id)) ||
copy_to_user(&ent->rpe_stamp, &tv, sizeof(tv)) ||
copy_to_user(&ent->rpe_state, &nd->nd_state,
sizeof(nd->nd_state)) ||
@@ -699,7 +699,7 @@ lstcon_statrpc_prep(struct lstcon_node *nd, unsigned int feats,
}
static struct lnet_process_id_packed *
-lstcon_next_id(int idx, int nkiov, lnet_kiov_t *kiov)
+lstcon_next_id(int idx, int nkiov, struct bio_vec *kiov)
{
struct lnet_process_id_packed *pid;
int i;
@@ -715,7 +715,7 @@ lstcon_next_id(int idx, int nkiov, lnet_kiov_t *kiov)
static int
lstcon_dstnodes_prep(struct lstcon_group *grp, int idx,
- int dist, int span, int nkiov, lnet_kiov_t *kiov)
+ int dist, int span, int nkiov, struct bio_vec *kiov)
{
struct lnet_process_id_packed *pid;
struct lstcon_ndlink *ndl;
@@ -785,8 +785,7 @@ lstcon_bulkrpc_v0_prep(struct lst_test_bulk_param *param,
struct test_bulk_req *brq = &req->tsr_u.bulk_v0;
brq->blk_opc = param->blk_opc;
- brq->blk_npg = (param->blk_size + PAGE_SIZE - 1) /
- PAGE_SIZE;
+ brq->blk_npg = DIV_ROUND_UP(param->blk_size, PAGE_SIZE);
brq->blk_flags = param->blk_flags;
return 0;
@@ -833,11 +832,9 @@ lstcon_testrpc_prep(struct lstcon_node *nd, int transop, unsigned int feats,
trq = &(*crpc)->crp_rpc->crpc_reqstmsg.msg_body.tes_reqst;
if (transop == LST_TRANS_TSBSRVADD) {
- int ndist = (sgrp->grp_nnode + test->tes_dist - 1) /
- test->tes_dist;
- int nspan = (dgrp->grp_nnode + test->tes_span - 1) /
- test->tes_span;
- int nmax = (ndist + nspan - 1) / nspan;
+ int ndist = DIV_ROUND_UP(sgrp->grp_nnode, test->tes_dist);
+ int nspan = DIV_ROUND_UP(dgrp->grp_nnode, test->tes_span);
+ int nmax = DIV_ROUND_UP(ndist, nspan);
trq->tsr_ndest = 0;
trq->tsr_loop = nmax * test->tes_dist * test->tes_concur;
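The conrpc.c hunks replace three open-coded ceiling divisions with DIV_ROUND_UP(), which the kernel defines as (((n) + (d) - 1) / (d)), so the arithmetic is unchanged; for example, with blk_size = 5000 and PAGE_SIZE = 4096:

	(5000 + 4096 - 1) / 4096	== 2
	DIV_ROUND_UP(5000, 4096)	== 2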
diff --git a/drivers/staging/lustre/lnet/selftest/console.c b/drivers/staging/lustre/lnet/selftest/console.c
index 4e7e5c862c64..d62c448ecf8a 100644
--- a/drivers/staging/lustre/lnet/selftest/console.c
+++ b/drivers/staging/lustre/lnet/selftest/console.c
@@ -65,7 +65,8 @@ lstcon_node_get(struct lstcon_node *nd)
}
static int
-lstcon_node_find(lnet_process_id_t id, struct lstcon_node **ndpp, int create)
+lstcon_node_find(struct lnet_process_id id, struct lstcon_node **ndpp,
+ int create)
{
struct lstcon_ndlink *ndl;
unsigned int idx = LNET_NIDADDR(id.nid) % LST_GLOBAL_HASHSIZE;
@@ -135,7 +136,7 @@ lstcon_node_put(struct lstcon_node *nd)
}
static int
-lstcon_ndlink_find(struct list_head *hash, lnet_process_id_t id,
+lstcon_ndlink_find(struct list_head *hash, struct lnet_process_id id,
struct lstcon_ndlink **ndlpp, int create)
{
unsigned int idx = LNET_NIDADDR(id.nid) % LST_NODE_HASHSIZE;
@@ -283,7 +284,7 @@ lstcon_group_find(const char *name, struct lstcon_group **grpp)
}
static int
-lstcon_group_ndlink_find(struct lstcon_group *grp, lnet_process_id_t id,
+lstcon_group_ndlink_find(struct lstcon_group *grp, struct lnet_process_id id,
struct lstcon_ndlink **ndlpp, int create)
{
int rc;
@@ -397,14 +398,14 @@ lstcon_sesrpc_readent(int transop, struct srpc_msg *msg,
static int
lstcon_group_nodes_add(struct lstcon_group *grp,
- int count, lnet_process_id_t __user *ids_up,
+ int count, struct lnet_process_id __user *ids_up,
unsigned int *featp,
struct list_head __user *result_up)
{
struct lstcon_rpc_trans *trans;
struct lstcon_ndlink *ndl;
struct lstcon_group *tmp;
- lnet_process_id_t id;
+ struct lnet_process_id id;
int i;
int rc;
@@ -465,13 +466,13 @@ lstcon_group_nodes_add(struct lstcon_group *grp,
static int
lstcon_group_nodes_remove(struct lstcon_group *grp,
- int count, lnet_process_id_t __user *ids_up,
+ int count, struct lnet_process_id __user *ids_up,
struct list_head __user *result_up)
{
struct lstcon_rpc_trans *trans;
struct lstcon_ndlink *ndl;
struct lstcon_group *tmp;
- lnet_process_id_t id;
+ struct lnet_process_id id;
int rc;
int i;
@@ -543,9 +544,8 @@ lstcon_group_add(char *name)
}
int
-lstcon_nodes_add(char *name, int count, lnet_process_id_t __user *ids_up,
- unsigned int *featp,
- struct list_head __user *result_up)
+lstcon_nodes_add(char *name, int count, struct lnet_process_id __user *ids_up,
+ unsigned int *featp, struct list_head __user *result_up)
{
struct lstcon_group *grp;
int rc;
@@ -650,7 +650,8 @@ lstcon_group_clean(char *name, int args)
}
int
-lstcon_nodes_remove(char *name, int count, lnet_process_id_t __user *ids_up,
+lstcon_nodes_remove(char *name, int count,
+ struct lnet_process_id __user *ids_up,
struct list_head __user *result_up)
{
struct lstcon_group *grp = NULL;
@@ -1469,14 +1470,14 @@ lstcon_statrpc_readent(int transop, struct srpc_msg *msg,
struct srpc_stat_reply *rep = &msg->msg_body.stat_reply;
struct sfw_counters __user *sfwk_stat;
struct srpc_counters __user *srpc_stat;
- lnet_counters_t __user *lnet_stat;
+ struct lnet_counters __user *lnet_stat;
if (rep->str_status)
return 0;
sfwk_stat = (struct sfw_counters __user *)&ent_up->rpe_payload[0];
srpc_stat = (struct srpc_counters __user *)(sfwk_stat + 1);
- lnet_stat = (lnet_counters_t __user *)(srpc_stat + 1);
+ lnet_stat = (struct lnet_counters __user *)(srpc_stat + 1);
if (copy_to_user(sfwk_stat, &rep->str_fw, sizeof(*sfwk_stat)) ||
copy_to_user(srpc_stat, &rep->str_rpc, sizeof(*srpc_stat)) ||
@@ -1533,12 +1534,12 @@ lstcon_group_stat(char *grp_name, int timeout,
}
int
-lstcon_nodes_stat(int count, lnet_process_id_t __user *ids_up,
+lstcon_nodes_stat(int count, struct lnet_process_id __user *ids_up,
int timeout, struct list_head __user *result_up)
{
struct lstcon_ndlink *ndl;
struct lstcon_group *tmp;
- lnet_process_id_t id;
+ struct lnet_process_id id;
int i;
int rc;
@@ -1644,11 +1645,11 @@ lstcon_group_debug(int timeout, char *name,
}
int
-lstcon_nodes_debug(int timeout,
- int count, lnet_process_id_t __user *ids_up,
+lstcon_nodes_debug(int timeout, int count,
+ struct lnet_process_id __user *ids_up,
struct list_head __user *result_up)
{
- lnet_process_id_t id;
+ struct lnet_process_id id;
struct lstcon_ndlink *ndl;
struct lstcon_group *grp;
int i;
@@ -1697,7 +1698,7 @@ lstcon_session_match(struct lst_sid sid)
static void
lstcon_new_session_id(struct lst_sid *sid)
{
- lnet_process_id_t id;
+ struct lnet_process_id id;
LASSERT(console_session.ses_state == LST_SESSION_NONE);
diff --git a/drivers/staging/lustre/lnet/selftest/console.h b/drivers/staging/lustre/lnet/selftest/console.h
index 05b4b7013d2e..e3e11aa52526 100644
--- a/drivers/staging/lustre/lnet/selftest/console.h
+++ b/drivers/staging/lustre/lnet/selftest/console.h
@@ -48,7 +48,7 @@
/* node descriptor */
struct lstcon_node {
- lnet_process_id_t nd_id; /* id of the node */
+ struct lnet_process_id nd_id; /* id of the node */
int nd_ref; /* reference count */
int nd_state; /* state of the node */
int nd_timeout; /* session timeout */
@@ -180,7 +180,7 @@ lstcon_trans_stat(void)
}
static inline struct list_head *
-lstcon_id2hash(lnet_process_id_t id, struct list_head *hash)
+lstcon_id2hash(struct lnet_process_id id, struct list_head *hash)
{
unsigned int idx = LNET_NIDADDR(id.nid) % LST_NODE_HASHSIZE;
@@ -203,15 +203,17 @@ int lstcon_batch_debug(int timeout, char *name,
int client, struct list_head __user *result_up);
int lstcon_group_debug(int timeout, char *name,
struct list_head __user *result_up);
-int lstcon_nodes_debug(int timeout, int nnd, lnet_process_id_t __user *nds_up,
+int lstcon_nodes_debug(int timeout, int nnd,
+ struct lnet_process_id __user *nds_up,
struct list_head __user *result_up);
int lstcon_group_add(char *name);
int lstcon_group_del(char *name);
int lstcon_group_clean(char *name, int args);
int lstcon_group_refresh(char *name, struct list_head __user *result_up);
-int lstcon_nodes_add(char *name, int nnd, lnet_process_id_t __user *nds_up,
+int lstcon_nodes_add(char *name, int nnd, struct lnet_process_id __user *nds_up,
unsigned int *featp, struct list_head __user *result_up);
-int lstcon_nodes_remove(char *name, int nnd, lnet_process_id_t __user *nds_up,
+int lstcon_nodes_remove(char *name, int nnd,
+ struct lnet_process_id __user *nds_up,
struct list_head __user *result_up);
int lstcon_group_info(char *name, struct lstcon_ndlist_ent __user *gent_up,
int *index_p, int *ndent_p,
@@ -232,7 +234,7 @@ int lstcon_batch_info(char *name, struct lstcon_test_batch_ent __user *ent_up,
int *ndent_p, struct lstcon_node_ent __user *dents_up);
int lstcon_group_stat(char *grp_name, int timeout,
struct list_head __user *result_up);
-int lstcon_nodes_stat(int count, lnet_process_id_t __user *ids_up,
+int lstcon_nodes_stat(int count, struct lnet_process_id __user *ids_up,
int timeout, struct list_head __user *result_up);
int lstcon_test_add(char *batch_name, int type, int loop,
int concur, int dist, int span,
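/*
 * A minimal, self-contained sketch of the typedef-to-struct conversion
 * applied in the console.c/console.h hunks above (lnet_process_id_t and
 * friends become their struct tags).  The field widths and the print_peer()
 * helper are assumptions for illustration, not the real LNet headers.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t lnet_nid_t;            /* assumed width for the sketch */
typedef uint32_t lnet_pid_t;

struct lnet_process_id {                /* was: typedef struct { ... } lnet_process_id_t */
        lnet_nid_t nid;
        lnet_pid_t pid;
};

/* Callers now spell out the struct tag instead of the old _t alias. */
static void print_peer(struct lnet_process_id id)
{
        printf("peer nid=%llu pid=%u\n",
               (unsigned long long)id.nid, (unsigned)id.pid);
}

int main(void)
{
        struct lnet_process_id id = { .nid = 12345, .pid = 0 };

        print_peer(id);
        return 0;
}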
diff --git a/drivers/staging/lustre/lnet/selftest/framework.c b/drivers/staging/lustre/lnet/selftest/framework.c
index 9dd4e1a70329..ef27bfffc230 100644
--- a/drivers/staging/lustre/lnet/selftest/framework.c
+++ b/drivers/staging/lustre/lnet/selftest/framework.c
@@ -899,7 +899,7 @@ sfw_test_rpc_done(struct srpc_client_rpc *rpc)
}
int
-sfw_create_test_rpc(struct sfw_test_unit *tsu, lnet_process_id_t peer,
+sfw_create_test_rpc(struct sfw_test_unit *tsu, struct lnet_process_id peer,
unsigned int features, int nblk, int blklen,
struct srpc_client_rpc **rpcpp)
{
@@ -1379,7 +1379,7 @@ sfw_bulk_ready(struct srpc_server_rpc *rpc, int status)
}
struct srpc_client_rpc *
-sfw_create_rpc(lnet_process_id_t peer, int service,
+sfw_create_rpc(struct lnet_process_id peer, int service,
unsigned int features, int nbulkiov, int bulklen,
void (*done)(struct srpc_client_rpc *), void *priv)
{
diff --git a/drivers/staging/lustre/lnet/selftest/ping_test.c b/drivers/staging/lustre/lnet/selftest/ping_test.c
index b9601b00a273..9653ac6fd619 100644
--- a/drivers/staging/lustre/lnet/selftest/ping_test.c
+++ b/drivers/staging/lustre/lnet/selftest/ping_test.c
@@ -82,7 +82,7 @@ ping_client_fini(struct sfw_test_instance *tsi)
}
static int
-ping_client_prep_rpc(struct sfw_test_unit *tsu, lnet_process_id_t dest,
+ping_client_prep_rpc(struct sfw_test_unit *tsu, struct lnet_process_id dest,
struct srpc_client_rpc **rpc)
{
struct srpc_ping_reqst *req;
diff --git a/drivers/staging/lustre/lnet/selftest/rpc.c b/drivers/staging/lustre/lnet/selftest/rpc.c
index 87fe366f8f70..77c222cca230 100644
--- a/drivers/staging/lustre/lnet/selftest/rpc.c
+++ b/drivers/staging/lustre/lnet/selftest/rpc.c
@@ -53,7 +53,7 @@ enum srpc_state {
static struct smoketest_rpc {
spinlock_t rpc_glock; /* global lock */
struct srpc_service *rpc_services[SRPC_SERVICE_MAX_ID + 1];
- lnet_handle_eq_t rpc_lnet_eq; /* _the_ LNet event queue */
+ struct lnet_handle_eq rpc_lnet_eq; /* _the_ LNet event queue */
enum srpc_state rpc_state;
struct srpc_counters rpc_counters;
__u64 rpc_matchbits; /* matchbits counter */
@@ -185,7 +185,7 @@ srpc_init_server_rpc(struct srpc_server_rpc *rpc,
rpc->srpc_reqstbuf = buffer;
rpc->srpc_peer = buffer->buf_peer;
rpc->srpc_self = buffer->buf_self;
- LNetInvalidateHandle(&rpc->srpc_replymdh);
+ LNetInvalidateMDHandle(&rpc->srpc_replymdh);
}
static void
@@ -355,12 +355,12 @@ srpc_remove_service(struct srpc_service *sv)
static int
srpc_post_passive_rdma(int portal, int local, __u64 matchbits, void *buf,
- int len, int options, lnet_process_id_t peer,
- lnet_handle_md_t *mdh, struct srpc_event *ev)
+ int len, int options, struct lnet_process_id peer,
+ struct lnet_handle_md *mdh, struct srpc_event *ev)
{
int rc;
- lnet_md_t md;
- lnet_handle_me_t meh;
+ struct lnet_md md;
+ struct lnet_handle_me meh;
rc = LNetMEAttach(portal, peer, matchbits, 0, LNET_UNLINK,
local ? LNET_INS_LOCAL : LNET_INS_AFTER, &meh);
@@ -394,11 +394,12 @@ srpc_post_passive_rdma(int portal, int local, __u64 matchbits, void *buf,
static int
srpc_post_active_rdma(int portal, __u64 matchbits, void *buf, int len,
- int options, lnet_process_id_t peer, lnet_nid_t self,
- lnet_handle_md_t *mdh, struct srpc_event *ev)
+ int options, struct lnet_process_id peer,
+ lnet_nid_t self, struct lnet_handle_md *mdh,
+ struct srpc_event *ev)
{
int rc;
- lnet_md_t md;
+ struct lnet_md md;
md.user_ptr = ev;
md.start = buf;
@@ -448,9 +449,9 @@ srpc_post_active_rdma(int portal, __u64 matchbits, void *buf, int len,
static int
srpc_post_passive_rqtbuf(int service, int local, void *buf, int len,
- lnet_handle_md_t *mdh, struct srpc_event *ev)
+ struct lnet_handle_md *mdh, struct srpc_event *ev)
{
- lnet_process_id_t any = { 0 };
+ struct lnet_process_id any = { 0 };
any.nid = LNET_NID_ANY;
any.pid = LNET_PID_ANY;
@@ -468,7 +469,7 @@ __must_hold(&scd->scd_lock)
struct srpc_msg *msg = &buf->buf_msg;
int rc;
- LNetInvalidateHandle(&buf->buf_mdh);
+ LNetInvalidateMDHandle(&buf->buf_mdh);
list_add(&buf->buf_list, &scd->scd_buf_posted);
scd->scd_buf_nposted++;
spin_unlock(&scd->scd_lock);
@@ -1310,7 +1311,7 @@ abort:
}
struct srpc_client_rpc *
-srpc_create_client_rpc(lnet_process_id_t peer, int service,
+srpc_create_client_rpc(struct lnet_process_id peer, int service,
int nbulkiov, int bulklen,
void (*rpc_done)(struct srpc_client_rpc *),
void (*rpc_fini)(struct srpc_client_rpc *), void *priv)
@@ -1408,7 +1409,7 @@ srpc_send_reply(struct srpc_server_rpc *rpc)
/* when in kernel always called with LNET_LOCK() held, and in thread context */
static void
-srpc_lnet_ev_handler(lnet_event_t *ev)
+srpc_lnet_ev_handler(struct lnet_event *ev)
{
struct srpc_service_cd *scd;
struct srpc_event *rpcev = ev->md.user_ptr;
@@ -1622,7 +1623,7 @@ srpc_startup(void)
srpc_data.rpc_state = SRPC_STATE_NI_INIT;
- LNetInvalidateHandle(&srpc_data.rpc_lnet_eq);
+ LNetInvalidateEQHandle(&srpc_data.rpc_lnet_eq);
rc = LNetEQAlloc(0, srpc_lnet_ev_handler, &srpc_data.rpc_lnet_eq);
if (rc) {
CERROR("LNetEQAlloc() has failed: %d\n", rc);
diff --git a/drivers/staging/lustre/lnet/selftest/rpc.h b/drivers/staging/lustre/lnet/selftest/rpc.h
index 418c9c96abe6..a765537a79c4 100644
--- a/drivers/staging/lustre/lnet/selftest/rpc.h
+++ b/drivers/staging/lustre/lnet/selftest/rpc.h
@@ -163,7 +163,7 @@ struct srpc_stat_reply {
struct lst_sid str_sid;
struct sfw_counters str_fw;
struct srpc_counters str_rpc;
- lnet_counters_t str_lnet;
+ struct lnet_counters str_lnet;
} WIRE_ATTR;
struct test_bulk_req {
diff --git a/drivers/staging/lustre/lnet/selftest/selftest.h b/drivers/staging/lustre/lnet/selftest/selftest.h
index f25948087ee0..b614e6f23a70 100644
--- a/drivers/staging/lustre/lnet/selftest/selftest.h
+++ b/drivers/staging/lustre/lnet/selftest/selftest.h
@@ -144,7 +144,7 @@ enum srpc_event_type {
/* RPC event */
struct srpc_event {
enum srpc_event_type ev_type; /* what's up */
- lnet_event_kind_t ev_lnet; /* LNet event type */
+ enum lnet_event_kind ev_lnet; /* LNet event type */
int ev_fired; /* LNet event fired? */
int ev_status; /* LNet event status */
void *ev_data; /* owning server/client RPC */
@@ -153,19 +153,19 @@ struct srpc_event {
/* bulk descriptor */
struct srpc_bulk {
int bk_len; /* len of bulk data */
- lnet_handle_md_t bk_mdh;
+ struct lnet_handle_md bk_mdh;
int bk_sink; /* sink/source */
int bk_niov; /* # iov in bk_iovs */
- lnet_kiov_t bk_iovs[0];
+ struct bio_vec bk_iovs[0];
};
/* message buffer descriptor */
struct srpc_buffer {
struct list_head buf_list; /* chain on srpc_service::*_msgq */
struct srpc_msg buf_msg;
- lnet_handle_md_t buf_mdh;
+ struct lnet_handle_md buf_mdh;
lnet_nid_t buf_self;
- lnet_process_id_t buf_peer;
+ struct lnet_process_id buf_peer;
};
struct swi_workitem;
@@ -186,9 +186,9 @@ struct srpc_server_rpc {
struct swi_workitem srpc_wi;
struct srpc_event srpc_ev; /* bulk/reply event */
lnet_nid_t srpc_self;
- lnet_process_id_t srpc_peer;
+ struct lnet_process_id srpc_peer;
struct srpc_msg srpc_replymsg;
- lnet_handle_md_t srpc_replymdh;
+ struct lnet_handle_md srpc_replymdh;
struct srpc_buffer *srpc_reqstbuf;
struct srpc_bulk *srpc_bulk;
@@ -206,7 +206,7 @@ struct srpc_client_rpc {
int crpc_timeout; /* # seconds to wait for reply */
struct stt_timer crpc_timer;
struct swi_workitem crpc_wi;
- lnet_process_id_t crpc_dest;
+ struct lnet_process_id crpc_dest;
void (*crpc_done)(struct srpc_client_rpc *);
void (*crpc_fini)(struct srpc_client_rpc *);
@@ -225,8 +225,8 @@ struct srpc_client_rpc {
/* bulk, request(reqst), and reply exchanged on wire */
struct srpc_msg crpc_reqstmsg;
struct srpc_msg crpc_replymsg;
- lnet_handle_md_t crpc_reqstmdh;
- lnet_handle_md_t crpc_replymdh;
+ struct lnet_handle_md crpc_reqstmdh;
+ struct lnet_handle_md crpc_replymdh;
struct srpc_bulk crpc_bulk;
};
@@ -355,7 +355,7 @@ struct sfw_test_client_ops {
* client
*/
int (*tso_prep_rpc)(struct sfw_test_unit *tsu,
- lnet_process_id_t dest,
+ struct lnet_process_id dest,
struct srpc_client_rpc **rpc); /* prep a tests rpc */
void (*tso_done_rpc)(struct sfw_test_unit *tsu,
struct srpc_client_rpc *rpc); /* done a test rpc */
@@ -392,8 +392,8 @@ struct sfw_test_instance {
};
/*
- * XXX: trailing (PAGE_SIZE % sizeof(lnet_process_id_t)) bytes at the end of
- * pages are not used
+ * XXX: trailing (PAGE_SIZE % sizeof(struct lnet_process_id)) bytes at the end
+ * of pages are not used
*/
#define SFW_MAX_CONCUR LST_MAX_CONCUR
#define SFW_ID_PER_PAGE (PAGE_SIZE / sizeof(struct lnet_process_id_packed))
@@ -402,7 +402,7 @@ struct sfw_test_instance {
struct sfw_test_unit {
struct list_head tsu_list; /* chain on lst_test_instance */
- lnet_process_id_t tsu_dest; /* id of dest node */
+ struct lnet_process_id tsu_dest; /* id of dest node */
int tsu_loop; /* loop count of the test */
struct sfw_test_instance *tsu_instance; /* pointer to test instance */
void *tsu_private; /* private data */
@@ -416,11 +416,11 @@ struct sfw_test_case {
};
struct srpc_client_rpc *
-sfw_create_rpc(lnet_process_id_t peer, int service,
+sfw_create_rpc(struct lnet_process_id peer, int service,
unsigned int features, int nbulkiov, int bulklen,
void (*done)(struct srpc_client_rpc *), void *priv);
int sfw_create_test_rpc(struct sfw_test_unit *tsu,
- lnet_process_id_t peer, unsigned int features,
+ struct lnet_process_id peer, unsigned int features,
int nblk, int blklen, struct srpc_client_rpc **rpc);
void sfw_abort_rpc(struct srpc_client_rpc *rpc);
void sfw_post_rpc(struct srpc_client_rpc *rpc);
@@ -434,7 +434,7 @@ int sfw_make_session(struct srpc_mksn_reqst *request,
struct srpc_mksn_reply *reply);
struct srpc_client_rpc *
-srpc_create_client_rpc(lnet_process_id_t peer, int service,
+srpc_create_client_rpc(struct lnet_process_id peer, int service,
int nbulkiov, int bulklen,
void (*rpc_done)(struct srpc_client_rpc *),
void (*rpc_fini)(struct srpc_client_rpc *), void *priv);
@@ -522,7 +522,7 @@ srpc_destroy_client_rpc(struct srpc_client_rpc *rpc)
}
static inline void
-srpc_init_client_rpc(struct srpc_client_rpc *rpc, lnet_process_id_t peer,
+srpc_init_client_rpc(struct srpc_client_rpc *rpc, struct lnet_process_id peer,
int service, int nbulkiov, int bulklen,
void (*rpc_done)(struct srpc_client_rpc *),
void (*rpc_fini)(struct srpc_client_rpc *), void *priv)
@@ -545,9 +545,9 @@ srpc_init_client_rpc(struct srpc_client_rpc *rpc, lnet_process_id_t peer,
rpc->crpc_bulk.bk_niov = nbulkiov;
rpc->crpc_done = rpc_done;
rpc->crpc_fini = rpc_fini;
- LNetInvalidateHandle(&rpc->crpc_reqstmdh);
- LNetInvalidateHandle(&rpc->crpc_replymdh);
- LNetInvalidateHandle(&rpc->crpc_bulk.bk_mdh);
+ LNetInvalidateMDHandle(&rpc->crpc_reqstmdh);
+ LNetInvalidateMDHandle(&rpc->crpc_replymdh);
+ LNetInvalidateMDHandle(&rpc->crpc_bulk.bk_mdh);
/* no event is expected at this point */
rpc->crpc_bulkev.ev_fired = 1;
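/*
 * The reworded comment above notes that the trailing PAGE_SIZE % sizeof(id)
 * bytes of every ID page go unused.  A quick arithmetic check; the 4 KiB
 * page and 12-byte packed ID sizes are assumptions for illustration only.
 */
#include <stdio.h>

int main(void)
{
        unsigned long page_size = 4096;                 /* assumed PAGE_SIZE   */
        unsigned long id_size = 12;                     /* assumed packed size */
        unsigned long ids_per_page = page_size / id_size;
        unsigned long wasted = page_size % id_size;

        printf("%lu ids per page, %lu trailing bytes unused\n",
               ids_per_page, wasted);
        return 0;
}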
diff --git a/drivers/staging/lustre/lustre/include/cl_object.h b/drivers/staging/lustre/lustre/include/cl_object.h
index e4c0c440f01b..2bc3ee51b069 100644
--- a/drivers/staging/lustre/lustre/include/cl_object.h
+++ b/drivers/staging/lustre/lustre/include/cl_object.h
@@ -1640,9 +1640,14 @@ enum cl_enq_flags {
*/
CEF_PEEK = 0x00000040,
/**
+ * Lock match only. Used by group lock in I/O as group lock
+ * is known to exist.
+ */
+ CEF_LOCK_MATCH = BIT(7),
+ /**
* mask of enq_flags.
*/
- CEF_MASK = 0x0000007f,
+ CEF_MASK = 0x000000ff,
};
/**
@@ -2432,9 +2437,9 @@ void cl_sync_io_end(const struct lu_env *env, struct cl_sync_io *anchor);
* @{
*/
-struct lu_env *cl_env_get(int *refcheck);
-struct lu_env *cl_env_alloc(int *refcheck, __u32 tags);
-void cl_env_put(struct lu_env *env, int *refcheck);
+struct lu_env *cl_env_get(u16 *refcheck);
+struct lu_env *cl_env_alloc(u16 *refcheck, __u32 tags);
+void cl_env_put(struct lu_env *env, u16 *refcheck);
unsigned int cl_env_cache_purge(unsigned int nr);
struct lu_env *cl_env_percpu_get(void);
void cl_env_percpu_put(struct lu_env *env);
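/*
 * The cl_object.h hunk adds CEF_LOCK_MATCH as bit 7 and widens CEF_MASK
 * from 0x7f to 0xff so the mask still covers every flag.  A stand-alone
 * check of that flag/mask arithmetic; BIT() is spelled out locally so the
 * sketch builds outside the kernel headers.
 */
#include <assert.h>
#include <stdio.h>

#define BIT(n)         (1u << (n))

#define CEF_LOCK_MATCH BIT(7)          /* == 0x80, the new flag  */
#define CEF_MASK       0x000000ff      /* widened from 0x7f      */

int main(void)
{
        /* the old 0x7f mask would have dropped the new bit */
        assert((CEF_LOCK_MATCH & 0x7f) == 0);
        assert((CEF_LOCK_MATCH & CEF_MASK) == CEF_LOCK_MATCH);
        printf("CEF_LOCK_MATCH = 0x%x\n", CEF_LOCK_MATCH);
        return 0;
}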
diff --git a/drivers/staging/lustre/lustre/include/lprocfs_status.h b/drivers/staging/lustre/lustre/include/lprocfs_status.h
index 62753dae0bfa..242abb881766 100644
--- a/drivers/staging/lustre/lustre/include/lprocfs_status.h
+++ b/drivers/staging/lustre/lustre/include/lprocfs_status.h
@@ -374,94 +374,15 @@ int lprocfs_write_frac_helper(const char __user *buffer,
unsigned long count, int *val, int mult);
int lprocfs_read_frac_helper(char *buffer, unsigned long count,
long val, int mult);
-int lprocfs_stats_alloc_one(struct lprocfs_stats *stats, unsigned int cpuid);
-/**
- * Lock statistics structure for access, possibly only on this CPU.
- *
- * The statistics struct may be allocated with per-CPU structures for
- * efficient concurrent update (usually only on server-wide stats), or
- * as a single global struct (e.g. for per-client or per-job statistics),
- * so the required locking depends on the type of structure allocated.
- *
- * For per-CPU statistics, pin the thread to the current cpuid so that
- * will only access the statistics for that CPU. If the stats structure
- * for the current CPU has not been allocated (or previously freed),
- * allocate it now. The per-CPU statistics do not need locking since
- * the thread is pinned to the CPU during update.
- *
- * For global statistics, lock the stats structure to prevent concurrent update.
- *
- * \param[in] stats statistics structure to lock
- * \param[in] opc type of operation:
- * LPROCFS_GET_SMP_ID: "lock" and return current CPU index
- * for incrementing statistics for that CPU
- * LPROCFS_GET_NUM_CPU: "lock" and return number of used
- * CPU indices to iterate over all indices
- * \param[out] flags CPU interrupt saved state for IRQ-safe locking
- *
- * \retval cpuid of current thread or number of allocated structs
- * \retval negative on error (only for opc LPROCFS_GET_SMP_ID + per-CPU stats)
- */
-static inline int lprocfs_stats_lock(struct lprocfs_stats *stats,
- enum lprocfs_stats_lock_ops opc,
- unsigned long *flags)
-{
- if (stats->ls_flags & LPROCFS_STATS_FLAG_NOPERCPU) {
- if (stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE)
- spin_lock_irqsave(&stats->ls_lock, *flags);
- else
- spin_lock(&stats->ls_lock);
- return opc == LPROCFS_GET_NUM_CPU ? 1 : 0;
- }
-
- switch (opc) {
- case LPROCFS_GET_SMP_ID: {
- unsigned int cpuid = get_cpu();
-
- if (unlikely(!stats->ls_percpu[cpuid])) {
- int rc = lprocfs_stats_alloc_one(stats, cpuid);
-
- if (rc < 0) {
- put_cpu();
- return rc;
- }
- }
- return cpuid;
- }
- case LPROCFS_GET_NUM_CPU:
- return stats->ls_biggest_alloc_num;
- default:
- LBUG();
- }
-}
-
-/**
- * Unlock statistics structure after access.
- *
- * Unlock the lock acquired via lprocfs_stats_lock() for global statistics,
- * or unpin this thread from the current cpuid for per-CPU statistics.
- *
- * This function must be called using the same arguments as used when calling
- * lprocfs_stats_lock() so that the correct operation can be performed.
- *
- * \param[in] stats statistics structure to unlock
- * \param[in] opc type of operation (current cpuid or number of structs)
- * \param[in] flags CPU interrupt saved state for IRQ-safe locking
- */
-static inline void lprocfs_stats_unlock(struct lprocfs_stats *stats,
- enum lprocfs_stats_lock_ops opc,
- unsigned long *flags)
-{
- if (stats->ls_flags & LPROCFS_STATS_FLAG_NOPERCPU) {
- if (stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE)
- spin_unlock_irqrestore(&stats->ls_lock, *flags);
- else
- spin_unlock(&stats->ls_lock);
- } else if (opc == LPROCFS_GET_SMP_ID) {
- put_cpu();
- }
-}
+int lprocfs_stats_alloc_one(struct lprocfs_stats *stats,
+ unsigned int cpuid);
+int lprocfs_stats_lock(struct lprocfs_stats *stats,
+ enum lprocfs_stats_lock_ops opc,
+ unsigned long *flags);
+void lprocfs_stats_unlock(struct lprocfs_stats *stats,
+ enum lprocfs_stats_lock_ops opc,
+ unsigned long *flags);
static inline unsigned int
lprocfs_stats_counter_size(struct lprocfs_stats *stats)
@@ -513,29 +434,8 @@ __s64 lprocfs_read_helper(struct lprocfs_counter *lc,
struct lprocfs_counter_header *header,
enum lprocfs_stats_flags flags,
enum lprocfs_fields_flags field);
-static inline __u64 lprocfs_stats_collector(struct lprocfs_stats *stats,
- int idx,
- enum lprocfs_fields_flags field)
-{
- unsigned int i;
- unsigned int num_cpu;
- unsigned long flags = 0;
- __u64 ret = 0;
-
- LASSERT(stats);
-
- num_cpu = lprocfs_stats_lock(stats, LPROCFS_GET_NUM_CPU, &flags);
- for (i = 0; i < num_cpu; i++) {
- if (!stats->ls_percpu[i])
- continue;
- ret += lprocfs_read_helper(
- lprocfs_stats_counter_get(stats, i, idx),
- &stats->ls_cnt_header[idx], stats->ls_flags,
- field);
- }
- lprocfs_stats_unlock(stats, LPROCFS_GET_NUM_CPU, &flags);
- return ret;
-}
+__u64 lprocfs_stats_collector(struct lprocfs_stats *stats, int idx,
+ enum lprocfs_fields_flags field);
extern struct lprocfs_stats *
lprocfs_alloc_stats(unsigned int num, enum lprocfs_stats_flags flags);
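/*
 * The inline helpers removed above move into a .c file, but the deleted
 * lprocfs_stats_collector() still documents the usage contract: lock with
 * LPROCFS_GET_NUM_CPU, walk that many per-CPU slots, then unlock with the
 * same opc.  The stand-in types below model only that contract, not the
 * real lprocfs implementation.
 */
#include <stdint.h>
#include <stdio.h>

enum stats_lock_ops { GET_SMP_ID, GET_NUM_CPU };

struct stats {
        unsigned int ncpu;
        uint64_t counters[4];          /* one slot per CPU in this toy model */
};

static int stats_lock(struct stats *s, enum stats_lock_ops opc)
{
        /* real code returns a CPU id or the number of allocated structs */
        return opc == GET_NUM_CPU ? (int)s->ncpu : 0;
}

static void stats_unlock(struct stats *s, enum stats_lock_ops opc)
{
        (void)s; (void)opc;            /* pairing must match stats_lock() */
}

static uint64_t stats_collect(struct stats *s)
{
        int num_cpu = stats_lock(s, GET_NUM_CPU);
        uint64_t sum = 0;

        for (int i = 0; i < num_cpu; i++)
                sum += s->counters[i];
        stats_unlock(s, GET_NUM_CPU);
        return sum;
}

int main(void)
{
        struct stats s = { .ncpu = 4, .counters = { 1, 2, 3, 4 } };

        printf("total = %llu\n", (unsigned long long)stats_collect(&s));
        return 0;
}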
diff --git a/drivers/staging/lustre/lustre/include/lu_object.h b/drivers/staging/lustre/lustre/include/lu_object.h
index 7a4f412a85a3..73ecc232967b 100644
--- a/drivers/staging/lustre/lustre/include/lu_object.h
+++ b/drivers/staging/lustre/lustre/include/lu_object.h
@@ -147,9 +147,9 @@ struct lu_device_operations {
struct lu_device *);
/**
- * initialize local objects for device. this method called after layer has
- * been initialized (after LCFG_SETUP stage) and before it starts serving
- * user requests.
+ * initialize local objects for device. this method called after layer
+ * has been initialized (after LCFG_SETUP stage) and before it starts
+ * serving user requests.
*/
int (*ldo_prepare)(const struct lu_env *,
@@ -791,7 +791,7 @@ int lu_cdebug_printer(const struct lu_env *env,
#define LU_OBJECT_DEBUG(mask, env, object, format, ...) \
do { \
if (cfs_cdebug_show(mask, DEBUG_SUBSYSTEM)) { \
- LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, NULL); \
+ LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, NULL); \
lu_object_print(env, &msgdata, lu_cdebug_printer, object);\
CDEBUG(mask, format "\n", ## __VA_ARGS__); \
} \
@@ -803,7 +803,7 @@ do { \
#define LU_OBJECT_HEADER(mask, env, object, format, ...) \
do { \
if (cfs_cdebug_show(mask, DEBUG_SUBSYSTEM)) { \
- LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, NULL); \
+ LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, NULL); \
lu_object_header_print(env, &msgdata, lu_cdebug_printer,\
(object)->lo_header); \
lu_cdebug_printer(env, &msgdata, "\n"); \
diff --git a/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h b/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h
index 60b827eeefe2..df48b8d6fe52 100644
--- a/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h
+++ b/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h
@@ -846,10 +846,10 @@ struct luda_type {
#endif
struct lu_dirpage {
- __u64 ldp_hash_start;
- __u64 ldp_hash_end;
- __u32 ldp_flags;
- __u32 ldp_pad0;
+ __le64 ldp_hash_start;
+ __le64 ldp_hash_end;
+ __le32 ldp_flags;
+ __le32 ldp_pad0;
struct lu_dirent ldp_entries[0];
};
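/*
 * Annotating the lu_dirpage header fields as __le64/__le32 records that the
 * on-wire format is little-endian, so readers go through le64_to_cpu() and
 * friends.  A self-contained decode of one such field; the helper below is
 * a stand-in so the sketch builds in userspace.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t sketch_le64_to_cpu(const unsigned char *p)
{
        uint64_t v = 0;

        for (int i = 7; i >= 0; i--)     /* least significant byte first */
                v = (v << 8) | p[i];
        return v;
}

int main(void)
{
        /* 0x0102030405060708 encoded little-endian on the wire */
        unsigned char wire[8] = { 0x08, 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01 };

        printf("ldp_hash_start = 0x%llx\n",
               (unsigned long long)sketch_le64_to_cpu(wire));
        return 0;
}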
diff --git a/drivers/staging/lustre/lustre/include/lustre_dlm.h b/drivers/staging/lustre/lustre/include/lustre_dlm.h
index b7e61d082e55..1e86fb53388a 100644
--- a/drivers/staging/lustre/lustre/include/lustre_dlm.h
+++ b/drivers/staging/lustre/lustre/include/lustre_dlm.h
@@ -812,13 +812,6 @@ struct ldlm_lock {
/** referenced export object */
struct obd_export *l_exp_refs_target;
#endif
- /**
- * export blocking dlm lock list, protected by
- * l_export->exp_bl_list_lock.
- * Lock order of waiting_lists_spinlock, exp_bl_list_lock and res lock
- * is: res lock -> exp_bl_list_lock -> wanting_lists_spinlock.
- */
- struct list_head l_exp_list;
};
/**
@@ -1192,6 +1185,10 @@ ldlm_namespace_new(struct obd_device *obd, char *name,
enum ldlm_side client, enum ldlm_appetite apt,
enum ldlm_ns_type ns_type);
int ldlm_namespace_cleanup(struct ldlm_namespace *ns, __u64 flags);
+void ldlm_namespace_free_prior(struct ldlm_namespace *ns,
+ struct obd_import *imp,
+ int force);
+void ldlm_namespace_free_post(struct ldlm_namespace *ns);
void ldlm_namespace_get(struct ldlm_namespace *ns);
void ldlm_namespace_put(struct ldlm_namespace *ns);
int ldlm_debugfs_setup(void);
diff --git a/drivers/staging/lustre/lustre/include/lustre_dlm_flags.h b/drivers/staging/lustre/lustre/include/lustre_dlm_flags.h
index a0f064d237c9..11331ae81d58 100644
--- a/drivers/staging/lustre/lustre/include/lustre_dlm_flags.h
+++ b/drivers/staging/lustre/lustre/include/lustre_dlm_flags.h
@@ -121,6 +121,9 @@
#define ldlm_set_test_lock(_l) LDLM_SET_FLAG((_l), 1ULL << 19)
#define ldlm_clear_test_lock(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 19)
+/** match lock only */
+#define LDLM_FL_MATCH_LOCK 0x0000000000100000ULL /* bit 20 */
+
/**
* Immediately cancel such locks when they block some other locks. Send
* cancel notification to original lock holder, but expect no reply. This
diff --git a/drivers/staging/lustre/lustre/include/lustre_eacl.h b/drivers/staging/lustre/lustre/include/lustre_eacl.h
deleted file mode 100644
index 1e71a8638186..000000000000
--- a/drivers/staging/lustre/lustre/include/lustre_eacl.h
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- */
-/*
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/lustre/include/lustre_idmap.h
- *
- * MDS data structures.
- * See also lustre_idl.h for wire formats of requests.
- */
-
-#ifndef _LUSTRE_EACL_H
-#define _LUSTRE_EACL_H
-
-/** \defgroup eacl eacl
- *
- * @{
- */
-
-#ifdef CONFIG_FS_POSIX_ACL
-
-#include <linux/fs.h>
-#include <linux/posix_acl_xattr.h>
-
-typedef struct {
- __u16 e_tag;
- __u16 e_perm;
- __u32 e_id;
- __u32 e_stat;
-} ext_acl_xattr_entry;
-
-typedef struct {
- __u32 a_count;
- ext_acl_xattr_entry a_entries[0];
-} ext_acl_xattr_header;
-
-#define CFS_ACL_XATTR_SIZE(count, prefix) \
- (sizeof(prefix ## _header) + (count) * sizeof(prefix ## _entry))
-
-#define CFS_ACL_XATTR_COUNT(size, prefix) \
- (((size) - sizeof(prefix ## _header)) / sizeof(prefix ## _entry))
-
-#endif /* CONFIG_FS_POSIX_ACL */
-
-/** @} eacl */
-
-#endif
diff --git a/drivers/staging/lustre/lustre/include/lustre_net.h b/drivers/staging/lustre/lustre/include/lustre_net.h
index 1b48df0d4862..d61b000dac2c 100644
--- a/drivers/staging/lustre/lustre/include/lustre_net.h
+++ b/drivers/staging/lustre/lustre/include/lustre_net.h
@@ -288,7 +288,7 @@ struct ptlrpc_connection {
/** Our own lnet nid for this connection */
lnet_nid_t c_self;
/** Remote side nid for this connection */
- lnet_process_id_t c_peer;
+ struct lnet_process_id c_peer;
/** UUID of the other side */
struct obd_uuid c_remote_uuid;
/** reference counter for this connection */
@@ -400,7 +400,7 @@ struct ptlrpc_service;
* ptlrpc callback & work item stuff
*/
struct ptlrpc_cb_id {
- void (*cbid_fn)(lnet_event_t *ev); /* specific callback fn */
+ void (*cbid_fn)(struct lnet_event *ev); /* specific callback fn */
void *cbid_arg; /* additional arg */
};
@@ -457,7 +457,7 @@ struct ptlrpc_reply_state {
struct obd_export *rs_export;
struct ptlrpc_service_part *rs_svcpt;
/** Lnet metadata handle for the reply */
- lnet_handle_md_t rs_md_h;
+ struct lnet_handle_md rs_md_h;
/** Context for the service thread */
struct ptlrpc_svc_ctx *rs_svc_ctx;
@@ -586,11 +586,11 @@ struct ptlrpc_cli_req {
/** Link back to the request set */
struct ptlrpc_request_set *cr_set;
/** outgoing request MD handle */
- lnet_handle_md_t cr_req_md_h;
+ struct lnet_handle_md cr_req_md_h;
/** request-out callback parameter */
struct ptlrpc_cb_id cr_req_cbid;
/** incoming reply MD handle */
- lnet_handle_md_t cr_reply_md_h;
+ struct lnet_handle_md cr_reply_md_h;
wait_queue_head_t cr_reply_waitq;
/** reply callback parameter */
struct ptlrpc_cb_id cr_reply_cbid;
@@ -876,7 +876,7 @@ struct ptlrpc_request {
/** our LNet NID */
lnet_nid_t rq_self;
/** Peer description (the other side) */
- lnet_process_id_t rq_peer;
+ struct lnet_process_id rq_peer;
/**
* service time estimate (secs)
* If the request is not served by this time, it is marked as timed out.
@@ -1225,7 +1225,7 @@ struct ptlrpc_bulk_desc {
int bd_md_count; /* # valid entries in bd_mds */
int bd_md_max_brw; /* max entries in bd_mds */
/** array of associated MDs */
- lnet_handle_md_t bd_mds[PTLRPC_BULK_OPS_COUNT];
+ struct lnet_handle_md bd_mds[PTLRPC_BULK_OPS_COUNT];
union {
struct {
@@ -1376,7 +1376,7 @@ struct ptlrpc_request_buffer_desc {
/** Back pointer to service for which this buffer is registered */
struct ptlrpc_service_part *rqbd_svcpt;
/** LNet descriptor */
- lnet_handle_md_t rqbd_md_h;
+ struct lnet_handle_md rqbd_md_h;
int rqbd_refcount;
/** The buffer itself */
char *rqbd_buffer;
@@ -1749,23 +1749,23 @@ static inline bool nrs_policy_compat_one(const struct ptlrpc_service *svc,
/** @} nrs */
/* ptlrpc/events.c */
-extern lnet_handle_eq_t ptlrpc_eq_h;
+extern struct lnet_handle_eq ptlrpc_eq_h;
int ptlrpc_uuid_to_peer(struct obd_uuid *uuid,
- lnet_process_id_t *peer, lnet_nid_t *self);
+ struct lnet_process_id *peer, lnet_nid_t *self);
/**
* These callbacks are invoked by LNet when something happened to
* underlying buffer
* @{
*/
-void request_out_callback(lnet_event_t *ev);
-void reply_in_callback(lnet_event_t *ev);
-void client_bulk_callback(lnet_event_t *ev);
-void request_in_callback(lnet_event_t *ev);
-void reply_out_callback(lnet_event_t *ev);
+void request_out_callback(struct lnet_event *ev);
+void reply_in_callback(struct lnet_event *ev);
+void client_bulk_callback(struct lnet_event *ev);
+void request_in_callback(struct lnet_event *ev);
+void reply_out_callback(struct lnet_event *ev);
/** @} */
/* ptlrpc/connection.c */
-struct ptlrpc_connection *ptlrpc_connection_get(lnet_process_id_t peer,
+struct ptlrpc_connection *ptlrpc_connection_get(struct lnet_process_id peer,
lnet_nid_t self,
struct obd_uuid *uuid);
int ptlrpc_connection_put(struct ptlrpc_connection *c);
diff --git a/drivers/staging/lustre/lustre/include/obd_support.h b/drivers/staging/lustre/lustre/include/obd_support.h
index dace6591a0a4..33304041bb63 100644
--- a/drivers/staging/lustre/lustre/include/obd_support.h
+++ b/drivers/staging/lustre/lustre/include/obd_support.h
@@ -318,6 +318,7 @@ extern char obd_jobid_var[];
#define OBD_FAIL_LDLM_AGL_NOLOCK 0x31b
#define OBD_FAIL_LDLM_OST_LVB 0x31c
#define OBD_FAIL_LDLM_ENQUEUE_HANG 0x31d
+#define OBD_FAIL_LDLM_PAUSE_CANCEL2 0x31f
#define OBD_FAIL_LDLM_CP_CB_WAIT2 0x320
#define OBD_FAIL_LDLM_CP_CB_WAIT3 0x321
#define OBD_FAIL_LDLM_CP_CB_WAIT4 0x322
diff --git a/drivers/staging/lustre/lustre/ldlm/interval_tree.c b/drivers/staging/lustre/lustre/ldlm/interval_tree.c
index e134ecd21bb2..e1069021420d 100644
--- a/drivers/staging/lustre/lustre/ldlm/interval_tree.c
+++ b/drivers/staging/lustre/lustre/ldlm/interval_tree.c
@@ -101,11 +101,6 @@ static inline int node_equal(struct interval_node *n1, struct interval_node *n2)
return extent_equal(&n1->in_extent, &n2->in_extent);
}
-static inline __u64 max_u64(__u64 x, __u64 y)
-{
- return x > y ? x : y;
-}
-
static struct interval_node *interval_first(struct interval_node *node)
{
if (!node)
@@ -134,8 +129,8 @@ static void __rotate_change_maxhigh(struct interval_node *node,
rotate->in_max_high = node->in_max_high;
left_max = node->in_left ? node->in_left->in_max_high : 0;
right_max = node->in_right ? node->in_right->in_max_high : 0;
- node->in_max_high = max_u64(interval_high(node),
- max_u64(left_max, right_max));
+ node->in_max_high = max(interval_high(node),
+ max(left_max, right_max));
}
/* The left rotation "pivots" around the link from node to node->right, and
@@ -394,8 +389,8 @@ static void update_maxhigh(struct interval_node *node,
while (node) {
left_max = node->in_left ? node->in_left->in_max_high : 0;
right_max = node->in_right ? node->in_right->in_max_high : 0;
- node->in_max_high = max_u64(interval_high(node),
- max_u64(left_max, right_max));
+ node->in_max_high = max(interval_high(node),
+ max(left_max, right_max));
if (node->in_max_high >= old_maxhigh)
break;
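/*
 * The interval_tree.c hunks drop the local max_u64() helper in favour of
 * the kernel's type-checked max() macro.  A plain-C stand-in for the same
 * nested-max update of in_max_high (node high vs. the two subtree maxima);
 * MAX() here is only a sketch of the kernel macro.
 */
#include <stdint.h>
#include <stdio.h>

#define MAX(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
        uint64_t node_high = 100, left_max = 250, right_max = 80;
        uint64_t in_max_high = MAX(node_high, MAX(left_max, right_max));

        printf("in_max_high = %llu\n", (unsigned long long)in_max_high);
        return 0;
}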
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h b/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h
index 5c02501d0560..5d24b4825796 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h
@@ -108,9 +108,7 @@ extern unsigned int ldlm_cancel_unused_locks_before_replay;
/* ldlm_resource.c */
int ldlm_resource_putref_locked(struct ldlm_resource *res);
-void ldlm_namespace_free_prior(struct ldlm_namespace *ns,
- struct obd_import *imp, int force);
-void ldlm_namespace_free_post(struct ldlm_namespace *ns);
+
/* ldlm_lock.c */
struct ldlm_cb_set_arg {
@@ -156,6 +154,7 @@ int ldlm_bl_to_thread_list(struct ldlm_namespace *ns,
struct ldlm_lock_desc *ld,
struct list_head *cancels, int count,
enum ldlm_cancel_flags cancel_flags);
+int ldlm_bl_thread_wakeup(void);
void ldlm_handle_bl_callback(struct ldlm_namespace *ns,
struct ldlm_lock_desc *ld, struct ldlm_lock *lock);
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
index 5a94265fe60b..ddb46428093f 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
@@ -435,7 +435,6 @@ static struct ldlm_lock *ldlm_lock_new(struct ldlm_resource *resource)
lock->l_exp_refs_nr = 0;
lock->l_exp_refs_target = NULL;
#endif
- INIT_LIST_HEAD(&lock->l_exp_list);
return lock;
}
@@ -771,19 +770,11 @@ void ldlm_lock_decref_internal(struct ldlm_lock *lock, enum ldlm_mode mode)
ldlm_lock_decref_internal_nolock(lock, mode);
- if (ldlm_is_local(lock) &&
+ if ((ldlm_is_local(lock) || lock->l_req_mode == LCK_GROUP) &&
!lock->l_readers && !lock->l_writers) {
/* If this is a local lock on a server namespace and this was
* the last reference, cancel the lock.
- */
- CDEBUG(D_INFO, "forcing cancel of local lock\n");
- ldlm_set_cbpending(lock);
- }
-
- if (!lock->l_readers && !lock->l_writers &&
- (ldlm_is_cbpending(lock) || lock->l_req_mode == LCK_GROUP)) {
- /* If we received a blocked AST and this was the last reference,
- * run the callback.
+ *
* Group locks are special:
* They must not go in LRU, but they are not called back
* like non-group locks, instead they are manually released.
@@ -791,6 +782,13 @@ void ldlm_lock_decref_internal(struct ldlm_lock *lock, enum ldlm_mode mode)
* they are manually released, so we remove them when they have
* no more reader or writer references. - LU-6368
*/
+ ldlm_set_cbpending(lock);
+ }
+
+ if (!lock->l_readers && !lock->l_writers && ldlm_is_cbpending(lock)) {
+ /* If we received a blocked AST and this was the last reference,
+ * run the callback.
+ */
LDLM_DEBUG(lock, "final decref done on cbpending lock");
LDLM_LOCK_GET(lock); /* dropped by bl thread */
@@ -1882,6 +1880,19 @@ out:
return rc;
}
+static bool is_bl_done(struct ldlm_lock *lock)
+{
+ bool bl_done = true;
+
+ if (!ldlm_is_bl_done(lock)) {
+ lock_res_and_lock(lock);
+ bl_done = ldlm_is_bl_done(lock);
+ unlock_res_and_lock(lock);
+ }
+
+ return bl_done;
+}
+
/**
* Helper function to call blocking AST for LDLM lock \a lock in a
* "cancelling" mode.
@@ -1899,8 +1910,20 @@ void ldlm_cancel_callback(struct ldlm_lock *lock)
} else {
LDLM_DEBUG(lock, "no blocking ast");
}
+ /* only canceller can set bl_done bit */
+ ldlm_set_bl_done(lock);
+ wake_up_all(&lock->l_waitq);
+ } else if (!ldlm_is_bl_done(lock)) {
+ struct l_wait_info lwi = { 0 };
+
+ /*
+ * The lock is guaranteed to have been canceled once
+ * returning from this function.
+ */
+ unlock_res_and_lock(lock);
+ l_wait_event(lock->l_waitq, is_bl_done(lock), &lwi);
+ lock_res_and_lock(lock);
}
- ldlm_set_bl_done(lock);
}
/**
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c
index 12647af5a336..fff930fc3cff 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c
@@ -454,6 +454,12 @@ int ldlm_bl_to_thread_list(struct ldlm_namespace *ns, struct ldlm_lock_desc *ld,
return ldlm_bl_to_thread(ns, ld, NULL, cancels, count, cancel_flags);
}
+int ldlm_bl_thread_wakeup(void)
+{
+ wake_up(&ldlm_state->ldlm_bl_pool->blp_waitq);
+ return 0;
+}
+
/* Setinfo coming from Server (eg MDT) to Client (eg MDC)! */
static int ldlm_handle_setinfo(struct ptlrpc_request *req)
{
@@ -675,8 +681,11 @@ static int ldlm_callback_handler(struct ptlrpc_request *req)
return 0;
}
-static struct ldlm_bl_work_item *ldlm_bl_get_work(struct ldlm_bl_pool *blp)
+static int ldlm_bl_get_work(struct ldlm_bl_pool *blp,
+ struct ldlm_bl_work_item **p_blwi,
+ struct obd_export **p_exp)
{
+ int num_th = atomic_read(&blp->blp_num_threads);
struct ldlm_bl_work_item *blwi = NULL;
static unsigned int num_bl;
@@ -693,18 +702,18 @@ static struct ldlm_bl_work_item *ldlm_bl_get_work(struct ldlm_bl_pool *blp)
blwi_entry);
if (blwi) {
- if (++num_bl >= atomic_read(&blp->blp_num_threads))
+ if (++num_bl >= num_th)
num_bl = 0;
list_del(&blwi->blwi_entry);
}
spin_unlock(&blp->blp_lock);
+ *p_blwi = blwi;
- return blwi;
+ return (*p_blwi || *p_exp) ? 1 : 0;
}
/* This only contains temporary data until the thread starts */
struct ldlm_bl_thread_data {
- char bltd_name[CFS_CURPROC_COMM_MAX];
struct ldlm_bl_pool *bltd_blp;
struct completion bltd_comp;
int bltd_num;
@@ -712,19 +721,32 @@ struct ldlm_bl_thread_data {
static int ldlm_bl_thread_main(void *arg);
-static int ldlm_bl_thread_start(struct ldlm_bl_pool *blp)
+static int ldlm_bl_thread_start(struct ldlm_bl_pool *blp, bool check_busy)
{
struct ldlm_bl_thread_data bltd = { .bltd_blp = blp };
struct task_struct *task;
init_completion(&bltd.bltd_comp);
- bltd.bltd_num = atomic_read(&blp->blp_num_threads);
- snprintf(bltd.bltd_name, sizeof(bltd.bltd_name),
- "ldlm_bl_%02d", bltd.bltd_num);
- task = kthread_run(ldlm_bl_thread_main, &bltd, "%s", bltd.bltd_name);
+
+ bltd.bltd_num = atomic_inc_return(&blp->blp_num_threads);
+ if (bltd.bltd_num >= blp->blp_max_threads) {
+ atomic_dec(&blp->blp_num_threads);
+ return 0;
+ }
+
+ LASSERTF(bltd.bltd_num > 0, "thread num:%d\n", bltd.bltd_num);
+ if (check_busy &&
+ atomic_read(&blp->blp_busy_threads) < (bltd.bltd_num - 1)) {
+ atomic_dec(&blp->blp_num_threads);
+ return 0;
+ }
+
+ task = kthread_run(ldlm_bl_thread_main, &bltd, "ldlm_bl_%02d",
+ bltd.bltd_num);
if (IS_ERR(task)) {
CERROR("cannot start LDLM thread ldlm_bl_%02d: rc %ld\n",
- atomic_read(&blp->blp_num_threads), PTR_ERR(task));
+ bltd.bltd_num, PTR_ERR(task));
+ atomic_dec(&blp->blp_num_threads);
return PTR_ERR(task);
}
wait_for_completion(&bltd.bltd_comp);
@@ -732,6 +754,64 @@ static int ldlm_bl_thread_start(struct ldlm_bl_pool *blp)
return 0;
}
+/* Not fatal if racy and have a few too many threads */
+static int ldlm_bl_thread_need_create(struct ldlm_bl_pool *blp,
+ struct ldlm_bl_work_item *blwi)
+{
+ if (atomic_read(&blp->blp_num_threads) >= blp->blp_max_threads)
+ return 0;
+
+ if (atomic_read(&blp->blp_busy_threads) <
+ atomic_read(&blp->blp_num_threads))
+ return 0;
+
+ if (blwi && (!blwi->blwi_ns || blwi->blwi_mem_pressure))
+ return 0;
+
+ return 1;
+}
+
+static int ldlm_bl_thread_blwi(struct ldlm_bl_pool *blp,
+ struct ldlm_bl_work_item *blwi)
+{
+ if (!blwi->blwi_ns)
+ /* added by ldlm_cleanup() */
+ return LDLM_ITER_STOP;
+
+ if (blwi->blwi_mem_pressure)
+ memory_pressure_set();
+
+ OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_PAUSE_CANCEL2, 4);
+
+ if (blwi->blwi_count) {
+ int count;
+
+ /*
+ * The special case when we cancel locks in lru
+ * asynchronously, we pass the list of locks here.
+ * Thus locks are marked LDLM_FL_CANCELING, but NOT
+ * canceled locally yet.
+ */
+ count = ldlm_cli_cancel_list_local(&blwi->blwi_head,
+ blwi->blwi_count,
+ LCF_BL_AST);
+ ldlm_cli_cancel_list(&blwi->blwi_head, count, NULL,
+ blwi->blwi_flags);
+ } else {
+ ldlm_handle_bl_callback(blwi->blwi_ns, &blwi->blwi_ld,
+ blwi->blwi_lock);
+ }
+ if (blwi->blwi_mem_pressure)
+ memory_pressure_clr();
+
+ if (blwi->blwi_flags & LCF_ASYNC)
+ kfree(blwi);
+ else
+ complete(&blwi->blwi_comp);
+
+ return 0;
+}
+
/**
* Main blocking requests processing thread.
*
@@ -742,76 +822,40 @@ static int ldlm_bl_thread_start(struct ldlm_bl_pool *blp)
static int ldlm_bl_thread_main(void *arg)
{
struct ldlm_bl_pool *blp;
+ struct ldlm_bl_thread_data *bltd = arg;
- {
- struct ldlm_bl_thread_data *bltd = arg;
-
- blp = bltd->bltd_blp;
-
- atomic_inc(&blp->blp_num_threads);
- atomic_inc(&blp->blp_busy_threads);
+ blp = bltd->bltd_blp;
- complete(&bltd->bltd_comp);
- /* cannot use bltd after this, it is only on caller's stack */
- }
+ complete(&bltd->bltd_comp);
+ /* cannot use bltd after this, it is only on caller's stack */
while (1) {
struct l_wait_info lwi = { 0 };
struct ldlm_bl_work_item *blwi = NULL;
- int busy;
-
- blwi = ldlm_bl_get_work(blp);
+ struct obd_export *exp = NULL;
+ int rc;
- if (!blwi) {
- atomic_dec(&blp->blp_busy_threads);
+ rc = ldlm_bl_get_work(blp, &blwi, &exp);
+ if (!rc)
l_wait_event_exclusive(blp->blp_waitq,
- (blwi = ldlm_bl_get_work(blp)),
+ ldlm_bl_get_work(blp, &blwi,
+ &exp),
&lwi);
- busy = atomic_inc_return(&blp->blp_busy_threads);
- } else {
- busy = atomic_read(&blp->blp_busy_threads);
- }
-
- if (!blwi->blwi_ns)
- /* added by ldlm_cleanup() */
- break;
+ atomic_inc(&blp->blp_busy_threads);
- /* Not fatal if racy and have a few too many threads */
- if (unlikely(busy < blp->blp_max_threads &&
- busy >= atomic_read(&blp->blp_num_threads) &&
- !blwi->blwi_mem_pressure))
+ if (ldlm_bl_thread_need_create(blp, blwi))
/* discard the return value, we tried */
- ldlm_bl_thread_start(blp);
-
- if (blwi->blwi_mem_pressure)
- memory_pressure_set();
-
- if (blwi->blwi_count) {
- int count;
- /* The special case when we cancel locks in LRU
- * asynchronously, we pass the list of locks here.
- * Thus locks are marked LDLM_FL_CANCELING, but NOT
- * canceled locally yet.
- */
- count = ldlm_cli_cancel_list_local(&blwi->blwi_head,
- blwi->blwi_count,
- LCF_BL_AST);
- ldlm_cli_cancel_list(&blwi->blwi_head, count, NULL,
- blwi->blwi_flags);
- } else {
- ldlm_handle_bl_callback(blwi->blwi_ns, &blwi->blwi_ld,
- blwi->blwi_lock);
- }
- if (blwi->blwi_mem_pressure)
- memory_pressure_clr();
+ ldlm_bl_thread_start(blp, true);
- if (blwi->blwi_flags & LCF_ASYNC)
- kfree(blwi);
- else
- complete(&blwi->blwi_comp);
+ if (blwi)
+ rc = ldlm_bl_thread_blwi(blp, blwi);
+
+ atomic_dec(&blp->blp_busy_threads);
+
+ if (rc == LDLM_ITER_STOP)
+ break;
}
- atomic_dec(&blp->blp_busy_threads);
atomic_dec(&blp->blp_num_threads);
complete(&blp->blp_comp);
return 0;
@@ -991,7 +1035,7 @@ static int ldlm_setup(void)
}
for (i = 0; i < blp->blp_min_threads; i++) {
- rc = ldlm_bl_thread_start(blp);
+ rc = ldlm_bl_thread_start(blp, false);
if (rc < 0)
goto out;
}
@@ -1071,7 +1115,7 @@ int ldlm_init(void)
ldlm_lock_slab = kmem_cache_create("ldlm_locks",
sizeof(struct ldlm_lock), 0,
SLAB_HWCACHE_ALIGN |
- SLAB_DESTROY_BY_RCU, NULL);
+ SLAB_TYPESAFE_BY_RCU, NULL);
if (!ldlm_lock_slab) {
kmem_cache_destroy(ldlm_resource_slab);
return -ENOMEM;
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c b/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c
index 8dfb3c8e6b7a..cf3fc5793377 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c
@@ -900,8 +900,9 @@ static int ldlm_pools_recalc(enum ldlm_side client)
{
struct ldlm_namespace *ns;
struct ldlm_namespace *ns_old = NULL;
+ /* seconds of sleep if no active namespaces */
+ int time = LDLM_POOL_CLI_DEF_RECALC_PERIOD;
int nr;
- int time = 50; /* seconds of sleep if no active namespaces */
/*
* Recalc at least ldlm_namespace_nr_read(client) namespaces.
@@ -974,6 +975,10 @@ static int ldlm_pools_recalc(enum ldlm_side client)
ldlm_namespace_put(ns);
}
}
+
+ /* Wake up the blocking threads from time to time. */
+ ldlm_bl_thread_wakeup();
+
return time;
}
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_request.c b/drivers/staging/lustre/lustre/ldlm/ldlm_request.c
index ebfda368b057..84eeaa552113 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_request.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_request.c
@@ -1029,13 +1029,23 @@ int ldlm_cli_cancel(const struct lustre_handle *lockh,
struct ldlm_lock *lock;
LIST_HEAD(cancels);
- /* concurrent cancels on the same handle can happen */
- lock = ldlm_handle2lock_long(lockh, LDLM_FL_CANCELING);
+ lock = ldlm_handle2lock_long(lockh, 0);
if (!lock) {
LDLM_DEBUG_NOLOCK("lock is already being destroyed");
return 0;
}
+ lock_res_and_lock(lock);
+ /* Lock is being canceled and the caller doesn't want to wait */
+ if (ldlm_is_canceling(lock) && (cancel_flags & LCF_ASYNC)) {
+ unlock_res_and_lock(lock);
+ LDLM_LOCK_RELEASE(lock);
+ return 0;
+ }
+
+ ldlm_set_canceling(lock);
+ unlock_res_and_lock(lock);
+
rc = ldlm_cli_cancel_local(lock);
if (rc == LDLM_FL_LOCAL_ONLY || cancel_flags & LCF_LOCAL) {
LDLM_LOCK_RELEASE(lock);
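/*
 * ldlm_cli_cancel() above now claims the cancel under the resource lock:
 * if the lock is already being cancelled and the caller passed LCF_ASYNC it
 * returns immediately instead of racing; otherwise it sets the cancelling
 * flag and proceeds.  A stand-alone model of that claim-or-bail step using
 * a plain mutex; the types here are toys, not the real ldlm structures.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define LCF_ASYNC 0x1

struct toy_lock {
        pthread_mutex_t res_lock;
        bool canceling;
};

/* Returns true if the caller now owns the cancel, false if it bailed out. */
static bool claim_cancel(struct toy_lock *lk, int cancel_flags)
{
        pthread_mutex_lock(&lk->res_lock);
        if (lk->canceling && (cancel_flags & LCF_ASYNC)) {
                pthread_mutex_unlock(&lk->res_lock);
                return false;          /* someone else is already on it */
        }
        lk->canceling = true;
        pthread_mutex_unlock(&lk->res_lock);
        return true;
}

int main(void)
{
        struct toy_lock lk = { .res_lock = PTHREAD_MUTEX_INITIALIZER };

        printf("first claim:  %d\n", claim_cancel(&lk, LCF_ASYNC));
        printf("second claim: %d\n", claim_cancel(&lk, LCF_ASYNC));
        return 0;
}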
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c b/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c
index d16f5e95ef0b..633f65b078eb 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c
@@ -806,7 +806,7 @@ static void cleanup_resource(struct ldlm_resource *res, struct list_head *q,
unlock_res(res);
ldlm_lock2handle(lock, &lockh);
- rc = ldlm_cli_cancel(&lockh, LCF_ASYNC);
+ rc = ldlm_cli_cancel(&lockh, LCF_LOCAL);
if (rc)
CERROR("ldlm_cli_cancel: %d\n", rc);
LDLM_LOCK_RELEASE(lock);
diff --git a/drivers/staging/lustre/lustre/llite/dcache.c b/drivers/staging/lustre/lustre/llite/dcache.c
index 966f580e26fb..38f84662cf02 100644
--- a/drivers/staging/lustre/lustre/llite/dcache.c
+++ b/drivers/staging/lustre/lustre/llite/dcache.c
@@ -126,6 +126,7 @@ static int ll_ddelete(const struct dentry *de)
static int ll_d_init(struct dentry *de)
{
struct ll_dentry_data *lld = kzalloc(sizeof(*lld), GFP_KERNEL);
+
if (unlikely(!lld))
return -ENOMEM;
lld->lld_invalid = 1;
diff --git a/drivers/staging/lustre/lustre/llite/file.c b/drivers/staging/lustre/lustre/llite/file.c
index 481c0d01d4c6..67c4b9cc6e75 100644
--- a/drivers/staging/lustre/lustre/llite/file.c
+++ b/drivers/staging/lustre/lustre/llite/file.c
@@ -116,13 +116,13 @@ static void ll_prepare_close(struct inode *inode, struct md_op_data *op_data,
* If \a bias is MDS_CLOSE_LAYOUT_SWAP then \a data is a pointer to the inode to
* swap layouts with.
*/
-static int ll_close_inode_openhandle(struct obd_export *md_exp,
+static int ll_close_inode_openhandle(struct inode *inode,
struct obd_client_handle *och,
- struct inode *inode,
enum mds_op_bias bias,
void *data)
{
const struct ll_inode_info *lli = ll_i2info(inode);
+ struct obd_export *md_exp = ll_i2mdexp(inode);
struct md_op_data *op_data;
struct ptlrpc_request *req = NULL;
int rc;
@@ -231,15 +231,13 @@ int ll_md_real_close(struct inode *inode, fmode_t fmode)
/* There might be a race and this handle may already
* be closed.
*/
- rc = ll_close_inode_openhandle(ll_i2sbi(inode)->ll_md_exp,
- och, inode, 0, NULL);
+ rc = ll_close_inode_openhandle(inode, och, 0, NULL);
}
return rc;
}
-static int ll_md_close(struct obd_export *md_exp, struct inode *inode,
- struct file *file)
+static int ll_md_close(struct inode *inode, struct file *file)
{
struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
struct ll_inode_info *lli = ll_i2info(inode);
@@ -270,8 +268,7 @@ static int ll_md_close(struct obd_export *md_exp, struct inode *inode,
}
if (fd->fd_och) {
- rc = ll_close_inode_openhandle(md_exp, fd->fd_och, inode, 0,
- NULL);
+ rc = ll_close_inode_openhandle(inode, fd->fd_och, 0, NULL);
fd->fd_och = NULL;
goto out;
}
@@ -296,7 +293,7 @@ static int ll_md_close(struct obd_export *md_exp, struct inode *inode,
}
mutex_unlock(&lli->lli_och_mutex);
- if (!md_lock_match(md_exp, flags, ll_inode2fid(inode),
+ if (!md_lock_match(ll_i2mdexp(inode), flags, ll_inode2fid(inode),
LDLM_IBITS, &policy, lockmode, &lockh))
rc = ll_md_real_close(inode, fd->fd_omode);
@@ -345,7 +342,7 @@ int ll_file_release(struct inode *inode, struct file *file)
lli->lli_async_rc = 0;
}
- rc = ll_md_close(sbi->ll_md_exp, inode, file);
+ rc = ll_md_close(inode, file);
if (CFS_FAIL_TIMEOUT_MS(OBD_FAIL_PTLRPC_DUMP_LOG, cfs_fail_val))
libcfs_debug_dumplog();
@@ -835,7 +832,7 @@ out_close:
it.it_lock_mode = 0;
och->och_lease_handle.cookie = 0ULL;
}
- rc2 = ll_close_inode_openhandle(sbi->ll_md_exp, och, inode, 0, NULL);
+ rc2 = ll_close_inode_openhandle(inode, och, 0, NULL);
if (rc2 < 0)
CERROR("%s: error closing file "DFID": %d\n",
ll_get_fsname(inode->i_sb, NULL, 0),
@@ -901,8 +898,8 @@ static int ll_swap_layouts_close(struct obd_client_handle *och,
* NB: lease lock handle is released in mdc_close_layout_swap_pack()
* because we still need it to pack l_remote_handle to MDT.
*/
- rc = ll_close_inode_openhandle(ll_i2sbi(inode)->ll_md_exp, och, inode,
- MDS_CLOSE_LAYOUT_SWAP, inode2);
+ rc = ll_close_inode_openhandle(inode, och, MDS_CLOSE_LAYOUT_SWAP,
+ inode2);
och = NULL; /* freed in ll_close_inode_openhandle() */
@@ -937,8 +934,7 @@ static int ll_lease_close(struct obd_client_handle *och, struct inode *inode,
if (lease_broken)
*lease_broken = cancelled;
- return ll_close_inode_openhandle(ll_i2sbi(inode)->ll_md_exp,
- och, inode, 0, NULL);
+ return ll_close_inode_openhandle(inode, och, 0, NULL);
}
int ll_merge_attr(const struct lu_env *env, struct inode *inode)
@@ -1159,7 +1155,7 @@ static ssize_t ll_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
struct lu_env *env;
struct vvp_io_args *args;
ssize_t result;
- int refcheck;
+ u16 refcheck;
env = cl_env_get(&refcheck);
if (IS_ERR(env))
@@ -1183,7 +1179,7 @@ static ssize_t ll_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
struct lu_env *env;
struct vvp_io_args *args;
ssize_t result;
- int refcheck;
+ u16 refcheck;
env = cl_env_get(&refcheck);
if (IS_ERR(env))
@@ -1340,7 +1336,7 @@ static int ll_file_getstripe(struct inode *inode,
struct lov_user_md __user *lum)
{
struct lu_env *env;
- int refcheck;
+ u16 refcheck;
int rc;
env = cl_env_get(&refcheck);
@@ -1494,8 +1490,7 @@ int ll_release_openhandle(struct inode *inode, struct lookup_intent *it)
ll_och_fill(ll_i2sbi(inode)->ll_md_exp, it, och);
- rc = ll_close_inode_openhandle(ll_i2sbi(inode)->ll_md_exp,
- och, inode, 0, NULL);
+ rc = ll_close_inode_openhandle(inode, och, 0, NULL);
out:
/* this one is in place of ll_file_open */
if (it_disposition(it, DISP_ENQ_OPEN_REF)) {
@@ -1517,7 +1512,7 @@ static int ll_do_fiemap(struct inode *inode, struct fiemap *fiemap,
{
struct ll_fiemap_info_key fmkey = { .lfik_name = KEY_FIEMAP, };
struct lu_env *env;
- int refcheck;
+ u16 refcheck;
int rc = 0;
/* Checks for fiemap flags */
@@ -1623,7 +1618,7 @@ int ll_data_version(struct inode *inode, __u64 *data_version, int flags)
struct cl_object *obj = ll_i2info(inode)->lli_clob;
struct lu_env *env;
struct cl_io *io;
- int refcheck;
+ u16 refcheck;
int result;
/* If no file object initialized, we consider its version is 0. */
@@ -1668,7 +1663,7 @@ int ll_hsm_release(struct inode *inode)
struct obd_client_handle *och = NULL;
__u64 data_version = 0;
int rc;
- int refcheck;
+ u16 refcheck;
CDEBUG(D_INODE, "%s: Releasing file "DFID".\n",
ll_get_fsname(inode->i_sb, NULL, 0),
@@ -1698,8 +1693,8 @@ int ll_hsm_release(struct inode *inode)
* NB: lease lock handle is released in mdc_hsm_release_pack() because
* we still need it to pack l_remote_handle to MDT.
*/
- rc = ll_close_inode_openhandle(ll_i2sbi(inode)->ll_md_exp, och, inode,
- MDS_HSM_RELEASE, &data_version);
+ rc = ll_close_inode_openhandle(inode, och, MDS_HSM_RELEASE,
+ &data_version);
och = NULL;
out:
@@ -2324,7 +2319,7 @@ int cl_sync_file_range(struct inode *inode, loff_t start, loff_t end,
struct cl_io *io;
struct cl_fsync_io *fio;
int result;
- int refcheck;
+ u16 refcheck;
if (mode != CL_FSYNC_NONE && mode != CL_FSYNC_LOCAL &&
mode != CL_FSYNC_DISCARD && mode != CL_FSYNC_ALL)
@@ -3272,7 +3267,7 @@ int ll_layout_conf(struct inode *inode, const struct cl_object_conf *conf)
struct cl_object *obj = lli->lli_clob;
struct lu_env *env;
int rc;
- int refcheck;
+ u16 refcheck;
if (!obj)
return 0;
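/*
 * The file.c hunks repeat one pattern: a u16 refcheck, cl_env_get(), an
 * IS_ERR() test, the work, then cl_env_put() with the same refcheck, which
 * is why the variable only narrows from int to u16.  The env helpers below
 * are stand-ins that model the pairing, not the real cl_env_* code.
 */
#include <stdint.h>
#include <stdio.h>

struct lu_env { int dummy; };

static struct lu_env global_env;

static struct lu_env *env_get(uint16_t *refcheck)
{
        *refcheck = 1;                 /* real code records nesting depth */
        return &global_env;
}

static void env_put(struct lu_env *env, uint16_t *refcheck)
{
        (void)env;
        *refcheck = 0;                 /* must pair with the same counter */
}

int main(void)
{
        uint16_t refcheck;
        struct lu_env *env = env_get(&refcheck);

        if (!env)                      /* stands in for IS_ERR(env) */
                return 1;
        printf("doing I/O under env, refcheck=%u\n", refcheck);
        env_put(env, &refcheck);
        return 0;
}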
diff --git a/drivers/staging/lustre/lustre/llite/glimpse.c b/drivers/staging/lustre/lustre/llite/glimpse.c
index 504498de536e..0143112e672d 100644
--- a/drivers/staging/lustre/lustre/llite/glimpse.c
+++ b/drivers/staging/lustre/lustre/llite/glimpse.c
@@ -138,7 +138,7 @@ int cl_glimpse_lock(const struct lu_env *env, struct cl_io *io,
}
static int cl_io_get(struct inode *inode, struct lu_env **envout,
- struct cl_io **ioout, int *refcheck)
+ struct cl_io **ioout, u16 *refcheck)
{
struct lu_env *env;
struct cl_io *io;
@@ -178,7 +178,7 @@ int cl_glimpse_size0(struct inode *inode, int agl)
struct lu_env *env = NULL;
struct cl_io *io = NULL;
int result;
- int refcheck;
+ u16 refcheck;
result = cl_io_get(inode, &env, &io, &refcheck);
if (result > 0) {
diff --git a/drivers/staging/lustre/lustre/llite/lcommon_cl.c b/drivers/staging/lustre/lustre/llite/lcommon_cl.c
index f1036f477a51..8af611033e12 100644
--- a/drivers/staging/lustre/lustre/llite/lcommon_cl.c
+++ b/drivers/staging/lustre/lustre/llite/lcommon_cl.c
@@ -72,7 +72,7 @@
* mutex.
*/
struct lu_env *cl_inode_fini_env;
-int cl_inode_fini_refcheck;
+u16 cl_inode_fini_refcheck;
/**
* A mutex serializing calls to slp_inode_fini() under extreme memory
@@ -86,7 +86,7 @@ int cl_setattr_ost(struct cl_object *obj, const struct iattr *attr,
struct lu_env *env;
struct cl_io *io;
int result;
- int refcheck;
+ u16 refcheck;
env = cl_env_get(&refcheck);
if (IS_ERR(env))
@@ -149,7 +149,7 @@ int cl_file_inode_init(struct inode *inode, struct lustre_md *md)
}
};
int result = 0;
- int refcheck;
+ u16 refcheck;
LASSERT(md->body->mbo_valid & OBD_MD_FLID);
LASSERT(S_ISREG(inode->i_mode));
@@ -237,7 +237,7 @@ void cl_inode_fini(struct inode *inode)
struct lu_env *env;
struct ll_inode_info *lli = ll_i2info(inode);
struct cl_object *clob = lli->lli_clob;
- int refcheck;
+ u16 refcheck;
int emergency;
if (clob) {
diff --git a/drivers/staging/lustre/lustre/llite/lcommon_misc.c b/drivers/staging/lustre/lustre/llite/lcommon_misc.c
index f0c132e2cf92..7f7f3f1648ef 100644
--- a/drivers/staging/lustre/lustre/llite/lcommon_misc.c
+++ b/drivers/staging/lustre/lustre/llite/lcommon_misc.c
@@ -124,7 +124,7 @@ int cl_get_grouplock(struct cl_object *obj, unsigned long gid, int nonblock,
struct cl_lock *lock;
struct cl_lock_descr *descr;
__u32 enqflags;
- int refcheck;
+ u16 refcheck;
int rc;
env = cl_env_get(&refcheck);
diff --git a/drivers/staging/lustre/lustre/llite/llite_internal.h b/drivers/staging/lustre/lustre/llite/llite_internal.h
index 55f68acd85d1..d2a0fabd8a69 100644
--- a/drivers/staging/lustre/lustre/llite/llite_internal.h
+++ b/drivers/staging/lustre/lustre/llite/llite_internal.h
@@ -35,7 +35,6 @@
#include "../include/lustre_debug.h"
#include "../include/lustre_ver.h"
#include "../include/lustre_disk.h" /* for s2sbi */
-#include "../include/lustre_eacl.h"
#include "../include/lustre_linkea.h"
/* for struct cl_lock_descr and struct cl_io */
@@ -1330,7 +1329,7 @@ int cl_setattr_ost(struct cl_object *obj, const struct iattr *attr,
unsigned int attr_flags);
extern struct lu_env *cl_inode_fini_env;
-extern int cl_inode_fini_refcheck;
+extern u16 cl_inode_fini_refcheck;
int cl_file_inode_init(struct inode *inode, struct lustre_md *md);
void cl_inode_fini(struct inode *inode);
diff --git a/drivers/staging/lustre/lustre/llite/llite_lib.c b/drivers/staging/lustre/lustre/llite/llite_lib.c
index d483c44aafe5..ca5040c69217 100644
--- a/drivers/staging/lustre/lustre/llite/llite_lib.c
+++ b/drivers/staging/lustre/lustre/llite/llite_lib.c
@@ -1454,17 +1454,17 @@ int ll_setattr_raw(struct dentry *dentry, struct iattr *attr, bool hsm_import)
/* We mark all of the fields "set" so MDS/OST does not re-set them */
if (attr->ia_valid & ATTR_CTIME) {
- attr->ia_ctime = CURRENT_TIME;
+ attr->ia_ctime = current_time(inode);
attr->ia_valid |= ATTR_CTIME_SET;
}
if (!(attr->ia_valid & ATTR_ATIME_SET) &&
(attr->ia_valid & ATTR_ATIME)) {
- attr->ia_atime = CURRENT_TIME;
+ attr->ia_atime = current_time(inode);
attr->ia_valid |= ATTR_ATIME_SET;
}
if (!(attr->ia_valid & ATTR_MTIME_SET) &&
(attr->ia_valid & ATTR_MTIME)) {
- attr->ia_mtime = CURRENT_TIME;
+ attr->ia_mtime = current_time(inode);
attr->ia_valid |= ATTR_MTIME_SET;
}
@@ -1486,8 +1486,6 @@ int ll_setattr_raw(struct dentry *dentry, struct iattr *attr, bool hsm_import)
goto out;
}
- op_data->op_attr = *attr;
-
if (!hsm_import && attr->ia_valid & ATTR_SIZE) {
/*
* If we are changing file size, file content is
@@ -1495,8 +1493,11 @@ int ll_setattr_raw(struct dentry *dentry, struct iattr *attr, bool hsm_import)
*/
attr->ia_valid |= MDS_OPEN_OWNEROVERRIDE;
op_data->op_bias |= MDS_DATA_MODIFIED;
+ clear_bit(LLIF_DATA_MODIFIED, &lli->lli_flags);
}
+ op_data->op_attr = *attr;
+
rc = ll_md_setattr(dentry, op_data);
if (rc)
goto out;
@@ -1542,8 +1543,15 @@ int ll_setattr_raw(struct dentry *dentry, struct iattr *attr, bool hsm_import)
int rc2;
rc2 = ll_hsm_state_set(inode, &hss);
+ /*
+ * truncate and write can happen at the same time, so that
+ * the file can be set modified even though the file is not
+ * restored from released state, and ll_hsm_state_set() is
+ * not applicable for the file, and rc2 < 0 is normal in this
+ * case.
+ */
if (rc2 < 0)
- CERROR(DFID "HSM set dirty failed: rc2 = %d\n",
+ CDEBUG(D_INFO, DFID "HSM set dirty failed: rc2 = %d\n",
PFID(ll_inode2fid(inode)), rc2);
}
@@ -2486,7 +2494,7 @@ no_kbuf:
void ll_compute_rootsquash_state(struct ll_sb_info *sbi)
{
struct root_squash_info *squash = &sbi->ll_squash;
- lnet_process_id_t id;
+ struct lnet_process_id id;
bool matched;
int i;
diff --git a/drivers/staging/lustre/lustre/llite/llite_mmap.c b/drivers/staging/lustre/lustre/llite/llite_mmap.c
index 896196c74cd2..cbbfdaf127a7 100644
--- a/drivers/staging/lustre/lustre/llite/llite_mmap.c
+++ b/drivers/staging/lustre/lustre/llite/llite_mmap.c
@@ -150,7 +150,7 @@ static int ll_page_mkwrite0(struct vm_area_struct *vma, struct page *vmpage,
struct cl_io *io;
struct vvp_io *vio;
int result;
- int refcheck;
+ u16 refcheck;
sigset_t set;
struct inode *inode;
struct ll_inode_info *lli;
@@ -268,7 +268,7 @@ static int ll_fault0(struct vm_area_struct *vma, struct vm_fault *vmf)
unsigned long ra_flags;
int result = 0;
int fault_ret = 0;
- int refcheck;
+ u16 refcheck;
env = cl_env_get(&refcheck);
if (IS_ERR(env))
diff --git a/drivers/staging/lustre/lustre/llite/lproc_llite.c b/drivers/staging/lustre/lustre/llite/lproc_llite.c
index f3ee584157e0..c742cba60199 100644
--- a/drivers/staging/lustre/lustre/llite/lproc_llite.c
+++ b/drivers/staging/lustre/lustre/llite/lproc_llite.c
@@ -386,7 +386,7 @@ static ssize_t ll_max_cached_mb_seq_write(struct file *file,
struct lu_env *env;
long diff = 0;
long nrpages = 0;
- int refcheck;
+ u16 refcheck;
long pages_number;
int mult;
long rc;
@@ -1308,7 +1308,7 @@ static void ll_display_extents_info(struct ll_rw_extents_info *io_extents,
r, pct(r, read_tot), pct(read_cum, read_tot),
w, pct(w, write_tot), pct(write_cum, write_tot));
start = end;
- if (start == 1 << 10) {
+ if (start == 1024) {
start = 1;
units += 10;
unitp++;
diff --git a/drivers/staging/lustre/lustre/llite/namei.c b/drivers/staging/lustre/lustre/llite/namei.c
index fc176540bb95..d583696e8378 100644
--- a/drivers/staging/lustre/lustre/llite/namei.c
+++ b/drivers/staging/lustre/lustre/llite/namei.c
@@ -434,6 +434,7 @@ struct dentry *ll_splice_alias(struct inode *inode, struct dentry *de)
{
if (inode) {
struct dentry *new = ll_find_alias(inode, de);
+
if (new) {
d_move(new, de);
iput(inode);
diff --git a/drivers/staging/lustre/lustre/llite/range_lock.c b/drivers/staging/lustre/lustre/llite/range_lock.c
index 14148a097476..161391b6fb36 100644
--- a/drivers/staging/lustre/lustre/llite/range_lock.c
+++ b/drivers/staging/lustre/lustre/llite/range_lock.c
@@ -174,7 +174,7 @@ void range_unlock(struct range_lock_tree *tree, struct range_lock *lock)
*/
static enum interval_iter range_lock_cb(struct interval_node *node, void *arg)
{
- struct range_lock *lock = (struct range_lock *)arg;
+ struct range_lock *lock = arg;
struct range_lock *overlap = node2rangelock(node);
lock->rl_blocking_ranges += overlap->rl_lock_count + 1;
diff --git a/drivers/staging/lustre/lustre/llite/rw.c b/drivers/staging/lustre/lustre/llite/rw.c
index 50d027e0cfab..1bac51f882a7 100644
--- a/drivers/staging/lustre/lustre/llite/rw.c
+++ b/drivers/staging/lustre/lustre/llite/rw.c
@@ -905,7 +905,7 @@ int ll_writepage(struct page *vmpage, struct writeback_control *wbc)
bool redirtied = false;
bool unlocked = false;
int result;
- int refcheck;
+ u16 refcheck;
LASSERT(PageLocked(vmpage));
LASSERT(!PageWriteback(vmpage));
diff --git a/drivers/staging/lustre/lustre/llite/rw26.c b/drivers/staging/lustre/lustre/llite/rw26.c
index d89e79599199..420f296f9658 100644
--- a/drivers/staging/lustre/lustre/llite/rw26.c
+++ b/drivers/staging/lustre/lustre/llite/rw26.c
@@ -156,32 +156,6 @@ static int ll_releasepage(struct page *vmpage, gfp_t gfp_mask)
#define MAX_DIRECTIO_SIZE (2 * 1024 * 1024 * 1024UL)
-static inline int ll_get_user_pages(int rw, unsigned long user_addr,
- size_t size, struct page ***pages,
- int *max_pages)
-{
- int result = -ENOMEM;
-
- /* set an arbitrary limit to prevent arithmetic overflow */
- if (size > MAX_DIRECTIO_SIZE) {
- *pages = NULL;
- return -EFBIG;
- }
-
- *max_pages = (user_addr + size + PAGE_SIZE - 1) >> PAGE_SHIFT;
- *max_pages -= user_addr >> PAGE_SHIFT;
-
- *pages = libcfs_kvzalloc(*max_pages * sizeof(**pages), GFP_NOFS);
- if (*pages) {
- result = get_user_pages_fast(user_addr, *max_pages,
- (rw == READ), *pages);
- if (unlikely(result <= 0))
- kvfree(*pages);
- }
-
- return result;
-}
-
/* ll_free_user_pages - tear down page struct array
* @pages: array of page struct pointers underlying target buffer
*/
diff --git a/drivers/staging/lustre/lustre/llite/super25.c b/drivers/staging/lustre/lustre/llite/super25.c
index 4759802e062d..56f4b10624ce 100644
--- a/drivers/staging/lustre/lustre/llite/super25.c
+++ b/drivers/staging/lustre/lustre/llite/super25.c
@@ -84,7 +84,7 @@ MODULE_ALIAS_FS("lustre");
static int __init lustre_init(void)
{
- lnet_process_id_t lnet_id;
+ struct lnet_process_id lnet_id;
struct timespec64 ts;
int i, rc, seed[2];
diff --git a/drivers/staging/lustre/lustre/llite/symlink.c b/drivers/staging/lustre/lustre/llite/symlink.c
index cd77b55d3895..60aac42e5344 100644
--- a/drivers/staging/lustre/lustre/llite/symlink.c
+++ b/drivers/staging/lustre/lustre/llite/symlink.c
@@ -129,6 +129,7 @@ static const char *ll_get_link(struct dentry *dentry,
struct ptlrpc_request *request = NULL;
int rc;
char *symname = NULL;
+
if (!dentry)
return ERR_PTR(-ECHILD);
diff --git a/drivers/staging/lustre/lustre/llite/vvp_dev.c b/drivers/staging/lustre/lustre/llite/vvp_dev.c
index 3669ea77ee93..6cb2db28eb60 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_dev.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_dev.c
@@ -313,7 +313,7 @@ int cl_sb_init(struct super_block *sb)
struct cl_device *cl;
struct lu_env *env;
int rc = 0;
- int refcheck;
+ u16 refcheck;
sbi = ll_s2sbi(sb);
env = cl_env_get(&refcheck);
@@ -336,7 +336,7 @@ int cl_sb_fini(struct super_block *sb)
struct ll_sb_info *sbi;
struct lu_env *env;
struct cl_device *cld;
- int refcheck;
+ u16 refcheck;
int result;
sbi = ll_s2sbi(sb);
@@ -535,7 +535,7 @@ static int vvp_pgcache_show(struct seq_file *f, void *v)
struct cl_object *clob;
struct lu_env *env;
struct vvp_pgcache_id id;
- int refcheck;
+ u16 refcheck;
int result;
env = cl_env_get(&refcheck);
@@ -584,7 +584,7 @@ static void *vvp_pgcache_start(struct seq_file *f, loff_t *pos)
{
struct ll_sb_info *sbi;
struct lu_env *env;
- int refcheck;
+ u16 refcheck;
sbi = f->private;
@@ -608,7 +608,7 @@ static void *vvp_pgcache_next(struct seq_file *f, void *v, loff_t *pos)
{
struct ll_sb_info *sbi;
struct lu_env *env;
- int refcheck;
+ u16 refcheck;
env = cl_env_get(&refcheck);
if (!IS_ERR(env)) {
diff --git a/drivers/staging/lustre/lustre/llite/vvp_io.c b/drivers/staging/lustre/lustre/llite/vvp_io.c
index 4c57755e06e7..aa31bc0a58a6 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_io.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_io.c
@@ -219,6 +219,7 @@ static int vvp_io_one_lock_index(const struct lu_env *env, struct cl_io *io,
if (vio->vui_fd && (vio->vui_fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
descr->cld_mode = CLM_GROUP;
descr->cld_gid = vio->vui_fd->fd_grouplock.lg_gid;
+ enqflags |= CEF_LOCK_MATCH;
} else {
descr->cld_mode = mode;
}
@@ -449,6 +450,7 @@ static void vvp_io_advance(const struct lu_env *env,
{
struct cl_object *obj = ios->cis_io->ci_obj;
struct vvp_io *vio = cl2vvp_io(env, ios);
+
CLOBINVRNT(env, obj, vvp_object_invariant(obj));
vio->vui_tot_count -= nob;
diff --git a/drivers/staging/lustre/lustre/llite/xattr.c b/drivers/staging/lustre/lustre/llite/xattr.c
index 421cc04ecf1e..6187bffec8c4 100644
--- a/drivers/staging/lustre/lustre/llite/xattr.c
+++ b/drivers/staging/lustre/lustre/llite/xattr.c
@@ -40,7 +40,6 @@
#include "../include/obd_support.h"
#include "../include/lustre_dlm.h"
#include "../include/lustre_ver.h"
-#include "../include/lustre_eacl.h"
#include "llite_internal.h"
@@ -427,7 +426,7 @@ static ssize_t ll_getxattr_lov(struct inode *inode, void *buf, size_t buf_size)
.cl_buf.lb_len = buf_size,
};
struct lu_env *env;
- int refcheck;
+ u16 refcheck;
if (!obj)
return -ENODATA;
diff --git a/drivers/staging/lustre/lustre/lmv/lmv_obd.c b/drivers/staging/lustre/lustre/lmv/lmv_obd.c
index 271e18966f50..732595125d8a 100644
--- a/drivers/staging/lustre/lustre/lmv/lmv_obd.c
+++ b/drivers/staging/lustre/lustre/lmv/lmv_obd.c
@@ -373,7 +373,6 @@ static void lmv_del_target(struct lmv_obd *lmv, int index)
kfree(lmv->tgts[index]);
lmv->tgts[index] = NULL;
- return;
}
static int lmv_add_target(struct obd_device *obd, struct obd_uuid *uuidp,
@@ -640,7 +639,7 @@ static int lmv_fid2path(struct obd_export *exp, int len, void *karg,
int remote_gf_size = 0;
int rc;
- gf = (struct getinfo_fid2path *)karg;
+ gf = karg;
tgt = lmv_find_target(lmv, &gf->gf_fid);
if (IS_ERR(tgt))
return PTR_ERR(tgt);
@@ -657,7 +656,7 @@ repeat_fid2path:
struct getinfo_fid2path *ori_gf;
char *ptr;
- ori_gf = (struct getinfo_fid2path *)karg;
+ ori_gf = karg;
if (strlen(ori_gf->gf_path) +
strlen(gf->gf_path) > ori_gf->gf_pathlen) {
rc = -EOVERFLOW;
diff --git a/drivers/staging/lustre/lustre/lmv/lproc_lmv.c b/drivers/staging/lustre/lustre/lmv/lproc_lmv.c
index ff458020b96a..bf25f887062d 100644
--- a/drivers/staging/lustre/lustre/lmv/lproc_lmv.c
+++ b/drivers/staging/lustre/lustre/lmv/lproc_lmv.c
@@ -91,7 +91,6 @@ static void *lmv_tgt_seq_start(struct seq_file *p, loff_t *pos)
static void lmv_tgt_seq_stop(struct seq_file *p, void *v)
{
- return;
}
static void *lmv_tgt_seq_next(struct seq_file *p, void *v, loff_t *pos)
diff --git a/drivers/staging/lustre/lustre/lov/lov_cl_internal.h b/drivers/staging/lustre/lustre/lov/lov_cl_internal.h
index c49a34bf10e5..391c632365ae 100644
--- a/drivers/staging/lustre/lustre/lov/lov_cl_internal.h
+++ b/drivers/staging/lustre/lustre/lov/lov_cl_internal.h
@@ -118,7 +118,7 @@ struct lov_device_emerg {
*
* \see cl_env_get()
*/
- int emrg_refcheck;
+ u16 emrg_refcheck;
};
struct lov_device {
@@ -378,40 +378,39 @@ struct lov_thread_info {
* State that lov_io maintains for every sub-io.
*/
struct lov_io_sub {
- int sub_stripe;
- /**
- * sub-io for a stripe. Ideally sub-io's can be stopped and resumed
- * independently, with lov acting as a scheduler to maximize overall
- * throughput.
- */
- struct cl_io *sub_io;
+ u16 sub_stripe;
/**
- * Linkage into a list (hanging off lov_io::lis_active) of all
- * sub-io's active for the current IO iteration.
+ * environment's refcheck.
+ *
+ * \see cl_env_get()
*/
- struct list_head sub_linkage;
+ u16 sub_refcheck;
+ u16 sub_reenter;
/**
* true, iff cl_io_init() was successfully executed against
* lov_io_sub::sub_io.
*/
- int sub_io_initialized;
+ u16 sub_io_initialized:1,
/**
* True, iff lov_io_sub::sub_io and lov_io_sub::sub_env weren't
* allocated, but borrowed from a per-device emergency pool.
*/
- int sub_borrowed;
+ sub_borrowed:1;
/**
- * environment, in which sub-io executes.
+ * Linkage into a list (hanging off lov_io::lis_active) of all
+ * sub-io's active for the current IO iteration.
*/
- struct lu_env *sub_env;
+ struct list_head sub_linkage;
/**
- * environment's refcheck.
- *
- * \see cl_env_get()
+ * sub-io for a stripe. Ideally sub-io's can be stopped and resumed
+ * independently, with lov acting as a scheduler to maximize overall
+ * throughput.
+ */
+ struct cl_io *sub_io;
+ /**
+ * environment, in which sub-io executes.
*/
- int sub_refcheck;
- int sub_refcheck2;
- int sub_reenter;
+ struct lu_env *sub_env;
};
/**
diff --git a/drivers/staging/lustre/lustre/lov/lov_io.c b/drivers/staging/lustre/lustre/lov/lov_io.c
index e0f0756ee883..df77b2586612 100644
--- a/drivers/staging/lustre/lustre/lov/lov_io.c
+++ b/drivers/staging/lustre/lustre/lov/lov_io.c
@@ -424,21 +424,23 @@ static int lov_io_iter_init(const struct lu_env *env,
end = lov_offset_mod(end, 1);
sub = lov_sub_get(env, lio, stripe);
- if (!IS_ERR(sub)) {
- lov_io_sub_inherit(sub->sub_io, lio, stripe,
- start, end);
- rc = cl_io_iter_init(sub->sub_env, sub->sub_io);
- lov_sub_put(sub);
- CDEBUG(D_VFSTRACE, "shrink: %d [%llu, %llu)\n",
- stripe, start, end);
- } else {
+ if (IS_ERR(sub)) {
rc = PTR_ERR(sub);
+ break;
}
- if (!rc)
- list_add_tail(&sub->sub_linkage, &lio->lis_active);
- else
+ lov_io_sub_inherit(sub->sub_io, lio, stripe, start, end);
+ rc = cl_io_iter_init(sub->sub_env, sub->sub_io);
+ if (rc)
+ cl_io_iter_fini(sub->sub_env, sub->sub_io);
+ lov_sub_put(sub);
+ if (rc)
break;
+
+ CDEBUG(D_VFSTRACE, "shrink: %d [%llu, %llu)\n",
+ stripe, start, end);
+
+ list_add_tail(&sub->sub_linkage, &lio->lis_active);
}
return rc;
}
diff --git a/drivers/staging/lustre/lustre/lov/lov_obd.c b/drivers/staging/lustre/lustre/lov/lov_obd.c
index b3161fb6d4b5..25f15da6e189 100644
--- a/drivers/staging/lustre/lustre/lov/lov_obd.c
+++ b/drivers/staging/lustre/lustre/lov/lov_obd.c
@@ -68,7 +68,6 @@ static void lov_getref(struct obd_device *obd)
mutex_lock(&lov->lov_lock);
atomic_inc(&lov->lov_refcount);
mutex_unlock(&lov->lov_lock);
- return;
}
static void __lov_del_obd(struct obd_device *obd, struct lov_tgt_desc *tgt);
diff --git a/drivers/staging/lustre/lustre/lov/lov_object.c b/drivers/staging/lustre/lustre/lov/lov_object.c
index 977579c9c519..ab3ecfeeadc8 100644
--- a/drivers/staging/lustre/lustre/lov/lov_object.c
+++ b/drivers/staging/lustre/lustre/lov/lov_object.c
@@ -746,7 +746,7 @@ static int lov_layout_change(const struct lu_env *unused,
const struct lov_layout_operations *old_ops;
const struct lov_layout_operations *new_ops;
struct lu_env *env;
- int refcheck;
+ u16 refcheck;
int rc;
LASSERT(0 <= lov->lo_type && lov->lo_type < ARRAY_SIZE(lov_dispatch));
diff --git a/drivers/staging/lustre/lustre/obdclass/cl_object.c b/drivers/staging/lustre/lustre/obdclass/cl_object.c
index 703cb67ce42e..08e55d418537 100644
--- a/drivers/staging/lustre/lustre/obdclass/cl_object.c
+++ b/drivers/staging/lustre/lustre/obdclass/cl_object.c
@@ -688,7 +688,7 @@ static inline struct cl_env *cl_env_container(struct lu_env *env)
*
* \see cl_env_put()
*/
-struct lu_env *cl_env_get(int *refcheck)
+struct lu_env *cl_env_get(u16 *refcheck)
{
struct lu_env *env;
@@ -709,7 +709,7 @@ EXPORT_SYMBOL(cl_env_get);
*
* \see cl_env_get()
*/
-struct lu_env *cl_env_alloc(int *refcheck, __u32 tags)
+struct lu_env *cl_env_alloc(u16 *refcheck, u32 tags)
{
struct lu_env *env;
@@ -769,7 +769,7 @@ EXPORT_SYMBOL(cl_env_cache_purge);
* this thread is using environment and it is returned to the allocation
* cache, or freed straight away, if cache is large enough.
*/
-void cl_env_put(struct lu_env *env, int *refcheck)
+void cl_env_put(struct lu_env *env, u16 *refcheck)
{
struct cl_env *cle;
diff --git a/drivers/staging/lustre/lustre/obdclass/cl_page.c b/drivers/staging/lustre/lustre/obdclass/cl_page.c
index cd9a40ca4448..71fcc4cc9e72 100644
--- a/drivers/staging/lustre/lustre/obdclass/cl_page.c
+++ b/drivers/staging/lustre/lustre/obdclass/cl_page.c
@@ -482,6 +482,7 @@ void cl_page_disown0(const struct lu_env *env,
int cl_page_is_owned(const struct cl_page *pg, const struct cl_io *io)
{
struct cl_io *top = cl_io_top((struct cl_io *)io);
+
LINVRNT(cl_object_same(pg->cp_obj, io->ci_obj));
return pg->cp_state == CPS_OWNED && pg->cp_owner == top;
}
diff --git a/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c b/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c
index 2c99717b0aba..1ec6e3767d81 100644
--- a/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c
+++ b/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c
@@ -598,6 +598,93 @@ int lprocfs_rd_conn_uuid(struct seq_file *m, void *data)
}
EXPORT_SYMBOL(lprocfs_rd_conn_uuid);
+/**
+ * Lock statistics structure for access, possibly only on this CPU.
+ *
+ * The statistics struct may be allocated with per-CPU structures for
+ * efficient concurrent update (usually only on server-wide stats), or
+ * as a single global struct (e.g. for per-client or per-job statistics),
+ * so the required locking depends on the type of structure allocated.
+ *
+ * For per-CPU statistics, pin the thread to the current cpuid so that it
+ * will only access the statistics for that CPU. If the stats structure
+ * for the current CPU has not been allocated (or previously freed),
+ * allocate it now. The per-CPU statistics do not need locking since
+ * the thread is pinned to the CPU during update.
+ *
+ * For global statistics, lock the stats structure to prevent concurrent update.
+ *
+ * \param[in] stats statistics structure to lock
+ * \param[in] opc type of operation:
+ * LPROCFS_GET_SMP_ID: "lock" and return current CPU index
+ * for incrementing statistics for that CPU
+ * LPROCFS_GET_NUM_CPU: "lock" and return number of used
+ * CPU indices to iterate over all indices
+ * \param[out] flags CPU interrupt saved state for IRQ-safe locking
+ *
+ * \retval cpuid of current thread or number of allocated structs
+ * \retval negative on error (only for opc LPROCFS_GET_SMP_ID + per-CPU stats)
+ */
+int lprocfs_stats_lock(struct lprocfs_stats *stats,
+ enum lprocfs_stats_lock_ops opc,
+ unsigned long *flags)
+{
+ if (stats->ls_flags & LPROCFS_STATS_FLAG_NOPERCPU) {
+ if (stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE)
+ spin_lock_irqsave(&stats->ls_lock, *flags);
+ else
+ spin_lock(&stats->ls_lock);
+ return opc == LPROCFS_GET_NUM_CPU ? 1 : 0;
+ }
+
+ switch (opc) {
+ case LPROCFS_GET_SMP_ID: {
+ unsigned int cpuid = get_cpu();
+
+ if (unlikely(!stats->ls_percpu[cpuid])) {
+ int rc = lprocfs_stats_alloc_one(stats, cpuid);
+
+ if (rc < 0) {
+ put_cpu();
+ return rc;
+ }
+ }
+ return cpuid;
+ }
+ case LPROCFS_GET_NUM_CPU:
+ return stats->ls_biggest_alloc_num;
+ default:
+ LBUG();
+ }
+}
+
+/**
+ * Unlock statistics structure after access.
+ *
+ * Unlock the lock acquired via lprocfs_stats_lock() for global statistics,
+ * or unpin this thread from the current cpuid for per-CPU statistics.
+ *
+ * This function must be called using the same arguments as used when calling
+ * lprocfs_stats_lock() so that the correct operation can be performed.
+ *
+ * \param[in] stats statistics structure to unlock
+ * \param[in] opc type of operation (current cpuid or number of structs)
+ * \param[in] flags CPU interrupt saved state for IRQ-safe locking
+ */
+void lprocfs_stats_unlock(struct lprocfs_stats *stats,
+ enum lprocfs_stats_lock_ops opc,
+ unsigned long *flags)
+{
+ if (stats->ls_flags & LPROCFS_STATS_FLAG_NOPERCPU) {
+ if (stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE)
+ spin_unlock_irqrestore(&stats->ls_lock, *flags);
+ else
+ spin_unlock(&stats->ls_lock);
+ } else if (opc == LPROCFS_GET_SMP_ID) {
+ put_cpu();
+ }
+}
+
/** add up per-cpu counters */
void lprocfs_stats_collect(struct lprocfs_stats *stats, int idx,
struct lprocfs_counter *cnt)
@@ -1146,6 +1233,30 @@ void lprocfs_free_stats(struct lprocfs_stats **statsh)
}
EXPORT_SYMBOL(lprocfs_free_stats);
+__u64 lprocfs_stats_collector(struct lprocfs_stats *stats, int idx,
+ enum lprocfs_fields_flags field)
+{
+ unsigned int i;
+ unsigned int num_cpu;
+ unsigned long flags = 0;
+ __u64 ret = 0;
+
+ LASSERT(stats);
+
+ num_cpu = lprocfs_stats_lock(stats, LPROCFS_GET_NUM_CPU, &flags);
+ for (i = 0; i < num_cpu; i++) {
+ if (!stats->ls_percpu[i])
+ continue;
+ ret += lprocfs_read_helper(
+ lprocfs_stats_counter_get(stats, i, idx),
+ &stats->ls_cnt_header[idx], stats->ls_flags,
+ field);
+ }
+ lprocfs_stats_unlock(stats, LPROCFS_GET_NUM_CPU, &flags);
+ return ret;
+}
+EXPORT_SYMBOL(lprocfs_stats_collector);
+
void lprocfs_clear_stats(struct lprocfs_stats *stats)
{
struct lprocfs_counter *percpu_cntr;
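A minimal sketch (not part of this series) of how a caller might pair the
lprocfs_stats_lock()/lprocfs_stats_unlock() helpers added above: they must be
called with the same opc and flags, and lprocfs_stats_collector() already shows
the LPROCFS_GET_NUM_CPU side, so the sketch below covers the LPROCFS_GET_SMP_ID
side for updating one counter. Only lprocfs_stats_lock/unlock and
lprocfs_stats_counter_get() are taken from the hunk above; the
example_stats_incr() name and the lc_count field access are illustrative
assumptions.

static void example_stats_incr(struct lprocfs_stats *stats, int idx)
{
	struct lprocfs_counter *cntr;
	unsigned long flags = 0;
	int smp_id;

	/* "Lock": pins this thread to its CPU for per-CPU stats, or takes
	 * ls_lock for global stats; returns the CPU index to update.
	 */
	smp_id = lprocfs_stats_lock(stats, LPROCFS_GET_SMP_ID, &flags);
	if (smp_id < 0)
		return;		/* per-CPU slot allocation failed */

	cntr = lprocfs_stats_counter_get(stats, smp_id, idx);
	cntr->lc_count++;	/* lc_count is assumed here for illustration */

	/* Unlock with the same opc and flags used for the lock call. */
	lprocfs_stats_unlock(stats, LPROCFS_GET_SMP_ID, &flags);
}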
diff --git a/drivers/staging/lustre/lustre/obdclass/obd_config.c b/drivers/staging/lustre/lustre/obdclass/obd_config.c
index 9ca84c7d49de..6a7e7a7d2af1 100644
--- a/drivers/staging/lustre/lustre/obdclass/obd_config.c
+++ b/drivers/staging/lustre/lustre/obdclass/obd_config.c
@@ -1052,7 +1052,8 @@ int class_process_proc_param(char *prefix, struct lprocfs_vars *lvars,
oldfs = get_fs();
set_fs(KERNEL_DS);
- rc = var->fops->write(&fakefile, sval,
+ rc = var->fops->write(&fakefile,
+ (const char __user *)sval,
vallen, NULL);
set_fs(oldfs);
}
@@ -1426,8 +1427,8 @@ int class_manual_cleanup(struct obd_device *obd)
lustre_cfg_bufs_reset(&bufs, obd->obd_name);
lustre_cfg_bufs_set_string(&bufs, 1, flags);
lcfg = lustre_cfg_new(LCFG_CLEANUP, &bufs);
- if (!lcfg)
- return -ENOMEM;
+ if (IS_ERR(lcfg))
+ return PTR_ERR(lcfg);
rc = class_process_config(lcfg);
if (rc) {
diff --git a/drivers/staging/lustre/lustre/obdecho/echo_client.c b/drivers/staging/lustre/lustre/obdecho/echo_client.c
index 549076193bde..77b4c5504689 100644
--- a/drivers/staging/lustre/lustre/obdecho/echo_client.c
+++ b/drivers/staging/lustre/lustre/obdecho/echo_client.c
@@ -816,7 +816,7 @@ cl_echo_object_find(struct echo_device *d, const struct ost_id *oi)
struct echo_object *eco;
struct cl_object *obj;
struct lu_fid *fid;
- int refcheck;
+ u16 refcheck;
int rc;
LASSERTF(ostid_id(oi), DOSTID "\n", POSTID(oi));
@@ -882,7 +882,7 @@ static int cl_echo_object_put(struct echo_object *eco)
{
struct lu_env *env;
struct cl_object *obj = echo_obj2cl(eco);
- int refcheck;
+ u16 refcheck;
env = cl_env_get(&refcheck);
if (IS_ERR(env))
@@ -999,7 +999,7 @@ static int cl_echo_object_brw(struct echo_object *eco, int rw, u64 offset,
struct cl_page *clp;
struct lustre_handle lh = { 0 };
size_t page_size = cl_page_size(obj);
- int refcheck;
+ u16 refcheck;
int rc;
int i;
diff --git a/drivers/staging/lustre/lustre/osc/lproc_osc.c b/drivers/staging/lustre/lustre/osc/lproc_osc.c
index 575b2969ad83..86f252d6adbd 100644
--- a/drivers/staging/lustre/lustre/osc/lproc_osc.c
+++ b/drivers/staging/lustre/lustre/osc/lproc_osc.c
@@ -229,7 +229,7 @@ static ssize_t osc_cached_mb_seq_write(struct file *file,
rc = atomic_long_read(&cli->cl_lru_in_list) - pages_number;
if (rc > 0) {
struct lu_env *env;
- int refcheck;
+ u16 refcheck;
env = cl_env_get(&refcheck);
if (!IS_ERR(env)) {
diff --git a/drivers/staging/lustre/lustre/osc/osc_cache.c b/drivers/staging/lustre/lustre/osc/osc_cache.c
index 0490478393df..c5ccf568313a 100644
--- a/drivers/staging/lustre/lustre/osc/osc_cache.c
+++ b/drivers/staging/lustre/lustre/osc/osc_cache.c
@@ -898,6 +898,7 @@ int osc_extent_finish(const struct lu_env *env, struct osc_extent *ext,
int offset = last_off & ~PAGE_MASK;
int count = last_count + (offset & (blocksize - 1));
int end = (offset + last_count) & (blocksize - 1);
+
if (end)
count += blocksize - end;
@@ -988,7 +989,7 @@ static int osc_extent_truncate(struct osc_extent *ext, pgoff_t trunc_index,
int grants = 0;
int nr_pages = 0;
int rc = 0;
- int refcheck;
+ u16 refcheck;
LASSERT(sanity_check(ext) == 0);
EASSERT(ext->oe_state == OES_TRUNC, ext);
@@ -2790,7 +2791,6 @@ again:
* We have to wait for this extent because we can't
* truncate that page.
*/
- LASSERT(!ext->oe_hp);
OSC_EXTENT_DUMP(D_CACHE, ext,
"waiting for busy extent\n");
waiting = osc_extent_get(ext);
diff --git a/drivers/staging/lustre/lustre/osc/osc_cl_internal.h b/drivers/staging/lustre/lustre/osc/osc_cl_internal.h
index c09ab97d64ae..270212f4e5cf 100644
--- a/drivers/staging/lustre/lustre/osc/osc_cl_internal.h
+++ b/drivers/staging/lustre/lustre/osc/osc_cl_internal.h
@@ -62,7 +62,9 @@ struct osc_io {
/** super class */
struct cl_io_slice oi_cl;
/** true if this io is lockless. */
- unsigned int oi_lockless;
+ unsigned int oi_lockless:1,
+ /** true if this io is counted as active IO */
+ oi_is_active:1;
/** how many LRU pages are reserved for this IO */
unsigned long oi_lru_reserved;
diff --git a/drivers/staging/lustre/lustre/osc/osc_internal.h b/drivers/staging/lustre/lustre/osc/osc_internal.h
index 8abd83f26716..845e795d8795 100644
--- a/drivers/staging/lustre/lustre/osc/osc_internal.h
+++ b/drivers/staging/lustre/lustre/osc/osc_internal.h
@@ -133,7 +133,8 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
struct list_head *ext_list, int cmd);
long osc_lru_shrink(const struct lu_env *env, struct client_obd *cli,
long target, bool force);
-long osc_lru_reclaim(struct client_obd *cli, unsigned long npages);
+unsigned long osc_lru_reserve(struct client_obd *cli, unsigned long npages);
+void osc_lru_unreserve(struct client_obd *cli, unsigned long npages);
unsigned long osc_ldlm_weigh_ast(struct ldlm_lock *dlmlock);
diff --git a/drivers/staging/lustre/lustre/osc/osc_io.c b/drivers/staging/lustre/lustre/osc/osc_io.c
index 0b4cc4283b05..cbab80092442 100644
--- a/drivers/staging/lustre/lustre/osc/osc_io.c
+++ b/drivers/staging/lustre/lustre/osc/osc_io.c
@@ -216,7 +216,7 @@ static int osc_io_submit(const struct lu_env *env,
struct cl_object *obj = ios->cis_obj;
cl_object_attr_lock(obj);
- attr->cat_mtime = LTIME_S(CURRENT_TIME);
+ attr->cat_mtime = ktime_get_real_seconds();
attr->cat_ctime = attr->cat_mtime;
cl_object_attr_update(env, obj, attr, CAT_MTIME | CAT_CTIME);
cl_object_attr_unlock(obj);
@@ -256,7 +256,7 @@ static void osc_page_touch_at(const struct lu_env *env,
kms > loi->loi_kms ? "" : "not ", loi->loi_kms, kms,
loi->loi_lvb.lvb_size);
- attr->cat_ctime = LTIME_S(CURRENT_TIME);
+ attr->cat_ctime = ktime_get_real_seconds();
attr->cat_mtime = attr->cat_ctime;
valid = CAT_MTIME | CAT_CTIME;
if (kms > loi->loi_kms) {
@@ -354,7 +354,10 @@ static int osc_io_iter_init(const struct lu_env *env,
spin_lock(&imp->imp_lock);
if (likely(!imp->imp_invalid)) {
+ struct osc_io *oio = osc_env_io(env);
+
atomic_inc(&osc->oo_nr_ios);
+ oio->oi_is_active = 1;
rc = 0;
}
spin_unlock(&imp->imp_lock);
@@ -368,10 +371,7 @@ static int osc_io_write_iter_init(const struct lu_env *env,
struct cl_io *io = ios->cis_io;
struct osc_io *oio = osc_env_io(env);
struct osc_object *osc = cl2osc(ios->cis_obj);
- struct client_obd *cli = osc_cli(osc);
- unsigned long c;
unsigned long npages;
- unsigned long max_pages;
if (cl_io_is_append(io))
return osc_io_iter_init(env, ios);
@@ -380,31 +380,7 @@ static int osc_io_write_iter_init(const struct lu_env *env,
if (io->u.ci_rw.crw_pos & ~PAGE_MASK)
++npages;
- max_pages = cli->cl_max_pages_per_rpc * cli->cl_max_rpcs_in_flight;
- if (npages > max_pages)
- npages = max_pages;
-
- c = atomic_long_read(cli->cl_lru_left);
- if (c < npages && osc_lru_reclaim(cli, npages) > 0)
- c = atomic_long_read(cli->cl_lru_left);
- while (c >= npages) {
- if (c == atomic_long_cmpxchg(cli->cl_lru_left, c, c - npages)) {
- oio->oi_lru_reserved = npages;
- break;
- }
- c = atomic_long_read(cli->cl_lru_left);
- }
- if (atomic_long_read(cli->cl_lru_left) < max_pages) {
- /*
- * If there aren't enough pages in the per-OSC LRU then
- * wake up the LRU thread to try and clear out space, so
- * we don't block if pages are being dirtied quickly.
- */
- CDEBUG(D_CACHE, "%s: queue LRU, left: %lu/%ld.\n",
- cli_name(cli), atomic_long_read(cli->cl_lru_left),
- max_pages);
- (void)ptlrpcd_queue_work(cli->cl_lru_work);
- }
+ oio->oi_lru_reserved = osc_lru_reserve(osc_cli(osc), npages);
return osc_io_iter_init(env, ios);
}
@@ -412,11 +388,16 @@ static int osc_io_write_iter_init(const struct lu_env *env,
static void osc_io_iter_fini(const struct lu_env *env,
const struct cl_io_slice *ios)
{
- struct osc_object *osc = cl2osc(ios->cis_obj);
+ struct osc_io *oio = osc_env_io(env);
+
+ if (oio->oi_is_active) {
+ struct osc_object *osc = cl2osc(ios->cis_obj);
- LASSERT(atomic_read(&osc->oo_nr_ios) > 0);
- if (atomic_dec_and_test(&osc->oo_nr_ios))
- wake_up_all(&osc->oo_io_waitq);
+ oio->oi_is_active = 0;
+ LASSERT(atomic_read(&osc->oo_nr_ios) > 0);
+ if (atomic_dec_and_test(&osc->oo_nr_ios))
+ wake_up_all(&osc->oo_io_waitq);
+ }
}
static void osc_io_write_iter_fini(const struct lu_env *env,
@@ -424,10 +405,9 @@ static void osc_io_write_iter_fini(const struct lu_env *env,
{
struct osc_io *oio = osc_env_io(env);
struct osc_object *osc = cl2osc(ios->cis_obj);
- struct client_obd *cli = osc_cli(osc);
if (oio->oi_lru_reserved > 0) {
- atomic_long_add(oio->oi_lru_reserved, cli->cl_lru_left);
+ osc_lru_unreserve(osc_cli(osc), oio->oi_lru_reserved);
oio->oi_lru_reserved = 0;
}
oio->oi_write_osclock = NULL;
diff --git a/drivers/staging/lustre/lustre/osc/osc_lock.c b/drivers/staging/lustre/lustre/osc/osc_lock.c
index 5f799a4c78f9..940c10c1d7a1 100644
--- a/drivers/staging/lustre/lustre/osc/osc_lock.c
+++ b/drivers/staging/lustre/lustre/osc/osc_lock.c
@@ -167,6 +167,8 @@ static __u64 osc_enq2ldlm_flags(__u32 enqflags)
result |= LDLM_FL_AST_DISCARD_DATA;
if (enqflags & CEF_PEEK)
result |= LDLM_FL_TEST_LOCK;
+ if (enqflags & CEF_LOCK_MATCH)
+ result |= LDLM_FL_MATCH_LOCK;
return result;
}
@@ -295,7 +297,7 @@ static int osc_lock_upcall(void *cookie, struct lustre_handle *lockh,
struct cl_lock_slice *slice = &oscl->ols_cl;
struct lu_env *env;
int rc;
- int refcheck;
+ u16 refcheck;
env = cl_env_get(&refcheck);
/* should never happen, similar to osc_ldlm_blocking_ast(). */
@@ -347,7 +349,7 @@ static int osc_lock_upcall_agl(void *cookie, struct lustre_handle *lockh,
struct osc_object *osc = cookie;
struct ldlm_lock *dlmlock;
struct lu_env *env;
- int refcheck;
+ u16 refcheck;
env = cl_env_get(&refcheck);
LASSERT(!IS_ERR(env));
@@ -382,7 +384,7 @@ static int osc_lock_flush(struct osc_object *obj, pgoff_t start, pgoff_t end,
enum cl_lock_mode mode, int discard)
{
struct lu_env *env;
- int refcheck;
+ u16 refcheck;
int rc = 0;
int rc2 = 0;
@@ -536,7 +538,7 @@ static int osc_ldlm_blocking_ast(struct ldlm_lock *dlmlock,
}
case LDLM_CB_CANCELING: {
struct lu_env *env;
- int refcheck;
+ u16 refcheck;
/*
* This can be called in the context of outer IO, e.g.,
@@ -573,7 +575,7 @@ static int osc_ldlm_glimpse_ast(struct ldlm_lock *dlmlock, void *data)
struct req_capsule *cap;
struct cl_object *obj = NULL;
int result;
- int refcheck;
+ u16 refcheck;
LASSERT(lustre_msg_get_opc(req->rq_reqmsg) == LDLM_GL_CALLBACK);
@@ -684,7 +686,7 @@ unsigned long osc_ldlm_weigh_ast(struct ldlm_lock *dlmlock)
struct osc_lock *oscl;
unsigned long weight;
bool found = false;
- int refcheck;
+ u16 refcheck;
might_sleep();
/*
@@ -838,13 +840,14 @@ static void osc_lock_wake_waiters(const struct lu_env *env,
spin_unlock(&oscl->ols_lock);
}
-static void osc_lock_enqueue_wait(const struct lu_env *env,
- struct osc_object *obj,
- struct osc_lock *oscl)
+static int osc_lock_enqueue_wait(const struct lu_env *env,
+ struct osc_object *obj,
+ struct osc_lock *oscl)
{
struct osc_lock *tmp_oscl;
struct cl_lock_descr *need = &oscl->ols_cl.cls_lock->cll_descr;
struct cl_sync_io *waiter = &osc_env_info(env)->oti_anchor;
+ int rc = 0;
spin_lock(&obj->oo_ol_spin);
list_add_tail(&oscl->ols_nextlock_oscobj, &obj->oo_ol_list);
@@ -881,13 +884,17 @@ restart:
spin_unlock(&tmp_oscl->ols_lock);
spin_unlock(&obj->oo_ol_spin);
- (void)cl_sync_io_wait(env, waiter, 0);
-
+ rc = cl_sync_io_wait(env, waiter, 0);
spin_lock(&obj->oo_ol_spin);
+ if (rc < 0)
+ break;
+
oscl->ols_owner = NULL;
goto restart;
}
spin_unlock(&obj->oo_ol_spin);
+
+ return rc;
}
/**
@@ -935,7 +942,9 @@ static int osc_lock_enqueue(const struct lu_env *env,
goto enqueue_base;
}
- osc_lock_enqueue_wait(env, osc, oscl);
+ result = osc_lock_enqueue_wait(env, osc, oscl);
+ if (result < 0)
+ goto out;
/* we can grant lockless lock right after all conflicting locks
* are canceled.
@@ -960,7 +969,6 @@ enqueue_base:
* osc_lock.
*/
ostid_build_res_name(&osc->oo_oinfo->loi_oi, resname);
- osc_lock_build_einfo(env, lock, osc, &oscl->ols_einfo);
osc_lock_build_policy(env, lock, policy);
if (oscl->ols_agl) {
oscl->ols_einfo.ei_cbdata = NULL;
@@ -975,18 +983,7 @@ enqueue_base:
upcall, cookie,
&oscl->ols_einfo, PTLRPCD_SET, async,
oscl->ols_agl);
- if (result != 0) {
- oscl->ols_state = OLS_CANCELLED;
- osc_lock_wake_waiters(env, osc, oscl);
-
- /* hide error for AGL lock. */
- if (oscl->ols_agl) {
- cl_object_put(env, osc2cl(osc));
- result = 0;
- }
- if (anchor)
- cl_sync_io_note(env, anchor, result);
- } else {
+ if (!result) {
if (osc_lock_is_lockless(oscl)) {
oio->oi_lockless = 1;
} else if (!async) {
@@ -994,6 +991,18 @@ enqueue_base:
LASSERT(oscl->ols_hold);
LASSERT(oscl->ols_dlmlock);
}
+ } else if (oscl->ols_agl) {
+ cl_object_put(env, osc2cl(osc));
+ result = 0;
+ }
+
+out:
+ if (result < 0) {
+ oscl->ols_state = OLS_CANCELLED;
+ osc_lock_wake_waiters(env, osc, oscl);
+
+ if (anchor)
+ cl_sync_io_note(env, anchor, result);
}
return result;
}
@@ -1157,6 +1166,7 @@ int osc_lock_init(const struct lu_env *env,
oscl->ols_flags |= LDLM_FL_BLOCK_GRANTED;
oscl->ols_glimpse = 1;
}
+ osc_lock_build_einfo(env, lock, cl2osc(obj), &oscl->ols_einfo);
cl_lock_slice_add(lock, &oscl->ols_cl, obj, &osc_lock_ops);
diff --git a/drivers/staging/lustre/lustre/osc/osc_object.c b/drivers/staging/lustre/lustre/osc/osc_object.c
index d3e5ca7db7b2..fa621bda1ffe 100644
--- a/drivers/staging/lustre/lustre/osc/osc_object.c
+++ b/drivers/staging/lustre/lustre/osc/osc_object.c
@@ -200,10 +200,6 @@ static int osc_object_prune(const struct lu_env *env, struct cl_object *obj)
struct osc_object *osc = cl2osc(obj);
struct ldlm_res_id *resname = &osc_env_info(env)->oti_resname;
- LASSERTF(osc->oo_npages == 0,
- DFID "still have %lu pages, obj: %p, osc: %p\n",
- PFID(lu_object_fid(&obj->co_lu)), osc->oo_npages, obj, osc);
-
/* DLM locks don't hold a reference of osc_object so we have to
* clear it before the object is being destroyed.
*/
@@ -457,9 +453,15 @@ int osc_object_invalidate(const struct lu_env *env, struct osc_object *osc)
l_wait_event(osc->oo_io_waitq, !atomic_read(&osc->oo_nr_ios), &lwi);
- /* Discard all pages of this object. */
+ /* Discard all dirty pages of this object. */
osc_cache_truncate_start(env, osc, 0, NULL);
+ /* Discard all caching pages */
+ osc_lock_discard_pages(env, osc, 0, CL_PAGE_EOF, CLM_WRITE);
+
+ /* Clear ast data of dlm lock. Do this after discarding all pages */
+ osc_object_prune(env, osc2cl(osc));
+
return 0;
}
diff --git a/drivers/staging/lustre/lustre/osc/osc_page.c b/drivers/staging/lustre/lustre/osc/osc_page.c
index ab9d0d7bb943..ed8a0dc18ee5 100644
--- a/drivers/staging/lustre/lustre/osc/osc_page.c
+++ b/drivers/staging/lustre/lustre/osc/osc_page.c
@@ -42,8 +42,8 @@
static void osc_lru_del(struct client_obd *cli, struct osc_page *opg);
static void osc_lru_use(struct client_obd *cli, struct osc_page *opg);
-static int osc_lru_reserve(const struct lu_env *env, struct osc_object *obj,
- struct osc_page *opg);
+static int osc_lru_alloc(const struct lu_env *env, struct client_obd *cli,
+ struct osc_page *opg);
/** \addtogroup osc
* @{
@@ -273,7 +273,7 @@ int osc_page_init(const struct lu_env *env, struct cl_object *obj,
/* reserve an LRU space for this page */
if (page->cp_type == CPT_CACHEABLE && result == 0) {
- result = osc_lru_reserve(env, osc, opg);
+ result = osc_lru_alloc(env, osc_cli(osc), opg);
if (result == 0) {
spin_lock(&osc->oo_tree_lock);
result = radix_tree_insert(&osc->oo_tree, index, opg);
@@ -676,12 +676,12 @@ long osc_lru_shrink(const struct lu_env *env, struct client_obd *cli,
* LRU pages in batch. Therefore, the actual number is adjusted at least
* max_pages_per_rpc.
*/
-long osc_lru_reclaim(struct client_obd *cli, unsigned long npages)
+static long osc_lru_reclaim(struct client_obd *cli, unsigned long npages)
{
struct lu_env *env;
struct cl_client_cache *cache = cli->cl_cache;
int max_scans;
- int refcheck;
+ u16 refcheck;
long rc = 0;
LASSERT(cache);
@@ -749,18 +749,17 @@ out:
}
/**
- * osc_lru_reserve() is called to reserve an LRU slot for a cl_page.
+ * osc_lru_alloc() is called to reserve an LRU slot for a cl_page.
*
* Usually the LRU slots are reserved in osc_io_iter_rw_init().
* Only in the case that the LRU slots are in extreme shortage, it should
* have reserved enough slots for an IO.
*/
-static int osc_lru_reserve(const struct lu_env *env, struct osc_object *obj,
- struct osc_page *opg)
+static int osc_lru_alloc(const struct lu_env *env, struct client_obd *cli,
+ struct osc_page *opg)
{
struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
struct osc_io *oio = osc_env_io(env);
- struct client_obd *cli = osc_cli(obj);
int rc = 0;
if (!cli->cl_cache) /* shall not be in LRU */
@@ -801,6 +800,64 @@ out:
}
/**
+ * osc_lru_reserve() is called to reserve enough LRU slots for I/O.
+ *
+ * The benefit of doing this is to reduce contention on the atomic counter
+ * cl_lru_left by changing it from per-page access to per-IO access.
+ */
+unsigned long osc_lru_reserve(struct client_obd *cli, unsigned long npages)
+{
+ unsigned long reserved = 0;
+ unsigned long max_pages;
+ unsigned long c;
+
+ /*
+ * Reserve at most a full RPC window to avoid a thread accidentally
+ * consuming too many LRU slots.
+ */
+ max_pages = cli->cl_max_pages_per_rpc * cli->cl_max_rpcs_in_flight;
+ if (npages > max_pages)
+ npages = max_pages;
+
+ c = atomic_long_read(cli->cl_lru_left);
+ if (c < npages && osc_lru_reclaim(cli, npages) > 0)
+ c = atomic_long_read(cli->cl_lru_left);
+ while (c >= npages) {
+ if (c == atomic_long_cmpxchg(cli->cl_lru_left, c, c - npages)) {
+ reserved = npages;
+ break;
+ }
+ c = atomic_long_read(cli->cl_lru_left);
+ }
+ if (atomic_long_read(cli->cl_lru_left) < max_pages) {
+ /*
+ * If there aren't enough pages in the per-OSC LRU then
+ * wake up the LRU thread to try and clear out space, so
+ * we don't block if pages are being dirtied quickly.
+ */
+ CDEBUG(D_CACHE, "%s: queue LRU, left: %lu/%ld.\n",
+ cli_name(cli), atomic_long_read(cli->cl_lru_left),
+ max_pages);
+ (void)ptlrpcd_queue_work(cli->cl_lru_work);
+ }
+
+ return reserved;
+}
+
+/**
+ * osc_lru_unreserve() is called to unreserve LRU slots.
+ *
+ * LRU slots reserved by osc_lru_reserve() may be left unused for several
+ * reasons, such as the page already existing in the cache or an I/O error.
+ * Such leftover slots should be freed by calling this function.
+ */
+void osc_lru_unreserve(struct client_obd *cli, unsigned long npages)
+{
+ atomic_long_add(npages, cli->cl_lru_left);
+ wake_up_all(&osc_lru_waitq);
+}
+
+/**
* Atomic operations are expensive. We accumulate the accounting for the
* same page pgdat to get better performance.
* In practice this can work pretty good because the pages in the same RPC
@@ -988,7 +1045,7 @@ unsigned long osc_cache_shrink_scan(struct shrinker *sk,
struct client_obd *cli;
struct lu_env *env;
long shrank = 0;
- int refcheck;
+ u16 refcheck;
int rc;
if (!sc->nr_to_scan)
diff --git a/drivers/staging/lustre/lustre/osc/osc_request.c b/drivers/staging/lustre/lustre/osc/osc_request.c
index c4cfe18c3294..d8aa3fb468c7 100644
--- a/drivers/staging/lustre/lustre/osc/osc_request.c
+++ b/drivers/staging/lustre/lustre/osc/osc_request.c
@@ -1195,7 +1195,8 @@ static int osc_brw_prep_request(int cmd, struct client_obd *cli,
return rc;
}
-static int check_write_checksum(struct obdo *oa, const lnet_process_id_t *peer,
+static int check_write_checksum(struct obdo *oa,
+ const struct lnet_process_id *peer,
__u32 client_cksum, __u32 server_cksum, int nob,
u32 page_count, struct brw_page **pga,
enum cksum_type client_cksum_type)
@@ -1245,7 +1246,7 @@ static int check_write_checksum(struct obdo *oa, const lnet_process_id_t *peer,
static int osc_brw_fini_request(struct ptlrpc_request *req, int rc)
{
struct osc_brw_async_args *aa = (void *)&req->rq_async_args;
- const lnet_process_id_t *peer =
+ const struct lnet_process_id *peer =
&req->rq_import->imp_connection->c_peer;
struct client_obd *cli = aa->aa_cli;
struct ost_body *body;
@@ -2011,7 +2012,7 @@ int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id,
}
no_match:
- if (*flags & LDLM_FL_TEST_LOCK)
+ if (*flags & (LDLM_FL_TEST_LOCK | LDLM_FL_MATCH_LOCK))
return -ENOLCK;
if (intent) {
req = ptlrpc_request_alloc(class_exp2cliimp(exp),
@@ -2495,7 +2496,13 @@ static int osc_ldlm_resource_invalidate(struct cfs_hash *hs,
osc = lock->l_ast_data;
cl_object_get(osc2cl(osc));
}
- lock->l_ast_data = NULL;
+
+ /*
+ * clear LDLM_FL_CLEANED flag to make sure it will be canceled
+ * by the 2nd round of ldlm_namespace_clean() call in
+ * osc_import_event().
+ */
+ ldlm_clear_cleaned(lock);
}
unlock_res(res);
@@ -2532,7 +2539,7 @@ static int osc_import_event(struct obd_device *obd,
case IMP_EVENT_INVALIDATE: {
struct ldlm_namespace *ns = obd->obd_namespace;
struct lu_env *env;
- int refcheck;
+ u16 refcheck;
ldlm_namespace_cleanup(ns, LDLM_FL_LOCAL_ONLY);
diff --git a/drivers/staging/lustre/lustre/ptlrpc/client.c b/drivers/staging/lustre/lustre/ptlrpc/client.c
index 04a98a08ece1..6466974a43e7 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/client.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/client.c
@@ -78,7 +78,7 @@ struct ptlrpc_connection *ptlrpc_uuid_to_connection(struct obd_uuid *uuid)
{
struct ptlrpc_connection *c;
lnet_nid_t self;
- lnet_process_id_t peer;
+ struct lnet_process_id peer;
int err;
/*
@@ -151,7 +151,7 @@ struct ptlrpc_bulk_desc *ptlrpc_new_bulk(unsigned int nfrags,
* node. Negotiated ocd_brw_size will always be <= this number.
*/
for (i = 0; i < PTLRPC_BULK_OPS_COUNT; i++)
- LNetInvalidateHandle(&desc->bd_mds[i]);
+ LNetInvalidateMDHandle(&desc->bd_mds[i]);
return desc;
free_desc:
@@ -3144,8 +3144,7 @@ void ptlrpc_set_bulk_mbits(struct ptlrpc_request *req)
* that server can infer the number of bulks that were prepared,
* see LU-1431
*/
- req->rq_mbits += ((bd->bd_iov_count + LNET_MAX_IOV - 1) /
- LNET_MAX_IOV) - 1;
+ req->rq_mbits += DIV_ROUND_UP(bd->bd_iov_count, LNET_MAX_IOV) - 1;
}
/**
diff --git a/drivers/staging/lustre/lustre/ptlrpc/connection.c b/drivers/staging/lustre/lustre/ptlrpc/connection.c
index 6c7c8b68a909..73a2dbbeb7e6 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/connection.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/connection.c
@@ -41,7 +41,7 @@ static struct cfs_hash *conn_hash;
static struct cfs_hash_ops conn_hash_ops;
struct ptlrpc_connection *
-ptlrpc_connection_get(lnet_process_id_t peer, lnet_nid_t self,
+ptlrpc_connection_get(struct lnet_process_id peer, lnet_nid_t self,
struct obd_uuid *uuid)
{
struct ptlrpc_connection *conn, *conn2;
@@ -155,14 +155,14 @@ void ptlrpc_connection_fini(void)
static unsigned int
conn_hashfn(struct cfs_hash *hs, const void *key, unsigned int mask)
{
- return cfs_hash_djb2_hash(key, sizeof(lnet_process_id_t), mask);
+ return cfs_hash_djb2_hash(key, sizeof(struct lnet_process_id), mask);
}
static int
conn_keycmp(const void *key, struct hlist_node *hnode)
{
struct ptlrpc_connection *conn;
- const lnet_process_id_t *conn_key;
+ const struct lnet_process_id *conn_key;
LASSERT(key);
conn_key = key;
diff --git a/drivers/staging/lustre/lustre/ptlrpc/events.c b/drivers/staging/lustre/lustre/ptlrpc/events.c
index dc0fe9d660da..978bdaca3cdd 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/events.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/events.c
@@ -42,12 +42,12 @@
#include "../include/lustre_sec.h"
#include "ptlrpc_internal.h"
-lnet_handle_eq_t ptlrpc_eq_h;
+struct lnet_handle_eq ptlrpc_eq_h;
/*
* Client's outgoing request callback
*/
-void request_out_callback(lnet_event_t *ev)
+void request_out_callback(struct lnet_event *ev)
{
struct ptlrpc_cb_id *cbid = ev->md.user_ptr;
struct ptlrpc_request *req = cbid->cbid_arg;
@@ -86,7 +86,7 @@ void request_out_callback(lnet_event_t *ev)
/*
* Client's incoming reply callback
*/
-void reply_in_callback(lnet_event_t *ev)
+void reply_in_callback(struct lnet_event *ev)
{
struct ptlrpc_cb_id *cbid = ev->md.user_ptr;
struct ptlrpc_request *req = cbid->cbid_arg;
@@ -176,7 +176,7 @@ out_wake:
/*
* Client's bulk has been written/read
*/
-void client_bulk_callback(lnet_event_t *ev)
+void client_bulk_callback(struct lnet_event *ev)
{
struct ptlrpc_cb_id *cbid = ev->md.user_ptr;
struct ptlrpc_bulk_desc *desc = cbid->cbid_arg;
@@ -289,7 +289,7 @@ static void ptlrpc_req_add_history(struct ptlrpc_service_part *svcpt,
/*
* Server's incoming request callback
*/
-void request_in_callback(lnet_event_t *ev)
+void request_in_callback(struct lnet_event *ev)
{
struct ptlrpc_cb_id *cbid = ev->md.user_ptr;
struct ptlrpc_request_buffer_desc *rqbd = cbid->cbid_arg;
@@ -389,7 +389,7 @@ void request_in_callback(lnet_event_t *ev)
/*
* Server's outgoing reply callback
*/
-void reply_out_callback(lnet_event_t *ev)
+void reply_out_callback(struct lnet_event *ev)
{
struct ptlrpc_cb_id *cbid = ev->md.user_ptr;
struct ptlrpc_reply_state *rs = cbid->cbid_arg;
@@ -429,10 +429,10 @@ void reply_out_callback(lnet_event_t *ev)
}
}
-static void ptlrpc_master_callback(lnet_event_t *ev)
+static void ptlrpc_master_callback(struct lnet_event *ev)
{
struct ptlrpc_cb_id *cbid = ev->md.user_ptr;
- void (*callback)(lnet_event_t *ev) = cbid->cbid_fn;
+ void (*callback)(struct lnet_event *ev) = cbid->cbid_fn;
/* Honestly, it's best to find out early. */
LASSERT(cbid->cbid_arg != LP_POISON);
@@ -446,7 +446,7 @@ static void ptlrpc_master_callback(lnet_event_t *ev)
}
int ptlrpc_uuid_to_peer(struct obd_uuid *uuid,
- lnet_process_id_t *peer, lnet_nid_t *self)
+ struct lnet_process_id *peer, lnet_nid_t *self)
{
int best_dist = 0;
__u32 best_order = 0;
diff --git a/drivers/staging/lustre/lustre/ptlrpc/layout.c b/drivers/staging/lustre/lustre/ptlrpc/layout.c
index 356d73511ea3..8177e1a31ca9 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/layout.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/layout.c
@@ -58,7 +58,6 @@
/* struct ptlrpc_request, lustre_msg* */
#include "../include/lustre_req_layout.h"
#include "../include/lustre_acl.h"
-#include "../include/lustre_debug.h"
/*
* RQFs (see below) refer to two struct req_msg_field arrays describing the
diff --git a/drivers/staging/lustre/lustre/ptlrpc/niobuf.c b/drivers/staging/lustre/lustre/ptlrpc/niobuf.c
index b8701841ab4a..eddc1927a8d2 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/niobuf.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/niobuf.c
@@ -43,13 +43,13 @@
* over \a conn connection to portal \a portal.
* Returns 0 on success or error code.
*/
-static int ptl_send_buf(lnet_handle_md_t *mdh, void *base, int len,
- lnet_ack_req_t ack, struct ptlrpc_cb_id *cbid,
+static int ptl_send_buf(struct lnet_handle_md *mdh, void *base, int len,
+ enum lnet_ack_req ack, struct ptlrpc_cb_id *cbid,
struct ptlrpc_connection *conn, int portal, __u64 xid,
unsigned int offset)
{
int rc;
- lnet_md_t md;
+ struct lnet_md md;
LASSERT(portal != 0);
CDEBUG(D_INFO, "conn=%p id %s\n", conn, libcfs_id2str(conn->c_peer));
@@ -94,7 +94,7 @@ static int ptl_send_buf(lnet_handle_md_t *mdh, void *base, int len,
return 0;
}
-static void mdunlink_iterate_helper(lnet_handle_md_t *bd_mds, int count)
+static void mdunlink_iterate_helper(struct lnet_handle_md *bd_mds, int count)
{
int i;
@@ -109,14 +109,14 @@ static void mdunlink_iterate_helper(lnet_handle_md_t *bd_mds, int count)
static int ptlrpc_register_bulk(struct ptlrpc_request *req)
{
struct ptlrpc_bulk_desc *desc = req->rq_bulk;
- lnet_process_id_t peer;
+ struct lnet_process_id peer;
int rc = 0;
int rc2;
int posted_md;
int total_md;
u64 mbits;
- lnet_handle_me_t me_h;
- lnet_md_t md;
+ struct lnet_handle_me me_h;
+ struct lnet_md md;
if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_BULK_GET_NET))
return 0;
@@ -142,7 +142,7 @@ static int ptlrpc_register_bulk(struct ptlrpc_request *req)
LASSERT(desc->bd_cbid.cbid_fn == client_bulk_callback);
LASSERT(desc->bd_cbid.cbid_arg == desc);
- total_md = (desc->bd_iov_count + LNET_MAX_IOV - 1) / LNET_MAX_IOV;
+ total_md = DIV_ROUND_UP(desc->bd_iov_count, LNET_MAX_IOV);
/* rq_mbits is matchbits of the final bulk */
mbits = req->rq_mbits - total_md + 1;
@@ -472,8 +472,8 @@ int ptl_send_rpc(struct ptlrpc_request *request, int noreply)
int rc2;
int mpflag = 0;
struct ptlrpc_connection *connection;
- lnet_handle_me_t reply_me_h;
- lnet_md_t reply_md;
+ struct lnet_handle_me reply_me_h;
+ struct lnet_md reply_md;
struct obd_import *imp = request->rq_import;
struct obd_device *obd = imp->imp_obd;
@@ -719,10 +719,10 @@ EXPORT_SYMBOL(ptl_send_rpc);
int ptlrpc_register_rqbd(struct ptlrpc_request_buffer_desc *rqbd)
{
struct ptlrpc_service *service = rqbd->rqbd_svcpt->scp_service;
- static lnet_process_id_t match_id = {LNET_NID_ANY, LNET_PID_ANY};
+ static struct lnet_process_id match_id = {LNET_NID_ANY, LNET_PID_ANY};
int rc;
- lnet_md_t md;
- lnet_handle_me_t me_h;
+ struct lnet_md md;
+ struct lnet_handle_me me_h;
CDEBUG(D_NET, "LNetMEAttach: portal %d\n",
service->srv_req_portal);
diff --git a/drivers/staging/lustre/lustre/ptlrpc/pers.c b/drivers/staging/lustre/lustre/ptlrpc/pers.c
index 601acb84f343..df4994f406e9 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/pers.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/pers.c
@@ -40,7 +40,7 @@
#include "ptlrpc_internal.h"
-void ptlrpc_fill_bulk_md(lnet_md_t *md, struct ptlrpc_bulk_desc *desc,
+void ptlrpc_fill_bulk_md(struct lnet_md *md, struct ptlrpc_bulk_desc *desc,
int mdidx)
{
int offset = mdidx * LNET_MAX_IOV;
diff --git a/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_internal.h b/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_internal.h
index 8e6a805487ec..d2707a323c47 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_internal.h
+++ b/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_internal.h
@@ -234,7 +234,7 @@ extern struct ptlrpc_nrs_pol_conf nrs_conf_fifo;
int ptlrpc_expire_one_request(struct ptlrpc_request *req, int async_unlink);
/* pers.c */
-void ptlrpc_fill_bulk_md(lnet_md_t *md, struct ptlrpc_bulk_desc *desc,
+void ptlrpc_fill_bulk_md(struct lnet_md *md, struct ptlrpc_bulk_desc *desc,
int mdcnt);
/* pack_generic.c */
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c b/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
index 2fe9085e2034..128838a695e0 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
@@ -272,7 +272,7 @@ static unsigned long enc_pools_shrink_scan(struct shrinker *s,
static inline
int npages_to_npools(unsigned long npages)
{
- return (int)((npages + PAGES_PER_POOL - 1) / PAGES_PER_POOL);
+ return (int)DIV_ROUND_UP(npages, PAGES_PER_POOL);
}
/*
diff --git a/drivers/staging/media/Kconfig b/drivers/staging/media/Kconfig
index abd0e2d57c20..dbda4d9a08e7 100644
--- a/drivers/staging/media/Kconfig
+++ b/drivers/staging/media/Kconfig
@@ -19,6 +19,8 @@ menuconfig STAGING_MEDIA
if STAGING_MEDIA && MEDIA_SUPPORT
# Please keep them in alphabetic order
+source "drivers/staging/media/atomisp/Kconfig"
+
source "drivers/staging/media/bcm2048/Kconfig"
source "drivers/staging/media/cxd2099/Kconfig"
@@ -27,13 +29,7 @@ source "drivers/staging/media/davinci_vpfe/Kconfig"
source "drivers/staging/media/omap4iss/Kconfig"
-source "drivers/staging/media/platform/bcm2835/Kconfig"
-
-source "drivers/staging/media/s5p-cec/Kconfig"
-
# Keep LIRC at the end, as it has sub-menus
source "drivers/staging/media/lirc/Kconfig"
-source "drivers/staging/media/st-cec/Kconfig"
-
endif
diff --git a/drivers/staging/media/Makefile b/drivers/staging/media/Makefile
index dc89325c463d..c04600c81264 100644
--- a/drivers/staging/media/Makefile
+++ b/drivers/staging/media/Makefile
@@ -1,8 +1,6 @@
obj-$(CONFIG_I2C_BCM2048) += bcm2048/
-obj-$(CONFIG_VIDEO_SAMSUNG_S5P_CEC) += s5p-cec/
obj-$(CONFIG_DVB_CXD2099) += cxd2099/
obj-$(CONFIG_LIRC_STAGING) += lirc/
-obj-$(CONFIG_VIDEO_BCM2835) += platform/bcm2835/
obj-$(CONFIG_VIDEO_DM365_VPFE) += davinci_vpfe/
obj-$(CONFIG_VIDEO_OMAP4) += omap4iss/
-obj-$(CONFIG_VIDEO_STI_HDMI_CEC) += st-cec/
+obj-$(CONFIG_INTEL_ATOMISP) += atomisp/
diff --git a/drivers/staging/media/atomisp/Kconfig b/drivers/staging/media/atomisp/Kconfig
new file mode 100644
index 000000000000..8eb13c3ba29c
--- /dev/null
+++ b/drivers/staging/media/atomisp/Kconfig
@@ -0,0 +1,11 @@
+menuconfig INTEL_ATOMISP
+ bool "Enable support to Intel MIPI camera drivers"
+ depends on X86 && EFI && MEDIA_CONTROLLER && PCI && ACPI
+ help
+ Enable support for the Intel ISP2 camera interfaces and MIPI
+ sensor drivers.
+
+if INTEL_ATOMISP
+source "drivers/staging/media/atomisp/pci/Kconfig"
+source "drivers/staging/media/atomisp/i2c/Kconfig"
+endif
diff --git a/drivers/staging/media/atomisp/Makefile b/drivers/staging/media/atomisp/Makefile
new file mode 100644
index 000000000000..403fe5edff6d
--- /dev/null
+++ b/drivers/staging/media/atomisp/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for camera drivers.
+#
+obj-$(CONFIG_INTEL_ATOMISP) += pci/
+obj-$(CONFIG_INTEL_ATOMISP) += i2c/
+obj-$(CONFIG_INTEL_ATOMISP) += platform/
diff --git a/drivers/staging/media/atomisp/TODO b/drivers/staging/media/atomisp/TODO
new file mode 100644
index 000000000000..737452cbf8a0
--- /dev/null
+++ b/drivers/staging/media/atomisp/TODO
@@ -0,0 +1,64 @@
+1. A single AtomISP driver needs to be implemented to support both BYT and
+ CHT platforms. The current driver is a mechanical, hand-combined merge
+ of the two using an ifdef ISP2401 to select the CHT version, which at the
+ moment is not enabled. Eventually this should become a runtime if check,
+ but there are some quite tricky things that need sorting out before that
+ will be possible.
+
+2. The file structure needs to get tidied up to resemble a normal Linux
+ driver.
+
+3. Lots of the midlayer glue, unused code and abstraction need removing.
+
+4. The sensor drivers read MIPI settings from EFI variables or default to the
+ settings hard-coded in the platform data file for different platforms.
+ This isn't ideal but may be hard to improve as this is how existing
+ platforms work.
+
+5. The sensor drivers use the regulator framework API. In the ideal world it
+ would be using ACPI but that's not how the existing devices work.
+
+6. The AtomISP driver includes some special IOCTLS (ATOMISP_IOC_XXXX_XXXX)
+ that may need some cleaning up.
+
+7. Correct Coding Style. Please don't send coding style patches for this
+ driver until the other work is done.
+
+8. The ISP code depends on the exact FW version. The version defined in
+ BYT:
+ drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_firmware.c
+ static const char *release_version = STR(irci_stable_candrpv_0415_20150521_0458);
+ CHT:
+ drivers/staging/media/atomisp/pci/atomisp2/css/sh_css_firmware.c
+ static const char *release_version = STR(irci_ecr-master_20150911_0724);
+
+ At some point we may need to round up a few driver versions and see if
+ there are any specific things that can be done to fold in support for
+ multiple firmware versions.
+
+
+Limitations:
+
+1. Currently the patch only supports some camera sensors:
+ gc2235/gc0310/ov2680/ov2722/ov5693/mt9m114...
+
+2. To test the patches, you also need the ISP firmware
+
+ for BYT:/lib/firmware/shisp_2400b0_v21.bin
+ for CHT:/lib/firmware/shisp_2401a0_v21.bin
+
+ The firmware files will usually be found in /etc/firmware on an Android
+ device but can also be extracted from the upgrade kit if you've managed
+ to lose them somehow.
+
+3. Without a 3A library the capture behaviour is not very good. To take a good
+ picture, you need to tune ISP parameters via IOCTL functions or use a 3A library
+ such as libxcam.
+
+4. The driver is intended to drive the PCI-exposed versions of the device.
+ It will not detect those devices enumerated via ACPI as a field of the
+ i915 GPU driver.
+
+5. The driver supports only v2 of the IPU/Camera. It will not work with the
+ versions of the hardware in other SoCs.
+
diff --git a/drivers/staging/media/atomisp/i2c/Kconfig b/drivers/staging/media/atomisp/i2c/Kconfig
new file mode 100644
index 000000000000..b80d29d53e65
--- /dev/null
+++ b/drivers/staging/media/atomisp/i2c/Kconfig
@@ -0,0 +1,106 @@
+#
+# Kconfig for sensor drivers
+#
+
+source "drivers/staging/media/atomisp/i2c/ov5693/Kconfig"
+source "drivers/staging/media/atomisp/i2c/imx/Kconfig"
+
+config VIDEO_OV2722
+ tristate "OVT ov2722 sensor support"
+ depends on I2C && VIDEO_V4L2
+ ---help---
+ This is a Video4Linux2 sensor-level driver for the OVT
+ OV2722 raw camera.
+
+ OV2722 is a 2M raw sensor.
+
+ It currently only works with the atomisp driver.
+
+config VIDEO_GC2235
+ tristate "Galaxy gc2235 sensor support"
+ depends on I2C && VIDEO_V4L2
+ ---help---
+ This is a Video4Linux2 sensor-level driver for the GalaxyCore
+ GC2235 raw camera.
+
+ GC2235 is a 2M raw sensor.
+
+ It currently only works with the atomisp driver.
+
+config VIDEO_OV8858
+ tristate "Omnivision ov8858 sensor support"
+ depends on I2C && VIDEO_V4L2 && VIDEO_ATOMISP
+ ---help---
+ This is a Video4Linux2 sensor-level driver for the Omnivision
+ ov8858 RAW sensor.
+
+ OV8858 is an 8M raw sensor.
+
+ It currently only works with the atomisp driver.
+
+config VIDEO_MSRLIST_HELPER
+ tristate "Helper library to load, parse and apply large register lists."
+ depends on I2C
+ ---help---
+ This is a helper library to be used from a sensor driver to load, parse
+ and apply large register lists.
+
+ To compile this driver as a module, choose M here: the
+ module will be called libmsrlisthelper.
+
+config VIDEO_MT9M114
+ tristate "Aptina mt9m114 sensor support"
+ depends on I2C && VIDEO_V4L2
+ ---help---
+ This is a Video4Linux2 sensor-level driver for the Micron
+ mt9m114 1.3 Mpixel camera.
+
+ mt9m114 is a video camera sensor.
+
+ It currently only works with the atomisp driver.
+
+config VIDEO_AP1302
+ tristate "AP1302 external ISP support"
+ depends on I2C && VIDEO_V4L2
+ select REGMAP_I2C
+ ---help---
+ This is a Video4Linux2 sensor-level driver for the external
+ ISP AP1302.
+
+ AP1302 is an external ISP.
+
+ It currently only works with the atomisp driver.
+
+config VIDEO_GC0310
+ tristate "GC0310 sensor support"
+ depends on I2C && VIDEO_V4L2
+ ---help---
+ This is a Video4Linux2 sensor-level driver for the Galaxycore
+ GC0310 0.3MP sensor.
+
+config VIDEO_OV2680
+ tristate "Omnivision OV2680 sensor support"
+ depends on I2C && VIDEO_V4L2
+ ---help---
+ This is a Video4Linux2 sensor-level driver for the Omnivision
+ OV2680 raw camera.
+
+ ov2680 is a 2M raw sensor.
+
+ It currently only works with the atomisp driver.
+
+#
+# Kconfig for flash drivers
+#
+
+config VIDEO_LM3554
+ tristate "LM3554 flash light driver"
+ depends on VIDEO_V4L2 && I2C
+ ---help---
+ This is a Video4Linux2 sub-dev driver for the LM3554
+ flash light driver.
+
+ To compile this driver as a module, choose M here: the
+ module will be called lm3554.
+
+
diff --git a/drivers/staging/media/atomisp/i2c/Makefile b/drivers/staging/media/atomisp/i2c/Makefile
new file mode 100644
index 000000000000..8ea01904c0ea
--- /dev/null
+++ b/drivers/staging/media/atomisp/i2c/Makefile
@@ -0,0 +1,23 @@
+#
+# Makefile for sensor drivers
+#
+
+obj-$(CONFIG_VIDEO_IMX) += imx/
+obj-$(CONFIG_VIDEO_OV5693) += ov5693/
+obj-$(CONFIG_VIDEO_MT9M114) += mt9m114.o
+obj-$(CONFIG_VIDEO_GC2235) += gc2235.o
+obj-$(CONFIG_VIDEO_OV2722) += ov2722.o
+obj-$(CONFIG_VIDEO_OV2680) += ov2680.o
+obj-$(CONFIG_VIDEO_GC0310) += gc0310.o
+
+obj-$(CONFIG_VIDEO_MSRLIST_HELPER) += libmsrlisthelper.o
+
+obj-$(CONFIG_VIDEO_AP1302) += ap1302.o
+
+#
+# Makefile for flash drivers
+#
+
+obj-$(CONFIG_VIDEO_LM3554) += lm3554.o
+
+ccflags-y += -Werror
+
diff --git a/drivers/staging/media/atomisp/i2c/ap1302.c b/drivers/staging/media/atomisp/i2c/ap1302.c
new file mode 100644
index 000000000000..bacffbe962d4
--- /dev/null
+++ b/drivers/staging/media/atomisp/i2c/ap1302.c
@@ -0,0 +1,1260 @@
+/*
+ *
+ * Copyright (c) 2013 Intel Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+
+#include "../include/linux/atomisp.h"
+#include <linux/delay.h>
+#include <linux/firmware.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include "ap1302.h"
+
+#define to_ap1302_device(sub_dev) \
+ container_of(sub_dev, struct ap1302_device, sd)
+
+/* Static definitions */
+static struct regmap_config ap1302_reg16_config = {
+ .reg_bits = 16,
+ .val_bits = 16,
+ .reg_format_endian = REGMAP_ENDIAN_BIG,
+ .val_format_endian = REGMAP_ENDIAN_BIG,
+};
+
+static struct regmap_config ap1302_reg32_config = {
+ .reg_bits = 16,
+ .val_bits = 32,
+ .reg_format_endian = REGMAP_ENDIAN_BIG,
+ .val_format_endian = REGMAP_ENDIAN_BIG,
+};
+
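+/* Translate the atomisp run-mode control value (V4L2_CID_RUN_MODE) into
+   the AP1302 context it selects. */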
+static enum ap1302_contexts ap1302_cntx_mapping[] = {
+ CONTEXT_PREVIEW, /* Invalid atomisp run mode */
+ CONTEXT_VIDEO, /* ATOMISP_RUN_MODE_VIDEO */
+ CONTEXT_SNAPSHOT, /* ATOMISP_RUN_MODE_STILL_CAPTURE */
+ CONTEXT_SNAPSHOT, /* ATOMISP_RUN_MODE_CONTINUOUS_CAPTURE */
+ CONTEXT_PREVIEW, /* ATOMISP_RUN_MODE_PREVIEW */
+};
+
+static struct ap1302_res_struct ap1302_preview_res[] = {
+ {
+ .width = 640,
+ .height = 480,
+ .fps = 30,
+ },
+ {
+ .width = 720,
+ .height = 480,
+ .fps = 30,
+ },
+ {
+ .width = 1280,
+ .height = 720,
+ .fps = 30,
+ },
+ {
+ .width = 1920,
+ .height = 1080,
+ .fps = 30,
+ }
+};
+
+static struct ap1302_res_struct ap1302_snapshot_res[] = {
+ {
+ .width = 640,
+ .height = 480,
+ .fps = 30,
+ },
+ {
+ .width = 720,
+ .height = 480,
+ .fps = 30,
+ },
+ {
+ .width = 1280,
+ .height = 720,
+ .fps = 30,
+ },
+ {
+ .width = 1920,
+ .height = 1080,
+ .fps = 30,
+ }
+};
+
+static struct ap1302_res_struct ap1302_video_res[] = {
+ {
+ .width = 640,
+ .height = 480,
+ .fps = 30,
+ },
+ {
+ .width = 720,
+ .height = 480,
+ .fps = 30,
+ },
+ {
+ .width = 1280,
+ .height = 720,
+ .fps = 30,
+ },
+ {
+ .width = 1920,
+ .height = 1080,
+ .fps = 30,
+ }
+};
+
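+/* Map an atomisp input stream index to an AP1302 context. */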
+static enum ap1302_contexts stream_to_context[] = {
+ CONTEXT_SNAPSHOT,
+ CONTEXT_PREVIEW,
+ CONTEXT_PREVIEW,
+ CONTEXT_VIDEO
+};
+
+static u16 aux_stream_config[CONTEXT_NUM][CONTEXT_NUM] = {
+ {0, 0, 0}, /* Preview: No aux streams. */
+ {1, 0, 2}, /* Snapshot: 1 for postview. 2 for video */
+ {1, 0, 0}, /* Video: 1 for preview. */
+};
+
+static struct ap1302_context_info context_info[] = {
+ {CNTX_WIDTH, AP1302_REG16, "width"},
+ {CNTX_HEIGHT, AP1302_REG16, "height"},
+ {CNTX_ROI_X0, AP1302_REG16, "roi_x0"},
+ {CNTX_ROI_X1, AP1302_REG16, "roi_x1"},
+ {CNTX_ROI_Y0, AP1302_REG16, "roi_y0"},
+ {CNTX_ROI_Y1, AP1302_REG16, "roi_y1"},
+ {CNTX_ASPECT, AP1302_REG16, "aspect"},
+ {CNTX_LOCK, AP1302_REG16, "lock"},
+ {CNTX_ENABLE, AP1302_REG16, "enable"},
+ {CNTX_OUT_FMT, AP1302_REG16, "out_fmt"},
+ {CNTX_SENSOR_MODE, AP1302_REG16, "sensor_mode"},
+ {CNTX_MIPI_CTRL, AP1302_REG16, "mipi_ctrl"},
+ {CNTX_MIPI_II_CTRL, AP1302_REG16, "mipi_ii_ctrl"},
+ {CNTX_LINE_TIME, AP1302_REG32, "line_time"},
+ {CNTX_MAX_FPS, AP1302_REG16, "max_fps"},
+ {CNTX_AE_USG, AP1302_REG16, "ae_usg"},
+ {CNTX_AE_UPPER_ET, AP1302_REG32, "ae_upper_et"},
+ {CNTX_AE_MAX_ET, AP1302_REG32, "ae_max_et"},
+ {CNTX_SS, AP1302_REG16, "ss"},
+ {CNTX_S1_SENSOR_MODE, AP1302_REG16, "s1_sensor_mode"},
+ {CNTX_HINF_CTRL, AP1302_REG16, "hinf_ctrl"},
+};
+
+/* This array stores the description list for metadata.
+ The metadata contains exposure settings and face
+ detection results. */
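+/* Each list entry is a pair: (0xB000 | block size, start address).  The
+   list is terminated by a single 0x0000 entry. */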
+static u16 ap1302_ss_list[] = {
+ 0xb01c, /* From 0x0186 with size 0x1C are exposure settings. */
+ 0x0186,
+ 0xb002, /* 0x71c0 is for F-number */
+ 0x71c0,
+ 0xb010, /* From 0x03dc with size 0x10 are face general infos. */
+ 0x03dc,
+ 0xb0a0, /* From 0x03e4 with size 0xa0 are face detail infos. */
+ 0x03e4,
+ 0xb020, /* From 0x0604 with size 0x20 are smile rate infos. */
+ 0x0604,
+ 0x0000
+};
+
+/* End of static definitions */
+
+static int ap1302_i2c_read_reg(struct v4l2_subdev *sd,
+ u16 reg, u16 len, void *val)
+{
+ struct ap1302_device *dev = to_ap1302_device(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret;
+
+ if (len == AP1302_REG16)
+ ret = regmap_read(dev->regmap16, reg, val);
+ else if (len == AP1302_REG32)
+ ret = regmap_read(dev->regmap32, reg, val);
+ else
+ ret = -EINVAL;
+ if (ret) {
+ dev_dbg(&client->dev, "Read reg failed. reg=0x%04X\n", reg);
+ return ret;
+ }
+ if (len == AP1302_REG16)
+ dev_dbg(&client->dev, "read_reg[0x%04X] = 0x%04X\n",
+ reg, *(u16 *)val);
+ else
+ dev_dbg(&client->dev, "read_reg[0x%04X] = 0x%08X\n",
+ reg, *(u32 *)val);
+ return ret;
+}
+
+static int ap1302_i2c_write_reg(struct v4l2_subdev *sd,
+ u16 reg, u16 len, u32 val)
+{
+ struct ap1302_device *dev = to_ap1302_device(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret;
+ if (len == AP1302_REG16)
+ ret = regmap_write(dev->regmap16, reg, val);
+ else if (len == AP1302_REG32)
+ ret = regmap_write(dev->regmap32, reg, val);
+ else
+ ret = -EINVAL;
+ if (ret) {
+ dev_dbg(&client->dev, "Write reg failed. reg=0x%04X\n", reg);
+ return ret;
+ }
+ if (len == AP1302_REG16)
+ dev_dbg(&client->dev, "write_reg[0x%04X] = 0x%04X\n",
+ reg, (u16)val);
+ else
+ dev_dbg(&client->dev, "write_reg[0x%04X] = 0x%08X\n",
+ reg, (u32)val);
+ return ret;
+}
+
+static u16
+ap1302_calculate_context_reg_addr(enum ap1302_contexts context, u16 offset)
+{
+ u16 reg_addr;
+ /* The register offsets are defined according to the preview/video
+    layout; the preview and video contexts share the same register
+    definition.  The snapshot context, however, has no S1_SENSOR_MODE
+    register, so when addressing snapshot registers any offset beyond
+    S1_SENSOR_MODE must be reduced by 2. */
+ if (context == CONTEXT_SNAPSHOT) {
+ if (offset == CNTX_S1_SENSOR_MODE)
+ return 0;
+ if (offset > CNTX_S1_SENSOR_MODE)
+ offset -= 2;
+ }
+ if (context == CONTEXT_PREVIEW)
+ reg_addr = REG_PREVIEW_BASE + offset;
+ else if (context == CONTEXT_VIDEO)
+ reg_addr = REG_VIDEO_BASE + offset;
+ else
+ reg_addr = REG_SNAPSHOT_BASE + offset;
+ return reg_addr;
+}
+
+static int ap1302_read_context_reg(struct v4l2_subdev *sd,
+ enum ap1302_contexts context, u16 offset, u16 len)
+{
+ struct ap1302_device *dev = to_ap1302_device(sd);
+ u16 reg_addr = ap1302_calculate_context_reg_addr(context, offset);
+ if (reg_addr == 0)
+ return -EINVAL;
+ return ap1302_i2c_read_reg(sd, reg_addr, len,
+ ((u8 *)&dev->cntx_config[context]) + offset);
+}
+
+static int ap1302_write_context_reg(struct v4l2_subdev *sd,
+ enum ap1302_contexts context, u16 offset, u16 len)
+{
+ struct ap1302_device *dev = to_ap1302_device(sd);
+ u16 reg_addr = ap1302_calculate_context_reg_addr(context, offset);
+ if (reg_addr == 0)
+ return -EINVAL;
+ return ap1302_i2c_write_reg(sd, reg_addr, len,
+ *(u32 *)(((u8 *)&dev->cntx_config[context]) + offset));
+}
+
+static int ap1302_dump_context_reg(struct v4l2_subdev *sd,
+ enum ap1302_contexts context)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct ap1302_device *dev = to_ap1302_device(sd);
+ int i;
+ dev_dbg(&client->dev, "Dump registers for context[%d]:\n", context);
+ for (i = 0; i < ARRAY_SIZE(context_info); i++) {
+ struct ap1302_context_info *info = &context_info[i];
+ u8 *var = (u8 *)&dev->cntx_config[context] + info->offset;
+ /* Snapshot context does not have s1_sensor_mode register. */
+ if (context == CONTEXT_SNAPSHOT &&
+ info->offset == CNTX_S1_SENSOR_MODE)
+ continue;
+ ap1302_read_context_reg(sd, context, info->offset, info->len);
+ if (info->len == AP1302_REG16)
+ dev_dbg(&client->dev, "context.%s = 0x%04X (%d)\n",
+ info->name, *(u16 *)var, *(u16 *)var);
+ else
+ dev_dbg(&client->dev, "context.%s = 0x%08X (%d)\n",
+ info->name, *(u32 *)var, *(u32 *)var);
+ }
+ return 0;
+}
+
+static int ap1302_request_firmware(struct v4l2_subdev *sd)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct ap1302_device *dev = to_ap1302_device(sd);
+ int ret;
+ ret = request_firmware(&dev->fw, "ap1302_fw.bin", &client->dev);
+ if (ret)
+ dev_err(&client->dev,
+ "ap1302_request_firmware failed. ret=%d\n", ret);
+ return ret;
+}
+
+/* When loading firmware, the host writes firmware data starting at address
+   0x8000.  When the address reaches 0x9FFF, the next write wraps back to
+   0x8000.  This function handles that address window and loads the firmware
+   data into the AP1302.  win_pos is the offset within the window; the
+   firmware loading procedure may call this function several times, and
+   win_pos records the position written so far. */
+static int ap1302_write_fw_window(struct v4l2_subdev *sd,
+ u16 *win_pos, const u8 *buf, u32 len)
+{
+ struct ap1302_device *dev = to_ap1302_device(sd);
+ int ret;
+ u32 pos;
+ u32 sub_len;
+ for (pos = 0; pos < len; pos += sub_len) {
+ if (len - pos < AP1302_FW_WINDOW_SIZE - *win_pos)
+ sub_len = len - pos;
+ else
+ sub_len = AP1302_FW_WINDOW_SIZE - *win_pos;
+ ret = regmap_raw_write(dev->regmap16,
+ *win_pos + AP1302_FW_WINDOW_OFFSET,
+ buf + pos, sub_len);
+ if (ret)
+ return ret;
+ *win_pos += sub_len;
+ if (*win_pos >= AP1302_FW_WINDOW_SIZE)
+ *win_pos = 0;
+ }
+ return 0;
+}
+
+static int ap1302_load_firmware(struct v4l2_subdev *sd)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct ap1302_device *dev = to_ap1302_device(sd);
+ const struct ap1302_firmware *fw;
+ const u8 *fw_data;
+ u16 reg_val = 0;
+ u16 win_pos = 0;
+ int ret;
+
+ dev_info(&client->dev, "Start to load firmware.\n");
+ if (!dev->fw) {
+ dev_err(&client->dev, "firmware not requested.\n");
+ return -EINVAL;
+ }
+ fw = (const struct ap1302_firmware *) dev->fw->data;
+ if (dev->fw->size != (sizeof(*fw) + fw->total_size)) {
+ dev_err(&client->dev, "firmware size does not match.\n");
+ return -EINVAL;
+ }
+ /* The fw binary contains a header of struct ap1302_firmware.
+ Following the header is the bootdata of AP1302.
+ The bootdata pointer can be referenced as &fw[1]. */
+ fw_data = (u8 *)&fw[1];
+
+ /* Clear crc register. */
+ ret = ap1302_i2c_write_reg(sd, REG_SIP_CRC, AP1302_REG16, 0xFFFF);
+ if (ret)
+ return ret;
+
+ /* Load FW data for PLL init stage. */
+ ret = ap1302_write_fw_window(sd, &win_pos, fw_data, fw->pll_init_size);
+ if (ret)
+ return ret;
+
+ /* Write 2 to bootdata_stage register to apply basic_init_hp
+ settings and enable PLL. */
+ ret = ap1302_i2c_write_reg(sd, REG_BOOTDATA_STAGE,
+ AP1302_REG16, 0x0002);
+ if (ret)
+ return ret;
+
+ /* Wait at least 1 ms for the PLL to lock; msleep(20) leaves margin. */
+ msleep(20);
+
+ /* Load the rest of bootdata content. */
+ ret = ap1302_write_fw_window(sd, &win_pos, fw_data + fw->pll_init_size,
+ fw->total_size - fw->pll_init_size);
+ if (ret)
+ return ret;
+
+ /* Check crc. */
+ ret = ap1302_i2c_read_reg(sd, REG_SIP_CRC, AP1302_REG16, &reg_val);
+ if (ret)
+ return ret;
+ if (reg_val != fw->crc) {
+ dev_err(&client->dev,
+ "crc does not match. T:0x%04X F:0x%04X\n",
+ fw->crc, reg_val);
+ return -EAGAIN;
+ }
+
+ /* Write 0xFFFF to the bootdata_stage register to indicate to the AP1302
+    that the whole bootdata content has been loaded. */
+ ret = ap1302_i2c_write_reg(sd, REG_BOOTDATA_STAGE,
+ AP1302_REG16, 0xFFFF);
+ if (ret)
+ return ret;
+ dev_info(&client->dev, "Load firmware successfully.\n");
+
+ return 0;
+}
+
+static int __ap1302_s_power(struct v4l2_subdev *sd, int on, int load_fw)
+{
+ struct ap1302_device *dev = to_ap1302_device(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret, i;
+ u16 ss_ptr;
+
+ dev_info(&client->dev, "ap1302_s_power is called.\n");
+ ret = dev->platform_data->power_ctrl(sd, on);
+ if (ret) {
+ dev_err(&client->dev,
+ "ap1302_s_power error. on=%d ret=%d\n", on, ret);
+ return ret;
+ }
+ dev->power_on = on;
+ if (!on || !load_fw)
+ return 0;
+ /* Load firmware after power on. */
+ ret = ap1302_load_firmware(sd);
+ if (ret) {
+ dev_err(&client->dev,
+ "ap1302_load_firmware failed. ret=%d\n", ret);
+ return ret;
+ }
+ ret = ap1302_i2c_read_reg(sd, REG_SS_HEAD_PT0, AP1302_REG16, &ss_ptr);
+ if (ret)
+ return ret;
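+ /* Program the metadata (SS) descriptor list at the address just read
+    from REG_SS_HEAD_PT0. */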
+ for (i = 0; i < ARRAY_SIZE(ap1302_ss_list); i++) {
+ ret = ap1302_i2c_write_reg(sd, ss_ptr + i * 2,
+ AP1302_REG16, ap1302_ss_list[i]);
+ if (ret)
+ return ret;
+ }
+ return ret;
+}
+
+static int ap1302_s_power(struct v4l2_subdev *sd, int on)
+{
+ struct ap1302_device *dev = to_ap1302_device(sd);
+ int ret;
+
+ mutex_lock(&dev->input_lock);
+ ret = __ap1302_s_power(sd, on, 1);
+ dev->sys_activated = 0;
+ mutex_unlock(&dev->input_lock);
+
+ return ret;
+}
+
+static int ap1302_s_config(struct v4l2_subdev *sd, void *pdata)
+{
+ struct ap1302_device *dev = to_ap1302_device(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct camera_mipi_info *mipi_info;
+ u16 reg_val = 0;
+ int ret;
+
+ dev_info(&client->dev, "ap1302_s_config is called.\n");
+ if (pdata == NULL)
+ return -ENODEV;
+
+ dev->platform_data = pdata;
+
+ mutex_lock(&dev->input_lock);
+
+ if (dev->platform_data->platform_init) {
+ ret = dev->platform_data->platform_init(client);
+ if (ret)
+ goto fail_power;
+ }
+
+ ret = __ap1302_s_power(sd, 1, 0);
+ if (ret)
+ goto fail_power;
+
+ /* Detect for AP1302 */
+ ret = ap1302_i2c_read_reg(sd, REG_CHIP_VERSION, AP1302_REG16, &reg_val);
+ if (ret || (reg_val != AP1302_CHIP_ID)) {
+ dev_err(&client->dev,
+ "Chip version does no match. ret=%d ver=0x%04x\n",
+ ret, reg_val);
+ goto fail_config;
+ }
+ dev_info(&client->dev, "AP1302 Chip ID is 0x%X\n", reg_val);
+
+ /* Detect revision for AP1302 */
+ ret = ap1302_i2c_read_reg(sd, REG_CHIP_REV, AP1302_REG16, &reg_val);
+ if (ret)
+ goto fail_config;
+ dev_info(&client->dev, "AP1302 Chip Rev is 0x%X\n", reg_val);
+ ret = dev->platform_data->csi_cfg(sd, 1);
+ if (ret)
+ goto fail_config;
+
+ mipi_info = v4l2_get_subdev_hostdata(sd);
+ if (!mipi_info)
+ goto fail_config;
+ dev->num_lanes = mipi_info->num_lanes;
+
+ ret = __ap1302_s_power(sd, 0, 0);
+ if (ret)
+ goto fail_power;
+
+ mutex_unlock(&dev->input_lock);
+
+ return ret;
+
+fail_config:
+ __ap1302_s_power(sd, 0, 0);
+fail_power:
+ mutex_unlock(&dev->input_lock);
+ dev_err(&client->dev, "ap1302_s_config failed\n");
+ return ret;
+}
+
+static enum ap1302_contexts ap1302_get_context(struct v4l2_subdev *sd)
+{
+ struct ap1302_device *dev = to_ap1302_device(sd);
+ return dev->cur_context;
+}
+
+static int ap1302_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ if (code->index)
+ return -EINVAL;
+
+ code->code = MEDIA_BUS_FMT_UYVY8_1X16;
+
+ return 0;
+}
+
+static int ap1302_match_resolution(struct ap1302_context_res *res,
+ struct v4l2_mbus_framefmt *fmt)
+{
+ s32 w0, h0, mismatch, distance;
+ s32 w1 = fmt->width;
+ s32 h1 = fmt->height;
+ s32 min_distance = INT_MAX;
+ s32 i, idx = -1;
+
+ if (w1 == 0 || h1 == 0)
+ return -1;
+
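+ /* Pick the smallest table entry that is at least as large as the
+    requested size.  "mismatch" is the relative aspect-ratio error in
+    1/8192 fixed-point units and is rejected beyond
+    AP1302_MAX_RATIO_MISMATCH percent; "distance" is (w0/w1 + h0/h1)
+    in the same scale, so the closest-sized candidate wins. */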
+ for (i = 0; i < res->res_num; i++) {
+ w0 = res->res_table[i].width;
+ h0 = res->res_table[i].height;
+ if (w0 < w1 || h0 < h1)
+ continue;
+ mismatch = abs(w0 * h1 - w1 * h0) * 8192 / w1 / h0;
+ if (mismatch > 8192 * AP1302_MAX_RATIO_MISMATCH / 100)
+ continue;
+ distance = (w0 * h1 + w1 * h0) * 8192 / w1 / h1;
+ if (distance < min_distance) {
+ min_distance = distance;
+ idx = i;
+ }
+ }
+
+ return idx;
+}
+
+static s32 ap1302_try_mbus_fmt_locked(struct v4l2_subdev *sd,
+ enum ap1302_contexts context,
+ struct v4l2_mbus_framefmt *fmt)
+{
+ struct ap1302_device *dev = to_ap1302_device(sd);
+ struct ap1302_res_struct *res_table;
+ s32 res_num, idx = -1;
+
+ res_table = dev->cntx_res[context].res_table;
+ res_num = dev->cntx_res[context].res_num;
+
+ if ((fmt->width <= res_table[res_num - 1].width) &&
+ (fmt->height <= res_table[res_num - 1].height))
+ idx = ap1302_match_resolution(&dev->cntx_res[context], fmt);
+ if (idx == -1)
+ idx = res_num - 1;
+
+ fmt->width = res_table[idx].width;
+ fmt->height = res_table[idx].height;
+ fmt->code = MEDIA_BUS_FMT_UYVY8_1X16;
+ return idx;
+}
+
+
+static int ap1302_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *format)
+
+{
+ struct v4l2_mbus_framefmt *fmt = &format->format;
+ struct ap1302_device *dev = to_ap1302_device(sd);
+ enum ap1302_contexts context;
+ struct ap1302_res_struct *res_table;
+ s32 cur_res;
+ if (format->pad)
+ return -EINVAL;
+ mutex_lock(&dev->input_lock);
+ context = ap1302_get_context(sd);
+ res_table = dev->cntx_res[context].res_table;
+ cur_res = dev->cntx_res[context].cur_res;
+ fmt->code = MEDIA_BUS_FMT_UYVY8_1X16;
+ fmt->width = res_table[cur_res].width;
+ fmt->height = res_table[cur_res].height;
+ mutex_unlock(&dev->input_lock);
+ return 0;
+}
+
+static int ap1302_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *format)
+{
+ struct v4l2_mbus_framefmt *fmt = &format->format;
+ struct ap1302_device *dev = to_ap1302_device(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct atomisp_input_stream_info *stream_info =
+ (struct atomisp_input_stream_info *)fmt->reserved;
+ enum ap1302_contexts context, main_context;
+ if (format->pad)
+ return -EINVAL;
+ if (!fmt)
+ return -EINVAL;
+ mutex_lock(&dev->input_lock);
+ if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
+ context = ap1302_get_context(sd);
+ ap1302_try_mbus_fmt_locked(sd, context, fmt);
+ cfg->try_fmt = *fmt;
+ mutex_unlock(&dev->input_lock);
+ return 0;
+ }
+ context = stream_to_context[stream_info->stream];
+ dev_dbg(&client->dev, "ap1302_set_mbus_fmt. stream=%d context=%d\n",
+ stream_info->stream, context);
+ dev->cntx_res[context].cur_res =
+ ap1302_try_mbus_fmt_locked(sd, context, fmt);
+ dev->cntx_config[context].width = fmt->width;
+ dev->cntx_config[context].height = fmt->height;
+ ap1302_write_context_reg(sd, context, CNTX_WIDTH, AP1302_REG16);
+ ap1302_write_context_reg(sd, context, CNTX_HEIGHT, AP1302_REG16);
+ ap1302_read_context_reg(sd, context, CNTX_OUT_FMT, AP1302_REG16);
+ dev->cntx_config[context].out_fmt &= ~OUT_FMT_TYPE_MASK;
+ dev->cntx_config[context].out_fmt |= AP1302_FMT_UYVY422;
+ ap1302_write_context_reg(sd, context, CNTX_OUT_FMT, AP1302_REG16);
+
+ main_context = ap1302_get_context(sd);
+ if (context == main_context) {
+ ap1302_read_context_reg(sd, context,
+ CNTX_MIPI_CTRL, AP1302_REG16);
+ dev->cntx_config[context].mipi_ctrl &= ~MIPI_CTRL_IMGVC_MASK;
+ dev->cntx_config[context].mipi_ctrl |=
+ (context << MIPI_CTRL_IMGVC_OFFSET);
+ dev->cntx_config[context].mipi_ctrl &= ~MIPI_CTRL_SSVC_MASK;
+ dev->cntx_config[context].mipi_ctrl |=
+ (context << MIPI_CTRL_SSVC_OFFSET);
+ dev->cntx_config[context].mipi_ctrl &= ~MIPI_CTRL_SSTYPE_MASK;
+ dev->cntx_config[context].mipi_ctrl |=
+ (0x12 << MIPI_CTRL_SSTYPE_OFFSET);
+ ap1302_write_context_reg(sd, context,
+ CNTX_MIPI_CTRL, AP1302_REG16);
+ ap1302_read_context_reg(sd, context,
+ CNTX_SS, AP1302_REG16);
+ dev->cntx_config[context].ss = AP1302_SS_CTRL;
+ ap1302_write_context_reg(sd, context,
+ CNTX_SS, AP1302_REG16);
+ } else {
+ /* Configure aux stream */
+ ap1302_read_context_reg(sd, context,
+ CNTX_MIPI_II_CTRL, AP1302_REG16);
+ dev->cntx_config[context].mipi_ii_ctrl &= ~MIPI_CTRL_IMGVC_MASK;
+ dev->cntx_config[context].mipi_ii_ctrl |=
+ (context << MIPI_CTRL_IMGVC_OFFSET);
+ ap1302_write_context_reg(sd, context,
+ CNTX_MIPI_II_CTRL, AP1302_REG16);
+ if (stream_info->enable) {
+ ap1302_read_context_reg(sd, main_context,
+ CNTX_OUT_FMT, AP1302_REG16);
+ dev->cntx_config[context].out_fmt |=
+ (aux_stream_config[main_context][context]
+ << OUT_FMT_IIS_OFFSET);
+ ap1302_write_context_reg(sd, main_context,
+ CNTX_OUT_FMT, AP1302_REG16);
+ }
+ }
+ stream_info->ch_id = context;
+ mutex_unlock(&dev->input_lock);
+
+ return 0;
+}
+
+
+static int ap1302_g_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_frame_interval *interval)
+{
+ struct ap1302_device *dev = to_ap1302_device(sd);
+ enum ap1302_contexts context;
+ struct ap1302_res_struct *res_table;
+ u32 cur_res;
+
+ mutex_lock(&dev->input_lock);
+ context = ap1302_get_context(sd);
+ res_table = dev->cntx_res[context].res_table;
+ cur_res = dev->cntx_res[context].cur_res;
+ interval->interval.denominator = res_table[cur_res].fps;
+ interval->interval.numerator = 1;
+ mutex_unlock(&dev->input_lock);
+ return 0;
+}
+
+static int ap1302_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ struct ap1302_device *dev = to_ap1302_device(sd);
+ enum ap1302_contexts context;
+ struct ap1302_res_struct *res_table;
+ int index = fse->index;
+
+ mutex_lock(&dev->input_lock);
+ context = ap1302_get_context(sd);
+ if (index >= dev->cntx_res[context].res_num) {
+ mutex_unlock(&dev->input_lock);
+ return -EINVAL;
+ }
+
+ res_table = dev->cntx_res[context].res_table;
+ fse->min_width = res_table[index].width;
+ fse->min_height = res_table[index].height;
+ fse->max_width = res_table[index].width;
+ fse->max_height = res_table[index].height;
+ mutex_unlock(&dev->input_lock);
+
+ return 0;
+}
+
+
+static int ap1302_g_skip_frames(struct v4l2_subdev *sd, u32 *frames)
+{
+ *frames = 0;
+ return 0;
+}
+
+static int ap1302_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct ap1302_device *dev = to_ap1302_device(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ enum ap1302_contexts context;
+ u32 reg_val;
+ int ret;
+
+ mutex_lock(&dev->input_lock);
+ context = ap1302_get_context(sd);
+ dev_dbg(&client->dev, "ap1302_s_stream. context=%d enable=%d\n",
+ context, enable);
+ /* Switch context */
+ ap1302_i2c_read_reg(sd, REG_CTRL,
+ AP1302_REG16, &reg_val);
+ reg_val &= ~CTRL_CNTX_MASK;
+ reg_val |= (context<<CTRL_CNTX_OFFSET);
+ ap1302_i2c_write_reg(sd, REG_CTRL,
+ AP1302_REG16, reg_val);
+ /* Select sensor */
+ ap1302_i2c_read_reg(sd, REG_SENSOR_SELECT,
+ AP1302_REG16, &reg_val);
+ reg_val &= ~SENSOR_SELECT_MASK;
+ reg_val |= (AP1302_SENSOR_PRI<<SENSOR_SELECT_OFFSET);
+ ap1302_i2c_write_reg(sd, REG_SENSOR_SELECT,
+ AP1302_REG16, reg_val);
+ if (enable) {
+ dev_info(&client->dev, "Start stream. context=%d\n", context);
+ ap1302_dump_context_reg(sd, context);
+ if (!dev->sys_activated) {
+ reg_val = AP1302_SYS_ACTIVATE;
+ dev->sys_activated = 1;
+ } else {
+ reg_val = AP1302_SYS_SWITCH;
+ }
+ } else {
+ dev_info(&client->dev, "Stop stream. context=%d\n", context);
+ reg_val = AP1302_SYS_SWITCH;
+ }
+ ret = ap1302_i2c_write_reg(sd, REG_SYS_START, AP1302_REG16, reg_val);
+ if (ret)
+ dev_err(&client->dev,
+ "AP1302 set stream failed. enable=%d\n", enable);
+ mutex_unlock(&dev->input_lock);
+ return ret;
+}
+
+static u16 ap1302_ev_values[] = {0xfd00, 0xfe80, 0x0, 0x180, 0x300};
+
+static int ap1302_set_exposure_off(struct v4l2_subdev *sd, s32 val)
+{
+ val -= AP1302_MIN_EV;
+ return ap1302_i2c_write_reg(sd, REG_AE_BV_OFF, AP1302_REG16,
+ ap1302_ev_values[val]);
+}
+
+static u16 ap1302_wb_values[] = {
+ 0, /* V4L2_WHITE_BALANCE_MANUAL */
+ 0xf, /* V4L2_WHITE_BALANCE_AUTO */
+ 0x2, /* V4L2_WHITE_BALANCE_INCANDESCENT */
+ 0x4, /* V4L2_WHITE_BALANCE_FLUORESCENT */
+ 0x5, /* V4L2_WHITE_BALANCE_FLUORESCENT_H */
+ 0x1, /* V4L2_WHITE_BALANCE_HORIZON */
+ 0x5, /* V4L2_WHITE_BALANCE_DAYLIGHT */
+ 0xf, /* V4L2_WHITE_BALANCE_FLASH */
+ 0x6, /* V4L2_WHITE_BALANCE_CLOUDY */
+ 0x6, /* V4L2_WHITE_BALANCE_SHADE */
+};
+
+static int ap1302_set_wb_mode(struct v4l2_subdev *sd, s32 val)
+{
+ int ret = 0;
+ u16 reg_val;
+
+ ret = ap1302_i2c_read_reg(sd, REG_AWB_CTRL, AP1302_REG16, &reg_val);
+ if (ret)
+ return ret;
+ reg_val &= ~AWB_CTRL_MODE_MASK;
+ reg_val |= ap1302_wb_values[val] << AWB_CTRL_MODE_OFFSET;
+ if (val == V4L2_WHITE_BALANCE_FLASH)
+ reg_val |= AWB_CTRL_FLASH_MASK;
+ else
+ reg_val &= ~AWB_CTRL_FLASH_MASK;
+ ret = ap1302_i2c_write_reg(sd, REG_AWB_CTRL, AP1302_REG16, reg_val);
+ return ret;
+}
+
+static int ap1302_set_zoom(struct v4l2_subdev *sd, s32 val)
+{
+ ap1302_i2c_write_reg(sd, REG_DZ_TGT_FCT, AP1302_REG16,
+ val * 4 + 0x100);
+ return 0;
+}
+
+static u16 ap1302_sfx_values[] = {
+ 0x00, /* V4L2_COLORFX_NONE */
+ 0x03, /* V4L2_COLORFX_BW */
+ 0x0d, /* V4L2_COLORFX_SEPIA */
+ 0x07, /* V4L2_COLORFX_NEGATIVE */
+ 0x04, /* V4L2_COLORFX_EMBOSS */
+ 0x0f, /* V4L2_COLORFX_SKETCH */
+ 0x08, /* V4L2_COLORFX_SKY_BLUE */
+ 0x09, /* V4L2_COLORFX_GRASS_GREEN */
+ 0x0a, /* V4L2_COLORFX_SKIN_WHITEN */
+ 0x00, /* V4L2_COLORFX_VIVID */
+ 0x00, /* V4L2_COLORFX_AQUA */
+ 0x00, /* V4L2_COLORFX_ART_FREEZE */
+ 0x00, /* V4L2_COLORFX_SILHOUETTE */
+ 0x10, /* V4L2_COLORFX_SOLARIZATION */
+ 0x02, /* V4L2_COLORFX_ANTIQUE */
+ 0x00, /* V4L2_COLORFX_SET_CBCR */
+};
+
+static int ap1302_set_special_effect(struct v4l2_subdev *sd, s32 val)
+{
+ ap1302_i2c_write_reg(sd, REG_SFX_MODE, AP1302_REG16,
+ ap1302_sfx_values[val]);
+ return 0;
+}
+
+static u16 ap1302_scene_mode_values[] = {
+ 0x00, /* V4L2_SCENE_MODE_NONE */
+ 0x07, /* V4L2_SCENE_MODE_BACKLIGHT */
+ 0x0a, /* V4L2_SCENE_MODE_BEACH_SNOW */
+ 0x06, /* V4L2_SCENE_MODE_CANDLE_LIGHT */
+ 0x00, /* V4L2_SCENE_MODE_DAWN_DUSK */
+ 0x00, /* V4L2_SCENE_MODE_FALL_COLORS */
+ 0x0d, /* V4L2_SCENE_MODE_FIREWORKS */
+ 0x02, /* V4L2_SCENE_MODE_LANDSCAPE */
+ 0x05, /* V4L2_SCENE_MODE_NIGHT */
+ 0x0c, /* V4L2_SCENE_MODE_PARTY_INDOOR */
+ 0x01, /* V4L2_SCENE_MODE_PORTRAIT */
+ 0x03, /* V4L2_SCENE_MODE_SPORTS */
+ 0x0e, /* V4L2_SCENE_MODE_SUNSET */
+ 0x0b, /* V4L2_SCENE_MODE_TEXT */
+};
+
+static int ap1302_set_scene_mode(struct v4l2_subdev *sd, s32 val)
+{
+ ap1302_i2c_write_reg(sd, REG_SCENE_CTRL, AP1302_REG16,
+ ap1302_scene_mode_values[val]);
+ return 0;
+}
+
+static u16 ap1302_flicker_values[] = {
+ 0x0, /* OFF */
+ 0x3201, /* 50HZ */
+ 0x3c01, /* 60HZ */
+ 0x2 /* AUTO */
+};
+
+static int ap1302_set_flicker_freq(struct v4l2_subdev *sd, s32 val)
+{
+ ap1302_i2c_write_reg(sd, REG_FLICK_CTRL, AP1302_REG16,
+ ap1302_flicker_values[val]);
+ return 0;
+}
+
+static int ap1302_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct ap1302_device *dev = container_of(
+ ctrl->handler, struct ap1302_device, ctrl_handler);
+
+ switch (ctrl->id) {
+ case V4L2_CID_RUN_MODE:
+ dev->cur_context = ap1302_cntx_mapping[ctrl->val];
+ break;
+ case V4L2_CID_EXPOSURE:
+ ap1302_set_exposure_off(&dev->sd, ctrl->val);
+ break;
+ case V4L2_CID_AUTO_N_PRESET_WHITE_BALANCE:
+ ap1302_set_wb_mode(&dev->sd, ctrl->val);
+ break;
+ case V4L2_CID_ZOOM_ABSOLUTE:
+ ap1302_set_zoom(&dev->sd, ctrl->val);
+ break;
+ case V4L2_CID_COLORFX:
+ ap1302_set_special_effect(&dev->sd, ctrl->val);
+ break;
+ case V4L2_CID_SCENE_MODE:
+ ap1302_set_scene_mode(&dev->sd, ctrl->val);
+ break;
+ case V4L2_CID_POWER_LINE_FREQUENCY:
+ ap1302_set_flicker_freq(&dev->sd, ctrl->val);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ap1302_g_register(struct v4l2_subdev *sd,
+ struct v4l2_dbg_register *reg)
+{
+ struct ap1302_device *dev = to_ap1302_device(sd);
+ int ret;
+ u32 reg_val;
+
+ if (reg->size != AP1302_REG16 &&
+ reg->size != AP1302_REG32)
+ return -EINVAL;
+
+ mutex_lock(&dev->input_lock);
+ if (dev->power_on)
+ ret = ap1302_i2c_read_reg(sd, reg->reg, reg->size, &reg_val);
+ else
+ ret = -EIO;
+ mutex_unlock(&dev->input_lock);
+ if (ret)
+ return ret;
+
+ reg->val = reg_val;
+
+ return 0;
+}
+
+static int ap1302_s_register(struct v4l2_subdev *sd,
+ const struct v4l2_dbg_register *reg)
+{
+ struct ap1302_device *dev = to_ap1302_device(sd);
+ int ret;
+
+ if (reg->size != AP1302_REG16 &&
+ reg->size != AP1302_REG32)
+ return -EINVAL;
+
+ mutex_lock(&dev->input_lock);
+ if (dev->power_on)
+ ret = ap1302_i2c_write_reg(sd, reg->reg, reg->size, reg->val);
+ else
+ ret = -EIO;
+ mutex_unlock(&dev->input_lock);
+ return ret;
+}
+
+static long ap1302_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
+{
+ long ret = 0;
+ switch (cmd) {
+ case VIDIOC_DBG_G_REGISTER:
+ ret = ap1302_g_register(sd, arg);
+ break;
+ case VIDIOC_DBG_S_REGISTER:
+ ret = ap1302_s_register(sd, arg);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops ctrl_ops = {
+ .s_ctrl = ap1302_s_ctrl,
+};
+
+static const char * const ctrl_run_mode_menu[] = {
+ NULL,
+ "Video",
+ "Still capture",
+ "Continuous capture",
+ "Preview",
+};
+
+static const struct v4l2_ctrl_config ctrls[] = {
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_RUN_MODE,
+ .name = "Run Mode",
+ .type = V4L2_CTRL_TYPE_MENU,
+ .min = 1,
+ .def = 4,
+ .max = 4,
+ .qmenu = ctrl_run_mode_menu,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_EXPOSURE,
+ .name = "Exposure",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = AP1302_MIN_EV,
+ .def = 0,
+ .max = AP1302_MAX_EV,
+ .step = 1,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_AUTO_N_PRESET_WHITE_BALANCE,
+ .name = "White Balance",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .def = 0,
+ .max = 9,
+ .step = 1,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_ZOOM_ABSOLUTE,
+ .name = "Zoom Absolute",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .def = 0,
+ .max = 1024,
+ .step = 1,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_COLORFX,
+ .name = "Color Special Effect",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .def = 0,
+ .max = 15,
+ .step = 1,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_SCENE_MODE,
+ .name = "Scene Mode",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .def = 0,
+ .max = 13,
+ .step = 1,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_POWER_LINE_FREQUENCY,
+ .name = "Light frequency filter",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .def = 3,
+ .max = 3,
+ .step = 1,
+ },
+};
+
+static struct v4l2_subdev_sensor_ops ap1302_sensor_ops = {
+ .g_skip_frames = ap1302_g_skip_frames,
+};
+
+static const struct v4l2_subdev_video_ops ap1302_video_ops = {
+ .s_stream = ap1302_s_stream,
+ .g_frame_interval = ap1302_g_frame_interval,
+};
+
+static const struct v4l2_subdev_core_ops ap1302_core_ops = {
+ .s_power = ap1302_s_power,
+ .ioctl = ap1302_ioctl,
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+ .g_register = ap1302_g_register,
+ .s_register = ap1302_s_register,
+#endif
+};
+
+static const struct v4l2_subdev_pad_ops ap1302_pad_ops = {
+ .enum_mbus_code = ap1302_enum_mbus_code,
+ .enum_frame_size = ap1302_enum_frame_size,
+ .get_fmt = ap1302_get_fmt,
+ .set_fmt = ap1302_set_fmt,
+};
+
+static const struct v4l2_subdev_ops ap1302_ops = {
+ .core = &ap1302_core_ops,
+ .pad = &ap1302_pad_ops,
+ .video = &ap1302_video_ops,
+ .sensor = &ap1302_sensor_ops
+};
+
+static int ap1302_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct ap1302_device *dev = to_ap1302_device(sd);
+
+ if (dev->platform_data->platform_deinit)
+ dev->platform_data->platform_deinit();
+
+ release_firmware(dev->fw);
+
+ media_entity_cleanup(&dev->sd.entity);
+ dev->platform_data->csi_cfg(sd, 0);
+ v4l2_device_unregister_subdev(sd);
+
+ return 0;
+}
+
+static int ap1302_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct ap1302_device *dev;
+ int ret;
+ unsigned int i;
+
+ dev_info(&client->dev, "ap1302 probe called.\n");
+
+ /* allocate device & init sub device */
+ dev = devm_kzalloc(&client->dev, sizeof(*dev), GFP_KERNEL);
+ if (!dev) {
+ dev_err(&client->dev, "%s: out of memory\n", __func__);
+ return -ENOMEM;
+ }
+
+ mutex_init(&dev->input_lock);
+
+ v4l2_i2c_subdev_init(&(dev->sd), client, &ap1302_ops);
+
+ ret = ap1302_request_firmware(&(dev->sd));
+ if (ret) {
+ dev_err(&client->dev, "Cannot request ap1302 firmware.\n");
+ goto out_free;
+ }
+
+ dev->regmap16 = devm_regmap_init_i2c(client, &ap1302_reg16_config);
+ if (IS_ERR(dev->regmap16)) {
+ ret = PTR_ERR(dev->regmap16);
+ dev_err(&client->dev,
+ "Failed to allocate 16bit register map: %d\n", ret);
+ return ret;
+ }
+
+ dev->regmap32 = devm_regmap_init_i2c(client, &ap1302_reg32_config);
+ if (IS_ERR(dev->regmap32)) {
+ ret = PTR_ERR(dev->regmap32);
+ dev_err(&client->dev,
+ "Failed to allocate 32bit register map: %d\n", ret);
+ return ret;
+ }
+
+ if (client->dev.platform_data) {
+ ret = ap1302_s_config(&dev->sd, client->dev.platform_data);
+ if (ret)
+ goto out_free;
+ }
+
+ dev->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ dev->pad.flags = MEDIA_PAD_FL_SOURCE;
+ dev->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
+
+ dev->cntx_res[CONTEXT_PREVIEW].res_num = ARRAY_SIZE(ap1302_preview_res);
+ dev->cntx_res[CONTEXT_PREVIEW].res_table = ap1302_preview_res;
+ dev->cntx_res[CONTEXT_SNAPSHOT].res_num =
+ ARRAY_SIZE(ap1302_snapshot_res);
+ dev->cntx_res[CONTEXT_SNAPSHOT].res_table = ap1302_snapshot_res;
+ dev->cntx_res[CONTEXT_VIDEO].res_num = ARRAY_SIZE(ap1302_video_res);
+ dev->cntx_res[CONTEXT_VIDEO].res_table = ap1302_video_res;
+
+ ret = v4l2_ctrl_handler_init(&dev->ctrl_handler, ARRAY_SIZE(ctrls));
+ if (ret) {
+ ap1302_remove(client);
+ return ret;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(ctrls); i++)
+ v4l2_ctrl_new_custom(&dev->ctrl_handler, &ctrls[i], NULL);
+
+ if (dev->ctrl_handler.error) {
+ ap1302_remove(client);
+ return dev->ctrl_handler.error;
+ }
+
+ /* Use same lock for controls as for everything else. */
+ dev->ctrl_handler.lock = &dev->input_lock;
+ dev->sd.ctrl_handler = &dev->ctrl_handler;
+ v4l2_ctrl_handler_setup(&dev->ctrl_handler);
+
+ dev->run_mode = v4l2_ctrl_find(&dev->ctrl_handler, V4L2_CID_RUN_MODE);
+ v4l2_ctrl_s_ctrl(dev->run_mode, ATOMISP_RUN_MODE_PREVIEW);
+
+ ret = media_entity_pads_init(&dev->sd.entity, 1, &dev->pad);
+ if (ret)
+ ap1302_remove(client);
+ return ret;
+out_free:
+ v4l2_device_unregister_subdev(&dev->sd);
+ return ret;
+}
+
+static const struct i2c_device_id ap1302_id[] = {
+ {AP1302_NAME, 0},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, ap1302_id);
+
+static struct i2c_driver ap1302_driver = {
+ .driver = {
+ .name = AP1302_NAME,
+ },
+ .probe = ap1302_probe,
+ .remove = ap1302_remove,
+ .id_table = ap1302_id,
+};
+
+module_i2c_driver(ap1302_driver);
+
+MODULE_AUTHOR("Tianshu Qiu <tian.shu.qiu@intel.com>");
+MODULE_DESCRIPTION("AP1302 Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/media/atomisp/i2c/ap1302.h b/drivers/staging/media/atomisp/i2c/ap1302.h
new file mode 100644
index 000000000000..9341232c580d
--- /dev/null
+++ b/drivers/staging/media/atomisp/i2c/ap1302.h
@@ -0,0 +1,198 @@
+/*
+ *
+ * Copyright (c) 2013 Intel Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+
+#ifndef __AP1302_H__
+#define __AP1302_H__
+
+#include "../include/linux/atomisp_platform.h"
+#include <linux/regmap.h>
+#include <linux/types.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-subdev.h>
+
+#define AP1302_NAME "ap1302"
+#define AP1302_CHIP_ID 0x265
+#define AP1302_I2C_MAX_LEN 65534
+#define AP1302_FW_WINDOW_OFFSET 0x8000
+#define AP1302_FW_WINDOW_SIZE 0x2000
+
+#define AP1302_REG16 2
+#define AP1302_REG32 4
+
+#define REG_CHIP_VERSION 0x0000
+#define REG_CHIP_REV 0x0050
+#define REG_MF_ID 0x0004
+#define REG_ERROR 0x0006
+#define REG_CTRL 0x1000
+#define REG_DZ_TGT_FCT 0x1010
+#define REG_SFX_MODE 0x1016
+#define REG_SS_HEAD_PT0 0x1174
+#define REG_AE_BV_OFF 0x5014
+#define REG_AE_BV_BIAS 0x5016
+#define REG_AWB_CTRL 0x5100
+#define REG_FLICK_CTRL 0x5440
+#define REG_SCENE_CTRL 0x5454
+#define REG_BOOTDATA_STAGE 0x6002
+#define REG_SENSOR_SELECT 0x600C
+#define REG_SYS_START 0x601A
+#define REG_SIP_CRC 0xF052
+
+#define REG_PREVIEW_BASE 0x2000
+#define REG_SNAPSHOT_BASE 0x3000
+#define REG_VIDEO_BASE 0x4000
+#define CNTX_WIDTH 0x00
+#define CNTX_HEIGHT 0x02
+#define CNTX_ROI_X0 0x04
+#define CNTX_ROI_Y0 0x06
+#define CNTX_ROI_X1 0x08
+#define CNTX_ROI_Y1 0x0A
+#define CNTX_ASPECT 0x0C
+#define CNTX_LOCK 0x0E
+#define CNTX_ENABLE 0x10
+#define CNTX_OUT_FMT 0x12
+#define CNTX_SENSOR_MODE 0x14
+#define CNTX_MIPI_CTRL 0x16
+#define CNTX_MIPI_II_CTRL 0x18
+#define CNTX_LINE_TIME 0x1C
+#define CNTX_MAX_FPS 0x20
+#define CNTX_AE_USG 0x22
+#define CNTX_AE_UPPER_ET 0x24
+#define CNTX_AE_MAX_ET 0x28
+#define CNTX_SS 0x2C
+#define CNTX_S1_SENSOR_MODE 0x2E
+#define CNTX_HINF_CTRL 0x30
+
+#define CTRL_CNTX_MASK 0x03
+#define CTRL_CNTX_OFFSET 0x00
+#define HINF_CTRL_LANE_MASK 0x07
+#define HINF_CTRL_LANE_OFFSET 0x00
+#define MIPI_CTRL_IMGVC_MASK 0xC0
+#define MIPI_CTRL_IMGVC_OFFSET 0x06
+#define MIPI_CTRL_IMGTYPE_AUTO 0x3F
+#define MIPI_CTRL_SSVC_MASK 0xC000
+#define MIPI_CTRL_SSVC_OFFSET 0x0E
+#define MIPI_CTRL_SSTYPE_MASK 0x3F00
+#define MIPI_CTRL_SSTYPE_OFFSET 0x08
+#define OUT_FMT_IIS_MASK 0x30
+#define OUT_FMT_IIS_OFFSET 0x08
+#define OUT_FMT_SS_MASK 0x1000
+#define OUT_FMT_SS_OFFSET 0x12
+#define OUT_FMT_TYPE_MASK 0xFF
+#define SENSOR_SELECT_MASK 0x03
+#define SENSOR_SELECT_OFFSET 0x00
+#define AWB_CTRL_MODE_MASK 0x0F
+#define AWB_CTRL_MODE_OFFSET 0x00
+#define AWB_CTRL_FLASH_MASK 0x100
+
+#define AP1302_FMT_UYVY422 0x50
+
+#define AP1302_SYS_ACTIVATE 0x8010
+#define AP1302_SYS_SWITCH 0x8140
+#define AP1302_SENSOR_PRI 0x01
+#define AP1302_SENSOR_SEC 0x02
+#define AP1302_SS_CTRL 0x31
+
+#define AP1302_MAX_RATIO_MISMATCH 10 /* Unit in percentage */
+#define AP1302_MAX_EV 2
+#define AP1302_MIN_EV -2
+
+enum ap1302_contexts {
+ CONTEXT_PREVIEW = 0,
+ CONTEXT_SNAPSHOT,
+ CONTEXT_VIDEO,
+ CONTEXT_NUM
+};
+
+/* The context registers are defined according to the preview/video layout;
+   the preview and video contexts share the same register definition.  The
+   snapshot context, however, has no S1_SENSOR_MODE register, so when
+   addressing snapshot registers any offset beyond S1_SENSOR_MODE must be
+   reduced by 2. */
+struct ap1302_context_config {
+ u16 width;
+ u16 height;
+ u16 roi_x0;
+ u16 roi_y0;
+ u16 roi_x1;
+ u16 roi_y1;
+ u16 aspect_factor;
+ u16 lock;
+ u16 enable;
+ u16 out_fmt;
+ u16 sensor_mode;
+ u16 mipi_ctrl;
+ u16 mipi_ii_ctrl;
+ u16 padding;
+ u32 line_time;
+ u16 max_fps;
+ u16 ae_usg;
+ u32 ae_upper_et;
+ u32 ae_max_et;
+ u16 ss;
+ u16 s1_sensor_mode;
+ u16 hinf_ctrl;
+ u32 reserved;
+};
+
+struct ap1302_res_struct {
+ u16 width;
+ u16 height;
+ u16 fps;
+};
+
+struct ap1302_context_res {
+ s32 res_num;
+ s32 cur_res;
+ struct ap1302_res_struct *res_table;
+};
+
+struct ap1302_device {
+ struct v4l2_subdev sd;
+ struct media_pad pad;
+ struct camera_sensor_platform_data *platform_data;
+ const struct firmware *fw;
+ struct mutex input_lock; /* serialize sensor's ioctl */
+ struct v4l2_mbus_framefmt format;
+ struct v4l2_ctrl_handler ctrl_handler;
+ struct v4l2_ctrl *run_mode;
+ struct ap1302_context_config cntx_config[CONTEXT_NUM];
+ struct ap1302_context_res cntx_res[CONTEXT_NUM];
+ enum ap1302_contexts cur_context;
+ unsigned int num_lanes;
+ struct regmap *regmap16;
+ struct regmap *regmap32;
+ bool sys_activated;
+ bool power_on;
+};
+
+struct ap1302_firmware {
+ u32 crc;
+ u32 pll_init_size;
+ u32 total_size;
+ u32 reserved;
+};
+
+struct ap1302_context_info {
+ u16 offset;
+ u16 len;
+ char *name;
+};
+
+#endif
diff --git a/drivers/staging/media/atomisp/i2c/gc0310.c b/drivers/staging/media/atomisp/i2c/gc0310.c
new file mode 100644
index 000000000000..1ec616a15086
--- /dev/null
+++ b/drivers/staging/media/atomisp/i2c/gc0310.c
@@ -0,0 +1,1490 @@
+/*
+ * Support for GalaxyCore GC0310 VGA camera sensor.
+ *
+ * Copyright (c) 2013 Intel Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/kmod.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/gpio.h>
+#include <linux/moduleparam.h>
+#include <media/v4l2-device.h>
+#include <linux/io.h>
+#include "../include/linux/atomisp_gmin_platform.h"
+
+#include "gc0310.h"
+
+/* i2c read/write stuff */
+static int gc0310_read_reg(struct i2c_client *client,
+ u16 data_length, u8 reg, u8 *val)
+{
+ int err;
+ struct i2c_msg msg[2];
+ unsigned char data[1];
+
+ if (!client->adapter) {
+ dev_err(&client->dev, "%s error, no client->adapter\n",
+ __func__);
+ return -ENODEV;
+ }
+
+ if (data_length != GC0310_8BIT) {
+ dev_err(&client->dev, "%s error, invalid data length\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ memset(msg, 0, sizeof(msg));
+
+ msg[0].addr = client->addr;
+ msg[0].flags = 0;
+ msg[0].len = I2C_MSG_LENGTH;
+ msg[0].buf = data;
+
+ /* the register address is a single byte */
+ data[0] = (u8)(reg & 0xff);
+
+ msg[1].addr = client->addr;
+ msg[1].len = data_length;
+ msg[1].flags = I2C_M_RD;
+ msg[1].buf = data;
+
+ err = i2c_transfer(client->adapter, msg, 2);
+ if (err != 2) {
+ if (err >= 0)
+ err = -EIO;
+ dev_err(&client->dev,
+ "read from offset 0x%x error %d", reg, err);
+ return err;
+ }
+
+ *val = 0;
+ /* only 8-bit reads are supported */
+ if (data_length == GC0310_8BIT)
+ *val = (u8)data[0];
+
+ return 0;
+}
+
+static int gc0310_i2c_write(struct i2c_client *client, u16 len, u8 *data)
+{
+ struct i2c_msg msg;
+ const int num_msg = 1;
+ int ret;
+
+ msg.addr = client->addr;
+ msg.flags = 0;
+ msg.len = len;
+ msg.buf = data;
+ ret = i2c_transfer(client->adapter, &msg, 1);
+
+ return ret == num_msg ? 0 : -EIO;
+}
+
+static int gc0310_write_reg(struct i2c_client *client, u16 data_length,
+ u8 reg, u8 val)
+{
+ int ret;
+ unsigned char data[2] = {0};
+ u8 *wreg = (u8 *)data;
+ const u16 len = data_length + sizeof(u8); /* 8-bit address + data */
+
+ if (data_length != GC0310_8BIT) {
+ dev_err(&client->dev,
+ "%s error, invalid data_length\n", __func__);
+ return -EINVAL;
+ }
+
+ /* the register address is a single byte */
+ *wreg = (u8)(reg & 0xff);
+
+ if (data_length == GC0310_8BIT) {
+ data[1] = (u8)(val);
+ }
+
+ ret = gc0310_i2c_write(client, len, data);
+ if (ret)
+ dev_err(&client->dev,
+ "write error: wrote 0x%x to offset 0x%x error %d",
+ val, reg, ret);
+
+ return ret;
+}
+
+/*
+ * gc0310_write_reg_array - Initializes a list of GC0310 registers
+ * @client: i2c driver client structure
+ * @reglist: list of registers to be written
+ *
+ * This function initializes a list of registers. When consecutive addresses
+ * are found in a row on the list, this function creates a buffer and sends
+ * consecutive data in a single i2c_transfer().
+ *
+ * __gc0310_flush_reg_array(), __gc0310_buf_reg_array() and
+ * __gc0310_write_reg_is_consecutive() are internal functions to
+ * gc0310_write_reg_array() and should not be used anywhere else.
+ *
+ */
+
+static int __gc0310_flush_reg_array(struct i2c_client *client,
+ struct gc0310_write_ctrl *ctrl)
+{
+ u16 size;
+
+ if (ctrl->index == 0)
+ return 0;
+
+ size = sizeof(u8) + ctrl->index; /* 8-bit address + data */
+ ctrl->buffer.addr = (u8)(ctrl->buffer.addr);
+ ctrl->index = 0;
+
+ return gc0310_i2c_write(client, size, (u8 *)&ctrl->buffer);
+}
+
+static int __gc0310_buf_reg_array(struct i2c_client *client,
+ struct gc0310_write_ctrl *ctrl,
+ const struct gc0310_reg *next)
+{
+ int size;
+
+ switch (next->type) {
+ case GC0310_8BIT:
+ size = 1;
+ ctrl->buffer.data[ctrl->index] = (u8)next->val;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* When first item is added, we need to store its starting address */
+ if (ctrl->index == 0)
+ ctrl->buffer.addr = next->reg;
+
+ ctrl->index += size;
+
+ /*
+  * If the buffer cannot hold another byte on top of the address, flush
+  * it now so that the next item cannot overflow it.
+  */
+ if (ctrl->index + sizeof(u8) >= GC0310_MAX_WRITE_BUF_SIZE)
+ return __gc0310_flush_reg_array(client, ctrl);
+
+ return 0;
+}
+
+static int __gc0310_write_reg_is_consecutive(struct i2c_client *client,
+ struct gc0310_write_ctrl *ctrl,
+ const struct gc0310_reg *next)
+{
+ if (ctrl->index == 0)
+ return 1;
+
+ return ctrl->buffer.addr + ctrl->index == next->reg;
+}
+
+static int gc0310_write_reg_array(struct i2c_client *client,
+ const struct gc0310_reg *reglist)
+{
+ const struct gc0310_reg *next = reglist;
+ struct gc0310_write_ctrl ctrl;
+ int err;
+
+ ctrl.index = 0;
+ for (; next->type != GC0310_TOK_TERM; next++) {
+ switch (next->type & GC0310_TOK_MASK) {
+ case GC0310_TOK_DELAY:
+ err = __gc0310_flush_reg_array(client, &ctrl);
+ if (err)
+ return err;
+ msleep(next->val);
+ break;
+ default:
+ /*
+ * If next address is not consecutive, data needs to be
+ * flushed before proceed.
+ */
+ if (!__gc0310_write_reg_is_consecutive(client, &ctrl,
+ next)) {
+ err = __gc0310_flush_reg_array(client, &ctrl);
+ if (err)
+ return err;
+ }
+ err = __gc0310_buf_reg_array(client, &ctrl, next);
+ if (err) {
+ dev_err(&client->dev, "%s: write error, aborted\n",
+ __func__);
+ return err;
+ }
+ break;
+ }
+ }
+
+ return __gc0310_flush_reg_array(client, &ctrl);
+}
+static int gc0310_g_focal(struct v4l2_subdev *sd, s32 *val)
+{
+ *val = (GC0310_FOCAL_LENGTH_NUM << 16) | GC0310_FOCAL_LENGTH_DEM;
+ return 0;
+}
+
+static int gc0310_g_fnumber(struct v4l2_subdev *sd, s32 *val)
+{
+ /* const f-number for the GC0310 */
+ *val = (GC0310_F_NUMBER_DEFAULT_NUM << 16) | GC0310_F_NUMBER_DEM;
+ return 0;
+}
+
+static int gc0310_g_fnumber_range(struct v4l2_subdev *sd, s32 *val)
+{
+ *val = (GC0310_F_NUMBER_DEFAULT_NUM << 24) |
+ (GC0310_F_NUMBER_DEM << 16) |
+ (GC0310_F_NUMBER_DEFAULT_NUM << 8) | GC0310_F_NUMBER_DEM;
+ return 0;
+}
+
+static int gc0310_g_bin_factor_x(struct v4l2_subdev *sd, s32 *val)
+{
+ struct gc0310_device *dev = to_gc0310_sensor(sd);
+
+ *val = gc0310_res[dev->fmt_idx].bin_factor_x;
+
+ return 0;
+}
+
+static int gc0310_g_bin_factor_y(struct v4l2_subdev *sd, s32 *val)
+{
+ struct gc0310_device *dev = to_gc0310_sensor(sd);
+
+ *val = gc0310_res[dev->fmt_idx].bin_factor_y;
+
+ return 0;
+}
+
+static int gc0310_get_intg_factor(struct i2c_client *client,
+ struct camera_mipi_info *info,
+ const struct gc0310_resolution *res)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct gc0310_device *dev = to_gc0310_sensor(sd);
+ struct atomisp_sensor_mode_data *buf = &info->data;
+ u16 val;
+ u8 reg_val;
+ int ret;
+ unsigned int hori_blanking;
+ unsigned int vert_blanking;
+ unsigned int sh_delay;
+
+ if (!info)
+ return -EINVAL;
+
+ /* pixel clock calculation */
+ dev->vt_pix_clk_freq_mhz = 14400000; /* 14.4 MHz */
+ buf->vt_pix_clk_freq_mhz = dev->vt_pix_clk_freq_mhz;
+ pr_info("vt_pix_clk_freq_mhz=%d\n", buf->vt_pix_clk_freq_mhz);
+
+ /* get integration time */
+ buf->coarse_integration_time_min = GC0310_COARSE_INTG_TIME_MIN;
+ buf->coarse_integration_time_max_margin =
+ GC0310_COARSE_INTG_TIME_MAX_MARGIN;
+
+ buf->fine_integration_time_min = GC0310_FINE_INTG_TIME_MIN;
+ buf->fine_integration_time_max_margin =
+ GC0310_FINE_INTG_TIME_MAX_MARGIN;
+
+ buf->fine_integration_time_def = GC0310_FINE_INTG_TIME_MIN;
+ buf->read_mode = res->bin_mode;
+
+ /* get the cropping and output resolution to ISP for this mode. */
+ /* Getting crop_horizontal_start */
+ ret = gc0310_read_reg(client, GC0310_8BIT,
+ GC0310_H_CROP_START_H, &reg_val);
+ if (ret)
+ return ret;
+ val = (reg_val & 0xFF) << 8;
+ ret = gc0310_read_reg(client, GC0310_8BIT,
+ GC0310_H_CROP_START_L, &reg_val);
+ if (ret)
+ return ret;
+ buf->crop_horizontal_start = val | (reg_val & 0xFF);
+ pr_info("crop_horizontal_start=%d\n", buf->crop_horizontal_start);
+
+ /* Getting crop_vertical_start */
+ ret = gc0310_read_reg(client, GC0310_8BIT,
+ GC0310_V_CROP_START_H, &reg_val);
+ if (ret)
+ return ret;
+ val = (reg_val & 0xFF) << 8;
+ ret = gc0310_read_reg(client, GC0310_8BIT,
+ GC0310_V_CROP_START_L, &reg_val);
+ if (ret)
+ return ret;
+ buf->crop_vertical_start = val | (reg_val & 0xFF);
+ pr_info("crop_vertical_start=%d\n", buf->crop_vertical_start);
+
+ /* Getting output_width */
+ ret = gc0310_read_reg(client, GC0310_8BIT,
+ GC0310_H_OUTSIZE_H, &reg_val);
+ if (ret)
+ return ret;
+ val = (reg_val & 0xFF) << 8;
+ ret = gc0310_read_reg(client, GC0310_8BIT,
+ GC0310_H_OUTSIZE_L, &reg_val);
+ if (ret)
+ return ret;
+ buf->output_width = val | (reg_val & 0xFF);
+ pr_info("output_width=%d\n", buf->output_width);
+
+ /* Getting output_height */
+ ret = gc0310_read_reg(client, GC0310_8BIT,
+ GC0310_V_OUTSIZE_H, &reg_val);
+ if (ret)
+ return ret;
+ val = (reg_val & 0xFF) << 8;
+ ret = gc0310_read_reg(client, GC0310_8BIT,
+ GC0310_V_OUTSIZE_L, &reg_val);
+ if (ret)
+ return ret;
+ buf->output_height = val | (reg_val & 0xFF);
+ pr_info("output_height=%d\n", buf->output_height);
+
+ buf->crop_horizontal_end = buf->crop_horizontal_start + buf->output_width - 1;
+ buf->crop_vertical_end = buf->crop_vertical_start + buf->output_height - 1;
+ pr_info("crop_horizontal_end=%d\n", buf->crop_horizontal_end);
+ pr_info("crop_vertical_end=%d\n", buf->crop_vertical_end);
+
+ /* Getting line_length_pck */
+ ret = gc0310_read_reg(client, GC0310_8BIT,
+ GC0310_H_BLANKING_H, &reg_val);
+ if (ret)
+ return ret;
+ val = (reg_val & 0xFF) << 8;
+ ret = gc0310_read_reg(client, GC0310_8BIT,
+ GC0310_H_BLANKING_L, &reg_val);
+ if (ret)
+ return ret;
+ hori_blanking = val | (reg_val & 0xFF);
+ ret = gc0310_read_reg(client, GC0310_8BIT,
+ GC0310_SH_DELAY, &reg_val);
+ if (ret)
+ return ret;
+ sh_delay = reg_val;
+ buf->line_length_pck = buf->output_width + hori_blanking + sh_delay + 4;
+ pr_info("hori_blanking=%d sh_delay=%d line_length_pck=%d\n", hori_blanking, sh_delay, buf->line_length_pck);
+
+ /* Getting frame_length_lines */
+ ret = gc0310_read_reg(client, GC0310_8BIT,
+ GC0310_V_BLANKING_H, &reg_val);
+ if (ret)
+ return ret;
+ val = (reg_val & 0xFF) << 8;
+ ret = gc0310_read_reg(client, GC0310_8BIT,
+ GC0310_V_BLANKING_L, &reg_val);
+ if (ret)
+ return ret;
+ vert_blanking = val | (reg_val & 0xFF);
+ buf->frame_length_lines = buf->output_height + vert_blanking;
+ pr_info("vert_blanking=%d frame_length_lines=%d\n", vert_blanking, buf->frame_length_lines);
+
+ buf->binning_factor_x = res->bin_factor_x ?
+ res->bin_factor_x : 1;
+ buf->binning_factor_y = res->bin_factor_y ?
+ res->bin_factor_y : 1;
+ return 0;
+}
+
+static int gc0310_set_gain(struct v4l2_subdev *sd, int gain)
+
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret;
+ u8 again, dgain;
+
+ if (gain < 0x20)
+ gain = 0x20;
+ if (gain > 0x80)
+ gain = 0x80;
+
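+ /* Split the requested gain between the analog (AGC) and digital (DGC)
+    stages: below 0x40 keep the lower analog step and apply the whole
+    value as digital gain, otherwise select the next analog step and
+    halve the digital gain. */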
+ if (gain >= 0x20 && gain < 0x40) {
+ again = 0x0; /* sqrt(2) */
+ dgain = gain;
+ } else {
+ again = 0x2; /* 2 * sqrt(2) */
+ dgain = gain / 2;
+ }
+
+ pr_info("gain=0x%x again=0x%x dgain=0x%x\n", gain, again, dgain);
+
+ /* set analog gain */
+ ret = gc0310_write_reg(client, GC0310_8BIT,
+ GC0310_AGC_ADJ, again);
+ if (ret)
+ return ret;
+
+ /* set digital gain */
+ ret = gc0310_write_reg(client, GC0310_8BIT,
+ GC0310_DGC_ADJ, dgain);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int __gc0310_set_exposure(struct v4l2_subdev *sd, int coarse_itg,
+ int gain, int digitgain)
+
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret;
+
+ pr_info("coarse_itg=%d gain=%d digitgain=%d\n", coarse_itg, gain, digitgain);
+
+ /* set exposure: a 12-bit value, low byte first, then the low nibble
+    of the high byte */
+ ret = gc0310_write_reg(client, GC0310_8BIT,
+ GC0310_AEC_PK_EXPO_L,
+ coarse_itg & 0xff);
+ if (ret)
+ return ret;
+
+ ret = gc0310_write_reg(client, GC0310_8BIT,
+ GC0310_AEC_PK_EXPO_H,
+ (coarse_itg >> 8) & 0x0f);
+ if (ret)
+ return ret;
+
+ ret = gc0310_set_gain(sd, gain);
+ if (ret)
+ return ret;
+
+ return ret;
+}
+
+static int gc0310_set_exposure(struct v4l2_subdev *sd, int exposure,
+ int gain, int digitgain)
+{
+ struct gc0310_device *dev = to_gc0310_sensor(sd);
+ int ret;
+
+ mutex_lock(&dev->input_lock);
+ ret = __gc0310_set_exposure(sd, exposure, gain, digitgain);
+ mutex_unlock(&dev->input_lock);
+
+ return ret;
+}
+
+static long gc0310_s_exposure(struct v4l2_subdev *sd,
+ struct atomisp_exposure *exposure)
+{
+ int exp = exposure->integration_time[0];
+ int gain = exposure->gain[0];
+ int digitgain = exposure->gain[1];
+
+ /* we should not accept the invalid value below. */
+ if (gain == 0) {
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ v4l2_err(client, "%s: invalid value\n", __func__);
+ return -EINVAL;
+ }
+
+ return gc0310_set_exposure(sd, exp, gain, digitgain);
+}
+
+/* TODO */
+static int gc0310_v_flip(struct v4l2_subdev *sd, s32 value)
+{
+ return 0;
+}
+
+/* TODO */
+static int gc0310_h_flip(struct v4l2_subdev *sd, s32 value)
+{
+ return 0;
+}
+
+static long gc0310_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
+{
+
+ switch (cmd) {
+ case ATOMISP_IOC_S_EXPOSURE:
+ return gc0310_s_exposure(sd, arg);
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/* This returns the exposure time being used. This should only be used
+ * for filling in EXIF data, not for actual image processing.
+ */
+static int gc0310_q_exposure(struct v4l2_subdev *sd, s32 *value)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ u8 reg_v;
+ int ret;
+
+ /* get exposure */
+ ret = gc0310_read_reg(client, GC0310_8BIT,
+ GC0310_AEC_PK_EXPO_L,
+ &reg_v);
+ if (ret)
+ goto err;
+
+ *value = reg_v;
+ ret = gc0310_read_reg(client, GC0310_8BIT,
+ GC0310_AEC_PK_EXPO_H,
+ &reg_v);
+ if (ret)
+ goto err;
+
+ *value = *value + (reg_v << 8);
+err:
+ return ret;
+}
+
+static int gc0310_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct gc0310_device *dev =
+ container_of(ctrl->handler, struct gc0310_device, ctrl_handler);
+ struct i2c_client *client = v4l2_get_subdevdata(&dev->sd);
+ int ret = 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_VFLIP:
+ dev_dbg(&client->dev, "%s: CID_VFLIP:%d.\n",
+ __func__, ctrl->val);
+ ret = gc0310_v_flip(&dev->sd, ctrl->val);
+ break;
+ case V4L2_CID_HFLIP:
+ dev_dbg(&client->dev, "%s: CID_HFLIP:%d.\n",
+ __func__, ctrl->val);
+ ret = gc0310_h_flip(&dev->sd, ctrl->val);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ return ret;
+}
+
+static int gc0310_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct gc0310_device *dev =
+ container_of(ctrl->handler, struct gc0310_device, ctrl_handler);
+ int ret = 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_EXPOSURE_ABSOLUTE:
+ ret = gc0310_q_exposure(&dev->sd, &ctrl->val);
+ break;
+ case V4L2_CID_FOCAL_ABSOLUTE:
+ ret = gc0310_g_focal(&dev->sd, &ctrl->val);
+ break;
+ case V4L2_CID_FNUMBER_ABSOLUTE:
+ ret = gc0310_g_fnumber(&dev->sd, &ctrl->val);
+ break;
+ case V4L2_CID_FNUMBER_RANGE:
+ ret = gc0310_g_fnumber_range(&dev->sd, &ctrl->val);
+ break;
+ case V4L2_CID_BIN_FACTOR_HORZ:
+ ret = gc0310_g_bin_factor_x(&dev->sd, &ctrl->val);
+ break;
+ case V4L2_CID_BIN_FACTOR_VERT:
+ ret = gc0310_g_bin_factor_y(&dev->sd, &ctrl->val);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops ctrl_ops = {
+ .s_ctrl = gc0310_s_ctrl,
+ .g_volatile_ctrl = gc0310_g_volatile_ctrl
+};
+
+struct v4l2_ctrl_config gc0310_controls[] = {
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_EXPOSURE_ABSOLUTE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "exposure",
+ .min = 0x0,
+ .max = 0xffff,
+ .step = 0x01,
+ .def = 0x00,
+ .flags = 0,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_VFLIP,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "Flip",
+ .min = 0,
+ .max = 1,
+ .step = 1,
+ .def = 0,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_HFLIP,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "Mirror",
+ .min = 0,
+ .max = 1,
+ .step = 1,
+ .def = 0,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_FOCAL_ABSOLUTE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "focal length",
+ .min = GC0310_FOCAL_LENGTH_DEFAULT,
+ .max = GC0310_FOCAL_LENGTH_DEFAULT,
+ .step = 0x01,
+ .def = GC0310_FOCAL_LENGTH_DEFAULT,
+ .flags = 0,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_FNUMBER_ABSOLUTE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "f-number",
+ .min = GC0310_F_NUMBER_DEFAULT,
+ .max = GC0310_F_NUMBER_DEFAULT,
+ .step = 0x01,
+ .def = GC0310_F_NUMBER_DEFAULT,
+ .flags = 0,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_FNUMBER_RANGE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "f-number range",
+ .min = GC0310_F_NUMBER_RANGE,
+ .max = GC0310_F_NUMBER_RANGE,
+ .step = 0x01,
+ .def = GC0310_F_NUMBER_RANGE,
+ .flags = 0,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_BIN_FACTOR_HORZ,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "horizontal binning factor",
+ .min = 0,
+ .max = GC0310_BIN_FACTOR_MAX,
+ .step = 1,
+ .def = 0,
+ .flags = 0,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_BIN_FACTOR_VERT,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "vertical binning factor",
+ .min = 0,
+ .max = GC0310_BIN_FACTOR_MAX,
+ .step = 1,
+ .def = 0,
+ .flags = 0,
+ },
+};
+
+static int gc0310_init(struct v4l2_subdev *sd)
+{
+ int ret;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct gc0310_device *dev = to_gc0310_sensor(sd);
+
+ pr_info("%s S\n", __func__);
+ mutex_lock(&dev->input_lock);
+
+ /* set initial registers */
+ ret = gc0310_write_reg_array(client, gc0310_reset_register);
+
+ /* restore settings */
+ gc0310_res = gc0310_res_preview;
+ N_RES = N_RES_PREVIEW;
+
+ mutex_unlock(&dev->input_lock);
+
+ pr_info("%s E\n", __func__);
+ return ret;
+}
+
+static int power_ctrl(struct v4l2_subdev *sd, bool flag)
+{
+ int ret = 0;
+ struct gc0310_device *dev = to_gc0310_sensor(sd);
+ if (!dev || !dev->platform_data)
+ return -ENODEV;
+
+ /* Non-gmin platforms use the legacy callback */
+ if (dev->platform_data->power_ctrl)
+ return dev->platform_data->power_ctrl(sd, flag);
+
+ if (flag) {
+ /* The upstream module driver (written to Crystal
+ * Cove) had this logic to pulse the rails low first.
+ * This appears to break things on the MRD7 with the
+ * X-Powers PMIC...
+ *
+ * ret = dev->platform_data->v1p8_ctrl(sd, 0);
+ * ret |= dev->platform_data->v2p8_ctrl(sd, 0);
+ * mdelay(50);
+ */
+ ret |= dev->platform_data->v1p8_ctrl(sd, 1);
+ ret |= dev->platform_data->v2p8_ctrl(sd, 1);
+ usleep_range(10000, 15000);
+ }
+
+ if (!flag || ret) {
+ ret |= dev->platform_data->v1p8_ctrl(sd, 0);
+ ret |= dev->platform_data->v2p8_ctrl(sd, 0);
+ }
+ return ret;
+}
+
+static int gpio_ctrl(struct v4l2_subdev *sd, bool flag)
+{
+ int ret;
+ struct gc0310_device *dev = to_gc0310_sensor(sd);
+
+ if (!dev || !dev->platform_data)
+ return -ENODEV;
+
+ /* Non-gmin platforms use the legacy callback */
+ if (dev->platform_data->gpio_ctrl)
+ return dev->platform_data->gpio_ctrl(sd, flag);
+
+ /* GPIO0 == "reset" (active low), GPIO1 == "power down" */
+ if (flag) {
+ /* Pulse reset, then release power down */
+ ret = dev->platform_data->gpio0_ctrl(sd, 0);
+ usleep_range(5000, 10000);
+ ret |= dev->platform_data->gpio0_ctrl(sd, 1);
+ usleep_range(10000, 15000);
+ ret |= dev->platform_data->gpio1_ctrl(sd, 0);
+ usleep_range(10000, 15000);
+ } else {
+ ret = dev->platform_data->gpio1_ctrl(sd, 1);
+ ret |= dev->platform_data->gpio0_ctrl(sd, 0);
+ }
+ return ret;
+}
+
+
+static int power_down(struct v4l2_subdev *sd);
+
+static int power_up(struct v4l2_subdev *sd)
+{
+ struct gc0310_device *dev = to_gc0310_sensor(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret;
+
+ pr_info("%s S\n", __func__);
+ if (!dev->platform_data) {
+ dev_err(&client->dev,
+ "no camera_sensor_platform_data");
+ return -ENODEV;
+ }
+
+ /* power control */
+ ret = power_ctrl(sd, 1);
+ if (ret)
+ goto fail_power;
+
+ /* flis clock control */
+ ret = dev->platform_data->flisclk_ctrl(sd, 1);
+ if (ret)
+ goto fail_clk;
+
+ /* gpio ctrl */
+ ret = gpio_ctrl(sd, 1);
+ if (ret) {
+ ret = gpio_ctrl(sd, 1);
+ if (ret)
+ goto fail_gpio;
+ }
+
+ msleep(100);
+
+ pr_info("%s E\n", __func__);
+ return 0;
+
+fail_gpio:
+ dev->platform_data->flisclk_ctrl(sd, 0);
+fail_clk:
+ power_ctrl(sd, 0);
+fail_power:
+ dev_err(&client->dev, "sensor power-up failed\n");
+
+ return ret;
+}
+
+static int power_down(struct v4l2_subdev *sd)
+{
+ struct gc0310_device *dev = to_gc0310_sensor(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret = 0;
+
+ if (!dev->platform_data) {
+ dev_err(&client->dev,
+ "no camera_sensor_platform_data");
+ return -ENODEV;
+ }
+
+ /* gpio ctrl */
+ ret = gpio_ctrl(sd, 0);
+ if (ret) {
+ ret = gpio_ctrl(sd, 0);
+ if (ret)
+ dev_err(&client->dev, "gpio failed 2\n");
+ }
+
+ ret = dev->platform_data->flisclk_ctrl(sd, 0);
+ if (ret)
+ dev_err(&client->dev, "flisclk failed\n");
+
+ /* power control */
+ ret = power_ctrl(sd, 0);
+ if (ret)
+ dev_err(&client->dev, "vprog failed.\n");
+
+ return ret;
+}
+
+static int gc0310_s_power(struct v4l2_subdev *sd, int on)
+{
+ int ret;
+ if (on == 0)
+ return power_down(sd);
+ else {
+ ret = power_up(sd);
+ if (!ret)
+ return gc0310_init(sd);
+ }
+ return ret;
+}
+
+/*
+ * distance - calculate the distance
+ * @res: resolution
+ * @w: width
+ * @h: height
+ *
+ * Get the gap between the resolution and w/h.
+ * Resolutions whose width/height are smaller than w/h are not considered.
+ * Returns the gap value, or -1 on failure.
+ */
+#define LARGEST_ALLOWED_RATIO_MISMATCH 800
+static int distance(struct gc0310_resolution *res, u32 w, u32 h)
+{
+ unsigned int w_ratio = (res->width << 13) / w;
+ unsigned int h_ratio;
+ int match;
+
+ if (h == 0)
+ return -1;
+ h_ratio = (res->height << 13) / h;
+ if (h_ratio == 0)
+ return -1;
+ match = abs(((w_ratio << 13) / h_ratio) - ((int)8192));
+
+ if ((w_ratio < (int)8192) || (h_ratio < (int)8192) ||
+ (match > LARGEST_ALLOWED_RATIO_MISMATCH))
+ return -1;
+
+ return w_ratio + h_ratio;
+}
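+/* Worked example of the Q13 fixed-point math above, using the native
+ * 656x496 mode and a requested 640x480 frame (integer division rounds
+ * down): w_ratio = 656 * 8192 / 640 = 8396 and
+ * h_ratio = 496 * 8192 / 480 = 8465.  Both are >= 8192, so the mode is
+ * not smaller than the request, and the aspect mismatch
+ * abs(8396 * 8192 / 8465 - 8192) = 67 is well under
+ * LARGEST_ALLOWED_RATIO_MISMATCH, so distance() returns
+ * 8396 + 8465 = 16861.
+ */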
+
+/* Return the nearest higher resolution index */
+static int nearest_resolution_index(int w, int h)
+{
+ int i;
+ int idx = -1;
+ int dist;
+ int min_dist = INT_MAX;
+ struct gc0310_resolution *tmp_res = NULL;
+
+ for (i = 0; i < N_RES; i++) {
+ tmp_res = &gc0310_res[i];
+ dist = distance(tmp_res, w, h);
+ if (dist == -1)
+ continue;
+ if (dist < min_dist) {
+ min_dist = dist;
+ idx = i;
+ }
+ }
+
+ return idx;
+}
+
+static int get_resolution_index(int w, int h)
+{
+ int i;
+
+ for (i = 0; i < N_RES; i++) {
+ if (w != gc0310_res[i].width)
+ continue;
+ if (h != gc0310_res[i].height)
+ continue;
+
+ return i;
+ }
+
+ return -1;
+}
+
+
+/* TODO: remove it. */
+static int startup(struct v4l2_subdev *sd)
+{
+ struct gc0310_device *dev = to_gc0310_sensor(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret = 0;
+
+ pr_info("%s S\n", __func__);
+
+ ret = gc0310_write_reg_array(client, gc0310_res[dev->fmt_idx].regs);
+ if (ret) {
+ dev_err(&client->dev, "gc0310 write register err.\n");
+ return ret;
+ }
+
+ pr_info("%s E\n", __func__);
+ return ret;
+}
+
+static int gc0310_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *format)
+{
+ struct v4l2_mbus_framefmt *fmt = &format->format;
+ struct gc0310_device *dev = to_gc0310_sensor(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct camera_mipi_info *gc0310_info = NULL;
+ int ret = 0;
+ int idx = 0;
+ pr_info("%s S\n", __func__);
+
+ if (format->pad)
+ return -EINVAL;
+
+ if (!fmt)
+ return -EINVAL;
+
+ gc0310_info = v4l2_get_subdev_hostdata(sd);
+ if (!gc0310_info)
+ return -EINVAL;
+
+ mutex_lock(&dev->input_lock);
+
+ idx = nearest_resolution_index(fmt->width, fmt->height);
+ if (idx == -1) {
+ /* return the largest resolution */
+ fmt->width = gc0310_res[N_RES - 1].width;
+ fmt->height = gc0310_res[N_RES - 1].height;
+ } else {
+ fmt->width = gc0310_res[idx].width;
+ fmt->height = gc0310_res[idx].height;
+ }
+ fmt->code = MEDIA_BUS_FMT_SGRBG8_1X8;
+
+ if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
+ cfg->try_fmt = *fmt;
+ mutex_unlock(&dev->input_lock);
+ return 0;
+ }
+
+ dev->fmt_idx = get_resolution_index(fmt->width, fmt->height);
+ if (dev->fmt_idx == -1) {
+ dev_err(&client->dev, "get resolution fail\n");
+ mutex_unlock(&dev->input_lock);
+ return -EINVAL;
+ }
+
+ dev_dbg(&client->dev, "%s: before gc0310_write_reg_array %s\n",
+ __func__, gc0310_res[dev->fmt_idx].desc);
+ ret = startup(sd);
+ if (ret) {
+ dev_err(&client->dev, "gc0310 startup err\n");
+ goto err;
+ }
+
+ ret = gc0310_get_intg_factor(client, gc0310_info,
+ &gc0310_res[dev->fmt_idx]);
+ if (ret) {
+ dev_err(&client->dev, "failed to get integration_factor\n");
+ goto err;
+ }
+
+ pr_info("%s E\n", __func__);
+err:
+ mutex_unlock(&dev->input_lock);
+ return ret;
+}
+
+static int gc0310_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *format)
+{
+ struct v4l2_mbus_framefmt *fmt = &format->format;
+ struct gc0310_device *dev = to_gc0310_sensor(sd);
+
+ if (format->pad)
+ return -EINVAL;
+
+ if (!fmt)
+ return -EINVAL;
+
+ fmt->width = gc0310_res[dev->fmt_idx].width;
+ fmt->height = gc0310_res[dev->fmt_idx].height;
+ fmt->code = MEDIA_BUS_FMT_SGRBG8_1X8;
+
+ return 0;
+}
+
+static int gc0310_detect(struct i2c_client *client)
+{
+ struct i2c_adapter *adapter = client->adapter;
+ u8 high, low;
+ int ret;
+ u16 id;
+
+ pr_info("%s S\n", __func__);
+ if (!i2c_check_functionality(adapter, I2C_FUNC_I2C))
+ return -ENODEV;
+
+ ret = gc0310_read_reg(client, GC0310_8BIT,
+ GC0310_SC_CMMN_CHIP_ID_H, &high);
+ if (ret) {
+ dev_err(&client->dev, "read sensor_id_high failed\n");
+ return -ENODEV;
+ }
+ ret = gc0310_read_reg(client, GC0310_8BIT,
+ GC0310_SC_CMMN_CHIP_ID_L, &low);
+ if (ret) {
+ dev_err(&client->dev, "read sensor_id_low failed\n");
+ return -ENODEV;
+ }
+ id = ((((u16) high) << 8) | (u16) low);
+ pr_info("sensor ID = 0x%x\n", id);
+
+ if (id != GC0310_ID) {
+ dev_err(&client->dev, "sensor ID error, read id = 0x%x, target id = 0x%x\n", id, GC0310_ID);
+ return -ENODEV;
+ }
+
+ dev_dbg(&client->dev, "detect gc0310 success\n");
+
+ pr_info("%s E\n", __func__);
+
+ return 0;
+}
+
+static int gc0310_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct gc0310_device *dev = to_gc0310_sensor(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret;
+
+ pr_info("%s S enable=%d\n", __func__, enable);
+ mutex_lock(&dev->input_lock);
+
+ if (enable) {
+ /* enable per frame MIPI and sensor ctrl reset */
+ ret = gc0310_write_reg(client, GC0310_8BIT,
+ 0xFE, 0x30);
+ if (ret) {
+ mutex_unlock(&dev->input_lock);
+ return ret;
+ }
+ }
+
+ ret = gc0310_write_reg(client, GC0310_8BIT,
+ GC0310_RESET_RELATED, GC0310_REGISTER_PAGE_3);
+ if (ret) {
+ mutex_unlock(&dev->input_lock);
+ return ret;
+ }
+
+ ret = gc0310_write_reg(client, GC0310_8BIT, GC0310_SW_STREAM,
+ enable ? GC0310_START_STREAMING :
+ GC0310_STOP_STREAMING);
+ if (ret) {
+ mutex_unlock(&dev->input_lock);
+ return ret;
+ }
+
+ ret = gc0310_write_reg(client, GC0310_8BIT,
+ GC0310_RESET_RELATED, GC0310_REGISTER_PAGE_0);
+ if (ret) {
+ mutex_unlock(&dev->input_lock);
+ return ret;
+ }
+
+ mutex_unlock(&dev->input_lock);
+ pr_info("%s E\n", __func__);
+ return ret;
+}
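+/* Note on the 0xFE writes above: GC0310_RESET_RELATED (0xFE) appears to
+ * double as the register-page selector on this sensor, so s_stream
+ * switches to page 3 to reach GC0310_SW_STREAM, writes the start/stop
+ * command, and then returns to page 0 where the normal control registers
+ * live.
+ */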
+
+
+static int gc0310_s_config(struct v4l2_subdev *sd,
+ int irq, void *platform_data)
+{
+ struct gc0310_device *dev = to_gc0310_sensor(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret = 0;
+
+ pr_info("%s S\n", __func__);
+ if (!platform_data)
+ return -ENODEV;
+
+ dev->platform_data =
+ (struct camera_sensor_platform_data *)platform_data;
+
+ mutex_lock(&dev->input_lock);
+ if (dev->platform_data->platform_init) {
+ ret = dev->platform_data->platform_init(client);
+ if (ret) {
+ dev_err(&client->dev, "platform init err\n");
+ goto platform_init_failed;
+ }
+ }
+ /* Power off the module, then power it on again later:
+ * the initial power-on done by the board may not follow
+ * the power-on sequence the module requires.
+ */
+ ret = power_down(sd);
+ if (ret) {
+ dev_err(&client->dev, "gc0310 power-off err.\n");
+ goto fail_power_off;
+ }
+
+ ret = power_up(sd);
+ if (ret) {
+ dev_err(&client->dev, "gc0310 power-up err.\n");
+ goto fail_power_on;
+ }
+
+ ret = dev->platform_data->csi_cfg(sd, 1);
+ if (ret)
+ goto fail_csi_cfg;
+
+ /* config & detect sensor */
+ ret = gc0310_detect(client);
+ if (ret) {
+ dev_err(&client->dev, "gc0310_detect err s_config.\n");
+ goto fail_csi_cfg;
+ }
+
+ /* turn off sensor, after probed */
+ ret = power_down(sd);
+ if (ret) {
+ dev_err(&client->dev, "gc0310 power-off err.\n");
+ goto fail_csi_cfg;
+ }
+ mutex_unlock(&dev->input_lock);
+
+ pr_info("%s E\n", __func__);
+ return 0;
+
+fail_csi_cfg:
+ dev->platform_data->csi_cfg(sd, 0);
+fail_power_on:
+ power_down(sd);
+ dev_err(&client->dev, "sensor power-gating failed\n");
+fail_power_off:
+ if (dev->platform_data->platform_deinit)
+ dev->platform_data->platform_deinit();
+platform_init_failed:
+ mutex_unlock(&dev->input_lock);
+ return ret;
+}
+
+static int gc0310_g_parm(struct v4l2_subdev *sd,
+ struct v4l2_streamparm *param)
+{
+ struct gc0310_device *dev = to_gc0310_sensor(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ if (!param)
+ return -EINVAL;
+
+ if (param->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+ dev_err(&client->dev, "unsupported buffer type.\n");
+ return -EINVAL;
+ }
+
+ memset(param, 0, sizeof(*param));
+ param->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+
+ if (dev->fmt_idx >= 0 && dev->fmt_idx < N_RES) {
+ param->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
+ param->parm.capture.timeperframe.numerator = 1;
+ param->parm.capture.capturemode = dev->run_mode;
+ param->parm.capture.timeperframe.denominator =
+ gc0310_res[dev->fmt_idx].fps;
+ }
+ return 0;
+}
+
+static int gc0310_s_parm(struct v4l2_subdev *sd,
+ struct v4l2_streamparm *param)
+{
+ struct gc0310_device *dev = to_gc0310_sensor(sd);
+ dev->run_mode = param->parm.capture.capturemode;
+
+ mutex_lock(&dev->input_lock);
+ switch (dev->run_mode) {
+ case CI_MODE_VIDEO:
+ gc0310_res = gc0310_res_video;
+ N_RES = N_RES_VIDEO;
+ break;
+ case CI_MODE_STILL_CAPTURE:
+ gc0310_res = gc0310_res_still;
+ N_RES = N_RES_STILL;
+ break;
+ default:
+ gc0310_res = gc0310_res_preview;
+ N_RES = N_RES_PREVIEW;
+ }
+ mutex_unlock(&dev->input_lock);
+ return 0;
+}
+
+static int gc0310_g_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_frame_interval *interval)
+{
+ struct gc0310_device *dev = to_gc0310_sensor(sd);
+
+ interval->interval.numerator = 1;
+ interval->interval.denominator = gc0310_res[dev->fmt_idx].fps;
+
+ return 0;
+}
+
+static int gc0310_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ if (code->index >= MAX_FMTS)
+ return -EINVAL;
+
+ code->code = MEDIA_BUS_FMT_SGRBG8_1X8;
+ return 0;
+}
+
+static int gc0310_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ int index = fse->index;
+
+ if (index >= N_RES)
+ return -EINVAL;
+
+ fse->min_width = gc0310_res[index].width;
+ fse->min_height = gc0310_res[index].height;
+ fse->max_width = gc0310_res[index].width;
+ fse->max_height = gc0310_res[index].height;
+
+ return 0;
+
+}
+
+
+static int gc0310_g_skip_frames(struct v4l2_subdev *sd, u32 *frames)
+{
+ struct gc0310_device *dev = to_gc0310_sensor(sd);
+
+ mutex_lock(&dev->input_lock);
+ *frames = gc0310_res[dev->fmt_idx].skip_frames;
+ mutex_unlock(&dev->input_lock);
+
+ return 0;
+}
+
+static const struct v4l2_subdev_sensor_ops gc0310_sensor_ops = {
+ .g_skip_frames = gc0310_g_skip_frames,
+};
+
+static const struct v4l2_subdev_video_ops gc0310_video_ops = {
+ .s_stream = gc0310_s_stream,
+ .g_parm = gc0310_g_parm,
+ .s_parm = gc0310_s_parm,
+ .g_frame_interval = gc0310_g_frame_interval,
+};
+
+static const struct v4l2_subdev_core_ops gc0310_core_ops = {
+ .s_power = gc0310_s_power,
+ .ioctl = gc0310_ioctl,
+};
+
+static const struct v4l2_subdev_pad_ops gc0310_pad_ops = {
+ .enum_mbus_code = gc0310_enum_mbus_code,
+ .enum_frame_size = gc0310_enum_frame_size,
+ .get_fmt = gc0310_get_fmt,
+ .set_fmt = gc0310_set_fmt,
+};
+
+static const struct v4l2_subdev_ops gc0310_ops = {
+ .core = &gc0310_core_ops,
+ .video = &gc0310_video_ops,
+ .pad = &gc0310_pad_ops,
+ .sensor = &gc0310_sensor_ops,
+};
+
+static int gc0310_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct gc0310_device *dev = to_gc0310_sensor(sd);
+ dev_dbg(&client->dev, "gc0310_remove...\n");
+
+ if (dev->platform_data->platform_deinit)
+ dev->platform_data->platform_deinit();
+
+ dev->platform_data->csi_cfg(sd, 0);
+
+ v4l2_device_unregister_subdev(sd);
+ media_entity_cleanup(&dev->sd.entity);
+ v4l2_ctrl_handler_free(&dev->ctrl_handler);
+ kfree(dev);
+
+ return 0;
+}
+
+static int gc0310_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct gc0310_device *dev;
+ int ret;
+ void *pdata;
+ unsigned int i;
+
+ pr_info("%s S\n", __func__);
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev) {
+ dev_err(&client->dev, "out of memory\n");
+ return -ENOMEM;
+ }
+
+ mutex_init(&dev->input_lock);
+
+ dev->fmt_idx = 0;
+ v4l2_i2c_subdev_init(&(dev->sd), client, &gc0310_ops);
+
+ if (ACPI_COMPANION(&client->dev))
+ pdata = gmin_camera_platform_data(&dev->sd,
+ ATOMISP_INPUT_FORMAT_RAW_8,
+ atomisp_bayer_order_grbg);
+ else
+ pdata = client->dev.platform_data;
+
+ if (!pdata) {
+ ret = -EINVAL;
+ goto out_free;
+ }
+
+ ret = gc0310_s_config(&dev->sd, client->irq, pdata);
+ if (ret)
+ goto out_free;
+
+ ret = atomisp_register_i2c_module(&dev->sd, pdata, RAW_CAMERA);
+ if (ret)
+ goto out_free;
+
+ dev->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ dev->pad.flags = MEDIA_PAD_FL_SOURCE;
+ dev->format.code = MEDIA_BUS_FMT_SGRBG8_1X8;
+ dev->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
+ ret =
+ v4l2_ctrl_handler_init(&dev->ctrl_handler,
+ ARRAY_SIZE(gc0310_controls));
+ if (ret) {
+ gc0310_remove(client);
+ return ret;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(gc0310_controls); i++)
+ v4l2_ctrl_new_custom(&dev->ctrl_handler, &gc0310_controls[i],
+ NULL);
+
+ if (dev->ctrl_handler.error) {
+ gc0310_remove(client);
+ return dev->ctrl_handler.error;
+ }
+
+ /* Use same lock for controls as for everything else. */
+ dev->ctrl_handler.lock = &dev->input_lock;
+ dev->sd.ctrl_handler = &dev->ctrl_handler;
+
+ ret = media_entity_pads_init(&dev->sd.entity, 1, &dev->pad);
+ if (ret)
+ gc0310_remove(client);
+
+ pr_info("%s E\n", __func__);
+ return ret;
+out_free:
+ v4l2_device_unregister_subdev(&dev->sd);
+ kfree(dev);
+ return ret;
+}
+
+static struct acpi_device_id gc0310_acpi_match[] = {
+ {"XXGC0310"},
+ {},
+};
+
+MODULE_DEVICE_TABLE(acpi, gc0310_acpi_match);
+
+MODULE_DEVICE_TABLE(i2c, gc0310_id);
+static struct i2c_driver gc0310_driver = {
+ .driver = {
+ .name = GC0310_NAME,
+ .acpi_match_table = ACPI_PTR(gc0310_acpi_match),
+ },
+ .probe = gc0310_probe,
+ .remove = gc0310_remove,
+ .id_table = gc0310_id,
+};
+
+static int init_gc0310(void)
+{
+ return i2c_add_driver(&gc0310_driver);
+}
+
+static void exit_gc0310(void)
+{
+ i2c_del_driver(&gc0310_driver);
+}
+
+module_init(init_gc0310);
+module_exit(exit_gc0310);
+
+MODULE_AUTHOR("Lai, Angie <angie.lai@intel.com>");
+MODULE_DESCRIPTION("A low-level driver for GalaxyCore GC0310 sensors");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/media/atomisp/i2c/gc0310.h b/drivers/staging/media/atomisp/i2c/gc0310.h
new file mode 100644
index 000000000000..f31eb277f542
--- /dev/null
+++ b/drivers/staging/media/atomisp/i2c/gc0310.h
@@ -0,0 +1,459 @@
+/*
+ * Support for GalaxyCore GC0310 VGA camera sensor.
+ *
+ * Copyright (c) 2013 Intel Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+
+#ifndef __GC0310_H__
+#define __GC0310_H__
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/i2c.h>
+#include <linux/acpi.h>
+#include <linux/delay.h>
+#include <linux/videodev2.h>
+#include <linux/spinlock.h>
+#include <media/v4l2-subdev.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ctrls.h>
+#include <linux/v4l2-mediabus.h>
+#include <media/media-entity.h>
+
+#include "../include/linux/atomisp_platform.h"
+
+#define GC0310_NAME "gc0310"
+
+/* Defines for register writes and register array processing */
+#define I2C_MSG_LENGTH 1
+#define I2C_RETRY_COUNT 5
+
+#define GC0310_FOCAL_LENGTH_NUM 278 /*2.78mm*/
+#define GC0310_FOCAL_LENGTH_DEM 100
+#define GC0310_F_NUMBER_DEFAULT_NUM 26
+#define GC0310_F_NUMBER_DEM 10
+
+#define MAX_FMTS 1
+
+/*
+ * focal length bits definition:
+ * bits 31-16: numerator, bits 15-0: denominator
+ */
+#define GC0310_FOCAL_LENGTH_DEFAULT 0x1160064
+
+/*
+ * current f-number bits definition:
+ * bits 31-16: numerator, bits 15-0: denominator
+ */
+#define GC0310_F_NUMBER_DEFAULT 0x1a000a
+
+/*
+ * f-number range bits definition:
+ * bits 31-24: max f-number numerator
+ * bits 23-16: max f-number denominator
+ * bits 15-8: min f-number numerator
+ * bits 7-0: min f-number denominator
+ */
+#define GC0310_F_NUMBER_RANGE 0x1a0a1a0a
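+/* Decoded, the packed constants above are:
+ *   GC0310_FOCAL_LENGTH_DEFAULT 0x1160064  = (278 << 16) | 100 -> 2.78mm
+ *   GC0310_F_NUMBER_DEFAULT     0x1a000a   = (26 << 16) | 10   -> f/2.6
+ *   GC0310_F_NUMBER_RANGE       0x1a0a1a0a = 26/10 as both min and max,
+ *                                            i.e. a fixed f/2.6 lens
+ */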
+#define GC0310_ID 0xa310
+
+#define GC0310_RESET_RELATED 0xFE
+#define GC0310_REGISTER_PAGE_0 0x0
+#define GC0310_REGISTER_PAGE_3 0x3
+
+#define GC0310_FINE_INTG_TIME_MIN 0
+#define GC0310_FINE_INTG_TIME_MAX_MARGIN 0
+#define GC0310_COARSE_INTG_TIME_MIN 1
+#define GC0310_COARSE_INTG_TIME_MAX_MARGIN 6
+
+/*
+ * GC0310 System control registers
+ */
+#define GC0310_SW_STREAM 0x10
+
+#define GC0310_SC_CMMN_CHIP_ID_H 0xf0
+#define GC0310_SC_CMMN_CHIP_ID_L 0xf1
+
+#define GC0310_AEC_PK_EXPO_H 0x03
+#define GC0310_AEC_PK_EXPO_L 0x04
+#define GC0310_AGC_ADJ 0x48
+#define GC0310_DGC_ADJ 0x71
+#if 0
+#define GC0310_GROUP_ACCESS 0x3208
+#endif
+
+#define GC0310_H_CROP_START_H 0x09
+#define GC0310_H_CROP_START_L 0x0A
+#define GC0310_V_CROP_START_H 0x0B
+#define GC0310_V_CROP_START_L 0x0C
+#define GC0310_H_OUTSIZE_H 0x0F
+#define GC0310_H_OUTSIZE_L 0x10
+#define GC0310_V_OUTSIZE_H 0x0D
+#define GC0310_V_OUTSIZE_L 0x0E
+#define GC0310_H_BLANKING_H 0x05
+#define GC0310_H_BLANKING_L 0x06
+#define GC0310_V_BLANKING_H 0x07
+#define GC0310_V_BLANKING_L 0x08
+#define GC0310_SH_DELAY 0x11
+
+#define GC0310_START_STREAMING 0x94 /* 8-bit enable */
+#define GC0310_STOP_STREAMING 0x0 /* 8-bit disable */
+
+#define GC0310_BIN_FACTOR_MAX 3
+
+struct regval_list {
+ u16 reg_num;
+ u8 value;
+};
+
+struct gc0310_resolution {
+ u8 *desc;
+ const struct gc0310_reg *regs;
+ int res;
+ int width;
+ int height;
+ int fps;
+ int pix_clk_freq;
+ u32 skip_frames;
+ u16 pixels_per_line;
+ u16 lines_per_frame;
+ u8 bin_factor_x;
+ u8 bin_factor_y;
+ u8 bin_mode;
+ bool used;
+};
+
+struct gc0310_format {
+ u8 *desc;
+ u32 pixelformat;
+ struct gc0310_reg *regs;
+};
+
+/*
+ * gc0310 device structure.
+ */
+struct gc0310_device {
+ struct v4l2_subdev sd;
+ struct media_pad pad;
+ struct v4l2_mbus_framefmt format;
+ struct mutex input_lock;
+ struct v4l2_ctrl_handler ctrl_handler;
+
+ struct camera_sensor_platform_data *platform_data;
+ int vt_pix_clk_freq_mhz;
+ int fmt_idx;
+ int run_mode;
+ u8 res;
+ u8 type;
+};
+
+enum gc0310_tok_type {
+ GC0310_8BIT = 0x0001,
+ GC0310_TOK_TERM = 0xf000, /* terminating token for reg list */
+ GC0310_TOK_DELAY = 0xfe00, /* delay token for reg list */
+ GC0310_TOK_MASK = 0xfff0
+};
+
+/**
+ * struct gc0310_reg - GC0310 sensor register format
+ * @type: type of the register
+ * @reg: 8-bit address of the register
+ * @val: 8-bit register value
+ *
+ * Define a structure for sensor register initialization values
+ */
+struct gc0310_reg {
+ enum gc0310_tok_type type;
+ u8 reg;
+ u8 val;
+};
+
+#define to_gc0310_sensor(x) container_of(x, struct gc0310_device, sd)
+
+#define GC0310_MAX_WRITE_BUF_SIZE 30
+
+struct gc0310_write_buffer {
+ u8 addr;
+ u8 data[GC0310_MAX_WRITE_BUF_SIZE];
+};
+
+struct gc0310_write_ctrl {
+ int index;
+ struct gc0310_write_buffer buffer;
+};
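+/* The write-control buffer batches writes to consecutive registers into a
+ * single I2C transfer: buffer.addr holds the first register address and
+ * buffer.data accumulates up to GC0310_MAX_WRITE_BUF_SIZE values before
+ * being flushed.  The gc2235 driver added by the same patch uses the same
+ * scheme (see __gc2235_buf_reg_array() in gc2235.c).
+ */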
+
+static const struct i2c_device_id gc0310_id[] = {
+ {GC0310_NAME, 0},
+ {}
+};
+
+/*
+ * Register settings for various resolutions
+ */
+static const struct gc0310_reg gc0310_reset_register[] = {
+/////////////////////////////////////////////////
+///////////////// system reg /////////////////
+/////////////////////////////////////////////////
+ {GC0310_8BIT, 0xfe, 0xf0},
+ {GC0310_8BIT, 0xfe, 0xf0},
+ {GC0310_8BIT, 0xfe, 0x00},
+
+ {GC0310_8BIT, 0xfc, 0x0e}, //4e
+ {GC0310_8BIT, 0xfc, 0x0e}, //16//4e // [0]apwd [6]regf_clk_gate
+ {GC0310_8BIT, 0xf2, 0x80}, //sync output
+ {GC0310_8BIT, 0xf3, 0x00}, //1f//01 data output
+ {GC0310_8BIT, 0xf7, 0x33}, //f9
+ {GC0310_8BIT, 0xf8, 0x05}, //00
+ {GC0310_8BIT, 0xf9, 0x0e}, // 0x8e //0f
+ {GC0310_8BIT, 0xfa, 0x11},
+
+/////////////////////////////////////////////////
+/////////////////// MIPI ////////////////////
+/////////////////////////////////////////////////
+ {GC0310_8BIT, 0xfe, 0x03},
+ {GC0310_8BIT, 0x01, 0x03}, ///mipi 1lane
+ {GC0310_8BIT, 0x02, 0x22}, // 0x33
+ {GC0310_8BIT, 0x03, 0x94},
+ {GC0310_8BIT, 0x04, 0x01}, // fifo_prog
+ {GC0310_8BIT, 0x05, 0x00}, //fifo_prog
+ {GC0310_8BIT, 0x06, 0x80}, //b0 //YUV ISP data
+ {GC0310_8BIT, 0x11, 0x2a},//1e //LDI set YUV422
+ {GC0310_8BIT, 0x12, 0x90},//00 //04 //00 //04//00 //LWC[7:0] //
+ {GC0310_8BIT, 0x13, 0x02},//05 //05 //LWC[15:8]
+ {GC0310_8BIT, 0x15, 0x12}, // 0x10 //DPHYY_MODE read_ready
+ {GC0310_8BIT, 0x17, 0x01},
+ {GC0310_8BIT, 0x40, 0x08},
+ {GC0310_8BIT, 0x41, 0x00},
+ {GC0310_8BIT, 0x42, 0x00},
+ {GC0310_8BIT, 0x43, 0x00},
+ {GC0310_8BIT, 0x21, 0x02}, // 0x01
+ {GC0310_8BIT, 0x22, 0x02}, // 0x01
+ {GC0310_8BIT, 0x23, 0x01}, // 0x05 //Nor:0x05 DOU:0x06
+ {GC0310_8BIT, 0x29, 0x00},
+ {GC0310_8BIT, 0x2A, 0x25}, // 0x05 //data zero 0x7a de
+ {GC0310_8BIT, 0x2B, 0x02},
+
+ {GC0310_8BIT, 0xfe, 0x00},
+
+/////////////////////////////////////////////////
+///////////////// CISCTL reg /////////////////
+/////////////////////////////////////////////////
+ {GC0310_8BIT, 0x00, 0x2f}, //2f//0f//02//01
+ {GC0310_8BIT, 0x01, 0x0f}, //06
+ {GC0310_8BIT, 0x02, 0x04},
+ {GC0310_8BIT, 0x4f, 0x00}, //AEC 0FF
+ {GC0310_8BIT, 0x03, 0x01}, // 0x03 //04
+ {GC0310_8BIT, 0x04, 0xc0}, // 0xe8 //58
+ {GC0310_8BIT, 0x05, 0x00},
+ {GC0310_8BIT, 0x06, 0xb2}, // 0x0a //HB
+ {GC0310_8BIT, 0x07, 0x00},
+ {GC0310_8BIT, 0x08, 0x0c}, // 0x89 //VB
+ {GC0310_8BIT, 0x09, 0x00}, //row start
+ {GC0310_8BIT, 0x0a, 0x00}, //
+ {GC0310_8BIT, 0x0b, 0x00}, //col start
+ {GC0310_8BIT, 0x0c, 0x00},
+ {GC0310_8BIT, 0x0d, 0x01}, //height
+ {GC0310_8BIT, 0x0e, 0xf2}, // 0xf7 //height
+ {GC0310_8BIT, 0x0f, 0x02}, //width
+ {GC0310_8BIT, 0x10, 0x94}, // 0xa0 //height
+ {GC0310_8BIT, 0x17, 0x14},
+ {GC0310_8BIT, 0x18, 0x1a}, //0a//[4]double reset
+ {GC0310_8BIT, 0x19, 0x14}, //AD pipeline
+ {GC0310_8BIT, 0x1b, 0x48},
+ {GC0310_8BIT, 0x1e, 0x6b}, //3b//col bias
+ {GC0310_8BIT, 0x1f, 0x28}, //20//00//08//txlow
+ {GC0310_8BIT, 0x20, 0x89}, //88//0c//[3:2]DA15
+ {GC0310_8BIT, 0x21, 0x49}, //48//[3] txhigh
+ {GC0310_8BIT, 0x22, 0xb0},
+ {GC0310_8BIT, 0x23, 0x04}, //[1:0]vcm_r
+ {GC0310_8BIT, 0x24, 0x16}, //15
+ {GC0310_8BIT, 0x34, 0x20}, //[6:4] rsg high//range
+
+/////////////////////////////////////////////////
+//////////////////// BLK ////////////////////
+/////////////////////////////////////////////////
+ {GC0310_8BIT, 0x26, 0x23}, //[1]dark_current_en [0]offset_en
+ {GC0310_8BIT, 0x28, 0xff}, //BLK_limit_value
+ {GC0310_8BIT, 0x29, 0x00}, //global offset
+ {GC0310_8BIT, 0x33, 0x18}, //offset_ratio
+ {GC0310_8BIT, 0x37, 0x20}, //dark_current_ratio
+ {GC0310_8BIT, 0x2a, 0x00},
+ {GC0310_8BIT, 0x2b, 0x00},
+ {GC0310_8BIT, 0x2c, 0x00},
+ {GC0310_8BIT, 0x2d, 0x00},
+ {GC0310_8BIT, 0x2e, 0x00},
+ {GC0310_8BIT, 0x2f, 0x00},
+ {GC0310_8BIT, 0x30, 0x00},
+ {GC0310_8BIT, 0x31, 0x00},
+ {GC0310_8BIT, 0x47, 0x80}, //a7
+ {GC0310_8BIT, 0x4e, 0x66}, //select_row
+ {GC0310_8BIT, 0xa8, 0x02}, //win_width_dark, same with crop_win_width
+ {GC0310_8BIT, 0xa9, 0x80},
+
+/////////////////////////////////////////////////
+////////////////// ISP reg ///////////////////
+/////////////////////////////////////////////////
+ {GC0310_8BIT, 0x40, 0x06}, // 0xff //ff //48
+ {GC0310_8BIT, 0x41, 0x00}, // 0x21 //00//[0]curve_en
+ {GC0310_8BIT, 0x42, 0x04}, // 0xcf //0a//[1]awn_en
+ {GC0310_8BIT, 0x44, 0x18}, // 0x18 //02
+ {GC0310_8BIT, 0x46, 0x02}, // 0x03 //sync
+ {GC0310_8BIT, 0x49, 0x03},
+ {GC0310_8BIT, 0x4c, 0x20}, //00[5]pretect exp
+ {GC0310_8BIT, 0x50, 0x01}, //crop enable
+ {GC0310_8BIT, 0x51, 0x00},
+ {GC0310_8BIT, 0x52, 0x00},
+ {GC0310_8BIT, 0x53, 0x00},
+ {GC0310_8BIT, 0x54, 0x01},
+ {GC0310_8BIT, 0x55, 0x01}, //crop window height
+ {GC0310_8BIT, 0x56, 0xf0},
+ {GC0310_8BIT, 0x57, 0x02}, //crop window width
+ {GC0310_8BIT, 0x58, 0x90},
+
+/////////////////////////////////////////////////
+/////////////////// GAIN ////////////////////
+/////////////////////////////////////////////////
+ {GC0310_8BIT, 0x70, 0x70}, //70 //80//global gain
+ {GC0310_8BIT, 0x71, 0x20}, // pregain gain
+ {GC0310_8BIT, 0x72, 0x40}, // post gain
+ {GC0310_8BIT, 0x5a, 0x84}, //84//analog gain 0
+ {GC0310_8BIT, 0x5b, 0xc9}, //c9
+ {GC0310_8BIT, 0x5c, 0xed}, //ed//not use pga gain highest level
+ {GC0310_8BIT, 0x77, 0x40}, // R gain 0x74 //awb gain
+ {GC0310_8BIT, 0x78, 0x40}, // G gain
+ {GC0310_8BIT, 0x79, 0x40}, // B gain 0x5f
+
+ {GC0310_8BIT, 0x48, 0x00},
+ {GC0310_8BIT, 0xfe, 0x01},
+ {GC0310_8BIT, 0x0a, 0x45}, //[7]col gain mode
+
+ {GC0310_8BIT, 0x3e, 0x40},
+ {GC0310_8BIT, 0x3f, 0x5c},
+ {GC0310_8BIT, 0x40, 0x7b},
+ {GC0310_8BIT, 0x41, 0xbd},
+ {GC0310_8BIT, 0x42, 0xf6},
+ {GC0310_8BIT, 0x43, 0x63},
+ {GC0310_8BIT, 0x03, 0x60},
+ {GC0310_8BIT, 0x44, 0x03},
+
+/////////////////////////////////////////////////
+///////////////// dark sun //////////////////
+/////////////////////////////////////////////////
+ {GC0310_8BIT, 0xfe, 0x01},
+ {GC0310_8BIT, 0x45, 0xa4}, // 0xf7
+ {GC0310_8BIT, 0x46, 0xf0}, // 0xff //f0//sun value th
+ {GC0310_8BIT, 0x48, 0x03}, //sun mode
+ {GC0310_8BIT, 0x4f, 0x60}, //sun_clamp
+ {GC0310_8BIT, 0xfe, 0x00},
+
+ {GC0310_TOK_TERM, 0, 0},
+};
+
+static struct gc0310_reg const gc0310_VGA_30fps[] = {
+ {GC0310_8BIT, 0xfe, 0x00},
+ {GC0310_8BIT, 0x0d, 0x01}, //height
+ {GC0310_8BIT, 0x0e, 0xf2}, // 0xf7 //height
+ {GC0310_8BIT, 0x0f, 0x02}, //width
+ {GC0310_8BIT, 0x10, 0x94}, // 0xa0 //height
+
+ {GC0310_8BIT, 0x50, 0x01}, //crop enable
+ {GC0310_8BIT, 0x51, 0x00},
+ {GC0310_8BIT, 0x52, 0x00},
+ {GC0310_8BIT, 0x53, 0x00},
+ {GC0310_8BIT, 0x54, 0x01},
+ {GC0310_8BIT, 0x55, 0x01}, //crop window height
+ {GC0310_8BIT, 0x56, 0xf0},
+ {GC0310_8BIT, 0x57, 0x02}, //crop window width
+ {GC0310_8BIT, 0x58, 0x90},
+
+ {GC0310_8BIT, 0xfe, 0x03},
+ {GC0310_8BIT, 0x12, 0x90},//00 //04 //00 //04//00 //LWC[7:0] //
+ {GC0310_8BIT, 0x13, 0x02},//05 //05 //LWC[15:8]
+
+ {GC0310_8BIT, 0xfe, 0x00},
+
+ {GC0310_TOK_TERM, 0, 0},
+};
+
+
+struct gc0310_resolution gc0310_res_preview[] = {
+ {
+ .desc = "gc0310_VGA_30fps",
+ .width = 656, // 648,
+ .height = 496, // 488,
+ .fps = 30,
+ //.pix_clk_freq = 73,
+ .used = 0,
+#if 0
+ .pixels_per_line = 0x0314,
+ .lines_per_frame = 0x0213,
+#endif
+ .bin_factor_x = 1,
+ .bin_factor_y = 1,
+ .bin_mode = 0,
+ .skip_frames = 2,
+ .regs = gc0310_VGA_30fps,
+ },
+};
+#define N_RES_PREVIEW (ARRAY_SIZE(gc0310_res_preview))
+
+struct gc0310_resolution gc0310_res_still[] = {
+ {
+ .desc = "gc0310_VGA_30fps",
+ .width = 656, // 648,
+ .height = 496, // 488,
+ .fps = 30,
+ //.pix_clk_freq = 73,
+ .used = 0,
+#if 0
+ .pixels_per_line = 0x0314,
+ .lines_per_frame = 0x0213,
+#endif
+ .bin_factor_x = 1,
+ .bin_factor_y = 1,
+ .bin_mode = 0,
+ .skip_frames = 2,
+ .regs = gc0310_VGA_30fps,
+ },
+};
+#define N_RES_STILL (ARRAY_SIZE(gc0310_res_still))
+
+struct gc0310_resolution gc0310_res_video[] = {
+ {
+ .desc = "gc0310_VGA_30fps",
+ .width = 656, // 648,
+ .height = 496, // 488,
+ .fps = 30,
+ //.pix_clk_freq = 73,
+ .used = 0,
+#if 0
+ .pixels_per_line = 0x0314,
+ .lines_per_frame = 0x0213,
+#endif
+ .bin_factor_x = 1,
+ .bin_factor_y = 1,
+ .bin_mode = 0,
+ .skip_frames = 2,
+ .regs = gc0310_VGA_30fps,
+ },
+};
+#define N_RES_VIDEO (ARRAY_SIZE(gc0310_res_video))
+
+static struct gc0310_resolution *gc0310_res = gc0310_res_preview;
+static int N_RES = N_RES_PREVIEW;
+#endif
+
diff --git a/drivers/staging/media/atomisp/i2c/gc2235.c b/drivers/staging/media/atomisp/i2c/gc2235.c
new file mode 100644
index 000000000000..50f431729b6c
--- /dev/null
+++ b/drivers/staging/media/atomisp/i2c/gc2235.c
@@ -0,0 +1,1219 @@
+/*
+ * Support for GalaxyCore GC2235 2M camera sensor.
+ *
+ * Copyright (c) 2014 Intel Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/kmod.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/gpio.h>
+#include <linux/moduleparam.h>
+#include <media/v4l2-device.h>
+#include "../include/linux/atomisp_gmin_platform.h"
+#include <linux/acpi.h>
+#include <linux/io.h>
+
+#include "gc2235.h"
+
+/* i2c read/write stuff */
+static int gc2235_read_reg(struct i2c_client *client,
+ u16 data_length, u16 reg, u16 *val)
+{
+ int err;
+ struct i2c_msg msg[2];
+ unsigned char data[6];
+
+ if (!client->adapter) {
+ dev_err(&client->dev, "%s error, no client->adapter\n",
+ __func__);
+ return -ENODEV;
+ }
+
+ if (data_length != GC2235_8BIT) {
+ dev_err(&client->dev, "%s error, invalid data length\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ memset(msg, 0, sizeof(msg));
+
+ msg[0].addr = client->addr;
+ msg[0].flags = 0;
+ msg[0].len = 1;
+ msg[0].buf = data;
+
+ /* one-byte register address goes out first */
+ data[0] = (u8)(reg & 0xff);
+
+ msg[1].addr = client->addr;
+ msg[1].len = data_length;
+ msg[1].flags = I2C_M_RD;
+ msg[1].buf = data;
+
+ err = i2c_transfer(client->adapter, msg, 2);
+ if (err != 2) {
+ if (err >= 0)
+ err = -EIO;
+ dev_err(&client->dev,
+ "read from offset 0x%x error %d", reg, err);
+ return err;
+ }
+
+ *val = 0;
+ /* high byte comes first */
+ if (data_length == GC2235_8BIT)
+ *val = (u8)data[0];
+
+ return 0;
+}
+
+static int gc2235_i2c_write(struct i2c_client *client, u16 len, u8 *data)
+{
+ struct i2c_msg msg;
+ const int num_msg = 1;
+ int ret;
+
+ msg.addr = client->addr;
+ msg.flags = 0;
+ msg.len = len;
+ msg.buf = data;
+ ret = i2c_transfer(client->adapter, &msg, 1);
+
+ return ret == num_msg ? 0 : -EIO;
+}
+
+static int gc2235_write_reg(struct i2c_client *client, u16 data_length,
+ u8 reg, u8 val)
+{
+ int ret;
+ unsigned char data[4] = {0};
+ const u16 len = data_length + sizeof(u8); /* 8-bit address + data */
+
+ if (data_length != GC2235_8BIT) {
+ dev_err(&client->dev,
+ "%s error, invalid data_length\n", __func__);
+ return -EINVAL;
+ }
+
+ /* register address goes out first, then the value */
+ data[0] = reg;
+ data[1] = val;
+
+ ret = gc2235_i2c_write(client, len, data);
+ if (ret)
+ dev_err(&client->dev,
+ "write error: wrote 0x%x to offset 0x%x error %d",
+ val, reg, ret);
+
+ return ret;
+}
+
+static int __gc2235_flush_reg_array(struct i2c_client *client,
+ struct gc2235_write_ctrl *ctrl)
+{
+ u16 size;
+
+ if (ctrl->index == 0)
+ return 0;
+
+ size = sizeof(u8) + ctrl->index; /* 8-bit address + data */
+ ctrl->index = 0;
+
+ return gc2235_i2c_write(client, size, (u8 *)&ctrl->buffer);
+}
+
+static int __gc2235_buf_reg_array(struct i2c_client *client,
+ struct gc2235_write_ctrl *ctrl,
+ const struct gc2235_reg *next)
+{
+ int size;
+
+ if (next->type != GC2235_8BIT)
+ return -EINVAL;
+
+ size = 1;
+ ctrl->buffer.data[ctrl->index] = (u8)next->val;
+
+ /* When first item is added, we need to store its starting address */
+ if (ctrl->index == 0)
+ ctrl->buffer.addr = next->reg;
+
+ ctrl->index += size;
+
+ /*
+ * If the buffer has no room left for another value, flush it
+ * now so the next item is not lost.
+ */
+ if (ctrl->index + sizeof(u8) >= GC2235_MAX_WRITE_BUF_SIZE)
+ return __gc2235_flush_reg_array(client, ctrl);
+
+ return 0;
+}
+static int __gc2235_write_reg_is_consecutive(struct i2c_client *client,
+ struct gc2235_write_ctrl *ctrl,
+ const struct gc2235_reg *next)
+{
+ if (ctrl->index == 0)
+ return 1;
+
+ return ctrl->buffer.addr + ctrl->index == next->reg;
+}
+static int gc2235_write_reg_array(struct i2c_client *client,
+ const struct gc2235_reg *reglist)
+{
+ const struct gc2235_reg *next = reglist;
+ struct gc2235_write_ctrl ctrl;
+ int err;
+
+ ctrl.index = 0;
+ for (; next->type != GC2235_TOK_TERM; next++) {
+ switch (next->type & GC2235_TOK_MASK) {
+ case GC2235_TOK_DELAY:
+ err = __gc2235_flush_reg_array(client, &ctrl);
+ if (err)
+ return err;
+ msleep(next->val);
+ break;
+ default:
+ /*
+ * If the next address is not consecutive, the buffered data
+ * needs to be flushed before proceeding.
+ */
+ if (!__gc2235_write_reg_is_consecutive(client, &ctrl,
+ next)) {
+ err = __gc2235_flush_reg_array(client, &ctrl);
+ if (err)
+ return err;
+ }
+ err = __gc2235_buf_reg_array(client, &ctrl, next);
+ if (err) {
+ dev_err(&client->dev, "%s: write error, aborted\n",
+ __func__);
+ return err;
+ }
+ break;
+ }
+ }
+
+ return __gc2235_flush_reg_array(client, &ctrl);
+}
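+/* Illustration of the batching above (register addresses are illustrative):
+ * a run of entries at consecutive addresses such as 0x0d, 0x0e, 0x0f is
+ * buffered and sent as one I2C transfer starting at 0x0d; a following
+ * entry at a non-consecutive address (e.g. 0xfe) forces a flush before a
+ * new buffer is started.  A GC2235_TOK_DELAY entry also flushes, then
+ * sleeps for next->val milliseconds.
+ */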
+
+static int gc2235_g_focal(struct v4l2_subdev *sd, s32 *val)
+{
+ *val = (GC2235_FOCAL_LENGTH_NUM << 16) | GC2235_FOCAL_LENGTH_DEM;
+ return 0;
+}
+
+static int gc2235_g_fnumber(struct v4l2_subdev *sd, s32 *val)
+{
+ /* const f-number for the gc2235 */
+ *val = (GC2235_F_NUMBER_DEFAULT_NUM << 16) | GC2235_F_NUMBER_DEM;
+ return 0;
+}
+
+static int gc2235_g_fnumber_range(struct v4l2_subdev *sd, s32 *val)
+{
+ *val = (GC2235_F_NUMBER_DEFAULT_NUM << 24) |
+ (GC2235_F_NUMBER_DEM << 16) |
+ (GC2235_F_NUMBER_DEFAULT_NUM << 8) | GC2235_F_NUMBER_DEM;
+ return 0;
+}
+
+
+static int gc2235_get_intg_factor(struct i2c_client *client,
+ struct camera_mipi_info *info,
+ const struct gc2235_resolution *res)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct gc2235_device *dev = to_gc2235_sensor(sd);
+ struct atomisp_sensor_mode_data *buf = &info->data;
+ u16 reg_val, reg_val_h, dummy;
+ int ret;
+
+ if (!info)
+ return -EINVAL;
+
+ /* pixel clock calculation */
+ buf->vt_pix_clk_freq_mhz = dev->vt_pix_clk_freq_mhz = 30000000;
+
+ /* get integration time */
+ buf->coarse_integration_time_min = GC2235_COARSE_INTG_TIME_MIN;
+ buf->coarse_integration_time_max_margin =
+ GC2235_COARSE_INTG_TIME_MAX_MARGIN;
+
+ buf->fine_integration_time_min = GC2235_FINE_INTG_TIME_MIN;
+ buf->fine_integration_time_max_margin =
+ GC2235_FINE_INTG_TIME_MAX_MARGIN;
+
+ buf->fine_integration_time_def = GC2235_FINE_INTG_TIME_MIN;
+ buf->frame_length_lines = res->lines_per_frame;
+ buf->line_length_pck = res->pixels_per_line;
+ buf->read_mode = res->bin_mode;
+
+ /* get the cropping and output resolution to ISP for this mode. */
+ ret = gc2235_read_reg(client, GC2235_8BIT,
+ GC2235_H_CROP_START_H, &reg_val_h);
+ ret = gc2235_read_reg(client, GC2235_8BIT,
+ GC2235_H_CROP_START_L, &reg_val);
+ if (ret)
+ return ret;
+
+ buf->crop_horizontal_start = (reg_val_h << 8) | reg_val;
+
+ ret = gc2235_read_reg(client, GC2235_8BIT,
+ GC2235_V_CROP_START_H, &reg_val_h);
+ ret = gc2235_read_reg(client, GC2235_8BIT,
+ GC2235_V_CROP_START_L, &reg_val);
+ if (ret)
+ return ret;
+
+ buf->crop_vertical_start = (reg_val_h << 8) | reg_val;
+
+ ret = gc2235_read_reg(client, GC2235_8BIT,
+ GC2235_H_OUTSIZE_H, &reg_val_h);
+ ret = gc2235_read_reg(client, GC2235_8BIT,
+ GC2235_H_OUTSIZE_L, &reg_val);
+ if (ret)
+ return ret;
+ buf->output_width = (reg_val_h << 8) | reg_val;
+
+ ret = gc2235_read_reg(client, GC2235_8BIT,
+ GC2235_V_OUTSIZE_H, &reg_val_h);
+ ret = gc2235_read_reg(client, GC2235_8BIT,
+ GC2235_V_OUTSIZE_L, &reg_val);
+ if (ret)
+ return ret;
+ buf->output_height = (reg_val_h << 8) | reg_val;
+
+ buf->crop_horizontal_end = buf->crop_horizontal_start +
+ buf->output_width - 1;
+ buf->crop_vertical_end = buf->crop_vertical_start +
+ buf->output_height - 1;
+
+ ret = gc2235_read_reg(client, GC2235_8BIT,
+ GC2235_HB_H, &reg_val_h);
+ ret = gc2235_read_reg(client, GC2235_8BIT,
+ GC2235_HB_L, &reg_val);
+ if (ret)
+ return ret;
+
+ dummy = (reg_val_h << 8) | reg_val;
+
+ ret = gc2235_read_reg(client, GC2235_8BIT,
+ GC2235_SH_DELAY_H, &reg_val_h);
+ ret = gc2235_read_reg(client, GC2235_8BIT,
+ GC2235_SH_DELAY_L, &reg_val);
+
+#if 0
+ buf->line_length_pck = buf->output_width + 16 + dummy +
+ (((u16)reg_val_h << 8) | (u16)reg_val) + 4;
+#endif
+ ret = gc2235_read_reg(client, GC2235_8BIT,
+ GC2235_VB_H, &reg_val_h);
+ ret = gc2235_read_reg(client, GC2235_8BIT,
+ GC2235_VB_L, &reg_val);
+ if (ret)
+ return ret;
+
+#if 0
+ buf->frame_length_lines = buf->output_height + 32 +
+ (((u16)reg_val_h << 8) | (u16)reg_val);
+#endif
+ buf->binning_factor_x = res->bin_factor_x ?
+ res->bin_factor_x : 1;
+ buf->binning_factor_y = res->bin_factor_y ?
+ res->bin_factor_y : 1;
+ return 0;
+}
+
+static long __gc2235_set_exposure(struct v4l2_subdev *sd, int coarse_itg,
+ int gain, int digitgain)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ u16 coarse_integration = (u16)coarse_itg;
+ int ret = 0;
+ u16 expo_coarse_h, expo_coarse_l, gain_val = 0xF0, gain_val2 = 0xF0;
+ expo_coarse_h = coarse_integration >> 8;
+ expo_coarse_l = coarse_integration & 0xff;
+
+ ret = gc2235_write_reg(client, GC2235_8BIT,
+ GC2235_EXPOSURE_H, expo_coarse_h);
+ ret = gc2235_write_reg(client, GC2235_8BIT,
+ GC2235_EXPOSURE_L, expo_coarse_l);
+
+ if (gain <= 0x58) {
+ gain_val = 0x40;
+ gain_val2 = 0x58;
+ } else if (gain < 256) {
+ gain_val = 0x40;
+ gain_val2 = gain;
+ } else {
+ gain_val2 = 64 * gain / 256;
+ gain_val = 0xff;
+ }
+
+ ret = gc2235_write_reg(client, GC2235_8BIT,
+ GC2235_GLOBAL_GAIN, (u8)gain_val);
+ ret = gc2235_write_reg(client, GC2235_8BIT,
+ GC2235_PRE_GAIN, (u8)gain_val2);
+
+ return ret;
+}
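+/* Gain mapping examples for the code above: a requested gain of 0x40 (or
+ * anything <= 0x58) programs GC2235_GLOBAL_GAIN = 0x40 and
+ * GC2235_PRE_GAIN = 0x58; a gain of 100 programs 0x40 / 100; a gain of
+ * 300 (>= 256) programs GC2235_GLOBAL_GAIN = 0xff and
+ * GC2235_PRE_GAIN = 64 * 300 / 256 = 75.
+ */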
+
+
+static int gc2235_set_exposure(struct v4l2_subdev *sd, int exposure,
+ int gain, int digitgain)
+{
+ struct gc2235_device *dev = to_gc2235_sensor(sd);
+ int ret;
+
+ mutex_lock(&dev->input_lock);
+ ret = __gc2235_set_exposure(sd, exposure, gain, digitgain);
+ mutex_unlock(&dev->input_lock);
+
+ return ret;
+}
+
+static long gc2235_s_exposure(struct v4l2_subdev *sd,
+ struct atomisp_exposure *exposure)
+{
+ int exp = exposure->integration_time[0];
+ int gain = exposure->gain[0];
+ int digitgain = exposure->gain[1];
+
+ /* we should not accept the invalid value below. */
+ if (gain == 0) {
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ v4l2_err(client, "%s: invalid value\n", __func__);
+ return -EINVAL;
+ }
+
+ return gc2235_set_exposure(sd, exp, gain, digitgain);
+}
+static long gc2235_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
+{
+ switch (cmd) {
+ case ATOMISP_IOC_S_EXPOSURE:
+ return gc2235_s_exposure(sd, arg);
+ default:
+ return -EINVAL;
+ }
+}
+/* This returns the exposure time being used. This should only be used
+ * for filling in EXIF data, not for actual image processing.
+ */
+static int gc2235_q_exposure(struct v4l2_subdev *sd, s32 *value)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ u16 reg_v, reg_v2;
+ int ret;
+
+ /* get exposure */
+ ret = gc2235_read_reg(client, GC2235_8BIT,
+ GC2235_EXPOSURE_L,
+ &reg_v);
+ if (ret)
+ goto err;
+
+ ret = gc2235_read_reg(client, GC2235_8BIT,
+ GC2235_EXPOSURE_H,
+ &reg_v2);
+ if (ret)
+ goto err;
+
+ reg_v += reg_v2 << 8;
+
+ *value = reg_v;
+err:
+ return ret;
+}
+
+static int gc2235_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct gc2235_device *dev =
+ container_of(ctrl->handler, struct gc2235_device, ctrl_handler);
+ int ret = 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_EXPOSURE_ABSOLUTE:
+ ret = gc2235_q_exposure(&dev->sd, &ctrl->val);
+ break;
+ case V4L2_CID_FOCAL_ABSOLUTE:
+ ret = gc2235_g_focal(&dev->sd, &ctrl->val);
+ break;
+ case V4L2_CID_FNUMBER_ABSOLUTE:
+ ret = gc2235_g_fnumber(&dev->sd, &ctrl->val);
+ break;
+ case V4L2_CID_FNUMBER_RANGE:
+ ret = gc2235_g_fnumber_range(&dev->sd, &ctrl->val);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops ctrl_ops = {
+ .g_volatile_ctrl = gc2235_g_volatile_ctrl
+};
+
+struct v4l2_ctrl_config gc2235_controls[] = {
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_EXPOSURE_ABSOLUTE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "exposure",
+ .min = 0x0,
+ .max = 0xffff,
+ .step = 0x01,
+ .def = 0x00,
+ .flags = 0,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_FOCAL_ABSOLUTE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "focal length",
+ .min = GC2235_FOCAL_LENGTH_DEFAULT,
+ .max = GC2235_FOCAL_LENGTH_DEFAULT,
+ .step = 0x01,
+ .def = GC2235_FOCAL_LENGTH_DEFAULT,
+ .flags = 0,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_FNUMBER_ABSOLUTE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "f-number",
+ .min = GC2235_F_NUMBER_DEFAULT,
+ .max = GC2235_F_NUMBER_DEFAULT,
+ .step = 0x01,
+ .def = GC2235_F_NUMBER_DEFAULT,
+ .flags = 0,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_FNUMBER_RANGE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "f-number range",
+ .min = GC2235_F_NUMBER_RANGE,
+ .max = GC2235_F_NUMBER_RANGE,
+ .step = 0x01,
+ .def = GC2235_F_NUMBER_RANGE,
+ .flags = 0,
+ },
+};
+
+static int __gc2235_init(struct v4l2_subdev *sd)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ /* restore settings */
+ gc2235_res = gc2235_res_preview;
+ N_RES = N_RES_PREVIEW;
+
+ return gc2235_write_reg_array(client, gc2235_init_settings);
+}
+
+static int is_init;
+
+static int power_ctrl(struct v4l2_subdev *sd, bool flag)
+{
+ int ret = -1;
+ struct gc2235_device *dev = to_gc2235_sensor(sd);
+
+ if (!dev || !dev->platform_data)
+ return -ENODEV;
+
+ /* Non-gmin platforms use the legacy callback */
+ if (dev->platform_data->power_ctrl)
+ return dev->platform_data->power_ctrl(sd, flag);
+
+ if (flag) {
+ ret = dev->platform_data->v1p8_ctrl(sd, 1);
+ usleep_range(60, 90);
+ if (ret == 0)
+ ret |= dev->platform_data->v2p8_ctrl(sd, 1);
+ } else {
+ ret = dev->platform_data->v1p8_ctrl(sd, 0);
+ ret |= dev->platform_data->v2p8_ctrl(sd, 0);
+ }
+ return ret;
+}
+
+static int gpio_ctrl(struct v4l2_subdev *sd, bool flag)
+{
+ struct gc2235_device *dev = to_gc2235_sensor(sd);
+ int ret = -1;
+
+ if (!dev || !dev->platform_data)
+ return -ENODEV;
+
+ /* Non-gmin platforms use the legacy callback */
+ if (dev->platform_data->gpio_ctrl)
+ return dev->platform_data->gpio_ctrl(sd, flag);
+
+ ret |= dev->platform_data->gpio1_ctrl(sd, !flag);
+ usleep_range(60, 90);
+ return dev->platform_data->gpio0_ctrl(sd, flag);
+}
+
+static int power_up(struct v4l2_subdev *sd)
+{
+ struct gc2235_device *dev = to_gc2235_sensor(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret;
+
+ if (!dev->platform_data) {
+ dev_err(&client->dev,
+ "no camera_sensor_platform_data");
+ return -ENODEV;
+ }
+ /* power control */
+ ret = power_ctrl(sd, 1);
+ if (ret)
+ goto fail_power;
+
+ /* according to DS, at least 5ms is needed between DOVDD and PWDN */
+ usleep_range(5000, 6000);
+
+ ret = dev->platform_data->flisclk_ctrl(sd, 1);
+ if (ret)
+ goto fail_clk;
+ usleep_range(5000, 6000);
+
+ /* gpio ctrl */
+ ret = gpio_ctrl(sd, 1);
+ if (ret) {
+ ret = gpio_ctrl(sd, 1);
+ if (ret)
+ goto fail_power;
+ }
+
+ msleep(5);
+ return 0;
+
+fail_clk:
+ gpio_ctrl(sd, 0);
+fail_power:
+ power_ctrl(sd, 0);
+ dev_err(&client->dev, "sensor power-up failed\n");
+
+ return ret;
+}
+
+static int power_down(struct v4l2_subdev *sd)
+{
+ struct gc2235_device *dev = to_gc2235_sensor(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret = 0;
+
+ if (!dev->platform_data) {
+ dev_err(&client->dev,
+ "no camera_sensor_platform_data");
+ return -ENODEV;
+ }
+ /* gpio ctrl */
+ ret = gpio_ctrl(sd, 0);
+ if (ret) {
+ ret = gpio_ctrl(sd, 0);
+ if (ret)
+ dev_err(&client->dev, "gpio failed 2\n");
+ }
+
+ ret = dev->platform_data->flisclk_ctrl(sd, 0);
+ if (ret)
+ dev_err(&client->dev, "flisclk failed\n");
+
+ /* power control */
+ ret = power_ctrl(sd, 0);
+ if (ret)
+ dev_err(&client->dev, "vprog failed.\n");
+
+ return ret;
+}
+
+static int gc2235_s_power(struct v4l2_subdev *sd, int on)
+{
+ int ret;
+
+ if (on == 0)
+ ret = power_down(sd);
+ else {
+ ret = power_up(sd);
+ if (!ret)
+ ret = __gc2235_init(sd);
+ is_init = 1;
+ }
+ return ret;
+}
+
+/*
+ * distance - calculate the distance
+ * @res: resolution
+ * @w: width
+ * @h: height
+ *
+ * Get the gap between the resolution and w/h.
+ * Resolutions whose width/height are smaller than w/h are not considered.
+ * Returns the gap value, or -1 on failure.
+ */
+#define LARGEST_ALLOWED_RATIO_MISMATCH 800
+static int distance(struct gc2235_resolution *res, u32 w, u32 h)
+{
+ unsigned int w_ratio = (res->width << 13) / w;
+ unsigned int h_ratio;
+ int match;
+
+ if (h == 0)
+ return -1;
+ h_ratio = (res->height << 13) / h;
+ if (h_ratio == 0)
+ return -1;
+ match = abs(((w_ratio << 13) / h_ratio) - 8192);
+
+ if ((w_ratio < 8192) || (h_ratio < 8192) ||
+ (match > LARGEST_ALLOWED_RATIO_MISMATCH))
+ return -1;
+
+ return w_ratio + h_ratio;
+}
+
+/* Return the nearest higher resolution index */
+static int nearest_resolution_index(int w, int h)
+{
+ int i;
+ int idx = -1;
+ int dist;
+ int min_dist = INT_MAX;
+ struct gc2235_resolution *tmp_res = NULL;
+
+ for (i = 0; i < N_RES; i++) {
+ tmp_res = &gc2235_res[i];
+ dist = distance(tmp_res, w, h);
+ if (dist == -1)
+ continue;
+ if (dist < min_dist) {
+ min_dist = dist;
+ idx = i;
+ }
+ }
+
+ return idx;
+}
+
+static int get_resolution_index(int w, int h)
+{
+ int i;
+
+ for (i = 0; i < N_RES; i++) {
+ if (w != gc2235_res[i].width)
+ continue;
+ if (h != gc2235_res[i].height)
+ continue;
+
+ return i;
+ }
+
+ return -1;
+}
+
+static int startup(struct v4l2_subdev *sd)
+{
+ struct gc2235_device *dev = to_gc2235_sensor(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret = 0;
+ if (is_init == 0) {
+ /* Force gc2235 to reset on a resolution change, otherwise it
+ * cannot output normally after switching resolutions. This is
+ * skipped on the first run after power-on, for the sake of
+ * performance.
+ */
+ power_down(sd);
+ power_up(sd);
+ gc2235_write_reg_array(client, gc2235_init_settings);
+ }
+
+ ret = gc2235_write_reg_array(client, gc2235_res[dev->fmt_idx].regs);
+ if (ret) {
+ dev_err(&client->dev, "gc2235 write register err.\n");
+ return ret;
+ }
+ is_init = 0;
+
+ return ret;
+}
+
+static int gc2235_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *format)
+{
+ struct v4l2_mbus_framefmt *fmt = &format->format;
+ struct gc2235_device *dev = to_gc2235_sensor(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct camera_mipi_info *gc2235_info = NULL;
+ int ret = 0;
+ int idx;
+
+ gc2235_info = v4l2_get_subdev_hostdata(sd);
+ if (!gc2235_info)
+ return -EINVAL;
+ if (format->pad)
+ return -EINVAL;
+ if (!fmt)
+ return -EINVAL;
+ mutex_lock(&dev->input_lock);
+ idx = nearest_resolution_index(fmt->width, fmt->height);
+ if (idx == -1) {
+ /* return the largest resolution */
+ fmt->width = gc2235_res[N_RES - 1].width;
+ fmt->height = gc2235_res[N_RES - 1].height;
+ } else {
+ fmt->width = gc2235_res[idx].width;
+ fmt->height = gc2235_res[idx].height;
+ }
+ fmt->code = MEDIA_BUS_FMT_SGRBG10_1X10;
+ if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
+ cfg->try_fmt = *fmt;
+ mutex_unlock(&dev->input_lock);
+ return 0;
+ }
+
+ dev->fmt_idx = get_resolution_index(fmt->width, fmt->height);
+ if (dev->fmt_idx == -1) {
+ dev_err(&client->dev, "get resolution fail\n");
+ mutex_unlock(&dev->input_lock);
+ return -EINVAL;
+ }
+
+ ret = startup(sd);
+ if (ret) {
+ dev_err(&client->dev, "gc2235 startup err\n");
+ goto err;
+ }
+
+ ret = gc2235_get_intg_factor(client, gc2235_info,
+ &gc2235_res[dev->fmt_idx]);
+ if (ret)
+ dev_err(&client->dev, "failed to get integration_factor\n");
+
+err:
+ mutex_unlock(&dev->input_lock);
+ return ret;
+}
+
+static int gc2235_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *format)
+{
+ struct v4l2_mbus_framefmt *fmt = &format->format;
+ struct gc2235_device *dev = to_gc2235_sensor(sd);
+
+ if (format->pad)
+ return -EINVAL;
+
+ if (!fmt)
+ return -EINVAL;
+
+ fmt->width = gc2235_res[dev->fmt_idx].width;
+ fmt->height = gc2235_res[dev->fmt_idx].height;
+ fmt->code = MEDIA_BUS_FMT_SGRBG10_1X10;
+
+ return 0;
+}
+
+static int gc2235_detect(struct i2c_client *client)
+{
+ struct i2c_adapter *adapter = client->adapter;
+ u16 high, low;
+ int ret;
+ u16 id;
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_I2C))
+ return -ENODEV;
+
+ ret = gc2235_read_reg(client, GC2235_8BIT,
+ GC2235_SENSOR_ID_H, &high);
+ if (ret) {
+ dev_err(&client->dev, "read sensor_id_high failed\n");
+ return -ENODEV;
+ }
+ ret = gc2235_read_reg(client, GC2235_8BIT,
+ GC2235_SENSOR_ID_L, &low);
+ id = ((high << 8) | low);
+
+ if (id != GC2235_ID) {
+ dev_err(&client->dev, "sensor ID error, 0x%x\n", id);
+ return -ENODEV;
+ }
+
+ dev_info(&client->dev, "detect gc2235 success\n");
+ return 0;
+}
+
+static int gc2235_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct gc2235_device *dev = to_gc2235_sensor(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret;
+ mutex_lock(&dev->input_lock);
+
+ if (enable)
+ ret = gc2235_write_reg_array(client, gc2235_stream_on);
+ else
+ ret = gc2235_write_reg_array(client, gc2235_stream_off);
+
+ mutex_unlock(&dev->input_lock);
+ return ret;
+}
+
+
+static int gc2235_s_config(struct v4l2_subdev *sd,
+ int irq, void *platform_data)
+{
+ struct gc2235_device *dev = to_gc2235_sensor(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret = 0;
+
+ if (!platform_data)
+ return -ENODEV;
+
+ dev->platform_data =
+ (struct camera_sensor_platform_data *)platform_data;
+
+ mutex_lock(&dev->input_lock);
+ if (dev->platform_data->platform_init) {
+ ret = dev->platform_data->platform_init(client);
+ if (ret) {
+ dev_err(&client->dev, "platform init err\n");
+ goto platform_init_failed;
+ }
+ }
+ /* Power off the module, then power it on again later:
+ * the initial power-on done by the board may not follow
+ * the power-on sequence the module requires.
+ */
+ ret = power_down(sd);
+ if (ret) {
+ dev_err(&client->dev, "gc2235 power-off err.\n");
+ goto fail_power_off;
+ }
+
+ ret = power_up(sd);
+ if (ret) {
+ dev_err(&client->dev, "gc2235 power-up err.\n");
+ goto fail_power_on;
+ }
+
+ ret = dev->platform_data->csi_cfg(sd, 1);
+ if (ret)
+ goto fail_csi_cfg;
+
+ /* config & detect sensor */
+ ret = gc2235_detect(client);
+ if (ret) {
+ dev_err(&client->dev, "gc2235_detect err s_config.\n");
+ goto fail_csi_cfg;
+ }
+
+ /* turn off sensor, after probed */
+ ret = power_down(sd);
+ if (ret) {
+ dev_err(&client->dev, "gc2235 power-off err.\n");
+ goto fail_csi_cfg;
+ }
+ mutex_unlock(&dev->input_lock);
+
+ return 0;
+
+fail_csi_cfg:
+ dev->platform_data->csi_cfg(sd, 0);
+fail_power_on:
+ power_down(sd);
+ dev_err(&client->dev, "sensor power-gating failed\n");
+fail_power_off:
+ if (dev->platform_data->platform_deinit)
+ dev->platform_data->platform_deinit();
+platform_init_failed:
+ mutex_unlock(&dev->input_lock);
+ return ret;
+}
+
+static int gc2235_g_parm(struct v4l2_subdev *sd,
+ struct v4l2_streamparm *param)
+{
+ struct gc2235_device *dev = to_gc2235_sensor(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ if (!param)
+ return -EINVAL;
+
+ if (param->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+ dev_err(&client->dev, "unsupported buffer type.\n");
+ return -EINVAL;
+ }
+
+ memset(param, 0, sizeof(*param));
+ param->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+
+ if (dev->fmt_idx >= 0 && dev->fmt_idx < N_RES) {
+ param->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
+ param->parm.capture.timeperframe.numerator = 1;
+ param->parm.capture.capturemode = dev->run_mode;
+ param->parm.capture.timeperframe.denominator =
+ gc2235_res[dev->fmt_idx].fps;
+ }
+ return 0;
+}
+
+static int gc2235_s_parm(struct v4l2_subdev *sd,
+ struct v4l2_streamparm *param)
+{
+ struct gc2235_device *dev = to_gc2235_sensor(sd);
+ dev->run_mode = param->parm.capture.capturemode;
+
+ mutex_lock(&dev->input_lock);
+ switch (dev->run_mode) {
+ case CI_MODE_VIDEO:
+ gc2235_res = gc2235_res_video;
+ N_RES = N_RES_VIDEO;
+ break;
+ case CI_MODE_STILL_CAPTURE:
+ gc2235_res = gc2235_res_still;
+ N_RES = N_RES_STILL;
+ break;
+ default:
+ gc2235_res = gc2235_res_preview;
+ N_RES = N_RES_PREVIEW;
+ }
+ mutex_unlock(&dev->input_lock);
+ return 0;
+}
+
+static int gc2235_g_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_frame_interval *interval)
+{
+ struct gc2235_device *dev = to_gc2235_sensor(sd);
+
+ interval->interval.numerator = 1;
+ interval->interval.denominator = gc2235_res[dev->fmt_idx].fps;
+
+ return 0;
+}
+
+static int gc2235_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ if (code->index >= MAX_FMTS)
+ return -EINVAL;
+
+ code->code = MEDIA_BUS_FMT_SBGGR10_1X10;
+ return 0;
+}
+
+static int gc2235_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ int index = fse->index;
+
+ if (index >= N_RES)
+ return -EINVAL;
+
+ fse->min_width = gc2235_res[index].width;
+ fse->min_height = gc2235_res[index].height;
+ fse->max_width = gc2235_res[index].width;
+ fse->max_height = gc2235_res[index].height;
+
+ return 0;
+
+}
+
+static int gc2235_g_skip_frames(struct v4l2_subdev *sd, u32 *frames)
+{
+ struct gc2235_device *dev = to_gc2235_sensor(sd);
+
+ mutex_lock(&dev->input_lock);
+ *frames = gc2235_res[dev->fmt_idx].skip_frames;
+ mutex_unlock(&dev->input_lock);
+
+ return 0;
+}
+
+static const struct v4l2_subdev_sensor_ops gc2235_sensor_ops = {
+ .g_skip_frames = gc2235_g_skip_frames,
+};
+
+static const struct v4l2_subdev_video_ops gc2235_video_ops = {
+ .s_stream = gc2235_s_stream,
+ .g_parm = gc2235_g_parm,
+ .s_parm = gc2235_s_parm,
+ .g_frame_interval = gc2235_g_frame_interval,
+};
+
+static const struct v4l2_subdev_core_ops gc2235_core_ops = {
+ .s_power = gc2235_s_power,
+ .ioctl = gc2235_ioctl,
+};
+
+static const struct v4l2_subdev_pad_ops gc2235_pad_ops = {
+ .enum_mbus_code = gc2235_enum_mbus_code,
+ .enum_frame_size = gc2235_enum_frame_size,
+ .get_fmt = gc2235_get_fmt,
+ .set_fmt = gc2235_set_fmt,
+};
+
+static const struct v4l2_subdev_ops gc2235_ops = {
+ .core = &gc2235_core_ops,
+ .video = &gc2235_video_ops,
+ .pad = &gc2235_pad_ops,
+ .sensor = &gc2235_sensor_ops,
+};
+
+static int gc2235_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct gc2235_device *dev = to_gc2235_sensor(sd);
+ dev_dbg(&client->dev, "gc2235_remove...\n");
+
+ if (dev->platform_data->platform_deinit)
+ dev->platform_data->platform_deinit();
+
+ dev->platform_data->csi_cfg(sd, 0);
+
+ v4l2_device_unregister_subdev(sd);
+ media_entity_cleanup(&dev->sd.entity);
+ v4l2_ctrl_handler_free(&dev->ctrl_handler);
+ kfree(dev);
+
+ return 0;
+}
+
+static int gc2235_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct gc2235_device *dev;
+ void *gcpdev;
+ int ret;
+ unsigned int i;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev) {
+ dev_err(&client->dev, "out of memory\n");
+ return -ENOMEM;
+ }
+
+ mutex_init(&dev->input_lock);
+
+ dev->fmt_idx = 0;
+ v4l2_i2c_subdev_init(&(dev->sd), client, &gc2235_ops);
+
+ gcpdev = client->dev.platform_data;
+ if (ACPI_COMPANION(&client->dev))
+ gcpdev = gmin_camera_platform_data(&dev->sd,
+ ATOMISP_INPUT_FORMAT_RAW_10,
+ atomisp_bayer_order_grbg);
+
+ ret = gc2235_s_config(&dev->sd, client->irq, gcpdev);
+ if (ret)
+ goto out_free;
+
+ dev->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ dev->pad.flags = MEDIA_PAD_FL_SOURCE;
+ dev->format.code = MEDIA_BUS_FMT_SBGGR10_1X10;
+ dev->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
+	ret = v4l2_ctrl_handler_init(&dev->ctrl_handler,
+				     ARRAY_SIZE(gc2235_controls));
+ if (ret) {
+ gc2235_remove(client);
+ return ret;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(gc2235_controls); i++)
+ v4l2_ctrl_new_custom(&dev->ctrl_handler, &gc2235_controls[i],
+ NULL);
+
+ if (dev->ctrl_handler.error) {
+ gc2235_remove(client);
+ return dev->ctrl_handler.error;
+ }
+
+ /* Use same lock for controls as for everything else. */
+ dev->ctrl_handler.lock = &dev->input_lock;
+ dev->sd.ctrl_handler = &dev->ctrl_handler;
+
+ ret = media_entity_pads_init(&dev->sd.entity, 1, &dev->pad);
+ if (ret)
+ gc2235_remove(client);
+
+ if (ACPI_HANDLE(&client->dev))
+ ret = atomisp_register_i2c_module(&dev->sd, gcpdev, RAW_CAMERA);
+
+ return ret;
+out_free:
+ v4l2_device_unregister_subdev(&dev->sd);
+ kfree(dev);
+
+ return ret;
+}
+
+static struct acpi_device_id gc2235_acpi_match[] = {
+ { "INT33F8" },
+ {},
+};
+
+MODULE_DEVICE_TABLE(acpi, gc2235_acpi_match);
+MODULE_DEVICE_TABLE(i2c, gc2235_id);
+static struct i2c_driver gc2235_driver = {
+ .driver = {
+ .name = GC2235_NAME,
+ .acpi_match_table = ACPI_PTR(gc2235_acpi_match),
+ },
+ .probe = gc2235_probe,
+ .remove = gc2235_remove,
+ .id_table = gc2235_id,
+};
+
+static int init_gc2235(void)
+{
+ return i2c_add_driver(&gc2235_driver);
+}
+
+static void exit_gc2235(void)
+{
+ i2c_del_driver(&gc2235_driver);
+}
+
+module_init(init_gc2235);
+module_exit(exit_gc2235);
+
+MODULE_AUTHOR("Shuguang Gong <Shuguang.Gong@intel.com>");
+MODULE_DESCRIPTION("A low-level driver for GC2235 sensors");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/media/atomisp/i2c/gc2235.h b/drivers/staging/media/atomisp/i2c/gc2235.h
new file mode 100644
index 000000000000..ccbc757045a5
--- /dev/null
+++ b/drivers/staging/media/atomisp/i2c/gc2235.h
@@ -0,0 +1,672 @@
+/*
+ * Support for GalaxyCore GC2235 2M camera sensor.
+ *
+ * Copyright (c) 2014 Intel Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.
+ *
+ */
+
+#ifndef __GC2235_H__
+#define __GC2235_H__
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/i2c.h>
+#include <linux/delay.h>
+#include <linux/videodev2.h>
+#include <linux/spinlock.h>
+#include <media/v4l2-subdev.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ctrls.h>
+#include <linux/v4l2-mediabus.h>
+#include <media/media-entity.h>
+
+#include "../include/linux/atomisp_platform.h"
+
+#define GC2235_NAME "gc2235"
+
+/* Defines for register writes and register array processing */
+#define I2C_MSG_LENGTH 0x2
+#define I2C_RETRY_COUNT 5
+
+#define GC2235_FOCAL_LENGTH_NUM 278 /*2.78mm*/
+#define GC2235_FOCAL_LENGTH_DEM 100
+#define GC2235_F_NUMBER_DEFAULT_NUM 26
+#define GC2235_F_NUMBER_DEM 10
+
+#define MAX_FMTS 1
+
+/*
+ * focal length bits definition:
+ * bits 31-16: numerator, bits 15-0: denominator
+ */
+#define GC2235_FOCAL_LENGTH_DEFAULT 0x1160064
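+/* = (278 << 16) | 100, matching GC2235_FOCAL_LENGTH_NUM/_DEM above */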
+
+/*
+ * current f-number bits definition:
+ * bits 31-16: numerator, bits 15-0: denominator
+ */
+#define GC2235_F_NUMBER_DEFAULT 0x1a000a
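+/* = (26 << 16) | 10, matching GC2235_F_NUMBER_DEFAULT_NUM/_DEM (f/2.6) */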
+
+/*
+ * f-number range bits definition:
+ * bits 31-24: max f-number numerator
+ * bits 23-16: max f-number denominator
+ * bits 15-8: min f-number numerator
+ * bits 7-0: min f-number denominator
+ */
+#define GC2235_F_NUMBER_RANGE 0x1a0a1a0a
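+/* = 26/10 (f/2.6) for both the max and the min f-number */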
+#define GC2235_ID 0x2235
+
+#define GC2235_FINE_INTG_TIME_MIN 0
+#define GC2235_FINE_INTG_TIME_MAX_MARGIN 0
+#define GC2235_COARSE_INTG_TIME_MIN 1
+#define GC2235_COARSE_INTG_TIME_MAX_MARGIN 6
+
+/*
+ * GC2235 System control registers
+ */
+#define GC2235_SENSOR_ID_H 0xF0
+#define GC2235_SENSOR_ID_L 0xF1
+#define GC2235_RESET_RELATED 0xFE
+#define GC2235_SW_RESET 0x8
+#define GC2235_MIPI_RESET 0x3
+#define GC2235_RESET_BIT 0x4
+#define GC2235_REGISTER_PAGE_0 0x0
+#define GC2235_REGISTER_PAGE_3 0x3
+
+#define GC2235_V_CROP_START_H 0x91
+#define GC2235_V_CROP_START_L 0x92
+#define GC2235_H_CROP_START_H 0x93
+#define GC2235_H_CROP_START_L 0x94
+#define GC2235_V_OUTSIZE_H 0x95
+#define GC2235_V_OUTSIZE_L 0x96
+#define GC2235_H_OUTSIZE_H 0x97
+#define GC2235_H_OUTSIZE_L 0x98
+
+#define GC2235_HB_H 0x5
+#define GC2235_HB_L 0x6
+#define GC2235_VB_H 0x7
+#define GC2235_VB_L 0x8
+#define GC2235_SH_DELAY_H 0x11
+#define GC2235_SH_DELAY_L 0x12
+
+#define GC2235_CSI2_MODE 0x10
+
+#define GC2235_EXPOSURE_H 0x3
+#define GC2235_EXPOSURE_L 0x4
+#define GC2235_GLOBAL_GAIN 0xB0
+#define GC2235_PRE_GAIN 0xB1
+#define GC2235_AWB_R_GAIN 0xB3
+#define GC2235_AWB_G_GAIN 0xB4
+#define GC2235_AWB_B_GAIN 0xB5
+
+#define GC2235_START_STREAMING 0x91
+#define GC2235_STOP_STREAMING 0x0
+
+struct regval_list {
+ u16 reg_num;
+ u8 value;
+};
+
+struct gc2235_resolution {
+ u8 *desc;
+ const struct gc2235_reg *regs;
+ int res;
+ int width;
+ int height;
+ int fps;
+ int pix_clk_freq;
+ u32 skip_frames;
+ u16 pixels_per_line;
+ u16 lines_per_frame;
+ u8 bin_factor_x;
+ u8 bin_factor_y;
+ u8 bin_mode;
+ bool used;
+};
+
+struct gc2235_format {
+ u8 *desc;
+ u32 pixelformat;
+ struct gc2235_reg *regs;
+};
+
+/*
+ * gc2235 device structure.
+ */
+struct gc2235_device {
+ struct v4l2_subdev sd;
+ struct media_pad pad;
+ struct v4l2_mbus_framefmt format;
+ struct mutex input_lock;
+ struct v4l2_ctrl_handler ctrl_handler;
+
+ struct camera_sensor_platform_data *platform_data;
+ int vt_pix_clk_freq_mhz;
+ int fmt_idx;
+ int run_mode;
+ u8 res;
+ u8 type;
+};
+
+enum gc2235_tok_type {
+ GC2235_8BIT = 0x0001,
+ GC2235_16BIT = 0x0002,
+ GC2235_32BIT = 0x0004,
+ GC2235_TOK_TERM = 0xf000, /* terminating token for reg list */
+ GC2235_TOK_DELAY = 0xfe00, /* delay token for reg list */
+ GC2235_TOK_MASK = 0xfff0
+};
+
+/**
+ * struct gc2235_reg - MI sensor register format
+ * @type: type of the register
+ * @reg: 8-bit offset to register
+ * @val: 8/16/32-bit register value
+ *
+ * Define a structure for sensor register initialization values
+ */
+struct gc2235_reg {
+ enum gc2235_tok_type type;
+ u8 reg;
+ u32 val; /* @set value for read/mod/write, @mask */
+};
+
+#define to_gc2235_sensor(x) container_of(x, struct gc2235_device, sd)
+
+#define GC2235_MAX_WRITE_BUF_SIZE 30
+
+struct gc2235_write_buffer {
+ u8 addr;
+ u8 data[GC2235_MAX_WRITE_BUF_SIZE];
+};
+
+struct gc2235_write_ctrl {
+ int index;
+ struct gc2235_write_buffer buffer;
+};
+
+static const struct i2c_device_id gc2235_id[] = {
+ {GC2235_NAME, 0},
+ {}
+};
+
+static struct gc2235_reg const gc2235_stream_on[] = {
+ { GC2235_8BIT, 0xfe, 0x03}, /* switch to P3 */
+ { GC2235_8BIT, 0x10, 0x91}, /* start mipi */
+ { GC2235_8BIT, 0xfe, 0x00}, /* switch to P0 */
+ { GC2235_TOK_TERM, 0, 0 }
+};
+
+static struct gc2235_reg const gc2235_stream_off[] = {
+ { GC2235_8BIT, 0xfe, 0x03}, /* switch to P3 */
+ { GC2235_8BIT, 0x10, 0x01}, /* stop mipi */
+ { GC2235_8BIT, 0xfe, 0x00}, /* switch to P0 */
+ { GC2235_TOK_TERM, 0, 0 }
+};
+
+static struct gc2235_reg const gc2235_init_settings[] = {
+	/* System */
+ { GC2235_8BIT, 0xfe, 0x80 },
+ { GC2235_8BIT, 0xfe, 0x80 },
+ { GC2235_8BIT, 0xfe, 0x80 },
+ { GC2235_8BIT, 0xf2, 0x00 },
+ { GC2235_8BIT, 0xf6, 0x00 },
+ { GC2235_8BIT, 0xfc, 0x06 },
+ { GC2235_8BIT, 0xf7, 0x15 },
+ { GC2235_8BIT, 0xf8, 0x84 },
+ { GC2235_8BIT, 0xf9, 0xfe },
+ { GC2235_8BIT, 0xfa, 0x00 },
+ { GC2235_8BIT, 0xfe, 0x00 },
+ /* Analog & cisctl */
+ { GC2235_8BIT, 0x03, 0x04 },
+ { GC2235_8BIT, 0x04, 0x9E },
+ { GC2235_8BIT, 0x05, 0x00 },
+ { GC2235_8BIT, 0x06, 0xfd },
+ { GC2235_8BIT, 0x07, 0x00 },
+ { GC2235_8BIT, 0x08, 0x14 },
+ { GC2235_8BIT, 0x0a, 0x02 }, /* row start */
+ { GC2235_8BIT, 0x0c, 0x00 }, /* col start */
+ { GC2235_8BIT, 0x0d, 0x04 }, /* win height 1232 */
+ { GC2235_8BIT, 0x0e, 0xd0 },
+ { GC2235_8BIT, 0x0f, 0x06 }, /* win width: 1616 */
+ { GC2235_8BIT, 0x10, 0x60 },
+ { GC2235_8BIT, 0x17, 0x15 }, /* mirror flip */
+ { GC2235_8BIT, 0x18, 0x1a },
+ { GC2235_8BIT, 0x19, 0x06 },
+ { GC2235_8BIT, 0x1a, 0x01 },
+ { GC2235_8BIT, 0x1b, 0x4d },
+ { GC2235_8BIT, 0x1e, 0x88 },
+ { GC2235_8BIT, 0x1f, 0x48 },
+ { GC2235_8BIT, 0x20, 0x03 },
+ { GC2235_8BIT, 0x21, 0x7f },
+ { GC2235_8BIT, 0x22, 0x83 },
+ { GC2235_8BIT, 0x23, 0x42 },
+ { GC2235_8BIT, 0x24, 0x16 },
+ { GC2235_8BIT, 0x26, 0x01 }, /*analog gain*/
+ { GC2235_8BIT, 0x27, 0x30 },
+ { GC2235_8BIT, 0x3f, 0x00 }, /* PRC */
+ /* blk */
+ { GC2235_8BIT, 0x40, 0xa3 },
+ { GC2235_8BIT, 0x41, 0x82 },
+ { GC2235_8BIT, 0x43, 0x20 },
+ { GC2235_8BIT, 0x5e, 0x18 },
+ { GC2235_8BIT, 0x5f, 0x18 },
+ { GC2235_8BIT, 0x60, 0x18 },
+ { GC2235_8BIT, 0x61, 0x18 },
+ { GC2235_8BIT, 0x62, 0x18 },
+ { GC2235_8BIT, 0x63, 0x18 },
+ { GC2235_8BIT, 0x64, 0x18 },
+ { GC2235_8BIT, 0x65, 0x18 },
+ { GC2235_8BIT, 0x66, 0x20 },
+ { GC2235_8BIT, 0x67, 0x20 },
+ { GC2235_8BIT, 0x68, 0x20 },
+ { GC2235_8BIT, 0x69, 0x20 },
+ /* Gain */
+ { GC2235_8BIT, 0xb2, 0x00 },
+ { GC2235_8BIT, 0xb3, 0x40 },
+ { GC2235_8BIT, 0xb4, 0x40 },
+ { GC2235_8BIT, 0xb5, 0x40 },
+ /* Dark sun */
+ { GC2235_8BIT, 0xbc, 0x00 },
+
+ { GC2235_8BIT, 0xfe, 0x03 },
+ { GC2235_8BIT, 0x10, 0x01 }, /* disable mipi */
+ { GC2235_8BIT, 0xfe, 0x00 }, /* switch to P0 */
+ { GC2235_TOK_TERM, 0, 0 }
+};
+/*
+ * Register settings for the various resolutions
+ */
+static struct gc2235_reg const gc2235_1296_736_30fps[] = {
+ { GC2235_8BIT, 0x8b, 0xa0 },
+ { GC2235_8BIT, 0x8c, 0x02 },
+
+ { GC2235_8BIT, 0x07, 0x01 }, /* VBI */
+ { GC2235_8BIT, 0x08, 0x44 },
+ { GC2235_8BIT, 0x09, 0x00 }, /* row start */
+ { GC2235_8BIT, 0x0a, 0xf0 },
+ { GC2235_8BIT, 0x0b, 0x00 }, /* col start */
+ { GC2235_8BIT, 0x0c, 0xa0 },
+ { GC2235_8BIT, 0x0d, 0x02 }, /* win height 736 */
+ { GC2235_8BIT, 0x0e, 0xf0 },
+ { GC2235_8BIT, 0x0f, 0x05 }, /* win width: 1296 */
+ { GC2235_8BIT, 0x10, 0x20 },
+
+ { GC2235_8BIT, 0x90, 0x01 },
+ { GC2235_8BIT, 0x92, 0x08 },
+ { GC2235_8BIT, 0x94, 0x08 },
+ { GC2235_8BIT, 0x95, 0x02 }, /* crop win height 736 */
+ { GC2235_8BIT, 0x96, 0xe0 },
+ { GC2235_8BIT, 0x97, 0x05 }, /* crop win width 1296 */
+ { GC2235_8BIT, 0x98, 0x10 },
+	/* mipi init */
+ { GC2235_8BIT, 0xfe, 0x03 }, /* switch to P3 */
+ { GC2235_8BIT, 0x01, 0x07 },
+ { GC2235_8BIT, 0x02, 0x11 },
+ { GC2235_8BIT, 0x03, 0x11 },
+ { GC2235_8BIT, 0x06, 0x80 },
+ { GC2235_8BIT, 0x11, 0x2b },
+ /* set mipi buffer */
+ { GC2235_8BIT, 0x12, 0x54 }, /* val_low = (width * 10 / 8) & 0xFF */
+ { GC2235_8BIT, 0x13, 0x06 }, /* val_high = (width * 10 / 8) >> 8 */
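+	/* e.g. width 1296: 1296 * 10 / 8 = 1620 = 0x0654 -> low 0x54, high 0x06 */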
+
+ { GC2235_8BIT, 0x15, 0x12 }, /* DPHY mode*/
+ { GC2235_8BIT, 0x04, 0x10 },
+ { GC2235_8BIT, 0x05, 0x00 },
+ { GC2235_8BIT, 0x17, 0x01 },
+
+ { GC2235_8BIT, 0x22, 0x01 },
+ { GC2235_8BIT, 0x23, 0x05 },
+ { GC2235_8BIT, 0x24, 0x10 },
+ { GC2235_8BIT, 0x25, 0x10 },
+ { GC2235_8BIT, 0x26, 0x02 },
+ { GC2235_8BIT, 0x21, 0x10 },
+ { GC2235_8BIT, 0x29, 0x01 },
+ { GC2235_8BIT, 0x2a, 0x02 },
+ { GC2235_8BIT, 0x2b, 0x02 },
+
+ { GC2235_8BIT, 0x10, 0x01 }, /* disable mipi */
+ { GC2235_8BIT, 0xfe, 0x00 }, /* switch to P0 */
+ { GC2235_TOK_TERM, 0, 0 }
+};
+
+static struct gc2235_reg const gc2235_960_640_30fps[] = {
+ { GC2235_8BIT, 0x8b, 0xa0 },
+ { GC2235_8BIT, 0x8c, 0x02 },
+
+ { GC2235_8BIT, 0x07, 0x02 }, /* VBI */
+ { GC2235_8BIT, 0x08, 0xA4 },
+ { GC2235_8BIT, 0x09, 0x01 }, /* row start */
+ { GC2235_8BIT, 0x0a, 0x18 },
+ { GC2235_8BIT, 0x0b, 0x01 }, /* col start */
+ { GC2235_8BIT, 0x0c, 0x40 },
+ { GC2235_8BIT, 0x0d, 0x02 }, /* win height 656 */
+ { GC2235_8BIT, 0x0e, 0x90 },
+ { GC2235_8BIT, 0x0f, 0x03 }, /* win width: 976 */
+ { GC2235_8BIT, 0x10, 0xd0 },
+
+ { GC2235_8BIT, 0x90, 0x01 },
+ { GC2235_8BIT, 0x92, 0x02 },
+ { GC2235_8BIT, 0x94, 0x06 },
+ { GC2235_8BIT, 0x95, 0x02 }, /* crop win height 640 */
+ { GC2235_8BIT, 0x96, 0x80 },
+ { GC2235_8BIT, 0x97, 0x03 }, /* crop win width 960 */
+ { GC2235_8BIT, 0x98, 0xc0 },
+	/* mipi init */
+ { GC2235_8BIT, 0xfe, 0x03 }, /* switch to P3 */
+ { GC2235_8BIT, 0x01, 0x07 },
+ { GC2235_8BIT, 0x02, 0x11 },
+ { GC2235_8BIT, 0x03, 0x11 },
+ { GC2235_8BIT, 0x06, 0x80 },
+ { GC2235_8BIT, 0x11, 0x2b },
+ /* set mipi buffer */
+ { GC2235_8BIT, 0x12, 0xb0 }, /* val_low = (width * 10 / 8) & 0xFF */
+ { GC2235_8BIT, 0x13, 0x04 }, /* val_high = (width * 10 / 8) >> 8 */
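+	/* e.g. width 960: 960 * 10 / 8 = 1200 = 0x04b0 -> low 0xb0, high 0x04 */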
+
+ { GC2235_8BIT, 0x15, 0x12 }, /* DPHY mode*/
+ { GC2235_8BIT, 0x04, 0x10 },
+ { GC2235_8BIT, 0x05, 0x00 },
+ { GC2235_8BIT, 0x17, 0x01 },
+ { GC2235_8BIT, 0x22, 0x01 },
+ { GC2235_8BIT, 0x23, 0x05 },
+ { GC2235_8BIT, 0x24, 0x10 },
+ { GC2235_8BIT, 0x25, 0x10 },
+ { GC2235_8BIT, 0x26, 0x02 },
+ { GC2235_8BIT, 0x21, 0x10 },
+ { GC2235_8BIT, 0x29, 0x01 },
+ { GC2235_8BIT, 0x2a, 0x02 },
+ { GC2235_8BIT, 0x2b, 0x02 },
+ { GC2235_8BIT, 0x10, 0x01 }, /* disable mipi */
+ { GC2235_8BIT, 0xfe, 0x00 }, /* switch to P0 */
+ { GC2235_TOK_TERM, 0, 0 }
+};
+
+static struct gc2235_reg const gc2235_1600_900_30fps[] = {
+ { GC2235_8BIT, 0x8b, 0xa0 },
+ { GC2235_8BIT, 0x8c, 0x02 },
+
+ { GC2235_8BIT, 0x0d, 0x03 }, /* win height 932 */
+ { GC2235_8BIT, 0x0e, 0xa4 },
+ { GC2235_8BIT, 0x0f, 0x06 }, /* win width: 1632 */
+ { GC2235_8BIT, 0x10, 0x50 },
+
+ { GC2235_8BIT, 0x90, 0x01 },
+ { GC2235_8BIT, 0x92, 0x02 },
+ { GC2235_8BIT, 0x94, 0x06 },
+ { GC2235_8BIT, 0x95, 0x03 }, /* crop win height 900 */
+ { GC2235_8BIT, 0x96, 0x84 },
+ { GC2235_8BIT, 0x97, 0x06 }, /* crop win width 1600 */
+ { GC2235_8BIT, 0x98, 0x40 },
+	/* mipi init */
+ { GC2235_8BIT, 0xfe, 0x03 }, /* switch to P3 */
+ { GC2235_8BIT, 0x01, 0x07 },
+ { GC2235_8BIT, 0x02, 0x11 },
+ { GC2235_8BIT, 0x03, 0x11 },
+ { GC2235_8BIT, 0x06, 0x80 },
+ { GC2235_8BIT, 0x11, 0x2b },
+ /* set mipi buffer */
+ { GC2235_8BIT, 0x12, 0xd0 }, /* val_low = (width * 10 / 8) & 0xFF */
+ { GC2235_8BIT, 0x13, 0x07 }, /* val_high = (width * 10 / 8) >> 8 */
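+	/* e.g. width 1600: 1600 * 10 / 8 = 2000 = 0x07d0 -> low 0xd0, high 0x07 */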
+
+ { GC2235_8BIT, 0x15, 0x12 }, /* DPHY mode*/
+ { GC2235_8BIT, 0x04, 0x10 },
+ { GC2235_8BIT, 0x05, 0x00 },
+ { GC2235_8BIT, 0x17, 0x01 },
+ { GC2235_8BIT, 0x22, 0x01 },
+ { GC2235_8BIT, 0x23, 0x05 },
+ { GC2235_8BIT, 0x24, 0x10 },
+ { GC2235_8BIT, 0x25, 0x10 },
+ { GC2235_8BIT, 0x26, 0x02 },
+ { GC2235_8BIT, 0x21, 0x10 },
+ { GC2235_8BIT, 0x29, 0x01 },
+ { GC2235_8BIT, 0x2a, 0x02 },
+ { GC2235_8BIT, 0x2b, 0x02 },
+ { GC2235_8BIT, 0x10, 0x01 }, /* disable mipi */
+ { GC2235_8BIT, 0xfe, 0x00 }, /* switch to P0 */
+ { GC2235_TOK_TERM, 0, 0 }
+};
+
+static struct gc2235_reg const gc2235_1616_1082_30fps[] = {
+ { GC2235_8BIT, 0x8b, 0xa0 },
+ { GC2235_8BIT, 0x8c, 0x02 },
+
+ { GC2235_8BIT, 0x0d, 0x04 }, /* win height 1232 */
+ { GC2235_8BIT, 0x0e, 0xd0 },
+ { GC2235_8BIT, 0x0f, 0x06 }, /* win width: 1616 */
+ { GC2235_8BIT, 0x10, 0x50 },
+
+ { GC2235_8BIT, 0x90, 0x01 },
+ { GC2235_8BIT, 0x92, 0x4a },
+ { GC2235_8BIT, 0x94, 0x00 },
+ { GC2235_8BIT, 0x95, 0x04 }, /* crop win height 1082 */
+ { GC2235_8BIT, 0x96, 0x3a },
+ { GC2235_8BIT, 0x97, 0x06 }, /* crop win width 1616 */
+ { GC2235_8BIT, 0x98, 0x50 },
+	/* mipi init */
+ { GC2235_8BIT, 0xfe, 0x03 }, /* switch to P3 */
+ { GC2235_8BIT, 0x01, 0x07 },
+ { GC2235_8BIT, 0x02, 0x11 },
+ { GC2235_8BIT, 0x03, 0x11 },
+ { GC2235_8BIT, 0x06, 0x80 },
+ { GC2235_8BIT, 0x11, 0x2b },
+ /* set mipi buffer */
+ { GC2235_8BIT, 0x12, 0xe4 }, /* val_low = (width * 10 / 8) & 0xFF */
+ { GC2235_8BIT, 0x13, 0x07 }, /* val_high = (width * 10 / 8) >> 8 */
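+	/* e.g. width 1616: 1616 * 10 / 8 = 2020 = 0x07e4 -> low 0xe4, high 0x07 */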
+
+ { GC2235_8BIT, 0x15, 0x12 }, /* DPHY mode*/
+ { GC2235_8BIT, 0x04, 0x10 },
+ { GC2235_8BIT, 0x05, 0x00 },
+ { GC2235_8BIT, 0x17, 0x01 },
+ { GC2235_8BIT, 0x22, 0x01 },
+ { GC2235_8BIT, 0x23, 0x05 },
+ { GC2235_8BIT, 0x24, 0x10 },
+ { GC2235_8BIT, 0x25, 0x10 },
+ { GC2235_8BIT, 0x26, 0x02 },
+ { GC2235_8BIT, 0x21, 0x10 },
+ { GC2235_8BIT, 0x29, 0x01 },
+ { GC2235_8BIT, 0x2a, 0x02 },
+ { GC2235_8BIT, 0x2b, 0x02 },
+ { GC2235_8BIT, 0x10, 0x01 }, /* disable mipi */
+ { GC2235_8BIT, 0xfe, 0x00 }, /* switch to P0 */
+ { GC2235_TOK_TERM, 0, 0 }
+};
+
+static struct gc2235_reg const gc2235_1616_1216_30fps[] = {
+ { GC2235_8BIT, 0x8b, 0xa0 },
+ { GC2235_8BIT, 0x8c, 0x02 },
+
+ { GC2235_8BIT, 0x0d, 0x04 }, /* win height 1232 */
+ { GC2235_8BIT, 0x0e, 0xd0 },
+ { GC2235_8BIT, 0x0f, 0x06 }, /* win width: 1616 */
+ { GC2235_8BIT, 0x10, 0x50 },
+
+ { GC2235_8BIT, 0x90, 0x01 },
+ { GC2235_8BIT, 0x92, 0x02 },
+ { GC2235_8BIT, 0x94, 0x00 },
+ { GC2235_8BIT, 0x95, 0x04 }, /* crop win height 1216 */
+ { GC2235_8BIT, 0x96, 0xc0 },
+ { GC2235_8BIT, 0x97, 0x06 }, /* crop win width 1616 */
+ { GC2235_8BIT, 0x98, 0x50 },
+	/* mipi init */
+ { GC2235_8BIT, 0xfe, 0x03 }, /* switch to P3 */
+ { GC2235_8BIT, 0x01, 0x07 },
+ { GC2235_8BIT, 0x02, 0x11 },
+ { GC2235_8BIT, 0x03, 0x11 },
+ { GC2235_8BIT, 0x06, 0x80 },
+ { GC2235_8BIT, 0x11, 0x2b },
+ /* set mipi buffer */
+ { GC2235_8BIT, 0x12, 0xe4 }, /* val_low = (width * 10 / 8) & 0xFF */
+ { GC2235_8BIT, 0x13, 0x07 }, /* val_high = (width * 10 / 8) >> 8 */
+ { GC2235_8BIT, 0x15, 0x12 }, /* DPHY mode*/
+ { GC2235_8BIT, 0x04, 0x10 },
+ { GC2235_8BIT, 0x05, 0x00 },
+ { GC2235_8BIT, 0x17, 0x01 },
+ { GC2235_8BIT, 0x22, 0x01 },
+ { GC2235_8BIT, 0x23, 0x05 },
+ { GC2235_8BIT, 0x24, 0x10 },
+ { GC2235_8BIT, 0x25, 0x10 },
+ { GC2235_8BIT, 0x26, 0x02 },
+ { GC2235_8BIT, 0x21, 0x10 },
+ { GC2235_8BIT, 0x29, 0x01 },
+ { GC2235_8BIT, 0x2a, 0x02 },
+ { GC2235_8BIT, 0x2b, 0x02 },
+ { GC2235_8BIT, 0x10, 0x01 }, /* disable mipi */
+ { GC2235_8BIT, 0xfe, 0x00 }, /* switch to P0 */
+ { GC2235_TOK_TERM, 0, 0 }
+};
+
+struct gc2235_resolution gc2235_res_preview[] = {
+
+ {
+ .desc = "gc2235_1600_900_30fps",
+ .width = 1600,
+ .height = 900,
+ .pix_clk_freq = 30,
+ .fps = 30,
+ .used = 0,
+ .pixels_per_line = 2132,
+ .lines_per_frame = 1068,
+ .bin_factor_x = 0,
+ .bin_factor_y = 0,
+ .bin_mode = 0,
+ .skip_frames = 3,
+ .regs = gc2235_1600_900_30fps,
+ },
+
+ {
+ .desc = "gc2235_1600_1066_30fps",
+ .width = 1616,
+ .height = 1082,
+ .pix_clk_freq = 30,
+ .fps = 30,
+ .used = 0,
+ .pixels_per_line = 2132,
+ .lines_per_frame = 1368,
+ .bin_factor_x = 0,
+ .bin_factor_y = 0,
+ .bin_mode = 0,
+ .skip_frames = 3,
+ .regs = gc2235_1616_1082_30fps,
+ },
+ {
+ .desc = "gc2235_1600_1200_30fps",
+ .width = 1616,
+ .height = 1216,
+ .pix_clk_freq = 30,
+ .fps = 30,
+ .used = 0,
+ .pixels_per_line = 2132,
+ .lines_per_frame = 1368,
+ .bin_factor_x = 0,
+ .bin_factor_y = 0,
+ .bin_mode = 0,
+ .skip_frames = 3,
+ .regs = gc2235_1616_1216_30fps,
+ },
+
+};
+#define N_RES_PREVIEW (ARRAY_SIZE(gc2235_res_preview))
+
+struct gc2235_resolution gc2235_res_still[] = {
+ {
+ .desc = "gc2235_1600_900_30fps",
+ .width = 1600,
+ .height = 900,
+ .pix_clk_freq = 30,
+ .fps = 30,
+ .used = 0,
+ .pixels_per_line = 2132,
+ .lines_per_frame = 1068,
+ .bin_factor_x = 0,
+ .bin_factor_y = 0,
+ .bin_mode = 0,
+ .skip_frames = 3,
+ .regs = gc2235_1600_900_30fps,
+ },
+ {
+ .desc = "gc2235_1600_1066_30fps",
+ .width = 1616,
+ .height = 1082,
+ .pix_clk_freq = 30,
+ .fps = 30,
+ .used = 0,
+ .pixels_per_line = 2132,
+ .lines_per_frame = 1368,
+ .bin_factor_x = 0,
+ .bin_factor_y = 0,
+ .bin_mode = 0,
+ .skip_frames = 3,
+ .regs = gc2235_1616_1082_30fps,
+ },
+ {
+ .desc = "gc2235_1600_1200_30fps",
+ .width = 1616,
+ .height = 1216,
+ .pix_clk_freq = 30,
+ .fps = 30,
+ .used = 0,
+ .pixels_per_line = 2132,
+ .lines_per_frame = 1368,
+ .bin_factor_x = 0,
+ .bin_factor_y = 0,
+ .bin_mode = 0,
+ .skip_frames = 3,
+ .regs = gc2235_1616_1216_30fps,
+ },
+
+};
+#define N_RES_STILL (ARRAY_SIZE(gc2235_res_still))
+
+struct gc2235_resolution gc2235_res_video[] = {
+ {
+ .desc = "gc2235_1296_736_30fps",
+ .width = 1296,
+ .height = 736,
+ .pix_clk_freq = 30,
+ .fps = 30,
+ .used = 0,
+ .pixels_per_line = 1828,
+ .lines_per_frame = 888,
+ .bin_factor_x = 0,
+ .bin_factor_y = 0,
+ .bin_mode = 0,
+ .skip_frames = 3,
+ .regs = gc2235_1296_736_30fps,
+ },
+ {
+ .desc = "gc2235_960_640_30fps",
+ .width = 960,
+ .height = 640,
+ .pix_clk_freq = 30,
+ .fps = 30,
+ .used = 0,
+ .pixels_per_line = 1492,
+ .lines_per_frame = 792,
+ .bin_factor_x = 0,
+ .bin_factor_y = 0,
+ .bin_mode = 0,
+ .skip_frames = 3,
+ .regs = gc2235_960_640_30fps,
+ },
+
+};
+#define N_RES_VIDEO (ARRAY_SIZE(gc2235_res_video))
+
+static struct gc2235_resolution *gc2235_res = gc2235_res_preview;
+static int N_RES = N_RES_PREVIEW;
+#endif
diff --git a/drivers/staging/media/atomisp/i2c/imx/Kconfig b/drivers/staging/media/atomisp/i2c/imx/Kconfig
new file mode 100644
index 000000000000..a39eeb3b6ad4
--- /dev/null
+++ b/drivers/staging/media/atomisp/i2c/imx/Kconfig
@@ -0,0 +1,9 @@
+config VIDEO_IMX
+	tristate "Sony IMX sensor support"
+ depends on I2C && VIDEO_V4L2 && VIDEO_MSRLIST_HELPER && m
+ ---help---
+ This is a Video4Linux2 sensor-level driver for the Sony
+ IMX RAW sensor.
+
+	  It currently depends on internal V4L2 extensions defined in
+	  the atomisp driver.
diff --git a/drivers/staging/media/atomisp/i2c/imx/Makefile b/drivers/staging/media/atomisp/i2c/imx/Makefile
new file mode 100644
index 000000000000..1d7f7ab94cac
--- /dev/null
+++ b/drivers/staging/media/atomisp/i2c/imx/Makefile
@@ -0,0 +1,8 @@
+obj-$(CONFIG_VIDEO_IMX) += imx1x5.o
+
+imx1x5-objs := imx.o drv201.o ad5816g.o dw9714.o dw9719.o dw9718.o vcm.o otp.o otp_imx.o otp_brcc064_e2prom.o otp_e2prom.o
+
+ov8858_driver-objs := ../ov8858.o dw9718.o vcm.o
+obj-$(CONFIG_VIDEO_OV8858) += ov8858_driver.o
+
+ccflags-y += -Werror
diff --git a/drivers/staging/media/atomisp/i2c/imx/ad5816g.c b/drivers/staging/media/atomisp/i2c/imx/ad5816g.c
new file mode 100644
index 000000000000..d68ebb49f002
--- /dev/null
+++ b/drivers/staging/media/atomisp/i2c/imx/ad5816g.c
@@ -0,0 +1,225 @@
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/gpio.h>
+#include <linux/init.h>
+#include <linux/i2c.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/kmod.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <media/v4l2-device.h>
+
+#include "ad5816g.h"
+
+struct ad5816g_device ad5816g_dev;
+
+static int ad5816g_i2c_rd8(struct i2c_client *client, u8 reg, u8 *val)
+{
+ struct i2c_msg msg[2];
+ u8 buf[2];
+ buf[0] = reg;
+ buf[1] = 0;
+
+ msg[0].addr = AD5816G_VCM_ADDR;
+ msg[0].flags = 0;
+ msg[0].len = 1;
+ msg[0].buf = &buf[0];
+
+ msg[1].addr = AD5816G_VCM_ADDR;
+ msg[1].flags = I2C_M_RD;
+ msg[1].len = 1;
+ msg[1].buf = &buf[1];
+ *val = 0;
+ if (i2c_transfer(client->adapter, msg, 2) != 2)
+ return -EIO;
+ *val = buf[1];
+ return 0;
+}
+
+static int ad5816g_i2c_wr8(struct i2c_client *client, u8 reg, u8 val)
+{
+ struct i2c_msg msg;
+ u8 buf[2];
+ buf[0] = reg;
+ buf[1] = val;
+ msg.addr = AD5816G_VCM_ADDR;
+ msg.flags = 0;
+ msg.len = 2;
+ msg.buf = &buf[0];
+ if (i2c_transfer(client->adapter, &msg, 1) != 1)
+ return -EIO;
+ return 0;
+}
+
+static int ad5816g_i2c_wr16(struct i2c_client *client, u8 reg, u16 val)
+{
+ struct i2c_msg msg;
+ u8 buf[3];
+ buf[0] = reg;
+ buf[1] = (u8)(val >> 8);
+ buf[2] = (u8)(val & 0xff);
+ msg.addr = AD5816G_VCM_ADDR;
+ msg.flags = 0;
+ msg.len = 3;
+ msg.buf = &buf[0];
+ if (i2c_transfer(client->adapter, &msg, 1) != 1)
+ return -EIO;
+ return 0;
+}
+
+static int ad5816g_set_arc_mode(struct i2c_client *client)
+{
+ int ret;
+
+ ret = ad5816g_i2c_wr8(client, AD5816G_CONTROL, AD5816G_ARC_EN);
+ if (ret)
+ return ret;
+
+ ret = ad5816g_i2c_wr8(client, AD5816G_MODE,
+ AD5816G_MODE_2_5M_SWITCH_CLOCK);
+ if (ret)
+ return ret;
+
+ ret = ad5816g_i2c_wr8(client, AD5816G_VCM_FREQ, AD5816G_DEF_FREQ);
+ return ret;
+}
+
+int ad5816g_vcm_power_up(struct v4l2_subdev *sd)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret;
+ u8 ad5816g_id;
+
+ /* Enable power */
+ ret = ad5816g_dev.platform_data->power_ctrl(sd, 1);
+ if (ret)
+ return ret;
+	/* Wait t1 + t2 (1100 us) before the first access to the AD5816G (VCM):
+	 * t1 (1 ms)   - time from VDD high to the first i2c command
+	 * t2 (100 us) - time needed to exit power-down mode
+	 */
+ usleep_range(1100, 2200);
+ /* Detect device */
+ ret = ad5816g_i2c_rd8(client, AD5816G_IC_INFO, &ad5816g_id);
+ if (ret < 0)
+ goto fail_powerdown;
+ if (ad5816g_id != AD5816G_ID) {
+ ret = -ENXIO;
+ goto fail_powerdown;
+ }
+ ret = ad5816g_set_arc_mode(client);
+ if (ret)
+ return ret;
+
+ /* set the VCM_THRESHOLD */
+ ret = ad5816g_i2c_wr8(client, AD5816G_VCM_THRESHOLD,
+ AD5816G_DEF_THRESHOLD);
+
+ return ret;
+
+fail_powerdown:
+ ad5816g_dev.platform_data->power_ctrl(sd, 0);
+ return ret;
+}
+
+int ad5816g_vcm_power_down(struct v4l2_subdev *sd)
+{
+ return ad5816g_dev.platform_data->power_ctrl(sd, 0);
+}
+
+
+int ad5816g_t_focus_vcm(struct v4l2_subdev *sd, u16 val)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ u16 data = val & VCM_CODE_MASK;
+
+ return ad5816g_i2c_wr16(client, AD5816G_VCM_CODE_MSB, data);
+}
+
+int ad5816g_t_focus_abs(struct v4l2_subdev *sd, s32 value)
+{
+ int ret;
+
+ value = clamp(value, 0, AD5816G_MAX_FOCUS_POS);
+ ret = ad5816g_t_focus_vcm(sd, value);
+ if (ret == 0) {
+ ad5816g_dev.number_of_steps = value - ad5816g_dev.focus;
+ ad5816g_dev.focus = value;
+ getnstimeofday(&(ad5816g_dev.timestamp_t_focus_abs));
+ }
+
+ return ret;
+}
+
+int ad5816g_t_focus_rel(struct v4l2_subdev *sd, s32 value)
+{
+
+ return ad5816g_t_focus_abs(sd, ad5816g_dev.focus + value);
+}
+
+int ad5816g_q_focus_status(struct v4l2_subdev *sd, s32 *value)
+{
+ u32 status = 0;
+ struct timespec temptime;
+ const struct timespec timedelay = {
+ 0,
+ min_t(u32, abs(ad5816g_dev.number_of_steps) * DELAY_PER_STEP_NS,
+ DELAY_MAX_PER_STEP_NS),
+ };
+
+ ktime_get_ts(&temptime);
+
+ temptime = timespec_sub(temptime, (ad5816g_dev.timestamp_t_focus_abs));
+
+ if (timespec_compare(&temptime, &timedelay) <= 0) {
+ status |= ATOMISP_FOCUS_STATUS_MOVING;
+ status |= ATOMISP_FOCUS_HP_IN_PROGRESS;
+ } else {
+ status |= ATOMISP_FOCUS_STATUS_ACCEPTS_NEW_MOVE;
+ status |= ATOMISP_FOCUS_HP_COMPLETE;
+ }
+ *value = status;
+
+ return 0;
+}
+
+int ad5816g_q_focus_abs(struct v4l2_subdev *sd, s32 *value)
+{
+ s32 val;
+
+ ad5816g_q_focus_status(sd, &val);
+
+ if (val & ATOMISP_FOCUS_STATUS_MOVING)
+ *value = ad5816g_dev.focus - ad5816g_dev.number_of_steps;
+ else
+ *value = ad5816g_dev.focus;
+
+ return 0;
+}
+
+int ad5816g_t_vcm_slew(struct v4l2_subdev *sd, s32 value)
+{
+ return 0;
+}
+
+int ad5816g_t_vcm_timing(struct v4l2_subdev *sd, s32 value)
+{
+ return 0;
+}
+
+int ad5816g_vcm_init(struct v4l2_subdev *sd)
+{
+ ad5816g_dev.platform_data = camera_get_af_platform_data();
+ return (NULL == ad5816g_dev.platform_data) ? -ENODEV : 0;
+}
diff --git a/drivers/staging/media/atomisp/i2c/imx/ad5816g.h b/drivers/staging/media/atomisp/i2c/imx/ad5816g.h
new file mode 100644
index 000000000000..f995c2eeada4
--- /dev/null
+++ b/drivers/staging/media/atomisp/i2c/imx/ad5816g.h
@@ -0,0 +1,49 @@
+#ifndef __AD5816G_H__
+#define __AD5816G_H__
+
+#include "../../include/linux/atomisp_platform.h"
+#include <linux/types.h>
+#include <linux/time.h>
+
+#define AD5816G_VCM_ADDR 0x0e
+
+/* ad5816g device structure */
+struct ad5816g_device {
+ const struct camera_af_platform_data *platform_data;
+ struct timespec timestamp_t_focus_abs;
+ struct timespec focus_time; /* Time when focus was last time set */
+ s32 focus; /* Current focus value */
+ s16 number_of_steps;
+};
+
+#define AD5816G_INVALID_CONFIG 0xffffffff
+#define AD5816G_MAX_FOCUS_POS 1023
+#define DELAY_PER_STEP_NS 1000000
+#define DELAY_MAX_PER_STEP_NS (1000000 * 1023)
+
+/* Register Definitions */
+#define AD5816G_IC_INFO 0x00
+#define AD5816G_IC_VERSION 0x01
+#define AD5816G_CONTROL 0x02
+#define AD5816G_VCM_CODE_MSB 0x03
+#define AD5816G_VCM_CODE_LSB 0x04
+#define AD5816G_STATUS 0x05
+#define AD5816G_MODE 0x06
+#define AD5816G_VCM_FREQ 0x07
+#define AD5816G_VCM_THRESHOLD 0x08
+
+/* ARC MODE ENABLE */
+#define AD5816G_ARC_EN 0x02
+/* ARC RES2 MODE */
+#define AD5816G_ARC_RES2 0x01
+/* ARC VCM FREQ - 78.1Hz */
+#define AD5816G_DEF_FREQ 0x7a
+/* ARC VCM THRESHOLD - 0x08 << 1 */
+#define AD5816G_DEF_THRESHOLD 0x64
+#define AD5816G_ID 0x24
+#define VCM_CODE_MASK 0x03ff
+
+#define AD5816G_MODE_2_5M_SWITCH_CLOCK 0x14
+
+#endif
+
diff --git a/drivers/staging/media/atomisp/i2c/imx/common.h b/drivers/staging/media/atomisp/i2c/imx/common.h
new file mode 100644
index 000000000000..7e525cef56ef
--- /dev/null
+++ b/drivers/staging/media/atomisp/i2c/imx/common.h
@@ -0,0 +1,65 @@
+#ifndef __COMMON_H__
+#define __COMMON_H__
+
+#define MAX_FPS_OPTIONS_SUPPORTED 3
+#define I2C_MSG_LENGTH 0x2
+#define E2PROM_2ADDR 0x80000000
+#define E2PROM_ADDR_MASK 0x7fffffff
+
+/* Defines for register writes and register array processing */
+#define IMX_BYTE_MAX 32
+#define IMX_SHORT_MAX 16
+#define I2C_RETRY_COUNT 5
+#define IMX_TOK_MASK 0xfff0
+
+enum imx_tok_type {
+ IMX_8BIT = 0x0001,
+ IMX_16BIT = 0x0002,
+ IMX_TOK_TERM = 0xf000, /* terminating token for reg list */
+ IMX_TOK_DELAY = 0xfe00 /* delay token for reg list */
+};
+
+/**
+ * struct imx_reg - MI sensor register format
+ * @type: type of the register
+ * @sreg: 16-bit offset to register
+ * @val: 8/16/32-bit register value
+ *
+ * Define a structure for sensor register initialization values
+ */
+struct imx_reg {
+ enum imx_tok_type type;
+ u16 sreg;
+ u32 val; /* @set value for read/mod/write, @mask */
+};
+
+struct imx_fps_setting {
+ int fps;
+ unsigned short pixels_per_line;
+ unsigned short lines_per_frame;
+ int mipi_freq; /* MIPI lane frequency in kHz */
+ const struct imx_reg *regs; /* regs that the fps setting needs */
+};
+
+struct imx_resolution {
+ const struct imx_fps_setting fps_options[MAX_FPS_OPTIONS_SUPPORTED];
+ u8 *desc;
+ const struct imx_reg *regs;
+ int res;
+ int width;
+ int height;
+ int fps;
+ unsigned short pixels_per_line;
+ unsigned short lines_per_frame;
+ int mipi_freq; /* MIPI lane frequency in kHz */
+ unsigned short skip_frames;
+ u8 bin_factor_x;
+ u8 bin_factor_y;
+ bool used;
+};
+
+#define GROUPED_PARAMETER_HOLD_ENABLE {IMX_8BIT, 0x0104, 0x1}
+#define GROUPED_PARAMETER_HOLD_DISABLE {IMX_8BIT, 0x0104, 0x0}
+
+int imx_write_reg(struct i2c_client *client, u16 data_length, u16 reg, u16 val);
+#endif
diff --git a/drivers/staging/media/atomisp/i2c/imx/drv201.c b/drivers/staging/media/atomisp/i2c/imx/drv201.c
new file mode 100644
index 000000000000..915e4019cfeb
--- /dev/null
+++ b/drivers/staging/media/atomisp/i2c/imx/drv201.c
@@ -0,0 +1,218 @@
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/gpio.h>
+#include <linux/init.h>
+#include <linux/i2c.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/kmod.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <media/v4l2-device.h>
+#include <asm/intel-mid.h>
+
+#include "drv201.h"
+
+static struct drv201_device drv201_dev;
+
+static int drv201_i2c_rd8(struct i2c_client *client, u8 reg, u8 *val)
+{
+ struct i2c_msg msg[2];
+ u8 buf[2];
+ buf[0] = reg;
+ buf[1] = 0;
+
+ msg[0].addr = DRV201_VCM_ADDR;
+ msg[0].flags = 0;
+ msg[0].len = 1;
+ msg[0].buf = &buf[0];
+
+ msg[1].addr = DRV201_VCM_ADDR;
+ msg[1].flags = I2C_M_RD;
+ msg[1].len = 1;
+ msg[1].buf = &buf[1];
+ *val = 0;
+ if (i2c_transfer(client->adapter, msg, 2) != 2)
+ return -EIO;
+ *val = buf[1];
+ return 0;
+}
+
+static int drv201_i2c_wr8(struct i2c_client *client, u8 reg, u8 val)
+{
+ struct i2c_msg msg;
+ u8 buf[2];
+ buf[0] = reg;
+ buf[1] = val;
+ msg.addr = DRV201_VCM_ADDR;
+ msg.flags = 0;
+ msg.len = 2;
+ msg.buf = &buf[0];
+ if (i2c_transfer(client->adapter, &msg, 1) != 1)
+ return -EIO;
+ return 0;
+}
+
+static int drv201_i2c_wr16(struct i2c_client *client, u8 reg, u16 val)
+{
+ struct i2c_msg msg;
+ u8 buf[3];
+ buf[0] = reg;
+ buf[1] = (u8)(val >> 8);
+ buf[2] = (u8)(val & 0xff);
+ msg.addr = DRV201_VCM_ADDR;
+ msg.flags = 0;
+ msg.len = 3;
+ msg.buf = &buf[0];
+ if (i2c_transfer(client->adapter, &msg, 1) != 1)
+ return -EIO;
+ return 0;
+}
+
+int drv201_vcm_power_up(struct v4l2_subdev *sd)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret;
+ u8 value;
+
+ /* Enable power */
+ ret = drv201_dev.platform_data->power_ctrl(sd, 1);
+ if (ret)
+ return ret;
+ /* Wait for VBAT to stabilize */
+ udelay(1);
+ /*
+	 * Jiggle the SCL pin to wake up the device: the DRV201 expects an
+	 * SCL low-to-high transition to wake up, so the first i2c access
+	 * is expected to fail. The write below is issued only to generate
+	 * that transition.
+ */
+ drv201_i2c_wr8(client, DRV201_CONTROL, DRV201_RESET);
+
+ /* Need 100us to transit from SHUTDOWN to STANDBY*/
+ usleep_range(WAKEUP_DELAY_US, WAKEUP_DELAY_US * 10);
+
+ /* Reset device */
+ ret = drv201_i2c_wr8(client, DRV201_CONTROL, DRV201_RESET);
+ if (ret < 0)
+ goto fail_powerdown;
+
+ /* Detect device */
+ ret = drv201_i2c_rd8(client, DRV201_CONTROL, &value);
+ if (ret < 0)
+ goto fail_powerdown;
+ if (value != DEFAULT_CONTROL_VAL) {
+ ret = -ENXIO;
+ goto fail_powerdown;
+ }
+
+ drv201_dev.focus = DRV201_MAX_FOCUS_POS;
+ drv201_dev.initialized = true;
+
+ return 0;
+fail_powerdown:
+ drv201_dev.platform_data->power_ctrl(sd, 0);
+ return ret;
+}
+
+int drv201_vcm_power_down(struct v4l2_subdev *sd)
+{
+ return drv201_dev.platform_data->power_ctrl(sd, 0);
+}
+
+
+int drv201_t_focus_vcm(struct v4l2_subdev *sd, u16 val)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ u16 data = val & VCM_CODE_MASK;
+
+ if (!drv201_dev.initialized)
+ return -ENODEV;
+ return drv201_i2c_wr16(client, DRV201_VCM_CURRENT, data);
+}
+
+int drv201_t_focus_abs(struct v4l2_subdev *sd, s32 value)
+{
+ int ret;
+
+ value = clamp(value, 0, DRV201_MAX_FOCUS_POS);
+ ret = drv201_t_focus_vcm(sd, value);
+ if (ret == 0) {
+ drv201_dev.number_of_steps = value - drv201_dev.focus;
+ drv201_dev.focus = value;
+ getnstimeofday(&(drv201_dev.timestamp_t_focus_abs));
+ }
+
+ return ret;
+}
+
+int drv201_t_focus_rel(struct v4l2_subdev *sd, s32 value)
+{
+ return drv201_t_focus_abs(sd, drv201_dev.focus + value);
+}
+
+int drv201_q_focus_status(struct v4l2_subdev *sd, s32 *value)
+{
+ u32 status = 0;
+ struct timespec temptime;
+ const struct timespec timedelay = {
+ 0,
+ min_t(u32, abs(drv201_dev.number_of_steps)*DELAY_PER_STEP_NS,
+ DELAY_MAX_PER_STEP_NS),
+ };
+
+ ktime_get_ts(&temptime);
+
+ temptime = timespec_sub(temptime, (drv201_dev.timestamp_t_focus_abs));
+
+ if (timespec_compare(&temptime, &timedelay) <= 0) {
+ status |= ATOMISP_FOCUS_STATUS_MOVING;
+ status |= ATOMISP_FOCUS_HP_IN_PROGRESS;
+ } else {
+ status |= ATOMISP_FOCUS_STATUS_ACCEPTS_NEW_MOVE;
+ status |= ATOMISP_FOCUS_HP_COMPLETE;
+ }
+ *value = status;
+
+ return 0;
+}
+
+int drv201_q_focus_abs(struct v4l2_subdev *sd, s32 *value)
+{
+ s32 val;
+
+ drv201_q_focus_status(sd, &val);
+
+ if (val & ATOMISP_FOCUS_STATUS_MOVING)
+ *value = drv201_dev.focus - drv201_dev.number_of_steps;
+ else
+ *value = drv201_dev.focus;
+
+ return 0;
+}
+
+int drv201_t_vcm_slew(struct v4l2_subdev *sd, s32 value)
+{
+ return 0;
+}
+
+int drv201_t_vcm_timing(struct v4l2_subdev *sd, s32 value)
+{
+ return 0;
+}
+
+int drv201_vcm_init(struct v4l2_subdev *sd)
+{
+ drv201_dev.platform_data = camera_get_af_platform_data();
+ return (NULL == drv201_dev.platform_data) ? -ENODEV : 0;
+}
diff --git a/drivers/staging/media/atomisp/i2c/imx/drv201.h b/drivers/staging/media/atomisp/i2c/imx/drv201.h
new file mode 100644
index 000000000000..8fc0ad116630
--- /dev/null
+++ b/drivers/staging/media/atomisp/i2c/imx/drv201.h
@@ -0,0 +1,38 @@
+#ifndef __DRV201_H__
+#define __DRV201_H__
+
+#include "../../include/linux/atomisp_platform.h"
+#include <linux/types.h>
+#include <linux/time.h>
+
+#define DRV201_VCM_ADDR 0x0e
+
+/* drv201 device structure */
+struct drv201_device {
+ const struct camera_af_platform_data *platform_data;
+ struct timespec timestamp_t_focus_abs;
+ struct timespec focus_time; /* Time when focus was last time set */
+ s32 focus; /* Current focus value */
+ s16 number_of_steps;
+ bool initialized; /* true if drv201 is detected */
+};
+
+#define DRV201_INVALID_CONFIG 0xffffffff
+#define DRV201_MAX_FOCUS_POS 1023
+#define DELAY_PER_STEP_NS 1000000
+#define DELAY_MAX_PER_STEP_NS (1000000 * 1023)
+
+#define DRV201_CONTROL 2
+#define DRV201_VCM_CURRENT 3
+#define DRV201_STATUS 5
+#define DRV201_MODE 6
+#define DRV201_VCM_FREQ 7
+
+#define DEFAULT_CONTROL_VAL 2
+#define DRV201_RESET 1
+#define WAKEUP_DELAY_US 100
+#define VCM_CODE_MASK 0x03ff
+
+#endif
diff --git a/drivers/staging/media/atomisp/i2c/imx/dw9714.c b/drivers/staging/media/atomisp/i2c/imx/dw9714.c
new file mode 100644
index 000000000000..b7dee1b6bb37
--- /dev/null
+++ b/drivers/staging/media/atomisp/i2c/imx/dw9714.c
@@ -0,0 +1,235 @@
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/gpio.h>
+#include <linux/init.h>
+#include <linux/i2c.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/kmod.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <media/v4l2-device.h>
+#include <asm/intel-mid.h>
+
+#include "dw9714.h"
+
+static struct dw9714_device dw9714_dev;
+static int dw9714_i2c_write(struct i2c_client *client, u16 data)
+{
+ struct i2c_msg msg;
+ const int num_msg = 1;
+ int ret;
+ u16 val;
+
+ val = cpu_to_be16(data);
+ msg.addr = DW9714_VCM_ADDR;
+ msg.flags = 0;
+ msg.len = DW9714_16BIT;
+ msg.buf = (u8 *)&val;
+
+ ret = i2c_transfer(client->adapter, &msg, 1);
+
+ return ret == num_msg ? 0 : -EIO;
+}
+
+int dw9714_vcm_power_up(struct v4l2_subdev *sd)
+{
+ int ret;
+
+ /* Enable power */
+ ret = dw9714_dev.platform_data->power_ctrl(sd, 1);
+ /* waiting time requested by DW9714A(vcm) */
+ usleep_range(12000, 12500);
+ return ret;
+}
+
+int dw9714_vcm_power_down(struct v4l2_subdev *sd)
+{
+ return dw9714_dev.platform_data->power_ctrl(sd, 0);
+}
+
+
+int dw9714_t_focus_vcm(struct v4l2_subdev *sd, u16 val)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret = -EINVAL;
+ u8 mclk = vcm_step_mclk(dw9714_dev.vcm_settings.step_setting);
+ u8 s = vcm_step_s(dw9714_dev.vcm_settings.step_setting);
+
+ /*
+ * For different mode, VCM_PROTECTION_OFF/ON required by the
+ * control procedure. For DW9714_DIRECT/DLC mode, slew value is
+ * VCM_DEFAULT_S(0).
+ */
+ switch (dw9714_dev.vcm_mode) {
+ case DW9714_DIRECT:
+ if (dw9714_dev.vcm_settings.update) {
+ ret = dw9714_i2c_write(client, VCM_PROTECTION_OFF);
+ if (ret)
+ return ret;
+ ret = dw9714_i2c_write(client, DIRECT_VCM);
+ if (ret)
+ return ret;
+ ret = dw9714_i2c_write(client, VCM_PROTECTION_ON);
+ if (ret)
+ return ret;
+ dw9714_dev.vcm_settings.update = false;
+ }
+ ret = dw9714_i2c_write(client,
+ vcm_val(val, VCM_DEFAULT_S));
+ break;
+ case DW9714_LSC:
+ if (dw9714_dev.vcm_settings.update) {
+ ret = dw9714_i2c_write(client, VCM_PROTECTION_OFF);
+ if (ret)
+ return ret;
+ ret = dw9714_i2c_write(client,
+ vcm_dlc_mclk(DLC_DISABLE, mclk));
+ if (ret)
+ return ret;
+ ret = dw9714_i2c_write(client,
+ vcm_tsrc(dw9714_dev.vcm_settings.t_src));
+ if (ret)
+ return ret;
+ ret = dw9714_i2c_write(client, VCM_PROTECTION_ON);
+ if (ret)
+ return ret;
+ dw9714_dev.vcm_settings.update = false;
+ }
+ ret = dw9714_i2c_write(client, vcm_val(val, s));
+ break;
+ case DW9714_DLC:
+ if (dw9714_dev.vcm_settings.update) {
+ ret = dw9714_i2c_write(client, VCM_PROTECTION_OFF);
+ if (ret)
+ return ret;
+ ret = dw9714_i2c_write(client,
+ vcm_dlc_mclk(DLC_ENABLE, mclk));
+ if (ret)
+ return ret;
+ ret = dw9714_i2c_write(client,
+ vcm_tsrc(dw9714_dev.vcm_settings.t_src));
+ if (ret)
+ return ret;
+ ret = dw9714_i2c_write(client, VCM_PROTECTION_ON);
+ if (ret)
+ return ret;
+ dw9714_dev.vcm_settings.update = false;
+ }
+ ret = dw9714_i2c_write(client,
+ vcm_val(val, VCM_DEFAULT_S));
+ break;
+ }
+ return ret;
+}
+
+int dw9714_t_focus_abs(struct v4l2_subdev *sd, s32 value)
+{
+ int ret;
+
+ value = clamp(value, 0, DW9714_MAX_FOCUS_POS);
+ ret = dw9714_t_focus_vcm(sd, value);
+ if (ret == 0) {
+ dw9714_dev.number_of_steps = value - dw9714_dev.focus;
+ dw9714_dev.focus = value;
+ getnstimeofday(&(dw9714_dev.timestamp_t_focus_abs));
+ }
+
+ return ret;
+}
+
+int dw9714_t_focus_abs_init(struct v4l2_subdev *sd)
+{
+ int ret;
+
+ ret = dw9714_t_focus_vcm(sd, DW9714_DEFAULT_FOCUS_POS);
+ if (ret == 0) {
+ dw9714_dev.number_of_steps =
+ DW9714_DEFAULT_FOCUS_POS - dw9714_dev.focus;
+ dw9714_dev.focus = DW9714_DEFAULT_FOCUS_POS;
+ getnstimeofday(&(dw9714_dev.timestamp_t_focus_abs));
+ }
+
+ return ret;
+}
+
+int dw9714_t_focus_rel(struct v4l2_subdev *sd, s32 value)
+{
+
+ return dw9714_t_focus_abs(sd, dw9714_dev.focus + value);
+}
+
+int dw9714_q_focus_status(struct v4l2_subdev *sd, s32 *value)
+{
+ u32 status = 0;
+ struct timespec temptime;
+ const struct timespec timedelay = {
+ 0,
+ min_t(u32, abs(dw9714_dev.number_of_steps)*DELAY_PER_STEP_NS,
+ DELAY_MAX_PER_STEP_NS),
+ };
+
+ ktime_get_ts(&temptime);
+
+ temptime = timespec_sub(temptime, (dw9714_dev.timestamp_t_focus_abs));
+
+ if (timespec_compare(&temptime, &timedelay) <= 0) {
+ status |= ATOMISP_FOCUS_STATUS_MOVING;
+ status |= ATOMISP_FOCUS_HP_IN_PROGRESS;
+ } else {
+ status |= ATOMISP_FOCUS_STATUS_ACCEPTS_NEW_MOVE;
+ status |= ATOMISP_FOCUS_HP_COMPLETE;
+ }
+ *value = status;
+
+ return 0;
+}
+
+int dw9714_q_focus_abs(struct v4l2_subdev *sd, s32 *value)
+{
+ s32 val;
+
+ dw9714_q_focus_status(sd, &val);
+
+ if (val & ATOMISP_FOCUS_STATUS_MOVING)
+ *value = dw9714_dev.focus - dw9714_dev.number_of_steps;
+ else
+ *value = dw9714_dev.focus;
+
+ return 0;
+}
+
+int dw9714_t_vcm_slew(struct v4l2_subdev *sd, s32 value)
+{
+ dw9714_dev.vcm_settings.step_setting = value;
+ dw9714_dev.vcm_settings.update = true;
+
+ return 0;
+}
+
+int dw9714_t_vcm_timing(struct v4l2_subdev *sd, s32 value)
+{
+ dw9714_dev.vcm_settings.t_src = value;
+ dw9714_dev.vcm_settings.update = true;
+
+ return 0;
+}
+
+int dw9714_vcm_init(struct v4l2_subdev *sd)
+{
+
+ /* set VCM to home position and vcm mode to direct*/
+ dw9714_dev.vcm_mode = DW9714_DIRECT;
+ dw9714_dev.vcm_settings.update = false;
+ dw9714_dev.platform_data = camera_get_af_platform_data();
+ return (NULL == dw9714_dev.platform_data) ? -ENODEV : 0;
+}
diff --git a/drivers/staging/media/atomisp/i2c/imx/dw9714.h b/drivers/staging/media/atomisp/i2c/imx/dw9714.h
new file mode 100644
index 000000000000..5a98a9c97182
--- /dev/null
+++ b/drivers/staging/media/atomisp/i2c/imx/dw9714.h
@@ -0,0 +1,63 @@
+#ifndef __DW9714_H__
+#define __DW9714_H__
+
+#include "../../include/linux/atomisp_platform.h"
+#include <linux/types.h>
+
+
+#define DW9714_VCM_ADDR 0x0c
+
+enum dw9714_tok_type {
+ DW9714_8BIT = 0x0001,
+ DW9714_16BIT = 0x0002,
+};
+
+struct dw9714_vcm_settings {
+ u16 code; /* bit[9:0]: Data[9:0] */
+ u8 t_src; /* bit[4:0]: T_SRC[4:0] */
+ u8 step_setting; /* bit[3:0]: S[3:0]/bit[5:4]: MCLK[1:0] */
+ bool update;
+};
+
+enum dw9714_vcm_mode {
+ DW9714_DIRECT = 0x1, /* direct control */
+ DW9714_LSC = 0x2, /* linear slope control */
+ DW9714_DLC = 0x3, /* dual level control */
+};
+
+/* dw9714 device structure */
+struct dw9714_device {
+ struct dw9714_vcm_settings vcm_settings;
+ struct timespec timestamp_t_focus_abs;
+ enum dw9714_vcm_mode vcm_mode;
+ s16 number_of_steps;
+ bool initialized; /* true if dw9714 is detected */
+ s32 focus; /* Current focus value */
+ struct timespec focus_time; /* Time when focus was last time set */
+ __u8 buffer[4]; /* Used for i2c transactions */
+ const struct camera_af_platform_data *platform_data;
+};
+
+#define DW9714_INVALID_CONFIG 0xffffffff
+#define DW9714_MAX_FOCUS_POS 1023
+#define DW9714_DEFAULT_FOCUS_POS 290
+
+
+/* MCLK[1:0] = 01 T_SRC[4:0] = 00001 S[3:0] = 0111 */
+#define DELAY_PER_STEP_NS 1000000
+#define DELAY_MAX_PER_STEP_NS (1000000 * 1023)
+
+#define DLC_ENABLE 1
+#define DLC_DISABLE 0
+#define VCM_PROTECTION_OFF 0xeca3
+#define VCM_PROTECTION_ON 0xdc51
+#define VCM_DEFAULT_S 0x0
+
+#define vcm_step_s(a) (u8)(a & 0xf)
+#define vcm_step_mclk(a) (u8)((a >> 4) & 0x3)
+#define vcm_dlc_mclk(dlc, mclk) (u16)((dlc << 3) | mclk | 0xa104)
+#define vcm_tsrc(tsrc) (u16)(tsrc << 3 | 0xf200)
+#define vcm_val(data, s) (u16)(data << 4 | s)
+#define DIRECT_VCM vcm_dlc_mclk(0, 0)
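+/* e.g. DIRECT_VCM expands to (0 << 3) | 0 | 0xa104 = 0xa104 */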
+
+#endif
diff --git a/drivers/staging/media/atomisp/i2c/imx/dw9718.c b/drivers/staging/media/atomisp/i2c/imx/dw9718.c
new file mode 100644
index 000000000000..65a1fcf187d5
--- /dev/null
+++ b/drivers/staging/media/atomisp/i2c/imx/dw9718.c
@@ -0,0 +1,238 @@
+/*
+ * Support for dw9718 vcm driver.
+ *
+ * Copyright (c) 2014 Intel Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+
+#include <linux/delay.h>
+#include "dw9718.h"
+
+static struct dw9718_device dw9718_dev;
+
+static int dw9718_i2c_rd8(struct i2c_client *client, u8 reg, u8 *val)
+{
+ struct i2c_msg msg[2];
+ u8 buf[2] = { reg };
+
+ msg[0].addr = DW9718_VCM_ADDR;
+ msg[0].flags = 0;
+ msg[0].len = 1;
+ msg[0].buf = buf;
+
+ msg[1].addr = DW9718_VCM_ADDR;
+ msg[1].flags = I2C_M_RD;
+ msg[1].len = 1;
+ msg[1].buf = &buf[1];
+ *val = 0;
+
+ if (i2c_transfer(client->adapter, msg, 2) != 2)
+ return -EIO;
+ *val = buf[1];
+
+ return 0;
+}
+
+static int dw9718_i2c_wr8(struct i2c_client *client, u8 reg, u8 val)
+{
+ struct i2c_msg msg;
+ u8 buf[2] = { reg, val};
+
+ msg.addr = DW9718_VCM_ADDR;
+ msg.flags = 0;
+ msg.len = sizeof(buf);
+ msg.buf = buf;
+
+ if (i2c_transfer(client->adapter, &msg, 1) != 1)
+ return -EIO;
+
+ return 0;
+}
+
+static int dw9718_i2c_wr16(struct i2c_client *client, u8 reg, u16 val)
+{
+ struct i2c_msg msg;
+ u8 buf[3] = { reg, (u8)(val >> 8), (u8)(val & 0xff)};
+
+ msg.addr = DW9718_VCM_ADDR;
+ msg.flags = 0;
+ msg.len = sizeof(buf);
+ msg.buf = buf;
+
+ if (i2c_transfer(client->adapter, &msg, 1) != 1)
+ return -EIO;
+
+ return 0;
+}
+
+int dw9718_t_focus_abs(struct v4l2_subdev *sd, s32 value)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret;
+
+ value = clamp(value, 0, DW9718_MAX_FOCUS_POS);
+ ret = dw9718_i2c_wr16(client, DW9718_DATA_M, value);
+ /*pr_info("%s: value = %d\n", __func__, value);*/
+ if (ret < 0)
+ return ret;
+
+ getnstimeofday(&dw9718_dev.focus_time);
+ dw9718_dev.focus = value;
+
+ return 0;
+}
+
+int dw9718_vcm_power_up(struct v4l2_subdev *sd)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret;
+ u8 value;
+
+ if (dw9718_dev.power_on)
+ return 0;
+
+ /* Enable power */
+ ret = dw9718_dev.platform_data->power_ctrl(sd, 1);
+ if (ret) {
+ dev_err(&client->dev, "DW9718_PD power_ctrl failed %d\n", ret);
+ return ret;
+ }
+ /* Wait for VBAT to stabilize */
+ udelay(100);
+
+ /* Detect device */
+ ret = dw9718_i2c_rd8(client, DW9718_SACT, &value);
+ if (ret < 0) {
+ dev_err(&client->dev, "read DW9718_SACT failed %d\n", ret);
+ goto fail_powerdown;
+ }
+ /*
+	 * WORKAROUND: on module P8V12F-203, used on the Cherrytrail
+	 * Refresh Davis Reef AoB, register SACT does not return the
+	 * default value given in the spec, although the VCM works as
+	 * expected; the root cause is still under discussion with the
+	 * vendor. Work around this by not aborting the power-up
+	 * sequence and only warning about the mismatch.
+ */
+ if (value != DW9718_SACT_DEFAULT_VAL)
+ dev_warn(&client->dev, "%s error, incorrect ID\n", __func__);
+
+ /* Initialize according to recommended settings */
+ ret = dw9718_i2c_wr8(client, DW9718_CONTROL,
+ DW9718_CONTROL_SW_LINEAR |
+ DW9718_CONTROL_S_SAC4 |
+ DW9718_CONTROL_OCP_DISABLE |
+ DW9718_CONTROL_UVLO_DISABLE);
+ if (ret < 0) {
+ dev_err(&client->dev, "write DW9718_CONTROL failed %d\n", ret);
+ goto fail_powerdown;
+ }
+ ret = dw9718_i2c_wr8(client, DW9718_SACT,
+ DW9718_SACT_MULT_TWO |
+ DW9718_SACT_PERIOD_8_8MS);
+ if (ret < 0) {
+ dev_err(&client->dev, "write DW9718_SACT failed %d\n", ret);
+ goto fail_powerdown;
+ }
+
+ ret = dw9718_t_focus_abs(sd, dw9718_dev.focus);
+ if (ret)
+ return ret;
+ dw9718_dev.initialized = true;
+ dw9718_dev.power_on = 1;
+
+ return 0;
+
+fail_powerdown:
+ dev_err(&client->dev, "%s error, powerup failed\n", __func__);
+ dw9718_dev.platform_data->power_ctrl(sd, 0);
+ return ret;
+}
+
+int dw9718_vcm_power_down(struct v4l2_subdev *sd)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret;
+
+ if (!dw9718_dev.power_on)
+ return 0;
+
+ ret = dw9718_dev.platform_data->power_ctrl(sd, 0);
+ if (ret) {
+ dev_err(&client->dev, "%s power_ctrl failed\n",
+ __func__);
+ return ret;
+ }
+ dw9718_dev.power_on = 0;
+
+ return 0;
+}
+
+int dw9718_q_focus_status(struct v4l2_subdev *sd, s32 *value)
+{
+ static const struct timespec move_time = {
+ .tv_sec = 0,
+ .tv_nsec = 60000000
+ };
+ struct timespec current_time, finish_time, delta_time;
+
+ getnstimeofday(&current_time);
+ finish_time = timespec_add(dw9718_dev.focus_time, move_time);
+ delta_time = timespec_sub(current_time, finish_time);
+ if (delta_time.tv_sec >= 0 && delta_time.tv_nsec >= 0) {
+ *value = ATOMISP_FOCUS_HP_COMPLETE |
+ ATOMISP_FOCUS_STATUS_ACCEPTS_NEW_MOVE;
+ } else {
+ *value = ATOMISP_FOCUS_STATUS_MOVING |
+ ATOMISP_FOCUS_HP_IN_PROGRESS;
+ }
+
+ return 0;
+}
+
+int dw9718_t_focus_vcm(struct v4l2_subdev *sd, u16 val)
+{
+ return -EINVAL;
+}
+
+int dw9718_t_focus_rel(struct v4l2_subdev *sd, s32 value)
+{
+ return dw9718_t_focus_abs(sd, dw9718_dev.focus + value);
+}
+
+int dw9718_q_focus_abs(struct v4l2_subdev *sd, s32 *value)
+{
+ *value = dw9718_dev.focus;
+ return 0;
+}
+int dw9718_t_vcm_slew(struct v4l2_subdev *sd, s32 value)
+{
+ return 0;
+}
+
+int dw9718_t_vcm_timing(struct v4l2_subdev *sd, s32 value)
+{
+ return 0;
+}
+
+int dw9718_vcm_init(struct v4l2_subdev *sd)
+{
+ dw9718_dev.platform_data = camera_get_af_platform_data();
+ dw9718_dev.focus = DW9718_DEFAULT_FOCUS_POSITION;
+ dw9718_dev.power_on = 0;
+ return (NULL == dw9718_dev.platform_data) ? -ENODEV : 0;
+}
diff --git a/drivers/staging/media/atomisp/i2c/imx/dw9718.h b/drivers/staging/media/atomisp/i2c/imx/dw9718.h
new file mode 100644
index 000000000000..4a1040c3149f
--- /dev/null
+++ b/drivers/staging/media/atomisp/i2c/imx/dw9718.h
@@ -0,0 +1,64 @@
+/*
+ * Support for dw9718 vcm driver.
+ *
+ * Copyright (c) 2014 Intel Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+
+#ifndef __DW9718_H__
+#define __DW9718_H__
+
+#include "../../include/linux/atomisp_platform.h"
+#include <linux/types.h>
+
+#define DW9718_VCM_ADDR (0x18 >> 1)
+
+/* dw9718 device structure */
+struct dw9718_device {
+ struct timespec timestamp_t_focus_abs;
+ s16 number_of_steps;
+ bool initialized; /* true if dw9718 is detected */
+ s32 focus; /* Current focus value */
+ struct timespec focus_time; /* Time when focus was last time set */
+ __u8 buffer[4]; /* Used for i2c transactions */
+ const struct camera_af_platform_data *platform_data;
+ __u8 power_on;
+};
+
+#define DW9718_MAX_FOCUS_POS 1023
+
+/* Register addresses */
+#define DW9718_PD 0x00
+#define DW9718_CONTROL 0x01
+#define DW9718_DATA_M 0x02
+#define DW9718_DATA_L 0x03
+#define DW9718_SW 0x04
+#define DW9718_SACT 0x05
+#define DW9718_FLAG 0x10
+
+#define DW9718_CONTROL_SW_LINEAR BIT(0)
+#define DW9718_CONTROL_S_SAC4 (BIT(1) | BIT(3))
+#define DW9718_CONTROL_OCP_DISABLE BIT(4)
+#define DW9718_CONTROL_UVLO_DISABLE BIT(5)
+
+#define DW9718_SACT_MULT_TWO 0x00
+#define DW9718_SACT_PERIOD_8_8MS 0x19
+#define DW9718_SACT_DEFAULT_VAL 0x60
+
+#define DW9718_DEFAULT_FOCUS_POSITION 300
+
+#endif /* __DW9718_H__ */
diff --git a/drivers/staging/media/atomisp/i2c/imx/dw9719.c b/drivers/staging/media/atomisp/i2c/imx/dw9719.c
new file mode 100644
index 000000000000..eca2d7640030
--- /dev/null
+++ b/drivers/staging/media/atomisp/i2c/imx/dw9719.c
@@ -0,0 +1,209 @@
+/*
+ * Support for dw9719 vcm driver.
+ *
+ * Copyright (c) 2012 Intel Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+
+#include <linux/delay.h>
+#include "dw9719.h"
+
+static struct dw9719_device dw9719_dev;
+
+static int dw9719_i2c_rd8(struct i2c_client *client, u8 reg, u8 *val)
+{
+ struct i2c_msg msg[2];
+ u8 buf[2] = { reg };
+
+ msg[0].addr = DW9719_VCM_ADDR;
+ msg[0].flags = 0;
+ msg[0].len = 1;
+ msg[0].buf = buf;
+
+ msg[1].addr = DW9719_VCM_ADDR;
+ msg[1].flags = I2C_M_RD;
+ msg[1].len = 1;
+ msg[1].buf = &buf[1];
+ *val = 0;
+
+ if (i2c_transfer(client->adapter, msg, 2) != 2)
+ return -EIO;
+ *val = buf[1];
+
+ return 0;
+}
+
+static int dw9719_i2c_wr8(struct i2c_client *client, u8 reg, u8 val)
+{
+ struct i2c_msg msg;
+ u8 buf[2] = { reg, val };
+
+ msg.addr = DW9719_VCM_ADDR;
+ msg.flags = 0;
+ msg.len = sizeof(buf);
+ msg.buf = buf;
+
+ if (i2c_transfer(client->adapter, &msg, 1) != 1)
+ return -EIO;
+
+ return 0;
+}
+
+static int dw9719_i2c_wr16(struct i2c_client *client, u8 reg, u16 val)
+{
+ struct i2c_msg msg;
+ u8 buf[3] = { reg, (u8)(val >> 8), (u8)(val & 0xff)};
+
+ msg.addr = DW9719_VCM_ADDR;
+ msg.flags = 0;
+ msg.len = sizeof(buf);
+ msg.buf = buf;
+
+ if (i2c_transfer(client->adapter, &msg, 1) != 1)
+ return -EIO;
+
+ return 0;
+}
+
+int dw9719_vcm_power_up(struct v4l2_subdev *sd)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret;
+ u8 value;
+
+ /* Enable power */
+ ret = dw9719_dev.platform_data->power_ctrl(sd, 1);
+ /* waiting time requested by DW9714A(vcm) */
+ if (ret)
+ return ret;
+ /* Wait for VBAT to stabilize */
+ udelay(1);
+
+ /*
+ * Jiggle SCL pin to wake up device.
+ */
+ ret = dw9719_i2c_wr8(client, DW9719_CONTROL, 1);
+	/* Need 100us to transition from SHUTDOWN to STANDBY */
+ usleep_range(100, 1000);
+
+ /* Enable the ringing compensation */
+ ret = dw9719_i2c_wr8(client, DW9719_CONTROL, DW9719_ENABLE_RINGING);
+ if (ret < 0)
+ goto fail_powerdown;
+
+ /* Use SAC3 mode */
+ ret = dw9719_i2c_wr8(client, DW9719_MODE, DW9719_MODE_SAC3);
+ if (ret < 0)
+ goto fail_powerdown;
+
+ /* Set the resonance frequency */
+ ret = dw9719_i2c_wr8(client, DW9719_VCM_FREQ, DW9719_DEFAULT_VCM_FREQ);
+ if (ret < 0)
+ goto fail_powerdown;
+
+ /* Detect device */
+ ret = dw9719_i2c_rd8(client, DW9719_INFO, &value);
+ if (ret < 0)
+ goto fail_powerdown;
+ if (value != DW9719_ID) {
+ ret = -ENXIO;
+ goto fail_powerdown;
+ }
+ dw9719_dev.focus = 0;
+ dw9719_dev.initialized = true;
+
+ return 0;
+
+fail_powerdown:
+ dw9719_dev.platform_data->power_ctrl(sd, 0);
+ return ret;
+}
+
+int dw9719_vcm_power_down(struct v4l2_subdev *sd)
+{
+ return dw9719_dev.platform_data->power_ctrl(sd, 0);
+}
+
+int dw9719_q_focus_status(struct v4l2_subdev *sd, s32 *value)
+{
+ static const struct timespec move_time = {
+ .tv_sec = 0,
+ .tv_nsec = 60000000
+ };
+ struct timespec current_time, finish_time, delta_time;
+
+ getnstimeofday(&current_time);
+ finish_time = timespec_add(dw9719_dev.focus_time, move_time);
+ delta_time = timespec_sub(current_time, finish_time);
+ if (delta_time.tv_sec >= 0 && delta_time.tv_nsec >= 0) {
+ *value = ATOMISP_FOCUS_HP_COMPLETE |
+ ATOMISP_FOCUS_STATUS_ACCEPTS_NEW_MOVE;
+ } else {
+ *value = ATOMISP_FOCUS_STATUS_MOVING |
+ ATOMISP_FOCUS_HP_IN_PROGRESS;
+ }
+
+ return 0;
+}
+
+int dw9719_t_focus_vcm(struct v4l2_subdev *sd, u16 val)
+{
+ return -EINVAL;
+}
+
+int dw9719_t_focus_abs(struct v4l2_subdev *sd, s32 value)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret;
+
+ value = clamp(value, 0, DW9719_MAX_FOCUS_POS);
+ ret = dw9719_i2c_wr16(client, DW9719_VCM_CURRENT, value);
+ if (ret < 0)
+ return ret;
+
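+	/* Record the move time so dw9719_q_focus_status() can report completion. */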
+ getnstimeofday(&dw9719_dev.focus_time);
+ dw9719_dev.focus = value;
+
+ return 0;
+}
+
+int dw9719_t_focus_rel(struct v4l2_subdev *sd, s32 value)
+{
+ return dw9719_t_focus_abs(sd, dw9719_dev.focus + value);
+}
+
+int dw9719_q_focus_abs(struct v4l2_subdev *sd, s32 *value)
+{
+ *value = dw9719_dev.focus;
+ return 0;
+}
+
+int dw9719_t_vcm_slew(struct v4l2_subdev *sd, s32 value)
+{
+ return 0;
+}
+
+int dw9719_t_vcm_timing(struct v4l2_subdev *sd, s32 value)
+{
+ return 0;
+}
+
+int dw9719_vcm_init(struct v4l2_subdev *sd)
+{
+ dw9719_dev.platform_data = camera_get_af_platform_data();
+	return dw9719_dev.platform_data ? 0 : -ENODEV;
+}
diff --git a/drivers/staging/media/atomisp/i2c/imx/dw9719.h b/drivers/staging/media/atomisp/i2c/imx/dw9719.h
new file mode 100644
index 000000000000..711f412aef2a
--- /dev/null
+++ b/drivers/staging/media/atomisp/i2c/imx/dw9719.h
@@ -0,0 +1,58 @@
+/*
+ * Support for dw9719 vcm driver.
+ *
+ * Copyright (c) 2012 Intel Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+
+#ifndef __DW9719_H__
+#define __DW9719_H__
+
+#include "../../include/linux/atomisp_platform.h"
+#include <linux/types.h>
+
+#define DW9719_VCM_ADDR (0x18 >> 1)
+
+/* dw9719 device structure */
+struct dw9719_device {
+ struct timespec timestamp_t_focus_abs;
+ s16 number_of_steps;
+ bool initialized; /* true if dw9719 is detected */
+ s32 focus; /* Current focus value */
+ struct timespec focus_time; /* Time when focus was last time set */
+ __u8 buffer[4]; /* Used for i2c transactions */
+ const struct camera_af_platform_data *platform_data;
+};
+
+#define DW9719_INVALID_CONFIG 0xffffffff
+#define DW9719_MAX_FOCUS_POS 1023
+#define DELAY_PER_STEP_NS 1000000
+#define DELAY_MAX_PER_STEP_NS (1000000 * 1023)
+
+#define DW9719_INFO 0
+#define DW9719_ID 0xF1
+#define DW9719_CONTROL 2
+#define DW9719_VCM_CURRENT 3
+
+#define DW9719_MODE 6
+#define DW9719_VCM_FREQ 7
+
+#define DW9719_MODE_SAC3 0x40
+#define DW9719_DEFAULT_VCM_FREQ 0x04
+#define DW9719_ENABLE_RINGING 0x02
+
+#endif
diff --git a/drivers/staging/media/atomisp/i2c/imx/imx.c b/drivers/staging/media/atomisp/i2c/imx/imx.c
new file mode 100644
index 000000000000..408a7b945153
--- /dev/null
+++ b/drivers/staging/media/atomisp/i2c/imx/imx.c
@@ -0,0 +1,2512 @@
+/*
+ * Support for Sony imx 8MP camera sensor.
+ *
+ * Copyright (c) 2012 Intel Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+#include <asm/intel-mid.h>
+#include "../../include/linux/atomisp_platform.h"
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/gpio.h>
+#include <linux/init.h>
+#include <linux/i2c.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include "../../include/linux/libmsrlisthelper.h"
+#include <linux/mm.h>
+#include <linux/kmod.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include "imx.h"
+
+/*
+ * The imx135 embedded data info:
+ * embedded data line num: 2
+ * line 0 effective data size(byte): 76
+ * line 1 effective data size(byte): 113
+ */
+static const uint32_t
+ imx135_embedded_effective_size[IMX135_EMBEDDED_DATA_LINE_NUM]
+ = {76, 113};
+
+static enum atomisp_bayer_order imx_bayer_order_mapping[] = {
+ atomisp_bayer_order_rggb,
+ atomisp_bayer_order_grbg,
+ atomisp_bayer_order_gbrg,
+ atomisp_bayer_order_bggr
+};
+
+static const unsigned int
+IMX227_BRACKETING_LUT_FRAME_ENTRY[IMX_MAX_AE_LUT_LENGTH] = {
+ 0x0E10, 0x0E1E, 0x0E2C, 0x0E3A, 0x0E48};
+
+static int
+imx_read_reg(struct i2c_client *client, u16 len, u16 reg, u16 *val)
+{
+ struct i2c_msg msg[2];
+ u16 data[IMX_SHORT_MAX];
+ int ret, i;
+ int retry = 0;
+
+ if (len > IMX_BYTE_MAX) {
+ dev_err(&client->dev, "%s error, invalid data length\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ do {
+		memset(msg, 0, sizeof(msg));
+		memset(data, 0, sizeof(data));
+
+ msg[0].addr = client->addr;
+ msg[0].flags = 0;
+ msg[0].len = I2C_MSG_LENGTH;
+ msg[0].buf = (u8 *)data;
+ /* high byte goes first */
+ data[0] = cpu_to_be16(reg);
+
+ msg[1].addr = client->addr;
+ msg[1].len = len;
+ msg[1].flags = I2C_M_RD;
+ msg[1].buf = (u8 *)data;
+
+ ret = i2c_transfer(client->adapter, msg, 2);
+ if (ret != 2) {
+ dev_err(&client->dev,
+ "retrying i2c read from offset 0x%x error %d... %d\n",
+ reg, ret, retry);
+ msleep(20);
+ }
+ } while (ret != 2 && retry++ < I2C_RETRY_COUNT);
+
+ if (ret != 2)
+ return -EIO;
+
+ /* high byte comes first */
+ if (len == IMX_8BIT) {
+ *val = (u8)data[0];
+ } else {
+ /* 16-bit access is default when len > 1 */
+ for (i = 0; i < (len >> 1); i++)
+ val[i] = be16_to_cpu(data[i]);
+ }
+
+ return 0;
+}
+
+static int imx_i2c_write(struct i2c_client *client, u16 len, u8 *data)
+{
+ struct i2c_msg msg;
+ int ret;
+ int retry = 0;
+
+ do {
+ msg.addr = client->addr;
+ msg.flags = 0;
+ msg.len = len;
+ msg.buf = data;
+
+ ret = i2c_transfer(client->adapter, &msg, 1);
+ if (ret != 1) {
+ dev_err(&client->dev,
+ "retrying i2c write transfer... %d\n", retry);
+ msleep(20);
+ }
+ } while (ret != 1 && retry++ < I2C_RETRY_COUNT);
+
+ return ret == 1 ? 0 : -EIO;
+}
+
+int
+imx_write_reg(struct i2c_client *client, u16 data_length, u16 reg, u16 val)
+{
+ int ret;
+ unsigned char data[4] = {0};
+ u16 *wreg = (u16 *)data;
+ const u16 len = data_length + sizeof(u16); /* 16-bit address + data */
+
+ if (data_length != IMX_8BIT && data_length != IMX_16BIT) {
+ v4l2_err(client, "%s error, invalid data_length\n", __func__);
+ return -EINVAL;
+ }
+
+ /* high byte goes out first */
+ *wreg = cpu_to_be16(reg);
+
+ if (data_length == IMX_8BIT)
+ data[2] = (u8)(val);
+ else {
+ /* IMX_16BIT */
+ u16 *wdata = (u16 *)&data[2];
+ *wdata = cpu_to_be16(val);
+ }
+
+ ret = imx_i2c_write(client, len, data);
+ if (ret)
+ dev_err(&client->dev,
+ "write error: wrote 0x%x to offset 0x%x error %d",
+ val, reg, ret);
+
+ return ret;
+}
+
+/*
+ * imx_write_reg_array - Initializes a list of imx registers
+ * @client: i2c driver client structure
+ * @reglist: list of registers to be written
+ *
+ * This function initializes a list of registers. When consecutive addresses
+ * are found in a row on the list, this function creates a buffer and sends
+ * consecutive data in a single i2c_transfer().
+ *
+ * __imx_flush_reg_array, __imx_buf_reg_array() and
+ * __imx_write_reg_is_consecutive() are internal functions to
+ * imx_write_reg_array() and should not be used anywhere else.
+ *
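+ * For example (illustrative), buffering two consecutive 8-bit writes
+ * 0x0202=0x01 and 0x0203=0x23 results in a single 4-byte i2c payload:
+ * { 0x02, 0x02, 0x01, 0x23 } (big-endian start address followed by data).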
+ */
+
+static int __imx_flush_reg_array(struct i2c_client *client,
+ struct imx_write_ctrl *ctrl)
+{
+ u16 size;
+
+ if (ctrl->index == 0)
+ return 0;
+
+ size = sizeof(u16) + ctrl->index; /* 16-bit address + data */
+ ctrl->buffer.addr = cpu_to_be16(ctrl->buffer.addr);
+ ctrl->index = 0;
+
+ return imx_i2c_write(client, size, (u8 *)&ctrl->buffer);
+}
+
+static int __imx_buf_reg_array(struct i2c_client *client,
+ struct imx_write_ctrl *ctrl,
+ const struct imx_reg *next)
+{
+ int size;
+ u16 *data16;
+
+ switch (next->type) {
+ case IMX_8BIT:
+ size = 1;
+ ctrl->buffer.data[ctrl->index] = (u8)next->val;
+ break;
+ case IMX_16BIT:
+ size = 2;
+ data16 = (u16 *)&ctrl->buffer.data[ctrl->index];
+ *data16 = cpu_to_be16((u16)next->val);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* When first item is added, we need to store its starting address */
+ if (ctrl->index == 0)
+ ctrl->buffer.addr = next->sreg;
+
+ ctrl->index += size;
+
+	/*
+	 * If the buffer cannot guarantee free space for the next 16-bit
+	 * value, flush it now to avoid running out of room on the next item.
+	 */
+ if (ctrl->index + sizeof(u16) >= IMX_MAX_WRITE_BUF_SIZE)
+ return __imx_flush_reg_array(client, ctrl);
+
+ return 0;
+}
+
+static int
+__imx_write_reg_is_consecutive(struct i2c_client *client,
+ struct imx_write_ctrl *ctrl,
+ const struct imx_reg *next)
+{
+ if (ctrl->index == 0)
+ return 1;
+
+ return ctrl->buffer.addr + ctrl->index == next->sreg;
+}
+
+static int imx_write_reg_array(struct i2c_client *client,
+ const struct imx_reg *reglist)
+{
+ const struct imx_reg *next = reglist;
+ struct imx_write_ctrl ctrl;
+ int err;
+
+ ctrl.index = 0;
+ for (; next->type != IMX_TOK_TERM; next++) {
+ switch (next->type & IMX_TOK_MASK) {
+ case IMX_TOK_DELAY:
+ err = __imx_flush_reg_array(client, &ctrl);
+ if (err)
+ return err;
+ msleep(next->val);
+ break;
+
+ default:
+			/*
+			 * If the next address is not consecutive, the buffered
+			 * data needs to be flushed before proceeding.
+			 */
+ if (!__imx_write_reg_is_consecutive(client, &ctrl,
+ next)) {
+ err = __imx_flush_reg_array(client, &ctrl);
+ if (err)
+ return err;
+ }
+ err = __imx_buf_reg_array(client, &ctrl, next);
+ if (err) {
+ v4l2_err(client, "%s: write error, aborted\n",
+ __func__);
+ return err;
+ }
+ break;
+ }
+ }
+
+ return __imx_flush_reg_array(client, &ctrl);
+}
+
+static int __imx_min_fps_diff(int fps, const struct imx_fps_setting *fps_list)
+{
+ int diff = INT_MAX;
+ int i;
+
+ if (fps == 0)
+ return 0;
+
+ for (i = 0; i < MAX_FPS_OPTIONS_SUPPORTED; i++) {
+ if (!fps_list[i].fps)
+ break;
+ if (abs(fps_list[i].fps - fps) < diff)
+ diff = abs(fps_list[i].fps - fps);
+ }
+
+ return diff;
+}
+
+static int __imx_nearest_fps_index(int fps,
+ const struct imx_fps_setting *fps_list)
+{
+ int fps_index = 0;
+ int i;
+
+ for (i = 0; i < MAX_FPS_OPTIONS_SUPPORTED; i++) {
+ if (!fps_list[i].fps)
+ break;
+ if (abs(fps_list[i].fps - fps)
+ < abs(fps_list[fps_index].fps - fps))
+ fps_index = i;
+ }
+ return fps_index;
+}
+
+/*
+ * Choose the nearest fps setting at or above the requested fps.
+ * fps_list should be in ascending order.
+ */
+static int __imx_above_nearest_fps_index(int fps,
+ const struct imx_fps_setting *fps_list)
+{
+ int fps_index = 0;
+ int i;
+
+ for (i = 0; i < MAX_FPS_OPTIONS_SUPPORTED; i++) {
+ if (!fps_list[i].fps)
+ break;
+ if (fps <= fps_list[i].fps) {
+ fps_index = i;
+ break;
+ }
+ }
+
+ return fps_index;
+}
+
+static int imx_get_lanes(struct v4l2_subdev *sd)
+{
+ struct camera_mipi_info *imx_info = v4l2_get_subdev_hostdata(sd);
+
+ if (!imx_info)
+ return -ENOSYS;
+ if (imx_info->num_lanes < 1 || imx_info->num_lanes > 4 ||
+ imx_info->num_lanes == 3)
+ return -EINVAL;
+
+ return imx_info->num_lanes;
+}
+
+static int __imx_update_exposure_timing(struct i2c_client *client, u16 exposure,
+ u16 llp, u16 fll)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct imx_device *dev = to_imx_sensor(sd);
+ int ret = 0;
+
+ if (dev->sensor_id != IMX227_ID) {
+ /* Increase the VTS to match exposure + margin */
+ if (exposure > fll - IMX_INTEGRATION_TIME_MARGIN)
+ fll = exposure + IMX_INTEGRATION_TIME_MARGIN;
+ }
+
+ ret = imx_write_reg(client, IMX_16BIT,
+ dev->reg_addr->line_length_pixels, llp);
+ if (ret)
+ return ret;
+
+ ret = imx_write_reg(client, IMX_16BIT,
+ dev->reg_addr->frame_length_lines, fll);
+ if (ret)
+ return ret;
+
+ if (exposure)
+ ret = imx_write_reg(client, IMX_16BIT,
+ dev->reg_addr->coarse_integration_time, exposure);
+
+ return ret;
+}
+
+static int __imx_update_gain(struct v4l2_subdev *sd, u16 gain)
+{
+ struct imx_device *dev = to_imx_sensor(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret;
+
+ /* set global gain */
+ ret = imx_write_reg(client, IMX_8BIT, dev->reg_addr->global_gain, gain);
+ if (ret)
+ return ret;
+
+ /* set short analog gain */
+ if (dev->sensor_id == IMX135_ID)
+ ret = imx_write_reg(client, IMX_8BIT, IMX_SHORT_AGC_GAIN, gain);
+
+ return ret;
+}
+
+static int __imx_update_digital_gain(struct i2c_client *client, u16 digitgain)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct imx_device *dev = to_imx_sensor(sd);
+ struct imx_write_buffer digit_gain;
+
+ digit_gain.addr = cpu_to_be16(dev->reg_addr->dgc_adj);
+ digit_gain.data[0] = (digitgain >> 8) & 0xFF;
+ digit_gain.data[1] = digitgain & 0xFF;
+
+ if (dev->sensor_id == IMX219_ID) {
+ return imx_i2c_write(client, IMX219_DGC_LEN, (u8 *)&digit_gain);
+ } else if (dev->sensor_id == IMX227_ID) {
+ return imx_i2c_write(client, IMX227_DGC_LEN, (u8 *)&digit_gain);
+ } else {
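+		/*
+		 * The remaining sensors appear to take one 16-bit digital
+		 * gain value per Bayer channel; replicate the same value for
+		 * all four channels (8 data bytes after the register address).
+		 */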
+ digit_gain.data[2] = (digitgain >> 8) & 0xFF;
+ digit_gain.data[3] = digitgain & 0xFF;
+ digit_gain.data[4] = (digitgain >> 8) & 0xFF;
+ digit_gain.data[5] = digitgain & 0xFF;
+ digit_gain.data[6] = (digitgain >> 8) & 0xFF;
+ digit_gain.data[7] = digitgain & 0xFF;
+ return imx_i2c_write(client, IMX_DGC_LEN, (u8 *)&digit_gain);
+ }
+ return 0;
+}
+
+static int imx_set_exposure_gain(struct v4l2_subdev *sd, u16 coarse_itg,
+ u16 gain, u16 digitgain)
+{
+ struct imx_device *dev = to_imx_sensor(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int lanes = imx_get_lanes(sd);
+ unsigned int digitgain_scaled;
+ int ret = 0;
+
+ /* Validate exposure: cannot exceed VTS-4 where VTS is 16bit */
+ coarse_itg = clamp_t(u16, coarse_itg, 0, IMX_MAX_EXPOSURE_SUPPORTED);
+
+ /* Validate gain: must not exceed maximum 8bit value */
+ gain = clamp_t(u16, gain, 0, IMX_MAX_GLOBAL_GAIN_SUPPORTED);
+
+ mutex_lock(&dev->input_lock);
+
+ if (dev->sensor_id == IMX227_ID) {
+ ret = imx_write_reg_array(client, imx_param_hold);
+ if (ret) {
+ mutex_unlock(&dev->input_lock);
+ return ret;
+ }
+ }
+
+	/*
+	 * For imx175, setting the digital gain must be delayed by one, so
+	 * apply the previously requested value here.
+	 */
+ if ((dev->sensor_id == IMX175_ID) && dev->digital_gain)
+ digitgain_scaled = dev->digital_gain;
+ else
+ digitgain_scaled = digitgain;
+ /* imx132 with two lanes needs more gain to saturate at max */
+ if (dev->sensor_id == IMX132_ID && lanes > 1) {
+ digitgain_scaled *= IMX132_2LANES_GAINFACT;
+ digitgain_scaled >>= IMX132_2LANES_GAINFACT_SHIFT;
+ }
+	/* Validate digital gain: must not exceed the 12-bit maximum */
+ digitgain_scaled = clamp_t(unsigned int, digitgain_scaled,
+ 0, IMX_MAX_DIGITAL_GAIN_SUPPORTED);
+
+ ret = __imx_update_exposure_timing(client, coarse_itg,
+ dev->pixels_per_line, dev->lines_per_frame);
+ if (ret)
+ goto out;
+ dev->coarse_itg = coarse_itg;
+
+ if (dev->sensor_id == IMX175_ID)
+ ret = __imx_update_gain(sd, dev->gain);
+ else
+ ret = __imx_update_gain(sd, gain);
+ if (ret)
+ goto out;
+ dev->gain = gain;
+
+ ret = __imx_update_digital_gain(client, digitgain_scaled);
+ if (ret)
+ goto out;
+ dev->digital_gain = digitgain;
+
+out:
+ if (dev->sensor_id == IMX227_ID)
+ ret = imx_write_reg_array(client, imx_param_update);
+ mutex_unlock(&dev->input_lock);
+ return ret;
+}
+
+static long imx_s_exposure(struct v4l2_subdev *sd,
+ struct atomisp_exposure *exposure)
+{
+ return imx_set_exposure_gain(sd, exposure->integration_time[0],
+ exposure->gain[0], exposure->gain[1]);
+}
+
+/* FIXME - To be updated with real OTP reading */
+static int imx_g_priv_int_data(struct v4l2_subdev *sd,
+ struct v4l2_private_int_data *priv)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct imx_device *dev = to_imx_sensor(sd);
+ u8 __user *to = priv->data;
+ u32 read_size = priv->size;
+ int ret;
+
+ /* No need to copy data if size is 0 */
+ if (!read_size)
+ goto out;
+
+ if (IS_ERR(dev->otp_data)) {
+ dev_err(&client->dev, "OTP data not available");
+ return PTR_ERR(dev->otp_data);
+ }
+ /* Correct read_size value only if bigger than maximum */
+ if (read_size > dev->otp_driver->size)
+ read_size = dev->otp_driver->size;
+
+ ret = copy_to_user(to, dev->otp_data, read_size);
+ if (ret) {
+ dev_err(&client->dev, "%s: failed to copy OTP data to user\n",
+ __func__);
+ return -EFAULT;
+ }
+out:
+ /* Return correct size */
+ priv->size = dev->otp_driver->size;
+
+ return 0;
+}
+
+static int __imx_init(struct v4l2_subdev *sd, u32 val)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct imx_device *dev = to_imx_sensor(sd);
+ int lanes = imx_get_lanes(sd);
+ int ret;
+
+ if (dev->sensor_id == IMX_ID_DEFAULT)
+ return 0;
+
+ /* The default is no flip at sensor initialization */
+ dev->h_flip->cur.val = 0;
+ dev->v_flip->cur.val = 0;
+ /* Sets the default FPS */
+ dev->fps_index = 0;
+ dev->curr_res_table = dev->mode_tables->res_preview;
+ dev->entries_curr_table = dev->mode_tables->n_res_preview;
+
+ ret = imx_write_reg_array(client, dev->mode_tables->init_settings);
+ if (ret)
+ return ret;
+
+ if (dev->sensor_id == IMX132_ID && lanes > 0) {
+ static const u8 imx132_rglanesel[] = {
+ IMX132_RGLANESEL_1LANE, /* 1 lane */
+ IMX132_RGLANESEL_2LANES, /* 2 lanes */
+ IMX132_RGLANESEL_1LANE, /* undefined */
+ IMX132_RGLANESEL_4LANES, /* 4 lanes */
+ };
+ ret = imx_write_reg(client, IMX_8BIT,
+ IMX132_RGLANESEL, imx132_rglanesel[lanes - 1]);
+ }
+
+ return ret;
+}
+
+static int imx_init(struct v4l2_subdev *sd, u32 val)
+{
+ struct imx_device *dev = to_imx_sensor(sd);
+ int ret = 0;
+
+ mutex_lock(&dev->input_lock);
+ ret = __imx_init(sd, val);
+ mutex_unlock(&dev->input_lock);
+
+ return ret;
+}
+
+static long imx_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
+{
+
+ switch (cmd) {
+ case ATOMISP_IOC_S_EXPOSURE:
+ return imx_s_exposure(sd, arg);
+ case ATOMISP_IOC_G_SENSOR_PRIV_INT_DATA:
+ return imx_g_priv_int_data(sd, arg);
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int power_up(struct v4l2_subdev *sd)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct imx_device *dev = to_imx_sensor(sd);
+ int ret;
+
+ /* power control */
+ ret = dev->platform_data->power_ctrl(sd, 1);
+ if (ret)
+ goto fail_power;
+
+ /* flis clock control */
+ ret = dev->platform_data->flisclk_ctrl(sd, 1);
+ if (ret)
+ goto fail_clk;
+
+ /* gpio ctrl */
+ ret = dev->platform_data->gpio_ctrl(sd, 1);
+ if (ret) {
+ dev_err(&client->dev, "gpio failed\n");
+ goto fail_gpio;
+ }
+
+ return 0;
+fail_gpio:
+ dev->platform_data->gpio_ctrl(sd, 0);
+fail_clk:
+ dev->platform_data->flisclk_ctrl(sd, 0);
+fail_power:
+ dev->platform_data->power_ctrl(sd, 0);
+ dev_err(&client->dev, "sensor power-up failed\n");
+
+ return ret;
+}
+
+static int power_down(struct v4l2_subdev *sd)
+{
+ struct imx_device *dev = to_imx_sensor(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret;
+
+ ret = dev->platform_data->flisclk_ctrl(sd, 0);
+ if (ret)
+ dev_err(&client->dev, "flisclk failed\n");
+
+ /* gpio ctrl */
+ ret = dev->platform_data->gpio_ctrl(sd, 0);
+ if (ret)
+ dev_err(&client->dev, "gpio failed\n");
+
+ /* power control */
+ ret = dev->platform_data->power_ctrl(sd, 0);
+ if (ret)
+ dev_err(&client->dev, "vprog failed.\n");
+
+ return ret;
+}
+
+static int __imx_s_power(struct v4l2_subdev *sd, int on)
+{
+ struct imx_device *dev = to_imx_sensor(sd);
+ int ret = 0;
+ int r = 0;
+
+ if (on == 0) {
+ ret = power_down(sd);
+ if (dev->vcm_driver && dev->vcm_driver->power_down)
+ r = dev->vcm_driver->power_down(sd);
+ if (ret == 0)
+ ret = r;
+ dev->power = 0;
+ } else {
+ if (dev->vcm_driver && dev->vcm_driver->power_up)
+ ret = dev->vcm_driver->power_up(sd);
+ if (ret)
+ return ret;
+ ret = power_up(sd);
+ if (!ret) {
+ dev->power = 1;
+ return __imx_init(sd, 0);
+ }
+ }
+
+ return ret;
+}
+
+static int imx_s_power(struct v4l2_subdev *sd, int on)
+{
+ int ret;
+ struct imx_device *dev = to_imx_sensor(sd);
+
+ mutex_lock(&dev->input_lock);
+ ret = __imx_s_power(sd, on);
+ mutex_unlock(&dev->input_lock);
+
+ return ret;
+}
+
+static int imx_get_intg_factor(struct i2c_client *client,
+ struct camera_mipi_info *info,
+ const struct imx_reg *reglist)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct imx_device *dev = to_imx_sensor(sd);
+ int lanes = imx_get_lanes(sd);
+ u32 vt_pix_clk_div;
+ u32 vt_sys_clk_div;
+ u32 pre_pll_clk_div;
+ u32 pll_multiplier;
+
+ const int ext_clk_freq_hz = 19200000;
+ struct atomisp_sensor_mode_data *buf = &info->data;
+ int ret;
+ u16 data[IMX_INTG_BUF_COUNT];
+
+ u32 vt_pix_clk_freq_mhz;
+ u32 coarse_integration_time_min;
+ u32 coarse_integration_time_max_margin;
+ u32 read_mode;
+ u32 div;
+
+ if (info == NULL)
+ return -EINVAL;
+
+ memset(data, 0, IMX_INTG_BUF_COUNT * sizeof(u16));
+ ret = imx_read_reg(client, 1, IMX_VT_PIX_CLK_DIV, data);
+ if (ret)
+ return ret;
+ vt_pix_clk_div = data[0] & IMX_MASK_5BIT;
+
+ if (dev->sensor_id == IMX132_ID || dev->sensor_id == IMX208_ID) {
+ static const int rgpltd[] = { 2, 4, 1, 1 };
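+		/* rgpltd[] maps the 2-bit RGPLTD field to the actual divider. */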
+ ret = imx_read_reg(client, 1, IMX132_208_VT_RGPLTD, data);
+ if (ret)
+ return ret;
+ vt_sys_clk_div = rgpltd[data[0] & IMX_MASK_2BIT];
+ } else {
+ ret = imx_read_reg(client, 1, IMX_VT_SYS_CLK_DIV, data);
+ if (ret)
+ return ret;
+ vt_sys_clk_div = data[0] & IMX_MASK_2BIT;
+ }
+ ret = imx_read_reg(client, 1, IMX_PRE_PLL_CLK_DIV, data);
+ if (ret)
+ return ret;
+ pre_pll_clk_div = data[0] & IMX_MASK_4BIT;
+
+ ret = imx_read_reg(client, 2,
+ (dev->sensor_id == IMX132_ID ||
+ dev->sensor_id == IMX219_ID ||
+ dev->sensor_id == IMX208_ID) ?
+ IMX132_208_219_PLL_MULTIPLIER : IMX_PLL_MULTIPLIER, data);
+ if (ret)
+ return ret;
+ pll_multiplier = data[0] & IMX_MASK_11BIT;
+
+ memset(data, 0, IMX_INTG_BUF_COUNT * sizeof(u16));
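+	/*
+	 * The minimum coarse integration time and its max margin are read
+	 * back to back as two adjacent 16-bit registers.
+	 */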
+ ret = imx_read_reg(client, 4, IMX_COARSE_INTG_TIME_MIN, data);
+ if (ret)
+ return ret;
+ coarse_integration_time_min = data[0];
+ coarse_integration_time_max_margin = data[1];
+
+ /* Get the cropping and output resolution to ISP for this mode. */
+ ret = imx_read_reg(client, 2, dev->reg_addr->horizontal_start_h, data);
+ if (ret)
+ return ret;
+ buf->crop_horizontal_start = data[0];
+
+ ret = imx_read_reg(client, 2, dev->reg_addr->vertical_start_h, data);
+ if (ret)
+ return ret;
+ buf->crop_vertical_start = data[0];
+
+ ret = imx_read_reg(client, 2, dev->reg_addr->horizontal_end_h, data);
+ if (ret)
+ return ret;
+ buf->crop_horizontal_end = data[0];
+
+ ret = imx_read_reg(client, 2, dev->reg_addr->vertical_end_h, data);
+ if (ret)
+ return ret;
+ buf->crop_vertical_end = data[0];
+
+ ret = imx_read_reg(client, 2,
+ dev->reg_addr->horizontal_output_size_h, data);
+ if (ret)
+ return ret;
+ buf->output_width = data[0];
+
+ ret = imx_read_reg(client, 2,
+ dev->reg_addr->vertical_output_size_h, data);
+ if (ret)
+ return ret;
+ buf->output_height = data[0];
+
+ memset(data, 0, IMX_INTG_BUF_COUNT * sizeof(u16));
+ if (dev->sensor_id == IMX132_ID || dev->sensor_id == IMX208_ID ||
+ dev->sensor_id == IMX219_ID)
+ read_mode = 0;
+ else {
+ if (dev->sensor_id == IMX227_ID)
+ ret = imx_read_reg(client, 1, IMX227_READ_MODE, data);
+ else
+ ret = imx_read_reg(client, 1, IMX_READ_MODE, data);
+
+ if (ret)
+ return ret;
+ read_mode = data[0] & IMX_MASK_2BIT;
+ }
+
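+	/*
+	 * The pixel clock is derived from the 19.2 MHz external clock via the
+	 * PLL: ext_clk / (pre_pll_div * vt_sys_div * vt_pix_div), scaled by
+	 * pll_multiplier and a sensor-specific factor below.
+	 */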
+ div = pre_pll_clk_div*vt_sys_clk_div*vt_pix_clk_div;
+ if (div == 0)
+ return -EINVAL;
+
+ if (dev->sensor_id == IMX132_ID || dev->sensor_id == IMX208_ID)
+ vt_pix_clk_freq_mhz = ext_clk_freq_hz / div;
+ else if (dev->sensor_id == IMX227_ID) {
+		/*
+		 * According to the IMX227 datasheet:
+		 * vt_pix_clk_freq_mhz = num_of_vt_lanes(4) * ivt_pix_clk_freq_mhz
+		 */
+ vt_pix_clk_freq_mhz =
+ (u64)4 * ext_clk_freq_hz * pll_multiplier;
+ do_div(vt_pix_clk_freq_mhz, div);
+ } else
+ vt_pix_clk_freq_mhz = 2 * ext_clk_freq_hz / div;
+
+ vt_pix_clk_freq_mhz *= pll_multiplier;
+ if (dev->sensor_id == IMX132_ID && lanes > 0)
+ vt_pix_clk_freq_mhz *= lanes;
+
+ dev->vt_pix_clk_freq_mhz = vt_pix_clk_freq_mhz;
+
+ buf->vt_pix_clk_freq_mhz = vt_pix_clk_freq_mhz;
+ buf->coarse_integration_time_min = coarse_integration_time_min;
+ buf->coarse_integration_time_max_margin =
+ coarse_integration_time_max_margin;
+
+ buf->fine_integration_time_min = IMX_FINE_INTG_TIME;
+ buf->fine_integration_time_max_margin = IMX_FINE_INTG_TIME;
+ buf->fine_integration_time_def = IMX_FINE_INTG_TIME;
+ buf->frame_length_lines = dev->lines_per_frame;
+ buf->line_length_pck = dev->pixels_per_line;
+ buf->read_mode = read_mode;
+
+ if (dev->sensor_id == IMX132_ID || dev->sensor_id == IMX208_ID ||
+ dev->sensor_id == IMX219_ID) {
+ buf->binning_factor_x = 1;
+ buf->binning_factor_y = 1;
+ } else {
+ if (dev->sensor_id == IMX227_ID)
+ ret = imx_read_reg(client, 1, IMX227_BINNING_ENABLE,
+ data);
+ else
+ ret = imx_read_reg(client, 1, IMX_BINNING_ENABLE, data);
+
+ if (ret)
+ return ret;
+ /* 1:binning enabled, 0:disabled */
+ if (data[0] == 1) {
+ if (dev->sensor_id == IMX227_ID)
+ ret = imx_read_reg(client, 1,
+ IMX227_BINNING_TYPE, data);
+ else
+ ret = imx_read_reg(client, 1,
+ IMX_BINNING_TYPE, data);
+
+ if (ret)
+ return ret;
+ buf->binning_factor_x = data[0] >> 4 & 0x0f;
+ if (!buf->binning_factor_x)
+ buf->binning_factor_x = 1;
+ buf->binning_factor_y = data[0] & 0xf;
+ if (!buf->binning_factor_y)
+ buf->binning_factor_y = 1;
+			/*
+			 * WORKAROUND: the NHD setting for IMX227 should have
+			 * 4x4 binning but the register setting does not
+			 * reflect this; the vendor has been asked why. This is
+			 * a workaround for INTEL BZ 216560.
+			 */
+ if (dev->sensor_id == IMX227_ID) {
+ if (dev->curr_res_table[dev->fmt_idx].width ==
+ 376 &&
+ dev->curr_res_table[dev->fmt_idx].height ==
+ 656) {
+ buf->binning_factor_x = 4;
+ buf->binning_factor_y = 4;
+ }
+ }
+ } else {
+ buf->binning_factor_x = 1;
+ buf->binning_factor_y = 1;
+ }
+ }
+
+ return 0;
+}
+
+/* This returns the exposure time being used. This should only be used
+ for filling in EXIF data, not for actual image processing. */
+static int imx_q_exposure(struct v4l2_subdev *sd, s32 *value)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct imx_device *dev = to_imx_sensor(sd);
+ u16 coarse;
+ int ret;
+
+ /* the fine integration time is currently not calculated */
+ ret = imx_read_reg(client, IMX_16BIT,
+ dev->reg_addr->coarse_integration_time, &coarse);
+ *value = coarse;
+
+ return ret;
+}
+
+static int imx_test_pattern(struct v4l2_subdev *sd)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct imx_device *dev = to_imx_sensor(sd);
+ int ret;
+
+ if (dev->power == 0)
+ return 0;
+
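+	/* Each 32-bit solid-color value is reduced to its top 10 bits here. */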
+ ret = imx_write_reg(client, IMX_16BIT, IMX_TEST_PATTERN_COLOR_R,
+ (u16)(dev->tp_r->val >> 22));
+ if (ret)
+ return ret;
+
+ ret = imx_write_reg(client, IMX_16BIT, IMX_TEST_PATTERN_COLOR_GR,
+ (u16)(dev->tp_gr->val >> 22));
+ if (ret)
+ return ret;
+
+ ret = imx_write_reg(client, IMX_16BIT, IMX_TEST_PATTERN_COLOR_GB,
+ (u16)(dev->tp_gb->val >> 22));
+ if (ret)
+ return ret;
+
+ ret = imx_write_reg(client, IMX_16BIT, IMX_TEST_PATTERN_COLOR_B,
+ (u16)(dev->tp_b->val >> 22));
+ if (ret)
+ return ret;
+
+ return imx_write_reg(client, IMX_16BIT, IMX_TEST_PATTERN_MODE,
+ (u16)(dev->tp_mode->val));
+}
+
+static u32 imx_translate_bayer_order(enum atomisp_bayer_order code)
+{
+ switch (code) {
+ case atomisp_bayer_order_rggb:
+ return MEDIA_BUS_FMT_SRGGB10_1X10;
+ case atomisp_bayer_order_grbg:
+ return MEDIA_BUS_FMT_SGRBG10_1X10;
+ case atomisp_bayer_order_bggr:
+ return MEDIA_BUS_FMT_SBGGR10_1X10;
+ case atomisp_bayer_order_gbrg:
+ return MEDIA_BUS_FMT_SGBRG10_1X10;
+ }
+ return 0;
+}
+
+static int imx_v_flip(struct v4l2_subdev *sd, s32 value)
+{
+ struct imx_device *dev = to_imx_sensor(sd);
+ struct camera_mipi_info *imx_info = NULL;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret;
+ u16 val;
+
+ if (dev->power == 0)
+ return -EIO;
+
+ ret = imx_write_reg_array(client, dev->param_hold);
+ if (ret)
+ return ret;
+
+ ret = imx_read_reg(client, IMX_8BIT,
+ dev->reg_addr->img_orientation, &val);
+ if (ret)
+ return ret;
+ if (value)
+ val |= IMX_VFLIP_BIT;
+ else
+ val &= ~IMX_VFLIP_BIT;
+
+ ret = imx_write_reg(client, IMX_8BIT,
+ dev->reg_addr->img_orientation, val);
+ if (ret)
+ return ret;
+
+ imx_info = v4l2_get_subdev_hostdata(sd);
+ if (imx_info) {
+ val &= (IMX_VFLIP_BIT|IMX_HFLIP_BIT);
+ imx_info->raw_bayer_order = imx_bayer_order_mapping[val];
+ dev->format.code = imx_translate_bayer_order(
+ imx_info->raw_bayer_order);
+ }
+
+ return imx_write_reg_array(client, dev->param_update);
+}
+
+static int imx_h_flip(struct v4l2_subdev *sd, s32 value)
+{
+ struct imx_device *dev = to_imx_sensor(sd);
+ struct camera_mipi_info *imx_info = NULL;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret;
+ u16 val;
+
+ if (dev->power == 0)
+ return -EIO;
+
+ ret = imx_write_reg_array(client, dev->param_hold);
+ if (ret)
+ return ret;
+ ret = imx_read_reg(client, IMX_8BIT,
+ dev->reg_addr->img_orientation, &val);
+ if (ret)
+ return ret;
+ if (value)
+ val |= IMX_HFLIP_BIT;
+ else
+ val &= ~IMX_HFLIP_BIT;
+ ret = imx_write_reg(client, IMX_8BIT,
+ dev->reg_addr->img_orientation, val);
+ if (ret)
+ return ret;
+
+ imx_info = v4l2_get_subdev_hostdata(sd);
+ if (imx_info) {
+ val &= (IMX_VFLIP_BIT|IMX_HFLIP_BIT);
+ imx_info->raw_bayer_order = imx_bayer_order_mapping[val];
+ dev->format.code = imx_translate_bayer_order(
+ imx_info->raw_bayer_order);
+ }
+
+ return imx_write_reg_array(client, dev->param_update);
+}
+
+static int imx_g_focal(struct v4l2_subdev *sd, s32 *val)
+{
+ *val = (IMX_FOCAL_LENGTH_NUM << 16) | IMX_FOCAL_LENGTH_DEM;
+ return 0;
+}
+
+static int imx_g_fnumber(struct v4l2_subdev *sd, s32 *val)
+{
+	/* Constant f-number for imx */
+ *val = (IMX_F_NUMBER_DEFAULT_NUM << 16) | IMX_F_NUMBER_DEM;
+ return 0;
+}
+
+static int imx_g_fnumber_range(struct v4l2_subdev *sd, s32 *val)
+{
+ *val = (IMX_F_NUMBER_DEFAULT_NUM << 24) |
+ (IMX_F_NUMBER_DEM << 16) |
+ (IMX_F_NUMBER_DEFAULT_NUM << 8) | IMX_F_NUMBER_DEM;
+ return 0;
+}
+
+static int imx_g_bin_factor_x(struct v4l2_subdev *sd, s32 *val)
+{
+ struct imx_device *dev = to_imx_sensor(sd);
+
+ *val = dev->curr_res_table[dev->fmt_idx].bin_factor_x;
+
+ return 0;
+}
+
+static int imx_g_bin_factor_y(struct v4l2_subdev *sd, s32 *val)
+{
+ struct imx_device *dev = to_imx_sensor(sd);
+
+ *val = dev->curr_res_table[dev->fmt_idx].bin_factor_y;
+
+ return 0;
+}
+
+int imx_vcm_power_up(struct v4l2_subdev *sd)
+{
+ struct imx_device *dev = to_imx_sensor(sd);
+ if (dev->vcm_driver && dev->vcm_driver->power_up)
+ return dev->vcm_driver->power_up(sd);
+ return 0;
+}
+
+int imx_vcm_power_down(struct v4l2_subdev *sd)
+{
+ struct imx_device *dev = to_imx_sensor(sd);
+ if (dev->vcm_driver && dev->vcm_driver->power_down)
+ return dev->vcm_driver->power_down(sd);
+ return 0;
+}
+
+int imx_vcm_init(struct v4l2_subdev *sd)
+{
+ struct imx_device *dev = to_imx_sensor(sd);
+ if (dev->vcm_driver && dev->vcm_driver->init)
+ return dev->vcm_driver->init(sd);
+ return 0;
+}
+
+int imx_t_focus_vcm(struct v4l2_subdev *sd, u16 val)
+{
+ struct imx_device *dev = to_imx_sensor(sd);
+ if (dev->vcm_driver && dev->vcm_driver->t_focus_vcm)
+ return dev->vcm_driver->t_focus_vcm(sd, val);
+ return 0;
+}
+
+int imx_t_focus_abs(struct v4l2_subdev *sd, s32 value)
+{
+ struct imx_device *dev = to_imx_sensor(sd);
+ if (dev->vcm_driver && dev->vcm_driver->t_focus_abs)
+ return dev->vcm_driver->t_focus_abs(sd, value);
+ return 0;
+}
+
+int imx_t_focus_rel(struct v4l2_subdev *sd, s32 value)
+{
+ struct imx_device *dev = to_imx_sensor(sd);
+ if (dev->vcm_driver && dev->vcm_driver->t_focus_rel)
+ return dev->vcm_driver->t_focus_rel(sd, value);
+ return 0;
+}
+
+int imx_q_focus_status(struct v4l2_subdev *sd, s32 *value)
+{
+ struct imx_device *dev = to_imx_sensor(sd);
+ if (dev->vcm_driver && dev->vcm_driver->q_focus_status)
+ return dev->vcm_driver->q_focus_status(sd, value);
+ return 0;
+}
+
+int imx_q_focus_abs(struct v4l2_subdev *sd, s32 *value)
+{
+ struct imx_device *dev = to_imx_sensor(sd);
+ if (dev->vcm_driver && dev->vcm_driver->q_focus_abs)
+ return dev->vcm_driver->q_focus_abs(sd, value);
+ return 0;
+}
+
+int imx_t_vcm_slew(struct v4l2_subdev *sd, s32 value)
+{
+ struct imx_device *dev = to_imx_sensor(sd);
+ if (dev->vcm_driver && dev->vcm_driver->t_vcm_slew)
+ return dev->vcm_driver->t_vcm_slew(sd, value);
+ return 0;
+}
+
+int imx_t_vcm_timing(struct v4l2_subdev *sd, s32 value)
+{
+ struct imx_device *dev = to_imx_sensor(sd);
+ if (dev->vcm_driver && dev->vcm_driver->t_vcm_timing)
+ return dev->vcm_driver->t_vcm_timing(sd, value);
+ return 0;
+}
+
+static int imx_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct imx_device *dev = container_of(
+ ctrl->handler, struct imx_device, ctrl_handler);
+ struct i2c_client *client = v4l2_get_subdevdata(&dev->sd);
+ int ret = 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_TEST_PATTERN:
+ ret = imx_test_pattern(&dev->sd);
+ break;
+ case V4L2_CID_VFLIP:
+ dev_dbg(&client->dev, "%s: CID_VFLIP:%d.\n",
+ __func__, ctrl->val);
+ ret = imx_v_flip(&dev->sd, ctrl->val);
+ break;
+ case V4L2_CID_HFLIP:
+ dev_dbg(&client->dev, "%s: CID_HFLIP:%d.\n",
+ __func__, ctrl->val);
+ ret = imx_h_flip(&dev->sd, ctrl->val);
+ break;
+ case V4L2_CID_FOCUS_ABSOLUTE:
+ ret = imx_t_focus_abs(&dev->sd, ctrl->val);
+ break;
+ case V4L2_CID_FOCUS_RELATIVE:
+ ret = imx_t_focus_rel(&dev->sd, ctrl->val);
+ break;
+ case V4L2_CID_VCM_SLEW:
+ ret = imx_t_vcm_slew(&dev->sd, ctrl->val);
+ break;
+ case V4L2_CID_VCM_TIMEING:
+ ret = imx_t_vcm_timing(&dev->sd, ctrl->val);
+ break;
+ }
+
+ return ret;
+}
+
+static int imx_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct imx_device *dev = container_of(
+ ctrl->handler, struct imx_device, ctrl_handler);
+ int ret = 0;
+ unsigned int val;
+
+ switch (ctrl->id) {
+ case V4L2_CID_EXPOSURE_ABSOLUTE:
+ ret = imx_q_exposure(&dev->sd, &ctrl->val);
+ break;
+ case V4L2_CID_FOCUS_ABSOLUTE:
+ ret = imx_q_focus_abs(&dev->sd, &ctrl->val);
+ break;
+ case V4L2_CID_FOCUS_STATUS:
+ ret = imx_q_focus_status(&dev->sd, &ctrl->val);
+ break;
+ case V4L2_CID_FOCAL_ABSOLUTE:
+ ret = imx_g_focal(&dev->sd, &ctrl->val);
+ break;
+ case V4L2_CID_FNUMBER_ABSOLUTE:
+ ret = imx_g_fnumber(&dev->sd, &ctrl->val);
+ break;
+ case V4L2_CID_FNUMBER_RANGE:
+ ret = imx_g_fnumber_range(&dev->sd, &ctrl->val);
+ break;
+ case V4L2_CID_BIN_FACTOR_HORZ:
+ ret = imx_g_bin_factor_x(&dev->sd, &ctrl->val);
+ break;
+ case V4L2_CID_BIN_FACTOR_VERT:
+ ret = imx_g_bin_factor_y(&dev->sd, &ctrl->val);
+ break;
+ case V4L2_CID_VBLANK:
+ ctrl->val = dev->lines_per_frame -
+ dev->curr_res_table[dev->fmt_idx].height;
+ break;
+ case V4L2_CID_HBLANK:
+ ctrl->val = dev->pixels_per_line -
+ dev->curr_res_table[dev->fmt_idx].width;
+ break;
+ case V4L2_CID_PIXEL_RATE:
+ ctrl->val = dev->vt_pix_clk_freq_mhz;
+ break;
+ case V4L2_CID_LINK_FREQ:
+ val = dev->curr_res_table[dev->fmt_idx].
+ fps_options[dev->fps_index].mipi_freq;
+ if (val == 0)
+ val = dev->curr_res_table[dev->fmt_idx].mipi_freq;
+ if (val == 0)
+ return -EINVAL;
+ ctrl->val = val * 1000; /* To Hz */
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops ctrl_ops = {
+ .s_ctrl = imx_s_ctrl,
+ .g_volatile_ctrl = imx_g_volatile_ctrl
+};
+
+static const struct v4l2_ctrl_config imx_controls[] = {
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_EXPOSURE_ABSOLUTE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "exposure",
+ .min = 0x0,
+ .max = 0xffff,
+ .step = 0x01,
+ .def = 0x00,
+ .flags = V4L2_CTRL_FLAG_VOLATILE,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_TEST_PATTERN,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Test pattern",
+ .min = 0,
+ .max = 0xffff,
+ .step = 1,
+ .def = 0,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_TEST_PATTERN_COLOR_R,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Test pattern solid color R",
+ .min = INT_MIN,
+ .max = INT_MAX,
+ .step = 1,
+ .def = 0,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_TEST_PATTERN_COLOR_GR,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Test pattern solid color GR",
+ .min = INT_MIN,
+ .max = INT_MAX,
+ .step = 1,
+ .def = 0,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_TEST_PATTERN_COLOR_GB,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Test pattern solid color GB",
+ .min = INT_MIN,
+ .max = INT_MAX,
+ .step = 1,
+ .def = 0,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_TEST_PATTERN_COLOR_B,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Test pattern solid color B",
+ .min = INT_MIN,
+ .max = INT_MAX,
+ .step = 1,
+ .def = 0,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_VFLIP,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "Flip",
+ .min = 0,
+ .max = 1,
+ .step = 1,
+ .def = 0,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_HFLIP,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "Mirror",
+ .min = 0,
+ .max = 1,
+ .step = 1,
+ .def = 0,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_FOCUS_ABSOLUTE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "focus move absolute",
+ .min = 0,
+ .max = IMX_MAX_FOCUS_POS,
+ .step = 1,
+ .def = 0,
+ .flags = V4L2_CTRL_FLAG_VOLATILE,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_FOCUS_RELATIVE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "focus move relative",
+ .min = IMX_MAX_FOCUS_NEG,
+ .max = IMX_MAX_FOCUS_POS,
+ .step = 1,
+ .def = 0,
+ .flags = 0,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_FOCUS_STATUS,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "focus status",
+ .min = 0,
+ .max = 100, /* allow enum to grow in the future */
+ .step = 1,
+ .def = 0,
+ .flags = V4L2_CTRL_FLAG_VOLATILE,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_VCM_SLEW,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "vcm slew",
+ .min = 0,
+ .max = IMX_VCM_SLEW_STEP_MAX,
+ .step = 1,
+ .def = 0,
+ .flags = 0,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_VCM_TIMEING,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "vcm step time",
+ .min = 0,
+ .max = IMX_VCM_SLEW_TIME_MAX,
+ .step = 1,
+ .def = 0,
+ .flags = 0,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_FOCAL_ABSOLUTE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "focal length",
+ .min = IMX_FOCAL_LENGTH_DEFAULT,
+ .max = IMX_FOCAL_LENGTH_DEFAULT,
+ .step = 0x01,
+ .def = IMX_FOCAL_LENGTH_DEFAULT,
+ .flags = V4L2_CTRL_FLAG_VOLATILE,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_FNUMBER_ABSOLUTE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "f-number",
+ .min = IMX_F_NUMBER_DEFAULT,
+ .max = IMX_F_NUMBER_DEFAULT,
+ .step = 0x01,
+ .def = IMX_F_NUMBER_DEFAULT,
+ .flags = V4L2_CTRL_FLAG_VOLATILE,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_FNUMBER_RANGE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "f-number range",
+ .min = IMX_F_NUMBER_RANGE,
+ .max = IMX_F_NUMBER_RANGE,
+ .step = 0x01,
+ .def = IMX_F_NUMBER_RANGE,
+ .flags = V4L2_CTRL_FLAG_VOLATILE,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_BIN_FACTOR_HORZ,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "horizontal binning factor",
+ .min = 0,
+ .max = IMX_BIN_FACTOR_MAX,
+ .step = 1,
+ .def = 0,
+ .flags = V4L2_CTRL_FLAG_VOLATILE,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_BIN_FACTOR_VERT,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "vertical binning factor",
+ .min = 0,
+ .max = IMX_BIN_FACTOR_MAX,
+ .step = 1,
+ .def = 0,
+ .flags = V4L2_CTRL_FLAG_VOLATILE,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_LINK_FREQ,
+ .name = "Link Frequency",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 1,
+ .max = 1500000 * 1000,
+ .step = 1,
+ .def = 1,
+ .flags = V4L2_CTRL_FLAG_VOLATILE | V4L2_CTRL_FLAG_READ_ONLY,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_PIXEL_RATE,
+ .name = "Pixel Rate",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = INT_MAX,
+ .step = 1,
+ .def = 0,
+ .flags = V4L2_CTRL_FLAG_VOLATILE,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_HBLANK,
+ .name = "Horizontal Blanking",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = SHRT_MAX,
+ .step = 1,
+ .def = 0,
+ .flags = V4L2_CTRL_FLAG_VOLATILE,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_VBLANK,
+ .name = "Vertical Blanking",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = SHRT_MAX,
+ .step = 1,
+ .def = 0,
+ .flags = V4L2_CTRL_FLAG_VOLATILE,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_HFLIP,
+ .name = "Horizontal Flip",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = 1,
+ .step = 1,
+ .def = 0,
+ .flags = 0,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_VFLIP,
+ .name = "Vertical Flip",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = 1,
+ .step = 1,
+ .def = 0,
+ .flags = 0,
+ },
+};
+
+/*
+ * distance - calculate the distance between a resolution and a request
+ * @res: resolution
+ * @w: requested width
+ * @h: requested height
+ *
+ * Get the gap between the resolution and w/h.
+ * Resolutions with width/height smaller than w/h are not considered.
+ * Returns the gap value, or -1 on failure.
+ */
+#define LARGEST_ALLOWED_RATIO_MISMATCH 600
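+/*
+ * Ratios below are computed in Q13 fixed point (8192 == 1.0), so a
+ * LARGEST_ALLOWED_RATIO_MISMATCH of 600 corresponds to roughly a 7%
+ * aspect-ratio mismatch.
+ */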
+static int distance(struct imx_resolution const *res, u32 w, u32 h,
+ bool keep_ratio)
+{
+ unsigned int w_ratio;
+ unsigned int h_ratio;
+ int match;
+ unsigned int allowed_ratio_mismatch = LARGEST_ALLOWED_RATIO_MISMATCH;
+
+ if (!keep_ratio)
+ allowed_ratio_mismatch = ~0;
+
+ if (w == 0)
+ return -1;
+ w_ratio = (res->width << 13) / w;
+ if (h == 0)
+ return -1;
+ h_ratio = (res->height << 13) / h;
+ if (h_ratio == 0)
+ return -1;
+ match = abs(((w_ratio << 13) / h_ratio) - ((int)8192));
+
+ if ((w_ratio < (int)8192) || (h_ratio < (int)8192) ||
+ (match > allowed_ratio_mismatch))
+ return -1;
+
+ return w_ratio + h_ratio;
+}
+
+/* Return the nearest higher resolution index */
+static int nearest_resolution_index(struct v4l2_subdev *sd, int w, int h)
+{
+ int i;
+ int idx = -1;
+ int dist;
+ int fps_diff;
+ int min_fps_diff = INT_MAX;
+ int min_dist = INT_MAX;
+ const struct imx_resolution *tmp_res = NULL;
+ struct imx_device *dev = to_imx_sensor(sd);
+	bool again = true;
+retry:
+ for (i = 0; i < dev->entries_curr_table; i++) {
+ tmp_res = &dev->curr_res_table[i];
+ dist = distance(tmp_res, w, h, again);
+ if (dist == -1)
+ continue;
+ if (dist < min_dist) {
+ min_dist = dist;
+ idx = i;
+ }
+ if (dist == min_dist) {
+ fps_diff = __imx_min_fps_diff(dev->targetfps,
+ tmp_res->fps_options);
+ if (fps_diff < min_fps_diff) {
+ min_fps_diff = fps_diff;
+ idx = i;
+ }
+ }
+ }
+
+ /*
+ * FIXME!
+ * only IMX135 for Saltbay and IMX227 use this algorithm
+ */
+ if (idx == -1 && again == true && dev->new_res_sel_method) {
+ again = false;
+ goto retry;
+ }
+ return idx;
+}
+
+/* Call with ctrl_handler.lock held */
+static int __adjust_hvblank(struct v4l2_subdev *sd)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct imx_device *dev = to_imx_sensor(sd);
+ u16 new_frame_length_lines, new_line_length_pck;
+ int ret;
+
+	/*
+	 * No need to adjust h/v blank if the debug values are not set.
+	 * Note that there is no other checking on the h/v blank values,
+	 * as they can be set to any value above zero for debug purposes.
+	 */
+ if (!dev->v_blank->val || !dev->h_blank->val)
+ return 0;
+
+ new_frame_length_lines = dev->curr_res_table[dev->fmt_idx].height +
+ dev->v_blank->val;
+ new_line_length_pck = dev->curr_res_table[dev->fmt_idx].width +
+ dev->h_blank->val;
+
+ ret = imx_write_reg(client, IMX_16BIT,
+ dev->reg_addr->line_length_pixels, new_line_length_pck);
+ if (ret)
+ return ret;
+ ret = imx_write_reg(client, IMX_16BIT,
+ dev->reg_addr->frame_length_lines, new_frame_length_lines);
+ if (ret)
+ return ret;
+
+ dev->lines_per_frame = new_frame_length_lines;
+ dev->pixels_per_line = new_line_length_pck;
+
+ return 0;
+}
+
+static int imx_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *format)
+{
+ struct v4l2_mbus_framefmt *fmt = &format->format;
+ struct imx_device *dev = to_imx_sensor(sd);
+ struct camera_mipi_info *imx_info = NULL;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ const struct imx_resolution *res;
+ int lanes = imx_get_lanes(sd);
+ int ret;
+ u16 data, val;
+ int idx;
+ if (format->pad)
+ return -EINVAL;
+ if (!fmt)
+ return -EINVAL;
+
+ imx_info = v4l2_get_subdev_hostdata(sd);
+ if (imx_info == NULL)
+ return -EINVAL;
+ if ((fmt->width > imx_max_res[dev->sensor_id].res_max_width)
+ || (fmt->height > imx_max_res[dev->sensor_id].res_max_height)) {
+ fmt->width = imx_max_res[dev->sensor_id].res_max_width;
+ fmt->height = imx_max_res[dev->sensor_id].res_max_height;
+ } else {
+ idx = nearest_resolution_index(sd, fmt->width, fmt->height);
+
+		/*
+		 * nearest_resolution_index() doesn't return smaller
+		 * resolutions. If it fails, it means the requested
+		 * resolution is higher than we can support. Fall back
+		 * to the highest supported resolution in this case.
+		 */
+ if (idx == -1)
+ idx = dev->entries_curr_table - 1;
+
+ fmt->width = dev->curr_res_table[idx].width;
+ fmt->height = dev->curr_res_table[idx].height;
+ }
+
+ fmt->code = dev->format.code;
+	if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
+ cfg->try_fmt = *fmt;
+ return 0;
+ }
+ mutex_lock(&dev->input_lock);
+
+ dev->fmt_idx = nearest_resolution_index(sd, fmt->width, fmt->height);
+ if (dev->fmt_idx == -1) {
+ ret = -EINVAL;
+ goto out;
+ }
+ res = &dev->curr_res_table[dev->fmt_idx];
+
+ /* Adjust the FPS selection based on the resolution selected */
+ dev->fps_index = __imx_nearest_fps_index(dev->targetfps,
+ res->fps_options);
+ dev->fps = res->fps_options[dev->fps_index].fps;
+ dev->regs = res->fps_options[dev->fps_index].regs;
+ if (!dev->regs)
+ dev->regs = res->regs;
+
+ ret = imx_write_reg_array(client, dev->regs);
+ if (ret)
+ goto out;
+
+ if (dev->sensor_id == IMX132_ID && lanes > 0) {
+ static const u8 imx132_rgpltd[] = {
+ 2, /* 1 lane: /1 */
+ 0, /* 2 lanes: /2 */
+ 0, /* undefined */
+ 1, /* 4 lanes: /4 */
+ };
+ ret = imx_write_reg(client, IMX_8BIT, IMX132_208_VT_RGPLTD,
+ imx132_rgpltd[lanes - 1]);
+ if (ret)
+ goto out;
+ }
+
+ dev->pixels_per_line = res->fps_options[dev->fps_index].pixels_per_line;
+ dev->lines_per_frame = res->fps_options[dev->fps_index].lines_per_frame;
+
+ /* dbg h/v blank time */
+ __adjust_hvblank(sd);
+
+ ret = __imx_update_exposure_timing(client, dev->coarse_itg,
+ dev->pixels_per_line, dev->lines_per_frame);
+ if (ret)
+ goto out;
+
+ ret = __imx_update_gain(sd, dev->gain);
+ if (ret)
+ goto out;
+
+ ret = __imx_update_digital_gain(client, dev->digital_gain);
+ if (ret)
+ goto out;
+
+ ret = imx_write_reg_array(client, dev->param_update);
+ if (ret)
+ goto out;
+
+ ret = imx_get_intg_factor(client, imx_info, dev->regs);
+ if (ret)
+ goto out;
+
+ ret = imx_read_reg(client, IMX_8BIT,
+ dev->reg_addr->img_orientation, &val);
+ if (ret)
+ goto out;
+ val &= (IMX_VFLIP_BIT|IMX_HFLIP_BIT);
+ imx_info->raw_bayer_order = imx_bayer_order_mapping[val];
+ dev->format.code = imx_translate_bayer_order(
+ imx_info->raw_bayer_order);
+
+	/*
+	 * Fill in the metadata info. Add the imx135 metadata setting for the
+	 * RAW10 format.
+	 */
+ switch (dev->sensor_id) {
+ case IMX135_ID:
+ ret = imx_read_reg(client, 2,
+ IMX135_OUTPUT_DATA_FORMAT_REG, &data);
+ if (ret)
+ goto out;
+		/*
+		 * The IMX135 can output various data formats such as
+		 * RAW6/8/10/12/14.
+		 * 1. If the data format is RAW10:
+		 *    metadata width = current resolution width (pixels) * 10 / 8
+		 * 2. If the data format is RAW6 or RAW8:
+		 *    metadata width = current resolution width (pixels)
+		 * 3. Other data formats (RAW12/14 etc.):
+		 *    TBD.
+		 */
+		if (data == IMX135_OUTPUT_FORMAT_RAW10)
+			/* The data format is RAW10. */
+			imx_info->metadata_width = res->width * 10 / 8;
+		else
+			/* The data format is RAW6/8/12/14, etc. */
+			imx_info->metadata_width = res->width;
+
+ imx_info->metadata_height = IMX135_EMBEDDED_DATA_LINE_NUM;
+
+ if (imx_info->metadata_effective_width == NULL)
+ imx_info->metadata_effective_width =
+ imx135_embedded_effective_size;
+
+ break;
+ case IMX227_ID:
+ ret = imx_read_reg(client, 2, IMX227_OUTPUT_DATA_FORMAT_REG,
+ &data);
+ if (ret)
+ goto out;
+		if (data == IMX227_OUTPUT_FORMAT_RAW10)
+			/* The data format is RAW10. */
+			imx_info->metadata_width = res->width * 10 / 8;
+		else
+			/* The data format is RAW6/8/12/14, etc. */
+			imx_info->metadata_width = res->width;
+
+ imx_info->metadata_height = IMX227_EMBEDDED_DATA_LINE_NUM;
+
+ if (imx_info->metadata_effective_width == NULL)
+ imx_info->metadata_effective_width =
+ imx227_embedded_effective_size;
+
+ break;
+ default:
+ imx_info->metadata_width = 0;
+ imx_info->metadata_height = 0;
+ imx_info->metadata_effective_width = NULL;
+ break;
+ }
+
+out:
+ mutex_unlock(&dev->input_lock);
+ return ret;
+}
+
+
+static int imx_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *format)
+{
+ struct v4l2_mbus_framefmt *fmt = &format->format;
+ struct imx_device *dev = to_imx_sensor(sd);
+
+ if (format->pad)
+ return -EINVAL;
+ if (!fmt)
+ return -EINVAL;
+
+ mutex_lock(&dev->input_lock);
+ fmt->width = dev->curr_res_table[dev->fmt_idx].width;
+ fmt->height = dev->curr_res_table[dev->fmt_idx].height;
+ fmt->code = dev->format.code;
+ mutex_unlock(&dev->input_lock);
+ return 0;
+}
+
+static int imx_detect(struct i2c_client *client, u16 *id, u8 *revision)
+{
+ struct i2c_adapter *adapter = client->adapter;
+
+ /* i2c check */
+ if (!i2c_check_functionality(adapter, I2C_FUNC_I2C))
+ return -ENODEV;
+
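+	/*
+	 * IMX132/175/208/219 report their ID at one register address, while
+	 * IMX134/135/227 use a different one; probe both.
+	 */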
+ /* check sensor chip ID */
+ if (imx_read_reg(client, IMX_16BIT, IMX132_175_208_219_CHIP_ID, id)) {
+ v4l2_err(client, "sensor_id = 0x%x\n", *id);
+ return -ENODEV;
+ }
+
+ if (*id == IMX132_ID || *id == IMX175_ID ||
+ *id == IMX208_ID || *id == IMX219_ID)
+ goto found;
+
+ if (imx_read_reg(client, IMX_16BIT, IMX134_135_227_CHIP_ID, id)) {
+ v4l2_err(client, "sensor_id = 0x%x\n", *id);
+ return -ENODEV;
+ }
+ if (*id != IMX134_ID && *id != IMX135_ID && *id != IMX227_ID) {
+ v4l2_err(client, "no imx sensor found\n");
+ return -ENODEV;
+ }
+found:
+ v4l2_info(client, "sensor_id = 0x%x\n", *id);
+
+ /* TODO - need to be updated */
+ *revision = 0;
+
+ return 0;
+}
+
+static void __imx_print_timing(struct v4l2_subdev *sd)
+{
+ struct imx_device *dev = to_imx_sensor(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ u16 width = dev->curr_res_table[dev->fmt_idx].width;
+ u16 height = dev->curr_res_table[dev->fmt_idx].height;
+
+ dev_dbg(&client->dev, "Dump imx timing in stream on:\n");
+ dev_dbg(&client->dev, "width: %d:\n", width);
+ dev_dbg(&client->dev, "height: %d:\n", height);
+ dev_dbg(&client->dev, "pixels_per_line: %d:\n", dev->pixels_per_line);
+ dev_dbg(&client->dev, "line per frame: %d:\n", dev->lines_per_frame);
+ dev_dbg(&client->dev, "pix freq: %d:\n", dev->vt_pix_clk_freq_mhz);
+ dev_dbg(&client->dev, "init fps: %d:\n", dev->vt_pix_clk_freq_mhz /
+ dev->pixels_per_line / dev->lines_per_frame);
+ dev_dbg(&client->dev, "HBlank: %d nS:\n",
+ 1000 * (dev->pixels_per_line - width) /
+ (dev->vt_pix_clk_freq_mhz / 1000000));
+ dev_dbg(&client->dev, "VBlank: %d uS:\n",
+ (dev->lines_per_frame - height) * dev->pixels_per_line /
+ (dev->vt_pix_clk_freq_mhz / 1000000));
+}
+
+/*
+ * imx stream on/off
+ */
+static int imx_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ int ret;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct imx_device *dev = to_imx_sensor(sd);
+
+ mutex_lock(&dev->input_lock);
+ if (enable) {
+		/* Noise reduction & dead pixel correction applied before streaming */
+ if (dev->fw == NULL) {
+ dev_warn(&client->dev, "No MSR loaded from library");
+ } else {
+ ret = apply_msr_data(client, dev->fw);
+ if (ret) {
+ mutex_unlock(&dev->input_lock);
+ return ret;
+ }
+ }
+ ret = imx_test_pattern(sd);
+ if (ret) {
+ v4l2_err(client, "Configure test pattern failed.\n");
+ mutex_unlock(&dev->input_lock);
+ return ret;
+ }
+ __imx_print_timing(sd);
+ ret = imx_write_reg_array(client, imx_streaming);
+ if (ret != 0) {
+ v4l2_err(client, "write_reg_array err\n");
+ mutex_unlock(&dev->input_lock);
+ return ret;
+ }
+ dev->streaming = 1;
+ if (dev->vcm_driver && dev->vcm_driver->t_focus_abs_init)
+ dev->vcm_driver->t_focus_abs_init(sd);
+ } else {
+ ret = imx_write_reg_array(client, imx_soft_standby);
+ if (ret != 0) {
+ v4l2_err(client, "write_reg_array err\n");
+ mutex_unlock(&dev->input_lock);
+ return ret;
+ }
+ dev->streaming = 0;
+ dev->targetfps = 0;
+ }
+ mutex_unlock(&dev->input_lock);
+
+ return 0;
+}
+
+static int __update_imx_device_settings(struct imx_device *dev, u16 sensor_id)
+{
+	/* IMX on other platforms is not supported yet */
+ return -EINVAL;
+}
+
+static int imx_s_config(struct v4l2_subdev *sd,
+ int irq, void *pdata)
+{
+ struct imx_device *dev = to_imx_sensor(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ u8 sensor_revision;
+ u16 sensor_id;
+ int ret;
+ if (pdata == NULL)
+ return -ENODEV;
+
+ dev->platform_data = pdata;
+
+ mutex_lock(&dev->input_lock);
+
+ if (dev->platform_data->platform_init) {
+ ret = dev->platform_data->platform_init(client);
+ if (ret) {
+ mutex_unlock(&dev->input_lock);
+ dev_err(&client->dev, "imx platform init err\n");
+ return ret;
+ }
+ }
+ /*
+ * Power off the module first: the initial board power-up leaves
+ * the power/GPIO pins in an undefined state.
+ */
+ ret = __imx_s_power(sd, 0);
+ if (ret) {
+ v4l2_err(client, "imx power-down err.\n");
+ mutex_unlock(&dev->input_lock);
+ return ret;
+ }
+
+ ret = __imx_s_power(sd, 1);
+ if (ret) {
+ v4l2_err(client, "imx power-up err.\n");
+ mutex_unlock(&dev->input_lock);
+ return ret;
+ }
+
+ ret = dev->platform_data->csi_cfg(sd, 1);
+ if (ret)
+ goto fail_csi_cfg;
+
+ /* config & detect sensor */
+ ret = imx_detect(client, &sensor_id, &sensor_revision);
+ if (ret) {
+ v4l2_err(client, "imx_detect err s_config.\n");
+ goto fail_detect;
+ }
+
+ dev->sensor_id = sensor_id;
+ dev->sensor_revision = sensor_revision;
+
+ /* Resolution settings depend on sensor type and platform */
+ ret = __update_imx_device_settings(dev, dev->sensor_id);
+ if (ret)
+ goto fail_detect;
+ /* Read sensor's OTP data */
+ dev->otp_data = dev->otp_driver->otp_read(sd,
+ dev->otp_driver->dev_addr, dev->otp_driver->start_addr,
+ dev->otp_driver->size);
+
+ /* power off sensor */
+ ret = __imx_s_power(sd, 0);
+
+ mutex_unlock(&dev->input_lock);
+ if (ret)
+ v4l2_err(client, "imx power-down err.\n");
+
+ return ret;
+
+fail_detect:
+ dev->platform_data->csi_cfg(sd, 0);
+fail_csi_cfg:
+ __imx_s_power(sd, 0);
+ if (dev->platform_data->platform_deinit)
+ dev->platform_data->platform_deinit();
+ mutex_unlock(&dev->input_lock);
+ dev_err(&client->dev, "sensor power-gating failed\n");
+ return ret;
+}
+
+static int
+imx_enum_mbus_code(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ struct imx_device *dev = to_imx_sensor(sd);
+ if (code->index >= MAX_FMTS)
+ return -EINVAL;
+
+ mutex_lock(&dev->input_lock);
+ code->code = dev->format.code;
+ mutex_unlock(&dev->input_lock);
+ return 0;
+}
+
+static int
+imx_enum_frame_size(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ int index = fse->index;
+ struct imx_device *dev = to_imx_sensor(sd);
+
+ mutex_lock(&dev->input_lock);
+ if (index >= dev->entries_curr_table) {
+ mutex_unlock(&dev->input_lock);
+ return -EINVAL;
+ }
+
+ fse->min_width = dev->curr_res_table[index].width;
+ fse->min_height = dev->curr_res_table[index].height;
+ fse->max_width = dev->curr_res_table[index].width;
+ fse->max_height = dev->curr_res_table[index].height;
+ mutex_unlock(&dev->input_lock);
+ return 0;
+}
+
+static int
+imx_s_parm(struct v4l2_subdev *sd, struct v4l2_streamparm *param)
+{
+ struct imx_device *dev = to_imx_sensor(sd);
+
+ mutex_lock(&dev->input_lock);
+ dev->run_mode = param->parm.capture.capturemode;
+
+ switch (dev->run_mode) {
+ case CI_MODE_VIDEO:
+ dev->curr_res_table = dev->mode_tables->res_video;
+ dev->entries_curr_table = dev->mode_tables->n_res_video;
+ break;
+ case CI_MODE_STILL_CAPTURE:
+ dev->curr_res_table = dev->mode_tables->res_still;
+ dev->entries_curr_table = dev->mode_tables->n_res_still;
+ break;
+ default:
+ dev->curr_res_table = dev->mode_tables->res_preview;
+ dev->entries_curr_table = dev->mode_tables->n_res_preview;
+ }
+ mutex_unlock(&dev->input_lock);
+ return 0;
+}
+
+int
+imx_g_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_frame_interval *interval)
+{
+ struct imx_device *dev = to_imx_sensor(sd);
+
+ mutex_lock(&dev->input_lock);
+ interval->interval.denominator = dev->fps;
+ interval->interval.numerator = 1;
+ mutex_unlock(&dev->input_lock);
+ return 0;
+}
+
+static int __imx_s_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_frame_interval *interval)
+{
+ struct imx_device *dev = to_imx_sensor(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ const struct imx_resolution *res =
+ &dev->curr_res_table[dev->fmt_idx];
+ struct camera_mipi_info *imx_info = NULL;
+ unsigned short pixels_per_line;
+ unsigned short lines_per_frame;
+ unsigned int fps_index;
+ int fps;
+ int ret = 0;
+
+ imx_info = v4l2_get_subdev_hostdata(sd);
+ if (imx_info == NULL)
+ return -EINVAL;
+
+ if (!interval->interval.numerator)
+ interval->interval.numerator = 1;
+
+ fps = interval->interval.denominator / interval->interval.numerator;
+
+ if (!fps)
+ return -EINVAL;
+
+ dev->targetfps = fps;
+ /* No need to proceed further if we are not streaming */
+ if (!dev->streaming)
+ return 0;
+
+ /* Ignore if we are already using the required FPS. */
+ if (fps == dev->fps)
+ return 0;
+
+ /*
+ * From here on the sensor is already streaming, so adjust the fps
+ * dynamically.
+ */
+ fps_index = __imx_above_nearest_fps_index(fps, res->fps_options);
+ if (fps > res->fps_options[fps_index].fps) {
+ /*
+ * No higher-fps setting is available; increasing the fps by
+ * adjusting lines_per_frame is not supported.
+ */
+ dev_err(&client->dev, "Could not support fps: %d.\n", fps);
+ return -EINVAL;
+ }
+
+ if (res->fps_options[fps_index].regs &&
+ res->fps_options[fps_index].regs != dev->regs) {
+ /*
+ * The requested fps needs a register setting that differs from
+ * the one currently in use. Do not switch settings while
+ * streaming, as that may have unexpected results (e.g. PLL, IQ).
+ */
+ dev_dbg(&client->dev,
+ "Sensor is streaming, not applying new sensor setting\n");
+ if (fps > res->fps_options[dev->fps_index].fps) {
+ /*
+ * Increasing the fps is not supported here: the higher-fps
+ * setting cannot be applied while streaming, and the
+ * requested fps is above what the current setting provides.
+ */
+ dev_warn(&client->dev,
+ "Could not support fps: %d, keep current: %d.\n",
+ fps, dev->fps);
+ return 0;
+ }
+ } else {
+ dev->fps_index = fps_index;
+ dev->fps = res->fps_options[dev->fps_index].fps;
+ }
+
+ /* Look up the frame timings for the selected fps option */
+ pixels_per_line = res->fps_options[dev->fps_index].pixels_per_line;
+ lines_per_frame = res->fps_options[dev->fps_index].lines_per_frame;
+
+ if (fps > res->fps_options[fps_index].fps) {
+ /*
+ * No higher-fps setting is available; increasing the fps by
+ * adjusting lines_per_frame is not supported.
+ */
+ dev_warn(&client->dev, "Could not support fps: %d, using %d.\n",
+ fps, res->fps_options[fps_index].fps);
+ goto done;
+ }
+
+ /* The selected setting does not provide the requested fps exactly */
+ if (dev->fps != fps) {
+#define MAX_LINES_PER_FRAME 0xffff
+ dev_dbg(&client->dev, "adjusting fps using lines_per_frame\n");
+ /*
+ * FIXME!
+ * 1: check DS on max value of lines_per_frame
+ * 2: consider use pixel per line for more range?
+ */
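+ /*
+ * Frame time scales linearly with lines_per_frame, so scale the
+ * current lines_per_frame by (current fps / requested fps); e.g.
+ * going from 30 fps to 15 fps doubles lines_per_frame.
+ */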
+ if (dev->lines_per_frame * dev->fps / fps >
+ MAX_LINES_PER_FRAME) {
+ dev_warn(&client->dev,
+ "adjusted lines_per_frame out of range, using max value\n");
+ lines_per_frame = MAX_LINES_PER_FRAME;
+ } else {
+ lines_per_frame = lines_per_frame * dev->fps / fps;
+ }
+ }
+done:
+ /* Commit the new frame timings */
+ dev->pixels_per_line = pixels_per_line;
+ dev->lines_per_frame = lines_per_frame;
+
+ /* Update the new values so that the user side knows the current settings */
+ ret = __imx_update_exposure_timing(client,
+ dev->coarse_itg, dev->pixels_per_line, dev->lines_per_frame);
+ if (ret)
+ return ret;
+
+ dev->fps = fps;
+
+ ret = imx_get_intg_factor(client, imx_info, dev->regs);
+ if (ret)
+ return ret;
+
+ interval->interval.denominator = res->fps_options[dev->fps_index].fps;
+ interval->interval.numerator = 1;
+ __imx_print_timing(sd);
+
+ return ret;
+}
+
+static int imx_s_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_frame_interval *interval)
+{
+ struct imx_device *dev = to_imx_sensor(sd);
+ int ret;
+
+ mutex_lock(&dev->input_lock);
+ ret = __imx_s_frame_interval(sd, interval);
+ mutex_unlock(&dev->input_lock);
+
+ return ret;
+}
+
+static int imx_g_skip_frames(struct v4l2_subdev *sd, u32 *frames)
+{
+ struct imx_device *dev = to_imx_sensor(sd);
+
+ mutex_lock(&dev->input_lock);
+ *frames = dev->curr_res_table[dev->fmt_idx].skip_frames;
+ mutex_unlock(&dev->input_lock);
+
+ return 0;
+}
+
+static const struct v4l2_subdev_sensor_ops imx_sensor_ops = {
+ .g_skip_frames = imx_g_skip_frames,
+};
+
+static const struct v4l2_subdev_video_ops imx_video_ops = {
+ .s_stream = imx_s_stream,
+ .s_parm = imx_s_parm,
+ .g_frame_interval = imx_g_frame_interval,
+ .s_frame_interval = imx_s_frame_interval,
+};
+
+static const struct v4l2_subdev_core_ops imx_core_ops = {
+ .s_power = imx_s_power,
+ .ioctl = imx_ioctl,
+ .init = imx_init,
+};
+
+static const struct v4l2_subdev_pad_ops imx_pad_ops = {
+ .enum_mbus_code = imx_enum_mbus_code,
+ .enum_frame_size = imx_enum_frame_size,
+ .get_fmt = imx_get_fmt,
+ .set_fmt = imx_set_fmt,
+};
+
+static const struct v4l2_subdev_ops imx_ops = {
+ .core = &imx_core_ops,
+ .video = &imx_video_ops,
+ .pad = &imx_pad_ops,
+ .sensor = &imx_sensor_ops,
+};
+
+static const struct media_entity_operations imx_entity_ops = {
+ .link_setup = NULL,
+};
+
+static int imx_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct imx_device *dev = to_imx_sensor(sd);
+
+ if (dev->platform_data->platform_deinit)
+ dev->platform_data->platform_deinit();
+
+ media_entity_cleanup(&dev->sd.entity);
+ v4l2_ctrl_handler_free(&dev->ctrl_handler);
+ dev->platform_data->csi_cfg(sd, 0);
+ v4l2_device_unregister_subdev(sd);
+ release_msr_list(client, dev->fw);
+ kfree(dev);
+
+ return 0;
+}
+
+static int __imx_init_ctrl_handler(struct imx_device *dev)
+{
+ struct v4l2_ctrl_handler *hdl;
+ int i;
+
+ hdl = &dev->ctrl_handler;
+
+ v4l2_ctrl_handler_init(&dev->ctrl_handler, ARRAY_SIZE(imx_controls));
+
+ for (i = 0; i < ARRAY_SIZE(imx_controls); i++)
+ v4l2_ctrl_new_custom(&dev->ctrl_handler,
+ &imx_controls[i], NULL);
+
+ dev->pixel_rate = v4l2_ctrl_find(&dev->ctrl_handler,
+ V4L2_CID_PIXEL_RATE);
+ dev->h_blank = v4l2_ctrl_find(&dev->ctrl_handler,
+ V4L2_CID_HBLANK);
+ dev->v_blank = v4l2_ctrl_find(&dev->ctrl_handler,
+ V4L2_CID_VBLANK);
+ dev->link_freq = v4l2_ctrl_find(&dev->ctrl_handler,
+ V4L2_CID_LINK_FREQ);
+ dev->h_flip = v4l2_ctrl_find(&dev->ctrl_handler,
+ V4L2_CID_HFLIP);
+ dev->v_flip = v4l2_ctrl_find(&dev->ctrl_handler,
+ V4L2_CID_VFLIP);
+ dev->tp_mode = v4l2_ctrl_find(&dev->ctrl_handler,
+ V4L2_CID_TEST_PATTERN);
+ dev->tp_r = v4l2_ctrl_find(&dev->ctrl_handler,
+ V4L2_CID_TEST_PATTERN_COLOR_R);
+ dev->tp_gr = v4l2_ctrl_find(&dev->ctrl_handler,
+ V4L2_CID_TEST_PATTERN_COLOR_GR);
+ dev->tp_gb = v4l2_ctrl_find(&dev->ctrl_handler,
+ V4L2_CID_TEST_PATTERN_COLOR_GB);
+ dev->tp_b = v4l2_ctrl_find(&dev->ctrl_handler,
+ V4L2_CID_TEST_PATTERN_COLOR_B);
+
+ if (dev->ctrl_handler.error || dev->pixel_rate == NULL
+ || dev->h_blank == NULL || dev->v_blank == NULL
+ || dev->h_flip == NULL || dev->v_flip == NULL
+ || dev->link_freq == NULL) {
+ return dev->ctrl_handler.error;
+ }
+
+ dev->ctrl_handler.lock = &dev->input_lock;
+ dev->sd.ctrl_handler = hdl;
+ v4l2_ctrl_handler_setup(&dev->ctrl_handler);
+
+ return 0;
+}
+
+static void imx_update_reg_info(struct imx_device *dev)
+{
+ if (dev->sensor_id == IMX219_ID) {
+ dev->reg_addr = &imx219_addr;
+ dev->param_hold = imx219_param_hold;
+ dev->param_update = imx219_param_update;
+ } else {
+ dev->reg_addr = &imx_addr;
+ dev->param_hold = imx_param_hold;
+ dev->param_update = imx_param_update;
+ }
+}
+
+static int imx_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct imx_device *dev;
+ struct camera_mipi_info *imx_info = NULL;
+ int ret;
+ char *msr_file_name = NULL;
+
+ /* allocate sensor device & init sub device */
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev) {
+ v4l2_err(client, "%s: out of memory\n", __func__);
+ return -ENOMEM;
+ }
+
+ mutex_init(&dev->input_lock);
+
+ dev->i2c_id = id->driver_data;
+ dev->fmt_idx = 0;
+ dev->sensor_id = IMX_ID_DEFAULT;
+ dev->vcm_driver = &imx_vcms[IMX_ID_DEFAULT];
+ dev->digital_gain = 256;
+
+ v4l2_i2c_subdev_init(&(dev->sd), client, &imx_ops);
+
+ if (client->dev.platform_data) {
+ ret = imx_s_config(&dev->sd, client->irq,
+ client->dev.platform_data);
+ if (ret)
+ goto out_free;
+ }
+ imx_info = v4l2_get_subdev_hostdata(&dev->sd);
+
+ /*
+ * sd->name is set to the sensor driver name by the v4l2 core;
+ * change it to the sensor name here.
+ */
+ imx_update_reg_info(dev);
+ snprintf(dev->sd.name, sizeof(dev->sd.name), "%s%x %d-%04x",
+ IMX_SUBDEV_PREFIX, dev->sensor_id,
+ i2c_adapter_id(client->adapter), client->addr);
+
+ ret = __imx_init_ctrl_handler(dev);
+ if (ret)
+ goto out_ctrl_handler_free;
+
+ dev->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ dev->pad.flags = MEDIA_PAD_FL_SOURCE;
+ dev->format.code = imx_translate_bayer_order(
+ imx_info->raw_bayer_order);
+ dev->sd.entity.ops = &imx_entity_ops;
+ dev->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
+
+ ret = media_entity_pads_init(&dev->sd.entity, 1, &dev->pad);
+ if (ret) {
+ imx_remove(client);
+ return ret;
+ }
+
+ /* Load the noise reduction and dead pixel registers from the cpf file */
+ if (dev->platform_data->msr_file_name != NULL)
+ msr_file_name = dev->platform_data->msr_file_name();
+ if (msr_file_name) {
+ ret = load_msr_list(client, msr_file_name, &dev->fw);
+ if (ret) {
+ imx_remove(client);
+ return ret;
+ }
+ } else {
+ dev_warn(&client->dev, "Drvb file not present");
+ }
+
+ return ret;
+
+out_ctrl_handler_free:
+ v4l2_ctrl_handler_free(&dev->ctrl_handler);
+
+out_free:
+ v4l2_device_unregister_subdev(&dev->sd);
+ kfree(dev);
+ return ret;
+}
+
+static const struct i2c_device_id imx_ids[] = {
+ {IMX_NAME_175, IMX175_ID},
+ {IMX_NAME_135, IMX135_ID},
+ {IMX_NAME_135_FUJI, IMX135_FUJI_ID},
+ {IMX_NAME_134, IMX134_ID},
+ {IMX_NAME_132, IMX132_ID},
+ {IMX_NAME_208, IMX208_ID},
+ {IMX_NAME_219, IMX219_ID},
+ {IMX_NAME_227, IMX227_ID},
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, imx_ids);
+
+static struct i2c_driver imx_driver = {
+ .driver = {
+ .name = IMX_DRIVER,
+ },
+ .probe = imx_probe,
+ .remove = imx_remove,
+ .id_table = imx_ids,
+};
+
+static __init int init_imx(void)
+{
+ return i2c_add_driver(&imx_driver);
+}
+
+static __exit void exit_imx(void)
+{
+ i2c_del_driver(&imx_driver);
+}
+
+module_init(init_imx);
+module_exit(exit_imx);
+
+MODULE_DESCRIPTION("A low-level driver for Sony IMX sensors");
+MODULE_AUTHOR("Shenbo Huang <shenbo.huang@intel.com>");
+MODULE_LICENSE("GPL");
+
diff --git a/drivers/staging/media/atomisp/i2c/imx/imx.h b/drivers/staging/media/atomisp/i2c/imx/imx.h
new file mode 100644
index 000000000000..36b3f3a5a41f
--- /dev/null
+++ b/drivers/staging/media/atomisp/i2c/imx/imx.h
@@ -0,0 +1,766 @@
+/*
+ * Support for Sony IMX camera sensor.
+ *
+ * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+
+#ifndef __IMX_H__
+#define __IMX_H__
+#include "../../include/linux/atomisp_platform.h"
+#include "../../include/linux/atomisp.h"
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/videodev2.h>
+#include <linux/v4l2-mediabus.h>
+#include <media/media-entity.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-subdev.h>
+#include "imx175.h"
+#include "imx135.h"
+#include "imx134.h"
+#include "imx132.h"
+#include "imx208.h"
+#include "imx219.h"
+#include "imx227.h"
+
+#define IMX_MCLK 192
+
+/* TODO - This should be added into include/linux/videodev2.h */
+#ifndef V4L2_IDENT_IMX
+#define V4L2_IDENT_IMX 8245
+#endif
+
+#define IMX_MAX_AE_LUT_LENGTH 5
+/*
+ * imx System control registers
+ */
+#define IMX_MASK_5BIT 0x1F
+#define IMX_MASK_4BIT 0xF
+#define IMX_MASK_3BIT 0x7
+#define IMX_MASK_2BIT 0x3
+#define IMX_MASK_8BIT 0xFF
+#define IMX_MASK_11BIT 0x7FF
+#define IMX_INTG_BUF_COUNT 2
+
+#define IMX_FINE_INTG_TIME 0x1E8
+
+#define IMX_VT_PIX_CLK_DIV 0x0301
+#define IMX_VT_SYS_CLK_DIV 0x0303
+#define IMX_PRE_PLL_CLK_DIV 0x0305
+#define IMX227_IOP_PRE_PLL_CLK_DIV 0x030D
+#define IMX227_PLL_MULTIPLIER 0x0306
+#define IMX227_IOP_PLL_MULTIPLIER 0x030E
+#define IMX227_PLL_MULTI_DRIVE 0x0310
+#define IMX227_OP_PIX_CLK_DIV 0x0309
+#define IMX227_OP_SYS_CLK_DIV 0x030B
+#define IMX_PLL_MULTIPLIER 0x030C
+#define IMX_OP_PIX_DIV 0x0309
+#define IMX_OP_SYS_DIV 0x030B
+#define IMX_FRAME_LENGTH_LINES 0x0340
+#define IMX_LINE_LENGTH_PIXELS 0x0342
+#define IMX_COARSE_INTG_TIME_MIN 0x1004
+#define IMX_COARSE_INTG_TIME_MAX 0x1006
+#define IMX_BINNING_ENABLE 0x0390
+#define IMX227_BINNING_ENABLE 0x0900
+#define IMX_BINNING_TYPE 0x0391
+#define IMX227_BINNING_TYPE 0x0901
+#define IMX_READ_MODE 0x0390
+#define IMX227_READ_MODE 0x0900
+
+#define IMX_HORIZONTAL_START_H 0x0344
+#define IMX_VERTICAL_START_H 0x0346
+#define IMX_HORIZONTAL_END_H 0x0348
+#define IMX_VERTICAL_END_H 0x034a
+#define IMX_HORIZONTAL_OUTPUT_SIZE_H 0x034c
+#define IMX_VERTICAL_OUTPUT_SIZE_H 0x034e
+
+/* Post Divider setting register for imx132 and imx208 */
+#define IMX132_208_VT_RGPLTD 0x30A4
+
+/* Multiplier setting register for imx132, imx208, and imx219 */
+#define IMX132_208_219_PLL_MULTIPLIER 0x0306
+
+#define IMX_COARSE_INTEGRATION_TIME 0x0202
+#define IMX_TEST_PATTERN_MODE 0x0600
+#define IMX_TEST_PATTERN_COLOR_R 0x0602
+#define IMX_TEST_PATTERN_COLOR_GR 0x0604
+#define IMX_TEST_PATTERN_COLOR_B 0x0606
+#define IMX_TEST_PATTERN_COLOR_GB 0x0608
+#define IMX_IMG_ORIENTATION 0x0101
+#define IMX_VFLIP_BIT 2
+#define IMX_HFLIP_BIT 1
+#define IMX_GLOBAL_GAIN 0x0205
+#define IMX_SHORT_AGC_GAIN 0x0233
+#define IMX_DGC_ADJ 0x020E
+#define IMX_DGC_LEN 10
+#define IMX227_DGC_LEN 4
+#define IMX_MAX_EXPOSURE_SUPPORTED 0xfffb
+#define IMX_MAX_GLOBAL_GAIN_SUPPORTED 0x00ff
+#define IMX_MAX_DIGITAL_GAIN_SUPPORTED 0x0fff
+
+#define MAX_FMTS 1
+#define IMX_OTP_DATA_SIZE 1280
+
+#define IMX_SUBDEV_PREFIX "imx"
+#define IMX_DRIVER "imx1x5"
+
+/* Sensor ids from identification register */
+#define IMX_NAME_134 "imx134"
+#define IMX_NAME_135 "imx135"
+#define IMX_NAME_175 "imx175"
+#define IMX_NAME_132 "imx132"
+#define IMX_NAME_208 "imx208"
+#define IMX_NAME_219 "imx219"
+#define IMX_NAME_227 "imx227"
+#define IMX175_ID 0x0175
+#define IMX135_ID 0x0135
+#define IMX134_ID 0x0134
+#define IMX132_ID 0x0132
+#define IMX208_ID 0x0208
+#define IMX219_ID 0x0219
+#define IMX227_ID 0x0227
+
+/*
+ * Sensor id based on the i2c_device_id table
+ * (the Fuji module cannot be detected from sensor registers)
+ */
+#define IMX135_FUJI_ID 0x0136
+#define IMX_NAME_135_FUJI "imx135fuji"
+
+/* imx175 - use dw9714 vcm */
+#define IMX175_MERRFLD 0x175
+#define IMX175_VALLEYVIEW 0x176
+#define IMX135_SALTBAY 0x135
+#define IMX135_VICTORIABAY 0x136
+#define IMX132_SALTBAY 0x132
+#define IMX134_VALLEYVIEW 0x134
+#define IMX208_MOFD_PD2 0x208
+#define IMX219_MFV0_PRH 0x219
+#define IMX227_SAND 0x227
+
+/* otp - specific settings */
+#define E2PROM_ADDR 0xa0
+#define E2PROM_LITEON_12P1BA869D_ADDR 0xa0
+#define E2PROM_ABICO_SS89A839_ADDR 0xa8
+#define DEFAULT_OTP_SIZE 1280
+#define IMX135_OTP_SIZE 1280
+#define IMX219_OTP_SIZE 2048
+#define IMX227_OTP_SIZE 2560
+#define E2PROM_LITEON_12P1BA869D_SIZE 544
+
+#define IMX_ID_DEFAULT 0x0000
+#define IMX132_175_208_219_CHIP_ID 0x0000
+#define IMX134_135_CHIP_ID 0x0016
+#define IMX134_135_227_CHIP_ID 0x0016
+
+#define IMX175_RES_WIDTH_MAX 3280
+#define IMX175_RES_HEIGHT_MAX 2464
+#define IMX135_RES_WIDTH_MAX 4208
+#define IMX135_RES_HEIGHT_MAX 3120
+#define IMX132_RES_WIDTH_MAX 1936
+#define IMX132_RES_HEIGHT_MAX 1096
+#define IMX134_RES_WIDTH_MAX 3280
+#define IMX134_RES_HEIGHT_MAX 2464
+#define IMX208_RES_WIDTH_MAX 1936
+#define IMX208_RES_HEIGHT_MAX 1096
+#define IMX219_RES_WIDTH_MAX 3280
+#define IMX219_RES_HEIGHT_MAX 2464
+#define IMX227_RES_WIDTH_MAX 2400
+#define IMX227_RES_HEIGHT_MAX 2720
+
+/* Defines for lens/VCM */
+#define IMX_FOCAL_LENGTH_NUM 369 /* 3.69 mm */
+#define IMX_FOCAL_LENGTH_DEM 100
+#define IMX_F_NUMBER_DEFAULT_NUM 22
+#define IMX_F_NUMBER_DEM 10
+#define IMX_INVALID_CONFIG 0xffffffff
+#define IMX_MAX_FOCUS_POS 1023
+#define IMX_MAX_FOCUS_NEG (-1023)
+#define IMX_VCM_SLEW_STEP_MAX 0x3f
+#define IMX_VCM_SLEW_TIME_MAX 0x1f
+
+#define IMX_BIN_FACTOR_MAX 4
+#define IMX_INTEGRATION_TIME_MARGIN 4
+/*
+ * focal length bits definition:
+ * bits 31-16: numerator, bits 15-0: denominator
+ */
+#define IMX_FOCAL_LENGTH_DEFAULT 0x1710064
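+/*
+ * For reference: 0x1710064 == (369 << 16) | 100, i.e. the
+ * IMX_FOCAL_LENGTH_NUM/IMX_FOCAL_LENGTH_DEM pair above (3.69 mm).
+ */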
+
+/*
+ * current f-number bits definition:
+ * bits 31-16: numerator, bits 15-0: denominator
+ */
+#define IMX_F_NUMBER_DEFAULT 0x16000a
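+/*
+ * For reference: 0x16000a == (22 << 16) | 10, i.e. the
+ * IMX_F_NUMBER_DEFAULT_NUM/IMX_F_NUMBER_DEM pair above (f/2.2).
+ */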
+
+/*
+ * f-number range bits definition:
+ * bits 31-24: max f-number numerator
+ * bits 23-16: max f-number denominator
+ * bits 15-8: min f-number numerator
+ * bits 7-0: min f-number denominator
+ */
+#define IMX_F_NUMBER_RANGE 0x160a160a
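+/*
+ * For reference: 0x160a160a packs 22/10 as both the max and the min
+ * f-number, i.e. a fixed f/2.2 aperture.
+ */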
+
+struct imx_vcm {
+ int (*power_up)(struct v4l2_subdev *sd);
+ int (*power_down)(struct v4l2_subdev *sd);
+ int (*init)(struct v4l2_subdev *sd);
+ int (*t_focus_vcm)(struct v4l2_subdev *sd, u16 val);
+ int (*t_focus_abs)(struct v4l2_subdev *sd, s32 value);
+ int (*t_focus_abs_init)(struct v4l2_subdev *sd);
+ int (*t_focus_rel)(struct v4l2_subdev *sd, s32 value);
+ int (*q_focus_status)(struct v4l2_subdev *sd, s32 *value);
+ int (*q_focus_abs)(struct v4l2_subdev *sd, s32 *value);
+ int (*t_vcm_slew)(struct v4l2_subdev *sd, s32 value);
+ int (*t_vcm_timing)(struct v4l2_subdev *sd, s32 value);
+};
+
+struct imx_otp {
+ void * (*otp_read)(struct v4l2_subdev *sd, u8 dev_addr,
+ u32 start_addr, u32 size);
+ u32 start_addr;
+ u32 size;
+ u8 dev_addr;
+};
+
+struct max_res {
+ int res_max_width;
+ int res_max_height;
+};
+
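+/*
+ * Note: the tables below are indexed directly by the sensor/platform ID
+ * values (e.g. imx_max_res[IMX175_ID] with IMX175_ID == 0x0175), so they
+ * are sparse arrays with only the listed entries populated.
+ */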
+struct max_res imx_max_res[] = {
+ [IMX175_ID] = {
+ .res_max_width = IMX175_RES_WIDTH_MAX,
+ .res_max_height = IMX175_RES_HEIGHT_MAX,
+ },
+ [IMX135_ID] = {
+ .res_max_width = IMX135_RES_WIDTH_MAX,
+ .res_max_height = IMX135_RES_HEIGHT_MAX,
+ },
+ [IMX132_ID] = {
+ .res_max_width = IMX132_RES_WIDTH_MAX,
+ .res_max_height = IMX132_RES_HEIGHT_MAX,
+ },
+ [IMX134_ID] = {
+ .res_max_width = IMX134_RES_WIDTH_MAX,
+ .res_max_height = IMX134_RES_HEIGHT_MAX,
+ },
+ [IMX208_ID] = {
+ .res_max_width = IMX208_RES_WIDTH_MAX,
+ .res_max_height = IMX208_RES_HEIGHT_MAX,
+ },
+ [IMX219_ID] = {
+ .res_max_width = IMX219_RES_WIDTH_MAX,
+ .res_max_height = IMX219_RES_HEIGHT_MAX,
+ },
+ [IMX227_ID] = {
+ .res_max_width = IMX227_RES_WIDTH_MAX,
+ .res_max_height = IMX227_RES_HEIGHT_MAX,
+ },
+};
+
+struct imx_settings {
+ struct imx_reg const *init_settings;
+ struct imx_resolution *res_preview;
+ struct imx_resolution *res_still;
+ struct imx_resolution *res_video;
+ int n_res_preview;
+ int n_res_still;
+ int n_res_video;
+};
+
+struct imx_settings imx_sets[] = {
+ [IMX175_MERRFLD] = {
+ .init_settings = imx175_init_settings,
+ .res_preview = imx175_res_preview,
+ .res_still = imx175_res_still,
+ .res_video = imx175_res_video,
+ .n_res_preview = ARRAY_SIZE(imx175_res_preview),
+ .n_res_still = ARRAY_SIZE(imx175_res_still),
+ .n_res_video = ARRAY_SIZE(imx175_res_video),
+ },
+ [IMX175_VALLEYVIEW] = {
+ .init_settings = imx175_init_settings,
+ .res_preview = imx175_res_preview,
+ .res_still = imx175_res_still,
+ .res_video = imx175_res_video,
+ .n_res_preview = ARRAY_SIZE(imx175_res_preview),
+ .n_res_still = ARRAY_SIZE(imx175_res_still),
+ .n_res_video = ARRAY_SIZE(imx175_res_video),
+ },
+ [IMX135_SALTBAY] = {
+ .init_settings = imx135_init_settings,
+ .res_preview = imx135_res_preview,
+ .res_still = imx135_res_still,
+ .res_video = imx135_res_video,
+ .n_res_preview = ARRAY_SIZE(imx135_res_preview),
+ .n_res_still = ARRAY_SIZE(imx135_res_still),
+ .n_res_video = ARRAY_SIZE(imx135_res_video),
+ },
+ [IMX135_VICTORIABAY] = {
+ .init_settings = imx135_init_settings,
+ .res_preview = imx135_res_preview_mofd,
+ .res_still = imx135_res_still_mofd,
+ .res_video = imx135_res_video,
+ .n_res_preview = ARRAY_SIZE(imx135_res_preview_mofd),
+ .n_res_still = ARRAY_SIZE(imx135_res_still_mofd),
+ .n_res_video = ARRAY_SIZE(imx135_res_video),
+ },
+ [IMX132_SALTBAY] = {
+ .init_settings = imx132_init_settings,
+ .res_preview = imx132_res_preview,
+ .res_still = imx132_res_still,
+ .res_video = imx132_res_video,
+ .n_res_preview = ARRAY_SIZE(imx132_res_preview),
+ .n_res_still = ARRAY_SIZE(imx132_res_still),
+ .n_res_video = ARRAY_SIZE(imx132_res_video),
+ },
+ [IMX134_VALLEYVIEW] = {
+ .init_settings = imx134_init_settings,
+ .res_preview = imx134_res_preview,
+ .res_still = imx134_res_still,
+ .res_video = imx134_res_video,
+ .n_res_preview = ARRAY_SIZE(imx134_res_preview),
+ .n_res_still = ARRAY_SIZE(imx134_res_still),
+ .n_res_video = ARRAY_SIZE(imx134_res_video),
+ },
+ [IMX208_MOFD_PD2] = {
+ .init_settings = imx208_init_settings,
+ .res_preview = imx208_res_preview,
+ .res_still = imx208_res_still,
+ .res_video = imx208_res_video,
+ .n_res_preview = ARRAY_SIZE(imx208_res_preview),
+ .n_res_still = ARRAY_SIZE(imx208_res_still),
+ .n_res_video = ARRAY_SIZE(imx208_res_video),
+ },
+ [IMX219_MFV0_PRH] = {
+ .init_settings = imx219_init_settings,
+ .res_preview = imx219_res_preview,
+ .res_still = imx219_res_still,
+ .res_video = imx219_res_video,
+ .n_res_preview = ARRAY_SIZE(imx219_res_preview),
+ .n_res_still = ARRAY_SIZE(imx219_res_still),
+ .n_res_video = ARRAY_SIZE(imx219_res_video),
+ },
+ [IMX227_SAND] = {
+ .init_settings = imx227_init_settings,
+ .res_preview = imx227_res_preview,
+ .res_still = imx227_res_still,
+ .res_video = imx227_res_video,
+ .n_res_preview = ARRAY_SIZE(imx227_res_preview),
+ .n_res_still = ARRAY_SIZE(imx227_res_still),
+ .n_res_video = ARRAY_SIZE(imx227_res_video),
+ },
+};
+
+struct imx_reg_addr {
+ u16 frame_length_lines;
+ u16 line_length_pixels;
+ u16 horizontal_start_h;
+ u16 vertical_start_h;
+ u16 horizontal_end_h;
+ u16 vertical_end_h;
+ u16 horizontal_output_size_h;
+ u16 vertical_output_size_h;
+ u16 coarse_integration_time;
+ u16 img_orientation;
+ u16 global_gain;
+ u16 dgc_adj;
+};
+
+struct imx_reg_addr imx_addr = {
+ IMX_FRAME_LENGTH_LINES,
+ IMX_LINE_LENGTH_PIXELS,
+ IMX_HORIZONTAL_START_H,
+ IMX_VERTICAL_START_H,
+ IMX_HORIZONTAL_END_H,
+ IMX_VERTICAL_END_H,
+ IMX_HORIZONTAL_OUTPUT_SIZE_H,
+ IMX_VERTICAL_OUTPUT_SIZE_H,
+ IMX_COARSE_INTEGRATION_TIME,
+ IMX_IMG_ORIENTATION,
+ IMX_GLOBAL_GAIN,
+ IMX_DGC_ADJ,
+};
+
+struct imx_reg_addr imx219_addr = {
+ IMX219_FRAME_LENGTH_LINES,
+ IMX219_LINE_LENGTH_PIXELS,
+ IMX219_HORIZONTAL_START_H,
+ IMX219_VERTICAL_START_H,
+ IMX219_HORIZONTAL_END_H,
+ IMX219_VERTICAL_END_H,
+ IMX219_HORIZONTAL_OUTPUT_SIZE_H,
+ IMX219_VERTICAL_OUTPUT_SIZE_H,
+ IMX219_COARSE_INTEGRATION_TIME,
+ IMX219_IMG_ORIENTATION,
+ IMX219_GLOBAL_GAIN,
+ IMX219_DGC_ADJ,
+};
+
+#define v4l2_format_capture_type_entry(_width, _height, \
+ _pixelformat, _bytesperline, _colorspace) \
+ {\
+ .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,\
+ .fmt.pix.width = (_width),\
+ .fmt.pix.height = (_height),\
+ .fmt.pix.pixelformat = (_pixelformat),\
+ .fmt.pix.bytesperline = (_bytesperline),\
+ .fmt.pix.colorspace = (_colorspace),\
+ .fmt.pix.sizeimage = (_height)*(_bytesperline),\
+ }
+
+#define s_output_format_entry(_width, _height, _pixelformat, \
+ _bytesperline, _colorspace, _fps) \
+ {\
+ .v4l2_fmt = v4l2_format_capture_type_entry(_width, \
+ _height, _pixelformat, _bytesperline, \
+ _colorspace),\
+ .fps = (_fps),\
+ }
+
+#define s_output_format_reg_entry(_width, _height, _pixelformat, \
+ _bytesperline, _colorspace, _fps, _reg_setting) \
+ {\
+ .s_fmt = s_output_format_entry(_width, _height,\
+ _pixelformat, _bytesperline, \
+ _colorspace, _fps),\
+ .reg_setting = (_reg_setting),\
+ }
+
+/* imx device structure */
+struct imx_device {
+ struct v4l2_subdev sd;
+ struct media_pad pad;
+ struct v4l2_mbus_framefmt format;
+ struct camera_sensor_platform_data *platform_data;
+ struct mutex input_lock; /* serialize sensor's ioctl */
+ int fmt_idx;
+ int status;
+ int streaming;
+ int power;
+ int run_mode;
+ int vt_pix_clk_freq_mhz;
+ int fps_index;
+ u32 focus;
+ u16 sensor_id; /* Sensor id from registers */
+ u16 i2c_id; /* Sensor id from i2c_device_id */
+ u16 coarse_itg;
+ u16 fine_itg;
+ u16 digital_gain;
+ u16 gain;
+ u16 pixels_per_line;
+ u16 lines_per_frame;
+ u8 targetfps;
+ u8 fps;
+ const struct imx_reg *regs;
+ u8 res;
+ u8 type;
+ u8 sensor_revision;
+ u8 *otp_data;
+ struct imx_settings *mode_tables;
+ struct imx_vcm *vcm_driver;
+ struct imx_otp *otp_driver;
+ const struct imx_resolution *curr_res_table;
+ int entries_curr_table;
+ const struct firmware *fw;
+ struct imx_reg_addr *reg_addr;
+ const struct imx_reg *param_hold;
+ const struct imx_reg *param_update;
+
+ /* used for h/b blank tuning */
+ struct v4l2_ctrl_handler ctrl_handler;
+ struct v4l2_ctrl *pixel_rate;
+ struct v4l2_ctrl *h_blank;
+ struct v4l2_ctrl *v_blank;
+ struct v4l2_ctrl *link_freq;
+ struct v4l2_ctrl *h_flip;
+ struct v4l2_ctrl *v_flip;
+
+ /* Test pattern control */
+ struct v4l2_ctrl *tp_mode;
+ struct v4l2_ctrl *tp_r;
+ struct v4l2_ctrl *tp_gr;
+ struct v4l2_ctrl *tp_gb;
+ struct v4l2_ctrl *tp_b;
+
+ /* FIXME! */
+ bool new_res_sel_method;
+};
+
+#define to_imx_sensor(x) container_of(x, struct imx_device, sd)
+
+#define IMX_MAX_WRITE_BUF_SIZE 32
+struct imx_write_buffer {
+ u16 addr;
+ u8 data[IMX_MAX_WRITE_BUF_SIZE];
+};
+
+struct imx_write_ctrl {
+ int index;
+ struct imx_write_buffer buffer;
+};
+
+static const struct imx_reg imx_soft_standby[] = {
+ {IMX_8BIT, 0x0100, 0x00},
+ {IMX_TOK_TERM, 0, 0}
+};
+
+static const struct imx_reg imx_streaming[] = {
+ {IMX_8BIT, 0x0100, 0x01},
+ {IMX_TOK_TERM, 0, 0}
+};
+
+static const struct imx_reg imx_param_hold[] = {
+ {IMX_8BIT, 0x0104, 0x01}, /* GROUPED_PARAMETER_HOLD */
+ {IMX_TOK_TERM, 0, 0}
+};
+
+static const struct imx_reg imx_param_update[] = {
+ {IMX_8BIT, 0x0104, 0x00}, /* GROUPED_PARAMETER_HOLD */
+ {IMX_TOK_TERM, 0, 0}
+};
+
+static const struct imx_reg imx219_param_hold[] = {
+ {IMX_TOK_TERM, 0, 0}
+};
+
+static const struct imx_reg imx219_param_update[] = {
+ {IMX_TOK_TERM, 0, 0}
+};
+
+extern int ad5816g_vcm_power_up(struct v4l2_subdev *sd);
+extern int ad5816g_vcm_power_down(struct v4l2_subdev *sd);
+extern int ad5816g_vcm_init(struct v4l2_subdev *sd);
+
+extern int ad5816g_t_focus_vcm(struct v4l2_subdev *sd, u16 val);
+extern int ad5816g_t_focus_abs(struct v4l2_subdev *sd, s32 value);
+extern int ad5816g_t_focus_rel(struct v4l2_subdev *sd, s32 value);
+extern int ad5816g_q_focus_status(struct v4l2_subdev *sd, s32 *value);
+extern int ad5816g_q_focus_abs(struct v4l2_subdev *sd, s32 *value);
+extern int ad5816g_t_vcm_slew(struct v4l2_subdev *sd, s32 value);
+extern int ad5816g_t_vcm_timing(struct v4l2_subdev *sd, s32 value);
+
+extern int drv201_vcm_power_up(struct v4l2_subdev *sd);
+extern int drv201_vcm_power_down(struct v4l2_subdev *sd);
+extern int drv201_vcm_init(struct v4l2_subdev *sd);
+
+extern int drv201_t_focus_vcm(struct v4l2_subdev *sd, u16 val);
+extern int drv201_t_focus_abs(struct v4l2_subdev *sd, s32 value);
+extern int drv201_t_focus_rel(struct v4l2_subdev *sd, s32 value);
+extern int drv201_q_focus_status(struct v4l2_subdev *sd, s32 *value);
+extern int drv201_q_focus_abs(struct v4l2_subdev *sd, s32 *value);
+extern int drv201_t_vcm_slew(struct v4l2_subdev *sd, s32 value);
+extern int drv201_t_vcm_timing(struct v4l2_subdev *sd, s32 value);
+
+extern int dw9714_vcm_power_up(struct v4l2_subdev *sd);
+extern int dw9714_vcm_power_down(struct v4l2_subdev *sd);
+extern int dw9714_vcm_init(struct v4l2_subdev *sd);
+
+extern int dw9714_t_focus_vcm(struct v4l2_subdev *sd, u16 val);
+extern int dw9714_t_focus_abs(struct v4l2_subdev *sd, s32 value);
+extern int dw9714_t_focus_abs_init(struct v4l2_subdev *sd);
+extern int dw9714_t_focus_rel(struct v4l2_subdev *sd, s32 value);
+extern int dw9714_q_focus_status(struct v4l2_subdev *sd, s32 *value);
+extern int dw9714_q_focus_abs(struct v4l2_subdev *sd, s32 *value);
+extern int dw9714_t_vcm_slew(struct v4l2_subdev *sd, s32 value);
+extern int dw9714_t_vcm_timing(struct v4l2_subdev *sd, s32 value);
+
+extern int dw9719_vcm_power_up(struct v4l2_subdev *sd);
+extern int dw9719_vcm_power_down(struct v4l2_subdev *sd);
+extern int dw9719_vcm_init(struct v4l2_subdev *sd);
+
+extern int dw9719_t_focus_vcm(struct v4l2_subdev *sd, u16 val);
+extern int dw9719_t_focus_abs(struct v4l2_subdev *sd, s32 value);
+extern int dw9719_t_focus_rel(struct v4l2_subdev *sd, s32 value);
+extern int dw9719_q_focus_status(struct v4l2_subdev *sd, s32 *value);
+extern int dw9719_q_focus_abs(struct v4l2_subdev *sd, s32 *value);
+extern int dw9719_t_vcm_slew(struct v4l2_subdev *sd, s32 value);
+extern int dw9719_t_vcm_timing(struct v4l2_subdev *sd, s32 value);
+
+extern int dw9718_vcm_power_up(struct v4l2_subdev *sd);
+extern int dw9718_vcm_power_down(struct v4l2_subdev *sd);
+extern int dw9718_vcm_init(struct v4l2_subdev *sd);
+
+extern int dw9718_t_focus_vcm(struct v4l2_subdev *sd, u16 val);
+extern int dw9718_t_focus_abs(struct v4l2_subdev *sd, s32 value);
+extern int dw9718_t_focus_rel(struct v4l2_subdev *sd, s32 value);
+extern int dw9718_q_focus_status(struct v4l2_subdev *sd, s32 *value);
+extern int dw9718_q_focus_abs(struct v4l2_subdev *sd, s32 *value);
+extern int dw9718_t_vcm_slew(struct v4l2_subdev *sd, s32 value);
+extern int dw9718_t_vcm_timing(struct v4l2_subdev *sd, s32 value);
+
+extern int vcm_power_up(struct v4l2_subdev *sd);
+extern int vcm_power_down(struct v4l2_subdev *sd);
+
+struct imx_vcm imx_vcms[] = {
+ [IMX175_MERRFLD] = {
+ .power_up = drv201_vcm_power_up,
+ .power_down = drv201_vcm_power_down,
+ .init = drv201_vcm_init,
+ .t_focus_vcm = drv201_t_focus_vcm,
+ .t_focus_abs = drv201_t_focus_abs,
+ .t_focus_abs_init = NULL,
+ .t_focus_rel = drv201_t_focus_rel,
+ .q_focus_status = drv201_q_focus_status,
+ .q_focus_abs = drv201_q_focus_abs,
+ .t_vcm_slew = drv201_t_vcm_slew,
+ .t_vcm_timing = drv201_t_vcm_timing,
+ },
+ [IMX175_VALLEYVIEW] = {
+ .power_up = dw9714_vcm_power_up,
+ .power_down = dw9714_vcm_power_down,
+ .init = dw9714_vcm_init,
+ .t_focus_vcm = dw9714_t_focus_vcm,
+ .t_focus_abs = dw9714_t_focus_abs,
+ .t_focus_abs_init = NULL,
+ .t_focus_rel = dw9714_t_focus_rel,
+ .q_focus_status = dw9714_q_focus_status,
+ .q_focus_abs = dw9714_q_focus_abs,
+ .t_vcm_slew = dw9714_t_vcm_slew,
+ .t_vcm_timing = dw9714_t_vcm_timing,
+ },
+ [IMX135_SALTBAY] = {
+ .power_up = ad5816g_vcm_power_up,
+ .power_down = ad5816g_vcm_power_down,
+ .init = ad5816g_vcm_init,
+ .t_focus_vcm = ad5816g_t_focus_vcm,
+ .t_focus_abs = ad5816g_t_focus_abs,
+ .t_focus_abs_init = NULL,
+ .t_focus_rel = ad5816g_t_focus_rel,
+ .q_focus_status = ad5816g_q_focus_status,
+ .q_focus_abs = ad5816g_q_focus_abs,
+ .t_vcm_slew = ad5816g_t_vcm_slew,
+ .t_vcm_timing = ad5816g_t_vcm_timing,
+ },
+ [IMX135_VICTORIABAY] = {
+ .power_up = dw9719_vcm_power_up,
+ .power_down = dw9719_vcm_power_down,
+ .init = dw9719_vcm_init,
+ .t_focus_vcm = dw9719_t_focus_vcm,
+ .t_focus_abs = dw9719_t_focus_abs,
+ .t_focus_abs_init = NULL,
+ .t_focus_rel = dw9719_t_focus_rel,
+ .q_focus_status = dw9719_q_focus_status,
+ .q_focus_abs = dw9719_q_focus_abs,
+ .t_vcm_slew = dw9719_t_vcm_slew,
+ .t_vcm_timing = dw9719_t_vcm_timing,
+ },
+ [IMX134_VALLEYVIEW] = {
+ .power_up = dw9714_vcm_power_up,
+ .power_down = dw9714_vcm_power_down,
+ .init = dw9714_vcm_init,
+ .t_focus_vcm = dw9714_t_focus_vcm,
+ .t_focus_abs = dw9714_t_focus_abs,
+ .t_focus_abs_init = dw9714_t_focus_abs_init,
+ .t_focus_rel = dw9714_t_focus_rel,
+ .q_focus_status = dw9714_q_focus_status,
+ .q_focus_abs = dw9714_q_focus_abs,
+ .t_vcm_slew = dw9714_t_vcm_slew,
+ .t_vcm_timing = dw9714_t_vcm_timing,
+ },
+ [IMX219_MFV0_PRH] = {
+ .power_up = dw9718_vcm_power_up,
+ .power_down = dw9718_vcm_power_down,
+ .init = dw9718_vcm_init,
+ .t_focus_vcm = dw9718_t_focus_vcm,
+ .t_focus_abs = dw9718_t_focus_abs,
+ .t_focus_abs_init = NULL,
+ .t_focus_rel = dw9718_t_focus_rel,
+ .q_focus_status = dw9718_q_focus_status,
+ .q_focus_abs = dw9718_q_focus_abs,
+ .t_vcm_slew = dw9718_t_vcm_slew,
+ .t_vcm_timing = dw9718_t_vcm_timing,
+ },
+ [IMX_ID_DEFAULT] = {
+ .power_up = NULL,
+ .power_down = NULL,
+ .t_focus_abs_init = NULL,
+ },
+};
+
+extern void *dummy_otp_read(struct v4l2_subdev *sd, u8 dev_addr,
+ u32 start_addr, u32 size);
+extern void *imx_otp_read(struct v4l2_subdev *sd, u8 dev_addr,
+ u32 start_addr, u32 size);
+extern void *e2prom_otp_read(struct v4l2_subdev *sd, u8 dev_addr,
+ u32 start_addr, u32 size);
+extern void *brcc064_otp_read(struct v4l2_subdev *sd, u8 dev_addr,
+ u32 start_addr, u32 size);
+extern void *imx227_otp_read(struct v4l2_subdev *sd, u8 dev_addr,
+ u32 start_addr, u32 size);
+extern void *e2prom_otp_read(struct v4l2_subdev *sd, u8 dev_addr,
+ u32 start_addr, u32 size);
+struct imx_otp imx_otps[] = {
+ [IMX175_MERRFLD] = {
+ .otp_read = imx_otp_read,
+ .dev_addr = E2PROM_ADDR,
+ .start_addr = 0,
+ .size = DEFAULT_OTP_SIZE,
+ },
+ [IMX175_VALLEYVIEW] = {
+ .otp_read = e2prom_otp_read,
+ .dev_addr = E2PROM_ABICO_SS89A839_ADDR,
+ .start_addr = E2PROM_2ADDR,
+ .size = DEFAULT_OTP_SIZE,
+ },
+ [IMX135_SALTBAY] = {
+ .otp_read = e2prom_otp_read,
+ .dev_addr = E2PROM_ADDR,
+ .start_addr = 0,
+ .size = DEFAULT_OTP_SIZE,
+ },
+ [IMX135_VICTORIABAY] = {
+ .otp_read = imx_otp_read,
+ .size = DEFAULT_OTP_SIZE,
+ },
+ [IMX134_VALLEYVIEW] = {
+ .otp_read = e2prom_otp_read,
+ .dev_addr = E2PROM_LITEON_12P1BA869D_ADDR,
+ .start_addr = 0,
+ .size = E2PROM_LITEON_12P1BA869D_SIZE,
+ },
+ [IMX132_SALTBAY] = {
+ .otp_read = dummy_otp_read,
+ .size = DEFAULT_OTP_SIZE,
+ },
+ [IMX208_MOFD_PD2] = {
+ .otp_read = dummy_otp_read,
+ .size = DEFAULT_OTP_SIZE,
+ },
+ [IMX219_MFV0_PRH] = {
+ .otp_read = brcc064_otp_read,
+ .dev_addr = E2PROM_ADDR,
+ .start_addr = 0,
+ .size = IMX219_OTP_SIZE,
+ },
+ [IMX227_SAND] = {
+ .otp_read = imx227_otp_read,
+ .size = IMX227_OTP_SIZE,
+ },
+ [IMX_ID_DEFAULT] = {
+ .otp_read = dummy_otp_read,
+ .size = DEFAULT_OTP_SIZE,
+ },
+};
+
+#endif
+
diff --git a/drivers/staging/media/atomisp/i2c/imx/imx132.h b/drivers/staging/media/atomisp/i2c/imx/imx132.h
new file mode 100644
index 000000000000..98f047b8a1ba
--- /dev/null
+++ b/drivers/staging/media/atomisp/i2c/imx/imx132.h
@@ -0,0 +1,566 @@
+/*
+ * Support for Sony IMX camera sensor.
+ *
+ * Copyright (c) 2013 Intel Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+
+#ifndef __IMX132_H__
+#define __IMX132_H__
+#include "common.h"
+
+/********************** registers define ********************************/
+#define IMX132_RGLANESEL 0x3301 /* Number of lanes */
+#define IMX132_RGLANESEL_1LANE 0x01
+#define IMX132_RGLANESEL_2LANES 0x00
+#define IMX132_RGLANESEL_4LANES 0x03
+
+#define IMX132_2LANES_GAINFACT 2096 /* 524/256 * 2^10 */
+#define IMX132_2LANES_GAINFACT_SHIFT 10
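+/*
+ * 2096 is 524/256 in Q10 fixed point (524 / 256 * 1024); presumably the
+ * gain is scaled as (gain * IMX132_2LANES_GAINFACT) >>
+ * IMX132_2LANES_GAINFACT_SHIFT, though the user of these defines is not
+ * in this header.
+ */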
+
+/********************** settings for imx from vendor*********************/
+static struct imx_reg imx132_1080p_30fps[] = {
+ GROUPED_PARAMETER_HOLD_ENABLE,
+ /* Global Settings */
+ {IMX_8BIT, 0x3087, 0x53},
+ {IMX_8BIT, 0x308B, 0x5A},
+ {IMX_8BIT, 0x3094, 0x11},
+ {IMX_8BIT, 0x309D, 0xA4},
+ {IMX_8BIT, 0x30AA, 0x01},
+ {IMX_8BIT, 0x30C6, 0x00},
+ {IMX_8BIT, 0x30C7, 0x00},
+ {IMX_8BIT, 0x3118, 0x2F},
+ {IMX_8BIT, 0x312A, 0x00},
+ {IMX_8BIT, 0x312B, 0x0B},
+ {IMX_8BIT, 0x312C, 0x0B},
+ {IMX_8BIT, 0x312D, 0x13},
+ /* PLL setting */
+ {IMX_8BIT, 0x0305, 0x02},
+ {IMX_8BIT, 0x0307, 0x50},
+ {IMX_8BIT, 0x30A4, 0x02},
+ {IMX_8BIT, 0x303C, 0x3C},
+ /* Mode setting */
+ {IMX_8BIT, 0x0344, 0x00},
+ {IMX_8BIT, 0x0345, 0x14},
+ {IMX_8BIT, 0x0346, 0x00},
+ {IMX_8BIT, 0x0347, 0x32},
+ {IMX_8BIT, 0x0348, 0x07},
+ {IMX_8BIT, 0x0349, 0xA3},
+ {IMX_8BIT, 0x034A, 0x04},
+ {IMX_8BIT, 0x034B, 0x79},
+ {IMX_8BIT, 0x034C, 0x07},
+ {IMX_8BIT, 0x034D, 0x90},
+ {IMX_8BIT, 0x034E, 0x04},
+ {IMX_8BIT, 0x034F, 0x48},
+ {IMX_8BIT, 0x0381, 0x01},
+ {IMX_8BIT, 0x0383, 0x01},
+ {IMX_8BIT, 0x0385, 0x01},
+ {IMX_8BIT, 0x0387, 0x01},
+ {IMX_8BIT, 0x303D, 0x10},
+ {IMX_8BIT, 0x303E, 0x5A},
+ {IMX_8BIT, 0x3040, 0x00},
+ {IMX_8BIT, 0x3041, 0x00},
+ {IMX_8BIT, 0x3048, 0x00},
+ {IMX_8BIT, 0x304C, 0x2F},
+ {IMX_8BIT, 0x304D, 0x02},
+ {IMX_8BIT, 0x3064, 0x92},
+ {IMX_8BIT, 0x306A, 0x10},
+ {IMX_8BIT, 0x309B, 0x00},
+ {IMX_8BIT, 0x309E, 0x41},
+ {IMX_8BIT, 0x30A0, 0x10},
+ {IMX_8BIT, 0x30A1, 0x0B},
+ {IMX_8BIT, 0x30B2, 0x00},
+ {IMX_8BIT, 0x30D5, 0x00},
+ {IMX_8BIT, 0x30D6, 0x00},
+ {IMX_8BIT, 0x30D7, 0x00},
+ {IMX_8BIT, 0x30D8, 0x00},
+ {IMX_8BIT, 0x30D9, 0x00},
+ {IMX_8BIT, 0x30DA, 0x00},
+ {IMX_8BIT, 0x30DB, 0x00},
+ {IMX_8BIT, 0x30DC, 0x00},
+ {IMX_8BIT, 0x30DD, 0x00},
+ {IMX_8BIT, 0x30DE, 0x00},
+ {IMX_8BIT, 0x3102, 0x0C},
+ {IMX_8BIT, 0x3103, 0x33},
+ {IMX_8BIT, 0x3104, 0x18},
+ {IMX_8BIT, 0x3105, 0x00},
+ {IMX_8BIT, 0x3106, 0x65},
+ {IMX_8BIT, 0x3107, 0x00},
+ {IMX_8BIT, 0x3108, 0x06},
+ {IMX_8BIT, 0x3109, 0x04},
+ {IMX_8BIT, 0x310A, 0x04},
+ {IMX_8BIT, 0x315C, 0x3D},
+ {IMX_8BIT, 0x315D, 0x3C},
+ {IMX_8BIT, 0x316E, 0x3E},
+ {IMX_8BIT, 0x316F, 0x3D},
+ /* Global timing */
+ {IMX_8BIT, 0x3304, 0x07}, /* RGTLPX[5:0] TLPX */
+ {IMX_8BIT, 0x3305, 0x06}, /* RGTCLKPREPARE[3:0] TCLK-PREPARE */
+ {IMX_8BIT, 0x3306, 0x19}, /* RGTCLKZERO[5:0] TCLK-ZERO */
+ {IMX_8BIT, 0x3307, 0x03}, /* RGTCLKPRE[5:0] TCLK-PRE */
+ {IMX_8BIT, 0x3308, 0x0F}, /* RGTCLKPOST[5:0] TCLK-POST */
+ {IMX_8BIT, 0x3309, 0x07}, /* RGTCLKTRAIL[3:0] TCLK-TRAIL */
+ {IMX_8BIT, 0x330A, 0x0C}, /* RGTHSEXIT[5:0] THS-EXIT */
+ {IMX_8BIT, 0x330B, 0x06}, /* RGTHSPREPARE[3:0] THS-PREPARE */
+ {IMX_8BIT, 0x330C, 0x0B}, /* RGTHSZERO[5:0] THS-ZERO */
+ {IMX_8BIT, 0x330D, 0x07}, /* RGTHSTRAIL[3:0] THS-TRAIL */
+ {IMX_8BIT, 0x330E, 0x03},
+ {IMX_8BIT, 0x3318, 0x62},
+ {IMX_8BIT, 0x3322, 0x09},
+ {IMX_8BIT, 0x3342, 0x00},
+ {IMX_8BIT, 0x3348, 0xE0},
+
+ {IMX_TOK_TERM, 0, 0},
+};
+
+static struct imx_reg imx132_1456x1096_30fps[] = {
+ GROUPED_PARAMETER_HOLD_ENABLE,
+ /* Global Settings */
+ {IMX_8BIT, 0x3087, 0x53},
+ {IMX_8BIT, 0x308B, 0x5A},
+ {IMX_8BIT, 0x3094, 0x11},
+ {IMX_8BIT, 0x309D, 0xA4},
+ {IMX_8BIT, 0x30AA, 0x01},
+ {IMX_8BIT, 0x30C6, 0x00},
+ {IMX_8BIT, 0x30C7, 0x00},
+ {IMX_8BIT, 0x3118, 0x2F},
+ {IMX_8BIT, 0x312A, 0x00},
+ {IMX_8BIT, 0x312B, 0x0B},
+ {IMX_8BIT, 0x312C, 0x0B},
+ {IMX_8BIT, 0x312D, 0x13},
+ /* PLL setting */
+ {IMX_8BIT, 0x0305, 0x02},
+ {IMX_8BIT, 0x0307, 0x50},
+ {IMX_8BIT, 0x30A4, 0x02},
+ {IMX_8BIT, 0x303C, 0x3C},
+ /* Mode setting */
+ {IMX_8BIT, 0x0344, 0x01},
+ {IMX_8BIT, 0x0345, 0x04},
+ {IMX_8BIT, 0x0346, 0x00},
+ {IMX_8BIT, 0x0347, 0x32},
+ {IMX_8BIT, 0x0348, 0x06},
+ {IMX_8BIT, 0x0349, 0xB3},
+ {IMX_8BIT, 0x034A, 0x04},
+ {IMX_8BIT, 0x034B, 0x79},
+ {IMX_8BIT, 0x034C, 0x05},
+ {IMX_8BIT, 0x034D, 0xB0},
+ {IMX_8BIT, 0x034E, 0x04},
+ {IMX_8BIT, 0x034F, 0x48},
+ {IMX_8BIT, 0x0381, 0x01},
+ {IMX_8BIT, 0x0383, 0x01},
+ {IMX_8BIT, 0x0385, 0x01},
+ {IMX_8BIT, 0x0387, 0x01},
+ {IMX_8BIT, 0x303D, 0x10},
+ {IMX_8BIT, 0x303E, 0x5A},
+ {IMX_8BIT, 0x3040, 0x00},
+ {IMX_8BIT, 0x3041, 0x00},
+ {IMX_8BIT, 0x3048, 0x00},
+ {IMX_8BIT, 0x304C, 0x2F},
+ {IMX_8BIT, 0x304D, 0x02},
+ {IMX_8BIT, 0x3064, 0x92},
+ {IMX_8BIT, 0x306A, 0x10},
+ {IMX_8BIT, 0x309B, 0x00},
+ {IMX_8BIT, 0x309E, 0x41},
+ {IMX_8BIT, 0x30A0, 0x10},
+ {IMX_8BIT, 0x30A1, 0x0B},
+ {IMX_8BIT, 0x30B2, 0x00},
+ {IMX_8BIT, 0x30D5, 0x00},
+ {IMX_8BIT, 0x30D6, 0x00},
+ {IMX_8BIT, 0x30D7, 0x00},
+ {IMX_8BIT, 0x30D8, 0x00},
+ {IMX_8BIT, 0x30D9, 0x00},
+ {IMX_8BIT, 0x30DA, 0x00},
+ {IMX_8BIT, 0x30DB, 0x00},
+ {IMX_8BIT, 0x30DC, 0x00},
+ {IMX_8BIT, 0x30DD, 0x00},
+ {IMX_8BIT, 0x30DE, 0x00},
+ {IMX_8BIT, 0x3102, 0x0C},
+ {IMX_8BIT, 0x3103, 0x33},
+ {IMX_8BIT, 0x3104, 0x18},
+ {IMX_8BIT, 0x3105, 0x00},
+ {IMX_8BIT, 0x3106, 0x65},
+ {IMX_8BIT, 0x3107, 0x00},
+ {IMX_8BIT, 0x3108, 0x06},
+ {IMX_8BIT, 0x3109, 0x04},
+ {IMX_8BIT, 0x310A, 0x04},
+ {IMX_8BIT, 0x315C, 0x3D},
+ {IMX_8BIT, 0x315D, 0x3C},
+ {IMX_8BIT, 0x316E, 0x3E},
+ {IMX_8BIT, 0x316F, 0x3D},
+ /* Global timing */
+ {IMX_8BIT, 0x3304, 0x07}, /* RGTLPX[5:0] TLPX */
+ {IMX_8BIT, 0x3305, 0x06}, /* RGTCLKPREPARE[3:0] TCLK-PREPARE */
+ {IMX_8BIT, 0x3306, 0x19}, /* RGTCLKZERO[5:0] TCLK-ZERO */
+ {IMX_8BIT, 0x3307, 0x03}, /* RGTCLKPRE[5:0] TCLK-PRE */
+ {IMX_8BIT, 0x3308, 0x0F}, /* RGTCLKPOST[5:0] TCLK-POST */
+ {IMX_8BIT, 0x3309, 0x07}, /* RGTCLKTRAIL[3:0] TCLK-TRAIL */
+ {IMX_8BIT, 0x330A, 0x0C}, /* RGTHSEXIT[5:0] THS-EXIT */
+ {IMX_8BIT, 0x330B, 0x06}, /* RGTHSPREPARE[3:0] THS-PREPARE */
+ {IMX_8BIT, 0x330C, 0x0B}, /* RGTHSZERO[5:0] THS-ZERO */
+ {IMX_8BIT, 0x330D, 0x07}, /* RGTHSTRAIL[3:0] THS-TRAIL */
+ {IMX_8BIT, 0x330E, 0x03},
+ {IMX_8BIT, 0x3318, 0x62},
+ {IMX_8BIT, 0x3322, 0x09},
+ {IMX_8BIT, 0x3342, 0x00},
+ {IMX_8BIT, 0x3348, 0xE0},
+
+ {IMX_TOK_TERM, 0, 0},
+};
+
+static struct imx_reg imx132_1636x1096_30fps[] = {
+ GROUPED_PARAMETER_HOLD_ENABLE,
+ /* Global Settings */
+ {IMX_8BIT, 0x3087, 0x53},
+ {IMX_8BIT, 0x308B, 0x5A},
+ {IMX_8BIT, 0x3094, 0x11},
+ {IMX_8BIT, 0x309D, 0xA4},
+ {IMX_8BIT, 0x30AA, 0x01},
+ {IMX_8BIT, 0x30C6, 0x00},
+ {IMX_8BIT, 0x30C7, 0x00},
+ {IMX_8BIT, 0x3118, 0x2F},
+ {IMX_8BIT, 0x312A, 0x00},
+ {IMX_8BIT, 0x312B, 0x0B},
+ {IMX_8BIT, 0x312C, 0x0B},
+ {IMX_8BIT, 0x312D, 0x13},
+ /* PLL setting */
+ {IMX_8BIT, 0x0305, 0x02},
+ {IMX_8BIT, 0x0307, 0x50},
+ {IMX_8BIT, 0x30A4, 0x02},
+ {IMX_8BIT, 0x303C, 0x3C},
+ /* Mode setting */
+ {IMX_8BIT, 0x0344, 0x00},
+ {IMX_8BIT, 0x0345, 0xAA},
+ {IMX_8BIT, 0x0346, 0x00},
+ {IMX_8BIT, 0x0347, 0x32},
+ {IMX_8BIT, 0x0348, 0x07},
+ {IMX_8BIT, 0x0349, 0x0D},
+ {IMX_8BIT, 0x034A, 0x04},
+ {IMX_8BIT, 0x034B, 0x79},
+ {IMX_8BIT, 0x034C, 0x06},
+ {IMX_8BIT, 0x034D, 0x64},
+ {IMX_8BIT, 0x034E, 0x04},
+ {IMX_8BIT, 0x034F, 0x48},
+ {IMX_8BIT, 0x0381, 0x01},
+ {IMX_8BIT, 0x0383, 0x01},
+ {IMX_8BIT, 0x0385, 0x01},
+ {IMX_8BIT, 0x0387, 0x01},
+ {IMX_8BIT, 0x303D, 0x10},
+ {IMX_8BIT, 0x303E, 0x5A},
+ {IMX_8BIT, 0x3040, 0x00},
+ {IMX_8BIT, 0x3041, 0x00},
+ {IMX_8BIT, 0x3048, 0x00},
+ {IMX_8BIT, 0x304C, 0x2F},
+ {IMX_8BIT, 0x304D, 0x02},
+ {IMX_8BIT, 0x3064, 0x92},
+ {IMX_8BIT, 0x306A, 0x10},
+ {IMX_8BIT, 0x309B, 0x00},
+ {IMX_8BIT, 0x309E, 0x41},
+ {IMX_8BIT, 0x30A0, 0x10},
+ {IMX_8BIT, 0x30A1, 0x0B},
+ {IMX_8BIT, 0x30B2, 0x00},
+ {IMX_8BIT, 0x30D5, 0x00},
+ {IMX_8BIT, 0x30D6, 0x00},
+ {IMX_8BIT, 0x30D7, 0x00},
+ {IMX_8BIT, 0x30D8, 0x00},
+ {IMX_8BIT, 0x30D9, 0x00},
+ {IMX_8BIT, 0x30DA, 0x00},
+ {IMX_8BIT, 0x30DB, 0x00},
+ {IMX_8BIT, 0x30DC, 0x00},
+ {IMX_8BIT, 0x30DD, 0x00},
+ {IMX_8BIT, 0x30DE, 0x00},
+ {IMX_8BIT, 0x3102, 0x0C},
+ {IMX_8BIT, 0x3103, 0x33},
+ {IMX_8BIT, 0x3104, 0x18},
+ {IMX_8BIT, 0x3105, 0x00},
+ {IMX_8BIT, 0x3106, 0x65},
+ {IMX_8BIT, 0x3107, 0x00},
+ {IMX_8BIT, 0x3108, 0x06},
+ {IMX_8BIT, 0x3109, 0x04},
+ {IMX_8BIT, 0x310A, 0x04},
+ {IMX_8BIT, 0x315C, 0x3D},
+ {IMX_8BIT, 0x315D, 0x3C},
+ {IMX_8BIT, 0x316E, 0x3E},
+ {IMX_8BIT, 0x316F, 0x3D},
+ /* Global timing */
+ {IMX_8BIT, 0x3304, 0x07}, /* RGTLPX[5:0] TLPX */
+ {IMX_8BIT, 0x3305, 0x06}, /* RGTCLKPREPARE[3:0] TCLK-PREPARE */
+ {IMX_8BIT, 0x3306, 0x19}, /* RGTCLKZERO[5:0] TCLK-ZERO */
+ {IMX_8BIT, 0x3307, 0x03}, /* RGTCLKPRE[5:0] TCLK-PRE */
+ {IMX_8BIT, 0x3308, 0x0F}, /* RGTCLKPOST[5:0] TCLK-POST */
+ {IMX_8BIT, 0x3309, 0x07}, /* RGTCLKTRAIL[3:0] TCLK-TRAIL */
+ {IMX_8BIT, 0x330A, 0x0C}, /* RGTHSEXIT[5:0] THS-EXIT */
+ {IMX_8BIT, 0x330B, 0x06}, /* RGTHSPREPARE[3:0] THS-PREPARE */
+ {IMX_8BIT, 0x330C, 0x0B}, /* RGTHSZERO[5:0] THS-ZERO */
+ {IMX_8BIT, 0x330D, 0x07}, /* RGTHSTRAIL[3:0] THS-TRAIL */
+ {IMX_8BIT, 0x330E, 0x03},
+ {IMX_8BIT, 0x3318, 0x62},
+ {IMX_8BIT, 0x3322, 0x09},
+ {IMX_8BIT, 0x3342, 0x00},
+ {IMX_8BIT, 0x3348, 0xE0},
+
+ {IMX_TOK_TERM, 0, 0},
+};
+
+static struct imx_reg imx132_1336x1096_30fps[] = {
+ GROUPED_PARAMETER_HOLD_ENABLE,
+ /* Global Settings */
+ {IMX_8BIT, 0x3087, 0x53},
+ {IMX_8BIT, 0x308B, 0x5A},
+ {IMX_8BIT, 0x3094, 0x11},
+ {IMX_8BIT, 0x309D, 0xA4},
+ {IMX_8BIT, 0x30AA, 0x01},
+ {IMX_8BIT, 0x30C6, 0x00},
+ {IMX_8BIT, 0x30C7, 0x00},
+ {IMX_8BIT, 0x3118, 0x2F},
+ {IMX_8BIT, 0x312A, 0x00},
+ {IMX_8BIT, 0x312B, 0x0B},
+ {IMX_8BIT, 0x312C, 0x0B},
+ {IMX_8BIT, 0x312D, 0x13},
+ /* PLL setting */
+ {IMX_8BIT, 0x0305, 0x02},
+ {IMX_8BIT, 0x0307, 0x50},
+ {IMX_8BIT, 0x30A4, 0x02},
+ {IMX_8BIT, 0x303C, 0x3C},
+ /* Mode setting */
+ {IMX_8BIT, 0x0344, 0x01},
+ {IMX_8BIT, 0x0345, 0x2C},
+ {IMX_8BIT, 0x0346, 0x00},
+ {IMX_8BIT, 0x0347, 0x32},
+ {IMX_8BIT, 0x0348, 0x06},
+ {IMX_8BIT, 0x0349, 0x77},
+ {IMX_8BIT, 0x034A, 0x04},
+ {IMX_8BIT, 0x034B, 0x79},
+ {IMX_8BIT, 0x034C, 0x05},
+ {IMX_8BIT, 0x034D, 0x38},
+ {IMX_8BIT, 0x034E, 0x04},
+ {IMX_8BIT, 0x034F, 0x48},
+ {IMX_8BIT, 0x0381, 0x01},
+ {IMX_8BIT, 0x0383, 0x01},
+ {IMX_8BIT, 0x0385, 0x01},
+ {IMX_8BIT, 0x0387, 0x01},
+ {IMX_8BIT, 0x303D, 0x10},
+ {IMX_8BIT, 0x303E, 0x5A},
+ {IMX_8BIT, 0x3040, 0x00},
+ {IMX_8BIT, 0x3041, 0x00},
+ {IMX_8BIT, 0x3048, 0x00},
+ {IMX_8BIT, 0x304C, 0x2F},
+ {IMX_8BIT, 0x304D, 0x02},
+ {IMX_8BIT, 0x3064, 0x92},
+ {IMX_8BIT, 0x306A, 0x10},
+ {IMX_8BIT, 0x309B, 0x00},
+ {IMX_8BIT, 0x309E, 0x41},
+ {IMX_8BIT, 0x30A0, 0x10},
+ {IMX_8BIT, 0x30A1, 0x0B},
+ {IMX_8BIT, 0x30B2, 0x00},
+ {IMX_8BIT, 0x30D5, 0x00},
+ {IMX_8BIT, 0x30D6, 0x00},
+ {IMX_8BIT, 0x30D7, 0x00},
+ {IMX_8BIT, 0x30D8, 0x00},
+ {IMX_8BIT, 0x30D9, 0x00},
+ {IMX_8BIT, 0x30DA, 0x00},
+ {IMX_8BIT, 0x30DB, 0x00},
+ {IMX_8BIT, 0x30DC, 0x00},
+ {IMX_8BIT, 0x30DD, 0x00},
+ {IMX_8BIT, 0x30DE, 0x00},
+ {IMX_8BIT, 0x3102, 0x0C},
+ {IMX_8BIT, 0x3103, 0x33},
+ {IMX_8BIT, 0x3104, 0x18},
+ {IMX_8BIT, 0x3105, 0x00},
+ {IMX_8BIT, 0x3106, 0x65},
+ {IMX_8BIT, 0x3107, 0x00},
+ {IMX_8BIT, 0x3108, 0x06},
+ {IMX_8BIT, 0x3109, 0x04},
+ {IMX_8BIT, 0x310A, 0x04},
+ {IMX_8BIT, 0x315C, 0x3D},
+ {IMX_8BIT, 0x315D, 0x3C},
+ {IMX_8BIT, 0x316E, 0x3E},
+ {IMX_8BIT, 0x316F, 0x3D},
+ /* Global timing */
+ {IMX_8BIT, 0x3304, 0x07}, /* RGTLPX[5:0] TLPX */
+ {IMX_8BIT, 0x3305, 0x06}, /* RGTCLKPREPARE[3:0] TCLK-PREPARE */
+ {IMX_8BIT, 0x3306, 0x19}, /* RGTCLKZERO[5:0] TCLK-ZERO */
+ {IMX_8BIT, 0x3307, 0x03}, /* RGTCLKPRE[5:0] TCLK-PRE */
+ {IMX_8BIT, 0x3308, 0x0F}, /* RGTCLKPOST[5:0] TCLK-POST */
+ {IMX_8BIT, 0x3309, 0x07}, /* RGTCLKTRAIL[3:0] TCLK-TRAIL */
+ {IMX_8BIT, 0x330A, 0x0C}, /* RGTHSEXIT[5:0] THS-EXIT */
+ {IMX_8BIT, 0x330B, 0x06}, /* RGTHSPREPARE[3:0] THS-PREPARE */
+ {IMX_8BIT, 0x330C, 0x0B}, /* RGTHSZERO[5:0] THS-ZERO */
+ {IMX_8BIT, 0x330D, 0x07}, /* RGTHSTRAIL[3:0] THS-TRAIL */
+ {IMX_8BIT, 0x330E, 0x03},
+ {IMX_8BIT, 0x3318, 0x62},
+ {IMX_8BIT, 0x3322, 0x09},
+ {IMX_8BIT, 0x3342, 0x00},
+ {IMX_8BIT, 0x3348, 0xE0},
+
+ {IMX_TOK_TERM, 0, 0},
+};
+
+/********************** settings for imx - reference *********************/
+static struct imx_reg const imx132_init_settings[] = {
+ /* sw reset */
+ { IMX_8BIT, 0x0100, 0x00 },
+ { IMX_8BIT, 0x0103, 0x01 },
+ { IMX_TOK_DELAY, 0, 5},
+ { IMX_8BIT, 0x0103, 0x00 },
+ GROUPED_PARAMETER_HOLD_ENABLE,
+ /* Global Settings */
+ {IMX_8BIT, 0x3087, 0x53},
+ {IMX_8BIT, 0x308B, 0x5A},
+ {IMX_8BIT, 0x3094, 0x11},
+ {IMX_8BIT, 0x309D, 0xA4},
+ {IMX_8BIT, 0x30AA, 0x01},
+ {IMX_8BIT, 0x30C6, 0x00},
+ {IMX_8BIT, 0x30C7, 0x00},
+ {IMX_8BIT, 0x3118, 0x2F},
+ {IMX_8BIT, 0x312A, 0x00},
+ {IMX_8BIT, 0x312B, 0x0B},
+ {IMX_8BIT, 0x312C, 0x0B},
+ {IMX_8BIT, 0x312D, 0x13},
+ GROUPED_PARAMETER_HOLD_DISABLE,
+ { IMX_TOK_TERM, 0, 0}
+};
+
+struct imx_resolution imx132_res_preview[] = {
+ {
+ .desc = "imx132_1080p_30fps",
+ .regs = imx132_1080p_30fps,
+ .width = 1936,
+ .height = 1096,
+ .fps_options = {
+ {
+ .fps = 30,
+ .pixels_per_line = 0x08F2,
+ .lines_per_frame = 0x045C,
+ },
+ {
+ }
+ },
+ .bin_factor_x = 0,
+ .bin_factor_y = 0,
+ .used = 0,
+ .skip_frames = 2,
+ .mipi_freq = 384000,
+ },
+};
+
+struct imx_resolution imx132_res_still[] = {
+ {
+ .desc = "imx132_1080p_30fps",
+ .regs = imx132_1080p_30fps,
+ .width = 1936,
+ .height = 1096,
+ .fps_options = {
+ {
+ .fps = 30,
+ .pixels_per_line = 0x08F2,
+ .lines_per_frame = 0x045C,
+ },
+ {
+ }
+ },
+ .bin_factor_x = 0,
+ .bin_factor_y = 0,
+ .used = 0,
+ .skip_frames = 2,
+ .mipi_freq = 384000,
+ },
+};
+
+struct imx_resolution imx132_res_video[] = {
+ {
+ .desc = "imx132_1336x1096_30fps",
+ .regs = imx132_1336x1096_30fps,
+ .width = 1336,
+ .height = 1096,
+ .fps_options = {
+ {
+ .fps = 30,
+ .pixels_per_line = 0x08F2,
+ .lines_per_frame = 0x045C,
+ },
+ {
+ }
+ },
+ .bin_factor_x = 0,
+ .bin_factor_y = 0,
+ .used = 0,
+ .skip_frames = 2,
+ .mipi_freq = 384000,
+ },
+ {
+ .desc = "imx132_1456x1096_30fps",
+ .regs = imx132_1456x1096_30fps,
+ .width = 1456,
+ .height = 1096,
+ .fps_options = {
+ {
+ .fps = 30,
+ .pixels_per_line = 0x08F2,
+ .lines_per_frame = 0x045C,
+ },
+ {
+ }
+ },
+ .bin_factor_x = 0,
+ .bin_factor_y = 0,
+ .used = 0,
+ .skip_frames = 2,
+ .mipi_freq = 384000,
+ },
+ {
+ .desc = "imx132_1636x1096_30fps",
+ .regs = imx132_1636x1096_30fps,
+ .width = 1636,
+ .height = 1096,
+ .fps_options = {
+ {
+ .fps = 30,
+ .pixels_per_line = 0x08F2,
+ .lines_per_frame = 0x045C,
+ },
+ {
+ }
+ },
+ .bin_factor_x = 0,
+ .bin_factor_y = 0,
+ .used = 0,
+ .skip_frames = 2,
+ .mipi_freq = 384000,
+ },
+ {
+ .desc = "imx132_1080p_30fps",
+ .regs = imx132_1080p_30fps,
+ .width = 1936,
+ .height = 1096,
+ .fps_options = {
+ {
+ .fps = 30,
+ .pixels_per_line = 0x08F2,
+ .lines_per_frame = 0x045C,
+ },
+ {
+ }
+ },
+ .bin_factor_x = 0,
+ .bin_factor_y = 0,
+ .used = 0,
+ .skip_frames = 2,
+ .mipi_freq = 384000,
+ },
+};
+#endif
+
diff --git a/drivers/staging/media/atomisp/i2c/imx/imx134.h b/drivers/staging/media/atomisp/i2c/imx/imx134.h
new file mode 100644
index 000000000000..cf35197ed77f
--- /dev/null
+++ b/drivers/staging/media/atomisp/i2c/imx/imx134.h
@@ -0,0 +1,2464 @@
+#ifndef __IMX134_H__
+#define __IMX134_H__
+
+/********************** imx134 setting - version 1 *********************/
+static struct imx_reg const imx134_init_settings[] = {
+ GROUPED_PARAMETER_HOLD_ENABLE,
+ /* Basic settings */
+ { IMX_8BIT, 0x0105, 0x01 },
+ { IMX_8BIT, 0x0220, 0x01 },
+ { IMX_8BIT, 0x3302, 0x11 },
+ { IMX_8BIT, 0x3833, 0x20 },
+ { IMX_8BIT, 0x3893, 0x00 },
+ { IMX_8BIT, 0x3906, 0x08 },
+ { IMX_8BIT, 0x3907, 0x01 },
+ { IMX_8BIT, 0x391B, 0x01 },
+ { IMX_8BIT, 0x3C09, 0x01 },
+ { IMX_8BIT, 0x600A, 0x00 },
+
+ /* Analog settings */
+ { IMX_8BIT, 0x3008, 0xB0 },
+ { IMX_8BIT, 0x320A, 0x01 },
+ { IMX_8BIT, 0x320D, 0x10 },
+ { IMX_8BIT, 0x3216, 0x2E },
+ { IMX_8BIT, 0x322C, 0x02 },
+ { IMX_8BIT, 0x3409, 0x0C },
+ { IMX_8BIT, 0x340C, 0x2D },
+ { IMX_8BIT, 0x3411, 0x39 },
+ { IMX_8BIT, 0x3414, 0x1E },
+ { IMX_8BIT, 0x3427, 0x04 },
+ { IMX_8BIT, 0x3480, 0x1E },
+ { IMX_8BIT, 0x3484, 0x1E },
+ { IMX_8BIT, 0x3488, 0x1E },
+ { IMX_8BIT, 0x348C, 0x1E },
+ { IMX_8BIT, 0x3490, 0x1E },
+ { IMX_8BIT, 0x3494, 0x1E },
+ { IMX_8BIT, 0x3511, 0x8F },
+ { IMX_8BIT, 0x3617, 0x2D },
+
+ GROUPED_PARAMETER_HOLD_DISABLE,
+ { IMX_TOK_TERM, 0, 0 }
+};
+
+/* 4 lane 3280x2464 8M 30fps, vendor provided */
+static struct imx_reg const imx134_8M_30fps[] = {
+ GROUPED_PARAMETER_HOLD_ENABLE,
+ /* mode set clear */
+ { IMX_8BIT, 0x3A43, 0x01 },
+ /* clock setting */
+ { IMX_8BIT, 0x011E, 0x13 },
+ { IMX_8BIT, 0x011F, 0x33 },
+ { IMX_8BIT, 0x0301, 0x05 },
+ { IMX_8BIT, 0x0303, 0x01 },
+ { IMX_8BIT, 0x0305, 0x0C },
+ { IMX_8BIT, 0x0309, 0x05 },
+ { IMX_8BIT, 0x030B, 0x01 },
+ { IMX_8BIT, 0x030C, 0x01 },
+ { IMX_8BIT, 0x030D, 0xA9 },
+ { IMX_8BIT, 0x030E, 0x01 },
+ { IMX_8BIT, 0x3A06, 0x11 },
+
+ /* Mode setting */
+ { IMX_8BIT, 0x0108, 0x03 },
+ { IMX_8BIT, 0x0112, 0x0A },
+ { IMX_8BIT, 0x0113, 0x0A },
+ { IMX_8BIT, 0x0381, 0x01 },
+ { IMX_8BIT, 0x0383, 0x01 },
+ { IMX_8BIT, 0x0385, 0x01 },
+ { IMX_8BIT, 0x0387, 0x01 },
+ { IMX_8BIT, 0x0390, 0x00 },
+ { IMX_8BIT, 0x0391, 0x11 },
+ { IMX_8BIT, 0x0392, 0x00 },
+ { IMX_8BIT, 0x0401, 0x00 },
+ { IMX_8BIT, 0x0404, 0x00 },
+ { IMX_8BIT, 0x0405, 0x10 }, /* down scaling 16/16 = 1 */
+ { IMX_8BIT, 0x4082, 0x01 },
+ { IMX_8BIT, 0x4083, 0x01 },
+ { IMX_8BIT, 0x7006, 0x04 },
+
+ /* Optional function setting */
+ { IMX_8BIT, 0x0700, 0x00 },
+ { IMX_8BIT, 0x3A63, 0x00 },
+ { IMX_8BIT, 0x4100, 0xF8 },
+ { IMX_8BIT, 0x4203, 0xFF },
+ { IMX_8BIT, 0x4344, 0x00 },
+ { IMX_8BIT, 0x441C, 0x01 },
+
+ /* Size setting */
+ { IMX_8BIT, 0x0344, 0x00 }, /* x_addr_start[15:8]:0 */
+ { IMX_8BIT, 0x0345, 0x00 }, /* x_addr_start[7:0] */
+ { IMX_8BIT, 0x0346, 0x00 }, /* y_addr_start[15:8]:0 */
+ { IMX_8BIT, 0x0347, 0x00 }, /* y_addr_start[7:0] */
+ { IMX_8BIT, 0x0348, 0x0C }, /* x_addr_end[15:8]:3279 */
+ { IMX_8BIT, 0x0349, 0xCF }, /* x_addr_end[7:0] */
+ { IMX_8BIT, 0x034A, 0x09 }, /* y_addr_end[15:8]:2463 */
+ { IMX_8BIT, 0x034B, 0x9F }, /* y_addr_end[7:0] */
+ { IMX_8BIT, 0x034C, 0x0C }, /* x_output_size[15:8]: 3280*/
+ { IMX_8BIT, 0x034D, 0xD0 }, /* x_output_size[7:0] */
+ { IMX_8BIT, 0x034E, 0x09 }, /* y_output_size[15:8]:2464 */
+ { IMX_8BIT, 0x034F, 0xA0 }, /* y_output_size[7:0] */
+ { IMX_8BIT, 0x0350, 0x00 },
+ { IMX_8BIT, 0x0351, 0x00 },
+ { IMX_8BIT, 0x0352, 0x00 },
+ { IMX_8BIT, 0x0353, 0x00 },
+ { IMX_8BIT, 0x0354, 0x0C },
+ { IMX_8BIT, 0x0355, 0xD0 },
+ { IMX_8BIT, 0x0356, 0x09 },
+ { IMX_8BIT, 0x0357, 0xA0 },
+ { IMX_8BIT, 0x301D, 0x30 },
+ { IMX_8BIT, 0x3310, 0x0C },
+ { IMX_8BIT, 0x3311, 0xD0 },
+ { IMX_8BIT, 0x3312, 0x09 },
+ { IMX_8BIT, 0x3313, 0xA0 },
+ { IMX_8BIT, 0x331C, 0x01 },
+ { IMX_8BIT, 0x331D, 0xAE },
+ { IMX_8BIT, 0x4084, 0x00 },
+ { IMX_8BIT, 0x4085, 0x00 },
+ { IMX_8BIT, 0x4086, 0x00 },
+ { IMX_8BIT, 0x4087, 0x00 },
+ { IMX_8BIT, 0x4400, 0x00 },
+
+ /* Global timing setting */
+ { IMX_8BIT, 0x0830, 0x77 },
+ { IMX_8BIT, 0x0831, 0x2F },
+ { IMX_8BIT, 0x0832, 0x4F },
+ { IMX_8BIT, 0x0833, 0x37 },
+ { IMX_8BIT, 0x0834, 0x2F },
+ { IMX_8BIT, 0x0835, 0x37 },
+ { IMX_8BIT, 0x0836, 0xAF },
+ { IMX_8BIT, 0x0837, 0x37 },
+ { IMX_8BIT, 0x0839, 0x1F },
+ { IMX_8BIT, 0x083A, 0x17 },
+ { IMX_8BIT, 0x083B, 0x02 },
+
+ /* Integration time setting */
+ { IMX_8BIT, 0x0202, 0x09 },
+ { IMX_8BIT, 0x0203, 0xD2 },
+
+ /* HDR setting */
+ { IMX_8BIT, 0x0230, 0x00 },
+ { IMX_8BIT, 0x0231, 0x00 },
+ { IMX_8BIT, 0x0233, 0x00 },
+ { IMX_8BIT, 0x0234, 0x00 },
+ { IMX_8BIT, 0x0235, 0x40 },
+ { IMX_8BIT, 0x0238, 0x00 },
+ { IMX_8BIT, 0x0239, 0x04 },
+ { IMX_8BIT, 0x023B, 0x00 },
+ { IMX_8BIT, 0x023C, 0x01 },
+ { IMX_8BIT, 0x33B0, 0x04 },
+ { IMX_8BIT, 0x33B1, 0x00 },
+ { IMX_8BIT, 0x33B3, 0x00 },
+ { IMX_8BIT, 0x33B4, 0x01 },
+ { IMX_8BIT, 0x3800, 0x00 },
+ { IMX_TOK_TERM, 0, 0 }
+};
+
+/* 4 lane, 1/2 binning 30fps 1640x1232, vendor provided */
+static struct imx_reg const imx134_1640_1232_30fps[] = {
+ GROUPED_PARAMETER_HOLD_ENABLE,
+ /* mode set clear */
+ { IMX_8BIT, 0x3A43, 0x01 },
+ /* Clock Setting */
+ { IMX_8BIT, 0x011E, 0x13 },
+ { IMX_8BIT, 0x011F, 0x33 },
+ { IMX_8BIT, 0x0301, 0x05 },
+ { IMX_8BIT, 0x0303, 0x01 },
+ { IMX_8BIT, 0x0305, 0x0C },
+ { IMX_8BIT, 0x0309, 0x05 },
+ { IMX_8BIT, 0x030B, 0x01 },
+ { IMX_8BIT, 0x030C, 0x01 },
+ { IMX_8BIT, 0x030D, 0xA9 },
+ { IMX_8BIT, 0x030E, 0x01 },
+ { IMX_8BIT, 0x3A06, 0x11 },
+
+ /* Mode setting */
+ { IMX_8BIT, 0x0108, 0x03 },
+ { IMX_8BIT, 0x0112, 0x0A },
+ { IMX_8BIT, 0x0113, 0x0A },
+ { IMX_8BIT, 0x0381, 0x01 },
+ { IMX_8BIT, 0x0383, 0x01 },
+ { IMX_8BIT, 0x0385, 0x01 },
+ { IMX_8BIT, 0x0387, 0x01 },
+ { IMX_8BIT, 0x0390, 0x01 }, /* binning */
+ { IMX_8BIT, 0x0391, 0x22 }, /* 2x2 binning */
+ { IMX_8BIT, 0x0392, 0x00 },
+ { IMX_8BIT, 0x0401, 0x00 }, /* no resize */
+ { IMX_8BIT, 0x0404, 0x00 },
+ { IMX_8BIT, 0x0405, 0x10 },
+ { IMX_8BIT, 0x4082, 0x01 },
+ { IMX_8BIT, 0x4083, 0x01 },
+ { IMX_8BIT, 0x7006, 0x04 },
+
+ /* Optional Function setting */
+ { IMX_8BIT, 0x0700, 0x00 },
+ { IMX_8BIT, 0x3A63, 0x00 },
+ { IMX_8BIT, 0x4100, 0xF8 },
+ { IMX_8BIT, 0x4203, 0xFF },
+ { IMX_8BIT, 0x4344, 0x00 },
+ { IMX_8BIT, 0x441C, 0x01 },
+
+ /* Size setting */
+ { IMX_8BIT, 0x0344, 0x00 }, /* x_addr_start[15:8]:0 */
+ { IMX_8BIT, 0x0345, 0x00 }, /* x_addr_start[7:0] */
+ { IMX_8BIT, 0x0346, 0x00 }, /* y_addr_start[15:8]:0 */
+ { IMX_8BIT, 0x0347, 0x00 }, /* y_addr_start[7:0] */
+ { IMX_8BIT, 0x0348, 0x0C }, /* x_addr_end[15:8]:3279 */
+ { IMX_8BIT, 0x0349, 0xCF }, /* x_addr_end[7:0] */
+ { IMX_8BIT, 0x034A, 0x09 }, /* y_addr_end[15:8]:2463 */
+ { IMX_8BIT, 0x034B, 0x9F }, /* y_addr_end[7:0] */
+ { IMX_8BIT, 0x034C, 0x06 }, /* x_output_size[15:8]:1640 */
+ { IMX_8BIT, 0x034D, 0x68 }, /* x_output_size[7:0] */
+ { IMX_8BIT, 0x034E, 0x04 }, /* y_output_size[15:8]:1232 */
+ { IMX_8BIT, 0x034F, 0xD0 }, /* y_output_size[7:0] */
+ { IMX_8BIT, 0x0350, 0x00 },
+ { IMX_8BIT, 0x0351, 0x00 },
+ { IMX_8BIT, 0x0352, 0x00 },
+ { IMX_8BIT, 0x0353, 0x00 },
+ { IMX_8BIT, 0x0354, 0x06 },
+ { IMX_8BIT, 0x0355, 0x68 },
+ { IMX_8BIT, 0x0356, 0x04 },
+ { IMX_8BIT, 0x0357, 0xD0 },
+
+ { IMX_8BIT, 0x301D, 0x30 },
+
+ { IMX_8BIT, 0x3310, 0x06 },
+ { IMX_8BIT, 0x3311, 0x68 },
+ { IMX_8BIT, 0x3312, 0x04 },
+ { IMX_8BIT, 0x3313, 0xD0 },
+
+ { IMX_8BIT, 0x331C, 0x04 },
+ { IMX_8BIT, 0x331D, 0x06 },
+ { IMX_8BIT, 0x4084, 0x00 },
+ { IMX_8BIT, 0x4085, 0x00 },
+ { IMX_8BIT, 0x4086, 0x00 },
+ { IMX_8BIT, 0x4087, 0x00 },
+ { IMX_8BIT, 0x4400, 0x00 },
+
+ /* Global Timing Setting */
+ { IMX_8BIT, 0x0830, 0x77 },
+ { IMX_8BIT, 0x0831, 0x2F },
+ { IMX_8BIT, 0x0832, 0x4F },
+ { IMX_8BIT, 0x0833, 0x37 },
+ { IMX_8BIT, 0x0834, 0x2F },
+ { IMX_8BIT, 0x0835, 0x37 },
+ { IMX_8BIT, 0x0836, 0xAF },
+ { IMX_8BIT, 0x0837, 0x37 },
+ { IMX_8BIT, 0x0839, 0x1F },
+ { IMX_8BIT, 0x083A, 0x17 },
+ { IMX_8BIT, 0x083B, 0x02 },
+
+ /* Integration Time Setting */
+ { IMX_8BIT, 0x0202, 0x09 },
+ { IMX_8BIT, 0x0203, 0xD2 },
+
+ /* HDR Setting */
+ { IMX_8BIT, 0x0230, 0x00 },
+ { IMX_8BIT, 0x0231, 0x00 },
+ { IMX_8BIT, 0x0233, 0x00 },
+ { IMX_8BIT, 0x0234, 0x00 },
+ { IMX_8BIT, 0x0235, 0x40 },
+ { IMX_8BIT, 0x0238, 0x00 },
+ { IMX_8BIT, 0x0239, 0x04 },
+ { IMX_8BIT, 0x023B, 0x00 },
+ { IMX_8BIT, 0x023C, 0x01 },
+ { IMX_8BIT, 0x33B0, 0x04 },
+ { IMX_8BIT, 0x33B1, 0x00 },
+ { IMX_8BIT, 0x33B3, 0x00 },
+ { IMX_8BIT, 0x33B4, 0x01 },
+ { IMX_8BIT, 0x3800, 0x00 },
+ { IMX_TOK_TERM, 0, 0 }
+};
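+
+/*
+ * Binning arithmetic for the modes above and below: the full 3280x2464
+ * window is read out in both cases; 2x2 binning yields the 1640x1232
+ * mode above, and 4x4 binning yields the 820x616 mode below.
+ */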
+
+/* 4 lane, 1/4 binning 30fps 820x616, vendor provided */
+static struct imx_reg const imx134_820_616_30fps[] = {
+ GROUPED_PARAMETER_HOLD_ENABLE,
+ /* mode set clear */
+ { IMX_8BIT, 0x3A43, 0x01 },
+ /* Clock Setting */
+ { IMX_8BIT, 0x011E, 0x13 },
+ { IMX_8BIT, 0x011F, 0x33 },
+ { IMX_8BIT, 0x0301, 0x05 },
+ { IMX_8BIT, 0x0303, 0x01 },
+ { IMX_8BIT, 0x0305, 0x0C },
+ { IMX_8BIT, 0x0309, 0x05 },
+ { IMX_8BIT, 0x030B, 0x01 },
+ { IMX_8BIT, 0x030C, 0x01 },
+ { IMX_8BIT, 0x030D, 0xA9 },
+ { IMX_8BIT, 0x030E, 0x01 },
+ { IMX_8BIT, 0x3A06, 0x11 },
+
+ /* Mode setting */
+ { IMX_8BIT, 0x0108, 0x03 },
+ { IMX_8BIT, 0x0112, 0x0A },
+ { IMX_8BIT, 0x0113, 0x0A },
+ { IMX_8BIT, 0x0381, 0x01 },
+ { IMX_8BIT, 0x0383, 0x01 },
+ { IMX_8BIT, 0x0385, 0x01 },
+ { IMX_8BIT, 0x0387, 0x01 },
+ { IMX_8BIT, 0x0390, 0x01 }, /* binning */
+ { IMX_8BIT, 0x0391, 0x44 }, /* 4x4 binning */
+ { IMX_8BIT, 0x0392, 0x00 },
+ { IMX_8BIT, 0x0401, 0x00 }, /* no resize */
+ { IMX_8BIT, 0x0404, 0x00 },
+ { IMX_8BIT, 0x0405, 0x10 },
+ { IMX_8BIT, 0x4082, 0x01 },
+ { IMX_8BIT, 0x4083, 0x01 },
+ { IMX_8BIT, 0x7006, 0x04 },
+
+ /* Optional Function setting */
+ { IMX_8BIT, 0x0700, 0x00 },
+ { IMX_8BIT, 0x3A63, 0x00 },
+ { IMX_8BIT, 0x4100, 0xF8 },
+ { IMX_8BIT, 0x4203, 0xFF },
+ { IMX_8BIT, 0x4344, 0x00 },
+ { IMX_8BIT, 0x441C, 0x01 },
+
+ /* Size setting */
+ { IMX_8BIT, 0x0344, 0x00 }, /* x_addr_start[15:8]:0 */
+ { IMX_8BIT, 0x0345, 0x00 }, /* x_addr_start[7:0] */
+ { IMX_8BIT, 0x0346, 0x00 }, /* y_addr_start[15:8]:0 */
+ { IMX_8BIT, 0x0347, 0x00 }, /* y_addr_start[7:0] */
+ { IMX_8BIT, 0x0348, 0x0C }, /* x_addr_end[15:8]:3279 */
+ { IMX_8BIT, 0x0349, 0xCF }, /* x_addr_end[7:0] */
+ { IMX_8BIT, 0x034A, 0x09 }, /* y_addr_end[15:8]:2463 */
+ { IMX_8BIT, 0x034B, 0x9F }, /* y_addr_end[7:0] */
+ { IMX_8BIT, 0x034C, 0x03 }, /* x_output_size[15:8]:820 */
+ { IMX_8BIT, 0x034D, 0x34 }, /* x_output_size[7:0] */
+ { IMX_8BIT, 0x034E, 0x02 }, /* y_output_size[15:8]:616 */
+ { IMX_8BIT, 0x034F, 0x68 }, /* y_output_size[7:0] */
+ { IMX_8BIT, 0x0350, 0x00 },
+ { IMX_8BIT, 0x0351, 0x00 },
+ { IMX_8BIT, 0x0352, 0x00 },
+ { IMX_8BIT, 0x0353, 0x00 },
+ { IMX_8BIT, 0x0354, 0x03 },
+ { IMX_8BIT, 0x0355, 0x34 },
+ { IMX_8BIT, 0x0356, 0x02 },
+ { IMX_8BIT, 0x0357, 0x68 },
+ { IMX_8BIT, 0x301D, 0x30 },
+ { IMX_8BIT, 0x3310, 0x03 },
+ { IMX_8BIT, 0x3311, 0x34 },
+ { IMX_8BIT, 0x3312, 0x02 },
+ { IMX_8BIT, 0x3313, 0x68 },
+ { IMX_8BIT, 0x331C, 0x02 },
+ { IMX_8BIT, 0x331D, 0xD0 },
+ { IMX_8BIT, 0x4084, 0x00 },
+ { IMX_8BIT, 0x4085, 0x00 },
+ { IMX_8BIT, 0x4086, 0x00 },
+ { IMX_8BIT, 0x4087, 0x00 },
+ { IMX_8BIT, 0x4400, 0x00 },
+
+ /* Global Timing Setting */
+ { IMX_8BIT, 0x0830, 0x77 },
+ { IMX_8BIT, 0x0831, 0x2F },
+ { IMX_8BIT, 0x0832, 0x4F },
+ { IMX_8BIT, 0x0833, 0x37 },
+ { IMX_8BIT, 0x0834, 0x2F },
+ { IMX_8BIT, 0x0835, 0x37 },
+ { IMX_8BIT, 0x0836, 0xAF },
+ { IMX_8BIT, 0x0837, 0x37 },
+ { IMX_8BIT, 0x0839, 0x1F },
+ { IMX_8BIT, 0x083A, 0x17 },
+ { IMX_8BIT, 0x083B, 0x02 },
+
+ /* Integration Time Setting */
+ { IMX_8BIT, 0x0202, 0x09 },
+ { IMX_8BIT, 0x0203, 0xD2 },
+
+ /* HDR Setting */
+ { IMX_8BIT, 0x0230, 0x00 },
+ { IMX_8BIT, 0x0231, 0x00 },
+ { IMX_8BIT, 0x0233, 0x00 },
+ { IMX_8BIT, 0x0234, 0x00 },
+ { IMX_8BIT, 0x0235, 0x40 },
+ { IMX_8BIT, 0x0238, 0x00 },
+ { IMX_8BIT, 0x0239, 0x04 },
+ { IMX_8BIT, 0x023B, 0x00 },
+ { IMX_8BIT, 0x023C, 0x01 },
+ { IMX_8BIT, 0x33B0, 0x04 },
+ { IMX_8BIT, 0x33B1, 0x00 },
+ { IMX_8BIT, 0x33B3, 0x00 },
+ { IMX_8BIT, 0x33B4, 0x01 },
+ { IMX_8BIT, 0x3800, 0x00 },
+ { IMX_TOK_TERM, 0, 0 }
+};
+
+/* 4 lane, 1/4 binning 30fps 820x552 */
+static struct imx_reg const imx134_820_552_30fps[] = {
+ GROUPED_PARAMETER_HOLD_ENABLE,
+ /* mode set clear */
+ { IMX_8BIT, 0x3A43, 0x01 },
+ /* Clock Setting */
+ { IMX_8BIT, 0x011E, 0x13 },
+ { IMX_8BIT, 0x011F, 0x33 },
+ { IMX_8BIT, 0x0301, 0x05 },
+ { IMX_8BIT, 0x0303, 0x01 },
+ { IMX_8BIT, 0x0305, 0x0C },
+ { IMX_8BIT, 0x0309, 0x05 },
+ { IMX_8BIT, 0x030B, 0x01 },
+ { IMX_8BIT, 0x030C, 0x01 },
+ { IMX_8BIT, 0x030D, 0xA9 },
+ { IMX_8BIT, 0x030E, 0x01 },
+ { IMX_8BIT, 0x3A06, 0x11 },
+
+ /* Mode setting */
+ { IMX_8BIT, 0x0108, 0x03 },
+ { IMX_8BIT, 0x0112, 0x0A },
+ { IMX_8BIT, 0x0113, 0x0A },
+ { IMX_8BIT, 0x0381, 0x01 },
+ { IMX_8BIT, 0x0383, 0x01 },
+ { IMX_8BIT, 0x0385, 0x01 },
+ { IMX_8BIT, 0x0387, 0x01 },
+ { IMX_8BIT, 0x0390, 0x01 }, /* binning */
+ { IMX_8BIT, 0x0391, 0x44 }, /* 4x4 binning */
+ { IMX_8BIT, 0x0392, 0x00 },
+ { IMX_8BIT, 0x0401, 0x00 }, /* no resize */
+ { IMX_8BIT, 0x0404, 0x00 },
+ { IMX_8BIT, 0x0405, 0x10 },
+ { IMX_8BIT, 0x4082, 0x01 },
+ { IMX_8BIT, 0x4083, 0x01 },
+ { IMX_8BIT, 0x7006, 0x04 },
+
+ /* Optional Function setting */
+ { IMX_8BIT, 0x0700, 0x00 },
+ { IMX_8BIT, 0x3A63, 0x00 },
+ { IMX_8BIT, 0x4100, 0xF8 },
+ { IMX_8BIT, 0x4203, 0xFF },
+ { IMX_8BIT, 0x4344, 0x00 },
+ { IMX_8BIT, 0x441C, 0x01 },
+
+ /* Size setting */
+ { IMX_8BIT, 0x0344, 0x00 }, /* x_addr_start[15:8]:0 */
+ { IMX_8BIT, 0x0345, 0x00 }, /* x_addr_start[7:0] */
+ { IMX_8BIT, 0x0346, 0x00 }, /* y_addr_start[15:8]:128 */
+ { IMX_8BIT, 0x0347, 0x80 }, /* y_addr_start[7:0] */
+ { IMX_8BIT, 0x0348, 0x0C }, /* x_addr_end[15:8]:3280-1 */
+ { IMX_8BIT, 0x0349, 0xCF }, /* x_addr_end[7:0] */
+ { IMX_8BIT, 0x034A, 0x09 }, /* y_addr_end[15:8]:2208+128-1 */
+ { IMX_8BIT, 0x034B, 0x1F }, /* y_addr_end[7:0] */
+ { IMX_8BIT, 0x034C, 0x03 }, /* x_output_size[15:8]:820 */
+ { IMX_8BIT, 0x034D, 0x34 }, /* x_output_size[7:0] */
+ { IMX_8BIT, 0x034E, 0x02 }, /* y_output_size[15:8]:552 */
+ { IMX_8BIT, 0x034F, 0x28 }, /* y_output_size[7:0] */
+ { IMX_8BIT, 0x0350, 0x00 },
+ { IMX_8BIT, 0x0351, 0x00 },
+ { IMX_8BIT, 0x0352, 0x00 },
+ { IMX_8BIT, 0x0353, 0x00 },
+ { IMX_8BIT, 0x0354, 0x03 },
+ { IMX_8BIT, 0x0355, 0x34 },
+ { IMX_8BIT, 0x0356, 0x02 },
+ { IMX_8BIT, 0x0357, 0x28 },
+ { IMX_8BIT, 0x301D, 0x30 },
+ { IMX_8BIT, 0x3310, 0x03 },
+ { IMX_8BIT, 0x3311, 0x34 },
+ { IMX_8BIT, 0x3312, 0x02 },
+ { IMX_8BIT, 0x3313, 0x28 },
+ { IMX_8BIT, 0x331C, 0x02 },
+ { IMX_8BIT, 0x331D, 0xD0 },
+ { IMX_8BIT, 0x4084, 0x00 },
+ { IMX_8BIT, 0x4085, 0x00 },
+ { IMX_8BIT, 0x4086, 0x00 },
+ { IMX_8BIT, 0x4087, 0x00 },
+ { IMX_8BIT, 0x4400, 0x00 },
+
+ /* Global Timing Setting */
+ { IMX_8BIT, 0x0830, 0x77 },
+ { IMX_8BIT, 0x0831, 0x2F },
+ { IMX_8BIT, 0x0832, 0x4F },
+ { IMX_8BIT, 0x0833, 0x37 },
+ { IMX_8BIT, 0x0834, 0x2F },
+ { IMX_8BIT, 0x0835, 0x37 },
+ { IMX_8BIT, 0x0836, 0xAF },
+ { IMX_8BIT, 0x0837, 0x37 },
+ { IMX_8BIT, 0x0839, 0x1F },
+ { IMX_8BIT, 0x083A, 0x17 },
+ { IMX_8BIT, 0x083B, 0x02 },
+
+ /* Integration Time Setting */
+ { IMX_8BIT, 0x0202, 0x09 },
+ { IMX_8BIT, 0x0203, 0xD2 },
+
+ /* HDR Setting */
+ { IMX_8BIT, 0x0230, 0x00 },
+ { IMX_8BIT, 0x0231, 0x00 },
+ { IMX_8BIT, 0x0233, 0x00 },
+ { IMX_8BIT, 0x0234, 0x00 },
+ { IMX_8BIT, 0x0235, 0x40 },
+ { IMX_8BIT, 0x0238, 0x00 },
+ { IMX_8BIT, 0x0239, 0x04 },
+ { IMX_8BIT, 0x023B, 0x00 },
+ { IMX_8BIT, 0x023C, 0x01 },
+ { IMX_8BIT, 0x33B0, 0x04 },
+ { IMX_8BIT, 0x33B1, 0x00 },
+ { IMX_8BIT, 0x33B3, 0x00 },
+ { IMX_8BIT, 0x33B4, 0x01 },
+ { IMX_8BIT, 0x3800, 0x00 },
+ { IMX_TOK_TERM, 0, 0 }
+};
+
+/* 4 lane, 1/4 binning 30fps 720x592 */
+static struct imx_reg const imx134_720_592_30fps[] = {
+ GROUPED_PARAMETER_HOLD_ENABLE,
+ /* mode set clear */
+ { IMX_8BIT, 0x3A43, 0x01 },
+ /* Clock Setting */
+ { IMX_8BIT, 0x011E, 0x13 },
+ { IMX_8BIT, 0x011F, 0x33 },
+ { IMX_8BIT, 0x0301, 0x05 },
+ { IMX_8BIT, 0x0303, 0x01 },
+ { IMX_8BIT, 0x0305, 0x0C },
+ { IMX_8BIT, 0x0309, 0x05 },
+ { IMX_8BIT, 0x030B, 0x01 },
+ { IMX_8BIT, 0x030C, 0x01 },
+ { IMX_8BIT, 0x030D, 0xA9 },
+ { IMX_8BIT, 0x030E, 0x01 },
+ { IMX_8BIT, 0x3A06, 0x11 },
+
+ /* Mode setting */
+ { IMX_8BIT, 0x0108, 0x03 },
+ { IMX_8BIT, 0x0112, 0x0A },
+ { IMX_8BIT, 0x0113, 0x0A },
+ { IMX_8BIT, 0x0381, 0x01 },
+ { IMX_8BIT, 0x0383, 0x01 },
+ { IMX_8BIT, 0x0385, 0x01 },
+ { IMX_8BIT, 0x0387, 0x01 },
+ { IMX_8BIT, 0x0390, 0x01 }, /* binning */
+ { IMX_8BIT, 0x0391, 0x44 }, /* 4x4 binning */
+ { IMX_8BIT, 0x0392, 0x00 },
+ { IMX_8BIT, 0x0401, 0x00 }, /* no resize */
+ { IMX_8BIT, 0x0404, 0x00 },
+ { IMX_8BIT, 0x0405, 0x10 },
+ { IMX_8BIT, 0x4082, 0x01 },
+ { IMX_8BIT, 0x4083, 0x01 },
+ { IMX_8BIT, 0x7006, 0x04 },
+
+ /* Optional Function setting */
+ { IMX_8BIT, 0x0700, 0x00 },
+ { IMX_8BIT, 0x3A63, 0x00 },
+ { IMX_8BIT, 0x4100, 0xF8 },
+ { IMX_8BIT, 0x4203, 0xFF },
+ { IMX_8BIT, 0x4344, 0x00 },
+ { IMX_8BIT, 0x441C, 0x01 },
+
+ /* Size setting */
+ { IMX_8BIT, 0x0344, 0x00 }, /* x_addr_start[15:8]:200 */
+ { IMX_8BIT, 0x0345, 0xC8 }, /* x_addr_start[7:0] */
+ { IMX_8BIT, 0x0346, 0x00 }, /* y_addr_start[15:8]:40 */
+ { IMX_8BIT, 0x0347, 0x28 }, /* y_addr_start[7:0] */
+ { IMX_8BIT, 0x0348, 0x0C }, /* x_addr_end[15:8]:2880+200-1 */
+ { IMX_8BIT, 0x0349, 0x07 }, /* x_addr_end[7:0] */
+ { IMX_8BIT, 0x034A, 0x09 }, /* y_addr_end[15:8]:2368+40-1 */
+ { IMX_8BIT, 0x034B, 0x67 }, /* y_addr_end[7:0] */
+ { IMX_8BIT, 0x034C, 0x02 }, /* x_output_size[15:8]:720 */
+ { IMX_8BIT, 0x034D, 0xD0 }, /* x_output_size[7:0] */
+ { IMX_8BIT, 0x034E, 0x02 }, /* y_output_size[15:8]:592 */
+ { IMX_8BIT, 0x034F, 0x50 }, /* y_output_size[7:0] */
+ { IMX_8BIT, 0x0350, 0x00 },
+ { IMX_8BIT, 0x0351, 0x00 },
+ { IMX_8BIT, 0x0352, 0x00 },
+ { IMX_8BIT, 0x0353, 0x00 },
+ { IMX_8BIT, 0x0354, 0x02 },
+ { IMX_8BIT, 0x0355, 0xD0 },
+ { IMX_8BIT, 0x0356, 0x02 },
+ { IMX_8BIT, 0x0357, 0x50 },
+ { IMX_8BIT, 0x301D, 0x30 },
+ { IMX_8BIT, 0x3310, 0x02 },
+ { IMX_8BIT, 0x3311, 0xD0 },
+ { IMX_8BIT, 0x3312, 0x02 },
+ { IMX_8BIT, 0x3313, 0x50 },
+ { IMX_8BIT, 0x331C, 0x02 },
+ { IMX_8BIT, 0x331D, 0xD0 },
+ { IMX_8BIT, 0x4084, 0x00 },
+ { IMX_8BIT, 0x4085, 0x00 },
+ { IMX_8BIT, 0x4086, 0x00 },
+ { IMX_8BIT, 0x4087, 0x00 },
+ { IMX_8BIT, 0x4400, 0x00 },
+
+ /* Global Timing Setting */
+ { IMX_8BIT, 0x0830, 0x77 },
+ { IMX_8BIT, 0x0831, 0x2F },
+ { IMX_8BIT, 0x0832, 0x4F },
+ { IMX_8BIT, 0x0833, 0x37 },
+ { IMX_8BIT, 0x0834, 0x2F },
+ { IMX_8BIT, 0x0835, 0x37 },
+ { IMX_8BIT, 0x0836, 0xAF },
+ { IMX_8BIT, 0x0837, 0x37 },
+ { IMX_8BIT, 0x0839, 0x1F },
+ { IMX_8BIT, 0x083A, 0x17 },
+ { IMX_8BIT, 0x083B, 0x02 },
+
+ /* Integration Time Setting */
+ { IMX_8BIT, 0x0202, 0x09 },
+ { IMX_8BIT, 0x0203, 0xD2 },
+
+ /* HDR Setting */
+ { IMX_8BIT, 0x0230, 0x00 },
+ { IMX_8BIT, 0x0231, 0x00 },
+ { IMX_8BIT, 0x0233, 0x00 },
+ { IMX_8BIT, 0x0234, 0x00 },
+ { IMX_8BIT, 0x0235, 0x40 },
+ { IMX_8BIT, 0x0238, 0x00 },
+ { IMX_8BIT, 0x0239, 0x04 },
+ { IMX_8BIT, 0x023B, 0x00 },
+ { IMX_8BIT, 0x023C, 0x01 },
+ { IMX_8BIT, 0x33B0, 0x04 },
+ { IMX_8BIT, 0x33B1, 0x00 },
+ { IMX_8BIT, 0x33B3, 0x00 },
+ { IMX_8BIT, 0x33B4, 0x01 },
+ { IMX_8BIT, 0x3800, 0x00 },
+ { IMX_TOK_TERM, 0, 0 }
+};
+
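+/* 1/4 binning, 30fps, 752x616 (sizes per the register comments below) */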
+static struct imx_reg const imx134_752_616_30fps[] = {
+ GROUPED_PARAMETER_HOLD_ENABLE,
+ /* mode set clear */
+ { IMX_8BIT, 0x3A43, 0x01 },
+ /* Clock Setting */
+ { IMX_8BIT, 0x011E, 0x13 },
+ { IMX_8BIT, 0x011F, 0x33 },
+ { IMX_8BIT, 0x0301, 0x05 },
+ { IMX_8BIT, 0x0303, 0x01 },
+ { IMX_8BIT, 0x0305, 0x0C },
+ { IMX_8BIT, 0x0309, 0x05 },
+ { IMX_8BIT, 0x030B, 0x01 },
+ { IMX_8BIT, 0x030C, 0x01 },
+ { IMX_8BIT, 0x030D, 0xA9 },
+ { IMX_8BIT, 0x030E, 0x01 },
+ { IMX_8BIT, 0x3A06, 0x11 },
+
+ /* Mode setting */
+ { IMX_8BIT, 0x0108, 0x03 },
+ { IMX_8BIT, 0x0112, 0x0A },
+ { IMX_8BIT, 0x0113, 0x0A },
+ { IMX_8BIT, 0x0381, 0x01 },
+ { IMX_8BIT, 0x0383, 0x01 },
+ { IMX_8BIT, 0x0385, 0x01 },
+ { IMX_8BIT, 0x0387, 0x01 },
+ { IMX_8BIT, 0x0390, 0x01 }, /* binning */
+ { IMX_8BIT, 0x0391, 0x44 }, /* 4x4 binning */
+ { IMX_8BIT, 0x0392, 0x00 },
+ { IMX_8BIT, 0x0401, 0x00 }, /* no resize */
+ { IMX_8BIT, 0x0404, 0x00 },
+ { IMX_8BIT, 0x0405, 0x10 },
+ { IMX_8BIT, 0x4082, 0x01 },
+ { IMX_8BIT, 0x4083, 0x01 },
+ { IMX_8BIT, 0x7006, 0x04 },
+
+ /* Optional Function setting */
+ { IMX_8BIT, 0x0700, 0x00 },
+ { IMX_8BIT, 0x3A63, 0x00 },
+ { IMX_8BIT, 0x4100, 0xF8 },
+ { IMX_8BIT, 0x4203, 0xFF },
+ { IMX_8BIT, 0x4344, 0x00 },
+ { IMX_8BIT, 0x441C, 0x01 },
+
+ /* Size setting */
+ { IMX_8BIT, 0x0344, 0x00 }, /* x_addr_start[15:8]:136 */
+ { IMX_8BIT, 0x0345, 0x88 }, /* x_addr_start[7:0] */
+ { IMX_8BIT, 0x0346, 0x00 }, /* y_addr_start[15:8]:0 */
+ { IMX_8BIT, 0x0347, 0x00 }, /* y_addr_start[7:0] */
+ { IMX_8BIT, 0x0348, 0x0C }, /* x_addr_end[15:8]:3143 */
+ { IMX_8BIT, 0x0349, 0x47 }, /* x_addr_end[7:0] */
+ { IMX_8BIT, 0x034A, 0x09 }, /* y_addr_end[15:8]:2463 */
+ { IMX_8BIT, 0x034B, 0x9F }, /* y_addr_end[7:0] */
+ { IMX_8BIT, 0x034C, 0x02 }, /* x_output_size[15:8]: 752*/
+ { IMX_8BIT, 0x034D, 0xF0 }, /* x_output_size[7:0] */
+ { IMX_8BIT, 0x034E, 0x02 }, /* y_output_size[15:8]:616 */
+ { IMX_8BIT, 0x034F, 0x68 }, /* y_output_size[7:0] */
+ { IMX_8BIT, 0x0350, 0x00 },
+ { IMX_8BIT, 0x0351, 0x00 },
+ { IMX_8BIT, 0x0352, 0x00 },
+ { IMX_8BIT, 0x0353, 0x00 },
+
+ { IMX_8BIT, 0x0354, 0x02 },
+ { IMX_8BIT, 0x0355, 0xF0 },
+ { IMX_8BIT, 0x0356, 0x02 },
+ { IMX_8BIT, 0x0357, 0x68 },
+
+ { IMX_8BIT, 0x301D, 0x30 },
+
+ { IMX_8BIT, 0x3310, 0x02 },
+ { IMX_8BIT, 0x3311, 0xF0 },
+ { IMX_8BIT, 0x3312, 0x02 },
+ { IMX_8BIT, 0x3313, 0x68 },
+
+ { IMX_8BIT, 0x331C, 0x02 },
+ { IMX_8BIT, 0x331D, 0xD0 },
+ { IMX_8BIT, 0x4084, 0x00 },
+ { IMX_8BIT, 0x4085, 0x00 },
+ { IMX_8BIT, 0x4086, 0x00 },
+ { IMX_8BIT, 0x4087, 0x00 },
+ { IMX_8BIT, 0x4400, 0x00 },
+
+ /* Global Timing Setting */
+ { IMX_8BIT, 0x0830, 0x77 },
+ { IMX_8BIT, 0x0831, 0x2F },
+ { IMX_8BIT, 0x0832, 0x4F },
+ { IMX_8BIT, 0x0833, 0x37 },
+ { IMX_8BIT, 0x0834, 0x2F },
+ { IMX_8BIT, 0x0835, 0x37 },
+ { IMX_8BIT, 0x0836, 0xAF },
+ { IMX_8BIT, 0x0837, 0x37 },
+ { IMX_8BIT, 0x0839, 0x1F },
+ { IMX_8BIT, 0x083A, 0x17 },
+ { IMX_8BIT, 0x083B, 0x02 },
+
+ /* Integration Time Setting */
+ { IMX_8BIT, 0x0202, 0x09 },
+ { IMX_8BIT, 0x0203, 0xD2 },
+
+ /* HDR Setting */
+ { IMX_8BIT, 0x0230, 0x00 },
+ { IMX_8BIT, 0x0231, 0x00 },
+ { IMX_8BIT, 0x0233, 0x00 },
+ { IMX_8BIT, 0x0234, 0x00 },
+ { IMX_8BIT, 0x0235, 0x40 },
+ { IMX_8BIT, 0x0238, 0x00 },
+ { IMX_8BIT, 0x0239, 0x04 },
+ { IMX_8BIT, 0x023B, 0x00 },
+ { IMX_8BIT, 0x023C, 0x01 },
+ { IMX_8BIT, 0x33B0, 0x04 },
+ { IMX_8BIT, 0x33B1, 0x00 },
+ { IMX_8BIT, 0x33B3, 0x00 },
+ { IMX_8BIT, 0x33B4, 0x01 },
+ { IMX_8BIT, 0x3800, 0x00 },
+ { IMX_TOK_TERM, 0, 0 }
+};
+
+/* 1424x1168 */
+static struct imx_reg const imx134_1424_1168_30fps[] = {
+ GROUPED_PARAMETER_HOLD_ENABLE,
+ /* mode set clear */
+ { IMX_8BIT, 0x3A43, 0x01 },
+ /* Clock Setting */
+ { IMX_8BIT, 0x011E, 0x13 },
+ { IMX_8BIT, 0x011F, 0x33 },
+ { IMX_8BIT, 0x0301, 0x05 },
+ { IMX_8BIT, 0x0303, 0x01 },
+ { IMX_8BIT, 0x0305, 0x0C },
+ { IMX_8BIT, 0x0309, 0x05 },
+ { IMX_8BIT, 0x030B, 0x01 },
+ { IMX_8BIT, 0x030C, 0x01 },
+ { IMX_8BIT, 0x030D, 0xA9 },
+ { IMX_8BIT, 0x030E, 0x01 },
+ { IMX_8BIT, 0x3A06, 0x11 },
+
+ /* Mode setting */
+ { IMX_8BIT, 0x0108, 0x03 },
+ { IMX_8BIT, 0x0112, 0x0A },
+ { IMX_8BIT, 0x0113, 0x0A },
+ { IMX_8BIT, 0x0381, 0x01 },
+ { IMX_8BIT, 0x0383, 0x01 },
+ { IMX_8BIT, 0x0385, 0x01 },
+ { IMX_8BIT, 0x0387, 0x01 },
+ { IMX_8BIT, 0x0390, 0x00 }, /* binning */
+ { IMX_8BIT, 0x0391, 0x11 }, /* no binning */
+ { IMX_8BIT, 0x0392, 0x00 },
+ { IMX_8BIT, 0x0401, 0x02 }, /* resize */
+ { IMX_8BIT, 0x0404, 0x00 },
+ { IMX_8BIT, 0x0405, 0x22 }, /* 34/16=2.125 */
+ { IMX_8BIT, 0x4082, 0x00 }, /* ?? */
+ { IMX_8BIT, 0x4083, 0x00 }, /* ?? */
+ { IMX_8BIT, 0x7006, 0x04 },
+
+ /* Optional Function setting */
+ { IMX_8BIT, 0x0700, 0x00 },
+ { IMX_8BIT, 0x3A63, 0x00 },
+ { IMX_8BIT, 0x4100, 0xF8 },
+ { IMX_8BIT, 0x4203, 0xFF },
+ { IMX_8BIT, 0x4344, 0x00 },
+ { IMX_8BIT, 0x441C, 0x01 },
+
+ /* Size setting */
+ { IMX_8BIT, 0x0344, 0x00 }, /* x_addr_start[15:8]:128 */
+ { IMX_8BIT, 0x0345, 0x80 }, /* x_addr_start[7:0] */
+ { IMX_8BIT, 0x0346, 0x00 }, /* y_addr_start[15:8]:0 */
+ { IMX_8BIT, 0x0347, 0x00 }, /* y_addr_start[7:0] */
+ { IMX_8BIT, 0x0348, 0x0C }, /* x_addr_end[15:8]:3153 */
+ { IMX_8BIT, 0x0349, 0x51 }, /* x_addr_end[7:0] */
+ { IMX_8BIT, 0x034A, 0x09 }, /* y_addr_end[15:8]:2481 */
+ { IMX_8BIT, 0x034B, 0xB1 }, /* y_addr_end[7:0] */
+ { IMX_8BIT, 0x034C, 0x05 }, /* x_output_size[15:8]: 1424*/
+ { IMX_8BIT, 0x034D, 0x90 }, /* x_output_size[7:0] */
+ { IMX_8BIT, 0x034E, 0x04 }, /* y_output_size[15:8]:1168 */
+ { IMX_8BIT, 0x034F, 0x90 }, /* y_output_size[7:0] */
+ { IMX_8BIT, 0x0350, 0x00 },
+ { IMX_8BIT, 0x0351, 0x00 },
+ { IMX_8BIT, 0x0352, 0x00 },
+ { IMX_8BIT, 0x0353, 0x00 },
+
+ { IMX_8BIT, 0x0354, 0x0B },
+ { IMX_8BIT, 0x0355, 0xD2 },
+ { IMX_8BIT, 0x0356, 0x09 },
+ { IMX_8BIT, 0x0357, 0xB2 },
+
+ { IMX_8BIT, 0x301D, 0x30 },
+
+ { IMX_8BIT, 0x3310, 0x05 },
+ { IMX_8BIT, 0x3311, 0x90 },
+ { IMX_8BIT, 0x3312, 0x04 },
+ { IMX_8BIT, 0x3313, 0x90 },
+
+ { IMX_8BIT, 0x331C, 0x02 },
+ { IMX_8BIT, 0x331D, 0xD0 },
+ { IMX_8BIT, 0x4084, 0x05 },
+ { IMX_8BIT, 0x4085, 0x90 },
+ { IMX_8BIT, 0x4086, 0x04 },
+ { IMX_8BIT, 0x4087, 0x90 },
+ { IMX_8BIT, 0x4400, 0x00 },
+
+ /* Global Timing Setting */
+ { IMX_8BIT, 0x0830, 0x77 },
+ { IMX_8BIT, 0x0831, 0x2F },
+ { IMX_8BIT, 0x0832, 0x4F },
+ { IMX_8BIT, 0x0833, 0x37 },
+ { IMX_8BIT, 0x0834, 0x2F },
+ { IMX_8BIT, 0x0835, 0x37 },
+ { IMX_8BIT, 0x0836, 0xAF },
+ { IMX_8BIT, 0x0837, 0x37 },
+ { IMX_8BIT, 0x0839, 0x1F },
+ { IMX_8BIT, 0x083A, 0x17 },
+ { IMX_8BIT, 0x083B, 0x02 },
+
+ /* Integration Time Setting */
+ { IMX_8BIT, 0x0202, 0x09 },
+ { IMX_8BIT, 0x0203, 0xD2 },
+
+ /* HDR Setting */
+ { IMX_8BIT, 0x0230, 0x00 },
+ { IMX_8BIT, 0x0231, 0x00 },
+ { IMX_8BIT, 0x0233, 0x00 },
+ { IMX_8BIT, 0x0234, 0x00 },
+ { IMX_8BIT, 0x0235, 0x40 },
+ { IMX_8BIT, 0x0238, 0x00 },
+ { IMX_8BIT, 0x0239, 0x04 },
+ { IMX_8BIT, 0x023B, 0x00 },
+ { IMX_8BIT, 0x023C, 0x01 },
+ { IMX_8BIT, 0x33B0, 0x04 },
+ { IMX_8BIT, 0x33B1, 0x00 },
+ { IMX_8BIT, 0x33B3, 0x00 },
+ { IMX_8BIT, 0x33B4, 0x01 },
+ { IMX_8BIT, 0x3800, 0x00 },
+ { IMX_TOK_TERM, 0, 0 }
+};
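+
+/*
+ * Scaling check for the 1424x1168 mode above: the crop programmed in
+ * 0x0354-0x0357 is 0x0BD2 x 0x09B2 = 3026 x 2482 pixels; scaled by
+ * 16/34 (0x0405 = 0x22) this gives exactly 1424 x 1168.
+ */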
+
+/* 4 lane, 1/4 binning, 16/35 down scaling, 30fps, dvs */
+static struct imx_reg const imx134_240_196_30fps[] = {
+ GROUPED_PARAMETER_HOLD_ENABLE,
+ /* mode set clear */
+ { IMX_8BIT, 0x3A43, 0x01 },
+ /* Clock Setting */
+ { IMX_8BIT, 0x011E, 0x13 },
+ { IMX_8BIT, 0x011F, 0x33 },
+ { IMX_8BIT, 0x0301, 0x05 },
+ { IMX_8BIT, 0x0303, 0x01 },
+ { IMX_8BIT, 0x0305, 0x0C },
+ { IMX_8BIT, 0x0309, 0x05 },
+ { IMX_8BIT, 0x030B, 0x01 },
+ { IMX_8BIT, 0x030C, 0x01 },
+ { IMX_8BIT, 0x030D, 0xA9 },
+ { IMX_8BIT, 0x030E, 0x01 },
+ { IMX_8BIT, 0x3A06, 0x11 },
+
+ /* Mode setting */
+ { IMX_8BIT, 0x0108, 0x03 },
+ { IMX_8BIT, 0x0112, 0x0A },
+ { IMX_8BIT, 0x0113, 0x0A },
+ { IMX_8BIT, 0x0381, 0x01 },
+ { IMX_8BIT, 0x0383, 0x01 },
+ { IMX_8BIT, 0x0385, 0x01 },
+ { IMX_8BIT, 0x0387, 0x01 },
+ { IMX_8BIT, 0x0390, 0x01 }, /* 4x4 binning */
+ { IMX_8BIT, 0x0391, 0x44 },
+ { IMX_8BIT, 0x0392, 0x00 },
+ { IMX_8BIT, 0x0401, 0x02 }, /* resize */
+ { IMX_8BIT, 0x0404, 0x00 },
+ { IMX_8BIT, 0x0405, 0x23 }, /* down scaling = 16/35 */
+ { IMX_8BIT, 0x4082, 0x00 },
+ { IMX_8BIT, 0x4083, 0x00 },
+ { IMX_8BIT, 0x7006, 0x04 },
+
+ /* Optional Function setting */
+ { IMX_8BIT, 0x0700, 0x00 },
+ { IMX_8BIT, 0x3A63, 0x00 },
+ { IMX_8BIT, 0x4100, 0xF8 },
+ { IMX_8BIT, 0x4203, 0xFF },
+ { IMX_8BIT, 0x4344, 0x00 },
+ { IMX_8BIT, 0x441C, 0x01 },
+
+ /* Size setting */
+ { IMX_8BIT, 0x0344, 0x02 }, /* x_addr_start[15:8]:590 */
+ { IMX_8BIT, 0x0345, 0x4E }, /* x_addr_start[7:0] */
+ { IMX_8BIT, 0x0346, 0x01 }, /* y_addr_start[15:8]:366 */
+ { IMX_8BIT, 0x0347, 0x6E }, /* y_addr_start[7:0] */
+ { IMX_8BIT, 0x0348, 0x0A }, /* x_addr_end[15:8]:2104+590-1 */
+ { IMX_8BIT, 0x0349, 0x85 }, /* x_addr_end[7:0] */
+ { IMX_8BIT, 0x034A, 0x08 }, /* y_addr_end[15:8]:1720+366-1 */
+ { IMX_8BIT, 0x034B, 0x25 }, /* y_addr_end[7:0] */
+ { IMX_8BIT, 0x034C, 0x00 }, /* x_output_size[15:8]: 240*/
+ { IMX_8BIT, 0x034D, 0xF0 }, /* x_output_size[7:0] */
+ { IMX_8BIT, 0x034E, 0x00 }, /* y_output_size[15:8]:196 */
+ { IMX_8BIT, 0x034F, 0xC4 }, /* y_output_size[7:0] */
+ { IMX_8BIT, 0x0350, 0x00 },
+ { IMX_8BIT, 0x0351, 0x00 },
+ { IMX_8BIT, 0x0352, 0x00 },
+ { IMX_8BIT, 0x0353, 0x00 },
+ { IMX_8BIT, 0x0354, 0x02 }, /* crop_x: 526 */
+ { IMX_8BIT, 0x0355, 0x0E },
+ { IMX_8BIT, 0x0356, 0x01 }, /* crop_y: 430 */
+ { IMX_8BIT, 0x0357, 0xAE },
+
+ { IMX_8BIT, 0x301D, 0x30 },
+
+ { IMX_8BIT, 0x3310, 0x00 },
+ { IMX_8BIT, 0x3311, 0xF0 },
+ { IMX_8BIT, 0x3312, 0x00 },
+ { IMX_8BIT, 0x3313, 0xC4 },
+
+ { IMX_8BIT, 0x331C, 0x04 },
+ { IMX_8BIT, 0x331D, 0x4C },
+
+ { IMX_8BIT, 0x4084, 0x00 },
+ { IMX_8BIT, 0x4085, 0xF0 },
+ { IMX_8BIT, 0x4086, 0x00 },
+ { IMX_8BIT, 0x4087, 0xC4 },
+
+ { IMX_8BIT, 0x4400, 0x00 },
+
+ /* Global Timing Setting */
+ { IMX_8BIT, 0x0830, 0x77 },
+ { IMX_8BIT, 0x0831, 0x2F },
+ { IMX_8BIT, 0x0832, 0x4F },
+ { IMX_8BIT, 0x0833, 0x37 },
+ { IMX_8BIT, 0x0834, 0x2F },
+ { IMX_8BIT, 0x0835, 0x37 },
+ { IMX_8BIT, 0x0836, 0xAF },
+ { IMX_8BIT, 0x0837, 0x37 },
+ { IMX_8BIT, 0x0839, 0x1F },
+ { IMX_8BIT, 0x083A, 0x17 },
+ { IMX_8BIT, 0x083B, 0x02 },
+
+ /* Integration Time Setting */
+ { IMX_8BIT, 0x0202, 0x0A },
+ { IMX_8BIT, 0x0203, 0x88 },
+
+ /* HDR Setting */
+ { IMX_8BIT, 0x0230, 0x00 },
+ { IMX_8BIT, 0x0231, 0x00 },
+ { IMX_8BIT, 0x0233, 0x00 },
+ { IMX_8BIT, 0x0234, 0x00 },
+ { IMX_8BIT, 0x0235, 0x40 },
+ { IMX_8BIT, 0x0238, 0x00 },
+ { IMX_8BIT, 0x0239, 0x04 },
+ { IMX_8BIT, 0x023B, 0x00 },
+ { IMX_8BIT, 0x023C, 0x01 },
+ { IMX_8BIT, 0x33B0, 0x04 },
+ { IMX_8BIT, 0x33B1, 0x00 },
+ { IMX_8BIT, 0x33B3, 0x00 },
+ { IMX_8BIT, 0x33B4, 0x01 },
+ { IMX_8BIT, 0x3800, 0x00 },
+ { IMX_TOK_TERM, 0, 0 }
+};
+
+/* 4 lane, 1/2 binning, 16/38 downscaling, 30fps, dvs */
+static struct imx_reg const imx134_448_366_30fps[] = {
+ GROUPED_PARAMETER_HOLD_ENABLE,
+ /* mode set clear */
+ { IMX_8BIT, 0x3A43, 0x01 },
+ /* Clock Setting */
+ { IMX_8BIT, 0x011E, 0x13 },
+ { IMX_8BIT, 0x011F, 0x33 },
+ { IMX_8BIT, 0x0301, 0x05 },
+ { IMX_8BIT, 0x0303, 0x01 },
+ { IMX_8BIT, 0x0305, 0x0C },
+ { IMX_8BIT, 0x0309, 0x05 },
+ { IMX_8BIT, 0x030B, 0x01 },
+ { IMX_8BIT, 0x030C, 0x01 },
+ { IMX_8BIT, 0x030D, 0xA9 },
+ { IMX_8BIT, 0x030E, 0x01 },
+ { IMX_8BIT, 0x3A06, 0x11 },
+
+ /* Mode setting */
+ { IMX_8BIT, 0x0108, 0x03 },
+ { IMX_8BIT, 0x0112, 0x0A },
+ { IMX_8BIT, 0x0113, 0x0A },
+ { IMX_8BIT, 0x0381, 0x01 },
+ { IMX_8BIT, 0x0383, 0x01 },
+ { IMX_8BIT, 0x0385, 0x01 },
+ { IMX_8BIT, 0x0387, 0x01 },
+ { IMX_8BIT, 0x0390, 0x01 }, /* 2x2 binning */
+ { IMX_8BIT, 0x0391, 0x22 },
+ { IMX_8BIT, 0x0392, 0x00 },
+ { IMX_8BIT, 0x0401, 0x02 }, /* resize */
+ { IMX_8BIT, 0x0404, 0x00 },
+ { IMX_8BIT, 0x0405, 0x26 }, /* down scaling = 16/38 */
+ { IMX_8BIT, 0x4082, 0x00 },
+ { IMX_8BIT, 0x4083, 0x00 },
+ { IMX_8BIT, 0x7006, 0x04 },
+
+ /* Optional Function setting */
+ { IMX_8BIT, 0x0700, 0x00 },
+ { IMX_8BIT, 0x3A63, 0x00 },
+ { IMX_8BIT, 0x4100, 0xF8 },
+ { IMX_8BIT, 0x4203, 0xFF },
+ { IMX_8BIT, 0x4344, 0x00 },
+ { IMX_8BIT, 0x441C, 0x01 },
+
+ /* Size setting */
+ { IMX_8BIT, 0x0344, 0x02 }, /* x_addr_start[15:8]:590 */
+ { IMX_8BIT, 0x0345, 0x4E }, /* x_addr_start[7:0] */
+ { IMX_8BIT, 0x0346, 0x01 }, /* y_addr_start[15:8]:366 */
+ { IMX_8BIT, 0x0347, 0x6E }, /* y_addr_start[7:0] */
+ { IMX_8BIT, 0x0348, 0x0A }, /* x_addr_end[15:8]:2128+590-1 */
+ { IMX_8BIT, 0x0349, 0x9D }, /* x_addr_end[7:0] */
+ { IMX_8BIT, 0x034A, 0x08 }, /* y_addr_end[15:8]:1740+366-1 */
+ { IMX_8BIT, 0x034B, 0x39 }, /* y_addr_end[7:0] */
+ { IMX_8BIT, 0x034C, 0x01 }, /* x_output_size[15:8]: 448*/
+ { IMX_8BIT, 0x034D, 0xC0 }, /* x_output_size[7:0] */
+ { IMX_8BIT, 0x034E, 0x01 }, /* y_output_size[15:8]:366 */
+ { IMX_8BIT, 0x034F, 0x6E }, /* y_output_size[7:0] */
+ { IMX_8BIT, 0x0350, 0x00 },
+ { IMX_8BIT, 0x0351, 0x00 },
+ { IMX_8BIT, 0x0352, 0x00 },
+ { IMX_8BIT, 0x0353, 0x00 },
+ { IMX_8BIT, 0x0354, 0x04 }, /* crop_x: 1064 */
+ { IMX_8BIT, 0x0355, 0x28 },
+ { IMX_8BIT, 0x0356, 0x03 }, /* crop_y: 870 */
+ { IMX_8BIT, 0x0357, 0x66 },
+
+ { IMX_8BIT, 0x301D, 0x30 },
+
+ { IMX_8BIT, 0x3310, 0x01 },
+ { IMX_8BIT, 0x3311, 0xC0 },
+ { IMX_8BIT, 0x3312, 0x01 },
+ { IMX_8BIT, 0x3313, 0x6E },
+
+ { IMX_8BIT, 0x331C, 0x02 },
+ { IMX_8BIT, 0x331D, 0xD0 },
+
+ { IMX_8BIT, 0x4084, 0x01 },
+ { IMX_8BIT, 0x4085, 0xC0 },
+ { IMX_8BIT, 0x4086, 0x01 },
+ { IMX_8BIT, 0x4087, 0x6E },
+ { IMX_8BIT, 0x4400, 0x00 },
+
+ /* Global Timing Setting */
+ { IMX_8BIT, 0x0830, 0x77 },
+ { IMX_8BIT, 0x0831, 0x2F },
+ { IMX_8BIT, 0x0832, 0x4F },
+ { IMX_8BIT, 0x0833, 0x37 },
+ { IMX_8BIT, 0x0834, 0x2F },
+ { IMX_8BIT, 0x0835, 0x37 },
+ { IMX_8BIT, 0x0836, 0xAF },
+ { IMX_8BIT, 0x0837, 0x37 },
+ { IMX_8BIT, 0x0839, 0x1F },
+ { IMX_8BIT, 0x083A, 0x17 },
+ { IMX_8BIT, 0x083B, 0x02 },
+
+ /* Integration Time Setting */
+ { IMX_8BIT, 0x0202, 0x09 },
+ { IMX_8BIT, 0x0203, 0xD2 },
+
+ /* HDR Setting */
+ { IMX_8BIT, 0x0230, 0x00 },
+ { IMX_8BIT, 0x0231, 0x00 },
+ { IMX_8BIT, 0x0233, 0x00 },
+ { IMX_8BIT, 0x0234, 0x00 },
+ { IMX_8BIT, 0x0235, 0x40 },
+ { IMX_8BIT, 0x0238, 0x00 },
+ { IMX_8BIT, 0x0239, 0x04 },
+ { IMX_8BIT, 0x023B, 0x00 },
+ { IMX_8BIT, 0x023C, 0x01 },
+ { IMX_8BIT, 0x33B0, 0x04 },
+ { IMX_8BIT, 0x33B1, 0x00 },
+ { IMX_8BIT, 0x33B3, 0x00 },
+ { IMX_8BIT, 0x33B4, 0x01 },
+ { IMX_8BIT, 0x3800, 0x00 },
+ { IMX_TOK_TERM, 0, 0 }
+};
+
+/* 4 lane 2336x1312, 30fps, for 1080p dvs, vendor provided */
+static struct imx_reg const imx134_2336_1312_30fps[] = {
+ GROUPED_PARAMETER_HOLD_ENABLE,
+ /* mode set clear */
+ { IMX_8BIT, 0x3A43, 0x01 },
+ /* Clock Setting */
+ { IMX_8BIT, 0x011E, 0x13 },
+ { IMX_8BIT, 0x011F, 0x33 },
+ { IMX_8BIT, 0x0301, 0x05 },
+ { IMX_8BIT, 0x0303, 0x01 },
+ { IMX_8BIT, 0x0305, 0x0C },
+ { IMX_8BIT, 0x0309, 0x05 },
+ { IMX_8BIT, 0x030B, 0x01 },
+ { IMX_8BIT, 0x030C, 0x01 },
+ { IMX_8BIT, 0x030D, 0xA9 },
+ { IMX_8BIT, 0x030E, 0x01 },
+ { IMX_8BIT, 0x3A06, 0x11 },
+
+ /* Mode setting */
+ { IMX_8BIT, 0x0108, 0x03 },
+ { IMX_8BIT, 0x0112, 0x0A },
+ { IMX_8BIT, 0x0113, 0x0A },
+ { IMX_8BIT, 0x0381, 0x01 },
+ { IMX_8BIT, 0x0383, 0x01 },
+ { IMX_8BIT, 0x0385, 0x01 },
+ { IMX_8BIT, 0x0387, 0x01 },
+ { IMX_8BIT, 0x0390, 0x00 }, /* disable binning */
+ { IMX_8BIT, 0x0391, 0x11 },
+ { IMX_8BIT, 0x0392, 0x00 },
+ { IMX_8BIT, 0x0401, 0x02 }, /* H/V resize */
+ { IMX_8BIT, 0x0404, 0x00 },
+ { IMX_8BIT, 0x0405, 0x16 }, /* down scaling = 16/22 = 8/11 */
+ { IMX_8BIT, 0x4082, 0x00 },
+ { IMX_8BIT, 0x4083, 0x00 },
+ { IMX_8BIT, 0x7006, 0x04 },
+
+ /* Optional Function setting */
+ { IMX_8BIT, 0x0700, 0x00 },
+ { IMX_8BIT, 0x3A63, 0x00 },
+ { IMX_8BIT, 0x4100, 0xF8 },
+ { IMX_8BIT, 0x4203, 0xFF },
+ { IMX_8BIT, 0x4344, 0x00 },
+ { IMX_8BIT, 0x441C, 0x01 },
+
+ /* Size setting */
+ { IMX_8BIT, 0x0344, 0x00 }, /* x_addr_start[15:8]:34 */
+ { IMX_8BIT, 0x0345, 0x22 }, /* x_addr_start[7:0] */
+ { IMX_8BIT, 0x0346, 0x01 }, /* y_addr_start[15:8]:332 */
+ { IMX_8BIT, 0x0347, 0x4C }, /* y_addr_start[7:0] */
+ { IMX_8BIT, 0x0348, 0x0C }, /* x_addr_end[15:8]:3245 */
+ { IMX_8BIT, 0x0349, 0xAD }, /* x_addr_end[7:0] */
+ { IMX_8BIT, 0x034A, 0x08 }, /* y_addr_end[15:8]:2135 */
+ { IMX_8BIT, 0x034B, 0x57 }, /* y_addr_end[7:0] */
+ { IMX_8BIT, 0x034C, 0x09 }, /* x_output_size[15:8]:2336 */
+ { IMX_8BIT, 0x034D, 0x20 }, /* x_output_size[7:0] */
+ { IMX_8BIT, 0x034E, 0x05 }, /* y_output_size[15:8]:1312 */
+ { IMX_8BIT, 0x034F, 0x20 }, /* y_output_size[7:0] */
+ { IMX_8BIT, 0x0350, 0x00 },
+ { IMX_8BIT, 0x0351, 0x00 },
+ { IMX_8BIT, 0x0352, 0x00 },
+ { IMX_8BIT, 0x0353, 0x00 },
+ { IMX_8BIT, 0x0354, 0x0C },
+ { IMX_8BIT, 0x0355, 0x8C },
+ { IMX_8BIT, 0x0356, 0x07 },
+ { IMX_8BIT, 0x0357, 0x0C },
+ { IMX_8BIT, 0x301D, 0x30 },
+ { IMX_8BIT, 0x3310, 0x09 },
+ { IMX_8BIT, 0x3311, 0x20 },
+ { IMX_8BIT, 0x3312, 0x05 },
+ { IMX_8BIT, 0x3313, 0x20 },
+ { IMX_8BIT, 0x331C, 0x03 },
+ { IMX_8BIT, 0x331D, 0xEB },
+ { IMX_8BIT, 0x4084, 0x09 },
+ { IMX_8BIT, 0x4085, 0x20 },
+ { IMX_8BIT, 0x4086, 0x05 },
+ { IMX_8BIT, 0x4087, 0x20 },
+ { IMX_8BIT, 0x4400, 0x00 },
+
+ /* Global Timing Setting */
+ { IMX_8BIT, 0x0830, 0x77 },
+ { IMX_8BIT, 0x0831, 0x2F },
+ { IMX_8BIT, 0x0832, 0x4F },
+ { IMX_8BIT, 0x0833, 0x37 },
+ { IMX_8BIT, 0x0834, 0x2F },
+ { IMX_8BIT, 0x0835, 0x37 },
+ { IMX_8BIT, 0x0836, 0xAF },
+ { IMX_8BIT, 0x0837, 0x37 },
+ { IMX_8BIT, 0x0839, 0x1F },
+ { IMX_8BIT, 0x083A, 0x17 },
+ { IMX_8BIT, 0x083B, 0x02 },
+
+ /* Integration Time Setting */
+ { IMX_8BIT, 0x0202, 0x09 },
+ { IMX_8BIT, 0x0203, 0xD2 },
+
+ /* HDR Setting */
+ { IMX_8BIT, 0x0230, 0x00 },
+ { IMX_8BIT, 0x0231, 0x00 },
+ { IMX_8BIT, 0x0233, 0x00 },
+ { IMX_8BIT, 0x0234, 0x00 },
+ { IMX_8BIT, 0x0235, 0x40 },
+ { IMX_8BIT, 0x0238, 0x00 },
+ { IMX_8BIT, 0x0239, 0x04 },
+ { IMX_8BIT, 0x023B, 0x00 },
+ { IMX_8BIT, 0x023C, 0x01 },
+ { IMX_8BIT, 0x33B0, 0x04 },
+ { IMX_8BIT, 0x33B1, 0x00 },
+ { IMX_8BIT, 0x33B3, 0x00 },
+ { IMX_8BIT, 0x33B4, 0x01 },
+ { IMX_8BIT, 0x3800, 0x00 },
+ { IMX_TOK_TERM, 0, 0 }
+};
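+
+/*
+ * Scaling check for the 2336x1312 dvs mode above: the 3212 x 1804 crop
+ * (0x0C8C x 0x070C) scaled by 16/22 (0x0405 = 0x16) gives exactly
+ * 2336 x 1312.
+ */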
+
+/* 4 lane 1936x1096, 30fps, for 1080p still capture */
+static struct imx_reg const imx134_1936_1096_30fps_v1[] = {
+ GROUPED_PARAMETER_HOLD_ENABLE,
+ /* mode set clear */
+ { IMX_8BIT, 0x3A43, 0x01 },
+ /* Clock Setting */
+ { IMX_8BIT, 0x011E, 0x13 },
+ { IMX_8BIT, 0x011F, 0x33 },
+ { IMX_8BIT, 0x0301, 0x05 },
+ { IMX_8BIT, 0x0303, 0x01 },
+ { IMX_8BIT, 0x0305, 0x0C },
+ { IMX_8BIT, 0x0309, 0x05 },
+ { IMX_8BIT, 0x030B, 0x01 },
+ { IMX_8BIT, 0x030C, 0x01 },
+ { IMX_8BIT, 0x030D, 0xA9 },
+ { IMX_8BIT, 0x030E, 0x01 },
+ { IMX_8BIT, 0x3A06, 0x11 },
+
+ /* Mode setting */
+ { IMX_8BIT, 0x0108, 0x03 },
+ { IMX_8BIT, 0x0112, 0x0A },
+ { IMX_8BIT, 0x0113, 0x0A },
+ { IMX_8BIT, 0x0381, 0x01 },
+ { IMX_8BIT, 0x0383, 0x01 },
+ { IMX_8BIT, 0x0385, 0x01 },
+ { IMX_8BIT, 0x0387, 0x01 },
+ { IMX_8BIT, 0x0390, 0x00 }, /* disable binning */
+ { IMX_8BIT, 0x0391, 0x11 }, /* no binning */
+ { IMX_8BIT, 0x0392, 0x00 },
+ { IMX_8BIT, 0x0401, 0x02 }, /* H/V resize */
+ { IMX_8BIT, 0x0404, 0x00 },
+ { IMX_8BIT, 0x0405, 0x1A }, /* downscaling 16/26*/
+ { IMX_8BIT, 0x4082, 0x00 },
+ { IMX_8BIT, 0x4083, 0x00 },
+ { IMX_8BIT, 0x7006, 0x04 },
+
+ /* Optional Function setting */
+ { IMX_8BIT, 0x0700, 0x00 },
+ { IMX_8BIT, 0x3A63, 0x00 },
+ { IMX_8BIT, 0x4100, 0xF8 },
+ { IMX_8BIT, 0x4203, 0xFF },
+ { IMX_8BIT, 0x4344, 0x00 },
+ { IMX_8BIT, 0x441C, 0x01 },
+
+ /* Size setting */
+ { IMX_8BIT, 0x0344, 0x00 }, /* x_addr_start[15:8]:64 */
+ { IMX_8BIT, 0x0345, 0x40 }, /* x_addr_start[7:0] */
+ { IMX_8BIT, 0x0346, 0x01 }, /* y_addr_start[15:8]:340 */
+ { IMX_8BIT, 0x0347, 0x54 }, /* y_addr_start[7:0] */
+ { IMX_8BIT, 0x0348, 0x0C }, /* x_addr_end[15:8]:3209 */
+ { IMX_8BIT, 0x0349, 0x89 }, /* x_addr_end[7:0] */
+ { IMX_8BIT, 0x034A, 0x08 }, /* y_addr_end[15:8]:2121 */
+ { IMX_8BIT, 0x034B, 0x49 }, /* y_addr_end[7:0] */
+ { IMX_8BIT, 0x034C, 0x07 }, /* x_output_size[15:8]:1936 */
+ { IMX_8BIT, 0x034D, 0x90 }, /* x_output_size[7:0] */
+ { IMX_8BIT, 0x034E, 0x04 }, /* y_output_size[15:8]:1096 */
+ { IMX_8BIT, 0x034F, 0x48 }, /* y_output_size[7:0] */
+ { IMX_8BIT, 0x0350, 0x00 },
+ { IMX_8BIT, 0x0351, 0x00 },
+ { IMX_8BIT, 0x0352, 0x00 },
+ { IMX_8BIT, 0x0353, 0x00 },
+ { IMX_8BIT, 0x0354, 0x0C }, /* crop x:3146 */
+ { IMX_8BIT, 0x0355, 0x4A },
+ { IMX_8BIT, 0x0356, 0x06 }, /* crop y:1782 */
+ { IMX_8BIT, 0x0357, 0xF6 },
+ { IMX_8BIT, 0x301D, 0x30 },
+ { IMX_8BIT, 0x3310, 0x07 },
+ { IMX_8BIT, 0x3311, 0x80 },
+ { IMX_8BIT, 0x3312, 0x04 },
+ { IMX_8BIT, 0x3313, 0x38 },
+ { IMX_8BIT, 0x331C, 0x04 },
+ { IMX_8BIT, 0x331D, 0x1E },
+ { IMX_8BIT, 0x4084, 0x07 },
+ { IMX_8BIT, 0x4085, 0x80 },
+ { IMX_8BIT, 0x4086, 0x04 },
+ { IMX_8BIT, 0x4087, 0x38 },
+ { IMX_8BIT, 0x4400, 0x00 },
+
+ /* Global Timing Setting */
+ { IMX_8BIT, 0x0830, 0x77 },
+ { IMX_8BIT, 0x0831, 0x2F },
+ { IMX_8BIT, 0x0832, 0x4F },
+ { IMX_8BIT, 0x0833, 0x37 },
+ { IMX_8BIT, 0x0834, 0x2F },
+ { IMX_8BIT, 0x0835, 0x37 },
+ { IMX_8BIT, 0x0836, 0xAF },
+ { IMX_8BIT, 0x0837, 0x37 },
+ { IMX_8BIT, 0x0839, 0x1F },
+ { IMX_8BIT, 0x083A, 0x17 },
+ { IMX_8BIT, 0x083B, 0x02 },
+
+ /* Integration Time Setting */
+ { IMX_8BIT, 0x0202, 0x09 },
+ { IMX_8BIT, 0x0203, 0xD2 },
+
+ /* HDR Setting */
+ { IMX_8BIT, 0x0230, 0x00 },
+ { IMX_8BIT, 0x0231, 0x00 },
+ { IMX_8BIT, 0x0233, 0x00 },
+ { IMX_8BIT, 0x0234, 0x00 },
+ { IMX_8BIT, 0x0235, 0x40 },
+ { IMX_8BIT, 0x0238, 0x00 },
+ { IMX_8BIT, 0x0239, 0x04 },
+ { IMX_8BIT, 0x023B, 0x00 },
+ { IMX_8BIT, 0x023C, 0x01 },
+ { IMX_8BIT, 0x33B0, 0x04 },
+ { IMX_8BIT, 0x33B1, 0x00 },
+ { IMX_8BIT, 0x33B3, 0x00 },
+ { IMX_8BIT, 0x33B4, 0x01 },
+ { IMX_8BIT, 0x3800, 0x00 },
+ { IMX_TOK_TERM, 0, 0 }
+};
+
+/* 4 lane 1936x1096, 30fps, for 1080p still capture, vendor provided */
+static struct imx_reg const imx134_1936_1096_30fps_v2[] = {
+ GROUPED_PARAMETER_HOLD_ENABLE,
+ /* mode set clear */
+ { IMX_8BIT, 0x3A43, 0x01 },
+ /* Clock Setting */
+ { IMX_8BIT, 0x011E, 0x13 },
+ { IMX_8BIT, 0x011F, 0x33 },
+ { IMX_8BIT, 0x0301, 0x05 },
+ { IMX_8BIT, 0x0303, 0x01 },
+ { IMX_8BIT, 0x0305, 0x0C },
+ { IMX_8BIT, 0x0309, 0x05 },
+ { IMX_8BIT, 0x030B, 0x01 },
+ { IMX_8BIT, 0x030C, 0x01 },
+ { IMX_8BIT, 0x030D, 0xA9 },
+ { IMX_8BIT, 0x030E, 0x01 },
+ { IMX_8BIT, 0x3A06, 0x11 },
+
+ /* Mode setting */
+ { IMX_8BIT, 0x0108, 0x03 },
+ { IMX_8BIT, 0x0112, 0x0A },
+ { IMX_8BIT, 0x0113, 0x0A },
+ { IMX_8BIT, 0x0381, 0x01 },
+ { IMX_8BIT, 0x0383, 0x01 },
+ { IMX_8BIT, 0x0385, 0x01 },
+ { IMX_8BIT, 0x0387, 0x01 },
+ { IMX_8BIT, 0x0390, 0x00 }, /* disable binning */
+ { IMX_8BIT, 0x0391, 0x11 },
+ { IMX_8BIT, 0x0392, 0x00 },
+ { IMX_8BIT, 0x0401, 0x02 }, /* H/V resize */
+ { IMX_8BIT, 0x0404, 0x00 },
+ { IMX_8BIT, 0x0405, 0x1B }, /* downscaling 16/27*/
+ { IMX_8BIT, 0x4082, 0x00 },
+ { IMX_8BIT, 0x4083, 0x00 },
+ { IMX_8BIT, 0x7006, 0x04 },
+
+ /* Optional Function setting */
+ { IMX_8BIT, 0x0700, 0x00 },
+ { IMX_8BIT, 0x3A63, 0x00 },
+ { IMX_8BIT, 0x4100, 0xF8 },
+ { IMX_8BIT, 0x4203, 0xFF },
+ { IMX_8BIT, 0x4344, 0x00 },
+ { IMX_8BIT, 0x441C, 0x01 },
+
+ /* Size setting */
+ { IMX_8BIT, 0x0344, 0x00 }, /* x_addr_start[15:8]:6 */
+ { IMX_8BIT, 0x0345, 0x06 }, /* x_addr_start[7:0] */
+ { IMX_8BIT, 0x0346, 0x01 }, /* y_addr_start[15:8]:308 */
+ { IMX_8BIT, 0x0347, 0x34 }, /* y_addr_start[7:0] */
+ { IMX_8BIT, 0x0348, 0x0C }, /* x_addr_end[15:8]:3273 */
+ { IMX_8BIT, 0x0349, 0xC9 }, /* x_addr_end[7:0] */
+ { IMX_8BIT, 0x034A, 0x08 }, /* y_addr_end[15:8]:2159 */
+ { IMX_8BIT, 0x034B, 0x6F }, /* y_addr_end[7:0] */
+ { IMX_8BIT, 0x034C, 0x07 }, /* x_output_size[15:8]:1936 */
+ { IMX_8BIT, 0x034D, 0x90 }, /* x_output_size[7:0] */
+ { IMX_8BIT, 0x034E, 0x04 }, /* y_output_size[15:8]:1096 */
+ { IMX_8BIT, 0x034F, 0x48 }, /* y_output_size[7:0] */
+ { IMX_8BIT, 0x0350, 0x00 },
+ { IMX_8BIT, 0x0351, 0x00 },
+ { IMX_8BIT, 0x0352, 0x00 },
+ { IMX_8BIT, 0x0353, 0x00 },
+ { IMX_8BIT, 0x0354, 0x0C }, /* crop x:3268 */
+ { IMX_8BIT, 0x0355, 0xC4 },
+ { IMX_8BIT, 0x0356, 0x07 }, /* crop y:1850 */
+ { IMX_8BIT, 0x0357, 0x3A },
+ { IMX_8BIT, 0x301D, 0x30 },
+ { IMX_8BIT, 0x3310, 0x07 }, /* determined by mode and output size */
+ { IMX_8BIT, 0x3311, 0x90 },
+ { IMX_8BIT, 0x3312, 0x04 },
+ { IMX_8BIT, 0x3313, 0x48 },
+ { IMX_8BIT, 0x331C, 0x04 },
+ { IMX_8BIT, 0x331D, 0x1E },
+ { IMX_8BIT, 0x4084, 0x07 },
+ { IMX_8BIT, 0x4085, 0x90 },
+ { IMX_8BIT, 0x4086, 0x04 },
+ { IMX_8BIT, 0x4087, 0x48 },
+ { IMX_8BIT, 0x4400, 0x00 },
+
+ /* Global Timing Setting */
+ { IMX_8BIT, 0x0830, 0x77 },
+ { IMX_8BIT, 0x0831, 0x2F },
+ { IMX_8BIT, 0x0832, 0x4F },
+ { IMX_8BIT, 0x0833, 0x37 },
+ { IMX_8BIT, 0x0834, 0x2F },
+ { IMX_8BIT, 0x0835, 0x37 },
+ { IMX_8BIT, 0x0836, 0xAF },
+ { IMX_8BIT, 0x0837, 0x37 },
+ { IMX_8BIT, 0x0839, 0x1F },
+ { IMX_8BIT, 0x083A, 0x17 },
+ { IMX_8BIT, 0x083B, 0x02 },
+
+ /* Integration Time Setting */
+ { IMX_8BIT, 0x0202, 0x09 },
+ { IMX_8BIT, 0x0203, 0xD2 },
+
+ /* HDR Setting */
+ { IMX_8BIT, 0x0230, 0x00 },
+ { IMX_8BIT, 0x0231, 0x00 },
+ { IMX_8BIT, 0x0233, 0x00 },
+ { IMX_8BIT, 0x0234, 0x00 },
+ { IMX_8BIT, 0x0235, 0x40 },
+ { IMX_8BIT, 0x0238, 0x00 },
+ { IMX_8BIT, 0x0239, 0x04 },
+ { IMX_8BIT, 0x023B, 0x00 },
+ { IMX_8BIT, 0x023C, 0x01 },
+ { IMX_8BIT, 0x33B0, 0x04 },
+ { IMX_8BIT, 0x33B1, 0x00 },
+ { IMX_8BIT, 0x33B3, 0x00 },
+ { IMX_8BIT, 0x33B4, 0x01 },
+ { IMX_8BIT, 0x3800, 0x00 },
+ { IMX_TOK_TERM, 0, 0 }
+};
+
+/* 4 lane 1296x736, 30fps, for 720p still capture, vendor provided */
+static struct imx_reg const imx134_1296_736_30fps_v2[] = {
+ GROUPED_PARAMETER_HOLD_ENABLE,
+ /* mode set clear */
+ { IMX_8BIT, 0x3A43, 0x01 },
+ /* Clock Setting */
+ { IMX_8BIT, 0x011E, 0x13 },
+ { IMX_8BIT, 0x011F, 0x33 },
+ { IMX_8BIT, 0x0301, 0x05 },
+ { IMX_8BIT, 0x0303, 0x01 },
+ { IMX_8BIT, 0x0305, 0x0C },
+ { IMX_8BIT, 0x0309, 0x05 },
+ { IMX_8BIT, 0x030B, 0x01 },
+ { IMX_8BIT, 0x030C, 0x01 },
+ { IMX_8BIT, 0x030D, 0xA9 },
+ { IMX_8BIT, 0x030E, 0x01 },
+ { IMX_8BIT, 0x3A06, 0x11 },
+
+ /* Mode setting */
+ { IMX_8BIT, 0x0108, 0x03 },
+ { IMX_8BIT, 0x0112, 0x0A },
+ { IMX_8BIT, 0x0113, 0x0A },
+ { IMX_8BIT, 0x0381, 0x01 },
+ { IMX_8BIT, 0x0383, 0x01 },
+ { IMX_8BIT, 0x0385, 0x01 },
+ { IMX_8BIT, 0x0387, 0x01 },
+ { IMX_8BIT, 0x0390, 0x01 }, /* binning */
+ { IMX_8BIT, 0x0391, 0x22 }, /* 2x2 binning */
+ { IMX_8BIT, 0x0392, 0x00 },
+ { IMX_8BIT, 0x0401, 0x02 }, /* H/V resize */
+ { IMX_8BIT, 0x0404, 0x00 },
+ { IMX_8BIT, 0x0405, 0x14 },
+ { IMX_8BIT, 0x4082, 0x00 },
+ { IMX_8BIT, 0x4083, 0x00 },
+ { IMX_8BIT, 0x7006, 0x04 },
+
+ /* Optional Function setting */
+ { IMX_8BIT, 0x0700, 0x00 },
+ { IMX_8BIT, 0x3A63, 0x00 },
+ { IMX_8BIT, 0x4100, 0xF8 },
+ { IMX_8BIT, 0x4203, 0xFF },
+ { IMX_8BIT, 0x4344, 0x00 },
+ { IMX_8BIT, 0x441C, 0x01 },
+
+ /* Size setting */
+ { IMX_8BIT, 0x0344, 0x00 }, /* x_addr_start[15:8]:20 */
+ { IMX_8BIT, 0x0345, 0x14 }, /* x_addr_start[7:0] */
+ { IMX_8BIT, 0x0346, 0x01 }, /* y_addr_start[15:8]:312 */
+ { IMX_8BIT, 0x0347, 0x38 }, /* y_addr_start[7:0] */
+ { IMX_8BIT, 0x0348, 0x0C }, /* x_addr_end[15:8]:3259 */
+ { IMX_8BIT, 0x0349, 0xBB }, /* x_addr_end[7:0] */
+ { IMX_8BIT, 0x034A, 0x08 }, /* y_addr_end[15:8]:2151 */
+ { IMX_8BIT, 0x034B, 0x67 }, /* y_addr_end[7:0] */
+ { IMX_8BIT, 0x034C, 0x05 }, /* x_output_size[15:8]:1296 */
+ { IMX_8BIT, 0x034D, 0x10 }, /* x_output_size[7:0] */
+ { IMX_8BIT, 0x034E, 0x02 }, /* y_output_size[15:8]:736 */
+ { IMX_8BIT, 0x034F, 0xE0 }, /* y_output_size[7:0] */
+ { IMX_8BIT, 0x0350, 0x00 },
+ { IMX_8BIT, 0x0351, 0x00 },
+ { IMX_8BIT, 0x0352, 0x00 },
+ { IMX_8BIT, 0x0353, 0x00 },
+ { IMX_8BIT, 0x0354, 0x06 },
+ { IMX_8BIT, 0x0355, 0x54 },
+ { IMX_8BIT, 0x0356, 0x03 },
+ { IMX_8BIT, 0x0357, 0x98 },
+ { IMX_8BIT, 0x301D, 0x30 },
+ { IMX_8BIT, 0x3310, 0x05 },
+ { IMX_8BIT, 0x3311, 0x10 },
+ { IMX_8BIT, 0x3312, 0x02 },
+ { IMX_8BIT, 0x3313, 0xE0 },
+ { IMX_8BIT, 0x331C, 0x01 },
+ { IMX_8BIT, 0x331D, 0x10 },
+ { IMX_8BIT, 0x4084, 0x05 },
+ { IMX_8BIT, 0x4085, 0x10 },
+ { IMX_8BIT, 0x4086, 0x02 },
+ { IMX_8BIT, 0x4087, 0xE0 },
+ { IMX_8BIT, 0x4400, 0x00 },
+
+ /* Global Timing Setting */
+ { IMX_8BIT, 0x0830, 0x77 },
+ { IMX_8BIT, 0x0831, 0x2F },
+ { IMX_8BIT, 0x0832, 0x4F },
+ { IMX_8BIT, 0x0833, 0x37 },
+ { IMX_8BIT, 0x0834, 0x2F },
+ { IMX_8BIT, 0x0835, 0x37 },
+ { IMX_8BIT, 0x0836, 0xAF },
+ { IMX_8BIT, 0x0837, 0x37 },
+ { IMX_8BIT, 0x0839, 0x1F },
+ { IMX_8BIT, 0x083A, 0x17 },
+ { IMX_8BIT, 0x083B, 0x02 },
+
+ /* Integration Time Setting */
+ { IMX_8BIT, 0x0202, 0x09 },
+ { IMX_8BIT, 0x0203, 0xD2 },
+
+ /* HDR Setting */
+ { IMX_8BIT, 0x0230, 0x00 },
+ { IMX_8BIT, 0x0231, 0x00 },
+ { IMX_8BIT, 0x0233, 0x00 },
+ { IMX_8BIT, 0x0234, 0x00 },
+ { IMX_8BIT, 0x0235, 0x40 },
+ { IMX_8BIT, 0x0238, 0x00 },
+ { IMX_8BIT, 0x0239, 0x04 },
+ { IMX_8BIT, 0x023B, 0x00 },
+ { IMX_8BIT, 0x023C, 0x01 },
+ { IMX_8BIT, 0x33B0, 0x04 },
+ { IMX_8BIT, 0x33B1, 0x00 },
+ { IMX_8BIT, 0x33B3, 0x00 },
+ { IMX_8BIT, 0x33B4, 0x01 },
+ { IMX_8BIT, 0x3800, 0x00 },
+ { IMX_TOK_TERM, 0, 0 }
+};
+
+/* 4 lane 1568x880, 30fps, for 720p dvs, vendor provided */
+static struct imx_reg const imx134_1568_880_30fps[] = {
+ GROUPED_PARAMETER_HOLD_ENABLE,
+ /* mode set clear */
+ { IMX_8BIT, 0x3A43, 0x01 },
+ /* Clock Setting */
+ { IMX_8BIT, 0x011E, 0x13 },
+ { IMX_8BIT, 0x011F, 0x33 },
+ { IMX_8BIT, 0x0301, 0x05 },
+ { IMX_8BIT, 0x0303, 0x01 },
+ { IMX_8BIT, 0x0305, 0x0C },
+ { IMX_8BIT, 0x0309, 0x05 },
+ { IMX_8BIT, 0x030B, 0x01 },
+ { IMX_8BIT, 0x030C, 0x01 },
+ { IMX_8BIT, 0x030D, 0xA9 },
+ { IMX_8BIT, 0x030E, 0x01 },
+ { IMX_8BIT, 0x3A06, 0x11 },
+
+ /* Mode setting */
+ { IMX_8BIT, 0x0108, 0x03 },
+ { IMX_8BIT, 0x0112, 0x0A },
+ { IMX_8BIT, 0x0113, 0x0A },
+ { IMX_8BIT, 0x0381, 0x01 },
+ { IMX_8BIT, 0x0383, 0x01 },
+ { IMX_8BIT, 0x0385, 0x01 },
+ { IMX_8BIT, 0x0387, 0x01 },
+ { IMX_8BIT, 0x0390, 0x01 }, /* binning*/
+ { IMX_8BIT, 0x0391, 0x22 }, /* 2x2 binning */
+ { IMX_8BIT, 0x0392, 0x00 },
+ { IMX_8BIT, 0x0401, 0x02 }, /* H/V resize */
+ { IMX_8BIT, 0x0404, 0x00 },
+ { IMX_8BIT, 0x0405, 0x10 }, /* down scaling 16/16 = 1 */
+ { IMX_8BIT, 0x4082, 0x01 },
+ { IMX_8BIT, 0x4083, 0x01 },
+ { IMX_8BIT, 0x7006, 0x04 },
+
+ /* Optional Function setting */
+ { IMX_8BIT, 0x0700, 0x00 },
+ { IMX_8BIT, 0x3A63, 0x00 },
+ { IMX_8BIT, 0x4100, 0xF8 },
+ { IMX_8BIT, 0x4203, 0xFF },
+ { IMX_8BIT, 0x4344, 0x00 },
+ { IMX_8BIT, 0x441C, 0x01 },
+
+ /* Size setting */
+ { IMX_8BIT, 0x0344, 0x00 }, /* x_addr_start[15:8]:72 */
+ { IMX_8BIT, 0x0345, 0x48 }, /* x_addr_start[7:0] */
+ { IMX_8BIT, 0x0346, 0x01 }, /* y_addr_start[15:8]:356 */
+ { IMX_8BIT, 0x0347, 0x64 }, /* y_addr_start[7:0] */
+ { IMX_8BIT, 0x0348, 0x0C }, /* x_addr_end[15:8]:3207 */
+ { IMX_8BIT, 0x0349, 0x87 }, /* x_addr_end[7:0] */
+ { IMX_8BIT, 0x034A, 0x08 }, /* y_addr_end[15:8]:2115 */
+ { IMX_8BIT, 0x034B, 0x43 }, /* y_addr_end[7:0] */
+ { IMX_8BIT, 0x034C, 0x06 }, /* x_output_size[15:8]:1568 */
+ { IMX_8BIT, 0x034D, 0x20 }, /* x_output_size[7:0] */
+ { IMX_8BIT, 0x034E, 0x03 }, /* y_output_size[15:8]:880 */
+ { IMX_8BIT, 0x034F, 0x70 }, /* y_output_size[7:0] */
+ { IMX_8BIT, 0x0350, 0x00 },
+ { IMX_8BIT, 0x0351, 0x00 },
+ { IMX_8BIT, 0x0352, 0x00 },
+ { IMX_8BIT, 0x0353, 0x00 },
+ { IMX_8BIT, 0x0354, 0x06 },
+ { IMX_8BIT, 0x0355, 0x20 },
+ { IMX_8BIT, 0x0356, 0x03 },
+ { IMX_8BIT, 0x0357, 0x70 },
+ { IMX_8BIT, 0x301D, 0x30 },
+ { IMX_8BIT, 0x3310, 0x06 },
+ { IMX_8BIT, 0x3311, 0x20 },
+ { IMX_8BIT, 0x3312, 0x03 },
+ { IMX_8BIT, 0x3313, 0x70 },
+ { IMX_8BIT, 0x331C, 0x03 },
+ { IMX_8BIT, 0x331D, 0xF2 },
+ { IMX_8BIT, 0x4084, 0x00 },
+ { IMX_8BIT, 0x4085, 0x00 },
+ { IMX_8BIT, 0x4086, 0x00 },
+ { IMX_8BIT, 0x4087, 0x00 },
+ { IMX_8BIT, 0x4400, 0x00 },
+
+ /* Global Timing Setting */
+ { IMX_8BIT, 0x0830, 0x77 },
+ { IMX_8BIT, 0x0831, 0x2F },
+ { IMX_8BIT, 0x0832, 0x4F },
+ { IMX_8BIT, 0x0833, 0x37 },
+ { IMX_8BIT, 0x0834, 0x2F },
+ { IMX_8BIT, 0x0835, 0x37 },
+ { IMX_8BIT, 0x0836, 0xAF },
+ { IMX_8BIT, 0x0837, 0x37 },
+ { IMX_8BIT, 0x0839, 0x1F },
+ { IMX_8BIT, 0x083A, 0x17 },
+ { IMX_8BIT, 0x083B, 0x02 },
+
+ /* Integration Time Setting */
+ { IMX_8BIT, 0x0202, 0x09 },
+ { IMX_8BIT, 0x0203, 0xD2 },
+
+ /* HDR Setting */
+ { IMX_8BIT, 0x0230, 0x00 },
+ { IMX_8BIT, 0x0231, 0x00 },
+ { IMX_8BIT, 0x0233, 0x00 },
+ { IMX_8BIT, 0x0234, 0x00 },
+ { IMX_8BIT, 0x0235, 0x40 },
+ { IMX_8BIT, 0x0238, 0x00 },
+ { IMX_8BIT, 0x0239, 0x04 },
+ { IMX_8BIT, 0x023B, 0x00 },
+ { IMX_8BIT, 0x023C, 0x01 },
+ { IMX_8BIT, 0x33B0, 0x04 },
+ { IMX_8BIT, 0x33B1, 0x00 },
+ { IMX_8BIT, 0x33B3, 0x00 },
+ { IMX_8BIT, 0x33B4, 0x01 },
+ { IMX_8BIT, 0x3800, 0x00 },
+ { IMX_TOK_TERM, 0, 0 }
+};
+
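+/*
+ * 1568x876, 60fps (per the table name); the clock value at 0x030D and the
+ * MIPI global timing block differ from the 30fps modes above.
+ */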
+static struct imx_reg const imx134_1568_876_60fps_0625[] = {
+ GROUPED_PARAMETER_HOLD_ENABLE,
+ /* mode set clear */
+ { IMX_8BIT, 0x3A43, 0x01 },
+ /* Clock Setting */
+ { IMX_8BIT, 0x011E, 0x13 },
+ { IMX_8BIT, 0x011F, 0x33 },
+ { IMX_8BIT, 0x0301, 0x05 },
+ { IMX_8BIT, 0x0303, 0x01 },
+ { IMX_8BIT, 0x0305, 0x0C },
+ { IMX_8BIT, 0x0309, 0x05 },
+ { IMX_8BIT, 0x030B, 0x01 },
+ { IMX_8BIT, 0x030C, 0x01 },
+ { IMX_8BIT, 0x030D, 0x8F },
+ { IMX_8BIT, 0x030E, 0x01 },
+ { IMX_8BIT, 0x3A06, 0x11 },
+
+ /* Mode setting */
+ { IMX_8BIT, 0x0108, 0x03 },
+ { IMX_8BIT, 0x0112, 0x0A },
+ { IMX_8BIT, 0x0113, 0x0A },
+ { IMX_8BIT, 0x0381, 0x01 },
+ { IMX_8BIT, 0x0383, 0x01 },
+ { IMX_8BIT, 0x0385, 0x01 },
+ { IMX_8BIT, 0x0387, 0x01 },
+ { IMX_8BIT, 0x0390, 0x01 }, /* binning*/
+ { IMX_8BIT, 0x0391, 0x22 }, /* 2x2 binning */
+ { IMX_8BIT, 0x0392, 0x00 },
+ { IMX_8BIT, 0x0401, 0x00 }, /* H/V resize */
+ { IMX_8BIT, 0x0404, 0x00 },
+ { IMX_8BIT, 0x0405, 0x10 }, /* down scaling 16/16 = 1 */
+ { IMX_8BIT, 0x4082, 0x01 },
+ { IMX_8BIT, 0x4083, 0x01 },
+ { IMX_8BIT, 0x7006, 0x04 },
+
+ /* Optional Function setting */
+ { IMX_8BIT, 0x0700, 0x00 },
+ { IMX_8BIT, 0x3A63, 0x00 },
+ { IMX_8BIT, 0x4100, 0xF8 },
+ { IMX_8BIT, 0x4203, 0xFF },
+ { IMX_8BIT, 0x4344, 0x00 },
+ { IMX_8BIT, 0x441C, 0x01 },
+
+ /* Size setting */
+ { IMX_8BIT, 0x0344, 0x00 }, /* x_addr_start[15:8]:72 */
+ { IMX_8BIT, 0x0345, 0x48 }, /* x_addr_start[7:0] */
+ { IMX_8BIT, 0x0346, 0x01 }, /* y_addr_start[15:8]:356 */
+ { IMX_8BIT, 0x0347, 0x64 }, /* y_addr_start[7:0] */
+ { IMX_8BIT, 0x0348, 0x0C }, /* x_addr_end[15:8]:3207 */
+ { IMX_8BIT, 0x0349, 0x87 }, /* x_addr_end[7:0] */
+ { IMX_8BIT, 0x034A, 0x08 }, /* y_addr_end[15:8]:2107 */
+ { IMX_8BIT, 0x034B, 0x3B }, /* y_addr_end[7:0] */
+ { IMX_8BIT, 0x034C, 0x06 }, /* x_output_size[15:8]:1568 */
+ { IMX_8BIT, 0x034D, 0x20 }, /* x_output_size[7:0] */
+ { IMX_8BIT, 0x034E, 0x03 }, /* y_output_size[15:8]:876 */
+ { IMX_8BIT, 0x034F, 0x6C }, /* y_output_size[7:0] */
+ { IMX_8BIT, 0x0350, 0x00 },
+ { IMX_8BIT, 0x0351, 0x00 },
+ { IMX_8BIT, 0x0352, 0x00 },
+ { IMX_8BIT, 0x0353, 0x00 },
+ { IMX_8BIT, 0x0354, 0x06 },
+ { IMX_8BIT, 0x0355, 0x20 },
+ { IMX_8BIT, 0x0356, 0x03 },
+ { IMX_8BIT, 0x0357, 0x6C },
+ { IMX_8BIT, 0x301D, 0x30 },
+ { IMX_8BIT, 0x3310, 0x06 },
+ { IMX_8BIT, 0x3311, 0x20 },
+ { IMX_8BIT, 0x3312, 0x03 },
+ { IMX_8BIT, 0x3313, 0x6C },
+ { IMX_8BIT, 0x331C, 0x03 },
+ { IMX_8BIT, 0x331D, 0xF2 },
+ { IMX_8BIT, 0x4084, 0x00 },
+ { IMX_8BIT, 0x4085, 0x00 },
+ { IMX_8BIT, 0x4086, 0x00 },
+ { IMX_8BIT, 0x4087, 0x00 },
+ { IMX_8BIT, 0x4400, 0x00 },
+
+ /* Global Timing Setting */
+ { IMX_8BIT, 0x0830, 0x6F },
+ { IMX_8BIT, 0x0831, 0x27 },
+ { IMX_8BIT, 0x0832, 0x4F },
+ { IMX_8BIT, 0x0833, 0x2F },
+ { IMX_8BIT, 0x0834, 0x2F },
+ { IMX_8BIT, 0x0835, 0x2F },
+ { IMX_8BIT, 0x0836, 0x9F },
+ { IMX_8BIT, 0x0837, 0x37 },
+ { IMX_8BIT, 0x0839, 0x1F },
+ { IMX_8BIT, 0x083A, 0x17 },
+ { IMX_8BIT, 0x083B, 0x02 },
+
+ /* Integration Time Setting */
+ { IMX_8BIT, 0x0202, 0x09 },
+ { IMX_8BIT, 0x0203, 0xD2 },
+
+ /* HDR Setting */
+ { IMX_8BIT, 0x0230, 0x00 },
+ { IMX_8BIT, 0x0231, 0x00 },
+ { IMX_8BIT, 0x0233, 0x00 },
+ { IMX_8BIT, 0x0234, 0x00 },
+ { IMX_8BIT, 0x0235, 0x40 },
+ { IMX_8BIT, 0x0238, 0x00 },
+ { IMX_8BIT, 0x0239, 0x04 },
+ { IMX_8BIT, 0x023B, 0x00 },
+ { IMX_8BIT, 0x023C, 0x01 },
+ { IMX_8BIT, 0x33B0, 0x04 },
+ { IMX_8BIT, 0x33B1, 0x00 },
+ { IMX_8BIT, 0x33B3, 0x00 },
+ { IMX_8BIT, 0x33B4, 0x01 },
+ { IMX_8BIT, 0x3800, 0x00 },
+ { IMX_TOK_TERM, 0, 0 }
+};
+
+
+/* 4 lane for 720p dvs, vendor provided */
+static struct imx_reg const imx134_1568_880[] = {
+ GROUPED_PARAMETER_HOLD_ENABLE,
+ /* mode set clear */
+ { IMX_8BIT, 0x3A43, 0x01 },
+ /* Clock Setting */
+ { IMX_8BIT, 0x011E, 0x13 },
+ { IMX_8BIT, 0x011F, 0x33 },
+ { IMX_8BIT, 0x0301, 0x05 },
+ { IMX_8BIT, 0x0303, 0x01 },
+ { IMX_8BIT, 0x0305, 0x0C },
+ { IMX_8BIT, 0x0309, 0x05 },
+ { IMX_8BIT, 0x030B, 0x01 },
+ { IMX_8BIT, 0x030C, 0x01 },
+ { IMX_8BIT, 0x030D, 0xC8 },
+ { IMX_8BIT, 0x030E, 0x01 },
+ { IMX_8BIT, 0x3A06, 0x11 },
+
+ /* Mode setting */
+ { IMX_8BIT, 0x0108, 0x03 },
+ { IMX_8BIT, 0x0112, 0x0A },
+ { IMX_8BIT, 0x0113, 0x0A },
+ { IMX_8BIT, 0x0381, 0x01 },
+ { IMX_8BIT, 0x0383, 0x01 },
+ { IMX_8BIT, 0x0385, 0x01 },
+ { IMX_8BIT, 0x0387, 0x01 },
+ { IMX_8BIT, 0x0390, 0x01 }, /* binning*/
+ { IMX_8BIT, 0x0391, 0x22 }, /* 2x2 binning */
+ { IMX_8BIT, 0x0392, 0x00 },
+ { IMX_8BIT, 0x0401, 0x00 }, /* H/V resize */
+ { IMX_8BIT, 0x0404, 0x00 },
+ { IMX_8BIT, 0x0405, 0x10 }, /* down scaling 16/16 = 1 */
+ { IMX_8BIT, 0x4082, 0x01 },
+ { IMX_8BIT, 0x4083, 0x01 },
+ { IMX_8BIT, 0x7006, 0x04 },
+
+ /* Optional Function setting */
+ { IMX_8BIT, 0x0700, 0x00 },
+ { IMX_8BIT, 0x3A63, 0x00 },
+ { IMX_8BIT, 0x4100, 0xF8 },
+ { IMX_8BIT, 0x4203, 0xFF },
+ { IMX_8BIT, 0x4344, 0x00 },
+ { IMX_8BIT, 0x441C, 0x01 },
+
+ /* Size setting */
+ { IMX_8BIT, 0x0344, 0x00 }, /* x_addr_start[15:8]:72 */
+ { IMX_8BIT, 0x0345, 0x48 }, /* x_addr_start[7:0] */
+ { IMX_8BIT, 0x0346, 0x01 }, /* y_addr_start[15:8]:356 */
+ { IMX_8BIT, 0x0347, 0x64 }, /* y_addr_start[7:0] */
+ { IMX_8BIT, 0x0348, 0x0C }, /* x_addr_end[15:8]:3207 */
+ { IMX_8BIT, 0x0349, 0x87 }, /* x_addr_end[7:0] */
+ { IMX_8BIT, 0x034A, 0x08 }, /* y_addr_end[15:8]:2115 */
+ { IMX_8BIT, 0x034B, 0x43 }, /* y_addr_end[7:0] */
+ { IMX_8BIT, 0x034C, 0x06 }, /* x_output_size[15:8]:1568 */
+ { IMX_8BIT, 0x034D, 0x20 }, /* x_output_size[7:0] */
+ { IMX_8BIT, 0x034E, 0x03 }, /* y_output_size[15:8]:880 */
+ { IMX_8BIT, 0x034F, 0x70 }, /* y_output_size[7:0] */
+ { IMX_8BIT, 0x0350, 0x00 },
+ { IMX_8BIT, 0x0351, 0x00 },
+ { IMX_8BIT, 0x0352, 0x00 },
+ { IMX_8BIT, 0x0353, 0x00 },
+ { IMX_8BIT, 0x0354, 0x06 },
+ { IMX_8BIT, 0x0355, 0x20 },
+ { IMX_8BIT, 0x0356, 0x03 },
+ { IMX_8BIT, 0x0357, 0x70 },
+ { IMX_8BIT, 0x301D, 0x30 },
+ { IMX_8BIT, 0x3310, 0x06 },
+ { IMX_8BIT, 0x3311, 0x20 },
+ { IMX_8BIT, 0x3312, 0x03 },
+ { IMX_8BIT, 0x3313, 0x70 },
+ { IMX_8BIT, 0x331C, 0x03 },
+ { IMX_8BIT, 0x331D, 0xF2 },
+ { IMX_8BIT, 0x4084, 0x00 },
+ { IMX_8BIT, 0x4085, 0x00 },
+ { IMX_8BIT, 0x4086, 0x00 },
+ { IMX_8BIT, 0x4087, 0x00 },
+ { IMX_8BIT, 0x4400, 0x00 },
+
+ /* Global Timing Setting */
+ { IMX_8BIT, 0x0830, 0x77 },
+ { IMX_8BIT, 0x0831, 0x2F },
+ { IMX_8BIT, 0x0832, 0x5F },
+ { IMX_8BIT, 0x0833, 0x37 },
+ { IMX_8BIT, 0x0834, 0x37 },
+ { IMX_8BIT, 0x0835, 0x37 },
+ { IMX_8BIT, 0x0836, 0xBF },
+ { IMX_8BIT, 0x0837, 0x3F },
+ { IMX_8BIT, 0x0839, 0x1F },
+ { IMX_8BIT, 0x083A, 0x17 },
+ { IMX_8BIT, 0x083B, 0x02 },
+
+
+ /* Integration Time Setting */
+ { IMX_8BIT, 0x0202, 0x09 },
+ { IMX_8BIT, 0x0203, 0xD2 },
+
+ /* HDR Setting */
+ { IMX_8BIT, 0x0230, 0x00 },
+ { IMX_8BIT, 0x0231, 0x00 },
+ { IMX_8BIT, 0x0233, 0x00 },
+ { IMX_8BIT, 0x0234, 0x00 },
+ { IMX_8BIT, 0x0235, 0x40 },
+ { IMX_8BIT, 0x0238, 0x00 },
+ { IMX_8BIT, 0x0239, 0x04 },
+ { IMX_8BIT, 0x023B, 0x00 },
+ { IMX_8BIT, 0x023C, 0x01 },
+ { IMX_8BIT, 0x33B0, 0x04 },
+ { IMX_8BIT, 0x33B1, 0x00 },
+ { IMX_8BIT, 0x33B3, 0x00 },
+ { IMX_8BIT, 0x33B4, 0x01 },
+ { IMX_8BIT, 0x3800, 0x00 },
+ { IMX_TOK_TERM, 0, 0 }
+};
+
+/* 4 lane for 480p dvs, default 60fps, vendor provided */
+static struct imx_reg const imx134_880_592[] = {
+ GROUPED_PARAMETER_HOLD_ENABLE,
+ /* mode set clear */
+ { IMX_8BIT, 0x3A43, 0x01 },
+ /* Clock Setting */
+ { IMX_8BIT, 0x011E, 0x13 },
+ { IMX_8BIT, 0x011F, 0x33 },
+ { IMX_8BIT, 0x0301, 0x05 },
+ { IMX_8BIT, 0x0303, 0x01 },
+ { IMX_8BIT, 0x0305, 0x0C },
+ { IMX_8BIT, 0x0309, 0x05 },
+ { IMX_8BIT, 0x030B, 0x01 },
+ { IMX_8BIT, 0x030C, 0x01 },
+ { IMX_8BIT, 0x030D, 0xC8 },
+ { IMX_8BIT, 0x030E, 0x01 },
+ { IMX_8BIT, 0x3A06, 0x11 },
+
+ /* Mode setting */
+ { IMX_8BIT, 0x0108, 0x03 },
+ { IMX_8BIT, 0x0112, 0x0A },
+ { IMX_8BIT, 0x0113, 0x0A },
+ { IMX_8BIT, 0x0381, 0x01 },
+ { IMX_8BIT, 0x0383, 0x01 },
+ { IMX_8BIT, 0x0385, 0x01 },
+ { IMX_8BIT, 0x0387, 0x01 },
+ { IMX_8BIT, 0x0390, 0x01 }, /* binning*/
+ { IMX_8BIT, 0x0391, 0x22 }, /* 2x2 binning */
+ { IMX_8BIT, 0x0392, 0x00 },
+ { IMX_8BIT, 0x0401, 0x02 }, /* H/V resize */
+ { IMX_8BIT, 0x0404, 0x00 },
+ { IMX_8BIT, 0x0405, 0x1D }, /* downscaling ratio = 16/29 */
+ { IMX_8BIT, 0x4082, 0x00 },
+ { IMX_8BIT, 0x4083, 0x00 },
+ { IMX_8BIT, 0x7006, 0x04 },
+
+ /* Optional function setting */
+ { IMX_8BIT, 0x0700, 0x00 },
+ { IMX_8BIT, 0x3A63, 0x00 },
+ { IMX_8BIT, 0x4100, 0xF8 },
+ { IMX_8BIT, 0x4203, 0xFF },
+ { IMX_8BIT, 0x4344, 0x00 },
+ { IMX_8BIT, 0x441C, 0x01 },
+
+ /* Size setting */
+ { IMX_8BIT, 0x0344, 0x00 }, /* x_addr_start[15:8]:44 */
+ { IMX_8BIT, 0x0345, 0x2C }, /* x_addr_start[7:0] */
+ { IMX_8BIT, 0x0346, 0x00 }, /* y_addr_start[15:8]:160 */
+ { IMX_8BIT, 0x0347, 0xA0 }, /* y_addr_start[7:0] */
+ { IMX_8BIT, 0x0348, 0x0C }, /* x_addr_end[15:8]:3235 */
+ { IMX_8BIT, 0x0349, 0xA3 }, /* x_addr_end[7:0] */
+ { IMX_8BIT, 0x034A, 0x09 }, /* y_addr_end[15:8]:2307 */
+ { IMX_8BIT, 0x034B, 0x03 }, /* y_addr_end[7:0] */
+ { IMX_8BIT, 0x034C, 0x03 }, /* x_output_size[15:8]:880 */
+ { IMX_8BIT, 0x034D, 0x70 }, /* x_output_size[7:0] */
+ { IMX_8BIT, 0x034E, 0x02 }, /* y_output_size[15:8]:592 */
+ { IMX_8BIT, 0x034F, 0x50 }, /* y_output_size[7:0] */
+ { IMX_8BIT, 0x0350, 0x00 },
+ { IMX_8BIT, 0x0351, 0x00 },
+ { IMX_8BIT, 0x0352, 0x00 },
+ { IMX_8BIT, 0x0353, 0x00 },
+ { IMX_8BIT, 0x0354, 0x06 },
+ { IMX_8BIT, 0x0355, 0x3C },
+ { IMX_8BIT, 0x0356, 0x04 },
+ { IMX_8BIT, 0x0357, 0x32 },
+ { IMX_8BIT, 0x301D, 0x30 },
+ { IMX_8BIT, 0x3310, 0x03 },
+ { IMX_8BIT, 0x3311, 0x70 },
+ { IMX_8BIT, 0x3312, 0x02 },
+ { IMX_8BIT, 0x3313, 0x50 },
+ { IMX_8BIT, 0x331C, 0x04 },
+ { IMX_8BIT, 0x331D, 0x4C },
+ { IMX_8BIT, 0x4084, 0x03 },
+ { IMX_8BIT, 0x4085, 0x70 },
+ { IMX_8BIT, 0x4086, 0x02 },
+ { IMX_8BIT, 0x4087, 0x50 },
+ { IMX_8BIT, 0x4400, 0x00 },
+
+ /* Global Timing Setting */
+ { IMX_8BIT, 0x0830, 0x77 },
+ { IMX_8BIT, 0x0831, 0x2F },
+ { IMX_8BIT, 0x0832, 0x5F },
+ { IMX_8BIT, 0x0833, 0x37 },
+ { IMX_8BIT, 0x0834, 0x37 },
+ { IMX_8BIT, 0x0835, 0x37 },
+ { IMX_8BIT, 0x0836, 0xBF },
+ { IMX_8BIT, 0x0837, 0x3F },
+ { IMX_8BIT, 0x0839, 0x1F },
+ { IMX_8BIT, 0x083A, 0x17 },
+ { IMX_8BIT, 0x083B, 0x02 },
+
+
+ /* Integration Time Setting */
+ { IMX_8BIT, 0x0202, 0x05 },
+ { IMX_8BIT, 0x0203, 0x42 },
+
+ /* HDR Setting */
+ { IMX_8BIT, 0x0230, 0x00 },
+ { IMX_8BIT, 0x0231, 0x00 },
+ { IMX_8BIT, 0x0233, 0x00 },
+ { IMX_8BIT, 0x0234, 0x00 },
+ { IMX_8BIT, 0x0235, 0x40 },
+ { IMX_8BIT, 0x0238, 0x00 },
+ { IMX_8BIT, 0x0239, 0x04 },
+ { IMX_8BIT, 0x023B, 0x00 },
+ { IMX_8BIT, 0x023C, 0x01 },
+ { IMX_8BIT, 0x33B0, 0x04 },
+ { IMX_8BIT, 0x33B1, 0x00 },
+ { IMX_8BIT, 0x33B3, 0x00 },
+ { IMX_8BIT, 0x33B4, 0x01 },
+ { IMX_8BIT, 0x3800, 0x00 },
+ { IMX_TOK_TERM, 0, 0 }
+};
+static struct imx_reg const imx134_2336_1308_60fps[] = {
+ GROUPED_PARAMETER_HOLD_ENABLE,
+ /* mode set clear */
+ { IMX_8BIT, 0x3A43, 0x01 },
+ /* Clock Setting */
+ { IMX_8BIT, 0x011E, 0x13 },
+ { IMX_8BIT, 0x011F, 0x33 },
+ { IMX_8BIT, 0x0301, 0x05 },
+ { IMX_8BIT, 0x0303, 0x01 },
+ { IMX_8BIT, 0x0305, 0x0C },
+ { IMX_8BIT, 0x0309, 0x05 },
+ { IMX_8BIT, 0x030B, 0x01 },
+ { IMX_8BIT, 0x030C, 0x01 },
+ { IMX_8BIT, 0x030D, 0xC8 },
+ { IMX_8BIT, 0x030E, 0x01 },
+ { IMX_8BIT, 0x3A06, 0x11 },
+
+ /* Mode setting */
+ { IMX_8BIT, 0x0108, 0x03 },
+ { IMX_8BIT, 0x0112, 0x0A },
+ { IMX_8BIT, 0x0113, 0x0A },
+ { IMX_8BIT, 0x0381, 0x01 },
+ { IMX_8BIT, 0x0383, 0x01 },
+ { IMX_8BIT, 0x0385, 0x01 },
+ { IMX_8BIT, 0x0387, 0x01 },
+ { IMX_8BIT, 0x0390, 0x00 }, /* binning disabled */
+ { IMX_8BIT, 0x0391, 0x11 }, /* no binning */
+ { IMX_8BIT, 0x0392, 0x00 },
+ { IMX_8BIT, 0x0401, 0x00 }, /* H/V resize */
+ { IMX_8BIT, 0x0404, 0x00 },
+ { IMX_8BIT, 0x0405, 0x10 }, /* down scaling 16/16 = 1 */
+ { IMX_8BIT, 0x4082, 0x01 },
+ { IMX_8BIT, 0x4083, 0x01 },
+ { IMX_8BIT, 0x7006, 0x04 },
+
+ /* Optional function setting */
+ { IMX_8BIT, 0x0700, 0x00 },
+ { IMX_8BIT, 0x3A63, 0x00 },
+ { IMX_8BIT, 0x4100, 0xF8 },
+ { IMX_8BIT, 0x4203, 0xFF },
+ { IMX_8BIT, 0x4344, 0x00 },
+ { IMX_8BIT, 0x441C, 0x01 },
+
+ /* Size setting */
+ { IMX_8BIT, 0x0344, 0x01 }, /* x_addr_start[15:8]:472 */
+ { IMX_8BIT, 0x0345, 0xD8 }, /* x_addr_start[7:0] */
+ { IMX_8BIT, 0x0346, 0x02 }, /* y_addr_start[15:8]:580 */
+ { IMX_8BIT, 0x0347, 0x44 }, /* y_addr_start[7:0] */
+ { IMX_8BIT, 0x0348, 0x0A }, /* x_addr_end[15:8]:2807 */
+ { IMX_8BIT, 0x0349, 0xF7 }, /* x_addr_end[7:0] */
+ { IMX_8BIT, 0x034A, 0x07 }, /* y_addr_end[15:8]:1891 */
+ { IMX_8BIT, 0x034B, 0x5F+4 }, /* y_addr_end[7:0] */
+ { IMX_8BIT, 0x034C, 0x09 }, /* x_output_size[15:8]:2336 */
+ { IMX_8BIT, 0x034D, 0x20 }, /* x_output_size[7:0] */
+ { IMX_8BIT, 0x034E, 0x05 }, /* y_output_size[15:8]:1312 */
+ { IMX_8BIT, 0x034F, 0x1C+4 }, /* y_output_size[7:0] */
+ { IMX_8BIT, 0x0350, 0x00 },
+ { IMX_8BIT, 0x0351, 0x00 },
+ { IMX_8BIT, 0x0352, 0x00 },
+ { IMX_8BIT, 0x0353, 0x00 },
+ { IMX_8BIT, 0x0354, 0x09 },
+ { IMX_8BIT, 0x0355, 0x20 },
+ { IMX_8BIT, 0x0356, 0x05 },
+ { IMX_8BIT, 0x0357, 0x1C+4 },
+ { IMX_8BIT, 0x301D, 0x30 },
+ { IMX_8BIT, 0x3310, 0x09 },
+ { IMX_8BIT, 0x3311, 0x20 },
+ { IMX_8BIT, 0x3312, 0x05 },
+ { IMX_8BIT, 0x3313, 0x1C+4 },
+ { IMX_8BIT, 0x331C, 0x03 },
+ { IMX_8BIT, 0x331D, 0xE8 },
+ { IMX_8BIT, 0x4084, 0x00 },
+ { IMX_8BIT, 0x4085, 0x00 },
+ { IMX_8BIT, 0x4086, 0x00 },
+ { IMX_8BIT, 0x4087, 0x00 },
+ { IMX_8BIT, 0x4400, 0x00 },
+
+ /* Global Timing Setting */
+ { IMX_8BIT, 0x0830, 0x77 },
+ { IMX_8BIT, 0x0831, 0x2F },
+ { IMX_8BIT, 0x0832, 0x5F },
+ { IMX_8BIT, 0x0833, 0x37 },
+ { IMX_8BIT, 0x0834, 0x37 },
+ { IMX_8BIT, 0x0835, 0x37 },
+ { IMX_8BIT, 0x0836, 0xBF },
+ { IMX_8BIT, 0x0837, 0x3F },
+ { IMX_8BIT, 0x0839, 0x1F },
+ { IMX_8BIT, 0x083A, 0x17 },
+ { IMX_8BIT, 0x083B, 0x02 },
+
+ /* Integration Time Setting */
+ { IMX_8BIT, 0x0202, 0x05 },
+ { IMX_8BIT, 0x0203, 0x42 },
+
+ /* HDR Setting */
+ { IMX_8BIT, 0x0230, 0x00 },
+ { IMX_8BIT, 0x0231, 0x00 },
+ { IMX_8BIT, 0x0233, 0x00 },
+ { IMX_8BIT, 0x0234, 0x00 },
+ { IMX_8BIT, 0x0235, 0x40 },
+ { IMX_8BIT, 0x0238, 0x00 },
+ { IMX_8BIT, 0x0239, 0x04 },
+ { IMX_8BIT, 0x023B, 0x00 },
+ { IMX_8BIT, 0x023C, 0x01 },
+ { IMX_8BIT, 0x33B0, 0x04 },
+ { IMX_8BIT, 0x33B1, 0x00 },
+ { IMX_8BIT, 0x33B3, 0x00 },
+ { IMX_8BIT, 0x33B4, 0x01 },
+ { IMX_8BIT, 0x3800, 0x00 },
+ { IMX_TOK_TERM, 0, 0 }
+};
+
+struct imx_resolution imx134_res_preview[] = {
+ {
+ .desc = "imx134_CIF_30fps",
+ .regs = imx134_720_592_30fps,
+ .width = 720,
+ .height = 592,
+ .fps_options = {
+ {
+ .fps = 30,
+ .pixels_per_line = 3600,
+ .lines_per_frame = 2518,
+ },
+ {
+ }
+ },
+ .bin_factor_x = 2,
+ .bin_factor_y = 2,
+ .used = 0,
+ },
+ {
+ .desc = "imx134_820_552_30fps_preview",
+ .regs = imx134_820_552_30fps,
+ .width = 820,
+ .height = 552,
+ .fps_options = {
+ {
+ .fps = 30,
+ .pixels_per_line = 3600,
+ .lines_per_frame = 2518,
+ },
+ {
+ }
+ },
+ .bin_factor_x = 2,
+ .bin_factor_y = 2,
+ .used = 0,
+ },
+ {
+ .desc = "imx134_820_616_preview_30fps",
+ .regs = imx134_820_616_30fps,
+ .width = 820,
+ .height = 616,
+ .fps_options = {
+ {
+ .fps = 30,
+ .pixels_per_line = 3600,
+ .lines_per_frame = 2518,
+ },
+ {
+ }
+ },
+ .bin_factor_x = 2,
+ .bin_factor_y = 2,
+ .used = 0,
+ },
+ {
+ .desc = "imx134_1080p_preview_30fps",
+ .regs = imx134_1936_1096_30fps_v2,
+ .width = 1936,
+ .height = 1096,
+ .fps_options = {
+ {
+ .fps = 30,
+ .pixels_per_line = 3600,
+ .lines_per_frame = 2518,
+ },
+ {
+ }
+ },
+ .bin_factor_x = 0,
+ .bin_factor_y = 0,
+ .used = 0,
+ },
+ {
+ .desc = "imx134_1640_1232_preview_30fps",
+ .regs = imx134_1640_1232_30fps,
+ .width = 1640,
+ .height = 1232,
+ .fps_options = {
+ {
+ .fps = 30,
+ .pixels_per_line = 3600,
+ .lines_per_frame = 2518,
+ },
+ {
+ }
+ },
+ .bin_factor_x = 1,
+ .bin_factor_y = 1,
+ .used = 0,
+ },
+ {
+ .desc = "imx134_8M_preview_30fps",
+ .regs = imx134_8M_30fps,
+ .width = 3280,
+ .height = 2464,
+ .fps_options = {
+ {
+ .fps = 30,
+ .pixels_per_line = 3600,
+ .lines_per_frame = 2518,
+ },
+ {
+ }
+ },
+ .bin_factor_x = 0,
+ .bin_factor_y = 0,
+ .used = 0,
+ },
+};
+
+struct imx_resolution imx134_res_still[] = {
+ {
+ .desc = "imx134_CIF_30fps",
+ .regs = imx134_1424_1168_30fps,
+ .width = 1424,
+ .height = 1168,
+ .fps_options = {
+ {
+ .fps = 30,
+ .pixels_per_line = 3600,
+ .lines_per_frame = 2518,
+ },
+ {
+ }
+ },
+ .bin_factor_x = 1,
+ .bin_factor_y = 1,
+ .used = 0,
+ },
+ {
+ .desc = "imx134_VGA_still_30fps",
+ .regs = imx134_1640_1232_30fps,
+ .width = 1640,
+ .height = 1232,
+ .fps_options = {
+ {
+ .fps = 30,
+ .pixels_per_line = 3600,
+ .lines_per_frame = 2518,
+ },
+ {
+ }
+ },
+ .bin_factor_x = 1,
+ .bin_factor_y = 1,
+ .used = 0,
+ },
+ {
+ .desc = "imx134_1080p_still_30fps",
+ .regs = imx134_1936_1096_30fps_v2,
+ .width = 1936,
+ .height = 1096,
+ .fps_options = {
+ {
+ .fps = 30,
+ .pixels_per_line = 3600,
+ .lines_per_frame = 2518,
+ },
+ {
+ }
+ },
+ .bin_factor_x = 0,
+ .bin_factor_y = 0,
+ .used = 0,
+ },
+ {
+ .desc = "imx134_1640_1232_still_30fps",
+ .regs = imx134_1640_1232_30fps,
+ .width = 1640,
+ .height = 1232,
+ .fps_options = {
+ {
+ .fps = 30,
+ .pixels_per_line = 3600,
+ .lines_per_frame = 2518,
+ },
+ {
+ }
+ },
+ .bin_factor_x = 1,
+ .bin_factor_y = 1,
+ .used = 0,
+ },
+ {
+ .desc = "imx134_8M_still_30fps",
+ .regs = imx134_8M_30fps,
+ .width = 3280,
+ .height = 2464,
+ .fps_options = {
+ {
+ /* WORKAROUND for FW performance limitation */
+ .fps = 8,
+ .pixels_per_line = 6400,
+ .lines_per_frame = 5312,
+ },
+ {
+ .fps = 30,
+ .pixels_per_line = 3600,
+ .lines_per_frame = 2518,
+ },
+ {
+ }
+ },
+ .bin_factor_x = 0,
+ .bin_factor_y = 0,
+ .used = 0,
+ },
+};
+
+struct imx_resolution imx134_res_video[] = {
+ {
+ .desc = "imx134_QCIF_DVS_30fps",
+ .regs = imx134_240_196_30fps,
+ .width = 240,
+ .height = 196,
+ .fps_options = {
+ {
+ .fps = 30,
+ .pixels_per_line = 3600,
+ .lines_per_frame = 2518,
+ },
+ {
+ }
+ },
+ .bin_factor_x = 2,
+ .bin_factor_y = 2,
+ .used = 0,
+ },
+ {
+ .desc = "imx134_CIF_DVS_30fps",
+ .regs = imx134_448_366_30fps,
+ .width = 448,
+ .height = 366,
+ .fps_options = {
+ {
+ .fps = 30,
+ .pixels_per_line = 3600,
+ .lines_per_frame = 2518,
+ },
+ {
+ }
+ },
+ .bin_factor_x = 1,
+ .bin_factor_y = 1,
+ .used = 0,
+ },
+ {
+ .desc = "imx134_VGA_30fps",
+ .regs = imx134_820_616_30fps,
+ .width = 820,
+ .height = 616,
+ .fps_options = {
+ {
+ .fps = 30,
+ .pixels_per_line = 3600,
+ .lines_per_frame = 2518,
+ },
+ {
+ }
+ },
+ .bin_factor_x = 2,
+ .bin_factor_y = 2,
+ .used = 0,
+ },
+ {
+ .desc = "imx134_480p",
+ .regs = imx134_880_592,
+ .width = 880,
+ .height = 592,
+ .fps_options = {
+ {
+ .fps = 30,
+ .pixels_per_line = 3600,
+ .lines_per_frame = 2700,
+ },
+ {
+ .fps = 60,
+ .pixels_per_line = 3600,
+ .lines_per_frame = 1350,
+ },
+ {
+ }
+ },
+ .bin_factor_x = 1,
+ .bin_factor_y = 1,
+ .used = 0,
+ },
+ {
+ .desc = "imx134_1568_880",
+ .regs = imx134_1568_880,
+ .width = 1568,
+ .height = 880,
+ .fps_options = {
+ {
+ .fps = 30,
+ .pixels_per_line = 3600,
+ .lines_per_frame = 2700,
+ },
+ {
+ .fps = 60,
+ .pixels_per_line = 3600,
+ .lines_per_frame = 1350,
+ },
+ {
+ }
+ },
+ .bin_factor_x = 1,
+ .bin_factor_y = 1,
+ .used = 0,
+ },
+ {
+ .desc = "imx134_1080p_dvs_30fps",
+ .regs = imx134_2336_1312_30fps,
+ .width = 2336,
+ .height = 1312,
+ .fps_options = {
+ {
+ .fps = 30,
+ .pixels_per_line = 3600,
+ .lines_per_frame = 2518,
+ },
+ {
+ }
+ },
+ .bin_factor_x = 0,
+ .bin_factor_y = 0,
+ .used = 0,
+ },
+ {
+ .desc = "imx134_1080p_dvs_60fps",
+ .regs = imx134_2336_1308_60fps,
+ .width = 2336,
+ .height = 1312,
+ .fps_options = {
+ {
+ .fps = 60,
+ .pixels_per_line = 3600,
+ .lines_per_frame = 1350,
+ },
+ {
+ }
+ },
+ .bin_factor_x = 0,
+ .bin_factor_y = 0,
+ .used = 0,
+ },
+ {
+ /* This setting is only used for SDV mode */
+ .desc = "imx134_8M_sdv_30fps",
+ .regs = imx134_8M_30fps,
+ .width = 3280,
+ .height = 2464,
+ .fps_options = {
+ {
+ .fps = 30,
+ .pixels_per_line = 3600,
+ .lines_per_frame = 2518,
+ },
+ {
+ }
+ },
+ .bin_factor_x = 0,
+ .bin_factor_y = 0,
+ .used = 0,
+ },
+};
+
+#endif
+
diff --git a/drivers/staging/media/atomisp/i2c/imx/imx135.h b/drivers/staging/media/atomisp/i2c/imx/imx135.h
new file mode 100644
index 000000000000..58b43af909f2
--- /dev/null
+++ b/drivers/staging/media/atomisp/i2c/imx/imx135.h
@@ -0,0 +1,3374 @@
+/*
+ * Support for Sony IMX camera sensor.
+ *
+ * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+
+#ifndef __IMX135_H__
+#define __IMX135_H__
+
+#include "common.h"
+
+#define IMX_SC_CMMN_CHIP_ID_H 0x0016
+#define IMX_SC_CMMN_CHIP_ID_L 0x0017
+
+/*
+ * focal length bits definition:
+ * bits 31-16: numerator, bits 15-0: denominator
+ */
+#define IMX_FOCAL_LENGTH_DEFAULT 0x1710064
+
+/*
+ * current f-number bits definition:
+ * bits 31-16: numerator, bits 15-0: denominator
+ */
+#define IMX_F_NUMBER_DEFAULT 0x16000a
+
+/*
+ * f-number range bits definition:
+ * bits 31-24: max f-number numerator
+ * bits 23-16: max f-number denominator
+ * bits 15-8: min f-number numerator
+ * bits 7-0: min f-number denominator
+ */
+#define IMX_F_NUMBER_RANGE 0x160a160a
+
+#define GROUPED_PARAMETER_HOLD_ENABLE {IMX_8BIT, 0x0104, 0x1}
+#define GROUPED_PARAMETER_HOLD_DISABLE {IMX_8BIT, 0x0104, 0x0}
+
+#define IMX135_EMBEDDED_DATA_LINE_NUM 2
+#define IMX135_OUTPUT_DATA_FORMAT_REG 0x0112
+#define IMX135_OUTPUT_FORMAT_RAW10 0x0a0a
+/*
+ * We use three different MIPI rates for our modes based on the resolution and
+ * FPS requirements. So we have three PLL configurations and these are based
+ * on the EMC friendly MIPI values.
+ *
+ * Maximum clock: Pix clock @ 360.96MHz MIPI @ 451.2MHz 902.4mbps
+ * Reduced clock: Pix clock @ 273.6MHz MIPI @ 342.0MHz 684.0mbps
+ * Binning modes: Pix clock @ 335.36MHz MIPI @ 209.6MHz 419.2mbps
+ * Global Timing registers are based on the data rates and these are part of
+ * the below clock definitions.
+ */
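The clocks quoted in this comment block can be cross-checked with a short calculation: the per-lane data rate is twice the MIPI (DDR) clock, and for the full-rate configurations the pixel clock works out to data rate x lanes / bits-per-pixel. This is only a minimal sketch assuming a 4-lane CSI-2 link and RAW10 output; the lane count is not stated in this header, and the binning configuration (209.6MHz, pixel clock 335.36MHz) does not follow this per-lane relation, so it is left out.

    /* mipi_rate_check.c - sketch of the clock arithmetic quoted above.
     * Assumes a 4-lane link and RAW10 (10-bit) output; both are assumptions
     * made for illustration, not values taken from this header.
     */
    #include <stdio.h>

    int main(void)
    {
            const double mipi_mhz[] = { 499.2, 451.2, 342.0 }; /* DDR half-rate clocks */
            const int lanes = 4;            /* assumed lane count */
            const int bits_per_pixel = 10;  /* RAW10 */

            for (int i = 0; i < 3; i++) {
                    double mbps_per_lane = 2.0 * mipi_mhz[i];  /* 998.4 / 902.4 / 684.0 */
                    double pixclk_mhz = mbps_per_lane * lanes / bits_per_pixel;

                    printf("MIPI %.1f MHz -> %.1f Mbps/lane -> pixel clock %.2f MHz\n",
                           mipi_mhz[i], mbps_per_lane, pixclk_mhz);
            }
            return 0;
    }

Under those assumptions the printed pixel clocks (399.36, 360.96 and 273.6 MHz) match the figures given in the PLL macro comments below.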
+/* MIPI 499.2MHz 998.4mbps PIXCLK: 399.36MHz */
+#define PLL_SETTINGS_FOR_MIPI_499_2MHZ_SALTBAY \
+ {IMX_8BIT, 0x011e, 0x13}, \
+ {IMX_8BIT, 0x011f, 0x33}, \
+ {IMX_8BIT, 0x0301, 0x05}, \
+ {IMX_8BIT, 0x0303, 0x01}, \
+ {IMX_8BIT, 0x0305, 0x0c}, \
+ {IMX_8BIT, 0x0309, 0x05}, \
+ {IMX_8BIT, 0x030b, 0x01}, \
+ {IMX_8BIT, 0x030c, 0x02}, \
+ {IMX_8BIT, 0x030d, 0x70}, \
+ {IMX_8BIT, 0x030e, 0x01}, \
+ {IMX_8BIT, 0x3a06, 0x11}, \
+ {IMX_8BIT, 0x0830, 0x7f}, \
+ {IMX_8BIT, 0x0831, 0x37}, \
+ {IMX_8BIT, 0x0832, 0x67}, \
+ {IMX_8BIT, 0x0833, 0x3f}, \
+ {IMX_8BIT, 0x0834, 0x3f}, \
+ {IMX_8BIT, 0x0835, 0x47}, \
+ {IMX_8BIT, 0x0836, 0xdf}, \
+ {IMX_8BIT, 0x0837, 0x47}, \
+ {IMX_8BIT, 0x0839, 0x1f}, \
+ {IMX_8BIT, 0x083a, 0x17}, \
+ {IMX_8BIT, 0x083b, 0x02}
+
+/* MIPI 451.2MHz 902.4mbps PIXCLK: 360.96MHz */
+#define PLL_SETTINGS_FOR_MIPI_451_2MHZ_SALTBAY \
+ {IMX_8BIT, 0x011e, 0x13}, \
+ {IMX_8BIT, 0x011f, 0x33}, \
+ {IMX_8BIT, 0x0301, 0x05}, \
+ {IMX_8BIT, 0x0303, 0x01}, \
+ {IMX_8BIT, 0x0305, 0x0c}, \
+ {IMX_8BIT, 0x0309, 0x05}, \
+ {IMX_8BIT, 0x030b, 0x01}, \
+ {IMX_8BIT, 0x030c, 0x02}, \
+ {IMX_8BIT, 0x030d, 0x34}, \
+ {IMX_8BIT, 0x030e, 0x01}, \
+ {IMX_8BIT, 0x3a06, 0x11}, \
+ {IMX_8BIT, 0x0830, 0x7f}, \
+ {IMX_8BIT, 0x0831, 0x37}, \
+ {IMX_8BIT, 0x0832, 0x67}, \
+ {IMX_8BIT, 0x0833, 0x3f}, \
+ {IMX_8BIT, 0x0834, 0x3f}, \
+ {IMX_8BIT, 0x0835, 0x47}, \
+ {IMX_8BIT, 0x0836, 0xdf}, \
+ {IMX_8BIT, 0x0837, 0x47}, \
+ {IMX_8BIT, 0x0839, 0x1f}, \
+ {IMX_8BIT, 0x083a, 0x17}, \
+ {IMX_8BIT, 0x083b, 0x02}
+
+/* MIPI 209.6MHz, 419.2mbps PIXCLK: 335.36 MHz */
+#define PLL_SETTINGS_FOR_MIPI_209_6MHZ_SALTBAY \
+ {IMX_8BIT, 0x011e, 0x13}, \
+ {IMX_8BIT, 0x011f, 0x33}, \
+ {IMX_8BIT, 0x0301, 0x05}, \
+ {IMX_8BIT, 0x0303, 0x01}, \
+ {IMX_8BIT, 0x0305, 0x06}, \
+ {IMX_8BIT, 0x0309, 0x05}, \
+ {IMX_8BIT, 0x030b, 0x02}, \
+ {IMX_8BIT, 0x030c, 0x01}, \
+ {IMX_8BIT, 0x030d, 0x06}, \
+ {IMX_8BIT, 0x030e, 0x01}, \
+ {IMX_8BIT, 0x3a06, 0x12}, \
+ {IMX_8BIT, 0x0830, 0x5f}, \
+ {IMX_8BIT, 0x0831, 0x1f}, \
+ {IMX_8BIT, 0x0832, 0x3f}, \
+ {IMX_8BIT, 0x0833, 0x1f}, \
+ {IMX_8BIT, 0x0834, 0x1f}, \
+ {IMX_8BIT, 0x0835, 0x17}, \
+ {IMX_8BIT, 0x0836, 0x67}, \
+ {IMX_8BIT, 0x0837, 0x27}, \
+ {IMX_8BIT, 0x0839, 0x1f}, \
+ {IMX_8BIT, 0x083a, 0x17}, \
+ {IMX_8BIT, 0x083b, 0x02}
+
+/* MIPI 342MHz 684mbps PIXCLK: 273.6MHz */
+#define PLL_SETTINGS_FOR_MIPI_342MHZ_SALTBAY \
+ {IMX_8BIT, 0x011e, 0x13}, \
+ {IMX_8BIT, 0x011f, 0x33}, \
+ {IMX_8BIT, 0x0301, 0x05}, \
+ {IMX_8BIT, 0x0303, 0x01}, \
+ {IMX_8BIT, 0x0305, 0x08}, \
+ {IMX_8BIT, 0x0309, 0x05}, \
+ {IMX_8BIT, 0x030b, 0x01}, \
+ {IMX_8BIT, 0x030c, 0x01}, \
+ {IMX_8BIT, 0x030d, 0x1d}, \
+ {IMX_8BIT, 0x030e, 0x01}, \
+ {IMX_8BIT, 0x3a06, 0x11}, \
+ {IMX_8BIT, 0x0830, 0x77}, \
+ {IMX_8BIT, 0x0831, 0x2f}, \
+ {IMX_8BIT, 0x0832, 0x4f}, \
+ {IMX_8BIT, 0x0833, 0x37}, \
+ {IMX_8BIT, 0x0834, 0x2f}, \
+ {IMX_8BIT, 0x0835, 0x37}, \
+ {IMX_8BIT, 0x0836, 0xa7}, \
+ {IMX_8BIT, 0x0837, 0x37}, \
+ {IMX_8BIT, 0x0839, 0x1f}, \
+ {IMX_8BIT, 0x083a, 0x17}, \
+ {IMX_8BIT, 0x083b, 0x02}
+
+/* Basic settings: Applied only once after the sensor power up */
+static struct imx_reg const imx135_init_settings[] = {
+ GROUPED_PARAMETER_HOLD_ENABLE,
+ { IMX_8BIT, 0x0220, 0x01},
+ { IMX_8BIT, 0x3008, 0xB0},
+ { IMX_8BIT, 0x320A, 0x01},
+ { IMX_8BIT, 0x320D, 0x10},
+ { IMX_8BIT, 0x3216, 0x2E},
+ { IMX_8BIT, 0x3230, 0x0A},
+ { IMX_8BIT, 0x3228, 0x05},
+ { IMX_8BIT, 0x3229, 0x02},
+ { IMX_8BIT, 0x322C, 0x02},
+ { IMX_8BIT, 0x3302, 0x10},
+ { IMX_8BIT, 0x3390, 0x45},
+ { IMX_8BIT, 0x3409, 0x0C},
+ { IMX_8BIT, 0x340B, 0xF5},
+ { IMX_8BIT, 0x340C, 0x2D},
+ { IMX_8BIT, 0x3412, 0x41},
+ { IMX_8BIT, 0x3413, 0xAD},
+ { IMX_8BIT, 0x3414, 0x1E},
+ { IMX_8BIT, 0x3427, 0x04},
+ { IMX_8BIT, 0x3480, 0x1E},
+ { IMX_8BIT, 0x3484, 0x1E},
+ { IMX_8BIT, 0x3488, 0x1E},
+ { IMX_8BIT, 0x348C, 0x1E},
+ { IMX_8BIT, 0x3490, 0x1E},
+ { IMX_8BIT, 0x3494, 0x1E},
+ { IMX_8BIT, 0x349C, 0x38},
+ { IMX_8BIT, 0x34A3, 0x38},
+ { IMX_8BIT, 0x3511, 0x8F},
+ { IMX_8BIT, 0x3518, 0x00},
+ { IMX_8BIT, 0x3519, 0x94},
+ { IMX_8BIT, 0x3833, 0x20},
+ { IMX_8BIT, 0x3893, 0x01},
+ { IMX_8BIT, 0x38C2, 0x08},
+ { IMX_8BIT, 0x38C3, 0x08},
+ { IMX_8BIT, 0x3C09, 0x01},
+ { IMX_8BIT, 0x4000, 0x0E},
+ { IMX_8BIT, 0x4300, 0x00},
+ { IMX_8BIT, 0x4316, 0x12},
+ { IMX_8BIT, 0x4317, 0x22},
+ { IMX_8BIT, 0x4318, 0x00},
+ { IMX_8BIT, 0x4319, 0x00},
+ { IMX_8BIT, 0x431A, 0x00},
+ { IMX_8BIT, 0x4324, 0x03},
+ { IMX_8BIT, 0x4325, 0x20},
+ { IMX_8BIT, 0x4326, 0x03},
+ { IMX_8BIT, 0x4327, 0x84},
+ { IMX_8BIT, 0x4328, 0x03},
+ { IMX_8BIT, 0x4329, 0x20},
+ { IMX_8BIT, 0x432A, 0x03},
+ { IMX_8BIT, 0x432B, 0x84},
+ { IMX_8BIT, 0x432C, 0x01},
+ { IMX_8BIT, 0x4401, 0x3F},
+ { IMX_8BIT, 0x4402, 0xFF},
+ { IMX_8BIT, 0x4412, 0x3F},
+ { IMX_8BIT, 0x4413, 0xFF},
+ { IMX_8BIT, 0x441D, 0x28},
+ { IMX_8BIT, 0x4444, 0x00},
+ { IMX_8BIT, 0x4445, 0x00},
+ { IMX_8BIT, 0x4446, 0x3F},
+ { IMX_8BIT, 0x4447, 0xFF},
+ { IMX_8BIT, 0x4452, 0x00},
+ { IMX_8BIT, 0x4453, 0xA0},
+ { IMX_8BIT, 0x4454, 0x08},
+ { IMX_8BIT, 0x4455, 0x00},
+ { IMX_8BIT, 0x4458, 0x18},
+ { IMX_8BIT, 0x4459, 0x18},
+ { IMX_8BIT, 0x445A, 0x3F},
+ { IMX_8BIT, 0x445B, 0x3A},
+ { IMX_8BIT, 0x4462, 0x00},
+ { IMX_8BIT, 0x4463, 0x00},
+ { IMX_8BIT, 0x4464, 0x00},
+ { IMX_8BIT, 0x4465, 0x00},
+ { IMX_8BIT, 0x446E, 0x01},
+ { IMX_8BIT, 0x4500, 0x1F},
+ { IMX_8BIT, 0x600a, 0x00},
+ { IMX_8BIT, 0x380a, 0x00},
+ { IMX_8BIT, 0x380b, 0x00},
+ { IMX_8BIT, 0x4103, 0x00},
+ { IMX_8BIT, 0x4243, 0x9a},
+ { IMX_8BIT, 0x4330, 0x01},
+ { IMX_8BIT, 0x4331, 0x90},
+ { IMX_8BIT, 0x4332, 0x02},
+ { IMX_8BIT, 0x4333, 0x58},
+ { IMX_8BIT, 0x4334, 0x03},
+ { IMX_8BIT, 0x4335, 0x20},
+ { IMX_8BIT, 0x4336, 0x03},
+ { IMX_8BIT, 0x4337, 0x84},
+ { IMX_8BIT, 0x433C, 0x01},
+ { IMX_8BIT, 0x4340, 0x02},
+ { IMX_8BIT, 0x4341, 0x58},
+ { IMX_8BIT, 0x4342, 0x03},
+ { IMX_8BIT, 0x4343, 0x52},
+ { IMX_8BIT, 0x4364, 0x0b},
+ { IMX_8BIT, 0x4368, 0x00},
+ { IMX_8BIT, 0x4369, 0x0f},
+ { IMX_8BIT, 0x436a, 0x03},
+ { IMX_8BIT, 0x436b, 0xa8},
+ { IMX_8BIT, 0x436c, 0x00},
+ { IMX_8BIT, 0x436d, 0x00},
+ { IMX_8BIT, 0x436e, 0x00},
+ { IMX_8BIT, 0x436f, 0x06},
+ { IMX_8BIT, 0x4281, 0x21},
+ { IMX_8BIT, 0x4282, 0x18},
+ { IMX_8BIT, 0x4283, 0x04},
+ { IMX_8BIT, 0x4284, 0x08},
+ { IMX_8BIT, 0x4287, 0x7f},
+ { IMX_8BIT, 0x4288, 0x08},
+ { IMX_8BIT, 0x428c, 0x08},
+ { IMX_8BIT, 0x4297, 0x00},
+ { IMX_8BIT, 0x4299, 0x7E},
+ { IMX_8BIT, 0x42A4, 0xFB},
+ { IMX_8BIT, 0x42A5, 0x7E},
+ { IMX_8BIT, 0x42A6, 0xDF},
+ { IMX_8BIT, 0x42A7, 0xB7},
+ { IMX_8BIT, 0x42AF, 0x03},
+ { IMX_8BIT, 0x4207, 0x03},
+ { IMX_8BIT, 0x4218, 0x00},
+ { IMX_8BIT, 0x421B, 0x20},
+ { IMX_8BIT, 0x421F, 0x04},
+ { IMX_8BIT, 0x4222, 0x02},
+ { IMX_8BIT, 0x4223, 0x22},
+ { IMX_8BIT, 0x422E, 0x54},
+ { IMX_8BIT, 0x422F, 0xFB},
+ { IMX_8BIT, 0x4230, 0xFF},
+ { IMX_8BIT, 0x4231, 0xFE},
+ { IMX_8BIT, 0x4232, 0xFF},
+ { IMX_8BIT, 0x4235, 0x58},
+ { IMX_8BIT, 0x4236, 0xF7},
+ { IMX_8BIT, 0x4237, 0xFD},
+ { IMX_8BIT, 0x4239, 0x4E},
+ { IMX_8BIT, 0x423A, 0xFC},
+ { IMX_8BIT, 0x423B, 0xFD},
+ { IMX_8BIT, 0x4300, 0x00},
+ { IMX_8BIT, 0x4316, 0x12},
+ { IMX_8BIT, 0x4317, 0x22},
+ { IMX_8BIT, 0x4318, 0x00},
+ { IMX_8BIT, 0x4319, 0x00},
+ { IMX_8BIT, 0x431A, 0x00},
+ { IMX_8BIT, 0x4324, 0x03},
+ { IMX_8BIT, 0x4325, 0x20},
+ { IMX_8BIT, 0x4326, 0x03},
+ { IMX_8BIT, 0x4327, 0x84},
+ { IMX_8BIT, 0x4328, 0x03},
+ { IMX_8BIT, 0x4329, 0x20},
+ { IMX_8BIT, 0x432A, 0x03},
+ { IMX_8BIT, 0x432B, 0x20},
+ { IMX_8BIT, 0x432C, 0x01},
+ { IMX_8BIT, 0x432D, 0x01},
+ { IMX_8BIT, 0x4338, 0x02},
+ { IMX_8BIT, 0x4339, 0x00},
+ { IMX_8BIT, 0x433A, 0x00},
+ { IMX_8BIT, 0x433B, 0x02},
+ { IMX_8BIT, 0x435A, 0x03},
+ { IMX_8BIT, 0x435B, 0x84},
+ { IMX_8BIT, 0x435E, 0x01},
+ { IMX_8BIT, 0x435F, 0xFF},
+ { IMX_8BIT, 0x4360, 0x01},
+ { IMX_8BIT, 0x4361, 0xF4},
+ { IMX_8BIT, 0x4362, 0x03},
+ { IMX_8BIT, 0x4363, 0x84},
+ { IMX_8BIT, 0x437B, 0x01},
+ { IMX_8BIT, 0x4400, 0x00}, /* STATS off; ISP does not support STATS */
+ { IMX_8BIT, 0x4401, 0x3F},
+ { IMX_8BIT, 0x4402, 0xFF},
+ { IMX_8BIT, 0x4404, 0x13},
+ { IMX_8BIT, 0x4405, 0x26},
+ { IMX_8BIT, 0x4406, 0x07},
+ { IMX_8BIT, 0x4408, 0x20},
+ { IMX_8BIT, 0x4409, 0xE5},
+ { IMX_8BIT, 0x440A, 0xFB},
+ { IMX_8BIT, 0x440C, 0xF6},
+ { IMX_8BIT, 0x440D, 0xEA},
+ { IMX_8BIT, 0x440E, 0x20},
+ { IMX_8BIT, 0x4410, 0x00},
+ { IMX_8BIT, 0x4411, 0x00},
+ { IMX_8BIT, 0x4412, 0x3F},
+ { IMX_8BIT, 0x4413, 0xFF},
+ { IMX_8BIT, 0x4414, 0x1F},
+ { IMX_8BIT, 0x4415, 0xFF},
+ { IMX_8BIT, 0x4416, 0x20},
+ { IMX_8BIT, 0x4417, 0x00},
+ { IMX_8BIT, 0x4418, 0x1F},
+ { IMX_8BIT, 0x4419, 0xFF},
+ { IMX_8BIT, 0x441A, 0x20},
+ { IMX_8BIT, 0x441B, 0x00},
+ { IMX_8BIT, 0x441D, 0x40},
+ { IMX_8BIT, 0x441E, 0x1E},
+ { IMX_8BIT, 0x441F, 0x38},
+ { IMX_8BIT, 0x4420, 0x01},
+ { IMX_8BIT, 0x4444, 0x00},
+ { IMX_8BIT, 0x4445, 0x00},
+ { IMX_8BIT, 0x4446, 0x1D},
+ { IMX_8BIT, 0x4447, 0xF9},
+ { IMX_8BIT, 0x4452, 0x00},
+ { IMX_8BIT, 0x4453, 0xA0},
+ { IMX_8BIT, 0x4454, 0x08},
+ { IMX_8BIT, 0x4455, 0x00},
+ { IMX_8BIT, 0x4456, 0x0F},
+ { IMX_8BIT, 0x4457, 0xFF},
+ { IMX_8BIT, 0x4458, 0x18},
+ { IMX_8BIT, 0x4459, 0x18},
+ { IMX_8BIT, 0x445A, 0x3F},
+ { IMX_8BIT, 0x445B, 0x3A},
+ { IMX_8BIT, 0x445C, 0x00},
+ { IMX_8BIT, 0x445D, 0x28},
+ { IMX_8BIT, 0x445E, 0x01},
+ { IMX_8BIT, 0x445F, 0x90},
+ { IMX_8BIT, 0x4460, 0x00},
+ { IMX_8BIT, 0x4461, 0x60},
+ { IMX_8BIT, 0x4462, 0x00},
+ { IMX_8BIT, 0x4463, 0x00},
+ { IMX_8BIT, 0x4464, 0x00},
+ { IMX_8BIT, 0x4465, 0x00},
+ { IMX_8BIT, 0x446C, 0x00},
+ { IMX_8BIT, 0x446D, 0x00},
+ { IMX_8BIT, 0x446E, 0x00},
+ { IMX_8BIT, 0x452A, 0x02},
+ { IMX_8BIT, 0x0712, 0x01},
+ { IMX_8BIT, 0x0713, 0x00},
+ { IMX_8BIT, 0x0714, 0x01},
+ { IMX_8BIT, 0x0715, 0x00},
+ { IMX_8BIT, 0x0716, 0x01},
+ { IMX_8BIT, 0x0717, 0x00},
+ { IMX_8BIT, 0x0718, 0x01},
+ { IMX_8BIT, 0x0719, 0x00},
+ { IMX_8BIT, 0x4500, 0x1F },
+ PLL_SETTINGS_FOR_MIPI_451_2MHZ_SALTBAY,
+ { IMX_8BIT, 0x0205, 0x00},
+ { IMX_8BIT, 0x020E, 0x01},
+ { IMX_8BIT, 0x020F, 0x00},
+ { IMX_8BIT, 0x0210, 0x02},
+ { IMX_8BIT, 0x0211, 0x00},
+ { IMX_8BIT, 0x0212, 0x02},
+ { IMX_8BIT, 0x0213, 0x00},
+ { IMX_8BIT, 0x0214, 0x01},
+ { IMX_8BIT, 0x0215, 0x00},
+ /* HDR Setting */
+ { IMX_8BIT, 0x0230, 0x00},
+ { IMX_8BIT, 0x0231, 0x00},
+ { IMX_8BIT, 0x0233, 0x00},
+ { IMX_8BIT, 0x0234, 0x00},
+ { IMX_8BIT, 0x0235, 0x40},
+ { IMX_8BIT, 0x0238, 0x00},
+ { IMX_8BIT, 0x0239, 0x04},
+ { IMX_8BIT, 0x023B, 0x00},
+ { IMX_8BIT, 0x023C, 0x01},
+ { IMX_8BIT, 0x33B0, 0x04},
+ { IMX_8BIT, 0x33B1, 0x00},
+ { IMX_8BIT, 0x33B3, 0x00},
+ { IMX_8BIT, 0x33B4, 0x01},
+ { IMX_8BIT, 0x3800, 0x00},
+ GROUPED_PARAMETER_HOLD_DISABLE,
+ { IMX_TOK_TERM, 0, 0}
+};
+
+/********* Preview, continuous capture and still modes *****************/
+
+static struct imx_reg const imx135_13m[] = {
+ GROUPED_PARAMETER_HOLD_ENABLE,
+ PLL_SETTINGS_FOR_MIPI_451_2MHZ_SALTBAY,
+ /* Mode setting */
+ {IMX_8BIT, 0x0108, 0x03},
+ {IMX_8BIT, 0x0112, 0x0A},
+ {IMX_8BIT, 0x0113, 0x0A},
+ {IMX_8BIT, 0x0381, 0x01},
+ {IMX_8BIT, 0x0383, 0x01},
+ {IMX_8BIT, 0x0385, 0x01},
+ {IMX_8BIT, 0x0387, 0x01},
+ {IMX_8BIT, 0x0390, 0x00},
+ {IMX_8BIT, 0x0391, 0x11},
+ {IMX_8BIT, 0x0392, 0x00},
+ {IMX_8BIT, 0x0401, 0x00},
+ {IMX_8BIT, 0x0404, 0x00},
+ {IMX_8BIT, 0x0405, 0x10},
+ {IMX_8BIT, 0x4082, 0x01},
+ {IMX_8BIT, 0x4083, 0x01},
+ {IMX_8BIT, 0x4203, 0xFF},
+ {IMX_8BIT, 0x7006, 0x04},
+ /* Size Setting */
+ {IMX_8BIT, 0x0344, 0x00}, /* 0, 0, 4207,3119 4208x3120 */
+ {IMX_8BIT, 0x0345, 0x00},
+ {IMX_8BIT, 0x0346, 0x00},
+ {IMX_8BIT, 0x0347, 0x00},
+ {IMX_8BIT, 0x0348, 0x10},
+ {IMX_8BIT, 0x0349, 0x6F},
+ {IMX_8BIT, 0x034A, 0x0C},
+ {IMX_8BIT, 0x034B, 0x2F},
+ {IMX_8BIT, 0x034C, 0x10},
+ {IMX_8BIT, 0x034D, 0x70},
+ {IMX_8BIT, 0x034E, 0x0C},
+ {IMX_8BIT, 0x034F, 0x30},
+ {IMX_8BIT, 0x0350, 0x00},
+ {IMX_8BIT, 0x0351, 0x00},
+ {IMX_8BIT, 0x0352, 0x00},
+ {IMX_8BIT, 0x0353, 0x00},
+ {IMX_8BIT, 0x0354, 0x10}, /* 4208x3120 */
+ {IMX_8BIT, 0x0355, 0x70},
+ {IMX_8BIT, 0x0356, 0x0C},
+ {IMX_8BIT, 0x0357, 0x30},
+ {IMX_8BIT, 0x301D, 0x30},
+ {IMX_8BIT, 0x3310, 0x10},
+ {IMX_8BIT, 0x3311, 0x70},
+ {IMX_8BIT, 0x3312, 0x0C},
+ {IMX_8BIT, 0x3313, 0x30},
+ {IMX_8BIT, 0x331C, 0x00},
+ {IMX_8BIT, 0x331D, 0x10},
+ {IMX_8BIT, 0x4084, 0x00}, /* If scaling, Fill this */
+ {IMX_8BIT, 0x4085, 0x00},
+ {IMX_8BIT, 0x4086, 0x00},
+ {IMX_8BIT, 0x4087, 0x00},
+ {IMX_8BIT, 0x4400, 0x00},
+ {IMX_TOK_TERM, 0, 0},
+};
+
+/* 13MP with reduced pixel clock; MIPI 342MHz is EMC friendly */
+static struct imx_reg const imx135_13m_for_mipi_342[] = {
+ GROUPED_PARAMETER_HOLD_ENABLE,
+ PLL_SETTINGS_FOR_MIPI_342MHZ_SALTBAY,
+ /* Mode setting */
+ {IMX_8BIT, 0x0108, 0x03},
+ {IMX_8BIT, 0x0112, 0x0A},
+ {IMX_8BIT, 0x0113, 0x0A},
+ {IMX_8BIT, 0x0381, 0x01},
+ {IMX_8BIT, 0x0383, 0x01},
+ {IMX_8BIT, 0x0385, 0x01},
+ {IMX_8BIT, 0x0387, 0x01},
+ {IMX_8BIT, 0x0390, 0x00},
+ {IMX_8BIT, 0x0391, 0x11},
+ {IMX_8BIT, 0x0392, 0x00},
+ {IMX_8BIT, 0x0401, 0x00},
+ {IMX_8BIT, 0x0404, 0x00},
+ {IMX_8BIT, 0x0405, 0x10},
+ {IMX_8BIT, 0x4082, 0x01},
+ {IMX_8BIT, 0x4083, 0x01},
+ {IMX_8BIT, 0x4203, 0xFF},
+ {IMX_8BIT, 0x7006, 0x04},
+ /* Size Setting */
+ {IMX_8BIT, 0x0344, 0x00},
+ {IMX_8BIT, 0x0345, 0x00},
+ {IMX_8BIT, 0x0346, 0x00},
+ {IMX_8BIT, 0x0347, 0x00},
+ {IMX_8BIT, 0x0348, 0x10},
+ {IMX_8BIT, 0x0349, 0x6F},
+ {IMX_8BIT, 0x034A, 0x0C},
+ {IMX_8BIT, 0x034B, 0x2F},
+ {IMX_8BIT, 0x034C, 0x10},
+ {IMX_8BIT, 0x034D, 0x70},
+ {IMX_8BIT, 0x034E, 0x0C},
+ {IMX_8BIT, 0x034F, 0x30},
+ {IMX_8BIT, 0x0350, 0x00},
+ {IMX_8BIT, 0x0351, 0x00},
+ {IMX_8BIT, 0x0352, 0x00},
+ {IMX_8BIT, 0x0353, 0x00},
+ {IMX_8BIT, 0x0354, 0x10},
+ {IMX_8BIT, 0x0355, 0x70},
+ {IMX_8BIT, 0x0356, 0x0C},
+ {IMX_8BIT, 0x0357, 0x30},
+ {IMX_8BIT, 0x301D, 0x30},
+ {IMX_8BIT, 0x3310, 0x10},
+ {IMX_8BIT, 0x3311, 0x70},
+ {IMX_8BIT, 0x3312, 0x0C},
+ {IMX_8BIT, 0x3313, 0x30},
+ {IMX_8BIT, 0x331C, 0x00},
+ {IMX_8BIT, 0x331D, 0x10},
+ {IMX_8BIT, 0x4084, 0x00}, /* If scaling, Fill this */
+ {IMX_8BIT, 0x4085, 0x00},
+ {IMX_8BIT, 0x4086, 0x00},
+ {IMX_8BIT, 0x4087, 0x00},
+ {IMX_8BIT, 0x4400, 0x00},
+ {IMX_TOK_TERM, 0, 0},
+};
+
+static struct imx_reg const imx135_10m[] = {
+ GROUPED_PARAMETER_HOLD_ENABLE,
+ PLL_SETTINGS_FOR_MIPI_451_2MHZ_SALTBAY,
+ /* Mode setting */
+ {IMX_8BIT, 0x0108, 0x03},
+ {IMX_8BIT, 0x0112, 0x0A},
+ {IMX_8BIT, 0x0113, 0x0A},
+ {IMX_8BIT, 0x0381, 0x01},
+ {IMX_8BIT, 0x0383, 0x01},
+ {IMX_8BIT, 0x0385, 0x01},
+ {IMX_8BIT, 0x0387, 0x01},
+ {IMX_8BIT, 0x0390, 0x00},
+ {IMX_8BIT, 0x0391, 0x11},
+ {IMX_8BIT, 0x0392, 0x00},
+ {IMX_8BIT, 0x0401, 0x00},
+ {IMX_8BIT, 0x0404, 0x00},
+ {IMX_8BIT, 0x0405, 0x10},
+ {IMX_8BIT, 0x4082, 0x01},
+ {IMX_8BIT, 0x4083, 0x01},
+ {IMX_8BIT, 0x4203, 0xFF},
+ {IMX_8BIT, 0x7006, 0x04},
+ /* Size setting */
+ {IMX_8BIT, 0x0344, 0x00}, /* 0, 376, 4207, 2743 */
+ {IMX_8BIT, 0x0345, 0x00},
+ {IMX_8BIT, 0x0346, 0x01},
+ {IMX_8BIT, 0x0347, 0x78},
+ {IMX_8BIT, 0x0348, 0x10},
+ {IMX_8BIT, 0x0349, 0x6f},
+ {IMX_8BIT, 0x034A, 0x0a},
+ {IMX_8BIT, 0x034B, 0xb7},
+ {IMX_8BIT, 0x034C, 0x10}, /* 4208x2368 */
+ {IMX_8BIT, 0x034D, 0x70},
+ {IMX_8BIT, 0x034E, 0x09},
+ {IMX_8BIT, 0x034F, 0x40},
+ {IMX_8BIT, 0x0350, 0x00},
+ {IMX_8BIT, 0x0351, 0x00},
+ {IMX_8BIT, 0x0352, 0x00},
+ {IMX_8BIT, 0x0353, 0x00},
+ {IMX_8BIT, 0x0354, 0x10},
+ {IMX_8BIT, 0x0355, 0x70},
+ {IMX_8BIT, 0x0356, 0x09},
+ {IMX_8BIT, 0x0357, 0x40},
+ {IMX_8BIT, 0x301D, 0x30},
+ {IMX_8BIT, 0x3310, 0x10},
+ {IMX_8BIT, 0x3311, 0x70},
+ {IMX_8BIT, 0x3312, 0x09},
+ {IMX_8BIT, 0x3313, 0x40},
+ {IMX_8BIT, 0x331C, 0x01},
+ {IMX_8BIT, 0x331D, 0x68},
+ {IMX_8BIT, 0x4084, 0x00},
+ {IMX_8BIT, 0x4085, 0x00},
+ {IMX_8BIT, 0x4086, 0x00},
+ {IMX_8BIT, 0x4087, 0x00},
+ {IMX_8BIT, 0x4400, 0x00},
+ {IMX_TOK_TERM, 0, 0},
+};
+
+static struct imx_reg const imx135_10m_for_mipi_342[] = {
+ GROUPED_PARAMETER_HOLD_ENABLE,
+ PLL_SETTINGS_FOR_MIPI_342MHZ_SALTBAY,
+ /* Mode setting */
+ {IMX_8BIT, 0x0108, 0x03},
+ {IMX_8BIT, 0x0112, 0x0A},
+ {IMX_8BIT, 0x0113, 0x0A},
+ {IMX_8BIT, 0x0381, 0x01},
+ {IMX_8BIT, 0x0383, 0x01},
+ {IMX_8BIT, 0x0385, 0x01},
+ {IMX_8BIT, 0x0387, 0x01},
+ {IMX_8BIT, 0x0390, 0x00},
+ {IMX_8BIT, 0x0391, 0x11},
+ {IMX_8BIT, 0x0392, 0x00},
+ {IMX_8BIT, 0x0401, 0x00},
+ {IMX_8BIT, 0x0404, 0x00},
+ {IMX_8BIT, 0x0405, 0x10},
+ {IMX_8BIT, 0x4082, 0x01},
+ {IMX_8BIT, 0x4083, 0x01},
+ {IMX_8BIT, 0x4203, 0xFF},
+ {IMX_8BIT, 0x7006, 0x04},
+ /* Size setting */
+ {IMX_8BIT, 0x0344, 0x00}, /* 0, 376, 4207, 2743 */
+ {IMX_8BIT, 0x0345, 0x00},
+ {IMX_8BIT, 0x0346, 0x01},
+ {IMX_8BIT, 0x0347, 0x78},
+ {IMX_8BIT, 0x0348, 0x10},
+ {IMX_8BIT, 0x0349, 0x6f},
+ {IMX_8BIT, 0x034A, 0x0a},
+ {IMX_8BIT, 0x034B, 0xb7},
+ {IMX_8BIT, 0x034C, 0x10}, /* 4208x2368 */
+ {IMX_8BIT, 0x034D, 0x70},
+ {IMX_8BIT, 0x034E, 0x09},
+ {IMX_8BIT, 0x034F, 0x40},
+ {IMX_8BIT, 0x0350, 0x00},
+ {IMX_8BIT, 0x0351, 0x00},
+ {IMX_8BIT, 0x0352, 0x00},
+ {IMX_8BIT, 0x0353, 0x00},
+ {IMX_8BIT, 0x0354, 0x10},
+ {IMX_8BIT, 0x0355, 0x70},
+ {IMX_8BIT, 0x0356, 0x09},
+ {IMX_8BIT, 0x0357, 0x40},
+ {IMX_8BIT, 0x301D, 0x30},
+ {IMX_8BIT, 0x3310, 0x10},
+ {IMX_8BIT, 0x3311, 0x70},
+ {IMX_8BIT, 0x3312, 0x09},
+ {IMX_8BIT, 0x3313, 0x40},
+ {IMX_8BIT, 0x331C, 0x01},
+ {IMX_8BIT, 0x331D, 0x68},
+ {IMX_8BIT, 0x4084, 0x00},
+ {IMX_8BIT, 0x4085, 0x00},
+ {IMX_8BIT, 0x4086, 0x00},
+ {IMX_8BIT, 0x4087, 0x00},
+ {IMX_8BIT, 0x4400, 0x00},
+ {IMX_TOK_TERM, 0, 0},
+};
+
+/*
+ * It is 8.5 DS from (3:2)8m cropped setting.
+ *
+ * The 8m(3:2) cropped setting is 2992x2448 effective res.
+ * The ISP effective cropped setting should be 352x288 effective res.
+ *
+ * Consider ISP 16x16 padding:
+ * sensor outputs 368x304
+ * cropped region is 3128x2584
+ */
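The output size and cropping region quoted in mode comments like the one above follow from the downscale ratio (register 0x0405 divided by 16, here 0x88/16 = 8.5 per the "/* 136/16=8.5 */" comment below) plus the ISP's 16-pixel padding on each axis. A minimal sketch of that arithmetic, using only the numbers from this comment:

    /* crop_calc.c - sketch of the crop/scale arithmetic used in the mode
     * comments of this header (numbers taken from the 368x304 mode).
     */
    #include <stdio.h>

    int main(void)
    {
            const int scale_m = 0x88;             /* scaling register 0x0405: 136 */
            const double ds = scale_m / 16.0;     /* downscale ratio 136/16 = 8.5 */
            const int pad = 16;                   /* ISP padding per axis */
            const int eff_w = 2992, eff_h = 2448; /* (3:2) 8m effective resolution */

            int out_w = (int)(eff_w / ds) + pad;  /* 352 + 16 = 368 */
            int out_h = (int)(eff_h / ds) + pad;  /* 288 + 16 = 304 */
            int crop_w = (int)(out_w * ds);       /* 368 * 8.5 = 3128 */
            int crop_h = (int)(out_h * ds);       /* 304 * 8.5 = 2584 */

            printf("sensor output %dx%d, cropped region %dx%d\n",
                   out_w, out_h, crop_w, crop_h);
            return 0;
    }

The same pattern (effective size divided by the binning factor or scaler, plus 16 pixels of padding, then multiplied back for the crop) reproduces the figures in the other binned and scaled mode comments further down.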
+static struct imx_reg const imx135_368x304_cropped[] = {
+ GROUPED_PARAMETER_HOLD_ENABLE,
+ PLL_SETTINGS_FOR_MIPI_209_6MHZ_SALTBAY,
+ /* Mode setting */
+ {IMX_8BIT, 0x0108, 0x03},
+ {IMX_8BIT, 0x0112, 0x0A},
+ {IMX_8BIT, 0x0113, 0x0A},
+ {IMX_8BIT, 0x0381, 0x01},
+ {IMX_8BIT, 0x0383, 0x01},
+ {IMX_8BIT, 0x0385, 0x01},
+ {IMX_8BIT, 0x0387, 0x01},
+ {IMX_8BIT, 0x0390, 0x00},
+ {IMX_8BIT, 0x0391, 0x11}, /* no binning */
+ {IMX_8BIT, 0x0392, 0x00},
+ {IMX_8BIT, 0x0401, 0x02}, /* resize */
+ {IMX_8BIT, 0x0404, 0x00},
+ {IMX_8BIT, 0x0405, 0x88}, /* 136/16=8.5 */
+ {IMX_8BIT, 0x4082, 0x00},
+ {IMX_8BIT, 0x4083, 0x00},
+ {IMX_8BIT, 0x4203, 0xFF},
+ {IMX_8BIT, 0x7006, 0x04},
+ /* Size setting */
+ {IMX_8BIT, 0x0344, 0x02}, /* X_ADD_STA */
+ {IMX_8BIT, 0x0345, 0x1C}, /* 540 */
+ {IMX_8BIT, 0x0346, 0x01}, /* Y_ADD_STA */
+ {IMX_8BIT, 0x0347, 0x0C}, /* 268 */
+ {IMX_8BIT, 0x0348, 0x0E}, /* X_ADD_END */
+ {IMX_8BIT, 0x0349, 0x53}, /* 3667 */
+ {IMX_8BIT, 0x034A, 0x0B}, /* Y_ADD_END */
+ {IMX_8BIT, 0x034B, 0x23}, /* 2851 */
+ {IMX_8BIT, 0x034C, 0x01}, /* X_OUT_SIZE */
+ {IMX_8BIT, 0x034D, 0x70}, /* 368 */
+ {IMX_8BIT, 0x034E, 0x01}, /* Y_OUT_SIZE */
+ {IMX_8BIT, 0x034F, 0x30}, /* 304 */
+ {IMX_8BIT, 0x0350, 0x00}, /* No Dig crop */
+ {IMX_8BIT, 0x0351, 0x00},
+ {IMX_8BIT, 0x0352, 0x00},
+ {IMX_8BIT, 0x0353, 0x00},
+ {IMX_8BIT, 0x0354, 0x0C}, /* Cut out size same as the size after crop */
+ {IMX_8BIT, 0x0355, 0x38},
+ {IMX_8BIT, 0x0356, 0x0A},
+ {IMX_8BIT, 0x0357, 0x18},
+ {IMX_8BIT, 0x301D, 0x30}, /* ?? */
+ {IMX_8BIT, 0x3310, 0x01}, /* Write H and V size same as output size? */
+ {IMX_8BIT, 0x3311, 0x70},
+ {IMX_8BIT, 0x3312, 0x01},
+ {IMX_8BIT, 0x3313, 0x30},
+ {IMX_8BIT, 0x331C, 0x02}, /* ?? */
+ {IMX_8BIT, 0x331D, 0xD0},
+ {IMX_8BIT, 0x4084, 0x01}, /* Scaling related? */
+ {IMX_8BIT, 0x4085, 0x70},
+ {IMX_8BIT, 0x4086, 0x01},
+ {IMX_8BIT, 0x4087, 0x30},
+ {IMX_8BIT, 0x4400, 0x00}, /* STATS off */
+ {IMX_TOK_TERM, 0, 0},
+};
+
+/*
+ * It is 1/4 binning from 8m cropped setting.
+ *
+ * The 8m cropped setting is 3264x2448 effective res.
+ * The xga cropped setting should be 816x612 effective res.
+ *
+ * Consider ISP 16x16 padding:
+ * sensor outputs 832x628
+ * cropped region is 3328x2512
+ */
+static struct imx_reg const imx135_xga_cropped[] = {
+ GROUPED_PARAMETER_HOLD_ENABLE,
+ PLL_SETTINGS_FOR_MIPI_209_6MHZ_SALTBAY,
+ /* Mode setting */
+ {IMX_8BIT, 0x0108, 0x03},
+ {IMX_8BIT, 0x0112, 0x0A},
+ {IMX_8BIT, 0x0113, 0x0A},
+ {IMX_8BIT, 0x0381, 0x01},
+ {IMX_8BIT, 0x0383, 0x01},
+ {IMX_8BIT, 0x0385, 0x01},
+ {IMX_8BIT, 0x0387, 0x01},
+ {IMX_8BIT, 0x0390, 0x01},
+ {IMX_8BIT, 0x0391, 0x44},
+ {IMX_8BIT, 0x0392, 0x00},
+ {IMX_8BIT, 0x0401, 0x00},
+ {IMX_8BIT, 0x0404, 0x00},
+ {IMX_8BIT, 0x0405, 0x10},
+ {IMX_8BIT, 0x4082, 0x00},
+ {IMX_8BIT, 0x4083, 0x00},
+/* {IMX_8BIT, 0x4203, 0xFF}, */
+ {IMX_8BIT, 0x7006, 0x04},
+ /* Size setting */
+ {IMX_8BIT, 0x0344, 0x01}, /* X_ADD_STA */
+ {IMX_8BIT, 0x0345, 0xB8}, /* 440 */
+ {IMX_8BIT, 0x0346, 0x01}, /* Y_ADD_STA */
+ {IMX_8BIT, 0x0347, 0x30}, /* 304 */
+ {IMX_8BIT, 0x0348, 0x0E}, /* X_ADD_END */
+ {IMX_8BIT, 0x0349, 0xB7}, /* 4207-440=3767 */
+ {IMX_8BIT, 0x034A, 0x0A}, /* Y_ADD_END */
+ {IMX_8BIT, 0x034B, 0xFF}, /* 3119-304=2815 */
+ {IMX_8BIT, 0x034C, 0x03}, /* X_OUT_SIZE */
+ {IMX_8BIT, 0x034D, 0x40}, /* 832 */
+ {IMX_8BIT, 0x034E, 0x02}, /* Y_OUT_SIZE */
+ {IMX_8BIT, 0x034F, 0x74}, /* 628 */
+ {IMX_8BIT, 0x0350, 0x00}, /* No Dig crop */
+ {IMX_8BIT, 0x0351, 0x00},
+ {IMX_8BIT, 0x0352, 0x00},
+ {IMX_8BIT, 0x0353, 0x00},
+ {IMX_8BIT, 0x0354, 0x03}, /* Cut out size same as the size after crop */
+ {IMX_8BIT, 0x0355, 0x40},
+ {IMX_8BIT, 0x0356, 0x02},
+ {IMX_8BIT, 0x0357, 0x74},
+ {IMX_8BIT, 0x301D, 0x30}, /* ?? */
+ {IMX_8BIT, 0x3310, 0x03}, /* Write H and V size same as output size? */
+ {IMX_8BIT, 0x3311, 0x40},
+ {IMX_8BIT, 0x3312, 0x02},
+ {IMX_8BIT, 0x3313, 0x74},
+ {IMX_8BIT, 0x331C, 0x02}, /* ?? */
+ {IMX_8BIT, 0x331D, 0x21},
+ {IMX_8BIT, 0x4084, 0x03}, /* Scaling related? */
+ {IMX_8BIT, 0x4085, 0x40},
+ {IMX_8BIT, 0x4086, 0x02},
+ {IMX_8BIT, 0x4087, 0x74},
+ {IMX_8BIT, 0x4400, 0x00}, /* STATS off */
+ {IMX_TOK_TERM, 0, 0},
+};
+
+/*
+ * It is 28/16 DS from (16:9)8m cropped setting.
+ *
+ * The 8m(16:9) cropped setting is 3360x1890 effective res.
+ * - this is larger than the expected 3264x1836 FOV
+ *
+ * Consider ISP 16x16 padding:
+ * sensor outputs 1936x1096
+ * cropped region is 3388x1918
+ */
+static struct imx_reg const imx135_1936x1096_cropped[] = {
+ GROUPED_PARAMETER_HOLD_ENABLE,
+ PLL_SETTINGS_FOR_MIPI_209_6MHZ_SALTBAY,
+ /* Mode setting */
+ {IMX_8BIT, 0x0108, 0x03},
+ {IMX_8BIT, 0x0112, 0x0A},
+ {IMX_8BIT, 0x0113, 0x0A},
+ {IMX_8BIT, 0x0381, 0x01},
+ {IMX_8BIT, 0x0383, 0x01},
+ {IMX_8BIT, 0x0385, 0x01},
+ {IMX_8BIT, 0x0387, 0x01},
+ {IMX_8BIT, 0x0390, 0x00},
+ {IMX_8BIT, 0x0391, 0x11}, /* no binning */
+ {IMX_8BIT, 0x0392, 0x00},
+ {IMX_8BIT, 0x0401, 0x02}, /* resize */
+ {IMX_8BIT, 0x0404, 0x00},
+ {IMX_8BIT, 0x0405, 0x1C}, /* 28/16 */
+ {IMX_8BIT, 0x4082, 0x00},
+ {IMX_8BIT, 0x4083, 0x00},
+ {IMX_8BIT, 0x4203, 0xFF},
+ {IMX_8BIT, 0x7006, 0x04},
+ /* Size setting */
+ {IMX_8BIT, 0x0344, 0x01}, /* X_ADD_STA */
+ {IMX_8BIT, 0x0345, 0x9A}, /* 410 */
+ {IMX_8BIT, 0x0346, 0x02}, /* Y_ADD_STA */
+ {IMX_8BIT, 0x0347, 0x58}, /* 600 */
+ {IMX_8BIT, 0x0348, 0x0E}, /* X_ADD_END */
+ {IMX_8BIT, 0x0349, 0xD5}, /* 3797 */
+ {IMX_8BIT, 0x034A, 0x09}, /* Y_ADD_END */
+ {IMX_8BIT, 0x034B, 0xD5}, /* 2517 */
+ {IMX_8BIT, 0x034C, 0x07}, /* X_OUT_SIZE */
+ {IMX_8BIT, 0x034D, 0x90}, /* 1936 */
+ {IMX_8BIT, 0x034E, 0x04}, /* Y_OUT_SIZE */
+ {IMX_8BIT, 0x034F, 0x48}, /* 1096 */
+ {IMX_8BIT, 0x0350, 0x00}, /* No Dig crop */
+ {IMX_8BIT, 0x0351, 0x00},
+ {IMX_8BIT, 0x0352, 0x00},
+ {IMX_8BIT, 0x0353, 0x00},
+ {IMX_8BIT, 0x0354, 0x0D}, /* Cut out size same as the size after crop */
+ {IMX_8BIT, 0x0355, 0x3C},
+ {IMX_8BIT, 0x0356, 0x07},
+ {IMX_8BIT, 0x0357, 0x7E},
+ {IMX_8BIT, 0x301D, 0x30}, /* ?? */
+ {IMX_8BIT, 0x3310, 0x07}, /* Write H and V size same as output size? */
+ {IMX_8BIT, 0x3311, 0x90},
+ {IMX_8BIT, 0x3312, 0x04},
+ {IMX_8BIT, 0x3313, 0x48},
+ {IMX_8BIT, 0x331C, 0x00}, /* ?? */
+ {IMX_8BIT, 0x331D, 0xAA},
+ {IMX_8BIT, 0x4084, 0x07}, /* Scaling related? */
+ {IMX_8BIT, 0x4085, 0x90},
+ {IMX_8BIT, 0x4086, 0x04},
+ {IMX_8BIT, 0x4087, 0x48},
+ {IMX_8BIT, 0x4400, 0x00}, /* STATS off */
+ {IMX_TOK_TERM, 0, 0},
+};
+
+/*
+ * It is 2.125 DS from (3:2)8m cropped setting.
+ *
+ * The 8m(3:2) cropped setting is 2992x2448 effective res.
+ * The ISP effective cropped setting should be 1408x1152 effective res.
+ *
+ * Consider ISP 16x16 padding:
+ * sensor outputs 1424x1168
+ * cropped region is 3026x2482
+ */
+static struct imx_reg const imx135_1424x1168_cropped[] = {
+ GROUPED_PARAMETER_HOLD_ENABLE,
+ PLL_SETTINGS_FOR_MIPI_209_6MHZ_SALTBAY,
+ /* Mode setting */
+ {IMX_8BIT, 0x0108, 0x03},
+ {IMX_8BIT, 0x0112, 0x0A},
+ {IMX_8BIT, 0x0113, 0x0A},
+ {IMX_8BIT, 0x0381, 0x01},
+ {IMX_8BIT, 0x0383, 0x01},
+ {IMX_8BIT, 0x0385, 0x01},
+ {IMX_8BIT, 0x0387, 0x01},
+ {IMX_8BIT, 0x0390, 0x00},
+ {IMX_8BIT, 0x0391, 0x11}, /* no binning */
+ {IMX_8BIT, 0x0392, 0x00},
+ {IMX_8BIT, 0x0401, 0x02}, /* resize */
+ {IMX_8BIT, 0x0404, 0x00},
+ {IMX_8BIT, 0x0405, 0x22}, /* 34/16=2.125 */
+ {IMX_8BIT, 0x4082, 0x00},
+ {IMX_8BIT, 0x4083, 0x00},
+ {IMX_8BIT, 0x4203, 0xFF},
+ {IMX_8BIT, 0x7006, 0x04},
+ /* Size setting */
+ {IMX_8BIT, 0x0344, 0x02}, /* X_ADD_STA */
+ {IMX_8BIT, 0x0345, 0x4E}, /* 590 */
+ {IMX_8BIT, 0x0346, 0x01}, /* Y_ADD_STA */
+ {IMX_8BIT, 0x0347, 0x3E}, /* 318 */
+ {IMX_8BIT, 0x0348, 0x0E}, /* X_ADD_END */
+ {IMX_8BIT, 0x0349, 0x1F}, /* 3615 */
+ {IMX_8BIT, 0x034A, 0x0A}, /* Y_ADD_END */
+ {IMX_8BIT, 0x034B, 0xEF}, /* 2799 */
+ {IMX_8BIT, 0x034C, 0x05}, /* X_OUT_SIZE */
+ {IMX_8BIT, 0x034D, 0x90}, /* 1424 */
+ {IMX_8BIT, 0x034E, 0x04}, /* Y_OUT_SIZE */
+ {IMX_8BIT, 0x034F, 0x90}, /* 1168 */
+ {IMX_8BIT, 0x0350, 0x00}, /* No Dig crop */
+ {IMX_8BIT, 0x0351, 0x00},
+ {IMX_8BIT, 0x0352, 0x00},
+ {IMX_8BIT, 0x0353, 0x00},
+ {IMX_8BIT, 0x0354, 0x0B}, /* Cut out size same as the size after crop */
+ {IMX_8BIT, 0x0355, 0xD2},
+ {IMX_8BIT, 0x0356, 0x09},
+ {IMX_8BIT, 0x0357, 0xB2},
+ {IMX_8BIT, 0x301D, 0x30}, /* ?? */
+ {IMX_8BIT, 0x3310, 0x05}, /* Write H and V size same as output size? */
+ {IMX_8BIT, 0x3311, 0x90},
+ {IMX_8BIT, 0x3312, 0x04},
+ {IMX_8BIT, 0x3313, 0x90},
+ {IMX_8BIT, 0x331C, 0x00}, /* ?? */
+ {IMX_8BIT, 0x331D, 0xAA},
+ {IMX_8BIT, 0x4084, 0x05}, /* Scaling related? */
+ {IMX_8BIT, 0x4085, 0x90},
+ {IMX_8BIT, 0x4086, 0x04},
+ {IMX_8BIT, 0x4087, 0x90},
+ {IMX_8BIT, 0x4400, 0x00}, /* STATS off */
+ {IMX_TOK_TERM, 0, 0},
+};
+
+/*
+ * It is 1/2 binning from 8m cropped setting.
+ *
+ * The 8m cropped setting is 3264x2448 effective res.
+ * The 2m cropped setting should be 1632x1224 effective res.
+ *
+ * Consider ISP 16x16 padding:
+ * sensor outputs 1648x1240
+ * cropped region is 3296x2480
+ */
+static struct imx_reg const imx135_2m_cropped[] = {
+ GROUPED_PARAMETER_HOLD_ENABLE,
+ PLL_SETTINGS_FOR_MIPI_209_6MHZ_SALTBAY,
+ /* Mode setting */
+ {IMX_8BIT, 0x0108, 0x03},
+ {IMX_8BIT, 0x0112, 0x0A},
+ {IMX_8BIT, 0x0113, 0x0A},
+ {IMX_8BIT, 0x0381, 0x01},
+ {IMX_8BIT, 0x0383, 0x01},
+ {IMX_8BIT, 0x0385, 0x01},
+ {IMX_8BIT, 0x0387, 0x01},
+ {IMX_8BIT, 0x0390, 0x01},
+ {IMX_8BIT, 0x0391, 0x22},
+ {IMX_8BIT, 0x0392, 0x00},
+ {IMX_8BIT, 0x0401, 0x00},
+ {IMX_8BIT, 0x0404, 0x00},
+ {IMX_8BIT, 0x0405, 0x10},
+ {IMX_8BIT, 0x4082, 0x01},
+ {IMX_8BIT, 0x4083, 0x01},
+ {IMX_8BIT, 0x4203, 0xFF},
+ {IMX_8BIT, 0x7006, 0x04},
+ /* Size setting */
+ {IMX_8BIT, 0x0344, 0x01}, /* X_ADD_STA */
+ {IMX_8BIT, 0x0345, 0xC8}, /* 464(1D0) -> 456(1C8)*/
+ {IMX_8BIT, 0x0346, 0x01}, /* Y_ADD_STA */
+ {IMX_8BIT, 0x0347, 0x40}, /* 320 */
+ {IMX_8BIT, 0x0348, 0x0E}, /* X_ADD_END */
+ {IMX_8BIT, 0x0349, 0xA7}, /* 4207-456=3751 */
+ {IMX_8BIT, 0x034A, 0x0A}, /* Y_ADD_END */
+ {IMX_8BIT, 0x034B, 0xEF}, /* 3119-320=2799 */
+ {IMX_8BIT, 0x034C, 0x06}, /* X_OUT_SIZE */
+ {IMX_8BIT, 0x034D, 0x70}, /* 1648 */
+ {IMX_8BIT, 0x034E, 0x04}, /* Y_OUT_SIZE */
+ {IMX_8BIT, 0x034F, 0xD8}, /* 1240 */
+ {IMX_8BIT, 0x0350, 0x00}, /* No Dig crop */
+ {IMX_8BIT, 0x0351, 0x00},
+ {IMX_8BIT, 0x0352, 0x00},
+ {IMX_8BIT, 0x0353, 0x00},
+ {IMX_8BIT, 0x0354, 0x06}, /* Cut out size same as the size after crop */
+ {IMX_8BIT, 0x0355, 0x70},
+ {IMX_8BIT, 0x0356, 0x04},
+ {IMX_8BIT, 0x0357, 0xD8},
+ {IMX_8BIT, 0x301D, 0x30}, /* ?? */
+ {IMX_8BIT, 0x3310, 0x06}, /* Write H and V size same as output size? */
+ {IMX_8BIT, 0x3311, 0x70},
+ {IMX_8BIT, 0x3312, 0x04},
+ {IMX_8BIT, 0x3313, 0xD8},
+ {IMX_8BIT, 0x331C, 0x00}, /* ?? */
+ {IMX_8BIT, 0x331D, 0xAA},
+ {IMX_8BIT, 0x4084, 0x00}, /* Scaling related? */
+ {IMX_8BIT, 0x4085, 0x00},
+ {IMX_8BIT, 0x4086, 0x00},
+ {IMX_8BIT, 0x4087, 0x00},
+ {IMX_8BIT, 0x4400, 0x00}, /* STATS off */
+ {IMX_TOK_TERM, 0, 0},
+};
+
+/*
+ * 8M Cropped 16:9 setting
+ *
+ * Effect res: 3264x1836
+ * Sensor out: 3280x1852
+ */
+static struct imx_reg const imx135_6m_cropped[] = {
+ GROUPED_PARAMETER_HOLD_ENABLE,
+ PLL_SETTINGS_FOR_MIPI_451_2MHZ_SALTBAY,
+ /* Mode setting */
+ {IMX_8BIT, 0x0108, 0x03},
+ {IMX_8BIT, 0x0112, 0x0A},
+ {IMX_8BIT, 0x0113, 0x0A},
+ {IMX_8BIT, 0x0381, 0x01},
+ {IMX_8BIT, 0x0383, 0x01},
+ {IMX_8BIT, 0x0385, 0x01},
+ {IMX_8BIT, 0x0387, 0x01},
+ {IMX_8BIT, 0x0390, 0x00},
+ {IMX_8BIT, 0x0391, 0x11},
+ {IMX_8BIT, 0x0392, 0x00},
+ {IMX_8BIT, 0x0401, 0x00},
+ {IMX_8BIT, 0x0404, 0x00},
+ {IMX_8BIT, 0x0405, 0x10},
+ {IMX_8BIT, 0x4082, 0x01},
+ {IMX_8BIT, 0x4083, 0x01},
+ {IMX_8BIT, 0x4203, 0xFF},
+ {IMX_8BIT, 0x7006, 0x04},
+ /* Size setting */
+ {IMX_8BIT, 0x0344, 0x01},
+ {IMX_8BIT, 0x0345, 0xD0},
+ {IMX_8BIT, 0x0346, 0x02}, /* 634 */
+ {IMX_8BIT, 0x0347, 0x7A},
+ {IMX_8BIT, 0x0348, 0x0E},
+ {IMX_8BIT, 0x0349, 0x9F},
+ {IMX_8BIT, 0x034A, 0x09}, /* 2485 */
+ {IMX_8BIT, 0x034B, 0xB5},
+ {IMX_8BIT, 0x034C, 0x0C}, /* 3280 */
+ {IMX_8BIT, 0x034D, 0xD0},
+ {IMX_8BIT, 0x034E, 0x07}, /* 1852 */
+ {IMX_8BIT, 0x034F, 0x3C},
+ {IMX_8BIT, 0x0350, 0x00}, /* No Dig crop */
+ {IMX_8BIT, 0x0351, 0x00},
+ {IMX_8BIT, 0x0352, 0x00},
+ {IMX_8BIT, 0x0353, 0x00},
+ {IMX_8BIT, 0x0354, 0x0C}, /* Cut out size same as the size after crop */
+ {IMX_8BIT, 0x0355, 0xD0},
+ {IMX_8BIT, 0x0356, 0x07},
+ {IMX_8BIT, 0x0357, 0x3C},
+ {IMX_8BIT, 0x301D, 0x30}, /* ?? */
+ {IMX_8BIT, 0x3310, 0x0C}, /* Write H and V size same as output size? */
+ {IMX_8BIT, 0x3311, 0xD0},
+ {IMX_8BIT, 0x3312, 0x07},
+ {IMX_8BIT, 0x3313, 0x3C},
+ {IMX_8BIT, 0x331C, 0x00}, /* ?? */
+ {IMX_8BIT, 0x331D, 0x10},
+ {IMX_8BIT, 0x4084, 0x00}, /* Scaling related? */
+ {IMX_8BIT, 0x4085, 0x00},
+ {IMX_8BIT, 0x4086, 0x00},
+ {IMX_8BIT, 0x4087, 0x00},
+ {IMX_8BIT, 0x4400, 0x00}, /* STATS off */
+ {IMX_TOK_TERM, 0, 0},
+};
+
+static struct imx_reg const imx135_8m_cropped[] = {
+ GROUPED_PARAMETER_HOLD_ENABLE,
+ PLL_SETTINGS_FOR_MIPI_451_2MHZ_SALTBAY,
+ /* Mode setting */
+ {IMX_8BIT, 0x0108, 0x03},
+ {IMX_8BIT, 0x0112, 0x0A},
+ {IMX_8BIT, 0x0113, 0x0A},
+ {IMX_8BIT, 0x0381, 0x01},
+ {IMX_8BIT, 0x0383, 0x01},
+ {IMX_8BIT, 0x0385, 0x01},
+ {IMX_8BIT, 0x0387, 0x01},
+ {IMX_8BIT, 0x0390, 0x00},
+ {IMX_8BIT, 0x0391, 0x11},
+ {IMX_8BIT, 0x0392, 0x00},
+ {IMX_8BIT, 0x0401, 0x00},
+ {IMX_8BIT, 0x0404, 0x00},
+ {IMX_8BIT, 0x0405, 0x10},
+ {IMX_8BIT, 0x4082, 0x01},
+ {IMX_8BIT, 0x4083, 0x01},
+ {IMX_8BIT, 0x4203, 0xFF},
+ {IMX_8BIT, 0x7006, 0x04},
+ /* Size setting */
+ {IMX_8BIT, 0x0344, 0x01},
+ {IMX_8BIT, 0x0345, 0xD0},
+ {IMX_8BIT, 0x0346, 0x01},
+ {IMX_8BIT, 0x0347, 0x48},
+ {IMX_8BIT, 0x0348, 0x0E},
+ {IMX_8BIT, 0x0349, 0x9F},
+ {IMX_8BIT, 0x034A, 0x0A},
+ {IMX_8BIT, 0x034B, 0xE7},
+ {IMX_8BIT, 0x034C, 0x0C}, /* 3280 */
+ {IMX_8BIT, 0x034D, 0xD0},
+ {IMX_8BIT, 0x034E, 0x09}, /* 2464 */
+ {IMX_8BIT, 0x034F, 0xA0},
+ {IMX_8BIT, 0x0350, 0x00}, /* No Dig crop */
+ {IMX_8BIT, 0x0351, 0x00},
+ {IMX_8BIT, 0x0352, 0x00},
+ {IMX_8BIT, 0x0353, 0x00},
+ {IMX_8BIT, 0x0354, 0x0C}, /* Cut out size same as the size after crop */
+ {IMX_8BIT, 0x0355, 0xD0},
+ {IMX_8BIT, 0x0356, 0x09},
+ {IMX_8BIT, 0x0357, 0xA0},
+ {IMX_8BIT, 0x301D, 0x30}, /* ?? */
+ {IMX_8BIT, 0x3310, 0x0C}, /* Write H and V size same as output size? */
+ {IMX_8BIT, 0x3311, 0xD0},
+ {IMX_8BIT, 0x3312, 0x09},
+ {IMX_8BIT, 0x3313, 0xA0},
+ {IMX_8BIT, 0x331C, 0x00}, /* ?? */
+ {IMX_8BIT, 0x331D, 0x10},
+ {IMX_8BIT, 0x4084, 0x00}, /* Scaling related? */
+ {IMX_8BIT, 0x4085, 0x00},
+ {IMX_8BIT, 0x4086, 0x00},
+ {IMX_8BIT, 0x4087, 0x00},
+ {IMX_8BIT, 0x4400, 0x00}, /* STATS off */
+ {IMX_TOK_TERM, 0, 0},
+};
+
+static struct imx_reg const imx135_8m_scaled_from_12m[] = {
+ GROUPED_PARAMETER_HOLD_ENABLE,
+ PLL_SETTINGS_FOR_MIPI_451_2MHZ_SALTBAY,
+ /* Mode setting */
+ {IMX_8BIT, 0x0108, 0x03},
+ {IMX_8BIT, 0x0112, 0x0A},
+ {IMX_8BIT, 0x0113, 0x0A},
+ {IMX_8BIT, 0x0381, 0x01},
+ {IMX_8BIT, 0x0383, 0x01},
+ {IMX_8BIT, 0x0385, 0x01},
+ {IMX_8BIT, 0x0387, 0x01},
+ {IMX_8BIT, 0x0390, 0x00},
+ {IMX_8BIT, 0x0391, 0x11},
+ {IMX_8BIT, 0x0392, 0x00},
+ {IMX_8BIT, 0x0401, 0x02}, /* Scaling */
+ {IMX_8BIT, 0x0404, 0x00},
+ {IMX_8BIT, 0x0405, 0x14},
+ {IMX_8BIT, 0x4082, 0x00},
+ {IMX_8BIT, 0x4083, 0x00},
+ {IMX_8BIT, 0x4203, 0xFF},
+ {IMX_8BIT, 0x7006, 0x04},
+ /* Size setting */
+ {IMX_8BIT, 0x0344, 0x00},
+ {IMX_8BIT, 0x0345, 0x36},
+ {IMX_8BIT, 0x0346, 0x00},
+ {IMX_8BIT, 0x0347, 0x14},
+ {IMX_8BIT, 0x0348, 0x10},
+ {IMX_8BIT, 0x0349, 0x39},
+ {IMX_8BIT, 0x034A, 0x0C},
+ {IMX_8BIT, 0x034B, 0x1B},
+ {IMX_8BIT, 0x034C, 0x0C}, /* 3280x2464 */
+ {IMX_8BIT, 0x034D, 0xD0},
+ {IMX_8BIT, 0x034E, 0x09},
+ {IMX_8BIT, 0x034F, 0xA0},
+ {IMX_8BIT, 0x0350, 0x00}, /* No Dig crop */
+ {IMX_8BIT, 0x0351, 0x00},
+ {IMX_8BIT, 0x0352, 0x00},
+ {IMX_8BIT, 0x0353, 0x00},
+ {IMX_8BIT, 0x0354, 0x10}, /* Cut out size same as the size after crop */
+ {IMX_8BIT, 0x0355, 0x04},
+ {IMX_8BIT, 0x0356, 0x0C},
+ {IMX_8BIT, 0x0357, 0x08},
+ {IMX_8BIT, 0x301D, 0x30}, /* ?? */
+ {IMX_8BIT, 0x3310, 0x0C}, /* Write H and V size same as output size? */
+ {IMX_8BIT, 0x3311, 0xD0},
+ {IMX_8BIT, 0x3312, 0x09},
+ {IMX_8BIT, 0x3313, 0xA0},
+ {IMX_8BIT, 0x331C, 0x02}, /* ?? */
+ {IMX_8BIT, 0x331D, 0xA0},
+ {IMX_8BIT, 0x4084, 0x0C}, /* Scaling related? */
+ {IMX_8BIT, 0x4085, 0xD0},
+ {IMX_8BIT, 0x4086, 0x09},
+ {IMX_8BIT, 0x4087, 0xA0},
+ {IMX_8BIT, 0x4400, 0x00}, /* STATS off */
+ {IMX_TOK_TERM, 0, 0},
+};
+
+static struct imx_reg const imx135_8m_scaled_from_12m_for_mipi342[] = {
+ GROUPED_PARAMETER_HOLD_ENABLE,
+ PLL_SETTINGS_FOR_MIPI_342MHZ_SALTBAY,
+ /* Mode setting */
+ {IMX_8BIT, 0x0108, 0x03},
+ {IMX_8BIT, 0x0112, 0x0A},
+ {IMX_8BIT, 0x0113, 0x0A},
+ {IMX_8BIT, 0x0381, 0x01},
+ {IMX_8BIT, 0x0383, 0x01},
+ {IMX_8BIT, 0x0385, 0x01},
+ {IMX_8BIT, 0x0387, 0x01},
+ {IMX_8BIT, 0x0390, 0x00},
+ {IMX_8BIT, 0x0391, 0x11},
+ {IMX_8BIT, 0x0392, 0x00},
+ {IMX_8BIT, 0x0401, 0x02}, /* Scaling */
+ {IMX_8BIT, 0x0404, 0x00},
+ {IMX_8BIT, 0x0405, 0x14},
+ {IMX_8BIT, 0x4082, 0x00},
+ {IMX_8BIT, 0x4083, 0x00},
+ {IMX_8BIT, 0x4203, 0xFF},
+ {IMX_8BIT, 0x7006, 0x04},
+ /* Size setting */
+ {IMX_8BIT, 0x0344, 0x00},
+ {IMX_8BIT, 0x0345, 0x36},
+ {IMX_8BIT, 0x0346, 0x00},
+ {IMX_8BIT, 0x0347, 0x14},
+ {IMX_8BIT, 0x0348, 0x10},
+ {IMX_8BIT, 0x0349, 0x39},
+ {IMX_8BIT, 0x034A, 0x0C},
+ {IMX_8BIT, 0x034B, 0x1B},
+ {IMX_8BIT, 0x034C, 0x0C}, /* 3280x2464 */
+ {IMX_8BIT, 0x034D, 0xD0},
+ {IMX_8BIT, 0x034E, 0x09},
+ {IMX_8BIT, 0x034F, 0xA0},
+ {IMX_8BIT, 0x0350, 0x00}, /* No Dig crop */
+ {IMX_8BIT, 0x0351, 0x00},
+ {IMX_8BIT, 0x0352, 0x00},
+ {IMX_8BIT, 0x0353, 0x00},
+ {IMX_8BIT, 0x0354, 0x10}, /* Cut out size same as the size after crop */
+ {IMX_8BIT, 0x0355, 0x04},
+ {IMX_8BIT, 0x0356, 0x0C},
+ {IMX_8BIT, 0x0357, 0x08},
+ {IMX_8BIT, 0x301D, 0x30}, /* ?? */
+ {IMX_8BIT, 0x3310, 0x0C}, /* Write H and V size same as output size? */
+ {IMX_8BIT, 0x3311, 0xD0},
+ {IMX_8BIT, 0x3312, 0x09},
+ {IMX_8BIT, 0x3313, 0xA0},
+ {IMX_8BIT, 0x331C, 0x02}, /* ?? */
+ {IMX_8BIT, 0x331D, 0xA0},
+ {IMX_8BIT, 0x4084, 0x0C}, /* Resize IMG H and V size -> Scaling related? */
+ {IMX_8BIT, 0x4085, 0xD0},
+ {IMX_8BIT, 0x4086, 0x09},
+ {IMX_8BIT, 0x4087, 0xA0},
+ {IMX_8BIT, 0x4400, 0x00}, /* STATS off */
+ {IMX_TOK_TERM, 0, 0},
+};
+
+static struct imx_reg const imx135_6m[] = {
+ GROUPED_PARAMETER_HOLD_ENABLE,
+ PLL_SETTINGS_FOR_MIPI_451_2MHZ_SALTBAY,
+ /* Mode setting */
+ {IMX_8BIT, 0x0108, 0x03},
+ {IMX_8BIT, 0x0112, 0x0A},
+ {IMX_8BIT, 0x0113, 0x0A},
+ {IMX_8BIT, 0x0381, 0x01},
+ {IMX_8BIT, 0x0383, 0x01},
+ {IMX_8BIT, 0x0385, 0x01},
+ {IMX_8BIT, 0x0387, 0x01},
+ {IMX_8BIT, 0x0390, 0x00},
+ {IMX_8BIT, 0x0391, 0x11},
+ {IMX_8BIT, 0x0392, 0x00},
+ {IMX_8BIT, 0x0401, 0x02},
+ {IMX_8BIT, 0x0404, 0x00},
+ {IMX_8BIT, 0x0405, 0x14},
+ {IMX_8BIT, 0x4082, 0x00},
+ {IMX_8BIT, 0x4083, 0x00},
+ {IMX_8BIT, 0x4203, 0xFF},
+ {IMX_8BIT, 0x7006, 0x04},
+ /* Size setting */
+ {IMX_8BIT, 0x0344, 0x00}, /* 36, 194, 1039, a9f 4100x2316 */
+ {IMX_8BIT, 0x0345, 0x36},
+ {IMX_8BIT, 0x0346, 0x01},
+ {IMX_8BIT, 0x0347, 0x94},
+ {IMX_8BIT, 0x0348, 0x10},
+ {IMX_8BIT, 0x0349, 0x39},
+ {IMX_8BIT, 0x034A, 0x0A},
+ {IMX_8BIT, 0x034B, 0x9F},
+ {IMX_8BIT, 0x034C, 0x0C}, /* 3280x1852 */
+ {IMX_8BIT, 0x034D, 0xD0},
+ {IMX_8BIT, 0x034E, 0x07},
+ {IMX_8BIT, 0x034F, 0x3C},
+ {IMX_8BIT, 0x0350, 0x00},
+ {IMX_8BIT, 0x0351, 0x00},
+ {IMX_8BIT, 0x0352, 0x00},
+ {IMX_8BIT, 0x0353, 0x00},
+ {IMX_8BIT, 0x0354, 0x10}, /* 4100x2316 */
+ {IMX_8BIT, 0x0355, 0x04},
+ {IMX_8BIT, 0x0356, 0x09},
+ {IMX_8BIT, 0x0357, 0x0C},
+ {IMX_8BIT, 0x301D, 0x30},
+ {IMX_8BIT, 0x3310, 0x0C},
+ {IMX_8BIT, 0x3311, 0xD0},
+ {IMX_8BIT, 0x3312, 0x07},
+ {IMX_8BIT, 0x3313, 0x3C},
+ {IMX_8BIT, 0x331C, 0x02},
+ {IMX_8BIT, 0x331D, 0xA0},
+ {IMX_8BIT, 0x4084, 0x0C},
+ {IMX_8BIT, 0x4085, 0xD0},
+ {IMX_8BIT, 0x4086, 0x07},
+ {IMX_8BIT, 0x4087, 0x3C},
+ {IMX_8BIT, 0x4400, 0x00},
+ {IMX_TOK_TERM, 0, 0},
+};
+
+static struct imx_reg const imx135_6m_for_mipi_342[] = {
+ GROUPED_PARAMETER_HOLD_ENABLE,
+ PLL_SETTINGS_FOR_MIPI_342MHZ_SALTBAY,
+ /* Mode setting */
+ {IMX_8BIT, 0x0108, 0x03},
+ {IMX_8BIT, 0x0112, 0x0A},
+ {IMX_8BIT, 0x0113, 0x0A},
+ {IMX_8BIT, 0x0381, 0x01},
+ {IMX_8BIT, 0x0383, 0x01},
+ {IMX_8BIT, 0x0385, 0x01},
+ {IMX_8BIT, 0x0387, 0x01},
+ {IMX_8BIT, 0x0390, 0x00},
+ {IMX_8BIT, 0x0391, 0x11},
+ {IMX_8BIT, 0x0392, 0x00},
+ {IMX_8BIT, 0x0401, 0x02},
+ {IMX_8BIT, 0x0404, 0x00},
+ {IMX_8BIT, 0x0405, 0x14},
+ {IMX_8BIT, 0x4082, 0x00},
+ {IMX_8BIT, 0x4083, 0x00},
+ {IMX_8BIT, 0x4203, 0xFF},
+ {IMX_8BIT, 0x7006, 0x04},
+ /* Size setting */
+ {IMX_8BIT, 0x0344, 0x00}, /* 36, 194, 1039, a9f 4100x2316 */
+ {IMX_8BIT, 0x0345, 0x36},
+ {IMX_8BIT, 0x0346, 0x01},
+ {IMX_8BIT, 0x0347, 0x94},
+ {IMX_8BIT, 0x0348, 0x10},
+ {IMX_8BIT, 0x0349, 0x39},
+ {IMX_8BIT, 0x034A, 0x0A},
+ {IMX_8BIT, 0x034B, 0x9F},
+ {IMX_8BIT, 0x034C, 0x0C}, /* 3280x1852 */
+ {IMX_8BIT, 0x034D, 0xD0},
+ {IMX_8BIT, 0x034E, 0x07},
+ {IMX_8BIT, 0x034F, 0x3C},
+ {IMX_8BIT, 0x0350, 0x00},
+ {IMX_8BIT, 0x0351, 0x00},
+ {IMX_8BIT, 0x0352, 0x00},
+ {IMX_8BIT, 0x0353, 0x00},
+ {IMX_8BIT, 0x0354, 0x10}, /* 4100x2316 */
+ {IMX_8BIT, 0x0355, 0x04},
+ {IMX_8BIT, 0x0356, 0x09},
+ {IMX_8BIT, 0x0357, 0x0C},
+ {IMX_8BIT, 0x301D, 0x30},
+ {IMX_8BIT, 0x3310, 0x0C},
+ {IMX_8BIT, 0x3311, 0xD0},
+ {IMX_8BIT, 0x3312, 0x07},
+ {IMX_8BIT, 0x3313, 0x3C},
+ {IMX_8BIT, 0x331C, 0x02},
+ {IMX_8BIT, 0x331D, 0xA0},
+ {IMX_8BIT, 0x4084, 0x0C},
+ {IMX_8BIT, 0x4085, 0xD0},
+ {IMX_8BIT, 0x4086, 0x07},
+ {IMX_8BIT, 0x4087, 0x3C},
+ {IMX_8BIT, 0x4400, 0x00},
+ {IMX_TOK_TERM, 0, 0},
+};
+
+/*
+ * FOV is: 3280x2464, larger than 3264x2448.
+ * Sensor output: 336x256
+ * Cropping region: 3444x2624
+ */
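For this mode the cropping region is the output size multiplied by both the 2x2 binning factor and the 82/16 post-binning scaler noted in the register comments below. A quick check of those numbers, assuming the two factors simply combine multiplicatively:

    /* binned_scaled_crop.c - check of the 336x256 mode geometry above:
     * total reduction = 2 (binning) * 82/16 (scaler) = 10.25.
     */
    #include <stdio.h>

    int main(void)
    {
            const int out_w = 336, out_h = 256;  /* sensor output */
            const int bin = 2;                   /* 2x2 binning (0x0391 = 0x22) */
            const double scale = 0x52 / 16.0;    /* 82/16 = 5.125 */

            int crop_w = (int)(out_w * bin * scale); /* 336 * 10.25 = 3444 */
            int crop_h = (int)(out_h * bin * scale); /* 256 * 10.25 = 2624 */

            printf("cropped region %dx%d\n", crop_w, crop_h);
            return 0;
    }

The result, 3444x2624, matches both the comment above and the x/y start and end addresses programmed in the size-setting registers of this table.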
+static struct imx_reg const imx135_336x256[] = {
+ GROUPED_PARAMETER_HOLD_ENABLE,
+ PLL_SETTINGS_FOR_MIPI_209_6MHZ_SALTBAY,
+ /* mode setting */
+ {IMX_8BIT, 0x0108, 0x03},
+ {IMX_8BIT, 0x0112, 0x0A},
+ {IMX_8BIT, 0x0113, 0x0A},
+ {IMX_8BIT, 0x0381, 0x01},
+ {IMX_8BIT, 0x0383, 0x01},
+ {IMX_8BIT, 0x0385, 0x01},
+ {IMX_8BIT, 0x0387, 0x01},
+ {IMX_8BIT, 0x0390, 0x01},
+ {IMX_8BIT, 0x0391, 0x22},
+ {IMX_8BIT, 0x0392, 0x00},
+ {IMX_8BIT, 0x0401, 0x02}, /* 2x binning */
+ {IMX_8BIT, 0x0404, 0x00},
+ {IMX_8BIT, 0x0405, 0x52}, /* scaling: 82/16 */
+ {IMX_8BIT, 0x4082, 0x00},
+ {IMX_8BIT, 0x4083, 0x00},
+ {IMX_8BIT, 0x4203, 0xFF},
+ {IMX_8BIT, 0x7006, 0x04},
+ /* Size setting */
+ {IMX_8BIT, 0x0344, 0x01}, /* x_start: 374 */
+ {IMX_8BIT, 0x0345, 0x76},
+ {IMX_8BIT, 0x0346, 0x00}, /* y_start: 248 */
+ {IMX_8BIT, 0x0347, 0xF8},
+ {IMX_8BIT, 0x0348, 0x0E}, /* x_end: 3817 */
+ {IMX_8BIT, 0x0349, 0xE9},
+ {IMX_8BIT, 0x034A, 0x0B}, /* y_end: 2871 */
+ {IMX_8BIT, 0x034B, 0x37},
+ {IMX_8BIT, 0x034C, 0x01}, /* x_out: 336 */
+ {IMX_8BIT, 0x034D, 0x50},
+ {IMX_8BIT, 0x034E, 0x01}, /* y_out: 256 */
+ {IMX_8BIT, 0x034F, 0x00},
+ {IMX_8BIT, 0x0350, 0x00},
+ {IMX_8BIT, 0x0351, 0x00},
+ {IMX_8BIT, 0x0352, 0x00},
+ {IMX_8BIT, 0x0353, 0x00},
+ {IMX_8BIT, 0x0354, 0x06}, /* dig x_out: 1722 */
+ {IMX_8BIT, 0x0355, 0xBA},
+ {IMX_8BIT, 0x0356, 0x05}, /* dig y_out: 1312 */
+ {IMX_8BIT, 0x0357, 0x20},
+ {IMX_8BIT, 0x301D, 0x30},
+ {IMX_8BIT, 0x3310, 0x01}, /* ?: x_out */
+ {IMX_8BIT, 0x3311, 0x50},
+ {IMX_8BIT, 0x3312, 0x01}, /* ?: y_out */
+ {IMX_8BIT, 0x3313, 0x00},
+ {IMX_8BIT, 0x331C, 0x02},
+ {IMX_8BIT, 0x331D, 0x4E},
+ {IMX_8BIT, 0x4084, 0x01}, /* ?: x_out */
+ {IMX_8BIT, 0x4085, 0x50},
+ {IMX_8BIT, 0x4086, 0x01}, /* ?: y_out */
+ {IMX_8BIT, 0x4087, 0x00},
+ {IMX_8BIT, 0x4400, 0x00},
+ {IMX_TOK_TERM, 0, 0},
+};
+
+static struct imx_reg const imx135_1m[] = {
+ GROUPED_PARAMETER_HOLD_ENABLE,
+ PLL_SETTINGS_FOR_MIPI_209_6MHZ_SALTBAY,
+ /* mode setting */
+ {IMX_8BIT, 0x0108, 0x03},
+ {IMX_8BIT, 0x0112, 0x0A},
+ {IMX_8BIT, 0x0113, 0x0A},
+ {IMX_8BIT, 0x0381, 0x01},
+ {IMX_8BIT, 0x0383, 0x01},
+ {IMX_8BIT, 0x0385, 0x01},
+ {IMX_8BIT, 0x0387, 0x01},
+ {IMX_8BIT, 0x0390, 0x01},
+ {IMX_8BIT, 0x0391, 0x22},
+ {IMX_8BIT, 0x0392, 0x00},
+ {IMX_8BIT, 0x0401, 0x02},
+ {IMX_8BIT, 0x0404, 0x00},
+ {IMX_8BIT, 0x0405, 0x1F},
+ {IMX_8BIT, 0x4082, 0x00},
+ {IMX_8BIT, 0x4083, 0x00},
+ {IMX_8BIT, 0x4203, 0xFF},
+ {IMX_8BIT, 0x7006, 0x04},
+ /* Size setting */
+ {IMX_8BIT, 0x0344, 0x00},
+ {IMX_8BIT, 0x0345, 0x58},
+ {IMX_8BIT, 0x0346, 0x00},
+ {IMX_8BIT, 0x0347, 0x28},
+ {IMX_8BIT, 0x0348, 0x10},
+ {IMX_8BIT, 0x0349, 0x17},
+ {IMX_8BIT, 0x034A, 0x0C},
+ {IMX_8BIT, 0x034B, 0x07},
+ {IMX_8BIT, 0x034C, 0x04},
+ {IMX_8BIT, 0x034D, 0x10},
+ {IMX_8BIT, 0x034E, 0x03},
+ {IMX_8BIT, 0x034F, 0x10},
+ {IMX_8BIT, 0x0350, 0x00},
+ {IMX_8BIT, 0x0351, 0x00},
+ {IMX_8BIT, 0x0352, 0x00},
+ {IMX_8BIT, 0x0353, 0x00},
+ {IMX_8BIT, 0x0354, 0x07},
+ {IMX_8BIT, 0x0355, 0xE0},
+ {IMX_8BIT, 0x0356, 0x05},
+ {IMX_8BIT, 0x0357, 0xF0},
+ {IMX_8BIT, 0x301D, 0x30},
+ {IMX_8BIT, 0x3310, 0x04},
+ {IMX_8BIT, 0x3311, 0x10},
+ {IMX_8BIT, 0x3312, 0x03},
+ {IMX_8BIT, 0x3313, 0x10},
+ {IMX_8BIT, 0x331C, 0x02},
+ {IMX_8BIT, 0x331D, 0x4E},
+ {IMX_8BIT, 0x4084, 0x04},
+ {IMX_8BIT, 0x4085, 0x10},
+ {IMX_8BIT, 0x4086, 0x03},
+ {IMX_8BIT, 0x4087, 0x10},
+ {IMX_8BIT, 0x4400, 0x00},
+ {IMX_TOK_TERM, 0, 0},
+};
+
+static struct imx_reg const imx135_3m_binning[] = {
+ GROUPED_PARAMETER_HOLD_ENABLE,
+ PLL_SETTINGS_FOR_MIPI_209_6MHZ_SALTBAY,
+ /* Mode setting */
+ {IMX_8BIT, 0x0108, 0x03},
+ {IMX_8BIT, 0x0112, 0x0A},
+ {IMX_8BIT, 0x0113, 0x0A},
+ {IMX_8BIT, 0x0381, 0x01},
+ {IMX_8BIT, 0x0383, 0x01},
+ {IMX_8BIT, 0x0385, 0x01},
+ {IMX_8BIT, 0x0387, 0x01},
+ {IMX_8BIT, 0x0390, 0x01}, /* Binning */
+ {IMX_8BIT, 0x0391, 0x22}, /* 2x2 binning */
+ {IMX_8BIT, 0x0392, 0x00}, /* average */
+ {IMX_8BIT, 0x0401, 0x00},
+ {IMX_8BIT, 0x0404, 0x00},
+ {IMX_8BIT, 0x0405, 0x10},
+ {IMX_8BIT, 0x4082, 0x01},
+ {IMX_8BIT, 0x4083, 0x01},
+ {IMX_8BIT, 0x4203, 0xFF},
+ {IMX_8BIT, 0x7006, 0x04},
+ /* Size setting */
+ {IMX_8BIT, 0x0344, 0x00},
+ {IMX_8BIT, 0x0345, 0x28},
+ {IMX_8BIT, 0x0346, 0x00},
+ {IMX_8BIT, 0x0347, 0x08},
+ {IMX_8BIT, 0x0348, 0x10},
+ {IMX_8BIT, 0x0349, 0x47},
+ {IMX_8BIT, 0x034A, 0x0C},
+ {IMX_8BIT, 0x034B, 0x27},
+ {IMX_8BIT, 0x034C, 0x08},
+ {IMX_8BIT, 0x034D, 0x10},
+ {IMX_8BIT, 0x034E, 0x06},
+ {IMX_8BIT, 0x034F, 0x10},
+ {IMX_8BIT, 0x0350, 0x00},
+ {IMX_8BIT, 0x0351, 0x00},
+ {IMX_8BIT, 0x0352, 0x00},
+ {IMX_8BIT, 0x0353, 0x00},
+ {IMX_8BIT, 0x0354, 0x08},
+ {IMX_8BIT, 0x0355, 0x10},
+ {IMX_8BIT, 0x0356, 0x06},
+ {IMX_8BIT, 0x0357, 0x10},
+ {IMX_8BIT, 0x301D, 0x30},
+ {IMX_8BIT, 0x3310, 0x08},
+ {IMX_8BIT, 0x3311, 0x10},
+ {IMX_8BIT, 0x3312, 0x06},
+ {IMX_8BIT, 0x3313, 0x10},
+ {IMX_8BIT, 0x331C, 0x00},
+ {IMX_8BIT, 0x331D, 0xAA},
+ {IMX_8BIT, 0x4084, 0x00},
+ {IMX_8BIT, 0x4085, 0x00},
+ {IMX_8BIT, 0x4086, 0x00},
+ {IMX_8BIT, 0x4087, 0x00},
+ {IMX_8BIT, 0x4400, 0x00},
+ {IMX_TOK_TERM, 0, 0},
+};
+
+/* 1080P 1936x1104 */
+static struct imx_reg const imx135_1080p_binning[] = {
+ GROUPED_PARAMETER_HOLD_ENABLE,
+ PLL_SETTINGS_FOR_MIPI_209_6MHZ_SALTBAY,
+ /* Mode setting */
+ {IMX_8BIT, 0x0108, 0x03},
+ {IMX_8BIT, 0x0112, 0x0A},
+ {IMX_8BIT, 0x0113, 0x0A},
+ {IMX_8BIT, 0x0381, 0x01},
+ {IMX_8BIT, 0x0383, 0x01},
+ {IMX_8BIT, 0x0385, 0x01},
+ {IMX_8BIT, 0x0387, 0x01},
+ {IMX_8BIT, 0x0390, 0x01},
+ {IMX_8BIT, 0x0391, 0x22},
+ {IMX_8BIT, 0x0392, 0x00},
+ {IMX_8BIT, 0x0401, 0x02},
+ {IMX_8BIT, 0x0404, 0x00},
+ {IMX_8BIT, 0x0405, 0x11},
+ {IMX_8BIT, 0x4082, 0x00},
+ {IMX_8BIT, 0x4083, 0x00},
+ {IMX_8BIT, 0x7006, 0x04},
+ /* Size setting */
+ {IMX_8BIT, 0x0344, 0x00},
+ {IMX_8BIT, 0x0345, 0x2E},
+ {IMX_8BIT, 0x0346, 0x01},
+ {IMX_8BIT, 0x0347, 0x84},
+ {IMX_8BIT, 0x0348, 0x10},
+ {IMX_8BIT, 0x0349, 0x41},
+ {IMX_8BIT, 0x034A, 0x0A},
+ {IMX_8BIT, 0x034B, 0xAF},
+ {IMX_8BIT, 0x034C, 0x07},
+ {IMX_8BIT, 0x034D, 0x90},
+ {IMX_8BIT, 0x034E, 0x04},
+ {IMX_8BIT, 0x034F, 0x50},
+ {IMX_8BIT, 0x0350, 0x00},
+ {IMX_8BIT, 0x0351, 0x00},
+ {IMX_8BIT, 0x0352, 0x00},
+ {IMX_8BIT, 0x0353, 0x00},
+ {IMX_8BIT, 0x0354, 0x08},
+ {IMX_8BIT, 0x0355, 0x0A},
+ {IMX_8BIT, 0x0356, 0x04},
+ {IMX_8BIT, 0x0357, 0x96},
+ {IMX_8BIT, 0x301D, 0x30},
+ {IMX_8BIT, 0x3310, 0x07},
+ {IMX_8BIT, 0x3311, 0x90},
+ {IMX_8BIT, 0x3312, 0x04},
+ {IMX_8BIT, 0x3313, 0x50},
+ {IMX_8BIT, 0x331C, 0x01},
+ {IMX_8BIT, 0x331D, 0x00},
+ {IMX_8BIT, 0x4084, 0x07},
+ {IMX_8BIT, 0x4085, 0x90},
+ {IMX_8BIT, 0x4086, 0x04},
+ {IMX_8BIT, 0x4087, 0x50},
+ {IMX_8BIT, 0x4400, 0x00},
+ {IMX_TOK_TERM, 0, 0},
+};
+
+static const struct imx_reg imx135_1080p_nodvs_fullfov_max_clock[] = {
+ GROUPED_PARAMETER_HOLD_ENABLE,
+ PLL_SETTINGS_FOR_MIPI_451_2MHZ_SALTBAY,
+ /* mode setting */
+ { IMX_8BIT, 0x0108, 0x03 },
+ { IMX_8BIT, 0x0112, 0x0A },
+ { IMX_8BIT, 0x0113, 0x0A },
+ { IMX_8BIT, 0x0381, 0x01 },
+ { IMX_8BIT, 0x0383, 0x01 },
+ { IMX_8BIT, 0x0385, 0x01 },
+ { IMX_8BIT, 0x0387, 0x01 },
+ { IMX_8BIT, 0x0390, 0x01 },
+ { IMX_8BIT, 0x0391, 0x22 },
+ { IMX_8BIT, 0x0392, 0x00 },
+ { IMX_8BIT, 0x0401, 0x00 },
+ { IMX_8BIT, 0x0404, 0x00 },
+ { IMX_8BIT, 0x0405, 0x10 },
+ { IMX_8BIT, 0x4082, 0x01 },
+ { IMX_8BIT, 0x4083, 0x01 },
+ { IMX_8BIT, 0x7006, 0x04 },
+ /* size setting */
+ { IMX_8BIT, 0x0344, 0x00 }, /* 168,464,4039,2655: 3872x2192 */
+ { IMX_8BIT, 0x0345, 0xA8 },
+ { IMX_8BIT, 0x0346, 0x01 },
+ { IMX_8BIT, 0x0347, 0xD0 },
+ { IMX_8BIT, 0x0348, 0x0F },
+ { IMX_8BIT, 0x0349, 0xC7 },
+ { IMX_8BIT, 0x034A, 0x0A },
+ { IMX_8BIT, 0x034B, 0x5F },
+ { IMX_8BIT, 0x034C, 0x07 }, /*1936 x 1096 */
+ { IMX_8BIT, 0x034D, 0x90 },
+ { IMX_8BIT, 0x034E, 0x04 },
+ { IMX_8BIT, 0x034F, 0x48 },
+ { IMX_8BIT, 0x0350, 0x00 },
+ { IMX_8BIT, 0x0351, 0x00 },
+ { IMX_8BIT, 0x0352, 0x00 },
+ { IMX_8BIT, 0x0353, 0x00 },
+ { IMX_8BIT, 0x0354, 0x07 }, /*1936 x 1096 */
+ { IMX_8BIT, 0x0355, 0x90 },
+ { IMX_8BIT, 0x0356, 0x04 },
+ { IMX_8BIT, 0x0357, 0x48 },
+ { IMX_8BIT, 0x301D, 0x30 },
+ { IMX_8BIT, 0x3310, 0x07 },
+ { IMX_8BIT, 0x3311, 0x90 },
+ { IMX_8BIT, 0x3312, 0x04 },
+ { IMX_8BIT, 0x3313, 0x48 },
+ { IMX_8BIT, 0x331C, 0x04 },
+ { IMX_8BIT, 0x331D, 0xB0 },
+ { IMX_8BIT, 0x4084, 0x07 },
+ { IMX_8BIT, 0x4085, 0x90 },
+ { IMX_8BIT, 0x4086, 0x04 },
+ { IMX_8BIT, 0x4087, 0x48 },
+ { IMX_8BIT, 0x4400, 0x00 },
+ { IMX_TOK_TERM, 0, 0}
+};
+
+/* 1080P NODVS 1936x1096 */
+static const struct imx_reg imx135_1080p_nodvs_max_clock[] = {
+ GROUPED_PARAMETER_HOLD_ENABLE,
+ PLL_SETTINGS_FOR_MIPI_451_2MHZ_SALTBAY,
+ /* mode setting */
+ { IMX_8BIT, 0x0108, 0x03 },
+ { IMX_8BIT, 0x0112, 0x0A },
+ { IMX_8BIT, 0x0113, 0x0A },
+ { IMX_8BIT, 0x0381, 0x01 },
+ { IMX_8BIT, 0x0383, 0x01 },
+ { IMX_8BIT, 0x0385, 0x01 },
+ { IMX_8BIT, 0x0387, 0x01 },
+ { IMX_8BIT, 0x0390, 0x01 },
+ { IMX_8BIT, 0x0391, 0x22 },
+ { IMX_8BIT, 0x0392, 0x00 },
+ { IMX_8BIT, 0x0401, 0x02 },
+ { IMX_8BIT, 0x0404, 0x00 },
+ { IMX_8BIT, 0x0405, 0x11 },
+ { IMX_8BIT, 0x4082, 0x00 },
+ { IMX_8BIT, 0x4083, 0x00 },
+ { IMX_8BIT, 0x7006, 0x04 },
+ /* size setting */
+ { IMX_8BIT, 0x0344, 0x00 }, /* 46,396,4161,2727: 4116x2332 */
+ { IMX_8BIT, 0x0345, 0x2E },
+ { IMX_8BIT, 0x0346, 0x01 },
+ { IMX_8BIT, 0x0347, 0x8C },
+ { IMX_8BIT, 0x0348, 0x10 },
+ { IMX_8BIT, 0x0349, 0x41 },
+ { IMX_8BIT, 0x034A, 0x0A },
+ { IMX_8BIT, 0x034B, 0xA7 },
+ { IMX_8BIT, 0x034C, 0x07 }, /*1936 x 1096 */
+ { IMX_8BIT, 0x034D, 0x90 },
+ { IMX_8BIT, 0x034E, 0x04 },
+ { IMX_8BIT, 0x034F, 0x48 },
+ { IMX_8BIT, 0x0350, 0x00 },
+ { IMX_8BIT, 0x0351, 0x00 },
+ { IMX_8BIT, 0x0352, 0x00 },
+ { IMX_8BIT, 0x0353, 0x00 },
+ { IMX_8BIT, 0x0354, 0x08 }, /* 2058x1166 */
+ { IMX_8BIT, 0x0355, 0x0A },
+ { IMX_8BIT, 0x0356, 0x04 },
+ { IMX_8BIT, 0x0357, 0x8E },
+ { IMX_8BIT, 0x301D, 0x30 },
+ { IMX_8BIT, 0x3310, 0x07 },
+ { IMX_8BIT, 0x3311, 0x90 },
+ { IMX_8BIT, 0x3312, 0x04 },
+ { IMX_8BIT, 0x3313, 0x48 },
+ { IMX_8BIT, 0x331C, 0x04 },
+ { IMX_8BIT, 0x331D, 0xB0 },
+ { IMX_8BIT, 0x4084, 0x07 },
+ { IMX_8BIT, 0x4085, 0x90 },
+ { IMX_8BIT, 0x4086, 0x04 },
+ { IMX_8BIT, 0x4087, 0x48 },
+ { IMX_8BIT, 0x4400, 0x00 },
+ { IMX_TOK_TERM, 0, 0}
+};
+
+/* 1080P 10%DVS 2104x1184 */
+static const struct imx_reg imx135_1080p_10_dvs_max_clock[] = {
+ GROUPED_PARAMETER_HOLD_ENABLE,
+ PLL_SETTINGS_FOR_MIPI_451_2MHZ_SALTBAY,
+ /* mode setting */
+ { IMX_8BIT, 0x0108, 0x03 },
+ { IMX_8BIT, 0x0112, 0x0A },
+ { IMX_8BIT, 0x0113, 0x0A },
+ { IMX_8BIT, 0x0381, 0x01 },
+ { IMX_8BIT, 0x0383, 0x01 },
+ { IMX_8BIT, 0x0385, 0x01 },
+ { IMX_8BIT, 0x0387, 0x01 },
+ { IMX_8BIT, 0x0390, 0x01 },
+ { IMX_8BIT, 0x0391, 0x22 },
+ { IMX_8BIT, 0x0392, 0x00 },
+ { IMX_8BIT, 0x0401, 0x00 },
+ { IMX_8BIT, 0x0404, 0x00 },
+ { IMX_8BIT, 0x0405, 0x10 },
+ { IMX_8BIT, 0x4082, 0x01 },
+ { IMX_8BIT, 0x4083, 0x01 },
+ { IMX_8BIT, 0x7006, 0x04 },
+ /* size setting */
+ { IMX_8BIT, 0x0344, 0x00 }, /* 0,376,4207,2743: 4208x2368 */
+ { IMX_8BIT, 0x0345, 0x00 },
+ { IMX_8BIT, 0x0346, 0x01 },
+ { IMX_8BIT, 0x0347, 0x78 },
+ { IMX_8BIT, 0x0348, 0x10 },
+ { IMX_8BIT, 0x0349, 0x6F },
+ { IMX_8BIT, 0x034A, 0x0A },
+ { IMX_8BIT, 0x034B, 0xB7 },
+ { IMX_8BIT, 0x034C, 0x08 }, /* 2104 x 1184 */
+ { IMX_8BIT, 0x034D, 0x38 },
+ { IMX_8BIT, 0x034E, 0x04 },
+ { IMX_8BIT, 0x034F, 0xA0 },
+ { IMX_8BIT, 0x0350, 0x00 },
+ { IMX_8BIT, 0x0351, 0x00 },
+ { IMX_8BIT, 0x0352, 0x00 },
+ { IMX_8BIT, 0x0353, 0x00 },
+ { IMX_8BIT, 0x0354, 0x08 }, /* 2104 x 1184 */
+ { IMX_8BIT, 0x0355, 0x38 },
+ { IMX_8BIT, 0x0356, 0x04 },
+ { IMX_8BIT, 0x0357, 0xA0 },
+ { IMX_8BIT, 0x301D, 0x30 },
+ { IMX_8BIT, 0x3310, 0x08 },
+ { IMX_8BIT, 0x3311, 0x38 },
+ { IMX_8BIT, 0x3312, 0x04 },
+ { IMX_8BIT, 0x3313, 0xA0 },
+ { IMX_8BIT, 0x331C, 0x04 },
+ { IMX_8BIT, 0x331D, 0xB0 },
+ { IMX_8BIT, 0x4084, 0x00 },
+ { IMX_8BIT, 0x4085, 0x00 },
+ { IMX_8BIT, 0x4086, 0x00 },
+ { IMX_8BIT, 0x4087, 0x00 },
+ { IMX_8BIT, 0x4400, 0x00 },
+ { IMX_TOK_TERM, 0, 0}
+};
+
+static const struct imx_reg imx135_720pdvs_max_clock[] = {
+ GROUPED_PARAMETER_HOLD_ENABLE,
+ PLL_SETTINGS_FOR_MIPI_451_2MHZ_SALTBAY,
+ /* mode setting */
+ { IMX_8BIT, 0x0108, 0x03 },
+ { IMX_8BIT, 0x0112, 0x0A },
+ { IMX_8BIT, 0x0113, 0x0A },
+ { IMX_8BIT, 0x0381, 0x01 },
+ { IMX_8BIT, 0x0383, 0x01 },
+ { IMX_8BIT, 0x0385, 0x01 },
+ { IMX_8BIT, 0x0387, 0x01 },
+ { IMX_8BIT, 0x0390, 0x01 },
+ { IMX_8BIT, 0x0391, 0x22 },
+ { IMX_8BIT, 0x0392, 0x00 },
+ { IMX_8BIT, 0x0401, 0x02 },
+ { IMX_8BIT, 0x0404, 0x00 },
+ { IMX_8BIT, 0x0405, 0x15 },
+ { IMX_8BIT, 0x4082, 0x00 },
+ { IMX_8BIT, 0x4083, 0x00 },
+ { IMX_8BIT, 0x7006, 0x04 },
+ /* size setting */
+ { IMX_8BIT, 0x0344, 0x00 }, /* 46,404,4161,2715: 4116x2312 */
+ { IMX_8BIT, 0x0345, 0x2E },
+ { IMX_8BIT, 0x0346, 0x01 },
+ { IMX_8BIT, 0x0347, 0x94 },
+ { IMX_8BIT, 0x0348, 0x10 },
+ { IMX_8BIT, 0x0349, 0x41 },
+ { IMX_8BIT, 0x034A, 0x0A },
+ { IMX_8BIT, 0x034B, 0x9B },
+ { IMX_8BIT, 0x034C, 0x06 }, /*1568 x 880 */
+ { IMX_8BIT, 0x034D, 0x20 },
+ { IMX_8BIT, 0x034E, 0x03 },
+ { IMX_8BIT, 0x034F, 0x70 },
+ { IMX_8BIT, 0x0350, 0x00 },
+ { IMX_8BIT, 0x0351, 0x00 },
+ { IMX_8BIT, 0x0352, 0x00 },
+ { IMX_8BIT, 0x0353, 0x00 },
+ { IMX_8BIT, 0x0354, 0x08 }, /*2058 x 1156 */
+ { IMX_8BIT, 0x0355, 0x0A },
+ { IMX_8BIT, 0x0356, 0x04 },
+ { IMX_8BIT, 0x0357, 0x84 },
+ { IMX_8BIT, 0x301D, 0x30 },
+ { IMX_8BIT, 0x3310, 0x06 },
+ { IMX_8BIT, 0x3311, 0x20 },
+ { IMX_8BIT, 0x3312, 0x03 },
+ { IMX_8BIT, 0x3313, 0x70 },
+ { IMX_8BIT, 0x331C, 0x04 },
+ { IMX_8BIT, 0x331D, 0x4C },
+ { IMX_8BIT, 0x4084, 0x06 },
+ { IMX_8BIT, 0x4085, 0x20 },
+ { IMX_8BIT, 0x4086, 0x03 },
+ { IMX_8BIT, 0x4087, 0x70 },
+ { IMX_8BIT, 0x4400, 0x00 },
+ { IMX_TOK_TERM, 0, 0}
+};
+
+/******************* Video Modes ******************/
+
+/* 1080P DVS 2336x1320 */
+static const struct imx_reg imx135_2336x1320_max_clock[] = {
+ GROUPED_PARAMETER_HOLD_ENABLE,
+ PLL_SETTINGS_FOR_MIPI_451_2MHZ_SALTBAY,
+ /* mode setting */
+ { IMX_8BIT, 0x0108, 0x03 },
+ { IMX_8BIT, 0x0112, 0x0A },
+ { IMX_8BIT, 0x0113, 0x0A },
+ { IMX_8BIT, 0x0381, 0x01 },
+ { IMX_8BIT, 0x0383, 0x01 },
+ { IMX_8BIT, 0x0385, 0x01 },
+ { IMX_8BIT, 0x0387, 0x01 },
+ { IMX_8BIT, 0x0390, 0x00 },
+ { IMX_8BIT, 0x0391, 0x11 },
+ { IMX_8BIT, 0x0392, 0x00 },
+ { IMX_8BIT, 0x0401, 0x02 },
+ { IMX_8BIT, 0x0404, 0x00 },
+ { IMX_8BIT, 0x0405, 0x1C },
+ { IMX_8BIT, 0x4082, 0x00 },
+ { IMX_8BIT, 0x4083, 0x00 },
+ { IMX_8BIT, 0x7006, 0x04 },
+ /* size setting */
+ { IMX_8BIT, 0x0344, 0x00 }, /* 60,404,4147,2715: 4088x2312 */
+ { IMX_8BIT, 0x0345, 0x3C },
+ { IMX_8BIT, 0x0346, 0x01 },
+ { IMX_8BIT, 0x0347, 0x94 },
+ { IMX_8BIT, 0x0348, 0x10 },
+ { IMX_8BIT, 0x0349, 0x33 },
+ { IMX_8BIT, 0x034A, 0x0A },
+ { IMX_8BIT, 0x034B, 0x9B },
+ { IMX_8BIT, 0x034C, 0x09 }, /*2336 x 1320 */
+ { IMX_8BIT, 0x034D, 0x20 },
+ { IMX_8BIT, 0x034E, 0x05 },
+ { IMX_8BIT, 0x034F, 0x28 },
+ { IMX_8BIT, 0x0350, 0x00 },
+ { IMX_8BIT, 0x0351, 0x00 },
+ { IMX_8BIT, 0x0352, 0x00 },
+ { IMX_8BIT, 0x0353, 0x00 },
+ { IMX_8BIT, 0x0354, 0x0F }, /* 4088x2312 */
+ { IMX_8BIT, 0x0355, 0xF8 },
+ { IMX_8BIT, 0x0356, 0x09 },
+ { IMX_8BIT, 0x0357, 0x08 },
+ { IMX_8BIT, 0x301D, 0x30 },
+ { IMX_8BIT, 0x3310, 0x09 },
+ { IMX_8BIT, 0x3311, 0x20 },
+ { IMX_8BIT, 0x3312, 0x05 },
+ { IMX_8BIT, 0x3313, 0x28 },
+ { IMX_8BIT, 0x331C, 0x04 },
+ { IMX_8BIT, 0x331D, 0xE2 },
+ { IMX_8BIT, 0x4084, 0x09 },
+ { IMX_8BIT, 0x4085, 0x20 },
+ { IMX_8BIT, 0x4086, 0x05 },
+ { IMX_8BIT, 0x4087, 0x28 },
+ { IMX_8BIT, 0x4400, 0x00 },
+ { IMX_TOK_TERM, 0, 0}
+};
+
+/* 1080P DVS 2336x1320 Cropped */
+static const struct imx_reg imx135_2336x1320_cropped_mipi499[] = {
+ GROUPED_PARAMETER_HOLD_ENABLE,
+ PLL_SETTINGS_FOR_MIPI_499_2MHZ_SALTBAY,
+ /* mode setting */
+ { IMX_8BIT, 0x0108, 0x03 },
+ { IMX_8BIT, 0x0112, 0x0A },
+ { IMX_8BIT, 0x0113, 0x0A },
+ { IMX_8BIT, 0x0381, 0x01 },
+ { IMX_8BIT, 0x0383, 0x01 },
+ { IMX_8BIT, 0x0385, 0x01 },
+ { IMX_8BIT, 0x0387, 0x01 },
+ { IMX_8BIT, 0x0390, 0x00 },
+ { IMX_8BIT, 0x0391, 0x11 },
+ { IMX_8BIT, 0x0392, 0x00 },
+ { IMX_8BIT, 0x0401, 0x02 },
+ { IMX_8BIT, 0x0404, 0x00 },
+ { IMX_8BIT, 0x0405, 0x1C },
+ { IMX_8BIT, 0x4082, 0x01 },
+ { IMX_8BIT, 0x4083, 0x01 },
+ { IMX_8BIT, 0x7006, 0x04 },
+ /* size setting */
+ { IMX_8BIT, 0x0344, 0x03 }, /* 936,900,3271,2219: 2336x1320 */
+ { IMX_8BIT, 0x0345, 0xA8 },
+ { IMX_8BIT, 0x0346, 0x03 },
+ { IMX_8BIT, 0x0347, 0x84 },
+ { IMX_8BIT, 0x0348, 0x0C },
+ { IMX_8BIT, 0x0349, 0xC7 },
+ { IMX_8BIT, 0x034A, 0x08 },
+ { IMX_8BIT, 0x034B, 0xAB },
+ { IMX_8BIT, 0x034C, 0x09 }, /* 2336 x 1320 */
+ { IMX_8BIT, 0x034D, 0x20 },
+ { IMX_8BIT, 0x034E, 0x05 },
+ { IMX_8BIT, 0x034F, 0x28 },
+ { IMX_8BIT, 0x0350, 0x00 },
+ { IMX_8BIT, 0x0351, 0x00 },
+ { IMX_8BIT, 0x0352, 0x00 },
+ { IMX_8BIT, 0x0353, 0x00 },
+ { IMX_8BIT, 0x0354, 0x09 }, /* 2336 x 1320 */
+ { IMX_8BIT, 0x0355, 0x20 },
+ { IMX_8BIT, 0x0356, 0x05 },
+ { IMX_8BIT, 0x0357, 0x28 },
+ { IMX_8BIT, 0x301D, 0x30 },
+ { IMX_8BIT, 0x3310, 0x09 },
+ { IMX_8BIT, 0x3311, 0x20 },
+ { IMX_8BIT, 0x3312, 0x05 },
+ { IMX_8BIT, 0x3313, 0x28 },
+ { IMX_8BIT, 0x331C, 0x00 },
+ { IMX_8BIT, 0x331D, 0xB4 },
+ { IMX_8BIT, 0x4084, 0x09 },
+ { IMX_8BIT, 0x4085, 0x20 },
+ { IMX_8BIT, 0x4086, 0x05 },
+ { IMX_8BIT, 0x4087, 0x28 },
+ { IMX_8BIT, 0x4400, 0x00 },
+ { IMX_TOK_TERM, 0, 0}
+};
+
+/* 720P DVS 1568 x 880 */
+static const struct imx_reg imx135_720p_dvs_binning[] = {
+ GROUPED_PARAMETER_HOLD_ENABLE,
+ PLL_SETTINGS_FOR_MIPI_209_6MHZ_SALTBAY,
+ /* mode setting */
+ { IMX_8BIT, 0x0108, 0x03 },
+ { IMX_8BIT, 0x0112, 0x0A },
+ { IMX_8BIT, 0x0113, 0x0A },
+ { IMX_8BIT, 0x0381, 0x01 },
+ { IMX_8BIT, 0x0383, 0x01 },
+ { IMX_8BIT, 0x0385, 0x01 },
+ { IMX_8BIT, 0x0387, 0x01 },
+ { IMX_8BIT, 0x0390, 0x01 },
+ { IMX_8BIT, 0x0391, 0x22 },
+ { IMX_8BIT, 0x0392, 0x00 },
+ { IMX_8BIT, 0x0401, 0x02 },
+ { IMX_8BIT, 0x0404, 0x00 },
+ { IMX_8BIT, 0x0405, 0x15 },
+ { IMX_8BIT, 0x4082, 0x00 },
+ { IMX_8BIT, 0x4083, 0x00 },
+ { IMX_8BIT, 0x7006, 0x04 },
+ /* size setting */
+ { IMX_8BIT, 0x0344, 0x00 }, /* 46,404,4161,2715: 4116x2312 */
+ { IMX_8BIT, 0x0345, 0x2e },
+ { IMX_8BIT, 0x0346, 0x01 },
+ { IMX_8BIT, 0x0347, 0x94 },
+ { IMX_8BIT, 0x0348, 0x10 },
+ { IMX_8BIT, 0x0349, 0x41 },
+ { IMX_8BIT, 0x034A, 0x0A },
+ { IMX_8BIT, 0x034B, 0x9B },
+ { IMX_8BIT, 0x034C, 0x06 }, /*1568 x 880 */
+ { IMX_8BIT, 0x034D, 0x20 },
+ { IMX_8BIT, 0x034E, 0x03 },
+ { IMX_8BIT, 0x034F, 0x70 },
+ { IMX_8BIT, 0x0350, 0x00 },
+ { IMX_8BIT, 0x0351, 0x00 },
+ { IMX_8BIT, 0x0352, 0x00 },
+ { IMX_8BIT, 0x0353, 0x00 },
+ { IMX_8BIT, 0x0354, 0x08 }, /* 2058x1156 */
+ { IMX_8BIT, 0x0355, 0x0a },
+ { IMX_8BIT, 0x0356, 0x04 },
+ { IMX_8BIT, 0x0357, 0x84 },
+ { IMX_8BIT, 0x301D, 0x30 }, /* TODO! */
+ { IMX_8BIT, 0x3310, 0x06 },
+ { IMX_8BIT, 0x3311, 0x20 },
+ { IMX_8BIT, 0x3312, 0x03 },
+ { IMX_8BIT, 0x3313, 0x70 },
+ { IMX_8BIT, 0x331C, 0x01 }, /* TODO! */
+ { IMX_8BIT, 0x331D, 0xd6 }, /* TODO! */
+ { IMX_8BIT, 0x4084, 0x06 },
+ { IMX_8BIT, 0x4085, 0x20 },
+ { IMX_8BIT, 0x4086, 0x03 },
+ { IMX_8BIT, 0x4087, 0x70 },
+ { IMX_8BIT, 0x4400, 0x00 },
+ { IMX_TOK_TERM, 0, 0}
+};
+
+/* wvga: H : 1640 V : 1024 */
+static const struct imx_reg imx135_wvga_dvs_binning[] = {
+ GROUPED_PARAMETER_HOLD_ENABLE,
+ PLL_SETTINGS_FOR_MIPI_209_6MHZ_SALTBAY,
+ /* Mode setting */
+ {IMX_8BIT, 0x0108, 0x03 },
+ {IMX_8BIT, 0x0112, 0x0A },
+ {IMX_8BIT, 0x0113, 0x0A },
+ {IMX_8BIT, 0x0381, 0x01 },
+ {IMX_8BIT, 0x0383, 0x01 },
+ {IMX_8BIT, 0x0385, 0x01 },
+ {IMX_8BIT, 0x0387, 0x01 },
+ {IMX_8BIT, 0x0390, 0x01 },
+ {IMX_8BIT, 0x0391, 0x22 },
+ {IMX_8BIT, 0x0392, 0x00 },
+ {IMX_8BIT, 0x0401, 0x02 },
+ {IMX_8BIT, 0x0404, 0x00 },
+ {IMX_8BIT, 0x0405, 0x14 },
+ {IMX_8BIT, 0x4082, 0x00 },
+ {IMX_8BIT, 0x4083, 0x00 },
+ {IMX_8BIT, 0x7006, 0x04 },
+ /* Size setting */
+ {IMX_8BIT, 0x0344, 0x00 },
+ {IMX_8BIT, 0x0345, 0x36 },
+ {IMX_8BIT, 0x0346, 0x01 },
+ {IMX_8BIT, 0x0347, 0x18 },
+ {IMX_8BIT, 0x0348, 0x10 },
+ {IMX_8BIT, 0x0349, 0x39 },
+ {IMX_8BIT, 0x034A, 0x0B },
+ {IMX_8BIT, 0x034B, 0x17 },
+ {IMX_8BIT, 0x034C, 0x06 },
+ {IMX_8BIT, 0x034D, 0x68 },
+ {IMX_8BIT, 0x034E, 0x04 },
+ {IMX_8BIT, 0x034F, 0x00 },
+ {IMX_8BIT, 0x0350, 0x00 },
+ {IMX_8BIT, 0x0351, 0x00 },
+ {IMX_8BIT, 0x0352, 0x00 },
+ {IMX_8BIT, 0x0353, 0x00 },
+ {IMX_8BIT, 0x0354, 0x08 },
+ {IMX_8BIT, 0x0355, 0x02 },
+ {IMX_8BIT, 0x0356, 0x05 },
+ {IMX_8BIT, 0x0357, 0x00 },
+ {IMX_8BIT, 0x301D, 0x30 },
+ {IMX_8BIT, 0x3310, 0x06 },
+ {IMX_8BIT, 0x3311, 0x68 },
+ {IMX_8BIT, 0x3312, 0x04 },
+ {IMX_8BIT, 0x3313, 0x00 },
+ {IMX_8BIT, 0x331C, 0x01 },
+ {IMX_8BIT, 0x331D, 0xBD },
+ {IMX_8BIT, 0x4084, 0x06 },
+ {IMX_8BIT, 0x4085, 0x68 },
+ {IMX_8BIT, 0x4086, 0x04 },
+ {IMX_8BIT, 0x4087, 0x00 },
+ {IMX_8BIT, 0x4400, 0x00 },
+ {IMX_TOK_TERM, 0, 0}
+};
+
+/* 480P 1036 x 696 */
+static const struct imx_reg imx135_480p_binning[] = {
+ GROUPED_PARAMETER_HOLD_ENABLE,
+ PLL_SETTINGS_FOR_MIPI_209_6MHZ_SALTBAY,
+ /* Mode setting */
+ {IMX_8BIT, 0x0108, 0x03 },
+ {IMX_8BIT, 0x0112, 0x0A },
+ {IMX_8BIT, 0x0113, 0x0A },
+ {IMX_8BIT, 0x0381, 0x01 },
+ {IMX_8BIT, 0x0383, 0x01 },
+ {IMX_8BIT, 0x0385, 0x01 },
+ {IMX_8BIT, 0x0387, 0x01 },
+ {IMX_8BIT, 0x0390, 0x01 },
+ {IMX_8BIT, 0x0391, 0x44 },
+ {IMX_8BIT, 0x0392, 0x00 },
+ {IMX_8BIT, 0x0401, 0x00 },
+ {IMX_8BIT, 0x0404, 0x00 },
+ {IMX_8BIT, 0x0405, 0x10 }, /* No scaling */
+ {IMX_8BIT, 0x4082, 0x00 },
+ {IMX_8BIT, 0x4083, 0x00 },
+ {IMX_8BIT, 0x7006, 0x04 },
+ /* Size setting */
+ {IMX_8BIT, 0x0344, 0x00 }, /* 52,20,4155, 3099 4144x2784*/
+ {IMX_8BIT, 0x0345, 0x20 },
+ {IMX_8BIT, 0x0346, 0x00 },
+ {IMX_8BIT, 0x0347, 0xA8 },
+ {IMX_8BIT, 0x0348, 0x10 },
+ {IMX_8BIT, 0x0349, 0x4F },
+ {IMX_8BIT, 0x034A, 0x0B },
+ {IMX_8BIT, 0x034B, 0x88 },
+ {IMX_8BIT, 0x034C, 0x04 }, /* 1036 * 696 */
+ {IMX_8BIT, 0x034D, 0x0C },
+ {IMX_8BIT, 0x034E, 0x02 },
+ {IMX_8BIT, 0x034F, 0xB8 },
+ {IMX_8BIT, 0x0350, 0x00 },
+ {IMX_8BIT, 0x0351, 0x00 },
+ {IMX_8BIT, 0x0352, 0x00 },
+ {IMX_8BIT, 0x0353, 0x00 },
+ {IMX_8BIT, 0x0354, 0x04 }, /* 1036x696 */
+ {IMX_8BIT, 0x0355, 0x0C },
+ {IMX_8BIT, 0x0356, 0x02 },
+ {IMX_8BIT, 0x0357, 0xB8 },
+ {IMX_8BIT, 0x301D, 0x30 },
+ {IMX_8BIT, 0x3310, 0x04 },
+ {IMX_8BIT, 0x3311, 0x0C },
+ {IMX_8BIT, 0x3312, 0x02 },
+ {IMX_8BIT, 0x3313, 0xB8 },
+ {IMX_8BIT, 0x331C, 0x02 },
+ {IMX_8BIT, 0x331D, 0x21 },
+ {IMX_8BIT, 0x4084, 0x04 },
+ {IMX_8BIT, 0x4085, 0x0C },
+ {IMX_8BIT, 0x4086, 0x02 },
+ {IMX_8BIT, 0x4087, 0xB8 },
+ {IMX_8BIT, 0x4400, 0x00 },
+ {IMX_TOK_TERM, 0, 0}
+};
+
+/* 480P DVS 936 x 602 */
+static const struct imx_reg imx135_480p_dvs_binning[] = {
+ GROUPED_PARAMETER_HOLD_ENABLE,
+ PLL_SETTINGS_FOR_MIPI_209_6MHZ_SALTBAY,
+ /* mode setting */
+ { IMX_8BIT, 0x0108, 0x03 },
+ { IMX_8BIT, 0x0112, 0x0A },
+ { IMX_8BIT, 0x0113, 0x0A },
+ { IMX_8BIT, 0x0381, 0x01 },
+ { IMX_8BIT, 0x0383, 0x01 },
+ { IMX_8BIT, 0x0385, 0x01 },
+ { IMX_8BIT, 0x0387, 0x01 },
+ { IMX_8BIT, 0x0390, 0x01 },
+ { IMX_8BIT, 0x0391, 0x22 },
+ { IMX_8BIT, 0x0392, 0x00 },
+ { IMX_8BIT, 0x0401, 0x02 },
+ { IMX_8BIT, 0x0404, 0x00 },
+ { IMX_8BIT, 0x0405, 0x23 },
+ { IMX_8BIT, 0x4082, 0x00 },
+ { IMX_8BIT, 0x4083, 0x00 },
+ { IMX_8BIT, 0x7006, 0x04 },
+ /* size setting */
+ { IMX_8BIT, 0x0344, 0x00 }, /* 56,244,4151,2877: 4096x2634 */
+ { IMX_8BIT, 0x0345, 0x38 },
+ { IMX_8BIT, 0x0346, 0x00 },
+ { IMX_8BIT, 0x0347, 0xf4 },
+ { IMX_8BIT, 0x0348, 0x10 },
+ { IMX_8BIT, 0x0349, 0x37 },
+ { IMX_8BIT, 0x034A, 0x0b },
+ { IMX_8BIT, 0x034B, 0x3d },
+ { IMX_8BIT, 0x034C, 0x03 }, /* 936 x 602 */
+ { IMX_8BIT, 0x034D, 0xa8 },
+ { IMX_8BIT, 0x034E, 0x02 },
+ { IMX_8BIT, 0x034F, 0x5a },
+ { IMX_8BIT, 0x0350, 0x00 },
+ { IMX_8BIT, 0x0351, 0x00 },
+ { IMX_8BIT, 0x0352, 0x00 },
+ { IMX_8BIT, 0x0353, 0x00 },
+ { IMX_8BIT, 0x0354, 0x08 }, /* 2058x1156 */
+ { IMX_8BIT, 0x0355, 0x00 },
+ { IMX_8BIT, 0x0356, 0x05 },
+ { IMX_8BIT, 0x0357, 0x25 },
+ { IMX_8BIT, 0x301D, 0x30 }, /* TODO! */
+ { IMX_8BIT, 0x3310, 0x03 },
+ { IMX_8BIT, 0x3311, 0xa8 },
+ { IMX_8BIT, 0x3312, 0x02 },
+ { IMX_8BIT, 0x3313, 0x5a },
+ { IMX_8BIT, 0x331C, 0x01 }, /* TODO! */
+ { IMX_8BIT, 0x331D, 0xd6 },
+ { IMX_8BIT, 0x4084, 0x03 },
+ { IMX_8BIT, 0x4085, 0xa8 },
+ { IMX_8BIT, 0x4086, 0x02 },
+ { IMX_8BIT, 0x4087, 0x5a },
+ { IMX_8BIT, 0x4400, 0x00 },
+ { IMX_TOK_TERM, 0, 0}
+};
+
+/* VGA: H : 1036 V : 780 */
+static const struct imx_reg imx135_vga_binning[] = {
+ GROUPED_PARAMETER_HOLD_ENABLE,
+ PLL_SETTINGS_FOR_MIPI_209_6MHZ_SALTBAY,
+ /* Mode setting */
+ {IMX_8BIT, 0x0108, 0x03 },
+ {IMX_8BIT, 0x0112, 0x0A },
+ {IMX_8BIT, 0x0113, 0x0A },
+ {IMX_8BIT, 0x0381, 0x01 },
+ {IMX_8BIT, 0x0383, 0x01 },
+ {IMX_8BIT, 0x0385, 0x01 },
+ {IMX_8BIT, 0x0387, 0x01 },
+ {IMX_8BIT, 0x0390, 0x01 },
+ {IMX_8BIT, 0x0391, 0x44 },
+ {IMX_8BIT, 0x0392, 0x00 },
+ {IMX_8BIT, 0x0401, 0x00 },
+ {IMX_8BIT, 0x0404, 0x00 },
+ {IMX_8BIT, 0x0405, 0x10 },
+ {IMX_8BIT, 0x4082, 0x00 },
+ {IMX_8BIT, 0x4083, 0x00 },
+ {IMX_8BIT, 0x7006, 0x04 },
+ /* Size setting */
+ {IMX_8BIT, 0x0344, 0x00 }, /* 52,20,4155, 3099 4144x3120*/
+ {IMX_8BIT, 0x0345, 0x20 },
+ {IMX_8BIT, 0x0346, 0x00 },
+ {IMX_8BIT, 0x0347, 0x00 },
+ {IMX_8BIT, 0x0348, 0x10 },
+ {IMX_8BIT, 0x0349, 0x4F },
+ {IMX_8BIT, 0x034A, 0x0C },
+ {IMX_8BIT, 0x034B, 0x2F },
+ {IMX_8BIT, 0x034C, 0x04 }, /* 1036x780 */
+ {IMX_8BIT, 0x034D, 0x0C },
+ {IMX_8BIT, 0x034E, 0x03 },
+ {IMX_8BIT, 0x034F, 0x0C },
+ {IMX_8BIT, 0x0350, 0x00 },
+ {IMX_8BIT, 0x0351, 0x00 },
+ {IMX_8BIT, 0x0352, 0x00 },
+ {IMX_8BIT, 0x0353, 0x00 },
+ {IMX_8BIT, 0x0354, 0x04 }, /* 1036x780 */
+ {IMX_8BIT, 0x0355, 0x0C },
+ {IMX_8BIT, 0x0356, 0x03 },
+ {IMX_8BIT, 0x0357, 0x0C },
+ {IMX_8BIT, 0x301D, 0x30 },
+ {IMX_8BIT, 0x3310, 0x04 },
+ {IMX_8BIT, 0x3311, 0x0C },
+ {IMX_8BIT, 0x3312, 0x03 },
+ {IMX_8BIT, 0x3313, 0x0C },
+ {IMX_8BIT, 0x331C, 0x02 },
+ {IMX_8BIT, 0x331D, 0x21 },
+ {IMX_8BIT, 0x4084, 0x04 },
+ {IMX_8BIT, 0x4085, 0x0C },
+ {IMX_8BIT, 0x4086, 0x03 },
+ {IMX_8BIT, 0x4087, 0x0C },
+ {IMX_8BIT, 0x4400, 0x00 },
+ {IMX_TOK_TERM, 0, 0}
+};
+
+/* VGA: H : 820 V : 616 */
+static const struct imx_reg imx135_vga_dvs_binning[] = {
+ GROUPED_PARAMETER_HOLD_ENABLE,
+ PLL_SETTINGS_FOR_MIPI_209_6MHZ_SALTBAY,
+ /* Mode setting */
+ {IMX_8BIT, 0x0108, 0x03 },
+ {IMX_8BIT, 0x0112, 0x0A },
+ {IMX_8BIT, 0x0113, 0x0A },
+ {IMX_8BIT, 0x0381, 0x01 },
+ {IMX_8BIT, 0x0383, 0x01 },
+ {IMX_8BIT, 0x0385, 0x01 },
+ {IMX_8BIT, 0x0387, 0x01 },
+ {IMX_8BIT, 0x0390, 0x01 },
+ {IMX_8BIT, 0x0391, 0x44 },
+ {IMX_8BIT, 0x0392, 0x00 },
+ {IMX_8BIT, 0x0401, 0x02 },
+ {IMX_8BIT, 0x0404, 0x00 },
+ {IMX_8BIT, 0x0405, 0x14 },
+ {IMX_8BIT, 0x4082, 0x00 },
+ {IMX_8BIT, 0x4083, 0x00 },
+ {IMX_8BIT, 0x7006, 0x04 },
+ /* Size setting */
+ {IMX_8BIT, 0x0344, 0x00 }, /* 52,20,4155, 3099 4104x3080*/
+ {IMX_8BIT, 0x0345, 0x34 },
+ {IMX_8BIT, 0x0346, 0x00 },
+ {IMX_8BIT, 0x0347, 0x14 },
+ {IMX_8BIT, 0x0348, 0x10 },
+ {IMX_8BIT, 0x0349, 0x3B },
+ {IMX_8BIT, 0x034A, 0x0C },
+ {IMX_8BIT, 0x034B, 0x1B },
+ {IMX_8BIT, 0x034C, 0x03 }, /* 820x616 */
+ {IMX_8BIT, 0x034D, 0x34 },
+ {IMX_8BIT, 0x034E, 0x02 },
+ {IMX_8BIT, 0x034F, 0x68 },
+ {IMX_8BIT, 0x0350, 0x00 },
+ {IMX_8BIT, 0x0351, 0x00 },
+ {IMX_8BIT, 0x0352, 0x00 },
+ {IMX_8BIT, 0x0353, 0x00 },
+ {IMX_8BIT, 0x0354, 0x04 }, /* 1026x770 */
+ {IMX_8BIT, 0x0355, 0x02 },
+ {IMX_8BIT, 0x0356, 0x03 },
+ {IMX_8BIT, 0x0357, 0x02 },
+ {IMX_8BIT, 0x301D, 0x30 },
+ {IMX_8BIT, 0x3310, 0x03 },
+ {IMX_8BIT, 0x3311, 0x34 },
+ {IMX_8BIT, 0x3312, 0x02 },
+ {IMX_8BIT, 0x3313, 0x68 },
+ {IMX_8BIT, 0x331C, 0x02 },
+ {IMX_8BIT, 0x331D, 0x21 },
+ {IMX_8BIT, 0x4084, 0x03 },
+ {IMX_8BIT, 0x4085, 0x34 },
+ {IMX_8BIT, 0x4086, 0x02 },
+ {IMX_8BIT, 0x4087, 0x68 },
+ {IMX_8BIT, 0x4400, 0x00 },
+ {IMX_TOK_TERM, 0, 0}
+};
+
+/* VGA: H : 436 V : 360 */
+static const struct imx_reg imx135_436x360_binning[] = {
+ GROUPED_PARAMETER_HOLD_ENABLE,
+ PLL_SETTINGS_FOR_MIPI_209_6MHZ_SALTBAY,
+ /* Mode setting */
+ {IMX_8BIT, 0x0108, 0x03 },
+ {IMX_8BIT, 0x0112, 0x0A },
+ {IMX_8BIT, 0x0113, 0x0A },
+ {IMX_8BIT, 0x0381, 0x01 },
+ {IMX_8BIT, 0x0383, 0x01 },
+ {IMX_8BIT, 0x0385, 0x01 },
+ {IMX_8BIT, 0x0387, 0x01 },
+ {IMX_8BIT, 0x0390, 0x01 },
+ {IMX_8BIT, 0x0391, 0x44 },
+ {IMX_8BIT, 0x0392, 0x00 },
+ {IMX_8BIT, 0x0401, 0x02 },
+ {IMX_8BIT, 0x0404, 0x00 },
+ {IMX_8BIT, 0x0405, 0x22 },
+ {IMX_8BIT, 0x4082, 0x00 },
+ {IMX_8BIT, 0x4083, 0x00 },
+ {IMX_8BIT, 0x7006, 0x04 },
+ /* Size setting */
+ {IMX_8BIT, 0x0344, 0x00 }, /* 212,0,3995,3119 3784x3120 */
+ {IMX_8BIT, 0x0345, 0xD4 },
+ {IMX_8BIT, 0x0346, 0x00 },
+ {IMX_8BIT, 0x0347, 0x00 },
+ {IMX_8BIT, 0x0348, 0x0F },
+ {IMX_8BIT, 0x0349, 0x9B },
+ {IMX_8BIT, 0x034A, 0x0C },
+ {IMX_8BIT, 0x034B, 0x2F },
+
+ {IMX_8BIT, 0x034C, 0x01 }, /* 436x360 */
+ {IMX_8BIT, 0x034D, 0xB4 },
+ {IMX_8BIT, 0x034E, 0x01 },
+ {IMX_8BIT, 0x034F, 0x68 },
+ {IMX_8BIT, 0x0350, 0x00 },
+ {IMX_8BIT, 0x0351, 0x12 },
+ {IMX_8BIT, 0x0352, 0x00 },
+ {IMX_8BIT, 0x0353, 0x0C },
+
+ {IMX_8BIT, 0x0354, 0x03 }, /* 928x768 crop from 946x780*/
+ {IMX_8BIT, 0x0355, 0xA0 },
+ {IMX_8BIT, 0x0356, 0x03 },
+ {IMX_8BIT, 0x0357, 0x00 },
+
+ {IMX_8BIT, 0x301D, 0x30 },
+ {IMX_8BIT, 0x3310, 0x01 },
+ {IMX_8BIT, 0x3311, 0xB4 },
+ {IMX_8BIT, 0x3312, 0x01 },
+ {IMX_8BIT, 0x3313, 0x68 },
+ {IMX_8BIT, 0x331C, 0x02 },
+ {IMX_8BIT, 0x331D, 0x21 },
+ {IMX_8BIT, 0x4084, 0x01 },
+ {IMX_8BIT, 0x4085, 0xB4 },
+ {IMX_8BIT, 0x4086, 0x01 },
+ {IMX_8BIT, 0x4087, 0x68 },
+ {IMX_8BIT, 0x4400, 0x00 },
+ {IMX_TOK_TERM, 0, 0}
+};
+
+/* QVGA: H : 408 V : 308 */
+static const struct imx_reg imx135_qvga__dvs_binning[] = {
+ GROUPED_PARAMETER_HOLD_ENABLE,
+ PLL_SETTINGS_FOR_MIPI_209_6MHZ_SALTBAY,
+ /* Mode setting */
+ {IMX_8BIT, 0x0108, 0x03 },
+ {IMX_8BIT, 0x0112, 0x0A },
+ {IMX_8BIT, 0x0113, 0x0A },
+ {IMX_8BIT, 0x0381, 0x01 },
+ {IMX_8BIT, 0x0383, 0x01 },
+ {IMX_8BIT, 0x0385, 0x01 },
+ {IMX_8BIT, 0x0387, 0x01 },
+ {IMX_8BIT, 0x0390, 0x01 },
+ {IMX_8BIT, 0x0391, 0x44 },
+ {IMX_8BIT, 0x0392, 0x00 },
+ {IMX_8BIT, 0x0401, 0x02 },
+ {IMX_8BIT, 0x0404, 0x00 },
+ {IMX_8BIT, 0x0405, 0x28 },
+ {IMX_8BIT, 0x4082, 0x00 },
+ {IMX_8BIT, 0x4083, 0x00 },
+ {IMX_8BIT, 0x7006, 0x04 },
+ /* Size setting */
+ {IMX_8BIT, 0x0344, 0x00 }, /* 64,20,4143,3099 4080x3080 */
+ {IMX_8BIT, 0x0345, 0x40 },
+ {IMX_8BIT, 0x0346, 0x00 },
+ {IMX_8BIT, 0x0347, 0x14 },
+ {IMX_8BIT, 0x0348, 0x10 },
+ {IMX_8BIT, 0x0349, 0x2F },
+ {IMX_8BIT, 0x034A, 0x0C },
+ {IMX_8BIT, 0x034B, 0x1B },
+ {IMX_8BIT, 0x034C, 0x01 }, /* 408x308 */
+ {IMX_8BIT, 0x034D, 0x98 },
+ {IMX_8BIT, 0x034E, 0x01 },
+ {IMX_8BIT, 0x034F, 0x34 },
+ {IMX_8BIT, 0x0350, 0x00 },
+ {IMX_8BIT, 0x0351, 0x00 },
+ {IMX_8BIT, 0x0352, 0x00 },
+ {IMX_8BIT, 0x0353, 0x00 },
+ {IMX_8BIT, 0x0354, 0x03 }, /* 1020x770 */
+ {IMX_8BIT, 0x0355, 0xFC },
+ {IMX_8BIT, 0x0356, 0x03 },
+ {IMX_8BIT, 0x0357, 0x02 },
+ {IMX_8BIT, 0x301D, 0x30 },
+ {IMX_8BIT, 0x3310, 0x01 },
+ {IMX_8BIT, 0x3311, 0x98 },
+ {IMX_8BIT, 0x3312, 0x01 },
+ {IMX_8BIT, 0x3313, 0x34 },
+ {IMX_8BIT, 0x331C, 0x01 },
+ {IMX_8BIT, 0x331D, 0x68 },
+ {IMX_8BIT, 0x4084, 0x01 },
+ {IMX_8BIT, 0x4085, 0x98 },
+ {IMX_8BIT, 0x4086, 0x01 },
+ {IMX_8BIT, 0x4087, 0x34 },
+ {IMX_8BIT, 0x4400, 0x00 },
+ {IMX_TOK_TERM, 0, 0}
+};
+
+/* CIF H : 368 V : 304 */
+static const struct imx_reg imx135_cif_binning[] = {
+ GROUPED_PARAMETER_HOLD_ENABLE,
+ PLL_SETTINGS_FOR_MIPI_209_6MHZ_SALTBAY,
+ /* Mode setting */
+ {IMX_8BIT, 0x0108, 0x03 },
+ {IMX_8BIT, 0x0112, 0x0A },
+ {IMX_8BIT, 0x0113, 0x0A },
+ {IMX_8BIT, 0x0381, 0x01 },
+ {IMX_8BIT, 0x0383, 0x01 },
+ {IMX_8BIT, 0x0385, 0x01 },
+ {IMX_8BIT, 0x0387, 0x01 },
+ {IMX_8BIT, 0x0390, 0x01 },
+ {IMX_8BIT, 0x0391, 0x44 },
+ {IMX_8BIT, 0x0392, 0x00 },
+ {IMX_8BIT, 0x0401, 0x02 },
+ {IMX_8BIT, 0x0404, 0x00 },
+ {IMX_8BIT, 0x0405, 0x28 },
+ {IMX_8BIT, 0x4082, 0x00 },
+ {IMX_8BIT, 0x4083, 0x00 },
+ {IMX_8BIT, 0x7006, 0x04 },
+ /* Size setting */
+ {IMX_8BIT, 0x0344, 0x01 }, /* 264,42,3943,3081 3680x3040 */
+ {IMX_8BIT, 0x0345, 0x08 },
+ {IMX_8BIT, 0x0346, 0x00 },
+ {IMX_8BIT, 0x0347, 0x2a },
+ {IMX_8BIT, 0x0348, 0x0F },
+ {IMX_8BIT, 0x0349, 0x67 },
+ {IMX_8BIT, 0x034A, 0x0c },
+ {IMX_8BIT, 0x034B, 0x09 },
+ {IMX_8BIT, 0x034C, 0x01 }, /* 368x304 */
+ {IMX_8BIT, 0x034D, 0x70 },
+ {IMX_8BIT, 0x034E, 0x01 },
+ {IMX_8BIT, 0x034F, 0x30 },
+ {IMX_8BIT, 0x0350, 0x00 },
+ {IMX_8BIT, 0x0351, 0x00 },
+ {IMX_8BIT, 0x0352, 0x00 },
+ {IMX_8BIT, 0x0353, 0x00 },
+ {IMX_8BIT, 0x0354, 0x03 }, /* 920x760 */
+ {IMX_8BIT, 0x0355, 0x98 },
+ {IMX_8BIT, 0x0356, 0x02 },
+ {IMX_8BIT, 0x0357, 0xf8 },
+ {IMX_8BIT, 0x301D, 0x30 },
+ {IMX_8BIT, 0x3310, 0x01 },
+ {IMX_8BIT, 0x3311, 0x70 },
+ {IMX_8BIT, 0x3312, 0x01 },
+ {IMX_8BIT, 0x3313, 0x30 },
+ {IMX_8BIT, 0x331C, 0x02 }, /* TODO! binning 4x4 must be 021c? */
+ {IMX_8BIT, 0x331D, 0x1C },
+ {IMX_8BIT, 0x4084, 0x01 },
+ {IMX_8BIT, 0x4085, 0x70 },
+ {IMX_8BIT, 0x4086, 0x01 },
+ {IMX_8BIT, 0x4087, 0x30 },
+ {IMX_8BIT, 0x4400, 0x00 },
+ {IMX_TOK_TERM, 0, 0}
+};
+
+/* CIF H : 1888 V : 1548 */
+static const struct imx_reg imx135_cif_binning_1888x1548[] = {
+ GROUPED_PARAMETER_HOLD_ENABLE,
+ PLL_SETTINGS_FOR_MIPI_209_6MHZ_SALTBAY,
+ /* Mode setting */
+ {IMX_8BIT, 0x0108, 0x03 },
+ {IMX_8BIT, 0x0112, 0x0A },
+ {IMX_8BIT, 0x0113, 0x0A },
+ {IMX_8BIT, 0x0381, 0x01 },
+ {IMX_8BIT, 0x0383, 0x01 },
+ {IMX_8BIT, 0x0385, 0x01 },
+ {IMX_8BIT, 0x0387, 0x01 },
+ {IMX_8BIT, 0x0390, 0x01 },
+ {IMX_8BIT, 0x0391, 0x22 },
+ {IMX_8BIT, 0x0392, 0x00 },
+ {IMX_8BIT, 0x0401, 0x00 },
+ {IMX_8BIT, 0x0404, 0x00 },
+ {IMX_8BIT, 0x0405, 0x10 },
+ {IMX_8BIT, 0x4082, 0x00 },
+ {IMX_8BIT, 0x4083, 0x00 },
+ {IMX_8BIT, 0x7006, 0x04 },
+ /* Size setting */
+ {IMX_8BIT, 0x0344, 0x00 }, /* 216,12, 3776x3096 */
+ {IMX_8BIT, 0x0345, 0xD8 },
+ {IMX_8BIT, 0x0346, 0x00 },
+ {IMX_8BIT, 0x0347, 0x0C },
+ {IMX_8BIT, 0x0348, 0x0F },
+ {IMX_8BIT, 0x0349, 0x97 },
+ {IMX_8BIT, 0x034A, 0x0C },
+ {IMX_8BIT, 0x034B, 0x23 },
+ {IMX_8BIT, 0x034C, 0x07 }, /* 1888x1548 */
+ {IMX_8BIT, 0x034D, 0x60 },
+ {IMX_8BIT, 0x034E, 0x06 },
+ {IMX_8BIT, 0x034F, 0x0C },
+ {IMX_8BIT, 0x0350, 0x00 },
+ {IMX_8BIT, 0x0351, 0x00 },
+ {IMX_8BIT, 0x0352, 0x00 },
+ {IMX_8BIT, 0x0353, 0x00 },
+ {IMX_8BIT, 0x0354, 0x07 }, /* 1888x1548 */
+ {IMX_8BIT, 0x0355, 0x60 },
+ {IMX_8BIT, 0x0356, 0x06 },
+ {IMX_8BIT, 0x0357, 0x0C },
+ {IMX_8BIT, 0x301D, 0x30 },
+ {IMX_8BIT, 0x3310, 0x07 },
+ {IMX_8BIT, 0x3311, 0x60 },
+ {IMX_8BIT, 0x3312, 0x06 },
+ {IMX_8BIT, 0x3313, 0x0C },
+ {IMX_8BIT, 0x331C, 0x02 }, /* TODO! binning 4x4 must be 021c? */
+ {IMX_8BIT, 0x331D, 0x1C },
+ {IMX_8BIT, 0x4084, 0x07 },
+ {IMX_8BIT, 0x4085, 0x60 },
+ {IMX_8BIT, 0x4086, 0x06 },
+ {IMX_8BIT, 0x4087, 0x0C },
+ {IMX_8BIT, 0x4400, 0x00 },
+ {IMX_TOK_TERM, 0, 0}
+};
+
+/* QCIF H : 216 V : 176 */
+static const struct imx_reg imx135_qcif_dvs_binning[] = {
+ GROUPED_PARAMETER_HOLD_ENABLE,
+ PLL_SETTINGS_FOR_MIPI_209_6MHZ_SALTBAY,
+ /* Mode setting */
+ {IMX_8BIT, 0x0108, 0x03 },
+ {IMX_8BIT, 0x0112, 0x0A },
+ {IMX_8BIT, 0x0113, 0x0A },
+ {IMX_8BIT, 0x0381, 0x01 },
+ {IMX_8BIT, 0x0383, 0x01 },
+ {IMX_8BIT, 0x0385, 0x01 },
+ {IMX_8BIT, 0x0387, 0x01 },
+ {IMX_8BIT, 0x0390, 0x01 },
+ {IMX_8BIT, 0x0391, 0x44 },
+ {IMX_8BIT, 0x0392, 0x00 },
+ {IMX_8BIT, 0x0401, 0x02 },
+ {IMX_8BIT, 0x0404, 0x00 },
+ {IMX_8BIT, 0x0405, 0x46 },
+ {IMX_8BIT, 0x4082, 0x00 },
+ {IMX_8BIT, 0x4083, 0x00 },
+ {IMX_8BIT, 0x7006, 0x04 },
+ /* Size setting */
+ {IMX_8BIT, 0x0344, 0x00 }, /* 212,20,3995,3099 3784x3080 */
+ {IMX_8BIT, 0x0345, 0xD4 },
+ {IMX_8BIT, 0x0346, 0x00 },
+ {IMX_8BIT, 0x0347, 0x14 },
+ {IMX_8BIT, 0x0348, 0x0F },
+ {IMX_8BIT, 0x0349, 0x9B },
+ {IMX_8BIT, 0x034A, 0x0C },
+ {IMX_8BIT, 0x034B, 0x1B },
+ {IMX_8BIT, 0x034C, 0x00 }, /* 216x176 */
+ {IMX_8BIT, 0x034D, 0xD8 },
+ {IMX_8BIT, 0x034E, 0x00 },
+ {IMX_8BIT, 0x034F, 0xB0 },
+ {IMX_8BIT, 0x0350, 0x00 },
+ {IMX_8BIT, 0x0351, 0x00 },
+ {IMX_8BIT, 0x0352, 0x00 },
+ {IMX_8BIT, 0x0353, 0x00 },
+ {IMX_8BIT, 0x0354, 0x03 }, /* 946x770 */
+ {IMX_8BIT, 0x0355, 0xB2 },
+ {IMX_8BIT, 0x0356, 0x03 },
+ {IMX_8BIT, 0x0357, 0x02 },
+ {IMX_8BIT, 0x301D, 0x30 },
+ {IMX_8BIT, 0x3310, 0x00 },
+ {IMX_8BIT, 0x3311, 0xD8 },
+ {IMX_8BIT, 0x3312, 0x00 },
+ {IMX_8BIT, 0x3313, 0xB0 },
+ {IMX_8BIT, 0x331C, 0x02 }, /* TODO! binning 4x4 must be 021c */
+ {IMX_8BIT, 0x331D, 0x1C },
+ {IMX_8BIT, 0x4084, 0x00 },
+ {IMX_8BIT, 0x4085, 0xD8 },
+ {IMX_8BIT, 0x4086, 0x00 },
+ {IMX_8BIT, 0x4087, 0xB0 },
+ {IMX_8BIT, 0x4400, 0x00 },
+ {IMX_TOK_TERM, 0, 0}
+};
+
+/*
+ * ISP scaling is now supported in offline capture use cases. Because of
+ * that we need only a few modes to cover the different aspect ratios from
+ * the sensor, and the ISP will scale the output based on the resolution
+ * requested by the HAL.
+ *
+ * There is a performance impact when the continuous viewfinder option is
+ * chosen for resolutions above 8MP. So the 8MP and 6MP resolutions are
+ * kept, so that resolutions lower than these fall back to 8MP or 6MP
+ * respectively for downscaling based on the aspect ratio.
+ */
+struct imx_resolution imx135_res_preview_mofd[] = {
+ {
+ .desc = "imx135_cif_binning_preview",
+ .regs = imx135_cif_binning,
+ .width = 368,
+ .height = 304,
+ .fps_options = {
+ { /* Binning Pixel clock: 335.36MHz */
+ .fps = 30,
+ .pixels_per_line = 9114,
+ .lines_per_frame = 1226,
+ },
+ {
+ }
+ },
+ .bin_factor_x = 2,
+ .bin_factor_y = 2,
+ .mipi_freq = 209600,
+ },
+ {
+ .desc = "imx135_vga_binning_preview",
+ .regs = imx135_vga_binning,
+ .width = 1036,
+ .height = 780,
+ .fps_options = {
+ {
+ .fps = 30,
+ .pixels_per_line = 9144,
+ .lines_per_frame = 1226,
+ },
+ {
+ }
+ },
+ .bin_factor_x = 2,
+ .bin_factor_y = 2,
+ .mipi_freq = 209600,
+ },
+ {
+ .desc = "imx135_480p_preview",
+ .regs = imx135_480p_binning,
+ .width = 1036,
+ .height = 696,
+ .fps_options = {
+ { /* Binning Pixel clock: 335.36MHz */
+ .fps = 30,
+ .pixels_per_line = 9144,
+ .lines_per_frame = 1226,
+ },
+ {
+ }
+ },
+ .bin_factor_x = 2,
+ .bin_factor_y = 2,
+ .mipi_freq = 209600,
+ },
+ {
+ .desc = "imx135_1080p_binning_preview",
+ .regs = imx135_1080p_binning,
+ .width = 1936,
+ .height = 1104,
+ .fps_options = {
+ { /* Binning Pixel clock: 335.36MHz */
+ .fps = 30,
+ .pixels_per_line = 5464,
+ .lines_per_frame = 2046,
+ },
+ {
+ }
+ },
+ .bin_factor_x = 1,
+ .bin_factor_y = 1,
+ .used = 0,
+ .mipi_freq = 209600,
+ },
+ {
+ .desc = "imx135_3m__cont_cap",
+ .regs = imx135_3m_binning,
+ .width = 2064,
+ .height = 1552,
+ .fps_options = {
+ { /* Binning Pixel clock: 335.36MHz */
+ .fps = 30,
+ .pixels_per_line = 5464,
+ .lines_per_frame = 2046,
+ },
+ {
+ }
+ },
+ .bin_factor_x = 1,
+ .bin_factor_y = 1,
+ .used = 0,
+ .mipi_freq = 209600,
+ },
+ {
+ .desc = "imx135_6m_cont_cap",
+ .regs = imx135_6m,
+ .width = 3280,
+ .height = 1852,
+ .fps_options = {
+ { /* Binning Pixel clock: 360.96MHz */
+ .fps = 30,
+ .pixels_per_line = 4572,
+ .lines_per_frame = 2624,
+ },
+ {
+ }
+ },
+ .bin_factor_x = 0,
+ .bin_factor_y = 0,
+ .used = 0,
+ .mipi_freq = 451200,
+ },
+ {
+ .desc = "imx135_8m_scaled_from_12m__cont_cap",
+ .regs = imx135_8m_scaled_from_12m,
+ .width = 3280,
+ .height = 2464,
+ .fps_options = {
+ { /* Pixel clock: 360.96MHz */
+ .fps = 24,
+ .pixels_per_line = 4572,
+ .lines_per_frame = 3280,
+ },
+ {
+ }
+ },
+ .bin_factor_x = 0,
+ .bin_factor_y = 0,
+ .used = 0,
+ .mipi_freq = 451200,
+ },
+ {
+ .desc = "imx135_10m__cont_cap",
+ .regs = imx135_10m,
+ .width = 4208,
+ .height = 2368,
+ .fps_options = {
+ { /* Pixel clock: 360.96MHz */
+ .fps = 30,
+ .pixels_per_line = 4572,
+ .lines_per_frame = 2632,
+ },
+ {
+ }
+ },
+ .bin_factor_x = 0,
+ .bin_factor_y = 0,
+ .used = 0,
+ .mipi_freq = 451200,
+ },
+ {
+ .desc = "imx135_13m__cont_cap",
+ .regs = imx135_13m,
+ .width = 4208,
+ .height = 3120,
+ .fps_options = {
+ { /* Pixel clock: 360.96MHz */
+ .fps = 24,
+ .pixels_per_line = 4572,
+ .lines_per_frame = 3290,
+ },
+ {
+ }
+ },
+ .bin_factor_x = 0,
+ .bin_factor_y = 0,
+ .used = 0,
+ .mipi_freq = 451200,
+ },
+};
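(As the comment before imx135_res_preview_mofd notes, the ISP scales to the HAL-requested size, so only a handful of sensor modes are listed per aspect ratio. For illustration only, and not part of this patch: one plausible way to pick an entry from such a table is to take the smallest mode that still covers the requested size; the real atomisp selection logic lives in the driver core and may weigh fps and aspect ratio differently. The function name below is hypothetical.)

	/* Illustrative sketch; function name is hypothetical. */
	static const struct imx_resolution *
	imx_pick_res(const struct imx_resolution *tbl, int n, int w, int h)
	{
		const struct imx_resolution *best = NULL;
		int i;

		for (i = 0; i < n; i++) {
			if (tbl[i].width < w || tbl[i].height < h)
				continue; /* too small; the ISP only scales down */
			if (!best || tbl[i].width * tbl[i].height <
				     best->width * best->height)
				best = &tbl[i]; /* prefer the least oversized mode */
		}
		return best; /* NULL if nothing covers the request */
	}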
+
+struct imx_resolution imx135_res_preview[] = {
+ {
+ .desc = "imx135_xga_cropped_video",
+ .regs = imx135_xga_cropped,
+ .width = 832,
+ .height = 628,
+ .fps_options = {
+ { /* Binning Pixel clock: 335.36MHz */
+ .fps = 30,
+ .pixels_per_line = 5464,
+ .lines_per_frame = 2046,
+
+ },
+ {
+ }
+ },
+ .bin_factor_x = 2,
+ .bin_factor_y = 2,
+ .used = 0,
+ .mipi_freq = 209600,
+ },
+ {
+ .desc = "imx135_2m_cropped_video",
+ .regs = imx135_2m_cropped,
+ .width = 1648,
+ .height = 1240,
+ .fps_options = {
+ { /* Pixel clock: 335.36MHz */
+ .fps = 30,
+ .pixels_per_line = 5464,
+ .lines_per_frame = 2046,
+
+ },
+ {
+ }
+ },
+ .bin_factor_x = 1,
+ .bin_factor_y = 1,
+ .used = 0,
+ .mipi_freq = 209600,
+ },
+ {
+ .desc = "imx135_1936x1096_cropped",
+ .regs = imx135_1936x1096_cropped,
+ .width = 1936,
+ .height = 1096,
+ .fps_options = {
+ { /* Pixel clock: 335.36MHz */
+ .fps = 30,
+ .pixels_per_line = 5464,
+ .lines_per_frame = 2046,
+
+ },
+ {
+ }
+ },
+ .bin_factor_x = 0,
+ .bin_factor_y = 0,
+ .used = 0,
+ .mipi_freq = 209600,
+ },
+ {
+ .desc = "imx135_8m_cropped_video",
+ .regs = imx135_8m_cropped,
+ .width = 3280,
+ .height = 2464,
+ .fps_options = {
+ { /* Pixel clock: 360.96MHz */
+ .fps = 30,
+ .pixels_per_line = 4572,
+ .lines_per_frame = 2624,
+ },
+ {
+ }
+ },
+ .bin_factor_x = 0,
+ .bin_factor_y = 0,
+ .used = 0,
+ .mipi_freq = 451200,
+ },
+};
+
+/*
+ * ISP scaling is now supported in online capture use cases. Because of
+ * that we need only a few modes to cover the different aspect ratios from
+ * the sensor, and the ISP will scale the output based on the resolution
+ * requested by the HAL.
+ *
+ * There is a performance impact when the continuous viewfinder option is
+ * chosen for resolutions above 8MP. So the 8MP and 6MP resolutions are
+ * kept, so that resolutions lower than these fall back to 8MP or 6MP
+ * respectively for downscaling based on the aspect ratio.
+ */
+struct imx_resolution imx135_res_still_mofd[] = {
+ {
+ .desc = "imx135_cif_binning_still",
+ .regs = imx135_cif_binning_1888x1548,
+ .width = 1888,
+ .height = 1548,
+ .fps_options = {
+ { /* Binning Pixel clock: 335.36MHz */
+ .fps = 30,
+ .pixels_per_line = 5464,
+ .lines_per_frame = 2046,
+ },
+ {
+ }
+ },
+ .bin_factor_x = 1,
+ .bin_factor_y = 1,
+ .mipi_freq = 209600,
+ },
+ {
+ .desc = "imx135_vga_binning_preview",
+ .regs = imx135_vga_binning,
+ .width = 1036,
+ .height = 780,
+ .fps_options = {
+ {
+ .fps = 30,
+ .pixels_per_line = 9144,
+ .lines_per_frame = 1226,
+ },
+ {
+ }
+ },
+ .bin_factor_x = 2,
+ .bin_factor_y = 2,
+ .mipi_freq = 209600,
+ },
+ {
+ .desc = "imx135_480p_preview",
+ .regs = imx135_480p_binning,
+ .width = 1036,
+ .height = 696,
+ .fps_options = {
+ { /* Binning Pixel clock: 335.36MHz */
+ .fps = 30,
+ .pixels_per_line = 9144,
+ .lines_per_frame = 1226,
+ },
+ {
+ }
+ },
+ .bin_factor_x = 2,
+ .bin_factor_y = 2,
+ .mipi_freq = 209600,
+ },
+ {
+ .desc = "imx135_1080p_binning_still",
+ .regs = imx135_1080p_binning,
+ .width = 1936,
+ .height = 1104,
+ .fps_options = {
+ { /* Binning Pixel clock: 335.36MHz */
+ .fps = 15,
+ .pixels_per_line = 9114,
+ .lines_per_frame = 2453,
+ },
+ {
+ }
+ },
+ .bin_factor_x = 1,
+ .bin_factor_y = 1,
+ .used = 0,
+ .mipi_freq = 209600,
+ },
+ {
+ .desc = "imx135_3m__still",
+ .regs = imx135_3m_binning,
+ .width = 2064,
+ .height = 1552,
+ .fps_options = {
+ { /* Binning Pixel clock: 335.36MHz */
+ .fps = 15,
+ .pixels_per_line = 9114,
+ .lines_per_frame = 2453,
+ },
+ {
+ }
+ },
+ .bin_factor_x = 1,
+ .bin_factor_y = 1,
+ .used = 0,
+ .mipi_freq = 209600,
+ },
+ {
+ .desc = "imx135_6m_for_mipi_342_still",
+ .regs = imx135_6m_for_mipi_342,
+ .width = 3280,
+ .height = 1852,
+ .fps_options = {
+ { /* Pixel clock: 273.6MHz */
+ .fps = 11,
+ .pixels_per_line = 9114,
+ .lines_per_frame = 2664,
+ },
+ {
+ }
+ },
+ .bin_factor_x = 0,
+ .bin_factor_y = 0,
+ .used = 0,
+ .mipi_freq = 342000,
+ },
+ {
+ .desc = "imx135_8m_scaled_from_12m_for_mipi342_still",
+ .regs = imx135_8m_scaled_from_12m_for_mipi342,
+ .width = 3280,
+ .height = 2464,
+ .fps_options = {
+ { /* Pixel clock: 273.6MHz */
+ .fps = 8,
+ .pixels_per_line = 7672,
+ .lines_per_frame = 4458,
+ },
+ { /* Pixel clock: 273.6MHz */
+ .fps = 15,
+ .pixels_per_line = 5500,
+ .lines_per_frame = 3314,
+ },
+ {
+ }
+ },
+ .bin_factor_x = 0,
+ .bin_factor_y = 0,
+ .used = 0,
+ .mipi_freq = 342000,
+ },
+ {
+ .desc = "imx135_10m_for_mipi_342_still",
+ .regs = imx135_10m_for_mipi_342,
+ .width = 4208,
+ .height = 2368,
+ .fps_options = {
+ { /* Pixel clock: 273.6MHz */
+ .fps = 11,
+ .pixels_per_line = 9144,
+ .lines_per_frame = 2664,
+ },
+ {
+ }
+ },
+ .bin_factor_x = 0,
+ .bin_factor_y = 0,
+ .used = 0,
+ .mipi_freq = 342000,
+ },
+ {
+ .desc = "imx135_13m_still",
+ .regs = imx135_13m_for_mipi_342,
+ .width = 4208,
+ .height = 3120,
+ .fps_options = {
+ { /* Pixel clock: 273.6MHz */
+ .fps = 5,
+ .pixels_per_line = 9144,
+ .lines_per_frame = 5990,
+ },
+ {
+ }
+ },
+ .bin_factor_x = 0,
+ .bin_factor_y = 0,
+ .used = 0,
+ .mipi_freq = 342000,
+ },
+};
+
+struct imx_resolution imx135_res_still[] = {
+ {
+ .desc = "imx135_qvga",
+ .regs = imx135_336x256,
+ .width = 336,
+ .height = 256,
+ .fps_options = {
+ { /* Pixel clock: 360.96MHz */
+ .fps = 30,
+ .pixels_per_line = 4572,
+ .lines_per_frame = 2624,
+ },
+ {
+ }
+ },
+ .bin_factor_x = 1,
+ .bin_factor_y = 1,
+ .used = 0,
+ .mipi_freq = 451200,
+ },
+ {
+ .desc = "imx135_cif",
+ .regs = imx135_368x304_cropped,
+ .width = 368,
+ .height = 304,
+ .fps_options = {
+ { /* Pixel clock: 360.96MHz */
+ .fps = 30,
+ .pixels_per_line = 4572,
+ .lines_per_frame = 2624,
+ },
+ {
+ }
+ },
+ .bin_factor_x = 1,
+ .bin_factor_y = 1,
+ .used = 0,
+ .mipi_freq = 451200,
+ },
+ {
+ .desc = "imx135_xga_cropped_video",
+ .regs = imx135_xga_cropped,
+ .width = 832,
+ .height = 628,
+ .fps_options = {
+ { /* Pixel clock: 360.96MHz */
+ .fps = 30,
+ .pixels_per_line = 4572,
+ .lines_per_frame = 2624,
+ },
+ {
+ }
+ },
+ .bin_factor_x = 2,
+ .bin_factor_y = 2,
+ .used = 0,
+ .mipi_freq = 451200,
+ },
+ {
+ .desc = "imx135_2M_for_11:9",
+ .regs = imx135_1424x1168_cropped,
+ .width = 1424,
+ .height = 1168,
+ .fps_options = {
+ { /* Pixel clock: 360.96MHz */
+ .fps = 30,
+ .pixels_per_line = 4572,
+ .lines_per_frame = 2624,
+ },
+ {
+ }
+ },
+ .bin_factor_x = 1,
+ .bin_factor_y = 1,
+ .used = 0,
+ .mipi_freq = 451200,
+ },
+ {
+ .desc = "imx135_2m_cropped_video",
+ .regs = imx135_2m_cropped,
+ .width = 1648,
+ .height = 1240,
+ .fps_options = {
+ { /* Pixel clock: 360.96MHz */
+ .fps = 15,
+ .pixels_per_line = 6466,
+ .lines_per_frame = 3710,
+ },
+ {
+ }
+ },
+ .bin_factor_x = 1,
+ .bin_factor_y = 1,
+ .used = 0,
+ .mipi_freq = 451200,
+ },
+ {
+ .desc = "imx135_6m_cropped_video",
+ .regs = imx135_6m_cropped,
+ .width = 3280,
+ .height = 1852,
+ .fps_options = {
+ { /* Pixel clock: 360.96MHz */
+ .fps = 8,
+ .pixels_per_line = 8850,
+ .lines_per_frame = 5080,
+ },
+ {
+ }
+ },
+ .bin_factor_x = 0,
+ .bin_factor_y = 0,
+ .used = 0,
+ .mipi_freq = 451200,
+ },
+ {
+ .desc = "imx135_8m_cropped_video",
+ .regs = imx135_8m_cropped,
+ .width = 3280,
+ .height = 2464,
+ .fps_options = {
+ { /* Pixel clock: 360.96MHz */
+ .fps = 8,
+ .pixels_per_line = 8850,
+ .lines_per_frame = 5080,
+ },
+ {
+ }
+ },
+ .bin_factor_x = 0,
+ .bin_factor_y = 0,
+ .used = 0,
+ .mipi_freq = 451200,
+ },
+};
+
+/*
+ * ISP scaling is not supported for video modes, so we need separate
+ * sensor modes for video use cases.
+ */
+struct imx_resolution imx135_res_video[] = {
+ /* For binning modes pix clock is 335.36 MHz. */
+ {
+ .desc = "imx135_qcif_dvs_binning_video",
+ .regs = imx135_qcif_dvs_binning,
+ .width = 216,
+ .height = 176,
+ .fps_options = {
+ {
+ .fps = 30,
+ .pixels_per_line = 9144,
+ .lines_per_frame = 1226,
+ },
+ {
+ }
+ },
+ .bin_factor_x = 2,
+ .bin_factor_y = 2,
+ .mipi_freq = 209600,
+ },
+ {
+ .desc = "imx135_cif_binning_video",
+ .regs = imx135_cif_binning,
+ .width = 368,
+ .height = 304,
+ .fps_options = {
+ {
+ .fps = 30,
+ .pixels_per_line = 9144,
+ .lines_per_frame = 1226,
+ },
+ {
+ }
+ },
+ .bin_factor_x = 2,
+ .bin_factor_y = 2,
+ .mipi_freq = 209600,
+ },
+ {
+ .desc = "imx135_qvga__dvs_binning_video",
+ .regs = imx135_qvga__dvs_binning,
+ .width = 408,
+ .height = 308,
+ .fps_options = {
+ {
+ .fps = 30,
+ .pixels_per_line = 9144,
+ .lines_per_frame = 1226,
+ },
+ {
+ }
+ },
+ .bin_factor_x = 2,
+ .bin_factor_y = 2,
+ .mipi_freq = 209600,
+ },
+ {
+ .desc = "imx135_436x360_binning_video",
+ .regs = imx135_436x360_binning,
+ .width = 436,
+ .height = 360,
+ .fps_options = {
+ {
+ .fps = 30,
+ .pixels_per_line = 9144,
+ .lines_per_frame = 1226,
+ },
+ {
+ }
+ },
+ .bin_factor_x = 2,
+ .bin_factor_y = 2,
+ .mipi_freq = 209600,
+ },
+ {
+ .desc = "imx135_vga_dvs_binning_video",
+ .regs = imx135_vga_dvs_binning,
+ .width = 820,
+ .height = 616,
+ .fps_options = {
+ {
+ .fps = 30,
+ .pixels_per_line = 9144,
+ .lines_per_frame = 1226,
+ },
+ {
+ }
+ },
+ .bin_factor_x = 2,
+ .bin_factor_y = 2,
+ .mipi_freq = 209600,
+ },
+ {
+ .desc = "imx135_480p_dvs_binning_video",
+ .regs = imx135_480p_dvs_binning,
+ .width = 936,
+ .height = 602,
+ .fps_options = {
+ {
+ .fps = 30,
+ .pixels_per_line = 5464,
+ .lines_per_frame = 2046,
+ },
+ {
+ }
+ },
+ .bin_factor_x = 1,
+ .bin_factor_y = 1,
+ .mipi_freq = 209600,
+ },
+ {
+ .desc = "imx135_720P_dvs_video",
+ .regs = imx135_720pdvs_max_clock,
+ .width = 1568,
+ .height = 880,
+ .fps_options = {
+ {/* Pixel Clock : 360.96 MHz */
+ .fps = 30,
+ .pixels_per_line = 5850,
+ .lines_per_frame = 2000,
+ },
+ {/* Pixel Clock : 360.96 MHz */
+ .fps = 60,
+ .pixels_per_line = 4572,
+ .lines_per_frame = 1310,
+ },
+ {
+ }
+ },
+ .bin_factor_x = 1,
+ .bin_factor_y = 1,
+ .mipi_freq = 451200,
+ },
+ {
+ .desc = "imx135_wvga_dvs_binning_video",
+ .regs = imx135_wvga_dvs_binning,
+ .width = 1640,
+ .height = 1024,
+ .fps_options = {
+ {
+ .fps = 30,
+ .pixels_per_line = 5464,
+ .lines_per_frame = 2046,
+ },
+ {
+ }
+ },
+ .bin_factor_x = 1,
+ .bin_factor_y = 1,
+ .mipi_freq = 209600,
+ },
+ {
+ .desc = "imx135_1936_1096_fullfov_max_clock",
+ .regs = imx135_1080p_nodvs_max_clock,
+ .width = 1936,
+ .height = 1096,
+ .fps_options = {
+ {/* Pixel Clock : 360.96 MHz */
+ .fps = 30,
+ .pixels_per_line = 5850,
+ .lines_per_frame = 2000,
+ },
+ {/* Pixel Clock : 360.96 MHz */
+ .fps = 60,
+ .pixels_per_line = 4572,
+ .lines_per_frame = 1310,
+ },
+ {
+ }
+ },
+ .bin_factor_x = 1,
+ .bin_factor_y = 1,
+ .mipi_freq = 451200,
+ },
+ {
+ .desc = "imx135_1080P_dvs_video",
+ .regs = imx135_2336x1320_max_clock,
+ .width = 2336,
+ .height = 1320,
+ .fps_options = {
+ {/* Pixel Clock : 360.96 MHz */
+ .fps = 30,
+ .pixels_per_line = 4572,
+ .lines_per_frame = 2632,
+ .regs = imx135_2336x1320_max_clock,
+ .mipi_freq = 451200,
+ },
+ {/* Pixel Clock : 399.36MHz */
+ .fps = 60,
+ .pixels_per_line = 4754,
+ .lines_per_frame = 1400,
+ .regs = imx135_2336x1320_cropped_mipi499,
+ .mipi_freq = 499200,
+ },
+ {
+ }
+ },
+ .bin_factor_x = 0,
+ .bin_factor_y = 0,
+ .mipi_freq = 451200,
+ },
+ {
+ .desc = "imx135_6m_cont_cap",
+ .regs = imx135_6m,
+ .width = 3280,
+ .height = 1852,
+ .fps_options = {
+ { /* Binning Pixel clock: 360.96MHz */
+ .fps = 30,
+ .pixels_per_line = 4572,
+ .lines_per_frame = 2624,
+ },
+ {
+ }
+ },
+ .bin_factor_x = 0,
+ .bin_factor_y = 0,
+ .used = 0,
+ .mipi_freq = 451200,
+ },
+ {
+ .desc = "imx135_8m_cropped_video",
+ .regs = imx135_8m_cropped,
+ .width = 3280,
+ .height = 2464,
+ .fps_options = {
+ { /* Pixel clock: 360.96MHz */
+ .fps = 30,
+ .pixels_per_line = 4572,
+ .lines_per_frame = 2624,
+ },
+ {
+ }
+ },
+ .bin_factor_x = 0,
+ .bin_factor_y = 0,
+ .used = 0,
+ .mipi_freq = 451200,
+ },
+};
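(One detail of imx135_res_video worth noting: in the "imx135_1080P_dvs_video" entry each fps_options element carries its own .regs and .mipi_freq, whereas most entries set them only at the top level. Presumably the per-option values, when present, override the entry-level defaults; the fallback pattern would look roughly like the sketch below. Illustrative only, name hypothetical, not part of this patch.)

	/* Illustrative sketch of the assumed per-fps-option override. */
	static const struct imx_reg *
	imx_regs_for_fps_option(const struct imx_resolution *res, int idx)
	{
		if (res->fps_options[idx].regs)
			return res->fps_options[idx].regs; /* per-fps override */
		return res->regs;                          /* entry-level default */
	}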
+
+#endif
diff --git a/drivers/staging/media/atomisp/i2c/imx/imx175.h b/drivers/staging/media/atomisp/i2c/imx/imx175.h
new file mode 100644
index 000000000000..5f409ccedc85
--- /dev/null
+++ b/drivers/staging/media/atomisp/i2c/imx/imx175.h
@@ -0,0 +1,1959 @@
+#ifndef __IMX175_H__
+#define __IMX175_H__
+#include "common.h"
+
+/************************** settings for imx *************************/
+static struct imx_reg const imx_STILL_8M_30fps[] = {
+ GROUPED_PARAMETER_HOLD_ENABLE,
+ {IMX_8BIT, 0x0100, 0x00}, /* mode_select */
+ /* shutter */
+ {IMX_8BIT, 0x0202, 0x0A}, /* coarse _integration_time[15:8] */
+ {IMX_8BIT, 0x0203, 0xFC}, /* coarse _integration_time[7:0] */
+ /* pll */
+ {IMX_8BIT, 0x0301, 0x05}, /* vt_pix_clk_div[7:0] */
+ {IMX_8BIT, 0x0303, 0x01}, /* vt_sys_clk_div[7:0] */
+ {IMX_8BIT, 0x0305, 0x09}, /* pre_pll_clk_div[7:0] */
+ {IMX_8BIT, 0x0309, 0x05}, /* op_pix_clk_div[7:0] */
+ {IMX_8BIT, 0x030B, 0x01}, /* op_sys_clk_div[7:0] */
+ {IMX_8BIT, 0x030C, 0x01},
+ {IMX_8BIT, 0x030D, 0x2c},
+ /* image sizing */
+ {IMX_8BIT, 0x0340, 0x09}, /* frame_length_lines[15:8] */
+ {IMX_8BIT, 0x0341, 0xC4}, /* frame_length_lines[7:0] */
+ {IMX_8BIT, 0x0342, 0x0D}, /* line_length_pck[15:8] */
+ {IMX_8BIT, 0x0343, 0x66}, /* line_length_pck[7:0] */
+ {IMX_8BIT, 0x0344, 0x00}, /* x_addr_start[15:8] */
+ {IMX_8BIT, 0x0345, 0x00}, /* x_addr_start[7:0] */
+ {IMX_8BIT, 0x0346, 0x00}, /* y_addr_start[15:8] */
+ {IMX_8BIT, 0x0347, 0x00}, /* y_addr_start[7:0] */
+ {IMX_8BIT, 0x0348, 0x0C}, /* x_addr_end[15:8] */
+ {IMX_8BIT, 0x0349, 0xCF}, /* x_addr_end[7:0] */
+ {IMX_8BIT, 0x034A, 0x09}, /* y_addr_end[15:8] */
+ {IMX_8BIT, 0x034B, 0x9F}, /* y_addr_end[7:0] */
+ {IMX_8BIT, 0x034C, 0x0C}, /* x_output_size[15:8] */
+ {IMX_8BIT, 0x034D, 0xD0}, /* x_output_size[7:0] */
+ {IMX_8BIT, 0x034E, 0x09}, /* y_output_size[15:8] */
+ {IMX_8BIT, 0x034F, 0xA0}, /* y_output_size[7:0] */
+ /* binning & scaling */
+ {IMX_8BIT, 0x0390, 0x00}, /* binning mode */
+ {IMX_8BIT, 0x0401, 0x00}, /* scaling mode*/
+ {IMX_8BIT, 0x0405, 0x10}, /* scale_m[7:0] */
+ /* timer */
+ {IMX_8BIT, 0x3344, 0x57},
+ {IMX_8BIT, 0x3345, 0x1F},
+ /* timing */
+ {IMX_8BIT, 0x3370, 0x77},
+ {IMX_8BIT, 0x3371, 0x2F},
+ {IMX_8BIT, 0x3372, 0x4F},
+ {IMX_8BIT, 0x3373, 0x2F},
+ {IMX_8BIT, 0x3374, 0x2F},
+ {IMX_8BIT, 0x3375, 0x37},
+ {IMX_8BIT, 0x3376, 0x9F},
+ {IMX_8BIT, 0x3377, 0x37},
+ {IMX_8BIT, 0x33C8, 0x01},
+ {IMX_8BIT, 0x33D4, 0x0C},
+ {IMX_8BIT, 0x33D5, 0xD0},
+ {IMX_8BIT, 0x33D6, 0x09},
+ {IMX_8BIT, 0x33D7, 0xA0},
+
+ {IMX_8BIT, 0x030e, 0x01},
+ {IMX_8BIT, 0x41c0, 0x01},
+ {IMX_TOK_TERM, 0, 0}
+};
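(The frame timing registers in imx_STILL_8M_30fps, frame_length_lines at 0x0340/0x0341 and line_length_pck at 0x0342/0x0343, set the nominal frame rate together with the video-timing pixel clock, which in turn depends on the PLL registers and the external clock feeding the sensor. A hedged illustration of that standard relation follows; it is not part of this patch, and the clock value is whatever the PLL setup actually produces.)

	/* Nominal frame rate implied by the timing registers above. */
	static u32 imx_nominal_fps(u32 vt_pix_clk_hz, u16 frame_length_lines,
				   u16 line_length_pck)
	{
		return vt_pix_clk_hz / (frame_length_lines * line_length_pck);
	}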
+
+static struct imx_reg const imx_STILL_8M_15fps[] = {
+ GROUPED_PARAMETER_HOLD_ENABLE,
+ {IMX_8BIT, 0x0100, 0x00}, /* mode_select */
+ /* shutter */
+ {IMX_8BIT, 0x0202, 0x0A}, /* coarse _integration_time[15:8] */
+ {IMX_8BIT, 0x0203, 0xFC}, /* coarse _integration_time[7:0] */
+ /* pll */
+ {IMX_8BIT, 0x0301, 0x05}, /* vt_pix_clk_div[7:0] */
+ {IMX_8BIT, 0x0303, 0x01}, /* vt_sys_clk_div[7:0] */
+ {IMX_8BIT, 0x0305, 0x09}, /* pre_pll_clk_div[7:0] */
+ {IMX_8BIT, 0x0309, 0x05}, /* op_pix_clk_div[7:0] */
+ {IMX_8BIT, 0x030B, 0x01}, /* op_sys_clk_div[7:0] */
+ {IMX_8BIT, 0x030C, 0x01},
+ {IMX_8BIT, 0x030D, 0x2c},
+ /* image sizing */
+ {IMX_8BIT, 0x0340, 0x0B}, /* frame_length_lines[15:8] */
+ {IMX_8BIT, 0x0341, 0xB8}, /* frame_length_lines[7:0] */
+ {IMX_8BIT, 0x0342, 0x16}, /* line_length_pck[15:8] */
+ {IMX_8BIT, 0x0343, 0x44}, /* line_length_pck[7:0] */
+ {IMX_8BIT, 0x0344, 0x00}, /* x_addr_start[15:8] */
+ {IMX_8BIT, 0x0345, 0x00}, /* x_addr_start[7:0] */
+ {IMX_8BIT, 0x0346, 0x00}, /* y_addr_start[15:8] */
+ {IMX_8BIT, 0x0347, 0x00}, /* y_addr_start[7:0] */
+ {IMX_8BIT, 0x0348, 0x0C}, /* x_addr_end[15:8] */
+ {IMX_8BIT, 0x0349, 0xCF}, /* x_addr_end[7:0] */
+ {IMX_8BIT, 0x034A, 0x09}, /* y_addr_end[15:8] */
+ {IMX_8BIT, 0x034B, 0x9F}, /* y_addr_end[7:0] */
+ {IMX_8BIT, 0x034C, 0x0C}, /* x_output_size[15:8] */
+ {IMX_8BIT, 0x034D, 0xD0}, /* x_output_size[7:0] */
+ {IMX_8BIT, 0x034E, 0x09}, /* y_output_size[15:8] */
+ {IMX_8BIT, 0x034F, 0xA0}, /* y_output_size[7:0] */
+ /* binning & scaling */
+ {IMX_8BIT, 0x0390, 0x00}, /* binning mode */
+ {IMX_8BIT, 0x0401, 0x00}, /* scaling mode*/
+ {IMX_8BIT, 0x0405, 0x10}, /* scale_m[7:0] */
+ /* timer */
+ {IMX_8BIT, 0x3344, 0x57},
+ {IMX_8BIT, 0x3345, 0x1F},
+ /* timing */
+ {IMX_8BIT, 0x3370, 0x77},
+ {IMX_8BIT, 0x3371, 0x2F},
+ {IMX_8BIT, 0x3372, 0x4F},
+ {IMX_8BIT, 0x3373, 0x2F},
+ {IMX_8BIT, 0x3374, 0x2F},
+ {IMX_8BIT, 0x3375, 0x37},
+ {IMX_8BIT, 0x3376, 0x9F},
+ {IMX_8BIT, 0x3377, 0x37},
+ {IMX_8BIT, 0x33C8, 0x01},
+ {IMX_8BIT, 0x33D4, 0x0C},
+ {IMX_8BIT, 0x33D5, 0xD0},
+ {IMX_8BIT, 0x33D6, 0x09},
+ {IMX_8BIT, 0x33D7, 0xA0},
+
+ {IMX_8BIT, 0x030e, 0x01},
+ {IMX_8BIT, 0x41c0, 0x01},
+ {IMX_TOK_TERM, 0, 0}
+};
+
+static struct imx_reg const imx_STILL_3M_30fps[] = {
+ GROUPED_PARAMETER_HOLD_ENABLE,
+ {IMX_8BIT, 0x0100, 0x00}, /* mode_select */
+ /* shutter */
+ {IMX_8BIT, 0x0202, 0x0A}, /* coarse _integration_time[15:8] */
+ {IMX_8BIT, 0x0203, 0xEF}, /* coarse _integration_time[7:0] */
+ /* pll */
+ {IMX_8BIT, 0x0301, 0x05}, /* vt_pix_clk_div[7:0] */
+ {IMX_8BIT, 0x0303, 0x01}, /* vt_sys_clk_div[7:0] */
+ {IMX_8BIT, 0x0305, 0x09}, /* pre_pll_clk_div[7:0] */
+ {IMX_8BIT, 0x0309, 0x05}, /* op_pix_clk_div[7:0] */
+ {IMX_8BIT, 0x030B, 0x01}, /* op_sys_clk_div[7:0] */
+ {IMX_8BIT, 0x030C, 0x01},
+ {IMX_8BIT, 0x030D, 0x2c},
+ /* image sizing */
+ {IMX_8BIT, 0x0340, 0x09}, /* frame_length_lines[15:8] */
+ {IMX_8BIT, 0x0341, 0xC4}, /* frame_length_lines[7:0] */
+ {IMX_8BIT, 0x0342, 0x0D}, /* line_length_pck[15:8] */
+ {IMX_8BIT, 0x0343, 0x66}, /* line_length_pck[7:0] */
+ {IMX_8BIT, 0x0344, 0x00}, /* x_addr_start[15:8] */
+ {IMX_8BIT, 0x0345, 0x00}, /* x_addr_start[7:0] */
+ {IMX_8BIT, 0x0346, 0x00}, /* y_addr_start[15:8] */
+ {IMX_8BIT, 0x0347, 0x00}, /* y_addr_start[7:0] */
+ {IMX_8BIT, 0x0348, 0x0C}, /* x_addr_end[15:8] */
+ {IMX_8BIT, 0x0349, 0xCF}, /* x_addr_end[7:0] */
+ {IMX_8BIT, 0x034A, 0x09}, /* y_addr_end[15:8] */
+ {IMX_8BIT, 0x034B, 0x9F}, /* y_addr_end[7:0] */
+ {IMX_8BIT, 0x034C, 0x08}, /* x_output_size[15:8] */
+ {IMX_8BIT, 0x034D, 0x10}, /* x_output_size[7:0] */
+ {IMX_8BIT, 0x034E, 0x06}, /* y_output_size[15:8] */
+ {IMX_8BIT, 0x034F, 0x10}, /* y_output_size[7:0] */
+ /* binning & scaling */
+ {IMX_8BIT, 0x0390, 0x00}, /* binning mode */
+ {IMX_8BIT, 0x0401, 0x02}, /* scaling mode*/
+ {IMX_8BIT, 0x0405, 0x19}, /* scale_m[7:0] */
+ /* timer */
+ {IMX_8BIT, 0x3344, 0x57},
+ {IMX_8BIT, 0x3345, 0x1F},
+ /* timing */
+ {IMX_8BIT, 0x3370, 0x77},
+ {IMX_8BIT, 0x3371, 0x2F},
+ {IMX_8BIT, 0x3372, 0x4F},
+ {IMX_8BIT, 0x3373, 0x2F},
+ {IMX_8BIT, 0x3374, 0x2F},
+ {IMX_8BIT, 0x3375, 0x37},
+ {IMX_8BIT, 0x3376, 0x9F},
+ {IMX_8BIT, 0x3377, 0x37},
+ {IMX_8BIT, 0x33C8, 0x01},
+ {IMX_8BIT, 0x33D4, 0x0C},
+ {IMX_8BIT, 0x33D5, 0xD0},
+ {IMX_8BIT, 0x33D6, 0x09},
+ {IMX_8BIT, 0x33D7, 0xA0},
+
+ {IMX_8BIT, 0x030e, 0x01},
+ {IMX_8BIT, 0x41c0, 0x01},
+ {IMX_TOK_TERM, 0, 0}
+};
+
+static struct imx_reg const imx_STILL_3M_15fps[] = {
+ GROUPED_PARAMETER_HOLD_ENABLE,
+ {IMX_8BIT, 0x0100, 0x00}, /* mode_select */
+ /* shutter */
+ {IMX_8BIT, 0x0202, 0x0A}, /* coarse _integration_time[15:8] */
+ {IMX_8BIT, 0x0203, 0xEF}, /* coarse _integration_time[7:0] */
+ /* pll */
+ {IMX_8BIT, 0x0301, 0x05}, /* vt_pix_clk_div[7:0] */
+ {IMX_8BIT, 0x0303, 0x01}, /* vt_sys_clk_div[7:0] */
+ {IMX_8BIT, 0x0305, 0x09}, /* pre_pll_clk_div[7:0] */
+ {IMX_8BIT, 0x0309, 0x05}, /* op_pix_clk_div[7:0] */
+ {IMX_8BIT, 0x030B, 0x01}, /* op_sys_clk_div[7:0] */
+ {IMX_8BIT, 0x030C, 0x01},
+ {IMX_8BIT, 0x030D, 0x2c},
+ /* image sizing */
+ {IMX_8BIT, 0x0340, 0x0B}, /* frame_length_lines[15:8] */
+ {IMX_8BIT, 0x0341, 0xB8}, /* frame_length_lines[7:0] */
+ {IMX_8BIT, 0x0342, 0x16}, /* line_length_pck[15:8] */
+ {IMX_8BIT, 0x0343, 0x44}, /* line_length_pck[7:0] */
+ {IMX_8BIT, 0x0344, 0x00}, /* x_addr_start[15:8] */
+ {IMX_8BIT, 0x0345, 0x00}, /* x_addr_start[7:0] */
+ {IMX_8BIT, 0x0346, 0x00}, /* y_addr_start[15:8] */
+ {IMX_8BIT, 0x0347, 0x00}, /* y_addr_start[7:0] */
+ {IMX_8BIT, 0x0348, 0x0C}, /* x_addr_end[15:8] */
+ {IMX_8BIT, 0x0349, 0xCF}, /* x_addr_end[7:0] */
+ {IMX_8BIT, 0x034A, 0x09}, /* y_addr_end[15:8] */
+ {IMX_8BIT, 0x034B, 0x9F}, /* y_addr_end[7:0] */
+ {IMX_8BIT, 0x034C, 0x08}, /* x_output_size[15:8] */
+ {IMX_8BIT, 0x034D, 0x10}, /* x_output_size[7:0] */
+ {IMX_8BIT, 0x034E, 0x06}, /* y_output_size[15:8] */
+ {IMX_8BIT, 0x034F, 0x10}, /* y_output_size[7:0] */
+ /* binning & scaling */
+ {IMX_8BIT, 0x0390, 0x00}, /* binning mode */
+ {IMX_8BIT, 0x0401, 0x02}, /* scaling mode*/
+ {IMX_8BIT, 0x0405, 0x19}, /* scale_m[7:0] */
+ /* timer */
+ {IMX_8BIT, 0x3344, 0x57},
+ {IMX_8BIT, 0x3345, 0x1F},
+ /* timing */
+ {IMX_8BIT, 0x3370, 0x77},
+ {IMX_8BIT, 0x3371, 0x2F},
+ {IMX_8BIT, 0x3372, 0x4F},
+ {IMX_8BIT, 0x3373, 0x2F},
+ {IMX_8BIT, 0x3374, 0x2F},
+ {IMX_8BIT, 0x3375, 0x37},
+ {IMX_8BIT, 0x3376, 0x9F},
+ {IMX_8BIT, 0x3377, 0x37},
+ {IMX_8BIT, 0x33C8, 0x01},
+ {IMX_8BIT, 0x33D4, 0x0C},
+ {IMX_8BIT, 0x33D5, 0xD0},
+ {IMX_8BIT, 0x33D6, 0x09},
+ {IMX_8BIT, 0x33D7, 0xA0},
+
+ {IMX_8BIT, 0x030e, 0x01},
+ {IMX_8BIT, 0x41c0, 0x01},
+ {IMX_TOK_TERM, 0, 0}
+};
+
+
+static struct imx_reg const imx_STILL_5M_30fps[] = {
+ GROUPED_PARAMETER_HOLD_ENABLE,
+ {IMX_8BIT, 0x0100, 0x00}, /* mode_select */
+ /* shutter */
+ {IMX_8BIT, 0x0202, 0x0A}, /* coarse _integration_time[15:8] */
+ {IMX_8BIT, 0x0203, 0xEF}, /* coarse _integration_time[7:0] */
+ /* pll */
+ {IMX_8BIT, 0x0301, 0x05}, /* vt_pix_clk_div[7:0] */
+ {IMX_8BIT, 0x0303, 0x01}, /* vt_sys_clk_div[7:0] */
+ {IMX_8BIT, 0x0305, 0x09}, /* pre_pll_clk_div[7:0] */
+ {IMX_8BIT, 0x0309, 0x05}, /* op_pix_clk_div[7:0] */
+ {IMX_8BIT, 0x030B, 0x01}, /* op_sys_clk_div[7:0] */
+ {IMX_8BIT, 0x030C, 0x01},
+ {IMX_8BIT, 0x030D, 0x2c},
+ /* image sizing */
+ {IMX_8BIT, 0x0340, 0x09}, /* frame_length_lines[15:8] */
+ {IMX_8BIT, 0x0341, 0xC4}, /* frame_length_lines[7:0] */
+ {IMX_8BIT, 0x0342, 0x0D}, /* line_length_pck[15:8] */
+ {IMX_8BIT, 0x0343, 0x66}, /* line_length_pck[7:0] */
+ {IMX_8BIT, 0x0344, 0x00}, /* x_addr_start[15:8] */
+ {IMX_8BIT, 0x0345, 0x00}, /* x_addr_start[7:0] */
+ {IMX_8BIT, 0x0346, 0x00}, /* y_addr_start[15:8] */
+ {IMX_8BIT, 0x0347, 0x00}, /* y_addr_start[7:0] */
+ {IMX_8BIT, 0x0348, 0x0C}, /* x_addr_end[15:8] */
+ {IMX_8BIT, 0x0349, 0xCF}, /* x_addr_end[7:0] */
+ {IMX_8BIT, 0x034A, 0x09}, /* y_addr_end[15:8] */
+ {IMX_8BIT, 0x034B, 0x9F}, /* y_addr_end[7:0] */
+ {IMX_8BIT, 0x034C, 0x0A}, /* x_output_size[15:8] */
+ {IMX_8BIT, 0x034D, 0x10}, /* x_output_size[7:0] */
+ {IMX_8BIT, 0x034E, 0x07}, /* y_output_size[15:8] */
+ {IMX_8BIT, 0x034F, 0x90}, /* y_output_size[7:0] */
+ /* binning & scaling */
+ {IMX_8BIT, 0x0390, 0x00}, /* binning mode */
+ {IMX_8BIT, 0x0401, 0x02}, /* scaling mode */
+ {IMX_8BIT, 0x0405, 0x14}, /* scale_m[7:0] */
+ /* timer */
+ {IMX_8BIT, 0x3344, 0x57},
+ {IMX_8BIT, 0x3345, 0x1F},
+ /* timing */
+ {IMX_8BIT, 0x3370, 0x77},
+ {IMX_8BIT, 0x3371, 0x2F},
+ {IMX_8BIT, 0x3372, 0x4F},
+ {IMX_8BIT, 0x3373, 0x2F},
+ {IMX_8BIT, 0x3374, 0x2F},
+ {IMX_8BIT, 0x3375, 0x37},
+ {IMX_8BIT, 0x3376, 0x9F},
+ {IMX_8BIT, 0x3377, 0x37},
+ {IMX_8BIT, 0x33C8, 0x01},
+ {IMX_8BIT, 0x33D4, 0x0C},
+ {IMX_8BIT, 0x33D5, 0xD0},
+ {IMX_8BIT, 0x33D6, 0x09},
+ {IMX_8BIT, 0x33D7, 0xA0},
+
+ {IMX_8BIT, 0x030e, 0x01},
+ {IMX_8BIT, 0x41c0, 0x01},
+ {IMX_TOK_TERM, 0, 0}
+};
+
+static struct imx_reg const imx_STILL_5M_15fps[] = {
+ GROUPED_PARAMETER_HOLD_ENABLE,
+ {IMX_8BIT, 0x0100, 0x00}, /* mode_select */
+ /* shutter */
+ {IMX_8BIT, 0x0202, 0x0A}, /* coarse_integration_time[15:8] */
+ {IMX_8BIT, 0x0203, 0xEF}, /* coarse_integration_time[7:0] */
+ /* pll */
+ {IMX_8BIT, 0x0301, 0x05}, /* vt_pix_clk_div[7:0] */
+ {IMX_8BIT, 0x0303, 0x01}, /* vt_sys_clk_div[7:0] */
+ {IMX_8BIT, 0x0305, 0x09}, /* pre_pll_clk_div[7:0] */
+ {IMX_8BIT, 0x0309, 0x05}, /* op_pix_clk_div[7:0] */
+ {IMX_8BIT, 0x030B, 0x01}, /* op_sys_clk_div[7:0] */
+ {IMX_8BIT, 0x030C, 0x01},
+ {IMX_8BIT, 0x030D, 0x2c},
+ /* image sizing */
+ {IMX_8BIT, 0x0340, 0x0B}, /* frame_length_lines[15:8] */
+ {IMX_8BIT, 0x0341, 0xB8}, /* frame_length_lines[7:0] */
+ {IMX_8BIT, 0x0342, 0x16}, /* line_length_pck[15:8] */
+ {IMX_8BIT, 0x0343, 0x44}, /* line_length_pck[7:0] */
+ {IMX_8BIT, 0x0344, 0x00}, /* x_addr_start[15:8] */
+ {IMX_8BIT, 0x0345, 0x00}, /* x_addr_start[7:0] */
+ {IMX_8BIT, 0x0346, 0x00}, /* y_addr_start[15:8] */
+ {IMX_8BIT, 0x0347, 0x00}, /* y_addr_start[7:0] */
+ {IMX_8BIT, 0x0348, 0x0C}, /* x_addr_end[15:8] */
+ {IMX_8BIT, 0x0349, 0xCF}, /* x_addr_end[7:0] */
+ {IMX_8BIT, 0x034A, 0x09}, /* y_addr_end[15:8] */
+ {IMX_8BIT, 0x034B, 0x9F}, /* y_addr_end[7:0] */
+ {IMX_8BIT, 0x034C, 0x0A}, /* x_output_size[15:8] */
+ {IMX_8BIT, 0x034D, 0x10}, /* x_output_size[7:0] */
+ {IMX_8BIT, 0x034E, 0x07}, /* y_output_size[15:8] */
+ {IMX_8BIT, 0x034F, 0x90}, /* y_output_size[7:0] */
+ /* binning & scaling */
+ {IMX_8BIT, 0x0390, 0x00}, /* binning mode */
+ {IMX_8BIT, 0x0401, 0x00}, /* scaling mode */
+ {IMX_8BIT, 0x0405, 0x14}, /* scale_m[7:0] */
+ /* timer */
+ {IMX_8BIT, 0x3344, 0x57},
+ {IMX_8BIT, 0x3345, 0x1F},
+ /* timing */
+ {IMX_8BIT, 0x3370, 0x77},
+ {IMX_8BIT, 0x3371, 0x2F},
+ {IMX_8BIT, 0x3372, 0x4F},
+ {IMX_8BIT, 0x3373, 0x2F},
+ {IMX_8BIT, 0x3374, 0x2F},
+ {IMX_8BIT, 0x3375, 0x37},
+ {IMX_8BIT, 0x3376, 0x9F},
+ {IMX_8BIT, 0x3377, 0x37},
+ {IMX_8BIT, 0x33C8, 0x01},
+ {IMX_8BIT, 0x33D4, 0x0C},
+ {IMX_8BIT, 0x33D5, 0xD0},
+ {IMX_8BIT, 0x33D6, 0x09},
+ {IMX_8BIT, 0x33D7, 0xA0},
+
+ {IMX_8BIT, 0x030e, 0x01},
+ {IMX_8BIT, 0x41c0, 0x01},
+ {IMX_TOK_TERM, 0, 0}
+};
+
+static struct imx_reg const imx_STILL_6M_30fps[] = {
+ GROUPED_PARAMETER_HOLD_ENABLE,
+ {IMX_8BIT, 0x0100, 0x00}, /* mode_select */
+ /* shutter */
+ {IMX_8BIT, 0x0202, 0x0A}, /* coarse_integration_time[15:8] */
+ {IMX_8BIT, 0x0203, 0xEF}, /* coarse_integration_time[7:0] */
+ /* pll */
+ {IMX_8BIT, 0x0301, 0x05}, /* vt_pix_clk_div[7:0] */
+ {IMX_8BIT, 0x0303, 0x01}, /* vt_sys_clk_div[7:0] */
+ {IMX_8BIT, 0x0305, 0x09}, /* pre_pll_clk_div[7:0] */
+ {IMX_8BIT, 0x0309, 0x05}, /* op_pix_clk_div[7:0] */
+ {IMX_8BIT, 0x030B, 0x01}, /* op_sys_clk_div[7:0] */
+ {IMX_8BIT, 0x030C, 0x01},
+ {IMX_8BIT, 0x030D, 0x2c},
+ /* image sizing */
+ {IMX_8BIT, 0x0340, 0x09}, /* frame_length_lines[15:8] */
+ {IMX_8BIT, 0x0341, 0xC4}, /* frame_length_lines[7:0] */
+ {IMX_8BIT, 0x0342, 0x0D}, /* line_length_pck[15:8] */
+ {IMX_8BIT, 0x0343, 0x66}, /* line_length_pck[7:0] */
+ {IMX_8BIT, 0x0344, 0x00}, /* x_addr_start[15:8] */
+ {IMX_8BIT, 0x0345, 0x00}, /* x_addr_start[7:0] */
+ {IMX_8BIT, 0x0346, 0x01}, /* y_addr_start[15:8] */
+ {IMX_8BIT, 0x0347, 0x32}, /* y_addr_start[7:0] */
+ {IMX_8BIT, 0x0348, 0x0C}, /* x_addr_end[15:8] */
+ {IMX_8BIT, 0x0349, 0xCF}, /* x_addr_end[7:0] */
+ {IMX_8BIT, 0x034A, 0x08}, /* y_addr_end[15:8] */
+ {IMX_8BIT, 0x034B, 0x6D}, /* y_addr_end[7:0] */
+ {IMX_8BIT, 0x034C, 0x0C}, /* x_output_size[15:8] */
+ {IMX_8BIT, 0x034D, 0xD0}, /* x_output_size[7:0] */
+ {IMX_8BIT, 0x034E, 0x07}, /* y_output_size[15:8] */
+ {IMX_8BIT, 0x034F, 0x3C}, /* y_output_size[7:0] */
+ /* binning & scaling */
+ {IMX_8BIT, 0x0390, 0x00}, /* binning mode */
+ {IMX_8BIT, 0x0401, 0x00}, /* scaling mode */
+ {IMX_8BIT, 0x0405, 0x10}, /* scale_m[7:0] */
+ /* timer */
+ {IMX_8BIT, 0x3344, 0x57},
+ {IMX_8BIT, 0x3345, 0x1F},
+ /* timing */
+ {IMX_8BIT, 0x3370, 0x77},
+ {IMX_8BIT, 0x3371, 0x2F},
+ {IMX_8BIT, 0x3372, 0x4F},
+ {IMX_8BIT, 0x3373, 0x2F},
+ {IMX_8BIT, 0x3374, 0x2F},
+ {IMX_8BIT, 0x3375, 0x37},
+ {IMX_8BIT, 0x3376, 0x9F},
+ {IMX_8BIT, 0x3377, 0x37},
+ {IMX_8BIT, 0x33C8, 0x01},
+ {IMX_8BIT, 0x33D4, 0x0C},
+ {IMX_8BIT, 0x33D5, 0xD0},
+ {IMX_8BIT, 0x33D6, 0x09},
+ {IMX_8BIT, 0x33D7, 0xA0},
+
+ {IMX_8BIT, 0x030e, 0x01},
+ {IMX_8BIT, 0x41c0, 0x01},
+ {IMX_TOK_TERM, 0, 0}
+};
+
+static struct imx_reg const imx_STILL_6M_15fps[] = {
+ GROUPED_PARAMETER_HOLD_ENABLE,
+ {IMX_8BIT, 0x0100, 0x00}, /* mode_select */
+ /* shutter */
+ {IMX_8BIT, 0x0202, 0x0A}, /* coarse_integration_time[15:8] */
+ {IMX_8BIT, 0x0203, 0xEF}, /* coarse_integration_time[7:0] */
+ /* pll */
+ {IMX_8BIT, 0x0301, 0x05}, /* vt_pix_clk_div[7:0] */
+ {IMX_8BIT, 0x0303, 0x01}, /* vt_sys_clk_div[7:0] */
+ {IMX_8BIT, 0x0305, 0x09}, /* pre_pll_clk_div[7:0] */
+ {IMX_8BIT, 0x0309, 0x05}, /* op_pix_clk_div[7:0] */
+ {IMX_8BIT, 0x030B, 0x01}, /* op_sys_clk_div[7:0] */
+ {IMX_8BIT, 0x030C, 0x01},
+ {IMX_8BIT, 0x030D, 0x2c},
+ /* image sizing */
+ {IMX_8BIT, 0x0340, 0x0B}, /* frame_length_lines[15:8] */
+ {IMX_8BIT, 0x0341, 0xB8}, /* frame_length_lines[7:0] */
+ {IMX_8BIT, 0x0342, 0x16}, /* line_length_pck[15:8] */
+ {IMX_8BIT, 0x0343, 0x44}, /* line_length_pck[7:0] */
+ {IMX_8BIT, 0x0344, 0x00}, /* x_addr_start[15:8] */
+ {IMX_8BIT, 0x0345, 0x00}, /* x_addr_start[7:0] */
+ {IMX_8BIT, 0x0346, 0x01}, /* y_addr_start[15:8] */
+ {IMX_8BIT, 0x0347, 0x32}, /* y_addr_start[7:0] */
+ {IMX_8BIT, 0x0348, 0x0C}, /* x_addr_end[15:8] */
+ {IMX_8BIT, 0x0349, 0xCF}, /* x_addr_end[7:0] */
+ {IMX_8BIT, 0x034A, 0x08}, /* y_addr_end[15:8] */
+ {IMX_8BIT, 0x034B, 0x6D}, /* y_addr_end[7:0] */
+ {IMX_8BIT, 0x034C, 0x0C}, /* x_output_size[15:8] */
+ {IMX_8BIT, 0x034D, 0xD0}, /* x_output_size[7:0] */
+ {IMX_8BIT, 0x034E, 0x07}, /* y_output_size[15:8] */
+ {IMX_8BIT, 0x034F, 0x3C}, /* y_output_size[7:0] */
+ /* binning & scaling */
+ {IMX_8BIT, 0x0390, 0x00}, /* binning mode */
+ {IMX_8BIT, 0x0401, 0x00}, /* scaling mode */
+ {IMX_8BIT, 0x0405, 0x10}, /* scale_m[7:0] */
+ /* timer */
+ {IMX_8BIT, 0x3344, 0x57},
+ {IMX_8BIT, 0x3345, 0x1F},
+ /* timing */
+ {IMX_8BIT, 0x3370, 0x77},
+ {IMX_8BIT, 0x3371, 0x2F},
+ {IMX_8BIT, 0x3372, 0x4F},
+ {IMX_8BIT, 0x3373, 0x2F},
+ {IMX_8BIT, 0x3374, 0x2F},
+ {IMX_8BIT, 0x3375, 0x37},
+ {IMX_8BIT, 0x3376, 0x9F},
+ {IMX_8BIT, 0x3377, 0x37},
+ {IMX_8BIT, 0x33C8, 0x01},
+ {IMX_8BIT, 0x33D4, 0x0C},
+ {IMX_8BIT, 0x33D5, 0xD0},
+ {IMX_8BIT, 0x33D6, 0x09},
+ {IMX_8BIT, 0x33D7, 0xA0},
+
+ {IMX_8BIT, 0x030e, 0x01},
+ {IMX_8BIT, 0x41c0, 0x01},
+ {IMX_TOK_TERM, 0, 0}
+};
+
+static struct imx_reg const imx_STILL_2M_30fps[] = {
+ GROUPED_PARAMETER_HOLD_ENABLE,
+ {IMX_8BIT, 0x0100, 0x00}, /* mode_select */
+ /* shutter */
+ {IMX_8BIT, 0x0202, 0x0A}, /* coarse_integration_time[15:8] */
+ {IMX_8BIT, 0x0203, 0x8C}, /* coarse_integration_time[7:0] */
+ /* pll */
+ {IMX_8BIT, 0x0301, 0x05}, /* vt_pix_clk_div[7:0] */
+ {IMX_8BIT, 0x0303, 0x01}, /* vt_sys_clk_div[7:0] */
+ {IMX_8BIT, 0x0305, 0x09}, /* pre_pll_clk_div[7:0] */
+ {IMX_8BIT, 0x0309, 0x05}, /* op_pix_clk_div[7:0] */
+ {IMX_8BIT, 0x030B, 0x01}, /* op_sys_clk_div[7:0] */
+ {IMX_8BIT, 0x030C, 0x01},
+ {IMX_8BIT, 0x030D, 0x2c},
+ /* image sizing */
+ {IMX_8BIT, 0x0340, 0x09}, /* frame_length_lines[15:8] */
+ {IMX_8BIT, 0x0341, 0xC4}, /* frame_length_lines[7:0] */
+ {IMX_8BIT, 0x0342, 0x0D}, /* line_length_pck[15:8] */
+ {IMX_8BIT, 0x0343, 0x66}, /* line_length_pck[7:0] */
+ {IMX_8BIT, 0x0344, 0x00}, /* x_addr_start[15:8] */
+ {IMX_8BIT, 0x0345, 0x00}, /* x_addr_start[7:0] */
+ {IMX_8BIT, 0x0346, 0x00}, /* y_addr_start[15:8] */
+ {IMX_8BIT, 0x0347, 0x00}, /* y_addr_start[7:0] */
+ {IMX_8BIT, 0x0348, 0x0C}, /* x_addr_end[15:8] */
+ {IMX_8BIT, 0x0349, 0xCF}, /* x_addr_end[7:0] */
+ {IMX_8BIT, 0x034A, 0x09}, /* y_addr_end[15:8] */
+ {IMX_8BIT, 0x034B, 0x9F}, /* y_addr_end[7:0] */
+ {IMX_8BIT, 0x034C, 0x06}, /* x_output_size[15:8] */
+ {IMX_8BIT, 0x034D, 0x68}, /* x_output_size[7:0] */
+ {IMX_8BIT, 0x034E, 0x04}, /* y_output_size[15:8] */
+ {IMX_8BIT, 0x034F, 0xD0}, /* y_output_size[7:0] */
+ /* binning & scaling */
+ {IMX_8BIT, 0x0390, 0x01}, /* binning mode */
+ {IMX_8BIT, 0x0401, 0x00}, /* scaling mode */
+ {IMX_8BIT, 0x0405, 0x10}, /* scale_m[7:0] */
+ /* timer */
+ {IMX_8BIT, 0x3344, 0x57},
+ {IMX_8BIT, 0x3345, 0x1F},
+ /* timing */
+ {IMX_8BIT, 0x3370, 0x77},
+ {IMX_8BIT, 0x3371, 0x2F},
+ {IMX_8BIT, 0x3372, 0x4F},
+ {IMX_8BIT, 0x3373, 0x2F},
+ {IMX_8BIT, 0x3374, 0x2F},
+ {IMX_8BIT, 0x3375, 0x37},
+ {IMX_8BIT, 0x3376, 0x9F},
+ {IMX_8BIT, 0x3377, 0x37},
+ {IMX_8BIT, 0x33C8, 0x01},
+ {IMX_8BIT, 0x33D4, 0x0C},
+ {IMX_8BIT, 0x33D5, 0xD0},
+ {IMX_8BIT, 0x33D6, 0x09},
+ {IMX_8BIT, 0x33D7, 0xA0},
+
+ {IMX_8BIT, 0x030e, 0x01},
+ {IMX_8BIT, 0x41c0, 0x01},
+ {IMX_TOK_TERM, 0, 0}
+};
+
+static struct imx_reg const imx_STILL_2M_15fps[] = {
+ GROUPED_PARAMETER_HOLD_ENABLE,
+ {IMX_8BIT, 0x0100, 0x00}, /* mode_select */
+ /* shutter */
+ {IMX_8BIT, 0x0202, 0x0A}, /* coarse_integration_time[15:8] */
+ {IMX_8BIT, 0x0203, 0x8C}, /* coarse_integration_time[7:0] */
+ /* pll */
+ {IMX_8BIT, 0x0301, 0x05}, /* vt_pix_clk_div[7:0] */
+ {IMX_8BIT, 0x0303, 0x01}, /* vt_sys_clk_div[7:0] */
+ {IMX_8BIT, 0x0305, 0x09}, /* pre_pll_clk_div[7:0] */
+ {IMX_8BIT, 0x0309, 0x05}, /* op_pix_clk_div[7:0] */
+ {IMX_8BIT, 0x030B, 0x01}, /* op_sys_clk_div[7:0] */
+ {IMX_8BIT, 0x030C, 0x01},
+ {IMX_8BIT, 0x030D, 0x2c},
+ /* image sizing */
+ {IMX_8BIT, 0x0340, 0x0B}, /* frame_length_lines[15:8] */
+ {IMX_8BIT, 0x0341, 0xB8}, /* frame_length_lines[7:0] */
+ {IMX_8BIT, 0x0342, 0x16}, /* line_length_pck[15:8] */
+ {IMX_8BIT, 0x0343, 0x44}, /* line_length_pck[7:0] */
+ {IMX_8BIT, 0x0344, 0x00}, /* x_addr_start[15:8] */
+ {IMX_8BIT, 0x0345, 0x00}, /* x_addr_start[7:0] */
+ {IMX_8BIT, 0x0346, 0x00}, /* y_addr_start[15:8] */
+ {IMX_8BIT, 0x0347, 0x00}, /* y_addr_start[7:0] */
+ {IMX_8BIT, 0x0348, 0x0C}, /* x_addr_end[15:8] */
+ {IMX_8BIT, 0x0349, 0xCF}, /* x_addr_end[7:0] */
+ {IMX_8BIT, 0x034A, 0x09}, /* y_addr_end[15:8] */
+ {IMX_8BIT, 0x034B, 0x9F}, /* y_addr_end[7:0] */
+ {IMX_8BIT, 0x034C, 0x06}, /* x_output_size[15:8] */
+ {IMX_8BIT, 0x034D, 0x68}, /* x_output_size[7:0] */
+ {IMX_8BIT, 0x034E, 0x04}, /* y_output_size[15:8] */
+ {IMX_8BIT, 0x034F, 0xD0}, /* y_output_size[7:0] */
+ /* binning & scaling */
+ {IMX_8BIT, 0x0390, 0x01}, /* binning mode */
+ {IMX_8BIT, 0x0401, 0x00}, /* scaling mode */
+ {IMX_8BIT, 0x0405, 0x10}, /* scale_m[7:0] */
+ /* timer */
+ {IMX_8BIT, 0x3344, 0x57},
+ {IMX_8BIT, 0x3345, 0x1F},
+ /* timing */
+ {IMX_8BIT, 0x3370, 0x77},
+ {IMX_8BIT, 0x3371, 0x2F},
+ {IMX_8BIT, 0x3372, 0x4F},
+ {IMX_8BIT, 0x3373, 0x2F},
+ {IMX_8BIT, 0x3374, 0x2F},
+ {IMX_8BIT, 0x3375, 0x37},
+ {IMX_8BIT, 0x3376, 0x9F},
+ {IMX_8BIT, 0x3377, 0x37},
+ {IMX_8BIT, 0x33C8, 0x01},
+ {IMX_8BIT, 0x33D4, 0x0C},
+ {IMX_8BIT, 0x33D5, 0xD0},
+ {IMX_8BIT, 0x33D6, 0x09},
+ {IMX_8BIT, 0x33D7, 0xA0},
+
+ {IMX_8BIT, 0x030e, 0x01},
+ {IMX_8BIT, 0x41c0, 0x01},
+ {IMX_TOK_TERM, 0, 0}
+};
+
+static struct imx_reg const imx_PREVIEW_30fps[] = {
+ GROUPED_PARAMETER_HOLD_ENABLE,
+ {IMX_8BIT, 0x0100, 0x00}, /* mode_select */
+ /* shutter */
+ {IMX_8BIT, 0x0202, 0x05}, /* coarse_integration_time[15:8] */
+ {IMX_8BIT, 0x0203, 0x44}, /* coarse_integration_time[7:0] */
+ /* pll */
+ {IMX_8BIT, 0x0301, 0x05}, /* vt_pix_clk_div[7:0] */
+ {IMX_8BIT, 0x0303, 0x01}, /* vt_sys_clk_div[7:0] */
+ {IMX_8BIT, 0x0305, 0x06}, /* pre_pll_clk_div[7:0] */
+ {IMX_8BIT, 0x0309, 0x05}, /* op_pix_clk_div[7:0] */
+ {IMX_8BIT, 0x030B, 0x01}, /* op_sys_clk_div[7:0] */
+ {IMX_8BIT, 0x030C, 0x00},
+ {IMX_8BIT, 0x030D, 0x6D},
+ /* image sizing */
+ {IMX_8BIT, 0x0340, 0x05}, /* frame_length_lines[15:8] */
+ {IMX_8BIT, 0x0341, 0x48}, /* frame_length_lines[7:0] */
+ {IMX_8BIT, 0x0342, 0x0D}, /* line_length_pck[15:8] */
+ {IMX_8BIT, 0x0343, 0x70}, /* line_length_pck[7:0] */
+ {IMX_8BIT, 0x0344, 0x00}, /* x_addr_start[15:8] */
+ {IMX_8BIT, 0x0345, 0x00}, /* x_addr_start[7:0] */
+ {IMX_8BIT, 0x0346, 0x00}, /* y_addr_start[15:8] */
+ {IMX_8BIT, 0x0347, 0x00}, /* y_addr_start[7:0] */
+ {IMX_8BIT, 0x0348, 0x0C}, /* x_addr_end[15:8] */
+ {IMX_8BIT, 0x0349, 0xCF}, /* x_addr_end[7:0] */
+ {IMX_8BIT, 0x034A, 0x09}, /* y_addr_end[15:8] */
+ {IMX_8BIT, 0x034B, 0x9F}, /* y_addr_end[7:0] */
+ {IMX_8BIT, 0x034C, 0x03}, /* x_output_size[15:8] */
+ {IMX_8BIT, 0x034D, 0x34}, /* x_output_size[7:0] */
+ {IMX_8BIT, 0x034E, 0x02}, /* y_output_size[15:8] */
+ {IMX_8BIT, 0x034F, 0x68}, /* y_output_size[7:0] */
+ /* binning & scaling */
+ {IMX_8BIT, 0x0390, 0x02}, /* binning mode */
+ {IMX_8BIT, 0x0401, 0x00}, /* scaling mode */
+ {IMX_8BIT, 0x0405, 0x10}, /* scale_m[7:0] */
+ /* timer */
+ {IMX_8BIT, 0x3344, 0x37},
+ {IMX_8BIT, 0x3345, 0x1F},
+ /* timing */
+ {IMX_8BIT, 0x3370, 0x5F},
+ {IMX_8BIT, 0x3371, 0x17},
+ {IMX_8BIT, 0x3372, 0x37},
+ {IMX_8BIT, 0x3373, 0x17},
+ {IMX_8BIT, 0x3374, 0x17},
+ {IMX_8BIT, 0x3375, 0x0F},
+ {IMX_8BIT, 0x3376, 0x57},
+ {IMX_8BIT, 0x3377, 0x27},
+ {IMX_8BIT, 0x33C8, 0x01},
+ {IMX_8BIT, 0x33D4, 0x03},
+ {IMX_8BIT, 0x33D5, 0x34},
+ {IMX_8BIT, 0x33D6, 0x02},
+ {IMX_8BIT, 0x33D7, 0x68},
+ {IMX_TOK_TERM, 0, 0}
+};
+
+static struct imx_reg const imx_WIDE_PREVIEW_30fps[] = {
+ GROUPED_PARAMETER_HOLD_ENABLE,
+ {IMX_8BIT, 0x0100, 0x00}, /* mode_select */
+ /* shutter */
+ {IMX_8BIT, 0x0202, 0x05}, /* coarse_integration_time[15:8] */
+ {IMX_8BIT, 0x0203, 0x44}, /* coarse_integration_time[7:0] */
+ /* pll */
+ {IMX_8BIT, 0x0301, 0x05}, /* vt_pix_clk_div[7:0] */
+ {IMX_8BIT, 0x0303, 0x01}, /* vt_sys_clk_div[7:0] */
+ {IMX_8BIT, 0x0305, 0x06}, /* pre_pll_clk_div[7:0] */
+ {IMX_8BIT, 0x0309, 0x05}, /* op_pix_clk_div[7:0] */
+ {IMX_8BIT, 0x030B, 0x01}, /* op_sys_clk_div[7:0] */
+ {IMX_8BIT, 0x030C, 0x00},
+ {IMX_8BIT, 0x030D, 0x6D},
+ /* image sizing */
+ {IMX_8BIT, 0x0340, 0x0D}, /* frame_length_lines[15:8] */
+ {IMX_8BIT, 0x0341, 0x70}, /* frame_length_lines[7:0] */
+ {IMX_8BIT, 0x0342, 0x10}, /* line_length_pck[15:8] */
+ {IMX_8BIT, 0x0343, 0x00}, /* line_length_pck[7:0] */
+ {IMX_8BIT, 0x0344, 0x00}, /* x_addr_start[15:8] */
+ {IMX_8BIT, 0x0345, 0x00}, /* x_addr_start[7:0] */
+ {IMX_8BIT, 0x0346, 0x00}, /* y_addr_start[15:8] */
+ {IMX_8BIT, 0x0347, 0x14}, /* y_addr_start[7:0] */
+ {IMX_8BIT, 0x0348, 0x0C}, /* x_addr_end[15:8] */
+ {IMX_8BIT, 0x0349, 0xCF}, /* x_addr_end[7:0] */
+ {IMX_8BIT, 0x034A, 0x08}, /* y_addr_end[15:8] */
+ {IMX_8BIT, 0x034B, 0x8C}, /* y_addr_end[7:0] */
+ {IMX_8BIT, 0x034C, 0x06}, /* x_output_size[15:8] */
+ {IMX_8BIT, 0x034D, 0x68}, /* x_output_size[7:0] */
+ {IMX_8BIT, 0x034E, 0x03}, /* y_output_size[15:8] */
+ {IMX_8BIT, 0x034F, 0xBC}, /* y_output_size[7:0] */
+ /* binning & scaling */
+ {IMX_8BIT, 0x0390, 0x01}, /* binning mode */
+ {IMX_8BIT, 0x0401, 0x00}, /* scaling mode */
+ {IMX_8BIT, 0x0405, 0x10}, /* scale_m[7:0] */
+ /* timer */
+ {IMX_8BIT, 0x3344, 0x37},
+ {IMX_8BIT, 0x3345, 0x1F},
+ /* timing */
+ {IMX_8BIT, 0x3370, 0x5F},
+ {IMX_8BIT, 0x3371, 0x17},
+ {IMX_8BIT, 0x3372, 0x37},
+ {IMX_8BIT, 0x3373, 0x17},
+ {IMX_8BIT, 0x3374, 0x17},
+ {IMX_8BIT, 0x3375, 0x0F},
+ {IMX_8BIT, 0x3376, 0x57},
+ {IMX_8BIT, 0x3377, 0x27},
+ {IMX_8BIT, 0x33C8, 0x01},
+ {IMX_8BIT, 0x33D4, 0x06},
+ {IMX_8BIT, 0x33D5, 0x68},
+ {IMX_8BIT, 0x33D6, 0x03},
+ {IMX_8BIT, 0x33D7, 0xBC},
+ {IMX_TOK_TERM, 0, 0}
+};
+
+/*****************************video************************/
+static struct imx_reg const imx_1080p_strong_dvs_30fps[] = {
+ GROUPED_PARAMETER_HOLD_ENABLE,
+ {IMX_8BIT, 0x0100, 0x00}, /* mode_select */
+ /* shutter */
+ {IMX_8BIT, 0x0202, 0x06}, /* coarse_integration_time[15:8] */
+ {IMX_8BIT, 0x0203, 0x4C}, /* coarse_integration_time[7:0] */
+ /* pll */
+ {IMX_8BIT, 0x0301, 0x05}, /* vt_pix_clk_div[7:0] */
+ {IMX_8BIT, 0x0303, 0x01}, /* vt_sys_clk_div[7:0] */
+ {IMX_8BIT, 0x0305, 0x09}, /* pre_pll_clk_div[7:0] */
+ {IMX_8BIT, 0x0309, 0x05}, /* op_pix_clk_div[7:0] */
+ {IMX_8BIT, 0x030B, 0x01}, /* op_sys_clk_div[7:0] */
+ {IMX_8BIT, 0x030C, 0x01},
+ {IMX_8BIT, 0x030D, 0x12},
+ /* image sizing */
+ {IMX_8BIT, 0x0340, 0x06}, /* frame_length_lines[15:8] */
+ {IMX_8BIT, 0x0341, 0xA4}, /* frame_length_lines[7:0] */
+ {IMX_8BIT, 0x0342, 0x11}, /* line_length_pck[15:8] */
+ {IMX_8BIT, 0x0343, 0xC6}, /* line_length_pck[7:0] */
+ {IMX_8BIT, 0x0344, 0x01}, /* x_addr_start[15:8] */
+ {IMX_8BIT, 0x0345, 0xDB}, /* x_addr_start[7:0] */
+ {IMX_8BIT, 0x0346, 0x02}, /* y_addr_start[15:8] */
+ {IMX_8BIT, 0x0347, 0x42}, /* y_addr_start[7:0] */
+ {IMX_8BIT, 0x0348, 0x0A}, /* x_addr_end[15:8] */
+ {IMX_8BIT, 0x0349, 0xEA}, /* x_addr_end[7:0] */
+ {IMX_8BIT, 0x034A, 0x07}, /* y_addr_end[15:8] */
+ {IMX_8BIT, 0x034B, 0x61}, /* y_addr_end[7:0] */
+ {IMX_8BIT, 0x034C, 0x09}, /* x_output_size[15:8] */
+ {IMX_8BIT, 0x034D, 0x10}, /* x_output_size[7:0] */
+ {IMX_8BIT, 0x034E, 0x05}, /* y_output_size[15:8] */
+ {IMX_8BIT, 0x034F, 0x20}, /* y_output_size[7:0] */
+ /* binning & scaling */
+ {IMX_8BIT, 0x0390, 0x00}, /* binning mode */
+ {IMX_8BIT, 0x0401, 0x00}, /* scaling mode */
+ {IMX_8BIT, 0x0405, 0x10}, /* scale_m[7:0] */
+ /* timer */
+ {IMX_8BIT, 0x3344, 0x57},
+ {IMX_8BIT, 0x3345, 0x1F},
+ /* timing */
+ {IMX_8BIT, 0x3370, 0x6F},
+ {IMX_8BIT, 0x3371, 0x27},
+ {IMX_8BIT, 0x3372, 0x4F},
+ {IMX_8BIT, 0x3373, 0x2F},
+ {IMX_8BIT, 0x3374, 0x27},
+ {IMX_8BIT, 0x3375, 0x2F},
+ {IMX_8BIT, 0x3376, 0x97},
+ {IMX_8BIT, 0x3377, 0x37},
+ {IMX_8BIT, 0x33C8, 0x01},
+ {IMX_8BIT, 0x33D4, 0x0C},
+ {IMX_8BIT, 0x33D5, 0xD0},
+ {IMX_8BIT, 0x33D6, 0x07},
+ {IMX_8BIT, 0x33D7, 0x38},
+ {IMX_TOK_TERM, 0, 0}
+};
+
+static struct imx_reg const imx_1080p_no_dvs_30fps[] = {
+ GROUPED_PARAMETER_HOLD_ENABLE,
+ {IMX_8BIT, 0x0100, 0x00}, /* mode_select */
+ /* shutter */
+ {IMX_8BIT, 0x0202, 0x08}, /* coarse_integration_time[15:8] */
+ {IMX_8BIT, 0x0203, 0xD5}, /* coarse_integration_time[7:0] */
+ /* pll */
+ {IMX_8BIT, 0x0301, 0x05}, /* vt_pix_clk_div[7:0] */
+ {IMX_8BIT, 0x0303, 0x01}, /* vt_sys_clk_div[7:0] */
+ {IMX_8BIT, 0x0305, 0x09}, /* pre_pll_clk_div[7:0] */
+ {IMX_8BIT, 0x0309, 0x05}, /* op_pix_clk_div[7:0] */
+ {IMX_8BIT, 0x030B, 0x01}, /* op_sys_clk_div[7:0] */
+ {IMX_8BIT, 0x030C, 0x01},
+ {IMX_8BIT, 0x030D, 0x12},
+ /* image sizing */
+ {IMX_8BIT, 0x0340, 0x07}, /* frame_length_lines[15:8] */
+ {IMX_8BIT, 0x0341, 0xD0}, /* frame_length_lines[7:0] */
+ {IMX_8BIT, 0x0342, 0x0F}, /* line_length_pck[15:8] */
+ {IMX_8BIT, 0x0343, 0x3C}, /* line_length_pck[7:0] */
+ {IMX_8BIT, 0x0344, 0x00}, /* x_addr_start[15:8] */
+ {IMX_8BIT, 0x0345, 0x00}, /* x_addr_start[7:0] */
+ {IMX_8BIT, 0x0346, 0x01}, /* y_addr_start[15:8] */
+ {IMX_8BIT, 0x0347, 0x34}, /* y_addr_start[7:0] */
+ {IMX_8BIT, 0x0348, 0x0C}, /* x_addr_end[15:8] */
+ {IMX_8BIT, 0x0349, 0xCF}, /* x_addr_end[7:0] */
+ {IMX_8BIT, 0x034A, 0x08}, /* y_addr_end[15:8] */
+ {IMX_8BIT, 0x034B, 0x6B}, /* y_addr_end[7:0] */
+ {IMX_8BIT, 0x034C, 0x07}, /* x_output_size[15:8] */
+ {IMX_8BIT, 0x034D, 0x94}, /* x_output_size[7:0] */
+ {IMX_8BIT, 0x034E, 0x04}, /* y_output_size[15:8] */
+ {IMX_8BIT, 0x034F, 0x44}, /* y_output_size[7:0] */
+ /* binning & scaling */
+ {IMX_8BIT, 0x0390, 0x00}, /* binning mode */
+ {IMX_8BIT, 0x0401, 0x02}, /* scaling mode */
+ {IMX_8BIT, 0x0405, 0x1B}, /* scale_m[7:0] */
+ /* timer */
+ {IMX_8BIT, 0x3344, 0x57},
+ {IMX_8BIT, 0x3345, 0x1F},
+ /* timing */
+ {IMX_8BIT, 0x3370, 0x6F},
+ {IMX_8BIT, 0x3371, 0x27},
+ {IMX_8BIT, 0x3372, 0x4F},
+ {IMX_8BIT, 0x3373, 0x2F},
+ {IMX_8BIT, 0x3374, 0x27},
+ {IMX_8BIT, 0x3375, 0x2F},
+ {IMX_8BIT, 0x3376, 0x97},
+ {IMX_8BIT, 0x3377, 0x37},
+ {IMX_8BIT, 0x33C8, 0x01},
+ {IMX_8BIT, 0x33D4, 0x0C},
+ {IMX_8BIT, 0x33D5, 0xD0},
+ {IMX_8BIT, 0x33D6, 0x07},
+ {IMX_8BIT, 0x33D7, 0x38},
+ {IMX_TOK_TERM, 0, 0}
+};
+
+static struct imx_reg const imx_1080p_no_dvs_15fps[] = {
+ GROUPED_PARAMETER_HOLD_ENABLE,
+ {IMX_8BIT, 0x0100, 0x00}, /* mode_select */
+ /* shutter */
+ {IMX_8BIT, 0x0202, 0x08}, /* coarse_integration_time[15:8] */
+ {IMX_8BIT, 0x0203, 0xD5}, /* coarse_integration_time[7:0] */
+ /* pll */
+ {IMX_8BIT, 0x0301, 0x05}, /* vt_pix_clk_div[7:0] */
+ {IMX_8BIT, 0x0303, 0x01}, /* vt_sys_clk_div[7:0] */
+ {IMX_8BIT, 0x0305, 0x09}, /* pre_pll_clk_div[7:0] */
+ {IMX_8BIT, 0x0309, 0x05}, /* op_pix_clk_div[7:0] */
+ {IMX_8BIT, 0x030B, 0x01}, /* op_sys_clk_div[7:0] */
+ {IMX_8BIT, 0x030C, 0x01},
+ {IMX_8BIT, 0x030D, 0x12},
+ /* image sizing */
+ {IMX_8BIT, 0x0340, 0x09}, /* frame_length_lines[15:8] */
+ {IMX_8BIT, 0x0341, 0xA6}, /* frame_length_lines[7:0] */
+ {IMX_8BIT, 0x0342, 0x18}, /* line_length_pck[15:8] */
+ {IMX_8BIT, 0x0343, 0x9C}, /* line_length_pck[7:0] */
+ {IMX_8BIT, 0x0344, 0x00}, /* x_addr_start[15:8] */
+ {IMX_8BIT, 0x0345, 0x00}, /* x_addr_start[7:0] */
+ {IMX_8BIT, 0x0346, 0x01}, /* y_addr_start[15:8] */
+ {IMX_8BIT, 0x0347, 0x34}, /* y_addr_start[7:0] */
+ {IMX_8BIT, 0x0348, 0x0C}, /* x_addr_end[15:8] */
+ {IMX_8BIT, 0x0349, 0xCF}, /* x_addr_end[7:0] */
+ {IMX_8BIT, 0x034A, 0x08}, /* y_addr_end[15:8] */
+ {IMX_8BIT, 0x034B, 0x6B}, /* y_addr_end[7:0] */
+ {IMX_8BIT, 0x034C, 0x07}, /* x_output_size[15:8] */
+ {IMX_8BIT, 0x034D, 0x94}, /* x_output_size[7:0] */
+ {IMX_8BIT, 0x034E, 0x04}, /* y_output_size[15:8] */
+ {IMX_8BIT, 0x034F, 0x44}, /* y_output_size[7:0] */
+ /* binning & scaling */
+ {IMX_8BIT, 0x0390, 0x00}, /* binning mode */
+ {IMX_8BIT, 0x0401, 0x02}, /* scaling mode */
+ {IMX_8BIT, 0x0405, 0x1B}, /* scale_m[7:0] */
+ /* timer */
+ {IMX_8BIT, 0x3344, 0x57},
+ {IMX_8BIT, 0x3345, 0x1F},
+ /* timing */
+ {IMX_8BIT, 0x3370, 0x6F},
+ {IMX_8BIT, 0x3371, 0x27},
+ {IMX_8BIT, 0x3372, 0x4F},
+ {IMX_8BIT, 0x3373, 0x2F},
+ {IMX_8BIT, 0x3374, 0x27},
+ {IMX_8BIT, 0x3375, 0x2F},
+ {IMX_8BIT, 0x3376, 0x97},
+ {IMX_8BIT, 0x3377, 0x37},
+ {IMX_8BIT, 0x33C8, 0x01},
+ {IMX_8BIT, 0x33D4, 0x0C},
+ {IMX_8BIT, 0x33D5, 0xD0},
+ {IMX_8BIT, 0x33D6, 0x07},
+ {IMX_8BIT, 0x33D7, 0x38},
+ {IMX_TOK_TERM, 0, 0}
+};
+/*****************************video************************/
+static struct imx_reg const imx_720p_strong_dvs_30fps[] = {
+ GROUPED_PARAMETER_HOLD_ENABLE,
+ {IMX_8BIT, 0x0100, 0x00}, /* mode_select */
+ /* shutter */
+ {IMX_8BIT, 0x0202, 0x05}, /* coarse_integration_time[15:8] */
+ {IMX_8BIT, 0x0203, 0xFC}, /* coarse_integration_time[7:0] */
+ /* pll */
+ {IMX_8BIT, 0x0301, 0x05}, /* vt_pix_clk_div[7:0] */
+ {IMX_8BIT, 0x0303, 0x01}, /* vt_sys_clk_div[7:0] */
+ {IMX_8BIT, 0x0305, 0x09}, /* pre_pll_clk_div[7:0] */
+ {IMX_8BIT, 0x0309, 0x05}, /* op_pix_clk_div[7:0] */
+ {IMX_8BIT, 0x030B, 0x01}, /* op_sys_clk_div[7:0] */
+ {IMX_8BIT, 0x030C, 0x01},
+ {IMX_8BIT, 0x030D, 0x12},
+ /* image sizing */
+ {IMX_8BIT, 0x0340, 0x06}, /* frame_length_lines[15:8] */
+ {IMX_8BIT, 0x0341, 0x00}, /* frame_length_lines[7:0] */
+ {IMX_8BIT, 0x0342, 0x13}, /* line_length_pck[15:8] */
+ {IMX_8BIT, 0x0343, 0x9C}, /* line_length_pck[7:0] */
+ {IMX_8BIT, 0x0344, 0x01}, /* x_addr_start[15:8] */
+ {IMX_8BIT, 0x0345, 0xD7}, /* x_addr_start[7:0] */
+ {IMX_8BIT, 0x0346, 0x02}, /* y_addr_start[15:8] */
+ {IMX_8BIT, 0x0347, 0x3E}, /* y_addr_start[7:0] */
+ {IMX_8BIT, 0x0348, 0x0A}, /* x_addr_end[15:8] */
+ {IMX_8BIT, 0x0349, 0xEE}, /* x_addr_end[7:0] */
+ {IMX_8BIT, 0x034A, 0x07}, /* y_addr_end[15:8] */
+ {IMX_8BIT, 0x034B, 0x65}, /* y_addr_end[7:0] */
+ {IMX_8BIT, 0x034C, 0x06}, /* x_output_size[15:8] */
+ {IMX_8BIT, 0x034D, 0x10}, /* x_output_size[7:0] */
+ {IMX_8BIT, 0x034E, 0x03}, /* y_output_size[15:8] */
+ {IMX_8BIT, 0x034F, 0x70}, /* y_output_size[7:0] */
+ /* binning & scaling */
+ {IMX_8BIT, 0x0390, 0x00}, /* binning mode */
+ {IMX_8BIT, 0x0401, 0x02}, /* scaling mode */
+ {IMX_8BIT, 0x0405, 0x18}, /* scale_m[7:0] */
+ /* timer */
+ {IMX_8BIT, 0x3344, 0x57},
+ {IMX_8BIT, 0x3345, 0x1F},
+ /* timing */
+ {IMX_8BIT, 0x3370, 0x6F},
+ {IMX_8BIT, 0x3371, 0x27},
+ {IMX_8BIT, 0x3372, 0x4F},
+ {IMX_8BIT, 0x3373, 0x2F},
+ {IMX_8BIT, 0x3374, 0x27},
+ {IMX_8BIT, 0x3375, 0x2F},
+ {IMX_8BIT, 0x3376, 0x97},
+ {IMX_8BIT, 0x3377, 0x37},
+ {IMX_8BIT, 0x33C8, 0x01},
+ {IMX_8BIT, 0x33D4, 0x0C},
+ {IMX_8BIT, 0x33D5, 0xD0},
+ {IMX_8BIT, 0x33D6, 0x07},
+ {IMX_8BIT, 0x33D7, 0x38},
+ {IMX_TOK_TERM, 0, 0}
+};
+
+static struct imx_reg const imx_480p_strong_dvs_30fps[] = {
+ GROUPED_PARAMETER_HOLD_ENABLE,
+ {IMX_8BIT, 0x0100, 0x00}, /* mode_select */
+ /* shutter */
+ {IMX_8BIT, 0x0202, 0x05}, /* coarse_integration_time[15:8] */
+ {IMX_8BIT, 0x0203, 0xFC}, /* coarse_integration_time[7:0] */
+ /* pll */
+ {IMX_8BIT, 0x0301, 0x05}, /* vt_pix_clk_div[7:0] */
+ {IMX_8BIT, 0x0303, 0x01}, /* vt_sys_clk_div[7:0] */
+ {IMX_8BIT, 0x0305, 0x09}, /* pre_pll_clk_div[7:0] */
+ {IMX_8BIT, 0x0309, 0x05}, /* op_pix_clk_div[7:0] */
+ {IMX_8BIT, 0x030B, 0x01}, /* op_sys_clk_div[7:0] */
+ {IMX_8BIT, 0x030C, 0x01},
+ {IMX_8BIT, 0x030D, 0x12},
+ /* image sizing */
+ {IMX_8BIT, 0x0340, 0x06}, /* frame_length_lines[15:8] */
+ {IMX_8BIT, 0x0341, 0x00}, /* frame_length_lines[7:0] */
+ {IMX_8BIT, 0x0342, 0x13}, /* line_length_pck[15:8] */
+ {IMX_8BIT, 0x0343, 0x9C}, /* line_length_pck[7:0] */
+ {IMX_8BIT, 0x0344, 0x01}, /* x_addr_start[15:8] */
+ {IMX_8BIT, 0x0345, 0xD4}, /* x_addr_start[7:0] */
+ {IMX_8BIT, 0x0346, 0x01}, /* y_addr_start[15:8] */
+ {IMX_8BIT, 0x0347, 0xC8}, /* y_addr_start[7:0] */
+ {IMX_8BIT, 0x0348, 0x0A}, /* x_addr_end[15:8] */
+ {IMX_8BIT, 0x0349, 0xF1}, /* x_addr_end[7:0] */
+ {IMX_8BIT, 0x034A, 0x07}, /* y_addr_end[15:8] */
+ {IMX_8BIT, 0x034B, 0xDB}, /* y_addr_end[7:0] */
+ {IMX_8BIT, 0x034C, 0x03}, /* x_output_size[15:8] */
+ {IMX_8BIT, 0x034D, 0x70}, /* x_output_size[7:0] */
+ {IMX_8BIT, 0x034E, 0x02}, /* y_output_size[15:8] */
+ {IMX_8BIT, 0x034F, 0x50}, /* y_output_size[7:0] */
+ /* binning & scaling */
+ {IMX_8BIT, 0x0390, 0x01}, /* binning mode */
+ {IMX_8BIT, 0x0401, 0x02}, /* scaling mode */
+ {IMX_8BIT, 0x0405, 0x15}, /* scale_m[7:0] */
+ /* timer */
+ {IMX_8BIT, 0x3344, 0x57},
+ {IMX_8BIT, 0x3345, 0x1F},
+ /* timing */
+ {IMX_8BIT, 0x3370, 0x6F},
+ {IMX_8BIT, 0x3371, 0x27},
+ {IMX_8BIT, 0x3372, 0x4F},
+ {IMX_8BIT, 0x3373, 0x2F},
+ {IMX_8BIT, 0x3374, 0x27},
+ {IMX_8BIT, 0x3375, 0x2F},
+ {IMX_8BIT, 0x3376, 0x97},
+ {IMX_8BIT, 0x3377, 0x37},
+ {IMX_8BIT, 0x33C8, 0x01},
+ {IMX_8BIT, 0x33D4, 0x0C},
+ {IMX_8BIT, 0x33D5, 0xD0},
+ {IMX_8BIT, 0x33D6, 0x07},
+ {IMX_8BIT, 0x33D7, 0x38},
+ {IMX_TOK_TERM, 0, 0}
+};
+
+static struct imx_reg const imx_STILL_720p_30fps[] = {
+ GROUPED_PARAMETER_HOLD_ENABLE,
+ {IMX_8BIT, 0x0100, 0x00}, /* mode_select */
+ /* shutter */
+ {IMX_8BIT, 0x0202, 0x05}, /* coarse_integration_time[15:8] */
+ {IMX_8BIT, 0x0203, 0x44}, /* coarse_integration_time[7:0] */
+ /* pll */
+ {IMX_8BIT, 0x0301, 0x05}, /* vt_pix_clk_div[7:0] */
+ {IMX_8BIT, 0x0303, 0x01}, /* vt_sys_clk_div[7:0] */
+ {IMX_8BIT, 0x0305, 0x04}, /* pre_pll_clk_div[7:0] */
+ {IMX_8BIT, 0x0309, 0x05}, /* op_pix_clk_div[7:0] */
+ {IMX_8BIT, 0x030B, 0x01}, /* op_sys_clk_div[7:0] */
+ {IMX_8BIT, 0x030C, 0x00},
+ {IMX_8BIT, 0x030D, 0x6D},
+ /* image sizing */
+ {IMX_8BIT, 0x0340, 0x05}, /* frame_length_lines[15:8] */
+ {IMX_8BIT, 0x0341, 0x48}, /* frame_length_lines[7:0] */
+ {IMX_8BIT, 0x0342, 0x14}, /* line_length_pck[15:8] */
+ {IMX_8BIT, 0x0343, 0x28}, /* line_length_pck[7:0] */
+ {IMX_8BIT, 0x0344, 0x00}, /* x_addr_start[15:8] */
+ {IMX_8BIT, 0x0345, 0x48}, /* x_addr_start[7:0] */
+ {IMX_8BIT, 0x0346, 0x01}, /* y_addr_start[15:8] */
+ {IMX_8BIT, 0x0347, 0x64}, /* y_addr_start[7:0] */
+ {IMX_8BIT, 0x0348, 0x0C}, /* x_addr_end[15:8] */
+ {IMX_8BIT, 0x0349, 0x87}, /* x_addr_end[7:0] */
+ {IMX_8BIT, 0x034A, 0x08}, /* y_addr_end[15:8] */
+ {IMX_8BIT, 0x034B, 0x3B}, /* y_addr_end[7:0] */
+ {IMX_8BIT, 0x034C, 0x06}, /* x_output_size[15:8] */
+ {IMX_8BIT, 0x034D, 0x20}, /* x_output_size[7:0] */
+ {IMX_8BIT, 0x034E, 0x03}, /* y_output_size[15:8] */
+ {IMX_8BIT, 0x034F, 0x6C}, /* y_output_size[7:0] */
+ /* binning & scaling */
+ {IMX_8BIT, 0x0390, 0x01}, /* binning mode */
+ {IMX_8BIT, 0x0401, 0x00}, /* scaling mode */
+ {IMX_8BIT, 0x0405, 0x10}, /* scale_m[7:0] */
+ /* timer */
+ {IMX_8BIT, 0x3344, 0x37},
+ {IMX_8BIT, 0x3345, 0x1F},
+ /* timing */
+ {IMX_8BIT, 0x3370, 0x5F},
+ {IMX_8BIT, 0x3371, 0x17},
+ {IMX_8BIT, 0x3372, 0x37},
+ {IMX_8BIT, 0x3373, 0x17},
+ {IMX_8BIT, 0x3374, 0x17},
+ {IMX_8BIT, 0x3375, 0x0F},
+ {IMX_8BIT, 0x3376, 0x57},
+ {IMX_8BIT, 0x3377, 0x27},
+ {IMX_8BIT, 0x33C8, 0x01},
+ {IMX_8BIT, 0x33D4, 0x06},
+ {IMX_8BIT, 0x33D5, 0x20},
+ {IMX_8BIT, 0x33D6, 0x03},
+ {IMX_8BIT, 0x33D7, 0x6C},
+ {IMX_TOK_TERM, 0, 0}
+};
+
+static struct imx_reg const imx_STILL_720p_15fps[] = {
+ GROUPED_PARAMETER_HOLD_ENABLE,
+ {IMX_8BIT, 0x0100, 0x00}, /* mode_select */
+ /* shutter */
+ {IMX_8BIT, 0x0202, 0x05}, /* coarse_integration_time[15:8] */
+ {IMX_8BIT, 0x0203, 0x44}, /* coarse_integration_time[7:0] */
+ /* pll */
+ {IMX_8BIT, 0x0301, 0x05}, /* vt_pix_clk_div[7:0] */
+ {IMX_8BIT, 0x0303, 0x01}, /* vt_sys_clk_div[7:0] */
+ {IMX_8BIT, 0x0305, 0x04}, /* pre_pll_clk_div[7:0] */
+ {IMX_8BIT, 0x0309, 0x05}, /* op_pix_clk_div[7:0] */
+ {IMX_8BIT, 0x030B, 0x01}, /* op_sys_clk_div[7:0] */
+ {IMX_8BIT, 0x030C, 0x00},
+ {IMX_8BIT, 0x030D, 0x6D},
+ /* image sizing */
+ {IMX_8BIT, 0x0340, 0x08}, /* frame_length_lines[15:8] */
+ {IMX_8BIT, 0x0341, 0xCA}, /* frame_length_lines[7:0] */
+ {IMX_8BIT, 0x0342, 0x18}, /* line_length_pck[15:8] */
+ {IMX_8BIT, 0x0343, 0x38}, /* line_length_pck[7:0] */
+ {IMX_8BIT, 0x0344, 0x00}, /* x_addr_start[15:8] */
+ {IMX_8BIT, 0x0345, 0x48}, /* x_addr_start[7:0] */
+ {IMX_8BIT, 0x0346, 0x01}, /* y_addr_start[15:8] */
+ {IMX_8BIT, 0x0347, 0x64}, /* y_addr_start[7:0] */
+ {IMX_8BIT, 0x0348, 0x0C}, /* x_addr_end[15:8] */
+ {IMX_8BIT, 0x0349, 0x87}, /* x_addr_end[7:0] */
+ {IMX_8BIT, 0x034A, 0x08}, /* y_addr_end[15:8] */
+ {IMX_8BIT, 0x034B, 0x3B}, /* y_addr_end[7:0] */
+ {IMX_8BIT, 0x034C, 0x06}, /* x_output_size[15:8] */
+ {IMX_8BIT, 0x034D, 0x20}, /* x_output_size[7:0] */
+ {IMX_8BIT, 0x034E, 0x03}, /* y_output_size[15:8] */
+ {IMX_8BIT, 0x034F, 0x6C}, /* y_output_size[7:0] */
+ /* binning & scaling */
+ {IMX_8BIT, 0x0390, 0x01}, /* binning mode */
+ {IMX_8BIT, 0x0401, 0x00}, /* scaling mode */
+ {IMX_8BIT, 0x0405, 0x10}, /* scale_m[7:0] */
+ /* timer */
+ {IMX_8BIT, 0x3344, 0x37},
+ {IMX_8BIT, 0x3345, 0x1F},
+ /* timing */
+ {IMX_8BIT, 0x3370, 0x5F},
+ {IMX_8BIT, 0x3371, 0x17},
+ {IMX_8BIT, 0x3372, 0x37},
+ {IMX_8BIT, 0x3373, 0x17},
+ {IMX_8BIT, 0x3374, 0x17},
+ {IMX_8BIT, 0x3375, 0x0F},
+ {IMX_8BIT, 0x3376, 0x57},
+ {IMX_8BIT, 0x3377, 0x27},
+ {IMX_8BIT, 0x33C8, 0x01},
+ {IMX_8BIT, 0x33D4, 0x06},
+ {IMX_8BIT, 0x33D5, 0x20},
+ {IMX_8BIT, 0x33D6, 0x03},
+ {IMX_8BIT, 0x33D7, 0x6C},
+ {IMX_TOK_TERM, 0, 0}
+};
+
+static struct imx_reg const imx_WVGA_strong_dvs_30fps[] = {
+ GROUPED_PARAMETER_HOLD_ENABLE,
+ {IMX_8BIT, 0x0100, 0x00}, /* mode_select */
+ /* shutter */
+ {IMX_8BIT, 0x0202, 0x05}, /* coarse_integration_time[15:8] */
+ {IMX_8BIT, 0x0203, 0xEC}, /* coarse_integration_time[7:0] */
+ /* pll */
+ {IMX_8BIT, 0x0301, 0x05}, /* vt_pix_clk_div[7:0] */
+ {IMX_8BIT, 0x0303, 0x01}, /* vt_sys_clk_div[7:0] */
+ {IMX_8BIT, 0x0305, 0x09}, /* pre_pll_clk_div[7:0] */
+ {IMX_8BIT, 0x0309, 0x05}, /* op_pix_clk_div[7:0] */
+ {IMX_8BIT, 0x030B, 0x01}, /* op_sys_clk_div[7:0] */
+ {IMX_8BIT, 0x030C, 0x01},
+ {IMX_8BIT, 0x030D, 0x12},
+ /* image sizing */
+ {IMX_8BIT, 0x0340, 0x06}, /* frame_length_lines[15:8] */
+ {IMX_8BIT, 0x0341, 0x00}, /* frame_length_lines[7:0] */
+ {IMX_8BIT, 0x0342, 0x13}, /* line_length_pck[15:8] */
+ {IMX_8BIT, 0x0343, 0x9C}, /* line_length_pck[7:0] */
+ {IMX_8BIT, 0x0344, 0x00}, /* x_addr_start[15:8] */
+ {IMX_8BIT, 0x0345, 0x00}, /* x_addr_start[7:0] */
+ {IMX_8BIT, 0x0346, 0x00}, /* y_addr_start[15:8] */
+ {IMX_8BIT, 0x0347, 0xD0}, /* y_addr_start[7:0] */
+ {IMX_8BIT, 0x0348, 0x0C}, /* x_addr_end[15:8] */
+ {IMX_8BIT, 0x0349, 0xCF}, /* x_addr_end[7:0] */
+ {IMX_8BIT, 0x034A, 0x08}, /* y_addr_end[15:8] */
+ {IMX_8BIT, 0x034B, 0xCF}, /* y_addr_end[7:0] */
+ {IMX_8BIT, 0x034C, 0x06}, /* x_output_size[15:8] */
+ {IMX_8BIT, 0x034D, 0x68}, /* x_output_size[7:0] */
+ {IMX_8BIT, 0x034E, 0x04}, /* y_output_size[15:8] */
+ {IMX_8BIT, 0x034F, 0x00}, /* y_output_size[7:0] */
+ /* binning & scaling */
+ {IMX_8BIT, 0x0390, 0x01}, /* binning mode */
+ {IMX_8BIT, 0x0401, 0x00}, /* scaling mode */
+ {IMX_8BIT, 0x0405, 0x10}, /* scale_m[7:0] */
+ /* timer */
+ {IMX_8BIT, 0x3344, 0x57},
+ {IMX_8BIT, 0x3345, 0x1F},
+ /* timing */
+ {IMX_8BIT, 0x3370, 0x6F},
+ {IMX_8BIT, 0x3371, 0x27},
+ {IMX_8BIT, 0x3372, 0x4F},
+ {IMX_8BIT, 0x3373, 0x2F},
+ {IMX_8BIT, 0x3374, 0x27},
+ {IMX_8BIT, 0x3375, 0x2F},
+ {IMX_8BIT, 0x3376, 0x97},
+ {IMX_8BIT, 0x3377, 0x37},
+ {IMX_8BIT, 0x33C8, 0x01},
+ {IMX_8BIT, 0x33D4, 0x0C},
+ {IMX_8BIT, 0x33D5, 0xD0},
+ {IMX_8BIT, 0x33D6, 0x07},
+ {IMX_8BIT, 0x33D7, 0x38},
+ {IMX_TOK_TERM, 0, 0}
+};
+static struct imx_reg const imx_CIF_strong_dvs_30fps[] = {
+ GROUPED_PARAMETER_HOLD_ENABLE,
+ {IMX_8BIT, 0x0100, 0x00}, /* mode_select */
+ /* shutter */
+ {IMX_8BIT, 0x0202, 0x05}, /* coarse_integration_time[15:8] */
+ {IMX_8BIT, 0x0203, 0xFC}, /* coarse_integration_time[7:0] */
+ /* pll */
+ {IMX_8BIT, 0x0301, 0x05}, /* vt_pix_clk_div[7:0] */
+ {IMX_8BIT, 0x0303, 0x01}, /* vt_sys_clk_div[7:0] */
+ {IMX_8BIT, 0x0305, 0x04}, /* pre_pll_clk_div[7:0] */
+ {IMX_8BIT, 0x0309, 0x05}, /* op_pix_clk_div[7:0] */
+ {IMX_8BIT, 0x030B, 0x01}, /* op_sys_clk_div[7:0] */
+ {IMX_8BIT, 0x030C, 0x00},
+ {IMX_8BIT, 0x030D, 0x6D},
+ /* image sizing */
+ {IMX_8BIT, 0x0340, 0x06}, /* frame_length_lines[15:8] */
+ {IMX_8BIT, 0x0341, 0x00}, /* frame_length_lines[7:0] */
+ {IMX_8BIT, 0x0342, 0x11}, /* line_length_pck[15:8] */
+ {IMX_8BIT, 0x0343, 0xDB}, /* line_length_pck[7:0] */
+ {IMX_8BIT, 0x0344, 0x00}, /* x_addr_start[15:8] */
+ {IMX_8BIT, 0x0345, 0x00}, /* x_addr_start[7:0] */
+ {IMX_8BIT, 0x0346, 0x00}, /* y_addr_start[15:8] */
+ {IMX_8BIT, 0x0347, 0x00}, /* y_addr_start[7:0] */
+ {IMX_8BIT, 0x0348, 0x0C}, /* x_addr_end[15:8] */
+ {IMX_8BIT, 0x0349, 0xCF}, /* x_addr_end[7:0] */
+ {IMX_8BIT, 0x034A, 0x09}, /* y_addr_end[15:8] */
+ {IMX_8BIT, 0x034B, 0x9F}, /* y_addr_end[7:0] */
+ {IMX_8BIT, 0x034C, 0x01}, /* x_output_size[15:8] */
+ {IMX_8BIT, 0x034D, 0x70}, /* x_output_size[7:0] */
+ {IMX_8BIT, 0x034E, 0x01}, /* y_output_size[15:8] */
+ {IMX_8BIT, 0x034F, 0x30}, /* y_output_size[7:0] */
+ /* binning & scaling */
+ {IMX_8BIT, 0x0390, 0x02}, /* binning mode */
+ {IMX_8BIT, 0x0401, 0x00}, /* scaling mode */
+ {IMX_8BIT, 0x0405, 0x10}, /* scale_m[7:0] */
+ /* timer */
+ {IMX_8BIT, 0x3344, 0x37},
+ {IMX_8BIT, 0x3345, 0x1F},
+ /* timing */
+ {IMX_8BIT, 0x3370, 0x5F},
+ {IMX_8BIT, 0x3371, 0x17},
+ {IMX_8BIT, 0x3372, 0x37},
+ {IMX_8BIT, 0x3373, 0x17},
+ {IMX_8BIT, 0x3374, 0x17},
+ {IMX_8BIT, 0x3375, 0x0F},
+ {IMX_8BIT, 0x3376, 0x57},
+ {IMX_8BIT, 0x3377, 0x27},
+ {IMX_8BIT, 0x33C8, 0x01},
+ {IMX_8BIT, 0x33D4, 0x06},
+ {IMX_8BIT, 0x33D5, 0x20},
+ {IMX_8BIT, 0x33D6, 0x03},
+ {IMX_8BIT, 0x33D7, 0x6C},
+ {IMX_TOK_TERM, 0, 0}
+};
+
+static struct imx_reg const imx_VGA_strong_dvs_30fps[] = {
+ GROUPED_PARAMETER_HOLD_ENABLE,
+ {IMX_8BIT, 0x0100, 0x00}, /* mode_select */
+ /* shutter */
+ {IMX_8BIT, 0x0202, 0x05}, /* coarse_integration_time[15:8] */
+ {IMX_8BIT, 0x0203, 0xFC}, /* coarse_integration_time[7:0] */
+ /* pll */
+ {IMX_8BIT, 0x0301, 0x05}, /* vt_pix_clk_div[7:0] */
+ {IMX_8BIT, 0x0303, 0x01}, /* vt_sys_clk_div[7:0] */
+ {IMX_8BIT, 0x0305, 0x04}, /* pre_pll_clk_div[7:0] */
+ {IMX_8BIT, 0x0309, 0x05}, /* op_pix_clk_div[7:0] */
+ {IMX_8BIT, 0x030B, 0x01}, /* op_sys_clk_div[7:0] */
+ {IMX_8BIT, 0x030C, 0x00},
+ {IMX_8BIT, 0x030D, 0x6D},
+ /* image sizing */
+ {IMX_8BIT, 0x0340, 0x06}, /* frame_length_lines[15:8] */
+ {IMX_8BIT, 0x0341, 0x00}, /* frame_length_lines[7:0] */
+ {IMX_8BIT, 0x0342, 0x11}, /* line_length_pck[15:8] */
+ {IMX_8BIT, 0x0343, 0x94}, /* line_length_pck[7:0] */
+ {IMX_8BIT, 0x0344, 0x00}, /* x_addr_start[15:8] */
+ {IMX_8BIT, 0x0345, 0x00}, /* x_addr_start[7:0] */
+ {IMX_8BIT, 0x0346, 0x00}, /* y_addr_start[15:8] */
+ {IMX_8BIT, 0x0347, 0x00}, /* y_addr_start[7:0] */
+ {IMX_8BIT, 0x0348, 0x0C}, /* x_addr_end[15:8] */
+ {IMX_8BIT, 0x0349, 0xCF}, /* x_addr_end[7:0] */
+ {IMX_8BIT, 0x034A, 0x09}, /* y_addr_end[15:8] */
+ {IMX_8BIT, 0x034B, 0x9F}, /* y_addr_end[7:0] */
+ {IMX_8BIT, 0x034C, 0x03}, /* x_output_size[15:8] */
+ {IMX_8BIT, 0x034D, 0x34}, /* x_output_size[7:0] */
+ {IMX_8BIT, 0x034E, 0x02}, /* y_output_size[15:8] */
+ {IMX_8BIT, 0x034F, 0x68}, /* y_output_size[7:0] */
+ /* binning & scaling */
+ {IMX_8BIT, 0x0390, 0x02}, /* binning mode */
+ {IMX_8BIT, 0x0401, 0x00}, /* scaling mode */
+ {IMX_8BIT, 0x0405, 0x10}, /* scale_m[7:0] */
+ /* timer */
+ {IMX_8BIT, 0x3344, 0x37},
+ {IMX_8BIT, 0x3345, 0x1F},
+ /* timing */
+ {IMX_8BIT, 0x3370, 0x5F},
+ {IMX_8BIT, 0x3371, 0x17},
+ {IMX_8BIT, 0x3372, 0x37},
+ {IMX_8BIT, 0x3373, 0x17},
+ {IMX_8BIT, 0x3374, 0x17},
+ {IMX_8BIT, 0x3375, 0x0F},
+ {IMX_8BIT, 0x3376, 0x57},
+ {IMX_8BIT, 0x3377, 0x27},
+ {IMX_8BIT, 0x33C8, 0x01},
+ {IMX_8BIT, 0x33D4, 0x06},
+ {IMX_8BIT, 0x33D5, 0x20},
+ {IMX_8BIT, 0x33D6, 0x03},
+ {IMX_8BIT, 0x33D7, 0x6C},
+ {IMX_TOK_TERM, 0, 0}
+};
+
+static struct imx_reg const imx_VGA_strong_dvs_15fps[] = {
+ GROUPED_PARAMETER_HOLD_ENABLE,
+ {IMX_8BIT, 0x0100, 0x00}, /* mode_select */
+ /* shutter */
+ {IMX_8BIT, 0x0202, 0x05}, /* coarse_integration_time[15:8] */
+ {IMX_8BIT, 0x0203, 0xFC}, /* coarse_integration_time[7:0] */
+ /* pll */
+ {IMX_8BIT, 0x0301, 0x05}, /* vt_pix_clk_div[7:0] */
+ {IMX_8BIT, 0x0303, 0x01}, /* vt_sys_clk_div[7:0] */
+ {IMX_8BIT, 0x0305, 0x04}, /* pre_pll_clk_div[7:0] */
+ {IMX_8BIT, 0x0309, 0x05}, /* op_pix_clk_div[7:0] */
+ {IMX_8BIT, 0x030B, 0x01}, /* op_sys_clk_div[7:0] */
+ {IMX_8BIT, 0x030C, 0x00},
+ {IMX_8BIT, 0x030D, 0x6D},
+ /* image sizing */
+ {IMX_8BIT, 0x0340, 0x07}, /* frame_length_lines[15:8] */
+ {IMX_8BIT, 0x0341, 0x9E}, /* frame_length_lines[7:0] */
+ {IMX_8BIT, 0x0342, 0x1C}, /* line_length_pck[15:8] */
+ {IMX_8BIT, 0x0343, 0xB6}, /* line_length_pck[7:0] */
+ {IMX_8BIT, 0x0344, 0x00}, /* x_addr_start[15:8] */
+ {IMX_8BIT, 0x0345, 0x00}, /* x_addr_start[7:0] */
+ {IMX_8BIT, 0x0346, 0x00}, /* y_addr_start[15:8] */
+ {IMX_8BIT, 0x0347, 0x00}, /* y_addr_start[7:0] */
+ {IMX_8BIT, 0x0348, 0x0C}, /* x_addr_end[15:8] */
+ {IMX_8BIT, 0x0349, 0xCF}, /* x_addr_end[7:0] */
+ {IMX_8BIT, 0x034A, 0x09}, /* y_addr_end[15:8] */
+ {IMX_8BIT, 0x034B, 0x9F}, /* y_addr_end[7:0] */
+ {IMX_8BIT, 0x034C, 0x03}, /* x_output_size[15:8] */
+ {IMX_8BIT, 0x034D, 0x34}, /* x_output_size[7:0] */
+ {IMX_8BIT, 0x034E, 0x02}, /* y_output_size[15:8] */
+ {IMX_8BIT, 0x034F, 0x68}, /* y_output_size[7:0] */
+ /* binning & scaling */
+ {IMX_8BIT, 0x0390, 0x02}, /* binning mode */
+ {IMX_8BIT, 0x0401, 0x00}, /* scaling mode */
+ {IMX_8BIT, 0x0405, 0x10}, /* scale_m[7:0] */
+ /* timer */
+ {IMX_8BIT, 0x3344, 0x37},
+ {IMX_8BIT, 0x3345, 0x1F},
+ /* timing */
+ {IMX_8BIT, 0x3370, 0x5F},
+ {IMX_8BIT, 0x3371, 0x17},
+ {IMX_8BIT, 0x3372, 0x37},
+ {IMX_8BIT, 0x3373, 0x17},
+ {IMX_8BIT, 0x3374, 0x17},
+ {IMX_8BIT, 0x3375, 0x0F},
+ {IMX_8BIT, 0x3376, 0x57},
+ {IMX_8BIT, 0x3377, 0x27},
+ {IMX_8BIT, 0x33C8, 0x01},
+ {IMX_8BIT, 0x33D4, 0x06},
+ {IMX_8BIT, 0x33D5, 0x20},
+ {IMX_8BIT, 0x33D6, 0x03},
+ {IMX_8BIT, 0x33D7, 0x6C},
+ {IMX_TOK_TERM, 0, 0}
+};
+
+static struct imx_reg const imx_QVGA_strong_dvs_30fps[] = {
+ GROUPED_PARAMETER_HOLD_ENABLE,
+ {IMX_8BIT, 0x0100, 0x00}, /* mode_select */
+ /* shutter */
+ {IMX_8BIT, 0x0202, 0x05}, /* coarse_integration_time[15:8] */
+ {IMX_8BIT, 0x0203, 0x44}, /* coarse_integration_time[7:0] */
+ /* pll */
+ {IMX_8BIT, 0x0301, 0x05}, /* vt_pix_clk_div[7:0] */
+ {IMX_8BIT, 0x0303, 0x01}, /* vt_sys_clk_div[7:0] */
+ {IMX_8BIT, 0x0305, 0x06}, /* pre_pll_clk_div[7:0] */
+ {IMX_8BIT, 0x0309, 0x05}, /* op_pix_clk_div[7:0] */
+ {IMX_8BIT, 0x030B, 0x01}, /* op_sys_clk_div[7:0] */
+ {IMX_8BIT, 0x030C, 0x00},
+ {IMX_8BIT, 0x030D, 0x6D},
+ /* image sizing */
+ {IMX_8BIT, 0x0340, 0x05}, /* frame_length_lines[15:8] */
+ {IMX_8BIT, 0x0341, 0x48}, /* frame_length_lines[7:0] */
+ {IMX_8BIT, 0x0342, 0x0D}, /* line_length_pck[15:8] */
+ {IMX_8BIT, 0x0343, 0x70}, /* line_length_pck[7:0] */
+ {IMX_8BIT, 0x0344, 0x03}, /* x_addr_start[15:8] */
+ {IMX_8BIT, 0x0345, 0x38}, /* x_addr_start[7:0] */
+ {IMX_8BIT, 0x0346, 0x02}, /* y_addr_start[15:8] */
+ {IMX_8BIT, 0x0347, 0x68}, /* y_addr_start[7:0] */
+ {IMX_8BIT, 0x0348, 0x09}, /* x_addr_end[15:8] */
+ {IMX_8BIT, 0x0349, 0x97}, /* x_addr_end[7:0] */
+ {IMX_8BIT, 0x034A, 0x07}, /* y_addr_end[15:8] */
+ {IMX_8BIT, 0x034B, 0x37}, /* y_addr_end[7:0] */
+ {IMX_8BIT, 0x034C, 0x01}, /* x_output_size[15:8] */
+ {IMX_8BIT, 0x034D, 0x98}, /* x_output_size[7:0] */
+ {IMX_8BIT, 0x034E, 0x01}, /* y_output_size[15:8] */
+ {IMX_8BIT, 0x034F, 0x34}, /* y_output_size[7:0] */
+ /* binning & scaling */
+ {IMX_8BIT, 0x0390, 0x02}, /* binning mode */
+ {IMX_8BIT, 0x0401, 0x00}, /* scaling mode */
+ {IMX_8BIT, 0x0405, 0x10}, /* scale_m[7:0] */
+ /* timer */
+ {IMX_8BIT, 0x3344, 0x37},
+ {IMX_8BIT, 0x3345, 0x1F},
+ /* timing */
+ {IMX_8BIT, 0x3370, 0x5F},
+ {IMX_8BIT, 0x3371, 0x17},
+ {IMX_8BIT, 0x3372, 0x37},
+ {IMX_8BIT, 0x3373, 0x17},
+ {IMX_8BIT, 0x3374, 0x17},
+ {IMX_8BIT, 0x3375, 0x0F},
+ {IMX_8BIT, 0x3376, 0x57},
+ {IMX_8BIT, 0x3377, 0x27},
+ {IMX_8BIT, 0x33C8, 0x01},
+ {IMX_8BIT, 0x33D4, 0x01},
+ {IMX_8BIT, 0x33D5, 0x98},
+ {IMX_8BIT, 0x33D6, 0x01},
+ {IMX_8BIT, 0x33D7, 0x34},
+ {IMX_TOK_TERM, 0, 0}
+};
+
+static struct imx_reg const imx_QCIF_strong_dvs_30fps[] = {
+ GROUPED_PARAMETER_HOLD_ENABLE,
+ {IMX_8BIT, 0x0100, 0x00}, /* mode_select */
+ /* shutter */
+ {IMX_8BIT, 0x0202, 0x05}, /* coarse_integration_time[15:8] */
+ {IMX_8BIT, 0x0203, 0x44}, /* coarse_integration_time[7:0] */
+ /* pll */
+ {IMX_8BIT, 0x0301, 0x05}, /* vt_pix_clk_div[7:0] */
+ {IMX_8BIT, 0x0303, 0x01}, /* vt_sys_clk_div[7:0] */
+ {IMX_8BIT, 0x0305, 0x06}, /* pre_pll_clk_div[7:0] */
+ {IMX_8BIT, 0x0309, 0x05}, /* op_pix_clk_div[7:0] */
+ {IMX_8BIT, 0x030B, 0x01}, /* op_sys_clk_div[7:0] */
+ {IMX_8BIT, 0x030C, 0x00},
+ {IMX_8BIT, 0x030D, 0x6D},
+ /* image sizing */
+ {IMX_8BIT, 0x0340, 0x05}, /* frame_length_lines[15:8] */
+ {IMX_8BIT, 0x0341, 0x48}, /* frame_length_lines[7:0] */
+ {IMX_8BIT, 0x0342, 0x0D}, /* line_length_pck[15:8] */
+ {IMX_8BIT, 0x0343, 0x70}, /* line_length_pck[7:0] */
+ {IMX_8BIT, 0x0344, 0x04}, /* x_addr_start[15:8] */
+ {IMX_8BIT, 0x0345, 0xB8}, /* x_addr_start[7:0] */
+ {IMX_8BIT, 0x0346, 0x03}, /* y_addr_start[15:8] */
+ {IMX_8BIT, 0x0347, 0x70}, /* y_addr_start[7:0] */
+ {IMX_8BIT, 0x0348, 0x08}, /* x_addr_end[15:8] */
+ {IMX_8BIT, 0x0349, 0x17}, /* x_addr_end[7:0] */
+ {IMX_8BIT, 0x034A, 0x06}, /* y_addr_end[15:8] */
+ {IMX_8BIT, 0x034B, 0x2F}, /* y_addr_end[7:0] */
+ {IMX_8BIT, 0x034C, 0x00}, /* x_output_size[15:8] */
+ {IMX_8BIT, 0x034D, 0xD8}, /* x_output_size[7:0] */
+ {IMX_8BIT, 0x034E, 0x00}, /* y_output_size[15:8] */
+ {IMX_8BIT, 0x034F, 0xB0}, /* y_output_size[7:0] */
+ /* binning & scaling */
+ {IMX_8BIT, 0x0390, 0x02}, /* binning mode */
+ {IMX_8BIT, 0x0401, 0x00}, /* scaling mode */
+ {IMX_8BIT, 0x0405, 0x10}, /* scale_m[7:0] */
+ /* timer */
+ {IMX_8BIT, 0x3344, 0x37},
+ {IMX_8BIT, 0x3345, 0x1F},
+ /* timing */
+ {IMX_8BIT, 0x3370, 0x5F},
+ {IMX_8BIT, 0x3371, 0x17},
+ {IMX_8BIT, 0x3372, 0x37},
+ {IMX_8BIT, 0x3373, 0x17},
+ {IMX_8BIT, 0x3374, 0x17},
+ {IMX_8BIT, 0x3375, 0x0F},
+ {IMX_8BIT, 0x3376, 0x57},
+ {IMX_8BIT, 0x3377, 0x27},
+ {IMX_8BIT, 0x33C8, 0x01},
+ {IMX_8BIT, 0x33D4, 0x00},
+ {IMX_8BIT, 0x33D5, 0xD8},
+ {IMX_8BIT, 0x33D6, 0x00},
+ {IMX_8BIT, 0x33D7, 0xB0},
+ {IMX_TOK_TERM, 0, 0}
+};
+
+static struct imx_reg const imx175_init_settings[] = {
+ GROUPED_PARAMETER_HOLD_ENABLE,
+ {IMX_8BIT, 0x0103, 0x01},
+ /* misc control */
+ {IMX_8BIT, 0x3020, 0x10},
+ {IMX_8BIT, 0x302D, 0x02},
+ {IMX_8BIT, 0x302F, 0x80},
+ {IMX_8BIT, 0x3032, 0xA3},
+ {IMX_8BIT, 0x3033, 0x20},
+ {IMX_8BIT, 0x3034, 0x24},
+ {IMX_8BIT, 0x3041, 0x15},
+ {IMX_8BIT, 0x3042, 0x87},
+ {IMX_8BIT, 0x3050, 0x35},
+ {IMX_8BIT, 0x3056, 0x57},
+ {IMX_8BIT, 0x305D, 0x41},
+ {IMX_8BIT, 0x3097, 0x69},
+ {IMX_8BIT, 0x3109, 0x41},
+ {IMX_8BIT, 0x3148, 0x3F},
+ {IMX_8BIT, 0x330F, 0x07},
+ /* csi & inck */
+ {IMX_8BIT, 0x3364, 0x00},
+ {IMX_8BIT, 0x3368, 0x13},
+ {IMX_8BIT, 0x3369, 0x33},
+ /* znr */
+ {IMX_8BIT, 0x4100, 0x0E},
+ {IMX_8BIT, 0x4104, 0x32},
+ {IMX_8BIT, 0x4105, 0x32},
+ {IMX_8BIT, 0x4108, 0x01},
+ {IMX_8BIT, 0x4109, 0x7C},
+ {IMX_8BIT, 0x410A, 0x00},
+ {IMX_8BIT, 0x410B, 0x00},
+ GROUPED_PARAMETER_HOLD_DISABLE,
+ {IMX_TOK_TERM, 0, 0}
+};
+/* TODO: settings for preview/still/video will be updated with new use cases */
+struct imx_resolution imx175_res_preview[] = {
+ {
+ .desc = "CIF_strong_dvs_30fps",
+ .regs = imx_CIF_strong_dvs_30fps,
+ .width = 368,
+ .height = 304,
+ .bin_factor_x = 4,
+ .bin_factor_y = 4,
+ .used = 0,
+ .fps_options = {
+ {
+ .fps = 30,
+ .pixels_per_line = 0x11DB,
+ .lines_per_frame = 0x0600,
+ },
+ {
+ }
+ },
+ .mipi_freq = 261500,
+
+ },
+ {
+ .desc = "VGA_strong_dvs_30fps",
+ .regs = imx_VGA_strong_dvs_30fps,
+ .width = 820,
+ .height = 616,
+ .bin_factor_x = 4,
+ .bin_factor_y = 4,
+ .used = 0,
+ .fps_options = {
+ {
+ .fps = 30,
+ .pixels_per_line = 0x11DB,
+ .lines_per_frame = 0x0600,
+ },
+ {
+ }
+ },
+ .mipi_freq = 261500,
+ },
+ {
+ .desc = "WIDE_PREVIEW_30fps",
+ .regs = imx_WIDE_PREVIEW_30fps,
+ .width = 1640,
+ .height = 956,
+ .bin_factor_x = 2,
+ .bin_factor_y = 2,
+ .used = 0,
+ .fps_options = {
+ {
+ .fps = 30,
+ .pixels_per_line = 0x1000,
+ .lines_per_frame = 0x0D70,
+ },
+ {
+ }
+ },
+ .mipi_freq = 174500,
+ },
+ {
+ .desc = "STILL_720p_30fps",
+ .regs = imx_STILL_720p_30fps,
+ .width = 1568,
+ .height = 876,
+ .bin_factor_x = 2,
+ .bin_factor_y = 2,
+ .used = 0,
+ .fps_options = {
+ {
+ .fps = 30,
+ .pixels_per_line = 0x1428,
+ .lines_per_frame = 0x0548,
+ },
+ {
+ }
+ },
+ .mipi_freq = 261500,
+ },
+ {
+ .desc = "STILL_2M_30fps",
+ .regs = imx_STILL_2M_30fps,
+ .width = 1640,
+ .height = 1232,
+ .bin_factor_x = 2,
+ .bin_factor_y = 2,
+ .used = 0,
+ .fps_options = {
+ {
+ .fps = 30,
+ .pixels_per_line = 0x0D66,
+ .lines_per_frame = 0x09C4,
+ },
+ {
+ }
+ },
+ .mipi_freq = 320000,
+ },
+ {
+ .desc = "1080p_strong_dvs_30fps",
+ .regs = imx_1080p_no_dvs_30fps,
+ .width = 1940,
+ .height = 1092,
+ .bin_factor_x = 0,
+ .bin_factor_y = 0,
+ .used = 0,
+ .fps_options = {
+ {
+ .fps = 30,
+ .pixels_per_line = 0x0F3C,
+ .lines_per_frame = 0x07D0,
+ },
+ {
+ }
+ },
+ .mipi_freq = 292500,
+ },
+ {
+ .desc = "STILL_3M_30fps",
+ .regs = imx_STILL_3M_30fps,
+ .width = 2064,
+ .height = 1552,
+ .bin_factor_x = 0,
+ .bin_factor_y = 0,
+ .used = 0,
+ .fps_options = {
+ {
+ .fps = 30,
+ .pixels_per_line = 0x0D66,
+ .lines_per_frame = 0x09C4,
+ },
+ {
+ }
+ },
+ .mipi_freq = 320000,
+ },
+ {
+ .desc = "STILL_5M_30fps",
+ .regs = imx_STILL_5M_30fps,
+ .width = 2576,
+ .height = 1936,
+ .bin_factor_x = 0,
+ .bin_factor_y = 0,
+ .used = 0,
+ .fps_options = {
+ {
+ .fps = 30,
+ .pixels_per_line = 0x0D66,
+ .lines_per_frame = 0x09C4,
+ },
+ {
+ }
+ },
+ .mipi_freq = 320000,
+ },
+ {
+ .desc = "STILL_6M_30fps",
+ .regs = imx_STILL_6M_30fps,
+ .width = 3280,
+ .height = 1852,
+ .bin_factor_x = 0,
+ .bin_factor_y = 0,
+ .used = 0,
+ .fps_options = {
+ {
+ .fps = 30,
+ .pixels_per_line = 0x0D66,
+ .lines_per_frame = 0x09C4,
+ },
+ {
+ }
+ },
+ .mipi_freq = 320000,
+ },
+ {
+ .desc = "STILL_8M_30fps",
+ .regs = imx_STILL_8M_30fps,
+ .width = 3280,
+ .height = 2464,
+ .bin_factor_x = 0,
+ .bin_factor_y = 0,
+ .used = 0,
+ .fps_options = {
+ {
+ .fps = 30,
+ .pixels_per_line = 0x0D66,
+ .lines_per_frame = 0x09C4,
+ },
+ {
+ }
+ },
+ .mipi_freq = 320000,
+ },
+};
+
+struct imx_resolution imx175_res_still[] = {
+ {
+ .desc = "CIF_strong_dvs_30fps",
+ .regs = imx_CIF_strong_dvs_30fps,
+ .width = 368,
+ .height = 304,
+ .bin_factor_x = 4,
+ .bin_factor_y = 4,
+ .used = 0,
+ .fps_options = {
+ {
+ .fps = 15,
+ .pixels_per_line = 0x11DB,
+ .lines_per_frame = 0x0600,
+ },
+ {
+ }
+ },
+ .mipi_freq = 261000,
+ },
+ {
+ .desc = "VGA_strong_dvs_15fps",
+ .regs = imx_VGA_strong_dvs_15fps,
+ .width = 820,
+ .height = 616,
+ .bin_factor_x = 4,
+ .bin_factor_y = 4,
+ .used = 0,
+ .fps_options = {
+ {
+ .fps = 15,
+ .pixels_per_line = 0x1C86,
+ .lines_per_frame = 0x079E,
+ },
+ {
+ }
+ },
+ .mipi_freq = 261500,
+ },
+ {
+ .desc = "imx_STILL_720p_15fps",
+ .regs = imx_STILL_720p_15fps,
+ .width = 1568,
+ .height = 876,
+ .bin_factor_x = 2,
+ .bin_factor_y = 2,
+ .used = 0,
+ .fps_options = {
+ {
+ .fps = 15,
+ .pixels_per_line = 0x1838,
+ .lines_per_frame = 0x08CA,
+ },
+ {
+ }
+ },
+ .mipi_freq = 261500,
+ },
+ {
+ .desc = "STILL_2M_15fps",
+ .regs = imx_STILL_2M_15fps,
+ .width = 1640,
+ .height = 1232,
+ .bin_factor_x = 2,
+ .bin_factor_y = 2,
+ .used = 0,
+ .fps_options = {
+ {
+ .fps = 15,
+ .pixels_per_line = 0x1646,
+ .lines_per_frame = 0x0BB8,
+ },
+ {
+ }
+ },
+ .mipi_freq = 320000,
+ },
+ {
+ .desc = "1080p_strong_dvs_15fps",
+ .regs = imx_1080p_no_dvs_15fps,
+ .width = 1940,
+ .height = 1092,
+ .bin_factor_x = 0,
+ .bin_factor_y = 0,
+ .used = 0,
+ .fps_options = {
+ {
+ .fps = 15,
+ .pixels_per_line = 0x189C,
+ .lines_per_frame = 0x09A6,
+ },
+ {
+ }
+ },
+ .mipi_freq = 292500,
+ },
+ {
+ .desc = "STILL_3M_15fps",
+ .regs = imx_STILL_3M_15fps,
+ .width = 2064,
+ .height = 1552,
+ .bin_factor_x = 0,
+ .bin_factor_y = 0,
+ .used = 0,
+ .fps_options = {
+ {
+ .fps = 15,
+ .pixels_per_line = 0x1646,
+ .lines_per_frame = 0x0BB8,
+ },
+ {
+ }
+ },
+ .mipi_freq = 320000,
+ },
+ {
+ .desc = "STILL_5M_15fps",
+ .regs = imx_STILL_5M_15fps,
+ .width = 2576,
+ .height = 1936,
+ .fps = 15,
+ .pixels_per_line = 0x1646, /* consistent with regs arrays */
+ .lines_per_frame = 0x0BB8, /* consistent with regs arrays */
+ .bin_factor_x = 0,
+ .bin_factor_y = 0,
+ .used = 0,
+ .fps_options = {
+ {
+ .fps = 15,
+ .pixels_per_line = 0x1646,
+ .lines_per_frame = 0x0BB8,
+ },
+ {
+ }
+ },
+ .mipi_freq = 320000,
+ },
+ {
+ .desc = "STILL_6M_15fps",
+ .regs = imx_STILL_6M_15fps,
+ .width = 3280,
+ .height = 1852,
+ .bin_factor_x = 0,
+ .bin_factor_y = 0,
+ .used = 0,
+ .fps_options = {
+ {
+ .fps = 15,
+ .pixels_per_line = 0x1646,
+ .lines_per_frame = 0x0BB8,
+ },
+ {
+ }
+ },
+ .mipi_freq = 320000,
+ },
+ {
+ .desc = "STILL_8M_15fps",
+ .regs = imx_STILL_8M_15fps,
+ .width = 3280,
+ .height = 2464,
+ .bin_factor_x = 0,
+ .bin_factor_y = 0,
+ .used = 0,
+ .fps_options = {
+ {
+ .fps = 15,
+ .pixels_per_line = 0x1646,
+ .lines_per_frame = 0x0BB8,
+ },
+ {
+ }
+ },
+ .mipi_freq = 320000,
+ },
+};
+
+struct imx_resolution imx175_res_video[] = {
+ {
+ .desc = "QCIF_strong_dvs_30fps",
+ .regs = imx_QCIF_strong_dvs_30fps,
+ .width = 216,
+ .height = 176,
+ .bin_factor_x = 4,
+ .bin_factor_y = 4,
+ .used = 0,
+ .fps_options = {
+ {
+ .fps = 30,
+ .pixels_per_line = 0x0D70,
+ .lines_per_frame = 0x0548,
+ },
+ {
+ }
+ },
+ .mipi_freq = 174500,
+ },
+ {
+ .desc = "QVGA_strong_dvs_30fps",
+ .regs = imx_QVGA_strong_dvs_30fps,
+ .width = 408,
+ .height = 308,
+ .bin_factor_x = 4,
+ .bin_factor_y = 4,
+ .used = 0,
+ .fps_options = {
+ {
+ .fps = 30,
+ .pixels_per_line = 0x0D70,
+ .lines_per_frame = 0x0548,
+ },
+ {
+ }
+ },
+ .mipi_freq = 174500,
+ },
+ {
+ .desc = "VGA_strong_dvs_30fps",
+ .regs = imx_VGA_strong_dvs_30fps,
+ .width = 820,
+ .height = 616,
+ .bin_factor_x = 4,
+ .bin_factor_y = 4,
+ .used = 0,
+ .fps_options = {
+ {
+ .fps = 30,
+ .pixels_per_line = 0x1194,
+ .lines_per_frame = 0x0600,
+ },
+ {
+ }
+ },
+ .mipi_freq = 261500,
+ },
+ {
+ .desc = "720p_strong_dvs_30fps",
+ .regs = imx_720p_strong_dvs_30fps,
+ .width = 1552,
+ .height = 880,
+ .bin_factor_x = 0,
+ .bin_factor_y = 0,
+ .used = 0,
+ .fps_options = {
+ {
+ .fps = 30,
+ .pixels_per_line = 0x139C,
+ .lines_per_frame = 0x0600,
+ },
+ {
+ .fps = 60,
+ .pixels_per_line = 0xD70,
+ .lines_per_frame = 0x444,
+ },
+ {
+ }
+ },
+ .mipi_freq = 292500,
+ },
+ {
+ .desc = "480p_strong_dvs_30fps",
+ .regs = imx_480p_strong_dvs_30fps,
+ .width = 880,
+ .height = 592,
+ .bin_factor_x = 2,
+ .bin_factor_y = 2,
+ .used = 0,
+ .fps_options = {
+ {
+ .fps = 30,
+ .pixels_per_line = 0x139C,
+ .lines_per_frame = 0x0600,
+ },
+ {
+ }
+ },
+ .mipi_freq = 292500,
+ },
+ {
+ .desc = "WVGA_strong_dvs_30fps",
+ .regs = imx_WVGA_strong_dvs_30fps,
+ .width = 1640,
+ .height = 1024,
+ .bin_factor_x = 2,
+ .bin_factor_y = 2,
+ .used = 0,
+ .fps_options = {
+ {
+ .fps = 30,
+ .pixels_per_line = 0x139C,
+ .lines_per_frame = 0x0600,
+ },
+ {
+ }
+ },
+ .mipi_freq = 292500,
+ },
+ {
+ .desc = "1080p_strong_dvs_30fps",
+ .regs = imx_1080p_strong_dvs_30fps,
+ .width = 2320,
+ .height = 1312,
+ .bin_factor_x = 0,
+ .bin_factor_y = 0,
+ .used = 0,
+ .fps_options = {
+ {
+ .fps = 30,
+ .pixels_per_line = 0x11C6,
+ .lines_per_frame = 0x06A4,
+ },
+ {
+ }
+ },
+ .mipi_freq = 292500,
+ },
+};
+
+#endif
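
The fps_options entries in the resolution tables above pair each register set with the line_length_pck (pixels_per_line) and frame_length_lines (lines_per_frame) values that its register array programs. For SMIA-style sensors such as these, the nominal frame rate follows from fps = vt_pix_clk / (pixels_per_line * lines_per_frame). The following is a minimal sketch of that arithmetic only; the struct and the VT pixel clock figure are assumptions for the example, not the driver's actual types or clock tree.

/*
 * Illustrative sketch, assuming the usual SMIA-style relation
 * fps = vt_pix_clk / (line_length_pck * frame_length_lines).
 */
#include <stdio.h>

struct fps_setting {			/* simplified stand-in for one fps_options entry */
	unsigned int fps;		/* nominal label from the table */
	unsigned int pixels_per_line;	/* line_length_pck */
	unsigned int lines_per_frame;	/* frame_length_lines */
};

static unsigned int derived_fps(unsigned long vt_pix_clk_hz,
				const struct fps_setting *f)
{
	return vt_pix_clk_hz / (f->pixels_per_line * f->lines_per_frame);
}

int main(void)
{
	/* 30 fps still timing used above: 0x0D66 pixels/line, 0x09C4 lines/frame */
	struct fps_setting still_30 = { 30, 0x0D66, 0x09C4 };
	/* 15 fps still timing used above: 0x1646 pixels/line, 0x0BB8 lines/frame */
	struct fps_setting still_15 = { 15, 0x1646, 0x0BB8 };
	/* ~257.25 MHz is the VT pixel clock the 30 fps label implies; assumed, not measured */
	unsigned long vt_pix_clk_hz = 257250000UL;

	printf("%u fps, %u fps\n",
	       derived_fps(vt_pix_clk_hz, &still_30),
	       derived_fps(vt_pix_clk_hz, &still_15));
	return 0;
}

With the same assumed clock, the 15 fps still variants simply stretch both line_length_pck and frame_length_lines (0x1646 x 0x0BB8 versus 0x0D66 x 0x09C4), which is how one register family yields both the 30 fps and 15 fps entries above.
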
diff --git a/drivers/staging/media/atomisp/i2c/imx/imx208.h b/drivers/staging/media/atomisp/i2c/imx/imx208.h
new file mode 100644
index 000000000000..fed387f42f99
--- /dev/null
+++ b/drivers/staging/media/atomisp/i2c/imx/imx208.h
@@ -0,0 +1,550 @@
+/*
+ * Support for Sony IMX camera sensor.
+ *
+ * Copyright (c) 2014 Intel Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+
+#ifndef __IMX208_H__
+#define __IMX208_H__
+#include "common.h"
+
+/********************** settings for imx from vendor*********************/
+static struct imx_reg imx208_1080p_30fps[] = {
+ GROUPED_PARAMETER_HOLD_ENABLE,
+ {IMX_8BIT, 0x0305, 0x02}, /* PREPLLCK DIV */
+ {IMX_8BIT, 0x0307, 0x54}, /* PLL MPY */
+ {IMX_8BIT, 0x303C, 0x3C}, /* PLL oscillation stable wait time */
+ {IMX_8BIT, 0x30A4, 0x02}, /* Default */
+ {IMX_8BIT, 0x0112, 0x0A}, /* CCP_data_format : RAW 10bit */
+ {IMX_8BIT, 0x0113, 0x0A}, /* CCP_data_format : RAW 10bit */
+ {IMX_8BIT, 0x0340, 0x04}, /* frame length line [15:8] */
+ {IMX_8BIT, 0x0341, 0xAA}, /* frame length line [7:0] */
+ {IMX_8BIT, 0x0342, 0x08}, /* line length pck [15:8] */
+ {IMX_8BIT, 0x0343, 0xC8}, /* line length pck [7:0] */
+ {IMX_8BIT, 0x0344, 0x00}, /* x_addr_start[12:8] */
+ {IMX_8BIT, 0x0345, 0x00}, /* x_addr_start[7:0] */
+ {IMX_8BIT, 0x0346, 0x00}, /* y_addr_start[12:8] */
+ {IMX_8BIT, 0x0347, 0x00}, /* y_addr_start[7:0] */
+ {IMX_8BIT, 0x0348, 0x07}, /* x_addr_end [12:8] */
+ {IMX_8BIT, 0x0349, 0x8F}, /* x_addr_end [7:0] */
+ {IMX_8BIT, 0x034A, 0x04}, /* y_addr_end [12:8] */
+ {IMX_8BIT, 0x034B, 0x47}, /* y_addr_end [7:0] */
+ {IMX_8BIT, 0x034C, 0x07}, /* x_output_size [12:8] */
+ {IMX_8BIT, 0x034D, 0x90}, /* x_output_size [7:0] */
+ {IMX_8BIT, 0x034E, 0x04}, /* y_output_size [11:8] */
+ {IMX_8BIT, 0x034F, 0x48}, /* y_output_size [7:0] */
+ {IMX_8BIT, 0x0381, 0x01}, /* x_even_inc */
+ {IMX_8BIT, 0x0383, 0x01}, /* x_odd_inc */
+ {IMX_8BIT, 0x0385, 0x01}, /* y_even_inc */
+ {IMX_8BIT, 0x0387, 0x01}, /* y_odd_inc */
+ {IMX_8BIT, 0x3048, 0x00}, /* VMODEFDS binning operation */
+ {IMX_8BIT, 0x304E, 0x0A}, /* VTPXCK_DIV */
+ {IMX_8BIT, 0x3050, 0x02}, /* OPSYCK_DIV */
+ {IMX_8BIT, 0x309B, 0x00}, /* RGDAFDSUMEN */
+ {IMX_8BIT, 0x30D5, 0x00}, /* HADDEN ( binning ) */
+ {IMX_8BIT, 0x3301, 0x01}, /* RGLANESEL */
+ {IMX_8BIT, 0x3318, 0x61}, /* MIPI Global Timing */
+ {IMX_8BIT, 0x0202, 0x01}, /* coarse integration time */
+ {IMX_8BIT, 0x0203, 0x90}, /* coarse integration time */
+ {IMX_8BIT, 0x0205, 0x00}, /* ana global gain */
+
+ {IMX_TOK_TERM, 0, 0},
+};
+
+static struct imx_reg imx208_1296x736_30fps[] = {
+ GROUPED_PARAMETER_HOLD_ENABLE,
+ {IMX_8BIT, 0x0305, 0x02}, /* PREPLLCK DIV */
+ {IMX_8BIT, 0x0307, 0x54}, /* PLL MPY */
+ {IMX_8BIT, 0x303C, 0x3C}, /* PLL oscillation stable wait time */
+ {IMX_8BIT, 0x30A4, 0x02}, /* Default */
+ {IMX_8BIT, 0x0112, 0x0A}, /* CCP_data_format : RAW 10bit */
+ {IMX_8BIT, 0x0113, 0x0A}, /* CCP_data_format : RAW 10bit */
+ {IMX_8BIT, 0x0340, 0x04}, /* frame length line [15:8] */
+ {IMX_8BIT, 0x0341, 0xAA}, /* frame length line [7:0] */
+ {IMX_8BIT, 0x0342, 0x08}, /* line length pck [15:8] */
+ {IMX_8BIT, 0x0343, 0xC8}, /* line length pck [7:0] */
+ {IMX_8BIT, 0x0344, 0x01}, /* x_addr_start[12:8] */
+ {IMX_8BIT, 0x0345, 0x40}, /* x_addr_start[7:0] */
+ {IMX_8BIT, 0x0346, 0x00}, /* y_addr_start[12:8] */
+ {IMX_8BIT, 0x0347, 0xB4}, /* y_addr_start[7:0] */
+ {IMX_8BIT, 0x0348, 0x06}, /* x_addr_end [12:8] */
+ {IMX_8BIT, 0x0349, 0x4F}, /* x_addr_end [7:0] */
+ {IMX_8BIT, 0x034A, 0x03}, /* y_addr_end [12:8] */
+ {IMX_8BIT, 0x034B, 0x93}, /* y_addr_end [7:0] */
+ {IMX_8BIT, 0x034C, 0x05}, /* x_output_size [ 12:8] */
+ {IMX_8BIT, 0x034D, 0x10}, /* x_output_size [7:0] */
+ {IMX_8BIT, 0x034E, 0x02}, /* y_output_size [11:8] */
+ {IMX_8BIT, 0x034F, 0xE0}, /* y_output_size [7:0] */
+ {IMX_8BIT, 0x0381, 0x01}, /* x_even_inc */
+ {IMX_8BIT, 0x0383, 0x01}, /* x_odd_inc */
+ {IMX_8BIT, 0x0385, 0x01}, /* y_even_inc */
+ {IMX_8BIT, 0x0387, 0x01}, /* y_odd_inc */
+ {IMX_8BIT, 0x3048, 0x00}, /* VMODEFDS binning operation */
+ {IMX_8BIT, 0x304E, 0x0A}, /* VTPXCK_DIV */
+ {IMX_8BIT, 0x3050, 0x02}, /* OPSYCK_DIV */
+ {IMX_8BIT, 0x309B, 0x00}, /* RGDAFDSUMEN */
+ {IMX_8BIT, 0x30D5, 0x00}, /* HADDEN ( binning ) */
+ {IMX_8BIT, 0x3301, 0x01}, /* RGLANESEL */
+ {IMX_8BIT, 0x3318, 0x61}, /* MIPI Global Timing */
+ {IMX_8BIT, 0x0202, 0x01}, /* coarse integration time */
+ {IMX_8BIT, 0x0203, 0x90}, /* coarse integration time */
+ {IMX_8BIT, 0x0205, 0x00}, /* ana global gain */
+
+ {IMX_TOK_TERM, 0, 0},
+};
+
+static struct imx_reg imx208_1296x976_30fps[] = {
+ GROUPED_PARAMETER_HOLD_ENABLE,
+ {IMX_8BIT, 0x0305, 0x02}, /* PREPLLCK DIV */
+ {IMX_8BIT, 0x0307, 0x54}, /* PLL MPY */
+ {IMX_8BIT, 0x303C, 0x3C}, /* PLL oscillation stable wait time */
+ {IMX_8BIT, 0x30A4, 0x02}, /* Default */
+ {IMX_8BIT, 0x0112, 0x0A}, /* CCP_data_format : RAW 10bit */
+ {IMX_8BIT, 0x0113, 0x0A}, /* CCP_data_format : RAW 10bit */
+ {IMX_8BIT, 0x0340, 0x04}, /* frame length line [15:8] */
+ {IMX_8BIT, 0x0341, 0xAA}, /* frame length line [7:0] */
+ {IMX_8BIT, 0x0342, 0x08}, /* line length pck [15:8] */
+ {IMX_8BIT, 0x0343, 0xC8}, /* line length pck [7:0] */
+ {IMX_8BIT, 0x0344, 0x01}, /* x_addr_start[12:8] */
+ {IMX_8BIT, 0x0345, 0x40}, /* x_addr_start[7:0] */
+ {IMX_8BIT, 0x0346, 0x00}, /* y_addr_start[12:8] */
+ {IMX_8BIT, 0x0347, 0x3C}, /* y_addr_start[7:0] */
+ {IMX_8BIT, 0x0348, 0x06}, /* x_addr_end [12:8] */
+ {IMX_8BIT, 0x0349, 0x4F}, /* x_addr_end [7:0] */
+ {IMX_8BIT, 0x034A, 0x04}, /* y_addr_end [12:8] */
+ {IMX_8BIT, 0x034B, 0x0B}, /* y_addr_end [7:0] */
+ {IMX_8BIT, 0x034C, 0x05}, /* x_output_size [ 12:8] */
+ {IMX_8BIT, 0x034D, 0x10}, /* x_output_size [7:0] */
+ {IMX_8BIT, 0x034E, 0x03}, /* y_output_size [11:8] */
+ {IMX_8BIT, 0x034F, 0xD0}, /* y_output_size [7:0] */
+ {IMX_8BIT, 0x0381, 0x01}, /* x_even_inc */
+ {IMX_8BIT, 0x0383, 0x01}, /* x_odd_inc */
+ {IMX_8BIT, 0x0385, 0x01}, /* y_even_inc */
+ {IMX_8BIT, 0x0387, 0x01}, /* y_odd_inc */
+ {IMX_8BIT, 0x3048, 0x00}, /* VMODEFDS binning operation */
+ {IMX_8BIT, 0x304E, 0x0A}, /* VTPXCK_DIV */
+ {IMX_8BIT, 0x3050, 0x02}, /* OPSYCK_DIV */
+ {IMX_8BIT, 0x309B, 0x00}, /* RGDAFDSUMEN */
+ {IMX_8BIT, 0x30D5, 0x00}, /* HADDEN ( binning ) */
+ {IMX_8BIT, 0x3301, 0x01}, /* RGLANESEL */
+ {IMX_8BIT, 0x3318, 0x61}, /* MIPI Global Timing */
+ {IMX_8BIT, 0x0202, 0x01}, /* coarse integration time */
+ {IMX_8BIT, 0x0203, 0x90}, /* coarse integration time */
+ {IMX_8BIT, 0x0205, 0x00}, /* ana global gain */
+
+ {IMX_TOK_TERM, 0, 0},
+};
+
+static struct imx_reg imx208_336x256_30fps[] = {
+ GROUPED_PARAMETER_HOLD_ENABLE,
+ {IMX_8BIT, 0x0305, 0x02}, /* PREPLLCK DIV */
+ {IMX_8BIT, 0x0307, 0x54}, /* PLL MPY */
+ {IMX_8BIT, 0x303C, 0x3C}, /* PLL oscillation stable wait time */
+ {IMX_8BIT, 0x30A4, 0x02}, /* Default */
+ {IMX_8BIT, 0x0112, 0x0A}, /* CCP_data_format : RAW 10bit */
+ {IMX_8BIT, 0x0113, 0x0A}, /* CCP_data_format : RAW 10bit */
+ {IMX_8BIT, 0x0340, 0x04}, /* frame length line [15:8] */
+ {IMX_8BIT, 0x0341, 0xAA}, /* frame length line [7:0] */
+ {IMX_8BIT, 0x0342, 0x08}, /* line length pck [15:8] */
+ {IMX_8BIT, 0x0343, 0xC8}, /* line length pck [7:0] */
+ {IMX_8BIT, 0x0344, 0x02}, /* x_addr_start[12:8] */
+ {IMX_8BIT, 0x0345, 0x78}, /* x_addr_start[7:0] */
+ {IMX_8BIT, 0x0346, 0x01}, /* y_addr_start[12:8] */
+ {IMX_8BIT, 0x0347, 0x24}, /* y_addr_start[7:0] */
+ {IMX_8BIT, 0x0348, 0x05}, /* x_addr_end [12:8] */
+ {IMX_8BIT, 0x0349, 0x17}, /* x_addr_end [7:0] */
+ {IMX_8BIT, 0x034A, 0x03}, /* y_addr_end [12:8] */
+ {IMX_8BIT, 0x034B, 0x23}, /* y_addr_end [7:0] */
+ {IMX_8BIT, 0x034C, 0x01}, /* x_output_size [ 12:8] */
+ {IMX_8BIT, 0x034D, 0x50}, /* x_output_size [7:0] */
+ {IMX_8BIT, 0x034E, 0x01}, /* y_output_size [11:8] */
+ {IMX_8BIT, 0x034F, 0x00}, /* y_output_size [7:0] */
+ {IMX_8BIT, 0x0381, 0x01}, /* x_even_inc */
+ {IMX_8BIT, 0x0383, 0x03}, /* x_odd_inc */
+ {IMX_8BIT, 0x0385, 0x01}, /* y_even_inc */
+ {IMX_8BIT, 0x0387, 0x03}, /* y_odd_inc */
+ {IMX_8BIT, 0x3048, 0x01}, /* VMODEFDS binning operation */
+ {IMX_8BIT, 0x304E, 0x0A}, /* VTPXCK_DIV */
+ {IMX_8BIT, 0x3050, 0x02}, /* OPSYCK_DIV */
+ {IMX_8BIT, 0x309B, 0x00}, /* RGDAFDSUMEN */
+ {IMX_8BIT, 0x30D5, 0x03}, /* HADDEN ( binning ) */
+ {IMX_8BIT, 0x3301, 0x01}, /* RGLANESEL */
+ {IMX_8BIT, 0x3318, 0x66}, /* MIPI Global Timing */
+ {IMX_8BIT, 0x0202, 0x01}, /* coarse integration time */
+ {IMX_8BIT, 0x0203, 0x90}, /* coarse integration time */
+ {IMX_8BIT, 0x0205, 0x00}, /* ana global gain */
+
+ {IMX_TOK_TERM, 0, 0},
+};
+
+static struct imx_reg imx208_192x160_30fps[] = {
+ GROUPED_PARAMETER_HOLD_ENABLE,
+ {IMX_8BIT, 0x0305, 0x02}, /* PREPLLCK DIV */
+ {IMX_8BIT, 0x0307, 0x54}, /* PLL MPY */
+ {IMX_8BIT, 0x303C, 0x3C}, /* PLL oscillation stable wait time */
+ {IMX_8BIT, 0x30A4, 0x02}, /* Default */
+ {IMX_8BIT, 0x0112, 0x0A}, /* CCP_data_format : RAW 10bit */
+ {IMX_8BIT, 0x0113, 0x0A}, /* CCP_data_format : RAW 10bit */
+ {IMX_8BIT, 0x0340, 0x04}, /* frame length line [15:8] */
+ {IMX_8BIT, 0x0341, 0xAA}, /* frame length line [7:0] */
+ {IMX_8BIT, 0x0342, 0x08}, /* line length pck [15:8] */
+ {IMX_8BIT, 0x0343, 0xC8}, /* line length pck [7:0] */
+ {IMX_8BIT, 0x0344, 0x02}, /* x_addr_start[12:8] */
+ {IMX_8BIT, 0x0345, 0x48}, /* x_addr_start[7:0] */
+ {IMX_8BIT, 0x0346, 0x00}, /* y_addr_start[12:8] */
+ {IMX_8BIT, 0x0347, 0xE4}, /* y_addr_start[7:0] */
+ {IMX_8BIT, 0x0348, 0x05}, /* x_addr_end [12:8] */
+ {IMX_8BIT, 0x0349, 0x47}, /* x_addr_end [7:0] */
+ {IMX_8BIT, 0x034A, 0x03}, /* y_addr_end [12:8] */
+ {IMX_8BIT, 0x034B, 0x63}, /* y_addr_end [7:0] */
+ {IMX_8BIT, 0x034C, 0x00}, /* x_output_size [ 12:8] */
+ {IMX_8BIT, 0x034D, 0xC0}, /* x_output_size [7:0] */
+ {IMX_8BIT, 0x034E, 0x00}, /* y_output_size [11:8] */
+ {IMX_8BIT, 0x034F, 0xA0}, /* y_output_size [7:0] */
+ {IMX_8BIT, 0x0381, 0x03}, /* x_even_inc */
+ {IMX_8BIT, 0x0383, 0x05}, /* x_odd_inc */
+ {IMX_8BIT, 0x0385, 0x03}, /* y_even_inc */
+ {IMX_8BIT, 0x0387, 0x05}, /* y_odd_inc */
+ {IMX_8BIT, 0x3048, 0x01}, /* VMODEFDS binning operation */
+ {IMX_8BIT, 0x304E, 0x0A}, /* VTPXCK_DIV */
+ {IMX_8BIT, 0x3050, 0x02}, /* OPSYCK_DIV */
+ {IMX_8BIT, 0x309B, 0x00}, /* RGDAFDSUMEN */
+ {IMX_8BIT, 0x30D5, 0x03}, /* HADDEN ( binning ) */
+ {IMX_8BIT, 0x3301, 0x11}, /* RGLANESEL */
+ {IMX_8BIT, 0x3318, 0x74}, /* MIPI Global Timing */
+ {IMX_8BIT, 0x0202, 0x01}, /* coarse integration time */
+ {IMX_8BIT, 0x0203, 0x90}, /* coarse integration time */
+ {IMX_8BIT, 0x0205, 0x00}, /* ana global gain */
+
+ {IMX_TOK_TERM, 0, 0},
+};
+/********************** settings for imx - reference *********************/
+static struct imx_reg const imx208_init_settings[] = {
+ { IMX_TOK_TERM, 0, 0}
+};
+
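+/*
+ * Each resolution entry pairs a register table with its output geometry,
+ * binning factors and fps options; the trailing empty initializer simply
+ * leaves the unused fps_options slots zero-filled.
+ */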
+struct imx_resolution imx208_res_preview[] = {
+ {
+ .desc = "imx208_1080p_30fps",
+ .regs = imx208_1080p_30fps,
+ .width = 1936,
+ .height = 1096,
+ .fps_options = {
+ {
+ .fps = 30,
+ .pixels_per_line = 0x08C8,
+ .lines_per_frame = 0x04AA,
+ },
+ {
+ }
+ },
+ .bin_factor_x = 1,
+ .bin_factor_y = 1,
+ .used = 0,
+ .skip_frames = 2,
+ .mipi_freq = 403200,
+ },
+ {
+ .desc = "imx208_1296x976_30fps",
+ .regs = imx208_1296x976_30fps,
+ .width = 1296,
+ .height = 976,
+ .fps_options = {
+ {
+ .fps = 30,
+ .pixels_per_line = 0x08C8,
+ .lines_per_frame = 0x04AA,
+ },
+ {
+ }
+ },
+ .bin_factor_x = 1,
+ .bin_factor_y = 1,
+ .used = 0,
+ .skip_frames = 2,
+ .mipi_freq = 403200,
+ },
+ {
+ .desc = "imx208_1296x736_30fps",
+ .regs = imx208_1296x736_30fps,
+ .width = 1296,
+ .height = 736,
+ .fps_options = {
+ {
+ .fps = 30,
+ .pixels_per_line = 0x08C8,
+ .lines_per_frame = 0x04AA,
+ },
+ {
+ }
+ },
+ .bin_factor_x = 1,
+ .bin_factor_y = 1,
+ .used = 0,
+ .skip_frames = 2,
+ .mipi_freq = 403200,
+ },
+ {
+ .desc = "imx208_336x256_30fps",
+ .regs = imx208_336x256_30fps,
+ .width = 336,
+ .height = 256,
+ .fps_options = {
+ {
+ .fps = 30,
+ .pixels_per_line = 0x08C8,
+ .lines_per_frame = 0x04AA,
+ },
+ {
+ }
+ },
+ .bin_factor_x = 2,
+ .bin_factor_y = 2,
+ .used = 0,
+ .skip_frames = 2,
+ .mipi_freq = 201600,
+ },
+ {
+ .desc = "imx208_192x160_30fps",
+ .regs = imx208_192x160_30fps,
+ .width = 192,
+ .height = 160,
+ .fps_options = {
+ {
+ .fps = 30,
+ .pixels_per_line = 0x08C8,
+ .lines_per_frame = 0x04AA,
+ },
+ {
+ }
+ },
+ .bin_factor_x = 4,
+ .bin_factor_y = 4,
+ .used = 0,
+ .skip_frames = 2,
+ .mipi_freq = 100800,
+ },
+};
+
+struct imx_resolution imx208_res_still[] = {
+ {
+ .desc = "imx208_1080p_30fps",
+ .regs = imx208_1080p_30fps,
+ .width = 1936,
+ .height = 1096,
+ .fps_options = {
+ {
+ .fps = 30,
+ .pixels_per_line = 0x08C8,
+ .lines_per_frame = 0x04AA,
+ },
+ {
+ }
+ },
+ .bin_factor_x = 1,
+ .bin_factor_y = 1,
+ .used = 0,
+ .skip_frames = 2,
+ .mipi_freq = 403200,
+ },
+ {
+ .desc = "imx208_1296x976_30fps",
+ .regs = imx208_1296x976_30fps,
+ .width = 1296,
+ .height = 976,
+ .fps_options = {
+ {
+ .fps = 30,
+ .pixels_per_line = 0x08C8,
+ .lines_per_frame = 0x04AA,
+ },
+ {
+ }
+ },
+ .bin_factor_x = 1,
+ .bin_factor_y = 1,
+ .used = 0,
+ .skip_frames = 2,
+ .mipi_freq = 403200,
+ },
+ {
+ .desc = "imx208_1296x736_30fps",
+ .regs = imx208_1296x736_30fps,
+ .width = 1296,
+ .height = 736,
+ .fps_options = {
+ {
+ .fps = 30,
+ .pixels_per_line = 0x08C8,
+ .lines_per_frame = 0x04AA,
+ },
+ {
+ }
+ },
+ .bin_factor_x = 1,
+ .bin_factor_y = 1,
+ .used = 0,
+ .skip_frames = 2,
+ .mipi_freq = 403200,
+ },
+ {
+ .desc = "imx208_336x256_30fps",
+ .regs = imx208_336x256_30fps,
+ .width = 336,
+ .height = 256,
+ .fps_options = {
+ {
+ .fps = 30,
+ .pixels_per_line = 0x08C8,
+ .lines_per_frame = 0x04AA,
+ },
+ {
+ }
+ },
+ .bin_factor_x = 2,
+ .bin_factor_y = 2,
+ .used = 0,
+ .skip_frames = 2,
+ .mipi_freq = 201600,
+ },
+ {
+ .desc = "imx208_192x160_30fps",
+ .regs = imx208_192x160_30fps,
+ .width = 192,
+ .height = 160,
+ .fps_options = {
+ {
+ .fps = 30,
+ .pixels_per_line = 0x08C8,
+ .lines_per_frame = 0x04AA,
+ },
+ {
+ }
+ },
+ .bin_factor_x = 4,
+ .bin_factor_y = 4,
+ .used = 0,
+ .skip_frames = 2,
+ .mipi_freq = 100800,
+ },
+};
+
+struct imx_resolution imx208_res_video[] = {
+ {
+ .desc = "imx208_1080p_30fps",
+ .regs = imx208_1080p_30fps,
+ .width = 1936,
+ .height = 1096,
+ .fps_options = {
+ {
+ .fps = 30,
+ .pixels_per_line = 0x08C8,
+ .lines_per_frame = 0x04AA,
+ },
+ {
+ }
+ },
+ .bin_factor_x = 1,
+ .bin_factor_y = 1,
+ .used = 0,
+ .skip_frames = 2,
+ .mipi_freq = 403200,
+ },
+ {
+ .desc = "imx208_1296x976_30fps",
+ .regs = imx208_1296x976_30fps,
+ .width = 1296,
+ .height = 976,
+ .fps_options = {
+ {
+ .fps = 30,
+ .pixels_per_line = 0x08C8,
+ .lines_per_frame = 0x04AA,
+ },
+ {
+ }
+ },
+ .bin_factor_x = 1,
+ .bin_factor_y = 1,
+ .used = 0,
+ .skip_frames = 2,
+ .mipi_freq = 403200,
+ },
+ {
+ .desc = "imx208_1296x736_30fps",
+ .regs = imx208_1296x736_30fps,
+ .width = 1296,
+ .height = 736,
+ .fps_options = {
+ {
+ .fps = 30,
+ .pixels_per_line = 0x08C8,
+ .lines_per_frame = 0x04AA,
+ },
+ {
+ }
+ },
+ .bin_factor_x = 1,
+ .bin_factor_y = 1,
+ .used = 0,
+ .skip_frames = 2,
+ .mipi_freq = 403200,
+ },
+ {
+ .desc = "imx208_336x256_30fps",
+ .regs = imx208_336x256_30fps,
+ .width = 336,
+ .height = 256,
+ .fps_options = {
+ {
+ .fps = 30,
+ .pixels_per_line = 0x08C8,
+ .lines_per_frame = 0x04AA,
+ },
+ {
+ }
+ },
+ .bin_factor_x = 2,
+ .bin_factor_y = 2,
+ .used = 0,
+ .skip_frames = 2,
+ .mipi_freq = 201600,
+ },
+ {
+ .desc = "imx208_192x160_30fps",
+ .regs = imx208_192x160_30fps,
+ .width = 192,
+ .height = 160,
+ .fps_options = {
+ {
+ .fps = 30,
+ .pixels_per_line = 0x08C8,
+ .lines_per_frame = 0x04AA,
+ },
+ {
+ }
+ },
+ .bin_factor_x = 4,
+ .bin_factor_y = 4,
+ .used = 0,
+ .skip_frames = 2,
+ .mipi_freq = 100800,
+ },
+};
+#endif
+
diff --git a/drivers/staging/media/atomisp/i2c/imx/imx219.h b/drivers/staging/media/atomisp/i2c/imx/imx219.h
new file mode 100644
index 000000000000..52df582c56d8
--- /dev/null
+++ b/drivers/staging/media/atomisp/i2c/imx/imx219.h
@@ -0,0 +1,227 @@
+#ifndef __IMX219_H__
+#define __IMX219_H__
+#include "common.h"
+
+#define IMX219_FRAME_LENGTH_LINES 0x0160
+#define IMX219_LINE_LENGTH_PIXELS 0x0162
+#define IMX219_HORIZONTAL_START_H 0x0164
+#define IMX219_VERTICAL_START_H 0x0168
+#define IMX219_HORIZONTAL_END_H 0x0166
+#define IMX219_VERTICAL_END_H 0x016A
+#define IMX219_HORIZONTAL_OUTPUT_SIZE_H 0x016c
+#define IMX219_VERTICAL_OUTPUT_SIZE_H 0x016E
+#define IMX219_COARSE_INTEGRATION_TIME 0x015A
+#define IMX219_IMG_ORIENTATION 0x0172
+#define IMX219_GLOBAL_GAIN 0x0157
+#define IMX219_DGC_ADJ 0x0158
+
+#define IMX219_DGC_LEN 4
+
+/************************** settings for imx *************************/
+static struct imx_reg const imx219_STILL_8M_30fps[] = {
+ {IMX_8BIT, 0x30EB, 0x05}, /*Access Code for address over 0x3000*/
+ {IMX_8BIT, 0x30EB, 0x0C}, /*Access Code for address over 0x3000*/
+ {IMX_8BIT, 0x300A, 0xFF}, /*Access Code for address over 0x3000*/
+ {IMX_8BIT, 0x300B, 0xFF}, /*Access Code for address over 0x3000*/
+ {IMX_8BIT, 0x30EB, 0x05}, /*Access Code for address over 0x3000*/
+ {IMX_8BIT, 0x30EB, 0x09}, /*Access Code for address over 0x3000*/
+	{IMX_8BIT, 0x0114, 0x03},	/*CSI_LANE_MODE[1:0]*/
+ {IMX_8BIT, 0x0128, 0x00}, /*DPHY_CNTRL*/
+ {IMX_8BIT, 0x012A, 0x13}, /*EXCK_FREQ[15:8]*/
+ {IMX_8BIT, 0x012B, 0x34}, /*EXCK_FREQ[7:0]*/
+ {IMX_8BIT, 0x0160, 0x0A}, /*FRM_LENGTH_A[15:8]*/
+ {IMX_8BIT, 0x0161, 0x94}, /*FRM_LENGTH_A[7:0]*/
+ {IMX_8BIT, 0x0162, 0x0D}, /*LINE_LENGTH_A[15:8]*/
+ {IMX_8BIT, 0x0163, 0x78}, /*LINE_LENGTH_A[7:0]*/
+ {IMX_8BIT, 0x0164, 0x00}, /*X_ADD_STA_A[11:8]*/
+ {IMX_8BIT, 0x0165, 0x00}, /*X_ADD_STA_A[7:0]*/
+ {IMX_8BIT, 0x0166, 0x0C}, /*X_ADD_END_A[11:8]*/
+ {IMX_8BIT, 0x0167, 0xCF}, /*X_ADD_END_A[7:0]*/
+ {IMX_8BIT, 0x0168, 0x00}, /*Y_ADD_STA_A[11:8]*/
+ {IMX_8BIT, 0x0169, 0x00}, /*Y_ADD_STA_A[7:0]*/
+ {IMX_8BIT, 0x016A, 0x09}, /*Y_ADD_END_A[11:8]*/
+ {IMX_8BIT, 0x016B, 0x9F}, /*Y_ADD_END_A[7:0]*/
+ {IMX_8BIT, 0x016C, 0x0C}, /*X_OUTPUT_SIZE_A[11:8]*/
+ {IMX_8BIT, 0x016D, 0xD0}, /*X_OUTPUT_SIZE_A[7:0]*/
+ {IMX_8BIT, 0x016E, 0x09}, /*Y_OUTPUT_SIZE_A[11:8]*/
+ {IMX_8BIT, 0x016F, 0xA0}, /*Y_OUTPUT_SIZE_A[7:0]*/
+ {IMX_8BIT, 0x0170, 0x01}, /*X_ODD_INC_A[2:0]*/
+ {IMX_8BIT, 0x0171, 0x01}, /*Y_ODD_INC_A[2:0]*/
+ {IMX_8BIT, 0x0174, 0x00}, /*BINNING_MODE_H_A*/
+ {IMX_8BIT, 0x0175, 0x00}, /*BINNING_MODE_V_A*/
+ {IMX_8BIT, 0x018C, 0x0A}, /*CSI_DATA_FORMAT_A[15:8]*/
+ {IMX_8BIT, 0x018D, 0x0A}, /*CSI_DATA_FORMAT_A[7:0]*/
+ {IMX_8BIT, 0x0301, 0x05}, /*VTPXCK_DIV*/
+ {IMX_8BIT, 0x0303, 0x01}, /*VTSYCK_DIV*/
+ {IMX_8BIT, 0x0304, 0x02}, /*PREPLLCK_VT_DIV[3:0]*/
+ {IMX_8BIT, 0x0305, 0x02}, /*PREPLLCK_OP_DIV[3:0]*/
+ {IMX_8BIT, 0x0306, 0x00}, /*PLL_VT_MPY[10:8]*/
+ {IMX_8BIT, 0x0307, 0x49}, /*PLL_VT_MPY[7:0]*/
+ {IMX_8BIT, 0x0309, 0x0A}, /*OPPXCK_DIV[4:0]*/
+ {IMX_8BIT, 0x030B, 0x01}, /*OPSYCK_DIV*/
+ {IMX_8BIT, 0x030C, 0x00}, /*PLL_OP_MPY[10:8]*/
+ {IMX_8BIT, 0x030D, 0x4C}, /*PLL_OP_MPY[7:0]*/
+ {IMX_8BIT, 0x4767, 0x0F}, /*CIS Tuning*/
+ {IMX_8BIT, 0x4750, 0x14}, /*CIS Tuning*/
+ {IMX_8BIT, 0x47B4, 0x14}, /*CIS Tuning*/
+ {IMX_TOK_TERM, 0, 0}
+};
+
+static struct imx_reg const imx219_STILL_6M_30fps[] = {
+ {IMX_8BIT, 0x30EB, 0x05}, /*Access Code for address over 0x3000*/
+ {IMX_8BIT, 0x30EB, 0x0C}, /*Access Code for address over 0x3000*/
+ {IMX_8BIT, 0x300A, 0xFF}, /*Access Code for address over 0x3000*/
+ {IMX_8BIT, 0x300B, 0xFF}, /*Access Code for address over 0x3000*/
+ {IMX_8BIT, 0x30EB, 0x05}, /*Access Code for address over 0x3000*/
+ {IMX_8BIT, 0x30EB, 0x09}, /*Access Code for address over 0x3000*/
+	{IMX_8BIT, 0x0114, 0x03},	/*CSI_LANE_MODE[1:0]*/
+ {IMX_8BIT, 0x0128, 0x00}, /*DPHY_CNTRL*/
+ {IMX_8BIT, 0x012A, 0x13}, /*EXCK_FREQ[15:8]*/
+ {IMX_8BIT, 0x012B, 0x34}, /*EXCK_FREQ[7:0]*/
+ {IMX_8BIT, 0x0160, 0x07}, /*FRM_LENGTH_A[15:8]*/
+ {IMX_8BIT, 0x0161, 0x64}, /*FRM_LENGTH_A[7:0]*/
+ {IMX_8BIT, 0x0162, 0x0D}, /*LINE_LENGTH_A[15:8]*/
+ {IMX_8BIT, 0x0163, 0x78}, /*LINE_LENGTH_A[7:0]*/
+ {IMX_8BIT, 0x0164, 0x00}, /*X_ADD_STA_A[11:8]*/
+ {IMX_8BIT, 0x0165, 0x00}, /*X_ADD_STA_A[7:0]*/
+ {IMX_8BIT, 0x0166, 0x0C}, /*X_ADD_END_A[11:8]*/
+ {IMX_8BIT, 0x0167, 0xCF}, /*X_ADD_END_A[7:0]*/
+ {IMX_8BIT, 0x0168, 0x01}, /*Y_ADD_STA_A[11:8]*/
+ {IMX_8BIT, 0x0169, 0x32}, /*Y_ADD_STA_A[7:0]*/
+ {IMX_8BIT, 0x016A, 0x08}, /*Y_ADD_END_A[11:8]*/
+ {IMX_8BIT, 0x016B, 0x6D}, /*Y_ADD_END_A[7:0]*/
+ {IMX_8BIT, 0x016C, 0x0C}, /*X_OUTPUT_SIZE_A[11:8]*/
+ {IMX_8BIT, 0x016D, 0xD0}, /*X_OUTPUT_SIZE_A[7:0]*/
+ {IMX_8BIT, 0x016E, 0x07}, /*Y_OUTPUT_SIZE_A[11:8]*/
+ {IMX_8BIT, 0x016F, 0x3C}, /*Y_OUTPUT_SIZE_A[7:0]*/
+ {IMX_8BIT, 0x0170, 0x01}, /*X_ODD_INC_A[2:0]*/
+ {IMX_8BIT, 0x0171, 0x01}, /*Y_ODD_INC_A[2:0]*/
+ {IMX_8BIT, 0x0174, 0x00}, /*BINNING_MODE_H_A*/
+ {IMX_8BIT, 0x0175, 0x00}, /*BINNING_MODE_V_A*/
+ {IMX_8BIT, 0x018C, 0x0A}, /*CSI_DATA_FORMAT_A[15:8]*/
+ {IMX_8BIT, 0x018D, 0x0A}, /*CSI_DATA_FORMAT_A[7:0]*/
+ {IMX_8BIT, 0x0301, 0x05}, /*VTPXCK_DIV*/
+ {IMX_8BIT, 0x0303, 0x01}, /*VTSYCK_DIV*/
+ {IMX_8BIT, 0x0304, 0x02}, /*PREPLLCK_VT_DIV[3:0]*/
+ {IMX_8BIT, 0x0305, 0x02}, /*PREPLLCK_OP_DIV[3:0]*/
+ {IMX_8BIT, 0x0306, 0x00}, /*PLL_VT_MPY[10:8]*/
+ {IMX_8BIT, 0x0307, 0x33}, /*PLL_VT_MPY[7:0]*/
+ {IMX_8BIT, 0x0309, 0x0A}, /*OPPXCK_DIV[4:0]*/
+ {IMX_8BIT, 0x030B, 0x01}, /*OPSYCK_DIV*/
+ {IMX_8BIT, 0x030C, 0x00}, /*PLL_OP_MPY[10:8]*/
+ {IMX_8BIT, 0x030D, 0x36}, /*PLL_OP_MPY[7:0]*/
+ {IMX_8BIT, 0x4767, 0x0F}, /*CIS Tuning*/
+ {IMX_8BIT, 0x4750, 0x14}, /*CIS Tuning*/
+ {IMX_8BIT, 0x47B4, 0x14}, /*CIS Tuning*/
+ {IMX_TOK_TERM, 0, 0}
+};
+
+static struct imx_reg const imx219_init_settings[] = {
+ {IMX_TOK_TERM, 0, 0}
+};
+
+struct imx_resolution imx219_res_preview[] = {
+ {
+ .desc = "STILL_6M_30fps",
+ .regs = imx219_STILL_6M_30fps,
+ .width = 3280,
+ .height = 1852,
+ .bin_factor_x = 0,
+ .bin_factor_y = 0,
+ .used = 0,
+ .fps_options = {
+ {
+ .fps = 30,
+ .pixels_per_line = 0x0D78,
+ .lines_per_frame = 0x0764,
+ },
+ {
+ }
+ },
+ .mipi_freq = 259000,
+ },
+ {
+ .desc = "STILL_8M_30fps",
+ .regs = imx219_STILL_8M_30fps,
+ .width = 3280,
+ .height = 2464,
+ .bin_factor_x = 0,
+ .bin_factor_y = 0,
+ .used = 0,
+ .fps_options = {
+ {
+ .fps = 30,
+ .pixels_per_line = 0x0D78,
+ .lines_per_frame = 0x0A94,
+ },
+ {
+ }
+ },
+ .mipi_freq = 365000,
+ },
+};
+
+struct imx_resolution imx219_res_still[] = {
+ {
+ .desc = "STILL_6M_30fps",
+ .regs = imx219_STILL_6M_30fps,
+ .width = 3280,
+ .height = 1852,
+ .bin_factor_x = 0,
+ .bin_factor_y = 0,
+ .used = 0,
+ .fps_options = {
+ {
+ .fps = 30,
+ .pixels_per_line = 0x0D78,
+ .lines_per_frame = 0x0764,
+ },
+ {
+ }
+ },
+ .mipi_freq = 259000,
+ },
+ {
+ .desc = "STILL_8M_30fps",
+ .regs = imx219_STILL_8M_30fps,
+ .width = 3280,
+ .height = 2464,
+ .bin_factor_x = 0,
+ .bin_factor_y = 0,
+ .used = 0,
+ .fps_options = {
+ {
+ .fps = 30,
+ .pixels_per_line = 0x0D78,
+ .lines_per_frame = 0x0A94,
+ },
+ {
+ }
+ },
+ .mipi_freq = 365000,
+ },
+};
+
+struct imx_resolution imx219_res_video[] = {
+ {
+ .desc = "STILL_6M_30fps",
+ .regs = imx219_STILL_6M_30fps,
+ .width = 3280,
+ .height = 1852,
+ .bin_factor_x = 0,
+ .bin_factor_y = 0,
+ .used = 0,
+ .fps_options = {
+ {
+ .fps = 30,
+ .pixels_per_line = 0x0D78,
+ .lines_per_frame = 0x0764,
+ },
+ {
+ }
+ },
+ .mipi_freq = 259000,
+ },
+};
+
+#endif
diff --git a/drivers/staging/media/atomisp/i2c/imx/imx227.h b/drivers/staging/media/atomisp/i2c/imx/imx227.h
new file mode 100644
index 000000000000..10e5b86f6687
--- /dev/null
+++ b/drivers/staging/media/atomisp/i2c/imx/imx227.h
@@ -0,0 +1,726 @@
+#ifndef __IMX227_H__
+#define __IMX227_H__
+
+#include "common.h"
+
+#define IMX227_EMBEDDED_DATA_LINE_NUM 2
+#define IMX227_OUTPUT_DATA_FORMAT_REG 0x0112
+#define IMX227_OUTPUT_FORMAT_RAW10 0x0a0a
+
+/* AE Bracketing Registers */
+#define IMX227_BRACKETING_LUT_MODE_BIT_CONTINUE_STREAMING 0x1
+#define IMX227_BRACKETING_LUT_MODE_BIT_LOOP_MODE 0x2
+
+#define IMX227_BRACKETING_LUT_CONTROL 0x0E00
+#define IMX227_BRACKETING_LUT_MODE 0x0E01
+#define IMX227_BRACKETING_LUT_ENTRY_CONTROL 0x0E02
+
+/*
+ * The imx227 embedded data info:
+ * embedded data line num: 2
+ * line 0 effective data size (bytes): 160
+ * line 1 effective data size (bytes): 62
+ */
+static const uint32_t
+imx227_embedded_effective_size[IMX227_EMBEDDED_DATA_LINE_NUM] = {160, 62};
+
+/************************** settings for imx *************************/
+/* Full Output Mode */
+static struct imx_reg const imx_STILL_6_5M_25fps[] = {
+ GROUPED_PARAMETER_HOLD_ENABLE,
+ {IMX_8BIT, 0x6259, 0x06}, /* latency ctrl */
+ {IMX_8BIT, 0x9004, 0xd0}, /* preset_sel */
+ {IMX_8BIT, 0x9005, 0x3f}, /* preset_en */
+ {IMX_8BIT, 0x0136, 0x13},
+ {IMX_8BIT, 0x0137, 0x33},
+ {IMX_TOK_TERM, 0, 0}
+};
+
+/* 4:3 Output Mode */
+static struct imx_reg const imx_STILL_5_5M_3X4_30fps[] = {
+ GROUPED_PARAMETER_HOLD_ENABLE,
+ {IMX_8BIT, 0x0344, 0x00},
+ {IMX_8BIT, 0x0345, 0xb0},
+ {IMX_8BIT, 0x0346, 0x00},
+ {IMX_8BIT, 0x0347, 0x00},
+ {IMX_8BIT, 0x0348, 0x08},
+ {IMX_8BIT, 0x0349, 0xaf},
+ {IMX_8BIT, 0x034a, 0x0a},
+ {IMX_8BIT, 0x034b, 0x9f},
+ {IMX_8BIT, 0x034c, 0x08},
+ {IMX_8BIT, 0x034d, 0x00},
+ {IMX_8BIT, 0x034e, 0x0a},
+ {IMX_8BIT, 0x034f, 0xa0},
+
+ {IMX_8BIT, 0x6259, 0x05}, /* latency ctrl */
+ {IMX_8BIT, 0x9004, 0xd8}, /* preset_sel */
+ {IMX_8BIT, 0x9005, 0x3f}, /* preset_en */
+ {IMX_8BIT, 0x0136, 0x13},
+ {IMX_8BIT, 0x0137, 0x33},
+ {IMX_TOK_TERM, 0, 0}
+};
+
+/* Square Output Mode */
+static struct imx_reg const imx_STILL_5_7M_1X1_30fps[] = {
+ GROUPED_PARAMETER_HOLD_ENABLE,
+ {IMX_8BIT, 0x0344, 0x00},
+ {IMX_8BIT, 0x0345, 0x00},
+ {IMX_8BIT, 0x0346, 0x00},
+ {IMX_8BIT, 0x0347, 0xa0},
+ {IMX_8BIT, 0x0348, 0x09},
+ {IMX_8BIT, 0x0349, 0x5f},
+ {IMX_8BIT, 0x034a, 0x09},
+ {IMX_8BIT, 0x034b, 0xff},
+ {IMX_8BIT, 0x034c, 0x09},
+ {IMX_8BIT, 0x034d, 0x60},
+ {IMX_8BIT, 0x034e, 0x09},
+ {IMX_8BIT, 0x034f, 0x60},
+
+ {IMX_8BIT, 0x6259, 0x06}, /* latency ctrl */
+ {IMX_8BIT, 0x9004, 0xd4}, /* preset_sel */
+ {IMX_8BIT, 0x9005, 0x3f}, /* preset_en */
+ {IMX_8BIT, 0x0136, 0x13},
+ {IMX_8BIT, 0x0137, 0x33},
+ {IMX_TOK_TERM, 0, 0}
+};
+
+/* Full Frame 1080P Mode (use ISP scaler)*/
+static struct imx_reg const imx_VIDEO_4M_9X16_30fps[] = {
+ GROUPED_PARAMETER_HOLD_ENABLE,
+ {IMX_8BIT, 0x6259, 0x05}, /* latency ctrl */
+ {IMX_8BIT, 0x9004, 0xdc}, /* preset_sel */
+ {IMX_8BIT, 0x9005, 0x3f}, /* preset_en */
+ {IMX_8BIT, 0x0136, 0x13},
+ {IMX_8BIT, 0x0137, 0x33},
+ {IMX_TOK_TERM, 0, 0}
+};
+
+/* Cropped 1080P Mode */
+static struct imx_reg const imx_VIDEO_2M_9X16_45fps[] = {
+ GROUPED_PARAMETER_HOLD_ENABLE,
+ {IMX_8BIT, 0x0112, 0x0a},
+ {IMX_8BIT, 0x0113, 0x0a},
+ {IMX_8BIT, 0x0344, 0x02},
+ {IMX_8BIT, 0x0345, 0x8a},
+ {IMX_8BIT, 0x0346, 0x01},
+ {IMX_8BIT, 0x0347, 0x88},
+ {IMX_8BIT, 0x0348, 0x06},
+ {IMX_8BIT, 0x0349, 0xd1},
+ {IMX_8BIT, 0x034a, 0x09},
+ {IMX_8BIT, 0x034b, 0x17},
+ {IMX_8BIT, 0x034c, 0x04},
+ {IMX_8BIT, 0x034d, 0x48},
+ {IMX_8BIT, 0x034e, 0x07},
+ {IMX_8BIT, 0x034f, 0x90},
+
+ {IMX_8BIT, 0x0380, 0x00},
+ {IMX_8BIT, 0x0381, 0x01},
+ {IMX_8BIT, 0x0382, 0x00},
+ {IMX_8BIT, 0x0383, 0x01},
+ {IMX_8BIT, 0x0384, 0x00},
+ {IMX_8BIT, 0x0385, 0x01},
+ {IMX_8BIT, 0x0386, 0x00},
+ {IMX_8BIT, 0x0387, 0x01},
+
+ {IMX_8BIT, 0x0408, 0x00},
+ {IMX_8BIT, 0x0409, 0x00},
+ {IMX_8BIT, 0x040a, 0x00},
+ {IMX_8BIT, 0x040b, 0x00},
+ {IMX_8BIT, 0x040c, 0x04},
+ {IMX_8BIT, 0x040d, 0x48},
+ {IMX_8BIT, 0x040e, 0x07},
+ {IMX_8BIT, 0x040f, 0x90},
+
+ {IMX_8BIT, 0x0900, 0x00},
+ {IMX_8BIT, 0x0901, 0x00},
+
+ {IMX_8BIT, 0x6259, 0x05}, /* latency ctrl */
+ {IMX_8BIT, 0x9004, 0xdc}, /* preset_sel */
+ {IMX_8BIT, 0x9005, 0x3c}, /* preset_en */
+
+ {IMX_8BIT, 0x0136, 0x13},
+ {IMX_8BIT, 0x0137, 0x33},
+ {IMX_TOK_TERM, 0, 0}
+};
+
+/* Moment mode */
+static struct imx_reg const imx_VIDEO_1_3M_3X4_60fps[] = {
+ GROUPED_PARAMETER_HOLD_ENABLE,
+ {IMX_8BIT, 0x6259, 0x05}, /* latency ctrl */
+ {IMX_8BIT, 0x9004, 0xd9}, /* preset_sel */
+ {IMX_8BIT, 0x9005, 0x3f}, /* preset_en */
+ {IMX_8BIT, 0x0136, 0x13},
+ {IMX_8BIT, 0x0137, 0x33},
+ {IMX_TOK_TERM, 0, 0}
+};
+
+/* High Speed 3:4 mode */
+static struct imx_reg const imx_VIDEO_VGA_3X4_120fps[] = {
+ GROUPED_PARAMETER_HOLD_ENABLE,
+ {IMX_8BIT, 0x9004, 0xca}, /* preset_sel */
+ {IMX_8BIT, 0x9005, 0x3f}, /* preset_en */
+ {IMX_8BIT, 0x0136, 0x13},
+ {IMX_8BIT, 0x0137, 0x33},
+ {IMX_TOK_TERM, 0, 0}
+};
+
+
+/* Binned 720P mode */
+static struct imx_reg const imx_VIDEO_1M_9X16_60fps[] = {
+ GROUPED_PARAMETER_HOLD_ENABLE,
+ {IMX_8BIT, 0x0112, 0x0a},
+ {IMX_8BIT, 0x0113, 0x0a},
+ {IMX_8BIT, 0x0344, 0x01},
+ {IMX_8BIT, 0x0345, 0xd0},
+ {IMX_8BIT, 0x0346, 0x00},
+ {IMX_8BIT, 0x0347, 0x40},
+ {IMX_8BIT, 0x0348, 0x07},
+ {IMX_8BIT, 0x0349, 0x8f},
+ {IMX_8BIT, 0x034a, 0x0a},
+ {IMX_8BIT, 0x034b, 0x5f},
+ {IMX_8BIT, 0x034c, 0x02},
+ {IMX_8BIT, 0x034d, 0xe0},
+ {IMX_8BIT, 0x034e, 0x05},
+ {IMX_8BIT, 0x034f, 0x10},
+
+ {IMX_8BIT, 0x0380, 0x00},
+ {IMX_8BIT, 0x0381, 0x01},
+ {IMX_8BIT, 0x0382, 0x00},
+ {IMX_8BIT, 0x0383, 0x01},
+ {IMX_8BIT, 0x0384, 0x00},
+ {IMX_8BIT, 0x0385, 0x01},
+ {IMX_8BIT, 0x0386, 0x00},
+ {IMX_8BIT, 0x0387, 0x01},
+
+ {IMX_8BIT, 0x0408, 0x00},
+ {IMX_8BIT, 0x0409, 0x00},
+ {IMX_8BIT, 0x040a, 0x00},
+ {IMX_8BIT, 0x040b, 0x00},
+ {IMX_8BIT, 0x040c, 0x02},
+ {IMX_8BIT, 0x040d, 0xe0},
+ {IMX_8BIT, 0x040e, 0x05},
+ {IMX_8BIT, 0x040f, 0x10},
+
+ {IMX_8BIT, 0x0900, 0x01},
+ {IMX_8BIT, 0x0901, 0x22},
+
+ {IMX_8BIT, 0x6259, 0x05}, /* latency ctrl */
+ {IMX_8BIT, 0x9004, 0xdd}, /* preset_sel */
+ {IMX_8BIT, 0x9005, 0x3c}, /* preset_en */
+ {IMX_8BIT, 0x0136, 0x13},
+ {IMX_8BIT, 0x0137, 0x33},
+ {IMX_TOK_TERM, 0, 0}
+};
+
+/* Binned 496x868 mode */
+static struct imx_reg const imx_VIDEO_496x868_30fps[] = {
+ GROUPED_PARAMETER_HOLD_ENABLE,
+ {IMX_8BIT, 0x0112, 0x0a},
+ {IMX_8BIT, 0x0113, 0x0a},
+ {IMX_8BIT, 0x0344, 0x02},
+ {IMX_8BIT, 0x0345, 0xc0},
+ {IMX_8BIT, 0x0346, 0x01},
+ {IMX_8BIT, 0x0347, 0xec},
+ {IMX_8BIT, 0x0348, 0x06},
+ {IMX_8BIT, 0x0349, 0x9f},
+ {IMX_8BIT, 0x034a, 0x08},
+ {IMX_8BIT, 0x034b, 0xb3},
+ {IMX_8BIT, 0x034c, 0x01},
+ {IMX_8BIT, 0x034d, 0xf0},
+ {IMX_8BIT, 0x034e, 0x03},
+ {IMX_8BIT, 0x034f, 0x64},
+
+ {IMX_8BIT, 0x0380, 0x00},
+ {IMX_8BIT, 0x0381, 0x01},
+ {IMX_8BIT, 0x0382, 0x00},
+ {IMX_8BIT, 0x0383, 0x01},
+ {IMX_8BIT, 0x0384, 0x00},
+ {IMX_8BIT, 0x0385, 0x01},
+ {IMX_8BIT, 0x0386, 0x00},
+ {IMX_8BIT, 0x0387, 0x01},
+
+ {IMX_8BIT, 0x0408, 0x00},
+ {IMX_8BIT, 0x0409, 0x00},
+ {IMX_8BIT, 0x040a, 0x00},
+ {IMX_8BIT, 0x040b, 0x00},
+ {IMX_8BIT, 0x040c, 0x01},
+ {IMX_8BIT, 0x040d, 0xf0},
+ {IMX_8BIT, 0x040e, 0x03},
+ {IMX_8BIT, 0x040f, 0x64},
+
+ {IMX_8BIT, 0x0900, 0x01},
+ {IMX_8BIT, 0x0901, 0x22},
+
+ {IMX_8BIT, 0x6259, 0x05}, /* latency ctrl */
+ {IMX_8BIT, 0x9004, 0xdd}, /* preset_sel */
+ {IMX_8BIT, 0x9005, 0x3c}, /* preset_en */
+ {IMX_8BIT, 0x0136, 0x13},
+ {IMX_8BIT, 0x0137, 0x33},
+ {IMX_TOK_TERM, 0, 0}
+};
+
+
+/* Hangout mode */
+static struct imx_reg const imx_PREVIEW_374X652_30fps[] = {
+ GROUPED_PARAMETER_HOLD_ENABLE,
+ {IMX_8BIT, 0x0112, 0x0a},
+ {IMX_8BIT, 0x0113, 0x0a},
+ {IMX_8BIT, 0x0344, 0x01},
+ {IMX_8BIT, 0x0345, 0xc0},
+ {IMX_8BIT, 0x0346, 0x00},
+ {IMX_8BIT, 0x0347, 0x30},
+ {IMX_8BIT, 0x0348, 0x07},
+ {IMX_8BIT, 0x0349, 0x9f},
+ {IMX_8BIT, 0x034a, 0x0a},
+ {IMX_8BIT, 0x034b, 0x6f},
+ {IMX_8BIT, 0x034c, 0x01},
+ {IMX_8BIT, 0x034d, 0x78},
+ {IMX_8BIT, 0x034e, 0x02},
+ {IMX_8BIT, 0x034f, 0x90},
+
+ {IMX_8BIT, 0x0380, 0x00},
+ {IMX_8BIT, 0x0381, 0x01},
+ {IMX_8BIT, 0x0382, 0x00},
+ {IMX_8BIT, 0x0383, 0x03},
+ {IMX_8BIT, 0x0384, 0x00},
+ {IMX_8BIT, 0x0385, 0x01},
+ {IMX_8BIT, 0x0386, 0x00},
+ {IMX_8BIT, 0x0387, 0x03},
+
+ {IMX_8BIT, 0x0408, 0x00},
+ {IMX_8BIT, 0x0409, 0x00},
+ {IMX_8BIT, 0x040a, 0x00},
+ {IMX_8BIT, 0x040b, 0x02},
+ {IMX_8BIT, 0x040c, 0x01},
+ {IMX_8BIT, 0x040d, 0x76},
+ {IMX_8BIT, 0x040e, 0x02},
+ {IMX_8BIT, 0x040f, 0x8c},
+
+ {IMX_8BIT, 0x0900, 0x01},
+ {IMX_8BIT, 0x0901, 0x22},
+
+ {IMX_8BIT, 0x6259, 0x05}, /* latency ctrl */
+ {IMX_8BIT, 0x9004, 0xde}, /* preset_sel */
+ {IMX_8BIT, 0x9005, 0x3c}, /* preset_en */
+ {IMX_8BIT, 0x0136, 0x13},
+ {IMX_8BIT, 0x0137, 0x33},
+ {IMX_TOK_TERM, 0, 0}
+};
+
+static struct imx_reg const imx_VIDEO_NHD_9X16_30fps[] = {
+ GROUPED_PARAMETER_HOLD_ENABLE,
+ {IMX_8BIT, 0x0112, 0x0a},
+ {IMX_8BIT, 0x0113, 0x0a},
+ {IMX_8BIT, 0x0344, 0x01},
+ {IMX_8BIT, 0x0345, 0xc0},
+ {IMX_8BIT, 0x0346, 0x00},
+ {IMX_8BIT, 0x0347, 0x30},
+ {IMX_8BIT, 0x0348, 0x07},
+ {IMX_8BIT, 0x0349, 0x9f},
+ {IMX_8BIT, 0x034a, 0x0a},
+ {IMX_8BIT, 0x034b, 0x6f},
+ {IMX_8BIT, 0x034c, 0x01},
+ {IMX_8BIT, 0x034d, 0x78},
+ {IMX_8BIT, 0x034e, 0x02},
+ {IMX_8BIT, 0x034f, 0x90},
+
+ {IMX_8BIT, 0x0380, 0x00},
+ {IMX_8BIT, 0x0381, 0x01},
+ {IMX_8BIT, 0x0382, 0x00},
+ {IMX_8BIT, 0x0383, 0x03},
+ {IMX_8BIT, 0x0384, 0x00},
+ {IMX_8BIT, 0x0385, 0x01},
+ {IMX_8BIT, 0x0386, 0x00},
+ {IMX_8BIT, 0x0387, 0x03},
+
+ {IMX_8BIT, 0x0408, 0x00},
+ {IMX_8BIT, 0x0409, 0x00},
+ {IMX_8BIT, 0x040a, 0x00},
+ {IMX_8BIT, 0x040b, 0x00},
+ {IMX_8BIT, 0x040c, 0x01},
+ {IMX_8BIT, 0x040d, 0x78},
+ {IMX_8BIT, 0x040e, 0x02},
+ {IMX_8BIT, 0x040f, 0x90},
+
+ {IMX_8BIT, 0x0900, 0x01},
+ {IMX_8BIT, 0x0901, 0x22},
+
+ {IMX_8BIT, 0x6259, 0x05}, /* latency ctrl */
+ {IMX_8BIT, 0x9004, 0xde}, /* preset_sel */
+ {IMX_8BIT, 0x9005, 0x3c}, /* preset_en */
+ {IMX_8BIT, 0x0136, 0x13},
+ {IMX_8BIT, 0x0137, 0x33},
+ {IMX_TOK_TERM, 0, 0}
+};
+
+
+static struct imx_reg const imx227_init_settings[] = {
+ {IMX_8BIT, 0x0100, 0x00}, /* mode_select */
+ GROUPED_PARAMETER_HOLD_ENABLE,
+ {IMX_8BIT, 0x0306, 0x00},
+ {IMX_8BIT, 0x0307, 0xBB},
+ {IMX_8BIT, 0x030E, 0x03},
+ {IMX_8BIT, 0x030F, 0x0D},
+ {IMX_8BIT, 0x463b, 0x30},
+ {IMX_8BIT, 0x463e, 0x05},
+ {IMX_8BIT, 0x4612, 0x66},
+ {IMX_8BIT, 0x4815, 0x65},
+ {IMX_8BIT, 0x4991, 0x00},
+ {IMX_8BIT, 0x4992, 0x01},
+ {IMX_8BIT, 0x4993, 0xff},
+ {IMX_8BIT, 0x458b, 0x00},
+ {IMX_8BIT, 0x452a, 0x02},
+ {IMX_8BIT, 0x4a7c, 0x00},
+ {IMX_8BIT, 0x4a7d, 0x1c},
+ {IMX_8BIT, 0x4a7e, 0x00},
+ {IMX_8BIT, 0x4a7f, 0x17},
+ {IMX_8BIT, 0x462C, 0x2E},
+ {IMX_8BIT, 0x461B, 0x28},
+ {IMX_8BIT, 0x4663, 0x29},
+ {IMX_8BIT, 0x461A, 0x7C},
+ {IMX_8BIT, 0x4619, 0x28},
+ {IMX_8BIT, 0x4667, 0x22},
+ {IMX_8BIT, 0x466B, 0x23},
+ {IMX_8BIT, 0x40AD, 0xFF},
+ {IMX_8BIT, 0x40BE, 0x00},
+ {IMX_8BIT, 0x40BF, 0x6E},
+ {IMX_8BIT, 0x40CE, 0x00},
+ {IMX_8BIT, 0x40CF, 0x0A},
+ {IMX_8BIT, 0x40CA, 0x00},
+ {IMX_8BIT, 0x40CB, 0x1F},
+ {IMX_8BIT, 0x4D16, 0x00},
+ {IMX_8BIT, 0x6204, 0x01},
+ {IMX_8BIT, 0x6209, 0x00},
+ {IMX_8BIT, 0x621F, 0x01},
+ {IMX_8BIT, 0x621E, 0x10},
+ GROUPED_PARAMETER_HOLD_DISABLE,
+ {IMX_TOK_TERM, 0, 0}
+};
+
+/* TODO: preview/still/video settings will be updated for new use cases */
+struct imx_resolution imx227_res_preview[] = {
+ {
+ .desc = "imx_PREVIEW_374X652_30fps",
+ .regs = imx_PREVIEW_374X652_30fps,
+ .width = 374,
+ .height = 652,
+ .bin_factor_x = 2,
+ .bin_factor_y = 2,
+ .mipi_freq = 499000,
+ .used = 0,
+ .fps_options = {
+ {
+ .fps = 30,
+ .pixels_per_line = 0x0E70,
+ .lines_per_frame = 0x0C0A,
+ },
+ {
+ }
+ },
+ },
+ {
+ .desc = "imx_VIDEO_496x868_30fps",
+ .regs = imx_VIDEO_496x868_30fps,
+ .width = 496,
+ .height = 868,
+ .bin_factor_x = 1,
+ .bin_factor_y = 1,
+ .mipi_freq = 499000,
+ .used = 0,
+ .fps_options = {
+ {
+ .fps = 30,
+ .pixels_per_line = 0x0E70,
+ .lines_per_frame = 0x0C08,
+ },
+ {
+ }
+ },
+ },
+ {
+ .desc = "imx_STILL_5_5M_3X4_30fps",
+ .regs = imx_STILL_5_5M_3X4_30fps,
+ .width = 2048,
+ .height = 2720,
+ .bin_factor_x = 0,
+ .bin_factor_y = 0,
+ .mipi_freq = 499000,
+ .used = 0,
+ .fps_options = {
+ {
+ .fps = 30,
+ .pixels_per_line = 0x0ED8,
+ .lines_per_frame = 0x0BB8,
+ },
+ {
+ }
+ },
+
+ },
+ {
+ .desc = "imx_STILL_5_7M_1X1_30fps",
+ .regs = imx_STILL_5_7M_1X1_30fps,
+ .width = 2400,
+ .height = 2400,
+ .bin_factor_x = 0,
+ .bin_factor_y = 0,
+ .mipi_freq = 499000,
+ .used = 0,
+ .fps_options = {
+ {
+ .fps = 30,
+ .pixels_per_line = 0x1130,
+ .lines_per_frame = 0x0A1E,
+ },
+ {
+ }
+ },
+
+ },
+ {
+ .desc = "imx_STILL_6_5M_25fps",
+ .regs = imx_STILL_6_5M_25fps,
+ .width = 2400,
+ .height = 2720,
+ .bin_factor_x = 0,
+ .bin_factor_y = 0,
+ .mipi_freq = 499000,
+ .used = 0,
+ .fps_options = {
+ {
+ .fps = 25,
+ .pixels_per_line = 0x1130,
+ .lines_per_frame = 0x0C24,
+ },
+ {
+ }
+ },
+ }
+};
+
+struct imx_resolution imx227_res_still[] = {
+ {
+ .desc = "imx_STILL_5_5M_3X4_30fps",
+ .regs = imx_STILL_5_5M_3X4_30fps,
+ .width = 2048,
+ .height = 2720,
+ .bin_factor_x = 0,
+ .bin_factor_y = 0,
+ .mipi_freq = 499000,
+ .used = 0,
+ .fps_options = {
+ {
+ .fps = 6,
+ .pixels_per_line = 0x2130,
+ .lines_per_frame = 0x1A22,
+ },
+ {
+ .fps = 30,
+ .pixels_per_line = 0x0ED8,
+ .lines_per_frame = 0x0BB8,
+ },
+ {
+ }
+ },
+
+ },
+ {
+ .desc = "imx_STILL_5_7M_1X1_30fps",
+ .regs = imx_STILL_5_7M_1X1_30fps,
+ .width = 2400,
+ .height = 2400,
+ .bin_factor_x = 0,
+ .bin_factor_y = 0,
+ .mipi_freq = 499000,
+ .used = 0,
+ .fps_options = {
+ {
+ .fps = 6,
+ .pixels_per_line = 0x266E,
+ .lines_per_frame = 0x1704,
+ },
+ {
+ .fps = 30,
+ .pixels_per_line = 0x1130,
+ .lines_per_frame = 0x0A1E,
+ },
+ {
+ }
+ },
+
+ },
+ {
+ .desc = "imx_STILL_6_5M_25fps",
+ .regs = imx_STILL_6_5M_25fps,
+ .width = 2400,
+ .height = 2720,
+ .bin_factor_x = 0,
+ .bin_factor_y = 0,
+ .mipi_freq = 499000,
+ .used = 0,
+ .fps_options = {
+ {
+ .fps = 25,
+ .pixels_per_line = 0x1130,
+ .lines_per_frame = 0x0C24,
+ },
+ {
+ }
+ },
+ },
+};
+
+struct imx_resolution imx227_res_video[] = {
+ {
+ .desc = "imx_VIDEO_4M_9X16_30fps",
+ .regs = imx_VIDEO_4M_9X16_30fps,
+ .width = 1536,
+ .height = 2720,
+ .bin_factor_x = 0,
+ .bin_factor_y = 0,
+ .mipi_freq = 499000,
+ .used = 0,
+ .fps_options = {
+ {
+ .fps = 30,
+ .pixels_per_line = 0x0E70,
+ .lines_per_frame = 0x0C08,
+ },
+ {
+ }
+ },
+
+ },
+ {
+ .desc = "imx_VIDEO_2M_9X16_45fps",
+ .regs = imx_VIDEO_2M_9X16_45fps,
+ .width = 1096,
+ .height = 1936,
+ .bin_factor_x = 0,
+ .bin_factor_y = 0,
+ .mipi_freq = 499000,
+ .used = 0,
+ .fps_options = {
+ {
+ .fps = 30,
+ .pixels_per_line = 0x0E70,
+ .lines_per_frame = 0x0C08,
+ },
+ {
+ .fps = 45,
+ .pixels_per_line = 0x0E70,
+ .lines_per_frame = 0x0800,
+ },
+ {
+ }
+ },
+
+ },
+ {
+ .desc = "imx_VIDEO_1_3M_3X4_60fps",
+ .regs = imx_VIDEO_1_3M_3X4_60fps,
+ .width = 1024,
+ .height = 1360,
+ .bin_factor_x = 1,
+ .bin_factor_y = 1,
+ .mipi_freq = 499000,
+ .used = 0,
+ .fps_options = {
+ {
+ .fps = 60,
+ .pixels_per_line = 0x0E70,
+ .lines_per_frame = 0x0604,
+ },
+ {
+ }
+ },
+ },
+ {
+ .desc = "imx_VIDEO_496x868_30fps",
+ .regs = imx_VIDEO_496x868_30fps,
+ .width = 496,
+ .height = 868,
+ .bin_factor_x = 1,
+ .bin_factor_y = 1,
+ .mipi_freq = 499000,
+ .used = 0,
+ .fps_options = {
+ {
+ .fps = 30,
+ .pixels_per_line = 0x0E70,
+ .lines_per_frame = 0x0C08,
+ },
+ {
+ }
+ },
+ },
+ {
+ .desc = "imx_VIDEO_1M_9X16_60fps",
+ .regs = imx_VIDEO_1M_9X16_60fps,
+ .width = 736,
+ .height = 1296,
+ .bin_factor_x = 1,
+ .bin_factor_y = 1,
+ .mipi_freq = 499000,
+ .used = 0,
+ .fps_options = {
+ {
+ .fps = 60,
+ .pixels_per_line = 0x0E70,
+ .lines_per_frame = 0x0604,
+ },
+ {
+ .fps = 30,
+ .pixels_per_line = 0x0E70,
+ .lines_per_frame = 0x0C10,
+ },
+ {
+ }
+ },
+ },
+ {
+ .desc = "imx_VIDEO_VGA_3X4_120fps",
+ .regs = imx_VIDEO_VGA_3X4_120fps,
+ .width = 512,
+ .height = 680,
+ .bin_factor_x = 2,
+ .bin_factor_y = 2,
+ .mipi_freq = 499000,
+ .used = 0,
+ .fps_options = {
+ {
+ .fps = 120,
+ .pixels_per_line = 0x0E70,
+ .lines_per_frame = 0x0302,
+ },
+ {
+ .fps = 30,
+ .pixels_per_line = 0x0E70,
+ .lines_per_frame = 0x0C08,
+ },
+ {
+ }
+ },
+ },
+ {
+ .desc = "imx_VIDEO_NHD_9X16_30fps",
+ .regs = imx_VIDEO_NHD_9X16_30fps,
+ .width = 376,
+ .height = 656,
+ .bin_factor_x = 2,
+ .bin_factor_y = 2,
+ .mipi_freq = 499000,
+ .used = 0,
+ .fps_options = {
+ {
+ .fps = 30,
+ .pixels_per_line = 0x0E70,
+ .lines_per_frame = 0x0C0A,
+ },
+ {
+ }
+ },
+ },
+};
+
+#endif /* __IMX227_H__ */
diff --git a/drivers/staging/media/atomisp/i2c/imx/otp.c b/drivers/staging/media/atomisp/i2c/imx/otp.c
new file mode 100644
index 000000000000..462275038046
--- /dev/null
+++ b/drivers/staging/media/atomisp/i2c/imx/otp.c
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2013 Intel Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <media/v4l2-device.h>
+
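+/*
+ * Dummy OTP reader: simply returns a zeroed, devm-allocated buffer of
+ * the requested size (no device access is performed).
+ */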
+void *dummy_otp_read(struct v4l2_subdev *sd, u8 dev_addr,
+ u32 start_addr, u32 size)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ u8 *buf;
+
+ buf = devm_kzalloc(&client->dev, size, GFP_KERNEL);
+ if (!buf)
+ return ERR_PTR(-ENOMEM);
+
+ return buf;
+}
diff --git a/drivers/staging/media/atomisp/i2c/imx/otp_brcc064_e2prom.c b/drivers/staging/media/atomisp/i2c/imx/otp_brcc064_e2prom.c
new file mode 100644
index 000000000000..b11f90c5960c
--- /dev/null
+++ b/drivers/staging/media/atomisp/i2c/imx/otp_brcc064_e2prom.c
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2013 Intel Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/i2c.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <media/v4l2-device.h>
+#include "common.h"
+
+/*
+ * Read EEPROM data from the brcc064 and store it into a
+ * devm-allocated buffer. Return NULL on error.
+ * @size: number of bytes of EEPROM data to read.
+ */
+void *brcc064_otp_read(struct v4l2_subdev *sd, u8 dev_addr,
+ u32 start_addr, u32 size)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ unsigned int e2prom_i2c_addr = dev_addr >> 1;
+ static const unsigned int max_read_size = 30;
+ int addr;
+ u32 s_addr = start_addr & E2PROM_ADDR_MASK;
+ unsigned char *buffer;
+
+ buffer = devm_kzalloc(&client->dev, size, GFP_KERNEL);
+ if (!buffer)
+ return NULL;
+
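+	/*
+	 * Read in chunks of at most max_read_size bytes: each transfer
+	 * writes the 16-bit big-endian EEPROM address first and then
+	 * reads the data back from the same device address.
+	 */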
+ for (addr = s_addr; addr < size; addr += max_read_size) {
+ struct i2c_msg msg[2];
+ unsigned int i2c_addr = e2prom_i2c_addr;
+ u16 addr_buf;
+ int r;
+
+ msg[0].flags = 0;
+ msg[0].addr = i2c_addr;
+ addr_buf = cpu_to_be16(addr & 0xFFFF);
+ msg[0].len = 2;
+ msg[0].buf = (u8 *)&addr_buf;
+
+ msg[1].addr = i2c_addr;
+ msg[1].flags = I2C_M_RD;
+ msg[1].len = min(max_read_size, size - addr);
+ msg[1].buf = &buffer[addr];
+
+ r = i2c_transfer(client->adapter, msg, ARRAY_SIZE(msg));
+ if (r != ARRAY_SIZE(msg)) {
+ dev_err(&client->dev, "read failed at 0x%03x\n", addr);
+ return NULL;
+ }
+ }
+	return buffer;
+}
+
+
diff --git a/drivers/staging/media/atomisp/i2c/imx/otp_e2prom.c b/drivers/staging/media/atomisp/i2c/imx/otp_e2prom.c
new file mode 100644
index 000000000000..73d041f97811
--- /dev/null
+++ b/drivers/staging/media/atomisp/i2c/imx/otp_e2prom.c
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2013 Intel Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/i2c.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <media/v4l2-device.h>
+#include "common.h"
+
+/*
+ * Read EEPROM data from a general e2prom chip (e.g.
+ * CAT24C08, CAT24C128, le24l042cs) and store it into a
+ * devm-allocated buffer. Return NULL on error.
+ * @size: number of bytes of EEPROM data to read.
+ */
+void *e2prom_otp_read(struct v4l2_subdev *sd, u8 dev_addr,
+ u32 start_addr, u32 size)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ unsigned int e2prom_i2c_addr = dev_addr >> 1;
+ static const unsigned int max_read_size = 30;
+ int addr;
+ u32 s_addr = start_addr & E2PROM_ADDR_MASK;
+ bool two_addr = (start_addr & E2PROM_2ADDR) >> 31;
+ char *buffer;
+
+ buffer = devm_kzalloc(&client->dev, size, GFP_KERNEL);
+ if (!buffer)
+ return NULL;
+
+ for (addr = s_addr; addr < size; addr += max_read_size) {
+ struct i2c_msg msg[2];
+ unsigned int i2c_addr = e2prom_i2c_addr;
+ u16 addr_buf;
+ int r;
+
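+		/*
+		 * Chips with 16-bit addressing take a two-byte big-endian
+		 * address; single-byte chips instead fold the high address
+		 * bits into the I2C device address.
+		 */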
+ msg[0].flags = 0;
+ if (two_addr) {
+ msg[0].addr = i2c_addr;
+ addr_buf = cpu_to_be16(addr & 0xFFFF);
+ msg[0].len = 2;
+ msg[0].buf = (u8 *)&addr_buf;
+ } else {
+ i2c_addr |= (addr >> 8) & 0x7;
+ msg[0].addr = i2c_addr;
+ addr_buf = addr & 0xFF;
+ msg[0].len = 1;
+ msg[0].buf = (u8 *)&addr_buf;
+ }
+
+ msg[1].addr = i2c_addr;
+ msg[1].flags = I2C_M_RD;
+ msg[1].len = min(max_read_size, size - addr);
+ msg[1].buf = &buffer[addr];
+
+ r = i2c_transfer(client->adapter, msg, ARRAY_SIZE(msg));
+ if (r != ARRAY_SIZE(msg)) {
+ dev_err(&client->dev, "read failed at 0x%03x\n", addr);
+ return NULL;
+ }
+ }
+ return buffer;
+}
+
+
diff --git a/drivers/staging/media/atomisp/i2c/imx/otp_imx.c b/drivers/staging/media/atomisp/i2c/imx/otp_imx.c
new file mode 100644
index 000000000000..1ca27c26ef75
--- /dev/null
+++ b/drivers/staging/media/atomisp/i2c/imx/otp_imx.c
@@ -0,0 +1,191 @@
+/*
+ * Copyright (c) 2013 Intel Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/i2c.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <media/v4l2-device.h>
+#include <asm/intel-mid.h>
+#include "common.h"
+
+/* Defines for OTP Data Registers */
+#define IMX_OTP_START_ADDR 0x3B04
+#define IMX_OTP_PAGE_SIZE 64
+#define IMX_OTP_READY_REG 0x3B01
+#define IMX_OTP_PAGE_REG 0x3B02
+#define IMX_OTP_MODE_REG 0x3B00
+#define IMX_OTP_PAGE_MAX 20
+#define IMX_OTP_READY_REG_DONE 1
+#define IMX_OTP_READ_ONETIME 32
+#define IMX_OTP_MODE_READ 1
+#define IMX227_OTP_START_ADDR 0x0A04
+#define IMX227_OTP_ENABLE_REG 0x0A00
+#define IMX227_OTP_READY_REG 0x0A01
+#define IMX227_OTP_PAGE_REG 0x0A02
+#define IMX227_OTP_READY_REG_DONE 1
+#define IMX227_OTP_MODE_READ 1
+
+static int
+imx_read_otp_data(struct i2c_client *client, u16 len, u16 reg, void *val)
+{
+ struct i2c_msg msg[2];
+ u16 data[IMX_SHORT_MAX] = { 0 };
+ int err;
+
+ if (len > IMX_BYTE_MAX) {
+ dev_err(&client->dev, "%s error, invalid data length\n",
+ __func__);
+ return -EINVAL;
+ }
+
+	memset(msg, 0, sizeof(msg));
+	memset(data, 0, sizeof(data));
+
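+	/* msg[0] writes the 16-bit register address, msg[1] reads back len bytes */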
+ msg[0].addr = client->addr;
+ msg[0].flags = 0;
+ msg[0].len = I2C_MSG_LENGTH;
+ msg[0].buf = (u8 *)data;
+ /* high byte goes first */
+ data[0] = cpu_to_be16(reg);
+
+ msg[1].addr = client->addr;
+ msg[1].len = len;
+ msg[1].flags = I2C_M_RD;
+ msg[1].buf = (u8 *)data;
+
+ err = i2c_transfer(client->adapter, msg, 2);
+ if (err != 2) {
+ if (err >= 0)
+ err = -EIO;
+ goto error;
+ }
+
+ memcpy(val, data, len);
+ return 0;
+
+error:
+ dev_err(&client->dev, "read from offset 0x%x error %d", reg, err);
+ return err;
+}
+
+static int imx_read_otp_reg_array(struct i2c_client *client, u16 size, u16 addr,
+ u8 *buf)
+{
+ u16 index;
+ int ret;
+
+ for (index = 0; index + IMX_OTP_READ_ONETIME <= size;
+ index += IMX_OTP_READ_ONETIME) {
+ ret = imx_read_otp_data(client, IMX_OTP_READ_ONETIME,
+ addr + index, &buf[index]);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+void *imx_otp_read(struct v4l2_subdev *sd, u8 dev_addr,
+ u32 start_addr, u32 size)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ u8 *buf;
+ int ret;
+ int i;
+
+ buf = devm_kzalloc(&client->dev, size, GFP_KERNEL);
+ if (!buf)
+ return ERR_PTR(-ENOMEM);
+
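+	/*
+	 * The OTP is read one 64-byte page at a time: select the page,
+	 * switch the sensor to OTP read mode, then copy the page into
+	 * the corresponding slice of the output buffer.
+	 */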
+ for (i = 0; i < IMX_OTP_PAGE_MAX; i++) {
+
+ /*set page NO.*/
+ ret = imx_write_reg(client, IMX_8BIT,
+ IMX_OTP_PAGE_REG, i & 0xff);
+ if (ret)
+ goto fail;
+
+ /*set read mode*/
+ ret = imx_write_reg(client, IMX_8BIT,
+ IMX_OTP_MODE_REG, IMX_OTP_MODE_READ);
+ if (ret)
+ goto fail;
+
+ /* Reading the OTP data array */
+ ret = imx_read_otp_reg_array(client, IMX_OTP_PAGE_SIZE,
+ IMX_OTP_START_ADDR, buf + i * IMX_OTP_PAGE_SIZE);
+ if (ret)
+ goto fail;
+ }
+
+ return buf;
+fail:
+ /* Driver has failed to find valid data */
+ dev_err(&client->dev, "sensor found no valid OTP data\n");
+ return ERR_PTR(ret);
+}
+
+void *imx227_otp_read(struct v4l2_subdev *sd, u8 dev_addr,
+ u32 start_addr, u32 size)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ u8 *buf;
+ int ret;
+ int i;
+
+ buf = devm_kzalloc(&client->dev, size, GFP_KERNEL);
+ if (!buf)
+ return ERR_PTR(-ENOMEM);
+
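+	/* Same page-by-page scheme as imx_otp_read(), using the imx227 OTP registers */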
+ for (i = 0; i < IMX_OTP_PAGE_MAX; i++) {
+
+ /*set page NO.*/
+ ret = imx_write_reg(client, IMX_8BIT,
+ IMX227_OTP_PAGE_REG, i & 0xff);
+ if (ret)
+ goto fail;
+
+ /*set read mode*/
+ ret = imx_write_reg(client, IMX_8BIT,
+ IMX227_OTP_ENABLE_REG, IMX227_OTP_MODE_READ);
+ if (ret)
+ goto fail;
+
+ /* Reading the OTP data array */
+ ret = imx_read_otp_reg_array(client, IMX_OTP_PAGE_SIZE,
+ IMX227_OTP_START_ADDR, buf + i * IMX_OTP_PAGE_SIZE);
+ if (ret)
+ goto fail;
+ }
+
+ return buf;
+fail:
+ /* Driver has failed to find valid data */
+ dev_err(&client->dev, "sensor found no valid OTP data\n");
+ return ERR_PTR(ret);
+}
+
diff --git a/drivers/staging/media/atomisp/i2c/imx/vcm.c b/drivers/staging/media/atomisp/i2c/imx/vcm.c
new file mode 100644
index 000000000000..2d2df04c800a
--- /dev/null
+++ b/drivers/staging/media/atomisp/i2c/imx/vcm.c
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2012 Intel Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <media/v4l2-device.h>
+#include "../../include/linux/atomisp_platform.h"
+
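+/* Power the VCM (voice coil motor) up/down via the platform AF power_ctrl hook */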
+int vcm_power_up(struct v4l2_subdev *sd)
+{
+ const struct camera_af_platform_data *vcm_platform_data;
+
+ vcm_platform_data = camera_get_af_platform_data();
+	if (!vcm_platform_data)
+ return -ENODEV;
+ /* Enable power */
+ return vcm_platform_data->power_ctrl(sd, 1);
+}
+
+int vcm_power_down(struct v4l2_subdev *sd)
+{
+ const struct camera_af_platform_data *vcm_platform_data;
+
+ vcm_platform_data = camera_get_af_platform_data();
+	if (!vcm_platform_data)
+ return -ENODEV;
+ return vcm_platform_data->power_ctrl(sd, 0);
+}
+
diff --git a/drivers/staging/media/atomisp/i2c/libmsrlisthelper.c b/drivers/staging/media/atomisp/i2c/libmsrlisthelper.c
new file mode 100644
index 000000000000..decb65cfd7c9
--- /dev/null
+++ b/drivers/staging/media/atomisp/i2c/libmsrlisthelper.c
@@ -0,0 +1,209 @@
+/*
+ * Copyright (c) 2013 Intel Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+#include <linux/i2c.h>
+#include <linux/firmware.h>
+#include <linux/device.h>
+#include <linux/export.h>
+#include "../include/linux/libmsrlisthelper.h"
+#include <linux/module.h>
+#include <linux/slab.h>
+
+/* Tagged binary data container structure definitions. */
+struct tbd_header {
+ uint32_t tag; /*!< Tag identifier, also checks endianness */
+ uint32_t size; /*!< Container size including this header */
+ uint32_t version; /*!< Version, format 0xYYMMDDVV */
+ uint32_t revision; /*!< Revision, format 0xYYMMDDVV */
+ uint32_t config_bits; /*!< Configuration flag bits set */
+ uint32_t checksum; /*!< Global checksum, header included */
+} __packed;
+
+struct tbd_record_header {
+ uint32_t size; /*!< Size of record including header */
+ uint8_t format_id; /*!< tbd_format_t enumeration values used */
+ uint8_t packing_key; /*!< Packing method; 0 = no packing */
+ uint16_t class_id; /*!< tbd_class_t enumeration values used */
+} __packed;
+
+struct tbd_data_record_header {
+ uint16_t next_offset;
+ uint16_t flags;
+ uint16_t data_offset;
+ uint16_t data_size;
+} __packed;
+
+#define TBD_CLASS_DRV_ID 2
+
+static int set_msr_configuration(struct i2c_client *client, uint8_t *bufptr,
+ unsigned int size)
+{
+	/* The configuration data contains any number of sequences. Each
+	 * sequence starts with a length byte (uint8_t) giving the number
+	 * of bytes that follow, which are the actual data to be written
+	 * to the sensor. By convention, the first two bytes of that data
+	 * form an address in the sensor address space (high byte followed
+	 * by low byte) at which the remaining bytes of the sequence will
+	 * be written. */
+
+	uint8_t *ptr = bufptr;
+
+	while (ptr < bufptr + size) {
+ struct i2c_msg msg = {
+ .addr = client->addr,
+ .flags = 0,
+ };
+ int ret;
+
+ /* How many bytes */
+ msg.len = *ptr++;
+ /* Where the bytes are located */
+ msg.buf = ptr;
+ ptr += msg.len;
+
+ if (ptr > bufptr + size)
+ /* Accessing data beyond bounds is not tolerated */
+ return -EINVAL;
+
+ ret = i2c_transfer(client->adapter, &msg, 1);
+ if (ret < 0) {
+ dev_err(&client->dev, "i2c write error: %d", ret);
+ return ret;
+ }
+ }
+ return 0;
+}
+
+static int parse_and_apply(struct i2c_client *client, uint8_t *buffer,
+ unsigned int size)
+{
+ uint8_t *endptr8 = buffer + size;
+ struct tbd_data_record_header *header =
+ (struct tbd_data_record_header *)buffer;
+
+ /* There may be any number of datasets present */
+ unsigned int dataset = 0;
+
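+	/*
+	 * Each record header points to the next one via next_offset;
+	 * a zero next_offset terminates the chain.
+	 */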
+ do {
+ /* In below, four variables are read from buffer */
+ if ((uint8_t *)header + sizeof(*header) > endptr8)
+ return -EINVAL;
+
+ /* All data should be located within given buffer */
+ if ((uint8_t *)header + header->data_offset +
+ header->data_size > endptr8)
+ return -EINVAL;
+
+ /* We have a new valid dataset */
+ dataset++;
+ /* See whether there is MSR data */
+ /* If yes, update the reg info */
+ if (header->data_size && (header->flags & 1)) {
+ int ret;
+
+ dev_info(&client->dev,
+ "New MSR data for sensor driver (dataset %02d) size:%d\n",
+ dataset, header->data_size);
+ ret = set_msr_configuration(client,
+ buffer + header->data_offset,
+ header->data_size);
+ if (ret)
+ return ret;
+ }
+ header = (struct tbd_data_record_header *)(buffer +
+ header->next_offset);
+ } while (header->next_offset);
+
+ return 0;
+}
+
+int apply_msr_data(struct i2c_client *client, const struct firmware *fw)
+{
+ struct tbd_header *header;
+ struct tbd_record_header *record;
+
+ if (!fw) {
+ dev_warn(&client->dev, "Drv data is not loaded.\n");
+ return -EINVAL;
+ }
+
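+	/*
+	 * Validate the TBD container: the tag must be "DRVB", the declared
+	 * size must match the firmware blob, and the first record must
+	 * carry the sensor driver class id.
+	 */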
+ if (sizeof(*header) > fw->size)
+ return -EINVAL;
+
+ header = (struct tbd_header *)fw->data;
+ /* Check that we have drvb block. */
+ if (memcmp(&header->tag, "DRVB", 4))
+ return -EINVAL;
+
+ /* Check the size */
+ if (header->size != fw->size)
+ return -EINVAL;
+
+ if (sizeof(*header) + sizeof(*record) > fw->size)
+ return -EINVAL;
+
+ record = (struct tbd_record_header *)(header + 1);
+	/* Check that the class id matches tbd's drv id. */
+ if (record->class_id != TBD_CLASS_DRV_ID)
+ return -EINVAL;
+
+ /* Size 0 shall not be treated as an error */
+ if (!record->size)
+ return 0;
+
+ return parse_and_apply(client, (uint8_t *)(record + 1), record->size);
+}
+EXPORT_SYMBOL_GPL(apply_msr_data);
+
+int load_msr_list(struct i2c_client *client, char *name,
+ const struct firmware **fw)
+{
+	int ret;
+
+	ret = request_firmware(fw, name, &client->dev);
+	if (ret) {
+ dev_err(&client->dev,
+ "Error %d while requesting firmware %s\n",
+ ret, name);
+ return ret;
+ }
+ dev_info(&client->dev, "Received %lu bytes drv data\n",
+ (unsigned long)(*fw)->size);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(load_msr_list);
+
+void release_msr_list(struct i2c_client *client, const struct firmware *fw)
+{
+ release_firmware(fw);
+}
+EXPORT_SYMBOL_GPL(release_msr_list);
+
+static int init_msrlisthelper(void)
+{
+ return 0;
+}
+
+static void exit_msrlisthelper(void)
+{
+}
+
+module_init(init_msrlisthelper);
+module_exit(exit_msrlisthelper);
+
+MODULE_AUTHOR("Jukka Kaartinen <jukka.o.kaartinen@intel.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/media/atomisp/i2c/lm3554.c b/drivers/staging/media/atomisp/i2c/lm3554.c
new file mode 100644
index 000000000000..dd9c9c3ffff7
--- /dev/null
+++ b/drivers/staging/media/atomisp/i2c/lm3554.c
@@ -0,0 +1,1009 @@
+/*
+ * LED flash driver for LM3554
+ *
+ * Copyright (c) 2010-2012 Intel Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/slab.h>
+
+#include "../include/media/lm3554.h"
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <linux/acpi.h>
+#include <linux/gpio/consumer.h>
+#include "../include/linux/atomisp_gmin_platform.h"
+#include "../include/linux/atomisp.h"
+
+/* Registers */
+
+#define LM3554_TORCH_BRIGHTNESS_REG 0xA0
+#define LM3554_TORCH_MODE_SHIFT 0
+#define LM3554_TORCH_CURRENT_SHIFT 3
+#define LM3554_INDICATOR_CURRENT_SHIFT 6
+
+#define LM3554_FLASH_BRIGHTNESS_REG 0xB0
+#define LM3554_FLASH_MODE_SHIFT 0
+#define LM3554_FLASH_CURRENT_SHIFT 3
+#define LM3554_STROBE_SENSITIVITY_SHIFT 7
+
+#define LM3554_FLASH_DURATION_REG 0xC0
+#define LM3554_FLASH_TIMEOUT_SHIFT 0
+#define LM3554_CURRENT_LIMIT_SHIFT 5
+
+#define LM3554_FLAGS_REG 0xD0
+#define LM3554_FLAG_TIMEOUT (1 << 0)
+#define LM3554_FLAG_THERMAL_SHUTDOWN (1 << 1)
+#define LM3554_FLAG_LED_FAULT (1 << 2)
+#define LM3554_FLAG_TX1_INTERRUPT (1 << 3)
+#define LM3554_FLAG_TX2_INTERRUPT (1 << 4)
+#define LM3554_FLAG_LED_THERMAL_FAULT (1 << 5)
+#define LM3554_FLAG_UNUSED (1 << 6)
+#define LM3554_FLAG_INPUT_VOLTAGE_LOW (1 << 7)
+
+#define LM3554_CONFIG_REG_1 0xE0
+#define LM3554_ENVM_TX2_SHIFT 5
+#define LM3554_TX2_POLARITY_SHIFT 6
+
+struct lm3554 {
+ struct v4l2_subdev sd;
+
+ struct mutex power_lock;
+ struct v4l2_ctrl_handler ctrl_handler;
+ int power_count;
+
+ unsigned int mode;
+ int timeout;
+ u8 torch_current;
+ u8 indicator_current;
+ u8 flash_current;
+
+ struct timer_list flash_off_delay;
+ struct lm3554_platform_data *pdata;
+};
+
+#define to_lm3554(p_sd) container_of(p_sd, struct lm3554, sd)
+
+/* Return zero on success, or a negative errno on failure */
+static int lm3554_write(struct lm3554 *flash, u8 addr, u8 val)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&flash->sd);
+ int ret;
+
+ ret = i2c_smbus_write_byte_data(client, addr, val);
+
+ dev_dbg(&client->dev, "Write Addr:%02X Val:%02X %s\n", addr, val,
+ ret < 0 ? "fail" : "ok");
+
+ return ret;
+}
+
+/* Return a data byte received from the device on success, or a negative errno on failure. */
+static int lm3554_read(struct lm3554 *flash, u8 addr)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&flash->sd);
+ int ret;
+
+ ret = i2c_smbus_read_byte_data(client, addr);
+
+ dev_dbg(&client->dev, "Read Addr:%02X Val:%02X %s\n", addr, ret,
+ ret < 0 ? "fail" : "ok");
+
+ return ret;
+}
+
+/* -----------------------------------------------------------------------------
+ * Hardware configuration
+ */
+
+static int lm3554_set_mode(struct lm3554 *flash, unsigned int mode)
+{
+ u8 val;
+ int ret;
+
+ val = (mode << LM3554_FLASH_MODE_SHIFT) |
+ (flash->flash_current << LM3554_FLASH_CURRENT_SHIFT);
+
+ ret = lm3554_write(flash, LM3554_FLASH_BRIGHTNESS_REG, val);
+ if (ret == 0)
+ flash->mode = mode;
+ return ret;
+}
+
+static int lm3554_set_torch(struct lm3554 *flash)
+{
+ u8 val;
+
+ val = (flash->mode << LM3554_TORCH_MODE_SHIFT) |
+ (flash->torch_current << LM3554_TORCH_CURRENT_SHIFT) |
+ (flash->indicator_current << LM3554_INDICATOR_CURRENT_SHIFT);
+
+ return lm3554_write(flash, LM3554_TORCH_BRIGHTNESS_REG, val);
+}
+
+static int lm3554_set_flash(struct lm3554 *flash)
+{
+ u8 val;
+
+ val = (flash->mode << LM3554_FLASH_MODE_SHIFT) |
+ (flash->flash_current << LM3554_FLASH_CURRENT_SHIFT);
+
+ return lm3554_write(flash, LM3554_FLASH_BRIGHTNESS_REG, val);
+}
+
+static int lm3554_set_duration(struct lm3554 *flash)
+{
+ u8 val;
+
+ val = (flash->timeout << LM3554_FLASH_TIMEOUT_SHIFT) |
+ (flash->pdata->current_limit << LM3554_CURRENT_LIMIT_SHIFT);
+
+ return lm3554_write(flash, LM3554_FLASH_DURATION_REG, val);
+}
+
+static int lm3554_set_config1(struct lm3554 *flash)
+{
+ u8 val;
+
+ val = (flash->pdata->envm_tx2 << LM3554_ENVM_TX2_SHIFT) |
+ (flash->pdata->tx2_polarity << LM3554_TX2_POLARITY_SHIFT);
+ return lm3554_write(flash, LM3554_CONFIG_REG_1, val);
+}
+
+/* -----------------------------------------------------------------------------
+ * Hardware trigger
+ */
+static void lm3554_flash_off_delay(unsigned long arg)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata((struct i2c_client *)arg);
+ struct lm3554 *flash = to_lm3554(sd);
+ struct lm3554_platform_data *pdata = flash->pdata;
+
+ gpio_set_value(pdata->gpio_strobe, 0);
+}
+
+static int lm3554_hw_strobe(struct i2c_client *client, bool strobe)
+{
+ int ret, timer_pending;
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct lm3554 *flash = to_lm3554(sd);
+ struct lm3554_platform_data *pdata = flash->pdata;
+
+ /*
+ * An abnormally high flash current is observed when strobing off the
+ * flash. The workaround is to first set the flash current to a lower
+ * level, wait a short moment, and then strobe off the flash.
+ */
+
+ timer_pending = del_timer_sync(&flash->flash_off_delay);
+
+ /* Flash off */
+ if (!strobe) {
+ /* set current to 70mA and wait a while */
+ ret = lm3554_write(flash, LM3554_FLASH_BRIGHTNESS_REG, 0);
+ if (ret < 0)
+ goto err;
+ mod_timer(&flash->flash_off_delay,
+ jiffies + msecs_to_jiffies(LM3554_TIMER_DELAY));
+ return 0;
+ }
+
+ /* Flash on */
+
+ /*
+ * If the timer was cancelled before it ran, the flash was not
+ * strobed off, so strobe it off here.
+ */
+ if (timer_pending)
+ gpio_set_value(pdata->gpio_strobe, 0);
+
+ /* Restore flash current settings */
+ ret = lm3554_set_flash(flash);
+ if (ret < 0)
+ goto err;
+
+ /* Strobe on Flash */
+ gpio_set_value(pdata->gpio_strobe, 1);
+
+ return 0;
+err:
+ dev_err(&client->dev, "failed to %s flash strobe (%d)\n",
+ strobe ? "on" : "off", ret);
+ return ret;
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 controls
+ */
+
+static int lm3554_read_status(struct lm3554 *flash)
+{
+ int ret;
+ struct i2c_client *client = v4l2_get_subdevdata(&flash->sd);
+
+ /* NOTE: reading the register clears the fault status */
+ ret = lm3554_read(flash, LM3554_FLAGS_REG);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * According to the datasheet, bit 6 always reads back as '1'.
+ * Clear it first.
+ */
+ ret &= ~LM3554_FLAG_UNUSED;
+
+ /*
+ * Do not treat the TX1/TX2 signals as errors: the MSIC does not
+ * turn the flash off, but switches it to torch mode in hardware
+ * based on the GSM modem signal.
+ */
+ ret &= ~(LM3554_FLAG_TX1_INTERRUPT | LM3554_FLAG_TX2_INTERRUPT);
+
+ if (ret > 0)
+ dev_dbg(&client->dev, "LM3554 flag status: %02x\n", ret);
+
+ return ret;
+}
+
+static int lm3554_s_flash_timeout(struct v4l2_subdev *sd, u32 val)
+{
+ struct lm3554 *flash = to_lm3554(sd);
+
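+ /*
+ * Convert the clamped timeout into a zero-based register step count;
+ * lm3554_g_flash_timeout() performs the inverse conversion. As an
+ * illustration only: if LM3554_TIMEOUT_STEPSIZE were 32, a requested
+ * value of 96 would map to register value 2.
+ */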
+ val = clamp(val, LM3554_MIN_TIMEOUT, LM3554_MAX_TIMEOUT);
+ val = val / LM3554_TIMEOUT_STEPSIZE - 1;
+
+ flash->timeout = val;
+
+ return lm3554_set_duration(flash);
+}
+
+static int lm3554_g_flash_timeout(struct v4l2_subdev *sd, s32 *val)
+{
+ struct lm3554 *flash = to_lm3554(sd);
+
+ *val = (u32)(flash->timeout + 1) * LM3554_TIMEOUT_STEPSIZE;
+
+ return 0;
+}
+
+static int lm3554_s_flash_intensity(struct v4l2_subdev *sd, u32 intensity)
+{
+ struct lm3554 *flash = to_lm3554(sd);
+
+ intensity = LM3554_CLAMP_PERCENTAGE(intensity);
+ intensity = LM3554_PERCENT_TO_VALUE(intensity, LM3554_FLASH_STEP);
+
+ flash->flash_current = intensity;
+
+ return lm3554_set_flash(flash);
+}
+
+static int lm3554_g_flash_intensity(struct v4l2_subdev *sd, s32 *val)
+{
+ struct lm3554 *flash = to_lm3554(sd);
+
+ *val = LM3554_VALUE_TO_PERCENT((u32)flash->flash_current,
+ LM3554_FLASH_STEP);
+
+ return 0;
+}
+
+static int lm3554_s_torch_intensity(struct v4l2_subdev *sd, u32 intensity)
+{
+ struct lm3554 *flash = to_lm3554(sd);
+
+ intensity = LM3554_CLAMP_PERCENTAGE(intensity);
+ intensity = LM3554_PERCENT_TO_VALUE(intensity, LM3554_TORCH_STEP);
+
+ flash->torch_current = intensity;
+
+ return lm3554_set_torch(flash);
+}
+
+static int lm3554_g_torch_intensity(struct v4l2_subdev *sd, s32 *val)
+{
+ struct lm3554 *flash = to_lm3554(sd);
+
+ *val = LM3554_VALUE_TO_PERCENT((u32)flash->torch_current,
+ LM3554_TORCH_STEP);
+
+ return 0;
+}
+
+static int lm3554_s_indicator_intensity(struct v4l2_subdev *sd, u32 intensity)
+{
+ struct lm3554 *flash = to_lm3554(sd);
+
+ intensity = LM3554_CLAMP_PERCENTAGE(intensity);
+ intensity = LM3554_PERCENT_TO_VALUE(intensity, LM3554_INDICATOR_STEP);
+
+ flash->indicator_current = intensity;
+
+ return lm3554_set_torch(flash);
+}
+
+static int lm3554_g_indicator_intensity(struct v4l2_subdev *sd, s32 *val)
+{
+ struct lm3554 *flash = to_lm3554(sd);
+
+ *val = LM3554_VALUE_TO_PERCENT((u32)flash->indicator_current,
+ LM3554_INDICATOR_STEP);
+
+ return 0;
+}
+
+static int lm3554_s_flash_strobe(struct v4l2_subdev *sd, u32 val)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ return lm3554_hw_strobe(client, val);
+}
+
+static int lm3554_s_flash_mode(struct v4l2_subdev *sd, u32 new_mode)
+{
+ struct lm3554 *flash = to_lm3554(sd);
+ unsigned int mode;
+
+ switch (new_mode) {
+ case ATOMISP_FLASH_MODE_OFF:
+ mode = LM3554_MODE_SHUTDOWN;
+ break;
+ case ATOMISP_FLASH_MODE_FLASH:
+ mode = LM3554_MODE_FLASH;
+ break;
+ case ATOMISP_FLASH_MODE_INDICATOR:
+ mode = LM3554_MODE_INDICATOR;
+ break;
+ case ATOMISP_FLASH_MODE_TORCH:
+ mode = LM3554_MODE_TORCH;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return lm3554_set_mode(flash, mode);
+}
+
+static int lm3554_g_flash_mode(struct v4l2_subdev *sd, s32 *val)
+{
+ struct lm3554 *flash = to_lm3554(sd);
+ *val = flash->mode;
+ return 0;
+}
+
+static int lm3554_g_flash_status(struct v4l2_subdev *sd, s32 *val)
+{
+ struct lm3554 *flash = to_lm3554(sd);
+ int value;
+
+ value = lm3554_read_status(flash);
+ if (value < 0)
+ return value;
+
+ if (value & LM3554_FLAG_TIMEOUT)
+ *val = ATOMISP_FLASH_STATUS_TIMEOUT;
+ else if (value > 0)
+ *val = ATOMISP_FLASH_STATUS_HW_ERROR;
+ else
+ *val = ATOMISP_FLASH_STATUS_OK;
+
+ return 0;
+}
+
+#ifndef CSS15
+static int lm3554_g_flash_status_register(struct v4l2_subdev *sd, s32 *val)
+{
+ struct lm3554 *flash = to_lm3554(sd);
+ int ret;
+
+ ret = lm3554_read(flash, LM3554_FLAGS_REG);
+
+ if (ret < 0)
+ return ret;
+
+ *val = ret;
+ return 0;
+}
+#endif
+
+static int lm3554_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct lm3554 *dev =
+ container_of(ctrl->handler, struct lm3554, ctrl_handler);
+ int ret = 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_FLASH_TIMEOUT:
+ ret = lm3554_s_flash_timeout(&dev->sd, ctrl->val);
+ break;
+ case V4L2_CID_FLASH_INTENSITY:
+ ret = lm3554_s_flash_intensity(&dev->sd, ctrl->val);
+ break;
+ case V4L2_CID_FLASH_TORCH_INTENSITY:
+ ret = lm3554_s_torch_intensity(&dev->sd, ctrl->val);
+ break;
+ case V4L2_CID_FLASH_INDICATOR_INTENSITY:
+ ret = lm3554_s_indicator_intensity(&dev->sd, ctrl->val);
+ break;
+ case V4L2_CID_FLASH_STROBE:
+ ret = lm3554_s_flash_strobe(&dev->sd, ctrl->val);
+ break;
+ case V4L2_CID_FLASH_MODE:
+ ret = lm3554_s_flash_mode(&dev->sd, ctrl->val);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ return ret;
+}
+
+static int lm3554_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct lm3554 *dev =
+ container_of(ctrl->handler, struct lm3554, ctrl_handler);
+ int ret = 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_FLASH_TIMEOUT:
+ ret = lm3554_g_flash_timeout(&dev->sd, &ctrl->val);
+ break;
+ case V4L2_CID_FLASH_INTENSITY:
+ ret = lm3554_g_flash_intensity(&dev->sd, &ctrl->val);
+ break;
+ case V4L2_CID_FLASH_TORCH_INTENSITY:
+ ret = lm3554_g_torch_intensity(&dev->sd, &ctrl->val);
+ break;
+ case V4L2_CID_FLASH_INDICATOR_INTENSITY:
+ ret = lm3554_g_indicator_intensity(&dev->sd, &ctrl->val);
+ break;
+ case V4L2_CID_FLASH_MODE:
+ ret = lm3554_g_flash_mode(&dev->sd, &ctrl->val);
+ break;
+ case V4L2_CID_FLASH_STATUS:
+ ret = lm3554_g_flash_status(&dev->sd, &ctrl->val);
+ break;
+#ifndef CSS15
+ case V4L2_CID_FLASH_STATUS_REGISTER:
+ ret = lm3554_g_flash_status_register(&dev->sd, &ctrl->val);
+ break;
+#endif
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops ctrl_ops = {
+ .s_ctrl = lm3554_s_ctrl,
+ .g_volatile_ctrl = lm3554_g_volatile_ctrl
+};
+
+struct v4l2_ctrl_config lm3554_controls[] = {
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_FLASH_TIMEOUT,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Flash Timeout",
+ .min = 0x0,
+ .max = LM3554_MAX_TIMEOUT,
+ .step = 0x01,
+ .def = LM3554_DEFAULT_TIMEOUT,
+ .flags = 0,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_FLASH_INTENSITY,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Flash Intensity",
+ .min = LM3554_MIN_PERCENT,
+ .max = LM3554_MAX_PERCENT,
+ .step = 0x01,
+ .def = LM3554_FLASH_DEFAULT_BRIGHTNESS,
+ .flags = 0,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_FLASH_TORCH_INTENSITY,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Torch Intensity",
+ .min = LM3554_MIN_PERCENT,
+ .max = LM3554_MAX_PERCENT,
+ .step = 0x01,
+ .def = LM3554_TORCH_DEFAULT_BRIGHTNESS,
+ .flags = 0,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_FLASH_INDICATOR_INTENSITY,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Indicator Intensity",
+ .min = LM3554_MIN_PERCENT,
+ .max = LM3554_MAX_PERCENT,
+ .step = 0x01,
+ .def = LM3554_INDICATOR_DEFAULT_BRIGHTNESS,
+ .flags = 0,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_FLASH_STROBE,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "Flash Strobe",
+ .min = 0,
+ .max = 1,
+ .step = 1,
+ .def = 0,
+ .flags = 0,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_FLASH_MODE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Flash Mode",
+ .min = 0,
+ .max = 100,
+ .step = 1,
+ .def = ATOMISP_FLASH_MODE_OFF,
+ .flags = 0,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_FLASH_STATUS,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "Flash Status",
+ .min = 0,
+ .max = 100,
+ .step = 1,
+ .def = ATOMISP_FLASH_STATUS_OK,
+ .flags = 0,
+ },
+#ifndef CSS15
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_FLASH_STATUS_REGISTER,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "Flash Status Register",
+ .min = 0,
+ .max = 100,
+ .step = 1,
+ .def = 0,
+ .flags = 0,
+ },
+#endif
+};
+
+/* -----------------------------------------------------------------------------
+ * V4L2 subdev core operations
+ */
+
+/* Put the device into a known state. */
+static int lm3554_setup(struct lm3554 *flash)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&flash->sd);
+ int ret;
+
+ /* clear the flags register */
+ ret = lm3554_read(flash, LM3554_FLAGS_REG);
+ if (ret < 0)
+ return ret;
+
+ dev_dbg(&client->dev, "Fault info: %02x\n", ret);
+
+ ret = lm3554_set_config1(flash);
+ if (ret < 0)
+ return ret;
+
+ ret = lm3554_set_duration(flash);
+ if (ret < 0)
+ return ret;
+
+ ret = lm3554_set_torch(flash);
+ if (ret < 0)
+ return ret;
+
+ ret = lm3554_set_flash(flash);
+ if (ret < 0)
+ return ret;
+
+ /* read status */
+ ret = lm3554_read_status(flash);
+ if (ret < 0)
+ return ret;
+
+ return ret ? -EIO : 0;
+}
+
+static int __lm3554_s_power(struct lm3554 *flash, int power)
+{
+ struct lm3554_platform_data *pdata = flash->pdata;
+ int ret;
+
+ /* Initialize the flash driver */
+ gpio_set_value(pdata->gpio_reset, power);
+ usleep_range(100, 100 + 1);
+
+ if (power) {
+ /* Set up default values. This makes sure that the chip
+ * is in a known state.
+ */
+ ret = lm3554_setup(flash);
+ if (ret < 0) {
+ __lm3554_s_power(flash, 0);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int lm3554_s_power(struct v4l2_subdev *sd, int power)
+{
+ struct lm3554 *flash = to_lm3554(sd);
+ int ret = 0;
+
+ mutex_lock(&flash->power_lock);
+
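+ /*
+ * power_count is a use count: the hardware is only toggled for the
+ * first user (0 -> 1) and the last user (1 -> 0), which is what the
+ * comparison below checks.
+ */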
+ if (flash->power_count == !power) {
+ ret = __lm3554_s_power(flash, !!power);
+ if (ret < 0)
+ goto done;
+ }
+
+ flash->power_count += power ? 1 : -1;
+ WARN_ON(flash->power_count < 0);
+
+done:
+ mutex_unlock(&flash->power_lock);
+ return ret;
+}
+
+static const struct v4l2_subdev_core_ops lm3554_core_ops = {
+ .s_power = lm3554_s_power,
+};
+
+static const struct v4l2_subdev_ops lm3554_ops = {
+ .core = &lm3554_core_ops,
+};
+
+static int lm3554_detect(struct v4l2_subdev *sd)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct i2c_adapter *adapter = client->adapter;
+ struct lm3554 *flash = to_lm3554(sd);
+ int ret;
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) {
+ dev_err(&client->dev, "lm3554_detect i2c error\n");
+ return -ENODEV;
+ }
+
+ /* Power up the flash driver and reset it */
+ ret = lm3554_s_power(&flash->sd, 1);
+ if (ret < 0) {
+ dev_err(&client->dev, "Failed to power on lm3554 LED flash\n");
+ } else {
+ dev_dbg(&client->dev, "Successfully detected lm3554 LED flash\n");
+ lm3554_s_power(&flash->sd, 0);
+ }
+
+ return ret;
+}
+
+static int lm3554_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ return lm3554_s_power(sd, 1);
+}
+
+static int lm3554_close(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ return lm3554_s_power(sd, 0);
+}
+
+static const struct v4l2_subdev_internal_ops lm3554_internal_ops = {
+ .registered = lm3554_detect,
+ .open = lm3554_open,
+ .close = lm3554_close,
+};
+
+/* -----------------------------------------------------------------------------
+ * I2C driver
+ */
+#ifdef CONFIG_PM
+
+static int lm3554_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *subdev = i2c_get_clientdata(client);
+ struct lm3554 *flash = to_lm3554(subdev);
+ int rval;
+
+ if (flash->power_count == 0)
+ return 0;
+
+ rval = __lm3554_s_power(flash, 0);
+
+ dev_dbg(&client->dev, "Suspend %s\n", rval < 0 ? "failed" : "ok");
+
+ return rval;
+}
+
+static int lm3554_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *subdev = i2c_get_clientdata(client);
+ struct lm3554 *flash = to_lm3554(subdev);
+ int rval;
+
+ if (flash->power_count == 0)
+ return 0;
+
+ rval = __lm3554_s_power(flash, 1);
+
+ dev_dbg(&client->dev, "Resume %s\n", rval < 0 ? "fail" : "ok");
+
+ return rval;
+}
+
+#else
+
+#define lm3554_suspend NULL
+#define lm3554_resume NULL
+
+#endif /* CONFIG_PM */
+
+static int lm3554_gpio_init(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct lm3554 *flash = to_lm3554(sd);
+ struct lm3554_platform_data *pdata = flash->pdata;
+ int ret;
+
+ if (!gpio_is_valid(pdata->gpio_reset))
+ return -EINVAL;
+
+ ret = gpio_direction_output(pdata->gpio_reset, 0);
+ if (ret < 0)
+ goto err_gpio_reset;
+ dev_info(&client->dev, "flash led reset successfully\n");
+
+ if (!gpio_is_valid(pdata->gpio_strobe)) {
+ ret = -EINVAL;
+ goto err_gpio_dir_reset;
+ }
+
+ ret = gpio_direction_output(pdata->gpio_strobe, 0);
+ if (ret < 0)
+ goto err_gpio_strobe;
+
+ return 0;
+
+err_gpio_strobe:
+ gpio_free(pdata->gpio_strobe);
+err_gpio_dir_reset:
+ gpio_direction_output(pdata->gpio_reset, 0);
+err_gpio_reset:
+ gpio_free(pdata->gpio_reset);
+
+ return ret;
+}
+
+static int lm3554_gpio_uninit(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct lm3554 *flash = to_lm3554(sd);
+ struct lm3554_platform_data *pdata = flash->pdata;
+ int ret;
+
+ ret = gpio_direction_output(pdata->gpio_strobe, 0);
+ if (ret < 0)
+ return ret;
+
+ ret = gpio_direction_output(pdata->gpio_reset, 0);
+ if (ret < 0)
+ return ret;
+
+ gpio_free(pdata->gpio_strobe);
+ gpio_free(pdata->gpio_reset);
+ return 0;
+}
+
+void *lm3554_platform_data_func(struct i2c_client *client)
+{
+ static struct lm3554_platform_data platform_data;
+
+ if (ACPI_COMPANION(&client->dev)) {
+ platform_data.gpio_reset =
+ desc_to_gpio(gpiod_get_index(&(client->dev),
+ NULL, 2, GPIOD_OUT_LOW));
+ platform_data.gpio_strobe =
+ desc_to_gpio(gpiod_get_index(&(client->dev),
+ NULL, 0, GPIOD_OUT_LOW));
+ platform_data.gpio_torch =
+ desc_to_gpio(gpiod_get_index(&(client->dev),
+ NULL, 1, GPIOD_OUT_LOW));
+ } else {
+ platform_data.gpio_reset = -1;
+ platform_data.gpio_strobe = -1;
+ platform_data.gpio_torch = -1;
+ }
+
+ dev_info(&client->dev, "camera pdata: lm3554: reset: %d strobe %d torch %d\n",
+ platform_data.gpio_reset, platform_data.gpio_strobe,
+ platform_data.gpio_torch);
+
+ /* Set TX2 mode, so that the ENVM/TX2 pin is a power amplifier sync input:
+ * ENVM/TX pin asserted: flash forced into torch;
+ * ENVM/TX pin deasserted: flash set back;
+ */
+ platform_data.envm_tx2 = 1;
+ platform_data.tx2_polarity = 0;
+
+ /* set peak current limit to be 1000mA */
+ platform_data.current_limit = 0;
+
+ return &platform_data;
+}
+
+static int lm3554_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int err = 0;
+ struct lm3554 *flash;
+ unsigned int i;
+ int ret;
+
+ flash = kzalloc(sizeof(*flash), GFP_KERNEL);
+ if (!flash) {
+ dev_err(&client->dev, "out of memory\n");
+ return -ENOMEM;
+ }
+
+ flash->pdata = client->dev.platform_data;
+
+ if (!flash->pdata || ACPI_COMPANION(&client->dev))
+ flash->pdata = lm3554_platform_data_func(client);
+
+ v4l2_i2c_subdev_init(&flash->sd, client, &lm3554_ops);
+ flash->sd.internal_ops = &lm3554_internal_ops;
+ flash->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ flash->mode = ATOMISP_FLASH_MODE_OFF;
+ flash->timeout = LM3554_MAX_TIMEOUT / LM3554_TIMEOUT_STEPSIZE - 1;
+ ret =
+ v4l2_ctrl_handler_init(&flash->ctrl_handler,
+ ARRAY_SIZE(lm3554_controls));
+ if (ret) {
+ dev_err(&client->dev, "error initialize a ctrl_handler.\n");
+ goto fail2;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(lm3554_controls); i++)
+ v4l2_ctrl_new_custom(&flash->ctrl_handler, &lm3554_controls[i],
+ NULL);
+
+ if (flash->ctrl_handler.error) {
+
+ dev_err(&client->dev, "ctrl_handler error.\n");
+ goto fail2;
+ }
+
+ flash->sd.ctrl_handler = &flash->ctrl_handler;
+ err = media_entity_pads_init(&flash->sd.entity, 0, NULL);
+ if (err) {
+ dev_err(&client->dev, "error initialize a media entity.\n");
+ goto fail1;
+ }
+
+ flash->sd.entity.function = MEDIA_ENT_F_FLASH;
+
+ mutex_init(&flash->power_lock);
+
+ setup_timer(&flash->flash_off_delay, lm3554_flash_off_delay,
+ (unsigned long)client);
+
+ err = lm3554_gpio_init(client);
+ if (err) {
+ dev_err(&client->dev, "gpio request/direction_output fail");
+ goto fail2;
+ }
+ if (ACPI_HANDLE(&client->dev))
+ err = atomisp_register_i2c_module(&flash->sd, NULL, LED_FLASH);
+ return 0;
+fail2:
+ media_entity_cleanup(&flash->sd.entity);
+ v4l2_ctrl_handler_free(&flash->ctrl_handler);
+fail1:
+ v4l2_device_unregister_subdev(&flash->sd);
+ kfree(flash);
+
+ return err;
+}
+
+static int lm3554_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct lm3554 *flash = to_lm3554(sd);
+ int ret;
+
+ media_entity_cleanup(&flash->sd.entity);
+ v4l2_ctrl_handler_free(&flash->ctrl_handler);
+ v4l2_device_unregister_subdev(sd);
+
+ atomisp_gmin_remove_subdev(sd);
+
+ del_timer_sync(&flash->flash_off_delay);
+
+ ret = lm3554_gpio_uninit(client);
+ if (ret < 0)
+ goto fail;
+
+ kfree(flash);
+
+ return 0;
+fail:
+ dev_err(&client->dev, "gpio request/direction_output fail");
+ return ret;
+}
+
+static const struct i2c_device_id lm3554_id[] = {
+ {LM3554_NAME, 0},
+ {},
+};
+
+MODULE_DEVICE_TABLE(i2c, lm3554_id);
+
+static const struct dev_pm_ops lm3554_pm_ops = {
+ .suspend = lm3554_suspend,
+ .resume = lm3554_resume,
+};
+
+static struct acpi_device_id lm3554_acpi_match[] = {
+ { "INTCF1C" },
+ {},
+};
+
+MODULE_DEVICE_TABLE(acpi, lm3554_acpi_match);
+
+static struct i2c_driver lm3554_driver = {
+ .driver = {
+ .name = LM3554_NAME,
+ .pm = &lm3554_pm_ops,
+ .acpi_match_table = ACPI_PTR(lm3554_acpi_match),
+ },
+ .probe = lm3554_probe,
+ .remove = lm3554_remove,
+ .id_table = lm3554_id,
+};
+
+static __init int init_lm3554(void)
+{
+ return i2c_add_driver(&lm3554_driver);
+}
+
+static __exit void exit_lm3554(void)
+{
+ i2c_del_driver(&lm3554_driver);
+}
+
+module_init(init_lm3554);
+module_exit(exit_lm3554);
+MODULE_AUTHOR("Jing Tao <jing.tao@intel.com>");
+MODULE_DESCRIPTION("LED flash driver for LM3554");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/media/atomisp/i2c/mt9m114.c b/drivers/staging/media/atomisp/i2c/mt9m114.c
new file mode 100644
index 000000000000..ced175c268d1
--- /dev/null
+++ b/drivers/staging/media/atomisp/i2c/mt9m114.c
@@ -0,0 +1,1963 @@
+/*
+ * Support for mt9m114 Camera Sensor.
+ *
+ * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/kmod.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/gpio.h>
+#include <linux/acpi.h>
+#include "../include/linux/atomisp_gmin_platform.h"
+#include <media/v4l2-device.h>
+
+#include "mt9m114.h"
+
+#define to_mt9m114_sensor(sd) container_of(sd, struct mt9m114_device, sd)
+
+/*
+ * TODO: use debug parameter to actually define when debug messages should
+ * be printed.
+ */
+static int debug;
+static int aaalock;
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "Debug level (0-1)");
+
+static int mt9m114_t_vflip(struct v4l2_subdev *sd, int value);
+static int mt9m114_t_hflip(struct v4l2_subdev *sd, int value);
+static int mt9m114_wait_state(struct i2c_client *client, int timeout);
+
+static int
+mt9m114_read_reg(struct i2c_client *client, u16 data_length, u32 reg, u32 *val)
+{
+ int err;
+ struct i2c_msg msg[2];
+ unsigned char data[4];
+
+ if (!client->adapter) {
+ v4l2_err(client, "%s error, no client->adapter\n", __func__);
+ return -ENODEV;
+ }
+
+ if (data_length != MISENSOR_8BIT && data_length != MISENSOR_16BIT
+ && data_length != MISENSOR_32BIT) {
+ v4l2_err(client, "%s error, invalid data length\n", __func__);
+ return -EINVAL;
+ }
+
+ msg[0].addr = client->addr;
+ msg[0].flags = 0;
+ msg[0].len = MSG_LEN_OFFSET;
+ msg[0].buf = data;
+
+ /* high byte goes out first */
+ data[0] = (u16) (reg >> 8);
+ data[1] = (u16) (reg & 0xff);
+
+ msg[1].addr = client->addr;
+ msg[1].len = data_length;
+ msg[1].flags = I2C_M_RD;
+ msg[1].buf = data;
+
+ err = i2c_transfer(client->adapter, msg, 2);
+
+ if (err >= 0) {
+ *val = 0;
+ /* high byte comes first */
+ if (data_length == MISENSOR_8BIT)
+ *val = data[0];
+ else if (data_length == MISENSOR_16BIT)
+ *val = data[1] + (data[0] << 8);
+ else
+ *val = data[3] + (data[2] << 8) +
+ (data[1] << 16) + (data[0] << 24);
+
+ return 0;
+ }
+
+ dev_err(&client->dev, "read from offset 0x%x error %d", reg, err);
+ return err;
+}
+
+static int
+mt9m114_write_reg(struct i2c_client *client, u16 data_length, u16 reg, u32 val)
+{
+ int num_msg;
+ struct i2c_msg msg;
+ unsigned char data[6] = {0};
+ u16 *wreg;
+ int retry = 0;
+
+ if (!client->adapter) {
+ v4l2_err(client, "%s error, no client->adapter\n", __func__);
+ return -ENODEV;
+ }
+
+ if (data_length != MISENSOR_8BIT && data_length != MISENSOR_16BIT
+ && data_length != MISENSOR_32BIT) {
+ v4l2_err(client, "%s error, invalid data_length\n", __func__);
+ return -EINVAL;
+ }
+
+ memset(&msg, 0, sizeof(msg));
+
+again:
+ msg.addr = client->addr;
+ msg.flags = 0;
+ msg.len = 2 + data_length;
+ msg.buf = data;
+
+ /* high byte goes out first */
+ wreg = (u16 *)data;
+ *wreg = cpu_to_be16(reg);
+
+ if (data_length == MISENSOR_8BIT) {
+ data[2] = (u8)(val);
+ } else if (data_length == MISENSOR_16BIT) {
+ u16 *wdata = (u16 *)&data[2];
+ *wdata = cpu_to_be16((u16)val);
+ } else {
+ /* MISENSOR_32BIT */
+ u32 *wdata = (u32 *)&data[2];
+ *wdata = cpu_to_be32(val);
+ }
+
+ num_msg = i2c_transfer(client->adapter, &msg, 1);
+
+ /*
+ * HACK: Need some delay here for Rev 2 sensors otherwise some
+ * registers do not seem to load correctly.
+ */
+ mdelay(1);
+
+ if (num_msg >= 0)
+ return 0;
+
+ dev_err(&client->dev, "write error: wrote 0x%x to offset 0x%x error %d",
+ val, reg, num_msg);
+ if (retry <= I2C_RETRY_COUNT) {
+ dev_dbg(&client->dev, "retrying... %d", retry);
+ retry++;
+ msleep(20);
+ goto again;
+ }
+
+ return num_msg;
+}
+
+/**
+ * misensor_rmw_reg - Read/Modify/Write a value to a register in the sensor
+ * device
+ * @client: i2c driver client structure
+ * @data_length: 8/16/32-bits length
+ * @reg: register address
+ * @mask: masked out bits
+ * @set: bits set
+ *
+ * Read/modify/write a value to a register in the sensor device.
+ * Returns zero if successful, or non-zero otherwise.
+ */
+static int
+misensor_rmw_reg(struct i2c_client *client, u16 data_length, u16 reg,
+ u32 mask, u32 set)
+{
+ int err;
+ u32 val;
+
+ /* Exit when no mask */
+ if (mask == 0)
+ return 0;
+
+ /* @mask must not exceed data length */
+ switch (data_length) {
+ case MISENSOR_8BIT:
+ if (mask & ~0xff)
+ return -EINVAL;
+ break;
+ case MISENSOR_16BIT:
+ if (mask & ~0xffff)
+ return -EINVAL;
+ break;
+ case MISENSOR_32BIT:
+ break;
+ default:
+ /* Wrong @data_length */
+ return -EINVAL;
+ }
+
+ err = mt9m114_read_reg(client, data_length, reg, &val);
+ if (err) {
+ v4l2_err(client, "misensor_rmw_reg error exit, read failed\n");
+ return -EINVAL;
+ }
+
+ val &= ~mask;
+
+ /*
+ * OR in the @set value if one is given, shifting it to the target
+ * bit location. @set must only set bits included in @mask.
+ *
+ * REVISIT: This function expects @set to be non-shifted; its shift
+ * amount is taken from the LSB position of @mask. How about passing
+ * values already shifted to the right offset, and avoiding this
+ * unneeded shift operation?
+ */
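+ /*
+ * Worked example (illustrative): with mask = 0x30 and set = 0x2,
+ * ffs(mask) - 1 == 4, so set becomes 0x20 and only bits 5:4 of val
+ * are updated.
+ */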
+ set <<= ffs(mask) - 1;
+ val |= set & mask;
+
+ err = mt9m114_write_reg(client, data_length, reg, val);
+ if (err) {
+ v4l2_err(client, "misensor_rmw_reg error exit, write failed\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+
+static int __mt9m114_flush_reg_array(struct i2c_client *client,
+ struct mt9m114_write_ctrl *ctrl)
+{
+ struct i2c_msg msg;
+ const int num_msg = 1;
+ int ret;
+ int retry = 0;
+
+ if (ctrl->index == 0)
+ return 0;
+
+again:
+ msg.addr = client->addr;
+ msg.flags = 0;
+ msg.len = 2 + ctrl->index;
+ ctrl->buffer.addr = cpu_to_be16(ctrl->buffer.addr);
+ msg.buf = (u8 *)&ctrl->buffer;
+
+ ret = i2c_transfer(client->adapter, &msg, num_msg);
+ if (ret != num_msg) {
+ if (++retry <= I2C_RETRY_COUNT) {
+ dev_dbg(&client->dev, "retrying... %d\n", retry);
+ msleep(20);
+ goto again;
+ }
+ dev_err(&client->dev, "%s: i2c transfer error\n", __func__);
+ return -EIO;
+ }
+
+ ctrl->index = 0;
+
+ /*
+ * REVISIT: Previously we had a delay after writing data to sensor.
+ * But it was removed as our tests have shown it is not necessary
+ * anymore.
+ */
+
+ return 0;
+}
+
+static int __mt9m114_buf_reg_array(struct i2c_client *client,
+ struct mt9m114_write_ctrl *ctrl,
+ const struct misensor_reg *next)
+{
+ u16 *data16;
+ u32 *data32;
+ int err;
+
+ /* Insufficient buffer? Let's flush and get more free space. */
+ if (ctrl->index + next->length >= MT9M114_MAX_WRITE_BUF_SIZE) {
+ err = __mt9m114_flush_reg_array(client, ctrl);
+ if (err)
+ return err;
+ }
+
+ switch (next->length) {
+ case MISENSOR_8BIT:
+ ctrl->buffer.data[ctrl->index] = (u8)next->val;
+ break;
+ case MISENSOR_16BIT:
+ data16 = (u16 *)&ctrl->buffer.data[ctrl->index];
+ *data16 = cpu_to_be16((u16)next->val);
+ break;
+ case MISENSOR_32BIT:
+ data32 = (u32 *)&ctrl->buffer.data[ctrl->index];
+ *data32 = cpu_to_be32(next->val);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* When first item is added, we need to store its starting address */
+ if (ctrl->index == 0)
+ ctrl->buffer.addr = next->reg;
+
+ ctrl->index += next->length;
+
+ return 0;
+}
+
+static int
+__mt9m114_write_reg_is_consecutive(struct i2c_client *client,
+ struct mt9m114_write_ctrl *ctrl,
+ const struct misensor_reg *next)
+{
+ if (ctrl->index == 0)
+ return 1;
+
+ return ctrl->buffer.addr + ctrl->index == next->reg;
+}
+
+/*
+ * mt9m114_write_reg_array - Write a list of mt9m114 registers
+ * @client: i2c driver client structure
+ * @reglist: list of registers to be written
+ * @poll: completion polling requirement
+ *
+ * This function writes a list of registers. When consecutive addresses
+ * are found in a row on the list, this function creates a buffer and sends
+ * the consecutive data in a single i2c_transfer().
+ *
+ * __mt9m114_flush_reg_array(), __mt9m114_buf_reg_array() and
+ * __mt9m114_write_reg_is_consecutive() are internal functions to
+ * mt9m114_write_reg_array() and should not be used anywhere else.
+ */
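+/*
+ * Illustrative example: two 16-bit writes to 0x3000 and 0x3002 are
+ * consecutive (0x3000 + 2 == 0x3002), so they are buffered and sent as
+ * a single 4-byte payload starting at 0x3000. The register addresses
+ * here are placeholders, not actual mt9m114 registers.
+ */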
+static int mt9m114_write_reg_array(struct i2c_client *client,
+ const struct misensor_reg *reglist,
+ int poll)
+{
+ const struct misensor_reg *next = reglist;
+ struct mt9m114_write_ctrl ctrl;
+ int err;
+
+ if (poll == PRE_POLLING) {
+ err = mt9m114_wait_state(client, MT9M114_WAIT_STAT_TIMEOUT);
+ if (err)
+ return err;
+ }
+
+ ctrl.index = 0;
+ for (; next->length != MISENSOR_TOK_TERM; next++) {
+ switch (next->length & MISENSOR_TOK_MASK) {
+ case MISENSOR_TOK_DELAY:
+ err = __mt9m114_flush_reg_array(client, &ctrl);
+ if (err)
+ return err;
+ msleep(next->val);
+ break;
+ case MISENSOR_TOK_RMW:
+ err = __mt9m114_flush_reg_array(client, &ctrl);
+ err |= misensor_rmw_reg(client,
+ next->length &
+ ~MISENSOR_TOK_RMW,
+ next->reg, next->val,
+ next->val2);
+ if (err) {
+ dev_err(&client->dev, "%s read err. aborted\n",
+ __func__);
+ return -EINVAL;
+ }
+ break;
+ default:
+ /*
+ * If the next address is not consecutive, the data needs to
+ * be flushed before proceeding.
+ */
+ if (!__mt9m114_write_reg_is_consecutive(client, &ctrl,
+ next)) {
+ err = __mt9m114_flush_reg_array(client, &ctrl);
+ if (err)
+ return err;
+ }
+ err = __mt9m114_buf_reg_array(client, &ctrl, next);
+ if (err) {
+ v4l2_err(client, "%s: write error, aborted\n",
+ __func__);
+ return err;
+ }
+ break;
+ }
+ }
+
+ err = __mt9m114_flush_reg_array(client, &ctrl);
+ if (err)
+ return err;
+
+ if (poll == POST_POLLING)
+ return mt9m114_wait_state(client, MT9M114_WAIT_STAT_TIMEOUT);
+
+ return 0;
+}
+
+static int mt9m114_wait_state(struct i2c_client *client, int timeout)
+{
+ int ret;
+ unsigned int val;
+
+ while (timeout-- > 0) {
+ ret = mt9m114_read_reg(client, MISENSOR_16BIT, 0x0080, &val);
+ if (ret)
+ return ret;
+ if ((val & 0x2) == 0)
+ return 0;
+ msleep(20);
+ }
+
+ return -EINVAL;
+}
+
+static int mt9m114_set_suspend(struct v4l2_subdev *sd)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ return mt9m114_write_reg_array(client,
+ mt9m114_standby_reg, POST_POLLING);
+}
+
+static int mt9m114_init_common(struct v4l2_subdev *sd)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ return mt9m114_write_reg_array(client, mt9m114_common, PRE_POLLING);
+}
+
+static int power_ctrl(struct v4l2_subdev *sd, bool flag)
+{
+ int ret;
+ struct mt9m114_device *dev = to_mt9m114_sensor(sd);
+
+ if (!dev || !dev->platform_data)
+ return -ENODEV;
+
+ /* Non-gmin platforms use the legacy callback */
+ if (dev->platform_data->power_ctrl)
+ return dev->platform_data->power_ctrl(sd, flag);
+
+ if (flag) {
+ ret = dev->platform_data->v2p8_ctrl(sd, 1);
+ if (ret == 0) {
+ ret = dev->platform_data->v1p8_ctrl(sd, 1);
+ if (ret)
+ ret = dev->platform_data->v2p8_ctrl(sd, 0);
+ }
+ } else {
+ ret = dev->platform_data->v2p8_ctrl(sd, 0);
+ ret = dev->platform_data->v1p8_ctrl(sd, 0);
+ }
+ return ret;
+}
+
+static int gpio_ctrl(struct v4l2_subdev *sd, bool flag)
+{
+ int ret;
+ struct mt9m114_device *dev = to_mt9m114_sensor(sd);
+
+ if (!dev || !dev->platform_data)
+ return -ENODEV;
+
+ /* Non-gmin platforms use the legacy callback */
+ if (dev->platform_data->gpio_ctrl)
+ return dev->platform_data->gpio_ctrl(sd, flag);
+
+ /* Note: current modules wire only one GPIO signal (RESET#),
+ * but the schematic wires up two to the connector. BIOS
+ * versions have been unfortunately inconsistent with which
+ * ACPI index RESET# is on, so hit both */
+
+ if (flag) {
+ ret = dev->platform_data->gpio0_ctrl(sd, 0);
+ ret = dev->platform_data->gpio1_ctrl(sd, 0);
+ msleep(60);
+ ret |= dev->platform_data->gpio0_ctrl(sd, 1);
+ ret |= dev->platform_data->gpio1_ctrl(sd, 1);
+ } else {
+ ret = dev->platform_data->gpio0_ctrl(sd, 0);
+ ret = dev->platform_data->gpio1_ctrl(sd, 0);
+ }
+ return ret;
+}
+
+static int power_up(struct v4l2_subdev *sd)
+{
+ struct mt9m114_device *dev = to_mt9m114_sensor(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret;
+
+ if (!dev->platform_data) {
+ dev_err(&client->dev, "no camera_sensor_platform_data");
+ return -ENODEV;
+ }
+
+ /* power control */
+ ret = power_ctrl(sd, 1);
+ if (ret)
+ goto fail_power;
+
+ /* flis clock control */
+ ret = dev->platform_data->flisclk_ctrl(sd, 1);
+ if (ret)
+ goto fail_clk;
+
+ /* gpio ctrl */
+ ret = gpio_ctrl(sd, 1);
+ if (ret)
+ dev_err(&client->dev, "gpio failed 1\n");
+ /*
+ * According to the datasheet, 44 ms is needed between power-up and
+ * the first I2C command.
+ */
+ msleep(50);
+
+ return 0;
+
+fail_clk:
+ dev->platform_data->flisclk_ctrl(sd, 0);
+fail_power:
+ power_ctrl(sd, 0);
+ dev_err(&client->dev, "sensor power-up failed\n");
+
+ return ret;
+}
+
+static int power_down(struct v4l2_subdev *sd)
+{
+ struct mt9m114_device *dev = to_mt9m114_sensor(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret;
+
+ if (!dev->platform_data) {
+ dev_err(&client->dev, "no camera_sensor_platform_data");
+ return -ENODEV;
+ }
+
+ ret = dev->platform_data->flisclk_ctrl(sd, 0);
+ if (ret)
+ dev_err(&client->dev, "flisclk failed\n");
+
+ /* gpio ctrl */
+ ret = gpio_ctrl(sd, 0);
+ if (ret)
+ dev_err(&client->dev, "gpio failed 1\n");
+
+ /* power control */
+ ret = power_ctrl(sd, 0);
+ if (ret)
+ dev_err(&client->dev, "vprog failed.\n");
+
+ /* According to the datasheet, 20 ms is needed after power-down */
+ msleep(20);
+
+ return ret;
+}
+
+static int mt9m114_s_power(struct v4l2_subdev *sd, int power)
+{
+ if (power == 0)
+ return power_down(sd);
+ else {
+ if (power_up(sd))
+ return -EINVAL;
+
+ return mt9m114_init_common(sd);
+ }
+}
+
+/*
+ * distance - calculate the distance
+ * @res: resolution
+ * @w: width
+ * @h: height
+ *
+ * Get the gap between the resolution and w/h.
+ * Resolutions with width/height smaller than w/h are not considered.
+ * Returns the gap value, or -1 on failure.
+ */
+#define LARGEST_ALLOWED_RATIO_MISMATCH 600
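+/*
+ * Note on the arithmetic in distance(): ratios are kept in 13-bit fixed
+ * point, so 8192 (1 << 13) represents a ratio of 1.0. A w_ratio or
+ * h_ratio below 8192 means the candidate resolution is smaller than the
+ * requested one, and 'match' measures how far the two aspect ratios
+ * diverge from each other.
+ */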
+static int distance(struct mt9m114_res_struct const *res, u32 w, u32 h)
+{
+ unsigned int w_ratio;
+ unsigned int h_ratio;
+ int match;
+
+ if (w == 0)
+ return -1;
+ w_ratio = (res->width << 13) / w;
+ if (h == 0)
+ return -1;
+ h_ratio = (res->height << 13) / h;
+ if (h_ratio == 0)
+ return -1;
+ match = abs(((w_ratio << 13) / h_ratio) - 8192);
+
+ if ((w_ratio < 8192) || (h_ratio < 8192) ||
+ (match > LARGEST_ALLOWED_RATIO_MISMATCH))
+ return -1;
+
+ return w_ratio + h_ratio;
+}
+
+/* Return the nearest higher resolution index */
+static int nearest_resolution_index(int w, int h)
+{
+ int i;
+ int idx = -1;
+ int dist;
+ int min_dist = INT_MAX;
+ const struct mt9m114_res_struct *tmp_res = NULL;
+
+ for (i = 0; i < ARRAY_SIZE(mt9m114_res); i++) {
+ tmp_res = &mt9m114_res[i];
+ dist = distance(tmp_res, w, h);
+ if (dist == -1)
+ continue;
+ if (dist < min_dist) {
+ min_dist = dist;
+ idx = i;
+ }
+ }
+
+ return idx;
+}
+
+static int mt9m114_try_res(u32 *w, u32 *h)
+{
+ int idx = 0;
+
+ if ((*w > MT9M114_RES_960P_SIZE_H)
+ || (*h > MT9M114_RES_960P_SIZE_V)) {
+ *w = MT9M114_RES_960P_SIZE_H;
+ *h = MT9M114_RES_960P_SIZE_V;
+ } else {
+ idx = nearest_resolution_index(*w, *h);
+
+ /*
+ * nearest_resolution_index() doesn't return smaller
+ * resolutions. If it fails, it means the requested
+ * resolution is higher than we can support. Fall back
+ * to the highest possible resolution in this case.
+ */
+ if (idx == -1)
+ idx = ARRAY_SIZE(mt9m114_res) - 1;
+
+ *w = mt9m114_res[idx].width;
+ *h = mt9m114_res[idx].height;
+ }
+
+ return 0;
+}
+
+static struct mt9m114_res_struct *mt9m114_to_res(u32 w, u32 h)
+{
+ int index;
+
+ for (index = 0; index < N_RES; index++) {
+ if ((mt9m114_res[index].width == w) &&
+ (mt9m114_res[index].height == h))
+ break;
+ }
+
+ /* No mode found */
+ if (index >= N_RES)
+ return NULL;
+
+ return &mt9m114_res[index];
+}
+
+static int mt9m114_res2size(struct v4l2_subdev *sd, int *h_size, int *v_size)
+{
+ struct mt9m114_device *dev = to_mt9m114_sensor(sd);
+ unsigned short hsize;
+ unsigned short vsize;
+
+ switch (dev->res) {
+ case MT9M114_RES_736P:
+ hsize = MT9M114_RES_736P_SIZE_H;
+ vsize = MT9M114_RES_736P_SIZE_V;
+ break;
+ case MT9M114_RES_864P:
+ hsize = MT9M114_RES_864P_SIZE_H;
+ vsize = MT9M114_RES_864P_SIZE_V;
+ break;
+ case MT9M114_RES_960P:
+ hsize = MT9M114_RES_960P_SIZE_H;
+ vsize = MT9M114_RES_960P_SIZE_V;
+ break;
+ default:
+ v4l2_err(sd, "%s: Resolution 0x%08x unknown\n", __func__,
+ dev->res);
+ return -EINVAL;
+ }
+
+ if (h_size != NULL)
+ *h_size = hsize;
+ if (v_size != NULL)
+ *v_size = vsize;
+
+ return 0;
+}
+
+static int mt9m114_get_intg_factor(struct i2c_client *client,
+ struct camera_mipi_info *info,
+ const struct mt9m114_res_struct *res)
+{
+ struct atomisp_sensor_mode_data *buf = &info->data;
+ u32 reg_val;
+ int ret;
+
+ if (info == NULL)
+ return -EINVAL;
+
+ ret = mt9m114_read_reg(client, MISENSOR_32BIT,
+ REG_PIXEL_CLK, &reg_val);
+ if (ret)
+ return ret;
+ buf->vt_pix_clk_freq_mhz = reg_val;
+
+ /* get integration time */
+ buf->coarse_integration_time_min = MT9M114_COARSE_INTG_TIME_MIN;
+ buf->coarse_integration_time_max_margin =
+ MT9M114_COARSE_INTG_TIME_MAX_MARGIN;
+
+ buf->fine_integration_time_min = MT9M114_FINE_INTG_TIME_MIN;
+ buf->fine_integration_time_max_margin =
+ MT9M114_FINE_INTG_TIME_MAX_MARGIN;
+
+ buf->fine_integration_time_def = MT9M114_FINE_INTG_TIME_MIN;
+
+ buf->frame_length_lines = res->lines_per_frame;
+ buf->line_length_pck = res->pixels_per_line;
+ buf->read_mode = res->bin_mode;
+
+ /* get the cropping and output resolution to ISP for this mode. */
+ ret = mt9m114_read_reg(client, MISENSOR_16BIT,
+ REG_H_START, &reg_val);
+ if (ret)
+ return ret;
+ buf->crop_horizontal_start = reg_val;
+
+ ret = mt9m114_read_reg(client, MISENSOR_16BIT,
+ REG_V_START, &reg_val);
+ if (ret)
+ return ret;
+ buf->crop_vertical_start = reg_val;
+
+ ret = mt9m114_read_reg(client, MISENSOR_16BIT,
+ REG_H_END, &reg_val);
+ if (ret)
+ return ret;
+ buf->crop_horizontal_end = reg_val;
+
+ ret = mt9m114_read_reg(client, MISENSOR_16BIT,
+ REG_V_END, &reg_val);
+ if (ret)
+ return ret;
+ buf->crop_vertical_end = reg_val;
+
+ ret = mt9m114_read_reg(client, MISENSOR_16BIT,
+ REG_WIDTH, &reg_val);
+ if (ret)
+ return ret;
+ buf->output_width = reg_val;
+
+ ret = mt9m114_read_reg(client, MISENSOR_16BIT,
+ REG_HEIGHT, &reg_val);
+ if (ret)
+ return ret;
+ buf->output_height = reg_val;
+
+ ret = mt9m114_read_reg(client, MISENSOR_16BIT,
+ REG_TIMING_HTS, &reg_val);
+ if (ret)
+ return ret;
+ buf->line_length_pck = reg_val;
+
+ ret = mt9m114_read_reg(client, MISENSOR_16BIT,
+ REG_TIMING_VTS, &reg_val);
+ if (ret)
+ return ret;
+ buf->frame_length_lines = reg_val;
+
+ buf->binning_factor_x = res->bin_factor_x ?
+ res->bin_factor_x : 1;
+ buf->binning_factor_y = res->bin_factor_y ?
+ res->bin_factor_y : 1;
+ return 0;
+}
+
+static int mt9m114_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *format)
+{
+ struct v4l2_mbus_framefmt *fmt = &format->format;
+ int width, height;
+ int ret;
+
+ if (format->pad)
+ return -EINVAL;
+ fmt->code = MEDIA_BUS_FMT_SGRBG10_1X10;
+
+ ret = mt9m114_res2size(sd, &width, &height);
+ if (ret)
+ return ret;
+ fmt->width = width;
+ fmt->height = height;
+
+ return 0;
+}
+
+static int mt9m114_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *format)
+{
+ struct v4l2_mbus_framefmt *fmt = &format->format;
+ struct i2c_client *c = v4l2_get_subdevdata(sd);
+ struct mt9m114_device *dev = to_mt9m114_sensor(sd);
+ struct mt9m114_res_struct *res_index;
+ u32 width = fmt->width;
+ u32 height = fmt->height;
+ struct camera_mipi_info *mt9m114_info = NULL;
+ int ret;
+
+ if (format->pad)
+ return -EINVAL;
+ dev->streamon = 0;
+ dev->first_exp = MT9M114_DEFAULT_FIRST_EXP;
+
+ mt9m114_info = v4l2_get_subdev_hostdata(sd);
+ if (mt9m114_info == NULL)
+ return -EINVAL;
+
+ mt9m114_try_res(&width, &height);
+ if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
+ cfg->try_fmt = *fmt;
+ return 0;
+ }
+ res_index = mt9m114_to_res(width, height);
+
+ /* Sanity check */
+ if (unlikely(!res_index)) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ switch (res_index->res) {
+ case MT9M114_RES_736P:
+ ret = mt9m114_write_reg_array(c, mt9m114_736P_init, NO_POLLING);
+ ret += misensor_rmw_reg(c, MISENSOR_16BIT, MISENSOR_READ_MODE,
+ MISENSOR_R_MODE_MASK, MISENSOR_NORMAL_SET);
+ break;
+ case MT9M114_RES_864P:
+ ret = mt9m114_write_reg_array(c, mt9m114_864P_init, NO_POLLING);
+ ret += misensor_rmw_reg(c, MISENSOR_16BIT, MISENSOR_READ_MODE,
+ MISENSOR_R_MODE_MASK, MISENSOR_NORMAL_SET);
+ break;
+ case MT9M114_RES_960P:
+ ret = mt9m114_write_reg_array(c, mt9m114_976P_init, NO_POLLING);
+ /* set sensor read_mode to Normal */
+ ret += misensor_rmw_reg(c, MISENSOR_16BIT, MISENSOR_READ_MODE,
+ MISENSOR_R_MODE_MASK, MISENSOR_NORMAL_SET);
+ break;
+ default:
+ v4l2_err(sd, "set resolution: %d failed!\n", res_index->res);
+ return -EINVAL;
+ }
+
+ if (ret)
+ return -EINVAL;
+
+ ret = mt9m114_write_reg_array(c, mt9m114_chgstat_reg, POST_POLLING);
+ if (ret < 0)
+ return ret;
+
+ if (mt9m114_set_suspend(sd))
+ return -EINVAL;
+
+ if (dev->res != res_index->res) {
+ int index;
+
+ /* Switch to different size */
+ if (width <= 640) {
+ dev->nctx = 0x00; /* Set for context A */
+ } else {
+ /*
+ * Context B is used for resolutions larger than 640x480.
+ * YUV is used for context B.
+ */
+ dev->nctx = 0x01; /* set for context B */
+ }
+
+ /*
+ * Mark the current sensor resolution as being "used".
+ *
+ * REVISIT: We don't need a "used" field on each mode list
+ * entry to know which mode is selected. If this information
+ * is really necessary, how about using a single variable in
+ * the sensor dev struct?
+ */
+ for (index = 0; index < N_RES; index++) {
+ if ((width == mt9m114_res[index].width) &&
+ (height == mt9m114_res[index].height)) {
+ mt9m114_res[index].used = true;
+ continue;
+ }
+ mt9m114_res[index].used = false;
+ }
+ }
+ ret = mt9m114_get_intg_factor(c, mt9m114_info,
+ &mt9m114_res[res_index->res]);
+ if (ret) {
+ dev_err(&c->dev, "failed to get integration_factor\n");
+ return -EINVAL;
+ }
+ /*
+ * mt9m114 - we don't poll for context switch
+ * because it does not happen with streaming disabled.
+ */
+ dev->res = res_index->res;
+
+ fmt->width = width;
+ fmt->height = height;
+ fmt->code = MEDIA_BUS_FMT_SGRBG10_1X10;
+ return 0;
+}
+
+/* TODO: Update to SOC functions, remove exposure and gain */
+static int mt9m114_g_focal(struct v4l2_subdev *sd, s32 *val)
+{
+ *val = (MT9M114_FOCAL_LENGTH_NUM << 16) | MT9M114_FOCAL_LENGTH_DEM;
+ return 0;
+}
+
+static int mt9m114_g_fnumber(struct v4l2_subdev *sd, s32 *val)
+{
+ /* Constant f-number for mt9m114 */
+ *val = (MT9M114_F_NUMBER_DEFAULT_NUM << 16) | MT9M114_F_NUMBER_DEM;
+ return 0;
+}
+
+static int mt9m114_g_fnumber_range(struct v4l2_subdev *sd, s32 *val)
+{
+ *val = (MT9M114_F_NUMBER_DEFAULT_NUM << 24) |
+ (MT9M114_F_NUMBER_DEM << 16) |
+ (MT9M114_F_NUMBER_DEFAULT_NUM << 8) | MT9M114_F_NUMBER_DEM;
+ return 0;
+}
+
+/* Horizontal flip the image. */
+static int mt9m114_g_hflip(struct v4l2_subdev *sd, s32 *val)
+{
+ struct i2c_client *c = v4l2_get_subdevdata(sd);
+ int ret;
+ u32 data;
+
+ ret = mt9m114_read_reg(c, MISENSOR_16BIT,
+ (u32)MISENSOR_READ_MODE, &data);
+ if (ret)
+ return ret;
+ *val = !!(data & MISENSOR_HFLIP_MASK);
+
+ return 0;
+}
+
+static int mt9m114_g_vflip(struct v4l2_subdev *sd, s32 *val)
+{
+ struct i2c_client *c = v4l2_get_subdevdata(sd);
+ int ret;
+ u32 data;
+
+ ret = mt9m114_read_reg(c, MISENSOR_16BIT,
+ (u32)MISENSOR_READ_MODE, &data);
+ if (ret)
+ return ret;
+ *val = !!(data & MISENSOR_VFLIP_MASK);
+
+ return 0;
+}
+
+static long mt9m114_s_exposure(struct v4l2_subdev *sd,
+ struct atomisp_exposure *exposure)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct mt9m114_device *dev = to_mt9m114_sensor(sd);
+ int ret = 0;
+ unsigned int coarse_integration = 0;
+ unsigned int fine_integration = 0;
+ unsigned int FLines = 0;
+ unsigned int FrameLengthLines = 0; /* ExposureTime.FrameLengthLines; */
+ unsigned int AnalogGain, DigitalGain;
+ u32 AnalogGainToWrite = 0;
+ u16 exposure_local[3];
+
+ dev_dbg(&client->dev, "%s(0x%X 0x%X 0x%X)\n", __func__,
+ exposure->integration_time[0], exposure->gain[0],
+ exposure->gain[1]);
+
+ coarse_integration = exposure->integration_time[0];
+ /* fine_integration = ExposureTime.FineIntegrationTime; */
+ /* FrameLengthLines = ExposureTime.FrameLengthLines; */
+ FLines = mt9m114_res[dev->res].lines_per_frame;
+ AnalogGain = exposure->gain[0];
+ DigitalGain = exposure->gain[1];
+ if (!dev->streamon) {
+ /*Save the first exposure values while stream is off*/
+ dev->first_exp = coarse_integration;
+ dev->first_gain = AnalogGain;
+ dev->first_diggain = DigitalGain;
+ }
+ /* DigitalGain = 0x400 * (((u16) DigitalGain) >> 8) +
+ ((unsigned int)(0x400 * (((u16) DigitalGain) & 0xFF)) >>8); */
+
+ /* set frame length */
+ if (FLines < coarse_integration + 6)
+ FLines = coarse_integration + 6;
+ if (FLines < FrameLengthLines)
+ FLines = FrameLengthLines;
+ ret = mt9m114_write_reg(client, MISENSOR_16BIT, 0x300A, FLines);
+ if (ret) {
+ v4l2_err(client, "%s: fail to set FLines\n", __func__);
+ return -EINVAL;
+ }
+
+ /* set coarse/fine integration */
+ exposure_local[0] = REG_EXPO_COARSE;
+ exposure_local[1] = (u16)coarse_integration;
+ exposure_local[2] = (u16)fine_integration;
+ /* 3A provides the real exposure time; it should not be
+ translated to any other value here. */
+ ret = mt9m114_write_reg(client, MISENSOR_16BIT,
+ REG_EXPO_COARSE, (u16)(coarse_integration));
+ if (ret) {
+ v4l2_err(client, "%s: fail to set exposure time\n", __func__);
+ return -EINVAL;
+ }
+
+ /*
+ // set analog/digital gain
+ switch(AnalogGain)
+ {
+ case 0:
+ AnalogGainToWrite = 0x0;
+ break;
+ case 1:
+ AnalogGainToWrite = 0x20;
+ break;
+ case 2:
+ AnalogGainToWrite = 0x60;
+ break;
+ case 4:
+ AnalogGainToWrite = 0xA0;
+ break;
+ case 8:
+ AnalogGainToWrite = 0xE0;
+ break;
+ default:
+ AnalogGainToWrite = 0x20;
+ break;
+ }
+ */
+ if (DigitalGain >= 16 || DigitalGain <= 1)
+ DigitalGain = 1;
+ /* AnalogGainToWrite =
+ (u16)((DigitalGain << 12) | AnalogGainToWrite); */
+ AnalogGainToWrite = (u16)((DigitalGain << 12) | (u16)AnalogGain);
+ ret = mt9m114_write_reg(client, MISENSOR_16BIT,
+ REG_GAIN, AnalogGainToWrite);
+ if (ret) {
+ v4l2_err(client, "%s: fail to set AnalogGainToWrite\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static long mt9m114_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
+{
+
+ switch (cmd) {
+ case ATOMISP_IOC_S_EXPOSURE:
+ return mt9m114_s_exposure(sd, arg);
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* This returns the exposure time being used. This should only be used
+ for filling in EXIF data, not for actual image processing. */
+static int mt9m114_g_exposure(struct v4l2_subdev *sd, s32 *value)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ u32 coarse;
+ int ret;
+
+ /* the fine integration time is currently not calculated */
+ ret = mt9m114_read_reg(client, MISENSOR_16BIT,
+ REG_EXPO_COARSE, &coarse);
+ if (ret)
+ return ret;
+
+ *value = coarse;
+ return 0;
+}
+#ifndef CSS15
+/*
+ * This function returns the maximum number of exposure zones the sensor
+ * supports. For this sensor that number is 1.
+ */
+static int mt9m114_g_exposure_zone_num(struct v4l2_subdev *sd, s32 *val)
+{
+ *val = 1;
+
+ return 0;
+}
+
+/*
+ * set exposure metering, average/center_weighted/spot/matrix.
+ */
+static int mt9m114_s_exposure_metering(struct v4l2_subdev *sd, s32 val)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret;
+
+ switch (val) {
+ case V4L2_EXPOSURE_METERING_SPOT:
+ ret = mt9m114_write_reg_array(client, mt9m114_exp_average,
+ NO_POLLING);
+ if (ret) {
+ dev_err(&client->dev, "write exp_average reg err.\n");
+ return ret;
+ }
+ break;
+ case V4L2_EXPOSURE_METERING_CENTER_WEIGHTED:
+ default:
+ ret = mt9m114_write_reg_array(client, mt9m114_exp_center,
+ NO_POLLING);
+ if (ret) {
+ dev_err(&client->dev, "write exp_default reg err");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * This function implements the touch exposure feature.
+ */
+static int mt9m114_s_exposure_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_selection *sel)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct misensor_reg exp_reg;
+ int width, height;
+ int grid_width, grid_height;
+ int grid_left, grid_top, grid_right, grid_bottom;
+ int win_left, win_top, win_right, win_bottom;
+ int i, j;
+ int ret;
+
+ if (sel->which != V4L2_SUBDEV_FORMAT_TRY &&
+ sel->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+ return -EINVAL;
+
+ grid_left = sel->r.left;
+ grid_top = sel->r.top;
+ grid_right = sel->r.left + sel->r.width - 1;
+ grid_bottom = sel->r.top + sel->r.height - 1;
+
+ ret = mt9m114_res2size(sd, &width, &height);
+ if (ret)
+ return ret;
+
+ grid_width = width / 5;
+ grid_height = height / 5;
+
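+ /*
+ * The frame is divided into a 5x5 metering grid; the selection
+ * rectangle is converted to grid cell indices, which are clamped
+ * to the 0..4 range below.
+ */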
+ if (grid_width && grid_height) {
+ win_left = grid_left / grid_width;
+ win_top = grid_top / grid_height;
+ win_right = grid_right / grid_width;
+ win_bottom = grid_bottom / grid_height;
+ } else {
+ dev_err(&client->dev, "Incorrect exp grid.\n");
+ return -EINVAL;
+ }
+
+ win_left = clamp_t(int, win_left, 0, 4);
+ win_top = clamp_t(int, win_top, 0, 4);
+ win_right = clamp_t(int, win_right, 0, 4);
+ win_bottom = clamp_t(int, win_bottom, 0, 4);
+
+ ret = mt9m114_write_reg_array(client, mt9m114_exp_average, NO_POLLING);
+ if (ret) {
+ dev_err(&client->dev, "write exp_average reg err.\n");
+ return ret;
+ }
+
+ for (i = win_top; i <= win_bottom; i++) {
+ for (j = win_left; j <= win_right; j++) {
+ exp_reg = mt9m114_exp_win[i][j];
+
+ ret = mt9m114_write_reg(client, exp_reg.length,
+ exp_reg.reg, exp_reg.val);
+ if (ret) {
+ dev_err(&client->dev, "write exp_reg err.\n");
+ return ret;
+ }
+ }
+ }
+
+ return 0;
+}
+#endif
+
+static int mt9m114_g_bin_factor_x(struct v4l2_subdev *sd, s32 *val)
+{
+ struct mt9m114_device *dev = to_mt9m114_sensor(sd);
+
+ *val = mt9m114_res[dev->res].bin_factor_x;
+
+ return 0;
+}
+
+static int mt9m114_g_bin_factor_y(struct v4l2_subdev *sd, s32 *val)
+{
+ struct mt9m114_device *dev = to_mt9m114_sensor(sd);
+
+ *val = mt9m114_res[dev->res].bin_factor_y;
+
+ return 0;
+}
+
+static int mt9m114_s_ev(struct v4l2_subdev *sd, s32 val)
+{
+ struct i2c_client *c = v4l2_get_subdevdata(sd);
+ s32 luma = 0x37;
+ int err;
+
+ /* Only EV values from -2 to 2 are supported:
+ * 0: 0x37, 1: 0x47, 2: 0x57, -1: 0x27, -2: 0x17
+ */
+ if (val < -2 || val > 2)
+ return -EINVAL;
+ luma += 0x10 * val;
+ dev_dbg(&c->dev, "%s val:%d luma:0x%x\n", __func__, val, luma);
+ err = mt9m114_write_reg(c, MISENSOR_16BIT, 0x098E, 0xC87A);
+ if (err) {
+ dev_err(&c->dev, "%s logic addr access error\n", __func__);
+ return err;
+ }
+ err = mt9m114_write_reg(c, MISENSOR_8BIT, 0xC87A, (u32)luma);
+ if (err) {
+ dev_err(&c->dev, "%s write target_average_luma failed\n",
+ __func__);
+ return err;
+ }
+ udelay(10);
+
+ return 0;
+}
+
+static int mt9m114_g_ev(struct v4l2_subdev *sd, s32 *val)
+{
+ struct i2c_client *c = v4l2_get_subdevdata(sd);
+ int err;
+ u32 luma;
+
+ err = mt9m114_write_reg(c, MISENSOR_16BIT, 0x098E, 0xC87A);
+ if (err) {
+ dev_err(&c->dev, "%s logic addr access error\n", __func__);
+ return err;
+ }
+ err = mt9m114_read_reg(c, MISENSOR_8BIT, 0xC87A, &luma);
+ if (err) {
+ dev_err(&c->dev, "%s read target_average_luma failed\n",
+ __func__);
+ return err;
+ }
+ luma -= 0x17;
+ luma /= 0x10;
+ *val = (s32)luma - 2;
+ dev_dbg(&c->dev, "%s val:%d\n", __func__, *val);
+
+ return 0;
+}
+
+/*
+ * Fake interface: the mt9m114 currently cannot support 3a_lock.
+ */
+static int mt9m114_s_3a_lock(struct v4l2_subdev *sd, s32 val)
+{
+ aaalock = val;
+ return 0;
+}
+
+static int mt9m114_g_3a_lock(struct v4l2_subdev *sd, s32 *val)
+{
+ if (aaalock)
+ return V4L2_LOCK_EXPOSURE | V4L2_LOCK_WHITE_BALANCE
+ | V4L2_LOCK_FOCUS;
+ return 0;
+}
+
+static int mt9m114_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct mt9m114_device *dev =
+ container_of(ctrl->handler, struct mt9m114_device, ctrl_handler);
+ struct i2c_client *client = v4l2_get_subdevdata(&dev->sd);
+ int ret = 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_VFLIP:
+ dev_dbg(&client->dev, "%s: CID_VFLIP:%d.\n",
+ __func__, ctrl->val);
+ ret = mt9m114_t_vflip(&dev->sd, ctrl->val);
+ break;
+ case V4L2_CID_HFLIP:
+ dev_dbg(&client->dev, "%s: CID_HFLIP:%d.\n",
+ __func__, ctrl->val);
+ ret = mt9m114_t_hflip(&dev->sd, ctrl->val);
+ break;
+#ifndef CSS15
+ case V4L2_CID_EXPOSURE_METERING:
+ ret = mt9m114_s_exposure_metering(&dev->sd, ctrl->val);
+ break;
+#endif
+ case V4L2_CID_EXPOSURE:
+ ret = mt9m114_s_ev(&dev->sd, ctrl->val);
+ break;
+ case V4L2_CID_3A_LOCK:
+ ret = mt9m114_s_3a_lock(&dev->sd, ctrl->val);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ return ret;
+}
+
+static int mt9m114_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct mt9m114_device *dev =
+ container_of(ctrl->handler, struct mt9m114_device, ctrl_handler);
+ int ret = 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_VFLIP:
+ ret = mt9m114_g_vflip(&dev->sd, &ctrl->val);
+ break;
+ case V4L2_CID_HFLIP:
+ ret = mt9m114_g_hflip(&dev->sd, &ctrl->val);
+ break;
+ case V4L2_CID_FOCAL_ABSOLUTE:
+ ret = mt9m114_g_focal(&dev->sd, &ctrl->val);
+ break;
+ case V4L2_CID_FNUMBER_ABSOLUTE:
+ ret = mt9m114_g_fnumber(&dev->sd, &ctrl->val);
+ break;
+ case V4L2_CID_FNUMBER_RANGE:
+ ret = mt9m114_g_fnumber_range(&dev->sd, &ctrl->val);
+ break;
+ case V4L2_CID_EXPOSURE_ABSOLUTE:
+ ret = mt9m114_g_exposure(&dev->sd, &ctrl->val);
+ break;
+#ifndef CSS15
+ case V4L2_CID_EXPOSURE_ZONE_NUM:
+ ret = mt9m114_g_exposure_zone_num(&dev->sd, &ctrl->val);
+ break;
+#endif
+ case V4L2_CID_BIN_FACTOR_HORZ:
+ ret = mt9m114_g_bin_factor_x(&dev->sd, &ctrl->val);
+ break;
+ case V4L2_CID_BIN_FACTOR_VERT:
+ ret = mt9m114_g_bin_factor_y(&dev->sd, &ctrl->val);
+ break;
+ case V4L2_CID_EXPOSURE:
+ ret = mt9m114_g_ev(&dev->sd, &ctrl->val);
+ break;
+ case V4L2_CID_3A_LOCK:
+ ret = mt9m114_g_3a_lock(&dev->sd, &ctrl->val);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops ctrl_ops = {
+ .s_ctrl = mt9m114_s_ctrl,
+ .g_volatile_ctrl = mt9m114_g_volatile_ctrl
+};
+
+static struct v4l2_ctrl_config mt9m114_controls[] = {
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_VFLIP,
+ .name = "Image v-Flip",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = 1,
+ .step = 1,
+ .def = 0,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_HFLIP,
+ .name = "Image h-Flip",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = 1,
+ .step = 1,
+ .def = 0,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_FOCAL_ABSOLUTE,
+ .name = "focal length",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = MT9M114_FOCAL_LENGTH_DEFAULT,
+ .max = MT9M114_FOCAL_LENGTH_DEFAULT,
+ .step = 1,
+ .def = MT9M114_FOCAL_LENGTH_DEFAULT,
+ .flags = 0,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_FNUMBER_ABSOLUTE,
+ .name = "f-number",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = MT9M114_F_NUMBER_DEFAULT,
+ .max = MT9M114_F_NUMBER_DEFAULT,
+ .step = 1,
+ .def = MT9M114_F_NUMBER_DEFAULT,
+ .flags = 0,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_FNUMBER_RANGE,
+ .name = "f-number range",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = MT9M114_F_NUMBER_RANGE,
+ .max = MT9M114_F_NUMBER_RANGE,
+ .step = 1,
+ .def = MT9M114_F_NUMBER_RANGE,
+ .flags = 0,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_EXPOSURE_ABSOLUTE,
+ .name = "exposure",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = 0xffff,
+ .step = 1,
+ .def = 0,
+ .flags = 0,
+ },
+#ifndef CSS15
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_EXPOSURE_ZONE_NUM,
+ .name = "one-time exposure zone number",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = 0xffff,
+ .step = 1,
+ .def = 0,
+ .flags = 0,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_EXPOSURE_METERING,
+ .name = "metering",
+ .type = V4L2_CTRL_TYPE_MENU,
+ .min = 0,
+ .max = 3,
+ .step = 1,
+ .def = 1,
+ .flags = 0,
+ },
+#endif
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_BIN_FACTOR_HORZ,
+ .name = "horizontal binning factor",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = MT9M114_BIN_FACTOR_MAX,
+ .step = 1,
+ .def = 0,
+ .flags = 0,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_BIN_FACTOR_VERT,
+ .name = "vertical binning factor",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = MT9M114_BIN_FACTOR_MAX,
+ .step = 1,
+ .def = 0,
+ .flags = 0,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_EXPOSURE,
+ .name = "exposure biasx",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = -2,
+ .max = 2,
+ .step = 1,
+ .def = 0,
+ .flags = 0,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_3A_LOCK,
+ .name = "3a lock",
+ .type = V4L2_CTRL_TYPE_BITMASK,
+ .min = 0,
+ .max = V4L2_LOCK_EXPOSURE | V4L2_LOCK_WHITE_BALANCE | V4L2_LOCK_FOCUS,
+ .step = 1,
+ .def = 0,
+ .flags = 0,
+ },
+};
+
+static int mt9m114_detect(struct mt9m114_device *dev, struct i2c_client *client)
+{
+ struct i2c_adapter *adapter = client->adapter;
+ u32 retvalue;
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_I2C)) {
+ dev_err(&client->dev, "%s: i2c error", __func__);
+ return -ENODEV;
+ }
+ mt9m114_read_reg(client, MISENSOR_16BIT, (u32)MT9M114_PID, &retvalue);
+ dev->real_model_id = retvalue;
+
+ if (retvalue != MT9M114_MOD_ID) {
+ dev_err(&client->dev, "%s: failed: client->addr = %x\n",
+ __func__, client->addr);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int
+mt9m114_s_config(struct v4l2_subdev *sd, int irq, void *platform_data)
+{
+ struct mt9m114_device *dev = to_mt9m114_sensor(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret;
+
+ if (!platform_data)
+ return -ENODEV;
+
+ dev->platform_data =
+ (struct camera_sensor_platform_data *)platform_data;
+
+ if (dev->platform_data->platform_init) {
+ ret = dev->platform_data->platform_init(client);
+ if (ret) {
+ v4l2_err(client, "mt9m114 platform init err\n");
+ return ret;
+ }
+ }
+ ret = power_up(sd);
+ if (ret) {
+ v4l2_err(client, "mt9m114 power-up err");
+ return ret;
+ }
+
+ /* config & detect sensor */
+ ret = mt9m114_detect(dev, client);
+ if (ret) {
+ v4l2_err(client, "mt9m114_detect err s_config.\n");
+ goto fail_detect;
+ }
+
+ ret = dev->platform_data->csi_cfg(sd, 1);
+ if (ret)
+ goto fail_csi_cfg;
+
+ ret = mt9m114_set_suspend(sd);
+ if (ret) {
+ v4l2_err(client, "mt9m114 suspend err");
+ return ret;
+ }
+
+ ret = power_down(sd);
+ if (ret) {
+ v4l2_err(client, "mt9m114 power down err");
+ return ret;
+ }
+
+ return ret;
+
+fail_csi_cfg:
+ dev->platform_data->csi_cfg(sd, 0);
+fail_detect:
+ power_down(sd);
+ dev_err(&client->dev, "sensor power-gating failed\n");
+ return ret;
+}
+
+/* Horizontal flip the image. */
+static int mt9m114_t_hflip(struct v4l2_subdev *sd, int value)
+{
+ struct i2c_client *c = v4l2_get_subdevdata(sd);
+ struct mt9m114_device *dev = to_mt9m114_sensor(sd);
+ int err;
+ /* set for direct mode */
+ err = mt9m114_write_reg(c, MISENSOR_16BIT, 0x098E, 0xC850);
+ if (value) {
+ /* enable H flip ctx A */
+ err += misensor_rmw_reg(c, MISENSOR_8BIT, 0xC850, 0x01, 0x01);
+ err += misensor_rmw_reg(c, MISENSOR_8BIT, 0xC851, 0x01, 0x01);
+ /* ctx B */
+ err += misensor_rmw_reg(c, MISENSOR_8BIT, 0xC888, 0x01, 0x01);
+ err += misensor_rmw_reg(c, MISENSOR_8BIT, 0xC889, 0x01, 0x01);
+
+ err += misensor_rmw_reg(c, MISENSOR_16BIT, MISENSOR_READ_MODE,
+ MISENSOR_HFLIP_MASK, MISENSOR_FLIP_EN);
+
+ dev->bpat = MT9M114_BPAT_GRGRBGBG;
+ } else {
+ /* disable H flip ctx A */
+ err += misensor_rmw_reg(c, MISENSOR_8BIT, 0xC850, 0x01, 0x00);
+ err += misensor_rmw_reg(c, MISENSOR_8BIT, 0xC851, 0x01, 0x00);
+ /* ctx B */
+ err += misensor_rmw_reg(c, MISENSOR_8BIT, 0xC888, 0x01, 0x00);
+ err += misensor_rmw_reg(c, MISENSOR_8BIT, 0xC889, 0x01, 0x00);
+
+ err += misensor_rmw_reg(c, MISENSOR_16BIT, MISENSOR_READ_MODE,
+ MISENSOR_HFLIP_MASK, MISENSOR_FLIP_DIS);
+
+ dev->bpat = MT9M114_BPAT_BGBGGRGR;
+ }
+
+ err += mt9m114_write_reg(c, MISENSOR_8BIT, 0x8404, 0x06);
+ udelay(10);
+
+ return !!err;
+}
+
+/* Vertically flip the image */
+static int mt9m114_t_vflip(struct v4l2_subdev *sd, int value)
+{
+ struct i2c_client *c = v4l2_get_subdevdata(sd);
+ int err;
+ /* set for direct mode */
+ err = mt9m114_write_reg(c, MISENSOR_16BIT, 0x098E, 0xC850);
+ if (value >= 1) {
+ /* enable V flip - ctx A */
+ err += misensor_rmw_reg(c, MISENSOR_8BIT, 0xC850, 0x02, 0x01);
+ err += misensor_rmw_reg(c, MISENSOR_8BIT, 0xC851, 0x02, 0x01);
+ /* ctx B */
+ err += misensor_rmw_reg(c, MISENSOR_8BIT, 0xC888, 0x02, 0x01);
+ err += misensor_rmw_reg(c, MISENSOR_8BIT, 0xC889, 0x02, 0x01);
+
+ err += misensor_rmw_reg(c, MISENSOR_16BIT, MISENSOR_READ_MODE,
+ MISENSOR_VFLIP_MASK, MISENSOR_FLIP_EN);
+ } else {
+ /* disable V flip - ctx A */
+ err += misensor_rmw_reg(c, MISENSOR_8BIT, 0xC850, 0x02, 0x00);
+ err += misensor_rmw_reg(c, MISENSOR_8BIT, 0xC851, 0x02, 0x00);
+ /* ctx B */
+ err += misensor_rmw_reg(c, MISENSOR_8BIT, 0xC888, 0x02, 0x00);
+ err += misensor_rmw_reg(c, MISENSOR_8BIT, 0xC889, 0x02, 0x00);
+
+ err += misensor_rmw_reg(c, MISENSOR_16BIT, MISENSOR_READ_MODE,
+ MISENSOR_VFLIP_MASK, MISENSOR_FLIP_DIS);
+ }
+
+ err += mt9m114_write_reg(c, MISENSOR_8BIT, 0x8404, 0x06);
+ udelay(10);
+
+ return !!err;
+}
+
+static int mt9m114_s_parm(struct v4l2_subdev *sd,
+ struct v4l2_streamparm *param)
+{
+ return 0;
+}
+
+static int mt9m114_g_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_frame_interval *interval)
+{
+ struct mt9m114_device *dev = to_mt9m114_sensor(sd);
+
+ interval->interval.numerator = 1;
+ interval->interval.denominator = mt9m114_res[dev->res].fps;
+
+ return 0;
+}
+
+static int mt9m114_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ int ret;
+ struct i2c_client *c = v4l2_get_subdevdata(sd);
+ struct mt9m114_device *dev = to_mt9m114_sensor(sd);
+ struct atomisp_exposure exposure;
+
+ if (enable) {
+ ret = mt9m114_write_reg_array(c, mt9m114_chgstat_reg,
+ POST_POLLING);
+ if (ret < 0)
+ return ret;
+
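+ /*
+ * Workaround for low-light AE (see first_exp in mt9m114.h): if an
+ * exposure larger than MT9M114_MAX_FIRST_EXP was requested before
+ * streaming started, re-apply it now that the sensor is streaming.
+ */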
+ if (dev->first_exp > MT9M114_MAX_FIRST_EXP) {
+ exposure.integration_time[0] = dev->first_exp;
+ exposure.gain[0] = dev->first_gain;
+ exposure.gain[1] = dev->first_diggain;
+ mt9m114_s_exposure(sd, &exposure);
+ }
+ dev->streamon = 1;
+ } else {
+ dev->streamon = 0;
+ ret = mt9m114_set_suspend(sd);
+ }
+
+ return ret;
+}
+
+static int mt9m114_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ if (code->index)
+ return -EINVAL;
+ code->code = MEDIA_BUS_FMT_SGRBG10_1X10;
+
+ return 0;
+}
+
+static int mt9m114_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ unsigned int index = fse->index;
+
+ if (index >= N_RES)
+ return -EINVAL;
+
+ fse->min_width = mt9m114_res[index].width;
+ fse->min_height = mt9m114_res[index].height;
+ fse->max_width = mt9m114_res[index].width;
+ fse->max_height = mt9m114_res[index].height;
+
+ return 0;
+}
+
+static int mt9m114_g_skip_frames(struct v4l2_subdev *sd, u32 *frames)
+{
+ int index;
+ struct mt9m114_device *snr = to_mt9m114_sensor(sd);
+
+ if (!frames)
+ return -EINVAL;
+
+ for (index = 0; index < N_RES; index++) {
+ if (mt9m114_res[index].res == snr->res)
+ break;
+ }
+
+ if (index >= N_RES)
+ return -EINVAL;
+
+ *frames = mt9m114_res[index].skip_frames;
+
+ return 0;
+}
+
+static const struct v4l2_subdev_video_ops mt9m114_video_ops = {
+ .s_parm = mt9m114_s_parm,
+ .s_stream = mt9m114_s_stream,
+ .g_frame_interval = mt9m114_g_frame_interval,
+};
+
+static const struct v4l2_subdev_sensor_ops mt9m114_sensor_ops = {
+ .g_skip_frames = mt9m114_g_skip_frames,
+};
+
+static const struct v4l2_subdev_core_ops mt9m114_core_ops = {
+ .s_power = mt9m114_s_power,
+ .ioctl = mt9m114_ioctl,
+};
+
+/* REVISIT: Do we need pad operations? */
+static const struct v4l2_subdev_pad_ops mt9m114_pad_ops = {
+ .enum_mbus_code = mt9m114_enum_mbus_code,
+ .enum_frame_size = mt9m114_enum_frame_size,
+ .get_fmt = mt9m114_get_fmt,
+ .set_fmt = mt9m114_set_fmt,
+#ifndef CSS15
+ .set_selection = mt9m114_s_exposure_selection,
+#endif
+};
+
+static const struct v4l2_subdev_ops mt9m114_ops = {
+ .core = &mt9m114_core_ops,
+ .video = &mt9m114_video_ops,
+ .pad = &mt9m114_pad_ops,
+ .sensor = &mt9m114_sensor_ops,
+};
+
+static const struct media_entity_operations mt9m114_entity_ops = {
+ .link_setup = NULL,
+};
+
+static int mt9m114_remove(struct i2c_client *client)
+{
+ struct mt9m114_device *dev;
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+
+ dev = container_of(sd, struct mt9m114_device, sd);
+ dev->platform_data->csi_cfg(sd, 0);
+ if (dev->platform_data->platform_deinit)
+ dev->platform_data->platform_deinit();
+ v4l2_device_unregister_subdev(sd);
+ media_entity_cleanup(&dev->sd.entity);
+ v4l2_ctrl_handler_free(&dev->ctrl_handler);
+ kfree(dev);
+ return 0;
+}
+
+static int mt9m114_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct mt9m114_device *dev;
+ int ret = 0;
+ unsigned int i;
+ void *pdata;
+
+ /* Setup sensor configuration structure */
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev) {
+ dev_err(&client->dev, "out of memory\n");
+ return -ENOMEM;
+ }
+
+ v4l2_i2c_subdev_init(&dev->sd, client, &mt9m114_ops);
+ pdata = client->dev.platform_data;
+ if (ACPI_COMPANION(&client->dev))
+ pdata = gmin_camera_platform_data(&dev->sd,
+ ATOMISP_INPUT_FORMAT_RAW_10,
+ atomisp_bayer_order_grbg);
+ if (pdata)
+ ret = mt9m114_s_config(&dev->sd, client->irq, pdata);
+ if (!pdata || ret) {
+ v4l2_device_unregister_subdev(&dev->sd);
+ kfree(dev);
+ return ret;
+ }
+
+ ret = atomisp_register_i2c_module(&dev->sd, pdata, RAW_CAMERA);
+ if (ret) {
+ v4l2_device_unregister_subdev(&dev->sd);
+ kfree(dev);
+ /* Coverity CID 298095 - return on error */
+ return ret;
+ }
+
+ /* TODO: add format code here */
+ dev->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ dev->pad.flags = MEDIA_PAD_FL_SOURCE;
+ dev->format.code = MEDIA_BUS_FMT_SGRBG10_1X10;
+ dev->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
+
+ ret = v4l2_ctrl_handler_init(&dev->ctrl_handler,
+ ARRAY_SIZE(mt9m114_controls));
+ if (ret) {
+ mt9m114_remove(client);
+ return ret;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(mt9m114_controls); i++)
+ v4l2_ctrl_new_custom(&dev->ctrl_handler, &mt9m114_controls[i],
+ NULL);
+
+ if (dev->ctrl_handler.error) {
+ mt9m114_remove(client);
+ return dev->ctrl_handler.error;
+ }
+
+ /* Use same lock for controls as for everything else. */
+ dev->ctrl_handler.lock = &dev->input_lock;
+ dev->sd.ctrl_handler = &dev->ctrl_handler;
+
+ /* REVISIT: Do we need media controller? */
+ ret = media_entity_pads_init(&dev->sd.entity, 1, &dev->pad);
+ if (ret) {
+ mt9m114_remove(client);
+ return ret;
+ }
+ return 0;
+}
+
+MODULE_DEVICE_TABLE(i2c, mt9m114_id);
+
+static const struct acpi_device_id mt9m114_acpi_match[] = {
+ { "INT33F0" },
+ { "CRMT1040" },
+ {},
+};
+
+MODULE_DEVICE_TABLE(acpi, mt9m114_acpi_match);
+
+static struct i2c_driver mt9m114_driver = {
+ .driver = {
+ .name = "mt9m114",
+ .acpi_match_table = ACPI_PTR(mt9m114_acpi_match),
+ },
+ .probe = mt9m114_probe,
+ .remove = mt9m114_remove,
+ .id_table = mt9m114_id,
+};
+
+static __init int init_mt9m114(void)
+{
+ return i2c_add_driver(&mt9m114_driver);
+}
+
+static __exit void exit_mt9m114(void)
+{
+ i2c_del_driver(&mt9m114_driver);
+}
+
+module_init(init_mt9m114);
+module_exit(exit_mt9m114);
+
+MODULE_AUTHOR("Shuguang Gong <Shuguang.gong@intel.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/media/atomisp/i2c/mt9m114.h b/drivers/staging/media/atomisp/i2c/mt9m114.h
new file mode 100644
index 000000000000..5e7d79d2e01b
--- /dev/null
+++ b/drivers/staging/media/atomisp/i2c/mt9m114.h
@@ -0,0 +1,1786 @@
+/*
+ * Support for mt9m114 Camera Sensor.
+ *
+ * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+
+#ifndef __A1040_H__
+#define __A1040_H__
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/i2c.h>
+#include <linux/delay.h>
+#include <linux/videodev2.h>
+#include <linux/spinlock.h>
+#include <media/v4l2-subdev.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ctrls.h>
+#include <linux/v4l2-mediabus.h>
+#include <media/media-entity.h>
+#include "../include/linux/atomisp_platform.h"
+#include "../include/linux/atomisp.h"
+
+#define V4L2_IDENT_MT9M114 8245
+
+#define MT9P111_REV3
+#define FULLINISUPPORT
+
+/* #defines for register writes and register array processing */
+#define MISENSOR_8BIT 1
+#define MISENSOR_16BIT 2
+#define MISENSOR_32BIT 4
+
+#define MISENSOR_FWBURST0 0x80
+#define MISENSOR_FWBURST1 0x81
+#define MISENSOR_FWBURST4 0x84
+#define MISENSOR_FWBURST 0x88
+
+#define MISENSOR_TOK_TERM 0xf000 /* terminating token for reg list */
+#define MISENSOR_TOK_DELAY 0xfe00 /* delay token for reg list */
+#define MISENSOR_TOK_FWLOAD 0xfd00 /* token indicating load FW */
+#define MISENSOR_TOK_POLL 0xfc00 /* token indicating poll instruction */
+#define MISENSOR_TOK_RMW 0x0010 /* RMW operation */
+#define MISENSOR_TOK_MASK 0xfff0
+#define MISENSOR_AWB_STEADY (1<<0) /* awb steady */
+#define MISENSOR_AE_READY (1<<3) /* ae status ready */
+
+/* mask to set sensor read_mode via misensor_rmw_reg */
+#define MISENSOR_R_MODE_MASK 0x0330
+/* mask to set sensor vert_flip and horz_mirror */
+#define MISENSOR_VFLIP_MASK 0x0002
+#define MISENSOR_HFLIP_MASK 0x0001
+#define MISENSOR_FLIP_EN 1
+#define MISENSOR_FLIP_DIS 0
+
+/* bits set to set sensor read_mode via misensor_rmw_reg */
+#define MISENSOR_SKIPPING_SET 0x0011
+#define MISENSOR_SUMMING_SET 0x0033
+#define MISENSOR_NORMAL_SET 0x0000
+
+/* sensor register that control sensor read-mode and mirror */
+#define MISENSOR_READ_MODE 0xC834
+/* sensor ae-track status register */
+#define MISENSOR_AE_TRACK_STATUS 0xA800
+/* sensor awb status register */
+#define MISENSOR_AWB_STATUS 0xAC00
+/* sensor coarse integration time register */
+#define MISENSOR_COARSE_INTEGRATION_TIME 0xC83C
+
+/* registers */
+#define REG_SW_RESET 0x301A
+#define REG_SW_STREAM 0xDC00
+#define REG_SCCB_CTRL 0x3100
+#define REG_SC_CMMN_CHIP_ID 0x0000
+#define REG_V_START 0xc800 /* 16bits */
+#define REG_H_START 0xc802 /* 16bits */
+#define REG_V_END 0xc804 /* 16bits */
+#define REG_H_END 0xc806 /* 16bits */
+#define REG_PIXEL_CLK 0xc808 /* 32bits */
+#define REG_TIMING_VTS 0xc812 /* 16bits */
+#define REG_TIMING_HTS 0xc814 /* 16bits */
+#define REG_WIDTH 0xC868 /* 16bits */
+#define REG_HEIGHT 0xC86A /* 16bits */
+#define REG_EXPO_COARSE 0x3012 /* 16bits */
+#define REG_EXPO_FINE 0x3014 /* 16bits */
+#define REG_GAIN 0x305E
+#define REG_ANALOGGAIN 0x305F
+#define REG_ADDR_ACESSS 0x098E /* logical_address_access */
+#define REG_COMM_Register 0x0080 /* command_register */
+
+#define SENSOR_DETECTED 1
+#define SENSOR_NOT_DETECTED 0
+
+#define I2C_RETRY_COUNT 5
+#define MSG_LEN_OFFSET 2
+
+#ifndef MIPI_CONTROL
+#define MIPI_CONTROL 0x3400 /* MIPI_Control */
+#endif
+
+/* GPIO pin on Moorestown */
+#define GPIO_SCLK_25 44
+#define GPIO_STB_PIN 47
+
+#define GPIO_STDBY_PIN 49 /* ab:new */
+#define GPIO_RESET_PIN 50
+
+/* System control register for Aptina A-1040SOC*/
+#define MT9M114_PID 0x0
+
+/* MT9M114 device ID */
+#define MT9M114_MOD_ID 0x2481
+
+#define MT9M114_FINE_INTG_TIME_MIN 0
+#define MT9M114_FINE_INTG_TIME_MAX_MARGIN 0
+#define MT9M114_COARSE_INTG_TIME_MIN 1
+#define MT9M114_COARSE_INTG_TIME_MAX_MARGIN 6
+
+
+/* Bayer pattern (bpat) flags */
+
+#define MT9M114_BPAT_RGRGGBGB (1 << 0)
+#define MT9M114_BPAT_GRGRBGBG (1 << 1)
+#define MT9M114_BPAT_GBGBRGRG (1 << 2)
+#define MT9M114_BPAT_BGBGGRGR (1 << 3)
+
+#define MT9M114_FOCAL_LENGTH_NUM 208 /*2.08mm*/
+#define MT9M114_FOCAL_LENGTH_DEM 100
+#define MT9M114_F_NUMBER_DEFAULT_NUM 24
+#define MT9M114_F_NUMBER_DEM 10
+#define MT9M114_WAIT_STAT_TIMEOUT 100
+#define MT9M114_FLICKER_MODE_50HZ 1
+#define MT9M114_FLICKER_MODE_60HZ 2
+/*
+ * focal length bits definition:
+ * bits 31-16: numerator, bits 15-0: denominator
+ */
+#define MT9M114_FOCAL_LENGTH_DEFAULT 0xD00064
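+/* 0xD00064 decodes to 208/100, i.e. the 2.08mm focal length above */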
+
+/*
+ * current f-number bits definition:
+ * bits 31-16: numerator, bits 15-0: denominator
+ */
+#define MT9M114_F_NUMBER_DEFAULT 0x18000a
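+/* 0x18000a decodes to 24/10, i.e. the f/2.4 aperture above */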
+
+/*
+ * f-number range bits definition:
+ * bits 31-24: max f-number numerator
+ * bits 23-16: max f-number denominator
+ * bits 15-8: min f-number numerator
+ * bits 7-0: min f-number denominator
+ */
+#define MT9M114_F_NUMBER_RANGE 0x180a180a
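+/* 0x180a180a decodes to max 24/10 and min 24/10, i.e. a fixed f/2.4 lens */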
+
+/* Supported resolutions */
+enum {
+ MT9M114_RES_736P,
+ MT9M114_RES_864P,
+ MT9M114_RES_960P,
+};
+#define MT9M114_RES_960P_SIZE_H 1296
+#define MT9M114_RES_960P_SIZE_V 976
+#define MT9M114_RES_720P_SIZE_H 1280
+#define MT9M114_RES_720P_SIZE_V 720
+#define MT9M114_RES_576P_SIZE_H 1024
+#define MT9M114_RES_576P_SIZE_V 576
+#define MT9M114_RES_480P_SIZE_H 768
+#define MT9M114_RES_480P_SIZE_V 480
+#define MT9M114_RES_VGA_SIZE_H 640
+#define MT9M114_RES_VGA_SIZE_V 480
+#define MT9M114_RES_QVGA_SIZE_H 320
+#define MT9M114_RES_QVGA_SIZE_V 240
+#define MT9M114_RES_QCIF_SIZE_H 176
+#define MT9M114_RES_QCIF_SIZE_V 144
+
+#define MT9M114_RES_720_480p_768_SIZE_H 736
+#define MT9M114_RES_720_480p_768_SIZE_V 496
+#define MT9M114_RES_736P_SIZE_H 1296
+#define MT9M114_RES_736P_SIZE_V 736
+#define MT9M114_RES_864P_SIZE_H 1296
+#define MT9M114_RES_864P_SIZE_V 864
+#define MT9M114_RES_976P_SIZE_H 1296
+#define MT9M114_RES_976P_SIZE_V 976
+
+#define MT9M114_BIN_FACTOR_MAX 3
+
+#define MT9M114_DEFAULT_FIRST_EXP 0x10
+#define MT9M114_MAX_FIRST_EXP 0x302
+
+/* completion status polling requirements, usage based on Aptina .INI Rev2 */
+enum poll_reg {
+ NO_POLLING,
+ PRE_POLLING,
+ POST_POLLING,
+};
+/*
+ * struct misensor_reg - MI sensor register format
+ * @length: length of the register
+ * @reg: 16-bit offset to register
+ * @val: 8/16/32-bit register value
+ * Define a structure for sensor register initialization values
+ */
+struct misensor_reg {
+ u32 length;
+ u32 reg;
+ u32 val; /* value or for read/mod/write, AND mask */
+ u32 val2; /* optional; for rmw, OR mask */
+};
+
+/*
+ * struct misensor_fwreg - Firmware burst command
+ * @type: FW burst or 8/16 bit register
+ * @addr: 16-bit offset to register or other values depending on type
+ * @valx: data value for burst (or other commands)
+ *
+ * Define a structure for sensor register initialization values
+ */
+struct misensor_fwreg {
+ u32 type; /* type of value, register or FW burst string */
+ u32 addr; /* target address */
+ u32 val0;
+ u32 val1;
+ u32 val2;
+ u32 val3;
+ u32 val4;
+ u32 val5;
+ u32 val6;
+ u32 val7;
+};
+
+struct regval_list {
+ u16 reg_num;
+ u8 value;
+};
+
+struct mt9m114_device {
+ struct v4l2_subdev sd;
+ struct media_pad pad;
+ struct v4l2_mbus_framefmt format;
+
+ struct camera_sensor_platform_data *platform_data;
+ struct mutex input_lock; /* serialize sensor's ioctl */
+ struct v4l2_ctrl_handler ctrl_handler;
+ int real_model_id;
+ int nctx;
+ int power;
+
+ unsigned int bus_width;
+ unsigned int mode;
+ unsigned int field_inv;
+ unsigned int field_sel;
+ unsigned int ycseq;
+ unsigned int conv422;
+ unsigned int bpat;
+ unsigned int hpol;
+ unsigned int vpol;
+ unsigned int edge;
+ unsigned int bls;
+ unsigned int gamma;
+ unsigned int cconv;
+ unsigned int res;
+ unsigned int dwn_sz;
+ unsigned int blc;
+ unsigned int agc;
+ unsigned int awb;
+ unsigned int aec;
+ /* extension SENSOR version 2 */
+ unsigned int cie_profile;
+
+ /* extension SENSOR version 3 */
+ unsigned int flicker_freq;
+
+ /* extension SENSOR version 4 */
+ unsigned int smia_mode;
+ unsigned int mipi_mode;
+
+ /* Add name here to load shared library */
+ unsigned int type;
+
+ /* Number of MIPI lanes */
+ unsigned int mipi_lanes;
+ /* Workaround for low-light AE */
+ unsigned int first_exp;
+ unsigned int first_gain;
+ unsigned int first_diggain;
+ char name[32];
+
+ u8 lightfreq;
+ u8 streamon;
+};
+
+struct mt9m114_format_struct {
+ u8 *desc;
+ u32 pixelformat;
+ struct regval_list *regs;
+};
+
+struct mt9m114_res_struct {
+ u8 *desc;
+ int res;
+ int width;
+ int height;
+ int fps;
+ int skip_frames;
+ bool used;
+ struct regval_list *regs;
+ u16 pixels_per_line;
+ u16 lines_per_frame;
+ u8 bin_factor_x;
+ u8 bin_factor_y;
+ u8 bin_mode;
+};
+
+/* 2 bytes used for address: 256 bytes total */
+#define MT9M114_MAX_WRITE_BUF_SIZE 254
+struct mt9m114_write_buffer {
+ u16 addr;
+ u8 data[MT9M114_MAX_WRITE_BUF_SIZE];
+};
+
+struct mt9m114_write_ctrl {
+ int index;
+ struct mt9m114_write_buffer buffer;
+};
+
+/*
+ * Modes supported by the mt9m114 driver.
+ * Please, keep them in ascending order.
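+ *
+ * pixels_per_line/lines_per_frame mirror the per-mode register tables
+ * below, e.g. 0x0640/0x0307 (1600/775) for the 736P mode.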
+ */
+static struct mt9m114_res_struct mt9m114_res[] = {
+ {
+ .desc = "720P",
+ .res = MT9M114_RES_736P,
+ .width = 1296,
+ .height = 736,
+ .fps = 30,
+ .used = false,
+ .regs = NULL,
+ .skip_frames = 1,
+
+ .pixels_per_line = 0x0640,
+ .lines_per_frame = 0x0307,
+ .bin_factor_x = 1,
+ .bin_factor_y = 1,
+ .bin_mode = 0,
+ },
+ {
+ .desc = "848P",
+ .res = MT9M114_RES_864P,
+ .width = 1296,
+ .height = 864,
+ .fps = 30,
+ .used = false,
+ .regs = NULL,
+ .skip_frames = 1,
+
+ .pixels_per_line = 0x0640,
+ .lines_per_frame = 0x03E8,
+ .bin_factor_x = 1,
+ .bin_factor_y = 1,
+ .bin_mode = 0,
+ },
+ {
+ .desc = "960P",
+ .res = MT9M114_RES_960P,
+ .width = 1296,
+ .height = 976,
+ .fps = 30,
+ .used = false,
+ .regs = NULL,
+ .skip_frames = 1,
+
+ .pixels_per_line = 0x0644, /* consistent with regs arrays */
+ .lines_per_frame = 0x03E5, /* consistent with regs arrays */
+ .bin_factor_x = 1,
+ .bin_factor_y = 1,
+ .bin_mode = 0,
+ },
+};
+#define N_RES (ARRAY_SIZE(mt9m114_res))
+
+static const struct i2c_device_id mt9m114_id[] = {
+ {"mt9m114", 0},
+ {}
+};
+
+static struct misensor_reg const mt9m114_exitstandby[] = {
+ {MISENSOR_16BIT, 0x098E, 0xDC00},
+ /* exit-standby */
+ {MISENSOR_8BIT, 0xDC00, 0x54},
+ {MISENSOR_16BIT, 0x0080, 0x8002},
+ {MISENSOR_TOK_TERM, 0, 0}
+};
+
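+/*
+ * 5x5 exposure-metering weight tables covering registers 0xA407-0xA41F:
+ * 0x64 (100) gives a window full weight, 0x00 excludes it, and the center
+ * table grades 0x19/0x4B/0x64 (25/75/100) for center-weighted metering.
+ * Used by mt9m114_s_exposure_selection() and the exposure-metering control.
+ */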
+static struct misensor_reg const mt9m114_exp_win[5][5] = {
+ {
+ {MISENSOR_8BIT, 0xA407, 0x64},
+ {MISENSOR_8BIT, 0xA408, 0x64},
+ {MISENSOR_8BIT, 0xA409, 0x64},
+ {MISENSOR_8BIT, 0xA40A, 0x64},
+ {MISENSOR_8BIT, 0xA40B, 0x64},
+ },
+ {
+ {MISENSOR_8BIT, 0xA40C, 0x64},
+ {MISENSOR_8BIT, 0xA40D, 0x64},
+ {MISENSOR_8BIT, 0xA40E, 0x64},
+ {MISENSOR_8BIT, 0xA40F, 0x64},
+ {MISENSOR_8BIT, 0xA410, 0x64},
+ },
+ {
+ {MISENSOR_8BIT, 0xA411, 0x64},
+ {MISENSOR_8BIT, 0xA412, 0x64},
+ {MISENSOR_8BIT, 0xA413, 0x64},
+ {MISENSOR_8BIT, 0xA414, 0x64},
+ {MISENSOR_8BIT, 0xA415, 0x64},
+ },
+ {
+ {MISENSOR_8BIT, 0xA416, 0x64},
+ {MISENSOR_8BIT, 0xA417, 0x64},
+ {MISENSOR_8BIT, 0xA418, 0x64},
+ {MISENSOR_8BIT, 0xA419, 0x64},
+ {MISENSOR_8BIT, 0xA41A, 0x64},
+ },
+ {
+ {MISENSOR_8BIT, 0xA41B, 0x64},
+ {MISENSOR_8BIT, 0xA41C, 0x64},
+ {MISENSOR_8BIT, 0xA41D, 0x64},
+ {MISENSOR_8BIT, 0xA41E, 0x64},
+ {MISENSOR_8BIT, 0xA41F, 0x64},
+ },
+};
+
+static struct misensor_reg const mt9m114_exp_average[] = {
+ {MISENSOR_8BIT, 0xA407, 0x00},
+ {MISENSOR_8BIT, 0xA408, 0x00},
+ {MISENSOR_8BIT, 0xA409, 0x00},
+ {MISENSOR_8BIT, 0xA40A, 0x00},
+ {MISENSOR_8BIT, 0xA40B, 0x00},
+ {MISENSOR_8BIT, 0xA40C, 0x00},
+ {MISENSOR_8BIT, 0xA40D, 0x00},
+ {MISENSOR_8BIT, 0xA40E, 0x00},
+ {MISENSOR_8BIT, 0xA40F, 0x00},
+ {MISENSOR_8BIT, 0xA410, 0x00},
+ {MISENSOR_8BIT, 0xA411, 0x00},
+ {MISENSOR_8BIT, 0xA412, 0x00},
+ {MISENSOR_8BIT, 0xA413, 0x00},
+ {MISENSOR_8BIT, 0xA414, 0x00},
+ {MISENSOR_8BIT, 0xA415, 0x00},
+ {MISENSOR_8BIT, 0xA416, 0x00},
+ {MISENSOR_8BIT, 0xA417, 0x00},
+ {MISENSOR_8BIT, 0xA418, 0x00},
+ {MISENSOR_8BIT, 0xA419, 0x00},
+ {MISENSOR_8BIT, 0xA41A, 0x00},
+ {MISENSOR_8BIT, 0xA41B, 0x00},
+ {MISENSOR_8BIT, 0xA41C, 0x00},
+ {MISENSOR_8BIT, 0xA41D, 0x00},
+ {MISENSOR_8BIT, 0xA41E, 0x00},
+ {MISENSOR_8BIT, 0xA41F, 0x00},
+ {MISENSOR_TOK_TERM, 0, 0}
+};
+
+static struct misensor_reg const mt9m114_exp_center[] = {
+ {MISENSOR_8BIT, 0xA407, 0x19},
+ {MISENSOR_8BIT, 0xA408, 0x19},
+ {MISENSOR_8BIT, 0xA409, 0x19},
+ {MISENSOR_8BIT, 0xA40A, 0x19},
+ {MISENSOR_8BIT, 0xA40B, 0x19},
+ {MISENSOR_8BIT, 0xA40C, 0x19},
+ {MISENSOR_8BIT, 0xA40D, 0x4B},
+ {MISENSOR_8BIT, 0xA40E, 0x4B},
+ {MISENSOR_8BIT, 0xA40F, 0x4B},
+ {MISENSOR_8BIT, 0xA410, 0x19},
+ {MISENSOR_8BIT, 0xA411, 0x19},
+ {MISENSOR_8BIT, 0xA412, 0x4B},
+ {MISENSOR_8BIT, 0xA413, 0x64},
+ {MISENSOR_8BIT, 0xA414, 0x4B},
+ {MISENSOR_8BIT, 0xA415, 0x19},
+ {MISENSOR_8BIT, 0xA416, 0x19},
+ {MISENSOR_8BIT, 0xA417, 0x4B},
+ {MISENSOR_8BIT, 0xA418, 0x4B},
+ {MISENSOR_8BIT, 0xA419, 0x4B},
+ {MISENSOR_8BIT, 0xA41A, 0x19},
+ {MISENSOR_8BIT, 0xA41B, 0x19},
+ {MISENSOR_8BIT, 0xA41C, 0x19},
+ {MISENSOR_8BIT, 0xA41D, 0x19},
+ {MISENSOR_8BIT, 0xA41E, 0x19},
+ {MISENSOR_8BIT, 0xA41F, 0x19},
+ {MISENSOR_TOK_TERM, 0, 0}
+};
+
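+/*
+ * The system-state tables below share one pattern: select register 0xDC00
+ * (REG_SW_STREAM) through the 0x098E logical-address window, write the
+ * desired next state (0x40 suspend, 0x34 streaming, 0x50 standby,
+ * 0x54 wake-up, 0x28 change-config), then write 0x8002 to the command
+ * register (0x0080) to trigger the transition.
+ */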
+static struct misensor_reg const mt9m114_suspend[] = {
+ {MISENSOR_16BIT, 0x098E, 0xDC00},
+ {MISENSOR_8BIT, 0xDC00, 0x40},
+ {MISENSOR_16BIT, 0x0080, 0x8002},
+ {MISENSOR_TOK_TERM, 0, 0}
+};
+
+static struct misensor_reg const mt9m114_streaming[] = {
+ {MISENSOR_16BIT, 0x098E, 0xDC00},
+ {MISENSOR_8BIT, 0xDC00, 0x34},
+ {MISENSOR_16BIT, 0x0080, 0x8002},
+ {MISENSOR_TOK_TERM, 0, 0}
+};
+
+static struct misensor_reg const mt9m114_standby_reg[] = {
+ {MISENSOR_16BIT, 0x098E, 0xDC00},
+ {MISENSOR_8BIT, 0xDC00, 0x50},
+ {MISENSOR_16BIT, 0x0080, 0x8002},
+ {MISENSOR_TOK_TERM, 0, 0}
+};
+
+static struct misensor_reg const mt9m114_wakeup_reg[] = {
+ {MISENSOR_16BIT, 0x098E, 0xDC00},
+ {MISENSOR_8BIT, 0xDC00, 0x54},
+ {MISENSOR_16BIT, 0x0080, 0x8002},
+ {MISENSOR_TOK_TERM, 0, 0}
+};
+
+static struct misensor_reg const mt9m114_chgstat_reg[] = {
+ {MISENSOR_16BIT, 0x098E, 0xDC00},
+ {MISENSOR_8BIT, 0xDC00, 0x28},
+ {MISENSOR_16BIT, 0x0080, 0x8002},
+ {MISENSOR_TOK_TERM, 0, 0}
+};
+
+/* [1296x976_30fps] - Intel */
+static struct misensor_reg const mt9m114_960P_init[] = {
+ {MISENSOR_16BIT, 0x098E, 0x1000},
+ {MISENSOR_8BIT, 0xC97E, 0x01}, /* cam_sysctl_pll_enable = 1 */
+ {MISENSOR_16BIT, 0xC980, 0x0128}, /* cam_sysctl_pll_divider_m_n = 276 */
+ {MISENSOR_16BIT, 0xC982, 0x0700}, /* cam_sysctl_pll_divider_p = 1792 */
+ {MISENSOR_16BIT, 0xC800, 0x0000}, /* cam_sensor_cfg_y_addr_start = 0 */
+ {MISENSOR_16BIT, 0xC802, 0x0000}, /* cam_sensor_cfg_x_addr_start = 0 */
+ {MISENSOR_16BIT, 0xC804, 0x03CF}, /* cam_sensor_cfg_y_addr_end = 971 */
+ {MISENSOR_16BIT, 0xC806, 0x050F}, /* cam_sensor_cfg_x_addr_end = 1291 */
+ {MISENSOR_16BIT, 0xC808, 0x02DC}, /* cam_sensor_cfg_pixclk = 48000000 */
+ {MISENSOR_16BIT, 0xC80A, 0x6C00},
+ {MISENSOR_16BIT, 0xC80C, 0x0001}, /* cam_sensor_cfg_row_speed = 1 */
+ /* cam_sensor_cfg_fine_integ_time_min = 219 */
+ {MISENSOR_16BIT, 0xC80E, 0x00DB},
+ /* cam_sensor_cfg_fine_integ_time_max = 1459 */
+ {MISENSOR_16BIT, 0xC810, 0x05B3},
+ /* cam_sensor_cfg_frame_length_lines = 1006 */
+ {MISENSOR_16BIT, 0xC812, 0x03F6},
+ /* cam_sensor_cfg_line_length_pck = 1590 */
+ {MISENSOR_16BIT, 0xC814, 0x063E},
+ /* cam_sensor_cfg_fine_correction = 96 */
+ {MISENSOR_16BIT, 0xC816, 0x0060},
+ /* cam_sensor_cfg_cpipe_last_row = 963 */
+ {MISENSOR_16BIT, 0xC818, 0x03C3},
+ {MISENSOR_16BIT, 0xC826, 0x0020}, /* cam_sensor_cfg_reg_0_data = 32 */
+ {MISENSOR_16BIT, 0xC834, 0x0000}, /* cam_sensor_control_read_mode = 0 */
+ {MISENSOR_16BIT, 0xC854, 0x0000}, /* cam_crop_window_xoffset = 0 */
+ {MISENSOR_16BIT, 0xC856, 0x0000}, /* cam_crop_window_yoffset = 0 */
+ {MISENSOR_16BIT, 0xC858, 0x0508}, /* cam_crop_window_width = 1280 */
+ {MISENSOR_16BIT, 0xC85A, 0x03C8}, /* cam_crop_window_height = 960 */
+ {MISENSOR_8BIT, 0xC85C, 0x03}, /* cam_crop_cropmode = 3 */
+ {MISENSOR_16BIT, 0xC868, 0x0508}, /* cam_output_width = 1280 */
+ {MISENSOR_16BIT, 0xC86A, 0x03C8}, /* cam_output_height = 960 */
+ {MISENSOR_TOK_TERM, 0, 0},
+};
+
+/* [1296x976_30fps_768Mbps] */
+static struct misensor_reg const mt9m114_976P_init[] = {
+ {MISENSOR_16BIT, 0x98E, 0x1000},
+ {MISENSOR_8BIT, 0xC97E, 0x01}, /* cam_sysctl_pll_enable = 1 */
+ {MISENSOR_16BIT, 0xC980, 0x0128}, /* cam_sysctl_pll_divider_m_n = 276 */
+ {MISENSOR_16BIT, 0xC982, 0x0700}, /* cam_sysctl_pll_divider_p = 1792 */
+ {MISENSOR_16BIT, 0xC800, 0x0000}, /* cam_sensor_cfg_y_addr_start = 0 */
+ {MISENSOR_16BIT, 0xC802, 0x0000}, /* cam_sensor_cfg_x_addr_start = 0 */
+ {MISENSOR_16BIT, 0xC804, 0x03CF}, /* cam_sensor_cfg_y_addr_end = 975 */
+ {MISENSOR_16BIT, 0xC806, 0x050F}, /* cam_sensor_cfg_x_addr_end = 1295 */
+ {MISENSOR_32BIT, 0xC808, 0x2DC6C00}, /* cam_sensor_cfg_pixclk = 48000000 */
+ {MISENSOR_16BIT, 0xC80C, 0x0001}, /* cam_sensor_cfg_row_speed = 1 */
+ /* cam_sensor_cfg_fine_integ_time_min = 219 */
+ {MISENSOR_16BIT, 0xC80E, 0x00DB},
+ /* 0x062E //cam_sensor_cfg_fine_integ_time_max = 1459 */
+ {MISENSOR_16BIT, 0xC810, 0x05B3},
+ /* 0x074C //cam_sensor_cfg_frame_length_lines = 1006 */
+ {MISENSOR_16BIT, 0xC812, 0x03E5},
+ /* 0x06B1 //cam_sensor_cfg_line_length_pck = 1590 */
+ {MISENSOR_16BIT, 0xC814, 0x0644},
+ /* cam_sensor_cfg_fine_correction = 96 */
+ {MISENSOR_16BIT, 0xC816, 0x0060},
+ /* cam_sensor_cfg_cpipe_last_row = 963 */
+ {MISENSOR_16BIT, 0xC818, 0x03C3},
+ {MISENSOR_16BIT, 0xC826, 0x0020}, /* cam_sensor_cfg_reg_0_data = 32 */
+ {MISENSOR_16BIT, 0xC834, 0x0000}, /* cam_sensor_control_read_mode = 0 */
+ {MISENSOR_16BIT, 0xC854, 0x0000}, /* cam_crop_window_xoffset = 0 */
+ {MISENSOR_16BIT, 0xC856, 0x0000}, /* cam_crop_window_yoffset = 0 */
+ {MISENSOR_16BIT, 0xC858, 0x0508}, /* cam_crop_window_width = 1288 */
+ {MISENSOR_16BIT, 0xC85A, 0x03C8}, /* cam_crop_window_height = 968 */
+ {MISENSOR_8BIT, 0xC85C, 0x03}, /* cam_crop_cropmode = 3 */
+ {MISENSOR_16BIT, 0xC868, 0x0508}, /* cam_output_width = 1288 */
+ {MISENSOR_16BIT, 0xC86A, 0x03C8}, /* cam_output_height = 968 */
+ {MISENSOR_8BIT, 0xC878, 0x00}, /* 0x0E //cam_aet_aemode = 0 */
+ {MISENSOR_TOK_TERM, 0, 0}
+};
+
+/* [1296x864_30fps] */
+static struct misensor_reg const mt9m114_864P_init[] = {
+ {MISENSOR_16BIT, 0x98E, 0x1000},
+ {MISENSOR_8BIT, 0xC97E, 0x01}, /* cam_sysctl_pll_enable = 1 */
+ {MISENSOR_16BIT, 0xC980, 0x0128}, /* cam_sysctl_pll_divider_m_n = 276 */
+ {MISENSOR_16BIT, 0xC982, 0x0700}, /* cam_sysctl_pll_divider_p = 1792 */
+ {MISENSOR_16BIT, 0xC800, 0x0038}, /* cam_sensor_cfg_y_addr_start = 56 */
+ {MISENSOR_16BIT, 0xC802, 0x0000}, /* cam_sensor_cfg_x_addr_start = 0 */
+ {MISENSOR_16BIT, 0xC804, 0x0397}, /* cam_sensor_cfg_y_addr_end = 919 */
+ {MISENSOR_16BIT, 0xC806, 0x050F}, /* cam_sensor_cfg_x_addr_end = 1295 */
+ /* cam_sensor_cfg_pixclk = 48000000 */
+ {MISENSOR_32BIT, 0xC808, 0x2DC6C00},
+ {MISENSOR_16BIT, 0xC80C, 0x0001}, /* cam_sensor_cfg_row_speed = 1 */
+ /* cam_sensor_cfg_fine_integ_time_min = 219 */
+ {MISENSOR_16BIT, 0xC80E, 0x00DB},
+ /* cam_sensor_cfg_fine_integ_time_max = 1469 */
+ {MISENSOR_16BIT, 0xC810, 0x05BD},
+ /* cam_sensor_cfg_frame_length_lines = 1000 */
+ {MISENSOR_16BIT, 0xC812, 0x03E8},
+ /* cam_sensor_cfg_line_length_pck = 1600 */
+ {MISENSOR_16BIT, 0xC814, 0x0640},
+ /* cam_sensor_cfg_fine_correction = 96 */
+ {MISENSOR_16BIT, 0xC816, 0x0060},
+ /* cam_sensor_cfg_cpipe_last_row = 859 */
+ {MISENSOR_16BIT, 0xC818, 0x035B},
+ {MISENSOR_16BIT, 0xC826, 0x0020}, /* cam_sensor_cfg_reg_0_data = 32 */
+ {MISENSOR_16BIT, 0xC834, 0x0000}, /* cam_sensor_control_read_mode = 0 */
+ {MISENSOR_16BIT, 0xC854, 0x0000}, /* cam_crop_window_xoffset = 0 */
+ {MISENSOR_16BIT, 0xC856, 0x0000}, /* cam_crop_window_yoffset = 0 */
+ {MISENSOR_16BIT, 0xC858, 0x0508}, /* cam_crop_window_width = 1288 */
+ {MISENSOR_16BIT, 0xC85A, 0x0358}, /* cam_crop_window_height = 856 */
+ {MISENSOR_8BIT, 0xC85C, 0x03}, /* cam_crop_cropmode = 3 */
+ {MISENSOR_16BIT, 0xC868, 0x0508}, /* cam_output_width = 1288 */
+ {MISENSOR_16BIT, 0xC86A, 0x0358}, /* cam_output_height = 856 */
+ {MISENSOR_8BIT, 0xC878, 0x00}, /* 0x0E //cam_aet_aemode = 0 */
+ {MISENSOR_TOK_TERM, 0, 0}
+};
+
+/* [1296x736_30fps] */
+static struct misensor_reg const mt9m114_736P_init[] = {
+ {MISENSOR_16BIT, 0x98E, 0x1000},
+ {MISENSOR_8BIT, 0xC97E, 0x01}, /* cam_sysctl_pll_enable = 1 */
+ {MISENSOR_16BIT, 0xC980, 0x011F}, /* cam_sysctl_pll_divider_m_n = 287 */
+ {MISENSOR_16BIT, 0xC982, 0x0700}, /* cam_sysctl_pll_divider_p = 1792 */
+ {MISENSOR_16BIT, 0xC800, 0x0078}, /* cam_sensor_cfg_y_addr_start = 120*/
+ {MISENSOR_16BIT, 0xC802, 0x0000}, /* cam_sensor_cfg_x_addr_start = 0 */
+ {MISENSOR_16BIT, 0xC804, 0x0357}, /* cam_sensor_cfg_y_addr_end = 855 */
+ {MISENSOR_16BIT, 0xC806, 0x050F}, /* cam_sensor_cfg_x_addr_end = 1295 */
+ {MISENSOR_32BIT, 0xC808, 0x237A07F}, /* cam_sensor_cfg_pixclk=37199999*/
+ {MISENSOR_16BIT, 0xC80C, 0x0001}, /* cam_sensor_cfg_row_speed = 1 */
+ /* cam_sensor_cfg_fine_integ_time_min = 219 */
+ {MISENSOR_16BIT, 0xC80E, 0x00DB},
+ /* 0x062E //cam_sensor_cfg_fine_integ_time_max = 1469 */
+ {MISENSOR_16BIT, 0xC810, 0x05BD},
+ /* 0x074C //cam_sensor_cfg_frame_length_lines = 775 */
+ {MISENSOR_16BIT, 0xC812, 0x0307},
+ /* 0x06B1 //cam_sensor_cfg_line_length_pck = 1600 */
+ {MISENSOR_16BIT, 0xC814, 0x0640},
+ /* cam_sensor_cfg_fine_correction = 96 */
+ {MISENSOR_16BIT, 0xC816, 0x0060},
+ /* cam_sensor_cfg_cpipe_last_row = 731 */
+ {MISENSOR_16BIT, 0xC818, 0x02DB},
+ {MISENSOR_16BIT, 0xC826, 0x0020}, /* cam_sensor_cfg_reg_0_data = 32 */
+ {MISENSOR_16BIT, 0xC834, 0x0000}, /* cam_sensor_control_read_mode = 0 */
+ {MISENSOR_16BIT, 0xC854, 0x0000}, /* cam_crop_window_xoffset = 0 */
+ {MISENSOR_16BIT, 0xC856, 0x0000}, /* cam_crop_window_yoffset = 0 */
+ {MISENSOR_16BIT, 0xC858, 0x0508}, /* cam_crop_window_width = 1288 */
+ {MISENSOR_16BIT, 0xC85A, 0x02D8}, /* cam_crop_window_height = 728 */
+ {MISENSOR_8BIT, 0xC85C, 0x03}, /* cam_crop_cropmode = 3 */
+ {MISENSOR_16BIT, 0xC868, 0x0508}, /* cam_output_width = 1288 */
+ {MISENSOR_16BIT, 0xC86A, 0x02D8}, /* cam_output_height = 728 */
+ {MISENSOR_8BIT, 0xC878, 0x00}, /* 0x0E //cam_aet_aemode = 0 */
+ {MISENSOR_TOK_TERM, 0, 0}
+};
+
+/* [736x496_30fps_768Mbps] */
+static struct misensor_reg const mt9m114_720_480P_init[] = {
+ {MISENSOR_16BIT, 0x98E, 0x1000},
+ {MISENSOR_8BIT, 0xC97E, 0x01}, /* cam_sysctl_pll_enable = 1 */
+ {MISENSOR_16BIT, 0xC980, 0x0128}, /* cam_sysctl_pll_divider_m_n = 276 */
+ {MISENSOR_16BIT, 0xC982, 0x0700}, /* cam_sysctl_pll_divider_p = 1792 */
+ {MISENSOR_16BIT, 0xC800, 0x00F0}, /* cam_sensor_cfg_y_addr_start = 240*/
+ {MISENSOR_16BIT, 0xC802, 0x0118}, /* cam_sensor_cfg_x_addr_start = 280*/
+ {MISENSOR_16BIT, 0xC804, 0x02DF}, /* cam_sensor_cfg_y_addr_end = 735 */
+ {MISENSOR_16BIT, 0xC806, 0x03F7}, /* cam_sensor_cfg_x_addr_end = 1015 */
+ /* cam_sensor_cfg_pixclk = 48000000 */
+ {MISENSOR_32BIT, 0xC808, 0x2DC6C00},
+ {MISENSOR_16BIT, 0xC80C, 0x0001}, /* cam_sensor_cfg_row_speed = 1 */
+ /* cam_sensor_cfg_fine_integ_time_min = 219 */
+ {MISENSOR_16BIT, 0xC80E, 0x00DB},
+ /* 0x062E //cam_sensor_cfg_fine_integ_time_max = 1459 */
+ {MISENSOR_16BIT, 0xC810, 0x05B3},
+ /* 0x074C //cam_sensor_cfg_frame_length_lines = 997 */
+ {MISENSOR_16BIT, 0xC812, 0x03E5},
+ /* 0x06B1 //cam_sensor_cfg_line_length_pck = 1604 */
+ {MISENSOR_16BIT, 0xC814, 0x0644},
+ /* cam_sensor_cfg_fine_correction = 96 */
+ {MISENSOR_16BIT, 0xC816, 0x0060},
+ {MISENSOR_16BIT, 0xC818, 0x03C3}, /* cam_sensor_cfg_cpipe_last_row=963*/
+ {MISENSOR_16BIT, 0xC826, 0x0020}, /* cam_sensor_cfg_reg_0_data = 32 */
+ {MISENSOR_16BIT, 0xC834, 0x0000}, /* cam_sensor_control_read_mode = 0*/
+ {MISENSOR_16BIT, 0xC854, 0x0000}, /* cam_crop_window_xoffset = 0 */
+ {MISENSOR_16BIT, 0xC856, 0x0000}, /* cam_crop_window_yoffset = 0 */
+ {MISENSOR_16BIT, 0xC858, 0x02D8}, /* cam_crop_window_width = 728 */
+ {MISENSOR_16BIT, 0xC85A, 0x01E8}, /* cam_crop_window_height = 488 */
+ {MISENSOR_8BIT, 0xC85C, 0x03}, /* cam_crop_cropmode = 3 */
+ {MISENSOR_16BIT, 0xC868, 0x02D8}, /* cam_output_width = 728 */
+ {MISENSOR_16BIT, 0xC86A, 0x01E8}, /* cam_output_height = 488 */
+ {MISENSOR_8BIT, 0xC878, 0x00}, /* 0x0E //cam_aet_aemode = 0 */
+ {MISENSOR_TOK_TERM, 0, 0}
+};
+
+static struct misensor_reg const mt9m114_common[] = {
+ /* reset */
+ {MISENSOR_16BIT, 0x301A, 0x0234},
+ /* LOAD = Step2-PLL_Timing //PLL and Timing */
+ {MISENSOR_16BIT, 0x098E, 0x1000}, /* LOGICAL_ADDRESS_ACCESS */
+ {MISENSOR_8BIT, 0xC97E, 0x01}, /* cam_sysctl_pll_enable = 1 */
+ {MISENSOR_16BIT, 0xC980, 0x0128}, /* cam_sysctl_pll_divider_m_n = 276 */
+ {MISENSOR_16BIT, 0xC982, 0x0700}, /* cam_sysctl_pll_divider_p = 1792 */
+ {MISENSOR_16BIT, 0xC800, 0x0000}, /* cam_sensor_cfg_y_addr_start = 216*/
+ {MISENSOR_16BIT, 0xC802, 0x0000}, /* cam_sensor_cfg_x_addr_start = 168*/
+ {MISENSOR_16BIT, 0xC804, 0x03CD}, /* cam_sensor_cfg_y_addr_end = 761 */
+ {MISENSOR_16BIT, 0xC806, 0x050D}, /* cam_sensor_cfg_x_addr_end = 1127 */
+ {MISENSOR_16BIT, 0xC808, 0x02DC}, /* cam_sensor_cfg_pixclk = 24000000 */
+ {MISENSOR_16BIT, 0xC80A, 0x6C00},
+ {MISENSOR_16BIT, 0xC80C, 0x0001}, /* cam_sensor_cfg_row_speed = 1 */
+ /* cam_sensor_cfg_fine_integ_time_min = 219 */
+ {MISENSOR_16BIT, 0xC80E, 0x01C3},
+ /* cam_sensor_cfg_fine_integ_time_max = 1149 */
+ {MISENSOR_16BIT, 0xC810, 0x03F7},
+ /* cam_sensor_cfg_frame_length_lines = 625 */
+ {MISENSOR_16BIT, 0xC812, 0x0500},
+ /* cam_sensor_cfg_line_length_pck = 1280 */
+ {MISENSOR_16BIT, 0xC814, 0x04E2},
+ /* cam_sensor_cfg_fine_correction = 96 */
+ {MISENSOR_16BIT, 0xC816, 0x00E0},
+ /* cam_sensor_cfg_cpipe_last_row = 541 */
+ {MISENSOR_16BIT, 0xC818, 0x01E3},
+ {MISENSOR_16BIT, 0xC826, 0x0020}, /* cam_sensor_cfg_reg_0_data = 32 */
+ {MISENSOR_16BIT, 0xC834, 0x0330}, /* cam_sensor_control_read_mode = 0 */
+ {MISENSOR_16BIT, 0xC854, 0x0000}, /* cam_crop_window_xoffset = 0 */
+ {MISENSOR_16BIT, 0xC856, 0x0000}, /* cam_crop_window_yoffset = 0 */
+ {MISENSOR_16BIT, 0xC858, 0x0280}, /* cam_crop_window_width = 952 */
+ {MISENSOR_16BIT, 0xC85A, 0x01E0}, /* cam_crop_window_height = 538 */
+ {MISENSOR_8BIT, 0xC85C, 0x03}, /* cam_crop_cropmode = 3 */
+ {MISENSOR_16BIT, 0xC868, 0x0280}, /* cam_output_width = 952 */
+ {MISENSOR_16BIT, 0xC86A, 0x01E0}, /* cam_output_height = 538 */
+ /* LOAD = Step3-Recommended
+ * Patch, Errata and sensor optimization settings */
+ {MISENSOR_16BIT, 0x316A, 0x8270}, /* DAC_TXLO_ROW */
+ {MISENSOR_16BIT, 0x316C, 0x8270}, /* DAC_TXLO */
+ {MISENSOR_16BIT, 0x3ED0, 0x2305}, /* DAC_LD_4_5 */
+ {MISENSOR_16BIT, 0x3ED2, 0x77CF}, /* DAC_LD_6_7 */
+ {MISENSOR_16BIT, 0x316E, 0x8202}, /* DAC_ECL */
+ {MISENSOR_16BIT, 0x3180, 0x87FF}, /* DELTA_DK_CONTROL */
+ {MISENSOR_16BIT, 0x30D4, 0x6080}, /* COLUMN_CORRECTION */
+ {MISENSOR_16BIT, 0xA802, 0x0008}, /* AE_TRACK_MODE */
+ {MISENSOR_16BIT, 0x3E14, 0xFF39}, /* SAMP_COL_PUP2 */
+ {MISENSOR_16BIT, 0x31E0, 0x0003}, /* PIX_DEF_ID */
+ /* LOAD = Step8-Features //Ports, special features, etc. */
+ {MISENSOR_16BIT, 0x098E, 0x0000}, /* LOGICAL_ADDRESS_ACCESS */
+ {MISENSOR_16BIT, 0x001E, 0x0777}, /* PAD_SLEW */
+ {MISENSOR_16BIT, 0x098E, 0x0000}, /* LOGICAL_ADDRESS_ACCESS */
+ {MISENSOR_16BIT, 0xC984, 0x8001}, /* CAM_PORT_OUTPUT_CONTROL */
+ {MISENSOR_16BIT, 0xC988, 0x0F00}, /* CAM_PORT_MIPI_TIMING_T_HS_ZERO */
+ /* CAM_PORT_MIPI_TIMING_T_HS_EXIT_HS_TRAIL */
+ {MISENSOR_16BIT, 0xC98A, 0x0B07},
+ /* CAM_PORT_MIPI_TIMING_T_CLK_POST_CLK_PRE */
+ {MISENSOR_16BIT, 0xC98C, 0x0D01},
+ /* CAM_PORT_MIPI_TIMING_T_CLK_TRAIL_CLK_ZERO */
+ {MISENSOR_16BIT, 0xC98E, 0x071D},
+ {MISENSOR_16BIT, 0xC990, 0x0006}, /* CAM_PORT_MIPI_TIMING_T_LPX */
+ {MISENSOR_16BIT, 0xC992, 0x0A0C}, /* CAM_PORT_MIPI_TIMING_INIT_TIMING */
+ {MISENSOR_16BIT, 0x3C5A, 0x0009}, /* MIPI_DELAY_TRIM */
+ {MISENSOR_16BIT, 0xC86C, 0x0210}, /* CAM_OUTPUT_FORMAT */
+ {MISENSOR_16BIT, 0xA804, 0x0000}, /* AE_TRACK_ALGO */
+ /* default exposure */
+ {MISENSOR_16BIT, 0x3012, 0x0110}, /* COMMAND_REGISTER */
+ {MISENSOR_TOK_TERM, 0, 0},
+
+};
+
+static struct misensor_reg const mt9m114_antiflicker_50hz[] = {
+ {MISENSOR_16BIT, 0x098E, 0xC88B},
+ {MISENSOR_8BIT, 0xC88B, 0x32},
+ {MISENSOR_8BIT, 0xDC00, 0x28},
+ {MISENSOR_16BIT, 0x0080, 0x8002},
+ {MISENSOR_TOK_TERM, 0, 0}
+};
+
+static struct misensor_reg const mt9m114_antiflicker_60hz[] = {
+ {MISENSOR_16BIT, 0x098E, 0xC88B},
+ {MISENSOR_8BIT, 0xC88B, 0x3C},
+ {MISENSOR_8BIT, 0xDC00, 0x28},
+ {MISENSOR_16BIT, 0x0080, 0x8002},
+ {MISENSOR_TOK_TERM, 0, 0}
+};
+
+static struct misensor_reg const mt9m114_iq[] = {
+ /* [Step3-Recommended] [Sensor optimization] */
+ {MISENSOR_16BIT, 0x316A, 0x8270},
+ {MISENSOR_16BIT, 0x316C, 0x8270},
+ {MISENSOR_16BIT, 0x3ED0, 0x2305},
+ {MISENSOR_16BIT, 0x3ED2, 0x77CF},
+ {MISENSOR_16BIT, 0x316E, 0x8202},
+ {MISENSOR_16BIT, 0x3180, 0x87FF},
+ {MISENSOR_16BIT, 0x30D4, 0x6080},
+ {MISENSOR_16BIT, 0xA802, 0x0008},
+
+ /* This register setting comes from the vendor to avoid low-light color noise */
+ {MISENSOR_16BIT, 0x31E0, 0x0001},
+
+ /* LOAD=Errata item 1 */
+ {MISENSOR_16BIT, 0x3E14, 0xFF39},
+
+ /* LOAD=Errata item 2 */
+ {MISENSOR_16BIT, 0x301A, 0x8234},
+
+ /*
+ * LOAD=Errata item 3
+ * LOAD=Patch 0202;
+ * Feature Recommended; Black level correction fix
+ */
+ {MISENSOR_16BIT, 0x0982, 0x0001},
+ {MISENSOR_16BIT, 0x098A, 0x5000},
+ {MISENSOR_16BIT, 0xD000, 0x70CF},
+ {MISENSOR_16BIT, 0xD002, 0xFFFF},
+ {MISENSOR_16BIT, 0xD004, 0xC5D4},
+ {MISENSOR_16BIT, 0xD006, 0x903A},
+ {MISENSOR_16BIT, 0xD008, 0x2144},
+ {MISENSOR_16BIT, 0xD00A, 0x0C00},
+ {MISENSOR_16BIT, 0xD00C, 0x2186},
+ {MISENSOR_16BIT, 0xD00E, 0x0FF3},
+ {MISENSOR_16BIT, 0xD010, 0xB844},
+ {MISENSOR_16BIT, 0xD012, 0xB948},
+ {MISENSOR_16BIT, 0xD014, 0xE082},
+ {MISENSOR_16BIT, 0xD016, 0x20CC},
+ {MISENSOR_16BIT, 0xD018, 0x80E2},
+ {MISENSOR_16BIT, 0xD01A, 0x21CC},
+ {MISENSOR_16BIT, 0xD01C, 0x80A2},
+ {MISENSOR_16BIT, 0xD01E, 0x21CC},
+ {MISENSOR_16BIT, 0xD020, 0x80E2},
+ {MISENSOR_16BIT, 0xD022, 0xF404},
+ {MISENSOR_16BIT, 0xD024, 0xD801},
+ {MISENSOR_16BIT, 0xD026, 0xF003},
+ {MISENSOR_16BIT, 0xD028, 0xD800},
+ {MISENSOR_16BIT, 0xD02A, 0x7EE0},
+ {MISENSOR_16BIT, 0xD02C, 0xC0F1},
+ {MISENSOR_16BIT, 0xD02E, 0x08BA},
+
+ {MISENSOR_16BIT, 0xD030, 0x0600},
+ {MISENSOR_16BIT, 0xD032, 0xC1A1},
+ {MISENSOR_16BIT, 0xD034, 0x76CF},
+ {MISENSOR_16BIT, 0xD036, 0xFFFF},
+ {MISENSOR_16BIT, 0xD038, 0xC130},
+ {MISENSOR_16BIT, 0xD03A, 0x6E04},
+ {MISENSOR_16BIT, 0xD03C, 0xC040},
+ {MISENSOR_16BIT, 0xD03E, 0x71CF},
+ {MISENSOR_16BIT, 0xD040, 0xFFFF},
+ {MISENSOR_16BIT, 0xD042, 0xC790},
+ {MISENSOR_16BIT, 0xD044, 0x8103},
+ {MISENSOR_16BIT, 0xD046, 0x77CF},
+ {MISENSOR_16BIT, 0xD048, 0xFFFF},
+ {MISENSOR_16BIT, 0xD04A, 0xC7C0},
+ {MISENSOR_16BIT, 0xD04C, 0xE001},
+ {MISENSOR_16BIT, 0xD04E, 0xA103},
+ {MISENSOR_16BIT, 0xD050, 0xD800},
+ {MISENSOR_16BIT, 0xD052, 0x0C6A},
+ {MISENSOR_16BIT, 0xD054, 0x04E0},
+ {MISENSOR_16BIT, 0xD056, 0xB89E},
+ {MISENSOR_16BIT, 0xD058, 0x7508},
+ {MISENSOR_16BIT, 0xD05A, 0x8E1C},
+ {MISENSOR_16BIT, 0xD05C, 0x0809},
+ {MISENSOR_16BIT, 0xD05E, 0x0191},
+
+ {MISENSOR_16BIT, 0xD060, 0xD801},
+ {MISENSOR_16BIT, 0xD062, 0xAE1D},
+ {MISENSOR_16BIT, 0xD064, 0xE580},
+ {MISENSOR_16BIT, 0xD066, 0x20CA},
+ {MISENSOR_16BIT, 0xD068, 0x0022},
+ {MISENSOR_16BIT, 0xD06A, 0x20CF},
+ {MISENSOR_16BIT, 0xD06C, 0x0522},
+ {MISENSOR_16BIT, 0xD06E, 0x0C5C},
+ {MISENSOR_16BIT, 0xD070, 0x04E2},
+ {MISENSOR_16BIT, 0xD072, 0x21CA},
+ {MISENSOR_16BIT, 0xD074, 0x0062},
+ {MISENSOR_16BIT, 0xD076, 0xE580},
+ {MISENSOR_16BIT, 0xD078, 0xD901},
+ {MISENSOR_16BIT, 0xD07A, 0x79C0},
+ {MISENSOR_16BIT, 0xD07C, 0xD800},
+ {MISENSOR_16BIT, 0xD07E, 0x0BE6},
+ {MISENSOR_16BIT, 0xD080, 0x04E0},
+ {MISENSOR_16BIT, 0xD082, 0xB89E},
+ {MISENSOR_16BIT, 0xD084, 0x70CF},
+ {MISENSOR_16BIT, 0xD086, 0xFFFF},
+ {MISENSOR_16BIT, 0xD088, 0xC8D4},
+ {MISENSOR_16BIT, 0xD08A, 0x9002},
+ {MISENSOR_16BIT, 0xD08C, 0x0857},
+ {MISENSOR_16BIT, 0xD08E, 0x025E},
+
+ {MISENSOR_16BIT, 0xD090, 0xFFDC},
+ {MISENSOR_16BIT, 0xD092, 0xE080},
+ {MISENSOR_16BIT, 0xD094, 0x25CC},
+ {MISENSOR_16BIT, 0xD096, 0x9022},
+ {MISENSOR_16BIT, 0xD098, 0xF225},
+ {MISENSOR_16BIT, 0xD09A, 0x1700},
+ {MISENSOR_16BIT, 0xD09C, 0x108A},
+ {MISENSOR_16BIT, 0xD09E, 0x73CF},
+ {MISENSOR_16BIT, 0xD0A0, 0xFF00},
+ {MISENSOR_16BIT, 0xD0A2, 0x3174},
+ {MISENSOR_16BIT, 0xD0A4, 0x9307},
+ {MISENSOR_16BIT, 0xD0A6, 0x2A04},
+ {MISENSOR_16BIT, 0xD0A8, 0x103E},
+ {MISENSOR_16BIT, 0xD0AA, 0x9328},
+ {MISENSOR_16BIT, 0xD0AC, 0x2942},
+ {MISENSOR_16BIT, 0xD0AE, 0x7140},
+ {MISENSOR_16BIT, 0xD0B0, 0x2A04},
+ {MISENSOR_16BIT, 0xD0B2, 0x107E},
+ {MISENSOR_16BIT, 0xD0B4, 0x9349},
+ {MISENSOR_16BIT, 0xD0B6, 0x2942},
+ {MISENSOR_16BIT, 0xD0B8, 0x7141},
+ {MISENSOR_16BIT, 0xD0BA, 0x2A04},
+ {MISENSOR_16BIT, 0xD0BC, 0x10BE},
+ {MISENSOR_16BIT, 0xD0BE, 0x934A},
+
+ {MISENSOR_16BIT, 0xD0C0, 0x2942},
+ {MISENSOR_16BIT, 0xD0C2, 0x714B},
+ {MISENSOR_16BIT, 0xD0C4, 0x2A04},
+ {MISENSOR_16BIT, 0xD0C6, 0x10BE},
+ {MISENSOR_16BIT, 0xD0C8, 0x130C},
+ {MISENSOR_16BIT, 0xD0CA, 0x010A},
+ {MISENSOR_16BIT, 0xD0CC, 0x2942},
+ {MISENSOR_16BIT, 0xD0CE, 0x7142},
+ {MISENSOR_16BIT, 0xD0D0, 0x2250},
+ {MISENSOR_16BIT, 0xD0D2, 0x13CA},
+ {MISENSOR_16BIT, 0xD0D4, 0x1B0C},
+ {MISENSOR_16BIT, 0xD0D6, 0x0284},
+ {MISENSOR_16BIT, 0xD0D8, 0xB307},
+ {MISENSOR_16BIT, 0xD0DA, 0xB328},
+ {MISENSOR_16BIT, 0xD0DC, 0x1B12},
+ {MISENSOR_16BIT, 0xD0DE, 0x02C4},
+ {MISENSOR_16BIT, 0xD0E0, 0xB34A},
+ {MISENSOR_16BIT, 0xD0E2, 0xED88},
+ {MISENSOR_16BIT, 0xD0E4, 0x71CF},
+ {MISENSOR_16BIT, 0xD0E6, 0xFF00},
+ {MISENSOR_16BIT, 0xD0E8, 0x3174},
+ {MISENSOR_16BIT, 0xD0EA, 0x9106},
+ {MISENSOR_16BIT, 0xD0EC, 0xB88F},
+ {MISENSOR_16BIT, 0xD0EE, 0xB106},
+
+ {MISENSOR_16BIT, 0xD0F0, 0x210A},
+ {MISENSOR_16BIT, 0xD0F2, 0x8340},
+ {MISENSOR_16BIT, 0xD0F4, 0xC000},
+ {MISENSOR_16BIT, 0xD0F6, 0x21CA},
+ {MISENSOR_16BIT, 0xD0F8, 0x0062},
+ {MISENSOR_16BIT, 0xD0FA, 0x20F0},
+ {MISENSOR_16BIT, 0xD0FC, 0x0040},
+ {MISENSOR_16BIT, 0xD0FE, 0x0B02},
+ {MISENSOR_16BIT, 0xD100, 0x0320},
+ {MISENSOR_16BIT, 0xD102, 0xD901},
+ {MISENSOR_16BIT, 0xD104, 0x07F1},
+ {MISENSOR_16BIT, 0xD106, 0x05E0},
+ {MISENSOR_16BIT, 0xD108, 0xC0A1},
+ {MISENSOR_16BIT, 0xD10A, 0x78E0},
+ {MISENSOR_16BIT, 0xD10C, 0xC0F1},
+ {MISENSOR_16BIT, 0xD10E, 0x71CF},
+ {MISENSOR_16BIT, 0xD110, 0xFFFF},
+ {MISENSOR_16BIT, 0xD112, 0xC7C0},
+ {MISENSOR_16BIT, 0xD114, 0xD840},
+ {MISENSOR_16BIT, 0xD116, 0xA900},
+ {MISENSOR_16BIT, 0xD118, 0x71CF},
+ {MISENSOR_16BIT, 0xD11A, 0xFFFF},
+ {MISENSOR_16BIT, 0xD11C, 0xD02C},
+ {MISENSOR_16BIT, 0xD11E, 0xD81E},
+
+ {MISENSOR_16BIT, 0xD120, 0x0A5A},
+ {MISENSOR_16BIT, 0xD122, 0x04E0},
+ {MISENSOR_16BIT, 0xD124, 0xDA00},
+ {MISENSOR_16BIT, 0xD126, 0xD800},
+ {MISENSOR_16BIT, 0xD128, 0xC0D1},
+ {MISENSOR_16BIT, 0xD12A, 0x7EE0},
+
+ {MISENSOR_16BIT, 0x098E, 0x0000},
+ {MISENSOR_16BIT, 0xE000, 0x010C},
+ {MISENSOR_16BIT, 0xE002, 0x0202},
+ {MISENSOR_16BIT, 0xE004, 0x4103},
+ {MISENSOR_16BIT, 0xE006, 0x0202},
+ {MISENSOR_16BIT, 0x0080, 0xFFF0},
+ {MISENSOR_16BIT, 0x0080, 0xFFF1},
+
+ /* LOAD=Patch 0302; Feature Recommended; Adaptive Sensitivity */
+ {MISENSOR_16BIT, 0x0982, 0x0001},
+ {MISENSOR_16BIT, 0x098A, 0x512C},
+ {MISENSOR_16BIT, 0xD12C, 0x70CF},
+ {MISENSOR_16BIT, 0xD12E, 0xFFFF},
+ {MISENSOR_16BIT, 0xD130, 0xC5D4},
+ {MISENSOR_16BIT, 0xD132, 0x903A},
+ {MISENSOR_16BIT, 0xD134, 0x2144},
+ {MISENSOR_16BIT, 0xD136, 0x0C00},
+ {MISENSOR_16BIT, 0xD138, 0x2186},
+ {MISENSOR_16BIT, 0xD13A, 0x0FF3},
+ {MISENSOR_16BIT, 0xD13C, 0xB844},
+ {MISENSOR_16BIT, 0xD13E, 0x262F},
+ {MISENSOR_16BIT, 0xD140, 0xF008},
+ {MISENSOR_16BIT, 0xD142, 0xB948},
+ {MISENSOR_16BIT, 0xD144, 0x21CC},
+ {MISENSOR_16BIT, 0xD146, 0x8021},
+ {MISENSOR_16BIT, 0xD148, 0xD801},
+ {MISENSOR_16BIT, 0xD14A, 0xF203},
+ {MISENSOR_16BIT, 0xD14C, 0xD800},
+ {MISENSOR_16BIT, 0xD14E, 0x7EE0},
+ {MISENSOR_16BIT, 0xD150, 0xC0F1},
+ {MISENSOR_16BIT, 0xD152, 0x71CF},
+ {MISENSOR_16BIT, 0xD154, 0xFFFF},
+ {MISENSOR_16BIT, 0xD156, 0xC610},
+ {MISENSOR_16BIT, 0xD158, 0x910E},
+ {MISENSOR_16BIT, 0xD15A, 0x208C},
+ {MISENSOR_16BIT, 0xD15C, 0x8014},
+ {MISENSOR_16BIT, 0xD15E, 0xF418},
+ {MISENSOR_16BIT, 0xD160, 0x910F},
+ {MISENSOR_16BIT, 0xD162, 0x208C},
+ {MISENSOR_16BIT, 0xD164, 0x800F},
+ {MISENSOR_16BIT, 0xD166, 0xF414},
+ {MISENSOR_16BIT, 0xD168, 0x9116},
+ {MISENSOR_16BIT, 0xD16A, 0x208C},
+ {MISENSOR_16BIT, 0xD16C, 0x800A},
+ {MISENSOR_16BIT, 0xD16E, 0xF410},
+ {MISENSOR_16BIT, 0xD170, 0x9117},
+ {MISENSOR_16BIT, 0xD172, 0x208C},
+ {MISENSOR_16BIT, 0xD174, 0x8807},
+ {MISENSOR_16BIT, 0xD176, 0xF40C},
+ {MISENSOR_16BIT, 0xD178, 0x9118},
+ {MISENSOR_16BIT, 0xD17A, 0x2086},
+ {MISENSOR_16BIT, 0xD17C, 0x0FF3},
+ {MISENSOR_16BIT, 0xD17E, 0xB848},
+ {MISENSOR_16BIT, 0xD180, 0x080D},
+ {MISENSOR_16BIT, 0xD182, 0x0090},
+ {MISENSOR_16BIT, 0xD184, 0xFFEA},
+ {MISENSOR_16BIT, 0xD186, 0xE081},
+ {MISENSOR_16BIT, 0xD188, 0xD801},
+ {MISENSOR_16BIT, 0xD18A, 0xF203},
+ {MISENSOR_16BIT, 0xD18C, 0xD800},
+ {MISENSOR_16BIT, 0xD18E, 0xC0D1},
+ {MISENSOR_16BIT, 0xD190, 0x7EE0},
+ {MISENSOR_16BIT, 0xD192, 0x78E0},
+ {MISENSOR_16BIT, 0xD194, 0xC0F1},
+ {MISENSOR_16BIT, 0xD196, 0x71CF},
+ {MISENSOR_16BIT, 0xD198, 0xFFFF},
+ {MISENSOR_16BIT, 0xD19A, 0xC610},
+ {MISENSOR_16BIT, 0xD19C, 0x910E},
+ {MISENSOR_16BIT, 0xD19E, 0x208C},
+ {MISENSOR_16BIT, 0xD1A0, 0x800A},
+ {MISENSOR_16BIT, 0xD1A2, 0xF418},
+ {MISENSOR_16BIT, 0xD1A4, 0x910F},
+ {MISENSOR_16BIT, 0xD1A6, 0x208C},
+ {MISENSOR_16BIT, 0xD1A8, 0x8807},
+ {MISENSOR_16BIT, 0xD1AA, 0xF414},
+ {MISENSOR_16BIT, 0xD1AC, 0x9116},
+ {MISENSOR_16BIT, 0xD1AE, 0x208C},
+ {MISENSOR_16BIT, 0xD1B0, 0x800A},
+ {MISENSOR_16BIT, 0xD1B2, 0xF410},
+ {MISENSOR_16BIT, 0xD1B4, 0x9117},
+ {MISENSOR_16BIT, 0xD1B6, 0x208C},
+ {MISENSOR_16BIT, 0xD1B8, 0x8807},
+ {MISENSOR_16BIT, 0xD1BA, 0xF40C},
+ {MISENSOR_16BIT, 0xD1BC, 0x9118},
+ {MISENSOR_16BIT, 0xD1BE, 0x2086},
+ {MISENSOR_16BIT, 0xD1C0, 0x0FF3},
+ {MISENSOR_16BIT, 0xD1C2, 0xB848},
+ {MISENSOR_16BIT, 0xD1C4, 0x080D},
+ {MISENSOR_16BIT, 0xD1C6, 0x0090},
+ {MISENSOR_16BIT, 0xD1C8, 0xFFD9},
+ {MISENSOR_16BIT, 0xD1CA, 0xE080},
+ {MISENSOR_16BIT, 0xD1CC, 0xD801},
+ {MISENSOR_16BIT, 0xD1CE, 0xF203},
+ {MISENSOR_16BIT, 0xD1D0, 0xD800},
+ {MISENSOR_16BIT, 0xD1D2, 0xF1DF},
+ {MISENSOR_16BIT, 0xD1D4, 0x9040},
+ {MISENSOR_16BIT, 0xD1D6, 0x71CF},
+ {MISENSOR_16BIT, 0xD1D8, 0xFFFF},
+ {MISENSOR_16BIT, 0xD1DA, 0xC5D4},
+ {MISENSOR_16BIT, 0xD1DC, 0xB15A},
+ {MISENSOR_16BIT, 0xD1DE, 0x9041},
+ {MISENSOR_16BIT, 0xD1E0, 0x73CF},
+ {MISENSOR_16BIT, 0xD1E2, 0xFFFF},
+ {MISENSOR_16BIT, 0xD1E4, 0xC7D0},
+ {MISENSOR_16BIT, 0xD1E6, 0xB140},
+ {MISENSOR_16BIT, 0xD1E8, 0x9042},
+ {MISENSOR_16BIT, 0xD1EA, 0xB141},
+ {MISENSOR_16BIT, 0xD1EC, 0x9043},
+ {MISENSOR_16BIT, 0xD1EE, 0xB142},
+ {MISENSOR_16BIT, 0xD1F0, 0x9044},
+ {MISENSOR_16BIT, 0xD1F2, 0xB143},
+ {MISENSOR_16BIT, 0xD1F4, 0x9045},
+ {MISENSOR_16BIT, 0xD1F6, 0xB147},
+ {MISENSOR_16BIT, 0xD1F8, 0x9046},
+ {MISENSOR_16BIT, 0xD1FA, 0xB148},
+ {MISENSOR_16BIT, 0xD1FC, 0x9047},
+ {MISENSOR_16BIT, 0xD1FE, 0xB14B},
+ {MISENSOR_16BIT, 0xD200, 0x9048},
+ {MISENSOR_16BIT, 0xD202, 0xB14C},
+ {MISENSOR_16BIT, 0xD204, 0x9049},
+ {MISENSOR_16BIT, 0xD206, 0x1958},
+ {MISENSOR_16BIT, 0xD208, 0x0084},
+ {MISENSOR_16BIT, 0xD20A, 0x904A},
+ {MISENSOR_16BIT, 0xD20C, 0x195A},
+ {MISENSOR_16BIT, 0xD20E, 0x0084},
+ {MISENSOR_16BIT, 0xD210, 0x8856},
+ {MISENSOR_16BIT, 0xD212, 0x1B36},
+ {MISENSOR_16BIT, 0xD214, 0x8082},
+ {MISENSOR_16BIT, 0xD216, 0x8857},
+ {MISENSOR_16BIT, 0xD218, 0x1B37},
+ {MISENSOR_16BIT, 0xD21A, 0x8082},
+ {MISENSOR_16BIT, 0xD21C, 0x904C},
+ {MISENSOR_16BIT, 0xD21E, 0x19A7},
+ {MISENSOR_16BIT, 0xD220, 0x009C},
+ {MISENSOR_16BIT, 0xD222, 0x881A},
+ {MISENSOR_16BIT, 0xD224, 0x7FE0},
+ {MISENSOR_16BIT, 0xD226, 0x1B54},
+ {MISENSOR_16BIT, 0xD228, 0x8002},
+ {MISENSOR_16BIT, 0xD22A, 0x78E0},
+ {MISENSOR_16BIT, 0xD22C, 0x71CF},
+ {MISENSOR_16BIT, 0xD22E, 0xFFFF},
+ {MISENSOR_16BIT, 0xD230, 0xC350},
+ {MISENSOR_16BIT, 0xD232, 0xD828},
+ {MISENSOR_16BIT, 0xD234, 0xA90B},
+ {MISENSOR_16BIT, 0xD236, 0x8100},
+ {MISENSOR_16BIT, 0xD238, 0x01C5},
+ {MISENSOR_16BIT, 0xD23A, 0x0320},
+ {MISENSOR_16BIT, 0xD23C, 0xD900},
+ {MISENSOR_16BIT, 0xD23E, 0x78E0},
+ {MISENSOR_16BIT, 0xD240, 0x220A},
+ {MISENSOR_16BIT, 0xD242, 0x1F80},
+ {MISENSOR_16BIT, 0xD244, 0xFFFF},
+ {MISENSOR_16BIT, 0xD246, 0xD4E0},
+ {MISENSOR_16BIT, 0xD248, 0xC0F1},
+ {MISENSOR_16BIT, 0xD24A, 0x0811},
+ {MISENSOR_16BIT, 0xD24C, 0x0051},
+ {MISENSOR_16BIT, 0xD24E, 0x2240},
+ {MISENSOR_16BIT, 0xD250, 0x1200},
+ {MISENSOR_16BIT, 0xD252, 0xFFE1},
+ {MISENSOR_16BIT, 0xD254, 0xD801},
+ {MISENSOR_16BIT, 0xD256, 0xF006},
+ {MISENSOR_16BIT, 0xD258, 0x2240},
+ {MISENSOR_16BIT, 0xD25A, 0x1900},
+ {MISENSOR_16BIT, 0xD25C, 0xFFDE},
+ {MISENSOR_16BIT, 0xD25E, 0xD802},
+ {MISENSOR_16BIT, 0xD260, 0x1A05},
+ {MISENSOR_16BIT, 0xD262, 0x1002},
+ {MISENSOR_16BIT, 0xD264, 0xFFF2},
+ {MISENSOR_16BIT, 0xD266, 0xF195},
+ {MISENSOR_16BIT, 0xD268, 0xC0F1},
+ {MISENSOR_16BIT, 0xD26A, 0x0E7E},
+ {MISENSOR_16BIT, 0xD26C, 0x05C0},
+ {MISENSOR_16BIT, 0xD26E, 0x75CF},
+ {MISENSOR_16BIT, 0xD270, 0xFFFF},
+ {MISENSOR_16BIT, 0xD272, 0xC84C},
+ {MISENSOR_16BIT, 0xD274, 0x9502},
+ {MISENSOR_16BIT, 0xD276, 0x77CF},
+ {MISENSOR_16BIT, 0xD278, 0xFFFF},
+ {MISENSOR_16BIT, 0xD27A, 0xC344},
+ {MISENSOR_16BIT, 0xD27C, 0x2044},
+ {MISENSOR_16BIT, 0xD27E, 0x008E},
+ {MISENSOR_16BIT, 0xD280, 0xB8A1},
+ {MISENSOR_16BIT, 0xD282, 0x0926},
+ {MISENSOR_16BIT, 0xD284, 0x03E0},
+ {MISENSOR_16BIT, 0xD286, 0xB502},
+ {MISENSOR_16BIT, 0xD288, 0x9502},
+ {MISENSOR_16BIT, 0xD28A, 0x952E},
+ {MISENSOR_16BIT, 0xD28C, 0x7E05},
+ {MISENSOR_16BIT, 0xD28E, 0xB5C2},
+ {MISENSOR_16BIT, 0xD290, 0x70CF},
+ {MISENSOR_16BIT, 0xD292, 0xFFFF},
+ {MISENSOR_16BIT, 0xD294, 0xC610},
+ {MISENSOR_16BIT, 0xD296, 0x099A},
+ {MISENSOR_16BIT, 0xD298, 0x04A0},
+ {MISENSOR_16BIT, 0xD29A, 0xB026},
+ {MISENSOR_16BIT, 0xD29C, 0x0E02},
+ {MISENSOR_16BIT, 0xD29E, 0x0560},
+ {MISENSOR_16BIT, 0xD2A0, 0xDE00},
+ {MISENSOR_16BIT, 0xD2A2, 0x0A12},
+ {MISENSOR_16BIT, 0xD2A4, 0x0320},
+ {MISENSOR_16BIT, 0xD2A6, 0xB7C4},
+ {MISENSOR_16BIT, 0xD2A8, 0x0B36},
+ {MISENSOR_16BIT, 0xD2AA, 0x03A0},
+ {MISENSOR_16BIT, 0xD2AC, 0x70C9},
+ {MISENSOR_16BIT, 0xD2AE, 0x9502},
+ {MISENSOR_16BIT, 0xD2B0, 0x7608},
+ {MISENSOR_16BIT, 0xD2B2, 0xB8A8},
+ {MISENSOR_16BIT, 0xD2B4, 0xB502},
+ {MISENSOR_16BIT, 0xD2B6, 0x70CF},
+ {MISENSOR_16BIT, 0xD2B8, 0x0000},
+ {MISENSOR_16BIT, 0xD2BA, 0x5536},
+ {MISENSOR_16BIT, 0xD2BC, 0x7860},
+ {MISENSOR_16BIT, 0xD2BE, 0x2686},
+ {MISENSOR_16BIT, 0xD2C0, 0x1FFB},
+ {MISENSOR_16BIT, 0xD2C2, 0x9502},
+ {MISENSOR_16BIT, 0xD2C4, 0x78C5},
+ {MISENSOR_16BIT, 0xD2C6, 0x0631},
+ {MISENSOR_16BIT, 0xD2C8, 0x05E0},
+ {MISENSOR_16BIT, 0xD2CA, 0xB502},
+ {MISENSOR_16BIT, 0xD2CC, 0x72CF},
+ {MISENSOR_16BIT, 0xD2CE, 0xFFFF},
+ {MISENSOR_16BIT, 0xD2D0, 0xC5D4},
+ {MISENSOR_16BIT, 0xD2D2, 0x923A},
+ {MISENSOR_16BIT, 0xD2D4, 0x73CF},
+ {MISENSOR_16BIT, 0xD2D6, 0xFFFF},
+ {MISENSOR_16BIT, 0xD2D8, 0xC7D0},
+ {MISENSOR_16BIT, 0xD2DA, 0xB020},
+ {MISENSOR_16BIT, 0xD2DC, 0x9220},
+ {MISENSOR_16BIT, 0xD2DE, 0xB021},
+ {MISENSOR_16BIT, 0xD2E0, 0x9221},
+ {MISENSOR_16BIT, 0xD2E2, 0xB022},
+ {MISENSOR_16BIT, 0xD2E4, 0x9222},
+ {MISENSOR_16BIT, 0xD2E6, 0xB023},
+ {MISENSOR_16BIT, 0xD2E8, 0x9223},
+ {MISENSOR_16BIT, 0xD2EA, 0xB024},
+ {MISENSOR_16BIT, 0xD2EC, 0x9227},
+ {MISENSOR_16BIT, 0xD2EE, 0xB025},
+ {MISENSOR_16BIT, 0xD2F0, 0x9228},
+ {MISENSOR_16BIT, 0xD2F2, 0xB026},
+ {MISENSOR_16BIT, 0xD2F4, 0x922B},
+ {MISENSOR_16BIT, 0xD2F6, 0xB027},
+ {MISENSOR_16BIT, 0xD2F8, 0x922C},
+ {MISENSOR_16BIT, 0xD2FA, 0xB028},
+ {MISENSOR_16BIT, 0xD2FC, 0x1258},
+ {MISENSOR_16BIT, 0xD2FE, 0x0101},
+ {MISENSOR_16BIT, 0xD300, 0xB029},
+ {MISENSOR_16BIT, 0xD302, 0x125A},
+ {MISENSOR_16BIT, 0xD304, 0x0101},
+ {MISENSOR_16BIT, 0xD306, 0xB02A},
+ {MISENSOR_16BIT, 0xD308, 0x1336},
+ {MISENSOR_16BIT, 0xD30A, 0x8081},
+ {MISENSOR_16BIT, 0xD30C, 0xA836},
+ {MISENSOR_16BIT, 0xD30E, 0x1337},
+ {MISENSOR_16BIT, 0xD310, 0x8081},
+ {MISENSOR_16BIT, 0xD312, 0xA837},
+ {MISENSOR_16BIT, 0xD314, 0x12A7},
+ {MISENSOR_16BIT, 0xD316, 0x0701},
+ {MISENSOR_16BIT, 0xD318, 0xB02C},
+ {MISENSOR_16BIT, 0xD31A, 0x1354},
+ {MISENSOR_16BIT, 0xD31C, 0x8081},
+ {MISENSOR_16BIT, 0xD31E, 0x7FE0},
+ {MISENSOR_16BIT, 0xD320, 0xA83A},
+ {MISENSOR_16BIT, 0xD322, 0x78E0},
+ {MISENSOR_16BIT, 0xD324, 0xC0F1},
+ {MISENSOR_16BIT, 0xD326, 0x0DC2},
+ {MISENSOR_16BIT, 0xD328, 0x05C0},
+ {MISENSOR_16BIT, 0xD32A, 0x7608},
+ {MISENSOR_16BIT, 0xD32C, 0x09BB},
+ {MISENSOR_16BIT, 0xD32E, 0x0010},
+ {MISENSOR_16BIT, 0xD330, 0x75CF},
+ {MISENSOR_16BIT, 0xD332, 0xFFFF},
+ {MISENSOR_16BIT, 0xD334, 0xD4E0},
+ {MISENSOR_16BIT, 0xD336, 0x8D21},
+ {MISENSOR_16BIT, 0xD338, 0x8D00},
+ {MISENSOR_16BIT, 0xD33A, 0x2153},
+ {MISENSOR_16BIT, 0xD33C, 0x0003},
+ {MISENSOR_16BIT, 0xD33E, 0xB8C0},
+ {MISENSOR_16BIT, 0xD340, 0x8D45},
+ {MISENSOR_16BIT, 0xD342, 0x0B23},
+ {MISENSOR_16BIT, 0xD344, 0x0000},
+ {MISENSOR_16BIT, 0xD346, 0xEA8F},
+ {MISENSOR_16BIT, 0xD348, 0x0915},
+ {MISENSOR_16BIT, 0xD34A, 0x001E},
+ {MISENSOR_16BIT, 0xD34C, 0xFF81},
+ {MISENSOR_16BIT, 0xD34E, 0xE808},
+ {MISENSOR_16BIT, 0xD350, 0x2540},
+ {MISENSOR_16BIT, 0xD352, 0x1900},
+ {MISENSOR_16BIT, 0xD354, 0xFFDE},
+ {MISENSOR_16BIT, 0xD356, 0x8D00},
+ {MISENSOR_16BIT, 0xD358, 0xB880},
+ {MISENSOR_16BIT, 0xD35A, 0xF004},
+ {MISENSOR_16BIT, 0xD35C, 0x8D00},
+ {MISENSOR_16BIT, 0xD35E, 0xB8A0},
+ {MISENSOR_16BIT, 0xD360, 0xAD00},
+ {MISENSOR_16BIT, 0xD362, 0x8D05},
+ {MISENSOR_16BIT, 0xD364, 0xE081},
+ {MISENSOR_16BIT, 0xD366, 0x20CC},
+ {MISENSOR_16BIT, 0xD368, 0x80A2},
+ {MISENSOR_16BIT, 0xD36A, 0xDF00},
+ {MISENSOR_16BIT, 0xD36C, 0xF40A},
+ {MISENSOR_16BIT, 0xD36E, 0x71CF},
+ {MISENSOR_16BIT, 0xD370, 0xFFFF},
+ {MISENSOR_16BIT, 0xD372, 0xC84C},
+ {MISENSOR_16BIT, 0xD374, 0x9102},
+ {MISENSOR_16BIT, 0xD376, 0x7708},
+ {MISENSOR_16BIT, 0xD378, 0xB8A6},
+ {MISENSOR_16BIT, 0xD37A, 0x2786},
+ {MISENSOR_16BIT, 0xD37C, 0x1FFE},
+ {MISENSOR_16BIT, 0xD37E, 0xB102},
+ {MISENSOR_16BIT, 0xD380, 0x0B42},
+ {MISENSOR_16BIT, 0xD382, 0x0180},
+ {MISENSOR_16BIT, 0xD384, 0x0E3E},
+ {MISENSOR_16BIT, 0xD386, 0x0180},
+ {MISENSOR_16BIT, 0xD388, 0x0F4A},
+ {MISENSOR_16BIT, 0xD38A, 0x0160},
+ {MISENSOR_16BIT, 0xD38C, 0x70C9},
+ {MISENSOR_16BIT, 0xD38E, 0x8D05},
+ {MISENSOR_16BIT, 0xD390, 0xE081},
+ {MISENSOR_16BIT, 0xD392, 0x20CC},
+ {MISENSOR_16BIT, 0xD394, 0x80A2},
+ {MISENSOR_16BIT, 0xD396, 0xF429},
+ {MISENSOR_16BIT, 0xD398, 0x76CF},
+ {MISENSOR_16BIT, 0xD39A, 0xFFFF},
+ {MISENSOR_16BIT, 0xD39C, 0xC84C},
+ {MISENSOR_16BIT, 0xD39E, 0x082D},
+ {MISENSOR_16BIT, 0xD3A0, 0x0051},
+ {MISENSOR_16BIT, 0xD3A2, 0x70CF},
+ {MISENSOR_16BIT, 0xD3A4, 0xFFFF},
+ {MISENSOR_16BIT, 0xD3A6, 0xC90C},
+ {MISENSOR_16BIT, 0xD3A8, 0x8805},
+ {MISENSOR_16BIT, 0xD3AA, 0x09B6},
+ {MISENSOR_16BIT, 0xD3AC, 0x0360},
+ {MISENSOR_16BIT, 0xD3AE, 0xD908},
+ {MISENSOR_16BIT, 0xD3B0, 0x2099},
+ {MISENSOR_16BIT, 0xD3B2, 0x0802},
+ {MISENSOR_16BIT, 0xD3B4, 0x9634},
+ {MISENSOR_16BIT, 0xD3B6, 0xB503},
+ {MISENSOR_16BIT, 0xD3B8, 0x7902},
+ {MISENSOR_16BIT, 0xD3BA, 0x1523},
+ {MISENSOR_16BIT, 0xD3BC, 0x1080},
+ {MISENSOR_16BIT, 0xD3BE, 0xB634},
+ {MISENSOR_16BIT, 0xD3C0, 0xE001},
+ {MISENSOR_16BIT, 0xD3C2, 0x1D23},
+ {MISENSOR_16BIT, 0xD3C4, 0x1002},
+ {MISENSOR_16BIT, 0xD3C6, 0xF00B},
+ {MISENSOR_16BIT, 0xD3C8, 0x9634},
+ {MISENSOR_16BIT, 0xD3CA, 0x9503},
+ {MISENSOR_16BIT, 0xD3CC, 0x6038},
+ {MISENSOR_16BIT, 0xD3CE, 0xB614},
+ {MISENSOR_16BIT, 0xD3D0, 0x153F},
+ {MISENSOR_16BIT, 0xD3D2, 0x1080},
+ {MISENSOR_16BIT, 0xD3D4, 0xE001},
+ {MISENSOR_16BIT, 0xD3D6, 0x1D3F},
+ {MISENSOR_16BIT, 0xD3D8, 0x1002},
+ {MISENSOR_16BIT, 0xD3DA, 0xFFA4},
+ {MISENSOR_16BIT, 0xD3DC, 0x9602},
+ {MISENSOR_16BIT, 0xD3DE, 0x7F05},
+ {MISENSOR_16BIT, 0xD3E0, 0xD800},
+ {MISENSOR_16BIT, 0xD3E2, 0xB6E2},
+ {MISENSOR_16BIT, 0xD3E4, 0xAD05},
+ {MISENSOR_16BIT, 0xD3E6, 0x0511},
+ {MISENSOR_16BIT, 0xD3E8, 0x05E0},
+ {MISENSOR_16BIT, 0xD3EA, 0xD800},
+ {MISENSOR_16BIT, 0xD3EC, 0xC0F1},
+ {MISENSOR_16BIT, 0xD3EE, 0x0CFE},
+ {MISENSOR_16BIT, 0xD3F0, 0x05C0},
+ {MISENSOR_16BIT, 0xD3F2, 0x0A96},
+ {MISENSOR_16BIT, 0xD3F4, 0x05A0},
+ {MISENSOR_16BIT, 0xD3F6, 0x7608},
+ {MISENSOR_16BIT, 0xD3F8, 0x0C22},
+ {MISENSOR_16BIT, 0xD3FA, 0x0240},
+ {MISENSOR_16BIT, 0xD3FC, 0xE080},
+ {MISENSOR_16BIT, 0xD3FE, 0x20CA},
+ {MISENSOR_16BIT, 0xD400, 0x0F82},
+ {MISENSOR_16BIT, 0xD402, 0x0000},
+ {MISENSOR_16BIT, 0xD404, 0x190B},
+ {MISENSOR_16BIT, 0xD406, 0x0C60},
+ {MISENSOR_16BIT, 0xD408, 0x05A2},
+ {MISENSOR_16BIT, 0xD40A, 0x21CA},
+ {MISENSOR_16BIT, 0xD40C, 0x0022},
+ {MISENSOR_16BIT, 0xD40E, 0x0C56},
+ {MISENSOR_16BIT, 0xD410, 0x0240},
+ {MISENSOR_16BIT, 0xD412, 0xE806},
+ {MISENSOR_16BIT, 0xD414, 0x0E0E},
+ {MISENSOR_16BIT, 0xD416, 0x0220},
+ {MISENSOR_16BIT, 0xD418, 0x70C9},
+ {MISENSOR_16BIT, 0xD41A, 0xF048},
+ {MISENSOR_16BIT, 0xD41C, 0x0896},
+ {MISENSOR_16BIT, 0xD41E, 0x0440},
+ {MISENSOR_16BIT, 0xD420, 0x0E96},
+ {MISENSOR_16BIT, 0xD422, 0x0400},
+ {MISENSOR_16BIT, 0xD424, 0x0966},
+ {MISENSOR_16BIT, 0xD426, 0x0380},
+ {MISENSOR_16BIT, 0xD428, 0x75CF},
+ {MISENSOR_16BIT, 0xD42A, 0xFFFF},
+ {MISENSOR_16BIT, 0xD42C, 0xD4E0},
+ {MISENSOR_16BIT, 0xD42E, 0x8D00},
+ {MISENSOR_16BIT, 0xD430, 0x084D},
+ {MISENSOR_16BIT, 0xD432, 0x001E},
+ {MISENSOR_16BIT, 0xD434, 0xFF47},
+ {MISENSOR_16BIT, 0xD436, 0x080D},
+ {MISENSOR_16BIT, 0xD438, 0x0050},
+ {MISENSOR_16BIT, 0xD43A, 0xFF57},
+ {MISENSOR_16BIT, 0xD43C, 0x0841},
+ {MISENSOR_16BIT, 0xD43E, 0x0051},
+ {MISENSOR_16BIT, 0xD440, 0x8D04},
+ {MISENSOR_16BIT, 0xD442, 0x9521},
+ {MISENSOR_16BIT, 0xD444, 0xE064},
+ {MISENSOR_16BIT, 0xD446, 0x790C},
+ {MISENSOR_16BIT, 0xD448, 0x702F},
+ {MISENSOR_16BIT, 0xD44A, 0x0CE2},
+ {MISENSOR_16BIT, 0xD44C, 0x05E0},
+ {MISENSOR_16BIT, 0xD44E, 0xD964},
+ {MISENSOR_16BIT, 0xD450, 0x72CF},
+ {MISENSOR_16BIT, 0xD452, 0xFFFF},
+ {MISENSOR_16BIT, 0xD454, 0xC700},
+ {MISENSOR_16BIT, 0xD456, 0x9235},
+ {MISENSOR_16BIT, 0xD458, 0x0811},
+ {MISENSOR_16BIT, 0xD45A, 0x0043},
+ {MISENSOR_16BIT, 0xD45C, 0xFF3D},
+ {MISENSOR_16BIT, 0xD45E, 0x080D},
+ {MISENSOR_16BIT, 0xD460, 0x0051},
+ {MISENSOR_16BIT, 0xD462, 0xD801},
+ {MISENSOR_16BIT, 0xD464, 0xFF77},
+ {MISENSOR_16BIT, 0xD466, 0xF025},
+ {MISENSOR_16BIT, 0xD468, 0x9501},
+ {MISENSOR_16BIT, 0xD46A, 0x9235},
+ {MISENSOR_16BIT, 0xD46C, 0x0911},
+ {MISENSOR_16BIT, 0xD46E, 0x0003},
+ {MISENSOR_16BIT, 0xD470, 0xFF49},
+ {MISENSOR_16BIT, 0xD472, 0x080D},
+ {MISENSOR_16BIT, 0xD474, 0x0051},
+ {MISENSOR_16BIT, 0xD476, 0xD800},
+ {MISENSOR_16BIT, 0xD478, 0xFF72},
+ {MISENSOR_16BIT, 0xD47A, 0xF01B},
+ {MISENSOR_16BIT, 0xD47C, 0x0886},
+ {MISENSOR_16BIT, 0xD47E, 0x03E0},
+ {MISENSOR_16BIT, 0xD480, 0xD801},
+ {MISENSOR_16BIT, 0xD482, 0x0EF6},
+ {MISENSOR_16BIT, 0xD484, 0x03C0},
+ {MISENSOR_16BIT, 0xD486, 0x0F52},
+ {MISENSOR_16BIT, 0xD488, 0x0340},
+ {MISENSOR_16BIT, 0xD48A, 0x0DBA},
+ {MISENSOR_16BIT, 0xD48C, 0x0200},
+ {MISENSOR_16BIT, 0xD48E, 0x0AF6},
+ {MISENSOR_16BIT, 0xD490, 0x0440},
+ {MISENSOR_16BIT, 0xD492, 0x0C22},
+ {MISENSOR_16BIT, 0xD494, 0x0400},
+ {MISENSOR_16BIT, 0xD496, 0x0D72},
+ {MISENSOR_16BIT, 0xD498, 0x0440},
+ {MISENSOR_16BIT, 0xD49A, 0x0DC2},
+ {MISENSOR_16BIT, 0xD49C, 0x0200},
+ {MISENSOR_16BIT, 0xD49E, 0x0972},
+ {MISENSOR_16BIT, 0xD4A0, 0x0440},
+ {MISENSOR_16BIT, 0xD4A2, 0x0D3A},
+ {MISENSOR_16BIT, 0xD4A4, 0x0220},
+ {MISENSOR_16BIT, 0xD4A6, 0xD820},
+ {MISENSOR_16BIT, 0xD4A8, 0x0BFA},
+ {MISENSOR_16BIT, 0xD4AA, 0x0260},
+ {MISENSOR_16BIT, 0xD4AC, 0x70C9},
+ {MISENSOR_16BIT, 0xD4AE, 0x0451},
+ {MISENSOR_16BIT, 0xD4B0, 0x05C0},
+ {MISENSOR_16BIT, 0xD4B2, 0x78E0},
+ {MISENSOR_16BIT, 0xD4B4, 0xD900},
+ {MISENSOR_16BIT, 0xD4B6, 0xF00A},
+ {MISENSOR_16BIT, 0xD4B8, 0x70CF},
+ {MISENSOR_16BIT, 0xD4BA, 0xFFFF},
+ {MISENSOR_16BIT, 0xD4BC, 0xD520},
+ {MISENSOR_16BIT, 0xD4BE, 0x7835},
+ {MISENSOR_16BIT, 0xD4C0, 0x8041},
+ {MISENSOR_16BIT, 0xD4C2, 0x8000},
+ {MISENSOR_16BIT, 0xD4C4, 0xE102},
+ {MISENSOR_16BIT, 0xD4C6, 0xA040},
+ {MISENSOR_16BIT, 0xD4C8, 0x09F1},
+ {MISENSOR_16BIT, 0xD4CA, 0x8114},
+ {MISENSOR_16BIT, 0xD4CC, 0x71CF},
+ {MISENSOR_16BIT, 0xD4CE, 0xFFFF},
+ {MISENSOR_16BIT, 0xD4D0, 0xD4E0},
+ {MISENSOR_16BIT, 0xD4D2, 0x70CF},
+ {MISENSOR_16BIT, 0xD4D4, 0xFFFF},
+ {MISENSOR_16BIT, 0xD4D6, 0xC594},
+ {MISENSOR_16BIT, 0xD4D8, 0xB03A},
+ {MISENSOR_16BIT, 0xD4DA, 0x7FE0},
+ {MISENSOR_16BIT, 0xD4DC, 0xD800},
+ {MISENSOR_16BIT, 0xD4DE, 0x0000},
+ {MISENSOR_16BIT, 0xD4E0, 0x0000},
+ {MISENSOR_16BIT, 0xD4E2, 0x0500},
+ {MISENSOR_16BIT, 0xD4E4, 0x0500},
+ {MISENSOR_16BIT, 0xD4E6, 0x0200},
+ {MISENSOR_16BIT, 0xD4E8, 0x0330},
+ {MISENSOR_16BIT, 0xD4EA, 0x0000},
+ {MISENSOR_16BIT, 0xD4EC, 0x0000},
+ {MISENSOR_16BIT, 0xD4EE, 0x03CD},
+ {MISENSOR_16BIT, 0xD4F0, 0x050D},
+ {MISENSOR_16BIT, 0xD4F2, 0x01C5},
+ {MISENSOR_16BIT, 0xD4F4, 0x03B3},
+ {MISENSOR_16BIT, 0xD4F6, 0x00E0},
+ {MISENSOR_16BIT, 0xD4F8, 0x01E3},
+ {MISENSOR_16BIT, 0xD4FA, 0x0280},
+ {MISENSOR_16BIT, 0xD4FC, 0x01E0},
+ {MISENSOR_16BIT, 0xD4FE, 0x0109},
+ {MISENSOR_16BIT, 0xD500, 0x0080},
+ {MISENSOR_16BIT, 0xD502, 0x0500},
+ {MISENSOR_16BIT, 0xD504, 0x0000},
+ {MISENSOR_16BIT, 0xD506, 0x0000},
+ {MISENSOR_16BIT, 0xD508, 0x0000},
+ {MISENSOR_16BIT, 0xD50A, 0x0000},
+ {MISENSOR_16BIT, 0xD50C, 0x0000},
+ {MISENSOR_16BIT, 0xD50E, 0x0000},
+ {MISENSOR_16BIT, 0xD510, 0x0000},
+ {MISENSOR_16BIT, 0xD512, 0x0000},
+ {MISENSOR_16BIT, 0xD514, 0x0000},
+ {MISENSOR_16BIT, 0xD516, 0x0000},
+ {MISENSOR_16BIT, 0xD518, 0x0000},
+ {MISENSOR_16BIT, 0xD51A, 0x0000},
+ {MISENSOR_16BIT, 0xD51C, 0x0000},
+ {MISENSOR_16BIT, 0xD51E, 0x0000},
+ {MISENSOR_16BIT, 0xD520, 0xFFFF},
+ {MISENSOR_16BIT, 0xD522, 0xC9B4},
+ {MISENSOR_16BIT, 0xD524, 0xFFFF},
+ {MISENSOR_16BIT, 0xD526, 0xD324},
+ {MISENSOR_16BIT, 0xD528, 0xFFFF},
+ {MISENSOR_16BIT, 0xD52A, 0xCA34},
+ {MISENSOR_16BIT, 0xD52C, 0xFFFF},
+ {MISENSOR_16BIT, 0xD52E, 0xD3EC},
+ {MISENSOR_16BIT, 0x098E, 0x0000},
+ {MISENSOR_16BIT, 0xE000, 0x04B4},
+ {MISENSOR_16BIT, 0xE002, 0x0302},
+ {MISENSOR_16BIT, 0xE004, 0x4103},
+ {MISENSOR_16BIT, 0xE006, 0x0202},
+ {MISENSOR_16BIT, 0x0080, 0xFFF0},
+ {MISENSOR_16BIT, 0x0080, 0xFFF1},
+
+ /* PGA parameter and APGA
+ * [Step4-APGA] [TP101_MT9M114_APGA]
+ */
+ {MISENSOR_16BIT, 0x098E, 0x495E},
+ {MISENSOR_16BIT, 0xC95E, 0x0000},
+ {MISENSOR_16BIT, 0x3640, 0x02B0},
+ {MISENSOR_16BIT, 0x3642, 0x8063},
+ {MISENSOR_16BIT, 0x3644, 0x78D0},
+ {MISENSOR_16BIT, 0x3646, 0x50CC},
+ {MISENSOR_16BIT, 0x3648, 0x3511},
+ {MISENSOR_16BIT, 0x364A, 0x0110},
+ {MISENSOR_16BIT, 0x364C, 0xBD8A},
+ {MISENSOR_16BIT, 0x364E, 0x0CD1},
+ {MISENSOR_16BIT, 0x3650, 0x24ED},
+ {MISENSOR_16BIT, 0x3652, 0x7C11},
+ {MISENSOR_16BIT, 0x3654, 0x0150},
+ {MISENSOR_16BIT, 0x3656, 0x124C},
+ {MISENSOR_16BIT, 0x3658, 0x3130},
+ {MISENSOR_16BIT, 0x365A, 0x508C},
+ {MISENSOR_16BIT, 0x365C, 0x21F1},
+ {MISENSOR_16BIT, 0x365E, 0x0090},
+ {MISENSOR_16BIT, 0x3660, 0xBFCA},
+ {MISENSOR_16BIT, 0x3662, 0x0A11},
+ {MISENSOR_16BIT, 0x3664, 0x4F4B},
+ {MISENSOR_16BIT, 0x3666, 0x28B1},
+ {MISENSOR_16BIT, 0x3680, 0x50A9},
+ {MISENSOR_16BIT, 0x3682, 0xA04B},
+ {MISENSOR_16BIT, 0x3684, 0x0E2D},
+ {MISENSOR_16BIT, 0x3686, 0x73EC},
+ {MISENSOR_16BIT, 0x3688, 0x164F},
+ {MISENSOR_16BIT, 0x368A, 0xF829},
+ {MISENSOR_16BIT, 0x368C, 0xC1A8},
+ {MISENSOR_16BIT, 0x368E, 0xB0EC},
+ {MISENSOR_16BIT, 0x3690, 0xE76A},
+ {MISENSOR_16BIT, 0x3692, 0x69AF},
+ {MISENSOR_16BIT, 0x3694, 0x378C},
+ {MISENSOR_16BIT, 0x3696, 0xA70D},
+ {MISENSOR_16BIT, 0x3698, 0x884F},
+ {MISENSOR_16BIT, 0x369A, 0xEE8B},
+ {MISENSOR_16BIT, 0x369C, 0x5DEF},
+ {MISENSOR_16BIT, 0x369E, 0x27CC},
+ {MISENSOR_16BIT, 0x36A0, 0xCAAC},
+ {MISENSOR_16BIT, 0x36A2, 0x840E},
+ {MISENSOR_16BIT, 0x36A4, 0xDAA9},
+ {MISENSOR_16BIT, 0x36A6, 0xF00C},
+ {MISENSOR_16BIT, 0x36C0, 0x1371},
+ {MISENSOR_16BIT, 0x36C2, 0x272F},
+ {MISENSOR_16BIT, 0x36C4, 0x2293},
+ {MISENSOR_16BIT, 0x36C6, 0xE6D0},
+ {MISENSOR_16BIT, 0x36C8, 0xEC32},
+ {MISENSOR_16BIT, 0x36CA, 0x11B1},
+ {MISENSOR_16BIT, 0x36CC, 0x7BAF},
+ {MISENSOR_16BIT, 0x36CE, 0x5813},
+ {MISENSOR_16BIT, 0x36D0, 0xB871},
+ {MISENSOR_16BIT, 0x36D2, 0x8913},
+ {MISENSOR_16BIT, 0x36D4, 0x4610},
+ {MISENSOR_16BIT, 0x36D6, 0x7EEE},
+ {MISENSOR_16BIT, 0x36D8, 0x0DF3},
+ {MISENSOR_16BIT, 0x36DA, 0xB84F},
+ {MISENSOR_16BIT, 0x36DC, 0xB532},
+ {MISENSOR_16BIT, 0x36DE, 0x1171},
+ {MISENSOR_16BIT, 0x36E0, 0x13CF},
+ {MISENSOR_16BIT, 0x36E2, 0x22F3},
+ {MISENSOR_16BIT, 0x36E4, 0xE090},
+ {MISENSOR_16BIT, 0x36E6, 0x8133},
+ {MISENSOR_16BIT, 0x3700, 0x88AE},
+ {MISENSOR_16BIT, 0x3702, 0x00EA},
+ {MISENSOR_16BIT, 0x3704, 0x344F},
+ {MISENSOR_16BIT, 0x3706, 0xEC88},
+ {MISENSOR_16BIT, 0x3708, 0x3E91},
+ {MISENSOR_16BIT, 0x370A, 0xF12D},
+ {MISENSOR_16BIT, 0x370C, 0xB0EF},
+ {MISENSOR_16BIT, 0x370E, 0x77CD},
+ {MISENSOR_16BIT, 0x3710, 0x7930},
+ {MISENSOR_16BIT, 0x3712, 0x5C12},
+ {MISENSOR_16BIT, 0x3714, 0x500C},
+ {MISENSOR_16BIT, 0x3716, 0x22CE},
+ {MISENSOR_16BIT, 0x3718, 0x2370},
+ {MISENSOR_16BIT, 0x371A, 0x258F},
+ {MISENSOR_16BIT, 0x371C, 0x3D30},
+ {MISENSOR_16BIT, 0x371E, 0x370C},
+ {MISENSOR_16BIT, 0x3720, 0x03ED},
+ {MISENSOR_16BIT, 0x3722, 0x9AD0},
+ {MISENSOR_16BIT, 0x3724, 0x7ECF},
+ {MISENSOR_16BIT, 0x3726, 0x1093},
+ {MISENSOR_16BIT, 0x3740, 0x2391},
+ {MISENSOR_16BIT, 0x3742, 0xAAD0},
+ {MISENSOR_16BIT, 0x3744, 0x28F2},
+ {MISENSOR_16BIT, 0x3746, 0xBA4F},
+ {MISENSOR_16BIT, 0x3748, 0xC536},
+ {MISENSOR_16BIT, 0x374A, 0x1472},
+ {MISENSOR_16BIT, 0x374C, 0xD110},
+ {MISENSOR_16BIT, 0x374E, 0x2933},
+ {MISENSOR_16BIT, 0x3750, 0xD0D1},
+ {MISENSOR_16BIT, 0x3752, 0x9F37},
+ {MISENSOR_16BIT, 0x3754, 0x34D1},
+ {MISENSOR_16BIT, 0x3756, 0x1C6C},
+ {MISENSOR_16BIT, 0x3758, 0x3FD2},
+ {MISENSOR_16BIT, 0x375A, 0xCB72},
+ {MISENSOR_16BIT, 0x375C, 0xBA96},
+ {MISENSOR_16BIT, 0x375E, 0x1551},
+ {MISENSOR_16BIT, 0x3760, 0xB74F},
+ {MISENSOR_16BIT, 0x3762, 0x1672},
+ {MISENSOR_16BIT, 0x3764, 0x84F1},
+ {MISENSOR_16BIT, 0x3766, 0xC2D6},
+ {MISENSOR_16BIT, 0x3782, 0x01E0},
+ {MISENSOR_16BIT, 0x3784, 0x0280},
+ {MISENSOR_16BIT, 0x37C0, 0xA6EA},
+ {MISENSOR_16BIT, 0x37C2, 0x874B},
+ {MISENSOR_16BIT, 0x37C4, 0x85CB},
+ {MISENSOR_16BIT, 0x37C6, 0x968A},
+ {MISENSOR_16BIT, 0x098E, 0x0000},
+ {MISENSOR_16BIT, 0xC960, 0x0AF0},
+ {MISENSOR_16BIT, 0xC962, 0x79E2},
+ {MISENSOR_16BIT, 0xC964, 0x5EC8},
+ {MISENSOR_16BIT, 0xC966, 0x791F},
+ {MISENSOR_16BIT, 0xC968, 0x76EE},
+ {MISENSOR_16BIT, 0xC96A, 0x0FA0},
+ {MISENSOR_16BIT, 0xC96C, 0x7DFA},
+ {MISENSOR_16BIT, 0xC96E, 0x7DAF},
+ {MISENSOR_16BIT, 0xC970, 0x7E02},
+ {MISENSOR_16BIT, 0xC972, 0x7E0A},
+ {MISENSOR_16BIT, 0xC974, 0x1964},
+ {MISENSOR_16BIT, 0xC976, 0x7CDC},
+ {MISENSOR_16BIT, 0xC978, 0x7838},
+ {MISENSOR_16BIT, 0xC97A, 0x7C2F},
+ {MISENSOR_16BIT, 0xC97C, 0x7792},
+ {MISENSOR_16BIT, 0xC95E, 0x0003},
+
+ /* [Step4-APGA] */
+ {MISENSOR_16BIT, 0x098E, 0x0000},
+ {MISENSOR_16BIT, 0xC95E, 0x0003},
+
+ /* [Step5-AWB_CCM]1: LOAD=CCM */
+ {MISENSOR_16BIT, 0xC892, 0x0267},
+ {MISENSOR_16BIT, 0xC894, 0xFF1A},
+ {MISENSOR_16BIT, 0xC896, 0xFFB3},
+ {MISENSOR_16BIT, 0xC898, 0xFF80},
+ {MISENSOR_16BIT, 0xC89A, 0x0166},
+ {MISENSOR_16BIT, 0xC89C, 0x0003},
+ {MISENSOR_16BIT, 0xC89E, 0xFF9A},
+ {MISENSOR_16BIT, 0xC8A0, 0xFEB4},
+ {MISENSOR_16BIT, 0xC8A2, 0x024D},
+ {MISENSOR_16BIT, 0xC8A4, 0x01BF},
+ {MISENSOR_16BIT, 0xC8A6, 0xFF01},
+ {MISENSOR_16BIT, 0xC8A8, 0xFFF3},
+ {MISENSOR_16BIT, 0xC8AA, 0xFF75},
+ {MISENSOR_16BIT, 0xC8AC, 0x0198},
+ {MISENSOR_16BIT, 0xC8AE, 0xFFFD},
+ {MISENSOR_16BIT, 0xC8B0, 0xFF9A},
+ {MISENSOR_16BIT, 0xC8B2, 0xFEE7},
+ {MISENSOR_16BIT, 0xC8B4, 0x02A8},
+ {MISENSOR_16BIT, 0xC8B6, 0x01D9},
+ {MISENSOR_16BIT, 0xC8B8, 0xFF26},
+ {MISENSOR_16BIT, 0xC8BA, 0xFFF3},
+ {MISENSOR_16BIT, 0xC8BC, 0xFFB3},
+ {MISENSOR_16BIT, 0xC8BE, 0x0132},
+ {MISENSOR_16BIT, 0xC8C0, 0xFFE8},
+ {MISENSOR_16BIT, 0xC8C2, 0xFFDA},
+ {MISENSOR_16BIT, 0xC8C4, 0xFECD},
+ {MISENSOR_16BIT, 0xC8C6, 0x02C2},
+ {MISENSOR_16BIT, 0xC8C8, 0x0075},
+ {MISENSOR_16BIT, 0xC8CA, 0x011C},
+ {MISENSOR_16BIT, 0xC8CC, 0x009A},
+ {MISENSOR_16BIT, 0xC8CE, 0x0105},
+ {MISENSOR_16BIT, 0xC8D0, 0x00A4},
+ {MISENSOR_16BIT, 0xC8D2, 0x00AC},
+ {MISENSOR_16BIT, 0xC8D4, 0x0A8C},
+ {MISENSOR_16BIT, 0xC8D6, 0x0F0A},
+ {MISENSOR_16BIT, 0xC8D8, 0x1964},
+
+ /* LOAD=AWB */
+ {MISENSOR_16BIT, 0xC914, 0x0000},
+ {MISENSOR_16BIT, 0xC916, 0x0000},
+ {MISENSOR_16BIT, 0xC918, 0x04FF},
+ {MISENSOR_16BIT, 0xC91A, 0x02CF},
+ {MISENSOR_16BIT, 0xC904, 0x0033},
+ {MISENSOR_16BIT, 0xC906, 0x0040},
+ {MISENSOR_8BIT, 0xC8F2, 0x03},
+ {MISENSOR_8BIT, 0xC8F3, 0x02},
+ {MISENSOR_16BIT, 0xC906, 0x003C},
+ {MISENSOR_16BIT, 0xC8F4, 0x0000},
+ {MISENSOR_16BIT, 0xC8F6, 0x0000},
+ {MISENSOR_16BIT, 0xC8F8, 0x0000},
+ {MISENSOR_16BIT, 0xC8FA, 0xE724},
+ {MISENSOR_16BIT, 0xC8FC, 0x1583},
+ {MISENSOR_16BIT, 0xC8FE, 0x2045},
+ {MISENSOR_16BIT, 0xC900, 0x05DC},
+ {MISENSOR_16BIT, 0xC902, 0x007C},
+ {MISENSOR_8BIT, 0xC90C, 0x80},
+ {MISENSOR_8BIT, 0xC90D, 0x80},
+ {MISENSOR_8BIT, 0xC90E, 0x80},
+ {MISENSOR_8BIT, 0xC90F, 0x88},
+ {MISENSOR_8BIT, 0xC910, 0x80},
+ {MISENSOR_8BIT, 0xC911, 0x80},
+
+ /* LOAD=Step7-CPIPE_Preference */
+ {MISENSOR_16BIT, 0xC926, 0x0020},
+ {MISENSOR_16BIT, 0xC928, 0x009A},
+ {MISENSOR_16BIT, 0xC946, 0x0070},
+ {MISENSOR_16BIT, 0xC948, 0x00F3},
+ {MISENSOR_16BIT, 0xC952, 0x0020},
+ {MISENSOR_16BIT, 0xC954, 0x009A},
+ {MISENSOR_8BIT, 0xC92A, 0x80},
+ {MISENSOR_8BIT, 0xC92B, 0x4B},
+ {MISENSOR_8BIT, 0xC92C, 0x00},
+ {MISENSOR_8BIT, 0xC92D, 0xFF},
+ {MISENSOR_8BIT, 0xC92E, 0x3C},
+ {MISENSOR_8BIT, 0xC92F, 0x02},
+ {MISENSOR_8BIT, 0xC930, 0x06},
+ {MISENSOR_8BIT, 0xC931, 0x64},
+ {MISENSOR_8BIT, 0xC932, 0x01},
+ {MISENSOR_8BIT, 0xC933, 0x0C},
+ {MISENSOR_8BIT, 0xC934, 0x3C},
+ {MISENSOR_8BIT, 0xC935, 0x3C},
+ {MISENSOR_8BIT, 0xC936, 0x3C},
+ {MISENSOR_8BIT, 0xC937, 0x0F},
+ {MISENSOR_8BIT, 0xC938, 0x64},
+ {MISENSOR_8BIT, 0xC939, 0x64},
+ {MISENSOR_8BIT, 0xC93A, 0x64},
+ {MISENSOR_8BIT, 0xC93B, 0x32},
+ {MISENSOR_16BIT, 0xC93C, 0x0020},
+ {MISENSOR_16BIT, 0xC93E, 0x009A},
+ {MISENSOR_16BIT, 0xC940, 0x00DC},
+ {MISENSOR_8BIT, 0xC942, 0x38},
+ {MISENSOR_8BIT, 0xC943, 0x30},
+ {MISENSOR_8BIT, 0xC944, 0x50},
+ {MISENSOR_8BIT, 0xC945, 0x19},
+ {MISENSOR_16BIT, 0xC94A, 0x0230},
+ {MISENSOR_16BIT, 0xC94C, 0x0010},
+ {MISENSOR_16BIT, 0xC94E, 0x01CD},
+ {MISENSOR_8BIT, 0xC950, 0x05},
+ {MISENSOR_8BIT, 0xC951, 0x40},
+ {MISENSOR_8BIT, 0xC87B, 0x1B},
+ {MISENSOR_8BIT, 0xC878, 0x0E},
+ {MISENSOR_16BIT, 0xC890, 0x0080},
+ {MISENSOR_16BIT, 0xC886, 0x0100},
+ {MISENSOR_16BIT, 0xC87C, 0x005A},
+ {MISENSOR_8BIT, 0xB42A, 0x05},
+ {MISENSOR_8BIT, 0xA80A, 0x20},
+
+ /* Speed up AE/AWB */
+ {MISENSOR_16BIT, 0x098E, 0x2802},
+ {MISENSOR_16BIT, 0xA802, 0x0008},
+ {MISENSOR_8BIT, 0xC908, 0x01},
+ {MISENSOR_8BIT, 0xC879, 0x01},
+ {MISENSOR_8BIT, 0xC909, 0x02},
+ {MISENSOR_8BIT, 0xA80A, 0x18},
+ {MISENSOR_8BIT, 0xA80B, 0x18},
+ {MISENSOR_8BIT, 0xAC16, 0x18},
+ {MISENSOR_8BIT, 0xC878, 0x0E},
+
+ {MISENSOR_TOK_TERM, 0, 0}
+};
+
+#endif
diff --git a/drivers/staging/media/atomisp/i2c/ov2680.c b/drivers/staging/media/atomisp/i2c/ov2680.c
new file mode 100644
index 000000000000..566091035c64
--- /dev/null
+++ b/drivers/staging/media/atomisp/i2c/ov2680.c
@@ -0,0 +1,1559 @@
+/*
+ * Support for OmniVision OV2680 1080p HD camera sensor.
+ *
+ * Copyright (c) 2013 Intel Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/kmod.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/gpio.h>
+#include <linux/moduleparam.h>
+#include <media/v4l2-device.h>
+#include <linux/io.h>
+#include <linux/acpi.h>
+#include "../include/linux/atomisp_gmin_platform.h"
+
+#include "ov2680.h"
+
+static int h_flag;
+static int v_flag;
+static enum atomisp_bayer_order ov2680_bayer_order_mapping[] = {
+ atomisp_bayer_order_bggr,
+ atomisp_bayer_order_grbg,
+ atomisp_bayer_order_gbrg,
+ atomisp_bayer_order_rggb,
+};
+
+/* i2c read/write stuff */
+static int ov2680_read_reg(struct i2c_client *client,
+ u16 data_length, u16 reg, u16 *val)
+{
+ int err;
+ struct i2c_msg msg[2];
+ unsigned char data[6];
+
+ if (!client->adapter) {
+ dev_err(&client->dev, "%s error, no client->adapter\n",
+ __func__);
+ return -ENODEV;
+ }
+
+ if (data_length != OV2680_8BIT && data_length != OV2680_16BIT
+ && data_length != OV2680_32BIT) {
+ dev_err(&client->dev, "%s error, invalid data length\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ memset(msg, 0, sizeof(msg));
+
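+ /*
+ * msg[0] carries the 16-bit register address (high byte first) and
+ * msg[1] then reads data_length bytes back from that address within
+ * the same i2c_transfer() call.
+ */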
+ msg[0].addr = client->addr;
+ msg[0].flags = 0;
+ msg[0].len = I2C_MSG_LENGTH;
+ msg[0].buf = data;
+
+ /* high byte goes out first */
+ data[0] = (u8)(reg >> 8);
+ data[1] = (u8)(reg & 0xff);
+
+ msg[1].addr = client->addr;
+ msg[1].len = data_length;
+ msg[1].flags = I2C_M_RD;
+ msg[1].buf = data;
+
+ err = i2c_transfer(client->adapter, msg, 2);
+ if (err != 2) {
+ if (err >= 0)
+ err = -EIO;
+ dev_err(&client->dev,
+ "read from offset 0x%x error %d", reg, err);
+ return err;
+ }
+
+ *val = 0;
+ /* high byte comes first */
+ if (data_length == OV2680_8BIT)
+ *val = (u8)data[0];
+ else if (data_length == OV2680_16BIT)
+ *val = be16_to_cpu(*(u16 *)&data[0]);
+ else
+ *val = be32_to_cpu(*(u32 *)&data[0]);
+ //dev_dbg(&client->dev, "++++i2c read adr%x = %x\n", reg,*val);
+ return 0;
+}
+
+static int ov2680_i2c_write(struct i2c_client *client, u16 len, u8 *data)
+{
+ struct i2c_msg msg;
+ const int num_msg = 1;
+ int ret;
+
+ msg.addr = client->addr;
+ msg.flags = 0;
+ msg.len = len;
+ msg.buf = data;
+ ret = i2c_transfer(client->adapter, &msg, 1);
+ //dev_dbg(&client->dev, "+++i2c write reg=%x->%x\n", data[0]*256 +data[1],data[2]);
+ return ret == num_msg ? 0 : -EIO;
+}
+
+static int ov2680_write_reg(struct i2c_client *client, u16 data_length,
+ u16 reg, u16 val)
+{
+ int ret;
+ unsigned char data[4] = {0};
+ u16 *wreg = (u16 *)data;
+ const u16 len = data_length + sizeof(u16); /* 16-bit address + data */
+
+ if (data_length != OV2680_8BIT && data_length != OV2680_16BIT) {
+ dev_err(&client->dev,
+ "%s error, invalid data_length\n", __func__);
+ return -EINVAL;
+ }
+
+ /* high byte goes out first */
+ *wreg = cpu_to_be16(reg);
+
+ if (data_length == OV2680_8BIT) {
+ data[2] = (u8)(val);
+ } else {
+ /* OV2680_16BIT */
+ u16 *wdata = (u16 *)&data[2];
+ *wdata = cpu_to_be16(val);
+ }
+
+ ret = ov2680_i2c_write(client, len, data);
+ if (ret)
+ dev_err(&client->dev,
+ "write error: wrote 0x%x to offset 0x%x error %d",
+ val, reg, ret);
+
+ return ret;
+}
+
+/*
+ * ov2680_write_reg_array - Initializes a list of OV2680 registers
+ * @client: i2c driver client structure
+ * @reglist: list of registers to be written
+ *
+ * This function writes a list of registers to the sensor. When consecutive
+ * addresses are found in the list, the data is buffered and sent in a
+ * single i2c_transfer().
+ *
+ * __ov2680_flush_reg_array(), __ov2680_buf_reg_array() and
+ * __ov2680_write_reg_is_consecutive() are internal helpers for
+ * ov2680_write_reg_array() and should not be used anywhere else.
+ *
+ */
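+/*
+ * For example (illustrative register values, not taken from the tables in
+ * this driver): three consecutive 8-bit entries at 0x3500, 0x3501 and
+ * 0x3502 are buffered and sent as one transfer carrying the start address
+ * 0x3500 followed by the three data bytes; a non-consecutive address or an
+ * OV2680_TOK_DELAY entry forces the buffer to be flushed first.
+ */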
+
+static int __ov2680_flush_reg_array(struct i2c_client *client,
+ struct ov2680_write_ctrl *ctrl)
+{
+ u16 size;
+
+ if (ctrl->index == 0)
+ return 0;
+
+ size = sizeof(u16) + ctrl->index; /* 16-bit address + data */
+ ctrl->buffer.addr = cpu_to_be16(ctrl->buffer.addr);
+ ctrl->index = 0;
+
+ return ov2680_i2c_write(client, size, (u8 *)&ctrl->buffer);
+}
+
+static int __ov2680_buf_reg_array(struct i2c_client *client,
+ struct ov2680_write_ctrl *ctrl,
+ const struct ov2680_reg *next)
+{
+ int size;
+ u16 *data16;
+
+ switch (next->type) {
+ case OV2680_8BIT:
+ size = 1;
+ ctrl->buffer.data[ctrl->index] = (u8)next->val;
+ break;
+ case OV2680_16BIT:
+ size = 2;
+ data16 = (u16 *)&ctrl->buffer.data[ctrl->index];
+ *data16 = cpu_to_be16((u16)next->val);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* When first item is added, we need to store its starting address */
+ if (ctrl->index == 0)
+ ctrl->buffer.addr = next->reg;
+
+ ctrl->index += size;
+
+ /*
+ * If the buffer may not have room for another 16-bit value, flush it
+ * now so that the next item cannot overflow it.
+ */
+ if (ctrl->index + sizeof(u16) >= OV2680_MAX_WRITE_BUF_SIZE)
+ return __ov2680_flush_reg_array(client, ctrl);
+
+ return 0;
+}
+
+static int __ov2680_write_reg_is_consecutive(struct i2c_client *client,
+ struct ov2680_write_ctrl *ctrl,
+ const struct ov2680_reg *next)
+{
+ if (ctrl->index == 0)
+ return 1;
+
+ return ctrl->buffer.addr + ctrl->index == next->reg;
+}
+
+static int ov2680_write_reg_array(struct i2c_client *client,
+ const struct ov2680_reg *reglist)
+{
+ const struct ov2680_reg *next = reglist;
+ struct ov2680_write_ctrl ctrl;
+ int err;
+ dev_dbg(&client->dev, "++++write reg array\n");
+ ctrl.index = 0;
+ for (; next->type != OV2680_TOK_TERM; next++) {
+ switch (next->type & OV2680_TOK_MASK) {
+ case OV2680_TOK_DELAY:
+ err = __ov2680_flush_reg_array(client, &ctrl);
+ if (err)
+ return err;
+ msleep(next->val);
+ break;
+ default:
+ /*
+ * If next address is not consecutive, data needs to be
+ * flushed before proceeding.
+ */
+ dev_dbg(&client->dev, "+++ov2680_write_reg_array reg=%x->%x\n", next->reg, next->val);
+ if (!__ov2680_write_reg_is_consecutive(client, &ctrl,
+ next)) {
+ err = __ov2680_flush_reg_array(client, &ctrl);
+ if (err)
+ return err;
+ }
+ err = __ov2680_buf_reg_array(client, &ctrl, next);
+ if (err) {
+ dev_err(&client->dev, "%s: write error, aborted\n",
+ __func__);
+ return err;
+ }
+ break;
+ }
+ }
+
+ return __ov2680_flush_reg_array(client, &ctrl);
+}
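+
+/*
+ * The helpers below report the fixed lens parameters as packed rationals:
+ * focal length and f-number as (numerator << 16) | denominator, and the
+ * f-number range as four packed 8-bit fields. The exact encoding expected
+ * by user space is assumed to follow the other atomisp sensor drivers.
+ */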
+static int ov2680_g_focal(struct v4l2_subdev *sd, s32 *val)
+{
+
+ *val = (OV2680_FOCAL_LENGTH_NUM << 16) | OV2680_FOCAL_LENGTH_DEM;
+ return 0;
+}
+
+static int ov2680_g_fnumber(struct v4l2_subdev *sd, s32 *val)
+{
+ /* const f-number for ov2680 */
+
+ *val = (OV2680_F_NUMBER_DEFAULT_NUM << 16) | OV2680_F_NUMBER_DEM;
+ return 0;
+}
+
+static int ov2680_g_fnumber_range(struct v4l2_subdev *sd, s32 *val)
+{
+
+ *val = (OV2680_F_NUMBER_DEFAULT_NUM << 24) |
+ (OV2680_F_NUMBER_DEM << 16) |
+ (OV2680_F_NUMBER_DEFAULT_NUM << 8) | OV2680_F_NUMBER_DEM;
+ return 0;
+}
+
+static int ov2680_g_bin_factor_x(struct v4l2_subdev *sd, s32 *val)
+{
+ struct ov2680_device *dev = to_ov2680_sensor(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ dev_dbg(&client->dev, "++++ov2680_g_bin_factor_x\n");
+ *val = ov2680_res[dev->fmt_idx].bin_factor_x;
+
+ return 0;
+}
+
+static int ov2680_g_bin_factor_y(struct v4l2_subdev *sd, s32 *val)
+{
+ struct ov2680_device *dev = to_ov2680_sensor(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ *val = ov2680_res[dev->fmt_idx].bin_factor_y;
+ dev_dbg(&client->dev, "++++ov2680_g_bin_factor_y\n");
+ return 0;
+}
+
+
+static int ov2680_get_intg_factor(struct i2c_client *client,
+ struct camera_mipi_info *info,
+ const struct ov2680_resolution *res)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct ov2680_device *dev = to_ov2680_sensor(sd);
+ struct atomisp_sensor_mode_data *buf = &info->data;
+ unsigned int pix_clk_freq_hz;
+ u16 reg_val;
+ int ret;
+ dev_dbg(&client->dev, "++++ov2680_get_intg_factor\n");
+ if (!info)
+ return -EINVAL;
+
+ /* pixel clock */
+ pix_clk_freq_hz = res->pix_clk_freq * 1000000;
+
+ dev->vt_pix_clk_freq_mhz = pix_clk_freq_hz;
+ buf->vt_pix_clk_freq_mhz = pix_clk_freq_hz;
+
+ /* get integration time */
+ buf->coarse_integration_time_min = OV2680_COARSE_INTG_TIME_MIN;
+ buf->coarse_integration_time_max_margin =
+ OV2680_COARSE_INTG_TIME_MAX_MARGIN;
+
+ buf->fine_integration_time_min = OV2680_FINE_INTG_TIME_MIN;
+ buf->fine_integration_time_max_margin =
+ OV2680_FINE_INTG_TIME_MAX_MARGIN;
+
+ buf->fine_integration_time_def = OV2680_FINE_INTG_TIME_MIN;
+ buf->frame_length_lines = res->lines_per_frame;
+ buf->line_length_pck = res->pixels_per_line;
+ buf->read_mode = res->bin_mode;
+
+ /* get the cropping and output resolution to ISP for this mode. */
+ ret = ov2680_read_reg(client, OV2680_16BIT,
+ OV2680_HORIZONTAL_START_H, &reg_val);
+ if (ret)
+ return ret;
+ buf->crop_horizontal_start = reg_val;
+
+ ret = ov2680_read_reg(client, OV2680_16BIT,
+ OV2680_VERTICAL_START_H, &reg_val);
+ if (ret)
+ return ret;
+ buf->crop_vertical_start = reg_val;
+
+ ret = ov2680_read_reg(client, OV2680_16BIT,
+ OV2680_HORIZONTAL_END_H, &reg_val);
+ if (ret)
+ return ret;
+ buf->crop_horizontal_end = reg_val;
+
+ ret = ov2680_read_reg(client, OV2680_16BIT,
+ OV2680_VERTICAL_END_H, &reg_val);
+ if (ret)
+ return ret;
+ buf->crop_vertical_end = reg_val;
+
+ ret = ov2680_read_reg(client, OV2680_16BIT,
+ OV2680_HORIZONTAL_OUTPUT_SIZE_H, &reg_val);
+ if (ret)
+ return ret;
+ buf->output_width = reg_val;
+
+ ret = ov2680_read_reg(client, OV2680_16BIT,
+ OV2680_VERTICAL_OUTPUT_SIZE_H, &reg_val);
+ if (ret)
+ return ret;
+ buf->output_height = reg_val;
+
+ buf->binning_factor_x = res->bin_factor_x ?
+ (res->bin_factor_x * 2) : 1;
+ buf->binning_factor_y = res->bin_factor_y ?
+ (res->bin_factor_y * 2) : 1;
+ return 0;
+}
+
+static long __ov2680_set_exposure(struct v4l2_subdev *sd, int coarse_itg,
+ int gain, int digitgain)
+
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct ov2680_device *dev = to_ov2680_sensor(sd);
+ u16 vts, hts;
+ int ret, exp_val;
+
+ dev_dbg(&client->dev, "+++++++__ov2680_set_exposure coarse_itg %d, gain %d, digitgain %d++\n", coarse_itg, gain, digitgain);
+
+ hts = ov2680_res[dev->fmt_idx].pixels_per_line;
+ vts = ov2680_res[dev->fmt_idx].lines_per_frame;
+
+ /* group hold: start group 0 so VTS, exposure and gain update together */
+ ret = ov2680_write_reg(client, OV2680_8BIT,
+ OV2680_GROUP_ACCESS, 0x00);
+ if (ret) {
+ dev_err(&client->dev, "%s: write %x error, aborted\n",
+ __func__, OV2680_GROUP_ACCESS);
+ return ret;
+ }
+
+ /* Increase the VTS to match exposure + MARGIN */
+ if (coarse_itg > vts - OV2680_INTEGRATION_TIME_MARGIN)
+ vts = (u16) coarse_itg + OV2680_INTEGRATION_TIME_MARGIN;
+
+ ret = ov2680_write_reg(client, OV2680_16BIT, OV2680_TIMING_VTS_H, vts);
+ if (ret) {
+ dev_err(&client->dev, "%s: write %x error, aborted\n",
+ __func__, OV2680_TIMING_VTS_H);
+ return ret;
+ }
+
+ /* set exposure */
+
+ /* The lower four bits should be 0 */
+ exp_val = coarse_itg << 4;
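+ /*
+ * Illustrative example (hypothetical value): coarse_itg = 0x123 gives
+ * exp_val = 0x1230, written as 0x30 to OV2680_EXPOSURE_L, 0x12 to
+ * OV2680_EXPOSURE_M and 0x0 to OV2680_EXPOSURE_H (only the low four
+ * bits of the high register are used).
+ */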
+ ret = ov2680_write_reg(client, OV2680_8BIT,
+ OV2680_EXPOSURE_L, exp_val & 0xFF);
+ if (ret) {
+ dev_err(&client->dev, "%s: write %x error, aborted\n",
+ __func__, OV2680_EXPOSURE_L);
+ return ret;
+ }
+
+ ret = ov2680_write_reg(client, OV2680_8BIT,
+ OV2680_EXPOSURE_M, (exp_val >> 8) & 0xFF);
+ if (ret) {
+ dev_err(&client->dev, "%s: write %x error, aborted\n",
+ __func__, OV2680_EXPOSURE_M);
+ return ret;
+ }
+
+ ret = ov2680_write_reg(client, OV2680_8BIT,
+ OV2680_EXPOSURE_H, (exp_val >> 16) & 0x0F);
+ if (ret) {
+ dev_err(&client->dev, "%s: write %x error, aborted\n",
+ __func__, OV2680_EXPOSURE_H);
+ return ret;
+ }
+
+ /* Analog gain */
+ ret = ov2680_write_reg(client, OV2680_16BIT, OV2680_AGC_H, gain);
+ if (ret) {
+ dev_err(&client->dev, "%s: write %x error, aborted\n",
+ __func__, OV2680_AGC_H);
+ return ret;
+ }
+ /* Digital gain */
+ if (digitgain) {
+ ret = ov2680_write_reg(client, OV2680_16BIT,
+ OV2680_MWB_RED_GAIN_H, digitgain);
+ if (ret) {
+ dev_err(&client->dev, "%s: write %x error, aborted\n",
+ __func__, OV2680_MWB_RED_GAIN_H);
+ return ret;
+ }
+
+ ret = ov2680_write_reg(client, OV2680_16BIT,
+ OV2680_MWB_GREEN_GAIN_H, digitgain);
+ if (ret) {
+ dev_err(&client->dev, "%s: write %x error, aborted\n",
+ __func__, OV2680_MWB_GREEN_GAIN_H);
+ return ret;
+ }
+
+ ret = ov2680_write_reg(client, OV2680_16BIT,
+ OV2680_MWB_BLUE_GAIN_H, digitgain);
+ if (ret) {
+ dev_err(&client->dev, "%s: write %x error, aborted\n",
+ __func__, OV2680_MWB_BLUE_GAIN_H);
+ return ret;
+ }
+ }
+
+ /* End group */
+ ret = ov2680_write_reg(client, OV2680_8BIT,
+ OV2680_GROUP_ACCESS, 0x10);
+ if (ret)
+ return ret;
+
+ /* Delay launch group */
+ ret = ov2680_write_reg(client, OV2680_8BIT,
+ OV2680_GROUP_ACCESS, 0xa0);
+ if (ret)
+ return ret;
+ return ret;
+}
+
+static int ov2680_set_exposure(struct v4l2_subdev *sd, int exposure,
+ int gain, int digitgain)
+{
+ struct ov2680_device *dev = to_ov2680_sensor(sd);
+ int ret;
+
+ mutex_lock(&dev->input_lock);
+ ret = __ov2680_set_exposure(sd, exposure, gain, digitgain);
+ mutex_unlock(&dev->input_lock);
+
+ return ret;
+}
+
+static long ov2680_s_exposure(struct v4l2_subdev *sd,
+ struct atomisp_exposure *exposure)
+{
+ u16 coarse_itg = exposure->integration_time[0];
+ u16 analog_gain = exposure->gain[0];
+ u16 digital_gain = exposure->gain[1];
+
+ /* a zero analog gain is invalid and must be rejected */
+ if (analog_gain == 0) {
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ v4l2_err(client, "%s: invalid value\n", __func__);
+ return -EINVAL;
+ }
+
+ // EXPOSURE CONTROL DISABLED FOR INITIAL CHECKIN, TUNING DOESN'T WORK
+ return ov2680_set_exposure(sd, coarse_itg, analog_gain, digital_gain);
+}
+
+static long ov2680_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
+{
+
+ switch (cmd) {
+ case ATOMISP_IOC_S_EXPOSURE:
+ return ov2680_s_exposure(sd, arg);
+
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/* This returns the exposure time being used. This should only be used
+ * for filling in EXIF data, not for actual image processing.
+ */
+static int ov2680_q_exposure(struct v4l2_subdev *sd, s32 *value)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ u16 reg_v, reg_v2;
+ int ret;
+
+ /* get exposure */
+ ret = ov2680_read_reg(client, OV2680_8BIT,
+ OV2680_EXPOSURE_L,
+ &reg_v);
+ if (ret)
+ goto err;
+
+ ret = ov2680_read_reg(client, OV2680_8BIT,
+ OV2680_EXPOSURE_M,
+ &reg_v2);
+ if (ret)
+ goto err;
+
+ reg_v += reg_v2 << 8;
+ ret = ov2680_read_reg(client, OV2680_8BIT,
+ OV2680_EXPOSURE_H,
+ &reg_v2);
+ if (ret)
+ goto err;
+
+ *value = reg_v + (((u32)reg_v2 << 16));
+err:
+ return ret;
+}
+
+static u32 ov2680_translate_bayer_order(enum atomisp_bayer_order code)
+{
+ switch (code) {
+ case atomisp_bayer_order_rggb:
+ return MEDIA_BUS_FMT_SRGGB10_1X10;
+ case atomisp_bayer_order_grbg:
+ return MEDIA_BUS_FMT_SGRBG10_1X10;
+ case atomisp_bayer_order_bggr:
+ return MEDIA_BUS_FMT_SBGGR10_1X10;
+ case atomisp_bayer_order_gbrg:
+ return MEDIA_BUS_FMT_SGBRG10_1X10;
+ }
+ return 0;
+}
+
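+/*
+ * Flipping or mirroring the readout changes which colour the first pixel
+ * of a frame has, so the two helpers below rebuild the Bayer order: the
+ * index formed from v_flag/h_flag (via OV2680_FLIP_BIT and
+ * OV2680_MIRROR_BIT, presumably defined in ov2680.h) selects an entry of
+ * ov2680_bayer_order_mapping, which is then translated to a media bus
+ * code for the ISP.
+ */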
+static int ov2680_v_flip(struct v4l2_subdev *sd, s32 value)
+{
+ struct ov2680_device *dev = to_ov2680_sensor(sd);
+ struct camera_mipi_info *ov2680_info = NULL;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret;
+ u16 val;
+ u8 index;
+ dev_dbg(&client->dev, "@%s: value:%d\n", __func__, value);
+ ret = ov2680_read_reg(client, OV2680_8BIT, OV2680_FLIP_REG, &val);
+ if (ret)
+ return ret;
+ if (value) {
+ val |= OV2680_FLIP_MIRROR_BIT_ENABLE;
+ } else {
+ val &= ~OV2680_FLIP_MIRROR_BIT_ENABLE;
+ }
+ ret = ov2680_write_reg(client, OV2680_8BIT,
+ OV2680_FLIP_REG, val);
+ if (ret)
+ return ret;
+ index = (v_flag > 0 ? OV2680_FLIP_BIT : 0) | (h_flag > 0 ? OV2680_MIRROR_BIT : 0);
+ ov2680_info = v4l2_get_subdev_hostdata(sd);
+ if (ov2680_info) {
+ ov2680_info->raw_bayer_order = ov2680_bayer_order_mapping[index];
+ dev->format.code = ov2680_translate_bayer_order(
+ ov2680_info->raw_bayer_order);
+ }
+ return ret;
+}
+
+static int ov2680_h_flip(struct v4l2_subdev *sd, s32 value)
+{
+ struct ov2680_device *dev = to_ov2680_sensor(sd);
+ struct camera_mipi_info *ov2680_info = NULL;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret;
+ u16 val;
+ u8 index;
+ dev_dbg(&client->dev, "@%s: value:%d\n", __func__, value);
+
+ ret = ov2680_read_reg(client, OV2680_8BIT, OV2680_MIRROR_REG, &val);
+ if (ret)
+ return ret;
+ if (value) {
+ val |= OV2680_FLIP_MIRROR_BIT_ENABLE;
+ } else {
+ val &= ~OV2680_FLIP_MIRROR_BIT_ENABLE;
+ }
+ ret = ov2680_write_reg(client, OV2680_8BIT,
+ OV2680_MIRROR_REG, val);
+ if (ret)
+ return ret;
+ index = (v_flag > 0 ? OV2680_FLIP_BIT : 0) | (h_flag > 0 ? OV2680_MIRROR_BIT : 0);
+ ov2680_info = v4l2_get_subdev_hostdata(sd);
+ if (ov2680_info) {
+ ov2680_info->raw_bayer_order = ov2680_bayer_order_mapping[index];
+ dev->format.code = ov2680_translate_bayer_order(
+ ov2680_info->raw_bayer_order);
+ }
+ return ret;
+}
+
+static int ov2680_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct ov2680_device *dev =
+ container_of(ctrl->handler, struct ov2680_device, ctrl_handler);
+ struct i2c_client *client = v4l2_get_subdevdata(&dev->sd);
+ int ret = 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_VFLIP:
+ dev_dbg(&client->dev, "%s: CID_VFLIP:%d.\n",
+ __func__, ctrl->val);
+ ret = ov2680_v_flip(&dev->sd, ctrl->val);
+ break;
+ case V4L2_CID_HFLIP:
+ dev_dbg(&client->dev, "%s: CID_HFLIP:%d.\n",
+ __func__, ctrl->val);
+ ret = ov2680_h_flip(&dev->sd, ctrl->val);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ return ret;
+}
+
+static int ov2680_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct ov2680_device *dev =
+ container_of(ctrl->handler, struct ov2680_device, ctrl_handler);
+ int ret = 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_EXPOSURE_ABSOLUTE:
+ ret = ov2680_q_exposure(&dev->sd, &ctrl->val);
+ break;
+ case V4L2_CID_FOCAL_ABSOLUTE:
+ ret = ov2680_g_focal(&dev->sd, &ctrl->val);
+ break;
+ case V4L2_CID_FNUMBER_ABSOLUTE:
+ ret = ov2680_g_fnumber(&dev->sd, &ctrl->val);
+ break;
+ case V4L2_CID_FNUMBER_RANGE:
+ ret = ov2680_g_fnumber_range(&dev->sd, &ctrl->val);
+ break;
+ case V4L2_CID_BIN_FACTOR_HORZ:
+ ret = ov2680_g_bin_factor_x(&dev->sd, &ctrl->val);
+ break;
+ case V4L2_CID_BIN_FACTOR_VERT:
+ ret = ov2680_g_bin_factor_y(&dev->sd, &ctrl->val);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops ctrl_ops = {
+ .s_ctrl = ov2680_s_ctrl,
+ .g_volatile_ctrl = ov2680_g_volatile_ctrl
+};
+
+struct v4l2_ctrl_config ov2680_controls[] = {
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_EXPOSURE_ABSOLUTE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "exposure",
+ .min = 0x0,
+ .max = 0xffff,
+ .step = 0x01,
+ .def = 0x00,
+ .flags = 0,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_FOCAL_ABSOLUTE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "focal length",
+ .min = OV2680_FOCAL_LENGTH_DEFAULT,
+ .max = OV2680_FOCAL_LENGTH_DEFAULT,
+ .step = 0x01,
+ .def = OV2680_FOCAL_LENGTH_DEFAULT,
+ .flags = 0,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_FNUMBER_ABSOLUTE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "f-number",
+ .min = OV2680_F_NUMBER_DEFAULT,
+ .max = OV2680_F_NUMBER_DEFAULT,
+ .step = 0x01,
+ .def = OV2680_F_NUMBER_DEFAULT,
+ .flags = 0,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_FNUMBER_RANGE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "f-number range",
+ .min = OV2680_F_NUMBER_RANGE,
+ .max = OV2680_F_NUMBER_RANGE,
+ .step = 0x01,
+ .def = OV2680_F_NUMBER_RANGE,
+ .flags = 0,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_BIN_FACTOR_HORZ,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "horizontal binning factor",
+ .min = 0,
+ .max = OV2680_BIN_FACTOR_MAX,
+ .step = 1,
+ .def = 0,
+ .flags = 0,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_BIN_FACTOR_VERT,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "vertical binning factor",
+ .min = 0,
+ .max = OV2680_BIN_FACTOR_MAX,
+ .step = 1,
+ .def = 0,
+ .flags = 0,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_VFLIP,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "Flip",
+ .min = 0,
+ .max = 1,
+ .step = 1,
+ .def = 0,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_HFLIP,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "Mirror",
+ .min = 0,
+ .max = 1,
+ .step = 1,
+ .def = 0,
+ },
+};
+
+static int ov2680_init_registers(struct v4l2_subdev *sd)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret;
+
+ ret = ov2680_write_reg(client, OV2680_8BIT, OV2680_SW_RESET, 0x01);
+ ret |= ov2680_write_reg_array(client, ov2680_global_setting);
+
+ return ret;
+}
+
+static int ov2680_init(struct v4l2_subdev *sd)
+{
+ struct ov2680_device *dev = to_ov2680_sensor(sd);
+
+ int ret;
+
+ mutex_lock(&dev->input_lock);
+
+ /* restore settings */
+ ov2680_res = ov2680_res_preview;
+ N_RES = N_RES_PREVIEW;
+
+ ret = ov2680_init_registers(sd);
+
+ mutex_unlock(&dev->input_lock);
+
+ return ret;
+}
+
+static int power_ctrl(struct v4l2_subdev *sd, bool flag)
+{
+ int ret = 0;
+ struct ov2680_device *dev = to_ov2680_sensor(sd);
+ if (!dev || !dev->platform_data)
+ return -ENODEV;
+
+ /* Non-gmin platforms use the legacy callback */
+ if (dev->platform_data->power_ctrl)
+ return dev->platform_data->power_ctrl(sd, flag);
+
+ if (flag) {
+ ret |= dev->platform_data->v1p8_ctrl(sd, 1);
+ ret |= dev->platform_data->v2p8_ctrl(sd, 1);
+ usleep_range(10000, 15000);
+ }
+
+ if (!flag || ret) {
+ ret |= dev->platform_data->v1p8_ctrl(sd, 0);
+ ret |= dev->platform_data->v2p8_ctrl(sd, 0);
+ }
+ return ret;
+}
+
+static int gpio_ctrl(struct v4l2_subdev *sd, bool flag)
+{
+ int ret;
+ struct ov2680_device *dev = to_ov2680_sensor(sd);
+
+ if (!dev || !dev->platform_data)
+ return -ENODEV;
+
+ /* Non-gmin platforms use the legacy callback */
+ if (dev->platform_data->gpio_ctrl)
+ return dev->platform_data->gpio_ctrl(sd, flag);
+
+ /* The OV2680 documents only one GPIO input (#XSHUTDN), but
+ * existing integrations often wire two (reset/power_down)
+ * because that is the way other sensors work. There is no
+ * way to tell how it is wired internally, so existing
+ * firmwares expose both and we drive them symmetrically. */
+ if (flag) {
+ ret = dev->platform_data->gpio0_ctrl(sd, 1);
+ usleep_range(10000, 15000);
+ ret |= dev->platform_data->gpio1_ctrl(sd, 1);
+ usleep_range(10000, 15000);
+ } else {
+ ret = dev->platform_data->gpio1_ctrl(sd, 0);
+ ret |= dev->platform_data->gpio0_ctrl(sd, 0);
+ }
+ return ret;
+}
+
+static int power_up(struct v4l2_subdev *sd)
+{
+ struct ov2680_device *dev = to_ov2680_sensor(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret;
+
+ if (!dev->platform_data) {
+ dev_err(&client->dev,
+ "no camera_sensor_platform_data");
+ return -ENODEV;
+ }
+
+ /* power control */
+ ret = power_ctrl(sd, 1);
+ if (ret)
+ goto fail_power;
+
+ /* according to DS, at least 5ms is needed between DOVDD and PWDN */
+ usleep_range(5000, 6000);
+
+ /* gpio ctrl */
+ ret = gpio_ctrl(sd, 1);
+ if (ret) {
+ ret = gpio_ctrl(sd, 1);
+ if (ret)
+ goto fail_power;
+ }
+
+ /* flis clock control */
+ ret = dev->platform_data->flisclk_ctrl(sd, 1);
+ if (ret)
+ goto fail_clk;
+
+ /* according to DS, 20ms is needed between PWDN and i2c access */
+ msleep(20);
+
+ return 0;
+
+fail_clk:
+ gpio_ctrl(sd, 0);
+fail_power:
+ power_ctrl(sd, 0);
+ dev_err(&client->dev, "sensor power-up failed\n");
+
+ return ret;
+}
+
+static int power_down(struct v4l2_subdev *sd)
+{
+ struct ov2680_device *dev = to_ov2680_sensor(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret = 0;
+
+ h_flag = 0;
+ v_flag = 0;
+ if (!dev->platform_data) {
+ dev_err(&client->dev,
+ "no camera_sensor_platform_data");
+ return -ENODEV;
+ }
+
+ ret = dev->platform_data->flisclk_ctrl(sd, 0);
+ if (ret)
+ dev_err(&client->dev, "flisclk failed\n");
+
+ /* gpio ctrl */
+ ret = gpio_ctrl(sd, 0);
+ if (ret) {
+ ret = gpio_ctrl(sd, 0);
+ if (ret)
+ dev_err(&client->dev, "gpio failed 2\n");
+ }
+
+ /* power control */
+ ret = power_ctrl(sd, 0);
+ if (ret)
+ dev_err(&client->dev, "vprog failed.\n");
+
+ return ret;
+}
+
+static int ov2680_s_power(struct v4l2_subdev *sd, int on)
+{
+ int ret;
+
+ if (on == 0) {
+ ret = power_down(sd);
+ } else {
+ ret = power_up(sd);
+ if (!ret)
+ return ov2680_init(sd);
+ }
+ return ret;
+}
+
+/*
+ * distance - calculate the distance
+ * @res: resolution
+ * @w: width
+ * @h: height
+ *
+ * Get the gap between the resolution and the requested w/h.
+ * Resolutions with width/height smaller than w/h are not considered.
+ * Returns the gap value, or -1 if the resolution does not qualify.
+ */
+#define LARGEST_ALLOWED_RATIO_MISMATCH 600
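+/*
+ * Worked example with hypothetical numbers (not a real mode): for a
+ * 1600x1200 entry and a 1280x960 request, w_ratio = h_ratio = 10240,
+ * match = 0 and the distance is 20480; a 16:9 request such as 1280x720
+ * gives match = 2048 > LARGEST_ALLOWED_RATIO_MISMATCH and is rejected
+ * with -1.
+ */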
+static int distance(struct ov2680_resolution *res, u32 w, u32 h)
+{
+ unsigned int w_ratio = (res->width << 13) / w;
+ unsigned int h_ratio;
+ int match;
+
+ if (h == 0)
+ return -1;
+ h_ratio = (res->height << 13) / h;
+ if (h_ratio == 0)
+ return -1;
+ match = abs(((w_ratio << 13) / h_ratio) - ((int)8192));
+
+ if ((w_ratio < (int)8192) || (h_ratio < (int)8192) ||
+ (match > LARGEST_ALLOWED_RATIO_MISMATCH))
+ return -1;
+
+ return w_ratio + h_ratio;
+}
+
+/* Return the nearest higher resolution index */
+static int nearest_resolution_index(int w, int h)
+{
+ int i;
+ int idx = -1;
+ int dist;
+ int min_dist = INT_MAX;
+ struct ov2680_resolution *tmp_res = NULL;
+
+ for (i = 0; i < N_RES; i++) {
+ tmp_res = &ov2680_res[i];
+ dist = distance(tmp_res, w, h);
+ if (dist == -1)
+ continue;
+ if (dist < min_dist) {
+ min_dist = dist;
+ idx = i;
+ }
+ }
+
+ return idx;
+}
+
+static int get_resolution_index(int w, int h)
+{
+ int i;
+
+ for (i = 0; i < N_RES; i++) {
+ if (w != ov2680_res[i].width)
+ continue;
+ if (h != ov2680_res[i].height)
+ continue;
+
+ return i;
+ }
+
+ return -1;
+}
+
+static int ov2680_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *format)
+{
+ struct v4l2_mbus_framefmt *fmt = &format->format;
+ struct ov2680_device *dev = to_ov2680_sensor(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct camera_mipi_info *ov2680_info = NULL;
+ int ret = 0;
+ int idx = 0;
+ dev_dbg(&client->dev, "+++++ov2680_s_mbus_fmt+++++l\n");
+ if (format->pad)
+ return -EINVAL;
+
+ if (!fmt)
+ return -EINVAL;
+
+ ov2680_info = v4l2_get_subdev_hostdata(sd);
+ if (!ov2680_info)
+ return -EINVAL;
+
+ mutex_lock(&dev->input_lock);
+ idx = nearest_resolution_index(fmt->width, fmt->height);
+ if (idx == -1) {
+ /* return the largest resolution */
+ fmt->width = ov2680_res[N_RES - 1].width;
+ fmt->height = ov2680_res[N_RES - 1].height;
+ } else {
+ fmt->width = ov2680_res[idx].width;
+ fmt->height = ov2680_res[idx].height;
+ }
+ fmt->code = MEDIA_BUS_FMT_SBGGR10_1X10;
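+ /*
+ * For a TRY format only record the negotiated size in the pad
+ * config; the sensor itself is programmed further below for ACTIVE
+ * formats only.
+ */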
+ if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
+ cfg->try_fmt = *fmt;
+ mutex_unlock(&dev->input_lock);
+ return 0;
+ }
+ dev->fmt_idx = get_resolution_index(fmt->width, fmt->height);
+ dev_dbg(&client->dev, "+++++get_resolution_index=%d+++++l\n",
+ dev->fmt_idx);
+ if (dev->fmt_idx == -1) {
+ dev_err(&client->dev, "get resolution fail\n");
+ mutex_unlock(&dev->input_lock);
+ return -EINVAL;
+ }
+ v4l2_info(client, "__s_mbus_fmt i=%d, w=%d, h=%d\n", dev->fmt_idx,
+ fmt->width, fmt->height);
+ dev_dbg(&client->dev, "__s_mbus_fmt i=%d, w=%d, h=%d\n",
+ dev->fmt_idx, fmt->width, fmt->height);
+
+ ret = ov2680_write_reg_array(client, ov2680_res[dev->fmt_idx].regs);
+ if (ret)
+ dev_err(&client->dev, "ov2680 write resolution register err\n");
+
+ ret = ov2680_get_intg_factor(client, ov2680_info,
+ &ov2680_res[dev->fmt_idx]);
+ if (ret) {
+ dev_err(&client->dev, "failed to get integration_factor\n");
+ goto err;
+ }
+
+ /*
+ * Re-apply the flip/mirror settings so that the flip registers are
+ * not left at the defaults written by the mode register list.
+ */
+ if (h_flag)
+ ov2680_h_flip(sd, h_flag);
+ if (v_flag)
+ ov2680_v_flip(sd, v_flag);
+
+ v4l2_info(client, "\n%s idx %d \n", __func__, dev->fmt_idx);
+
+ /*ret = startup(sd);
+ * if (ret)
+ * dev_err(&client->dev, "ov2680 startup err\n");
+ */
+err:
+ mutex_unlock(&dev->input_lock);
+ return ret;
+}
+
+static int ov2680_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *format)
+{
+ struct v4l2_mbus_framefmt *fmt = &format->format;
+ struct ov2680_device *dev = to_ov2680_sensor(sd);
+
+ if (format->pad)
+ return -EINVAL;
+
+ if (!fmt)
+ return -EINVAL;
+
+ fmt->width = ov2680_res[dev->fmt_idx].width;
+ fmt->height = ov2680_res[dev->fmt_idx].height;
+ fmt->code = MEDIA_BUS_FMT_SBGGR10_1X10;
+
+ return 0;
+}
+
+static int ov2680_detect(struct i2c_client *client)
+{
+ struct i2c_adapter *adapter = client->adapter;
+ u16 high, low;
+ int ret;
+ u16 id;
+ u8 revision;
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_I2C))
+ return -ENODEV;
+
+ ret = ov2680_read_reg(client, OV2680_8BIT,
+ OV2680_SC_CMMN_CHIP_ID_H, &high);
+ if (ret) {
+ dev_err(&client->dev, "sensor_id_high = 0x%x\n", high);
+ return -ENODEV;
+ }
+ ret = ov2680_read_reg(client, OV2680_8BIT,
+ OV2680_SC_CMMN_CHIP_ID_L, &low);
+ id = ((((u16) high) << 8) | (u16) low);
+
+ if (id != OV2680_ID) {
+ dev_err(&client->dev, "sensor ID error 0x%x\n", id);
+ return -ENODEV;
+ }
+
+ ret = ov2680_read_reg(client, OV2680_8BIT,
+ OV2680_SC_CMMN_SUB_ID, &high);
+ revision = (u8) high & 0x0f;
+
+ dev_err(&client->dev, "sensor_revision id = 0x%x\n", id);
+ dev_err(&client->dev, "detect ov2680 success\n");
+ dev_err(&client->dev, "################5##########\n");
+ return 0;
+}
+
+static int ov2680_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct ov2680_device *dev = to_ov2680_sensor(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret;
+
+ mutex_lock(&dev->input_lock);
+ if (enable)
+ dev_dbg(&client->dev, "ov2680_s_stream on\n");
+ else
+ dev_dbg(&client->dev, "ov2680_s_stream off\n");
+
+ ret = ov2680_write_reg(client, OV2680_8BIT, OV2680_SW_STREAM,
+ enable ? OV2680_START_STREAMING :
+ OV2680_STOP_STREAMING);
+#if 0
+ /* restore settings */
+ ov2680_res = ov2680_res_preview;
+ N_RES = N_RES_PREVIEW;
+#endif
+
+ //otp valid at stream on state
+ //if(!dev->otp_data)
+ // dev->otp_data = ov2680_otp_read(sd);
+
+ mutex_unlock(&dev->input_lock);
+
+ return ret;
+}
+
+static int ov2680_s_config(struct v4l2_subdev *sd,
+ int irq, void *platform_data)
+{
+ struct ov2680_device *dev = to_ov2680_sensor(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret = 0;
+
+ if (!platform_data)
+ return -ENODEV;
+
+ dev->platform_data =
+ (struct camera_sensor_platform_data *)platform_data;
+
+ mutex_lock(&dev->input_lock);
+ /*
+ * Power off the module, then power it on again later: the initial
+ * power-on done by the board may not follow the power-on sequence
+ * required by the module.
+ */
+ ret = power_down(sd);
+ if (ret) {
+ dev_err(&client->dev, "ov2680 power-off err.\n");
+ goto fail_power_off;
+ }
+
+ ret = power_up(sd);
+ if (ret) {
+ dev_err(&client->dev, "ov2680 power-up err.\n");
+ goto fail_power_on;
+ }
+
+ ret = dev->platform_data->csi_cfg(sd, 1);
+ if (ret)
+ goto fail_csi_cfg;
+
+ /* config & detect sensor */
+ ret = ov2680_detect(client);
+ if (ret) {
+ dev_err(&client->dev, "ov2680_detect err s_config.\n");
+ goto fail_csi_cfg;
+ }
+
+ /* turn off the sensor after probing */
+ ret = power_down(sd);
+ if (ret) {
+ dev_err(&client->dev, "ov2680 power-off err.\n");
+ goto fail_csi_cfg;
+ }
+ mutex_unlock(&dev->input_lock);
+
+ return 0;
+
+fail_csi_cfg:
+ dev->platform_data->csi_cfg(sd, 0);
+fail_power_on:
+ power_down(sd);
+ dev_err(&client->dev, "sensor power-gating failed\n");
+fail_power_off:
+ mutex_unlock(&dev->input_lock);
+ return ret;
+}
+
+static int ov2680_g_parm(struct v4l2_subdev *sd,
+ struct v4l2_streamparm *param)
+{
+ struct ov2680_device *dev = to_ov2680_sensor(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ if (!param)
+ return -EINVAL;
+
+ if (param->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+ dev_err(&client->dev, "unsupported buffer type.\n");
+ return -EINVAL;
+ }
+
+ memset(param, 0, sizeof(*param));
+ param->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+
+ if (dev->fmt_idx >= 0 && dev->fmt_idx < N_RES) {
+ param->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
+ param->parm.capture.timeperframe.numerator = 1;
+ param->parm.capture.capturemode = dev->run_mode;
+ param->parm.capture.timeperframe.denominator =
+ ov2680_res[dev->fmt_idx].fps;
+ }
+ return 0;
+}
+
+static int ov2680_s_parm(struct v4l2_subdev *sd,
+ struct v4l2_streamparm *param)
+{
+ struct ov2680_device *dev = to_ov2680_sensor(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ dev->run_mode = param->parm.capture.capturemode;
+
+ v4l2_info(client, "\n%s:run_mode :%x\n", __func__, dev->run_mode);
+
+ mutex_lock(&dev->input_lock);
+ switch (dev->run_mode) {
+ case CI_MODE_VIDEO:
+ ov2680_res = ov2680_res_video;
+ N_RES = N_RES_VIDEO;
+ break;
+ case CI_MODE_STILL_CAPTURE:
+ ov2680_res = ov2680_res_still;
+ N_RES = N_RES_STILL;
+ break;
+ default:
+ ov2680_res = ov2680_res_preview;
+ N_RES = N_RES_PREVIEW;
+ }
+ mutex_unlock(&dev->input_lock);
+ return 0;
+}
+
+static int ov2680_g_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_frame_interval *interval)
+{
+ struct ov2680_device *dev = to_ov2680_sensor(sd);
+
+ interval->interval.numerator = 1;
+ interval->interval.denominator = ov2680_res[dev->fmt_idx].fps;
+
+ return 0;
+}
+
+static int ov2680_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ if (code->index >= MAX_FMTS)
+ return -EINVAL;
+
+ code->code = MEDIA_BUS_FMT_SBGGR10_1X10;
+ return 0;
+}
+
+static int ov2680_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ int index = fse->index;
+
+ if (index >= N_RES)
+ return -EINVAL;
+
+ fse->min_width = ov2680_res[index].width;
+ fse->min_height = ov2680_res[index].height;
+ fse->max_width = ov2680_res[index].width;
+ fse->max_height = ov2680_res[index].height;
+
+ return 0;
+
+}
+
+static int ov2680_g_skip_frames(struct v4l2_subdev *sd, u32 *frames)
+{
+ struct ov2680_device *dev = to_ov2680_sensor(sd);
+
+ mutex_lock(&dev->input_lock);
+ *frames = ov2680_res[dev->fmt_idx].skip_frames;
+ mutex_unlock(&dev->input_lock);
+
+ return 0;
+}
+
+static const struct v4l2_subdev_video_ops ov2680_video_ops = {
+ .s_stream = ov2680_s_stream,
+ .g_parm = ov2680_g_parm,
+ .s_parm = ov2680_s_parm,
+ .g_frame_interval = ov2680_g_frame_interval,
+};
+
+static const struct v4l2_subdev_sensor_ops ov2680_sensor_ops = {
+ .g_skip_frames = ov2680_g_skip_frames,
+};
+
+static const struct v4l2_subdev_core_ops ov2680_core_ops = {
+ .s_power = ov2680_s_power,
+ .ioctl = ov2680_ioctl,
+};
+
+static const struct v4l2_subdev_pad_ops ov2680_pad_ops = {
+ .enum_mbus_code = ov2680_enum_mbus_code,
+ .enum_frame_size = ov2680_enum_frame_size,
+ .get_fmt = ov2680_get_fmt,
+ .set_fmt = ov2680_set_fmt,
+};
+
+static const struct v4l2_subdev_ops ov2680_ops = {
+ .core = &ov2680_core_ops,
+ .video = &ov2680_video_ops,
+ .pad = &ov2680_pad_ops,
+ .sensor = &ov2680_sensor_ops,
+};
+
+static int ov2680_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct ov2680_device *dev = to_ov2680_sensor(sd);
+ dev_dbg(&client->dev, "ov2680_remove...\n");
+
+ dev->platform_data->csi_cfg(sd, 0);
+
+ v4l2_device_unregister_subdev(sd);
+ media_entity_cleanup(&dev->sd.entity);
+ v4l2_ctrl_handler_free(&dev->ctrl_handler);
+ kfree(dev);
+
+ return 0;
+}
+
+static int ov2680_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct ov2680_device *dev;
+ int ret;
+ void *pdata;
+ unsigned int i;
+
+ dev_dbg(&client->dev, "%s\n", __func__);
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ mutex_init(&dev->input_lock);
+
+ dev->fmt_idx = 0;
+ v4l2_i2c_subdev_init(&(dev->sd), client, &ov2680_ops);
+
+ if (ACPI_COMPANION(&client->dev))
+ pdata = gmin_camera_platform_data(&dev->sd,
+ ATOMISP_INPUT_FORMAT_RAW_10,
+ atomisp_bayer_order_bggr);
+ else
+ pdata = client->dev.platform_data;
+
+ if (!pdata) {
+ ret = -EINVAL;
+ goto out_free;
+ }
+
+ ret = ov2680_s_config(&dev->sd, client->irq, pdata);
+ if (ret)
+ goto out_free;
+
+ ret = atomisp_register_i2c_module(&dev->sd, pdata, RAW_CAMERA);
+ if (ret)
+ goto out_free;
+
+ dev->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ dev->pad.flags = MEDIA_PAD_FL_SOURCE;
+ dev->format.code = MEDIA_BUS_FMT_SBGGR10_1X10;
+ dev->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
+ ret = v4l2_ctrl_handler_init(&dev->ctrl_handler,
+ ARRAY_SIZE(ov2680_controls));
+ if (ret) {
+ ov2680_remove(client);
+ return ret;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(ov2680_controls); i++)
+ v4l2_ctrl_new_custom(&dev->ctrl_handler, &ov2680_controls[i],
+ NULL);
+
+ if (dev->ctrl_handler.error) {
+ ov2680_remove(client);
+ return dev->ctrl_handler.error;
+ }
+
+ /* Use same lock for controls as for everything else. */
+ dev->ctrl_handler.lock = &dev->input_lock;
+ dev->sd.ctrl_handler = &dev->ctrl_handler;
+
+ ret = media_entity_pads_init(&dev->sd.entity, 1, &dev->pad);
+ if (ret) {
+ dev_dbg(&client->dev, "media_entity_pads_init failed, removing ov2680\n");
+ ov2680_remove(client);
+ }
+ return ret;
+out_free:
+ dev_dbg(&client->dev, "probe failed, freeing ov2680 device\n");
+ v4l2_device_unregister_subdev(&dev->sd);
+ kfree(dev);
+ return ret;
+}
+
+static const struct acpi_device_id ov2680_acpi_match[] = {
+ {"XXOV2680"},
+ {},
+};
+MODULE_DEVICE_TABLE(acpi, ov2680_acpi_match);
+
+MODULE_DEVICE_TABLE(i2c, ov2680_id);
+static struct i2c_driver ov2680_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = OV2680_NAME,
+ .acpi_match_table = ACPI_PTR(ov2680_acpi_match),
+
+ },
+ .probe = ov2680_probe,
+ .remove = ov2680_remove,
+ .id_table = ov2680_id,
+};
+
+static int init_ov2680(void)
+{
+ return i2c_add_driver(&ov2680_driver);
+}
+
+static void exit_ov2680(void)
+{
+ i2c_del_driver(&ov2680_driver);
+}
+
+module_init(init_ov2680);
+module_exit(exit_ov2680);
+
+MODULE_AUTHOR("Jacky Wang <Jacky_wang@ovt.com>");
+MODULE_DESCRIPTION("A low-level driver for OmniVision 2680 sensors");
+MODULE_LICENSE("GPL");
+
diff --git a/drivers/staging/media/atomisp/i2c/ov2680.h b/drivers/staging/media/atomisp/i2c/ov2680.h
new file mode 100644
index 000000000000..944fe8e3bcbf
--- /dev/null
+++ b/drivers/staging/media/atomisp/i2c/ov2680.h
@@ -0,0 +1,940 @@
+/*
+ * Support for OmniVision OV2680 2M camera sensor.
+ *
+ * Copyright (c) 2013 Intel Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+
+#ifndef __OV2680_H__
+#define __OV2680_H__
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/i2c.h>
+#include <linux/delay.h>
+#include <linux/videodev2.h>
+#include <linux/spinlock.h>
+#include <media/v4l2-subdev.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ctrls.h>
+#include <linux/v4l2-mediabus.h>
+#include <media/media-entity.h>
+
+#include "../include/linux/atomisp_platform.h"
+
+#define OV2680_NAME "ov2680"
+#define OV2680B_NAME "ov2680b"
+#define OV2680F_NAME "ov2680f"
+
+/* Defines for register writes and register array processing */
+#define I2C_MSG_LENGTH 0x2
+#define I2C_RETRY_COUNT 5
+
+#define OV2680_FOCAL_LENGTH_NUM 334 /*3.34mm*/
+#define OV2680_FOCAL_LENGTH_DEM 100
+#define OV2680_F_NUMBER_DEFAULT_NUM 24
+#define OV2680_F_NUMBER_DEM 10
+
+#define OV2680_BIN_FACTOR_MAX 4
+
+#define MAX_FMTS 1
+
+/* sensor_mode_data read_mode adaptation */
+#define OV2680_READ_MODE_BINNING_ON 0x0400
+#define OV2680_READ_MODE_BINNING_OFF 0x00
+#define OV2680_INTEGRATION_TIME_MARGIN 8
+
+#define OV2680_MAX_EXPOSURE_VALUE 0xFFF1
+#define OV2680_MAX_GAIN_VALUE 0xFF
+
+/*
+ * focal length bits definition:
+ * bits 31-16: numerator, bits 15-0: denominator
+ */
+#define OV2680_FOCAL_LENGTH_DEFAULT 0x1B70064
+
+/*
+ * current f-number bits definition:
+ * bits 31-16: numerator, bits 15-0: denominator
+ */
+#define OV2680_F_NUMBER_DEFAULT 0x18000a
+
+/*
+ * f-number range bits definition:
+ * bits 31-24: max f-number numerator
+ * bits 23-16: max f-number denominator
+ * bits 15-8: min f-number numerator
+ * bits 7-0: min f-number denominator
+ */
+#define OV2680_F_NUMBER_RANGE 0x180a180a
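+/*
+ * Worked example: 0x180a180a unpacks to max = 0x18/0x0a and
+ * min = 0x18/0x0a, i.e. f/2.4 at both ends, matching
+ * OV2680_F_NUMBER_DEFAULT_NUM / OV2680_F_NUMBER_DEM above.
+ */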
+#define OV2680_ID 0x2680
+
+#define OV2680_FINE_INTG_TIME_MIN 0
+#define OV2680_FINE_INTG_TIME_MAX_MARGIN 0
+#define OV2680_COARSE_INTG_TIME_MIN 1
+#define OV2680_COARSE_INTG_TIME_MAX_MARGIN 6
+
+/*
+ * OV2680 System control registers
+ */
+#define OV2680_SW_SLEEP 0x0100
+#define OV2680_SW_RESET 0x0103
+#define OV2680_SW_STREAM 0x0100
+
+#define OV2680_SC_CMMN_CHIP_ID_H 0x300A
+#define OV2680_SC_CMMN_CHIP_ID_L 0x300B
+#define OV2680_SC_CMMN_SCCB_ID 0x302B /* 0x300C*/
+#define OV2680_SC_CMMN_SUB_ID 0x302A /* process, version*/
+
+#define OV2680_GROUP_ACCESS 0x3208 /*Bit[7:4] Group control, Bit[3:0] Group ID*/
+
+#define OV2680_EXPOSURE_H 0x3500 /*Bit[3:0]: bits [19:16] of exposure; the remaining 16 bits are in 0x3501 and 0x3502*/
+#define OV2680_EXPOSURE_M 0x3501
+#define OV2680_EXPOSURE_L 0x3502
+#define OV2680_AGC_H 0x350A /*Bit[1:0] means Bit[9:8] of gain*/
+#define OV2680_AGC_L 0x350B /*Bit[7:0] of gain*/
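+/*
+ * Example: a 20-bit exposure value of 0x12345 is written as
+ * 0x3500 = 0x01 (bits 19:16), 0x3501 = 0x23 (bits 15:8) and
+ * 0x3502 = 0x45 (bits 7:0); a 10-bit gain is split across
+ * 0x350A (bits 9:8) and 0x350B (bits 7:0) the same way.
+ */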
+
+#define OV2680_HORIZONTAL_START_H 0x3800 /*Bit[11:8]*/
+#define OV2680_HORIZONTAL_START_L 0x3801 /*Bit[7:0]*/
+#define OV2680_VERTICAL_START_H 0x3802 /*Bit[11:8]*/
+#define OV2680_VERTICAL_START_L 0x3803 /*Bit[7:0]*/
+#define OV2680_HORIZONTAL_END_H 0x3804 /*Bit[11:8]*/
+#define OV2680_HORIZONTAL_END_L 0x3805 /*Bit[7:0]*/
+#define OV2680_VERTICAL_END_H 0x3806 /*Bit[11:8]*/
+#define OV2680_VERTICAL_END_L 0x3807 /*Bit[7:0]*/
+#define OV2680_HORIZONTAL_OUTPUT_SIZE_H 0x3808 /*Bit[3:0]*/
+#define OV2680_HORIZONTAL_OUTPUT_SIZE_L 0x3809 /*Bit[7:0]*/
+#define OV2680_VERTICAL_OUTPUT_SIZE_H 0x380a /*Bit[3:0]*/
+#define OV2680_VERTICAL_OUTPUT_SIZE_L 0x380b /*Bit[7:0]*/
+#define OV2680_TIMING_HTS_H 0x380C /*HTS high 8 bits*/
+#define OV2680_TIMING_HTS_L 0x380D /*HTS low 8 bits*/
+#define OV2680_TIMING_VTS_H 0x380e /*VTS high 8 bits*/
+#define OV2680_TIMING_VTS_L 0x380f /*VTS low 8 bits*/
+#define OV2680_FRAME_OFF_NUM 0x4202
+
+/*Flip/Mirror*/
+#define OV2680_FLIP_REG 0x3820
+#define OV2680_MIRROR_REG 0x3821
+#define OV2680_FLIP_BIT 1
+#define OV2680_MIRROR_BIT 2
+#define OV2680_FLIP_MIRROR_BIT_ENABLE 4
+
+#define OV2680_MWB_RED_GAIN_H 0x5004/*0x3400*/
+#define OV2680_MWB_GREEN_GAIN_H 0x5006/*0x3402*/
+#define OV2680_MWB_BLUE_GAIN_H 0x5008/*0x3404*/
+#define OV2680_MWB_GAIN_MAX 0x0fff
+
+#define OV2680_START_STREAMING 0x01
+#define OV2680_STOP_STREAMING 0x00
+
+
+#define OV2680_INVALID_CONFIG 0xffffffff
+
+
+struct regval_list {
+ u16 reg_num;
+ u8 value;
+};
+
+struct ov2680_resolution {
+ u8 *desc;
+ const struct ov2680_reg *regs;
+ int res;
+ int width;
+ int height;
+ int fps;
+ int pix_clk_freq;
+ u32 skip_frames;
+ u16 pixels_per_line;
+ u16 lines_per_frame;
+ u8 bin_factor_x;
+ u8 bin_factor_y;
+ u8 bin_mode;
+ bool used;
+};
+
+struct ov2680_format {
+ u8 *desc;
+ u32 pixelformat;
+ struct ov2680_reg *regs;
+};
+
+ /*
+ * ov2680 device structure.
+ */
+ struct ov2680_device {
+ struct v4l2_subdev sd;
+ struct media_pad pad;
+ struct v4l2_mbus_framefmt format;
+ struct mutex input_lock;
+ struct v4l2_ctrl_handler ctrl_handler;
+ struct camera_sensor_platform_data *platform_data;
+ struct timespec timestamp_t_focus_abs;
+ int vt_pix_clk_freq_mhz;
+ int fmt_idx;
+ int run_mode;
+ u8 res;
+ u8 type;
+ };
+
+ enum ov2680_tok_type {
+ OV2680_8BIT = 0x0001,
+ OV2680_16BIT = 0x0002,
+ OV2680_32BIT = 0x0004,
+ OV2680_TOK_TERM = 0xf000, /* terminating token for reg list */
+ OV2680_TOK_DELAY = 0xfe00, /* delay token for reg list */
+ OV2680_TOK_MASK = 0xfff0
+ };
+
+ /**
+ * struct ov2680_reg - ov2680 sensor register format
+ * @type: type of the register
+ * @reg: 16-bit offset to register
+ * @val: 8/16/32-bit register value
+ *
+ * Define a structure for sensor register initialization values
+ */
+ struct ov2680_reg {
+ enum ov2680_tok_type type;
+ u16 reg;
+ u32 val; /* @set value for read/mod/write, @mask */
+ };
+
+ #define to_ov2680_sensor(x) container_of(x, struct ov2680_device, sd)
+
+ #define OV2680_MAX_WRITE_BUF_SIZE 30
+
+ struct ov2680_write_buffer {
+ u16 addr;
+ u8 data[OV2680_MAX_WRITE_BUF_SIZE];
+ };
+
+ struct ov2680_write_ctrl {
+ int index;
+ struct ov2680_write_buffer buffer;
+ };
+
+ static const struct i2c_device_id ov2680_id[] = {
+ {OV2680B_NAME, 0},
+ {OV2680F_NAME, 0},
+ {}
+ };
+
+ static struct ov2680_reg const ov2680_global_setting[] = {
+ {OV2680_8BIT, 0x0103, 0x01},
+ {OV2680_8BIT, 0x3002, 0x00},
+ {OV2680_8BIT, 0x3016, 0x1c},
+ {OV2680_8BIT, 0x3018, 0x44},
+ {OV2680_8BIT, 0x3020, 0x00},
+ {OV2680_8BIT, 0x3080, 0x02},
+ {OV2680_8BIT, 0x3082, 0x45},
+ {OV2680_8BIT, 0x3084, 0x09},
+ {OV2680_8BIT, 0x3085, 0x04},
+ {OV2680_8BIT, 0x3503, 0x03},
+ {OV2680_8BIT, 0x350b, 0x36},
+ {OV2680_8BIT, 0x3600, 0xb4},
+ {OV2680_8BIT, 0x3603, 0x39},
+ {OV2680_8BIT, 0x3604, 0x24},
+ {OV2680_8BIT, 0x3605, 0x00},
+ {OV2680_8BIT, 0x3620, 0x26},
+ {OV2680_8BIT, 0x3621, 0x37},
+ {OV2680_8BIT, 0x3622, 0x04},
+ {OV2680_8BIT, 0x3628, 0x00},
+ {OV2680_8BIT, 0x3705, 0x3c},
+ {OV2680_8BIT, 0x370c, 0x50},
+ {OV2680_8BIT, 0x370d, 0xc0},
+ {OV2680_8BIT, 0x3718, 0x88},
+ {OV2680_8BIT, 0x3720, 0x00},
+ {OV2680_8BIT, 0x3721, 0x00},
+ {OV2680_8BIT, 0x3722, 0x00},
+ {OV2680_8BIT, 0x3723, 0x00},
+ {OV2680_8BIT, 0x3738, 0x00},
+ {OV2680_8BIT, 0x3717, 0x58},
+ {OV2680_8BIT, 0x3781, 0x80},
+ {OV2680_8BIT, 0x3789, 0x60},
+ {OV2680_8BIT, 0x3800, 0x00},
+ {OV2680_8BIT, 0x3819, 0x04},
+ {OV2680_8BIT, 0x4000, 0x81},
+ {OV2680_8BIT, 0x4001, 0x40},
+ {OV2680_8BIT, 0x4602, 0x02},
+ {OV2680_8BIT, 0x481f, 0x36},
+ {OV2680_8BIT, 0x4825, 0x36},
+ {OV2680_8BIT, 0x4837, 0x18},
+ {OV2680_8BIT, 0x5002, 0x30},
+ {OV2680_8BIT, 0x5004, 0x04},//manual awb 1x
+ {OV2680_8BIT, 0x5005, 0x00},
+ {OV2680_8BIT, 0x5006, 0x04},
+ {OV2680_8BIT, 0x5007, 0x00},
+ {OV2680_8BIT, 0x5008, 0x04},
+ {OV2680_8BIT, 0x5009, 0x00},
+ {OV2680_8BIT, 0x5080, 0x00},
+ {OV2680_8BIT, 0x3701, 0x64}, //add on 14/05/13
+ {OV2680_8BIT, 0x3784, 0x0c}, //based OV2680_R1A_AM10.ovt add on 14/06/13
+ {OV2680_8BIT, 0x5780, 0x3e}, //based OV2680_R1A_AM10.ovt,Adjust DPC setting (57xx) on 14/06/13
+ {OV2680_8BIT, 0x5781, 0x0f},
+ {OV2680_8BIT, 0x5782, 0x04},
+ {OV2680_8BIT, 0x5783, 0x02},
+ {OV2680_8BIT, 0x5784, 0x01},
+ {OV2680_8BIT, 0x5785, 0x01},
+ {OV2680_8BIT, 0x5786, 0x00},
+ {OV2680_8BIT, 0x5787, 0x04},
+ {OV2680_8BIT, 0x5788, 0x02},
+ {OV2680_8BIT, 0x5789, 0x00},
+ {OV2680_8BIT, 0x578a, 0x01},
+ {OV2680_8BIT, 0x578b, 0x02},
+ {OV2680_8BIT, 0x578c, 0x03},
+ {OV2680_8BIT, 0x578d, 0x03},
+ {OV2680_8BIT, 0x578e, 0x08},
+ {OV2680_8BIT, 0x578f, 0x0c},
+ {OV2680_8BIT, 0x5790, 0x08},
+ {OV2680_8BIT, 0x5791, 0x04},
+ {OV2680_8BIT, 0x5792, 0x00},
+ {OV2680_8BIT, 0x5793, 0x00},
+ {OV2680_8BIT, 0x5794, 0x03}, //based OV2680_R1A_AM10.ovt,Adjust DPC setting (57xx) on 14/06/13
+ {OV2680_8BIT, 0x0100, 0x00}, //stream off
+
+ {OV2680_TOK_TERM, 0, 0}
+ };
+
+
+ /*
+ * 176x144 30fps VBlanking 1lane 10Bit (binning)
+ */
+ static struct ov2680_reg const ov2680_QCIF_30fps[] = {
+ {OV2680_8BIT, 0x3086, 0x01},
+ {OV2680_8BIT, 0x3501, 0x24},
+ {OV2680_8BIT, 0x3502, 0x40},
+ {OV2680_8BIT, 0x370a, 0x23},
+ {OV2680_8BIT, 0x3801, 0xa0},
+ {OV2680_8BIT, 0x3802, 0x00},
+ {OV2680_8BIT, 0x3803, 0x78},
+ {OV2680_8BIT, 0x3804, 0x05},
+ {OV2680_8BIT, 0x3805, 0xaf},
+ {OV2680_8BIT, 0x3806, 0x04},
+ {OV2680_8BIT, 0x3807, 0x47},
+ {OV2680_8BIT, 0x3808, 0x00},
+ {OV2680_8BIT, 0x3809, 0xC0},
+ {OV2680_8BIT, 0x380a, 0x00},
+ {OV2680_8BIT, 0x380b, 0xa0},
+ {OV2680_8BIT, 0x380c, 0x06},
+ {OV2680_8BIT, 0x380d, 0xb0},
+ {OV2680_8BIT, 0x380e, 0x02},
+ {OV2680_8BIT, 0x380f, 0x84},
+ {OV2680_8BIT, 0x3810, 0x00},
+ {OV2680_8BIT, 0x3811, 0x04},
+ {OV2680_8BIT, 0x3812, 0x00},
+ {OV2680_8BIT, 0x3813, 0x04},
+ {OV2680_8BIT, 0x3814, 0x31},
+ {OV2680_8BIT, 0x3815, 0x31},
+ {OV2680_8BIT, 0x4000, 0x81},
+ {OV2680_8BIT, 0x4001, 0x40},
+ {OV2680_8BIT, 0x4008, 0x00},
+ {OV2680_8BIT, 0x4009, 0x03},
+ {OV2680_8BIT, 0x5081, 0x41},
+ {OV2680_8BIT, 0x5708, 0x00}, //add for full size flip off and mirror off 2014/09/11
+ {OV2680_8BIT, 0x5704, 0x10},
+ {OV2680_8BIT, 0x5705, 0xa0},
+ {OV2680_8BIT, 0x5706, 0x0c},
+ {OV2680_8BIT, 0x5707, 0x78},
+ {OV2680_8BIT, 0x3820, 0xc2},
+ {OV2680_8BIT, 0x3821, 0x01},
+ // {OV2680_8BIT, 0x5090, 0x0c},
+ {OV2680_TOK_TERM, 0, 0}
+ };
+
+ /*
+ * 352x288 30fps VBlanking 1lane 10Bit (binning)
+ */
+ static struct ov2680_reg const ov2680_CIF_30fps[] = {
+ {OV2680_8BIT, 0x3086, 0x01},
+ {OV2680_8BIT, 0x3501, 0x24},
+ {OV2680_8BIT, 0x3502, 0x40},
+ {OV2680_8BIT, 0x370a, 0x23},
+ {OV2680_8BIT, 0x3801, 0xa0},
+ {OV2680_8BIT, 0x3802, 0x00},
+ {OV2680_8BIT, 0x3803, 0x78},
+ {OV2680_8BIT, 0x3804, 0x03},
+ {OV2680_8BIT, 0x3805, 0x8f},
+ {OV2680_8BIT, 0x3806, 0x02},
+ {OV2680_8BIT, 0x3807, 0xe7},
+ {OV2680_8BIT, 0x3808, 0x01},
+ {OV2680_8BIT, 0x3809, 0x70},
+ {OV2680_8BIT, 0x380a, 0x01},
+ {OV2680_8BIT, 0x380b, 0x30},
+ {OV2680_8BIT, 0x380c, 0x06},
+ {OV2680_8BIT, 0x380d, 0xb0},
+ {OV2680_8BIT, 0x380e, 0x02},
+ {OV2680_8BIT, 0x380f, 0x84},
+ {OV2680_8BIT, 0x3810, 0x00},
+ {OV2680_8BIT, 0x3811, 0x04},
+ {OV2680_8BIT, 0x3812, 0x00},
+ {OV2680_8BIT, 0x3813, 0x04},
+ {OV2680_8BIT, 0x3814, 0x31},
+ {OV2680_8BIT, 0x3815, 0x31},
+ {OV2680_8BIT, 0x4008, 0x00},
+ {OV2680_8BIT, 0x4009, 0x03},
+ {OV2680_8BIT, 0x5081, 0x41},
+ {OV2680_8BIT, 0x5708, 0x00}, //add for full size flip off and mirror off 2014/09/11
+ {OV2680_8BIT, 0x5704, 0x10},
+ {OV2680_8BIT, 0x5705, 0xa0},
+ {OV2680_8BIT, 0x5706, 0x0c},
+ {OV2680_8BIT, 0x5707, 0x78},
+ {OV2680_8BIT, 0x3820, 0xc2},
+ {OV2680_8BIT, 0x3821, 0x01},
+ // {OV2680_8BIT, 0x5090, 0x0c},
+ {OV2680_TOK_TERM, 0, 0}
+ };
+
+ /*
+ * 336x256 30fps VBlanking 1lane 10Bit (binning)
+ */
+ static struct ov2680_reg const ov2680_QVGA_30fps[] = {
+ {OV2680_8BIT, 0x3086, 0x01},
+ {OV2680_8BIT, 0x3501, 0x24},
+ {OV2680_8BIT, 0x3502, 0x40},
+ {OV2680_8BIT, 0x370a, 0x23},
+ {OV2680_8BIT, 0x3801, 0xa0},
+ {OV2680_8BIT, 0x3802, 0x00},
+ {OV2680_8BIT, 0x3803, 0x78},
+ {OV2680_8BIT, 0x3804, 0x03},
+ {OV2680_8BIT, 0x3805, 0x4f},
+ {OV2680_8BIT, 0x3806, 0x02},
+ {OV2680_8BIT, 0x3807, 0x87},
+ {OV2680_8BIT, 0x3808, 0x01},
+ {OV2680_8BIT, 0x3809, 0x50},
+ {OV2680_8BIT, 0x380a, 0x01},
+ {OV2680_8BIT, 0x380b, 0x00},
+ {OV2680_8BIT, 0x380c, 0x06},
+ {OV2680_8BIT, 0x380d, 0xb0},
+ {OV2680_8BIT, 0x380e, 0x02},
+ {OV2680_8BIT, 0x380f, 0x84},
+ {OV2680_8BIT, 0x3810, 0x00},
+ {OV2680_8BIT, 0x3811, 0x04},
+ {OV2680_8BIT, 0x3812, 0x00},
+ {OV2680_8BIT, 0x3813, 0x04},
+ {OV2680_8BIT, 0x3814, 0x31},
+ {OV2680_8BIT, 0x3815, 0x31},
+ {OV2680_8BIT, 0x4008, 0x00},
+ {OV2680_8BIT, 0x4009, 0x03},
+ {OV2680_8BIT, 0x5081, 0x41},
+ {OV2680_8BIT, 0x5708, 0x00}, //add for full size flip off and mirror off 2014/09/11
+ {OV2680_8BIT, 0x5704, 0x10},
+ {OV2680_8BIT, 0x5705, 0xa0},
+ {OV2680_8BIT, 0x5706, 0x0c},
+ {OV2680_8BIT, 0x5707, 0x78},
+ {OV2680_8BIT, 0x3820, 0xc2},
+ {OV2680_8BIT, 0x3821, 0x01},
+ // {OV2680_8BIT, 0x5090, 0x0c},
+ {OV2680_TOK_TERM, 0, 0}
+ };
+
+
+ /*
+ * 656x496 30fps VBlanking 1lane 10Bit (binning)
+ */
+ static struct ov2680_reg const ov2680_656x496_30fps[] = {
+ {OV2680_8BIT, 0x3086, 0x01},
+ {OV2680_8BIT, 0x3501, 0x24},
+ {OV2680_8BIT, 0x3502, 0x40},
+ {OV2680_8BIT, 0x370a, 0x23},
+ {OV2680_8BIT, 0x3801, 0xa0},
+ {OV2680_8BIT, 0x3802, 0x00},
+ {OV2680_8BIT, 0x3803, 0x78},
+ {OV2680_8BIT, 0x3804, 0x05},
+ {OV2680_8BIT, 0x3805, 0xcf},
+ {OV2680_8BIT, 0x3806, 0x04},
+ {OV2680_8BIT, 0x3807, 0x67},
+ {OV2680_8BIT, 0x3808, 0x02},
+ {OV2680_8BIT, 0x3809, 0x90},
+ {OV2680_8BIT, 0x380a, 0x01},
+ {OV2680_8BIT, 0x380b, 0xf0},
+ {OV2680_8BIT, 0x380c, 0x06},
+ {OV2680_8BIT, 0x380d, 0xb0},
+ {OV2680_8BIT, 0x380e, 0x02},
+ {OV2680_8BIT, 0x380f, 0x84},
+ {OV2680_8BIT, 0x3810, 0x00},
+ {OV2680_8BIT, 0x3811, 0x04},
+ {OV2680_8BIT, 0x3812, 0x00},
+ {OV2680_8BIT, 0x3813, 0x04},
+ {OV2680_8BIT, 0x3814, 0x31},
+ {OV2680_8BIT, 0x3815, 0x31},
+ {OV2680_8BIT, 0x4008, 0x00},
+ {OV2680_8BIT, 0x4009, 0x03},
+ {OV2680_8BIT, 0x5081, 0x41},
+ {OV2680_8BIT, 0x5708, 0x00}, //add for full size flip off and mirror off 2014/09/11
+ {OV2680_8BIT, 0x5704, 0x10},
+ {OV2680_8BIT, 0x5705, 0xa0},
+ {OV2680_8BIT, 0x5706, 0x0c},
+ {OV2680_8BIT, 0x5707, 0x78},
+ {OV2680_8BIT, 0x3820, 0xc2},
+ {OV2680_8BIT, 0x3821, 0x01},
+ // {OV2680_8BIT, 0x5090, 0x0c},
+ {OV2680_TOK_TERM, 0, 0}
+ };
+ /*
+ * 720x592 30fps VBlanking 1lane 10Bit (binning)
+ */
+ static struct ov2680_reg const ov2680_720x592_30fps[] = {
+ {OV2680_8BIT, 0x3086, 0x01},
+ {OV2680_8BIT, 0x3501, 0x26},
+ {OV2680_8BIT, 0x3502, 0x40},
+ {OV2680_8BIT, 0x370a, 0x23},
+ {OV2680_8BIT, 0x3801, 0x00}, // X_ADDR_START;
+ {OV2680_8BIT, 0x3802, 0x00},
+ {OV2680_8BIT, 0x3803, 0x00}, // Y_ADDR_START;
+ {OV2680_8BIT, 0x3804, 0x05},
+ {OV2680_8BIT, 0x3805, 0xaf}, // X_ADDR_END;
+ {OV2680_8BIT, 0x3806, 0x04},
+ {OV2680_8BIT, 0x3807, 0xaf}, // Y_ADDR_END;
+ {OV2680_8BIT, 0x3808, 0x02},
+ {OV2680_8BIT, 0x3809, 0xd0}, // X_OUTPUT_SIZE;
+ {OV2680_8BIT, 0x380a, 0x02},
+ {OV2680_8BIT, 0x380b, 0x50}, // Y_OUTPUT_SIZE;
+ {OV2680_8BIT, 0x380c, 0x06},
+ {OV2680_8BIT, 0x380d, 0xac}, // HTS;
+ {OV2680_8BIT, 0x380e, 0x02},
+ {OV2680_8BIT, 0x380f, 0x84}, // VTS;
+ {OV2680_8BIT, 0x3810, 0x00},
+ {OV2680_8BIT, 0x3811, 0x00},
+ {OV2680_8BIT, 0x3812, 0x00},
+ {OV2680_8BIT, 0x3813, 0x00},
+ {OV2680_8BIT, 0x3814, 0x31},
+ {OV2680_8BIT, 0x3815, 0x31},
+ {OV2680_8BIT, 0x4008, 0x00},
+ {OV2680_8BIT, 0x4009, 0x03},
+ {OV2680_8BIT, 0x5708, 0x00},
+ {OV2680_8BIT, 0x5704, 0x02},
+ {OV2680_8BIT, 0x5705, 0xd0}, // X_WIN;
+ {OV2680_8BIT, 0x5706, 0x02},
+ {OV2680_8BIT, 0x5707, 0x50}, // Y_WIN;
+ {OV2680_8BIT, 0x3820, 0xc2}, // FLIP_FORMAT;
+ {OV2680_8BIT, 0x3821, 0x01}, // MIRROR_FORMAT;
+ {OV2680_8BIT, 0x5090, 0x00}, // PRE ISP CTRL16, default value is 0x0C;
+ // BIT[3]: Mirror order, BG or GB;
+ // BIT[2]: Flip order, BR or RB;
+ {OV2680_8BIT, 0x5081, 0x41},
+ {OV2680_TOK_TERM, 0, 0}
+ };
+
+ /*
+ * 800x600 30fps VBlanking 1lane 10Bit (binning)
+ */
+ static struct ov2680_reg const ov2680_800x600_30fps[] = {
+ {OV2680_8BIT, 0x3086, 0x01},
+ {OV2680_8BIT, 0x3501, 0x26},
+ {OV2680_8BIT, 0x3502, 0x40},
+ {OV2680_8BIT, 0x370a, 0x23},
+ {OV2680_8BIT, 0x3801, 0x00},
+ {OV2680_8BIT, 0x3802, 0x00},
+ {OV2680_8BIT, 0x3803, 0x00},
+ {OV2680_8BIT, 0x3804, 0x06},
+ {OV2680_8BIT, 0x3805, 0x4f},
+ {OV2680_8BIT, 0x3806, 0x04},
+ {OV2680_8BIT, 0x3807, 0xbf},
+ {OV2680_8BIT, 0x3808, 0x03},
+ {OV2680_8BIT, 0x3809, 0x20},
+ {OV2680_8BIT, 0x380a, 0x02},
+ {OV2680_8BIT, 0x380b, 0x58},
+ {OV2680_8BIT, 0x380c, 0x06},
+ {OV2680_8BIT, 0x380d, 0xac},
+ {OV2680_8BIT, 0x380e, 0x02},
+ {OV2680_8BIT, 0x380f, 0x84},
+ {OV2680_8BIT, 0x3810, 0x00},
+ {OV2680_8BIT, 0x3811, 0x00},
+ {OV2680_8BIT, 0x3812, 0x00},
+ {OV2680_8BIT, 0x3813, 0x00},
+ {OV2680_8BIT, 0x3814, 0x31},
+ {OV2680_8BIT, 0x3815, 0x31},
+ {OV2680_8BIT, 0x5708, 0x00},
+ {OV2680_8BIT, 0x5704, 0x03},
+ {OV2680_8BIT, 0x5705, 0x20},
+ {OV2680_8BIT, 0x5706, 0x02},
+ {OV2680_8BIT, 0x5707, 0x58},
+ {OV2680_8BIT, 0x3820, 0xc2},
+ {OV2680_8BIT, 0x3821, 0x01},
+ {OV2680_8BIT, 0x5090, 0x00},
+ {OV2680_8BIT, 0x4008, 0x00},
+ {OV2680_8BIT, 0x4009, 0x03},
+ {OV2680_8BIT, 0x5081, 0x41},
+ {OV2680_TOK_TERM, 0, 0}
+ };
+
+ /*
+ * 720p=1280*720 30fps VBlanking 1lane 10Bit (no-Scaling)
+ */
+ static struct ov2680_reg const ov2680_720p_30fps[] = {
+ {OV2680_8BIT, 0x3086, 0x00},
+ {OV2680_8BIT, 0x3501, 0x48},
+ {OV2680_8BIT, 0x3502, 0xe0},
+ {OV2680_8BIT, 0x370a, 0x21},
+ {OV2680_8BIT, 0x3801, 0xa0},
+ {OV2680_8BIT, 0x3802, 0x00},
+ {OV2680_8BIT, 0x3803, 0xf2},
+ {OV2680_8BIT, 0x3804, 0x05},
+ {OV2680_8BIT, 0x3805, 0xbf},
+ {OV2680_8BIT, 0x3806, 0x03},
+ {OV2680_8BIT, 0x3807, 0xdd},
+ {OV2680_8BIT, 0x3808, 0x05},
+ {OV2680_8BIT, 0x3809, 0x10},
+ {OV2680_8BIT, 0x380a, 0x02},
+ {OV2680_8BIT, 0x380b, 0xe0},
+ {OV2680_8BIT, 0x380c, 0x06},
+ {OV2680_8BIT, 0x380d, 0xa8},
+ {OV2680_8BIT, 0x380e, 0x05},
+ {OV2680_8BIT, 0x380f, 0x0e},
+ {OV2680_8BIT, 0x3810, 0x00},
+ {OV2680_8BIT, 0x3811, 0x08},
+ {OV2680_8BIT, 0x3812, 0x00},
+ {OV2680_8BIT, 0x3813, 0x06},
+ {OV2680_8BIT, 0x3814, 0x11},
+ {OV2680_8BIT, 0x3815, 0x11},
+ {OV2680_8BIT, 0x4008, 0x02},
+ {OV2680_8BIT, 0x4009, 0x09},
+ {OV2680_8BIT, 0x5081, 0x41},
+ {OV2680_8BIT, 0x5708, 0x00}, //add for full size flip off and mirror off 2014/09/11
+ {OV2680_8BIT, 0x5704, 0x10},
+ {OV2680_8BIT, 0x5705, 0xa0},
+ {OV2680_8BIT, 0x5706, 0x0c},
+ {OV2680_8BIT, 0x5707, 0x78},
+ {OV2680_8BIT, 0x3820, 0xc0},
+ {OV2680_8BIT, 0x3821, 0x00},
+ // {OV2680_8BIT, 0x5090, 0x0c},
+ {OV2680_TOK_TERM, 0, 0}
+ };
+
+ /*
+ * 1296x976 30fps VBlanking 1lane 10Bit(no-scaling)
+ */
+ static struct ov2680_reg const ov2680_1296x976_30fps[] = {
+ {OV2680_8BIT, 0x3086, 0x00},
+ {OV2680_8BIT, 0x3501, 0x48},
+ {OV2680_8BIT, 0x3502, 0xe0},
+ {OV2680_8BIT, 0x370a, 0x21},
+ {OV2680_8BIT, 0x3801, 0xa0},
+ {OV2680_8BIT, 0x3802, 0x00},
+ {OV2680_8BIT, 0x3803, 0x78},
+ {OV2680_8BIT, 0x3804, 0x05},
+ {OV2680_8BIT, 0x3805, 0xbf},
+ {OV2680_8BIT, 0x3806, 0x04},
+ {OV2680_8BIT, 0x3807, 0x57},
+ {OV2680_8BIT, 0x3808, 0x05},
+ {OV2680_8BIT, 0x3809, 0x10},
+ {OV2680_8BIT, 0x380a, 0x03},
+ {OV2680_8BIT, 0x380b, 0xd0},
+ {OV2680_8BIT, 0x380c, 0x06},
+ {OV2680_8BIT, 0x380d, 0xa8},
+ {OV2680_8BIT, 0x380e, 0x05},
+ {OV2680_8BIT, 0x380f, 0x0e},
+ {OV2680_8BIT, 0x3810, 0x00},
+ {OV2680_8BIT, 0x3811, 0x08},
+ {OV2680_8BIT, 0x3812, 0x00},
+ {OV2680_8BIT, 0x3813, 0x08},
+ {OV2680_8BIT, 0x3814, 0x11},
+ {OV2680_8BIT, 0x3815, 0x11},
+ {OV2680_8BIT, 0x4008, 0x02},
+ {OV2680_8BIT, 0x4009, 0x09},
+ {OV2680_8BIT, 0x5081, 0x41},
+ {OV2680_8BIT, 0x5708, 0x00}, //add for full size flip off and mirror off 2014/09/11
+ {OV2680_8BIT, 0x5704, 0x10},
+ {OV2680_8BIT, 0x5705, 0xa0},
+ {OV2680_8BIT, 0x5706, 0x0c},
+ {OV2680_8BIT, 0x5707, 0x78},
+ {OV2680_8BIT, 0x3820, 0xc0},
+ {OV2680_8BIT, 0x3821, 0x00}, //mirror/flip
+ // {OV2680_8BIT, 0x5090, 0x0c},
+ {OV2680_TOK_TERM, 0, 0}
+ };
+
+ /*
+ * 1456*1096 30fps VBlanking 1lane 10bit(no-scaling)
+ */
+ static struct ov2680_reg const ov2680_1456x1096_30fps[] = {
+ {OV2680_8BIT, 0x3086, 0x00},
+ {OV2680_8BIT, 0x3501, 0x48},
+ {OV2680_8BIT, 0x3502, 0xe0},
+ {OV2680_8BIT, 0x370a, 0x21},
+ {OV2680_8BIT, 0x3801, 0x90},
+ {OV2680_8BIT, 0x3802, 0x00},
+ {OV2680_8BIT, 0x3803, 0x78},
+ {OV2680_8BIT, 0x3804, 0x06},
+ {OV2680_8BIT, 0x3805, 0x4f},
+ {OV2680_8BIT, 0x3806, 0x04},
+ {OV2680_8BIT, 0x3807, 0xC0},
+ {OV2680_8BIT, 0x3808, 0x05},
+ {OV2680_8BIT, 0x3809, 0xb0},
+ {OV2680_8BIT, 0x380a, 0x04},
+ {OV2680_8BIT, 0x380b, 0x48},
+ {OV2680_8BIT, 0x380c, 0x06},
+ {OV2680_8BIT, 0x380d, 0xa8},
+ {OV2680_8BIT, 0x380e, 0x05},
+ {OV2680_8BIT, 0x380f, 0x0e},
+ {OV2680_8BIT, 0x3810, 0x00},
+ {OV2680_8BIT, 0x3811, 0x08},
+ {OV2680_8BIT, 0x3812, 0x00},
+ {OV2680_8BIT, 0x3813, 0x00},
+ {OV2680_8BIT, 0x3814, 0x11},
+ {OV2680_8BIT, 0x3815, 0x11},
+ {OV2680_8BIT, 0x4008, 0x02},
+ {OV2680_8BIT, 0x4009, 0x09},
+ {OV2680_8BIT, 0x5081, 0x41},
+ {OV2680_8BIT, 0x5708, 0x00}, //add for full size flip off and mirror off 2014/09/11
+ {OV2680_8BIT, 0x5704, 0x10},
+ {OV2680_8BIT, 0x5705, 0xa0},
+ {OV2680_8BIT, 0x5706, 0x0c},
+ {OV2680_8BIT, 0x5707, 0x78},
+ {OV2680_8BIT, 0x3820, 0xc0},
+ {OV2680_8BIT, 0x3821, 0x00},
+ // {OV2680_8BIT, 0x5090, 0x0c},
+ {OV2680_TOK_TERM, 0, 0}
+ };
+
+ /*
+ * 1616x916 30fps VBlanking 1lane 10Bit
+ */
+
+ static struct ov2680_reg const ov2680_1616x916_30fps[] = {
+
+ {OV2680_8BIT, 0x3086, 0x00},
+ {OV2680_8BIT, 0x3501, 0x48},
+ {OV2680_8BIT, 0x3502, 0xe0},
+ {OV2680_8BIT, 0x370a, 0x21},
+ {OV2680_8BIT, 0x3801, 0x00},
+ {OV2680_8BIT, 0x3802, 0x00},
+ {OV2680_8BIT, 0x3803, 0x96},
+ {OV2680_8BIT, 0x3804, 0x06},
+ {OV2680_8BIT, 0x3805, 0x4f},
+ {OV2680_8BIT, 0x3806, 0x04},
+ {OV2680_8BIT, 0x3807, 0x39},
+ {OV2680_8BIT, 0x3808, 0x06},
+ {OV2680_8BIT, 0x3809, 0x50},
+ {OV2680_8BIT, 0x380a, 0x03},
+ {OV2680_8BIT, 0x380b, 0x94},
+ {OV2680_8BIT, 0x380c, 0x06},
+ {OV2680_8BIT, 0x380d, 0xa8},
+ {OV2680_8BIT, 0x380e, 0x05},
+ {OV2680_8BIT, 0x380f, 0x0e},
+ {OV2680_8BIT, 0x3810, 0x00},
+ {OV2680_8BIT, 0x3811, 0x00},
+ {OV2680_8BIT, 0x3812, 0x00},
+ {OV2680_8BIT, 0x3813, 0x08},
+ {OV2680_8BIT, 0x3814, 0x11},
+ {OV2680_8BIT, 0x3815, 0x11},
+ {OV2680_8BIT, 0x4008, 0x02},
+ {OV2680_8BIT, 0x4009, 0x09},
+ {OV2680_8BIT, 0x5081, 0x41},
+ {OV2680_8BIT, 0x5708, 0x01}, //add for full size flip off and mirror off 2014/09/11
+ {OV2680_8BIT, 0x5704, 0x06},
+ {OV2680_8BIT, 0x5705, 0x50},
+ {OV2680_8BIT, 0x5706, 0x03},
+ {OV2680_8BIT, 0x5707, 0x94},
+ {OV2680_8BIT, 0x3820, 0xc0},
+ {OV2680_8BIT, 0x3821, 0x00},
+ // {OV2680_8BIT, 0x5090, 0x0C},
+ {OV2680_TOK_TERM, 0, 0}
+ };
+
+ /*
+ * 1616x1082 30fps VBlanking 1lane 10Bit
+ */
+ static struct ov2680_reg const ov2680_1616x1082_30fps[] = {
+ {OV2680_8BIT, 0x3086, 0x00},
+ {OV2680_8BIT, 0x3501, 0x48},
+ {OV2680_8BIT, 0x3502, 0xe0},
+ {OV2680_8BIT, 0x370a, 0x21},
+ {OV2680_8BIT, 0x3801, 0x00},
+ {OV2680_8BIT, 0x3802, 0x00},
+ {OV2680_8BIT, 0x3803, 0x86},
+ {OV2680_8BIT, 0x3804, 0x06},
+ {OV2680_8BIT, 0x3805, 0x4f},
+ {OV2680_8BIT, 0x3806, 0x04},
+ {OV2680_8BIT, 0x3807, 0xbf},
+ {OV2680_8BIT, 0x3808, 0x06},
+ {OV2680_8BIT, 0x3809, 0x50},
+ {OV2680_8BIT, 0x380a, 0x04},
+ {OV2680_8BIT, 0x380b, 0x3a},
+ {OV2680_8BIT, 0x380c, 0x06},
+ {OV2680_8BIT, 0x380d, 0xa8},
+ {OV2680_8BIT, 0x380e, 0x05},
+ {OV2680_8BIT, 0x380f, 0x0e},
+ {OV2680_8BIT, 0x3810, 0x00},
+ {OV2680_8BIT, 0x3811, 0x00},
+ {OV2680_8BIT, 0x3812, 0x00},
+ {OV2680_8BIT, 0x3813, 0x00},
+ {OV2680_8BIT, 0x3814, 0x11},
+ {OV2680_8BIT, 0x3815, 0x11},
+ {OV2680_8BIT, 0x5708, 0x01}, //add for full size flip off and mirror off 2014/09/11
+ {OV2680_8BIT, 0x5704, 0x06},
+ {OV2680_8BIT, 0x5705, 0x50},
+ {OV2680_8BIT, 0x5706, 0x04},
+ {OV2680_8BIT, 0x5707, 0x3a},
+ {OV2680_8BIT, 0x3820, 0xc0},
+ {OV2680_8BIT, 0x3821, 0x00},
+ // {OV2680_8BIT, 0x5090, 0x0C},
+ {OV2680_8BIT, 0x4008, 0x02},
+ {OV2680_8BIT, 0x4009, 0x09},
+ {OV2680_8BIT, 0x5081, 0x41},
+ {OV2680_TOK_TERM, 0, 0}
+ };
+ /*
+ * 1616x1216 30fps VBlanking 1lane 10Bit
+ */
+ static struct ov2680_reg const ov2680_1616x1216_30fps[] = {
+ {OV2680_8BIT, 0x3086, 0x00},
+ {OV2680_8BIT, 0x3501, 0x48},
+ {OV2680_8BIT, 0x3502, 0xe0},
+ {OV2680_8BIT, 0x370a, 0x21},
+ {OV2680_8BIT, 0x3801, 0x00},
+ {OV2680_8BIT, 0x3802, 0x00},
+ {OV2680_8BIT, 0x3803, 0x00},
+ {OV2680_8BIT, 0x3804, 0x06},
+ {OV2680_8BIT, 0x3805, 0x4f},
+ {OV2680_8BIT, 0x3806, 0x04},
+ {OV2680_8BIT, 0x3807, 0xbf},
+ {OV2680_8BIT, 0x3808, 0x06},
+ {OV2680_8BIT, 0x3809, 0x50}, // 4 lines reserved for mirror and flip
+ {OV2680_8BIT, 0x380a, 0x04},
+ {OV2680_8BIT, 0x380b, 0xc0},
+ {OV2680_8BIT, 0x380c, 0x06},
+ {OV2680_8BIT, 0x380d, 0xa8},
+ {OV2680_8BIT, 0x380e, 0x05},
+ {OV2680_8BIT, 0x380f, 0x0e},
+ {OV2680_8BIT, 0x3810, 0x00},
+ {OV2680_8BIT, 0x3811, 0x00},
+ {OV2680_8BIT, 0x3812, 0x00},
+ {OV2680_8BIT, 0x3813, 0x00},
+ {OV2680_8BIT, 0x3814, 0x11},
+ {OV2680_8BIT, 0x3815, 0x11},
+ {OV2680_8BIT, 0x4008, 0x00},
+ {OV2680_8BIT, 0x4009, 0x0b},
+ {OV2680_8BIT, 0x5081, 0x01},
+ {OV2680_8BIT, 0x5708, 0x01}, //add for full size flip off and mirror off 2014/09/11
+ {OV2680_8BIT, 0x5704, 0x06},
+ {OV2680_8BIT, 0x5705, 0x50},
+ {OV2680_8BIT, 0x5706, 0x04},
+ {OV2680_8BIT, 0x5707, 0xcc},
+ {OV2680_8BIT, 0x3820, 0xc0},
+ {OV2680_8BIT, 0x3821, 0x00},
+ // {OV2680_8BIT, 0x5090, 0x0C},
+ {OV2680_TOK_TERM, 0, 0}
+ };
+
+ static struct ov2680_resolution ov2680_res_preview[] = {
+ {
+ .desc = "ov2680_1616x1216_30fps",
+ .width = 1616,
+ .height = 1216,
+ .pix_clk_freq = 66,
+ .fps = 30,
+ .used = 0,
+ .pixels_per_line = 1698,//1704,
+ .lines_per_frame = 1294,
+ .bin_factor_x = 0,
+ .bin_factor_y = 0,
+ .bin_mode = 0,
+ .skip_frames = 3,
+ .regs = ov2680_1616x1216_30fps,
+ },
+ {
+ .desc = "ov2680_1616x916_30fps",
+ .width = 1616,
+ .height = 916,
+ .fps = 30,
+ .pix_clk_freq = 66,
+ .used = 0,
+ .pixels_per_line = 1698,//1704,
+ .lines_per_frame = 1294,
+ .bin_factor_x = 0,
+ .bin_factor_y = 0,
+ .bin_mode = 0,
+ .skip_frames = 3,
+ .regs = ov2680_1616x916_30fps,
+ },
+};
+#define N_RES_PREVIEW (ARRAY_SIZE(ov2680_res_preview))
+
+static struct ov2680_resolution ov2680_res_still[] = {
+ {
+ .desc = "ov2680_1616x1216_30fps",
+ .width = 1616,
+ .height = 1216,
+ .pix_clk_freq = 66,
+ .fps = 30,
+ .used = 0,
+ .pixels_per_line = 1698,//1704,
+ .lines_per_frame = 1294,
+ .bin_factor_x = 0,
+ .bin_factor_y = 0,
+ .bin_mode = 0,
+ .skip_frames = 3,
+ .regs = ov2680_1616x1216_30fps,
+ },
+ {
+ .desc = "ov2680_1616x916_30fps",
+ .width = 1616,
+ .height = 916,
+ .fps = 30,
+ .pix_clk_freq = 66,
+ .used = 0,
+ .pixels_per_line = 1698,//1704,
+ .lines_per_frame = 1294,
+ .bin_factor_x = 0,
+ .bin_factor_y = 0,
+ .bin_mode = 0,
+ .skip_frames = 3,
+ .regs = ov2680_1616x916_30fps,
+ },
+};
+#define N_RES_STILL (ARRAY_SIZE(ov2680_res_still))
+
+static struct ov2680_resolution ov2680_res_video[] = {
+ {
+ .desc = "ov2680_1616x1216_30fps",
+ .width = 1616,
+ .height = 1216,
+ .pix_clk_freq = 66,
+ .fps = 30,
+ .used = 0,
+ .pixels_per_line = 1698,//1704,
+ .lines_per_frame = 1294,
+ .bin_factor_x = 0,
+ .bin_factor_y = 0,
+ .bin_mode = 0,
+ .skip_frames = 3,
+ .regs = ov2680_1616x1216_30fps,
+ },
+ {
+ .desc = "ov2680_1616x916_30fps",
+ .width = 1616,
+ .height = 916,
+ .fps = 30,
+ .pix_clk_freq = 66,
+ .used = 0,
+ .pixels_per_line = 1698,//1704,
+ .lines_per_frame = 1294,
+ .bin_factor_x = 0,
+ .bin_factor_y = 0,
+ .bin_mode = 0,
+ .skip_frames = 3,
+ .regs = ov2680_1616x916_30fps,
+ },
+};
+#define N_RES_VIDEO (ARRAY_SIZE(ov2680_res_video))
+
+static struct ov2680_resolution *ov2680_res = ov2680_res_preview;
+static int N_RES = N_RES_PREVIEW;
+
+
+#endif
diff --git a/drivers/staging/media/atomisp/i2c/ov2722.c b/drivers/staging/media/atomisp/i2c/ov2722.c
new file mode 100644
index 000000000000..b7afadebdf89
--- /dev/null
+++ b/drivers/staging/media/atomisp/i2c/ov2722.c
@@ -0,0 +1,1373 @@
+/*
+ * Support for OmniVision OV2722 1080p HD camera sensor.
+ *
+ * Copyright (c) 2013 Intel Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/kmod.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/gpio.h>
+#include <linux/moduleparam.h>
+#include <media/v4l2-device.h>
+#include "../include/linux/atomisp_gmin_platform.h"
+#include <linux/acpi.h>
+#include <linux/io.h>
+
+#include "ov2722.h"
+
+/* i2c read/write stuff */
+static int ov2722_read_reg(struct i2c_client *client,
+ u16 data_length, u16 reg, u16 *val)
+{
+ int err;
+ struct i2c_msg msg[2];
+ unsigned char data[6];
+
+ if (!client->adapter) {
+ dev_err(&client->dev, "%s error, no client->adapter\n",
+ __func__);
+ return -ENODEV;
+ }
+
+ if (data_length != OV2722_8BIT && data_length != OV2722_16BIT
+ && data_length != OV2722_32BIT) {
+ dev_err(&client->dev, "%s error, invalid data length\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ memset(msg, 0, sizeof(msg));
+
+ msg[0].addr = client->addr;
+ msg[0].flags = 0;
+ msg[0].len = I2C_MSG_LENGTH;
+ msg[0].buf = data;
+
+ /* high byte goes out first */
+ data[0] = (u8)(reg >> 8);
+ data[1] = (u8)(reg & 0xff);
+
+ msg[1].addr = client->addr;
+ msg[1].len = data_length;
+ msg[1].flags = I2C_M_RD;
+ msg[1].buf = data;
+
+ err = i2c_transfer(client->adapter, msg, 2);
+ if (err != 2) {
+ if (err >= 0)
+ err = -EIO;
+ dev_err(&client->dev,
+ "read from offset 0x%x error %d", reg, err);
+ return err;
+ }
+
+ *val = 0;
+ /* high byte comes first */
+ if (data_length == OV2722_8BIT)
+ *val = (u8)data[0];
+ else if (data_length == OV2722_16BIT)
+ *val = be16_to_cpu(*(u16 *)&data[0]);
+ else
+ *val = be32_to_cpu(*(u32 *)&data[0]);
+
+ return 0;
+}
+
+static int ov2722_i2c_write(struct i2c_client *client, u16 len, u8 *data)
+{
+ struct i2c_msg msg;
+ const int num_msg = 1;
+ int ret;
+
+ msg.addr = client->addr;
+ msg.flags = 0;
+ msg.len = len;
+ msg.buf = data;
+ ret = i2c_transfer(client->adapter, &msg, 1);
+
+ return ret == num_msg ? 0 : -EIO;
+}
+
+static int ov2722_write_reg(struct i2c_client *client, u16 data_length,
+ u16 reg, u16 val)
+{
+ int ret;
+ unsigned char data[4] = {0};
+ u16 *wreg = (u16 *)data;
+ const u16 len = data_length + sizeof(u16); /* 16-bit address + data */
+
+ if (data_length != OV2722_8BIT && data_length != OV2722_16BIT) {
+ dev_err(&client->dev,
+ "%s error, invalid data_length\n", __func__);
+ return -EINVAL;
+ }
+
+ /* high byte goes out first */
+ *wreg = cpu_to_be16(reg);
+
+ if (data_length == OV2722_8BIT) {
+ data[2] = (u8)(val);
+ } else {
+ /* OV2722_16BIT */
+ u16 *wdata = (u16 *)&data[2];
+ *wdata = cpu_to_be16(val);
+ }
+
+ ret = ov2722_i2c_write(client, len, data);
+ if (ret)
+ dev_err(&client->dev,
+ "write error: wrote 0x%x to offset 0x%x error %d",
+ val, reg, ret);
+
+ return ret;
+}
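+
+/*
+ * Illustration (hypothetical values): ov2722_write_reg(client,
+ * OV2722_16BIT, 0x380e, 0x050e) puts { 0x38, 0x0e, 0x05, 0x0e } on the
+ * wire - the big-endian register address followed by the big-endian value.
+ */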
+
+/*
+ * ov2722_write_reg_array - Initializes a list of OV2722 registers
+ * @client: i2c driver client structure
+ * @reglist: list of registers to be written
+ *
+ * This function initializes a list of registers. When consecutive addresses
+ * are found in a row on the list, this function creates a buffer and sends
+ * consecutive data in a single i2c_transfer().
+ *
+ * __ov2722_flush_reg_array(), __ov2722_buf_reg_array() and
+ * __ov2722_write_reg_is_consecutive() are internal helpers for
+ * ov2722_write_reg_array() and should not be used anywhere else.
+ *
+ */
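+/*
+ * For example (hypothetical register values), the list
+ * { {OV2722_8BIT, 0x3800, 0x01}, {OV2722_8BIT, 0x3801, 0x04},
+ *   {OV2722_8BIT, 0x3808, 0x06}, {OV2722_TOK_TERM, 0, 0} }
+ * goes out as two i2c transfers: a 4-byte message (address 0x3800 plus
+ * the two consecutive data bytes) and a separate 3-byte message for 0x3808.
+ */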
+
+static int __ov2722_flush_reg_array(struct i2c_client *client,
+ struct ov2722_write_ctrl *ctrl)
+{
+ u16 size;
+
+ if (ctrl->index == 0)
+ return 0;
+
+ size = sizeof(u16) + ctrl->index; /* 16-bit address + data */
+ ctrl->buffer.addr = cpu_to_be16(ctrl->buffer.addr);
+ ctrl->index = 0;
+
+ return ov2722_i2c_write(client, size, (u8 *)&ctrl->buffer);
+}
+
+static int __ov2722_buf_reg_array(struct i2c_client *client,
+ struct ov2722_write_ctrl *ctrl,
+ const struct ov2722_reg *next)
+{
+ int size;
+ u16 *data16;
+
+ switch (next->type) {
+ case OV2722_8BIT:
+ size = 1;
+ ctrl->buffer.data[ctrl->index] = (u8)next->val;
+ break;
+ case OV2722_16BIT:
+ size = 2;
+ data16 = (u16 *)&ctrl->buffer.data[ctrl->index];
+ *data16 = cpu_to_be16((u16)next->val);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* When first item is added, we need to store its starting address */
+ if (ctrl->index == 0)
+ ctrl->buffer.addr = next->reg;
+
+ ctrl->index += size;
+
+ /*
+ * If the buffer may not have room for the next entry, flush it now so
+ * the following item is guaranteed to fit.
+ */
+ if (ctrl->index + sizeof(u16) >= OV2722_MAX_WRITE_BUF_SIZE)
+ return __ov2722_flush_reg_array(client, ctrl);
+
+ return 0;
+}
+
+static int __ov2722_write_reg_is_consecutive(struct i2c_client *client,
+ struct ov2722_write_ctrl *ctrl,
+ const struct ov2722_reg *next)
+{
+ if (ctrl->index == 0)
+ return 1;
+
+ return ctrl->buffer.addr + ctrl->index == next->reg;
+}
+
+static int ov2722_write_reg_array(struct i2c_client *client,
+ const struct ov2722_reg *reglist)
+{
+ const struct ov2722_reg *next = reglist;
+ struct ov2722_write_ctrl ctrl;
+ int err;
+
+ ctrl.index = 0;
+ for (; next->type != OV2722_TOK_TERM; next++) {
+ switch (next->type & OV2722_TOK_MASK) {
+ case OV2722_TOK_DELAY:
+ err = __ov2722_flush_reg_array(client, &ctrl);
+ if (err)
+ return err;
+ msleep(next->val);
+ break;
+ default:
+ /*
+ * If next address is not consecutive, data needs to be
+ * flushed before proceeding.
+ */
+ if (!__ov2722_write_reg_is_consecutive(client, &ctrl,
+ next)) {
+ err = __ov2722_flush_reg_array(client, &ctrl);
+ if (err)
+ return err;
+ }
+ err = __ov2722_buf_reg_array(client, &ctrl, next);
+ if (err) {
+ dev_err(&client->dev, "%s: write error, aborted\n",
+ __func__);
+ return err;
+ }
+ break;
+ }
+ }
+
+ return __ov2722_flush_reg_array(client, &ctrl);
+}
+
+static int ov2722_g_focal(struct v4l2_subdev *sd, s32 *val)
+{
+ *val = (OV2722_FOCAL_LENGTH_NUM << 16) | OV2722_FOCAL_LENGTH_DEM;
+ return 0;
+}
+
+static int ov2722_g_fnumber(struct v4l2_subdev *sd, s32 *val)
+{
+ /* constant f-number for the ov2722 module */
+ *val = (OV2722_F_NUMBER_DEFAULT_NUM << 16) | OV2722_F_NUMBER_DEM;
+ return 0;
+}
+
+static int ov2722_g_fnumber_range(struct v4l2_subdev *sd, s32 *val)
+{
+ *val = (OV2722_F_NUMBER_DEFAULT_NUM << 24) |
+ (OV2722_F_NUMBER_DEM << 16) |
+ (OV2722_F_NUMBER_DEFAULT_NUM << 8) | OV2722_F_NUMBER_DEM;
+ return 0;
+}
+
+static int ov2722_get_intg_factor(struct i2c_client *client,
+ struct camera_mipi_info *info,
+ const struct ov2722_resolution *res)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct ov2722_device *dev = NULL;
+ struct atomisp_sensor_mode_data *buf = &info->data;
+ const unsigned int ext_clk_freq_hz = 19200000;
+ const unsigned int pll_invariant_div = 10;
+ unsigned int pix_clk_freq_hz;
+ u16 pre_pll_clk_div;
+ u16 pll_multiplier;
+ u16 op_pix_clk_div;
+ u16 reg_val;
+ int ret;
+
+ if (!info)
+ return -EINVAL;
+
+ dev = to_ov2722_sensor(sd);
+
+ /* pixel clock calculation */
+ ret = ov2722_read_reg(client, OV2722_8BIT,
+ OV2722_SC_CMMN_PLL_CTRL3, &pre_pll_clk_div);
+ if (ret)
+ return ret;
+
+ ret = ov2722_read_reg(client, OV2722_8BIT,
+ OV2722_SC_CMMN_PLL_MULTIPLIER, &pll_multiplier);
+ if (ret)
+ return ret;
+
+ ret = ov2722_read_reg(client, OV2722_8BIT,
+ OV2722_SC_CMMN_PLL_DEBUG_OPT, &op_pix_clk_div);
+ if (ret)
+ return ret;
+
+ pre_pll_clk_div = (pre_pll_clk_div & 0x70) >> 4;
+ if (pre_pll_clk_div == 0)
+ return -EINVAL;
+
+ pll_multiplier = pll_multiplier & 0x7f;
+ op_pix_clk_div = op_pix_clk_div & 0x03;
+ pix_clk_freq_hz = ext_clk_freq_hz / pre_pll_clk_div * pll_multiplier
+ * op_pix_clk_div / pll_invariant_div;
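+ /*
+ * Illustrative numbers only: with pre_pll_clk_div = 2,
+ * pll_multiplier = 50 and op_pix_clk_div = 1 this evaluates to
+ * 19.2 MHz / 2 * 50 * 1 / 10 = 48 MHz.
+ */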
+
+ dev->vt_pix_clk_freq_mhz = pix_clk_freq_hz;
+ buf->vt_pix_clk_freq_mhz = pix_clk_freq_hz;
+
+ /* get integration time */
+ buf->coarse_integration_time_min = OV2722_COARSE_INTG_TIME_MIN;
+ buf->coarse_integration_time_max_margin =
+ OV2722_COARSE_INTG_TIME_MAX_MARGIN;
+
+ buf->fine_integration_time_min = OV2722_FINE_INTG_TIME_MIN;
+ buf->fine_integration_time_max_margin =
+ OV2722_FINE_INTG_TIME_MAX_MARGIN;
+
+ buf->fine_integration_time_def = OV2722_FINE_INTG_TIME_MIN;
+ buf->frame_length_lines = res->lines_per_frame;
+ buf->line_length_pck = res->pixels_per_line;
+ buf->read_mode = res->bin_mode;
+
+ /* get the cropping and output resolution to ISP for this mode. */
+ ret = ov2722_read_reg(client, OV2722_16BIT,
+ OV2722_H_CROP_START_H, &reg_val);
+ if (ret)
+ return ret;
+ buf->crop_horizontal_start = reg_val;
+
+ ret = ov2722_read_reg(client, OV2722_16BIT,
+ OV2722_V_CROP_START_H, &reg_val);
+ if (ret)
+ return ret;
+ buf->crop_vertical_start = reg_val;
+
+ ret = ov2722_read_reg(client, OV2722_16BIT,
+ OV2722_H_CROP_END_H, &reg_val);
+ if (ret)
+ return ret;
+ buf->crop_horizontal_end = reg_val;
+
+ ret = ov2722_read_reg(client, OV2722_16BIT,
+ OV2722_V_CROP_END_H, &reg_val);
+ if (ret)
+ return ret;
+ buf->crop_vertical_end = reg_val;
+
+ ret = ov2722_read_reg(client, OV2722_16BIT,
+ OV2722_H_OUTSIZE_H, &reg_val);
+ if (ret)
+ return ret;
+ buf->output_width = reg_val;
+
+ ret = ov2722_read_reg(client, OV2722_16BIT,
+ OV2722_V_OUTSIZE_H, &reg_val);
+ if (ret)
+ return ret;
+ buf->output_height = reg_val;
+
+ buf->binning_factor_x = res->bin_factor_x ?
+ res->bin_factor_x : 1;
+ buf->binning_factor_y = res->bin_factor_y ?
+ res->bin_factor_y : 1;
+ return 0;
+}
+
+static long __ov2722_set_exposure(struct v4l2_subdev *sd, int coarse_itg,
+ int gain, int digitgain)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct ov2722_device *dev = to_ov2722_sensor(sd);
+ u16 hts, vts;
+ int ret;
+
+ dev_dbg(&client->dev, "set_exposure without group hold\n");
+
+ /* clear VTS_DIFF on manual mode */
+ ret = ov2722_write_reg(client, OV2722_16BIT, OV2722_VTS_DIFF_H, 0);
+ if (ret)
+ return ret;
+
+ hts = dev->pixels_per_line;
+ vts = dev->lines_per_frame;
+
+ if ((coarse_itg + OV2722_COARSE_INTG_TIME_MAX_MARGIN) > vts)
+ vts = coarse_itg + OV2722_COARSE_INTG_TIME_MAX_MARGIN;
+
+ coarse_itg <<= 4;
+ digitgain <<= 2;
+
+ ret = ov2722_write_reg(client, OV2722_16BIT,
+ OV2722_VTS_H, vts);
+ if (ret)
+ return ret;
+
+ ret = ov2722_write_reg(client, OV2722_16BIT,
+ OV2722_HTS_H, hts);
+ if (ret)
+ return ret;
+
+ /* set exposure */
+ ret = ov2722_write_reg(client, OV2722_8BIT,
+ OV2722_AEC_PK_EXPO_L,
+ coarse_itg & 0xff);
+ if (ret)
+ return ret;
+
+ ret = ov2722_write_reg(client, OV2722_16BIT,
+ OV2722_AEC_PK_EXPO_H,
+ (coarse_itg >> 8) & 0xfff);
+ if (ret)
+ return ret;
+
+ /* set analog gain */
+ ret = ov2722_write_reg(client, OV2722_16BIT,
+ OV2722_AGC_ADJ_H, gain);
+ if (ret)
+ return ret;
+
+ /* set digital gain */
+ ret = ov2722_write_reg(client, OV2722_16BIT,
+ OV2722_MWB_GAIN_R_H, digitgain);
+ if (ret)
+ return ret;
+
+ ret = ov2722_write_reg(client, OV2722_16BIT,
+ OV2722_MWB_GAIN_G_H, digitgain);
+ if (ret)
+ return ret;
+
+ ret = ov2722_write_reg(client, OV2722_16BIT,
+ OV2722_MWB_GAIN_B_H, digitgain);
+
+ return ret;
+}
+
+static int ov2722_set_exposure(struct v4l2_subdev *sd, int exposure,
+ int gain, int digitgain)
+{
+ struct ov2722_device *dev = to_ov2722_sensor(sd);
+ int ret;
+
+ mutex_lock(&dev->input_lock);
+ ret = __ov2722_set_exposure(sd, exposure, gain, digitgain);
+ mutex_unlock(&dev->input_lock);
+
+ return ret;
+}
+
+static long ov2722_s_exposure(struct v4l2_subdev *sd,
+ struct atomisp_exposure *exposure)
+{
+ int exp = exposure->integration_time[0];
+ int gain = exposure->gain[0];
+ int digitgain = exposure->gain[1];
+
+ /* a gain of 0 is invalid and must be rejected */
+ if (gain == 0) {
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ v4l2_err(client, "%s: invalid value\n", __func__);
+ return -EINVAL;
+ }
+
+ return ov2722_set_exposure(sd, exp, gain, digitgain);
+}
+
+static long ov2722_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
+{
+ switch (cmd) {
+ case ATOMISP_IOC_S_EXPOSURE:
+ return ov2722_s_exposure(sd, arg);
+ default:
+ return -EINVAL;
+ }
+}
+
+/* This returns the exposure time being used. This should only be used
+ * for filling in EXIF data, not for actual image processing.
+ */
+static int ov2722_q_exposure(struct v4l2_subdev *sd, s32 *value)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ u16 reg_v, reg_v2;
+ int ret;
+
+ /* get exposure */
+ ret = ov2722_read_reg(client, OV2722_8BIT,
+ OV2722_AEC_PK_EXPO_L,
+ &reg_v);
+ if (ret)
+ goto err;
+
+ ret = ov2722_read_reg(client, OV2722_8BIT,
+ OV2722_AEC_PK_EXPO_M,
+ &reg_v2);
+ if (ret)
+ goto err;
+
+ reg_v += reg_v2 << 8;
+ ret = ov2722_read_reg(client, OV2722_8BIT,
+ OV2722_AEC_PK_EXPO_H,
+ &reg_v2);
+ if (ret)
+ goto err;
+
+ *value = reg_v + (((u32)reg_v2 << 16));
+err:
+ return ret;
+}
+
+static int ov2722_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct ov2722_device *dev =
+ container_of(ctrl->handler, struct ov2722_device, ctrl_handler);
+ int ret = 0;
+ unsigned int val;
+ switch (ctrl->id) {
+ case V4L2_CID_EXPOSURE_ABSOLUTE:
+ ret = ov2722_q_exposure(&dev->sd, &ctrl->val);
+ break;
+ case V4L2_CID_FOCAL_ABSOLUTE:
+ ret = ov2722_g_focal(&dev->sd, &ctrl->val);
+ break;
+ case V4L2_CID_FNUMBER_ABSOLUTE:
+ ret = ov2722_g_fnumber(&dev->sd, &ctrl->val);
+ break;
+ case V4L2_CID_FNUMBER_RANGE:
+ ret = ov2722_g_fnumber_range(&dev->sd, &ctrl->val);
+ break;
+ case V4L2_CID_LINK_FREQ:
+ val = ov2722_res[dev->fmt_idx].mipi_freq;
+ if (val == 0)
+ return -EINVAL;
+
+ ctrl->val = val * 1000; /* To Hz */
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops ctrl_ops = {
+ .g_volatile_ctrl = ov2722_g_volatile_ctrl
+};
+
+static struct v4l2_ctrl_config ov2722_controls[] = {
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_EXPOSURE_ABSOLUTE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "exposure",
+ .min = 0x0,
+ .max = 0xffff,
+ .step = 0x01,
+ .def = 0x00,
+ .flags = 0,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_FOCAL_ABSOLUTE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "focal length",
+ .min = OV2722_FOCAL_LENGTH_DEFAULT,
+ .max = OV2722_FOCAL_LENGTH_DEFAULT,
+ .step = 0x01,
+ .def = OV2722_FOCAL_LENGTH_DEFAULT,
+ .flags = 0,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_FNUMBER_ABSOLUTE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "f-number",
+ .min = OV2722_F_NUMBER_DEFAULT,
+ .max = OV2722_F_NUMBER_DEFAULT,
+ .step = 0x01,
+ .def = OV2722_F_NUMBER_DEFAULT,
+ .flags = 0,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_FNUMBER_RANGE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "f-number range",
+ .min = OV2722_F_NUMBER_RANGE,
+ .max = OV2722_F_NUMBER_RANGE,
+ .step = 0x01,
+ .def = OV2722_F_NUMBER_RANGE,
+ .flags = 0,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_LINK_FREQ,
+ .name = "Link Frequency",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 1,
+ .max = 1500000 * 1000,
+ .step = 1,
+ .def = 1,
+ .flags = V4L2_CTRL_FLAG_VOLATILE | V4L2_CTRL_FLAG_READ_ONLY,
+ },
+};
+
+static int ov2722_init(struct v4l2_subdev *sd)
+{
+ struct ov2722_device *dev = to_ov2722_sensor(sd);
+
+ mutex_lock(&dev->input_lock);
+
+ /* restore settings */
+ ov2722_res = ov2722_res_preview;
+ N_RES = N_RES_PREVIEW;
+
+ mutex_unlock(&dev->input_lock);
+
+ return 0;
+}
+
+static int power_ctrl(struct v4l2_subdev *sd, bool flag)
+{
+ int ret = -1;
+ struct ov2722_device *dev = to_ov2722_sensor(sd);
+
+ if (!dev || !dev->platform_data)
+ return -ENODEV;
+
+ /* Non-gmin platforms use the legacy callback */
+ if (dev->platform_data->power_ctrl)
+ return dev->platform_data->power_ctrl(sd, flag);
+
+ if (flag) {
+ ret = dev->platform_data->v1p8_ctrl(sd, 1);
+ if (ret == 0) {
+ ret = dev->platform_data->v2p8_ctrl(sd, 1);
+ if (ret)
+ dev->platform_data->v1p8_ctrl(sd, 0);
+ }
+ } else {
+ ret = dev->platform_data->v1p8_ctrl(sd, 0);
+ ret |= dev->platform_data->v2p8_ctrl(sd, 0);
+ }
+
+ return ret;
+}
+
+static int gpio_ctrl(struct v4l2_subdev *sd, bool flag)
+{
+ struct ov2722_device *dev = to_ov2722_sensor(sd);
+ int ret = -1;
+
+ if (!dev || !dev->platform_data)
+ return -ENODEV;
+
+ /* Non-gmin platforms use the legacy callback */
+ if (dev->platform_data->gpio_ctrl)
+ return dev->platform_data->gpio_ctrl(sd, flag);
+
+ /* Note: the GPIO order is asymmetric: always RESET#
+ * before PWDN# when turning it on or off.
+ */
+ ret = dev->platform_data->gpio0_ctrl(sd, flag);
+ /*
+ * The ov2722 PWDN# polarity is inverted relative to the usual convention, hence !flag.
+ */
+ ret |= dev->platform_data->gpio1_ctrl(sd, !flag);
+ return ret;
+}
+
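+/*
+ * Power-up sequence, as implemented below: enable the 1.8 V and 2.8 V
+ * rails, wait at least 5 ms, toggle the reset/powerdown GPIOs, enable
+ * the FLIS clock, then wait 20 ms before the first i2c access.
+ */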
+static int power_up(struct v4l2_subdev *sd)
+{
+ struct ov2722_device *dev = to_ov2722_sensor(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret;
+
+ if (!dev->platform_data) {
+ dev_err(&client->dev,
+ "no camera_sensor_platform_data");
+ return -ENODEV;
+ }
+
+ /* power control */
+ ret = power_ctrl(sd, 1);
+ if (ret)
+ goto fail_power;
+
+ /* according to the datasheet, at least 5 ms is needed between DOVDD and PWDN */
+ usleep_range(5000, 6000);
+
+ /* gpio ctrl */
+ ret = gpio_ctrl(sd, 1);
+ if (ret) {
+ ret = gpio_ctrl(sd, 0);
+ if (ret)
+ goto fail_power;
+ }
+
+ /* flis clock control */
+ ret = dev->platform_data->flisclk_ctrl(sd, 1);
+ if (ret)
+ goto fail_clk;
+
+ /* according to the datasheet, 20 ms is needed between PWDN and the first i2c access */
+ msleep(20);
+
+ return 0;
+
+fail_clk:
+ gpio_ctrl(sd, 0);
+fail_power:
+ power_ctrl(sd, 0);
+ dev_err(&client->dev, "sensor power-up failed\n");
+
+ return ret;
+}
+
+static int power_down(struct v4l2_subdev *sd)
+{
+ struct ov2722_device *dev = to_ov2722_sensor(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret = 0;
+
+ if (!dev->platform_data) {
+ dev_err(&client->dev,
+ "no camera_sensor_platform_data");
+ return -ENODEV;
+ }
+
+ ret = dev->platform_data->flisclk_ctrl(sd, 0);
+ if (ret)
+ dev_err(&client->dev, "flisclk failed\n");
+
+ /* gpio ctrl */
+ ret = gpio_ctrl(sd, 0);
+ if (ret) {
+ ret = gpio_ctrl(sd, 0);
+ if (ret)
+ dev_err(&client->dev, "second attempt to disable gpio failed\n");
+ }
+
+ /* power control */
+ ret = power_ctrl(sd, 0);
+ if (ret)
+ dev_err(&client->dev, "vprog failed.\n");
+
+ return ret;
+}
+
+static int ov2722_s_power(struct v4l2_subdev *sd, int on)
+{
+ int ret;
+
+ if (on == 0)
+ return power_down(sd);
+
+ ret = power_up(sd);
+ if (!ret)
+ return ov2722_init(sd);
+
+ return ret;
+}
+
+/*
+ * distance - calculate the distance
+ * @res: resolution
+ * @w: width
+ * @h: height
+ *
+ * Get the gap between the requested w/h and the resolution.
+ * Resolutions with width or height smaller than w/h are not considered.
+ * Returns the gap value, or -1 on failure.
+ */
+#define LARGEST_ALLOWED_RATIO_MISMATCH 800
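+/*
+ * Worked example (illustrative request): asking for 1280x720 against a
+ * 1920x1080 mode gives w_ratio = h_ratio = 12288, match = 0 and a
+ * distance of 24576, while a 4:3 mode of the same width exceeds
+ * LARGEST_ALLOWED_RATIO_MISMATCH and is skipped.
+ */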
+static int distance(struct ov2722_resolution *res, u32 w, u32 h)
+{
+ unsigned int w_ratio = (res->width << 13) / w;
+ unsigned int h_ratio;
+ int match;
+
+ if (h == 0)
+ return -1;
+ h_ratio = (res->height << 13) / h;
+ if (h_ratio == 0)
+ return -1;
+ match = abs(((w_ratio << 13) / h_ratio) - 8192);
+
+ if ((w_ratio < 8192) || (h_ratio < 8192) ||
+ (match > LARGEST_ALLOWED_RATIO_MISMATCH))
+ return -1;
+
+ return w_ratio + h_ratio;
+}
+
+/* Return the index of the nearest matching resolution, or -1 if none fits */
+static int nearest_resolution_index(int w, int h)
+{
+ int i;
+ int idx = -1;
+ int dist;
+ int min_dist = INT_MAX;
+ struct ov2722_resolution *tmp_res = NULL;
+
+ for (i = 0; i < N_RES; i++) {
+ tmp_res = &ov2722_res[i];
+ dist = distance(tmp_res, w, h);
+ if (dist == -1)
+ continue;
+ if (dist < min_dist) {
+ min_dist = dist;
+ idx = i;
+ }
+ }
+
+ return idx;
+}
+
+static int get_resolution_index(int w, int h)
+{
+ int i;
+
+ for (i = 0; i < N_RES; i++) {
+ if (w != ov2722_res[i].width)
+ continue;
+ if (h != ov2722_res[i].height)
+ continue;
+
+ return i;
+ }
+
+ return -1;
+}
+
+/* TODO: remove it. */
+static int startup(struct v4l2_subdev *sd)
+{
+ struct ov2722_device *dev = to_ov2722_sensor(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret = 0;
+
+ ret = ov2722_write_reg(client, OV2722_8BIT,
+ OV2722_SW_RESET, 0x01);
+ if (ret) {
+ dev_err(&client->dev, "ov2722 reset err.\n");
+ return ret;
+ }
+
+ ret = ov2722_write_reg_array(client, ov2722_res[dev->fmt_idx].regs);
+ if (ret) {
+ dev_err(&client->dev, "ov2722 write register err.\n");
+ return ret;
+ }
+
+ return ret;
+}
+
+static int ov2722_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *format)
+{
+ struct v4l2_mbus_framefmt *fmt = &format->format;
+ struct ov2722_device *dev = to_ov2722_sensor(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct camera_mipi_info *ov2722_info = NULL;
+ int ret = 0;
+ int idx;
+ if (format->pad)
+ return -EINVAL;
+ if (!fmt)
+ return -EINVAL;
+ ov2722_info = v4l2_get_subdev_hostdata(sd);
+ if (!ov2722_info)
+ return -EINVAL;
+
+ mutex_lock(&dev->input_lock);
+ idx = nearest_resolution_index(fmt->width, fmt->height);
+ if (idx == -1) {
+ /* return the largest resolution */
+ fmt->width = ov2722_res[N_RES - 1].width;
+ fmt->height = ov2722_res[N_RES - 1].height;
+ } else {
+ fmt->width = ov2722_res[idx].width;
+ fmt->height = ov2722_res[idx].height;
+ }
+ fmt->code = MEDIA_BUS_FMT_SGRBG10_1X10;
+ if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
+ cfg->try_fmt = *fmt;
+ mutex_unlock(&dev->input_lock);
+ return 0;
+ }
+
+ dev->fmt_idx = get_resolution_index(fmt->width, fmt->height);
+ if (dev->fmt_idx == -1) {
+ dev_err(&client->dev, "get resolution fail\n");
+ mutex_unlock(&dev->input_lock);
+ return -EINVAL;
+ }
+
+ dev->pixels_per_line = ov2722_res[dev->fmt_idx].pixels_per_line;
+ dev->lines_per_frame = ov2722_res[dev->fmt_idx].lines_per_frame;
+
+ ret = startup(sd);
+ if (ret) {
+ int i = 0;
+ dev_err(&client->dev, "ov2722 startup err, retry to power up\n");
+ for (i = 0; i < OV2722_POWER_UP_RETRY_NUM; i++) {
+ dev_err(&client->dev,
+ "ov2722 retry to power up %d/%d times, result: ",
+ i + 1, OV2722_POWER_UP_RETRY_NUM);
+ power_down(sd);
+ ret = power_up(sd);
+ if (ret) {
+ dev_err(&client->dev, "power up failed, continue\n");
+ continue;
+ }
+ ret = startup(sd);
+ if (ret) {
+ dev_err(&client->dev, " startup FAILED!\n");
+ } else {
+ dev_err(&client->dev, " startup SUCCESS!\n");
+ break;
+ }
+ }
+ if (ret) {
+ dev_err(&client->dev, "ov2722 startup err\n");
+ goto err;
+ }
+ }
+
+ ret = ov2722_get_intg_factor(client, ov2722_info,
+ &ov2722_res[dev->fmt_idx]);
+ if (ret)
+ dev_err(&client->dev, "failed to get integration_factor\n");
+
+err:
+ mutex_unlock(&dev->input_lock);
+ return ret;
+}
+
+static int ov2722_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *format)
+{
+ struct v4l2_mbus_framefmt *fmt = &format->format;
+ struct ov2722_device *dev = to_ov2722_sensor(sd);
+
+ if (format->pad)
+ return -EINVAL;
+ if (!fmt)
+ return -EINVAL;
+
+ fmt->width = ov2722_res[dev->fmt_idx].width;
+ fmt->height = ov2722_res[dev->fmt_idx].height;
+ fmt->code = MEDIA_BUS_FMT_SBGGR10_1X10;
+
+ return 0;
+}
+
+static int ov2722_detect(struct i2c_client *client)
+{
+ struct i2c_adapter *adapter = client->adapter;
+ u16 high, low;
+ int ret;
+ u16 id;
+ u8 revision;
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_I2C))
+ return -ENODEV;
+
+ ret = ov2722_read_reg(client, OV2722_8BIT,
+ OV2722_SC_CMMN_CHIP_ID_H, &high);
+ if (ret) {
+ dev_err(&client->dev, "sensor_id_high = 0x%x\n", high);
+ return -ENODEV;
+ }
+ ret = ov2722_read_reg(client, OV2722_8BIT,
+ OV2722_SC_CMMN_CHIP_ID_L, &low);
+ id = (high << 8) | low;
+
+ if ((id != OV2722_ID) && (id != OV2720_ID)) {
+ dev_err(&client->dev, "sensor ID error\n");
+ return -ENODEV;
+ }
+
+ ret = ov2722_read_reg(client, OV2722_8BIT,
+ OV2722_SC_CMMN_SUB_ID, &high);
+ revision = (u8) high & 0x0f;
+
+ dev_dbg(&client->dev, "sensor_revision = 0x%x\n", revision);
+ dev_dbg(&client->dev, "detect ov2722 success\n");
+ return 0;
+}
+
+static int ov2722_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct ov2722_device *dev = to_ov2722_sensor(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret;
+
+ mutex_lock(&dev->input_lock);
+
+ ret = ov2722_write_reg(client, OV2722_8BIT, OV2722_SW_STREAM,
+ enable ? OV2722_START_STREAMING :
+ OV2722_STOP_STREAMING);
+
+ mutex_unlock(&dev->input_lock);
+ return ret;
+}
+
+static int ov2722_s_config(struct v4l2_subdev *sd,
+ int irq, void *platform_data)
+{
+ struct ov2722_device *dev = to_ov2722_sensor(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret = 0;
+
+ if (!platform_data)
+ return -ENODEV;
+
+ dev->platform_data =
+ (struct camera_sensor_platform_data *)platform_data;
+
+ mutex_lock(&dev->input_lock);
+ if (dev->platform_data->platform_init) {
+ ret = dev->platform_data->platform_init(client);
+ if (ret) {
+ dev_err(&client->dev, "platform init err\n");
+ goto platform_init_failed;
+ }
+ }
+
+ /* Power off the module and power it on again later, since the
+ * initial power-on done by the board may not follow the power-on
+ * sequence required by the module.
+ */
+ ret = power_down(sd);
+ if (ret) {
+ dev_err(&client->dev, "ov2722 power-off err.\n");
+ goto fail_power_off;
+ }
+
+ ret = power_up(sd);
+ if (ret) {
+ dev_err(&client->dev, "ov2722 power-up err.\n");
+ goto fail_power_on;
+ }
+
+ ret = dev->platform_data->csi_cfg(sd, 1);
+ if (ret)
+ goto fail_csi_cfg;
+
+ /* config & detect sensor */
+ ret = ov2722_detect(client);
+ if (ret) {
+ dev_err(&client->dev, "ov2722_detect err s_config.\n");
+ goto fail_csi_cfg;
+ }
+
+ /* turn off the sensor after probing */
+ ret = power_down(sd);
+ if (ret) {
+ dev_err(&client->dev, "ov2722 power-off err.\n");
+ goto fail_csi_cfg;
+ }
+ mutex_unlock(&dev->input_lock);
+
+ return 0;
+
+fail_csi_cfg:
+ dev->platform_data->csi_cfg(sd, 0);
+fail_power_on:
+ power_down(sd);
+ dev_err(&client->dev, "sensor power-gating failed\n");
+fail_power_off:
+ if (dev->platform_data->platform_deinit)
+ dev->platform_data->platform_deinit();
+platform_init_failed:
+ mutex_unlock(&dev->input_lock);
+ return ret;
+}
+
+static int ov2722_g_parm(struct v4l2_subdev *sd,
+ struct v4l2_streamparm *param)
+{
+ struct ov2722_device *dev = to_ov2722_sensor(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ if (!param)
+ return -EINVAL;
+
+ if (param->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+ dev_err(&client->dev, "unsupported buffer type.\n");
+ return -EINVAL;
+ }
+
+ memset(param, 0, sizeof(*param));
+ param->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+
+ if (dev->fmt_idx >= 0 && dev->fmt_idx < N_RES) {
+ param->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
+ param->parm.capture.timeperframe.numerator = 1;
+ param->parm.capture.capturemode = dev->run_mode;
+ param->parm.capture.timeperframe.denominator =
+ ov2722_res[dev->fmt_idx].fps;
+ }
+ return 0;
+}
+
+static int ov2722_s_parm(struct v4l2_subdev *sd,
+ struct v4l2_streamparm *param)
+{
+ struct ov2722_device *dev = to_ov2722_sensor(sd);
+
+ mutex_lock(&dev->input_lock);
+ dev->run_mode = param->parm.capture.capturemode;
+ switch (dev->run_mode) {
+ case CI_MODE_VIDEO:
+ ov2722_res = ov2722_res_video;
+ N_RES = N_RES_VIDEO;
+ break;
+ case CI_MODE_STILL_CAPTURE:
+ ov2722_res = ov2722_res_still;
+ N_RES = N_RES_STILL;
+ break;
+ default:
+ ov2722_res = ov2722_res_preview;
+ N_RES = N_RES_PREVIEW;
+ }
+ mutex_unlock(&dev->input_lock);
+ return 0;
+}
+
+static int ov2722_g_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_frame_interval *interval)
+{
+ struct ov2722_device *dev = to_ov2722_sensor(sd);
+
+ interval->interval.numerator = 1;
+ interval->interval.denominator = ov2722_res[dev->fmt_idx].fps;
+
+ return 0;
+}
+
+static int ov2722_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ if (code->index >= MAX_FMTS)
+ return -EINVAL;
+
+ code->code = MEDIA_BUS_FMT_SBGGR10_1X10;
+ return 0;
+}
+
+static int ov2722_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ int index = fse->index;
+
+ if (index >= N_RES)
+ return -EINVAL;
+
+ fse->min_width = ov2722_res[index].width;
+ fse->min_height = ov2722_res[index].height;
+ fse->max_width = ov2722_res[index].width;
+ fse->max_height = ov2722_res[index].height;
+
+ return 0;
+}
+
+static int ov2722_g_skip_frames(struct v4l2_subdev *sd, u32 *frames)
+{
+ struct ov2722_device *dev = to_ov2722_sensor(sd);
+
+ mutex_lock(&dev->input_lock);
+ *frames = ov2722_res[dev->fmt_idx].skip_frames;
+ mutex_unlock(&dev->input_lock);
+
+ return 0;
+}
+
+static const struct v4l2_subdev_sensor_ops ov2722_sensor_ops = {
+ .g_skip_frames = ov2722_g_skip_frames,
+};
+
+static const struct v4l2_subdev_video_ops ov2722_video_ops = {
+ .s_stream = ov2722_s_stream,
+ .g_parm = ov2722_g_parm,
+ .s_parm = ov2722_s_parm,
+ .g_frame_interval = ov2722_g_frame_interval,
+};
+
+static const struct v4l2_subdev_core_ops ov2722_core_ops = {
+ .s_power = ov2722_s_power,
+ .ioctl = ov2722_ioctl,
+};
+
+static const struct v4l2_subdev_pad_ops ov2722_pad_ops = {
+ .enum_mbus_code = ov2722_enum_mbus_code,
+ .enum_frame_size = ov2722_enum_frame_size,
+ .get_fmt = ov2722_get_fmt,
+ .set_fmt = ov2722_set_fmt,
+};
+
+static const struct v4l2_subdev_ops ov2722_ops = {
+ .core = &ov2722_core_ops,
+ .video = &ov2722_video_ops,
+ .pad = &ov2722_pad_ops,
+ .sensor = &ov2722_sensor_ops,
+};
+
+static int ov2722_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct ov2722_device *dev = to_ov2722_sensor(sd);
+ dev_dbg(&client->dev, "ov2722_remove...\n");
+
+ if (dev->platform_data->platform_deinit)
+ dev->platform_data->platform_deinit();
+
+ dev->platform_data->csi_cfg(sd, 0);
+ v4l2_ctrl_handler_free(&dev->ctrl_handler);
+ v4l2_device_unregister_subdev(sd);
+
+ atomisp_gmin_remove_subdev(sd);
+
+ media_entity_cleanup(&dev->sd.entity);
+ kfree(dev);
+
+ return 0;
+}
+
+static int __ov2722_init_ctrl_handler(struct ov2722_device *dev)
+{
+ struct v4l2_ctrl_handler *hdl;
+ unsigned int i;
+ hdl = &dev->ctrl_handler;
+ v4l2_ctrl_handler_init(&dev->ctrl_handler, ARRAY_SIZE(ov2722_controls));
+ for (i = 0; i < ARRAY_SIZE(ov2722_controls); i++)
+ v4l2_ctrl_new_custom(&dev->ctrl_handler, &ov2722_controls[i],
+ NULL);
+
+ dev->link_freq = v4l2_ctrl_find(&dev->ctrl_handler, V4L2_CID_LINK_FREQ);
+
+ if (dev->ctrl_handler.error)
+ return dev->ctrl_handler.error;
+ if (!dev->link_freq)
+ return -EINVAL;
+
+ dev->sd.ctrl_handler = hdl;
+
+ return 0;
+}
+
+static int ov2722_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct ov2722_device *dev;
+ void *ovpdev;
+ int ret;
+ struct acpi_device *adev;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev) {
+ dev_err(&client->dev, "out of memory\n");
+ return -ENOMEM;
+ }
+
+ mutex_init(&dev->input_lock);
+
+ dev->fmt_idx = 0;
+ v4l2_i2c_subdev_init(&(dev->sd), client, &ov2722_ops);
+
+ ovpdev = client->dev.platform_data;
+ adev = ACPI_COMPANION(&client->dev);
+ if (adev) {
+ adev->power.flags.power_resources = 0;
+ ovpdev = gmin_camera_platform_data(&dev->sd,
+ ATOMISP_INPUT_FORMAT_RAW_10,
+ atomisp_bayer_order_grbg);
+ }
+
+ ret = ov2722_s_config(&dev->sd, client->irq, ovpdev);
+ if (ret)
+ goto out_free;
+
+ ret = __ov2722_init_ctrl_handler(dev);
+ if (ret)
+ goto out_ctrl_handler_free;
+
+ dev->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ dev->pad.flags = MEDIA_PAD_FL_SOURCE;
+ dev->format.code = MEDIA_BUS_FMT_SBGGR10_1X10;
+ dev->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
+
+ ret = media_entity_pads_init(&dev->sd.entity, 1, &dev->pad);
+ if (ret) {
+ ov2722_remove(client);
+ return ret;
+ }
+
+ if (ACPI_HANDLE(&client->dev))
+ ret = atomisp_register_i2c_module(&dev->sd, ovpdev, RAW_CAMERA);
+
+ return ret;
+
+out_ctrl_handler_free:
+ v4l2_ctrl_handler_free(&dev->ctrl_handler);
+
+out_free:
+ v4l2_device_unregister_subdev(&dev->sd);
+ kfree(dev);
+ return ret;
+}
+
+MODULE_DEVICE_TABLE(i2c, ov2722_id);
+
+static const struct acpi_device_id ov2722_acpi_match[] = {
+ { "INT33FB" },
+ {},
+};
+
+MODULE_DEVICE_TABLE(acpi, ov2722_acpi_match);
+
+static struct i2c_driver ov2722_driver = {
+ .driver = {
+ .name = OV2722_NAME,
+ .acpi_match_table = ACPI_PTR(ov2722_acpi_match),
+ },
+ .probe = ov2722_probe,
+ .remove = ov2722_remove,
+ .id_table = ov2722_id,
+};
+
+static int init_ov2722(void)
+{
+ return i2c_add_driver(&ov2722_driver);
+}
+
+static void exit_ov2722(void)
+{
+ i2c_del_driver(&ov2722_driver);
+}
+
+module_init(init_ov2722);
+module_exit(exit_ov2722);
+
+MODULE_AUTHOR("Wei Liu <wei.liu@intel.com>");
+MODULE_DESCRIPTION("A low-level driver for OmniVision 2722 sensors");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/media/atomisp/i2c/ov2722.h b/drivers/staging/media/atomisp/i2c/ov2722.h
new file mode 100644
index 000000000000..b0d40965d89e
--- /dev/null
+++ b/drivers/staging/media/atomisp/i2c/ov2722.h
@@ -0,0 +1,1267 @@
+/*
+ * Support for OmniVision OV2722 1080p HD camera sensor.
+ *
+ * Copyright (c) 2013 Intel Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+
+#ifndef __OV2722_H__
+#define __OV2722_H__
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/i2c.h>
+#include <linux/delay.h>
+#include <linux/videodev2.h>
+#include <linux/spinlock.h>
+#include <media/v4l2-subdev.h>
+#include <media/v4l2-device.h>
+#include <linux/v4l2-mediabus.h>
+#include <media/media-entity.h>
+#include <media/v4l2-ctrls.h>
+
+#include "../include/linux/atomisp_platform.h"
+
+#define OV2722_NAME "ov2722"
+
+#define OV2722_POWER_UP_RETRY_NUM 5
+
+/* Defines for register writes and register array processing */
+#define I2C_MSG_LENGTH 0x2
+#define I2C_RETRY_COUNT 5
+
+#define OV2722_FOCAL_LENGTH_NUM 278 /*2.78mm*/
+#define OV2722_FOCAL_LENGTH_DEM 100
+#define OV2722_F_NUMBER_DEFAULT_NUM 26
+#define OV2722_F_NUMBER_DEM 10
+
+#define MAX_FMTS 1
+
+/*
+ * focal length bits definition:
+ * bits 31-16: numerator, bits 15-0: denominator
+ */
+#define OV2722_FOCAL_LENGTH_DEFAULT 0x1160064
+
+/*
+ * current f-number bits definition:
+ * bits 31-16: numerator, bits 15-0: denominator
+ */
+#define OV2722_F_NUMBER_DEFAULT 0x1a000a
+
+/*
+ * f-number range bits definition:
+ * bits 31-24: max f-number numerator
+ * bits 23-16: max f-number denominator
+ * bits 15-8: min f-number numerator
+ * bits 7-0: min f-number denominator
+ */
+#define OV2722_F_NUMBER_RANGE 0x1a0a1a0a
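+/*
+ * Worked decoding of the defaults above (derived from the values in this
+ * header, shown for clarity only):
+ *   OV2722_FOCAL_LENGTH_DEFAULT 0x1160064  = (278 << 16) | 100 -> 2.78 mm
+ *   OV2722_F_NUMBER_DEFAULT     0x1a000a   = (26 << 16) | 10   -> f/2.6
+ *   OV2722_F_NUMBER_RANGE       0x1a0a1a0a = max 26/10, min 26/10
+ */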
+#define OV2720_ID 0x2720
+#define OV2722_ID 0x2722
+
+#define OV2722_FINE_INTG_TIME_MIN 0
+#define OV2722_FINE_INTG_TIME_MAX_MARGIN 0
+#define OV2722_COARSE_INTG_TIME_MIN 1
+#define OV2722_COARSE_INTG_TIME_MAX_MARGIN 4
+
+/*
+ * OV2722 System control registers
+ */
+#define OV2722_SW_SLEEP 0x0100
+#define OV2722_SW_RESET 0x0103
+#define OV2722_SW_STREAM 0x0100
+
+#define OV2722_SC_CMMN_CHIP_ID_H 0x300A
+#define OV2722_SC_CMMN_CHIP_ID_L 0x300B
+#define OV2722_SC_CMMN_SCCB_ID 0x300C
+#define OV2722_SC_CMMN_SUB_ID 0x302A /* process, version*/
+
+#define OV2722_SC_CMMN_PAD_OEN0 0x3000
+#define OV2722_SC_CMMN_PAD_OEN1 0x3001
+#define OV2722_SC_CMMN_PAD_OEN2 0x3002
+#define OV2722_SC_CMMN_PAD_OUT0 0x3008
+#define OV2722_SC_CMMN_PAD_OUT1 0x3009
+#define OV2722_SC_CMMN_PAD_OUT2 0x300D
+#define OV2722_SC_CMMN_PAD_SEL0 0x300E
+#define OV2722_SC_CMMN_PAD_SEL1 0x300F
+#define OV2722_SC_CMMN_PAD_SEL2 0x3010
+
+#define OV2722_SC_CMMN_PAD_PK 0x3011
+#define OV2722_SC_CMMN_A_PWC_PK_O_13 0x3013
+#define OV2722_SC_CMMN_A_PWC_PK_O_14 0x3014
+
+#define OV2722_SC_CMMN_CLKRST0 0x301A
+#define OV2722_SC_CMMN_CLKRST1 0x301B
+#define OV2722_SC_CMMN_CLKRST2 0x301C
+#define OV2722_SC_CMMN_CLKRST3 0x301D
+#define OV2722_SC_CMMN_CLKRST4 0x301E
+#define OV2722_SC_CMMN_CLKRST5 0x3005
+#define OV2722_SC_CMMN_PCLK_DIV_CTRL 0x3007
+#define OV2722_SC_CMMN_CLOCK_SEL 0x3020
+#define OV2722_SC_SOC_CLKRST5 0x3040
+
+#define OV2722_SC_CMMN_PLL_CTRL0 0x3034
+#define OV2722_SC_CMMN_PLL_CTRL1 0x3035
+#define OV2722_SC_CMMN_PLL_CTRL2 0x3039
+#define OV2722_SC_CMMN_PLL_CTRL3 0x3037
+#define OV2722_SC_CMMN_PLL_MULTIPLIER 0x3036
+#define OV2722_SC_CMMN_PLL_DEBUG_OPT 0x3038
+#define OV2722_SC_CMMN_PLLS_CTRL0 0x303A
+#define OV2722_SC_CMMN_PLLS_CTRL1 0x303B
+#define OV2722_SC_CMMN_PLLS_CTRL2 0x303C
+#define OV2722_SC_CMMN_PLLS_CTRL3 0x303D
+
+#define OV2722_SC_CMMN_MIPI_PHY_16 0x3016
+#define OV2722_SC_CMMN_MIPI_PHY_17 0x3017
+#define OV2722_SC_CMMN_MIPI_SC_CTRL_18 0x3018
+#define OV2722_SC_CMMN_MIPI_SC_CTRL_19 0x3019
+#define OV2722_SC_CMMN_MIPI_SC_CTRL_21 0x3021
+#define OV2722_SC_CMMN_MIPI_SC_CTRL_22 0x3022
+
+#define OV2722_AEC_PK_EXPO_H 0x3500
+#define OV2722_AEC_PK_EXPO_M 0x3501
+#define OV2722_AEC_PK_EXPO_L 0x3502
+#define OV2722_AEC_MANUAL_CTRL 0x3503
+#define OV2722_AGC_ADJ_H 0x3508
+#define OV2722_AGC_ADJ_L 0x3509
+#define OV2722_VTS_DIFF_H 0x350c
+#define OV2722_VTS_DIFF_L 0x350d
+#define OV2722_GROUP_ACCESS 0x3208
+#define OV2722_HTS_H 0x380c
+#define OV2722_HTS_L 0x380d
+#define OV2722_VTS_H 0x380e
+#define OV2722_VTS_L 0x380f
+
+#define OV2722_MWB_GAIN_R_H 0x5186
+#define OV2722_MWB_GAIN_R_L 0x5187
+#define OV2722_MWB_GAIN_G_H 0x5188
+#define OV2722_MWB_GAIN_G_L 0x5189
+#define OV2722_MWB_GAIN_B_H 0x518a
+#define OV2722_MWB_GAIN_B_L 0x518b
+
+#define OV2722_H_CROP_START_H 0x3800
+#define OV2722_H_CROP_START_L 0x3801
+#define OV2722_V_CROP_START_H 0x3802
+#define OV2722_V_CROP_START_L 0x3803
+#define OV2722_H_CROP_END_H 0x3804
+#define OV2722_H_CROP_END_L 0x3805
+#define OV2722_V_CROP_END_H 0x3806
+#define OV2722_V_CROP_END_L 0x3807
+#define OV2722_H_OUTSIZE_H 0x3808
+#define OV2722_H_OUTSIZE_L 0x3809
+#define OV2722_V_OUTSIZE_H 0x380a
+#define OV2722_V_OUTSIZE_L 0x380b
+
+#define OV2722_START_STREAMING 0x01
+#define OV2722_STOP_STREAMING 0x00
+
+struct regval_list {
+ u16 reg_num;
+ u8 value;
+};
+
+struct ov2722_resolution {
+ u8 *desc;
+ const struct ov2722_reg *regs;
+ int res;
+ int width;
+ int height;
+ int fps;
+ int pix_clk_freq;
+ u32 skip_frames;
+ u16 pixels_per_line;
+ u16 lines_per_frame;
+ u8 bin_factor_x;
+ u8 bin_factor_y;
+ u8 bin_mode;
+ bool used;
+ int mipi_freq;
+};
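+
+/*
+ * Note: in the mode tables below, fps is consistent with
+ * pix_clk_freq (MHz) / (pixels_per_line * lines_per_frame), e.g. for the
+ * 1632x1092 mode: 85000000 / (2260 * 1244) ~= 30 fps.
+ */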
+
+struct ov2722_format {
+ u8 *desc;
+ u32 pixelformat;
+ struct ov2722_reg *regs;
+};
+
+/*
+ * ov2722 device structure.
+ */
+struct ov2722_device {
+ struct v4l2_subdev sd;
+ struct media_pad pad;
+ struct v4l2_mbus_framefmt format;
+ struct mutex input_lock;
+
+ struct camera_sensor_platform_data *platform_data;
+ int vt_pix_clk_freq_mhz;
+ int fmt_idx;
+ int run_mode;
+ u16 pixels_per_line;
+ u16 lines_per_frame;
+ u8 res;
+ u8 type;
+
+ struct v4l2_ctrl_handler ctrl_handler;
+ struct v4l2_ctrl *link_freq;
+};
+
+enum ov2722_tok_type {
+ OV2722_8BIT = 0x0001,
+ OV2722_16BIT = 0x0002,
+ OV2722_32BIT = 0x0004,
+ OV2722_TOK_TERM = 0xf000, /* terminating token for reg list */
+ OV2722_TOK_DELAY = 0xfe00, /* delay token for reg list */
+ OV2722_TOK_MASK = 0xfff0
+};
+
+/**
+ * struct ov2722_reg - ov2722 sensor register format
+ * @type: type of the register
+ * @reg: 16-bit offset to register
+ * @val: 8/16/32-bit register value
+ *
+ * Define a structure for sensor register initialization values
+ */
+struct ov2722_reg {
+ enum ov2722_tok_type type;
+ u16 reg;
+ u32 val; /* 8/16/32-bit register value */
+};
+
+#define to_ov2722_sensor(x) container_of(x, struct ov2722_device, sd)
+
+#define OV2722_MAX_WRITE_BUF_SIZE 30
+
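+/*
+ * Scratch buffer used by the register-array write helpers to batch writes
+ * to consecutive registers: a 16-bit start address followed by up to
+ * OV2722_MAX_WRITE_BUF_SIZE data bytes flushed in a single i2c transfer.
+ */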
+struct ov2722_write_buffer {
+ u16 addr;
+ u8 data[OV2722_MAX_WRITE_BUF_SIZE];
+};
+
+struct ov2722_write_ctrl {
+ int index;
+ struct ov2722_write_buffer buffer;
+};
+
+static const struct i2c_device_id ov2722_id[] = {
+ {OV2722_NAME, 0},
+ {}
+};
+
+/*
+ * Register settings for the various resolutions
+ */
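+/*
+ * Each 16-bit sensor setting is split across a high/low register pair,
+ * e.g. in the QVGA table below 0x3800 = 0x01 and 0x3801 = 0x42 give an
+ * H crop start of (0x01 << 8) | 0x42 = 322.
+ */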
+static struct ov2722_reg const ov2722_QVGA_30fps[] = {
+ {OV2722_8BIT, 0x3718, 0x10},
+ {OV2722_8BIT, 0x3702, 0x0c},
+ {OV2722_8BIT, 0x373a, 0x1c},
+ {OV2722_8BIT, 0x3715, 0x01},
+ {OV2722_8BIT, 0x3703, 0x0c},
+ {OV2722_8BIT, 0x3705, 0x06},
+ {OV2722_8BIT, 0x3730, 0x0e},
+ {OV2722_8BIT, 0x3704, 0x1c},
+ {OV2722_8BIT, 0x3f06, 0x00},
+ {OV2722_8BIT, 0x371c, 0x00},
+ {OV2722_8BIT, 0x371d, 0x46},
+ {OV2722_8BIT, 0x371e, 0x00},
+ {OV2722_8BIT, 0x371f, 0x63},
+ {OV2722_8BIT, 0x3708, 0x61},
+ {OV2722_8BIT, 0x3709, 0x12},
+ {OV2722_8BIT, 0x3800, 0x01},
+ {OV2722_8BIT, 0x3801, 0x42}, /* H crop start: 322 */
+ {OV2722_8BIT, 0x3802, 0x00},
+ {OV2722_8BIT, 0x3803, 0x20}, /* V crop start: 32 */
+ {OV2722_8BIT, 0x3804, 0x06},
+ {OV2722_8BIT, 0x3805, 0x95}, /* H crop end: 1685 */
+ {OV2722_8BIT, 0x3806, 0x04},
+ {OV2722_8BIT, 0x3807, 0x27}, /* V crop end: 1063 */
+ {OV2722_8BIT, 0x3808, 0x01},
+ {OV2722_8BIT, 0x3809, 0x50}, /* H output size: 336 */
+ {OV2722_8BIT, 0x380a, 0x01},
+ {OV2722_8BIT, 0x380b, 0x00}, /* V output size: 256 */
+
+ /* H blank timing */
+ {OV2722_8BIT, 0x380c, 0x08},
+ {OV2722_8BIT, 0x380d, 0x00}, /* H total size: 2048 */
+ {OV2722_8BIT, 0x380e, 0x04},
+ {OV2722_8BIT, 0x380f, 0xa0}, /* V total size: 1184 */
+ {OV2722_8BIT, 0x3810, 0x00},
+ {OV2722_8BIT, 0x3811, 0x04}, /* H window offset: 5 */
+ {OV2722_8BIT, 0x3812, 0x00},
+ {OV2722_8BIT, 0x3813, 0x01}, /* V window offset: 2 */
+ {OV2722_8BIT, 0x3820, 0xc0},
+ {OV2722_8BIT, 0x3821, 0x06}, /* flip isp*/
+ {OV2722_8BIT, 0x3814, 0x71},
+ {OV2722_8BIT, 0x3815, 0x71},
+ {OV2722_8BIT, 0x3612, 0x49},
+ {OV2722_8BIT, 0x3618, 0x00},
+ {OV2722_8BIT, 0x3a08, 0x01},
+ {OV2722_8BIT, 0x3a09, 0xc3},
+ {OV2722_8BIT, 0x3a0a, 0x01},
+ {OV2722_8BIT, 0x3a0b, 0x77},
+ {OV2722_8BIT, 0x3a0d, 0x00},
+ {OV2722_8BIT, 0x3a0e, 0x00},
+ {OV2722_8BIT, 0x4520, 0x09},
+ {OV2722_8BIT, 0x4837, 0x1b},
+ {OV2722_8BIT, 0x3000, 0xff},
+ {OV2722_8BIT, 0x3001, 0xff},
+ {OV2722_8BIT, 0x3002, 0xf0},
+ {OV2722_8BIT, 0x3600, 0x08},
+ {OV2722_8BIT, 0x3621, 0xc0},
+ {OV2722_8BIT, 0x3632, 0x53}, /* added for power opt */
+ {OV2722_8BIT, 0x3633, 0x63},
+ {OV2722_8BIT, 0x3634, 0x24},
+ {OV2722_8BIT, 0x3f01, 0x0c},
+ {OV2722_8BIT, 0x5001, 0xc1}, /* v_en, h_en, blc_en */
+ {OV2722_8BIT, 0x3614, 0xf0},
+ {OV2722_8BIT, 0x3630, 0x2d},
+ {OV2722_8BIT, 0x370b, 0x62},
+ {OV2722_8BIT, 0x3706, 0x61},
+ {OV2722_8BIT, 0x4000, 0x02},
+ {OV2722_8BIT, 0x4002, 0xc5},
+ {OV2722_8BIT, 0x4005, 0x08},
+ {OV2722_8BIT, 0x404f, 0x84},
+ {OV2722_8BIT, 0x4051, 0x00},
+ {OV2722_8BIT, 0x5000, 0xff},
+ {OV2722_8BIT, 0x3a18, 0x00},
+ {OV2722_8BIT, 0x3a19, 0x80},
+ {OV2722_8BIT, 0x4521, 0x00},
+ {OV2722_8BIT, 0x5183, 0xb0}, /* AWB red */
+ {OV2722_8BIT, 0x5184, 0xb0}, /* AWB green */
+ {OV2722_8BIT, 0x5185, 0xb0}, /* AWB blue */
+ {OV2722_8BIT, 0x5180, 0x03}, /* AWB manual mode */
+ {OV2722_8BIT, 0x370c, 0x0c},
+ {OV2722_8BIT, 0x4800, 0x24}, /* clk lane gate enable */
+ {OV2722_8BIT, 0x3035, 0x00},
+ {OV2722_8BIT, 0x3036, 0x26},
+ {OV2722_8BIT, 0x3037, 0xa1},
+ {OV2722_8BIT, 0x303e, 0x19},
+ {OV2722_8BIT, 0x3038, 0x06},
+ {OV2722_8BIT, 0x3018, 0x04},
+
+ /* Added for power optimization */
+ {OV2722_8BIT, 0x3000, 0x00},
+ {OV2722_8BIT, 0x3001, 0x00},
+ {OV2722_8BIT, 0x3002, 0x00},
+ {OV2722_8BIT, 0x3a0f, 0x40},
+ {OV2722_8BIT, 0x3a10, 0x38},
+ {OV2722_8BIT, 0x3a1b, 0x48},
+ {OV2722_8BIT, 0x3a1e, 0x30},
+ {OV2722_8BIT, 0x3a11, 0x90},
+ {OV2722_8BIT, 0x3a1f, 0x10},
+ {OV2722_8BIT, 0x3011, 0x22},
+ {OV2722_8BIT, 0x3a00, 0x58},
+ {OV2722_8BIT, 0x3503, 0x17},
+ {OV2722_8BIT, 0x3500, 0x00},
+ {OV2722_8BIT, 0x3501, 0x46},
+ {OV2722_8BIT, 0x3502, 0x00},
+ {OV2722_8BIT, 0x3508, 0x00},
+ {OV2722_8BIT, 0x3509, 0x10},
+ {OV2722_TOK_TERM, 0, 0},
+
+};
+
+static struct ov2722_reg const ov2722_480P_30fps[] = {
+ {OV2722_8BIT, 0x3718, 0x10},
+ {OV2722_8BIT, 0x3702, 0x18},
+ {OV2722_8BIT, 0x373a, 0x3c},
+ {OV2722_8BIT, 0x3715, 0x01},
+ {OV2722_8BIT, 0x3703, 0x1d},
+ {OV2722_8BIT, 0x3705, 0x12},
+ {OV2722_8BIT, 0x3730, 0x1f},
+ {OV2722_8BIT, 0x3704, 0x3f},
+ {OV2722_8BIT, 0x3f06, 0x1d},
+ {OV2722_8BIT, 0x371c, 0x00},
+ {OV2722_8BIT, 0x371d, 0x83},
+ {OV2722_8BIT, 0x371e, 0x00},
+ {OV2722_8BIT, 0x371f, 0xbd},
+ {OV2722_8BIT, 0x3708, 0x63},
+ {OV2722_8BIT, 0x3709, 0x52},
+ {OV2722_8BIT, 0x3800, 0x00},
+ {OV2722_8BIT, 0x3801, 0xf2}, /* H crop start: 322 - 80 = 242*/
+ {OV2722_8BIT, 0x3802, 0x00},
+ {OV2722_8BIT, 0x3803, 0x20}, /* V crop start: 32*/
+ {OV2722_8BIT, 0x3804, 0x06},
+ {OV2722_8BIT, 0x3805, 0xBB}, /* H crop end: 1643 + 80 = 1723*/
+ {OV2722_8BIT, 0x3806, 0x04},
+ {OV2722_8BIT, 0x3807, 0x03}, /* V crop end: 1027*/
+ {OV2722_8BIT, 0x3808, 0x02},
+ {OV2722_8BIT, 0x3809, 0xE0}, /* H output size: 656 +80 = 736*/
+ {OV2722_8BIT, 0x380a, 0x01},
+ {OV2722_8BIT, 0x380b, 0xF0}, /* V output size: 496 */
+
+ /* H blank timing */
+ {OV2722_8BIT, 0x380c, 0x08},
+ {OV2722_8BIT, 0x380d, 0x00}, /* H total size: 2048 */
+ {OV2722_8BIT, 0x380e, 0x04},
+ {OV2722_8BIT, 0x380f, 0xa0}, /* V total size: 1184 */
+ {OV2722_8BIT, 0x3810, 0x00},
+ {OV2722_8BIT, 0x3811, 0x04}, /* H window offset: 5 */
+ {OV2722_8BIT, 0x3812, 0x00},
+ {OV2722_8BIT, 0x3813, 0x01}, /* V window offset: 2 */
+ {OV2722_8BIT, 0x3820, 0x80},
+ {OV2722_8BIT, 0x3821, 0x06}, /* flip isp*/
+ {OV2722_8BIT, 0x3814, 0x31},
+ {OV2722_8BIT, 0x3815, 0x31},
+ {OV2722_8BIT, 0x3612, 0x4b},
+ {OV2722_8BIT, 0x3618, 0x04},
+ {OV2722_8BIT, 0x3a08, 0x02},
+ {OV2722_8BIT, 0x3a09, 0x67},
+ {OV2722_8BIT, 0x3a0a, 0x02},
+ {OV2722_8BIT, 0x3a0b, 0x00},
+ {OV2722_8BIT, 0x3a0d, 0x00},
+ {OV2722_8BIT, 0x3a0e, 0x00},
+ {OV2722_8BIT, 0x4520, 0x0a},
+ {OV2722_8BIT, 0x4837, 0x1b},
+ {OV2722_8BIT, 0x3000, 0xff},
+ {OV2722_8BIT, 0x3001, 0xff},
+ {OV2722_8BIT, 0x3002, 0xf0},
+ {OV2722_8BIT, 0x3600, 0x08},
+ {OV2722_8BIT, 0x3621, 0xc0},
+ {OV2722_8BIT, 0x3632, 0x53}, /* added for power opt */
+ {OV2722_8BIT, 0x3633, 0x63},
+ {OV2722_8BIT, 0x3634, 0x24},
+ {OV2722_8BIT, 0x3f01, 0x0c},
+ {OV2722_8BIT, 0x5001, 0xc1}, /* v_en, h_en, blc_en */
+ {OV2722_8BIT, 0x3614, 0xf0},
+ {OV2722_8BIT, 0x3630, 0x2d},
+ {OV2722_8BIT, 0x370b, 0x62},
+ {OV2722_8BIT, 0x3706, 0x61},
+ {OV2722_8BIT, 0x4000, 0x02},
+ {OV2722_8BIT, 0x4002, 0xc5},
+ {OV2722_8BIT, 0x4005, 0x08},
+ {OV2722_8BIT, 0x404f, 0x84},
+ {OV2722_8BIT, 0x4051, 0x00},
+ {OV2722_8BIT, 0x5000, 0xff},
+ {OV2722_8BIT, 0x3a18, 0x00},
+ {OV2722_8BIT, 0x3a19, 0x80},
+ {OV2722_8BIT, 0x4521, 0x00},
+ {OV2722_8BIT, 0x5183, 0xb0}, /* AWB red */
+ {OV2722_8BIT, 0x5184, 0xb0}, /* AWB green */
+ {OV2722_8BIT, 0x5185, 0xb0}, /* AWB blue */
+ {OV2722_8BIT, 0x5180, 0x03}, /* AWB manual mode */
+ {OV2722_8BIT, 0x370c, 0x0c},
+ {OV2722_8BIT, 0x4800, 0x24}, /* clk lane gate enable */
+ {OV2722_8BIT, 0x3035, 0x00},
+ {OV2722_8BIT, 0x3036, 0x26},
+ {OV2722_8BIT, 0x3037, 0xa1},
+ {OV2722_8BIT, 0x303e, 0x19},
+ {OV2722_8BIT, 0x3038, 0x06},
+ {OV2722_8BIT, 0x3018, 0x04},
+
+ /* Added for power optimization */
+ {OV2722_8BIT, 0x3000, 0x00},
+ {OV2722_8BIT, 0x3001, 0x00},
+ {OV2722_8BIT, 0x3002, 0x00},
+ {OV2722_8BIT, 0x3a0f, 0x40},
+ {OV2722_8BIT, 0x3a10, 0x38},
+ {OV2722_8BIT, 0x3a1b, 0x48},
+ {OV2722_8BIT, 0x3a1e, 0x30},
+ {OV2722_8BIT, 0x3a11, 0x90},
+ {OV2722_8BIT, 0x3a1f, 0x10},
+ {OV2722_8BIT, 0x3011, 0x22},
+ {OV2722_8BIT, 0x3a00, 0x58},
+ {OV2722_8BIT, 0x3503, 0x17},
+ {OV2722_8BIT, 0x3500, 0x00},
+ {OV2722_8BIT, 0x3501, 0x46},
+ {OV2722_8BIT, 0x3502, 0x00},
+ {OV2722_8BIT, 0x3508, 0x00},
+ {OV2722_8BIT, 0x3509, 0x10},
+ {OV2722_TOK_TERM, 0, 0},
+};
+
+static struct ov2722_reg const ov2722_VGA_30fps[] = {
+ {OV2722_8BIT, 0x3718, 0x10},
+ {OV2722_8BIT, 0x3702, 0x18},
+ {OV2722_8BIT, 0x373a, 0x3c},
+ {OV2722_8BIT, 0x3715, 0x01},
+ {OV2722_8BIT, 0x3703, 0x1d},
+ {OV2722_8BIT, 0x3705, 0x12},
+ {OV2722_8BIT, 0x3730, 0x1f},
+ {OV2722_8BIT, 0x3704, 0x3f},
+ {OV2722_8BIT, 0x3f06, 0x1d},
+ {OV2722_8BIT, 0x371c, 0x00},
+ {OV2722_8BIT, 0x371d, 0x83},
+ {OV2722_8BIT, 0x371e, 0x00},
+ {OV2722_8BIT, 0x371f, 0xbd},
+ {OV2722_8BIT, 0x3708, 0x63},
+ {OV2722_8BIT, 0x3709, 0x52},
+ {OV2722_8BIT, 0x3800, 0x01},
+ {OV2722_8BIT, 0x3801, 0x42}, /* H crop start: 322 */
+ {OV2722_8BIT, 0x3802, 0x00},
+ {OV2722_8BIT, 0x3803, 0x20}, /* V crop start: 32*/
+ {OV2722_8BIT, 0x3804, 0x06},
+ {OV2722_8BIT, 0x3805, 0x6B}, /* H crop end: 1643*/
+ {OV2722_8BIT, 0x3806, 0x04},
+ {OV2722_8BIT, 0x3807, 0x03}, /* V crop end: 1027*/
+ {OV2722_8BIT, 0x3808, 0x02},
+ {OV2722_8BIT, 0x3809, 0x90}, /* H output size: 656 */
+ {OV2722_8BIT, 0x380a, 0x01},
+ {OV2722_8BIT, 0x380b, 0xF0}, /* V output size: 496 */
+
+ /* H blank timing */
+ {OV2722_8BIT, 0x380c, 0x08},
+ {OV2722_8BIT, 0x380d, 0x00}, /* H total size: 2048 */
+ {OV2722_8BIT, 0x380e, 0x04},
+ {OV2722_8BIT, 0x380f, 0xa0}, /* V total size: 1184 */
+ {OV2722_8BIT, 0x3810, 0x00},
+ {OV2722_8BIT, 0x3811, 0x04}, /* H window offset: 5 */
+ {OV2722_8BIT, 0x3812, 0x00},
+ {OV2722_8BIT, 0x3813, 0x01}, /* V window offset: 2 */
+ {OV2722_8BIT, 0x3820, 0x80},
+ {OV2722_8BIT, 0x3821, 0x06}, /* flip isp*/
+ {OV2722_8BIT, 0x3814, 0x31},
+ {OV2722_8BIT, 0x3815, 0x31},
+ {OV2722_8BIT, 0x3612, 0x4b},
+ {OV2722_8BIT, 0x3618, 0x04},
+ {OV2722_8BIT, 0x3a08, 0x02},
+ {OV2722_8BIT, 0x3a09, 0x67},
+ {OV2722_8BIT, 0x3a0a, 0x02},
+ {OV2722_8BIT, 0x3a0b, 0x00},
+ {OV2722_8BIT, 0x3a0d, 0x00},
+ {OV2722_8BIT, 0x3a0e, 0x00},
+ {OV2722_8BIT, 0x4520, 0x0a},
+ {OV2722_8BIT, 0x4837, 0x29},
+ {OV2722_8BIT, 0x3000, 0xff},
+ {OV2722_8BIT, 0x3001, 0xff},
+ {OV2722_8BIT, 0x3002, 0xf0},
+ {OV2722_8BIT, 0x3600, 0x08},
+ {OV2722_8BIT, 0x3621, 0xc0},
+ {OV2722_8BIT, 0x3632, 0x53}, /* added for power opt */
+ {OV2722_8BIT, 0x3633, 0x63},
+ {OV2722_8BIT, 0x3634, 0x24},
+ {OV2722_8BIT, 0x3f01, 0x0c},
+ {OV2722_8BIT, 0x5001, 0xc1}, /* v_en, h_en, blc_en */
+ {OV2722_8BIT, 0x3614, 0xf0},
+ {OV2722_8BIT, 0x3630, 0x2d},
+ {OV2722_8BIT, 0x370b, 0x62},
+ {OV2722_8BIT, 0x3706, 0x61},
+ {OV2722_8BIT, 0x4000, 0x02},
+ {OV2722_8BIT, 0x4002, 0xc5},
+ {OV2722_8BIT, 0x4005, 0x08},
+ {OV2722_8BIT, 0x404f, 0x84},
+ {OV2722_8BIT, 0x4051, 0x00},
+ {OV2722_8BIT, 0x5000, 0xff},
+ {OV2722_8BIT, 0x3a18, 0x00},
+ {OV2722_8BIT, 0x3a19, 0x80},
+ {OV2722_8BIT, 0x4521, 0x00},
+ {OV2722_8BIT, 0x5183, 0xb0}, /* AWB red */
+ {OV2722_8BIT, 0x5184, 0xb0}, /* AWB green */
+ {OV2722_8BIT, 0x5185, 0xb0}, /* AWB blue */
+ {OV2722_8BIT, 0x5180, 0x03}, /* AWB manual mode */
+ {OV2722_8BIT, 0x370c, 0x0c},
+ {OV2722_8BIT, 0x4800, 0x24}, /* clk lane gate enable */
+ {OV2722_8BIT, 0x3035, 0x00},
+ {OV2722_8BIT, 0x3036, 0x26},
+ {OV2722_8BIT, 0x3037, 0xa1},
+ {OV2722_8BIT, 0x303e, 0x19},
+ {OV2722_8BIT, 0x3038, 0x06},
+ {OV2722_8BIT, 0x3018, 0x04},
+
+ /* Added for power optimization */
+ {OV2722_8BIT, 0x3000, 0x00},
+ {OV2722_8BIT, 0x3001, 0x00},
+ {OV2722_8BIT, 0x3002, 0x00},
+ {OV2722_8BIT, 0x3a0f, 0x40},
+ {OV2722_8BIT, 0x3a10, 0x38},
+ {OV2722_8BIT, 0x3a1b, 0x48},
+ {OV2722_8BIT, 0x3a1e, 0x30},
+ {OV2722_8BIT, 0x3a11, 0x90},
+ {OV2722_8BIT, 0x3a1f, 0x10},
+ {OV2722_8BIT, 0x3011, 0x22},
+ {OV2722_8BIT, 0x3a00, 0x58},
+ {OV2722_8BIT, 0x3503, 0x17},
+ {OV2722_8BIT, 0x3500, 0x00},
+ {OV2722_8BIT, 0x3501, 0x46},
+ {OV2722_8BIT, 0x3502, 0x00},
+ {OV2722_8BIT, 0x3508, 0x00},
+ {OV2722_8BIT, 0x3509, 0x10},
+ {OV2722_TOK_TERM, 0, 0},
+};
+
+static struct ov2722_reg const ov2722_1632_1092_30fps[] = {
+ {OV2722_8BIT, 0x3021, 0x03}, /* For standby: wait for a whole
+ frame to complete (vblank) */
+ {OV2722_8BIT, 0x3718, 0x10},
+ {OV2722_8BIT, 0x3702, 0x24},
+ {OV2722_8BIT, 0x373a, 0x60},
+ {OV2722_8BIT, 0x3715, 0x01},
+ {OV2722_8BIT, 0x3703, 0x2e},
+ {OV2722_8BIT, 0x3705, 0x10},
+ {OV2722_8BIT, 0x3730, 0x30},
+ {OV2722_8BIT, 0x3704, 0x62},
+ {OV2722_8BIT, 0x3f06, 0x3a},
+ {OV2722_8BIT, 0x371c, 0x00},
+ {OV2722_8BIT, 0x371d, 0xc4},
+ {OV2722_8BIT, 0x371e, 0x01},
+ {OV2722_8BIT, 0x371f, 0x0d},
+ {OV2722_8BIT, 0x3708, 0x61},
+ {OV2722_8BIT, 0x3709, 0x12},
+ {OV2722_8BIT, 0x3800, 0x00},
+ {OV2722_8BIT, 0x3801, 0x9E}, /* H crop start: 158 */
+ {OV2722_8BIT, 0x3802, 0x00},
+ {OV2722_8BIT, 0x3803, 0x01}, /* V crop start: 1 */
+ {OV2722_8BIT, 0x3804, 0x07},
+ {OV2722_8BIT, 0x3805, 0x05}, /* H crop end: 1797 */
+ {OV2722_8BIT, 0x3806, 0x04},
+ {OV2722_8BIT, 0x3807, 0x45}, /* V crop end: 1093 */
+
+ {OV2722_8BIT, 0x3808, 0x06},
+ {OV2722_8BIT, 0x3809, 0x60}, /* H output size: 1632 */
+ {OV2722_8BIT, 0x380a, 0x04},
+ {OV2722_8BIT, 0x380b, 0x44}, /* V output size: 1092 */
+ {OV2722_8BIT, 0x380c, 0x08},
+ {OV2722_8BIT, 0x380d, 0xd4}, /* H timing: 2260 */
+ {OV2722_8BIT, 0x380e, 0x04},
+ {OV2722_8BIT, 0x380f, 0xdc}, /* V timing: 1244 */
+ {OV2722_8BIT, 0x3810, 0x00},
+ {OV2722_8BIT, 0x3811, 0x03}, /* H window offset: 3 */
+ {OV2722_8BIT, 0x3812, 0x00},
+ {OV2722_8BIT, 0x3813, 0x02}, /* V window offset: 2 */
+ {OV2722_8BIT, 0x3820, 0x80},
+ {OV2722_8BIT, 0x3821, 0x06}, /* mirror */
+ {OV2722_8BIT, 0x3814, 0x11},
+ {OV2722_8BIT, 0x3815, 0x11},
+ {OV2722_8BIT, 0x3612, 0x0b},
+ {OV2722_8BIT, 0x3618, 0x04},
+ {OV2722_8BIT, 0x3a08, 0x01},
+ {OV2722_8BIT, 0x3a09, 0x50},
+ {OV2722_8BIT, 0x3a0a, 0x01},
+ {OV2722_8BIT, 0x3a0b, 0x18},
+ {OV2722_8BIT, 0x3a0d, 0x03},
+ {OV2722_8BIT, 0x3a0e, 0x03},
+ {OV2722_8BIT, 0x4520, 0x00},
+ {OV2722_8BIT, 0x4837, 0x1b},
+ {OV2722_8BIT, 0x3600, 0x08},
+ {OV2722_8BIT, 0x3621, 0xc0},
+ {OV2722_8BIT, 0x3632, 0xd2}, /* added for power opt */
+ {OV2722_8BIT, 0x3633, 0x23},
+ {OV2722_8BIT, 0x3634, 0x54},
+ {OV2722_8BIT, 0x3f01, 0x0c},
+ {OV2722_8BIT, 0x5001, 0xc1},
+ {OV2722_8BIT, 0x3614, 0xf0},
+ {OV2722_8BIT, 0x3630, 0x2d},
+ {OV2722_8BIT, 0x370b, 0x62},
+ {OV2722_8BIT, 0x3706, 0x61},
+ {OV2722_8BIT, 0x4000, 0x02},
+ {OV2722_8BIT, 0x4002, 0xc5},
+ {OV2722_8BIT, 0x4005, 0x08},
+ {OV2722_8BIT, 0x404f, 0x84},
+ {OV2722_8BIT, 0x4051, 0x00},
+ {OV2722_8BIT, 0x5000, 0xcf}, /* manual 3a */
+ {OV2722_8BIT, 0x301d, 0xf0}, /* enable group hold */
+ {OV2722_8BIT, 0x3a18, 0x00},
+ {OV2722_8BIT, 0x3a19, 0x80},
+ {OV2722_8BIT, 0x4521, 0x00},
+ {OV2722_8BIT, 0x5183, 0xb0},
+ {OV2722_8BIT, 0x5184, 0xb0},
+ {OV2722_8BIT, 0x5185, 0xb0},
+ {OV2722_8BIT, 0x370c, 0x0c},
+ {OV2722_8BIT, 0x3035, 0x00},
+ {OV2722_8BIT, 0x3036, 0x2c}, /* 422.4 MHz */
+ {OV2722_8BIT, 0x3037, 0xa1},
+ {OV2722_8BIT, 0x303e, 0x19},
+ {OV2722_8BIT, 0x3038, 0x06},
+ {OV2722_8BIT, 0x3018, 0x04},
+ {OV2722_8BIT, 0x3000, 0x00}, /* added for power optimization */
+ {OV2722_8BIT, 0x3001, 0x00},
+ {OV2722_8BIT, 0x3002, 0x00},
+ {OV2722_8BIT, 0x3a0f, 0x40},
+ {OV2722_8BIT, 0x3a10, 0x38},
+ {OV2722_8BIT, 0x3a1b, 0x48},
+ {OV2722_8BIT, 0x3a1e, 0x30},
+ {OV2722_8BIT, 0x3a11, 0x90},
+ {OV2722_8BIT, 0x3a1f, 0x10},
+ {OV2722_8BIT, 0x3503, 0x17}, /* manual 3a */
+ {OV2722_8BIT, 0x3500, 0x00},
+ {OV2722_8BIT, 0x3501, 0x3F},
+ {OV2722_8BIT, 0x3502, 0x00},
+ {OV2722_8BIT, 0x3508, 0x00},
+ {OV2722_8BIT, 0x3509, 0x00},
+ {OV2722_TOK_TERM, 0, 0}
+};
+
+static struct ov2722_reg const ov2722_1452_1092_30fps[] = {
+ {OV2722_8BIT, 0x3021, 0x03}, /* For standby: wait for a whole
+ frame to complete (vblank) */
+ {OV2722_8BIT, 0x3718, 0x10},
+ {OV2722_8BIT, 0x3702, 0x24},
+ {OV2722_8BIT, 0x373a, 0x60},
+ {OV2722_8BIT, 0x3715, 0x01},
+ {OV2722_8BIT, 0x3703, 0x2e},
+ {OV2722_8BIT, 0x3705, 0x10},
+ {OV2722_8BIT, 0x3730, 0x30},
+ {OV2722_8BIT, 0x3704, 0x62},
+ {OV2722_8BIT, 0x3f06, 0x3a},
+ {OV2722_8BIT, 0x371c, 0x00},
+ {OV2722_8BIT, 0x371d, 0xc4},
+ {OV2722_8BIT, 0x371e, 0x01},
+ {OV2722_8BIT, 0x371f, 0x0d},
+ {OV2722_8BIT, 0x3708, 0x61},
+ {OV2722_8BIT, 0x3709, 0x12},
+ {OV2722_8BIT, 0x3800, 0x00},
+ {OV2722_8BIT, 0x3801, 0xF8}, /* H crop start: 248 */
+ {OV2722_8BIT, 0x3802, 0x00},
+ {OV2722_8BIT, 0x3803, 0x01}, /* V crop start: 1 */
+ {OV2722_8BIT, 0x3804, 0x06},
+ {OV2722_8BIT, 0x3805, 0xab}, /* H crop end: 1707 */
+ {OV2722_8BIT, 0x3806, 0x04},
+ {OV2722_8BIT, 0x3807, 0x45}, /* V crop end: 1093 */
+ {OV2722_8BIT, 0x3808, 0x05},
+ {OV2722_8BIT, 0x3809, 0xac}, /* H output size: 1452 */
+ {OV2722_8BIT, 0x380a, 0x04},
+ {OV2722_8BIT, 0x380b, 0x44}, /* V output size: 1092 */
+ {OV2722_8BIT, 0x380c, 0x08},
+ {OV2722_8BIT, 0x380d, 0xd4}, /* H timing: 2260 */
+ {OV2722_8BIT, 0x380e, 0x04},
+ {OV2722_8BIT, 0x380f, 0xdc}, /* V timing: 1244 */
+ {OV2722_8BIT, 0x3810, 0x00},
+ {OV2722_8BIT, 0x3811, 0x03}, /* H window offset: 3 */
+ {OV2722_8BIT, 0x3812, 0x00},
+ {OV2722_8BIT, 0x3813, 0x02}, /* V window offset: 2 */
+ {OV2722_8BIT, 0x3820, 0x80},
+ {OV2722_8BIT, 0x3821, 0x06}, /* mirror */
+ {OV2722_8BIT, 0x3814, 0x11},
+ {OV2722_8BIT, 0x3815, 0x11},
+ {OV2722_8BIT, 0x3612, 0x0b},
+ {OV2722_8BIT, 0x3618, 0x04},
+ {OV2722_8BIT, 0x3a08, 0x01},
+ {OV2722_8BIT, 0x3a09, 0x50},
+ {OV2722_8BIT, 0x3a0a, 0x01},
+ {OV2722_8BIT, 0x3a0b, 0x18},
+ {OV2722_8BIT, 0x3a0d, 0x03},
+ {OV2722_8BIT, 0x3a0e, 0x03},
+ {OV2722_8BIT, 0x4520, 0x00},
+ {OV2722_8BIT, 0x4837, 0x1b},
+ {OV2722_8BIT, 0x3600, 0x08},
+ {OV2722_8BIT, 0x3621, 0xc0},
+ {OV2722_8BIT, 0x3632, 0xd2}, /* added for power opt */
+ {OV2722_8BIT, 0x3633, 0x23},
+ {OV2722_8BIT, 0x3634, 0x54},
+ {OV2722_8BIT, 0x3f01, 0x0c},
+ {OV2722_8BIT, 0x5001, 0xc1},
+ {OV2722_8BIT, 0x3614, 0xf0},
+ {OV2722_8BIT, 0x3630, 0x2d},
+ {OV2722_8BIT, 0x370b, 0x62},
+ {OV2722_8BIT, 0x3706, 0x61},
+ {OV2722_8BIT, 0x4000, 0x02},
+ {OV2722_8BIT, 0x4002, 0xc5},
+ {OV2722_8BIT, 0x4005, 0x08},
+ {OV2722_8BIT, 0x404f, 0x84},
+ {OV2722_8BIT, 0x4051, 0x00},
+ {OV2722_8BIT, 0x5000, 0xcf}, /* manual 3a */
+ {OV2722_8BIT, 0x301d, 0xf0}, /* enable group hold */
+ {OV2722_8BIT, 0x3a18, 0x00},
+ {OV2722_8BIT, 0x3a19, 0x80},
+ {OV2722_8BIT, 0x4521, 0x00},
+ {OV2722_8BIT, 0x5183, 0xb0},
+ {OV2722_8BIT, 0x5184, 0xb0},
+ {OV2722_8BIT, 0x5185, 0xb0},
+ {OV2722_8BIT, 0x370c, 0x0c},
+ {OV2722_8BIT, 0x3035, 0x00},
+ {OV2722_8BIT, 0x3036, 0x2c}, /* 422.4 MHz */
+ {OV2722_8BIT, 0x3037, 0xa1},
+ {OV2722_8BIT, 0x303e, 0x19},
+ {OV2722_8BIT, 0x3038, 0x06},
+ {OV2722_8BIT, 0x3018, 0x04},
+ {OV2722_8BIT, 0x3000, 0x00}, /* added for power optimization */
+ {OV2722_8BIT, 0x3001, 0x00},
+ {OV2722_8BIT, 0x3002, 0x00},
+ {OV2722_8BIT, 0x3a0f, 0x40},
+ {OV2722_8BIT, 0x3a10, 0x38},
+ {OV2722_8BIT, 0x3a1b, 0x48},
+ {OV2722_8BIT, 0x3a1e, 0x30},
+ {OV2722_8BIT, 0x3a11, 0x90},
+ {OV2722_8BIT, 0x3a1f, 0x10},
+ {OV2722_8BIT, 0x3503, 0x17}, /* manual 3a */
+ {OV2722_8BIT, 0x3500, 0x00},
+ {OV2722_8BIT, 0x3501, 0x3F},
+ {OV2722_8BIT, 0x3502, 0x00},
+ {OV2722_8BIT, 0x3508, 0x00},
+ {OV2722_8BIT, 0x3509, 0x00},
+ {OV2722_TOK_TERM, 0, 0}
+};
+
+static struct ov2722_reg const ov2722_1M3_30fps[] = {
+ {OV2722_8BIT, 0x3718, 0x10},
+ {OV2722_8BIT, 0x3702, 0x24},
+ {OV2722_8BIT, 0x373a, 0x60},
+ {OV2722_8BIT, 0x3715, 0x01},
+ {OV2722_8BIT, 0x3703, 0x2e},
+ {OV2722_8BIT, 0x3705, 0x10},
+ {OV2722_8BIT, 0x3730, 0x30},
+ {OV2722_8BIT, 0x3704, 0x62},
+ {OV2722_8BIT, 0x3f06, 0x3a},
+ {OV2722_8BIT, 0x371c, 0x00},
+ {OV2722_8BIT, 0x371d, 0xc4},
+ {OV2722_8BIT, 0x371e, 0x01},
+ {OV2722_8BIT, 0x371f, 0x0d},
+ {OV2722_8BIT, 0x3708, 0x61},
+ {OV2722_8BIT, 0x3709, 0x12},
+ {OV2722_8BIT, 0x3800, 0x01},
+ {OV2722_8BIT, 0x3801, 0x4a}, /* H crop start: 330 */
+ {OV2722_8BIT, 0x3802, 0x00},
+ {OV2722_8BIT, 0x3803, 0x03}, /* V crop start: 3 */
+ {OV2722_8BIT, 0x3804, 0x06},
+ {OV2722_8BIT, 0x3805, 0xe1}, /* H crop end: 1761 */
+ {OV2722_8BIT, 0x3806, 0x04},
+ {OV2722_8BIT, 0x3807, 0x47}, /* V crop end: 1095 */
+ {OV2722_8BIT, 0x3808, 0x05},
+ {OV2722_8BIT, 0x3809, 0x88}, /* H output size: 1416 */
+ {OV2722_8BIT, 0x380a, 0x04},
+ {OV2722_8BIT, 0x380b, 0x0a}, /* V output size: 1034 */
+
+ /* H blank timing */
+ {OV2722_8BIT, 0x380c, 0x08},
+ {OV2722_8BIT, 0x380d, 0x00}, /* H total size: 2048 */
+ {OV2722_8BIT, 0x380e, 0x04},
+ {OV2722_8BIT, 0x380f, 0xa0}, /* V total size: 1184 */
+ {OV2722_8BIT, 0x3810, 0x00},
+ {OV2722_8BIT, 0x3811, 0x05}, /* H window offset: 5 */
+ {OV2722_8BIT, 0x3812, 0x00},
+ {OV2722_8BIT, 0x3813, 0x02}, /* V window offset: 2 */
+ {OV2722_8BIT, 0x3820, 0x80},
+ {OV2722_8BIT, 0x3821, 0x06}, /* flip isp */
+ {OV2722_8BIT, 0x3814, 0x11},
+ {OV2722_8BIT, 0x3815, 0x11},
+ {OV2722_8BIT, 0x3612, 0x0b},
+ {OV2722_8BIT, 0x3618, 0x04},
+ {OV2722_8BIT, 0x3a08, 0x01},
+ {OV2722_8BIT, 0x3a09, 0x50},
+ {OV2722_8BIT, 0x3a0a, 0x01},
+ {OV2722_8BIT, 0x3a0b, 0x18},
+ {OV2722_8BIT, 0x3a0d, 0x03},
+ {OV2722_8BIT, 0x3a0e, 0x03},
+ {OV2722_8BIT, 0x4520, 0x00},
+ {OV2722_8BIT, 0x4837, 0x1b},
+ {OV2722_8BIT, 0x3000, 0xff},
+ {OV2722_8BIT, 0x3001, 0xff},
+ {OV2722_8BIT, 0x3002, 0xf0},
+ {OV2722_8BIT, 0x3600, 0x08},
+ {OV2722_8BIT, 0x3621, 0xc0},
+ {OV2722_8BIT, 0x3632, 0xd2}, /* added for power opt */
+ {OV2722_8BIT, 0x3633, 0x23},
+ {OV2722_8BIT, 0x3634, 0x54},
+ {OV2722_8BIT, 0x3f01, 0x0c},
+ {OV2722_8BIT, 0x5001, 0xc1}, /* v_en, h_en, blc_en */
+ {OV2722_8BIT, 0x3614, 0xf0},
+ {OV2722_8BIT, 0x3630, 0x2d},
+ {OV2722_8BIT, 0x370b, 0x62},
+ {OV2722_8BIT, 0x3706, 0x61},
+ {OV2722_8BIT, 0x4000, 0x02},
+ {OV2722_8BIT, 0x4002, 0xc5},
+ {OV2722_8BIT, 0x4005, 0x08},
+ {OV2722_8BIT, 0x404f, 0x84},
+ {OV2722_8BIT, 0x4051, 0x00},
+ {OV2722_8BIT, 0x5000, 0xcf},
+ {OV2722_8BIT, 0x3a18, 0x00},
+ {OV2722_8BIT, 0x3a19, 0x80},
+ {OV2722_8BIT, 0x4521, 0x00},
+ {OV2722_8BIT, 0x5183, 0xb0}, /* AWB red */
+ {OV2722_8BIT, 0x5184, 0xb0}, /* AWB green */
+ {OV2722_8BIT, 0x5185, 0xb0}, /* AWB blue */
+ {OV2722_8BIT, 0x5180, 0x03}, /* AWB manual mode */
+ {OV2722_8BIT, 0x370c, 0x0c},
+ {OV2722_8BIT, 0x4800, 0x24}, /* clk lane gate enable */
+ {OV2722_8BIT, 0x3035, 0x00},
+ {OV2722_8BIT, 0x3036, 0x26},
+ {OV2722_8BIT, 0x3037, 0xa1},
+ {OV2722_8BIT, 0x303e, 0x19},
+ {OV2722_8BIT, 0x3038, 0x06},
+ {OV2722_8BIT, 0x3018, 0x04},
+
+ /* Added for power optimization */
+ {OV2722_8BIT, 0x3000, 0x00},
+ {OV2722_8BIT, 0x3001, 0x00},
+ {OV2722_8BIT, 0x3002, 0x00},
+ {OV2722_8BIT, 0x3a0f, 0x40},
+ {OV2722_8BIT, 0x3a10, 0x38},
+ {OV2722_8BIT, 0x3a1b, 0x48},
+ {OV2722_8BIT, 0x3a1e, 0x30},
+ {OV2722_8BIT, 0x3a11, 0x90},
+ {OV2722_8BIT, 0x3a1f, 0x10},
+ {OV2722_8BIT, 0x3503, 0x17},
+ {OV2722_8BIT, 0x3500, 0x00},
+ {OV2722_8BIT, 0x3501, 0x46},
+ {OV2722_8BIT, 0x3502, 0x00},
+ {OV2722_8BIT, 0x3508, 0x00},
+ {OV2722_8BIT, 0x3509, 0x10},
+ {OV2722_TOK_TERM, 0, 0},
+};
+
+static struct ov2722_reg const ov2722_1080p_30fps[] = {
+ {OV2722_8BIT, 0x3021, 0x03}, /* For standby: wait for a whole
+ frame to complete (vblank) */
+ {OV2722_8BIT, 0x3718, 0x10},
+ {OV2722_8BIT, 0x3702, 0x24},
+ {OV2722_8BIT, 0x373a, 0x60},
+ {OV2722_8BIT, 0x3715, 0x01},
+ {OV2722_8BIT, 0x3703, 0x2e},
+ {OV2722_8BIT, 0x3705, 0x2b},
+ {OV2722_8BIT, 0x3730, 0x30},
+ {OV2722_8BIT, 0x3704, 0x62},
+ {OV2722_8BIT, 0x3f06, 0x3a},
+ {OV2722_8BIT, 0x371c, 0x00},
+ {OV2722_8BIT, 0x371d, 0xc4},
+ {OV2722_8BIT, 0x371e, 0x01},
+ {OV2722_8BIT, 0x371f, 0x28},
+ {OV2722_8BIT, 0x3708, 0x61},
+ {OV2722_8BIT, 0x3709, 0x12},
+ {OV2722_8BIT, 0x3800, 0x00},
+ {OV2722_8BIT, 0x3801, 0x08}, /* H crop start: 8 */
+ {OV2722_8BIT, 0x3802, 0x00},
+ {OV2722_8BIT, 0x3803, 0x01}, /* V crop start: 1 */
+ {OV2722_8BIT, 0x3804, 0x07},
+ {OV2722_8BIT, 0x3805, 0x9b}, /* H crop end: 1947 */
+ {OV2722_8BIT, 0x3806, 0x04},
+ {OV2722_8BIT, 0x3807, 0x45}, /* V crop end: 1093 */
+ {OV2722_8BIT, 0x3808, 0x07},
+ {OV2722_8BIT, 0x3809, 0x8c}, /* H output size: 1932 */
+ {OV2722_8BIT, 0x380a, 0x04},
+ {OV2722_8BIT, 0x380b, 0x44}, /* V output size: 1092 */
+ {OV2722_8BIT, 0x380c, 0x08},
+ {OV2722_8BIT, 0x380d, 0x14}, /* H timing: 2068 */
+ {OV2722_8BIT, 0x380e, 0x04},
+ {OV2722_8BIT, 0x380f, 0x5a}, /* V timing: 1114 */
+ {OV2722_8BIT, 0x3810, 0x00},
+ {OV2722_8BIT, 0x3811, 0x03}, /* H window offset: 3 */
+ {OV2722_8BIT, 0x3812, 0x00},
+ {OV2722_8BIT, 0x3813, 0x02}, /* V window offset: 2 */
+ {OV2722_8BIT, 0x3820, 0x80},
+ {OV2722_8BIT, 0x3821, 0x06}, /* mirror */
+ {OV2722_8BIT, 0x3814, 0x11},
+ {OV2722_8BIT, 0x3815, 0x11},
+ {OV2722_8BIT, 0x3612, 0x4b},
+ {OV2722_8BIT, 0x3618, 0x04},
+ {OV2722_8BIT, 0x3a08, 0x01},
+ {OV2722_8BIT, 0x3a09, 0x50},
+ {OV2722_8BIT, 0x3a0a, 0x01},
+ {OV2722_8BIT, 0x3a0b, 0x18},
+ {OV2722_8BIT, 0x3a0d, 0x03},
+ {OV2722_8BIT, 0x3a0e, 0x03},
+ {OV2722_8BIT, 0x4520, 0x00},
+ {OV2722_8BIT, 0x4837, 0x1b},
+ {OV2722_8BIT, 0x3000, 0xff},
+ {OV2722_8BIT, 0x3001, 0xff},
+ {OV2722_8BIT, 0x3002, 0xf0},
+ {OV2722_8BIT, 0x3600, 0x08},
+ {OV2722_8BIT, 0x3621, 0xc0},
+ {OV2722_8BIT, 0x3632, 0x53}, /* added for power opt */
+ {OV2722_8BIT, 0x3633, 0x63},
+ {OV2722_8BIT, 0x3634, 0x24},
+ {OV2722_8BIT, 0x3f01, 0x0c},
+ {OV2722_8BIT, 0x5001, 0xc1},
+ {OV2722_8BIT, 0x3614, 0xf0},
+ {OV2722_8BIT, 0x3630, 0x2d},
+ {OV2722_8BIT, 0x370b, 0x62},
+ {OV2722_8BIT, 0x3706, 0x61},
+ {OV2722_8BIT, 0x4000, 0x02},
+ {OV2722_8BIT, 0x4002, 0xc5},
+ {OV2722_8BIT, 0x4005, 0x08},
+ {OV2722_8BIT, 0x404f, 0x84},
+ {OV2722_8BIT, 0x4051, 0x00},
+ {OV2722_8BIT, 0x5000, 0xcd}, /* manual 3a */
+ {OV2722_8BIT, 0x301d, 0xf0}, /* enable group hold */
+ {OV2722_8BIT, 0x3a18, 0x00},
+ {OV2722_8BIT, 0x3a19, 0x80},
+ {OV2722_8BIT, 0x3503, 0x17},
+ {OV2722_8BIT, 0x4521, 0x00},
+ {OV2722_8BIT, 0x5183, 0xb0},
+ {OV2722_8BIT, 0x5184, 0xb0},
+ {OV2722_8BIT, 0x5185, 0xb0},
+ {OV2722_8BIT, 0x370c, 0x0c},
+ {OV2722_8BIT, 0x3035, 0x00},
+ {OV2722_8BIT, 0x3036, 0x24}, /* 345.6 MHz */
+ {OV2722_8BIT, 0x3037, 0xa1},
+ {OV2722_8BIT, 0x303e, 0x19},
+ {OV2722_8BIT, 0x3038, 0x06},
+ {OV2722_8BIT, 0x3018, 0x04},
+ {OV2722_8BIT, 0x3000, 0x00}, /* added for power optimization */
+ {OV2722_8BIT, 0x3001, 0x00},
+ {OV2722_8BIT, 0x3002, 0x00},
+ {OV2722_8BIT, 0x3a0f, 0x40},
+ {OV2722_8BIT, 0x3a10, 0x38},
+ {OV2722_8BIT, 0x3a1b, 0x48},
+ {OV2722_8BIT, 0x3a1e, 0x30},
+ {OV2722_8BIT, 0x3a11, 0x90},
+ {OV2722_8BIT, 0x3a1f, 0x10},
+ {OV2722_8BIT, 0x3011, 0x22},
+ {OV2722_8BIT, 0x3500, 0x00},
+ {OV2722_8BIT, 0x3501, 0x3F},
+ {OV2722_8BIT, 0x3502, 0x00},
+ {OV2722_8BIT, 0x3508, 0x00},
+ {OV2722_8BIT, 0x3509, 0x00},
+ {OV2722_TOK_TERM, 0, 0}
+};
+
+static struct ov2722_reg const ov2722_720p_30fps[] = {
+ {OV2722_8BIT, 0x3021, 0x03},
+ {OV2722_8BIT, 0x3718, 0x10},
+ {OV2722_8BIT, 0x3702, 0x24},
+ {OV2722_8BIT, 0x373a, 0x60},
+ {OV2722_8BIT, 0x3715, 0x01},
+ {OV2722_8BIT, 0x3703, 0x2e},
+ {OV2722_8BIT, 0x3705, 0x10},
+ {OV2722_8BIT, 0x3730, 0x30},
+ {OV2722_8BIT, 0x3704, 0x62},
+ {OV2722_8BIT, 0x3f06, 0x3a},
+ {OV2722_8BIT, 0x371c, 0x00},
+ {OV2722_8BIT, 0x371d, 0xc4},
+ {OV2722_8BIT, 0x371e, 0x01},
+ {OV2722_8BIT, 0x371f, 0x0d},
+ {OV2722_8BIT, 0x3708, 0x61},
+ {OV2722_8BIT, 0x3709, 0x12},
+ {OV2722_8BIT, 0x3800, 0x01},
+ {OV2722_8BIT, 0x3801, 0x40}, /* H crop start: 320 */
+ {OV2722_8BIT, 0x3802, 0x00},
+ {OV2722_8BIT, 0x3803, 0xb1}, /* V crop start: 177 */
+ {OV2722_8BIT, 0x3804, 0x06},
+ {OV2722_8BIT, 0x3805, 0x55}, /* H crop end: 1621 */
+ {OV2722_8BIT, 0x3806, 0x03},
+ {OV2722_8BIT, 0x3807, 0x95}, /* V crop end: 918 */
+ {OV2722_8BIT, 0x3808, 0x05},
+ {OV2722_8BIT, 0x3809, 0x10}, /* H output size: 0x0510 == 1296 */
+ {OV2722_8BIT, 0x380a, 0x02},
+ {OV2722_8BIT, 0x380b, 0xe0}, /* V output size: 0x02e0 == 736 */
+ {OV2722_8BIT, 0x380c, 0x08},
+ {OV2722_8BIT, 0x380d, 0x00}, /* H timing: 2048 */
+ {OV2722_8BIT, 0x380e, 0x04},
+ {OV2722_8BIT, 0x380f, 0xa3}, /* V timing: 1187 */
+ {OV2722_8BIT, 0x3810, 0x00},
+ {OV2722_8BIT, 0x3811, 0x03}, /* H window offset: 3 */
+ {OV2722_8BIT, 0x3812, 0x00},
+ {OV2722_8BIT, 0x3813, 0x02}, /* V window offset: 2 */
+ {OV2722_8BIT, 0x3820, 0x80},
+ {OV2722_8BIT, 0x3821, 0x06}, /* mirror */
+ {OV2722_8BIT, 0x3814, 0x11},
+ {OV2722_8BIT, 0x3815, 0x11},
+ {OV2722_8BIT, 0x3612, 0x0b},
+ {OV2722_8BIT, 0x3618, 0x04},
+ {OV2722_8BIT, 0x3a08, 0x01},
+ {OV2722_8BIT, 0x3a09, 0x50},
+ {OV2722_8BIT, 0x3a0a, 0x01},
+ {OV2722_8BIT, 0x3a0b, 0x18},
+ {OV2722_8BIT, 0x3a0d, 0x03},
+ {OV2722_8BIT, 0x3a0e, 0x03},
+ {OV2722_8BIT, 0x4520, 0x00},
+ {OV2722_8BIT, 0x4837, 0x1b},
+ {OV2722_8BIT, 0x3600, 0x08},
+ {OV2722_8BIT, 0x3621, 0xc0},
+ {OV2722_8BIT, 0x3632, 0xd2}, /* added for power opt */
+ {OV2722_8BIT, 0x3633, 0x23},
+ {OV2722_8BIT, 0x3634, 0x54},
+ {OV2722_8BIT, 0x3f01, 0x0c},
+ {OV2722_8BIT, 0x5001, 0xc1},
+ {OV2722_8BIT, 0x3614, 0xf0},
+ {OV2722_8BIT, 0x3630, 0x2d},
+ {OV2722_8BIT, 0x370b, 0x62},
+ {OV2722_8BIT, 0x3706, 0x61},
+ {OV2722_8BIT, 0x4000, 0x02},
+ {OV2722_8BIT, 0x4002, 0xc5},
+ {OV2722_8BIT, 0x4005, 0x08},
+ {OV2722_8BIT, 0x404f, 0x84},
+ {OV2722_8BIT, 0x4051, 0x00},
+ {OV2722_8BIT, 0x5000, 0xcf}, /* manual 3a */
+ {OV2722_8BIT, 0x301d, 0xf0}, /* enable group hold */
+ {OV2722_8BIT, 0x3a18, 0x00},
+ {OV2722_8BIT, 0x3a19, 0x80},
+ {OV2722_8BIT, 0x4521, 0x00},
+ {OV2722_8BIT, 0x5183, 0xb0},
+ {OV2722_8BIT, 0x5184, 0xb0},
+ {OV2722_8BIT, 0x5185, 0xb0},
+ {OV2722_8BIT, 0x370c, 0x0c},
+ {OV2722_8BIT, 0x3035, 0x00},
+ {OV2722_8BIT, 0x3036, 0x26}, /* {0x3036, 0x2c}, //422.4 MHz */
+ {OV2722_8BIT, 0x3037, 0xa1},
+ {OV2722_8BIT, 0x303e, 0x19},
+ {OV2722_8BIT, 0x3038, 0x06},
+ {OV2722_8BIT, 0x3018, 0x04},
+ {OV2722_8BIT, 0x3000, 0x00}, /* added for power optimization */
+ {OV2722_8BIT, 0x3001, 0x00},
+ {OV2722_8BIT, 0x3002, 0x00},
+ {OV2722_8BIT, 0x3a0f, 0x40},
+ {OV2722_8BIT, 0x3a10, 0x38},
+ {OV2722_8BIT, 0x3a1b, 0x48},
+ {OV2722_8BIT, 0x3a1e, 0x30},
+ {OV2722_8BIT, 0x3a11, 0x90},
+ {OV2722_8BIT, 0x3a1f, 0x10},
+ {OV2722_8BIT, 0x3503, 0x17}, /* manual 3a */
+ {OV2722_8BIT, 0x3500, 0x00},
+ {OV2722_8BIT, 0x3501, 0x3F},
+ {OV2722_8BIT, 0x3502, 0x00},
+ {OV2722_8BIT, 0x3508, 0x00},
+ {OV2722_8BIT, 0x3509, 0x00},
+ {OV2722_TOK_TERM, 0, 0},
+};
+
+struct ov2722_resolution ov2722_res_preview[] = {
+ {
+ .desc = "ov2722_1632_1092_30fps",
+ .width = 1632,
+ .height = 1092,
+ .fps = 30,
+ .pix_clk_freq = 85,
+ .used = 0,
+ .pixels_per_line = 2260,
+ .lines_per_frame = 1244,
+ .bin_factor_x = 1,
+ .bin_factor_y = 1,
+ .bin_mode = 0,
+ .skip_frames = 3,
+ .regs = ov2722_1632_1092_30fps,
+ .mipi_freq = 422400,
+ },
+ {
+ .desc = "ov2722_1452_1092_30fps",
+ .width = 1452,
+ .height = 1092,
+ .fps = 30,
+ .pix_clk_freq = 85,
+ .used = 0,
+ .pixels_per_line = 2260,
+ .lines_per_frame = 1244,
+ .bin_factor_x = 1,
+ .bin_factor_y = 1,
+ .bin_mode = 0,
+ .skip_frames = 3,
+ .regs = ov2722_1452_1092_30fps,
+ .mipi_freq = 422400,
+ },
+ {
+ .desc = "ov2722_1080P_30fps",
+ .width = 1932,
+ .height = 1092,
+ .pix_clk_freq = 69,
+ .fps = 30,
+ .used = 0,
+ .pixels_per_line = 2068,
+ .lines_per_frame = 1114,
+ .bin_factor_x = 1,
+ .bin_factor_y = 1,
+ .bin_mode = 0,
+ .skip_frames = 3,
+ .regs = ov2722_1080p_30fps,
+ .mipi_freq = 345600,
+ },
+};
+#define N_RES_PREVIEW (ARRAY_SIZE(ov2722_res_preview))
+
+struct ov2722_resolution ov2722_res_still[] = {
+ {
+ .desc = "ov2722_480P_30fps",
+ .width = 1632,
+ .height = 1092,
+ .fps = 30,
+ .pix_clk_freq = 85,
+ .used = 0,
+ .pixels_per_line = 2260,
+ .lines_per_frame = 1244,
+ .bin_factor_x = 1,
+ .bin_factor_y = 1,
+ .bin_mode = 0,
+ .skip_frames = 3,
+ .regs = ov2722_1632_1092_30fps,
+ .mipi_freq = 422400,
+ },
+ {
+ .desc = "ov2722_1452_1092_30fps",
+ .width = 1452,
+ .height = 1092,
+ .fps = 30,
+ .pix_clk_freq = 85,
+ .used = 0,
+ .pixels_per_line = 2260,
+ .lines_per_frame = 1244,
+ .bin_factor_x = 1,
+ .bin_factor_y = 1,
+ .bin_mode = 0,
+ .skip_frames = 3,
+ .regs = ov2722_1452_1092_30fps,
+ .mipi_freq = 422400,
+ },
+ {
+ .desc = "ov2722_1080P_30fps",
+ .width = 1932,
+ .height = 1092,
+ .pix_clk_freq = 69,
+ .fps = 30,
+ .used = 0,
+ .pixels_per_line = 2068,
+ .lines_per_frame = 1114,
+ .bin_factor_x = 1,
+ .bin_factor_y = 1,
+ .bin_mode = 0,
+ .skip_frames = 3,
+ .regs = ov2722_1080p_30fps,
+ .mipi_freq = 345600,
+ },
+};
+#define N_RES_STILL (ARRAY_SIZE(ov2722_res_still))
+
+struct ov2722_resolution ov2722_res_video[] = {
+ {
+ .desc = "ov2722_QVGA_30fps",
+ .width = 336,
+ .height = 256,
+ .fps = 30,
+ .pix_clk_freq = 73,
+ .used = 0,
+ .pixels_per_line = 2048,
+ .lines_per_frame = 1184,
+ .bin_factor_x = 1,
+ .bin_factor_y = 1,
+ .bin_mode = 0,
+ .skip_frames = 3,
+ .regs = ov2722_QVGA_30fps,
+ .mipi_freq = 364800,
+ },
+ {
+ .desc = "ov2722_480P_30fps",
+ .width = 736,
+ .height = 496,
+ .fps = 30,
+ .pix_clk_freq = 73,
+ .used = 0,
+ .pixels_per_line = 2048,
+ .lines_per_frame = 1184,
+ .bin_factor_x = 1,
+ .bin_factor_y = 1,
+ .bin_mode = 0,
+ .skip_frames = 3,
+ .regs = ov2722_480P_30fps,
+ .mipi_freq = 364800,
+ },
+ {
+ .desc = "ov2722_1080P_30fps",
+ .width = 1932,
+ .height = 1092,
+ .pix_clk_freq = 69,
+ .fps = 30,
+ .used = 0,
+ .pixels_per_line = 2068,
+ .lines_per_frame = 1114,
+ .bin_factor_x = 1,
+ .bin_factor_y = 1,
+ .bin_mode = 0,
+ .skip_frames = 3,
+ .regs = ov2722_1080p_30fps,
+ .mipi_freq = 345600,
+ },
+};
+#define N_RES_VIDEO (ARRAY_SIZE(ov2722_res_video))
+
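+/*
+ * Currently active mode table; ov2722_s_parm() repoints these at the
+ * preview, still or video table depending on the requested run mode.
+ */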
+static struct ov2722_resolution *ov2722_res = ov2722_res_preview;
+static int N_RES = N_RES_PREVIEW;
+#endif
diff --git a/drivers/staging/media/atomisp/i2c/ov5693/Kconfig b/drivers/staging/media/atomisp/i2c/ov5693/Kconfig
new file mode 100644
index 000000000000..9fb1bffbe9b3
--- /dev/null
+++ b/drivers/staging/media/atomisp/i2c/ov5693/Kconfig
@@ -0,0 +1,11 @@
+config VIDEO_OV5693
+ tristate "Omnivision ov5693 sensor support"
+ depends on I2C && VIDEO_V4L2
+ ---help---
+ This is a Video4Linux2 sensor-level driver for the OmniVision
+ ov5693 5 Mpixel camera.
+
+ The ov5693 is a video camera sensor.
+
+ It currently only works with the atomisp driver.
+
diff --git a/drivers/staging/media/atomisp/i2c/ov5693/Makefile b/drivers/staging/media/atomisp/i2c/ov5693/Makefile
new file mode 100644
index 000000000000..fceb9e9b881b
--- /dev/null
+++ b/drivers/staging/media/atomisp/i2c/ov5693/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_VIDEO_OV5693) += ov5693.o
+
+ccflags-y += -Werror
diff --git a/drivers/staging/media/atomisp/i2c/ov5693/ad5823.h b/drivers/staging/media/atomisp/i2c/ov5693/ad5823.h
new file mode 100644
index 000000000000..2dd894989cd9
--- /dev/null
+++ b/drivers/staging/media/atomisp/i2c/ov5693/ad5823.h
@@ -0,0 +1,67 @@
+/*
+ * Support for AD5823 VCM.
+ *
+ * Copyright (c) 2013 Intel Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+
+#ifndef __AD5823_H__
+#define __AD5823_H__
+
+#include <linux/types.h>
+
+
+#define AD5823_VCM_ADDR 0x0c
+
+#define AD5823_REG_RESET 0x01
+#define AD5823_REG_MODE 0x02
+#define AD5823_REG_VCM_MOVE_TIME 0x03
+#define AD5823_REG_VCM_CODE_MSB 0x04
+#define AD5823_REG_VCM_CODE_LSB 0x05
+#define AD5823_REG_VCM_THRESHOLD_MSB 0x06
+#define AD5823_REG_VCM_THRESHOLD_LSB 0x07
+
+#define AD5823_REG_LENGTH 0x1
+
+#define AD5823_RING_CTRL_ENABLE 0x04
+#define AD5823_RING_CTRL_DISABLE 0x00
+
+#define AD5823_RESONANCE_PERIOD 100000
+#define AD5823_RESONANCE_COEF 512
+#define AD5823_HIGH_FREQ_RANGE 0x80
+
+#define VCM_CODE_MSB_MASK 0xfc
+#define AD5823_INIT_FOCUS_POS 350
+
+enum ad5823_tok_type {
+ AD5823_8BIT = 0x1,
+ AD5823_16BIT = 0x2,
+};
+
+enum ad5823_vcm_mode {
+ AD5823_ARC_RES0 = 0x0, /* Actuator response control RES1 */
+ AD5823_ARC_RES1 = 0x1, /* Actuator response control RES0.5 */
+ AD5823_ARC_RES2 = 0x2, /* Actuator response control RES2 */
+ AD5823_ESRC = 0x3, /* Enhanced slew rate control */
+ AD5823_DIRECT = 0x4, /* Direct control */
+};
+
+#define AD5823_INVALID_CONFIG 0xffffffff
+#define AD5823_MAX_FOCUS_POS 1023
+#define DELAY_PER_STEP_NS 1000000
+#define DELAY_MAX_PER_STEP_NS (1000000 * 1023)
+#endif
diff --git a/drivers/staging/media/atomisp/i2c/ov5693/ov5693.c b/drivers/staging/media/atomisp/i2c/ov5693/ov5693.c
new file mode 100644
index 000000000000..5e9dafe7cc32
--- /dev/null
+++ b/drivers/staging/media/atomisp/i2c/ov5693/ov5693.c
@@ -0,0 +1,2066 @@
+/*
+ * Support for OmniVision OV5693 5 Mpixel camera sensor.
+ *
+ * Copyright (c) 2013 Intel Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/kmod.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/gpio.h>
+#include <linux/moduleparam.h>
+#include <media/v4l2-device.h>
+#include <linux/io.h>
+#include <linux/acpi.h>
+#include "../../include/linux/atomisp_gmin_platform.h"
+
+#include "ov5693.h"
+#include "ad5823.h"
+
+#define __cci_delay(t) \
+ do { \
+ if ((t) < 10) { \
+ usleep_range((t) * 1000, ((t) + 1) * 1000); \
+ } else { \
+ msleep((t)); \
+ } \
+ } while (0)
+
+/* Value 30ms reached through experimentation on byt ecs.
+ * The DS specifies a much lower value but when using a smaller value
+ * the I2C bus sometimes locks up permanently when starting the camera.
+ * This issue could not be reproduced on cht, so the delay can be
+ * lowered there via the module parameter below at load time.
+ */
+static uint up_delay = 30;
+module_param(up_delay, uint, 0644);
+MODULE_PARM_DESC(up_delay, "Delay prior to the first CCI transaction for ov5693");
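+/*
+ * For example, the delay can be overridden at load time with
+ * "modprobe ov5693 up_delay=5" or adjusted afterwards through
+ * /sys/module/ov5693/parameters/up_delay.
+ */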
+
+static int vcm_ad_i2c_wr8(struct i2c_client *client, u8 reg, u8 val)
+{
+ int err;
+ struct i2c_msg msg;
+ u8 buf[2];
+
+ buf[0] = reg;
+ buf[1] = val;
+
+ msg.addr = VCM_ADDR;
+ msg.flags = 0;
+ msg.len = 2;
+ msg.buf = &buf[0];
+
+ err = i2c_transfer(client->adapter, &msg, 1);
+ if (err != 1) {
+ dev_err(&client->dev, "%s: vcm i2c fail, err code = %d\n",
+ __func__, err);
+ return -EIO;
+ }
+ return 0;
+}
+
+static int ad5823_i2c_write(struct i2c_client *client, u8 reg, u8 val)
+{
+ struct i2c_msg msg;
+ u8 buf[2];
+ buf[0] = reg;
+ buf[1] = val;
+ msg.addr = AD5823_VCM_ADDR;
+ msg.flags = 0;
+ msg.len = 0x02;
+ msg.buf = &buf[0];
+
+ if (i2c_transfer(client->adapter, &msg, 1) != 1)
+ return -EIO;
+ return 0;
+}
+
+static int ad5823_i2c_read(struct i2c_client *client, u8 reg, u8 *val)
+{
+ struct i2c_msg msg[2];
+ u8 buf[2];
+ buf[0] = reg;
+ buf[1] = 0;
+
+ msg[0].addr = AD5823_VCM_ADDR;
+ msg[0].flags = 0;
+ msg[0].len = 0x01;
+ msg[0].buf = &buf[0];
+
+ msg[1].addr = AD5823_VCM_ADDR;
+ msg[1].flags = I2C_M_RD;
+ msg[1].len = 0x01;
+ msg[1].buf = &buf[1];
+ *val = 0;
+ if (i2c_transfer(client->adapter, msg, 2) != 2)
+ return -EIO;
+ *val = buf[1];
+ return 0;
+}
+
+
+static const uint32_t ov5693_embedded_effective_size = 28;
+
+/* i2c read/write stuff */
+static int ov5693_read_reg(struct i2c_client *client,
+ u16 data_length, u16 reg, u16 *val)
+{
+ int err;
+ struct i2c_msg msg[2];
+ unsigned char data[6];
+
+ if (!client->adapter) {
+ dev_err(&client->dev, "%s error, no client->adapter\n",
+ __func__);
+ return -ENODEV;
+ }
+
+ if (data_length != OV5693_8BIT && data_length != OV5693_16BIT
+ && data_length != OV5693_32BIT) {
+ dev_err(&client->dev, "%s error, invalid data length\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ memset(msg, 0, sizeof(msg));
+
+ msg[0].addr = client->addr;
+ msg[0].flags = 0;
+ msg[0].len = I2C_MSG_LENGTH;
+ msg[0].buf = data;
+
+ /* high byte goes out first */
+ data[0] = (u8)(reg >> 8);
+ data[1] = (u8)(reg & 0xff);
+
+ msg[1].addr = client->addr;
+ msg[1].len = data_length;
+ msg[1].flags = I2C_M_RD;
+ msg[1].buf = data;
+
+ err = i2c_transfer(client->adapter, msg, 2);
+ if (err != 2) {
+ if (err >= 0)
+ err = -EIO;
+ dev_err(&client->dev,
+ "read from offset 0x%x error %d", reg, err);
+ return err;
+ }
+
+ *val = 0;
+ /* high byte comes first */
+ if (data_length == OV5693_8BIT)
+ *val = (u8)data[0];
+ else if (data_length == OV5693_16BIT)
+ *val = be16_to_cpu(*(u16 *)&data[0]);
+ else
+ *val = be32_to_cpu(*(u32 *)&data[0]);
+
+ return 0;
+}
+
+static int ov5693_i2c_write(struct i2c_client *client, u16 len, u8 *data)
+{
+ struct i2c_msg msg;
+ const int num_msg = 1;
+ int ret;
+
+ msg.addr = client->addr;
+ msg.flags = 0;
+ msg.len = len;
+ msg.buf = data;
+ ret = i2c_transfer(client->adapter, &msg, 1);
+
+ return ret == num_msg ? 0 : -EIO;
+}
+
+static int vcm_dw_i2c_write(struct i2c_client *client, u16 data)
+{
+ struct i2c_msg msg;
+ const int num_msg = 1;
+ int ret;
+ u16 val;
+
+ val = cpu_to_be16(data);
+ msg.addr = VCM_ADDR;
+ msg.flags = 0;
+ msg.len = OV5693_16BIT;
+ msg.buf = (u8 *)&val;
+
+ ret = i2c_transfer(client->adapter, &msg, 1);
+
+ return ret == num_msg ? 0 : -EIO;
+}
+
+/* Theory: per datasheet, the two VCMs both allow for a 2-byte read.
+ * The DW9714 doesn't actually specify what this does (it has a
+ * two-byte write-only protocol, but specifies the read sequence as
+ * legal), but it returns the same data (zeroes) always, after an
+ * undocumented initial NAK. The AD5823 has a one-byte address
+ * register to which all writes go, and subsequent reads will cycle
+ * through the 8 bytes of registers. Notably, the default values (the
+ * device is always power-cycled affirmatively, so we can rely on
+ * these) in AD5823 are not pairwise repetitions of the same 16 bit
+ * word. So all we have to do is sequentially read two bytes at a
+ * time and see if we detect a difference in any of the first four
+ * pairs. */
+static int vcm_detect(struct i2c_client *client)
+{
+ int i, ret;
+ struct i2c_msg msg;
+ u16 data0 = 0, data;
+ for (i = 0; i < 4; i++) {
+ msg.addr = VCM_ADDR;
+ msg.flags = I2C_M_RD;
+ msg.len = sizeof(data);
+ msg.buf = (u8 *)&data;
+ ret = i2c_transfer(client->adapter, &msg, 1);
+
+ /* DW9714 always fails the first read and returns
+ * zeroes for subsequent ones */
+ if (i == 0 && ret == -EREMOTEIO) {
+ data0 = 0;
+ continue;
+ }
+
+ if (i == 0)
+ data0 = data;
+
+ if (data != data0)
+ return VCM_AD5823;
+ }
+ return ret == 1 ? VCM_DW9714 : ret;
+}
+
+static int ov5693_write_reg(struct i2c_client *client, u16 data_length,
+ u16 reg, u16 val)
+{
+ int ret;
+ unsigned char data[4] = {0};
+ u16 *wreg = (u16 *)data;
+ const u16 len = data_length + sizeof(u16); /* 16-bit address + data */
+
+ if (data_length != OV5693_8BIT && data_length != OV5693_16BIT) {
+ dev_err(&client->dev,
+ "%s error, invalid data_length\n", __func__);
+ return -EINVAL;
+ }
+
+ /* high byte goes out first */
+ *wreg = cpu_to_be16(reg);
+
+ if (data_length == OV5693_8BIT) {
+ data[2] = (u8)(val);
+ } else {
+ /* OV5693_16BIT */
+ u16 *wdata = (u16 *)&data[2];
+ *wdata = cpu_to_be16(val);
+ }
+
+ ret = ov5693_i2c_write(client, len, data);
+ if (ret)
+ dev_err(&client->dev,
+ "write error: wrote 0x%x to offset 0x%x error %d",
+ val, reg, ret);
+
+ return ret;
+}
+
+/*
+ * ov5693_write_reg_array - Write a list of OV5693 registers
+ * @client: i2c driver client structure
+ * @reglist: list of registers to be written
+ *
+ * This function writes a list of registers. When consecutive addresses
+ * are found in the list, it buffers the values and sends the whole run
+ * in a single i2c_transfer().
+ *
+ * __ov5693_flush_reg_array(), __ov5693_buf_reg_array() and
+ * __ov5693_write_reg_is_consecutive() are internal helpers for
+ * ov5693_write_reg_array() and should not be used anywhere else.
+ */
+
+static int __ov5693_flush_reg_array(struct i2c_client *client,
+ struct ov5693_write_ctrl *ctrl)
+{
+ u16 size;
+
+ if (ctrl->index == 0)
+ return 0;
+
+ size = sizeof(u16) + ctrl->index; /* 16-bit address + data */
+ ctrl->buffer.addr = cpu_to_be16(ctrl->buffer.addr);
+ ctrl->index = 0;
+
+ return ov5693_i2c_write(client, size, (u8 *)&ctrl->buffer);
+}
+
+static int __ov5693_buf_reg_array(struct i2c_client *client,
+ struct ov5693_write_ctrl *ctrl,
+ const struct ov5693_reg *next)
+{
+ int size;
+ u16 *data16;
+
+ switch (next->type) {
+ case OV5693_8BIT:
+ size = 1;
+ ctrl->buffer.data[ctrl->index] = (u8)next->val;
+ break;
+ case OV5693_16BIT:
+ size = 2;
+ data16 = (u16 *)&ctrl->buffer.data[ctrl->index];
+ *data16 = cpu_to_be16((u16)next->val);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* When first item is added, we need to store its starting address */
+ if (ctrl->index == 0)
+ ctrl->buffer.addr = next->reg;
+
+ ctrl->index += size;
+
+ /*
+ * If the buffer cannot hold another 16-bit item, flush it now so
+ * there is always room for the next entry.
+ */
+ if (ctrl->index + sizeof(u16) >= OV5693_MAX_WRITE_BUF_SIZE)
+ return __ov5693_flush_reg_array(client, ctrl);
+
+ return 0;
+}
+
+static int __ov5693_write_reg_is_consecutive(struct i2c_client *client,
+ struct ov5693_write_ctrl *ctrl,
+ const struct ov5693_reg *next)
+{
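+ /* an empty buffer can start a new run at any register */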
+ if (ctrl->index == 0)
+ return 1;
+
+ return ctrl->buffer.addr + ctrl->index == next->reg;
+}
+
+static int ov5693_write_reg_array(struct i2c_client *client,
+ const struct ov5693_reg *reglist)
+{
+ const struct ov5693_reg *next = reglist;
+ struct ov5693_write_ctrl ctrl;
+ int err;
+
+ ctrl.index = 0;
+ for (; next->type != OV5693_TOK_TERM; next++) {
+ switch (next->type & OV5693_TOK_MASK) {
+ case OV5693_TOK_DELAY:
+ err = __ov5693_flush_reg_array(client, &ctrl);
+ if (err)
+ return err;
+ msleep(next->val);
+ break;
+ default:
+ /*
+ * If the next address is not consecutive, the buffered data needs
+ * to be flushed before proceeding.
+ */
+ if (!__ov5693_write_reg_is_consecutive(client, &ctrl,
+ next)) {
+ err = __ov5693_flush_reg_array(client, &ctrl);
+ if (err)
+ return err;
+ }
+ err = __ov5693_buf_reg_array(client, &ctrl, next);
+ if (err) {
+ dev_err(&client->dev,
+ "%s: write error, aborted\n",
+ __func__);
+ return err;
+ }
+ break;
+ }
+ }
+
+ return __ov5693_flush_reg_array(client, &ctrl);
+}
+static int ov5693_g_focal(struct v4l2_subdev *sd, s32 *val)
+{
+ *val = (OV5693_FOCAL_LENGTH_NUM << 16) | OV5693_FOCAL_LENGTH_DEM;
+ return 0;
+}
+
+static int ov5693_g_fnumber(struct v4l2_subdev *sd, s32 *val)
+{
+ /* const f-number for this module */
+ *val = (OV5693_F_NUMBER_DEFAULT_NUM << 16) | OV5693_F_NUMBER_DEM;
+ return 0;
+}
+
+static int ov5693_g_fnumber_range(struct v4l2_subdev *sd, s32 *val)
+{
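+ /* [31:24] max num, [23:16] max denom, [15:8] min num, [7:0] min denom */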
+ *val = (OV5693_F_NUMBER_DEFAULT_NUM << 24) |
+ (OV5693_F_NUMBER_DEM << 16) |
+ (OV5693_F_NUMBER_DEFAULT_NUM << 8) | OV5693_F_NUMBER_DEM;
+ return 0;
+}
+
+static int ov5693_g_bin_factor_x(struct v4l2_subdev *sd, s32 *val)
+{
+ struct ov5693_device *dev = to_ov5693_sensor(sd);
+
+ *val = ov5693_res[dev->fmt_idx].bin_factor_x;
+
+ return 0;
+}
+
+static int ov5693_g_bin_factor_y(struct v4l2_subdev *sd, s32 *val)
+{
+ struct ov5693_device *dev = to_ov5693_sensor(sd);
+
+ *val = ov5693_res[dev->fmt_idx].bin_factor_y;
+
+ return 0;
+}
+
+static int ov5693_get_intg_factor(struct i2c_client *client,
+ struct camera_mipi_info *info,
+ const struct ov5693_resolution *res)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct ov5693_device *dev = to_ov5693_sensor(sd);
+ struct atomisp_sensor_mode_data *buf = &info->data;
+ unsigned int pix_clk_freq_hz;
+ u16 reg_val;
+ int ret;
+
+ if (info == NULL)
+ return -EINVAL;
+
+ /* pixel clock */
+ pix_clk_freq_hz = res->pix_clk_freq * 1000000;
+
+ dev->vt_pix_clk_freq_mhz = pix_clk_freq_hz;
+ buf->vt_pix_clk_freq_mhz = pix_clk_freq_hz;
+
+ /* get integration time */
+ buf->coarse_integration_time_min = OV5693_COARSE_INTG_TIME_MIN;
+ buf->coarse_integration_time_max_margin =
+ OV5693_COARSE_INTG_TIME_MAX_MARGIN;
+
+ buf->fine_integration_time_min = OV5693_FINE_INTG_TIME_MIN;
+ buf->fine_integration_time_max_margin =
+ OV5693_FINE_INTG_TIME_MAX_MARGIN;
+
+ buf->fine_integration_time_def = OV5693_FINE_INTG_TIME_MIN;
+ buf->frame_length_lines = res->lines_per_frame;
+ buf->line_length_pck = res->pixels_per_line;
+ buf->read_mode = res->bin_mode;
+
+ /* get the cropping and output resolution to ISP for this mode. */
+ ret = ov5693_read_reg(client, OV5693_16BIT,
+ OV5693_HORIZONTAL_START_H, &reg_val);
+ if (ret)
+ return ret;
+ buf->crop_horizontal_start = reg_val;
+
+ ret = ov5693_read_reg(client, OV5693_16BIT,
+ OV5693_VERTICAL_START_H, &reg_val);
+ if (ret)
+ return ret;
+ buf->crop_vertical_start = reg_val;
+
+ ret = ov5693_read_reg(client, OV5693_16BIT,
+ OV5693_HORIZONTAL_END_H, &reg_val);
+ if (ret)
+ return ret;
+ buf->crop_horizontal_end = reg_val;
+
+ ret = ov5693_read_reg(client, OV5693_16BIT,
+ OV5693_VERTICAL_END_H, &reg_val);
+ if (ret)
+ return ret;
+ buf->crop_vertical_end = reg_val;
+
+ ret = ov5693_read_reg(client, OV5693_16BIT,
+ OV5693_HORIZONTAL_OUTPUT_SIZE_H, &reg_val);
+ if (ret)
+ return ret;
+ buf->output_width = reg_val;
+
+ ret = ov5693_read_reg(client, OV5693_16BIT,
+ OV5693_VERTICAL_OUTPUT_SIZE_H, &reg_val);
+ if (ret)
+ return ret;
+ buf->output_height = reg_val;
+
+ buf->binning_factor_x = res->bin_factor_x ?
+ res->bin_factor_x : 1;
+ buf->binning_factor_y = res->bin_factor_y ?
+ res->bin_factor_y : 1;
+ return 0;
+}
+
+static long __ov5693_set_exposure(struct v4l2_subdev *sd, int coarse_itg,
+ int gain, int digitgain)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct ov5693_device *dev = to_ov5693_sensor(sd);
+ u16 vts, hts;
+ int ret, exp_val;
+
+ hts = ov5693_res[dev->fmt_idx].pixels_per_line;
+ vts = ov5693_res[dev->fmt_idx].lines_per_frame;
+ /*
+ * If coarse_itg is larger than 1<<15 it cannot be written to the
+ * register directly; instead write coarse_itg/2 together with 2*hts
+ * so the effective exposure time stays the same.
+ */
+ if (coarse_itg > (1 << 15)) {
+ hts = hts * 2;
+ coarse_itg = (int)coarse_itg / 2;
+ }
+ /* group hold */
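+ /* 0x00 starts the group 0 hold so the writes below are latched together */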
+ ret = ov5693_write_reg(client, OV5693_8BIT,
+ OV5693_GROUP_ACCESS, 0x00);
+ if (ret) {
+ dev_err(&client->dev, "%s: write %x error, aborted\n",
+ __func__, OV5693_GROUP_ACCESS);
+ return ret;
+ }
+
+ ret = ov5693_write_reg(client, OV5693_8BIT,
+ OV5693_TIMING_HTS_H, (hts >> 8) & 0xFF);
+ if (ret) {
+ dev_err(&client->dev, "%s: write %x error, aborted\n",
+ __func__, OV5693_TIMING_HTS_H);
+ return ret;
+ }
+
+ ret = ov5693_write_reg(client, OV5693_8BIT,
+ OV5693_TIMING_HTS_L, hts & 0xFF);
+ if (ret) {
+ dev_err(&client->dev, "%s: write %x error, aborted\n",
+ __func__, OV5693_TIMING_HTS_L);
+ return ret;
+ }
+ /* Increase the VTS to match exposure + MARGIN */
+ if (coarse_itg > vts - OV5693_INTEGRATION_TIME_MARGIN)
+ vts = (u16) coarse_itg + OV5693_INTEGRATION_TIME_MARGIN;
+
+ ret = ov5693_write_reg(client, OV5693_8BIT,
+ OV5693_TIMING_VTS_H, (vts >> 8) & 0xFF);
+ if (ret) {
+ dev_err(&client->dev, "%s: write %x error, aborted\n",
+ __func__, OV5693_TIMING_VTS_H);
+ return ret;
+ }
+
+ ret = ov5693_write_reg(client, OV5693_8BIT,
+ OV5693_TIMING_VTS_L, vts & 0xFF);
+ if (ret) {
+ dev_err(&client->dev, "%s: write %x error, aborted\n",
+ __func__, OV5693_TIMING_VTS_L);
+ return ret;
+ }
+
+ /* set exposure */
+
+ /* The lower four bits should be 0 */
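+ /* exp_val spans EXPOSURE_H/M/L, with the coarse time in bits [19:4] */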
+ exp_val = coarse_itg << 4;
+ ret = ov5693_write_reg(client, OV5693_8BIT,
+ OV5693_EXPOSURE_L, exp_val & 0xFF);
+ if (ret) {
+ dev_err(&client->dev, "%s: write %x error, aborted\n",
+ __func__, OV5693_EXPOSURE_L);
+ return ret;
+ }
+
+ ret = ov5693_write_reg(client, OV5693_8BIT,
+ OV5693_EXPOSURE_M, (exp_val >> 8) & 0xFF);
+ if (ret) {
+ dev_err(&client->dev, "%s: write %x error, aborted\n",
+ __func__, OV5693_EXPOSURE_M);
+ return ret;
+ }
+
+ ret = ov5693_write_reg(client, OV5693_8BIT,
+ OV5693_EXPOSURE_H, (exp_val >> 16) & 0x0F);
+ if (ret) {
+ dev_err(&client->dev, "%s: write %x error, aborted\n",
+ __func__, OV5693_EXPOSURE_H);
+ return ret;
+ }
+
+ /* Analog gain */
+ ret = ov5693_write_reg(client, OV5693_8BIT,
+ OV5693_AGC_L, gain & 0xff);
+ if (ret) {
+ dev_err(&client->dev, "%s: write %x error, aborted\n",
+ __func__, OV5693_AGC_L);
+ return ret;
+ }
+
+ ret = ov5693_write_reg(client, OV5693_8BIT,
+ OV5693_AGC_H, (gain >> 8) & 0xff);
+ if (ret) {
+ dev_err(&client->dev, "%s: write %x error, aborted\n",
+ __func__, OV5693_AGC_H);
+ return ret;
+ }
+
+ /* Digital gain */
+ if (digitgain) {
+ ret = ov5693_write_reg(client, OV5693_16BIT,
+ OV5693_MWB_RED_GAIN_H, digitgain);
+ if (ret) {
+ dev_err(&client->dev, "%s: write %x error, aborted\n",
+ __func__, OV5693_MWB_RED_GAIN_H);
+ return ret;
+ }
+
+ ret = ov5693_write_reg(client, OV5693_16BIT,
+ OV5693_MWB_GREEN_GAIN_H, digitgain);
+ if (ret) {
+ dev_err(&client->dev, "%s: write %x error, aborted\n",
+ __func__, OV5693_MWB_GREEN_GAIN_H);
+ return ret;
+ }
+
+ ret = ov5693_write_reg(client, OV5693_16BIT,
+ OV5693_MWB_BLUE_GAIN_H, digitgain);
+ if (ret) {
+ dev_err(&client->dev, "%s: write %x error, aborted\n",
+ __func__, OV5693_MWB_BLUE_GAIN_H);
+ return ret;
+ }
+ }
+
+ /* End group */
+ ret = ov5693_write_reg(client, OV5693_8BIT,
+ OV5693_GROUP_ACCESS, 0x10);
+ if (ret)
+ return ret;
+
+ /* Delay launch group */
+ ret = ov5693_write_reg(client, OV5693_8BIT,
+ OV5693_GROUP_ACCESS, 0xa0);
+ if (ret)
+ return ret;
+ return ret;
+}
+
+static int ov5693_set_exposure(struct v4l2_subdev *sd, int exposure,
+ int gain, int digitgain)
+{
+ struct ov5693_device *dev = to_ov5693_sensor(sd);
+ int ret;
+
+ mutex_lock(&dev->input_lock);
+ ret = __ov5693_set_exposure(sd, exposure, gain, digitgain);
+ mutex_unlock(&dev->input_lock);
+
+ return ret;
+}
+
+static long ov5693_s_exposure(struct v4l2_subdev *sd,
+ struct atomisp_exposure *exposure)
+{
+ u16 coarse_itg = exposure->integration_time[0];
+ u16 analog_gain = exposure->gain[0];
+ u16 digital_gain = exposure->gain[1];
+
+ /* reject the invalid zero analog gain value */
+ if (analog_gain == 0) {
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ v4l2_err(client, "%s: invalid value\n", __func__);
+ return -EINVAL;
+ }
+ return ov5693_set_exposure(sd, coarse_itg, analog_gain, digital_gain);
+}
+
+static int ov5693_read_otp_reg_array(struct i2c_client *client, u16 size,
+ u16 addr, u8 *buf)
+{
+ u16 index;
+ int ret;
+ u16 *pVal = 0;
+
+ for (index = 0; index <= size; index++) {
+ pVal = (u16 *) (buf + index);
+ ret = ov5693_read_reg(client, OV5693_8BIT, addr + index,
+ pVal);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int __ov5693_otp_read(struct v4l2_subdev *sd, u8 *buf)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct ov5693_device *dev = to_ov5693_sensor(sd);
+ int ret;
+ int i;
+ u8 *b = buf;
+ dev->otp_size = 0;
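+ /* read the OTP one 16-byte bank at a time into buf */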
+ for (i = 1; i < OV5693_OTP_BANK_MAX; i++) {
+ /* set bank number and OTP read mode: [7:6] = 2'b11, [5:0] = bank no */
+ ret = ov5693_write_reg(client, OV5693_8BIT,
+ OV5693_OTP_BANK_REG, (i | 0xc0));
+ if (ret) {
+ dev_err(&client->dev, "failed to prepare OTP page\n");
+ return ret;
+ }
+ //pr_debug("write 0x%x->0x%x\n",OV5693_OTP_BANK_REG,(i|0xc0));
+
+ /* enable read */
+ ret = ov5693_write_reg(client, OV5693_8BIT,
+ OV5693_OTP_READ_REG, OV5693_OTP_MODE_READ);
+ if (ret) {
+ dev_err(&client->dev,
+ "failed to set OTP reading mode page");
+ return ret;
+ }
+ //pr_debug("write 0x%x->0x%x\n",OV5693_OTP_READ_REG,OV5693_OTP_MODE_READ);
+
+ /* Reading the OTP data array */
+ ret = ov5693_read_otp_reg_array(client, OV5693_OTP_BANK_SIZE,
+ OV5693_OTP_START_ADDR,
+ b);
+ if (ret) {
+ dev_err(&client->dev, "failed to read OTP data\n");
+ return ret;
+ }
+
+ //pr_debug("BANK[%2d] %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n", i, *b, *(b+1), *(b+2), *(b+3), *(b+4), *(b+5), *(b+6), *(b+7), *(b+8), *(b+9), *(b+10), *(b+11), *(b+12), *(b+13), *(b+14), *(b+15));
+
+ /* Intel OTP map: try to read 320 bytes first. */
+ if (21 == i) {
+ if ((*b) == 0) {
+ dev->otp_size = 320;
+ break;
+ } else {
+ b = buf;
+ continue;
+ }
+ } else if (24 == i) { /* if the first 320 bytes do not exist, try the next 32 bytes */
+ if ((*b) == 0) {
+ dev->otp_size = 32;
+ break;
+ } else {
+ b = buf;
+ continue;
+ }
+ } else if (27 == i) { /* if the previous 32 bytes do not exist either, try the next 32 bytes */
+ if ((*b) == 0) {
+ dev->otp_size = 32;
+ break;
+ } else {
+ dev->otp_size = 0; /* no OTP data */
+ break;
+ }
+ }
+
+ b = b + OV5693_OTP_BANK_SIZE;
+ }
+ return 0;
+}
+
+/*
+ * Read the OTP data and store it in a devm-allocated buffer, which is
+ * released automatically with the device. The amount of valid data is
+ * recorded in dev->otp_size by __ov5693_otp_read().
+ */
+static void *ov5693_otp_read(struct v4l2_subdev *sd)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ u8 *buf;
+ int ret;
+
+ buf = devm_kzalloc(&client->dev, (OV5693_OTP_DATA_SIZE + 16), GFP_KERNEL);
+ if (!buf)
+ return ERR_PTR(-ENOMEM);
+
+ /* the OTP becomes valid after MIPI is on and software streaming is started */
+ ret = ov5693_write_reg(client, OV5693_8BIT, OV5693_FRAME_OFF_NUM, 0x00);
+
+ ret = ov5693_write_reg(client, OV5693_8BIT,
+ OV5693_SW_STREAM, OV5693_START_STREAMING);
+
+ ret = __ov5693_otp_read(sd, buf);
+
+ /* turn MIPI off and stop software streaming after the OTP read */
+ ret = ov5693_write_reg(client, OV5693_8BIT, OV5693_FRAME_OFF_NUM, 0x0f);
+
+ ret = ov5693_write_reg(client, OV5693_8BIT,
+ OV5693_SW_STREAM, OV5693_STOP_STREAMING);
+
+ /* Driver has failed to find valid data */
+ if (ret) {
+ dev_err(&client->dev, "sensor found no valid OTP data\n");
+ return ERR_PTR(ret);
+ }
+
+ return buf;
+}
+
+static int ov5693_g_priv_int_data(struct v4l2_subdev *sd,
+ struct v4l2_private_int_data *priv)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct ov5693_device *dev = to_ov5693_sensor(sd);
+ u8 __user *to = priv->data;
+ u32 read_size = priv->size;
+ int ret;
+
+ /* No need to copy data if size is 0 */
+ if (!read_size)
+ goto out;
+
+ if (IS_ERR(dev->otp_data)) {
+ dev_err(&client->dev, "OTP data not available");
+ return PTR_ERR(dev->otp_data);
+ }
+
+ /* Correct read_size value only if bigger than maximum */
+ if (read_size > OV5693_OTP_DATA_SIZE)
+ read_size = OV5693_OTP_DATA_SIZE;
+
+ ret = copy_to_user(to, dev->otp_data, read_size);
+ if (ret) {
+ dev_err(&client->dev, "%s: failed to copy OTP data to user\n",
+ __func__);
+ return -EFAULT;
+ }
+
+ pr_debug("%s read_size:%d\n", __func__, read_size);
+
+out:
+ /* Return correct size */
+ priv->size = dev->otp_size;
+
+ return 0;
+}
+
+static long ov5693_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
+{
+
+ switch (cmd) {
+ case ATOMISP_IOC_S_EXPOSURE:
+ return ov5693_s_exposure(sd, arg);
+ case ATOMISP_IOC_G_SENSOR_PRIV_INT_DATA:
+ return ov5693_g_priv_int_data(sd, arg);
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/* This returns the exposure time being used. This should only be used
+ for filling in EXIF data, not for actual image processing. */
+static int ov5693_q_exposure(struct v4l2_subdev *sd, s32 *value)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ u16 reg_v, reg_v2;
+ int ret;
+
+ /* get exposure */
+ ret = ov5693_read_reg(client, OV5693_8BIT,
+ OV5693_EXPOSURE_L,
+ &reg_v);
+ if (ret)
+ goto err;
+
+ ret = ov5693_read_reg(client, OV5693_8BIT,
+ OV5693_EXPOSURE_M,
+ &reg_v2);
+ if (ret)
+ goto err;
+
+ reg_v += reg_v2 << 8;
+ ret = ov5693_read_reg(client, OV5693_8BIT,
+ OV5693_EXPOSURE_H,
+ &reg_v2);
+ if (ret)
+ goto err;
+
+ *value = reg_v + (((u32)reg_v2 << 16));
+err:
+ return ret;
+}
+
+int ad5823_t_focus_vcm(struct v4l2_subdev *sd, u16 val)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret = -EINVAL;
+ u8 vcm_code;
+
+ ret = ad5823_i2c_read(client, AD5823_REG_VCM_CODE_MSB, &vcm_code);
+ if (ret)
+ return ret;
+
+ /* set reg VCM_CODE_MSB Bit[1:0] */
+ vcm_code = (vcm_code & VCM_CODE_MSB_MASK) |
+ ((val >> 8) & ~VCM_CODE_MSB_MASK);
+ ret = ad5823_i2c_write(client, AD5823_REG_VCM_CODE_MSB, vcm_code);
+ if (ret)
+ return ret;
+
+ /* set reg VCM_CODE_LSB Bit[7:0] */
+ ret = ad5823_i2c_write(client, AD5823_REG_VCM_CODE_LSB, (val & 0xff));
+ if (ret)
+ return ret;
+
+ /* set required vcm move time */
+ vcm_code = AD5823_RESONANCE_PERIOD / AD5823_RESONANCE_COEF
+ - AD5823_HIGH_FREQ_RANGE;
+ ret = ad5823_i2c_write(client, AD5823_REG_VCM_MOVE_TIME, vcm_code);
+
+ return ret;
+}
+
+int ad5823_t_focus_abs(struct v4l2_subdev *sd, s32 value)
+{
+ int ret;
+
+ value = min(value, AD5823_MAX_FOCUS_POS);
+ ret = ad5823_t_focus_vcm(sd, value);
+
+ return ret;
+}
+
+static int ov5693_t_focus_abs(struct v4l2_subdev *sd, s32 value)
+{
+ struct ov5693_device *dev = to_ov5693_sensor(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret = 0;
+
+ dev_dbg(&client->dev, "%s: FOCUS_POS: 0x%x\n", __func__, value);
+ value = clamp(value, 0, OV5693_VCM_MAX_FOCUS_POS);
+ if (dev->vcm == VCM_DW9714) {
+ if (dev->vcm_update) {
+ ret = vcm_dw_i2c_write(client, VCM_PROTECTION_OFF);
+ if (ret)
+ return ret;
+ ret = vcm_dw_i2c_write(client, DIRECT_VCM);
+ if (ret)
+ return ret;
+ ret = vcm_dw_i2c_write(client, VCM_PROTECTION_ON);
+ if (ret)
+ return ret;
+ dev->vcm_update = false;
+ }
+ ret = vcm_dw_i2c_write(client,
+ vcm_val(value, VCM_DEFAULT_S));
+ } else if (dev->vcm == VCM_AD5823) {
+ ret = ad5823_t_focus_abs(sd, value);
+ }
+ if (ret == 0) {
+ dev->number_of_steps = value - dev->focus;
+ dev->focus = value;
+ getnstimeofday(&(dev->timestamp_t_focus_abs));
+ } else
+ dev_err(&client->dev,
+ "%s: i2c failed. ret %d\n", __func__, ret);
+
+ return ret;
+}
+
+static int ov5693_t_focus_rel(struct v4l2_subdev *sd, s32 value)
+{
+ struct ov5693_device *dev = to_ov5693_sensor(sd);
+ return ov5693_t_focus_abs(sd, dev->focus + value);
+}
+
+#define DELAY_PER_STEP_NS 1000000
+#define DELAY_MAX_PER_STEP_NS (1000000 * 1023)
+static int ov5693_q_focus_status(struct v4l2_subdev *sd, s32 *value)
+{
+ u32 status = 0;
+ struct ov5693_device *dev = to_ov5693_sensor(sd);
+ struct timespec temptime;
+ const struct timespec timedelay = {
+ 0,
+ min((u32)abs(dev->number_of_steps) * DELAY_PER_STEP_NS,
+ (u32)DELAY_MAX_PER_STEP_NS),
+ };
+
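+ /* the lens is considered moving until 1 ms per step (capped) has elapsed */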
+ getnstimeofday(&temptime);
+ temptime = timespec_sub(temptime, (dev->timestamp_t_focus_abs));
+ if (timespec_compare(&temptime, &timedelay) <= 0) {
+ status |= ATOMISP_FOCUS_STATUS_MOVING;
+ status |= ATOMISP_FOCUS_HP_IN_PROGRESS;
+ } else {
+ status |= ATOMISP_FOCUS_STATUS_ACCEPTS_NEW_MOVE;
+ status |= ATOMISP_FOCUS_HP_COMPLETE;
+ }
+
+ *value = status;
+
+ return 0;
+}
+
+static int ov5693_q_focus_abs(struct v4l2_subdev *sd, s32 *value)
+{
+ struct ov5693_device *dev = to_ov5693_sensor(sd);
+ s32 val;
+
+ ov5693_q_focus_status(sd, &val);
+
+ if (val & ATOMISP_FOCUS_STATUS_MOVING)
+ *value = dev->focus - dev->number_of_steps;
+ else
+ *value = dev->focus;
+
+ return 0;
+}
+
+static int ov5693_t_vcm_slew(struct v4l2_subdev *sd, s32 value)
+{
+ struct ov5693_device *dev = to_ov5693_sensor(sd);
+ dev->number_of_steps = value;
+ dev->vcm_update = true;
+ return 0;
+}
+
+static int ov5693_t_vcm_timing(struct v4l2_subdev *sd, s32 value)
+{
+ struct ov5693_device *dev = to_ov5693_sensor(sd);
+ dev->number_of_steps = value;
+ dev->vcm_update = true;
+ return 0;
+}
+
+static int ov5693_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct ov5693_device *dev =
+ container_of(ctrl->handler, struct ov5693_device, ctrl_handler);
+ struct i2c_client *client = v4l2_get_subdevdata(&dev->sd);
+ int ret = 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_FOCUS_ABSOLUTE:
+ dev_dbg(&client->dev, "%s: CID_FOCUS_ABSOLUTE:%d.\n",
+ __func__, ctrl->val);
+ ret = ov5693_t_focus_abs(&dev->sd, ctrl->val);
+ break;
+ case V4L2_CID_FOCUS_RELATIVE:
+ dev_dbg(&client->dev, "%s: CID_FOCUS_RELATIVE:%d.\n",
+ __func__, ctrl->val);
+ ret = ov5693_t_focus_rel(&dev->sd, ctrl->val);
+ break;
+ case V4L2_CID_VCM_SLEW:
+ ret = ov5693_t_vcm_slew(&dev->sd, ctrl->val);
+ break;
+ case V4L2_CID_VCM_TIMEING:
+ ret = ov5693_t_vcm_timing(&dev->sd, ctrl->val);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ return ret;
+}
+
+static int ov5693_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct ov5693_device *dev =
+ container_of(ctrl->handler, struct ov5693_device, ctrl_handler);
+ int ret = 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_EXPOSURE_ABSOLUTE:
+ ret = ov5693_q_exposure(&dev->sd, &ctrl->val);
+ break;
+ case V4L2_CID_FOCAL_ABSOLUTE:
+ ret = ov5693_g_focal(&dev->sd, &ctrl->val);
+ break;
+ case V4L2_CID_FNUMBER_ABSOLUTE:
+ ret = ov5693_g_fnumber(&dev->sd, &ctrl->val);
+ break;
+ case V4L2_CID_FNUMBER_RANGE:
+ ret = ov5693_g_fnumber_range(&dev->sd, &ctrl->val);
+ break;
+ case V4L2_CID_FOCUS_ABSOLUTE:
+ ret = ov5693_q_focus_abs(&dev->sd, &ctrl->val);
+ break;
+ case V4L2_CID_FOCUS_STATUS:
+ ret = ov5693_q_focus_status(&dev->sd, &ctrl->val);
+ break;
+ case V4L2_CID_BIN_FACTOR_HORZ:
+ ret = ov5693_g_bin_factor_x(&dev->sd, &ctrl->val);
+ break;
+ case V4L2_CID_BIN_FACTOR_VERT:
+ ret = ov5693_g_bin_factor_y(&dev->sd, &ctrl->val);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops ctrl_ops = {
+ .s_ctrl = ov5693_s_ctrl,
+ .g_volatile_ctrl = ov5693_g_volatile_ctrl
+};
+
+struct v4l2_ctrl_config ov5693_controls[] = {
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_EXPOSURE_ABSOLUTE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "exposure",
+ .min = 0x0,
+ .max = 0xffff,
+ .step = 0x01,
+ .def = 0x00,
+ .flags = 0,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_FOCAL_ABSOLUTE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "focal length",
+ .min = OV5693_FOCAL_LENGTH_DEFAULT,
+ .max = OV5693_FOCAL_LENGTH_DEFAULT,
+ .step = 0x01,
+ .def = OV5693_FOCAL_LENGTH_DEFAULT,
+ .flags = 0,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_FNUMBER_ABSOLUTE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "f-number",
+ .min = OV5693_F_NUMBER_DEFAULT,
+ .max = OV5693_F_NUMBER_DEFAULT,
+ .step = 0x01,
+ .def = OV5693_F_NUMBER_DEFAULT,
+ .flags = 0,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_FNUMBER_RANGE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "f-number range",
+ .min = OV5693_F_NUMBER_RANGE,
+ .max = OV5693_F_NUMBER_RANGE,
+ .step = 0x01,
+ .def = OV5693_F_NUMBER_RANGE,
+ .flags = 0,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_FOCUS_ABSOLUTE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "focus move absolute",
+ .min = 0,
+ .max = OV5693_VCM_MAX_FOCUS_POS,
+ .step = 1,
+ .def = 0,
+ .flags = 0,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_FOCUS_RELATIVE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "focus move relative",
+ .min = OV5693_VCM_MAX_FOCUS_NEG,
+ .max = OV5693_VCM_MAX_FOCUS_POS,
+ .step = 1,
+ .def = 0,
+ .flags = 0,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_FOCUS_STATUS,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "focus status",
+ .min = 0,
+ .max = 100, /* allow enum to grow in the future */
+ .step = 1,
+ .def = 0,
+ .flags = 0,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_VCM_SLEW,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "vcm slew",
+ .min = 0,
+ .max = OV5693_VCM_SLEW_STEP_MAX,
+ .step = 1,
+ .def = 0,
+ .flags = 0,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_VCM_TIMEING,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "vcm step time",
+ .min = 0,
+ .max = OV5693_VCM_SLEW_TIME_MAX,
+ .step = 1,
+ .def = 0,
+ .flags = 0,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_BIN_FACTOR_HORZ,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "horizontal binning factor",
+ .min = 0,
+ .max = OV5693_BIN_FACTOR_MAX,
+ .step = 1,
+ .def = 0,
+ .flags = 0,
+ },
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_BIN_FACTOR_VERT,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "vertical binning factor",
+ .min = 0,
+ .max = OV5693_BIN_FACTOR_MAX,
+ .step = 1,
+ .def = 0,
+ .flags = 0,
+ },
+};
+
+static int ov5693_init(struct v4l2_subdev *sd)
+{
+ struct ov5693_device *dev = to_ov5693_sensor(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret;
+
+ pr_info("%s\n", __func__);
+ mutex_lock(&dev->input_lock);
+ dev->vcm_update = false;
+
+ if (dev->vcm == VCM_AD5823) {
+ ret = vcm_ad_i2c_wr8(client, 0x01, 0x01); /* vcm init test */
+ if (ret)
+ dev_err(&client->dev,
+ "vcm reset failed\n");
+ /*change the mode*/
+ ret = ad5823_i2c_write(client, AD5823_REG_VCM_CODE_MSB,
+ AD5823_RING_CTRL_ENABLE);
+ if (ret)
+ dev_err(&client->dev,
+ "vcm enable ringing failed\n");
+ ret = ad5823_i2c_write(client, AD5823_REG_MODE,
+ AD5823_ARC_RES1);
+ if (ret)
+ dev_err(&client->dev,
+ "vcm change mode failed\n");
+ }
+
+ /*change initial focus value for ad5823*/
+ if (dev->vcm == VCM_AD5823) {
+ dev->focus = AD5823_INIT_FOCUS_POS;
+ ov5693_t_focus_abs(sd, AD5823_INIT_FOCUS_POS);
+ } else {
+ dev->focus = 0;
+ ov5693_t_focus_abs(sd, 0);
+ }
+
+ mutex_unlock(&dev->input_lock);
+
+ return 0;
+}
+
+static int power_ctrl(struct v4l2_subdev *sd, bool flag)
+{
+ int ret;
+ struct ov5693_device *dev = to_ov5693_sensor(sd);
+
+ if (!dev || !dev->platform_data)
+ return -ENODEV;
+
+ /* Non-gmin platforms use the legacy callback */
+ if (dev->platform_data->power_ctrl)
+ return dev->platform_data->power_ctrl(sd, flag);
+
+ /* This driver assumes "internal DVDD, PWDNB tied to DOVDD".
+ * In this setup only gpio0 (XSHUTDN) should be available, but in
+ * some products (for example ECS) gpio1 (PWDNB) is also available.
+ * If gpio1 is available, we emulate it being tied to DOVDD here.
+ */
+ if (flag) {
+ ret = dev->platform_data->v2p8_ctrl(sd, 1);
+ dev->platform_data->gpio1_ctrl(sd, 1);
+ if (ret == 0) {
+ ret = dev->platform_data->v1p8_ctrl(sd, 1);
+ if (ret) {
+ dev->platform_data->gpio1_ctrl(sd, 0);
+ ret = dev->platform_data->v2p8_ctrl(sd, 0);
+ }
+ }
+ } else {
+ dev->platform_data->gpio1_ctrl(sd, 0);
+ ret = dev->platform_data->v1p8_ctrl(sd, 0);
+ ret |= dev->platform_data->v2p8_ctrl(sd, 0);
+ }
+
+ return ret;
+}
+
+static int gpio_ctrl(struct v4l2_subdev *sd, bool flag)
+{
+ int ret;
+ struct ov5693_device *dev = to_ov5693_sensor(sd);
+
+ if (!dev || !dev->platform_data)
+ return -ENODEV;
+
+ /* Non-gmin platforms use the legacy callback */
+ if (dev->platform_data->gpio_ctrl)
+ return dev->platform_data->gpio_ctrl(sd, flag);
+
+ ret = dev->platform_data->gpio0_ctrl(sd, flag);
+
+ return ret;
+}
+
+static int __power_up(struct v4l2_subdev *sd)
+{
+ struct ov5693_device *dev = to_ov5693_sensor(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret;
+
+ if (NULL == dev->platform_data) {
+ dev_err(&client->dev,
+ "no camera_sensor_platform_data");
+ return -ENODEV;
+ }
+
+ /* power control */
+ ret = power_ctrl(sd, 1);
+ if (ret)
+ goto fail_power;
+
+ /*
+ * According to the datasheet, at least 5 ms is needed between DOVDD
+ * and PWDN; use a 10-11 ms delay for extra margin.
+ */
+ usleep_range(10000, 11000);
+
+ /* gpio ctrl */
+ ret = gpio_ctrl(sd, 1);
+ if (ret) {
+ ret = gpio_ctrl(sd, 1);
+ if (ret)
+ goto fail_power;
+ }
+
+ /* flis clock control */
+ ret = dev->platform_data->flisclk_ctrl(sd, 1);
+ if (ret)
+ goto fail_clk;
+
+ __cci_delay(up_delay);
+
+ return 0;
+
+fail_clk:
+ gpio_ctrl(sd, 0);
+fail_power:
+ power_ctrl(sd, 0);
+ dev_err(&client->dev, "sensor power-up failed\n");
+
+ return ret;
+}
+
+static int power_down(struct v4l2_subdev *sd)
+{
+ struct ov5693_device *dev = to_ov5693_sensor(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret = 0;
+
+ dev->focus = OV5693_INVALID_CONFIG;
+ if (NULL == dev->platform_data) {
+ dev_err(&client->dev,
+ "no camera_sensor_platform_data");
+ return -ENODEV;
+ }
+
+ ret = dev->platform_data->flisclk_ctrl(sd, 0);
+ if (ret)
+ dev_err(&client->dev, "flisclk failed\n");
+
+ /* gpio ctrl */
+ ret = gpio_ctrl(sd, 0);
+ if (ret) {
+ ret = gpio_ctrl(sd, 0);
+ if (ret)
+ dev_err(&client->dev, "gpio failed 2\n");
+ }
+
+ /* power control */
+ ret = power_ctrl(sd, 0);
+ if (ret)
+ dev_err(&client->dev, "vprog failed.\n");
+
+ return ret;
+}
+
+static int power_up(struct v4l2_subdev *sd)
+{
+ static const int retry_count = 4;
+ int i, ret;
+
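+ /* retry the full power-up sequence, powering down between attempts */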
+ for (i = 0; i < retry_count; i++) {
+ ret = __power_up(sd);
+ if (!ret)
+ return 0;
+
+ power_down(sd);
+ }
+ return ret;
+}
+
+static int ov5693_s_power(struct v4l2_subdev *sd, int on)
+{
+ int ret;
+
+ pr_info("%s: on %d\n", __func__, on);
+ if (on == 0)
+ return power_down(sd);
+ else {
+ ret = power_up(sd);
+ if (!ret) {
+ ret = ov5693_init(sd);
+ /* restore settings */
+ ov5693_res = ov5693_res_preview;
+ N_RES = N_RES_PREVIEW;
+ }
+ }
+ return ret;
+}
+
+/*
+ * distance - calculate the aspect-ratio distance between a mode and w/h
+ * @res: resolution
+ * @w: width
+ * @h: height
+ *
+ * Get the gap between the res_w/res_h and w/h aspect ratios:
+ * distance = |res_w/res_h - w/h| / (w/h) * 8192
+ * Resolutions smaller than w/h are not considered, nor are ratio gaps
+ * larger than 1/8.
+ * Returns the gap, or -1 if the resolution is not usable.
+ */
+#define LARGEST_ALLOWED_RATIO_MISMATCH 1024
+static int distance(struct ov5693_resolution *res, u32 w, u32 h)
+{
+ int ratio;
+ int distance;
+
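+ /* fixed-point ratio: 8192 * (res_w / res_h) / (w / h) */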
+ if (w == 0 || h == 0 ||
+ res->width < w || res->height < h)
+ return -1;
+
+ ratio = res->width << 13;
+ ratio /= w;
+ ratio *= h;
+ ratio /= res->height;
+
+ distance = abs(ratio - 8192);
+
+ if (distance > LARGEST_ALLOWED_RATIO_MISMATCH)
+ return -1;
+
+ return distance;
+}
+
+/*
+ * Return the index of the nearest higher resolution.
+ * First look for a resolution with approximately the requested aspect
+ * ratio; if several resolutions share that ratio, choose the smallest.
+ */
+static int nearest_resolution_index(int w, int h)
+{
+ int i;
+ int idx = -1;
+ int dist;
+ int min_dist = INT_MAX;
+ int min_res_w = INT_MAX;
+ struct ov5693_resolution *tmp_res = NULL;
+
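+ /* keep the smallest aspect-ratio distance; on ties prefer the narrower mode */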
+ for (i = 0; i < N_RES; i++) {
+ tmp_res = &ov5693_res[i];
+ dist = distance(tmp_res, w, h);
+ if (dist == -1)
+ continue;
+ if (dist < min_dist) {
+ min_dist = dist;
+ idx = i;
+ min_res_w = ov5693_res[i].width;
+ continue;
+ }
+ if (dist == min_dist && ov5693_res[i].width < min_res_w)
+ idx = i;
+ }
+
+ return idx;
+}
+
+static int get_resolution_index(int w, int h)
+{
+ int i;
+
+ for (i = 0; i < N_RES; i++) {
+ if (w != ov5693_res[i].width)
+ continue;
+ if (h != ov5693_res[i].height)
+ continue;
+
+ return i;
+ }
+
+ return -1;
+}
+
+/* TODO: remove it. */
+static int startup(struct v4l2_subdev *sd)
+{
+ struct ov5693_device *dev = to_ov5693_sensor(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret = 0;
+
+ ret = ov5693_write_reg(client, OV5693_8BIT,
+ OV5693_SW_RESET, 0x01);
+ if (ret) {
+ dev_err(&client->dev, "ov5693 reset err.\n");
+ return ret;
+ }
+
+ ret = ov5693_write_reg_array(client, ov5693_global_setting);
+ if (ret) {
+ dev_err(&client->dev, "ov5693 write register err.\n");
+ return ret;
+ }
+
+ ret = ov5693_write_reg_array(client, ov5693_res[dev->fmt_idx].regs);
+ if (ret) {
+ dev_err(&client->dev, "ov5693 write register err.\n");
+ return ret;
+ }
+
+ return ret;
+}
+
+static int ov5693_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *format)
+{
+ struct v4l2_mbus_framefmt *fmt = &format->format;
+ struct ov5693_device *dev = to_ov5693_sensor(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct camera_mipi_info *ov5693_info = NULL;
+ int ret = 0;
+ int idx;
+ if (format->pad)
+ return -EINVAL;
+ if (!fmt)
+ return -EINVAL;
+ ov5693_info = v4l2_get_subdev_hostdata(sd);
+ if (ov5693_info == NULL)
+ return -EINVAL;
+
+ mutex_lock(&dev->input_lock);
+ idx = nearest_resolution_index(fmt->width, fmt->height);
+ if (idx == -1) {
+ /* return the largest resolution */
+ fmt->width = ov5693_res[N_RES - 1].width;
+ fmt->height = ov5693_res[N_RES - 1].height;
+ } else {
+ fmt->width = ov5693_res[idx].width;
+ fmt->height = ov5693_res[idx].height;
+ }
+
+ fmt->code = MEDIA_BUS_FMT_SBGGR10_1X10;
+ if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
+ cfg->try_fmt = *fmt;
+ mutex_unlock(&dev->input_lock);
+ return 0;
+ }
+
+ dev->fmt_idx = get_resolution_index(fmt->width, fmt->height);
+ if (dev->fmt_idx == -1) {
+ dev_err(&client->dev, "get resolution fail\n");
+ mutex_unlock(&dev->input_lock);
+ return -EINVAL;
+ }
+
+ ret = startup(sd);
+ if (ret) {
+ int i = 0;
+ dev_err(&client->dev, "ov5693 startup err, retry to power up\n");
+ for (i = 0; i < OV5693_POWER_UP_RETRY_NUM; i++) {
+ dev_err(&client->dev,
+ "ov5693 retry to power up %d/%d times, result: ",
+ i+1, OV5693_POWER_UP_RETRY_NUM);
+ power_down(sd);
+ ret = power_up(sd);
+ if (!ret) {
+ mutex_unlock(&dev->input_lock);
+ ov5693_init(sd);
+ mutex_lock(&dev->input_lock);
+ } else {
+ dev_err(&client->dev, "power up failed, continue\n");
+ continue;
+ }
+ ret = startup(sd);
+ if (ret) {
+ dev_err(&client->dev, " startup FAILED!\n");
+ } else {
+ dev_err(&client->dev, " startup SUCCESS!\n");
+ break;
+ }
+ }
+ }
+
+ /*
+ * Applying the sensor settings can leave the stream running, which
+ * would cause an ISP timeout because the ISP is not yet ready to
+ * receive data. Explicitly stop streaming here.
+ */
+ ret = ov5693_write_reg(client, OV5693_8BIT, OV5693_SW_STREAM,
+ OV5693_STOP_STREAMING);
+ if (ret)
+ dev_warn(&client->dev, "ov5693 stream off err\n");
+
+ ret = ov5693_get_intg_factor(client, ov5693_info,
+ &ov5693_res[dev->fmt_idx]);
+ if (ret) {
+ dev_err(&client->dev, "failed to get integration_factor\n");
+ goto err;
+ }
+
+ ov5693_info->metadata_width = fmt->width * 10 / 8;
+ ov5693_info->metadata_height = 1;
+ ov5693_info->metadata_effective_width = &ov5693_embedded_effective_size;
+
+err:
+ mutex_unlock(&dev->input_lock);
+ return ret;
+}
+static int ov5693_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *format)
+{
+ struct v4l2_mbus_framefmt *fmt = &format->format;
+ struct ov5693_device *dev = to_ov5693_sensor(sd);
+ if (format->pad)
+ return -EINVAL;
+
+ if (!fmt)
+ return -EINVAL;
+
+ fmt->width = ov5693_res[dev->fmt_idx].width;
+ fmt->height = ov5693_res[dev->fmt_idx].height;
+ fmt->code = MEDIA_BUS_FMT_SBGGR10_1X10;
+
+ return 0;
+}
+
+static int ov5693_detect(struct i2c_client *client)
+{
+ struct i2c_adapter *adapter = client->adapter;
+ u16 high, low;
+ int ret;
+ u16 id;
+ u8 revision;
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_I2C))
+ return -ENODEV;
+
+ ret = ov5693_read_reg(client, OV5693_8BIT,
+ OV5693_SC_CMMN_CHIP_ID_H, &high);
+ if (ret) {
+ dev_err(&client->dev, "failed to read sensor chip ID high byte\n");
+ return -ENODEV;
+ }
+ ret = ov5693_read_reg(client, OV5693_8BIT,
+ OV5693_SC_CMMN_CHIP_ID_L, &low);
+ id = ((((u16) high) << 8) | (u16) low);
+
+ if (id != OV5693_ID) {
+ dev_err(&client->dev, "sensor ID error 0x%x\n", id);
+ return -ENODEV;
+ }
+
+ ret = ov5693_read_reg(client, OV5693_8BIT,
+ OV5693_SC_CMMN_SUB_ID, &high);
+ revision = (u8) high & 0x0f;
+
+ dev_dbg(&client->dev, "sensor_revision = 0x%x\n", revision);
+ dev_dbg(&client->dev, "detect ov5693 success\n");
+ return 0;
+}
+
+static int ov5693_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct ov5693_device *dev = to_ov5693_sensor(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret;
+
+ mutex_lock(&dev->input_lock);
+
+ ret = ov5693_write_reg(client, OV5693_8BIT, OV5693_SW_STREAM,
+ enable ? OV5693_START_STREAMING :
+ OV5693_STOP_STREAMING);
+
+ mutex_unlock(&dev->input_lock);
+
+ return ret;
+}
+
+static int ov5693_s_config(struct v4l2_subdev *sd,
+ int irq, void *platform_data)
+{
+ struct ov5693_device *dev = to_ov5693_sensor(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret = 0;
+
+ if (platform_data == NULL)
+ return -ENODEV;
+
+ dev->platform_data =
+ (struct camera_sensor_platform_data *)platform_data;
+
+ mutex_lock(&dev->input_lock);
+ /* Power off the module, then power it on again later: the first
+ * power-on done by the board may not fulfill the power-on
+ * sequence needed by the module.
+ */
+ ret = power_down(sd);
+ if (ret) {
+ dev_err(&client->dev, "ov5693 power-off err.\n");
+ goto fail_power_off;
+ }
+
+ ret = power_up(sd);
+ if (ret) {
+ dev_err(&client->dev, "ov5693 power-up err.\n");
+ goto fail_power_on;
+ }
+
+ if (!dev->vcm)
+ dev->vcm = vcm_detect(client);
+
+ ret = dev->platform_data->csi_cfg(sd, 1);
+ if (ret)
+ goto fail_csi_cfg;
+
+ /* config & detect sensor */
+ ret = ov5693_detect(client);
+ if (ret) {
+ dev_err(&client->dev, "ov5693_detect err s_config.\n");
+ goto fail_csi_cfg;
+ }
+
+ dev->otp_data = ov5693_otp_read(sd);
+
+ /* turn off sensor, after probed */
+ ret = power_down(sd);
+ if (ret) {
+ dev_err(&client->dev, "ov5693 power-off err.\n");
+ goto fail_csi_cfg;
+ }
+ mutex_unlock(&dev->input_lock);
+
+ return ret;
+
+fail_csi_cfg:
+ dev->platform_data->csi_cfg(sd, 0);
+fail_power_on:
+ power_down(sd);
+ dev_err(&client->dev, "sensor power-gating failed\n");
+fail_power_off:
+ mutex_unlock(&dev->input_lock);
+ return ret;
+}
+
+static int ov5693_g_parm(struct v4l2_subdev *sd,
+ struct v4l2_streamparm *param)
+{
+ struct ov5693_device *dev = to_ov5693_sensor(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ if (!param)
+ return -EINVAL;
+
+ if (param->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+ dev_err(&client->dev, "unsupported buffer type.\n");
+ return -EINVAL;
+ }
+
+ memset(param, 0, sizeof(*param));
+ param->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+
+ if (dev->fmt_idx >= 0 && dev->fmt_idx < N_RES) {
+ param->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
+ param->parm.capture.timeperframe.numerator = 1;
+ param->parm.capture.capturemode = dev->run_mode;
+ param->parm.capture.timeperframe.denominator =
+ ov5693_res[dev->fmt_idx].fps;
+ }
+ return 0;
+}
+
+static int ov5693_s_parm(struct v4l2_subdev *sd,
+ struct v4l2_streamparm *param)
+{
+ struct ov5693_device *dev = to_ov5693_sensor(sd);
+ dev->run_mode = param->parm.capture.capturemode;
+
+ mutex_lock(&dev->input_lock);
+ switch (dev->run_mode) {
+ case CI_MODE_VIDEO:
+ ov5693_res = ov5693_res_video;
+ N_RES = N_RES_VIDEO;
+ break;
+ case CI_MODE_STILL_CAPTURE:
+ ov5693_res = ov5693_res_still;
+ N_RES = N_RES_STILL;
+ break;
+ default:
+ ov5693_res = ov5693_res_preview;
+ N_RES = N_RES_PREVIEW;
+ }
+ mutex_unlock(&dev->input_lock);
+ return 0;
+}
+
+static int ov5693_g_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_frame_interval *interval)
+{
+ struct ov5693_device *dev = to_ov5693_sensor(sd);
+
+ interval->interval.numerator = 1;
+ interval->interval.denominator = ov5693_res[dev->fmt_idx].fps;
+
+ return 0;
+}
+
+static int ov5693_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ if (code->index >= MAX_FMTS)
+ return -EINVAL;
+
+ code->code = MEDIA_BUS_FMT_SBGGR10_1X10;
+ return 0;
+}
+
+static int ov5693_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ int index = fse->index;
+
+ if (index >= N_RES)
+ return -EINVAL;
+
+ fse->min_width = ov5693_res[index].width;
+ fse->min_height = ov5693_res[index].height;
+ fse->max_width = ov5693_res[index].width;
+ fse->max_height = ov5693_res[index].height;
+
+ return 0;
+}
+
+static const struct v4l2_subdev_video_ops ov5693_video_ops = {
+ .s_stream = ov5693_s_stream,
+ .g_parm = ov5693_g_parm,
+ .s_parm = ov5693_s_parm,
+ .g_frame_interval = ov5693_g_frame_interval,
+};
+
+static const struct v4l2_subdev_core_ops ov5693_core_ops = {
+ .s_power = ov5693_s_power,
+ .ioctl = ov5693_ioctl,
+};
+
+static const struct v4l2_subdev_pad_ops ov5693_pad_ops = {
+ .enum_mbus_code = ov5693_enum_mbus_code,
+ .enum_frame_size = ov5693_enum_frame_size,
+ .get_fmt = ov5693_get_fmt,
+ .set_fmt = ov5693_set_fmt,
+};
+
+static const struct v4l2_subdev_ops ov5693_ops = {
+ .core = &ov5693_core_ops,
+ .video = &ov5693_video_ops,
+ .pad = &ov5693_pad_ops,
+};
+
+static int ov5693_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct ov5693_device *dev = to_ov5693_sensor(sd);
+ dev_dbg(&client->dev, "ov5693_remove...\n");
+
+ dev->platform_data->csi_cfg(sd, 0);
+
+ v4l2_device_unregister_subdev(sd);
+
+ atomisp_gmin_remove_subdev(sd);
+
+ media_entity_cleanup(&dev->sd.entity);
+ v4l2_ctrl_handler_free(&dev->ctrl_handler);
+ kfree(dev);
+
+ return 0;
+}
+
+static int ov5693_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct ov5693_device *dev;
+ int i2c;
+ int ret = 0;
+ void *pdata = client->dev.platform_data;
+ struct acpi_device *adev;
+ unsigned int i;
+
+ /* Firmware workaround: Some modules use a "secondary default"
+ * address of 0x10 which doesn't appear on schematics, and
+ * some BIOS versions haven't gotten the memo. Work around
+ * via config. */
+ i2c = gmin_get_var_int(&client->dev, "I2CAddr", -1);
+ if (i2c != -1) {
+ dev_info(&client->dev,
+ "Overriding firmware-provided I2C address (0x%x) with 0x%x\n",
+ client->addr, i2c);
+ client->addr = i2c;
+ }
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev) {
+ dev_err(&client->dev, "out of memory\n");
+ return -ENOMEM;
+ }
+
+ mutex_init(&dev->input_lock);
+
+ dev->fmt_idx = 0;
+ v4l2_i2c_subdev_init(&(dev->sd), client, &ov5693_ops);
+
+ adev = ACPI_COMPANION(&client->dev);
+ if (adev) {
+ adev->power.flags.power_resources = 0;
+ pdata = gmin_camera_platform_data(&dev->sd,
+ ATOMISP_INPUT_FORMAT_RAW_10,
+ atomisp_bayer_order_bggr);
+ }
+
+ if (!pdata)
+ goto out_free;
+
+ ret = ov5693_s_config(&dev->sd, client->irq, pdata);
+ if (ret)
+ goto out_free;
+
+ ret = atomisp_register_i2c_module(&dev->sd, pdata, RAW_CAMERA);
+ if (ret)
+ goto out_free;
+
+ dev->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ dev->pad.flags = MEDIA_PAD_FL_SOURCE;
+ dev->format.code = MEDIA_BUS_FMT_SBGGR10_1X10;
+ dev->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
+ ret = v4l2_ctrl_handler_init(&dev->ctrl_handler,
+ ARRAY_SIZE(ov5693_controls));
+ if (ret) {
+ ov5693_remove(client);
+ return ret;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(ov5693_controls); i++)
+ v4l2_ctrl_new_custom(&dev->ctrl_handler, &ov5693_controls[i],
+ NULL);
+
+ if (dev->ctrl_handler.error) {
+ ov5693_remove(client);
+ return dev->ctrl_handler.error;
+ }
+
+ /* Use same lock for controls as for everything else. */
+ dev->ctrl_handler.lock = &dev->input_lock;
+ dev->sd.ctrl_handler = &dev->ctrl_handler;
+
+ ret = media_entity_pads_init(&dev->sd.entity, 1, &dev->pad);
+ if (ret)
+ ov5693_remove(client);
+
+ return ret;
+out_free:
+ v4l2_device_unregister_subdev(&dev->sd);
+ kfree(dev);
+ return ret;
+}
+
+MODULE_DEVICE_TABLE(i2c, ov5693_id);
+
+static struct acpi_device_id ov5693_acpi_match[] = {
+ {"INT33BE"},
+ {},
+};
+MODULE_DEVICE_TABLE(acpi, ov5693_acpi_match);
+
+static struct i2c_driver ov5693_driver = {
+ .driver = {
+ .name = OV5693_NAME,
+ .acpi_match_table = ACPI_PTR(ov5693_acpi_match),
+ },
+ .probe = ov5693_probe,
+ .remove = ov5693_remove,
+ .id_table = ov5693_id,
+};
+
+static int init_ov5693(void)
+{
+ return i2c_add_driver(&ov5693_driver);
+}
+
+static void exit_ov5693(void)
+{
+
+ i2c_del_driver(&ov5693_driver);
+}
+
+module_init(init_ov5693);
+module_exit(exit_ov5693);
+
+MODULE_DESCRIPTION("A low-level driver for OmniVision 5693 sensors");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/media/atomisp/i2c/ov5693/ov5693.h b/drivers/staging/media/atomisp/i2c/ov5693/ov5693.h
new file mode 100644
index 000000000000..d88ac1777d86
--- /dev/null
+++ b/drivers/staging/media/atomisp/i2c/ov5693/ov5693.h
@@ -0,0 +1,1381 @@
+/*
+ * Support for OmniVision OV5693 5M camera sensor.
+ *
+ * Copyright (c) 2013 Intel Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+
+#ifndef __OV5693_H__
+#define __OV5693_H__
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/i2c.h>
+#include <linux/delay.h>
+#include <linux/videodev2.h>
+#include <linux/spinlock.h>
+#include <media/v4l2-subdev.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ctrls.h>
+#include <linux/v4l2-mediabus.h>
+#include <media/media-entity.h>
+
+#include "../../include/linux/atomisp_platform.h"
+
+#define OV5693_NAME "ov5693"
+
+#define OV5693_POWER_UP_RETRY_NUM 5
+
+/* Defines for register writes and register array processing */
+#define I2C_MSG_LENGTH 0x2
+#define I2C_RETRY_COUNT 5
+
+#define OV5693_FOCAL_LENGTH_NUM 334 /*3.34mm*/
+#define OV5693_FOCAL_LENGTH_DEM 100
+#define OV5693_F_NUMBER_DEFAULT_NUM 24
+#define OV5693_F_NUMBER_DEM 10
+
+#define MAX_FMTS 1
+
+/* sensor_mode_data read_mode adaptation */
+#define OV5693_READ_MODE_BINNING_ON 0x0400
+#define OV5693_READ_MODE_BINNING_OFF 0x00
+#define OV5693_INTEGRATION_TIME_MARGIN 8
+
+#define OV5693_MAX_EXPOSURE_VALUE 0xFFF1
+#define OV5693_MAX_GAIN_VALUE 0xFF
+
+/*
+ * focal length bits definition:
+ * bits 31-16: numerator, bits 15-0: denominator
+ */
+#define OV5693_FOCAL_LENGTH_DEFAULT 0x1B70064
+
+/*
+ * current f-number bits definition:
+ * bits 31-16: numerator, bits 15-0: denominator
+ */
+#define OV5693_F_NUMBER_DEFAULT 0x18000a
+
+/*
+ * f-number range bits definition:
+ * bits 31-24: max f-number numerator
+ * bits 23-16: max f-number denominator
+ * bits 15-8: min f-number numerator
+ * bits 7-0: min f-number denominator
+ */
+#define OV5693_F_NUMBER_RANGE 0x180a180a
+#define OV5693_ID 0x5690
+
+#define OV5693_FINE_INTG_TIME_MIN 0
+#define OV5693_FINE_INTG_TIME_MAX_MARGIN 0
+#define OV5693_COARSE_INTG_TIME_MIN 1
+#define OV5693_COARSE_INTG_TIME_MAX_MARGIN 6
+
+#define OV5693_BIN_FACTOR_MAX 4
+/*
+ * OV5693 System control registers
+ */
+#define OV5693_SW_SLEEP 0x0100
+#define OV5693_SW_RESET 0x0103
+#define OV5693_SW_STREAM 0x0100
+
+#define OV5693_SC_CMMN_CHIP_ID_H 0x300A
+#define OV5693_SC_CMMN_CHIP_ID_L 0x300B
+#define OV5693_SC_CMMN_SCCB_ID 0x300C
+#define OV5693_SC_CMMN_SUB_ID 0x302A /* process, version*/
+/*Bit[7:4] Group control, Bit[3:0] Group ID*/
+#define OV5693_GROUP_ACCESS 0x3208
+/*
+ * Bit[3:0]: Bit[19:16] of exposure; the remaining 16 bits are in
+ * Reg0x3501 and Reg0x3502.
+ */
+#define OV5693_EXPOSURE_H 0x3500
+#define OV5693_EXPOSURE_M 0x3501
+#define OV5693_EXPOSURE_L 0x3502
+/*Bit[1:0] means Bit[9:8] of gain*/
+#define OV5693_AGC_H 0x350A
+#define OV5693_AGC_L 0x350B /*Bit[7:0] of gain*/
+
+#define OV5693_HORIZONTAL_START_H 0x3800 /*Bit[11:8]*/
+#define OV5693_HORIZONTAL_START_L 0x3801 /*Bit[7:0]*/
+#define OV5693_VERTICAL_START_H 0x3802 /*Bit[11:8]*/
+#define OV5693_VERTICAL_START_L 0x3803 /*Bit[7:0]*/
+#define OV5693_HORIZONTAL_END_H 0x3804 /*Bit[11:8]*/
+#define OV5693_HORIZONTAL_END_L 0x3805 /*Bit[7:0]*/
+#define OV5693_VERTICAL_END_H 0x3806 /*Bit[11:8]*/
+#define OV5693_VERTICAL_END_L 0x3807 /*Bit[7:0]*/
+#define OV5693_HORIZONTAL_OUTPUT_SIZE_H 0x3808 /*Bit[3:0]*/
+#define OV5693_HORIZONTAL_OUTPUT_SIZE_L 0x3809 /*Bit[7:0]*/
+#define OV5693_VERTICAL_OUTPUT_SIZE_H 0x380a /*Bit[3:0]*/
+#define OV5693_VERTICAL_OUTPUT_SIZE_L 0x380b /*Bit[7:0]*/
+/* high 8 bits of HTS; the low 8 bits are at 0x380d */
+#define OV5693_TIMING_HTS_H 0x380C
+/* low 8 bits of HTS */
+#define OV5693_TIMING_HTS_L 0x380D
+/* high 8 bits of VTS; the low 8 bits are at 0x380f */
+#define OV5693_TIMING_VTS_H 0x380e
+/* low 8 bits of VTS */
+#define OV5693_TIMING_VTS_L 0x380f
+
+#define OV5693_MWB_RED_GAIN_H 0x3400
+#define OV5693_MWB_GREEN_GAIN_H 0x3402
+#define OV5693_MWB_BLUE_GAIN_H 0x3404
+#define OV5693_MWB_GAIN_MAX 0x0fff
+
+#define OV5693_START_STREAMING 0x01
+#define OV5693_STOP_STREAMING 0x00
+
+#define VCM_ADDR 0x0c
+#define VCM_CODE_MSB 0x04
+
+#define OV5693_INVALID_CONFIG 0xffffffff
+
+#define OV5693_VCM_SLEW_STEP 0x30F0
+#define OV5693_VCM_SLEW_STEP_MAX 0x7
+#define OV5693_VCM_SLEW_STEP_MASK 0x7
+#define OV5693_VCM_CODE 0x30F2
+#define OV5693_VCM_SLEW_TIME 0x30F4
+#define OV5693_VCM_SLEW_TIME_MAX 0xffff
+#define OV5693_VCM_ENABLE 0x8000
+
+#define OV5693_VCM_MAX_FOCUS_NEG -1023
+#define OV5693_VCM_MAX_FOCUS_POS 1023
+
+#define DLC_ENABLE 1
+#define DLC_DISABLE 0
+#define VCM_PROTECTION_OFF 0xeca3
+#define VCM_PROTECTION_ON 0xdc51
+#define VCM_DEFAULT_S 0x0
+#define vcm_step_s(a) ((u8)((a) & 0xf))
+#define vcm_step_mclk(a) ((u8)(((a) >> 4) & 0x3))
+#define vcm_dlc_mclk(dlc, mclk) ((u16)(((dlc) << 3) | (mclk) | 0xa104))
+#define vcm_tsrc(tsrc) ((u16)((tsrc) << 3 | 0xf200))
+#define vcm_val(data, s) ((u16)((data) << 4 | (s)))
+#define DIRECT_VCM vcm_dlc_mclk(0, 0)
+
+/* Defines for OTP Data Registers */
+#define OV5693_FRAME_OFF_NUM 0x4202
+#define OV5693_OTP_BYTE_MAX 32 /* change to 32 as needed by otpdata */
+#define OV5693_OTP_SHORT_MAX 16
+#define OV5693_OTP_START_ADDR 0x3D00
+#define OV5693_OTP_END_ADDR 0x3D0F
+#define OV5693_OTP_DATA_SIZE 320
+#define OV5693_OTP_PROGRAM_REG 0x3D80
+#define OV5693_OTP_READ_REG 0x3D81 /* 1: enable, 0: disable */
+#define OV5693_OTP_BANK_REG 0x3D84 /* otp bank and mode */
+#define OV5693_OTP_READY_REG_DONE 1
+#define OV5693_OTP_BANK_MAX 28
+#define OV5693_OTP_BANK_SIZE 16 /* 16 bytes per bank */
+#define OV5693_OTP_READ_ONETIME 16
+#define OV5693_OTP_MODE_READ 1
+
+struct regval_list {
+ u16 reg_num;
+ u8 value;
+};
+
+struct ov5693_resolution {
+ u8 *desc;
+ const struct ov5693_reg *regs;
+ int res;
+ int width;
+ int height;
+ int fps;
+ int pix_clk_freq;
+ u16 pixels_per_line;
+ u16 lines_per_frame;
+ u8 bin_factor_x;
+ u8 bin_factor_y;
+ u8 bin_mode;
+ bool used;
+};
+
+struct ov5693_format {
+ u8 *desc;
+ u32 pixelformat;
+ struct ov5693_reg *regs;
+};
+
+enum vcm_type {
+ VCM_UNKNOWN,
+ VCM_AD5823,
+ VCM_DW9714,
+};
+
+/*
+ * ov5693 device structure.
+ */
+struct ov5693_device {
+ struct v4l2_subdev sd;
+ struct media_pad pad;
+ struct v4l2_mbus_framefmt format;
+ struct mutex input_lock;
+ struct v4l2_ctrl_handler ctrl_handler;
+
+ struct camera_sensor_platform_data *platform_data;
+ struct timespec timestamp_t_focus_abs;
+ int vt_pix_clk_freq_mhz;
+ int fmt_idx;
+ int run_mode;
+ int otp_size;
+ u8 *otp_data;
+ u32 focus;
+ s16 number_of_steps;
+ u8 res;
+ u8 type;
+ bool vcm_update;
+ enum vcm_type vcm;
+};
+
+enum ov5693_tok_type {
+ OV5693_8BIT = 0x0001,
+ OV5693_16BIT = 0x0002,
+ OV5693_32BIT = 0x0004,
+ OV5693_TOK_TERM = 0xf000, /* terminating token for reg list */
+ OV5693_TOK_DELAY = 0xfe00, /* delay token for reg list */
+ OV5693_TOK_MASK = 0xfff0
+};
+
+/**
+ * struct ov5693_reg - sensor register format
+ * @type: type of the register
+ * @reg: 16-bit offset to register
+ * @val: 8/16/32-bit register value
+ *
+ * Define a structure for sensor register initialization values
+ */
+struct ov5693_reg {
+ enum ov5693_tok_type type;
+ u16 reg;
+ u32 val; /* @set value for read/mod/write, @mask */
+};
+
+#define to_ov5693_sensor(x) container_of(x, struct ov5693_device, sd)
+
+#define OV5693_MAX_WRITE_BUF_SIZE 30
+
+struct ov5693_write_buffer {
+ u16 addr;
+ u8 data[OV5693_MAX_WRITE_BUF_SIZE];
+};
+
+struct ov5693_write_ctrl {
+ int index;
+ struct ov5693_write_buffer buffer;
+};
+
+static const struct i2c_device_id ov5693_id[] = {
+ {OV5693_NAME, 0},
+ {}
+};
+
+static struct ov5693_reg const ov5693_global_setting[] = {
+ {OV5693_8BIT, 0x0103, 0x01},
+ {OV5693_8BIT, 0x3001, 0x0a},
+ {OV5693_8BIT, 0x3002, 0x80},
+ {OV5693_8BIT, 0x3006, 0x00},
+ {OV5693_8BIT, 0x3011, 0x21},
+ {OV5693_8BIT, 0x3012, 0x09},
+ {OV5693_8BIT, 0x3013, 0x10},
+ {OV5693_8BIT, 0x3014, 0x00},
+ {OV5693_8BIT, 0x3015, 0x08},
+ {OV5693_8BIT, 0x3016, 0xf0},
+ {OV5693_8BIT, 0x3017, 0xf0},
+ {OV5693_8BIT, 0x3018, 0xf0},
+ {OV5693_8BIT, 0x301b, 0xb4},
+ {OV5693_8BIT, 0x301d, 0x02},
+ {OV5693_8BIT, 0x3021, 0x00},
+ {OV5693_8BIT, 0x3022, 0x01},
+ {OV5693_8BIT, 0x3028, 0x44},
+ {OV5693_8BIT, 0x3098, 0x02},
+ {OV5693_8BIT, 0x3099, 0x19},
+ {OV5693_8BIT, 0x309a, 0x02},
+ {OV5693_8BIT, 0x309b, 0x01},
+ {OV5693_8BIT, 0x309c, 0x00},
+ {OV5693_8BIT, 0x30a0, 0xd2},
+ {OV5693_8BIT, 0x30a2, 0x01},
+ {OV5693_8BIT, 0x30b2, 0x00},
+ {OV5693_8BIT, 0x30b3, 0x7d},
+ {OV5693_8BIT, 0x30b4, 0x03},
+ {OV5693_8BIT, 0x30b5, 0x04},
+ {OV5693_8BIT, 0x30b6, 0x01},
+ {OV5693_8BIT, 0x3104, 0x21},
+ {OV5693_8BIT, 0x3106, 0x00},
+ {OV5693_8BIT, 0x3400, 0x04},
+ {OV5693_8BIT, 0x3401, 0x00},
+ {OV5693_8BIT, 0x3402, 0x04},
+ {OV5693_8BIT, 0x3403, 0x00},
+ {OV5693_8BIT, 0x3404, 0x04},
+ {OV5693_8BIT, 0x3405, 0x00},
+ {OV5693_8BIT, 0x3406, 0x01},
+ {OV5693_8BIT, 0x3500, 0x00},
+ {OV5693_8BIT, 0x3503, 0x07},
+ {OV5693_8BIT, 0x3504, 0x00},
+ {OV5693_8BIT, 0x3505, 0x00},
+ {OV5693_8BIT, 0x3506, 0x00},
+ {OV5693_8BIT, 0x3507, 0x02},
+ {OV5693_8BIT, 0x3508, 0x00},
+ {OV5693_8BIT, 0x3509, 0x10},
+ {OV5693_8BIT, 0x350a, 0x00},
+ {OV5693_8BIT, 0x350b, 0x40},
+ {OV5693_8BIT, 0x3601, 0x0a},
+ {OV5693_8BIT, 0x3602, 0x38},
+ {OV5693_8BIT, 0x3612, 0x80},
+ {OV5693_8BIT, 0x3620, 0x54},
+ {OV5693_8BIT, 0x3621, 0xc7},
+ {OV5693_8BIT, 0x3622, 0x0f},
+ {OV5693_8BIT, 0x3625, 0x10},
+ {OV5693_8BIT, 0x3630, 0x55},
+ {OV5693_8BIT, 0x3631, 0xf4},
+ {OV5693_8BIT, 0x3632, 0x00},
+ {OV5693_8BIT, 0x3633, 0x34},
+ {OV5693_8BIT, 0x3634, 0x02},
+ {OV5693_8BIT, 0x364d, 0x0d},
+ {OV5693_8BIT, 0x364f, 0xdd},
+ {OV5693_8BIT, 0x3660, 0x04},
+ {OV5693_8BIT, 0x3662, 0x10},
+ {OV5693_8BIT, 0x3663, 0xf1},
+ {OV5693_8BIT, 0x3665, 0x00},
+ {OV5693_8BIT, 0x3666, 0x20},
+ {OV5693_8BIT, 0x3667, 0x00},
+ {OV5693_8BIT, 0x366a, 0x80},
+ {OV5693_8BIT, 0x3680, 0xe0},
+ {OV5693_8BIT, 0x3681, 0x00},
+ {OV5693_8BIT, 0x3700, 0x42},
+ {OV5693_8BIT, 0x3701, 0x14},
+ {OV5693_8BIT, 0x3702, 0xa0},
+ {OV5693_8BIT, 0x3703, 0xd8},
+ {OV5693_8BIT, 0x3704, 0x78},
+ {OV5693_8BIT, 0x3705, 0x02},
+ {OV5693_8BIT, 0x370a, 0x00},
+ {OV5693_8BIT, 0x370b, 0x20},
+ {OV5693_8BIT, 0x370c, 0x0c},
+ {OV5693_8BIT, 0x370d, 0x11},
+ {OV5693_8BIT, 0x370e, 0x00},
+ {OV5693_8BIT, 0x370f, 0x40},
+ {OV5693_8BIT, 0x3710, 0x00},
+ {OV5693_8BIT, 0x371a, 0x1c},
+ {OV5693_8BIT, 0x371b, 0x05},
+ {OV5693_8BIT, 0x371c, 0x01},
+ {OV5693_8BIT, 0x371e, 0xa1},
+ {OV5693_8BIT, 0x371f, 0x0c},
+ {OV5693_8BIT, 0x3721, 0x00},
+ {OV5693_8BIT, 0x3724, 0x10},
+ {OV5693_8BIT, 0x3726, 0x00},
+ {OV5693_8BIT, 0x372a, 0x01},
+ {OV5693_8BIT, 0x3730, 0x10},
+ {OV5693_8BIT, 0x3738, 0x22},
+ {OV5693_8BIT, 0x3739, 0xe5},
+ {OV5693_8BIT, 0x373a, 0x50},
+ {OV5693_8BIT, 0x373b, 0x02},
+ {OV5693_8BIT, 0x373c, 0x41},
+ {OV5693_8BIT, 0x373f, 0x02},
+ {OV5693_8BIT, 0x3740, 0x42},
+ {OV5693_8BIT, 0x3741, 0x02},
+ {OV5693_8BIT, 0x3742, 0x18},
+ {OV5693_8BIT, 0x3743, 0x01},
+ {OV5693_8BIT, 0x3744, 0x02},
+ {OV5693_8BIT, 0x3747, 0x10},
+ {OV5693_8BIT, 0x374c, 0x04},
+ {OV5693_8BIT, 0x3751, 0xf0},
+ {OV5693_8BIT, 0x3752, 0x00},
+ {OV5693_8BIT, 0x3753, 0x00},
+ {OV5693_8BIT, 0x3754, 0xc0},
+ {OV5693_8BIT, 0x3755, 0x00},
+ {OV5693_8BIT, 0x3756, 0x1a},
+ {OV5693_8BIT, 0x3758, 0x00},
+ {OV5693_8BIT, 0x3759, 0x0f},
+ {OV5693_8BIT, 0x376b, 0x44},
+ {OV5693_8BIT, 0x375c, 0x04},
+ {OV5693_8BIT, 0x3774, 0x10},
+ {OV5693_8BIT, 0x3776, 0x00},
+ {OV5693_8BIT, 0x377f, 0x08},
+ {OV5693_8BIT, 0x3780, 0x22},
+ {OV5693_8BIT, 0x3781, 0x0c},
+ {OV5693_8BIT, 0x3784, 0x2c},
+ {OV5693_8BIT, 0x3785, 0x1e},
+ {OV5693_8BIT, 0x378f, 0xf5},
+ {OV5693_8BIT, 0x3791, 0xb0},
+ {OV5693_8BIT, 0x3795, 0x00},
+ {OV5693_8BIT, 0x3796, 0x64},
+ {OV5693_8BIT, 0x3797, 0x11},
+ {OV5693_8BIT, 0x3798, 0x30},
+ {OV5693_8BIT, 0x3799, 0x41},
+ {OV5693_8BIT, 0x379a, 0x07},
+ {OV5693_8BIT, 0x379b, 0xb0},
+ {OV5693_8BIT, 0x379c, 0x0c},
+ {OV5693_8BIT, 0x37c5, 0x00},
+ {OV5693_8BIT, 0x37c6, 0x00},
+ {OV5693_8BIT, 0x37c7, 0x00},
+ {OV5693_8BIT, 0x37c9, 0x00},
+ {OV5693_8BIT, 0x37ca, 0x00},
+ {OV5693_8BIT, 0x37cb, 0x00},
+ {OV5693_8BIT, 0x37de, 0x00},
+ {OV5693_8BIT, 0x37df, 0x00},
+ {OV5693_8BIT, 0x3800, 0x00},
+ {OV5693_8BIT, 0x3801, 0x00},
+ {OV5693_8BIT, 0x3802, 0x00},
+ {OV5693_8BIT, 0x3804, 0x0a},
+ {OV5693_8BIT, 0x3805, 0x3f},
+ {OV5693_8BIT, 0x3810, 0x00},
+ {OV5693_8BIT, 0x3812, 0x00},
+ {OV5693_8BIT, 0x3823, 0x00},
+ {OV5693_8BIT, 0x3824, 0x00},
+ {OV5693_8BIT, 0x3825, 0x00},
+ {OV5693_8BIT, 0x3826, 0x00},
+ {OV5693_8BIT, 0x3827, 0x00},
+ {OV5693_8BIT, 0x382a, 0x04},
+ {OV5693_8BIT, 0x3a04, 0x06},
+ {OV5693_8BIT, 0x3a05, 0x14},
+ {OV5693_8BIT, 0x3a06, 0x00},
+ {OV5693_8BIT, 0x3a07, 0xfe},
+ {OV5693_8BIT, 0x3b00, 0x00},
+ {OV5693_8BIT, 0x3b02, 0x00},
+ {OV5693_8BIT, 0x3b03, 0x00},
+ {OV5693_8BIT, 0x3b04, 0x00},
+ {OV5693_8BIT, 0x3b05, 0x00},
+ {OV5693_8BIT, 0x3e07, 0x20},
+ {OV5693_8BIT, 0x4000, 0x08},
+ {OV5693_8BIT, 0x4001, 0x04},
+ {OV5693_8BIT, 0x4002, 0x45},
+ {OV5693_8BIT, 0x4004, 0x08},
+ {OV5693_8BIT, 0x4005, 0x18},
+ {OV5693_8BIT, 0x4006, 0x20},
+ {OV5693_8BIT, 0x4008, 0x24},
+ {OV5693_8BIT, 0x4009, 0x10},
+ {OV5693_8BIT, 0x400c, 0x00},
+ {OV5693_8BIT, 0x400d, 0x00},
+ {OV5693_8BIT, 0x4058, 0x00},
+ {OV5693_8BIT, 0x404e, 0x37},
+ {OV5693_8BIT, 0x404f, 0x8f},
+ {OV5693_8BIT, 0x4058, 0x00},
+ {OV5693_8BIT, 0x4101, 0xb2},
+ {OV5693_8BIT, 0x4303, 0x00},
+ {OV5693_8BIT, 0x4304, 0x08},
+ {OV5693_8BIT, 0x4307, 0x31},
+ {OV5693_8BIT, 0x4311, 0x04},
+ {OV5693_8BIT, 0x4315, 0x01},
+ {OV5693_8BIT, 0x4511, 0x05},
+ {OV5693_8BIT, 0x4512, 0x01},
+ {OV5693_8BIT, 0x4806, 0x00},
+ {OV5693_8BIT, 0x4816, 0x52},
+ {OV5693_8BIT, 0x481f, 0x30},
+ {OV5693_8BIT, 0x4826, 0x2c},
+ {OV5693_8BIT, 0x4831, 0x64},
+ {OV5693_8BIT, 0x4d00, 0x04},
+ {OV5693_8BIT, 0x4d01, 0x71},
+ {OV5693_8BIT, 0x4d02, 0xfd},
+ {OV5693_8BIT, 0x4d03, 0xf5},
+ {OV5693_8BIT, 0x4d04, 0x0c},
+ {OV5693_8BIT, 0x4d05, 0xcc},
+ {OV5693_8BIT, 0x4837, 0x0a},
+ {OV5693_8BIT, 0x5000, 0x06},
+ {OV5693_8BIT, 0x5001, 0x01},
+ {OV5693_8BIT, 0x5003, 0x20},
+ {OV5693_8BIT, 0x5046, 0x0a},
+ {OV5693_8BIT, 0x5013, 0x00},
+ {OV5693_8BIT, 0x5046, 0x0a},
+ {OV5693_8BIT, 0x5780, 0x1c},
+ {OV5693_8BIT, 0x5786, 0x20},
+ {OV5693_8BIT, 0x5787, 0x10},
+ {OV5693_8BIT, 0x5788, 0x18},
+ {OV5693_8BIT, 0x578a, 0x04},
+ {OV5693_8BIT, 0x578b, 0x02},
+ {OV5693_8BIT, 0x578c, 0x02},
+ {OV5693_8BIT, 0x578e, 0x06},
+ {OV5693_8BIT, 0x578f, 0x02},
+ {OV5693_8BIT, 0x5790, 0x02},
+ {OV5693_8BIT, 0x5791, 0xff},
+ {OV5693_8BIT, 0x5842, 0x01},
+ {OV5693_8BIT, 0x5843, 0x2b},
+ {OV5693_8BIT, 0x5844, 0x01},
+ {OV5693_8BIT, 0x5845, 0x92},
+ {OV5693_8BIT, 0x5846, 0x01},
+ {OV5693_8BIT, 0x5847, 0x8f},
+ {OV5693_8BIT, 0x5848, 0x01},
+ {OV5693_8BIT, 0x5849, 0x0c},
+ {OV5693_8BIT, 0x5e00, 0x00},
+ {OV5693_8BIT, 0x5e10, 0x0c},
+ {OV5693_8BIT, 0x0100, 0x00},
+ {OV5693_TOK_TERM, 0, 0}
+};
+
+/*
+ * 654x496 30fps 17ms VBlanking 2lane 10Bit (Scaling)
+ */
+static struct ov5693_reg const ov5693_654x496[] = {
+ {OV5693_8BIT, 0x3501, 0x3d},
+ {OV5693_8BIT, 0x3502, 0x00},
+ {OV5693_8BIT, 0x3708, 0xe6},
+ {OV5693_8BIT, 0x3709, 0xc7},
+ {OV5693_8BIT, 0x3803, 0x00},
+ {OV5693_8BIT, 0x3806, 0x07},
+ {OV5693_8BIT, 0x3807, 0xa3},
+ {OV5693_8BIT, 0x3808, 0x02},
+ {OV5693_8BIT, 0x3809, 0x90},
+ {OV5693_8BIT, 0x380a, 0x01},
+ {OV5693_8BIT, 0x380b, 0xf0},
+ {OV5693_8BIT, 0x380c, 0x0a},
+ {OV5693_8BIT, 0x380d, 0x80},
+ {OV5693_8BIT, 0x380e, 0x07},
+ {OV5693_8BIT, 0x380f, 0xc0},
+ {OV5693_8BIT, 0x3811, 0x08},
+ {OV5693_8BIT, 0x3813, 0x02},
+ {OV5693_8BIT, 0x3814, 0x31},
+ {OV5693_8BIT, 0x3815, 0x31},
+ {OV5693_8BIT, 0x3820, 0x04},
+ {OV5693_8BIT, 0x3821, 0x1f},
+ {OV5693_8BIT, 0x5002, 0x80},
+ {OV5693_8BIT, 0x0100, 0x01},
+ {OV5693_TOK_TERM, 0, 0}
+};
+
+/*
+ * 1296x976 30fps 17ms VBlanking 2lane 10Bit (Scaling)
+ * DS from 2592x1952
+ */
+static struct ov5693_reg const ov5693_1296x976[] = {
+ {OV5693_8BIT, 0x3501, 0x7b},
+ {OV5693_8BIT, 0x3502, 0x00},
+ {OV5693_8BIT, 0x3708, 0xe2},
+ {OV5693_8BIT, 0x3709, 0xc3},
+
+ {OV5693_8BIT, 0x3800, 0x00},
+ {OV5693_8BIT, 0x3801, 0x00},
+ {OV5693_8BIT, 0x3802, 0x00},
+ {OV5693_8BIT, 0x3803, 0x00},
+
+ {OV5693_8BIT, 0x3804, 0x0a},
+ {OV5693_8BIT, 0x3805, 0x3f},
+ {OV5693_8BIT, 0x3806, 0x07},
+ {OV5693_8BIT, 0x3807, 0xA3},
+
+ {OV5693_8BIT, 0x3808, 0x05},
+ {OV5693_8BIT, 0x3809, 0x10},
+ {OV5693_8BIT, 0x380a, 0x03},
+ {OV5693_8BIT, 0x380b, 0xD0},
+
+ {OV5693_8BIT, 0x380c, 0x0a},
+ {OV5693_8BIT, 0x380d, 0x80},
+ {OV5693_8BIT, 0x380e, 0x07},
+ {OV5693_8BIT, 0x380f, 0xc0},
+
+ {OV5693_8BIT, 0x3810, 0x00},
+ {OV5693_8BIT, 0x3811, 0x10},
+ {OV5693_8BIT, 0x3812, 0x00},
+ {OV5693_8BIT, 0x3813, 0x02},
+
+ {OV5693_8BIT, 0x3814, 0x11}, /*X subsample control*/
+ {OV5693_8BIT, 0x3815, 0x11}, /*Y subsample control*/
+ {OV5693_8BIT, 0x3820, 0x00},
+ {OV5693_8BIT, 0x3821, 0x1e},
+ {OV5693_8BIT, 0x5002, 0x00},
+ {OV5693_8BIT, 0x5041, 0x84}, /* scale is auto enabled */
+ {OV5693_8BIT, 0x0100, 0x01},
+ {OV5693_TOK_TERM, 0, 0}
+
+};
+
+
+/*
+ * 336x256 30fps 17ms VBlanking 2lane 10Bit (Scaling)
+ * DS from 2564x1956
+ */
+static struct ov5693_reg const ov5693_336x256[] = {
+ {OV5693_8BIT, 0x3501, 0x3d},
+ {OV5693_8BIT, 0x3502, 0x00},
+ {OV5693_8BIT, 0x3708, 0xe6},
+ {OV5693_8BIT, 0x3709, 0xc7},
+ {OV5693_8BIT, 0x3806, 0x07},
+ {OV5693_8BIT, 0x3807, 0xa3},
+ {OV5693_8BIT, 0x3808, 0x01},
+ {OV5693_8BIT, 0x3809, 0x50},
+ {OV5693_8BIT, 0x380a, 0x01},
+ {OV5693_8BIT, 0x380b, 0x00},
+ {OV5693_8BIT, 0x380c, 0x0a},
+ {OV5693_8BIT, 0x380d, 0x80},
+ {OV5693_8BIT, 0x380e, 0x07},
+ {OV5693_8BIT, 0x380f, 0xc0},
+ {OV5693_8BIT, 0x3811, 0x1E},
+ {OV5693_8BIT, 0x3814, 0x31},
+ {OV5693_8BIT, 0x3815, 0x31},
+ {OV5693_8BIT, 0x3820, 0x04},
+ {OV5693_8BIT, 0x3821, 0x1f},
+ {OV5693_8BIT, 0x5002, 0x80},
+ {OV5693_8BIT, 0x0100, 0x01},
+ {OV5693_TOK_TERM, 0, 0}
+};
+
+/*
+ * 368x304 30fps 17ms VBlanking 2lane 10Bit (Scaling)
+ * DS from 2368x1956
+ */
+static struct ov5693_reg const ov5693_368x304[] = {
+ {OV5693_8BIT, 0x3501, 0x3d},
+ {OV5693_8BIT, 0x3502, 0x00},
+ {OV5693_8BIT, 0x3708, 0xe6},
+ {OV5693_8BIT, 0x3709, 0xc7},
+ {OV5693_8BIT, 0x3808, 0x01},
+ {OV5693_8BIT, 0x3809, 0x70},
+ {OV5693_8BIT, 0x380a, 0x01},
+ {OV5693_8BIT, 0x380b, 0x30},
+ {OV5693_8BIT, 0x380c, 0x0a},
+ {OV5693_8BIT, 0x380d, 0x80},
+ {OV5693_8BIT, 0x380e, 0x07},
+ {OV5693_8BIT, 0x380f, 0xc0},
+ {OV5693_8BIT, 0x3811, 0x80},
+ {OV5693_8BIT, 0x3814, 0x31},
+ {OV5693_8BIT, 0x3815, 0x31},
+ {OV5693_8BIT, 0x3820, 0x04},
+ {OV5693_8BIT, 0x3821, 0x1f},
+ {OV5693_8BIT, 0x5002, 0x80},
+ {OV5693_8BIT, 0x0100, 0x01},
+ {OV5693_TOK_TERM, 0, 0}
+};
+
+/*
+ * ov5693_192x160 30fps 17ms VBlanking 2lane 10Bit (Scaling)
+ * DS from 2460x1956
+ */
+static struct ov5693_reg const ov5693_192x160[] = {
+ {OV5693_8BIT, 0x3501, 0x7b},
+ {OV5693_8BIT, 0x3502, 0x80},
+ {OV5693_8BIT, 0x3708, 0xe2},
+ {OV5693_8BIT, 0x3709, 0xc3},
+ {OV5693_8BIT, 0x3804, 0x0a},
+ {OV5693_8BIT, 0x3805, 0x3f},
+ {OV5693_8BIT, 0x3806, 0x07},
+ {OV5693_8BIT, 0x3807, 0xA3},
+ {OV5693_8BIT, 0x3808, 0x00},
+ {OV5693_8BIT, 0x3809, 0xC0},
+ {OV5693_8BIT, 0x380a, 0x00},
+ {OV5693_8BIT, 0x380b, 0xA0},
+ {OV5693_8BIT, 0x380c, 0x0a},
+ {OV5693_8BIT, 0x380d, 0x80},
+ {OV5693_8BIT, 0x380e, 0x07},
+ {OV5693_8BIT, 0x380f, 0xc0},
+ {OV5693_8BIT, 0x3811, 0x40},
+ {OV5693_8BIT, 0x3813, 0x00},
+ {OV5693_8BIT, 0x3814, 0x31},
+ {OV5693_8BIT, 0x3815, 0x31},
+ {OV5693_8BIT, 0x3820, 0x04},
+ {OV5693_8BIT, 0x3821, 0x1f},
+ {OV5693_8BIT, 0x5002, 0x80},
+ {OV5693_8BIT, 0x0100, 0x01},
+ {OV5693_TOK_TERM, 0, 0}
+};
+
+
+static struct ov5693_reg const ov5693_736x496[] = {
+ {OV5693_8BIT, 0x3501, 0x3d},
+ {OV5693_8BIT, 0x3502, 0x00},
+ {OV5693_8BIT, 0x3708, 0xe6},
+ {OV5693_8BIT, 0x3709, 0xc7},
+ {OV5693_8BIT, 0x3803, 0x68},
+ {OV5693_8BIT, 0x3806, 0x07},
+ {OV5693_8BIT, 0x3807, 0x3b},
+ {OV5693_8BIT, 0x3808, 0x02},
+ {OV5693_8BIT, 0x3809, 0xe0},
+ {OV5693_8BIT, 0x380a, 0x01},
+ {OV5693_8BIT, 0x380b, 0xf0},
+ {OV5693_8BIT, 0x380c, 0x0a}, /*hts*/
+ {OV5693_8BIT, 0x380d, 0x80},
+ {OV5693_8BIT, 0x380e, 0x07}, /*vts*/
+ {OV5693_8BIT, 0x380f, 0xc0},
+ {OV5693_8BIT, 0x3811, 0x08},
+ {OV5693_8BIT, 0x3813, 0x02},
+ {OV5693_8BIT, 0x3814, 0x31},
+ {OV5693_8BIT, 0x3815, 0x31},
+ {OV5693_8BIT, 0x3820, 0x04},
+ {OV5693_8BIT, 0x3821, 0x1f},
+ {OV5693_8BIT, 0x5002, 0x80},
+ {OV5693_8BIT, 0x0100, 0x01},
+ {OV5693_TOK_TERM, 0, 0}
+};
+
+/*
+static struct ov5693_reg const ov5693_736x496[] = {
+ {OV5693_8BIT, 0x3501, 0x7b},
+ {OV5693_8BIT, 0x3502, 0x00},
+ {OV5693_8BIT, 0x3708, 0xe6},
+ {OV5693_8BIT, 0x3709, 0xc3},
+ {OV5693_8BIT, 0x3803, 0x00},
+ {OV5693_8BIT, 0x3806, 0x07},
+ {OV5693_8BIT, 0x3807, 0xa3},
+ {OV5693_8BIT, 0x3808, 0x02},
+ {OV5693_8BIT, 0x3809, 0xe0},
+ {OV5693_8BIT, 0x380a, 0x01},
+ {OV5693_8BIT, 0x380b, 0xf0},
+ {OV5693_8BIT, 0x380c, 0x0d},
+ {OV5693_8BIT, 0x380d, 0xb0},
+ {OV5693_8BIT, 0x380e, 0x05},
+ {OV5693_8BIT, 0x380f, 0xf2},
+ {OV5693_8BIT, 0x3811, 0x08},
+ {OV5693_8BIT, 0x3813, 0x02},
+ {OV5693_8BIT, 0x3814, 0x31},
+ {OV5693_8BIT, 0x3815, 0x31},
+ {OV5693_8BIT, 0x3820, 0x01},
+ {OV5693_8BIT, 0x3821, 0x1f},
+ {OV5693_8BIT, 0x5002, 0x00},
+ {OV5693_8BIT, 0x0100, 0x01},
+ {OV5693_TOK_TERM, 0, 0}
+};
+*/
+/*
+ * 976x556 30fps 8.8ms VBlanking 2lane 10Bit (Scaling)
+ */
+static struct ov5693_reg const ov5693_976x556[] = {
+ {OV5693_8BIT, 0x3501, 0x7b},
+ {OV5693_8BIT, 0x3502, 0x00},
+ {OV5693_8BIT, 0x3708, 0xe2},
+ {OV5693_8BIT, 0x3709, 0xc3},
+ {OV5693_8BIT, 0x3803, 0xf0},
+ {OV5693_8BIT, 0x3806, 0x06},
+ {OV5693_8BIT, 0x3807, 0xa7},
+ {OV5693_8BIT, 0x3808, 0x03},
+ {OV5693_8BIT, 0x3809, 0xd0},
+ {OV5693_8BIT, 0x380a, 0x02},
+ {OV5693_8BIT, 0x380b, 0x2C},
+ {OV5693_8BIT, 0x380c, 0x0a},
+ {OV5693_8BIT, 0x380d, 0x80},
+ {OV5693_8BIT, 0x380e, 0x07},
+ {OV5693_8BIT, 0x380f, 0xc0},
+ {OV5693_8BIT, 0x3811, 0x10},
+ {OV5693_8BIT, 0x3813, 0x02},
+ {OV5693_8BIT, 0x3814, 0x11},
+ {OV5693_8BIT, 0x3815, 0x11},
+ {OV5693_8BIT, 0x3820, 0x00},
+ {OV5693_8BIT, 0x3821, 0x1e},
+ {OV5693_8BIT, 0x5002, 0x80},
+ {OV5693_8BIT, 0x0100, 0x01},
+ {OV5693_TOK_TERM, 0, 0}
+};
+
+/* DS from 2624x1492 */
+static struct ov5693_reg const ov5693_1296x736[] = {
+ {OV5693_8BIT, 0x3501, 0x7b},
+ {OV5693_8BIT, 0x3502, 0x00},
+ {OV5693_8BIT, 0x3708, 0xe2},
+ {OV5693_8BIT, 0x3709, 0xc3},
+
+ {OV5693_8BIT, 0x3800, 0x00},
+ {OV5693_8BIT, 0x3801, 0x00},
+ {OV5693_8BIT, 0x3802, 0x00},
+ {OV5693_8BIT, 0x3803, 0x00},
+
+ {OV5693_8BIT, 0x3804, 0x0a},
+ {OV5693_8BIT, 0x3805, 0x3f},
+ {OV5693_8BIT, 0x3806, 0x07},
+ {OV5693_8BIT, 0x3807, 0xA3},
+
+ {OV5693_8BIT, 0x3808, 0x05},
+ {OV5693_8BIT, 0x3809, 0x10},
+ {OV5693_8BIT, 0x380a, 0x02},
+ {OV5693_8BIT, 0x380b, 0xe0},
+
+ {OV5693_8BIT, 0x380c, 0x0a},
+ {OV5693_8BIT, 0x380d, 0x80},
+ {OV5693_8BIT, 0x380e, 0x07},
+ {OV5693_8BIT, 0x380f, 0xc0},
+
+ {OV5693_8BIT, 0x3813, 0xE8},
+
+ {OV5693_8BIT, 0x3814, 0x11}, /*X subsample control*/
+ {OV5693_8BIT, 0x3815, 0x11}, /*Y subsample control*/
+ {OV5693_8BIT, 0x3820, 0x00},
+ {OV5693_8BIT, 0x3821, 0x1e},
+ {OV5693_8BIT, 0x5002, 0x00},
+ {OV5693_8BIT, 0x5041, 0x84}, /* scale is auto enabled */
+ {OV5693_8BIT, 0x0100, 0x01},
+ {OV5693_TOK_TERM, 0, 0}
+};
+
+static struct ov5693_reg const ov5693_1636p_30fps[] = {
+ {OV5693_8BIT, 0x3501, 0x7b},
+ {OV5693_8BIT, 0x3502, 0x00},
+ {OV5693_8BIT, 0x3708, 0xe2},
+ {OV5693_8BIT, 0x3709, 0xc3},
+ {OV5693_8BIT, 0x3803, 0xf0},
+ {OV5693_8BIT, 0x3806, 0x06},
+ {OV5693_8BIT, 0x3807, 0xa7},
+ {OV5693_8BIT, 0x3808, 0x06},
+ {OV5693_8BIT, 0x3809, 0x64},
+ {OV5693_8BIT, 0x380a, 0x04},
+ {OV5693_8BIT, 0x380b, 0x48},
+ {OV5693_8BIT, 0x380c, 0x0a}, /*hts*/
+ {OV5693_8BIT, 0x380d, 0x80},
+ {OV5693_8BIT, 0x380e, 0x07}, /*vts*/
+ {OV5693_8BIT, 0x380f, 0xc0},
+ {OV5693_8BIT, 0x3811, 0x02},
+ {OV5693_8BIT, 0x3813, 0x02},
+ {OV5693_8BIT, 0x3814, 0x11},
+ {OV5693_8BIT, 0x3815, 0x11},
+ {OV5693_8BIT, 0x3820, 0x00},
+ {OV5693_8BIT, 0x3821, 0x1e},
+ {OV5693_8BIT, 0x5002, 0x80},
+ {OV5693_8BIT, 0x0100, 0x01},
+ {OV5693_TOK_TERM, 0, 0}
+};
+
+static struct ov5693_reg const ov5693_1616x1216_30fps[] = {
+ {OV5693_8BIT, 0x3501, 0x7b},
+ {OV5693_8BIT, 0x3502, 0x80},
+ {OV5693_8BIT, 0x3708, 0xe2},
+ {OV5693_8BIT, 0x3709, 0xc3},
+ {OV5693_8BIT, 0x3800, 0x00}, /*{3800,3801} Array X start*/
+ {OV5693_8BIT, 0x3801, 0x08}, /* 04 //{3800,3801} Array X start*/
+ {OV5693_8BIT, 0x3802, 0x00}, /*{3802,3803} Array Y start*/
+ {OV5693_8BIT, 0x3803, 0x04}, /* 00 //{3802,3803} Array Y start*/
+ {OV5693_8BIT, 0x3804, 0x0a}, /*{3804,3805} Array X end*/
+ {OV5693_8BIT, 0x3805, 0x37}, /* 3b //{3804,3805} Array X end*/
+ {OV5693_8BIT, 0x3806, 0x07}, /*{3806,3807} Array Y end*/
+ {OV5693_8BIT, 0x3807, 0x9f}, /* a3 //{3806,3807} Array Y end*/
+ {OV5693_8BIT, 0x3808, 0x06}, /*{3808,3809} Final output H size*/
+ {OV5693_8BIT, 0x3809, 0x50}, /*{3808,3809} Final output H size*/
+ {OV5693_8BIT, 0x380a, 0x04}, /*{380a,380b} Final output V size*/
+ {OV5693_8BIT, 0x380b, 0xc0}, /*{380a,380b} Final output V size*/
+ {OV5693_8BIT, 0x380c, 0x0a}, /*{380c,380d} HTS*/
+ {OV5693_8BIT, 0x380d, 0x80}, /*{380c,380d} HTS*/
+ {OV5693_8BIT, 0x380e, 0x07}, /*{380e,380f} VTS*/
+ {OV5693_8BIT, 0x380f, 0xc0}, /* bc //{380e,380f} VTS*/
+ {OV5693_8BIT, 0x3810, 0x00}, /*{3810,3811} windowing X offset*/
+ {OV5693_8BIT, 0x3811, 0x10}, /*{3810,3811} windowing X offset*/
+ {OV5693_8BIT, 0x3812, 0x00}, /*{3812,3813} windowing Y offset*/
+ {OV5693_8BIT, 0x3813, 0x06}, /*{3812,3813} windowing Y offset*/
+ {OV5693_8BIT, 0x3814, 0x11}, /*X subsample control*/
+ {OV5693_8BIT, 0x3815, 0x11}, /*Y subsample control*/
+ {OV5693_8BIT, 0x3820, 0x00}, /*FLIP/Binning control*/
+ {OV5693_8BIT, 0x3821, 0x1e}, /*MIRROR control*/
+ {OV5693_8BIT, 0x5002, 0x00},
+ {OV5693_8BIT, 0x5041, 0x84},
+ {OV5693_8BIT, 0x0100, 0x01},
+ {OV5693_TOK_TERM, 0, 0}
+};
+
+
+/*
+ * 1940x1096 30fps 8.8ms VBlanking 2lane 10bit (Scaling)
+ */
+static struct ov5693_reg const ov5693_1940x1096[] = {
+ {OV5693_8BIT, 0x3501, 0x7b},
+ {OV5693_8BIT, 0x3502, 0x00},
+ {OV5693_8BIT, 0x3708, 0xe2},
+ {OV5693_8BIT, 0x3709, 0xc3},
+ {OV5693_8BIT, 0x3803, 0xf0},
+ {OV5693_8BIT, 0x3806, 0x06},
+ {OV5693_8BIT, 0x3807, 0xa7},
+ {OV5693_8BIT, 0x3808, 0x07},
+ {OV5693_8BIT, 0x3809, 0x94},
+ {OV5693_8BIT, 0x380a, 0x04},
+ {OV5693_8BIT, 0x380b, 0x48},
+ {OV5693_8BIT, 0x380c, 0x0a},
+ {OV5693_8BIT, 0x380d, 0x80},
+ {OV5693_8BIT, 0x380e, 0x07},
+ {OV5693_8BIT, 0x380f, 0xc0},
+ {OV5693_8BIT, 0x3811, 0x02},
+ {OV5693_8BIT, 0x3813, 0x02},
+ {OV5693_8BIT, 0x3814, 0x11},
+ {OV5693_8BIT, 0x3815, 0x11},
+ {OV5693_8BIT, 0x3820, 0x00},
+ {OV5693_8BIT, 0x3821, 0x1e},
+ {OV5693_8BIT, 0x5002, 0x80},
+ {OV5693_8BIT, 0x0100, 0x01},
+ {OV5693_TOK_TERM, 0, 0}
+};
+
+static struct ov5693_reg const ov5693_2592x1456_30fps[] = {
+ {OV5693_8BIT, 0x3501, 0x7b},
+ {OV5693_8BIT, 0x3502, 0x00},
+ {OV5693_8BIT, 0x3708, 0xe2},
+ {OV5693_8BIT, 0x3709, 0xc3},
+ {OV5693_8BIT, 0x3800, 0x00},
+ {OV5693_8BIT, 0x3801, 0x00},
+ {OV5693_8BIT, 0x3802, 0x00},
+ {OV5693_8BIT, 0x3803, 0xf0},
+ {OV5693_8BIT, 0x3804, 0x0a},
+ {OV5693_8BIT, 0x3805, 0x3f},
+ {OV5693_8BIT, 0x3806, 0x06},
+ {OV5693_8BIT, 0x3807, 0xa4},
+ {OV5693_8BIT, 0x3808, 0x0a},
+ {OV5693_8BIT, 0x3809, 0x20},
+ {OV5693_8BIT, 0x380a, 0x05},
+ {OV5693_8BIT, 0x380b, 0xb0},
+ {OV5693_8BIT, 0x380c, 0x0a},
+ {OV5693_8BIT, 0x380d, 0x80},
+ {OV5693_8BIT, 0x380e, 0x07},
+ {OV5693_8BIT, 0x380f, 0xc0},
+ {OV5693_8BIT, 0x3811, 0x10},
+ {OV5693_8BIT, 0x3813, 0x00},
+ {OV5693_8BIT, 0x3814, 0x11},
+ {OV5693_8BIT, 0x3815, 0x11},
+ {OV5693_8BIT, 0x3820, 0x00},
+ {OV5693_8BIT, 0x3821, 0x1e},
+ {OV5693_8BIT, 0x5002, 0x00},
+ {OV5693_TOK_TERM, 0, 0}
+};
+
+static struct ov5693_reg const ov5693_2576x1456_30fps[] = {
+ {OV5693_8BIT, 0x3501, 0x7b},
+ {OV5693_8BIT, 0x3502, 0x00},
+ {OV5693_8BIT, 0x3708, 0xe2},
+ {OV5693_8BIT, 0x3709, 0xc3},
+ {OV5693_8BIT, 0x3800, 0x00},
+ {OV5693_8BIT, 0x3801, 0x00},
+ {OV5693_8BIT, 0x3802, 0x00},
+ {OV5693_8BIT, 0x3803, 0xf0},
+ {OV5693_8BIT, 0x3804, 0x0a},
+ {OV5693_8BIT, 0x3805, 0x3f},
+ {OV5693_8BIT, 0x3806, 0x06},
+ {OV5693_8BIT, 0x3807, 0xa4},
+ {OV5693_8BIT, 0x3808, 0x0a},
+ {OV5693_8BIT, 0x3809, 0x10},
+ {OV5693_8BIT, 0x380a, 0x05},
+ {OV5693_8BIT, 0x380b, 0xb0},
+ {OV5693_8BIT, 0x380c, 0x0a},
+ {OV5693_8BIT, 0x380d, 0x80},
+ {OV5693_8BIT, 0x380e, 0x07},
+ {OV5693_8BIT, 0x380f, 0xc0},
+ {OV5693_8BIT, 0x3811, 0x18},
+ {OV5693_8BIT, 0x3813, 0x00},
+ {OV5693_8BIT, 0x3814, 0x11},
+ {OV5693_8BIT, 0x3815, 0x11},
+ {OV5693_8BIT, 0x3820, 0x00},
+ {OV5693_8BIT, 0x3821, 0x1e},
+ {OV5693_8BIT, 0x5002, 0x00},
+ {OV5693_TOK_TERM, 0, 0}
+};
+
+/*
+ * 2592x1944 30fps 0.6ms VBlanking 2lane 10Bit
+ */
+static struct ov5693_reg const ov5693_2592x1944_30fps[] = {
+ {OV5693_8BIT, 0x3501, 0x7b},
+ {OV5693_8BIT, 0x3502, 0x00},
+ {OV5693_8BIT, 0x3708, 0xe2},
+ {OV5693_8BIT, 0x3709, 0xc3},
+ {OV5693_8BIT, 0x3803, 0x00},
+ {OV5693_8BIT, 0x3806, 0x07},
+ {OV5693_8BIT, 0x3807, 0xa3},
+ {OV5693_8BIT, 0x3808, 0x0a},
+ {OV5693_8BIT, 0x3809, 0x20},
+ {OV5693_8BIT, 0x380a, 0x07},
+ {OV5693_8BIT, 0x380b, 0x98},
+ {OV5693_8BIT, 0x380c, 0x0a},
+ {OV5693_8BIT, 0x380d, 0x80},
+ {OV5693_8BIT, 0x380e, 0x07},
+ {OV5693_8BIT, 0x380f, 0xc0},
+ {OV5693_8BIT, 0x3811, 0x10},
+ {OV5693_8BIT, 0x3813, 0x00},
+ {OV5693_8BIT, 0x3814, 0x11},
+ {OV5693_8BIT, 0x3815, 0x11},
+ {OV5693_8BIT, 0x3820, 0x00},
+ {OV5693_8BIT, 0x3821, 0x1e},
+ {OV5693_8BIT, 0x5002, 0x00},
+ {OV5693_8BIT, 0x0100, 0x01},
+ {OV5693_TOK_TERM, 0, 0}
+};
+
+/*
+ * 11:9 Full FOV Output, expected FOV Res: 2346x1920
+ * ISP Effect Res: 1408x1152
+ * Sensor out: 1424x1168, DS From: 2380x1952
+ *
+ * WA: Left Offset: 8, Hor scal: 64
+ */
+static struct ov5693_reg const ov5693_1424x1168_30fps[] = {
+ {OV5693_8BIT, 0x3501, 0x3b}, /* long exposure[15:8] */
+ {OV5693_8BIT, 0x3502, 0x80}, /* long exposure[7:0] */
+ {OV5693_8BIT, 0x3708, 0xe2},
+ {OV5693_8BIT, 0x3709, 0xc3},
+ {OV5693_8BIT, 0x3800, 0x00}, /* TIMING_X_ADDR_START */
+ {OV5693_8BIT, 0x3801, 0x50}, /* 80 */
+ {OV5693_8BIT, 0x3802, 0x00}, /* TIMING_Y_ADDR_START */
+ {OV5693_8BIT, 0x3803, 0x02}, /* 2 */
+ {OV5693_8BIT, 0x3804, 0x09}, /* TIMING_X_ADDR_END */
+ {OV5693_8BIT, 0x3805, 0xdd}, /* 2525 */
+ {OV5693_8BIT, 0x3806, 0x07}, /* TIMING_Y_ADDR_END */
+ {OV5693_8BIT, 0x3807, 0xa1}, /* 1953 */
+ {OV5693_8BIT, 0x3808, 0x05}, /* TIMING_X_OUTPUT_SIZE */
+ {OV5693_8BIT, 0x3809, 0x90}, /* 1424 */
+ {OV5693_8BIT, 0x380a, 0x04}, /* TIMING_Y_OUTPUT_SIZE */
+ {OV5693_8BIT, 0x380b, 0x90}, /* 1168 */
+ {OV5693_8BIT, 0x380c, 0x0a}, /* TIMING_HTS */
+ {OV5693_8BIT, 0x380d, 0x80},
+ {OV5693_8BIT, 0x380e, 0x07}, /* TIMING_VTS */
+ {OV5693_8BIT, 0x380f, 0xc0},
+ {OV5693_8BIT, 0x3810, 0x00}, /* TIMING_ISP_X_WIN */
+ {OV5693_8BIT, 0x3811, 0x02}, /* 2 */
+ {OV5693_8BIT, 0x3812, 0x00}, /* TIMING_ISP_Y_WIN */
+ {OV5693_8BIT, 0x3813, 0x00}, /* 0 */
+ {OV5693_8BIT, 0x3814, 0x11}, /* TIME_X_INC */
+ {OV5693_8BIT, 0x3815, 0x11}, /* TIME_Y_INC */
+ {OV5693_8BIT, 0x3820, 0x00},
+ {OV5693_8BIT, 0x3821, 0x1e},
+ {OV5693_8BIT, 0x5002, 0x00},
+ {OV5693_8BIT, 0x5041, 0x84}, /* scale is auto enabled */
+ {OV5693_8BIT, 0x0100, 0x01},
+ {OV5693_TOK_TERM, 0, 0}
+};
+
+/*
+ * 3:2 Full FOV Output, expected FOV Res: 2560x1706
+ * ISP Effect Res: 720x480
+ * Sensor out: 736x496, DS From 2616x1764
+ */
+static struct ov5693_reg const ov5693_736x496_30fps[] = {
+ {OV5693_8BIT, 0x3501, 0x3b}, /* long exposure[15:8] */
+ {OV5693_8BIT, 0x3502, 0x80}, /* long exposure[7:0] */
+ {OV5693_8BIT, 0x3708, 0xe2},
+ {OV5693_8BIT, 0x3709, 0xc3},
+ {OV5693_8BIT, 0x3800, 0x00}, /* TIMING_X_ADDR_START */
+ {OV5693_8BIT, 0x3801, 0x02}, /* 2 */
+ {OV5693_8BIT, 0x3802, 0x00}, /* TIMING_Y_ADDR_START */
+ {OV5693_8BIT, 0x3803, 0x62}, /* 98 */
+ {OV5693_8BIT, 0x3804, 0x0a}, /* TIMING_X_ADDR_END */
+ {OV5693_8BIT, 0x3805, 0x3b}, /* 2619 */
+ {OV5693_8BIT, 0x3806, 0x07}, /* TIMING_Y_ADDR_END */
+ {OV5693_8BIT, 0x3807, 0x43}, /* 1859 */
+ {OV5693_8BIT, 0x3808, 0x02}, /* TIMING_X_OUTPUT_SIZE */
+ {OV5693_8BIT, 0x3809, 0xe0}, /* 736 */
+ {OV5693_8BIT, 0x380a, 0x01}, /* TIMING_Y_OUTPUT_SIZE */
+ {OV5693_8BIT, 0x380b, 0xf0}, /* 496 */
+ {OV5693_8BIT, 0x380c, 0x0a}, /* TIMING_HTS */
+ {OV5693_8BIT, 0x380d, 0x80},
+ {OV5693_8BIT, 0x380e, 0x07}, /* TIMING_VTS */
+ {OV5693_8BIT, 0x380f, 0xc0},
+ {OV5693_8BIT, 0x3810, 0x00}, /* TIMING_ISP_X_WIN */
+ {OV5693_8BIT, 0x3811, 0x02}, /* 2 */
+ {OV5693_8BIT, 0x3812, 0x00}, /* TIMING_ISP_Y_WIN */
+ {OV5693_8BIT, 0x3813, 0x00}, /* 0 */
+ {OV5693_8BIT, 0x3814, 0x11}, /* TIME_X_INC */
+ {OV5693_8BIT, 0x3815, 0x11}, /* TIME_Y_INC */
+ {OV5693_8BIT, 0x3820, 0x00},
+ {OV5693_8BIT, 0x3821, 0x1e},
+ {OV5693_8BIT, 0x5002, 0x00},
+ {OV5693_8BIT, 0x5041, 0x84}, /* scale is auto enabled */
+ {OV5693_8BIT, 0x0100, 0x01},
+ {OV5693_TOK_TERM, 0, 0}
+};
+
+static struct ov5693_reg const ov5693_2576x1936_30fps[] = {
+ {OV5693_8BIT, 0x3501, 0x7b},
+ {OV5693_8BIT, 0x3502, 0x00},
+ {OV5693_8BIT, 0x3708, 0xe2},
+ {OV5693_8BIT, 0x3709, 0xc3},
+ {OV5693_8BIT, 0x3803, 0x00},
+ {OV5693_8BIT, 0x3806, 0x07},
+ {OV5693_8BIT, 0x3807, 0xa3},
+ {OV5693_8BIT, 0x3808, 0x0a},
+ {OV5693_8BIT, 0x3809, 0x10},
+ {OV5693_8BIT, 0x380a, 0x07},
+ {OV5693_8BIT, 0x380b, 0x90},
+ {OV5693_8BIT, 0x380c, 0x0a},
+ {OV5693_8BIT, 0x380d, 0x80},
+ {OV5693_8BIT, 0x380e, 0x07},
+ {OV5693_8BIT, 0x380f, 0xc0},
+ {OV5693_8BIT, 0x3811, 0x18},
+ {OV5693_8BIT, 0x3813, 0x00},
+ {OV5693_8BIT, 0x3814, 0x11},
+ {OV5693_8BIT, 0x3815, 0x11},
+ {OV5693_8BIT, 0x3820, 0x00},
+ {OV5693_8BIT, 0x3821, 0x1e},
+ {OV5693_8BIT, 0x5002, 0x00},
+ {OV5693_8BIT, 0x0100, 0x01},
+ {OV5693_TOK_TERM, 0, 0}
+};
+
+struct ov5693_resolution ov5693_res_preview[] = {
+ {
+ .desc = "ov5693_736x496_30fps",
+ .width = 736,
+ .height = 496,
+ .pix_clk_freq = 160,
+ .fps = 30,
+ .used = 0,
+ .pixels_per_line = 2688,
+ .lines_per_frame = 1984,
+ .bin_factor_x = 1,
+ .bin_factor_y = 1,
+ .bin_mode = 0,
+ .regs = ov5693_736x496_30fps,
+ },
+ {
+ .desc = "ov5693_1616x1216_30fps",
+ .width = 1616,
+ .height = 1216,
+ .pix_clk_freq = 160,
+ .fps = 30,
+ .used = 0,
+ .pixels_per_line = 2688,
+ .lines_per_frame = 1984,
+ .bin_factor_x = 1,
+ .bin_factor_y = 1,
+ .bin_mode = 0,
+ .regs = ov5693_1616x1216_30fps,
+ },
+ {
+ .desc = "ov5693_5M_30fps",
+ .width = 2576,
+ .height = 1456,
+ .pix_clk_freq = 160,
+ .fps = 30,
+ .used = 0,
+ .pixels_per_line = 2688,
+ .lines_per_frame = 1984,
+ .bin_factor_x = 1,
+ .bin_factor_y = 1,
+ .bin_mode = 0,
+ .regs = ov5693_2576x1456_30fps,
+ },
+ {
+ .desc = "ov5693_5M_30fps",
+ .width = 2576,
+ .height = 1936,
+ .pix_clk_freq = 160,
+ .fps = 30,
+ .used = 0,
+ .pixels_per_line = 2688,
+ .lines_per_frame = 1984,
+ .bin_factor_x = 1,
+ .bin_factor_y = 1,
+ .bin_mode = 0,
+ .regs = ov5693_2576x1936_30fps,
+ },
+};
+#define N_RES_PREVIEW (ARRAY_SIZE(ov5693_res_preview))
+
+struct ov5693_resolution ov5693_res_still[] = {
+ {
+ .desc = "ov5693_736x496_30fps",
+ .width = 736,
+ .height = 496,
+ .pix_clk_freq = 160,
+ .fps = 30,
+ .used = 0,
+ .pixels_per_line = 2688,
+ .lines_per_frame = 1984,
+ .bin_factor_x = 1,
+ .bin_factor_y = 1,
+ .bin_mode = 0,
+ .regs = ov5693_736x496_30fps,
+ },
+ {
+ .desc = "ov5693_1424x1168_30fps",
+ .width = 1424,
+ .height = 1168,
+ .pix_clk_freq = 160,
+ .fps = 30,
+ .used = 0,
+ .pixels_per_line = 2688,
+ .lines_per_frame = 1984,
+ .bin_factor_x = 1,
+ .bin_factor_y = 1,
+ .bin_mode = 0,
+ .regs = ov5693_1424x1168_30fps,
+ },
+ {
+ .desc = "ov5693_1616x1216_30fps",
+ .width = 1616,
+ .height = 1216,
+ .pix_clk_freq = 160,
+ .fps = 30,
+ .used = 0,
+ .pixels_per_line = 2688,
+ .lines_per_frame = 1984,
+ .bin_factor_x = 1,
+ .bin_factor_y = 1,
+ .bin_mode = 0,
+ .regs = ov5693_1616x1216_30fps,
+ },
+ {
+ .desc = "ov5693_5M_30fps",
+ .width = 2592,
+ .height = 1456,
+ .pix_clk_freq = 160,
+ .fps = 30,
+ .used = 0,
+ .pixels_per_line = 2688,
+ .lines_per_frame = 1984,
+ .bin_factor_x = 1,
+ .bin_factor_y = 1,
+ .bin_mode = 0,
+ .regs = ov5693_2592x1456_30fps,
+ },
+ {
+ .desc = "ov5693_5M_30fps",
+ .width = 2592,
+ .height = 1944,
+ .pix_clk_freq = 160,
+ .fps = 30,
+ .used = 0,
+ .pixels_per_line = 2688,
+ .lines_per_frame = 1984,
+ .bin_factor_x = 1,
+ .bin_factor_y = 1,
+ .bin_mode = 0,
+ .regs = ov5693_2592x1944_30fps,
+ },
+};
+#define N_RES_STILL (ARRAY_SIZE(ov5693_res_still))
+
+struct ov5693_resolution ov5693_res_video[] = {
+ {
+ .desc = "ov5693_736x496_30fps",
+ .width = 736,
+ .height = 496,
+ .fps = 30,
+ .pix_clk_freq = 160,
+ .used = 0,
+ .pixels_per_line = 2688,
+ .lines_per_frame = 1984,
+ .bin_factor_x = 2,
+ .bin_factor_y = 2,
+ .bin_mode = 1,
+ .regs = ov5693_736x496,
+ },
+ {
+ .desc = "ov5693_336x256_30fps",
+ .width = 336,
+ .height = 256,
+ .fps = 30,
+ .pix_clk_freq = 160,
+ .used = 0,
+ .pixels_per_line = 2688,
+ .lines_per_frame = 1984,
+ .bin_factor_x = 2,
+ .bin_factor_y = 2,
+ .bin_mode = 1,
+ .regs = ov5693_336x256,
+ },
+ {
+ .desc = "ov5693_368x304_30fps",
+ .width = 368,
+ .height = 304,
+ .fps = 30,
+ .pix_clk_freq = 160,
+ .used = 0,
+ .pixels_per_line = 2688,
+ .lines_per_frame = 1984,
+ .bin_factor_x = 2,
+ .bin_factor_y = 2,
+ .bin_mode = 1,
+ .regs = ov5693_368x304,
+ },
+ {
+ .desc = "ov5693_192x160_30fps",
+ .width = 192,
+ .height = 160,
+ .fps = 30,
+ .pix_clk_freq = 160,
+ .used = 0,
+ .pixels_per_line = 2688,
+ .lines_per_frame = 1984,
+ .bin_factor_x = 2,
+ .bin_factor_y = 2,
+ .bin_mode = 1,
+ .regs = ov5693_192x160,
+ },
+ {
+ .desc = "ov5693_1296x736_30fps",
+ .width = 1296,
+ .height = 736,
+ .fps = 30,
+ .pix_clk_freq = 160,
+ .used = 0,
+ .pixels_per_line = 2688,
+ .lines_per_frame = 1984,
+ .bin_factor_x = 2,
+ .bin_factor_y = 2,
+ .bin_mode = 0,
+ .regs = ov5693_1296x736,
+ },
+ {
+ .desc = "ov5693_1296x976_30fps",
+ .width = 1296,
+ .height = 976,
+ .fps = 30,
+ .pix_clk_freq = 160,
+ .used = 0,
+ .pixels_per_line = 2688,
+ .lines_per_frame = 1984,
+ .bin_factor_x = 2,
+ .bin_factor_y = 2,
+ .bin_mode = 0,
+ .regs = ov5693_1296x976,
+ },
+ {
+ .desc = "ov5693_1636P_30fps",
+ .width = 1636,
+ .height = 1096,
+ .fps = 30,
+ .pix_clk_freq = 160,
+ .used = 0,
+ .pixels_per_line = 2688,
+ .lines_per_frame = 1984,
+ .bin_factor_x = 1,
+ .bin_factor_y = 1,
+ .bin_mode = 0,
+ .regs = ov5693_1636p_30fps,
+ },
+ {
+ .desc = "ov5693_1080P_30fps",
+ .width = 1940,
+ .height = 1096,
+ .fps = 30,
+ .pix_clk_freq = 160,
+ .used = 0,
+ .pixels_per_line = 2688,
+ .lines_per_frame = 1984,
+ .bin_factor_x = 1,
+ .bin_factor_y = 1,
+ .bin_mode = 0,
+ .regs = ov5693_1940x1096,
+ },
+ {
+ .desc = "ov5693_5M_30fps",
+ .width = 2592,
+ .height = 1456,
+ .pix_clk_freq = 160,
+ .fps = 30,
+ .used = 0,
+ .pixels_per_line = 2688,
+ .lines_per_frame = 1984,
+ .bin_factor_x = 1,
+ .bin_factor_y = 1,
+ .bin_mode = 0,
+ .regs = ov5693_2592x1456_30fps,
+ },
+ {
+ .desc = "ov5693_5M_30fps",
+ .width = 2592,
+ .height = 1944,
+ .pix_clk_freq = 160,
+ .fps = 30,
+ .used = 0,
+ .pixels_per_line = 2688,
+ .lines_per_frame = 1984,
+ .bin_factor_x = 1,
+ .bin_factor_y = 1,
+ .bin_mode = 0,
+ .regs = ov5693_2592x1944_30fps,
+ },
+};
+#define N_RES_VIDEO (ARRAY_SIZE(ov5693_res_video))
+
+static struct ov5693_resolution *ov5693_res = ov5693_res_preview;
+static int N_RES = N_RES_PREVIEW;
+#endif
diff --git a/drivers/staging/media/atomisp/i2c/ov8858.c b/drivers/staging/media/atomisp/i2c/ov8858.c
new file mode 100644
index 000000000000..9574bc49113c
--- /dev/null
+++ b/drivers/staging/media/atomisp/i2c/ov8858.c
@@ -0,0 +1,2221 @@
+/*
+ * Support for OmniVision ov8858 camera sensor.
+ *
+ * Copyright (c) 2014 Intel Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <media/v4l2-device.h>
+#include <linux/acpi.h>
+#include "../include/linux/atomisp_gmin_platform.h"
+#ifdef CONFIG_PLATFORM_BTNS
+#include "ov8858_btns.h"
+#else
+#include "ov8858.h"
+#endif
+static int ov8858_i2c_read(struct i2c_client *client, u16 len, u16 addr,
+ u8 *buf)
+{
+ struct i2c_msg msg[2];
+ u8 address[2];
+ int err;
+
+ if (!client->adapter) {
+ dev_err(&client->dev, "%s error, no adapter\n", __func__);
+ return -ENODEV;
+ }
+
+ dev_dbg(&client->dev, "%s: len = %d, addr = 0x%04x\n",
+ __func__, len, addr);
+
+ memset(msg, 0, sizeof(msg));
+
+ address[0] = (addr >> 8) & 0xff;
+ address[1] = addr & 0xff;
+
+ msg[0].addr = client->addr;
+ msg[0].flags = 0;
+ msg[0].len = I2C_MSG_LENGTH;
+ msg[0].buf = address;
+
+ msg[1].addr = client->addr;
+ msg[1].len = len;
+ msg[1].flags = I2C_M_RD;
+ msg[1].buf = buf;
+
+ err = i2c_transfer(client->adapter, msg, ARRAY_SIZE(msg));
+ if (err != 2) {
+ if (err >= 0)
+ err = -EIO;
+ goto error;
+ }
+
+ return 0;
+error:
+ dev_err(&client->dev, "reading from address 0x%x error %d", addr, err);
+ return err;
+}
+
+static int ov8858_read_reg(struct i2c_client *client, u16 type, u16 reg,
+ u16 *val)
+{
+ u8 data[OV8858_SHORT_MAX];
+ int err;
+
+ dev_dbg(&client->dev, "%s: type = %d, reg = 0x%04x\n",
+ __func__, type, reg);
+
+ /* read only 8 and 16 bit values */
+ if (type != OV8858_8BIT && type != OV8858_16BIT) {
+ dev_err(&client->dev, "%s error, invalid data length\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ memset(data, 0, sizeof(data));
+
+ err = ov8858_i2c_read(client, type, reg, data);
+ if (err)
+ goto error;
+
+ /* high byte comes first */
+ if (type == OV8858_8BIT)
+ *val = (u8)data[0];
+ else
+ *val = data[0] << 8 | data[1];
+
+ dev_dbg(&client->dev, "%s: val = 0x%04x\n", __func__, *val);
+
+ return 0;
+
+error:
+ dev_err(&client->dev, "read from offset 0x%x error %d", reg, err);
+ return err;
+}
+
+static int ov8858_i2c_write(struct i2c_client *client, u16 len, u8 *data)
+{
+ struct i2c_msg msg;
+ const int num_msg = 1;
+ int ret;
+
+ msg.addr = client->addr;
+ msg.flags = 0;
+ msg.len = len;
+ msg.buf = data;
+
+ ret = i2c_transfer(client->adapter, &msg, 1);
+
+ return ret == num_msg ? 0 : -EIO;
+}
+
+static int
+ov8858_write_reg(struct i2c_client *client, u16 data_length, u16 reg, u16 val)
+{
+ int ret;
+ unsigned char data[4] = {0};
+ u16 *wreg;
+ const u16 len = data_length + sizeof(u16); /* 16-bit address + data */
+
+ dev_dbg(&client->dev,
+ "%s: data_length = %d, reg = 0x%04x, val = 0x%04x\n",
+ __func__, data_length, reg, val);
+
+ if (!client->adapter) {
+ dev_err(&client->dev, "%s error, no adapter\n", __func__);
+ return -ENODEV;
+ }
+
+ if (data_length != OV8858_8BIT && data_length != OV8858_16BIT) {
+ dev_err(&client->dev, "%s error, invalid length\n", __func__);
+ return -EINVAL;
+ }
+
+ /* high byte goes out first */
+ wreg = (u16 *)data;
+ *wreg = cpu_to_be16(reg);
+
+ if (data_length == OV8858_8BIT) {
+ data[2] = (u8)(val);
+ } else {
+ /* OV8858_16BIT */
+ u16 *wdata = (u16 *)&data[2];
+ *wdata = be16_to_cpu(val);
+ }
+
+ ret = ov8858_i2c_write(client, len, data);
+ if (ret)
+ dev_err(&client->dev,
+ "write error: wrote 0x%x to offset 0x%x error %d",
+ val, reg, ret);
+
+ return ret;
+}
+
+/*
+ * ov8858_write_reg_array - Initializes a list of registers
+ * @client: i2c driver client structure
+ * @reglist: list of registers to be written
+ *
+ * This function initializes a list of registers. When consecutive addresses
+ * are found in a row on the list, this function creates a buffer and sends
+ * consecutive data in a single i2c_transfer().
+ *
+ * __ov8858_flush_reg_array(), __ov8858_buf_reg_array() and
+ * __ov8858_write_reg_is_consecutive() are internal functions to
+ * ov8858_write_reg_array() and should not be used anywhere else.
+ *
+ */
+static int __ov8858_flush_reg_array(struct i2c_client *client,
+ struct ov8858_write_ctrl *ctrl)
+{
+ u16 size;
+ if (ctrl->index == 0)
+ return 0;
+
+ size = sizeof(u16) + ctrl->index; /* 16-bit address + data */
+ ctrl->buffer.addr = cpu_to_be16(ctrl->buffer.addr);
+ ctrl->index = 0;
+
+ return ov8858_i2c_write(client, size, (u8 *)&ctrl->buffer);
+}
+
+static int __ov8858_buf_reg_array(struct i2c_client *client,
+ struct ov8858_write_ctrl *ctrl,
+ const struct ov8858_reg *next)
+{
+ int size;
+ u16 *data16;
+
+ switch (next->type) {
+ case OV8858_8BIT:
+ size = 1;
+ ctrl->buffer.data[ctrl->index] = (u8)next->val;
+ break;
+ case OV8858_16BIT:
+ size = 2;
+ data16 = (u16 *)&ctrl->buffer.data[ctrl->index];
+ *data16 = cpu_to_be16((u16)next->val);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* When first item is added, we need to store its starting address */
+ if (ctrl->index == 0)
+ ctrl->buffer.addr = next->sreg;
+
+ ctrl->index += size;
+
+ /*
+ * If the buffer cannot guarantee free space for the next (up to
+ * 16-bit) item, flush it now so the following entry still fits.
+ */
+ if (ctrl->index + sizeof(u16) >= OV8858_MAX_WRITE_BUF_SIZE)
+ __ov8858_flush_reg_array(client, ctrl);
+
+ return 0;
+}
+
+static int
+__ov8858_write_reg_is_consecutive(struct i2c_client *client,
+ struct ov8858_write_ctrl *ctrl,
+ const struct ov8858_reg *next)
+{
+ if (ctrl->index == 0)
+ return 1;
+
+ return ctrl->buffer.addr + ctrl->index == next->sreg;
+}
+
+static int ov8858_write_reg_array(struct i2c_client *client,
+ const struct ov8858_reg *reglist)
+{
+ const struct ov8858_reg *next = reglist;
+ struct ov8858_write_ctrl ctrl;
+ int err;
+
+ ctrl.index = 0;
+ for (; next->type != OV8858_TOK_TERM; next++) {
+ switch (next->type & OV8858_TOK_MASK) {
+ case OV8858_TOK_DELAY:
+ err = __ov8858_flush_reg_array(client, &ctrl);
+ if (err)
+ return err;
+ msleep(next->val);
+ break;
+
+ default:
+ /*
+ * If next address is not consecutive, data needs to be
+ * flushed before proceeding
+ */
+ if (!__ov8858_write_reg_is_consecutive(client,
+ &ctrl, next)) {
+ err = __ov8858_flush_reg_array(client, &ctrl);
+ if (err)
+ return err;
+ }
+ err = __ov8858_buf_reg_array(client, &ctrl, next);
+ if (err) {
+ dev_err(&client->dev, "%s: write error\n",
+ __func__);
+ return err;
+ }
+ break;
+ }
+ }
+
+ return __ov8858_flush_reg_array(client, &ctrl);
+}
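+
+/*
+ * Illustrative sketch only (hypothetical register list, not part of the
+ * original driver): given
+ *
+ *    {OV8858_8BIT, 0x3800, 0x00},
+ *    {OV8858_8BIT, 0x3801, 0x0c},
+ *    {OV8858_8BIT, 0x3802, 0x00},
+ *    {OV8858_TOK_TERM, 0, 0}
+ *
+ * the three consecutive 8-bit writes are buffered by __ov8858_buf_reg_array()
+ * and flushed by __ov8858_flush_reg_array() as a single i2c_transfer() that
+ * carries the start address 0x3800 followed by the three data bytes.
+ */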
+
+static int __ov8858_min_fps_diff(int fps,
+ const struct ov8858_fps_setting *fps_list)
+{
+ int diff = INT_MAX;
+ int i;
+
+ if (fps == 0)
+ return 0;
+
+ for (i = 0; i < MAX_FPS_OPTIONS_SUPPORTED; i++) {
+ if (!fps_list[i].fps)
+ break;
+ if (abs(fps_list[i].fps - fps) < diff)
+ diff = abs(fps_list[i].fps - fps);
+ }
+
+ return diff;
+}
+
+static int __ov8858_nearest_fps_index(int fps,
+ const struct ov8858_fps_setting *fps_list)
+{
+ int fps_index = 0;
+ int i;
+
+ for (i = 0; i < MAX_FPS_OPTIONS_SUPPORTED; i++) {
+ if (!fps_list[i].fps)
+ break;
+ if (abs(fps_list[i].fps - fps)
+ < abs(fps_list[fps_index].fps - fps))
+ fps_index = i;
+ }
+ return fps_index;
+}
+
+static int __ov8858_update_frame_timing(struct v4l2_subdev *sd,
+ u16 *hts, u16 *vts)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret;
+
+
+ dev_dbg(&client->dev, "%s OV8858_TIMING_HTS=0x%04x\n",
+ __func__, *hts);
+
+ /* HTS = pixel_per_line / 2 */
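+ /*
+ * e.g. (hypothetical value) pixels_per_line = 3880 (0x0f28) is
+ * programmed as 1940 (0x0794) in the HTS register.
+ */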
+ ret = ov8858_write_reg(client, OV8858_16BIT,
+ OV8858_TIMING_HTS, *hts >> 1);
+ if (ret)
+ return ret;
+ dev_dbg(&client->dev, "%s OV8858_TIMING_VTS=0x%04x\n",
+ __func__, *vts);
+
+ return ov8858_write_reg(client, OV8858_16BIT, OV8858_TIMING_VTS, *vts);
+}
+
+static int __ov8858_set_exposure(struct v4l2_subdev *sd, int exposure, int gain,
+ int dig_gain, u16 *hts, u16 *vts)
+{
+ struct ov8858_device *dev = to_ov8858_sensor(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int exp_val, ret;
+ dev_dbg(&client->dev, "%s, exposure = %d, gain=%d, dig_gain=%d\n",
+ __func__, exposure, gain, dig_gain);
+
+ if (dev->limit_exposure_flag) {
+ if (exposure > *vts - OV8858_INTEGRATION_TIME_MARGIN)
+ exposure = *vts - OV8858_INTEGRATION_TIME_MARGIN;
+ } else {
+ if (*vts < exposure + OV8858_INTEGRATION_TIME_MARGIN)
+ *vts = (u16) exposure + OV8858_INTEGRATION_TIME_MARGIN;
+ }
+
+ ret = __ov8858_update_frame_timing(sd, hts, vts);
+ if (ret)
+ return ret;
+
+ /* For ov8858, the low 4 bits are fraction bits and must be kept 0 */
+ exp_val = exposure << 4;
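+ /*
+ * Worked example with a hypothetical value: exposure = 0x2b0 lines
+ * gives exp_val = 0x2b00, so 0x00 is written to OV8858_LONG_EXPO+2,
+ * 0x2b to OV8858_LONG_EXPO+1 and 0x00 to OV8858_LONG_EXPO below.
+ */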
+ ret = ov8858_write_reg(client, OV8858_8BIT,
+ OV8858_LONG_EXPO+2, exp_val & 0xFF);
+ if (ret)
+ return ret;
+
+ ret = ov8858_write_reg(client, OV8858_8BIT,
+ OV8858_LONG_EXPO+1, (exp_val >> 8) & 0xFF);
+ if (ret)
+ return ret;
+
+ ret = ov8858_write_reg(client, OV8858_8BIT,
+ OV8858_LONG_EXPO, (exp_val >> 16) & 0x0F);
+ if (ret)
+ return ret;
+
+ /* Digital gain : to all MWB channel gains */
+ if (dig_gain) {
+ ret = ov8858_write_reg(client, OV8858_16BIT,
+ OV8858_MWB_RED_GAIN_H, dig_gain);
+ if (ret)
+ return ret;
+
+ ret = ov8858_write_reg(client, OV8858_16BIT,
+ OV8858_MWB_GREEN_GAIN_H, dig_gain);
+ if (ret)
+ return ret;
+
+ ret = ov8858_write_reg(client, OV8858_16BIT,
+ OV8858_MWB_BLUE_GAIN_H, dig_gain);
+ if (ret)
+ return ret;
+ }
+
+ ret = ov8858_write_reg(client, OV8858_16BIT, OV8858_LONG_GAIN,
+ gain & 0x07ff);
+ if (ret)
+ return ret;
+
+ dev->gain = gain;
+ dev->exposure = exposure;
+ dev->digital_gain = dig_gain;
+
+ return 0;
+}
+
+static int ov8858_set_exposure(struct v4l2_subdev *sd, int exposure, int gain,
+ int dig_gain)
+{
+ struct ov8858_device *dev = to_ov8858_sensor(sd);
+ const struct ov8858_resolution *res;
+ u16 hts, vts;
+ int ret;
+
+ mutex_lock(&dev->input_lock);
+
+ /* Validate exposure: cannot exceed the 16-bit limit */
+ exposure = clamp_t(int, exposure, 0, OV8858_MAX_EXPOSURE_VALUE);
+
+ /* Validate gain: must not exceed the maximum 8-bit value */
+ gain = clamp_t(int, gain, 0, OV8858_MAX_GAIN_VALUE);
+
+ /* Validate digital gain: must not exceed the 12-bit maximum */
+ dig_gain = clamp_t(int, dig_gain, 0, OV8858_MWB_GAIN_MAX);
+
+ res = &dev->curr_res_table[dev->fmt_idx];
+ /*
+ * Per the vendor, the HTS register value is half of the total
+ * pixels per line.
+ */
+ hts = res->fps_options[dev->fps_index].pixels_per_line;
+ vts = res->fps_options[dev->fps_index].lines_per_frame;
+
+ ret = __ov8858_set_exposure(sd, exposure, gain, dig_gain, &hts, &vts);
+
+ mutex_unlock(&dev->input_lock);
+
+ return ret;
+}
+
+/*
+ * When the exposure and gain values are written to the sensor, the sensor
+ * may adjust them, so this function is needed to read back the values that
+ * are actually in effect.
+ */
+static int ov8858_g_update_exposure(struct v4l2_subdev *sd,
+ struct atomisp_update_exposure *exposure)
+{
+ struct ov8858_device *dev = to_ov8858_sensor(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int gain = exposure->gain;
+
+ dev_dbg(&client->dev, "%s: gain: %d, digi_gain: %d\n", __func__,
+ exposure->gain, exposure->digi_gain);
+ exposure->update_digi_gain = dev->digital_gain;
+ /* This real-gain conversion formula is provided by the vendor */
+ exposure->update_gain = (((gain & 0x700) >> 8) + 1) * (gain & 0xFF);
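+ /*
+ * Worked example for the conversion above with a hypothetical value:
+ * gain = 0x1f0 yields ((0x100 >> 8) + 1) * 0xf0 = 2 * 240 = 480.
+ */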
+
+ return 0;
+}
+
+static int ov8858_s_exposure(struct v4l2_subdev *sd,
+ struct atomisp_exposure *exposure)
+{
+ return ov8858_set_exposure(sd, exposure->integration_time[0],
+ exposure->gain[0], exposure->gain[1]);
+}
+
+static int ov8858_priv_int_data_init(struct v4l2_subdev *sd)
+{
+ struct ov8858_device *dev = to_ov8858_sensor(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ u32 size = OV8858_OTP_END_ADDR - OV8858_OTP_START_ADDR + 1;
+ int r;
+ u16 isp_ctrl2 = 0;
+
+ if (!dev->otp_data) {
+ dev->otp_data = devm_kzalloc(&client->dev, size, GFP_KERNEL);
+ if (!dev->otp_data) {
+ dev_err(&client->dev, "%s: can't allocate memory",
+ __func__);
+ r = -ENOMEM;
+ goto error3;
+ }
+
+ /* Streaming has to be on */
+ r = ov8858_write_reg(client, OV8858_8BIT, OV8858_STREAM_MODE,
+ 0x01);
+ if (r)
+ goto error2;
+
+ /* Turn off Dead Pixel Correction */
+ r = ov8858_read_reg(client, OV8858_8BIT,
+ OV8858_OTP_ISP_CTRL2, &isp_ctrl2);
+ if (r)
+ goto error1;
+
+ r = ov8858_write_reg(client, OV8858_8BIT, OV8858_OTP_ISP_CTRL2,
+ isp_ctrl2 & ~OV8858_OTP_DPC_ENABLE);
+ if (r)
+ goto error1;
+
+ /* Enable partial OTP read mode */
+ r = ov8858_write_reg(client, OV8858_8BIT, OV8858_OTP_MODE_CTRL,
+ OV8858_OTP_MODE_PROGRAM_DISABLE |
+ OV8858_OTP_MODE_MANUAL);
+ if (r)
+ goto error1;
+
+ /* Set address range of OTP memory to read */
+ r = ov8858_write_reg(client, OV8858_16BIT,
+ OV8858_OTP_START_ADDR_REG,
+ OV8858_OTP_START_ADDR);
+ if (r)
+ goto error1;
+
+ r = ov8858_write_reg(client, OV8858_16BIT,
+ OV8858_OTP_END_ADDR_REG,
+ OV8858_OTP_END_ADDR);
+ if (r)
+ goto error1;
+
+ /* Load the OTP data into the OTP buffer */
+ r = ov8858_write_reg(client, OV8858_8BIT, OV8858_OTP_LOAD_CTRL,
+ OV8858_OTP_LOAD_ENABLE);
+ if (r)
+ goto error1;
+
+ /* Wait for the data to load into the buffer */
+ usleep_range(5000, 5500);
+
+ /* Read the OTP data from the buffer */
+ r = ov8858_i2c_read(client, size, OV8858_OTP_START_ADDR,
+ dev->otp_data);
+ if (r)
+ goto error1;
+
+ /* Turn on Dead Pixel Correction */
+ r = ov8858_write_reg(client, OV8858_8BIT, OV8858_OTP_ISP_CTRL2,
+ isp_ctrl2 | OV8858_OTP_DPC_ENABLE);
+ if (r)
+ goto error1;
+
+ /* Stop streaming */
+ r = ov8858_write_reg(client, 1, OV8858_STREAM_MODE, 0x00);
+ if (r) {
+ dev_err(&client->dev, "%s: cannot turn off streaming\n",
+ __func__);
+ goto error1;
+ }
+ }
+
+
+ return 0;
+
+error1:
+ /* Turn on Dead Pixel Correction and set streaming off */
+ ov8858_write_reg(client, OV8858_8BIT, OV8858_OTP_ISP_CTRL2,
+ isp_ctrl2 | OV8858_OTP_DPC_ENABLE);
+ ov8858_write_reg(client, 1, OV8858_STREAM_MODE, 0x00);
+error2:
+ devm_kfree(&client->dev, dev->otp_data);
+ dev->otp_data = NULL;
+error3:
+ dev_err(&client->dev, "%s: OTP reading failed\n", __func__);
+ return r;
+}
+
+static int ov8858_g_priv_int_data(struct v4l2_subdev *sd,
+ struct v4l2_private_int_data *priv)
+{
+ struct ov8858_device *dev = to_ov8858_sensor(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ u32 size = OV8858_OTP_END_ADDR - OV8858_OTP_START_ADDR + 1;
+ int r;
+
+ mutex_lock(&dev->input_lock);
+
+ if (!dev->otp_data) {
+ dev_err(&client->dev, "%s: otp data is NULL\n", __func__);
+ mutex_unlock(&dev->input_lock);
+ return -EFAULT;
+ }
+
+ if (copy_to_user(priv->data, dev->otp_data,
+ min_t(__u32, priv->size, size))) {
+ r = -EFAULT;
+ dev_err(&client->dev, "%s: OTP reading failed\n", __func__);
+ mutex_unlock(&dev->input_lock);
+ return r;
+ }
+
+ priv->size = size;
+ mutex_unlock(&dev->input_lock);
+
+ return 0;
+}
+
+static int __ov8858_init(struct v4l2_subdev *sd)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct ov8858_device *dev = to_ov8858_sensor(sd);
+ int ret;
+ dev_dbg(&client->dev, "%s\n", __func__);
+
+ /* Sets the default FPS */
+ dev->fps_index = 0;
+
+ /* Set default exposure values (initially start values) */
+ dev->exposure = 256;
+ dev->gain = 16;
+ dev->digital_gain = 1024;
+ dev->limit_exposure_flag = false;
+
+ dev_dbg(&client->dev, "%s: Writing basic settings to ov8858\n",
+ __func__);
+ ret = ov8858_write_reg_array(client, ov8858_BasicSettings);
+ if (ret)
+ return ret;
+
+ return ov8858_priv_int_data_init(sd);
+}
+
+static int ov8858_init(struct v4l2_subdev *sd, u32 val)
+{
+ struct ov8858_device *dev = to_ov8858_sensor(sd);
+ int ret;
+
+ mutex_lock(&dev->input_lock);
+ ret = __ov8858_init(sd);
+ mutex_unlock(&dev->input_lock);
+
+ return ret;
+}
+
+static void ov8858_uninit(struct v4l2_subdev *sd)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct ov8858_device *dev = to_ov8858_sensor(sd);
+ struct v4l2_ctrl *ctrl;
+ dev_dbg(&client->dev, "%s:\n", __func__);
+
+ dev->exposure = 0;
+ dev->gain = 0;
+ dev->digital_gain = 0;
+ dev->limit_exposure_flag = false;
+ mutex_unlock(&dev->input_lock);
+ ctrl = v4l2_ctrl_find(sd->ctrl_handler,
+ V4L2_CID_EXPOSURE_AUTO_PRIORITY);
+ if (ctrl)
+ v4l2_ctrl_s_ctrl(ctrl, V4L2_EXPOSURE_AUTO);
+ mutex_lock(&dev->input_lock);
+}
+
+static int ov8858_g_comp_delay(struct v4l2_subdev *sd, unsigned int *usec)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct ov8858_device *dev = to_ov8858_sensor(sd);
+ int ret = 0, exposure;
+ u16 vts, data;
+
+ if (dev->exposure == 0) {
+ ret = ov8858_read_reg(client, OV8858_16BIT,
+ OV8858_LONG_EXPO + 1, &data);
+ if (ret)
+ return ret;
+ exposure = data;
+ exposure >>= 4;
+ } else {
+ exposure = dev->exposure;
+ }
+
+ ret = ov8858_read_reg(client, OV8858_16BIT, OV8858_TIMING_VTS, &vts);
+ if (ret || vts == 0)
+ vts = OV8858_DEPTH_VTS_CONST;
+
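+ /*
+ * The constant 33333 is presumably the ~33.3 ms frame period at 30 fps
+ * expressed in microseconds, so exposure (in lines) * frame period /
+ * VTS (lines per frame) approximates the exposure time in microseconds.
+ */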
+ *usec = (exposure * 33333 / vts);
+ if (*usec > OV8858_DEPTH_COMP_CONST)
+ *usec = *usec - OV8858_DEPTH_COMP_CONST;
+ else
+ *usec = OV8858_DEPTH_COMP_CONST;
+
+ return 0;
+}
+
+static long ov8858_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ switch (cmd) {
+ case ATOMISP_IOC_S_EXPOSURE:
+ return ov8858_s_exposure(sd, (struct atomisp_exposure *)arg);
+ case ATOMISP_IOC_G_SENSOR_PRIV_INT_DATA:
+ return ov8858_g_priv_int_data(sd, arg);
+ case ATOMISP_IOC_G_DEPTH_SYNC_COMP:
+ return ov8858_g_comp_delay(sd, (unsigned int *)arg);
+ case ATOMISP_IOC_G_UPDATE_EXPOSURE:
+ return ov8858_g_update_exposure(sd,
+ (struct atomisp_update_exposure *)arg);
+ default:
+ dev_dbg(&client->dev, "Unhandled command 0x%X\n", cmd);
+ return -EINVAL;
+ }
+}
+
+static int __power_ctrl(struct v4l2_subdev *sd, bool flag)
+{
+ int ret = 0;
+ struct ov8858_device *dev = to_ov8858_sensor(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ if (!dev || !dev->platform_data)
+ return -ENODEV;
+
+ /* Non-gmin platforms use the legacy callback */
+ if (dev->platform_data->power_ctrl)
+ return dev->platform_data->power_ctrl(sd, flag);
+
+ if (dev->platform_data->v1p2_ctrl) {
+ ret = dev->platform_data->v1p2_ctrl(sd, flag);
+ if (ret) {
+ dev_err(&client->dev,
+ "failed to power %s 1.2v power rail\n",
+ flag ? "up" : "down");
+ return ret;
+ }
+ }
+
+ if (dev->platform_data->v2p8_ctrl) {
+ ret = dev->platform_data->v2p8_ctrl(sd, flag);
+ if (ret) {
+ dev_err(&client->dev,
+ "failed to power %s 2.8v power rail\n",
+ flag ? "up" : "down");
+ return ret;
+ }
+ }
+
+ if (dev->platform_data->v1p8_ctrl) {
+ ret = dev->platform_data->v1p8_ctrl(sd, flag);
+ if (ret) {
+ dev_err(&client->dev,
+ "failed to power %s 1.8v power rail\n",
+ flag ? "up" : "down");
+ if (dev->platform_data->v2p8_ctrl)
+ dev->platform_data->v2p8_ctrl(sd, 0);
+ return ret;
+ }
+ }
+
+ if (flag)
+ msleep(20); /* Wait for power lines to stabilize */
+ return ret;
+}
+
+static int __gpio_ctrl(struct v4l2_subdev *sd, bool flag)
+{
+ struct i2c_client *client;
+ struct ov8858_device *dev;
+
+ if (!sd)
+ return -EINVAL;
+
+ client = v4l2_get_subdevdata(sd);
+ dev = to_ov8858_sensor(sd);
+
+ if (!client || !dev || !dev->platform_data)
+ return -ENODEV;
+
+ /* Non-gmin platforms use the legacy callback */
+ if (dev->platform_data->gpio_ctrl)
+ return dev->platform_data->gpio_ctrl(sd, flag);
+
+ if (dev->platform_data->gpio0_ctrl)
+ return dev->platform_data->gpio0_ctrl(sd, flag);
+
+ dev_err(&client->dev, "failed to find platform gpio callback\n");
+
+ return -EINVAL;
+}
+
+static int power_up(struct v4l2_subdev *sd)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct ov8858_device *dev = to_ov8858_sensor(sd);
+ int ret;
+ dev_dbg(&client->dev, "%s\n", __func__);
+
+ /* Enable power */
+ ret = __power_ctrl(sd, 1);
+ if (ret) {
+ dev_err(&client->dev, "power rail on failed %d.\n", ret);
+ goto fail_power;
+ }
+
+ /* Enable clock */
+ ret = dev->platform_data->flisclk_ctrl(sd, 1);
+ if (ret) {
+ dev_err(&client->dev, "flisclk on failed %d\n", ret);
+ goto fail_clk;
+ }
+
+ /* Release reset */
+ ret = __gpio_ctrl(sd, 1);
+ if (ret) {
+ dev_err(&client->dev, "gpio on failed %d\n", ret);
+ goto fail_gpio;
+ }
+
+ /* Minimum delay is 8192 clock cycles before the first i2c transaction,
+ * which is 1.37 ms at the lowest allowed clock rate of 6 MHz */
+ usleep_range(2000, 2500);
+ return 0;
+
+fail_gpio:
+ dev->platform_data->flisclk_ctrl(sd, 0);
+fail_clk:
+ __power_ctrl(sd, 0);
+fail_power:
+ dev_err(&client->dev, "Sensor power-up failed\n");
+
+ return ret;
+}
+
+static int power_down(struct v4l2_subdev *sd)
+{
+ struct ov8858_device *dev = to_ov8858_sensor(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret;
+ dev_dbg(&client->dev, "%s\n", __func__);
+
+ ret = dev->platform_data->flisclk_ctrl(sd, 0);
+ if (ret)
+ dev_err(&client->dev, "flisclk off failed\n");
+
+ ret = __gpio_ctrl(sd, 0);
+ if (ret)
+ dev_err(&client->dev, "gpio off failed\n");
+
+ ret = __power_ctrl(sd, 0);
+ if (ret)
+ dev_err(&client->dev, "power rail off failed.\n");
+
+ return ret;
+}
+
+static int __ov8858_s_power(struct v4l2_subdev *sd, int on)
+{
+ struct ov8858_device *dev = to_ov8858_sensor(sd);
+ int ret, r = 0;
+
+ if (on == 0) {
+ ov8858_uninit(sd);
+ if (dev->vcm_driver && dev->vcm_driver->power_down)
+ r = dev->vcm_driver->power_down(sd);
+ ret = power_down(sd);
+ if (r != 0 && ret == 0)
+ ret = r;
+ } else {
+ ret = power_up(sd);
+ if (ret)
+ power_down(sd);
+ if (dev->vcm_driver && dev->vcm_driver->power_up) {
+ ret = dev->vcm_driver->power_up(sd);
+ if (ret) {
+ power_down(sd);
+ return ret;
+ }
+ }
+ return __ov8858_init(sd);
+ }
+
+ return ret;
+}
+
+static int ov8858_s_power(struct v4l2_subdev *sd, int on)
+{
+ int ret;
+ struct ov8858_device *dev = to_ov8858_sensor(sd);
+
+ mutex_lock(&dev->input_lock);
+ ret = __ov8858_s_power(sd, on);
+ mutex_unlock(&dev->input_lock);
+
+ /*
+ * FIXME: Compatibility with old behaviour: return to preview
+ * when the device is power cycled.
+ */
+ if (!ret && on)
+ v4l2_ctrl_s_ctrl(dev->run_mode, ATOMISP_RUN_MODE_PREVIEW);
+
+ return ret;
+}
+
+/*
+ * Return the value of the specified register: first try to get it from
+ * the register list and, if it is not found there, read it from the
+ * sensor via i2c.
+ */
+static int ov8858_get_register(struct v4l2_subdev *sd, int reg, int type,
+ const struct ov8858_reg *reglist)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ const struct ov8858_reg *next;
+ u16 val;
+
+ /* Try if the values are in the register list */
+ for (next = reglist; next->type != OV8858_TOK_TERM; next++) {
+ if (next->sreg == reg) {
+ if (type == OV8858_8BIT)
+ return next->val;
+
+ if (type == OV8858_16BIT &&
+ next[1].type != OV8858_TOK_TERM)
+ return next[0].val << 8 | next[1].val;
+ }
+ }
+
+ /* If not, read from sensor */
+ if (ov8858_read_reg(client, type, reg, &val)) {
+ dev_err(&client->dev, "failed to read register 0x%08x\n", reg);
+ return -EIO;
+ }
+
+ return val;
+}
+
+static inline int ov8858_get_register_16bit(struct v4l2_subdev *sd, int reg,
+ const struct ov8858_reg *reglist)
+{
+ return ov8858_get_register(sd, reg, OV8858_16BIT, reglist);
+}
+
+static inline int ov8858_get_register_8bit(struct v4l2_subdev *sd, int reg,
+ const struct ov8858_reg *reglist)
+{
+ return ov8858_get_register(sd, reg, OV8858_8BIT, reglist);
+}
+
+static int __ov8858_get_pll1_values(struct v4l2_subdev *sd,
+ int *value,
+ const struct ov8858_reg *reglist)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ unsigned int prediv_idx;
+ unsigned int multiplier;
+ unsigned int sys_prediv;
+ unsigned int prediv_coef[] = {2, 3, 4, 5, 6, 8, 12, 16};
+ int ret;
+
+ ret = ov8858_get_register_8bit(sd, OV8858_PLL1_PREDIV0, reglist);
+ if (ret < 0)
+ return ret;
+
+ if (ret & OV8858_PLL1_PREDIV0_MASK)
+ *value /= 2;
+
+ ret = ov8858_get_register_8bit(sd, OV8858_PLL1_PREDIV, reglist);
+
+ if (ret < 0)
+ return ret;
+
+ prediv_idx = ret & OV8858_PLL1_PREDIV_MASK;
+ *value = *value * 2 / prediv_coef[prediv_idx];
+
+ ret = ov8858_get_register_16bit(sd, OV8858_PLL1_MULTIPLIER, reglist);
+ if (ret < 0)
+ return ret;
+
+ multiplier = ret;
+ *value *= multiplier & OV8858_PLL1_MULTIPLIER_MASK;
+ ret = ov8858_get_register_8bit(sd, OV8858_PLL1_SYS_PRE_DIV, reglist);
+
+ if (ret < 0)
+ return ret;
+
+ sys_prediv = ret & OV8858_PLL1_SYS_PRE_DIV_MASK;
+ *value /= (sys_prediv + 3);
+ ret = ov8858_get_register_8bit(sd, OV8858_PLL1_SYS_DIVIDER, reglist);
+
+ if (ret < 0)
+ return ret;
+
+ if (ret & OV8858_PLL1_SYS_DIVIDER_MASK)
+ *value /= 2;
+
+ dev_dbg(&client->dev, "%s: *value: %d\n", __func__, *value);
+
+ return 0;
+}
+
+static int __ov8858_get_pll2a_values(struct v4l2_subdev *sd, int *value,
+ const struct ov8858_reg *reglist)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ unsigned int prediv_idx;
+ unsigned int multiplier;
+ unsigned int prediv_coef[] = {2, 3, 4, 5, 6, 8, 12, 16};
+ int ret;
+
+ ret = ov8858_get_register_8bit(sd, OV8858_PLL2_PREDIV0, reglist);
+ if (ret < 0)
+ return ret;
+
+ if (ret & OV8858_PLL2_PREDIV0_MASK)
+ *value /= 2;
+
+ ret = ov8858_get_register_8bit(sd, OV8858_PLL2_PREDIV, reglist);
+ if (ret < 0)
+ return ret;
+
+ prediv_idx = (ret & OV8858_PLL2_PREDIV_MASK);
+ *value = *value * 2 / prediv_coef[prediv_idx];
+
+ ret = ov8858_get_register_16bit(sd, OV8858_PLL2_MULTIPLIER, reglist);
+ if (ret < 0)
+ return ret;
+
+ multiplier = ret;
+ *value *= multiplier & OV8858_PLL2_MULTIPLIER_MASK;
+ dev_dbg(&client->dev, "%s: *value: %d\n", __func__, *value);
+
+ return 0;
+}
+static int __ov8858_get_pll2b_values(struct v4l2_subdev *sd, int *value,
+ const struct ov8858_reg *reglist)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ unsigned int dac_divider;
+ int ret;
+
+ ret = ov8858_get_register_8bit(sd, OV8858_PLL2_DAC_DIVIDER, reglist);
+ if (ret < 0)
+ return ret;
+
+ dac_divider = (ret & OV8858_PLL2_DAC_DIVIDER_MASK) + 1;
+ *value /= dac_divider;
+
+ dev_dbg(&client->dev, "%s: *value: %d\n", __func__, *value);
+
+ return 0;
+}
+static int __ov8858_get_pll2c_values(struct v4l2_subdev *sd, int *value,
+ const struct ov8858_reg *reglist)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ unsigned int sys_pre_div;
+ unsigned int sys_divider_idx;
+ unsigned int sys_divider_coef[] = {2, 3, 4, 5, 6, 7, 8, 10};
+ int ret;
+
+ ret = ov8858_get_register_8bit(sd, OV8858_PLL2_SYS_PRE_DIV, reglist);
+ if (ret < 0)
+ return ret;
+
+ sys_pre_div = (ret & OV8858_PLL2_SYS_PRE_DIV_MASK) + 1;
+ *value /= sys_pre_div;
+
+ ret = ov8858_get_register_8bit(sd, OV8858_PLL2_SYS_DIVIDER, reglist);
+ if (ret < 0)
+ return ret;
+
+ sys_divider_idx = ret & OV8858_PLL2_SYS_DIVIDER_MASK;
+ *value = *value * 2 / sys_divider_coef[sys_divider_idx];
+
+ dev_dbg(&client->dev, "%s: *value: %d\n", __func__, *value);
+
+ return 0;
+}
+
+static int ov8858_get_intg_factor(struct v4l2_subdev *sd,
+ struct camera_mipi_info *info,
+ const struct ov8858_reg *reglist)
+{
+ const unsigned int ext_clk = 19200000; /* Hz */
+ struct atomisp_sensor_mode_data *m = &info->data;
+ struct ov8858_device *dev = to_ov8858_sensor(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct device *d = &client->dev;
+ const struct ov8858_resolution *res =
+ &dev->curr_res_table[dev->fmt_idx];
+ unsigned int pll_sclksel1;
+ unsigned int pll_sclksel2;
+ unsigned int sys_pre_div;
+ unsigned int sclk_pdiv;
+ unsigned int sclk = ext_clk;
+ u16 hts;
+ int ret;
+
+ memset(&info->data, 0, sizeof(info->data));
+
+ ret = ov8858_get_register_8bit(sd, OV8858_PLL_SCLKSEL1, reglist);
+ if (ret < 0)
+ return ret;
+
+ dev_dbg(d, "%s: OV8858_PLL_SCLKSEL1: 0x%02x\n", __func__, ret);
+ pll_sclksel1 = ret & OV8858_PLL_SCLKSEL1_MASK;
+
+ ret = ov8858_get_register_8bit(sd, OV8858_PLL_SCLKSEL2, reglist);
+ if (ret < 0)
+ return ret;
+
+ dev_dbg(d, "%s: OV8858_PLL_SCLKSEL2: 0x%02x\n", __func__, ret);
+ pll_sclksel2 = ret & OV8858_PLL_SCLKSEL2_MASK;
+
+ if (pll_sclksel2) {
+ ret = __ov8858_get_pll2a_values(sd, &sclk, reglist);
+ if (ret < 0)
+ return ret;
+ ret = __ov8858_get_pll2b_values(sd, &sclk, reglist);
+ if (ret < 0)
+ return ret;
+ } else if (pll_sclksel1) {
+ ret = __ov8858_get_pll2a_values(sd, &sclk, reglist);
+ if (ret < 0)
+ return ret;
+ ret = __ov8858_get_pll2c_values(sd, &sclk, reglist);
+ if (ret < 0)
+ return ret;
+ } else {
+ ret = __ov8858_get_pll1_values(sd, &sclk, reglist);
+ if (ret < 0)
+ return ret;
+ }
+
+ ret = ov8858_get_register_8bit(sd, OV8858_SRB_HOST_INPUT_DIS, reglist);
+ if (ret < 0)
+ return ret;
+
+ dev_dbg(d, "%s: OV8858_SRB_HOST_INPUT_DIS: 0x%02x\n", __func__, ret);
+
+ sys_pre_div = ret & OV8858_SYS_PRE_DIV_MASK;
+ sys_pre_div >>= OV8858_SYS_PRE_DIV_OFFSET;
+
+ if (sys_pre_div == 1)
+ sclk /= 2;
+ else if (sys_pre_div == 2)
+ sclk /= 4;
+
+ sclk_pdiv = ret & OV8858_SCLK_PDIV_MASK;
+ sclk_pdiv >>= OV8858_SCLK_PDIV_OFFSET;
+
+ if (sclk_pdiv > 1)
+ sclk /= sclk_pdiv;
+
+ dev_dbg(d, "%s: sclk: %d\n", __func__, sclk);
+
+ dev->vt_pix_clk_freq_mhz = sclk;
+ m->vt_pix_clk_freq_mhz = sclk;
+
+ /* HTS and VTS */
+ m->frame_length_lines =
+ res->fps_options[dev->fps_index].lines_per_frame;
+ m->line_length_pck = res->fps_options[dev->fps_index].pixels_per_line;
+
+ m->coarse_integration_time_min = 0;
+ m->coarse_integration_time_max_margin = OV8858_INTEGRATION_TIME_MARGIN;
+ ret = ov8858_read_reg(client, OV8858_16BIT, OV8858_TIMING_HTS, &hts);
+ if (ret < 0)
+ return ret;
+ m->hts = hts;
+ dev_dbg(&client->dev, "%s: get HTS %d\n", __func__, hts);
+
+ /* OV sensors do not use fine integration time. */
+ m->fine_integration_time_min = 0;
+ m->fine_integration_time_max_margin = 0;
+
+ /*
+ * read_mode tells the user side whether binning is in use, so that
+ * it can calculate the correct exposure value. Set the read mode
+ * value accordingly.
+ */
+ m->read_mode = res->bin_factor_x ?
+ OV8858_READ_MODE_BINNING_ON : OV8858_READ_MODE_BINNING_OFF;
+
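+ /*
+ * The odd-increment registers appear to hold 1 when no pixels are
+ * skipped and 3 when every other pixel is skipped, so (inc + 1) / 2
+ * gives the per-axis binning factor reported to the ISP.
+ */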
+ ret = ov8858_get_register_8bit(sd, OV8858_H_INC_ODD, res->regs);
+ if (ret < 0)
+ return ret;
+ m->binning_factor_x = (ret + 1) / 2;
+
+ ret = ov8858_get_register_8bit(sd, OV8858_V_INC_ODD, res->regs);
+ if (ret < 0)
+ return ret;
+ m->binning_factor_y = (ret + 1) / 2;
+
+ /* Get the cropping and output resolution to ISP for this mode. */
+ ret = ov8858_get_register_16bit(sd, OV8858_HORIZONTAL_START_H,
+ res->regs);
+ if (ret < 0)
+ return ret;
+
+ m->crop_horizontal_start = ret;
+
+ ret = ov8858_get_register_16bit(sd, OV8858_VERTICAL_START_H, res->regs);
+ if (ret < 0)
+ return ret;
+
+ m->crop_vertical_start = ret;
+
+ ret = ov8858_get_register_16bit(sd, OV8858_HORIZONTAL_END_H, res->regs);
+ if (ret < 0)
+ return ret;
+
+ m->crop_horizontal_end = ret;
+
+ ret = ov8858_get_register_16bit(sd, OV8858_VERTICAL_END_H, res->regs);
+ if (ret < 0)
+ return ret;
+
+ m->crop_vertical_end = ret;
+
+ ret = ov8858_get_register_16bit(sd, OV8858_HORIZONTAL_OUTPUT_SIZE_H,
+ res->regs);
+ if (ret < 0)
+ return ret;
+
+ m->output_width = ret;
+
+ ret = ov8858_get_register_16bit(sd, OV8858_VERTICAL_OUTPUT_SIZE_H,
+ res->regs);
+ if (ret < 0)
+ return ret;
+
+ m->output_height = ret;
+
+ return 0;
+}
+
+/*
+ * distance - calculate the aspect-ratio distance to a resolution
+ * @res: resolution
+ * @w: width
+ * @h: height
+ *
+ * Get the gap between the aspect ratios res_w/res_h and w/h:
+ * distance = (res_w/res_h - w/h) / (w/h) * 8192
+ * Resolutions smaller than w/h in either dimension are not considered.
+ * Ratio gaps larger than 1/8 are not considered either.
+ * Returns the distance value, or -1 if the resolution does not qualify.
+ */
+#define LARGEST_ALLOWED_RATIO_MISMATCH 1024
+static int distance(struct ov8858_resolution const *res, const u32 w,
+ const u32 h)
+{
+ int ratio;
+ int distance;
+
+ if (w == 0 || h == 0 ||
+ res->width < w || res->height < h)
+ return -1;
+
+ ratio = res->width << 13;
+ ratio /= w;
+ ratio *= h;
+ ratio /= res->height;
+
+ distance = abs(ratio - 8192);
+
+ if (distance > LARGEST_ALLOWED_RATIO_MISMATCH)
+ return -1;
+ return distance;
+}
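+
+/*
+ * Worked example (hypothetical request): asking for 1920x1080 against the
+ * 3276x1848 mode gives ratio ~8168 and distance 24 (a match), while the
+ * 3280x2464 mode gives ratio ~6133 and distance 2059, which exceeds
+ * LARGEST_ALLOWED_RATIO_MISMATCH (1024), so that mode is rejected.
+ */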
+
+/*
+ * Returns the index of the nearest enveloping (equal or larger) resolution.
+ * @w: width
+ * @h: height
+ *
+ * Matching is done based on enveloping resolution and aspect ratio.
+ * If the aspect ratio cannot be matched to any index, -1 is returned.
+ */
+static int nearest_resolution_index(struct v4l2_subdev *sd, int w, int h)
+{
+ int i;
+ int idx = -1;
+ int dist;
+ int fps_diff;
+ int min_fps_diff = INT_MAX;
+ int min_dist = INT_MAX;
+ int min_res_w = INT_MAX;
+ const struct ov8858_resolution *tmp_res = NULL;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct ov8858_device *dev = to_ov8858_sensor(sd);
+
+ dev_dbg(&client->dev, "%s: w=%d, h=%d\n", __func__, w, h);
+
+ for (i = 0; i < dev->entries_curr_table; i++) {
+ tmp_res = &dev->curr_res_table[i];
+ dist = distance(tmp_res, w, h);
+ dev_dbg(&client->dev,
+ "%s[%d]: %dx%d distance=%d\n", tmp_res->desc,
+ i, tmp_res->width, tmp_res->height, dist);
+ if (dist == -1)
+ continue;
+ if (dist < min_dist) {
+ min_dist = dist;
+ min_res_w = tmp_res->width;
+ min_fps_diff = __ov8858_min_fps_diff(dev->fps,
+ tmp_res->fps_options);
+ idx = i;
+ } else if (dist == min_dist) {
+ fps_diff = __ov8858_min_fps_diff(dev->fps,
+ tmp_res->fps_options);
+ if (fps_diff < min_fps_diff) {
+ min_fps_diff = fps_diff;
+ idx = i;
+ }
+ if (tmp_res->width < min_res_w) {
+ min_res_w = tmp_res->width;
+ idx = i;
+ }
+ }
+ }
+
+ return idx;
+}
+
+static int ov8858_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *format)
+{
+ struct v4l2_mbus_framefmt *fmt = &format->format;
+ struct ov8858_device *dev = to_ov8858_sensor(sd);
+ struct camera_mipi_info *ov8858_info = NULL;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ const struct ov8858_resolution *res;
+ int ret;
+ int idx;
+
+ if (format->pad)
+ return -EINVAL;
+ if (!fmt)
+ return -EINVAL;
+
+ ov8858_info = v4l2_get_subdev_hostdata(sd);
+ if (ov8858_info == NULL)
+ return -EINVAL;
+
+ mutex_lock(&dev->input_lock);
+
+ if ((fmt->width > OV8858_RES_WIDTH_MAX) ||
+ (fmt->height > OV8858_RES_HEIGHT_MAX)) {
+ fmt->width = OV8858_RES_WIDTH_MAX;
+ fmt->height = OV8858_RES_HEIGHT_MAX;
+ } else {
+ idx = nearest_resolution_index(sd, fmt->width, fmt->height);
+
+ /*
+ * nearest_resolution_index() doesn't return smaller
+ * resolutions. If it fails, the requested resolution is
+ * higher than we can support. Fall back to the highest
+ * supported resolution in that case.
+ */
+ if (idx == -1)
+ idx = dev->entries_curr_table - 1;
+
+ fmt->width = dev->curr_res_table[idx].width;
+ fmt->height = dev->curr_res_table[idx].height;
+ }
+
+ fmt->code = MEDIA_BUS_FMT_SBGGR10_1X10;
+ if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
+ cfg->try_fmt = *fmt;
+ mutex_unlock(&dev->input_lock);
+ return 0;
+ }
+
+ dev->fmt_idx = nearest_resolution_index(sd, fmt->width, fmt->height);
+ if (dev->fmt_idx == -1) {
+ ret = -EINVAL;
+ goto out;
+ }
+ res = &dev->curr_res_table[dev->fmt_idx];
+ dev_dbg(&client->dev, "%s: selected width = %d, height = %d\n",
+ __func__, res->width, res->height);
+
+ /* Adjust the FPS selection based on the resolution selected */
+ dev->fps_index = __ov8858_nearest_fps_index(dev->fps, res->fps_options);
+ dev->fps = res->fps_options[dev->fps_index].fps;
+ dev->regs = res->fps_options[dev->fps_index].regs;
+ if (!dev->regs)
+ dev->regs = res->regs;
+
+ ret = ov8858_write_reg_array(client, dev->regs);
+ if (ret)
+ goto out;
+
+ dev->pixels_per_line = res->fps_options[dev->fps_index].pixels_per_line;
+ dev->lines_per_frame = res->fps_options[dev->fps_index].lines_per_frame;
+
+ /* The ov8858 only supports RAW10 output */
+ ov8858_info->metadata_width = res->width * 10 / 8;
+ ov8858_info->metadata_height = 2;
+ ov8858_info->metadata_format = ATOMISP_INPUT_FORMAT_EMBEDDED;
+
+ /* Set the initial exposure */
+ ret = __ov8858_set_exposure(sd, dev->exposure, dev->gain,
+ dev->digital_gain, &dev->pixels_per_line,
+ &dev->lines_per_frame);
+ if (ret)
+ goto out;
+
+ ret = ov8858_get_intg_factor(sd, ov8858_info, dev->regs);
+
+out:
+ mutex_unlock(&dev->input_lock);
+
+ return ret;
+}
+
+static int ov8858_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *format)
+{
+ struct v4l2_mbus_framefmt *fmt = &format->format;
+ struct ov8858_device *dev = to_ov8858_sensor(sd);
+
+ if (format->pad)
+ return -EINVAL;
+ if (!fmt)
+ return -EINVAL;
+
+ mutex_lock(&dev->input_lock);
+ fmt->width = dev->curr_res_table[dev->fmt_idx].width;
+ fmt->height = dev->curr_res_table[dev->fmt_idx].height;
+ fmt->code = MEDIA_BUS_FMT_SBGGR10_1X10;
+ mutex_unlock(&dev->input_lock);
+
+ return 0;
+}
+
+static int ov8858_detect(struct i2c_client *client, u16 *id)
+{
+ struct i2c_adapter *adapter = client->adapter;
+ u16 id_hi = 0;
+ u16 id_low = 0;
+ int ret;
+
+ /* i2c check */
+ if (!i2c_check_functionality(adapter, I2C_FUNC_I2C))
+ return -ENODEV;
+
+ dev_dbg(&client->dev, "%s: I2C functionality ok\n", __func__);
+ ret = ov8858_read_reg(client, OV8858_8BIT, OV8858_CHIP_ID_HIGH, &id_hi);
+ if (ret)
+ return ret;
+ dev_dbg(&client->dev, "%s: id_high = 0x%04x\n", __func__, id_hi);
+ ret = ov8858_read_reg(client, OV8858_8BIT, OV8858_CHIP_ID_LOW, &id_low);
+ if (ret)
+ return ret;
+ dev_dbg(&client->dev, "%s: id_low = 0x%04x\n", __func__, id_low);
+ *id = (id_hi << 8) | id_low;
+
+ dev_dbg(&client->dev, "%s: chip_id = 0x%04x\n", __func__, *id);
+
+ dev_info(&client->dev, "%s: chip_id = 0x%04x\n", __func__, *id);
+ if (*id != OV8858_CHIP_ID)
+ return -ENODEV;
+
+ /* Stream off now. */
+ return ov8858_write_reg(client, OV8858_8BIT, OV8858_STREAM_MODE, 0);
+}
+
+static void __ov8858_print_timing(struct v4l2_subdev *sd)
+{
+ struct ov8858_device *dev = to_ov8858_sensor(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ u16 width = dev->curr_res_table[dev->fmt_idx].width;
+ u16 height = dev->curr_res_table[dev->fmt_idx].height;
+
+ dev_dbg(&client->dev, "Dump ov8858 timing in stream on:\n");
+ dev_dbg(&client->dev, "width: %d:\n", width);
+ dev_dbg(&client->dev, "height: %d:\n", height);
+ dev_dbg(&client->dev, "pixels_per_line: %d:\n", dev->pixels_per_line);
+ dev_dbg(&client->dev, "line per frame: %d:\n", dev->lines_per_frame);
+ dev_dbg(&client->dev, "pix freq: %d:\n", dev->vt_pix_clk_freq_mhz);
+ /* updated formula: pixels_per_line = 2 * HTS */
+ /* updated formula: fps = SCLK / (VTS * HTS) */
+ dev_dbg(&client->dev, "init fps: %d:\n", dev->vt_pix_clk_freq_mhz /
+ (dev->pixels_per_line / 2) / dev->lines_per_frame);
+ dev_dbg(&client->dev, "HBlank: %d nS:\n",
+ 1000 * (dev->pixels_per_line - width) /
+ (dev->vt_pix_clk_freq_mhz / 1000000));
+ dev_dbg(&client->dev, "VBlank: %d uS:\n",
+ (dev->lines_per_frame - height) * dev->pixels_per_line /
+ (dev->vt_pix_clk_freq_mhz / 1000000));
+}
+
+/*
+ * ov8858 stream on/off
+ */
+static int ov8858_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct ov8858_device *dev = to_ov8858_sensor(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret;
+ u16 val;
+
+ dev_dbg(&client->dev, "%s: enable = %d\n", __func__, enable);
+
+ /* Set orientation */
+ ret = ov8858_read_reg(client, OV8858_8BIT, OV8858_FORMAT2, &val);
+ if (ret)
+ return ret;
+
+ ret = ov8858_write_reg(client, OV8858_8BIT, OV8858_FORMAT2,
+ dev->hflip ? val | OV8858_FLIP_ENABLE :
+ val & ~OV8858_FLIP_ENABLE);
+ if (ret)
+ return ret;
+
+ ret = ov8858_read_reg(client, OV8858_8BIT, OV8858_FORMAT1, &val);
+ if (ret)
+ return ret;
+
+ ret = ov8858_write_reg(client, OV8858_8BIT, OV8858_FORMAT1,
+ dev->vflip ? val | OV8858_FLIP_ENABLE :
+ val & ~OV8858_FLIP_ENABLE);
+ if (ret)
+ return ret;
+
+ mutex_lock(&dev->input_lock);
+ if (enable) {
+ __ov8858_print_timing(sd);
+ ret = ov8858_write_reg_array(client, ov8858_streaming);
+ if (ret != 0) {
+ dev_err(&client->dev, "write_reg_array err\n");
+ goto out;
+ }
+ dev->streaming = 1;
+ } else {
+ ret = ov8858_write_reg_array(client, ov8858_soft_standby);
+ if (ret != 0) {
+ dev_err(&client->dev, "write_reg_array err\n");
+ goto out;
+ }
+ dev->streaming = 0;
+ dev->fps_index = 0;
+ dev->fps = 0;
+ }
+out:
+ mutex_unlock(&dev->input_lock);
+ return ret;
+}
+
+static int __update_ov8858_device_settings(struct ov8858_device *dev,
+ u16 sensor_id)
+{
+ if (sensor_id == OV8858_CHIP_ID)
+#ifdef CONFIG_PLATFORM_BTNS
+ dev->vcm_driver = &ov8858_vcms[OV8858_ID_DEFAULT];
+#else
+ dev->vcm_driver = &ov8858_vcms[OV8858_SUNNY];
+#endif
+ else
+ return -ENODEV;
+
+ if (dev->vcm_driver && dev->vcm_driver->init)
+ return dev->vcm_driver->init(&dev->sd);
+
+ return 0;
+}
+
+static int ov8858_s_config(struct v4l2_subdev *sd,
+ int irq, void *pdata)
+{
+ struct ov8858_device *dev = to_ov8858_sensor(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ u16 sensor_id;
+ int ret;
+
+ if (pdata == NULL)
+ return -ENODEV;
+
+ dev->platform_data = pdata;
+
+ mutex_lock(&dev->input_lock);
+
+ if (dev->platform_data->platform_init) {
+ ret = dev->platform_data->platform_init(client);
+ if (ret) {
+ mutex_unlock(&dev->input_lock);
+ dev_err(&client->dev, "platform init error %d!\n", ret);
+ return ret;
+ }
+ }
+
+ ret = __ov8858_s_power(sd, 1);
+ if (ret) {
+ dev_err(&client->dev, "power-up error %d!\n", ret);
+ mutex_unlock(&dev->input_lock);
+ return ret;
+ }
+
+ ret = dev->platform_data->csi_cfg(sd, 1);
+ if (ret)
+ goto fail_csi_cfg;
+
+ /* config & detect sensor */
+ ret = ov8858_detect(client, &sensor_id);
+ if (ret) {
+ dev_err(&client->dev, "detect error %d!\n", ret);
+ goto fail_detect;
+ }
+
+ dev->sensor_id = sensor_id;
+
+ /* power off sensor */
+ ret = __ov8858_s_power(sd, 0);
+ if (ret) {
+ dev->platform_data->csi_cfg(sd, 0);
+ dev_err(&client->dev, "__ov8858_s_power-down error %d!\n", ret);
+ goto fail_update;
+ }
+
+ /* Resolution settings depend on sensor type and platform */
+ ret = __update_ov8858_device_settings(dev, dev->sensor_id);
+ if (ret) {
+ dev->platform_data->csi_cfg(sd, 0);
+ dev_err(&client->dev, "__update_ov8858_device_settings error %d!\n", ret);
+ goto fail_update;
+ }
+
+ mutex_unlock(&dev->input_lock);
+ return ret;
+
+fail_detect:
+ dev->platform_data->csi_cfg(sd, 0);
+fail_csi_cfg:
+ __ov8858_s_power(sd, 0);
+fail_update:
+ if (dev->platform_data->platform_deinit)
+ dev->platform_data->platform_deinit();
+ mutex_unlock(&dev->input_lock);
+ dev_err(&client->dev, "sensor power-gating failed\n");
+ return ret;
+}
+
+static int
+ov8858_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ if (code->index)
+ return -EINVAL;
+ code->code = MEDIA_BUS_FMT_SBGGR10_1X10;
+
+ return 0;
+}
+
+static int
+ov8858_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ int index = fse->index;
+ struct ov8858_device *dev = to_ov8858_sensor(sd);
+
+ mutex_lock(&dev->input_lock);
+ if (index >= dev->entries_curr_table) {
+ mutex_unlock(&dev->input_lock);
+ return -EINVAL;
+ }
+
+ fse->min_width = dev->curr_res_table[index].width;
+ fse->min_height = dev->curr_res_table[index].height;
+ fse->max_width = dev->curr_res_table[index].width;
+ fse->max_height = dev->curr_res_table[index].height;
+ mutex_unlock(&dev->input_lock);
+
+ return 0;
+}
+
+static int ov8858_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct ov8858_device *dev = container_of(
+ ctrl->handler, struct ov8858_device, ctrl_handler);
+ struct i2c_client *client = v4l2_get_subdevdata(&dev->sd);
+
+ /*
+ * input_lock is taken by the control framework, so it
+ * doesn't need to be taken here.
+ */
+
+ switch (ctrl->id) {
+ case V4L2_CID_RUN_MODE:
+ switch (ctrl->val) {
+ case ATOMISP_RUN_MODE_VIDEO:
+ dev->curr_res_table = ov8858_res_video;
+ dev->entries_curr_table = ARRAY_SIZE(ov8858_res_video);
+ break;
+ case ATOMISP_RUN_MODE_STILL_CAPTURE:
+ dev->curr_res_table = ov8858_res_still;
+ dev->entries_curr_table = ARRAY_SIZE(ov8858_res_still);
+ break;
+ default:
+ dev->curr_res_table = ov8858_res_preview;
+ dev->entries_curr_table =
+ ARRAY_SIZE(ov8858_res_preview);
+ }
+
+ dev->fmt_idx = 0;
+ dev->fps_index = 0;
+
+ return 0;
+ case V4L2_CID_FOCUS_ABSOLUTE:
+ if (dev->vcm_driver && dev->vcm_driver->t_focus_abs)
+ return dev->vcm_driver->t_focus_abs(&dev->sd,
+ ctrl->val);
+ return 0;
+ case V4L2_CID_EXPOSURE_AUTO_PRIORITY:
+ if (ctrl->val == V4L2_EXPOSURE_AUTO)
+ dev->limit_exposure_flag = false;
+ else if (ctrl->val == V4L2_EXPOSURE_APERTURE_PRIORITY)
+ dev->limit_exposure_flag = true;
+ return 0;
+ case V4L2_CID_HFLIP:
+ dev->hflip = ctrl->val;
+ return 0;
+ case V4L2_CID_VFLIP:
+ dev->vflip = ctrl->val;
+ return 0;
+ default:
+ dev_err(&client->dev, "%s: Error: Invalid ctrl: 0x%X\n",
+ __func__, ctrl->id);
+ return -EINVAL;
+ }
+}
+
+static int ov8858_g_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct ov8858_device *dev = container_of(
+ ctrl->handler, struct ov8858_device, ctrl_handler);
+ struct i2c_client *client = v4l2_get_subdevdata(&dev->sd);
+ int r_odd, r_even;
+ int i = dev->fmt_idx;
+
+ switch (ctrl->id) {
+ case V4L2_CID_FOCUS_STATUS:
+ if (dev->vcm_driver && dev->vcm_driver->q_focus_status)
+ return dev->vcm_driver->q_focus_status(&dev->sd,
+ &(ctrl->val));
+ return 0;
+ case V4L2_CID_BIN_FACTOR_HORZ:
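+ /*
+ * With odd/even increments of 1/1 (no binning) this yields
+ * fls(2) - 2 = 0, with 3/1 (2x binning) fls(4) - 2 = 1, i.e. the
+ * control appears to report log2 of the binning factor.
+ */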
+ r_odd = ov8858_get_register_8bit(&dev->sd, OV8858_H_INC_ODD,
+ dev->curr_res_table[i].regs);
+ if (r_odd < 0)
+ return r_odd;
+ r_even = ov8858_get_register_8bit(&dev->sd, OV8858_H_INC_EVEN,
+ dev->curr_res_table[i].regs);
+ if (r_even < 0)
+ return r_even;
+ ctrl->val = fls(r_odd + (r_even)) - 2;
+ return 0;
+
+ case V4L2_CID_BIN_FACTOR_VERT:
+ r_odd = ov8858_get_register_8bit(&dev->sd, OV8858_V_INC_ODD,
+ dev->curr_res_table[i].regs);
+ if (r_odd < 0)
+ return r_odd;
+ r_even = ov8858_get_register_8bit(&dev->sd, OV8858_V_INC_EVEN,
+ dev->curr_res_table[i].regs);
+ if (r_even < 0)
+ return r_even;
+ ctrl->val = fls(r_odd + (r_even)) - 2;
+ return 0;
+ case V4L2_CID_HFLIP:
+ ctrl->val = dev->hflip;
+ break;
+ case V4L2_CID_VFLIP:
+ ctrl->val = dev->vflip;
+ break;
+ case V4L2_CID_EXPOSURE_ABSOLUTE:
+ ctrl->val = dev->exposure;
+ break;
+ default:
+ dev_warn(&client->dev,
+ "%s: Error: Invalid ctrl: 0x%X\n", __func__, ctrl->id);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+ov8858_g_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_frame_interval *interval)
+{
+ struct ov8858_device *dev = to_ov8858_sensor(sd);
+ const struct ov8858_resolution *res =
+ &dev->curr_res_table[dev->fmt_idx];
+
+ mutex_lock(&dev->input_lock);
+ interval->interval.denominator = res->fps_options[dev->fps_index].fps;
+ interval->interval.numerator = 1;
+ mutex_unlock(&dev->input_lock);
+ return 0;
+}
+
+static int __ov8858_s_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_frame_interval *interval)
+{
+ struct ov8858_device *dev = to_ov8858_sensor(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ const struct ov8858_resolution *res =
+ &dev->curr_res_table[dev->fmt_idx];
+ struct camera_mipi_info *info = NULL;
+ unsigned int fps_index;
+ int ret = 0;
+ int fps;
+
+ info = v4l2_get_subdev_hostdata(sd);
+ if (info == NULL)
+ return -EINVAL;
+
+ if (!interval->interval.numerator)
+ interval->interval.numerator = 1;
+
+ fps = interval->interval.denominator / interval->interval.numerator;
+
+ /* No need to proceed further if we are not streaming */
+ if (!dev->streaming) {
+ /* Save the new FPS and use it while selecting setting */
+ dev->fps = fps;
+ return 0;
+ }
+
+ /* Ignore if we are already using the required FPS. */
+ if (fps == res->fps_options[dev->fps_index].fps)
+ return 0;
+
+ fps_index = __ov8858_nearest_fps_index(fps, res->fps_options);
+
+ if (res->fps_options[fps_index].regs &&
+ res->fps_options[fps_index].regs != dev->regs) {
+ dev_err(&client->dev,
+ "Sensor is streaming, can't apply new configuration\n");
+ return -EBUSY;
+ }
+
+ dev->fps_index = fps_index;
+ dev->fps = res->fps_options[dev->fps_index].fps;
+
+ /* Update the new frametimings based on FPS */
+ dev->pixels_per_line =
+ res->fps_options[dev->fps_index].pixels_per_line;
+ dev->lines_per_frame =
+ res->fps_options[dev->fps_index].lines_per_frame;
+
+ /* Update the frame timing. Consider the current exposure/gain as well. */
+ ret = __ov8858_update_frame_timing(sd,
+ &dev->pixels_per_line, &dev->lines_per_frame);
+ if (ret)
+ return ret;
+
+ /* Update the new values so that user side knows the current settings */
+ ret = ov8858_get_intg_factor(sd, info, dev->regs);
+ if (ret)
+ return ret;
+
+ interval->interval.denominator = res->fps_options[dev->fps_index].fps;
+ interval->interval.numerator = 1;
+ __ov8858_print_timing(sd);
+
+ return ret;
+}
+
+static int ov8858_s_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_frame_interval *interval)
+{
+ struct ov8858_device *dev = to_ov8858_sensor(sd);
+ int ret;
+
+ mutex_lock(&dev->input_lock);
+ ret = __ov8858_s_frame_interval(sd, interval);
+ mutex_unlock(&dev->input_lock);
+
+ return ret;
+}
+
+static int ov8858_g_skip_frames(struct v4l2_subdev *sd, u32 *frames)
+{
+ struct ov8858_device *dev = to_ov8858_sensor(sd);
+
+ mutex_lock(&dev->input_lock);
+ *frames = dev->curr_res_table[dev->fmt_idx].skip_frames;
+ mutex_unlock(&dev->input_lock);
+
+ return 0;
+}
+
+static const struct v4l2_subdev_sensor_ops ov8858_sensor_ops = {
+ .g_skip_frames = ov8858_g_skip_frames,
+};
+
+static const struct v4l2_ctrl_ops ctrl_ops = {
+ .s_ctrl = ov8858_s_ctrl,
+ .g_volatile_ctrl = ov8858_g_ctrl,
+};
+
+static const struct v4l2_subdev_video_ops ov8858_video_ops = {
+ .s_stream = ov8858_s_stream,
+ .g_frame_interval = ov8858_g_frame_interval,
+ .s_frame_interval = ov8858_s_frame_interval,
+};
+
+static const struct v4l2_subdev_core_ops ov8858_core_ops = {
+ .s_power = ov8858_s_power,
+ .ioctl = ov8858_ioctl,
+ .init = ov8858_init,
+};
+
+static const struct v4l2_subdev_pad_ops ov8858_pad_ops = {
+ .enum_mbus_code = ov8858_enum_mbus_code,
+ .enum_frame_size = ov8858_enum_frame_size,
+ .get_fmt = ov8858_get_fmt,
+ .set_fmt = ov8858_set_fmt,
+};
+
+static const struct v4l2_subdev_ops ov8858_ops = {
+ .core = &ov8858_core_ops,
+ .video = &ov8858_video_ops,
+ .pad = &ov8858_pad_ops,
+ .sensor = &ov8858_sensor_ops,
+};
+
+static const struct media_entity_operations ov_entity_ops = {
+ .link_setup = NULL,
+};
+
+static int ov8858_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct ov8858_device *dev = to_ov8858_sensor(sd);
+ if (dev->platform_data->platform_deinit)
+ dev->platform_data->platform_deinit();
+
+ media_entity_cleanup(&dev->sd.entity);
+ v4l2_ctrl_handler_free(&dev->ctrl_handler);
+ dev->platform_data->csi_cfg(sd, 0);
+ v4l2_device_unregister_subdev(sd);
+ kfree(dev);
+
+ return 0;
+}
+
+static const char * const ctrl_run_mode_menu[] = {
+ NULL,
+ "Video",
+ "Still capture",
+ "Continuous capture",
+ "Preview",
+};
+
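+/*
+ * Index 0 of the menu above is intentionally NULL, presumably so that the
+ * menu indices line up with the ATOMISP_RUN_MODE_* values handled in
+ * ov8858_s_ctrl(); hence .min = 1 in the control below.
+ */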
+static const struct v4l2_ctrl_config ctrl_run_mode = {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_RUN_MODE,
+ .name = "run mode",
+ .type = V4L2_CTRL_TYPE_MENU,
+ .min = 1,
+ .def = 4,
+ .max = 4,
+ .qmenu = ctrl_run_mode_menu,
+};
+
+static const struct v4l2_ctrl_config ctrls[] = {
+ {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_VFLIP,
+ .name = "Vertical flip",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .min = false,
+ .max = true,
+ .step = 1,
+ }, {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_HFLIP,
+ .name = "Horizontal flip",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .min = false,
+ .max = true,
+ .step = 1,
+ }, {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_EXPOSURE_ABSOLUTE,
+ .name = "Absolute exposure",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .max = 0xffff,
+ .min = 0x0,
+ .step = 1,
+ .def = 0x00,
+ .flags = V4L2_CTRL_FLAG_READ_ONLY | V4L2_CTRL_FLAG_VOLATILE,
+ }, {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_FOCUS_ABSOLUTE,
+ .name = "Focus absolute",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .step = 1,
+ .max = OV8858_MAX_FOCUS_POS,
+ }, {
+ /* This one is junk: see the spec for proper use of this CID. */
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_FOCUS_STATUS,
+ .name = "Focus status",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .step = 1,
+ .max = 100,
+ .flags = V4L2_CTRL_FLAG_READ_ONLY | V4L2_CTRL_FLAG_VOLATILE,
+ }, {
+ /* This is crap. For compatibility use only. */
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_FOCAL_ABSOLUTE,
+ .name = "Focal lenght",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = (OV8858_FOCAL_LENGTH_NUM << 16) |
+ OV8858_FOCAL_LENGTH_DEM,
+ .max = (OV8858_FOCAL_LENGTH_NUM << 16) |
+ OV8858_FOCAL_LENGTH_DEM,
+ .step = 1,
+ .def = (OV8858_FOCAL_LENGTH_NUM << 16) |
+ OV8858_FOCAL_LENGTH_DEM,
+ .flags = V4L2_CTRL_FLAG_READ_ONLY,
+ }, {
+ /* This one is crap, too. For compatibility use only. */
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_FNUMBER_ABSOLUTE,
+ .name = "F-number",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = (OV8858_F_NUMBER_DEFAULT_NUM << 16) |
+ OV8858_F_NUMBER_DEM,
+ .max = (OV8858_F_NUMBER_DEFAULT_NUM << 16) |
+ OV8858_F_NUMBER_DEM,
+ .step = 1,
+ .def = (OV8858_F_NUMBER_DEFAULT_NUM << 16) |
+ OV8858_F_NUMBER_DEM,
+ .flags = V4L2_CTRL_FLAG_READ_ONLY,
+ }, {
+ /*
+ * The most utter crap. _Never_ use this, even for
+ * compatibility reasons!
+ */
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_FNUMBER_RANGE,
+ .name = "F-number range",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = (OV8858_F_NUMBER_DEFAULT_NUM << 24) |
+ (OV8858_F_NUMBER_DEM << 16) |
+ (OV8858_F_NUMBER_DEFAULT_NUM << 8) |
+ OV8858_F_NUMBER_DEM,
+ .max = (OV8858_F_NUMBER_DEFAULT_NUM << 24) |
+ (OV8858_F_NUMBER_DEM << 16) |
+ (OV8858_F_NUMBER_DEFAULT_NUM << 8) |
+ OV8858_F_NUMBER_DEM,
+ .step = 1,
+ .def = (OV8858_F_NUMBER_DEFAULT_NUM << 24) |
+ (OV8858_F_NUMBER_DEM << 16) |
+ (OV8858_F_NUMBER_DEFAULT_NUM << 8) |
+ OV8858_F_NUMBER_DEM,
+ .flags = V4L2_CTRL_FLAG_READ_ONLY,
+ }, {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_BIN_FACTOR_HORZ,
+ .name = "Horizontal binning factor",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .max = OV8858_BIN_FACTOR_MAX,
+ .step = 1,
+ .flags = V4L2_CTRL_FLAG_READ_ONLY | V4L2_CTRL_FLAG_VOLATILE,
+ }, {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_BIN_FACTOR_VERT,
+ .name = "Vertical binning factor",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .max = OV8858_BIN_FACTOR_MAX,
+ .step = 1,
+ .flags = V4L2_CTRL_FLAG_READ_ONLY | V4L2_CTRL_FLAG_VOLATILE,
+ }, {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_EXPOSURE_AUTO_PRIORITY,
+ .name = "Exposure auto priority",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = V4L2_EXPOSURE_AUTO,
+ .max = V4L2_EXPOSURE_APERTURE_PRIORITY,
+ .step = 1,
+ }
+};
+
+static int ov8858_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct ov8858_device *dev;
+ unsigned int i;
+ int ret = 0;
+ struct camera_sensor_platform_data *pdata;
+
+ dev_dbg(&client->dev, "%s:\n", __func__);
+
+ /* allocate sensor device & init sub device */
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev) {
+ dev_err(&client->dev, "%s: out of memory\n", __func__);
+ return -ENOMEM;
+ }
+
+ mutex_init(&dev->input_lock);
+
+ if (id)
+ dev->i2c_id = id->driver_data;
+ dev->fmt_idx = 0;
+ dev->sensor_id = OV_ID_DEFAULT;
+ dev->vcm_driver = &ov8858_vcms[OV8858_ID_DEFAULT];
+
+ v4l2_i2c_subdev_init(&(dev->sd), client, &ov8858_ops);
+
+ if (ACPI_COMPANION(&client->dev)) {
+ pdata = gmin_camera_platform_data(&dev->sd,
+ ATOMISP_INPUT_FORMAT_RAW_10,
+ atomisp_bayer_order_bggr);
+ if (!pdata) {
+ dev_err(&client->dev,
+ "%s: failed to get acpi platform data\n",
+ __func__);
+ goto out_free;
+ }
+ ret = ov8858_s_config(&dev->sd, client->irq, pdata);
+ if (ret) {
+ dev_err(&client->dev,
+ "%s: failed to set config\n", __func__);
+ goto out_free;
+ }
+ ret = atomisp_register_i2c_module(&dev->sd, pdata, RAW_CAMERA);
+ if (ret) {
+ dev_err(&client->dev,
+ "%s: failed to register subdev\n", __func__);
+ goto out_free;
+ }
+ }
+ /*
+ * sd->name is set to the sensor driver name by the V4L2 core;
+ * change it to the sensor name here.
+ */
+ snprintf(dev->sd.name, sizeof(dev->sd.name), "%s%x %d-%04x",
+ OV_SUBDEV_PREFIX, dev->sensor_id,
+ i2c_adapter_id(client->adapter), client->addr);
+
+ dev->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ dev->pad.flags = MEDIA_PAD_FL_SOURCE;
+ dev->format.code = MEDIA_BUS_FMT_SBGGR10_1X10;
+ dev->sd.entity.ops = &ov_entity_ops;
+ dev->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
+
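+ /*
+ * Reserve room for the ctrls[] array plus (presumably) the custom
+ * run_mode control registered right below, hence the "+ 1".
+ */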
+ ret = v4l2_ctrl_handler_init(&dev->ctrl_handler, ARRAY_SIZE(ctrls) + 1);
+ if (ret) {
+ ov8858_remove(client);
+ return ret;
+ }
+
+ dev->run_mode = v4l2_ctrl_new_custom(&dev->ctrl_handler,
+ &ctrl_run_mode, NULL);
+
+ for (i = 0; i < ARRAY_SIZE(ctrls); i++)
+ v4l2_ctrl_new_custom(&dev->ctrl_handler, &ctrls[i], NULL);
+
+ if (dev->ctrl_handler.error) {
+ ov8858_remove(client);
+ return dev->ctrl_handler.error;
+ }
+
+ /* Use same lock for controls as for everything else. */
+ dev->ctrl_handler.lock = &dev->input_lock;
+ dev->sd.ctrl_handler = &dev->ctrl_handler;
+ v4l2_ctrl_handler_setup(&dev->ctrl_handler);
+
+ ret = media_entity_pads_init(&dev->sd.entity, 1, &dev->pad);
+ if (ret) {
+ ov8858_remove(client);
+ return ret;
+ }
+
+ return 0;
+
+out_free:
+ v4l2_device_unregister_subdev(&dev->sd);
+ kfree(dev);
+ return ret;
+}
+
+static const struct i2c_device_id ov8858_id[] = {
+ {OV8858_NAME, 0},
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, ov8858_id);
+
+static const struct acpi_device_id ov8858_acpi_match[] = {
+ {"INT3477"},
+ {},
+};
+
+static struct i2c_driver ov8858_driver = {
+ .driver = {
+ .name = OV8858_NAME,
+ .acpi_match_table = ACPI_PTR(ov8858_acpi_match),
+ },
+ .probe = ov8858_probe,
+ .remove = ov8858_remove,
+ .id_table = ov8858_id,
+};
+
+static int __init ov8858_init_mod(void)
+{
+ return i2c_add_driver(&ov8858_driver);
+}
+
+static void __exit ov8858_exit_mod(void)
+{
+ i2c_del_driver(&ov8858_driver);
+}
+
+module_init(ov8858_init_mod);
+module_exit(ov8858_exit_mod);
+
+MODULE_DESCRIPTION("A low-level driver for Omnivision OV8858 sensors");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/media/atomisp/i2c/ov8858.h b/drivers/staging/media/atomisp/i2c/ov8858.h
new file mode 100644
index 000000000000..9be6a0e63861
--- /dev/null
+++ b/drivers/staging/media/atomisp/i2c/ov8858.h
@@ -0,0 +1,1482 @@
+/*
+ * Support for the Omnivision OV8858 camera sensor.
+ *
+ * Copyright (c) 2014 Intel Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+
+#ifndef __OV8858_H__
+#define __OV8858_H__
+#include "../include/linux/atomisp_platform.h"
+#include <media/v4l2-ctrls.h>
+
+#define I2C_MSG_LENGTH 0x2
+
+/*
+ * This should be added into include/linux/videodev2.h
+ * NOTE: This is most likely not used anywhere.
+ */
+#define V4L2_IDENT_OV8858 V4L2_IDENT_UNKNOWN
+
+/*
+ * Indexes for VCM driver lists
+ */
+#define OV8858_ID_DEFAULT 0
+#define OV8858_SUNNY 1
+
+#define OV8858_OTP_START_ADDR 0x7010
+#define OV8858_OTP_END_ADDR 0x7186
+
+/*
+ * ov8858 System control registers
+ */
+
+#define OV8858_OTP_LOAD_CTRL 0x3D81
+#define OV8858_OTP_MODE_CTRL 0x3D84
+#define OV8858_OTP_START_ADDR_REG 0x3D88
+#define OV8858_OTP_END_ADDR_REG 0x3D8A
+#define OV8858_OTP_ISP_CTRL2 0x5002
+
+#define OV8858_OTP_MODE_MANUAL BIT(6)
+#define OV8858_OTP_MODE_PROGRAM_DISABLE BIT(7)
+#define OV8858_OTP_LOAD_ENABLE BIT(0)
+#define OV8858_OTP_DPC_ENABLE BIT(3)
+
+#define OV8858_PLL1_PREDIV0 0x030A
+#define OV8858_PLL1_PREDIV 0x0300
+#define OV8858_PLL1_MULTIPLIER 0x0301
+#define OV8858_PLL1_SYS_PRE_DIV 0x0305
+#define OV8858_PLL1_SYS_DIVIDER 0x0306
+
+#define OV8858_PLL1_PREDIV0_MASK BIT(0)
+#define OV8858_PLL1_PREDIV_MASK (BIT(0) | BIT(1) | BIT(2))
+#define OV8858_PLL1_MULTIPLIER_MASK 0x01FF
+#define OV8858_PLL1_SYS_PRE_DIV_MASK (BIT(0) | BIT(1))
+#define OV8858_PLL1_SYS_DIVIDER_MASK BIT(0)
+
+#define OV8858_PLL2_PREDIV0 0x0312
+#define OV8858_PLL2_PREDIV 0x030B
+#define OV8858_PLL2_MULTIPLIER 0x030C
+#define OV8858_PLL2_DAC_DIVIDER 0x0312
+#define OV8858_PLL2_SYS_PRE_DIV 0x030F
+#define OV8858_PLL2_SYS_DIVIDER 0x030E
+
+#define OV8858_PLL2_PREDIV0_MASK BIT(4)
+#define OV8858_PLL2_PREDIV_MASK (BIT(0) | BIT(1) | BIT(2))
+#define OV8858_PLL2_MULTIPLIER_MASK 0x01FF
+#define OV8858_PLL2_DAC_DIVIDER_MASK (BIT(0) | BIT(1) | BIT(2) | BIT(3))
+#define OV8858_PLL2_SYS_PRE_DIV_MASK (BIT(0) | BIT(1) | BIT(2) | BIT(3))
+#define OV8858_PLL2_SYS_DIVIDER_MASK (BIT(0) | BIT(1) | BIT(2))
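+/*
+ * Note: register 0x0312 carries both PLL2_PREDIV0 (bit 4) and the
+ * DAC divider (bits 3:0).
+ */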
+
+#define OV8858_PLL_SCLKSEL1 0x3032
+#define OV8858_PLL_SCLKSEL2 0x3033
+#define OV8858_SRB_HOST_INPUT_DIS 0x3106
+
+#define OV8858_PLL_SCLKSEL1_MASK BIT(7)
+#define OV8858_PLL_SCLKSEL2_MASK BIT(1)
+
+#define OV8858_SYS_PRE_DIV_OFFSET 2
+#define OV8858_SYS_PRE_DIV_MASK (BIT(2) | BIT(3))
+#define OV8858_SCLK_PDIV_OFFSET 4
+#define OV8858_SCLK_PDIV_MASK (BIT(4) | BIT(5) | BIT(6) | BIT(7))
+
+#define OV8858_TIMING_HTS 0x380C
+#define OV8858_TIMING_VTS 0x380E
+
+#define OV8858_HORIZONTAL_START_H 0x3800
+#define OV8858_VERTICAL_START_H 0x3802
+#define OV8858_HORIZONTAL_END_H 0x3804
+#define OV8858_VERTICAL_END_H 0x3806
+#define OV8858_HORIZONTAL_OUTPUT_SIZE_H 0x3808
+#define OV8858_VERTICAL_OUTPUT_SIZE_H 0x380A
+
+#define OV8858_GROUP_ACCESS 0x3208
+#define OV8858_GROUP_ZERO 0x00
+#define OV8858_GROUP_ACCESS_HOLD_START 0x00
+#define OV8858_GROUP_ACCESS_HOLD_END 0x10
+#define OV8858_GROUP_ACCESS_DELAY_LAUNCH 0xA0
+#define OV8858_GROUP_ACCESS_QUICK_LAUNCH 0xE0
+
+#define OV_SUBDEV_PREFIX "ov"
+#define OV_ID_DEFAULT 0x0000
+#define OV8858_NAME "ov8858"
+#define OV8858_CHIP_ID 0x8858
+
+#define OV8858_LONG_EXPO 0x3500
+#define OV8858_LONG_GAIN 0x3508
+#define OV8858_LONG_DIGI_GAIN 0x350A
+#define OV8858_SHORT_GAIN 0x350C
+#define OV8858_SHORT_DIGI_GAIN 0x350E
+
+#define OV8858_FORMAT1 0x3820
+#define OV8858_FORMAT2 0x3821
+
+#define OV8858_FLIP_ENABLE 0x06
+
+#define OV8858_MWB_RED_GAIN_H 0x5032
+#define OV8858_MWB_GREEN_GAIN_H 0x5034
+#define OV8858_MWB_BLUE_GAIN_H 0x5036
+#define OV8858_MWB_GAIN_MAX 0x0FFF
+
+#define OV8858_CHIP_ID_HIGH 0x300B
+#define OV8858_CHIP_ID_LOW 0x300C
+#define OV8858_STREAM_MODE 0x0100
+
+#define OV8858_FOCAL_LENGTH_NUM 294 /* 2.94mm */
+#define OV8858_FOCAL_LENGTH_DEM 100
+#define OV8858_F_NUMBER_DEFAULT_NUM 24 /* 2.4 */
+#define OV8858_F_NUMBER_DEM 10
+
+#define OV8858_H_INC_ODD 0x3814
+#define OV8858_H_INC_EVEN 0x3815
+#define OV8858_V_INC_ODD 0x382A
+#define OV8858_V_INC_EVEN 0x382B
+
+#define OV8858_READ_MODE_BINNING_ON 0x0400 /* ToDo: Check this */
+#define OV8858_READ_MODE_BINNING_OFF 0x00 /* ToDo: Check this */
+#define OV8858_BIN_FACTOR_MAX 2
+#define OV8858_INTEGRATION_TIME_MARGIN 14
+
+#define OV8858_MAX_VTS_VALUE 0xFFFF
+#define OV8858_MAX_EXPOSURE_VALUE \
+ (OV8858_MAX_VTS_VALUE - OV8858_INTEGRATION_TIME_MARGIN)
+#define OV8858_MAX_GAIN_VALUE 0x07FF
+
+#define OV8858_MAX_FOCUS_POS 1023
+
+#define OV8858_TEST_PATTERN_REG 0x5E00
+
+struct ov8858_vcm {
+ int (*power_up)(struct v4l2_subdev *sd);
+ int (*power_down)(struct v4l2_subdev *sd);
+ int (*init)(struct v4l2_subdev *sd);
+ int (*t_focus_vcm)(struct v4l2_subdev *sd, u16 val);
+ int (*t_focus_abs)(struct v4l2_subdev *sd, s32 value);
+ int (*t_focus_rel)(struct v4l2_subdev *sd, s32 value);
+ int (*q_focus_status)(struct v4l2_subdev *sd, s32 *value);
+ int (*q_focus_abs)(struct v4l2_subdev *sd, s32 *value);
+ int (*t_vcm_slew)(struct v4l2_subdev *sd, s32 value);
+ int (*t_vcm_timing)(struct v4l2_subdev *sd, s32 value);
+};
+
+/*
+ * Defines for register writes and register array processing
+ */
+#define OV8858_BYTE_MAX 32
+#define OV8858_SHORT_MAX 16
+#define OV8858_TOK_MASK 0xFFF0
+
+#define MAX_FPS_OPTIONS_SUPPORTED 3
+
+#define OV8858_DEPTH_COMP_CONST 2200
+#define OV8858_DEPTH_VTS_CONST 2573
+
+enum ov8858_tok_type {
+ OV8858_8BIT = 0x0001,
+ OV8858_16BIT = 0x0002,
+ OV8858_TOK_TERM = 0xF000, /* terminating token for reg list */
+ OV8858_TOK_DELAY = 0xFE00 /* delay token for reg list */
+};
+
+/*
+ * If register address or register width is not 32 bit width,
+ * user needs to convert it manually
+ */
+struct s_register_setting {
+ u32 reg;
+ u32 val;
+};
+
+/**
+ * struct ov8858_reg - MI sensor register format
+ * @type: type of the register
+ * @sreg: 16-bit offset to register
+ * @val: 8/16/32-bit register value
+ *
+ * Define a structure for sensor register initialization values
+ */
+struct ov8858_reg {
+ enum ov8858_tok_type type;
+ u16 sreg;
+ u32 val; /* @set value for read/mod/write, @mask */
+};
+
+struct ov8858_fps_setting {
+ int fps;
+ unsigned short pixels_per_line;
+ unsigned short lines_per_frame;
+ const struct ov8858_reg *regs; /* regs that the fps setting needs */
+};
+
+struct ov8858_resolution {
+ u8 *desc;
+ const struct ov8858_reg *regs;
+ int res;
+ int width;
+ int height;
+ bool used;
+ u8 bin_factor_x;
+ u8 bin_factor_y;
+ unsigned short skip_frames;
+ const struct ov8858_fps_setting fps_options[MAX_FPS_OPTIONS_SUPPORTED];
+};
+
+/*
+ * ov8858 device structure
+ */
+struct ov8858_device {
+ struct v4l2_subdev sd;
+ struct media_pad pad;
+ struct v4l2_mbus_framefmt format;
+
+ struct camera_sensor_platform_data *platform_data;
+ struct mutex input_lock; /* serialize sensor's ioctl */
+ int fmt_idx;
+ int streaming;
+ int vt_pix_clk_freq_mhz;
+ int fps_index;
+ u16 sensor_id; /* Sensor id from registers */
+ u16 i2c_id; /* Sensor id from i2c_device_id */
+ int exposure;
+ int gain;
+ u16 digital_gain;
+ u16 pixels_per_line;
+ u16 lines_per_frame;
+ u8 fps;
+ u8 *otp_data;
+ /* Prevent the framerate from being lowered in low light scenes. */
+ int limit_exposure_flag;
+ bool hflip;
+ bool vflip;
+
+ const struct ov8858_reg *regs;
+ struct ov8858_vcm *vcm_driver;
+ const struct ov8858_resolution *curr_res_table;
+ int entries_curr_table;
+
+ struct v4l2_ctrl_handler ctrl_handler;
+ struct v4l2_ctrl *run_mode;
+};
+
+#define to_ov8858_sensor(x) container_of(x, struct ov8858_device, sd)
+
+#define OV8858_MAX_WRITE_BUF_SIZE 32
+struct ov8858_write_buffer {
+ u16 addr;
+ u8 data[OV8858_MAX_WRITE_BUF_SIZE];
+};
+
+struct ov8858_write_ctrl {
+ int index;
+ struct ov8858_write_buffer buffer;
+};
+
+static const struct ov8858_reg ov8858_soft_standby[] = {
+ {OV8858_8BIT, 0x0100, 0x00},
+ {OV8858_TOK_TERM, 0, 0}
+};
+
+static const struct ov8858_reg ov8858_streaming[] = {
+ {OV8858_8BIT, 0x0100, 0x01},
+ {OV8858_TOK_TERM, 0, 0}
+};
+
+static const struct ov8858_reg ov8858_param_hold[] = {
+ {OV8858_8BIT, OV8858_GROUP_ACCESS,
+ OV8858_GROUP_ZERO | OV8858_GROUP_ACCESS_HOLD_START},
+ {OV8858_TOK_TERM, 0, 0}
+};
+
+static const struct ov8858_reg ov8858_param_update[] = {
+ {OV8858_8BIT, OV8858_GROUP_ACCESS,
+ OV8858_GROUP_ZERO | OV8858_GROUP_ACCESS_HOLD_END},
+ {OV8858_8BIT, OV8858_GROUP_ACCESS,
+ OV8858_GROUP_ZERO | OV8858_GROUP_ACCESS_DELAY_LAUNCH},
+ {OV8858_TOK_TERM, 0, 0}
+};
+
+extern int dw9718_vcm_power_up(struct v4l2_subdev *sd);
+extern int dw9718_vcm_power_down(struct v4l2_subdev *sd);
+extern int dw9718_vcm_init(struct v4l2_subdev *sd);
+extern int dw9718_t_focus_vcm(struct v4l2_subdev *sd, u16 val);
+extern int dw9718_t_focus_abs(struct v4l2_subdev *sd, s32 value);
+extern int dw9718_t_focus_rel(struct v4l2_subdev *sd, s32 value);
+extern int dw9718_q_focus_status(struct v4l2_subdev *sd, s32 *value);
+extern int dw9718_q_focus_abs(struct v4l2_subdev *sd, s32 *value);
+extern int dw9718_t_vcm_slew(struct v4l2_subdev *sd, s32 value);
+extern int dw9718_t_vcm_timing(struct v4l2_subdev *sd, s32 value);
+
+extern int vcm_power_up(struct v4l2_subdev *sd);
+extern int vcm_power_down(struct v4l2_subdev *sd);
+
+static struct ov8858_vcm ov8858_vcms[] = {
+ [OV8858_SUNNY] = {
+ .power_up = dw9718_vcm_power_up,
+ .power_down = dw9718_vcm_power_down,
+ .init = dw9718_vcm_init,
+ .t_focus_vcm = dw9718_t_focus_vcm,
+ .t_focus_abs = dw9718_t_focus_abs,
+ .t_focus_rel = dw9718_t_focus_rel,
+ .q_focus_status = dw9718_q_focus_status,
+ .q_focus_abs = dw9718_q_focus_abs,
+ .t_vcm_slew = dw9718_t_vcm_slew,
+ .t_vcm_timing = dw9718_t_vcm_timing,
+ },
+ [OV8858_ID_DEFAULT] = {
+ .power_up = NULL,
+ .power_down = NULL,
+ },
+};
+
+
+#define OV8858_RES_WIDTH_MAX 3280
+#define OV8858_RES_HEIGHT_MAX 2464
+
+static struct ov8858_reg ov8858_BasicSettings[] = {
+ {OV8858_8BIT, 0x0103, 0x01}, /* software_reset */
+ {OV8858_8BIT, 0x0100, 0x00}, /* software_standby */
+ /* PLL settings */
+ {OV8858_8BIT, 0x0300, 0x05}, /* pll1_pre_div = /4 */
+ {OV8858_8BIT, 0x0302, 0xAF}, /* pll1_multiplier = 175 */
+ {OV8858_8BIT, 0x0303, 0x00}, /* pll1_divm = /(1 + 0) */
+ {OV8858_8BIT, 0x0304, 0x03}, /* pll1_div_mipi = /8 */
+ {OV8858_8BIT, 0x030B, 0x02}, /* pll2_pre_div = /2 */
+ {OV8858_8BIT, 0x030D, 0x4E}, /* pll2_r_divp = 78 */
+ {OV8858_8BIT, 0x030E, 0x00}, /* pll2_r_divs = /1 */
+ {OV8858_8BIT, 0x030F, 0x04}, /* pll2_r_divsp = /(1 + 4) */
+ /* pll2_pre_div0 = /1, pll2_r_divdac = /(1 + 1) */
+ {OV8858_8BIT, 0x0312, 0x01},
+ {OV8858_8BIT, 0x031E, 0x0C}, /* pll1_no_lat = 1, mipi_bitsel_man = 0 */
+
+ /* PAD OEN2, VSYNC out enable=0x80, disable=0x00 */
+ {OV8858_8BIT, 0x3002, 0x80},
+ /* PAD OUT2, VSYNC pulse direction low-to-high = 1 */
+ {OV8858_8BIT, 0x3007, 0x01},
+ /* PAD SEL2, VSYNC out value = 0 */
+ {OV8858_8BIT, 0x300D, 0x00},
+ /* PAD OUT2, VSYNC out select = 0 */
+ {OV8858_8BIT, 0x3010, 0x00},
+
+ /* Npump clock div = /2, Ppump clock div = /4 */
+ {OV8858_8BIT, 0x3015, 0x01},
+ /*
+ * mipi_lane_mode = 1+3, mipi_lvds_sel = 1 = MIPI enable,
+ * r_phy_pd_mipi_man = 0, lane_dis_option = 0
+ */
+ {OV8858_8BIT, 0x3018, 0x72},
+ /* Clock switch output = normal, pclk_div = /1 */
+ {OV8858_8BIT, 0x3020, 0x93},
+ /*
+ * lvds_mode_o = 0, clock lane disable when pd_mipi = 0,
+ * pd_mipi enable when rst_sync = 1
+ */
+ {OV8858_8BIT, 0x3022, 0x01},
+ {OV8858_8BIT, 0x3031, 0x0A}, /* mipi_bit_sel = 10 */
+ {OV8858_8BIT, 0x3034, 0x00}, /* Unknown */
+ /* sclk_div = /1, sclk_pre_div = /1, chip debug = 1 */
+ {OV8858_8BIT, 0x3106, 0x01},
+
+ {OV8858_8BIT, 0x3305, 0xF1}, /* Unknown */
+ {OV8858_8BIT, 0x3307, 0x04}, /* Unknown */
+ {OV8858_8BIT, 0x3308, 0x00}, /* Unknown */
+ {OV8858_8BIT, 0x3309, 0x28}, /* Unknown */
+ {OV8858_8BIT, 0x330A, 0x00}, /* Unknown */
+ {OV8858_8BIT, 0x330B, 0x20}, /* Unknown */
+ {OV8858_8BIT, 0x330C, 0x00}, /* Unknown */
+ {OV8858_8BIT, 0x330D, 0x00}, /* Unknown */
+ {OV8858_8BIT, 0x330E, 0x00}, /* Unknown */
+ {OV8858_8BIT, 0x330F, 0x40}, /* Unknown */
+
+ {OV8858_8BIT, 0x3500, 0x00}, /* long exposure = 0x9A20 */
+ {OV8858_8BIT, 0x3501, 0x9A}, /* long exposure = 0x9A20 */
+ {OV8858_8BIT, 0x3502, 0x20}, /* long exposure = 0x9A20 */
+ /*
+ * Digital fraction gain delay option = Delay 1 frame,
+ * Gain change delay option = Delay 1 frame,
+ * Gain delay option = Delay 1 frame,
+ * Gain manual as sensor gain = Input gain as real gain format,
+ * Exposure delay option (must be 0) = Delay 1 frame,
+ * Exposure change delay option (must be 0) = Delay 1 frame
+ */
+ {OV8858_8BIT, 0x3503, 0x00},
+ {OV8858_8BIT, 0x3505, 0x80}, /* gain conversion option */
+ /*
+ * [10:7] are integer gain, [6:0] are fraction gain. For example:
+ * 0x80 is 1x gain, 0x100 is 2x gain, 0x1C0 is 3.5x gain
+ */
+ {OV8858_8BIT, 0x3508, 0x02}, /* long gain = 0x0200 */
+ {OV8858_8BIT, 0x3509, 0x00}, /* long gain = 0x0200 */
+ {OV8858_8BIT, 0x350C, 0x00}, /* short gain = 0x0080 */
+ {OV8858_8BIT, 0x350D, 0x80}, /* short gain = 0x0080 */
+ {OV8858_8BIT, 0x3510, 0x00}, /* short exposure = 0x000200 */
+ {OV8858_8BIT, 0x3511, 0x02}, /* short exposure = 0x000200 */
+ {OV8858_8BIT, 0x3512, 0x00}, /* short exposure = 0x000200 */
+
+ {OV8858_8BIT, 0x3600, 0x00}, /* Unknown */
+ {OV8858_8BIT, 0x3601, 0x00}, /* Unknown */
+ {OV8858_8BIT, 0x3602, 0x00}, /* Unknown */
+ {OV8858_8BIT, 0x3603, 0x00}, /* Unknown */
+ {OV8858_8BIT, 0x3604, 0x22}, /* Unknown */
+ {OV8858_8BIT, 0x3605, 0x30}, /* Unknown */
+ {OV8858_8BIT, 0x3606, 0x00}, /* Unknown */
+ {OV8858_8BIT, 0x3607, 0x20}, /* Unknown */
+ {OV8858_8BIT, 0x3608, 0x11}, /* Unknown */
+ {OV8858_8BIT, 0x3609, 0x28}, /* Unknown */
+ {OV8858_8BIT, 0x360A, 0x00}, /* Unknown */
+ {OV8858_8BIT, 0x360B, 0x06}, /* Unknown */
+ {OV8858_8BIT, 0x360C, 0xDC}, /* Unknown */
+ {OV8858_8BIT, 0x360D, 0x40}, /* Unknown */
+ {OV8858_8BIT, 0x360E, 0x0C}, /* Unknown */
+ {OV8858_8BIT, 0x360F, 0x20}, /* Unknown */
+ {OV8858_8BIT, 0x3610, 0x07}, /* Unknown */
+ {OV8858_8BIT, 0x3611, 0x20}, /* Unknown */
+ {OV8858_8BIT, 0x3612, 0x88}, /* Unknown */
+ {OV8858_8BIT, 0x3613, 0x80}, /* Unknown */
+ {OV8858_8BIT, 0x3614, 0x58}, /* Unknown */
+ {OV8858_8BIT, 0x3615, 0x00}, /* Unknown */
+ {OV8858_8BIT, 0x3616, 0x4A}, /* Unknown */
+ {OV8858_8BIT, 0x3617, 0x90}, /* Unknown */
+ {OV8858_8BIT, 0x3618, 0x56}, /* Unknown */
+ {OV8858_8BIT, 0x3619, 0x70}, /* Unknown */
+ {OV8858_8BIT, 0x361A, 0x99}, /* Unknown */
+ {OV8858_8BIT, 0x361B, 0x00}, /* Unknown */
+ {OV8858_8BIT, 0x361C, 0x07}, /* Unknown */
+ {OV8858_8BIT, 0x361D, 0x00}, /* Unknown */
+ {OV8858_8BIT, 0x361E, 0x00}, /* Unknown */
+ {OV8858_8BIT, 0x361F, 0x00}, /* Unknown */
+ {OV8858_8BIT, 0x3633, 0x0C}, /* Unknown */
+ {OV8858_8BIT, 0x3634, 0x0C}, /* Unknown */
+ {OV8858_8BIT, 0x3635, 0x0C}, /* Unknown */
+ {OV8858_8BIT, 0x3636, 0x0C}, /* Unknown */
+ {OV8858_8BIT, 0x3638, 0xFF}, /* Unknown */
+ {OV8858_8BIT, 0x3645, 0x13}, /* Unknown */
+ {OV8858_8BIT, 0x3646, 0x83}, /* Unknown */
+ {OV8858_8BIT, 0x364A, 0x07}, /* Unknown */
+
+ {OV8858_8BIT, 0x3700, 0x30}, /* Unknown */
+ {OV8858_8BIT, 0x3701, 0x18}, /* Unknown */
+ {OV8858_8BIT, 0x3702, 0x50}, /* Unknown */
+ {OV8858_8BIT, 0x3703, 0x32}, /* Unknown */
+ {OV8858_8BIT, 0x3704, 0x28}, /* Unknown */
+ {OV8858_8BIT, 0x3705, 0x00}, /* Unknown */
+ {OV8858_8BIT, 0x3706, 0x6A}, /* Unknown */
+ {OV8858_8BIT, 0x3707, 0x08}, /* Unknown */
+ {OV8858_8BIT, 0x3708, 0x48}, /* Unknown */
+ {OV8858_8BIT, 0x3709, 0x66}, /* Unknown */
+ {OV8858_8BIT, 0x370A, 0x01}, /* Unknown */
+ {OV8858_8BIT, 0x370B, 0x6A}, /* Unknown */
+ {OV8858_8BIT, 0x370C, 0x07}, /* Unknown */
+ {OV8858_8BIT, 0x3712, 0x44}, /* Unknown */
+ {OV8858_8BIT, 0x3714, 0x24}, /* Unknown */
+ {OV8858_8BIT, 0x3718, 0x14}, /* Unknown */
+ {OV8858_8BIT, 0x3719, 0x31}, /* Unknown */
+ {OV8858_8BIT, 0x371E, 0x31}, /* Unknown */
+ {OV8858_8BIT, 0x371F, 0x7F}, /* Unknown */
+ {OV8858_8BIT, 0x3720, 0x0A}, /* Unknown */
+ {OV8858_8BIT, 0x3721, 0x0A}, /* Unknown */
+ {OV8858_8BIT, 0x3724, 0x0C}, /* Unknown */
+ {OV8858_8BIT, 0x3725, 0x02}, /* Unknown */
+ {OV8858_8BIT, 0x3726, 0x0C}, /* Unknown */
+ {OV8858_8BIT, 0x3728, 0x0A}, /* Unknown */
+ {OV8858_8BIT, 0x3729, 0x03}, /* Unknown */
+ {OV8858_8BIT, 0x372A, 0x06}, /* Unknown */
+ {OV8858_8BIT, 0x372B, 0xA6}, /* Unknown */
+ {OV8858_8BIT, 0x372C, 0xA6}, /* Unknown */
+ {OV8858_8BIT, 0x372D, 0xA6}, /* Unknown */
+ {OV8858_8BIT, 0x372E, 0x0C}, /* Unknown */
+ {OV8858_8BIT, 0x372F, 0x20}, /* Unknown */
+ {OV8858_8BIT, 0x3730, 0x02}, /* Unknown */
+ {OV8858_8BIT, 0x3731, 0x0C}, /* Unknown */
+ {OV8858_8BIT, 0x3732, 0x28}, /* Unknown */
+ {OV8858_8BIT, 0x3733, 0x10}, /* Unknown */
+ {OV8858_8BIT, 0x3734, 0x40}, /* Unknown */
+ {OV8858_8BIT, 0x3736, 0x30}, /* Unknown */
+ {OV8858_8BIT, 0x373A, 0x0A}, /* Unknown */
+ {OV8858_8BIT, 0x373B, 0x0B}, /* Unknown */
+ {OV8858_8BIT, 0x373C, 0x14}, /* Unknown */
+ {OV8858_8BIT, 0x373E, 0x06}, /* Unknown */
+ {OV8858_8BIT, 0x3755, 0x10}, /* Unknown */
+ {OV8858_8BIT, 0x3758, 0x00}, /* Unknown */
+ {OV8858_8BIT, 0x3759, 0x4C}, /* Unknown */
+ {OV8858_8BIT, 0x375A, 0x0C}, /* Unknown */
+ {OV8858_8BIT, 0x375B, 0x26}, /* Unknown */
+ {OV8858_8BIT, 0x375C, 0x20}, /* Unknown */
+ {OV8858_8BIT, 0x375D, 0x04}, /* Unknown */
+ {OV8858_8BIT, 0x375E, 0x00}, /* Unknown */
+ {OV8858_8BIT, 0x375F, 0x28}, /* Unknown */
+ {OV8858_8BIT, 0x3760, 0x00}, /* Unknown */
+ {OV8858_8BIT, 0x3761, 0x00}, /* Unknown */
+ {OV8858_8BIT, 0x3762, 0x00}, /* Unknown */
+ {OV8858_8BIT, 0x3763, 0x00}, /* Unknown */
+ {OV8858_8BIT, 0x3766, 0xFF}, /* Unknown */
+ {OV8858_8BIT, 0x3768, 0x22}, /* Unknown */
+ {OV8858_8BIT, 0x3769, 0x44}, /* Unknown */
+ {OV8858_8BIT, 0x376A, 0x44}, /* Unknown */
+ {OV8858_8BIT, 0x376B, 0x00}, /* Unknown */
+ {OV8858_8BIT, 0x376F, 0x01}, /* Unknown */
+ {OV8858_8BIT, 0x3772, 0x46}, /* Unknown */
+ {OV8858_8BIT, 0x3773, 0x04}, /* Unknown */
+ {OV8858_8BIT, 0x3774, 0x2C}, /* Unknown */
+ {OV8858_8BIT, 0x3775, 0x13}, /* Unknown */
+ {OV8858_8BIT, 0x3776, 0x08}, /* Unknown */
+ {OV8858_8BIT, 0x3777, 0x00}, /* Unknown */
+ {OV8858_8BIT, 0x3778, 0x16}, /* Unknown */
+ {OV8858_8BIT, 0x37A0, 0x88}, /* Unknown */
+ {OV8858_8BIT, 0x37A1, 0x7A}, /* Unknown */
+ {OV8858_8BIT, 0x37A2, 0x7A}, /* Unknown */
+ {OV8858_8BIT, 0x37A3, 0x00}, /* Unknown */
+ {OV8858_8BIT, 0x37A4, 0x00}, /* Unknown */
+ {OV8858_8BIT, 0x37A5, 0x00}, /* Unknown */
+ {OV8858_8BIT, 0x37A6, 0x00}, /* Unknown */
+ {OV8858_8BIT, 0x37A7, 0x88}, /* Unknown */
+ {OV8858_8BIT, 0x37A8, 0x98}, /* Unknown */
+ {OV8858_8BIT, 0x37A9, 0x98}, /* Unknown */
+ {OV8858_8BIT, 0x37AA, 0x88}, /* Unknown */
+ {OV8858_8BIT, 0x37AB, 0x5C}, /* Unknown */
+ {OV8858_8BIT, 0x37AC, 0x5C}, /* Unknown */
+ {OV8858_8BIT, 0x37AD, 0x55}, /* Unknown */
+ {OV8858_8BIT, 0x37AE, 0x19}, /* Unknown */
+ {OV8858_8BIT, 0x37AF, 0x19}, /* Unknown */
+ {OV8858_8BIT, 0x37B0, 0x00}, /* Unknown */
+ {OV8858_8BIT, 0x37B1, 0x00}, /* Unknown */
+ {OV8858_8BIT, 0x37B2, 0x00}, /* Unknown */
+ {OV8858_8BIT, 0x37B3, 0x84}, /* Unknown */
+ {OV8858_8BIT, 0x37B4, 0x84}, /* Unknown */
+ {OV8858_8BIT, 0x37B5, 0x66}, /* Unknown */
+ {OV8858_8BIT, 0x37B6, 0x00}, /* Unknown */
+ {OV8858_8BIT, 0x37B7, 0x00}, /* Unknown */
+ {OV8858_8BIT, 0x37B8, 0x00}, /* Unknown */
+ {OV8858_8BIT, 0x37B9, 0xFF}, /* Unknown */
+
+ {OV8858_8BIT, 0x3800, 0x00}, /* h_crop_start high */
+ {OV8858_8BIT, 0x3801, 0x0C}, /* h_crop_start low */
+ {OV8858_8BIT, 0x3802, 0x00}, /* v_crop_start high */
+ {OV8858_8BIT, 0x3803, 0x0C}, /* v_crop_start low */
+ {OV8858_8BIT, 0x3804, 0x0C}, /* h_crop_end high */
+ {OV8858_8BIT, 0x3805, 0xD3}, /* h_crop_end low */
+ {OV8858_8BIT, 0x3806, 0x09}, /* v_crop_end high */
+ {OV8858_8BIT, 0x3807, 0xA3}, /* v_crop_end low */
+ {OV8858_8BIT, 0x3808, 0x0C}, /* h_output_size high */
+ {OV8858_8BIT, 0x3809, 0xC0}, /* h_output_size low */
+ {OV8858_8BIT, 0x380A, 0x09}, /* v_output_size high */
+ {OV8858_8BIT, 0x380B, 0x90}, /* v_output_size low */
+ {OV8858_8BIT, 0x380C, 0x07}, /* horizontal timing size high */
+ {OV8858_8BIT, 0x380D, 0x94}, /* horizontal timing size low */
+ {OV8858_8BIT, 0x380E, 0x0A}, /* vertical timing size high */
+ {OV8858_8BIT, 0x380F, 0x0D}, /* vertical timing size low */
+ {OV8858_8BIT, 0x3810, 0x00}, /* h_win offset high */
+ {OV8858_8BIT, 0x3811, 0x04}, /* h_win offset low */
+ {OV8858_8BIT, 0x3812, 0x00}, /* v_win offset high */
+ {OV8858_8BIT, 0x3813, 0x02}, /* v_win offset low */
+ {OV8858_8BIT, 0x3814, 0x01}, /* h_odd_inc */
+ {OV8858_8BIT, 0x3815, 0x01}, /* h_even_inc */
+ {OV8858_8BIT, 0x3820, 0x00}, /* format1 */
+ {OV8858_8BIT, 0x3821, 0x40}, /* format2 */
+ {OV8858_8BIT, 0x382A, 0x01}, /* v_odd_inc */
+ {OV8858_8BIT, 0x382B, 0x01}, /* v_even_inc */
+
+ {OV8858_8BIT, 0x3830, 0x06}, /* Unknown */
+ {OV8858_8BIT, 0x3836, 0x01}, /* Unknown */
+ {OV8858_8BIT, 0x3837, 0x18}, /* Unknown */
+ {OV8858_8BIT, 0x3841, 0xFF}, /* AUTO_SIZE_CTRL */
+ {OV8858_8BIT, 0x3846, 0x48}, /* Unknown */
+
+ {OV8858_8BIT, 0x3D85, 0x14}, /* OTP_REG85 */
+ {OV8858_8BIT, 0x3D8C, 0x73}, /* OTP_SETTING_STT_ADDRESS */
+ {OV8858_8BIT, 0x3D8D, 0xDE}, /* OTP_SETTING_STT_ADDRESS */
+ {OV8858_8BIT, 0x3F08, 0x10}, /* PSRAM control register */
+ {OV8858_8BIT, 0x3F0A, 0x80}, /* PSRAM control register */
+
+ {OV8858_8BIT, 0x4000, 0xF1}, /* BLC CTRL00 = default */
+ {OV8858_8BIT, 0x4001, 0x00}, /* BLC CTRL01 */
+ {OV8858_8BIT, 0x4002, 0x27}, /* BLC offset = 0x27 */
+ {OV8858_8BIT, 0x4005, 0x10}, /* BLC target = 0x0010 */
+ {OV8858_8BIT, 0x4009, 0x81}, /* BLC CTRL09 */
+ {OV8858_8BIT, 0x400B, 0x0C}, /* BLC CTRL0B = default */
+ {OV8858_8BIT, 0x401B, 0x00}, /* Zero line R coeff. = 0x0000 */
+ {OV8858_8BIT, 0x401D, 0x00}, /* Zero line T coeff. = 0x0000 */
+ {OV8858_8BIT, 0x401F, 0x00}, /* BLC CTRL1F */
+ {OV8858_8BIT, 0x4020, 0x00}, /* Anchor left start = 0x0004 */
+ {OV8858_8BIT, 0x4021, 0x04}, /* Anchor left start = 0x0004 */
+ {OV8858_8BIT, 0x4022, 0x0B}, /* Anchor left end = 0x0BC3 */
+ {OV8858_8BIT, 0x4023, 0xC3}, /* Anchor left end = 0x0BC3 */
+ {OV8858_8BIT, 0x4024, 0x0C}, /* Anchor right start = 0x0C36 */
+ {OV8858_8BIT, 0x4025, 0x36}, /* Anchor right start = 0x0C36 */
+ {OV8858_8BIT, 0x4026, 0x0C}, /* Anchor right end = 0x0C37 */
+ {OV8858_8BIT, 0x4027, 0x37}, /* Anchor right end = 0x0C37 */
+ {OV8858_8BIT, 0x4028, 0x00}, /* Top zero line start = 0 */
+ {OV8858_8BIT, 0x4029, 0x02}, /* Top zero line number = 2 */
+ {OV8858_8BIT, 0x402A, 0x04}, /* Top black line start = 4 */
+ {OV8858_8BIT, 0x402B, 0x08}, /* Top black line number = 8 */
+ {OV8858_8BIT, 0x402C, 0x02}, /* Bottom zero start line = 2 */
+ {OV8858_8BIT, 0x402D, 0x02}, /* Bottom zero line number = 2 */
+ {OV8858_8BIT, 0x402E, 0x0C}, /* Bottom black line start = 12 */
+ {OV8858_8BIT, 0x402F, 0x02}, /* Bottom black line number = 2 */
+
+ {OV8858_8BIT, 0x4034, 0x3F}, /* Unknown */
+ {OV8858_8BIT, 0x403D, 0x04}, /* BLC CTRL3D */
+ {OV8858_8BIT, 0x4300, 0xFF}, /* clip_max[11:4] = 0xFFF */
+ {OV8858_8BIT, 0x4301, 0x00}, /* clip_min[11:4] = 0 */
+ {OV8858_8BIT, 0x4302, 0x0F}, /* clip_min/max[3:0] */
+ {OV8858_8BIT, 0x4307, 0x01}, /* Unknown */
+ {OV8858_8BIT, 0x4316, 0x00}, /* CTRL16 = default */
+ {OV8858_8BIT, 0x4503, 0x18}, /* Unknown */
+ {OV8858_8BIT, 0x4500, 0x38}, /* Unknown */
+ {OV8858_8BIT, 0x4600, 0x01}, /* Unknown */
+ {OV8858_8BIT, 0x4601, 0x97}, /* Unknown */
+ /* wkup_dly = Mark1 wakeup delay/2^10 = 0x25 */
+ {OV8858_8BIT, 0x4808, 0x25},
+ {OV8858_8BIT, 0x4816, 0x52}, /* Embedded data type*/
+ {OV8858_8BIT, 0x481F, 0x32}, /* clk_prepare_min = 0x32 */
+ {OV8858_8BIT, 0x4825, 0x3A}, /* lpx_p_min = 0x3A */
+ {OV8858_8BIT, 0x4826, 0x40}, /* hs_prepare_min = 0x40 */
+ {OV8858_8BIT, 0x4837, 0x14}, /* pclk_period = 0x14 */
+ {OV8858_8BIT, 0x4850, 0x10}, /* LANE SEL01 */
+ {OV8858_8BIT, 0x4851, 0x32}, /* LANE SEL02 */
+
+ {OV8858_8BIT, 0x4B00, 0x2A}, /* Unknown */
+ {OV8858_8BIT, 0x4B0D, 0x00}, /* Unknown */
+ {OV8858_8BIT, 0x4D00, 0x04}, /* TPM_CTRL_REG */
+ {OV8858_8BIT, 0x4D01, 0x18}, /* TPM_CTRL_REG */
+ {OV8858_8BIT, 0x4D02, 0xC3}, /* TPM_CTRL_REG */
+ {OV8858_8BIT, 0x4D03, 0xFF}, /* TPM_CTRL_REG */
+ {OV8858_8BIT, 0x4D04, 0xFF}, /* TPM_CTRL_REG */
+ {OV8858_8BIT, 0x4D05, 0xFF}, /* TPM_CTRL_REG */
+
+ /*
+ * Lens correction (LENC) function enable = 0
+ * Slave sensor AWB Gain function enable = 1
+ * Slave sensor AWB Statistics function enable = 1
+ * Master sensor AWB Gain function enable = 1
+ * Master sensor AWB Statistics function enable = 1
+ * Black DPC function enable = 1
+ * White DPC function enable =1
+ */
+ {OV8858_8BIT, 0x5000, 0x7E},
+ {OV8858_8BIT, 0x5001, 0x01}, /* BLC function enable = 1 */
+ /*
+ * Horizontal scale function enable = 0
+ * WBMATCH bypass mode = Select slave sensor's gain
+ * WBMATCH function enable = 0
+ * Master MWB gain support RGBC = 0
+ * OTP_DPC function enable = 1
+ * Manual mode of VarioPixel function enable = 0
+ * Manual enable of VarioPixel function enable = 0
+ * Use VSYNC to latch ISP modules's function enable signals = 0
+ */
+ {OV8858_8BIT, 0x5002, 0x08},
+ /*
+ * Bypass all ISP modules after BLC module = 0
+ * DPC_DBC buffer control enable = 1
+ * WBMATCH VSYNC selection = Select master sensor's VSYNC fall
+ * Select master AWB gain to embed line = AWB gain before manual mode
+ * Enable BLC's input flip_i signal = 0
+ */
+ {OV8858_8BIT, 0x5003, 0x20},
+ {OV8858_8BIT, 0x5041, 0x1D}, /* ISP CTRL41 - embedded data=on */
+ {OV8858_8BIT, 0x5046, 0x12}, /* ISP CTRL46 = default */
+ /*
+ * Tail enable = 1
+ * Saturate cross cluster enable = 1
+ * Remove cross cluster enable = 1
+ * Enable to remove connected defect pixels in same channel = 1
+ * Enable to remove connected defect pixels in different channel = 1
+ * Smooth enable, use average G for recovery = 1
+ * Black/white sensor mode enable = 0
+ * Manual mode enable = 0
+ */
+ {OV8858_8BIT, 0x5780, 0xFC},
+ {OV8858_8BIT, 0x5784, 0x0C}, /* DPC CTRL04 */
+ {OV8858_8BIT, 0x5787, 0x40}, /* DPC CTRL07 */
+ {OV8858_8BIT, 0x5788, 0x08}, /* DPC CTRL08 */
+ {OV8858_8BIT, 0x578A, 0x02}, /* DPC CTRL0A */
+ {OV8858_8BIT, 0x578B, 0x01}, /* DPC CTRL0B */
+ {OV8858_8BIT, 0x578C, 0x01}, /* DPC CTRL0C */
+ {OV8858_8BIT, 0x578E, 0x02}, /* DPC CTRL0E */
+ {OV8858_8BIT, 0x578F, 0x01}, /* DPC CTRL0F */
+ {OV8858_8BIT, 0x5790, 0x01}, /* DPC CTRL10 */
+ {OV8858_8BIT, 0x5901, 0x00}, /* VAP CTRL01 = default */
+ /* WINC CTRL08 = embedded data in 1st line*/
+ {OV8858_8BIT, 0x5A08, 0x00},
+ {OV8858_8BIT, 0x5B00, 0x02}, /* OTP CTRL00 */
+ {OV8858_8BIT, 0x5B01, 0x10}, /* OTP CTRL01 */
+ {OV8858_8BIT, 0x5B02, 0x03}, /* OTP CTRL02 */
+ {OV8858_8BIT, 0x5B03, 0xCF}, /* OTP CTRL03 */
+ {OV8858_8BIT, 0x5B05, 0x6C}, /* OTP CTRL05 = default */
+ {OV8858_8BIT, 0x5E00, 0x00}, /* PRE CTRL00 = default */
+ {OV8858_8BIT, 0x5E01, 0x41}, /* PRE_CTRL01 = default */
+
+ {OV8858_TOK_TERM, 0, 0}
+};
+
+/*****************************STILL********************************/
+
+static const struct ov8858_reg ov8858_8M[] = {
+ {OV8858_8BIT, 0x0100, 0x00}, /* software_standby */
+ {OV8858_8BIT, 0x3778, 0x16}, /* Unknown */
+ {OV8858_8BIT, 0x3800, 0x00}, /* h_crop_start high */
+ {OV8858_8BIT, 0x3801, 0x0C}, /* h_crop_start low 12 */
+ {OV8858_8BIT, 0x3802, 0x00}, /* v_crop_start high */
+ {OV8858_8BIT, 0x3803, 0x0C}, /* v_crop_start low */
+ {OV8858_8BIT, 0x3804, 0x0C}, /* h_crop_end high */
+ {OV8858_8BIT, 0x3805, 0xD3}, /* h_crop_end low 3283 */
+ {OV8858_8BIT, 0x3806, 0x09}, /* v_crop_end high */
+ {OV8858_8BIT, 0x3807, 0xA3}, /* v_crop_end low */
+ {OV8858_8BIT, 0x3808, 0x0C}, /* h_output_size high 3280 x 2464 */
+ {OV8858_8BIT, 0x3809, 0xD0}, /* h_output_size low */
+ {OV8858_8BIT, 0x380A, 0x09}, /* v_output_size high */
+ {OV8858_8BIT, 0x380B, 0xa0}, /* v_output_size low */
+ {OV8858_8BIT, 0x380C, 0x07}, /* horizontal timing size high */
+ {OV8858_8BIT, 0x380D, 0x94}, /* horizontal timing size low */
+ {OV8858_8BIT, 0x380E, 0x0A}, /* vertical timing size high */
+ {OV8858_8BIT, 0x380F, 0x0D}, /* vertical timing size low */
+ {OV8858_8BIT, 0x3814, 0x01}, /* h_odd_inc */
+ {OV8858_8BIT, 0x3815, 0x01}, /* h_even_inc */
+ {OV8858_8BIT, 0x3820, 0x00}, /* format1 */
+ {OV8858_8BIT, 0x3821, 0x40}, /* format2 */
+ {OV8858_8BIT, 0x382A, 0x01}, /* v_odd_inc */
+ {OV8858_8BIT, 0x382B, 0x01}, /* v_even_inc */
+ {OV8858_8BIT, 0x3830, 0x06}, /* Unknown */
+ {OV8858_8BIT, 0x3836, 0x01}, /* Unknown */
+ {OV8858_8BIT, 0x3D85, 0x14}, /* OTP_REG85 */
+ {OV8858_8BIT, 0x3F08, 0x10}, /* PSRAM control register */
+ {OV8858_8BIT, 0x4000, 0xF1}, /* BLC CTRL00 = default */
+ {OV8858_8BIT, 0x4001, 0x00}, /* BLC CTRL01 */
+ {OV8858_8BIT, 0x4002, 0x27}, /* BLC offset = 0x27 */
+ {OV8858_8BIT, 0x4005, 0x10}, /* BLC target = 0x0010 */
+ {OV8858_8BIT, 0x4009, 0x81}, /* BLC CTRL09 */
+ {OV8858_8BIT, 0x400B, 0x0C}, /* BLC CTRL0B = default */
+ {OV8858_8BIT, 0x401B, 0x00}, /* Zero line R coeff. = 0x0000 */
+ {OV8858_8BIT, 0x401D, 0x00}, /* Zero line T coeff. = 0x0000 */
+ {OV8858_8BIT, 0x401F, 0x00}, /* BLC CTRL1F */
+ {OV8858_8BIT, 0x4020, 0x00}, /* Anchor left start = 0x0004 */
+ {OV8858_8BIT, 0x4021, 0x04}, /* Anchor left start = 0x0004 */
+ {OV8858_8BIT, 0x4022, 0x0B}, /* Anchor left end = 0x0BC3 */
+ {OV8858_8BIT, 0x4023, 0xC3}, /* Anchor left end = 0x0BC3 */
+ {OV8858_8BIT, 0x4024, 0x0C}, /* Anchor right start = 0x0C36 */
+ {OV8858_8BIT, 0x4025, 0x36}, /* Anchor right start = 0x0C36 */
+ {OV8858_8BIT, 0x4026, 0x0C}, /* Anchor right end = 0x0C37 */
+ {OV8858_8BIT, 0x4027, 0x37}, /* Anchor right end = 0x0C37 */
+ {OV8858_8BIT, 0x4028, 0x00}, /* Top zero line start = 0 */
+ {OV8858_8BIT, 0x4029, 0x02}, /* Top zero line number = 2 */
+ {OV8858_8BIT, 0x402A, 0x04}, /* Top black line start = 4 */
+ {OV8858_8BIT, 0x402B, 0x08}, /* Top black line number = 8 */
+ {OV8858_8BIT, 0x402C, 0x02}, /* Bottom zero start line = 2 */
+ {OV8858_8BIT, 0x402D, 0x02}, /* Bottom zero line number = 2 */
+ {OV8858_8BIT, 0x402E, 0x0C}, /* Bottom black line start = 12 */
+ {OV8858_8BIT, 0x402F, 0x02}, /* Bottom black line number = 2 */
+ {OV8858_8BIT, 0x4034, 0x3F}, /* Unknown */
+ {OV8858_8BIT, 0x403D, 0x04}, /* BLC CTRL3D */
+ {OV8858_8BIT, 0x4600, 0x01}, /* Unknown */
+ {OV8858_8BIT, 0x4601, 0x97}, /* Unknown */
+ {OV8858_8BIT, 0x4837, 0x14}, /* pclk_period = 0x14 */
+ {OV8858_TOK_TERM, 0, 0}
+};
+
+static const struct ov8858_reg ov8858_3276x1848[] = {
+ {OV8858_8BIT, 0x0100, 0x00}, /* software_standby */
+ {OV8858_8BIT, 0x3778, 0x16}, /* Unknown */
+ {OV8858_8BIT, 0x3800, 0x00}, /* h_crop_start high */
+ {OV8858_8BIT, 0x3801, 0x10}, /* h_crop_start low 0c->10*/
+ {OV8858_8BIT, 0x3802, 0x01}, /* v_crop_start high */
+ {OV8858_8BIT, 0x3803, 0x42}, /* v_crop_start low 3e->42*/
+ {OV8858_8BIT, 0x3804, 0x0C}, /* h_crop_end high */
+ {OV8858_8BIT, 0x3805, 0xD3}, /* h_crop_end low */
+ {OV8858_8BIT, 0x3806, 0x08}, /* v_crop_end high */
+ {OV8858_8BIT, 0x3807, 0x71}, /* v_crop_end low */
+ {OV8858_8BIT, 0x3808, 0x0C}, /* h_output_size high 3276 x 1848 */
+ {OV8858_8BIT, 0x3809, 0xCC}, /* h_output_size low d0->cc*/
+ {OV8858_8BIT, 0x380A, 0x07}, /* v_output_size high */
+ {OV8858_8BIT, 0x380B, 0x38}, /* v_output_size low 3c->38*/
+ {OV8858_8BIT, 0x380C, 0x07}, /* horizontal timing size high */
+ {OV8858_8BIT, 0x380D, 0x94}, /* horizontal timing size low */
+ {OV8858_8BIT, 0x380E, 0x0A}, /* vertical timing size high */
+ {OV8858_8BIT, 0x380F, 0x0D}, /* vertical timing size low */
+ {OV8858_8BIT, 0x3814, 0x01}, /* h_odd_inc */
+ {OV8858_8BIT, 0x3815, 0x01}, /* h_even_inc */
+ {OV8858_8BIT, 0x3820, 0x00}, /* format1 */
+ {OV8858_8BIT, 0x3821, 0x40}, /* format2 */
+ {OV8858_8BIT, 0x382A, 0x01}, /* v_odd_inc */
+ {OV8858_8BIT, 0x382B, 0x01}, /* v_even_inc */
+ {OV8858_8BIT, 0x3830, 0x06}, /* Unknown */
+ {OV8858_8BIT, 0x3836, 0x01}, /* Unknown */
+ {OV8858_8BIT, 0x3D85, 0x14}, /* OTP_REG85 */
+ {OV8858_8BIT, 0x3F08, 0x10}, /* PSRAM control register */
+ {OV8858_8BIT, 0x4000, 0xF1}, /* BLC CTRL00 = default */
+ {OV8858_8BIT, 0x4001, 0x00}, /* BLC CTRL01 */
+ {OV8858_8BIT, 0x4002, 0x27}, /* BLC offset = 0x27 */
+ {OV8858_8BIT, 0x4005, 0x10}, /* BLC target = 0x0010 */
+ {OV8858_8BIT, 0x4009, 0x81}, /* BLC CTRL09 */
+ {OV8858_8BIT, 0x400B, 0x0C}, /* BLC CTRL0B = default */
+ {OV8858_8BIT, 0x401B, 0x00}, /* Zero line R coeff. = 0x0000 */
+ {OV8858_8BIT, 0x401D, 0x00}, /* Zero line T coeff. = 0x0000 */
+ {OV8858_8BIT, 0x401F, 0x00}, /* BLC CTRL1F */
+ {OV8858_8BIT, 0x4020, 0x00}, /* Anchor left start = 0x0004 */
+ {OV8858_8BIT, 0x4021, 0x04}, /* Anchor left start = 0x0004 */
+ {OV8858_8BIT, 0x4022, 0x0B}, /* Anchor left end = 0x0BC3 */
+ {OV8858_8BIT, 0x4023, 0xC3}, /* Anchor left end = 0x0BC3 */
+ {OV8858_8BIT, 0x4024, 0x0C}, /* Anchor right start = 0x0C36 */
+ {OV8858_8BIT, 0x4025, 0x36}, /* Anchor right start = 0x0C36 */
+ {OV8858_8BIT, 0x4026, 0x0C}, /* Anchor right end = 0x0C37 */
+ {OV8858_8BIT, 0x4027, 0x37}, /* Anchor right end = 0x0C37 */
+ {OV8858_8BIT, 0x4028, 0x00}, /* Top zero line start = 0 */
+ {OV8858_8BIT, 0x4029, 0x02}, /* Top zero line number = 2 */
+ {OV8858_8BIT, 0x402A, 0x04}, /* Top black line start = 4 */
+ {OV8858_8BIT, 0x402B, 0x08}, /* Top black line number = 8 */
+ {OV8858_8BIT, 0x402C, 0x02}, /* Bottom zero start line = 2 */
+ {OV8858_8BIT, 0x402D, 0x02}, /* Bottom zero line number = 2 */
+ {OV8858_8BIT, 0x402E, 0x0C}, /* Bottom black line start = 12 */
+ {OV8858_8BIT, 0x402F, 0x02}, /* Bottom black line number = 2 */
+ {OV8858_8BIT, 0x4034, 0x3F}, /* Unknown */
+ {OV8858_8BIT, 0x403D, 0x04}, /* BLC CTRL3D */
+ {OV8858_8BIT, 0x4600, 0x01}, /* Unknown */
+ {OV8858_8BIT, 0x4601, 0x97}, /* Unknown */
+ {OV8858_8BIT, 0x4837, 0x14}, /* pclk_period = 0x14 */
+ {OV8858_TOK_TERM, 0, 0}
+};
+
+static const struct ov8858_reg ov8858_6M[] = {
+ {OV8858_8BIT, 0x0100, 0x00}, /* software_standby */
+ {OV8858_8BIT, 0x3778, 0x16}, /* Unknown */
+ {OV8858_8BIT, 0x3800, 0x00}, /* h_crop_start high */
+ {OV8858_8BIT, 0x3801, 0x0C}, /* h_crop_start low */
+ {OV8858_8BIT, 0x3802, 0x01}, /* v_crop_start high */
+ {OV8858_8BIT, 0x3803, 0x3E}, /* v_crop_start low */
+ {OV8858_8BIT, 0x3804, 0x0C}, /* h_crop_end high */
+ {OV8858_8BIT, 0x3805, 0xD3}, /* h_crop_end low */
+ {OV8858_8BIT, 0x3806, 0x08}, /* v_crop_end high */
+ {OV8858_8BIT, 0x3807, 0x71}, /* v_crop_end low */
+ {OV8858_8BIT, 0x3808, 0x0C}, /* h_output_size high 3280 x 1852 */
+ {OV8858_8BIT, 0x3809, 0xD0}, /* h_output_size low */
+ {OV8858_8BIT, 0x380A, 0x07}, /* v_output_size high */
+ {OV8858_8BIT, 0x380B, 0x3C}, /* v_output_size low */
+ {OV8858_8BIT, 0x380C, 0x07}, /* horizontal timing size high */
+ {OV8858_8BIT, 0x380D, 0x94}, /* horizontal timing size low */
+ {OV8858_8BIT, 0x380E, 0x0A}, /* vertical timing size high */
+ {OV8858_8BIT, 0x380F, 0x0D}, /* vertical timing size low */
+ {OV8858_8BIT, 0x3814, 0x01}, /* h_odd_inc */
+ {OV8858_8BIT, 0x3815, 0x01}, /* h_even_inc */
+ {OV8858_8BIT, 0x3820, 0x00}, /* format1 */
+ {OV8858_8BIT, 0x3821, 0x40}, /* format2 */
+ {OV8858_8BIT, 0x382A, 0x01}, /* v_odd_inc */
+ {OV8858_8BIT, 0x382B, 0x01}, /* v_even_inc */
+ {OV8858_8BIT, 0x3830, 0x06}, /* Unknown */
+ {OV8858_8BIT, 0x3836, 0x01}, /* Unknown */
+ {OV8858_8BIT, 0x3D85, 0x14}, /* OTP_REG85 */
+ {OV8858_8BIT, 0x3F08, 0x10}, /* PSRAM control register */
+ {OV8858_8BIT, 0x4000, 0xF1}, /* BLC CTRL00 = default */
+ {OV8858_8BIT, 0x4001, 0x00}, /* BLC CTRL01 */
+ {OV8858_8BIT, 0x4002, 0x27}, /* BLC offset = 0x27 */
+ {OV8858_8BIT, 0x4005, 0x10}, /* BLC target = 0x0010 */
+ {OV8858_8BIT, 0x4009, 0x81}, /* BLC CTRL09 */
+ {OV8858_8BIT, 0x400B, 0x0C}, /* BLC CTRL0B = default */
+ {OV8858_8BIT, 0x401B, 0x00}, /* Zero line R coeff. = 0x0000 */
+ {OV8858_8BIT, 0x401D, 0x00}, /* Zero line T coeff. = 0x0000 */
+ {OV8858_8BIT, 0x401F, 0x00}, /* BLC CTRL1F */
+ {OV8858_8BIT, 0x4020, 0x00}, /* Anchor left start = 0x0004 */
+ {OV8858_8BIT, 0x4021, 0x04}, /* Anchor left start = 0x0004 */
+ {OV8858_8BIT, 0x4022, 0x0B}, /* Anchor left end = 0x0BC3 */
+ {OV8858_8BIT, 0x4023, 0xC3}, /* Anchor left end = 0x0BC3 */
+ {OV8858_8BIT, 0x4024, 0x0C}, /* Anchor right start = 0x0C36 */
+ {OV8858_8BIT, 0x4025, 0x36}, /* Anchor right start = 0x0C36 */
+ {OV8858_8BIT, 0x4026, 0x0C}, /* Anchor right end = 0x0C37 */
+ {OV8858_8BIT, 0x4027, 0x37}, /* Anchor right end = 0x0C37 */
+ {OV8858_8BIT, 0x4028, 0x00}, /* Top zero line start = 0 */
+ {OV8858_8BIT, 0x4029, 0x02}, /* Top zero line number = 2 */
+ {OV8858_8BIT, 0x402A, 0x04}, /* Top black line start = 4 */
+ {OV8858_8BIT, 0x402B, 0x08}, /* Top black line number = 8 */
+ {OV8858_8BIT, 0x402C, 0x02}, /* Bottom zero start line = 2 */
+ {OV8858_8BIT, 0x402D, 0x02}, /* Bottom zero line number = 2 */
+ {OV8858_8BIT, 0x402E, 0x0C}, /* Bottom black line start = 12 */
+ {OV8858_8BIT, 0x402F, 0x02}, /* Bottom black line number = 2 */
+ {OV8858_8BIT, 0x4034, 0x3F}, /* Unknown */
+ {OV8858_8BIT, 0x403D, 0x04}, /* BLC CTRL3D */
+ {OV8858_8BIT, 0x4600, 0x01}, /* Unknown */
+ {OV8858_8BIT, 0x4601, 0x97}, /* Unknown */
+ {OV8858_8BIT, 0x4837, 0x14}, /* pclk_period = 0x14 */
+ {OV8858_TOK_TERM, 0, 0}
+};
+
+static const struct ov8858_reg ov8858_1080P_60[] = {
+ {OV8858_8BIT, 0x0100, 0x00}, /* software_standby */
+ {OV8858_8BIT, 0x3778, 0x17}, /* Unknown */
+ {OV8858_8BIT, 0x3800, 0x02}, /* h_crop_start high */
+ {OV8858_8BIT, 0x3801, 0x26}, /* h_crop_start low */
+ {OV8858_8BIT, 0x3802, 0x02}, /* v_crop_start high */
+ {OV8858_8BIT, 0x3803, 0x8C}, /* v_crop_start low */
+ {OV8858_8BIT, 0x3804, 0x0A}, /* h_crop_end high */
+ {OV8858_8BIT, 0x3805, 0x9D}, /* h_crop_end low */
+ {OV8858_8BIT, 0x3806, 0x07}, /* v_crop_end high */
+ {OV8858_8BIT, 0x3807, 0x0A}, /* v_crop_end low */
+ {OV8858_8BIT, 0x3808, 0x07}, /* h_output_size high*/
+ {OV8858_8BIT, 0x3809, 0x90}, /* h_output_size low */
+ {OV8858_8BIT, 0x380A, 0x04}, /* v_output_size high */
+ {OV8858_8BIT, 0x380B, 0x48}, /* v_output_size low */
+ {OV8858_8BIT, 0x380C, 0x07}, /* horizontal timing size high */
+ {OV8858_8BIT, 0x380D, 0x94}, /* horizontal timing size low */
+ {OV8858_8BIT, 0x380E, 0x04}, /* vertical timing size high */
+ {OV8858_8BIT, 0x380F, 0xEC}, /* vertical timing size low */
+ {OV8858_8BIT, 0x3814, 0x01}, /* h_odd_inc */
+ {OV8858_8BIT, 0x3815, 0x01}, /* h_even_inc */
+ {OV8858_8BIT, 0x3820, 0x00}, /* format1 */
+ {OV8858_8BIT, 0x3821, 0x40}, /* format2 */
+ {OV8858_8BIT, 0x382A, 0x01}, /* v_odd_inc */
+ {OV8858_8BIT, 0x382B, 0x01}, /* v_even_inc */
+ {OV8858_8BIT, 0x3830, 0x06}, /* Unknown */
+ {OV8858_8BIT, 0x3836, 0x01}, /* Unknown */
+ {OV8858_8BIT, 0x3D85, 0x14}, /* OTP_REG85 */
+ {OV8858_8BIT, 0x3F08, 0x10}, /* PSRAM control register */
+ {OV8858_8BIT, 0x4000, 0xF1}, /* BLC CTRL00 = default */
+ {OV8858_8BIT, 0x4001, 0x00}, /* BLC CTRL01 */
+ {OV8858_8BIT, 0x4002, 0x27}, /* BLC offset = 0x27 */
+ {OV8858_8BIT, 0x4005, 0x10}, /* BLC target = 0x0010 */
+ {OV8858_8BIT, 0x4009, 0x81}, /* BLC CTRL09 */
+ {OV8858_8BIT, 0x400B, 0x0C}, /* BLC CTRL0B = default */
+ {OV8858_8BIT, 0x401B, 0x00}, /* Zero line R coeff. = 0x0000 */
+ {OV8858_8BIT, 0x401D, 0x00}, /* Zero line T coeff. = 0x0000 */
+ {OV8858_8BIT, 0x401F, 0x00}, /* BLC CTRL1F */
+ {OV8858_8BIT, 0x4020, 0x00}, /* Anchor left start = 0x0004 */
+ {OV8858_8BIT, 0x4021, 0x04}, /* Anchor left start = 0x0004 */
+ {OV8858_8BIT, 0x4022, 0x07}, /* Anchor left end = 0x072D */
+ {OV8858_8BIT, 0x4023, 0x2D}, /* Anchor left end = 0x072D */
+ {OV8858_8BIT, 0x4024, 0x07}, /* Anchor right start = 0x079E */
+ {OV8858_8BIT, 0x4025, 0x9E}, /* Anchor right start = 0x079E */
+ {OV8858_8BIT, 0x4026, 0x07}, /* Anchor right end = 0x079F */
+ {OV8858_8BIT, 0x4027, 0x9F}, /* Anchor right end = 0x079F */
+ {OV8858_8BIT, 0x4028, 0x00}, /* Top zero line start = 0 */
+ {OV8858_8BIT, 0x4029, 0x02}, /* Top zero line number = 2 */
+ {OV8858_8BIT, 0x402A, 0x04}, /* Top black line start = 4 */
+ {OV8858_8BIT, 0x402B, 0x08}, /* Top black line number = 8 */
+ {OV8858_8BIT, 0x402C, 0x02}, /* Bottom zero start line = 2 */
+ {OV8858_8BIT, 0x402D, 0x02}, /* Bottom zero line number = 2 */
+ {OV8858_8BIT, 0x402E, 0x0C}, /* Bottom black line start = 12 */
+ {OV8858_8BIT, 0x402F, 0x02}, /* Bottom black line number = 2 */
+ {OV8858_8BIT, 0x4034, 0x3F}, /* Unknown */
+ {OV8858_8BIT, 0x403D, 0x04}, /* BLC CTRL3D */
+ {OV8858_8BIT, 0x4600, 0x00}, /* Unknown */
+ {OV8858_8BIT, 0x4601, 0xef}, /* Unknown */
+ {OV8858_8BIT, 0x4837, 0x16}, /* pclk_period = 0x16 */
+ {OV8858_TOK_TERM, 0, 0}
+};
+
+static const struct ov8858_reg ov8858_1080P_30[] = {
+ {OV8858_8BIT, 0x0100, 0x00}, /* software_standby */
+ {OV8858_8BIT, 0x3778, 0x17}, /* Unknown */
+ {OV8858_8BIT, 0x3800, 0x02}, /* h_crop_start high */
+ {OV8858_8BIT, 0x3801, 0x26}, /* h_crop_start low */
+ {OV8858_8BIT, 0x3802, 0x02}, /* v_crop_start high */
+ {OV8858_8BIT, 0x3803, 0x8C}, /* v_crop_start low */
+ {OV8858_8BIT, 0x3804, 0x0A}, /* h_crop_end high */
+ {OV8858_8BIT, 0x3805, 0x9D}, /* h_crop_end low */
+ {OV8858_8BIT, 0x3806, 0x07}, /* v_crop_end high */
+ {OV8858_8BIT, 0x3807, 0x0A}, /* v_crop_end low */
+ {OV8858_8BIT, 0x3808, 0x07}, /* h_output_size high*/
+ {OV8858_8BIT, 0x3809, 0x90}, /* h_output_size low */
+ {OV8858_8BIT, 0x380A, 0x04}, /* v_output_size high */
+ {OV8858_8BIT, 0x380B, 0x48}, /* v_output_size low */
+ {OV8858_8BIT, 0x380C, 0x07}, /* horizontal timing size high */
+ {OV8858_8BIT, 0x380D, 0x94}, /* horizontal timing size low */
+ {OV8858_8BIT, 0x380E, 0x0A}, /* vertical timing size high */
+ {OV8858_8BIT, 0x380F, 0x0D}, /* vertical timing size low */
+ {OV8858_8BIT, 0x3814, 0x01}, /* h_odd_inc */
+ {OV8858_8BIT, 0x3815, 0x01}, /* h_even_inc */
+ {OV8858_8BIT, 0x3820, 0x00}, /* format1 */
+ {OV8858_8BIT, 0x3821, 0x40}, /* format2 */
+ {OV8858_8BIT, 0x382A, 0x01}, /* v_odd_inc */
+ {OV8858_8BIT, 0x382B, 0x01}, /* v_even_inc */
+ {OV8858_8BIT, 0x3830, 0x06}, /* Unknown */
+ {OV8858_8BIT, 0x3836, 0x01}, /* Unknown */
+ {OV8858_8BIT, 0x3D85, 0x14}, /* OTP_REG85 */
+ {OV8858_8BIT, 0x3F08, 0x10}, /* PSRAM control register */
+ {OV8858_8BIT, 0x4000, 0xF1}, /* BLC CTRL00 = default */
+ {OV8858_8BIT, 0x4001, 0x00}, /* BLC CTRL01 */
+ {OV8858_8BIT, 0x4002, 0x27}, /* BLC offset = 0x27 */
+ {OV8858_8BIT, 0x4005, 0x10}, /* BLC target = 0x0010 */
+ {OV8858_8BIT, 0x4009, 0x81}, /* BLC CTRL09 */
+ {OV8858_8BIT, 0x400B, 0x0C}, /* BLC CTRL0B = default */
+ {OV8858_8BIT, 0x401B, 0x00}, /* Zero line R coeff. = 0x0000 */
+ {OV8858_8BIT, 0x401D, 0x00}, /* Zero line T coeff. = 0x0000 */
+ {OV8858_8BIT, 0x401F, 0x00}, /* BLC CTRL1F */
+ {OV8858_8BIT, 0x4020, 0x00}, /* Anchor left start = 0x0004 */
+ {OV8858_8BIT, 0x4021, 0x04}, /* Anchor left start = 0x0004 */
+ {OV8858_8BIT, 0x4022, 0x07}, /* Anchor left end = 0x072D */
+ {OV8858_8BIT, 0x4023, 0x2D}, /* Anchor left end = 0x072D */
+ {OV8858_8BIT, 0x4024, 0x07}, /* Anchor right start = 0x079E */
+ {OV8858_8BIT, 0x4025, 0x9E}, /* Anchor right start = 0x079E */
+ {OV8858_8BIT, 0x4026, 0x07}, /* Anchor right end = 0x079F */
+ {OV8858_8BIT, 0x4027, 0x9F}, /* Anchor right end = 0x079F */
+ {OV8858_8BIT, 0x4028, 0x00}, /* Top zero line start = 0 */
+ {OV8858_8BIT, 0x4029, 0x02}, /* Top zero line number = 2 */
+ {OV8858_8BIT, 0x402A, 0x04}, /* Top black line start = 4 */
+ {OV8858_8BIT, 0x402B, 0x08}, /* Top black line number = 8 */
+ {OV8858_8BIT, 0x402C, 0x02}, /* Bottom zero start line = 2 */
+ {OV8858_8BIT, 0x402D, 0x02}, /* Bottom zero line number = 2 */
+ {OV8858_8BIT, 0x402E, 0x0C}, /* Bottom black line start = 12 */
+ {OV8858_8BIT, 0x402F, 0x02}, /* Bottom black line number = 2 */
+ {OV8858_8BIT, 0x4034, 0x3F}, /* Unknown */
+ {OV8858_8BIT, 0x403D, 0x04}, /* BLC CTRL3D */
+ {OV8858_8BIT, 0x4600, 0x00}, /* Unknown */
+ {OV8858_8BIT, 0x4601, 0xef}, /* Unknown */
+ {OV8858_8BIT, 0x4837, 0x16}, /* pclk_period = 0x16 */
+ {OV8858_TOK_TERM, 0, 0}
+};
+
+static const struct ov8858_reg ov8858_1640x1232[] = {
+ {OV8858_8BIT, 0x0100, 0x00}, /* software_standby */
+ {OV8858_8BIT, 0x3778, 0x16}, /* Unknown */
+ {OV8858_8BIT, 0x3800, 0x00}, /* h_crop_start high */
+ {OV8858_8BIT, 0x3801, 0x0C}, /* h_crop_start low 12 */
+ {OV8858_8BIT, 0x3802, 0x00}, /* v_crop_start high */
+ {OV8858_8BIT, 0x3803, 0x0C}, /* v_crop_start low */
+ {OV8858_8BIT, 0x3804, 0x0C}, /* h_crop_end high 3283 */
+ {OV8858_8BIT, 0x3805, 0xD3}, /* h_crop_end low */
+ {OV8858_8BIT, 0x3806, 0x09}, /* v_crop_end high */
+ {OV8858_8BIT, 0x3807, 0xA3}, /* v_crop_end low */
+ {OV8858_8BIT, 0x3808, 0x06}, /* h_output_size high 1640 x 1232 */
+ {OV8858_8BIT, 0x3809, 0x68}, /* h_output_size low */
+ {OV8858_8BIT, 0x380A, 0x04}, /* v_output_size high */
+ {OV8858_8BIT, 0x380B, 0xD0}, /* v_output_size low */
+ {OV8858_8BIT, 0x380C, 0x07}, /* horizontal timing size high */
+ {OV8858_8BIT, 0x380D, 0x94}, /* horizontal timing size low */
+ {OV8858_8BIT, 0x380E, 0x09}, /* vertical timing size high */
+ {OV8858_8BIT, 0x380F, 0xAA}, /* vertical timing size low */
+ {OV8858_8BIT, 0x3814, 0x03}, /* h_odd_inc */
+ {OV8858_8BIT, 0x3815, 0x01}, /* h_even_inc */
+ {OV8858_8BIT, 0x3820, 0x00}, /* format1 */
+ {OV8858_8BIT, 0x3821, 0x67}, /* format2 */
+ {OV8858_8BIT, 0x382A, 0x03}, /* v_odd_inc */
+ {OV8858_8BIT, 0x382B, 0x01}, /* v_even_inc */
+ {OV8858_8BIT, 0x3830, 0x08}, /* Unknown */
+ {OV8858_8BIT, 0x3836, 0x02}, /* Unknown */
+ {OV8858_8BIT, 0x3D85, 0x16}, /* OTP_REG85 */
+ {OV8858_8BIT, 0x3F08, 0x08}, /* PSRAM control register */
+ {OV8858_8BIT, 0x4000, 0xF1}, /* BLC CTRL00 = default */
+ {OV8858_8BIT, 0x4001, 0x10}, /* BLC CTRL01 */
+ {OV8858_8BIT, 0x4002, 0x27}, /* BLC offset = 0x27 */
+ {OV8858_8BIT, 0x4005, 0x10}, /* BLC target = 0x0010 */
+ {OV8858_8BIT, 0x4009, 0x81}, /* BLC CTRL09 */
+ {OV8858_8BIT, 0x400B, 0x0C}, /* BLC CTRL0B = default */
+ {OV8858_8BIT, 0x401B, 0x00}, /* Zero line R coeff. = 0x0000 */
+ {OV8858_8BIT, 0x401D, 0x00}, /* Zero line T coeff. = 0x0000 */
+ {OV8858_8BIT, 0x401F, 0x00}, /* BLC CTRL1F */
+ {OV8858_8BIT, 0x4020, 0x00}, /* Anchor left start = 0x0004 */
+ {OV8858_8BIT, 0x4021, 0x04}, /* Anchor left start = 0x0004 */
+ {OV8858_8BIT, 0x4022, 0x04}, /* Anchor left end = 0x04B9 */
+ {OV8858_8BIT, 0x4023, 0xB9}, /* Anchor left end = 0x04B9 */
+ {OV8858_8BIT, 0x4024, 0x05}, /* Anchor right start = 0x052A */
+ {OV8858_8BIT, 0x4025, 0x2A}, /* Anchor right start = 0x052A */
+ {OV8858_8BIT, 0x4026, 0x05}, /* Anchor right end = 0x052B */
+ {OV8858_8BIT, 0x4027, 0x2B}, /* Anchor right end = 0x052B */
+ {OV8858_8BIT, 0x4028, 0x00}, /* Top zero line start = 0 */
+ {OV8858_8BIT, 0x4029, 0x02}, /* Top zero line number = 2 */
+ {OV8858_8BIT, 0x402A, 0x04}, /* Top black line start = 4 */
+ {OV8858_8BIT, 0x402B, 0x04}, /* Top black line number = 4 */
+ {OV8858_8BIT, 0x402C, 0x02}, /* Bottom zero start line = 2 */
+ {OV8858_8BIT, 0x402D, 0x02}, /* Bottom zero line number = 2 */
+ {OV8858_8BIT, 0x402E, 0x08}, /* Bottom black line start = 8 */
+ {OV8858_8BIT, 0x402F, 0x02}, /* Bottom black line number = 2 */
+ {OV8858_8BIT, 0x4034, 0x3F}, /* Unknown */
+ {OV8858_8BIT, 0x403D, 0x04}, /* BLC CTRL3D */
+ {OV8858_8BIT, 0x4600, 0x00}, /* Unknown */
+ {OV8858_8BIT, 0x4601, 0xCB}, /* Unknown */
+ {OV8858_8BIT, 0x4837, 0x14}, /* pclk_period = 0x14 */
+ {OV8858_TOK_TERM, 0, 0}
+};
+
+static const struct ov8858_reg ov8858_1640x1096[] = {
+ {OV8858_8BIT, 0x0100, 0x00}, /* software_standby */
+ {OV8858_8BIT, 0x3778, 0x16}, /* Unknown */
+ {OV8858_8BIT, 0x3800, 0x00}, /* h_crop_start high */
+ {OV8858_8BIT, 0x3801, 0x0C}, /* h_crop_start low 12 */
+ {OV8858_8BIT, 0x3802, 0x00}, /* v_crop_start high */
+ {OV8858_8BIT, 0x3803, 0x0C}, /* v_crop_start low */
+ {OV8858_8BIT, 0x3804, 0x0C}, /* h_crop_end high 3283 */
+ {OV8858_8BIT, 0x3805, 0xD3}, /* h_crop_end low */
+ {OV8858_8BIT, 0x3806, 0x09}, /* v_crop_end high */
+ {OV8858_8BIT, 0x3807, 0xA3}, /* v_crop_end low */
+ {OV8858_8BIT, 0x3808, 0x06}, /* h_output_size high 1640 x 1096 */
+ {OV8858_8BIT, 0x3809, 0x68}, /* h_output_size low */
+ {OV8858_8BIT, 0x380A, 0x04}, /* v_output_size high */
+ {OV8858_8BIT, 0x380B, 0x48}, /* v_output_size low */
+ {OV8858_8BIT, 0x380C, 0x07}, /* horizontal timing size high */
+ {OV8858_8BIT, 0x380D, 0x94}, /* horizontal timing size low */
+ {OV8858_8BIT, 0x380E, 0x09}, /* vertical timing size high */
+ {OV8858_8BIT, 0x380F, 0xAA}, /* vertical timing size low */
+ {OV8858_8BIT, 0x3814, 0x03}, /* h_odd_inc */
+ {OV8858_8BIT, 0x3815, 0x01}, /* h_even_inc */
+ {OV8858_8BIT, 0x3820, 0x00}, /* format1 */
+ {OV8858_8BIT, 0x3821, 0x67}, /* format2 */
+ {OV8858_8BIT, 0x382A, 0x03}, /* v_odd_inc */
+ {OV8858_8BIT, 0x382B, 0x01}, /* v_even_inc */
+ {OV8858_8BIT, 0x3830, 0x08}, /* Unknown */
+ {OV8858_8BIT, 0x3836, 0x02}, /* Unknown */
+ {OV8858_8BIT, 0x3D85, 0x16}, /* OTP_REG85 */
+ {OV8858_8BIT, 0x3F08, 0x08}, /* PSRAM control register */
+ {OV8858_8BIT, 0x4000, 0xF1}, /* BLC CTRL00 = default */
+ {OV8858_8BIT, 0x4001, 0x10}, /* BLC CTRL01 */
+ {OV8858_8BIT, 0x4002, 0x27}, /* BLC offset = 0x27 */
+ {OV8858_8BIT, 0x4005, 0x10}, /* BLC target = 0x0010 */
+ {OV8858_8BIT, 0x4009, 0x81}, /* BLC CTRL09 */
+ {OV8858_8BIT, 0x400B, 0x0C}, /* BLC CTRL0B = default */
+ {OV8858_8BIT, 0x401B, 0x00}, /* Zero line R coeff. = 0x0000 */
+ {OV8858_8BIT, 0x401D, 0x00}, /* Zero line T coeff. = 0x0000 */
+ {OV8858_8BIT, 0x401F, 0x00}, /* BLC CTRL1F */
+ {OV8858_8BIT, 0x4020, 0x00}, /* Anchor left start = 0x0004 */
+ {OV8858_8BIT, 0x4021, 0x04}, /* Anchor left start = 0x0004 */
+ {OV8858_8BIT, 0x4022, 0x04}, /* Anchor left end = 0x04B9 */
+ {OV8858_8BIT, 0x4023, 0xB9}, /* Anchor left end = 0x04B9 */
+ {OV8858_8BIT, 0x4024, 0x05}, /* Anchor right start = 0x052A */
+ {OV8858_8BIT, 0x4025, 0x2A}, /* Anchor right start = 0x052A */
+ {OV8858_8BIT, 0x4026, 0x05}, /* Anchor right end = 0x052B */
+ {OV8858_8BIT, 0x4027, 0x2B}, /* Anchor right end = 0x052B */
+ {OV8858_8BIT, 0x4028, 0x00}, /* Top zero line start = 0 */
+ {OV8858_8BIT, 0x4029, 0x02}, /* Top zero line number = 2 */
+ {OV8858_8BIT, 0x402A, 0x04}, /* Top black line start = 4 */
+ {OV8858_8BIT, 0x402B, 0x04}, /* Top black line number = 4 */
+ {OV8858_8BIT, 0x402C, 0x02}, /* Bottom zero start line = 2 */
+ {OV8858_8BIT, 0x402D, 0x02}, /* Bottom zero line number = 2 */
+ {OV8858_8BIT, 0x402E, 0x08}, /* Bottom black line start = 8 */
+ {OV8858_8BIT, 0x402F, 0x02}, /* Bottom black line number = 2 */
+ {OV8858_8BIT, 0x4034, 0x3F}, /* Unknown */
+ {OV8858_8BIT, 0x403D, 0x04}, /* BLC CTRL3D */
+ {OV8858_8BIT, 0x4600, 0x00}, /* Unknown */
+ {OV8858_8BIT, 0x4601, 0xCB}, /* Unknown */
+ {OV8858_8BIT, 0x4837, 0x14}, /* pclk_period = 0x14 */
+ {OV8858_TOK_TERM, 0, 0}
+};
+
+
+static const struct ov8858_reg ov8858_1640x926[] = {
+ {OV8858_8BIT, 0x0100, 0x00}, /* software_standby */
+ {OV8858_8BIT, 0x3778, 0x16}, /* Unknown */
+ {OV8858_8BIT, 0x3800, 0x00}, /* h_crop_start high */
+ {OV8858_8BIT, 0x3801, 0x0C}, /* h_crop_start low */
+ {OV8858_8BIT, 0x3802, 0x00}, /* v_crop_start high */
+ {OV8858_8BIT, 0x3803, 0x0C}, /* v_crop_start low */
+ {OV8858_8BIT, 0x3804, 0x0C}, /* h_crop_end high */
+ {OV8858_8BIT, 0x3805, 0xD3}, /* h_crop_end low */
+ {OV8858_8BIT, 0x3806, 0x09}, /* v_crop_end high */
+ {OV8858_8BIT, 0x3807, 0xA3}, /* v_crop_end low */
+ {OV8858_8BIT, 0x3808, 0x06}, /* h_output_size high 1640 x 926 */
+ {OV8858_8BIT, 0x3809, 0x68}, /* h_output_size low */
+ {OV8858_8BIT, 0x380A, 0x03}, /* v_output_size high */
+ {OV8858_8BIT, 0x380B, 0x9E}, /* v_output_size low */
+ {OV8858_8BIT, 0x380C, 0x07}, /* horizontal timing size high */
+ {OV8858_8BIT, 0x380D, 0x94}, /* horizontal timing size low */
+ {OV8858_8BIT, 0x380E, 0x09}, /* vertical timing size high */
+ {OV8858_8BIT, 0x380F, 0xAA}, /* vertical timing size low */
+ {OV8858_8BIT, 0x3814, 0x03}, /* h_odd_inc */
+ {OV8858_8BIT, 0x3815, 0x01}, /* h_even_inc */
+ {OV8858_8BIT, 0x3820, 0x00}, /* format1 */
+ {OV8858_8BIT, 0x3821, 0x67}, /* format2 */
+ {OV8858_8BIT, 0x382A, 0x03}, /* v_odd_inc */
+ {OV8858_8BIT, 0x382B, 0x01}, /* v_even_inc */
+ {OV8858_8BIT, 0x3830, 0x08}, /* Unknown */
+ {OV8858_8BIT, 0x3836, 0x02}, /* Unknown */
+ {OV8858_8BIT, 0x3D85, 0x16}, /* OTP_REG85 */
+ {OV8858_8BIT, 0x3F08, 0x08}, /* PSRAM control register */
+ {OV8858_8BIT, 0x4000, 0xF1}, /* BLC CTRL00 = default */
+ {OV8858_8BIT, 0x4001, 0x10}, /* BLC CTRL01 */
+ {OV8858_8BIT, 0x4002, 0x27}, /* BLC offset = 0x27 */
+ {OV8858_8BIT, 0x4005, 0x10}, /* BLC target = 0x0010 */
+ {OV8858_8BIT, 0x4009, 0x81}, /* BLC CTRL09 */
+ {OV8858_8BIT, 0x400B, 0x0C}, /* BLC CTRL0B = default */
+ {OV8858_8BIT, 0x401B, 0x00}, /* Zero line R coeff. = 0x0000 */
+ {OV8858_8BIT, 0x401D, 0x00}, /* Zero line T coeff. = 0x0000 */
+ {OV8858_8BIT, 0x401F, 0x00}, /* BLC CTRL1F */
+ {OV8858_8BIT, 0x4020, 0x00}, /* Anchor left start = 0x0004 */
+ {OV8858_8BIT, 0x4021, 0x04}, /* Anchor left start = 0x0004 */
+ {OV8858_8BIT, 0x4022, 0x04}, /* Anchor left end = 0x04B9 */
+ {OV8858_8BIT, 0x4023, 0xB9}, /* Anchor left end = 0x04B9 */
+ {OV8858_8BIT, 0x4024, 0x05}, /* Anchor right start = 0x052A */
+ {OV8858_8BIT, 0x4025, 0x2A}, /* Anchor right start = 0x052A */
+ {OV8858_8BIT, 0x4026, 0x05}, /* Anchor right end = 0x052B */
+ {OV8858_8BIT, 0x4027, 0x2B}, /* Anchor right end = 0x052B */
+ {OV8858_8BIT, 0x4028, 0x00}, /* Top zero line start = 0 */
+ {OV8858_8BIT, 0x4029, 0x02}, /* Top zero line number = 2 */
+ {OV8858_8BIT, 0x402A, 0x04}, /* Top black line start = 4 */
+ {OV8858_8BIT, 0x402B, 0x04}, /* Top black line number = 4 */
+ {OV8858_8BIT, 0x402C, 0x02}, /* Bottom zero start line = 2 */
+ {OV8858_8BIT, 0x402D, 0x02}, /* Bottom zero line number = 2 */
+ {OV8858_8BIT, 0x402E, 0x08}, /* Bottom black line start = 8 */
+ {OV8858_8BIT, 0x402F, 0x02}, /* Bottom black line number = 2 */
+ {OV8858_8BIT, 0x4034, 0x3F}, /* Unknown */
+ {OV8858_8BIT, 0x403D, 0x04}, /* BLC CTRL3D */
+ {OV8858_8BIT, 0x4600, 0x00}, /* Unknown */
+ {OV8858_8BIT, 0x4601, 0xCB}, /* Unknown */
+ {OV8858_8BIT, 0x4837, 0x14}, /* pclk_period = 0x14 */
+ {OV8858_TOK_TERM, 0, 0}
+};
+
+static struct ov8858_resolution ov8858_res_preview[] = {
+ {
+ .desc = "ov8858_1640x926_PREVIEW",
+ .width = 1640,
+ .height = 926,
+ .used = 0,
+ .regs = ov8858_1640x926,
+ .bin_factor_x = 0,
+ .bin_factor_y = 0,
+ .skip_frames = 0,
+ .fps_options = {
+ {
+ .fps = 30,
+ .pixels_per_line = 3880,
+ .lines_per_frame = 2573,
+ },
+ {
+ }
+ },
+ },
+ {
+ .desc = "ov8858_1640x1232_PREVIEW",
+ .width = 1640,
+ .height = 1232,
+ .used = 0,
+ .regs = ov8858_1640x1232,
+ .bin_factor_x = 0,
+ .bin_factor_y = 0,
+ .skip_frames = 0,
+ .fps_options = {
+ {
+ .fps = 30,
+ .pixels_per_line = 3880,
+ .lines_per_frame = 2573,
+ },
+ {
+ }
+ },
+ },
+ {
+ .desc = "ov8858_3276x1848_PREVIEW",
+ .width = 3276,
+ .height = 1848,
+ .used = 0,
+ .regs = ov8858_3276x1848,
+ .bin_factor_x = 0,
+ .bin_factor_y = 0,
+ .skip_frames = 0,
+ .fps_options = {
+ {
+ .fps = 30,
+ .pixels_per_line = 3880,
+ .lines_per_frame = 2573,
+ },
+ {
+ }
+ },
+ },
+ {
+ .desc = "ov8858_8M_PREVIEW",
+ .width = 3280,
+ .height = 2464,
+ .used = 0,
+ .regs = ov8858_8M,
+ .bin_factor_x = 0,
+ .bin_factor_y = 0,
+ .skip_frames = 0,
+ .fps_options = {
+ {
+ .fps = 30,
+ .pixels_per_line = 3880,
+ .lines_per_frame = 2573,
+ },
+ {
+ }
+ },
+ },
+};
+
+static struct ov8858_resolution ov8858_res_still[] = {
+ {
+ .desc = "ov8858_1640x1232_STILL",
+ .width = 1640,
+ .height = 1232,
+ .used = 0,
+ .regs = ov8858_1640x1232,
+ .bin_factor_x = 0,
+ .bin_factor_y = 0,
+ .skip_frames = 0,
+ .fps_options = {
+ {
+ .fps = 30,
+ .pixels_per_line = 3880,
+ .lines_per_frame = 2573,
+ },
+ {
+ }
+ },
+ },
+ {
+ .desc = "ov8858_1640x926_STILL",
+ .width = 1640,
+ .height = 926,
+ .used = 0,
+ .regs = ov8858_1640x926,
+ .bin_factor_x = 0,
+ .bin_factor_y = 0,
+ .skip_frames = 1,
+ .fps_options = {
+ {
+ .fps = 30,
+ .pixels_per_line = 3880,
+ .lines_per_frame = 2573,
+ },
+ {
+ }
+ },
+ },
+ {
+ .desc = "ov8858_3276X1848_STILL",
+ .width = 3276,
+ .height = 1848,
+ .used = 0,
+ .regs = ov8858_3276x1848,
+ .bin_factor_x = 0,
+ .bin_factor_y = 0,
+ .skip_frames = 1,
+ .fps_options = {
+ {
+ .fps = 30,
+ .pixels_per_line = 3880,
+ .lines_per_frame = 2573,
+ },
+ {
+ }
+ },
+ },
+ {
+ .desc = "ov8858_8M_STILL",
+ .width = 3280,
+ .height = 2464,
+ .used = 0,
+ .regs = ov8858_8M,
+ .bin_factor_x = 0,
+ .bin_factor_y = 0,
+ .skip_frames = 1,
+ .fps_options = {
+ {
+ /* Pixel clock: 149.76 MHz */
+ .fps = 10,
+ .pixels_per_line = 3880,
+ .lines_per_frame = 3859,
+ },
+ {
+ }
+ },
+ },
+};
+
+static struct ov8858_resolution ov8858_res_video[] = {
+ {
+ .desc = "ov8858_1640x926_VIDEO",
+ .width = 1640,
+ .height = 926,
+ .used = 0,
+ .regs = ov8858_1640x926,
+ .bin_factor_x = 0,
+ .bin_factor_y = 0,
+ .skip_frames = 1,
+ .fps_options = {
+ {
+ .fps = 30,
+ .pixels_per_line = 3880,
+ .lines_per_frame = 2573,
+ },
+ {
+ }
+ },
+ },
+ {
+ .desc = "ov8858_1640x1232_VIDEO",
+ .width = 1640,
+ .height = 1232,
+ .used = 0,
+ .regs = ov8858_1640x1232,
+ .bin_factor_x = 0,
+ .bin_factor_y = 0,
+ .skip_frames = 1,
+ .fps_options = {
+ {
+ .fps = 30,
+ .pixels_per_line = 3880,
+ .lines_per_frame = 2573,
+ },
+ {
+ }
+ },
+ },
+ {
+ .desc = "ov8858_1640x1096_VIDEO",
+ .width = 1640,
+ .height = 1096,
+ .used = 0,
+ .regs = ov8858_1640x1096,
+ .bin_factor_x = 0,
+ .bin_factor_y = 0,
+ .skip_frames = 1,
+ .fps_options = {
+ {
+ .fps = 30,
+ .pixels_per_line = 3880,
+ .lines_per_frame = 2573,
+ },
+ {
+ }
+ },
+ },
+ {
+ .desc = "ov8858_6M_VIDEO",
+ .width = 3280,
+ .height = 1852,
+ .used = 0,
+ .regs = ov8858_6M,
+ .bin_factor_x = 0,
+ .bin_factor_y = 0,
+ .skip_frames = 1,
+ .fps_options = {
+ {
+ .fps = 30,
+ .pixels_per_line = 3880,
+ .lines_per_frame = 2573,
+ },
+ {
+ }
+ },
+ },
+ {
+ .desc = "ov8858_8M_VIDEO",
+ .width = 3280,
+ .height = 2464,
+ .used = 0,
+ .regs = ov8858_8M,
+ .bin_factor_x = 0,
+ .bin_factor_y = 0,
+ .skip_frames = 1,
+ .fps_options = {
+ {
+ .fps = 30,
+ .pixels_per_line = 3880,
+ .lines_per_frame = 2573,
+ },
+ {
+ }
+ },
+ },
+};
+
+#endif /* __OV8858_H__ */
diff --git a/drivers/staging/media/atomisp/i2c/ov8858_btns.h b/drivers/staging/media/atomisp/i2c/ov8858_btns.h
new file mode 100644
index 000000000000..09e3cdc1a394
--- /dev/null
+++ b/drivers/staging/media/atomisp/i2c/ov8858_btns.h
@@ -0,0 +1,1284 @@
+/*
+ * Support for the Omnivision OV8858 camera sensor.
+ *
+ * Copyright (c) 2014 Intel Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+
+#ifndef __OV8858_H__
+#define __OV8858_H__
+#include "../include/linux/atomisp_platform.h"
+#include <media/v4l2-ctrls.h>
+
+#define I2C_MSG_LENGTH 0x2
+
+/*
+ * This should be added into include/linux/videodev2.h
+ * NOTE: This is most likely not used anywhere.
+ */
+#define V4L2_IDENT_OV8858 V4L2_IDENT_UNKNOWN
+
+/*
+ * Indexes for VCM driver lists
+ */
+#define OV8858_ID_DEFAULT 0
+#define OV8858_SUNNY 1
+
+#define OV8858_OTP_START_ADDR 0x7010
+#define OV8858_OTP_END_ADDR 0x7186
+
+/*
+ * ov8858 System control registers
+ */
+
+#define OV8858_OTP_LOAD_CTRL 0x3D81
+#define OV8858_OTP_MODE_CTRL 0x3D84
+#define OV8858_OTP_START_ADDR_REG 0x3D88
+#define OV8858_OTP_END_ADDR_REG 0x3D8A
+#define OV8858_OTP_ISP_CTRL2 0x5002
+
+#define OV8858_OTP_MODE_MANUAL BIT(6)
+#define OV8858_OTP_MODE_PROGRAM_DISABLE BIT(7)
+#define OV8858_OTP_LOAD_ENABLE BIT(0)
+#define OV8858_OTP_DPC_ENABLE BIT(3)
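
These definitions describe the sensor's OTP window (0x7010-0x7186) and the registers that drive a manual read of it. The sketch below shows the sequence they imply: select manual mode with programming disabled, set the address window, trigger a load, then read the window back. The ov8858_write_reg8()/ov8858_read_reg8() helpers are assumptions for illustration only and are not declared in this header; a real driver may also need a short settle delay after the load.

/* Needs <linux/i2c.h>; the helper names below are hypothetical. */
int ov8858_write_reg8(struct i2c_client *client, u16 reg, u8 val);
int ov8858_read_reg8(struct i2c_client *client, u16 reg, u8 *val);

static int ov8858_otp_read_sketch(struct i2c_client *client, u8 *buf)
{
	u16 addr;
	int ret;

	/* Manual OTP access, programming disabled */
	ret = ov8858_write_reg8(client, OV8858_OTP_MODE_CTRL,
				OV8858_OTP_MODE_MANUAL |
				OV8858_OTP_MODE_PROGRAM_DISABLE);
	if (ret)
		return ret;

	/* Address window to load, written as high/low byte pairs */
	ov8858_write_reg8(client, OV8858_OTP_START_ADDR_REG,
			  OV8858_OTP_START_ADDR >> 8);
	ov8858_write_reg8(client, OV8858_OTP_START_ADDR_REG + 1,
			  OV8858_OTP_START_ADDR & 0xFF);
	ov8858_write_reg8(client, OV8858_OTP_END_ADDR_REG,
			  OV8858_OTP_END_ADDR >> 8);
	ov8858_write_reg8(client, OV8858_OTP_END_ADDR_REG + 1,
			  OV8858_OTP_END_ADDR & 0xFF);

	/* Trigger the load, then read the whole window out */
	ov8858_write_reg8(client, OV8858_OTP_LOAD_CTRL, OV8858_OTP_LOAD_ENABLE);
	for (addr = OV8858_OTP_START_ADDR; addr <= OV8858_OTP_END_ADDR; addr++)
		ov8858_read_reg8(client, addr, buf++);

	return 0;
}
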
+
+#define OV8858_PLL1_PREDIV0 0x030A
+#define OV8858_PLL1_PREDIV 0x0300
+#define OV8858_PLL1_MULTIPLIER 0x0301
+#define OV8858_PLL1_SYS_PRE_DIV 0x0305
+#define OV8858_PLL1_SYS_DIVIDER 0x0306
+
+#define OV8858_PLL1_PREDIV0_MASK BIT(0)
+#define OV8858_PLL1_PREDIV_MASK (BIT(0) | BIT(1) | BIT(2))
+#define OV8858_PLL1_MULTIPLIER_MASK 0x01FF
+#define OV8858_PLL1_SYS_PRE_DIV_MASK (BIT(0) | BIT(1))
+#define OV8858_PLL1_SYS_DIVIDER_MASK BIT(0)
+
+#define OV8858_PLL2_PREDIV0 0x0312
+#define OV8858_PLL2_PREDIV 0x030B
+#define OV8858_PLL2_MULTIPLIER 0x030C
+#define OV8858_PLL2_DAC_DIVIDER 0x0312
+#define OV8858_PLL2_SYS_PRE_DIV 0x030F
+#define OV8858_PLL2_SYS_DIVIDER 0x030E
+
+#define OV8858_PLL2_PREDIV0_MASK BIT(4)
+#define OV8858_PLL2_PREDIV_MASK (BIT(0) | BIT(1) | BIT(2))
+#define OV8858_PLL2_MULTIPLIER_MASK 0x01FF
+#define OV8858_PLL2_DAC_DIVIDER_MASK (BIT(0) | BIT(1) | BIT(2) | BIT(3))
+#define OV8858_PLL2_SYS_PRE_DIV_MASK (BIT(0) | BIT(1) | BIT(2) | BIT(3))
+#define OV8858_PLL2_SYS_DIVIDER_MASK (BIT(0) | BIT(1) | BIT(2))
+
+#define OV8858_PLL_SCLKSEL1 0x3032
+#define OV8858_PLL_SCLKSEL2 0x3033
+#define OV8858_SRB_HOST_INPUT_DIS 0x3106
+
+#define OV8858_PLL_SCLKSEL1_MASK BIT(7)
+#define OV8858_PLL_SCLKSEL2_MASK BIT(1)
+
+#define OV8858_SYS_PRE_DIV_OFFSET 2
+#define OV8858_SYS_PRE_DIV_MASK (BIT(2) | BIT(3))
+#define OV8858_SCLK_PDIV_OFFSET 4
+#define OV8858_SCLK_PDIV_MASK (BIT(4) | BIT(5) | BIT(6) | BIT(7))
+
+#define OV8858_TIMING_HTS 0x380C
+#define OV8858_TIMING_VTS 0x380E
+
+#define OV8858_HORIZONTAL_START_H 0x3800
+#define OV8858_VERTICAL_START_H 0x3802
+#define OV8858_HORIZONTAL_END_H 0x3804
+#define OV8858_VERTICAL_END_H 0x3806
+#define OV8858_HORIZONTAL_OUTPUT_SIZE_H 0x3808
+#define OV8858_VERTICAL_OUTPUT_SIZE_H 0x380A
+
+#define OV8858_GROUP_ACCESS 0x3208
+#define OV8858_GROUP_ZERO 0x00
+#define OV8858_GROUP_ACCESS_HOLD_START 0x00
+#define OV8858_GROUP_ACCESS_HOLD_END 0x10
+#define OV8858_GROUP_ACCESS_DELAY_LAUNCH 0xA0
+#define OV8858_GROUP_ACCESS_QUICK_LAUNCH 0xE0
+
+#define OV_SUBDEV_PREFIX "ov"
+#define OV_ID_DEFAULT 0x0000
+#define OV8858_NAME "ov8858"
+#define OV8858_CHIP_ID 0x8858
+
+#define OV8858_LONG_EXPO 0x3500
+#define OV8858_LONG_GAIN 0x3508
+#define OV8858_LONG_DIGI_GAIN 0x350A
+#define OV8858_SHORT_GAIN 0x350C
+#define OV8858_SHORT_DIGI_GAIN 0x350E
+
+#define OV8858_FORMAT1 0x3820
+#define OV8858_FORMAT2 0x3821
+
+#define OV8858_FLIP_ENABLE 0x06
+
+#define OV8858_MWB_RED_GAIN_H 0x5032
+#define OV8858_MWB_GREEN_GAIN_H 0x5034
+#define OV8858_MWB_BLUE_GAIN_H 0x5036
+#define OV8858_MWB_GAIN_MAX 0x0FFF
+
+#define OV8858_CHIP_ID_HIGH 0x300B
+#define OV8858_CHIP_ID_LOW 0x300C
+#define OV8858_STREAM_MODE 0x0100
+
+#define OV8858_FOCAL_LENGTH_NUM 294 /* 2.94mm */
+#define OV8858_FOCAL_LENGTH_DEM 100
+#define OV8858_F_NUMBER_DEFAULT_NUM 24 /* 2.4 */
+#define OV8858_F_NUMBER_DEM 10
+
+#define OV8858_H_INC_ODD 0x3814
+#define OV8858_H_INC_EVEN 0x3815
+#define OV8858_V_INC_ODD 0x382A
+#define OV8858_V_INC_EVEN 0x382B
+
+#define OV8858_READ_MODE_BINNING_ON 0x0400 /* ToDo: Check this */
+#define OV8858_READ_MODE_BINNING_OFF 0x00 /* ToDo: Check this */
+#define OV8858_BIN_FACTOR_MAX 2
+#define OV8858_INTEGRATION_TIME_MARGIN 14
+
+#define OV8858_MAX_VTS_VALUE 0xFFFF
+#define OV8858_MAX_EXPOSURE_VALUE \
+ (OV8858_MAX_VTS_VALUE - OV8858_INTEGRATION_TIME_MARGIN)
+#define OV8858_MAX_GAIN_VALUE 0x07FF
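
The exposure ceiling is derived rather than fixed: with OV8858_MAX_VTS_VALUE = 0xFFFF and a 14-line integration margin, OV8858_MAX_EXPOSURE_VALUE works out to 0xFFF1. The same margin presumably has to be kept against the current frame length (VTS) of whatever mode is active, not just the absolute maximum. A small clamping sketch under that assumption (the function name is illustrative, not part of the driver):

/* Clamp a requested exposure, in lines, against the active VTS and the
 * register limit. Assumes vts is larger than the integration margin. */
static u16 ov8858_clamp_exposure(u32 exposure, u16 vts)
{
	u32 max = min_t(u32, vts - OV8858_INTEGRATION_TIME_MARGIN,
			OV8858_MAX_EXPOSURE_VALUE);

	return min(exposure, max);
}
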
+
+#define OV8858_MAX_FOCUS_POS 1023
+
+#define OV8858_TEST_PATTERN_REG 0x5E00
+
+struct ov8858_vcm {
+ int (*power_up)(struct v4l2_subdev *sd);
+ int (*power_down)(struct v4l2_subdev *sd);
+ int (*init)(struct v4l2_subdev *sd);
+ int (*t_focus_vcm)(struct v4l2_subdev *sd, u16 val);
+ int (*t_focus_abs)(struct v4l2_subdev *sd, s32 value);
+ int (*t_focus_rel)(struct v4l2_subdev *sd, s32 value);
+ int (*q_focus_status)(struct v4l2_subdev *sd, s32 *value);
+ int (*q_focus_abs)(struct v4l2_subdev *sd, s32 *value);
+ int (*t_vcm_slew)(struct v4l2_subdev *sd, s32 value);
+ int (*t_vcm_timing)(struct v4l2_subdev *sd, s32 value);
+};
+
+/*
+ * Defines for register writes and register array processing
+ */
+#define OV8858_BYTE_MAX 32
+#define OV8858_SHORT_MAX 16
+#define OV8858_TOK_MASK 0xFFF0
+
+#define MAX_FPS_OPTIONS_SUPPORTED 3
+
+#define OV8858_DEPTH_COMP_CONST 2200
+#define OV8858_DEPTH_VTS_CONST 2573
+
+enum ov8858_tok_type {
+ OV8858_8BIT = 0x0001,
+ OV8858_16BIT = 0x0002,
+ OV8858_TOK_TERM = 0xF000, /* terminating token for reg list */
+ OV8858_TOK_DELAY = 0xFE00 /* delay token for reg list */
+};
+
+/*
+ * If the register address or register value is not 32 bits wide,
+ * the user needs to convert it manually
+ */
+struct s_register_setting {
+ u32 reg;
+ u32 val;
+};
+
+/**
+ * struct ov8858_reg - MI sensor register format
+ * @type: type of the register
+ * @sreg: 16-bit offset to register
+ * @val: 8/16/32-bit register value
+ *
+ * Define a structure for sensor register initialization values
+ */
+struct ov8858_reg {
+ enum ov8858_tok_type type;
+ u16 sreg;
+ u32 val; /* @set value for read/mod/write, @mask */
+};
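
Every register table in this header is a flat array of struct ov8858_reg terminated by an OV8858_TOK_TERM entry, so a consumer simply walks the array until it hits the terminator and pauses on OV8858_TOK_DELAY tokens. A minimal sketch of that walker, assuming a hypothetical ov8858_write_reg() helper and treating a delay token's val as milliseconds (both assumptions, not declarations from this header):

/* Hypothetical single-register writer; msleep() needs <linux/delay.h>. */
int ov8858_write_reg(struct i2c_client *client, u16 data_length, u16 reg,
		     u16 val);

static int ov8858_write_reg_array_sketch(struct i2c_client *client,
					 const struct ov8858_reg *regs)
{
	int ret;

	for (; regs->type != OV8858_TOK_TERM; regs++) {
		if (regs->type == OV8858_TOK_DELAY) {
			msleep(regs->val);	/* assumed: val is in ms */
			continue;
		}
		ret = ov8858_write_reg(client, regs->type, regs->sreg,
				       regs->val);
		if (ret)
			return ret;
	}

	return 0;
}
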
+
+struct ov8858_fps_setting {
+ int fps;
+ unsigned short pixels_per_line;
+ unsigned short lines_per_frame;
+ const struct ov8858_reg *regs; /* regs that the fps setting needs */
+};
+
+struct ov8858_resolution {
+ u8 *desc;
+ const struct ov8858_reg *regs;
+ int res;
+ int width;
+ int height;
+ bool used;
+ u8 bin_factor_x;
+ u8 bin_factor_y;
+ unsigned short skip_frames;
+ const struct ov8858_fps_setting fps_options[MAX_FPS_OPTIONS_SUPPORTED];
+};
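
Each fps_options entry fixes the frame timing: the nominal frame period is pixels_per_line * lines_per_frame pixel-clock ticks. For the 8M still mode in ov8858.h, annotated with a 149.76 MHz pixel clock, 149760000 / (3880 * 3859) is roughly 10, matching its .fps = 10; the 30 fps modes presumably run from a higher effective clock that is not annotated in the tables. A one-line helper for that relation (name and clock parameter are illustrative):

/* Sketch: nominal frame rate implied by an fps_options entry. */
static int ov8858_nominal_fps(const struct ov8858_fps_setting *s,
			      unsigned long vt_pix_clk_hz)
{
	return vt_pix_clk_hz /
	       ((unsigned long)s->pixels_per_line * s->lines_per_frame);
}
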
+
+/*
+ * ov8858 device structure
+ */
+struct ov8858_device {
+ struct v4l2_subdev sd;
+ struct media_pad pad;
+ struct v4l2_mbus_framefmt format;
+
+ struct camera_sensor_platform_data *platform_data;
+ struct mutex input_lock; /* serialize sensor's ioctl */
+ int fmt_idx;
+ int streaming;
+ int vt_pix_clk_freq_mhz;
+ int fps_index;
+ u16 sensor_id; /* Sensor id from registers */
+ u16 i2c_id; /* Sensor id from i2c_device_id */
+ int exposure;
+ int gain;
+ u16 digital_gain;
+ u16 pixels_per_line;
+ u16 lines_per_frame;
+ u8 fps;
+ u8 *otp_data;
+ /* Prevent the framerate from being lowered in low light scenes. */
+ int limit_exposure_flag;
+ bool hflip;
+ bool vflip;
+
+ const struct ov8858_reg *regs;
+ struct ov8858_vcm *vcm_driver;
+ const struct ov8858_resolution *curr_res_table;
+ int entries_curr_table;
+
+ struct v4l2_ctrl_handler ctrl_handler;
+ struct v4l2_ctrl *run_mode;
+};
+
+#define to_ov8858_sensor(x) container_of(x, struct ov8858_device, sd)
+
+#define OV8858_MAX_WRITE_BUF_SIZE 32
+struct ov8858_write_buffer {
+ u16 addr;
+ u8 data[OV8858_MAX_WRITE_BUF_SIZE];
+};
+
+struct ov8858_write_ctrl {
+ int index;
+ struct ov8858_write_buffer buffer;
+};
+
+static const struct ov8858_reg ov8858_soft_standby[] = {
+ {OV8858_8BIT, 0x0100, 0x00},
+ {OV8858_TOK_TERM, 0, 0}
+};
+
+static const struct ov8858_reg ov8858_streaming[] = {
+ {OV8858_8BIT, 0x0100, 0x01},
+ {OV8858_TOK_TERM, 0, 0}
+};
+
+static const struct ov8858_reg ov8858_param_hold[] = {
+ {OV8858_8BIT, OV8858_GROUP_ACCESS,
+ OV8858_GROUP_ZERO | OV8858_GROUP_ACCESS_HOLD_START},
+ {OV8858_TOK_TERM, 0, 0}
+};
+
+static const struct ov8858_reg ov8858_param_update[] = {
+ {OV8858_8BIT, OV8858_GROUP_ACCESS,
+ OV8858_GROUP_ZERO | OV8858_GROUP_ACCESS_HOLD_END},
+ {OV8858_8BIT, OV8858_GROUP_ACCESS,
+ OV8858_GROUP_ZERO | OV8858_GROUP_ACCESS_DELAY_LAUNCH},
+ {OV8858_TOK_TERM, 0, 0}
+};
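
ov8858_param_hold and ov8858_param_update bracket exposure/gain writes with the sensor's group-write mechanism: registers written between the group-0 hold-start and hold-end accesses are buffered, and the delayed-launch access applies them together at a frame boundary. A rough usage sketch, reusing the hypothetical helpers from the register-walker example above and following the 20-bit exposure layout suggested by the "long exposure = 0x9A20" comment in ov8858_BasicSettings below (low nibble of 0x3502 is fractional):

/* Illustrative only; most error handling trimmed. */
static int ov8858_set_exposure_sketch(struct i2c_client *client, u16 lines)
{
	int ret;

	ret = ov8858_write_reg_array_sketch(client, ov8858_param_hold);
	if (ret)
		return ret;

	ov8858_write_reg(client, OV8858_8BIT, OV8858_LONG_EXPO,
			 (lines >> 12) & 0x0F);
	ov8858_write_reg(client, OV8858_8BIT, OV8858_LONG_EXPO + 1,
			 (lines >> 4) & 0xFF);
	ov8858_write_reg(client, OV8858_8BIT, OV8858_LONG_EXPO + 2,
			 (lines & 0x0F) << 4);

	return ov8858_write_reg_array_sketch(client, ov8858_param_update);
}
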
+
+extern int dw9718_vcm_power_up(struct v4l2_subdev *sd);
+extern int dw9718_vcm_power_down(struct v4l2_subdev *sd);
+extern int dw9718_vcm_init(struct v4l2_subdev *sd);
+extern int dw9718_t_focus_vcm(struct v4l2_subdev *sd, u16 val);
+extern int dw9718_t_focus_abs(struct v4l2_subdev *sd, s32 value);
+extern int dw9718_t_focus_rel(struct v4l2_subdev *sd, s32 value);
+extern int dw9718_q_focus_status(struct v4l2_subdev *sd, s32 *value);
+extern int dw9718_q_focus_abs(struct v4l2_subdev *sd, s32 *value);
+extern int dw9718_t_vcm_slew(struct v4l2_subdev *sd, s32 value);
+extern int dw9718_t_vcm_timing(struct v4l2_subdev *sd, s32 value);
+
+extern int vcm_power_up(struct v4l2_subdev *sd);
+extern int vcm_power_down(struct v4l2_subdev *sd);
+
+static struct ov8858_vcm ov8858_vcms[] = {
+ [OV8858_SUNNY] = {
+ .power_up = dw9718_vcm_power_up,
+ .power_down = dw9718_vcm_power_down,
+ .init = dw9718_vcm_init,
+ .t_focus_vcm = dw9718_t_focus_vcm,
+ .t_focus_abs = dw9718_t_focus_abs,
+ .t_focus_rel = dw9718_t_focus_rel,
+ .q_focus_status = dw9718_q_focus_status,
+ .q_focus_abs = dw9718_q_focus_abs,
+ .t_vcm_slew = dw9718_t_vcm_slew,
+ .t_vcm_timing = dw9718_t_vcm_timing,
+ },
+ [OV8858_ID_DEFAULT] = {
+ .power_up = NULL,
+ .power_down = NULL,
+ },
+};
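
The array is indexed by the VCM ids defined near the top of this header: OV8858_SUNNY selects the dw9718 hooks, while OV8858_ID_DEFAULT leaves the callbacks NULL so focus control is effectively stubbed out. A defensive lookup might look like the sketch below (the helper name is illustrative):

static struct ov8858_vcm *ov8858_vcm_for_id(unsigned int id)
{
	if (id >= ARRAY_SIZE(ov8858_vcms))
		id = OV8858_ID_DEFAULT;	/* fall back to the stubbed entry */

	return &ov8858_vcms[id];
}
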
+
+
+#define OV8858_RES_WIDTH_MAX 3280
+#define OV8858_RES_HEIGHT_MAX 2464
+
+static struct ov8858_reg ov8858_BasicSettings[] = {
+ {OV8858_8BIT, 0x0103, 0x01}, /* software_reset */
+ {OV8858_8BIT, 0x0100, 0x00}, /* software_standby */
+ /* PLL settings */
+ {OV8858_8BIT, 0x0300, 0x05}, /* pll1_pre_div = /4 */
+ {OV8858_8BIT, 0x0302, 0xAF}, /* pll1_multiplier = 175 */
+ {OV8858_8BIT, 0x0303, 0x00}, /* pll1_divm = /(1 + 0) */
+ {OV8858_8BIT, 0x0304, 0x03}, /* pll1_div_mipi = /8 */
+ {OV8858_8BIT, 0x030B, 0x02}, /* pll2_pre_div = /2 */
+ {OV8858_8BIT, 0x030D, 0x4E}, /* pll2_r_divp = 78 */
+ {OV8858_8BIT, 0x030E, 0x00}, /* pll2_r_divs = /1 */
+ {OV8858_8BIT, 0x030F, 0x04}, /* pll2_r_divsp = /(1 + 4) */
+ /* pll2_pre_div0 = /1, pll2_r_divdac = /(1 + 1) */
+ {OV8858_8BIT, 0x0312, 0x01},
+ {OV8858_8BIT, 0x031E, 0x0C}, /* pll1_no_lat = 1, mipi_bitsel_man = 0 */
+
+ /* PAD OEN2, VSYNC out enable=0x80, disable=0x00 */
+ {OV8858_8BIT, 0x3002, 0x80},
+ /* PAD OUT2, VSYNC pulse direction low-to-high = 1 */
+ {OV8858_8BIT, 0x3007, 0x01},
+ /* PAD SEL2, VSYNC out value = 0 */
+ {OV8858_8BIT, 0x300D, 0x00},
+ /* PAD OUT2, VSYNC out select = 0 */
+ {OV8858_8BIT, 0x3010, 0x00},
+
+ /* Npump clock div = /2, Ppump clock div = /4 */
+ {OV8858_8BIT, 0x3015, 0x01},
+ /*
+ * mipi_lane_mode = 1+3, mipi_lvds_sel = 1 = MIPI enable,
+ * r_phy_pd_mipi_man = 0, lane_dis_option = 0
+ */
+ {OV8858_8BIT, 0x3018, 0x72},
+ /* Clock switch output = normal, pclk_div = /1 */
+ {OV8858_8BIT, 0x3020, 0x93},
+ /*
+ * lvds_mode_o = 0, clock lane disable when pd_mipi = 0,
+ * pd_mipi enable when rst_sync = 1
+ */
+ {OV8858_8BIT, 0x3022, 0x01},
+ {OV8858_8BIT, 0x3031, 0x0A}, /* mipi_bit_sel = 10 */
+ {OV8858_8BIT, 0x3034, 0x00}, /* Unknown */
+ /* sclk_div = /1, sclk_pre_div = /1, chip debug = 1 */
+ {OV8858_8BIT, 0x3106, 0x01},
+
+ {OV8858_8BIT, 0x3305, 0xF1}, /* Unknown */
+ {OV8858_8BIT, 0x3307, 0x04}, /* Unknown */
+ {OV8858_8BIT, 0x3308, 0x00}, /* Unknown */
+ {OV8858_8BIT, 0x3309, 0x28}, /* Unknown */
+ {OV8858_8BIT, 0x330A, 0x00}, /* Unknown */
+ {OV8858_8BIT, 0x330B, 0x20}, /* Unknown */
+ {OV8858_8BIT, 0x330C, 0x00}, /* Unknown */
+ {OV8858_8BIT, 0x330D, 0x00}, /* Unknown */
+ {OV8858_8BIT, 0x330E, 0x00}, /* Unknown */
+ {OV8858_8BIT, 0x330F, 0x40}, /* Unknown */
+
+ {OV8858_8BIT, 0x3500, 0x00}, /* long exposure = 0x9A20 */
+ {OV8858_8BIT, 0x3501, 0x9A}, /* long exposure = 0x9A20 */
+ {OV8858_8BIT, 0x3502, 0x20}, /* long exposure = 0x9A20 */
+ /*
+ * Digital fraction gain delay option = Delay 1 frame,
+ * Gain change delay option = Delay 1 frame,
+ * Gain delay option = Delay 1 frame,
+ * Gain manual as sensor gain = Input gain as real gain format,
+ * Exposure delay option (must be 0) = Delay 1 frame,
+ * Exposure change delay option (must be 0) = Delay 1 frame
+ */
+ {OV8858_8BIT, 0x3503, 0x00},
+ {OV8858_8BIT, 0x3505, 0x80}, /* gain conversion option */
+ /*
+ * [10:7] are integer gain, [6:0] are fraction gain. For example:
+ * 0x80 is 1x gain, 0x100 is 2x gain, 0x1C0 is 3.5x gain
+ */
+ {OV8858_8BIT, 0x3508, 0x02}, /* long gain = 0x0200 */
+ {OV8858_8BIT, 0x3509, 0x00}, /* long gain = 0x0200 */
+ {OV8858_8BIT, 0x350C, 0x00}, /* short gain = 0x0080 */
+ {OV8858_8BIT, 0x350D, 0x80}, /* short gain = 0x0080 */
+ {OV8858_8BIT, 0x3510, 0x00}, /* short exposure = 0x000200 */
+ {OV8858_8BIT, 0x3511, 0x02}, /* short exposure = 0x000200 */
+ {OV8858_8BIT, 0x3512, 0x00}, /* short exposure = 0x000200 */
+
+ {OV8858_8BIT, 0x3600, 0x00}, /* Unknown */
+ {OV8858_8BIT, 0x3601, 0x00}, /* Unknown */
+ {OV8858_8BIT, 0x3602, 0x00}, /* Unknown */
+ {OV8858_8BIT, 0x3603, 0x00}, /* Unknown */
+ {OV8858_8BIT, 0x3604, 0x22}, /* Unknown */
+ {OV8858_8BIT, 0x3605, 0x30}, /* Unknown */
+ {OV8858_8BIT, 0x3606, 0x00}, /* Unknown */
+ {OV8858_8BIT, 0x3607, 0x20}, /* Unknown */
+ {OV8858_8BIT, 0x3608, 0x11}, /* Unknown */
+ {OV8858_8BIT, 0x3609, 0x28}, /* Unknown */
+ {OV8858_8BIT, 0x360A, 0x00}, /* Unknown */
+ {OV8858_8BIT, 0x360B, 0x06}, /* Unknown */
+ {OV8858_8BIT, 0x360C, 0xDC}, /* Unknown */
+ {OV8858_8BIT, 0x360D, 0x40}, /* Unknown */
+ {OV8858_8BIT, 0x360E, 0x0C}, /* Unknown */
+ {OV8858_8BIT, 0x360F, 0x20}, /* Unknown */
+ {OV8858_8BIT, 0x3610, 0x07}, /* Unknown */
+ {OV8858_8BIT, 0x3611, 0x20}, /* Unknown */
+ {OV8858_8BIT, 0x3612, 0x88}, /* Unknown */
+ {OV8858_8BIT, 0x3613, 0x80}, /* Unknown */
+ {OV8858_8BIT, 0x3614, 0x58}, /* Unknown */
+ {OV8858_8BIT, 0x3615, 0x00}, /* Unknown */
+ {OV8858_8BIT, 0x3616, 0x4A}, /* Unknown */
+ {OV8858_8BIT, 0x3617, 0x90}, /* Unknown */
+ {OV8858_8BIT, 0x3618, 0x56}, /* Unknown */
+ {OV8858_8BIT, 0x3619, 0x70}, /* Unknown */
+ {OV8858_8BIT, 0x361A, 0x99}, /* Unknown */
+ {OV8858_8BIT, 0x361B, 0x00}, /* Unknown */
+ {OV8858_8BIT, 0x361C, 0x07}, /* Unknown */
+ {OV8858_8BIT, 0x361D, 0x00}, /* Unknown */
+ {OV8858_8BIT, 0x361E, 0x00}, /* Unknown */
+ {OV8858_8BIT, 0x361F, 0x00}, /* Unknown */
+ {OV8858_8BIT, 0x3633, 0x0C}, /* Unknown */
+ {OV8858_8BIT, 0x3634, 0x0C}, /* Unknown */
+ {OV8858_8BIT, 0x3635, 0x0C}, /* Unknown */
+ {OV8858_8BIT, 0x3636, 0x0C}, /* Unknown */
+ {OV8858_8BIT, 0x3638, 0xFF}, /* Unknown */
+ {OV8858_8BIT, 0x3645, 0x13}, /* Unknown */
+ {OV8858_8BIT, 0x3646, 0x83}, /* Unknown */
+ {OV8858_8BIT, 0x364A, 0x07}, /* Unknown */
+
+ {OV8858_8BIT, 0x3700, 0x30}, /* Unknown */
+ {OV8858_8BIT, 0x3701, 0x18}, /* Unknown */
+ {OV8858_8BIT, 0x3702, 0x50}, /* Unknown */
+ {OV8858_8BIT, 0x3703, 0x32}, /* Unknown */
+ {OV8858_8BIT, 0x3704, 0x28}, /* Unknown */
+ {OV8858_8BIT, 0x3705, 0x00}, /* Unknown */
+ {OV8858_8BIT, 0x3706, 0x6A}, /* Unknown */
+ {OV8858_8BIT, 0x3707, 0x08}, /* Unknown */
+ {OV8858_8BIT, 0x3708, 0x48}, /* Unknown */
+ {OV8858_8BIT, 0x3709, 0x66}, /* Unknown */
+ {OV8858_8BIT, 0x370A, 0x01}, /* Unknown */
+ {OV8858_8BIT, 0x370B, 0x6A}, /* Unknown */
+ {OV8858_8BIT, 0x370C, 0x07}, /* Unknown */
+ {OV8858_8BIT, 0x3712, 0x44}, /* Unknown */
+ {OV8858_8BIT, 0x3714, 0x24}, /* Unknown */
+ {OV8858_8BIT, 0x3718, 0x14}, /* Unknown */
+ {OV8858_8BIT, 0x3719, 0x31}, /* Unknown */
+ {OV8858_8BIT, 0x371E, 0x31}, /* Unknown */
+ {OV8858_8BIT, 0x371F, 0x7F}, /* Unknown */
+ {OV8858_8BIT, 0x3720, 0x0A}, /* Unknown */
+ {OV8858_8BIT, 0x3721, 0x0A}, /* Unknown */
+ {OV8858_8BIT, 0x3724, 0x0C}, /* Unknown */
+ {OV8858_8BIT, 0x3725, 0x02}, /* Unknown */
+ {OV8858_8BIT, 0x3726, 0x0C}, /* Unknown */
+ {OV8858_8BIT, 0x3728, 0x0A}, /* Unknown */
+ {OV8858_8BIT, 0x3729, 0x03}, /* Unknown */
+ {OV8858_8BIT, 0x372A, 0x06}, /* Unknown */
+ {OV8858_8BIT, 0x372B, 0xA6}, /* Unknown */
+ {OV8858_8BIT, 0x372C, 0xA6}, /* Unknown */
+ {OV8858_8BIT, 0x372D, 0xA6}, /* Unknown */
+ {OV8858_8BIT, 0x372E, 0x0C}, /* Unknown */
+ {OV8858_8BIT, 0x372F, 0x20}, /* Unknown */
+ {OV8858_8BIT, 0x3730, 0x02}, /* Unknown */
+ {OV8858_8BIT, 0x3731, 0x0C}, /* Unknown */
+ {OV8858_8BIT, 0x3732, 0x28}, /* Unknown */
+ {OV8858_8BIT, 0x3733, 0x10}, /* Unknown */
+ {OV8858_8BIT, 0x3734, 0x40}, /* Unknown */
+ {OV8858_8BIT, 0x3736, 0x30}, /* Unknown */
+ {OV8858_8BIT, 0x373A, 0x0A}, /* Unknown */
+ {OV8858_8BIT, 0x373B, 0x0B}, /* Unknown */
+ {OV8858_8BIT, 0x373C, 0x14}, /* Unknown */
+ {OV8858_8BIT, 0x373E, 0x06}, /* Unknown */
+ {OV8858_8BIT, 0x3755, 0x10}, /* Unknown */
+ {OV8858_8BIT, 0x3758, 0x00}, /* Unknown */
+ {OV8858_8BIT, 0x3759, 0x4C}, /* Unknown */
+ {OV8858_8BIT, 0x375A, 0x0C}, /* Unknown */
+ {OV8858_8BIT, 0x375B, 0x26}, /* Unknown */
+ {OV8858_8BIT, 0x375C, 0x20}, /* Unknown */
+ {OV8858_8BIT, 0x375D, 0x04}, /* Unknown */
+ {OV8858_8BIT, 0x375E, 0x00}, /* Unknown */
+ {OV8858_8BIT, 0x375F, 0x28}, /* Unknown */
+ {OV8858_8BIT, 0x3760, 0x00}, /* Unknown */
+ {OV8858_8BIT, 0x3761, 0x00}, /* Unknown */
+ {OV8858_8BIT, 0x3762, 0x00}, /* Unknown */
+ {OV8858_8BIT, 0x3763, 0x00}, /* Unknown */
+ {OV8858_8BIT, 0x3766, 0xFF}, /* Unknown */
+ {OV8858_8BIT, 0x3768, 0x22}, /* Unknown */
+ {OV8858_8BIT, 0x3769, 0x44}, /* Unknown */
+ {OV8858_8BIT, 0x376A, 0x44}, /* Unknown */
+ {OV8858_8BIT, 0x376B, 0x00}, /* Unknown */
+ {OV8858_8BIT, 0x376F, 0x01}, /* Unknown */
+ {OV8858_8BIT, 0x3772, 0x46}, /* Unknown */
+ {OV8858_8BIT, 0x3773, 0x04}, /* Unknown */
+ {OV8858_8BIT, 0x3774, 0x2C}, /* Unknown */
+ {OV8858_8BIT, 0x3775, 0x13}, /* Unknown */
+ {OV8858_8BIT, 0x3776, 0x08}, /* Unknown */
+ {OV8858_8BIT, 0x3777, 0x00}, /* Unknown */
+ {OV8858_8BIT, 0x3778, 0x16}, /* Unknown */
+ {OV8858_8BIT, 0x37A0, 0x88}, /* Unknown */
+ {OV8858_8BIT, 0x37A1, 0x7A}, /* Unknown */
+ {OV8858_8BIT, 0x37A2, 0x7A}, /* Unknown */
+ {OV8858_8BIT, 0x37A3, 0x00}, /* Unknown */
+ {OV8858_8BIT, 0x37A4, 0x00}, /* Unknown */
+ {OV8858_8BIT, 0x37A5, 0x00}, /* Unknown */
+ {OV8858_8BIT, 0x37A6, 0x00}, /* Unknown */
+ {OV8858_8BIT, 0x37A7, 0x88}, /* Unknown */
+ {OV8858_8BIT, 0x37A8, 0x98}, /* Unknown */
+ {OV8858_8BIT, 0x37A9, 0x98}, /* Unknown */
+ {OV8858_8BIT, 0x37AA, 0x88}, /* Unknown */
+ {OV8858_8BIT, 0x37AB, 0x5C}, /* Unknown */
+ {OV8858_8BIT, 0x37AC, 0x5C}, /* Unknown */
+ {OV8858_8BIT, 0x37AD, 0x55}, /* Unknown */
+ {OV8858_8BIT, 0x37AE, 0x19}, /* Unknown */
+ {OV8858_8BIT, 0x37AF, 0x19}, /* Unknown */
+ {OV8858_8BIT, 0x37B0, 0x00}, /* Unknown */
+ {OV8858_8BIT, 0x37B1, 0x00}, /* Unknown */
+ {OV8858_8BIT, 0x37B2, 0x00}, /* Unknown */
+ {OV8858_8BIT, 0x37B3, 0x84}, /* Unknown */
+ {OV8858_8BIT, 0x37B4, 0x84}, /* Unknown */
+ {OV8858_8BIT, 0x37B5, 0x66}, /* Unknown */
+ {OV8858_8BIT, 0x37B6, 0x00}, /* Unknown */
+ {OV8858_8BIT, 0x37B7, 0x00}, /* Unknown */
+ {OV8858_8BIT, 0x37B8, 0x00}, /* Unknown */
+ {OV8858_8BIT, 0x37B9, 0xFF}, /* Unknown */
+
+ {OV8858_8BIT, 0x3800, 0x00}, /* h_crop_start high */
+ {OV8858_8BIT, 0x3801, 0x0C}, /* h_crop_start low */
+ {OV8858_8BIT, 0x3802, 0x00}, /* v_crop_start high */
+ {OV8858_8BIT, 0x3803, 0x0C}, /* v_crop_start low */
+ {OV8858_8BIT, 0x3804, 0x0C}, /* h_crop_end high */
+ {OV8858_8BIT, 0x3805, 0xD3}, /* h_crop_end low */
+ {OV8858_8BIT, 0x3806, 0x09}, /* v_crop_end high */
+ {OV8858_8BIT, 0x3807, 0xA3}, /* v_crop_end low */
+ {OV8858_8BIT, 0x3808, 0x0C}, /* h_output_size high */
+ {OV8858_8BIT, 0x3809, 0xC0}, /* h_output_size low */
+ {OV8858_8BIT, 0x380A, 0x09}, /* v_output_size high */
+ {OV8858_8BIT, 0x380B, 0x90}, /* v_output_size low */
+ {OV8858_8BIT, 0x380C, 0x07}, /* horizontal timing size high */
+ {OV8858_8BIT, 0x380D, 0x94}, /* horizontal timing size low */
+ {OV8858_8BIT, 0x380E, 0x0A}, /* vertical timing size high */
+ {OV8858_8BIT, 0x380F, 0x0D}, /* vertical timing size low */
+ {OV8858_8BIT, 0x3810, 0x00}, /* h_win offset high */
+ {OV8858_8BIT, 0x3811, 0x04}, /* h_win offset low */
+ {OV8858_8BIT, 0x3812, 0x00}, /* v_win offset high */
+ {OV8858_8BIT, 0x3813, 0x02}, /* v_win offset low */
+ {OV8858_8BIT, 0x3814, 0x01}, /* h_odd_inc */
+ {OV8858_8BIT, 0x3815, 0x01}, /* h_even_inc */
+ {OV8858_8BIT, 0x3820, 0x00}, /* format1 */
+ {OV8858_8BIT, 0x3821, 0x40}, /* format2 */
+ {OV8858_8BIT, 0x382A, 0x01}, /* v_odd_inc */
+ {OV8858_8BIT, 0x382B, 0x01}, /* v_even_inc */
+
+ {OV8858_8BIT, 0x3830, 0x06}, /* Unknown */
+ {OV8858_8BIT, 0x3836, 0x01}, /* Unknown */
+ {OV8858_8BIT, 0x3837, 0x18}, /* Unknown */
+ {OV8858_8BIT, 0x3841, 0xFF}, /* AUTO_SIZE_CTRL */
+ {OV8858_8BIT, 0x3846, 0x48}, /* Unknown */
+
+ {OV8858_8BIT, 0x3D85, 0x14}, /* OTP_REG85 */
+ {OV8858_8BIT, 0x3D8C, 0x73}, /* OTP_SETTING_STT_ADDRESS */
+ {OV8858_8BIT, 0x3D8D, 0xDE}, /* OTP_SETTING_STT_ADDRESS */
+ {OV8858_8BIT, 0x3F08, 0x10}, /* PSRAM control register */
+ {OV8858_8BIT, 0x3F0A, 0x80}, /* PSRAM control register */
+
+ {OV8858_8BIT, 0x4000, 0xF1}, /* BLC CTRL00 = default */
+ {OV8858_8BIT, 0x4001, 0x00}, /* BLC CTRL01 */
+ {OV8858_8BIT, 0x4002, 0x27}, /* BLC offset = 0x27 */
+ {OV8858_8BIT, 0x4005, 0x10}, /* BLC target = 0x0010 */
+ {OV8858_8BIT, 0x4009, 0x81}, /* BLC CTRL09 */
+ {OV8858_8BIT, 0x400B, 0x0C}, /* BLC CTRL0B = default */
+ {OV8858_8BIT, 0x400A, 0x01},
+ {OV8858_8BIT, 0x4011, 0x20}, /* BLC CTRL11 = 0x20 */
+ {OV8858_8BIT, 0x401B, 0x00}, /* Zero line R coeff. = 0x0000 */
+ {OV8858_8BIT, 0x401D, 0x00}, /* Zero line T coeff. = 0x0000 */
+ {OV8858_8BIT, 0x401F, 0x00}, /* BLC CTRL1F */
+ {OV8858_8BIT, 0x4020, 0x00}, /* Anchor left start = 0x0004 */
+ {OV8858_8BIT, 0x4021, 0x04}, /* Anchor left start = 0x0004 */
+ {OV8858_8BIT, 0x4022, 0x0C}, /* Anchor left end = 0x0C60 */
+ {OV8858_8BIT, 0x4023, 0x60}, /* Anchor left end = 0x0C60 */
+ {OV8858_8BIT, 0x4024, 0x0F}, /* Anchor right start = 0x0F36 */
+ {OV8858_8BIT, 0x4025, 0x36}, /* Anchor right start = 0x0F36 */
+ {OV8858_8BIT, 0x4026, 0x0F}, /* Anchor right end = 0x0F37 */
+ {OV8858_8BIT, 0x4027, 0x37}, /* Anchor right end = 0x0F37 */
+ {OV8858_8BIT, 0x4028, 0x00}, /* Top zero line start = 0 */
+ {OV8858_8BIT, 0x4029, 0x04}, /* Top zero line number = 4 */
+ {OV8858_8BIT, 0x402A, 0x04}, /* Top black line start = 4 */
+ {OV8858_8BIT, 0x402B, 0x08}, /* Top black line number = 8 */
+ {OV8858_8BIT, 0x402C, 0x00}, /* Bottom zero start line = 0 */
+ {OV8858_8BIT, 0x402D, 0x02}, /* Bottom zero line number = 2 */
+ {OV8858_8BIT, 0x402E, 0x04}, /* Bottom black line start = 4 */
+ {OV8858_8BIT, 0x402F, 0x08}, /* Bottom black line number = 8 */
+
+ {OV8858_8BIT, 0x4034, 0x3F}, /* Unknown */
+ {OV8858_8BIT, 0x403D, 0x04}, /* BLC CTRL3D */
+ {OV8858_8BIT, 0x4300, 0xFF}, /* clip_max[11:4] = 0xFFF */
+ {OV8858_8BIT, 0x4301, 0x00}, /* clip_min[11:4] = 0 */
+ {OV8858_8BIT, 0x4302, 0x0F}, /* clip_min/max[3:0] */
+ {OV8858_8BIT, 0x4307, 0x01}, /* Unknown */
+ {OV8858_8BIT, 0x4316, 0x00}, /* CTRL16 = default */
+ {OV8858_8BIT, 0x4503, 0x18}, /* Unknown */
+ {OV8858_8BIT, 0x4500, 0x38}, /* Unknown */
+ {OV8858_8BIT, 0x4600, 0x01}, /* Unknown */
+ {OV8858_8BIT, 0x4601, 0x97}, /* Unknown */
+ /* wkup_dly = Mark1 wakeup delay/2^10 = 0x25 */
+ {OV8858_8BIT, 0x4808, 0x25},
+ {OV8858_8BIT, 0x4816, 0x52}, /* Embedded data type */
+ {OV8858_8BIT, 0x481F, 0x32}, /* clk_prepare_min = 0x32 */
+ {OV8858_8BIT, 0x4825, 0x3A}, /* lpx_p_min = 0x3A */
+ {OV8858_8BIT, 0x4826, 0x40}, /* hs_prepare_min = 0x40 */
+ {OV8858_8BIT, 0x4837, 0x14}, /* pclk_period = 0x14 */
+ {OV8858_8BIT, 0x4850, 0x10}, /* LANE SEL01 */
+ {OV8858_8BIT, 0x4851, 0x32}, /* LANE SEL02 */
+
+ {OV8858_8BIT, 0x4B00, 0x2A}, /* Unknown */
+ {OV8858_8BIT, 0x4B0D, 0x00}, /* Unknown */
+ {OV8858_8BIT, 0x4D00, 0x04}, /* TPM_CTRL_REG */
+ {OV8858_8BIT, 0x4D01, 0x18}, /* TPM_CTRL_REG */
+ {OV8858_8BIT, 0x4D02, 0xC3}, /* TPM_CTRL_REG */
+ {OV8858_8BIT, 0x4D03, 0xFF}, /* TPM_CTRL_REG */
+ {OV8858_8BIT, 0x4D04, 0xFF}, /* TPM_CTRL_REG */
+ {OV8858_8BIT, 0x4D05, 0xFF}, /* TPM_CTRL_REG */
+
+ /*
+ * Lens correction (LENC) function enable = 0
+ * Slave sensor AWB Gain function enable = 1
+ * Slave sensor AWB Statistics function enable = 1
+ * Master sensor AWB Gain function enable = 1
+ * Master sensor AWB Statistics function enable = 1
+ * Black DPC function enable = 1
+ * White DPC function enable =1
+ */
+ {OV8858_8BIT, 0x5000, 0x7E},
+ {OV8858_8BIT, 0x5001, 0x01}, /* BLC function enable = 1 */
+ /*
+ * Horizontal scale function enable = 0
+ * WBMATCH bypass mode = Select slave sensor's gain
+ * WBMATCH function enable = 0
+ * Master MWB gain support RGBC = 0
+ * OTP_DPC function enable = 1
+ * Manual mode of VarioPixel function enable = 0
+ * Manual enable of VarioPixel function enable = 0
+ * Use VSYNC to latch ISP modules' function enable signals = 0
+ */
+ {OV8858_8BIT, 0x5002, 0x08},
+ /*
+ * Bypass all ISP modules after BLC module = 0
+ * DPC_DBC buffer control enable = 1
+ * WBMATCH VSYNC selection = Select master sensor's VSYNC fall
+ * Select master AWB gain to embed line = AWB gain before manual mode
+ * Enable BLC's input flip_i signal = 0
+ */
+ {OV8858_8BIT, 0x5003, 0x20},
+ {OV8858_8BIT, 0x5041, 0x1D}, /* ISP CTRL41 - embedded data=on */
+ {OV8858_8BIT, 0x5046, 0x12}, /* ISP CTRL46 = default */
+ /*
+ * Tail enable = 1
+ * Saturate cross cluster enable = 1
+ * Remove cross cluster enable = 1
+ * Enable to remove connected defect pixels in same channel = 1
+ * Enable to remove connected defect pixels in different channel = 1
+ * Smooth enable, use average G for recovery = 1
+ * Black/white sensor mode enable = 0
+ * Manual mode enable = 0
+ */
+ {OV8858_8BIT, 0x5780, 0xFC},
+ {OV8858_8BIT, 0x5784, 0x0C}, /* DPC CTRL04 */
+ {OV8858_8BIT, 0x5787, 0x40}, /* DPC CTRL07 */
+ {OV8858_8BIT, 0x5788, 0x08}, /* DPC CTRL08 */
+ {OV8858_8BIT, 0x578A, 0x02}, /* DPC CTRL0A */
+ {OV8858_8BIT, 0x578B, 0x01}, /* DPC CTRL0B */
+ {OV8858_8BIT, 0x578C, 0x01}, /* DPC CTRL0C */
+ {OV8858_8BIT, 0x578E, 0x02}, /* DPC CTRL0E */
+ {OV8858_8BIT, 0x578F, 0x01}, /* DPC CTRL0F */
+ {OV8858_8BIT, 0x5790, 0x01}, /* DPC CTRL10 */
+ {OV8858_8BIT, 0x5901, 0x00}, /* VAP CTRL01 = default */
+ /* WINC CTRL08 = embedded data in 1st line */
+ {OV8858_8BIT, 0x5A08, 0x00},
+ {OV8858_8BIT, 0x5B00, 0x02}, /* OTP CTRL00 */
+ {OV8858_8BIT, 0x5B01, 0x10}, /* OTP CTRL01 */
+ {OV8858_8BIT, 0x5B02, 0x03}, /* OTP CTRL02 */
+ {OV8858_8BIT, 0x5B03, 0xCF}, /* OTP CTRL03 */
+ {OV8858_8BIT, 0x5B05, 0x6C}, /* OTP CTRL05 = default */
+ {OV8858_8BIT, 0x5E00, 0x00}, /* PRE CTRL00 = default */
+ {OV8858_8BIT, 0x5E01, 0x41}, /* PRE_CTRL01 = default */
+
+ {OV8858_TOK_TERM, 0, 0}
+};
+
+/*****************************STILL********************************/
+
+static const struct ov8858_reg ov8858_8M[] = {
+ {OV8858_8BIT, 0x0100, 0x00}, /* software_standby */
+ {OV8858_8BIT, 0x3778, 0x16}, /* Unknown */
+ {OV8858_8BIT, 0x3800, 0x00}, /* h_crop_start high */
+ {OV8858_8BIT, 0x3801, 0x0C}, /* h_crop_start low 12 */
+ {OV8858_8BIT, 0x3802, 0x00}, /* v_crop_start high */
+ {OV8858_8BIT, 0x3803, 0x0C}, /* v_crop_start low */
+ {OV8858_8BIT, 0x3804, 0x0C}, /* h_crop_end high */
+ {OV8858_8BIT, 0x3805, 0xD3}, /* h_crop_end low 3283 */
+ {OV8858_8BIT, 0x3806, 0x09}, /* v_crop_end high */
+ {OV8858_8BIT, 0x3807, 0xA3}, /* v_crop_end low */
+ {OV8858_8BIT, 0x3808, 0x0C}, /* h_output_size high 3280 x 2464 */
+ {OV8858_8BIT, 0x3809, 0xD0}, /* h_output_size low */
+ {OV8858_8BIT, 0x380A, 0x09}, /* v_output_size high */
+ {OV8858_8BIT, 0x380B, 0xa0}, /* v_output_size low */
+ {OV8858_8BIT, 0x380C, 0x07}, /* horizontal timing size high */
+ {OV8858_8BIT, 0x380D, 0x94}, /* horizontal timing size low */
+ {OV8858_8BIT, 0x380E, 0x0A}, /* vertical timing size high */
+ {OV8858_8BIT, 0x380F, 0x0D}, /* vertical timing size low */
+ {OV8858_8BIT, 0x3814, 0x01}, /* h_odd_inc */
+ {OV8858_8BIT, 0x3815, 0x01}, /* h_even_inc */
+ {OV8858_8BIT, 0x3820, 0x00}, /* format1 */
+ {OV8858_8BIT, 0x3821, 0x40}, /* format2 */
+ {OV8858_8BIT, 0x382A, 0x01}, /* v_odd_inc */
+ {OV8858_8BIT, 0x382B, 0x01}, /* v_even_inc */
+ {OV8858_8BIT, 0x3830, 0x06}, /* Unknown */
+ {OV8858_8BIT, 0x3836, 0x01}, /* Unknown */
+ {OV8858_8BIT, 0x3D85, 0x14}, /* OTP_REG85 */
+ {OV8858_8BIT, 0x3F08, 0x10}, /* PSRAM control register */
+ {OV8858_8BIT, 0x4034, 0x3F}, /* Unknown */
+ {OV8858_8BIT, 0x403D, 0x04}, /* BLC CTRL3D */
+ {OV8858_8BIT, 0x4600, 0x01}, /* Unknown */
+ {OV8858_8BIT, 0x4601, 0x97}, /* Unknown */
+ {OV8858_8BIT, 0x4837, 0x14}, /* pclk_period = 0x14 */
+ {OV8858_TOK_TERM, 0, 0}
+};
+
+static const struct ov8858_reg ov8858_3276x1848[] = {
+ {OV8858_8BIT, 0x0100, 0x00}, /* software_standby */
+ {OV8858_8BIT, 0x3778, 0x16}, /* Unknown */
+ {OV8858_8BIT, 0x3800, 0x00}, /* h_crop_start high */
+ {OV8858_8BIT, 0x3801, 0x10}, /* h_crop_start low 0c->10*/
+ {OV8858_8BIT, 0x3802, 0x01}, /* v_crop_start high */
+ {OV8858_8BIT, 0x3803, 0x42}, /* v_crop_start low 3e->42*/
+ {OV8858_8BIT, 0x3804, 0x0C}, /* h_crop_end high */
+ {OV8858_8BIT, 0x3805, 0xD3}, /* h_crop_end low */
+ {OV8858_8BIT, 0x3806, 0x08}, /* v_crop_end high */
+ {OV8858_8BIT, 0x3807, 0x71}, /* v_crop_end low */
+ {OV8858_8BIT, 0x3808, 0x0C}, /* h_output_size high 3276 x 1848 */
+ {OV8858_8BIT, 0x3809, 0xCC}, /* h_output_size low d0->cc*/
+ {OV8858_8BIT, 0x380A, 0x07}, /* v_output_size high */
+ {OV8858_8BIT, 0x380B, 0x38}, /* v_output_size low 3c->38*/
+ {OV8858_8BIT, 0x380C, 0x07}, /* horizontal timing size high */
+ {OV8858_8BIT, 0x380D, 0x94}, /* horizontal timing size low */
+ {OV8858_8BIT, 0x380E, 0x0A}, /* vertical timing size high */
+ {OV8858_8BIT, 0x380F, 0x0D}, /* vertical timing size low */
+ {OV8858_8BIT, 0x3814, 0x01}, /* h_odd_inc */
+ {OV8858_8BIT, 0x3815, 0x01}, /* h_even_inc */
+ {OV8858_8BIT, 0x3820, 0x00}, /* format1 */
+ {OV8858_8BIT, 0x3821, 0x40}, /* format2 */
+ {OV8858_8BIT, 0x382A, 0x01}, /* v_odd_inc */
+ {OV8858_8BIT, 0x382B, 0x01}, /* v_even_inc */
+ {OV8858_8BIT, 0x3830, 0x06}, /* Unknown */
+ {OV8858_8BIT, 0x3836, 0x01}, /* Unknown */
+ {OV8858_8BIT, 0x3D85, 0x14}, /* OTP_REG85 */
+ {OV8858_8BIT, 0x3F08, 0x10}, /* PSRAM control register */
+ {OV8858_8BIT, 0x4034, 0x3F}, /* Unknown */
+ {OV8858_8BIT, 0x403D, 0x04}, /* BLC CTRL3D */
+ {OV8858_8BIT, 0x4600, 0x01}, /* Unknown */
+ {OV8858_8BIT, 0x4601, 0x97}, /* Unknown */
+ {OV8858_8BIT, 0x4837, 0x14}, /* pclk_period = 0x14 */
+ {OV8858_TOK_TERM, 0, 0}
+};
+
+static const struct ov8858_reg ov8858_6M[] = {
+ {OV8858_8BIT, 0x0100, 0x00}, /* software_standby */
+ {OV8858_8BIT, 0x3778, 0x16}, /* Unknown */
+ {OV8858_8BIT, 0x3800, 0x00}, /* h_crop_start high */
+ {OV8858_8BIT, 0x3801, 0x0C}, /* h_crop_start low */
+ {OV8858_8BIT, 0x3802, 0x01}, /* v_crop_start high */
+ {OV8858_8BIT, 0x3803, 0x3E}, /* v_crop_start low */
+ {OV8858_8BIT, 0x3804, 0x0C}, /* h_crop_end high */
+ {OV8858_8BIT, 0x3805, 0xD3}, /* h_crop_end low */
+ {OV8858_8BIT, 0x3806, 0x08}, /* v_crop_end high */
+ {OV8858_8BIT, 0x3807, 0x71}, /* v_crop_end low */
+ {OV8858_8BIT, 0x3808, 0x0C}, /* h_output_size high 3280 x 1852 */
+ {OV8858_8BIT, 0x3809, 0xD0}, /* h_output_size low */
+ {OV8858_8BIT, 0x380A, 0x07}, /* v_output_size high */
+ {OV8858_8BIT, 0x380B, 0x3C}, /* v_output_size low */
+ {OV8858_8BIT, 0x380C, 0x07}, /* horizontal timing size high */
+ {OV8858_8BIT, 0x380D, 0x94}, /* horizontal timing size low */
+ {OV8858_8BIT, 0x380E, 0x0A}, /* vertical timing size high */
+ {OV8858_8BIT, 0x380F, 0x0D}, /* vertical timing size low */
+ {OV8858_8BIT, 0x3814, 0x01}, /* h_odd_inc */
+ {OV8858_8BIT, 0x3815, 0x01}, /* h_even_inc */
+ {OV8858_8BIT, 0x3820, 0x00}, /* format1 */
+ {OV8858_8BIT, 0x3821, 0x40}, /* format2 */
+ {OV8858_8BIT, 0x382A, 0x01}, /* v_odd_inc */
+ {OV8858_8BIT, 0x382B, 0x01}, /* v_even_inc */
+ {OV8858_8BIT, 0x3830, 0x06}, /* Unknown */
+ {OV8858_8BIT, 0x3836, 0x01}, /* Unknown */
+ {OV8858_8BIT, 0x3D85, 0x14}, /* OTP_REG85 */
+ {OV8858_8BIT, 0x3F08, 0x10}, /* PSRAM control register */
+ {OV8858_8BIT, 0x4034, 0x3F}, /* Unknown */
+ {OV8858_8BIT, 0x403D, 0x04}, /* BLC CTRL3D */
+ {OV8858_8BIT, 0x4600, 0x01}, /* Unknown */
+ {OV8858_8BIT, 0x4601, 0x97}, /* Unknown */
+ {OV8858_8BIT, 0x4837, 0x14}, /* pclk_period = 0x14 */
+ {OV8858_TOK_TERM, 0, 0}
+};
+
+static const struct ov8858_reg ov8858_1080P_60[] = {
+ {OV8858_8BIT, 0x0100, 0x00}, /* software_standby */
+ {OV8858_8BIT, 0x3778, 0x17}, /* Unknown */
+ {OV8858_8BIT, 0x3800, 0x02}, /* h_crop_start high */
+ {OV8858_8BIT, 0x3801, 0x26}, /* h_crop_start low */
+ {OV8858_8BIT, 0x3802, 0x02}, /* v_crop_start high */
+ {OV8858_8BIT, 0x3803, 0x8C}, /* v_crop_start low */
+ {OV8858_8BIT, 0x3804, 0x0A}, /* h_crop_end high */
+ {OV8858_8BIT, 0x3805, 0x9D}, /* h_crop_end low */
+ {OV8858_8BIT, 0x3806, 0x07}, /* v_crop_end high */
+ {OV8858_8BIT, 0x3807, 0x0A}, /* v_crop_end low */
+ {OV8858_8BIT, 0x3808, 0x07}, /* h_output_size high*/
+ {OV8858_8BIT, 0x3809, 0x90}, /* h_output_size low */
+ {OV8858_8BIT, 0x380A, 0x04}, /* v_output_size high */
+ {OV8858_8BIT, 0x380B, 0x48}, /* v_output_size low */
+ {OV8858_8BIT, 0x380C, 0x07}, /* horizontal timing size high */
+ {OV8858_8BIT, 0x380D, 0x94}, /* horizontal timing size low */
+ {OV8858_8BIT, 0x380E, 0x04}, /* vertical timing size high */
+ {OV8858_8BIT, 0x380F, 0xEC}, /* vertical timing size low */
+ {OV8858_8BIT, 0x3814, 0x01}, /* h_odd_inc */
+ {OV8858_8BIT, 0x3815, 0x01}, /* h_even_inc */
+ {OV8858_8BIT, 0x3820, 0x00}, /* format1 */
+ {OV8858_8BIT, 0x3821, 0x40}, /* format2 */
+ {OV8858_8BIT, 0x382A, 0x01}, /* v_odd_inc */
+ {OV8858_8BIT, 0x382B, 0x01}, /* v_even_inc */
+ {OV8858_8BIT, 0x3830, 0x06}, /* Unknown */
+ {OV8858_8BIT, 0x3836, 0x01}, /* Unknown */
+ {OV8858_8BIT, 0x3D85, 0x14}, /* OTP_REG85 */
+ {OV8858_8BIT, 0x3F08, 0x10}, /* PSRAM control register */
+ {OV8858_8BIT, 0x4034, 0x3F}, /* Unknown */
+ {OV8858_8BIT, 0x403D, 0x04}, /* BLC CTRL3D */
+ {OV8858_8BIT, 0x4600, 0x00}, /* Unknown */
+ {OV8858_8BIT, 0x4601, 0xef}, /* Unknown */
+ {OV8858_8BIT, 0x4837, 0x16}, /* pclk_period = 0x16 */
+ {OV8858_TOK_TERM, 0, 0}
+};
+
+static const struct ov8858_reg ov8858_1080P_30[] = {
+ {OV8858_8BIT, 0x0100, 0x00}, /* software_standby */
+ {OV8858_8BIT, 0x3778, 0x17}, /* Unknown */
+ {OV8858_8BIT, 0x3800, 0x02}, /* h_crop_start high */
+ {OV8858_8BIT, 0x3801, 0x26}, /* h_crop_start low */
+ {OV8858_8BIT, 0x3802, 0x02}, /* v_crop_start high */
+ {OV8858_8BIT, 0x3803, 0x8C}, /* v_crop_start low */
+ {OV8858_8BIT, 0x3804, 0x0A}, /* h_crop_end high */
+ {OV8858_8BIT, 0x3805, 0x9D}, /* h_crop_end low */
+ {OV8858_8BIT, 0x3806, 0x07}, /* v_crop_end high */
+ {OV8858_8BIT, 0x3807, 0x0A}, /* v_crop_end low */
+ {OV8858_8BIT, 0x3808, 0x07}, /* h_output_size high*/
+ {OV8858_8BIT, 0x3809, 0x90}, /* h_output_size low */
+ {OV8858_8BIT, 0x380A, 0x04}, /* v_output_size high */
+ {OV8858_8BIT, 0x380B, 0x48}, /* v_output_size low */
+ {OV8858_8BIT, 0x380C, 0x07}, /* horizontal timing size high */
+ {OV8858_8BIT, 0x380D, 0x94}, /* horizontal timing size low */
+ {OV8858_8BIT, 0x380E, 0x0A}, /* vertical timing size high */
+ {OV8858_8BIT, 0x380F, 0x0D}, /* vertical timing size low */
+ {OV8858_8BIT, 0x3814, 0x01}, /* h_odd_inc */
+ {OV8858_8BIT, 0x3815, 0x01}, /* h_even_inc */
+ {OV8858_8BIT, 0x3820, 0x00}, /* format1 */
+ {OV8858_8BIT, 0x3821, 0x40}, /* format2 */
+ {OV8858_8BIT, 0x382A, 0x01}, /* v_odd_inc */
+ {OV8858_8BIT, 0x382B, 0x01}, /* v_even_inc */
+ {OV8858_8BIT, 0x3830, 0x06}, /* Unknown */
+ {OV8858_8BIT, 0x3836, 0x01}, /* Unknown */
+ {OV8858_8BIT, 0x3D85, 0x14}, /* OTP_REG85 */
+ {OV8858_8BIT, 0x3F08, 0x10}, /* PSRAM control register */
+ {OV8858_8BIT, 0x4034, 0x3F}, /* Unknown */
+ {OV8858_8BIT, 0x403D, 0x04}, /* BLC CTRL3D */
+ {OV8858_8BIT, 0x4600, 0x00}, /* Unknown */
+ {OV8858_8BIT, 0x4601, 0xef}, /* Unknown */
+ {OV8858_8BIT, 0x4837, 0x16}, /* pclk_period = 0x16 */
+ {OV8858_TOK_TERM, 0, 0}
+};
+
+static const struct ov8858_reg ov8858_1640x1232[] = {
+ {OV8858_8BIT, 0x0100, 0x00}, /* software_standby */
+ {OV8858_8BIT, 0x3778, 0x16}, /* Unknown */
+ {OV8858_8BIT, 0x3800, 0x00}, /* h_crop_start high */
+ {OV8858_8BIT, 0x3801, 0x0C}, /* h_crop_start low 12 */
+ {OV8858_8BIT, 0x3802, 0x00}, /* v_crop_start high */
+ {OV8858_8BIT, 0x3803, 0x0C}, /* v_crop_start low */
+ {OV8858_8BIT, 0x3804, 0x0C}, /* h_crop_end high 3283 */
+ {OV8858_8BIT, 0x3805, 0xD3}, /* h_crop_end low */
+ {OV8858_8BIT, 0x3806, 0x09}, /* v_crop_end high */
+ {OV8858_8BIT, 0x3807, 0xA3}, /* v_crop_end low */
+ {OV8858_8BIT, 0x3808, 0x06}, /* h_output_size high 1640 x 1232 */
+ {OV8858_8BIT, 0x3809, 0x68}, /* h_output_size low */
+ {OV8858_8BIT, 0x380A, 0x04}, /* v_output_size high */
+ {OV8858_8BIT, 0x380B, 0xD0}, /* v_output_size low */
+ {OV8858_8BIT, 0x380C, 0x07}, /* horizontal timing size high */
+ {OV8858_8BIT, 0x380D, 0x94}, /* horizontal timing size low */
+ {OV8858_8BIT, 0x380E, 0x09}, /* vertical timing size high */
+ {OV8858_8BIT, 0x380F, 0xAA}, /* vertical timing size low */
+ {OV8858_8BIT, 0x3814, 0x03}, /* h_odd_inc */
+ {OV8858_8BIT, 0x3815, 0x01}, /* h_even_inc */
+ {OV8858_8BIT, 0x3820, 0x00}, /* format1 */
+ {OV8858_8BIT, 0x3821, 0x67}, /* format2 */
+ {OV8858_8BIT, 0x382A, 0x03}, /* v_odd_inc */
+ {OV8858_8BIT, 0x382B, 0x01}, /* v_even_inc */
+ {OV8858_8BIT, 0x3830, 0x08}, /* Unknown */
+ {OV8858_8BIT, 0x3836, 0x02}, /* Unknown */
+ {OV8858_8BIT, 0x3D85, 0x16}, /* OTP_REG85 */
+ {OV8858_8BIT, 0x3F08, 0x08}, /* PSRAM control register */
+ {OV8858_8BIT, 0x4034, 0x3F}, /* Unknown */
+ {OV8858_8BIT, 0x403D, 0x04}, /* BLC CTRL3D */
+ {OV8858_8BIT, 0x4600, 0x00}, /* Unknown */
+ {OV8858_8BIT, 0x4601, 0xCB}, /* Unknown */
+ {OV8858_8BIT, 0x4837, 0x14}, /* pclk_period = 0x14 */
+ {OV8858_TOK_TERM, 0, 0}
+};
+
+static const struct ov8858_reg ov8858_1640x1096[] = {
+ {OV8858_8BIT, 0x0100, 0x00}, /* software_standby */
+ {OV8858_8BIT, 0x3778, 0x16}, /* Unknown */
+ {OV8858_8BIT, 0x3800, 0x00}, /* h_crop_start high */
+ {OV8858_8BIT, 0x3801, 0x0C}, /* h_crop_start low 12 */
+ {OV8858_8BIT, 0x3802, 0x00}, /* v_crop_start high */
+ {OV8858_8BIT, 0x3803, 0x0C}, /* v_crop_start low */
+ {OV8858_8BIT, 0x3804, 0x0C}, /* h_crop_end high 3283 */
+ {OV8858_8BIT, 0x3805, 0xD3}, /* h_crop_end low */
+ {OV8858_8BIT, 0x3806, 0x09}, /* v_crop_end high */
+ {OV8858_8BIT, 0x3807, 0xA3}, /* v_crop_end low */
+ {OV8858_8BIT, 0x3808, 0x06}, /* h_output_size high 1640 x 1096 */
+ {OV8858_8BIT, 0x3809, 0x68}, /* h_output_size low */
+ {OV8858_8BIT, 0x380A, 0x04}, /* v_output_size high */
+ {OV8858_8BIT, 0x380B, 0x48}, /* v_output_size low */
+ {OV8858_8BIT, 0x380C, 0x07}, /* horizontal timing size high */
+ {OV8858_8BIT, 0x380D, 0x94}, /* horizontal timing size low */
+ {OV8858_8BIT, 0x380E, 0x09}, /* vertical timing size high */
+ {OV8858_8BIT, 0x380F, 0xAA}, /* vertical timing size low */
+ {OV8858_8BIT, 0x3814, 0x03}, /* h_odd_inc */
+ {OV8858_8BIT, 0x3815, 0x01}, /* h_even_inc */
+ {OV8858_8BIT, 0x3820, 0x00}, /* format1 */
+ {OV8858_8BIT, 0x3821, 0x67}, /* format2 */
+ {OV8858_8BIT, 0x382A, 0x03}, /* v_odd_inc */
+ {OV8858_8BIT, 0x382B, 0x01}, /* v_even_inc */
+ {OV8858_8BIT, 0x3830, 0x08}, /* Unknown */
+ {OV8858_8BIT, 0x3836, 0x02}, /* Unknown */
+ {OV8858_8BIT, 0x3D85, 0x16}, /* OTP_REG85 */
+ {OV8858_8BIT, 0x3F08, 0x08}, /* PSRAM control register */
+ {OV8858_8BIT, 0x4034, 0x3F}, /* Unknown */
+ {OV8858_8BIT, 0x403D, 0x04}, /* BLC CTRL3D */
+ {OV8858_8BIT, 0x4600, 0x00}, /* Unknown */
+ {OV8858_8BIT, 0x4601, 0xCB}, /* Unknown */
+ {OV8858_8BIT, 0x4837, 0x14}, /* pclk_period = 0x14 */
+ {OV8858_TOK_TERM, 0, 0}
+};
+
+
+static const struct ov8858_reg ov8858_1640x926[] = {
+ {OV8858_8BIT, 0x0100, 0x00}, /* software_standby */
+ {OV8858_8BIT, 0x3778, 0x16}, /* Unknown */
+ {OV8858_8BIT, 0x3800, 0x00}, /* h_crop_start high */
+ {OV8858_8BIT, 0x3801, 0x0C}, /* h_crop_start low */
+ {OV8858_8BIT, 0x3802, 0x00}, /* v_crop_start high */
+ {OV8858_8BIT, 0x3803, 0x0C}, /* v_crop_start low */
+ {OV8858_8BIT, 0x3804, 0x0C}, /* h_crop_end high */
+ {OV8858_8BIT, 0x3805, 0xD3}, /* h_crop_end low */
+ {OV8858_8BIT, 0x3806, 0x09}, /* v_crop_end high */
+ {OV8858_8BIT, 0x3807, 0xA3}, /* v_crop_end low */
+ {OV8858_8BIT, 0x3808, 0x06}, /* h_output_size high 1640 x 926 */
+ {OV8858_8BIT, 0x3809, 0x68}, /* h_output_size low */
+ {OV8858_8BIT, 0x380A, 0x03}, /* v_output_size high */
+ {OV8858_8BIT, 0x380B, 0x9E}, /* v_output_size low */
+ {OV8858_8BIT, 0x380C, 0x07}, /* horizontal timing size high */
+ {OV8858_8BIT, 0x380D, 0x94}, /* horizontal timing size low */
+ {OV8858_8BIT, 0x380E, 0x09}, /* vertical timing size high */
+ {OV8858_8BIT, 0x380F, 0xAA}, /* vertical timing size low */
+ {OV8858_8BIT, 0x3814, 0x03}, /* h_odd_inc */
+ {OV8858_8BIT, 0x3815, 0x01}, /* h_even_inc */
+ {OV8858_8BIT, 0x3820, 0x00}, /* format1 */
+ {OV8858_8BIT, 0x3821, 0x67}, /* format2 */
+ {OV8858_8BIT, 0x382A, 0x03}, /* v_odd_inc */
+ {OV8858_8BIT, 0x382B, 0x01}, /* v_even_inc */
+ {OV8858_8BIT, 0x3830, 0x08}, /* Unknown */
+ {OV8858_8BIT, 0x3836, 0x02}, /* Unknown */
+ {OV8858_8BIT, 0x3D85, 0x16}, /* OTP_REG85 */
+ {OV8858_8BIT, 0x3F08, 0x08}, /* PSRAM control register */
+ {OV8858_8BIT, 0x4034, 0x3F}, /* Unknown */
+ {OV8858_8BIT, 0x403D, 0x04}, /* BLC CTRL3D */
+ {OV8858_8BIT, 0x4600, 0x00}, /* Unknown */
+ {OV8858_8BIT, 0x4601, 0xCB}, /* Unknown */
+ {OV8858_8BIT, 0x4837, 0x14}, /* pclk_period = 0x14 */
+ {OV8858_TOK_TERM, 0, 0}
+};
+
+static struct ov8858_resolution ov8858_res_preview[] = {
+ {
+ .desc = "ov8858_1640x926_PREVIEW",
+ .width = 1640,
+ .height = 926,
+ .used = 0,
+ .regs = ov8858_1640x926,
+ .bin_factor_x = 0,
+ .bin_factor_y = 0,
+ .skip_frames = 0,
+ .fps_options = {
+ {
+ .fps = 30,
+ .pixels_per_line = 3880,
+ .lines_per_frame = 2573,
+ },
+ {
+ }
+ },
+ },
+ {
+ .desc = "ov8858_1640x1232_PREVIEW",
+ .width = 1640,
+ .height = 1232,
+ .used = 0,
+ .regs = ov8858_1640x1232,
+ .bin_factor_x = 0,
+ .bin_factor_y = 0,
+ .skip_frames = 0,
+ .fps_options = {
+ {
+ .fps = 30,
+ .pixels_per_line = 3880,
+ .lines_per_frame = 2573,
+ },
+ {
+ }
+ },
+ },
+ {
+ .desc = "ov8858_1936x1096_PREVIEW",
+ .width = 1936,
+ .height = 1096,
+ .used = 0,
+ .regs = ov8858_1080P_30,
+ .bin_factor_x = 0,
+ .bin_factor_y = 0,
+ .skip_frames = 0,
+ .fps_options = {
+ {
+ .fps = 30,
+ .pixels_per_line = 3880,
+ .lines_per_frame = 2573,
+ },
+ {
+ }
+ },
+ },
+ {
+ .desc = "ov8858_3276x1848_PREVIEW",
+ .width = 3276,
+ .height = 1848,
+ .used = 0,
+ .regs = ov8858_3276x1848,
+ .bin_factor_x = 0,
+ .bin_factor_y = 0,
+ .skip_frames = 0,
+ .fps_options = {
+ {
+ .fps = 30,
+ .pixels_per_line = 3880,
+ .lines_per_frame = 2573,
+ },
+ {
+ }
+ },
+ },
+ {
+ .desc = "ov8858_8M_PREVIEW",
+ .width = 3280,
+ .height = 2464,
+ .used = 0,
+ .regs = ov8858_8M,
+ .bin_factor_x = 0,
+ .bin_factor_y = 0,
+ .skip_frames = 0,
+ .fps_options = {
+ {
+ .fps = 30,
+ .pixels_per_line = 3880,
+ .lines_per_frame = 2573,
+ },
+ {
+ }
+ },
+ },
+};
+
+static struct ov8858_resolution ov8858_res_still[] = {
+ {
+ .desc = "ov8858_1640x1232_STILL",
+ .width = 1640,
+ .height = 1232,
+ .used = 0,
+ .regs = ov8858_1640x1232,
+ .bin_factor_x = 0,
+ .bin_factor_y = 0,
+ .skip_frames = 0,
+ .fps_options = {
+ {
+ .fps = 30,
+ .pixels_per_line = 3880,
+ .lines_per_frame = 2573,
+ },
+ {
+ }
+ },
+ },
+ {
+ .desc = "ov8858_1640x926_STILL",
+ .width = 1640,
+ .height = 926,
+ .used = 0,
+ .regs = ov8858_1640x926,
+ .bin_factor_x = 0,
+ .bin_factor_y = 0,
+ .skip_frames = 1,
+ .fps_options = {
+ {
+ .fps = 30,
+ .pixels_per_line = 3880,
+ .lines_per_frame = 2573,
+ },
+ {
+ }
+ },
+ },
+ {
+ .desc = "ov8858_3276X1848_STILL",
+ .width = 3276,
+ .height = 1848,
+ .used = 0,
+ .regs = ov8858_3276x1848,
+ .bin_factor_x = 0,
+ .bin_factor_y = 0,
+ .skip_frames = 1,
+ .fps_options = {
+ {
+ .fps = 30,
+ .pixels_per_line = 3880,
+ .lines_per_frame = 2573,
+ },
+ {
+ }
+ },
+ },
+ {
+ .desc = "ov8858_8M_STILL",
+ .width = 3280,
+ .height = 2464,
+ .used = 0,
+ .regs = ov8858_8M,
+ .bin_factor_x = 0,
+ .bin_factor_y = 0,
+ .skip_frames = 1,
+ .fps_options = {
+ {
+ /* Pixel clock: 149.76MHZ */
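+				/*
+				 * Illustrative check: fps roughly equals
+				 * pclk / (pixels_per_line * lines_per_frame)
+				 * = 149.76e6 / (3880 * 3859) ~= 10.
+				 */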
+ .fps = 10,
+ .pixels_per_line = 3880,
+ .lines_per_frame = 3859,
+ },
+ {
+ }
+ },
+ },
+};
+
+static struct ov8858_resolution ov8858_res_video[] = {
+ {
+ .desc = "ov8858_1640x926_VIDEO",
+ .width = 1640,
+ .height = 926,
+ .used = 0,
+ .regs = ov8858_1640x926,
+ .bin_factor_x = 0,
+ .bin_factor_y = 0,
+ .skip_frames = 1,
+ .fps_options = {
+ {
+ .fps = 30,
+ .pixels_per_line = 3880,
+ .lines_per_frame = 2573,
+ },
+ {
+ }
+ },
+ },
+ {
+ .desc = "ov8858_1640x1232_VIDEO",
+ .width = 1640,
+ .height = 1232,
+ .used = 0,
+ .regs = ov8858_1640x1232,
+ .bin_factor_x = 0,
+ .bin_factor_y = 0,
+ .skip_frames = 1,
+ .fps_options = {
+ {
+ .fps = 30,
+ .pixels_per_line = 3880,
+ .lines_per_frame = 2573,
+ },
+ {
+ }
+ },
+ },
+ {
+ .desc = "ov8858_1640x1096_VIDEO",
+ .width = 1640,
+ .height = 1096,
+ .used = 0,
+ .regs = ov8858_1640x1096,
+ .bin_factor_x = 0,
+ .bin_factor_y = 0,
+ .skip_frames = 1,
+ .fps_options = {
+ {
+ .fps = 30,
+ .pixels_per_line = 3880,
+ .lines_per_frame = 2573,
+ },
+ {
+ }
+ },
+ },
+ {
+ .desc = "ov8858_1080P_30_VIDEO",
+ .width = 1936,
+ .height = 1096,
+ .used = 0,
+ .regs = ov8858_1080P_30,
+ .bin_factor_x = 0,
+ .bin_factor_y = 0,
+ .skip_frames = 1,
+ .fps_options = {
+ {
+ .fps = 30,
+ .pixels_per_line = 3880,
+ .lines_per_frame = 2573,
+ },
+ {
+ }
+ },
+ },
+};
+
+#endif /* __OV8858_H__ */
diff --git a/drivers/staging/media/atomisp/include/asm/intel_mid_pcihelpers.h b/drivers/staging/media/atomisp/include/asm/intel_mid_pcihelpers.h
new file mode 100644
index 000000000000..c5e22bba455a
--- /dev/null
+++ b/drivers/staging/media/atomisp/include/asm/intel_mid_pcihelpers.h
@@ -0,0 +1,37 @@
+/*
+ * Access to message bus through three registers
+ * in CUNIT(0:0:0) PCI configuration space.
+ * MSGBUS_CTRL_REG(0xD0):
+ * 31:24 = message bus opcode
+ * 23:16 = message bus port
+ * 15:8 = message bus address, low 8 bits.
+ * 7:4 = message bus byte enables
+ * MSGBUS_CTRL_EXT_REG(0xD8):
+ * 31:8 = message bus address, high 24 bits.
+ * MSGBUS_DATA_REG(0xD4):
+ * hold the data for write or read
+ */
+#define PCI_ROOT_MSGBUS_CTRL_REG 0xD0
+#define PCI_ROOT_MSGBUS_DATA_REG 0xD4
+#define PCI_ROOT_MSGBUS_CTRL_EXT_REG 0xD8
+#define PCI_ROOT_MSGBUS_READ 0x10
+#define PCI_ROOT_MSGBUS_WRITE 0x11
+#define PCI_ROOT_MSGBUS_DWORD_ENABLE 0xf0
+
+/* In BYT platform for all internal PCI devices d3 delay
+ * of 3 ms is sufficient. Default value of 10 ms is overkill.
+ */
+#define INTERNAL_PCI_PM_D3_WAIT 3
+
+#define ISP_SUB_CLASS 0x80
+#define SUB_CLASS_MASK 0xFF00
+
+u32 intel_mid_msgbus_read32_raw(u32 cmd);
+u32 intel_mid_msgbus_read32(u8 port, u32 addr);
+void intel_mid_msgbus_write32_raw(u32 cmd, u32 data);
+void intel_mid_msgbus_write32(u8 port, u32 addr, u32 data);
+u32 intel_mid_msgbus_read32_raw_ext(u32 cmd, u32 cmd_ext);
+void intel_mid_msgbus_write32_raw_ext(u32 cmd, u32 cmd_ext, u32 data);
+u32 intel_mid_soc_stepping(void);
+int intel_mid_dw_i2c_acquire_ownership(void);
+int intel_mid_dw_i2c_release_ownership(void);
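+
+/*
+ * Illustrative sketch only (the helper below is hypothetical, not part of
+ * the exported API): composing a raw message bus command word from the
+ * field layout documented above, for use with intel_mid_msgbus_read32_raw().
+ */
+static inline u32 __msgbus_cmd_sketch(u8 opcode, u8 port, u32 addr)
+{
+	/* 31:24 opcode, 23:16 port, 15:8 low address bits, 7:4 byte enables */
+	return ((u32)opcode << 24) | ((u32)port << 16) | ((addr & 0xFF) << 8) |
+	       PCI_ROOT_MSGBUS_DWORD_ENABLE;
+}
+/*
+ * e.g. a full-dword read:
+ *	val = intel_mid_msgbus_read32_raw(
+ *		__msgbus_cmd_sketch(PCI_ROOT_MSGBUS_READ, port, addr));
+ */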
diff --git a/drivers/staging/media/atomisp/include/linux/atomisp.h b/drivers/staging/media/atomisp/include/linux/atomisp.h
new file mode 100644
index 000000000000..35865462ccf9
--- /dev/null
+++ b/drivers/staging/media/atomisp/include/linux/atomisp.h
@@ -0,0 +1,1367 @@
+/*
+ * Support for Medfield PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+#ifdef CSS15
+#include <linux/atomisp_css15.h>
+#else
+
+#ifndef _ATOM_ISP_H
+#define _ATOM_ISP_H
+
+#include <linux/types.h>
+#include <linux/version.h>
+
+/* struct media_device_info.driver_version */
+#define ATOMISP_CSS_VERSION_MASK 0x00ffffff
+#define ATOMISP_CSS_VERSION_15 KERNEL_VERSION(1, 5, 0)
+#define ATOMISP_CSS_VERSION_20 KERNEL_VERSION(2, 0, 0)
+#define ATOMISP_CSS_VERSION_21 KERNEL_VERSION(2, 1, 0)
+
+/* struct media_device_info.hw_revision */
+#define ATOMISP_HW_REVISION_MASK 0x0000ff00
+#define ATOMISP_HW_REVISION_SHIFT 8
+#define ATOMISP_HW_REVISION_ISP2300 0x00
+#define ATOMISP_HW_REVISION_ISP2400 0x10
+#define ATOMISP_HW_REVISION_ISP2401_LEGACY 0x11
+#define ATOMISP_HW_REVISION_ISP2401 0x20
+
+#define ATOMISP_HW_STEPPING_MASK 0x000000ff
+#define ATOMISP_HW_STEPPING_A0 0x00
+#define ATOMISP_HW_STEPPING_B0 0x10
+
+/*ISP binary running mode*/
+#define CI_MODE_PREVIEW 0x8000
+#define CI_MODE_VIDEO 0x4000
+#define CI_MODE_STILL_CAPTURE 0x2000
+#define CI_MODE_CONTINUOUS 0x1000
+#define CI_MODE_NONE 0x0000
+
+#define OUTPUT_MODE_FILE 0x0100
+#define OUTPUT_MODE_TEXT 0x0200
+
+/*
+ * Camera HAL sets this flag in v4l2_buffer reserved2 to indicate this
+ * buffer has a per-frame parameter.
+ */
+#define ATOMISP_BUFFER_HAS_PER_FRAME_SETTING 0x80000000
+
+/* Custom format for RAW capture from M10MO 0x3130314d */
+#define V4L2_PIX_FMT_CUSTOM_M10MO_RAW v4l2_fourcc('M', '1', '0', '1')
+
+/* Custom media bus formats being used in atomisp */
+#define V4L2_MBUS_FMT_CUSTOM_YUV420 0x8001
+#define V4L2_MBUS_FMT_CUSTOM_YVU420 0x8002
+#define V4L2_MBUS_FMT_CUSTOM_YUV422P 0x8003
+#define V4L2_MBUS_FMT_CUSTOM_YUV444 0x8004
+#define V4L2_MBUS_FMT_CUSTOM_NV12 0x8005
+#define V4L2_MBUS_FMT_CUSTOM_NV21 0x8006
+#define V4L2_MBUS_FMT_CUSTOM_NV16 0x8007
+#define V4L2_MBUS_FMT_CUSTOM_YUYV 0x8008
+#define V4L2_MBUS_FMT_CUSTOM_SBGGR16 0x8009
+#define V4L2_MBUS_FMT_CUSTOM_RGB32 0x800a
+
+/* Custom media bus format for M10MO RAW capture */
+#define V4L2_MBUS_FMT_CUSTOM_M10MO_RAW 0x800b
+
+/* Configuration used by Bayer noise reduction and YCC noise reduction */
+struct atomisp_nr_config {
+ /* [gain] Strength of noise reduction for Bayer NR (Used by Bayer NR) */
+ unsigned int bnr_gain;
+ /* [gain] Strength of noise reduction for YCC NR (Used by YCC NR) */
+ unsigned int ynr_gain;
+ /* [intensity] Sensitivity of Edge (Used by Bayer NR) */
+ unsigned int direction;
+ /* [intensity] coring threshold for Cb (Used by YCC NR) */
+ unsigned int threshold_cb;
+ /* [intensity] coring threshold for Cr (Used by YCC NR) */
+ unsigned int threshold_cr;
+};
+
+/* Temporal noise reduction configuration */
+struct atomisp_tnr_config {
+ unsigned int gain; /* [gain] Strength of NR */
+ unsigned int threshold_y;/* [intensity] Motion sensitivity for Y */
+ unsigned int threshold_uv;/* [intensity] Motion sensitivity for U/V */
+};
+
+/* Histogram. This contains num_elements values of type unsigned int.
+ * The data pointer is a DDR pointer (virtual address).
+ */
+struct atomisp_histogram {
+ unsigned int num_elements;
+ void __user *data;
+};
+
+enum atomisp_ob_mode {
+ atomisp_ob_mode_none,
+ atomisp_ob_mode_fixed,
+ atomisp_ob_mode_raster
+};
+
+/* Optical black level configuration */
+struct atomisp_ob_config {
+	/* Optical black level mode (Fixed / Raster) */
+ enum atomisp_ob_mode mode;
+ /* [intensity] optical black level for GR (relevant for fixed mode) */
+ unsigned int level_gr;
+ /* [intensity] optical black level for R (relevant for fixed mode) */
+ unsigned int level_r;
+ /* [intensity] optical black level for B (relevant for fixed mode) */
+ unsigned int level_b;
+ /* [intensity] optical black level for GB (relevant for fixed mode) */
+ unsigned int level_gb;
+ /* [BQ] 0..63 start position of OB area (relevant for raster mode) */
+ unsigned short start_position;
+ /* [BQ] start..63 end position of OB area (relevant for raster mode) */
+ unsigned short end_position;
+};
+
+/* Edge enhancement (sharpen) configuration */
+struct atomisp_ee_config {
+ /* [gain] The strength of sharpness. u5_11 */
+ unsigned int gain;
+ /* [intensity] The threshold that divides noises from edge. u8_8 */
+ unsigned int threshold;
+ /* [gain] The strength of sharpness in pell-mell area. u5_11 */
+ unsigned int detail_gain;
+};
+
+struct atomisp_3a_output {
+ int ae_y;
+ int awb_cnt;
+ int awb_gr;
+ int awb_r;
+ int awb_b;
+ int awb_gb;
+ int af_hpf1;
+ int af_hpf2;
+};
+
+enum atomisp_calibration_type {
+ calibration_type1,
+ calibration_type2,
+ calibration_type3
+};
+
+struct atomisp_calibration_group {
+ unsigned int size;
+ unsigned int type;
+ unsigned short *calb_grp_values;
+};
+
+struct atomisp_gc_config {
+ __u16 gain_k1;
+ __u16 gain_k2;
+};
+
+struct atomisp_3a_config {
+ unsigned int ae_y_coef_r; /* [gain] Weight of R for Y */
+ unsigned int ae_y_coef_g; /* [gain] Weight of G for Y */
+ unsigned int ae_y_coef_b; /* [gain] Weight of B for Y */
+ unsigned int awb_lg_high_raw; /* [intensity]
+ AWB level gate high for raw */
+ unsigned int awb_lg_low; /* [intensity] AWB level gate low */
+ unsigned int awb_lg_high; /* [intensity] AWB level gate high */
+ int af_fir1_coef[7]; /* [factor] AF FIR coefficients of fir1 */
+ int af_fir2_coef[7]; /* [factor] AF FIR coefficients of fir2 */
+};
+
+struct atomisp_dvs_grid_info {
+ uint32_t enable;
+ uint32_t width;
+ uint32_t aligned_width;
+ uint32_t height;
+ uint32_t aligned_height;
+ uint32_t bqs_per_grid_cell;
+ uint32_t num_hor_coefs;
+ uint32_t num_ver_coefs;
+};
+
+struct atomisp_dvs_envelop {
+ unsigned int width;
+ unsigned int height;
+};
+
+struct atomisp_grid_info {
+ uint32_t enable;
+ uint32_t use_dmem;
+ uint32_t has_histogram;
+ uint32_t s3a_width;
+ uint32_t s3a_height;
+ uint32_t aligned_width;
+ uint32_t aligned_height;
+ uint32_t s3a_bqs_per_grid_cell;
+ uint32_t deci_factor_log2;
+ uint32_t elem_bit_depth;
+};
+
+struct atomisp_dis_vector {
+ int x;
+ int y;
+};
+
+
+/** DVS 2.0 Coefficient types. This structure contains 4 pointers to
+ * arrays that contain the coefficients for each type.
+ */
+struct atomisp_dvs2_coef_types {
+ short __user *odd_real; /**< real part of the odd coefficients*/
+ short __user *odd_imag; /**< imaginary part of the odd coefficients*/
+ short __user *even_real;/**< real part of the even coefficients*/
+ short __user *even_imag;/**< imaginary part of the even coefficients*/
+};
+
+/*
+ * DVS 2.0 Statistic types. This structure contains 4 pointers to
+ * arrays that contain the statistics for each type.
+ */
+struct atomisp_dvs2_stat_types {
+ int __user *odd_real; /**< real part of the odd statistics*/
+ int __user *odd_imag; /**< imaginary part of the odd statistics*/
+ int __user *even_real;/**< real part of the even statistics*/
+ int __user *even_imag;/**< imaginary part of the even statistics*/
+};
+
+struct atomisp_dis_coefficients {
+ struct atomisp_dvs_grid_info grid_info;
+ struct atomisp_dvs2_coef_types hor_coefs;
+ struct atomisp_dvs2_coef_types ver_coefs;
+};
+
+struct atomisp_dvs2_statistics {
+ struct atomisp_dvs_grid_info grid_info;
+ struct atomisp_dvs2_stat_types hor_prod;
+ struct atomisp_dvs2_stat_types ver_prod;
+};
+
+struct atomisp_dis_statistics {
+ struct atomisp_dvs2_statistics dvs2_stat;
+ uint32_t exp_id;
+};
+
+struct atomisp_3a_rgby_output {
+ uint32_t r;
+ uint32_t g;
+ uint32_t b;
+ uint32_t y;
+};
+
+/*
+ * At most two pipes can output metadata. The driver uses
+ * ATOMISP_MAIN_METADATA for the metadata from the pipe that is always
+ * streaming, and ATOMISP_SEC_METADATA for the metadata from the pipe that
+ * streams on request (e.g. the capture pipe in ZSL or SDV mode). For use
+ * cases with only one streaming pipe, such as online capture,
+ * ATOMISP_MAIN_METADATA is used.
+ */
+enum atomisp_metadata_type {
+ ATOMISP_MAIN_METADATA = 0,
+ ATOMISP_SEC_METADATA,
+ ATOMISP_METADATA_TYPE_NUM,
+};
+
+struct atomisp_metadata_with_type {
+ /* to specify which type of metadata to get */
+ enum atomisp_metadata_type type;
+ void __user *data;
+ uint32_t width;
+ uint32_t height;
+ uint32_t stride; /* in bytes */
+ uint32_t exp_id; /* exposure ID */
+ uint32_t *effective_width; /* mipi packets valid data size */
+};
+
+struct atomisp_metadata {
+ void __user *data;
+ uint32_t width;
+ uint32_t height;
+ uint32_t stride; /* in bytes */
+ uint32_t exp_id; /* exposure ID */
+ uint32_t *effective_width; /* mipi packets valid data size */
+};
+
+struct atomisp_ext_isp_ctrl {
+ uint32_t id;
+ uint32_t data;
+};
+
+struct atomisp_3a_statistics {
+ struct atomisp_grid_info grid_info;
+ struct atomisp_3a_output __user *data;
+ struct atomisp_3a_rgby_output __user *rgby_data;
+ uint32_t exp_id; /* exposure ID */
+ uint32_t isp_config_id; /* isp config ID */
+};
+
+/**
+ * struct atomisp_cont_capture_conf - continuous capture parameters
+ * @num_captures: number of still images to capture
+ * @skip_frames: number of frames to skip between 2 captures
+ * @offset: offset in ring buffer to start capture
+ *
+ * For example, to capture 1 frame from past, current, and 1 from future
+ * and skip one frame between each capture, parameters would be:
+ * num_captures:3
+ * skip_frames:1
+ * offset:-2
+ */
+
+struct atomisp_cont_capture_conf {
+ int num_captures;
+ unsigned int skip_frames;
+ int offset;
+ __u32 reserved[5];
+};
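+
+/*
+ * Illustrative sketch of the example described above (capture one past
+ * frame, the current frame and one future frame, skipping one frame
+ * between captures):
+ *
+ *	struct atomisp_cont_capture_conf conf = {
+ *		.num_captures = 3,
+ *		.skip_frames = 1,
+ *		.offset = -2,
+ *	};
+ */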
+
+struct atomisp_ae_window {
+ int x_left;
+ int x_right;
+ int y_top;
+ int y_bottom;
+ int weight;
+};
+
+/* White Balance (Gain Adjust) */
+struct atomisp_wb_config {
+ unsigned int integer_bits;
+ unsigned int gr; /* unsigned <integer_bits>.<16-integer_bits> */
+ unsigned int r; /* unsigned <integer_bits>.<16-integer_bits> */
+ unsigned int b; /* unsigned <integer_bits>.<16-integer_bits> */
+ unsigned int gb; /* unsigned <integer_bits>.<16-integer_bits> */
+};
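+
+/*
+ * Illustrative example of the fixed-point gain encoding above: with
+ * integer_bits == 1 the gains are u1.15 values, so a gain of 1.0 is
+ * encoded as 1 << 15 = 0x8000 and a gain of 1.5 as 0xC000.
+ */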
+
+/* Color Space Conversion settings */
+struct atomisp_cc_config {
+ unsigned int fraction_bits;
+ int matrix[3 * 3]; /* RGB2YUV Color matrix, signed
+ <13-fraction_bits>.<fraction_bits> */
+};
+
+/* De pixel noise configuration */
+struct atomisp_de_config {
+ unsigned int pixelnoise;
+ unsigned int c1_coring_threshold;
+ unsigned int c2_coring_threshold;
+};
+
+/* Chroma enhancement */
+struct atomisp_ce_config {
+ unsigned char uv_level_min;
+ unsigned char uv_level_max;
+};
+
+/* Defect pixel correction configuration */
+struct atomisp_dp_config {
+ /* [intensity] The threshold of defect Pixel Correction, representing
+ * the permissible difference of intensity between one pixel and its
+ * surrounding pixels. Smaller values result in more frequent pixel
+ * corrections. u0_16
+ */
+ unsigned int threshold;
+ /* [gain] The sensitivity of mis-correction. ISP will miss a lot of
+ * defects if the value is set too large. u8_8
+ */
+ unsigned int gain;
+ unsigned int gr;
+ unsigned int r;
+ unsigned int b;
+ unsigned int gb;
+};
+
+/* XNR threshold */
+struct atomisp_xnr_config {
+ __u16 threshold;
+};
+
+/* metadata config */
+struct atomisp_metadata_config {
+ uint32_t metadata_height;
+ uint32_t metadata_stride;
+};
+
+/*
+ * Generic resolution structure.
+ */
+struct atomisp_resolution {
+ uint32_t width; /**< Width */
+ uint32_t height; /**< Height */
+};
+
+/*
+ * This specifies the coordinates (x,y)
+ */
+struct atomisp_zoom_point {
+ int32_t x; /**< x coordinate */
+ int32_t y; /**< y coordinate */
+};
+
+/*
+ * This specifies the region
+ */
+struct atomisp_zoom_region {
+ struct atomisp_zoom_point origin; /* Starting point coordinates for the region */
+ struct atomisp_resolution resolution; /* Region resolution */
+};
+
+struct atomisp_dz_config {
+ uint32_t dx; /**< Horizontal zoom factor */
+ uint32_t dy; /**< Vertical zoom factor */
+ struct atomisp_zoom_region zoom_region; /**< region for zoom */
+};
+
+struct atomisp_parm {
+ struct atomisp_grid_info info;
+ struct atomisp_dvs_grid_info dvs_grid;
+ struct atomisp_dvs_envelop dvs_envelop;
+ struct atomisp_wb_config wb_config;
+ struct atomisp_cc_config cc_config;
+ struct atomisp_ob_config ob_config;
+ struct atomisp_de_config de_config;
+ struct atomisp_dz_config dz_config;
+ struct atomisp_ce_config ce_config;
+ struct atomisp_dp_config dp_config;
+ struct atomisp_nr_config nr_config;
+ struct atomisp_ee_config ee_config;
+ struct atomisp_tnr_config tnr_config;
+ struct atomisp_metadata_config metadata_config;
+};
+
+struct dvs2_bq_resolution {
+ int width_bq; /* width [BQ] */
+ int height_bq; /* height [BQ] */
+};
+
+struct atomisp_dvs2_bq_resolutions {
+ /* GDC source image size [BQ] */
+ struct dvs2_bq_resolution source_bq;
+ /* GDC output image size [BQ] */
+ struct dvs2_bq_resolution output_bq;
+ /* GDC effective envelope size [BQ] */
+ struct dvs2_bq_resolution envelope_bq;
+ /* isp pipe filter size [BQ] */
+ struct dvs2_bq_resolution ispfilter_bq;
+	/* GDC shift size [BQ] */
+ struct dvs2_bq_resolution gdc_shift_bq;
+};
+
+struct atomisp_dvs_6axis_config {
+ uint32_t exp_id;
+ uint32_t width_y;
+ uint32_t height_y;
+ uint32_t width_uv;
+ uint32_t height_uv;
+ uint32_t *xcoords_y;
+ uint32_t *ycoords_y;
+ uint32_t *xcoords_uv;
+ uint32_t *ycoords_uv;
+};
+
+struct atomisp_formats_config {
+ uint32_t video_full_range_flag;
+};
+
+struct atomisp_parameters {
+ struct atomisp_wb_config *wb_config; /* White Balance config */
+ struct atomisp_cc_config *cc_config; /* Color Correction config */
+ struct atomisp_tnr_config *tnr_config; /* Temporal Noise Reduction */
+ struct atomisp_ecd_config *ecd_config; /* Eigen Color Demosaicing */
+ struct atomisp_ynr_config *ynr_config; /* Y(Luma) Noise Reduction */
+ struct atomisp_fc_config *fc_config; /* Fringe Control */
+ struct atomisp_formats_config *formats_config; /* Formats Control */
+ struct atomisp_cnr_config *cnr_config; /* Chroma Noise Reduction */
+ struct atomisp_macc_config *macc_config; /* MACC */
+ struct atomisp_ctc_config *ctc_config; /* Chroma Tone Control */
+ struct atomisp_aa_config *aa_config; /* Anti-Aliasing */
+ struct atomisp_aa_config *baa_config; /* Anti-Aliasing */
+ struct atomisp_ce_config *ce_config;
+ struct atomisp_dvs_6axis_config *dvs_6axis_config;
+	struct atomisp_ob_config *ob_config; /* Optical Black config */
+ struct atomisp_dp_config *dp_config; /* Dead Pixel config */
+ struct atomisp_nr_config *nr_config; /* Noise Reduction config */
+ struct atomisp_ee_config *ee_config; /* Edge Enhancement config */
+ struct atomisp_de_config *de_config; /* Demosaic config */
+ struct atomisp_gc_config *gc_config; /* Gamma Correction config */
+ struct atomisp_anr_config *anr_config; /* Advanced Noise Reduction */
+ struct atomisp_3a_config *a3a_config; /* 3A Statistics config */
+ struct atomisp_xnr_config *xnr_config; /* eXtra Noise Reduction */
+ struct atomisp_dz_config *dz_config; /* Digital Zoom */
+ struct atomisp_cc_config *yuv2rgb_cc_config; /* Color
+ Correction config */
+ struct atomisp_cc_config *rgb2yuv_cc_config; /* Color
+ Correction config */
+ struct atomisp_macc_table *macc_table;
+ struct atomisp_gamma_table *gamma_table;
+ struct atomisp_ctc_table *ctc_table;
+ struct atomisp_xnr_table *xnr_table;
+ struct atomisp_rgb_gamma_table *r_gamma_table;
+ struct atomisp_rgb_gamma_table *g_gamma_table;
+ struct atomisp_rgb_gamma_table *b_gamma_table;
+ struct atomisp_vector *motion_vector; /* For 2-axis DVS */
+ struct atomisp_shading_table *shading_table;
+ struct atomisp_morph_table *morph_table;
+ struct atomisp_dvs_coefficients *dvs_coefs; /* DVS 1.0 coefficients */
+ struct atomisp_dvs2_coefficients *dvs2_coefs; /* DVS 2.0 coefficients */
+ struct atomisp_capture_config *capture_config;
+ struct atomisp_anr_thres *anr_thres;
+
+ void *lin_2500_config; /* Skylake: Linearization config */
+ void *obgrid_2500_config; /* Skylake: OBGRID config */
+ void *bnr_2500_config; /* Skylake: bayer denoise config */
+ void *shd_2500_config; /* Skylake: shading config */
+ void *dm_2500_config; /* Skylake: demosaic config */
+ void *rgbpp_2500_config; /* Skylake: RGBPP config */
+ void *dvs_stat_2500_config; /* Skylake: DVS STAT config */
+ void *lace_stat_2500_config; /* Skylake: LACE STAT config */
+ void *yuvp1_2500_config; /* Skylake: yuvp1 config */
+ void *yuvp2_2500_config; /* Skylake: yuvp2 config */
+ void *tnr_2500_config; /* Skylake: TNR config */
+ void *dpc_2500_config; /* Skylake: DPC config */
+ void *awb_2500_config; /* Skylake: auto white balance config */
+ void *awb_fr_2500_config; /* Skylake: auto white balance filter response config */
+ void *anr_2500_config; /* Skylake: ANR config */
+ void *af_2500_config; /* Skylake: auto focus config */
+ void *ae_2500_config; /* Skylake: auto exposure config */
+ void *bds_2500_config; /* Skylake: bayer downscaler config */
+ void *dvs_2500_config; /* Skylake: digital video stabilization config */
+ void *res_mgr_2500_config;
+
+ /*
+	 * Output frame pointer the config is to be applied to (optional);
+	 * set to NULL to apply this config globally.
+ */
+ void *output_frame;
+ /*
+ * Unique ID to track which config was actually applied to a particular
+	 * frame; the driver sends this ID back together with the output frame.
+ */
+ uint32_t isp_config_id;
+
+ /*
+ * Switch to control per_frame setting:
+ * 0: this is a global setting
+ * 1: this is a per_frame setting
+ * PLEASE KEEP THIS AT THE END OF THE STRUCTURE!!
+ */
+ uint32_t per_frame_setting;
+};
+
+#define ATOMISP_GAMMA_TABLE_SIZE 1024
+struct atomisp_gamma_table {
+ unsigned short data[ATOMISP_GAMMA_TABLE_SIZE];
+};
+
+/* Morphing table for advanced ISP.
+ * Each line of width elements takes up COORD_TABLE_EXT_WIDTH elements
+ * in memory.
+ */
+#define ATOMISP_MORPH_TABLE_NUM_PLANES 6
+struct atomisp_morph_table {
+ unsigned int enabled;
+
+ unsigned int height;
+ unsigned int width; /* number of valid elements per line */
+ unsigned short __user *coordinates_x[ATOMISP_MORPH_TABLE_NUM_PLANES];
+ unsigned short __user *coordinates_y[ATOMISP_MORPH_TABLE_NUM_PLANES];
+};
+
+#define ATOMISP_NUM_SC_COLORS 4
+#define ATOMISP_SC_FLAG_QUERY (1 << 0)
+
+struct atomisp_shading_table {
+ __u32 enable;
+
+ __u32 sensor_width;
+ __u32 sensor_height;
+ __u32 width;
+ __u32 height;
+ __u32 fraction_bits;
+
+ __u16 *data[ATOMISP_NUM_SC_COLORS];
+};
+
+struct atomisp_makernote_info {
+ /* bits 31-16: numerator, bits 15-0: denominator */
+ unsigned int focal_length;
+ /* bits 31-16: numerator, bits 15-0: denominator*/
+ unsigned int f_number_curr;
+ /*
+ * bits 31-24: max f-number numerator
+ * bits 23-16: max f-number denominator
+ * bits 15-8: min f-number numerator
+ * bits 7-0: min f-number denominator
+ */
+ unsigned int f_number_range;
+};
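+
+/*
+ * Illustrative example of the packing above: a lens covering f/2.0 to
+ * f/2.8 could encode f_number_range as
+ *	(28 << 24) | (10 << 16) | (20 << 8) | 10
+ * i.e. max f-number = 28/10 and min f-number = 20/10, and a 4.2 mm focal
+ * length could encode focal_length as (42 << 16) | 10.
+ */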
+
+/* parameter for MACC */
+#define ATOMISP_NUM_MACC_AXES 16
+struct atomisp_macc_table {
+ short data[4 * ATOMISP_NUM_MACC_AXES];
+};
+
+struct atomisp_macc_config {
+ int color_effect;
+ struct atomisp_macc_table table;
+};
+
+/* Parameter for ctc parameter control */
+#define ATOMISP_CTC_TABLE_SIZE 1024
+struct atomisp_ctc_table {
+ unsigned short data[ATOMISP_CTC_TABLE_SIZE];
+};
+
+/* Parameter for overlay image loading */
+struct atomisp_overlay {
+	/* the frame containing the overlay data. The overlay frame width
+	 * should be a multiple of 2*ISP_VEC_NELEMS. The overlay frame height
+	 * should be a multiple of 2.
+ */
+ struct v4l2_framebuffer *frame;
+ /* Y value of overlay background */
+ unsigned char bg_y;
+ /* U value of overlay background */
+ char bg_u;
+ /* V value of overlay background */
+ char bg_v;
+ /* the blending percent of input data for Y subpixels */
+ unsigned char blend_input_perc_y;
+ /* the blending percent of input data for U subpixels */
+ unsigned char blend_input_perc_u;
+ /* the blending percent of input data for V subpixels */
+ unsigned char blend_input_perc_v;
+ /* the blending percent of overlay data for Y subpixels */
+ unsigned char blend_overlay_perc_y;
+ /* the blending percent of overlay data for U subpixels */
+ unsigned char blend_overlay_perc_u;
+ /* the blending percent of overlay data for V subpixels */
+ unsigned char blend_overlay_perc_v;
+	/* the overlay start x pixel position on the output frame. It should
+	   be a multiple of 2*ISP_VEC_NELEMS. */
+ unsigned int overlay_start_x;
+	/* the overlay start y pixel position on the output frame. It should
+	   be a multiple of 2. */
+ unsigned int overlay_start_y;
+};
+
+/* Sensor resolution specific data for AE calculation.*/
+struct atomisp_sensor_mode_data {
+ unsigned int coarse_integration_time_min;
+ unsigned int coarse_integration_time_max_margin;
+ unsigned int fine_integration_time_min;
+ unsigned int fine_integration_time_max_margin;
+ unsigned int fine_integration_time_def;
+ unsigned int frame_length_lines;
+ unsigned int line_length_pck;
+ unsigned int read_mode;
+ unsigned int vt_pix_clk_freq_mhz;
+	unsigned int crop_horizontal_start; /* Sensor crop start coord. (x0,y0) */
+ unsigned int crop_vertical_start;
+	unsigned int crop_horizontal_end; /* Sensor crop end coord. (x1,y1) */
+ unsigned int crop_vertical_end;
+ unsigned int output_width; /* input size to ISP after binning/scaling */
+ unsigned int output_height;
+ uint8_t binning_factor_x; /* horizontal binning factor used */
+ uint8_t binning_factor_y; /* vertical binning factor used */
+ uint16_t hts;
+};
+
+struct atomisp_exposure {
+ unsigned int integration_time[8];
+ unsigned int shutter_speed[8];
+ unsigned int gain[4];
+ unsigned int aperture;
+};
+
+/* For texture streaming. */
+struct atomisp_bc_video_package {
+ int ioctl_cmd;
+ int device_id;
+ int inputparam;
+ int outputparam;
+};
+
+enum atomisp_focus_hp {
+ ATOMISP_FOCUS_HP_IN_PROGRESS = (1U << 2),
+ ATOMISP_FOCUS_HP_COMPLETE = (2U << 2),
+ ATOMISP_FOCUS_HP_FAILED = (3U << 2)
+};
+
+/* Masks */
+#define ATOMISP_FOCUS_STATUS_MOVING (1U << 0)
+#define ATOMISP_FOCUS_STATUS_ACCEPTS_NEW_MOVE (1U << 1)
+#define ATOMISP_FOCUS_STATUS_HOME_POSITION (3U << 2)
+
+enum atomisp_camera_port {
+ ATOMISP_CAMERA_PORT_SECONDARY,
+ ATOMISP_CAMERA_PORT_PRIMARY,
+ ATOMISP_CAMERA_PORT_TERTIARY,
+ ATOMISP_CAMERA_NR_PORTS
+};
+
+/* Flash modes. Default is off.
+ * Setting a flash to TORCH or INDICATOR mode will automatically
+ * turn it on. Setting it to FLASH mode will not turn on the flash
+ * until the FLASH_STROBE command is sent. */
+enum atomisp_flash_mode {
+ ATOMISP_FLASH_MODE_OFF,
+ ATOMISP_FLASH_MODE_FLASH,
+ ATOMISP_FLASH_MODE_TORCH,
+ ATOMISP_FLASH_MODE_INDICATOR,
+};
+
+/* Flash statuses, used by atomisp driver to check before starting
+ * flash and after having started flash. */
+enum atomisp_flash_status {
+ ATOMISP_FLASH_STATUS_OK,
+ ATOMISP_FLASH_STATUS_HW_ERROR,
+ ATOMISP_FLASH_STATUS_INTERRUPTED,
+ ATOMISP_FLASH_STATUS_TIMEOUT,
+};
+
+/* Frame status. This is used to detect corrupted frames and flash
+ * exposed frames. Usually, the first 2 frames coming out of the sensor
+ * are corrupted. When using flash, the frame before and the frame after
+ * the flash exposed frame may be partially exposed by flash. The ISP
+ * statistics for these frames should not be used by the 3A library.
+ * The frame status value can be found in the "reserved" field in the
+ * v4l2_buffer struct. */
+enum atomisp_frame_status {
+ ATOMISP_FRAME_STATUS_OK,
+ ATOMISP_FRAME_STATUS_CORRUPTED,
+ ATOMISP_FRAME_STATUS_FLASH_EXPOSED,
+ ATOMISP_FRAME_STATUS_FLASH_PARTIAL,
+ ATOMISP_FRAME_STATUS_FLASH_FAILED,
+};
+
+enum atomisp_acc_type {
+ ATOMISP_ACC_STANDALONE, /* Stand-alone acceleration */
+ ATOMISP_ACC_OUTPUT, /* Accelerator stage on output frame */
+ ATOMISP_ACC_VIEWFINDER /* Accelerator stage on viewfinder frame */
+};
+
+enum atomisp_acc_arg_type {
+ ATOMISP_ACC_ARG_SCALAR_IN, /* Scalar input argument */
+ ATOMISP_ACC_ARG_SCALAR_OUT, /* Scalar output argument */
+ ATOMISP_ACC_ARG_SCALAR_IO, /* Scalar in/output argument */
+ ATOMISP_ACC_ARG_PTR_IN, /* Pointer input argument */
+ ATOMISP_ACC_ARG_PTR_OUT, /* Pointer output argument */
+ ATOMISP_ACC_ARG_PTR_IO, /* Pointer in/output argument */
+ ATOMISP_ARG_PTR_NOFLUSH, /* Pointer argument will not be flushed */
+ ATOMISP_ARG_PTR_STABLE, /* Pointer input argument that is stable */
+ ATOMISP_ACC_ARG_FRAME /* Frame argument */
+};
+
+/** ISP memories, isp2400 */
+enum atomisp_acc_memory {
+ ATOMISP_ACC_MEMORY_PMEM0 = 0,
+ ATOMISP_ACC_MEMORY_DMEM0,
+ /* for backward compatibility */
+ ATOMISP_ACC_MEMORY_DMEM = ATOMISP_ACC_MEMORY_DMEM0,
+ ATOMISP_ACC_MEMORY_VMEM0,
+ ATOMISP_ACC_MEMORY_VAMEM0,
+ ATOMISP_ACC_MEMORY_VAMEM1,
+ ATOMISP_ACC_MEMORY_VAMEM2,
+ ATOMISP_ACC_MEMORY_HMEM0,
+ ATOMISP_ACC_NR_MEMORY
+};
+
+enum atomisp_ext_isp_id {
+ EXT_ISP_CID_ISO = 0,
+ EXT_ISP_CID_CAPTURE_HDR,
+ EXT_ISP_CID_CAPTURE_LLS,
+ EXT_ISP_CID_FOCUS_MODE,
+ EXT_ISP_CID_FOCUS_EXECUTION,
+ EXT_ISP_CID_TOUCH_POSX,
+ EXT_ISP_CID_TOUCH_POSY,
+ EXT_ISP_CID_CAF_STATUS,
+ EXT_ISP_CID_AF_STATUS,
+ EXT_ISP_CID_GET_AF_MODE,
+ EXT_ISP_CID_CAPTURE_BURST,
+ EXT_ISP_CID_FLASH_MODE,
+ EXT_ISP_CID_ZOOM,
+ EXT_ISP_CID_SHOT_MODE
+};
+
+#define EXT_ISP_FOCUS_MODE_NORMAL 0
+#define EXT_ISP_FOCUS_MODE_MACRO 1
+#define EXT_ISP_FOCUS_MODE_TOUCH_AF 2
+#define EXT_ISP_FOCUS_MODE_PREVIEW_CAF 3
+#define EXT_ISP_FOCUS_MODE_MOVIE_CAF 4
+#define EXT_ISP_FOCUS_MODE_FACE_CAF 5
+#define EXT_ISP_FOCUS_MODE_TOUCH_MACRO 6
+#define EXT_ISP_FOCUS_MODE_TOUCH_CAF 7
+
+#define EXT_ISP_FOCUS_STOP 0
+#define EXT_ISP_FOCUS_SEARCH 1
+#define EXT_ISP_PAN_FOCUSING 2
+
+#define EXT_ISP_CAF_RESTART_CHECK 1
+#define EXT_ISP_CAF_STATUS_FOCUSING 2
+#define EXT_ISP_CAF_STATUS_SUCCESS 3
+#define EXT_ISP_CAF_STATUS_FAIL 4
+
+#define EXT_ISP_AF_STATUS_INVALID 1
+#define EXT_ISP_AF_STATUS_FOCUSING 2
+#define EXT_ISP_AF_STATUS_SUCCESS 3
+#define EXT_ISP_AF_STATUS_FAIL 4
+
+enum atomisp_burst_capture_options {
+ EXT_ISP_BURST_CAPTURE_CTRL_START = 0,
+ EXT_ISP_BURST_CAPTURE_CTRL_STOP
+};
+
+#define EXT_ISP_FLASH_MODE_OFF 0
+#define EXT_ISP_FLASH_MODE_ON 1
+#define EXT_ISP_FLASH_MODE_AUTO 2
+#define EXT_ISP_LED_TORCH_OFF 3
+#define EXT_ISP_LED_TORCH_ON 4
+
+#define EXT_ISP_SHOT_MODE_AUTO 0
+#define EXT_ISP_SHOT_MODE_BEAUTY_FACE 1
+#define EXT_ISP_SHOT_MODE_BEST_PHOTO 2
+#define EXT_ISP_SHOT_MODE_DRAMA 3
+#define EXT_ISP_SHOT_MODE_BEST_FACE 4
+#define EXT_ISP_SHOT_MODE_ERASER 5
+#define EXT_ISP_SHOT_MODE_PANORAMA 6
+#define EXT_ISP_SHOT_MODE_RICH_TONE_HDR 7
+#define EXT_ISP_SHOT_MODE_NIGHT 8
+#define EXT_ISP_SHOT_MODE_SOUND_SHOT 9
+#define EXT_ISP_SHOT_MODE_ANIMATED_PHOTO 10
+#define EXT_ISP_SHOT_MODE_SPORTS 11
+
+struct atomisp_sp_arg {
+ enum atomisp_acc_arg_type type; /* Type of SP argument */
+ void *value; /* Value of SP argument */
+ unsigned int size; /* Size of SP argument */
+};
+
+/* Acceleration API */
+
+/* For CSS 1.0 only */
+struct atomisp_acc_fw_arg {
+ unsigned int fw_handle;
+ unsigned int index;
+ void __user *value;
+ size_t size;
+};
+
+/*
+ * Set arguments after first mapping with ATOMISP_IOC_ACC_S_MAPPED_ARG.
+ */
+struct atomisp_acc_s_mapped_arg {
+ unsigned int fw_handle;
+ __u32 memory; /* one of enum atomisp_acc_memory */
+ size_t length;
+ unsigned long css_ptr;
+};
+
+struct atomisp_acc_fw_abort {
+ unsigned int fw_handle;
+ /* Timeout in us */
+ unsigned int timeout;
+};
+
+struct atomisp_acc_fw_load {
+ unsigned int size;
+ unsigned int fw_handle;
+ void __user *data;
+};
+
+/*
+ * Load firmware to specified pipeline.
+ */
+struct atomisp_acc_fw_load_to_pipe {
+ __u32 flags; /* Flags, see below for valid values */
+ unsigned int fw_handle; /* Handle, filled by kernel. */
+ __u32 size; /* Firmware binary size */
+ void __user *data; /* Pointer to firmware */
+ __u32 type; /* Binary type */
+ __u32 reserved[3]; /* Set to zero */
+};
+/*
+ * Set sensor run mode
+ */
+struct atomisp_s_runmode {
+ __u32 mode;
+};
+
+#define ATOMISP_ACC_FW_LOAD_FL_PREVIEW (1 << 0)
+#define ATOMISP_ACC_FW_LOAD_FL_COPY (1 << 1)
+#define ATOMISP_ACC_FW_LOAD_FL_VIDEO (1 << 2)
+#define ATOMISP_ACC_FW_LOAD_FL_CAPTURE (1 << 3)
+#define ATOMISP_ACC_FW_LOAD_FL_ACC (1 << 4)
+#define ATOMISP_ACC_FW_LOAD_FL_ENABLE (1 << 16)
+
+#define ATOMISP_ACC_FW_LOAD_TYPE_NONE 0 /* Normal binary: don't use */
+#define ATOMISP_ACC_FW_LOAD_TYPE_OUTPUT 1 /* Stage on output */
+#define ATOMISP_ACC_FW_LOAD_TYPE_VIEWFINDER 2 /* Stage on viewfinder */
+#define ATOMISP_ACC_FW_LOAD_TYPE_STANDALONE 3 /* Stand-alone acceleration */
+
+struct atomisp_acc_map {
+ __u32 flags; /* Flags, see list below */
+ __u32 length; /* Length of data in bytes */
+ void __user *user_ptr; /* Pointer into user space */
+ unsigned long css_ptr; /* Pointer into CSS address space */
+ __u32 reserved[4]; /* Set to zero */
+};
+
+#define ATOMISP_MAP_FLAG_NOFLUSH 0x0001 /* Do not flush cache */
+#define ATOMISP_MAP_FLAG_CACHED 0x0002 /* Enable cache */
+
+struct atomisp_acc_state {
+ __u32 flags; /* Flags, see list below */
+#define ATOMISP_STATE_FLAG_ENABLE ATOMISP_ACC_FW_LOAD_FL_ENABLE
+ unsigned int fw_handle;
+};
+
+struct atomisp_update_exposure {
+ unsigned int gain;
+ unsigned int digi_gain;
+ unsigned int update_gain;
+ unsigned int update_digi_gain;
+};
+
+/*
+ * V4L2 private internal data interface.
+ * -----------------------------------------------------------------------------
+ * struct v4l2_private_int_data - request private data stored in video device
+ * internal memory.
+ * @size: sanity check to ensure the userspace buffer can hold the whole
+ *	  private data. If not, the kernel makes a partial copy (or none if
+ *	  @size == 0). @size is always corrected to the minimum necessary
+ *	  size if the IOCTL returns no error.
+ * @data: pointer to userspace buffer.
+ */
+struct v4l2_private_int_data {
+ __u32 size;
+ void __user *data;
+ __u32 reserved[2];
+};
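+
+/*
+ * Illustrative userspace sketch (fd and priv are hypothetical): query the
+ * required size first, then fetch the data, e.g. with
+ * ATOMISP_IOC_G_SENSOR_PRIV_INT_DATA defined below.
+ *
+ *	struct v4l2_private_int_data priv = { .size = 0 };
+ *
+ *	ioctl(fd, ATOMISP_IOC_G_SENSOR_PRIV_INT_DATA, &priv);
+ *	priv.data = malloc(priv.size);
+ *	ioctl(fd, ATOMISP_IOC_G_SENSOR_PRIV_INT_DATA, &priv);
+ */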
+
+enum atomisp_sensor_ae_bracketing_mode {
+ SENSOR_AE_BRACKETING_MODE_OFF = 0,
+ SENSOR_AE_BRACKETING_MODE_SINGLE, /* back to SW standby after bracketing */
+ SENSOR_AE_BRACKETING_MODE_SINGLE_TO_STREAMING, /* back to normal streaming after bracketing */
+ SENSOR_AE_BRACKETING_MODE_LOOP, /* continue AE bracketing in loop mode */
+};
+
+struct atomisp_sensor_ae_bracketing_info {
+ unsigned int modes; /* bit mask to indicate supported modes */
+ unsigned int lut_depth;
+};
+
+struct atomisp_sensor_ae_bracketing_lut_entry {
+ __u16 coarse_integration_time;
+ __u16 analog_gain;
+ __u16 digital_gain;
+};
+
+struct atomisp_sensor_ae_bracketing_lut {
+ struct atomisp_sensor_ae_bracketing_lut_entry *lut;
+ unsigned int lut_size;
+};
+
+/*Private IOCTLs for ISP */
+#define ATOMISP_IOC_G_XNR \
+ _IOR('v', BASE_VIDIOC_PRIVATE + 0, int)
+#define ATOMISP_IOC_S_XNR \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 0, int)
+#define ATOMISP_IOC_G_NR \
+ _IOR('v', BASE_VIDIOC_PRIVATE + 1, struct atomisp_nr_config)
+#define ATOMISP_IOC_S_NR \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 1, struct atomisp_nr_config)
+#define ATOMISP_IOC_G_TNR \
+ _IOR('v', BASE_VIDIOC_PRIVATE + 2, struct atomisp_tnr_config)
+#define ATOMISP_IOC_S_TNR \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 2, struct atomisp_tnr_config)
+#define ATOMISP_IOC_G_HISTOGRAM \
+ _IOWR('v', BASE_VIDIOC_PRIVATE + 3, struct atomisp_histogram)
+#define ATOMISP_IOC_S_HISTOGRAM \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 3, struct atomisp_histogram)
+#define ATOMISP_IOC_G_BLACK_LEVEL_COMP \
+ _IOR('v', BASE_VIDIOC_PRIVATE + 4, struct atomisp_ob_config)
+#define ATOMISP_IOC_S_BLACK_LEVEL_COMP \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 4, struct atomisp_ob_config)
+#define ATOMISP_IOC_G_EE \
+ _IOR('v', BASE_VIDIOC_PRIVATE + 5, struct atomisp_ee_config)
+#define ATOMISP_IOC_S_EE \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 5, struct atomisp_ee_config)
+/* Digital Image Stabilization:
+ * 1. get dis statistics: reads DIS statistics from ISP (every frame)
+ * 2. set dis coefficients: set DIS filter coefficients (one time)
+ * 3. set dis motion vector: set motion vector (result of DIS, every frame)
+ */
+#define ATOMISP_IOC_G_DIS_STAT \
+ _IOWR('v', BASE_VIDIOC_PRIVATE + 6, struct atomisp_dis_statistics)
+
+#define ATOMISP_IOC_G_DVS2_BQ_RESOLUTIONS \
+ _IOR('v', BASE_VIDIOC_PRIVATE + 6, struct atomisp_dvs2_bq_resolutions)
+
+#define ATOMISP_IOC_S_DIS_COEFS \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 6, struct atomisp_dis_coefficients)
+
+#define ATOMISP_IOC_S_DIS_VECTOR \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 6, struct atomisp_dvs_6axis_config)
+
+#define ATOMISP_IOC_G_3A_STAT \
+ _IOWR('v', BASE_VIDIOC_PRIVATE + 7, struct atomisp_3a_statistics)
+#define ATOMISP_IOC_G_ISP_PARM \
+ _IOR('v', BASE_VIDIOC_PRIVATE + 8, struct atomisp_parm)
+#define ATOMISP_IOC_S_ISP_PARM \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 8, struct atomisp_parm)
+#define ATOMISP_IOC_G_ISP_GAMMA \
+ _IOR('v', BASE_VIDIOC_PRIVATE + 9, struct atomisp_gamma_table)
+#define ATOMISP_IOC_S_ISP_GAMMA \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 9, struct atomisp_gamma_table)
+#define ATOMISP_IOC_G_ISP_GDC_TAB \
+ _IOR('v', BASE_VIDIOC_PRIVATE + 10, struct atomisp_morph_table)
+#define ATOMISP_IOC_S_ISP_GDC_TAB \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 10, struct atomisp_morph_table)
+#define ATOMISP_IOC_ISP_MAKERNOTE \
+ _IOWR('v', BASE_VIDIOC_PRIVATE + 11, struct atomisp_makernote_info)
+
+/* macc parameter control*/
+#define ATOMISP_IOC_G_ISP_MACC \
+ _IOR('v', BASE_VIDIOC_PRIVATE + 12, struct atomisp_macc_config)
+#define ATOMISP_IOC_S_ISP_MACC \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 12, struct atomisp_macc_config)
+
+/* Defect pixel detection & Correction */
+#define ATOMISP_IOC_G_ISP_BAD_PIXEL_DETECTION \
+ _IOR('v', BASE_VIDIOC_PRIVATE + 13, struct atomisp_dp_config)
+#define ATOMISP_IOC_S_ISP_BAD_PIXEL_DETECTION \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 13, struct atomisp_dp_config)
+
+/* False Color Correction */
+#define ATOMISP_IOC_G_ISP_FALSE_COLOR_CORRECTION \
+ _IOR('v', BASE_VIDIOC_PRIVATE + 14, struct atomisp_de_config)
+#define ATOMISP_IOC_S_ISP_FALSE_COLOR_CORRECTION \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 14, struct atomisp_de_config)
+
+/* ctc parameter control */
+#define ATOMISP_IOC_G_ISP_CTC \
+ _IOR('v', BASE_VIDIOC_PRIVATE + 15, struct atomisp_ctc_table)
+#define ATOMISP_IOC_S_ISP_CTC \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 15, struct atomisp_ctc_table)
+
+/* white balance Correction */
+#define ATOMISP_IOC_G_ISP_WHITE_BALANCE \
+ _IOR('v', BASE_VIDIOC_PRIVATE + 16, struct atomisp_wb_config)
+#define ATOMISP_IOC_S_ISP_WHITE_BALANCE \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 16, struct atomisp_wb_config)
+
+/* fpn table loading */
+#define ATOMISP_IOC_S_ISP_FPN_TABLE \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 17, struct v4l2_framebuffer)
+
+/* overlay image loading */
+#define ATOMISP_IOC_G_ISP_OVERLAY \
+ _IOWR('v', BASE_VIDIOC_PRIVATE + 18, struct atomisp_overlay)
+#define ATOMISP_IOC_S_ISP_OVERLAY \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 18, struct atomisp_overlay)
+
+/* bcd driver bridge */
+#define ATOMISP_IOC_CAMERA_BRIDGE \
+ _IOWR('v', BASE_VIDIOC_PRIVATE + 19, struct atomisp_bc_video_package)
+
+/* Sensor resolution specific info for AE */
+#define ATOMISP_IOC_G_SENSOR_MODE_DATA \
+ _IOR('v', BASE_VIDIOC_PRIVATE + 20, struct atomisp_sensor_mode_data)
+
+#define ATOMISP_IOC_S_EXPOSURE \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 21, struct atomisp_exposure)
+
+/* sensor calibration registers group */
+#define ATOMISP_IOC_G_SENSOR_CALIBRATION_GROUP \
+ _IOWR('v', BASE_VIDIOC_PRIVATE + 22, struct atomisp_calibration_group)
+
+/* white balance Correction */
+#define ATOMISP_IOC_G_3A_CONFIG \
+ _IOR('v', BASE_VIDIOC_PRIVATE + 23, struct atomisp_3a_config)
+#define ATOMISP_IOC_S_3A_CONFIG \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 23, struct atomisp_3a_config)
+
+/* Accelerate ioctls */
+#define ATOMISP_IOC_ACC_LOAD \
+ _IOWR('v', BASE_VIDIOC_PRIVATE + 24, struct atomisp_acc_fw_load)
+
+#define ATOMISP_IOC_ACC_UNLOAD \
+ _IOWR('v', BASE_VIDIOC_PRIVATE + 24, unsigned int)
+
+/* For CSS 1.0 only */
+#define ATOMISP_IOC_ACC_S_ARG \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 24, struct atomisp_acc_fw_arg)
+
+#define ATOMISP_IOC_ACC_START \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 24, unsigned int)
+
+#define ATOMISP_IOC_ACC_WAIT \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 25, unsigned int)
+
+#define ATOMISP_IOC_ACC_ABORT \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 25, struct atomisp_acc_fw_abort)
+
+#define ATOMISP_IOC_ACC_DESTAB \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 25, struct atomisp_acc_fw_arg)
+
+/* sensor OTP memory read */
+#define ATOMISP_IOC_G_SENSOR_PRIV_INT_DATA \
+ _IOWR('v', BASE_VIDIOC_PRIVATE + 26, struct v4l2_private_int_data)
+
+/* LCS (shading) table write */
+#define ATOMISP_IOC_S_ISP_SHD_TAB \
+ _IOWR('v', BASE_VIDIOC_PRIVATE + 27, struct atomisp_shading_table)
+
+/* Gamma Correction */
+#define ATOMISP_IOC_G_ISP_GAMMA_CORRECTION \
+ _IOR('v', BASE_VIDIOC_PRIVATE + 28, struct atomisp_gc_config)
+
+#define ATOMISP_IOC_S_ISP_GAMMA_CORRECTION \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 28, struct atomisp_gc_config)
+
+/* motor internal memory read */
+#define ATOMISP_IOC_G_MOTOR_PRIV_INT_DATA \
+ _IOWR('v', BASE_VIDIOC_PRIVATE + 29, struct v4l2_private_int_data)
+
+/*
+ * Ioctls to map and unmap user buffers to the CSS address space for
+ * acceleration. The user fills the length and user_ptr fields and sets
+ * all other fields to zero; the kernel may modify the flags and sets
+ * css_ptr.
+ */
+#define ATOMISP_IOC_ACC_MAP \
+ _IOWR('v', BASE_VIDIOC_PRIVATE + 30, struct atomisp_acc_map)
+
+/* The user fills the length, user_ptr and css_ptr fields and zeroes the others. */
+#define ATOMISP_IOC_ACC_UNMAP \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 30, struct atomisp_acc_map)
+
+#define ATOMISP_IOC_ACC_S_MAPPED_ARG \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 30, struct atomisp_acc_s_mapped_arg)
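A hedged user-space sketch of the mapping contract described above. The field
names (length, user_ptr, css_ptr) follow the comments; the full layout of
struct atomisp_acc_map is defined earlier in this header, and the include path
for it is assumed here.

#include <string.h>
#include <sys/ioctl.h>
#include <linux/atomisp.h>      /* hypothetical install path for this header */

/* Map a user buffer into the CSS address space; the kernel fills map->css_ptr. */
static int example_acc_map(int fd, void *buf, size_t len,
                           struct atomisp_acc_map *map)
{
        memset(map, 0, sizeof(*map));   /* unused fields must be zero */
        map->length = len;
        map->user_ptr = buf;
        return ioctl(fd, ATOMISP_IOC_ACC_MAP, map);
}

/* Undo the mapping: length, user_ptr and css_ptr must match the earlier map. */
static int example_acc_unmap(int fd, const struct atomisp_acc_map *mapped)
{
        struct atomisp_acc_map unmap;

        memset(&unmap, 0, sizeof(unmap));       /* other fields must be zero */
        unmap.length = mapped->length;
        unmap.user_ptr = mapped->user_ptr;
        unmap.css_ptr = mapped->css_ptr;
        return ioctl(fd, ATOMISP_IOC_ACC_UNMAP, &unmap);
}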
+
+#define ATOMISP_IOC_ACC_LOAD_TO_PIPE \
+ _IOWR('v', BASE_VIDIOC_PRIVATE + 31, struct atomisp_acc_fw_load_to_pipe)
+
+#define ATOMISP_IOC_S_PARAMETERS \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 32, struct atomisp_parameters)
+
+#define ATOMISP_IOC_S_CONT_CAPTURE_CONFIG \
+ _IOWR('v', BASE_VIDIOC_PRIVATE + 33, struct atomisp_cont_capture_conf)
+
+#define ATOMISP_IOC_G_METADATA \
+ _IOWR('v', BASE_VIDIOC_PRIVATE + 34, struct atomisp_metadata)
+
+#define ATOMISP_IOC_G_METADATA_BY_TYPE \
+ _IOWR('v', BASE_VIDIOC_PRIVATE + 34, struct atomisp_metadata_with_type)
+
+#define ATOMISP_IOC_EXT_ISP_CTRL \
+ _IOWR('v', BASE_VIDIOC_PRIVATE + 35, struct atomisp_ext_isp_ctrl)
+
+#define ATOMISP_IOC_EXP_ID_UNLOCK \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 36, int)
+
+#define ATOMISP_IOC_EXP_ID_CAPTURE \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 37, int)
+
+#define ATOMISP_IOC_S_ENABLE_DZ_CAPT_PIPE \
+ _IOWR('v', BASE_VIDIOC_PRIVATE + 38, unsigned int)
+
+#define ATOMISP_IOC_G_FORMATS_CONFIG \
+ _IOR('v', BASE_VIDIOC_PRIVATE + 39, struct atomisp_formats_config)
+
+#define ATOMISP_IOC_S_FORMATS_CONFIG \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 39, struct atomisp_formats_config)
+
+#define ATOMISP_IOC_S_EXPOSURE_WINDOW \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 40, struct atomisp_ae_window)
+
+#define ATOMISP_IOC_S_ACC_STATE \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 41, struct atomisp_acc_state)
+
+#define ATOMISP_IOC_G_ACC_STATE \
+ _IOR('v', BASE_VIDIOC_PRIVATE + 41, struct atomisp_acc_state)
+
+#define ATOMISP_IOC_INJECT_A_FAKE_EVENT \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 42, int)
+
+#define ATOMISP_IOC_G_SENSOR_AE_BRACKETING_INFO \
+ _IOR('v', BASE_VIDIOC_PRIVATE + 43, struct atomisp_sensor_ae_bracketing_info)
+
+#define ATOMISP_IOC_S_SENSOR_AE_BRACKETING_MODE \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 43, unsigned int)
+
+#define ATOMISP_IOC_G_SENSOR_AE_BRACKETING_MODE \
+ _IOR('v', BASE_VIDIOC_PRIVATE + 43, unsigned int)
+
+#define ATOMISP_IOC_S_SENSOR_AE_BRACKETING_LUT \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 43, struct atomisp_sensor_ae_bracketing_lut)
+
+#define ATOMISP_IOC_G_INVALID_FRAME_NUM \
+ _IOR('v', BASE_VIDIOC_PRIVATE + 44, unsigned int)
+
+#define ATOMISP_IOC_S_ARRAY_RESOLUTION \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 45, struct atomisp_resolution)
+
+/* for depth mode sensor frame sync compensation */
+#define ATOMISP_IOC_G_DEPTH_SYNC_COMP \
+ _IOR('v', BASE_VIDIOC_PRIVATE + 46, unsigned int)
+
+#define ATOMISP_IOC_S_SENSOR_EE_CONFIG \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 47, unsigned int)
+
+#define ATOMISP_IOC_S_SENSOR_RUNMODE \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 48, struct atomisp_s_runmode)
+
+#define ATOMISP_IOC_G_UPDATE_EXPOSURE \
+ _IOWR('v', BASE_VIDIOC_PRIVATE + 49, struct atomisp_update_exposure)
+
+/*
+ * Reserved ioctls. A customer implements them internally, so we cannot
+ * reuse these numbers without causing an ABI conflict.
+ * Either way, those ioctls are hacks and are not implemented by us:
+ *
+ * #define ATOMISP_IOC_G_SENSOR_REG \
+ * _IOW('v', BASE_VIDIOC_PRIVATE + 55, struct atomisp_sensor_regs)
+ * #define ATOMISP_IOC_S_SENSOR_REG \
+ * _IOW('v', BASE_VIDIOC_PRIVATE + 56, struct atomisp_sensor_regs)
+ */
+
+/* ISP Private control IDs */
+#define V4L2_CID_ATOMISP_BAD_PIXEL_DETECTION \
+ (V4L2_CID_PRIVATE_BASE + 0)
+#define V4L2_CID_ATOMISP_POSTPROCESS_GDC_CAC \
+ (V4L2_CID_PRIVATE_BASE + 1)
+#define V4L2_CID_ATOMISP_VIDEO_STABLIZATION \
+ (V4L2_CID_PRIVATE_BASE + 2)
+#define V4L2_CID_ATOMISP_FIXED_PATTERN_NR \
+ (V4L2_CID_PRIVATE_BASE + 3)
+#define V4L2_CID_ATOMISP_FALSE_COLOR_CORRECTION \
+ (V4L2_CID_PRIVATE_BASE + 4)
+#define V4L2_CID_ATOMISP_LOW_LIGHT \
+ (V4L2_CID_PRIVATE_BASE + 5)
+
+/* Camera class:
+ * Exposure, Flash and privacy (indicator) light controls, to be upstreamed */
+#define V4L2_CID_CAMERA_LASTP1 (V4L2_CID_CAMERA_CLASS_BASE + 1024)
+
+#define V4L2_CID_FOCAL_ABSOLUTE (V4L2_CID_CAMERA_LASTP1 + 0)
+#define V4L2_CID_FNUMBER_ABSOLUTE (V4L2_CID_CAMERA_LASTP1 + 1)
+#define V4L2_CID_FNUMBER_RANGE (V4L2_CID_CAMERA_LASTP1 + 2)
+
+/* Flash related CIDs, see also:
+ * http://linuxtv.org/downloads/v4l-dvb-apis/extended-controls.html\
+ * #flash-controls */
+
+/* Request a number of flash-exposed frames. The frame status can be
+ * found in the reserved field in the v4l2_buffer struct. */
+#define V4L2_CID_REQUEST_FLASH (V4L2_CID_CAMERA_LASTP1 + 3)
+/* Query flash driver status. See enum atomisp_flash_status above. */
+#define V4L2_CID_FLASH_STATUS (V4L2_CID_CAMERA_LASTP1 + 5)
+/* Set the flash mode (see enum atomisp_flash_mode) */
+#define V4L2_CID_FLASH_MODE (V4L2_CID_CAMERA_LASTP1 + 10)
+
+/* VCM slew control */
+#define V4L2_CID_VCM_SLEW (V4L2_CID_CAMERA_LASTP1 + 11)
+/* VCM step time */
+#define V4L2_CID_VCM_TIMEING (V4L2_CID_CAMERA_LASTP1 + 12)
+
+/* Query Focus Status */
+#define V4L2_CID_FOCUS_STATUS (V4L2_CID_CAMERA_LASTP1 + 14)
+
+/* Query sensor's binning factor */
+#define V4L2_CID_BIN_FACTOR_HORZ (V4L2_CID_CAMERA_LASTP1 + 15)
+#define V4L2_CID_BIN_FACTOR_VERT (V4L2_CID_CAMERA_LASTP1 + 16)
+
+/* number of frames to skip at stream start */
+#define V4L2_CID_G_SKIP_FRAMES (V4L2_CID_CAMERA_LASTP1 + 17)
+
+/* Query sensor's 2A status */
+#define V4L2_CID_2A_STATUS (V4L2_CID_CAMERA_LASTP1 + 18)
+#define V4L2_2A_STATUS_AE_READY (1 << 0)
+#define V4L2_2A_STATUS_AWB_READY (1 << 1)
+
+#define V4L2_CID_FMT_AUTO (V4L2_CID_CAMERA_LASTP1 + 19)
+
+#define V4L2_CID_RUN_MODE (V4L2_CID_CAMERA_LASTP1 + 20)
+#define ATOMISP_RUN_MODE_VIDEO 1
+#define ATOMISP_RUN_MODE_STILL_CAPTURE 2
+#define ATOMISP_RUN_MODE_CONTINUOUS_CAPTURE 3
+#define ATOMISP_RUN_MODE_PREVIEW 4
+#define ATOMISP_RUN_MODE_SDV 5
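A hedged user-space sketch of driving V4L2_CID_RUN_MODE through the standard
V4L2 control interface, assuming the video device node is already open as fd:

#include <sys/ioctl.h>
#include <linux/videodev2.h>

/* Switch the ISP pipeline, e.g. to ATOMISP_RUN_MODE_PREVIEW. */
static int example_set_run_mode(int fd, int run_mode)
{
        struct v4l2_control ctrl = {
                .id    = V4L2_CID_RUN_MODE,
                .value = run_mode,
        };

        return ioctl(fd, VIDIOC_S_CTRL, &ctrl);
}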
+
+#define V4L2_CID_ENABLE_VFPP (V4L2_CID_CAMERA_LASTP1 + 21)
+#define V4L2_CID_ATOMISP_CONTINUOUS_MODE (V4L2_CID_CAMERA_LASTP1 + 22)
+#define V4L2_CID_ATOMISP_CONTINUOUS_RAW_BUFFER_SIZE \
+ (V4L2_CID_CAMERA_LASTP1 + 23)
+#define V4L2_CID_ATOMISP_CONTINUOUS_VIEWFINDER \
+ (V4L2_CID_CAMERA_LASTP1 + 24)
+
+#define V4L2_CID_VFPP (V4L2_CID_CAMERA_LASTP1 + 25)
+#define ATOMISP_VFPP_ENABLE 0
+#define ATOMISP_VFPP_DISABLE_SCALER 1
+#define ATOMISP_VFPP_DISABLE_LOWLAT 2
+
+/* Query real flash status register value */
+#define V4L2_CID_FLASH_STATUS_REGISTER (V4L2_CID_CAMERA_LASTP1 + 26)
+
+#define V4L2_CID_START_ZSL_CAPTURE (V4L2_CID_CAMERA_LASTP1 + 28)
+/* Lock and unlock raw buffer */
+#define V4L2_CID_ENABLE_RAW_BUFFER_LOCK (V4L2_CID_CAMERA_LASTP1 + 29)
+
+#define V4L2_CID_DEPTH_MODE (V4L2_CID_CAMERA_LASTP1 + 30)
+
+#define V4L2_CID_EXPOSURE_ZONE_NUM (V4L2_CID_CAMERA_LASTP1 + 31)
+/* Disable digital zoom */
+#define V4L2_CID_DISABLE_DZ (V4L2_CID_CAMERA_LASTP1 + 32)
+
+#define V4L2_CID_TEST_PATTERN_COLOR_R (V4L2_CID_CAMERA_LASTP1 + 33)
+#define V4L2_CID_TEST_PATTERN_COLOR_GR (V4L2_CID_CAMERA_LASTP1 + 34)
+#define V4L2_CID_TEST_PATTERN_COLOR_GB (V4L2_CID_CAMERA_LASTP1 + 35)
+#define V4L2_CID_TEST_PATTERN_COLOR_B (V4L2_CID_CAMERA_LASTP1 + 36)
+
+#define V4L2_CID_ATOMISP_SELECT_ISP_VERSION (V4L2_CID_CAMERA_LASTP1 + 38)
+
+#define V4L2_BUF_FLAG_BUFFER_INVALID 0x0400
+#define V4L2_BUF_FLAG_BUFFER_VALID 0x0800
+
+#define V4L2_BUF_TYPE_VIDEO_CAPTURE_ION (V4L2_BUF_TYPE_PRIVATE + 1024)
+
+#define V4L2_EVENT_ATOMISP_3A_STATS_READY (V4L2_EVENT_PRIVATE_START + 1)
+#define V4L2_EVENT_ATOMISP_METADATA_READY (V4L2_EVENT_PRIVATE_START + 2)
+#define V4L2_EVENT_ATOMISP_RAW_BUFFERS_ALLOC_DONE (V4L2_EVENT_PRIVATE_START + 3)
+#define V4L2_EVENT_ATOMISP_ACC_COMPLETE (V4L2_EVENT_PRIVATE_START + 4)
+#define V4L2_EVENT_ATOMISP_PAUSE_BUFFER (V4L2_EVENT_PRIVATE_START + 5)
+#define V4L2_EVENT_ATOMISP_CSS_RESET (V4L2_EVENT_PRIVATE_START + 6)
+/* Nonstandard color effects for V4L2_CID_COLORFX */
+enum {
+ V4L2_COLORFX_SKIN_WHITEN_LOW = 1001,
+ V4L2_COLORFX_SKIN_WHITEN_HIGH = 1002,
+ V4L2_COLORFX_WARM = 1003,
+ V4L2_COLORFX_COLD = 1004,
+ V4L2_COLORFX_WASHED = 1005,
+ V4L2_COLORFX_RED = 1006,
+ V4L2_COLORFX_GREEN = 1007,
+ V4L2_COLORFX_BLUE = 1008,
+ V4L2_COLORFX_PINK = 1009,
+ V4L2_COLORFX_YELLOW = 1010,
+ V4L2_COLORFX_PURPLE = 1011,
+};
+
+#endif /* _ATOM_ISP_H */
+#endif /* CSS15*/
diff --git a/drivers/staging/media/atomisp/include/linux/atomisp_gmin_platform.h b/drivers/staging/media/atomisp/include/linux/atomisp_gmin_platform.h
new file mode 100644
index 000000000000..5390b97ac6e7
--- /dev/null
+++ b/drivers/staging/media/atomisp/include/linux/atomisp_gmin_platform.h
@@ -0,0 +1,40 @@
+/*
+ * Support for Intel MID SoC Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2014 Intel Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef ATOMISP_GMIN_PLATFORM_H_
+#define ATOMISP_GMIN_PLATFORM_H_
+
+#include "atomisp_platform.h"
+
+const struct atomisp_camera_caps *atomisp_get_default_camera_caps(void);
+const struct atomisp_platform_data *atomisp_get_platform_data(void);
+const struct camera_af_platform_data *camera_get_af_platform_data(void);
+int atomisp_register_i2c_module(struct v4l2_subdev *subdev,
+ struct camera_sensor_platform_data *plat_data,
+ enum intel_v4l2_subdev_type type);
+struct v4l2_subdev *atomisp_gmin_find_subdev(struct i2c_adapter *adapter,
+ struct i2c_board_info *board_info);
+int atomisp_gmin_remove_subdev(struct v4l2_subdev *sd);
+int gmin_get_config_var(struct device *dev, const char *var, char *out, size_t *out_len);
+int gmin_get_var_int(struct device *dev, const char *var, int def);
+int camera_sensor_csi(struct v4l2_subdev *sd, u32 port,
+ u32 lanes, u32 format, u32 bayer_order, int flag);
+struct camera_sensor_platform_data *gmin_camera_platform_data(
+ struct v4l2_subdev *subdev,
+ enum atomisp_input_format csi_format,
+ enum atomisp_bayer_order csi_bayer);
+
+int atomisp_gmin_register_vcm_control(struct camera_vcm_control *);
+
+#endif
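A hedged sketch of how a sensor driver's probe path might use the helpers
declared above. The RAW_CAMERA type, the input format and the Bayer order are
only example values, and error handling is trimmed:

/* Register a RAW sensor subdev with the atomisp/G-Min glue. */
static int example_register_sensor(struct v4l2_subdev *sd)
{
        struct camera_sensor_platform_data *pdata;

        pdata = gmin_camera_platform_data(sd,
                                          ATOMISP_INPUT_FORMAT_RAW_10,
                                          atomisp_bayer_order_grbg);
        if (!pdata)
                return -ENODEV;

        return atomisp_register_i2c_module(sd, pdata, RAW_CAMERA);
}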
diff --git a/drivers/staging/media/atomisp/include/linux/atomisp_platform.h b/drivers/staging/media/atomisp/include/linux/atomisp_platform.h
new file mode 100644
index 000000000000..dbac2b777dad
--- /dev/null
+++ b/drivers/staging/media/atomisp/include/linux/atomisp_platform.h
@@ -0,0 +1,262 @@
+/*
+ * Support for Medifield PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+#ifndef ATOMISP_PLATFORM_H_
+#define ATOMISP_PLATFORM_H_
+
+#include <linux/i2c.h>
+#include <linux/sfi.h>
+#include <media/v4l2-subdev.h>
+#include "atomisp.h"
+
+#define MAX_SENSORS_PER_PORT 4
+#define MAX_STREAMS_PER_CHANNEL 2
+
+#define CAMERA_MODULE_ID_LEN 64
+
+enum atomisp_bayer_order {
+ atomisp_bayer_order_grbg,
+ atomisp_bayer_order_rggb,
+ atomisp_bayer_order_bggr,
+ atomisp_bayer_order_gbrg
+};
+
+enum atomisp_input_stream_id {
+ ATOMISP_INPUT_STREAM_GENERAL = 0,
+ ATOMISP_INPUT_STREAM_CAPTURE = 0,
+ ATOMISP_INPUT_STREAM_POSTVIEW,
+ ATOMISP_INPUT_STREAM_PREVIEW,
+ ATOMISP_INPUT_STREAM_VIDEO,
+ ATOMISP_INPUT_STREAM_NUM
+};
+
+enum atomisp_input_format {
+ ATOMISP_INPUT_FORMAT_YUV420_8_LEGACY,/* 8 bits per subpixel (legacy) */
+ ATOMISP_INPUT_FORMAT_YUV420_8, /* 8 bits per subpixel */
+ ATOMISP_INPUT_FORMAT_YUV420_10,/* 10 bits per subpixel */
+ ATOMISP_INPUT_FORMAT_YUV420_16,/* 16 bits per subpixel */
+ ATOMISP_INPUT_FORMAT_YUV422_8, /* UYVY..UVYV, 8 bits per subpixel */
+ ATOMISP_INPUT_FORMAT_YUV422_10,/* UYVY..UVYV, 10 bits per subpixel */
+ ATOMISP_INPUT_FORMAT_YUV422_16,/* UYVY..UVYV, 16 bits per subpixel */
+ ATOMISP_INPUT_FORMAT_RGB_444, /* BGR..BGR, 4 bits per subpixel */
+ ATOMISP_INPUT_FORMAT_RGB_555, /* BGR..BGR, 5 bits per subpixel */
+ ATOMISP_INPUT_FORMAT_RGB_565, /* BGR..BGR, 5 bits B and R, 6 bits G */
+ ATOMISP_INPUT_FORMAT_RGB_666, /* BGR..BGR, 6 bits per subpixel */
+ ATOMISP_INPUT_FORMAT_RGB_888, /* BGR..BGR, 8 bits per subpixel */
+ ATOMISP_INPUT_FORMAT_RAW_6, /* RAW data, 6 bits per pixel */
+ ATOMISP_INPUT_FORMAT_RAW_7, /* RAW data, 7 bits per pixel */
+ ATOMISP_INPUT_FORMAT_RAW_8, /* RAW data, 8 bits per pixel */
+ ATOMISP_INPUT_FORMAT_RAW_10, /* RAW data, 10 bits per pixel */
+ ATOMISP_INPUT_FORMAT_RAW_12, /* RAW data, 12 bits per pixel */
+ ATOMISP_INPUT_FORMAT_RAW_14, /* RAW data, 14 bits per pixel */
+ ATOMISP_INPUT_FORMAT_RAW_16, /* RAW data, 16 bits per pixel */
+ ATOMISP_INPUT_FORMAT_BINARY_8, /* Binary byte stream. */
+
+ /* CSI2-MIPI specific format: Generic short packet data. It is used to
+ * keep the timing information for the opening/closing of shutters,
+ * triggering of flashes, and so on.
+ */
+ ATOMISP_INPUT_FORMAT_GENERIC_SHORT1, /* Generic Short Packet Code 1 */
+ ATOMISP_INPUT_FORMAT_GENERIC_SHORT2, /* Generic Short Packet Code 2 */
+ ATOMISP_INPUT_FORMAT_GENERIC_SHORT3, /* Generic Short Packet Code 3 */
+ ATOMISP_INPUT_FORMAT_GENERIC_SHORT4, /* Generic Short Packet Code 4 */
+ ATOMISP_INPUT_FORMAT_GENERIC_SHORT5, /* Generic Short Packet Code 5 */
+ ATOMISP_INPUT_FORMAT_GENERIC_SHORT6, /* Generic Short Packet Code 6 */
+ ATOMISP_INPUT_FORMAT_GENERIC_SHORT7, /* Generic Short Packet Code 7 */
+ ATOMISP_INPUT_FORMAT_GENERIC_SHORT8, /* Generic Short Packet Code 8 */
+
+ /* CSI2-MIPI specific format: YUV data.
+ */
+ ATOMISP_INPUT_FORMAT_YUV420_8_SHIFT, /* YUV420 8-bit (Chroma Shifted
+ Pixel Sampling) */
+ ATOMISP_INPUT_FORMAT_YUV420_10_SHIFT, /* YUV420 10-bit (Chroma Shifted
+ Pixel Sampling) */
+
+ /* CSI2-MIPI specific format: Generic long packet data
+ */
+ ATOMISP_INPUT_FORMAT_EMBEDDED, /* Embedded 8-bit non-image data */
+
+ /* CSI2-MIPI specific format: User defined byte-based data. For example,
+ * the data transmitter (e.g. the SoC sensor) can keep the JPEG data as
+ * the User Defined Data Type 4 and the MPEG data as the
+ * User Defined Data Type 7.
+ */
+ ATOMISP_INPUT_FORMAT_USER_DEF1, /* User defined 8-bit data type 1 */
+ ATOMISP_INPUT_FORMAT_USER_DEF2, /* User defined 8-bit data type 2 */
+ ATOMISP_INPUT_FORMAT_USER_DEF3, /* User defined 8-bit data type 3 */
+ ATOMISP_INPUT_FORMAT_USER_DEF4, /* User defined 8-bit data type 4 */
+ ATOMISP_INPUT_FORMAT_USER_DEF5, /* User defined 8-bit data type 5 */
+ ATOMISP_INPUT_FORMAT_USER_DEF6, /* User defined 8-bit data type 6 */
+ ATOMISP_INPUT_FORMAT_USER_DEF7, /* User defined 8-bit data type 7 */
+ ATOMISP_INPUT_FORMAT_USER_DEF8, /* User defined 8-bit data type 8 */
+};
+
+enum intel_v4l2_subdev_type {
+ RAW_CAMERA = 1,
+ SOC_CAMERA = 2,
+ CAMERA_MOTOR = 3,
+ LED_FLASH = 4,
+ XENON_FLASH = 5,
+ FILE_INPUT = 6,
+ TEST_PATTERN = 7,
+};
+
+struct intel_v4l2_subdev_id {
+ char name[17];
+ enum intel_v4l2_subdev_type type;
+ enum atomisp_camera_port port;
+};
+
+struct intel_v4l2_subdev_i2c_board_info {
+ struct i2c_board_info board_info;
+ int i2c_adapter_id;
+};
+
+struct intel_v4l2_subdev_table {
+ struct intel_v4l2_subdev_i2c_board_info v4l2_subdev;
+ enum intel_v4l2_subdev_type type;
+ enum atomisp_camera_port port;
+ struct v4l2_subdev *subdev;
+};
+
+struct atomisp_platform_data {
+ struct intel_v4l2_subdev_table *subdevs;
+};
+
+/* Describe the capabilities of a single sensor. */
+struct atomisp_sensor_caps {
+ /* The number of streams this sensor can output. */
+ int stream_num;
+ bool is_slave;
+};
+
+/* Describe the capabilities of the sensors connected to one camera port. */
+struct atomisp_camera_caps {
+ /* The number of sensors connected to this camera port. */
+ int sensor_num;
+ /* The capabilities of each sensor. */
+ struct atomisp_sensor_caps sensor[MAX_SENSORS_PER_PORT];
+ /* Define whether stream control is required for multiple streams. */
+ bool multi_stream_ctrl;
+};
+
+/*
+ * A sensor or an external ISP can send multiple streams with different
+ * MIPI data types in the same virtual channel. This information needs to
+ * come from the sensor or the external ISP.
+ */
+struct atomisp_isys_config_info {
+ u8 input_format;
+ u16 width;
+ u16 height;
+};
+
+struct atomisp_input_stream_info {
+ enum atomisp_input_stream_id stream;
+ u8 enable;
+ /* Sensor driver fills ch_id with the id
+ of the virtual channel. */
+ u8 ch_id;
+ /* Tells how many streams are in this virtual channel. If 0, ignore the
+ * rest and take the input format from mipi_info. */
+ u8 isys_configs;
+ /*
+ * If isys_configs is more than 0, the sensor needs to configure the
+ * input format differently. Width and height can be 0; if they are
+ * non-zero, the corresponding data needs to be set.
+ */
+ struct atomisp_isys_config_info isys_info[MAX_STREAMS_PER_CHANNEL];
+};
+
+struct camera_vcm_control;
+struct camera_vcm_ops {
+ int (*power_up)(struct v4l2_subdev *sd, struct camera_vcm_control *vcm);
+ int (*power_down)(struct v4l2_subdev *sd,
+ struct camera_vcm_control *vcm);
+ int (*queryctrl)(struct v4l2_subdev *sd, struct v4l2_queryctrl *qc,
+ struct camera_vcm_control *vcm);
+ int (*g_ctrl)(struct v4l2_subdev *sd, struct v4l2_control *ctrl,
+ struct camera_vcm_control *vcm);
+ int (*s_ctrl)(struct v4l2_subdev *sd, struct v4l2_control *ctrl,
+ struct camera_vcm_control *vcm);
+};
+
+struct camera_vcm_control {
+ char camera_module[CAMERA_MODULE_ID_LEN];
+ struct camera_vcm_ops *ops;
+ struct list_head list;
+};
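A hedged sketch of how a VCM driver could hook into this interface; the module
name and the mostly empty callbacks below are placeholders:

static int example_vcm_power_up(struct v4l2_subdev *sd,
                                struct camera_vcm_control *vcm)
{
        /* enable the VCM supply / wake the device here */
        return 0;
}

static int example_vcm_power_down(struct v4l2_subdev *sd,
                                  struct camera_vcm_control *vcm)
{
        return 0;
}

static struct camera_vcm_ops example_vcm_ops = {
        .power_up   = example_vcm_power_up,
        .power_down = example_vcm_power_down,
        /* .queryctrl / .g_ctrl / .s_ctrl filled in by a real driver */
};

static struct camera_vcm_control example_vcm = {
        .camera_module = "example-module",      /* placeholder module id */
        .ops = &example_vcm_ops,
};

/* Typically registered once at init time:
 * atomisp_gmin_register_vcm_control(&example_vcm);
 */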
+
+struct camera_sensor_platform_data {
+ int (*gpio_ctrl)(struct v4l2_subdev *subdev, int flag);
+ int (*flisclk_ctrl)(struct v4l2_subdev *subdev, int flag);
+ int (*power_ctrl)(struct v4l2_subdev *subdev, int flag);
+ int (*csi_cfg)(struct v4l2_subdev *subdev, int flag);
+ bool (*low_fps)(void);
+ int (*platform_init)(struct i2c_client *);
+ int (*platform_deinit)(void);
+ char *(*msr_file_name)(void);
+ struct atomisp_camera_caps *(*get_camera_caps)(void);
+ int (*gpio_intr_ctrl)(struct v4l2_subdev *subdev);
+
+ /* New G-Min power and GPIO interface, replaces
+ * power/gpio_ctrl with methods to control individual
+ * lines as implemented on all known camera modules. */
+ int (*gpio0_ctrl)(struct v4l2_subdev *subdev, int on);
+ int (*gpio1_ctrl)(struct v4l2_subdev *subdev, int on);
+ int (*v1p8_ctrl)(struct v4l2_subdev *subdev, int on);
+ int (*v2p8_ctrl)(struct v4l2_subdev *subdev, int on);
+ int (*v1p2_ctrl)(struct v4l2_subdev *subdev, int on);
+ struct camera_vcm_control * (*get_vcm_ctrl)(struct v4l2_subdev *subdev,
+ char *module_id);
+};
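A hedged sketch of a sensor driver's power-up path built on the per-line
callbacks above; the rail and GPIO ordering is illustrative and in practice
sensor specific:

static int example_sensor_power_up(struct v4l2_subdev *sd,
                                   struct camera_sensor_platform_data *pdata)
{
        int ret;

        ret = pdata->v1p8_ctrl(sd, 1);          /* digital rail */
        if (ret)
                return ret;

        ret = pdata->v2p8_ctrl(sd, 1);          /* analog rail */
        if (ret)
                goto fail_v2p8;

        ret = pdata->flisclk_ctrl(sd, 1);       /* reference clock */
        if (ret)
                goto fail_clk;

        ret = pdata->gpio0_ctrl(sd, 1);         /* release reset */
        if (ret)
                goto fail_gpio;

        return 0;

fail_gpio:
        pdata->flisclk_ctrl(sd, 0);
fail_clk:
        pdata->v2p8_ctrl(sd, 0);
fail_v2p8:
        pdata->v1p8_ctrl(sd, 0);
        return ret;
}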
+
+struct camera_af_platform_data {
+ int (*power_ctrl)(struct v4l2_subdev *subdev, int flag);
+};
+
+const struct camera_af_platform_data *camera_get_af_platform_data(void);
+
+struct camera_mipi_info {
+ enum atomisp_camera_port port;
+ unsigned int num_lanes;
+ enum atomisp_input_format input_format;
+ enum atomisp_bayer_order raw_bayer_order;
+ struct atomisp_sensor_mode_data data;
+ enum atomisp_input_format metadata_format;
+ uint32_t metadata_width;
+ uint32_t metadata_height;
+ const uint32_t *metadata_effective_width;
+};
+
+extern const struct atomisp_platform_data *atomisp_get_platform_data(void);
+extern const struct atomisp_camera_caps *atomisp_get_default_camera_caps(void);
+
+/* API from old platform_camera.h, new CPUID implementation */
+#define __IS_SOC(x) (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && \
+ boot_cpu_data.x86 == 6 && \
+ boot_cpu_data.x86_model == x)
+
+#define IS_MFLD __IS_SOC(0x27)
+#define IS_BYT __IS_SOC(0x37)
+#define IS_CHT __IS_SOC(0x4C)
+#define IS_MOFD __IS_SOC(0x5A)
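A hedged example of how the CPUID helpers above read in driver code; the
grouping is purely illustrative:

static inline bool example_is_gmin_soc(void)
{
        /* e.g. treat Bay Trail (0x37) and Cherry Trail (0x4C) together */
        return IS_BYT || IS_CHT;
}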
+
+#endif /* ATOMISP_PLATFORM_H_ */
diff --git a/drivers/staging/media/atomisp/include/linux/libmsrlisthelper.h b/drivers/staging/media/atomisp/include/linux/libmsrlisthelper.h
new file mode 100644
index 000000000000..589f4eae38ca
--- /dev/null
+++ b/drivers/staging/media/atomisp/include/linux/libmsrlisthelper.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2013 Intel Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+#ifndef __LIBMSRLISTHELPER_H__
+#define __LIBMSRLISTHELPER_H__
+
+struct i2c_client;
+struct firmware;
+
+extern int load_msr_list(struct i2c_client *client, char *path,
+ const struct firmware **fw);
+extern int apply_msr_data(struct i2c_client *client, const struct firmware *fw);
+extern void release_msr_list(struct i2c_client *client,
+ const struct firmware *fw);
+
+
+#endif /* ifndef __LIBMSRLISTHELPER_H__ */
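Based only on the prototypes above, a hedged sketch of the intended call
sequence; the firmware file name is hypothetical:

static char example_msr_path[] = "example_sensor_msr.bin";  /* hypothetical */

static int example_apply_msr_list(struct i2c_client *client)
{
        const struct firmware *fw;
        int ret;

        ret = load_msr_list(client, example_msr_path, &fw);
        if (ret)
                return ret;

        ret = apply_msr_data(client, fw);
        release_msr_list(client, fw);
        return ret;
}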
diff --git a/drivers/staging/media/atomisp/include/linux/vlv2_plat_clock.h b/drivers/staging/media/atomisp/include/linux/vlv2_plat_clock.h
new file mode 100644
index 000000000000..ed709bdd6498
--- /dev/null
+++ b/drivers/staging/media/atomisp/include/linux/vlv2_plat_clock.h
@@ -0,0 +1,30 @@
+/*
+ * vlv2_plat_clock.h
+ *
+ * Copyright (C) 2013 Intel Corp
+ * Author: Asutosh Pathak <asutosh.pathak@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ */
+
+#ifndef __VLV2_PLAT_CLOCK_H
+#define __VLV2_PLAT_CLOCK_H
+
+int vlv2_plat_set_clock_freq(int clock_num, int freq_type);
+int vlv2_plat_get_clock_freq(int clock_num);
+
+int vlv2_plat_configure_clock(int clock_num, u32 conf);
+int vlv2_plat_get_clock_status(int clock_num);
+
+#endif /* __VLV2_PLAT_CLOCK_H */
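A hedged sketch of the expected usage; the clock number and frequency selector
below are placeholder values:

#define EXAMPLE_SENSOR_CLK      0       /* hypothetical clock_num */
#define EXAMPLE_SENSOR_FREQ_SEL 1       /* hypothetical freq_type selector */

static int example_enable_sensor_clock(void)
{
        int ret;

        ret = vlv2_plat_set_clock_freq(EXAMPLE_SENSOR_CLK,
                                       EXAMPLE_SENSOR_FREQ_SEL);
        if (ret)
                return ret;

        /* read back the status to confirm the request took effect */
        return vlv2_plat_get_clock_status(EXAMPLE_SENSOR_CLK) < 0 ? -EIO : 0;
}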
diff --git a/drivers/staging/media/atomisp/include/media/lm3554.h b/drivers/staging/media/atomisp/include/media/lm3554.h
new file mode 100644
index 000000000000..7d6a8c05dd52
--- /dev/null
+++ b/drivers/staging/media/atomisp/include/media/lm3554.h
@@ -0,0 +1,136 @@
+/*
+ * include/media/lm3554.h
+ *
+ * Copyright (c) 2010-2012 Intel Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+#ifndef _LM3554_H_
+#define _LM3554_H_
+
+#include <linux/videodev2.h>
+#include <media/v4l2-subdev.h>
+
+#define LM3554_NAME "lm3554"
+#define LM3554_ID 3554
+
+#define v4l2_queryctrl_entry_integer(_id, _name,\
+ _minimum, _maximum, _step, \
+ _default_value, _flags) \
+ {\
+ .id = (_id), \
+ .type = V4L2_CTRL_TYPE_INTEGER, \
+ .name = _name, \
+ .minimum = (_minimum), \
+ .maximum = (_maximum), \
+ .step = (_step), \
+ .default_value = (_default_value),\
+ .flags = (_flags),\
+ }
+#define v4l2_queryctrl_entry_boolean(_id, _name,\
+ _default_value, _flags) \
+ {\
+ .id = (_id), \
+ .type = V4L2_CTRL_TYPE_BOOLEAN, \
+ .name = _name, \
+ .minimum = 0, \
+ .maximum = 1, \
+ .step = 1, \
+ .default_value = (_default_value),\
+ .flags = (_flags),\
+ }
+
+#define s_ctrl_id_entry_integer(_id, _name, \
+ _minimum, _maximum, _step, \
+ _default_value, _flags, \
+ _s_ctrl, _g_ctrl) \
+ {\
+ .qc = v4l2_queryctrl_entry_integer(_id, _name,\
+ _minimum, _maximum, _step,\
+ _default_value, _flags), \
+ .s_ctrl = _s_ctrl, \
+ .g_ctrl = _g_ctrl, \
+ }
+
+#define s_ctrl_id_entry_boolean(_id, _name, \
+ _default_value, _flags, \
+ _s_ctrl, _g_ctrl) \
+ {\
+ .qc = v4l2_queryctrl_entry_boolean(_id, _name,\
+ _default_value, _flags), \
+ .s_ctrl = _s_ctrl, \
+ .g_ctrl = _g_ctrl, \
+ }
+
+/* Value settings for Flash Time-out Duration*/
+#define LM3554_DEFAULT_TIMEOUT 512U
+#define LM3554_MIN_TIMEOUT 32U
+#define LM3554_MAX_TIMEOUT 1024U
+#define LM3554_TIMEOUT_STEPSIZE 32U
+
+/* Flash modes */
+#define LM3554_MODE_SHUTDOWN 0
+#define LM3554_MODE_INDICATOR 1
+#define LM3554_MODE_TORCH 2
+#define LM3554_MODE_FLASH 3
+
+/* timer delay time */
+#define LM3554_TIMER_DELAY 5
+
+/* Percentage <-> value macros */
+#define LM3554_MIN_PERCENT 0U
+#define LM3554_MAX_PERCENT 100U
+#define LM3554_CLAMP_PERCENTAGE(val) \
+ clamp(val, LM3554_MIN_PERCENT, LM3554_MAX_PERCENT)
+
+#define LM3554_VALUE_TO_PERCENT(v, step) (((((unsigned long)(v))*(step))+50)/100)
+#define LM3554_PERCENT_TO_VALUE(p, step) (((((unsigned long)(p))*100)+(step>>1))/(step))
+
+/* Product specific limits
+ * TODO: get these from platform data */
+#define LM3554_FLASH_MAX_LVL 0x0F /* 1191mA */
+
+/* Flash brightness, input is percentage, output is [0..15] */
+#define LM3554_FLASH_STEP \
+ ((100ul*(LM3554_MAX_PERCENT)+((LM3554_FLASH_MAX_LVL)>>1))/((LM3554_FLASH_MAX_LVL)))
+#define LM3554_FLASH_DEFAULT_BRIGHTNESS \
+ LM3554_VALUE_TO_PERCENT(13, LM3554_FLASH_STEP)
+
+/* Torch brightness, input is percentage, output is [0..7] */
+#define LM3554_TORCH_STEP 1250
+#define LM3554_TORCH_DEFAULT_BRIGHTNESS \
+ LM3554_VALUE_TO_PERCENT(2, LM3554_TORCH_STEP)
+
+/* Indicator brightness, input is percentage, output is [0..3] */
+#define LM3554_INDICATOR_STEP 2500
+#define LM3554_INDICATOR_DEFAULT_BRIGHTNESS \
+ LM3554_VALUE_TO_PERCENT(1, LM3554_INDICATOR_STEP)
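A hedged worked example of the conversion macros above: with
LM3554_FLASH_MAX_LVL = 0x0F, LM3554_FLASH_STEP evaluates to
(100*100 + 7)/15 = 667, so 100% maps to (10000 + 333)/667 = 15 (the top
register level) and 50% to (5000 + 333)/667 = 7.

/* Convert a requested flash brightness percentage to a register level. */
static inline unsigned long example_flash_percent_to_level(unsigned int percent)
{
        /* e.g. 100 % -> 15, 50 % -> 7, 0 % -> 0 with a step of 667 */
        return LM3554_PERCENT_TO_VALUE(percent, LM3554_FLASH_STEP);
}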
+
+/*
+ * lm3554_platform_data - Flash controller platform data
+ */
+struct lm3554_platform_data {
+ int gpio_torch;
+ int gpio_strobe;
+ int gpio_reset;
+
+ unsigned int current_limit;
+ unsigned int envm_tx2;
+ unsigned int tx2_polarity;
+};
+
+#endif /* _LM3554_H_ */
+
diff --git a/drivers/staging/media/atomisp/include/media/lm3642.h b/drivers/staging/media/atomisp/include/media/lm3642.h
new file mode 100644
index 000000000000..545d95763335
--- /dev/null
+++ b/drivers/staging/media/atomisp/include/media/lm3642.h
@@ -0,0 +1,153 @@
+/*
+ * include/media/lm3642.h
+ *
+ * Copyright (c) 2010-2012 Intel Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.
+ */
+
+#ifndef _LM3642_H_
+#define _LM3642_H_
+
+#include <linux/videodev2.h>
+#include <media/v4l2-subdev.h>
+
+#define LM3642_NAME "lm3642"
+#define LM3642_ID 3642
+
+#define v4l2_queryctrl_entry_integer(_id, _name,\
+ _minimum, _maximum, _step, \
+ _default_value, _flags) \
+ {\
+ .id = (_id), \
+ .type = V4L2_CTRL_TYPE_INTEGER, \
+ .name = _name, \
+ .minimum = (_minimum), \
+ .maximum = (_maximum), \
+ .step = (_step), \
+ .default_value = (_default_value),\
+ .flags = (_flags),\
+ }
+#define v4l2_queryctrl_entry_boolean(_id, _name,\
+ _default_value, _flags) \
+ {\
+ .id = (_id), \
+ .type = V4L2_CTRL_TYPE_BOOLEAN, \
+ .name = _name, \
+ .minimum = 0, \
+ .maximum = 1, \
+ .step = 1, \
+ .default_value = (_default_value),\
+ .flags = (_flags),\
+ }
+
+#define s_ctrl_id_entry_integer(_id, _name, \
+ _minimum, _maximum, _step, \
+ _default_value, _flags, \
+ _s_ctrl, _g_ctrl) \
+ {\
+ .qc = v4l2_queryctrl_entry_integer(_id, _name,\
+ _minimum, _maximum, _step,\
+ _default_value, _flags), \
+ .s_ctrl = _s_ctrl, \
+ .g_ctrl = _g_ctrl, \
+ }
+
+#define s_ctrl_id_entry_boolean(_id, _name, \
+ _default_value, _flags, \
+ _s_ctrl, _g_ctrl) \
+ {\
+ .qc = v4l2_queryctrl_entry_boolean(_id, _name,\
+ _default_value, _flags), \
+ .s_ctrl = _s_ctrl, \
+ .g_ctrl = _g_ctrl, \
+ }
+
+
+/* Default Values */
+#define LM3642_DEFAULT_TIMEOUT 300U
+#define LM3642_DEFAULT_RAMP_TIME 0x10 /* 1.024ms */
+#define LM3642_DEFAULT_INDICATOR_CURRENT 0x01 /* 1.88A */
+#define LM3642_DEFAULT_FLASH_CURRENT 0x0f /* 1500mA */
+
+/* Value settings for Flash Time-out Duration*/
+#define LM3642_MIN_TIMEOUT 100U
+#define LM3642_MAX_TIMEOUT 800U
+#define LM3642_TIMEOUT_STEPSIZE 100U
+
+/* Flash modes */
+#define LM3642_MODE_SHUTDOWN 0
+#define LM3642_MODE_INDICATOR 1
+#define LM3642_MODE_TORCH 2
+#define LM3642_MODE_FLASH 3
+
+/* timer delay time */
+#define LM3642_TIMER_DELAY 5
+
+/* Percentage <-> value macros */
+#define LM3642_MIN_PERCENT 0U
+#define LM3642_MAX_PERCENT 100U
+#define LM3642_CLAMP_PERCENTAGE(val) \
+ clamp(val, LM3642_MIN_PERCENT, LM3642_MAX_PERCENT)
+
+#define LM3642_VALUE_TO_PERCENT(v, step) \
+ (((((unsigned long)((v)+1))*(step))+50)/100)
+#define LM3642_PERCENT_TO_VALUE(p, step) \
+ (((((unsigned long)(p))*100)+((step)>>1))/(step)-1)
+
+/* Product specific limits
+ * TODO: get these from platform data */
+#define LM3642_FLASH_MAX_LVL 0x0F /* 1500mA */
+#define LM3642_TORCH_MAX_LVL 0x07 /* 187mA */
+#define LM3642_INDICATOR_MAX_LVL 0x01 /* 1.88A */
+
+/* Flash brightness, input is percentage, output is [0..15] */
+#define LM3642_FLASH_STEP \
+ ((100ul*(LM3642_MAX_PERCENT) \
+ +((LM3642_FLASH_MAX_LVL+1)>>1)) \
+ /((LM3642_FLASH_MAX_LVL+1)))
+#define LM3642_FLASH_DEFAULT_BRIGHTNESS \
+ LM3642_VALUE_TO_PERCENT(15, LM3642_FLASH_STEP)
+
+/* Torch brightness, input is percentage, output is [0..7] */
+#define LM3642_TORCH_STEP \
+ ((100ul*(LM3642_MAX_PERCENT) \
+ +((LM3642_TORCH_MAX_LVL+1)>>1)) \
+ /((LM3642_TORCH_MAX_LVL+1)))
+#define LM3642_TORCH_DEFAULT_BRIGHTNESS \
+ LM3642_VALUE_TO_PERCENT(0, LM3642_TORCH_STEP)
+
+/* Indicator brightness, input is percentage, output is [0..1] */
+#define LM3642_INDICATOR_STEP \
+ ((100ul*(LM3642_MAX_PERCENT) \
+ +((LM3642_INDICATOR_MAX_LVL+1)>>1)) \
+ /((LM3642_INDICATOR_MAX_LVL+1)))
+#define LM3642_INDICATOR_DEFAULT_BRIGHTNESS \
+ LM3642_VALUE_TO_PERCENT(1, LM3642_INDICATOR_STEP)
+
+/*
+ * lm3642_platform_data - Flash controller platform data
+ */
+struct lm3642_platform_data {
+ int gpio_torch;
+ int gpio_strobe;
+ int (*power_ctrl)(struct v4l2_subdev *subdev, int on);
+
+ unsigned int torch_en;
+ unsigned int flash_en;
+ unsigned int tx_en;
+ unsigned int ivfm_en;
+};
+
+#endif /* _LM3642_H_ */
+
diff --git a/drivers/staging/media/atomisp/pci/Kconfig b/drivers/staging/media/atomisp/pci/Kconfig
new file mode 100644
index 000000000000..a72421431c7a
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/Kconfig
@@ -0,0 +1,13 @@
+#
+# Kconfig for ISP driver
+#
+
+config VIDEO_ATOMISP
+ tristate "Intel Atom Image Signal Processor Driver"
+ depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ select VIDEOBUF_VMALLOC
+ ---help---
+ Say Y here if your platform supports the Intel Atom SoC
+ camera imaging subsystem.
+ To compile this driver as a module, choose M here: the
+ module will be called atomisp.
diff --git a/drivers/staging/media/atomisp/pci/Makefile b/drivers/staging/media/atomisp/pci/Makefile
new file mode 100644
index 000000000000..61ad1fbb1ee6
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for ISP driver
+#
+
+obj-$(CONFIG_VIDEO_ATOMISP) += atomisp2/
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/Makefile b/drivers/staging/media/atomisp/pci/atomisp2/Makefile
new file mode 100644
index 000000000000..3fa7c1c1479f
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/Makefile
@@ -0,0 +1,355 @@
+atomisp-objs += \
+ atomisp_drvfs.o \
+ atomisp_file.o \
+ css2400/sh_css_mipi.o \
+ css2400/runtime/pipeline/src/pipeline.o \
+ css2400/runtime/spctrl/src/spctrl.o \
+ css2400/runtime/rmgr/src/rmgr.o \
+ css2400/runtime/rmgr/src/rmgr_vbuf.o \
+ css2400/runtime/isp_param/src/isp_param.o \
+ css2400/runtime/inputfifo/src/inputfifo.o \
+ css2400/runtime/queue/src/queue_access.o \
+ css2400/runtime/queue/src/queue.o \
+ css2400/runtime/frame/src/frame.o \
+ css2400/runtime/eventq/src/eventq.o \
+ css2400/runtime/binary/src/binary.o \
+ css2400/runtime/timer/src/timer.o \
+ css2400/runtime/isys/src/csi_rx_rmgr.o \
+ css2400/runtime/isys/src/isys_stream2mmio_rmgr.o \
+ css2400/runtime/isys/src/virtual_isys.o \
+ css2400/runtime/isys/src/rx.o \
+ css2400/runtime/isys/src/isys_dma_rmgr.o \
+ css2400/runtime/isys/src/ibuf_ctrl_rmgr.o \
+ css2400/runtime/isys/src/isys_init.o \
+ css2400/runtime/bufq/src/bufq.o \
+ css2400/runtime/ifmtr/src/ifmtr.o \
+ css2400/runtime/debug/src/ia_css_debug.o \
+ css2400/runtime/event/src/event.o \
+ css2400/sh_css_sp.o \
+ css2400/css_2400_system/spmem_dump.o \
+ css2400/css_2400_system/hive_isp_css_2400_system_generated/ia_css_isp_states.o \
+ css2400/css_2400_system/hive_isp_css_2400_system_generated/ia_css_isp_configs.o \
+ css2400/css_2400_system/hive_isp_css_2400_system_generated/ia_css_isp_params.o \
+ css2400/sh_css_stream_format.o \
+ css2400/sh_css_hrt.o \
+ css2400/sh_css_properties.o \
+ css2400/memory_realloc.o \
+ css2400/hive_isp_css_shared/host/tag.o \
+ css2400/sh_css_params.o \
+ css2400/sh_css.o \
+ css2400/isp/kernels/hdr/ia_css_hdr.host.o \
+ css2400/isp/kernels/ynr/ynr_2/ia_css_ynr2.host.o \
+ css2400/isp/kernels/ynr/ynr_1.0/ia_css_ynr.host.o \
+ css2400/isp/kernels/conversion/conversion_1.0/ia_css_conversion.host.o \
+ css2400/isp/kernels/fpn/fpn_1.0/ia_css_fpn.host.o \
+ css2400/isp/kernels/tnr/tnr_1.0/ia_css_tnr.host.o \
+ css2400/isp/kernels/vf/vf_1.0/ia_css_vf.host.o \
+ css2400/isp/kernels/raw/raw_1.0/ia_css_raw.host.o \
+ css2400/isp/kernels/ref/ref_1.0/ia_css_ref.host.o \
+ css2400/isp/kernels/qplane/qplane_2/ia_css_qplane.host.o \
+ css2400/isp/kernels/norm/norm_1.0/ia_css_norm.host.o \
+ css2400/isp/kernels/output/output_1.0/ia_css_output.host.o \
+ css2400/isp/kernels/raw_aa_binning/raw_aa_binning_1.0/ia_css_raa.host.o \
+ css2400/isp/kernels/macc/macc1_5/ia_css_macc1_5_table.host.o \
+ css2400/isp/kernels/macc/macc1_5/ia_css_macc1_5.host.o \
+ css2400/isp/kernels/macc/macc_1.0/ia_css_macc.host.o \
+ css2400/isp/kernels/macc/macc_1.0/ia_css_macc_table.host.o \
+ css2400/isp/kernels/csc/csc_1.0/ia_css_csc.host.o \
+ css2400/isp/kernels/bnr/bnr_1.0/ia_css_bnr.host.o \
+ css2400/isp/kernels/bnr/bnr2_2/ia_css_bnr2_2.host.o \
+ css2400/isp/kernels/dpc2/ia_css_dpc2.host.o \
+ css2400/isp/kernels/dpc2/ia_css_dpc2_default.host.o \
+ css2400/isp/kernels/fc/fc_1.0/ia_css_formats.host.o \
+ css2400/isp/kernels/ctc/ctc_1.0/ia_css_ctc.host.o \
+ css2400/isp/kernels/ctc/ctc_1.0/ia_css_ctc_table.host.o \
+ css2400/isp/kernels/ctc/ctc2/ia_css_ctc2.host.o \
+ css2400/isp/kernels/ctc/ctc1_5/ia_css_ctc1_5.host.o \
+ css2400/isp/kernels/bh/bh_2/ia_css_bh.host.o \
+ css2400/isp/kernels/bnlm/ia_css_bnlm_default.host.o \
+ css2400/isp/kernels/bnlm/ia_css_bnlm.host.o \
+ css2400/isp/kernels/tdf/tdf_1.0/ia_css_tdf.host.o \
+ css2400/isp/kernels/tdf/tdf_1.0/ia_css_tdf_default.host.o \
+ css2400/isp/kernels/dvs/dvs_1.0/ia_css_dvs.host.o \
+ css2400/isp/kernels/anr/anr_1.0/ia_css_anr.host.o \
+ css2400/isp/kernels/anr/anr_2/ia_css_anr2_table.host.o \
+ css2400/isp/kernels/anr/anr_2/ia_css_anr2.host.o \
+ css2400/isp/kernels/dp/dp_1.0/ia_css_dp.host.o \
+ css2400/isp/kernels/sdis/sdis_1.0/ia_css_sdis.host.o \
+ css2400/isp/kernels/sdis/sdis_2/ia_css_sdis2.host.o \
+ css2400/isp/kernels/cnr/cnr_2/ia_css_cnr2.host.o \
+ css2400/isp/kernels/cnr/cnr_1.0/ia_css_cnr.host.o \
+ css2400/isp/kernels/xnr/xnr_1.0/ia_css_xnr.host.o \
+ css2400/isp/kernels/xnr/xnr_1.0/ia_css_xnr_table.host.o \
+ css2400/isp/kernels/xnr/xnr_3.0/ia_css_xnr3.host.o \
+ css2400/isp/kernels/de/de_1.0/ia_css_de.host.o \
+ css2400/isp/kernels/de/de_2/ia_css_de2.host.o \
+ css2400/isp/kernels/gc/gc_2/ia_css_gc2.host.o \
+ css2400/isp/kernels/gc/gc_2/ia_css_gc2_table.host.o \
+ css2400/isp/kernels/gc/gc_1.0/ia_css_gc.host.o \
+ css2400/isp/kernels/gc/gc_1.0/ia_css_gc_table.host.o \
+ css2400/isp/kernels/crop/crop_1.0/ia_css_crop.host.o \
+ css2400/isp/kernels/io_ls/bayer_io_ls/ia_css_bayer_io.host.o \
+ css2400/isp/kernels/aa/aa_2/ia_css_aa2.host.o \
+ css2400/isp/kernels/copy_output/copy_output_1.0/ia_css_copy_output.host.o \
+ css2400/isp/kernels/ob/ob_1.0/ia_css_ob.host.o \
+ css2400/isp/kernels/ob/ob2/ia_css_ob2.host.o \
+ css2400/isp/kernels/iterator/iterator_1.0/ia_css_iterator.host.o \
+ css2400/isp/kernels/wb/wb_1.0/ia_css_wb.host.o \
+ css2400/isp/kernels/eed1_8/ia_css_eed1_8_default.host.o \
+ css2400/isp/kernels/eed1_8/ia_css_eed1_8.host.o \
+ css2400/isp/kernels/sc/sc_1.0/ia_css_sc.host.o \
+ css2400/isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io.host.o \
+ css2400/isp/kernels/ipu2_io_ls/yuv444_io_ls/ia_css_yuv444_io.host.o \
+ css2400/isp/kernels/s3a/s3a_1.0/ia_css_s3a.host.o \
+ css2400/sh_css_pipe.o \
+ css2400/ia_css_device_access.o \
+ css2400/sh_css_host_data.o \
+ css2400/sh_css_mmu.o \
+ css2400/sh_css_metadata.o \
+ css2400/base/refcount/src/refcount.o \
+ css2400/base/circbuf/src/circbuf.o \
+ css2400/sh_css_irq.o \
+ css2400/camera/pipe/src/pipe_binarydesc.o \
+ css2400/camera/pipe/src/pipe_util.o \
+ css2400/camera/pipe/src/pipe_stagedesc.o \
+ css2400/camera/util/src/util.o \
+ css2400/sh_css_metrics.o \
+ css2400/sh_css_version.o \
+ css2400/ia_css_memory_access.o \
+ css2400/sh_css_param_shading.o \
+ css2400/sh_css_morph.o \
+ css2400/sh_css_firmware.o \
+ css2400/hive_isp_css_common/host/isp.o \
+ css2400/hive_isp_css_common/host/gdc.o \
+ css2400/hive_isp_css_common/host/sp.o \
+ css2400/hive_isp_css_common/host/vmem.o \
+ css2400/hive_isp_css_common/host/dma.o \
+ css2400/hive_isp_css_common/host/input_formatter.o \
+ css2400/hive_isp_css_common/host/debug.o \
+ css2400/hive_isp_css_common/host/hmem.o \
+ css2400/hive_isp_css_common/host/gp_device.o \
+ css2400/hive_isp_css_common/host/fifo_monitor.o \
+ css2400/hive_isp_css_common/host/gp_timer.o \
+ css2400/hive_isp_css_common/host/irq.o \
+ css2400/hive_isp_css_common/host/input_system.o \
+ css2400/hive_isp_css_common/host/timed_ctrl.o \
+ css2400/hive_isp_css_common/host/mmu.o \
+ css2400/hive_isp_css_common/host/event_fifo.o \
+ css2400/sh_css_param_dvs.o \
+ css2400/sh_css_shading.o \
+ css2400/sh_css_stream.o \
+ mmu/sh_mmu_mrfld.o \
+ mmu/isp_mmu.o \
+ atomisp_acc.o \
+ atomisp_compat_css20.o \
+ atomisp_fops.o \
+ atomisp_subdev.o \
+ atomisp_ioctl.o \
+ atomisp_compat_ioctl32.o \
+ atomisp_csi2.o \
+ atomisp_cmd.o \
+ atomisp_tpg.o \
+ hmm/hmm_vm.o \
+ hmm/hmm.o \
+ hmm/hmm_bo.o \
+ hmm/hmm_reserved_pool.o \
+ hmm/hmm_dynamic_pool.o \
+ hrt/hive_isp_css_mm_hrt.o \
+ atomisp_v4l2.o
+
+# These will be needed once CHT support is cleanly merged into the driver.
+# Keep them handy here for when we get to that point.
+#
+
+obj-cht= \
+ css2400/css_2401_system/spmem_dump.o \
+ css2400/css_2401_system/hive_isp_css_2401_system_generated/ia_css_isp_states.o \
+ css2400/css_2401_system/hive_isp_css_2401_system_generated/ia_css_isp_configs.o \
+ css2400/css_2401_system/hive_isp_css_2401_system_generated/ia_css_isp_params.o \
+ css2400/css_2401_csi2p_system/spmem_dump.o \
+ css2400/css_2401_csi2p_system/host/isys_stream2mmio.o \
+ css2400/css_2401_csi2p_system/host/ibuf_ctrl.o \
+ css2400/css_2401_csi2p_system/host/isys_irq.o \
+ css2400/css_2401_csi2p_system/host/isys_dma.o \
+ css2400/css_2401_csi2p_system/host/csi_rx.o \
+ css2400/css_2401_csi2p_system/hive_isp_css_2401_system_csi2p_generated/ia_css_isp_states.o \
+ css2400/css_2401_csi2p_system/hive_isp_css_2401_system_csi2p_generated/ia_css_isp_configs.o \
+ css2400/css_2401_csi2p_system/hive_isp_css_2401_system_csi2p_generated/ia_css_isp_params.o \
+
+# -I$(atomisp)/css2400/css_2401_csi2p_system/ \
+# -I$(atomisp)/css2400/css_2401_csi2p_system/hive_isp_css_2401_system_csi2p_generated/ \
+# -I$(atomisp)/css2400/css_2401_csi2p_system/host/ \
+# -I$(atomisp)/css2400/css_2401_csi2p_system/hrt/ \
+# -I$(atomisp)/css2400/css_2401_system/hive_isp_css_2401_system_generated/ \
+# -I$(atomisp)/css2400/css_2401_system/hrt/ \
+
+
+
+obj-$(CONFIG_VIDEO_ATOMISP) += atomisp.o
+
+atomisp = $(srctree)/drivers/staging/media/atomisp/pci/atomisp2
+
+INCLUDES += \
+ -I$(atomisp)/ \
+ -I$(atomisp)/css2400/ \
+ -I$(atomisp)/hrt/ \
+ -I$(atomisp)/include/ \
+ -I$(atomisp)/include/hmm/ \
+ -I$(atomisp)/include/mmu/ \
+ -I$(atomisp)/css2400/base/circbuf/interface/ \
+ -I$(atomisp)/css2400/base/refcount/interface/ \
+ -I$(atomisp)/css2400/camera/pipe/interface/ \
+ -I$(atomisp)/css2400/camera/util/interface/ \
+ -I$(atomisp)/css2400/css_2400_system/ \
+ -I$(atomisp)/css2400/css_2400_system/hive_isp_css_2400_system_generated/ \
+ -I$(atomisp)/css2400/css_2400_system/hrt/ \
+ -I$(atomisp)/css2400/hive_isp_css_common/ \
+ -I$(atomisp)/css2400/hive_isp_css_common/host/ \
+ -I$(atomisp)/css2400/hive_isp_css_include/ \
+ -I$(atomisp)/css2400/hive_isp_css_include/device_access/ \
+ -I$(atomisp)/css2400/hive_isp_css_include/host/ \
+ -I$(atomisp)/css2400/hive_isp_css_include/memory_access/ \
+ -I$(atomisp)/css2400/hive_isp_css_shared/ \
+ -I$(atomisp)/css2400/hive_isp_css_shared/host/ \
+ -I$(atomisp)/css2400/isp/kernels/ \
+ -I$(atomisp)/css2400/isp/kernels/aa/aa_2/ \
+ -I$(atomisp)/css2400/isp/kernels/anr/anr_1.0/ \
+ -I$(atomisp)/css2400/isp/kernels/anr/anr_2/ \
+ -I$(atomisp)/css2400/isp/kernels/bayer_ls/bayer_ls_1.0/ \
+ -I$(atomisp)/css2400/isp/kernels/bh/bh_2/ \
+ -I$(atomisp)/css2400/isp/kernels/bnlm/ \
+ -I$(atomisp)/css2400/isp/kernels/bnr/ \
+ -I$(atomisp)/css2400/isp/kernels/bnr/bnr_1.0/ \
+ -I$(atomisp)/css2400/isp/kernels/bnr/bnr2_2/ \
+ -I$(atomisp)/css2400/isp/kernels/cnr/ \
+ -I$(atomisp)/css2400/isp/kernels/cnr/cnr_1.0/ \
+ -I$(atomisp)/css2400/isp/kernels/cnr/cnr_2/ \
+ -I$(atomisp)/css2400/isp/kernels/conversion/ \
+ -I$(atomisp)/css2400/isp/kernels/conversion/conversion_1.0/ \
+ -I$(atomisp)/css2400/isp/kernels/copy_output/ \
+ -I$(atomisp)/css2400/isp/kernels/copy_output/copy_output_1.0/ \
+ -I$(atomisp)/css2400/isp/kernels/crop/ \
+ -I$(atomisp)/css2400/isp/kernels/crop/crop_1.0/ \
+ -I$(atomisp)/css2400/isp/kernels/csc/ \
+ -I$(atomisp)/css2400/isp/kernels/csc/csc_1.0/ \
+ -I$(atomisp)/css2400/isp/kernels/ctc/ \
+ -I$(atomisp)/css2400/isp/kernels/ctc/ctc_1.0/ \
+ -I$(atomisp)/css2400/isp/kernels/ctc/ctc1_5/ \
+ -I$(atomisp)/css2400/isp/kernels/ctc/ctc2/ \
+ -I$(atomisp)/css2400/isp/kernels/de/ \
+ -I$(atomisp)/css2400/isp/kernels/de/de_1.0/ \
+ -I$(atomisp)/css2400/isp/kernels/de/de_2/ \
+ -I$(atomisp)/css2400/isp/kernels/dpc2/ \
+ -I$(atomisp)/css2400/isp/kernels/dp/ \
+ -I$(atomisp)/css2400/isp/kernels/dp/dp_1.0/ \
+ -I$(atomisp)/css2400/isp/kernels/dvs/ \
+ -I$(atomisp)/css2400/isp/kernels/dvs/dvs_1.0/ \
+ -I$(atomisp)/css2400/isp/kernels/eed1_8/ \
+ -I$(atomisp)/css2400/isp/kernels/fc/ \
+ -I$(atomisp)/css2400/isp/kernels/fc/fc_1.0/ \
+ -I$(atomisp)/css2400/isp/kernels/fixedbds/ \
+ -I$(atomisp)/css2400/isp/kernels/fixedbds/fixedbds_1.0/ \
+ -I$(atomisp)/css2400/isp/kernels/fpn/ \
+ -I$(atomisp)/css2400/isp/kernels/fpn/fpn_1.0/ \
+ -I$(atomisp)/css2400/isp/kernels/gc/ \
+ -I$(atomisp)/css2400/isp/kernels/gc/gc_1.0/ \
+ -I$(atomisp)/css2400/isp/kernels/gc/gc_2/ \
+ -I$(atomisp)/css2400/isp/kernels/hdr/ \
+ -I$(atomisp)/css2400/isp/kernels/io_ls/ \
+ -I$(atomisp)/css2400/isp/kernels/io_ls/bayer_io_ls/ \
+ -I$(atomisp)/css2400/isp/kernels/io_ls/common/ \
+ -I$(atomisp)/css2400/isp/kernels/io_ls/plane_io_ls/ \
+ -I$(atomisp)/css2400/isp/kernels/io_ls/yuv420_io_ls/ \
+ -I$(atomisp)/css2400/isp/kernels/io_ls/yuv444_io_ls/ \
+ -I$(atomisp)/css2400/isp/kernels/ipu2_io_ls/ \
+ -I$(atomisp)/css2400/isp/kernels/ipu2_io_ls/bayer_io_ls/ \
+ -I$(atomisp)/css2400/isp/kernels/ipu2_io_ls/common/ \
+ -I$(atomisp)/css2400/isp/kernels/ipu2_io_ls/plane_io_ls/ \
+ -I$(atomisp)/css2400/isp/kernels/ipu2_io_ls/yuv420_io_ls/ \
+ -I$(atomisp)/css2400/isp/kernels/ipu2_io_ls/yuv444_io_ls/ \
+ -I$(atomisp)/css2400/isp/kernels/iterator/ \
+ -I$(atomisp)/css2400/isp/kernels/iterator/iterator_1.0/ \
+ -I$(atomisp)/css2400/isp/kernels/macc/ \
+ -I$(atomisp)/css2400/isp/kernels/macc/macc_1.0/ \
+ -I$(atomisp)/css2400/isp/kernels/macc/macc1_5/ \
+ -I$(atomisp)/css2400/isp/kernels/norm/ \
+ -I$(atomisp)/css2400/isp/kernels/norm/norm_1.0/ \
+ -I$(atomisp)/css2400/isp/kernels/ob/ \
+ -I$(atomisp)/css2400/isp/kernels/ob/ob_1.0/ \
+ -I$(atomisp)/css2400/isp/kernels/ob/ob2/ \
+ -I$(atomisp)/css2400/isp/kernels/output/ \
+ -I$(atomisp)/css2400/isp/kernels/output/output_1.0/ \
+ -I$(atomisp)/css2400/isp/kernels/qplane/ \
+ -I$(atomisp)/css2400/isp/kernels/qplane/qplane_2/ \
+ -I$(atomisp)/css2400/isp/kernels/raw_aa_binning/ \
+ -I$(atomisp)/css2400/isp/kernels/raw_aa_binning/raw_aa_binning_1.0/ \
+ -I$(atomisp)/css2400/isp/kernels/raw/ \
+ -I$(atomisp)/css2400/isp/kernels/raw/raw_1.0/ \
+ -I$(atomisp)/css2400/isp/kernels/ref/ \
+ -I$(atomisp)/css2400/isp/kernels/ref/ref_1.0/ \
+ -I$(atomisp)/css2400/isp/kernels/s3a/ \
+ -I$(atomisp)/css2400/isp/kernels/s3a/s3a_1.0/ \
+ -I$(atomisp)/css2400/isp/kernels/s3a_stat_ls/ \
+ -I$(atomisp)/css2400/isp/kernels/scale/ \
+ -I$(atomisp)/css2400/isp/kernels/scale/scale_1.0/ \
+ -I$(atomisp)/css2400/isp/kernels/sc/ \
+ -I$(atomisp)/css2400/isp/kernels/sc/sc_1.0/ \
+ -I$(atomisp)/css2400/isp/kernels/sdis/ \
+ -I$(atomisp)/css2400/isp/kernels/sdis/common/ \
+ -I$(atomisp)/css2400/isp/kernels/sdis/sdis_1.0/ \
+ -I$(atomisp)/css2400/isp/kernels/sdis/sdis_2/ \
+ -I$(atomisp)/css2400/isp/kernels/tdf/ \
+ -I$(atomisp)/css2400/isp/kernels/tdf/tdf_1.0/ \
+ -I$(atomisp)/css2400/isp/kernels/tnr/ \
+ -I$(atomisp)/css2400/isp/kernels/tnr/tnr_1.0/ \
+ -I$(atomisp)/css2400/isp/kernels/tnr/tnr3/ \
+ -I$(atomisp)/css2400/isp/kernels/uds/ \
+ -I$(atomisp)/css2400/isp/kernels/uds/uds_1.0/ \
+ -I$(atomisp)/css2400/isp/kernels/vf/ \
+ -I$(atomisp)/css2400/isp/kernels/vf/vf_1.0/ \
+ -I$(atomisp)/css2400/isp/kernels/wb/ \
+ -I$(atomisp)/css2400/isp/kernels/wb/wb_1.0/ \
+ -I$(atomisp)/css2400/isp/kernels/xnr/ \
+ -I$(atomisp)/css2400/isp/kernels/xnr/xnr_1.0/ \
+ -I$(atomisp)/css2400/isp/kernels/xnr/xnr_3.0/ \
+ -I$(atomisp)/css2400/isp/kernels/ynr/ \
+ -I$(atomisp)/css2400/isp/kernels/ynr/ynr_1.0/ \
+ -I$(atomisp)/css2400/isp/kernels/ynr/ynr_2/ \
+ -I$(atomisp)/css2400/isp/kernels/yuv_ls \
+ -I$(atomisp)/css2400/isp/kernels/yuv_ls/yuv_ls_1.0/ \
+ -I$(atomisp)/css2400/isp/modes/interface/ \
+ -I$(atomisp)/css2400/runtime/binary/interface/ \
+ -I$(atomisp)/css2400/runtime/bufq/interface/ \
+ -I$(atomisp)/css2400/runtime/debug/interface/ \
+ -I$(atomisp)/css2400/runtime/event/interface/ \
+ -I$(atomisp)/css2400/runtime/eventq/interface/ \
+ -I$(atomisp)/css2400/runtime/frame/interface/ \
+ -I$(atomisp)/css2400/runtime/ifmtr/interface/ \
+ -I$(atomisp)/css2400/runtime/inputfifo/interface/ \
+ -I$(atomisp)/css2400/runtime/isp_param/interface/ \
+ -I$(atomisp)/css2400/runtime/isys/interface/ \
+ -I$(atomisp)/css2400/runtime/isys/src/ \
+ -I$(atomisp)/css2400/runtime/pipeline/interface/ \
+ -I$(atomisp)/css2400/runtime/queue/interface/ \
+ -I$(atomisp)/css2400/runtime/queue/src/ \
+ -I$(atomisp)/css2400/runtime/rmgr/interface/ \
+ -I$(atomisp)/css2400/runtime/spctrl/interface/ \
+ -I$(atomisp)/css2400/runtime/tagger/interface/
+
+ifeq ($(CONFIG_ION),y)
+INCLUDES += -I$(srctree)/drivers/staging/android/ion
+endif
+
+DEFINES := -DHRT_HW -DHRT_ISP_CSS_CUSTOM_HOST -DHRT_USE_VIR_ADDRS -D__HOST__
+#DEFINES += -DUSE_DYNAMIC_BIN
+#DEFINES += -DISP_POWER_GATING
+#DEFINES += -DUSE_INTERRUPTS
+#DEFINES += -DUSE_SSSE3
+#DEFINES += -DPUNIT_CAMERA_BUSY
+#DEFINES += -DUSE_KMEM_CACHE
+
+DEFINES += -DATOMISP_POSTFIX=\"css2400b0_v21\" -DISP2400B0
+DEFINES += -DSYSTEM_hive_isp_css_2400_system -DISP2400
+
+ccflags-y += $(INCLUDES) $(DEFINES) -fno-common -Werror
+
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp-regs.h b/drivers/staging/media/atomisp/pci/atomisp2/atomisp-regs.h
new file mode 100644
index 000000000000..513a430ee01a
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp-regs.h
@@ -0,0 +1,209 @@
+/*
+ * Support for Medifield PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2012 Intel Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+
+#ifndef ATOMISP_REGS_H
+#define ATOMISP_REGS_H
+
+/* common register definitions */
+#define PUNIT_PORT 0x04
+#define CCK_PORT 0x14
+
+#define PCICMDSTS 0x01
+#define INTR 0x0f
+#define MSI_CAPID 0x24
+#define MSI_ADDRESS 0x25
+#define MSI_DATA 0x26
+#define INTR_CTL 0x27
+
+#define PCI_MSI_CAPID 0x90
+#define PCI_MSI_ADDR 0x94
+#define PCI_MSI_DATA 0x98
+#define PCI_INTERRUPT_CTRL 0x9C
+#define PCI_I_CONTROL 0xfc
+
+/* MRFLD specific register definitions */
+#define MRFLD_CSI_AFE 0x39
+#define MRFLD_CSI_CONTROL 0x3a
+#define MRFLD_CSI_RCOMP 0x3d
+
+#define MRFLD_PCI_PMCS 0x84
+#define MRFLD_PCI_CSI_ACCESS_CTRL_VIOL 0xd4
+#define MRFLD_PCI_CSI_AFE_HS_CONTROL 0xdc
+#define MRFLD_PCI_CSI_AFE_RCOMP_CONTROL 0xe0
+#define MRFLD_PCI_CSI_CONTROL 0xe8
+#define MRFLD_PCI_CSI_AFE_TRIM_CONTROL 0xe4
+#define MRFLD_PCI_CSI_DEADLINE_CONTROL 0xec
+#define MRFLD_PCI_CSI_RCOMP_CONTROL 0xf4
+
+/* Select Arasan (legacy)/Intel input system */
+#define MRFLD_PCI_CSI_CONTROL_PARPATHEN BIT(24)
+/* Enable CSI interface (ANN B0/K0) */
+#define MRFLD_PCI_CSI_CONTROL_CSI_READY BIT(25)
+
+/*
+ * Enables the combining of adjacent 32-byte read requests to the same
+ * cache line. When cleared, each 32-byte read request is sent as a
+ * separate request on the IB interface.
+ */
+#define MRFLD_PCI_I_CONTROL_ENABLE_READ_COMBINING 0x1
+
+/*
+ * Register: MRFLD_PCI_CSI_RCOMP_CONTROL
+ * If cleared, the high speed clock going to the digital logic is gated when
+ * RCOMP update is happening. The clock is gated for a minimum of 100 nsec.
+ * If this bit is set, then the high speed clock is not gated during the
+ * update cycle.
+ */
+#define MRFLD_PCI_CSI_HS_OVR_CLK_GATE_ON_UPDATE 0x800000
+
+/*
+ * Enables the combining of adjacent 32-byte write requests to the same
+ * cache line. When cleared, each 32-byte write request is sent as a
+ * separate request on the IB interface.
+ */
+#define MRFLD_PCI_I_CONTROL_ENABLE_WRITE_COMBINING 0x2
+
+#define MRFLD_PCI_I_CONTROL_SRSE_RESET_MASK 0xc
+
+#define MRFLD_PCI_CSI1_HSRXCLKTRIM 0x2
+#define MRFLD_PCI_CSI1_HSRXCLKTRIM_SHIFT 16
+#define MRFLD_PCI_CSI2_HSRXCLKTRIM 0x3
+#define MRFLD_PCI_CSI2_HSRXCLKTRIM_SHIFT 24
+#define MRFLD_PCI_CSI3_HSRXCLKTRIM 0x2
+#define MRFLD_PCI_CSI3_HSRXCLKTRIM_SHIFT 28
+#define MRFLD_PCI_CSI_HSRXCLKTRIM_MASK 0xf
+
+/*
+ * This register is an IUNIT MMIO register; it is used to select the CSI
+ * receiver backend.
+ * 1: SH CSI backend
+ * 0: Arasan CSI backend
+ */
+#define MRFLD_CSI_RECEIVER_SELECTION_REG 0x8081c
+
+#define MRFLD_INTR_CLEAR_REG 0x50c
+#define MRFLD_INTR_STATUS_REG 0x508
+#define MRFLD_INTR_ENABLE_REG 0x510
+
+#define MRFLD_MAX_ZOOM_FACTOR 1024
+
+/* MRFLD ISP POWER related */
+#define MRFLD_ISPSSPM0 0x39
+#define MRFLD_ISPSSPM0_ISPSSC_OFFSET 0
+#define MRFLD_ISPSSPM0_ISPSSS_OFFSET 24
+#define MRFLD_ISPSSPM0_ISPSSC_MASK 0x3
+#define MRFLD_ISPSSPM0_IUNIT_POWER_ON 0
+#define MRFLD_ISPSSPM0_IUNIT_POWER_OFF 0x3
+#define MRFLD_ISPSSDVFS 0x13F
+#define MRFLD_BIT0 0x0001
+#define MRFLD_BIT1 0x0002
+
+/* MRFLD CSI lane configuration related */
+#define MRFLD_PORT_CONFIG_NUM 8
+#define MRFLD_PORT_NUM 3
+#define MRFLD_PORT1_ENABLE_SHIFT 0
+#define MRFLD_PORT2_ENABLE_SHIFT 1
+#define MRFLD_PORT3_ENABLE_SHIFT 2
+#define MRFLD_PORT1_LANES_SHIFT 3
+#define MRFLD_PORT2_LANES_SHIFT 7
+#define MRFLD_PORT3_LANES_SHIFT 8
+#define MRFLD_PORT_CONFIG_MASK 0x000f03ff
+#define MRFLD_PORT_CONFIGCODE_SHIFT 16
+#define MRFLD_ALL_CSI_PORTS_OFF_MASK 0x7
+
+#define CHV_PORT3_LANES_SHIFT 9
+#define CHV_PORT_CONFIG_MASK 0x1f07ff
+
+#define ISPSSPM1 0x3a
+#define ISP_FREQ_STAT_MASK (0x1f << ISP_FREQ_STAT_OFFSET)
+#define ISP_REQ_FREQ_MASK 0x1f
+#define ISP_FREQ_VALID_MASK (0x1 << ISP_FREQ_VALID_OFFSET)
+#define ISP_FREQ_STAT_OFFSET 0x18
+#define ISP_REQ_GUAR_FREQ_OFFSET 0x8
+#define ISP_REQ_FREQ_OFFSET 0x0
+#define ISP_FREQ_VALID_OFFSET 0x7
+#define ISP_FREQ_RULE_ANY 0x0
+
+#define ISP_FREQ_457MHZ 0x1C9
+#define ISP_FREQ_400MHZ 0x190
+#define ISP_FREQ_356MHZ 0x164
+#define ISP_FREQ_320MHZ 0x140
+#define ISP_FREQ_266MHZ 0x10a
+#define ISP_FREQ_200MHZ 0xc8
+#define ISP_FREQ_100MHZ 0x64
+
+#define HPLL_FREQ_800MHZ 0x320
+#define HPLL_FREQ_1600MHZ 0x640
+#define HPLL_FREQ_2000MHZ 0x7D0
+
+#define CCK_FUSE_REG_0 0x08
+#define CCK_FUSE_HPLL_FREQ_MASK 0x03
+
+#if defined(ISP2401)
+#define ISP_FREQ_MAX ISP_FREQ_320MHZ
+#else
+#define ISP_FREQ_MAX ISP_FREQ_400MHZ
+#endif
+
+/* ISP2401 CSI2+ receiver delay settings */
+#define CSI2_PORT_A_BASE 0xC0000
+#define CSI2_PORT_B_BASE 0xC2000
+#define CSI2_PORT_C_BASE 0xC4000
+
+#define CSI2_LANE_CL_BASE 0x418
+#define CSI2_LANE_D0_BASE 0x420
+#define CSI2_LANE_D1_BASE 0x428
+#define CSI2_LANE_D2_BASE 0x430
+#define CSI2_LANE_D3_BASE 0x438
+
+#define CSI2_REG_RX_CSI_DLY_CNT_TERMEN 0
+#define CSI2_REG_RX_CSI_DLY_CNT_SETTLE 0x4
+
+#define CSI2_PORT_A_RX_CSI_DLY_CNT_TERMEN_CLANE 0xC0418
+#define CSI2_PORT_A_RX_CSI_DLY_CNT_SETTLE_CLANE 0xC041C
+#define CSI2_PORT_A_RX_CSI_DLY_CNT_TERMEN_DLANE0 0xC0420
+#define CSI2_PORT_A_RX_CSI_DLY_CNT_SETTLE_DLANE0 0xC0424
+#define CSI2_PORT_A_RX_CSI_DLY_CNT_TERMEN_DLANE1 0xC0428
+#define CSI2_PORT_A_RX_CSI_DLY_CNT_SETTLE_DLANE1 0xC042C
+#define CSI2_PORT_A_RX_CSI_DLY_CNT_TERMEN_DLANE2 0xC0430
+#define CSI2_PORT_A_RX_CSI_DLY_CNT_SETTLE_DLANE2 0xC0434
+#define CSI2_PORT_A_RX_CSI_DLY_CNT_TERMEN_DLANE3 0xC0438
+#define CSI2_PORT_A_RX_CSI_DLY_CNT_SETTLE_DLANE3 0xC043C
+
+#define CSI2_PORT_B_RX_CSI_DLY_CNT_TERMEN_CLANE 0xC2418
+#define CSI2_PORT_B_RX_CSI_DLY_CNT_SETTLE_CLANE 0xC241C
+#define CSI2_PORT_B_RX_CSI_DLY_CNT_TERMEN_DLANE0 0xC2420
+#define CSI2_PORT_B_RX_CSI_DLY_CNT_SETTLE_DLANE0 0xC2424
+#define CSI2_PORT_B_RX_CSI_DLY_CNT_TERMEN_DLANE1 0xC2428
+#define CSI2_PORT_B_RX_CSI_DLY_CNT_SETTLE_DLANE1 0xC242C
+
+#define CSI2_PORT_C_RX_CSI_DLY_CNT_TERMEN_CLANE 0xC4418
+#define CSI2_PORT_C_RX_CSI_DLY_CNT_SETTLE_CLANE 0xC441C
+#define CSI2_PORT_C_RX_CSI_DLY_CNT_TERMEN_DLANE0 0xC4420
+#define CSI2_PORT_C_RX_CSI_DLY_CNT_SETTLE_DLANE0 0xC4424
+#define CSI2_PORT_C_RX_CSI_DLY_CNT_TERMEN_DLANE1 0xC4428
+#define CSI2_PORT_C_RX_CSI_DLY_CNT_SETTLE_DLANE1 0xC442C
+
+#define DMA_BURST_SIZE_REG 0xCD408
+
+#define ISP_DFS_TRY_TIMES 2
+
+#endif /* ATOMISP_REGS_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_acc.c b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_acc.c
new file mode 100644
index 000000000000..1eac329339b7
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_acc.c
@@ -0,0 +1,608 @@
+/*
+ * Support for Clovertrail PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2012 Intel Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+
+/*
+ * This file implements the loadable acceleration firmware API,
+ * including ioctls to map and unmap acceleration parameters and buffers.
+ */
+
+#include <linux/init.h>
+#include <media/v4l2-event.h>
+
+#include "atomisp_acc.h"
+#include "atomisp_internal.h"
+#include "atomisp_compat.h"
+#include "atomisp_cmd.h"
+
+#include "hrt/hive_isp_css_mm_hrt.h"
+#include "memory_access/memory_access.h"
+#include "ia_css.h"
+
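+/*
+ * Map each ATOMISP_ACC_FW_LOAD_FL_* pipeline flag to the CSS pipe it selects.
+ */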
+static const struct {
+ unsigned int flag;
+ enum atomisp_css_pipe_id pipe_id;
+} acc_flag_to_pipe[] = {
+ { ATOMISP_ACC_FW_LOAD_FL_PREVIEW, CSS_PIPE_ID_PREVIEW },
+ { ATOMISP_ACC_FW_LOAD_FL_COPY, CSS_PIPE_ID_COPY },
+ { ATOMISP_ACC_FW_LOAD_FL_VIDEO, CSS_PIPE_ID_VIDEO },
+ { ATOMISP_ACC_FW_LOAD_FL_CAPTURE, CSS_PIPE_ID_CAPTURE },
+ { ATOMISP_ACC_FW_LOAD_FL_ACC, CSS_PIPE_ID_ACC }
+};
+
+/*
+ * Allocate struct atomisp_acc_fw along with space for firmware.
+ * The returned struct atomisp_acc_fw is cleared (firmware region is not).
+ */
+static struct atomisp_acc_fw *acc_alloc_fw(unsigned int fw_size)
+{
+ struct atomisp_acc_fw *acc_fw;
+
+ acc_fw = kzalloc(sizeof(*acc_fw), GFP_KERNEL);
+ if (!acc_fw)
+ return NULL;
+
+ acc_fw->fw = vmalloc(fw_size);
+ if (!acc_fw->fw) {
+ kfree(acc_fw);
+ return NULL;
+ }
+
+ return acc_fw;
+}
+
+static void acc_free_fw(struct atomisp_acc_fw *acc_fw)
+{
+ vfree(acc_fw->fw);
+ kfree(acc_fw);
+}
+
+static struct atomisp_acc_fw *
+acc_get_fw(struct atomisp_sub_device *asd, unsigned int handle)
+{
+ struct atomisp_acc_fw *acc_fw;
+
+ list_for_each_entry(acc_fw, &asd->acc.fw, list)
+ if (acc_fw->handle == handle)
+ return acc_fw;
+
+ return NULL;
+}
+
+static struct atomisp_map *acc_get_map(struct atomisp_sub_device *asd,
+ unsigned long css_ptr, size_t length)
+{
+ struct atomisp_map *atomisp_map;
+
+ list_for_each_entry(atomisp_map, &asd->acc.memory_maps, list) {
+ if (atomisp_map->ptr == css_ptr &&
+ atomisp_map->length == length)
+ return atomisp_map;
+ }
+ return NULL;
+}
+
+static int acc_stop_acceleration(struct atomisp_sub_device *asd)
+{
+ int ret;
+
+ ret = atomisp_css_stop_acc_pipe(asd);
+ atomisp_css_destroy_acc_pipe(asd);
+
+ return ret;
+}
+
+void atomisp_acc_cleanup(struct atomisp_device *isp)
+{
+ int i;
+
+ for (i = 0; i < isp->num_of_streams; i++)
+ ida_destroy(&isp->asd[i].acc.ida);
+}
+
+void atomisp_acc_release(struct atomisp_sub_device *asd)
+{
+ struct atomisp_acc_fw *acc_fw, *ta;
+ struct atomisp_map *atomisp_map, *tm;
+
+ /* Stop acceleration if already running */
+ if (asd->acc.pipeline)
+ acc_stop_acceleration(asd);
+
+ /* Unload all loaded acceleration binaries */
+ list_for_each_entry_safe(acc_fw, ta, &asd->acc.fw, list) {
+ list_del(&acc_fw->list);
+ ida_remove(&asd->acc.ida, acc_fw->handle);
+ acc_free_fw(acc_fw);
+ }
+
+ /* Free all mapped memory blocks */
+ list_for_each_entry_safe(atomisp_map, tm, &asd->acc.memory_maps, list) {
+ list_del(&atomisp_map->list);
+ hmm_free(atomisp_map->ptr);
+ kfree(atomisp_map);
+ }
+}
+
+int atomisp_acc_load_to_pipe(struct atomisp_sub_device *asd,
+ struct atomisp_acc_fw_load_to_pipe *user_fw)
+{
+ static const unsigned int pipeline_flags =
+ ATOMISP_ACC_FW_LOAD_FL_PREVIEW | ATOMISP_ACC_FW_LOAD_FL_COPY |
+ ATOMISP_ACC_FW_LOAD_FL_VIDEO |
+ ATOMISP_ACC_FW_LOAD_FL_CAPTURE | ATOMISP_ACC_FW_LOAD_FL_ACC;
+
+ struct atomisp_acc_fw *acc_fw;
+ int handle;
+
+ if (!user_fw->data || user_fw->size < sizeof(*acc_fw->fw))
+ return -EINVAL;
+
+	/* The binary has to be enabled for at least one pipeline */
+ if (!(user_fw->flags & pipeline_flags))
+ return -EINVAL;
+
+ /* We do not support other flags yet */
+ if (user_fw->flags & ~pipeline_flags)
+ return -EINVAL;
+
+ if (user_fw->type < ATOMISP_ACC_FW_LOAD_TYPE_OUTPUT ||
+ user_fw->type > ATOMISP_ACC_FW_LOAD_TYPE_STANDALONE)
+ return -EINVAL;
+
+ if (asd->acc.pipeline || asd->acc.extension_mode)
+ return -EBUSY;
+
+ acc_fw = acc_alloc_fw(user_fw->size);
+ if (!acc_fw)
+ return -ENOMEM;
+
+ if (copy_from_user(acc_fw->fw, user_fw->data, user_fw->size)) {
+ acc_free_fw(acc_fw);
+ return -EFAULT;
+ }
+
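+	/*
+	 * Allocate a handle starting from 1: a zero handle is treated as
+	 * "all loaded binaries" by atomisp_acc_start().
+	 */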
+ if (!ida_pre_get(&asd->acc.ida, GFP_KERNEL) ||
+ ida_get_new_above(&asd->acc.ida, 1, &handle)) {
+ acc_free_fw(acc_fw);
+ return -ENOSPC;
+ }
+
+ user_fw->fw_handle = handle;
+ acc_fw->handle = handle;
+ acc_fw->flags = user_fw->flags;
+ acc_fw->type = user_fw->type;
+ acc_fw->fw->handle = handle;
+
+ /*
+	 * Fix up the ISP firmware type so that the firmware can be
+	 * appended to the correct pipe.
+ */
+ if (acc_fw->fw->type == ia_css_isp_firmware) {
+ static const int type_to_css[] = {
+ [ATOMISP_ACC_FW_LOAD_TYPE_OUTPUT] =
+ IA_CSS_ACC_OUTPUT,
+ [ATOMISP_ACC_FW_LOAD_TYPE_VIEWFINDER] =
+ IA_CSS_ACC_VIEWFINDER,
+ [ATOMISP_ACC_FW_LOAD_TYPE_STANDALONE] =
+ IA_CSS_ACC_STANDALONE,
+ };
+ acc_fw->fw->info.isp.type = type_to_css[acc_fw->type];
+ }
+
+ list_add_tail(&acc_fw->list, &asd->acc.fw);
+ return 0;
+}
+
+int atomisp_acc_load(struct atomisp_sub_device *asd,
+ struct atomisp_acc_fw_load *user_fw)
+{
+ struct atomisp_acc_fw_load_to_pipe ltp = {0};
+ int r;
+
+ ltp.flags = ATOMISP_ACC_FW_LOAD_FL_ACC;
+ ltp.type = ATOMISP_ACC_FW_LOAD_TYPE_STANDALONE;
+ ltp.size = user_fw->size;
+ ltp.data = user_fw->data;
+ r = atomisp_acc_load_to_pipe(asd, &ltp);
+ user_fw->fw_handle = ltp.fw_handle;
+ return r;
+}
+
+int atomisp_acc_unload(struct atomisp_sub_device *asd, unsigned int *handle)
+{
+ struct atomisp_acc_fw *acc_fw;
+
+ if (asd->acc.pipeline || asd->acc.extension_mode)
+ return -EBUSY;
+
+ acc_fw = acc_get_fw(asd, *handle);
+ if (!acc_fw)
+ return -EINVAL;
+
+ list_del(&acc_fw->list);
+ ida_remove(&asd->acc.ida, acc_fw->handle);
+ acc_free_fw(acc_fw);
+
+ return 0;
+}
+
+int atomisp_acc_start(struct atomisp_sub_device *asd, unsigned int *handle)
+{
+ struct atomisp_device *isp = asd->isp;
+ struct atomisp_acc_fw *acc_fw;
+ int ret;
+ unsigned int nbin;
+
+ if (asd->acc.pipeline || asd->acc.extension_mode)
+ return -EBUSY;
+
+ /* Invalidate caches. FIXME: should flush only necessary buffers */
+ wbinvd();
+
+ ret = atomisp_css_create_acc_pipe(asd);
+ if (ret)
+ return ret;
+
+ nbin = 0;
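+	/*
+	 * Append every standalone binary that matches the requested handle
+	 * (a zero handle selects all loaded standalone binaries).
+	 */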
+ list_for_each_entry(acc_fw, &asd->acc.fw, list) {
+ if (*handle != 0 && *handle != acc_fw->handle)
+ continue;
+
+ if (acc_fw->type != ATOMISP_ACC_FW_LOAD_TYPE_STANDALONE)
+ continue;
+
+ /* Add the binary into the pipeline */
+ ret = atomisp_css_load_acc_binary(asd, acc_fw->fw, nbin);
+ if (ret < 0) {
+ dev_err(isp->dev, "acc_load_binary failed\n");
+ goto err_stage;
+ }
+
+ ret = atomisp_css_set_acc_parameters(acc_fw);
+ if (ret < 0) {
+ dev_err(isp->dev, "acc_set_parameters failed\n");
+ goto err_stage;
+ }
+ nbin++;
+ }
+ if (nbin < 1) {
+ /* Refuse creating pipelines with no binaries */
+ dev_err(isp->dev, "%s: no acc binary available\n", __func__);
+ ret = -EINVAL;
+ goto err_stage;
+ }
+
+ ret = atomisp_css_start_acc_pipe(asd);
+ if (ret) {
+		dev_err(isp->dev, "%s: atomisp_css_start_acc_pipe failed\n",
+ __func__);
+ goto err_stage;
+ }
+
+ return 0;
+
+err_stage:
+ atomisp_css_destroy_acc_pipe(asd);
+ return ret;
+}
+
+int atomisp_acc_wait(struct atomisp_sub_device *asd, unsigned int *handle)
+{
+ struct atomisp_device *isp = asd->isp;
+ int ret;
+
+ if (!asd->acc.pipeline)
+ return -ENOENT;
+
+ if (*handle && !acc_get_fw(asd, *handle))
+ return -EINVAL;
+
+ ret = atomisp_css_wait_acc_finish(asd);
+ if (acc_stop_acceleration(asd) == -EIO) {
+ atomisp_reset(isp);
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+void atomisp_acc_done(struct atomisp_sub_device *asd, unsigned int handle)
+{
+ struct v4l2_event event = { 0 };
+
+ event.type = V4L2_EVENT_ATOMISP_ACC_COMPLETE;
+ event.u.frame_sync.frame_sequence = atomic_read(&asd->sequence);
+ event.id = handle;
+
+ v4l2_event_queue(asd->subdev.devnode, &event);
+}
+
+int atomisp_acc_map(struct atomisp_sub_device *asd, struct atomisp_acc_map *map)
+{
+ struct atomisp_map *atomisp_map;
+ ia_css_ptr cssptr;
+ int pgnr;
+
+ if (map->css_ptr)
+ return -EINVAL;
+
+ if (asd->acc.pipeline)
+ return -EBUSY;
+
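+	/*
+	 * Either wrap a page-aligned user buffer or allocate a private
+	 * (optionally cached) buffer in ISP memory space.
+	 */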
+ if (map->user_ptr) {
+ /* Buffer to map must be page-aligned */
+ if ((unsigned long)map->user_ptr & ~PAGE_MASK) {
+ dev_err(asd->isp->dev,
+ "%s: mapped buffer address %p is not page aligned\n",
+ __func__, map->user_ptr);
+ return -EINVAL;
+ }
+
+ pgnr = DIV_ROUND_UP(map->length, PAGE_SIZE);
+ cssptr = hrt_isp_css_mm_alloc_user_ptr(
+ map->length, map->user_ptr,
+ pgnr, HRT_USR_PTR,
+ (map->flags & ATOMISP_MAP_FLAG_CACHED));
+ } else {
+ /* Allocate private buffer. */
+ if (map->flags & ATOMISP_MAP_FLAG_CACHED)
+ cssptr = hrt_isp_css_mm_calloc_cached(map->length);
+ else
+ cssptr = hrt_isp_css_mm_calloc(map->length);
+ }
+
+ if (!cssptr)
+ return -ENOMEM;
+
+ atomisp_map = kmalloc(sizeof(*atomisp_map), GFP_KERNEL);
+ if (!atomisp_map) {
+ hmm_free(cssptr);
+ return -ENOMEM;
+ }
+ atomisp_map->ptr = cssptr;
+ atomisp_map->length = map->length;
+ list_add(&atomisp_map->list, &asd->acc.memory_maps);
+
+ dev_dbg(asd->isp->dev, "%s: userptr %p, css_address 0x%x, size %d\n",
+ __func__, map->user_ptr, cssptr, map->length);
+ map->css_ptr = cssptr;
+ return 0;
+}
+
+int atomisp_acc_unmap(struct atomisp_sub_device *asd, struct atomisp_acc_map *map)
+{
+ struct atomisp_map *atomisp_map;
+
+ if (asd->acc.pipeline)
+ return -EBUSY;
+
+ atomisp_map = acc_get_map(asd, map->css_ptr, map->length);
+ if (!atomisp_map)
+ return -EINVAL;
+
+ list_del(&atomisp_map->list);
+ hmm_free(atomisp_map->ptr);
+ kfree(atomisp_map);
+ return 0;
+}
+
+int atomisp_acc_s_mapped_arg(struct atomisp_sub_device *asd,
+ struct atomisp_acc_s_mapped_arg *arg)
+{
+ struct atomisp_acc_fw *acc_fw;
+
+ if (arg->memory >= ATOMISP_ACC_NR_MEMORY)
+ return -EINVAL;
+
+ if (asd->acc.pipeline)
+ return -EBUSY;
+
+ acc_fw = acc_get_fw(asd, arg->fw_handle);
+ if (!acc_fw)
+ return -EINVAL;
+
+ if (arg->css_ptr != 0 || arg->length != 0) {
+ /* Unless the parameter is cleared, check that it exists */
+ if (!acc_get_map(asd, arg->css_ptr, arg->length))
+ return -EINVAL;
+ }
+
+ acc_fw->args[arg->memory].length = arg->length;
+ acc_fw->args[arg->memory].css_ptr = arg->css_ptr;
+
+ dev_dbg(asd->isp->dev, "%s: mem %d, address %p, size %ld\n",
+ __func__, arg->memory, (void *)arg->css_ptr,
+ (unsigned long)arg->length);
+ return 0;
+}
+
+/*
+ * Appends the loaded acceleration binary extensions to the
+ * current ISP mode. Must be called just before sh_css_start().
+ */
+int atomisp_acc_load_extensions(struct atomisp_sub_device *asd)
+{
+ struct atomisp_acc_fw *acc_fw;
+ bool ext_loaded = false;
+ bool continuous = asd->continuous_mode->val &&
+ asd->run_mode->val == ATOMISP_RUN_MODE_PREVIEW;
+ int ret = 0, i = -1;
+ struct atomisp_device *isp = asd->isp;
+
+ if (asd->acc.pipeline || asd->acc.extension_mode)
+ return -EBUSY;
+
+ /* Invalidate caches. FIXME: should flush only necessary buffers */
+ wbinvd();
+
+ list_for_each_entry(acc_fw, &asd->acc.fw, list) {
+ if (acc_fw->type != ATOMISP_ACC_FW_LOAD_TYPE_OUTPUT &&
+ acc_fw->type != ATOMISP_ACC_FW_LOAD_TYPE_VIEWFINDER)
+ continue;
+
+ for (i = 0; i < ARRAY_SIZE(acc_flag_to_pipe); i++) {
+ /* QoS (ACC pipe) acceleration stages are currently
+ * allowed only in continuous mode. Skip them for
+ * all other modes. */
+ if (!continuous &&
+ acc_flag_to_pipe[i].flag ==
+ ATOMISP_ACC_FW_LOAD_FL_ACC)
+ continue;
+
+ if (acc_fw->flags & acc_flag_to_pipe[i].flag) {
+ ret = atomisp_css_load_acc_extension(asd,
+ acc_fw->fw,
+ acc_flag_to_pipe[i].pipe_id,
+ acc_fw->type);
+ if (ret)
+ goto error;
+
+ ext_loaded = true;
+ }
+ }
+
+ ret = atomisp_css_set_acc_parameters(acc_fw);
+ if (ret < 0)
+ goto error;
+ }
+
+ if (!ext_loaded)
+ return ret;
+
+ ret = atomisp_css_update_stream(asd);
+ if (ret) {
+ dev_err(isp->dev, "%s: update stream failed.\n", __func__);
+ goto error;
+ }
+
+ asd->acc.extension_mode = true;
+ return 0;
+
+error:
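+	/*
+	 * First unload whatever was loaded for the failing binary, then walk
+	 * the list backwards and unload the extensions of earlier binaries.
+	 */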
+ while (--i >= 0) {
+ if (acc_fw->flags & acc_flag_to_pipe[i].flag) {
+ atomisp_css_unload_acc_extension(asd, acc_fw->fw,
+ acc_flag_to_pipe[i].pipe_id);
+ }
+ }
+
+ list_for_each_entry_continue_reverse(acc_fw, &asd->acc.fw, list) {
+ if (acc_fw->type != ATOMISP_ACC_FW_LOAD_TYPE_OUTPUT &&
+ acc_fw->type != ATOMISP_ACC_FW_LOAD_TYPE_VIEWFINDER)
+ continue;
+
+ for (i = ARRAY_SIZE(acc_flag_to_pipe) - 1; i >= 0; i--) {
+ if (!continuous &&
+ acc_flag_to_pipe[i].flag ==
+ ATOMISP_ACC_FW_LOAD_FL_ACC)
+ continue;
+ if (acc_fw->flags & acc_flag_to_pipe[i].flag) {
+ atomisp_css_unload_acc_extension(asd,
+ acc_fw->fw,
+ acc_flag_to_pipe[i].pipe_id);
+ }
+ }
+ }
+ return ret;
+}
+
+void atomisp_acc_unload_extensions(struct atomisp_sub_device *asd)
+{
+ struct atomisp_acc_fw *acc_fw;
+ int i;
+
+ if (!asd->acc.extension_mode)
+ return;
+
+ list_for_each_entry_reverse(acc_fw, &asd->acc.fw, list) {
+ if (acc_fw->type != ATOMISP_ACC_FW_LOAD_TYPE_OUTPUT &&
+ acc_fw->type != ATOMISP_ACC_FW_LOAD_TYPE_VIEWFINDER)
+ continue;
+
+ for (i = ARRAY_SIZE(acc_flag_to_pipe) - 1; i >= 0; i--) {
+ if (acc_fw->flags & acc_flag_to_pipe[i].flag) {
+ atomisp_css_unload_acc_extension(asd,
+ acc_fw->fw,
+ acc_flag_to_pipe[i].pipe_id);
+ }
+ }
+ }
+
+ asd->acc.extension_mode = false;
+}
+
+int atomisp_acc_set_state(struct atomisp_sub_device *asd,
+ struct atomisp_acc_state *arg)
+{
+ struct atomisp_acc_fw *acc_fw;
+ bool enable = (arg->flags & ATOMISP_STATE_FLAG_ENABLE) != 0;
+ struct ia_css_pipe *pipe;
+ enum ia_css_err r;
+ int i;
+
+ if (!asd->acc.extension_mode)
+ return -EBUSY;
+
+ if (arg->flags & ~ATOMISP_STATE_FLAG_ENABLE)
+ return -EINVAL;
+
+ acc_fw = acc_get_fw(asd, arg->fw_handle);
+ if (!acc_fw)
+ return -EINVAL;
+
+ if (enable)
+ wbinvd();
+
+ for (i = 0; i < ARRAY_SIZE(acc_flag_to_pipe); i++) {
+ if (acc_fw->flags & acc_flag_to_pipe[i].flag) {
+ pipe = asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].
+ pipes[acc_flag_to_pipe[i].pipe_id];
+ r = ia_css_pipe_set_qos_ext_state(pipe, acc_fw->handle,
+ enable);
+ if (r != IA_CSS_SUCCESS)
+ return -EBADRQC;
+ }
+ }
+
+ if (enable)
+ acc_fw->flags |= ATOMISP_ACC_FW_LOAD_FL_ENABLE;
+ else
+ acc_fw->flags &= ~ATOMISP_ACC_FW_LOAD_FL_ENABLE;
+
+ return 0;
+}
+
+int atomisp_acc_get_state(struct atomisp_sub_device *asd,
+ struct atomisp_acc_state *arg)
+{
+ struct atomisp_acc_fw *acc_fw;
+
+ if (!asd->acc.extension_mode)
+ return -EBUSY;
+
+ acc_fw = acc_get_fw(asd, arg->fw_handle);
+ if (!acc_fw)
+ return -EINVAL;
+
+ arg->flags = acc_fw->flags;
+
+ return 0;
+}
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_acc.h b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_acc.h
new file mode 100644
index 000000000000..5b58e7d9ca5b
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_acc.h
@@ -0,0 +1,124 @@
+/*
+ * Support for Clovertrail PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2012 Intel Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+
+#ifndef __ATOMISP_ACC_H__
+#define __ATOMISP_ACC_H__
+
+#include "../../include/linux/atomisp.h"
+#include "atomisp_internal.h"
+
+#include "ia_css_types.h"
+
+/*
+ * Interface functions for AtomISP driver acceleration API implementation.
+ */
+
+struct atomisp_sub_device;
+
+void atomisp_acc_cleanup(struct atomisp_device *isp);
+
+/*
+ * Free up any allocated resources.
+ * Must be called each time the device is closed.
+ * Note that there is no corresponding open() call;
+ * this function may be called multiple times in a row.
+ * Must also be called to free up resources before the driver is unloaded.
+ */
+void atomisp_acc_release(struct atomisp_sub_device *asd);
+
+/* Load acceleration binary. DEPRECATED. */
+int atomisp_acc_load(struct atomisp_sub_device *asd,
+ struct atomisp_acc_fw_load *fw);
+
+/* Load acceleration binary with specified properties */
+int atomisp_acc_load_to_pipe(struct atomisp_sub_device *asd,
+ struct atomisp_acc_fw_load_to_pipe *fw);
+
+/* Unload specified acceleration binary */
+int atomisp_acc_unload(struct atomisp_sub_device *asd,
+ unsigned int *handle);
+
+/*
+ * Map a memory region into ISP memory space.
+ */
+int atomisp_acc_map(struct atomisp_sub_device *asd,
+ struct atomisp_acc_map *map);
+
+/*
+ * Unmap a mapped memory region.
+ */
+int atomisp_acc_unmap(struct atomisp_sub_device *asd,
+ struct atomisp_acc_map *map);
+
+/*
+ * Set acceleration binary argument to a previously mapped memory region.
+ */
+int atomisp_acc_s_mapped_arg(struct atomisp_sub_device *asd,
+ struct atomisp_acc_s_mapped_arg *arg);
+
+
+/*
+ * Start acceleration.
+ * Returns immediately; acceleration is left running in the background.
+ * Specify either the acceleration binary or the pipeline to start.
+ */
+int atomisp_acc_start(struct atomisp_sub_device *asd,
+ unsigned int *handle);
+
+/*
+ * Wait until acceleration finishes.
+ * This MUST be called after each acceleration run has been started.
+ * Specify either an acceleration binary handle or a pipeline handle.
+ */
+int atomisp_acc_wait(struct atomisp_sub_device *asd,
+ unsigned int *handle);
+
+/*
+ * Used by the ISR to signal that an ACC stage has finished.
+ * This is used internally and is not exported as an ioctl.
+ */
+void atomisp_acc_done(struct atomisp_sub_device *asd, unsigned int handle);
+
+/*
+ * Appends the loaded acceleration binary extensions to the
+ * current ISP mode. Must be called just before atomisp_css_start().
+ */
+int atomisp_acc_load_extensions(struct atomisp_sub_device *asd);
+
+/*
+ * Must be called after streaming is stopped:
+ * unloads any loaded acceleration extensions.
+ */
+void atomisp_acc_unload_extensions(struct atomisp_sub_device *asd);
+
+/*
+ * Set acceleration firmware flags.
+ */
+int atomisp_acc_set_state(struct atomisp_sub_device *asd,
+ struct atomisp_acc_state *arg);
+
+/*
+ * Get acceleration firmware flags.
+ */
+int atomisp_acc_get_state(struct atomisp_sub_device *asd,
+ struct atomisp_acc_state *arg);
+
+#endif /* __ATOMISP_ACC_H__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_cmd.c b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_cmd.c
new file mode 100644
index 000000000000..97093baf28ac
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_cmd.c
@@ -0,0 +1,6751 @@
+/*
+ * Support for Medfield PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
+ *
+ * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+#include <linux/firmware.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/kfifo.h>
+#include <linux/pm_runtime.h>
+#include <linux/timer.h>
+#include <asm/intel-mid.h>
+
+#include <media/v4l2-event.h>
+#include <media/videobuf-vmalloc.h>
+
+#define CREATE_TRACE_POINTS
+#include "atomisp_trace_event.h"
+
+#include "atomisp_cmd.h"
+#include "atomisp_common.h"
+#include "atomisp_fops.h"
+#include "atomisp_internal.h"
+#include "atomisp_ioctl.h"
+#include "atomisp-regs.h"
+#include "atomisp_tables.h"
+#include "atomisp_acc.h"
+#include "atomisp_compat.h"
+#include "atomisp_subdev.h"
+#include "atomisp_dfs_tables.h"
+
+#include "hrt/hive_isp_css_mm_hrt.h"
+
+#include "sh_css_hrt.h"
+#include "sh_css_defs.h"
+#include "system_global.h"
+#include "sh_css_internal.h"
+#include "sh_css_sp.h"
+#include "gp_device.h"
+#include "device_access.h"
+#include "irq.h"
+
+#include "ia_css_types.h"
+#include "ia_css_stream.h"
+#include "error_support.h"
+#include "hrt/bits.h"
+
+
+/* We should never need to run the flash for more than 2 frames.
+ * At 15fps this means 133ms. We set the timeout a bit longer.
+ * Each flash driver is supposed to set its own timeout, but
+ * just in case someone else changed the timeout, we set it
+ * here to make sure we don't damage the flash hardware. */
+#define FLASH_TIMEOUT 800 /* ms */
+
+union host {
+ struct {
+ void *kernel_ptr;
+ void __user *user_ptr;
+ int size;
+ } scalar;
+ struct {
+ void *hmm_ptr;
+ } ptr;
+};
+
+/*
+ * atomisp_kernel_malloc: chooses whether kmalloc() or vmalloc() is preferable.
+ *
+ * It is also a wrapper function passed into the CSS framework.
+ */
+void *atomisp_kernel_malloc(size_t bytes)
+{
+ /* vmalloc() is preferable if allocating more than 1 page */
+ if (bytes > PAGE_SIZE)
+ return vmalloc(bytes);
+
+ return kmalloc(bytes, GFP_KERNEL);
+}
+
+/*
+ * atomisp_kernel_zalloc: optionally zeroes the allocated memory.
+ *
+ * It is also a wrapper function passed into the CSS framework.
+ */
+void *atomisp_kernel_zalloc(size_t bytes, bool zero_mem)
+{
+ void *ptr = atomisp_kernel_malloc(bytes);
+
+ if (ptr && zero_mem)
+ memset(ptr, 0, bytes);
+
+ return ptr;
+}
+
+/*
+ * Free a buffer allocated with the atomisp_kernel_malloc() or
+ * atomisp_kernel_zalloc() helpers.
+ */
+void atomisp_kernel_free(void *ptr)
+{
+ /* Verify if buffer was allocated by vmalloc() or kmalloc() */
+ if (is_vmalloc_addr(ptr))
+ vfree(ptr);
+ else
+ kfree(ptr);
+}
+
+/*
+ * get sensor:dis71430/ov2720 related info from v4l2_subdev->priv data field.
+ * subdev->priv is set in mrst.c
+ */
+struct camera_mipi_info *atomisp_to_sensor_mipi_info(struct v4l2_subdev *sd)
+{
+ return (struct camera_mipi_info *)v4l2_get_subdev_hostdata(sd);
+}
+
+/*
+ * get struct atomisp_video_pipe from v4l2 video_device
+ */
+struct atomisp_video_pipe *atomisp_to_video_pipe(struct video_device *dev)
+{
+ return (struct atomisp_video_pipe *)
+ container_of(dev, struct atomisp_video_pipe, vdev);
+}
+
+/*
+ * get struct atomisp_acc_pipe from v4l2 video_device
+ */
+struct atomisp_acc_pipe *atomisp_to_acc_pipe(struct video_device *dev)
+{
+ return (struct atomisp_acc_pipe *)
+ container_of(dev, struct atomisp_acc_pipe, vdev);
+}
+
+static unsigned short atomisp_get_sensor_fps(struct atomisp_sub_device *asd)
+{
+ struct v4l2_subdev_frame_interval fi;
+ struct atomisp_device *isp = asd->isp;
+
+ unsigned short fps = 0;
+ int ret;
+
+ ret = v4l2_subdev_call(isp->inputs[asd->input_curr].camera,
+ video, g_frame_interval, &fi);
+
+ if (!ret && fi.interval.numerator)
+ fps = fi.interval.denominator / fi.interval.numerator;
+
+ return fps;
+}
+
+/*
+ * The DFS sequence is as follows:
+ * 1. The target frequency is calculated according to FPS, resolution and
+ *    ISP running mode.
+ * 2. The ratio is calculated as 2 * HPLL / target frequency - 1, with
+ *    proper rounding.
+ * 3. Write the ratio to ISPFREQ40, set FREQVALID to 1 and set
+ *    ISPFREQGUAR40 to the 200 MHz ratio in ISPSSPM1.
+ * 4. Wait for FREQVALID to be cleared by the P-Unit.
+ * 5. Wait for the ISPFREQSTAT40 field in ISPSSPM1 to change to the ratio
+ *    set in step 3.
+ */
+static int write_target_freq_to_hw(struct atomisp_device *isp,
+ unsigned int new_freq)
+{
+ unsigned int ratio, timeout, guar_ratio;
+ u32 isp_sspm1 = 0;
+	int i;
+
+	if (!isp->hpll_freq) {
+ dev_err(isp->dev, "failed to get hpll_freq. no change to freq\n");
+ return -EINVAL;
+ }
+
+ isp_sspm1 = intel_mid_msgbus_read32(PUNIT_PORT, ISPSSPM1);
+ if (isp_sspm1 & ISP_FREQ_VALID_MASK) {
+ dev_dbg(isp->dev, "clearing ISPSSPM1 valid bit.\n");
+ intel_mid_msgbus_write32(PUNIT_PORT, ISPSSPM1,
+ isp_sspm1 & ~(1 << ISP_FREQ_VALID_OFFSET));
+ }
+
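+	/*
+	 * Ratio = 2 * HPLL / target frequency - 1, with the division rounded
+	 * to the nearest integer: adding new_freq / 2 before dividing
+	 * implements the rounding. guar_ratio encodes the 200 MHz guaranteed
+	 * frequency in the same way.
+	 */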
+ ratio = (2 * isp->hpll_freq + new_freq / 2) / new_freq - 1;
+ guar_ratio = (2 * isp->hpll_freq + 200 / 2) / 200 - 1;
+
+ isp_sspm1 = intel_mid_msgbus_read32(PUNIT_PORT, ISPSSPM1);
+ isp_sspm1 &= ~(0x1F << ISP_REQ_FREQ_OFFSET);
+
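+	/*
+	 * Program the frequency request and retry up to ISP_DFS_TRY_TIMES
+	 * times, each time waiting for the P-Unit to clear the FREQVALID bit.
+	 */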
+ for (i = 0; i < ISP_DFS_TRY_TIMES; i++) {
+ intel_mid_msgbus_write32(PUNIT_PORT, ISPSSPM1,
+ isp_sspm1
+ | ratio << ISP_REQ_FREQ_OFFSET
+ | 1 << ISP_FREQ_VALID_OFFSET
+ | guar_ratio << ISP_REQ_GUAR_FREQ_OFFSET);
+
+ isp_sspm1 = intel_mid_msgbus_read32(PUNIT_PORT, ISPSSPM1);
+
+ timeout = 20;
+ while ((isp_sspm1 & ISP_FREQ_VALID_MASK) && timeout) {
+ isp_sspm1 = intel_mid_msgbus_read32(PUNIT_PORT, ISPSSPM1);
+ dev_dbg(isp->dev, "waiting for ISPSSPM1 valid bit to be 0.\n");
+ udelay(100);
+ timeout--;
+ }
+
+ if (timeout != 0)
+ break;
+ }
+
+ if (timeout == 0) {
+ dev_err(isp->dev, "DFS failed due to HW error.\n");
+ return -EINVAL;
+ }
+
+ isp_sspm1 = intel_mid_msgbus_read32(PUNIT_PORT, ISPSSPM1);
+ timeout = 10;
+ while (((isp_sspm1 >> ISP_FREQ_STAT_OFFSET) != ratio) && timeout) {
+ isp_sspm1 = intel_mid_msgbus_read32(PUNIT_PORT, ISPSSPM1);
+ dev_dbg(isp->dev, "waiting for ISPSSPM1 status bit to be 0x%x.\n",
+ new_freq);
+ udelay(100);
+ timeout--;
+ }
+ if (timeout == 0) {
+ dev_err(isp->dev, "DFS target freq is rejected by HW.\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+int atomisp_freq_scaling(struct atomisp_device *isp,
+ enum atomisp_dfs_mode mode,
+ bool force)
+{
+ /* FIXME! Only use subdev[0] status yet */
+ struct atomisp_sub_device *asd = &isp->asd[0];
+ const struct atomisp_dfs_config *dfs;
+ unsigned int new_freq;
+ struct atomisp_freq_scaling_rule curr_rules;
+ int i, ret;
+ unsigned short fps = 0;
+
+ if (isp->sw_contex.power_state != ATOM_ISP_POWER_UP) {
+ dev_err(isp->dev, "DFS cannot proceed due to no power.\n");
+ return -EINVAL;
+ }
+
+ if ((isp->pdev->device & ATOMISP_PCI_DEVICE_SOC_MASK) ==
+ ATOMISP_PCI_DEVICE_SOC_CHT && ATOMISP_USE_YUVPP(asd))
+ isp->dfs = &dfs_config_cht_soc;
+
+ dfs = isp->dfs;
+
+ if (dfs->lowest_freq == 0 || dfs->max_freq_at_vmin == 0 ||
+ dfs->highest_freq == 0 || dfs->dfs_table_size == 0 ||
+ !dfs->dfs_table) {
+ dev_err(isp->dev, "DFS configuration is invalid.\n");
+ return -EINVAL;
+ }
+
+ if (mode == ATOMISP_DFS_MODE_LOW) {
+ new_freq = dfs->lowest_freq;
+ goto done;
+ }
+
+ if (mode == ATOMISP_DFS_MODE_MAX) {
+ new_freq = dfs->highest_freq;
+ goto done;
+ }
+
+ fps = atomisp_get_sensor_fps(asd);
+ if (fps == 0)
+ return -EINVAL;
+
+ curr_rules.width = asd->fmt[asd->capture_pad].fmt.width;
+ curr_rules.height = asd->fmt[asd->capture_pad].fmt.height;
+ curr_rules.fps = fps;
+ curr_rules.run_mode = asd->run_mode->val;
+ /*
+	 * In continuous mode the capture settings must already be applied
+	 * in preview mode, because there is no chance to apply them when
+	 * image capture starts.
+ */
+ if (asd->continuous_mode->val) {
+ if (asd->run_mode->val == ATOMISP_RUN_MODE_VIDEO)
+ curr_rules.run_mode = ATOMISP_RUN_MODE_SDV;
+ else
+ curr_rules.run_mode =
+ ATOMISP_RUN_MODE_CONTINUOUS_CAPTURE;
+ }
+
+	/* Search for the target frequency by looping over the frequency rules */
+ for (i = 0; i < dfs->dfs_table_size; i++) {
+ if (curr_rules.width != dfs->dfs_table[i].width &&
+ dfs->dfs_table[i].width != ISP_FREQ_RULE_ANY)
+ continue;
+ if (curr_rules.height != dfs->dfs_table[i].height &&
+ dfs->dfs_table[i].height != ISP_FREQ_RULE_ANY)
+ continue;
+ if (curr_rules.fps != dfs->dfs_table[i].fps &&
+ dfs->dfs_table[i].fps != ISP_FREQ_RULE_ANY)
+ continue;
+ if (curr_rules.run_mode != dfs->dfs_table[i].run_mode &&
+ dfs->dfs_table[i].run_mode != ISP_FREQ_RULE_ANY)
+ continue;
+ break;
+ }
+
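+	/* No rule matched: fall back to the maximum frequency at Vmin. */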
+ if (i == dfs->dfs_table_size)
+ new_freq = dfs->max_freq_at_vmin;
+ else
+ new_freq = dfs->dfs_table[i].isp_freq;
+
+done:
+ dev_dbg(isp->dev, "DFS target frequency=%d.\n", new_freq);
+
+ if ((new_freq == isp->sw_contex.running_freq) && !force)
+ return 0;
+
+ dev_dbg(isp->dev, "Programming DFS frequency to %d\n", new_freq);
+
+ ret = write_target_freq_to_hw(isp, new_freq);
+ if (!ret) {
+ isp->sw_contex.running_freq = new_freq;
+ trace_ipu_pstate(new_freq, -1);
+ }
+ return ret;
+}
+
+/*
+ * reset and restore ISP
+ */
+int atomisp_reset(struct atomisp_device *isp)
+{
+ /* Reset ISP by power-cycling it */
+ int ret = 0;
+
+ dev_dbg(isp->dev, "%s\n", __func__);
+ atomisp_css_suspend(isp);
+ ret = atomisp_runtime_suspend(isp->dev);
+ if (ret < 0)
+ dev_err(isp->dev, "atomisp_runtime_suspend failed, %d\n", ret);
+ ret = atomisp_mrfld_power_down(isp);
+ if (ret < 0) {
+ dev_err(isp->dev, "can not disable ISP power\n");
+ } else {
+ ret = atomisp_mrfld_power_up(isp);
+ if (ret < 0)
+ dev_err(isp->dev, "can not enable ISP power\n");
+ ret = atomisp_runtime_resume(isp->dev);
+ if (ret < 0)
+ dev_err(isp->dev, "atomisp_runtime_resume failed, %d\n", ret);
+ }
+ ret = atomisp_css_resume(isp);
+ if (ret)
+ isp->isp_fatal_error = true;
+
+ return ret;
+}
+
+/*
+ * interrupt disable functions
+ */
+static void disable_isp_irq(enum hrt_isp_css_irq irq)
+{
+ irq_disable_channel(IRQ0_ID, irq);
+
+ if (irq != hrt_isp_css_irq_sp)
+ return;
+
+ cnd_sp_irq_enable(SP0_ID, false);
+}
+
+/*
+ * interrupt clean function
+ */
+static void clear_isp_irq(enum hrt_isp_css_irq irq)
+{
+ irq_clear_all(IRQ0_ID);
+}
+
+void atomisp_msi_irq_init(struct atomisp_device *isp, struct pci_dev *dev)
+{
+ u32 msg32;
+ u16 msg16;
+
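+	/*
+	 * Enable MSI, program the interrupt control register, and enable
+	 * memory space access and bus mastering while disabling legacy INTx.
+	 */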
+ pci_read_config_dword(dev, PCI_MSI_CAPID, &msg32);
+ msg32 |= 1 << MSI_ENABLE_BIT;
+ pci_write_config_dword(dev, PCI_MSI_CAPID, msg32);
+
+ msg32 = (1 << INTR_IER) | (1 << INTR_IIR);
+ pci_write_config_dword(dev, PCI_INTERRUPT_CTRL, msg32);
+
+ pci_read_config_word(dev, PCI_COMMAND, &msg16);
+ msg16 |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER |
+ PCI_COMMAND_INTX_DISABLE);
+ pci_write_config_word(dev, PCI_COMMAND, msg16);
+}
+
+void atomisp_msi_irq_uninit(struct atomisp_device *isp, struct pci_dev *dev)
+{
+ u32 msg32;
+ u16 msg16;
+
+ pci_read_config_dword(dev, PCI_MSI_CAPID, &msg32);
+ msg32 &= ~(1 << MSI_ENABLE_BIT);
+ pci_write_config_dword(dev, PCI_MSI_CAPID, msg32);
+
+ msg32 = 0x0;
+ pci_write_config_dword(dev, PCI_INTERRUPT_CTRL, msg32);
+
+ pci_read_config_word(dev, PCI_COMMAND, &msg16);
+ msg16 &= ~(PCI_COMMAND_MASTER);
+ pci_write_config_word(dev, PCI_COMMAND, msg16);
+}
+
+static void atomisp_sof_event(struct atomisp_sub_device *asd)
+{
+ struct v4l2_event event = {0};
+
+ event.type = V4L2_EVENT_FRAME_SYNC;
+ event.u.frame_sync.frame_sequence = atomic_read(&asd->sof_count);
+
+ v4l2_event_queue(asd->subdev.devnode, &event);
+}
+
+void atomisp_eof_event(struct atomisp_sub_device *asd, uint8_t exp_id)
+{
+ struct v4l2_event event = {0};
+
+ event.type = V4L2_EVENT_FRAME_END;
+ event.u.frame_sync.frame_sequence = exp_id;
+
+ v4l2_event_queue(asd->subdev.devnode, &event);
+}
+
+static void atomisp_3a_stats_ready_event(struct atomisp_sub_device *asd, uint8_t exp_id)
+{
+ struct v4l2_event event = {0};
+
+ event.type = V4L2_EVENT_ATOMISP_3A_STATS_READY;
+ event.u.frame_sync.frame_sequence = exp_id;
+
+ v4l2_event_queue(asd->subdev.devnode, &event);
+}
+
+static void atomisp_metadata_ready_event(struct atomisp_sub_device *asd,
+ enum atomisp_metadata_type md_type)
+{
+ struct v4l2_event event = {0};
+
+ event.type = V4L2_EVENT_ATOMISP_METADATA_READY;
+ event.u.data[0] = md_type;
+
+ v4l2_event_queue(asd->subdev.devnode, &event);
+}
+
+static void atomisp_reset_event(struct atomisp_sub_device *asd)
+{
+ struct v4l2_event event = {0};
+
+ event.type = V4L2_EVENT_ATOMISP_CSS_RESET;
+
+ v4l2_event_queue(asd->subdev.devnode, &event);
+}
+
+
+static void print_csi_rx_errors(enum ia_css_csi2_port port,
+ struct atomisp_device *isp)
+{
+ u32 infos = 0;
+
+ atomisp_css_rx_get_irq_info(port, &infos);
+
+ dev_err(isp->dev, "CSI Receiver port %d errors:\n", port);
+ if (infos & CSS_RX_IRQ_INFO_BUFFER_OVERRUN)
+ dev_err(isp->dev, " buffer overrun");
+ if (infos & CSS_RX_IRQ_INFO_ERR_SOT)
+ dev_err(isp->dev, " start-of-transmission error");
+ if (infos & CSS_RX_IRQ_INFO_ERR_SOT_SYNC)
+ dev_err(isp->dev, " start-of-transmission sync error");
+ if (infos & CSS_RX_IRQ_INFO_ERR_CONTROL)
+ dev_err(isp->dev, " control error");
+ if (infos & CSS_RX_IRQ_INFO_ERR_ECC_DOUBLE)
+ dev_err(isp->dev, " 2 or more ECC errors");
+ if (infos & CSS_RX_IRQ_INFO_ERR_CRC)
+ dev_err(isp->dev, " CRC mismatch");
+ if (infos & CSS_RX_IRQ_INFO_ERR_UNKNOWN_ID)
+ dev_err(isp->dev, " unknown error");
+ if (infos & CSS_RX_IRQ_INFO_ERR_FRAME_SYNC)
+ dev_err(isp->dev, " frame sync error");
+ if (infos & CSS_RX_IRQ_INFO_ERR_FRAME_DATA)
+ dev_err(isp->dev, " frame data error");
+ if (infos & CSS_RX_IRQ_INFO_ERR_DATA_TIMEOUT)
+ dev_err(isp->dev, " data timeout");
+ if (infos & CSS_RX_IRQ_INFO_ERR_UNKNOWN_ESC)
+ dev_err(isp->dev, " unknown escape command entry");
+ if (infos & CSS_RX_IRQ_INFO_ERR_LINE_SYNC)
+ dev_err(isp->dev, " line sync error");
+}
+
+/* Clear irq reg */
+static void clear_irq_reg(struct atomisp_device *isp)
+{
+ u32 msg_ret;
+ pci_read_config_dword(isp->pdev, PCI_INTERRUPT_CTRL, &msg_ret);
+ msg_ret |= 1 << INTR_IIR;
+ pci_write_config_dword(isp->pdev, PCI_INTERRUPT_CTRL, msg_ret);
+}
+
+static struct atomisp_sub_device *
+__get_asd_from_port(struct atomisp_device *isp, mipi_port_ID_t port)
+{
+ int i;
+
+ /* Check which isp subdev to send eof */
+ for (i = 0; i < isp->num_of_streams; i++) {
+ struct atomisp_sub_device *asd = &isp->asd[i];
+ struct camera_mipi_info *mipi_info;
+
+ mipi_info = atomisp_to_sensor_mipi_info(
+ isp->inputs[asd->input_curr].camera);
+
+ if (asd->streaming == ATOMISP_DEVICE_STREAMING_ENABLED &&
+ __get_mipi_port(isp, mipi_info->port) == port) {
+ return asd;
+ }
+ }
+
+ return NULL;
+}
+
+/* Interrupt handling function */
+irqreturn_t atomisp_isr(int irq, void *dev)
+{
+ struct atomisp_device *isp = (struct atomisp_device *)dev;
+ struct atomisp_sub_device *asd;
+ struct atomisp_css_event eof_event;
+ unsigned int irq_infos = 0;
+ unsigned long flags;
+ unsigned int i;
+ int err;
+
+ spin_lock_irqsave(&isp->lock, flags);
+ if (isp->sw_contex.power_state != ATOM_ISP_POWER_UP ||
+ isp->css_initialized == false) {
+ spin_unlock_irqrestore(&isp->lock, flags);
+ return IRQ_HANDLED;
+ }
+ err = atomisp_css_irq_translate(isp, &irq_infos);
+ if (err) {
+ spin_unlock_irqrestore(&isp->lock, flags);
+ return IRQ_NONE;
+ }
+
+ dev_dbg(isp->dev, "irq:0x%x\n", irq_infos);
+
+ clear_irq_reg(isp);
+
+ if (!atomisp_streaming_count(isp) && !atomisp_is_acc_enabled(isp))
+ goto out_nowake;
+
+ for (i = 0; i < isp->num_of_streams; i++) {
+ asd = &isp->asd[i];
+
+ if (asd->streaming != ATOMISP_DEVICE_STREAMING_ENABLED)
+ continue;
+ /*
+		 * The current SOF handling only supports one stream, so SOF
+		 * is only valid when exactly one stream is running.
+ */
+ if (irq_infos & CSS_IRQ_INFO_CSS_RECEIVER_SOF) {
+ atomic_inc(&asd->sof_count);
+ atomisp_sof_event(asd);
+
+			/* If sequence_temp and sequence are the same,
+			 * no frames were lost and we can advance
+			 * sequence_temp.
+			 * If not, processing of a frame is still in progress
+			 * and the driver needs to keep the old sequence_temp
+			 * value.
+			 * NOTE: The assumption here is that the ISP will not
+			 * start processing the next frame from the sensor
+			 * before the old one is completely done. */
+ if (atomic_read(&asd->sequence) == atomic_read(
+ &asd->sequence_temp))
+ atomic_set(&asd->sequence_temp,
+ atomic_read(&asd->sof_count));
+ }
+ if (irq_infos & CSS_IRQ_INFO_EVENTS_READY)
+ atomic_set(&asd->sequence,
+ atomic_read(&asd->sequence_temp));
+ }
+
+ if (irq_infos & CSS_IRQ_INFO_CSS_RECEIVER_SOF)
+ irq_infos &= ~CSS_IRQ_INFO_CSS_RECEIVER_SOF;
+
+ if ((irq_infos & CSS_IRQ_INFO_INPUT_SYSTEM_ERROR) ||
+ (irq_infos & CSS_IRQ_INFO_IF_ERROR)) {
+ /* handle mipi receiver error */
+ u32 rx_infos;
+ enum ia_css_csi2_port port;
+
+ for (port = IA_CSS_CSI2_PORT0; port <= IA_CSS_CSI2_PORT2;
+ port++) {
+ print_csi_rx_errors(port, isp);
+ atomisp_css_rx_get_irq_info(port, &rx_infos);
+ atomisp_css_rx_clear_irq_info(port, rx_infos);
+ }
+ }
+
+ if (irq_infos & IA_CSS_IRQ_INFO_ISYS_EVENTS_READY) {
+ while (ia_css_dequeue_isys_event(&(eof_event.event)) ==
+ IA_CSS_SUCCESS) {
+ /* EOF Event does not have the css_pipe returned */
+ asd = __get_asd_from_port(isp, eof_event.event.port);
+ if (!asd) {
+ dev_err(isp->dev, "%s:no subdev.event:%d", __func__,
+ eof_event.event.type);
+ continue;
+ }
+
+ atomisp_eof_event(asd, eof_event.event.exp_id);
+ dev_dbg(isp->dev, "%s EOF exp_id %d, asd %d\n",
+ __func__, eof_event.event.exp_id, asd->index);
+ }
+
+ irq_infos &= ~IA_CSS_IRQ_INFO_ISYS_EVENTS_READY;
+ if (irq_infos == 0)
+ goto out_nowake;
+ }
+
+ spin_unlock_irqrestore(&isp->lock, flags);
+
+ return IRQ_WAKE_THREAD;
+
+out_nowake:
+ spin_unlock_irqrestore(&isp->lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+void atomisp_clear_css_buffer_counters(struct atomisp_sub_device *asd)
+{
+ int i;
+ memset(asd->s3a_bufs_in_css, 0, sizeof(asd->s3a_bufs_in_css));
+ for (i = 0; i < ATOMISP_INPUT_STREAM_NUM; i++)
+ memset(asd->metadata_bufs_in_css[i], 0,
+ sizeof(asd->metadata_bufs_in_css[i]));
+ asd->dis_bufs_in_css = 0;
+ asd->video_out_capture.buffers_in_css = 0;
+ asd->video_out_vf.buffers_in_css = 0;
+ asd->video_out_preview.buffers_in_css = 0;
+ asd->video_out_video_capture.buffers_in_css = 0;
+}
+
+#ifndef ISP2401
+bool atomisp_buffers_queued(struct atomisp_sub_device *asd)
+#else
+bool atomisp_buffers_queued_pipe(struct atomisp_video_pipe *pipe)
+#endif
+{
+#ifndef ISP2401
+ return asd->video_out_capture.buffers_in_css ||
+ asd->video_out_vf.buffers_in_css ||
+ asd->video_out_preview.buffers_in_css ||
+ asd->video_out_video_capture.buffers_in_css ?
+ true : false;
+#else
+ return pipe->buffers_in_css ? true : false;
+#endif
+}
+
+/* 0x100000 is the start of dmem inside SP */
+#define SP_DMEM_BASE 0x100000
+
+void dump_sp_dmem(struct atomisp_device *isp, unsigned int addr,
+ unsigned int size)
+{
+ unsigned int data = 0;
+ unsigned int size32 = DIV_ROUND_UP(size, sizeof(u32));
+
+ dev_dbg(isp->dev, "atomisp_io_base:%p\n", atomisp_io_base);
+ dev_dbg(isp->dev, "%s, addr:0x%x, size: %d, size32: %d\n", __func__,
+ addr, size, size32);
+ if (size32 * 4 + addr > 0x4000) {
+ dev_err(isp->dev, "illegal size (%d) or addr (0x%x)\n",
+ size32, addr);
+ return;
+ }
+ addr += SP_DMEM_BASE;
+ do {
+ data = _hrt_master_port_uload_32(addr);
+
+ dev_dbg(isp->dev, "%s, \t [0x%x]:0x%x\n", __func__, addr, data);
+ addr += sizeof(unsigned int);
+ size32 -= 1;
+ } while (size32 > 0);
+}
+
+static struct videobuf_buffer *atomisp_css_frame_to_vbuf(
+ struct atomisp_video_pipe *pipe, struct atomisp_css_frame *frame)
+{
+ struct videobuf_vmalloc_memory *vm_mem;
+ struct atomisp_css_frame *handle;
+ int i;
+
+ for (i = 0; pipe->capq.bufs[i]; i++) {
+ vm_mem = pipe->capq.bufs[i]->priv;
+ handle = vm_mem->vaddr;
+ if (handle && handle->data == frame->data)
+ return pipe->capq.bufs[i];
+ }
+
+ return NULL;
+}
+
+static void get_buf_timestamp(struct timeval *tv)
+{
+ struct timespec ts;
+ ktime_get_ts(&ts);
+ tv->tv_sec = ts.tv_sec;
+ tv->tv_usec = ts.tv_nsec / NSEC_PER_USEC;
+}
+
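+/*
+ * Mark all active or queued buffers of a pipe as VIDEOBUF_ERROR and wake up
+ * any waiters so that the buffers can be dequeued after a flush.
+ */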
+static void atomisp_flush_video_pipe(struct atomisp_sub_device *asd,
+ struct atomisp_video_pipe *pipe)
+{
+ unsigned long irqflags;
+ int i;
+
+ if (!pipe->users)
+ return;
+
+ for (i = 0; pipe->capq.bufs[i]; i++) {
+ spin_lock_irqsave(&pipe->irq_lock, irqflags);
+ if (pipe->capq.bufs[i]->state == VIDEOBUF_ACTIVE ||
+ pipe->capq.bufs[i]->state == VIDEOBUF_QUEUED) {
+ get_buf_timestamp(&pipe->capq.bufs[i]->ts);
+ pipe->capq.bufs[i]->field_count =
+ atomic_read(&asd->sequence) << 1;
+ dev_dbg(asd->isp->dev, "release buffers on device %s\n",
+ pipe->vdev.name);
+ if (pipe->capq.bufs[i]->state == VIDEOBUF_QUEUED)
+ list_del_init(&pipe->capq.bufs[i]->queue);
+ pipe->capq.bufs[i]->state = VIDEOBUF_ERROR;
+ wake_up(&pipe->capq.bufs[i]->done);
+ }
+ spin_unlock_irqrestore(&pipe->irq_lock, irqflags);
+ }
+}
+
+/* Returns queued buffers back to video-core */
+void atomisp_flush_bufs_and_wakeup(struct atomisp_sub_device *asd)
+{
+ atomisp_flush_video_pipe(asd, &asd->video_out_capture);
+ atomisp_flush_video_pipe(asd, &asd->video_out_vf);
+ atomisp_flush_video_pipe(asd, &asd->video_out_preview);
+ atomisp_flush_video_pipe(asd, &asd->video_out_video_capture);
+}
+
+/* Clean out per-frame parameters that were never applied */
+void atomisp_flush_params_queue(struct atomisp_video_pipe *pipe)
+{
+ struct atomisp_css_params_with_list *param;
+
+ while (!list_empty(&pipe->per_frame_params)) {
+ param = list_entry(pipe->per_frame_params.next,
+ struct atomisp_css_params_with_list, list);
+ list_del(&param->list);
+ atomisp_free_css_parameters(&param->params);
+ atomisp_kernel_free(param);
+ }
+}
+
+/* Re-queue per-frame parameters */
+static void atomisp_recover_params_queue(struct atomisp_video_pipe *pipe)
+{
+ struct atomisp_css_params_with_list *param;
+ int i;
+
+ for (i = 0; i < VIDEO_MAX_FRAME; i++) {
+ param = pipe->frame_params[i];
+ if (param)
+ list_add_tail(&param->list, &pipe->per_frame_params);
+ pipe->frame_params[i] = NULL;
+ }
+ atomisp_handle_parameter_and_buffer(pipe);
+}
+
+/* find atomisp_video_pipe with css pipe id, buffer type and atomisp run_mode */
+static struct atomisp_video_pipe *__atomisp_get_pipe(
+ struct atomisp_sub_device *asd,
+ enum atomisp_input_stream_id stream_id,
+ enum atomisp_css_pipe_id css_pipe_id,
+ enum atomisp_css_buffer_type buf_type)
+{
+ struct atomisp_device *isp = asd->isp;
+
+ if (css_pipe_id == CSS_PIPE_ID_COPY &&
+ isp->inputs[asd->input_curr].camera_caps->
+ sensor[asd->sensor_curr].stream_num > 1) {
+ switch (stream_id) {
+ case ATOMISP_INPUT_STREAM_PREVIEW:
+ return &asd->video_out_preview;
+ case ATOMISP_INPUT_STREAM_POSTVIEW:
+ return &asd->video_out_vf;
+ case ATOMISP_INPUT_STREAM_VIDEO:
+ return &asd->video_out_video_capture;
+ case ATOMISP_INPUT_STREAM_CAPTURE:
+ default:
+ return &asd->video_out_capture;
+ }
+ }
+
+	/* video is the same in online mode as in continuous capture mode */
+ if (asd->vfpp->val == ATOMISP_VFPP_DISABLE_LOWLAT) {
+ /*
+ * Disable vf_pp and run CSS in still capture mode. In this
+ * mode, CSS does not cause extra latency with buffering, but
+ * scaling is not available.
+ */
+ return &asd->video_out_capture;
+ } else if (asd->vfpp->val == ATOMISP_VFPP_DISABLE_SCALER) {
+ /*
+ * Disable vf_pp and run CSS in video mode. This allows using
+ * ISP scaling but it has one frame delay due to CSS internal
+ * buffering.
+ */
+ return &asd->video_out_video_capture;
+ } else if (css_pipe_id == CSS_PIPE_ID_YUVPP) {
+ /*
+		 * For SoC cameras, the yuvpp pipe is run for capture/video/SDV/ZSL.
+ */
+ if (asd->continuous_mode->val) {
+ if (asd->run_mode->val == ATOMISP_RUN_MODE_VIDEO) {
+ /* SDV case */
+ switch (buf_type) {
+ case CSS_BUFFER_TYPE_SEC_OUTPUT_FRAME:
+ return &asd->video_out_video_capture;
+ case CSS_BUFFER_TYPE_SEC_VF_OUTPUT_FRAME:
+ return &asd->video_out_preview;
+ case CSS_BUFFER_TYPE_OUTPUT_FRAME:
+ return &asd->video_out_capture;
+ default:
+ return &asd->video_out_vf;
+ }
+ } else if (asd->run_mode->val == ATOMISP_RUN_MODE_PREVIEW) {
+ /* ZSL case */
+ switch (buf_type) {
+ case CSS_BUFFER_TYPE_SEC_OUTPUT_FRAME:
+ return &asd->video_out_preview;
+ case CSS_BUFFER_TYPE_OUTPUT_FRAME:
+ return &asd->video_out_capture;
+ default:
+ return &asd->video_out_vf;
+ }
+ }
+ } else if (buf_type == CSS_BUFFER_TYPE_OUTPUT_FRAME) {
+ switch (asd->run_mode->val) {
+ case ATOMISP_RUN_MODE_VIDEO:
+ return &asd->video_out_video_capture;
+ case ATOMISP_RUN_MODE_PREVIEW:
+ return &asd->video_out_preview;
+ default:
+ return &asd->video_out_capture;
+ }
+ } else if (buf_type == CSS_BUFFER_TYPE_VF_OUTPUT_FRAME) {
+ if (asd->run_mode->val == ATOMISP_RUN_MODE_VIDEO)
+ return &asd->video_out_preview;
+ else
+ return &asd->video_out_vf;
+ }
+ } else if (asd->run_mode->val == ATOMISP_RUN_MODE_VIDEO) {
+ /* For online video or SDV video pipe. */
+ if (css_pipe_id == CSS_PIPE_ID_VIDEO ||
+ css_pipe_id == CSS_PIPE_ID_COPY) {
+ if (buf_type == CSS_BUFFER_TYPE_OUTPUT_FRAME)
+ return &asd->video_out_video_capture;
+ return &asd->video_out_preview;
+ }
+ } else if (asd->run_mode->val == ATOMISP_RUN_MODE_PREVIEW) {
+ /* For online preview or ZSL preview pipe. */
+ if (css_pipe_id == CSS_PIPE_ID_PREVIEW ||
+ css_pipe_id == CSS_PIPE_ID_COPY)
+ return &asd->video_out_preview;
+ }
+ /* For capture pipe. */
+ if (buf_type == CSS_BUFFER_TYPE_VF_OUTPUT_FRAME)
+ return &asd->video_out_vf;
+ return &asd->video_out_capture;
+}
+
+enum atomisp_metadata_type
+atomisp_get_metadata_type(struct atomisp_sub_device *asd,
+ enum ia_css_pipe_id pipe_id)
+{
+ if (!asd->continuous_mode->val)
+ return ATOMISP_MAIN_METADATA;
+
+ if (pipe_id == IA_CSS_PIPE_ID_CAPTURE) /* online capture pipe */
+ return ATOMISP_SEC_METADATA;
+ else
+ return ATOMISP_MAIN_METADATA;
+}
+
+void atomisp_buf_done(struct atomisp_sub_device *asd, int error,
+ enum atomisp_css_buffer_type buf_type,
+ enum atomisp_css_pipe_id css_pipe_id,
+ bool q_buffers, enum atomisp_input_stream_id stream_id)
+{
+ struct videobuf_buffer *vb = NULL;
+ struct atomisp_video_pipe *pipe = NULL;
+ struct atomisp_css_buffer buffer;
+ bool requeue = false;
+ int err;
+ unsigned long irqflags;
+ struct atomisp_css_frame *frame = NULL;
+ struct atomisp_s3a_buf *s3a_buf = NULL, *_s3a_buf_tmp;
+ struct atomisp_dis_buf *dis_buf = NULL, *_dis_buf_tmp;
+ struct atomisp_metadata_buf *md_buf = NULL, *_md_buf_tmp;
+ enum atomisp_metadata_type md_type;
+ struct atomisp_device *isp = asd->isp;
+ struct v4l2_control ctrl;
+#ifdef ISP2401
+ bool reset_wdt_timer = false;
+#endif
+
+ if (
+ buf_type != CSS_BUFFER_TYPE_METADATA &&
+ buf_type != CSS_BUFFER_TYPE_3A_STATISTICS &&
+ buf_type != CSS_BUFFER_TYPE_DIS_STATISTICS &&
+ buf_type != CSS_BUFFER_TYPE_OUTPUT_FRAME &&
+ buf_type != CSS_BUFFER_TYPE_SEC_OUTPUT_FRAME &&
+ buf_type != CSS_BUFFER_TYPE_RAW_OUTPUT_FRAME &&
+ buf_type != CSS_BUFFER_TYPE_SEC_VF_OUTPUT_FRAME &&
+ buf_type != CSS_BUFFER_TYPE_VF_OUTPUT_FRAME) {
+ dev_err(isp->dev, "%s, unsupported buffer type: %d\n",
+ __func__, buf_type);
+ return;
+ }
+
+ memset(&buffer, 0, sizeof(struct atomisp_css_buffer));
+ buffer.css_buffer.type = buf_type;
+ err = atomisp_css_dequeue_buffer(asd, stream_id, css_pipe_id,
+ buf_type, &buffer);
+ if (err) {
+ dev_err(isp->dev,
+ "atomisp_css_dequeue_buffer failed: 0x%x\n", err);
+ return;
+ }
+
+ /* need to know the atomisp pipe for frame buffers */
+ pipe = __atomisp_get_pipe(asd, stream_id, css_pipe_id, buf_type);
+ if (pipe == NULL) {
+ dev_err(isp->dev, "error getting atomisp pipe\n");
+ return;
+ }
+
+ switch (buf_type) {
+ case CSS_BUFFER_TYPE_3A_STATISTICS:
+ list_for_each_entry_safe(s3a_buf, _s3a_buf_tmp,
+ &asd->s3a_stats_in_css, list) {
+ if (s3a_buf->s3a_data ==
+ buffer.css_buffer.data.stats_3a) {
+ list_del_init(&s3a_buf->list);
+ list_add_tail(&s3a_buf->list,
+ &asd->s3a_stats_ready);
+ break;
+ }
+ }
+
+ asd->s3a_bufs_in_css[css_pipe_id]--;
+ atomisp_3a_stats_ready_event(asd, buffer.css_buffer.exp_id);
+ dev_dbg(isp->dev, "%s: s3a stat with exp_id %d is ready\n",
+ __func__, s3a_buf->s3a_data->exp_id);
+ break;
+ case CSS_BUFFER_TYPE_METADATA:
+ if (error)
+ break;
+
+ md_type = atomisp_get_metadata_type(asd, css_pipe_id);
+ list_for_each_entry_safe(md_buf, _md_buf_tmp,
+ &asd->metadata_in_css[md_type], list) {
+ if (md_buf->metadata ==
+ buffer.css_buffer.data.metadata) {
+ list_del_init(&md_buf->list);
+ list_add_tail(&md_buf->list,
+ &asd->metadata_ready[md_type]);
+ break;
+ }
+ }
+ asd->metadata_bufs_in_css[stream_id][css_pipe_id]--;
+ atomisp_metadata_ready_event(asd, md_type);
+ dev_dbg(isp->dev, "%s: metadata with exp_id %d is ready\n",
+ __func__, md_buf->metadata->exp_id);
+ break;
+ case CSS_BUFFER_TYPE_DIS_STATISTICS:
+ list_for_each_entry_safe(dis_buf, _dis_buf_tmp,
+ &asd->dis_stats_in_css, list) {
+ if (dis_buf->dis_data ==
+ buffer.css_buffer.data.stats_dvs) {
+ spin_lock_irqsave(&asd->dis_stats_lock,
+ irqflags);
+ list_del_init(&dis_buf->list);
+ list_add(&dis_buf->list, &asd->dis_stats);
+ asd->params.dis_proj_data_valid = true;
+ spin_unlock_irqrestore(&asd->dis_stats_lock,
+ irqflags);
+ break;
+ }
+ }
+ asd->dis_bufs_in_css--;
+ dev_dbg(isp->dev, "%s: dis stat with exp_id %d is ready\n",
+ __func__, dis_buf->dis_data->exp_id);
+ break;
+ case CSS_BUFFER_TYPE_VF_OUTPUT_FRAME:
+ case CSS_BUFFER_TYPE_SEC_VF_OUTPUT_FRAME:
+#ifdef ISP2401
+ reset_wdt_timer = true;
+#endif
+ pipe->buffers_in_css--;
+ frame = buffer.css_buffer.data.frame;
+ if (!frame) {
+ WARN_ON(1);
+ break;
+ }
+ if (!frame->valid)
+ error = true;
+
+ /* FIXME:
+		 * YUVPP doesn't set the postview exp_id correctly in SDV mode.
+		 * This is a WORKAROUND to set exp_id. See HSDES-1503911606.
+ */
+ if (IS_BYT && buf_type == CSS_BUFFER_TYPE_SEC_VF_OUTPUT_FRAME &&
+ asd->continuous_mode->val && ATOMISP_USE_YUVPP(asd))
+ frame->exp_id = (asd->postview_exp_id++) %
+ (ATOMISP_MAX_EXP_ID + 1);
+
+ dev_dbg(isp->dev, "%s: vf frame with exp_id %d is ready\n",
+ __func__, frame->exp_id);
+ if (asd->params.flash_state == ATOMISP_FLASH_ONGOING) {
+ if (frame->flash_state
+ == CSS_FRAME_FLASH_STATE_PARTIAL)
+ dev_dbg(isp->dev, "%s thumb partially flashed\n",
+ __func__);
+ else if (frame->flash_state
+ == CSS_FRAME_FLASH_STATE_FULL)
+ dev_dbg(isp->dev, "%s thumb completely flashed\n",
+ __func__);
+ else
+ dev_dbg(isp->dev, "%s thumb no flash in this frame\n",
+ __func__);
+ }
+ vb = atomisp_css_frame_to_vbuf(pipe, frame);
+ WARN_ON(!vb);
+ if (vb)
+ pipe->frame_config_id[vb->i] = frame->isp_config_id;
+ if (css_pipe_id == IA_CSS_PIPE_ID_CAPTURE &&
+ asd->pending_capture_request > 0) {
+ err = atomisp_css_offline_capture_configure(asd,
+ asd->params.offline_parm.num_captures,
+ asd->params.offline_parm.skip_frames,
+ asd->params.offline_parm.offset);
+#ifndef ISP2401
+ asd->pending_capture_request--;
+ dev_dbg(isp->dev, "Trigger capture again for new buffer. err=%d\n",
+ err);
+#else
+ asd->pending_capture_request--;
+ asd->re_trigger_capture = false;
+ dev_dbg(isp->dev, "Trigger capture again for new buffer. err=%d\n",
+ err);
+ } else {
+ asd->re_trigger_capture = true;
+ }
+#endif
+ }
+ break;
+ case CSS_BUFFER_TYPE_OUTPUT_FRAME:
+ case CSS_BUFFER_TYPE_SEC_OUTPUT_FRAME:
+#ifdef ISP2401
+ reset_wdt_timer = true;
+#endif
+ pipe->buffers_in_css--;
+ frame = buffer.css_buffer.data.frame;
+ if (!frame) {
+ WARN_ON(1);
+ break;
+ }
+
+ if (!frame->valid)
+ error = true;
+
+ /* FIXME:
+		 * YUVPP doesn't set the preview exp_id correctly in ZSL mode.
+		 * This is a WORKAROUND to set exp_id. See HSDES-1503911606.
+ */
+ if (IS_BYT && buf_type == CSS_BUFFER_TYPE_SEC_OUTPUT_FRAME &&
+ asd->continuous_mode->val && ATOMISP_USE_YUVPP(asd))
+ frame->exp_id = (asd->preview_exp_id++) %
+ (ATOMISP_MAX_EXP_ID + 1);
+
+ dev_dbg(isp->dev, "%s: main frame with exp_id %d is ready\n",
+ __func__, frame->exp_id);
+ vb = atomisp_css_frame_to_vbuf(pipe, frame);
+ if (!vb) {
+ WARN_ON(1);
+ break;
+ }
+
+ /* free the parameters */
+ if (pipe->frame_params[vb->i]) {
+ if (asd->params.dvs_6axis ==
+ pipe->frame_params[vb->i]->params.dvs_6axis)
+ asd->params.dvs_6axis = NULL;
+ atomisp_free_css_parameters(
+ &pipe->frame_params[vb->i]->params);
+ atomisp_kernel_free(pipe->frame_params[vb->i]);
+ pipe->frame_params[vb->i] = NULL;
+ }
+
+ pipe->frame_config_id[vb->i] = frame->isp_config_id;
+ ctrl.id = V4L2_CID_FLASH_MODE;
+ if (asd->params.flash_state == ATOMISP_FLASH_ONGOING) {
+ if (frame->flash_state
+ == CSS_FRAME_FLASH_STATE_PARTIAL) {
+ asd->frame_status[vb->i] =
+ ATOMISP_FRAME_STATUS_FLASH_PARTIAL;
+ dev_dbg(isp->dev, "%s partially flashed\n",
+ __func__);
+ } else if (frame->flash_state
+ == CSS_FRAME_FLASH_STATE_FULL) {
+ asd->frame_status[vb->i] =
+ ATOMISP_FRAME_STATUS_FLASH_EXPOSED;
+ asd->params.num_flash_frames--;
+ dev_dbg(isp->dev, "%s completely flashed\n",
+ __func__);
+ } else {
+ asd->frame_status[vb->i] =
+ ATOMISP_FRAME_STATUS_OK;
+ dev_dbg(isp->dev,
+ "%s no flash in this frame\n",
+ __func__);
+ }
+
+ /* Check if flashing sequence is done */
+ if (asd->frame_status[vb->i] ==
+ ATOMISP_FRAME_STATUS_FLASH_EXPOSED)
+ asd->params.flash_state = ATOMISP_FLASH_DONE;
+ } else if (isp->flash) {
+ if (v4l2_g_ctrl(isp->flash->ctrl_handler, &ctrl) ==
+ 0 && ctrl.value == ATOMISP_FLASH_MODE_TORCH) {
+ ctrl.id = V4L2_CID_FLASH_TORCH_INTENSITY;
+ if (v4l2_g_ctrl(isp->flash->ctrl_handler, &ctrl)
+ == 0 && ctrl.value > 0) {
+ asd->frame_status[vb->i] =
+ ATOMISP_FRAME_STATUS_FLASH_EXPOSED;
+ } else {
+ asd->frame_status[vb->i] =
+ ATOMISP_FRAME_STATUS_OK;
+ }
+ } else
+ asd->frame_status[vb->i] =
+ ATOMISP_FRAME_STATUS_OK;
+ } else {
+ asd->frame_status[vb->i] = ATOMISP_FRAME_STATUS_OK;
+ }
+
+ asd->params.last_frame_status = asd->frame_status[vb->i];
+
+ if (asd->continuous_mode->val) {
+ if (css_pipe_id == CSS_PIPE_ID_PREVIEW ||
+ css_pipe_id == CSS_PIPE_ID_VIDEO) {
+ asd->latest_preview_exp_id = frame->exp_id;
+ } else if (css_pipe_id ==
+ CSS_PIPE_ID_CAPTURE) {
+ if (asd->run_mode->val ==
+ ATOMISP_RUN_MODE_VIDEO)
+ dev_dbg(isp->dev, "SDV capture raw buffer id: %u\n",
+ frame->exp_id);
+ else
+ dev_dbg(isp->dev, "ZSL capture raw buffer id: %u\n",
+ frame->exp_id);
+ }
+ }
+ /*
+		 * Only when the raw buffer lock is enabled and continuous
+		 * mode is active: in the preview/video pipes each buffer
+		 * is locked automatically, so record it here.
+ */
+ if (((css_pipe_id == CSS_PIPE_ID_PREVIEW) ||
+ (css_pipe_id == CSS_PIPE_ID_VIDEO)) &&
+ asd->enable_raw_buffer_lock->val &&
+ asd->continuous_mode->val) {
+ atomisp_set_raw_buffer_bitmap(asd, frame->exp_id);
+ WARN_ON(frame->exp_id > ATOMISP_MAX_EXP_ID);
+ }
+
+ if (asd->params.css_update_params_needed) {
+ atomisp_apply_css_parameters(asd,
+ &asd->params.css_param);
+ if (asd->params.css_param.update_flag.dz_config)
+ atomisp_css_set_dz_config(asd,
+ &asd->params.css_param.dz_config);
+ /* New global dvs 6axis config should be blocked
+ * here if there's a buffer with per-frame parameters
+ * pending in CSS frame buffer queue.
+			 * This avoids zoom jitter, since global
+			 * parameters take effect immediately while
+			 * per-frame parameters only take effect after the
+			 * previous buffers in CSS have been processed.
+ */
+ if (asd->params.dvs_6axis)
+ atomisp_css_set_dvs_6axis(asd,
+ asd->params.dvs_6axis);
+ else
+ asd->params.css_update_params_needed = false;
+			/* The update flag should not be cleared here
+			 * since it is still going to be used to build up
+			 * the following per-frame parameters.
+			 * This introduces extra copy work since each
+			 * time global parameters are updated, the whole
+			 * parameter set is applied.
+ * FIXME: A new set of parameter copy functions can
+ * be added to make up per-frame parameters based on
+ * solid structures stored in asd->params.css_param
+ * instead of using shadow pointers in update flag.
+ */
+ atomisp_css_update_isp_params(asd);
+ }
+ break;
+ default:
+ break;
+ }
+ if (vb) {
+ get_buf_timestamp(&vb->ts);
+ vb->field_count = atomic_read(&asd->sequence) << 1;
+		/* Mark the videobuffer as done for dequeue */
+ spin_lock_irqsave(&pipe->irq_lock, irqflags);
+ vb->state = !error ? VIDEOBUF_DONE : VIDEOBUF_ERROR;
+ spin_unlock_irqrestore(&pipe->irq_lock, irqflags);
+
+ /*
+		 * Frame capture is done; wake up any process blocked on
+		 * the currently active buffer, possibly held in
+		 * videobuf_dqbuf().
+ */
+ wake_up(&vb->done);
+ }
+#ifdef ISP2401
+ atomic_set(&pipe->wdt_count, 0);
+#endif
+ /*
+ * Requeue should only be done for 3a and dis buffers.
+ * Queue/dequeue order will change if driver recycles image buffers.
+ */
+ if (requeue) {
+ err = atomisp_css_queue_buffer(asd,
+ stream_id, css_pipe_id,
+ buf_type, &buffer);
+ if (err)
+ dev_err(isp->dev, "%s, q to css fails: %d\n",
+ __func__, err);
+ return;
+ }
+ if (!error && q_buffers)
+ atomisp_qbuffers_to_css(asd);
+#ifdef ISP2401
+
+ /* If there are no buffers queued then
+ * delete wdt timer. */
+ if (asd->streaming != ATOMISP_DEVICE_STREAMING_ENABLED)
+ return;
+ if (!atomisp_buffers_queued_pipe(pipe))
+ atomisp_wdt_stop_pipe(pipe, false);
+ else if (reset_wdt_timer)
+ /* SOF irq should not reset wdt timer. */
+ atomisp_wdt_refresh_pipe(pipe,
+ ATOMISP_WDT_KEEP_CURRENT_DELAY);
+#endif
+}
+
+void atomisp_delayed_init_work(struct work_struct *work)
+{
+ struct atomisp_sub_device *asd = container_of(work,
+ struct atomisp_sub_device,
+ delayed_init_work);
+	/*
+	 * For SoC cameras, the yuvpp pipe is used and continuous mode is not
+	 * supported.
+	 */
+ if (!ATOMISP_USE_YUVPP(asd)) {
+ struct v4l2_event event = {0};
+
+ atomisp_css_allocate_continuous_frames(false, asd);
+ atomisp_css_update_continuous_frames(asd);
+
+ event.type = V4L2_EVENT_ATOMISP_RAW_BUFFERS_ALLOC_DONE;
+ v4l2_event_queue(asd->subdev.devnode, &event);
+ }
+
+ /* signal streamon after delayed init is done */
+ asd->delayed_init = ATOMISP_DELAYED_INIT_DONE;
+ complete(&asd->init_done);
+}
+
+static void __atomisp_css_recover(struct atomisp_device *isp, bool isp_timeout)
+{
+ enum atomisp_css_pipe_id css_pipe_id;
+ bool stream_restart[MAX_STREAM_NUM] = {0};
+ bool depth_mode = false;
+ int i, ret, depth_cnt = 0;
+
+ if (!isp->sw_contex.file_input)
+ atomisp_css_irq_enable(isp,
+ CSS_IRQ_INFO_CSS_RECEIVER_SOF, false);
+
+ BUG_ON(isp->num_of_streams > MAX_STREAM_NUM);
+
+ for (i = 0; i < isp->num_of_streams; i++) {
+ struct atomisp_sub_device *asd = &isp->asd[i];
+ struct ia_css_pipeline *acc_pipeline;
+ struct ia_css_pipe *acc_pipe = NULL;
+
+ if (asd->streaming != ATOMISP_DEVICE_STREAMING_ENABLED &&
+ !asd->stream_prepared)
+ continue;
+
+ /*
+ * AtomISP::waitStageUpdate is blocked when WDT happens.
+ * By calling acc_done() for all loaded fw_handles,
+ * HAL will be unblocked.
+ */
+ acc_pipe = asd->stream_env[i].pipes[CSS_PIPE_ID_ACC];
+ if (acc_pipe != NULL) {
+ acc_pipeline = ia_css_pipe_get_pipeline(acc_pipe);
+ if (acc_pipeline) {
+ struct ia_css_pipeline_stage *stage;
+ for (stage = acc_pipeline->stages; stage;
+ stage = stage->next) {
+ const struct ia_css_fw_info *fw;
+ fw = stage->firmware;
+ atomisp_acc_done(asd, fw->handle);
+ }
+ }
+ }
+
+ depth_cnt++;
+
+ if (asd->delayed_init == ATOMISP_DELAYED_INIT_QUEUED)
+ cancel_work_sync(&asd->delayed_init_work);
+
+ complete(&asd->init_done);
+ asd->delayed_init = ATOMISP_DELAYED_INIT_NOT_QUEUED;
+
+ stream_restart[asd->index] = true;
+
+ asd->streaming = ATOMISP_DEVICE_STREAMING_STOPPING;
+
+ /* stream off sensor */
+ ret = v4l2_subdev_call(
+ isp->inputs[asd->input_curr].
+ camera, video, s_stream, 0);
+ if (ret)
+ dev_warn(isp->dev,
+ "can't stop streaming on sensor!\n");
+
+ atomisp_acc_unload_extensions(asd);
+
+ atomisp_clear_css_buffer_counters(asd);
+
+ css_pipe_id = atomisp_get_css_pipe_id(asd);
+ atomisp_css_stop(asd, css_pipe_id, true);
+
+ asd->streaming = ATOMISP_DEVICE_STREAMING_DISABLED;
+
+ asd->preview_exp_id = 1;
+ asd->postview_exp_id = 1;
+ /* notify HAL the CSS reset */
+ dev_dbg(isp->dev,
+ "send reset event to %s\n", asd->subdev.devnode->name);
+ atomisp_reset_event(asd);
+ }
+
+ /* clear irq */
+ disable_isp_irq(hrt_isp_css_irq_sp);
+ clear_isp_irq(hrt_isp_css_irq_sp);
+
+ /* Set the SRSE to 3 before resetting */
+ pci_write_config_dword(isp->pdev, PCI_I_CONTROL, isp->saved_regs.i_control |
+ MRFLD_PCI_I_CONTROL_SRSE_RESET_MASK);
+
+ /* reset ISP and restore its state */
+ isp->isp_timeout = true;
+ atomisp_reset(isp);
+ isp->isp_timeout = false;
+
+ if (!isp_timeout) {
+ for (i = 0; i < isp->num_of_streams; i++) {
+ if (isp->asd[i].depth_mode->val)
+ return;
+ }
+ }
+
+ for (i = 0; i < isp->num_of_streams; i++) {
+ struct atomisp_sub_device *asd = &isp->asd[i];
+
+ if (!stream_restart[i])
+ continue;
+
+ if (isp->inputs[asd->input_curr].type != FILE_INPUT)
+ atomisp_css_input_set_mode(asd,
+ CSS_INPUT_MODE_SENSOR);
+
+ css_pipe_id = atomisp_get_css_pipe_id(asd);
+ if (atomisp_css_start(asd, css_pipe_id, true))
+ dev_warn(isp->dev,
+				 "start SP failed, so streaming is not enabled!\n");
+ else
+ asd->streaming = ATOMISP_DEVICE_STREAMING_ENABLED;
+
+ atomisp_csi2_configure(asd);
+ }
+
+ if (!isp->sw_contex.file_input) {
+ atomisp_css_irq_enable(isp, CSS_IRQ_INFO_CSS_RECEIVER_SOF,
+ atomisp_css_valid_sof(isp));
+
+ if (atomisp_freq_scaling(isp, ATOMISP_DFS_MODE_AUTO, true) < 0)
+ dev_dbg(isp->dev, "dfs failed!\n");
+ } else {
+ if (atomisp_freq_scaling(isp, ATOMISP_DFS_MODE_MAX, true) < 0)
+ dev_dbg(isp->dev, "dfs failed!\n");
+ }
+
+ for (i = 0; i < isp->num_of_streams; i++) {
+ struct atomisp_sub_device *asd;
+
+ asd = &isp->asd[i];
+
+ if (!stream_restart[i])
+ continue;
+
+ if (asd->continuous_mode->val &&
+ asd->delayed_init == ATOMISP_DELAYED_INIT_NOT_QUEUED) {
+ reinit_completion(&asd->init_done);
+ asd->delayed_init = ATOMISP_DELAYED_INIT_QUEUED;
+ queue_work(asd->delayed_init_workq,
+ &asd->delayed_init_work);
+ }
+ /*
+ * dequeueing buffers is not needed. CSS will recycle
+ * buffers that it has.
+ */
+ atomisp_flush_bufs_and_wakeup(asd);
+
+ /* Requeue unprocessed per-frame parameters. */
+ atomisp_recover_params_queue(&asd->video_out_capture);
+ atomisp_recover_params_queue(&asd->video_out_preview);
+ atomisp_recover_params_queue(&asd->video_out_video_capture);
+
+ if ((asd->depth_mode->val) &&
+ (depth_cnt == ATOMISP_DEPTH_SENSOR_STREAMON_COUNT)) {
+ depth_mode = true;
+ continue;
+ }
+
+ ret = v4l2_subdev_call(
+ isp->inputs[asd->input_curr].camera, video,
+ s_stream, 1);
+ if (ret)
+ dev_warn(isp->dev,
+ "can't start streaming on sensor!\n");
+
+ }
+
+ if (depth_mode) {
+ if (atomisp_stream_on_master_slave_sensor(isp, true))
+ dev_warn(isp->dev,
+ "master slave sensor stream on failed!\n");
+ }
+}
+
+void atomisp_wdt_work(struct work_struct *work)
+{
+ struct atomisp_device *isp = container_of(work, struct atomisp_device,
+ wdt_work);
+ int i;
+#ifdef ISP2401
+ unsigned int pipe_wdt_cnt[MAX_STREAM_NUM][4] = { {0} };
+ bool css_recover = true;
+#endif
+
+ rt_mutex_lock(&isp->mutex);
+ if (!atomisp_streaming_count(isp)) {
+ atomic_set(&isp->wdt_work_queued, 0);
+ rt_mutex_unlock(&isp->mutex);
+ return;
+ }
+
+#ifndef ISP2401
+ dev_err(isp->dev, "timeout %d of %d\n",
+ atomic_read(&isp->wdt_count) + 1,
+ ATOMISP_ISP_MAX_TIMEOUT_COUNT);
+#else
+ for (i = 0; i < isp->num_of_streams; i++) {
+ struct atomisp_sub_device *asd = &isp->asd[i];
+ pipe_wdt_cnt[i][0] +=
+ atomic_read(&asd->video_out_capture.wdt_count);
+ pipe_wdt_cnt[i][1] +=
+ atomic_read(&asd->video_out_vf.wdt_count);
+ pipe_wdt_cnt[i][2] +=
+ atomic_read(&asd->video_out_preview.wdt_count);
+ pipe_wdt_cnt[i][3] +=
+ atomic_read(&asd->video_out_video_capture.wdt_count);
+		css_recover =
+			pipe_wdt_cnt[i][0] <= ATOMISP_ISP_MAX_TIMEOUT_COUNT &&
+			pipe_wdt_cnt[i][1] <= ATOMISP_ISP_MAX_TIMEOUT_COUNT &&
+			pipe_wdt_cnt[i][2] <= ATOMISP_ISP_MAX_TIMEOUT_COUNT &&
+			pipe_wdt_cnt[i][3] <= ATOMISP_ISP_MAX_TIMEOUT_COUNT;
+ dev_err(isp->dev, "pipe on asd%d timeout cnt: (%d, %d, %d, %d) of %d, recover = %d\n",
+ asd->index, pipe_wdt_cnt[i][0], pipe_wdt_cnt[i][1],
+ pipe_wdt_cnt[i][2], pipe_wdt_cnt[i][3],
+ ATOMISP_ISP_MAX_TIMEOUT_COUNT, css_recover);
+ }
+#endif
+
+#ifndef ISP2401
+ if (atomic_inc_return(&isp->wdt_count) <
+ ATOMISP_ISP_MAX_TIMEOUT_COUNT) {
+#else
+ if (css_recover) {
+#endif
+ unsigned int old_dbglevel = dbg_level;
+ atomisp_css_debug_dump_sp_sw_debug_info();
+ atomisp_css_debug_dump_debug_info(__func__);
+ dbg_level = old_dbglevel;
+ for (i = 0; i < isp->num_of_streams; i++) {
+ struct atomisp_sub_device *asd = &isp->asd[i];
+
+ if (asd->streaming != ATOMISP_DEVICE_STREAMING_ENABLED)
+ continue;
+ dev_err(isp->dev, "%s, vdev %s buffers in css: %d\n",
+ __func__,
+ asd->video_out_capture.vdev.name,
+ asd->video_out_capture.
+ buffers_in_css);
+ dev_err(isp->dev,
+ "%s, vdev %s buffers in css: %d\n",
+ __func__,
+ asd->video_out_vf.vdev.name,
+ asd->video_out_vf.
+ buffers_in_css);
+ dev_err(isp->dev,
+ "%s, vdev %s buffers in css: %d\n",
+ __func__,
+ asd->video_out_preview.vdev.name,
+ asd->video_out_preview.
+ buffers_in_css);
+ dev_err(isp->dev,
+ "%s, vdev %s buffers in css: %d\n",
+ __func__,
+ asd->video_out_video_capture.vdev.name,
+ asd->video_out_video_capture.
+ buffers_in_css);
+ dev_err(isp->dev,
+ "%s, s3a buffers in css preview pipe:%d\n",
+ __func__,
+ asd->s3a_bufs_in_css[CSS_PIPE_ID_PREVIEW]);
+ dev_err(isp->dev,
+ "%s, s3a buffers in css capture pipe:%d\n",
+ __func__,
+ asd->s3a_bufs_in_css[CSS_PIPE_ID_CAPTURE]);
+ dev_err(isp->dev,
+ "%s, s3a buffers in css video pipe:%d\n",
+ __func__,
+ asd->s3a_bufs_in_css[CSS_PIPE_ID_VIDEO]);
+ dev_err(isp->dev,
+ "%s, dis buffers in css: %d\n",
+ __func__, asd->dis_bufs_in_css);
+ dev_err(isp->dev,
+ "%s, metadata buffers in css preview pipe:%d\n",
+ __func__,
+ asd->metadata_bufs_in_css
+ [ATOMISP_INPUT_STREAM_GENERAL]
+ [CSS_PIPE_ID_PREVIEW]);
+ dev_err(isp->dev,
+ "%s, metadata buffers in css capture pipe:%d\n",
+ __func__,
+ asd->metadata_bufs_in_css
+ [ATOMISP_INPUT_STREAM_GENERAL]
+ [CSS_PIPE_ID_CAPTURE]);
+ dev_err(isp->dev,
+ "%s, metadata buffers in css video pipe:%d\n",
+ __func__,
+ asd->metadata_bufs_in_css
+ [ATOMISP_INPUT_STREAM_GENERAL]
+ [CSS_PIPE_ID_VIDEO]);
+ if (asd->enable_raw_buffer_lock->val) {
+ unsigned int j;
+
+ dev_err(isp->dev, "%s, raw_buffer_locked_count %d\n",
+ __func__, asd->raw_buffer_locked_count);
+ for (j = 0; j <= ATOMISP_MAX_EXP_ID/32; j++)
+ dev_err(isp->dev, "%s, raw_buffer_bitmap[%d]: 0x%x\n",
+ __func__, j,
+ asd->raw_buffer_bitmap[j]);
+ }
+ }
+
+ /*sh_css_dump_sp_state();*/
+ /*sh_css_dump_isp_state();*/
+ } else {
+ for (i = 0; i < isp->num_of_streams; i++) {
+ struct atomisp_sub_device *asd = &isp->asd[i];
+ if (asd->streaming ==
+ ATOMISP_DEVICE_STREAMING_ENABLED) {
+ atomisp_clear_css_buffer_counters(asd);
+ atomisp_flush_bufs_and_wakeup(asd);
+ complete(&asd->init_done);
+ }
+#ifdef ISP2401
+ atomisp_wdt_stop(asd, false);
+#endif
+ }
+
+#ifndef ISP2401
+ atomic_set(&isp->wdt_count, 0);
+#endif
+ isp->isp_fatal_error = true;
+ atomic_set(&isp->wdt_work_queued, 0);
+
+ rt_mutex_unlock(&isp->mutex);
+ return;
+ }
+
+ __atomisp_css_recover(isp, true);
+#ifdef ISP2401
+ for (i = 0; i < isp->num_of_streams; i++) {
+ struct atomisp_sub_device *asd = &isp->asd[i];
+ if (asd->streaming ==
+ ATOMISP_DEVICE_STREAMING_ENABLED) {
+ atomisp_wdt_refresh(asd,
+ isp->sw_contex.file_input ?
+ ATOMISP_ISP_FILE_TIMEOUT_DURATION :
+ ATOMISP_ISP_TIMEOUT_DURATION);
+ }
+ }
+#endif
+ dev_err(isp->dev, "timeout recovery handling done\n");
+ atomic_set(&isp->wdt_work_queued, 0);
+
+ rt_mutex_unlock(&isp->mutex);
+}
+
+void atomisp_css_flush(struct atomisp_device *isp)
+{
+ int i;
+
+ if (!atomisp_streaming_count(isp))
+ return;
+
+ /* Disable wdt */
+ for (i = 0; i < isp->num_of_streams; i++) {
+ struct atomisp_sub_device *asd = &isp->asd[i];
+ atomisp_wdt_stop(asd, true);
+ }
+
+ /* Start recover */
+ __atomisp_css_recover(isp, false);
+ /* Restore wdt */
+ for (i = 0; i < isp->num_of_streams; i++) {
+ struct atomisp_sub_device *asd = &isp->asd[i];
+
+ if (asd->streaming !=
+ ATOMISP_DEVICE_STREAMING_ENABLED)
+ continue;
+
+ atomisp_wdt_refresh(asd,
+ isp->sw_contex.file_input ?
+ ATOMISP_ISP_FILE_TIMEOUT_DURATION :
+ ATOMISP_ISP_TIMEOUT_DURATION);
+ }
+ dev_dbg(isp->dev, "atomisp css flush done\n");
+}
+
+#ifndef ISP2401
+void atomisp_wdt(unsigned long isp_addr)
+#else
+void atomisp_wdt(unsigned long pipe_addr)
+#endif
+{
+#ifndef ISP2401
+ struct atomisp_device *isp = (struct atomisp_device *)isp_addr;
+#else
+ struct atomisp_video_pipe *pipe =
+ (struct atomisp_video_pipe *)pipe_addr;
+ struct atomisp_sub_device *asd = pipe->asd;
+ struct atomisp_device *isp = asd->isp;
+#endif
+
+#ifdef ISP2401
+ atomic_inc(&pipe->wdt_count);
+ dev_warn(isp->dev,
+ "[WARNING]asd %d pipe %s ISP timeout %d!\n",
+ asd->index, pipe->vdev.name,
+ atomic_read(&pipe->wdt_count));
+#endif
+ if (atomic_read(&isp->wdt_work_queued)) {
+ dev_dbg(isp->dev, "ISP watchdog was put into workqueue\n");
+ return;
+ }
+ atomic_set(&isp->wdt_work_queued, 1);
+ queue_work(isp->wdt_work_queue, &isp->wdt_work);
+}
+
+#ifndef ISP2401
+void atomisp_wdt_refresh(struct atomisp_sub_device *asd, unsigned int delay)
+#else
+void atomisp_wdt_refresh_pipe(struct atomisp_video_pipe *pipe,
+ unsigned int delay)
+#endif
+{
+ unsigned long next;
+
+ if (delay != ATOMISP_WDT_KEEP_CURRENT_DELAY)
+#ifndef ISP2401
+ asd->wdt_duration = delay;
+#else
+ pipe->wdt_duration = delay;
+#endif
+
+#ifndef ISP2401
+ next = jiffies + asd->wdt_duration;
+#else
+ next = jiffies + pipe->wdt_duration;
+#endif
+
+	/* Override next if it has been pushed beyond the "next" time */
+#ifndef ISP2401
+ if (atomisp_is_wdt_running(asd) && time_after(asd->wdt_expires, next))
+ next = asd->wdt_expires;
+#else
+ if (atomisp_is_wdt_running(pipe) && time_after(pipe->wdt_expires, next))
+ next = pipe->wdt_expires;
+#endif
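+	/*
+	 * Example (hypothetical numbers): if the timer is already set to
+	 * expire 3 seconds from now and the newly computed "next" is only
+	 * 2 seconds away, the later 3-second expiry is kept.
+	 */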
+
+#ifndef ISP2401
+ asd->wdt_expires = next;
+#else
+ pipe->wdt_expires = next;
+#endif
+
+#ifndef ISP2401
+ if (atomisp_is_wdt_running(asd))
+ dev_dbg(asd->isp->dev, "WDT will hit after %d ms\n",
+ ((int)(next - jiffies) * 1000 / HZ));
+#else
+ if (atomisp_is_wdt_running(pipe))
+ dev_dbg(pipe->asd->isp->dev, "WDT will hit after %d ms (%s)\n",
+ ((int)(next - jiffies) * 1000 / HZ), pipe->vdev.name);
+#endif
+ else
+#ifndef ISP2401
+ dev_dbg(asd->isp->dev, "WDT starts with %d ms period\n",
+ ((int)(next - jiffies) * 1000 / HZ));
+#else
+ dev_dbg(pipe->asd->isp->dev, "WDT starts with %d ms period (%s)\n",
+ ((int)(next - jiffies) * 1000 / HZ), pipe->vdev.name);
+#endif
+
+#ifndef ISP2401
+ mod_timer(&asd->wdt, next);
+ atomic_set(&asd->isp->wdt_count, 0);
+#else
+ mod_timer(&pipe->wdt, next);
+#endif
+}
+
+#ifndef ISP2401
+void atomisp_wdt_stop(struct atomisp_sub_device *asd, bool sync)
+#else
+void atomisp_wdt_refresh(struct atomisp_sub_device *asd, unsigned int delay)
+{
+ dev_dbg(asd->isp->dev, "WDT refresh all:\n");
+ if (atomisp_is_wdt_running(&asd->video_out_capture))
+ atomisp_wdt_refresh_pipe(&asd->video_out_capture, delay);
+ if (atomisp_is_wdt_running(&asd->video_out_preview))
+ atomisp_wdt_refresh_pipe(&asd->video_out_preview, delay);
+ if (atomisp_is_wdt_running(&asd->video_out_vf))
+ atomisp_wdt_refresh_pipe(&asd->video_out_vf, delay);
+ if (atomisp_is_wdt_running(&asd->video_out_video_capture))
+ atomisp_wdt_refresh_pipe(&asd->video_out_video_capture, delay);
+}
+
+
+void atomisp_wdt_stop_pipe(struct atomisp_video_pipe *pipe, bool sync)
+#endif
+{
+#ifndef ISP2401
+ dev_dbg(asd->isp->dev, "WDT stop\n");
+#else
+ if (!atomisp_is_wdt_running(pipe))
+ return;
+
+ dev_dbg(pipe->asd->isp->dev,
+ "WDT stop asd %d (%s)\n", pipe->asd->index, pipe->vdev.name);
+
+#endif
+ if (sync) {
+#ifndef ISP2401
+ del_timer_sync(&asd->wdt);
+ cancel_work_sync(&asd->isp->wdt_work);
+#else
+ del_timer_sync(&pipe->wdt);
+ cancel_work_sync(&pipe->asd->isp->wdt_work);
+#endif
+ } else {
+#ifndef ISP2401
+ del_timer(&asd->wdt);
+#else
+ del_timer(&pipe->wdt);
+#endif
+ }
+}
+
+#ifndef ISP2401
+void atomisp_wdt_start(struct atomisp_sub_device *asd)
+#else
+void atomisp_wdt_stop(struct atomisp_sub_device *asd, bool sync)
+{
+ dev_dbg(asd->isp->dev, "WDT stop all:\n");
+ atomisp_wdt_stop_pipe(&asd->video_out_capture, sync);
+ atomisp_wdt_stop_pipe(&asd->video_out_preview, sync);
+ atomisp_wdt_stop_pipe(&asd->video_out_vf, sync);
+ atomisp_wdt_stop_pipe(&asd->video_out_video_capture, sync);
+}
+
+void atomisp_wdt_start(struct atomisp_video_pipe *pipe)
+#endif
+{
+#ifndef ISP2401
+ atomisp_wdt_refresh(asd, ATOMISP_ISP_TIMEOUT_DURATION);
+#else
+ atomisp_wdt_refresh_pipe(pipe, ATOMISP_ISP_TIMEOUT_DURATION);
+#endif
+}
+
+void atomisp_setup_flash(struct atomisp_sub_device *asd)
+{
+ struct atomisp_device *isp = asd->isp;
+ struct v4l2_control ctrl;
+
+ if (isp->flash == NULL)
+ return;
+
+ if (asd->params.flash_state != ATOMISP_FLASH_REQUESTED &&
+ asd->params.flash_state != ATOMISP_FLASH_DONE)
+ return;
+
+ if (asd->params.num_flash_frames) {
+ /* make sure the timeout is set before setting flash mode */
+ ctrl.id = V4L2_CID_FLASH_TIMEOUT;
+ ctrl.value = FLASH_TIMEOUT;
+
+ if (v4l2_s_ctrl(NULL, isp->flash->ctrl_handler, &ctrl)) {
+ dev_err(isp->dev, "flash timeout configure failed\n");
+ return;
+ }
+
+ atomisp_css_request_flash(asd);
+ asd->params.flash_state = ATOMISP_FLASH_ONGOING;
+ } else {
+ asd->params.flash_state = ATOMISP_FLASH_IDLE;
+ }
+}
+
+irqreturn_t atomisp_isr_thread(int irq, void *isp_ptr)
+{
+ struct atomisp_device *isp = isp_ptr;
+ unsigned long flags;
+ bool frame_done_found[MAX_STREAM_NUM] = {0};
+ bool css_pipe_done[MAX_STREAM_NUM] = {0};
+ unsigned int i;
+ struct atomisp_sub_device *asd = &isp->asd[0];
+
+ dev_dbg(isp->dev, ">%s\n", __func__);
+
+ spin_lock_irqsave(&isp->lock, flags);
+
+ if (!atomisp_streaming_count(isp) && !atomisp_is_acc_enabled(isp)) {
+ spin_unlock_irqrestore(&isp->lock, flags);
+ return IRQ_HANDLED;
+ }
+
+ spin_unlock_irqrestore(&isp->lock, flags);
+
+	/*
+	 * The standard CSS2.0 API specifies the following calling sequence
+	 * for dequeuing ready buffers:
+	 * while (ia_css_dequeue_event(...)) {
+	 *	switch (event.type) {
+	 *	...
+	 *	ia_css_pipe_dequeue_buffer()
+	 *	}
+	 * }
+	 * That is, events and buffers are dequeued one after another.
+	 *
+	 * But an implementation that first dequeues all events into a FIFO
+	 * and then processes the events from the FIFO works fine in
+	 * single-stream mode, yet has problems in the multiple-stream case:
+	 * ia_css_pipe_dequeue_buffer() will not return the correct buffer
+	 * for a specific pipe, because ia_css_pipe_dequeue_buffer() does not
+	 * take an ia_css_pipe parameter.
+	 *
+	 * So, for CSS2.0, we change the code to not dequeue all events at
+	 * once; instead we dequeue one event and process it, then the next.
+	 */
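+	/*
+	 * Illustrative sketch of the one-event-at-a-time scheme described
+	 * above (handle_one_event() is a hypothetical name, not the exact
+	 * driver code); each handled event may dequeue at most one buffer:
+	 *
+	 *	while (ia_css_dequeue_event(&event)) {
+	 *		handle_one_event(&event);
+	 *	}
+	 */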
+ rt_mutex_lock(&isp->mutex);
+ if (atomisp_css_isr_thread(isp, frame_done_found, css_pipe_done))
+ goto out;
+
+ for (i = 0; i < isp->num_of_streams; i++) {
+ asd = &isp->asd[i];
+ if (asd->streaming != ATOMISP_DEVICE_STREAMING_ENABLED)
+ continue;
+ atomisp_setup_flash(asd);
+
+ }
+out:
+ rt_mutex_unlock(&isp->mutex);
+ for (i = 0; i < isp->num_of_streams; i++) {
+ asd = &isp->asd[i];
+ if (asd->streaming == ATOMISP_DEVICE_STREAMING_ENABLED
+ && css_pipe_done[asd->index]
+ && isp->sw_contex.file_input)
+ v4l2_subdev_call(isp->inputs[asd->input_curr].camera,
+ video, s_stream, 1);
+ /* FIXME! FIX ACC implementation */
+ if (asd->acc.pipeline && css_pipe_done[asd->index])
+ atomisp_css_acc_done(asd);
+ }
+ dev_dbg(isp->dev, "<%s\n", __func__);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * utils for buffer allocation/free
+ */
+
+int atomisp_get_frame_pgnr(struct atomisp_device *isp,
+ const struct atomisp_css_frame *frame, u32 *p_pgnr)
+{
+ if (!frame) {
+ dev_err(isp->dev, "%s: NULL frame pointer ERROR.\n", __func__);
+ return -EINVAL;
+ }
+
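+	/*
+	 * Number of pages needed to back the frame data, rounded up. For
+	 * example (hypothetical numbers), 3110400 data bytes with a 4096-byte
+	 * page size give DIV_ROUND_UP(3110400, 4096) = 760 pages.
+	 */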
+ *p_pgnr = DIV_ROUND_UP(frame->data_bytes, PAGE_SIZE);
+ return 0;
+}
+
+/*
+ * Get internal fmt according to V4L2 fmt
+ */
+static enum atomisp_css_frame_format
+v4l2_fmt_to_sh_fmt(u32 fmt)
+{
+ switch (fmt) {
+ case V4L2_PIX_FMT_YUV420:
+ return CSS_FRAME_FORMAT_YUV420;
+ case V4L2_PIX_FMT_YVU420:
+ return CSS_FRAME_FORMAT_YV12;
+ case V4L2_PIX_FMT_YUV422P:
+ return CSS_FRAME_FORMAT_YUV422;
+ case V4L2_PIX_FMT_YUV444:
+ return CSS_FRAME_FORMAT_YUV444;
+ case V4L2_PIX_FMT_NV12:
+ return CSS_FRAME_FORMAT_NV12;
+ case V4L2_PIX_FMT_NV21:
+ return CSS_FRAME_FORMAT_NV21;
+ case V4L2_PIX_FMT_NV16:
+ return CSS_FRAME_FORMAT_NV16;
+ case V4L2_PIX_FMT_NV61:
+ return CSS_FRAME_FORMAT_NV61;
+ case V4L2_PIX_FMT_UYVY:
+ return CSS_FRAME_FORMAT_UYVY;
+ case V4L2_PIX_FMT_YUYV:
+ return CSS_FRAME_FORMAT_YUYV;
+ case V4L2_PIX_FMT_RGB24:
+ return CSS_FRAME_FORMAT_PLANAR_RGB888;
+ case V4L2_PIX_FMT_RGB32:
+ return CSS_FRAME_FORMAT_RGBA888;
+ case V4L2_PIX_FMT_RGB565:
+ return CSS_FRAME_FORMAT_RGB565;
+ case V4L2_PIX_FMT_JPEG:
+ case V4L2_PIX_FMT_CUSTOM_M10MO_RAW:
+ return CSS_FRAME_FORMAT_BINARY_8;
+ case V4L2_PIX_FMT_SBGGR16:
+ case V4L2_PIX_FMT_SBGGR10:
+ case V4L2_PIX_FMT_SGBRG10:
+ case V4L2_PIX_FMT_SGRBG10:
+ case V4L2_PIX_FMT_SRGGB10:
+ case V4L2_PIX_FMT_SBGGR12:
+ case V4L2_PIX_FMT_SGBRG12:
+ case V4L2_PIX_FMT_SGRBG12:
+ case V4L2_PIX_FMT_SRGGB12:
+ case V4L2_PIX_FMT_SBGGR8:
+ case V4L2_PIX_FMT_SGBRG8:
+ case V4L2_PIX_FMT_SGRBG8:
+ case V4L2_PIX_FMT_SRGGB8:
+ return CSS_FRAME_FORMAT_RAW;
+ default:
+ return -EINVAL;
+ }
+}
+/*
+ * raw format match between SH format and V4L2 format
+ */
+static int raw_output_format_match_input(u32 input, u32 output)
+{
+ if ((input == CSS_FORMAT_RAW_12) &&
+ ((output == V4L2_PIX_FMT_SRGGB12) ||
+ (output == V4L2_PIX_FMT_SGRBG12) ||
+ (output == V4L2_PIX_FMT_SBGGR12) ||
+ (output == V4L2_PIX_FMT_SGBRG12)))
+ return 0;
+
+ if ((input == CSS_FORMAT_RAW_10) &&
+ ((output == V4L2_PIX_FMT_SRGGB10) ||
+ (output == V4L2_PIX_FMT_SGRBG10) ||
+ (output == V4L2_PIX_FMT_SBGGR10) ||
+ (output == V4L2_PIX_FMT_SGBRG10)))
+ return 0;
+
+ if ((input == CSS_FORMAT_RAW_8) &&
+ ((output == V4L2_PIX_FMT_SRGGB8) ||
+ (output == V4L2_PIX_FMT_SGRBG8) ||
+ (output == V4L2_PIX_FMT_SBGGR8) ||
+ (output == V4L2_PIX_FMT_SGBRG8)))
+ return 0;
+
+ if ((input == CSS_FORMAT_RAW_16) && (output == V4L2_PIX_FMT_SBGGR16))
+ return 0;
+
+ return -EINVAL;
+}
+
+static u32 get_pixel_depth(u32 pixelformat)
+{
+ switch (pixelformat) {
+ case V4L2_PIX_FMT_YUV420:
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV21:
+ case V4L2_PIX_FMT_YVU420:
+ return 12;
+ case V4L2_PIX_FMT_YUV422P:
+ case V4L2_PIX_FMT_YUYV:
+ case V4L2_PIX_FMT_UYVY:
+ case V4L2_PIX_FMT_NV16:
+ case V4L2_PIX_FMT_NV61:
+ case V4L2_PIX_FMT_RGB565:
+ case V4L2_PIX_FMT_SBGGR16:
+ case V4L2_PIX_FMT_SBGGR12:
+ case V4L2_PIX_FMT_SGBRG12:
+ case V4L2_PIX_FMT_SGRBG12:
+ case V4L2_PIX_FMT_SRGGB12:
+ case V4L2_PIX_FMT_SBGGR10:
+ case V4L2_PIX_FMT_SGBRG10:
+ case V4L2_PIX_FMT_SGRBG10:
+ case V4L2_PIX_FMT_SRGGB10:
+ return 16;
+ case V4L2_PIX_FMT_RGB24:
+ case V4L2_PIX_FMT_YUV444:
+ return 24;
+ case V4L2_PIX_FMT_RGB32:
+ return 32;
+ case V4L2_PIX_FMT_JPEG:
+ case V4L2_PIX_FMT_CUSTOM_M10MO_RAW:
+ case V4L2_PIX_FMT_SBGGR8:
+ case V4L2_PIX_FMT_SGBRG8:
+ case V4L2_PIX_FMT_SGRBG8:
+ case V4L2_PIX_FMT_SRGGB8:
+ return 8;
+ default:
+ return 8 * 2; /* raw type now */
+ }
+}
+
+bool atomisp_is_mbuscode_raw(uint32_t code)
+{
+ return code >= 0x3000 && code < 0x4000;
+}
+
+/*
+ * ISP features control function
+ */
+
+/*
+ * Set ISP capture mode based on current settings
+ */
+static void atomisp_update_capture_mode(struct atomisp_sub_device *asd)
+{
+ if (asd->params.gdc_cac_en)
+ atomisp_css_capture_set_mode(asd, CSS_CAPTURE_MODE_ADVANCED);
+ else if (asd->params.low_light)
+ atomisp_css_capture_set_mode(asd, CSS_CAPTURE_MODE_LOW_LIGHT);
+ else if (asd->video_out_capture.sh_fmt == CSS_FRAME_FORMAT_RAW)
+ atomisp_css_capture_set_mode(asd, CSS_CAPTURE_MODE_RAW);
+ else
+ atomisp_css_capture_set_mode(asd, CSS_CAPTURE_MODE_PRIMARY);
+}
+
+#ifdef ISP2401
+int atomisp_set_sensor_runmode(struct atomisp_sub_device *asd,
+ struct atomisp_s_runmode *runmode)
+{
+ struct atomisp_device *isp = asd->isp;
+ struct v4l2_ctrl *c;
+ struct v4l2_streamparm p = {0};
+ int ret;
+ int modes[] = { CI_MODE_NONE,
+ CI_MODE_VIDEO,
+ CI_MODE_STILL_CAPTURE,
+ CI_MODE_CONTINUOUS,
+ CI_MODE_PREVIEW };
+
+ if (!(runmode && (runmode->mode & RUNMODE_MASK)))
+ return -EINVAL;
+
+ mutex_lock(asd->ctrl_handler.lock);
+ c = v4l2_ctrl_find(isp->inputs[asd->input_curr].camera->ctrl_handler,
+ V4L2_CID_RUN_MODE);
+
+ if (c) {
+ ret = v4l2_ctrl_s_ctrl(c, runmode->mode);
+ } else {
+ p.parm.capture.capturemode = modes[runmode->mode];
+ ret = v4l2_subdev_call(isp->inputs[asd->input_curr].camera,
+ video, s_parm, &p);
+ }
+
+ mutex_unlock(asd->ctrl_handler.lock);
+ return ret;
+}
+
+#endif
+/*
+ * Function to enable/disable lens geometry distortion correction (GDC) and
+ * chromatic aberration correction (CAC)
+ */
+int atomisp_gdc_cac(struct atomisp_sub_device *asd, int flag,
+ __s32 *value)
+{
+ if (flag == 0) {
+ *value = asd->params.gdc_cac_en;
+ return 0;
+ }
+
+ asd->params.gdc_cac_en = !!*value;
+ if (asd->params.gdc_cac_en) {
+ atomisp_css_set_morph_table(asd,
+ asd->params.css_param.morph_table);
+ } else {
+ atomisp_css_set_morph_table(asd, NULL);
+ }
+ asd->params.css_update_params_needed = true;
+ atomisp_update_capture_mode(asd);
+ return 0;
+}
+
+/*
+ * Function to enable/disable low light mode including ANR
+ */
+int atomisp_low_light(struct atomisp_sub_device *asd, int flag,
+ __s32 *value)
+{
+ if (flag == 0) {
+ *value = asd->params.low_light;
+ return 0;
+ }
+
+ asd->params.low_light = (*value != 0);
+ atomisp_update_capture_mode(asd);
+ return 0;
+}
+
+/*
+ * Function to enable/disable extra noise reduction (XNR) in low light
+ * condition
+ */
+int atomisp_xnr(struct atomisp_sub_device *asd, int flag,
+ int *xnr_enable)
+{
+ if (flag == 0) {
+ *xnr_enable = asd->params.xnr_en;
+ return 0;
+ }
+
+ atomisp_css_capture_enable_xnr(asd, !!*xnr_enable);
+
+ return 0;
+}
+
+/*
+ * Function to configure bayer noise reduction
+ */
+int atomisp_nr(struct atomisp_sub_device *asd, int flag,
+ struct atomisp_nr_config *arg)
+{
+ if (flag == 0) {
+ /* Get nr config from current setup */
+ if (atomisp_css_get_nr_config(asd, arg))
+ return -EINVAL;
+ } else {
+ /* Set nr config to isp parameters */
+ memcpy(&asd->params.css_param.nr_config, arg,
+ sizeof(struct atomisp_css_nr_config));
+ atomisp_css_set_nr_config(asd, &asd->params.css_param.nr_config);
+ asd->params.css_update_params_needed = true;
+ }
+ return 0;
+}
+
+/*
+ * Function to configure temporal noise reduction (TNR)
+ */
+int atomisp_tnr(struct atomisp_sub_device *asd, int flag,
+ struct atomisp_tnr_config *config)
+{
+ /* Get tnr config from current setup */
+ if (flag == 0) {
+ /* Get tnr config from current setup */
+ if (atomisp_css_get_tnr_config(asd, config))
+ return -EINVAL;
+ } else {
+ /* Set tnr config to isp parameters */
+ memcpy(&asd->params.css_param.tnr_config, config,
+ sizeof(struct atomisp_css_tnr_config));
+ atomisp_css_set_tnr_config(asd, &asd->params.css_param.tnr_config);
+ asd->params.css_update_params_needed = true;
+ }
+
+ return 0;
+}
+
+/*
+ * Function to configure black level compensation
+ */
+int atomisp_black_level(struct atomisp_sub_device *asd, int flag,
+ struct atomisp_ob_config *config)
+{
+ if (flag == 0) {
+ /* Get ob config from current setup */
+ if (atomisp_css_get_ob_config(asd, config))
+ return -EINVAL;
+ } else {
+ /* Set ob config to isp parameters */
+ memcpy(&asd->params.css_param.ob_config, config,
+ sizeof(struct atomisp_css_ob_config));
+ atomisp_css_set_ob_config(asd, &asd->params.css_param.ob_config);
+ asd->params.css_update_params_needed = true;
+ }
+
+ return 0;
+}
+
+/*
+ * Function to configure edge enhancement
+ */
+int atomisp_ee(struct atomisp_sub_device *asd, int flag,
+ struct atomisp_ee_config *config)
+{
+ if (flag == 0) {
+ /* Get ee config from current setup */
+ if (atomisp_css_get_ee_config(asd, config))
+ return -EINVAL;
+ } else {
+ /* Set ee config to isp parameters */
+ memcpy(&asd->params.css_param.ee_config, config,
+ sizeof(asd->params.css_param.ee_config));
+ atomisp_css_set_ee_config(asd, &asd->params.css_param.ee_config);
+ asd->params.css_update_params_needed = true;
+ }
+
+ return 0;
+}
+
+/*
+ * Function to update Gamma table for gamma, brightness and contrast config
+ */
+int atomisp_gamma(struct atomisp_sub_device *asd, int flag,
+ struct atomisp_gamma_table *config)
+{
+ if (flag == 0) {
+ /* Get gamma table from current setup */
+ if (atomisp_css_get_gamma_table(asd, config))
+ return -EINVAL;
+ } else {
+ /* Set gamma table to isp parameters */
+ memcpy(&asd->params.css_param.gamma_table, config,
+ sizeof(asd->params.css_param.gamma_table));
+ atomisp_css_set_gamma_table(asd, &asd->params.css_param.gamma_table);
+ }
+
+ return 0;
+}
+
+/*
+ * Function to update Ctc table for Chroma Enhancement
+ */
+int atomisp_ctc(struct atomisp_sub_device *asd, int flag,
+ struct atomisp_ctc_table *config)
+{
+ if (flag == 0) {
+ /* Get ctc table from current setup */
+ if (atomisp_css_get_ctc_table(asd, config))
+ return -EINVAL;
+ } else {
+ /* Set ctc table to isp parameters */
+ memcpy(&asd->params.css_param.ctc_table, config,
+ sizeof(asd->params.css_param.ctc_table));
+ atomisp_css_set_ctc_table(asd, &asd->params.css_param.ctc_table);
+ }
+
+ return 0;
+}
+
+/*
+ * Function to update gamma correction parameters
+ */
+int atomisp_gamma_correction(struct atomisp_sub_device *asd, int flag,
+ struct atomisp_gc_config *config)
+{
+ if (flag == 0) {
+ /* Get gamma correction params from current setup */
+ if (atomisp_css_get_gc_config(asd, config))
+ return -EINVAL;
+ } else {
+ /* Set gamma correction params to isp parameters */
+ memcpy(&asd->params.css_param.gc_config, config,
+ sizeof(asd->params.css_param.gc_config));
+ atomisp_css_set_gc_config(asd, &asd->params.css_param.gc_config);
+ asd->params.css_update_params_needed = true;
+ }
+
+ return 0;
+}
+
+/*
+ * Function to update narrow gamma flag
+ */
+int atomisp_formats(struct atomisp_sub_device *asd, int flag,
+ struct atomisp_formats_config *config)
+{
+ if (flag == 0) {
+ /* Get narrow gamma flag from current setup */
+ if (atomisp_css_get_formats_config(asd, config))
+ return -EINVAL;
+ } else {
+ /* Set narrow gamma flag to isp parameters */
+ memcpy(&asd->params.css_param.formats_config, config,
+ sizeof(asd->params.css_param.formats_config));
+ atomisp_css_set_formats_config(asd, &asd->params.css_param.formats_config);
+ }
+
+ return 0;
+}
+
+void atomisp_free_internal_buffers(struct atomisp_sub_device *asd)
+{
+ atomisp_free_css_parameters(&asd->params.css_param);
+
+ if (asd->raw_output_frame) {
+ atomisp_css_frame_free(asd->raw_output_frame);
+ asd->raw_output_frame = NULL;
+ }
+}
+
+static void atomisp_update_grid_info(struct atomisp_sub_device *asd,
+ enum atomisp_css_pipe_id pipe_id,
+ int source_pad)
+{
+ struct atomisp_device *isp = asd->isp;
+ int err;
+ uint16_t stream_id = atomisp_source_pad_to_stream_id(asd, source_pad);
+
+ if (atomisp_css_get_grid_info(asd, pipe_id, source_pad))
+ return;
+
+ /* We must free all buffers because they no longer match
+ the grid size. */
+ atomisp_css_free_stat_buffers(asd);
+
+ err = atomisp_alloc_css_stat_bufs(asd, stream_id);
+ if (err) {
+ dev_err(isp->dev, "stat_buf allocate error\n");
+ goto err;
+ }
+
+ if (atomisp_alloc_3a_output_buf(asd)) {
+ /* Failure for 3A buffers does not influence DIS buffers */
+ if (asd->params.s3a_output_bytes != 0) {
+			/* s3a_output_bytes can be 0 for SoC sensors; check it
+			 * here to avoid a false error log */
+ dev_err(isp->dev, "Failed to allocate memory for 3A statistics\n");
+ }
+ goto err;
+ }
+
+ if (atomisp_alloc_dis_coef_buf(asd)) {
+ dev_err(isp->dev,
+ "Failed to allocate memory for DIS statistics\n");
+ goto err;
+ }
+
+ if (atomisp_alloc_metadata_output_buf(asd)) {
+ dev_err(isp->dev, "Failed to allocate memory for metadata\n");
+ goto err;
+ }
+
+ return;
+
+err:
+ atomisp_css_free_stat_buffers(asd);
+ return;
+}
+
+static void atomisp_curr_user_grid_info(struct atomisp_sub_device *asd,
+ struct atomisp_grid_info *info)
+{
+ memcpy(info, &asd->params.curr_grid_info.s3a_grid,
+ sizeof(struct atomisp_css_3a_grid_info));
+}
+
+int atomisp_compare_grid(struct atomisp_sub_device *asd,
+ struct atomisp_grid_info *atomgrid)
+{
+ struct atomisp_grid_info tmp = {0};
+
+ atomisp_curr_user_grid_info(asd, &tmp);
+ return memcmp(atomgrid, &tmp, sizeof(tmp));
+}
+
+/*
+ * Function to update Gdc table for gdc
+ */
+int atomisp_gdc_cac_table(struct atomisp_sub_device *asd, int flag,
+ struct atomisp_morph_table *config)
+{
+ int ret;
+ int i;
+ struct atomisp_device *isp = asd->isp;
+
+ if (flag == 0) {
+ /* Get gdc table from current setup */
+ struct atomisp_css_morph_table tab = {0};
+ atomisp_css_get_morph_table(asd, &tab);
+
+ config->width = tab.width;
+ config->height = tab.height;
+
+ for (i = 0; i < CSS_MORPH_TABLE_NUM_PLANES; i++) {
+ ret = copy_to_user(config->coordinates_x[i],
+ tab.coordinates_x[i], tab.height *
+ tab.width * sizeof(*tab.coordinates_x[i]));
+ if (ret) {
+ dev_err(isp->dev,
+ "Failed to copy to User for x\n");
+ return -EFAULT;
+ }
+ ret = copy_to_user(config->coordinates_y[i],
+ tab.coordinates_y[i], tab.height *
+ tab.width * sizeof(*tab.coordinates_y[i]));
+ if (ret) {
+ dev_err(isp->dev,
+ "Failed to copy to User for y\n");
+ return -EFAULT;
+ }
+ }
+ } else {
+ struct atomisp_css_morph_table *tab =
+ asd->params.css_param.morph_table;
+
+ /* free first if we have one */
+ if (tab) {
+ atomisp_css_morph_table_free(tab);
+ asd->params.css_param.morph_table = NULL;
+ }
+
+ /* allocate new one */
+ tab = atomisp_css_morph_table_allocate(config->width,
+ config->height);
+
+ if (!tab) {
+ dev_err(isp->dev, "out of memory\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < CSS_MORPH_TABLE_NUM_PLANES; i++) {
+ ret = copy_from_user(tab->coordinates_x[i],
+ config->coordinates_x[i],
+ config->height * config->width *
+ sizeof(*config->coordinates_x[i]));
+ if (ret) {
+ dev_err(isp->dev,
+ "Failed to copy from User for x, ret %d\n",
+ ret);
+ atomisp_css_morph_table_free(tab);
+ return -EFAULT;
+ }
+ ret = copy_from_user(tab->coordinates_y[i],
+ config->coordinates_y[i],
+ config->height * config->width *
+ sizeof(*config->coordinates_y[i]));
+ if (ret) {
+ dev_err(isp->dev,
+ "Failed to copy from User for y, ret is %d\n",
+ ret);
+ atomisp_css_morph_table_free(tab);
+ return -EFAULT;
+ }
+ }
+ asd->params.css_param.morph_table = tab;
+ if (asd->params.gdc_cac_en)
+ atomisp_css_set_morph_table(asd, tab);
+ }
+
+ return 0;
+}
+
+int atomisp_macc_table(struct atomisp_sub_device *asd, int flag,
+ struct atomisp_macc_config *config)
+{
+ struct atomisp_css_macc_table *macc_table;
+
+ switch (config->color_effect) {
+ case V4L2_COLORFX_NONE:
+ macc_table = &asd->params.css_param.macc_table;
+ break;
+ case V4L2_COLORFX_SKY_BLUE:
+ macc_table = &blue_macc_table;
+ break;
+ case V4L2_COLORFX_GRASS_GREEN:
+ macc_table = &green_macc_table;
+ break;
+ case V4L2_COLORFX_SKIN_WHITEN_LOW:
+ macc_table = &skin_low_macc_table;
+ break;
+ case V4L2_COLORFX_SKIN_WHITEN:
+ macc_table = &skin_medium_macc_table;
+ break;
+ case V4L2_COLORFX_SKIN_WHITEN_HIGH:
+ macc_table = &skin_high_macc_table;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (flag == 0) {
+ /* Get macc table from current setup */
+ memcpy(&config->table, macc_table,
+ sizeof(struct atomisp_css_macc_table));
+ } else {
+ memcpy(macc_table, &config->table,
+ sizeof(struct atomisp_css_macc_table));
+ if (config->color_effect == asd->params.color_effect)
+ atomisp_css_set_macc_table(asd, macc_table);
+ }
+
+ return 0;
+}
+
+int atomisp_set_dis_vector(struct atomisp_sub_device *asd,
+ struct atomisp_dis_vector *vector)
+{
+ atomisp_css_video_set_dis_vector(asd, vector);
+
+ asd->params.dis_proj_data_valid = false;
+ asd->params.css_update_params_needed = true;
+ return 0;
+}
+
+/*
+ * Function to set/get image stabilization statistics
+ */
+int atomisp_get_dis_stat(struct atomisp_sub_device *asd,
+ struct atomisp_dis_statistics *stats)
+{
+ return atomisp_css_get_dis_stat(asd, stats);
+}
+
+/*
+ * Function to set the current sensor pixel array size used by camera_prefiles.xml
+ */
+int atomisp_set_array_res(struct atomisp_sub_device *asd,
+ struct atomisp_resolution *config)
+{
+ dev_dbg(asd->isp->dev, ">%s start\n", __func__);
+ if (!config) {
+ dev_err(asd->isp->dev, "Set sensor array size is not valid\n");
+ return -EINVAL;
+ }
+
+ asd->sensor_array_res.width = config->width;
+ asd->sensor_array_res.height = config->height;
+ return 0;
+}
+
+/*
+ * Function to get DVS2 BQ resolution settings
+ */
+int atomisp_get_dvs2_bq_resolutions(struct atomisp_sub_device *asd,
+ struct atomisp_dvs2_bq_resolutions *bq_res)
+{
+ struct ia_css_pipe_config *pipe_cfg = NULL;
+ struct ia_css_stream_config *stream_cfg = NULL;
+ struct ia_css_stream_input_config *input_config = NULL;
+
+ struct ia_css_stream *stream =
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream;
+ if (!stream) {
+ dev_warn(asd->isp->dev, "stream is not created");
+ return -EAGAIN;
+ }
+
+ pipe_cfg = &asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL]
+ .pipe_configs[CSS_PIPE_ID_VIDEO];
+ stream_cfg = &asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL]
+ .stream_config;
+ input_config = &stream_cfg->input_config;
+
+ if (!bq_res)
+ return -EINVAL;
+
+ /* the GDC output resolution */
+ bq_res->output_bq.width_bq = pipe_cfg->output_info[0].res.width / 2;
+ bq_res->output_bq.height_bq = pipe_cfg->output_info[0].res.height / 2;
+
+ bq_res->envelope_bq.width_bq = 0;
+ bq_res->envelope_bq.height_bq = 0;
+ /* the GDC input resolution */
+ if (!asd->continuous_mode->val) {
+ bq_res->source_bq.width_bq = bq_res->output_bq.width_bq +
+ pipe_cfg->dvs_envelope.width / 2;
+ bq_res->source_bq.height_bq = bq_res->output_bq.height_bq +
+ pipe_cfg->dvs_envelope.height / 2;
+		/*
+		 * Bad pixels caused by spatial filter processing.
+		 * The ISP filter resolution should be provided by CSS/FW, but
+		 * there is no such API to query it yet; since it is a fixed
+		 * value, it is hardcoded here.
+		 */
+ bq_res->ispfilter_bq.width_bq = 12 / 2;
+ bq_res->ispfilter_bq.height_bq = 12 / 2;
+ /* spatial filter shift, always 4 pixels */
+ bq_res->gdc_shift_bq.width_bq = 4 / 2;
+ bq_res->gdc_shift_bq.height_bq = 4 / 2;
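+		/*
+		 * Example (hypothetical numbers): a 1920x1080 GDC output with
+		 * a 128x128 DVS envelope gives a GDC input (source_bq) of
+		 * 960 + 64 = 1024 BQ by 540 + 64 = 604 BQ, with ispfilter_bq
+		 * fixed at 6x6 BQ and gdc_shift_bq at 2x2 BQ as set above.
+		 */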
+
+ if (asd->params.video_dis_en) {
+ bq_res->envelope_bq.width_bq = pipe_cfg->dvs_envelope.width
+ / 2 - bq_res->ispfilter_bq.width_bq;
+ bq_res->envelope_bq.height_bq = pipe_cfg->dvs_envelope.height
+ / 2 - bq_res->ispfilter_bq.height_bq;
+ }
+ } else {
+ unsigned int w_padding;
+ unsigned int gdc_effective_input = 0;
+
+ /* For GDC:
+ * gdc_effective_input = effective_input + envelope
+ *
+ * From the comment and formula in BZ1786,
+ * we see the source_bq should be:
+ * effective_input / bayer_ds_ratio
+ */
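+		/*
+		 * Worked example (hypothetical numbers): with an effective
+		 * input width of 1920 and bayer_ds_out_res.width of 960, the
+		 * calculation below gives (1920 * 960 / 1920 + 1) / 2 = 480,
+		 * i.e. the 960-pixel downscaled width expressed in bayer
+		 * quads (1 BQ = 2x2 pixels).
+		 */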
+ bq_res->source_bq.width_bq =
+ (input_config->effective_res.width *
+ pipe_cfg->bayer_ds_out_res.width /
+ input_config->effective_res.width + 1) / 2;
+ bq_res->source_bq.height_bq =
+ (input_config->effective_res.height *
+ pipe_cfg->bayer_ds_out_res.height /
+ input_config->effective_res.height + 1) / 2;
+
+
+ if (!asd->params.video_dis_en) {
+			/*
+			 * We adjust ispfilter_bq to:
+			 * ispfilter_bq = 128/BDS
+			 * We still need the firmware team to provide an
+			 * official formula for SDV.
+			 */
+ bq_res->ispfilter_bq.width_bq = 128 *
+ pipe_cfg->bayer_ds_out_res.width /
+ input_config->effective_res.width / 2;
+ bq_res->ispfilter_bq.height_bq = 128 *
+ pipe_cfg->bayer_ds_out_res.width /
+ input_config->effective_res.width / 2;
+
+ if (IS_HWREVISION(asd->isp, ATOMISP_HW_REVISION_ISP2401)) {
+ /* No additional left padding for ISYS2401 */
+ bq_res->gdc_shift_bq.width_bq = 4 / 2;
+ bq_res->gdc_shift_bq.height_bq = 4 / 2;
+ } else {
+				/*
+				 * For the w_padding and gdc_shift_bq
+				 * calculation, please see BZ 1786 and 4358
+				 * for more info.
+				 * Testing shows this formula works for now,
+				 * but we still have no official formula.
+				 *
+				 * w_padding = ceiling(gdc_effective_input
+				 * /128, 1) * 128 - effective_width
+				 * gdc_shift_bq = w_padding/BDS/2 + ispfilter_bq/2
+				 */
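+				/*
+				 * Worked example (hypothetical numbers):
+				 * effective_width = 1920 and
+				 * dvs_envelope.width = 128 give
+				 * gdc_effective_input = 2048, already a
+				 * multiple of 128, so w_padding =
+				 * 2048 - 1920 = 128 before the BDS scaling
+				 * and BQ conversion performed below.
+				 */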
+ gdc_effective_input =
+ input_config->effective_res.width +
+ pipe_cfg->dvs_envelope.width;
+ w_padding = roundup(gdc_effective_input, 128) -
+ input_config->effective_res.width;
+ w_padding = w_padding *
+ pipe_cfg->bayer_ds_out_res.width /
+ input_config->effective_res.width + 1;
+ w_padding = roundup(w_padding/2, 1);
+
+ bq_res->gdc_shift_bq.width_bq = bq_res->ispfilter_bq.width_bq / 2
+ + w_padding;
+ bq_res->gdc_shift_bq.height_bq = 4 / 2;
+ }
+ } else {
+ unsigned int dvs_w, dvs_h, dvs_w_max, dvs_h_max;
+
+ bq_res->ispfilter_bq.width_bq = 8 / 2;
+ bq_res->ispfilter_bq.height_bq = 8 / 2;
+
+ if (IS_HWREVISION(asd->isp, ATOMISP_HW_REVISION_ISP2401)) {
+ /* No additional left padding for ISYS2401 */
+ bq_res->gdc_shift_bq.width_bq = 4 / 2;
+ bq_res->gdc_shift_bq.height_bq = 4 / 2;
+ } else {
+ w_padding =
+ roundup(input_config->effective_res.width, 128) -
+ input_config->effective_res.width;
+ if (w_padding < 12)
+ w_padding = 12;
+ bq_res->gdc_shift_bq.width_bq = 4 / 2 +
+ ((w_padding - 12) *
+ pipe_cfg->bayer_ds_out_res.width /
+ input_config->effective_res.width + 1) / 2;
+ bq_res->gdc_shift_bq.height_bq = 4 / 2;
+ }
+
+ dvs_w = pipe_cfg->bayer_ds_out_res.width -
+ pipe_cfg->output_info[0].res.width;
+ dvs_h = pipe_cfg->bayer_ds_out_res.height -
+ pipe_cfg->output_info[0].res.height;
+ dvs_w_max = rounddown(
+ pipe_cfg->output_info[0].res.width / 5,
+ ATOM_ISP_STEP_WIDTH);
+ dvs_h_max = rounddown(
+ pipe_cfg->output_info[0].res.height / 5,
+ ATOM_ISP_STEP_HEIGHT);
+ bq_res->envelope_bq.width_bq =
+ min((dvs_w / 2), (dvs_w_max / 2)) -
+ bq_res->ispfilter_bq.width_bq;
+ bq_res->envelope_bq.height_bq =
+ min((dvs_h / 2), (dvs_h_max / 2)) -
+ bq_res->ispfilter_bq.height_bq;
+ }
+ }
+
+ dev_dbg(asd->isp->dev, "source_bq.width_bq %d, source_bq.height_bq %d,\nispfilter_bq.width_bq %d, ispfilter_bq.height_bq %d,\ngdc_shift_bq.width_bq %d, gdc_shift_bq.height_bq %d,\nenvelope_bq.width_bq %d, envelope_bq.height_bq %d,\noutput_bq.width_bq %d, output_bq.height_bq %d\n",
+ bq_res->source_bq.width_bq, bq_res->source_bq.height_bq,
+ bq_res->ispfilter_bq.width_bq, bq_res->ispfilter_bq.height_bq,
+ bq_res->gdc_shift_bq.width_bq, bq_res->gdc_shift_bq.height_bq,
+ bq_res->envelope_bq.width_bq, bq_res->envelope_bq.height_bq,
+ bq_res->output_bq.width_bq, bq_res->output_bq.height_bq);
+
+ return 0;
+}
+
+int atomisp_set_dis_coefs(struct atomisp_sub_device *asd,
+ struct atomisp_dis_coefficients *coefs)
+{
+ return atomisp_css_set_dis_coefs(asd, coefs);
+}
+
+/*
+ * Function to set/get 3A stat from isp
+ */
+int atomisp_3a_stat(struct atomisp_sub_device *asd, int flag,
+ struct atomisp_3a_statistics *config)
+{
+ struct atomisp_device *isp = asd->isp;
+ struct atomisp_s3a_buf *s3a_buf;
+ unsigned long ret;
+
+ if (flag != 0)
+ return -EINVAL;
+
+ /* sanity check to avoid writing into unallocated memory. */
+ if (asd->params.s3a_output_bytes == 0)
+ return -EINVAL;
+
+ if (atomisp_compare_grid(asd, &config->grid_info) != 0) {
+ /* If the grid info in the argument differs from the current
+ grid info, we tell the caller to reset the grid size and
+ try again. */
+ return -EAGAIN;
+ }
+
+ if (list_empty(&asd->s3a_stats_ready)) {
+ dev_err(isp->dev, "3a statistics is not valid.\n");
+ return -EAGAIN;
+ }
+
+ s3a_buf = list_entry(asd->s3a_stats_ready.next,
+ struct atomisp_s3a_buf, list);
+ if (s3a_buf->s3a_map)
+ ia_css_translate_3a_statistics(
+ asd->params.s3a_user_stat, s3a_buf->s3a_map);
+ else
+ ia_css_get_3a_statistics(asd->params.s3a_user_stat,
+ s3a_buf->s3a_data);
+
+ config->exp_id = s3a_buf->s3a_data->exp_id;
+ config->isp_config_id = s3a_buf->s3a_data->isp_config_id;
+
+ ret = copy_to_user(config->data, asd->params.s3a_user_stat->data,
+ asd->params.s3a_output_bytes);
+ if (ret) {
+ dev_err(isp->dev, "copy to user failed: copied %lu bytes\n",
+ ret);
+ return -EFAULT;
+ }
+
+ /* Move to free buffer list */
+ list_del_init(&s3a_buf->list);
+ list_add_tail(&s3a_buf->list, &asd->s3a_stats);
+ dev_dbg(isp->dev, "%s: finish getting exp_id %d 3a stat, isp_config_id %d\n", __func__,
+ config->exp_id, config->isp_config_id);
+ return 0;
+}
+
+int atomisp_get_metadata(struct atomisp_sub_device *asd, int flag,
+ struct atomisp_metadata *md)
+{
+ struct atomisp_device *isp = asd->isp;
+ struct ia_css_stream_config *stream_config;
+ struct ia_css_stream_info *stream_info;
+ struct camera_mipi_info *mipi_info;
+ struct atomisp_metadata_buf *md_buf;
+ enum atomisp_metadata_type md_type = ATOMISP_MAIN_METADATA;
+ int ret, i;
+
+ if (flag != 0)
+ return -EINVAL;
+
+ stream_config = &asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].
+ stream_config;
+ stream_info = &asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].
+ stream_info;
+
+ /* We always return the resolution and stride even if there is
+ * no valid metadata. This allows the caller to get the information
+ * needed to allocate user-space buffers. */
+ md->width = stream_info->metadata_info.resolution.width;
+ md->height = stream_info->metadata_info.resolution.height;
+ md->stride = stream_info->metadata_info.stride;
+
+ /* sanity check to avoid writing into unallocated memory.
+ * This does not return an error because it is a valid way
+ * for applications to detect that metadata is not enabled. */
+ if (md->width == 0 || md->height == 0 || !md->data)
+ return 0;
+
+ /* This is done in the atomisp_buf_done() */
+ if (list_empty(&asd->metadata_ready[md_type])) {
+ dev_warn(isp->dev, "Metadata queue is empty now!\n");
+ return -EAGAIN;
+ }
+
+ mipi_info = atomisp_to_sensor_mipi_info(
+ isp->inputs[asd->input_curr].camera);
+ if (mipi_info == NULL)
+ return -EINVAL;
+
+ if (mipi_info->metadata_effective_width != NULL) {
+ for (i = 0; i < md->height; i++)
+ md->effective_width[i] =
+ mipi_info->metadata_effective_width[i];
+ }
+
+ md_buf = list_entry(asd->metadata_ready[md_type].next,
+ struct atomisp_metadata_buf, list);
+ md->exp_id = md_buf->metadata->exp_id;
+ if (md_buf->md_vptr) {
+ ret = copy_to_user(md->data,
+ md_buf->md_vptr,
+ stream_info->metadata_info.size);
+ } else {
+ hmm_load(md_buf->metadata->address,
+ asd->params.metadata_user[md_type],
+ stream_info->metadata_info.size);
+
+ ret = copy_to_user(md->data,
+ asd->params.metadata_user[md_type],
+ stream_info->metadata_info.size);
+ }
+ if (ret) {
+ dev_err(isp->dev, "copy to user failed: copied %d bytes\n",
+ ret);
+ return -EFAULT;
+ }
+
+ list_del_init(&md_buf->list);
+ list_add_tail(&md_buf->list, &asd->metadata[md_type]);
+
+ dev_dbg(isp->dev, "%s: HAL de-queued metadata type %d with exp_id %d\n",
+ __func__, md_type, md->exp_id);
+ return 0;
+}
+
+int atomisp_get_metadata_by_type(struct atomisp_sub_device *asd, int flag,
+ struct atomisp_metadata_with_type *md)
+{
+ struct atomisp_device *isp = asd->isp;
+ struct ia_css_stream_config *stream_config;
+ struct ia_css_stream_info *stream_info;
+ struct camera_mipi_info *mipi_info;
+ struct atomisp_metadata_buf *md_buf;
+ enum atomisp_metadata_type md_type;
+ int ret, i;
+
+ if (flag != 0)
+ return -EINVAL;
+
+ stream_config = &asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].
+ stream_config;
+ stream_info = &asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].
+ stream_info;
+
+ /* We always return the resolution and stride even if there is
+ * no valid metadata. This allows the caller to get the information
+ * needed to allocate user-space buffers. */
+ md->width = stream_info->metadata_info.resolution.width;
+ md->height = stream_info->metadata_info.resolution.height;
+ md->stride = stream_info->metadata_info.stride;
+
+ /* sanity check to avoid writing into unallocated memory.
+ * This does not return an error because it is a valid way
+ * for applications to detect that metadata is not enabled. */
+ if (md->width == 0 || md->height == 0 || !md->data)
+ return 0;
+
+ md_type = md->type;
+ if (md_type < 0 || md_type >= ATOMISP_METADATA_TYPE_NUM)
+ return -EINVAL;
+
+ /* This is done in the atomisp_buf_done() */
+ if (list_empty(&asd->metadata_ready[md_type])) {
+ dev_warn(isp->dev, "Metadata queue is empty now!\n");
+ return -EAGAIN;
+ }
+
+ mipi_info = atomisp_to_sensor_mipi_info(
+ isp->inputs[asd->input_curr].camera);
+ if (mipi_info == NULL)
+ return -EINVAL;
+
+ if (mipi_info->metadata_effective_width != NULL) {
+ for (i = 0; i < md->height; i++)
+ md->effective_width[i] =
+ mipi_info->metadata_effective_width[i];
+ }
+
+ md_buf = list_entry(asd->metadata_ready[md_type].next,
+ struct atomisp_metadata_buf, list);
+ md->exp_id = md_buf->metadata->exp_id;
+ if (md_buf->md_vptr) {
+ ret = copy_to_user(md->data,
+ md_buf->md_vptr,
+ stream_info->metadata_info.size);
+ } else {
+ hmm_load(md_buf->metadata->address,
+ asd->params.metadata_user[md_type],
+ stream_info->metadata_info.size);
+
+ ret = copy_to_user(md->data,
+ asd->params.metadata_user[md_type],
+ stream_info->metadata_info.size);
+ }
+ if (ret) {
+ dev_err(isp->dev, "copy to user failed: copied %d bytes\n",
+ ret);
+ return -EFAULT;
+ } else {
+ list_del_init(&md_buf->list);
+ list_add_tail(&md_buf->list, &asd->metadata[md_type]);
+ }
+ dev_dbg(isp->dev, "%s: HAL de-queued metadata type %d with exp_id %d\n",
+ __func__, md_type, md->exp_id);
+ return 0;
+}
+
+/*
+ * Function to calculate real zoom region for every pipe
+ */
+int atomisp_calculate_real_zoom_region(struct atomisp_sub_device *asd,
+ struct ia_css_dz_config *dz_config,
+ enum atomisp_css_pipe_id css_pipe_id)
+
+{
+ struct atomisp_stream_env *stream_env =
+ &asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL];
+ struct atomisp_resolution eff_res, out_res;
+#ifdef ISP2401
+ int w_offset, h_offset;
+#endif
+
+ memset(&eff_res, 0, sizeof(eff_res));
+ memset(&out_res, 0, sizeof(out_res));
+
+ if (dz_config->dx || dz_config->dy)
+ return 0;
+
+ if (css_pipe_id != IA_CSS_PIPE_ID_PREVIEW
+ && css_pipe_id != IA_CSS_PIPE_ID_CAPTURE) {
+		dev_err(asd->isp->dev,
+			"%s: the selected pipe does not support a crop region\n",
+			__func__);
+ return -EINVAL;
+ }
+
+ eff_res.width =
+ stream_env->stream_config.input_config.effective_res.width;
+ eff_res.height =
+ stream_env->stream_config.input_config.effective_res.height;
+ if (eff_res.width == 0 || eff_res.height == 0) {
+		dev_err(asd->isp->dev, "%s: invalid effective resolution\n",
+			__func__);
+ return -EINVAL;
+ }
+
+ if (dz_config->zoom_region.resolution.width
+ == asd->sensor_array_res.width
+ || dz_config->zoom_region.resolution.height
+ == asd->sensor_array_res.height) {
+		/* no crop region needed */
+ dz_config->zoom_region.origin.x = 0;
+ dz_config->zoom_region.origin.y = 0;
+ dz_config->zoom_region.resolution.width = eff_res.width;
+ dz_config->zoom_region.resolution.height = eff_res.height;
+ return 0;
+ }
+
+	/* FIXME:
+	 * Due to a firmware limitation, this is not the correct
+	 * implementation per Google's definition.
+	 * Map the real crop region based on the max crop region calculated
+	 * above.
+	 */
+#ifdef ISP2401
+ out_res.width =
+ stream_env->pipe_configs[css_pipe_id].output_info[0].res.width;
+ out_res.height =
+ stream_env->pipe_configs[css_pipe_id].output_info[0].res.height;
+ if (out_res.width == 0 || out_res.height == 0) {
+		dev_err(asd->isp->dev,
+			"%s: invalid current pipe output resolution\n",
+			__func__);
+ return -EINVAL;
+ }
+
+ if (asd->sensor_array_res.width * out_res.height
+ < out_res.width * asd->sensor_array_res.height) {
+ h_offset = asd->sensor_array_res.height -
+ asd->sensor_array_res.width
+ * out_res.height / out_res.width;
+ h_offset = h_offset / 2;
+ if (dz_config->zoom_region.origin.y < h_offset)
+ dz_config->zoom_region.origin.y = 0;
+ else
+ dz_config->zoom_region.origin.y =
+ dz_config->zoom_region.origin.y - h_offset;
+ w_offset = 0;
+ } else {
+ w_offset = asd->sensor_array_res.width -
+ asd->sensor_array_res.height
+ * out_res.width / out_res.height;
+ w_offset = w_offset / 2;
+ if (dz_config->zoom_region.origin.x < w_offset)
+ dz_config->zoom_region.origin.x = 0;
+ else
+ dz_config->zoom_region.origin.x =
+ dz_config->zoom_region.origin.x - w_offset;
+ h_offset = 0;
+ }
+#endif
+ dz_config->zoom_region.origin.x =
+ dz_config->zoom_region.origin.x
+ * eff_res.width
+#ifndef ISP2401
+ / asd->sensor_array_res.width;
+#else
+ / (asd->sensor_array_res.width -
+ 2 * w_offset);
+#endif
+ dz_config->zoom_region.origin.y =
+ dz_config->zoom_region.origin.y
+ * eff_res.height
+#ifndef ISP2401
+ / asd->sensor_array_res.height;
+#else
+ / (asd->sensor_array_res.height -
+ 2 * h_offset);
+#endif
+ dz_config->zoom_region.resolution.width =
+ dz_config->zoom_region.resolution.width
+ * eff_res.width
+#ifndef ISP2401
+ / asd->sensor_array_res.width;
+#else
+ / (asd->sensor_array_res.width -
+ 2 * w_offset);
+#endif
+ dz_config->zoom_region.resolution.height =
+ dz_config->zoom_region.resolution.height
+ * eff_res.height
+#ifndef ISP2401
+ / asd->sensor_array_res.height;
+#else
+ / (asd->sensor_array_res.height -
+ 2 * h_offset);
+#endif
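+	/*
+	 * Worked example for the non-ISP2401 scaling above (hypothetical
+	 * numbers): with sensor_array_res 4208x3120, eff_res 2104x1560 and a
+	 * requested zoom origin of (1052, 780), the origin maps to
+	 * (1052 * 2104 / 4208, 780 * 1560 / 3120) = (526, 390) in
+	 * effective-resolution coordinates.
+	 */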
+
+	/*
+	 * Keep the crop region resolution at the same aspect ratio as the
+	 * current pipe output resolution.
+	 */
+#ifndef ISP2401
+ out_res.width =
+ stream_env->pipe_configs[css_pipe_id].output_info[0].res.width;
+ out_res.height =
+ stream_env->pipe_configs[css_pipe_id].output_info[0].res.height;
+ if (out_res.width == 0 || out_res.height == 0) {
+		dev_err(asd->isp->dev,
+			"%s: invalid current pipe output resolution\n",
+			__func__);
+ return -EINVAL;
+ }
+
+#endif
+ if (out_res.width * dz_config->zoom_region.resolution.height
+ > dz_config->zoom_region.resolution.width * out_res.height) {
+ dz_config->zoom_region.resolution.height =
+ dz_config->zoom_region.resolution.width
+ * out_res.height / out_res.width;
+ } else {
+ dz_config->zoom_region.resolution.width =
+ dz_config->zoom_region.resolution.height
+ * out_res.width / out_res.height;
+ }
+ dev_dbg(asd->isp->dev, "%s crop region:(%d,%d),(%d,%d) eff_res(%d, %d) array_size(%d,%d) out_res(%d, %d)\n",
+ __func__, dz_config->zoom_region.origin.x,
+ dz_config->zoom_region.origin.y,
+ dz_config->zoom_region.resolution.width,
+ dz_config->zoom_region.resolution.height,
+ eff_res.width, eff_res.height,
+ asd->sensor_array_res.width,
+ asd->sensor_array_res.height,
+ out_res.width, out_res.height);
+
+
+ if ((dz_config->zoom_region.origin.x +
+ dz_config->zoom_region.resolution.width
+ > eff_res.width) ||
+ (dz_config->zoom_region.origin.y +
+ dz_config->zoom_region.resolution.height
+ > eff_res.height))
+ return -EINVAL;
+
+ return 0;
+}
+
+
+/*
+ * Function to check the zoom region whether is effective
+ */
+static bool atomisp_check_zoom_region(
+ struct atomisp_sub_device *asd,
+ struct ia_css_dz_config *dz_config)
+{
+ struct atomisp_resolution config;
+ bool flag = false;
+	unsigned int w, h;
+
+ memset(&config, 0, sizeof(struct atomisp_resolution));
+
+ if (dz_config->dx && dz_config->dy)
+ return true;
+
+ config.width = asd->sensor_array_res.width;
+ config.height = asd->sensor_array_res.height;
+ w = dz_config->zoom_region.origin.x +
+ dz_config->zoom_region.resolution.width;
+ h = dz_config->zoom_region.origin.y +
+ dz_config->zoom_region.resolution.height;
+
+ if ((w <= config.width) && (h <= config.height) && w > 0 && h > 0)
+ flag = true;
+ else
+		/* an invalid zoom region was set */
+ dev_err(asd->isp->dev, "%s zoom region ERROR:dz_config:(%d,%d),(%d,%d)array_res(%d, %d)\n",
+ __func__, dz_config->zoom_region.origin.x,
+ dz_config->zoom_region.origin.y,
+ dz_config->zoom_region.resolution.width,
+ dz_config->zoom_region.resolution.height,
+ config.width, config.height);
+
+ return flag;
+}
+
+void atomisp_apply_css_parameters(
+ struct atomisp_sub_device *asd,
+ struct atomisp_css_params *css_param)
+{
+ if (css_param->update_flag.wb_config)
+ atomisp_css_set_wb_config(asd, &css_param->wb_config);
+
+ if (css_param->update_flag.ob_config)
+ atomisp_css_set_ob_config(asd, &css_param->ob_config);
+
+ if (css_param->update_flag.dp_config)
+ atomisp_css_set_dp_config(asd, &css_param->dp_config);
+
+ if (css_param->update_flag.nr_config)
+ atomisp_css_set_nr_config(asd, &css_param->nr_config);
+
+ if (css_param->update_flag.ee_config)
+ atomisp_css_set_ee_config(asd, &css_param->ee_config);
+
+ if (css_param->update_flag.tnr_config)
+ atomisp_css_set_tnr_config(asd, &css_param->tnr_config);
+
+ if (css_param->update_flag.a3a_config)
+ atomisp_css_set_3a_config(asd, &css_param->s3a_config);
+
+ if (css_param->update_flag.ctc_config)
+ atomisp_css_set_ctc_config(asd, &css_param->ctc_config);
+
+ if (css_param->update_flag.cnr_config)
+ atomisp_css_set_cnr_config(asd, &css_param->cnr_config);
+
+ if (css_param->update_flag.ecd_config)
+ atomisp_css_set_ecd_config(asd, &css_param->ecd_config);
+
+ if (css_param->update_flag.ynr_config)
+ atomisp_css_set_ynr_config(asd, &css_param->ynr_config);
+
+ if (css_param->update_flag.fc_config)
+ atomisp_css_set_fc_config(asd, &css_param->fc_config);
+
+ if (css_param->update_flag.macc_config)
+ atomisp_css_set_macc_config(asd, &css_param->macc_config);
+
+ if (css_param->update_flag.aa_config)
+ atomisp_css_set_aa_config(asd, &css_param->aa_config);
+
+ if (css_param->update_flag.anr_config)
+ atomisp_css_set_anr_config(asd, &css_param->anr_config);
+
+ if (css_param->update_flag.xnr_config)
+ atomisp_css_set_xnr_config(asd, &css_param->xnr_config);
+
+ if (css_param->update_flag.yuv2rgb_cc_config)
+ atomisp_css_set_yuv2rgb_cc_config(asd,
+ &css_param->yuv2rgb_cc_config);
+
+ if (css_param->update_flag.rgb2yuv_cc_config)
+ atomisp_css_set_rgb2yuv_cc_config(asd,
+ &css_param->rgb2yuv_cc_config);
+
+ if (css_param->update_flag.macc_table)
+ atomisp_css_set_macc_table(asd, &css_param->macc_table);
+
+ if (css_param->update_flag.xnr_table)
+ atomisp_css_set_xnr_table(asd, &css_param->xnr_table);
+
+ if (css_param->update_flag.r_gamma_table)
+ atomisp_css_set_r_gamma_table(asd, &css_param->r_gamma_table);
+
+ if (css_param->update_flag.g_gamma_table)
+ atomisp_css_set_g_gamma_table(asd, &css_param->g_gamma_table);
+
+ if (css_param->update_flag.b_gamma_table)
+ atomisp_css_set_b_gamma_table(asd, &css_param->b_gamma_table);
+
+ if (css_param->update_flag.anr_thres)
+ atomisp_css_set_anr_thres(asd, &css_param->anr_thres);
+
+ if (css_param->update_flag.shading_table)
+ atomisp_css_set_shading_table(asd, css_param->shading_table);
+
+ if (css_param->update_flag.morph_table && asd->params.gdc_cac_en)
+ atomisp_css_set_morph_table(asd, css_param->morph_table);
+
+ if (css_param->update_flag.dvs2_coefs) {
+ struct atomisp_css_dvs_grid_info *dvs_grid_info =
+ atomisp_css_get_dvs_grid_info(
+ &asd->params.curr_grid_info);
+
+ if (dvs_grid_info && dvs_grid_info->enable)
+ atomisp_css_set_dvs2_coefs(asd, css_param->dvs2_coeff);
+ }
+
+ if (css_param->update_flag.dvs_6axis_config)
+ atomisp_css_set_dvs_6axis(asd, css_param->dvs_6axis);
+
+ atomisp_css_set_isp_config_id(asd, css_param->isp_config_id);
+ /*
+	 * These configurations are only used by ISP1.x, not by ISP2.x,
+	 * so they are not handled here; see the comments of ia_css_isp_config.
+ * 1 cc_config
+ * 2 ce_config
+ * 3 de_config
+ * 4 gc_config
+ * 5 gamma_table
+ * 6 ctc_table
+ * 7 dvs_coefs
+ */
+}
+
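+/*
+ * Copy a parameter blob either from user space (via copy_from_user()) or
+ * from kernel space (via memcpy()), depending on where the caller obtained
+ * the source pointer.  Like copy_from_user(), this returns the number of
+ * bytes that could not be copied, so 0 means success.  A typical caller
+ * checks the result and bails out with -EFAULT, e.g.:
+ *
+ *	if (copy_from_compatible(&css_param->wb_config, arg->wb_config,
+ *				 sizeof(struct atomisp_css_wb_config),
+ *				 from_user))
+ *		return -EFAULT;
+ */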
+static unsigned long copy_from_compatible(void *to, const void *from,
+ unsigned long n, bool from_user)
+{
+ if (from_user)
+ return copy_from_user(to, from, n);
+ else
+ memcpy(to, from, n);
+ return 0;
+}
+
+int atomisp_cp_general_isp_parameters(struct atomisp_sub_device *asd,
+ struct atomisp_parameters *arg,
+ struct atomisp_css_params *css_param,
+ bool from_user)
+{
+ struct atomisp_parameters *cur_config = &css_param->update_flag;
+
+ if (!arg || !asd || !css_param)
+ return -EINVAL;
+
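+	/*
+	 * For every parameter block supplied by the caller, copy it into the
+	 * kernel-side css_param cache and point the corresponding update_flag
+	 * member at that cached copy, so that atomisp_apply_css_parameters()
+	 * later knows which blocks to push to CSS.  When called from kernel
+	 * context (from_user == false), a block with a pending update is left
+	 * untouched.
+	 */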
+ if (arg->wb_config && (from_user || !cur_config->wb_config)) {
+ if (copy_from_compatible(&css_param->wb_config, arg->wb_config,
+ sizeof(struct atomisp_css_wb_config),
+ from_user))
+ return -EFAULT;
+ css_param->update_flag.wb_config =
+ (struct atomisp_wb_config *) &css_param->wb_config;
+ }
+
+ if (arg->ob_config && (from_user || !cur_config->ob_config)) {
+ if (copy_from_compatible(&css_param->ob_config, arg->ob_config,
+ sizeof(struct atomisp_css_ob_config),
+ from_user))
+ return -EFAULT;
+ css_param->update_flag.ob_config =
+ (struct atomisp_ob_config *) &css_param->ob_config;
+ }
+
+ if (arg->dp_config && (from_user || !cur_config->dp_config)) {
+ if (copy_from_compatible(&css_param->dp_config, arg->dp_config,
+ sizeof(struct atomisp_css_dp_config),
+ from_user))
+ return -EFAULT;
+ css_param->update_flag.dp_config =
+ (struct atomisp_dp_config *) &css_param->dp_config;
+ }
+
+ if (asd->run_mode->val != ATOMISP_RUN_MODE_VIDEO) {
+ if (arg->dz_config && (from_user || !cur_config->dz_config)) {
+ if (copy_from_compatible(&css_param->dz_config,
+ arg->dz_config,
+ sizeof(struct atomisp_css_dz_config),
+ from_user))
+ return -EFAULT;
+ if (!atomisp_check_zoom_region(asd,
+ &css_param->dz_config)) {
+ dev_err(asd->isp->dev, "crop region error!");
+ return -EINVAL;
+ }
+ css_param->update_flag.dz_config =
+ (struct atomisp_dz_config *)
+ &css_param->dz_config;
+ }
+ }
+
+ if (arg->nr_config && (from_user || !cur_config->nr_config)) {
+ if (copy_from_compatible(&css_param->nr_config, arg->nr_config,
+ sizeof(struct atomisp_css_nr_config),
+ from_user))
+ return -EFAULT;
+ css_param->update_flag.nr_config =
+ (struct atomisp_nr_config *) &css_param->nr_config;
+ }
+
+ if (arg->ee_config && (from_user || !cur_config->ee_config)) {
+ if (copy_from_compatible(&css_param->ee_config, arg->ee_config,
+ sizeof(struct atomisp_css_ee_config),
+ from_user))
+ return -EFAULT;
+ css_param->update_flag.ee_config =
+ (struct atomisp_ee_config *) &css_param->ee_config;
+ }
+
+ if (arg->tnr_config && (from_user || !cur_config->tnr_config)) {
+ if (copy_from_compatible(&css_param->tnr_config,
+ arg->tnr_config,
+ sizeof(struct atomisp_css_tnr_config),
+ from_user))
+ return -EFAULT;
+ css_param->update_flag.tnr_config =
+ (struct atomisp_tnr_config *)
+ &css_param->tnr_config;
+ }
+
+ if (arg->a3a_config && (from_user || !cur_config->a3a_config)) {
+ if (copy_from_compatible(&css_param->s3a_config,
+ arg->a3a_config,
+ sizeof(struct atomisp_css_3a_config),
+ from_user))
+ return -EFAULT;
+ css_param->update_flag.a3a_config =
+ (struct atomisp_3a_config *) &css_param->s3a_config;
+ }
+
+ if (arg->ctc_config && (from_user || !cur_config->ctc_config)) {
+ if (copy_from_compatible(&css_param->ctc_config,
+ arg->ctc_config,
+ sizeof(struct atomisp_css_ctc_config),
+ from_user))
+ return -EFAULT;
+ css_param->update_flag.ctc_config =
+ (struct atomisp_ctc_config *)
+ &css_param->ctc_config;
+ }
+
+ if (arg->cnr_config && (from_user || !cur_config->cnr_config)) {
+ if (copy_from_compatible(&css_param->cnr_config,
+ arg->cnr_config,
+ sizeof(struct atomisp_css_cnr_config),
+ from_user))
+ return -EFAULT;
+ css_param->update_flag.cnr_config =
+ (struct atomisp_cnr_config *)
+ &css_param->cnr_config;
+ }
+
+ if (arg->ecd_config && (from_user || !cur_config->ecd_config)) {
+ if (copy_from_compatible(&css_param->ecd_config,
+ arg->ecd_config,
+ sizeof(struct atomisp_css_ecd_config),
+ from_user))
+ return -EFAULT;
+ css_param->update_flag.ecd_config =
+ (struct atomisp_ecd_config *)
+ &css_param->ecd_config;
+ }
+
+ if (arg->ynr_config && (from_user || !cur_config->ynr_config)) {
+ if (copy_from_compatible(&css_param->ynr_config,
+ arg->ynr_config,
+ sizeof(struct atomisp_css_ynr_config),
+ from_user))
+ return -EFAULT;
+ css_param->update_flag.ynr_config =
+ (struct atomisp_ynr_config *)
+ &css_param->ynr_config;
+ }
+
+ if (arg->fc_config && (from_user || !cur_config->fc_config)) {
+ if (copy_from_compatible(&css_param->fc_config,
+ arg->fc_config,
+ sizeof(struct atomisp_css_fc_config),
+ from_user))
+ return -EFAULT;
+ css_param->update_flag.fc_config =
+ (struct atomisp_fc_config *) &css_param->fc_config;
+ }
+
+ if (arg->macc_config && (from_user || !cur_config->macc_config)) {
+ if (copy_from_compatible(&css_param->macc_config,
+ arg->macc_config,
+ sizeof(struct atomisp_css_macc_config),
+ from_user))
+ return -EFAULT;
+ css_param->update_flag.macc_config =
+ (struct atomisp_macc_config *)
+ &css_param->macc_config;
+ }
+
+ if (arg->aa_config && (from_user || !cur_config->aa_config)) {
+ if (copy_from_compatible(&css_param->aa_config, arg->aa_config,
+ sizeof(struct atomisp_css_aa_config),
+ from_user))
+ return -EFAULT;
+ css_param->update_flag.aa_config =
+ (struct atomisp_aa_config *) &css_param->aa_config;
+ }
+
+ if (arg->anr_config && (from_user || !cur_config->anr_config)) {
+ if (copy_from_compatible(&css_param->anr_config,
+ arg->anr_config,
+ sizeof(struct atomisp_css_anr_config),
+ from_user))
+ return -EFAULT;
+ css_param->update_flag.anr_config =
+ (struct atomisp_anr_config *)
+ &css_param->anr_config;
+ }
+
+ if (arg->xnr_config && (from_user || !cur_config->xnr_config)) {
+ if (copy_from_compatible(&css_param->xnr_config,
+ arg->xnr_config,
+ sizeof(struct atomisp_css_xnr_config),
+ from_user))
+ return -EFAULT;
+ css_param->update_flag.xnr_config =
+ (struct atomisp_xnr_config *)
+ &css_param->xnr_config;
+ }
+
+ if (arg->yuv2rgb_cc_config &&
+ (from_user || !cur_config->yuv2rgb_cc_config)) {
+ if (copy_from_compatible(&css_param->yuv2rgb_cc_config,
+ arg->yuv2rgb_cc_config,
+ sizeof(struct atomisp_css_cc_config),
+ from_user))
+ return -EFAULT;
+ css_param->update_flag.yuv2rgb_cc_config =
+ (struct atomisp_cc_config *)
+ &css_param->yuv2rgb_cc_config;
+ }
+
+ if (arg->rgb2yuv_cc_config &&
+ (from_user || !cur_config->rgb2yuv_cc_config)) {
+ if (copy_from_compatible(&css_param->rgb2yuv_cc_config,
+ arg->rgb2yuv_cc_config,
+ sizeof(struct atomisp_css_cc_config),
+ from_user))
+ return -EFAULT;
+ css_param->update_flag.rgb2yuv_cc_config =
+ (struct atomisp_cc_config *)
+ &css_param->rgb2yuv_cc_config;
+ }
+
+ if (arg->macc_table && (from_user || !cur_config->macc_table)) {
+ if (copy_from_compatible(&css_param->macc_table,
+ arg->macc_table,
+ sizeof(struct atomisp_css_macc_table),
+ from_user))
+ return -EFAULT;
+ css_param->update_flag.macc_table =
+ (struct atomisp_macc_table *)
+ &css_param->macc_table;
+ }
+
+ if (arg->xnr_table && (from_user || !cur_config->xnr_table)) {
+ if (copy_from_compatible(&css_param->xnr_table,
+ arg->xnr_table,
+ sizeof(struct atomisp_css_xnr_table),
+ from_user))
+ return -EFAULT;
+ css_param->update_flag.xnr_table =
+ (struct atomisp_xnr_table *) &css_param->xnr_table;
+ }
+
+ if (arg->r_gamma_table && (from_user || !cur_config->r_gamma_table)) {
+ if (copy_from_compatible(&css_param->r_gamma_table,
+ arg->r_gamma_table,
+ sizeof(struct atomisp_css_rgb_gamma_table),
+ from_user))
+ return -EFAULT;
+ css_param->update_flag.r_gamma_table =
+ (struct atomisp_rgb_gamma_table *)
+ &css_param->r_gamma_table;
+ }
+
+ if (arg->g_gamma_table && (from_user || !cur_config->g_gamma_table)) {
+ if (copy_from_compatible(&css_param->g_gamma_table,
+ arg->g_gamma_table,
+ sizeof(struct atomisp_css_rgb_gamma_table),
+ from_user))
+ return -EFAULT;
+ css_param->update_flag.g_gamma_table =
+ (struct atomisp_rgb_gamma_table *)
+ &css_param->g_gamma_table;
+ }
+
+ if (arg->b_gamma_table && (from_user || !cur_config->b_gamma_table)) {
+ if (copy_from_compatible(&css_param->b_gamma_table,
+ arg->b_gamma_table,
+ sizeof(struct atomisp_css_rgb_gamma_table),
+ from_user))
+ return -EFAULT;
+ css_param->update_flag.b_gamma_table =
+ (struct atomisp_rgb_gamma_table *)
+ &css_param->b_gamma_table;
+ }
+
+ if (arg->anr_thres && (from_user || !cur_config->anr_thres)) {
+ if (copy_from_compatible(&css_param->anr_thres, arg->anr_thres,
+ sizeof(struct atomisp_css_anr_thres),
+ from_user))
+ return -EFAULT;
+ css_param->update_flag.anr_thres =
+ (struct atomisp_anr_thres *) &css_param->anr_thres;
+ }
+
+ if (from_user)
+ css_param->isp_config_id = arg->isp_config_id;
+ /*
+	 * These configurations are only used by ISP1.x, not by ISP2.x,
+	 * so they are not handled here; see the comments of ia_css_isp_config.
+ * 1 cc_config
+ * 2 ce_config
+ * 3 de_config
+ * 4 gc_config
+ * 5 gamma_table
+ * 6 ctc_table
+ * 7 dvs_coefs
+ */
+ return 0;
+}
+
+int atomisp_cp_lsc_table(struct atomisp_sub_device *asd,
+ struct atomisp_shading_table *source_st,
+ struct atomisp_css_params *css_param,
+ bool from_user)
+{
+ unsigned int i;
+ unsigned int len_table;
+ struct atomisp_css_shading_table *shading_table;
+ struct atomisp_css_shading_table *old_table;
+#ifdef ISP2401
+ struct atomisp_shading_table st;
+#endif
+
+ if (!source_st)
+ return 0;
+
+ if (!css_param)
+ return -EINVAL;
+
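+	/* In kernel context, keep a shading table update that is already pending. */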
+ if (!from_user && css_param->update_flag.shading_table)
+ return 0;
+
+#ifdef ISP2401
+ if (copy_from_compatible(&st, source_st,
+ sizeof(struct atomisp_shading_table),
+ from_user)) {
+ dev_err(asd->isp->dev, "copy shading table failed!");
+ return -EFAULT;
+ }
+
+#endif
+ old_table = css_param->shading_table;
+
+#ifdef ISP2401
+
+#endif
+	/* The user config disables the shading table. */
+#ifndef ISP2401
+ if (!source_st->enable) {
+#else
+ if (!st.enable) {
+#endif
+ /* Generate a minimum table with enable = 0. */
+ shading_table = atomisp_css_shading_table_alloc(1, 1);
+ if (!shading_table)
+ return -ENOMEM;
+ shading_table->enable = 0;
+ goto set_lsc;
+ }
+
+ /* Setting a new table. Validate first - all tables must be set */
+ for (i = 0; i < ATOMISP_NUM_SC_COLORS; i++) {
+#ifndef ISP2401
+ if (!source_st->data[i])
+#else
+ if (!st.data[i]) {
+ dev_err(asd->isp->dev, "shading table validate failed");
+#endif
+ return -EINVAL;
+#ifdef ISP2401
+ }
+#endif
+ }
+
+ /* Shading table size per color */
+#ifndef ISP2401
+ if (source_st->width > SH_CSS_MAX_SCTBL_WIDTH_PER_COLOR ||
+ source_st->height > SH_CSS_MAX_SCTBL_HEIGHT_PER_COLOR)
+#else
+ if (st.width > SH_CSS_MAX_SCTBL_WIDTH_PER_COLOR ||
+ st.height > SH_CSS_MAX_SCTBL_HEIGHT_PER_COLOR) {
+ dev_err(asd->isp->dev, "shading table w/h validate failed!");
+#endif
+ return -EINVAL;
+#ifdef ISP2401
+ }
+#endif
+
+#ifndef ISP2401
+ shading_table = atomisp_css_shading_table_alloc(source_st->width,
+ source_st->height);
+ if (!shading_table)
+ return -ENOMEM;
+#else
+ shading_table = atomisp_css_shading_table_alloc(st.width,
+ st.height);
+ if (!shading_table) {
+ dev_err(asd->isp->dev, "shading table alloc failed!");
+ return -ENOMEM;
+ }
+#endif
+
+#ifndef ISP2401
+ len_table = source_st->width * source_st->height * ATOMISP_SC_TYPE_SIZE;
+#else
+ len_table = st.width * st.height * ATOMISP_SC_TYPE_SIZE;
+#endif
+ for (i = 0; i < ATOMISP_NUM_SC_COLORS; i++) {
+ if (copy_from_compatible(shading_table->data[i],
+#ifndef ISP2401
+ source_st->data[i], len_table, from_user)) {
+#else
+ st.data[i], len_table, from_user)) {
+#endif
+ atomisp_css_shading_table_free(shading_table);
+ return -EFAULT;
+ }
+
+ }
+#ifndef ISP2401
+ shading_table->sensor_width = source_st->sensor_width;
+ shading_table->sensor_height = source_st->sensor_height;
+ shading_table->fraction_bits = source_st->fraction_bits;
+ shading_table->enable = source_st->enable;
+#else
+ shading_table->sensor_width = st.sensor_width;
+ shading_table->sensor_height = st.sensor_height;
+ shading_table->fraction_bits = st.fraction_bits;
+ shading_table->enable = st.enable;
+#endif
+
+ /* No need to update shading table if it is the same */
+ if (old_table != NULL &&
+ old_table->sensor_width == shading_table->sensor_width &&
+ old_table->sensor_height == shading_table->sensor_height &&
+ old_table->width == shading_table->width &&
+ old_table->height == shading_table->height &&
+ old_table->fraction_bits == shading_table->fraction_bits &&
+ old_table->enable == shading_table->enable) {
+ bool data_is_same = true;
+
+ for (i = 0; i < ATOMISP_NUM_SC_COLORS; i++) {
+ if (memcmp(shading_table->data[i], old_table->data[i],
+ len_table) != 0) {
+ data_is_same = false;
+ break;
+ }
+ }
+
+ if (data_is_same) {
+ atomisp_css_shading_table_free(shading_table);
+ return 0;
+ }
+ }
+
+set_lsc:
+ /* set LSC to CSS */
+ css_param->shading_table = shading_table;
+ css_param->update_flag.shading_table =
+ (struct atomisp_shading_table *) shading_table;
+ asd->params.sc_en = shading_table != NULL;
+
+ if (old_table)
+ atomisp_css_shading_table_free(old_table);
+
+ return 0;
+}
+
+int atomisp_css_cp_dvs2_coefs(struct atomisp_sub_device *asd,
+ struct ia_css_dvs2_coefficients *coefs,
+ struct atomisp_css_params *css_param,
+ bool from_user)
+{
+ struct atomisp_css_dvs_grid_info *cur =
+ atomisp_css_get_dvs_grid_info(&asd->params.curr_grid_info);
+ int dvs_hor_coef_bytes, dvs_ver_coef_bytes;
+#ifdef ISP2401
+ struct ia_css_dvs2_coefficients dvs2_coefs;
+#endif
+
+ if (!coefs || !cur)
+ return 0;
+
+ if (!from_user && css_param->update_flag.dvs2_coefs)
+ return 0;
+
+#ifndef ISP2401
+ if (sizeof(*cur) != sizeof(coefs->grid) ||
+ memcmp(&coefs->grid, cur, sizeof(coefs->grid))) {
+#else
+ if (copy_from_compatible(&dvs2_coefs, coefs,
+ sizeof(struct ia_css_dvs2_coefficients),
+ from_user)) {
+ dev_err(asd->isp->dev, "copy dvs2 coef failed");
+ return -EFAULT;
+ }
+
+ if (sizeof(*cur) != sizeof(dvs2_coefs.grid) ||
+ memcmp(&dvs2_coefs.grid, cur, sizeof(dvs2_coefs.grid))) {
+#endif
+ dev_err(asd->isp->dev, "dvs grid mis-match!\n");
+		/*
+		 * If the grid info in the argument differs from the current
+		 * grid info, tell the caller to reset the grid size and try
+		 * again.
+		 */
+ return -EAGAIN;
+ }
+
+#ifndef ISP2401
+ if (coefs->hor_coefs.odd_real == NULL ||
+ coefs->hor_coefs.odd_imag == NULL ||
+ coefs->hor_coefs.even_real == NULL ||
+ coefs->hor_coefs.even_imag == NULL ||
+ coefs->ver_coefs.odd_real == NULL ||
+ coefs->ver_coefs.odd_imag == NULL ||
+ coefs->ver_coefs.even_real == NULL ||
+ coefs->ver_coefs.even_imag == NULL)
+#else
+ if (dvs2_coefs.hor_coefs.odd_real == NULL ||
+ dvs2_coefs.hor_coefs.odd_imag == NULL ||
+ dvs2_coefs.hor_coefs.even_real == NULL ||
+ dvs2_coefs.hor_coefs.even_imag == NULL ||
+ dvs2_coefs.ver_coefs.odd_real == NULL ||
+ dvs2_coefs.ver_coefs.odd_imag == NULL ||
+ dvs2_coefs.ver_coefs.even_real == NULL ||
+ dvs2_coefs.ver_coefs.even_imag == NULL)
+#endif
+ return -EINVAL;
+
+ if (!css_param->dvs2_coeff) {
+ /* DIS coefficients. */
+ css_param->dvs2_coeff = ia_css_dvs2_coefficients_allocate(cur);
+ if (!css_param->dvs2_coeff)
+ return -ENOMEM;
+ }
+
+ dvs_hor_coef_bytes = asd->params.dvs_hor_coef_bytes;
+ dvs_ver_coef_bytes = asd->params.dvs_ver_coef_bytes;
+ if (copy_from_compatible(css_param->dvs2_coeff->hor_coefs.odd_real,
+#ifndef ISP2401
+ coefs->hor_coefs.odd_real, dvs_hor_coef_bytes, from_user) ||
+#else
+ dvs2_coefs.hor_coefs.odd_real, dvs_hor_coef_bytes, from_user) ||
+#endif
+ copy_from_compatible(css_param->dvs2_coeff->hor_coefs.odd_imag,
+#ifndef ISP2401
+ coefs->hor_coefs.odd_imag, dvs_hor_coef_bytes, from_user) ||
+#else
+ dvs2_coefs.hor_coefs.odd_imag, dvs_hor_coef_bytes, from_user) ||
+#endif
+ copy_from_compatible(css_param->dvs2_coeff->hor_coefs.even_real,
+#ifndef ISP2401
+ coefs->hor_coefs.even_real, dvs_hor_coef_bytes, from_user) ||
+#else
+ dvs2_coefs.hor_coefs.even_real, dvs_hor_coef_bytes, from_user) ||
+#endif
+ copy_from_compatible(css_param->dvs2_coeff->hor_coefs.even_imag,
+#ifndef ISP2401
+ coefs->hor_coefs.even_imag, dvs_hor_coef_bytes, from_user) ||
+#else
+ dvs2_coefs.hor_coefs.even_imag, dvs_hor_coef_bytes, from_user) ||
+#endif
+ copy_from_compatible(css_param->dvs2_coeff->ver_coefs.odd_real,
+#ifndef ISP2401
+ coefs->ver_coefs.odd_real, dvs_ver_coef_bytes, from_user) ||
+#else
+ dvs2_coefs.ver_coefs.odd_real, dvs_ver_coef_bytes, from_user) ||
+#endif
+ copy_from_compatible(css_param->dvs2_coeff->ver_coefs.odd_imag,
+#ifndef ISP2401
+ coefs->ver_coefs.odd_imag, dvs_ver_coef_bytes, from_user) ||
+#else
+ dvs2_coefs.ver_coefs.odd_imag, dvs_ver_coef_bytes, from_user) ||
+#endif
+ copy_from_compatible(css_param->dvs2_coeff->ver_coefs.even_real,
+#ifndef ISP2401
+ coefs->ver_coefs.even_real, dvs_ver_coef_bytes, from_user) ||
+#else
+ dvs2_coefs.ver_coefs.even_real, dvs_ver_coef_bytes, from_user) ||
+#endif
+ copy_from_compatible(css_param->dvs2_coeff->ver_coefs.even_imag,
+#ifndef ISP2401
+ coefs->ver_coefs.even_imag, dvs_ver_coef_bytes, from_user)) {
+#else
+ dvs2_coefs.ver_coefs.even_imag, dvs_ver_coef_bytes, from_user)) {
+#endif
+ ia_css_dvs2_coefficients_free(css_param->dvs2_coeff);
+ css_param->dvs2_coeff = NULL;
+ return -EFAULT;
+ }
+
+ css_param->update_flag.dvs2_coefs =
+ (struct atomisp_dvs2_coefficients *)css_param->dvs2_coeff;
+ return 0;
+}
+
+int atomisp_cp_dvs_6axis_config(struct atomisp_sub_device *asd,
+ struct atomisp_dvs_6axis_config *source_6axis_config,
+ struct atomisp_css_params *css_param,
+ bool from_user)
+{
+ struct atomisp_css_dvs_6axis_config *dvs_6axis_config;
+ struct atomisp_css_dvs_6axis_config *old_6axis_config;
+#ifdef ISP2401
+ struct atomisp_css_dvs_6axis_config t_6axis_config;
+#endif
+ struct ia_css_stream *stream =
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream;
+ struct atomisp_css_dvs_grid_info *dvs_grid_info =
+ atomisp_css_get_dvs_grid_info(&asd->params.curr_grid_info);
+ int ret = -EFAULT;
+
+ if (stream == NULL) {
+ dev_err(asd->isp->dev, "%s: internal error!", __func__);
+ return -EINVAL;
+ }
+
+ if (!source_6axis_config || !dvs_grid_info)
+ return 0;
+
+ if (!dvs_grid_info->enable)
+ return 0;
+
+ if (!from_user && css_param->update_flag.dvs_6axis_config)
+ return 0;
+
+	/* check whether the 6-axis config needs to be reallocated */
+ old_6axis_config = css_param->dvs_6axis;
+ dvs_6axis_config = old_6axis_config;
+#ifdef ISP2401
+
+ if (copy_from_compatible(&t_6axis_config, source_6axis_config,
+ sizeof(struct atomisp_dvs_6axis_config),
+ from_user)) {
+		dev_err(asd->isp->dev, "copy dvs 6axis config failed!");
+ return -EFAULT;
+ }
+
+#endif
+ if (old_6axis_config &&
+#ifndef ISP2401
+ (old_6axis_config->width_y != source_6axis_config->width_y ||
+ old_6axis_config->height_y != source_6axis_config->height_y ||
+ old_6axis_config->width_uv != source_6axis_config->width_uv ||
+ old_6axis_config->height_uv != source_6axis_config->height_uv)) {
+#else
+ (old_6axis_config->width_y != t_6axis_config.width_y ||
+ old_6axis_config->height_y != t_6axis_config.height_y ||
+ old_6axis_config->width_uv != t_6axis_config.width_uv ||
+ old_6axis_config->height_uv != t_6axis_config.height_uv)) {
+#endif
+ ia_css_dvs2_6axis_config_free(css_param->dvs_6axis);
+ css_param->dvs_6axis = NULL;
+
+ dvs_6axis_config = ia_css_dvs2_6axis_config_allocate(stream);
+ if (!dvs_6axis_config)
+ return -ENOMEM;
+ } else if (!dvs_6axis_config) {
+ dvs_6axis_config = ia_css_dvs2_6axis_config_allocate(stream);
+ if (!dvs_6axis_config)
+ return -ENOMEM;
+ }
+
+#ifndef ISP2401
+ dvs_6axis_config->exp_id = source_6axis_config->exp_id;
+#else
+ dvs_6axis_config->exp_id = t_6axis_config.exp_id;
+#endif
+
+ if (copy_from_compatible(dvs_6axis_config->xcoords_y,
+#ifndef ISP2401
+ source_6axis_config->xcoords_y,
+ source_6axis_config->width_y *
+ source_6axis_config->height_y *
+ sizeof(*source_6axis_config->xcoords_y),
+#else
+ t_6axis_config.xcoords_y,
+ t_6axis_config.width_y *
+ t_6axis_config.height_y *
+ sizeof(*dvs_6axis_config->xcoords_y),
+#endif
+ from_user))
+ goto error;
+ if (copy_from_compatible(dvs_6axis_config->ycoords_y,
+#ifndef ISP2401
+ source_6axis_config->ycoords_y,
+ source_6axis_config->width_y *
+ source_6axis_config->height_y *
+ sizeof(*source_6axis_config->ycoords_y),
+#else
+ t_6axis_config.ycoords_y,
+ t_6axis_config.width_y *
+ t_6axis_config.height_y *
+ sizeof(*dvs_6axis_config->ycoords_y),
+#endif
+ from_user))
+ goto error;
+ if (copy_from_compatible(dvs_6axis_config->xcoords_uv,
+#ifndef ISP2401
+ source_6axis_config->xcoords_uv,
+ source_6axis_config->width_uv *
+ source_6axis_config->height_uv *
+ sizeof(*source_6axis_config->xcoords_uv),
+#else
+ t_6axis_config.xcoords_uv,
+ t_6axis_config.width_uv *
+ t_6axis_config.height_uv *
+ sizeof(*dvs_6axis_config->xcoords_uv),
+#endif
+ from_user))
+ goto error;
+ if (copy_from_compatible(dvs_6axis_config->ycoords_uv,
+#ifndef ISP2401
+ source_6axis_config->ycoords_uv,
+ source_6axis_config->width_uv *
+ source_6axis_config->height_uv *
+ sizeof(*source_6axis_config->ycoords_uv),
+#else
+ t_6axis_config.ycoords_uv,
+ t_6axis_config.width_uv *
+ t_6axis_config.height_uv *
+ sizeof(*dvs_6axis_config->ycoords_uv),
+#endif
+ from_user))
+ goto error;
+
+ css_param->dvs_6axis = dvs_6axis_config;
+ css_param->update_flag.dvs_6axis_config =
+ (struct atomisp_dvs_6axis_config *) dvs_6axis_config;
+ return 0;
+
+error:
+ if (dvs_6axis_config)
+ ia_css_dvs2_6axis_config_free(dvs_6axis_config);
+ return ret;
+}
+
+int atomisp_cp_morph_table(struct atomisp_sub_device *asd,
+ struct atomisp_morph_table *source_morph_table,
+ struct atomisp_css_params *css_param,
+ bool from_user)
+{
+ int ret = -EFAULT;
+ unsigned int i;
+ struct atomisp_css_morph_table *morph_table;
+#ifdef ISP2401
+ struct atomisp_css_morph_table mtbl;
+#endif
+ struct atomisp_css_morph_table *old_morph_table;
+
+ if (!source_morph_table)
+ return 0;
+
+ if (!from_user && css_param->update_flag.morph_table)
+ return 0;
+
+ old_morph_table = css_param->morph_table;
+
+#ifdef ISP2401
+ if (copy_from_compatible(&mtbl, source_morph_table,
+ sizeof(struct atomisp_morph_table),
+ from_user)) {
+ dev_err(asd->isp->dev, "copy morph table failed!");
+ return -EFAULT;
+ }
+
+#endif
+ morph_table = atomisp_css_morph_table_allocate(
+#ifndef ISP2401
+ source_morph_table->width,
+ source_morph_table->height);
+#else
+ mtbl.width,
+ mtbl.height);
+#endif
+ if (!morph_table)
+ return -ENOMEM;
+
+ for (i = 0; i < CSS_MORPH_TABLE_NUM_PLANES; i++) {
+ if (copy_from_compatible(morph_table->coordinates_x[i],
+ source_morph_table->coordinates_x[i],
+#ifndef ISP2401
+ source_morph_table->height * source_morph_table->width *
+ sizeof(*source_morph_table->coordinates_x[i]),
+#else
+ mtbl.height * mtbl.width *
+ sizeof(*morph_table->coordinates_x[i]),
+#endif
+ from_user))
+ goto error;
+
+ if (copy_from_compatible(morph_table->coordinates_y[i],
+ source_morph_table->coordinates_y[i],
+#ifndef ISP2401
+ source_morph_table->height * source_morph_table->width *
+ sizeof(*source_morph_table->coordinates_y[i]),
+#else
+ mtbl.height * mtbl.width *
+ sizeof(*morph_table->coordinates_y[i]),
+#endif
+ from_user))
+ goto error;
+ }
+
+ css_param->morph_table = morph_table;
+ if (old_morph_table)
+ atomisp_css_morph_table_free(old_morph_table);
+ css_param->update_flag.morph_table =
+ (struct atomisp_morph_table *) morph_table;
+ return 0;
+
+error:
+ if (morph_table)
+ atomisp_css_morph_table_free(morph_table);
+ return ret;
+}
+
+int atomisp_makeup_css_parameters(struct atomisp_sub_device *asd,
+ struct atomisp_parameters *arg,
+ struct atomisp_css_params *css_param)
+{
+ int ret;
+
+ ret = atomisp_cp_general_isp_parameters(asd, arg, css_param, false);
+ if (ret)
+ return ret;
+ ret = atomisp_cp_lsc_table(asd, arg->shading_table, css_param, false);
+ if (ret)
+ return ret;
+ ret = atomisp_cp_morph_table(asd, arg->morph_table, css_param, false);
+ if (ret)
+ return ret;
+ ret = atomisp_css_cp_dvs2_coefs(asd,
+ (struct ia_css_dvs2_coefficients *) arg->dvs2_coefs,
+ css_param, false);
+ if (ret)
+ return ret;
+ ret = atomisp_cp_dvs_6axis_config(asd, arg->dvs_6axis_config,
+ css_param, false);
+ return ret;
+}
+
+void atomisp_free_css_parameters(struct atomisp_css_params *css_param)
+{
+ if (css_param->dvs_6axis) {
+ ia_css_dvs2_6axis_config_free(css_param->dvs_6axis);
+ css_param->dvs_6axis = NULL;
+ }
+ if (css_param->dvs2_coeff) {
+ ia_css_dvs2_coefficients_free(css_param->dvs2_coeff);
+ css_param->dvs2_coeff = NULL;
+ }
+ if (css_param->shading_table) {
+ ia_css_shading_table_free(css_param->shading_table);
+ css_param->shading_table = NULL;
+ }
+ if (css_param->morph_table) {
+ ia_css_morph_table_free(css_param->morph_table);
+ css_param->morph_table = NULL;
+ }
+}
+
+/*
+ * Walk the per-frame parameter list and the waiting-buffer list, match the
+ * entries, then set the parameters to CSS and enqueue the buffers to CSS.
+ * A buffer in the waiting list that is not bound to a per-frame parameter
+ * is enqueued to CSS as soon as all per-frame setting buffers ahead of it
+ * have been enqueued.
+ */
+void atomisp_handle_parameter_and_buffer(struct atomisp_video_pipe *pipe)
+{
+ struct atomisp_sub_device *asd = pipe->asd;
+ struct videobuf_buffer *vb = NULL, *vb_tmp;
+ struct atomisp_css_params_with_list *param = NULL, *param_tmp;
+ struct videobuf_vmalloc_memory *vm_mem = NULL;
+ unsigned long irqflags;
+ bool need_to_enqueue_buffer = false;
+
+ if (atomisp_is_vf_pipe(pipe))
+ return;
+
+ /*
+	 * CSS/FW requires that parameters are set and buffers are enqueued
+	 * only after the ISP has started streaming.
+ */
+ if (asd->streaming != ATOMISP_DEVICE_STREAMING_ENABLED)
+ return;
+
+ if (list_empty(&pipe->per_frame_params) ||
+ list_empty(&pipe->buffers_waiting_for_param))
+ return;
+
+ list_for_each_entry_safe(vb, vb_tmp,
+ &pipe->buffers_waiting_for_param, queue) {
+ if (pipe->frame_request_config_id[vb->i]) {
+ list_for_each_entry_safe(param, param_tmp,
+ &pipe->per_frame_params, list) {
+ if (pipe->frame_request_config_id[vb->i] !=
+ param->params.isp_config_id)
+ continue;
+
+ list_del(&param->list);
+ list_del(&vb->queue);
+ /*
+ * clear the request config id as the buffer
+ * will be handled and enqueued into CSS soon
+ */
+ pipe->frame_request_config_id[vb->i] = 0;
+ pipe->frame_params[vb->i] = param;
+ vm_mem = vb->priv;
+ BUG_ON(!vm_mem);
+ break;
+ }
+
+ if (vm_mem) {
+ spin_lock_irqsave(&pipe->irq_lock, irqflags);
+ list_add_tail(&vb->queue, &pipe->activeq);
+ spin_unlock_irqrestore(&pipe->irq_lock, irqflags);
+ vm_mem = NULL;
+ need_to_enqueue_buffer = true;
+ } else {
+				/* no matching per-frame parameter yet; stop the loop */
+ break;
+ }
+ } else {
+ list_del(&vb->queue);
+ pipe->frame_params[vb->i] = NULL;
+ spin_lock_irqsave(&pipe->irq_lock, irqflags);
+ list_add_tail(&vb->queue, &pipe->activeq);
+ spin_unlock_irqrestore(&pipe->irq_lock, irqflags);
+ need_to_enqueue_buffer = true;
+ }
+ }
+
+ if (need_to_enqueue_buffer) {
+ atomisp_qbuffers_to_css(asd);
+#ifndef ISP2401
+ if (!atomisp_is_wdt_running(asd) && atomisp_buffers_queued(asd))
+ atomisp_wdt_start(asd);
+#else
+ if (atomisp_buffers_queued_pipe(pipe)) {
+ if (!atomisp_is_wdt_running(pipe))
+ atomisp_wdt_start(pipe);
+ else
+ atomisp_wdt_refresh_pipe(pipe,
+ ATOMISP_WDT_KEEP_CURRENT_DELAY);
+ }
+#endif
+ }
+}
+
+/*
+ * Function to configure ISP parameters
+ */
+int atomisp_set_parameters(struct video_device *vdev,
+ struct atomisp_parameters *arg)
+{
+ struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev);
+ struct atomisp_sub_device *asd = pipe->asd;
+ struct atomisp_css_params_with_list *param = NULL;
+ struct atomisp_css_params *css_param = &asd->params.css_param;
+ int ret;
+
+ if (asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream == NULL) {
+ dev_err(asd->isp->dev, "%s: internal error!\n", __func__);
+ return -EINVAL;
+ }
+
+ dev_dbg(asd->isp->dev, "%s: set parameter(per_frame_setting %d) for asd%d with isp_config_id %d of %s\n",
+ __func__, arg->per_frame_setting, asd->index,
+ arg->isp_config_id, vdev->name);
+#ifdef ISP2401
+
+ if (atomisp_is_vf_pipe(pipe) && arg->per_frame_setting) {
+		dev_err(asd->isp->dev, "%s: vf pipe does not support per_frame_setting",
+ __func__);
+ return -EINVAL;
+ }
+
+#endif
+ if (arg->per_frame_setting && !atomisp_is_vf_pipe(pipe)) {
+ /*
+		 * Per-frame setting is enabled: allocate a new parameter
+		 * buffer to cache the parameters; they are only set to CSS
+		 * once the corresponding frame buffers are ready.
+		 * Per-frame setting only works for the main output frame.
+ */
+ param = atomisp_kernel_zalloc(sizeof(*param), true);
+ if (!param) {
+ dev_err(asd->isp->dev, "%s: failed to alloc params buffer\n",
+ __func__);
+ return -ENOMEM;
+ }
+ css_param = &param->params;
+ }
+
+ ret = atomisp_cp_general_isp_parameters(asd, arg, css_param, true);
+ if (ret)
+ goto apply_parameter_failed;
+
+ ret = atomisp_cp_lsc_table(asd, arg->shading_table, css_param, true);
+ if (ret)
+ goto apply_parameter_failed;
+
+ ret = atomisp_cp_morph_table(asd, arg->morph_table, css_param, true);
+ if (ret)
+ goto apply_parameter_failed;
+
+ ret = atomisp_css_cp_dvs2_coefs(asd,
+ (struct ia_css_dvs2_coefficients *) arg->dvs2_coefs,
+ css_param, true);
+ if (ret)
+ goto apply_parameter_failed;
+
+ ret = atomisp_cp_dvs_6axis_config(asd, arg->dvs_6axis_config,
+ css_param, true);
+ if (ret)
+ goto apply_parameter_failed;
+
+ if (!(arg->per_frame_setting && !atomisp_is_vf_pipe(pipe))) {
+ /* indicate to CSS that we have parameters to be updated */
+ asd->params.css_update_params_needed = true;
+ } else {
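+		/*
+		 * Queue the per-frame parameters; they are matched against
+		 * waiting buffers and applied in
+		 * atomisp_handle_parameter_and_buffer().
+		 */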
+ list_add_tail(&param->list, &pipe->per_frame_params);
+ atomisp_handle_parameter_and_buffer(pipe);
+ }
+
+ return 0;
+
+apply_parameter_failed:
+ if (css_param)
+ atomisp_free_css_parameters(css_param);
+ if (param)
+ atomisp_kernel_free(param);
+
+ return ret;
+}
+
+/*
+ * Function to set/get isp parameters to isp
+ */
+int atomisp_param(struct atomisp_sub_device *asd, int flag,
+ struct atomisp_parm *config)
+{
+ struct atomisp_device *isp = asd->isp;
+ struct ia_css_pipe_config *vp_cfg =
+ &asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].
+ pipe_configs[IA_CSS_PIPE_ID_VIDEO];
+
+ /* Read parameter for 3A binary info */
+ if (flag == 0) {
+ struct atomisp_css_dvs_grid_info *dvs_grid_info =
+ atomisp_css_get_dvs_grid_info(
+ &asd->params.curr_grid_info);
+
+		if (config == NULL) {
+ dev_err(isp->dev, "ERROR: NULL pointer in grid_info\n");
+ return -EINVAL;
+ }
+ atomisp_curr_user_grid_info(asd, &config->info);
+
+ /* We always return the resolution and stride even if there is
+ * no valid metadata. This allows the caller to get the
+ * information needed to allocate user-space buffers. */
+ config->metadata_config.metadata_height = asd->
+ stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream_info.
+ metadata_info.resolution.height;
+ config->metadata_config.metadata_stride = asd->
+ stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream_info.
+ metadata_info.stride;
+
+ /* update dvs grid info */
+ if (dvs_grid_info)
+ memcpy(&config->dvs_grid,
+ dvs_grid_info,
+ sizeof(struct atomisp_css_dvs_grid_info));
+
+ if (asd->run_mode->val != ATOMISP_RUN_MODE_VIDEO) {
+ config->dvs_envelop.width = 0;
+ config->dvs_envelop.height = 0;
+ return 0;
+ }
+
+ /* update dvs envelop info */
+ if (!asd->continuous_mode->val) {
+ config->dvs_envelop.width = vp_cfg->dvs_envelope.width;
+ config->dvs_envelop.height =
+ vp_cfg->dvs_envelope.height;
+ } else {
+ unsigned int dvs_w, dvs_h, dvs_w_max, dvs_h_max;
+
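+			/*
+			 * In continuous mode the DVS envelope is the
+			 * difference between the bayer downscaler output and
+			 * the main output resolution, capped at one fifth of
+			 * the output resolution rounded down to the ISP step
+			 * size.
+			 */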
+ dvs_w = vp_cfg->bayer_ds_out_res.width -
+ vp_cfg->output_info[0].res.width;
+ dvs_h = vp_cfg->bayer_ds_out_res.height -
+ vp_cfg->output_info[0].res.height;
+ dvs_w_max = rounddown(
+ vp_cfg->output_info[0].res.width / 5,
+ ATOM_ISP_STEP_WIDTH);
+ dvs_h_max = rounddown(
+ vp_cfg->output_info[0].res.height / 5,
+ ATOM_ISP_STEP_HEIGHT);
+
+ config->dvs_envelop.width = min(dvs_w, dvs_w_max);
+ config->dvs_envelop.height = min(dvs_h, dvs_h_max);
+ }
+
+ return 0;
+ }
+
+ memcpy(&asd->params.css_param.wb_config, &config->wb_config,
+ sizeof(struct atomisp_css_wb_config));
+ memcpy(&asd->params.css_param.ob_config, &config->ob_config,
+ sizeof(struct atomisp_css_ob_config));
+ memcpy(&asd->params.css_param.dp_config, &config->dp_config,
+ sizeof(struct atomisp_css_dp_config));
+ memcpy(&asd->params.css_param.de_config, &config->de_config,
+ sizeof(struct atomisp_css_de_config));
+ memcpy(&asd->params.css_param.dz_config, &config->dz_config,
+ sizeof(struct atomisp_css_dz_config));
+ memcpy(&asd->params.css_param.ce_config, &config->ce_config,
+ sizeof(struct atomisp_css_ce_config));
+ memcpy(&asd->params.css_param.nr_config, &config->nr_config,
+ sizeof(struct atomisp_css_nr_config));
+ memcpy(&asd->params.css_param.ee_config, &config->ee_config,
+ sizeof(struct atomisp_css_ee_config));
+ memcpy(&asd->params.css_param.tnr_config, &config->tnr_config,
+ sizeof(struct atomisp_css_tnr_config));
+
+ if (asd->params.color_effect == V4L2_COLORFX_NEGATIVE) {
+ asd->params.css_param.cc_config.matrix[3] = -config->cc_config.matrix[3];
+ asd->params.css_param.cc_config.matrix[4] = -config->cc_config.matrix[4];
+ asd->params.css_param.cc_config.matrix[5] = -config->cc_config.matrix[5];
+ asd->params.css_param.cc_config.matrix[6] = -config->cc_config.matrix[6];
+ asd->params.css_param.cc_config.matrix[7] = -config->cc_config.matrix[7];
+ asd->params.css_param.cc_config.matrix[8] = -config->cc_config.matrix[8];
+ }
+
+ if (asd->params.color_effect != V4L2_COLORFX_SEPIA &&
+ asd->params.color_effect != V4L2_COLORFX_BW) {
+ memcpy(&asd->params.css_param.cc_config, &config->cc_config,
+ sizeof(struct atomisp_css_cc_config));
+ atomisp_css_set_cc_config(asd, &asd->params.css_param.cc_config);
+ }
+
+ atomisp_css_set_wb_config(asd, &asd->params.css_param.wb_config);
+ atomisp_css_set_ob_config(asd, &asd->params.css_param.ob_config);
+ atomisp_css_set_de_config(asd, &asd->params.css_param.de_config);
+ atomisp_css_set_dz_config(asd, &asd->params.css_param.dz_config);
+ atomisp_css_set_ce_config(asd, &asd->params.css_param.ce_config);
+ atomisp_css_set_dp_config(asd, &asd->params.css_param.dp_config);
+ atomisp_css_set_nr_config(asd, &asd->params.css_param.nr_config);
+ atomisp_css_set_ee_config(asd, &asd->params.css_param.ee_config);
+ atomisp_css_set_tnr_config(asd, &asd->params.css_param.tnr_config);
+ asd->params.css_update_params_needed = true;
+
+ return 0;
+}
+
+/*
+ * Function to configure color effect of the image
+ */
+int atomisp_color_effect(struct atomisp_sub_device *asd, int flag,
+ __s32 *effect)
+{
+ struct atomisp_css_cc_config *cc_config = NULL;
+ struct atomisp_css_macc_table *macc_table = NULL;
+ struct atomisp_css_ctc_table *ctc_table = NULL;
+ int ret = 0;
+ struct v4l2_control control;
+ struct atomisp_device *isp = asd->isp;
+
+ if (flag == 0) {
+ *effect = asd->params.color_effect;
+ return 0;
+ }
+
+ control.id = V4L2_CID_COLORFX;
+ control.value = *effect;
+ ret =
+ v4l2_s_ctrl(NULL, isp->inputs[asd->input_curr].camera->ctrl_handler,
+ &control);
+ /*
+ * if set color effect to sensor successfully, return
+ * 0 directly.
+ */
+ if (!ret) {
+ asd->params.color_effect = (u32)*effect;
+ return 0;
+ }
+
+ if (*effect == asd->params.color_effect)
+ return 0;
+
+ /*
+	 * Reset asd->params.macc_en; it is re-enabled below only for the
+	 * effects that use a MACC table.
+ */
+ asd->params.macc_en = false;
+
+ switch (*effect) {
+ case V4L2_COLORFX_NONE:
+ macc_table = &asd->params.css_param.macc_table;
+ asd->params.macc_en = true;
+ break;
+ case V4L2_COLORFX_SEPIA:
+ cc_config = &sepia_cc_config;
+ break;
+ case V4L2_COLORFX_NEGATIVE:
+ cc_config = &nega_cc_config;
+ break;
+ case V4L2_COLORFX_BW:
+ cc_config = &mono_cc_config;
+ break;
+ case V4L2_COLORFX_SKY_BLUE:
+ macc_table = &blue_macc_table;
+ asd->params.macc_en = true;
+ break;
+ case V4L2_COLORFX_GRASS_GREEN:
+ macc_table = &green_macc_table;
+ asd->params.macc_en = true;
+ break;
+ case V4L2_COLORFX_SKIN_WHITEN_LOW:
+ macc_table = &skin_low_macc_table;
+ asd->params.macc_en = true;
+ break;
+ case V4L2_COLORFX_SKIN_WHITEN:
+ macc_table = &skin_medium_macc_table;
+ asd->params.macc_en = true;
+ break;
+ case V4L2_COLORFX_SKIN_WHITEN_HIGH:
+ macc_table = &skin_high_macc_table;
+ asd->params.macc_en = true;
+ break;
+ case V4L2_COLORFX_VIVID:
+ ctc_table = &vivid_ctc_table;
+ break;
+ default:
+ return -EINVAL;
+ }
+ atomisp_update_capture_mode(asd);
+
+ if (cc_config)
+ atomisp_css_set_cc_config(asd, cc_config);
+ if (macc_table)
+ atomisp_css_set_macc_table(asd, macc_table);
+ if (ctc_table)
+ atomisp_css_set_ctc_table(asd, ctc_table);
+ asd->params.color_effect = (u32)*effect;
+ asd->params.css_update_params_needed = true;
+ return 0;
+}
+
+/*
+ * Function to configure bad pixel correction
+ */
+int atomisp_bad_pixel(struct atomisp_sub_device *asd, int flag,
+ __s32 *value)
+{
+
+ if (flag == 0) {
+ *value = asd->params.bad_pixel_en;
+ return 0;
+ }
+ asd->params.bad_pixel_en = !!*value;
+
+ return 0;
+}
+
+/*
+ * Function to configure bad pixel correction params
+ */
+int atomisp_bad_pixel_param(struct atomisp_sub_device *asd, int flag,
+ struct atomisp_dp_config *config)
+{
+ if (flag == 0) {
+ /* Get bad pixel from current setup */
+ if (atomisp_css_get_dp_config(asd, config))
+ return -EINVAL;
+ } else {
+ /* Set bad pixel to isp parameters */
+ memcpy(&asd->params.css_param.dp_config, config,
+ sizeof(asd->params.css_param.dp_config));
+ atomisp_css_set_dp_config(asd, &asd->params.css_param.dp_config);
+ asd->params.css_update_params_needed = true;
+ }
+
+ return 0;
+}
+
+/*
+ * Function to enable/disable video image stabilization
+ */
+int atomisp_video_stable(struct atomisp_sub_device *asd, int flag,
+ __s32 *value)
+{
+ if (flag == 0)
+ *value = asd->params.video_dis_en;
+ else
+ asd->params.video_dis_en = !!*value;
+
+ return 0;
+}
+
+/*
+ * Function to configure fixed pattern noise
+ */
+int atomisp_fixed_pattern(struct atomisp_sub_device *asd, int flag,
+ __s32 *value)
+{
+
+ if (flag == 0) {
+ *value = asd->params.fpn_en;
+ return 0;
+ }
+
+ if (*value == 0) {
+ asd->params.fpn_en = 0;
+ return 0;
+ }
+
+	/*
+	 * TODO: add a function to get the black frame from the sensor with
+	 * the shutter off.
+	 */
+ return 0;
+}
+
+static unsigned int
+atomisp_bytesperline_to_padded_width(unsigned int bytesperline,
+ enum atomisp_css_frame_format format)
+{
+ switch (format) {
+ case CSS_FRAME_FORMAT_UYVY:
+ case CSS_FRAME_FORMAT_YUYV:
+ case CSS_FRAME_FORMAT_RAW:
+ case CSS_FRAME_FORMAT_RGB565:
+ return bytesperline/2;
+ case CSS_FRAME_FORMAT_RGBA888:
+ return bytesperline/4;
+	/*
+	 * The following cases could be removed, but we leave them in to
+	 * document the formats that are included.
+	 */
+ case CSS_FRAME_FORMAT_NV11:
+ case CSS_FRAME_FORMAT_NV12:
+ case CSS_FRAME_FORMAT_NV16:
+ case CSS_FRAME_FORMAT_NV21:
+ case CSS_FRAME_FORMAT_NV61:
+ case CSS_FRAME_FORMAT_YV12:
+ case CSS_FRAME_FORMAT_YV16:
+ case CSS_FRAME_FORMAT_YUV420:
+ case CSS_FRAME_FORMAT_YUV420_16:
+ case CSS_FRAME_FORMAT_YUV422:
+ case CSS_FRAME_FORMAT_YUV422_16:
+ case CSS_FRAME_FORMAT_YUV444:
+ case CSS_FRAME_FORMAT_YUV_LINE:
+ case CSS_FRAME_FORMAT_PLANAR_RGB888:
+ case CSS_FRAME_FORMAT_QPLANE6:
+ case CSS_FRAME_FORMAT_BINARY_8:
+ default:
+ return bytesperline;
+ }
+}
+
+static int
+atomisp_v4l2_framebuffer_to_css_frame(const struct v4l2_framebuffer *arg,
+ struct atomisp_css_frame **result)
+{
+ struct atomisp_css_frame *res = NULL;
+ unsigned int padded_width;
+ enum atomisp_css_frame_format sh_format;
+ char *tmp_buf = NULL;
+ int ret = 0;
+
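+	/*
+	 * Convert the V4L2 framebuffer into a CSS frame: allocate the frame,
+	 * bounce the user data through a vmalloc() buffer and then store it
+	 * into ISP memory with hmm_store().
+	 */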
+ sh_format = v4l2_fmt_to_sh_fmt(arg->fmt.pixelformat);
+ padded_width = atomisp_bytesperline_to_padded_width(
+ arg->fmt.bytesperline, sh_format);
+
+	/*
+	 * Note: the padded width of an atomisp_css_frame is in elements, not
+	 * in bytes.  The RAW frame we use here should always be a 16-bit RAW
+	 * frame.  This is why bytesperline/2 equals the padded width.
+	 */
+ if (atomisp_css_frame_allocate(&res, arg->fmt.width, arg->fmt.height,
+ sh_format, padded_width, 0)) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ tmp_buf = vmalloc(arg->fmt.sizeimage);
+ if (!tmp_buf) {
+ ret = -ENOMEM;
+ goto err;
+ }
+ if (copy_from_user(tmp_buf, (void __user __force *)arg->base,
+ arg->fmt.sizeimage)) {
+ ret = -EFAULT;
+ goto err;
+ }
+
+ if (hmm_store(res->data, tmp_buf, arg->fmt.sizeimage)) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+err:
+ if (ret && res)
+ atomisp_css_frame_free(res);
+ if (tmp_buf)
+ vfree(tmp_buf);
+ if (ret == 0)
+ *result = res;
+ return ret;
+}
+
+/*
+ * Function to configure fixed pattern noise table
+ */
+int atomisp_fixed_pattern_table(struct atomisp_sub_device *asd,
+ struct v4l2_framebuffer *arg)
+{
+ struct atomisp_css_frame *raw_black_frame = NULL;
+ int ret;
+
+ if (arg == NULL)
+ return -EINVAL;
+
+ ret = atomisp_v4l2_framebuffer_to_css_frame(arg, &raw_black_frame);
+ if (ret)
+ return ret;
+ if (atomisp_css_set_black_frame(asd, raw_black_frame))
+ ret = -ENOMEM;
+
+ atomisp_css_frame_free(raw_black_frame);
+ return ret;
+}
+
+/*
+ * Function to configure false color correction
+ */
+int atomisp_false_color(struct atomisp_sub_device *asd, int flag,
+ __s32 *value)
+{
+	/* Get the false color setting from the current setup */
+ if (flag == 0) {
+ *value = asd->params.false_color;
+ return 0;
+ }
+
+	/* Set the false color setting in the isp parameters */
+ if (*value) {
+ atomisp_css_set_default_de_config(asd);
+ } else {
+ asd->params.css_param.de_config.pixelnoise = 0;
+ atomisp_css_set_de_config(asd, &asd->params.css_param.de_config);
+ }
+ asd->params.css_update_params_needed = true;
+ asd->params.false_color = *value;
+ return 0;
+}
+
+/*
+ * Function to configure false color correction params
+ */
+int atomisp_false_color_param(struct atomisp_sub_device *asd, int flag,
+ struct atomisp_de_config *config)
+{
+ if (flag == 0) {
+ /* Get false color from current setup */
+ if (atomisp_css_get_de_config(asd, config))
+ return -EINVAL;
+ } else {
+ /* Set false color to isp parameters */
+ memcpy(&asd->params.css_param.de_config, config,
+ sizeof(asd->params.css_param.de_config));
+ atomisp_css_set_de_config(asd, &asd->params.css_param.de_config);
+ asd->params.css_update_params_needed = true;
+ }
+
+ return 0;
+}
+
+/*
+ * Function to configure white balance params
+ */
+int atomisp_white_balance_param(struct atomisp_sub_device *asd, int flag,
+ struct atomisp_wb_config *config)
+{
+ if (flag == 0) {
+ /* Get white balance from current setup */
+ if (atomisp_css_get_wb_config(asd, config))
+ return -EINVAL;
+ } else {
+ /* Set white balance to isp parameters */
+ memcpy(&asd->params.css_param.wb_config, config,
+ sizeof(asd->params.css_param.wb_config));
+ atomisp_css_set_wb_config(asd, &asd->params.css_param.wb_config);
+ asd->params.css_update_params_needed = true;
+ }
+
+ return 0;
+}
+
+int atomisp_3a_config_param(struct atomisp_sub_device *asd, int flag,
+ struct atomisp_3a_config *config)
+{
+ struct atomisp_device *isp = asd->isp;
+
+ dev_dbg(isp->dev, ">%s %d\n", __func__, flag);
+
+ if (flag == 0) {
+		/* Get the 3A configuration from the current setup */
+ if (atomisp_css_get_3a_config(asd, config))
+ return -EINVAL;
+ } else {
+		/* Set the 3A configuration in the isp parameters */
+ memcpy(&asd->params.css_param.s3a_config, config,
+ sizeof(asd->params.css_param.s3a_config));
+ atomisp_css_set_3a_config(asd, &asd->params.css_param.s3a_config);
+ asd->params.css_update_params_needed = true;
+ }
+
+ dev_dbg(isp->dev, "<%s %d\n", __func__, flag);
+ return 0;
+}
+
+/*
+ * Function to setup digital zoom
+ */
+int atomisp_digital_zoom(struct atomisp_sub_device *asd, int flag,
+ __s32 *value)
+{
+ u32 zoom;
+ struct atomisp_device *isp = asd->isp;
+
+ unsigned int max_zoom = MRFLD_MAX_ZOOM_FACTOR;
+
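+	/*
+	 * The user-space zoom value and the CSS zoom factor are mirrored
+	 * around max_zoom: a value of 0 corresponds to a CSS zoom factor of
+	 * max_zoom, and vice versa.
+	 */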
+ if (flag == 0) {
+ atomisp_css_get_zoom_factor(asd, &zoom);
+ *value = max_zoom - zoom;
+ } else {
+ if (*value < 0)
+ return -EINVAL;
+
+ zoom = max_zoom - min_t(u32, max_zoom - 1, *value);
+ atomisp_css_set_zoom_factor(asd, zoom);
+
+ dev_dbg(isp->dev, "%s, zoom: %d\n", __func__, zoom);
+ asd->params.css_update_params_needed = true;
+ }
+
+ return 0;
+}
+
+/*
+ * Function to get sensor specific info for current resolution,
+ * which will be used for auto exposure conversion.
+ */
+int atomisp_get_sensor_mode_data(struct atomisp_sub_device *asd,
+ struct atomisp_sensor_mode_data *config)
+{
+ struct camera_mipi_info *mipi_info;
+ struct atomisp_device *isp = asd->isp;
+
+ mipi_info = atomisp_to_sensor_mipi_info(
+ isp->inputs[asd->input_curr].camera);
+ if (mipi_info == NULL)
+ return -EINVAL;
+
+ memcpy(config, &mipi_info->data, sizeof(*config));
+ return 0;
+}
+
+int atomisp_get_fmt(struct video_device *vdev, struct v4l2_format *f)
+{
+ struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev);
+
+ f->fmt.pix = pipe->pix;
+
+ return 0;
+}
+
+static void __atomisp_update_stream_env(struct atomisp_sub_device *asd,
+ uint16_t stream_index, struct atomisp_input_stream_info *stream_info)
+{
+ int i;
+
+#if defined(ISP2401_NEW_INPUT_SYSTEM)
+	/* assign the virtual channel id returned by the sensor driver query */
+ asd->stream_env[stream_index].ch_id = stream_info->ch_id;
+#endif
+ asd->stream_env[stream_index].isys_configs = stream_info->isys_configs;
+ for (i = 0; i < stream_info->isys_configs; i++) {
+ asd->stream_env[stream_index].isys_info[i].input_format =
+ stream_info->isys_info[i].input_format;
+ asd->stream_env[stream_index].isys_info[i].width =
+ stream_info->isys_info[i].width;
+ asd->stream_env[stream_index].isys_info[i].height =
+ stream_info->isys_info[i].height;
+ }
+}
+
+static void __atomisp_init_stream_info(uint16_t stream_index,
+ struct atomisp_input_stream_info *stream_info)
+{
+ int i;
+
+ stream_info->enable = 1;
+ stream_info->stream = stream_index;
+ stream_info->ch_id = 0;
+ stream_info->isys_configs = 0;
+ for (i = 0; i < MAX_STREAMS_PER_CHANNEL; i++) {
+ stream_info->isys_info[i].input_format = 0;
+ stream_info->isys_info[i].width = 0;
+ stream_info->isys_info[i].height = 0;
+ }
+}
+
+/* This function looks up the closest available resolution. */
+int atomisp_try_fmt(struct video_device *vdev, struct v4l2_format *f,
+ bool *res_overflow)
+{
+ struct atomisp_device *isp = video_get_drvdata(vdev);
+ struct atomisp_sub_device *asd = atomisp_to_video_pipe(vdev)->asd;
+ struct v4l2_subdev_pad_config pad_cfg;
+ struct v4l2_subdev_format format = {
+ .which = V4L2_SUBDEV_FORMAT_TRY,
+ };
+
+ struct v4l2_mbus_framefmt *snr_mbus_fmt = &format.format;
+ const struct atomisp_format_bridge *fmt;
+ struct atomisp_input_stream_info *stream_info =
+ (struct atomisp_input_stream_info *)snr_mbus_fmt->reserved;
+ uint16_t stream_index;
+ int source_pad = atomisp_subdev_source_pad(vdev);
+ int ret;
+
+ if (isp->inputs[asd->input_curr].camera == NULL)
+ return -EINVAL;
+
+ stream_index = atomisp_source_pad_to_stream_id(asd, source_pad);
+ fmt = atomisp_get_format_bridge(f->fmt.pix.pixelformat);
+ if (fmt == NULL) {
+ dev_err(isp->dev, "unsupported pixelformat!\n");
+ fmt = atomisp_output_fmts;
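+		/* fall back to the first entry of the output format table */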
+ }
+
+#ifdef ISP2401
+ if (f->fmt.pix.width <= 0 || f->fmt.pix.height <= 0)
+ return -EINVAL;
+
+#endif
+ snr_mbus_fmt->code = fmt->mbus_code;
+ snr_mbus_fmt->width = f->fmt.pix.width;
+ snr_mbus_fmt->height = f->fmt.pix.height;
+
+ __atomisp_init_stream_info(stream_index, stream_info);
+
+ dev_dbg(isp->dev, "try_mbus_fmt: asking for %ux%u\n",
+ snr_mbus_fmt->width, snr_mbus_fmt->height);
+
+ ret = v4l2_subdev_call(isp->inputs[asd->input_curr].camera,
+ pad, set_fmt, &pad_cfg, &format);
+ if (ret)
+ return ret;
+
+ dev_dbg(isp->dev, "try_mbus_fmt: got %ux%u\n",
+ snr_mbus_fmt->width, snr_mbus_fmt->height);
+
+ fmt = atomisp_get_format_bridge_from_mbus(snr_mbus_fmt->code);
+ if (fmt == NULL) {
+ dev_err(isp->dev, "unknown sensor format 0x%8.8x\n",
+ snr_mbus_fmt->code);
+ return -EINVAL;
+ }
+
+ f->fmt.pix.pixelformat = fmt->pixelformat;
+
+ /*
+	 * If the format is JPEG or custom RAW, the width and height will not
+	 * satisfy the normal atomisp requirements, so there is no need to
+	 * check the conditions below.  Just use what the sensor driver
+	 * returned.
+ */
+ if (f->fmt.pix.pixelformat == V4L2_PIX_FMT_JPEG ||
+ f->fmt.pix.pixelformat == V4L2_PIX_FMT_CUSTOM_M10MO_RAW) {
+ f->fmt.pix.width = snr_mbus_fmt->width;
+ f->fmt.pix.height = snr_mbus_fmt->height;
+ return 0;
+ }
+
+ if (snr_mbus_fmt->width < f->fmt.pix.width
+ && snr_mbus_fmt->height < f->fmt.pix.height) {
+ f->fmt.pix.width = snr_mbus_fmt->width;
+ f->fmt.pix.height = snr_mbus_fmt->height;
+		/*
+		 * Set the flag when the requested resolution is beyond the
+		 * maximum supported by the sensor.
+		 */
+ if (res_overflow != NULL)
+ *res_overflow = true;
+ }
+
+	/* clamp and align the requested resolution to the ISP limits */
+ f->fmt.pix.width = rounddown(
+ clamp_t(u32, f->fmt.pix.width, ATOM_ISP_MIN_WIDTH,
+ ATOM_ISP_MAX_WIDTH), ATOM_ISP_STEP_WIDTH);
+ f->fmt.pix.height = rounddown(
+ clamp_t(u32, f->fmt.pix.height, ATOM_ISP_MIN_HEIGHT,
+ ATOM_ISP_MAX_HEIGHT), ATOM_ISP_STEP_HEIGHT);
+
+ return 0;
+}
+
+static int
+atomisp_try_fmt_file(struct atomisp_device *isp, struct v4l2_format *f)
+{
+ u32 width = f->fmt.pix.width;
+ u32 height = f->fmt.pix.height;
+ u32 pixelformat = f->fmt.pix.pixelformat;
+ enum v4l2_field field = f->fmt.pix.field;
+ u32 depth;
+
+ if (!atomisp_get_format_bridge(pixelformat)) {
+ dev_err(isp->dev, "Wrong output pixelformat\n");
+ return -EINVAL;
+ }
+
+ depth = get_pixel_depth(pixelformat);
+
+ if (field == V4L2_FIELD_ANY)
+ field = V4L2_FIELD_NONE;
+ else if (field != V4L2_FIELD_NONE) {
+ dev_err(isp->dev, "Wrong output field\n");
+ return -EINVAL;
+ }
+
+ f->fmt.pix.field = field;
+ f->fmt.pix.width = clamp_t(u32,
+ rounddown(width, (u32)ATOM_ISP_STEP_WIDTH),
+ ATOM_ISP_MIN_WIDTH, ATOM_ISP_MAX_WIDTH);
+ f->fmt.pix.height = clamp_t(u32, rounddown(height,
+ (u32)ATOM_ISP_STEP_HEIGHT),
+ ATOM_ISP_MIN_HEIGHT, ATOM_ISP_MAX_HEIGHT);
+ f->fmt.pix.bytesperline = (width * depth) >> 3;
+
+ return 0;
+}
+
+mipi_port_ID_t __get_mipi_port(struct atomisp_device *isp,
+ enum atomisp_camera_port port)
+{
+ switch (port) {
+ case ATOMISP_CAMERA_PORT_PRIMARY:
+ return MIPI_PORT0_ID;
+ case ATOMISP_CAMERA_PORT_SECONDARY:
+ return MIPI_PORT1_ID;
+ case ATOMISP_CAMERA_PORT_TERTIARY:
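+		/*
+		 * A third MIPI port only exists when the port enumeration has
+		 * more than two entries; otherwise fall through to the error
+		 * path below.
+		 */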
+ if (MIPI_PORT1_ID + 1 != N_MIPI_PORT_ID)
+ return MIPI_PORT1_ID + 1;
+		/* fall through to the default case */
+ default:
+ dev_err(isp->dev, "unsupported port: %d\n", port);
+ return MIPI_PORT0_ID;
+ }
+}
+
+static inline int atomisp_set_sensor_mipi_to_isp(
+ struct atomisp_sub_device *asd,
+ enum atomisp_input_stream_id stream_id,
+ struct camera_mipi_info *mipi_info)
+{
+ struct v4l2_control ctrl;
+ struct atomisp_device *isp = asd->isp;
+ const struct atomisp_in_fmt_conv *fc;
+ int mipi_freq = 0;
+ unsigned int input_format, bayer_order;
+
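+	/*
+	 * Query the MIPI link frequency control from the sensor, if the
+	 * sensor provides one.
+	 */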
+ ctrl.id = V4L2_CID_LINK_FREQ;
+ if (v4l2_g_ctrl
+ (isp->inputs[asd->input_curr].camera->ctrl_handler, &ctrl) == 0)
+ mipi_freq = ctrl.value;
+
+ if (asd->stream_env[stream_id].isys_configs == 1) {
+ input_format =
+ asd->stream_env[stream_id].isys_info[0].input_format;
+ atomisp_css_isys_set_format(asd, stream_id,
+ input_format, IA_CSS_STREAM_DEFAULT_ISYS_STREAM_IDX);
+ } else if (asd->stream_env[stream_id].isys_configs == 2) {
+ atomisp_css_isys_two_stream_cfg_update_stream1(
+ asd, stream_id,
+ asd->stream_env[stream_id].isys_info[0].input_format,
+ asd->stream_env[stream_id].isys_info[0].width,
+ asd->stream_env[stream_id].isys_info[0].height);
+
+ atomisp_css_isys_two_stream_cfg_update_stream2(
+ asd, stream_id,
+ asd->stream_env[stream_id].isys_info[1].input_format,
+ asd->stream_env[stream_id].isys_info[1].width,
+ asd->stream_env[stream_id].isys_info[1].height);
+ }
+
+	/*
+	 * Compatibility for sensors which provide no media bus code in
+	 * s_mbus_framefmt() and do not support pad formats.
+	 */
+ if (mipi_info->input_format != -1) {
+ bayer_order = mipi_info->raw_bayer_order;
+
+		/* The input stream config still needs to be configured */
+ /* TODO: Check if this is necessary */
+ fc = atomisp_find_in_fmt_conv_by_atomisp_in_fmt(
+ mipi_info->input_format);
+ if (!fc)
+ return -EINVAL;
+ input_format = fc->css_stream_fmt;
+ } else {
+ struct v4l2_mbus_framefmt *sink;
+ sink = atomisp_subdev_get_ffmt(&asd->subdev, NULL,
+ V4L2_SUBDEV_FORMAT_ACTIVE,
+ ATOMISP_SUBDEV_PAD_SINK);
+ fc = atomisp_find_in_fmt_conv(sink->code);
+ if (!fc)
+ return -EINVAL;
+ input_format = fc->css_stream_fmt;
+ bayer_order = fc->bayer_order;
+ }
+
+ atomisp_css_input_set_format(asd, stream_id, input_format);
+ atomisp_css_input_set_bayer_order(asd, stream_id, bayer_order);
+
+ fc = atomisp_find_in_fmt_conv_by_atomisp_in_fmt(
+ mipi_info->metadata_format);
+ if (!fc)
+ return -EINVAL;
+ input_format = fc->css_stream_fmt;
+ atomisp_css_input_configure_port(asd,
+ __get_mipi_port(asd->isp, mipi_info->port),
+ mipi_info->num_lanes,
+ 0xffff4, mipi_freq,
+ input_format,
+ mipi_info->metadata_width,
+ mipi_info->metadata_height);
+ return 0;
+}
+
+static int __enable_continuous_mode(struct atomisp_sub_device *asd,
+ bool enable)
+{
+ struct atomisp_device *isp = asd->isp;
+
+ dev_dbg(isp->dev,
+ "continuous mode %d, raw buffers %d, stop preview %d\n",
+ enable, asd->continuous_raw_buffer_size->val,
+ !asd->continuous_viewfinder->val);
+#ifndef ISP2401
+ atomisp_css_capture_set_mode(asd, CSS_CAPTURE_MODE_PRIMARY);
+#else
+ atomisp_update_capture_mode(asd);
+#endif
+ /* in case of ANR, force capture pipe to offline mode */
+ atomisp_css_capture_enable_online(asd, ATOMISP_INPUT_STREAM_GENERAL,
+ asd->params.low_light ? false : !enable);
+ atomisp_css_preview_enable_online(asd, ATOMISP_INPUT_STREAM_GENERAL,
+ !enable);
+ atomisp_css_enable_continuous(asd, enable);
+ atomisp_css_enable_cvf(asd, asd->continuous_viewfinder->val);
+
+ if (atomisp_css_continuous_set_num_raw_frames(asd,
+ asd->continuous_raw_buffer_size->val)) {
+ dev_err(isp->dev, "css_continuous_set_num_raw_frames failed\n");
+ return -EINVAL;
+ }
+
+ if (!enable) {
+ atomisp_css_enable_raw_binning(asd, false);
+ atomisp_css_input_set_two_pixels_per_clock(asd, false);
+ }
+
+ if (isp->inputs[asd->input_curr].type != FILE_INPUT)
+ atomisp_css_input_set_mode(asd, CSS_INPUT_MODE_SENSOR);
+
+ return atomisp_update_run_mode(asd);
+}
+
+int configure_pp_input_nop(struct atomisp_sub_device *asd,
+ unsigned int width, unsigned int height)
+{
+ return 0;
+}
+
+int configure_output_nop(struct atomisp_sub_device *asd,
+ unsigned int width, unsigned int height,
+ unsigned int min_width,
+ enum atomisp_css_frame_format sh_fmt)
+{
+ return 0;
+}
+
+int get_frame_info_nop(struct atomisp_sub_device *asd,
+ struct atomisp_css_frame_info *finfo)
+{
+ return 0;
+}
+
+/**
+ * Resets CSS parameters that depend on input resolution.
+ *
+ * Update params like CSS RAW binning, 2ppc mode and pp_input
+ * which depend on input size, but are not automatically
+ * handled in CSS when the input resolution is changed.
+ */
+static int css_input_resolution_changed(struct atomisp_sub_device *asd,
+ struct v4l2_mbus_framefmt *ffmt)
+{
+ struct atomisp_metadata_buf *md_buf = NULL, *_md_buf;
+ unsigned int i;
+
+ dev_dbg(asd->isp->dev, "css_input_resolution_changed to %ux%u\n",
+ ffmt->width, ffmt->height);
+
+#if defined(ISP2401_NEW_INPUT_SYSTEM)
+ atomisp_css_input_set_two_pixels_per_clock(asd, false);
+#else
+ atomisp_css_input_set_two_pixels_per_clock(asd, true);
+#endif
+ if (asd->continuous_mode->val) {
+ /* Note for all checks: ffmt includes pad_w+pad_h */
+ if (asd->run_mode->val == ATOMISP_RUN_MODE_VIDEO ||
+ (ffmt->width >= 2048 || ffmt->height >= 1536)) {
+ /*
+ * For preview pipe, enable only if resolution
+ * is >= 3M for ISP2400.
+ */
+ atomisp_css_enable_raw_binning(asd, true);
+ }
+ }
+ /*
+	 * If the sensor input changed, the metadata resolution changed with
+	 * it.  Release all metadata buffers here so that they get
+	 * re-allocated on the next reqbufs.
+ */
+ for (i = 0; i < ATOMISP_METADATA_TYPE_NUM; i++) {
+ list_for_each_entry_safe(md_buf, _md_buf, &asd->metadata[i],
+ list) {
+ atomisp_css_free_metadata_buffer(md_buf);
+ list_del(&md_buf->list);
+ kfree(md_buf);
+ }
+ }
+ return 0;
+
+ /*
+ * TODO: atomisp_css_preview_configure_pp_input() not
+ * reset due to CSS bug tracked as PSI BZ 115124
+ */
+}
+
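+/*
+ * Program the CSS for the requested pixel format: validate the sensor MIPI
+ * settings, configure the viewfinder, select the pipe (copy, video, preview,
+ * capture or yuvpp) with its configure/get_frame_info callbacks, and set up
+ * the output and pp input for that pipe.
+ */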
+static int atomisp_set_fmt_to_isp(struct video_device *vdev,
+ struct atomisp_css_frame_info *output_info,
+ struct atomisp_css_frame_info *raw_output_info,
+ struct v4l2_pix_format *pix,
+ unsigned int source_pad)
+{
+ struct camera_mipi_info *mipi_info;
+ struct atomisp_device *isp = video_get_drvdata(vdev);
+ struct atomisp_sub_device *asd = atomisp_to_video_pipe(vdev)->asd;
+ const struct atomisp_format_bridge *format;
+ struct v4l2_rect *isp_sink_crop;
+ enum atomisp_css_pipe_id pipe_id;
+ struct v4l2_subdev_fh fh;
+ int (*configure_output)(struct atomisp_sub_device *asd,
+ unsigned int width, unsigned int height,
+ unsigned int min_width,
+ enum atomisp_css_frame_format sh_fmt) =
+ configure_output_nop;
+ int (*get_frame_info)(struct atomisp_sub_device *asd,
+ struct atomisp_css_frame_info *finfo) =
+ get_frame_info_nop;
+ int (*configure_pp_input)(struct atomisp_sub_device *asd,
+ unsigned int width, unsigned int height) =
+ configure_pp_input_nop;
+ uint16_t stream_index = atomisp_source_pad_to_stream_id(asd, source_pad);
+ const struct atomisp_in_fmt_conv *fc;
+ int ret;
+
+ v4l2_fh_init(&fh.vfh, vdev);
+
+ isp_sink_crop = atomisp_subdev_get_rect(
+ &asd->subdev, NULL, V4L2_SUBDEV_FORMAT_ACTIVE,
+ ATOMISP_SUBDEV_PAD_SINK, V4L2_SEL_TGT_CROP);
+
+ format = atomisp_get_format_bridge(pix->pixelformat);
+ if (format == NULL)
+ return -EINVAL;
+
+ if (isp->inputs[asd->input_curr].type != TEST_PATTERN &&
+ isp->inputs[asd->input_curr].type != FILE_INPUT) {
+ mipi_info = atomisp_to_sensor_mipi_info(
+ isp->inputs[asd->input_curr].camera);
+ if (!mipi_info) {
+ dev_err(isp->dev, "mipi_info is NULL\n");
+ return -EINVAL;
+ }
+ if (atomisp_set_sensor_mipi_to_isp(asd, stream_index,
+ mipi_info))
+ return -EINVAL;
+ fc = atomisp_find_in_fmt_conv_by_atomisp_in_fmt(
+ mipi_info->input_format);
+ if (!fc)
+ fc = atomisp_find_in_fmt_conv(
+ atomisp_subdev_get_ffmt(&asd->subdev,
+ NULL, V4L2_SUBDEV_FORMAT_ACTIVE,
+ ATOMISP_SUBDEV_PAD_SINK)->code);
+ if (!fc)
+ return -EINVAL;
+ if (format->sh_fmt == CSS_FRAME_FORMAT_RAW &&
+ raw_output_format_match_input(fc->css_stream_fmt,
+ pix->pixelformat))
+ return -EINVAL;
+ }
+
+ /*
+ * Configure viewfinder also when vfpp is disabled: the
+ * CSS still requires viewfinder configuration.
+ */
+ if (asd->fmt_auto->val ||
+ asd->vfpp->val != ATOMISP_VFPP_ENABLE) {
+ struct v4l2_rect vf_size = {0};
+ struct v4l2_mbus_framefmt vf_ffmt = {0};
+
+ if (pix->width < 640 || pix->height < 480) {
+ vf_size.width = pix->width;
+ vf_size.height = pix->height;
+ } else {
+ vf_size.width = 640;
+ vf_size.height = 480;
+ }
+
+ /* FIXME: use a proper format name for this one. See
+ atomisp_output_fmts[] in atomisp_v4l2.c */
+ vf_ffmt.code = V4L2_MBUS_FMT_CUSTOM_YUV420;
+
+ atomisp_subdev_set_selection(&asd->subdev, fh.pad,
+ V4L2_SUBDEV_FORMAT_ACTIVE,
+ ATOMISP_SUBDEV_PAD_SOURCE_VF,
+ V4L2_SEL_TGT_COMPOSE, 0, &vf_size);
+ atomisp_subdev_set_ffmt(&asd->subdev, fh.pad,
+ V4L2_SUBDEV_FORMAT_ACTIVE,
+ ATOMISP_SUBDEV_PAD_SOURCE_VF, &vf_ffmt);
+ asd->video_out_vf.sh_fmt = CSS_FRAME_FORMAT_NV12;
+
+ if (asd->vfpp->val == ATOMISP_VFPP_DISABLE_SCALER) {
+ atomisp_css_video_configure_viewfinder(asd,
+ vf_size.width, vf_size.height, 0,
+ asd->video_out_vf.sh_fmt);
+ } else if (asd->run_mode->val == ATOMISP_RUN_MODE_VIDEO) {
+ if (source_pad == ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW ||
+ source_pad == ATOMISP_SUBDEV_PAD_SOURCE_VIDEO)
+ atomisp_css_video_configure_viewfinder(asd,
+ vf_size.width, vf_size.height, 0,
+ asd->video_out_vf.sh_fmt);
+ else
+ atomisp_css_capture_configure_viewfinder(asd,
+ vf_size.width, vf_size.height, 0,
+ asd->video_out_vf.sh_fmt);
+ } else if (source_pad != ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW ||
+ asd->vfpp->val == ATOMISP_VFPP_DISABLE_LOWLAT) {
+ atomisp_css_capture_configure_viewfinder(asd,
+ vf_size.width, vf_size.height, 0,
+ asd->video_out_vf.sh_fmt);
+ }
+ }
+
+ if (asd->continuous_mode->val) {
+ ret = __enable_continuous_mode(asd, true);
+ if (ret)
+ return -EINVAL;
+ }
+
+ atomisp_css_input_set_mode(asd, CSS_INPUT_MODE_SENSOR);
+ atomisp_css_disable_vf_pp(asd,
+ asd->vfpp->val != ATOMISP_VFPP_ENABLE);
+
+ /* The ISP2401 new input system needs to use the copy pipe */
+ if (asd->copy_mode) {
+ pipe_id = CSS_PIPE_ID_COPY;
+ atomisp_css_capture_enable_online(asd, stream_index, false);
+ } else if (asd->vfpp->val == ATOMISP_VFPP_DISABLE_SCALER) {
+ /* video is the same in continuous capture and online modes */
+ configure_output = atomisp_css_video_configure_output;
+ get_frame_info = atomisp_css_video_get_output_frame_info;
+ pipe_id = CSS_PIPE_ID_VIDEO;
+ } else if (asd->run_mode->val == ATOMISP_RUN_MODE_VIDEO) {
+ if (!asd->continuous_mode->val) {
+ configure_output = atomisp_css_video_configure_output;
+ get_frame_info =
+ atomisp_css_video_get_output_frame_info;
+ pipe_id = CSS_PIPE_ID_VIDEO;
+ } else {
+ if (source_pad == ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW ||
+ source_pad == ATOMISP_SUBDEV_PAD_SOURCE_VIDEO) {
+ configure_output =
+ atomisp_css_video_configure_output;
+ get_frame_info =
+ atomisp_css_video_get_output_frame_info;
+ configure_pp_input =
+ atomisp_css_video_configure_pp_input;
+ pipe_id = CSS_PIPE_ID_VIDEO;
+ } else {
+ configure_output =
+ atomisp_css_capture_configure_output;
+ get_frame_info =
+ atomisp_css_capture_get_output_frame_info;
+ configure_pp_input =
+ atomisp_css_capture_configure_pp_input;
+ pipe_id = CSS_PIPE_ID_CAPTURE;
+
+ atomisp_update_capture_mode(asd);
+ atomisp_css_capture_enable_online(asd, stream_index, false);
+ }
+ }
+ } else if (source_pad == ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW) {
+ configure_output = atomisp_css_preview_configure_output;
+ get_frame_info = atomisp_css_preview_get_output_frame_info;
+ configure_pp_input = atomisp_css_preview_configure_pp_input;
+ pipe_id = CSS_PIPE_ID_PREVIEW;
+ } else {
+ /* CSS doesn't support low light mode on SOC cameras, so disable
+ * it. FIXME: if this is done elsewhere, it corrupts the colors
+ * of the thumbnail image.
+ */
+ if (isp->inputs[asd->input_curr].type == SOC_CAMERA)
+ asd->params.low_light = false;
+
+ if (format->sh_fmt == CSS_FRAME_FORMAT_RAW) {
+ atomisp_css_capture_set_mode(asd, CSS_CAPTURE_MODE_RAW);
+ atomisp_css_enable_dz(asd, false);
+ } else {
+ atomisp_update_capture_mode(asd);
+ }
+
+ if (!asd->continuous_mode->val)
+ /* in case of ANR, force capture pipe to offline mode */
+ atomisp_css_capture_enable_online(asd, stream_index,
+ asd->params.low_light ?
+ false : asd->params.online_process);
+
+ configure_output = atomisp_css_capture_configure_output;
+ get_frame_info = atomisp_css_capture_get_output_frame_info;
+ configure_pp_input = atomisp_css_capture_configure_pp_input;
+ pipe_id = CSS_PIPE_ID_CAPTURE;
+
+ if (!asd->params.online_process &&
+ !asd->continuous_mode->val) {
+ ret = atomisp_css_capture_get_output_raw_frame_info(asd,
+ raw_output_info);
+ if (ret)
+ return ret;
+ }
+ if (!asd->continuous_mode->val && asd->run_mode->val
+ != ATOMISP_RUN_MODE_STILL_CAPTURE) {
+ dev_err(isp->dev,
+ "Need to set the running mode first\n");
+ asd->run_mode->val = ATOMISP_RUN_MODE_STILL_CAPTURE;
+ }
+ }
+
+ /*
+ * For SOC cameras, use the yuvpp pipe.
+ */
+ if (ATOMISP_USE_YUVPP(asd))
+ pipe_id = CSS_PIPE_ID_YUVPP;
+
+ if (asd->copy_mode)
+ ret = atomisp_css_copy_configure_output(asd, stream_index,
+ pix->width, pix->height,
+ format->planar ? pix->bytesperline :
+ pix->bytesperline * 8 / format->depth,
+ format->sh_fmt);
+ else
+ ret = configure_output(asd, pix->width, pix->height,
+ format->planar ? pix->bytesperline :
+ pix->bytesperline * 8 / format->depth,
+ format->sh_fmt);
+ if (ret) {
+ dev_err(isp->dev, "configure_output %ux%u, format %8.8x\n",
+ pix->width, pix->height, format->sh_fmt);
+ return -EINVAL;
+ }
+
+ if (asd->continuous_mode->val &&
+ (configure_pp_input == atomisp_css_preview_configure_pp_input ||
+ configure_pp_input == atomisp_css_video_configure_pp_input)) {
+ /* for ISP 2.2, configuring the pp input is available in
+ * continuous mode */
+ ret = configure_pp_input(asd, isp_sink_crop->width,
+ isp_sink_crop->height);
+ if (ret) {
+ dev_err(isp->dev, "configure_pp_input %ux%u\n",
+ isp_sink_crop->width,
+ isp_sink_crop->height);
+ return -EINVAL;
+ }
+ } else {
+ ret = configure_pp_input(asd, isp_sink_crop->width,
+ isp_sink_crop->height);
+ if (ret) {
+ dev_err(isp->dev, "configure_pp_input %ux%u\n",
+ isp_sink_crop->width, isp_sink_crop->height);
+ return -EINVAL;
+ }
+ }
+ if (asd->copy_mode)
+ ret = atomisp_css_copy_get_output_frame_info(asd, stream_index,
+ output_info);
+ else
+ ret = get_frame_info(asd, output_info);
+ if (ret) {
+ dev_err(isp->dev, "get_frame_info %ux%u (padded to %u)\n",
+ pix->width, pix->height, pix->bytesperline);
+ return -EINVAL;
+ }
+
+ atomisp_update_grid_info(asd, pipe_id, source_pad);
+
+ /* Free the raw_dump buffer first */
+ atomisp_css_frame_free(asd->raw_output_frame);
+ asd->raw_output_frame = NULL;
+
+ if (!asd->continuous_mode->val &&
+ !asd->params.online_process && !isp->sw_contex.file_input &&
+ atomisp_css_frame_allocate_from_info(&asd->raw_output_frame,
+ raw_output_info))
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void atomisp_get_dis_envelop(struct atomisp_sub_device *asd,
+ unsigned int width, unsigned int height,
+ unsigned int *dvs_env_w, unsigned int *dvs_env_h)
+{
+ struct atomisp_device *isp = asd->isp;
+
+ /* if the subdev type is SOC camera, we do not need to set DVS */
+ if (isp->inputs[asd->input_curr].type == SOC_CAMERA)
+ asd->params.video_dis_en = 0;
+
+ if (asd->params.video_dis_en &&
+ asd->run_mode->val == ATOMISP_RUN_MODE_VIDEO) {
+ /* envelope is 20% of the output resolution */
+ /*
+ * The DVS envelope must not be rounded up: doing so would
+ * cause ISP timeouts and color switch issues.
+ */
+ *dvs_env_w = rounddown(width / 5, ATOM_ISP_STEP_WIDTH);
+ *dvs_env_h = rounddown(height / 5, ATOM_ISP_STEP_HEIGHT);
+ }
+
+ asd->params.dis_proj_data_valid = false;
+ asd->params.css_update_params_needed = true;
+}
+
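+/*
+ * Decide whether the copy pipe can be used. Only relevant for the new
+ * (ISP2401) input system: copy mode is selected when the sink and source
+ * formats match, or when a multi-stream SOC sensor is in use.
+ */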
+static void atomisp_check_copy_mode(struct atomisp_sub_device *asd,
+ int source_pad, struct v4l2_format *f)
+{
+#if defined(ISP2401_NEW_INPUT_SYSTEM)
+ struct v4l2_mbus_framefmt *sink, *src;
+
+ sink = atomisp_subdev_get_ffmt(&asd->subdev, NULL,
+ V4L2_SUBDEV_FORMAT_ACTIVE, ATOMISP_SUBDEV_PAD_SINK);
+ src = atomisp_subdev_get_ffmt(&asd->subdev, NULL,
+ V4L2_SUBDEV_FORMAT_ACTIVE, source_pad);
+
+ if ((sink->code == src->code &&
+ sink->width == f->fmt.pix.width &&
+ sink->height == f->fmt.pix.height) ||
+ ((asd->isp->inputs[asd->input_curr].type == SOC_CAMERA) &&
+ (asd->isp->inputs[asd->input_curr].camera_caps->
+ sensor[asd->sensor_curr].stream_num > 1)))
+ asd->copy_mode = true;
+ else
+#endif
+ /* Only used for the new input system */
+ asd->copy_mode = false;
+
+ dev_dbg(asd->isp->dev, "copy_mode: %d\n", asd->copy_mode);
+
+}
+
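+/*
+ * Propagate the requested format, plus padding and the DVS envelope, to the
+ * sensor, update the stream environment and the sink pad format, and refresh
+ * the resolution dependent CSS parameters.
+ */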
+static int atomisp_set_fmt_to_snr(struct video_device *vdev,
+ struct v4l2_format *f, unsigned int pixelformat,
+ unsigned int padding_w, unsigned int padding_h,
+ unsigned int dvs_env_w, unsigned int dvs_env_h)
+{
+ struct atomisp_sub_device *asd = atomisp_to_video_pipe(vdev)->asd;
+ const struct atomisp_format_bridge *format;
+ struct v4l2_subdev_pad_config pad_cfg;
+ struct v4l2_subdev_format vformat = {
+ .which = V4L2_SUBDEV_FORMAT_TRY,
+ };
+ struct v4l2_mbus_framefmt *ffmt = &vformat.format;
+ struct v4l2_mbus_framefmt *req_ffmt;
+ struct atomisp_device *isp = asd->isp;
+ struct atomisp_input_stream_info *stream_info =
+ (struct atomisp_input_stream_info *)ffmt->reserved;
+ uint16_t stream_index = ATOMISP_INPUT_STREAM_GENERAL;
+ int source_pad = atomisp_subdev_source_pad(vdev);
+ struct v4l2_subdev_fh fh;
+ int ret;
+
+ v4l2_fh_init(&fh.vfh, vdev);
+
+ stream_index = atomisp_source_pad_to_stream_id(asd, source_pad);
+
+ format = atomisp_get_format_bridge(pixelformat);
+ if (format == NULL)
+ return -EINVAL;
+
+ v4l2_fill_mbus_format(ffmt, &f->fmt.pix, format->mbus_code);
+ ffmt->height += padding_h + dvs_env_h;
+ ffmt->width += padding_w + dvs_env_w;
+
+ dev_dbg(isp->dev, "s_mbus_fmt: ask %ux%u (padding %ux%u, dvs %ux%u)\n",
+ ffmt->width, ffmt->height, padding_w, padding_h,
+ dvs_env_w, dvs_env_h);
+
+ __atomisp_init_stream_info(stream_index, stream_info);
+
+ req_ffmt = ffmt;
+
+ /* Disable dvs if resolution can't be supported by sensor */
+ if (asd->params.video_dis_en &&
+ source_pad == ATOMISP_SUBDEV_PAD_SOURCE_VIDEO) {
+ vformat.which = V4L2_SUBDEV_FORMAT_TRY;
+ ret = v4l2_subdev_call(isp->inputs[asd->input_curr].camera,
+ pad, set_fmt, &pad_cfg, &vformat);
+ if (ret)
+ return ret;
+ if (ffmt->width < req_ffmt->width ||
+ ffmt->height < req_ffmt->height) {
+ req_ffmt->height -= dvs_env_h;
+ req_ffmt->width -= dvs_env_w;
+ ffmt = req_ffmt;
+ dev_warn(isp->dev,
+ "can not enable video dis due to sensor limitation.");
+ asd->params.video_dis_en = 0;
+ }
+ }
+ dev_dbg(isp->dev, "sensor width: %d, height: %d\n",
+ ffmt->width, ffmt->height);
+ vformat.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ ret = v4l2_subdev_call(isp->inputs[asd->input_curr].camera, pad,
+ set_fmt, NULL, &vformat);
+ if (ret)
+ return ret;
+
+ __atomisp_update_stream_env(asd, stream_index, stream_info);
+
+ dev_dbg(isp->dev, "sensor width: %d, height: %d\n",
+ ffmt->width, ffmt->height);
+
+ if (ffmt->width < ATOM_ISP_STEP_WIDTH ||
+ ffmt->height < ATOM_ISP_STEP_HEIGHT)
+ return -EINVAL;
+
+ if (asd->params.video_dis_en &&
+ source_pad == ATOMISP_SUBDEV_PAD_SOURCE_VIDEO &&
+ (ffmt->width < req_ffmt->width || ffmt->height < req_ffmt->height)) {
+ dev_warn(isp->dev,
+ "can not enable video dis due to sensor limitation.");
+ asd->params.video_dis_en = 0;
+ }
+
+ atomisp_subdev_set_ffmt(&asd->subdev, fh.pad,
+ V4L2_SUBDEV_FORMAT_ACTIVE,
+ ATOMISP_SUBDEV_PAD_SINK, ffmt);
+
+ return css_input_resolution_changed(asd, ffmt);
+}
+
+int atomisp_set_fmt(struct video_device *vdev, struct v4l2_format *f)
+{
+ struct atomisp_device *isp = video_get_drvdata(vdev);
+ struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev);
+ struct atomisp_sub_device *asd = pipe->asd;
+ const struct atomisp_format_bridge *format_bridge;
+ const struct atomisp_format_bridge *snr_format_bridge;
+ struct atomisp_css_frame_info output_info, raw_output_info;
+ struct v4l2_format snr_fmt = *f;
+ struct v4l2_format backup_fmt = *f, s_fmt = *f;
+ unsigned int dvs_env_w = 0, dvs_env_h = 0;
+ unsigned int padding_w = pad_w, padding_h = pad_h;
+ bool res_overflow = false, crop_needs_override = false;
+ struct v4l2_mbus_framefmt isp_sink_fmt;
+ struct v4l2_mbus_framefmt isp_source_fmt = {0};
+ struct v4l2_rect isp_sink_crop;
+ uint16_t source_pad = atomisp_subdev_source_pad(vdev);
+ struct v4l2_subdev_fh fh;
+ int ret;
+
+ dev_dbg(isp->dev,
+ "setting resolution %ux%u on pad %u for asd%d, bytesperline %u\n",
+ f->fmt.pix.width, f->fmt.pix.height, source_pad,
+ asd->index, f->fmt.pix.bytesperline);
+
+ if (source_pad >= ATOMISP_SUBDEV_PADS_NUM)
+ return -EINVAL;
+
+ if (asd->streaming == ATOMISP_DEVICE_STREAMING_ENABLED) {
+ dev_warn(isp->dev, "ISP does not support set format while at streaming!\n");
+ return -EBUSY;
+ }
+
+ v4l2_fh_init(&fh.vfh, vdev);
+
+ format_bridge = atomisp_get_format_bridge(f->fmt.pix.pixelformat);
+ if (format_bridge == NULL)
+ return -EINVAL;
+
+ pipe->sh_fmt = format_bridge->sh_fmt;
+ pipe->pix.pixelformat = f->fmt.pix.pixelformat;
+
+ if (source_pad == ATOMISP_SUBDEV_PAD_SOURCE_VF ||
+ (source_pad == ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW
+ && asd->run_mode->val == ATOMISP_RUN_MODE_VIDEO)) {
+ if (asd->fmt_auto->val) {
+ struct v4l2_rect *capture_comp;
+ struct v4l2_rect r = {0};
+
+ r.width = f->fmt.pix.width;
+ r.height = f->fmt.pix.height;
+
+ if (source_pad == ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW)
+ capture_comp = atomisp_subdev_get_rect(
+ &asd->subdev, NULL,
+ V4L2_SUBDEV_FORMAT_ACTIVE,
+ ATOMISP_SUBDEV_PAD_SOURCE_VIDEO,
+ V4L2_SEL_TGT_COMPOSE);
+ else
+ capture_comp = atomisp_subdev_get_rect(
+ &asd->subdev, NULL,
+ V4L2_SUBDEV_FORMAT_ACTIVE,
+ ATOMISP_SUBDEV_PAD_SOURCE_CAPTURE,
+ V4L2_SEL_TGT_COMPOSE);
+
+ if (capture_comp->width < r.width
+ || capture_comp->height < r.height) {
+ r.width = capture_comp->width;
+ r.height = capture_comp->height;
+ }
+
+ atomisp_subdev_set_selection(
+ &asd->subdev, fh.pad,
+ V4L2_SUBDEV_FORMAT_ACTIVE, source_pad,
+ V4L2_SEL_TGT_COMPOSE, 0, &r);
+
+ f->fmt.pix.width = r.width;
+ f->fmt.pix.height = r.height;
+ }
+
+ if (source_pad == ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW &&
+ (asd->isp->inputs[asd->input_curr].type == SOC_CAMERA) &&
+ (asd->isp->inputs[asd->input_curr].camera_caps->
+ sensor[asd->sensor_curr].stream_num > 1)) {
+ /* For M10MO outputting YUV preview images. */
+ uint16_t video_index =
+ atomisp_source_pad_to_stream_id(asd,
+ ATOMISP_SUBDEV_PAD_SOURCE_VIDEO);
+
+ ret = atomisp_css_copy_get_output_frame_info(asd,
+ video_index, &output_info);
+ if (ret) {
+ dev_err(isp->dev,
+ "copy_get_output_frame_info ret %i", ret);
+ return -EINVAL;
+ }
+ if (!asd->yuvpp_mode) {
+ /*
+ * If viewfinder was configured into copy_mode,
+ * we switch to using yuvpp pipe instead.
+ */
+ asd->yuvpp_mode = true;
+ ret = atomisp_css_copy_configure_output(
+ asd, video_index, 0, 0, 0, 0);
+ if (ret) {
+ dev_err(isp->dev,
+ "failed to disable copy pipe");
+ return -EINVAL;
+ }
+ ret = atomisp_css_yuvpp_configure_output(
+ asd, video_index,
+ output_info.res.width,
+ output_info.res.height,
+ output_info.padded_width,
+ output_info.format);
+ if (ret) {
+ dev_err(isp->dev,
+ "failed to set up yuvpp pipe\n");
+ return -EINVAL;
+ }
+ atomisp_css_video_enable_online(asd, false);
+ atomisp_css_preview_enable_online(asd,
+ ATOMISP_INPUT_STREAM_GENERAL, false);
+ }
+ atomisp_css_yuvpp_configure_viewfinder(asd, video_index,
+ f->fmt.pix.width, f->fmt.pix.height,
+ format_bridge->planar ? f->fmt.pix.bytesperline
+ : f->fmt.pix.bytesperline * 8
+ / format_bridge->depth, format_bridge->sh_fmt);
+ atomisp_css_yuvpp_get_viewfinder_frame_info(
+ asd, video_index, &output_info);
+ } else if (source_pad == ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW) {
+ atomisp_css_video_configure_viewfinder(asd,
+ f->fmt.pix.width, f->fmt.pix.height,
+ format_bridge->planar ? f->fmt.pix.bytesperline
+ : f->fmt.pix.bytesperline * 8
+ / format_bridge->depth, format_bridge->sh_fmt);
+ atomisp_css_video_get_viewfinder_frame_info(asd,
+ &output_info);
+ asd->copy_mode = false;
+ } else {
+ atomisp_css_capture_configure_viewfinder(asd,
+ f->fmt.pix.width, f->fmt.pix.height,
+ format_bridge->planar ? f->fmt.pix.bytesperline
+ : f->fmt.pix.bytesperline * 8
+ / format_bridge->depth, format_bridge->sh_fmt);
+ atomisp_css_capture_get_viewfinder_frame_info(asd,
+ &output_info);
+ asd->copy_mode = false;
+ }
+
+ goto done;
+ }
+ /*
+ * Check whether the main resolution is configured smaller than the
+ * snapshot resolution. If so, force the main resolution to be the
+ * same as the snapshot resolution.
+ */
+ if (source_pad == ATOMISP_SUBDEV_PAD_SOURCE_CAPTURE) {
+ struct v4l2_rect *r;
+
+ r = atomisp_subdev_get_rect(
+ &asd->subdev, NULL,
+ V4L2_SUBDEV_FORMAT_ACTIVE,
+ ATOMISP_SUBDEV_PAD_SOURCE_VF, V4L2_SEL_TGT_COMPOSE);
+
+ if (r->width && r->height
+ && (r->width > f->fmt.pix.width
+ || r->height > f->fmt.pix.height))
+ dev_warn(isp->dev,
+ "Main Resolution config smaller then Vf Resolution. Force to be equal with Vf Resolution.");
+ }
+
+ /* Pipeline configuration done through subdevs. Bail out now. */
+ if (!asd->fmt_auto->val)
+ goto set_fmt_to_isp;
+
+ /* get sensor resolution and format */
+ ret = atomisp_try_fmt(vdev, &snr_fmt, &res_overflow);
+ if (ret)
+ return ret;
+ f->fmt.pix.width = snr_fmt.fmt.pix.width;
+ f->fmt.pix.height = snr_fmt.fmt.pix.height;
+
+ snr_format_bridge =
+ atomisp_get_format_bridge(snr_fmt.fmt.pix.pixelformat);
+ if (!snr_format_bridge)
+ return -EINVAL;
+
+ atomisp_subdev_get_ffmt(&asd->subdev, NULL,
+ V4L2_SUBDEV_FORMAT_ACTIVE,
+ ATOMISP_SUBDEV_PAD_SINK)->code =
+ snr_format_bridge->mbus_code;
+
+ isp_sink_fmt = *atomisp_subdev_get_ffmt(&asd->subdev, NULL,
+ V4L2_SUBDEV_FORMAT_ACTIVE,
+ ATOMISP_SUBDEV_PAD_SINK);
+
+ isp_source_fmt.code = format_bridge->mbus_code;
+ atomisp_subdev_set_ffmt(&asd->subdev, fh.pad,
+ V4L2_SUBDEV_FORMAT_ACTIVE,
+ source_pad, &isp_source_fmt);
+
+ if (!atomisp_subdev_format_conversion(asd, source_pad)) {
+ padding_w = 0;
+ padding_h = 0;
+ } else if (IS_BYT) {
+ padding_w = 12;
+ padding_h = 12;
+ }
+
+ /* construct resolution supported by isp */
+ if (res_overflow && !asd->continuous_mode->val) {
+ f->fmt.pix.width = rounddown(
+ clamp_t(u32, f->fmt.pix.width - padding_w,
+ ATOM_ISP_MIN_WIDTH,
+ ATOM_ISP_MAX_WIDTH), ATOM_ISP_STEP_WIDTH);
+ f->fmt.pix.height = rounddown(
+ clamp_t(u32, f->fmt.pix.height - padding_h,
+ ATOM_ISP_MIN_HEIGHT,
+ ATOM_ISP_MAX_HEIGHT), ATOM_ISP_STEP_HEIGHT);
+ }
+
+ atomisp_get_dis_envelop(asd, f->fmt.pix.width, f->fmt.pix.height,
+ &dvs_env_w, &dvs_env_h);
+
+ if (asd->continuous_mode->val) {
+ struct v4l2_rect *r;
+
+ r = atomisp_subdev_get_rect(
+ &asd->subdev, NULL,
+ V4L2_SUBDEV_FORMAT_ACTIVE,
+ ATOMISP_SUBDEV_PAD_SOURCE_CAPTURE,
+ V4L2_SEL_TGT_COMPOSE);
+ /*
+ * ATOMISP_SUBDEV_PAD_SOURCE_CAPTURE should have its resolution
+ * properly set; otherwise it should not be used as the capture_pad.
+ */
+ if (r->width && r->height)
+ asd->capture_pad = ATOMISP_SUBDEV_PAD_SOURCE_CAPTURE;
+ else
+ asd->capture_pad = source_pad;
+ } else {
+ asd->capture_pad = source_pad;
+ }
+ /*
+ * Set the format info on the sensor.
+ * In continuous mode, the resolution is set only if it is higher than
+ * the existing value. This is because the preview pipe is configured
+ * after the capture pipe and usually has a lower resolution.
+ */
+ if (!asd->continuous_mode->val ||
+ isp_sink_fmt.width < (f->fmt.pix.width + padding_w + dvs_env_w) ||
+ isp_sink_fmt.height < (f->fmt.pix.height + padding_h +
+ dvs_env_h)) {
+ /*
+ * For JPEG or custom RAW formats the sensor returns a constant
+ * width and height. Because try_mbus_fmt has already been queried,
+ * f->fmt.pix.width and f->fmt.pix.height have been changed to that
+ * fixed width and height, so the correct resolution cannot be
+ * selected from them. Therefore use the original width and height
+ * when calling set_mbus_fmt() so that the actual resolution is
+ * used when setting the media bus format.
+ */
+ s_fmt = *f;
+ if (f->fmt.pix.pixelformat == V4L2_PIX_FMT_JPEG ||
+ f->fmt.pix.pixelformat == V4L2_PIX_FMT_CUSTOM_M10MO_RAW) {
+ s_fmt.fmt.pix.width = backup_fmt.fmt.pix.width;
+ s_fmt.fmt.pix.height = backup_fmt.fmt.pix.height;
+ }
+ ret = atomisp_set_fmt_to_snr(vdev, &s_fmt,
+ f->fmt.pix.pixelformat, padding_w,
+ padding_h, dvs_env_w, dvs_env_h);
+ if (ret)
+ return -EINVAL;
+
+ atomisp_csi_lane_config(isp);
+ crop_needs_override = true;
+ }
+
+ atomisp_check_copy_mode(asd, source_pad, &backup_fmt);
+ asd->yuvpp_mode = false; /* Reset variable */
+
+ isp_sink_crop = *atomisp_subdev_get_rect(&asd->subdev, NULL,
+ V4L2_SUBDEV_FORMAT_ACTIVE,
+ ATOMISP_SUBDEV_PAD_SINK,
+ V4L2_SEL_TGT_CROP);
+
+ /* Try to enable YUV downscaling if ISP input is 10 % (either
+ * width or height) bigger than the desired result. */
+ if (isp_sink_crop.width * 9 / 10 < f->fmt.pix.width ||
+ isp_sink_crop.height * 9 / 10 < f->fmt.pix.height ||
+ (atomisp_subdev_format_conversion(asd, source_pad) &&
+ ((asd->run_mode->val == ATOMISP_RUN_MODE_VIDEO &&
+ !asd->continuous_mode->val) ||
+ asd->vfpp->val == ATOMISP_VFPP_DISABLE_SCALER))) {
+ /* In continuous mode, the preview size might be smaller than
+ * the still capture size. If the preview size still needs a
+ * crop, pick the larger of the preview and still capture
+ * crop sizes.
+ */
+ if (asd->continuous_mode->val
+ && source_pad == ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW
+ && !crop_needs_override) {
+ isp_sink_crop.width =
+ max_t(unsigned int, f->fmt.pix.width,
+ isp_sink_crop.width);
+ isp_sink_crop.height =
+ max_t(unsigned int, f->fmt.pix.height,
+ isp_sink_crop.height);
+ } else {
+ isp_sink_crop.width = f->fmt.pix.width;
+ isp_sink_crop.height = f->fmt.pix.height;
+ }
+
+ atomisp_subdev_set_selection(&asd->subdev, fh.pad,
+ V4L2_SUBDEV_FORMAT_ACTIVE,
+ ATOMISP_SUBDEV_PAD_SINK,
+ V4L2_SEL_TGT_CROP,
+ V4L2_SEL_FLAG_KEEP_CONFIG,
+ &isp_sink_crop);
+ atomisp_subdev_set_selection(&asd->subdev, fh.pad,
+ V4L2_SUBDEV_FORMAT_ACTIVE,
+ source_pad, V4L2_SEL_TGT_COMPOSE,
+ 0, &isp_sink_crop);
+ } else if (IS_MOFD) {
+ struct v4l2_rect main_compose = {0};
+
+ main_compose.width = isp_sink_crop.width;
+ main_compose.height =
+ DIV_ROUND_UP(main_compose.width * f->fmt.pix.height,
+ f->fmt.pix.width);
+ if (main_compose.height > isp_sink_crop.height) {
+ main_compose.height = isp_sink_crop.height;
+ main_compose.width =
+ DIV_ROUND_UP(main_compose.height *
+ f->fmt.pix.width,
+ f->fmt.pix.height);
+ }
+
+ atomisp_subdev_set_selection(&asd->subdev, fh.pad,
+ V4L2_SUBDEV_FORMAT_ACTIVE,
+ source_pad,
+ V4L2_SEL_TGT_COMPOSE, 0,
+ &main_compose);
+ } else {
+ struct v4l2_rect sink_crop = {0};
+ struct v4l2_rect main_compose = {0};
+
+ main_compose.width = f->fmt.pix.width;
+ main_compose.height = f->fmt.pix.height;
+
+#ifndef ISP2401
+ /* WORKAROUND: this override is universally enabled in
+ * GMIN to work around a CTS failure (GMINL-539)
+ * which appears to be related to a hardware
+ * performance limitation. It's unclear why this
+ * particular code triggers the issue. */
+ if (1 ||
+ crop_needs_override) {
+#else
+ if (crop_needs_override) {
+#endif
+ if (isp_sink_crop.width * main_compose.height >
+ isp_sink_crop.height * main_compose.width) {
+ sink_crop.height = isp_sink_crop.height;
+ sink_crop.width = DIV_NEAREST_STEP(
+ sink_crop.height *
+ f->fmt.pix.width,
+ f->fmt.pix.height,
+ ATOM_ISP_STEP_WIDTH);
+ } else {
+ sink_crop.width = isp_sink_crop.width;
+ sink_crop.height = DIV_NEAREST_STEP(
+ sink_crop.width *
+ f->fmt.pix.height,
+ f->fmt.pix.width,
+ ATOM_ISP_STEP_HEIGHT);
+ }
+ atomisp_subdev_set_selection(&asd->subdev, fh.pad,
+ V4L2_SUBDEV_FORMAT_ACTIVE,
+ ATOMISP_SUBDEV_PAD_SINK,
+ V4L2_SEL_TGT_CROP,
+ V4L2_SEL_FLAG_KEEP_CONFIG,
+ &sink_crop);
+ }
+ atomisp_subdev_set_selection(&asd->subdev, fh.pad,
+ V4L2_SUBDEV_FORMAT_ACTIVE,
+ source_pad,
+ V4L2_SEL_TGT_COMPOSE, 0,
+ &main_compose);
+ }
+
+set_fmt_to_isp:
+ ret = atomisp_set_fmt_to_isp(vdev, &output_info, &raw_output_info,
+ &f->fmt.pix, source_pad);
+ if (ret)
+ return -EINVAL;
+done:
+ pipe->pix.width = f->fmt.pix.width;
+ pipe->pix.height = f->fmt.pix.height;
+ pipe->pix.pixelformat = f->fmt.pix.pixelformat;
+ if (format_bridge->planar) {
+ pipe->pix.bytesperline = output_info.padded_width;
+ pipe->pix.sizeimage = PAGE_ALIGN(f->fmt.pix.height *
+ DIV_ROUND_UP(format_bridge->depth *
+ output_info.padded_width, 8));
+ } else {
+ pipe->pix.bytesperline =
+ DIV_ROUND_UP(format_bridge->depth *
+ output_info.padded_width, 8);
+ pipe->pix.sizeimage =
+ PAGE_ALIGN(f->fmt.pix.height * pipe->pix.bytesperline);
+
+ }
+ if (f->fmt.pix.field == V4L2_FIELD_ANY)
+ f->fmt.pix.field = V4L2_FIELD_NONE;
+ pipe->pix.field = f->fmt.pix.field;
+
+ f->fmt.pix = pipe->pix;
+ f->fmt.pix.priv = PAGE_ALIGN(pipe->pix.width *
+ pipe->pix.height * 2);
+
+ pipe->capq.field = f->fmt.pix.field;
+
+ /*
+ * In the video 480p case, no GFX throttling is needed
+ */
+ if (asd->run_mode->val == ATOMISP_SUBDEV_PAD_SOURCE_VIDEO &&
+ f->fmt.pix.width == 720 && f->fmt.pix.height == 480)
+ isp->need_gfx_throttle = false;
+ else
+ isp->need_gfx_throttle = true;
+
+ return 0;
+}
+
+int atomisp_set_fmt_file(struct video_device *vdev, struct v4l2_format *f)
+{
+ struct atomisp_device *isp = video_get_drvdata(vdev);
+ struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev);
+ struct atomisp_sub_device *asd = pipe->asd;
+ struct v4l2_mbus_framefmt ffmt = {0};
+ const struct atomisp_format_bridge *format_bridge;
+ struct v4l2_subdev_fh fh;
+ int ret;
+
+ v4l2_fh_init(&fh.vfh, vdev);
+
+ dev_dbg(isp->dev, "setting fmt %ux%u 0x%x for file inject\n",
+ f->fmt.pix.width, f->fmt.pix.height, f->fmt.pix.pixelformat);
+ ret = atomisp_try_fmt_file(isp, f);
+ if (ret) {
+ dev_err(isp->dev, "atomisp_try_fmt_file err: %d\n", ret);
+ return ret;
+ }
+
+ format_bridge = atomisp_get_format_bridge(f->fmt.pix.pixelformat);
+ if (format_bridge == NULL) {
+ dev_dbg(isp->dev, "atomisp_get_format_bridge err! fmt:0x%x\n",
+ f->fmt.pix.pixelformat);
+ return -EINVAL;
+ }
+
+ pipe->pix = f->fmt.pix;
+ atomisp_css_input_set_mode(asd, CSS_INPUT_MODE_FIFO);
+ atomisp_css_input_configure_port(asd,
+ __get_mipi_port(isp, ATOMISP_CAMERA_PORT_PRIMARY), 2, 0xffff4,
+ 0, 0, 0, 0);
+ ffmt.width = f->fmt.pix.width;
+ ffmt.height = f->fmt.pix.height;
+ ffmt.code = format_bridge->mbus_code;
+
+ atomisp_subdev_set_ffmt(&asd->subdev, fh.pad, V4L2_SUBDEV_FORMAT_ACTIVE,
+ ATOMISP_SUBDEV_PAD_SINK, &ffmt);
+
+ return 0;
+}
+
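+/*
+ * Copy a user supplied lens shading table into a newly allocated CSS table,
+ * hand it to the CSS and free the previously installed table.
+ */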
+int atomisp_set_shading_table(struct atomisp_sub_device *asd,
+ struct atomisp_shading_table *user_shading_table)
+{
+ struct atomisp_css_shading_table *shading_table;
+ struct atomisp_css_shading_table *free_table;
+ unsigned int len_table;
+ int i;
+ int ret = 0;
+
+ if (!user_shading_table)
+ return -EINVAL;
+
+ if (!user_shading_table->enable) {
+ atomisp_css_set_shading_table(asd, NULL);
+ asd->params.sc_en = 0;
+ return 0;
+ }
+
+ /* If enabling, all tables must be set */
+ for (i = 0; i < ATOMISP_NUM_SC_COLORS; i++) {
+ if (!user_shading_table->data[i])
+ return -EINVAL;
+ }
+
+ /* Shading table size per color */
+ if (user_shading_table->width > SH_CSS_MAX_SCTBL_WIDTH_PER_COLOR ||
+ user_shading_table->height > SH_CSS_MAX_SCTBL_HEIGHT_PER_COLOR)
+ return -EINVAL;
+
+ shading_table = atomisp_css_shading_table_alloc(
+ user_shading_table->width, user_shading_table->height);
+ if (!shading_table)
+ return -ENOMEM;
+
+ len_table = user_shading_table->width * user_shading_table->height *
+ ATOMISP_SC_TYPE_SIZE;
+ for (i = 0; i < ATOMISP_NUM_SC_COLORS; i++) {
+ ret = copy_from_user(shading_table->data[i],
+ user_shading_table->data[i], len_table);
+ if (ret) {
+ free_table = shading_table;
+ ret = -EFAULT;
+ goto out;
+ }
+ }
+ shading_table->sensor_width = user_shading_table->sensor_width;
+ shading_table->sensor_height = user_shading_table->sensor_height;
+ shading_table->fraction_bits = user_shading_table->fraction_bits;
+
+ free_table = asd->params.css_param.shading_table;
+ asd->params.css_param.shading_table = shading_table;
+ atomisp_css_set_shading_table(asd, shading_table);
+ asd->params.sc_en = 1;
+
+out:
+ if (free_table != NULL)
+ atomisp_css_shading_table_free(free_table);
+
+ return ret;
+}
+
+/* Turn off the ISP DPHY */
+int atomisp_ospm_dphy_down(struct atomisp_device *isp)
+{
+ unsigned long flags;
+ u32 reg;
+
+ dev_dbg(isp->dev, "%s\n", __func__);
+
+ /* if ISP timeout, we can force powerdown */
+ if (isp->isp_timeout)
+ goto done;
+
+ if (!atomisp_dev_users(isp))
+ goto done;
+
+ spin_lock_irqsave(&isp->lock, flags);
+ isp->sw_contex.power_state = ATOM_ISP_POWER_DOWN;
+ spin_unlock_irqrestore(&isp->lock, flags);
+done:
+ /*
+ * The MRFLD IUNIT DPHY is located in an always-powered-on island.
+ * The MRFLD HW design requires that all CSI ports be disabled
+ * before powering down the IUNIT.
+ */
+ pci_read_config_dword(isp->pdev, MRFLD_PCI_CSI_CONTROL, &reg);
+ reg |= MRFLD_ALL_CSI_PORTS_OFF_MASK;
+ pci_write_config_dword(isp->pdev, MRFLD_PCI_CSI_CONTROL, reg);
+ return 0;
+}
+
+/* Turn on the ISP DPHY */
+int atomisp_ospm_dphy_up(struct atomisp_device *isp)
+{
+ unsigned long flags;
+ dev_dbg(isp->dev, "%s\n", __func__);
+
+ spin_lock_irqsave(&isp->lock, flags);
+ isp->sw_contex.power_state = ATOM_ISP_POWER_UP;
+ spin_unlock_irqrestore(&isp->lock, flags);
+
+ return 0;
+}
+
+
+int atomisp_exif_makernote(struct atomisp_sub_device *asd,
+ struct atomisp_makernote_info *config)
+{
+ struct v4l2_control ctrl;
+ struct atomisp_device *isp = asd->isp;
+
+ ctrl.id = V4L2_CID_FOCAL_ABSOLUTE;
+ if (v4l2_g_ctrl
+ (isp->inputs[asd->input_curr].camera->ctrl_handler, &ctrl)) {
+ dev_warn(isp->dev, "failed to g_ctrl for focal length\n");
+ return -EINVAL;
+ } else {
+ config->focal_length = ctrl.value;
+ }
+
+ ctrl.id = V4L2_CID_FNUMBER_ABSOLUTE;
+ if (v4l2_g_ctrl
+ (isp->inputs[asd->input_curr].camera->ctrl_handler, &ctrl)) {
+ dev_warn(isp->dev, "failed to g_ctrl for f-number\n");
+ return -EINVAL;
+ } else {
+ config->f_number_curr = ctrl.value;
+ }
+
+ ctrl.id = V4L2_CID_FNUMBER_RANGE;
+ if (v4l2_g_ctrl
+ (isp->inputs[asd->input_curr].camera->ctrl_handler, &ctrl)) {
+ dev_warn(isp->dev, "failed to g_ctrl for f number range\n");
+ return -EINVAL;
+ } else {
+ config->f_number_range = ctrl.value;
+ }
+
+ return 0;
+}
+
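+/*
+ * Configure continuous (offline) capture: either trigger a ZSL capture on an
+ * external ISP such as the M10MO, or size the raw buffer ring and switch
+ * continuous mode on or off accordingly.
+ */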
+int atomisp_offline_capture_configure(struct atomisp_sub_device *asd,
+ struct atomisp_cont_capture_conf *cvf_config)
+{
+ struct v4l2_ctrl *c;
+
+ /*
+ * In the M10MO ZSL capture case, we need to issue a separate
+ * capture request to the M10MO, which will output the captured JPEG image
+ */
+ c = v4l2_ctrl_find(
+ asd->isp->inputs[asd->input_curr].camera->ctrl_handler,
+ V4L2_CID_START_ZSL_CAPTURE);
+ if (c) {
+ int ret;
+ dev_dbg(asd->isp->dev, "%s trigger ZSL capture request\n",
+ __func__);
+ /* TODO: use the cvf_config */
+ ret = v4l2_ctrl_s_ctrl(c, 1);
+ if (ret)
+ return ret;
+
+ return v4l2_ctrl_s_ctrl(c, 0);
+ }
+
+ asd->params.offline_parm = *cvf_config;
+
+ if (asd->params.offline_parm.num_captures) {
+ if (asd->streaming == ATOMISP_DEVICE_STREAMING_DISABLED) {
+ unsigned int init_raw_num;
+
+ if (asd->enable_raw_buffer_lock->val) {
+ init_raw_num =
+ ATOMISP_CSS2_NUM_OFFLINE_INIT_CONTINUOUS_FRAMES_LOCK_EN;
+ if (asd->run_mode->val == ATOMISP_RUN_MODE_VIDEO &&
+ asd->params.video_dis_en)
+ init_raw_num +=
+ ATOMISP_CSS2_NUM_DVS_FRAME_DELAY;
+ } else {
+ init_raw_num =
+ ATOMISP_CSS2_NUM_OFFLINE_INIT_CONTINUOUS_FRAMES;
+ }
+
+ /* TODO: this can be removed once user-space
+ * has been updated to use control API */
+ asd->continuous_raw_buffer_size->val =
+ max_t(int,
+ asd->continuous_raw_buffer_size->val,
+ asd->params.offline_parm.
+ num_captures + init_raw_num);
+ asd->continuous_raw_buffer_size->val =
+ min_t(int, ATOMISP_CONT_RAW_FRAMES,
+ asd->continuous_raw_buffer_size->val);
+ }
+ asd->continuous_mode->val = true;
+ } else {
+ asd->continuous_mode->val = false;
+ __enable_continuous_mode(asd, false);
+ }
+
+ return 0;
+}
+
+/*
+ * set auto exposure metering window to camera sensor
+ */
+int atomisp_s_ae_window(struct atomisp_sub_device *asd,
+ struct atomisp_ae_window *arg)
+{
+ struct atomisp_device *isp = asd->isp;
+ /* Coverity CID 298071 - initialize struct */
+ struct v4l2_subdev_selection sel = { 0 };
+
+ sel.r.left = arg->x_left;
+ sel.r.top = arg->y_top;
+ sel.r.width = arg->x_right - arg->x_left + 1;
+ sel.r.height = arg->y_bottom - arg->y_top + 1;
+
+ if (v4l2_subdev_call(isp->inputs[asd->input_curr].camera,
+ pad, set_selection, NULL, &sel)) {
+ dev_err(isp->dev, "failed to call sensor set_selection.\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int atomisp_flash_enable(struct atomisp_sub_device *asd, int num_frames)
+{
+ struct atomisp_device *isp = asd->isp;
+
+ if (num_frames < 0) {
+ dev_dbg(isp->dev, "%s ERROR: num_frames: %d\n", __func__,
+ num_frames);
+ return -EINVAL;
+ }
+ /* a requested flash is still in progress. */
+ if (num_frames && asd->params.flash_state != ATOMISP_FLASH_IDLE) {
+ dev_dbg(isp->dev, "%s flash busy: %d frames left: %d\n",
+ __func__, asd->params.flash_state,
+ asd->params.num_flash_frames);
+ return -EBUSY;
+ }
+
+ asd->params.num_flash_frames = num_frames;
+ asd->params.flash_state = ATOMISP_FLASH_REQUESTED;
+ return 0;
+}
+
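+/*
+ * Map a subdev source pad to the CSS input stream that feeds it. Sensors
+ * exposing a single stream always use the general stream.
+ */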
+int atomisp_source_pad_to_stream_id(struct atomisp_sub_device *asd,
+ uint16_t source_pad)
+{
+ int stream_id;
+ struct atomisp_device *isp = asd->isp;
+
+ if (isp->inputs[asd->input_curr].camera_caps->
+ sensor[asd->sensor_curr].stream_num == 1)
+ return ATOMISP_INPUT_STREAM_GENERAL;
+
+ switch (source_pad) {
+ case ATOMISP_SUBDEV_PAD_SOURCE_CAPTURE:
+ stream_id = ATOMISP_INPUT_STREAM_CAPTURE;
+ break;
+ case ATOMISP_SUBDEV_PAD_SOURCE_VF:
+ stream_id = ATOMISP_INPUT_STREAM_POSTVIEW;
+ break;
+ case ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW:
+ stream_id = ATOMISP_INPUT_STREAM_PREVIEW;
+ break;
+ case ATOMISP_SUBDEV_PAD_SOURCE_VIDEO:
+ stream_id = ATOMISP_INPUT_STREAM_VIDEO;
+ break;
+ default:
+ stream_id = ATOMISP_INPUT_STREAM_GENERAL;
+ }
+
+ return stream_id;
+}
+
+bool atomisp_is_vf_pipe(struct atomisp_video_pipe *pipe)
+{
+ struct atomisp_sub_device *asd = pipe->asd;
+
+ if (pipe == &asd->video_out_vf)
+ return true;
+
+ if (asd->run_mode->val == ATOMISP_RUN_MODE_VIDEO &&
+ pipe == &asd->video_out_preview)
+ return true;
+
+ return false;
+}
+
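+/*
+ * Validate an exposure ID: raw buffer locking must be enabled, the stream
+ * must be running and the ID must lie within the valid range.
+ */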
+static int __checking_exp_id(struct atomisp_sub_device *asd, int exp_id)
+{
+ struct atomisp_device *isp = asd->isp;
+
+ if (!asd->enable_raw_buffer_lock->val) {
+ dev_warn(isp->dev, "%s Raw Buffer Lock is disable.\n", __func__);
+ return -EINVAL;
+ }
+ if (asd->streaming != ATOMISP_DEVICE_STREAMING_ENABLED) {
+ dev_err(isp->dev, "%s streaming %d invalid exp_id %d.\n",
+ __func__, exp_id, asd->streaming);
+ return -EINVAL;
+ }
+ if ((exp_id > ATOMISP_MAX_EXP_ID) || (exp_id <= 0)) {
+ dev_err(isp->dev, "%s exp_id %d invalid.\n", __func__, exp_id);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+void atomisp_init_raw_buffer_bitmap(struct atomisp_sub_device *asd)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&asd->raw_buffer_bitmap_lock, flags);
+ memset(asd->raw_buffer_bitmap, 0, sizeof(asd->raw_buffer_bitmap));
+ asd->raw_buffer_locked_count = 0;
+ spin_unlock_irqrestore(&asd->raw_buffer_bitmap_lock, flags);
+}
+
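+/*
+ * Mark the raw buffer belonging to exp_id as locked in the bitmap. As a
+ * workaround, the buffer two exposures ahead (which exp_id will wrap onto)
+ * is force unlocked if it is still held.
+ */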
+int atomisp_set_raw_buffer_bitmap(struct atomisp_sub_device *asd, int exp_id)
+{
+ int *bitmap, bit;
+ unsigned long flags;
+
+ if (__checking_exp_id(asd, exp_id))
+ return -EINVAL;
+
+ bitmap = asd->raw_buffer_bitmap + exp_id / 32;
+ bit = exp_id % 32;
+ spin_lock_irqsave(&asd->raw_buffer_bitmap_lock, flags);
+ (*bitmap) |= (1 << bit);
+ asd->raw_buffer_locked_count++;
+ spin_unlock_irqrestore(&asd->raw_buffer_bitmap_lock, flags);
+
+ dev_dbg(asd->isp->dev, "%s: exp_id %d, raw_buffer_locked_count %d\n",
+ __func__, exp_id, asd->raw_buffer_locked_count);
+
+ /* Check if the raw buffer after next is still locked!!! */
+ exp_id += 2;
+ if (exp_id > ATOMISP_MAX_EXP_ID)
+ exp_id -= ATOMISP_MAX_EXP_ID;
+ bitmap = asd->raw_buffer_bitmap + exp_id / 32;
+ bit = exp_id % 32;
+ if ((*bitmap) & (1 << bit)) {
+ int ret;
+
+ /* WORKAROUND: forcibly unlock the raw buffer */
+ ret = atomisp_css_exp_id_unlock(asd, exp_id);
+ if (ret) {
+ dev_err(asd->isp->dev, "%s exp_id is wrapping back to %d but force unlock failed, err %d.\n",
+ __func__, exp_id, ret);
+ return ret;
+ }
+
+ spin_lock_irqsave(&asd->raw_buffer_bitmap_lock, flags);
+ (*bitmap) &= ~(1 << bit);
+ asd->raw_buffer_locked_count--;
+ spin_unlock_irqrestore(&asd->raw_buffer_bitmap_lock, flags);
+ dev_warn(asd->isp->dev, "%s exp_id is wrapping back to %d but it is still locked so force unlock it, raw_buffer_locked_count %d\n",
+ __func__, exp_id, asd->raw_buffer_locked_count);
+ }
+ return 0;
+}
+
+static int __is_raw_buffer_locked(struct atomisp_sub_device *asd, int exp_id)
+{
+ int *bitmap, bit;
+ unsigned long flags;
+ int ret;
+
+ if (__checking_exp_id(asd, exp_id))
+ return -EINVAL;
+
+ bitmap = asd->raw_buffer_bitmap + exp_id / 32;
+ bit = exp_id % 32;
+ spin_lock_irqsave(&asd->raw_buffer_bitmap_lock, flags);
+ ret = ((*bitmap) & (1 << bit));
+ spin_unlock_irqrestore(&asd->raw_buffer_bitmap_lock, flags);
+ return !ret;
+}
+
+static int __clear_raw_buffer_bitmap(struct atomisp_sub_device *asd, int exp_id)
+{
+ int *bitmap, bit;
+ unsigned long flags;
+
+ if (__is_raw_buffer_locked(asd, exp_id))
+ return -EINVAL;
+
+ bitmap = asd->raw_buffer_bitmap + exp_id / 32;
+ bit = exp_id % 32;
+ spin_lock_irqsave(&asd->raw_buffer_bitmap_lock, flags);
+ (*bitmap) &= ~(1 << bit);
+ asd->raw_buffer_locked_count--;
+ spin_unlock_irqrestore(&asd->raw_buffer_bitmap_lock, flags);
+
+ dev_dbg(asd->isp->dev, "%s: exp_id %d, raw_buffer_locked_count %d\n",
+ __func__, exp_id, asd->raw_buffer_locked_count);
+ return 0;
+}
+
+int atomisp_exp_id_capture(struct atomisp_sub_device *asd, int *exp_id)
+{
+ struct atomisp_device *isp = asd->isp;
+ int value = *exp_id;
+ int ret;
+
+ ret = __is_raw_buffer_locked(asd, value);
+ if (ret) {
+ dev_err(isp->dev, "%s exp_id %d invalid %d.\n", __func__, value, ret);
+ return -EINVAL;
+ }
+
+ dev_dbg(isp->dev, "%s exp_id %d\n", __func__, value);
+ ret = atomisp_css_exp_id_capture(asd, value);
+ if (ret) {
+ dev_err(isp->dev, "%s exp_id %d failed.\n", __func__, value);
+ return -EIO;
+ }
+ return 0;
+}
+
+int atomisp_exp_id_unlock(struct atomisp_sub_device *asd, int *exp_id)
+{
+ struct atomisp_device *isp = asd->isp;
+ int value = *exp_id;
+ int ret;
+
+ ret = __clear_raw_buffer_bitmap(asd, value);
+ if (ret) {
+ dev_err(isp->dev, "%s exp_id %d invalid %d.\n", __func__, value, ret);
+ return -EINVAL;
+ }
+
+ dev_dbg(isp->dev, "%s exp_id %d\n", __func__, value);
+ ret = atomisp_css_exp_id_unlock(asd, value);
+ if (ret)
+ dev_err(isp->dev, "%s exp_id %d failed, err %d.\n",
+ __func__, value, ret);
+
+ return ret;
+}
+
+int atomisp_enable_dz_capt_pipe(struct atomisp_sub_device *asd,
+ unsigned int *enable)
+{
+ bool value;
+
+ if (enable == NULL)
+ return -EINVAL;
+
+ value = *enable > 0 ? true : false;
+
+ atomisp_en_dz_capt_pipe(asd, value);
+
+ return 0;
+}
+
+int atomisp_inject_a_fake_event(struct atomisp_sub_device *asd, int *event)
+{
+ if (!event || asd->streaming != ATOMISP_DEVICE_STREAMING_ENABLED)
+ return -EINVAL;
+
+ dev_dbg(asd->isp->dev, "%s: trying to inject a fake event 0x%x\n",
+ __func__, *event);
+
+ switch (*event) {
+ case V4L2_EVENT_FRAME_SYNC:
+ atomisp_sof_event(asd);
+ break;
+ case V4L2_EVENT_FRAME_END:
+ atomisp_eof_event(asd, 0);
+ break;
+ case V4L2_EVENT_ATOMISP_3A_STATS_READY:
+ atomisp_3a_stats_ready_event(asd, 0);
+ break;
+ case V4L2_EVENT_ATOMISP_METADATA_READY:
+ atomisp_metadata_ready_event(asd, 0);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int atomisp_get_pipe_id(struct atomisp_video_pipe *pipe)
+{
+ struct atomisp_sub_device *asd = pipe->asd;
+
+ if (ATOMISP_USE_YUVPP(asd))
+ return CSS_PIPE_ID_YUVPP;
+ else if (asd->vfpp->val == ATOMISP_VFPP_DISABLE_SCALER)
+ return CSS_PIPE_ID_VIDEO;
+ else if (asd->vfpp->val == ATOMISP_VFPP_DISABLE_LOWLAT)
+ return CSS_PIPE_ID_CAPTURE;
+ else if (pipe == &asd->video_out_video_capture)
+ return CSS_PIPE_ID_VIDEO;
+ else if (pipe == &asd->video_out_vf)
+ return CSS_PIPE_ID_CAPTURE;
+ else if (pipe == &asd->video_out_preview) {
+ if (asd->run_mode->val == ATOMISP_RUN_MODE_VIDEO)
+ return CSS_PIPE_ID_VIDEO;
+ else
+ return CSS_PIPE_ID_PREVIEW;
+ } else if (pipe == &asd->video_out_capture) {
+ if (asd->copy_mode)
+ return IA_CSS_PIPE_ID_COPY;
+ else
+ return CSS_PIPE_ID_CAPTURE;
+ }
+
+ /* fall through */
+ dev_warn(asd->isp->dev, "%s failed to find proper pipe\n",
+ __func__);
+ return CSS_PIPE_ID_CAPTURE;
+}
+
+int atomisp_get_invalid_frame_num(struct video_device *vdev,
+ int *invalid_frame_num)
+{
+ struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev);
+ struct atomisp_sub_device *asd = pipe->asd;
+ enum atomisp_css_pipe_id pipe_id;
+ struct ia_css_pipe_info p_info;
+ int ret;
+
+ if (asd->isp->inputs[asd->input_curr].camera_caps->
+ sensor[asd->sensor_curr].stream_num > 1) {
+ /* External ISP */
+ *invalid_frame_num = 0;
+ return 0;
+ }
+
+ pipe_id = atomisp_get_pipe_id(pipe);
+ if (!asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].pipes[pipe_id]) {
+ dev_warn(asd->isp->dev, "%s pipe %d has not been created yet, do SET_FMT first!\n",
+ __func__, pipe_id);
+ return -EINVAL;
+ }
+
+ ret = ia_css_pipe_get_info(
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL]
+ .pipes[pipe_id], &p_info);
+ if (ret == IA_CSS_SUCCESS) {
+ *invalid_frame_num = p_info.num_invalid_frames;
+ return 0;
+ } else {
+ dev_warn(asd->isp->dev, "%s get pipe info failed %d\n",
+ __func__, ret);
+ return -EINVAL;
+ }
+}
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_cmd.h b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_cmd.h
new file mode 100644
index 000000000000..8e6d9df7ad1a
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_cmd.h
@@ -0,0 +1,457 @@
+/*
+ * Support for Medifield PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
+ *
+ * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+
+#ifndef __ATOMISP_CMD_H__
+#define __ATOMISP_CMD_H__
+
+#include "../../include/linux/atomisp.h"
+#include <linux/interrupt.h>
+#include <linux/videodev2.h>
+
+#include <media/v4l2-subdev.h>
+
+#include "atomisp_internal.h"
+
+#include "ia_css_types.h"
+#include "ia_css.h"
+
+struct atomisp_device;
+struct atomisp_css_frame;
+
+#define MSI_ENABLE_BIT 16
+#define INTR_DISABLE_BIT 10
+#define BUS_MASTER_ENABLE 2
+#define MEMORY_SPACE_ENABLE 1
+#define INTR_IER 24
+#define INTR_IIR 16
+#ifdef ISP2401
+#define RUNMODE_MASK (ATOMISP_RUN_MODE_VIDEO | ATOMISP_RUN_MODE_STILL_CAPTURE \
+ | ATOMISP_RUN_MODE_PREVIEW)
+
+/* FIXME: check if can go */
+extern int atomisp_punit_hpll_freq;
+#endif
+
+/*
+ * Helper function
+ */
+void dump_sp_dmem(struct atomisp_device *isp, unsigned int addr,
+ unsigned int size);
+struct camera_mipi_info *atomisp_to_sensor_mipi_info(struct v4l2_subdev *sd);
+struct atomisp_video_pipe *atomisp_to_video_pipe(struct video_device *dev);
+struct atomisp_acc_pipe *atomisp_to_acc_pipe(struct video_device *dev);
+int atomisp_reset(struct atomisp_device *isp);
+void atomisp_flush_bufs_and_wakeup(struct atomisp_sub_device *asd);
+void atomisp_clear_css_buffer_counters(struct atomisp_sub_device *asd);
+#ifndef ISP2401
+bool atomisp_buffers_queued(struct atomisp_sub_device *asd);
+#else
+bool atomisp_buffers_queued_pipe(struct atomisp_video_pipe *pipe);
+#endif
+
+/* TODO: should be here instead of atomisp_helper.h
+extern void __iomem *atomisp_io_base;
+
+static inline void __iomem *atomisp_get_io_virt_addr(unsigned int address)
+{
+ void __iomem *ret = atomisp_io_base + (address & 0x003FFFFF);
+ return ret;
+}
+*/
+void *atomisp_kernel_malloc(size_t bytes);
+void *atomisp_kernel_zalloc(size_t bytes, bool zero_mem);
+void atomisp_kernel_free(void *ptr);
+
+/*
+ * Interrupt functions
+ */
+void atomisp_msi_irq_init(struct atomisp_device *isp, struct pci_dev *dev);
+void atomisp_msi_irq_uninit(struct atomisp_device *isp, struct pci_dev *dev);
+void atomisp_wdt_work(struct work_struct *work);
+#ifndef ISP2401
+void atomisp_wdt(unsigned long isp_addr);
+#else
+void atomisp_wdt(unsigned long pipe_addr);
+#endif
+void atomisp_setup_flash(struct atomisp_sub_device *asd);
+irqreturn_t atomisp_isr(int irq, void *dev);
+irqreturn_t atomisp_isr_thread(int irq, void *isp_ptr);
+const struct atomisp_format_bridge *get_atomisp_format_bridge_from_mbus(
+ u32 mbus_code);
+bool atomisp_is_mbuscode_raw(uint32_t code);
+int atomisp_get_frame_pgnr(struct atomisp_device *isp,
+ const struct atomisp_css_frame *frame, u32 *p_pgnr);
+void atomisp_delayed_init_work(struct work_struct *work);
+
+/*
+ * Get internal fmt according to V4L2 fmt
+ */
+
+bool atomisp_is_viewfinder_support(struct atomisp_device *isp);
+
+/*
+ * ISP features control function
+ */
+
+/*
+#ifdef ISP2401
+ * Function to set sensor runmode by user when
+ * ATOMISP_IOC_S_SENSOR_RUNMODE ioctl was called
+ */
+int atomisp_set_sensor_runmode(struct atomisp_sub_device *asd,
+ struct atomisp_s_runmode *runmode);
+/*
+#endif
+ * Function to enable/disable lens geometry distortion correction (GDC) and
+ * chromatic aberration correction (CAC)
+ */
+int atomisp_gdc_cac(struct atomisp_sub_device *asd, int flag,
+ __s32 *value);
+
+/*
+ * Function to enable/disable low light mode (including ANR)
+ */
+int atomisp_low_light(struct atomisp_sub_device *asd, int flag,
+ __s32 *value);
+
+/*
+ * Function to enable/disable extra noise reduction (XNR) in low light
+ * condition
+ */
+int atomisp_xnr(struct atomisp_sub_device *asd, int flag, int *arg);
+
+int atomisp_formats(struct atomisp_sub_device *asd, int flag,
+ struct atomisp_formats_config *config);
+
+/*
+ * Function to configure noise reduction
+ */
+int atomisp_nr(struct atomisp_sub_device *asd, int flag,
+ struct atomisp_nr_config *config);
+
+/*
+ * Function to configure temporal noise reduction (TNR)
+ */
+int atomisp_tnr(struct atomisp_sub_device *asd, int flag,
+ struct atomisp_tnr_config *config);
+
+/*
+ * Function to configure black level compensation
+ */
+int atomisp_black_level(struct atomisp_sub_device *asd, int flag,
+ struct atomisp_ob_config *config);
+
+/*
+ * Function to configure edge enhancement
+ */
+int atomisp_ee(struct atomisp_sub_device *asd, int flag,
+ struct atomisp_ee_config *config);
+
+/*
+ * Function to update Gamma table for gamma, brightness and contrast config
+ */
+int atomisp_gamma(struct atomisp_sub_device *asd, int flag,
+ struct atomisp_gamma_table *config);
+/*
+ * Function to update Ctc table for Chroma Enhancement
+ */
+int atomisp_ctc(struct atomisp_sub_device *asd, int flag,
+ struct atomisp_ctc_table *config);
+
+/*
+ * Function to update gamma correction parameters
+ */
+int atomisp_gamma_correction(struct atomisp_sub_device *asd, int flag,
+ struct atomisp_gc_config *config);
+
+/*
+ * Function to update Gdc table for gdc
+ */
+int atomisp_gdc_cac_table(struct atomisp_sub_device *asd, int flag,
+ struct atomisp_morph_table *config);
+
+/*
+ * Function to update table for macc
+ */
+int atomisp_macc_table(struct atomisp_sub_device *asd, int flag,
+ struct atomisp_macc_config *config);
+/*
+ * Function to get DIS statistics.
+ */
+int atomisp_get_dis_stat(struct atomisp_sub_device *asd,
+ struct atomisp_dis_statistics *stats);
+
+/*
+ * Function to get DVS2 BQ resolution settings
+ */
+int atomisp_get_dvs2_bq_resolutions(struct atomisp_sub_device *asd,
+ struct atomisp_dvs2_bq_resolutions *bq_res);
+
+/*
+ * Function to set the DIS coefficients.
+ */
+int atomisp_set_dis_coefs(struct atomisp_sub_device *asd,
+ struct atomisp_dis_coefficients *coefs);
+
+/*
+ * Function to set the DIS motion vector.
+ */
+int atomisp_set_dis_vector(struct atomisp_sub_device *asd,
+ struct atomisp_dis_vector *vector);
+
+/*
+ * Function to set/get 3A stat from isp
+ */
+int atomisp_3a_stat(struct atomisp_sub_device *asd, int flag,
+ struct atomisp_3a_statistics *config);
+
+/*
+ * Function to get metadata from isp
+ */
+int atomisp_get_metadata(struct atomisp_sub_device *asd, int flag,
+ struct atomisp_metadata *config);
+
+int atomisp_get_metadata_by_type(struct atomisp_sub_device *asd, int flag,
+ struct atomisp_metadata_with_type *config);
+
+int atomisp_set_parameters(struct video_device *vdev,
+ struct atomisp_parameters *arg);
+/*
+ * Function to set/get isp parameters to isp
+ */
+int atomisp_param(struct atomisp_sub_device *asd, int flag,
+ struct atomisp_parm *config);
+
+/*
+ * Function to configure color effect of the image
+ */
+int atomisp_color_effect(struct atomisp_sub_device *asd, int flag,
+ __s32 *effect);
+
+/*
+ * Function to configure bad pixel correction
+ */
+int atomisp_bad_pixel(struct atomisp_sub_device *asd, int flag,
+ __s32 *value);
+
+/*
+ * Function to configure bad pixel correction params
+ */
+int atomisp_bad_pixel_param(struct atomisp_sub_device *asd, int flag,
+ struct atomisp_dp_config *config);
+
+/*
+ * Function to enable/disable video image stabilization
+ */
+int atomisp_video_stable(struct atomisp_sub_device *asd, int flag,
+ __s32 *value);
+
+/*
+ * Function to configure fixed pattern noise
+ */
+int atomisp_fixed_pattern(struct atomisp_sub_device *asd, int flag,
+ __s32 *value);
+
+/*
+ * Function to configure fixed pattern noise table
+ */
+int atomisp_fixed_pattern_table(struct atomisp_sub_device *asd,
+ struct v4l2_framebuffer *config);
+
+/*
+ * Function to configure false color correction
+ */
+int atomisp_false_color(struct atomisp_sub_device *asd, int flag,
+ __s32 *value);
+
+/*
+ * Function to configure false color correction params
+ */
+int atomisp_false_color_param(struct atomisp_sub_device *asd, int flag,
+ struct atomisp_de_config *config);
+
+/*
+ * Function to configure white balance params
+ */
+int atomisp_white_balance_param(struct atomisp_sub_device *asd, int flag,
+ struct atomisp_wb_config *config);
+
+int atomisp_3a_config_param(struct atomisp_sub_device *asd, int flag,
+ struct atomisp_3a_config *config);
+
+/*
+ * Function to setup digital zoom
+ */
+int atomisp_digital_zoom(struct atomisp_sub_device *asd, int flag,
+ __s32 *value);
+
+/*
+ * Function to set the current sensor pixel array size from camera_prefiles.xml
+ */
+int atomisp_set_array_res(struct atomisp_sub_device *asd,
+ struct atomisp_resolution *config);
+
+/*
+ * Function to calculate real zoom region for every pipe
+ */
+int atomisp_calculate_real_zoom_region(struct atomisp_sub_device *asd,
+ struct atomisp_css_dz_config *dz_config,
+ enum atomisp_css_pipe_id css_pipe_id);
+
+int atomisp_cp_general_isp_parameters(struct atomisp_sub_device *asd,
+ struct atomisp_parameters *arg,
+ struct atomisp_css_params *css_param,
+ bool from_user);
+
+int atomisp_cp_lsc_table(struct atomisp_sub_device *asd,
+ struct atomisp_shading_table *source_st,
+ struct atomisp_css_params *css_param,
+ bool from_user);
+
+int atomisp_css_cp_dvs2_coefs(struct atomisp_sub_device *asd,
+ struct ia_css_dvs2_coefficients *coefs,
+ struct atomisp_css_params *css_param,
+ bool from_user);
+
+int atomisp_cp_morph_table(struct atomisp_sub_device *asd,
+ struct atomisp_morph_table *source_morph_table,
+ struct atomisp_css_params *css_param,
+ bool from_user);
+
+int atomisp_cp_dvs_6axis_config(struct atomisp_sub_device *asd,
+ struct atomisp_dvs_6axis_config *user_6axis_config,
+ struct atomisp_css_params *css_param,
+ bool from_user);
+
+int atomisp_makeup_css_parameters(struct atomisp_sub_device *asd,
+ struct atomisp_parameters *arg,
+ struct atomisp_css_params *css_param);
+
+int atomisp_compare_grid(struct atomisp_sub_device *asd,
+ struct atomisp_grid_info *atomgrid);
+
+int atomisp_get_sensor_mode_data(struct atomisp_sub_device *asd,
+ struct atomisp_sensor_mode_data *config);
+
+int atomisp_get_fmt(struct video_device *vdev, struct v4l2_format *f);
+
+
+/* This function looks up the closest available resolution. */
+int atomisp_try_fmt(struct video_device *vdev, struct v4l2_format *f,
+ bool *res_overflow);
+
+int atomisp_set_fmt(struct video_device *vdev, struct v4l2_format *f);
+int atomisp_set_fmt_file(struct video_device *vdev, struct v4l2_format *f);
+
+int atomisp_set_shading_table(struct atomisp_sub_device *asd,
+ struct atomisp_shading_table *shading_table);
+
+int atomisp_offline_capture_configure(struct atomisp_sub_device *asd,
+ struct atomisp_cont_capture_conf *cvf_config);
+
+int atomisp_ospm_dphy_down(struct atomisp_device *isp);
+int atomisp_ospm_dphy_up(struct atomisp_device *isp);
+int atomisp_exif_makernote(struct atomisp_sub_device *asd,
+ struct atomisp_makernote_info *config);
+
+void atomisp_free_internal_buffers(struct atomisp_sub_device *asd);
+
+int atomisp_s_ae_window(struct atomisp_sub_device *asd,
+ struct atomisp_ae_window *arg);
+
+int atomisp_flash_enable(struct atomisp_sub_device *asd,
+ int num_frames);
+
+int atomisp_freq_scaling(struct atomisp_device *vdev,
+ enum atomisp_dfs_mode mode,
+ bool force);
+
+void atomisp_buf_done(struct atomisp_sub_device *asd, int error,
+ enum atomisp_css_buffer_type buf_type,
+ enum atomisp_css_pipe_id css_pipe_id,
+ bool q_buffers, enum atomisp_input_stream_id stream_id);
+
+void atomisp_css_flush(struct atomisp_device *isp);
+int atomisp_source_pad_to_stream_id(struct atomisp_sub_device *asd,
+ uint16_t source_pad);
+
+/*
+ * Events. Only one event has to be exported for now.
+ */
+void atomisp_eof_event(struct atomisp_sub_device *asd, uint8_t exp_id);
+
+mipi_port_ID_t __get_mipi_port(struct atomisp_device *isp,
+ enum atomisp_camera_port port);
+
+bool atomisp_is_vf_pipe(struct atomisp_video_pipe *pipe);
+
+void atomisp_apply_css_parameters(
+ struct atomisp_sub_device *asd,
+ struct atomisp_css_params *css_param);
+void atomisp_free_css_parameters(struct atomisp_css_params *css_param);
+
+void atomisp_handle_parameter_and_buffer(struct atomisp_video_pipe *pipe);
+
+void atomisp_flush_params_queue(struct atomisp_video_pipe *asd);
+/*
+ * Functions for raw buffer related operations, used after raw buffer lock/unlock is enabled
+ */
+int atomisp_exp_id_unlock(struct atomisp_sub_device *asd, int *exp_id);
+int atomisp_exp_id_capture(struct atomisp_sub_device *asd, int *exp_id);
+
+/*
+ * Function to update Raw Buffer bitmap
+ */
+int atomisp_set_raw_buffer_bitmap(struct atomisp_sub_device *asd, int exp_id);
+void atomisp_init_raw_buffer_bitmap(struct atomisp_sub_device *asd);
+
+/*
+ * Function to enable/disable zoom for capture pipe
+ */
+int atomisp_enable_dz_capt_pipe(struct atomisp_sub_device *asd,
+ unsigned int *enable);
+
+/*
+ * Function to get the metadata type by pipe id
+ */
+enum atomisp_metadata_type
+atomisp_get_metadata_type(struct atomisp_sub_device *asd,
+ enum ia_css_pipe_id pipe_id);
+
+/*
+ * Function for HAL to inject a fake event to wake up poll thread
+ */
+int atomisp_inject_a_fake_event(struct atomisp_sub_device *asd, int *event);
+
+/*
+ * Function for the HAL to query how many invalid frames there are at the
+ * beginning of the ISP pipeline output
+ */
+int atomisp_get_invalid_frame_num(struct video_device *vdev,
+ int *invalid_frame_num);
+
+int atomisp_mrfld_power_up(struct atomisp_device *isp);
+int atomisp_mrfld_power_down(struct atomisp_device *isp);
+int atomisp_runtime_suspend(struct device *dev);
+int atomisp_runtime_resume(struct device *dev);
+#endif /* __ATOMISP_CMD_H__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_common.h b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_common.h
new file mode 100644
index 000000000000..69d1526da362
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_common.h
@@ -0,0 +1,79 @@
+/*
+ * Support for Medfield PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
+ *
+ * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+
+#ifndef __ATOMISP_COMMON_H__
+#define __ATOMISP_COMMON_H__
+
+#include "../../include/linux/atomisp.h"
+
+#include <linux/v4l2-mediabus.h>
+
+#include <media/videobuf-core.h>
+
+#include "atomisp_compat.h"
+
+#include "ia_css.h"
+
+extern int dbg_level;
+extern int dbg_func;
+extern int mipicsi_flag;
+extern int pad_w;
+extern int pad_h;
+
+#define CSS_DTRACE_VERBOSITY_LEVEL 5 /* Controls trace verbosity */
+#define CSS_DTRACE_VERBOSITY_TIMEOUT 9 /* Verbosity on ISP timeout */
+#define MRFLD_MAX_ZOOM_FACTOR 1024
+#ifdef ISP2401
+#define ATOMISP_CSS_ISP_PIPE_VERSION_2_2 0
+#define ATOMISP_CSS_ISP_PIPE_VERSION_2_7 1
+#endif
+
+#define IS_ISP2401(isp) \
+ (((isp)->media_dev.hw_revision & ATOMISP_HW_REVISION_MASK) \
+ >= (ATOMISP_HW_REVISION_ISP2401_LEGACY << ATOMISP_HW_REVISION_SHIFT))
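+/*
+ * Usage sketch (illustrative only, not taken from this driver): callers can
+ * gate ISP2401-specific paths on the hardware revision encoded in the media
+ * device, e.g.:
+ *
+ *	if (IS_ISP2401(isp))
+ *		dev_dbg(isp->dev, "running on an ISP2401 variant\n");
+ */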
+
+struct atomisp_format_bridge {
+ unsigned int pixelformat;
+ unsigned int depth;
+ u32 mbus_code;
+ enum atomisp_css_frame_format sh_fmt;
+ unsigned char description[32]; /* the same as struct v4l2_fmtdesc */
+ bool planar;
+};
+
+struct atomisp_fmt {
+ u32 pixelformat;
+ u32 depth;
+ u32 bytesperline;
+ u32 framesize;
+ u32 imagesize;
+ u32 width;
+ u32 height;
+ u32 bayer_order;
+};
+
+struct atomisp_buffer {
+ struct videobuf_buffer vb;
+};
+
+#endif /* __ATOMISP_COMMON_H__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat.h b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat.h
new file mode 100644
index 000000000000..fb8b8fab4e92
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat.h
@@ -0,0 +1,668 @@
+/*
+ * Support for Clovertrail PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2012 Intel Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+
+#ifndef __ATOMISP_COMPAT_H__
+#define __ATOMISP_COMPAT_H__
+
+#include "atomisp_compat_css20.h"
+
+#include "../../include/linux/atomisp.h"
+#include <media/videobuf-vmalloc.h>
+
+#define CSS_RX_IRQ_INFO_BUFFER_OVERRUN \
+ CSS_ID(CSS_RX_IRQ_INFO_BUFFER_OVERRUN)
+#define CSS_RX_IRQ_INFO_ENTER_SLEEP_MODE \
+ CSS_ID(CSS_RX_IRQ_INFO_ENTER_SLEEP_MODE)
+#define CSS_RX_IRQ_INFO_EXIT_SLEEP_MODE \
+ CSS_ID(CSS_RX_IRQ_INFO_EXIT_SLEEP_MODE)
+#define CSS_RX_IRQ_INFO_ECC_CORRECTED \
+ CSS_ID(CSS_RX_IRQ_INFO_ECC_CORRECTED)
+#define CSS_RX_IRQ_INFO_ERR_SOT \
+ CSS_ID(CSS_RX_IRQ_INFO_ERR_SOT)
+#define CSS_RX_IRQ_INFO_ERR_SOT_SYNC \
+ CSS_ID(CSS_RX_IRQ_INFO_ERR_SOT_SYNC)
+#define CSS_RX_IRQ_INFO_ERR_CONTROL \
+ CSS_ID(CSS_RX_IRQ_INFO_ERR_CONTROL)
+#define CSS_RX_IRQ_INFO_ERR_ECC_DOUBLE \
+ CSS_ID(CSS_RX_IRQ_INFO_ERR_ECC_DOUBLE)
+#define CSS_RX_IRQ_INFO_ERR_CRC \
+ CSS_ID(CSS_RX_IRQ_INFO_ERR_CRC)
+#define CSS_RX_IRQ_INFO_ERR_UNKNOWN_ID \
+ CSS_ID(CSS_RX_IRQ_INFO_ERR_UNKNOWN_ID)
+#define CSS_RX_IRQ_INFO_ERR_FRAME_SYNC \
+ CSS_ID(CSS_RX_IRQ_INFO_ERR_FRAME_SYNC)
+#define CSS_RX_IRQ_INFO_ERR_FRAME_DATA \
+ CSS_ID(CSS_RX_IRQ_INFO_ERR_FRAME_DATA)
+#define CSS_RX_IRQ_INFO_ERR_DATA_TIMEOUT \
+ CSS_ID(CSS_RX_IRQ_INFO_ERR_DATA_TIMEOUT)
+#define CSS_RX_IRQ_INFO_ERR_UNKNOWN_ESC \
+ CSS_ID(CSS_RX_IRQ_INFO_ERR_UNKNOWN_ESC)
+#define CSS_RX_IRQ_INFO_ERR_LINE_SYNC \
+ CSS_ID(CSS_RX_IRQ_INFO_ERR_LINE_SYNC)
+#define CSS_RX_IRQ_INFO_INIT_TIMEOUT \
+ CSS_ID(CSS_RX_IRQ_INFO_INIT_TIMEOUT)
+
+#define CSS_IRQ_INFO_CSS_RECEIVER_SOF CSS_ID(CSS_IRQ_INFO_CSS_RECEIVER_SOF)
+#define CSS_IRQ_INFO_CSS_RECEIVER_EOF CSS_ID(CSS_IRQ_INFO_CSS_RECEIVER_EOF)
+#define CSS_IRQ_INFO_CSS_RECEIVER_FIFO_OVERFLOW \
+ CSS_ID(CSS_IRQ_INFO_CSS_RECEIVER_FIFO_OVERFLOW)
+#define CSS_EVENT_OUTPUT_FRAME_DONE CSS_EVENT(OUTPUT_FRAME_DONE)
+#define CSS_EVENT_SEC_OUTPUT_FRAME_DONE CSS_EVENT(SECOND_OUTPUT_FRAME_DONE)
+#define CSS_EVENT_VF_OUTPUT_FRAME_DONE CSS_EVENT(VF_OUTPUT_FRAME_DONE)
+#define CSS_EVENT_SEC_VF_OUTPUT_FRAME_DONE CSS_EVENT(SECOND_VF_OUTPUT_FRAME_DONE)
+#define CSS_EVENT_3A_STATISTICS_DONE CSS_EVENT(3A_STATISTICS_DONE)
+#define CSS_EVENT_DIS_STATISTICS_DONE CSS_EVENT(DIS_STATISTICS_DONE)
+#define CSS_EVENT_PIPELINE_DONE CSS_EVENT(PIPELINE_DONE)
+#define CSS_EVENT_METADATA_DONE CSS_EVENT(METADATA_DONE)
+#define CSS_EVENT_ACC_STAGE_COMPLETE CSS_EVENT(ACC_STAGE_COMPLETE)
+#define CSS_EVENT_TIMER CSS_EVENT(TIMER)
+
+#define CSS_BUFFER_TYPE_METADATA CSS_ID(CSS_BUFFER_TYPE_METADATA)
+#define CSS_BUFFER_TYPE_3A_STATISTICS CSS_ID(CSS_BUFFER_TYPE_3A_STATISTICS)
+#define CSS_BUFFER_TYPE_DIS_STATISTICS CSS_ID(CSS_BUFFER_TYPE_DIS_STATISTICS)
+#define CSS_BUFFER_TYPE_INPUT_FRAME CSS_ID(CSS_BUFFER_TYPE_INPUT_FRAME)
+#define CSS_BUFFER_TYPE_OUTPUT_FRAME CSS_ID(CSS_BUFFER_TYPE_OUTPUT_FRAME)
+#define CSS_BUFFER_TYPE_SEC_OUTPUT_FRAME CSS_ID(CSS_BUFFER_TYPE_SEC_OUTPUT_FRAME)
+#define CSS_BUFFER_TYPE_VF_OUTPUT_FRAME CSS_ID(CSS_BUFFER_TYPE_VF_OUTPUT_FRAME)
+#define CSS_BUFFER_TYPE_SEC_VF_OUTPUT_FRAME CSS_ID(CSS_BUFFER_TYPE_SEC_VF_OUTPUT_FRAME)
+#define CSS_BUFFER_TYPE_RAW_OUTPUT_FRAME \
+ CSS_ID(CSS_BUFFER_TYPE_RAW_OUTPUT_FRAME)
+
+#define CSS_FORMAT_RAW_8 CSS_FORMAT(RAW_8)
+#define CSS_FORMAT_RAW_10 CSS_FORMAT(RAW_10)
+#define CSS_FORMAT_RAW_12 CSS_FORMAT(RAW_12)
+#define CSS_FORMAT_RAW_16 CSS_FORMAT(RAW_16)
+
+#define CSS_CAPTURE_MODE_RAW CSS_ID(CSS_CAPTURE_MODE_RAW)
+#define CSS_CAPTURE_MODE_BAYER CSS_ID(CSS_CAPTURE_MODE_BAYER)
+#define CSS_CAPTURE_MODE_PRIMARY CSS_ID(CSS_CAPTURE_MODE_PRIMARY)
+#define CSS_CAPTURE_MODE_ADVANCED CSS_ID(CSS_CAPTURE_MODE_ADVANCED)
+#define CSS_CAPTURE_MODE_LOW_LIGHT CSS_ID(CSS_CAPTURE_MODE_LOW_LIGHT)
+
+#define CSS_MORPH_TABLE_NUM_PLANES CSS_ID(CSS_MORPH_TABLE_NUM_PLANES)
+
+#define CSS_FRAME_FORMAT_NV11 CSS_ID(CSS_FRAME_FORMAT_NV11)
+#define CSS_FRAME_FORMAT_NV12 CSS_ID(CSS_FRAME_FORMAT_NV12)
+#define CSS_FRAME_FORMAT_NV16 CSS_ID(CSS_FRAME_FORMAT_NV16)
+#define CSS_FRAME_FORMAT_NV21 CSS_ID(CSS_FRAME_FORMAT_NV21)
+#define CSS_FRAME_FORMAT_NV61 CSS_ID(CSS_FRAME_FORMAT_NV61)
+#define CSS_FRAME_FORMAT_YV12 CSS_ID(CSS_FRAME_FORMAT_YV12)
+#define CSS_FRAME_FORMAT_YV16 CSS_ID(CSS_FRAME_FORMAT_YV16)
+#define CSS_FRAME_FORMAT_YUV420 CSS_ID(CSS_FRAME_FORMAT_YUV420)
+#define CSS_FRAME_FORMAT_YUV420_16 CSS_ID(CSS_FRAME_FORMAT_YUV420_16)
+#define CSS_FRAME_FORMAT_YUV422 CSS_ID(CSS_FRAME_FORMAT_YUV422)
+#define CSS_FRAME_FORMAT_YUV422_16 CSS_ID(CSS_FRAME_FORMAT_YUV422_16)
+#define CSS_FRAME_FORMAT_UYVY CSS_ID(CSS_FRAME_FORMAT_UYVY)
+#define CSS_FRAME_FORMAT_YUYV CSS_ID(CSS_FRAME_FORMAT_YUYV)
+#define CSS_FRAME_FORMAT_YUV444 CSS_ID(CSS_FRAME_FORMAT_YUV444)
+#define CSS_FRAME_FORMAT_YUV_LINE CSS_ID(CSS_FRAME_FORMAT_YUV_LINE)
+#define CSS_FRAME_FORMAT_RAW CSS_ID(CSS_FRAME_FORMAT_RAW)
+#define CSS_FRAME_FORMAT_RGB565 CSS_ID(CSS_FRAME_FORMAT_RGB565)
+#define CSS_FRAME_FORMAT_PLANAR_RGB888 CSS_ID(CSS_FRAME_FORMAT_PLANAR_RGB888)
+#define CSS_FRAME_FORMAT_RGBA888 CSS_ID(CSS_FRAME_FORMAT_RGBA888)
+#define CSS_FRAME_FORMAT_QPLANE6 CSS_ID(CSS_FRAME_FORMAT_QPLANE6)
+#define CSS_FRAME_FORMAT_BINARY_8 CSS_ID(CSS_FRAME_FORMAT_BINARY_8)
+
+struct atomisp_device;
+struct atomisp_sub_device;
+struct video_device;
+enum atomisp_input_stream_id;
+
+struct atomisp_metadata_buf {
+ struct ia_css_metadata *metadata;
+ void *md_vptr;
+ struct list_head list;
+};
+
+void atomisp_css_debug_dump_sp_sw_debug_info(void);
+void atomisp_css_debug_dump_debug_info(const char *context);
+void atomisp_css_debug_set_dtrace_level(const unsigned int trace_level);
+
+void atomisp_store_uint32(hrt_address addr, uint32_t data);
+void atomisp_load_uint32(hrt_address addr, uint32_t *data);
+
+int atomisp_css_init(struct atomisp_device *isp);
+
+void atomisp_css_uninit(struct atomisp_device *isp);
+
+void atomisp_css_suspend(struct atomisp_device *isp);
+
+int atomisp_css_resume(struct atomisp_device *isp);
+
+void atomisp_css_init_struct(struct atomisp_sub_device *asd);
+
+int atomisp_css_irq_translate(struct atomisp_device *isp,
+ unsigned int *infos);
+
+void atomisp_css_rx_get_irq_info(enum ia_css_csi2_port port,
+ unsigned int *infos);
+
+void atomisp_css_rx_clear_irq_info(enum ia_css_csi2_port port,
+ unsigned int infos);
+
+int atomisp_css_irq_enable(struct atomisp_device *isp,
+ enum atomisp_css_irq_info info, bool enable);
+
+int atomisp_q_video_buffer_to_css(struct atomisp_sub_device *asd,
+ struct videobuf_vmalloc_memory *vm_mem,
+ enum atomisp_input_stream_id stream_id,
+ enum atomisp_css_buffer_type css_buf_type,
+ enum atomisp_css_pipe_id css_pipe_id);
+
+int atomisp_q_s3a_buffer_to_css(struct atomisp_sub_device *asd,
+ struct atomisp_s3a_buf *s3a_buf,
+ enum atomisp_input_stream_id stream_id,
+ enum atomisp_css_pipe_id css_pipe_id);
+
+int atomisp_q_metadata_buffer_to_css(struct atomisp_sub_device *asd,
+ struct atomisp_metadata_buf *metadata_buf,
+ enum atomisp_input_stream_id stream_id,
+ enum atomisp_css_pipe_id css_pipe_id);
+
+int atomisp_q_dis_buffer_to_css(struct atomisp_sub_device *asd,
+ struct atomisp_dis_buf *dis_buf,
+ enum atomisp_input_stream_id stream_id,
+ enum atomisp_css_pipe_id css_pipe_id);
+
+void atomisp_css_mmu_invalidate_cache(void);
+
+void atomisp_css_mmu_invalidate_tlb(void);
+
+void atomisp_css_mmu_set_page_table_base_index(unsigned long base_index);
+
+int atomisp_css_start(struct atomisp_sub_device *asd,
+ enum atomisp_css_pipe_id pipe_id, bool in_reset);
+
+void atomisp_css_update_isp_params(struct atomisp_sub_device *asd);
+void atomisp_css_update_isp_params_on_pipe(struct atomisp_sub_device *asd,
+ struct ia_css_pipe *pipe);
+
+int atomisp_css_queue_buffer(struct atomisp_sub_device *asd,
+ enum atomisp_input_stream_id stream_id,
+ enum atomisp_css_pipe_id pipe_id,
+ enum atomisp_css_buffer_type buf_type,
+ struct atomisp_css_buffer *isp_css_buffer);
+
+int atomisp_css_dequeue_buffer(struct atomisp_sub_device *asd,
+ enum atomisp_input_stream_id stream_id,
+ enum atomisp_css_pipe_id pipe_id,
+ enum atomisp_css_buffer_type buf_type,
+ struct atomisp_css_buffer *isp_css_buffer);
+
+int atomisp_css_allocate_stat_buffers(struct atomisp_sub_device *asd,
+ uint16_t stream_id,
+ struct atomisp_s3a_buf *s3a_buf,
+ struct atomisp_dis_buf *dis_buf,
+ struct atomisp_metadata_buf *md_buf);
+
+void atomisp_css_free_stat_buffers(struct atomisp_sub_device *asd);
+
+void atomisp_css_free_3a_buffer(struct atomisp_s3a_buf *s3a_buf);
+
+void atomisp_css_free_dis_buffer(struct atomisp_dis_buf *dis_buf);
+
+void atomisp_css_free_metadata_buffer(struct atomisp_metadata_buf *metadata_buf);
+
+int atomisp_css_get_grid_info(struct atomisp_sub_device *asd,
+ enum atomisp_css_pipe_id pipe_id,
+ int source_pad);
+
+int atomisp_alloc_3a_output_buf(struct atomisp_sub_device *asd);
+
+int atomisp_alloc_dis_coef_buf(struct atomisp_sub_device *asd);
+
+int atomisp_alloc_metadata_output_buf(struct atomisp_sub_device *asd);
+
+void atomisp_free_metadata_output_buf(struct atomisp_sub_device *asd);
+
+void atomisp_css_get_dis_statistics(struct atomisp_sub_device *asd,
+ struct atomisp_css_buffer *isp_css_buffer,
+ struct ia_css_isp_dvs_statistics_map *dvs_map);
+
+int atomisp_css_dequeue_event(struct atomisp_css_event *current_event);
+
+void atomisp_css_temp_pipe_to_pipe_id(struct atomisp_sub_device *asd,
+ struct atomisp_css_event *current_event);
+
+int atomisp_css_isys_set_resolution(struct atomisp_sub_device *asd,
+ enum atomisp_input_stream_id stream_id,
+ struct v4l2_mbus_framefmt *ffmt,
+ int isys_stream);
+
+void atomisp_css_isys_set_link(struct atomisp_sub_device *asd,
+ enum atomisp_input_stream_id stream_id,
+ int link,
+ int isys_stream);
+
+void atomisp_css_isys_set_valid(struct atomisp_sub_device *asd,
+ enum atomisp_input_stream_id stream_id,
+ bool valid,
+ int isys_stream);
+
+void atomisp_css_isys_set_format(struct atomisp_sub_device *asd,
+ enum atomisp_input_stream_id stream_id,
+ enum atomisp_css_stream_format format,
+ int isys_stream);
+
+int atomisp_css_set_default_isys_config(struct atomisp_sub_device *asd,
+ enum atomisp_input_stream_id stream_id,
+ struct v4l2_mbus_framefmt *ffmt);
+
+int atomisp_css_isys_two_stream_cfg(struct atomisp_sub_device *asd,
+ enum atomisp_input_stream_id stream_id,
+ enum atomisp_css_stream_format input_format);
+
+void atomisp_css_isys_two_stream_cfg_update_stream1(
+ struct atomisp_sub_device *asd,
+ enum atomisp_input_stream_id stream_id,
+ enum atomisp_css_stream_format input_format,
+ unsigned int width, unsigned int height);
+
+void atomisp_css_isys_two_stream_cfg_update_stream2(
+ struct atomisp_sub_device *asd,
+ enum atomisp_input_stream_id stream_id,
+ enum atomisp_css_stream_format input_format,
+ unsigned int width, unsigned int height);
+
+int atomisp_css_input_set_resolution(struct atomisp_sub_device *asd,
+ enum atomisp_input_stream_id stream_id,
+ struct v4l2_mbus_framefmt *ffmt);
+
+void atomisp_css_input_set_binning_factor(struct atomisp_sub_device *asd,
+ enum atomisp_input_stream_id stream_id,
+ unsigned int bin_factor);
+
+void atomisp_css_input_set_bayer_order(struct atomisp_sub_device *asd,
+ enum atomisp_input_stream_id stream_id,
+ enum atomisp_css_bayer_order bayer_order);
+
+void atomisp_css_input_set_format(struct atomisp_sub_device *asd,
+ enum atomisp_input_stream_id stream_id,
+ enum atomisp_css_stream_format format);
+
+int atomisp_css_input_set_effective_resolution(
+ struct atomisp_sub_device *asd,
+ enum atomisp_input_stream_id stream_id,
+ unsigned int width,
+ unsigned int height);
+
+void atomisp_css_video_set_dis_envelope(struct atomisp_sub_device *asd,
+ unsigned int dvs_w, unsigned int dvs_h);
+
+void atomisp_css_input_set_two_pixels_per_clock(
+ struct atomisp_sub_device *asd,
+ bool two_ppc);
+
+void atomisp_css_enable_raw_binning(struct atomisp_sub_device *asd,
+ bool enable);
+
+void atomisp_css_enable_dz(struct atomisp_sub_device *asd, bool enable);
+
+void atomisp_css_capture_set_mode(struct atomisp_sub_device *asd,
+ enum atomisp_css_capture_mode mode);
+
+void atomisp_css_input_set_mode(struct atomisp_sub_device *asd,
+ enum atomisp_css_input_mode mode);
+
+void atomisp_css_capture_enable_online(struct atomisp_sub_device *asd,
+ unsigned short stream_index, bool enable);
+
+void atomisp_css_preview_enable_online(struct atomisp_sub_device *asd,
+ unsigned short stream_index, bool enable);
+
+void atomisp_css_video_enable_online(struct atomisp_sub_device *asd,
+ bool enable);
+
+void atomisp_css_enable_continuous(struct atomisp_sub_device *asd,
+ bool enable);
+
+void atomisp_css_enable_cvf(struct atomisp_sub_device *asd,
+ bool enable);
+
+int atomisp_css_input_configure_port(struct atomisp_sub_device *asd,
+ mipi_port_ID_t port,
+ unsigned int num_lanes,
+ unsigned int timeout,
+ unsigned int mipi_freq,
+ enum atomisp_css_stream_format metadata_format,
+ unsigned int metadata_width,
+ unsigned int metadata_height);
+
+int atomisp_css_frame_allocate(struct atomisp_css_frame **frame,
+ unsigned int width, unsigned int height,
+ enum atomisp_css_frame_format format,
+ unsigned int padded_width,
+ unsigned int raw_bit_depth);
+
+int atomisp_css_frame_allocate_from_info(struct atomisp_css_frame **frame,
+ const struct atomisp_css_frame_info *info);
+
+void atomisp_css_frame_free(struct atomisp_css_frame *frame);
+
+int atomisp_css_frame_map(struct atomisp_css_frame **frame,
+ const struct atomisp_css_frame_info *info,
+ const void *data, uint16_t attribute,
+ void *context);
+
+int atomisp_css_set_black_frame(struct atomisp_sub_device *asd,
+ const struct atomisp_css_frame *raw_black_frame);
+
+int atomisp_css_allocate_continuous_frames(bool init_time,
+ struct atomisp_sub_device *asd);
+
+void atomisp_css_update_continuous_frames(struct atomisp_sub_device *asd);
+
+void atomisp_create_pipes_stream(struct atomisp_sub_device *asd);
+void atomisp_destroy_pipes_stream_force(struct atomisp_sub_device *asd);
+
+int atomisp_css_stop(struct atomisp_sub_device *asd,
+ enum atomisp_css_pipe_id pipe_id, bool in_reset);
+
+int atomisp_css_continuous_set_num_raw_frames(
+ struct atomisp_sub_device *asd,
+ int num_frames);
+
+void atomisp_css_disable_vf_pp(struct atomisp_sub_device *asd,
+ bool disable);
+
+int atomisp_css_copy_configure_output(struct atomisp_sub_device *asd,
+ unsigned int stream_index,
+ unsigned int width, unsigned int height,
+ unsigned int padded_width,
+ enum atomisp_css_frame_format format);
+
+int atomisp_css_yuvpp_configure_output(struct atomisp_sub_device *asd,
+ unsigned int stream_index,
+ unsigned int width, unsigned int height,
+ unsigned int padded_width,
+ enum atomisp_css_frame_format format);
+
+int atomisp_css_yuvpp_configure_viewfinder(
+ struct atomisp_sub_device *asd,
+ unsigned int stream_index,
+ unsigned int width, unsigned int height,
+ unsigned int min_width,
+ enum atomisp_css_frame_format format);
+
+int atomisp_css_yuvpp_get_output_frame_info(
+ struct atomisp_sub_device *asd,
+ unsigned int stream_index,
+ struct atomisp_css_frame_info *info);
+
+int atomisp_css_yuvpp_get_viewfinder_frame_info(
+ struct atomisp_sub_device *asd,
+ unsigned int stream_index,
+ struct atomisp_css_frame_info *info);
+
+int atomisp_css_preview_configure_output(struct atomisp_sub_device *asd,
+ unsigned int width, unsigned int height,
+ unsigned int min_width,
+ enum atomisp_css_frame_format format);
+
+int atomisp_css_capture_configure_output(struct atomisp_sub_device *asd,
+ unsigned int width, unsigned int height,
+ unsigned int min_width,
+ enum atomisp_css_frame_format format);
+
+int atomisp_css_video_configure_output(struct atomisp_sub_device *asd,
+ unsigned int width, unsigned int height,
+ unsigned int min_width,
+ enum atomisp_css_frame_format format);
+
+int atomisp_get_css_frame_info(struct atomisp_sub_device *asd,
+ uint16_t source_pad,
+ struct atomisp_css_frame_info *frame_info);
+
+int atomisp_css_video_configure_viewfinder(struct atomisp_sub_device *asd,
+ unsigned int width, unsigned int height,
+ unsigned int min_width,
+ enum atomisp_css_frame_format format);
+
+int atomisp_css_capture_configure_viewfinder(
+ struct atomisp_sub_device *asd,
+ unsigned int width, unsigned int height,
+ unsigned int min_width,
+ enum atomisp_css_frame_format format);
+
+int atomisp_css_video_get_viewfinder_frame_info(
+ struct atomisp_sub_device *asd,
+ struct atomisp_css_frame_info *info);
+
+int atomisp_css_capture_get_viewfinder_frame_info(
+ struct atomisp_sub_device *asd,
+ struct atomisp_css_frame_info *info);
+
+int atomisp_css_copy_get_output_frame_info(
+ struct atomisp_sub_device *asd,
+ unsigned int stream_index,
+ struct atomisp_css_frame_info *info);
+
+int atomisp_css_capture_get_output_raw_frame_info(
+ struct atomisp_sub_device *asd,
+ struct atomisp_css_frame_info *info);
+
+int atomisp_css_preview_get_output_frame_info(
+ struct atomisp_sub_device *asd,
+ struct atomisp_css_frame_info *info);
+
+int atomisp_css_capture_get_output_frame_info(
+ struct atomisp_sub_device *asd,
+ struct atomisp_css_frame_info *info);
+
+int atomisp_css_video_get_output_frame_info(
+ struct atomisp_sub_device *asd,
+ struct atomisp_css_frame_info *info);
+
+int atomisp_css_preview_configure_pp_input(
+ struct atomisp_sub_device *asd,
+ unsigned int width, unsigned int height);
+
+int atomisp_css_capture_configure_pp_input(
+ struct atomisp_sub_device *asd,
+ unsigned int width, unsigned int height);
+
+int atomisp_css_video_configure_pp_input(
+ struct atomisp_sub_device *asd,
+ unsigned int width, unsigned int height);
+
+int atomisp_css_offline_capture_configure(struct atomisp_sub_device *asd,
+ int num_captures, unsigned int skip, int offset);
+int atomisp_css_exp_id_capture(struct atomisp_sub_device *asd, int exp_id);
+int atomisp_css_exp_id_unlock(struct atomisp_sub_device *asd, int exp_id);
+
+int atomisp_css_capture_enable_xnr(struct atomisp_sub_device *asd,
+ bool enable);
+
+void atomisp_css_send_input_frame(struct atomisp_sub_device *asd,
+ unsigned short *data, unsigned int width,
+ unsigned int height);
+
+bool atomisp_css_isp_has_started(void);
+
+void atomisp_css_request_flash(struct atomisp_sub_device *asd);
+
+void atomisp_css_set_wb_config(struct atomisp_sub_device *asd,
+ struct atomisp_css_wb_config *wb_config);
+
+void atomisp_css_set_ob_config(struct atomisp_sub_device *asd,
+ struct atomisp_css_ob_config *ob_config);
+
+void atomisp_css_set_dp_config(struct atomisp_sub_device *asd,
+ struct atomisp_css_dp_config *dp_config);
+
+void atomisp_css_set_de_config(struct atomisp_sub_device *asd,
+ struct atomisp_css_de_config *de_config);
+
+void atomisp_css_set_dz_config(struct atomisp_sub_device *asd,
+ struct atomisp_css_dz_config *dz_config);
+
+void atomisp_css_set_default_de_config(struct atomisp_sub_device *asd);
+
+void atomisp_css_set_ce_config(struct atomisp_sub_device *asd,
+ struct atomisp_css_ce_config *ce_config);
+
+void atomisp_css_set_nr_config(struct atomisp_sub_device *asd,
+ struct atomisp_css_nr_config *nr_config);
+
+void atomisp_css_set_ee_config(struct atomisp_sub_device *asd,
+ struct atomisp_css_ee_config *ee_config);
+
+void atomisp_css_set_tnr_config(struct atomisp_sub_device *asd,
+ struct atomisp_css_tnr_config *tnr_config);
+
+void atomisp_css_set_cc_config(struct atomisp_sub_device *asd,
+ struct atomisp_css_cc_config *cc_config);
+
+void atomisp_css_set_macc_table(struct atomisp_sub_device *asd,
+ struct atomisp_css_macc_table *macc_table);
+
+void atomisp_css_set_gamma_table(struct atomisp_sub_device *asd,
+ struct atomisp_css_gamma_table *gamma_table);
+
+void atomisp_css_set_ctc_table(struct atomisp_sub_device *asd,
+ struct atomisp_css_ctc_table *ctc_table);
+
+void atomisp_css_set_gc_config(struct atomisp_sub_device *asd,
+ struct atomisp_css_gc_config *gc_config);
+
+void atomisp_css_set_3a_config(struct atomisp_sub_device *asd,
+ struct atomisp_css_3a_config *s3a_config);
+
+void atomisp_css_video_set_dis_vector(struct atomisp_sub_device *asd,
+ struct atomisp_dis_vector *vector);
+
+void atomisp_css_set_dvs2_coefs(struct atomisp_sub_device *asd,
+ struct ia_css_dvs2_coefficients *coefs);
+
+int atomisp_css_set_dis_coefs(struct atomisp_sub_device *asd,
+ struct atomisp_dis_coefficients *coefs);
+
+void atomisp_css_set_zoom_factor(struct atomisp_sub_device *asd,
+ unsigned int zoom);
+
+int atomisp_css_get_wb_config(struct atomisp_sub_device *asd,
+ struct atomisp_wb_config *config);
+
+int atomisp_css_get_ob_config(struct atomisp_sub_device *asd,
+ struct atomisp_ob_config *config);
+
+int atomisp_css_get_dp_config(struct atomisp_sub_device *asd,
+ struct atomisp_dp_config *config);
+
+int atomisp_css_get_de_config(struct atomisp_sub_device *asd,
+ struct atomisp_de_config *config);
+
+int atomisp_css_get_nr_config(struct atomisp_sub_device *asd,
+ struct atomisp_nr_config *config);
+
+int atomisp_css_get_ee_config(struct atomisp_sub_device *asd,
+ struct atomisp_ee_config *config);
+
+int atomisp_css_get_tnr_config(struct atomisp_sub_device *asd,
+ struct atomisp_tnr_config *config);
+
+int atomisp_css_get_ctc_table(struct atomisp_sub_device *asd,
+ struct atomisp_ctc_table *config);
+
+int atomisp_css_get_gamma_table(struct atomisp_sub_device *asd,
+ struct atomisp_gamma_table *config);
+
+int atomisp_css_get_gc_config(struct atomisp_sub_device *asd,
+ struct atomisp_gc_config *config);
+
+int atomisp_css_get_3a_config(struct atomisp_sub_device *asd,
+ struct atomisp_3a_config *config);
+
+int atomisp_css_get_formats_config(struct atomisp_sub_device *asd,
+ struct atomisp_formats_config *formats_config);
+
+void atomisp_css_set_formats_config(struct atomisp_sub_device *asd,
+ struct atomisp_css_formats_config *formats_config);
+
+int atomisp_css_get_zoom_factor(struct atomisp_sub_device *asd,
+ unsigned int *zoom);
+
+struct atomisp_css_shading_table *atomisp_css_shading_table_alloc(
+ unsigned int width, unsigned int height);
+
+void atomisp_css_set_shading_table(struct atomisp_sub_device *asd,
+ struct atomisp_css_shading_table *table);
+
+void atomisp_css_shading_table_free(struct atomisp_css_shading_table *table);
+
+struct atomisp_css_morph_table *atomisp_css_morph_table_allocate(
+ unsigned int width, unsigned int height);
+
+void atomisp_css_set_morph_table(struct atomisp_sub_device *asd,
+ struct atomisp_css_morph_table *table);
+
+void atomisp_css_get_morph_table(struct atomisp_sub_device *asd,
+ struct atomisp_css_morph_table *table);
+
+void atomisp_css_morph_table_free(struct atomisp_css_morph_table *table);
+
+void atomisp_css_set_cont_prev_start_time(struct atomisp_device *isp,
+ unsigned int overlap);
+
+int atomisp_css_get_dis_stat(struct atomisp_sub_device *asd,
+ struct atomisp_dis_statistics *stats);
+
+int atomisp_css_update_stream(struct atomisp_sub_device *asd);
+
+int atomisp_css_create_acc_pipe(struct atomisp_sub_device *asd);
+
+int atomisp_css_start_acc_pipe(struct atomisp_sub_device *asd);
+
+int atomisp_css_stop_acc_pipe(struct atomisp_sub_device *asd);
+
+void atomisp_css_destroy_acc_pipe(struct atomisp_sub_device *asd);
+
+int atomisp_css_load_acc_extension(struct atomisp_sub_device *asd,
+ struct atomisp_css_fw_info *fw,
+ enum atomisp_css_pipe_id pipe_id,
+ unsigned int type);
+
+void atomisp_css_unload_acc_extension(struct atomisp_sub_device *asd,
+ struct atomisp_css_fw_info *fw,
+ enum atomisp_css_pipe_id pipe_id);
+
+int atomisp_css_wait_acc_finish(struct atomisp_sub_device *asd);
+
+void atomisp_css_acc_done(struct atomisp_sub_device *asd);
+
+int atomisp_css_load_acc_binary(struct atomisp_sub_device *asd,
+ struct atomisp_css_fw_info *fw,
+ unsigned int index);
+
+void atomisp_css_unload_acc_binary(struct atomisp_sub_device *asd);
+
+struct atomisp_acc_fw;
+int atomisp_css_set_acc_parameters(struct atomisp_acc_fw *acc_fw);
+
+int atomisp_css_isr_thread(struct atomisp_device *isp,
+ bool *frame_done_found,
+ bool *css_pipe_done);
+
+bool atomisp_css_valid_sof(struct atomisp_device *isp);
+
+void atomisp_en_dz_capt_pipe(struct atomisp_sub_device *asd, bool enable);
+
+#endif /* __ATOMISP_COMPAT_H__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_css20.c b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_css20.c
new file mode 100644
index 000000000000..b830b241e2e6
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_css20.c
@@ -0,0 +1,4722 @@
+/*
+ * Support for Clovertrail PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2013 Intel Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+
+#include <media/videobuf-vmalloc.h>
+#include <media/v4l2-dev.h>
+#include <media/v4l2-event.h>
+
+#include "mmu/isp_mmu.h"
+#include "mmu/sh_mmu_mrfld.h"
+#include "hmm/hmm_bo.h"
+#include "hmm/hmm.h"
+
+#include "atomisp_compat.h"
+#include "atomisp_internal.h"
+#include "atomisp_cmd.h"
+#include "atomisp-regs.h"
+#include "atomisp_fops.h"
+#include "atomisp_ioctl.h"
+#include "atomisp_acc.h"
+
+#include "hrt/hive_isp_css_mm_hrt.h"
+
+#include <asm/intel-mid.h>
+
+#include "ia_css_debug.h"
+#include "ia_css_isp_param.h"
+#include "sh_css_hrt.h"
+#include "ia_css_isys.h"
+
+#include <linux/pm_runtime.h>
+
+/* Assume max number of ACC stages */
+#define MAX_ACC_STAGES 20
+
+/* Ideally, this should come from CSS headers */
+#define NO_LINK -1
+
+/*
+ * Serialize MMIO access. This is needed due to ISP2400 silicon issue
+ * Sighting #4684168: if concurrent accesses happen, the system may hard hang.
+ */
+static DEFINE_SPINLOCK(mmio_lock);
+
+enum frame_info_type {
+ ATOMISP_CSS_VF_FRAME,
+ ATOMISP_CSS_SECOND_VF_FRAME,
+ ATOMISP_CSS_OUTPUT_FRAME,
+ ATOMISP_CSS_SECOND_OUTPUT_FRAME,
+ ATOMISP_CSS_RAW_FRAME,
+};
+
+struct bayer_ds_factor {
+ unsigned int numerator;
+ unsigned int denominator;
+};
+
+void atomisp_css_debug_dump_sp_sw_debug_info(void)
+{
+ ia_css_debug_dump_sp_sw_debug_info();
+}
+
+void atomisp_css_debug_dump_debug_info(const char *context)
+{
+ ia_css_debug_dump_debug_info(context);
+}
+
+void atomisp_css_debug_set_dtrace_level(const unsigned int trace_level)
+{
+ ia_css_debug_set_dtrace_level(trace_level);
+}
+
+unsigned int atomisp_css_debug_get_dtrace_level(void)
+{
+ return ia_css_debug_trace_level;
+}
+
+void atomisp_css2_hw_store_8(hrt_address addr, uint8_t data)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&mmio_lock, flags);
+ _hrt_master_port_store_8(addr, data);
+ spin_unlock_irqrestore(&mmio_lock, flags);
+}
+
+static void atomisp_css2_hw_store_16(hrt_address addr, uint16_t data)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&mmio_lock, flags);
+ _hrt_master_port_store_16(addr, data);
+ spin_unlock_irqrestore(&mmio_lock, flags);
+}
+
+static void atomisp_css2_hw_store_32(hrt_address addr, uint32_t data)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&mmio_lock, flags);
+ _hrt_master_port_store_32(addr, data);
+ spin_unlock_irqrestore(&mmio_lock, flags);
+}
+
+static uint8_t atomisp_css2_hw_load_8(hrt_address addr)
+{
+ unsigned long flags;
+ uint8_t ret;
+
+ spin_lock_irqsave(&mmio_lock, flags);
+ ret = _hrt_master_port_load_8(addr);
+ spin_unlock_irqrestore(&mmio_lock, flags);
+ return ret;
+}
+
+uint16_t atomisp_css2_hw_load_16(hrt_address addr)
+{
+ unsigned long flags;
+ uint16_t ret;
+
+ spin_lock_irqsave(&mmio_lock, flags);
+ ret = _hrt_master_port_load_16(addr);
+ spin_unlock_irqrestore(&mmio_lock, flags);
+ return ret;
+}
+uint32_t atomisp_css2_hw_load_32(hrt_address addr)
+{
+ unsigned long flags;
+ uint32_t ret;
+
+ spin_lock_irqsave(&mmio_lock, flags);
+ ret = _hrt_master_port_load_32(addr);
+ spin_unlock_irqrestore(&mmio_lock, flags);
+ return ret;
+}
+
+static void atomisp_css2_hw_store(hrt_address addr,
+ const void *from, uint32_t n)
+{
+ unsigned long flags;
+ unsigned i;
+ unsigned int _to = (unsigned int)addr;
+ const char *_from = (const char *)from;
+
+ spin_lock_irqsave(&mmio_lock, flags);
+ for (i = 0; i < n; i++, _to++, _from++)
+ _hrt_master_port_store_8(_to, *_from);
+ spin_unlock_irqrestore(&mmio_lock, flags);
+}
+
+static void atomisp_css2_hw_load(hrt_address addr, void *to, uint32_t n)
+{
+ unsigned long flags;
+ unsigned i;
+ char *_to = (char *)to;
+ unsigned int _from = (unsigned int)addr;
+
+ spin_lock_irqsave(&mmio_lock, flags);
+ for (i = 0; i < n; i++, _to++, _from++)
+ *_to = _hrt_master_port_load_8(_from);
+ spin_unlock_irqrestore(&mmio_lock, flags);
+}
+
+static int atomisp_css2_dbg_print(const char *fmt, va_list args)
+{
+ vprintk(fmt, args);
+ return 0;
+}
+
+static int atomisp_css2_dbg_ftrace_print(const char *fmt, va_list args)
+{
+ ftrace_vprintk(fmt, args);
+ return 0;
+}
+
+static int atomisp_css2_err_print(const char *fmt, va_list args)
+{
+ vprintk(fmt, args);
+ return 0;
+}
+
+void atomisp_store_uint32(hrt_address addr, uint32_t data)
+{
+ atomisp_css2_hw_store_32(addr, data);
+}
+
+void atomisp_load_uint32(hrt_address addr, uint32_t *data)
+{
+ *data = atomisp_css2_hw_load_32(addr);
+}
+static int hmm_get_mmu_base_addr(unsigned int *mmu_base_addr)
+{
+ if (sh_mmu_mrfld.get_pd_base == NULL) {
+ dev_err(atomisp_dev, "get mmu base address failed.\n");
+ return -EINVAL;
+ }
+
+ *mmu_base_addr = sh_mmu_mrfld.get_pd_base(&bo_device.mmu,
+ bo_device.mmu.base_address);
+ return 0;
+}
+
+static void atomisp_isp_parameters_clean_up(
+ struct atomisp_css_isp_config *config)
+{
+ /*
+ * Clear the config pointers so they are not applied to the ISP again
+ * when some configs are changed and need to be updated later.
+ */
+ memset(config, 0, sizeof(*config));
+}
+
+static void __dump_pipe_config(struct atomisp_sub_device *asd,
+ struct atomisp_stream_env *stream_env,
+ unsigned int pipe_id)
+{
+ struct atomisp_device *isp = asd->isp;
+ if (stream_env->pipes[pipe_id]) {
+ struct ia_css_pipe_config *p_config;
+ struct ia_css_pipe_extra_config *pe_config;
+ p_config = &stream_env->pipe_configs[pipe_id];
+ pe_config = &stream_env->pipe_extra_configs[pipe_id];
+ dev_dbg(isp->dev, "dumping pipe[%d] config:\n", pipe_id);
+ dev_dbg(isp->dev,
+ "pipe_config.pipe_mode:%d.\n", p_config->mode);
+ dev_dbg(isp->dev,
+ "pipe_config.output_info[0] w=%d, h=%d.\n",
+ p_config->output_info[0].res.width,
+ p_config->output_info[0].res.height);
+ dev_dbg(isp->dev,
+ "pipe_config.vf_pp_in_res w=%d, h=%d.\n",
+ p_config->vf_pp_in_res.width,
+ p_config->vf_pp_in_res.height);
+ dev_dbg(isp->dev,
+ "pipe_config.capt_pp_in_res w=%d, h=%d.\n",
+ p_config->capt_pp_in_res.width,
+ p_config->capt_pp_in_res.height);
+ dev_dbg(isp->dev,
+ "pipe_config.output.padded w=%d.\n",
+ p_config->output_info[0].padded_width);
+ dev_dbg(isp->dev,
+ "pipe_config.vf_output_info[0] w=%d, h=%d.\n",
+ p_config->vf_output_info[0].res.width,
+ p_config->vf_output_info[0].res.height);
+ dev_dbg(isp->dev,
+ "pipe_config.bayer_ds_out_res w=%d, h=%d.\n",
+ p_config->bayer_ds_out_res.width,
+ p_config->bayer_ds_out_res.height);
+ dev_dbg(isp->dev,
+ "pipe_config.envelope w=%d, h=%d.\n",
+ p_config->dvs_envelope.width,
+ p_config->dvs_envelope.height);
+ dev_dbg(isp->dev,
+ "pipe_config.dvs_frame_delay=%d.\n",
+ p_config->dvs_frame_delay);
+ dev_dbg(isp->dev,
+ "pipe_config.isp_pipe_version:%d.\n",
+ p_config->isp_pipe_version);
+ dev_dbg(isp->dev,
+ "pipe_config.acc_extension=%p.\n",
+ p_config->acc_extension);
+ dev_dbg(isp->dev,
+ "pipe_config.acc_stages=%p.\n",
+ p_config->acc_stages);
+ dev_dbg(isp->dev,
+ "pipe_config.num_acc_stages=%d.\n",
+ p_config->num_acc_stages);
+ dev_dbg(isp->dev,
+ "pipe_config.acc_num_execs=%d.\n",
+ p_config->acc_num_execs);
+ dev_dbg(isp->dev,
+ "pipe_config.default_capture_config.capture_mode=%d.\n",
+ p_config->default_capture_config.mode);
+ dev_dbg(isp->dev,
+ "pipe_config.enable_dz=%d.\n",
+ p_config->enable_dz);
+ dev_dbg(isp->dev,
+ "pipe_config.default_capture_config.enable_xnr=%d.\n",
+ p_config->default_capture_config.enable_xnr);
+ dev_dbg(isp->dev,
+ "dumping pipe[%d] extra config:\n", pipe_id);
+ dev_dbg(isp->dev,
+ "pipe_extra_config.enable_raw_binning:%d.\n",
+ pe_config->enable_raw_binning);
+ dev_dbg(isp->dev,
+ "pipe_extra_config.enable_yuv_ds:%d.\n",
+ pe_config->enable_yuv_ds);
+ dev_dbg(isp->dev,
+ "pipe_extra_config.enable_high_speed:%d.\n",
+ pe_config->enable_high_speed);
+ dev_dbg(isp->dev,
+ "pipe_extra_config.enable_dvs_6axis:%d.\n",
+ pe_config->enable_dvs_6axis);
+ dev_dbg(isp->dev,
+ "pipe_extra_config.enable_reduced_pipe:%d.\n",
+ pe_config->enable_reduced_pipe);
+ dev_dbg(isp->dev,
+ "pipe_(extra_)config.enable_dz:%d.\n",
+ p_config->enable_dz);
+ dev_dbg(isp->dev,
+ "pipe_extra_config.disable_vf_pp:%d.\n",
+ pe_config->disable_vf_pp);
+ }
+}
+
+static void __dump_stream_config(struct atomisp_sub_device *asd,
+ struct atomisp_stream_env *stream_env)
+{
+ struct atomisp_device *isp = asd->isp;
+ struct ia_css_stream_config *s_config;
+ int j;
+ bool valid_stream = false;
+
+ for (j = 0; j < IA_CSS_PIPE_ID_NUM; j++) {
+ if (stream_env->pipes[j]) {
+ __dump_pipe_config(asd, stream_env, j);
+ valid_stream = true;
+ }
+ }
+ if (!valid_stream)
+ return;
+ s_config = &stream_env->stream_config;
+ dev_dbg(isp->dev, "stream_config.mode=%d.\n", s_config->mode);
+
+ if (s_config->mode == IA_CSS_INPUT_MODE_SENSOR ||
+ s_config->mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR) {
+ dev_dbg(isp->dev, "stream_config.source.port.port=%d.\n",
+ s_config->source.port.port);
+ dev_dbg(isp->dev, "stream_config.source.port.num_lanes=%d.\n",
+ s_config->source.port.num_lanes);
+ dev_dbg(isp->dev, "stream_config.source.port.timeout=%d.\n",
+ s_config->source.port.timeout);
+ dev_dbg(isp->dev, "stream_config.source.port.rxcount=0x%x.\n",
+ s_config->source.port.rxcount);
+ dev_dbg(isp->dev, "stream_config.source.port.compression.type=%d.\n",
+ s_config->source.port.compression.type);
+ dev_dbg(isp->dev, "stream_config.source.port.compression.compressed_bits_per_pixel=%d.\n",
+ s_config->source.port.compression.
+ compressed_bits_per_pixel);
+ dev_dbg(isp->dev, "stream_config.source.port.compression.uncompressed_bits_per_pixel=%d.\n",
+ s_config->source.port.compression.
+ uncompressed_bits_per_pixel);
+ } else if (s_config->mode == IA_CSS_INPUT_MODE_TPG) {
+ dev_dbg(isp->dev, "stream_config.source.tpg.id=%d.\n",
+ s_config->source.tpg.id);
+ dev_dbg(isp->dev, "stream_config.source.tpg.mode=%d.\n",
+ s_config->source.tpg.mode);
+ dev_dbg(isp->dev, "stream_config.source.tpg.x_mask=%d.\n",
+ s_config->source.tpg.x_mask);
+ dev_dbg(isp->dev, "stream_config.source.tpg.x_delta=%d.\n",
+ s_config->source.tpg.x_delta);
+ dev_dbg(isp->dev, "stream_config.source.tpg.y_mask=%d.\n",
+ s_config->source.tpg.y_mask);
+ dev_dbg(isp->dev, "stream_config.source.tpg.y_delta=%d.\n",
+ s_config->source.tpg.y_delta);
+ dev_dbg(isp->dev, "stream_config.source.tpg.xy_mask=%d.\n",
+ s_config->source.tpg.xy_mask);
+ } else if (s_config->mode == IA_CSS_INPUT_MODE_PRBS) {
+ dev_dbg(isp->dev, "stream_config.source.prbs.id=%d.\n",
+ s_config->source.prbs.id);
+ dev_dbg(isp->dev, "stream_config.source.prbs.h_blank=%d.\n",
+ s_config->source.prbs.h_blank);
+ dev_dbg(isp->dev, "stream_config.source.prbs.v_blank=%d.\n",
+ s_config->source.prbs.v_blank);
+ dev_dbg(isp->dev, "stream_config.source.prbs.seed=%d.\n",
+ s_config->source.prbs.seed);
+ dev_dbg(isp->dev, "stream_config.source.prbs.seed1=%d.\n",
+ s_config->source.prbs.seed1);
+ }
+
+ for (j = 0; j < IA_CSS_STREAM_MAX_ISYS_STREAM_PER_CH; j++) {
+ dev_dbg(isp->dev, "stream_configisys_config[%d].input_res w=%d, h=%d.\n",
+ j,
+ s_config->isys_config[j].input_res.width,
+ s_config->isys_config[j].input_res.height);
+
+ dev_dbg(isp->dev, "stream_configisys_config[%d].linked_isys_stream_id=%d\n",
+ j,
+ s_config->isys_config[j].linked_isys_stream_id);
+
+ dev_dbg(isp->dev, "stream_configisys_config[%d].format=%d\n",
+ j,
+ s_config->isys_config[j].format);
+
+ dev_dbg(isp->dev, "stream_configisys_config[%d].valid=%d.\n",
+ j,
+ s_config->isys_config[j].valid);
+ }
+
+ dev_dbg(isp->dev, "stream_config.input_config.input_res w=%d, h=%d.\n",
+ s_config->input_config.input_res.width,
+ s_config->input_config.input_res.height);
+
+ dev_dbg(isp->dev, "stream_config.input_config.effective_res w=%d, h=%d.\n",
+ s_config->input_config.effective_res.width,
+ s_config->input_config.effective_res.height);
+
+ dev_dbg(isp->dev, "stream_config.input_config.format=%d\n",
+ s_config->input_config.format);
+
+ dev_dbg(isp->dev, "stream_config.input_config.bayer_order=%d.\n",
+ s_config->input_config.bayer_order);
+
+ dev_dbg(isp->dev, "stream_config.pixels_per_clock=%d.\n",
+ s_config->pixels_per_clock);
+ dev_dbg(isp->dev, "stream_config.online=%d.\n", s_config->online);
+ dev_dbg(isp->dev, "stream_config.continuous=%d.\n",
+ s_config->continuous);
+ dev_dbg(isp->dev, "stream_config.disable_cont_viewfinder=%d.\n",
+ s_config->disable_cont_viewfinder);
+ dev_dbg(isp->dev, "stream_config.channel_id=%d.\n",
+ s_config->channel_id);
+ dev_dbg(isp->dev, "stream_config.init_num_cont_raw_buf=%d.\n",
+ s_config->init_num_cont_raw_buf);
+ dev_dbg(isp->dev, "stream_config.target_num_cont_raw_buf=%d.\n",
+ s_config->target_num_cont_raw_buf);
+ dev_dbg(isp->dev, "stream_config.left_padding=%d.\n",
+ s_config->left_padding);
+ dev_dbg(isp->dev, "stream_config.sensor_binning_factor=%d.\n",
+ s_config->sensor_binning_factor);
+ dev_dbg(isp->dev, "stream_config.pixels_per_clock=%d.\n",
+ s_config->pixels_per_clock);
+ dev_dbg(isp->dev, "stream_config.pack_raw_pixels=%d.\n",
+ s_config->pack_raw_pixels);
+ dev_dbg(isp->dev, "stream_config.flash_gpio_pin=%d.\n",
+ s_config->flash_gpio_pin);
+ dev_dbg(isp->dev, "stream_config.mipi_buffer_config.size_mem_words=%d.\n",
+ s_config->mipi_buffer_config.size_mem_words);
+ dev_dbg(isp->dev, "stream_config.mipi_buffer_config.contiguous=%d.\n",
+ s_config->mipi_buffer_config.contiguous);
+ dev_dbg(isp->dev, "stream_config.metadata_config.data_type=%d.\n",
+ s_config->metadata_config.data_type);
+ dev_dbg(isp->dev, "stream_config.metadata_config.resolution w=%d, h=%d.\n",
+ s_config->metadata_config.resolution.width,
+ s_config->metadata_config.resolution.height);
+}
+
+static int __destroy_stream(struct atomisp_sub_device *asd,
+ struct atomisp_stream_env *stream_env, bool force)
+{
+ struct atomisp_device *isp = asd->isp;
+ int i;
+ unsigned long timeout;
+
+ if (!stream_env->stream)
+ return 0;
+
+ if (!force) {
+ for (i = 0; i < IA_CSS_PIPE_ID_NUM; i++)
+ if (stream_env->update_pipe[i])
+ break;
+
+ if (i == IA_CSS_PIPE_ID_NUM)
+ return 0;
+ }
+
+ if (stream_env->stream_state == CSS_STREAM_STARTED
+ && ia_css_stream_stop(stream_env->stream) != IA_CSS_SUCCESS) {
+ dev_err(isp->dev, "stop stream failed.\n");
+ return -EINVAL;
+ }
+
+ if (stream_env->stream_state == CSS_STREAM_STARTED) {
+ timeout = jiffies + msecs_to_jiffies(40);
+ while (1) {
+ if (ia_css_stream_has_stopped(stream_env->stream))
+ break;
+
+ if (time_after(jiffies, timeout)) {
+ dev_warn(isp->dev, "stop stream timeout.\n");
+ break;
+ }
+
+ usleep_range(100, 200);
+ }
+ }
+
+ stream_env->stream_state = CSS_STREAM_STOPPED;
+
+ if (ia_css_stream_destroy(stream_env->stream) != IA_CSS_SUCCESS) {
+ dev_err(isp->dev, "destroy stream failed.\n");
+ return -EINVAL;
+ }
+ stream_env->stream_state = CSS_STREAM_UNINIT;
+ stream_env->stream = NULL;
+
+ return 0;
+}
+
+static int __destroy_streams(struct atomisp_sub_device *asd, bool force)
+{
+ int ret, i;
+ for (i = 0; i < ATOMISP_INPUT_STREAM_NUM; i++) {
+ ret = __destroy_stream(asd, &asd->stream_env[i], force);
+ if (ret)
+ return ret;
+ }
+ asd->stream_prepared = false;
+ return 0;
+}
+static int __create_stream(struct atomisp_sub_device *asd,
+ struct atomisp_stream_env *stream_env)
+{
+ int pipe_index = 0, i;
+ struct ia_css_pipe *multi_pipes[IA_CSS_PIPE_ID_NUM];
+
+ for (i = 0; i < IA_CSS_PIPE_ID_NUM; i++) {
+ if (stream_env->pipes[i])
+ multi_pipes[pipe_index++] = stream_env->pipes[i];
+ }
+ if (pipe_index == 0)
+ return 0;
+
+ stream_env->stream_config.target_num_cont_raw_buf =
+ asd->continuous_raw_buffer_size->val;
+ stream_env->stream_config.channel_id = stream_env->ch_id;
+ stream_env->stream_config.ia_css_enable_raw_buffer_locking =
+ asd->enable_raw_buffer_lock->val;
+
+ __dump_stream_config(asd, stream_env);
+ if (ia_css_stream_create(&stream_env->stream_config,
+ pipe_index, multi_pipes, &stream_env->stream) != IA_CSS_SUCCESS)
+ return -EINVAL;
+ if (ia_css_stream_get_info(stream_env->stream,
+ &stream_env->stream_info) != IA_CSS_SUCCESS) {
+ ia_css_stream_destroy(stream_env->stream);
+ stream_env->stream = NULL;
+ return -EINVAL;
+ }
+
+ stream_env->stream_state = CSS_STREAM_CREATED;
+ return 0;
+}
+
+static int __create_streams(struct atomisp_sub_device *asd)
+{
+ int ret, i;
+
+ for (i = 0; i < ATOMISP_INPUT_STREAM_NUM; i++) {
+ ret = __create_stream(asd, &asd->stream_env[i]);
+ if (ret)
+ goto rollback;
+ }
+ asd->stream_prepared = true;
+ return 0;
+rollback:
+ for (i--; i >= 0; i--)
+ __destroy_stream(asd, &asd->stream_env[i], true);
+ return ret;
+}
+
+static int __destroy_stream_pipes(struct atomisp_sub_device *asd,
+ struct atomisp_stream_env *stream_env,
+ bool force)
+{
+ struct atomisp_device *isp = asd->isp;
+ int ret = 0;
+ int i;
+ for (i = 0; i < IA_CSS_PIPE_ID_NUM; i++) {
+ if (!stream_env->pipes[i] ||
+ !(force || stream_env->update_pipe[i]))
+ continue;
+ if (ia_css_pipe_destroy(stream_env->pipes[i])
+ != IA_CSS_SUCCESS) {
+ dev_err(isp->dev,
+ "destroy pipe[%d]failed.cannot recover.\n", i);
+ ret = -EINVAL;
+ }
+ stream_env->pipes[i] = NULL;
+ stream_env->update_pipe[i] = false;
+ }
+ return ret;
+}
+
+static int __destroy_pipes(struct atomisp_sub_device *asd, bool force)
+{
+ struct atomisp_device *isp = asd->isp;
+ int i;
+ int ret = 0;
+
+ for (i = 0; i < ATOMISP_INPUT_STREAM_NUM; i++) {
+ if (asd->stream_env[i].stream) {
+
+ dev_err(isp->dev,
+ "cannot destroy css pipes for stream[%d].\n",
+ i);
+ continue;
+ }
+
+ ret = __destroy_stream_pipes(asd, &asd->stream_env[i], force);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+void atomisp_destroy_pipes_stream_force(struct atomisp_sub_device *asd)
+{
+ __destroy_streams(asd, true);
+ __destroy_pipes(asd, true);
+}
+
+static void __apply_additional_pipe_config(
+ struct atomisp_sub_device *asd,
+ struct atomisp_stream_env *stream_env,
+ enum ia_css_pipe_id pipe_id)
+{
+ struct atomisp_device *isp = asd->isp;
+
+ if (pipe_id < 0 || pipe_id >= IA_CSS_PIPE_ID_NUM) {
+ dev_err(isp->dev,
+ "wrong pipe_id for additional pipe config.\n");
+ return;
+ }
+
+ /* apply default pipe config */
+ stream_env->pipe_configs[pipe_id].isp_pipe_version = 2;
+ stream_env->pipe_configs[pipe_id].enable_dz =
+ asd->disable_dz->val ? false : true;
+ /* apply isp 2.2 specific config for baytrail*/
+ switch (pipe_id) {
+ case IA_CSS_PIPE_ID_CAPTURE:
+ /* enable capture pp/dz manually, otherwise digital zoom
+ * would fail */
+ if (stream_env->pipe_configs[pipe_id].
+ default_capture_config.mode == CSS_CAPTURE_MODE_RAW)
+ stream_env->pipe_configs[pipe_id].enable_dz = false;
+#ifdef ISP2401
+
+ /* the ISP defaults to the ISP 2.2 pipe; the camera HAL
+ * controls whether the ISP 2.7 pipe is used */
+ if (asd->select_isp_version->val ==
+ ATOMISP_CSS_ISP_PIPE_VERSION_2_7)
+ stream_env->pipe_configs[pipe_id].isp_pipe_version =
+ SH_CSS_ISP_PIPE_VERSION_2_7;
+ else
+ stream_env->pipe_configs[pipe_id].isp_pipe_version =
+ SH_CSS_ISP_PIPE_VERSION_2_2;
+#endif
+ break;
+ case IA_CSS_PIPE_ID_VIDEO:
+ /* enable reduced pipe to have binary
+ * video_dz_2_min selected */
+ stream_env->pipe_extra_configs[pipe_id]
+ .enable_reduced_pipe = true;
+ stream_env->pipe_configs[pipe_id]
+ .enable_dz = false;
+ if (ATOMISP_SOC_CAMERA(asd))
+ stream_env->pipe_configs[pipe_id].enable_dz = true;
+
+ if (asd->params.video_dis_en) {
+ stream_env->pipe_extra_configs[pipe_id]
+ .enable_dvs_6axis = true;
+ stream_env->pipe_configs[pipe_id]
+ .dvs_frame_delay =
+ ATOMISP_CSS2_NUM_DVS_FRAME_DELAY;
+ }
+ break;
+ case IA_CSS_PIPE_ID_PREVIEW:
+ break;
+ case IA_CSS_PIPE_ID_YUVPP:
+ case IA_CSS_PIPE_ID_COPY:
+ if (ATOMISP_SOC_CAMERA(asd))
+ stream_env->pipe_configs[pipe_id].enable_dz = true;
+ else
+ stream_env->pipe_configs[pipe_id].enable_dz = false;
+ break;
+ case IA_CSS_PIPE_ID_ACC:
+ stream_env->pipe_configs[pipe_id].mode = IA_CSS_PIPE_MODE_ACC;
+ stream_env->pipe_configs[pipe_id].enable_dz = false;
+ break;
+ default:
+ break;
+ }
+}
+
+static bool is_pipe_valid_to_current_run_mode(struct atomisp_sub_device *asd,
+ enum ia_css_pipe_id pipe_id)
+{
+ if (!asd)
+ return false;
+
+ if (pipe_id == CSS_PIPE_ID_ACC || pipe_id == CSS_PIPE_ID_YUVPP)
+ return true;
+
+ if (asd->vfpp) {
+ if (asd->vfpp->val == ATOMISP_VFPP_DISABLE_SCALER) {
+ if (pipe_id == IA_CSS_PIPE_ID_VIDEO)
+ return true;
+ else
+ return false;
+ } else if (asd->vfpp->val == ATOMISP_VFPP_DISABLE_LOWLAT) {
+ if (pipe_id == IA_CSS_PIPE_ID_CAPTURE)
+ return true;
+ else
+ return false;
+ }
+ }
+
+ if (!asd->run_mode)
+ return false;
+
+ if (asd->copy_mode && pipe_id == IA_CSS_PIPE_ID_COPY)
+ return true;
+
+ switch (asd->run_mode->val) {
+ case ATOMISP_RUN_MODE_STILL_CAPTURE:
+ if (pipe_id == IA_CSS_PIPE_ID_CAPTURE)
+ return true;
+ else
+ return false;
+ case ATOMISP_RUN_MODE_PREVIEW:
+ if (!asd->continuous_mode->val) {
+ if (pipe_id == IA_CSS_PIPE_ID_PREVIEW)
+ return true;
+ else
+ return false;
+ }
+ /* fall through to ATOMISP_RUN_MODE_CONTINUOUS_CAPTURE */
+ case ATOMISP_RUN_MODE_CONTINUOUS_CAPTURE:
+ if (pipe_id == IA_CSS_PIPE_ID_CAPTURE ||
+ pipe_id == IA_CSS_PIPE_ID_PREVIEW)
+ return true;
+ else
+ return false;
+ case ATOMISP_RUN_MODE_VIDEO:
+ if (!asd->continuous_mode->val) {
+ if (pipe_id == IA_CSS_PIPE_ID_VIDEO ||
+ pipe_id == IA_CSS_PIPE_ID_YUVPP)
+ return true;
+ else
+ return false;
+ }
+ /* fall through to ATOMISP_RUN_MODE_SDV */
+ case ATOMISP_RUN_MODE_SDV:
+ if (pipe_id == IA_CSS_PIPE_ID_CAPTURE ||
+ pipe_id == IA_CSS_PIPE_ID_VIDEO)
+ return true;
+ else
+ return false;
+ }
+
+ return false;
+}
+
+static int __create_pipe(struct atomisp_sub_device *asd,
+ struct atomisp_stream_env *stream_env,
+ enum ia_css_pipe_id pipe_id)
+{
+ struct atomisp_device *isp = asd->isp;
+ struct ia_css_pipe_extra_config extra_config;
+ enum ia_css_err ret;
+
+ if (pipe_id >= IA_CSS_PIPE_ID_NUM)
+ return -EINVAL;
+
+ if (pipe_id != CSS_PIPE_ID_ACC &&
+ !stream_env->pipe_configs[pipe_id].output_info[0].res.width)
+ return 0;
+
+ if (pipe_id == CSS_PIPE_ID_ACC &&
+ !stream_env->pipe_configs[pipe_id].acc_extension)
+ return 0;
+
+ if (!is_pipe_valid_to_current_run_mode(asd, pipe_id))
+ return 0;
+
+ ia_css_pipe_extra_config_defaults(&extra_config);
+
+ __apply_additional_pipe_config(asd, stream_env, pipe_id);
+ if (!memcmp(&extra_config,
+ &stream_env->pipe_extra_configs[pipe_id],
+ sizeof(extra_config)))
+ ret = ia_css_pipe_create(
+ &stream_env->pipe_configs[pipe_id],
+ &stream_env->pipes[pipe_id]);
+ else
+ ret = ia_css_pipe_create_extra(
+ &stream_env->pipe_configs[pipe_id],
+ &stream_env->pipe_extra_configs[pipe_id],
+ &stream_env->pipes[pipe_id]);
+ if (ret != IA_CSS_SUCCESS)
+ dev_err(isp->dev, "create pipe[%d] error.\n", pipe_id);
+ return ret;
+}
+
+static int __create_pipes(struct atomisp_sub_device *asd)
+{
+ enum ia_css_err ret;
+ int i, j;
+
+ for (i = 0; i < ATOMISP_INPUT_STREAM_NUM; i++) {
+ for (j = 0; j < IA_CSS_PIPE_ID_NUM; j++) {
+ ret = __create_pipe(asd, &asd->stream_env[i], j);
+ if (ret != IA_CSS_SUCCESS)
+ break;
+ }
+ if (j < IA_CSS_PIPE_ID_NUM)
+ goto pipe_err;
+ }
+ return 0;
+pipe_err:
+ for (; i >= 0; i--) {
+ for (j--; j >= 0; j--) {
+ if (asd->stream_env[i].pipes[j]) {
+ ia_css_pipe_destroy(asd->stream_env[i].pipes[j]);
+ asd->stream_env[i].pipes[j] = NULL;
+ }
+ }
+ j = IA_CSS_PIPE_ID_NUM;
+ }
+ return -EINVAL;
+}
+
+void atomisp_create_pipes_stream(struct atomisp_sub_device *asd)
+{
+ __create_pipes(asd);
+ __create_streams(asd);
+}
+
+int atomisp_css_update_stream(struct atomisp_sub_device *asd)
+{
+ int ret;
+ struct atomisp_device *isp = asd->isp;
+
+ if (__destroy_streams(asd, true) != IA_CSS_SUCCESS)
+ dev_warn(isp->dev, "destroy stream failed.\n");
+
+ if (__destroy_pipes(asd, true) != IA_CSS_SUCCESS)
+ dev_warn(isp->dev, "destroy pipe failed.\n");
+
+ ret = __create_pipes(asd);
+ if (ret != IA_CSS_SUCCESS) {
+ dev_err(isp->dev, "create pipe failed %d.\n", ret);
+ return -EIO;
+ }
+
+ ret = __create_streams(asd);
+ if (ret != IA_CSS_SUCCESS) {
+ dev_warn(isp->dev, "create stream failed %d.\n", ret);
+ __destroy_pipes(asd, true);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int atomisp_css_init(struct atomisp_device *isp)
+{
+ unsigned int mmu_base_addr;
+ int ret;
+ enum ia_css_err err;
+
+ ret = hmm_get_mmu_base_addr(&mmu_base_addr);
+ if (ret)
+ return ret;
+
+ /* Init ISP */
+ err = ia_css_init(&isp->css_env.isp_css_env, NULL,
+ (uint32_t)mmu_base_addr, IA_CSS_IRQ_TYPE_PULSE);
+ if (err != IA_CSS_SUCCESS) {
+ dev_err(isp->dev, "css init failed --- bad firmware?\n");
+ return -EINVAL;
+ }
+ ia_css_enable_isys_event_queue(true);
+
+ isp->css_initialized = true;
+ dev_dbg(isp->dev, "sh_css_init success\n");
+
+ return 0;
+}
+
+static inline int __set_css_print_env(struct atomisp_device *isp, int opt)
+{
+ int ret = 0;
+
+ if (opt == 0)
+ isp->css_env.isp_css_env.print_env.debug_print = NULL;
+ else if (opt == 1)
+ isp->css_env.isp_css_env.print_env.debug_print =
+ atomisp_css2_dbg_ftrace_print;
+ else if (opt == 2)
+ isp->css_env.isp_css_env.print_env.debug_print =
+ atomisp_css2_dbg_print;
+ else
+ ret = -EINVAL;
+
+ return ret;
+}
+
+int atomisp_css_check_firmware_version(struct atomisp_device *isp)
+{
+ if (!sh_css_check_firmware_version((void *)isp->firmware->data)) {
+ dev_err(isp->dev, "Fw version check failed.\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int atomisp_css_load_firmware(struct atomisp_device *isp)
+{
+ enum ia_css_err err;
+
+ /* set css env */
+ isp->css_env.isp_css_fw.data = (void *)isp->firmware->data;
+ isp->css_env.isp_css_fw.bytes = isp->firmware->size;
+
+ isp->css_env.isp_css_env.hw_access_env.store_8 =
+ atomisp_css2_hw_store_8;
+ isp->css_env.isp_css_env.hw_access_env.store_16 =
+ atomisp_css2_hw_store_16;
+ isp->css_env.isp_css_env.hw_access_env.store_32 =
+ atomisp_css2_hw_store_32;
+
+ isp->css_env.isp_css_env.hw_access_env.load_8 = atomisp_css2_hw_load_8;
+ isp->css_env.isp_css_env.hw_access_env.load_16 =
+ atomisp_css2_hw_load_16;
+ isp->css_env.isp_css_env.hw_access_env.load_32 =
+ atomisp_css2_hw_load_32;
+
+ isp->css_env.isp_css_env.hw_access_env.load = atomisp_css2_hw_load;
+ isp->css_env.isp_css_env.hw_access_env.store = atomisp_css2_hw_store;
+
+ __set_css_print_env(isp, dbg_func);
+
+ isp->css_env.isp_css_env.print_env.error_print = atomisp_css2_err_print;
+
+ /* load isp fw into ISP memory */
+ err = ia_css_load_firmware(&isp->css_env.isp_css_env,
+ &isp->css_env.isp_css_fw);
+ if (err != IA_CSS_SUCCESS) {
+ dev_err(isp->dev, "css load fw failed.\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+void atomisp_css_unload_firmware(struct atomisp_device *isp)
+{
+ ia_css_unload_firmware();
+}
+
+void atomisp_css_uninit(struct atomisp_device *isp)
+{
+ struct atomisp_sub_device *asd;
+ unsigned int i;
+
+ for (i = 0; i < isp->num_of_streams; i++) {
+ asd = &isp->asd[i];
+ atomisp_isp_parameters_clean_up(&asd->params.config);
+ asd->params.css_update_params_needed = false;
+ }
+
+ isp->css_initialized = false;
+ ia_css_uninit();
+}
+
+void atomisp_css_suspend(struct atomisp_device *isp)
+{
+ isp->css_initialized = false;
+ ia_css_uninit();
+}
+
+int atomisp_css_resume(struct atomisp_device *isp)
+{
+ unsigned int mmu_base_addr;
+ int ret;
+
+ ret = hmm_get_mmu_base_addr(&mmu_base_addr);
+ if (ret) {
+ dev_err(isp->dev, "get base address error.\n");
+ return -EINVAL;
+ }
+
+ ret = ia_css_init(&isp->css_env.isp_css_env, NULL,
+ mmu_base_addr, IA_CSS_IRQ_TYPE_PULSE);
+ if (ret) {
+ dev_err(isp->dev, "re-init css failed.\n");
+ return -EINVAL;
+ }
+ ia_css_enable_isys_event_queue(true);
+
+ isp->css_initialized = true;
+ return 0;
+}
+
+int atomisp_css_irq_translate(struct atomisp_device *isp,
+ unsigned int *infos)
+{
+ int err;
+
+ err = ia_css_irq_translate(infos);
+ if (err != IA_CSS_SUCCESS) {
+ dev_warn(isp->dev,
+ "%s:failed to translate irq (err = %d,infos = %d)\n",
+ __func__, err, *infos);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+void atomisp_css_rx_get_irq_info(enum ia_css_csi2_port port,
+ unsigned int *infos)
+{
+#ifndef ISP2401_NEW_INPUT_SYSTEM
+ ia_css_isys_rx_get_irq_info(port, infos);
+#else
+ *infos = 0;
+#endif
+}
+
+void atomisp_css_rx_clear_irq_info(enum ia_css_csi2_port port,
+ unsigned int infos)
+{
+#ifndef ISP2401_NEW_INPUT_SYSTEM
+ ia_css_isys_rx_clear_irq_info(port, infos);
+#endif
+}
+
+int atomisp_css_irq_enable(struct atomisp_device *isp,
+ enum atomisp_css_irq_info info, bool enable)
+{
+ if (ia_css_irq_enable(info, enable) != IA_CSS_SUCCESS) {
+ dev_warn(isp->dev, "%s:Invalid irq info.\n", __func__);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+void atomisp_css_init_struct(struct atomisp_sub_device *asd)
+{
+ int i, j;
+ for (i = 0; i < ATOMISP_INPUT_STREAM_NUM; i++) {
+ asd->stream_env[i].stream = NULL;
+ for (j = 0; j < IA_CSS_PIPE_MODE_NUM; j++) {
+ asd->stream_env[i].pipes[j] = NULL;
+ asd->stream_env[i].update_pipe[j] = false;
+ ia_css_pipe_config_defaults(
+ &asd->stream_env[i].pipe_configs[j]);
+ ia_css_pipe_extra_config_defaults(
+ &asd->stream_env[i].pipe_extra_configs[j]);
+ }
+ ia_css_stream_config_defaults(&asd->stream_env[i].stream_config);
+ }
+}
+
+int atomisp_q_video_buffer_to_css(struct atomisp_sub_device *asd,
+ struct videobuf_vmalloc_memory *vm_mem,
+ enum atomisp_input_stream_id stream_id,
+ enum atomisp_css_buffer_type css_buf_type,
+ enum atomisp_css_pipe_id css_pipe_id)
+{
+ struct atomisp_stream_env *stream_env = &asd->stream_env[stream_id];
+ struct ia_css_buffer css_buf = {0};
+ enum ia_css_err err;
+
+ css_buf.type = css_buf_type;
+ css_buf.data.frame = vm_mem->vaddr;
+
+ err = ia_css_pipe_enqueue_buffer(
+ stream_env->pipes[css_pipe_id], &css_buf);
+ if (err != IA_CSS_SUCCESS)
+ return -EINVAL;
+
+ return 0;
+}
+
+int atomisp_q_metadata_buffer_to_css(struct atomisp_sub_device *asd,
+ struct atomisp_metadata_buf *metadata_buf,
+ enum atomisp_input_stream_id stream_id,
+ enum atomisp_css_pipe_id css_pipe_id)
+{
+ struct atomisp_stream_env *stream_env = &asd->stream_env[stream_id];
+ struct ia_css_buffer buffer = {0};
+ struct atomisp_device *isp = asd->isp;
+
+ buffer.type = IA_CSS_BUFFER_TYPE_METADATA;
+ buffer.data.metadata = metadata_buf->metadata;
+ if (ia_css_pipe_enqueue_buffer(stream_env->pipes[css_pipe_id],
+ &buffer)) {
+ dev_err(isp->dev, "failed to q meta data buffer\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int atomisp_q_s3a_buffer_to_css(struct atomisp_sub_device *asd,
+ struct atomisp_s3a_buf *s3a_buf,
+ enum atomisp_input_stream_id stream_id,
+ enum atomisp_css_pipe_id css_pipe_id)
+{
+ struct atomisp_stream_env *stream_env = &asd->stream_env[stream_id];
+ struct ia_css_buffer buffer = {0};
+ struct atomisp_device *isp = asd->isp;
+
+ buffer.type = IA_CSS_BUFFER_TYPE_3A_STATISTICS;
+ buffer.data.stats_3a = s3a_buf->s3a_data;
+ if (ia_css_pipe_enqueue_buffer(
+ stream_env->pipes[css_pipe_id],
+ &buffer)) {
+ dev_dbg(isp->dev, "failed to q s3a stat buffer\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int atomisp_q_dis_buffer_to_css(struct atomisp_sub_device *asd,
+ struct atomisp_dis_buf *dis_buf,
+ enum atomisp_input_stream_id stream_id,
+ enum atomisp_css_pipe_id css_pipe_id)
+{
+ struct atomisp_stream_env *stream_env = &asd->stream_env[stream_id];
+ struct ia_css_buffer buffer = {0};
+ struct atomisp_device *isp = asd->isp;
+
+ buffer.type = IA_CSS_BUFFER_TYPE_DIS_STATISTICS;
+ buffer.data.stats_dvs = dis_buf->dis_data;
+ if (ia_css_pipe_enqueue_buffer(
+ stream_env->pipes[css_pipe_id],
+ &buffer)) {
+ dev_dbg(isp->dev, "failed to q dvs stat buffer\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+void atomisp_css_mmu_invalidate_cache(void)
+{
+ ia_css_mmu_invalidate_cache();
+}
+
+void atomisp_css_mmu_invalidate_tlb(void)
+{
+ ia_css_mmu_invalidate_cache();
+}
+
+void atomisp_css_mmu_set_page_table_base_index(unsigned long base_index)
+{
+}
+
+/*
+ * Check whether the MIPI buffer size currently in use fulfills
+ * the requirement of the stream to be run.
+ */
+bool __need_realloc_mipi_buffer(struct atomisp_device *isp)
+{
+ unsigned int i;
+
+ for (i = 0; i < isp->num_of_streams; i++) {
+ struct atomisp_sub_device *asd = &isp->asd[i];
+
+ if (asd->streaming !=
+ ATOMISP_DEVICE_STREAMING_ENABLED)
+ continue;
+ if (asd->mipi_frame_size < isp->mipi_frame_size)
+ return true;
+ }
+
+ return false;
+}
+
+int atomisp_css_start(struct atomisp_sub_device *asd,
+ enum atomisp_css_pipe_id pipe_id, bool in_reset)
+{
+ struct atomisp_device *isp = asd->isp;
+ bool sp_is_started = false;
+ int ret = 0, i = 0;
+ if (in_reset) {
+ if (__destroy_streams(asd, true))
+ dev_warn(isp->dev, "destroy stream failed.\n");
+
+ if (__destroy_pipes(asd, true))
+ dev_warn(isp->dev, "destroy pipe failed.\n");
+
+ if (__create_pipes(asd)) {
+ dev_err(isp->dev, "create pipe error.\n");
+ return -EINVAL;
+ }
+ if (__create_streams(asd)) {
+ dev_err(isp->dev, "create stream error.\n");
+ ret = -EINVAL;
+ goto stream_err;
+ }
+ /* in_reset == true: reload the acc extension firmware after recovery */
+ atomisp_acc_load_extensions(asd);
+ }
+
+ /*
+ * In the dual stream case it is possible that:
+ * 1: this stream is at the stage where:
+ * - set_fmt has been called
+ * - stream on has not been called yet
+ * 2: for the other stream, stream off has been called, which has
+ * already triggered a css reset.
+ *
+ * Thus the stream created in set_fmt got destroyed and needs to be
+ * recreated on the next stream on.
+ */
+ if (!asd->stream_prepared) {
+ if (__create_pipes(asd)) {
+ dev_err(isp->dev, "create pipe error.\n");
+ return -EINVAL;
+ }
+ if (__create_streams(asd)) {
+ dev_err(isp->dev, "create stream error.\n");
+ ret = -EINVAL;
+ goto stream_err;
+ }
+ }
+ /*
+ * The SP can only be started once.
+ * If atomisp_streaming_count() tells us that a subdev is already
+ * streaming, the SP has been started previously, so skip the
+ * start sp procedure.
+ */
+ if (atomisp_streaming_count(isp)) {
+ dev_dbg(isp->dev, "skip start sp\n");
+ } else {
+ if (!sh_css_hrt_system_is_idle())
+ dev_err(isp->dev, "CSS HW not idle before starting SP\n");
+ if (ia_css_start_sp() != IA_CSS_SUCCESS) {
+ dev_err(isp->dev, "start sp error.\n");
+ ret = -EINVAL;
+ goto start_err;
+ } else {
+ sp_is_started = true;
+ }
+ }
+
+ for (i = 0; i < ATOMISP_INPUT_STREAM_NUM; i++) {
+ if (asd->stream_env[i].stream) {
+ if (ia_css_stream_start(asd->stream_env[i]
+ .stream) != IA_CSS_SUCCESS) {
+ dev_err(isp->dev, "stream[%d] start error.\n", i);
+ ret = -EINVAL;
+ goto start_err;
+ } else {
+ asd->stream_env[i].stream_state = CSS_STREAM_STARTED;
+ dev_dbg(isp->dev, "stream[%d] started.\n", i);
+ }
+ }
+ }
+
+ return 0;
+
+start_err:
+ __destroy_streams(asd, true);
+stream_err:
+ __destroy_pipes(asd, true);
+
+ /*
+ * css 2.0 API limitation: ia_css_stop_sp() can only be called after
+ * destroying all pipes.
+ *
+ * The SP cannot be stopped while other streams are in use.
+ */
+ if ((atomisp_streaming_count(isp) == 0) && sp_is_started)
+ ia_css_stop_sp();
+
+ return ret;
+}
+
+void atomisp_css_update_isp_params(struct atomisp_sub_device *asd)
+{
+ /*
+ * FIXME!
+ * For the ISP2401 new input system, this API is still under
+ * development; calling it would cause a kernel panic.
+ *
+ * VIED BZ: 1458
+ *
+ * Check whether it is Cherry Trail and also the new input system.
+ */
+ if (asd->copy_mode) {
+ dev_warn(asd->isp->dev,
+ "%s: ia_css_stream_set_isp_config() not supported in copy mode!.\n",
+ __func__);
+ return;
+ }
+
+ ia_css_stream_set_isp_config(
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream,
+ &asd->params.config);
+ atomisp_isp_parameters_clean_up(&asd->params.config);
+}
+
+
+void atomisp_css_update_isp_params_on_pipe(struct atomisp_sub_device *asd,
+ struct ia_css_pipe *pipe)
+{
+ enum ia_css_err ret;
+
+ if (!pipe) {
+ atomisp_css_update_isp_params(asd);
+ return;
+ }
+
+ dev_dbg(asd->isp->dev, "%s: apply parameter for ia_css_frame %p with isp_config_id %d on pipe %p.\n",
+ __func__, asd->params.config.output_frame,
+ asd->params.config.isp_config_id, pipe);
+
+ ret = ia_css_stream_set_isp_config_on_pipe(
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream,
+ &asd->params.config, pipe);
+ if (ret != IA_CSS_SUCCESS)
+ dev_warn(asd->isp->dev, "%s: ia_css_stream_set_isp_config_on_pipe failed %d\n",
+ __func__, ret);
+ atomisp_isp_parameters_clean_up(&asd->params.config);
+}
+
+int atomisp_css_queue_buffer(struct atomisp_sub_device *asd,
+ enum atomisp_input_stream_id stream_id,
+ enum atomisp_css_pipe_id pipe_id,
+ enum atomisp_css_buffer_type buf_type,
+ struct atomisp_css_buffer *isp_css_buffer)
+{
+ if (ia_css_pipe_enqueue_buffer(
+ asd->stream_env[stream_id].pipes[pipe_id],
+ &isp_css_buffer->css_buffer)
+ != IA_CSS_SUCCESS)
+ return -EINVAL;
+
+ return 0;
+}
+
+int atomisp_css_dequeue_buffer(struct atomisp_sub_device *asd,
+ enum atomisp_input_stream_id stream_id,
+ enum atomisp_css_pipe_id pipe_id,
+ enum atomisp_css_buffer_type buf_type,
+ struct atomisp_css_buffer *isp_css_buffer)
+{
+ struct atomisp_device *isp = asd->isp;
+ enum ia_css_err err;
+
+ err = ia_css_pipe_dequeue_buffer(
+ asd->stream_env[stream_id].pipes[pipe_id],
+ &isp_css_buffer->css_buffer);
+ if (err != IA_CSS_SUCCESS) {
+ dev_err(isp->dev,
+ "ia_css_pipe_dequeue_buffer failed: 0x%x\n", err);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int atomisp_css_allocate_stat_buffers(struct atomisp_sub_device *asd,
+ uint16_t stream_id,
+ struct atomisp_s3a_buf *s3a_buf,
+ struct atomisp_dis_buf *dis_buf,
+ struct atomisp_metadata_buf *md_buf)
+{
+ struct atomisp_device *isp = asd->isp;
+ struct atomisp_css_dvs_grid_info *dvs_grid_info =
+ atomisp_css_get_dvs_grid_info(&asd->params.curr_grid_info);
+
+ if (s3a_buf && asd->params.curr_grid_info.s3a_grid.enable) {
+ void *s3a_ptr;
+
+ s3a_buf->s3a_data = ia_css_isp_3a_statistics_allocate(
+ &asd->params.curr_grid_info.s3a_grid);
+ if (!s3a_buf->s3a_data) {
+ dev_err(isp->dev, "3a buf allocation failed.\n");
+ return -EINVAL;
+ }
+
+ s3a_ptr = hmm_vmap(s3a_buf->s3a_data->data_ptr, true);
+ s3a_buf->s3a_map = ia_css_isp_3a_statistics_map_allocate(
+ s3a_buf->s3a_data, s3a_ptr);
+ }
+
+ if (dis_buf && dvs_grid_info && dvs_grid_info->enable) {
+ void *dvs_ptr;
+
+ dis_buf->dis_data = ia_css_isp_dvs2_statistics_allocate(
+ dvs_grid_info);
+ if (!dis_buf->dis_data) {
+ dev_err(isp->dev, "dvs buf allocation failed.\n");
+ if (s3a_buf)
+ ia_css_isp_3a_statistics_free(s3a_buf->s3a_data);
+ return -EINVAL;
+ }
+
+ dvs_ptr = hmm_vmap(dis_buf->dis_data->data_ptr, true);
+ dis_buf->dvs_map = ia_css_isp_dvs_statistics_map_allocate(
+ dis_buf->dis_data, dvs_ptr);
+ }
+
+ if (asd->stream_env[stream_id].stream_info.
+ metadata_info.size && md_buf) {
+ md_buf->metadata = ia_css_metadata_allocate(
+ &asd->stream_env[stream_id].stream_info.metadata_info);
+ if (!md_buf->metadata) {
+ if (s3a_buf)
+ ia_css_isp_3a_statistics_free(s3a_buf->s3a_data);
+ if (dis_buf)
+ ia_css_isp_dvs2_statistics_free(dis_buf->dis_data);
+ dev_err(isp->dev, "metadata buf allocation failed.\n");
+ return -EINVAL;
+ }
+ md_buf->md_vptr = hmm_vmap(md_buf->metadata->address, false);
+ }
+
+ return 0;
+}
+
+void atomisp_css_free_3a_buffer(struct atomisp_s3a_buf *s3a_buf)
+{
+ if (s3a_buf->s3a_data)
+ hmm_vunmap(s3a_buf->s3a_data->data_ptr);
+
+ ia_css_isp_3a_statistics_map_free(s3a_buf->s3a_map);
+ s3a_buf->s3a_map = NULL;
+ ia_css_isp_3a_statistics_free(s3a_buf->s3a_data);
+}
+
+void atomisp_css_free_dis_buffer(struct atomisp_dis_buf *dis_buf)
+{
+ if (dis_buf->dis_data)
+ hmm_vunmap(dis_buf->dis_data->data_ptr);
+
+ ia_css_isp_dvs_statistics_map_free(dis_buf->dvs_map);
+ dis_buf->dvs_map = NULL;
+ ia_css_isp_dvs2_statistics_free(dis_buf->dis_data);
+}
+
+void atomisp_css_free_metadata_buffer(struct atomisp_metadata_buf *metadata_buf)
+{
+ if (metadata_buf->md_vptr) {
+ hmm_vunmap(metadata_buf->metadata->address);
+ metadata_buf->md_vptr = NULL;
+ }
+ ia_css_metadata_free(metadata_buf->metadata);
+}
+
+void atomisp_css_free_stat_buffers(struct atomisp_sub_device *asd)
+{
+ struct atomisp_s3a_buf *s3a_buf, *_s3a_buf;
+ struct atomisp_dis_buf *dis_buf, *_dis_buf;
+ struct atomisp_metadata_buf *md_buf, *_md_buf;
+ struct atomisp_css_dvs_grid_info *dvs_grid_info =
+ atomisp_css_get_dvs_grid_info(&asd->params.curr_grid_info);
+ unsigned int i;
+
+ /* 3A statistics use vmalloc, DIS use kmalloc */
+ if (dvs_grid_info && dvs_grid_info->enable) {
+ ia_css_dvs2_coefficients_free(asd->params.css_param.dvs2_coeff);
+ ia_css_dvs2_statistics_free(asd->params.dvs_stat);
+ asd->params.css_param.dvs2_coeff = NULL;
+ asd->params.dvs_stat = NULL;
+ asd->params.dvs_hor_proj_bytes = 0;
+ asd->params.dvs_ver_proj_bytes = 0;
+ asd->params.dvs_hor_coef_bytes = 0;
+ asd->params.dvs_ver_coef_bytes = 0;
+ asd->params.dis_proj_data_valid = false;
+ list_for_each_entry_safe(dis_buf, _dis_buf,
+ &asd->dis_stats, list) {
+ atomisp_css_free_dis_buffer(dis_buf);
+ list_del(&dis_buf->list);
+ kfree(dis_buf);
+ }
+ list_for_each_entry_safe(dis_buf, _dis_buf,
+ &asd->dis_stats_in_css, list) {
+ atomisp_css_free_dis_buffer(dis_buf);
+ list_del(&dis_buf->list);
+ kfree(dis_buf);
+ }
+ }
+ if (asd->params.curr_grid_info.s3a_grid.enable) {
+ ia_css_3a_statistics_free(asd->params.s3a_user_stat);
+ asd->params.s3a_user_stat = NULL;
+ asd->params.s3a_output_bytes = 0;
+ list_for_each_entry_safe(s3a_buf, _s3a_buf,
+ &asd->s3a_stats, list) {
+ atomisp_css_free_3a_buffer(s3a_buf);
+ list_del(&s3a_buf->list);
+ kfree(s3a_buf);
+ }
+ list_for_each_entry_safe(s3a_buf, _s3a_buf,
+ &asd->s3a_stats_in_css, list) {
+ atomisp_css_free_3a_buffer(s3a_buf);
+ list_del(&s3a_buf->list);
+ kfree(s3a_buf);
+ }
+ list_for_each_entry_safe(s3a_buf, _s3a_buf,
+ &asd->s3a_stats_ready, list) {
+ atomisp_css_free_3a_buffer(s3a_buf);
+ list_del(&s3a_buf->list);
+ kfree(s3a_buf);
+ }
+ }
+
+ if (asd->params.css_param.dvs_6axis) {
+ ia_css_dvs2_6axis_config_free(asd->params.css_param.dvs_6axis);
+ asd->params.css_param.dvs_6axis = NULL;
+ }
+
+ for (i = 0; i < ATOMISP_METADATA_TYPE_NUM; i++) {
+ list_for_each_entry_safe(md_buf, _md_buf,
+ &asd->metadata[i], list) {
+ atomisp_css_free_metadata_buffer(md_buf);
+ list_del(&md_buf->list);
+ kfree(md_buf);
+ }
+ list_for_each_entry_safe(md_buf, _md_buf,
+ &asd->metadata_in_css[i], list) {
+ atomisp_css_free_metadata_buffer(md_buf);
+ list_del(&md_buf->list);
+ kfree(md_buf);
+ }
+ list_for_each_entry_safe(md_buf, _md_buf,
+ &asd->metadata_ready[i], list) {
+ atomisp_css_free_metadata_buffer(md_buf);
+ list_del(&md_buf->list);
+ kfree(md_buf);
+ }
+ }
+ asd->params.metadata_width_size = 0;
+ atomisp_free_metadata_output_buf(asd);
+}
+
+int atomisp_css_get_grid_info(struct atomisp_sub_device *asd,
+ enum atomisp_css_pipe_id pipe_id,
+ int source_pad)
+{
+ struct ia_css_pipe_info p_info;
+ struct ia_css_grid_info old_info;
+ struct atomisp_device *isp = asd->isp;
+ int stream_index = atomisp_source_pad_to_stream_id(asd, source_pad);
+ int md_width = asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].
+ stream_config.metadata_config.resolution.width;
+
+ memset(&p_info, 0, sizeof(struct ia_css_pipe_info));
+ memset(&old_info, 0, sizeof(struct ia_css_grid_info));
+
+ if (ia_css_pipe_get_info(
+ asd->stream_env[stream_index].pipes[pipe_id],
+ &p_info) != IA_CSS_SUCCESS) {
+ dev_err(isp->dev, "ia_css_pipe_get_info failed\n");
+ return -EINVAL;
+ }
+
+ memcpy(&old_info, &asd->params.curr_grid_info,
+ sizeof(struct ia_css_grid_info));
+ memcpy(&asd->params.curr_grid_info, &p_info.grid_info,
+ sizeof(struct ia_css_grid_info));
+ /*
+ * Record which css pipe enables s3a_grid.
+ * Currently only one css pipe needs it.
+ */
+ if (asd->params.curr_grid_info.s3a_grid.enable) {
+ if (asd->params.s3a_enabled_pipe != CSS_PIPE_ID_NUM)
+ dev_dbg(isp->dev, "css pipe %d enabled s3a grid replaced by: %d.\n",
+ asd->params.s3a_enabled_pipe, pipe_id);
+ asd->params.s3a_enabled_pipe = pipe_id;
+ }
+
+ /*
+ * If the grid info has not changed and the 3A and DIS statistics
+ * buffers are already allocated (or the buffer size would be zero),
+ * and the metadata width is unchanged, then there is nothing to do.
+ */
+ if (((!memcmp(&old_info, &asd->params.curr_grid_info, sizeof(old_info))
+ && asd->params.s3a_user_stat && asd->params.dvs_stat)
+ || asd->params.curr_grid_info.s3a_grid.width == 0
+ || asd->params.curr_grid_info.s3a_grid.height == 0)
+ && asd->params.metadata_width_size == md_width) {
+ dev_dbg(isp->dev,
+ "grid info change escape. memcmp=%d, s3a_user_stat=%d,"
+ "dvs_stat=%d, s3a.width=%d, s3a.height=%d, metadata width =%d\n",
+ !memcmp(&old_info, &asd->params.curr_grid_info,
+ sizeof(old_info)),
+ !!asd->params.s3a_user_stat, !!asd->params.dvs_stat,
+ asd->params.curr_grid_info.s3a_grid.width,
+ asd->params.curr_grid_info.s3a_grid.height,
+ asd->params.metadata_width_size);
+ return -EINVAL;
+ }
+ asd->params.metadata_width_size = md_width;
+
+ return 0;
+}
+
+int atomisp_alloc_3a_output_buf(struct atomisp_sub_device *asd)
+{
+ if (!asd->params.curr_grid_info.s3a_grid.width ||
+ !asd->params.curr_grid_info.s3a_grid.height)
+ return 0;
+
+ asd->params.s3a_user_stat = ia_css_3a_statistics_allocate(
+ &asd->params.curr_grid_info.s3a_grid);
+ if (!asd->params.s3a_user_stat)
+ return -ENOMEM;
+ /* 3A statistics. These can be big, so we use vmalloc. */
+ asd->params.s3a_output_bytes =
+ asd->params.curr_grid_info.s3a_grid.width *
+ asd->params.curr_grid_info.s3a_grid.height *
+ sizeof(*asd->params.s3a_user_stat->data);
+
+ return 0;
+}
+
+int atomisp_alloc_dis_coef_buf(struct atomisp_sub_device *asd)
+{
+ struct atomisp_css_dvs_grid_info *dvs_grid =
+ atomisp_css_get_dvs_grid_info(&asd->params.curr_grid_info);
+
+ if (!dvs_grid)
+ return 0;
+
+ if (!dvs_grid->enable) {
+ dev_dbg(asd->isp->dev, "%s: dvs_grid not enabled.\n", __func__);
+ return 0;
+ }
+
+ /* DIS coefficients. */
+ asd->params.css_param.dvs2_coeff = ia_css_dvs2_coefficients_allocate(
+ dvs_grid);
+ if (!asd->params.css_param.dvs2_coeff)
+ return -ENOMEM;
+
+ asd->params.dvs_hor_coef_bytes = dvs_grid->num_hor_coefs *
+ sizeof(*asd->params.css_param.dvs2_coeff->hor_coefs.odd_real);
+
+ asd->params.dvs_ver_coef_bytes = dvs_grid->num_ver_coefs *
+ sizeof(*asd->params.css_param.dvs2_coeff->ver_coefs.odd_real);
+
+ /* DIS projections. */
+ asd->params.dis_proj_data_valid = false;
+ asd->params.dvs_stat = ia_css_dvs2_statistics_allocate(dvs_grid);
+ if (!asd->params.dvs_stat)
+ return -ENOMEM;
+
+ asd->params.dvs_hor_proj_bytes =
+ dvs_grid->aligned_height * dvs_grid->aligned_width *
+ sizeof(*asd->params.dvs_stat->hor_prod.odd_real);
+
+ asd->params.dvs_ver_proj_bytes =
+ dvs_grid->aligned_height * dvs_grid->aligned_width *
+ sizeof(*asd->params.dvs_stat->ver_prod.odd_real);
+
+ return 0;
+}
+
+int atomisp_alloc_metadata_output_buf(struct atomisp_sub_device *asd)
+{
+ int i;
+
+ /*
+ * We allocate the CPU-side buffer used for communication with user
+ * space.
+ */
+ for (i = 0; i < ATOMISP_METADATA_TYPE_NUM; i++) {
+ asd->params.metadata_user[i] = atomisp_kernel_malloc(
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].
+ stream_info.metadata_info.size);
+ if (!asd->params.metadata_user[i]) {
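+ /* allocation failed: free the buffers allocated so far */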
+ while (--i >= 0) {
+ atomisp_kernel_free(asd->params.metadata_user[i]);
+ asd->params.metadata_user[i] = NULL;
+ }
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+void atomisp_free_metadata_output_buf(struct atomisp_sub_device *asd)
+{
+ unsigned int i;
+
+ for (i = 0; i < ATOMISP_METADATA_TYPE_NUM; i++) {
+ if (asd->params.metadata_user[i]) {
+ atomisp_kernel_free(asd->params.metadata_user[i]);
+ asd->params.metadata_user[i] = NULL;
+ }
+ }
+}
+
+void atomisp_css_get_dis_statistics(struct atomisp_sub_device *asd,
+ struct atomisp_css_buffer *isp_css_buffer,
+ struct ia_css_isp_dvs_statistics_map *dvs_map)
+{
+ if (asd->params.dvs_stat) {
+ if (dvs_map)
+ ia_css_translate_dvs2_statistics(
+ asd->params.dvs_stat, dvs_map);
+ else
+ ia_css_get_dvs2_statistics(asd->params.dvs_stat,
+ isp_css_buffer->css_buffer.data.stats_dvs);
+
+ }
+}
+
+int atomisp_css_dequeue_event(struct atomisp_css_event *current_event)
+{
+ if (ia_css_dequeue_event(&current_event->event) != IA_CSS_SUCCESS)
+ return -EINVAL;
+
+ return 0;
+}
+
+void atomisp_css_temp_pipe_to_pipe_id(struct atomisp_sub_device *asd,
+ struct atomisp_css_event *current_event)
+{
+ /*
+ * FIXME!
+ * Pipe ID reported in CSS event is not correct for new system's
+ * copy pipe.
+ * VIED BZ: 1463
+ */
+ ia_css_temp_pipe_to_pipe_id(current_event->event.pipe,
+ &current_event->pipe);
+ if (asd && asd->copy_mode &&
+ current_event->pipe == IA_CSS_PIPE_ID_CAPTURE)
+ current_event->pipe = IA_CSS_PIPE_ID_COPY;
+}
+
+int atomisp_css_isys_set_resolution(struct atomisp_sub_device *asd,
+ enum atomisp_input_stream_id stream_id,
+ struct v4l2_mbus_framefmt *ffmt,
+ int isys_stream)
+{
+ struct ia_css_stream_config *s_config =
+ &asd->stream_env[stream_id].stream_config;
+
+ if (isys_stream >= IA_CSS_STREAM_MAX_ISYS_STREAM_PER_CH)
+ return -EINVAL;
+
+ s_config->isys_config[isys_stream].input_res.width = ffmt->width;
+ s_config->isys_config[isys_stream].input_res.height = ffmt->height;
+ return 0;
+}
+
+int atomisp_css_input_set_resolution(struct atomisp_sub_device *asd,
+ enum atomisp_input_stream_id stream_id,
+ struct v4l2_mbus_framefmt *ffmt)
+{
+ struct ia_css_stream_config *s_config =
+ &asd->stream_env[stream_id].stream_config;
+
+ s_config->input_config.input_res.width = ffmt->width;
+ s_config->input_config.input_res.height = ffmt->height;
+ return 0;
+}
+
+void atomisp_css_input_set_binning_factor(struct atomisp_sub_device *asd,
+ enum atomisp_input_stream_id stream_id,
+ unsigned int bin_factor)
+{
+ asd->stream_env[stream_id]
+ .stream_config.sensor_binning_factor = bin_factor;
+}
+
+void atomisp_css_input_set_bayer_order(struct atomisp_sub_device *asd,
+ enum atomisp_input_stream_id stream_id,
+ enum atomisp_css_bayer_order bayer_order)
+{
+ struct ia_css_stream_config *s_config =
+ &asd->stream_env[stream_id].stream_config;
+ s_config->input_config.bayer_order = bayer_order;
+}
+
+void atomisp_css_isys_set_link(struct atomisp_sub_device *asd,
+ enum atomisp_input_stream_id stream_id,
+ int link,
+ int isys_stream)
+{
+ struct ia_css_stream_config *s_config =
+ &asd->stream_env[stream_id].stream_config;
+
+ s_config->isys_config[isys_stream].linked_isys_stream_id = link;
+}
+
+void atomisp_css_isys_set_valid(struct atomisp_sub_device *asd,
+ enum atomisp_input_stream_id stream_id,
+ bool valid,
+ int isys_stream)
+{
+ struct ia_css_stream_config *s_config =
+ &asd->stream_env[stream_id].stream_config;
+
+ s_config->isys_config[isys_stream].valid = valid;
+}
+
+void atomisp_css_isys_set_format(struct atomisp_sub_device *asd,
+ enum atomisp_input_stream_id stream_id,
+ enum atomisp_css_stream_format format,
+ int isys_stream)
+{
+
+ struct ia_css_stream_config *s_config =
+ &asd->stream_env[stream_id].stream_config;
+
+ s_config->isys_config[isys_stream].format = format;
+}
+
+void atomisp_css_input_set_format(struct atomisp_sub_device *asd,
+ enum atomisp_input_stream_id stream_id,
+ enum atomisp_css_stream_format format)
+{
+
+ struct ia_css_stream_config *s_config =
+ &asd->stream_env[stream_id].stream_config;
+
+ s_config->input_config.format = format;
+}
+
+int atomisp_css_set_default_isys_config(struct atomisp_sub_device *asd,
+ enum atomisp_input_stream_id stream_id,
+ struct v4l2_mbus_framefmt *ffmt)
+{
+ int i;
+ struct ia_css_stream_config *s_config =
+ &asd->stream_env[stream_id].stream_config;
+ /*
+ * Set all isys configs to not valid.
+ * Currently we support only one stream per channel
+ */
+ for (i = IA_CSS_STREAM_ISYS_STREAM_0;
+ i < IA_CSS_STREAM_MAX_ISYS_STREAM_PER_CH; i++)
+ s_config->isys_config[i].valid = false;
+
+ atomisp_css_isys_set_resolution(asd, stream_id, ffmt,
+ IA_CSS_STREAM_DEFAULT_ISYS_STREAM_IDX);
+ atomisp_css_isys_set_format(asd, stream_id,
+ s_config->input_config.format,
+ IA_CSS_STREAM_DEFAULT_ISYS_STREAM_IDX);
+ atomisp_css_isys_set_link(asd, stream_id, NO_LINK,
+ IA_CSS_STREAM_DEFAULT_ISYS_STREAM_IDX);
+ atomisp_css_isys_set_valid(asd, stream_id, true,
+ IA_CSS_STREAM_DEFAULT_ISYS_STREAM_IDX);
+
+ return 0;
+}
+
+int atomisp_css_isys_two_stream_cfg(struct atomisp_sub_device *asd,
+ enum atomisp_input_stream_id stream_id,
+ enum atomisp_css_stream_format input_format)
+{
+ struct ia_css_stream_config *s_config =
+ &asd->stream_env[stream_id].stream_config;
+
+ s_config->isys_config[IA_CSS_STREAM_ISYS_STREAM_1].input_res.width =
+ s_config->isys_config[IA_CSS_STREAM_ISYS_STREAM_0].input_res.width;
+
+ s_config->isys_config[IA_CSS_STREAM_ISYS_STREAM_1].input_res.height =
+ s_config->isys_config[IA_CSS_STREAM_ISYS_STREAM_0].input_res.height / 2;
+
+ s_config->isys_config[IA_CSS_STREAM_ISYS_STREAM_1].linked_isys_stream_id
+ = IA_CSS_STREAM_ISYS_STREAM_0;
+ s_config->isys_config[IA_CSS_STREAM_ISYS_STREAM_0].format =
+ IA_CSS_STREAM_FORMAT_USER_DEF1;
+ s_config->isys_config[IA_CSS_STREAM_ISYS_STREAM_1].format =
+ IA_CSS_STREAM_FORMAT_USER_DEF2;
+ s_config->isys_config[IA_CSS_STREAM_ISYS_STREAM_1].valid = true;
+ return 0;
+}
+
+void atomisp_css_isys_two_stream_cfg_update_stream1(
+ struct atomisp_sub_device *asd,
+ enum atomisp_input_stream_id stream_id,
+ enum atomisp_css_stream_format input_format,
+ unsigned int width, unsigned int height)
+{
+ struct ia_css_stream_config *s_config =
+ &asd->stream_env[stream_id].stream_config;
+
+ s_config->isys_config[IA_CSS_STREAM_ISYS_STREAM_0].input_res.width =
+ width;
+ s_config->isys_config[IA_CSS_STREAM_ISYS_STREAM_0].input_res.height =
+ height;
+ s_config->isys_config[IA_CSS_STREAM_ISYS_STREAM_0].format =
+ input_format;
+ s_config->isys_config[IA_CSS_STREAM_ISYS_STREAM_0].valid = true;
+}
+
+void atomisp_css_isys_two_stream_cfg_update_stream2(
+ struct atomisp_sub_device *asd,
+ enum atomisp_input_stream_id stream_id,
+ enum atomisp_css_stream_format input_format,
+ unsigned int width, unsigned int height)
+{
+ struct ia_css_stream_config *s_config =
+ &asd->stream_env[stream_id].stream_config;
+
+ s_config->isys_config[IA_CSS_STREAM_ISYS_STREAM_1].input_res.width =
+ width;
+ s_config->isys_config[IA_CSS_STREAM_ISYS_STREAM_1].input_res.height =
+ height;
+ s_config->isys_config[IA_CSS_STREAM_ISYS_STREAM_1].linked_isys_stream_id
+ = IA_CSS_STREAM_ISYS_STREAM_0;
+ s_config->isys_config[IA_CSS_STREAM_ISYS_STREAM_1].format =
+ input_format;
+ s_config->isys_config[IA_CSS_STREAM_ISYS_STREAM_1].valid = true;
+}
+
+int atomisp_css_input_set_effective_resolution(
+ struct atomisp_sub_device *asd,
+ enum atomisp_input_stream_id stream_id,
+ unsigned int width, unsigned int height)
+{
+ struct ia_css_stream_config *s_config =
+ &asd->stream_env[stream_id].stream_config;
+ s_config->input_config.effective_res.width = width;
+ s_config->input_config.effective_res.height = height;
+ return 0;
+}
+
+void atomisp_css_video_set_dis_envelope(struct atomisp_sub_device *asd,
+ unsigned int dvs_w, unsigned int dvs_h)
+{
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL]
+ .pipe_configs[IA_CSS_PIPE_ID_VIDEO].dvs_envelope.width = dvs_w;
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL]
+ .pipe_configs[IA_CSS_PIPE_ID_VIDEO].dvs_envelope.height = dvs_h;
+}
+
+void atomisp_css_input_set_two_pixels_per_clock(
+ struct atomisp_sub_device *asd,
+ bool two_ppc)
+{
+ int i;
+
+ if (asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL]
+ .stream_config.pixels_per_clock == (two_ppc ? 2 : 1))
+ return;
+
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL]
+ .stream_config.pixels_per_clock = (two_ppc ? 2 : 1);
+ for (i = 0; i < IA_CSS_PIPE_ID_NUM; i++)
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL]
+ .update_pipe[i] = true;
+}
+
+void atomisp_css_enable_raw_binning(struct atomisp_sub_device *asd,
+ bool enable)
+{
+ struct atomisp_stream_env *stream_env =
+ &asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL];
+ unsigned int pipe;
+
+ if (asd->run_mode->val == ATOMISP_RUN_MODE_VIDEO)
+ pipe = IA_CSS_PIPE_ID_VIDEO;
+ else
+ pipe = IA_CSS_PIPE_ID_PREVIEW;
+
+ stream_env->pipe_extra_configs[pipe].enable_raw_binning = enable;
+ stream_env->update_pipe[pipe] = true;
+ if (enable)
+ stream_env->pipe_configs[pipe].output_info[0].padded_width =
+ stream_env->stream_config.input_config.effective_res.width;
+}
+
+void atomisp_css_enable_dz(struct atomisp_sub_device *asd, bool enable)
+{
+ int i;
+ for (i = 0; i < IA_CSS_PIPE_ID_NUM; i++)
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL]
+ .pipe_configs[i].enable_dz = enable;
+}
+
+void atomisp_css_capture_set_mode(struct atomisp_sub_device *asd,
+ enum atomisp_css_capture_mode mode)
+{
+ struct atomisp_stream_env *stream_env =
+ &asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL];
+
+ if (stream_env->pipe_configs[IA_CSS_PIPE_ID_CAPTURE]
+ .default_capture_config.mode == mode)
+ return;
+
+ stream_env->pipe_configs[IA_CSS_PIPE_ID_CAPTURE].
+ default_capture_config.mode = mode;
+ stream_env->update_pipe[IA_CSS_PIPE_ID_CAPTURE] = true;
+}
+
+void atomisp_css_input_set_mode(struct atomisp_sub_device *asd,
+ enum atomisp_css_input_mode mode)
+{
+ int i;
+ struct atomisp_device *isp = asd->isp;
+ unsigned int size_mem_words;
+ for (i = 0; i < ATOMISP_INPUT_STREAM_NUM; i++)
+ asd->stream_env[i].stream_config.mode = mode;
+
+ if (isp->inputs[asd->input_curr].type == TEST_PATTERN) {
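+ /* test-pattern input: feed the ISP from the TPG with a checkerboard */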
+ struct ia_css_stream_config *s_config =
+ &asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream_config;
+ s_config->mode = IA_CSS_INPUT_MODE_TPG;
+ s_config->source.tpg.mode = IA_CSS_TPG_MODE_CHECKERBOARD;
+ s_config->source.tpg.x_mask = (1 << 4) - 1;
+ s_config->source.tpg.x_delta = -2;
+ s_config->source.tpg.y_mask = (1 << 4) - 1;
+ s_config->source.tpg.y_delta = 3;
+ s_config->source.tpg.xy_mask = (1 << 8) - 1;
+ return;
+ }
+
+ if (mode != IA_CSS_INPUT_MODE_BUFFERED_SENSOR)
+ return;
+
+ for (i = 0; i < ATOMISP_INPUT_STREAM_NUM; i++) {
+ /*
+ * TODO: the sensor needs to export the embedded_data_size_words
+ * information to atomisp for each setting.
+ * Here we use a large safe value (0x13000).
+ */
+ struct ia_css_stream_config *s_config =
+ &asd->stream_env[i].stream_config;
+
+ if (s_config->input_config.input_res.width == 0)
+ continue;
+
+ if (ia_css_mipi_frame_calculate_size(
+ s_config->input_config.input_res.width,
+ s_config->input_config.input_res.height,
+ s_config->input_config.format,
+ true,
+ 0x13000,
+ &size_mem_words) != IA_CSS_SUCCESS) {
+ if (intel_mid_identify_cpu() ==
+ INTEL_MID_CPU_CHIP_TANGIER)
+ size_mem_words = CSS_MIPI_FRAME_BUFFER_SIZE_2;
+ else
+ size_mem_words = CSS_MIPI_FRAME_BUFFER_SIZE_1;
+ dev_warn(asd->isp->dev,
+ "ia_css_mipi_frame_calculate_size failed,"
+ "applying pre-defined MIPI buffer size %u.\n",
+ size_mem_words);
+ }
+ s_config->mipi_buffer_config.size_mem_words = size_mem_words;
+ s_config->mipi_buffer_config.nof_mipi_buffers = 2;
+ }
+}
+
+void atomisp_css_capture_enable_online(struct atomisp_sub_device *asd,
+ unsigned short stream_index, bool enable)
+{
+ struct atomisp_stream_env *stream_env =
+ &asd->stream_env[stream_index];
+
+ if (stream_env->stream_config.online == !!enable)
+ return;
+
+ stream_env->stream_config.online = !!enable;
+ stream_env->update_pipe[IA_CSS_PIPE_ID_CAPTURE] = true;
+}
+
+void atomisp_css_preview_enable_online(struct atomisp_sub_device *asd,
+ unsigned short stream_index, bool enable)
+{
+ struct atomisp_stream_env *stream_env =
+ &asd->stream_env[stream_index];
+ int i;
+
+ if (stream_env->stream_config.online != !!enable) {
+ stream_env->stream_config.online = !!enable;
+ for (i = 0; i < IA_CSS_PIPE_ID_NUM; i++)
+ stream_env->update_pipe[i] = true;
+ }
+}
+
+void atomisp_css_video_enable_online(struct atomisp_sub_device *asd,
+ bool enable)
+{
+ struct atomisp_stream_env *stream_env =
+ &asd->stream_env[ATOMISP_INPUT_STREAM_VIDEO];
+ int i;
+
+ if (stream_env->stream_config.online != enable) {
+ stream_env->stream_config.online = enable;
+ for (i = 0; i < IA_CSS_PIPE_ID_NUM; i++)
+ stream_env->update_pipe[i] = true;
+ }
+}
+
+void atomisp_css_enable_continuous(struct atomisp_sub_device *asd,
+ bool enable)
+{
+ struct atomisp_stream_env *stream_env =
+ &asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL];
+ int i;
+
+ /*
+ * For SOC cameras there is only one YUVPP pipe in any case,
+ * including ZSL/SDV/continuous viewfinder, so always set
+ * stream_config.continuous to 0.
+ */
+ if (ATOMISP_USE_YUVPP(asd)) {
+ stream_env->stream_config.continuous = 0;
+ stream_env->stream_config.online = 1;
+ return;
+ }
+
+ if (stream_env->stream_config.continuous != !!enable) {
+ stream_env->stream_config.continuous = !!enable;
+ stream_env->stream_config.pack_raw_pixels = true;
+ for (i = 0; i < IA_CSS_PIPE_ID_NUM; i++)
+ stream_env->update_pipe[i] = true;
+ }
+}
+
+void atomisp_css_enable_cvf(struct atomisp_sub_device *asd,
+ bool enable)
+{
+ struct atomisp_stream_env *stream_env =
+ &asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL];
+ int i;
+
+ if (stream_env->stream_config.disable_cont_viewfinder != !enable) {
+ stream_env->stream_config.disable_cont_viewfinder = !enable;
+ for (i = 0; i < IA_CSS_PIPE_ID_NUM; i++)
+ stream_env->update_pipe[i] = true;
+ }
+}
+
+int atomisp_css_input_configure_port(
+ struct atomisp_sub_device *asd,
+ mipi_port_ID_t port,
+ unsigned int num_lanes,
+ unsigned int timeout,
+ unsigned int mipi_freq,
+ enum atomisp_css_stream_format metadata_format,
+ unsigned int metadata_width,
+ unsigned int metadata_height)
+{
+ int i;
+ struct atomisp_stream_env *stream_env;
+ /*
+ * Calculate rx_count as follows:
+ * Input: mipi_freq : CSI-2 bus frequency in Hz
+ * UI = 1 / (2 * mipi_freq) : period of one bit on the bus
+ * min = 85e-9 + 6 * UI : Limits for rx_count in seconds
+ * max = 145e-9 + 10 * UI
+ * rxcount0 = min / (4 / mipi_freq) : convert seconds to byte clocks
+ * rxcount = rxcount0 - 2 : adjust for better results
+ * The formula below is simplified version of the above with
+ * 10-bit fixed points for improved accuracy.
+ */
+ const unsigned int rxcount =
+ min(((mipi_freq / 46000) - 1280) >> 10, 0xffU) * 0x01010101U;
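+ /*
+ * Example (illustrative only): for mipi_freq = 400 MHz this gives
+ * ((400000000 / 46000) - 1280) >> 10 = 7, i.e. rxcount = 0x07070707,
+ * matching the exact formula (92.5 ns / 10 ns per byte clock - 2 ~= 7).
+ */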
+
+ for (i = 0; i < ATOMISP_INPUT_STREAM_NUM; i++) {
+ stream_env = &asd->stream_env[i];
+ stream_env->stream_config.source.port.port = port;
+ stream_env->stream_config.source.port.num_lanes = num_lanes;
+ stream_env->stream_config.source.port.timeout = timeout;
+ if (mipi_freq)
+ stream_env->stream_config.source.port.rxcount = rxcount;
+ stream_env->stream_config.
+ metadata_config.data_type = metadata_format;
+ stream_env->stream_config.
+ metadata_config.resolution.width = metadata_width;
+ stream_env->stream_config.
+ metadata_config.resolution.height = metadata_height;
+ }
+
+ return 0;
+}
+
+int atomisp_css_frame_allocate(struct atomisp_css_frame **frame,
+ unsigned int width, unsigned int height,
+ enum atomisp_css_frame_format format,
+ unsigned int padded_width,
+ unsigned int raw_bit_depth)
+{
+ if (ia_css_frame_allocate(frame, width, height, format,
+ padded_width, raw_bit_depth) != IA_CSS_SUCCESS)
+ return -ENOMEM;
+
+ return 0;
+}
+
+int atomisp_css_frame_allocate_from_info(struct atomisp_css_frame **frame,
+ const struct atomisp_css_frame_info *info)
+{
+ if (ia_css_frame_allocate_from_info(frame, info) != IA_CSS_SUCCESS)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void atomisp_css_frame_free(struct atomisp_css_frame *frame)
+{
+ ia_css_frame_free(frame);
+}
+
+int atomisp_css_frame_map(struct atomisp_css_frame **frame,
+ const struct atomisp_css_frame_info *info,
+ const void *data, uint16_t attribute,
+ void *context)
+{
+ if (ia_css_frame_map(frame, info, data, attribute, context)
+ != IA_CSS_SUCCESS)
+ return -ENOMEM;
+
+ return 0;
+}
+
+int atomisp_css_set_black_frame(struct atomisp_sub_device *asd,
+ const struct atomisp_css_frame *raw_black_frame)
+{
+ if (sh_css_set_black_frame(
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream,
+ raw_black_frame) != IA_CSS_SUCCESS)
+ return -ENOMEM;
+
+ return 0;
+}
+
+int atomisp_css_allocate_continuous_frames(bool init_time,
+ struct atomisp_sub_device *asd)
+{
+ if (ia_css_alloc_continuous_frame_remain(
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream)
+ != IA_CSS_SUCCESS)
+ return -EINVAL;
+ return 0;
+}
+
+void atomisp_css_update_continuous_frames(struct atomisp_sub_device *asd)
+{
+ ia_css_update_continuous_frames(
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream);
+}
+
+int atomisp_css_stop(struct atomisp_sub_device *asd,
+ enum atomisp_css_pipe_id pipe_id, bool in_reset)
+{
+ struct atomisp_device *isp = asd->isp;
+ struct atomisp_s3a_buf *s3a_buf;
+ struct atomisp_dis_buf *dis_buf;
+ struct atomisp_metadata_buf *md_buf;
+ unsigned long irqflags;
+ unsigned int i;
+
+ /* if called from atomisp_reset(), force destroy streams */
+ if (__destroy_streams(asd, true))
+ dev_err(isp->dev, "destroy stream failed.\n");
+
+ /* if called from atomisp_reset(), force destroy all pipes */
+ if (__destroy_pipes(asd, true))
+ dev_err(isp->dev, "destroy pipes failed.\n");
+
+ atomisp_init_raw_buffer_bitmap(asd);
+
+ /*
+ * The SP cannot be stopped while other streams are in use.
+ */
+ if (atomisp_streaming_count(isp) == 0)
+ ia_css_stop_sp();
+
+ if (!in_reset) {
+ struct atomisp_stream_env *stream_env;
+ int i, j;
+ for (i = 0; i < ATOMISP_INPUT_STREAM_NUM; i++) {
+ stream_env = &asd->stream_env[i];
+ for (j = 0; j < IA_CSS_PIPE_ID_NUM; j++) {
+ ia_css_pipe_config_defaults(
+ &stream_env->pipe_configs[j]);
+ ia_css_pipe_extra_config_defaults(
+ &stream_env->pipe_extra_configs[j]);
+ }
+ ia_css_stream_config_defaults(
+ &stream_env->stream_config);
+ }
+ atomisp_isp_parameters_clean_up(&asd->params.config);
+ asd->params.css_update_params_needed = false;
+ }
+
+ /* move stats buffers to free queue list */
+ while (!list_empty(&asd->s3a_stats_in_css)) {
+ s3a_buf = list_entry(asd->s3a_stats_in_css.next,
+ struct atomisp_s3a_buf, list);
+ list_del(&s3a_buf->list);
+ list_add_tail(&s3a_buf->list, &asd->s3a_stats);
+ }
+ while (!list_empty(&asd->s3a_stats_ready)) {
+ s3a_buf = list_entry(asd->s3a_stats_ready.next,
+ struct atomisp_s3a_buf, list);
+ list_del(&s3a_buf->list);
+ list_add_tail(&s3a_buf->list, &asd->s3a_stats);
+ }
+
+ spin_lock_irqsave(&asd->dis_stats_lock, irqflags);
+ while (!list_empty(&asd->dis_stats_in_css)) {
+ dis_buf = list_entry(asd->dis_stats_in_css.next,
+ struct atomisp_dis_buf, list);
+ list_del(&dis_buf->list);
+ list_add_tail(&dis_buf->list, &asd->dis_stats);
+ }
+ asd->params.dis_proj_data_valid = false;
+ spin_unlock_irqrestore(&asd->dis_stats_lock, irqflags);
+
+ for (i = 0; i < ATOMISP_METADATA_TYPE_NUM; i++) {
+ while (!list_empty(&asd->metadata_in_css[i])) {
+ md_buf = list_entry(asd->metadata_in_css[i].next,
+ struct atomisp_metadata_buf, list);
+ list_del(&md_buf->list);
+ list_add_tail(&md_buf->list, &asd->metadata[i]);
+ }
+ while (!list_empty(&asd->metadata_ready[i])) {
+ md_buf = list_entry(asd->metadata_ready[i].next,
+ struct atomisp_metadata_buf, list);
+ list_del(&md_buf->list);
+ list_add_tail(&md_buf->list, &asd->metadata[i]);
+ }
+ }
+
+ atomisp_flush_params_queue(&asd->video_out_capture);
+ atomisp_flush_params_queue(&asd->video_out_vf);
+ atomisp_flush_params_queue(&asd->video_out_preview);
+ atomisp_flush_params_queue(&asd->video_out_video_capture);
+ atomisp_free_css_parameters(&asd->params.css_param);
+ memset(&asd->params.css_param, 0, sizeof(asd->params.css_param));
+ return 0;
+}
+
+int atomisp_css_continuous_set_num_raw_frames(
+ struct atomisp_sub_device *asd,
+ int num_frames)
+{
+ if (asd->enable_raw_buffer_lock->val) {
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL]
+ .stream_config.init_num_cont_raw_buf =
+ ATOMISP_CSS2_NUM_OFFLINE_INIT_CONTINUOUS_FRAMES_LOCK_EN;
+ if (asd->run_mode->val == ATOMISP_RUN_MODE_VIDEO &&
+ asd->params.video_dis_en)
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL]
+ .stream_config.init_num_cont_raw_buf +=
+ ATOMISP_CSS2_NUM_DVS_FRAME_DELAY;
+ } else {
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL]
+ .stream_config.init_num_cont_raw_buf =
+ ATOMISP_CSS2_NUM_OFFLINE_INIT_CONTINUOUS_FRAMES;
+ }
+
+ if (asd->params.video_dis_en)
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL]
+ .stream_config.init_num_cont_raw_buf +=
+ ATOMISP_CSS2_NUM_DVS_FRAME_DELAY;
+
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL]
+ .stream_config.target_num_cont_raw_buf = num_frames;
+ return 0;
+}
+
+void atomisp_css_disable_vf_pp(struct atomisp_sub_device *asd,
+ bool disable)
+{
+ int i;
+
+ for (i = 0; i < IA_CSS_PIPE_ID_NUM; i++)
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL]
+ .pipe_extra_configs[i].disable_vf_pp = !!disable;
+}
+
+static enum ia_css_pipe_mode __pipe_id_to_pipe_mode(
+ struct atomisp_sub_device *asd,
+ enum ia_css_pipe_id pipe_id)
+{
+ struct atomisp_device *isp = asd->isp;
+ struct camera_mipi_info *mipi_info = atomisp_to_sensor_mipi_info(
+ isp->inputs[asd->input_curr].camera);
+
+ switch (pipe_id) {
+ case IA_CSS_PIPE_ID_COPY:
+ /* Currently only YUVPP mode supports YUV420_Legacy format.
+ * Revert this when other pipe modes can support
+ * YUV420_Legacy format.
+ */
+ if (mipi_info && mipi_info->input_format ==
+ ATOMISP_INPUT_FORMAT_YUV420_8_LEGACY)
+ return IA_CSS_PIPE_MODE_YUVPP;
+ return IA_CSS_PIPE_MODE_COPY;
+ case IA_CSS_PIPE_ID_PREVIEW:
+ return IA_CSS_PIPE_MODE_PREVIEW;
+ case IA_CSS_PIPE_ID_CAPTURE:
+ return IA_CSS_PIPE_MODE_CAPTURE;
+ case IA_CSS_PIPE_ID_VIDEO:
+ return IA_CSS_PIPE_MODE_VIDEO;
+ case IA_CSS_PIPE_ID_ACC:
+ return IA_CSS_PIPE_MODE_ACC;
+ case IA_CSS_PIPE_ID_YUVPP:
+ return IA_CSS_PIPE_MODE_YUVPP;
+ default:
+ WARN_ON(1);
+ return IA_CSS_PIPE_MODE_PREVIEW;
+ }
+
+}
+
+static void __configure_output(struct atomisp_sub_device *asd,
+ unsigned int stream_index,
+ unsigned int width, unsigned int height,
+ unsigned int min_width,
+ enum ia_css_frame_format format,
+ enum ia_css_pipe_id pipe_id)
+{
+ struct atomisp_device *isp = asd->isp;
+ struct atomisp_stream_env *stream_env =
+ &asd->stream_env[stream_index];
+ struct ia_css_stream_config *s_config = &stream_env->stream_config;
+
+ stream_env->pipe_configs[pipe_id].mode =
+ __pipe_id_to_pipe_mode(asd, pipe_id);
+ stream_env->update_pipe[pipe_id] = true;
+
+ stream_env->pipe_configs[pipe_id].output_info[0].res.width = width;
+ stream_env->pipe_configs[pipe_id].output_info[0].res.height = height;
+ stream_env->pipe_configs[pipe_id].output_info[0].format = format;
+ stream_env->pipe_configs[pipe_id].output_info[0].padded_width = min_width;
+
+ /* isp binary 2.2 specific setting */
+ if (width > s_config->input_config.effective_res.width ||
+ height > s_config->input_config.effective_res.height) {
+ s_config->input_config.effective_res.width = width;
+ s_config->input_config.effective_res.height = height;
+ }
+
+ dev_dbg(isp->dev, "configuring pipe[%d] output info w=%d.h=%d.f=%d.\n",
+ pipe_id, width, height, format);
+}
+
+static void __configure_video_preview_output(struct atomisp_sub_device *asd,
+ unsigned int stream_index,
+ unsigned int width, unsigned int height,
+ unsigned int min_width,
+ enum ia_css_frame_format format,
+ enum ia_css_pipe_id pipe_id)
+{
+ struct atomisp_device *isp = asd->isp;
+ struct atomisp_stream_env *stream_env =
+ &asd->stream_env[stream_index];
+ struct ia_css_frame_info *css_output_info;
+ struct ia_css_stream_config *stream_config = &stream_env->stream_config;
+
+ stream_env->pipe_configs[pipe_id].mode =
+ __pipe_id_to_pipe_mode(asd, pipe_id);
+ stream_env->update_pipe[pipe_id] = true;
+
+ /*
+ * second_output serves as the video main output in SDV mode
+ * with a SOC camera; output serves as the video main output
+ * in normal video mode.
+ */
+ if (asd->continuous_mode->val)
+ css_output_info = &stream_env->pipe_configs[pipe_id].
+ output_info[ATOMISP_CSS_OUTPUT_SECOND_INDEX];
+ else
+ css_output_info = &stream_env->pipe_configs[pipe_id].
+ output_info[ATOMISP_CSS_OUTPUT_DEFAULT_INDEX];
+
+ css_output_info->res.width = width;
+ css_output_info->res.height = height;
+ css_output_info->format = format;
+ css_output_info->padded_width = min_width;
+
+ /* isp binary 2.2 specific setting */
+ if (width > stream_config->input_config.effective_res.width ||
+ height > stream_config->input_config.effective_res.height) {
+ stream_config->input_config.effective_res.width = width;
+ stream_config->input_config.effective_res.height = height;
+ }
+
+ dev_dbg(isp->dev, "configuring pipe[%d] output info w=%d.h=%d.f=%d.\n",
+ pipe_id, width, height, format);
+}
+
+/*
+ * For CSS2.1, capture pipe uses capture_pp_in_res to configure yuv
+ * downscaling input resolution.
+ */
+static void __configure_capture_pp_input(struct atomisp_sub_device *asd,
+ unsigned int width, unsigned int height,
+ enum ia_css_pipe_id pipe_id)
+{
+ struct atomisp_device *isp = asd->isp;
+ struct atomisp_stream_env *stream_env =
+ &asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL];
+ struct ia_css_stream_config *stream_config = &stream_env->stream_config;
+ struct ia_css_pipe_config *pipe_configs =
+ &stream_env->pipe_configs[pipe_id];
+ struct ia_css_pipe_extra_config *pipe_extra_configs =
+ &stream_env->pipe_extra_configs[pipe_id];
+ unsigned int hor_ds_factor = 0, ver_ds_factor = 0;
+#define CEIL_DIV(a, b) ((b) ? ((a) + (b) - 1) / (b) : 0)
+
+ if (width == 0 && height == 0)
+ return;
+
+ if (width * 9 / 10 < pipe_configs->output_info[0].res.width ||
+ height * 9 / 10 < pipe_configs->output_info[0].res.height)
+ return;
+ /* here we just copy the calculation done in css */
+ hor_ds_factor = CEIL_DIV(width >> 1,
+ pipe_configs->output_info[0].res.width);
+ ver_ds_factor = CEIL_DIV(height >> 1,
+ pipe_configs->output_info[0].res.height);
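+ /*
+ * Example (illustrative only): a 4000x3000 input downscaled to a
+ * 2000x1000 output gives hor_ds_factor = CEIL_DIV(2000, 2000) = 1 and
+ * ver_ds_factor = CEIL_DIV(1500, 1000) = 2; on hardware older than
+ * ISP2401 (or on CHT) this mismatch hits the warning below and the
+ * function returns without enabling yuv_ds.
+ */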
+
+ if ((asd->isp->media_dev.hw_revision <
+ (ATOMISP_HW_REVISION_ISP2401 << ATOMISP_HW_REVISION_SHIFT) ||
+ IS_CHT) && hor_ds_factor != ver_ds_factor) {
+ dev_warn(asd->isp->dev,
+ "Cropping for capture due to FW limitation");
+ return;
+ }
+
+ pipe_configs->mode = __pipe_id_to_pipe_mode(asd, pipe_id);
+ stream_env->update_pipe[pipe_id] = true;
+
+ pipe_extra_configs->enable_yuv_ds = true;
+
+ pipe_configs->capt_pp_in_res.width =
+ stream_config->input_config.effective_res.width;
+ pipe_configs->capt_pp_in_res.height =
+ stream_config->input_config.effective_res.height;
+
+ dev_dbg(isp->dev, "configuring pipe[%d]capture pp input w=%d.h=%d.\n",
+ pipe_id, width, height);
+}
+
+/*
+ * For CSS2.1, the preview pipe can support bayer downscaling, yuv decimation
+ * and yuv downscaling, which need additional configuration.
+ */
+static void __configure_preview_pp_input(struct atomisp_sub_device *asd,
+ unsigned int width, unsigned int height,
+ enum ia_css_pipe_id pipe_id)
+{
+ struct atomisp_device *isp = asd->isp;
+ int out_width, out_height, yuv_ds_in_width, yuv_ds_in_height;
+ struct atomisp_stream_env *stream_env =
+ &asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL];
+ struct ia_css_stream_config *stream_config = &stream_env->stream_config;
+ struct ia_css_pipe_config *pipe_configs =
+ &stream_env->pipe_configs[pipe_id];
+ struct ia_css_pipe_extra_config *pipe_extra_configs =
+ &stream_env->pipe_extra_configs[pipe_id];
+ struct ia_css_resolution *bayer_ds_out_res =
+ &pipe_configs->bayer_ds_out_res;
+ struct ia_css_resolution *vf_pp_in_res =
+ &pipe_configs->vf_pp_in_res;
+ struct ia_css_resolution *effective_res =
+ &stream_config->input_config.effective_res;
+
+ const struct bayer_ds_factor bds_fct[] = {{2, 1}, {3, 2}, {5, 4} };
+ /*
+ * BZ201033: a YUV decimation factor of 4 causes a couple of the
+ * rightmost columns to be shaded. Remove this factor to work around
+ * the CSS bug.
+ * const unsigned int yuv_dec_fct[] = {4, 2};
+ */
+ const unsigned int yuv_dec_fct[] = { 2 };
+ unsigned int i;
+
+ if (width == 0 && height == 0)
+ return;
+
+ pipe_configs->mode = __pipe_id_to_pipe_mode(asd, pipe_id);
+ stream_env->update_pipe[pipe_id] = true;
+
+ out_width = pipe_configs->output_info[0].res.width;
+ out_height = pipe_configs->output_info[0].res.height;
+
+ /*
+ * The ISP can do bayer downscaling, yuv decimation and yuv
+ * downscaling:
+ * 1: Bayer Downscaling: between the effective resolution and
+ * bayer_ds_res_out;
+ * 2: YUV Decimation: between bayer_ds_res_out and vf_pp_in_res;
+ * 3: YUV Downscaling: between vf_pp_in_res and the final vf output
+ *
+ * Rule for Bayer Downscaling: supports factors 2, 1.5 and 1.25
+ * Rule for YUV Decimation: supports factors 2 and 4
+ * Rule for YUV Downscaling: arbitrary value below 2
+ *
+ * General rule of factor distribution among these stages:
+ * 1: try to do Bayer downscaling first if not in online mode.
+ * 2: try to do at most a factor of 2 for YUV downscaling
+ * 3: the remainder goes to YUV decimation
+ *
+ * Note:
+ * Do not configure bayer_ds_out_res if:
+ * online == 1 or continuous == 0 or raw_binning == 0
+ */
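+ /*
+ * Example (illustrative only), assuming continuous mode with raw
+ * binning enabled: effective 2592x1944 with a 1024x768 preview output
+ * selects the 2:1 bayer factor (bayer_ds_out_res = 1296x972); the 2x
+ * YUV decimation does not fit, so vf_pp_in_res stays at 1296x972 and
+ * the remaining ~1.27x is done by YUV downscaling.
+ */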
+ if (stream_config->online || !stream_config->continuous ||
+ !pipe_extra_configs->enable_raw_binning) {
+ bayer_ds_out_res->width = 0;
+ bayer_ds_out_res->height = 0;
+ } else {
+ bayer_ds_out_res->width = effective_res->width;
+ bayer_ds_out_res->height = effective_res->height;
+
+ for (i = 0; i < ARRAY_SIZE(bds_fct); i++) {
+ if (effective_res->width >= out_width *
+ bds_fct[i].numerator / bds_fct[i].denominator &&
+ effective_res->height >= out_height *
+ bds_fct[i].numerator / bds_fct[i].denominator) {
+ bayer_ds_out_res->width =
+ effective_res->width *
+ bds_fct[i].denominator /
+ bds_fct[i].numerator;
+ bayer_ds_out_res->height =
+ effective_res->height *
+ bds_fct[i].denominator /
+ bds_fct[i].numerator;
+ break;
+ }
+ }
+ }
+ /*
+ * calculate the YUV decimation and YUV downscaling factors:
+ * the YUV downscaling factor must not exceed 2,
+ * the YUV decimation factor can be 2 or 4.
+ */
+ /* first decide the yuv_ds input resolution */
+ if (bayer_ds_out_res->width == 0) {
+ yuv_ds_in_width = effective_res->width;
+ yuv_ds_in_height = effective_res->height;
+ } else {
+ yuv_ds_in_width = bayer_ds_out_res->width;
+ yuv_ds_in_height = bayer_ds_out_res->height;
+ }
+
+ vf_pp_in_res->width = yuv_ds_in_width;
+ vf_pp_in_res->height = yuv_ds_in_height;
+
+ /* find out the yuv decimation factor */
+ for (i = 0; i < ARRAY_SIZE(yuv_dec_fct); i++) {
+ if (yuv_ds_in_width >= out_width * yuv_dec_fct[i] &&
+ yuv_ds_in_height >= out_height * yuv_dec_fct[i]) {
+ vf_pp_in_res->width = yuv_ds_in_width / yuv_dec_fct[i];
+ vf_pp_in_res->height = yuv_ds_in_height / yuv_dec_fct[i];
+ break;
+ }
+ }
+
+ if (vf_pp_in_res->width == out_width &&
+ vf_pp_in_res->height == out_height) {
+ pipe_extra_configs->enable_yuv_ds = false;
+ vf_pp_in_res->width = 0;
+ vf_pp_in_res->height = 0;
+ } else {
+ pipe_extra_configs->enable_yuv_ds = true;
+ }
+
+ dev_dbg(isp->dev, "configuring pipe[%d]preview pp input w=%d.h=%d.\n",
+ pipe_id, width, height);
+}
+
+/*
+ * For CSS2.1, the offline video pipe can support bayer decimation and
+ * yuv downscaling, which need additional configuration.
+ */
+static void __configure_video_pp_input(struct atomisp_sub_device *asd,
+ unsigned int width, unsigned int height,
+ enum ia_css_pipe_id pipe_id)
+{
+ struct atomisp_device *isp = asd->isp;
+ int out_width, out_height;
+ struct atomisp_stream_env *stream_env =
+ &asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL];
+ struct ia_css_stream_config *stream_config = &stream_env->stream_config;
+ struct ia_css_pipe_config *pipe_configs =
+ &stream_env->pipe_configs[pipe_id];
+ struct ia_css_pipe_extra_config *pipe_extra_configs =
+ &stream_env->pipe_extra_configs[pipe_id];
+ struct ia_css_resolution *bayer_ds_out_res =
+ &pipe_configs->bayer_ds_out_res;
+ struct ia_css_resolution *effective_res =
+ &stream_config->input_config.effective_res;
+
+ const struct bayer_ds_factor bds_factors[] = {
+ {8, 1}, {6, 1}, {4, 1}, {3, 1}, {2, 1}, {3, 2} };
+ unsigned int i;
+
+ if (width == 0 && height == 0)
+ return;
+
+ pipe_configs->mode = __pipe_id_to_pipe_mode(asd, pipe_id);
+ stream_env->update_pipe[pipe_id] = true;
+
+ pipe_extra_configs->enable_yuv_ds = false;
+
+ /*
+ * If DVS is enabled, the video binary takes care of the dvs envelope
+ * and usually bayer_ds_out_res should be larger than 120% of the
+ * destination resolution; the extra 20% is cropped as the DVS
+ * envelope. If bayer_ds_out_res is less than 120% of the destination,
+ * the ISP can still work, but the DVS quality is not as good.
+ */
+ /* take at least 10% as the envelope */
+ if (asd->params.video_dis_en) {
+ out_width = pipe_configs->output_info[0].res.width * 110 / 100;
+ out_height = pipe_configs->output_info[0].res.height * 110 / 100;
+ } else {
+ out_width = pipe_configs->output_info[0].res.width;
+ out_height = pipe_configs->output_info[0].res.height;
+ }
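+ /*
+ * Example (illustrative only): with DVS enabled and a 1920x1080
+ * output, the bayer decimation factor below is selected against
+ * 2112x1188 (110% of the output) so that some margin is left for the
+ * DVS envelope.
+ */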
+
+ /*
+ * calculate the bayer decimation factor:
+ * 1: only 1.5, 2, 4 and 8 are supported
+ * 2: do not configure bayer_ds_out_res if:
+ * online == 1 or continuous == 0 or raw_binning == 0
+ */
+ if (stream_config->online || !stream_config->continuous) {
+ bayer_ds_out_res->width = 0;
+ bayer_ds_out_res->height = 0;
+ goto done;
+ }
+
+ pipe_extra_configs->enable_raw_binning = true;
+ bayer_ds_out_res->width = effective_res->width;
+ bayer_ds_out_res->height = effective_res->height;
+
+ for (i = 0; i < ARRAY_SIZE(bds_factors); i++) {
+ if (effective_res->width >= out_width *
+ bds_factors[i].numerator / bds_factors[i].denominator &&
+ effective_res->height >= out_height *
+ bds_factors[i].numerator / bds_factors[i].denominator) {
+ bayer_ds_out_res->width = effective_res->width *
+ bds_factors[i].denominator /
+ bds_factors[i].numerator;
+ bayer_ds_out_res->height = effective_res->height *
+ bds_factors[i].denominator /
+ bds_factors[i].numerator;
+ break;
+ }
+ }
+
+ /*
+ * DVS is cropped from the BDS output, so we do not really need to set
+ * the envelope to 20% of the output resolution here. Always set it to
+ * 12x12 per the firmware requirement.
+ */
+ pipe_configs->dvs_envelope.width = 12;
+ pipe_configs->dvs_envelope.height = 12;
+
+done:
+ if (pipe_id == IA_CSS_PIPE_ID_YUVPP)
+ stream_config->left_padding = -1;
+ else
+ stream_config->left_padding = 12;
+ dev_dbg(isp->dev, "configuring pipe[%d]video pp input w=%d.h=%d.\n",
+ pipe_id, width, height);
+}
+
+static void __configure_vf_output(struct atomisp_sub_device *asd,
+ unsigned int width, unsigned int height,
+ unsigned int min_width,
+ enum atomisp_css_frame_format format,
+ enum ia_css_pipe_id pipe_id)
+{
+ struct atomisp_device *isp = asd->isp;
+ struct atomisp_stream_env *stream_env =
+ &asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL];
+ stream_env->pipe_configs[pipe_id].mode =
+ __pipe_id_to_pipe_mode(asd, pipe_id);
+ stream_env->update_pipe[pipe_id] = true;
+
+ stream_env->pipe_configs[pipe_id].vf_output_info[0].res.width = width;
+ stream_env->pipe_configs[pipe_id].vf_output_info[0].res.height = height;
+ stream_env->pipe_configs[pipe_id].vf_output_info[0].format = format;
+ stream_env->pipe_configs[pipe_id].vf_output_info[0].padded_width =
+ min_width;
+ dev_dbg(isp->dev,
+ "configuring pipe[%d] vf output info w=%d.h=%d.f=%d.\n",
+ pipe_id, width, height, format);
+}
+
+static void __configure_video_vf_output(struct atomisp_sub_device *asd,
+ unsigned int width, unsigned int height,
+ unsigned int min_width,
+ enum atomisp_css_frame_format format,
+ enum ia_css_pipe_id pipe_id)
+{
+ struct atomisp_device *isp = asd->isp;
+ struct atomisp_stream_env *stream_env =
+ &asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL];
+ struct ia_css_frame_info *css_output_info;
+ stream_env->pipe_configs[pipe_id].mode =
+ __pipe_id_to_pipe_mode(asd, pipe_id);
+ stream_env->update_pipe[pipe_id] = true;
+
+ /*
+ * second_vf_output serves as the video viewfinder in SDV mode
+ * with a SOC camera; vf_output serves as the video viewfinder
+ * in normal video mode.
+ */
+ if (asd->continuous_mode->val)
+ css_output_info = &stream_env->pipe_configs[pipe_id].
+ vf_output_info[ATOMISP_CSS_OUTPUT_SECOND_INDEX];
+ else
+ css_output_info = &stream_env->pipe_configs[pipe_id].
+ vf_output_info[ATOMISP_CSS_OUTPUT_DEFAULT_INDEX];
+
+ css_output_info->res.width = width;
+ css_output_info->res.height = height;
+ css_output_info->format = format;
+ css_output_info->padded_width = min_width;
+ dev_dbg(isp->dev,
+ "configuring pipe[%d] vf output info w=%d.h=%d.f=%d.\n",
+ pipe_id, width, height, format);
+}
+
+static int __get_frame_info(struct atomisp_sub_device *asd,
+ unsigned int stream_index,
+ struct atomisp_css_frame_info *info,
+ enum frame_info_type type,
+ enum ia_css_pipe_id pipe_id)
+{
+ struct atomisp_device *isp = asd->isp;
+ enum ia_css_err ret;
+ struct ia_css_pipe_info p_info;
+
+ /* FIXME! No need to destroy/recreate all streams */
+ if (__destroy_streams(asd, true))
+ dev_warn(isp->dev, "destroy stream failed.\n");
+
+ if (__destroy_pipes(asd, true))
+ dev_warn(isp->dev, "destroy pipe failed.\n");
+
+ if (__create_pipes(asd))
+ return -EINVAL;
+
+ if (__create_streams(asd))
+ goto stream_err;
+
+ ret = ia_css_pipe_get_info(
+ asd->stream_env[stream_index]
+ .pipes[pipe_id], &p_info);
+ if (ret == IA_CSS_SUCCESS) {
+ switch (type) {
+ case ATOMISP_CSS_VF_FRAME:
+ *info = p_info.vf_output_info[0];
+ dev_dbg(isp->dev, "getting vf frame info.\n");
+ break;
+ case ATOMISP_CSS_SECOND_VF_FRAME:
+ *info = p_info.vf_output_info[1];
+ dev_dbg(isp->dev, "getting second vf frame info.\n");
+ break;
+ case ATOMISP_CSS_OUTPUT_FRAME:
+ *info = p_info.output_info[0];
+ dev_dbg(isp->dev, "getting main frame info.\n");
+ break;
+ case ATOMISP_CSS_SECOND_OUTPUT_FRAME:
+ *info = p_info.output_info[1];
+ dev_dbg(isp->dev, "getting second main frame info.\n");
+ break;
+ case ATOMISP_CSS_RAW_FRAME:
+ *info = p_info.raw_output_info;
+ dev_dbg(isp->dev, "getting raw frame info.\n");
+ }
+ dev_dbg(isp->dev, "get frame info: w=%d, h=%d, num_invalid_frames %d.\n",
+ info->res.width, info->res.height, p_info.num_invalid_frames);
+ return 0;
+ }
+
+stream_err:
+ __destroy_pipes(asd, true);
+ return -EINVAL;
+}
+
+unsigned int atomisp_get_pipe_index(struct atomisp_sub_device *asd,
+ uint16_t source_pad)
+{
+ struct atomisp_device *isp = asd->isp;
+ /*
+ * For SoC cameras, use the yuvpp pipe.
+ */
+ if (ATOMISP_USE_YUVPP(asd))
+ return IA_CSS_PIPE_ID_YUVPP;
+
+ switch (source_pad) {
+ case ATOMISP_SUBDEV_PAD_SOURCE_VIDEO:
+ if (asd->yuvpp_mode)
+ return IA_CSS_PIPE_ID_YUVPP;
+ if (asd->copy_mode)
+ return IA_CSS_PIPE_ID_COPY;
+ if (asd->run_mode->val == ATOMISP_RUN_MODE_VIDEO
+ || asd->vfpp->val == ATOMISP_VFPP_DISABLE_SCALER)
+ return IA_CSS_PIPE_ID_VIDEO;
+ else
+ return IA_CSS_PIPE_ID_CAPTURE;
+ case ATOMISP_SUBDEV_PAD_SOURCE_CAPTURE:
+ if (asd->copy_mode)
+ return IA_CSS_PIPE_ID_COPY;
+ return IA_CSS_PIPE_ID_CAPTURE;
+ case ATOMISP_SUBDEV_PAD_SOURCE_VF:
+ if (!atomisp_is_mbuscode_raw(
+ asd->fmt[asd->capture_pad].fmt.code))
+ return IA_CSS_PIPE_ID_CAPTURE;
+ case ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW:
+ if (asd->yuvpp_mode)
+ return IA_CSS_PIPE_ID_YUVPP;
+ if (asd->copy_mode)
+ return IA_CSS_PIPE_ID_COPY;
+ if (asd->run_mode->val == ATOMISP_RUN_MODE_VIDEO)
+ return IA_CSS_PIPE_ID_VIDEO;
+ else
+ return IA_CSS_PIPE_ID_PREVIEW;
+ }
+ dev_warn(isp->dev,
+ "invalid source pad:%d, return default preview pipe index.\n",
+ source_pad);
+ return IA_CSS_PIPE_ID_PREVIEW;
+}
+
+int atomisp_get_css_frame_info(struct atomisp_sub_device *asd,
+ uint16_t source_pad,
+ struct atomisp_css_frame_info *frame_info)
+{
+ struct ia_css_pipe_info info;
+ int pipe_index = atomisp_get_pipe_index(asd, source_pad);
+ int stream_index;
+ struct atomisp_device *isp = asd->isp;
+
+ if (ATOMISP_SOC_CAMERA(asd))
+ stream_index = atomisp_source_pad_to_stream_id(asd, source_pad);
+ else {
+ stream_index = (pipe_index == IA_CSS_PIPE_ID_YUVPP) ?
+ ATOMISP_INPUT_STREAM_VIDEO :
+ atomisp_source_pad_to_stream_id(asd, source_pad);
+ }
+
+ if (IA_CSS_SUCCESS != ia_css_pipe_get_info(asd->stream_env[stream_index]
+ .pipes[pipe_index], &info)) {
+ dev_err(isp->dev, "ia_css_pipe_get_info FAILED");
+ return -EINVAL;
+ }
+
+ switch (source_pad) {
+ case ATOMISP_SUBDEV_PAD_SOURCE_CAPTURE:
+ *frame_info = info.output_info[0];
+ break;
+ case ATOMISP_SUBDEV_PAD_SOURCE_VIDEO:
+ if (ATOMISP_USE_YUVPP(asd) && asd->continuous_mode->val)
+ *frame_info = info.
+ output_info[ATOMISP_CSS_OUTPUT_SECOND_INDEX];
+ else
+ *frame_info = info.
+ output_info[ATOMISP_CSS_OUTPUT_DEFAULT_INDEX];
+ break;
+ case ATOMISP_SUBDEV_PAD_SOURCE_VF:
+ if (stream_index == ATOMISP_INPUT_STREAM_POSTVIEW)
+ *frame_info = info.output_info[0];
+ else
+ *frame_info = info.vf_output_info[0];
+ break;
+ case ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW:
+ if (asd->run_mode->val == ATOMISP_RUN_MODE_VIDEO &&
+ (pipe_index == IA_CSS_PIPE_ID_VIDEO ||
+ pipe_index == IA_CSS_PIPE_ID_YUVPP))
+ if (ATOMISP_USE_YUVPP(asd) && asd->continuous_mode->val)
+ *frame_info = info.
+ vf_output_info[ATOMISP_CSS_OUTPUT_SECOND_INDEX];
+ else
+ *frame_info = info.
+ vf_output_info[ATOMISP_CSS_OUTPUT_DEFAULT_INDEX];
+ else if (ATOMISP_USE_YUVPP(asd) && asd->continuous_mode->val)
+ *frame_info =
+ info.output_info[ATOMISP_CSS_OUTPUT_SECOND_INDEX];
+ else
+ *frame_info =
+ info.output_info[ATOMISP_CSS_OUTPUT_DEFAULT_INDEX];
+
+ break;
+ default:
+ frame_info = NULL;
+ break;
+ }
+ return frame_info ? 0 : -EINVAL;
+}
+
+int atomisp_css_copy_configure_output(struct atomisp_sub_device *asd,
+ unsigned int stream_index,
+ unsigned int width, unsigned int height,
+ unsigned int padded_width,
+ enum atomisp_css_frame_format format)
+{
+ asd->stream_env[stream_index].pipe_configs[IA_CSS_PIPE_ID_COPY].
+ default_capture_config.mode =
+ CSS_CAPTURE_MODE_RAW;
+
+ __configure_output(asd, stream_index, width, height, padded_width,
+ format, IA_CSS_PIPE_ID_COPY);
+ return 0;
+}
+
+int atomisp_css_yuvpp_configure_output(struct atomisp_sub_device *asd,
+ unsigned int stream_index,
+ unsigned int width, unsigned int height,
+ unsigned int padded_width,
+ enum atomisp_css_frame_format format)
+{
+ asd->stream_env[stream_index].pipe_configs[IA_CSS_PIPE_ID_YUVPP].
+ default_capture_config.mode =
+ CSS_CAPTURE_MODE_RAW;
+
+ __configure_output(asd, stream_index, width, height, padded_width,
+ format, IA_CSS_PIPE_ID_YUVPP);
+ return 0;
+}
+
+int atomisp_css_yuvpp_configure_viewfinder(
+ struct atomisp_sub_device *asd,
+ unsigned int stream_index,
+ unsigned int width, unsigned int height,
+ unsigned int min_width,
+ enum atomisp_css_frame_format format)
+{
+ struct atomisp_stream_env *stream_env =
+ &asd->stream_env[stream_index];
+ enum ia_css_pipe_id pipe_id = IA_CSS_PIPE_ID_YUVPP;
+
+ stream_env->pipe_configs[pipe_id].mode =
+ __pipe_id_to_pipe_mode(asd, pipe_id);
+ stream_env->update_pipe[pipe_id] = true;
+
+ stream_env->pipe_configs[pipe_id].vf_output_info[0].res.width = width;
+ stream_env->pipe_configs[pipe_id].vf_output_info[0].res.height = height;
+ stream_env->pipe_configs[pipe_id].vf_output_info[0].format = format;
+ stream_env->pipe_configs[pipe_id].vf_output_info[0].padded_width =
+ min_width;
+ return 0;
+}
+
+int atomisp_css_yuvpp_get_output_frame_info(
+ struct atomisp_sub_device *asd,
+ unsigned int stream_index,
+ struct atomisp_css_frame_info *info)
+{
+ return __get_frame_info(asd, stream_index, info,
+ ATOMISP_CSS_OUTPUT_FRAME, IA_CSS_PIPE_ID_YUVPP);
+}
+
+int atomisp_css_yuvpp_get_viewfinder_frame_info(
+ struct atomisp_sub_device *asd,
+ unsigned int stream_index,
+ struct atomisp_css_frame_info *info)
+{
+ return __get_frame_info(asd, stream_index, info,
+ ATOMISP_CSS_VF_FRAME, IA_CSS_PIPE_ID_YUVPP);
+}
+
+int atomisp_css_preview_configure_output(struct atomisp_sub_device *asd,
+ unsigned int width, unsigned int height,
+ unsigned int min_width,
+ enum atomisp_css_frame_format format)
+{
+ /*
+ * For SoC cameras, use the yuvpp pipe.
+ */
+ if (ATOMISP_USE_YUVPP(asd))
+ __configure_video_preview_output(asd, ATOMISP_INPUT_STREAM_GENERAL, width, height,
+ min_width, format, IA_CSS_PIPE_ID_YUVPP);
+ else
+ __configure_output(asd, ATOMISP_INPUT_STREAM_GENERAL, width, height,
+ min_width, format, IA_CSS_PIPE_ID_PREVIEW);
+ return 0;
+}
+
+int atomisp_css_capture_configure_output(struct atomisp_sub_device *asd,
+ unsigned int width, unsigned int height,
+ unsigned int min_width,
+ enum atomisp_css_frame_format format)
+{
+ enum ia_css_pipe_id pipe_id;
+
+ /*
+ * For SoC cameras, use the yuvpp pipe.
+ */
+ if (ATOMISP_USE_YUVPP(asd))
+ pipe_id = IA_CSS_PIPE_ID_YUVPP;
+ else
+ pipe_id = IA_CSS_PIPE_ID_CAPTURE;
+
+ __configure_output(asd, ATOMISP_INPUT_STREAM_GENERAL, width, height,
+ min_width, format, pipe_id);
+ return 0;
+}
+
+int atomisp_css_video_configure_output(struct atomisp_sub_device *asd,
+ unsigned int width, unsigned int height,
+ unsigned int min_width,
+ enum atomisp_css_frame_format format)
+{
+ /*
+ * For SoC cameras, use the yuvpp pipe.
+ */
+ if (ATOMISP_USE_YUVPP(asd))
+ __configure_video_preview_output(asd, ATOMISP_INPUT_STREAM_GENERAL, width, height,
+ min_width, format, IA_CSS_PIPE_ID_YUVPP);
+ else
+ __configure_output(asd, ATOMISP_INPUT_STREAM_GENERAL, width, height,
+ min_width, format, IA_CSS_PIPE_ID_VIDEO);
+ return 0;
+}
+
+int atomisp_css_video_configure_viewfinder(
+ struct atomisp_sub_device *asd,
+ unsigned int width, unsigned int height,
+ unsigned int min_width,
+ enum atomisp_css_frame_format format)
+{
+ /*
+ * For SoC cameras, video uses the yuvpp pipe.
+ */
+ if (ATOMISP_USE_YUVPP(asd))
+ __configure_video_vf_output(asd, width, height, min_width, format,
+ IA_CSS_PIPE_ID_YUVPP);
+ else
+ __configure_vf_output(asd, width, height, min_width, format,
+ IA_CSS_PIPE_ID_VIDEO);
+ return 0;
+}
+
+int atomisp_css_capture_configure_viewfinder(
+ struct atomisp_sub_device *asd,
+ unsigned int width, unsigned int height,
+ unsigned int min_width,
+ enum atomisp_css_frame_format format)
+{
+ enum ia_css_pipe_id pipe_id;
+
+ /*
+ * For SoC cameras, video uses the yuvpp pipe.
+ */
+ if (ATOMISP_USE_YUVPP(asd))
+ pipe_id = IA_CSS_PIPE_ID_YUVPP;
+ else
+ pipe_id = IA_CSS_PIPE_ID_CAPTURE;
+
+ __configure_vf_output(asd, width, height, min_width, format,
+ pipe_id);
+ return 0;
+}
+
+int atomisp_css_video_get_viewfinder_frame_info(
+ struct atomisp_sub_device *asd,
+ struct atomisp_css_frame_info *info)
+{
+ enum ia_css_pipe_id pipe_id;
+ enum frame_info_type frame_type = ATOMISP_CSS_VF_FRAME;
+
+ if (ATOMISP_USE_YUVPP(asd)) {
+ pipe_id = IA_CSS_PIPE_ID_YUVPP;
+ if (asd->continuous_mode->val)
+ frame_type = ATOMISP_CSS_SECOND_VF_FRAME;
+ } else {
+ pipe_id = IA_CSS_PIPE_ID_VIDEO;
+ }
+
+ return __get_frame_info(asd, ATOMISP_INPUT_STREAM_GENERAL, info,
+ frame_type, pipe_id);
+}
+
+int atomisp_css_capture_get_viewfinder_frame_info(
+ struct atomisp_sub_device *asd,
+ struct atomisp_css_frame_info *info)
+{
+ enum ia_css_pipe_id pipe_id;
+
+ if (ATOMISP_USE_YUVPP(asd))
+ pipe_id = IA_CSS_PIPE_ID_YUVPP;
+ else
+ pipe_id = IA_CSS_PIPE_ID_CAPTURE;
+
+ return __get_frame_info(asd, ATOMISP_INPUT_STREAM_GENERAL, info,
+ ATOMISP_CSS_VF_FRAME, pipe_id);
+}
+
+int atomisp_css_capture_get_output_raw_frame_info(
+ struct atomisp_sub_device *asd,
+ struct atomisp_css_frame_info *info)
+{
+ if (ATOMISP_USE_YUVPP(asd))
+ return 0;
+
+ return __get_frame_info(asd, ATOMISP_INPUT_STREAM_GENERAL, info,
+ ATOMISP_CSS_RAW_FRAME, IA_CSS_PIPE_ID_CAPTURE);
+}
+
+int atomisp_css_copy_get_output_frame_info(
+ struct atomisp_sub_device *asd,
+ unsigned int stream_index,
+ struct atomisp_css_frame_info *info)
+{
+ return __get_frame_info(asd, stream_index, info,
+ ATOMISP_CSS_OUTPUT_FRAME, IA_CSS_PIPE_ID_COPY);
+}
+
+int atomisp_css_preview_get_output_frame_info(
+ struct atomisp_sub_device *asd,
+ struct atomisp_css_frame_info *info)
+{
+ enum ia_css_pipe_id pipe_id;
+ enum frame_info_type frame_type = ATOMISP_CSS_OUTPUT_FRAME;
+
+ if (ATOMISP_USE_YUVPP(asd)) {
+ pipe_id = IA_CSS_PIPE_ID_YUVPP;
+ if (asd->continuous_mode->val)
+ frame_type = ATOMISP_CSS_SECOND_OUTPUT_FRAME;
+ } else {
+ pipe_id = IA_CSS_PIPE_ID_PREVIEW;
+ }
+
+ return __get_frame_info(asd, ATOMISP_INPUT_STREAM_GENERAL, info,
+ frame_type, pipe_id);
+}
+
+int atomisp_css_capture_get_output_frame_info(
+ struct atomisp_sub_device *asd,
+ struct atomisp_css_frame_info *info)
+{
+ enum ia_css_pipe_id pipe_id;
+
+ if (ATOMISP_USE_YUVPP(asd))
+ pipe_id = IA_CSS_PIPE_ID_YUVPP;
+ else
+ pipe_id = IA_CSS_PIPE_ID_CAPTURE;
+
+ return __get_frame_info(asd, ATOMISP_INPUT_STREAM_GENERAL, info,
+ ATOMISP_CSS_OUTPUT_FRAME, pipe_id);
+}
+
+int atomisp_css_video_get_output_frame_info(
+ struct atomisp_sub_device *asd,
+ struct atomisp_css_frame_info *info)
+{
+ enum ia_css_pipe_id pipe_id;
+ enum frame_info_type frame_type = ATOMISP_CSS_OUTPUT_FRAME;
+
+ if (ATOMISP_USE_YUVPP(asd)) {
+ pipe_id = IA_CSS_PIPE_ID_YUVPP;
+ if (asd->continuous_mode->val)
+ frame_type = ATOMISP_CSS_SECOND_OUTPUT_FRAME;
+ } else {
+ pipe_id = IA_CSS_PIPE_ID_VIDEO;
+ }
+
+ return __get_frame_info(asd, ATOMISP_INPUT_STREAM_GENERAL, info,
+ frame_type, pipe_id);
+}
+
+int atomisp_css_preview_configure_pp_input(
+ struct atomisp_sub_device *asd,
+ unsigned int width, unsigned int height)
+{
+ struct atomisp_stream_env *stream_env =
+ &asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL];
+ __configure_preview_pp_input(asd, width, height,
+ ATOMISP_USE_YUVPP(asd) ?
+ IA_CSS_PIPE_ID_YUVPP : IA_CSS_PIPE_ID_PREVIEW);
+
+ if (width > stream_env->pipe_configs[IA_CSS_PIPE_ID_CAPTURE].
+ capt_pp_in_res.width)
+ __configure_capture_pp_input(asd, width, height,
+ ATOMISP_USE_YUVPP(asd) ?
+ IA_CSS_PIPE_ID_YUVPP : IA_CSS_PIPE_ID_CAPTURE);
+ return 0;
+}
+
+int atomisp_css_capture_configure_pp_input(
+ struct atomisp_sub_device *asd,
+ unsigned int width, unsigned int height)
+{
+ __configure_capture_pp_input(asd, width, height,
+ ATOMISP_USE_YUVPP(asd) ?
+ IA_CSS_PIPE_ID_YUVPP : IA_CSS_PIPE_ID_CAPTURE);
+ return 0;
+}
+
+int atomisp_css_video_configure_pp_input(
+ struct atomisp_sub_device *asd,
+ unsigned int width, unsigned int height)
+{
+ struct atomisp_stream_env *stream_env =
+ &asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL];
+
+ __configure_video_pp_input(asd, width, height,
+ ATOMISP_USE_YUVPP(asd) ?
+ IA_CSS_PIPE_ID_YUVPP : IA_CSS_PIPE_ID_VIDEO);
+
+ if (width > stream_env->pipe_configs[IA_CSS_PIPE_ID_CAPTURE].
+ capt_pp_in_res.width)
+ __configure_capture_pp_input(asd, width, height,
+ ATOMISP_USE_YUVPP(asd) ?
+ IA_CSS_PIPE_ID_YUVPP : IA_CSS_PIPE_ID_CAPTURE);
+ return 0;
+}
+
+int atomisp_css_offline_capture_configure(struct atomisp_sub_device *asd,
+ int num_captures, unsigned int skip, int offset)
+{
+ enum ia_css_err ret;
+
+#ifdef ISP2401
+ dev_dbg(asd->isp->dev, "%s num_capture:%d skip:%d offset:%d\n",
+ __func__, num_captures, skip, offset);
+#endif
+ ret = ia_css_stream_capture(
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream,
+ num_captures, skip, offset);
+ if (ret != IA_CSS_SUCCESS)
+ return -EINVAL;
+
+ return 0;
+}
+
+int atomisp_css_exp_id_capture(struct atomisp_sub_device *asd, int exp_id)
+{
+ enum ia_css_err ret;
+
+ ret = ia_css_stream_capture_frame(
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream,
+ exp_id);
+ if (ret == IA_CSS_ERR_QUEUE_IS_FULL) {
+ /* capture cmd queue is full */
+ return -EBUSY;
+ } else if (ret != IA_CSS_SUCCESS) {
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int atomisp_css_exp_id_unlock(struct atomisp_sub_device *asd, int exp_id)
+{
+ enum ia_css_err ret;
+
+ ret = ia_css_unlock_raw_frame(
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream,
+ exp_id);
+ if (ret == IA_CSS_ERR_QUEUE_IS_FULL)
+ return -EAGAIN;
+ else if (ret != IA_CSS_SUCCESS)
+ return -EIO;
+
+ return 0;
+}
+
+int atomisp_css_capture_enable_xnr(struct atomisp_sub_device *asd,
+ bool enable)
+{
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL]
+ .pipe_configs[IA_CSS_PIPE_ID_CAPTURE]
+ .default_capture_config.enable_xnr = enable;
+ asd->params.capture_config.enable_xnr = enable;
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL]
+ .update_pipe[IA_CSS_PIPE_ID_CAPTURE] = true;
+
+ return 0;
+}
+
+void atomisp_css_send_input_frame(struct atomisp_sub_device *asd,
+ unsigned short *data, unsigned int width,
+ unsigned int height)
+{
+ ia_css_stream_send_input_frame(
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream,
+ data, width, height);
+}
+
+bool atomisp_css_isp_has_started(void)
+{
+ return ia_css_isp_has_started();
+}
+
+void atomisp_css_request_flash(struct atomisp_sub_device *asd)
+{
+ ia_css_stream_request_flash(
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream);
+}
+
+void atomisp_css_set_wb_config(struct atomisp_sub_device *asd,
+ struct atomisp_css_wb_config *wb_config)
+{
+ asd->params.config.wb_config = wb_config;
+}
+
+void atomisp_css_set_ob_config(struct atomisp_sub_device *asd,
+ struct atomisp_css_ob_config *ob_config)
+{
+ asd->params.config.ob_config = ob_config;
+}
+
+void atomisp_css_set_dp_config(struct atomisp_sub_device *asd,
+ struct atomisp_css_dp_config *dp_config)
+{
+ asd->params.config.dp_config = dp_config;
+}
+
+void atomisp_css_set_de_config(struct atomisp_sub_device *asd,
+ struct atomisp_css_de_config *de_config)
+{
+ asd->params.config.de_config = de_config;
+}
+
+void atomisp_css_set_dz_config(struct atomisp_sub_device *asd,
+ struct atomisp_css_dz_config *dz_config)
+{
+ asd->params.config.dz_config = dz_config;
+}
+
+void atomisp_css_set_default_de_config(struct atomisp_sub_device *asd)
+{
+ asd->params.config.de_config = NULL;
+}
+
+void atomisp_css_set_ce_config(struct atomisp_sub_device *asd,
+ struct atomisp_css_ce_config *ce_config)
+{
+ asd->params.config.ce_config = ce_config;
+}
+
+void atomisp_css_set_nr_config(struct atomisp_sub_device *asd,
+ struct atomisp_css_nr_config *nr_config)
+{
+ asd->params.config.nr_config = nr_config;
+}
+
+void atomisp_css_set_ee_config(struct atomisp_sub_device *asd,
+ struct atomisp_css_ee_config *ee_config)
+{
+ asd->params.config.ee_config = ee_config;
+}
+
+void atomisp_css_set_tnr_config(struct atomisp_sub_device *asd,
+ struct atomisp_css_tnr_config *tnr_config)
+{
+ asd->params.config.tnr_config = tnr_config;
+}
+
+void atomisp_css_set_cc_config(struct atomisp_sub_device *asd,
+ struct atomisp_css_cc_config *cc_config)
+{
+ asd->params.config.cc_config = cc_config;
+}
+
+void atomisp_css_set_macc_table(struct atomisp_sub_device *asd,
+ struct atomisp_css_macc_table *macc_table)
+{
+ asd->params.config.macc_table = macc_table;
+}
+
+void atomisp_css_set_macc_config(struct atomisp_sub_device *asd,
+ struct atomisp_css_macc_config *macc_config)
+{
+ asd->params.config.macc_config = macc_config;
+}
+
+void atomisp_css_set_ecd_config(struct atomisp_sub_device *asd,
+ struct atomisp_css_ecd_config *ecd_config)
+{
+ asd->params.config.ecd_config = ecd_config;
+}
+
+void atomisp_css_set_ynr_config(struct atomisp_sub_device *asd,
+ struct atomisp_css_ynr_config *ynr_config)
+{
+ asd->params.config.ynr_config = ynr_config;
+}
+
+void atomisp_css_set_fc_config(struct atomisp_sub_device *asd,
+ struct atomisp_css_fc_config *fc_config)
+{
+ asd->params.config.fc_config = fc_config;
+}
+
+void atomisp_css_set_ctc_config(struct atomisp_sub_device *asd,
+ struct atomisp_css_ctc_config *ctc_config)
+{
+ asd->params.config.ctc_config = ctc_config;
+}
+
+void atomisp_css_set_cnr_config(struct atomisp_sub_device *asd,
+ struct atomisp_css_cnr_config *cnr_config)
+{
+ asd->params.config.cnr_config = cnr_config;
+}
+
+void atomisp_css_set_aa_config(struct atomisp_sub_device *asd,
+ struct atomisp_css_aa_config *aa_config)
+{
+ asd->params.config.aa_config = aa_config;
+}
+
+void atomisp_css_set_baa_config(struct atomisp_sub_device *asd,
+ struct atomisp_css_baa_config *baa_config)
+{
+ asd->params.config.baa_config = baa_config;
+}
+
+void atomisp_css_set_anr_config(struct atomisp_sub_device *asd,
+ struct atomisp_css_anr_config *anr_config)
+{
+ asd->params.config.anr_config = anr_config;
+}
+
+void atomisp_css_set_xnr_config(struct atomisp_sub_device *asd,
+ struct atomisp_css_xnr_config *xnr_config)
+{
+ asd->params.config.xnr_config = xnr_config;
+}
+
+void atomisp_css_set_yuv2rgb_cc_config(struct atomisp_sub_device *asd,
+ struct atomisp_css_cc_config *yuv2rgb_cc_config)
+{
+ asd->params.config.yuv2rgb_cc_config = yuv2rgb_cc_config;
+}
+
+void atomisp_css_set_rgb2yuv_cc_config(struct atomisp_sub_device *asd,
+ struct atomisp_css_cc_config *rgb2yuv_cc_config)
+{
+ asd->params.config.rgb2yuv_cc_config = rgb2yuv_cc_config;
+}
+
+void atomisp_css_set_xnr_table(struct atomisp_sub_device *asd,
+ struct atomisp_css_xnr_table *xnr_table)
+{
+ asd->params.config.xnr_table = xnr_table;
+}
+
+void atomisp_css_set_r_gamma_table(struct atomisp_sub_device *asd,
+ struct atomisp_css_rgb_gamma_table *r_gamma_table)
+{
+ asd->params.config.r_gamma_table = r_gamma_table;
+}
+
+void atomisp_css_set_g_gamma_table(struct atomisp_sub_device *asd,
+ struct atomisp_css_rgb_gamma_table *g_gamma_table)
+{
+ asd->params.config.g_gamma_table = g_gamma_table;
+}
+
+void atomisp_css_set_b_gamma_table(struct atomisp_sub_device *asd,
+ struct atomisp_css_rgb_gamma_table *b_gamma_table)
+{
+ asd->params.config.b_gamma_table = b_gamma_table;
+}
+
+void atomisp_css_set_gamma_table(struct atomisp_sub_device *asd,
+ struct atomisp_css_gamma_table *gamma_table)
+{
+ asd->params.config.gamma_table = gamma_table;
+}
+
+void atomisp_css_set_ctc_table(struct atomisp_sub_device *asd,
+ struct atomisp_css_ctc_table *ctc_table)
+{
+ int i;
+ uint16_t *vamem_ptr = ctc_table->data.vamem_1;
+ int data_size = IA_CSS_VAMEM_1_CTC_TABLE_SIZE;
+ bool valid = false;
+
+ /* workaround: if ctc_table is all 0, do not apply it */
+ if (ctc_table->vamem_type == IA_CSS_VAMEM_TYPE_2) {
+ vamem_ptr = ctc_table->data.vamem_2;
+ data_size = IA_CSS_VAMEM_2_CTC_TABLE_SIZE;
+ }
+
+ for (i = 0; i < data_size; i++) {
+ if (*(vamem_ptr + i)) {
+ valid = true;
+ break;
+ }
+ }
+
+ if (valid)
+ asd->params.config.ctc_table = ctc_table;
+ else
+ dev_warn(asd->isp->dev, "Bypass the invalid ctc_table.\n");
+}
+
+void atomisp_css_set_anr_thres(struct atomisp_sub_device *asd,
+ struct atomisp_css_anr_thres *anr_thres)
+{
+ asd->params.config.anr_thres = anr_thres;
+}
+
+void atomisp_css_set_dvs_6axis(struct atomisp_sub_device *asd,
+ struct atomisp_css_dvs_6axis *dvs_6axis)
+{
+ asd->params.config.dvs_6axis_config = dvs_6axis;
+}
+
+void atomisp_css_set_gc_config(struct atomisp_sub_device *asd,
+ struct atomisp_css_gc_config *gc_config)
+{
+ asd->params.config.gc_config = gc_config;
+}
+
+void atomisp_css_set_3a_config(struct atomisp_sub_device *asd,
+ struct atomisp_css_3a_config *s3a_config)
+{
+ asd->params.config.s3a_config = s3a_config;
+}
+
+void atomisp_css_video_set_dis_vector(struct atomisp_sub_device *asd,
+ struct atomisp_dis_vector *vector)
+{
+ if (!asd->params.config.motion_vector)
+ asd->params.config.motion_vector = &asd->params.css_param.motion_vector;
+
+ memset(asd->params.config.motion_vector,
+ 0, sizeof(struct ia_css_vector));
+ asd->params.css_param.motion_vector.x = vector->x;
+ asd->params.css_param.motion_vector.y = vector->y;
+}
+
+static int atomisp_compare_dvs_grid(struct atomisp_sub_device *asd,
+ struct atomisp_dvs_grid_info *atomgrid)
+{
+ struct atomisp_css_dvs_grid_info *cur =
+ atomisp_css_get_dvs_grid_info(&asd->params.curr_grid_info);
+
+ if (!cur) {
+ dev_err(asd->isp->dev, "dvs grid not available!\n");
+ return -EINVAL;
+ }
+
+ if (sizeof(*cur) != sizeof(*atomgrid)) {
+ dev_err(asd->isp->dev, "dvs grid mis-match!\n");
+ return -EINVAL;
+ }
+
+ if (!cur->enable) {
+ dev_err(asd->isp->dev, "dvs not enabled!\n");
+ return -EINVAL;
+ }
+
+ return memcmp(atomgrid, cur, sizeof(*cur));
+}
+
+void atomisp_css_set_dvs2_coefs(struct atomisp_sub_device *asd,
+ struct ia_css_dvs2_coefficients *coefs)
+{
+ asd->params.config.dvs2_coefs = coefs;
+}
+
+int atomisp_css_set_dis_coefs(struct atomisp_sub_device *asd,
+ struct atomisp_dis_coefficients *coefs)
+{
+ if (atomisp_compare_dvs_grid(asd, &coefs->grid_info) != 0)
+ /* If the grid info in the argument differs from the current
+ grid info, we tell the caller to reset the grid size and
+ try again. */
+ return -EAGAIN;
+
+ if (coefs->hor_coefs.odd_real == NULL ||
+ coefs->hor_coefs.odd_imag == NULL ||
+ coefs->hor_coefs.even_real == NULL ||
+ coefs->hor_coefs.even_imag == NULL ||
+ coefs->ver_coefs.odd_real == NULL ||
+ coefs->ver_coefs.odd_imag == NULL ||
+ coefs->ver_coefs.even_real == NULL ||
+ coefs->ver_coefs.even_imag == NULL ||
+ asd->params.css_param.dvs2_coeff->hor_coefs.odd_real == NULL ||
+ asd->params.css_param.dvs2_coeff->hor_coefs.odd_imag == NULL ||
+ asd->params.css_param.dvs2_coeff->hor_coefs.even_real == NULL ||
+ asd->params.css_param.dvs2_coeff->hor_coefs.even_imag == NULL ||
+ asd->params.css_param.dvs2_coeff->ver_coefs.odd_real == NULL ||
+ asd->params.css_param.dvs2_coeff->ver_coefs.odd_imag == NULL ||
+ asd->params.css_param.dvs2_coeff->ver_coefs.even_real == NULL ||
+ asd->params.css_param.dvs2_coeff->ver_coefs.even_imag == NULL)
+ return -EINVAL;
+
+ if (copy_from_user(asd->params.css_param.dvs2_coeff->hor_coefs.odd_real,
+ coefs->hor_coefs.odd_real, asd->params.dvs_hor_coef_bytes))
+ return -EFAULT;
+ if (copy_from_user(asd->params.css_param.dvs2_coeff->hor_coefs.odd_imag,
+ coefs->hor_coefs.odd_imag, asd->params.dvs_hor_coef_bytes))
+ return -EFAULT;
+ if (copy_from_user(asd->params.css_param.dvs2_coeff->hor_coefs.even_real,
+ coefs->hor_coefs.even_real, asd->params.dvs_hor_coef_bytes))
+ return -EFAULT;
+ if (copy_from_user(asd->params.css_param.dvs2_coeff->hor_coefs.even_imag,
+ coefs->hor_coefs.even_imag, asd->params.dvs_hor_coef_bytes))
+ return -EFAULT;
+
+ if (copy_from_user(asd->params.css_param.dvs2_coeff->ver_coefs.odd_real,
+ coefs->ver_coefs.odd_real, asd->params.dvs_ver_coef_bytes))
+ return -EFAULT;
+ if (copy_from_user(asd->params.css_param.dvs2_coeff->ver_coefs.odd_imag,
+ coefs->ver_coefs.odd_imag, asd->params.dvs_ver_coef_bytes))
+ return -EFAULT;
+ if (copy_from_user(asd->params.css_param.dvs2_coeff->ver_coefs.even_real,
+ coefs->ver_coefs.even_real, asd->params.dvs_ver_coef_bytes))
+ return -EFAULT;
+ if (copy_from_user(asd->params.css_param.dvs2_coeff->ver_coefs.even_imag,
+ coefs->ver_coefs.even_imag, asd->params.dvs_ver_coef_bytes))
+ return -EFAULT;
+
+ asd->params.css_param.update_flag.dvs2_coefs =
+ (struct atomisp_dvs2_coefficients *)
+ asd->params.css_param.dvs2_coeff;
+ /* FIXME! */
+/* asd->params.dis_proj_data_valid = false; */
+ asd->params.css_update_params_needed = true;
+
+ return 0;
+}
+
+void atomisp_css_set_zoom_factor(struct atomisp_sub_device *asd,
+ unsigned int zoom)
+{
+ struct atomisp_device *isp = asd->isp;
+
+ if (zoom == asd->params.css_param.dz_config.dx &&
+ zoom == asd->params.css_param.dz_config.dy) {
+ dev_dbg(isp->dev, "same zoom scale. skipped.\n");
+ return;
+ }
+
+ memset(&asd->params.css_param.dz_config, 0,
+ sizeof(struct ia_css_dz_config));
+ asd->params.css_param.dz_config.dx = zoom;
+ asd->params.css_param.dz_config.dy = zoom;
+
+ asd->params.css_param.update_flag.dz_config =
+ (struct atomisp_dz_config *) &asd->params.css_param.dz_config;
+ asd->params.css_update_params_needed = true;
+}
+
+void atomisp_css_set_formats_config(struct atomisp_sub_device *asd,
+ struct atomisp_css_formats_config *formats_config)
+{
+ asd->params.config.formats_config = formats_config;
+}
+
+int atomisp_css_get_wb_config(struct atomisp_sub_device *asd,
+ struct atomisp_wb_config *config)
+{
+ struct atomisp_css_wb_config wb_config;
+ struct ia_css_isp_config isp_config;
+ struct atomisp_device *isp = asd->isp;
+
+ if (!asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream) {
+ dev_err(isp->dev, "%s called after streamoff, skipping.\n",
+ __func__);
+ return -EINVAL;
+ }
+ memset(&wb_config, 0, sizeof(struct atomisp_css_wb_config));
+ memset(&isp_config, 0, sizeof(struct ia_css_isp_config));
+ isp_config.wb_config = &wb_config;
+ ia_css_stream_get_isp_config(
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream,
+ &isp_config);
+ memcpy(config, &wb_config, sizeof(*config));
+
+ return 0;
+}
+
+int atomisp_css_get_ob_config(struct atomisp_sub_device *asd,
+ struct atomisp_ob_config *config)
+{
+ struct atomisp_css_ob_config ob_config;
+ struct ia_css_isp_config isp_config;
+ struct atomisp_device *isp = asd->isp;
+
+ if (!asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream) {
+ dev_err(isp->dev, "%s called after streamoff, skipping.\n",
+ __func__);
+ return -EINVAL;
+ }
+ memset(&ob_config, 0, sizeof(struct atomisp_css_ob_config));
+ memset(&isp_config, 0, sizeof(struct ia_css_isp_config));
+ isp_config.ob_config = &ob_config;
+ ia_css_stream_get_isp_config(
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream,
+ &isp_config);
+ memcpy(config, &ob_config, sizeof(*config));
+
+ return 0;
+}
+
+int atomisp_css_get_dp_config(struct atomisp_sub_device *asd,
+ struct atomisp_dp_config *config)
+{
+ struct atomisp_css_dp_config dp_config;
+ struct ia_css_isp_config isp_config;
+ struct atomisp_device *isp = asd->isp;
+
+ if (!asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream) {
+ dev_err(isp->dev, "%s called after streamoff, skipping.\n",
+ __func__);
+ return -EINVAL;
+ }
+ memset(&dp_config, 0, sizeof(struct atomisp_css_dp_config));
+ memset(&isp_config, 0, sizeof(struct ia_css_isp_config));
+ isp_config.dp_config = &dp_config;
+ ia_css_stream_get_isp_config(
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream,
+ &isp_config);
+ memcpy(config, &dp_config, sizeof(*config));
+
+ return 0;
+}
+
+int atomisp_css_get_de_config(struct atomisp_sub_device *asd,
+ struct atomisp_de_config *config)
+{
+ struct atomisp_css_de_config de_config;
+ struct ia_css_isp_config isp_config;
+ struct atomisp_device *isp = asd->isp;
+
+ if (!asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream) {
+ dev_err(isp->dev, "%s called after streamoff, skipping.\n",
+ __func__);
+ return -EINVAL;
+ }
+ memset(&de_config, 0, sizeof(struct atomisp_css_de_config));
+ memset(&isp_config, 0, sizeof(struct ia_css_isp_config));
+ isp_config.de_config = &de_config;
+ ia_css_stream_get_isp_config(
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream,
+ &isp_config);
+ memcpy(config, &de_config, sizeof(*config));
+
+ return 0;
+}
+
+int atomisp_css_get_nr_config(struct atomisp_sub_device *asd,
+ struct atomisp_nr_config *config)
+{
+ struct atomisp_css_nr_config nr_config;
+ struct ia_css_isp_config isp_config;
+ struct atomisp_device *isp = asd->isp;
+
+ if (!asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream) {
+ dev_err(isp->dev, "%s called after streamoff, skipping.\n",
+ __func__);
+ return -EINVAL;
+ }
+ memset(&nr_config, 0, sizeof(struct atomisp_css_nr_config));
+ memset(&isp_config, 0, sizeof(struct ia_css_isp_config));
+
+ isp_config.nr_config = &nr_config;
+ ia_css_stream_get_isp_config(
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream,
+ &isp_config);
+ memcpy(config, &nr_config, sizeof(*config));
+
+ return 0;
+}
+
+int atomisp_css_get_ee_config(struct atomisp_sub_device *asd,
+ struct atomisp_ee_config *config)
+{
+ struct atomisp_css_ee_config ee_config;
+ struct ia_css_isp_config isp_config;
+ struct atomisp_device *isp = asd->isp;
+
+ if (!asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream) {
+ dev_err(isp->dev, "%s called after streamoff, skipping.\n",
+ __func__);
+ return -EINVAL;
+ }
+ memset(&ee_config, 0, sizeof(struct atomisp_css_ee_config));
+ memset(&isp_config, 0, sizeof(struct ia_css_isp_config));
+ isp_config.ee_config = &ee_config;
+ ia_css_stream_get_isp_config(
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream,
+ &isp_config);
+ memcpy(config, &ee_config, sizeof(*config));
+
+ return 0;
+}
+
+int atomisp_css_get_tnr_config(struct atomisp_sub_device *asd,
+ struct atomisp_tnr_config *config)
+{
+ struct atomisp_css_tnr_config tnr_config;
+ struct ia_css_isp_config isp_config;
+ struct atomisp_device *isp = asd->isp;
+
+ if (!asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream) {
+ dev_err(isp->dev, "%s called after streamoff, skipping.\n",
+ __func__);
+ return -EINVAL;
+ }
+ memset(&tnr_config, 0, sizeof(struct atomisp_css_tnr_config));
+ memset(&isp_config, 0, sizeof(struct ia_css_isp_config));
+ isp_config.tnr_config = &tnr_config;
+ ia_css_stream_get_isp_config(
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream,
+ &isp_config);
+ memcpy(config, &tnr_config, sizeof(*config));
+
+ return 0;
+}
+
+int atomisp_css_get_ctc_table(struct atomisp_sub_device *asd,
+ struct atomisp_ctc_table *config)
+{
+ struct atomisp_css_ctc_table *tab;
+ struct ia_css_isp_config isp_config;
+ struct atomisp_device *isp = asd->isp;
+
+ if (!asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream) {
+ dev_err(isp->dev, "%s called after streamoff, skipping.\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ tab = vzalloc(sizeof(struct atomisp_css_ctc_table));
+ if (!tab)
+ return -ENOMEM;
+
+ memset(&isp_config, 0, sizeof(struct ia_css_isp_config));
+ isp_config.ctc_table = tab;
+ ia_css_stream_get_isp_config(
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream,
+ &isp_config);
+ memcpy(config, tab, sizeof(*tab));
+ vfree(tab);
+
+ return 0;
+}
+
+int atomisp_css_get_gamma_table(struct atomisp_sub_device *asd,
+ struct atomisp_gamma_table *config)
+{
+ struct atomisp_css_gamma_table *tab;
+ struct ia_css_isp_config isp_config;
+ struct atomisp_device *isp = asd->isp;
+
+ if (!asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream) {
+ dev_err(isp->dev, "%s called after streamoff, skipping.\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ tab = vzalloc(sizeof(struct atomisp_css_gamma_table));
+ if (!tab)
+ return -ENOMEM;
+
+ memset(&isp_config, 0, sizeof(struct ia_css_isp_config));
+ isp_config.gamma_table = tab;
+ ia_css_stream_get_isp_config(
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream,
+ &isp_config);
+ memcpy(config, tab, sizeof(*tab));
+ vfree(tab);
+
+ return 0;
+}
+
+int atomisp_css_get_gc_config(struct atomisp_sub_device *asd,
+ struct atomisp_gc_config *config)
+{
+ struct atomisp_css_gc_config gc_config;
+ struct ia_css_isp_config isp_config;
+ struct atomisp_device *isp = asd->isp;
+
+ if (!asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream) {
+ dev_err(isp->dev, "%s called after streamoff, skipping.\n",
+ __func__);
+ return -EINVAL;
+ }
+ memset(&gc_config, 0, sizeof(struct atomisp_css_gc_config));
+ memset(&isp_config, 0, sizeof(struct ia_css_isp_config));
+ isp_config.gc_config = &gc_config;
+ ia_css_stream_get_isp_config(
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream,
+ &isp_config);
+ /* Get gamma correction params from current setup */
+ memcpy(config, &gc_config, sizeof(*config));
+
+ return 0;
+}
+
+int atomisp_css_get_3a_config(struct atomisp_sub_device *asd,
+ struct atomisp_3a_config *config)
+{
+ struct atomisp_css_3a_config s3a_config;
+ struct ia_css_isp_config isp_config;
+ struct atomisp_device *isp = asd->isp;
+
+ if (!asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream) {
+ dev_err(isp->dev, "%s called after streamoff, skipping.\n",
+ __func__);
+ return -EINVAL;
+ }
+ memset(&s3a_config, 0, sizeof(struct atomisp_css_3a_config));
+ memset(&isp_config, 0, sizeof(struct ia_css_isp_config));
+ isp_config.s3a_config = &s3a_config;
+ ia_css_stream_get_isp_config(
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream,
+ &isp_config);
+ /* Get 3A configuration from current setup */
+ memcpy(config, &s3a_config, sizeof(*config));
+
+ return 0;
+}
+
+int atomisp_css_get_formats_config(struct atomisp_sub_device *asd,
+ struct atomisp_formats_config *config)
+{
+ struct atomisp_css_formats_config formats_config;
+ struct ia_css_isp_config isp_config;
+ struct atomisp_device *isp = asd->isp;
+
+ if (!asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream) {
+ dev_err(isp->dev, "%s called after streamoff, skipping.\n",
+ __func__);
+ return -EINVAL;
+ }
+ memset(&formats_config, 0, sizeof(formats_config));
+ memset(&isp_config, 0, sizeof(isp_config));
+ isp_config.formats_config = &formats_config;
+ ia_css_stream_get_isp_config(
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream,
+ &isp_config);
+ /* Get narrow gamma from current setup */
+ memcpy(config, &formats_config, sizeof(*config));
+
+ return 0;
+}
+
+int atomisp_css_get_zoom_factor(struct atomisp_sub_device *asd,
+ unsigned int *zoom)
+{
+ struct ia_css_dz_config dz_config; /**< Digital Zoom */
+ struct ia_css_isp_config isp_config;
+ struct atomisp_device *isp = asd->isp;
+
+ if (!asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream) {
+ dev_err(isp->dev, "%s called after streamoff, skipping.\n",
+ __func__);
+ return -EINVAL;
+ }
+ memset(&dz_config, 0, sizeof(struct ia_css_dz_config));
+ memset(&isp_config, 0, sizeof(struct ia_css_isp_config));
+ isp_config.dz_config = &dz_config;
+ ia_css_stream_get_isp_config(
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream,
+ &isp_config);
+ *zoom = dz_config.dx;
+
+ return 0;
+}
+
+
+/*
+ * Function to set/get image stabilization statistics
+ */
+int atomisp_css_get_dis_stat(struct atomisp_sub_device *asd,
+ struct atomisp_dis_statistics *stats)
+{
+ struct atomisp_device *isp = asd->isp;
+ struct atomisp_dis_buf *dis_buf;
+ unsigned long flags;
+
+ if (asd->params.dvs_stat->hor_prod.odd_real == NULL ||
+ asd->params.dvs_stat->hor_prod.odd_imag == NULL ||
+ asd->params.dvs_stat->hor_prod.even_real == NULL ||
+ asd->params.dvs_stat->hor_prod.even_imag == NULL ||
+ asd->params.dvs_stat->ver_prod.odd_real == NULL ||
+ asd->params.dvs_stat->ver_prod.odd_imag == NULL ||
+ asd->params.dvs_stat->ver_prod.even_real == NULL ||
+ asd->params.dvs_stat->ver_prod.even_imag == NULL)
+ return -EINVAL;
+
+ /* isp needs to be streaming to get DIS statistics */
+ spin_lock_irqsave(&isp->lock, flags);
+ if (asd->streaming != ATOMISP_DEVICE_STREAMING_ENABLED) {
+ spin_unlock_irqrestore(&isp->lock, flags);
+ return -EINVAL;
+ }
+ spin_unlock_irqrestore(&isp->lock, flags);
+
+ if (atomisp_compare_dvs_grid(asd, &stats->dvs2_stat.grid_info) != 0)
+ /* If the grid info in the argument differs from the current
+ grid info, we tell the caller to reset the grid size and
+ try again. */
+ return -EAGAIN;
+
+ spin_lock_irqsave(&asd->dis_stats_lock, flags);
+ if (!asd->params.dis_proj_data_valid || list_empty(&asd->dis_stats)) {
+ spin_unlock_irqrestore(&asd->dis_stats_lock, flags);
+ dev_err(isp->dev, "dis statistics is not valid.\n");
+ return -EAGAIN;
+ }
+
+ dis_buf = list_entry(asd->dis_stats.next,
+ struct atomisp_dis_buf, list);
+ list_del_init(&dis_buf->list);
+ spin_unlock_irqrestore(&asd->dis_stats_lock, flags);
+
+ if (dis_buf->dvs_map)
+ ia_css_translate_dvs2_statistics(
+ asd->params.dvs_stat, dis_buf->dvs_map);
+ else
+ ia_css_get_dvs2_statistics(asd->params.dvs_stat,
+ dis_buf->dis_data);
+ stats->exp_id = dis_buf->dis_data->exp_id;
+
+ spin_lock_irqsave(&asd->dis_stats_lock, flags);
+ list_add_tail(&dis_buf->list, &asd->dis_stats);
+ spin_unlock_irqrestore(&asd->dis_stats_lock, flags);
+
+ if (copy_to_user(stats->dvs2_stat.ver_prod.odd_real,
+ asd->params.dvs_stat->ver_prod.odd_real,
+ asd->params.dvs_ver_proj_bytes))
+ return -EFAULT;
+ if (copy_to_user(stats->dvs2_stat.ver_prod.odd_imag,
+ asd->params.dvs_stat->ver_prod.odd_imag,
+ asd->params.dvs_ver_proj_bytes))
+ return -EFAULT;
+ if (copy_to_user(stats->dvs2_stat.ver_prod.even_real,
+ asd->params.dvs_stat->ver_prod.even_real,
+ asd->params.dvs_ver_proj_bytes))
+ return -EFAULT;
+ if (copy_to_user(stats->dvs2_stat.ver_prod.even_imag,
+ asd->params.dvs_stat->ver_prod.even_imag,
+ asd->params.dvs_ver_proj_bytes))
+ return -EFAULT;
+ if (copy_to_user(stats->dvs2_stat.hor_prod.odd_real,
+ asd->params.dvs_stat->hor_prod.odd_real,
+ asd->params.dvs_hor_proj_bytes))
+ return -EFAULT;
+ if (copy_to_user(stats->dvs2_stat.hor_prod.odd_imag,
+ asd->params.dvs_stat->hor_prod.odd_imag,
+ asd->params.dvs_hor_proj_bytes))
+ return -EFAULT;
+ if (copy_to_user(stats->dvs2_stat.hor_prod.even_real,
+ asd->params.dvs_stat->hor_prod.even_real,
+ asd->params.dvs_hor_proj_bytes))
+ return -EFAULT;
+ if (copy_to_user(stats->dvs2_stat.hor_prod.even_imag,
+ asd->params.dvs_stat->hor_prod.even_imag,
+ asd->params.dvs_hor_proj_bytes))
+ return -EFAULT;
+
+ return 0;
+}
+
+struct atomisp_css_shading_table *atomisp_css_shading_table_alloc(
+ unsigned int width, unsigned int height)
+{
+ return ia_css_shading_table_alloc(width, height);
+}
+
+void atomisp_css_set_shading_table(struct atomisp_sub_device *asd,
+ struct atomisp_css_shading_table *table)
+{
+ asd->params.config.shading_table = table;
+}
+
+void atomisp_css_shading_table_free(struct atomisp_css_shading_table *table)
+{
+ ia_css_shading_table_free(table);
+}
+
+struct atomisp_css_morph_table *atomisp_css_morph_table_allocate(
+ unsigned int width, unsigned int height)
+{
+ return ia_css_morph_table_allocate(width, height);
+}
+
+void atomisp_css_set_morph_table(struct atomisp_sub_device *asd,
+ struct atomisp_css_morph_table *table)
+{
+ asd->params.config.morph_table = table;
+}
+
+void atomisp_css_get_morph_table(struct atomisp_sub_device *asd,
+ struct atomisp_css_morph_table *table)
+{
+ struct ia_css_isp_config isp_config;
+ struct atomisp_device *isp = asd->isp;
+
+ if (!asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream) {
+ dev_err(isp->dev,
+ "%s called after streamoff, skipping.\n", __func__);
+ return;
+ }
+ memset(table, 0, sizeof(struct atomisp_css_morph_table));
+ memset(&isp_config, 0, sizeof(struct ia_css_isp_config));
+ isp_config.morph_table = table;
+ ia_css_stream_get_isp_config(
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream,
+ &isp_config);
+}
+
+void atomisp_css_morph_table_free(struct atomisp_css_morph_table *table)
+{
+ ia_css_morph_table_free(table);
+}
+
+void atomisp_css_set_cont_prev_start_time(struct atomisp_device *isp,
+ unsigned int overlap)
+{
+ /* CSS 2.0 doesn't support this API. */
+ dev_dbg(isp->dev, "set cont prev start time is not supported.\n");
+ return;
+}
+
+void atomisp_css_acc_done(struct atomisp_sub_device *asd)
+{
+ complete(&asd->acc.acc_done);
+}
+
+int atomisp_css_wait_acc_finish(struct atomisp_sub_device *asd)
+{
+ int ret = 0;
+ struct atomisp_device *isp = asd->isp;
+
+ /* Unlock the isp mutex taken in IOCTL handler before sleeping! */
+ rt_mutex_unlock(&isp->mutex);
+ if (wait_for_completion_interruptible_timeout(&asd->acc.acc_done,
+ ATOMISP_ISP_TIMEOUT_DURATION) == 0) {
+ dev_err(isp->dev, "<%s: completion timeout\n", __func__);
+ atomisp_css_debug_dump_sp_sw_debug_info();
+ atomisp_css_debug_dump_debug_info(__func__);
+ ret = -EIO;
+ }
+ rt_mutex_lock(&isp->mutex);
+
+ return ret;
+}
+
+/* Set the ACC binary arguments */
+int atomisp_css_set_acc_parameters(struct atomisp_acc_fw *acc_fw)
+{
+ unsigned int mem;
+
+ for (mem = 0; mem < ATOMISP_ACC_NR_MEMORY; mem++) {
+ if (acc_fw->args[mem].length == 0)
+ continue;
+
+ ia_css_isp_param_set_css_mem_init(&acc_fw->fw->mem_initializers,
+ IA_CSS_PARAM_CLASS_PARAM, mem,
+ acc_fw->args[mem].css_ptr,
+ acc_fw->args[mem].length);
+ }
+
+ return 0;
+}
+
+/* Load acc binary extension */
+int atomisp_css_load_acc_extension(struct atomisp_sub_device *asd,
+ struct atomisp_css_fw_info *fw,
+ enum atomisp_css_pipe_id pipe_id,
+ unsigned int type)
+{
+ struct atomisp_css_fw_info **hd;
+
+ fw->next = NULL;
+ hd = &(asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL]
+ .pipe_configs[pipe_id].acc_extension);
+ while (*hd)
+ hd = &(*hd)->next;
+ *hd = fw;
+
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL]
+ .update_pipe[pipe_id] = true;
+ return 0;
+}
+
+/* Unload acc binary extension */
+void atomisp_css_unload_acc_extension(struct atomisp_sub_device *asd,
+ struct atomisp_css_fw_info *fw,
+ enum atomisp_css_pipe_id pipe_id)
+{
+ struct atomisp_css_fw_info **hd;
+
+ hd = &(asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL]
+ .pipe_configs[pipe_id].acc_extension);
+ while (*hd && *hd != fw)
+ hd = &(*hd)->next;
+ if (!*hd) {
+ dev_err(asd->isp->dev, "did not find acc fw for removal\n");
+ return;
+ }
+ *hd = fw->next;
+ fw->next = NULL;
+
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL]
+ .update_pipe[pipe_id] = true;
+}
+
+int atomisp_css_create_acc_pipe(struct atomisp_sub_device *asd)
+{
+ struct atomisp_device *isp = asd->isp;
+ struct ia_css_pipe_config *pipe_config;
+ struct atomisp_stream_env *stream_env =
+ &asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL];
+
+ if (stream_env->acc_stream) {
+ if (stream_env->acc_stream_state == CSS_STREAM_STARTED) {
+ if (ia_css_stream_stop(stream_env->acc_stream)
+ != IA_CSS_SUCCESS) {
+ dev_err(isp->dev, "stop acc_stream failed.\n");
+ return -EBUSY;
+ }
+ }
+
+ if (ia_css_stream_destroy(stream_env->acc_stream)
+ != IA_CSS_SUCCESS) {
+ dev_err(isp->dev, "destroy acc_stream failed.\n");
+ return -EBUSY;
+ }
+ stream_env->acc_stream = NULL;
+ }
+
+ pipe_config = &stream_env->pipe_configs[CSS_PIPE_ID_ACC];
+ ia_css_pipe_config_defaults(pipe_config);
+ asd->acc.acc_stages = kzalloc(MAX_ACC_STAGES *
+ sizeof(void *), GFP_KERNEL);
+ if (!asd->acc.acc_stages)
+ return -ENOMEM;
+ pipe_config->acc_stages = asd->acc.acc_stages;
+ pipe_config->mode = IA_CSS_PIPE_MODE_ACC;
+ pipe_config->num_acc_stages = 0;
+
+ /*
+ * We delay ACC pipeline creation until atomisp_css_start_acc_pipe(),
+ * because the pipe configuration will still be changed by
+ * atomisp_css_load_acc_binary().
+ */
+ return 0;
+}
+
+int atomisp_css_start_acc_pipe(struct atomisp_sub_device *asd)
+{
+ struct atomisp_device *isp = asd->isp;
+ struct atomisp_stream_env *stream_env =
+ &asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL];
+ struct ia_css_pipe_config *pipe_config =
+ &stream_env->pipe_configs[IA_CSS_PIPE_ID_ACC];
+
+ if (ia_css_pipe_create(pipe_config,
+ &stream_env->pipes[IA_CSS_PIPE_ID_ACC]) != IA_CSS_SUCCESS) {
+ dev_err(isp->dev, "%s: ia_css_pipe_create failed\n",
+ __func__);
+ return -EBADE;
+ }
+
+ memset(&stream_env->acc_stream_config, 0,
+ sizeof(struct ia_css_stream_config));
+ if (ia_css_stream_create(&stream_env->acc_stream_config, 1,
+ &stream_env->pipes[IA_CSS_PIPE_ID_ACC],
+ &stream_env->acc_stream) != IA_CSS_SUCCESS) {
+ dev_err(isp->dev, "%s: create acc_stream error.\n", __func__);
+ return -EINVAL;
+ }
+ stream_env->acc_stream_state = CSS_STREAM_CREATED;
+
+ init_completion(&asd->acc.acc_done);
+ asd->acc.pipeline = stream_env->pipes[IA_CSS_PIPE_ID_ACC];
+
+ atomisp_freq_scaling(isp, ATOMISP_DFS_MODE_MAX, false);
+
+ if (ia_css_start_sp() != IA_CSS_SUCCESS) {
+ dev_err(isp->dev, "start sp error.\n");
+ return -EIO;
+ }
+
+ if (ia_css_stream_start(stream_env->acc_stream)
+ != IA_CSS_SUCCESS) {
+ dev_err(isp->dev, "acc_stream start error.\n");
+ return -EIO;
+ }
+
+ stream_env->acc_stream_state = CSS_STREAM_STARTED;
+ return 0;
+}
+
+int atomisp_css_stop_acc_pipe(struct atomisp_sub_device *asd)
+{
+ struct atomisp_stream_env *stream_env =
+ &asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL];
+ if (stream_env->acc_stream_state == CSS_STREAM_STARTED) {
+ ia_css_stream_stop(stream_env->acc_stream);
+ stream_env->acc_stream_state = CSS_STREAM_STOPPED;
+ }
+ return 0;
+}
+
+void atomisp_css_destroy_acc_pipe(struct atomisp_sub_device *asd)
+{
+ struct atomisp_stream_env *stream_env =
+ &asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL];
+ if (stream_env->acc_stream) {
+ if (ia_css_stream_destroy(stream_env->acc_stream)
+ != IA_CSS_SUCCESS)
+ dev_warn(asd->isp->dev,
+ "destroy acc_stream failed.\n");
+ stream_env->acc_stream = NULL;
+ }
+
+ if (stream_env->pipes[IA_CSS_PIPE_ID_ACC]) {
+ if (ia_css_pipe_destroy(stream_env->pipes[IA_CSS_PIPE_ID_ACC])
+ != IA_CSS_SUCCESS)
+ dev_warn(asd->isp->dev,
+ "destroy ACC pipe failed.\n");
+ stream_env->pipes[IA_CSS_PIPE_ID_ACC] = NULL;
+ stream_env->update_pipe[IA_CSS_PIPE_ID_ACC] = false;
+ ia_css_pipe_config_defaults(
+ &stream_env->pipe_configs[IA_CSS_PIPE_ID_ACC]);
+ ia_css_pipe_extra_config_defaults(
+ &stream_env->pipe_extra_configs[IA_CSS_PIPE_ID_ACC]);
+ }
+ asd->acc.pipeline = NULL;
+
+ /* CSS 2.0 API limitation: ia_css_stop_sp() may only be called after
+ * all pipes have been destroyed.
+ */
+ ia_css_stop_sp();
+
+ kfree(asd->acc.acc_stages);
+ asd->acc.acc_stages = NULL;
+
+ atomisp_freq_scaling(asd->isp, ATOMISP_DFS_MODE_LOW, false);
+}
+
+int atomisp_css_load_acc_binary(struct atomisp_sub_device *asd,
+ struct atomisp_css_fw_info *fw,
+ unsigned int index)
+{
+ struct ia_css_pipe_config *pipe_config =
+ &asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL]
+ .pipe_configs[IA_CSS_PIPE_ID_ACC];
+
+ if (index >= MAX_ACC_STAGES) {
+ dev_dbg(asd->isp->dev, "%s: index(%d) out of range\n",
+ __func__, index);
+ return -ENOMEM;
+ }
+
+ pipe_config->acc_stages[index] = fw;
+ pipe_config->num_acc_stages = index + 1;
+ pipe_config->acc_num_execs = 1;
+
+ return 0;
+}
+
+static struct atomisp_sub_device *__get_atomisp_subdev(
+ struct ia_css_pipe *css_pipe,
+ struct atomisp_device *isp,
+ enum atomisp_input_stream_id *stream_id) {
+ int i, j, k;
+ struct atomisp_sub_device *asd;
+ struct atomisp_stream_env *stream_env;
+
+ for (i = 0; i < isp->num_of_streams; i++) {
+ asd = &isp->asd[i];
+ if (asd->streaming == ATOMISP_DEVICE_STREAMING_DISABLED &&
+ !asd->acc.pipeline)
+ continue;
+ for (j = 0; j < ATOMISP_INPUT_STREAM_NUM; j++) {
+ stream_env = &asd->stream_env[j];
+ for (k = 0; k < IA_CSS_PIPE_ID_NUM; k++) {
+ if (stream_env->pipes[k] &&
+ stream_env->pipes[k] == css_pipe) {
+ *stream_id = j;
+ return asd;
+ }
+ }
+ }
+ }
+
+ return NULL;
+}
+
+int atomisp_css_isr_thread(struct atomisp_device *isp,
+ bool *frame_done_found,
+ bool *css_pipe_done)
+{
+ enum atomisp_input_stream_id stream_id = 0;
+ struct atomisp_css_event current_event;
+ struct atomisp_sub_device *asd = &isp->asd[0];
+#ifndef ISP2401
+ bool reset_wdt_timer[MAX_STREAM_NUM] = {false};
+#endif
+ int i;
+
+ while (!atomisp_css_dequeue_event(&current_event)) {
+ if (current_event.event.type ==
+ IA_CSS_EVENT_TYPE_FW_ASSERT) {
+ /*
+ * Received FW assertion signal,
+ * trigger WDT to recover
+ */
+ dev_err(isp->dev, "%s: ISP reports FW_ASSERT event! fw_assert_module_id %d fw_assert_line_no %d\n",
+ __func__,
+ current_event.event.fw_assert_module_id,
+ current_event.event.fw_assert_line_no);
+ for (i = 0; i < isp->num_of_streams; i++)
+ atomisp_wdt_stop(&isp->asd[i], 0);
+#ifndef ISP2401
+ atomisp_wdt((unsigned long)isp);
+#else
+ queue_work(isp->wdt_work_queue, &isp->wdt_work);
+#endif
+ return -EINVAL;
+ } else if (current_event.event.type == IA_CSS_EVENT_TYPE_FW_WARNING) {
+ dev_warn(isp->dev, "%s: ISP reports warning, code is %d, exp_id %d\n",
+ __func__, current_event.event.fw_warning,
+ current_event.event.exp_id);
+ continue;
+ }
+
+ asd = __get_atomisp_subdev(current_event.event.pipe,
+ isp, &stream_id);
+ if (!asd) {
+ if (current_event.event.type == CSS_EVENT_TIMER)
+ dev_dbg(isp->dev,
+ "event: Timer event.");
+ else
+ dev_warn(isp->dev, "%s:no subdev.event:%d",
+ __func__,
+ current_event.event.type);
+ continue;
+ }
+
+ atomisp_css_temp_pipe_to_pipe_id(asd, &current_event);
+ switch (current_event.event.type) {
+ case CSS_EVENT_OUTPUT_FRAME_DONE:
+ frame_done_found[asd->index] = true;
+ atomisp_buf_done(asd, 0, CSS_BUFFER_TYPE_OUTPUT_FRAME,
+ current_event.pipe, true, stream_id);
+#ifndef ISP2401
+ reset_wdt_timer[asd->index] = true; /* ISP running */
+#endif
+ break;
+ case CSS_EVENT_SEC_OUTPUT_FRAME_DONE:
+ frame_done_found[asd->index] = true;
+ atomisp_buf_done(asd, 0, CSS_BUFFER_TYPE_SEC_OUTPUT_FRAME,
+ current_event.pipe, true, stream_id);
+#ifndef ISP2401
+ reset_wdt_timer[asd->index] = true; /* ISP running */
+#endif
+ break;
+ case CSS_EVENT_3A_STATISTICS_DONE:
+ atomisp_buf_done(asd, 0,
+ CSS_BUFFER_TYPE_3A_STATISTICS,
+ current_event.pipe,
+ false, stream_id);
+ break;
+ case CSS_EVENT_METADATA_DONE:
+ atomisp_buf_done(asd, 0,
+ CSS_BUFFER_TYPE_METADATA,
+ current_event.pipe,
+ false, stream_id);
+ break;
+ case CSS_EVENT_VF_OUTPUT_FRAME_DONE:
+ atomisp_buf_done(asd, 0,
+ CSS_BUFFER_TYPE_VF_OUTPUT_FRAME,
+ current_event.pipe, true, stream_id);
+#ifndef ISP2401
+ reset_wdt_timer[asd->index] = true; /* ISP running */
+#endif
+ break;
+ case CSS_EVENT_SEC_VF_OUTPUT_FRAME_DONE:
+ atomisp_buf_done(asd, 0,
+ CSS_BUFFER_TYPE_SEC_VF_OUTPUT_FRAME,
+ current_event.pipe, true, stream_id);
+#ifndef ISP2401
+ reset_wdt_timer[asd->index] = true; /* ISP running */
+#endif
+ break;
+ case CSS_EVENT_DIS_STATISTICS_DONE:
+ atomisp_buf_done(asd, 0,
+ CSS_BUFFER_TYPE_DIS_STATISTICS,
+ current_event.pipe,
+ false, stream_id);
+ break;
+ case CSS_EVENT_PIPELINE_DONE:
+ css_pipe_done[asd->index] = true;
+ break;
+ case CSS_EVENT_ACC_STAGE_COMPLETE:
+ atomisp_acc_done(asd, current_event.event.fw_handle);
+ break;
+ default:
+ dev_dbg(isp->dev, "unhandled css stored event: 0x%x\n",
+ current_event.event.type);
+ break;
+ }
+ }
+#ifndef ISP2401
+ /* If there are no buffers queued then
+ * delete wdt timer. */
+ for (i = 0; i < isp->num_of_streams; i++) {
+ asd = &isp->asd[i];
+ if (!asd)
+ continue;
+ if (asd->streaming != ATOMISP_DEVICE_STREAMING_ENABLED)
+ continue;
+ if (!atomisp_buffers_queued(asd))
+ atomisp_wdt_stop(asd, false);
+ else if (reset_wdt_timer[i])
+ /* SOF irq should not reset wdt timer. */
+ atomisp_wdt_refresh(asd,
+ ATOMISP_WDT_KEEP_CURRENT_DELAY);
+ }
+#endif
+
+ return 0;
+}
+
+bool atomisp_css_valid_sof(struct atomisp_device *isp)
+{
+ unsigned int i, j;
+
+ /* Loop for each css stream */
+ for (i = 0; i < isp->num_of_streams; i++) {
+ struct atomisp_sub_device *asd = &isp->asd[i];
+ /* Loop for each css vc stream */
+ for (j = 0; j < ATOMISP_INPUT_STREAM_NUM; j++) {
+ if (asd->stream_env[j].stream &&
+ asd->stream_env[j].stream_config.mode ==
+ IA_CSS_INPUT_MODE_BUFFERED_SENSOR)
+ return false;
+ }
+ }
+
+ return true;
+}
+
+int atomisp_css_debug_dump_isp_binary(void)
+{
+ ia_css_debug_dump_isp_binary();
+ return 0;
+}
+
+int atomisp_css_dump_sp_raw_copy_linecount(bool reduced)
+{
+ sh_css_dump_sp_raw_copy_linecount(reduced);
+ return 0;
+}
+
+int atomisp_css_dump_blob_infor(void)
+{
+ struct ia_css_blob_descr *bd = sh_css_blob_info;
+ unsigned i, nm = sh_css_num_binaries;
+
+ if (nm == 0)
+ return -EPERM;
+ if (bd == NULL)
+ return -EPERM;
+
+ for (i = 1; i < sh_css_num_binaries; i++)
+ dev_dbg(atomisp_dev, "Num%d binary id is %d, name is %s\n", i,
+ bd[i-1].header.info.isp.sp.id, bd[i-1].name);
+
+ return 0;
+}
+
+void atomisp_css_set_isp_config_id(struct atomisp_sub_device *asd,
+ uint32_t isp_config_id)
+{
+ asd->params.config.isp_config_id = isp_config_id;
+}
+
+void atomisp_css_set_isp_config_applied_frame(struct atomisp_sub_device *asd,
+ struct atomisp_css_frame *output_frame)
+{
+ asd->params.config.output_frame = output_frame;
+}
+
+int atomisp_get_css_dbgfunc(void)
+{
+ return dbg_func;
+}
+
+int atomisp_set_css_dbgfunc(struct atomisp_device *isp, int opt)
+{
+ int ret;
+
+ ret = __set_css_print_env(isp, opt);
+ if (0 == ret)
+ dbg_func = opt;
+
+ return ret;
+}
+void atomisp_en_dz_capt_pipe(struct atomisp_sub_device *asd, bool enable)
+{
+ ia_css_en_dz_capt_pipe(
+ asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream,
+ enable);
+}
+
+struct atomisp_css_dvs_grid_info *atomisp_css_get_dvs_grid_info(
+ struct atomisp_css_grid_info *grid_info)
+{
+ if (!grid_info)
+ return NULL;
+
+#ifdef IA_CSS_DVS_STAT_GRID_INFO_SUPPORTED
+ return &grid_info->dvs_grid.dvs_grid_info;
+#else
+ return &grid_info->dvs_grid;
+#endif
+}
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_css20.h b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_css20.h
new file mode 100644
index 000000000000..b62ad9082018
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_css20.h
@@ -0,0 +1,282 @@
+/*
+ * Support for Clovertrail PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2013 Intel Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+
+#ifndef __ATOMISP_COMPAT_CSS20_H__
+#define __ATOMISP_COMPAT_CSS20_H__
+
+#include <media/v4l2-mediabus.h>
+
+#include "ia_css.h"
+#include "ia_css_types.h"
+#include "ia_css_acc_types.h"
+#include "sh_css_legacy.h"
+
+#define ATOMISP_CSS2_PIPE_MAX 2
+#define ATOMISP_CSS2_NUM_OFFLINE_INIT_CONTINUOUS_FRAMES 3
+#define ATOMISP_CSS2_NUM_OFFLINE_INIT_CONTINUOUS_FRAMES_LOCK_EN 4
+#define ATOMISP_CSS2_NUM_DVS_FRAME_DELAY 2
+
+#define atomisp_css_pipe_id ia_css_pipe_id
+#define atomisp_css_pipeline ia_css_pipe
+#define atomisp_css_buffer_type ia_css_buffer_type
+#define atomisp_css_dis_data ia_css_isp_dvs_statistics
+#define atomisp_css_irq_info ia_css_irq_info
+#define atomisp_css_isp_config ia_css_isp_config
+#define atomisp_css_bayer_order ia_css_bayer_order
+#define atomisp_css_stream_format ia_css_stream_format
+#define atomisp_css_capture_mode ia_css_capture_mode
+#define atomisp_css_input_mode ia_css_input_mode
+#define atomisp_css_frame ia_css_frame
+#define atomisp_css_frame_format ia_css_frame_format
+#define atomisp_css_frame_info ia_css_frame_info
+#define atomisp_css_dp_config ia_css_dp_config
+#define atomisp_css_wb_config ia_css_wb_config
+#define atomisp_css_cc_config ia_css_cc_config
+#define atomisp_css_nr_config ia_css_nr_config
+#define atomisp_css_ee_config ia_css_ee_config
+#define atomisp_css_ob_config ia_css_ob_config
+#define atomisp_css_de_config ia_css_de_config
+#define atomisp_css_dz_config ia_css_dz_config
+#define atomisp_css_ce_config ia_css_ce_config
+#define atomisp_css_gc_config ia_css_gc_config
+#define atomisp_css_tnr_config ia_css_tnr_config
+#define atomisp_css_cnr_config ia_css_cnr_config
+#define atomisp_css_ctc_config ia_css_ctc_config
+#define atomisp_css_3a_config ia_css_3a_config
+#define atomisp_css_ecd_config ia_css_ecd_config
+#define atomisp_css_ynr_config ia_css_ynr_config
+#define atomisp_css_fc_config ia_css_fc_config
+#define atomisp_css_aa_config ia_css_aa_config
+#define atomisp_css_baa_config ia_css_aa_config
+#define atomisp_css_anr_config ia_css_anr_config
+#define atomisp_css_xnr_config ia_css_xnr_config
+#define atomisp_css_macc_config ia_css_macc_config
+#define atomisp_css_gamma_table ia_css_gamma_table
+#define atomisp_css_ctc_table ia_css_ctc_table
+#define atomisp_css_macc_table ia_css_macc_table
+#define atomisp_css_xnr_table ia_css_xnr_table
+#define atomisp_css_rgb_gamma_table ia_css_rgb_gamma_table
+#define atomisp_css_anr_thres ia_css_anr_thres
+#define atomisp_css_dvs_6axis ia_css_dvs_6axis_config
+#define atomisp_css_grid_info ia_css_grid_info
+#define atomisp_css_3a_grid_info ia_css_3a_grid_info
+#define atomisp_css_dvs_grid_info ia_css_dvs_grid_info
+#define atomisp_css_shading_table ia_css_shading_table
+#define atomisp_css_morph_table ia_css_morph_table
+#define atomisp_css_dvs_6axis_config ia_css_dvs_6axis_config
+#define atomisp_css_fw_info ia_css_fw_info
+#define atomisp_css_formats_config ia_css_formats_config
+
+#define CSS_PIPE_ID_PREVIEW IA_CSS_PIPE_ID_PREVIEW
+#define CSS_PIPE_ID_COPY IA_CSS_PIPE_ID_COPY
+#define CSS_PIPE_ID_VIDEO IA_CSS_PIPE_ID_VIDEO
+#define CSS_PIPE_ID_CAPTURE IA_CSS_PIPE_ID_CAPTURE
+#define CSS_PIPE_ID_ACC IA_CSS_PIPE_ID_ACC
+#define CSS_PIPE_ID_YUVPP IA_CSS_PIPE_ID_YUVPP
+#define CSS_PIPE_ID_NUM IA_CSS_PIPE_ID_NUM
+
+#define CSS_INPUT_MODE_SENSOR IA_CSS_INPUT_MODE_BUFFERED_SENSOR
+#define CSS_INPUT_MODE_FIFO IA_CSS_INPUT_MODE_FIFO
+#define CSS_INPUT_MODE_TPG IA_CSS_INPUT_MODE_TPG
+#define CSS_INPUT_MODE_PRBS IA_CSS_INPUT_MODE_PRBS
+#define CSS_INPUT_MODE_MEMORY IA_CSS_INPUT_MODE_MEMORY
+
+#define CSS_IRQ_INFO_CSS_RECEIVER_ERROR IA_CSS_IRQ_INFO_CSS_RECEIVER_ERROR
+#define CSS_IRQ_INFO_EVENTS_READY IA_CSS_IRQ_INFO_EVENTS_READY
+#define CSS_IRQ_INFO_INPUT_SYSTEM_ERROR \
+ IA_CSS_IRQ_INFO_INPUT_SYSTEM_ERROR
+#define CSS_IRQ_INFO_IF_ERROR IA_CSS_IRQ_INFO_IF_ERROR
+
+#define CSS_BUFFER_TYPE_NUM IA_CSS_BUFFER_TYPE_NUM
+
+#define CSS_FRAME_FLASH_STATE_NONE IA_CSS_FRAME_FLASH_STATE_NONE
+#define CSS_FRAME_FLASH_STATE_PARTIAL IA_CSS_FRAME_FLASH_STATE_PARTIAL
+#define CSS_FRAME_FLASH_STATE_FULL IA_CSS_FRAME_FLASH_STATE_FULL
+
+#define CSS_BAYER_ORDER_GRBG IA_CSS_BAYER_ORDER_GRBG
+#define CSS_BAYER_ORDER_RGGB IA_CSS_BAYER_ORDER_RGGB
+#define CSS_BAYER_ORDER_BGGR IA_CSS_BAYER_ORDER_BGGR
+#define CSS_BAYER_ORDER_GBRG IA_CSS_BAYER_ORDER_GBRG
+
+/*
+ * Hide IA_ naming difference in otherwise common CSS macros.
+ */
+#define CSS_ID(val) (IA_ ## val)
+#define CSS_EVENT(val) (IA_CSS_EVENT_TYPE_ ## val)
+#define CSS_FORMAT(val) (IA_CSS_STREAM_FORMAT_ ## val)
+
+#define CSS_EVENT_PORT_EOF CSS_EVENT(PORT_EOF)
+#define CSS_EVENT_FRAME_TAGGED CSS_EVENT(FRAME_TAGGED)
+
+#define CSS_MIPI_FRAME_BUFFER_SIZE_1 0x60000
+#define CSS_MIPI_FRAME_BUFFER_SIZE_2 0x80000
+
+struct atomisp_device;
+struct atomisp_sub_device;
+
+#define MAX_STREAMS_PER_CHANNEL 2
+
+/*
+ * These indicate the CSS stream state; the stream handling code
+ * selects the appropriate action based on the current state.
+ */
+enum atomisp_css_stream_state {
+ CSS_STREAM_UNINIT,
+ CSS_STREAM_CREATED,
+ CSS_STREAM_STARTED,
+ CSS_STREAM_STOPPED,
+};
+
+/*
+ * A sensor or external ISP can send multiple streams with different MIPI
+ * data types over the same virtual channel. This information has to be
+ * provided by the sensor or the external ISP.
+ */
+struct atomisp_css_isys_config_info {
+ unsigned int input_format;
+ unsigned int width;
+ unsigned int height;
+};
+
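+/*
+ * Per-stream CSS state: the stream itself, its per-pipe configuration,
+ * the optional acceleration stream and the input system (virtual
+ * channel) settings reported by the sensor.
+ */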
+struct atomisp_stream_env {
+ struct ia_css_stream *stream;
+ struct ia_css_stream_config stream_config;
+ struct ia_css_stream_info stream_info;
+ struct ia_css_pipe *pipes[IA_CSS_PIPE_ID_NUM];
+ struct ia_css_pipe *multi_pipes[IA_CSS_PIPE_ID_NUM];
+ struct ia_css_pipe_config pipe_configs[IA_CSS_PIPE_ID_NUM];
+ struct ia_css_pipe_extra_config pipe_extra_configs[IA_CSS_PIPE_ID_NUM];
+ bool update_pipe[IA_CSS_PIPE_ID_NUM];
+ enum atomisp_css_stream_state stream_state;
+ struct ia_css_stream *acc_stream;
+ enum atomisp_css_stream_state acc_stream_state;
+ struct ia_css_stream_config acc_stream_config;
+ unsigned int ch_id; /* virtual channel ID */
+ unsigned int isys_configs;
+ struct atomisp_css_isys_config_info isys_info[MAX_STREAMS_PER_CHANNEL];
+};
+
+struct atomisp_css_env {
+ struct ia_css_env isp_css_env;
+ struct ia_css_fw isp_css_fw;
+};
+
+struct atomisp_s3a_buf {
+ struct ia_css_isp_3a_statistics *s3a_data;
+ struct ia_css_isp_3a_statistics_map *s3a_map;
+ struct list_head list;
+};
+
+struct atomisp_dis_buf {
+ struct atomisp_css_dis_data *dis_data;
+ struct ia_css_isp_dvs_statistics_map *dvs_map;
+ struct list_head list;
+};
+
+struct atomisp_css_buffer {
+ struct ia_css_buffer css_buffer;
+};
+
+struct atomisp_css_event {
+ enum atomisp_css_pipe_id pipe;
+ struct ia_css_event event;
+};
+
+void atomisp_css_set_macc_config(struct atomisp_sub_device *asd,
+ struct atomisp_css_macc_config *macc_config);
+
+void atomisp_css_set_ecd_config(struct atomisp_sub_device *asd,
+ struct atomisp_css_ecd_config *ecd_config);
+
+void atomisp_css_set_ynr_config(struct atomisp_sub_device *asd,
+ struct atomisp_css_ynr_config *ynr_config);
+
+void atomisp_css_set_fc_config(struct atomisp_sub_device *asd,
+ struct atomisp_css_fc_config *fc_config);
+
+void atomisp_css_set_aa_config(struct atomisp_sub_device *asd,
+ struct atomisp_css_aa_config *aa_config);
+
+void atomisp_css_set_baa_config(struct atomisp_sub_device *asd,
+ struct atomisp_css_baa_config *baa_config);
+
+void atomisp_css_set_anr_config(struct atomisp_sub_device *asd,
+ struct atomisp_css_anr_config *anr_config);
+
+void atomisp_css_set_xnr_config(struct atomisp_sub_device *asd,
+ struct atomisp_css_xnr_config *xnr_config);
+
+void atomisp_css_set_cnr_config(struct atomisp_sub_device *asd,
+ struct atomisp_css_cnr_config *cnr_config);
+
+void atomisp_css_set_ctc_config(struct atomisp_sub_device *asd,
+ struct atomisp_css_ctc_config *ctc_config);
+
+void atomisp_css_set_yuv2rgb_cc_config(struct atomisp_sub_device *asd,
+ struct atomisp_css_cc_config *yuv2rgb_cc_config);
+
+void atomisp_css_set_rgb2yuv_cc_config(struct atomisp_sub_device *asd,
+ struct atomisp_css_cc_config *rgb2yuv_cc_config);
+
+void atomisp_css_set_xnr_table(struct atomisp_sub_device *asd,
+ struct atomisp_css_xnr_table *xnr_table);
+
+void atomisp_css_set_r_gamma_table(struct atomisp_sub_device *asd,
+ struct atomisp_css_rgb_gamma_table *r_gamma_table);
+
+void atomisp_css_set_g_gamma_table(struct atomisp_sub_device *asd,
+ struct atomisp_css_rgb_gamma_table *g_gamma_table);
+
+void atomisp_css_set_b_gamma_table(struct atomisp_sub_device *asd,
+ struct atomisp_css_rgb_gamma_table *b_gamma_table);
+
+void atomisp_css_set_anr_thres(struct atomisp_sub_device *asd,
+ struct atomisp_css_anr_thres *anr_thres);
+
+int atomisp_css_check_firmware_version(struct atomisp_device *isp);
+
+int atomisp_css_load_firmware(struct atomisp_device *isp);
+
+void atomisp_css_unload_firmware(struct atomisp_device *isp);
+
+void atomisp_css_set_dvs_6axis(struct atomisp_sub_device *asd,
+ struct atomisp_css_dvs_6axis *dvs_6axis);
+
+unsigned int atomisp_css_debug_get_dtrace_level(void);
+
+int atomisp_css_debug_dump_isp_binary(void);
+
+int atomisp_css_dump_sp_raw_copy_linecount(bool reduced);
+
+int atomisp_css_dump_blob_infor(void);
+
+void atomisp_css_set_isp_config_id(struct atomisp_sub_device *asd,
+ uint32_t isp_config_id);
+
+void atomisp_css_set_isp_config_applied_frame(struct atomisp_sub_device *asd,
+ struct atomisp_css_frame *output_frame);
+
+int atomisp_get_css_dbgfunc(void);
+
+int atomisp_set_css_dbgfunc(struct atomisp_device *isp, int opt);
+struct atomisp_css_dvs_grid_info *atomisp_css_get_dvs_grid_info(
+ struct atomisp_css_grid_info *grid_info);
+#endif
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_ioctl32.c b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_ioctl32.c
new file mode 100644
index 000000000000..0592ac1f2832
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_ioctl32.c
@@ -0,0 +1,1263 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2013 Intel Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+#ifdef CONFIG_COMPAT
+#include <linux/compat.h>
+
+#include <linux/videodev2.h>
+
+#include "atomisp_internal.h"
+#include "atomisp_compat.h"
+#include "atomisp_compat_ioctl32.h"
+
+static int get_atomisp_histogram32(struct atomisp_histogram *kp,
+ struct atomisp_histogram32 __user *up)
+{
+ compat_uptr_t tmp;
+
+ if (!access_ok(VERIFY_READ, up, sizeof(struct atomisp_histogram32)) ||
+ get_user(kp->num_elements, &up->num_elements) ||
+ get_user(tmp, &up->data))
+ return -EFAULT;
+
+ kp->data = compat_ptr(tmp);
+ return 0;
+}
+
+static int put_atomisp_histogram32(struct atomisp_histogram *kp,
+ struct atomisp_histogram32 __user *up)
+{
+ compat_uptr_t tmp = (compat_uptr_t)((uintptr_t)kp->data);
+
+ if (!access_ok(VERIFY_WRITE, up, sizeof(struct atomisp_histogram32)) ||
+ put_user(kp->num_elements, &up->num_elements) ||
+ put_user(tmp, &up->data))
+ return -EFAULT;
+
+ return 0;
+}
+
+static inline int get_v4l2_pix_format(struct v4l2_pix_format *kp,
+ struct v4l2_pix_format __user *up)
+{
+ if (copy_from_user(kp, up, sizeof(struct v4l2_pix_format)))
+ return -EFAULT;
+ return 0;
+}
+
+static inline int put_v4l2_pix_format(struct v4l2_pix_format *kp,
+ struct v4l2_pix_format __user *up)
+{
+ if (copy_to_user(up, kp, sizeof(struct v4l2_pix_format)))
+ return -EFAULT;
+ return 0;
+}
+
+static int get_v4l2_framebuffer32(struct v4l2_framebuffer *kp,
+ struct v4l2_framebuffer32 __user *up)
+{
+ compat_uptr_t tmp;
+
+ if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_framebuffer32)) ||
+ get_user(tmp, &up->base) ||
+ get_user(kp->capability, &up->capability) ||
+ get_user(kp->flags, &up->flags))
+ return -EFAULT;
+
+ kp->base = compat_ptr(tmp);
+	return get_v4l2_pix_format((struct v4l2_pix_format *)&kp->fmt,
+				   &up->fmt);
+}
+
+static int get_atomisp_dis_statistics32(struct atomisp_dis_statistics *kp,
+ struct atomisp_dis_statistics32 __user *up)
+{
+ compat_uptr_t hor_prod_odd_real;
+ compat_uptr_t hor_prod_odd_imag;
+ compat_uptr_t hor_prod_even_real;
+ compat_uptr_t hor_prod_even_imag;
+ compat_uptr_t ver_prod_odd_real;
+ compat_uptr_t ver_prod_odd_imag;
+ compat_uptr_t ver_prod_even_real;
+ compat_uptr_t ver_prod_even_imag;
+
+ if (!access_ok(VERIFY_READ, up,
+ sizeof(struct atomisp_dis_statistics32)) ||
+ copy_from_user(kp, up, sizeof(struct atomisp_dvs_grid_info)) ||
+ get_user(hor_prod_odd_real,
+ &up->dvs2_stat.hor_prod.odd_real) ||
+ get_user(hor_prod_odd_imag,
+ &up->dvs2_stat.hor_prod.odd_imag) ||
+ get_user(hor_prod_even_real,
+ &up->dvs2_stat.hor_prod.even_real) ||
+ get_user(hor_prod_even_imag,
+ &up->dvs2_stat.hor_prod.even_imag) ||
+ get_user(ver_prod_odd_real,
+ &up->dvs2_stat.ver_prod.odd_real) ||
+ get_user(ver_prod_odd_imag,
+ &up->dvs2_stat.ver_prod.odd_imag) ||
+ get_user(ver_prod_even_real,
+ &up->dvs2_stat.ver_prod.even_real) ||
+ get_user(ver_prod_even_imag,
+ &up->dvs2_stat.ver_prod.even_imag) ||
+ get_user(kp->exp_id, &up->exp_id))
+ return -EFAULT;
+
+ kp->dvs2_stat.hor_prod.odd_real = compat_ptr(hor_prod_odd_real);
+ kp->dvs2_stat.hor_prod.odd_imag = compat_ptr(hor_prod_odd_imag);
+ kp->dvs2_stat.hor_prod.even_real = compat_ptr(hor_prod_even_real);
+ kp->dvs2_stat.hor_prod.even_imag = compat_ptr(hor_prod_even_imag);
+ kp->dvs2_stat.ver_prod.odd_real = compat_ptr(ver_prod_odd_real);
+ kp->dvs2_stat.ver_prod.odd_imag = compat_ptr(ver_prod_odd_imag);
+ kp->dvs2_stat.ver_prod.even_real = compat_ptr(ver_prod_even_real);
+ kp->dvs2_stat.ver_prod.even_imag = compat_ptr(ver_prod_even_imag);
+ return 0;
+}
+
+static int put_atomisp_dis_statistics32(struct atomisp_dis_statistics *kp,
+ struct atomisp_dis_statistics32 __user *up)
+{
+ compat_uptr_t hor_prod_odd_real =
+ (compat_uptr_t)((uintptr_t)kp->dvs2_stat.hor_prod.odd_real);
+ compat_uptr_t hor_prod_odd_imag =
+ (compat_uptr_t)((uintptr_t)kp->dvs2_stat.hor_prod.odd_imag);
+ compat_uptr_t hor_prod_even_real =
+ (compat_uptr_t)((uintptr_t)kp->dvs2_stat.hor_prod.even_real);
+ compat_uptr_t hor_prod_even_imag =
+ (compat_uptr_t)((uintptr_t)kp->dvs2_stat.hor_prod.even_imag);
+ compat_uptr_t ver_prod_odd_real =
+ (compat_uptr_t)((uintptr_t)kp->dvs2_stat.ver_prod.odd_real);
+ compat_uptr_t ver_prod_odd_imag =
+ (compat_uptr_t)((uintptr_t)kp->dvs2_stat.ver_prod.odd_imag);
+ compat_uptr_t ver_prod_even_real =
+ (compat_uptr_t)((uintptr_t)kp->dvs2_stat.ver_prod.even_real);
+ compat_uptr_t ver_prod_even_imag =
+ (compat_uptr_t)((uintptr_t)kp->dvs2_stat.ver_prod.even_imag);
+
+ if (!access_ok(VERIFY_WRITE, up,
+ sizeof(struct atomisp_dis_statistics32)) ||
+ copy_to_user(up, kp, sizeof(struct atomisp_dvs_grid_info)) ||
+ put_user(hor_prod_odd_real,
+ &up->dvs2_stat.hor_prod.odd_real) ||
+ put_user(hor_prod_odd_imag,
+ &up->dvs2_stat.hor_prod.odd_imag) ||
+ put_user(hor_prod_even_real,
+ &up->dvs2_stat.hor_prod.even_real) ||
+ put_user(hor_prod_even_imag,
+ &up->dvs2_stat.hor_prod.even_imag) ||
+ put_user(ver_prod_odd_real,
+ &up->dvs2_stat.ver_prod.odd_real) ||
+ put_user(ver_prod_odd_imag,
+ &up->dvs2_stat.ver_prod.odd_imag) ||
+ put_user(ver_prod_even_real,
+ &up->dvs2_stat.ver_prod.even_real) ||
+ put_user(ver_prod_even_imag,
+ &up->dvs2_stat.ver_prod.even_imag) ||
+ put_user(kp->exp_id, &up->exp_id))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int get_atomisp_dis_coefficients32(struct atomisp_dis_coefficients *kp,
+ struct atomisp_dis_coefficients32 __user *up)
+{
+ compat_uptr_t hor_coefs_odd_real;
+ compat_uptr_t hor_coefs_odd_imag;
+ compat_uptr_t hor_coefs_even_real;
+ compat_uptr_t hor_coefs_even_imag;
+ compat_uptr_t ver_coefs_odd_real;
+ compat_uptr_t ver_coefs_odd_imag;
+ compat_uptr_t ver_coefs_even_real;
+ compat_uptr_t ver_coefs_even_imag;
+
+ if (!access_ok(VERIFY_READ, up,
+ sizeof(struct atomisp_dis_coefficients32)) ||
+ copy_from_user(kp, up, sizeof(struct atomisp_dvs_grid_info)) ||
+ get_user(hor_coefs_odd_real, &up->hor_coefs.odd_real) ||
+ get_user(hor_coefs_odd_imag, &up->hor_coefs.odd_imag) ||
+ get_user(hor_coefs_even_real, &up->hor_coefs.even_real) ||
+ get_user(hor_coefs_even_imag, &up->hor_coefs.even_imag) ||
+ get_user(ver_coefs_odd_real, &up->ver_coefs.odd_real) ||
+ get_user(ver_coefs_odd_imag, &up->ver_coefs.odd_imag) ||
+ get_user(ver_coefs_even_real, &up->ver_coefs.even_real) ||
+ get_user(ver_coefs_even_imag, &up->ver_coefs.even_imag))
+ return -EFAULT;
+
+ kp->hor_coefs.odd_real = compat_ptr(hor_coefs_odd_real);
+ kp->hor_coefs.odd_imag = compat_ptr(hor_coefs_odd_imag);
+ kp->hor_coefs.even_real = compat_ptr(hor_coefs_even_real);
+ kp->hor_coefs.even_imag = compat_ptr(hor_coefs_even_imag);
+ kp->ver_coefs.odd_real = compat_ptr(ver_coefs_odd_real);
+ kp->ver_coefs.odd_imag = compat_ptr(ver_coefs_odd_imag);
+ kp->ver_coefs.even_real = compat_ptr(ver_coefs_even_real);
+ kp->ver_coefs.even_imag = compat_ptr(ver_coefs_even_imag);
+ return 0;
+}
+
+static int get_atomisp_dvs_6axis_config32(struct atomisp_dvs_6axis_config *kp,
+ struct atomisp_dvs_6axis_config32 __user *up)
+{
+	compat_uptr_t xcoords_y;
+ compat_uptr_t ycoords_y;
+ compat_uptr_t xcoords_uv;
+ compat_uptr_t ycoords_uv;
+
+ if (!access_ok(VERIFY_READ, up,
+ sizeof(struct atomisp_dvs_6axis_config32)) ||
+ get_user(kp->exp_id, &up->exp_id) ||
+ get_user(kp->width_y, &up->width_y) ||
+ get_user(kp->height_y, &up->height_y) ||
+ get_user(kp->width_uv, &up->width_uv) ||
+ get_user(kp->height_uv, &up->height_uv) ||
+ get_user(xcoords_y, &up->xcoords_y) ||
+ get_user(ycoords_y, &up->ycoords_y) ||
+ get_user(xcoords_uv, &up->xcoords_uv) ||
+ get_user(ycoords_uv, &up->ycoords_uv))
+ return -EFAULT;
+
+ kp->xcoords_y = compat_ptr(xcoords_y);
+ kp->ycoords_y = compat_ptr(ycoords_y);
+ kp->xcoords_uv = compat_ptr(xcoords_uv);
+ kp->ycoords_uv = compat_ptr(ycoords_uv);
+ return 0;
+}
+
+static int get_atomisp_3a_statistics32(struct atomisp_3a_statistics *kp,
+ struct atomisp_3a_statistics32 __user *up)
+{
+ compat_uptr_t data;
+ compat_uptr_t rgby_data;
+
+ if (!access_ok(VERIFY_READ, up,
+ sizeof(struct atomisp_3a_statistics32)) ||
+ copy_from_user(kp, up, sizeof(struct atomisp_grid_info)) ||
+ get_user(rgby_data, &up->rgby_data) ||
+ get_user(data, &up->data) ||
+ get_user(kp->exp_id, &up->exp_id) ||
+ get_user(kp->isp_config_id, &up->isp_config_id))
+ return -EFAULT;
+
+ kp->data = compat_ptr(data);
+ kp->rgby_data = compat_ptr(rgby_data);
+
+ return 0;
+}
+
+static int put_atomisp_3a_statistics32(struct atomisp_3a_statistics *kp,
+ struct atomisp_3a_statistics32 __user *up)
+{
+ compat_uptr_t data = (compat_uptr_t)((uintptr_t)kp->data);
+ compat_uptr_t rgby_data = (compat_uptr_t)((uintptr_t)kp->rgby_data);
+
+ if (!access_ok(VERIFY_WRITE, up,
+ sizeof(struct atomisp_3a_statistics32)) ||
+ copy_to_user(up, kp, sizeof(struct atomisp_grid_info)) ||
+ put_user(rgby_data, &up->rgby_data) ||
+ put_user(data, &up->data) ||
+ put_user(kp->exp_id, &up->exp_id) ||
+ put_user(kp->isp_config_id, &up->isp_config_id))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int get_atomisp_metadata_stat32(struct atomisp_metadata *kp,
+ struct atomisp_metadata32 __user *up)
+{
+ compat_uptr_t data;
+ compat_uptr_t effective_width;
+
+ if (!access_ok(VERIFY_READ, up,
+ sizeof(struct atomisp_metadata32)) ||
+ get_user(data, &up->data) ||
+ get_user(kp->width, &up->width) ||
+ get_user(kp->height, &up->height) ||
+ get_user(kp->stride, &up->stride) ||
+ get_user(kp->exp_id, &up->exp_id) ||
+ get_user(effective_width, &up->effective_width))
+ return -EFAULT;
+
+ kp->data = compat_ptr(data);
+ kp->effective_width = compat_ptr(effective_width);
+ return 0;
+}
+
+static int put_atomisp_metadata_stat32(struct atomisp_metadata *kp,
+ struct atomisp_metadata32 __user *up)
+{
+ compat_uptr_t data = (compat_uptr_t)((uintptr_t)kp->data);
+ compat_uptr_t effective_width =
+		(compat_uptr_t)((uintptr_t)kp->effective_width);
+
+	if (!access_ok(VERIFY_WRITE, up,
+ sizeof(struct atomisp_metadata32)) ||
+ put_user(data, &up->data) ||
+ put_user(kp->width, &up->width) ||
+ put_user(kp->height, &up->height) ||
+ put_user(kp->stride, &up->stride) ||
+ put_user(kp->exp_id, &up->exp_id) ||
+ put_user(effective_width, &up->effective_width))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int put_atomisp_metadata_by_type_stat32(
+ struct atomisp_metadata_with_type *kp,
+ struct atomisp_metadata_with_type32 __user *up)
+{
+ compat_uptr_t data = (compat_uptr_t)((uintptr_t)kp->data);
+ compat_uptr_t effective_width =
+		(compat_uptr_t)((uintptr_t)kp->effective_width);
+
+	if (!access_ok(VERIFY_WRITE, up,
+ sizeof(struct atomisp_metadata_with_type32)) ||
+ put_user(data, &up->data) ||
+ put_user(kp->width, &up->width) ||
+ put_user(kp->height, &up->height) ||
+ put_user(kp->stride, &up->stride) ||
+ put_user(kp->exp_id, &up->exp_id) ||
+ put_user(effective_width, &up->effective_width) ||
+ put_user(kp->type, &up->type))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int get_atomisp_metadata_by_type_stat32(
+ struct atomisp_metadata_with_type *kp,
+ struct atomisp_metadata_with_type32 __user *up)
+{
+ compat_uptr_t data;
+ compat_uptr_t effective_width;
+
+ if (!access_ok(VERIFY_READ, up,
+ sizeof(struct atomisp_metadata_with_type32)) ||
+ get_user(data, &up->data) ||
+ get_user(kp->width, &up->width) ||
+ get_user(kp->height, &up->height) ||
+ get_user(kp->stride, &up->stride) ||
+ get_user(kp->exp_id, &up->exp_id) ||
+ get_user(effective_width, &up->effective_width) ||
+ get_user(kp->type, &up->type))
+ return -EFAULT;
+
+ kp->data = compat_ptr(data);
+ kp->effective_width = compat_ptr(effective_width);
+ return 0;
+}
+
+static int get_atomisp_morph_table32(struct atomisp_morph_table *kp,
+ struct atomisp_morph_table32 __user *up)
+{
+ unsigned int n = ATOMISP_MORPH_TABLE_NUM_PLANES;
+
+ if (!access_ok(VERIFY_READ, up,
+ sizeof(struct atomisp_morph_table32)) ||
+ get_user(kp->enabled, &up->enabled) ||
+ get_user(kp->width, &up->width) ||
+ get_user(kp->height, &up->height))
+ return -EFAULT;
+
+ while (n-- > 0) {
+ uintptr_t *coord_kp = (uintptr_t *)&kp->coordinates_x[n];
+
+ if (get_user((*coord_kp), &up->coordinates_x[n]))
+ return -EFAULT;
+
+ coord_kp = (uintptr_t *)&kp->coordinates_y[n];
+ if (get_user((*coord_kp), &up->coordinates_y[n]))
+ return -EFAULT;
+ }
+ return 0;
+}
+
+static int put_atomisp_morph_table32(struct atomisp_morph_table *kp,
+ struct atomisp_morph_table32 __user *up)
+{
+ unsigned int n = ATOMISP_MORPH_TABLE_NUM_PLANES;
+
+ if (!access_ok(VERIFY_WRITE, up,
+ sizeof(struct atomisp_morph_table32)) ||
+ put_user(kp->enabled, &up->enabled) ||
+ put_user(kp->width, &up->width) ||
+ put_user(kp->height, &up->height))
+ return -EFAULT;
+
+ while (n-- > 0) {
+ uintptr_t *coord_kp = (uintptr_t *)&kp->coordinates_x[n];
+
+ if (put_user((*coord_kp), &up->coordinates_x[n]))
+ return -EFAULT;
+
+ coord_kp = (uintptr_t *)&kp->coordinates_y[n];
+ if (put_user((*coord_kp), &up->coordinates_y[n]))
+ return -EFAULT;
+ }
+ return 0;
+}
+
+static int get_atomisp_overlay32(struct atomisp_overlay *kp,
+ struct atomisp_overlay32 __user *up)
+{
+	compat_uptr_t frame;
+
+	if (!access_ok(VERIFY_READ, up, sizeof(struct atomisp_overlay32)) ||
+ get_user(frame, &up->frame) ||
+ get_user(kp->bg_y, &up->bg_y) ||
+ get_user(kp->bg_u, &up->bg_u) ||
+ get_user(kp->bg_v, &up->bg_v) ||
+ get_user(kp->blend_input_perc_y, &up->blend_input_perc_y) ||
+ get_user(kp->blend_input_perc_u, &up->blend_input_perc_u) ||
+ get_user(kp->blend_input_perc_v, &up->blend_input_perc_v) ||
+	    get_user(kp->blend_overlay_perc_y,
+		     &up->blend_overlay_perc_y) ||
+	    get_user(kp->blend_overlay_perc_u,
+		     &up->blend_overlay_perc_u) ||
+	    get_user(kp->blend_overlay_perc_v,
+		     &up->blend_overlay_perc_v) ||
+	    get_user(kp->overlay_start_x, &up->overlay_start_x) ||
+	    get_user(kp->overlay_start_y, &up->overlay_start_y))
+ return -EFAULT;
+
+ kp->frame = compat_ptr(frame);
+ return 0;
+}
+
+static int put_atomisp_overlay32(struct atomisp_overlay *kp,
+ struct atomisp_overlay32 __user *up)
+{
+ compat_uptr_t frame = (compat_uptr_t)((uintptr_t)kp->frame);
+
+ if (!access_ok(VERIFY_WRITE, up, sizeof(struct atomisp_overlay32)) ||
+ put_user(frame, &up->frame) ||
+ put_user(kp->bg_y, &up->bg_y) ||
+ put_user(kp->bg_u, &up->bg_u) ||
+ put_user(kp->bg_v, &up->bg_v) ||
+ put_user(kp->blend_input_perc_y, &up->blend_input_perc_y) ||
+ put_user(kp->blend_input_perc_u, &up->blend_input_perc_u) ||
+ put_user(kp->blend_input_perc_v, &up->blend_input_perc_v) ||
+	    put_user(kp->blend_overlay_perc_y,
+		     &up->blend_overlay_perc_y) ||
+	    put_user(kp->blend_overlay_perc_u,
+		     &up->blend_overlay_perc_u) ||
+	    put_user(kp->blend_overlay_perc_v,
+		     &up->blend_overlay_perc_v) ||
+	    put_user(kp->overlay_start_x, &up->overlay_start_x) ||
+	    put_user(kp->overlay_start_y, &up->overlay_start_y))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int get_atomisp_calibration_group32(
+ struct atomisp_calibration_group *kp,
+ struct atomisp_calibration_group32 __user *up)
+{
+ compat_uptr_t calb_grp_values;
+
+ if (!access_ok(VERIFY_READ, up,
+ sizeof(struct atomisp_calibration_group32)) ||
+ get_user(kp->size, &up->size) ||
+ get_user(kp->type, &up->type) ||
+ get_user(calb_grp_values, &up->calb_grp_values))
+ return -EFAULT;
+
+ kp->calb_grp_values = compat_ptr(calb_grp_values);
+ return 0;
+}
+
+static int put_atomisp_calibration_group32(
+ struct atomisp_calibration_group *kp,
+ struct atomisp_calibration_group32 __user *up)
+{
+ compat_uptr_t calb_grp_values =
+ (compat_uptr_t)((uintptr_t)kp->calb_grp_values);
+
+ if (!access_ok(VERIFY_WRITE, up,
+ sizeof(struct atomisp_calibration_group32)) ||
+ put_user(kp->size, &up->size) ||
+ put_user(kp->type, &up->type) ||
+ put_user(calb_grp_values, &up->calb_grp_values))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int get_atomisp_acc_fw_load32(struct atomisp_acc_fw_load *kp,
+ struct atomisp_acc_fw_load32 __user *up)
+{
+ compat_uptr_t data;
+
+ if (!access_ok(VERIFY_READ, up,
+ sizeof(struct atomisp_acc_fw_load32)) ||
+ get_user(kp->size, &up->size) ||
+ get_user(kp->fw_handle, &up->fw_handle) ||
+ get_user(data, &up->data))
+ return -EFAULT;
+
+ kp->data = compat_ptr(data);
+ return 0;
+}
+
+static int put_atomisp_acc_fw_load32(struct atomisp_acc_fw_load *kp,
+ struct atomisp_acc_fw_load32 __user *up)
+{
+ compat_uptr_t data = (compat_uptr_t)((uintptr_t)kp->data);
+
+ if (!access_ok(VERIFY_WRITE, up,
+ sizeof(struct atomisp_acc_fw_load32)) ||
+ put_user(kp->size, &up->size) ||
+ put_user(kp->fw_handle, &up->fw_handle) ||
+ put_user(data, &up->data))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int get_atomisp_acc_fw_arg32(struct atomisp_acc_fw_arg *kp,
+ struct atomisp_acc_fw_arg32 __user *up)
+{
+ compat_uptr_t value;
+
+ if (!access_ok(VERIFY_READ, up, sizeof(struct atomisp_acc_fw_arg32)) ||
+ get_user(kp->fw_handle, &up->fw_handle) ||
+ get_user(kp->index, &up->index) ||
+ get_user(value, &up->value) ||
+ get_user(kp->size, &up->size))
+ return -EFAULT;
+
+ kp->value = compat_ptr(value);
+ return 0;
+}
+
+static int put_atomisp_acc_fw_arg32(struct atomisp_acc_fw_arg *kp,
+ struct atomisp_acc_fw_arg32 __user *up)
+{
+ compat_uptr_t value = (compat_uptr_t)((uintptr_t)kp->value);
+
+ if (!access_ok(VERIFY_WRITE, up, sizeof(struct atomisp_acc_fw_arg32)) ||
+ put_user(kp->fw_handle, &up->fw_handle) ||
+ put_user(kp->index, &up->index) ||
+ put_user(value, &up->value) ||
+ put_user(kp->size, &up->size))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int get_v4l2_private_int_data32(struct v4l2_private_int_data *kp,
+ struct v4l2_private_int_data32 __user *up)
+{
+ compat_uptr_t data;
+
+ if (!access_ok(VERIFY_READ, up,
+ sizeof(struct v4l2_private_int_data32)) ||
+ get_user(kp->size, &up->size) ||
+ get_user(data, &up->data) ||
+ get_user(kp->reserved[0], &up->reserved[0]) ||
+ get_user(kp->reserved[1], &up->reserved[1]))
+ return -EFAULT;
+
+ kp->data = compat_ptr(data);
+ return 0;
+}
+
+static int put_v4l2_private_int_data32(struct v4l2_private_int_data *kp,
+ struct v4l2_private_int_data32 __user *up)
+{
+ compat_uptr_t data = (compat_uptr_t)((uintptr_t)kp->data);
+
+ if (!access_ok(VERIFY_WRITE, up,
+ sizeof(struct v4l2_private_int_data32)) ||
+ put_user(kp->size, &up->size) ||
+ put_user(data, &up->data) ||
+ put_user(kp->reserved[0], &up->reserved[0]) ||
+ put_user(kp->reserved[1], &up->reserved[1]))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int get_atomisp_shading_table32(struct atomisp_shading_table *kp,
+ struct atomisp_shading_table32 __user *up)
+{
+ unsigned int n = ATOMISP_NUM_SC_COLORS;
+
+ if (!access_ok(VERIFY_READ, up,
+ sizeof(struct atomisp_shading_table32)) ||
+ get_user(kp->enable, &up->enable) ||
+ get_user(kp->sensor_width, &up->sensor_width) ||
+ get_user(kp->sensor_height, &up->sensor_height) ||
+ get_user(kp->width, &up->width) ||
+ get_user(kp->height, &up->height) ||
+ get_user(kp->fraction_bits, &up->fraction_bits))
+ return -EFAULT;
+
+ while (n-- > 0) {
+ uintptr_t *data_p = (uintptr_t *)&kp->data[n];
+
+ if (get_user((*data_p), &up->data[n]))
+ return -EFAULT;
+ }
+ return 0;
+}
+
+static int get_atomisp_acc_map32(struct atomisp_acc_map *kp,
+ struct atomisp_acc_map32 __user *up)
+{
+ compat_uptr_t user_ptr;
+
+ if (!access_ok(VERIFY_READ, up, sizeof(struct atomisp_acc_map32)) ||
+ get_user(kp->flags, &up->flags) ||
+ get_user(kp->length, &up->length) ||
+ get_user(user_ptr, &up->user_ptr) ||
+ get_user(kp->css_ptr, &up->css_ptr) ||
+ get_user(kp->reserved[0], &up->reserved[0]) ||
+ get_user(kp->reserved[1], &up->reserved[1]) ||
+ get_user(kp->reserved[2], &up->reserved[2]) ||
+ get_user(kp->reserved[3], &up->reserved[3]))
+ return -EFAULT;
+
+ kp->user_ptr = compat_ptr(user_ptr);
+ return 0;
+}
+
+static int put_atomisp_acc_map32(struct atomisp_acc_map *kp,
+ struct atomisp_acc_map32 __user *up)
+{
+ compat_uptr_t user_ptr = (compat_uptr_t)((uintptr_t)kp->user_ptr);
+
+ if (!access_ok(VERIFY_WRITE, up, sizeof(struct atomisp_acc_map32)) ||
+ put_user(kp->flags, &up->flags) ||
+ put_user(kp->length, &up->length) ||
+ put_user(user_ptr, &up->user_ptr) ||
+ put_user(kp->css_ptr, &up->css_ptr) ||
+ put_user(kp->reserved[0], &up->reserved[0]) ||
+ put_user(kp->reserved[1], &up->reserved[1]) ||
+ put_user(kp->reserved[2], &up->reserved[2]) ||
+ put_user(kp->reserved[3], &up->reserved[3]))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int get_atomisp_acc_s_mapped_arg32(struct atomisp_acc_s_mapped_arg *kp,
+ struct atomisp_acc_s_mapped_arg32 __user *up)
+{
+ if (!access_ok(VERIFY_READ, up,
+ sizeof(struct atomisp_acc_s_mapped_arg32)) ||
+ get_user(kp->fw_handle, &up->fw_handle) ||
+ get_user(kp->memory, &up->memory) ||
+ get_user(kp->length, &up->length) ||
+ get_user(kp->css_ptr, &up->css_ptr))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int put_atomisp_acc_s_mapped_arg32(struct atomisp_acc_s_mapped_arg *kp,
+ struct atomisp_acc_s_mapped_arg32 __user *up)
+{
+ if (!access_ok(VERIFY_WRITE, up,
+ sizeof(struct atomisp_acc_s_mapped_arg32)) ||
+ put_user(kp->fw_handle, &up->fw_handle) ||
+ put_user(kp->memory, &up->memory) ||
+ put_user(kp->length, &up->length) ||
+ put_user(kp->css_ptr, &up->css_ptr))
+ return -EFAULT;
+
+ return 0;
+}
+
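+/*
+ * struct atomisp_parameters carries a long run of nested user pointers.
+ * Widen the leading block of compat_uptr_t members in place, then repack
+ * the four nested tables (shading, morph, DVS2 coefficients, DVS 6-axis)
+ * into a scratch area from compat_alloc_user_space() so the native ioctl
+ * sees ordinary user pointers.
+ */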
+static int get_atomisp_parameters32(struct atomisp_parameters *kp,
+ struct atomisp_parameters32 __user *up)
+{
+ int n = offsetof(struct atomisp_parameters32, output_frame) /
+ sizeof(compat_uptr_t);
+ unsigned int size, offset = 0;
+ void __user *user_ptr;
+#ifdef ISP2401
+ unsigned int stp, mtp, dcp, dscp = 0;
+
+#endif
+ if (!access_ok(VERIFY_READ, up, sizeof(struct atomisp_parameters32)))
+ return -EFAULT;
+
+ while (n >= 0) {
+ compat_uptr_t *src = (compat_uptr_t *)up + n;
+ uintptr_t *dst = (uintptr_t *)kp + n;
+
+ if (get_user((*dst), src))
+ return -EFAULT;
+ n--;
+ }
+ if (get_user(kp->isp_config_id, &up->isp_config_id) ||
+#ifndef ISP2401
+ get_user(kp->per_frame_setting, &up->per_frame_setting))
+#else
+ get_user(kp->per_frame_setting, &up->per_frame_setting) ||
+ get_user(stp, &up->shading_table) ||
+ get_user(mtp, &up->morph_table) ||
+ get_user(dcp, &up->dvs2_coefs) ||
+ get_user(dscp, &up->dvs_6axis_config))
+#endif
+ return -EFAULT;
+
+ {
+ union {
+ struct atomisp_shading_table shading_table;
+ struct atomisp_morph_table morph_table;
+ struct atomisp_dis_coefficients dvs2_coefs;
+ struct atomisp_dvs_6axis_config dvs_6axis_config;
+ } karg;
+
+ size = sizeof(struct atomisp_shading_table) +
+ sizeof(struct atomisp_morph_table) +
+ sizeof(struct atomisp_dis_coefficients) +
+ sizeof(struct atomisp_dvs_6axis_config);
+ user_ptr = compat_alloc_user_space(size);
+
+ /* handle shading table */
+#ifndef ISP2401
+ if (up->shading_table != 0) {
+#else
+ if (stp != 0) {
+#endif
+ if (get_atomisp_shading_table32(&karg.shading_table,
+ (struct atomisp_shading_table32 __user *)
+#ifndef ISP2401
+ (uintptr_t)up->shading_table))
+#else
+ (uintptr_t)stp))
+#endif
+ return -EFAULT;
+
+ kp->shading_table = user_ptr + offset;
+ offset = sizeof(struct atomisp_shading_table);
+ if (!kp->shading_table)
+ return -EFAULT;
+
+ if (copy_to_user(kp->shading_table,
+ &karg.shading_table,
+ sizeof(struct atomisp_shading_table)))
+ return -EFAULT;
+ }
+
+ /* handle morph table */
+#ifndef ISP2401
+ if (up->morph_table != 0) {
+#else
+ if (mtp != 0) {
+#endif
+ if (get_atomisp_morph_table32(&karg.morph_table,
+ (struct atomisp_morph_table32 __user *)
+#ifndef ISP2401
+ (uintptr_t)up->morph_table))
+#else
+ (uintptr_t)mtp))
+#endif
+ return -EFAULT;
+
+ kp->morph_table = user_ptr + offset;
+ offset += sizeof(struct atomisp_morph_table);
+ if (!kp->morph_table)
+ return -EFAULT;
+
+ if (copy_to_user(kp->morph_table, &karg.morph_table,
+ sizeof(struct atomisp_morph_table)))
+ return -EFAULT;
+ }
+
+ /* handle dvs2 coefficients */
+#ifndef ISP2401
+ if (up->dvs2_coefs != 0) {
+#else
+ if (dcp != 0) {
+#endif
+ if (get_atomisp_dis_coefficients32(&karg.dvs2_coefs,
+ (struct atomisp_dis_coefficients32 __user *)
+#ifndef ISP2401
+ (uintptr_t)up->dvs2_coefs))
+#else
+ (uintptr_t)dcp))
+#endif
+ return -EFAULT;
+
+ kp->dvs2_coefs = user_ptr + offset;
+ offset += sizeof(struct atomisp_dis_coefficients);
+ if (!kp->dvs2_coefs)
+ return -EFAULT;
+
+ if (copy_to_user(kp->dvs2_coefs, &karg.dvs2_coefs,
+ sizeof(struct atomisp_dis_coefficients)))
+ return -EFAULT;
+ }
+ /* handle dvs 6axis configuration */
+#ifndef ISP2401
+ if (up->dvs_6axis_config != 0) {
+#else
+ if (dscp != 0) {
+#endif
+ if (get_atomisp_dvs_6axis_config32(&karg.dvs_6axis_config,
+ (struct atomisp_dvs_6axis_config32 __user *)
+#ifndef ISP2401
+ (uintptr_t)up->dvs_6axis_config))
+#else
+ (uintptr_t)dscp))
+#endif
+ return -EFAULT;
+
+ kp->dvs_6axis_config = user_ptr + offset;
+ offset += sizeof(struct atomisp_dvs_6axis_config);
+ if (!kp->dvs_6axis_config)
+ return -EFAULT;
+
+ if (copy_to_user(kp->dvs_6axis_config, &karg.dvs_6axis_config,
+ sizeof(struct atomisp_dvs_6axis_config)))
+ return -EFAULT;
+ }
+ }
+ return 0;
+}
+
+static int get_atomisp_acc_fw_load_to_pipe32(
+ struct atomisp_acc_fw_load_to_pipe *kp,
+ struct atomisp_acc_fw_load_to_pipe32 __user *up)
+{
+	compat_uptr_t data;
+
+	if (!access_ok(VERIFY_READ, up,
+ sizeof(struct atomisp_acc_fw_load_to_pipe32)) ||
+ get_user(kp->flags, &up->flags) ||
+ get_user(kp->fw_handle, &up->fw_handle) ||
+ get_user(kp->size, &up->size) ||
+ get_user(kp->type, &up->type) ||
+ get_user(kp->reserved[0], &up->reserved[0]) ||
+ get_user(kp->reserved[1], &up->reserved[1]) ||
+ get_user(kp->reserved[2], &up->reserved[2]) ||
+ get_user(data, &up->data))
+ return -EFAULT;
+
+ kp->data = compat_ptr(data);
+ return 0;
+}
+
+static int put_atomisp_acc_fw_load_to_pipe32(
+ struct atomisp_acc_fw_load_to_pipe *kp,
+ struct atomisp_acc_fw_load_to_pipe32 __user *up)
+{
+	compat_uptr_t data = (compat_uptr_t)((uintptr_t)kp->data);
+
+	if (!access_ok(VERIFY_WRITE, up,
+ sizeof(struct atomisp_acc_fw_load_to_pipe32)) ||
+ put_user(kp->flags, &up->flags) ||
+ put_user(kp->fw_handle, &up->fw_handle) ||
+ put_user(kp->size, &up->size) ||
+ put_user(kp->type, &up->type) ||
+ put_user(kp->reserved[0], &up->reserved[0]) ||
+ put_user(kp->reserved[1], &up->reserved[1]) ||
+ put_user(kp->reserved[2], &up->reserved[2]) ||
+ put_user(data, &up->data))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int get_atomisp_sensor_ae_bracketing_lut(
+ struct atomisp_sensor_ae_bracketing_lut *kp,
+ struct atomisp_sensor_ae_bracketing_lut32 __user *up)
+{
+	compat_uptr_t lut;
+
+	if (!access_ok(VERIFY_READ, up,
+ sizeof(struct atomisp_sensor_ae_bracketing_lut32)) ||
+ get_user(kp->lut_size, &up->lut_size) ||
+ get_user(lut, &up->lut))
+ return -EFAULT;
+
+ kp->lut = compat_ptr(lut);
+ return 0;
+}
+
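+/* Forward an already-converted command to the driver's native ioctl. */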
+static long native_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ long ret = -ENOIOCTLCMD;
+
+ if (file->f_op->unlocked_ioctl)
+ ret = file->f_op->unlocked_ioctl(file, cmd, arg);
+
+ return ret;
+}
+
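+/*
+ * Convert a 32-bit-only private ioctl: translate the command number,
+ * build a native copy of the argument in karg, issue the native ioctl
+ * under KERNEL_DS, then copy the result back into the 32-bit layout for
+ * commands that return data.
+ */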
+long atomisp_do_compat_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ union {
+ struct atomisp_histogram his;
+ struct atomisp_dis_statistics dis_s;
+ struct atomisp_dis_coefficients dis_c;
+ struct atomisp_dvs_6axis_config dvs_c;
+ struct atomisp_3a_statistics s3a_s;
+ struct atomisp_morph_table mor_t;
+ struct v4l2_framebuffer v4l2_buf;
+ struct atomisp_overlay overlay;
+ struct atomisp_calibration_group cal_grp;
+ struct atomisp_acc_fw_load acc_fw_load;
+ struct atomisp_acc_fw_arg acc_fw_arg;
+ struct v4l2_private_int_data v4l2_pri_data;
+ struct atomisp_shading_table shd_tbl;
+ struct atomisp_acc_map acc_map;
+ struct atomisp_acc_s_mapped_arg acc_map_arg;
+ struct atomisp_parameters param;
+ struct atomisp_acc_fw_load_to_pipe acc_fw_to_pipe;
+ struct atomisp_metadata md;
+ struct atomisp_metadata_with_type md_with_type;
+ struct atomisp_sensor_ae_bracketing_lut lut;
+ } karg;
+ mm_segment_t old_fs;
+ void __user *up = compat_ptr(arg);
+ long err = -ENOIOCTLCMD;
+
+ /* First, convert the command. */
+ switch (cmd) {
+ case ATOMISP_IOC_G_HISTOGRAM32:
+ cmd = ATOMISP_IOC_G_HISTOGRAM;
+ break;
+ case ATOMISP_IOC_S_HISTOGRAM32:
+ cmd = ATOMISP_IOC_S_HISTOGRAM;
+ break;
+ case ATOMISP_IOC_G_DIS_STAT32:
+ cmd = ATOMISP_IOC_G_DIS_STAT;
+ break;
+ case ATOMISP_IOC_S_DIS_COEFS32:
+ cmd = ATOMISP_IOC_S_DIS_COEFS;
+ break;
+ case ATOMISP_IOC_S_DIS_VECTOR32:
+ cmd = ATOMISP_IOC_S_DIS_VECTOR;
+ break;
+ case ATOMISP_IOC_G_3A_STAT32:
+ cmd = ATOMISP_IOC_G_3A_STAT;
+ break;
+ case ATOMISP_IOC_G_ISP_GDC_TAB32:
+ cmd = ATOMISP_IOC_G_ISP_GDC_TAB;
+ break;
+ case ATOMISP_IOC_S_ISP_GDC_TAB32:
+ cmd = ATOMISP_IOC_S_ISP_GDC_TAB;
+ break;
+ case ATOMISP_IOC_S_ISP_FPN_TABLE32:
+ cmd = ATOMISP_IOC_S_ISP_FPN_TABLE;
+ break;
+ case ATOMISP_IOC_G_ISP_OVERLAY32:
+ cmd = ATOMISP_IOC_G_ISP_OVERLAY;
+ break;
+ case ATOMISP_IOC_S_ISP_OVERLAY32:
+ cmd = ATOMISP_IOC_S_ISP_OVERLAY;
+ break;
+ case ATOMISP_IOC_G_SENSOR_CALIBRATION_GROUP32:
+ cmd = ATOMISP_IOC_G_SENSOR_CALIBRATION_GROUP;
+ break;
+ case ATOMISP_IOC_ACC_LOAD32:
+ cmd = ATOMISP_IOC_ACC_LOAD;
+ break;
+ case ATOMISP_IOC_ACC_S_ARG32:
+ cmd = ATOMISP_IOC_ACC_S_ARG;
+ break;
+ case ATOMISP_IOC_G_SENSOR_PRIV_INT_DATA32:
+ cmd = ATOMISP_IOC_G_SENSOR_PRIV_INT_DATA;
+ break;
+ case ATOMISP_IOC_S_ISP_SHD_TAB32:
+ cmd = ATOMISP_IOC_S_ISP_SHD_TAB;
+ break;
+ case ATOMISP_IOC_ACC_DESTAB32:
+ cmd = ATOMISP_IOC_ACC_DESTAB;
+ break;
+ case ATOMISP_IOC_G_MOTOR_PRIV_INT_DATA32:
+ cmd = ATOMISP_IOC_G_MOTOR_PRIV_INT_DATA;
+ break;
+ case ATOMISP_IOC_ACC_MAP32:
+ cmd = ATOMISP_IOC_ACC_MAP;
+ break;
+ case ATOMISP_IOC_ACC_UNMAP32:
+ cmd = ATOMISP_IOC_ACC_UNMAP;
+ break;
+ case ATOMISP_IOC_ACC_S_MAPPED_ARG32:
+ cmd = ATOMISP_IOC_ACC_S_MAPPED_ARG;
+ break;
+ case ATOMISP_IOC_S_PARAMETERS32:
+ cmd = ATOMISP_IOC_S_PARAMETERS;
+ break;
+ case ATOMISP_IOC_ACC_LOAD_TO_PIPE32:
+ cmd = ATOMISP_IOC_ACC_LOAD_TO_PIPE;
+ break;
+ case ATOMISP_IOC_G_METADATA32:
+ cmd = ATOMISP_IOC_G_METADATA;
+ break;
+ case ATOMISP_IOC_G_METADATA_BY_TYPE32:
+ cmd = ATOMISP_IOC_G_METADATA_BY_TYPE;
+ break;
+ case ATOMISP_IOC_S_SENSOR_AE_BRACKETING_LUT32:
+ cmd = ATOMISP_IOC_S_SENSOR_AE_BRACKETING_LUT;
+ break;
+ }
+
+ switch (cmd) {
+ case ATOMISP_IOC_G_HISTOGRAM:
+ case ATOMISP_IOC_S_HISTOGRAM:
+ err = get_atomisp_histogram32(&karg.his, up);
+ break;
+ case ATOMISP_IOC_G_DIS_STAT:
+ err = get_atomisp_dis_statistics32(&karg.dis_s, up);
+ break;
+ case ATOMISP_IOC_S_DIS_COEFS:
+ err = get_atomisp_dis_coefficients32(&karg.dis_c, up);
+ break;
+ case ATOMISP_IOC_S_DIS_VECTOR:
+ err = get_atomisp_dvs_6axis_config32(&karg.dvs_c, up);
+ break;
+ case ATOMISP_IOC_G_3A_STAT:
+ err = get_atomisp_3a_statistics32(&karg.s3a_s, up);
+ break;
+ case ATOMISP_IOC_G_ISP_GDC_TAB:
+ case ATOMISP_IOC_S_ISP_GDC_TAB:
+ err = get_atomisp_morph_table32(&karg.mor_t, up);
+ break;
+ case ATOMISP_IOC_S_ISP_FPN_TABLE:
+ err = get_v4l2_framebuffer32(&karg.v4l2_buf, up);
+ break;
+ case ATOMISP_IOC_G_ISP_OVERLAY:
+ case ATOMISP_IOC_S_ISP_OVERLAY:
+ err = get_atomisp_overlay32(&karg.overlay, up);
+ break;
+ case ATOMISP_IOC_G_SENSOR_CALIBRATION_GROUP:
+ err = get_atomisp_calibration_group32(&karg.cal_grp, up);
+ break;
+ case ATOMISP_IOC_ACC_LOAD:
+ err = get_atomisp_acc_fw_load32(&karg.acc_fw_load, up);
+ break;
+ case ATOMISP_IOC_ACC_S_ARG:
+ case ATOMISP_IOC_ACC_DESTAB:
+ err = get_atomisp_acc_fw_arg32(&karg.acc_fw_arg, up);
+ break;
+ case ATOMISP_IOC_G_SENSOR_PRIV_INT_DATA:
+ case ATOMISP_IOC_G_MOTOR_PRIV_INT_DATA:
+ err = get_v4l2_private_int_data32(&karg.v4l2_pri_data, up);
+ break;
+ case ATOMISP_IOC_S_ISP_SHD_TAB:
+ err = get_atomisp_shading_table32(&karg.shd_tbl, up);
+ break;
+ case ATOMISP_IOC_ACC_MAP:
+ case ATOMISP_IOC_ACC_UNMAP:
+ err = get_atomisp_acc_map32(&karg.acc_map, up);
+ break;
+ case ATOMISP_IOC_ACC_S_MAPPED_ARG:
+ err = get_atomisp_acc_s_mapped_arg32(&karg.acc_map_arg, up);
+ break;
+ case ATOMISP_IOC_S_PARAMETERS:
+ err = get_atomisp_parameters32(&karg.param, up);
+ break;
+ case ATOMISP_IOC_ACC_LOAD_TO_PIPE:
+ err = get_atomisp_acc_fw_load_to_pipe32(&karg.acc_fw_to_pipe,
+ up);
+ break;
+ case ATOMISP_IOC_G_METADATA:
+ err = get_atomisp_metadata_stat32(&karg.md, up);
+ break;
+ case ATOMISP_IOC_G_METADATA_BY_TYPE:
+ err = get_atomisp_metadata_by_type_stat32(&karg.md_with_type,
+ up);
+ break;
+ case ATOMISP_IOC_S_SENSOR_AE_BRACKETING_LUT:
+ err = get_atomisp_sensor_ae_bracketing_lut(&karg.lut, up);
+ break;
+ }
+ if (err)
+ return err;
+
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+ err = native_ioctl(file, cmd, (unsigned long)&karg);
+ set_fs(old_fs);
+ if (err)
+ return err;
+
+ switch (cmd) {
+ case ATOMISP_IOC_G_HISTOGRAM:
+ err = put_atomisp_histogram32(&karg.his, up);
+ break;
+ case ATOMISP_IOC_G_DIS_STAT:
+ err = put_atomisp_dis_statistics32(&karg.dis_s, up);
+ break;
+ case ATOMISP_IOC_G_3A_STAT:
+ err = put_atomisp_3a_statistics32(&karg.s3a_s, up);
+ break;
+ case ATOMISP_IOC_G_ISP_GDC_TAB:
+ err = put_atomisp_morph_table32(&karg.mor_t, up);
+ break;
+ case ATOMISP_IOC_G_ISP_OVERLAY:
+ err = put_atomisp_overlay32(&karg.overlay, up);
+ break;
+ case ATOMISP_IOC_G_SENSOR_CALIBRATION_GROUP:
+ err = put_atomisp_calibration_group32(&karg.cal_grp, up);
+ break;
+ case ATOMISP_IOC_ACC_LOAD:
+ err = put_atomisp_acc_fw_load32(&karg.acc_fw_load, up);
+ break;
+ case ATOMISP_IOC_ACC_S_ARG:
+ case ATOMISP_IOC_ACC_DESTAB:
+ err = put_atomisp_acc_fw_arg32(&karg.acc_fw_arg, up);
+ break;
+ case ATOMISP_IOC_G_SENSOR_PRIV_INT_DATA:
+ case ATOMISP_IOC_G_MOTOR_PRIV_INT_DATA:
+ err = put_v4l2_private_int_data32(&karg.v4l2_pri_data, up);
+ break;
+ case ATOMISP_IOC_ACC_MAP:
+ case ATOMISP_IOC_ACC_UNMAP:
+ err = put_atomisp_acc_map32(&karg.acc_map, up);
+ break;
+ case ATOMISP_IOC_ACC_S_MAPPED_ARG:
+ err = put_atomisp_acc_s_mapped_arg32(&karg.acc_map_arg, up);
+ break;
+ case ATOMISP_IOC_ACC_LOAD_TO_PIPE:
+ err = put_atomisp_acc_fw_load_to_pipe32(&karg.acc_fw_to_pipe,
+ up);
+ break;
+ case ATOMISP_IOC_G_METADATA:
+ err = put_atomisp_metadata_stat32(&karg.md, up);
+ break;
+ case ATOMISP_IOC_G_METADATA_BY_TYPE:
+ err = put_atomisp_metadata_by_type_stat32(&karg.md_with_type,
+ up);
+ break;
+ }
+
+ return err;
+}
+
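+/*
+ * compat_ioctl32 entry point: commands whose layout is identical for
+ * 32-bit and 64-bit callers are passed straight through, commands with a
+ * 32-bit specific layout go through atomisp_do_compat_ioctl(), and
+ * anything else is rejected with a warning.
+ */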
+long atomisp_compat_ioctl32(struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct atomisp_device *isp = video_get_drvdata(vdev);
+ long ret = -ENOIOCTLCMD;
+
+ if (!file->f_op->unlocked_ioctl)
+ return ret;
+
+ switch (cmd) {
+ case ATOMISP_IOC_G_XNR:
+ case ATOMISP_IOC_S_XNR:
+ case ATOMISP_IOC_G_NR:
+ case ATOMISP_IOC_S_NR:
+ case ATOMISP_IOC_G_TNR:
+ case ATOMISP_IOC_S_TNR:
+ case ATOMISP_IOC_G_BLACK_LEVEL_COMP:
+ case ATOMISP_IOC_S_BLACK_LEVEL_COMP:
+ case ATOMISP_IOC_G_EE:
+ case ATOMISP_IOC_S_EE:
+ case ATOMISP_IOC_S_DIS_VECTOR:
+ case ATOMISP_IOC_G_ISP_PARM:
+ case ATOMISP_IOC_S_ISP_PARM:
+ case ATOMISP_IOC_G_ISP_GAMMA:
+ case ATOMISP_IOC_S_ISP_GAMMA:
+ case ATOMISP_IOC_ISP_MAKERNOTE:
+ case ATOMISP_IOC_G_ISP_MACC:
+ case ATOMISP_IOC_S_ISP_MACC:
+ case ATOMISP_IOC_G_ISP_BAD_PIXEL_DETECTION:
+ case ATOMISP_IOC_S_ISP_BAD_PIXEL_DETECTION:
+ case ATOMISP_IOC_G_ISP_FALSE_COLOR_CORRECTION:
+ case ATOMISP_IOC_S_ISP_FALSE_COLOR_CORRECTION:
+ case ATOMISP_IOC_G_ISP_CTC:
+ case ATOMISP_IOC_S_ISP_CTC:
+ case ATOMISP_IOC_G_ISP_WHITE_BALANCE:
+ case ATOMISP_IOC_S_ISP_WHITE_BALANCE:
+ case ATOMISP_IOC_CAMERA_BRIDGE:
+ case ATOMISP_IOC_G_SENSOR_MODE_DATA:
+ case ATOMISP_IOC_S_EXPOSURE:
+ case ATOMISP_IOC_G_3A_CONFIG:
+ case ATOMISP_IOC_S_3A_CONFIG:
+ case ATOMISP_IOC_ACC_UNLOAD:
+ case ATOMISP_IOC_ACC_START:
+ case ATOMISP_IOC_ACC_WAIT:
+ case ATOMISP_IOC_ACC_ABORT:
+ case ATOMISP_IOC_G_ISP_GAMMA_CORRECTION:
+ case ATOMISP_IOC_S_ISP_GAMMA_CORRECTION:
+ case ATOMISP_IOC_S_CONT_CAPTURE_CONFIG:
+ case ATOMISP_IOC_G_DVS2_BQ_RESOLUTIONS:
+ case ATOMISP_IOC_EXT_ISP_CTRL:
+ case ATOMISP_IOC_EXP_ID_UNLOCK:
+ case ATOMISP_IOC_EXP_ID_CAPTURE:
+ case ATOMISP_IOC_S_ENABLE_DZ_CAPT_PIPE:
+ case ATOMISP_IOC_G_FORMATS_CONFIG:
+ case ATOMISP_IOC_S_FORMATS_CONFIG:
+ case ATOMISP_IOC_S_EXPOSURE_WINDOW:
+ case ATOMISP_IOC_S_ACC_STATE:
+ case ATOMISP_IOC_G_ACC_STATE:
+ case ATOMISP_IOC_INJECT_A_FAKE_EVENT:
+ case ATOMISP_IOC_G_SENSOR_AE_BRACKETING_INFO:
+ case ATOMISP_IOC_S_SENSOR_AE_BRACKETING_MODE:
+ case ATOMISP_IOC_G_SENSOR_AE_BRACKETING_MODE:
+ case ATOMISP_IOC_G_INVALID_FRAME_NUM:
+ case ATOMISP_IOC_S_ARRAY_RESOLUTION:
+#ifdef ISP2401
+ case ATOMISP_IOC_S_SENSOR_RUNMODE:
+ case ATOMISP_IOC_G_UPDATE_EXPOSURE:
+#endif
+ ret = native_ioctl(file, cmd, arg);
+ break;
+
+ case ATOMISP_IOC_G_HISTOGRAM32:
+ case ATOMISP_IOC_S_HISTOGRAM32:
+ case ATOMISP_IOC_G_DIS_STAT32:
+ case ATOMISP_IOC_S_DIS_COEFS32:
+ case ATOMISP_IOC_S_DIS_VECTOR32:
+ case ATOMISP_IOC_G_3A_STAT32:
+ case ATOMISP_IOC_G_ISP_GDC_TAB32:
+ case ATOMISP_IOC_S_ISP_GDC_TAB32:
+ case ATOMISP_IOC_S_ISP_FPN_TABLE32:
+ case ATOMISP_IOC_G_ISP_OVERLAY32:
+ case ATOMISP_IOC_S_ISP_OVERLAY32:
+ case ATOMISP_IOC_G_SENSOR_CALIBRATION_GROUP32:
+ case ATOMISP_IOC_ACC_LOAD32:
+ case ATOMISP_IOC_ACC_S_ARG32:
+ case ATOMISP_IOC_G_SENSOR_PRIV_INT_DATA32:
+ case ATOMISP_IOC_S_ISP_SHD_TAB32:
+ case ATOMISP_IOC_ACC_DESTAB32:
+ case ATOMISP_IOC_G_MOTOR_PRIV_INT_DATA32:
+ case ATOMISP_IOC_ACC_MAP32:
+ case ATOMISP_IOC_ACC_UNMAP32:
+ case ATOMISP_IOC_ACC_S_MAPPED_ARG32:
+ case ATOMISP_IOC_S_PARAMETERS32:
+ case ATOMISP_IOC_ACC_LOAD_TO_PIPE32:
+ case ATOMISP_IOC_G_METADATA32:
+ case ATOMISP_IOC_G_METADATA_BY_TYPE32:
+ case ATOMISP_IOC_S_SENSOR_AE_BRACKETING_LUT32:
+ ret = atomisp_do_compat_ioctl(file, cmd, arg);
+ break;
+
+ default:
+ dev_warn(isp->dev,
+ "%s: unknown ioctl '%c', dir=%d, #%d (0x%08x)\n",
+ __func__, _IOC_TYPE(cmd), _IOC_DIR(cmd), _IOC_NR(cmd),
+ cmd);
+ break;
+ }
+ return ret;
+}
+#endif /* CONFIG_COMPAT */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_ioctl32.h b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_ioctl32.h
new file mode 100644
index 000000000000..750478f614d6
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_ioctl32.h
@@ -0,0 +1,369 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2013 Intel Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+#ifndef __ATOMISP_COMPAT_IOCTL32_H__
+#define __ATOMISP_COMPAT_IOCTL32_H__
+
+#include <linux/compat.h>
+#include <linux/videodev2.h>
+
+#include "atomisp_compat.h"
+
+struct atomisp_histogram32 {
+ unsigned int num_elements;
+ compat_uptr_t data;
+};
+
+struct atomisp_dvs2_stat_types32 {
+	compat_uptr_t odd_real;  /**< real part of the odd statistics */
+	compat_uptr_t odd_imag;  /**< imaginary part of the odd statistics */
+	compat_uptr_t even_real; /**< real part of the even statistics */
+	compat_uptr_t even_imag; /**< imaginary part of the even statistics */
+};
+
+struct atomisp_dvs2_coef_types32 {
+	compat_uptr_t odd_real;  /**< real part of the odd coefficients */
+	compat_uptr_t odd_imag;  /**< imaginary part of the odd coefficients */
+	compat_uptr_t even_real; /**< real part of the even coefficients */
+	compat_uptr_t even_imag; /**< imaginary part of the even coefficients */
+};
+
+struct atomisp_dvs2_statistics32 {
+ struct atomisp_dvs_grid_info grid_info;
+ struct atomisp_dvs2_stat_types32 hor_prod;
+ struct atomisp_dvs2_stat_types32 ver_prod;
+};
+
+struct atomisp_dis_statistics32 {
+ struct atomisp_dvs2_statistics32 dvs2_stat;
+ uint32_t exp_id;
+};
+
+struct atomisp_dis_coefficients32 {
+ struct atomisp_dvs_grid_info grid_info;
+ struct atomisp_dvs2_coef_types32 hor_coefs;
+ struct atomisp_dvs2_coef_types32 ver_coefs;
+};
+
+struct atomisp_3a_statistics32 {
+ struct atomisp_grid_info grid_info;
+ compat_uptr_t data;
+ compat_uptr_t rgby_data;
+ uint32_t exp_id;
+ uint32_t isp_config_id;
+};
+
+struct atomisp_metadata_with_type32 {
+ /* to specify which type of metadata to get */
+ enum atomisp_metadata_type type;
+ compat_uptr_t data;
+ uint32_t width;
+ uint32_t height;
+ uint32_t stride; /* in bytes */
+ uint32_t exp_id; /* exposure ID */
+ compat_uptr_t effective_width;
+};
+
+struct atomisp_metadata32 {
+ compat_uptr_t data;
+ uint32_t width;
+ uint32_t height;
+ uint32_t stride;
+ uint32_t exp_id;
+ compat_uptr_t effective_width;
+};
+
+struct atomisp_morph_table32 {
+ unsigned int enabled;
+ unsigned int height;
+ unsigned int width; /* number of valid elements per line */
+ compat_uptr_t coordinates_x[ATOMISP_MORPH_TABLE_NUM_PLANES];
+ compat_uptr_t coordinates_y[ATOMISP_MORPH_TABLE_NUM_PLANES];
+};
+
+struct v4l2_framebuffer32 {
+ __u32 capability;
+ __u32 flags;
+ compat_uptr_t base;
+ struct v4l2_pix_format fmt;
+};
+
+struct atomisp_overlay32 {
+	/* The frame containing the overlay data. The overlay frame width
+	 * should be a multiple of 2*ISP_VEC_NELEMS and the overlay frame
+	 * height a multiple of 2.
+	 */
+ compat_uptr_t frame;
+ /* Y value of overlay background */
+ unsigned char bg_y;
+ /* U value of overlay background */
+ char bg_u;
+ /* V value of overlay background */
+ char bg_v;
+ /* the blending percent of input data for Y subpixels */
+ unsigned char blend_input_perc_y;
+ /* the blending percent of input data for U subpixels */
+ unsigned char blend_input_perc_u;
+ /* the blending percent of input data for V subpixels */
+ unsigned char blend_input_perc_v;
+ /* the blending percent of overlay data for Y subpixels */
+ unsigned char blend_overlay_perc_y;
+ /* the blending percent of overlay data for U subpixels */
+ unsigned char blend_overlay_perc_u;
+ /* the blending percent of overlay data for V subpixels */
+ unsigned char blend_overlay_perc_v;
+	/* The overlay start x pixel position on the output frame. It should
+	 * be a multiple of 2*ISP_VEC_NELEMS.
+	 */
+ unsigned int overlay_start_x;
+	/* The overlay start y pixel position on the output frame. It should
+	 * be a multiple of 2.
+	 */
+ unsigned int overlay_start_y;
+};
+
+struct atomisp_calibration_group32 {
+ unsigned int size;
+ unsigned int type;
+ compat_uptr_t calb_grp_values;
+};
+
+struct atomisp_acc_fw_load32 {
+ unsigned int size;
+ unsigned int fw_handle;
+ compat_uptr_t data;
+};
+
+struct atomisp_acc_fw_arg32 {
+ unsigned int fw_handle;
+ unsigned int index;
+ compat_uptr_t value;
+ compat_size_t size;
+};
+
+struct v4l2_private_int_data32 {
+ __u32 size;
+ compat_uptr_t data;
+ __u32 reserved[2];
+};
+
+struct atomisp_shading_table32 {
+ __u32 enable;
+ __u32 sensor_width;
+ __u32 sensor_height;
+ __u32 width;
+ __u32 height;
+ __u32 fraction_bits;
+
+ compat_uptr_t data[ATOMISP_NUM_SC_COLORS];
+};
+
+struct atomisp_acc_map32 {
+ __u32 flags; /* Flags, see list below */
+ __u32 length; /* Length of data in bytes */
+ compat_uptr_t user_ptr; /* Pointer into user space */
+ compat_ulong_t css_ptr; /* Pointer into CSS address space */
+ __u32 reserved[4]; /* Set to zero */
+};
+
+struct atomisp_acc_s_mapped_arg32 {
+ unsigned int fw_handle;
+ __u32 memory; /* one of enum atomisp_acc_memory */
+ compat_size_t length;
+ compat_ulong_t css_ptr;
+};
+
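+/*
+ * 32-bit layout of struct atomisp_parameters: every nested configuration
+ * pointer is carried as a compat_uptr_t and widened by
+ * get_atomisp_parameters32() before the native ioctl runs.
+ */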
+struct atomisp_parameters32 {
+ compat_uptr_t wb_config; /* White Balance config */
+ compat_uptr_t cc_config; /* Color Correction config */
+ compat_uptr_t tnr_config; /* Temporal Noise Reduction */
+ compat_uptr_t ecd_config; /* Eigen Color Demosaicing */
+ compat_uptr_t ynr_config; /* Y(Luma) Noise Reduction */
+ compat_uptr_t fc_config; /* Fringe Control */
+ compat_uptr_t formats_config; /* Formats Control */
+ compat_uptr_t cnr_config; /* Chroma Noise Reduction */
+ compat_uptr_t macc_config; /* MACC */
+ compat_uptr_t ctc_config; /* Chroma Tone Control */
+ compat_uptr_t aa_config; /* Anti-Aliasing */
+ compat_uptr_t baa_config; /* Anti-Aliasing */
+ compat_uptr_t ce_config;
+ compat_uptr_t dvs_6axis_config;
+ compat_uptr_t ob_config; /* Objective Black config */
+ compat_uptr_t dp_config; /* Dead Pixel config */
+ compat_uptr_t nr_config; /* Noise Reduction config */
+ compat_uptr_t ee_config; /* Edge Enhancement config */
+ compat_uptr_t de_config; /* Demosaic config */
+ compat_uptr_t gc_config; /* Gamma Correction config */
+ compat_uptr_t anr_config; /* Advanced Noise Reduction */
+ compat_uptr_t a3a_config; /* 3A Statistics config */
+ compat_uptr_t xnr_config; /* eXtra Noise Reduction */
+ compat_uptr_t dz_config; /* Digital Zoom */
+	compat_uptr_t yuv2rgb_cc_config; /* Color Correction config */
+	compat_uptr_t rgb2yuv_cc_config; /* Color Correction config */
+ compat_uptr_t macc_table;
+ compat_uptr_t gamma_table;
+ compat_uptr_t ctc_table;
+ compat_uptr_t xnr_table;
+ compat_uptr_t r_gamma_table;
+ compat_uptr_t g_gamma_table;
+ compat_uptr_t b_gamma_table;
+ compat_uptr_t motion_vector; /* For 2-axis DVS */
+ compat_uptr_t shading_table;
+ compat_uptr_t morph_table;
+ compat_uptr_t dvs_coefs; /* DVS 1.0 coefficients */
+ compat_uptr_t dvs2_coefs; /* DVS 2.0 coefficients */
+ compat_uptr_t capture_config;
+ compat_uptr_t anr_thres;
+
+ compat_uptr_t lin_2500_config; /* Skylake: Linearization config */
+ compat_uptr_t obgrid_2500_config; /* Skylake: OBGRID config */
+ compat_uptr_t bnr_2500_config; /* Skylake: bayer denoise config */
+ compat_uptr_t shd_2500_config; /* Skylake: shading config */
+ compat_uptr_t dm_2500_config; /* Skylake: demosaic config */
+ compat_uptr_t rgbpp_2500_config; /* Skylake: RGBPP config */
+ compat_uptr_t dvs_stat_2500_config; /* Skylake: DVS STAT config */
+ compat_uptr_t lace_stat_2500_config; /* Skylake: LACE STAT config */
+ compat_uptr_t yuvp1_2500_config; /* Skylake: yuvp1 config */
+ compat_uptr_t yuvp2_2500_config; /* Skylake: yuvp2 config */
+ compat_uptr_t tnr_2500_config; /* Skylake: TNR config */
+ compat_uptr_t dpc_2500_config; /* Skylake: DPC config */
+ compat_uptr_t awb_2500_config; /* Skylake: auto white balance config */
+ compat_uptr_t awb_fr_2500_config; /* Skylake: auto white balance filter response config */
+ compat_uptr_t anr_2500_config; /* Skylake: ANR config */
+ compat_uptr_t af_2500_config; /* Skylake: auto focus config */
+ compat_uptr_t ae_2500_config; /* Skylake: auto exposure config */
+ compat_uptr_t bds_2500_config; /* Skylake: bayer downscaler config */
+ compat_uptr_t dvs_2500_config; /* Skylake: digital video stabilization config */
+ compat_uptr_t res_mgr_2500_config;
+
+	/*
+	 * Output frame pointer the config is to be applied to (optional);
+	 * set it to NULL to apply this config globally.
+	 */
+ compat_uptr_t output_frame;
+	/*
+	 * Unique ID to track which config was actually applied to a
+	 * particular frame; the driver sends this ID back together with the
+	 * output frame.
+	 */
+ uint32_t isp_config_id;
+ uint32_t per_frame_setting;
+};
+
+struct atomisp_acc_fw_load_to_pipe32 {
+ __u32 flags; /* Flags, see below for valid values */
+ unsigned int fw_handle; /* Handle, filled by kernel. */
+ __u32 size; /* Firmware binary size */
+ compat_uptr_t data; /* Pointer to firmware */
+ __u32 type; /* Binary type */
+ __u32 reserved[3]; /* Set to zero */
+};
+
+struct atomisp_dvs_6axis_config32 {
+ uint32_t exp_id;
+ uint32_t width_y;
+ uint32_t height_y;
+ uint32_t width_uv;
+ uint32_t height_uv;
+ compat_uptr_t xcoords_y;
+ compat_uptr_t ycoords_y;
+ compat_uptr_t xcoords_uv;
+ compat_uptr_t ycoords_uv;
+};
+
+struct atomisp_sensor_ae_bracketing_lut32 {
+ compat_uptr_t lut;
+ unsigned int lut_size;
+};
+
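+/*
+ * 32-bit ioctl numbers. They reuse the native command codes but are
+ * computed from the compat structure sizes, so their numeric values
+ * differ from the 64-bit definitions.
+ */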
+#define ATOMISP_IOC_G_HISTOGRAM32 \
+ _IOWR('v', BASE_VIDIOC_PRIVATE + 3, struct atomisp_histogram32)
+#define ATOMISP_IOC_S_HISTOGRAM32 \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 3, struct atomisp_histogram32)
+
+#define ATOMISP_IOC_G_DIS_STAT32 \
+ _IOWR('v', BASE_VIDIOC_PRIVATE + 6, struct atomisp_dis_statistics32)
+#define ATOMISP_IOC_S_DIS_COEFS32 \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 6, struct atomisp_dis_coefficients32)
+
+#define ATOMISP_IOC_S_DIS_VECTOR32 \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 6, struct atomisp_dvs_6axis_config32)
+
+#define ATOMISP_IOC_G_3A_STAT32 \
+ _IOWR('v', BASE_VIDIOC_PRIVATE + 7, struct atomisp_3a_statistics32)
+
+#define ATOMISP_IOC_G_ISP_GDC_TAB32 \
+ _IOR('v', BASE_VIDIOC_PRIVATE + 10, struct atomisp_morph_table32)
+#define ATOMISP_IOC_S_ISP_GDC_TAB32 \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 10, struct atomisp_morph_table32)
+
+#define ATOMISP_IOC_S_ISP_FPN_TABLE32 \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 17, struct v4l2_framebuffer32)
+
+#define ATOMISP_IOC_G_ISP_OVERLAY32 \
+ _IOWR('v', BASE_VIDIOC_PRIVATE + 18, struct atomisp_overlay32)
+#define ATOMISP_IOC_S_ISP_OVERLAY32 \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 18, struct atomisp_overlay32)
+
+#define ATOMISP_IOC_G_SENSOR_CALIBRATION_GROUP32 \
+ _IOWR('v', BASE_VIDIOC_PRIVATE + 22, struct atomisp_calibration_group32)
+
+#define ATOMISP_IOC_ACC_LOAD32 \
+ _IOWR('v', BASE_VIDIOC_PRIVATE + 24, struct atomisp_acc_fw_load32)
+
+#define ATOMISP_IOC_ACC_S_ARG32 \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 24, struct atomisp_acc_fw_arg32)
+
+#define ATOMISP_IOC_ACC_DESTAB32 \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 25, struct atomisp_acc_fw_arg32)
+
+#define ATOMISP_IOC_G_SENSOR_PRIV_INT_DATA32 \
+ _IOWR('v', BASE_VIDIOC_PRIVATE + 26, struct v4l2_private_int_data32)
+
+#define ATOMISP_IOC_S_ISP_SHD_TAB32 \
+ _IOWR('v', BASE_VIDIOC_PRIVATE + 27, struct atomisp_shading_table32)
+
+#define ATOMISP_IOC_G_MOTOR_PRIV_INT_DATA32 \
+ _IOWR('v', BASE_VIDIOC_PRIVATE + 29, struct v4l2_private_int_data32)
+
+#define ATOMISP_IOC_ACC_MAP32 \
+ _IOWR('v', BASE_VIDIOC_PRIVATE + 30, struct atomisp_acc_map32)
+
+#define ATOMISP_IOC_ACC_UNMAP32 \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 30, struct atomisp_acc_map32)
+
+#define ATOMISP_IOC_ACC_S_MAPPED_ARG32 \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 30, struct atomisp_acc_s_mapped_arg32)
+
+#define ATOMISP_IOC_ACC_LOAD_TO_PIPE32 \
+ _IOWR('v', BASE_VIDIOC_PRIVATE + 31, struct atomisp_acc_fw_load_to_pipe32)
+
+#define ATOMISP_IOC_S_PARAMETERS32 \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 32, struct atomisp_parameters32)
+
+#define ATOMISP_IOC_G_METADATA32 \
+ _IOWR('v', BASE_VIDIOC_PRIVATE + 34, struct atomisp_metadata32)
+
+#define ATOMISP_IOC_G_METADATA_BY_TYPE32 \
+ _IOWR('v', BASE_VIDIOC_PRIVATE + 34, struct atomisp_metadata_with_type32)
+
+#define ATOMISP_IOC_S_SENSOR_AE_BRACKETING_LUT32 \
+ _IOW('v', BASE_VIDIOC_PRIVATE + 43, struct atomisp_sensor_ae_bracketing_lut32)
+
+#endif /* __ATOMISP_COMPAT_IOCTL32_H__ */
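The *32 structures above carry user-space pointers as compat_uptr_t so that a 64-bit kernel can service these ioctls from 32-bit callers; each pointer has to be widened with compat_ptr() before use. Below is a minimal sketch of that conversion for atomisp_dvs_6axis_config32. The helper name and the "native" structure are invented for illustration only; just the *32 layout is taken from the header above.

#include <linux/compat.h>
#include <linux/types.h>
#include <linux/uaccess.h>

/* Hypothetical native counterpart of atomisp_dvs_6axis_config32 (sketch only) */
struct example_dvs_6axis_config {
	uint32_t exp_id;
	uint32_t width_y, height_y;
	uint32_t width_uv, height_uv;
	uint32_t __user *xcoords_y;
	uint32_t __user *ycoords_y;
	uint32_t __user *xcoords_uv;
	uint32_t __user *ycoords_uv;
};

static int example_get_dvs_6axis_config32(struct example_dvs_6axis_config *kp,
					  struct atomisp_dvs_6axis_config32 __user *up)
{
	compat_uptr_t xcoords_y, ycoords_y, xcoords_uv, ycoords_uv;

	if (get_user(kp->exp_id, &up->exp_id) ||
	    get_user(kp->width_y, &up->width_y) ||
	    get_user(kp->height_y, &up->height_y) ||
	    get_user(kp->width_uv, &up->width_uv) ||
	    get_user(kp->height_uv, &up->height_uv) ||
	    get_user(xcoords_y, &up->xcoords_y) ||
	    get_user(ycoords_y, &up->ycoords_y) ||
	    get_user(xcoords_uv, &up->xcoords_uv) ||
	    get_user(ycoords_uv, &up->ycoords_uv))
		return -EFAULT;

	/* compat_ptr() widens the 32-bit user pointers to native ones */
	kp->xcoords_y  = compat_ptr(xcoords_y);
	kp->ycoords_y  = compat_ptr(ycoords_y);
	kp->xcoords_uv = compat_ptr(xcoords_uv);
	kp->ycoords_uv = compat_ptr(ycoords_uv);
	return 0;
}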
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_csi2.c b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_csi2.c
new file mode 100644
index 000000000000..2c5036685447
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_csi2.c
@@ -0,0 +1,446 @@
+/*
+ * Support for Medfield PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+
+#include <media/v4l2-event.h>
+#include <media/v4l2-mediabus.h>
+#include "atomisp_cmd.h"
+#include "atomisp_internal.h"
+#include "atomisp-regs.h"
+
+static struct v4l2_mbus_framefmt *
+__csi2_get_format(struct atomisp_mipi_csi2_device *csi2,
+		  struct v4l2_subdev_pad_config *cfg,
+		  enum v4l2_subdev_format_whence which,
+		  unsigned int pad)
+{
+ if (which == V4L2_SUBDEV_FORMAT_TRY)
+ return v4l2_subdev_get_try_format(&csi2->subdev, cfg, pad);
+ else
+ return &csi2->formats[pad];
+}
+
+/*
+ * csi2_enum_mbus_code - Handle pixel format enumeration
+ * @sd  : pointer to v4l2 subdev structure
+ * @cfg : V4L2 subdev pad config
+ * @code: pointer to v4l2_subdev_mbus_code_enum structure
+ * return -EINVAL or zero on success
+ */
+static int csi2_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ const struct atomisp_in_fmt_conv *ic = atomisp_in_fmt_conv;
+ unsigned int i = 0;
+
+ while (ic->code) {
+ if (i == code->index) {
+ code->code = ic->code;
+ return 0;
+ }
+ i++, ic++;
+ }
+
+ return -EINVAL;
+}
+
+/*
+ * csi2_get_format - Handle get format by pads subdev method
+ * @sd : pointer to v4l2 subdev structure
+ * @cfg: V4L2 subdev pad config
+ * @fmt: pointer to v4l2 subdev format structure
+ * return -EINVAL or zero on success
+ */
+static int csi2_get_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct atomisp_mipi_csi2_device *csi2 = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *format;
+
+ format = __csi2_get_format(csi2, cfg, fmt->which, fmt->pad);
+
+ fmt->format = *format;
+
+ return 0;
+}
+
+int atomisp_csi2_set_ffmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ unsigned int which, uint16_t pad,
+ struct v4l2_mbus_framefmt *ffmt)
+{
+ struct atomisp_mipi_csi2_device *csi2 = v4l2_get_subdevdata(sd);
+	struct v4l2_mbus_framefmt *actual_ffmt =
+		__csi2_get_format(csi2, cfg, which, pad);
+
+ if (pad == CSI2_PAD_SINK) {
+ const struct atomisp_in_fmt_conv *ic;
+ struct v4l2_mbus_framefmt tmp_ffmt;
+
+ ic = atomisp_find_in_fmt_conv(ffmt->code);
+ if (ic)
+ actual_ffmt->code = ic->code;
+ else
+ actual_ffmt->code = atomisp_in_fmt_conv[0].code;
+
+ actual_ffmt->width = clamp_t(
+ u32, ffmt->width, ATOM_ISP_MIN_WIDTH,
+ ATOM_ISP_MAX_WIDTH);
+ actual_ffmt->height = clamp_t(
+ u32, ffmt->height, ATOM_ISP_MIN_HEIGHT,
+ ATOM_ISP_MAX_HEIGHT);
+
+ tmp_ffmt = *ffmt = *actual_ffmt;
+
+ return atomisp_csi2_set_ffmt(sd, cfg, which, CSI2_PAD_SOURCE,
+ &tmp_ffmt);
+ }
+
+ /* FIXME: DPCM decompression */
+	*actual_ffmt = *ffmt =
+		*__csi2_get_format(csi2, cfg, which, CSI2_PAD_SINK);
+
+ return 0;
+}
+
+/*
+ * csi2_set_format - Handle set format by pads subdev method
+ * @sd : pointer to v4l2 subdev structure
+ * @cfg: V4L2 subdev pad config
+ * @fmt: pointer to v4l2 subdev format structure
+ * return -EINVAL or zero on success
+ */
+static int csi2_set_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ return atomisp_csi2_set_ffmt(sd, cfg, fmt->which, fmt->pad,
+ &fmt->format);
+}
+
+/*
+ * csi2_set_stream - Enable/Disable streaming on the CSI2 module
+ * @sd: ISP CSI2 V4L2 subdevice
+ * @enable: Enable/disable stream (1/0)
+ *
+ * Return 0 on success or a negative error code otherwise.
+ */
+static int csi2_set_stream(struct v4l2_subdev *sd, int enable)
+{
+ return 0;
+}
+
+/* subdev core operations */
+static const struct v4l2_subdev_core_ops csi2_core_ops = {
+};
+
+/* subdev video operations */
+static const struct v4l2_subdev_video_ops csi2_video_ops = {
+ .s_stream = csi2_set_stream,
+};
+
+/* subdev pad operations */
+static const struct v4l2_subdev_pad_ops csi2_pad_ops = {
+ .enum_mbus_code = csi2_enum_mbus_code,
+ .get_fmt = csi2_get_format,
+ .set_fmt = csi2_set_format,
+ .link_validate = v4l2_subdev_link_validate_default,
+};
+
+/* subdev operations */
+static const struct v4l2_subdev_ops csi2_ops = {
+ .core = &csi2_core_ops,
+ .video = &csi2_video_ops,
+ .pad = &csi2_pad_ops,
+};
+
+/*
+ * csi2_link_setup - Setup CSI2 connections.
+ * @entity: Pointer to media entity structure
+ * @local : Pointer to the local (CSI2) pad
+ * @remote: Pointer to the remote pad
+ * @flags : Link flags
+ * return -EINVAL or zero on success
+ */
+static int csi2_link_setup(struct media_entity *entity,
+ const struct media_pad *local,
+ const struct media_pad *remote, u32 flags)
+{
+ struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
+ struct atomisp_mipi_csi2_device *csi2 = v4l2_get_subdevdata(sd);
+ u32 result = local->index | is_media_entity_v4l2_subdev(remote->entity);
+
+ switch (result) {
+ case CSI2_PAD_SOURCE | MEDIA_ENT_F_OLD_BASE:
+ /* not supported yet */
+ return -EINVAL;
+
+ case CSI2_PAD_SOURCE | MEDIA_ENT_F_V4L2_SUBDEV_UNKNOWN:
+ if (flags & MEDIA_LNK_FL_ENABLED) {
+ if (csi2->output & ~CSI2_OUTPUT_ISP_SUBDEV)
+ return -EBUSY;
+ csi2->output |= CSI2_OUTPUT_ISP_SUBDEV;
+ } else {
+ csi2->output &= ~CSI2_OUTPUT_ISP_SUBDEV;
+ }
+ break;
+
+ default:
+ /* Link from camera to CSI2 is fixed... */
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/* media operations */
+static const struct media_entity_operations csi2_media_ops = {
+ .link_setup = csi2_link_setup,
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+/*
+ * mipi_csi2_init_entities - Initialize subdev and media entity.
+ * @csi2: Pointer to the atomisp_mipi_csi2_device structure.
+ * return -ENOMEM or zero on success
+ */
+static int mipi_csi2_init_entities(struct atomisp_mipi_csi2_device *csi2,
+ int port)
+{
+ struct v4l2_subdev *sd = &csi2->subdev;
+ struct media_pad *pads = csi2->pads;
+ struct media_entity *me = &sd->entity;
+ int ret;
+
+ v4l2_subdev_init(sd, &csi2_ops);
+ snprintf(sd->name, sizeof(sd->name), "ATOM ISP CSI2-port%d", port);
+
+ v4l2_set_subdevdata(sd, csi2);
+ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+ pads[CSI2_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+ pads[CSI2_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+
+ me->ops = &csi2_media_ops;
+ me->function = MEDIA_ENT_F_V4L2_SUBDEV_UNKNOWN;
+ ret = media_entity_pads_init(me, CSI2_PADS_NUM, pads);
+ if (ret < 0)
+ return ret;
+
+ csi2->formats[CSI2_PAD_SINK].code =
+ csi2->formats[CSI2_PAD_SOURCE].code =
+ atomisp_in_fmt_conv[0].code;
+
+ return 0;
+}
+
+void
+atomisp_mipi_csi2_unregister_entities(struct atomisp_mipi_csi2_device *csi2)
+{
+ media_entity_cleanup(&csi2->subdev.entity);
+ v4l2_device_unregister_subdev(&csi2->subdev);
+}
+
+int atomisp_mipi_csi2_register_entities(struct atomisp_mipi_csi2_device *csi2,
+ struct v4l2_device *vdev)
+{
+ int ret;
+
+ /* Register the subdev and video nodes. */
+ ret = v4l2_device_register_subdev(vdev, &csi2->subdev);
+ if (ret < 0)
+ goto error;
+
+ return 0;
+
+error:
+ atomisp_mipi_csi2_unregister_entities(csi2);
+ return ret;
+}
+
+static const int LIMIT_SHIFT = 6; /* Limit numeric range into 31 bits */
+
+static int
+atomisp_csi2_configure_calc(const short int coeffs[2], int mipi_freq, int def)
+{
+ /* Delay counter accuracy, 1/0.0625 for ANN/CHT, 1/0.125 for BXT */
+ static const int accinv = 16; /* 1 / COUNT_ACC */
+ int r;
+
+ if (mipi_freq >> LIMIT_SHIFT <= 0)
+ return def;
+
+ r = accinv * coeffs[1] * (500000000 >> LIMIT_SHIFT);
+ r /= mipi_freq >> LIMIT_SHIFT;
+ r += accinv * coeffs[0];
+
+ return r;
+}
+
+static void atomisp_csi2_configure_isp2401(struct atomisp_sub_device *asd)
+{
+ /*
+ * The ISP2401 new input system CSI2+ receiver has several
+ * parameters affecting the receiver timings. These depend
+ * on the MIPI bus frequency F in Hz (sensor transmitter rate)
+ * as follows:
+ * register value = (A/1e9 + B * UI) / COUNT_ACC
+ * where
+ * UI = 1 / (2 * F) in seconds
+ * COUNT_ACC = counter accuracy in seconds
+ * For ANN and CHV, COUNT_ACC = 0.0625 ns
+ * For BXT, COUNT_ACC = 0.125 ns
+ * A and B are coefficients from the table below,
+ * depending whether the register minimum or maximum value is
+ * calculated.
+ * Minimum Maximum
+ * Clock lane A B A B
+ * reg_rx_csi_dly_cnt_termen_clane 0 0 38 0
+ * reg_rx_csi_dly_cnt_settle_clane 95 -8 300 -16
+ * Data lanes
+ * reg_rx_csi_dly_cnt_termen_dlane0 0 0 35 4
+ * reg_rx_csi_dly_cnt_settle_dlane0 85 -2 145 -6
+ * reg_rx_csi_dly_cnt_termen_dlane1 0 0 35 4
+ * reg_rx_csi_dly_cnt_settle_dlane1 85 -2 145 -6
+ * reg_rx_csi_dly_cnt_termen_dlane2 0 0 35 4
+ * reg_rx_csi_dly_cnt_settle_dlane2 85 -2 145 -6
+ * reg_rx_csi_dly_cnt_termen_dlane3 0 0 35 4
+ * reg_rx_csi_dly_cnt_settle_dlane3 85 -2 145 -6
+ *
+ * We use the minimum values in the calculations below.
+ */
+ static const short int coeff_clk_termen[] = { 0, 0 };
+ static const short int coeff_clk_settle[] = { 95, -8 };
+ static const short int coeff_dat_termen[] = { 0, 0 };
+ static const short int coeff_dat_settle[] = { 85, -2 };
+	static const int TERMEN_DEFAULT = 0;
+ static const int SETTLE_DEFAULT = 0x480;
+ static const hrt_address csi2_port_base[] = {
+ [ATOMISP_CAMERA_PORT_PRIMARY] = CSI2_PORT_A_BASE,
+ [ATOMISP_CAMERA_PORT_SECONDARY] = CSI2_PORT_B_BASE,
+ [ATOMISP_CAMERA_PORT_TERTIARY] = CSI2_PORT_C_BASE,
+ };
+ /* Number of lanes on each port, excluding clock lane */
+ static const unsigned char csi2_port_lanes[] = {
+ [ATOMISP_CAMERA_PORT_PRIMARY] = 4,
+ [ATOMISP_CAMERA_PORT_SECONDARY] = 2,
+ [ATOMISP_CAMERA_PORT_TERTIARY] = 2,
+ };
+ static const hrt_address csi2_lane_base[] = {
+ CSI2_LANE_CL_BASE,
+ CSI2_LANE_D0_BASE,
+ CSI2_LANE_D1_BASE,
+ CSI2_LANE_D2_BASE,
+ CSI2_LANE_D3_BASE,
+ };
+
+ int clk_termen;
+ int clk_settle;
+ int dat_termen;
+ int dat_settle;
+
+ struct v4l2_control ctrl;
+ struct atomisp_device *isp = asd->isp;
+ struct camera_mipi_info *mipi_info;
+ int mipi_freq = 0;
+ enum atomisp_camera_port port;
+
+ int n;
+
+ mipi_info = atomisp_to_sensor_mipi_info(
+ isp->inputs[asd->input_curr].camera);
+ port = mipi_info->port;
+
+ ctrl.id = V4L2_CID_LINK_FREQ;
+	if (v4l2_g_ctrl(isp->inputs[asd->input_curr].camera->ctrl_handler,
+			&ctrl) == 0)
+ mipi_freq = ctrl.value;
+
+ clk_termen = atomisp_csi2_configure_calc(coeff_clk_termen,
+ mipi_freq, TERMEN_DEFAULT);
+ clk_settle = atomisp_csi2_configure_calc(coeff_clk_settle,
+ mipi_freq, SETTLE_DEFAULT);
+ dat_termen = atomisp_csi2_configure_calc(coeff_dat_termen,
+ mipi_freq, TERMEN_DEFAULT);
+ dat_settle = atomisp_csi2_configure_calc(coeff_dat_settle,
+ mipi_freq, SETTLE_DEFAULT);
+ for (n = 0; n < csi2_port_lanes[port] + 1; n++) {
+ hrt_address base = csi2_port_base[port] + csi2_lane_base[n];
+ atomisp_store_uint32(base + CSI2_REG_RX_CSI_DLY_CNT_TERMEN,
+ n == 0 ? clk_termen : dat_termen);
+ atomisp_store_uint32(base + CSI2_REG_RX_CSI_DLY_CNT_SETTLE,
+ n == 0 ? clk_settle : dat_settle);
+ }
+}
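A worked instance of the timing formula in the comment above may help; this is illustrative only, and the 400 MHz link frequency is an assumed example rather than a value taken from the driver:

/*
 * Example: ANN/CHV clock-lane settle minimum (A = 95, B = -8),
 * COUNT_ACC = 0.0625 ns (accinv = 16), link frequency F = 400 MHz:
 *
 *   UI             = 1 / (2 * F)                       = 1.25 ns
 *   register value = (95 ns - 8 * 1.25 ns) / 0.0625 ns = 1360
 *
 * atomisp_csi2_configure_calc() reaches the same result in integer
 * arithmetic:
 *
 *   16 * (-8) * (500000000 >> 6) / (400000000 >> 6) + 16 * 95
 *   = -160 + 1520 = 1360
 */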
+
+void atomisp_csi2_configure(struct atomisp_sub_device *asd)
+{
+ if (IS_HWREVISION(asd->isp, ATOMISP_HW_REVISION_ISP2401))
+ atomisp_csi2_configure_isp2401(asd);
+}
+
+/*
+ * atomisp_mipi_csi2_cleanup - Routine for module driver cleanup
+ */
+void atomisp_mipi_csi2_cleanup(struct atomisp_device *isp)
+{
+}
+
+int atomisp_mipi_csi2_init(struct atomisp_device *isp)
+{
+ struct atomisp_mipi_csi2_device *csi2_port;
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < ATOMISP_CAMERA_NR_PORTS; i++) {
+ csi2_port = &isp->csi2_port[i];
+ csi2_port->isp = isp;
+ ret = mipi_csi2_init_entities(csi2_port, i);
+ if (ret < 0)
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ atomisp_mipi_csi2_cleanup(isp);
+ return ret;
+}
+
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_csi2.h b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_csi2.h
new file mode 100644
index 000000000000..faa9cf7e05c0
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_csi2.h
@@ -0,0 +1,61 @@
+/*
+ * Support for Medfield PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+#ifndef __ATOMISP_CSI2_H__
+#define __ATOMISP_CSI2_H__
+
+#include <media/v4l2-subdev.h>
+#include <media/v4l2-ctrls.h>
+
+#define CSI2_PAD_SINK 0
+#define CSI2_PAD_SOURCE 1
+#define CSI2_PADS_NUM 2
+
+#define CSI2_OUTPUT_ISP_SUBDEV (1 << 0)
+#define CSI2_OUTPUT_MEMORY (1 << 1)
+
+struct atomisp_device;
+struct v4l2_device;
+struct atomisp_sub_device;
+
+struct atomisp_mipi_csi2_device {
+ struct v4l2_subdev subdev;
+ struct media_pad pads[CSI2_PADS_NUM];
+ struct v4l2_mbus_framefmt formats[CSI2_PADS_NUM];
+
+ struct v4l2_ctrl_handler ctrls;
+ struct atomisp_device *isp;
+
+ u32 output; /* output direction */
+};
+
+int atomisp_csi2_set_ffmt(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
+ unsigned int which, uint16_t pad,
+ struct v4l2_mbus_framefmt *ffmt);
+int atomisp_mipi_csi2_init(struct atomisp_device *isp);
+void atomisp_mipi_csi2_cleanup(struct atomisp_device *isp);
+void atomisp_mipi_csi2_unregister_entities(
+ struct atomisp_mipi_csi2_device *csi2);
+int atomisp_mipi_csi2_register_entities(struct atomisp_mipi_csi2_device *csi2,
+ struct v4l2_device *vdev);
+
+void atomisp_csi2_configure(struct atomisp_sub_device *asd);
+
+#endif /* __ATOMISP_CSI2_H__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_dfs_tables.h b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_dfs_tables.h
new file mode 100644
index 000000000000..204d941cdb6c
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_dfs_tables.h
@@ -0,0 +1,412 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2013 Intel Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+#ifndef __ATOMISP_DFS_TABLES_H__
+#define __ATOMISP_DFS_TABLES_H__
+
+#include <linux/kernel.h>
+
+struct atomisp_freq_scaling_rule {
+ unsigned int width;
+ unsigned int height;
+ unsigned short fps;
+ unsigned int isp_freq;
+ unsigned int run_mode;
+};
+
+
+struct atomisp_dfs_config {
+ unsigned int lowest_freq;
+ unsigned int max_freq_at_vmin;
+ unsigned int highest_freq;
+ const struct atomisp_freq_scaling_rule *dfs_table;
+ unsigned int dfs_table_size;
+};
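The tables that follow are keyed on resolution, frame rate and run mode, with ISP_FREQ_RULE_ANY acting as a wildcard. A first-match lookup over such a table could look like the sketch below; this is an illustration only — the helper name is invented here, and the driver's real rule-matching code lives elsewhere:

/* Illustrative sketch: first-match lookup in an atomisp_dfs_config table.
 * ISP_FREQ_RULE_ANY entries act as wildcards. */
static unsigned int example_dfs_lookup(const struct atomisp_dfs_config *cfg,
				       unsigned int width, unsigned int height,
				       unsigned short fps, unsigned int run_mode)
{
	unsigned int i;

	for (i = 0; i < cfg->dfs_table_size; i++) {
		const struct atomisp_freq_scaling_rule *r = &cfg->dfs_table[i];

		if ((r->width == ISP_FREQ_RULE_ANY || r->width == width) &&
		    (r->height == ISP_FREQ_RULE_ANY || r->height == height) &&
		    (r->fps == ISP_FREQ_RULE_ANY || r->fps == fps) &&
		    r->run_mode == run_mode)
			return r->isp_freq;
	}

	/* no rule matched: fall back to the highest supported frequency */
	return cfg->highest_freq;
}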
+
+static const struct atomisp_freq_scaling_rule dfs_rules_merr[] = {
+ {
+ .width = ISP_FREQ_RULE_ANY,
+ .height = ISP_FREQ_RULE_ANY,
+ .fps = ISP_FREQ_RULE_ANY,
+ .isp_freq = ISP_FREQ_400MHZ,
+ .run_mode = ATOMISP_RUN_MODE_VIDEO,
+ },
+ {
+ .width = ISP_FREQ_RULE_ANY,
+ .height = ISP_FREQ_RULE_ANY,
+ .fps = ISP_FREQ_RULE_ANY,
+ .isp_freq = ISP_FREQ_400MHZ,
+ .run_mode = ATOMISP_RUN_MODE_STILL_CAPTURE,
+ },
+ {
+ .width = ISP_FREQ_RULE_ANY,
+ .height = ISP_FREQ_RULE_ANY,
+ .fps = ISP_FREQ_RULE_ANY,
+ .isp_freq = ISP_FREQ_400MHZ,
+ .run_mode = ATOMISP_RUN_MODE_CONTINUOUS_CAPTURE,
+ },
+ {
+ .width = ISP_FREQ_RULE_ANY,
+ .height = ISP_FREQ_RULE_ANY,
+ .fps = ISP_FREQ_RULE_ANY,
+ .isp_freq = ISP_FREQ_400MHZ,
+ .run_mode = ATOMISP_RUN_MODE_PREVIEW,
+ },
+ {
+ .width = ISP_FREQ_RULE_ANY,
+ .height = ISP_FREQ_RULE_ANY,
+ .fps = ISP_FREQ_RULE_ANY,
+ .isp_freq = ISP_FREQ_457MHZ,
+ .run_mode = ATOMISP_RUN_MODE_SDV,
+ },
+};
+
+/* Merrifield and Moorefield DFS rules */
+static const struct atomisp_dfs_config dfs_config_merr = {
+ .lowest_freq = ISP_FREQ_200MHZ,
+ .max_freq_at_vmin = ISP_FREQ_400MHZ,
+ .highest_freq = ISP_FREQ_457MHZ,
+ .dfs_table = dfs_rules_merr,
+ .dfs_table_size = ARRAY_SIZE(dfs_rules_merr),
+};
+
+static const struct atomisp_freq_scaling_rule dfs_rules_merr_1179[] = {
+ {
+ .width = ISP_FREQ_RULE_ANY,
+ .height = ISP_FREQ_RULE_ANY,
+ .fps = ISP_FREQ_RULE_ANY,
+ .isp_freq = ISP_FREQ_400MHZ,
+ .run_mode = ATOMISP_RUN_MODE_VIDEO,
+ },
+ {
+ .width = ISP_FREQ_RULE_ANY,
+ .height = ISP_FREQ_RULE_ANY,
+ .fps = ISP_FREQ_RULE_ANY,
+ .isp_freq = ISP_FREQ_400MHZ,
+ .run_mode = ATOMISP_RUN_MODE_STILL_CAPTURE,
+ },
+ {
+ .width = ISP_FREQ_RULE_ANY,
+ .height = ISP_FREQ_RULE_ANY,
+ .fps = ISP_FREQ_RULE_ANY,
+ .isp_freq = ISP_FREQ_400MHZ,
+ .run_mode = ATOMISP_RUN_MODE_CONTINUOUS_CAPTURE,
+ },
+ {
+ .width = ISP_FREQ_RULE_ANY,
+ .height = ISP_FREQ_RULE_ANY,
+ .fps = ISP_FREQ_RULE_ANY,
+ .isp_freq = ISP_FREQ_400MHZ,
+ .run_mode = ATOMISP_RUN_MODE_PREVIEW,
+ },
+ {
+ .width = ISP_FREQ_RULE_ANY,
+ .height = ISP_FREQ_RULE_ANY,
+ .fps = ISP_FREQ_RULE_ANY,
+ .isp_freq = ISP_FREQ_400MHZ,
+ .run_mode = ATOMISP_RUN_MODE_SDV,
+ },
+};
+
+static const struct atomisp_dfs_config dfs_config_merr_1179 = {
+ .lowest_freq = ISP_FREQ_200MHZ,
+ .max_freq_at_vmin = ISP_FREQ_400MHZ,
+ .highest_freq = ISP_FREQ_400MHZ,
+ .dfs_table = dfs_rules_merr_1179,
+ .dfs_table_size = ARRAY_SIZE(dfs_rules_merr_1179),
+};
+
+static const struct atomisp_freq_scaling_rule dfs_rules_merr_117a[] = {
+ {
+ .width = 1920,
+ .height = 1080,
+ .fps = 30,
+ .isp_freq = ISP_FREQ_266MHZ,
+ .run_mode = ATOMISP_RUN_MODE_VIDEO,
+ },
+ {
+ .width = 1080,
+ .height = 1920,
+ .fps = 30,
+#ifndef ISP2401
+ .isp_freq = ISP_FREQ_266MHZ,
+#else
+ .isp_freq = ISP_FREQ_400MHZ,
+#endif
+ .run_mode = ATOMISP_RUN_MODE_VIDEO,
+ },
+ {
+ .width = 1920,
+ .height = 1080,
+ .fps = 45,
+ .isp_freq = ISP_FREQ_320MHZ,
+ .run_mode = ATOMISP_RUN_MODE_VIDEO,
+ },
+ {
+ .width = 1080,
+ .height = 1920,
+ .fps = 45,
+ .isp_freq = ISP_FREQ_320MHZ,
+ .run_mode = ATOMISP_RUN_MODE_VIDEO,
+ },
+ {
+ .width = ISP_FREQ_RULE_ANY,
+ .height = ISP_FREQ_RULE_ANY,
+ .fps = 60,
+ .isp_freq = ISP_FREQ_356MHZ,
+ .run_mode = ATOMISP_RUN_MODE_VIDEO,
+ },
+ {
+ .width = ISP_FREQ_RULE_ANY,
+ .height = ISP_FREQ_RULE_ANY,
+ .fps = ISP_FREQ_RULE_ANY,
+ .isp_freq = ISP_FREQ_200MHZ,
+ .run_mode = ATOMISP_RUN_MODE_VIDEO,
+ },
+ {
+ .width = ISP_FREQ_RULE_ANY,
+ .height = ISP_FREQ_RULE_ANY,
+ .fps = ISP_FREQ_RULE_ANY,
+ .isp_freq = ISP_FREQ_400MHZ,
+ .run_mode = ATOMISP_RUN_MODE_STILL_CAPTURE,
+ },
+ {
+ .width = ISP_FREQ_RULE_ANY,
+ .height = ISP_FREQ_RULE_ANY,
+ .fps = ISP_FREQ_RULE_ANY,
+ .isp_freq = ISP_FREQ_400MHZ,
+ .run_mode = ATOMISP_RUN_MODE_CONTINUOUS_CAPTURE,
+ },
+ {
+ .width = ISP_FREQ_RULE_ANY,
+ .height = ISP_FREQ_RULE_ANY,
+ .fps = ISP_FREQ_RULE_ANY,
+ .isp_freq = ISP_FREQ_200MHZ,
+ .run_mode = ATOMISP_RUN_MODE_PREVIEW,
+ },
+ {
+ .width = ISP_FREQ_RULE_ANY,
+ .height = ISP_FREQ_RULE_ANY,
+ .fps = ISP_FREQ_RULE_ANY,
+ .isp_freq = ISP_FREQ_400MHZ,
+ .run_mode = ATOMISP_RUN_MODE_SDV,
+ },
+};
+
+static const struct atomisp_dfs_config dfs_config_merr_117a = {
+ .lowest_freq = ISP_FREQ_200MHZ,
+ .max_freq_at_vmin = ISP_FREQ_200MHZ,
+ .highest_freq = ISP_FREQ_400MHZ,
+ .dfs_table = dfs_rules_merr_117a,
+ .dfs_table_size = ARRAY_SIZE(dfs_rules_merr_117a),
+};
+
+static const struct atomisp_freq_scaling_rule dfs_rules_byt[] = {
+ {
+ .width = ISP_FREQ_RULE_ANY,
+ .height = ISP_FREQ_RULE_ANY,
+ .fps = ISP_FREQ_RULE_ANY,
+ .isp_freq = ISP_FREQ_400MHZ,
+ .run_mode = ATOMISP_RUN_MODE_VIDEO,
+ },
+ {
+ .width = ISP_FREQ_RULE_ANY,
+ .height = ISP_FREQ_RULE_ANY,
+ .fps = ISP_FREQ_RULE_ANY,
+ .isp_freq = ISP_FREQ_400MHZ,
+ .run_mode = ATOMISP_RUN_MODE_STILL_CAPTURE,
+ },
+ {
+ .width = ISP_FREQ_RULE_ANY,
+ .height = ISP_FREQ_RULE_ANY,
+ .fps = ISP_FREQ_RULE_ANY,
+ .isp_freq = ISP_FREQ_400MHZ,
+ .run_mode = ATOMISP_RUN_MODE_CONTINUOUS_CAPTURE,
+ },
+ {
+ .width = ISP_FREQ_RULE_ANY,
+ .height = ISP_FREQ_RULE_ANY,
+ .fps = ISP_FREQ_RULE_ANY,
+ .isp_freq = ISP_FREQ_400MHZ,
+ .run_mode = ATOMISP_RUN_MODE_PREVIEW,
+ },
+ {
+ .width = ISP_FREQ_RULE_ANY,
+ .height = ISP_FREQ_RULE_ANY,
+ .fps = ISP_FREQ_RULE_ANY,
+ .isp_freq = ISP_FREQ_400MHZ,
+ .run_mode = ATOMISP_RUN_MODE_SDV,
+ },
+};
+
+static const struct atomisp_dfs_config dfs_config_byt = {
+ .lowest_freq = ISP_FREQ_200MHZ,
+ .max_freq_at_vmin = ISP_FREQ_400MHZ,
+ .highest_freq = ISP_FREQ_400MHZ,
+ .dfs_table = dfs_rules_byt,
+ .dfs_table_size = ARRAY_SIZE(dfs_rules_byt),
+};
+
+static const struct atomisp_freq_scaling_rule dfs_rules_byt_cr[] = {
+ {
+ .width = ISP_FREQ_RULE_ANY,
+ .height = ISP_FREQ_RULE_ANY,
+ .fps = ISP_FREQ_RULE_ANY,
+ .isp_freq = ISP_FREQ_320MHZ,
+ .run_mode = ATOMISP_RUN_MODE_VIDEO,
+ },
+ {
+ .width = ISP_FREQ_RULE_ANY,
+ .height = ISP_FREQ_RULE_ANY,
+ .fps = ISP_FREQ_RULE_ANY,
+ .isp_freq = ISP_FREQ_320MHZ,
+ .run_mode = ATOMISP_RUN_MODE_STILL_CAPTURE,
+ },
+ {
+ .width = ISP_FREQ_RULE_ANY,
+ .height = ISP_FREQ_RULE_ANY,
+ .fps = ISP_FREQ_RULE_ANY,
+ .isp_freq = ISP_FREQ_320MHZ,
+ .run_mode = ATOMISP_RUN_MODE_CONTINUOUS_CAPTURE,
+ },
+ {
+ .width = ISP_FREQ_RULE_ANY,
+ .height = ISP_FREQ_RULE_ANY,
+ .fps = ISP_FREQ_RULE_ANY,
+ .isp_freq = ISP_FREQ_320MHZ,
+ .run_mode = ATOMISP_RUN_MODE_PREVIEW,
+ },
+ {
+ .width = ISP_FREQ_RULE_ANY,
+ .height = ISP_FREQ_RULE_ANY,
+ .fps = ISP_FREQ_RULE_ANY,
+ .isp_freq = ISP_FREQ_320MHZ,
+ .run_mode = ATOMISP_RUN_MODE_SDV,
+ },
+};
+
+static const struct atomisp_dfs_config dfs_config_byt_cr = {
+ .lowest_freq = ISP_FREQ_200MHZ,
+ .max_freq_at_vmin = ISP_FREQ_320MHZ,
+ .highest_freq = ISP_FREQ_320MHZ,
+ .dfs_table = dfs_rules_byt_cr,
+ .dfs_table_size = ARRAY_SIZE(dfs_rules_byt_cr),
+};
+
+static const struct atomisp_freq_scaling_rule dfs_rules_cht[] = {
+ {
+ .width = ISP_FREQ_RULE_ANY,
+ .height = ISP_FREQ_RULE_ANY,
+ .fps = ISP_FREQ_RULE_ANY,
+ .isp_freq = ISP_FREQ_320MHZ,
+ .run_mode = ATOMISP_RUN_MODE_VIDEO,
+ },
+ {
+ .width = ISP_FREQ_RULE_ANY,
+ .height = ISP_FREQ_RULE_ANY,
+ .fps = ISP_FREQ_RULE_ANY,
+ .isp_freq = ISP_FREQ_356MHZ,
+ .run_mode = ATOMISP_RUN_MODE_STILL_CAPTURE,
+ },
+ {
+ .width = ISP_FREQ_RULE_ANY,
+ .height = ISP_FREQ_RULE_ANY,
+ .fps = ISP_FREQ_RULE_ANY,
+ .isp_freq = ISP_FREQ_320MHZ,
+ .run_mode = ATOMISP_RUN_MODE_CONTINUOUS_CAPTURE,
+ },
+ {
+ .width = ISP_FREQ_RULE_ANY,
+ .height = ISP_FREQ_RULE_ANY,
+ .fps = ISP_FREQ_RULE_ANY,
+ .isp_freq = ISP_FREQ_320MHZ,
+ .run_mode = ATOMISP_RUN_MODE_PREVIEW,
+ },
+ {
+ .width = 1280,
+ .height = 720,
+ .fps = ISP_FREQ_RULE_ANY,
+ .isp_freq = ISP_FREQ_320MHZ,
+ .run_mode = ATOMISP_RUN_MODE_SDV,
+ },
+ {
+ .width = ISP_FREQ_RULE_ANY,
+ .height = ISP_FREQ_RULE_ANY,
+ .fps = ISP_FREQ_RULE_ANY,
+ .isp_freq = ISP_FREQ_356MHZ,
+ .run_mode = ATOMISP_RUN_MODE_SDV,
+ },
+};
+
+static const struct atomisp_freq_scaling_rule dfs_rules_cht_soc[] = {
+ {
+ .width = ISP_FREQ_RULE_ANY,
+ .height = ISP_FREQ_RULE_ANY,
+ .fps = ISP_FREQ_RULE_ANY,
+ .isp_freq = ISP_FREQ_356MHZ,
+ .run_mode = ATOMISP_RUN_MODE_VIDEO,
+ },
+ {
+ .width = ISP_FREQ_RULE_ANY,
+ .height = ISP_FREQ_RULE_ANY,
+ .fps = ISP_FREQ_RULE_ANY,
+ .isp_freq = ISP_FREQ_356MHZ,
+ .run_mode = ATOMISP_RUN_MODE_STILL_CAPTURE,
+ },
+ {
+ .width = ISP_FREQ_RULE_ANY,
+ .height = ISP_FREQ_RULE_ANY,
+ .fps = ISP_FREQ_RULE_ANY,
+ .isp_freq = ISP_FREQ_320MHZ,
+ .run_mode = ATOMISP_RUN_MODE_CONTINUOUS_CAPTURE,
+ },
+ {
+ .width = ISP_FREQ_RULE_ANY,
+ .height = ISP_FREQ_RULE_ANY,
+ .fps = ISP_FREQ_RULE_ANY,
+ .isp_freq = ISP_FREQ_320MHZ,
+ .run_mode = ATOMISP_RUN_MODE_PREVIEW,
+ },
+ {
+ .width = ISP_FREQ_RULE_ANY,
+ .height = ISP_FREQ_RULE_ANY,
+ .fps = ISP_FREQ_RULE_ANY,
+ .isp_freq = ISP_FREQ_356MHZ,
+ .run_mode = ATOMISP_RUN_MODE_SDV,
+ },
+};
+
+static const struct atomisp_dfs_config dfs_config_cht = {
+ .lowest_freq = ISP_FREQ_100MHZ,
+ .max_freq_at_vmin = ISP_FREQ_356MHZ,
+ .highest_freq = ISP_FREQ_356MHZ,
+ .dfs_table = dfs_rules_cht,
+ .dfs_table_size = ARRAY_SIZE(dfs_rules_cht),
+};
+
+static const struct atomisp_dfs_config dfs_config_cht_soc = {
+ .lowest_freq = ISP_FREQ_100MHZ,
+ .max_freq_at_vmin = ISP_FREQ_356MHZ,
+ .highest_freq = ISP_FREQ_356MHZ,
+ .dfs_table = dfs_rules_cht_soc,
+ .dfs_table_size = ARRAY_SIZE(dfs_rules_cht_soc),
+};
+
+#endif /* __ATOMISP_DFS_TABLES_H__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_drvfs.c b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_drvfs.c
new file mode 100644
index 000000000000..1ae2358de8d4
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_drvfs.c
@@ -0,0 +1,209 @@
+/*
+ * Support for atomisp driver sysfs interface
+ *
+ * Copyright (c) 2014 Intel Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+
+#include "atomisp_compat.h"
+#include "atomisp_internal.h"
+#include "atomisp_ioctl.h"
+#include "hmm/hmm.h"
+
+/*
+ * _iunit_debug:
+ * dbglvl: iunit css driver trace level
+ * dbgfun: iunit css debug function selector
+ * dbgopt: iunit debug options (bit mask):
+ *   bit 0: binary list
+ *   bit 1: running binary
+ *   bit 2: memory statistics
+ */
+struct _iunit_debug {
+ struct pci_driver *drv;
+ struct atomisp_device *isp;
+ unsigned int dbglvl;
+ unsigned int dbgfun;
+ unsigned int dbgopt;
+};
+
+#define OPTION_BIN_LIST (1<<0)
+#define OPTION_BIN_RUN (1<<1)
+#define OPTION_MEM_STAT (1<<2)
+#define OPTION_VALID (OPTION_BIN_LIST \
+ | OPTION_BIN_RUN \
+ | OPTION_MEM_STAT)
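The option bits combine as a plain mask, so writing the decimal value 5 (OPTION_BIN_LIST | OPTION_MEM_STAT) to the dbgopt attribute requests both the binary list and the memory statistics dump; the store handler parses base-10 input. A small user-space sketch follows — the sysfs path is an assumption, since driver attributes normally appear under /sys/bus/pci/drivers/<driver name>/:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* hypothetical path: adjust to the actual PCI driver name */
	const char *path = "/sys/bus/pci/drivers/atomisp/dbgopt";
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* 5 == OPTION_BIN_LIST | OPTION_MEM_STAT */
	if (write(fd, "5", 1) != 1)
		perror("write");
	close(fd);
	return 0;
}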
+
+static struct _iunit_debug iunit_debug = {
+ .dbglvl = 0,
+ .dbgopt = OPTION_BIN_LIST,
+};
+
+static inline int iunit_dump_dbgopt(struct atomisp_device *isp,
+ unsigned int opt)
+{
+ int ret = 0;
+
+ if (opt & OPTION_VALID) {
+ if (opt & OPTION_BIN_LIST) {
+ ret = atomisp_css_dump_blob_infor();
+ if (ret) {
+ dev_err(atomisp_dev, "%s dump blob infor err[ret:%d]\n",
+ __func__, ret);
+ goto opt_err;
+ }
+ }
+
+ if (opt & OPTION_BIN_RUN) {
+ if (atomisp_streaming_count(isp)) {
+ atomisp_css_dump_sp_raw_copy_linecount(true);
+ atomisp_css_debug_dump_isp_binary();
+ } else {
+ ret = -EPERM;
+ dev_err(atomisp_dev, "%s dump running bin err[ret:%d]\n",
+ __func__, ret);
+ goto opt_err;
+ }
+ }
+
+ if (opt & OPTION_MEM_STAT)
+ hmm_show_mem_stat(__func__, __LINE__);
+ } else {
+ ret = -EINVAL;
+ dev_err(atomisp_dev, "%s dump nothing[ret=%d]\n", __func__,
+ ret);
+ }
+
+opt_err:
+ return ret;
+}
+
+static ssize_t iunit_dbglvl_show(struct device_driver *drv, char *buf)
+{
+ iunit_debug.dbglvl = atomisp_css_debug_get_dtrace_level();
+ return sprintf(buf, "dtrace level:%u\n", iunit_debug.dbglvl);
+}
+
+static ssize_t iunit_dbglvl_store(struct device_driver *drv, const char *buf,
+ size_t size)
+{
+ if (kstrtouint(buf, 10, &iunit_debug.dbglvl)
+ || iunit_debug.dbglvl < 1
+ || iunit_debug.dbglvl > 9) {
+ return -ERANGE;
+ }
+ atomisp_css_debug_set_dtrace_level(iunit_debug.dbglvl);
+
+ return size;
+}
+
+static ssize_t iunit_dbgfun_show(struct device_driver *drv, char *buf)
+{
+ iunit_debug.dbgfun = atomisp_get_css_dbgfunc();
+ return sprintf(buf, "dbgfun opt:%u\n", iunit_debug.dbgfun);
+}
+
+static ssize_t iunit_dbgfun_store(struct device_driver *drv, const char *buf,
+ size_t size)
+{
+ unsigned int opt;
+ int ret;
+
+ ret = kstrtouint(buf, 10, &opt);
+ if (ret)
+ return ret;
+
+ ret = atomisp_set_css_dbgfunc(iunit_debug.isp, opt);
+ if (ret)
+ return ret;
+
+ iunit_debug.dbgfun = opt;
+
+ return size;
+}
+
+static ssize_t iunit_dbgopt_show(struct device_driver *drv, char *buf)
+{
+ return sprintf(buf, "option:0x%x\n", iunit_debug.dbgopt);
+}
+
+static ssize_t iunit_dbgopt_store(struct device_driver *drv, const char *buf,
+ size_t size)
+{
+ unsigned int opt;
+ int ret;
+
+ ret = kstrtouint(buf, 10, &opt);
+ if (ret)
+ return ret;
+
+ iunit_debug.dbgopt = opt;
+ ret = iunit_dump_dbgopt(iunit_debug.isp, iunit_debug.dbgopt);
+ if (ret)
+ return ret;
+
+ return size;
+}
+
+static struct driver_attribute iunit_drvfs_attrs[] = {
+ __ATTR(dbglvl, 0644, iunit_dbglvl_show, iunit_dbglvl_store),
+ __ATTR(dbgfun, 0644, iunit_dbgfun_show, iunit_dbgfun_store),
+ __ATTR(dbgopt, 0644, iunit_dbgopt_show, iunit_dbgopt_store),
+};
+
+static int iunit_drvfs_create_files(struct pci_driver *drv)
+{
+ int i, ret = 0;
+
+ for (i = 0; i < ARRAY_SIZE(iunit_drvfs_attrs); i++)
+ ret |= driver_create_file(&(drv->driver),
+ &iunit_drvfs_attrs[i]);
+
+ return ret;
+}
+
+static void iunit_drvfs_remove_files(struct pci_driver *drv)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(iunit_drvfs_attrs); i++)
+ driver_remove_file(&(drv->driver), &iunit_drvfs_attrs[i]);
+}
+
+int atomisp_drvfs_init(struct pci_driver *drv, struct atomisp_device *isp)
+{
+ int ret;
+
+ iunit_debug.isp = isp;
+ iunit_debug.drv = drv;
+
+ ret = iunit_drvfs_create_files(iunit_debug.drv);
+ if (ret) {
+ dev_err(atomisp_dev, "drvfs_create_files error: %d\n", ret);
+ iunit_drvfs_remove_files(drv);
+ }
+
+ return ret;
+}
+
+void atomisp_drvfs_exit(void)
+{
+ iunit_drvfs_remove_files(iunit_debug.drv);
+}
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_drvfs.h b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_drvfs.h
new file mode 100644
index 000000000000..5cb717b0c1c2
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_drvfs.h
@@ -0,0 +1,29 @@
+/*
+ * Support for atomisp driver sysfs interface.
+ *
+ * Copyright (c) 2014 Intel Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+
+#ifndef __ATOMISP_DRVFS_H__
+#define __ATOMISP_DRVFS_H__
+
+extern int atomisp_drvfs_init(struct pci_driver *drv,
+			      struct atomisp_device *isp);
+extern void atomisp_drvfs_exit(void);
+
+#endif /* __ATOMISP_DRVFS_H__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_file.c b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_file.c
new file mode 100644
index 000000000000..c766119bf798
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_file.c
@@ -0,0 +1,245 @@
+/*
+ * Support for Medfield PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
+ *
+ * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+
+#include <media/v4l2-event.h>
+#include <media/v4l2-mediabus.h>
+
+#include <media/videobuf-vmalloc.h>
+#include <linux/delay.h>
+
+#include "ia_css.h"
+
+#include "atomisp_cmd.h"
+#include "atomisp_common.h"
+#include "atomisp_file.h"
+#include "atomisp_internal.h"
+#include "atomisp_ioctl.h"
+
+static void file_work(struct work_struct *work)
+{
+ struct atomisp_file_device *file_dev =
+ container_of(work, struct atomisp_file_device, work);
+ struct atomisp_device *isp = file_dev->isp;
+ /* only support file injection on subdev0 */
+ struct atomisp_sub_device *asd = &isp->asd[0];
+ struct atomisp_video_pipe *out_pipe = &asd->video_in;
+ unsigned short *buf = videobuf_to_vmalloc(out_pipe->outq.bufs[0]);
+ struct v4l2_mbus_framefmt isp_sink_fmt;
+
+ if (asd->streaming != ATOMISP_DEVICE_STREAMING_ENABLED)
+ return;
+
+ dev_dbg(isp->dev, ">%s: ready to start streaming\n", __func__);
+ isp_sink_fmt = *atomisp_subdev_get_ffmt(&asd->subdev, NULL,
+ V4L2_SUBDEV_FORMAT_ACTIVE,
+ ATOMISP_SUBDEV_PAD_SINK);
+
+ while (!atomisp_css_isp_has_started())
+ usleep_range(1000, 1500);
+
+ atomisp_css_send_input_frame(asd, buf, isp_sink_fmt.width,
+ isp_sink_fmt.height);
+ dev_dbg(isp->dev, "<%s: streaming done\n", __func__);
+}
+
+static int file_input_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct atomisp_file_device *file_dev = v4l2_get_subdevdata(sd);
+ struct atomisp_device *isp = file_dev->isp;
+ /* only support file injection on subdev0 */
+ struct atomisp_sub_device *asd = &isp->asd[0];
+
+ dev_dbg(isp->dev, "%s: enable %d\n", __func__, enable);
+ if (enable) {
+ if (asd->streaming != ATOMISP_DEVICE_STREAMING_ENABLED)
+ return 0;
+
+ queue_work(file_dev->work_queue, &file_dev->work);
+ return 0;
+ }
+ cancel_work_sync(&file_dev->work);
+ return 0;
+}
+
+static int file_input_g_parm(struct v4l2_subdev *sd,
+ struct v4l2_streamparm *param)
+{
+	/* fake subdev stub, nothing to do */
+ return 0;
+}
+
+static int file_input_s_parm(struct v4l2_subdev *sd,
+ struct v4l2_streamparm *param)
+{
+	/* fake subdev stub, nothing to do */
+ return 0;
+}
+
+static int file_input_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *format)
+{
+ struct v4l2_mbus_framefmt *fmt = &format->format;
+ struct atomisp_file_device *file_dev = v4l2_get_subdevdata(sd);
+ struct atomisp_device *isp = file_dev->isp;
+ /* only support file injection on subdev0 */
+ struct atomisp_sub_device *asd = &isp->asd[0];
+ struct v4l2_mbus_framefmt *isp_sink_fmt;
+ if (format->pad)
+ return -EINVAL;
+ isp_sink_fmt = atomisp_subdev_get_ffmt(&asd->subdev, NULL,
+ V4L2_SUBDEV_FORMAT_ACTIVE,
+ ATOMISP_SUBDEV_PAD_SINK);
+
+ fmt->width = isp_sink_fmt->width;
+ fmt->height = isp_sink_fmt->height;
+ fmt->code = isp_sink_fmt->code;
+
+ return 0;
+}
+
+static int file_input_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *format)
+{
+ struct v4l2_mbus_framefmt *fmt = &format->format;
+ if (format->pad)
+ return -EINVAL;
+ file_input_get_fmt(sd, cfg, format);
+ if (format->which == V4L2_SUBDEV_FORMAT_TRY)
+ cfg->try_fmt = *fmt;
+ return 0;
+}
+
+static int file_input_log_status(struct v4l2_subdev *sd)
+{
+	/* fake subdev stub, nothing to do */
+ return 0;
+}
+
+static int file_input_s_power(struct v4l2_subdev *sd, int on)
+{
+	/* fake subdev stub, nothing to do */
+ return 0;
+}
+
+static int file_input_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+	/* fake subdev stub, nothing to do */
+ return 0;
+}
+
+static int file_input_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+	/* fake subdev stub, nothing to do */
+ return 0;
+}
+
+static int file_input_enum_frame_ival(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_interval_enum
+ *fie)
+{
+	/* fake subdev stub, nothing to do */
+ return 0;
+}
+
+static const struct v4l2_subdev_video_ops file_input_video_ops = {
+ .s_stream = file_input_s_stream,
+ .g_parm = file_input_g_parm,
+ .s_parm = file_input_s_parm,
+};
+
+static const struct v4l2_subdev_core_ops file_input_core_ops = {
+ .log_status = file_input_log_status,
+ .s_power = file_input_s_power,
+};
+
+static const struct v4l2_subdev_pad_ops file_input_pad_ops = {
+ .enum_mbus_code = file_input_enum_mbus_code,
+ .enum_frame_size = file_input_enum_frame_size,
+ .enum_frame_interval = file_input_enum_frame_ival,
+ .get_fmt = file_input_get_fmt,
+ .set_fmt = file_input_set_fmt,
+};
+
+static const struct v4l2_subdev_ops file_input_ops = {
+ .core = &file_input_core_ops,
+ .video = &file_input_video_ops,
+ .pad = &file_input_pad_ops,
+};
+
+void
+atomisp_file_input_unregister_entities(struct atomisp_file_device *file_dev)
+{
+ media_entity_cleanup(&file_dev->sd.entity);
+ v4l2_device_unregister_subdev(&file_dev->sd);
+}
+
+int atomisp_file_input_register_entities(struct atomisp_file_device *file_dev,
+ struct v4l2_device *vdev)
+{
+ /* Register the subdev and video nodes. */
+ return v4l2_device_register_subdev(vdev, &file_dev->sd);
+}
+
+void atomisp_file_input_cleanup(struct atomisp_device *isp)
+{
+ struct atomisp_file_device *file_dev = &isp->file_dev;
+
+ if (file_dev->work_queue) {
+ destroy_workqueue(file_dev->work_queue);
+ file_dev->work_queue = NULL;
+ }
+}
+
+int atomisp_file_input_init(struct atomisp_device *isp)
+{
+ struct atomisp_file_device *file_dev = &isp->file_dev;
+ struct v4l2_subdev *sd = &file_dev->sd;
+ struct media_pad *pads = file_dev->pads;
+ struct media_entity *me = &sd->entity;
+
+ file_dev->isp = isp;
+ file_dev->work_queue = alloc_workqueue(isp->v4l2_dev.name, 0, 1);
+ if (file_dev->work_queue == NULL) {
+ dev_err(isp->dev, "Failed to initialize file inject workq\n");
+ return -ENOMEM;
+ }
+
+ INIT_WORK(&file_dev->work, file_work);
+
+ v4l2_subdev_init(sd, &file_input_ops);
+ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ strcpy(sd->name, "file_input_subdev");
+ v4l2_set_subdevdata(sd, file_dev);
+
+ pads[0].flags = MEDIA_PAD_FL_SINK;
+ me->function = MEDIA_ENT_F_V4L2_SUBDEV_UNKNOWN;
+
+ return media_entity_pads_init(me, 1, pads);
+}
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_file.h b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_file.h
new file mode 100644
index 000000000000..1b86abd35c38
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_file.h
@@ -0,0 +1,47 @@
+/*
+ * Support for Medfield PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
+ *
+ * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+
+#ifndef __ATOMISP_FILE_H__
+#define __ATOMISP_FILE_H__
+
+#include <media/media-entity.h>
+#include <media/v4l2-subdev.h>
+
+struct atomisp_device;
+
+struct atomisp_file_device {
+ struct v4l2_subdev sd;
+ struct atomisp_device *isp;
+ struct media_pad pads[1];
+
+ struct workqueue_struct *work_queue;
+ struct work_struct work;
+};
+
+void atomisp_file_input_cleanup(struct atomisp_device *isp);
+int atomisp_file_input_init(struct atomisp_device *isp);
+void atomisp_file_input_unregister_entities(
+ struct atomisp_file_device *file_dev);
+int atomisp_file_input_register_entities(struct atomisp_file_device *file_dev,
+ struct v4l2_device *vdev);
+#endif /* __ATOMISP_FILE_H__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_fops.c b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_fops.c
new file mode 100644
index 000000000000..7ce8803cf6f9
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_fops.c
@@ -0,0 +1,1304 @@
+/*
+ * Support for Medfield PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
+ *
+ * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+
+#include <media/v4l2-ioctl.h>
+#include <media/videobuf-vmalloc.h>
+
+#include "atomisp_cmd.h"
+#include "atomisp_common.h"
+#include "atomisp_fops.h"
+#include "atomisp_internal.h"
+#include "atomisp_ioctl.h"
+#include "atomisp_compat.h"
+#include "atomisp_subdev.h"
+#include "atomisp_v4l2.h"
+#include "atomisp-regs.h"
+#include "hmm/hmm.h"
+
+#include "hrt/hive_isp_css_mm_hrt.h"
+
+#include "type_support.h"
+#include "device_access/device_access.h"
+#include "memory_access/memory_access.h"
+
+#include "atomisp_acc.h"
+
+#define ISP_LEFT_PAD 128 /* equal to 2*NWAY */
+
+/*
+ * input image data, and current frame resolution for test
+ */
+#define ISP_PARAM_MMAP_OFFSET 0xfffff000
+
+#define MAGIC_CHECK(is, should) \
+ do { \
+ if (unlikely((is) != (should))) { \
+ pr_err("magic mismatch: %x (expected %x)\n", \
+ is, should); \
+ BUG(); \
+ } \
+ } while (0)
+
+/*
+ * Videobuf ops
+ */
+static int atomisp_buf_setup(struct videobuf_queue *vq, unsigned int *count,
+ unsigned int *size)
+{
+ struct atomisp_video_pipe *pipe = vq->priv_data;
+
+ *size = pipe->pix.sizeimage;
+
+ return 0;
+}
+
+static int atomisp_buf_prepare(struct videobuf_queue *vq,
+ struct videobuf_buffer *vb,
+ enum v4l2_field field)
+{
+ struct atomisp_video_pipe *pipe = vq->priv_data;
+
+ vb->size = pipe->pix.sizeimage;
+ vb->width = pipe->pix.width;
+ vb->height = pipe->pix.height;
+ vb->field = field;
+ vb->state = VIDEOBUF_PREPARED;
+
+ return 0;
+}
+
+static int atomisp_q_one_metadata_buffer(struct atomisp_sub_device *asd,
+ enum atomisp_input_stream_id stream_id,
+ enum atomisp_css_pipe_id css_pipe_id)
+{
+ struct atomisp_metadata_buf *metadata_buf;
+ enum atomisp_metadata_type md_type =
+ atomisp_get_metadata_type(asd, css_pipe_id);
+ struct list_head *metadata_list;
+
+ if (asd->metadata_bufs_in_css[stream_id][css_pipe_id] >=
+ ATOMISP_CSS_Q_DEPTH)
+ return 0; /* we have reached CSS queue depth */
+
+ if (!list_empty(&asd->metadata[md_type])) {
+ metadata_list = &asd->metadata[md_type];
+ } else if (!list_empty(&asd->metadata_ready[md_type])) {
+ metadata_list = &asd->metadata_ready[md_type];
+ } else {
+ dev_warn(asd->isp->dev, "%s: No metadata buffers available for type %d!\n",
+ __func__, md_type);
+ return -EINVAL;
+ }
+
+ metadata_buf = list_entry(metadata_list->next,
+ struct atomisp_metadata_buf, list);
+ list_del_init(&metadata_buf->list);
+
+ if (atomisp_q_metadata_buffer_to_css(asd, metadata_buf,
+ stream_id, css_pipe_id)) {
+ list_add(&metadata_buf->list, metadata_list);
+ return -EINVAL;
+ } else {
+ list_add_tail(&metadata_buf->list,
+ &asd->metadata_in_css[md_type]);
+ }
+ asd->metadata_bufs_in_css[stream_id][css_pipe_id]++;
+
+ return 0;
+}
+
+int atomisp_q_one_s3a_buffer(struct atomisp_sub_device *asd,
+ enum atomisp_input_stream_id stream_id,
+ enum atomisp_css_pipe_id css_pipe_id)
+{
+ struct atomisp_s3a_buf *s3a_buf;
+ struct list_head *s3a_list;
+ unsigned int exp_id;
+
+ if (asd->s3a_bufs_in_css[css_pipe_id] >= ATOMISP_CSS_Q_DEPTH)
+ return 0; /* we have reached CSS queue depth */
+
+ if (!list_empty(&asd->s3a_stats)) {
+ s3a_list = &asd->s3a_stats;
+ } else if (!list_empty(&asd->s3a_stats_ready)) {
+ s3a_list = &asd->s3a_stats_ready;
+ } else {
+ dev_warn(asd->isp->dev, "%s: No s3a buffers available!\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ s3a_buf = list_entry(s3a_list->next, struct atomisp_s3a_buf, list);
+ list_del_init(&s3a_buf->list);
+ exp_id = s3a_buf->s3a_data->exp_id;
+
+ hmm_flush_vmap(s3a_buf->s3a_data->data_ptr);
+ if (atomisp_q_s3a_buffer_to_css(asd, s3a_buf,
+ stream_id, css_pipe_id)) {
+ /* got from head, so return back to the head */
+ list_add(&s3a_buf->list, s3a_list);
+ return -EINVAL;
+ } else {
+ list_add_tail(&s3a_buf->list, &asd->s3a_stats_in_css);
+ if (s3a_list == &asd->s3a_stats_ready)
+ dev_warn(asd->isp->dev, "%s: drop one s3a stat which has exp_id %d!\n",
+ __func__, exp_id);
+ }
+
+ asd->s3a_bufs_in_css[css_pipe_id]++;
+ return 0;
+}
+
+int atomisp_q_one_dis_buffer(struct atomisp_sub_device *asd,
+ enum atomisp_input_stream_id stream_id,
+ enum atomisp_css_pipe_id css_pipe_id)
+{
+ struct atomisp_dis_buf *dis_buf;
+ unsigned long irqflags;
+
+ if (asd->dis_bufs_in_css >= ATOMISP_CSS_Q_DEPTH)
+ return 0; /* we have reached CSS queue depth */
+
+ spin_lock_irqsave(&asd->dis_stats_lock, irqflags);
+ if (list_empty(&asd->dis_stats)) {
+ spin_unlock_irqrestore(&asd->dis_stats_lock, irqflags);
+ dev_warn(asd->isp->dev, "%s: No dis buffers available!\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ dis_buf = list_entry(asd->dis_stats.prev,
+ struct atomisp_dis_buf, list);
+ list_del_init(&dis_buf->list);
+ spin_unlock_irqrestore(&asd->dis_stats_lock, irqflags);
+
+ hmm_flush_vmap(dis_buf->dis_data->data_ptr);
+ if (atomisp_q_dis_buffer_to_css(asd, dis_buf,
+ stream_id, css_pipe_id)) {
+ spin_lock_irqsave(&asd->dis_stats_lock, irqflags);
+ /* got from tail, so return back to the tail */
+ list_add_tail(&dis_buf->list, &asd->dis_stats);
+ spin_unlock_irqrestore(&asd->dis_stats_lock, irqflags);
+ return -EINVAL;
+ } else {
+ spin_lock_irqsave(&asd->dis_stats_lock, irqflags);
+ list_add_tail(&dis_buf->list, &asd->dis_stats_in_css);
+ spin_unlock_irqrestore(&asd->dis_stats_lock, irqflags);
+ }
+
+ asd->dis_bufs_in_css++;
+
+ return 0;
+}
+
+int atomisp_q_video_buffers_to_css(struct atomisp_sub_device *asd,
+ struct atomisp_video_pipe *pipe,
+ enum atomisp_input_stream_id stream_id,
+ enum atomisp_css_buffer_type css_buf_type,
+ enum atomisp_css_pipe_id css_pipe_id)
+{
+ struct videobuf_vmalloc_memory *vm_mem;
+ struct atomisp_css_params_with_list *param;
+ struct atomisp_css_dvs_grid_info *dvs_grid =
+ atomisp_css_get_dvs_grid_info(&asd->params.curr_grid_info);
+ unsigned long irqflags;
+ int err = 0;
+
+ while (pipe->buffers_in_css < ATOMISP_CSS_Q_DEPTH) {
+ struct videobuf_buffer *vb;
+
+ spin_lock_irqsave(&pipe->irq_lock, irqflags);
+ if (list_empty(&pipe->activeq)) {
+ spin_unlock_irqrestore(&pipe->irq_lock, irqflags);
+ return -EINVAL;
+ }
+ vb = list_entry(pipe->activeq.next,
+ struct videobuf_buffer, queue);
+ list_del_init(&vb->queue);
+ vb->state = VIDEOBUF_ACTIVE;
+ spin_unlock_irqrestore(&pipe->irq_lock, irqflags);
+
+ /*
+ * If there is a per_frame setting to apply on the buffer,
+ * do it before buffer en-queueing.
+ */
+ vm_mem = vb->priv;
+
+ param = pipe->frame_params[vb->i];
+ if (param) {
+ atomisp_makeup_css_parameters(asd,
+ &asd->params.css_param.update_flag,
+ &param->params);
+ atomisp_apply_css_parameters(asd, &param->params);
+
+ if (param->params.update_flag.dz_config &&
+ asd->run_mode->val != ATOMISP_RUN_MODE_VIDEO) {
+ err = atomisp_calculate_real_zoom_region(asd,
+ &param->params.dz_config, css_pipe_id);
+ if (!err)
+ atomisp_css_set_dz_config(asd,
+ &param->params.dz_config);
+ }
+ atomisp_css_set_isp_config_applied_frame(asd,
+ vm_mem->vaddr);
+ atomisp_css_update_isp_params_on_pipe(asd,
+ asd->stream_env[stream_id].pipes[css_pipe_id]);
+ asd->params.dvs_6axis = (struct atomisp_css_dvs_6axis *)
+ param->params.dvs_6axis;
+
+			/*
+			 * WORKAROUND:
+			 * The camera HAL v3 cannot set the zoom region as
+			 * both a per-frame setting and a global setting at
+			 * the same time, and currently only sets it as a
+			 * per-frame setting.  So when the per-frame setting
+			 * includes a zoom region, also copy it to the
+			 * global setting.
+			 */
+ if (param->params.update_flag.dz_config &&
+ asd->run_mode->val != ATOMISP_RUN_MODE_VIDEO
+ && !err) {
+ memcpy(&asd->params.css_param.dz_config,
+ &param->params.dz_config,
+ sizeof(struct ia_css_dz_config));
+ asd->params.css_param.update_flag.dz_config =
+ (struct atomisp_dz_config *)
+ &asd->params.css_param.dz_config;
+ asd->params.css_update_params_needed = true;
+ }
+ }
+ /* Enqueue buffer */
+ err = atomisp_q_video_buffer_to_css(asd, vm_mem, stream_id,
+ css_buf_type, css_pipe_id);
+ if (err) {
+ spin_lock_irqsave(&pipe->irq_lock, irqflags);
+ list_add_tail(&vb->queue, &pipe->activeq);
+ vb->state = VIDEOBUF_QUEUED;
+ spin_unlock_irqrestore(&pipe->irq_lock, irqflags);
+ dev_err(asd->isp->dev, "%s, css q fails: %d\n",
+ __func__, err);
+ return -EINVAL;
+ }
+ pipe->buffers_in_css++;
+
+ /* enqueue 3A/DIS/metadata buffers */
+ if (asd->params.curr_grid_info.s3a_grid.enable &&
+ css_pipe_id == asd->params.s3a_enabled_pipe &&
+ css_buf_type == CSS_BUFFER_TYPE_OUTPUT_FRAME)
+ atomisp_q_one_s3a_buffer(asd, stream_id,
+ css_pipe_id);
+
+ if (asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream_info.
+ metadata_info.size &&
+ css_buf_type == CSS_BUFFER_TYPE_OUTPUT_FRAME)
+ atomisp_q_one_metadata_buffer(asd, stream_id,
+ css_pipe_id);
+
+ if (dvs_grid && dvs_grid->enable &&
+ css_pipe_id == CSS_PIPE_ID_VIDEO &&
+ css_buf_type == CSS_BUFFER_TYPE_OUTPUT_FRAME)
+ atomisp_q_one_dis_buffer(asd, stream_id,
+ css_pipe_id);
+ }
+
+ return 0;
+}
+
+static int atomisp_get_css_buf_type(struct atomisp_sub_device *asd,
+ enum atomisp_css_pipe_id pipe_id,
+ uint16_t source_pad)
+{
+ if (ATOMISP_USE_YUVPP(asd)) {
+		/* when running the ZSL case */
+ if (asd->continuous_mode->val &&
+ asd->run_mode->val == ATOMISP_RUN_MODE_PREVIEW) {
+ if (source_pad == ATOMISP_SUBDEV_PAD_SOURCE_CAPTURE)
+ return CSS_BUFFER_TYPE_OUTPUT_FRAME;
+ else if (source_pad == ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW)
+ return CSS_BUFFER_TYPE_SEC_OUTPUT_FRAME;
+ else
+ return CSS_BUFFER_TYPE_VF_OUTPUT_FRAME;
+ }
+
+		/* when running the SDV case */
+ if (asd->continuous_mode->val &&
+ asd->run_mode->val == ATOMISP_RUN_MODE_VIDEO) {
+ if (source_pad == ATOMISP_SUBDEV_PAD_SOURCE_CAPTURE)
+ return CSS_BUFFER_TYPE_OUTPUT_FRAME;
+ else if (source_pad == ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW)
+ return CSS_BUFFER_TYPE_SEC_VF_OUTPUT_FRAME;
+ else if (source_pad == ATOMISP_SUBDEV_PAD_SOURCE_VIDEO)
+ return CSS_BUFFER_TYPE_SEC_OUTPUT_FRAME;
+ else
+ return CSS_BUFFER_TYPE_VF_OUTPUT_FRAME;
+ }
+
+		/* other cases: default setting */
+ if (source_pad == ATOMISP_SUBDEV_PAD_SOURCE_CAPTURE ||
+ source_pad == ATOMISP_SUBDEV_PAD_SOURCE_VIDEO ||
+ (source_pad == ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW &&
+ asd->run_mode->val != ATOMISP_RUN_MODE_VIDEO))
+ return CSS_BUFFER_TYPE_OUTPUT_FRAME;
+ else
+ return CSS_BUFFER_TYPE_VF_OUTPUT_FRAME;
+ }
+
+ if (pipe_id == CSS_PIPE_ID_COPY ||
+ source_pad == ATOMISP_SUBDEV_PAD_SOURCE_CAPTURE ||
+ source_pad == ATOMISP_SUBDEV_PAD_SOURCE_VIDEO ||
+ (source_pad == ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW &&
+ asd->run_mode->val != ATOMISP_RUN_MODE_VIDEO))
+ return CSS_BUFFER_TYPE_OUTPUT_FRAME;
+ else
+ return CSS_BUFFER_TYPE_VF_OUTPUT_FRAME;
+}
+
+static int atomisp_qbuffers_to_css_for_all_pipes(struct atomisp_sub_device *asd)
+{
+ enum atomisp_css_buffer_type buf_type;
+ enum atomisp_css_pipe_id css_capture_pipe_id = CSS_PIPE_ID_COPY;
+ enum atomisp_css_pipe_id css_preview_pipe_id = CSS_PIPE_ID_COPY;
+ enum atomisp_css_pipe_id css_video_pipe_id = CSS_PIPE_ID_COPY;
+ enum atomisp_input_stream_id input_stream_id;
+ struct atomisp_video_pipe *capture_pipe;
+ struct atomisp_video_pipe *preview_pipe;
+ struct atomisp_video_pipe *video_pipe;
+
+ capture_pipe = &asd->video_out_capture;
+ preview_pipe = &asd->video_out_preview;
+ video_pipe = &asd->video_out_video_capture;
+
+ buf_type = atomisp_get_css_buf_type(
+ asd, css_preview_pipe_id,
+ atomisp_subdev_source_pad(&preview_pipe->vdev));
+ input_stream_id = ATOMISP_INPUT_STREAM_PREVIEW;
+ atomisp_q_video_buffers_to_css(asd, preview_pipe,
+ input_stream_id,
+ buf_type, css_preview_pipe_id);
+
+ buf_type = atomisp_get_css_buf_type(asd, css_capture_pipe_id,
+ atomisp_subdev_source_pad(&capture_pipe->vdev));
+ input_stream_id = ATOMISP_INPUT_STREAM_GENERAL;
+ atomisp_q_video_buffers_to_css(asd, capture_pipe,
+ input_stream_id,
+ buf_type, css_capture_pipe_id);
+
+ buf_type = atomisp_get_css_buf_type(asd, css_video_pipe_id,
+ atomisp_subdev_source_pad(&video_pipe->vdev));
+ input_stream_id = ATOMISP_INPUT_STREAM_VIDEO;
+ atomisp_q_video_buffers_to_css(asd, video_pipe,
+ input_stream_id,
+ buf_type, css_video_pipe_id);
+ return 0;
+}
+
+
+/* queue all available buffers to css */
+int atomisp_qbuffers_to_css(struct atomisp_sub_device *asd)
+{
+ enum atomisp_css_buffer_type buf_type;
+ enum atomisp_css_pipe_id css_capture_pipe_id = CSS_PIPE_ID_NUM;
+ enum atomisp_css_pipe_id css_preview_pipe_id = CSS_PIPE_ID_NUM;
+ enum atomisp_css_pipe_id css_video_pipe_id = CSS_PIPE_ID_NUM;
+ enum atomisp_input_stream_id input_stream_id;
+ struct atomisp_video_pipe *capture_pipe = NULL;
+ struct atomisp_video_pipe *vf_pipe = NULL;
+ struct atomisp_video_pipe *preview_pipe = NULL;
+ struct atomisp_video_pipe *video_pipe = NULL;
+ bool raw_mode = atomisp_is_mbuscode_raw(
+ asd->fmt[asd->capture_pad].fmt.code);
+
+ if (asd->isp->inputs[asd->input_curr].camera_caps->
+ sensor[asd->sensor_curr].stream_num == 2 &&
+ !asd->yuvpp_mode)
+ return atomisp_qbuffers_to_css_for_all_pipes(asd);
+
+ if (asd->vfpp->val == ATOMISP_VFPP_DISABLE_SCALER) {
+ video_pipe = &asd->video_out_video_capture;
+ css_video_pipe_id = CSS_PIPE_ID_VIDEO;
+ } else if (asd->vfpp->val == ATOMISP_VFPP_DISABLE_LOWLAT) {
+ preview_pipe = &asd->video_out_capture;
+ css_preview_pipe_id = CSS_PIPE_ID_CAPTURE;
+ } else if (asd->run_mode->val == ATOMISP_RUN_MODE_VIDEO) {
+ if (asd->continuous_mode->val) {
+ capture_pipe = &asd->video_out_capture;
+ vf_pipe = &asd->video_out_vf;
+ css_capture_pipe_id = CSS_PIPE_ID_CAPTURE;
+ }
+ video_pipe = &asd->video_out_video_capture;
+ preview_pipe = &asd->video_out_preview;
+ css_video_pipe_id = CSS_PIPE_ID_VIDEO;
+ css_preview_pipe_id = CSS_PIPE_ID_VIDEO;
+ } else if (asd->continuous_mode->val) {
+ capture_pipe = &asd->video_out_capture;
+ vf_pipe = &asd->video_out_vf;
+ preview_pipe = &asd->video_out_preview;
+
+ css_preview_pipe_id = CSS_PIPE_ID_PREVIEW;
+ css_capture_pipe_id = CSS_PIPE_ID_CAPTURE;
+ } else if (asd->run_mode->val == ATOMISP_RUN_MODE_PREVIEW) {
+ preview_pipe = &asd->video_out_preview;
+ css_preview_pipe_id = CSS_PIPE_ID_PREVIEW;
+ } else {
+ /* ATOMISP_RUN_MODE_STILL_CAPTURE */
+ capture_pipe = &asd->video_out_capture;
+ if (!raw_mode)
+ vf_pipe = &asd->video_out_vf;
+ css_capture_pipe_id = CSS_PIPE_ID_CAPTURE;
+ }
+
+#ifdef ISP2401_NEW_INPUT_SYSTEM
+ if (asd->copy_mode) {
+ css_capture_pipe_id = CSS_PIPE_ID_COPY;
+ css_preview_pipe_id = CSS_PIPE_ID_COPY;
+ css_video_pipe_id = CSS_PIPE_ID_COPY;
+ }
+#endif
+
+ if (asd->yuvpp_mode) {
+ capture_pipe = &asd->video_out_capture;
+ video_pipe = &asd->video_out_video_capture;
+ preview_pipe = &asd->video_out_preview;
+ css_capture_pipe_id = CSS_PIPE_ID_COPY;
+ css_video_pipe_id = CSS_PIPE_ID_YUVPP;
+ css_preview_pipe_id = CSS_PIPE_ID_YUVPP;
+ }
+
+ if (capture_pipe) {
+ buf_type = atomisp_get_css_buf_type(
+ asd, css_capture_pipe_id,
+ atomisp_subdev_source_pad(&capture_pipe->vdev));
+ input_stream_id = ATOMISP_INPUT_STREAM_GENERAL;
+
+ /*
+ * use yuvpp pipe for SOC camera.
+ */
+ if (ATOMISP_USE_YUVPP(asd))
+ css_capture_pipe_id = CSS_PIPE_ID_YUVPP;
+
+ atomisp_q_video_buffers_to_css(asd, capture_pipe,
+ input_stream_id,
+ buf_type, css_capture_pipe_id);
+ }
+
+ if (vf_pipe) {
+ buf_type = atomisp_get_css_buf_type(
+ asd, css_capture_pipe_id,
+ atomisp_subdev_source_pad(&vf_pipe->vdev));
+ if (asd->stream_env[ATOMISP_INPUT_STREAM_POSTVIEW].stream)
+ input_stream_id = ATOMISP_INPUT_STREAM_POSTVIEW;
+ else
+ input_stream_id = ATOMISP_INPUT_STREAM_GENERAL;
+
+ /*
+ * use yuvpp pipe for SOC camera.
+ */
+ if (ATOMISP_USE_YUVPP(asd))
+ css_capture_pipe_id = CSS_PIPE_ID_YUVPP;
+ atomisp_q_video_buffers_to_css(asd, vf_pipe,
+ input_stream_id,
+ buf_type, css_capture_pipe_id);
+ }
+
+ if (preview_pipe) {
+ buf_type = atomisp_get_css_buf_type(
+ asd, css_preview_pipe_id,
+ atomisp_subdev_source_pad(&preview_pipe->vdev));
+ if (ATOMISP_SOC_CAMERA(asd) && css_preview_pipe_id == CSS_PIPE_ID_YUVPP)
+ input_stream_id = ATOMISP_INPUT_STREAM_GENERAL;
+ /* else for ext isp use case */
+ else if (css_preview_pipe_id == CSS_PIPE_ID_YUVPP)
+ input_stream_id = ATOMISP_INPUT_STREAM_VIDEO;
+ else if (asd->stream_env[ATOMISP_INPUT_STREAM_PREVIEW].stream)
+ input_stream_id = ATOMISP_INPUT_STREAM_PREVIEW;
+ else
+ input_stream_id = ATOMISP_INPUT_STREAM_GENERAL;
+
+ /*
+ * use yuvpp pipe for SOC camera.
+ */
+ if (ATOMISP_USE_YUVPP(asd))
+ css_preview_pipe_id = CSS_PIPE_ID_YUVPP;
+
+ atomisp_q_video_buffers_to_css(asd, preview_pipe,
+ input_stream_id,
+ buf_type, css_preview_pipe_id);
+ }
+
+ if (video_pipe) {
+ buf_type = atomisp_get_css_buf_type(
+ asd, css_video_pipe_id,
+ atomisp_subdev_source_pad(&video_pipe->vdev));
+ if (asd->stream_env[ATOMISP_INPUT_STREAM_VIDEO].stream)
+ input_stream_id = ATOMISP_INPUT_STREAM_VIDEO;
+ else
+ input_stream_id = ATOMISP_INPUT_STREAM_GENERAL;
+
+ /*
+ * use yuvpp pipe for SOC camera.
+ */
+ if (ATOMISP_USE_YUVPP(asd))
+ css_video_pipe_id = CSS_PIPE_ID_YUVPP;
+
+ atomisp_q_video_buffers_to_css(asd, video_pipe,
+ input_stream_id,
+ buf_type, css_video_pipe_id);
+ }
+
+ return 0;
+}
+
+static void atomisp_buf_queue(struct videobuf_queue *vq,
+ struct videobuf_buffer *vb)
+{
+ struct atomisp_video_pipe *pipe = vq->priv_data;
+
+ /*
+ * When a frame buffer meets either of the following conditions, it
+ * should be put into the waiting list:
+ * 1. It is not a main output frame and it has a per-frame parameter
+ * to go with it.
+ * 2. It is not a main output frame and the waiting buffer list is not
+ * empty; to keep the FIFO order of frame buffer processing, it is
+ * put on the waiting list until the preceding per-frame parameter
+ * buffers get enqueued.
+ */
+ if (!atomisp_is_vf_pipe(pipe) &&
+ (pipe->frame_request_config_id[vb->i] ||
+ !list_empty(&pipe->buffers_waiting_for_param)))
+ list_add_tail(&vb->queue, &pipe->buffers_waiting_for_param);
+ else
+ list_add_tail(&vb->queue, &pipe->activeq);
+
+ vb->state = VIDEOBUF_QUEUED;
+}
+
+static void atomisp_buf_release(struct videobuf_queue *vq,
+ struct videobuf_buffer *vb)
+{
+ vb->state = VIDEOBUF_NEEDS_INIT;
+ atomisp_videobuf_free_buf(vb);
+}
+
+static int atomisp_buf_setup_output(struct videobuf_queue *vq,
+ unsigned int *count, unsigned int *size)
+{
+ struct atomisp_video_pipe *pipe = vq->priv_data;
+
+ *size = pipe->pix.sizeimage;
+
+ return 0;
+}
+
+static int atomisp_buf_prepare_output(struct videobuf_queue *vq,
+ struct videobuf_buffer *vb,
+ enum v4l2_field field)
+{
+ struct atomisp_video_pipe *pipe = vq->priv_data;
+
+ vb->size = pipe->pix.sizeimage;
+ vb->width = pipe->pix.width;
+ vb->height = pipe->pix.height;
+ vb->field = field;
+ vb->state = VIDEOBUF_PREPARED;
+
+ return 0;
+}
+
+static void atomisp_buf_queue_output(struct videobuf_queue *vq,
+ struct videobuf_buffer *vb)
+{
+ struct atomisp_video_pipe *pipe = vq->priv_data;
+
+ list_add_tail(&vb->queue, &pipe->activeq_out);
+ vb->state = VIDEOBUF_QUEUED;
+}
+
+static void atomisp_buf_release_output(struct videobuf_queue *vq,
+ struct videobuf_buffer *vb)
+{
+ videobuf_vmalloc_free(vb);
+ vb->state = VIDEOBUF_NEEDS_INIT;
+}
+
+static struct videobuf_queue_ops videobuf_qops = {
+ .buf_setup = atomisp_buf_setup,
+ .buf_prepare = atomisp_buf_prepare,
+ .buf_queue = atomisp_buf_queue,
+ .buf_release = atomisp_buf_release,
+};
+
+static struct videobuf_queue_ops videobuf_qops_output = {
+ .buf_setup = atomisp_buf_setup_output,
+ .buf_prepare = atomisp_buf_prepare_output,
+ .buf_queue = atomisp_buf_queue_output,
+ .buf_release = atomisp_buf_release_output,
+};
+
+static int atomisp_init_pipe(struct atomisp_video_pipe *pipe)
+{
+ /* init locks */
+ spin_lock_init(&pipe->irq_lock);
+
+ videobuf_queue_vmalloc_init(&pipe->capq, &videobuf_qops, NULL,
+ &pipe->irq_lock,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE,
+ V4L2_FIELD_NONE,
+ sizeof(struct atomisp_buffer), pipe,
+ NULL); /* ext_lock: NULL */
+
+ videobuf_queue_vmalloc_init(&pipe->outq, &videobuf_qops_output, NULL,
+ &pipe->irq_lock,
+ V4L2_BUF_TYPE_VIDEO_OUTPUT,
+ V4L2_FIELD_NONE,
+ sizeof(struct atomisp_buffer), pipe,
+ NULL); /* ext_lock: NULL */
+
+ INIT_LIST_HEAD(&pipe->activeq);
+ INIT_LIST_HEAD(&pipe->activeq_out);
+ INIT_LIST_HEAD(&pipe->buffers_waiting_for_param);
+ INIT_LIST_HEAD(&pipe->per_frame_params);
+ memset(pipe->frame_request_config_id, 0,
+ VIDEO_MAX_FRAME * sizeof(unsigned int));
+ memset(pipe->frame_params, 0,
+ VIDEO_MAX_FRAME *
+ sizeof(struct atomisp_css_params_with_list *));
+
+ return 0;
+}
+
+static void atomisp_dev_init_struct(struct atomisp_device *isp)
+{
+ unsigned int i;
+
+ isp->sw_contex.file_input = 0;
+ isp->need_gfx_throttle = true;
+ isp->isp_fatal_error = false;
+ isp->mipi_frame_size = 0;
+
+ for (i = 0; i < isp->input_cnt; i++)
+ isp->inputs[i].asd = NULL;
+ /*
+ * For Merrifield, frequency is scalable.
+ * After boot-up, the default frequency is 200MHz.
+ */
+ isp->sw_contex.running_freq = ISP_FREQ_200MHZ;
+}
+
+static void atomisp_subdev_init_struct(struct atomisp_sub_device *asd)
+{
+ v4l2_ctrl_s_ctrl(asd->run_mode, ATOMISP_RUN_MODE_STILL_CAPTURE);
+ memset(&asd->params.css_param, 0, sizeof(asd->params.css_param));
+ asd->params.color_effect = V4L2_COLORFX_NONE;
+ asd->params.bad_pixel_en = 1;
+ asd->params.gdc_cac_en = 0;
+ asd->params.video_dis_en = 0;
+ asd->params.sc_en = 0;
+ asd->params.fpn_en = 0;
+ asd->params.xnr_en = 0;
+ asd->params.false_color = 0;
+ asd->params.online_process = 1;
+ asd->params.yuv_ds_en = 0;
+ /* s3a grid not enabled for any pipe */
+ asd->params.s3a_enabled_pipe = CSS_PIPE_ID_NUM;
+
+ asd->params.offline_parm.num_captures = 1;
+ asd->params.offline_parm.skip_frames = 0;
+ asd->params.offline_parm.offset = 0;
+ asd->delayed_init = ATOMISP_DELAYED_INIT_NOT_QUEUED;
+ /* Add for channel */
+ asd->input_curr = 0;
+
+ asd->mipi_frame_size = 0;
+ asd->copy_mode = false;
+ asd->yuvpp_mode = false;
+
+ asd->stream_prepared = false;
+ asd->high_speed_mode = false;
+ asd->sensor_array_res.height = 0;
+ asd->sensor_array_res.width = 0;
+ atomisp_css_init_struct(asd);
+}
+/*
+ * file operation functions
+ */
+unsigned int atomisp_subdev_users(struct atomisp_sub_device *asd)
+{
+ return asd->video_out_preview.users +
+ asd->video_out_vf.users +
+ asd->video_out_capture.users +
+ asd->video_out_video_capture.users +
+ asd->video_acc.users +
+ asd->video_in.users;
+}
+
+unsigned int atomisp_dev_users(struct atomisp_device *isp)
+{
+ unsigned int i, sum;
+ for (i = 0, sum = 0; i < isp->num_of_streams; i++)
+ sum += atomisp_subdev_users(&isp->asd[i]);
+
+ return sum;
+}
+
+static int atomisp_open(struct file *file)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct atomisp_device *isp = video_get_drvdata(vdev);
+ struct atomisp_video_pipe *pipe = NULL;
+ struct atomisp_acc_pipe *acc_pipe = NULL;
+ struct atomisp_sub_device *asd;
+ bool acc_node = false;
+ int ret;
+
+ dev_dbg(isp->dev, "open device %s\n", vdev->name);
+
+ rt_mutex_lock(&isp->mutex);
+
+ acc_node = !strncmp(vdev->name, "ATOMISP ISP ACC",
+ sizeof(vdev->name));
+ if (acc_node) {
+ acc_pipe = atomisp_to_acc_pipe(vdev);
+ asd = acc_pipe->asd;
+ } else {
+ pipe = atomisp_to_video_pipe(vdev);
+ asd = pipe->asd;
+ }
+ asd->subdev.devnode = vdev;
+ /* Deferred firmware loading case. */
+ if (isp->css_env.isp_css_fw.bytes == 0) {
+ isp->firmware = atomisp_load_firmware(isp);
+ if (!isp->firmware) {
+ dev_err(isp->dev, "Failed to load ISP firmware.\n");
+ ret = -ENOENT;
+ goto error;
+ }
+ ret = atomisp_css_load_firmware(isp);
+ if (ret) {
+ dev_err(isp->dev, "Failed to init css.\n");
+ goto error;
+ }
+ /* No need to keep FW in memory anymore. */
+ release_firmware(isp->firmware);
+ isp->firmware = NULL;
+ isp->css_env.isp_css_fw.data = NULL;
+ }
+
+ if (acc_node && acc_pipe->users) {
+ dev_dbg(isp->dev, "acc node already opened\n");
+ rt_mutex_unlock(&isp->mutex);
+ return -EBUSY;
+ } else if (acc_node) {
+ goto dev_init;
+ }
+
+ if (!isp->input_cnt) {
+ dev_err(isp->dev, "no camera attached\n");
+ ret = -EINVAL;
+ goto error;
+ }
+
+ /*
+ * atomisp does not allow multiple opens
+ */
+ if (pipe->users) {
+ dev_dbg(isp->dev, "video node already opened\n");
+ rt_mutex_unlock(&isp->mutex);
+ return -EBUSY;
+ }
+
+ ret = atomisp_init_pipe(pipe);
+ if (ret)
+ goto error;
+
+dev_init:
+ if (atomisp_dev_users(isp)) {
+ dev_dbg(isp->dev, "skip init isp in open\n");
+ goto init_subdev;
+ }
+
+ /* runtime power management, turn on ISP */
+ ret = pm_runtime_get_sync(vdev->v4l2_dev->dev);
+ if (ret < 0) {
+ dev_err(isp->dev, "Failed to power on device\n");
+ goto error;
+ }
+
+ if (dypool_enable) {
+ ret = hmm_pool_register(dypool_pgnr, HMM_POOL_TYPE_DYNAMIC);
+ if (ret)
+ dev_err(isp->dev, "Failed to register dynamic memory pool.\n");
+ }
+
+ /* Init ISP */
+ if (atomisp_css_init(isp)) {
+ ret = -EINVAL;
+ /* Need to clean up CSS init if it fails. */
+ goto css_error;
+ }
+
+ atomisp_dev_init_struct(isp);
+
+ ret = v4l2_subdev_call(isp->flash, core, s_power, 1);
+ if (ret < 0 && ret != -ENODEV && ret != -ENOIOCTLCMD) {
+ dev_err(isp->dev, "Failed to power-on flash\n");
+ goto css_error;
+ }
+
+init_subdev:
+ if (atomisp_subdev_users(asd))
+ goto done;
+
+ atomisp_subdev_init_struct(asd);
+
+done:
+
+ if (acc_node)
+ acc_pipe->users++;
+ else
+ pipe->users++;
+ rt_mutex_unlock(&isp->mutex);
+ return 0;
+
+css_error:
+ atomisp_css_uninit(isp);
+error:
+ hmm_pool_unregister(HMM_POOL_TYPE_DYNAMIC);
+ pm_runtime_put(vdev->v4l2_dev->dev);
+ rt_mutex_unlock(&isp->mutex);
+ return ret;
+}
+
+static int atomisp_release(struct file *file)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct atomisp_device *isp = video_get_drvdata(vdev);
+ struct atomisp_video_pipe *pipe;
+ struct atomisp_acc_pipe *acc_pipe;
+ struct atomisp_sub_device *asd;
+ bool acc_node;
+ struct v4l2_requestbuffers req;
+ struct v4l2_subdev_fh fh;
+ struct v4l2_rect clear_compose = {0};
+ int ret = 0;
+
+ v4l2_fh_init(&fh.vfh, vdev);
+
+ req.count = 0;
+ if (isp == NULL)
+ return -EBADF;
+
+ mutex_lock(&isp->streamoff_mutex);
+ rt_mutex_lock(&isp->mutex);
+
+ dev_dbg(isp->dev, "release device %s\n", vdev->name);
+ acc_node = !strncmp(vdev->name, "ATOMISP ISP ACC",
+ sizeof(vdev->name));
+ if (acc_node) {
+ acc_pipe = atomisp_to_acc_pipe(vdev);
+ asd = acc_pipe->asd;
+ } else {
+ pipe = atomisp_to_video_pipe(vdev);
+ asd = pipe->asd;
+ }
+ asd->subdev.devnode = vdev;
+ if (acc_node) {
+ acc_pipe->users--;
+ goto subdev_uninit;
+ }
+ pipe->users--;
+
+ if (pipe->capq.streaming)
+ dev_warn(isp->dev,
+ "%s: ISP still streaming while closing!",
+ __func__);
+
+ if (pipe->capq.streaming &&
+ __atomisp_streamoff(file, NULL, V4L2_BUF_TYPE_VIDEO_CAPTURE)) {
+ dev_err(isp->dev,
+ "atomisp_streamoff failed on release, driver bug");
+ goto done;
+ }
+
+ if (pipe->users)
+ goto done;
+
+ if (__atomisp_reqbufs(file, NULL, &req)) {
+ dev_err(isp->dev,
+ "atomisp_reqbufs failed on release, driver bug");
+ goto done;
+ }
+
+ if (pipe->outq.bufs[0]) {
+ mutex_lock(&pipe->outq.vb_lock);
+ videobuf_queue_cancel(&pipe->outq);
+ mutex_unlock(&pipe->outq.vb_lock);
+ }
+
+ /*
+ * A little trick here:
+ * file injection input resolution is recorded in the sink pad,
+ * therefore it cannot be cleared when releasing one device node.
+ * The sink pad setting can only be cleared when all device nodes
+ * get released.
+ */
+ if (!isp->sw_contex.file_input && asd->fmt_auto->val) {
+ struct v4l2_mbus_framefmt isp_sink_fmt = { 0 };
+ atomisp_subdev_set_ffmt(&asd->subdev, fh.pad,
+ V4L2_SUBDEV_FORMAT_ACTIVE,
+ ATOMISP_SUBDEV_PAD_SINK, &isp_sink_fmt);
+ }
+subdev_uninit:
+ if (atomisp_subdev_users(asd))
+ goto done;
+
+ /* clear the sink pad for file input */
+ if (isp->sw_contex.file_input && asd->fmt_auto->val) {
+ struct v4l2_mbus_framefmt isp_sink_fmt = { 0 };
+ atomisp_subdev_set_ffmt(&asd->subdev, fh.pad,
+ V4L2_SUBDEV_FORMAT_ACTIVE,
+ ATOMISP_SUBDEV_PAD_SINK, &isp_sink_fmt);
+ }
+
+ atomisp_css_free_stat_buffers(asd);
+ atomisp_free_internal_buffers(asd);
+ ret = v4l2_subdev_call(isp->inputs[asd->input_curr].camera,
+ core, s_power, 0);
+ if (ret)
+ dev_warn(isp->dev, "Failed to power-off sensor\n");
+
+ /* clear the asd field to show this camera is not used */
+ isp->inputs[asd->input_curr].asd = NULL;
+ asd->streaming = ATOMISP_DEVICE_STREAMING_DISABLED;
+
+ if (atomisp_dev_users(isp))
+ goto done;
+
+ atomisp_acc_release(asd);
+
+ atomisp_destroy_pipes_stream_force(asd);
+ atomisp_css_uninit(isp);
+
+ if (defer_fw_load) {
+ atomisp_css_unload_firmware(isp);
+ isp->css_env.isp_css_fw.data = NULL;
+ isp->css_env.isp_css_fw.bytes = 0;
+ }
+
+ hmm_pool_unregister(HMM_POOL_TYPE_DYNAMIC);
+
+ ret = v4l2_subdev_call(isp->flash, core, s_power, 0);
+ if (ret < 0 && ret != -ENODEV && ret != -ENOIOCTLCMD)
+ dev_warn(isp->dev, "Failed to power-off flash\n");
+
+ if (pm_runtime_put_sync(vdev->v4l2_dev->dev) < 0)
+ dev_err(isp->dev, "Failed to power off device\n");
+
+done:
+ if (!acc_node) {
+ atomisp_subdev_set_selection(&asd->subdev, fh.pad,
+ V4L2_SUBDEV_FORMAT_ACTIVE,
+ atomisp_subdev_source_pad(vdev),
+ V4L2_SEL_TGT_COMPOSE, 0,
+ &clear_compose);
+ }
+ rt_mutex_unlock(&isp->mutex);
+ mutex_unlock(&isp->streamoff_mutex);
+
+ return 0;
+}
+
+/*
+ * Memory helper functions for image frames and private parameters
+ */
+static int do_isp_mm_remap(struct atomisp_device *isp,
+ struct vm_area_struct *vma,
+ ia_css_ptr isp_virt, u32 host_virt, u32 pgnr)
+{
+ u32 pfn;
+
+ while (pgnr) {
+ pfn = hmm_virt_to_phys(isp_virt) >> PAGE_SHIFT;
+ if (remap_pfn_range(vma, host_virt, pfn,
+ PAGE_SIZE, PAGE_SHARED)) {
+ dev_err(isp->dev, "remap_pfn_range err.\n");
+ return -EAGAIN;
+ }
+
+ isp_virt += PAGE_SIZE;
+ host_virt += PAGE_SIZE;
+ pgnr--;
+ }
+
+ return 0;
+}
+
+static int frame_mmap(struct atomisp_device *isp,
+ const struct atomisp_css_frame *frame, struct vm_area_struct *vma)
+{
+ ia_css_ptr isp_virt;
+ u32 host_virt;
+ u32 pgnr;
+
+ if (!frame) {
+ dev_err(isp->dev, "%s: NULL frame pointer.\n", __func__);
+ return -EINVAL;
+ }
+
+ host_virt = vma->vm_start;
+ isp_virt = frame->data;
+ atomisp_get_frame_pgnr(isp, frame, &pgnr);
+
+ if (do_isp_mm_remap(isp, vma, isp_virt, host_virt, pgnr))
+ return -EAGAIN;
+
+ return 0;
+}
+
+int atomisp_videobuf_mmap_mapper(struct videobuf_queue *q,
+ struct vm_area_struct *vma)
+{
+ u32 offset = vma->vm_pgoff << PAGE_SHIFT;
+ int ret = -EINVAL, i;
+ struct atomisp_device *isp =
+ ((struct atomisp_video_pipe *)(q->priv_data))->isp;
+ struct videobuf_vmalloc_memory *vm_mem;
+ struct videobuf_mapping *map;
+
+ MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
+ if (!(vma->vm_flags & VM_WRITE) || !(vma->vm_flags & VM_SHARED)) {
+ dev_err(isp->dev, "map appl bug: PROT_WRITE and MAP_SHARED are required\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&q->vb_lock);
+ for (i = 0; i < VIDEO_MAX_FRAME; i++) {
+ struct videobuf_buffer *buf = q->bufs[i];
+ if (buf == NULL)
+ continue;
+
+ map = kzalloc(sizeof(struct videobuf_mapping), GFP_KERNEL);
+ if (!map) {
+ mutex_unlock(&q->vb_lock);
+ return -ENOMEM;
+ }
+
+ buf->map = map;
+ map->q = q;
+
+ buf->baddr = vma->vm_start;
+
+ if (buf && buf->memory == V4L2_MEMORY_MMAP &&
+ buf->boff == offset) {
+ vm_mem = buf->priv;
+ ret = frame_mmap(isp, vm_mem->vaddr, vma);
+ vma->vm_flags |= VM_IO|VM_DONTEXPAND|VM_DONTDUMP;
+ break;
+ }
+ }
+ mutex_unlock(&q->vb_lock);
+
+ return ret;
+}
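The offset matched against buf->boff above is the mmap offset userspace obtains from VIDIOC_QUERYBUF and passes back to mmap(); the PROT_WRITE/MAP_SHARED check at the top of the function corresponds to the flags used in that call. A minimal userspace sketch of the flow, with error handling omitted and /dev/video0 and buffer index 0 as placeholders:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/videodev2.h>

int main(void)
{
	int fd = open("/dev/video0", O_RDWR);	/* placeholder device node */
	struct v4l2_buffer buf;
	void *mem;

	memset(&buf, 0, sizeof(buf));
	buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	buf.memory = V4L2_MEMORY_MMAP;
	buf.index = 0;				/* first buffer of the queue */
	ioctl(fd, VIDIOC_QUERYBUF, &buf);

	/* buf.m.offset is what the driver compares against buf->boff */
	mem = mmap(NULL, buf.length, PROT_READ | PROT_WRITE,
		   MAP_SHARED, fd, buf.m.offset);
	printf("mapped %u bytes at %p\n", buf.length, mem);
	return 0;
}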
+
+/* The input frame contains left and right padding that needs to be removed.
+ * There is always ISP_LEFT_PAD padding on the left side.
+ * There is also padding on the right (padded_width - width).
+ */
+static int remove_pad_from_frame(struct atomisp_device *isp,
+ struct atomisp_css_frame *in_frame, __u32 width, __u32 height)
+{
+ unsigned int i;
+ unsigned short *buffer;
+ int ret = 0;
+ ia_css_ptr load = in_frame->data;
+ ia_css_ptr store = load;
+
+ buffer = kmalloc(width*sizeof(load), GFP_KERNEL);
+ if (!buffer) {
+ dev_err(isp->dev, "out of memory.\n");
+ return -ENOMEM;
+ }
+
+ load += ISP_LEFT_PAD;
+ for (i = 0; i < height; i++) {
+ ret = hmm_load(load, buffer, width*sizeof(load));
+ if (ret < 0)
+ goto remove_pad_error;
+
+ ret = hmm_store(store, buffer, width*sizeof(store));
+ if (ret < 0)
+ goto remove_pad_error;
+
+ load += in_frame->info.padded_width;
+ store += width;
+ }
+
+remove_pad_error:
+ kfree(buffer);
+ return ret;
+}
+
+static int atomisp_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct atomisp_device *isp = video_get_drvdata(vdev);
+ struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev);
+ struct atomisp_sub_device *asd = pipe->asd;
+ struct atomisp_css_frame *raw_virt_addr;
+ u32 start = vma->vm_start;
+ u32 end = vma->vm_end;
+ u32 size = end - start;
+ u32 origin_size, new_size;
+ int ret;
+
+ if (!(vma->vm_flags & (VM_WRITE | VM_READ)))
+ return -EACCES;
+
+ rt_mutex_lock(&isp->mutex);
+
+ if (!(vma->vm_flags & VM_SHARED)) {
+ /* Map private buffer.
+ * Set VM_SHARED in the flags since we need
+ * to map the buffer page by page.
+ * Without VM_SHARED, remap_pfn_range() treats
+ * this kind of mapping as invalid.
+ */
+ vma->vm_flags |= VM_SHARED;
+ ret = hmm_mmap(vma, vma->vm_pgoff << PAGE_SHIFT);
+ rt_mutex_unlock(&isp->mutex);
+ return ret;
+ }
+
+ /* mmap for ISP offline raw data */
+ if (atomisp_subdev_source_pad(vdev)
+ == ATOMISP_SUBDEV_PAD_SOURCE_CAPTURE &&
+ vma->vm_pgoff == (ISP_PARAM_MMAP_OFFSET >> PAGE_SHIFT)) {
+ new_size = pipe->pix.width * pipe->pix.height * 2;
+ if (asd->params.online_process != 0) {
+ ret = -EINVAL;
+ goto error;
+ }
+ raw_virt_addr = asd->raw_output_frame;
+ if (raw_virt_addr == NULL) {
+ dev_err(isp->dev, "Failed to request RAW frame\n");
+ ret = -EINVAL;
+ goto error;
+ }
+
+ ret = remove_pad_from_frame(isp, raw_virt_addr,
+ pipe->pix.width, pipe->pix.height);
+ if (ret < 0) {
+ dev_err(isp->dev, "remove pad failed.\n");
+ goto error;
+ }
+ origin_size = raw_virt_addr->data_bytes;
+ raw_virt_addr->data_bytes = new_size;
+
+ if (size != PAGE_ALIGN(new_size)) {
+ dev_err(isp->dev, "incorrect size for mmap ISP Raw Frame\n");
+ ret = -EINVAL;
+ goto error;
+ }
+
+ if (frame_mmap(isp, raw_virt_addr, vma)) {
+ dev_err(isp->dev, "frame_mmap failed.\n");
+ raw_virt_addr->data_bytes = origin_size;
+ ret = -EAGAIN;
+ goto error;
+ }
+ raw_virt_addr->data_bytes = origin_size;
+ vma->vm_flags |= VM_IO|VM_DONTEXPAND|VM_DONTDUMP;
+ rt_mutex_unlock(&isp->mutex);
+ return 0;
+ }
+
+ /*
+ * mmap for normal frames
+ */
+ if (size != pipe->pix.sizeimage) {
+ dev_err(isp->dev, "incorrect size for mmap ISP frames\n");
+ ret = -EINVAL;
+ goto error;
+ }
+ rt_mutex_unlock(&isp->mutex);
+
+ return atomisp_videobuf_mmap_mapper(&pipe->capq, vma);
+
+error:
+ rt_mutex_unlock(&isp->mutex);
+
+ return ret;
+}
+
+static int atomisp_file_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev);
+
+ return videobuf_mmap_mapper(&pipe->outq, vma);
+}
+
+static unsigned int atomisp_poll(struct file *file,
+ struct poll_table_struct *pt)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct atomisp_device *isp = video_get_drvdata(vdev);
+ struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev);
+
+ rt_mutex_lock(&isp->mutex);
+ if (pipe->capq.streaming != 1) {
+ rt_mutex_unlock(&isp->mutex);
+ return POLLERR;
+ }
+ rt_mutex_unlock(&isp->mutex);
+
+ return videobuf_poll_stream(file, &pipe->capq, pt);
+}
+
+const struct v4l2_file_operations atomisp_fops = {
+ .owner = THIS_MODULE,
+ .open = atomisp_open,
+ .release = atomisp_release,
+ .mmap = atomisp_mmap,
+ .unlocked_ioctl = video_ioctl2,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl32 = atomisp_compat_ioctl32,
+#endif
+ .poll = atomisp_poll,
+};
+
+const struct v4l2_file_operations atomisp_file_fops = {
+ .owner = THIS_MODULE,
+ .open = atomisp_open,
+ .release = atomisp_release,
+ .mmap = atomisp_file_mmap,
+ .unlocked_ioctl = video_ioctl2,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl32 = atomisp_compat_ioctl32,
+#endif
+ .poll = atomisp_poll,
+};
+
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_fops.h b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_fops.h
new file mode 100644
index 000000000000..8471e391501a
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_fops.h
@@ -0,0 +1,54 @@
+/*
+ * Support for Medfield PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
+ *
+ * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+
+#ifndef __ATOMISP_FOPS_H__
+#define __ATOMISP_FOPS_H__
+#include "atomisp_subdev.h"
+
+int atomisp_q_video_buffers_to_css(struct atomisp_sub_device *asd,
+ struct atomisp_video_pipe *pipe,
+ enum atomisp_input_stream_id stream_id,
+ enum atomisp_css_buffer_type css_buf_type,
+ enum atomisp_css_pipe_id css_pipe_id);
+
+unsigned int atomisp_dev_users(struct atomisp_device *isp);
+unsigned int atomisp_subdev_users(struct atomisp_sub_device *asd);
+
+/*
+ * Memory helper functions for image frames and private parameters
+ */
+
+int atomisp_videobuf_mmap_mapper(struct videobuf_queue *q,
+ struct vm_area_struct *vma);
+
+int atomisp_qbuf_to_css(struct atomisp_device *isp,
+ struct atomisp_video_pipe *pipe,
+ struct videobuf_buffer *vb);
+
+int atomisp_qbuffers_to_css(struct atomisp_sub_device *asd);
+
+extern const struct v4l2_file_operations atomisp_fops;
+
+extern bool defer_fw_load;
+
+#endif /* __ATOMISP_FOPS_H__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_helper.h b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_helper.h
new file mode 100644
index 000000000000..e9650cb75ba5
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_helper.h
@@ -0,0 +1,33 @@
+/*
+ * Support for Medfield PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
+ *
+ * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+#ifndef _atomisp_helper_h_
+#define _atomisp_helper_h_
+extern void __iomem *atomisp_io_base;
+
+static inline void __iomem *atomisp_get_io_virt_addr(unsigned int address)
+{
+ void __iomem *ret = atomisp_io_base + (address & 0x003FFFFF);
+ return ret;
+}
+#endif
+
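The mask in atomisp_get_io_virt_addr() keeps only the low 22 bits of the register address, i.e. the offset within a 4 MiB MMIO window starting at atomisp_io_base. A tiny standalone sketch of just that arithmetic; the mask name and the sample address are invented for illustration:

#include <stdio.h>

/* same constant as in atomisp_get_io_virt_addr(); name invented here */
#define ATOMISP_IO_WINDOW_MASK	0x003FFFFFu

int main(void)
{
	unsigned int css_addr = 0x00C01234;	/* arbitrary example register address */

	/* offset that would be added to atomisp_io_base */
	printf("offset into the 4 MiB window: 0x%06x\n",
	       css_addr & ATOMISP_IO_WINDOW_MASK);
	return 0;
}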
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_internal.h b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_internal.h
new file mode 100644
index 000000000000..d3667132851b
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_internal.h
@@ -0,0 +1,331 @@
+/*
+ * Support for Medfield PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
+ *
+ * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+#ifndef __ATOMISP_INTERNAL_H__
+#define __ATOMISP_INTERNAL_H__
+
+#include "../../include/linux/atomisp_platform.h"
+#include <linux/firmware.h>
+#include <linux/kernel.h>
+#include <linux/pm_qos.h>
+#include <linux/idr.h>
+
+#include <asm/intel-mid.h>
+#include "../../include/asm/intel_mid_pcihelpers.h"
+
+#include <media/media-device.h>
+#include <media/v4l2-subdev.h>
+
+#ifndef ISP2401
+#include "ia_css_types.h"
+#include "sh_css_legacy.h"
+#else
+/*#include "ia_css_types.h"*/
+/*#include "sh_css_legacy.h"*/
+#endif
+
+#include "atomisp_csi2.h"
+#include "atomisp_file.h"
+#include "atomisp_subdev.h"
+#include "atomisp_tpg.h"
+#include "atomisp_compat.h"
+
+#include "gp_device.h"
+#include "irq.h"
+#include <linux/vmalloc.h>
+
+#define V4L2_EVENT_FRAME_END 5
+
+#define IS_HWREVISION(isp, rev) \
+ (((isp)->media_dev.hw_revision & ATOMISP_HW_REVISION_MASK) == \
+ ((rev) << ATOMISP_HW_REVISION_SHIFT))
+
+#define MAX_STREAM_NUM 2
+
+#define ATOMISP_PCI_DEVICE_SOC_MASK 0xfff8
+/* MRFLD with 0x1178: ISP freq can burst to 457MHz */
+#define ATOMISP_PCI_DEVICE_SOC_MRFLD 0x1178
+/* MRFLD with 0x1179: max ISP freq limited to 400MHz */
+#define ATOMISP_PCI_DEVICE_SOC_MRFLD_1179 0x1179
+/* MRFLD with 0x117a: max ISP freq is 400MHz and max freq at Vmin is 200MHz */
+#define ATOMISP_PCI_DEVICE_SOC_MRFLD_117A 0x117a
+#define ATOMISP_PCI_DEVICE_SOC_BYT 0x0f38
+#define ATOMISP_PCI_DEVICE_SOC_ANN 0x1478
+#define ATOMISP_PCI_DEVICE_SOC_CHT 0x22b8
+
+#define ATOMISP_PCI_REV_MRFLD_A0_MAX 0
+#define ATOMISP_PCI_REV_BYT_A0_MAX 4
+
+#define ATOMISP_MAJOR 0
+#define ATOMISP_MINOR 5
+#define ATOMISP_PATCHLEVEL 1
+
+#define DRIVER_VERSION_STR __stringify(ATOMISP_MAJOR) \
+ "." __stringify(ATOMISP_MINOR) "." __stringify(ATOMISP_PATCHLEVEL)
+#define DRIVER_VERSION KERNEL_VERSION(ATOMISP_MAJOR, \
+ ATOMISP_MINOR, ATOMISP_PATCHLEVEL)
+
+#define ATOM_ISP_STEP_WIDTH 2
+#define ATOM_ISP_STEP_HEIGHT 2
+
+#define ATOM_ISP_MIN_WIDTH 4
+#define ATOM_ISP_MIN_HEIGHT 4
+#define ATOM_ISP_MAX_WIDTH UINT_MAX
+#define ATOM_ISP_MAX_HEIGHT UINT_MAX
+
+/* sub-QCIF resolution */
+#define ATOM_RESOLUTION_SUBQCIF_WIDTH 128
+#define ATOM_RESOLUTION_SUBQCIF_HEIGHT 96
+
+#define ATOM_ISP_MAX_WIDTH_TMP 1280
+#define ATOM_ISP_MAX_HEIGHT_TMP 720
+
+#define ATOM_ISP_I2C_BUS_1 4
+#define ATOM_ISP_I2C_BUS_2 5
+
+#define ATOM_ISP_POWER_DOWN 0
+#define ATOM_ISP_POWER_UP 1
+
+#define ATOM_ISP_MAX_INPUTS 4
+
+#define ATOMISP_SC_TYPE_SIZE 2
+
+#define ATOMISP_ISP_TIMEOUT_DURATION (2 * HZ)
+#define ATOMISP_EXT_ISP_TIMEOUT_DURATION (6 * HZ)
+#define ATOMISP_ISP_FILE_TIMEOUT_DURATION (60 * HZ)
+#define ATOMISP_WDT_KEEP_CURRENT_DELAY 0
+#define ATOMISP_ISP_MAX_TIMEOUT_COUNT 2
+#define ATOMISP_CSS_STOP_TIMEOUT_US 200000
+
+#define ATOMISP_CSS_Q_DEPTH 3
+#define ATOMISP_CSS_EVENTS_MAX 16
+#define ATOMISP_CONT_RAW_FRAMES 15
+#define ATOMISP_METADATA_QUEUE_DEPTH_FOR_HAL 8
+#define ATOMISP_S3A_BUF_QUEUE_DEPTH_FOR_HAL 8
+
+#define ATOMISP_DELAYED_INIT_NOT_QUEUED 0
+#define ATOMISP_DELAYED_INIT_QUEUED 1
+#define ATOMISP_DELAYED_INIT_DONE 2
+
+#define ATOMISP_CALC_CSS_PREV_OVERLAP(lines) \
+ ((lines) * 38 / 100 & 0xfffffe)
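The expression above works out to roughly 38% of the given line count, truncated by the integer division and then rounded down to an even number of lines (bit 0 is masked off). A standalone sketch with a few arbitrary line counts:

#include <stdio.h>

#define ATOMISP_CALC_CSS_PREV_OVERLAP(lines) \
	((lines) * 38 / 100 & 0xfffffe)

int main(void)
{
	/* 1080 -> 410, 1944 -> 738, 725 -> 274 (275 masked down to even) */
	printf("%d %d %d\n",
	       ATOMISP_CALC_CSS_PREV_OVERLAP(1080),
	       ATOMISP_CALC_CSS_PREV_OVERLAP(1944),
	       ATOMISP_CALC_CSS_PREV_OVERLAP(725));
	return 0;
}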
+
+/*
+ * Define how fast the CPU should be able to serve ISP interrupts.
+ * The bigger the value, the higher the risk that the ISP is not
+ * serviced fast enough to process the image during the vertical
+ * blanking time, increasing the risk of dropped frames.
+ * 1000 us is a reasonable value considering that the processing
+ * time is typically ~2000 us.
+ */
+#define ATOMISP_MAX_ISR_LATENCY 1000
+
+/* Add new YUVPP pipe for SOC sensor. */
+#define ATOMISP_CSS_SUPPORT_YUVPP 1
+
+#define ATOMISP_CSS_OUTPUT_SECOND_INDEX 1
+#define ATOMISP_CSS_OUTPUT_DEFAULT_INDEX 0
+
+/*
+ * ATOMISP_SOC_CAMERA
+ * This is to differentiate between ext-isp and soc camera in
+ * Moorefield/Baytrail platform.
+ */
+#define ATOMISP_SOC_CAMERA(asd) \
+ (asd->isp->inputs[asd->input_curr].type == SOC_CAMERA \
+ && asd->isp->inputs[asd->input_curr].camera_caps-> \
+ sensor[asd->sensor_curr].stream_num == 1)
+
+#define ATOMISP_USE_YUVPP(asd) \
+ (ATOMISP_SOC_CAMERA(asd) && ATOMISP_CSS_SUPPORT_YUVPP && \
+ !asd->copy_mode)
+
+#define ATOMISP_DEPTH_SENSOR_STREAMON_COUNT 2
+
+#define ATOMISP_DEPTH_DEFAULT_MASTER_SENSOR 0
+#define ATOMISP_DEPTH_DEFAULT_SLAVE_SENSOR 1
+
+#ifdef ISP2401
+#define ATOMISP_ION_DEVICE_FD_OFFSET 16
+#define ATOMISP_ION_SHARED_FD_MASK (0xFFFF)
+#define ATOMISP_ION_DEVICE_FD_MASK (~ATOMISP_ION_SHARED_FD_MASK)
+#define ION_FD_UNSET (-1)
+
+#endif
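The three ION definitions above appear to pack an ION device fd and a shared buffer fd into a single integer, with the device fd in the upper 16 bits; that reading is an assumption, and the unpack helpers below are invented purely for illustration:

#include <stdio.h>

#define ATOMISP_ION_DEVICE_FD_OFFSET	16
#define ATOMISP_ION_SHARED_FD_MASK	(0xFFFF)
#define ATOMISP_ION_DEVICE_FD_MASK	(~ATOMISP_ION_SHARED_FD_MASK)

/* hypothetical helpers, not part of the driver */
static int packed_to_device_fd(int packed)
{
	return (packed & ATOMISP_ION_DEVICE_FD_MASK) >> ATOMISP_ION_DEVICE_FD_OFFSET;
}

static int packed_to_shared_fd(int packed)
{
	return packed & ATOMISP_ION_SHARED_FD_MASK;
}

int main(void)
{
	int packed = (7 << ATOMISP_ION_DEVICE_FD_OFFSET) | 42;

	printf("device fd %d, shared fd %d\n",
	       packed_to_device_fd(packed), packed_to_shared_fd(packed));
	return 0;
}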
+#define DIV_NEAREST_STEP(n, d, step) \
+ round_down((2 * (n) + (d) * (step))/(2 * (d)), (step))
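DIV_NEAREST_STEP() rounds n/d to the nearest multiple of step by adding half a step before the integer division. A standalone sketch; the round_down() stand-in mirrors the kernel's definition and, like it, assumes step is a power of two:

#include <stdio.h>

/* stand-in for the kernel's round_down(); valid for power-of-two step */
#define round_down(x, y)	((x) & ~((y) - 1))

#define DIV_NEAREST_STEP(n, d, step) \
	round_down((2 * (n) + (d) * (step))/(2 * (d)), (step))

int main(void)
{
	/* 1000/3 = 333.33..., nearest multiple of 2 is 334 */
	printf("%d\n", DIV_NEAREST_STEP(1000, 3, 2));
	/* 90/16 = 5.625, nearest multiple of 1 is 6 */
	printf("%d\n", DIV_NEAREST_STEP(90, 16, 1));
	return 0;
}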
+
+struct atomisp_input_subdev {
+ unsigned int type;
+ enum atomisp_camera_port port;
+ struct v4l2_subdev *camera;
+ struct v4l2_subdev *motor;
+ struct v4l2_frmsizeenum frame_size;
+
+ /*
+ * Shows which stream this resource is used by
+ * in ISP multiple-stream mode
+ */
+ struct atomisp_sub_device *asd;
+
+ const struct atomisp_camera_caps *camera_caps;
+ int sensor_index;
+};
+
+enum atomisp_dfs_mode {
+ ATOMISP_DFS_MODE_AUTO = 0,
+ ATOMISP_DFS_MODE_LOW,
+ ATOMISP_DFS_MODE_MAX,
+};
+
+struct atomisp_regs {
+ /* PCI config space info */
+ u16 pcicmdsts;
+ u32 ispmmadr;
+ u32 msicap;
+ u32 msi_addr;
+ u16 msi_data;
+ u8 intr;
+ u32 interrupt_control;
+ u32 pmcs;
+ u32 cg_dis;
+ u32 i_control;
+
+ /* I-Unit PHY related info */
+ u32 csi_rcomp_config;
+ u32 csi_afe_dly;
+ u32 csi_control;
+
+ /* New for MRFLD */
+ u32 csi_afe_rcomp_config;
+ u32 csi_afe_hs_control;
+ u32 csi_deadline_control;
+ u32 csi_access_viol;
+};
+
+struct atomisp_sw_contex {
+ bool file_input;
+ int power_state;
+ int running_freq;
+};
+
+
+#define ATOMISP_DEVICE_STREAMING_DISABLED 0
+#define ATOMISP_DEVICE_STREAMING_ENABLED 1
+#define ATOMISP_DEVICE_STREAMING_STOPPING 2
+
+/*
+ * ci device struct
+ */
+struct atomisp_device {
+ struct pci_dev *pdev;
+ struct device *dev;
+ struct v4l2_device v4l2_dev;
+ struct media_device media_dev;
+ struct atomisp_platform_data *pdata;
+ void *mmu_l1_base;
+ struct pci_dev *pci_root;
+ const struct firmware *firmware;
+
+ struct pm_qos_request pm_qos;
+ s32 max_isr_latency;
+
+ /*
+ * ISP modules
+ * Multiple streams are represented by multiple
+ * atomisp_sub_device instances
+ */
+ struct atomisp_sub_device *asd;
+ /*
+ * This will be assigned dynamically.
+ * For Merr/BTY(ISP2400), 2 streams are supported.
+ */
+ unsigned int num_of_streams;
+
+ struct atomisp_mipi_csi2_device csi2_port[ATOMISP_CAMERA_NR_PORTS];
+ struct atomisp_tpg_device tpg;
+ struct atomisp_file_device file_dev;
+
+ /* Purpose of mutex is to protect and serialize use of isp data
+ * structures and css API calls. */
+ struct rt_mutex mutex;
+ /*
+ * Serialise streamoff: mutex is dropped during streamoff to
+ * cancel the watchdog queue. MUST be acquired BEFORE
+ * "mutex".
+ */
+ struct mutex streamoff_mutex;
+
+ int input_cnt;
+ struct atomisp_input_subdev inputs[ATOM_ISP_MAX_INPUTS];
+ struct v4l2_subdev *flash;
+ struct v4l2_subdev *motor;
+
+ struct atomisp_regs saved_regs;
+ struct atomisp_sw_contex sw_contex;
+ struct atomisp_css_env css_env;
+
+ /* isp timeout status flag */
+ bool isp_timeout;
+ bool isp_fatal_error;
+ struct workqueue_struct *wdt_work_queue;
+ struct work_struct wdt_work;
+#ifndef ISP2401
+ atomic_t wdt_count;
+#endif
+ atomic_t wdt_work_queued;
+
+ spinlock_t lock; /* Just for streaming below */
+
+ bool need_gfx_throttle;
+
+ unsigned int mipi_frame_size;
+ const struct atomisp_dfs_config *dfs;
+ unsigned int hpll_freq;
+
+ bool css_initialized;
+};
+
+#define v4l2_dev_to_atomisp_device(dev) \
+ container_of(dev, struct atomisp_device, v4l2_dev)
+
+extern struct device *atomisp_dev;
+
+extern void *atomisp_kernel_malloc(size_t bytes);
+
+extern void atomisp_kernel_free(void *ptr);
+
+#define atomisp_is_wdt_running(a) timer_pending(&(a)->wdt)
+#ifdef ISP2401
+extern void atomisp_wdt_refresh_pipe(struct atomisp_video_pipe *pipe,
+ unsigned int delay);
+#endif
+extern void atomisp_wdt_refresh(struct atomisp_sub_device *asd, unsigned int delay);
+#ifndef ISP2401
+extern void atomisp_wdt_start(struct atomisp_sub_device *asd);
+#else
+extern void atomisp_wdt_start(struct atomisp_video_pipe *pipe);
+extern void atomisp_wdt_stop_pipe(struct atomisp_video_pipe *pipe, bool sync);
+#endif
+extern void atomisp_wdt_stop(struct atomisp_sub_device *asd, bool sync);
+
+#endif /* __ATOMISP_INTERNAL_H__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_ioctl.c b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_ioctl.c
new file mode 100644
index 000000000000..6064bb823a47
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_ioctl.c
@@ -0,0 +1,3130 @@
+/*
+ * Support for Medfield PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
+ *
+ * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/pci.h>
+
+#include <asm/intel-mid.h>
+
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-event.h>
+#include <media/videobuf-vmalloc.h>
+
+#include "atomisp_acc.h"
+#include "atomisp_cmd.h"
+#include "atomisp_common.h"
+#include "atomisp_fops.h"
+#include "atomisp_internal.h"
+#include "atomisp_ioctl.h"
+#include "atomisp-regs.h"
+#include "atomisp_compat.h"
+
+#include "sh_css_hrt.h"
+
+#include "gp_device.h"
+#include "device_access.h"
+#include "irq.h"
+
+#include "hrt/hive_isp_css_mm_hrt.h"
+
+/* for v4l2_capability */
+static const char *DRIVER = "atomisp"; /* max size 15 */
+static const char *CARD = "ATOM ISP"; /* max size 31 */
+static const char *BUS_INFO = "PCI-3"; /* max size 31 */
+static const u32 VERSION = DRIVER_VERSION;
+
+/*
+ * FIXME: ISP should not know beforehand all CIDs supported by sensor.
+ * Instead, it needs to propagate unknown CIDs to the sensor.
+ */
+static struct v4l2_queryctrl ci_v4l2_controls[] = {
+ {
+ .id = V4L2_CID_AUTO_WHITE_BALANCE,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "Automatic White Balance",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_RED_BALANCE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Red Balance",
+ .minimum = 0x00,
+ .maximum = 0xff,
+ .step = 1,
+ .default_value = 0x00,
+ },
+ {
+ .id = V4L2_CID_BLUE_BALANCE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Blue Balance",
+ .minimum = 0x00,
+ .maximum = 0xff,
+ .step = 1,
+ .default_value = 0x00,
+ },
+ {
+ .id = V4L2_CID_GAMMA,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Gamma",
+ .minimum = 0x00,
+ .maximum = 0xff,
+ .step = 1,
+ .default_value = 0x00,
+ },
+ {
+ .id = V4L2_CID_POWER_LINE_FREQUENCY,
+ .type = V4L2_CTRL_TYPE_MENU,
+ .name = "Light frequency filter",
+ .minimum = 1,
+ .maximum = 2,
+ .step = 1,
+ .default_value = 1,
+ },
+ {
+ .id = V4L2_CID_COLORFX,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Image Color Effect",
+ .minimum = 0,
+ .maximum = 9,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_COLORFX_CBCR,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Image Color Effect CbCr",
+ .minimum = 0,
+ .maximum = 0xffff,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_ATOMISP_BAD_PIXEL_DETECTION,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Bad Pixel Correction",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_ATOMISP_POSTPROCESS_GDC_CAC,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "GDC/CAC",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_ATOMISP_VIDEO_STABLIZATION,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Video Stablization",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_ATOMISP_FIXED_PATTERN_NR,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Fixed Pattern Noise Reduction",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_ATOMISP_FALSE_COLOR_CORRECTION,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "False Color Correction",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_REQUEST_FLASH,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Request flash frames",
+ .minimum = 0,
+ .maximum = 10,
+ .step = 1,
+ .default_value = 1,
+ },
+ {
+ .id = V4L2_CID_ATOMISP_LOW_LIGHT,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "Low light mode",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 1,
+ },
+ {
+ .id = V4L2_CID_BIN_FACTOR_HORZ,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Horizontal binning factor",
+ .minimum = 0,
+ .maximum = 10,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_BIN_FACTOR_VERT,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Vertical binning factor",
+ .minimum = 0,
+ .maximum = 10,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_2A_STATUS,
+ .type = V4L2_CTRL_TYPE_BITMASK,
+ .name = "AE and AWB status",
+ .minimum = 0,
+ .maximum = V4L2_2A_STATUS_AE_READY | V4L2_2A_STATUS_AWB_READY,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_EXPOSURE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "exposure",
+ .minimum = -4,
+ .maximum = 4,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_EXPOSURE_ZONE_NUM,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "one-time exposure zone number",
+ .minimum = 0x0,
+ .maximum = 0xffff,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_EXPOSURE_AUTO_PRIORITY,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Exposure auto priority",
+ .minimum = V4L2_EXPOSURE_AUTO,
+ .maximum = V4L2_EXPOSURE_APERTURE_PRIORITY,
+ .step = 1,
+ .default_value = V4L2_EXPOSURE_AUTO,
+ },
+ {
+ .id = V4L2_CID_SCENE_MODE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "scene mode",
+ .minimum = 0,
+ .maximum = 13,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_ISO_SENSITIVITY,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "iso",
+ .minimum = -4,
+ .maximum = 4,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_ISO_SENSITIVITY_AUTO,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "iso mode",
+ .minimum = V4L2_ISO_SENSITIVITY_MANUAL,
+ .maximum = V4L2_ISO_SENSITIVITY_AUTO,
+ .step = 1,
+ .default_value = V4L2_ISO_SENSITIVITY_AUTO,
+ },
+ {
+ .id = V4L2_CID_AUTO_N_PRESET_WHITE_BALANCE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "white balance",
+ .minimum = 0,
+ .maximum = 9,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_EXPOSURE_METERING,
+ .type = V4L2_CTRL_TYPE_MENU,
+ .name = "metering",
+ .minimum = 0,
+ .maximum = 3,
+ .step = 1,
+ .default_value = 1,
+ },
+ {
+ .id = V4L2_CID_3A_LOCK,
+ .type = V4L2_CTRL_TYPE_BITMASK,
+ .name = "3a lock",
+ .minimum = 0,
+ .maximum = V4L2_LOCK_EXPOSURE | V4L2_LOCK_WHITE_BALANCE
+ | V4L2_LOCK_FOCUS,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_TEST_PATTERN,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Test Pattern",
+ .minimum = 0,
+ .maximum = 0xffff,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_TEST_PATTERN_COLOR_R,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Test Pattern Solid Color R",
+ .minimum = INT_MIN,
+ .maximum = INT_MAX,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_TEST_PATTERN_COLOR_GR,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Test Pattern Solid Color GR",
+ .minimum = INT_MIN,
+ .maximum = INT_MAX,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_TEST_PATTERN_COLOR_GB,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Test Pattern Solid Color GB",
+ .minimum = INT_MIN,
+ .maximum = INT_MAX,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_TEST_PATTERN_COLOR_B,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Test Pattern Solid Color B",
+ .minimum = INT_MIN,
+ .maximum = INT_MAX,
+ .step = 1,
+ .default_value = 0,
+ },
+};
+static const u32 ctrls_num = ARRAY_SIZE(ci_v4l2_controls);
+
+/*
+ * supported V4L2 fmts and resolutions
+ */
+const struct atomisp_format_bridge atomisp_output_fmts[] = {
+ {
+ .pixelformat = V4L2_PIX_FMT_YUV420,
+ .depth = 12,
+ .mbus_code = V4L2_MBUS_FMT_CUSTOM_YUV420,
+ .sh_fmt = CSS_FRAME_FORMAT_YUV420,
+ .description = "YUV420, planar",
+ .planar = true
+ }, {
+ .pixelformat = V4L2_PIX_FMT_YVU420,
+ .depth = 12,
+ .mbus_code = V4L2_MBUS_FMT_CUSTOM_YVU420,
+ .sh_fmt = CSS_FRAME_FORMAT_YV12,
+ .description = "YVU420, planar",
+ .planar = true
+ }, {
+ .pixelformat = V4L2_PIX_FMT_YUV422P,
+ .depth = 16,
+ .mbus_code = V4L2_MBUS_FMT_CUSTOM_YUV422P,
+ .sh_fmt = CSS_FRAME_FORMAT_YUV422,
+ .description = "YUV422, planar",
+ .planar = true
+ }, {
+ .pixelformat = V4L2_PIX_FMT_YUV444,
+ .depth = 24,
+ .mbus_code = V4L2_MBUS_FMT_CUSTOM_YUV444,
+ .sh_fmt = CSS_FRAME_FORMAT_YUV444,
+ .description = "YUV444"
+ }, {
+ .pixelformat = V4L2_PIX_FMT_NV12,
+ .depth = 12,
+ .mbus_code = V4L2_MBUS_FMT_CUSTOM_NV12,
+ .sh_fmt = CSS_FRAME_FORMAT_NV12,
+ .description = "NV12, Y-plane, CbCr interleaved",
+ .planar = true
+ }, {
+ .pixelformat = V4L2_PIX_FMT_NV21,
+ .depth = 12,
+ .mbus_code = V4L2_MBUS_FMT_CUSTOM_NV21,
+ .sh_fmt = CSS_FRAME_FORMAT_NV21,
+ .description = "NV21, Y-plane, CbCr interleaved",
+ .planar = true
+ }, {
+ .pixelformat = V4L2_PIX_FMT_NV16,
+ .depth = 16,
+ .mbus_code = V4L2_MBUS_FMT_CUSTOM_NV16,
+ .sh_fmt = CSS_FRAME_FORMAT_NV16,
+ .description = "NV16, Y-plane, CbCr interleaved",
+ .planar = true
+ }, {
+ .pixelformat = V4L2_PIX_FMT_YUYV,
+ .depth = 16,
+ .mbus_code = V4L2_MBUS_FMT_CUSTOM_YUYV,
+ .sh_fmt = CSS_FRAME_FORMAT_YUYV,
+ .description = "YUYV, interleaved"
+ }, {
+ .pixelformat = V4L2_PIX_FMT_UYVY,
+ .depth = 16,
+ .mbus_code = MEDIA_BUS_FMT_UYVY8_1X16,
+ .sh_fmt = CSS_FRAME_FORMAT_UYVY,
+ .description = "UYVY, interleaved"
+ }, { /* This one is for parallel sensors! DO NOT USE! */
+ .pixelformat = V4L2_PIX_FMT_UYVY,
+ .depth = 16,
+ .mbus_code = MEDIA_BUS_FMT_UYVY8_2X8,
+ .sh_fmt = CSS_FRAME_FORMAT_UYVY,
+ .description = "UYVY, interleaved"
+ }, {
+ .pixelformat = V4L2_PIX_FMT_SBGGR16,
+ .depth = 16,
+ .mbus_code = V4L2_MBUS_FMT_CUSTOM_SBGGR16,
+ .sh_fmt = CSS_FRAME_FORMAT_RAW,
+ .description = "Bayer 16"
+ }, {
+ .pixelformat = V4L2_PIX_FMT_SBGGR8,
+ .depth = 8,
+ .mbus_code = MEDIA_BUS_FMT_SBGGR8_1X8,
+ .sh_fmt = CSS_FRAME_FORMAT_RAW,
+ .description = "Bayer 8"
+ }, {
+ .pixelformat = V4L2_PIX_FMT_SGBRG8,
+ .depth = 8,
+ .mbus_code = MEDIA_BUS_FMT_SGBRG8_1X8,
+ .sh_fmt = CSS_FRAME_FORMAT_RAW,
+ .description = "Bayer 8"
+ }, {
+ .pixelformat = V4L2_PIX_FMT_SGRBG8,
+ .depth = 8,
+ .mbus_code = MEDIA_BUS_FMT_SGRBG8_1X8,
+ .sh_fmt = CSS_FRAME_FORMAT_RAW,
+ .description = "Bayer 8"
+ }, {
+ .pixelformat = V4L2_PIX_FMT_SRGGB8,
+ .depth = 8,
+ .mbus_code = MEDIA_BUS_FMT_SRGGB8_1X8,
+ .sh_fmt = CSS_FRAME_FORMAT_RAW,
+ .description = "Bayer 8"
+ }, {
+ .pixelformat = V4L2_PIX_FMT_SBGGR10,
+ .depth = 16,
+ .mbus_code = MEDIA_BUS_FMT_SBGGR10_1X10,
+ .sh_fmt = CSS_FRAME_FORMAT_RAW,
+ .description = "Bayer 10"
+ }, {
+ .pixelformat = V4L2_PIX_FMT_SGBRG10,
+ .depth = 16,
+ .mbus_code = MEDIA_BUS_FMT_SGBRG10_1X10,
+ .sh_fmt = CSS_FRAME_FORMAT_RAW,
+ .description = "Bayer 10"
+ }, {
+ .pixelformat = V4L2_PIX_FMT_SGRBG10,
+ .depth = 16,
+ .mbus_code = MEDIA_BUS_FMT_SGRBG10_1X10,
+ .sh_fmt = CSS_FRAME_FORMAT_RAW,
+ .description = "Bayer 10"
+ }, {
+ .pixelformat = V4L2_PIX_FMT_SRGGB10,
+ .depth = 16,
+ .mbus_code = MEDIA_BUS_FMT_SRGGB10_1X10,
+ .sh_fmt = CSS_FRAME_FORMAT_RAW,
+ .description = "Bayer 10"
+ }, {
+ .pixelformat = V4L2_PIX_FMT_SBGGR12,
+ .depth = 16,
+ .mbus_code = MEDIA_BUS_FMT_SBGGR12_1X12,
+ .sh_fmt = CSS_FRAME_FORMAT_RAW,
+ .description = "Bayer 12"
+ }, {
+ .pixelformat = V4L2_PIX_FMT_SGBRG12,
+ .depth = 16,
+ .mbus_code = MEDIA_BUS_FMT_SGBRG12_1X12,
+ .sh_fmt = CSS_FRAME_FORMAT_RAW,
+ .description = "Bayer 12"
+ }, {
+ .pixelformat = V4L2_PIX_FMT_SGRBG12,
+ .depth = 16,
+ .mbus_code = MEDIA_BUS_FMT_SGRBG12_1X12,
+ .sh_fmt = CSS_FRAME_FORMAT_RAW,
+ .description = "Bayer 12"
+ }, {
+ .pixelformat = V4L2_PIX_FMT_SRGGB12,
+ .depth = 16,
+ .mbus_code = MEDIA_BUS_FMT_SRGGB12_1X12,
+ .sh_fmt = CSS_FRAME_FORMAT_RAW,
+ .description = "Bayer 12"
+ }, {
+ .pixelformat = V4L2_PIX_FMT_RGB32,
+ .depth = 32,
+ .mbus_code = V4L2_MBUS_FMT_CUSTOM_RGB32,
+ .sh_fmt = CSS_FRAME_FORMAT_RGBA888,
+ .description = "32 RGB 8-8-8-8"
+ }, {
+ .pixelformat = V4L2_PIX_FMT_RGB565,
+ .depth = 16,
+ .mbus_code = MEDIA_BUS_FMT_BGR565_2X8_LE,
+ .sh_fmt = CSS_FRAME_FORMAT_RGB565,
+ .description = "16 RGB 5-6-5"
+ }, {
+ .pixelformat = V4L2_PIX_FMT_JPEG,
+ .depth = 8,
+ .mbus_code = MEDIA_BUS_FMT_JPEG_1X8,
+ .sh_fmt = CSS_FRAME_FORMAT_BINARY_8,
+ .description = "JPEG"
+ }, {
+ /* This is a custom format being used by M10MO to send the RAW data */
+ .pixelformat = V4L2_PIX_FMT_CUSTOM_M10MO_RAW,
+ .depth = 8,
+ .mbus_code = V4L2_MBUS_FMT_CUSTOM_M10MO_RAW,
+ .sh_fmt = CSS_FRAME_FORMAT_BINARY_8,
+ .description = "Custom RAW for M10MO"
+ },
+};
+
+const struct atomisp_format_bridge *atomisp_get_format_bridge(
+ unsigned int pixelformat)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(atomisp_output_fmts); i++) {
+ if (atomisp_output_fmts[i].pixelformat == pixelformat)
+ return &atomisp_output_fmts[i];
+ }
+
+ return NULL;
+}
+
+const struct atomisp_format_bridge *atomisp_get_format_bridge_from_mbus(
+ u32 mbus_code)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(atomisp_output_fmts); i++) {
+ if (mbus_code == atomisp_output_fmts[i].mbus_code)
+ return &atomisp_output_fmts[i];
+ }
+
+ return NULL;
+}
+
+/*
+ * v4l2 ioctls
+ * return ISP capabilities
+ *
+ * FIXME: capabilities should be different for video0/video2/video3
+ */
+static int atomisp_querycap(struct file *file, void *fh,
+ struct v4l2_capability *cap)
+{
+ memset(cap, 0, sizeof(struct v4l2_capability));
+
+ WARN_ON(strlen(DRIVER) >= sizeof(cap->driver) ||
+ strlen(CARD) >= sizeof(cap->card) ||
+ strlen(BUS_INFO) >= sizeof(cap->bus_info));
+
+ strncpy(cap->driver, DRIVER, sizeof(cap->driver) - 1);
+ strncpy(cap->card, CARD, sizeof(cap->card) - 1);
+ strncpy(cap->bus_info, BUS_INFO, sizeof(cap->bus_info) - 1);
+
+ cap->version = VERSION;
+
+ cap->device_caps = V4L2_CAP_VIDEO_CAPTURE |
+ V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_OUTPUT;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+ return 0;
+}
+
+/*
+ * enum input is used to check the primary/secondary camera
+ */
+static int atomisp_enum_input(struct file *file, void *fh,
+ struct v4l2_input *input)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct atomisp_device *isp = video_get_drvdata(vdev);
+ int index = input->index;
+
+ if (index >= isp->input_cnt)
+ return -EINVAL;
+
+ if (!isp->inputs[index].camera)
+ return -EINVAL;
+
+ memset(input, 0, sizeof(struct v4l2_input));
+ strncpy(input->name, isp->inputs[index].camera->name,
+ sizeof(input->name) - 1);
+
+ /*
+ * HACK: append the actuator's name to the sensor's.
+ * As userspace currently can't talk directly to subdev nodes, this
+ * ioctl is the only way to enumerate inputs plus possible external
+ * actuators for 3A tuning purposes.
+ */
+#ifndef ISP2401
+ if (isp->inputs[index].motor &&
+ strlen(isp->inputs[index].motor->name) > 0) {
+#else
+ if (isp->motor &&
+ strlen(isp->motor->name) > 0) {
+#endif
+ const int cur_len = strlen(input->name);
+ const int max_size = sizeof(input->name) - cur_len - 1;
+
+ if (max_size > 1) {
+ input->name[cur_len] = '+';
+ strncpy(&input->name[cur_len + 1],
+#ifndef ISP2401
+ isp->inputs[index].motor->name, max_size - 1);
+#else
+ isp->motor->name, max_size - 1);
+#endif
+ }
+ }
+
+ input->type = V4L2_INPUT_TYPE_CAMERA;
+ input->index = index;
+ input->reserved[0] = isp->inputs[index].type;
+ input->reserved[1] = isp->inputs[index].port;
+
+ return 0;
+}
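For example, with a hypothetical sensor named "imx134" and a focus motor named "dw9719" attached to the same input, the name reported by this ioctl would be "imx134+dw9719" (the actuator part is truncated if it does not fit in the 32-byte name field).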
+
+static unsigned int atomisp_subdev_streaming_count(
+ struct atomisp_sub_device *asd)
+{
+ return asd->video_out_preview.capq.streaming
+ + asd->video_out_capture.capq.streaming
+ + asd->video_out_video_capture.capq.streaming
+ + asd->video_out_vf.capq.streaming
+ + asd->video_in.capq.streaming;
+}
+
+unsigned int atomisp_streaming_count(struct atomisp_device *isp)
+{
+ unsigned int i, sum;
+
+ for (i = 0, sum = 0; i < isp->num_of_streams; i++)
+ sum += isp->asd[i].streaming ==
+ ATOMISP_DEVICE_STREAMING_ENABLED;
+
+ return sum;
+}
+
+unsigned int atomisp_is_acc_enabled(struct atomisp_device *isp)
+{
+ unsigned int i;
+
+ for (i = 0; i < isp->num_of_streams; i++)
+ if (isp->asd[i].acc.pipeline)
+ return 1;
+
+ return 0;
+}
+/*
+ * get input is used to get the current primary/secondary camera
+ */
+static int atomisp_g_input(struct file *file, void *fh, unsigned int *input)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct atomisp_device *isp = video_get_drvdata(vdev);
+ struct atomisp_sub_device *asd = atomisp_to_video_pipe(vdev)->asd;
+
+ rt_mutex_lock(&isp->mutex);
+ *input = asd->input_curr;
+ rt_mutex_unlock(&isp->mutex);
+
+ return 0;
+}
+/*
+ * set input is used to set the current primary/secondary camera
+ */
+static int atomisp_s_input(struct file *file, void *fh, unsigned int input)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct atomisp_device *isp = video_get_drvdata(vdev);
+ struct atomisp_sub_device *asd = atomisp_to_video_pipe(vdev)->asd;
+ struct v4l2_subdev *camera = NULL;
+ int ret;
+
+ rt_mutex_lock(&isp->mutex);
+ if (input >= ATOM_ISP_MAX_INPUTS || input > isp->input_cnt) {
+ dev_dbg(isp->dev, "input_cnt: %d\n", isp->input_cnt);
+ ret = -EINVAL;
+ goto error;
+ }
+
+ /*
+ * check whether the requested camera:
+ * 1: is already in use
+ * 2: if in use, whether it is used by another stream
+ */
+ if (isp->inputs[input].asd != NULL && isp->inputs[input].asd != asd) {
+ dev_err(isp->dev,
+ "%s, camera is already used by stream: %d\n", __func__,
+ isp->inputs[input].asd->index);
+ ret = -EBUSY;
+ goto error;
+ }
+
+ camera = isp->inputs[input].camera;
+ if (!camera) {
+ dev_err(isp->dev, "%s, no camera\n", __func__);
+ ret = -EINVAL;
+ goto error;
+ }
+
+ if (atomisp_subdev_streaming_count(asd)) {
+ dev_err(isp->dev,
+ "ISP is still streaming, stop first\n");
+ ret = -EINVAL;
+ goto error;
+ }
+
+ /* power off the currently owned sensor, as it is not used this time */
+ if (isp->inputs[asd->input_curr].asd == asd &&
+ asd->input_curr != input) {
+ ret = v4l2_subdev_call(isp->inputs[asd->input_curr].camera,
+ core, s_power, 0);
+ if (ret)
+ dev_warn(isp->dev,
+ "Failed to power-off sensor\n");
+ /* clear the asd field to show this camera is not used */
+ isp->inputs[asd->input_curr].asd = NULL;
+ }
+
+ /* power on the new sensor */
+ ret = v4l2_subdev_call(isp->inputs[input].camera, core, s_power, 1);
+ if (ret) {
+ dev_err(isp->dev, "Failed to power-on sensor\n");
+ goto error;
+ }
+ /*
+ * Some sensor drivers reset the run mode during power-on, so force an
+ * update of the run mode on the sensor after power-on.
+ */
+ atomisp_update_run_mode(asd);
+
+ /* select operating sensor */
+ ret = v4l2_subdev_call(isp->inputs[input].camera, video, s_routing,
+ 0, isp->inputs[input].sensor_index, 0);
+ if (ret && (ret != -ENOIOCTLCMD)) {
+ dev_err(isp->dev, "Failed to select sensor\n");
+ goto error;
+ }
+
+#ifndef ISP2401
+ if (!isp->sw_contex.file_input && isp->inputs[input].motor)
+ ret = v4l2_subdev_call(isp->inputs[input].motor, core,
+ init, 1);
+#else
+ if (isp->motor)
+ ret = v4l2_subdev_call(isp->motor, core, s_power, 1);
+
+ if (!isp->sw_contex.file_input && isp->motor)
+ ret = v4l2_subdev_call(isp->motor, core, init, 1);
+#endif
+
+ asd->input_curr = input;
+ /* mark that this camera is used by the current stream */
+ isp->inputs[input].asd = asd;
+ rt_mutex_unlock(&isp->mutex);
+
+ return 0;
+
+error:
+ rt_mutex_unlock(&isp->mutex);
+
+ return ret;
+}
+
+static int atomisp_enum_fmt_cap(struct file *file, void *fh,
+ struct v4l2_fmtdesc *f)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct atomisp_device *isp = video_get_drvdata(vdev);
+ struct atomisp_sub_device *asd = atomisp_to_video_pipe(vdev)->asd;
+ struct v4l2_subdev_mbus_code_enum code = { 0 };
+ unsigned int i, fi = 0;
+ int rval;
+
+ rt_mutex_lock(&isp->mutex);
+ rval = v4l2_subdev_call(isp->inputs[asd->input_curr].camera, pad,
+ enum_mbus_code, NULL, &code);
+ if (rval == -ENOIOCTLCMD) {
+ dev_warn(isp->dev, "enum_mbus_code pad op not supported. Please fix your sensor driver!\n");
+ // rval = v4l2_subdev_call(isp->inputs[asd->input_curr].camera,
+ // video, enum_mbus_fmt, 0, &code.code);
+ }
+ rt_mutex_unlock(&isp->mutex);
+
+ if (rval)
+ return rval;
+
+ for (i = 0; i < ARRAY_SIZE(atomisp_output_fmts); i++) {
+ const struct atomisp_format_bridge *format =
+ &atomisp_output_fmts[i];
+
+ /*
+ * Is the atomisp-supported format valid for the
+ * sensor (configuration)? If not, skip it.
+ */
+ if (format->sh_fmt == CSS_FRAME_FORMAT_RAW
+ && format->mbus_code != code.code)
+ continue;
+
+ /* Found a match. Now let's pick f->index'th one. */
+ if (fi < f->index) {
+ fi++;
+ continue;
+ }
+
+ strlcpy(f->description, format->description,
+ sizeof(f->description));
+ f->pixelformat = format->pixelformat;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int atomisp_g_fmt_cap(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct atomisp_device *isp = video_get_drvdata(vdev);
+
+ int ret;
+
+ rt_mutex_lock(&isp->mutex);
+ ret = atomisp_get_fmt(vdev, f);
+ rt_mutex_unlock(&isp->mutex);
+ return ret;
+}
+
+static int atomisp_g_fmt_file(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct atomisp_device *isp = video_get_drvdata(vdev);
+ struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev);
+
+ rt_mutex_lock(&isp->mutex);
+ f->fmt.pix = pipe->pix;
+ rt_mutex_unlock(&isp->mutex);
+
+ return 0;
+}
+
+/* This function looks up the closest available resolution. */
+static int atomisp_try_fmt_cap(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct atomisp_device *isp = video_get_drvdata(vdev);
+ int ret;
+
+ rt_mutex_lock(&isp->mutex);
+ ret = atomisp_try_fmt(vdev, f, NULL);
+ rt_mutex_unlock(&isp->mutex);
+ return ret;
+}
+
+static int atomisp_s_fmt_cap(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct atomisp_device *isp = video_get_drvdata(vdev);
+ int ret;
+
+ rt_mutex_lock(&isp->mutex);
+ if (isp->isp_fatal_error) {
+ ret = -EIO;
+ rt_mutex_unlock(&isp->mutex);
+ return ret;
+ }
+ ret = atomisp_set_fmt(vdev, f);
+ rt_mutex_unlock(&isp->mutex);
+ return ret;
+}
+
+static int atomisp_s_fmt_file(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct atomisp_device *isp = video_get_drvdata(vdev);
+ int ret;
+
+ rt_mutex_lock(&isp->mutex);
+ ret = atomisp_set_fmt_file(vdev, f);
+ rt_mutex_unlock(&isp->mutex);
+ return ret;
+}
+
+/*
+ * Free videobuf buffer priv data
+ */
+void atomisp_videobuf_free_buf(struct videobuf_buffer *vb)
+{
+ struct videobuf_vmalloc_memory *vm_mem;
+
+ if (vb == NULL)
+ return;
+
+ vm_mem = vb->priv;
+ if (vm_mem && vm_mem->vaddr) {
+ atomisp_css_frame_free(vm_mem->vaddr);
+ vm_mem->vaddr = NULL;
+ }
+}
+
+/*
+ * This function frees the video buffer queue.
+ */
+static void atomisp_videobuf_free_queue(struct videobuf_queue *q)
+{
+ int i;
+
+ for (i = 0; i < VIDEO_MAX_FRAME; i++) {
+ atomisp_videobuf_free_buf(q->bufs[i]);
+ kfree(q->bufs[i]);
+ q->bufs[i] = NULL;
+ }
+}
+
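+/*
+ * Allocate the 3A, DVS and metadata statistics buffers for the given
+ * stream, if the corresponding feature is enabled and its list is still
+ * empty. On any allocation failure everything allocated so far is freed
+ * and -ENOMEM is returned.
+ */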
+int atomisp_alloc_css_stat_bufs(struct atomisp_sub_device *asd,
+ uint16_t stream_id)
+{
+ struct atomisp_device *isp = asd->isp;
+ struct atomisp_s3a_buf *s3a_buf = NULL, *_s3a_buf;
+ struct atomisp_dis_buf *dis_buf = NULL, *_dis_buf;
+ struct atomisp_metadata_buf *md_buf = NULL, *_md_buf;
+ int count;
+ struct atomisp_css_dvs_grid_info *dvs_grid_info =
+ atomisp_css_get_dvs_grid_info(&asd->params.curr_grid_info);
+ unsigned int i;
+
+ if (list_empty(&asd->s3a_stats) &&
+ asd->params.curr_grid_info.s3a_grid.enable) {
+ count = ATOMISP_CSS_Q_DEPTH +
+ ATOMISP_S3A_BUF_QUEUE_DEPTH_FOR_HAL;
+ dev_dbg(isp->dev, "allocating %d 3a buffers\n", count);
+ while (count--) {
+ s3a_buf = kzalloc(sizeof(struct atomisp_s3a_buf), GFP_KERNEL);
+ if (!s3a_buf) {
+ dev_err(isp->dev, "s3a stat buf alloc failed\n");
+ goto error;
+ }
+
+ if (atomisp_css_allocate_stat_buffers(
+ asd, stream_id, s3a_buf, NULL, NULL)) {
+ kfree(s3a_buf);
+ goto error;
+ }
+
+ list_add_tail(&s3a_buf->list, &asd->s3a_stats);
+ }
+ }
+
+ if (list_empty(&asd->dis_stats) && dvs_grid_info &&
+ dvs_grid_info->enable) {
+ count = ATOMISP_CSS_Q_DEPTH + 1;
+ dev_dbg(isp->dev, "allocating %d dis buffers\n", count);
+ while (count--) {
+ dis_buf = kzalloc(sizeof(struct atomisp_dis_buf), GFP_KERNEL);
+ if (!dis_buf) {
+ dev_err(isp->dev, "dis stat buf alloc failed\n");
+ kfree(s3a_buf);
+ goto error;
+ }
+ if (atomisp_css_allocate_stat_buffers(
+ asd, stream_id, NULL, dis_buf, NULL)) {
+ kfree(dis_buf);
+ goto error;
+ }
+
+ list_add_tail(&dis_buf->list, &asd->dis_stats);
+ }
+ }
+
+ for (i = 0; i < ATOMISP_METADATA_TYPE_NUM; i++) {
+ if (list_empty(&asd->metadata[i]) &&
+ list_empty(&asd->metadata_ready[i]) &&
+ list_empty(&asd->metadata_in_css[i])) {
+ count = ATOMISP_CSS_Q_DEPTH +
+ ATOMISP_METADATA_QUEUE_DEPTH_FOR_HAL;
+ dev_dbg(isp->dev, "allocating %d metadata buffers for type %d\n",
+ count, i);
+ while (count--) {
+ md_buf = kzalloc(sizeof(struct atomisp_metadata_buf),
+ GFP_KERNEL);
+ if (!md_buf) {
+ dev_err(isp->dev, "metadata buf alloc failed\n");
+ goto error;
+ }
+
+ if (atomisp_css_allocate_stat_buffers(
+ asd, stream_id, NULL, NULL, md_buf)) {
+ kfree(md_buf);
+ goto error;
+ }
+ list_add_tail(&md_buf->list, &asd->metadata[i]);
+ }
+ }
+ }
+ return 0;
+
+error:
+ dev_err(isp->dev, "failed to allocate statistics buffers\n");
+
+ list_for_each_entry_safe(dis_buf, _dis_buf, &asd->dis_stats, list) {
+ atomisp_css_free_dis_buffer(dis_buf);
+ list_del(&dis_buf->list);
+ kfree(dis_buf);
+ }
+
+ list_for_each_entry_safe(s3a_buf, _s3a_buf, &asd->s3a_stats, list) {
+ atomisp_css_free_3a_buffer(s3a_buf);
+ list_del(&s3a_buf->list);
+ kfree(s3a_buf);
+ }
+
+ for (i = 0; i < ATOMISP_METADATA_TYPE_NUM; i++) {
+ list_for_each_entry_safe(md_buf, _md_buf, &asd->metadata[i],
+ list) {
+ atomisp_css_free_metadata_buffer(md_buf);
+ list_del(&md_buf->list);
+ kfree(md_buf);
+ }
+ }
+ return -ENOMEM;
+}
+
+/*
+ * Initiate Memory Mapping or User Pointer I/O
+ */
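+/*
+ * A request with count == 0 releases all buffers. For V4L2_MEMORY_MMAP the
+ * CSS frames backing each videobuf are allocated here; V4L2_MEMORY_USERPTR
+ * buffers are only mapped later, in QBUF.
+ */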
+int __atomisp_reqbufs(struct file *file, void *fh,
+ struct v4l2_requestbuffers *req)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev);
+ struct atomisp_sub_device *asd = pipe->asd;
+ struct atomisp_css_frame_info frame_info;
+ struct atomisp_css_frame *frame;
+ struct videobuf_vmalloc_memory *vm_mem;
+ uint16_t source_pad = atomisp_subdev_source_pad(vdev);
+ uint16_t stream_id = atomisp_source_pad_to_stream_id(asd, source_pad);
+ int ret = 0, i = 0;
+
+ if (req->count == 0) {
+ mutex_lock(&pipe->capq.vb_lock);
+ if (!list_empty(&pipe->capq.stream))
+ videobuf_queue_cancel(&pipe->capq);
+
+ atomisp_videobuf_free_queue(&pipe->capq);
+ mutex_unlock(&pipe->capq.vb_lock);
+ /* clear request config id */
+ memset(pipe->frame_request_config_id, 0,
+ VIDEO_MAX_FRAME * sizeof(unsigned int));
+ memset(pipe->frame_params, 0,
+ VIDEO_MAX_FRAME *
+ sizeof(struct atomisp_css_params_with_list *));
+ return 0;
+ }
+
+ ret = videobuf_reqbufs(&pipe->capq, req);
+ if (ret)
+ return ret;
+
+ atomisp_alloc_css_stat_bufs(asd, stream_id);
+
+ /*
+ * For the user pointer type, buffers are not really allocated here;
+ * they are set up in the QBUF operation through the v4l2_buffer
+ * structure.
+ */
+ if (req->memory == V4L2_MEMORY_USERPTR)
+ return 0;
+
+ ret = atomisp_get_css_frame_info(asd, source_pad, &frame_info);
+ if (ret)
+ return ret;
+
+ /*
+ * Allocate the real frames here for the selected node using our
+ * memory management function
+ */
+ for (i = 0; i < req->count; i++) {
+ if (atomisp_css_frame_allocate_from_info(&frame, &frame_info))
+ goto error;
+ vm_mem = pipe->capq.bufs[i]->priv;
+ vm_mem->vaddr = frame;
+ }
+
+ return ret;
+
+error:
+ while (i--) {
+ vm_mem = pipe->capq.bufs[i]->priv;
+ atomisp_css_frame_free(vm_mem->vaddr);
+ }
+
+ if (asd->vf_frame)
+ atomisp_css_frame_free(asd->vf_frame);
+
+ return -ENOMEM;
+}
+
+int atomisp_reqbufs(struct file *file, void *fh,
+ struct v4l2_requestbuffers *req)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct atomisp_device *isp = video_get_drvdata(vdev);
+ int ret;
+
+ rt_mutex_lock(&isp->mutex);
+ ret = __atomisp_reqbufs(file, fh, req);
+ rt_mutex_unlock(&isp->mutex);
+
+ return ret;
+}
+
+static int atomisp_reqbufs_file(struct file *file, void *fh,
+ struct v4l2_requestbuffers *req)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev);
+
+ if (req->count == 0) {
+ mutex_lock(&pipe->outq.vb_lock);
+ atomisp_videobuf_free_queue(&pipe->outq);
+ mutex_unlock(&pipe->outq.vb_lock);
+ return 0;
+ }
+
+ return videobuf_reqbufs(&pipe->outq, req);
+}
+
+/* applications query the status of a buffer */
+static int atomisp_querybuf(struct file *file, void *fh,
+ struct v4l2_buffer *buf)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev);
+
+ return videobuf_querybuf(&pipe->capq, buf);
+}
+
+static int atomisp_querybuf_file(struct file *file, void *fh,
+ struct v4l2_buffer *buf)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev);
+
+ return videobuf_querybuf(&pipe->outq, buf);
+}
+
+/*
+ * Applications call the VIDIOC_QBUF ioctl to enqueue an empty (capturing) or
+ * filled (output) buffer in the driver's incoming queue.
+ */
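+/*
+ * For USERPTR buffers the user pages are mapped into a CSS frame here.
+ * Bit ATOMISP_BUFFER_HAS_PER_FRAME_SETTING of reserved2 selects a
+ * per-frame parameter set identified by the remaining bits.
+ */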
+static int atomisp_qbuf(struct file *file, void *fh, struct v4l2_buffer *buf)
+{
+ static const int NOFLUSH_FLAGS = V4L2_BUF_FLAG_NO_CACHE_INVALIDATE |
+ V4L2_BUF_FLAG_NO_CACHE_CLEAN;
+ struct video_device *vdev = video_devdata(file);
+ struct atomisp_device *isp = video_get_drvdata(vdev);
+ struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev);
+ struct atomisp_sub_device *asd = pipe->asd;
+ struct videobuf_buffer *vb;
+ struct videobuf_vmalloc_memory *vm_mem;
+ struct atomisp_css_frame_info frame_info;
+ struct atomisp_css_frame *handle = NULL;
+ u32 length;
+ u32 pgnr;
+ int ret = 0;
+
+ rt_mutex_lock(&isp->mutex);
+ if (isp->isp_fatal_error) {
+ ret = -EIO;
+ goto error;
+ }
+
+ if (asd->streaming == ATOMISP_DEVICE_STREAMING_STOPPING) {
+ dev_err(isp->dev, "%s: reject, as ISP at stopping.\n",
+ __func__);
+ ret = -EIO;
+ goto error;
+ }
+
+ if (!buf || buf->index >= VIDEO_MAX_FRAME ||
+ !pipe->capq.bufs[buf->index]) {
+ dev_err(isp->dev, "Invalid index for qbuf.\n");
+ ret = -EINVAL;
+ goto error;
+ }
+
+ /*
+ * For a userptr type frame, we convert the user space address to a
+ * physical address and reprogram our page table properly.
+ */
+ if (buf->memory == V4L2_MEMORY_USERPTR) {
+ struct hrt_userbuffer_attr attributes;
+ vb = pipe->capq.bufs[buf->index];
+ vm_mem = vb->priv;
+ if (!vm_mem) {
+ ret = -EINVAL;
+ goto error;
+ }
+
+ length = vb->bsize;
+ pgnr = (length + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
+
+ if (vb->baddr == buf->m.userptr && vm_mem->vaddr)
+ goto done;
+
+ if (atomisp_get_css_frame_info(asd,
+ atomisp_subdev_source_pad(vdev), &frame_info)) {
+ ret = -EIO;
+ goto error;
+ }
+
+ attributes.pgnr = pgnr;
+#ifdef CONFIG_ION
+#ifndef ISP2401
+ attributes.type = buf->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_ION
+ ? HRT_USR_ION : HRT_USR_PTR;
+#else
+ if (buf->reserved & ATOMISP_BUFFER_TYPE_IS_ION) {
+ attributes.type = HRT_USR_ION;
+ if (asd->ion_dev_fd->val != ION_FD_UNSET) {
+ dev_dbg(isp->dev, "ION buffer queued, share_fd=%lddev_fd=%d.\n",
+ buf->m.userptr, asd->ion_dev_fd->val);
+ /*
+ * Make sure the shared fd we just got
+ * from user space isn't larger than
+ * the space we have for it.
+ */
+ if ((buf->m.userptr &
+ (ATOMISP_ION_DEVICE_FD_MASK)) != 0) {
+ dev_err(isp->dev,
+ "Error: v4l2 buffer fd:0X%0lX > 0XFFFF.\n",
+ buf->m.userptr);
+ ret = -EINVAL;
+ goto error;
+ }
+ buf->m.userptr |= asd->ion_dev_fd->val <<
+ ATOMISP_ION_DEVICE_FD_OFFSET;
+ } else {
+ dev_err(isp->dev, "v4l2 buffer type is ION, \
+ but no dev fd set from userspace.\n");
+ ret = -EINVAL;
+ goto error;
+ }
+ } else {
+ attributes.type = HRT_USR_PTR;
+ }
+#endif
+#else
+ attributes.type = HRT_USR_PTR;
+#endif
+ ret = atomisp_css_frame_map(&handle, &frame_info,
+ (void *)buf->m.userptr,
+ 0, &attributes);
+ if (ret) {
+ dev_err(isp->dev, "Failed to map user buffer\n");
+ goto error;
+ }
+
+ if (vm_mem->vaddr) {
+ mutex_lock(&pipe->capq.vb_lock);
+ atomisp_css_frame_free(vm_mem->vaddr);
+ vm_mem->vaddr = NULL;
+ vb->state = VIDEOBUF_NEEDS_INIT;
+ mutex_unlock(&pipe->capq.vb_lock);
+ }
+
+ vm_mem->vaddr = handle;
+
+ buf->flags &= ~V4L2_BUF_FLAG_MAPPED;
+ buf->flags |= V4L2_BUF_FLAG_QUEUED;
+ buf->flags &= ~V4L2_BUF_FLAG_DONE;
+ } else if (buf->memory == V4L2_MEMORY_MMAP) {
+ buf->flags |= V4L2_BUF_FLAG_MAPPED;
+ buf->flags |= V4L2_BUF_FLAG_QUEUED;
+ buf->flags &= ~V4L2_BUF_FLAG_DONE;
+ }
+
+done:
+ if (!((buf->flags & NOFLUSH_FLAGS) == NOFLUSH_FLAGS))
+ wbinvd();
+
+ if (!atomisp_is_vf_pipe(pipe) &&
+ (buf->reserved2 & ATOMISP_BUFFER_HAS_PER_FRAME_SETTING)) {
+ /* this buffer will have a per-frame parameter */
+ pipe->frame_request_config_id[buf->index] = buf->reserved2 &
+ ~ATOMISP_BUFFER_HAS_PER_FRAME_SETTING;
+ dev_dbg(isp->dev, "This buffer requires per_frame setting which has isp_config_id %d\n",
+ pipe->frame_request_config_id[buf->index]);
+ } else {
+ pipe->frame_request_config_id[buf->index] = 0;
+ }
+
+ pipe->frame_params[buf->index] = NULL;
+
+ rt_mutex_unlock(&isp->mutex);
+
+ ret = videobuf_qbuf(&pipe->capq, buf);
+ rt_mutex_lock(&isp->mutex);
+ if (ret)
+ goto error;
+
+ /* TODO: do this better; this is not the best way to queue to css */
+ if (asd->streaming == ATOMISP_DEVICE_STREAMING_ENABLED) {
+ if (!list_empty(&pipe->buffers_waiting_for_param)) {
+ atomisp_handle_parameter_and_buffer(pipe);
+ } else {
+ atomisp_qbuffers_to_css(asd);
+
+#ifndef ISP2401
+ if (!atomisp_is_wdt_running(asd) && atomisp_buffers_queued(asd))
+ atomisp_wdt_start(asd);
+#else
+ if (!atomisp_is_wdt_running(pipe) &&
+ atomisp_buffers_queued_pipe(pipe))
+ atomisp_wdt_start(pipe);
+#endif
+ }
+ }
+
+ /*
+ * Workaround: due to the design of HALv3, in ZSL or SDV mode HAL
+ * sometimes needs to capture multiple images within one streaming
+ * cycle, but the capture count cannot be determined by HAL.
+ * So HAL only sets the capture count to 1 and queues multiple
+ * buffers. The atomisp driver needs to check this case and
+ * re-trigger CSS to do a capture when a new buffer is queued.
+ */
+ if (asd->continuous_mode->val &&
+ atomisp_subdev_source_pad(vdev)
+ == ATOMISP_SUBDEV_PAD_SOURCE_CAPTURE &&
+ pipe->capq.streaming &&
+ !asd->enable_raw_buffer_lock->val &&
+ asd->params.offline_parm.num_captures == 1) {
+#ifndef ISP2401
+ asd->pending_capture_request++;
+ dev_dbg(isp->dev, "Add one pending capture request.\n");
+#else
+ if (asd->re_trigger_capture) {
+ ret = atomisp_css_offline_capture_configure(asd,
+ asd->params.offline_parm.num_captures,
+ asd->params.offline_parm.skip_frames,
+ asd->params.offline_parm.offset);
+ asd->re_trigger_capture = false;
+ dev_dbg(isp->dev, "%s Trigger capture again ret=%d\n",
+ __func__, ret);
+
+ } else {
+ asd->pending_capture_request++;
+ asd->re_trigger_capture = false;
+ dev_dbg(isp->dev, "Add one pending capture request.\n");
+ }
+#endif
+ }
+ rt_mutex_unlock(&isp->mutex);
+
+ dev_dbg(isp->dev, "qbuf buffer %d (%s) for asd%d\n", buf->index,
+ vdev->name, asd->index);
+
+ return ret;
+
+error:
+ rt_mutex_unlock(&isp->mutex);
+ return ret;
+}
+
+static int atomisp_qbuf_file(struct file *file, void *fh,
+ struct v4l2_buffer *buf)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct atomisp_device *isp = video_get_drvdata(vdev);
+ struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev);
+ int ret;
+
+ rt_mutex_lock(&isp->mutex);
+ if (isp->isp_fatal_error) {
+ ret = -EIO;
+ goto error;
+ }
+
+ if (!buf || buf->index >= VIDEO_MAX_FRAME ||
+ !pipe->outq.bufs[buf->index]) {
+ dev_err(isp->dev, "Invalid index for qbuf.\n");
+ ret = -EINVAL;
+ goto error;
+ }
+
+ if (buf->memory != V4L2_MEMORY_MMAP) {
+ dev_err(isp->dev, "Unsupported memory method\n");
+ ret = -EINVAL;
+ goto error;
+ }
+
+ if (buf->type != V4L2_BUF_TYPE_VIDEO_OUTPUT) {
+ dev_err(isp->dev, "Unsupported buffer type\n");
+ ret = -EINVAL;
+ goto error;
+ }
+ rt_mutex_unlock(&isp->mutex);
+
+ return videobuf_qbuf(&pipe->outq, buf);
+
+error:
+ rt_mutex_unlock(&isp->mutex);
+
+ return ret;
+}
+
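+/*
+ * Find the CSS frame bound to the videobuf with the given index and
+ * return its exposure id, or -EINVAL if no such frame exists.
+ */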
+static int __get_frame_exp_id(struct atomisp_video_pipe *pipe,
+ struct v4l2_buffer *buf)
+{
+ struct videobuf_vmalloc_memory *vm_mem;
+ struct atomisp_css_frame *handle;
+ int i;
+
+ for (i = 0; pipe->capq.bufs[i]; i++) {
+ vm_mem = pipe->capq.bufs[i]->priv;
+ handle = vm_mem->vaddr;
+ if (buf->index == pipe->capq.bufs[i]->i && handle)
+ return handle->exp_id;
+ }
+ return -EINVAL;
+}
+
+/*
+ * Applications call the VIDIOC_DQBUF ioctl to dequeue a filled (capturing) or
+ * displayed (output) buffer from the driver's outgoing queue.
+ */
+static int atomisp_dqbuf(struct file *file, void *fh, struct v4l2_buffer *buf)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev);
+ struct atomisp_sub_device *asd = pipe->asd;
+ struct atomisp_device *isp = video_get_drvdata(vdev);
+ int ret = 0;
+
+ rt_mutex_lock(&isp->mutex);
+
+ if (isp->isp_fatal_error) {
+ rt_mutex_unlock(&isp->mutex);
+ return -EIO;
+ }
+
+ if (asd->streaming == ATOMISP_DEVICE_STREAMING_STOPPING) {
+ rt_mutex_unlock(&isp->mutex);
+ dev_err(isp->dev, "%s: reject, as ISP at stopping.\n",
+ __func__);
+ return -EIO;
+ }
+
+ rt_mutex_unlock(&isp->mutex);
+
+ ret = videobuf_dqbuf(&pipe->capq, buf, file->f_flags & O_NONBLOCK);
+ if (ret) {
+ dev_dbg(isp->dev, "<%s: %d\n", __func__, ret);
+ return ret;
+ }
+ rt_mutex_lock(&isp->mutex);
+ buf->bytesused = pipe->pix.sizeimage;
+ buf->reserved = asd->frame_status[buf->index];
+
+ /*
+ * Hack:
+ * Currently frame_status is an enum type which needs no more than
+ * the lower 8 bits.
+ * Use bit[31:16] for exp_id as it is only in the range of 1~255.
+ */
+ buf->reserved &= 0x0000ffff;
+ if (!(buf->flags & V4L2_BUF_FLAG_ERROR))
+ buf->reserved |= __get_frame_exp_id(pipe, buf) << 16;
+ buf->reserved2 = pipe->frame_config_id[buf->index];
+ rt_mutex_unlock(&isp->mutex);
+
+ dev_dbg(isp->dev, "dqbuf buffer %d (%s) for asd%d with exp_id %d, isp_config_id %d\n",
+ buf->index, vdev->name, asd->index, buf->reserved >> 16,
+ buf->reserved2);
+ return 0;
+}
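+
+/*
+ * Illustrative only (not taken from the driver sources): with the encoding
+ * used above, userspace can unpack a dequeued buffer roughly as
+ *
+ * exp_id = buf.reserved >> 16;
+ * frame_status = buf.reserved & 0xffff;
+ * isp_config_id = buf.reserved2;
+ */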
+
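+/*
+ * Map the current run mode and vf_pp settings of the sub-device to the
+ * CSS pipe that will be used for it.
+ */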
+enum atomisp_css_pipe_id atomisp_get_css_pipe_id(struct atomisp_sub_device *asd)
+{
+ if (ATOMISP_USE_YUVPP(asd))
+ return CSS_PIPE_ID_YUVPP;
+
+ if (asd->continuous_mode->val) {
+ if (asd->run_mode->val == ATOMISP_RUN_MODE_VIDEO)
+ return CSS_PIPE_ID_VIDEO;
+ else
+ return CSS_PIPE_ID_PREVIEW;
+ }
+
+ /*
+ * Disable vf_pp and run CSS in video mode. This allows using ISP
+ * scaling but it has one frame delay due to CSS internal buffering.
+ */
+ if (asd->vfpp->val == ATOMISP_VFPP_DISABLE_SCALER)
+ return CSS_PIPE_ID_VIDEO;
+
+ /*
+ * Disable vf_pp and run CSS in still capture mode. In this mode
+ * CSS does not cause extra latency with buffering, but scaling
+ * is not available.
+ */
+ if (asd->vfpp->val == ATOMISP_VFPP_DISABLE_LOWLAT)
+ return CSS_PIPE_ID_CAPTURE;
+
+ switch (asd->run_mode->val) {
+ case ATOMISP_RUN_MODE_PREVIEW:
+ return CSS_PIPE_ID_PREVIEW;
+ case ATOMISP_RUN_MODE_VIDEO:
+ return CSS_PIPE_ID_VIDEO;
+ case ATOMISP_RUN_MODE_STILL_CAPTURE:
+ /* fall through */
+ default:
+ return CSS_PIPE_ID_CAPTURE;
+ }
+}
+
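+/*
+ * Return how many video nodes must be streaming before the sensor itself
+ * is started (1 or 2, depending on the run mode and sensor capabilities).
+ */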
+static unsigned int atomisp_sensor_start_stream(struct atomisp_sub_device *asd)
+{
+ struct atomisp_device *isp = asd->isp;
+
+ if (isp->inputs[asd->input_curr].camera_caps->
+ sensor[asd->sensor_curr].stream_num > 1) {
+ if (asd->high_speed_mode)
+ return 1;
+ else
+ return 2;
+ }
+
+ if (asd->vfpp->val != ATOMISP_VFPP_ENABLE ||
+ asd->copy_mode)
+ return 1;
+
+ if (asd->run_mode->val == ATOMISP_RUN_MODE_VIDEO ||
+ (asd->run_mode->val == ATOMISP_RUN_MODE_STILL_CAPTURE &&
+ !atomisp_is_mbuscode_raw(
+ asd->fmt[
+ asd->capture_pad].fmt.code) &&
+ !asd->continuous_mode->val))
+ return 2;
+ else
+ return 1;
+}
+
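+/*
+ * Depth mode: start the master sensor first, then the slave after the
+ * compensation delay reported by the master, presumably to keep the two
+ * streams in sync.
+ */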
+int atomisp_stream_on_master_slave_sensor(struct atomisp_device *isp,
+ bool isp_timeout)
+{
+ unsigned int master = -1, slave = -1, delay_slave = 0;
+ int i, ret;
+
+ /*
+ * The ISP only supports 2 streams now, so ignore the multiple
+ * master/slave case to reduce the delay between the 2 stream_on
+ * calls.
+ */
+ for (i = 0; i < isp->num_of_streams; i++) {
+ int sensor_index = isp->asd[i].input_curr;
+ if (isp->inputs[sensor_index].camera_caps->
+ sensor[isp->asd[i].sensor_curr].is_slave)
+ slave = sensor_index;
+ else
+ master = sensor_index;
+ }
+
+ if (master == -1 || slave == -1) {
+ master = ATOMISP_DEPTH_DEFAULT_MASTER_SENSOR;
+ slave = ATOMISP_DEPTH_DEFAULT_SLAVE_SENSOR;
+ dev_warn(isp->dev,
+ "depth mode use default master=%s.slave=%s.\n",
+ isp->inputs[master].camera->name,
+ isp->inputs[slave].camera->name);
+ }
+
+ ret = v4l2_subdev_call(isp->inputs[master].camera, core,
+ ioctl, ATOMISP_IOC_G_DEPTH_SYNC_COMP,
+ &delay_slave);
+ if (ret)
+ dev_warn(isp->dev,
+ "get depth sensor %s compensation delay failed.\n",
+ isp->inputs[master].camera->name);
+
+ ret = v4l2_subdev_call(isp->inputs[master].camera,
+ video, s_stream, 1);
+ if (ret) {
+ dev_err(isp->dev, "depth mode master sensor %s stream-on failed.\n",
+ isp->inputs[master].camera->name);
+ return -EINVAL;
+ }
+
+ if (delay_slave != 0)
+ udelay(delay_slave);
+
+ ret = v4l2_subdev_call(isp->inputs[slave].camera,
+ video, s_stream, 1);
+ if (ret) {
+ dev_err(isp->dev, "depth mode slave sensor %s stream-on failed.\n",
+ isp->inputs[slave].camera->name);
+ v4l2_subdev_call(isp->inputs[master].camera, video, s_stream, 0);
+
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* FIXME! */
+#ifndef ISP2401
+void __wdt_on_master_slave_sensor(struct atomisp_device *isp, unsigned int wdt_duration)
+#else
+void __wdt_on_master_slave_sensor(struct atomisp_video_pipe *pipe,
+ unsigned int wdt_duration, bool enable)
+#endif
+{
+#ifndef ISP2401
+ if (atomisp_buffers_queued(&isp->asd[0]))
+ atomisp_wdt_refresh(&isp->asd[0], wdt_duration);
+ if (atomisp_buffers_queued(&isp->asd[1]))
+ atomisp_wdt_refresh(&isp->asd[1], wdt_duration);
+#else
+ static struct atomisp_video_pipe *pipe0;
+
+ if (enable) {
+ if (atomisp_buffers_queued_pipe(pipe0))
+ atomisp_wdt_refresh_pipe(pipe0, wdt_duration);
+ if (atomisp_buffers_queued_pipe(pipe))
+ atomisp_wdt_refresh_pipe(pipe, wdt_duration);
+ } else {
+ pipe0 = pipe;
+ }
+#endif
+}
+
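+/*
+ * Queue a V4L2_EVENT_ATOMISP_PAUSE_BUFFER event on the slave sensor's
+ * sub-device node, presumably so that userspace pauses buffer queueing
+ * on that stream.
+ */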
+static void atomisp_pause_buffer_event(struct atomisp_device *isp)
+{
+ struct v4l2_event event = {0};
+ int i;
+
+ event.type = V4L2_EVENT_ATOMISP_PAUSE_BUFFER;
+
+ for (i = 0; i < isp->num_of_streams; i++) {
+ int sensor_index = isp->asd[i].input_curr;
+ if (isp->inputs[sensor_index].camera_caps->
+ sensor[isp->asd[i].sensor_curr].is_slave) {
+ v4l2_event_queue(isp->asd[i].subdev.devnode, &event);
+ break;
+ }
+ }
+}
+
+/*
+ * Input system HW workaround.
+ * Input system address translation corrupts bursts during invalidate.
+ * The SW workaround for this is to set the burst length manually to 128
+ * in case of a 13MPx snapshot and to 1 otherwise.
+ */
+static void atomisp_dma_burst_len_cfg(struct atomisp_sub_device *asd)
+{
+
+ struct v4l2_mbus_framefmt *sink;
+ sink = atomisp_subdev_get_ffmt(&asd->subdev, NULL,
+ V4L2_SUBDEV_FORMAT_ACTIVE,
+ ATOMISP_SUBDEV_PAD_SINK);
+
+ if (sink->width * sink->height >= 4096*3072)
+ atomisp_store_uint32(DMA_BURST_SIZE_REG, 0x7F);
+ else
+ atomisp_store_uint32(DMA_BURST_SIZE_REG, 0x00);
+}
+
+/*
+ * This ioctl starts the capture during streaming I/O.
+ */
+static int atomisp_streamon(struct file *file, void *fh,
+ enum v4l2_buf_type type)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev);
+ struct atomisp_sub_device *asd = pipe->asd;
+ struct atomisp_device *isp = video_get_drvdata(vdev);
+ enum atomisp_css_pipe_id css_pipe_id;
+ unsigned int sensor_start_stream;
+ unsigned int wdt_duration = ATOMISP_ISP_TIMEOUT_DURATION;
+ int ret = 0;
+ unsigned long irqflags;
+
+ dev_dbg(isp->dev, "Start stream on pad %d for asd%d\n",
+ atomisp_subdev_source_pad(vdev), asd->index);
+
+ if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+ dev_dbg(isp->dev, "unsupported v4l2 buf type\n");
+ return -EINVAL;
+ }
+
+ rt_mutex_lock(&isp->mutex);
+ if (isp->isp_fatal_error) {
+ ret = -EIO;
+ goto out;
+ }
+
+ if (asd->streaming == ATOMISP_DEVICE_STREAMING_STOPPING) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ if (pipe->capq.streaming)
+ goto out;
+
+ /* Input system HW workaround */
+ atomisp_dma_burst_len_cfg(asd);
+
+ /*
+ * The number of streaming video nodes is based on which
+ * binary is going to be run.
+ */
+ sensor_start_stream = atomisp_sensor_start_stream(asd);
+
+ spin_lock_irqsave(&pipe->irq_lock, irqflags);
+ if (list_empty(&(pipe->capq.stream))) {
+ spin_unlock_irqrestore(&pipe->irq_lock, irqflags);
+ dev_dbg(isp->dev, "no buffer in the queue\n");
+ ret = -EINVAL;
+ goto out;
+ }
+ spin_unlock_irqrestore(&pipe->irq_lock, irqflags);
+
+ ret = videobuf_streamon(&pipe->capq);
+ if (ret)
+ goto out;
+
+ /* Reset pending capture request count. */
+ asd->pending_capture_request = 0;
+#ifdef ISP2401
+ asd->re_trigger_capture = false;
+#endif
+
+ if ((atomisp_subdev_streaming_count(asd) > sensor_start_stream) &&
+ (!isp->inputs[asd->input_curr].camera_caps->multi_stream_ctrl)) {
+ /* trigger still capture */
+ if (asd->continuous_mode->val &&
+ atomisp_subdev_source_pad(vdev)
+ == ATOMISP_SUBDEV_PAD_SOURCE_CAPTURE) {
+ if (asd->run_mode->val == ATOMISP_RUN_MODE_VIDEO)
+ dev_dbg(isp->dev, "SDV last video raw buffer id: %u\n",
+ asd->latest_preview_exp_id);
+ else
+ dev_dbg(isp->dev, "ZSL last preview raw buffer id: %u\n",
+ asd->latest_preview_exp_id);
+
+ if (asd->delayed_init == ATOMISP_DELAYED_INIT_QUEUED) {
+ flush_work(&asd->delayed_init_work);
+ rt_mutex_unlock(&isp->mutex);
+ if (wait_for_completion_interruptible(
+ &asd->init_done) != 0)
+ return -ERESTARTSYS;
+ rt_mutex_lock(&isp->mutex);
+ }
+
+ /* handle per_frame_setting parameter and buffers */
+ atomisp_handle_parameter_and_buffer(pipe);
+
+ /*
+ * Only ZSL/SDV capture requests come here; raise
+ * the ISP freq to the highest possible to minimize
+ * the S2S latency.
+ */
+ atomisp_freq_scaling(isp, ATOMISP_DFS_MODE_MAX, false);
+ /*
+ * When asd->enable_raw_buffer_lock->val is true,
+ * an extra IOCTL is needed to call
+ * atomisp_css_exp_id_capture and trigger the real capture.
+ */
+ if (!asd->enable_raw_buffer_lock->val) {
+ ret = atomisp_css_offline_capture_configure(asd,
+ asd->params.offline_parm.num_captures,
+ asd->params.offline_parm.skip_frames,
+ asd->params.offline_parm.offset);
+ if (ret) {
+ ret = -EINVAL;
+ goto out;
+ }
+ if (asd->depth_mode->val)
+ atomisp_pause_buffer_event(isp);
+ }
+ }
+ atomisp_qbuffers_to_css(asd);
+ goto out;
+ }
+
+ if (asd->streaming == ATOMISP_DEVICE_STREAMING_ENABLED) {
+ atomisp_qbuffers_to_css(asd);
+ goto start_sensor;
+ }
+
+ css_pipe_id = atomisp_get_css_pipe_id(asd);
+
+ ret = atomisp_acc_load_extensions(asd);
+ if (ret < 0) {
+ dev_err(isp->dev, "acc extension failed to load\n");
+ goto out;
+ }
+
+ if (asd->params.css_update_params_needed) {
+ atomisp_apply_css_parameters(asd, &asd->params.css_param);
+ if (asd->params.css_param.update_flag.dz_config)
+ atomisp_css_set_dz_config(asd,
+ &asd->params.css_param.dz_config);
+ atomisp_css_update_isp_params(asd);
+ asd->params.css_update_params_needed = false;
+ memset(&asd->params.css_param.update_flag, 0,
+ sizeof(struct atomisp_parameters));
+ }
+ asd->params.dvs_6axis = NULL;
+
+ ret = atomisp_css_start(asd, css_pipe_id, false);
+ if (ret)
+ goto out;
+
+ asd->streaming = ATOMISP_DEVICE_STREAMING_ENABLED;
+ atomic_set(&asd->sof_count, -1);
+ atomic_set(&asd->sequence, -1);
+ atomic_set(&asd->sequence_temp, -1);
+ if (isp->sw_contex.file_input)
+ wdt_duration = ATOMISP_ISP_FILE_TIMEOUT_DURATION;
+
+ asd->params.dis_proj_data_valid = false;
+ asd->latest_preview_exp_id = 0;
+ asd->postview_exp_id = 1;
+ asd->preview_exp_id = 1;
+
+ /* handle per_frame_setting parameter and buffers */
+ atomisp_handle_parameter_and_buffer(pipe);
+
+ atomisp_qbuffers_to_css(asd);
+
+ /* Only start the sensor when the last streaming instance has started */
+ if (atomisp_subdev_streaming_count(asd) < sensor_start_stream)
+ goto out;
+
+start_sensor:
+ if (isp->flash) {
+ asd->params.num_flash_frames = 0;
+ asd->params.flash_state = ATOMISP_FLASH_IDLE;
+ atomisp_setup_flash(asd);
+ }
+
+ if (!isp->sw_contex.file_input) {
+ atomisp_css_irq_enable(isp, CSS_IRQ_INFO_CSS_RECEIVER_SOF,
+ atomisp_css_valid_sof(isp));
+ atomisp_csi2_configure(asd);
+ /*
+ * set freq to max when streaming count > 1, which indicates
+ * that dual cameras are running
+ */
+ if (atomisp_streaming_count(isp) > 1) {
+ if (atomisp_freq_scaling(isp,
+ ATOMISP_DFS_MODE_MAX, false) < 0)
+ dev_dbg(isp->dev, "dfs failed!\n");
+ } else {
+ if (atomisp_freq_scaling(isp,
+ ATOMISP_DFS_MODE_AUTO, false) < 0)
+ dev_dbg(isp->dev, "dfs failed!\n");
+ }
+ } else {
+ if (atomisp_freq_scaling(isp, ATOMISP_DFS_MODE_MAX, false) < 0)
+ dev_dbg(isp->dev, "dfs failed!\n");
+ }
+
+ if (asd->depth_mode->val && atomisp_streaming_count(isp) ==
+ ATOMISP_DEPTH_SENSOR_STREAMON_COUNT) {
+ ret = atomisp_stream_on_master_slave_sensor(isp, false);
+ if (ret) {
+ dev_err(isp->dev, "master slave sensor stream on failed!\n");
+ goto out;
+ }
+#ifndef ISP2401
+ __wdt_on_master_slave_sensor(isp, wdt_duration);
+#else
+ __wdt_on_master_slave_sensor(pipe, wdt_duration, true);
+#endif
+ goto start_delay_wq;
+ } else if (asd->depth_mode->val && (atomisp_streaming_count(isp) <
+ ATOMISP_DEPTH_SENSOR_STREAMON_COUNT)) {
+#ifdef ISP2401
+ __wdt_on_master_slave_sensor(pipe, wdt_duration, false);
+#endif
+ goto start_delay_wq;
+ }
+
+ /* Enable the CSI interface on ANN B0/K0 */
+ if (isp->media_dev.hw_revision >= ((ATOMISP_HW_REVISION_ISP2401 <<
+ ATOMISP_HW_REVISION_SHIFT) | ATOMISP_HW_STEPPING_B0)) {
+ pci_write_config_word(isp->pdev, MRFLD_PCI_CSI_CONTROL,
+ isp->saved_regs.csi_control |
+ MRFLD_PCI_CSI_CONTROL_CSI_READY);
+ }
+
+ /* stream on the sensor */
+ ret = v4l2_subdev_call(isp->inputs[asd->input_curr].camera,
+ video, s_stream, 1);
+ if (ret) {
+ asd->streaming = ATOMISP_DEVICE_STREAMING_DISABLED;
+ ret = -EINVAL;
+ goto out;
+ }
+
+#ifndef ISP2401
+ if (atomisp_buffers_queued(asd))
+ atomisp_wdt_refresh(asd, wdt_duration);
+#else
+ if (atomisp_buffers_queued_pipe(pipe))
+ atomisp_wdt_refresh_pipe(pipe, wdt_duration);
+#endif
+
+start_delay_wq:
+ if (asd->continuous_mode->val) {
+ struct v4l2_mbus_framefmt *sink;
+
+ sink = atomisp_subdev_get_ffmt(&asd->subdev, NULL,
+ V4L2_SUBDEV_FORMAT_ACTIVE,
+ ATOMISP_SUBDEV_PAD_SINK);
+
+ reinit_completion(&asd->init_done);
+ asd->delayed_init = ATOMISP_DELAYED_INIT_QUEUED;
+ queue_work(asd->delayed_init_workq, &asd->delayed_init_work);
+ atomisp_css_set_cont_prev_start_time(isp,
+ ATOMISP_CALC_CSS_PREV_OVERLAP(sink->height));
+ } else {
+ asd->delayed_init = ATOMISP_DELAYED_INIT_NOT_QUEUED;
+ }
+out:
+ rt_mutex_unlock(&isp->mutex);
+ return ret;
+}
+
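+/*
+ * Stop streaming on the pipe behind the given video node. The caller must
+ * hold both isp->mutex and isp->streamoff_mutex (see the BUG_ON checks
+ * below).
+ */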
+int __atomisp_streamoff(struct file *file, void *fh, enum v4l2_buf_type type)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct atomisp_device *isp = video_get_drvdata(vdev);
+ struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev);
+ struct atomisp_sub_device *asd = pipe->asd;
+ struct atomisp_video_pipe *capture_pipe = NULL;
+ struct atomisp_video_pipe *vf_pipe = NULL;
+ struct atomisp_video_pipe *preview_pipe = NULL;
+ struct atomisp_video_pipe *video_pipe = NULL;
+ struct videobuf_buffer *vb, *_vb;
+ enum atomisp_css_pipe_id css_pipe_id;
+ int ret;
+ unsigned long flags;
+ bool first_streamoff = false;
+
+ dev_dbg(isp->dev, "Stop stream on pad %d for asd%d\n",
+ atomisp_subdev_source_pad(vdev), asd->index);
+
+ BUG_ON(!rt_mutex_is_locked(&isp->mutex));
+ BUG_ON(!mutex_is_locked(&isp->streamoff_mutex));
+
+ if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+ dev_dbg(isp->dev, "unsupported v4l2 buf type\n");
+ return -EINVAL;
+ }
+
+ /*
+ * do only videobuf_streamoff for capture & vf pipes in
+ * case of continuous capture
+ */
+ if ((asd->continuous_mode->val ||
+ isp->inputs[asd->input_curr].camera_caps->multi_stream_ctrl) &&
+ atomisp_subdev_source_pad(vdev) !=
+ ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW &&
+ atomisp_subdev_source_pad(vdev) !=
+ ATOMISP_SUBDEV_PAD_SOURCE_VIDEO) {
+
+ if (isp->inputs[asd->input_curr].camera_caps->multi_stream_ctrl) {
+ v4l2_subdev_call(isp->inputs[asd->input_curr].camera,
+ video, s_stream, 0);
+ } else if (atomisp_subdev_source_pad(vdev)
+ == ATOMISP_SUBDEV_PAD_SOURCE_CAPTURE) {
+ /* stop continuous still capture if needed */
+ if (asd->params.offline_parm.num_captures == -1)
+ atomisp_css_offline_capture_configure(asd,
+ 0, 0, 0);
+ atomisp_freq_scaling(isp, ATOMISP_DFS_MODE_AUTO, false);
+ }
+ /*
+ * Currently there is no way to flush buffers queued to css.
+ * When doing videobuf_streamoff, active buffers will be
+ * marked as VIDEOBUF_NEEDS_INIT. HAL will be able to use
+ * these buffers again, and these buffers might be queued to
+ * css more than once! Warn here if HAL has not dequeued all
+ * buffers back before calling streamoff.
+ */
+ if (pipe->buffers_in_css != 0) {
+ WARN(1, "%s: buffers of vdev %s still in CSS!\n",
+ __func__, pipe->vdev.name);
+
+ /*
+ * Buffers remaining in css may be dequeued during the
+ * next stream-on, which causes serious issues as the
+ * buffers have already become invalid after the
+ * previous stream-off.
+ *
+ * There is no way to flush the buffers except resetting
+ * the whole css.
+ */
+ dev_warn(isp->dev, "Reset CSS to clean up css buffers.\n");
+ atomisp_css_flush(isp);
+ }
+
+ return videobuf_streamoff(&pipe->capq);
+ }
+
+ if (!pipe->capq.streaming)
+ return 0;
+
+ spin_lock_irqsave(&isp->lock, flags);
+ if (asd->streaming == ATOMISP_DEVICE_STREAMING_ENABLED) {
+ asd->streaming = ATOMISP_DEVICE_STREAMING_STOPPING;
+ first_streamoff = true;
+ }
+ spin_unlock_irqrestore(&isp->lock, flags);
+
+ if (first_streamoff) {
+ /* if other streams are running, the watchdog should not be disabled */
+ rt_mutex_unlock(&isp->mutex);
+ atomisp_wdt_stop(asd, true);
+
+ /*
+ * must stop sending pixels into GP_FIFO before stopping
+ * the pipeline.
+ */
+ if (isp->sw_contex.file_input)
+ v4l2_subdev_call(isp->inputs[asd->input_curr].camera,
+ video, s_stream, 0);
+
+ rt_mutex_lock(&isp->mutex);
+ atomisp_acc_unload_extensions(asd);
+ }
+
+ spin_lock_irqsave(&isp->lock, flags);
+ if (atomisp_subdev_streaming_count(asd) == 1)
+ asd->streaming = ATOMISP_DEVICE_STREAMING_DISABLED;
+ spin_unlock_irqrestore(&isp->lock, flags);
+
+ if (!first_streamoff) {
+ ret = videobuf_streamoff(&pipe->capq);
+ if (ret)
+ return ret;
+ goto stopsensor;
+ }
+
+ atomisp_clear_css_buffer_counters(asd);
+
+ if (!isp->sw_contex.file_input)
+ atomisp_css_irq_enable(isp, CSS_IRQ_INFO_CSS_RECEIVER_SOF,
+ false);
+
+ if (asd->delayed_init == ATOMISP_DELAYED_INIT_QUEUED) {
+ cancel_work_sync(&asd->delayed_init_work);
+ asd->delayed_init = ATOMISP_DELAYED_INIT_NOT_QUEUED;
+ }
+ if (first_streamoff) {
+ css_pipe_id = atomisp_get_css_pipe_id(asd);
+ ret = atomisp_css_stop(asd, css_pipe_id, false);
+ }
+ /* cancel work queue */
+ if (asd->video_out_capture.users) {
+ capture_pipe = &asd->video_out_capture;
+ wake_up_interruptible(&capture_pipe->capq.wait);
+ }
+ if (asd->video_out_vf.users) {
+ vf_pipe = &asd->video_out_vf;
+ wake_up_interruptible(&vf_pipe->capq.wait);
+ }
+ if (asd->video_out_preview.users) {
+ preview_pipe = &asd->video_out_preview;
+ wake_up_interruptible(&preview_pipe->capq.wait);
+ }
+ if (asd->video_out_video_capture.users) {
+ video_pipe = &asd->video_out_video_capture;
+ wake_up_interruptible(&video_pipe->capq.wait);
+ }
+ ret = videobuf_streamoff(&pipe->capq);
+ if (ret)
+ return ret;
+
+ /* cleanup css here */
+ /* no need for this, as ISP will be reset anyway */
+ /*atomisp_flush_bufs_in_css(isp);*/
+
+ spin_lock_irqsave(&pipe->irq_lock, flags);
+ list_for_each_entry_safe(vb, _vb, &pipe->activeq, queue) {
+ vb->state = VIDEOBUF_PREPARED;
+ list_del(&vb->queue);
+ }
+ list_for_each_entry_safe(vb, _vb, &pipe->buffers_waiting_for_param, queue) {
+ vb->state = VIDEOBUF_PREPARED;
+ list_del(&vb->queue);
+ pipe->frame_request_config_id[vb->i] = 0;
+ }
+ spin_unlock_irqrestore(&pipe->irq_lock, flags);
+
+ atomisp_subdev_cleanup_pending_events(asd);
+stopsensor:
+ if (atomisp_subdev_streaming_count(asd) + 1
+ != atomisp_sensor_start_stream(asd))
+ return 0;
+
+ if (!isp->sw_contex.file_input)
+ ret = v4l2_subdev_call(isp->inputs[asd->input_curr].camera,
+ video, s_stream, 0);
+
+ if (isp->flash) {
+ asd->params.num_flash_frames = 0;
+ asd->params.flash_state = ATOMISP_FLASH_IDLE;
+ }
+
+ /* if other streams are running, isp should not be powered off */
+ if (atomisp_streaming_count(isp)) {
+ atomisp_css_flush(isp);
+ return 0;
+ }
+
+ /* Disable the CSI interface on ANN B0/K0 */
+ if (isp->media_dev.hw_revision >= ((ATOMISP_HW_REVISION_ISP2401 <<
+ ATOMISP_HW_REVISION_SHIFT) | ATOMISP_HW_STEPPING_B0)) {
+ pci_write_config_word(isp->pdev, MRFLD_PCI_CSI_CONTROL,
+ isp->saved_regs.csi_control &
+ ~MRFLD_PCI_CSI_CONTROL_CSI_READY);
+ }
+
+ if (atomisp_freq_scaling(isp, ATOMISP_DFS_MODE_LOW, false))
+ dev_warn(isp->dev, "DFS failed.\n");
+ /*
+ * ISP workaround: we need to reset the ISP.
+ * Is this the correct time to reset the ISP, when the first node
+ * does streamoff?
+ */
+ if (isp->sw_contex.power_state == ATOM_ISP_POWER_UP) {
+ unsigned int i;
+ bool recreate_streams[MAX_STREAM_NUM] = {0};
+ if (isp->isp_timeout)
+ dev_err(isp->dev, "%s: Resetting with WA activated",
+ __func__);
+ /*
+ * It is possible that the other asd stream is at the stage
+ * where v4l2_setfmt has just been called on it, which
+ * creates a css stream on that stream. But at this point
+ * there is no way to destroy the css stream created on it.
+ *
+ * So force stream destruction here.
+ */
+ for (i = 0; i < isp->num_of_streams; i++) {
+ if (isp->asd[i].stream_prepared) {
+ atomisp_destroy_pipes_stream_force(&isp->
+ asd[i]);
+ recreate_streams[i] = true;
+ }
+ }
+
+ /* disable PUNIT/ISP acknowledge/handshake - SRSE=3 */
+ pci_write_config_dword(isp->pdev, PCI_I_CONTROL, isp->saved_regs.i_control |
+ MRFLD_PCI_I_CONTROL_SRSE_RESET_MASK);
+ dev_err(isp->dev, "atomisp_reset");
+ atomisp_reset(isp);
+ for (i = 0; i < isp->num_of_streams; i++) {
+ if (recreate_streams[i])
+ atomisp_create_pipes_stream(&isp->asd[i]);
+ }
+ isp->isp_timeout = false;
+ }
+ return ret;
+}
+
+static int atomisp_streamoff(struct file *file, void *fh,
+ enum v4l2_buf_type type)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct atomisp_device *isp = video_get_drvdata(vdev);
+ int rval;
+
+ mutex_lock(&isp->streamoff_mutex);
+ rt_mutex_lock(&isp->mutex);
+ rval = __atomisp_streamoff(file, fh, type);
+ rt_mutex_unlock(&isp->mutex);
+ mutex_unlock(&isp->streamoff_mutex);
+
+ return rval;
+}
+
+/*
+ * To get the current value of a control,
+ * applications initialize the id field of a struct v4l2_control and
+ * call this ioctl with a pointer to this structure.
+ */
+static int atomisp_g_ctrl(struct file *file, void *fh,
+ struct v4l2_control *control)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct atomisp_sub_device *asd = atomisp_to_video_pipe(vdev)->asd;
+ struct atomisp_device *isp = video_get_drvdata(vdev);
+ int i, ret = -EINVAL;
+
+ for (i = 0; i < ctrls_num; i++) {
+ if (ci_v4l2_controls[i].id == control->id) {
+ ret = 0;
+ break;
+ }
+ }
+
+ if (ret)
+ return ret;
+
+ rt_mutex_lock(&isp->mutex);
+
+ switch (control->id) {
+ case V4L2_CID_IRIS_ABSOLUTE:
+ case V4L2_CID_EXPOSURE_ABSOLUTE:
+ case V4L2_CID_FNUMBER_ABSOLUTE:
+ case V4L2_CID_2A_STATUS:
+ case V4L2_CID_AUTO_N_PRESET_WHITE_BALANCE:
+ case V4L2_CID_EXPOSURE:
+ case V4L2_CID_EXPOSURE_AUTO:
+ case V4L2_CID_SCENE_MODE:
+ case V4L2_CID_ISO_SENSITIVITY:
+ case V4L2_CID_ISO_SENSITIVITY_AUTO:
+ case V4L2_CID_CONTRAST:
+ case V4L2_CID_SATURATION:
+ case V4L2_CID_SHARPNESS:
+ case V4L2_CID_3A_LOCK:
+ case V4L2_CID_EXPOSURE_ZONE_NUM:
+ case V4L2_CID_TEST_PATTERN:
+ case V4L2_CID_TEST_PATTERN_COLOR_R:
+ case V4L2_CID_TEST_PATTERN_COLOR_GR:
+ case V4L2_CID_TEST_PATTERN_COLOR_GB:
+ case V4L2_CID_TEST_PATTERN_COLOR_B:
+ rt_mutex_unlock(&isp->mutex);
+ return v4l2_g_ctrl(isp->inputs[asd->input_curr].camera->
+ ctrl_handler, control);
+ case V4L2_CID_COLORFX:
+ ret = atomisp_color_effect(asd, 0, &control->value);
+ break;
+ case V4L2_CID_ATOMISP_BAD_PIXEL_DETECTION:
+ ret = atomisp_bad_pixel(asd, 0, &control->value);
+ break;
+ case V4L2_CID_ATOMISP_POSTPROCESS_GDC_CAC:
+ ret = atomisp_gdc_cac(asd, 0, &control->value);
+ break;
+ case V4L2_CID_ATOMISP_VIDEO_STABLIZATION:
+ ret = atomisp_video_stable(asd, 0, &control->value);
+ break;
+ case V4L2_CID_ATOMISP_FIXED_PATTERN_NR:
+ ret = atomisp_fixed_pattern(asd, 0, &control->value);
+ break;
+ case V4L2_CID_ATOMISP_FALSE_COLOR_CORRECTION:
+ ret = atomisp_false_color(asd, 0, &control->value);
+ break;
+ case V4L2_CID_ATOMISP_LOW_LIGHT:
+ ret = atomisp_low_light(asd, 0, &control->value);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ rt_mutex_unlock(&isp->mutex);
+ return ret;
+}
+
+/*
+ * To change the value of a control.
+ * applications initialize the id and value fields of a struct v4l2_control
+ * and call this ioctl.
+ */
+static int atomisp_s_ctrl(struct file *file, void *fh,
+ struct v4l2_control *control)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct atomisp_sub_device *asd = atomisp_to_video_pipe(vdev)->asd;
+ struct atomisp_device *isp = video_get_drvdata(vdev);
+ int i, ret = -EINVAL;
+
+ for (i = 0; i < ctrls_num; i++) {
+ if (ci_v4l2_controls[i].id == control->id) {
+ ret = 0;
+ break;
+ }
+ }
+
+ if (ret)
+ return ret;
+
+ rt_mutex_lock(&isp->mutex);
+ switch (control->id) {
+ case V4L2_CID_AUTO_N_PRESET_WHITE_BALANCE:
+ case V4L2_CID_EXPOSURE:
+ case V4L2_CID_EXPOSURE_AUTO:
+ case V4L2_CID_EXPOSURE_AUTO_PRIORITY:
+ case V4L2_CID_SCENE_MODE:
+ case V4L2_CID_ISO_SENSITIVITY:
+ case V4L2_CID_ISO_SENSITIVITY_AUTO:
+ case V4L2_CID_POWER_LINE_FREQUENCY:
+ case V4L2_CID_EXPOSURE_METERING:
+ case V4L2_CID_CONTRAST:
+ case V4L2_CID_SATURATION:
+ case V4L2_CID_SHARPNESS:
+ case V4L2_CID_3A_LOCK:
+ case V4L2_CID_COLORFX_CBCR:
+ case V4L2_CID_TEST_PATTERN:
+ case V4L2_CID_TEST_PATTERN_COLOR_R:
+ case V4L2_CID_TEST_PATTERN_COLOR_GR:
+ case V4L2_CID_TEST_PATTERN_COLOR_GB:
+ case V4L2_CID_TEST_PATTERN_COLOR_B:
+ rt_mutex_unlock(&isp->mutex);
+ return v4l2_s_ctrl(NULL,
+ isp->inputs[asd->input_curr].camera->
+ ctrl_handler, control);
+ case V4L2_CID_COLORFX:
+ ret = atomisp_color_effect(asd, 1, &control->value);
+ break;
+ case V4L2_CID_ATOMISP_BAD_PIXEL_DETECTION:
+ ret = atomisp_bad_pixel(asd, 1, &control->value);
+ break;
+ case V4L2_CID_ATOMISP_POSTPROCESS_GDC_CAC:
+ ret = atomisp_gdc_cac(asd, 1, &control->value);
+ break;
+ case V4L2_CID_ATOMISP_VIDEO_STABLIZATION:
+ ret = atomisp_video_stable(asd, 1, &control->value);
+ break;
+ case V4L2_CID_ATOMISP_FIXED_PATTERN_NR:
+ ret = atomisp_fixed_pattern(asd, 1, &control->value);
+ break;
+ case V4L2_CID_ATOMISP_FALSE_COLOR_CORRECTION:
+ ret = atomisp_false_color(asd, 1, &control->value);
+ break;
+ case V4L2_CID_REQUEST_FLASH:
+ ret = atomisp_flash_enable(asd, control->value);
+ break;
+ case V4L2_CID_ATOMISP_LOW_LIGHT:
+ ret = atomisp_low_light(asd, 1, &control->value);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ rt_mutex_unlock(&isp->mutex);
+ return ret;
+}
+/*
+ * To query the attributes of a control,
+ * applications set the id field of a struct v4l2_queryctrl and call
+ * this ioctl with a pointer to this structure. The driver fills in
+ * the rest of the structure.
+ */
+static int atomisp_queryctl(struct file *file, void *fh,
+ struct v4l2_queryctrl *qc)
+{
+ int i, ret = -EINVAL;
+ struct video_device *vdev = video_devdata(file);
+ struct atomisp_sub_device *asd = atomisp_to_video_pipe(vdev)->asd;
+ struct atomisp_device *isp = video_get_drvdata(vdev);
+
+ switch (qc->id) {
+ case V4L2_CID_FOCUS_ABSOLUTE:
+ case V4L2_CID_FOCUS_RELATIVE:
+ case V4L2_CID_FOCUS_STATUS:
+#ifndef ISP2401
+ return v4l2_queryctrl(isp->inputs[asd->input_curr].camera->
+ ctrl_handler, qc);
+#else
+ if (isp->motor)
+ return v4l2_queryctrl(isp->motor->ctrl_handler, qc);
+ else
+ return v4l2_queryctrl(isp->inputs[asd->input_curr].
+ camera->ctrl_handler, qc);
+#endif
+ }
+
+ if (qc->id & V4L2_CTRL_FLAG_NEXT_CTRL)
+ return ret;
+
+ for (i = 0; i < ctrls_num; i++) {
+ if (ci_v4l2_controls[i].id == qc->id) {
+ memcpy(qc, &ci_v4l2_controls[i],
+ sizeof(struct v4l2_queryctrl));
+ qc->reserved[0] = 0;
+ ret = 0;
+ break;
+ }
+ }
+ if (ret != 0)
+ qc->flags = V4L2_CTRL_FLAG_DISABLED;
+
+ return ret;
+}
+
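+/*
+ * Read the extended controls that are owned by the sensor, motor and flash
+ * sub-devices. Controls not handled here return -EINVAL so that the caller
+ * can fall back to atomisp_g_ctrl().
+ */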
+static int atomisp_camera_g_ext_ctrls(struct file *file, void *fh,
+ struct v4l2_ext_controls *c)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct atomisp_sub_device *asd = atomisp_to_video_pipe(vdev)->asd;
+ struct atomisp_device *isp = video_get_drvdata(vdev);
+ struct v4l2_control ctrl;
+ int i;
+ int ret = 0;
+
+ for (i = 0; i < c->count; i++) {
+ ctrl.id = c->controls[i].id;
+ ctrl.value = c->controls[i].value;
+ switch (ctrl.id) {
+ case V4L2_CID_EXPOSURE_ABSOLUTE:
+ case V4L2_CID_EXPOSURE_AUTO:
+ case V4L2_CID_IRIS_ABSOLUTE:
+ case V4L2_CID_FNUMBER_ABSOLUTE:
+ case V4L2_CID_BIN_FACTOR_HORZ:
+ case V4L2_CID_BIN_FACTOR_VERT:
+ case V4L2_CID_3A_LOCK:
+ case V4L2_CID_TEST_PATTERN:
+ case V4L2_CID_TEST_PATTERN_COLOR_R:
+ case V4L2_CID_TEST_PATTERN_COLOR_GR:
+ case V4L2_CID_TEST_PATTERN_COLOR_GB:
+ case V4L2_CID_TEST_PATTERN_COLOR_B:
+ /*
+ * Exposure-related controls will be handled by the
+ * sensor driver
+ */
+ ret =
+ v4l2_g_ctrl(isp->inputs[asd->input_curr].camera->
+ ctrl_handler, &ctrl);
+ break;
+ case V4L2_CID_FOCUS_ABSOLUTE:
+ case V4L2_CID_FOCUS_RELATIVE:
+ case V4L2_CID_FOCUS_STATUS:
+ case V4L2_CID_FOCUS_AUTO:
+#ifndef ISP2401
+ if (isp->inputs[asd->input_curr].motor)
+#else
+ if (isp->motor)
+#endif
+ ret =
+#ifndef ISP2401
+ v4l2_g_ctrl(isp->inputs[asd->input_curr].
+ motor->ctrl_handler, &ctrl);
+#else
+ v4l2_g_ctrl(isp->motor->ctrl_handler,
+ &ctrl);
+#endif
+ else
+ ret =
+ v4l2_g_ctrl(isp->inputs[asd->input_curr].
+ camera->ctrl_handler, &ctrl);
+ break;
+ case V4L2_CID_FLASH_STATUS:
+ case V4L2_CID_FLASH_INTENSITY:
+ case V4L2_CID_FLASH_TORCH_INTENSITY:
+ case V4L2_CID_FLASH_INDICATOR_INTENSITY:
+ case V4L2_CID_FLASH_TIMEOUT:
+ case V4L2_CID_FLASH_STROBE:
+ case V4L2_CID_FLASH_MODE:
+ case V4L2_CID_FLASH_STATUS_REGISTER:
+ if (isp->flash)
+ ret =
+ v4l2_g_ctrl(isp->flash->ctrl_handler,
+ &ctrl);
+ break;
+ case V4L2_CID_ZOOM_ABSOLUTE:
+ rt_mutex_lock(&isp->mutex);
+ ret = atomisp_digital_zoom(asd, 0, &ctrl.value);
+ rt_mutex_unlock(&isp->mutex);
+ break;
+ case V4L2_CID_G_SKIP_FRAMES:
+ ret = v4l2_subdev_call(
+ isp->inputs[asd->input_curr].camera,
+ sensor, g_skip_frames, (u32 *)&ctrl.value);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ if (ret) {
+ c->error_idx = i;
+ break;
+ }
+ c->controls[i].value = ctrl.value;
+ }
+ return ret;
+}
+
+/* This ioctl allows the application to get multiple controls by class */
+static int atomisp_g_ext_ctrls(struct file *file, void *fh,
+ struct v4l2_ext_controls *c)
+{
+ struct v4l2_control ctrl;
+ int i, ret = 0;
+
+ /*
+ * input_lock is not needed for the camera-related IOCTLs;
+ * the input_lock downgrades the FPS of 3A.
+ */
+ ret = atomisp_camera_g_ext_ctrls(file, fh, c);
+ if (ret != -EINVAL)
+ return ret;
+
+ for (i = 0; i < c->count; i++) {
+ ctrl.id = c->controls[i].id;
+ ctrl.value = c->controls[i].value;
+ ret = atomisp_g_ctrl(file, fh, &ctrl);
+ c->controls[i].value = ctrl.value;
+ if (ret) {
+ c->error_idx = i;
+ break;
+ }
+ }
+ return ret;
+}
+
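+/*
+ * Write the extended controls owned by the sensor, motor and flash
+ * sub-devices; anything else is tried against asd's own control handler
+ * and otherwise rejected with -EINVAL.
+ */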
+static int atomisp_camera_s_ext_ctrls(struct file *file, void *fh,
+ struct v4l2_ext_controls *c)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct atomisp_sub_device *asd = atomisp_to_video_pipe(vdev)->asd;
+ struct atomisp_device *isp = video_get_drvdata(vdev);
+ struct v4l2_control ctrl;
+ int i;
+ int ret = 0;
+
+ for (i = 0; i < c->count; i++) {
+ struct v4l2_ctrl *ctr;
+
+ ctrl.id = c->controls[i].id;
+ ctrl.value = c->controls[i].value;
+ switch (ctrl.id) {
+ case V4L2_CID_EXPOSURE_ABSOLUTE:
+ case V4L2_CID_EXPOSURE_AUTO:
+ case V4L2_CID_EXPOSURE_METERING:
+ case V4L2_CID_IRIS_ABSOLUTE:
+ case V4L2_CID_FNUMBER_ABSOLUTE:
+ case V4L2_CID_VCM_TIMEING:
+ case V4L2_CID_VCM_SLEW:
+ case V4L2_CID_3A_LOCK:
+ case V4L2_CID_TEST_PATTERN:
+ case V4L2_CID_TEST_PATTERN_COLOR_R:
+ case V4L2_CID_TEST_PATTERN_COLOR_GR:
+ case V4L2_CID_TEST_PATTERN_COLOR_GB:
+ case V4L2_CID_TEST_PATTERN_COLOR_B:
+ ret = v4l2_s_ctrl(NULL,
+ isp->inputs[asd->input_curr].camera->
+ ctrl_handler, &ctrl);
+ break;
+ case V4L2_CID_FOCUS_ABSOLUTE:
+ case V4L2_CID_FOCUS_RELATIVE:
+ case V4L2_CID_FOCUS_STATUS:
+ case V4L2_CID_FOCUS_AUTO:
+#ifndef ISP2401
+ if (isp->inputs[asd->input_curr].motor)
+#else
+ if (isp->motor)
+#endif
+ ret = v4l2_s_ctrl(NULL,
+#ifndef ISP2401
+ isp->inputs[asd->input_curr].
+ motor->ctrl_handler, &ctrl);
+#else
+ isp->motor->ctrl_handler,
+ &ctrl);
+#endif
+ else
+ ret = v4l2_s_ctrl(NULL,
+ isp->inputs[asd->input_curr].
+ camera->ctrl_handler, &ctrl);
+ break;
+ case V4L2_CID_FLASH_STATUS:
+ case V4L2_CID_FLASH_INTENSITY:
+ case V4L2_CID_FLASH_TORCH_INTENSITY:
+ case V4L2_CID_FLASH_INDICATOR_INTENSITY:
+ case V4L2_CID_FLASH_TIMEOUT:
+ case V4L2_CID_FLASH_STROBE:
+ case V4L2_CID_FLASH_MODE:
+ case V4L2_CID_FLASH_STATUS_REGISTER:
+ rt_mutex_lock(&isp->mutex);
+ if (isp->flash) {
+ ret =
+ v4l2_s_ctrl(NULL, isp->flash->ctrl_handler,
+ &ctrl);
+ /* When flash mode is changed we need to reset
+ * flash state */
+ if (ctrl.id == V4L2_CID_FLASH_MODE) {
+ asd->params.flash_state =
+ ATOMISP_FLASH_IDLE;
+ asd->params.num_flash_frames = 0;
+ }
+ }
+ rt_mutex_unlock(&isp->mutex);
+ break;
+ case V4L2_CID_ZOOM_ABSOLUTE:
+ rt_mutex_lock(&isp->mutex);
+ ret = atomisp_digital_zoom(asd, 1, &ctrl.value);
+ rt_mutex_unlock(&isp->mutex);
+ break;
+ default:
+ ctr = v4l2_ctrl_find(&asd->ctrl_handler, ctrl.id);
+ if (ctr)
+ ret = v4l2_ctrl_s_ctrl(ctr, ctrl.value);
+ else
+ ret = -EINVAL;
+ }
+
+ if (ret) {
+ c->error_idx = i;
+ break;
+ }
+ c->controls[i].value = ctrl.value;
+ }
+ return ret;
+}
+
+/* This ioctl allows the application to set multiple controls by class */
+static int atomisp_s_ext_ctrls(struct file *file, void *fh,
+ struct v4l2_ext_controls *c)
+{
+ struct v4l2_control ctrl;
+ int i, ret = 0;
+
+ /*
+ * input_lock is not needed for the camera-related IOCTLs;
+ * the input_lock downgrades the FPS of 3A.
+ */
+ ret = atomisp_camera_s_ext_ctrls(file, fh, c);
+ if (ret != -EINVAL)
+ return ret;
+
+ for (i = 0; i < c->count; i++) {
+ ctrl.id = c->controls[i].id;
+ ctrl.value = c->controls[i].value;
+ ret = atomisp_s_ctrl(file, fh, &ctrl);
+ c->controls[i].value = ctrl.value;
+ if (ret) {
+ c->error_idx = i;
+ break;
+ }
+ }
+ return ret;
+}
+
+/*
+ * vidioc_g/s_parm are used to switch the isp running mode
+ */
+static int atomisp_g_parm(struct file *file, void *fh,
+ struct v4l2_streamparm *parm)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct atomisp_sub_device *asd = atomisp_to_video_pipe(vdev)->asd;
+ struct atomisp_device *isp = video_get_drvdata(vdev);
+
+ if (parm->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+ dev_err(isp->dev, "unsupport v4l2 buf type\n");
+ return -EINVAL;
+ }
+
+ rt_mutex_lock(&isp->mutex);
+ parm->parm.capture.capturemode = asd->run_mode->val;
+ rt_mutex_unlock(&isp->mutex);
+
+ return 0;
+}
+
+static int atomisp_s_parm(struct file *file, void *fh,
+ struct v4l2_streamparm *parm)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct atomisp_device *isp = video_get_drvdata(vdev);
+ struct atomisp_sub_device *asd = atomisp_to_video_pipe(vdev)->asd;
+ int mode;
+ int rval;
+ int fps;
+
+ if (parm->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+ dev_err(isp->dev, "unsupport v4l2 buf type\n");
+ return -EINVAL;
+ }
+
+ rt_mutex_lock(&isp->mutex);
+
+ asd->high_speed_mode = false;
+ switch (parm->parm.capture.capturemode) {
+ case CI_MODE_NONE: {
+ struct v4l2_subdev_frame_interval fi = {0};
+
+ fi.interval = parm->parm.capture.timeperframe;
+
+ rval = v4l2_subdev_call(isp->inputs[asd->input_curr].camera,
+ video, s_frame_interval, &fi);
+ if (!rval)
+ parm->parm.capture.timeperframe = fi.interval;
+
+ if (fi.interval.numerator != 0) {
+ fps = fi.interval.denominator / fi.interval.numerator;
+ if (fps > 30)
+ asd->high_speed_mode = true;
+ }
+
+ goto out;
+ }
+ case CI_MODE_VIDEO:
+ mode = ATOMISP_RUN_MODE_VIDEO;
+ break;
+ case CI_MODE_STILL_CAPTURE:
+ mode = ATOMISP_RUN_MODE_STILL_CAPTURE;
+ break;
+ case CI_MODE_CONTINUOUS:
+ mode = ATOMISP_RUN_MODE_CONTINUOUS_CAPTURE;
+ break;
+ case CI_MODE_PREVIEW:
+ mode = ATOMISP_RUN_MODE_PREVIEW;
+ break;
+ default:
+ rval = -EINVAL;
+ goto out;
+ }
+
+ rval = v4l2_ctrl_s_ctrl(asd->run_mode, mode);
+
+out:
+ rt_mutex_unlock(&isp->mutex);
+
+ return rval == -ENOIOCTLCMD ? 0 : rval;
+}
+
+static int atomisp_s_parm_file(struct file *file, void *fh,
+ struct v4l2_streamparm *parm)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct atomisp_device *isp = video_get_drvdata(vdev);
+
+ if (parm->type != V4L2_BUF_TYPE_VIDEO_OUTPUT) {
+ dev_err(isp->dev, "unsupport v4l2 buf type for output\n");
+ return -EINVAL;
+ }
+
+ rt_mutex_lock(&isp->mutex);
+ isp->sw_contex.file_input = 1;
+ rt_mutex_unlock(&isp->mutex);
+
+ return 0;
+}
+
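+/*
+ * Dispatcher for the private ATOMISP_IOC_* ioctls. Most of them run under
+ * isp->mutex; the sensor pass-through ioctls listed in the first switch
+ * are handled without taking it.
+ */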
+static long atomisp_vidioc_default(struct file *file, void *fh,
+ bool valid_prio, unsigned int cmd, void *arg)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct atomisp_device *isp = video_get_drvdata(vdev);
+ struct atomisp_sub_device *asd;
+ bool acc_node;
+ int err;
+
+ acc_node = !strncmp(vdev->name, "ATOMISP ISP ACC",
+ sizeof(vdev->name));
+ if (acc_node)
+ asd = atomisp_to_acc_pipe(vdev)->asd;
+ else
+ asd = atomisp_to_video_pipe(vdev)->asd;
+
+ switch (cmd) {
+ case ATOMISP_IOC_G_MOTOR_PRIV_INT_DATA:
+ case ATOMISP_IOC_S_EXPOSURE:
+ case ATOMISP_IOC_G_SENSOR_CALIBRATION_GROUP:
+ case ATOMISP_IOC_G_SENSOR_PRIV_INT_DATA:
+ case ATOMISP_IOC_EXT_ISP_CTRL:
+ case ATOMISP_IOC_G_SENSOR_AE_BRACKETING_INFO:
+ case ATOMISP_IOC_S_SENSOR_AE_BRACKETING_MODE:
+ case ATOMISP_IOC_G_SENSOR_AE_BRACKETING_MODE:
+ case ATOMISP_IOC_S_SENSOR_AE_BRACKETING_LUT:
+ case ATOMISP_IOC_S_SENSOR_EE_CONFIG:
+#ifdef ISP2401
+ case ATOMISP_IOC_G_UPDATE_EXPOSURE:
+#endif
+ /* we do not need to take isp->mutex for these IOCTLs */
+ break;
+ default:
+ rt_mutex_lock(&isp->mutex);
+ break;
+ }
+ switch (cmd) {
+#ifdef ISP2401
+ case ATOMISP_IOC_S_SENSOR_RUNMODE:
+ err = atomisp_set_sensor_runmode(asd, arg);
+ break;
+
+#endif
+ case ATOMISP_IOC_G_XNR:
+ err = atomisp_xnr(asd, 0, arg);
+ break;
+
+ case ATOMISP_IOC_S_XNR:
+ err = atomisp_xnr(asd, 1, arg);
+ break;
+
+ case ATOMISP_IOC_G_NR:
+ err = atomisp_nr(asd, 0, arg);
+ break;
+
+ case ATOMISP_IOC_S_NR:
+ err = atomisp_nr(asd, 1, arg);
+ break;
+
+ case ATOMISP_IOC_G_TNR:
+ err = atomisp_tnr(asd, 0, arg);
+ break;
+
+ case ATOMISP_IOC_S_TNR:
+ err = atomisp_tnr(asd, 1, arg);
+ break;
+
+ case ATOMISP_IOC_G_BLACK_LEVEL_COMP:
+ err = atomisp_black_level(asd, 0, arg);
+ break;
+
+ case ATOMISP_IOC_S_BLACK_LEVEL_COMP:
+ err = atomisp_black_level(asd, 1, arg);
+ break;
+
+ case ATOMISP_IOC_G_EE:
+ err = atomisp_ee(asd, 0, arg);
+ break;
+
+ case ATOMISP_IOC_S_EE:
+ err = atomisp_ee(asd, 1, arg);
+ break;
+
+ case ATOMISP_IOC_G_DIS_STAT:
+ err = atomisp_get_dis_stat(asd, arg);
+ break;
+
+ case ATOMISP_IOC_G_DVS2_BQ_RESOLUTIONS:
+ err = atomisp_get_dvs2_bq_resolutions(asd, arg);
+ break;
+
+ case ATOMISP_IOC_S_DIS_COEFS:
+ err = atomisp_css_cp_dvs2_coefs(asd, arg,
+ &asd->params.css_param, true);
+ if (!err && arg)
+ asd->params.css_update_params_needed = true;
+ break;
+
+ case ATOMISP_IOC_S_DIS_VECTOR:
+ err = atomisp_cp_dvs_6axis_config(asd, arg,
+ &asd->params.css_param, true);
+ if (!err && arg)
+ asd->params.css_update_params_needed = true;
+ break;
+
+ case ATOMISP_IOC_G_ISP_PARM:
+ err = atomisp_param(asd, 0, arg);
+ break;
+
+ case ATOMISP_IOC_S_ISP_PARM:
+ err = atomisp_param(asd, 1, arg);
+ break;
+
+ case ATOMISP_IOC_G_3A_STAT:
+ err = atomisp_3a_stat(asd, 0, arg);
+ break;
+
+ case ATOMISP_IOC_G_ISP_GAMMA:
+ err = atomisp_gamma(asd, 0, arg);
+ break;
+
+ case ATOMISP_IOC_S_ISP_GAMMA:
+ err = atomisp_gamma(asd, 1, arg);
+ break;
+
+ case ATOMISP_IOC_G_ISP_GDC_TAB:
+ err = atomisp_gdc_cac_table(asd, 0, arg);
+ break;
+
+ case ATOMISP_IOC_S_ISP_GDC_TAB:
+ err = atomisp_gdc_cac_table(asd, 1, arg);
+ break;
+
+ case ATOMISP_IOC_G_ISP_MACC:
+ err = atomisp_macc_table(asd, 0, arg);
+ break;
+
+ case ATOMISP_IOC_S_ISP_MACC:
+ err = atomisp_macc_table(asd, 1, arg);
+ break;
+
+ case ATOMISP_IOC_G_ISP_BAD_PIXEL_DETECTION:
+ err = atomisp_bad_pixel_param(asd, 0, arg);
+ break;
+
+ case ATOMISP_IOC_S_ISP_BAD_PIXEL_DETECTION:
+ err = atomisp_bad_pixel_param(asd, 1, arg);
+ break;
+
+ case ATOMISP_IOC_G_ISP_FALSE_COLOR_CORRECTION:
+ err = atomisp_false_color_param(asd, 0, arg);
+ break;
+
+ case ATOMISP_IOC_S_ISP_FALSE_COLOR_CORRECTION:
+ err = atomisp_false_color_param(asd, 1, arg);
+ break;
+
+ case ATOMISP_IOC_G_ISP_CTC:
+ err = atomisp_ctc(asd, 0, arg);
+ break;
+
+ case ATOMISP_IOC_S_ISP_CTC:
+ err = atomisp_ctc(asd, 1, arg);
+ break;
+
+ case ATOMISP_IOC_G_ISP_WHITE_BALANCE:
+ err = atomisp_white_balance_param(asd, 0, arg);
+ break;
+
+ case ATOMISP_IOC_S_ISP_WHITE_BALANCE:
+ err = atomisp_white_balance_param(asd, 1, arg);
+ break;
+
+ case ATOMISP_IOC_G_3A_CONFIG:
+ err = atomisp_3a_config_param(asd, 0, arg);
+ break;
+
+ case ATOMISP_IOC_S_3A_CONFIG:
+ err = atomisp_3a_config_param(asd, 1, arg);
+ break;
+
+ case ATOMISP_IOC_S_ISP_FPN_TABLE:
+ err = atomisp_fixed_pattern_table(asd, arg);
+ break;
+
+ case ATOMISP_IOC_ISP_MAKERNOTE:
+ err = atomisp_exif_makernote(asd, arg);
+ break;
+
+ case ATOMISP_IOC_G_SENSOR_MODE_DATA:
+ err = atomisp_get_sensor_mode_data(asd, arg);
+ break;
+
+ case ATOMISP_IOC_G_MOTOR_PRIV_INT_DATA:
+#ifndef ISP2401
+ if (isp->inputs[asd->input_curr].motor)
+#else
+ if (isp->motor)
+#endif
+ err = v4l2_subdev_call(
+#ifndef ISP2401
+ isp->inputs[asd->input_curr].motor,
+#else
+ isp->motor,
+#endif
+ core, ioctl, cmd, arg);
+ else
+ err = v4l2_subdev_call(
+ isp->inputs[asd->input_curr].camera,
+ core, ioctl, cmd, arg);
+ break;
+
+ case ATOMISP_IOC_S_EXPOSURE:
+ case ATOMISP_IOC_G_SENSOR_CALIBRATION_GROUP:
+ case ATOMISP_IOC_G_SENSOR_PRIV_INT_DATA:
+ case ATOMISP_IOC_G_SENSOR_AE_BRACKETING_INFO:
+ case ATOMISP_IOC_S_SENSOR_AE_BRACKETING_MODE:
+ case ATOMISP_IOC_G_SENSOR_AE_BRACKETING_MODE:
+ case ATOMISP_IOC_S_SENSOR_AE_BRACKETING_LUT:
+#ifdef ISP2401
+ case ATOMISP_IOC_G_UPDATE_EXPOSURE:
+#endif
+ err = v4l2_subdev_call(isp->inputs[asd->input_curr].camera,
+ core, ioctl, cmd, arg);
+ break;
+
+ case ATOMISP_IOC_ACC_LOAD:
+ err = atomisp_acc_load(asd, arg);
+ break;
+
+ case ATOMISP_IOC_ACC_LOAD_TO_PIPE:
+ err = atomisp_acc_load_to_pipe(asd, arg);
+ break;
+
+ case ATOMISP_IOC_ACC_UNLOAD:
+ err = atomisp_acc_unload(asd, arg);
+ break;
+
+ case ATOMISP_IOC_ACC_START:
+ err = atomisp_acc_start(asd, arg);
+ break;
+
+ case ATOMISP_IOC_ACC_WAIT:
+ err = atomisp_acc_wait(asd, arg);
+ break;
+
+ case ATOMISP_IOC_ACC_MAP:
+ err = atomisp_acc_map(asd, arg);
+ break;
+
+ case ATOMISP_IOC_ACC_UNMAP:
+ err = atomisp_acc_unmap(asd, arg);
+ break;
+
+ case ATOMISP_IOC_ACC_S_MAPPED_ARG:
+ err = atomisp_acc_s_mapped_arg(asd, arg);
+ break;
+
+ case ATOMISP_IOC_S_ISP_SHD_TAB:
+ err = atomisp_set_shading_table(asd, arg);
+ break;
+
+ case ATOMISP_IOC_G_ISP_GAMMA_CORRECTION:
+ err = atomisp_gamma_correction(asd, 0, arg);
+ break;
+
+ case ATOMISP_IOC_S_ISP_GAMMA_CORRECTION:
+ err = atomisp_gamma_correction(asd, 1, arg);
+ break;
+
+ case ATOMISP_IOC_S_PARAMETERS:
+ err = atomisp_set_parameters(vdev, arg);
+ break;
+
+ case ATOMISP_IOC_S_CONT_CAPTURE_CONFIG:
+ err = atomisp_offline_capture_configure(asd, arg);
+ break;
+ case ATOMISP_IOC_G_METADATA:
+ err = atomisp_get_metadata(asd, 0, arg);
+ break;
+ case ATOMISP_IOC_G_METADATA_BY_TYPE:
+ err = atomisp_get_metadata_by_type(asd, 0, arg);
+ break;
+ case ATOMISP_IOC_EXT_ISP_CTRL:
+ err = v4l2_subdev_call(isp->inputs[asd->input_curr].camera,
+ core, ioctl, cmd, arg);
+ break;
+ case ATOMISP_IOC_EXP_ID_UNLOCK:
+ err = atomisp_exp_id_unlock(asd, arg);
+ break;
+ case ATOMISP_IOC_EXP_ID_CAPTURE:
+ err = atomisp_exp_id_capture(asd, arg);
+ break;
+ case ATOMISP_IOC_S_ENABLE_DZ_CAPT_PIPE:
+ err = atomisp_enable_dz_capt_pipe(asd, arg);
+ break;
+ case ATOMISP_IOC_G_FORMATS_CONFIG:
+ err = atomisp_formats(asd, 0, arg);
+ break;
+
+ case ATOMISP_IOC_S_FORMATS_CONFIG:
+ err = atomisp_formats(asd, 1, arg);
+ break;
+ case ATOMISP_IOC_S_EXPOSURE_WINDOW:
+ err = atomisp_s_ae_window(asd, arg);
+ break;
+ case ATOMISP_IOC_S_ACC_STATE:
+ err = atomisp_acc_set_state(asd, arg);
+ break;
+ case ATOMISP_IOC_G_ACC_STATE:
+ err = atomisp_acc_get_state(asd, arg);
+ break;
+ case ATOMISP_IOC_INJECT_A_FAKE_EVENT:
+ err = atomisp_inject_a_fake_event(asd, arg);
+ break;
+ case ATOMISP_IOC_G_INVALID_FRAME_NUM:
+ err = atomisp_get_invalid_frame_num(vdev, arg);
+ break;
+ case ATOMISP_IOC_S_ARRAY_RESOLUTION:
+ err = atomisp_set_array_res(asd, arg);
+ break;
+ default:
+ err = -EINVAL;
+ break;
+ }
+
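+	/*
+	 * Mirror the lock switch above: the sensor/motor pass-through
+	 * IOCTLs were handled without taking isp->mutex, so only drop
+	 * the lock for the remaining commands.
+	 */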
+ switch (cmd) {
+ case ATOMISP_IOC_G_MOTOR_PRIV_INT_DATA:
+ case ATOMISP_IOC_S_EXPOSURE:
+ case ATOMISP_IOC_G_SENSOR_CALIBRATION_GROUP:
+ case ATOMISP_IOC_G_SENSOR_PRIV_INT_DATA:
+ case ATOMISP_IOC_EXT_ISP_CTRL:
+ case ATOMISP_IOC_G_SENSOR_AE_BRACKETING_INFO:
+ case ATOMISP_IOC_S_SENSOR_AE_BRACKETING_MODE:
+ case ATOMISP_IOC_G_SENSOR_AE_BRACKETING_MODE:
+ case ATOMISP_IOC_S_SENSOR_AE_BRACKETING_LUT:
+#ifdef ISP2401
+ case ATOMISP_IOC_G_UPDATE_EXPOSURE:
+#endif
+ break;
+ default:
+ rt_mutex_unlock(&isp->mutex);
+ break;
+ }
+ return err;
+}
+
+const struct v4l2_ioctl_ops atomisp_ioctl_ops = {
+ .vidioc_querycap = atomisp_querycap,
+ .vidioc_enum_input = atomisp_enum_input,
+ .vidioc_g_input = atomisp_g_input,
+ .vidioc_s_input = atomisp_s_input,
+ .vidioc_queryctrl = atomisp_queryctl,
+ .vidioc_s_ctrl = atomisp_s_ctrl,
+ .vidioc_g_ctrl = atomisp_g_ctrl,
+ .vidioc_s_ext_ctrls = atomisp_s_ext_ctrls,
+ .vidioc_g_ext_ctrls = atomisp_g_ext_ctrls,
+ .vidioc_enum_fmt_vid_cap = atomisp_enum_fmt_cap,
+ .vidioc_try_fmt_vid_cap = atomisp_try_fmt_cap,
+ .vidioc_g_fmt_vid_cap = atomisp_g_fmt_cap,
+ .vidioc_s_fmt_vid_cap = atomisp_s_fmt_cap,
+ .vidioc_reqbufs = atomisp_reqbufs,
+ .vidioc_querybuf = atomisp_querybuf,
+ .vidioc_qbuf = atomisp_qbuf,
+ .vidioc_dqbuf = atomisp_dqbuf,
+ .vidioc_streamon = atomisp_streamon,
+ .vidioc_streamoff = atomisp_streamoff,
+ .vidioc_default = atomisp_vidioc_default,
+ .vidioc_s_parm = atomisp_s_parm,
+ .vidioc_g_parm = atomisp_g_parm,
+};
+
+const struct v4l2_ioctl_ops atomisp_file_ioctl_ops = {
+ .vidioc_querycap = atomisp_querycap,
+ .vidioc_g_fmt_vid_out = atomisp_g_fmt_file,
+ .vidioc_s_fmt_vid_out = atomisp_s_fmt_file,
+ .vidioc_s_parm = atomisp_s_parm_file,
+ .vidioc_reqbufs = atomisp_reqbufs_file,
+ .vidioc_querybuf = atomisp_querybuf_file,
+ .vidioc_qbuf = atomisp_qbuf_file,
+};
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_ioctl.h b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_ioctl.h
new file mode 100644
index 000000000000..fb5fadb5332b
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_ioctl.h
@@ -0,0 +1,73 @@
+/*
+ * Support for Medfield PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
+ *
+ * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+
+#ifndef __ATOMISP_IOCTL_H__
+#define __ATOMISP_IOCTL_H__
+
+#include "ia_css.h"
+
+struct atomisp_device;
+struct atomisp_video_pipe;
+
+extern const struct atomisp_format_bridge atomisp_output_fmts[];
+
+const struct atomisp_format_bridge *atomisp_get_format_bridge(
+ unsigned int pixelformat);
+#ifndef ISP2401
+const struct atomisp_format_bridge *atomisp_get_format_bridge_from_mbus(
+ u32 mbus_code);
+#else
+const struct atomisp_format_bridge *atomisp_get_format_bridge_from_mbus(u32
+ mbus_code);
+#endif
+
+int atomisp_alloc_css_stat_bufs(struct atomisp_sub_device *asd,
+ uint16_t stream_id);
+
+int __atomisp_streamoff(struct file *file, void *fh, enum v4l2_buf_type type);
+int __atomisp_reqbufs(struct file *file, void *fh,
+ struct v4l2_requestbuffers *req);
+
+int atomisp_reqbufs(struct file *file, void *fh,
+ struct v4l2_requestbuffers *req);
+
+enum atomisp_css_pipe_id atomisp_get_css_pipe_id(struct atomisp_sub_device
+ *asd);
+
+void atomisp_videobuf_free_buf(struct videobuf_buffer *vb);
+
+extern const struct v4l2_file_operations atomisp_file_fops;
+
+extern const struct v4l2_ioctl_ops atomisp_ioctl_ops;
+
+extern const struct v4l2_ioctl_ops atomisp_file_ioctl_ops;
+
+unsigned int atomisp_streaming_count(struct atomisp_device *isp);
+
+unsigned int atomisp_is_acc_enabled(struct atomisp_device *isp);
+/* compat_ioctl for 32bit userland app and 64bit kernel */
+long atomisp_compat_ioctl32(struct file *file,
+ unsigned int cmd, unsigned long arg);
+
+int atomisp_stream_on_master_slave_sensor(struct atomisp_device *isp, bool isp_timeout);
+#endif /* __ATOMISP_IOCTL_H__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_subdev.c b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_subdev.c
new file mode 100644
index 000000000000..3d6bb166927c
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_subdev.c
@@ -0,0 +1,1437 @@
+/*
+ * Support for Medfield PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+#include <linux/module.h>
+#include <linux/uaccess.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <asm/intel-mid.h>
+
+#include <media/v4l2-event.h>
+#include <media/v4l2-mediabus.h>
+#include "atomisp_cmd.h"
+#include "atomisp_common.h"
+#include "atomisp_compat.h"
+#include "atomisp_internal.h"
+
+const struct atomisp_in_fmt_conv atomisp_in_fmt_conv[] = {
+ { MEDIA_BUS_FMT_SBGGR8_1X8, 8, 8, ATOMISP_INPUT_FORMAT_RAW_8, CSS_BAYER_ORDER_BGGR, CSS_FORMAT_RAW_8 },
+ { MEDIA_BUS_FMT_SGBRG8_1X8, 8, 8, ATOMISP_INPUT_FORMAT_RAW_8, CSS_BAYER_ORDER_GBRG, CSS_FORMAT_RAW_8 },
+ { MEDIA_BUS_FMT_SGRBG8_1X8, 8, 8, ATOMISP_INPUT_FORMAT_RAW_8, CSS_BAYER_ORDER_GRBG, CSS_FORMAT_RAW_8 },
+ { MEDIA_BUS_FMT_SRGGB8_1X8, 8, 8, ATOMISP_INPUT_FORMAT_RAW_8, CSS_BAYER_ORDER_RGGB, CSS_FORMAT_RAW_8 },
+ { MEDIA_BUS_FMT_SBGGR10_1X10, 10, 10, ATOMISP_INPUT_FORMAT_RAW_10, CSS_BAYER_ORDER_BGGR, CSS_FORMAT_RAW_10 },
+ { MEDIA_BUS_FMT_SGBRG10_1X10, 10, 10, ATOMISP_INPUT_FORMAT_RAW_10, CSS_BAYER_ORDER_GBRG, CSS_FORMAT_RAW_10 },
+ { MEDIA_BUS_FMT_SGRBG10_1X10, 10, 10, ATOMISP_INPUT_FORMAT_RAW_10, CSS_BAYER_ORDER_GRBG, CSS_FORMAT_RAW_10 },
+ { MEDIA_BUS_FMT_SRGGB10_1X10, 10, 10, ATOMISP_INPUT_FORMAT_RAW_10, CSS_BAYER_ORDER_RGGB, CSS_FORMAT_RAW_10 },
+ { MEDIA_BUS_FMT_SBGGR12_1X12, 12, 12, ATOMISP_INPUT_FORMAT_RAW_12, CSS_BAYER_ORDER_BGGR, CSS_FORMAT_RAW_12 },
+ { MEDIA_BUS_FMT_SGBRG12_1X12, 12, 12, ATOMISP_INPUT_FORMAT_RAW_12, CSS_BAYER_ORDER_GBRG, CSS_FORMAT_RAW_12 },
+ { MEDIA_BUS_FMT_SGRBG12_1X12, 12, 12, ATOMISP_INPUT_FORMAT_RAW_12, CSS_BAYER_ORDER_GRBG, CSS_FORMAT_RAW_12 },
+ { MEDIA_BUS_FMT_SRGGB12_1X12, 12, 12, ATOMISP_INPUT_FORMAT_RAW_12, CSS_BAYER_ORDER_RGGB, CSS_FORMAT_RAW_12 },
+ { MEDIA_BUS_FMT_UYVY8_1X16, 8, 8, ATOMISP_INPUT_FORMAT_YUV422_8, 0, IA_CSS_STREAM_FORMAT_YUV422_8 },
+ { MEDIA_BUS_FMT_YUYV8_1X16, 8, 8, ATOMISP_INPUT_FORMAT_YUV422_8, 0, IA_CSS_STREAM_FORMAT_YUV422_8 },
+ { MEDIA_BUS_FMT_JPEG_1X8, 8, 8, CSS_FRAME_FORMAT_BINARY_8, 0, IA_CSS_STREAM_FORMAT_BINARY_8 },
+ { V4L2_MBUS_FMT_CUSTOM_NV12, 12, 12, CSS_FRAME_FORMAT_NV12, 0, CSS_FRAME_FORMAT_NV12 },
+ { V4L2_MBUS_FMT_CUSTOM_NV21, 12, 12, CSS_FRAME_FORMAT_NV21, 0, CSS_FRAME_FORMAT_NV21 },
+ { V4L2_MBUS_FMT_CUSTOM_YUV420, 12, 12, ATOMISP_INPUT_FORMAT_YUV420_8_LEGACY, 0, IA_CSS_STREAM_FORMAT_YUV420_8_LEGACY },
+ { V4L2_MBUS_FMT_CUSTOM_M10MO_RAW, 8, 8, CSS_FRAME_FORMAT_BINARY_8, 0, IA_CSS_STREAM_FORMAT_BINARY_8 },
+ /* no valid V4L2 MBUS code for metadata format, so leave it 0. */
+ { 0, 0, 0, ATOMISP_INPUT_FORMAT_EMBEDDED, 0, IA_CSS_STREAM_FORMAT_EMBEDDED },
+ {}
+};
+
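+/*
+ * Pairs of uncompressed bayer bus codes and their DPCM8-compressed
+ * counterparts; atomisp_subdev_uncompressed_code() maps a compressed
+ * code back to its uncompressed equivalent.
+ */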
+static const struct {
+ u32 code;
+ u32 compressed;
+} compressed_codes[] = {
+ { MEDIA_BUS_FMT_SBGGR10_1X10, MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8 },
+ { MEDIA_BUS_FMT_SGBRG10_1X10, MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8 },
+ { MEDIA_BUS_FMT_SGRBG10_1X10, MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8 },
+ { MEDIA_BUS_FMT_SRGGB10_1X10, MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8 },
+};
+
+u32 atomisp_subdev_uncompressed_code(u32 code)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(compressed_codes); i++)
+ if (code == compressed_codes[i].compressed)
+ return compressed_codes[i].code;
+
+ return code;
+}
+
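+/*
+ * A media bus code counts as compressed when the bits per pixel it uses
+ * on the bus differ from its uncompressed depth in atomisp_in_fmt_conv[].
+ */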
+bool atomisp_subdev_is_compressed(u32 code)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(atomisp_in_fmt_conv) - 1; i++)
+ if (code == atomisp_in_fmt_conv[i].code)
+ return atomisp_in_fmt_conv[i].bpp !=
+ atomisp_in_fmt_conv[i].depth;
+
+ return false;
+}
+
+const struct atomisp_in_fmt_conv *atomisp_find_in_fmt_conv(u32 code)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(atomisp_in_fmt_conv) - 1; i++)
+ if (code == atomisp_in_fmt_conv[i].code)
+ return atomisp_in_fmt_conv + i;
+
+ return NULL;
+}
+
+const struct atomisp_in_fmt_conv *atomisp_find_in_fmt_conv_by_atomisp_in_fmt(
+ enum atomisp_css_stream_format atomisp_in_fmt)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(atomisp_in_fmt_conv) - 1; i++)
+ if (atomisp_in_fmt_conv[i].atomisp_in_fmt == atomisp_in_fmt)
+ return atomisp_in_fmt_conv + i;
+
+ return NULL;
+}
+
+bool atomisp_subdev_format_conversion(struct atomisp_sub_device *asd,
+ unsigned int source_pad)
+{
+ struct v4l2_mbus_framefmt *sink, *src;
+
+ sink = atomisp_subdev_get_ffmt(&asd->subdev, NULL,
+ V4L2_SUBDEV_FORMAT_ACTIVE,
+ ATOMISP_SUBDEV_PAD_SINK);
+ src = atomisp_subdev_get_ffmt(&asd->subdev, NULL,
+ V4L2_SUBDEV_FORMAT_ACTIVE, source_pad);
+
+ return atomisp_is_mbuscode_raw(sink->code)
+ && !atomisp_is_mbuscode_raw(src->code);
+}
+
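+/*
+ * Return the source pad index of the first link attached to the video
+ * device entity, or 0 if no source link is found.
+ */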
+uint16_t atomisp_subdev_source_pad(struct video_device * vdev)
+{
+ struct media_link *link;
+ uint16_t ret = 0;
+ list_for_each_entry(link, &vdev->entity.links, list) {
+ if (link->source) {
+ ret = link->source->index;
+ break;
+ }
+ }
+ return ret;
+}
+
+/*
+ * V4L2 subdev operations
+ */
+
+/*
+ * isp_subdev_ioctl - CCDC module private ioctls
+ * @sd: ISP V4L2 subdevice
+ * @cmd: ioctl command
+ * @arg: ioctl argument
+ *
+ * Return 0 on success or a negative error code otherwise.
+ */
+static long isp_subdev_ioctl(struct v4l2_subdev *sd,
+ unsigned int cmd, void *arg)
+{
+ return 0;
+}
+
+/*
+ * isp_subdev_set_power - Power on/off the CCDC module
+ * @sd: ISP V4L2 subdevice
+ * @on: power on/off
+ *
+ * Return 0 on success or a negative error code otherwise.
+ */
+static int isp_subdev_set_power(struct v4l2_subdev *sd, int on)
+{
+ return 0;
+}
+
+static int isp_subdev_subscribe_event(struct v4l2_subdev *sd,
+ struct v4l2_fh *fh,
+ struct v4l2_event_subscription *sub)
+{
+ struct atomisp_sub_device *isp_sd = v4l2_get_subdevdata(sd);
+ struct atomisp_device *isp = isp_sd->isp;
+
+ if (sub->type != V4L2_EVENT_FRAME_SYNC &&
+ sub->type != V4L2_EVENT_FRAME_END &&
+ sub->type != V4L2_EVENT_ATOMISP_3A_STATS_READY &&
+ sub->type != V4L2_EVENT_ATOMISP_METADATA_READY &&
+ sub->type != V4L2_EVENT_ATOMISP_PAUSE_BUFFER &&
+ sub->type != V4L2_EVENT_ATOMISP_CSS_RESET &&
+ sub->type != V4L2_EVENT_ATOMISP_RAW_BUFFERS_ALLOC_DONE &&
+ sub->type != V4L2_EVENT_ATOMISP_ACC_COMPLETE)
+ return -EINVAL;
+
+ if (sub->type == V4L2_EVENT_FRAME_SYNC &&
+ !atomisp_css_valid_sof(isp))
+ return -EINVAL;
+
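+	/* Queue up to 16 events per subscription. */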
+ return v4l2_event_subscribe(fh, sub, 16, NULL);
+}
+
+static int isp_subdev_unsubscribe_event(struct v4l2_subdev *sd,
+ struct v4l2_fh *fh,
+ struct v4l2_event_subscription *sub)
+{
+ return v4l2_event_unsubscribe(fh, sub);
+}
+
+/*
+ * isp_subdev_enum_mbus_code - Handle pixel format enumeration
+ * @sd: pointer to v4l2 subdev structure
+ * @cfg: V4L2 subdev pad config
+ * @code: pointer to v4l2_subdev_mbus_code_enum structure
+ * return -EINVAL or zero on success
+ */
+static int isp_subdev_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ if (code->index >= ARRAY_SIZE(atomisp_in_fmt_conv) - 1)
+ return -EINVAL;
+
+ code->code = atomisp_in_fmt_conv[code->index].code;
+
+ return 0;
+}
+
+static int isp_subdev_validate_rect(struct v4l2_subdev *sd, uint32_t pad,
+ uint32_t target)
+{
+ switch (pad) {
+ case ATOMISP_SUBDEV_PAD_SINK:
+ switch (target) {
+ case V4L2_SEL_TGT_CROP:
+ return 0;
+ }
+ break;
+ default:
+ switch (target) {
+ case V4L2_SEL_TGT_COMPOSE:
+ return 0;
+ }
+ break;
+ }
+
+ return -EINVAL;
+}
+
+struct v4l2_rect *atomisp_subdev_get_rect(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ uint32_t which, uint32_t pad,
+ uint32_t target)
+{
+ struct atomisp_sub_device *isp_sd = v4l2_get_subdevdata(sd);
+
+ if (which == V4L2_SUBDEV_FORMAT_TRY) {
+ switch (target) {
+ case V4L2_SEL_TGT_CROP:
+ return v4l2_subdev_get_try_crop(sd, cfg, pad);
+ case V4L2_SEL_TGT_COMPOSE:
+ return v4l2_subdev_get_try_compose(sd, cfg, pad);
+ }
+ }
+
+ switch (target) {
+ case V4L2_SEL_TGT_CROP:
+ return &isp_sd->fmt[pad].crop;
+ case V4L2_SEL_TGT_COMPOSE:
+ return &isp_sd->fmt[pad].compose;
+ }
+
+ return NULL;
+}
+
+struct v4l2_mbus_framefmt
+*atomisp_subdev_get_ffmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg, uint32_t which,
+ uint32_t pad)
+{
+ struct atomisp_sub_device *isp_sd = v4l2_get_subdevdata(sd);
+
+ if (which == V4L2_SUBDEV_FORMAT_TRY)
+ return v4l2_subdev_get_try_format(sd, cfg, pad);
+
+ return &isp_sd->fmt[pad].fmt;
+}
+
+static void isp_get_fmt_rect(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg, uint32_t which,
+ struct v4l2_mbus_framefmt **ffmt,
+ struct v4l2_rect *crop[ATOMISP_SUBDEV_PADS_NUM],
+ struct v4l2_rect *comp[ATOMISP_SUBDEV_PADS_NUM])
+{
+ unsigned int i;
+
+ for (i = 0; i < ATOMISP_SUBDEV_PADS_NUM; i++) {
+ ffmt[i] = atomisp_subdev_get_ffmt(sd, cfg, which, i);
+ crop[i] = atomisp_subdev_get_rect(sd, cfg, which, i,
+ V4L2_SEL_TGT_CROP);
+ comp[i] = atomisp_subdev_get_rect(sd, cfg, which, i,
+ V4L2_SEL_TGT_COMPOSE);
+ }
+}
+
+static void isp_subdev_propagate(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ uint32_t which, uint32_t pad, uint32_t target,
+ uint32_t flags)
+{
+ struct v4l2_mbus_framefmt *ffmt[ATOMISP_SUBDEV_PADS_NUM];
+ struct v4l2_rect *crop[ATOMISP_SUBDEV_PADS_NUM],
+ *comp[ATOMISP_SUBDEV_PADS_NUM];
+
+ if (flags & V4L2_SEL_FLAG_KEEP_CONFIG)
+ return;
+
+ isp_get_fmt_rect(sd, cfg, which, ffmt, crop, comp);
+
+ switch (pad) {
+ case ATOMISP_SUBDEV_PAD_SINK: {
+ struct v4l2_rect r = {0};
+
+ /* Only crop target supported on sink pad. */
+ r.width = ffmt[pad]->width;
+ r.height = ffmt[pad]->height;
+
+ atomisp_subdev_set_selection(sd, cfg, which, pad,
+ target, flags, &r);
+ break;
+ }
+ }
+}
+
+static int isp_subdev_get_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_selection *sel)
+{
+ struct v4l2_rect *rec;
+ int rval = isp_subdev_validate_rect(sd, sel->pad, sel->target);
+
+ if (rval)
+ return rval;
+
+ rec = atomisp_subdev_get_rect(sd, cfg, sel->which, sel->pad,
+ sel->target);
+ if (!rec)
+ return -EINVAL;
+
+ sel->r = *rec;
+ return 0;
+}
+
+static char *atomisp_pad_str[] = { "ATOMISP_SUBDEV_PAD_SINK",
+ "ATOMISP_SUBDEV_PAD_SOURCE_CAPTURE",
+ "ATOMISP_SUBDEV_PAD_SOURCE_VF",
+ "ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW",
+ "ATOMISP_SUBDEV_PAD_SOURCE_VIDEO"};
+
+int atomisp_subdev_set_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ uint32_t which, uint32_t pad, uint32_t target,
+ uint32_t flags, struct v4l2_rect *r)
+{
+ struct atomisp_sub_device *isp_sd = v4l2_get_subdevdata(sd);
+ struct atomisp_device *isp = isp_sd->isp;
+ struct v4l2_mbus_framefmt *ffmt[ATOMISP_SUBDEV_PADS_NUM];
+ uint16_t vdev_pad = atomisp_subdev_source_pad(sd->devnode);
+ struct v4l2_rect *crop[ATOMISP_SUBDEV_PADS_NUM],
+ *comp[ATOMISP_SUBDEV_PADS_NUM];
+ enum atomisp_input_stream_id stream_id;
+ unsigned int i;
+ unsigned int padding_w = pad_w;
+ unsigned int padding_h = pad_h;
+
+ stream_id = atomisp_source_pad_to_stream_id(isp_sd, vdev_pad);
+
+ isp_get_fmt_rect(sd, cfg, which, ffmt, crop, comp);
+
+ dev_dbg(isp->dev,
+ "sel: pad %s tgt %s l %d t %d w %d h %d which %s f 0x%8.8x\n",
+ atomisp_pad_str[pad], target == V4L2_SEL_TGT_CROP
+ ? "V4L2_SEL_TGT_CROP" : "V4L2_SEL_TGT_COMPOSE",
+ r->left, r->top, r->width, r->height,
+ which == V4L2_SUBDEV_FORMAT_TRY ? "V4L2_SUBDEV_FORMAT_TRY"
+ : "V4L2_SUBDEV_FORMAT_ACTIVE", flags);
+
+ r->width = rounddown(r->width, ATOM_ISP_STEP_WIDTH);
+ r->height = rounddown(r->height, ATOM_ISP_STEP_HEIGHT);
+
+ switch (pad) {
+ case ATOMISP_SUBDEV_PAD_SINK: {
+ /* Only crop target supported on sink pad. */
+ unsigned int dvs_w, dvs_h;
+
+ crop[pad]->width = ffmt[pad]->width;
+ crop[pad]->height = ffmt[pad]->height;
+
+		/* Workaround for BYT 1080p perfectshot since the maximum resolution of
+		 * the front camera ov2722 is 1932x1092 and it cannot use pad_w > 12 */
+ if (!strncmp(isp->inputs[isp_sd->input_curr].camera->name,
+ "ov2722", 6) && crop[pad]->height == 1092) {
+ padding_w = 12;
+ padding_h = 12;
+ }
+
+ if (isp->inputs[isp_sd->input_curr].type == SOC_CAMERA) {
+ padding_w = 0;
+ padding_h = 0;
+ }
+
+ if (atomisp_subdev_format_conversion(isp_sd,
+ isp_sd->capture_pad)
+		    && crop[pad]->width && crop[pad]->height) {
+			crop[pad]->width -= padding_w;
+			crop[pad]->height -= padding_h;
+		}
+
+		/* if the subdev type is SOC camera, we do not need to set DVS */
+ if (isp->inputs[isp_sd->input_curr].type == SOC_CAMERA)
+ isp_sd->params.video_dis_en = 0;
+
+ if (isp_sd->params.video_dis_en &&
+ isp_sd->run_mode->val == ATOMISP_RUN_MODE_VIDEO &&
+ !isp_sd->continuous_mode->val) {
+ /* This resolution contains 20 % of DVS slack
+ * (of the desired captured image before
+ * scaling, or 1 / 6 of what we get from the
+ * sensor) in both width and height. Remove
+ * it. */
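+			/*
+			 * Worked example (assuming the usual step of 2):
+			 * a 1920x1080 sink crop is reduced to
+			 * roundup(1600, 2) x roundup(900, 2) = 1600x900.
+			 */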
+ crop[pad]->width = roundup(crop[pad]->width * 5 / 6,
+ ATOM_ISP_STEP_WIDTH);
+ crop[pad]->height = roundup(crop[pad]->height * 5 / 6,
+ ATOM_ISP_STEP_HEIGHT);
+ }
+
+ crop[pad]->width = min(crop[pad]->width, r->width);
+ crop[pad]->height = min(crop[pad]->height, r->height);
+
+ if (!(flags & V4L2_SEL_FLAG_KEEP_CONFIG)) {
+ for (i = ATOMISP_SUBDEV_PAD_SOURCE_CAPTURE;
+ i < ATOMISP_SUBDEV_PADS_NUM; i++) {
+ struct v4l2_rect tmp = *crop[pad];
+
+ atomisp_subdev_set_selection(
+ sd, cfg, which, i, V4L2_SEL_TGT_COMPOSE,
+ flags, &tmp);
+ }
+ }
+
+ if (which == V4L2_SUBDEV_FORMAT_TRY)
+ break;
+
+ if (isp_sd->params.video_dis_en &&
+ isp_sd->run_mode->val == ATOMISP_RUN_MODE_VIDEO &&
+ !isp_sd->continuous_mode->val) {
+ dvs_w = rounddown(crop[pad]->width / 5,
+ ATOM_ISP_STEP_WIDTH);
+ dvs_h = rounddown(crop[pad]->height / 5,
+ ATOM_ISP_STEP_HEIGHT);
+ } else if (!isp_sd->params.video_dis_en &&
+ isp_sd->run_mode->val == ATOMISP_RUN_MODE_VIDEO) {
+ /*
+			 * For CSS2.0, digital zoom needs the DVS envelope set
+			 * to 12 when DVS is disabled.
+ */
+ dvs_w = dvs_h = 12;
+ } else
+ dvs_w = dvs_h = 0;
+
+ atomisp_css_video_set_dis_envelope(isp_sd, dvs_w, dvs_h);
+ atomisp_css_input_set_effective_resolution(isp_sd, stream_id,
+ crop[pad]->width, crop[pad]->height);
+
+ break;
+ }
+ case ATOMISP_SUBDEV_PAD_SOURCE_CAPTURE:
+ case ATOMISP_SUBDEV_PAD_SOURCE_VIDEO: {
+ /* Only compose target is supported on source pads. */
+
+ if (isp_sd->vfpp->val == ATOMISP_VFPP_DISABLE_LOWLAT) {
+ /* Scaling is disabled in this mode */
+ r->width = crop[ATOMISP_SUBDEV_PAD_SINK]->width;
+ r->height = crop[ATOMISP_SUBDEV_PAD_SINK]->height;
+ }
+
+ if (crop[ATOMISP_SUBDEV_PAD_SINK]->width == r->width
+ && crop[ATOMISP_SUBDEV_PAD_SINK]->height == r->height)
+ isp_sd->params.yuv_ds_en = false;
+ else
+ isp_sd->params.yuv_ds_en = true;
+
+ comp[pad]->width = r->width;
+ comp[pad]->height = r->height;
+
+ if (r->width == 0 || r->height == 0 ||
+ crop[ATOMISP_SUBDEV_PAD_SINK]->width == 0 ||
+ crop[ATOMISP_SUBDEV_PAD_SINK]->height == 0)
+ break;
+ /*
+		 * Crop the sensor input if the aspect ratio of the requested
+		 * resolution differs from the sensor output resolution ratio:
+ *
+ * ratio = width / height
+ *
+ * if ratio_output < ratio_sensor:
+ * effect_width = sensor_height * out_width / out_height;
+ * effect_height = sensor_height;
+ * else
+ * effect_width = sensor_width;
+ * effect_height = sensor_width * out_height / out_width;
+ *
+ */
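+		/*
+		 * Worked example: with a 1920x1080 (16:9) sink crop and a
+		 * 1440x1080 (4:3) request, ratio_output < ratio_sensor, so
+		 * the effective input becomes
+		 * rounddown(1080 * 1440 / 1080, step) x 1080 = 1440x1080.
+		 */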
+ if (r->width * crop[ATOMISP_SUBDEV_PAD_SINK]->height <
+ crop[ATOMISP_SUBDEV_PAD_SINK]->width * r->height)
+ atomisp_css_input_set_effective_resolution(isp_sd,
+ stream_id,
+ rounddown(crop[ATOMISP_SUBDEV_PAD_SINK]->
+ height * r->width / r->height,
+ ATOM_ISP_STEP_WIDTH),
+ crop[ATOMISP_SUBDEV_PAD_SINK]->height);
+ else
+ atomisp_css_input_set_effective_resolution(isp_sd,
+ stream_id,
+ crop[ATOMISP_SUBDEV_PAD_SINK]->width,
+ rounddown(crop[ATOMISP_SUBDEV_PAD_SINK]->
+ width * r->height / r->width,
+ ATOM_ISP_STEP_WIDTH));
+
+ break;
+ }
+ case ATOMISP_SUBDEV_PAD_SOURCE_VF:
+ case ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW:
+ comp[pad]->width = r->width;
+ comp[pad]->height = r->height;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Set format dimensions on non-sink pads as well. */
+ if (pad != ATOMISP_SUBDEV_PAD_SINK) {
+ ffmt[pad]->width = comp[pad]->width;
+ ffmt[pad]->height = comp[pad]->height;
+ }
+
+ if (!atomisp_subdev_get_rect(sd, cfg, which, pad, target))
+ return -EINVAL;
+ *r = *atomisp_subdev_get_rect(sd, cfg, which, pad, target);
+
+ dev_dbg(isp->dev, "sel actual: l %d t %d w %d h %d\n",
+ r->left, r->top, r->width, r->height);
+
+ return 0;
+}
+
+static int isp_subdev_set_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_selection *sel)
+{
+ int rval = isp_subdev_validate_rect(sd, sel->pad, sel->target);
+ if (rval)
+ return rval;
+
+ return atomisp_subdev_set_selection(sd, cfg, sel->which, sel->pad,
+ sel->target, sel->flags, &sel->r);
+}
+
+static int atomisp_get_sensor_bin_factor(struct atomisp_sub_device *asd)
+{
+ struct v4l2_control ctrl = {0};
+ struct atomisp_device *isp = asd->isp;
+ int hbin, vbin;
+ int ret;
+
+ if (isp->inputs[asd->input_curr].type == FILE_INPUT ||
+ isp->inputs[asd->input_curr].type == TEST_PATTERN)
+ return 0;
+
+ ctrl.id = V4L2_CID_BIN_FACTOR_HORZ;
+ ret =
+ v4l2_g_ctrl(isp->inputs[asd->input_curr].camera->ctrl_handler,
+ &ctrl);
+ hbin = ctrl.value;
+ ctrl.id = V4L2_CID_BIN_FACTOR_VERT;
+ ret |=
+ v4l2_g_ctrl(isp->inputs[asd->input_curr].camera->ctrl_handler,
+ &ctrl);
+ vbin = ctrl.value;
+
+ /*
+	 * The ISP needs to know the binning factor from the sensor.
+	 * If the sensor's horizontal and vertical binning factors differ,
+	 * or the sensor does not support the binning factor CID,
+	 * the ISP applies the default value of 0.
+ */
+ if (ret || hbin != vbin)
+ hbin = 0;
+
+ return hbin;
+}
+
+void atomisp_subdev_set_ffmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg, uint32_t which,
+ uint32_t pad, struct v4l2_mbus_framefmt *ffmt)
+{
+ struct atomisp_sub_device *isp_sd = v4l2_get_subdevdata(sd);
+ struct atomisp_device *isp = isp_sd->isp;
+ struct v4l2_mbus_framefmt *__ffmt =
+ atomisp_subdev_get_ffmt(sd, cfg, which, pad);
+ uint16_t vdev_pad = atomisp_subdev_source_pad(sd->devnode);
+ enum atomisp_input_stream_id stream_id;
+
+ dev_dbg(isp->dev, "ffmt: pad %s w %d h %d code 0x%8.8x which %s\n",
+ atomisp_pad_str[pad], ffmt->width, ffmt->height, ffmt->code,
+ which == V4L2_SUBDEV_FORMAT_TRY ? "V4L2_SUBDEV_FORMAT_TRY"
+ : "V4L2_SUBDEV_FORMAT_ACTIVE");
+
+ stream_id = atomisp_source_pad_to_stream_id(isp_sd, vdev_pad);
+
+ switch (pad) {
+ case ATOMISP_SUBDEV_PAD_SINK: {
+ const struct atomisp_in_fmt_conv *fc =
+ atomisp_find_in_fmt_conv(ffmt->code);
+
+ if (!fc) {
+ fc = atomisp_in_fmt_conv;
+ ffmt->code = fc->code;
+ dev_dbg(isp->dev, "using 0x%8.8x instead\n",
+ ffmt->code);
+ }
+
+ *__ffmt = *ffmt;
+
+ isp_subdev_propagate(sd, cfg, which, pad,
+ V4L2_SEL_TGT_CROP, 0);
+
+ if (which == V4L2_SUBDEV_FORMAT_ACTIVE) {
+ atomisp_css_input_set_resolution(isp_sd,
+ stream_id, ffmt);
+ atomisp_css_input_set_binning_factor(isp_sd,
+ stream_id,
+ atomisp_get_sensor_bin_factor(isp_sd));
+ atomisp_css_input_set_bayer_order(isp_sd, stream_id,
+ fc->bayer_order);
+ atomisp_css_input_set_format(isp_sd, stream_id,
+ fc->css_stream_fmt);
+ atomisp_css_set_default_isys_config(isp_sd, stream_id,
+ ffmt);
+ }
+
+ break;
+ }
+ case ATOMISP_SUBDEV_PAD_SOURCE_CAPTURE:
+ case ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW:
+ case ATOMISP_SUBDEV_PAD_SOURCE_VF:
+ case ATOMISP_SUBDEV_PAD_SOURCE_VIDEO:
+ __ffmt->code = ffmt->code;
+ break;
+ }
+}
+
+/*
+ * isp_subdev_get_format - Retrieve the video format on a pad
+ * @sd : ISP V4L2 subdevice
+ * @cfg: V4L2 subdev pad config
+ * @fmt: Format
+ *
+ * Return 0 on success or -EINVAL if the pad is invalid or doesn't correspond
+ * to the format type.
+ */
+static int isp_subdev_get_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ fmt->format = *atomisp_subdev_get_ffmt(sd, cfg, fmt->which, fmt->pad);
+
+ return 0;
+}
+
+/*
+ * isp_subdev_set_format - Set the video format on a pad
+ * @sd : ISP subdev V4L2 subdevice
+ * @cfg: V4L2 subdev pad config
+ * @fmt: Format
+ *
+ * Return 0 on success or -EINVAL if the pad is invalid or doesn't correspond
+ * to the format type.
+ */
+static int isp_subdev_set_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ atomisp_subdev_set_ffmt(sd, cfg, fmt->which, fmt->pad, &fmt->format);
+
+ return 0;
+}
+
+/* V4L2 subdev core operations */
+static const struct v4l2_subdev_core_ops isp_subdev_v4l2_core_ops = {
+ .ioctl = isp_subdev_ioctl, .s_power = isp_subdev_set_power,
+ .subscribe_event = isp_subdev_subscribe_event,
+ .unsubscribe_event = isp_subdev_unsubscribe_event,
+};
+
+/* V4L2 subdev pad operations */
+static const struct v4l2_subdev_pad_ops isp_subdev_v4l2_pad_ops = {
+ .enum_mbus_code = isp_subdev_enum_mbus_code,
+ .get_fmt = isp_subdev_get_format,
+ .set_fmt = isp_subdev_set_format,
+ .get_selection = isp_subdev_get_selection,
+ .set_selection = isp_subdev_set_selection,
+ .link_validate = v4l2_subdev_link_validate_default,
+};
+
+/* V4L2 subdev operations */
+static const struct v4l2_subdev_ops isp_subdev_v4l2_ops = {
+ .core = &isp_subdev_v4l2_core_ops,
+ .pad = &isp_subdev_v4l2_pad_ops,
+};
+
+static void isp_subdev_init_params(struct atomisp_sub_device *asd)
+{
+ unsigned int i;
+
+ /* parameters initialization */
+ INIT_LIST_HEAD(&asd->s3a_stats);
+ INIT_LIST_HEAD(&asd->s3a_stats_in_css);
+ INIT_LIST_HEAD(&asd->s3a_stats_ready);
+ INIT_LIST_HEAD(&asd->dis_stats);
+ INIT_LIST_HEAD(&asd->dis_stats_in_css);
+ spin_lock_init(&asd->dis_stats_lock);
+ for (i = 0; i < ATOMISP_METADATA_TYPE_NUM; i++) {
+ INIT_LIST_HEAD(&asd->metadata[i]);
+ INIT_LIST_HEAD(&asd->metadata_in_css[i]);
+ INIT_LIST_HEAD(&asd->metadata_ready[i]);
+ }
+}
+
+/*
+ * isp_subdev_link_setup - Setup isp subdev connections
+ * @entity: ispsubdev media entity
+ * @local: Pad at the local end of the link
+ * @remote: Pad at the remote end of the link
+ * @flags: Link flags
+ *
+ * return -EINVAL or zero on success
+ */
+static int isp_subdev_link_setup(struct media_entity *entity,
+ const struct media_pad *local,
+ const struct media_pad *remote, u32 flags)
+{
+ struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
+ struct atomisp_sub_device *isp_sd = v4l2_get_subdevdata(sd);
+ struct atomisp_device *isp = isp_sd->isp;
+ unsigned int i;
+
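+	/*
+	 * The switch key ORs the local pad index with a flag describing
+	 * whether the remote entity is a V4L2 subdev, so each case below
+	 * covers one (pad, remote entity type) combination.
+	 */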
+ switch (local->index | is_media_entity_v4l2_subdev(remote->entity)) {
+ case ATOMISP_SUBDEV_PAD_SINK | MEDIA_ENT_F_V4L2_SUBDEV_UNKNOWN:
+ /* Read from the sensor CSI2-ports. */
+ if (!(flags & MEDIA_LNK_FL_ENABLED)) {
+ isp_sd->input = ATOMISP_SUBDEV_INPUT_NONE;
+ break;
+ }
+
+ if (isp_sd->input != ATOMISP_SUBDEV_INPUT_NONE)
+ return -EBUSY;
+
+ for (i = 0; i < ATOMISP_CAMERA_NR_PORTS; i++) {
+ if (remote->entity != &isp->csi2_port[i].subdev.entity)
+ continue;
+
+ isp_sd->input = ATOMISP_SUBDEV_INPUT_CSI2_PORT1 + i;
+ return 0;
+ }
+
+ return -EINVAL;
+
+ case ATOMISP_SUBDEV_PAD_SINK | MEDIA_ENT_F_OLD_BASE:
+ /* read from memory */
+ if (flags & MEDIA_LNK_FL_ENABLED) {
+ if (isp_sd->input >= ATOMISP_SUBDEV_INPUT_CSI2_PORT1 &&
+ isp_sd->input < (ATOMISP_SUBDEV_INPUT_CSI2_PORT1
+ + ATOMISP_CAMERA_NR_PORTS))
+ return -EBUSY;
+ isp_sd->input = ATOMISP_SUBDEV_INPUT_MEMORY;
+ } else {
+ if (isp_sd->input == ATOMISP_SUBDEV_INPUT_MEMORY)
+ isp_sd->input = ATOMISP_SUBDEV_INPUT_NONE;
+ }
+ break;
+
+ case ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW | MEDIA_ENT_F_OLD_BASE:
+ /* always write to memory */
+ break;
+
+ case ATOMISP_SUBDEV_PAD_SOURCE_VF | MEDIA_ENT_F_OLD_BASE:
+ /* always write to memory */
+ break;
+
+ case ATOMISP_SUBDEV_PAD_SOURCE_CAPTURE | MEDIA_ENT_F_OLD_BASE:
+ /* always write to memory */
+ break;
+
+ case ATOMISP_SUBDEV_PAD_SOURCE_VIDEO | MEDIA_ENT_F_OLD_BASE:
+ /* always write to memory */
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* media operations */
+static const struct media_entity_operations isp_subdev_media_ops = {
+ .link_setup = isp_subdev_link_setup,
+ .link_validate = v4l2_subdev_link_validate,
+/* .set_power = v4l2_subdev_set_power, */
+};
+
+static int __atomisp_update_run_mode(struct atomisp_sub_device *asd)
+{
+ struct atomisp_device *isp = asd->isp;
+ struct v4l2_ctrl *ctrl = asd->run_mode;
+ struct v4l2_ctrl *c;
+ struct v4l2_streamparm p = {0};
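+	/*
+	 * modes[] maps the ATOMISP_RUN_MODE_* values (used as indexes) to
+	 * the legacy CI_MODE_* capturemode values consumed by the obsolete
+	 * s_parm fallback below.
+	 */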
+ int modes[] = { CI_MODE_NONE,
+ CI_MODE_VIDEO,
+ CI_MODE_STILL_CAPTURE,
+ CI_MODE_CONTINUOUS,
+ CI_MODE_PREVIEW };
+ s32 mode;
+
+ if (ctrl->val != ATOMISP_RUN_MODE_VIDEO &&
+ asd->continuous_mode->val)
+ mode = ATOMISP_RUN_MODE_PREVIEW;
+ else
+ mode = ctrl->val;
+
+ c = v4l2_ctrl_find(
+ isp->inputs[asd->input_curr].camera->ctrl_handler,
+ V4L2_CID_RUN_MODE);
+
+ if (c)
+ return v4l2_ctrl_s_ctrl(c, mode);
+
+ /* Fall back to obsolete s_parm */
+ p.parm.capture.capturemode = modes[mode];
+
+ return v4l2_subdev_call(
+ isp->inputs[asd->input_curr].camera, video, s_parm, &p);
+}
+
+int atomisp_update_run_mode(struct atomisp_sub_device *asd)
+{
+ int rval;
+
+ mutex_lock(asd->ctrl_handler.lock);
+ rval = __atomisp_update_run_mode(asd);
+ mutex_unlock(asd->ctrl_handler.lock);
+
+ return rval;
+}
+
+static int s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct atomisp_sub_device *asd = container_of(
+ ctrl->handler, struct atomisp_sub_device, ctrl_handler);
+
+ switch (ctrl->id) {
+ case V4L2_CID_RUN_MODE:
+ return __atomisp_update_run_mode(asd);
+ case V4L2_CID_DEPTH_MODE:
+ if (asd->streaming != ATOMISP_DEVICE_STREAMING_DISABLED) {
+			dev_err(asd->isp->dev, "ISP is streaming, changing the depth mode is not supported\n");
+ return -EINVAL;
+ }
+ break;
+ }
+
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops ctrl_ops = {
+ .s_ctrl = &s_ctrl,
+};
+
+static const struct v4l2_ctrl_config ctrl_fmt_auto = {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_FMT_AUTO,
+ .name = "Automatic format guessing",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .min = 0,
+ .max = 1,
+ .step = 1,
+ .def = 1,
+};
+
+static const char * const ctrl_run_mode_menu[] = {
+ NULL,
+ "Video",
+ "Still capture",
+ "Continuous capture",
+ "Preview",
+};
+
+static const struct v4l2_ctrl_config ctrl_run_mode = {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_RUN_MODE,
+ .name = "Atomisp run mode",
+ .type = V4L2_CTRL_TYPE_MENU,
+ .min = 1,
+ .def = 1,
+ .max = 4,
+ .qmenu = ctrl_run_mode_menu,
+};
+
+static const char * const ctrl_vfpp_mode_menu[] = {
+ "Enable", /* vfpp always enabled */
+ "Disable to scaler mode", /* CSS into video mode and disable */
+ "Disable to low latency mode", /* CSS into still mode and disable */
+};
+
+static const struct v4l2_ctrl_config ctrl_vfpp = {
+ .id = V4L2_CID_VFPP,
+ .name = "Atomisp vf postprocess",
+ .type = V4L2_CTRL_TYPE_MENU,
+ .min = 0,
+ .def = 0,
+ .max = 2,
+ .qmenu = ctrl_vfpp_mode_menu,
+};
+
+/*
+ * Control for ISP continuous mode
+ *
+ * When enabled, capture processing is possible without
+ * stopping the preview pipeline. When disabled, ISP needs
+ * to be restarted between preview and capture.
+ */
+static const struct v4l2_ctrl_config ctrl_continuous_mode = {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_ATOMISP_CONTINUOUS_MODE,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "Continuous mode",
+ .min = 0,
+ .max = 1,
+ .step = 1,
+ .def = 0,
+};
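+
+/*
+ * Illustrative only (not part of this driver): a minimal userspace sketch,
+ * assuming "fd" is an open atomisp video node exposing this control, of how
+ * the boolean could be toggled before streaming:
+ *
+ *	struct v4l2_control c = {
+ *		.id = V4L2_CID_ATOMISP_CONTINUOUS_MODE,
+ *		.value = 1,
+ *	};
+ *	if (ioctl(fd, VIDIOC_S_CTRL, &c) < 0)
+ *		perror("VIDIOC_S_CTRL");
+ */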
+
+/*
+ * Control for continuous mode raw buffer size
+ *
+ * The size of the RAW ringbuffer sets a limit on how far
+ * back in time the application can go when requesting capture
+ * frames to be rendered, and on how many frames can be rendered
+ * in a burst at full sensor rate.
+ *
+ * Note: this setting has a big impact on memory consumption of
+ * the CSS subsystem.
+ */
+static const struct v4l2_ctrl_config ctrl_continuous_raw_buffer_size = {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_ATOMISP_CONTINUOUS_RAW_BUFFER_SIZE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Continuous raw ringbuffer size",
+ .min = 1,
+ .max = 100, /* depends on CSS version, runtime checked */
+ .step = 1,
+ .def = 3,
+};
+
+/*
+ * Control for enabling continuous viewfinder
+ *
+ * When enabled, and the ISP is in continuous mode (see ctrl_continuous_mode),
+ * preview pipeline continues concurrently with capture
+ * processing. When disabled, and continuous mode is used,
+ * preview is paused while captures are processed, but
+ * full pipeline restart is not needed.
+ *
+ * By setting this to disabled, capture processing is
+ * essentially given priority over preview, and the effective
+ * capture output rate may be higher than with continuous
+ * viewfinder enabled.
+ */
+static const struct v4l2_ctrl_config ctrl_continuous_viewfinder = {
+ .id = V4L2_CID_ATOMISP_CONTINUOUS_VIEWFINDER,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "Continuous viewfinder",
+ .min = 0,
+ .max = 1,
+ .step = 1,
+ .def = 0,
+};
+
+/*
+ * Control for enabling Lock&Unlock Raw Buffer mechanism
+ *
+ * When enabled, a raw buffer can be locked and unlocked.
+ * The application can hold the exp_id of a raw buffer
+ * and unlock it when it is no longer needed.
+ * Note: Make sure to set this configuration before creating the stream.
+ */
+static const struct v4l2_ctrl_config ctrl_enable_raw_buffer_lock = {
+ .id = V4L2_CID_ENABLE_RAW_BUFFER_LOCK,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "Lock Unlock Raw Buffer",
+ .min = 0,
+ .max = 1,
+ .step = 1,
+ .def = 0,
+};
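+
+/*
+ * Illustrative only: with this control enabled before stream creation, a
+ * hypothetical application could keep the exp_id reported for a locked raw
+ * frame and later release it through ATOMISP_IOC_EXP_ID_UNLOCK (handled by
+ * atomisp_exp_id_unlock() in atomisp_ioctl.c), e.g.:
+ *
+ *	int exp_id = ...;	(taken from the dequeued buffer metadata)
+ *	ioctl(fd, ATOMISP_IOC_EXP_ID_UNLOCK, &exp_id);
+ */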
+
+/*
+ * Control to disable digital zoom of the whole stream
+ *
+ * When it is true, the pipe configuration enable_dz will be set to false.
+ * This can help improve performance by disabling the pp binary.
+ *
+ * Note: Make sure to set this configuration before creating the stream.
+ */
+static const struct v4l2_ctrl_config ctrl_disable_dz = {
+ .id = V4L2_CID_DISABLE_DZ,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "Disable digital zoom",
+ .min = 0,
+ .max = 1,
+ .step = 1,
+ .def = 0,
+};
+
+/*
+ * Control for ISP depth mode
+ *
+ * When enabled, the ISP will deal with dual streams and the sensors will be
+ * in slave/master mode.
+ * The slave sensor will have no output until the master sensor is streamed on.
+ */
+static const struct v4l2_ctrl_config ctrl_depth_mode = {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_DEPTH_MODE,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "Depth mode",
+ .min = 0,
+ .max = 1,
+ .step = 1,
+ .def = 0,
+};
+
+#ifdef ISP2401
+/*
+ * Control for selecting the ISP version
+ *
+ * When enabled, ISP2.7 will be used; when disabled, the
+ * ISP will default to ISP2.2.
+ * Note: Make sure to set this configuration before creating the stream.
+ */
+static const struct v4l2_ctrl_config ctrl_select_isp_version = {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_ATOMISP_SELECT_ISP_VERSION,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "Select Isp version",
+ .min = 0,
+ .max = 1,
+ .step = 1,
+ .def = 0,
+};
+
+#ifdef CONFIG_ION
+/*
+ * Control for ISP ion device fd
+ *
+ * Userspace will open the ion device and pass the fd to the kernel.
+ * This fd will be used to map a shared fd to a buffer.
+ */
+static const struct v4l2_ctrl_config ctrl_ion_dev_fd = {
+ .ops = &ctrl_ops,
+ .id = V4L2_CID_ATOMISP_ION_DEVICE_FD,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Ion Device Fd",
+ .min = -1,
+ .max = 1024,
+ .step = 1,
+ .def = ION_FD_UNSET
+};
+#endif
+
+#endif
+static void atomisp_init_subdev_pipe(struct atomisp_sub_device *asd,
+ struct atomisp_video_pipe *pipe, enum v4l2_buf_type buf_type)
+{
+ pipe->type = buf_type;
+ pipe->asd = asd;
+ pipe->isp = asd->isp;
+ spin_lock_init(&pipe->irq_lock);
+ INIT_LIST_HEAD(&pipe->activeq);
+ INIT_LIST_HEAD(&pipe->activeq_out);
+ INIT_LIST_HEAD(&pipe->buffers_waiting_for_param);
+ INIT_LIST_HEAD(&pipe->per_frame_params);
+ memset(pipe->frame_request_config_id,
+ 0, VIDEO_MAX_FRAME * sizeof(unsigned int));
+ memset(pipe->frame_params,
+ 0, VIDEO_MAX_FRAME *
+ sizeof(struct atomisp_css_params_with_list *));
+}
+
+static void atomisp_init_acc_pipe(struct atomisp_sub_device *asd,
+ struct atomisp_acc_pipe *pipe)
+{
+ pipe->asd = asd;
+ pipe->isp = asd->isp;
+ INIT_LIST_HEAD(&asd->acc.fw);
+ INIT_LIST_HEAD(&asd->acc.memory_maps);
+ ida_init(&asd->acc.ida);
+}
+
+/*
+ * isp_subdev_init_entities - Initialize V4L2 subdev and media entity
+ * @asd: ISP CCDC module
+ *
+ * Return 0 on success and a negative error code on failure.
+ */
+static int isp_subdev_init_entities(struct atomisp_sub_device *asd)
+{
+ struct v4l2_subdev *sd = &asd->subdev;
+ struct media_pad *pads = asd->pads;
+ struct media_entity *me = &sd->entity;
+ int ret;
+
+ asd->input = ATOMISP_SUBDEV_INPUT_NONE;
+
+ v4l2_subdev_init(sd, &isp_subdev_v4l2_ops);
+ sprintf(sd->name, "ATOMISP_SUBDEV_%d", asd->index);
+ v4l2_set_subdevdata(sd, asd);
+ sd->flags |= V4L2_SUBDEV_FL_HAS_EVENTS | V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+ pads[ATOMISP_SUBDEV_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ pads[ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW].flags = MEDIA_PAD_FL_SOURCE;
+ pads[ATOMISP_SUBDEV_PAD_SOURCE_VF].flags = MEDIA_PAD_FL_SOURCE;
+ pads[ATOMISP_SUBDEV_PAD_SOURCE_CAPTURE].flags = MEDIA_PAD_FL_SOURCE;
+ pads[ATOMISP_SUBDEV_PAD_SOURCE_VIDEO].flags = MEDIA_PAD_FL_SOURCE;
+
+ asd->fmt[ATOMISP_SUBDEV_PAD_SINK].fmt.code =
+ MEDIA_BUS_FMT_SBGGR10_1X10;
+ asd->fmt[ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW].fmt.code =
+ MEDIA_BUS_FMT_SBGGR10_1X10;
+ asd->fmt[ATOMISP_SUBDEV_PAD_SOURCE_VF].fmt.code =
+ MEDIA_BUS_FMT_SBGGR10_1X10;
+ asd->fmt[ATOMISP_SUBDEV_PAD_SOURCE_CAPTURE].fmt.code =
+ MEDIA_BUS_FMT_SBGGR10_1X10;
+ asd->fmt[ATOMISP_SUBDEV_PAD_SOURCE_VIDEO].fmt.code =
+ MEDIA_BUS_FMT_SBGGR10_1X10;
+
+ me->ops = &isp_subdev_media_ops;
+ me->function = MEDIA_ENT_F_V4L2_SUBDEV_UNKNOWN;
+ ret = media_entity_pads_init(me, ATOMISP_SUBDEV_PADS_NUM, pads);
+ if (ret < 0)
+ return ret;
+
+ atomisp_init_subdev_pipe(asd, &asd->video_in,
+ V4L2_BUF_TYPE_VIDEO_OUTPUT);
+
+ atomisp_init_subdev_pipe(asd, &asd->video_out_preview,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE);
+
+ atomisp_init_subdev_pipe(asd, &asd->video_out_vf,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE);
+
+ atomisp_init_subdev_pipe(asd, &asd->video_out_capture,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE);
+
+ atomisp_init_subdev_pipe(asd, &asd->video_out_video_capture,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE);
+
+ atomisp_init_acc_pipe(asd, &asd->video_acc);
+
+ ret = atomisp_video_init(&asd->video_in, "MEMORY");
+ if (ret < 0)
+ return ret;
+
+ ret = atomisp_video_init(&asd->video_out_capture, "CAPTURE");
+ if (ret < 0)
+ return ret;
+
+ ret = atomisp_video_init(&asd->video_out_vf, "VIEWFINDER");
+ if (ret < 0)
+ return ret;
+
+ ret = atomisp_video_init(&asd->video_out_preview, "PREVIEW");
+ if (ret < 0)
+ return ret;
+
+ ret = atomisp_video_init(&asd->video_out_video_capture, "VIDEO");
+ if (ret < 0)
+ return ret;
+
+ atomisp_acc_init(&asd->video_acc, "ACC");
+
+ ret = v4l2_ctrl_handler_init(&asd->ctrl_handler, 1);
+ if (ret)
+ return ret;
+
+ asd->fmt_auto = v4l2_ctrl_new_custom(&asd->ctrl_handler,
+ &ctrl_fmt_auto, NULL);
+ asd->run_mode = v4l2_ctrl_new_custom(&asd->ctrl_handler,
+ &ctrl_run_mode, NULL);
+ asd->vfpp = v4l2_ctrl_new_custom(&asd->ctrl_handler,
+ &ctrl_vfpp, NULL);
+ asd->continuous_mode = v4l2_ctrl_new_custom(&asd->ctrl_handler,
+ &ctrl_continuous_mode, NULL);
+ asd->continuous_viewfinder = v4l2_ctrl_new_custom(&asd->ctrl_handler,
+ &ctrl_continuous_viewfinder,
+ NULL);
+ asd->continuous_raw_buffer_size =
+ v4l2_ctrl_new_custom(&asd->ctrl_handler,
+ &ctrl_continuous_raw_buffer_size,
+ NULL);
+
+ asd->enable_raw_buffer_lock =
+ v4l2_ctrl_new_custom(&asd->ctrl_handler,
+ &ctrl_enable_raw_buffer_lock,
+ NULL);
+ asd->depth_mode =
+ v4l2_ctrl_new_custom(&asd->ctrl_handler,
+ &ctrl_depth_mode,
+ NULL);
+ asd->disable_dz =
+ v4l2_ctrl_new_custom(&asd->ctrl_handler,
+ &ctrl_disable_dz,
+ NULL);
+#ifdef ISP2401
+ asd->select_isp_version =
+ v4l2_ctrl_new_custom(&asd->ctrl_handler,
+ &ctrl_select_isp_version,
+ NULL);
+
+#ifdef CONFIG_ION
+ asd->ion_dev_fd =
+ v4l2_ctrl_new_custom(&asd->ctrl_handler,
+ &ctrl_ion_dev_fd,
+ NULL);
+#endif
+#endif
+
+ /* Make controls visible on subdev as well. */
+ asd->subdev.ctrl_handler = &asd->ctrl_handler;
+ spin_lock_init(&asd->raw_buffer_bitmap_lock);
+ return asd->ctrl_handler.error;
+}
+
+int atomisp_create_pads_links(struct atomisp_device *isp)
+{
+ struct atomisp_sub_device *asd;
+ int i, j, ret = 0;
+ isp->num_of_streams = isp->media_dev.driver_version >=
+ ATOMISP_CSS_VERSION_20 ? 2 : 1;
+ for (i = 0; i < ATOMISP_CAMERA_NR_PORTS; i++) {
+ for (j = 0; j < isp->num_of_streams; j++) {
+ ret =
+ media_create_pad_link(&isp->csi2_port[i].subdev.
+ entity, CSI2_PAD_SOURCE,
+ &isp->asd[j].subdev.entity,
+ ATOMISP_SUBDEV_PAD_SINK, 0);
+ if (ret < 0)
+ return ret;
+ }
+ }
+ for (i = 0; i < isp->input_cnt - 2; i++) {
+ ret = media_create_pad_link(&isp->inputs[i].camera->entity, 0,
+ &isp->csi2_port[isp->inputs[i].
+ port].subdev.entity,
+ CSI2_PAD_SINK,
+ MEDIA_LNK_FL_ENABLED |
+ MEDIA_LNK_FL_IMMUTABLE);
+ if (ret < 0)
+ return ret;
+ }
+ for (i = 0; i < isp->num_of_streams; i++) {
+ asd = &isp->asd[i];
+ ret = media_create_pad_link(&asd->subdev.entity,
+ ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW,
+ &asd->video_out_preview.vdev.entity,
+ 0, 0);
+ if (ret < 0)
+ return ret;
+ ret = media_create_pad_link(&asd->subdev.entity,
+ ATOMISP_SUBDEV_PAD_SOURCE_VF,
+ &asd->video_out_vf.vdev.entity, 0,
+ 0);
+ if (ret < 0)
+ return ret;
+ ret = media_create_pad_link(&asd->subdev.entity,
+ ATOMISP_SUBDEV_PAD_SOURCE_CAPTURE,
+ &asd->video_out_capture.vdev.entity,
+ 0, 0);
+ if (ret < 0)
+ return ret;
+ ret = media_create_pad_link(&asd->subdev.entity,
+ ATOMISP_SUBDEV_PAD_SOURCE_VIDEO,
+ &asd->video_out_video_capture.vdev.
+ entity, 0, 0);
+ if (ret < 0)
+ return ret;
+ /*
+		 * file input is only supported on subdev0,
+		 * so do not create a pad link for subdevs other than subdev0
+ */
+ if (asd->index)
+ return 0;
+ ret = media_create_pad_link(&asd->video_in.vdev.entity,
+ 0, &asd->subdev.entity,
+ ATOMISP_SUBDEV_PAD_SINK, 0);
+ if (ret < 0)
+ return ret;
+ }
+ return 0;
+}
+
+static void atomisp_subdev_cleanup_entities(struct atomisp_sub_device *asd)
+{
+ v4l2_ctrl_handler_free(&asd->ctrl_handler);
+
+ media_entity_cleanup(&asd->subdev.entity);
+}
+
+void atomisp_subdev_cleanup_pending_events(struct atomisp_sub_device *asd)
+{
+ struct v4l2_fh *fh, *fh_tmp;
+ struct v4l2_event event;
+ unsigned int i, pending_event;
+
+ list_for_each_entry_safe(fh, fh_tmp,
+ &asd->subdev.devnode->fh_list, list) {
+ pending_event = v4l2_event_pending(fh);
+ for (i = 0; i < pending_event; i++)
+ v4l2_event_dequeue(fh, &event, 1);
+ }
+}
+
+void atomisp_subdev_unregister_entities(struct atomisp_sub_device *asd)
+{
+ atomisp_subdev_cleanup_entities(asd);
+ v4l2_device_unregister_subdev(&asd->subdev);
+ atomisp_video_unregister(&asd->video_in);
+ atomisp_video_unregister(&asd->video_out_preview);
+ atomisp_video_unregister(&asd->video_out_vf);
+ atomisp_video_unregister(&asd->video_out_capture);
+ atomisp_video_unregister(&asd->video_out_video_capture);
+ atomisp_acc_unregister(&asd->video_acc);
+}
+
+int atomisp_subdev_register_entities(struct atomisp_sub_device *asd,
+ struct v4l2_device *vdev)
+{
+ int ret;
+
+ /* Register the subdev and video node. */
+ ret = v4l2_device_register_subdev(vdev, &asd->subdev);
+ if (ret < 0)
+ goto error;
+
+ ret = atomisp_video_register(&asd->video_out_capture, vdev);
+ if (ret < 0)
+ goto error;
+
+ ret = atomisp_video_register(&asd->video_out_vf, vdev);
+ if (ret < 0)
+ goto error;
+
+ ret = atomisp_video_register(&asd->video_out_preview, vdev);
+ if (ret < 0)
+ goto error;
+
+ ret = atomisp_video_register(&asd->video_out_video_capture, vdev);
+ if (ret < 0)
+ goto error;
+
+ ret = atomisp_acc_register(&asd->video_acc, vdev);
+ if (ret < 0)
+ goto error;
+
+ /*
+	 * file input is only supported on subdev0,
+	 * so do not create a video node for subdevs other than subdev0
+ */
+ if (asd->index)
+ return 0;
+ ret = atomisp_video_register(&asd->video_in, vdev);
+ if (ret < 0)
+ goto error;
+
+ return 0;
+
+error:
+ atomisp_subdev_unregister_entities(asd);
+ return ret;
+}
+
+/*
+ * atomisp_subdev_init - ISP Subdevice initialization.
+ * @dev: Device pointer specific to the ATOM ISP.
+ *
+ * TODO: Get the initialisation values from platform data.
+ *
+ * Return 0 on success or a negative error code otherwise.
+ */
+int atomisp_subdev_init(struct atomisp_device *isp)
+{
+ struct atomisp_sub_device *asd;
+ int i, ret = 0;
+
+ /*
+	 * CSS2.0 running on ISP2400 supports
+	 * multiple streams
+ */
+ isp->num_of_streams = isp->media_dev.driver_version >=
+ ATOMISP_CSS_VERSION_20 ? 2 : 1;
+ isp->asd = devm_kzalloc(isp->dev, sizeof(struct atomisp_sub_device) *
+ isp->num_of_streams, GFP_KERNEL);
+ if (!isp->asd)
+ return -ENOMEM;
+ for (i = 0; i < isp->num_of_streams; i++) {
+ asd = &isp->asd[i];
+ spin_lock_init(&asd->lock);
+ asd->isp = isp;
+ isp_subdev_init_params(asd);
+ asd->index = i;
+ ret = isp_subdev_init_entities(asd);
+ if (ret < 0) {
+ atomisp_subdev_cleanup_entities(asd);
+ break;
+ }
+ }
+
+ return ret;
+}
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_subdev.h b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_subdev.h
new file mode 100644
index 000000000000..ba5c2ab14253
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_subdev.h
@@ -0,0 +1,471 @@
+/*
+ * Support for Medfield PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+#ifndef __ATOMISP_SUBDEV_H__
+#define __ATOMISP_SUBDEV_H__
+
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-subdev.h>
+#include <media/videobuf-core.h>
+
+#include "atomisp_common.h"
+#include "atomisp_compat.h"
+#include "atomisp_v4l2.h"
+
+#include "ia_css.h"
+
+/* EXP_ID's range is 1 ~ 250 */
+#define ATOMISP_MAX_EXP_ID (250)
+enum atomisp_subdev_input_entity {
+ ATOMISP_SUBDEV_INPUT_NONE,
+ ATOMISP_SUBDEV_INPUT_MEMORY,
+ ATOMISP_SUBDEV_INPUT_CSI2,
+ /*
+	 * The following CSI2 port enumerators must stay contiguous.
+	 * Otherwise the code logic breaks.
+ */
+ ATOMISP_SUBDEV_INPUT_CSI2_PORT1,
+ ATOMISP_SUBDEV_INPUT_CSI2_PORT2,
+ ATOMISP_SUBDEV_INPUT_CSI2_PORT3,
+};
+
+#define ATOMISP_SUBDEV_PAD_SINK 0
+/* capture output for still frames */
+#define ATOMISP_SUBDEV_PAD_SOURCE_CAPTURE 1
+/* viewfinder output for downscaled capture output */
+#define ATOMISP_SUBDEV_PAD_SOURCE_VF 2
+/* preview output for display */
+#define ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW 3
+/* main output for video pipeline */
+#define ATOMISP_SUBDEV_PAD_SOURCE_VIDEO 4
+#define ATOMISP_SUBDEV_PADS_NUM 5
+
+struct atomisp_in_fmt_conv {
+ u32 code;
+ uint8_t bpp; /* bits per pixel */
+ uint8_t depth; /* uncompressed */
+ enum atomisp_css_stream_format atomisp_in_fmt;
+ enum atomisp_css_bayer_order bayer_order;
+ enum ia_css_stream_format css_stream_fmt;
+};
+
+struct atomisp_sub_device;
+
+struct atomisp_video_pipe {
+ struct video_device vdev;
+ enum v4l2_buf_type type;
+ struct media_pad pad;
+ struct videobuf_queue capq;
+ struct videobuf_queue outq;
+ struct list_head activeq;
+ struct list_head activeq_out;
+ /*
+	 * the buffers waiting for per-frame parameters; this is only valid
+	 * in per-frame setting mode.
+ */
+ struct list_head buffers_waiting_for_param;
+	/* the linked list used to store per-frame parameters */
+ struct list_head per_frame_params;
+
+ unsigned int buffers_in_css;
+
+ /* irq_lock is used to protect video buffer state change operations and
+ * also to make activeq, activeq_out, capq and outq list
+ * operations atomic. */
+ spinlock_t irq_lock;
+ unsigned int users;
+
+ struct atomisp_device *isp;
+ struct v4l2_pix_format pix;
+ uint32_t sh_fmt;
+
+ struct atomisp_sub_device *asd;
+
+ /*
+	 * This frame_config_id is obtained from CSS when buffers are dequeued
+	 * from CSS; it indicates which parameters have been applied.
+ */
+ unsigned int frame_config_id[VIDEO_MAX_FRAME];
+ /*
+	 * This config id is set when the camera HAL enqueues a buffer; a
+	 * non-zero value indicates which parameters need to be applied.
+ */
+ unsigned int frame_request_config_id[VIDEO_MAX_FRAME];
+ struct atomisp_css_params_with_list *frame_params[VIDEO_MAX_FRAME];
+#ifdef ISP2401
+
+ /*
+	 * wdt moved from the asd struct so that each pipe has its own wdt
+ */
+ struct timer_list wdt;
+ unsigned int wdt_duration; /* in jiffies */
+ unsigned long wdt_expires;
+ atomic_t wdt_count;
+#endif
+};
+
+struct atomisp_acc_pipe {
+ struct video_device vdev;
+ unsigned int users;
+ bool running;
+ struct atomisp_sub_device *asd;
+ struct atomisp_device *isp;
+};
+
+struct atomisp_pad_format {
+ struct v4l2_mbus_framefmt fmt;
+ struct v4l2_rect crop;
+ struct v4l2_rect compose;
+};
+
+/* Internal states for flash process */
+enum atomisp_flash_state {
+ ATOMISP_FLASH_IDLE,
+ ATOMISP_FLASH_REQUESTED,
+ ATOMISP_FLASH_ONGOING,
+ ATOMISP_FLASH_DONE
+};
+
+/*
+ * This structure is used to cache the CSS parameters; it aligns with
+ * struct ia_css_isp_config but without unsupported and deprecated parts.
+ */
+struct atomisp_css_params {
+ struct ia_css_wb_config wb_config;
+ struct ia_css_cc_config cc_config;
+ struct ia_css_tnr_config tnr_config;
+ struct ia_css_ecd_config ecd_config;
+ struct ia_css_ynr_config ynr_config;
+ struct ia_css_fc_config fc_config;
+ struct ia_css_formats_config formats_config;
+ struct ia_css_cnr_config cnr_config;
+ struct ia_css_macc_config macc_config;
+ struct ia_css_ctc_config ctc_config;
+ struct ia_css_aa_config aa_config;
+ struct ia_css_aa_config baa_config;
+ struct ia_css_ce_config ce_config;
+ struct ia_css_ob_config ob_config;
+ struct ia_css_dp_config dp_config;
+ struct ia_css_de_config de_config;
+ struct ia_css_gc_config gc_config;
+ struct ia_css_nr_config nr_config;
+ struct ia_css_ee_config ee_config;
+ struct ia_css_anr_config anr_config;
+ struct ia_css_3a_config s3a_config;
+ struct ia_css_xnr_config xnr_config;
+ struct ia_css_dz_config dz_config;
+ struct ia_css_cc_config yuv2rgb_cc_config;
+ struct ia_css_cc_config rgb2yuv_cc_config;
+ struct ia_css_macc_table macc_table;
+ struct ia_css_gamma_table gamma_table;
+ struct ia_css_ctc_table ctc_table;
+
+ struct ia_css_xnr_table xnr_table;
+ struct ia_css_rgb_gamma_table r_gamma_table;
+ struct ia_css_rgb_gamma_table g_gamma_table;
+ struct ia_css_rgb_gamma_table b_gamma_table;
+
+ struct ia_css_vector motion_vector;
+ struct ia_css_anr_thres anr_thres;
+
+ struct ia_css_dvs_6axis_config *dvs_6axis;
+ struct ia_css_dvs2_coefficients *dvs2_coeff;
+ struct ia_css_shading_table *shading_table;
+ struct ia_css_morph_table *morph_table;
+
+ /*
+	 * Used to store the user pointer address of the frame. The driver
+	 * needs to translate it to an ia_css_frame * and then pass it to CSS.
+ */
+ void *output_frame;
+ uint32_t isp_config_id;
+
+ /* Indicates which parameters need to be updated. */
+ struct atomisp_parameters update_flag;
+};
+
+struct atomisp_subdev_params {
+	/* FIXME: Determines whether raw capture buffers are being passed to
+ * user space. Unimplemented for now. */
+ int online_process;
+ int yuv_ds_en;
+ unsigned int color_effect;
+ bool gdc_cac_en;
+ bool macc_en;
+ bool bad_pixel_en;
+ bool video_dis_en;
+ bool sc_en;
+ bool fpn_en;
+ bool xnr_en;
+ bool low_light;
+ int false_color;
+ unsigned int histogram_elenum;
+
+ /* Current grid info */
+ struct atomisp_css_grid_info curr_grid_info;
+ enum atomisp_css_pipe_id s3a_enabled_pipe;
+
+ int s3a_output_bytes;
+
+ bool dis_proj_data_valid;
+
+ struct ia_css_dz_config dz_config; /**< Digital Zoom */
+ struct ia_css_capture_config capture_config;
+
+ struct atomisp_css_isp_config config;
+
+ /* current configurations */
+ struct atomisp_css_params css_param;
+
+ /*
+ * Intermediate buffers used to communicate data between
+ * CSS and user space.
+ */
+ struct ia_css_3a_statistics *s3a_user_stat;
+
+ void *metadata_user[ATOMISP_METADATA_TYPE_NUM];
+ uint32_t metadata_width_size;
+
+ struct ia_css_dvs2_statistics *dvs_stat;
+ struct atomisp_css_dvs_6axis *dvs_6axis;
+ uint32_t exp_id;
+ int dvs_hor_coef_bytes;
+ int dvs_ver_coef_bytes;
+ int dvs_ver_proj_bytes;
+ int dvs_hor_proj_bytes;
+
+ /* Flash */
+ int num_flash_frames;
+ enum atomisp_flash_state flash_state;
+ enum atomisp_frame_status last_frame_status;
+
+ /* continuous capture */
+ struct atomisp_cont_capture_conf offline_parm;
+	/* Set when the driver needs to push updated parameters to CSS */
+ bool css_update_params_needed;
+};
+
+struct atomisp_css_params_with_list {
+ /* parameters for CSS */
+ struct atomisp_css_params params;
+ struct list_head list;
+};
+
+struct atomisp_acc_fw {
+ struct atomisp_css_fw_info *fw;
+ unsigned int handle;
+ unsigned int flags;
+ unsigned int type;
+ struct {
+ size_t length;
+ unsigned long css_ptr;
+ } args[ATOMISP_ACC_NR_MEMORY];
+ struct list_head list;
+};
+
+struct atomisp_map {
+ ia_css_ptr ptr;
+ size_t length;
+ struct list_head list;
+	/* FIXME: should keep track of which maps are currently used
+	 * by binaries and not allow releasing those that are in use.
+	 * Implement via reference counting.
+	 */
+};
+
+struct atomisp_sub_device {
+ struct v4l2_subdev subdev;
+ struct media_pad pads[ATOMISP_SUBDEV_PADS_NUM];
+ struct atomisp_pad_format fmt[ATOMISP_SUBDEV_PADS_NUM];
+ uint16_t capture_pad; /* main capture pad; defines much of isp config */
+
+ enum atomisp_subdev_input_entity input;
+ unsigned int output;
+ struct atomisp_video_pipe video_in;
+ struct atomisp_video_pipe video_out_capture; /* capture output */
+ struct atomisp_video_pipe video_out_vf; /* viewfinder output */
+ struct atomisp_video_pipe video_out_preview; /* preview output */
+ struct atomisp_acc_pipe video_acc;
+ /* video pipe main output */
+ struct atomisp_video_pipe video_out_video_capture;
+ /* struct isp_subdev_params params; */
+ spinlock_t lock;
+ struct atomisp_device *isp;
+ struct v4l2_ctrl_handler ctrl_handler;
+ struct v4l2_ctrl *fmt_auto;
+ struct v4l2_ctrl *run_mode;
+ struct v4l2_ctrl *depth_mode;
+ struct v4l2_ctrl *vfpp;
+ struct v4l2_ctrl *continuous_mode;
+ struct v4l2_ctrl *continuous_raw_buffer_size;
+ struct v4l2_ctrl *continuous_viewfinder;
+ struct v4l2_ctrl *enable_raw_buffer_lock;
+#ifdef ISP2401
+ struct v4l2_ctrl *ion_dev_fd;
+#endif
+ struct v4l2_ctrl *disable_dz;
+#ifdef ISP2401
+ struct v4l2_ctrl *select_isp_version;
+#endif
+
+ struct {
+ struct list_head fw;
+ struct list_head memory_maps;
+ struct atomisp_css_pipeline *pipeline;
+ bool extension_mode;
+ struct ida ida;
+ struct completion acc_done;
+ void *acc_stages;
+ } acc;
+
+ struct atomisp_subdev_params params;
+
+ struct atomisp_stream_env stream_env[ATOMISP_INPUT_STREAM_NUM];
+
+ struct v4l2_pix_format dvs_envelop;
+ unsigned int s3a_bufs_in_css[CSS_PIPE_ID_NUM];
+ unsigned int dis_bufs_in_css;
+
+ unsigned int metadata_bufs_in_css
+ [ATOMISP_INPUT_STREAM_NUM][CSS_PIPE_ID_NUM];
+ /* The list of free and available metadata buffers for CSS */
+ struct list_head metadata[ATOMISP_METADATA_TYPE_NUM];
+	/* The list of metadata buffers which have been enqueued to CSS */
+ struct list_head metadata_in_css[ATOMISP_METADATA_TYPE_NUM];
+ /* The list of metadata buffers which are ready for userspace to get */
+ struct list_head metadata_ready[ATOMISP_METADATA_TYPE_NUM];
+
+ /* The list of free and available s3a stat buffers for CSS */
+ struct list_head s3a_stats;
+	/* The list of s3a stat buffers which have been enqueued to CSS */
+ struct list_head s3a_stats_in_css;
+ /* The list of s3a stat buffers which are ready for userspace to get */
+ struct list_head s3a_stats_ready;
+
+ struct list_head dis_stats;
+ struct list_head dis_stats_in_css;
+ spinlock_t dis_stats_lock;
+
+ struct atomisp_css_frame *vf_frame; /* TODO: needed? */
+ struct atomisp_css_frame *raw_output_frame;
+ enum atomisp_frame_status frame_status[VIDEO_MAX_FRAME];
+
+ /* This field specifies which camera (v4l2 input) is selected. */
+ int input_curr;
+	/* This field specifies which sensor is selected when multiple
+	   sensors are connected to the same MIPI port. */
+ int sensor_curr;
+
+ atomic_t sof_count;
+ atomic_t sequence; /* Sequence value that is assigned to buffer. */
+ atomic_t sequence_temp;
+
+ unsigned int streaming; /* Hold both mutex and lock to change this */
+ bool stream_prepared; /* whether css stream is created */
+
+	/* subdev index: used to identify which subdev holds a resource,
+	 * e.g. which camera is used by which subdev
+	 */
+ unsigned int index;
+
+ /* delayed memory allocation for css */
+ struct completion init_done;
+ struct workqueue_struct *delayed_init_workq;
+ unsigned int delayed_init;
+ struct work_struct delayed_init_work;
+
+ unsigned int latest_preview_exp_id; /* CSS ZSL/SDV raw buffer id */
+
+ unsigned int mipi_frame_size;
+
+ bool copy_mode; /* CSI2+ use copy mode */
+ bool yuvpp_mode; /* CSI2+ yuvpp pipe */
+
+ int raw_buffer_bitmap[ATOMISP_MAX_EXP_ID/32 + 1]; /* Record each Raw Buffer lock status */
+ int raw_buffer_locked_count;
+ spinlock_t raw_buffer_bitmap_lock;
+
+#ifndef ISP2401
+ struct timer_list wdt;
+ unsigned int wdt_duration; /* in jiffies */
+ unsigned long wdt_expires;
+
+#endif
+ struct atomisp_resolution sensor_array_res;
+	bool high_speed_mode; /* Indicates whether high speed mode is active */
+ int pending_capture_request; /* Indicates the number of pending capture requests. */
+#ifndef ISP2401
+
+#else
+ bool re_trigger_capture;
+#endif
+ unsigned int preview_exp_id;
+ unsigned int postview_exp_id;
+};
+
+extern const struct atomisp_in_fmt_conv atomisp_in_fmt_conv[];
+
+u32 atomisp_subdev_uncompressed_code(u32 code);
+bool atomisp_subdev_is_compressed(u32 code);
+const struct atomisp_in_fmt_conv *atomisp_find_in_fmt_conv(u32 code);
+#ifndef ISP2401
+const struct atomisp_in_fmt_conv *atomisp_find_in_fmt_conv_by_atomisp_in_fmt(
+ enum atomisp_css_stream_format atomisp_in_fmt);
+#else
+const struct atomisp_in_fmt_conv
+ *atomisp_find_in_fmt_conv_by_atomisp_in_fmt(enum atomisp_css_stream_format
+ atomisp_in_fmt);
+#endif
+const struct atomisp_in_fmt_conv *atomisp_find_in_fmt_conv_compressed(u32 code);
+bool atomisp_subdev_format_conversion(struct atomisp_sub_device *asd,
+ unsigned int source_pad);
+uint16_t atomisp_subdev_source_pad(struct video_device *vdev);
+
+/* Get pointer to appropriate format */
+struct v4l2_mbus_framefmt
+*atomisp_subdev_get_ffmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg, uint32_t which,
+ uint32_t pad);
+struct v4l2_rect *atomisp_subdev_get_rect(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ uint32_t which, uint32_t pad,
+ uint32_t target);
+int atomisp_subdev_set_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ uint32_t which, uint32_t pad, uint32_t target,
+ uint32_t flags, struct v4l2_rect *r);
+/* Actually set the format */
+void atomisp_subdev_set_ffmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg, uint32_t which,
+ uint32_t pad, struct v4l2_mbus_framefmt *ffmt);
+
+int atomisp_update_run_mode(struct atomisp_sub_device *asd);
+
+void atomisp_subdev_cleanup_pending_events(struct atomisp_sub_device *asd);
+
+void atomisp_subdev_unregister_entities(struct atomisp_sub_device *asd);
+int atomisp_subdev_register_entities(struct atomisp_sub_device *asd,
+ struct v4l2_device *vdev);
+int atomisp_subdev_init(struct atomisp_device *isp);
+void atomisp_subdev_cleanup(struct atomisp_device *isp);
+int atomisp_create_pads_links(struct atomisp_device *isp);
+
+#endif /* __ATOMISP_SUBDEV_H__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_tables.h b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_tables.h
new file mode 100644
index 000000000000..af09218d8b71
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_tables.h
@@ -0,0 +1,191 @@
+/*
+ * Support for Medfield PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
+ *
+ * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+#ifndef __ATOMISP_TABLES_H__
+#define __ATOMISP_TABLES_H__
+
+#include "sh_css_params.h"
+
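+/*
+ * The cc_config matrices below use fixed-point coefficients:
+ * fraction_bits = 8, so 256 represents 1.0.
+ */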
+/*Sepia image effect table*/
+static struct atomisp_css_cc_config sepia_cc_config = {
+ .fraction_bits = 8,
+ .matrix = {141, 18, 68, -40, -5, -19, 35, 4, 16},
+};
+
+/*Negative image effect table*/
+static struct atomisp_css_cc_config nega_cc_config = {
+ .fraction_bits = 8,
+ .matrix = {255, 29, 120, 0, 374, 342, 0, 672, -301},
+};
+
+/*Mono image effect table*/
+static struct atomisp_css_cc_config mono_cc_config = {
+ .fraction_bits = 8,
+ .matrix = {255, 29, 120, 0, 0, 0, 0, 0, 0},
+};
+
+/*Skin whiten image effect table*/
+static struct atomisp_css_macc_table skin_low_macc_table = {
+ .data = {
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 7168, 0, 2048, 8192,
+ 5120, -1024, 2048, 8192,
+ 8192, 2048, -1024, 5120,
+ 8192, 2048, 0, 7168,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192
+ }
+};
+
+static struct atomisp_css_macc_table skin_medium_macc_table = {
+ .data = {
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 5120, 0, 6144, 8192,
+ 3072, -1024, 2048, 6144,
+ 6144, 2048, -1024, 3072,
+ 8192, 6144, 0, 5120,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192
+ }
+};
+
+static struct atomisp_css_macc_table skin_high_macc_table = {
+ .data = {
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 4096, 0, 8192, 8192,
+ 0, -2048, 4096, 6144,
+ 6144, 4096, -2048, 0,
+ 8192, 8192, 0, 4096,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192
+ }
+};
+
+/*Blue enhancement image effect table*/
+static struct atomisp_css_macc_table blue_macc_table = {
+ .data = {
+ 9728, -3072, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 9728, 0, -3072, 8192,
+ 12800, 1536, -3072, 8192,
+ 11264, 0, 0, 11264,
+ 9728, -3072, 0, 11264
+ }
+};
+
+/*Green enhancement image effect table*/
+static struct atomisp_css_macc_table green_macc_table = {
+ .data = {
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 10240, 4096, 0, 8192,
+ 10240, 4096, 0, 12288,
+ 12288, 0, 0, 12288,
+ 14336, -2048, 4096, 8192,
+ 10240, 0, 4096, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192
+ }
+};
+
+static struct atomisp_css_ctc_table vivid_ctc_table = {
+ .data.vamem_2 = {
+ 0, 384, 837, 957, 1011, 1062, 1083, 1080,
+ 1078, 1077, 1053, 1039, 1012, 992, 969, 951,
+ 929, 906, 886, 866, 845, 823, 809, 790,
+ 772, 758, 741, 726, 711, 701, 688, 675,
+ 666, 656, 648, 639, 633, 626, 618, 612,
+ 603, 594, 582, 572, 557, 545, 529, 516,
+ 504, 491, 480, 467, 459, 447, 438, 429,
+ 419, 412, 404, 397, 389, 382, 376, 368,
+ 363, 357, 351, 345, 340, 336, 330, 326,
+ 321, 318, 312, 308, 304, 300, 297, 294,
+ 291, 286, 284, 281, 278, 275, 271, 268,
+ 261, 257, 251, 245, 240, 235, 232, 225,
+ 223, 218, 213, 209, 206, 204, 199, 197,
+ 193, 189, 186, 185, 183, 179, 177, 175,
+ 172, 170, 169, 167, 164, 164, 162, 160,
+ 158, 157, 156, 154, 154, 152, 151, 150,
+ 149, 148, 146, 147, 146, 144, 143, 143,
+ 142, 141, 140, 141, 139, 138, 138, 138,
+ 137, 136, 136, 135, 134, 134, 134, 133,
+ 132, 132, 131, 130, 131, 130, 129, 128,
+ 129, 127, 127, 127, 127, 125, 125, 125,
+ 123, 123, 122, 120, 118, 115, 114, 111,
+ 110, 108, 106, 105, 103, 102, 100, 99,
+ 97, 97, 96, 95, 94, 93, 93, 91,
+ 91, 91, 90, 90, 89, 89, 88, 88,
+ 89, 88, 88, 87, 87, 87, 87, 86,
+ 87, 87, 86, 87, 86, 86, 84, 84,
+ 82, 80, 78, 76, 74, 72, 70, 68,
+ 67, 65, 62, 60, 58, 56, 55, 54,
+ 53, 51, 49, 49, 47, 45, 45, 45,
+ 41, 40, 39, 39, 34, 33, 34, 32,
+ 25, 23, 24, 20, 13, 9, 12, 0,
+ 0
+ }
+};
+#endif
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_tpg.c b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_tpg.c
new file mode 100644
index 000000000000..996d1bdebad4
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_tpg.c
@@ -0,0 +1,181 @@
+/*
+ * Support for Medfield PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
+ *
+ * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+
+#include <media/v4l2-event.h>
+#include <media/v4l2-mediabus.h>
+#include "atomisp_internal.h"
+#include "atomisp_tpg.h"
+
+static int tpg_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ return 0;
+}
+
+static int tpg_g_parm(struct v4l2_subdev *sd, struct v4l2_streamparm *param)
+{
+	/* stub, nothing to do */
+ return 0;
+}
+
+static int tpg_s_parm(struct v4l2_subdev *sd, struct v4l2_streamparm *param)
+{
+	/* stub, nothing to do */
+ return 0;
+}
+
+static int tpg_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *format)
+{
+	/* stub, nothing to do */
+ return 0;
+}
+
+static int tpg_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *format)
+{
+ struct v4l2_mbus_framefmt *fmt = &format->format;
+ if (format->pad)
+ return -EINVAL;
+ /* only raw8 grbg is supported by TPG */
+ fmt->code = MEDIA_BUS_FMT_SGRBG8_1X8;
+ if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
+ cfg->try_fmt = *fmt;
+ return 0;
+ }
+ return 0;
+}
+
+static int tpg_log_status(struct v4l2_subdev *sd)
+{
+	/* stub, nothing to do */
+ return 0;
+}
+
+static int tpg_s_power(struct v4l2_subdev *sd, int on)
+{
+ return 0;
+}
+
+static int tpg_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+	/* stub, nothing to do */
+ return 0;
+}
+
+static int tpg_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+	/* stub, nothing to do */
+ return 0;
+}
+
+static int tpg_enum_frame_ival(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_interval_enum *fie)
+{
+	/* stub, nothing to do */
+ return 0;
+}
+
+static const struct v4l2_subdev_video_ops tpg_video_ops = {
+ .s_stream = tpg_s_stream,
+ .g_parm = tpg_g_parm,
+ .s_parm = tpg_s_parm,
+};
+
+static const struct v4l2_subdev_core_ops tpg_core_ops = {
+ .log_status = tpg_log_status,
+ .s_power = tpg_s_power,
+};
+
+static const struct v4l2_subdev_pad_ops tpg_pad_ops = {
+ .enum_mbus_code = tpg_enum_mbus_code,
+ .enum_frame_size = tpg_enum_frame_size,
+ .enum_frame_interval = tpg_enum_frame_ival,
+ .get_fmt = tpg_get_fmt,
+ .set_fmt = tpg_set_fmt,
+};
+
+static const struct v4l2_subdev_ops tpg_ops = {
+ .core = &tpg_core_ops,
+ .video = &tpg_video_ops,
+ .pad = &tpg_pad_ops,
+};
+
+void atomisp_tpg_unregister_entities(struct atomisp_tpg_device *tpg)
+{
+ media_entity_cleanup(&tpg->sd.entity);
+ v4l2_device_unregister_subdev(&tpg->sd);
+}
+
+int atomisp_tpg_register_entities(struct atomisp_tpg_device *tpg,
+ struct v4l2_device *vdev)
+{
+ int ret;
+	/* Register the subdev. */
+ ret = v4l2_device_register_subdev(vdev, &tpg->sd);
+ if (ret < 0)
+ goto error;
+
+ return 0;
+
+error:
+ atomisp_tpg_unregister_entities(tpg);
+ return ret;
+}
+
+void atomisp_tpg_cleanup(struct atomisp_device *isp)
+{
+
+}
+
+int atomisp_tpg_init(struct atomisp_device *isp)
+{
+ struct atomisp_tpg_device *tpg = &isp->tpg;
+ struct v4l2_subdev *sd = &tpg->sd;
+ struct media_pad *pads = tpg->pads;
+ struct media_entity *me = &sd->entity;
+ int ret;
+
+ tpg->isp = isp;
+ v4l2_subdev_init(sd, &tpg_ops);
+ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ strcpy(sd->name, "tpg_subdev");
+ v4l2_set_subdevdata(sd, tpg);
+
+ pads[0].flags = MEDIA_PAD_FL_SINK;
+ me->function = MEDIA_ENT_F_V4L2_SUBDEV_UNKNOWN;
+
+ ret = media_entity_pads_init(me, 1, pads);
+ if (ret < 0)
+ goto fail;
+ return 0;
+fail:
+ atomisp_tpg_cleanup(isp);
+ return ret;
+}
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_tpg.h b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_tpg.h
new file mode 100644
index 000000000000..64ab60f02e85
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_tpg.h
@@ -0,0 +1,42 @@
+/*
+ * Support for Medfield PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
+ *
+ * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+
+#ifndef __ATOMISP_TPG_H__
+#define __ATOMISP_TPG_H__
+
+#include <media/media-entity.h>
+#include <media/v4l2-subdev.h>
+
+struct atomisp_tpg_device {
+ struct v4l2_subdev sd;
+ struct atomisp_device *isp;
+ struct media_pad pads[1];
+};
+
+void atomisp_tpg_cleanup(struct atomisp_device *isp);
+int atomisp_tpg_init(struct atomisp_device *isp);
+void atomisp_tpg_unregister_entities(struct atomisp_tpg_device *tpg);
+int atomisp_tpg_register_entities(struct atomisp_tpg_device *tpg,
+ struct v4l2_device *vdev);
+
+#endif /* __ATOMISP_TPG_H__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_trace_event.h b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_trace_event.h
new file mode 100644
index 000000000000..5ce282d6c939
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_trace_event.h
@@ -0,0 +1,133 @@
+/*
+ * Support Camera Imaging tracer core.
+ *
+ * Copyright (c) 2013 Intel Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM atomisp
+
+#if !defined(ATOMISP_TRACE_EVENT_H) || defined(TRACE_HEADER_MULTI_READ)
+#define ATOMISP_TRACE_EVENT_H
+
+#include <linux/tracepoint.h>
+#include <linux/string.h>
+TRACE_EVENT(camera_meminfo,
+
+ TP_PROTO(const char *name, int uptr_size, int counter, int sys_size,
+ int sys_res_size, int cam_sys_use, int cam_dyc_use,
+ int cam_res_use),
+
+ TP_ARGS(name, uptr_size, counter, sys_size, sys_res_size, cam_sys_use,
+ cam_dyc_use, cam_res_use),
+
+ TP_STRUCT__entry(
+ __array(char, name, 24)
+ __field(int, uptr_size)
+ __field(int, counter)
+ __field(int, sys_size)
+ __field(int, sys_res_size)
+ __field(int, cam_res_use)
+ __field(int, cam_dyc_use)
+ __field(int, cam_sys_use)
+ ),
+
+ TP_fast_assign(
+ strlcpy(__entry->name, name, 24);
+ __entry->uptr_size = uptr_size;
+ __entry->counter = counter;
+ __entry->sys_size = sys_size;
+ __entry->sys_res_size = sys_res_size;
+ __entry->cam_res_use = cam_res_use;
+ __entry->cam_dyc_use = cam_dyc_use;
+ __entry->cam_sys_use = cam_sys_use;
+ ),
+
+ TP_printk(
+ "<%s> User ptr memory:%d pages,\tISP private memory used:%d"
+ " pages:\tsysFP system size:%d,\treserved size:%d"
+ "\tcamFP sysUse:%d,\tdycUse:%d,\tresUse:%d.\n",
+ __entry->name, __entry->uptr_size, __entry->counter,
+ __entry->sys_size, __entry->sys_res_size, __entry->cam_sys_use,
+ __entry->cam_dyc_use, __entry->cam_res_use)
+);
+
+TRACE_EVENT(camera_debug,
+
+ TP_PROTO(const char *name, char *info, const int line),
+
+ TP_ARGS(name, info, line),
+
+ TP_STRUCT__entry(
+ __array(char, name, 24)
+ __array(char, info, 24)
+ __field(int, line)
+ ),
+
+ TP_fast_assign(
+ strlcpy(__entry->name, name, 24);
+ strlcpy(__entry->info, info, 24);
+ __entry->line = line;
+ ),
+
+ TP_printk("<%s>-<%d> %s\n", __entry->name, __entry->line,
+ __entry->info)
+);
+
+TRACE_EVENT(ipu_cstate,
+
+ TP_PROTO(int cstate),
+
+ TP_ARGS(cstate),
+
+ TP_STRUCT__entry(
+ __field(int, cstate)
+ ),
+
+ TP_fast_assign(
+ __entry->cstate = cstate;
+ ),
+
+ TP_printk("cstate=%d", __entry->cstate)
+);
+
+TRACE_EVENT(ipu_pstate,
+
+ TP_PROTO(int freq, int util),
+
+ TP_ARGS(freq, util),
+
+ TP_STRUCT__entry(
+ __field(int, freq)
+ __field(int, util)
+ ),
+
+ TP_fast_assign(
+ __entry->freq = freq;
+ __entry->util = util;
+ ),
+
+ TP_printk("freq=%d util=%d", __entry->freq, __entry->util)
+);
+#endif
+
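+/*
+ * Standard tracepoint header boilerplate: TRACE_INCLUDE_PATH is set to
+ * the driver directory so that define_trace.h can re-include this header.
+ */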
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE atomisp_trace_event
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_v4l2.c b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_v4l2.c
new file mode 100644
index 000000000000..e3fdbdba0b34
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_v4l2.c
@@ -0,0 +1,1610 @@
+/*
+ * Support for Medfield PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2010-2017 Intel Corporation. All Rights Reserved.
+ *
+ * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/pm_runtime.h>
+#include <linux/pm_qos.h>
+#include <linux/timer.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+
+#include "../../include/linux/atomisp_gmin_platform.h"
+
+#include "atomisp_cmd.h"
+#include "atomisp_common.h"
+#include "atomisp_fops.h"
+#include "atomisp_file.h"
+#include "atomisp_ioctl.h"
+#include "atomisp_internal.h"
+#include "atomisp_acc.h"
+#include "atomisp-regs.h"
+#include "atomisp_dfs_tables.h"
+#include "atomisp_drvfs.h"
+#include "hmm/hmm.h"
+#include "atomisp_trace_event.h"
+
+#include "hrt/hive_isp_css_mm_hrt.h"
+
+#include "device_access.h"
+#include <asm/intel-mid.h>
+
+/* G-Min addition: pull this in from intel_mid_pm.h */
+#define CSTATE_EXIT_LATENCY_C1 1
+
+static uint skip_fwload = 0;
+module_param(skip_fwload, uint, 0644);
+MODULE_PARM_DESC(skip_fwload, "Skip atomisp firmware load");
+
+/* set reserved memory pool size in pages */
+unsigned int repool_pgnr;
+module_param(repool_pgnr, uint, 0644);
+MODULE_PARM_DESC(repool_pgnr,
+		"Set the reserved memory pool size in pages (default:0)");
+
+/* set dynamic memory pool size in pages */
+unsigned int dypool_pgnr = UINT_MAX;
+module_param(dypool_pgnr, uint, 0644);
+MODULE_PARM_DESC(dypool_pgnr,
+		"Set the dynamic memory pool size in pages (default: UINT_MAX)");
+
+bool dypool_enable;
+module_param(dypool_enable, bool, 0644);
+MODULE_PARM_DESC(dypool_enable,
+ "dynamic memory pool enable/disable (default:disable)");
+
+/* memory optimization: deferred firmware loading */
+bool defer_fw_load;
+module_param(defer_fw_load, bool, 0644);
+MODULE_PARM_DESC(defer_fw_load,
+ "Defer FW loading until device is opened (default:disable)");
+
+/* cross component debug message flag */
+int dbg_level;
+module_param(dbg_level, int, 0644);
+MODULE_PARM_DESC(dbg_level, "debug message on/off (default:off)");
+
+/* log function switch */
+int dbg_func = 2;
+module_param(dbg_func, int, 0644);
+MODULE_PARM_DESC(dbg_func,
+ "log function switch non/trace_printk/printk (default:printk)");
+
+int mipicsi_flag;
+module_param(mipicsi_flag, int, 0644);
+MODULE_PARM_DESC(mipicsi_flag, "mipi csi compression predictor algorithm");
+
+/*
+ * Set to 16x16 since this is the number of extra lines and pixels the
+ * sensor exports. When these were kept at 10x8, incorrect resolutions
+ * were requested from the sensor driver in YUV downscaling modes, with
+ * strange outcomes as a result. The proper way to do this would be to
+ * have a list of tables specifying the sensor res, mipi rec, output
+ * res and isp output res; since we do not have this yet, the chosen
+ * solution is the next best thing.
+ */
+int pad_w = 16;
+module_param(pad_w, int, 0644);
+MODULE_PARM_DESC(pad_w, "extra data for ISP processing");
+
+int pad_h = 16;
+module_param(pad_h, int, 0644);
+MODULE_PARM_DESC(pad_h, "extra data for ISP processing");
+
+struct device *atomisp_dev;
+
+void __iomem *atomisp_io_base;
+
+int atomisp_video_init(struct atomisp_video_pipe *video, const char *name)
+{
+ int ret;
+ const char *direction;
+
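+	/*
+	 * CAPTURE nodes deliver ISP output to user space (sink pad in the
+	 * media graph); OUTPUT nodes inject frames into the ISP through
+	 * the file-input path (source pad).
+	 */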
+ switch (video->type) {
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ direction = "output";
+ video->pad.flags = MEDIA_PAD_FL_SINK;
+ video->vdev.fops = &atomisp_fops;
+ video->vdev.ioctl_ops = &atomisp_ioctl_ops;
+ break;
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ direction = "input";
+ video->pad.flags = MEDIA_PAD_FL_SOURCE;
+ video->vdev.fops = &atomisp_file_fops;
+ video->vdev.ioctl_ops = &atomisp_file_ioctl_ops;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = media_entity_pads_init(&video->vdev.entity, 1, &video->pad);
+ if (ret < 0)
+ return ret;
+
+ /* Initialize the video device. */
+ snprintf(video->vdev.name, sizeof(video->vdev.name),
+ "ATOMISP ISP %s %s", name, direction);
+ video->vdev.release = video_device_release_empty;
+ video_set_drvdata(&video->vdev, video->isp);
+
+ return 0;
+}
+
+void atomisp_acc_init(struct atomisp_acc_pipe *video, const char *name)
+{
+ video->vdev.fops = &atomisp_fops;
+ video->vdev.ioctl_ops = &atomisp_ioctl_ops;
+
+ /* Initialize the video device. */
+ snprintf(video->vdev.name, sizeof(video->vdev.name),
+ "ATOMISP ISP %s", name);
+ video->vdev.release = video_device_release_empty;
+ video_set_drvdata(&video->vdev, video->isp);
+}
+
+int atomisp_video_register(struct atomisp_video_pipe *video,
+ struct v4l2_device *vdev)
+{
+ int ret;
+
+ video->vdev.v4l2_dev = vdev;
+
+ ret = video_register_device(&video->vdev, VFL_TYPE_GRABBER, -1);
+ if (ret < 0)
+ dev_err(vdev->dev, "%s: could not register video device (%d)\n",
+ __func__, ret);
+
+ return ret;
+}
+
+int atomisp_acc_register(struct atomisp_acc_pipe *video,
+ struct v4l2_device *vdev)
+{
+ int ret;
+
+ video->vdev.v4l2_dev = vdev;
+
+ ret = video_register_device(&video->vdev, VFL_TYPE_GRABBER, -1);
+ if (ret < 0)
+ dev_err(vdev->dev, "%s: could not register video device (%d)\n",
+ __func__, ret);
+
+ return ret;
+}
+
+void atomisp_video_unregister(struct atomisp_video_pipe *video)
+{
+ if (video_is_registered(&video->vdev)) {
+ media_entity_cleanup(&video->vdev.entity);
+ video_unregister_device(&video->vdev);
+ }
+}
+
+void atomisp_acc_unregister(struct atomisp_acc_pipe *video)
+{
+ if (video_is_registered(&video->vdev))
+ video_unregister_device(&video->vdev);
+}
+
+static int atomisp_save_iunit_reg(struct atomisp_device *isp)
+{
+ struct pci_dev *dev = isp->pdev;
+
+ dev_dbg(isp->dev, "%s\n", __func__);
+
+ pci_read_config_word(dev, PCI_COMMAND, &isp->saved_regs.pcicmdsts);
+ /* isp->saved_regs.ispmmadr is set from the atomisp_pci_probe() */
+ pci_read_config_dword(dev, PCI_MSI_CAPID, &isp->saved_regs.msicap);
+ pci_read_config_dword(dev, PCI_MSI_ADDR, &isp->saved_regs.msi_addr);
+ pci_read_config_word(dev, PCI_MSI_DATA, &isp->saved_regs.msi_data);
+ pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &isp->saved_regs.intr);
+ pci_read_config_dword(dev, PCI_INTERRUPT_CTRL,
+ &isp->saved_regs.interrupt_control);
+
+ pci_read_config_dword(dev, MRFLD_PCI_PMCS,
+ &isp->saved_regs.pmcs);
+ /* Ensure read/write combining is enabled. */
+ pci_read_config_dword(dev, PCI_I_CONTROL,
+ &isp->saved_regs.i_control);
+ isp->saved_regs.i_control |=
+ MRFLD_PCI_I_CONTROL_ENABLE_READ_COMBINING |
+ MRFLD_PCI_I_CONTROL_ENABLE_WRITE_COMBINING;
+ pci_read_config_dword(dev, MRFLD_PCI_CSI_ACCESS_CTRL_VIOL,
+ &isp->saved_regs.csi_access_viol);
+ pci_read_config_dword(dev, MRFLD_PCI_CSI_RCOMP_CONTROL,
+ &isp->saved_regs.csi_rcomp_config);
+ /*
+ * Hardware bugs require setting CSI_HS_OVR_CLK_GATE_ON_UPDATE.
+ * ANN/CHV: RCOMP updates do not happen when using CSI2+ path
+ * and sensor sending "continuous clock".
+ * TNG/ANN/CHV: MIPI packets are lost if the HS entry sequence
+ * is missed, and IUNIT can hang.
+ * For both issues, setting this bit is a workaround.
+ */
+ isp->saved_regs.csi_rcomp_config |=
+ MRFLD_PCI_CSI_HS_OVR_CLK_GATE_ON_UPDATE;
+ pci_read_config_dword(dev, MRFLD_PCI_CSI_AFE_TRIM_CONTROL,
+ &isp->saved_regs.csi_afe_dly);
+ pci_read_config_dword(dev, MRFLD_PCI_CSI_CONTROL,
+ &isp->saved_regs.csi_control);
+ if (isp->media_dev.hw_revision >=
+ (ATOMISP_HW_REVISION_ISP2401 << ATOMISP_HW_REVISION_SHIFT))
+ isp->saved_regs.csi_control |=
+ MRFLD_PCI_CSI_CONTROL_PARPATHEN;
+	/*
+	 * On CHT, the CSI_READY bit should be enabled before stream-on.
+	 */
+ if (IS_CHT && (isp->media_dev.hw_revision >= ((ATOMISP_HW_REVISION_ISP2401 <<
+ ATOMISP_HW_REVISION_SHIFT) | ATOMISP_HW_STEPPING_B0)))
+ isp->saved_regs.csi_control |=
+ MRFLD_PCI_CSI_CONTROL_CSI_READY;
+ pci_read_config_dword(dev, MRFLD_PCI_CSI_AFE_RCOMP_CONTROL,
+ &isp->saved_regs.csi_afe_rcomp_config);
+ pci_read_config_dword(dev, MRFLD_PCI_CSI_AFE_HS_CONTROL,
+ &isp->saved_regs.csi_afe_hs_control);
+ pci_read_config_dword(dev, MRFLD_PCI_CSI_DEADLINE_CONTROL,
+ &isp->saved_regs.csi_deadline_control);
+ return 0;
+}
+
+static int __maybe_unused atomisp_restore_iunit_reg(struct atomisp_device *isp)
+{
+ struct pci_dev *dev = isp->pdev;
+
+ dev_dbg(isp->dev, "%s\n", __func__);
+
+ pci_write_config_word(dev, PCI_COMMAND, isp->saved_regs.pcicmdsts);
+ pci_write_config_dword(dev, PCI_BASE_ADDRESS_0,
+ isp->saved_regs.ispmmadr);
+ pci_write_config_dword(dev, PCI_MSI_CAPID, isp->saved_regs.msicap);
+ pci_write_config_dword(dev, PCI_MSI_ADDR, isp->saved_regs.msi_addr);
+ pci_write_config_word(dev, PCI_MSI_DATA, isp->saved_regs.msi_data);
+ pci_write_config_byte(dev, PCI_INTERRUPT_LINE, isp->saved_regs.intr);
+ pci_write_config_dword(dev, PCI_INTERRUPT_CTRL,
+ isp->saved_regs.interrupt_control);
+ pci_write_config_dword(dev, PCI_I_CONTROL,
+ isp->saved_regs.i_control);
+
+ pci_write_config_dword(dev, MRFLD_PCI_PMCS,
+ isp->saved_regs.pmcs);
+ pci_write_config_dword(dev, MRFLD_PCI_CSI_ACCESS_CTRL_VIOL,
+ isp->saved_regs.csi_access_viol);
+ pci_write_config_dword(dev, MRFLD_PCI_CSI_RCOMP_CONTROL,
+ isp->saved_regs.csi_rcomp_config);
+ pci_write_config_dword(dev, MRFLD_PCI_CSI_AFE_TRIM_CONTROL,
+ isp->saved_regs.csi_afe_dly);
+ pci_write_config_dword(dev, MRFLD_PCI_CSI_CONTROL,
+ isp->saved_regs.csi_control);
+ pci_write_config_dword(dev, MRFLD_PCI_CSI_AFE_RCOMP_CONTROL,
+ isp->saved_regs.csi_afe_rcomp_config);
+ pci_write_config_dword(dev, MRFLD_PCI_CSI_AFE_HS_CONTROL,
+ isp->saved_regs.csi_afe_hs_control);
+ pci_write_config_dword(dev, MRFLD_PCI_CSI_DEADLINE_CONTROL,
+ isp->saved_regs.csi_deadline_control);
+
+	/*
+	 * For MRFLD, software/firmware needs to write 1 to bit 0 of the
+	 * register at CSI_RECEIVER_SELECTION_REG to enable the SH CSI
+	 * backend. Writing 0 would enable the Arasan CSI backend, which
+	 * has bugs (sightings 4567697 and 4567699) and will be removed
+	 * in B0.
+	 */
+ atomisp_store_uint32(MRFLD_CSI_RECEIVER_SELECTION_REG, 1);
+ return 0;
+}
+
+static int atomisp_mrfld_pre_power_down(struct atomisp_device *isp)
+{
+ struct pci_dev *dev = isp->pdev;
+ u32 irq;
+ unsigned long flags;
+
+ spin_lock_irqsave(&isp->lock, flags);
+ if (isp->sw_contex.power_state == ATOM_ISP_POWER_DOWN) {
+ spin_unlock_irqrestore(&isp->lock, flags);
+ dev_dbg(isp->dev, "<%s %d.\n", __func__, __LINE__);
+ return 0;
+ }
+	/*
+	 * MRFLD HAS requirement: the i-unit cannot be powered off while
+	 * the ISP has unserviced IRQs. Check here for any pending IRQ
+	 * and, if there is one, wait for it to be serviced.
+	 */
+ pci_read_config_dword(dev, PCI_INTERRUPT_CTRL, &irq);
+ irq = irq & 1 << INTR_IIR;
+ pci_write_config_dword(dev, PCI_INTERRUPT_CTRL, irq);
+
+ pci_read_config_dword(dev, PCI_INTERRUPT_CTRL, &irq);
+ if (!(irq & (1 << INTR_IIR)))
+ goto done;
+
+ atomisp_store_uint32(MRFLD_INTR_CLEAR_REG, 0xFFFFFFFF);
+ atomisp_load_uint32(MRFLD_INTR_STATUS_REG, &irq);
+ if (irq != 0) {
+ dev_err(isp->dev,
+ "%s: fail to clear isp interrupt status reg=0x%x\n",
+ __func__, irq);
+ spin_unlock_irqrestore(&isp->lock, flags);
+ return -EAGAIN;
+ } else {
+ pci_read_config_dword(dev, PCI_INTERRUPT_CTRL, &irq);
+ irq = irq & 1 << INTR_IIR;
+ pci_write_config_dword(dev, PCI_INTERRUPT_CTRL, irq);
+
+ pci_read_config_dword(dev, PCI_INTERRUPT_CTRL, &irq);
+ if (!(irq & (1 << INTR_IIR))) {
+ atomisp_store_uint32(MRFLD_INTR_ENABLE_REG, 0x0);
+ goto done;
+ }
+ dev_err(isp->dev,
+ "%s: error in iunit interrupt. status reg=0x%x\n",
+ __func__, irq);
+ spin_unlock_irqrestore(&isp->lock, flags);
+ return -EAGAIN;
+ }
+done:
+	/*
+	 * MRFLD WORKAROUND:
+	 * Before powering off the IUNIT, clear the pending interrupts and
+	 * disable the interrupt. The driver should avoid writing 0 to IIR,
+	 * as that could block subsequent interrupt messages.
+	 * HW sighting: 4568410.
+	 */
+ pci_read_config_dword(dev, PCI_INTERRUPT_CTRL, &irq);
+ irq &= ~(1 << INTR_IER);
+ pci_write_config_dword(dev, PCI_INTERRUPT_CTRL, irq);
+
+ atomisp_msi_irq_uninit(isp, dev);
+ atomisp_freq_scaling(isp, ATOMISP_DFS_MODE_LOW, true);
+ spin_unlock_irqrestore(&isp->lock, flags);
+
+ return 0;
+}
+
+
+/*
+ * WA for DDR DVFS enable/disable.
+ * By default, the ISP forces DDR DVFS to 1600 MHz before disabling DVFS.
+ */
+void punit_ddr_dvfs_enable(bool enable)
+{
+ int reg = intel_mid_msgbus_read32(PUNIT_PORT, MRFLD_ISPSSDVFS);
+ int door_bell = 1 << 8;
+ int max_wait = 30;
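+	/* 30 polls of >= 100 us each gives the ~3 ms wait reported below */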
+
+ if (enable) {
+ reg &= ~(MRFLD_BIT0 | MRFLD_BIT1);
+ } else {
+ reg |= (MRFLD_BIT1 | door_bell);
+ reg &= ~(MRFLD_BIT0);
+ }
+
+ intel_mid_msgbus_write32(PUNIT_PORT, MRFLD_ISPSSDVFS, reg);
+
+ /*Check Req_ACK to see freq status, wait until door_bell is cleared*/
+ if (reg & door_bell) {
+ while (max_wait--) {
+ if (0 == (intel_mid_msgbus_read32(PUNIT_PORT,
+ MRFLD_ISPSSDVFS) & door_bell))
+ break;
+
+ usleep_range(100, 500);
+ }
+ }
+
+ if (max_wait == -1)
+ pr_info("DDR DVFS, door bell is not cleared within 3ms\n");
+}
+
+/* Workaround for pmu_nc_set_power_state not ready in MRFLD */
+int atomisp_mrfld_power_down(struct atomisp_device *isp)
+{
+ unsigned long timeout;
+ u32 reg_value;
+
+ /* writing 0x3 to ISPSSPM0 bit[1:0] to power off the IUNIT */
+ reg_value = intel_mid_msgbus_read32(PUNIT_PORT, MRFLD_ISPSSPM0);
+ reg_value &= ~MRFLD_ISPSSPM0_ISPSSC_MASK;
+ reg_value |= MRFLD_ISPSSPM0_IUNIT_POWER_OFF;
+ intel_mid_msgbus_write32(PUNIT_PORT, MRFLD_ISPSSPM0, reg_value);
+
+ /*WA:Enable DVFS*/
+ if (IS_CHT)
+ punit_ddr_dvfs_enable(true);
+
+	/*
+	 * There should be no iunit access while power-down is in progress.
+	 * HW sighting: 4567865.
+	 * FIXME: msecs_to_jiffies(50) is an empirically determined value.
+	 */
+ timeout = jiffies + msecs_to_jiffies(50);
+ while (1) {
+ reg_value = intel_mid_msgbus_read32(PUNIT_PORT,
+ MRFLD_ISPSSPM0);
+ dev_dbg(isp->dev, "power-off in progress, ISPSSPM0: 0x%x\n",
+ reg_value);
+ /* wait until ISPSSPM0 bit[25:24] shows 0x3 */
+ if ((reg_value >> MRFLD_ISPSSPM0_ISPSSS_OFFSET) ==
+ MRFLD_ISPSSPM0_IUNIT_POWER_OFF) {
+ trace_ipu_cstate(0);
+ return 0;
+ }
+
+ if (time_after(jiffies, timeout)) {
+ dev_err(isp->dev, "power-off iunit timeout.\n");
+ return -EBUSY;
+ }
+		/* FIXME: empirically determined delay */
+ usleep_range(100, 150);
+ }
+}
+
+
+/* Workaround for pmu_nc_set_power_state not ready in MRFLD */
+int atomisp_mrfld_power_up(struct atomisp_device *isp)
+{
+ unsigned long timeout;
+ u32 reg_value;
+
+	/* WA for PUNIT: if DVFS is enabled, ISP timeouts are observed */
+ if (IS_CHT)
+ punit_ddr_dvfs_enable(false);
+
+ /*
+ * FIXME:WA for ECS28A, with this sleep, CTS
+ * android.hardware.camera2.cts.CameraDeviceTest#testCameraDeviceAbort
+ * PASS, no impact on other platforms
+ */
+ if (IS_BYT)
+ msleep(10);
+
+	/* writing 0x0 to ISPSSPM0 bit[1:0] to power on the IUNIT */
+ reg_value = intel_mid_msgbus_read32(PUNIT_PORT, MRFLD_ISPSSPM0);
+ reg_value &= ~MRFLD_ISPSSPM0_ISPSSC_MASK;
+ intel_mid_msgbus_write32(PUNIT_PORT, MRFLD_ISPSSPM0, reg_value);
+
+	/* FIXME: empirically determined timeout */
+ timeout = jiffies + msecs_to_jiffies(50);
+ while (1) {
+ reg_value = intel_mid_msgbus_read32(PUNIT_PORT, MRFLD_ISPSSPM0);
+ dev_dbg(isp->dev, "power-on in progress, ISPSSPM0: 0x%x\n",
+ reg_value);
+ /* wait until ISPSSPM0 bit[25:24] shows 0x0 */
+ if ((reg_value >> MRFLD_ISPSSPM0_ISPSSS_OFFSET) ==
+ MRFLD_ISPSSPM0_IUNIT_POWER_ON) {
+ trace_ipu_cstate(1);
+ return 0;
+ }
+
+ if (time_after(jiffies, timeout)) {
+ dev_err(isp->dev, "power-on iunit timeout.\n");
+ return -EBUSY;
+ }
+		/* FIXME: empirically determined delay */
+ usleep_range(100, 150);
+ }
+}
+
+int atomisp_runtime_suspend(struct device *dev)
+{
+ struct atomisp_device *isp = (struct atomisp_device *)
+ dev_get_drvdata(dev);
+ int ret;
+
+ ret = atomisp_mrfld_pre_power_down(isp);
+ if (ret)
+ return ret;
+
+ /*Turn off the ISP d-phy*/
+ ret = atomisp_ospm_dphy_down(isp);
+ if (ret)
+ return ret;
+ pm_qos_update_request(&isp->pm_qos, PM_QOS_DEFAULT_VALUE);
+ return atomisp_mrfld_power_down(isp);
+}
+
+int atomisp_runtime_resume(struct device *dev)
+{
+ struct atomisp_device *isp = (struct atomisp_device *)
+ dev_get_drvdata(dev);
+ int ret;
+
+ ret = atomisp_mrfld_power_up(isp);
+ if (ret)
+ return ret;
+
+ pm_qos_update_request(&isp->pm_qos, isp->max_isr_latency);
+ if (isp->sw_contex.power_state == ATOM_ISP_POWER_DOWN) {
+ /*Turn on ISP d-phy */
+ ret = atomisp_ospm_dphy_up(isp);
+ if (ret) {
+ dev_err(isp->dev, "Failed to power up ISP!.\n");
+ return -EINVAL;
+ }
+ }
+
+ /*restore register values for iUnit and iUnitPHY registers*/
+ if (isp->saved_regs.pcicmdsts)
+ atomisp_restore_iunit_reg(isp);
+
+ atomisp_freq_scaling(isp, ATOMISP_DFS_MODE_LOW, true);
+ return 0;
+}
+
+static int __maybe_unused atomisp_suspend(struct device *dev)
+{
+ struct atomisp_device *isp = (struct atomisp_device *)
+ dev_get_drvdata(dev);
+ /* FIXME: only has one isp_subdev at present */
+ struct atomisp_sub_device *asd = &isp->asd[0];
+ unsigned long flags;
+ int ret;
+
+ /*
+ * FIXME: Suspend is not supported by sensors. Abort if any video
+ * node was opened.
+ */
+ if (atomisp_dev_users(isp))
+ return -EBUSY;
+
+ spin_lock_irqsave(&isp->lock, flags);
+ if (asd->streaming != ATOMISP_DEVICE_STREAMING_DISABLED) {
+ spin_unlock_irqrestore(&isp->lock, flags);
+ dev_err(isp->dev, "atomisp cannot suspend at this time.\n");
+ return -EINVAL;
+ }
+ spin_unlock_irqrestore(&isp->lock, flags);
+
+ ret = atomisp_mrfld_pre_power_down(isp);
+ if (ret)
+ return ret;
+
+ /*Turn off the ISP d-phy */
+ ret = atomisp_ospm_dphy_down(isp);
+ if (ret) {
+ dev_err(isp->dev, "fail to power off ISP\n");
+ return ret;
+ }
+ pm_qos_update_request(&isp->pm_qos, PM_QOS_DEFAULT_VALUE);
+ return atomisp_mrfld_power_down(isp);
+}
+
+static int __maybe_unused atomisp_resume(struct device *dev)
+{
+ struct atomisp_device *isp = (struct atomisp_device *)
+ dev_get_drvdata(dev);
+ int ret;
+
+ ret = atomisp_mrfld_power_up(isp);
+ if (ret)
+ return ret;
+
+ pm_qos_update_request(&isp->pm_qos, isp->max_isr_latency);
+
+ /*Turn on ISP d-phy */
+ ret = atomisp_ospm_dphy_up(isp);
+ if (ret) {
+ dev_err(isp->dev, "Failed to power up ISP!.\n");
+ return -EINVAL;
+ }
+
+ /*restore register values for iUnit and iUnitPHY registers*/
+ if (isp->saved_regs.pcicmdsts)
+ atomisp_restore_iunit_reg(isp);
+
+ atomisp_freq_scaling(isp, ATOMISP_DFS_MODE_LOW, true);
+ return 0;
+}
+
+int atomisp_csi_lane_config(struct atomisp_device *isp)
+{
+ static const struct {
+ u8 code;
+ u8 lanes[MRFLD_PORT_NUM];
+ } portconfigs[] = {
+ /* Tangier/Merrifield available lane configurations */
+ { 0x00, { 4, 1, 0 } }, /* 00000 */
+ { 0x01, { 3, 1, 0 } }, /* 00001 */
+ { 0x02, { 2, 1, 0 } }, /* 00010 */
+ { 0x03, { 1, 1, 0 } }, /* 00011 */
+ { 0x04, { 2, 1, 2 } }, /* 00100 */
+ { 0x08, { 3, 1, 1 } }, /* 01000 */
+ { 0x09, { 2, 1, 1 } }, /* 01001 */
+ { 0x0a, { 1, 1, 1 } }, /* 01010 */
+
+ /* Anniedale/Moorefield only configurations */
+ { 0x10, { 4, 2, 0 } }, /* 10000 */
+ { 0x11, { 3, 2, 0 } }, /* 10001 */
+ { 0x12, { 2, 2, 0 } }, /* 10010 */
+ { 0x13, { 1, 2, 0 } }, /* 10011 */
+ { 0x14, { 2, 2, 2 } }, /* 10100 */
+ { 0x18, { 3, 2, 1 } }, /* 11000 */
+ { 0x19, { 2, 2, 1 } }, /* 11001 */
+ { 0x1a, { 1, 2, 1 } }, /* 11010 */
+ };
+
+ unsigned int i, j;
+ u8 sensor_lanes[MRFLD_PORT_NUM] = { 0 };
+ u32 csi_control;
+ int nportconfigs;
+ u32 port_config_mask;
+ int port3_lanes_shift;
+
+ if (isp->media_dev.hw_revision <
+ ATOMISP_HW_REVISION_ISP2401_LEGACY <<
+ ATOMISP_HW_REVISION_SHIFT) {
+ /* Merrifield */
+ port_config_mask = MRFLD_PORT_CONFIG_MASK;
+ port3_lanes_shift = MRFLD_PORT3_LANES_SHIFT;
+ } else {
+ /* Moorefield / Cherryview */
+ port_config_mask = CHV_PORT_CONFIG_MASK;
+ port3_lanes_shift = CHV_PORT3_LANES_SHIFT;
+ }
+
+ if (isp->media_dev.hw_revision <
+ ATOMISP_HW_REVISION_ISP2401 <<
+ ATOMISP_HW_REVISION_SHIFT) {
+ /* Merrifield / Moorefield legacy input system */
+ nportconfigs = MRFLD_PORT_CONFIG_NUM;
+ } else {
+ /* Moorefield / Cherryview new input system */
+ nportconfigs = ARRAY_SIZE(portconfigs);
+ }
+
+ for (i = 0; i < isp->input_cnt; i++) {
+ struct camera_mipi_info *mipi_info;
+
+ if (isp->inputs[i].type != RAW_CAMERA &&
+ isp->inputs[i].type != SOC_CAMERA)
+ continue;
+
+ mipi_info = atomisp_to_sensor_mipi_info(isp->inputs[i].camera);
+ if (!mipi_info)
+ continue;
+
+ switch (mipi_info->port) {
+ case ATOMISP_CAMERA_PORT_PRIMARY:
+ sensor_lanes[0] = mipi_info->num_lanes;
+ break;
+ case ATOMISP_CAMERA_PORT_SECONDARY:
+ sensor_lanes[1] = mipi_info->num_lanes;
+ break;
+ case ATOMISP_CAMERA_PORT_TERTIARY:
+ sensor_lanes[2] = mipi_info->num_lanes;
+ break;
+ default:
+ dev_err(isp->dev,
+ "%s: invalid port: %d for the %dth sensor\n",
+ __func__, mipi_info->port, i);
+ return -EINVAL;
+ }
+ }
+
+ for (i = 0; i < nportconfigs; i++) {
+ for (j = 0; j < MRFLD_PORT_NUM; j++)
+ if (sensor_lanes[j] &&
+ sensor_lanes[j] != portconfigs[i].lanes[j])
+ break;
+
+ if (j == MRFLD_PORT_NUM)
+ break; /* Found matching setting */
+ }
+
+ if (i >= nportconfigs) {
+ dev_err(isp->dev,
+ "%s: could not find the CSI port setting for %d-%d-%d\n",
+ __func__,
+ sensor_lanes[0], sensor_lanes[1], sensor_lanes[2]);
+ return -EINVAL;
+ }
+
+ pci_read_config_dword(isp->pdev, MRFLD_PCI_CSI_CONTROL, &csi_control);
+ csi_control &= ~port_config_mask;
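+	/* (1 << n) - 1 below expands a lane count n into an n-bit lane mask */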
+ csi_control |= (portconfigs[i].code << MRFLD_PORT_CONFIGCODE_SHIFT)
+ | (portconfigs[i].lanes[0] ? 0 : (1 << MRFLD_PORT1_ENABLE_SHIFT))
+ | (portconfigs[i].lanes[1] ? 0 : (1 << MRFLD_PORT2_ENABLE_SHIFT))
+ | (portconfigs[i].lanes[2] ? 0 : (1 << MRFLD_PORT3_ENABLE_SHIFT))
+ | (((1 << portconfigs[i].lanes[0]) - 1) << MRFLD_PORT1_LANES_SHIFT)
+ | (((1 << portconfigs[i].lanes[1]) - 1) << MRFLD_PORT2_LANES_SHIFT)
+ | (((1 << portconfigs[i].lanes[2]) - 1) << port3_lanes_shift);
+
+ pci_write_config_dword(isp->pdev, MRFLD_PCI_CSI_CONTROL, csi_control);
+
+ dev_dbg(isp->dev,
+ "%s: the portconfig is %d-%d-%d, CSI_CONTROL is 0x%08X\n",
+ __func__, portconfigs[i].lanes[0], portconfigs[i].lanes[1],
+ portconfigs[i].lanes[2], csi_control);
+
+ return 0;
+}
+
+static int atomisp_subdev_probe(struct atomisp_device *isp)
+{
+ const struct atomisp_platform_data *pdata;
+ struct intel_v4l2_subdev_table *subdevs;
+ int ret, raw_index = -1;
+
+ pdata = atomisp_get_platform_data();
+ if (pdata == NULL) {
+ dev_err(isp->dev, "no platform data available\n");
+ return 0;
+ }
+
+ for (subdevs = pdata->subdevs; subdevs->type; ++subdevs) {
+ struct v4l2_subdev *subdev;
+ struct i2c_board_info *board_info =
+ &subdevs->v4l2_subdev.board_info;
+ struct i2c_adapter *adapter =
+ i2c_get_adapter(subdevs->v4l2_subdev.i2c_adapter_id);
+ struct camera_sensor_platform_data *sensor_pdata;
+ int sensor_num, i;
+
+ if (adapter == NULL) {
+ dev_err(isp->dev,
+ "Failed to find i2c adapter for subdev %s\n",
+ board_info->type);
+ break;
+ }
+
+		/* In G-Min, the sensor devices will already be probed
+		 * (via ACPI) and registered; do not create new ones. */
+ subdev = atomisp_gmin_find_subdev(adapter, board_info);
+ ret = v4l2_device_register_subdev(&isp->v4l2_dev, subdev);
+ if (ret) {
+ dev_warn(isp->dev, "Subdev %s detection fail\n",
+ board_info->type);
+ continue;
+ }
+
+ if (subdev == NULL) {
+ dev_warn(isp->dev, "Subdev %s detection fail\n",
+ board_info->type);
+ continue;
+ }
+
+		dev_info(isp->dev, "Subdev %s successfully registered\n",
+ board_info->type);
+
+ switch (subdevs->type) {
+ case RAW_CAMERA:
+ raw_index = isp->input_cnt;
+ dev_dbg(isp->dev, "raw_index: %d\n", raw_index);
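+			/* fall through: RAW sensors are registered as inputs below */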
+ case SOC_CAMERA:
+ dev_dbg(isp->dev, "SOC_INDEX: %d\n", isp->input_cnt);
+ if (isp->input_cnt >= ATOM_ISP_MAX_INPUTS) {
+ dev_warn(isp->dev,
+ "too many atomisp inputs, ignored\n");
+ break;
+ }
+
+ isp->inputs[isp->input_cnt].type = subdevs->type;
+ isp->inputs[isp->input_cnt].port = subdevs->port;
+ isp->inputs[isp->input_cnt].camera = subdev;
+ isp->inputs[isp->input_cnt].sensor_index = 0;
+			/*
+			 * Initialize the subdev frame size; pixel_format can
+			 * then be used to judge whether frame_size holds a
+			 * valid value.
+			 */
+ isp->inputs[isp->input_cnt].frame_size.pixel_format = 0;
+ sensor_pdata = (struct camera_sensor_platform_data *)
+ board_info->platform_data;
+ if (sensor_pdata->get_camera_caps)
+ isp->inputs[isp->input_cnt].camera_caps =
+ sensor_pdata->get_camera_caps();
+ else
+ isp->inputs[isp->input_cnt].camera_caps =
+ atomisp_get_default_camera_caps();
+ sensor_num = isp->inputs[isp->input_cnt]
+ .camera_caps->sensor_num;
+ isp->input_cnt++;
+ for (i = 1; i < sensor_num; i++) {
+ if (isp->input_cnt >= ATOM_ISP_MAX_INPUTS) {
+ dev_warn(isp->dev,
+ "atomisp inputs out of range\n");
+ break;
+ }
+ isp->inputs[isp->input_cnt] =
+ isp->inputs[isp->input_cnt - 1];
+ isp->inputs[isp->input_cnt].sensor_index = i;
+ isp->input_cnt++;
+ }
+ break;
+ case CAMERA_MOTOR:
+ isp->motor = subdev;
+ break;
+ case LED_FLASH:
+ case XENON_FLASH:
+ isp->flash = subdev;
+ break;
+ default:
+ dev_dbg(isp->dev, "unknown subdev probed\n");
+ break;
+ }
+
+ }
+
+	/*
+	 * HACK: Currently the VCM belongs to the primary sensor only, but
+	 * the correct approach would be to ask the platform code which
+	 * sensor owns it.
+	 */
+ if (isp->motor && raw_index >= 0)
+ isp->inputs[raw_index].motor = isp->motor;
+
+	/* Proceed even if no modules are detected (e.g. COS mode with no modules). */
+	if (!isp->inputs[0].camera)
+		dev_warn(isp->dev, "no camera attached or failed to detect\n");
+
+ return atomisp_csi_lane_config(isp);
+}
+
+static void atomisp_unregister_entities(struct atomisp_device *isp)
+{
+ unsigned int i;
+ struct v4l2_subdev *sd, *next;
+
+ for (i = 0; i < isp->num_of_streams; i++)
+ atomisp_subdev_unregister_entities(&isp->asd[i]);
+ atomisp_tpg_unregister_entities(&isp->tpg);
+ atomisp_file_input_unregister_entities(&isp->file_dev);
+ for (i = 0; i < ATOMISP_CAMERA_NR_PORTS; i++)
+ atomisp_mipi_csi2_unregister_entities(&isp->csi2_port[i]);
+
+ list_for_each_entry_safe(sd, next, &isp->v4l2_dev.subdevs, list)
+ v4l2_device_unregister_subdev(sd);
+
+ v4l2_device_unregister(&isp->v4l2_dev);
+ media_device_unregister(&isp->media_dev);
+}
+
+static int atomisp_register_entities(struct atomisp_device *isp)
+{
+ int ret = 0;
+ unsigned int i;
+
+ isp->media_dev.dev = isp->dev;
+
+ strlcpy(isp->media_dev.model, "Intel Atom ISP",
+ sizeof(isp->media_dev.model));
+
+ media_device_init(&isp->media_dev);
+ isp->v4l2_dev.mdev = &isp->media_dev;
+ ret = v4l2_device_register(isp->dev, &isp->v4l2_dev);
+ if (ret < 0) {
+ dev_err(isp->dev, "%s: V4L2 device registration failed (%d)\n",
+ __func__, ret);
+ goto v4l2_device_failed;
+ }
+
+ ret = atomisp_subdev_probe(isp);
+ if (ret < 0)
+ goto csi_and_subdev_probe_failed;
+
+ /* Register internal entities */
+ for (i = 0; i < ATOMISP_CAMERA_NR_PORTS; i++) {
+ ret = atomisp_mipi_csi2_register_entities(&isp->csi2_port[i],
+ &isp->v4l2_dev);
+ if (ret == 0)
+ continue;
+
+ /* error case */
+ dev_err(isp->dev, "failed to register the CSI port: %d\n", i);
+ /* deregister all registered CSI ports */
+ while (i--)
+ atomisp_mipi_csi2_unregister_entities(
+ &isp->csi2_port[i]);
+
+ goto csi_and_subdev_probe_failed;
+ }
+
+ ret =
+ atomisp_file_input_register_entities(&isp->file_dev, &isp->v4l2_dev);
+ if (ret < 0) {
+ dev_err(isp->dev, "atomisp_file_input_register_entities\n");
+ goto file_input_register_failed;
+ }
+
+ ret = atomisp_tpg_register_entities(&isp->tpg, &isp->v4l2_dev);
+ if (ret < 0) {
+ dev_err(isp->dev, "atomisp_tpg_register_entities\n");
+ goto tpg_register_failed;
+ }
+
+ for (i = 0; i < isp->num_of_streams; i++) {
+ struct atomisp_sub_device *asd = &isp->asd[i];
+
+ ret = atomisp_subdev_register_entities(asd, &isp->v4l2_dev);
+ if (ret < 0) {
+ dev_err(isp->dev,
+ "atomisp_subdev_register_entities fail\n");
+ for (; i > 0; i--)
+ atomisp_subdev_unregister_entities(
+ &isp->asd[i - 1]);
+ goto subdev_register_failed;
+ }
+ }
+
+ for (i = 0; i < isp->num_of_streams; i++) {
+ struct atomisp_sub_device *asd = &isp->asd[i];
+
+ init_completion(&asd->init_done);
+
+ asd->delayed_init_workq =
+ alloc_workqueue(isp->v4l2_dev.name, WQ_CPU_INTENSIVE,
+ 1);
+ if (asd->delayed_init_workq == NULL) {
+ dev_err(isp->dev,
+ "Failed to initialize delayed init workq\n");
+ ret = -ENOMEM;
+
+ for (; i > 0; i--)
+ destroy_workqueue(isp->asd[i - 1].
+ delayed_init_workq);
+ goto wq_alloc_failed;
+ }
+ INIT_WORK(&asd->delayed_init_work, atomisp_delayed_init_work);
+ }
+
+ for (i = 0; i < isp->input_cnt; i++) {
+ if (isp->inputs[i].port >= ATOMISP_CAMERA_NR_PORTS) {
+ dev_err(isp->dev, "isp->inputs port %d not supported\n",
+ isp->inputs[i].port);
+ ret = -EINVAL;
+ goto link_failed;
+ }
+ }
+
+ dev_dbg(isp->dev,
+ "FILE_INPUT enable, camera_cnt: %d\n", isp->input_cnt);
+ isp->inputs[isp->input_cnt].type = FILE_INPUT;
+ isp->inputs[isp->input_cnt].port = -1;
+ isp->inputs[isp->input_cnt].camera_caps =
+ atomisp_get_default_camera_caps();
+ isp->inputs[isp->input_cnt++].camera = &isp->file_dev.sd;
+
+ if (isp->input_cnt < ATOM_ISP_MAX_INPUTS) {
+ dev_dbg(isp->dev,
+ "TPG detected, camera_cnt: %d\n", isp->input_cnt);
+ isp->inputs[isp->input_cnt].type = TEST_PATTERN;
+ isp->inputs[isp->input_cnt].port = -1;
+ isp->inputs[isp->input_cnt].camera_caps =
+ atomisp_get_default_camera_caps();
+ isp->inputs[isp->input_cnt++].camera = &isp->tpg.sd;
+ } else {
+ dev_warn(isp->dev, "too many atomisp inputs, TPG ignored.\n");
+ }
+
+ ret = v4l2_device_register_subdev_nodes(&isp->v4l2_dev);
+ if (ret < 0)
+ goto link_failed;
+
+ return media_device_register(&isp->media_dev);
+
+link_failed:
+ for (i = 0; i < isp->num_of_streams; i++)
+ destroy_workqueue(isp->asd[i].
+ delayed_init_workq);
+wq_alloc_failed:
+ for (i = 0; i < isp->num_of_streams; i++)
+ atomisp_subdev_unregister_entities(
+ &isp->asd[i]);
+subdev_register_failed:
+ atomisp_tpg_unregister_entities(&isp->tpg);
+tpg_register_failed:
+ atomisp_file_input_unregister_entities(&isp->file_dev);
+file_input_register_failed:
+ for (i = 0; i < ATOMISP_CAMERA_NR_PORTS; i++)
+ atomisp_mipi_csi2_unregister_entities(&isp->csi2_port[i]);
+csi_and_subdev_probe_failed:
+ v4l2_device_unregister(&isp->v4l2_dev);
+v4l2_device_failed:
+ media_device_unregister(&isp->media_dev);
+ media_device_cleanup(&isp->media_dev);
+ return ret;
+}
+
+static int atomisp_initialize_modules(struct atomisp_device *isp)
+{
+ int ret;
+
+ ret = atomisp_mipi_csi2_init(isp);
+ if (ret < 0) {
+ dev_err(isp->dev, "mipi csi2 initialization failed\n");
+ goto error_mipi_csi2;
+ }
+
+ ret = atomisp_file_input_init(isp);
+ if (ret < 0) {
+ dev_err(isp->dev,
+ "file input device initialization failed\n");
+ goto error_file_input;
+ }
+
+ ret = atomisp_tpg_init(isp);
+ if (ret < 0) {
+ dev_err(isp->dev, "tpg initialization failed\n");
+ goto error_tpg;
+ }
+
+ ret = atomisp_subdev_init(isp);
+ if (ret < 0) {
+ dev_err(isp->dev, "ISP subdev initialization failed\n");
+ goto error_isp_subdev;
+ }
+
+
+ return 0;
+
+error_isp_subdev:
+error_tpg:
+ atomisp_tpg_cleanup(isp);
+error_file_input:
+ atomisp_file_input_cleanup(isp);
+error_mipi_csi2:
+ atomisp_mipi_csi2_cleanup(isp);
+ return ret;
+}
+
+static void atomisp_uninitialize_modules(struct atomisp_device *isp)
+{
+ atomisp_tpg_cleanup(isp);
+ atomisp_file_input_cleanup(isp);
+ atomisp_mipi_csi2_cleanup(isp);
+}
+
+const struct firmware *
+atomisp_load_firmware(struct atomisp_device *isp)
+{
+ const struct firmware *fw;
+ int rc;
+ char *fw_path = NULL;
+
+ if (skip_fwload)
+ return NULL;
+
+ if (isp->media_dev.driver_version == ATOMISP_CSS_VERSION_21) {
+ if (isp->media_dev.hw_revision ==
+ ((ATOMISP_HW_REVISION_ISP2401 << ATOMISP_HW_REVISION_SHIFT)
+ | ATOMISP_HW_STEPPING_A0))
+ fw_path = "shisp_2401a0_v21.bin";
+
+ if (isp->media_dev.hw_revision ==
+ ((ATOMISP_HW_REVISION_ISP2401_LEGACY << ATOMISP_HW_REVISION_SHIFT)
+ | ATOMISP_HW_STEPPING_A0))
+ fw_path = "shisp_2401a0_legacy_v21.bin";
+
+ if (isp->media_dev.hw_revision ==
+ ((ATOMISP_HW_REVISION_ISP2400 << ATOMISP_HW_REVISION_SHIFT)
+ | ATOMISP_HW_STEPPING_B0))
+ fw_path = "shisp_2400b0_v21.bin";
+ }
+
+ if (!fw_path) {
+ dev_err(isp->dev,
+ "Unsupported driver_version 0x%x, hw_revision 0x%x\n",
+ isp->media_dev.driver_version,
+ isp->media_dev.hw_revision);
+ return NULL;
+ }
+
+ rc = request_firmware(&fw, fw_path, isp->dev);
+ if (rc) {
+ dev_err(isp->dev,
+ "atomisp: Error %d while requesting firmware %s\n",
+ rc, fw_path);
+ return NULL;
+ }
+
+ return fw;
+}
+
+/*
+ * Check the flags the driver was compiled with against the PCI
+ * device. Always returns true for devices other than ISP 2400.
+ */
+static bool is_valid_device(struct pci_dev *dev,
+ const struct pci_device_id *id)
+{
+ unsigned int a0_max_id;
+
+ switch (id->device & ATOMISP_PCI_DEVICE_SOC_MASK) {
+ case ATOMISP_PCI_DEVICE_SOC_MRFLD:
+ a0_max_id = ATOMISP_PCI_REV_MRFLD_A0_MAX;
+ break;
+ case ATOMISP_PCI_DEVICE_SOC_BYT:
+ a0_max_id = ATOMISP_PCI_REV_BYT_A0_MAX;
+ break;
+ default:
+ return true;
+ }
+
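+	/* A0 steppings (revision <= a0_max_id) are not supported */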
+ return dev->revision > a0_max_id;
+}
+
+static int init_atomisp_wdts(struct atomisp_device *isp)
+{
+ int i, err;
+
+ atomic_set(&isp->wdt_work_queued, 0);
+ isp->wdt_work_queue = alloc_workqueue(isp->v4l2_dev.name, 0, 1);
+ if (isp->wdt_work_queue == NULL) {
+ dev_err(isp->dev, "Failed to initialize wdt work queue\n");
+ err = -ENOMEM;
+ goto alloc_fail;
+ }
+ INIT_WORK(&isp->wdt_work, atomisp_wdt_work);
+
+ for (i = 0; i < isp->num_of_streams; i++) {
+ struct atomisp_sub_device *asd = &isp->asd[i];
+#ifndef ISP2401
+ setup_timer(&asd->wdt, atomisp_wdt, (unsigned long)isp);
+#else
+ setup_timer(&asd->video_out_capture.wdt,
+ atomisp_wdt, (unsigned long)&asd->video_out_capture);
+ setup_timer(&asd->video_out_preview.wdt,
+ atomisp_wdt, (unsigned long)&asd->video_out_preview);
+ setup_timer(&asd->video_out_vf.wdt,
+ atomisp_wdt, (unsigned long)&asd->video_out_vf);
+ setup_timer(&asd->video_out_video_capture.wdt,
+ atomisp_wdt,
+ (unsigned long)&asd->video_out_video_capture);
+#endif
+ }
+ return 0;
+alloc_fail:
+ return err;
+}
+
+static struct pci_driver atomisp_pci_driver;
+
+#define ATOM_ISP_PCI_BAR 0
+
+static int atomisp_pci_probe(struct pci_dev *dev,
+ const struct pci_device_id *id)
+{
+ const struct atomisp_platform_data *pdata;
+ struct atomisp_device *isp;
+ unsigned int start;
+ void __iomem *base;
+ int err, val;
+ u32 irq;
+
+ if (!dev) {
+ pr_err("atomisp: error device ptr\n");
+ return -EINVAL;
+ }
+
+ if (!is_valid_device(dev, id))
+ return -ENODEV;
+ /* Pointer to struct device. */
+ atomisp_dev = &dev->dev;
+
+ pdata = atomisp_get_platform_data();
+ if (pdata == NULL)
+ dev_warn(&dev->dev, "no platform data available\n");
+
+ err = pcim_enable_device(dev);
+ if (err) {
+ dev_err(&dev->dev, "Failed to enable CI ISP device (%d)\n",
+ err);
+ return err;
+ }
+
+ start = pci_resource_start(dev, ATOM_ISP_PCI_BAR);
+ dev_dbg(&dev->dev, "start: 0x%x\n", start);
+
+ err = pcim_iomap_regions(dev, 1 << ATOM_ISP_PCI_BAR, pci_name(dev));
+ if (err) {
+ dev_err(&dev->dev, "Failed to I/O memory remapping (%d)\n",
+ err);
+ return err;
+ }
+
+ base = pcim_iomap_table(dev)[ATOM_ISP_PCI_BAR];
+ dev_dbg(&dev->dev, "base: %p\n", base);
+
+ atomisp_io_base = base;
+
+ dev_dbg(&dev->dev, "atomisp_io_base: %p\n", atomisp_io_base);
+
+ isp = devm_kzalloc(&dev->dev, sizeof(struct atomisp_device), GFP_KERNEL);
+ if (!isp) {
+ dev_err(&dev->dev, "Failed to alloc CI ISP structure\n");
+ return -ENOMEM;
+ }
+ isp->pdev = dev;
+ isp->dev = &dev->dev;
+ isp->sw_contex.power_state = ATOM_ISP_POWER_UP;
+ isp->pci_root = pci_get_bus_and_slot(0, 0);
+ if (!isp->pci_root) {
+ dev_err(&dev->dev, "Unable to find PCI host\n");
+ return -ENODEV;
+ }
+ isp->saved_regs.ispmmadr = start;
+
+ rt_mutex_init(&isp->mutex);
+ mutex_init(&isp->streamoff_mutex);
+ spin_lock_init(&isp->lock);
+
+ /* This is not a true PCI device on SoC, so the delay is not needed. */
+ isp->pdev->d3_delay = 0;
+
+ isp->media_dev.driver_version = ATOMISP_CSS_VERSION_21;
+ switch (id->device & ATOMISP_PCI_DEVICE_SOC_MASK) {
+ case ATOMISP_PCI_DEVICE_SOC_MRFLD:
+ isp->media_dev.hw_revision =
+ (ATOMISP_HW_REVISION_ISP2400
+ << ATOMISP_HW_REVISION_SHIFT) |
+ ATOMISP_HW_STEPPING_B0;
+
+ switch (id->device) {
+ case ATOMISP_PCI_DEVICE_SOC_MRFLD_1179:
+ isp->dfs = &dfs_config_merr_1179;
+ break;
+ case ATOMISP_PCI_DEVICE_SOC_MRFLD_117A:
+ isp->dfs = &dfs_config_merr_117a;
+ break;
+ default:
+ isp->dfs = &dfs_config_merr;
+ break;
+ }
+ isp->hpll_freq = HPLL_FREQ_1600MHZ;
+ break;
+ case ATOMISP_PCI_DEVICE_SOC_BYT:
+ isp->media_dev.hw_revision =
+ (ATOMISP_HW_REVISION_ISP2400
+ << ATOMISP_HW_REVISION_SHIFT) |
+ ATOMISP_HW_STEPPING_B0;
+#ifdef FIXME
+ if (INTEL_MID_BOARD(3, TABLET, BYT, BLK, PRO, CRV2) ||
+ INTEL_MID_BOARD(3, TABLET, BYT, BLK, ENG, CRV2)) {
+ isp->dfs = &dfs_config_byt_cr;
+ isp->hpll_freq = HPLL_FREQ_2000MHZ;
+ } else
+#endif
+ {
+ isp->dfs = &dfs_config_byt;
+ isp->hpll_freq = HPLL_FREQ_1600MHZ;
+ }
+ /* HPLL frequency is known to be device-specific, but we don't
+ * have specs yet for exactly how it varies. Default to
+ * BYT-CR but let provisioning set it via EFI variable */
+ isp->hpll_freq = gmin_get_var_int(&dev->dev, "HpllFreq",
+ HPLL_FREQ_2000MHZ);
+
+ /*
+ * For BYT/CHT we put the ISP into D3cold to avoid PCI register
+ * accesses while powered off. Set d3cold_delay to 0 since the
+ * default 100ms is not necessary.
+ */
+ isp->pdev->d3cold_delay = 0;
+ break;
+ case ATOMISP_PCI_DEVICE_SOC_ANN:
+ isp->media_dev.hw_revision = (
+#ifdef ISP2401_NEW_INPUT_SYSTEM
+ ATOMISP_HW_REVISION_ISP2401
+#else
+ ATOMISP_HW_REVISION_ISP2401_LEGACY
+#endif
+ << ATOMISP_HW_REVISION_SHIFT);
+ isp->media_dev.hw_revision |= isp->pdev->revision < 2 ?
+ ATOMISP_HW_STEPPING_A0 : ATOMISP_HW_STEPPING_B0;
+ isp->dfs = &dfs_config_merr;
+ isp->hpll_freq = HPLL_FREQ_1600MHZ;
+ break;
+ case ATOMISP_PCI_DEVICE_SOC_CHT:
+ isp->media_dev.hw_revision = (
+#ifdef ISP2401_NEW_INPUT_SYSTEM
+ ATOMISP_HW_REVISION_ISP2401
+#else
+ ATOMISP_HW_REVISION_ISP2401_LEGACY
+#endif
+ << ATOMISP_HW_REVISION_SHIFT);
+ isp->media_dev.hw_revision |= isp->pdev->revision < 2 ?
+ ATOMISP_HW_STEPPING_A0 : ATOMISP_HW_STEPPING_B0;
+
+ isp->dfs = &dfs_config_cht;
+ isp->pdev->d3cold_delay = 0;
+
+ val = intel_mid_msgbus_read32(CCK_PORT, CCK_FUSE_REG_0);
+ switch (val & CCK_FUSE_HPLL_FREQ_MASK) {
+ case 0x00:
+ isp->hpll_freq = HPLL_FREQ_800MHZ;
+ break;
+ case 0x01:
+ isp->hpll_freq = HPLL_FREQ_1600MHZ;
+ break;
+ case 0x02:
+ isp->hpll_freq = HPLL_FREQ_2000MHZ;
+ break;
+ default:
+ isp->hpll_freq = HPLL_FREQ_1600MHZ;
+ dev_warn(isp->dev,
+ "read HPLL from cck failed.default 1600MHz.\n");
+ }
+ break;
+ default:
+ dev_err(&dev->dev, "un-supported IUNIT device\n");
+ return -ENODEV;
+ }
+
+ dev_info(&dev->dev, "ISP HPLL frequency base = %d MHz\n",
+ isp->hpll_freq);
+
+ isp->max_isr_latency = ATOMISP_MAX_ISR_LATENCY;
+
+ /* Load isp firmware from user space */
+ if (!defer_fw_load) {
+ isp->firmware = atomisp_load_firmware(isp);
+ if (!isp->firmware) {
+ err = -ENOENT;
+ goto load_fw_fail;
+ }
+
+ err = atomisp_css_check_firmware_version(isp);
+ if (err) {
+ dev_dbg(&dev->dev, "Firmware version check failed\n");
+ goto fw_validation_fail;
+ }
+ }
+
+ pci_set_master(dev);
+ pci_set_drvdata(dev, isp);
+
+ err = pci_enable_msi(dev);
+ if (err) {
+ dev_err(&dev->dev, "Failed to enable msi (%d)\n", err);
+ goto enable_msi_fail;
+ }
+
+ atomisp_msi_irq_init(isp, dev);
+
+ pm_qos_add_request(&isp->pm_qos, PM_QOS_CPU_DMA_LATENCY,
+ PM_QOS_DEFAULT_VALUE);
+
+ /*
+ * For MRFLD, software/firmware needs to write 1 to bit 0 of
+ * the register at CSI_RECEIVER_SELECTION_REG to enable the SH
+ * CSI backend. Writing 0 would enable the Arasan CSI backend,
+ * which has bugs (such as sightings 4567697 and 4567699) and
+ * will be removed in B0.
+ */
+ atomisp_store_uint32(MRFLD_CSI_RECEIVER_SELECTION_REG, 1);
+
+ if ((id->device & ATOMISP_PCI_DEVICE_SOC_MASK) ==
+ ATOMISP_PCI_DEVICE_SOC_MRFLD) {
+ u32 csi_afe_trim;
+
+ /*
+ * Workaround for imbalance data eye issue which is observed
+ * on TNG B0.
+ */
+ pci_read_config_dword(dev, MRFLD_PCI_CSI_AFE_TRIM_CONTROL,
+ &csi_afe_trim);
+ csi_afe_trim &= ~((MRFLD_PCI_CSI_HSRXCLKTRIM_MASK <<
+ MRFLD_PCI_CSI1_HSRXCLKTRIM_SHIFT) |
+ (MRFLD_PCI_CSI_HSRXCLKTRIM_MASK <<
+ MRFLD_PCI_CSI2_HSRXCLKTRIM_SHIFT) |
+ (MRFLD_PCI_CSI_HSRXCLKTRIM_MASK <<
+ MRFLD_PCI_CSI3_HSRXCLKTRIM_SHIFT));
+ csi_afe_trim |= (MRFLD_PCI_CSI1_HSRXCLKTRIM <<
+ MRFLD_PCI_CSI1_HSRXCLKTRIM_SHIFT) |
+ (MRFLD_PCI_CSI2_HSRXCLKTRIM <<
+ MRFLD_PCI_CSI2_HSRXCLKTRIM_SHIFT) |
+ (MRFLD_PCI_CSI3_HSRXCLKTRIM <<
+ MRFLD_PCI_CSI3_HSRXCLKTRIM_SHIFT);
+ pci_write_config_dword(dev, MRFLD_PCI_CSI_AFE_TRIM_CONTROL,
+ csi_afe_trim);
+ }
+
+ err = atomisp_initialize_modules(isp);
+ if (err < 0) {
+ dev_err(&dev->dev, "atomisp_initialize_modules (%d)\n", err);
+ goto initialize_modules_fail;
+ }
+
+ err = atomisp_register_entities(isp);
+ if (err < 0) {
+ dev_err(&dev->dev, "atomisp_register_entities failed (%d)\n",
+ err);
+ goto register_entities_fail;
+ }
+ err = atomisp_create_pads_links(isp);
+ if (err < 0)
+ goto register_entities_fail;
+ /* init atomisp wdts */
+ if (init_atomisp_wdts(isp) != 0)
+ goto wdt_work_queue_fail;
+
+ /* save the iunit context only once after all the values are init'ed. */
+ atomisp_save_iunit_reg(isp);
+
+ pm_runtime_put_noidle(&dev->dev);
+ pm_runtime_allow(&dev->dev);
+
+ hmm_init_mem_stat(repool_pgnr, dypool_enable, dypool_pgnr);
+ err = hmm_pool_register(repool_pgnr, HMM_POOL_TYPE_RESERVED);
+ if (err) {
+ dev_err(&dev->dev, "Failed to register reserved memory pool.\n");
+ goto hmm_pool_fail;
+ }
+
+ /* Init ISP memory management */
+ hmm_init();
+
+ err = devm_request_threaded_irq(&dev->dev, dev->irq,
+ atomisp_isr, atomisp_isr_thread,
+ IRQF_SHARED, "isp_irq", isp);
+ if (err) {
+ dev_err(&dev->dev, "Failed to request irq (%d)\n", err);
+ goto request_irq_fail;
+ }
+
+ /* Load firmware into ISP memory */
+ if (!defer_fw_load) {
+ err = atomisp_css_load_firmware(isp);
+ if (err) {
+ dev_err(&dev->dev, "Failed to init css.\n");
+ goto css_init_fail;
+ }
+ } else {
+ dev_dbg(&dev->dev, "Skip css init.\n");
+ }
+ /* Clear FW image from memory */
+ release_firmware(isp->firmware);
+ isp->firmware = NULL;
+ isp->css_env.isp_css_fw.data = NULL;
+
+ atomisp_drvfs_init(&atomisp_pci_driver, isp);
+
+ return 0;
+
+css_init_fail:
+ devm_free_irq(&dev->dev, dev->irq, isp);
+request_irq_fail:
+ hmm_cleanup();
+ hmm_pool_unregister(HMM_POOL_TYPE_RESERVED);
+hmm_pool_fail:
+ destroy_workqueue(isp->wdt_work_queue);
+wdt_work_queue_fail:
+ atomisp_acc_cleanup(isp);
+ atomisp_unregister_entities(isp);
+register_entities_fail:
+ atomisp_uninitialize_modules(isp);
+initialize_modules_fail:
+ pm_qos_remove_request(&isp->pm_qos);
+ atomisp_msi_irq_uninit(isp, dev);
+enable_msi_fail:
+fw_validation_fail:
+ release_firmware(isp->firmware);
+load_fw_fail:
+ /*
+ * Switch off ISP, as keeping it powered on would prevent
+ * reaching S0ix states.
+ *
+ * The following lines have been copied from atomisp suspend path
+ */
+
+ pci_read_config_dword(dev, PCI_INTERRUPT_CTRL, &irq);
+ irq = irq & 1 << INTR_IIR;
+ pci_write_config_dword(dev, PCI_INTERRUPT_CTRL, irq);
+
+ pci_read_config_dword(dev, PCI_INTERRUPT_CTRL, &irq);
+ irq &= ~(1 << INTR_IER);
+ pci_write_config_dword(dev, PCI_INTERRUPT_CTRL, irq);
+
+ atomisp_msi_irq_uninit(isp, dev);
+
+ atomisp_ospm_dphy_down(isp);
+
+ /* Address later when we worry about the ...field chips */
+ if (IS_ENABLED(CONFIG_PM) && atomisp_mrfld_power_down(isp))
+ dev_err(&dev->dev, "Failed to switch off ISP\n");
+ pci_dev_put(isp->pci_root);
+ return err;
+}
+
+static void atomisp_pci_remove(struct pci_dev *dev)
+{
+ struct atomisp_device *isp = pci_get_drvdata(dev);
+
+ atomisp_drvfs_exit();
+
+ atomisp_acc_cleanup(isp);
+
+ atomisp_css_unload_firmware(isp);
+ hmm_cleanup();
+
+ pm_runtime_forbid(&dev->dev);
+ pm_runtime_get_noresume(&dev->dev);
+ pm_qos_remove_request(&isp->pm_qos);
+
+ atomisp_msi_irq_uninit(isp, dev);
+ pci_dev_put(isp->pci_root);
+
+ atomisp_unregister_entities(isp);
+
+ destroy_workqueue(isp->wdt_work_queue);
+ atomisp_file_input_cleanup(isp);
+
+ release_firmware(isp->firmware);
+
+ hmm_pool_unregister(HMM_POOL_TYPE_RESERVED);
+}
+
+static const struct pci_device_id atomisp_pci_tbl[] = {
+#if defined(ISP2400) || defined(ISP2400B0)
+ /* Merrifield */
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x1178)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x1179)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x117a)},
+ /* Baytrail */
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0f38)},
+#elif defined(ISP2401)
+ /* Anniedale (Merrifield+ / Moorefield) */
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x1478)},
+ /* Cherrytrail */
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x22b8)},
+#endif
+ {0,}
+};
+
+MODULE_DEVICE_TABLE(pci, atomisp_pci_tbl);
+
+static const struct dev_pm_ops atomisp_pm_ops = {
+ .runtime_suspend = atomisp_runtime_suspend,
+ .runtime_resume = atomisp_runtime_resume,
+ .suspend = atomisp_suspend,
+ .resume = atomisp_resume,
+};
+
+static struct pci_driver atomisp_pci_driver = {
+ .driver = {
+ .pm = &atomisp_pm_ops,
+ },
+ .name = "atomisp-isp2",
+ .id_table = atomisp_pci_tbl,
+ .probe = atomisp_pci_probe,
+ .remove = atomisp_pci_remove,
+};
+
+static int __init atomisp_init(void)
+{
+ return pci_register_driver(&atomisp_pci_driver);
+}
+
+static void __exit atomisp_exit(void)
+{
+ pci_unregister_driver(&atomisp_pci_driver);
+}
+
+module_init(atomisp_init);
+module_exit(atomisp_exit);
+
+MODULE_AUTHOR("Wen Wang <wen.w.wang@intel.com>");
+MODULE_AUTHOR("Xiaolin Zhang <xiaolin.zhang@intel.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Intel ATOM Platform ISP Driver");
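The probe above relies on the standard goto-based unwinding idiom: each failure label releases only what was successfully acquired before it, in reverse order. A minimal stand-alone sketch of that pattern (not part of the patch; acquire_a/acquire_b/acquire_c and release_a/release_b are hypothetical helpers):

/* Sketch of the goto-unwind pattern used by atomisp_pci_probe().
 * All helpers here are hypothetical placeholders.
 */
static int probe_sketch(void)
{
	int err;

	err = acquire_a();
	if (err)
		return err;		/* nothing acquired yet, just bail */

	err = acquire_b();
	if (err)
		goto err_release_a;	/* undo A only */

	err = acquire_c();
	if (err)
		goto err_release_b;	/* undo B, then fall through to A */

	return 0;			/* everything succeeded */

err_release_b:
	release_b();
err_release_a:
	release_a();
	return err;
}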
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_v4l2.h b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_v4l2.h
new file mode 100644
index 000000000000..191b2e57a810
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_v4l2.h
@@ -0,0 +1,44 @@
+/*
+ * Support for Medfield PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
+ *
+ * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+
+#ifndef __ATOMISP_V4L2_H__
+#define __ATOMISP_V4L2_H__
+
+struct atomisp_video_pipe;
+struct atomisp_acc_pipe;
+struct v4l2_device;
+struct atomisp_device;
+struct firmware;
+
+int atomisp_video_init(struct atomisp_video_pipe *video, const char *name);
+void atomisp_acc_init(struct atomisp_acc_pipe *video, const char *name);
+void atomisp_video_unregister(struct atomisp_video_pipe *video);
+int atomisp_video_register(struct atomisp_video_pipe *video,
+ struct v4l2_device *vdev);
+void atomisp_acc_unregister(struct atomisp_acc_pipe *video);
+int atomisp_acc_register(struct atomisp_acc_pipe *video,
+ struct v4l2_device *vdev);
+const struct firmware *atomisp_load_firmware(struct atomisp_device *isp);
+int atomisp_csi_lane_config(struct atomisp_device *isp);
+
+#endif /* __ATOMISP_V4L2_H__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/Makefile b/drivers/staging/media/atomisp/pci/atomisp2/css2400/Makefile
new file mode 100644
index 000000000000..04defaafa02c
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/Makefile
@@ -0,0 +1,4 @@
+ccflags-y += -DISP2400B0
+ISP2400B0 := y
+
+include $(srctree)/$(src)/../Makefile.common
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/base/circbuf/interface/ia_css_circbuf.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/base/circbuf/interface/ia_css_circbuf.h
new file mode 100644
index 000000000000..766218ed3649
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/base/circbuf/interface/ia_css_circbuf.h
@@ -0,0 +1,377 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _IA_CSS_CIRCBUF_H
+#define _IA_CSS_CIRCBUF_H
+
+#include <sp.h>
+#include <type_support.h>
+#include <math_support.h>
+#include <storage_class.h>
+#include <assert_support.h>
+#include <platform_support.h>
+#include "ia_css_circbuf_comm.h"
+#include "ia_css_circbuf_desc.h"
+
+/****************************************************************
+ *
+ * Data structures.
+ *
+ ****************************************************************/
+/**
+ * @brief Data structure for the circular buffer.
+ */
+typedef struct ia_css_circbuf_s ia_css_circbuf_t;
+struct ia_css_circbuf_s {
+ ia_css_circbuf_desc_t *desc; /* Pointer to the descriptor of the circbuf */
+ ia_css_circbuf_elem_t *elems; /* an array of elements */
+};
+
+/**
+ * @brief Create the circular buffer.
+ *
+ * @param cb The pointer to the circular buffer.
+ * @param elems An array of elements.
+ * @param desc The descriptor, whose size must have been set with ia_css_circbuf_desc_init().
+ */
+STORAGE_CLASS_EXTERN void ia_css_circbuf_create(
+ ia_css_circbuf_t *cb,
+ ia_css_circbuf_elem_t *elems,
+ ia_css_circbuf_desc_t *desc);
+
+/**
+ * @brief Destroy the circular buffer.
+ *
+ * @param cb The pointer to the circular buffer.
+ */
+STORAGE_CLASS_EXTERN void ia_css_circbuf_destroy(
+ ia_css_circbuf_t *cb);
+
+/**
+ * @brief Pop a value out of the circular buffer.
+ * Get a value at the head of the circular buffer.
+ * The user should call "ia_css_circbuf_is_empty()"
+ * to avoid accessing an empty buffer.
+ *
+ * @param cb The pointer to the circular buffer.
+ *
+ * @return the pop-out value.
+ */
+STORAGE_CLASS_EXTERN uint32_t ia_css_circbuf_pop(
+ ia_css_circbuf_t *cb);
+
+/**
+ * @brief Extract a value out of the circular buffer.
+ * Get a value at an arbitrary position in the circular
+ * buffer. The user should call "ia_css_circbuf_is_empty()"
+ * to avoid accessing an empty buffer.
+ *
+ * @param cb The pointer to the circular buffer.
+ * @param offset The offset from "start" to the target position.
+ *
+ * @return the extracted value.
+ */
+STORAGE_CLASS_EXTERN uint32_t ia_css_circbuf_extract(
+ ia_css_circbuf_t *cb,
+ int offset);
+
+/****************************************************************
+ *
+ * Inline functions.
+ *
+ ****************************************************************/
+/**
+ * @brief Set the "val" field in the element.
+ *
+ * @param elem The pointer to the element.
+ * @param val The value to be set.
+ */
+STORAGE_CLASS_INLINE void ia_css_circbuf_elem_set_val(
+ ia_css_circbuf_elem_t *elem,
+ uint32_t val)
+{
+ OP___assert(elem != NULL);
+
+ elem->val = val;
+}
+
+/**
+ * @brief Initialize the element.
+ *
+ * @param elem The pointer to the element.
+ */
+STORAGE_CLASS_INLINE void ia_css_circbuf_elem_init(
+ ia_css_circbuf_elem_t *elem)
+{
+ OP___assert(elem != NULL);
+ ia_css_circbuf_elem_set_val(elem, 0);
+}
+
+/**
+ * @brief Copy an element.
+ *
+ * @param src The element as the copy source.
+ * @param dest The element as the copy destination.
+ */
+STORAGE_CLASS_INLINE void ia_css_circbuf_elem_cpy(
+ ia_css_circbuf_elem_t *src,
+ ia_css_circbuf_elem_t *dest)
+{
+ OP___assert(src != NULL);
+ OP___assert(dest != NULL);
+
+ ia_css_circbuf_elem_set_val(dest, src->val);
+}
+
+/**
+ * @brief Get position in the circular buffer.
+ *
+ * @param cb The pointer to the circular buffer.
+ * @param base The base position.
+ * @param offset The offset.
+ *
+ * @return the position at offset.
+ */
+STORAGE_CLASS_INLINE uint8_t ia_css_circbuf_get_pos_at_offset(
+ ia_css_circbuf_t *cb,
+ uint32_t base,
+ int offset)
+{
+ uint8_t dest;
+
+ OP___assert(cb != NULL);
+ OP___assert(cb->desc != NULL);
+ OP___assert(cb->desc->size > 0);
+
+ /* step 1: adjust the offset */
+ while (offset < 0) {
+ offset += cb->desc->size;
+ }
+
+ /* step 2: shift and round by the upper limit */
+ dest = OP_std_modadd(base, offset, cb->desc->size);
+
+ return dest;
+}
+
+/**
+ * @brief Get the offset between two positions in the circular buffer.
+ * Get the offset from the source position to the terminal position,
+ * along the direction in which the new elements come in.
+ *
+ * @param cb The pointer to the circular buffer.
+ * @param src_pos The source position.
+ * @param dest_pos The terminal position.
+ *
+ * @return the offset.
+ */
+STORAGE_CLASS_INLINE int ia_css_circbuf_get_offset(
+ ia_css_circbuf_t *cb,
+ uint32_t src_pos,
+ uint32_t dest_pos)
+{
+ int offset;
+
+ OP___assert(cb != NULL);
+ OP___assert(cb->desc != NULL);
+
+ offset = (int)(dest_pos - src_pos);
+ offset += (offset < 0) ? cb->desc->size : 0;
+
+ return offset;
+}
+
+/**
+ * @brief Get the maximum number of elements.
+ *
+ * @param cb The pointer to the circular buffer.
+ *
+ * @return the maximum number of elements.
+ *
+ * TODO: Test this API.
+ */
+STORAGE_CLASS_INLINE uint32_t ia_css_circbuf_get_size(
+ ia_css_circbuf_t *cb)
+{
+ OP___assert(cb != NULL);
+ OP___assert(cb->desc != NULL);
+
+ return cb->desc->size;
+}
+
+/**
+ * @brief Get the number of available elements.
+ *
+ * @param cb The pointer to the circular buffer.
+ *
+ * @return the number of available elements.
+ */
+STORAGE_CLASS_INLINE uint32_t ia_css_circbuf_get_num_elems(
+ ia_css_circbuf_t *cb)
+{
+ int num;
+
+ OP___assert(cb != NULL);
+ OP___assert(cb->desc != NULL);
+
+ num = ia_css_circbuf_get_offset(cb, cb->desc->start, cb->desc->end);
+
+ return (uint32_t)num;
+}
+
+/**
+ * @brief Test if the circular buffer is empty.
+ *
+ * @param cb The pointer to the circular buffer.
+ *
+ * @return
+ * - true when it is empty.
+ * - false when it is not empty.
+ */
+STORAGE_CLASS_INLINE bool ia_css_circbuf_is_empty(
+ ia_css_circbuf_t *cb)
+{
+ OP___assert(cb != NULL);
+ OP___assert(cb->desc != NULL);
+
+ return ia_css_circbuf_desc_is_empty(cb->desc);
+}
+
+/**
+ * @brief Test if the circular buffer is full.
+ *
+ * @param cb The pointer to the circular buffer.
+ *
+ * @return
+ * - true when it is full.
+ * - false when it is not full.
+ */
+STORAGE_CLASS_INLINE bool ia_css_circbuf_is_full(ia_css_circbuf_t *cb)
+{
+ OP___assert(cb != NULL);
+ OP___assert(cb->desc != NULL);
+
+ return ia_css_circbuf_desc_is_full(cb->desc);
+}
+
+/**
+ * @brief Write a new element into the circular buffer.
+ * Write a new element WITHOUT checking whether the
+ * circular buffer is full or not. So it also overwrites
+ * the oldest element when the buffer is full.
+ *
+ * @param cb The pointer to the circular buffer.
+ * @param elem The new element.
+ */
+STORAGE_CLASS_INLINE void ia_css_circbuf_write(
+ ia_css_circbuf_t *cb,
+ ia_css_circbuf_elem_t elem)
+{
+ OP___assert(cb != NULL);
+ OP___assert(cb->desc != NULL);
+
+ /* Cannot continue as the queue is full */
+ assert(!ia_css_circbuf_is_full(cb));
+
+ ia_css_circbuf_elem_cpy(&elem, &cb->elems[cb->desc->end]);
+
+ cb->desc->end = ia_css_circbuf_get_pos_at_offset(cb, cb->desc->end, 1);
+}
+
+/**
+ * @brief Push a value in the circular buffer.
+ * Put a new value at the tail of the circular buffer.
+ * The user should call "ia_css_circbuf_is_full()"
+ * to avoid accessing a full buffer.
+ *
+ * @param cb The pointer to the circular buffer.
+ * @param val The value to be pushed in.
+ */
+STORAGE_CLASS_INLINE void ia_css_circbuf_push(
+ ia_css_circbuf_t *cb,
+ uint32_t val)
+{
+ ia_css_circbuf_elem_t elem;
+
+ OP___assert(cb != NULL);
+
+ /* set up an element */
+ ia_css_circbuf_elem_init(&elem);
+ ia_css_circbuf_elem_set_val(&elem, val);
+
+ /* write the element into the buffer */
+ ia_css_circbuf_write(cb, elem);
+}
+
+/**
+ * @brief Get the number of free elements.
+ *
+ * @param cb The pointer to the circular buffer.
+ *
+ * @return: The number of free elements.
+ */
+STORAGE_CLASS_INLINE uint32_t ia_css_circbuf_get_free_elems(
+ ia_css_circbuf_t *cb)
+{
+ OP___assert(cb != NULL);
+ OP___assert(cb->desc != NULL);
+
+ return ia_css_circbuf_desc_get_free_elems(cb->desc);
+}
+
+/**
+ * @brief Peek an element in Circular Buffer.
+ *
+ * @param cb The pointer to the circular buffer.
+ * @param offset Offset to the element.
+ *
+ * @return the element's value.
+ */
+STORAGE_CLASS_EXTERN uint32_t ia_css_circbuf_peek(
+ ia_css_circbuf_t *cb,
+ int offset);
+
+/**
+ * @brief Get an element in Circular Buffer.
+ *
+ * @param cb The pointer to the circular buffer.
+ * @param offset Offset to the element.
+ *
+ * @return the element's value.
+ */
+STORAGE_CLASS_EXTERN uint32_t ia_css_circbuf_peek_from_start(
+ ia_css_circbuf_t *cb,
+ int offset);
+
+/**
+ * @brief Increase the size of a circular buffer.
+ * Use with CAUTION: this was added only to support/fix an issue
+ * with increasing the size for the tagger.
+ *
+ * @param cb The pointer to the circular buffer.
+ * @param sz_delta The delta by which to increase the size.
+ * @param elems (optional) Pointer to the additional elements.
+ * The cb element array size is not increased dynamically;
+ * the new elements must be appended to an existing cb element
+ * array whose max_size is >= the new size.
+ *
+ * @return true when the size was successfully increased,
+ * false on failure.
+ */
+STORAGE_CLASS_EXTERN bool ia_css_circbuf_increase_size(
+ ia_css_circbuf_t *cb,
+ unsigned int sz_delta,
+ ia_css_circbuf_elem_t *elems);
+
+#endif /*_IA_CSS_CIRCBUF_H */
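As a quick orientation for the API declared above, here is a hedged host-side usage sketch (not part of the patch; the buffer size and values are arbitrary, and the caller is assumed to own both the descriptor and the element storage, as the create/destroy comments imply):

#include "ia_css_circbuf.h"

#define MY_CB_SIZE 8	/* arbitrary example size (one slot stays unused) */

static ia_css_circbuf_desc_t my_desc;
static ia_css_circbuf_elem_t my_elems[MY_CB_SIZE];
static ia_css_circbuf_t my_cb;

static void circbuf_usage_sketch(void)
{
	uint32_t v = 0;

	/* size the descriptor, then bind it and the element storage */
	ia_css_circbuf_desc_init(&my_desc, MY_CB_SIZE);
	ia_css_circbuf_create(&my_cb, my_elems, &my_desc);

	if (!ia_css_circbuf_is_full(&my_cb))
		ia_css_circbuf_push(&my_cb, 42);

	if (!ia_css_circbuf_is_empty(&my_cb))
		v = ia_css_circbuf_pop(&my_cb);	/* v == 42 */

	ia_css_circbuf_destroy(&my_cb);
	(void)v;
}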
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/base/circbuf/interface/ia_css_circbuf_comm.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/base/circbuf/interface/ia_css_circbuf_comm.h
new file mode 100644
index 000000000000..3fc0330b9526
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/base/circbuf/interface/ia_css_circbuf_comm.h
@@ -0,0 +1,56 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _IA_CSS_CIRCBUF_COMM_H
+#define _IA_CSS_CIRCBUF_COMM_H
+
+#include <type_support.h> /* uint8_t, uint32_t */
+
+#define IA_CSS_CIRCBUF_PADDING 1 /* The circular buffer is implemented in a lock-less manner: the
+ * head and tail can advance independently without any locks.
+ * To achieve this, one extra buffer element is needed to tell the
+ * queue-full and queue-empty conditions apart (the tail trails the
+ * head for full and equals the head for empty). This leaves one
+ * buffer element unavailable for use.
+ */
+
+/****************************************************************
+ *
+ * Portable Data structures
+ *
+ ****************************************************************/
+/**
+ * @brief Data structure for the circular descriptor.
+ */
+typedef struct ia_css_circbuf_desc_s ia_css_circbuf_desc_t;
+struct ia_css_circbuf_desc_s {
+ uint8_t size; /* the maximum number of elements*/
+ uint8_t step; /* number of bytes per element */
+ uint8_t start; /* index of the oldest element */
+ uint8_t end; /* index at which to write the new element */
+};
+#define SIZE_OF_IA_CSS_CIRCBUF_DESC_S_STRUCT \
+ (4 * sizeof(uint8_t))
+
+/**
+ * @brief Data structure for the circular buffer element.
+ */
+typedef struct ia_css_circbuf_elem_s ia_css_circbuf_elem_t;
+struct ia_css_circbuf_elem_s {
+ uint32_t val; /* the value stored in the element */
+};
+#define SIZE_OF_IA_CSS_CIRCBUF_ELEM_S_STRUCT \
+ (sizeof(uint32_t))
+
+#endif /*_IA_CSS_CIRCBUF_COMM_H*/
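A short restatement of the IA_CSS_CIRCBUF_PADDING comment in plain C may help (a sketch only, not part of the patch; the real implementation uses OP_std_modadd rather than the % operator):

#include <stdbool.h>
#include "ia_css_circbuf_comm.h"

/* Plain-C restatement of the full/empty rules for the descriptor. */
static bool desc_is_empty(const ia_css_circbuf_desc_t *d)
{
	return d->end == d->start;
}

static bool desc_is_full(const ia_css_circbuf_desc_t *d)
{
	return ((d->end + 1) % d->size) == d->start;
}

/* With size = 4:  start = 1, end = 1 -> empty
 *                 start = 1, end = 0 -> full, holding 3 elements.
 * The one slot that can never be filled is what IA_CSS_CIRCBUF_PADDING
 * accounts for.
 */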
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/base/circbuf/interface/ia_css_circbuf_desc.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/base/circbuf/interface/ia_css_circbuf_desc.h
new file mode 100644
index 000000000000..a8447d409c31
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/base/circbuf/interface/ia_css_circbuf_desc.h
@@ -0,0 +1,170 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _IA_CSS_CIRCBUF_DESC_H_
+#define _IA_CSS_CIRCBUF_DESC_H_
+
+#include <type_support.h>
+#include <math_support.h>
+#include <storage_class.h>
+#include <platform_support.h>
+#include <sp.h>
+#include "ia_css_circbuf_comm.h"
+/****************************************************************
+ *
+ * Inline functions.
+ *
+ ****************************************************************/
+/**
+ * @brief Test if the circular buffer is empty.
+ *
+ * @param cb_desc The pointer to the circular buffer descriptor.
+ *
+ * @return
+ * - true when it is empty.
+ * - false when it is not empty.
+ */
+STORAGE_CLASS_INLINE bool ia_css_circbuf_desc_is_empty(
+ ia_css_circbuf_desc_t *cb_desc)
+{
+ OP___assert(cb_desc != NULL);
+ return (cb_desc->end == cb_desc->start);
+}
+
+/**
+ * @brief Test if the circular buffer descriptor is full.
+ *
+ * @param cb_desc The pointer to the circular buffer
+ * descriptor.
+ *
+ * @return
+ * - true when it is full.
+ * - false when it is not full.
+ */
+STORAGE_CLASS_INLINE bool ia_css_circbuf_desc_is_full(
+ ia_css_circbuf_desc_t *cb_desc)
+{
+ OP___assert(cb_desc != NULL);
+ return (OP_std_modadd(cb_desc->end, 1, cb_desc->size) == cb_desc->start);
+}
+
+/**
+ * @brief Initialize the circular buffer descriptor
+ *
+ * @param cb_desc The pointer circular buffer descriptor
+ * @param size The size of the circular buffer
+ */
+STORAGE_CLASS_INLINE void ia_css_circbuf_desc_init(
+ ia_css_circbuf_desc_t *cb_desc,
+ int8_t size)
+{
+ OP___assert(cb_desc != NULL);
+ cb_desc->size = size;
+}
+
+/**
+ * @brief Get a position in the circular buffer descriptor.
+ *
+ * @param cb The pointer to the circular buffer descriptor.
+ * @param base The base position.
+ * @param offset The offset.
+ *
+ * @return the position in the circular buffer descriptor.
+ */
+STORAGE_CLASS_INLINE uint8_t ia_css_circbuf_desc_get_pos_at_offset(
+ ia_css_circbuf_desc_t *cb_desc,
+ uint32_t base,
+ int offset)
+{
+ uint8_t dest;
+ OP___assert(cb_desc != NULL);
+ OP___assert(cb_desc->size > 0);
+
+ /* step 1: adjust the offset */
+ while (offset < 0) {
+ offset += cb_desc->size;
+ }
+
+ /* step 2: shift and round by the upper limit */
+ dest = OP_std_modadd(base, offset, cb_desc->size);
+
+ return dest;
+}
+
+/**
+ * @brief Get the offset between two positions in the circular buffer
+ * descriptor.
+ * Get the offset from the source position to the terminal position,
+ * along the direction in which the new elements come in.
+ *
+ * @param cb_desc The pointer to the circular buffer descriptor.
+ * @param src_pos The source position.
+ * @param dest_pos The terminal position.
+ *
+ * @return the offset.
+ */
+STORAGE_CLASS_INLINE int ia_css_circbuf_desc_get_offset(
+ ia_css_circbuf_desc_t *cb_desc,
+ uint32_t src_pos,
+ uint32_t dest_pos)
+{
+ int offset;
+ OP___assert(cb_desc != NULL);
+
+ offset = (int)(dest_pos - src_pos);
+ offset += (offset < 0) ? cb_desc->size : 0;
+
+ return offset;
+}
+
+/**
+ * @brief Get the number of available elements.
+ *
+ * @param cb_desc The pointer to the circular buffer.
+ *
+ * @return The number of available elements.
+ */
+STORAGE_CLASS_INLINE uint32_t ia_css_circbuf_desc_get_num_elems(
+ ia_css_circbuf_desc_t *cb_desc)
+{
+ int num;
+ OP___assert(cb_desc != NULL);
+
+ num = ia_css_circbuf_desc_get_offset(cb_desc,
+ cb_desc->start,
+ cb_desc->end);
+
+ return (uint32_t)num;
+}
+
+/**
+ * @brief Get the number of free elements.
+ *
+ * @param cb_desc The pointer to the circular buffer descriptor.
+ *
+ * @return: The number of free elements.
+ */
+STORAGE_CLASS_INLINE uint32_t ia_css_circbuf_desc_get_free_elems(
+ ia_css_circbuf_desc_t *cb_desc)
+{
+ uint32_t num;
+ OP___assert(cb_desc != NULL);
+
+ num = ia_css_circbuf_desc_get_offset(cb_desc,
+ cb_desc->start,
+ cb_desc->end);
+
+ return (cb_desc->size - num);
+}
+#endif /*_IA_CSS_CIRCBUF_DESC_H_ */
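To make the wrap-around arithmetic above concrete, here is a small self-checking sketch using plain modular arithmetic in place of OP_std_modadd (not part of the patch; values are illustrative):

#include <assert.h>

static void circbuf_desc_offset_example(void)
{
	const int size = 5;			/* illustrative buffer size */

	/* position arithmetic wraps around the end of the buffer */
	assert((4 + 2) % size == 1);		/* get_pos_at_offset(base 4, offset 2)  */
	assert((1 + (-3 + size)) % size == 3);	/* get_pos_at_offset(base 1, offset -3) */

	/* offsets are measured along the direction new elements are written */
	assert(((1 - 3) + size) % size == 3);	/* get_offset(src 3, dest 1) */
}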
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/base/circbuf/src/circbuf.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/base/circbuf/src/circbuf.c
new file mode 100644
index 000000000000..19bae1610fb6
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/base/circbuf/src/circbuf.c
@@ -0,0 +1,321 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "ia_css_circbuf.h"
+
+#include <assert_support.h>
+
+/**********************************************************************
+ *
+ * Forward declarations.
+ *
+ **********************************************************************/
+/**
+ * @brief Read the oldest element from the circular buffer.
+ * Read the oldest element WITHOUT checking whether the
+ * circular buffer is empty or not. The oldest element is
+ * also removed from the circular buffer.
+ *
+ * @param cb The pointer to the circular buffer.
+ *
+ * @return the oldest element.
+ */
+static inline ia_css_circbuf_elem_t
+ia_css_circbuf_read(ia_css_circbuf_t *cb);
+
+/**
+ * @brief Shift a chunk of elements in the circular buffer.
+ * A chunk of elements (i.e. the ones from the "start" position
+ * to the "chunk_src" position) are shifted in the circular buffer,
+ * along the direction of new elements coming.
+ *
+ * @param cb The pointer to the circular buffer.
+ * @param chunk_src The position at which the first element in the chunk is.
+ * @param chunk_dest The position to which the first element in the chunk would be shifted.
+ */
+static inline void ia_css_circbuf_shift_chunk(ia_css_circbuf_t *cb,
+ uint32_t chunk_src,
+ uint32_t chunk_dest);
+
+/**
+ * @brief Get the "val" field in the element.
+ *
+ * @param elem The pointer to the element.
+ *
+ * @return the "val" field.
+ */
+static inline uint32_t
+ia_css_circbuf_elem_get_val(ia_css_circbuf_elem_t *elem);
+
+/**********************************************************************
+ *
+ * Non-inline functions.
+ *
+ **********************************************************************/
+/**
+ * @brief Create the circular buffer.
+ * Refer to "ia_css_circbuf.h" for details.
+ */
+void
+ia_css_circbuf_create(ia_css_circbuf_t *cb,
+ ia_css_circbuf_elem_t *elems,
+ ia_css_circbuf_desc_t *desc)
+{
+ uint32_t i;
+
+ OP___assert(desc);
+
+ cb->desc = desc;
+ /* Initialize to defaults */
+ cb->desc->start = 0;
+ cb->desc->end = 0;
+ cb->desc->step = 0;
+
+ for (i = 0; i < cb->desc->size; i++)
+ ia_css_circbuf_elem_init(&elems[i]);
+
+ cb->elems = elems;
+}
+
+/**
+ * @brief Destroy the circular buffer.
+ * Refer to "ia_css_circbuf.h" for details.
+ */
+void ia_css_circbuf_destroy(ia_css_circbuf_t *cb)
+{
+ cb->desc = NULL;
+
+ cb->elems = NULL;
+}
+
+/**
+ * @brief Pop a value out of the circular buffer.
+ * Refer to "ia_css_circbuf.h" for details.
+ */
+uint32_t ia_css_circbuf_pop(ia_css_circbuf_t *cb)
+{
+ uint32_t ret;
+ ia_css_circbuf_elem_t elem;
+
+ assert(!ia_css_circbuf_is_empty(cb));
+
+ /* read an element from the buffer */
+ elem = ia_css_circbuf_read(cb);
+ ret = ia_css_circbuf_elem_get_val(&elem);
+ return ret;
+}
+
+/**
+ * @brief Extract a value out of the circular buffer.
+ * Refer to "ia_css_circbuf.h" for details.
+ */
+uint32_t ia_css_circbuf_extract(ia_css_circbuf_t *cb, int offset)
+{
+ int max_offset;
+ uint32_t val;
+ uint32_t pos;
+ uint32_t src_pos;
+ uint32_t dest_pos;
+
+ /* get the maximum offset */
+ max_offset = ia_css_circbuf_get_offset(cb, cb->desc->start, cb->desc->end);
+ max_offset--;
+
+ /*
+ * Step 1: When the target element is at the "start" position.
+ */
+ if (offset == 0) {
+ val = ia_css_circbuf_pop(cb);
+ return val;
+ }
+
+ /*
+ * Step 2: When the target element is out of the range.
+ */
+ if (offset > max_offset) {
+ val = 0;
+ return val;
+ }
+
+ /*
+ * Step 3: When the target element is between the "start" and
+ * "end" position.
+ */
+ /* get the position of the target element */
+ pos = ia_css_circbuf_get_pos_at_offset(cb, cb->desc->start, offset);
+
+ /* get the value from the target element */
+ val = ia_css_circbuf_elem_get_val(&cb->elems[pos]);
+
+ /* shift the elements */
+ src_pos = ia_css_circbuf_get_pos_at_offset(cb, pos, -1);
+ dest_pos = pos;
+ ia_css_circbuf_shift_chunk(cb, src_pos, dest_pos);
+
+ return val;
+}
+
+/**
+ * @brief Peek an element from the circular buffer.
+ * Refer to "ia_css_circbuf.h" for details.
+ */
+uint32_t ia_css_circbuf_peek(ia_css_circbuf_t *cb, int offset)
+{
+ int pos;
+
+ pos = ia_css_circbuf_get_pos_at_offset(cb, cb->desc->end, offset);
+
+ /* get the value at the position */
+ return cb->elems[pos].val;
+}
+
+/**
+ * @brief Get the value of an element from the circular buffer.
+ * Refer to "ia_css_circbuf.h" for details.
+ */
+uint32_t ia_css_circbuf_peek_from_start(ia_css_circbuf_t *cb, int offset)
+{
+ int pos;
+
+ pos = ia_css_circbuf_get_pos_at_offset(cb, cb->desc->start, offset);
+
+ /* get the value at the position */
+ return cb->elems[pos].val;
+}
+
+/** @brief Increase the size of a circular buffer.
+ * Use with CAUTION: this was added only to support/fix an issue
+ * with increasing the size for the tagger.
+ * Please refer to "ia_css_circbuf.h" for details.
+ */
+bool ia_css_circbuf_increase_size(
+ ia_css_circbuf_t *cb,
+ unsigned int sz_delta,
+ ia_css_circbuf_elem_t *elems)
+{
+ uint8_t curr_size;
+ uint8_t curr_end;
+ unsigned int i = 0;
+
+ if (!cb || sz_delta == 0)
+ return false;
+
+ curr_size = cb->desc->size;
+ curr_end = cb->desc->end;
+ /* We assume cb was predefined as a global to allow
+ * an increase in size */
+ /* FM: are we sure this cannot cause size to become too big? */
+ if (((uint8_t)(cb->desc->size + (uint8_t)sz_delta) > cb->desc->size) && ((uint8_t)sz_delta == sz_delta))
+ cb->desc->size += (uint8_t)sz_delta;
+ else
+ return false; /* overflow in size */
+
+ /* If elems are passed, update them; otherwise we assume this has
+ * been taken care of before calling this function */
+ if (elems) {
+ /* The cb element array size is not increased dynamically;
+ * pointers to new elements can be added at the end of the
+ * existing predefined cb element array whose size is >= the
+ * new size, if not already added */
+ for (i = curr_size; i < cb->desc->size; i++)
+ cb->elems[i] = elems[i - curr_size];
+ }
+ /* Fix Start / End */
+ if (curr_end < cb->desc->start) {
+ if (curr_end == 0) {
+ /* Easily fix End */
+ cb->desc->end = curr_size;
+ } else {
+ /* Move elements and fix Start*/
+ ia_css_circbuf_shift_chunk(cb,
+ curr_size - 1,
+ curr_size + sz_delta - 1);
+ }
+ }
+
+ return true;
+}
+
+/****************************************************************
+ *
+ * Inline functions.
+ *
+ ****************************************************************/
+/**
+ * @brief Get the "val" field in the element.
+ * Refer to "Forward declarations" for details.
+ */
+static inline uint32_t
+ia_css_circbuf_elem_get_val(ia_css_circbuf_elem_t *elem)
+{
+ return elem->val;
+}
+
+/**
+ * @brief Read the oldest element from the circular buffer.
+ * Refer to "Forward declarations" for details.
+ */
+static inline ia_css_circbuf_elem_t
+ia_css_circbuf_read(ia_css_circbuf_t *cb)
+{
+ ia_css_circbuf_elem_t elem;
+
+ /* get the element from the target position */
+ elem = cb->elems[cb->desc->start];
+
+ /* clear the target position */
+ ia_css_circbuf_elem_init(&cb->elems[cb->desc->start]);
+
+ /* adjust the "start" position */
+ cb->desc->start = ia_css_circbuf_get_pos_at_offset(cb, cb->desc->start, 1);
+ return elem;
+}
+
+/**
+ * @brief Shift a chunk of elements in the circular buffer.
+ * Refer to "Forward declarations" for details.
+ */
+static inline void
+ia_css_circbuf_shift_chunk(ia_css_circbuf_t *cb,
+ uint32_t chunk_src, uint32_t chunk_dest)
+{
+ int chunk_offset;
+ int chunk_sz;
+ int i;
+
+ /* get the chunk offset and size */
+ chunk_offset = ia_css_circbuf_get_offset(cb,
+ chunk_src, chunk_dest);
+ chunk_sz = ia_css_circbuf_get_offset(cb, cb->desc->start, chunk_src) + 1;
+
+ /* shift each element to its terminal position */
+ for (i = 0; i < chunk_sz; i++) {
+
+ /* copy the element from the source to the destination */
+ ia_css_circbuf_elem_cpy(&cb->elems[chunk_src],
+ &cb->elems[chunk_dest]);
+
+ /* clear the source position */
+ ia_css_circbuf_elem_init(&cb->elems[chunk_src]);
+
+ /* adjust the source/terminal positions */
+ chunk_src = ia_css_circbuf_get_pos_at_offset(cb, chunk_src, -1);
+ chunk_dest = ia_css_circbuf_get_pos_at_offset(cb, chunk_dest, -1);
+
+ }
+
+ /* adjust the index "start" */
+ cb->desc->start = ia_css_circbuf_get_pos_at_offset(cb, cb->desc->start, chunk_offset);
+}
+
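For readers following ia_css_circbuf_extract() above, an illustrative trace may help (a sketch, not part of the patch):

/* Illustrative trace of ia_css_circbuf_extract(cb, 2) on a buffer whose
 * logical contents, oldest first, are  A B C D  (start points at A):
 *
 *   1. offset 2 selects C; its value is returned to the caller.
 *   2. the chunk A B (everything older than C) is shifted one slot
 *      towards where C was, via ia_css_circbuf_shift_chunk().
 *   3. 'start' advances by one, so the remaining contents are  A B D.
 *
 * offset 0 simply behaves like ia_css_circbuf_pop(), and an offset past
 * the newest element returns 0, as the step comments in the code note.
 */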
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/base/refcount/interface/ia_css_refcount.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/base/refcount/interface/ia_css_refcount.h
new file mode 100644
index 000000000000..20db4de6beeb
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/base/refcount/interface/ia_css_refcount.h
@@ -0,0 +1,83 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _IA_CSS_REFCOUNT_H_
+#define _IA_CSS_REFCOUNT_H_
+
+#include <type_support.h>
+#include <system_types.h>
+#include <ia_css_err.h>
+
+typedef void (*clear_func)(hrt_vaddress ptr);
+
+/*! \brief Function for initializing refcount list
+ *
+ * \param[in] size Size of the refcount list.
+ * \return ia_css_err
+ */
+extern enum ia_css_err ia_css_refcount_init(uint32_t size);
+
+/*! \brief Function for de-initializing refcount list
+ *
+ * \return None
+ */
+extern void ia_css_refcount_uninit(void);
+
+/*! \brief Function for increasing the reference count by 1.
+ *
+ * \param[in] id ID of the object.
+ * \param[in] ptr Data of the object (ptr).
+ * \return hrt_vaddress (saved address)
+ */
+extern hrt_vaddress ia_css_refcount_increment(int32_t id, hrt_vaddress ptr);
+
+/*! \brief Function for decreasing the reference count by 1.
+ *
+ * \param[in] id ID of the object.
+ * \param[in] ptr Data of the object (ptr).
+ *
+ * \return
+ * - true, if it is successful.
+ * - false, otherwise.
+ */
+extern bool ia_css_refcount_decrement(int32_t id, hrt_vaddress ptr);
+
+/*! \brief Function to check if reference count is 1.
+ *
+ * \param[in] ptr Data of the object (ptr).
+ *
+ * \return
+ * - true, if it is successful.
+ * - false, otherwise.
+ */
+extern bool ia_css_refcount_is_single(hrt_vaddress ptr);
+
+/*! \brief Function to clear reference list objects.
+ *
+ * \param[in] id ID of the object.
+ * \param[in] clear_func function to be run to free reference objects.
+ *
+ * \return None
+ */
+extern void ia_css_refcount_clear(int32_t id,
+ clear_func clear_func_ptr);
+
+/*! \brief Function to verify if object is valid
+ *
+ * \param[in] ptr Data of the object (ptr)
+ *
+ * \return
+ * - true, if valid
+ * - false, if invalid
+ */
+extern bool ia_css_refcount_is_valid(hrt_vaddress ptr);
+
+#endif /* _IA_CSS_REFCOUNT_H_ */
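A hedged sketch of the intended call pattern for this refcount API (not part of the patch; the buffer address is a placeholder for something obtained from the CSS allocator, the id is caller-chosen, and error handling is trimmed):

#include "ia_css_refcount.h"

static void refcount_usage_sketch(hrt_vaddress my_buf)
{
	const int32_t my_id = 0x1234;	/* arbitrary caller-chosen tag */

	if (ia_css_refcount_init(32) != IA_CSS_SUCCESS)
		return;

	/* first increment registers the buffer, later ones bump the count */
	ia_css_refcount_increment(my_id, my_buf);
	ia_css_refcount_increment(my_id, my_buf);

	/* each decrement drops the count; the final one frees the buffer */
	ia_css_refcount_decrement(my_id, my_buf);
	ia_css_refcount_decrement(my_id, my_buf);

	ia_css_refcount_uninit();
}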
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/base/refcount/src/refcount.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/base/refcount/src/refcount.c
new file mode 100644
index 000000000000..6e3bd773ee4c
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/base/refcount/src/refcount.c
@@ -0,0 +1,281 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "ia_css_refcount.h"
+#include "memory_access/memory_access.h"
+#include "sh_css_defs.h"
+
+#include "platform_support.h"
+
+#include "assert_support.h"
+
+#include "ia_css_debug.h"
+
+/* TODO: enable for other memory as well;
+ for now only for hrt_vaddress */
+struct ia_css_refcount_entry {
+ uint32_t count;
+ hrt_vaddress data;
+ int32_t id;
+};
+
+struct ia_css_refcount_list {
+ uint32_t size;
+ struct ia_css_refcount_entry *items;
+};
+
+static struct ia_css_refcount_list myrefcount;
+
+static struct ia_css_refcount_entry *refcount_find_entry(hrt_vaddress ptr,
+ bool firstfree)
+{
+ uint32_t i;
+
+ if (ptr == 0)
+ return NULL;
+ if (myrefcount.items == NULL) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_ERROR,
+ "refcount_find_entry(): Ref count not initiliazed!\n");
+ return NULL;
+ }
+
+ for (i = 0; i < myrefcount.size; i++) {
+
+ if ((&myrefcount.items[i])->data == 0) {
+ if (firstfree) {
+ /* for new entry */
+ return &myrefcount.items[i];
+ }
+ }
+ if ((&myrefcount.items[i])->data == ptr) {
+ /* found entry */
+ return &myrefcount.items[i];
+ }
+ }
+ return NULL;
+}
+
+enum ia_css_err ia_css_refcount_init(uint32_t size)
+{
+ enum ia_css_err err = IA_CSS_SUCCESS;
+
+ if (size == 0) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_refcount_init(): Size of 0 for Ref count init!\n");
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+ if (myrefcount.items != NULL) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_refcount_init(): Ref count is already initialized\n");
+ return IA_CSS_ERR_INTERNAL_ERROR;
+ }
+ myrefcount.items =
+ sh_css_malloc(sizeof(struct ia_css_refcount_entry) * size);
+ if (!myrefcount.items)
+ err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+ if (err == IA_CSS_SUCCESS) {
+ memset(myrefcount.items, 0,
+ sizeof(struct ia_css_refcount_entry) * size);
+ myrefcount.size = size;
+ }
+ return err;
+}
+
+void ia_css_refcount_uninit(void)
+{
+ struct ia_css_refcount_entry *entry;
+ uint32_t i;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_refcount_uninit() entry\n");
+ for (i = 0; i < myrefcount.size; i++) {
+ /* driver verifier tool has issues with &arr[i]
+ and prefers arr + i; as these are actually equivalent
+ the line below uses + i
+ */
+ entry = myrefcount.items + i;
+ if (entry->data != mmgr_NULL) {
+ /* ia_css_debug_dtrace(IA_CSS_DBG_TRACE,
+ "ia_css_refcount_uninit: freeing (%x)\n",
+ entry->data);*/
+ hmm_free(entry->data);
+ entry->data = mmgr_NULL;
+ entry->count = 0;
+ entry->id = 0;
+ }
+ }
+ sh_css_free(myrefcount.items);
+ myrefcount.items = NULL;
+ myrefcount.size = 0;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_refcount_uninit() leave\n");
+}
+
+hrt_vaddress ia_css_refcount_increment(int32_t id, hrt_vaddress ptr)
+{
+ struct ia_css_refcount_entry *entry;
+
+ if (ptr == mmgr_NULL)
+ return ptr;
+
+ entry = refcount_find_entry(ptr, false);
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_refcount_increment(%x) 0x%x\n", id, ptr);
+
+ if (!entry) {
+ entry = refcount_find_entry(ptr, true);
+ assert(entry != NULL);
+ if (entry == NULL)
+ return mmgr_NULL;
+ entry->id = id;
+ }
+
+ if (entry->id != id) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_ERROR,
+ "ia_css_refcount_increment(): Ref count IDS do not match!\n");
+ return mmgr_NULL;
+ }
+
+ if (entry->data == ptr)
+ entry->count += 1;
+ else if (entry->data == mmgr_NULL) {
+ entry->data = ptr;
+ entry->count = 1;
+ } else
+ return mmgr_NULL;
+
+ return ptr;
+}
+
+bool ia_css_refcount_decrement(int32_t id, hrt_vaddress ptr)
+{
+ struct ia_css_refcount_entry *entry;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_refcount_decrement(%x) 0x%x\n", id, ptr);
+
+ if (ptr == mmgr_NULL)
+ return false;
+
+ entry = refcount_find_entry(ptr, false);
+
+ if (entry) {
+ if (entry->id != id) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_ERROR,
+ "ia_css_refcount_decrement(): Ref count IDS do not match!\n");
+ return false;
+ }
+ if (entry->count > 0) {
+ entry->count -= 1;
+ if (entry->count == 0) {
+ /* ia_css_debug_dtrace(IA_CSS_DBEUG_TRACE,
+ "ia_css_refcount_decrement: freeing\n");*/
+ hmm_free(ptr);
+ entry->data = mmgr_NULL;
+ entry->id = 0;
+ }
+ return true;
+ }
+ }
+
+ /* SHOULD NOT HAPPEN: ptr not managed by refcount, or not
+ valid anymore */
+ if (entry)
+ IA_CSS_ERROR("id %x, ptr 0x%x entry %p entry->id %x entry->count %d\n",
+ id, ptr, entry, entry->id, entry->count);
+ else
+ IA_CSS_ERROR("entry NULL\n");
+#ifdef ISP2401
+ assert(false);
+#endif
+
+ return false;
+}
+
+bool ia_css_refcount_is_single(hrt_vaddress ptr)
+{
+ struct ia_css_refcount_entry *entry;
+
+ if (ptr == mmgr_NULL)
+ return false;
+
+ entry = refcount_find_entry(ptr, false);
+
+ if (entry)
+ return (entry->count == 1);
+
+ return true;
+}
+
+void ia_css_refcount_clear(int32_t id, clear_func clear_func_ptr)
+{
+ struct ia_css_refcount_entry *entry;
+ uint32_t i;
+ uint32_t count = 0;
+
+ assert(clear_func_ptr != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_refcount_clear(%x)\n",
+ id);
+
+ for (i = 0; i < myrefcount.size; i++) {
+ /* driver verifier tool has issues with &arr[i]
+ and prefers arr + i; as these are actually equivalent
+ the line below uses + i
+ */
+ entry = myrefcount.items + i;
+ if ((entry->data != mmgr_NULL) && (entry->id == id)) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_refcount_clear:"
+ " %x: 0x%x\n", id, entry->data);
+ if (clear_func_ptr) {
+ /* clear using provided function */
+ clear_func_ptr(entry->data);
+ } else {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_refcount_clear: "
+ "using hmm_free: "
+ "no clear_func\n");
+ hmm_free(entry->data);
+ }
+#ifndef ISP2401
+
+#else
+ assert(entry->count == 0);
+#endif
+ if (entry->count != 0) {
+ IA_CSS_WARNING("Ref count for entry %x is not zero!", entry->id);
+ }
+ entry->data = mmgr_NULL;
+ entry->count = 0;
+ entry->id = 0;
+ count++;
+ }
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_refcount_clear(%x): cleared %d\n", id,
+ count);
+}
+
+bool ia_css_refcount_is_valid(hrt_vaddress ptr)
+{
+ struct ia_css_refcount_entry *entry;
+
+ if (ptr == mmgr_NULL)
+ return false;
+
+ entry = refcount_find_entry(ptr, false);
+
+ return entry != NULL;
+}
+
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/pipe/interface/ia_css_pipe_binarydesc.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/pipe/interface/ia_css_pipe_binarydesc.h
new file mode 100644
index 000000000000..616789d9b3f6
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/pipe/interface/ia_css_pipe_binarydesc.h
@@ -0,0 +1,297 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_PIPE_BINARYDESC_H__
+#define __IA_CSS_PIPE_BINARYDESC_H__
+
+#include <ia_css_types.h> /* ia_css_pipe */
+#include <ia_css_frame_public.h> /* ia_css_frame_info */
+#include <ia_css_binary.h> /* ia_css_binary_descr */
+
+/** @brief Get a binary descriptor for copy.
+ *
+ * @param[in] pipe
+ * @param[out] copy_descr
+ * @param[in/out] in_info
+ * @param[in/out] out_info
+ * @param[in/out] vf_info
+ * @return None
+ *
+ */
+extern void ia_css_pipe_get_copy_binarydesc(
+ struct ia_css_pipe const * const pipe,
+ struct ia_css_binary_descr *copy_descr,
+ struct ia_css_frame_info *in_info,
+ struct ia_css_frame_info *out_info,
+ struct ia_css_frame_info *vf_info);
+
+/** @brief Get a binary descriptor for vfpp.
+ *
+ * @param[in] pipe
+ * @param[out] vfpp_descr
+ * @param[in/out] in_info
+ * @param[in/out] out_info
+ * @return None
+ *
+ */
+extern void ia_css_pipe_get_vfpp_binarydesc(
+ struct ia_css_pipe const * const pipe,
+ struct ia_css_binary_descr *vf_pp_descr,
+ struct ia_css_frame_info *in_info,
+ struct ia_css_frame_info *out_info);
+
+/** @brief Get numerator and denominator of bayer downscaling factor.
+ *
+ * @param[in] bds_factor: The bayer downscaling factor.
+ * (= The bds_factor member in the sh_css_bds_factor structure.)
+ * @param[out] bds_factor_numerator: The numerator of the bayer downscaling factor.
+ * (= The numerator member in the sh_css_bds_factor structure.)
+ * @param[out] bds_factor_denominator: The denominator of the bayer downscaling factor.
+ * (= The denominator member in the sh_css_bds_factor structure.)
+ * @return IA_CSS_SUCCESS or error code upon error.
+ *
+ */
+extern enum ia_css_err sh_css_bds_factor_get_numerator_denominator(
+ unsigned int bds_factor,
+ unsigned int *bds_factor_numerator,
+ unsigned int *bds_factor_denominator);
+
+/** @brief Get a binary descriptor for preview stage.
+ *
+ * @param[in] pipe
+ * @param[out] preview_descr
+ * @param[in/out] in_info
+ * @param[in/out] bds_out_info
+ * @param[in/out] out_info
+ * @param[in/out] vf_info
+ * @return IA_CSS_SUCCESS or error code upon error.
+ *
+ */
+extern enum ia_css_err ia_css_pipe_get_preview_binarydesc(
+ struct ia_css_pipe * const pipe,
+ struct ia_css_binary_descr *preview_descr,
+ struct ia_css_frame_info *in_info,
+ struct ia_css_frame_info *bds_out_info,
+ struct ia_css_frame_info *out_info,
+ struct ia_css_frame_info *vf_info);
+
+/** @brief Get a binary descriptor for video stage.
+ *
+ * @param[in/out] pipe
+ * @param[out] video_descr
+ * @param[in/out] in_info
+ * @param[in/out] bds_out_info
+ * @param[in/out] vf_info
+ * @return IA_CSS_SUCCESS or error code upon error.
+ *
+ */
+extern enum ia_css_err ia_css_pipe_get_video_binarydesc(
+ struct ia_css_pipe * const pipe,
+ struct ia_css_binary_descr *video_descr,
+ struct ia_css_frame_info *in_info,
+ struct ia_css_frame_info *bds_out_info,
+ struct ia_css_frame_info *out_info,
+ struct ia_css_frame_info *vf_info,
+ int stream_config_left_padding);
+
+/** @brief Get a binary descriptor for yuv scaler stage.
+ *
+ * @param[in/out] pipe
+ * @param[out] yuv_scaler_descr
+ * @param[in/out] in_info
+ * @param[in/out] out_info
+ * @param[in/out] internal_out_info
+ * @param[in/out] vf_info
+ * @return None
+ *
+ */
+void ia_css_pipe_get_yuvscaler_binarydesc(
+ struct ia_css_pipe const * const pipe,
+ struct ia_css_binary_descr *yuv_scaler_descr,
+ struct ia_css_frame_info *in_info,
+ struct ia_css_frame_info *out_info,
+ struct ia_css_frame_info *internal_out_info,
+ struct ia_css_frame_info *vf_info);
+
+/** @brief Get a binary descriptor for capture pp stage.
+ *
+ * @param[in/out] pipe
+ * @param[out] capture_pp_descr
+ * @param[in/out] in_info
+ * @param[in/out] out_info
+ * @param[in/out] vf_info
+ * @return None
+ *
+ */
+extern void ia_css_pipe_get_capturepp_binarydesc(
+ struct ia_css_pipe * const pipe,
+ struct ia_css_binary_descr *capture_pp_descr,
+ struct ia_css_frame_info *in_info,
+ struct ia_css_frame_info *out_info,
+ struct ia_css_frame_info *vf_info);
+
+/** @brief Get a binary descriptor for primary capture.
+ *
+ * @param[in] pipe
+ * @param[out] prim_descr
+ * @param[in/out] in_info
+ * @param[in/out] out_info
+ * @param[in/out] vf_info
+ * @return None
+ *
+ */
+extern void ia_css_pipe_get_primary_binarydesc(
+ struct ia_css_pipe const * const pipe,
+ struct ia_css_binary_descr *prim_descr,
+ struct ia_css_frame_info *in_info,
+ struct ia_css_frame_info *out_info,
+ struct ia_css_frame_info *vf_info,
+ unsigned int stage_idx);
+
+/** @brief Get a binary descriptor for pre gdc stage.
+ *
+ * @param[in] pipe
+ * @param[out] pre_gdc_descr
+ * @param[in/out] in_info
+ * @param[in/out] out_info
+ * @return None
+ *
+ */
+extern void ia_css_pipe_get_pre_gdc_binarydesc(
+ struct ia_css_pipe const * const pipe,
+ struct ia_css_binary_descr *gdc_descr,
+ struct ia_css_frame_info *in_info,
+ struct ia_css_frame_info *out_info);
+
+/** @brief Get a binary descriptor for gdc stage.
+ *
+ * @param[in] pipe
+ * @param[out] gdc_descr
+ * @param[in/out] in_info
+ * @param[in/out] out_info
+ * @return None
+ *
+ */
+extern void ia_css_pipe_get_gdc_binarydesc(
+ struct ia_css_pipe const * const pipe,
+ struct ia_css_binary_descr *gdc_descr,
+ struct ia_css_frame_info *in_info,
+ struct ia_css_frame_info *out_info);
+
+/** @brief Get a binary descriptor for post gdc.
+ *
+ * @param[in] pipe
+ * @param[out] post_gdc_descr
+ * @param[in/out] in_info
+ * @param[in/out] out_info
+ * @param[in/out] vf_info
+ * @return None
+ *
+ */
+extern void ia_css_pipe_get_post_gdc_binarydesc(
+ struct ia_css_pipe const * const pipe,
+ struct ia_css_binary_descr *post_gdc_descr,
+ struct ia_css_frame_info *in_info,
+ struct ia_css_frame_info *out_info,
+ struct ia_css_frame_info *vf_info);
+
+/** @brief Get a binary descriptor for pre de stage.
+ *
+ * @param[in] pipe
+ * @param[out] pre_de_descr
+ * @param[in/out] in_info
+ * @param[in/out] out_info
+ * @return None
+ *
+ */
+extern void ia_css_pipe_get_pre_de_binarydesc(
+ struct ia_css_pipe const * const pipe,
+ struct ia_css_binary_descr *pre_de_descr,
+ struct ia_css_frame_info *in_info,
+ struct ia_css_frame_info *out_info);
+
+/** @brief Get a binary descriptor for pre anr stage.
+ *
+ * @param[in] pipe
+ * @param[out] pre_anr_descr
+ * @param[in/out] in_info
+ * @param[in/out] out_info
+ * @return None
+ *
+ */
+extern void ia_css_pipe_get_pre_anr_binarydesc(
+ struct ia_css_pipe const * const pipe,
+ struct ia_css_binary_descr *pre_anr_descr,
+ struct ia_css_frame_info *in_info,
+ struct ia_css_frame_info *out_info);
+
+/** @brief Get a binary descriptor for ANR stage.
+ *
+ * @param[in] pipe
+ * @param[out] anr_descr
+ * @param[in/out] in_info
+ * @param[in/out] out_info
+ * @return None
+ *
+ */
+extern void ia_css_pipe_get_anr_binarydesc(
+ struct ia_css_pipe const * const pipe,
+ struct ia_css_binary_descr *anr_descr,
+ struct ia_css_frame_info *in_info,
+ struct ia_css_frame_info *out_info);
+
+/** @brief Get a binary descriptor for post anr stage.
+ *
+ * @param[in] pipe
+ * @param[out] post_anr_descr
+ * @param[in/out] in_info
+ * @param[in/out] out_info
+ * @param[in/out] vf_info
+ * @return None
+ *
+ */
+extern void ia_css_pipe_get_post_anr_binarydesc(
+ struct ia_css_pipe const * const pipe,
+ struct ia_css_binary_descr *post_anr_descr,
+ struct ia_css_frame_info *in_info,
+ struct ia_css_frame_info *out_info,
+ struct ia_css_frame_info *vf_info);
+
+/** @brief Get a binary descriptor for ldc stage.
+ *
+ * @param[in/out] pipe
+ * @param[out] ldc_descr
+ * @param[in/out] in_info
+ * @param[in/out] out_info
+ * @return None
+ *
+ */
+extern void ia_css_pipe_get_ldc_binarydesc(
+ struct ia_css_pipe const * const pipe,
+ struct ia_css_binary_descr *ldc_descr,
+ struct ia_css_frame_info *in_info,
+ struct ia_css_frame_info *out_info);
+
+/** @brief Calculates the required BDS factor
+ *
+ * @param[in] input_res
+ * @param[in] output_res
+ * @param[out] bds_factor
+ * @return IA_CSS_SUCCESS or error code upon error.
+ */
+enum ia_css_err binarydesc_calculate_bds_factor(
+ struct ia_css_resolution input_res,
+ struct ia_css_resolution output_res,
+ unsigned int *bds_factor);
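+
+/* Illustrative use (hypothetical caller code): for a 3840x2160 effective
+ * input resolution and a 1920x1080 bayer_ds_out_res, the first matching
+ * entry in the factor table is 2/1, so *bds_factor is set to
+ * SH_CSS_BDS_FACTOR_2_00:
+ *
+ *	unsigned int factor;
+ *	enum ia_css_err err = binarydesc_calculate_bds_factor(
+ *		in_info->res, bds_out_info->res, &factor);
+ */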
+
+#endif /* __IA_CSS_PIPE_BINARYDESC_H__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/pipe/interface/ia_css_pipe_stagedesc.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/pipe/interface/ia_css_pipe_stagedesc.h
new file mode 100644
index 000000000000..38690ea093c2
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/pipe/interface/ia_css_pipe_stagedesc.h
@@ -0,0 +1,52 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_PIPE_STAGEDESC_H__
+#define __IA_CSS_PIPE_STAGEDESC_H__
+
+#include <ia_css_acc_types.h> /* ia_css_fw_info */
+#include <ia_css_frame_public.h>
+#include <ia_css_binary.h>
+#include "ia_css_pipeline.h"
+#include "ia_css_pipeline_common.h"
+
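+/** @brief Fill a pipeline stage descriptor for a regular binary stage.
+ *
+ * @param[out] stage_desc Stage descriptor to fill; the stage mode is taken
+ *	from the binary info.
+ * @param[in] binary Binary to run in this stage.
+ * @param[in] out_frame Array of IA_CSS_BINARY_MAX_OUTPUT_PORTS output frames.
+ * @param[in] in_frame Input frame for the stage.
+ * @param[in] vf_frame Viewfinder frame for the stage.
+ *
+ */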
+extern void ia_css_pipe_get_generic_stage_desc(
+ struct ia_css_pipeline_stage_desc *stage_desc,
+ struct ia_css_binary *binary,
+ struct ia_css_frame *out_frame[],
+ struct ia_css_frame *in_frame,
+ struct ia_css_frame *vf_frame);
+
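+/** @brief Fill a pipeline stage descriptor for a firmware (extension) stage.
+ *
+ * @param[out] stage_desc Stage descriptor to fill.
+ * @param[in] binary Binary associated with the stage.
+ * @param[in] out_frame Array of IA_CSS_BINARY_MAX_OUTPUT_PORTS output frames.
+ * @param[in] in_frame Input frame for the stage.
+ * @param[in] vf_frame Viewfinder frame for the stage.
+ * @param[in] fw Firmware to run in this stage.
+ * @param[in] mode Binary mode to record in the descriptor.
+ *
+ */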
+extern void ia_css_pipe_get_firmwares_stage_desc(
+ struct ia_css_pipeline_stage_desc *stage_desc,
+ struct ia_css_binary *binary,
+ struct ia_css_frame *out_frame[],
+ struct ia_css_frame *in_frame,
+ struct ia_css_frame *vf_frame,
+ const struct ia_css_fw_info *fw,
+ unsigned int mode);
+
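+/** @brief Fill a pipeline stage descriptor for an acceleration stage.
+ *
+ * @param[out] stage_desc Stage descriptor to fill; all frame pointers are
+ *	cleared.
+ * @param[in] binary Binary associated with the stage.
+ * @param[in] fw Acceleration firmware to run in this stage.
+ *
+ */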
+extern void ia_css_pipe_get_acc_stage_desc(
+ struct ia_css_pipeline_stage_desc *stage_desc,
+ struct ia_css_binary *binary,
+ struct ia_css_fw_info *fw);
+
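+/** @brief Fill a pipeline stage descriptor for an SP function stage.
+ *
+ * @param[out] stage_desc Stage descriptor to fill; no binary or firmware is
+ *	attached.
+ * @param[in] out_frame Single output frame for the stage.
+ * @param[in] sp_func SP function to run in this stage.
+ * @param[in] max_input_width Maximum input width for the SP function.
+ *
+ */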
+extern void ia_css_pipe_get_sp_func_stage_desc(
+ struct ia_css_pipeline_stage_desc *stage_desc,
+ struct ia_css_frame *out_frame,
+ enum ia_css_pipeline_stage_sp_func sp_func,
+ unsigned max_input_width);
+
+#endif /* __IA_CSS_PIPE_STAGEDESC_H__ */
+
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/pipe/interface/ia_css_pipe_util.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/pipe/interface/ia_css_pipe_util.h
new file mode 100644
index 000000000000..ba8858759b30
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/pipe/interface/ia_css_pipe_util.h
@@ -0,0 +1,39 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_PIPE_UTIL_H__
+#define __IA_CSS_PIPE_UTIL_H__
+
+#include <ia_css_types.h>
+#include <ia_css_frame_public.h>
+
+/** @brief Get the input format bits per pixel based on the stream
+ * configuration of this pipe.
+ *
+ * @param[in] pipe
+ * @return bits per pixel for the underlying stream
+ *
+ */
+extern unsigned int ia_css_pipe_util_pipe_input_format_bpp(
+ const struct ia_css_pipe * const pipe);
+
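+/** @brief Initialize an output-frame array to all-NULL entries.
+ *
+ * @param[out] frames Array of IA_CSS_BINARY_MAX_OUTPUT_PORTS frame pointers.
+ *
+ */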
+extern void ia_css_pipe_util_create_output_frames(
+ struct ia_css_frame *frames[]);
+
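+/** @brief Store a frame pointer at the given index of an output-frame array.
+ *
+ * @param[in/out] frames Array of IA_CSS_BINARY_MAX_OUTPUT_PORTS frame pointers.
+ * @param[in] idx Index to set; must be smaller than
+ *	IA_CSS_BINARY_MAX_OUTPUT_PORTS.
+ * @param[in] frame Frame pointer to store at index idx.
+ *
+ */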
+extern void ia_css_pipe_util_set_output_frames(
+ struct ia_css_frame *frames[],
+ unsigned int idx,
+ struct ia_css_frame *frame);
+
+#endif /* __IA_CSS_PIPE_UTIL_H__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/pipe/src/pipe_binarydesc.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/pipe/src/pipe_binarydesc.c
new file mode 100644
index 000000000000..17d3b7de93ba
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/pipe/src/pipe_binarydesc.c
@@ -0,0 +1,883 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "ia_css_pipe_binarydesc.h"
+#include "ia_css_frame_format.h"
+#include "ia_css_pipe.h"
+#include "ia_css_pipe_util.h"
+#include "ia_css_util.h"
+#include "ia_css_debug.h"
+#include "sh_css_params.h"
+#include <assert_support.h>
+/* HRT_GDC_N */
+#include "gdc_device.h"
+
+/* This module provides binary descriptions used to find a binary. Since
+ * every stage is associated with a binary, it implicitly helps with stage
+ * description. Apart from providing a binary description, this module also
+ * populates the frame infos when required. */
+
+/* Generic descriptor for offline binaries. Internal function. */
+static void pipe_binarydesc_get_offline(
+ struct ia_css_pipe const * const pipe,
+ const int mode,
+ struct ia_css_binary_descr *descr,
+ struct ia_css_frame_info *in_info,
+ struct ia_css_frame_info *out_info[],
+ struct ia_css_frame_info *vf_info)
+{
+ unsigned int i;
+ /* in_info, out_info, vf_info can be NULL */
+ assert(pipe != NULL);
+ assert(descr != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "pipe_binarydesc_get_offline() enter:\n");
+
+ descr->mode = mode;
+ descr->online = false;
+ descr->continuous = pipe->stream->config.continuous;
+ descr->striped = false;
+ descr->two_ppc = false;
+ descr->enable_yuv_ds = false;
+ descr->enable_high_speed = false;
+ descr->enable_dvs_6axis = false;
+ descr->enable_reduced_pipe = false;
+ descr->enable_dz = true;
+ descr->enable_xnr = false;
+ descr->enable_dpc = false;
+#ifdef ISP2401
+ descr->enable_luma_only = false;
+ descr->enable_tnr = false;
+#endif
+ descr->enable_capture_pp_bli = false;
+ descr->enable_fractional_ds = false;
+ descr->dvs_env.width = 0;
+ descr->dvs_env.height = 0;
+ descr->stream_format = pipe->stream->config.input_config.format;
+ descr->in_info = in_info;
+ descr->bds_out_info = NULL;
+ for (i = 0; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++)
+ descr->out_info[i] = out_info[i];
+ descr->vf_info = vf_info;
+ descr->isp_pipe_version = pipe->config.isp_pipe_version;
+ descr->required_bds_factor = SH_CSS_BDS_FACTOR_1_00;
+ descr->stream_config_left_padding = -1;
+}
+
+void ia_css_pipe_get_copy_binarydesc(
+ struct ia_css_pipe const * const pipe,
+ struct ia_css_binary_descr *copy_descr,
+ struct ia_css_frame_info *in_info,
+ struct ia_css_frame_info *out_info,
+ struct ia_css_frame_info *vf_info)
+{
+ struct ia_css_frame_info *out_infos[IA_CSS_BINARY_MAX_OUTPUT_PORTS];
+ unsigned int i;
+ /* out_info can be NULL */
+ assert(pipe != NULL);
+ assert(in_info != NULL);
+ IA_CSS_ENTER_PRIVATE("");
+
+ *in_info = *out_info;
+ out_infos[0] = out_info;
+ for (i = 1; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++)
+ out_infos[i] = NULL;
+ pipe_binarydesc_get_offline(pipe, IA_CSS_BINARY_MODE_COPY,
+ copy_descr, in_info, out_infos, vf_info);
+ copy_descr->online = true;
+ copy_descr->continuous = false;
+ copy_descr->two_ppc = (pipe->stream->config.pixels_per_clock == 2);
+ copy_descr->enable_dz = false;
+ copy_descr->isp_pipe_version = IA_CSS_PIPE_VERSION_1;
+ IA_CSS_LEAVE_PRIVATE("");
+}
+
+void ia_css_pipe_get_vfpp_binarydesc(
+ struct ia_css_pipe const * const pipe,
+ struct ia_css_binary_descr *vf_pp_descr,
+ struct ia_css_frame_info *in_info,
+ struct ia_css_frame_info *out_info)
+{
+ struct ia_css_frame_info *out_infos[IA_CSS_BINARY_MAX_OUTPUT_PORTS];
+ unsigned int i;
+ /* out_info can be NULL ??? */
+ assert(pipe != NULL);
+ assert(in_info != NULL);
+ IA_CSS_ENTER_PRIVATE("");
+
+ in_info->raw_bit_depth = 0;
+ out_infos[0] = out_info;
+ for (i = 1; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++)
+ out_infos[i] = NULL;
+
+ pipe_binarydesc_get_offline(pipe, IA_CSS_BINARY_MODE_VF_PP,
+ vf_pp_descr, in_info, out_infos, NULL);
+ vf_pp_descr->enable_fractional_ds = true;
+ IA_CSS_LEAVE_PRIVATE("");
+}
+
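+/* Supported bayer downscaling (BDS) factors as numerator/denominator pairs;
+ * e.g. SH_CSS_BDS_FACTOR_2_25 corresponds to 9/4, so the bayer-downscaled
+ * output is roughly input * denominator / numerator in each dimension.
+ */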
+static struct sh_css_bds_factor bds_factors_list[] = {
+ {1, 1, SH_CSS_BDS_FACTOR_1_00},
+ {5, 4, SH_CSS_BDS_FACTOR_1_25},
+ {3, 2, SH_CSS_BDS_FACTOR_1_50},
+ {2, 1, SH_CSS_BDS_FACTOR_2_00},
+ {9, 4, SH_CSS_BDS_FACTOR_2_25},
+ {5, 2, SH_CSS_BDS_FACTOR_2_50},
+ {3, 1, SH_CSS_BDS_FACTOR_3_00},
+ {4, 1, SH_CSS_BDS_FACTOR_4_00},
+ {9, 2, SH_CSS_BDS_FACTOR_4_50},
+ {5, 1, SH_CSS_BDS_FACTOR_5_00},
+ {6, 1, SH_CSS_BDS_FACTOR_6_00},
+ {8, 1, SH_CSS_BDS_FACTOR_8_00}
+};
+
+enum ia_css_err sh_css_bds_factor_get_numerator_denominator(
+ unsigned int bds_factor,
+ unsigned int *bds_factor_numerator,
+ unsigned int *bds_factor_denominator)
+{
+ unsigned int i;
+ unsigned int bds_list_size = sizeof(bds_factors_list) /
+ sizeof(struct sh_css_bds_factor);
+
+ /* Loop over all bds factors until a match is found */
+ for (i = 0; i < bds_list_size; i++) {
+ if (bds_factors_list[i].bds_factor == bds_factor) {
+ *bds_factor_numerator = bds_factors_list[i].numerator;
+ *bds_factor_denominator = bds_factors_list[i].denominator;
+ return IA_CSS_SUCCESS;
+ }
+ }
+
+ /* Return an error since bds_factor cannot be found
+ in bds_factors_list */
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+}
+
+enum ia_css_err binarydesc_calculate_bds_factor(
+ struct ia_css_resolution input_res,
+ struct ia_css_resolution output_res,
+ unsigned int *bds_factor)
+{
+ unsigned int i;
+ unsigned int bds_list_size = sizeof(bds_factors_list) /
+ sizeof(struct sh_css_bds_factor);
+ unsigned int in_w = input_res.width,
+ in_h = input_res.height,
+ out_w = output_res.width, out_h = output_res.height;
+
+ unsigned int max_bds_factor = 8;
+ unsigned int max_rounding_margin = 2;
+ /* delta in pixels to account for rounding margin in the calculation */
+ unsigned int delta = max_bds_factor * max_rounding_margin;
+
+ /* Assert if the resolutions are not set */
+ assert(in_w != 0 && in_h != 0);
+ assert(out_w != 0 && out_h != 0);
+
+ /* Loop over all bds factors until a match is found */
+ for (i = 0; i < bds_list_size; i++) {
+ unsigned int num = bds_factors_list[i].numerator;
+ unsigned int den = bds_factors_list[i].denominator;
+
+ /* See width-wise and height-wise if this bds_factor
+ * satisfies the condition */
+ bool cond = (out_w * num / den + delta > in_w) &&
+ (out_w * num / den <= in_w) &&
+ (out_h * num / den + delta > in_h) &&
+ (out_h * num / den <= in_h);
+
+ if (cond) {
+ *bds_factor = bds_factors_list[i].bds_factor;
+ return IA_CSS_SUCCESS;
+ }
+ }
+
+ /* Return an error since a suitable bds_factor cannot be found */
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+}
+
+enum ia_css_err ia_css_pipe_get_preview_binarydesc(
+ struct ia_css_pipe * const pipe,
+ struct ia_css_binary_descr *preview_descr,
+ struct ia_css_frame_info *in_info,
+ struct ia_css_frame_info *bds_out_info,
+ struct ia_css_frame_info *out_info,
+ struct ia_css_frame_info *vf_info)
+{
+ enum ia_css_err err;
+ struct ia_css_frame_info *out_infos[IA_CSS_BINARY_MAX_OUTPUT_PORTS];
+ int mode = IA_CSS_BINARY_MODE_PREVIEW;
+ unsigned int i;
+
+ assert(pipe != NULL);
+ assert(in_info != NULL);
+ assert(out_info != NULL);
+ assert(vf_info != NULL);
+ IA_CSS_ENTER_PRIVATE("");
+
+ /*
+ * Set up the info of the input frame with
+ * the ISP required resolution
+ */
+ in_info->res = pipe->config.input_effective_res;
+ in_info->padded_width = in_info->res.width;
+ in_info->raw_bit_depth = ia_css_pipe_util_pipe_input_format_bpp(pipe);
+
+ if (ia_css_util_is_input_format_yuv(pipe->stream->config.input_config.format))
+ mode = IA_CSS_BINARY_MODE_COPY;
+ else
+ in_info->format = IA_CSS_FRAME_FORMAT_RAW;
+
+ out_infos[0] = out_info;
+ for (i = 1; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++)
+ out_infos[i] = NULL;
+
+ pipe_binarydesc_get_offline(pipe, mode,
+ preview_descr, in_info, out_infos, vf_info);
+ if (pipe->stream->config.online) {
+ preview_descr->online = pipe->stream->config.online;
+ preview_descr->two_ppc =
+ (pipe->stream->config.pixels_per_clock == 2);
+ }
+ preview_descr->stream_format = pipe->stream->config.input_config.format;
+
+ /* TODO: Remove this when bds_out_info is available! */
+ *bds_out_info = *in_info;
+
+ if (pipe->extra_config.enable_raw_binning) {
+ if (pipe->config.bayer_ds_out_res.width != 0 &&
+ pipe->config.bayer_ds_out_res.height != 0) {
+ bds_out_info->res.width =
+ pipe->config.bayer_ds_out_res.width;
+ bds_out_info->res.height =
+ pipe->config.bayer_ds_out_res.height;
+ bds_out_info->padded_width =
+ pipe->config.bayer_ds_out_res.width;
+ err =
+ binarydesc_calculate_bds_factor(in_info->res,
+ bds_out_info->res,
+ &preview_descr->required_bds_factor);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+ } else {
+ bds_out_info->res.width = in_info->res.width / 2;
+ bds_out_info->res.height = in_info->res.height / 2;
+ bds_out_info->padded_width = in_info->padded_width / 2;
+ preview_descr->required_bds_factor =
+ SH_CSS_BDS_FACTOR_2_00;
+ }
+ } else {
+ /* TODO: Remove this when bds_out_info is available! */
+ bds_out_info->res.width = in_info->res.width;
+ bds_out_info->res.height = in_info->res.height;
+ bds_out_info->padded_width = in_info->padded_width;
+ preview_descr->required_bds_factor = SH_CSS_BDS_FACTOR_1_00;
+ }
+ pipe->required_bds_factor = preview_descr->required_bds_factor;
+
+ /* bayer ds and fractional ds cannot be enabled at the same time,
+ so we disable bds_out_info when fractional ds is used */
+ if (!pipe->extra_config.enable_fractional_ds)
+ preview_descr->bds_out_info = bds_out_info;
+ else
+ preview_descr->bds_out_info = NULL;
+ /*
+ ----Preview binary-----
+ --in-->|--out->|vf_veceven|--|--->vf
+ -----------------------
+ * Preview binary normally doesn't have a vf_port but
+ * instead it has an output port. However, the output is
+ * generated by vf_veceven module in which we might have
+ * a downscaling (by 1x, 2x, or 4x). Because the resolution
+ * might change, we need two different info, namely out_info
+ * & vf_info. In fill_binary_info we use out&vf info to
+ * calculate vf decimation factor.
+ */
+ *out_info = *vf_info;
+
+ /* In case of preview_ds binary, we can do any fractional amount
+ * of downscale, so there is no DS needed in vf_veceven. Therefore,
+ * out and vf infos will be the same. Otherwise, we set out resolution
+ * equal to in resolution. */
+ if (!pipe->extra_config.enable_fractional_ds) {
+ /* TODO: Change this when bds_out_info is available! */
+ out_info->res.width = bds_out_info->res.width;
+ out_info->res.height = bds_out_info->res.height;
+ out_info->padded_width = bds_out_info->padded_width;
+ }
+ preview_descr->enable_fractional_ds =
+ pipe->extra_config.enable_fractional_ds;
+
+ preview_descr->enable_dpc = pipe->config.enable_dpc;
+
+ preview_descr->isp_pipe_version = pipe->config.isp_pipe_version;
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_SUCCESS);
+ return IA_CSS_SUCCESS;
+}
+
+enum ia_css_err ia_css_pipe_get_video_binarydesc(
+ struct ia_css_pipe * const pipe,
+ struct ia_css_binary_descr *video_descr,
+ struct ia_css_frame_info *in_info,
+ struct ia_css_frame_info *bds_out_info,
+ struct ia_css_frame_info *out_info,
+ struct ia_css_frame_info *vf_info,
+ int stream_config_left_padding)
+{
+ int mode = IA_CSS_BINARY_MODE_VIDEO;
+ unsigned int i;
+ struct ia_css_frame_info *out_infos[IA_CSS_BINARY_MAX_OUTPUT_PORTS];
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ bool stream_dz_config = false;
+
+ /* vf_info can be NULL */
+ assert(pipe != NULL);
+ assert(in_info != NULL);
+ /* assert(vf_info != NULL); */
+ IA_CSS_ENTER_PRIVATE("");
+
+ /* The solution below is not optimal; we should move to using
+ * ia_css_pipe_get_copy_binarydesc(). But for now this fixes things;
+ * this code used to be there but was removed with gerrit 8908 as it
+ * was wrong for Skycam; however, 240x still needs this.
+ */
+ if (ia_css_util_is_input_format_yuv(pipe->stream->config.input_config.format))
+ mode = IA_CSS_BINARY_MODE_COPY;
+
+ in_info->res = pipe->config.input_effective_res;
+ in_info->padded_width = in_info->res.width;
+ in_info->format = IA_CSS_FRAME_FORMAT_RAW;
+ in_info->raw_bit_depth = ia_css_pipe_util_pipe_input_format_bpp(pipe);
+ out_infos[0] = out_info;
+ for (i = 1; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++)
+ out_infos[i] = NULL;
+
+ pipe_binarydesc_get_offline(pipe, mode,
+ video_descr, in_info, out_infos, vf_info);
+
+ if (pipe->stream->config.online) {
+ video_descr->online = pipe->stream->config.online;
+ video_descr->two_ppc =
+ (pipe->stream->config.pixels_per_clock == 2);
+ }
+
+ if (mode == IA_CSS_BINARY_MODE_VIDEO) {
+ stream_dz_config =
+ ((pipe->stream->isp_params_configs->dz_config.dx !=
+ HRT_GDC_N)
+ || (pipe->stream->isp_params_configs->dz_config.dy !=
+ HRT_GDC_N));
+
+ video_descr->enable_dz = pipe->config.enable_dz
+ || stream_dz_config;
+ video_descr->dvs_env = pipe->config.dvs_envelope;
+ video_descr->enable_yuv_ds = pipe->extra_config.enable_yuv_ds;
+ video_descr->enable_high_speed =
+ pipe->extra_config.enable_high_speed;
+ video_descr->enable_dvs_6axis =
+ pipe->extra_config.enable_dvs_6axis;
+ video_descr->enable_reduced_pipe =
+ pipe->extra_config.enable_reduced_pipe;
+ video_descr->isp_pipe_version = pipe->config.isp_pipe_version;
+ video_descr->enable_fractional_ds =
+ pipe->extra_config.enable_fractional_ds;
+ video_descr->enable_dpc =
+ pipe->config.enable_dpc;
+#ifdef ISP2401
+ video_descr->enable_luma_only =
+ pipe->config.enable_luma_only;
+ video_descr->enable_tnr =
+ pipe->config.enable_tnr;
+#endif
+
+ if (pipe->extra_config.enable_raw_binning) {
+ if (pipe->config.bayer_ds_out_res.width != 0 &&
+ pipe->config.bayer_ds_out_res.height != 0) {
+ bds_out_info->res.width =
+ pipe->config.bayer_ds_out_res.width;
+ bds_out_info->res.height =
+ pipe->config.bayer_ds_out_res.height;
+ bds_out_info->padded_width =
+ pipe->config.bayer_ds_out_res.width;
+ err =
+ binarydesc_calculate_bds_factor(
+ in_info->res, bds_out_info->res,
+ &video_descr->required_bds_factor);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+ } else {
+ bds_out_info->res.width =
+ in_info->res.width / 2;
+ bds_out_info->res.height =
+ in_info->res.height / 2;
+ bds_out_info->padded_width =
+ in_info->padded_width / 2;
+ video_descr->required_bds_factor =
+ SH_CSS_BDS_FACTOR_2_00;
+ }
+ } else {
+ bds_out_info->res.width = in_info->res.width;
+ bds_out_info->res.height = in_info->res.height;
+ bds_out_info->padded_width = in_info->padded_width;
+ video_descr->required_bds_factor =
+ SH_CSS_BDS_FACTOR_1_00;
+ }
+
+ pipe->required_bds_factor = video_descr->required_bds_factor;
+
+ /* bayer ds and fractional ds cannot be enabled
+ at the same time, so we disable bds_out_info when
+ fractional ds is used */
+ if (!pipe->extra_config.enable_fractional_ds)
+ video_descr->bds_out_info = bds_out_info;
+ else
+ video_descr->bds_out_info = NULL;
+
+ video_descr->enable_fractional_ds =
+ pipe->extra_config.enable_fractional_ds;
+ video_descr->stream_config_left_padding = stream_config_left_padding;
+ }
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+}
+
+void ia_css_pipe_get_yuvscaler_binarydesc(
+ struct ia_css_pipe const * const pipe,
+ struct ia_css_binary_descr *yuv_scaler_descr,
+ struct ia_css_frame_info *in_info,
+ struct ia_css_frame_info *out_info,
+ struct ia_css_frame_info *internal_out_info,
+ struct ia_css_frame_info *vf_info)
+{
+ struct ia_css_frame_info *out_infos[IA_CSS_BINARY_MAX_OUTPUT_PORTS];
+ struct ia_css_frame_info *this_vf_info = NULL;
+
+ assert(pipe != NULL);
+ assert(in_info != NULL);
+ /* Note: if the following assert fails, the number of ports has been
+ * changed; in that case an additional initializer must be added
+ * a few lines below after which this assert can be updated.
+ */
+ assert(IA_CSS_BINARY_MAX_OUTPUT_PORTS == 2);
+ IA_CSS_ENTER_PRIVATE("");
+
+ in_info->padded_width = in_info->res.width;
+ in_info->raw_bit_depth = 0;
+ ia_css_frame_info_set_width(in_info, in_info->res.width, 0);
+ out_infos[0] = out_info;
+ out_infos[1] = internal_out_info;
+ /* add initializers here if
+ * assert(IA_CSS_BINARY_MAX_OUTPUT_PORTS == ...);
+ * fails
+ */
+
+ if (vf_info) {
+ this_vf_info = (vf_info->res.width == 0 &&
+ vf_info->res.height == 0) ? NULL : vf_info;
+ }
+
+ pipe_binarydesc_get_offline(pipe,
+ IA_CSS_BINARY_MODE_CAPTURE_PP,
+ yuv_scaler_descr,
+ in_info, out_infos, this_vf_info);
+
+ yuv_scaler_descr->enable_fractional_ds = true;
+ IA_CSS_LEAVE_PRIVATE("");
+}
+
+void ia_css_pipe_get_capturepp_binarydesc(
+ struct ia_css_pipe * const pipe,
+ struct ia_css_binary_descr *capture_pp_descr,
+ struct ia_css_frame_info *in_info,
+ struct ia_css_frame_info *out_info,
+ struct ia_css_frame_info *vf_info)
+{
+ unsigned int i;
+ struct ia_css_frame_info *out_infos[IA_CSS_BINARY_MAX_OUTPUT_PORTS];
+
+ assert(pipe != NULL);
+ assert(in_info != NULL);
+ assert(vf_info != NULL);
+ IA_CSS_ENTER_PRIVATE("");
+
+
+ /* the in_info is only used for resolution to enable
+ bayer down scaling. */
+ if (pipe->out_yuv_ds_input_info.res.width)
+ *in_info = pipe->out_yuv_ds_input_info;
+ else
+ *in_info = *out_info;
+ in_info->format = IA_CSS_FRAME_FORMAT_YUV420;
+ in_info->raw_bit_depth = 0;
+ ia_css_frame_info_set_width(in_info, in_info->res.width, 0);
+
+ out_infos[0] = out_info;
+ for (i = 1; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++)
+ out_infos[i] = NULL;
+
+ pipe_binarydesc_get_offline(pipe,
+ IA_CSS_BINARY_MODE_CAPTURE_PP,
+ capture_pp_descr,
+ in_info, out_infos, vf_info);
+
+ capture_pp_descr->enable_capture_pp_bli =
+ pipe->config.default_capture_config.enable_capture_pp_bli;
+ capture_pp_descr->enable_fractional_ds = true;
+ capture_pp_descr->enable_xnr =
+ pipe->config.default_capture_config.enable_xnr != 0;
+ IA_CSS_LEAVE_PRIVATE("");
+}
+
+/* lookup table for high quality primary binaries */
+static unsigned int primary_hq_binary_modes[NUM_PRIMARY_HQ_STAGES] =
+{
+ IA_CSS_BINARY_MODE_PRIMARY_HQ_STAGE0,
+ IA_CSS_BINARY_MODE_PRIMARY_HQ_STAGE1,
+ IA_CSS_BINARY_MODE_PRIMARY_HQ_STAGE2,
+ IA_CSS_BINARY_MODE_PRIMARY_HQ_STAGE3,
+ IA_CSS_BINARY_MODE_PRIMARY_HQ_STAGE4,
+ IA_CSS_BINARY_MODE_PRIMARY_HQ_STAGE5
+};
+
+void ia_css_pipe_get_primary_binarydesc(
+ struct ia_css_pipe const * const pipe,
+ struct ia_css_binary_descr *prim_descr,
+ struct ia_css_frame_info *in_info,
+ struct ia_css_frame_info *out_info,
+ struct ia_css_frame_info *vf_info,
+ unsigned int stage_idx)
+{
+ enum ia_css_pipe_version pipe_version = pipe->config.isp_pipe_version;
+ int mode;
+ unsigned int i;
+ struct ia_css_frame_info *out_infos[IA_CSS_BINARY_MAX_OUTPUT_PORTS];
+
+ assert(pipe != NULL);
+ assert(in_info != NULL);
+ assert(out_info != NULL);
+ assert(stage_idx < NUM_PRIMARY_HQ_STAGES);
+ /* vf_info can be NULL - example video_binarydescr */
+ /*assert(vf_info != NULL);*/
+ IA_CSS_ENTER_PRIVATE("");
+
+ if (pipe_version == IA_CSS_PIPE_VERSION_2_6_1)
+ mode = primary_hq_binary_modes[stage_idx];
+ else
+ mode = IA_CSS_BINARY_MODE_PRIMARY;
+
+ if (ia_css_util_is_input_format_yuv(pipe->stream->config.input_config.format))
+ mode = IA_CSS_BINARY_MODE_COPY;
+
+ in_info->res = pipe->config.input_effective_res;
+ in_info->padded_width = in_info->res.width;
+
+#if !defined(HAS_NO_PACKED_RAW_PIXELS)
+ if (pipe->stream->config.pack_raw_pixels)
+ in_info->format = IA_CSS_FRAME_FORMAT_RAW_PACKED;
+ else
+#endif
+ in_info->format = IA_CSS_FRAME_FORMAT_RAW;
+
+ in_info->raw_bit_depth = ia_css_pipe_util_pipe_input_format_bpp(pipe);
+ out_infos[0] = out_info;
+ for (i = 1; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++)
+ out_infos[i] = NULL;
+
+ pipe_binarydesc_get_offline(pipe, mode,
+ prim_descr, in_info, out_infos, vf_info);
+
+ if (pipe->stream->config.online &&
+ pipe->stream->config.mode != IA_CSS_INPUT_MODE_MEMORY) {
+ prim_descr->online = true;
+ prim_descr->two_ppc =
+ (pipe->stream->config.pixels_per_clock == 2);
+ prim_descr->stream_format = pipe->stream->config.input_config.format;
+ }
+ if (mode == IA_CSS_BINARY_MODE_PRIMARY) {
+ prim_descr->isp_pipe_version = pipe->config.isp_pipe_version;
+ prim_descr->enable_fractional_ds =
+ pipe->extra_config.enable_fractional_ds;
+#ifdef ISP2401
+ prim_descr->enable_luma_only =
+ pipe->config.enable_luma_only;
+#endif
+ /* We have both striped and non-striped primary binaries;
+ * if continuous viewfinder is required, then we must select
+ * a striped one. Otherwise we prefer to use a non-striped
+ * one since it has better performance. */
+ if (pipe_version == IA_CSS_PIPE_VERSION_2_6_1)
+ prim_descr->striped = false;
+ else
+#ifndef ISP2401
+ prim_descr->striped = prim_descr->continuous && (!pipe->stream->stop_copy_preview || !pipe->stream->disable_cont_vf);
+#else
+ prim_descr->striped = prim_descr->continuous && !pipe->stream->disable_cont_vf;
+
+ if ((pipe->config.default_capture_config.enable_xnr != 0) &&
+ (pipe->extra_config.enable_dvs_6axis == true))
+ prim_descr->enable_xnr = true;
+#endif
+ }
+ IA_CSS_LEAVE_PRIVATE("");
+}
+
+void ia_css_pipe_get_pre_gdc_binarydesc(
+ struct ia_css_pipe const * const pipe,
+ struct ia_css_binary_descr *pre_gdc_descr,
+ struct ia_css_frame_info *in_info,
+ struct ia_css_frame_info *out_info)
+{
+ unsigned int i;
+ struct ia_css_frame_info *out_infos[IA_CSS_BINARY_MAX_OUTPUT_PORTS];
+
+ assert(pipe != NULL);
+ assert(in_info != NULL);
+ assert(out_info != NULL);
+ IA_CSS_ENTER_PRIVATE("");
+
+ *in_info = *out_info;
+ in_info->format = IA_CSS_FRAME_FORMAT_RAW;
+ in_info->raw_bit_depth = ia_css_pipe_util_pipe_input_format_bpp(pipe);
+ out_infos[0] = out_info;
+ for (i = 1; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++)
+ out_infos[i] = NULL;
+
+ pipe_binarydesc_get_offline(pipe, IA_CSS_BINARY_MODE_PRE_ISP,
+ pre_gdc_descr, in_info, out_infos, NULL);
+ pre_gdc_descr->isp_pipe_version = pipe->config.isp_pipe_version;
+ IA_CSS_LEAVE_PRIVATE("");
+}
+
+void ia_css_pipe_get_gdc_binarydesc(
+ struct ia_css_pipe const * const pipe,
+ struct ia_css_binary_descr *gdc_descr,
+ struct ia_css_frame_info *in_info,
+ struct ia_css_frame_info *out_info)
+{
+ unsigned int i;
+ struct ia_css_frame_info *out_infos[IA_CSS_BINARY_MAX_OUTPUT_PORTS];
+
+ assert(pipe != NULL);
+ assert(in_info != NULL);
+ assert(out_info != NULL);
+ IA_CSS_ENTER_PRIVATE("");
+
+ *in_info = *out_info;
+ in_info->format = IA_CSS_FRAME_FORMAT_QPLANE6;
+ out_infos[0] = out_info;
+ for (i = 1; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++)
+ out_infos[i] = NULL;
+
+ pipe_binarydesc_get_offline(pipe, IA_CSS_BINARY_MODE_GDC,
+ gdc_descr, in_info, out_infos, NULL);
+ IA_CSS_LEAVE_PRIVATE("");
+}
+
+void ia_css_pipe_get_post_gdc_binarydesc(
+ struct ia_css_pipe const * const pipe,
+ struct ia_css_binary_descr *post_gdc_descr,
+ struct ia_css_frame_info *in_info,
+ struct ia_css_frame_info *out_info,
+ struct ia_css_frame_info *vf_info)
+{
+ unsigned int i;
+ struct ia_css_frame_info *out_infos[IA_CSS_BINARY_MAX_OUTPUT_PORTS];
+
+ assert(pipe != NULL);
+ assert(in_info != NULL);
+ assert(out_info != NULL);
+ assert(vf_info != NULL);
+ IA_CSS_ENTER_PRIVATE("");
+
+ *in_info = *out_info;
+ in_info->format = IA_CSS_FRAME_FORMAT_YUV420_16;
+ in_info->raw_bit_depth = 16;
+ out_infos[0] = out_info;
+ for (i = 1; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++)
+ out_infos[i] = NULL;
+
+ pipe_binarydesc_get_offline(pipe, IA_CSS_BINARY_MODE_POST_ISP,
+ post_gdc_descr, in_info, out_infos, vf_info);
+
+ post_gdc_descr->isp_pipe_version = pipe->config.isp_pipe_version;
+ IA_CSS_LEAVE_PRIVATE("");
+}
+
+void ia_css_pipe_get_pre_de_binarydesc(
+ struct ia_css_pipe const * const pipe,
+ struct ia_css_binary_descr *pre_de_descr,
+ struct ia_css_frame_info *in_info,
+ struct ia_css_frame_info *out_info)
+{
+ unsigned int i;
+ struct ia_css_frame_info *out_infos[IA_CSS_BINARY_MAX_OUTPUT_PORTS];
+
+ assert(pipe != NULL);
+ assert(in_info != NULL);
+ assert(out_info != NULL);
+ IA_CSS_ENTER_PRIVATE("");
+
+ *in_info = *out_info;
+ in_info->format = IA_CSS_FRAME_FORMAT_RAW;
+ in_info->raw_bit_depth = ia_css_pipe_util_pipe_input_format_bpp(pipe);
+ out_infos[0] = out_info;
+ for (i = 1; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++)
+ out_infos[i] = NULL;
+
+ if (pipe->config.isp_pipe_version == IA_CSS_PIPE_VERSION_1)
+ pipe_binarydesc_get_offline(pipe, IA_CSS_BINARY_MODE_PRE_ISP,
+ pre_de_descr, in_info, out_infos, NULL);
+ else if (pipe->config.isp_pipe_version == IA_CSS_PIPE_VERSION_2_2) {
+ pipe_binarydesc_get_offline(pipe, IA_CSS_BINARY_MODE_PRE_DE,
+ pre_de_descr, in_info, out_infos, NULL);
+ }
+
+ if (pipe->stream->config.online) {
+ pre_de_descr->online = true;
+ pre_de_descr->two_ppc =
+ (pipe->stream->config.pixels_per_clock == 2);
+ pre_de_descr->stream_format = pipe->stream->config.input_config.format;
+ }
+ pre_de_descr->isp_pipe_version = pipe->config.isp_pipe_version;
+ IA_CSS_LEAVE_PRIVATE("");
+}
+
+void ia_css_pipe_get_pre_anr_binarydesc(
+ struct ia_css_pipe const * const pipe,
+ struct ia_css_binary_descr *pre_anr_descr,
+ struct ia_css_frame_info *in_info,
+ struct ia_css_frame_info *out_info)
+{
+ unsigned int i;
+ struct ia_css_frame_info *out_infos[IA_CSS_BINARY_MAX_OUTPUT_PORTS];
+
+ assert(pipe != NULL);
+ assert(in_info != NULL);
+ assert(out_info != NULL);
+ IA_CSS_ENTER_PRIVATE("");
+
+ *in_info = *out_info;
+ in_info->format = IA_CSS_FRAME_FORMAT_RAW;
+ in_info->raw_bit_depth = ia_css_pipe_util_pipe_input_format_bpp(pipe);
+ out_infos[0] = out_info;
+ for (i = 1; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++)
+ out_infos[i] = NULL;
+
+ pipe_binarydesc_get_offline(pipe, IA_CSS_BINARY_MODE_PRE_ISP,
+ pre_anr_descr, in_info, out_infos, NULL);
+
+ if (pipe->stream->config.online) {
+ pre_anr_descr->online = true;
+ pre_anr_descr->two_ppc =
+ (pipe->stream->config.pixels_per_clock == 2);
+ pre_anr_descr->stream_format = pipe->stream->config.input_config.format;
+ }
+ pre_anr_descr->isp_pipe_version = pipe->config.isp_pipe_version;
+ IA_CSS_LEAVE_PRIVATE("");
+}
+
+void ia_css_pipe_get_anr_binarydesc(
+ struct ia_css_pipe const * const pipe,
+ struct ia_css_binary_descr *anr_descr,
+ struct ia_css_frame_info *in_info,
+ struct ia_css_frame_info *out_info)
+{
+ unsigned int i;
+ struct ia_css_frame_info *out_infos[IA_CSS_BINARY_MAX_OUTPUT_PORTS];
+
+ assert(pipe != NULL);
+ assert(in_info != NULL);
+ assert(out_info != NULL);
+ IA_CSS_ENTER_PRIVATE("");
+
+ *in_info = *out_info;
+ in_info->format = IA_CSS_FRAME_FORMAT_RAW;
+ in_info->raw_bit_depth = ANR_ELEMENT_BITS;
+ out_infos[0] = out_info;
+ for (i = 1; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++)
+ out_infos[i] = NULL;
+
+ pipe_binarydesc_get_offline(pipe, IA_CSS_BINARY_MODE_ANR,
+ anr_descr, in_info, out_infos, NULL);
+
+ anr_descr->isp_pipe_version = pipe->config.isp_pipe_version;
+ IA_CSS_LEAVE_PRIVATE("");
+}
+
+void ia_css_pipe_get_post_anr_binarydesc(
+ struct ia_css_pipe const * const pipe,
+ struct ia_css_binary_descr *post_anr_descr,
+ struct ia_css_frame_info *in_info,
+ struct ia_css_frame_info *out_info,
+ struct ia_css_frame_info *vf_info)
+{
+ unsigned int i;
+ struct ia_css_frame_info *out_infos[IA_CSS_BINARY_MAX_OUTPUT_PORTS];
+
+ assert(pipe != NULL);
+ assert(in_info != NULL);
+ assert(out_info != NULL);
+ assert(vf_info != NULL);
+ IA_CSS_ENTER_PRIVATE("");
+
+ *in_info = *out_info;
+ in_info->format = IA_CSS_FRAME_FORMAT_RAW;
+ in_info->raw_bit_depth = ANR_ELEMENT_BITS;
+ out_infos[0] = out_info;
+ for (i = 1; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++)
+ out_infos[i] = NULL;
+
+ pipe_binarydesc_get_offline(pipe, IA_CSS_BINARY_MODE_POST_ISP,
+ post_anr_descr, in_info, out_infos, vf_info);
+
+ post_anr_descr->isp_pipe_version = pipe->config.isp_pipe_version;
+ IA_CSS_LEAVE_PRIVATE("");
+}
+
+void ia_css_pipe_get_ldc_binarydesc(
+ struct ia_css_pipe const * const pipe,
+ struct ia_css_binary_descr *ldc_descr,
+ struct ia_css_frame_info *in_info,
+ struct ia_css_frame_info *out_info)
+{
+ unsigned int i;
+ struct ia_css_frame_info *out_infos[IA_CSS_BINARY_MAX_OUTPUT_PORTS];
+
+ assert(pipe != NULL);
+ assert(in_info != NULL);
+ assert(out_info != NULL);
+ IA_CSS_ENTER_PRIVATE("");
+
+#ifndef ISP2401
+ *in_info = *out_info;
+#else
+ if (pipe->out_yuv_ds_input_info.res.width)
+ *in_info = pipe->out_yuv_ds_input_info;
+ else
+ *in_info = *out_info;
+#endif
+ in_info->format = IA_CSS_FRAME_FORMAT_YUV420;
+ in_info->raw_bit_depth = 0;
+ ia_css_frame_info_set_width(in_info, in_info->res.width, 0);
+
+ out_infos[0] = out_info;
+ for (i = 1; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++)
+ out_infos[i] = NULL;
+
+ pipe_binarydesc_get_offline(pipe, IA_CSS_BINARY_MODE_CAPTURE_PP,
+ ldc_descr, in_info, out_infos, NULL);
+ ldc_descr->enable_dvs_6axis =
+ pipe->extra_config.enable_dvs_6axis;
+ IA_CSS_LEAVE_PRIVATE("");
+}
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/pipe/src/pipe_stagedesc.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/pipe/src/pipe_stagedesc.c
new file mode 100644
index 000000000000..40af8daf5ad9
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/pipe/src/pipe_stagedesc.c
@@ -0,0 +1,115 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "ia_css_pipe_stagedesc.h"
+#include "assert_support.h"
+#include "ia_css_debug.h"
+
+void ia_css_pipe_get_generic_stage_desc(
+ struct ia_css_pipeline_stage_desc *stage_desc,
+ struct ia_css_binary *binary,
+ struct ia_css_frame *out_frame[],
+ struct ia_css_frame *in_frame,
+ struct ia_css_frame *vf_frame)
+{
+ unsigned int i;
+ IA_CSS_ENTER_PRIVATE("stage_desc = %p, binary = %p, out_frame = %p, in_frame = %p, vf_frame = %p",
+ stage_desc, binary, out_frame, in_frame, vf_frame);
+
+ assert(stage_desc != NULL && binary != NULL && binary->info != NULL);
+ if (stage_desc == NULL || binary == NULL || binary->info == NULL) {
+ IA_CSS_ERROR("invalid arguments");
+ goto ERR;
+ }
+
+ stage_desc->binary = binary;
+ stage_desc->firmware = NULL;
+ stage_desc->sp_func = IA_CSS_PIPELINE_NO_FUNC;
+ stage_desc->max_input_width = 0;
+ stage_desc->mode = binary->info->sp.pipeline.mode;
+ stage_desc->in_frame = in_frame;
+ for (i = 0; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++) {
+ stage_desc->out_frame[i] = out_frame[i];
+ }
+ stage_desc->vf_frame = vf_frame;
+ERR:
+ IA_CSS_LEAVE_PRIVATE("");
+}
+
+void ia_css_pipe_get_firmwares_stage_desc(
+ struct ia_css_pipeline_stage_desc *stage_desc,
+ struct ia_css_binary *binary,
+ struct ia_css_frame *out_frame[],
+ struct ia_css_frame *in_frame,
+ struct ia_css_frame *vf_frame,
+ const struct ia_css_fw_info *fw,
+ unsigned int mode)
+{
+ unsigned int i;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_pipe_get_firmwares_stage_desc() enter:\n");
+ stage_desc->binary = binary;
+ stage_desc->firmware = fw;
+ stage_desc->sp_func = IA_CSS_PIPELINE_NO_FUNC;
+ stage_desc->max_input_width = 0;
+ stage_desc->mode = mode;
+ stage_desc->in_frame = in_frame;
+ for (i = 0; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++) {
+ stage_desc->out_frame[i] = out_frame[i];
+ }
+ stage_desc->vf_frame = vf_frame;
+}
+
+void ia_css_pipe_get_acc_stage_desc(
+ struct ia_css_pipeline_stage_desc *stage_desc,
+ struct ia_css_binary *binary,
+ struct ia_css_fw_info *fw)
+{
+ unsigned int i;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_pipe_get_acc_stage_desc() enter:\n");
+ stage_desc->binary = binary;
+ stage_desc->firmware = fw;
+ stage_desc->sp_func = IA_CSS_PIPELINE_NO_FUNC;
+ stage_desc->max_input_width = 0;
+ stage_desc->mode = IA_CSS_BINARY_MODE_VF_PP;
+ stage_desc->in_frame = NULL;
+ for (i = 0; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++) {
+ stage_desc->out_frame[i] = NULL;
+ }
+ stage_desc->vf_frame = NULL;
+}
+
+void ia_css_pipe_get_sp_func_stage_desc(
+ struct ia_css_pipeline_stage_desc *stage_desc,
+ struct ia_css_frame *out_frame,
+ enum ia_css_pipeline_stage_sp_func sp_func,
+ unsigned max_input_width)
+{
+ unsigned int i;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_pipe_get_sp_func_stage_desc() enter:\n");
+ stage_desc->binary = NULL;
+ stage_desc->firmware = NULL;
+ stage_desc->sp_func = sp_func;
+ stage_desc->max_input_width = max_input_width;
+ stage_desc->mode = (unsigned int)-1;
+ stage_desc->in_frame = NULL;
+ stage_desc->out_frame[0] = out_frame;
+ for (i = 1; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++) {
+ stage_desc->out_frame[i] = NULL;
+ }
+ stage_desc->vf_frame = NULL;
+}
+
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/pipe/src/pipe_util.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/pipe/src/pipe_util.c
new file mode 100644
index 000000000000..5fc1718cb2bd
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/pipe/src/pipe_util.c
@@ -0,0 +1,51 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "ia_css_pipe_util.h"
+#include "ia_css_frame_public.h"
+#include "ia_css_pipe.h"
+#include "ia_css_util.h"
+#include "assert_support.h"
+
+unsigned int ia_css_pipe_util_pipe_input_format_bpp(
+ const struct ia_css_pipe * const pipe)
+{
+ assert(pipe != NULL);
+ assert(pipe->stream != NULL);
+
+ return ia_css_util_input_format_bpp(pipe->stream->config.input_config.format,
+ pipe->stream->config.pixels_per_clock == 2);
+}
+
+void ia_css_pipe_util_create_output_frames(
+ struct ia_css_frame *frames[])
+{
+ unsigned int i;
+
+ assert(frames != NULL);
+ for (i = 0; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++) {
+ frames[i] = NULL;
+ }
+}
+
+void ia_css_pipe_util_set_output_frames(
+ struct ia_css_frame *frames[],
+ unsigned int idx,
+ struct ia_css_frame *frame)
+{
+ assert(idx < IA_CSS_BINARY_MAX_OUTPUT_PORTS);
+
+ frames[idx] = frame;
+}
+
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/util/interface/ia_css_util.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/util/interface/ia_css_util.h
new file mode 100644
index 000000000000..f8b2e458f876
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/util/interface/ia_css_util.h
@@ -0,0 +1,141 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_UTIL_H__
+#define __IA_CSS_UTIL_H__
+
+#include <ia_css_err.h>
+#include <error_support.h>
+#include <type_support.h>
+#include <ia_css_frame_public.h>
+#include <ia_css_stream_public.h>
+#include <ia_css_stream_format.h>
+
+/** @brief convert "errno" error code to "ia_css_err" error code
+ *
+ * @param[in] "errno" error code
+ * @return "ia_css_err" error code
+ *
+ */
+enum ia_css_err ia_css_convert_errno(
+ int in_err);
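+
+/* Illustrative use (hypothetical caller code; 'ret' is an errno-style value
+ * returned by a lower layer):
+ *
+ *	enum ia_css_err err = ia_css_convert_errno(ret);
+ *
+ *	if (err != IA_CSS_SUCCESS)
+ *		return err;
+ */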
+
+/** @brief check vf frame info.
+ *
+ * @param[in] info
+ * @return IA_CSS_SUCCESS or error code upon error.
+ *
+ */
+extern enum ia_css_err ia_css_util_check_vf_info(
+ const struct ia_css_frame_info * const info);
+
+/** @brief check input configuration.
+ *
+ * @param[in] stream_config
+ * @param[in] must_be_raw
+ * @param[in] must_be_yuv
+ * @return IA_CSS_SUCCESS or error code upon error.
+ *
+ */
+extern enum ia_css_err ia_css_util_check_input(
+ const struct ia_css_stream_config * const stream_config,
+ bool must_be_raw,
+ bool must_be_yuv);
+
+/** @brief check vf and out frame info.
+ *
+ * @param[in] out_info
+ * @param[in] vf_info
+ * @return IA_CSS_SUCCESS or error code upon error.
+ *
+ */
+extern enum ia_css_err ia_css_util_check_vf_out_info(
+ const struct ia_css_frame_info * const out_info,
+ const struct ia_css_frame_info * const vf_info);
+
+/** @brief check width and height
+ *
+ * @param[in] width
+ * @param[in] height
+ * @return IA_CSS_SUCCESS or error code upon error.
+ *
+ */
+extern enum ia_css_err ia_css_util_check_res(
+ unsigned int width,
+ unsigned int height);
+
+#ifdef ISP2401
+/** @brief compare resolutions (less or equal)
+ *
+ * @param[in] a resolution
+ * @param[in] b resolution
+ * @return true if both dimensions of a are less than or
+ * equal to those of b, false otherwise
+ *
+ */
+extern bool ia_css_util_res_leq(
+ struct ia_css_resolution a,
+ struct ia_css_resolution b);
+
+/**
+ * @brief Check if resolution is zero
+ *
+ * @param[in] resolution The resolution to check
+ *
+ * @returns true if either dimension of the resolution is zero
+ */
+extern bool ia_css_util_resolution_is_zero(
+ const struct ia_css_resolution resolution);
+
+/**
+ * @brief Check if resolution is even
+ *
+ * @param[in] resolution The resolution to check
+ *
+ * @returns true if both dimensions of the resolution are even
+ */
+extern bool ia_css_util_resolution_is_even(
+ const struct ia_css_resolution resolution);
+
+#endif
+/** @brief get the bits per pixel for the given stream format
+ *
+ * @param[in] stream_format
+ * @param[in] two_ppc
+ * @return bits per pixel based on given parameters.
+ *
+ */
+extern unsigned int ia_css_util_input_format_bpp(
+ enum ia_css_stream_format stream_format,
+ bool two_ppc);
+
+/** @brief check if input format is raw
+ *
+ * @param[in] stream_format
+ * @return true if the input format is raw or false otherwise
+ *
+ */
+extern bool ia_css_util_is_input_format_raw(
+ enum ia_css_stream_format stream_format);
+
+/** @brief check if input format is yuv
+ *
+ * @param[in] stream_format
+ * @return true if the input format is yuv or false otherwise
+ *
+ */
+extern bool ia_css_util_is_input_format_yuv(
+ enum ia_css_stream_format stream_format);
+
+#endif /* __IA_CSS_UTIL_H__ */
+
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/util/src/util.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/util/src/util.c
new file mode 100644
index 000000000000..08f486e20a65
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/util/src/util.c
@@ -0,0 +1,227 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "ia_css_util.h"
+#include <ia_css_frame.h>
+#include <assert_support.h>
+#include <math_support.h>
+
+/* for ia_css_binary_max_vf_width() */
+#include "ia_css_binary.h"
+
+
+enum ia_css_err ia_css_convert_errno(
+ int in_err)
+{
+ enum ia_css_err out_err;
+
+ switch (in_err) {
+ case 0:
+ out_err = IA_CSS_SUCCESS;
+ break;
+ case EINVAL:
+ out_err = IA_CSS_ERR_INVALID_ARGUMENTS;
+ break;
+ case ENODATA:
+ out_err = IA_CSS_ERR_QUEUE_IS_EMPTY;
+ break;
+ case ENOSYS:
+ case ENOTSUP:
+ out_err = IA_CSS_ERR_INTERNAL_ERROR;
+ break;
+ case ENOBUFS:
+ out_err = IA_CSS_ERR_QUEUE_IS_FULL;
+ break;
+ default:
+ out_err = IA_CSS_ERR_INTERNAL_ERROR;
+ break;
+ }
+ return out_err;
+}
+
+/* MW: Table look-up ??? */
+unsigned int ia_css_util_input_format_bpp(
+ enum ia_css_stream_format format,
+ bool two_ppc)
+{
+ unsigned int rval = 0;
+ switch (format) {
+ case IA_CSS_STREAM_FORMAT_YUV420_8_LEGACY:
+ case IA_CSS_STREAM_FORMAT_YUV420_8:
+ case IA_CSS_STREAM_FORMAT_YUV422_8:
+ case IA_CSS_STREAM_FORMAT_RGB_888:
+ case IA_CSS_STREAM_FORMAT_RAW_8:
+ case IA_CSS_STREAM_FORMAT_BINARY_8:
+ case IA_CSS_STREAM_FORMAT_EMBEDDED:
+ rval = 8;
+ break;
+ case IA_CSS_STREAM_FORMAT_YUV420_10:
+ case IA_CSS_STREAM_FORMAT_YUV422_10:
+ case IA_CSS_STREAM_FORMAT_RAW_10:
+ rval = 10;
+ break;
+ case IA_CSS_STREAM_FORMAT_YUV420_16:
+ case IA_CSS_STREAM_FORMAT_YUV422_16:
+ rval = 16;
+ break;
+ case IA_CSS_STREAM_FORMAT_RGB_444:
+ rval = 4;
+ break;
+ case IA_CSS_STREAM_FORMAT_RGB_555:
+ rval = 5;
+ break;
+ case IA_CSS_STREAM_FORMAT_RGB_565:
+ rval = 65;
+ break;
+ case IA_CSS_STREAM_FORMAT_RGB_666:
+ case IA_CSS_STREAM_FORMAT_RAW_6:
+ rval = 6;
+ break;
+ case IA_CSS_STREAM_FORMAT_RAW_7:
+ rval = 7;
+ break;
+ case IA_CSS_STREAM_FORMAT_RAW_12:
+ rval = 12;
+ break;
+ case IA_CSS_STREAM_FORMAT_RAW_14:
+ if (two_ppc)
+ rval = 14;
+ else
+ rval = 12;
+ break;
+ case IA_CSS_STREAM_FORMAT_RAW_16:
+ if (two_ppc)
+ rval = 16;
+ else
+ rval = 12;
+ break;
+ default:
+ rval = 0;
+ break;
+
+ }
+ return rval;
+}
+
+enum ia_css_err ia_css_util_check_vf_info(
+ const struct ia_css_frame_info * const info)
+{
+ enum ia_css_err err;
+ unsigned int max_vf_width;
+ assert(info != NULL);
+ err = ia_css_frame_check_info(info);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+ max_vf_width = ia_css_binary_max_vf_width();
+ if (max_vf_width != 0 && info->res.width > max_vf_width*2)
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ return IA_CSS_SUCCESS;
+}
+
+enum ia_css_err ia_css_util_check_vf_out_info(
+ const struct ia_css_frame_info * const out_info,
+ const struct ia_css_frame_info * const vf_info)
+{
+ enum ia_css_err err;
+
+ assert(out_info != NULL);
+ assert(vf_info != NULL);
+
+ err = ia_css_frame_check_info(out_info);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+ err = ia_css_util_check_vf_info(vf_info);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+ return IA_CSS_SUCCESS;
+}
+
+enum ia_css_err ia_css_util_check_res(unsigned int width, unsigned int height)
+{
+ /* height can be odd number for jpeg/embedded data from ISYS2401 */
+ if (((width == 0) ||
+ (height == 0) ||
+ IS_ODD(width))) {
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+ return IA_CSS_SUCCESS;
+}
+
+#ifdef ISP2401
+bool ia_css_util_res_leq(struct ia_css_resolution a, struct ia_css_resolution b)
+{
+ return a.width <= b.width && a.height <= b.height;
+}
+
+bool ia_css_util_resolution_is_zero(const struct ia_css_resolution resolution)
+{
+ return (resolution.width == 0) || (resolution.height == 0);
+}
+
+bool ia_css_util_resolution_is_even(const struct ia_css_resolution resolution)
+{
+ return IS_EVEN(resolution.height) && IS_EVEN(resolution.width);
+}
+
+#endif
+bool ia_css_util_is_input_format_raw(enum ia_css_stream_format format)
+{
+ return ((format == IA_CSS_STREAM_FORMAT_RAW_6) ||
+ (format == IA_CSS_STREAM_FORMAT_RAW_7) ||
+ (format == IA_CSS_STREAM_FORMAT_RAW_8) ||
+ (format == IA_CSS_STREAM_FORMAT_RAW_10) ||
+ (format == IA_CSS_STREAM_FORMAT_RAW_12));
+ /* raw_14 and raw_16 are not supported as input formats to the ISP.
+ * They can only be copied to a frame in memory using the
+ * copy binary.
+ */
+}
+
+bool ia_css_util_is_input_format_yuv(enum ia_css_stream_format format)
+{
+ return format == IA_CSS_STREAM_FORMAT_YUV420_8_LEGACY ||
+ format == IA_CSS_STREAM_FORMAT_YUV420_8 ||
+ format == IA_CSS_STREAM_FORMAT_YUV420_10 ||
+ format == IA_CSS_STREAM_FORMAT_YUV420_16 ||
+ format == IA_CSS_STREAM_FORMAT_YUV422_8 ||
+ format == IA_CSS_STREAM_FORMAT_YUV422_10 ||
+ format == IA_CSS_STREAM_FORMAT_YUV422_16;
+}
+
+enum ia_css_err ia_css_util_check_input(
+ const struct ia_css_stream_config * const stream_config,
+ bool must_be_raw,
+ bool must_be_yuv)
+{
+ assert(stream_config != NULL);
+
+ if (stream_config == NULL)
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+
+#ifdef IS_ISP_2400_SYSTEM
+ if (stream_config->input_config.effective_res.width == 0 ||
+ stream_config->input_config.effective_res.height == 0)
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+#endif
+ if (must_be_raw &&
+ !ia_css_util_is_input_format_raw(stream_config->input_config.format))
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+
+ if (must_be_yuv &&
+ !ia_css_util_is_input_format_yuv(stream_config->input_config.format))
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+
+ return IA_CSS_SUCCESS;
+}
+
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hive_isp_css_2400_system_generated/ia_css_isp_configs.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hive_isp_css_2400_system_generated/ia_css_isp_configs.c
new file mode 100644
index 000000000000..325b821f276c
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hive_isp_css_2400_system_generated/ia_css_isp_configs.c
@@ -0,0 +1,360 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+/* Generated code: do not edit or commit. */
+
+#define IA_CSS_INCLUDE_CONFIGURATIONS
+#include "ia_css_pipeline.h"
+#include "ia_css_isp_configs.h"
+#include "ia_css_debug.h"
+#include "assert_support.h"
+
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+
+void
+ia_css_configure_iterator(
+ const struct ia_css_binary *binary,
+ const struct ia_css_iterator_configuration *config_dmem)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_iterator() enter:\n");
+
+ {
+ unsigned offset = 0;
+ unsigned size = 0;
+ if (binary->info->mem_offsets.offsets.config) {
+ size = binary->info->mem_offsets.offsets.config->dmem.iterator.size;
+ offset = binary->info->mem_offsets.offsets.config->dmem.iterator.offset;
+ }
+ if (size) {
+ ia_css_iterator_config((struct sh_css_isp_iterator_isp_config *)
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
+ config_dmem, size); }
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_iterator() leave:\n");
+}
+
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+
+void
+ia_css_configure_copy_output(
+ const struct ia_css_binary *binary,
+ const struct ia_css_copy_output_configuration *config_dmem)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_copy_output() enter:\n");
+
+ {
+ unsigned offset = 0;
+ unsigned size = 0;
+ if (binary->info->mem_offsets.offsets.config) {
+ size = binary->info->mem_offsets.offsets.config->dmem.copy_output.size;
+ offset = binary->info->mem_offsets.offsets.config->dmem.copy_output.offset;
+ }
+ if (size) {
+ ia_css_copy_output_config((struct sh_css_isp_copy_output_isp_config *)
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
+ config_dmem, size); }
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_copy_output() leave:\n");
+}
+
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+
+void
+ia_css_configure_crop(
+ const struct ia_css_binary *binary,
+ const struct ia_css_crop_configuration *config_dmem)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_crop() enter:\n");
+
+ {
+ unsigned offset = 0;
+ unsigned size = 0;
+ if (binary->info->mem_offsets.offsets.config) {
+ size = binary->info->mem_offsets.offsets.config->dmem.crop.size;
+ offset = binary->info->mem_offsets.offsets.config->dmem.crop.offset;
+ }
+ if (size) {
+ ia_css_crop_config((struct sh_css_isp_crop_isp_config *)
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
+ config_dmem, size); }
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_crop() leave:\n");
+}
+
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+
+void
+ia_css_configure_fpn(
+ const struct ia_css_binary *binary,
+ const struct ia_css_fpn_configuration *config_dmem)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_fpn() enter:\n");
+
+ {
+ unsigned offset = 0;
+ unsigned size = 0;
+ if (binary->info->mem_offsets.offsets.config) {
+ size = binary->info->mem_offsets.offsets.config->dmem.fpn.size;
+ offset = binary->info->mem_offsets.offsets.config->dmem.fpn.offset;
+ }
+ if (size) {
+ ia_css_fpn_config((struct sh_css_isp_fpn_isp_config *)
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
+ config_dmem, size); }
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_fpn() leave:\n");
+}
+
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+
+void
+ia_css_configure_dvs(
+ const struct ia_css_binary *binary,
+ const struct ia_css_dvs_configuration *config_dmem)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_dvs() enter:\n");
+
+ {
+ unsigned offset = 0;
+ unsigned size = 0;
+ if (binary->info->mem_offsets.offsets.config) {
+ size = binary->info->mem_offsets.offsets.config->dmem.dvs.size;
+ offset = binary->info->mem_offsets.offsets.config->dmem.dvs.offset;
+ }
+ if (size) {
+ ia_css_dvs_config((struct sh_css_isp_dvs_isp_config *)
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
+ config_dmem, size); }
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_dvs() leave:\n");
+}
+
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+
+void
+ia_css_configure_qplane(
+ const struct ia_css_binary *binary,
+ const struct ia_css_qplane_configuration *config_dmem)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_qplane() enter:\n");
+
+ {
+ unsigned offset = 0;
+ unsigned size = 0;
+ if (binary->info->mem_offsets.offsets.config) {
+ size = binary->info->mem_offsets.offsets.config->dmem.qplane.size;
+ offset = binary->info->mem_offsets.offsets.config->dmem.qplane.offset;
+ }
+ if (size) {
+ ia_css_qplane_config((struct sh_css_isp_qplane_isp_config *)
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
+ config_dmem, size); }
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_qplane() leave:\n");
+}
+
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+
+void
+ia_css_configure_output0(
+ const struct ia_css_binary *binary,
+ const struct ia_css_output0_configuration *config_dmem)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_output0() enter:\n");
+
+ {
+ unsigned offset = 0;
+ unsigned size = 0;
+ if (binary->info->mem_offsets.offsets.config) {
+ size = binary->info->mem_offsets.offsets.config->dmem.output0.size;
+ offset = binary->info->mem_offsets.offsets.config->dmem.output0.offset;
+ }
+ if (size) {
+ ia_css_output0_config((struct sh_css_isp_output_isp_config *)
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
+ config_dmem, size); }
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_output0() leave:\n");
+}
+
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+
+void
+ia_css_configure_output1(
+ const struct ia_css_binary *binary,
+ const struct ia_css_output1_configuration *config_dmem)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_output1() enter:\n");
+
+ {
+ unsigned offset = 0;
+ unsigned size = 0;
+ if (binary->info->mem_offsets.offsets.config) {
+ size = binary->info->mem_offsets.offsets.config->dmem.output1.size;
+ offset = binary->info->mem_offsets.offsets.config->dmem.output1.offset;
+ }
+ if (size) {
+ ia_css_output1_config((struct sh_css_isp_output_isp_config *)
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
+ config_dmem, size); }
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_output1() leave:\n");
+}
+
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+
+void
+ia_css_configure_output(
+ const struct ia_css_binary *binary,
+ const struct ia_css_output_configuration *config_dmem)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_output() enter:\n");
+
+ {
+ unsigned offset = 0;
+ unsigned size = 0;
+ if (binary->info->mem_offsets.offsets.config) {
+ size = binary->info->mem_offsets.offsets.config->dmem.output.size;
+ offset = binary->info->mem_offsets.offsets.config->dmem.output.offset;
+ }
+ if (size) {
+ ia_css_output_config((struct sh_css_isp_output_isp_config *)
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
+ config_dmem, size); }
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_output() leave:\n");
+}
+
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+#ifdef ISP2401
+
+void
+ia_css_configure_sc(
+ const struct ia_css_binary *binary,
+ const struct ia_css_sc_configuration *config_dmem)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_sc() enter:\n");
+
+ {
+ unsigned offset = 0;
+ unsigned size = 0;
+ if (binary->info->mem_offsets.offsets.config) {
+ size = binary->info->mem_offsets.offsets.config->dmem.sc.size;
+ offset = binary->info->mem_offsets.offsets.config->dmem.sc.offset;
+ }
+ if (size) {
+ ia_css_sc_config((struct sh_css_isp_sc_isp_config *)
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
+ config_dmem, size); }
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_sc() leave:\n");
+}
+
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+#endif
+
+void
+ia_css_configure_raw(
+ const struct ia_css_binary *binary,
+ const struct ia_css_raw_configuration *config_dmem)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_raw() enter:\n");
+
+ {
+ unsigned offset = 0;
+ unsigned size = 0;
+ if (binary->info->mem_offsets.offsets.config) {
+ size = binary->info->mem_offsets.offsets.config->dmem.raw.size;
+ offset = binary->info->mem_offsets.offsets.config->dmem.raw.offset;
+ }
+ if (size) {
+ ia_css_raw_config((struct sh_css_isp_raw_isp_config *)
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
+ config_dmem, size); }
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_raw() leave:\n");
+}
+
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+
+void
+ia_css_configure_tnr(
+ const struct ia_css_binary *binary,
+ const struct ia_css_tnr_configuration *config_dmem)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_tnr() enter:\n");
+
+ {
+ unsigned offset = 0;
+ unsigned size = 0;
+ if (binary->info->mem_offsets.offsets.config) {
+ size = binary->info->mem_offsets.offsets.config->dmem.tnr.size;
+ offset = binary->info->mem_offsets.offsets.config->dmem.tnr.offset;
+ }
+ if (size) {
+ ia_css_tnr_config((struct sh_css_isp_tnr_isp_config *)
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
+ config_dmem, size); }
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_tnr() leave:\n");
+}
+
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+
+void
+ia_css_configure_ref(
+ const struct ia_css_binary *binary,
+ const struct ia_css_ref_configuration *config_dmem)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_ref() enter:\n");
+
+ {
+ unsigned offset = 0;
+ unsigned size = 0;
+ if (binary->info->mem_offsets.offsets.config) {
+ size = binary->info->mem_offsets.offsets.config->dmem.ref.size;
+ offset = binary->info->mem_offsets.offsets.config->dmem.ref.offset;
+ }
+ if (size) {
+ ia_css_ref_config((struct sh_css_isp_ref_isp_config *)
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
+ config_dmem, size); }
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_ref() leave:\n");
+}
+
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+
+void
+ia_css_configure_vf(
+ const struct ia_css_binary *binary,
+ const struct ia_css_vf_configuration *config_dmem)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_vf() enter:\n");
+
+ {
+ unsigned offset = 0;
+ unsigned size = 0;
+ if (binary->info->mem_offsets.offsets.config) {
+ size = binary->info->mem_offsets.offsets.config->dmem.vf.size;
+ offset = binary->info->mem_offsets.offsets.config->dmem.vf.offset;
+ }
+ if (size) {
+ ia_css_vf_config((struct sh_css_isp_vf_isp_config *)
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
+ config_dmem, size); }
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_vf() leave:\n");
+}
+
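Each ia_css_configure_<kernel>() function above follows the same generated shape: look up the kernel's {offset, size} record under the binary's CONFIG/DMEM memory offsets, skip the copy when the binary was not built with that kernel (size == 0), and otherwise hand the host-side configuration to the kernel-specific *_config() encoder at that offset inside the binary's parameter blob. A minimal sketch of that shared pattern follows; configure_dmem_kernel() and ia_css_configure_crop_sketch() are hypothetical names, and memcpy() merely stands in for the kernel-specific encoder that the generated code actually calls.

/*
 * Sketch only: illustrates the pattern shared by the generated
 * ia_css_configure_<kernel>() functions.  configure_dmem_kernel() is a
 * hypothetical helper; memcpy() stands in for the kernel-specific
 * *_config() encoder.
 */
#include <string.h>

#include "ia_css_binary.h"	/* struct ia_css_binary */
#include "ia_css_isp_configs.h"	/* struct ia_css_config_memory_offsets */

static void
configure_dmem_kernel(const struct ia_css_binary *binary,
		      struct ia_css_isp_parameter par,
		      const void *config_dmem)
{
	if (!par.size)
		return;	/* this binary does not include the kernel */

	/* same destination address the generated code hands to the encoder */
	memcpy((void *)
	       &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG]
					 [IA_CSS_ISP_DMEM].address[par.offset],
	       config_dmem, par.size);
}

void
ia_css_configure_crop_sketch(const struct ia_css_binary *binary,
			     const struct ia_css_crop_configuration *config_dmem)
{
	const struct ia_css_config_memory_offsets *cfg =
		binary->info->mem_offsets.offsets.config;

	if (cfg)
		configure_dmem_kernel(binary, cfg->dmem.crop, config_dmem);
}
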
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hive_isp_css_2400_system_generated/ia_css_isp_configs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hive_isp_css_2400_system_generated/ia_css_isp_configs.h
new file mode 100644
index 000000000000..8aacd3dbc05a
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hive_isp_css_2400_system_generated/ia_css_isp_configs.h
@@ -0,0 +1,189 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifdef IA_CSS_INCLUDE_CONFIGURATIONS
+#include "isp/kernels/crop/crop_1.0/ia_css_crop.host.h"
+#include "isp/kernels/dvs/dvs_1.0/ia_css_dvs.host.h"
+#include "isp/kernels/fpn/fpn_1.0/ia_css_fpn.host.h"
+#include "isp/kernels/ob/ob_1.0/ia_css_ob.host.h"
+#include "isp/kernels/output/output_1.0/ia_css_output.host.h"
+#include "isp/kernels/qplane/qplane_2/ia_css_qplane.host.h"
+#include "isp/kernels/raw/raw_1.0/ia_css_raw.host.h"
+#include "isp/kernels/ref/ref_1.0/ia_css_ref.host.h"
+#include "isp/kernels/s3a/s3a_1.0/ia_css_s3a.host.h"
+#ifdef ISP2401
+#include "isp/kernels/sc/sc_1.0/ia_css_sc.host.h"
+#endif
+#include "isp/kernels/tnr/tnr_1.0/ia_css_tnr.host.h"
+#include "isp/kernels/vf/vf_1.0/ia_css_vf.host.h"
+#include "isp/kernels/iterator/iterator_1.0/ia_css_iterator.host.h"
+#include "isp/kernels/copy_output/copy_output_1.0/ia_css_copy_output.host.h"
+#endif /* IA_CSS_INCLUDE_CONFIGURATIONS */
+/* Generated code: do not edit or commit. */
+
+#ifndef _IA_CSS_ISP_CONFIG_H
+#define _IA_CSS_ISP_CONFIG_H
+
+/* Code generated by genparam/gencode.c:gen_param_enum() */
+
+enum ia_css_configuration_ids {
+ IA_CSS_ITERATOR_CONFIG_ID,
+ IA_CSS_COPY_OUTPUT_CONFIG_ID,
+ IA_CSS_CROP_CONFIG_ID,
+ IA_CSS_FPN_CONFIG_ID,
+ IA_CSS_DVS_CONFIG_ID,
+ IA_CSS_QPLANE_CONFIG_ID,
+ IA_CSS_OUTPUT0_CONFIG_ID,
+ IA_CSS_OUTPUT1_CONFIG_ID,
+ IA_CSS_OUTPUT_CONFIG_ID,
+#ifdef ISP2401
+ IA_CSS_SC_CONFIG_ID,
+#endif
+ IA_CSS_RAW_CONFIG_ID,
+ IA_CSS_TNR_CONFIG_ID,
+ IA_CSS_REF_CONFIG_ID,
+ IA_CSS_VF_CONFIG_ID,
+ IA_CSS_NUM_CONFIGURATION_IDS
+};
+
+/* Code generated by genparam/gencode.c:gen_param_offsets() */
+
+struct ia_css_config_memory_offsets {
+ struct {
+ struct ia_css_isp_parameter iterator;
+ struct ia_css_isp_parameter copy_output;
+ struct ia_css_isp_parameter crop;
+ struct ia_css_isp_parameter fpn;
+ struct ia_css_isp_parameter dvs;
+ struct ia_css_isp_parameter qplane;
+ struct ia_css_isp_parameter output0;
+ struct ia_css_isp_parameter output1;
+ struct ia_css_isp_parameter output;
+#ifdef ISP2401
+ struct ia_css_isp_parameter sc;
+#endif
+ struct ia_css_isp_parameter raw;
+ struct ia_css_isp_parameter tnr;
+ struct ia_css_isp_parameter ref;
+ struct ia_css_isp_parameter vf;
+ } dmem;
+};
+
+#if defined(IA_CSS_INCLUDE_CONFIGURATIONS)
+
+#include "ia_css_stream.h" /* struct ia_css_stream */
+#include "ia_css_binary.h" /* struct ia_css_binary */
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+
+void
+ia_css_configure_iterator(
+ const struct ia_css_binary *binary,
+ const struct ia_css_iterator_configuration *config_dmem);
+
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+
+void
+ia_css_configure_copy_output(
+ const struct ia_css_binary *binary,
+ const struct ia_css_copy_output_configuration *config_dmem);
+
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+
+void
+ia_css_configure_crop(
+ const struct ia_css_binary *binary,
+ const struct ia_css_crop_configuration *config_dmem);
+
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+
+void
+ia_css_configure_fpn(
+ const struct ia_css_binary *binary,
+ const struct ia_css_fpn_configuration *config_dmem);
+
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+
+void
+ia_css_configure_dvs(
+ const struct ia_css_binary *binary,
+ const struct ia_css_dvs_configuration *config_dmem);
+
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+
+void
+ia_css_configure_qplane(
+ const struct ia_css_binary *binary,
+ const struct ia_css_qplane_configuration *config_dmem);
+
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+
+void
+ia_css_configure_output0(
+ const struct ia_css_binary *binary,
+ const struct ia_css_output0_configuration *config_dmem);
+
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+
+void
+ia_css_configure_output1(
+ const struct ia_css_binary *binary,
+ const struct ia_css_output1_configuration *config_dmem);
+
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+
+void
+ia_css_configure_output(
+ const struct ia_css_binary *binary,
+ const struct ia_css_output_configuration *config_dmem);
+
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+
+#ifdef ISP2401
+void
+ia_css_configure_sc(
+ const struct ia_css_binary *binary,
+ const struct ia_css_sc_configuration *config_dmem);
+
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+
+#endif
+void
+ia_css_configure_raw(
+ const struct ia_css_binary *binary,
+ const struct ia_css_raw_configuration *config_dmem);
+
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+
+void
+ia_css_configure_tnr(
+ const struct ia_css_binary *binary,
+ const struct ia_css_tnr_configuration *config_dmem);
+
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+
+void
+ia_css_configure_ref(
+ const struct ia_css_binary *binary,
+ const struct ia_css_ref_configuration *config_dmem);
+
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+
+void
+ia_css_configure_vf(
+ const struct ia_css_binary *binary,
+ const struct ia_css_vf_configuration *config_dmem);
+
+#endif /* IA_CSS_INCLUDE_CONFIGURATIONS */
+
+#endif /* _IA_CSS_ISP_CONFIG_H */
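A caller that needs these prototypes defines IA_CSS_INCLUDE_CONFIGURATIONS before including the header and passes the binary together with the host-side configuration. A small usage sketch, with a hypothetical wrapper name around the ia_css_configure_crop() prototype declared above:

#define IA_CSS_INCLUDE_CONFIGURATIONS
#include "ia_css_isp_configs.h"

/* Hypothetical wrapper: push a crop window into a binary's CONFIG/DMEM blob. */
static void set_crop_for_binary(const struct ia_css_binary *binary,
				const struct ia_css_crop_configuration *crop)
{
	/* no-op for binaries whose firmware info reserves no crop record */
	ia_css_configure_crop(binary, crop);
}
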
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hive_isp_css_2400_system_generated/ia_css_isp_params.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hive_isp_css_2400_system_generated/ia_css_isp_params.c
new file mode 100644
index 000000000000..d418e763b755
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hive_isp_css_2400_system_generated/ia_css_isp_params.c
@@ -0,0 +1,3221 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#define IA_CSS_INCLUDE_PARAMETERS
+#include "sh_css_params.h"
+#include "isp/kernels/aa/aa_2/ia_css_aa2.host.h"
+#include "isp/kernels/anr/anr_1.0/ia_css_anr.host.h"
+#include "isp/kernels/anr/anr_2/ia_css_anr2.host.h"
+#include "isp/kernels/bh/bh_2/ia_css_bh.host.h"
+#include "isp/kernels/bnr/bnr_1.0/ia_css_bnr.host.h"
+#include "isp/kernels/bnr/bnr2_2/ia_css_bnr2_2.host.h"
+#include "isp/kernels/cnr/cnr_2/ia_css_cnr2.host.h"
+#include "isp/kernels/crop/crop_1.0/ia_css_crop.host.h"
+#include "isp/kernels/csc/csc_1.0/ia_css_csc.host.h"
+#include "isp/kernels/ctc/ctc_1.0/ia_css_ctc.host.h"
+#include "isp/kernels/ctc/ctc1_5/ia_css_ctc1_5.host.h"
+#include "isp/kernels/ctc/ctc2/ia_css_ctc2.host.h"
+#include "isp/kernels/de/de_1.0/ia_css_de.host.h"
+#include "isp/kernels/de/de_2/ia_css_de2.host.h"
+#include "isp/kernels/dp/dp_1.0/ia_css_dp.host.h"
+#include "isp/kernels/fixedbds/fixedbds_1.0/ia_css_fixedbds_param.h"
+#include "isp/kernels/fpn/fpn_1.0/ia_css_fpn.host.h"
+#include "isp/kernels/gc/gc_1.0/ia_css_gc.host.h"
+#include "isp/kernels/gc/gc_2/ia_css_gc2.host.h"
+#include "isp/kernels/macc/macc_1.0/ia_css_macc.host.h"
+#include "isp/kernels/macc/macc1_5/ia_css_macc1_5.host.h"
+#include "isp/kernels/ob/ob_1.0/ia_css_ob.host.h"
+#include "isp/kernels/ob/ob2/ia_css_ob2.host.h"
+#include "isp/kernels/output/output_1.0/ia_css_output.host.h"
+#include "isp/kernels/raw_aa_binning/raw_aa_binning_1.0/ia_css_raa.host.h"
+#include "isp/kernels/s3a/s3a_1.0/ia_css_s3a.host.h"
+#include "isp/kernels/sc/sc_1.0/ia_css_sc.host.h"
+#include "isp/kernels/sdis/sdis_1.0/ia_css_sdis.host.h"
+#include "isp/kernels/sdis/sdis_2/ia_css_sdis2.host.h"
+#include "isp/kernels/tnr/tnr_1.0/ia_css_tnr.host.h"
+#include "isp/kernels/uds/uds_1.0/ia_css_uds_param.h"
+#include "isp/kernels/wb/wb_1.0/ia_css_wb.host.h"
+#include "isp/kernels/xnr/xnr_1.0/ia_css_xnr.host.h"
+#include "isp/kernels/xnr/xnr_3.0/ia_css_xnr3.host.h"
+#include "isp/kernels/ynr/ynr_1.0/ia_css_ynr.host.h"
+#include "isp/kernels/ynr/ynr_2/ia_css_ynr2.host.h"
+#include "isp/kernels/fc/fc_1.0/ia_css_formats.host.h"
+#include "isp/kernels/tdf/tdf_1.0/ia_css_tdf.host.h"
+#include "isp/kernels/dpc2/ia_css_dpc2.host.h"
+#include "isp/kernels/eed1_8/ia_css_eed1_8.host.h"
+#include "isp/kernels/bnlm/ia_css_bnlm.host.h"
+#include "isp/kernels/conversion/conversion_1.0/ia_css_conversion.host.h"
+/* Generated code: do not edit or commit. */
+
+#include "ia_css_pipeline.h"
+#include "ia_css_isp_params.h"
+#include "ia_css_debug.h"
+#include "assert_support.h"
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_aa(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.aa.size;
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.aa.offset;
+
+ if (size) {
+ struct sh_css_isp_aa_params *t = (struct sh_css_isp_aa_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset];
+ t->strength = params->aa_config.strength;
+ }
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true;
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_anr(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.anr.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.anr.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_anr() enter:\n");
+
+ ia_css_anr_encode((struct sh_css_isp_anr_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->anr_config,
+size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_anr() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_anr2(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->vmem.anr2.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->vmem.anr2.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_anr2() enter:\n");
+
+ ia_css_anr2_vmem_encode((struct ia_css_isp_anr2_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VMEM].address[offset],
+ &params->anr_thres,
+size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_anr2() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_bh(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.bh.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.bh.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_bh() enter:\n");
+
+ ia_css_bh_encode((struct sh_css_isp_bh_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->s3a_config,
+size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_bh() leave:\n");
+ }
+
+ }
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->hmem0.bh.size;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_bh() enter:\n");
+
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_HMEM0] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_bh() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_cnr(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.cnr.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.cnr.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_cnr() enter:\n");
+
+ ia_css_cnr_encode((struct sh_css_isp_cnr_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->cnr_config,
+size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_cnr() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_crop(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.crop.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.crop.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_crop() enter:\n");
+
+ ia_css_crop_encode((struct sh_css_isp_crop_isp_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->crop_config,
+size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_crop() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_csc(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.csc.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.csc.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_csc() enter:\n");
+
+ ia_css_csc_encode((struct sh_css_isp_csc_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->cc_config,
+size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_csc() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_dp(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.dp.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.dp.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_dp() enter:\n");
+
+ ia_css_dp_encode((struct sh_css_isp_dp_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->dp_config,
+size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_dp() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_bnr(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.bnr.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.bnr.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_bnr() enter:\n");
+
+ ia_css_bnr_encode((struct sh_css_isp_bnr_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->nr_config,
+size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_bnr() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_de(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.de.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.de.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_de() enter:\n");
+
+ ia_css_de_encode((struct sh_css_isp_de_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->de_config,
+size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_de() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_ecd(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.ecd.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.ecd.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_ecd() enter:\n");
+
+ ia_css_ecd_encode((struct sh_css_isp_ecd_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->ecd_config,
+size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_ecd() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_formats(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.formats.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.formats.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_formats() enter:\n");
+
+ ia_css_formats_encode((struct sh_css_isp_formats_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->formats_config,
+size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_formats() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_fpn(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.fpn.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.fpn.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_fpn() enter:\n");
+
+ ia_css_fpn_encode((struct sh_css_isp_fpn_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->fpn_config,
+size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_fpn() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_gc(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.gc.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.gc.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_gc() enter:\n");
+
+ ia_css_gc_encode((struct sh_css_isp_gc_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->gc_config,
+size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_gc() leave:\n");
+ }
+
+ }
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->vamem1.gc.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->vamem1.gc.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_gc() enter:\n");
+
+ ia_css_gc_vamem_encode((struct sh_css_isp_gc_vamem_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VAMEM1].address[offset],
+ &params->gc_table,
+size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VAMEM1] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_gc() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_ce(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.ce.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.ce.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_ce() enter:\n");
+
+ ia_css_ce_encode((struct sh_css_isp_ce_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->ce_config,
+size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_ce() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_yuv2rgb(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.yuv2rgb.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.yuv2rgb.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_yuv2rgb() enter:\n");
+
+ ia_css_yuv2rgb_encode((struct sh_css_isp_csc_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->yuv2rgb_cc_config,
+size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_yuv2rgb() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_rgb2yuv(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.rgb2yuv.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.rgb2yuv.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_rgb2yuv() enter:\n");
+
+ ia_css_rgb2yuv_encode((struct sh_css_isp_csc_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->rgb2yuv_cc_config,
+size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_rgb2yuv() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_r_gamma(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->vamem0.r_gamma.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->vamem0.r_gamma.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_r_gamma() enter:\n");
+
+ ia_css_r_gamma_vamem_encode((struct sh_css_isp_rgb_gamma_vamem_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VAMEM0].address[offset],
+ &params->r_gamma_table,
+size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VAMEM0] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_r_gamma() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_g_gamma(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->vamem1.g_gamma.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->vamem1.g_gamma.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_g_gamma() enter:\n");
+
+ ia_css_g_gamma_vamem_encode((struct sh_css_isp_rgb_gamma_vamem_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VAMEM1].address[offset],
+ &params->g_gamma_table,
+size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VAMEM1] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_g_gamma() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_b_gamma(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->vamem2.b_gamma.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->vamem2.b_gamma.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_b_gamma() enter:\n");
+
+ ia_css_b_gamma_vamem_encode((struct sh_css_isp_rgb_gamma_vamem_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VAMEM2].address[offset],
+ &params->b_gamma_table,
+size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VAMEM2] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_b_gamma() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_uds(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.uds.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.uds.offset;
+
+ if (size) {
+ struct sh_css_sp_uds_params *p;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_uds() enter:\n");
+
+ p = (struct sh_css_sp_uds_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset];
+ p->crop_pos = params->uds_config.crop_pos;
+ p->uds = params->uds_config.uds;
+
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_uds() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_raa(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.raa.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.raa.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_raa() enter:\n");
+
+ ia_css_raa_encode((struct sh_css_isp_aa_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->raa_config,
+size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_raa() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_s3a(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.s3a.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.s3a.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_s3a() enter:\n");
+
+ ia_css_s3a_encode((struct sh_css_isp_s3a_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->s3a_config,
+size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_s3a() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_ob(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.ob.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.ob.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_ob() enter:\n");
+
+ ia_css_ob_encode((struct sh_css_isp_ob_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->ob_config,
+&params->stream_configs.ob, size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_ob() leave:\n");
+ }
+
+ }
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->vmem.ob.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->vmem.ob.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_ob() enter:\n");
+
+ ia_css_ob_vmem_encode((struct sh_css_isp_ob_vmem_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VMEM].address[offset],
+ &params->ob_config,
+&params->stream_configs.ob, size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_ob() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_output(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.output.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.output.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_output() enter:\n");
+
+ ia_css_output_encode((struct sh_css_isp_output_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->output_config,
+size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_output() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_sc(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.sc.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.sc.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_sc() enter:\n");
+
+ ia_css_sc_encode((struct sh_css_isp_sc_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->sc_config,
+size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_sc() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_bds(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.bds.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.bds.offset;
+
+ if (size) {
+ struct sh_css_isp_bds_params *p;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_bds() enter:\n");
+
+ p = (struct sh_css_isp_bds_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset];
+ p->baf_strength = params->bds_config.strength;
+
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_bds() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_tnr(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.tnr.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.tnr.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_tnr() enter:\n");
+
+ ia_css_tnr_encode((struct sh_css_isp_tnr_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->tnr_config,
+size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_tnr() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_macc(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.macc.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.macc.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_macc() enter:\n");
+
+ ia_css_macc_encode((struct sh_css_isp_macc_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->macc_config,
+size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_macc() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_sdis_horicoef(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->vmem.sdis_horicoef.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->vmem.sdis_horicoef.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_sdis_horicoef() enter:\n");
+
+ ia_css_sdis_horicoef_vmem_encode((struct sh_css_isp_sdis_hori_coef_tbl *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VMEM].address[offset],
+ &params->dvs_coefs,
+size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_sdis_horicoef() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_sdis_vertcoef(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->vmem.sdis_vertcoef.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->vmem.sdis_vertcoef.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_sdis_vertcoef() enter:\n");
+
+ ia_css_sdis_vertcoef_vmem_encode((struct sh_css_isp_sdis_vert_coef_tbl *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VMEM].address[offset],
+ &params->dvs_coefs,
+size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_sdis_vertcoef() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_sdis_horiproj(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.sdis_horiproj.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.sdis_horiproj.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_sdis_horiproj() enter:\n");
+
+ ia_css_sdis_horiproj_encode((struct sh_css_isp_sdis_hori_proj_tbl *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->dvs_coefs,
+size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_sdis_horiproj() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_sdis_vertproj(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.sdis_vertproj.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.sdis_vertproj.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_sdis_vertproj() enter:\n");
+
+ ia_css_sdis_vertproj_encode((struct sh_css_isp_sdis_vert_proj_tbl *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->dvs_coefs,
+size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_sdis_vertproj() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_sdis2_horicoef(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->vmem.sdis2_horicoef.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->vmem.sdis2_horicoef.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_sdis2_horicoef() enter:\n");
+
+ ia_css_sdis2_horicoef_vmem_encode((struct sh_css_isp_sdis_hori_coef_tbl *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VMEM].address[offset],
+ &params->dvs2_coefs,
+size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_sdis2_horicoef() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_sdis2_vertcoef(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->vmem.sdis2_vertcoef.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->vmem.sdis2_vertcoef.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_sdis2_vertcoef() enter:\n");
+
+ ia_css_sdis2_vertcoef_vmem_encode((struct sh_css_isp_sdis_vert_coef_tbl *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VMEM].address[offset],
+ &params->dvs2_coefs,
+size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_sdis2_vertcoef() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_sdis2_horiproj(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.sdis2_horiproj.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.sdis2_horiproj.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_sdis2_horiproj() enter:\n");
+
+ ia_css_sdis2_horiproj_encode((struct sh_css_isp_sdis_hori_proj_tbl *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->dvs2_coefs,
+size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_sdis2_horiproj() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_sdis2_vertproj(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.sdis2_vertproj.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.sdis2_vertproj.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_sdis2_vertproj() enter:\n");
+
+ ia_css_sdis2_vertproj_encode((struct sh_css_isp_sdis_vert_proj_tbl *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->dvs2_coefs,
+size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_sdis2_vertproj() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_wb(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.wb.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.wb.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_wb() enter:\n");
+
+ ia_css_wb_encode((struct sh_css_isp_wb_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->wb_config,
+size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_wb() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_nr(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.nr.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.nr.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_nr() enter:\n");
+
+ ia_css_nr_encode((struct sh_css_isp_ynr_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->nr_config,
+size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_nr() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_yee(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.yee.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.yee.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_yee() enter:\n");
+
+ ia_css_yee_encode((struct sh_css_isp_yee_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->yee_config,
+size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_yee() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_ynr(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.ynr.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.ynr.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_ynr() enter:\n");
+
+ ia_css_ynr_encode((struct sh_css_isp_yee2_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->ynr_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_ynr() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_fc(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.fc.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.fc.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_fc() enter:\n");
+
+ ia_css_fc_encode((struct sh_css_isp_fc_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->fc_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_fc() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_ctc(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.ctc.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.ctc.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_ctc() enter:\n");
+
+ ia_css_ctc_encode((struct sh_css_isp_ctc_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->ctc_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_ctc() leave:\n");
+ }
+
+ }
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->vamem0.ctc.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->vamem0.ctc.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_ctc() enter:\n");
+
+ ia_css_ctc_vamem_encode((struct sh_css_isp_ctc_vamem_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VAMEM0].address[offset],
+ &params->ctc_table,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VAMEM0] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_ctc() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_xnr_table(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->vamem1.xnr_table.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->vamem1.xnr_table.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_xnr_table() enter:\n");
+
+ ia_css_xnr_table_vamem_encode((struct sh_css_isp_xnr_vamem_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VAMEM1].address[offset],
+ &params->xnr_table,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VAMEM1] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_xnr_table() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_xnr(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.xnr.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.xnr.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_xnr() enter:\n");
+
+ ia_css_xnr_encode((struct sh_css_isp_xnr_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->xnr_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_xnr() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_xnr3(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.xnr3.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.xnr3.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_xnr3() enter:\n");
+
+ ia_css_xnr3_encode((struct sh_css_isp_xnr3_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->xnr3_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_xnr3() leave:\n");
+ }
+
+ }
+#ifdef ISP2401
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->vmem.xnr3.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->vmem.xnr3.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_xnr3() enter:\n");
+
+ ia_css_xnr3_vmem_encode((struct sh_css_isp_xnr3_vmem_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VMEM].address[offset],
+ &params->xnr3_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_xnr3() leave:\n");
+ }
+
+ }
+#endif
+}
+
+/* Code generated by genparam/gencode.c:gen_param_process_table() */
+
+void (* ia_css_kernel_process_param[IA_CSS_NUM_PARAMETER_IDS])(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params) = {
+ ia_css_process_aa,
+ ia_css_process_anr,
+ ia_css_process_anr2,
+ ia_css_process_bh,
+ ia_css_process_cnr,
+ ia_css_process_crop,
+ ia_css_process_csc,
+ ia_css_process_dp,
+ ia_css_process_bnr,
+ ia_css_process_de,
+ ia_css_process_ecd,
+ ia_css_process_formats,
+ ia_css_process_fpn,
+ ia_css_process_gc,
+ ia_css_process_ce,
+ ia_css_process_yuv2rgb,
+ ia_css_process_rgb2yuv,
+ ia_css_process_r_gamma,
+ ia_css_process_g_gamma,
+ ia_css_process_b_gamma,
+ ia_css_process_uds,
+ ia_css_process_raa,
+ ia_css_process_s3a,
+ ia_css_process_ob,
+ ia_css_process_output,
+ ia_css_process_sc,
+ ia_css_process_bds,
+ ia_css_process_tnr,
+ ia_css_process_macc,
+ ia_css_process_sdis_horicoef,
+ ia_css_process_sdis_vertcoef,
+ ia_css_process_sdis_horiproj,
+ ia_css_process_sdis_vertproj,
+ ia_css_process_sdis2_horicoef,
+ ia_css_process_sdis2_vertcoef,
+ ia_css_process_sdis2_horiproj,
+ ia_css_process_sdis2_vertproj,
+ ia_css_process_wb,
+ ia_css_process_nr,
+ ia_css_process_yee,
+ ia_css_process_ynr,
+ ia_css_process_fc,
+ ia_css_process_ctc,
+ ia_css_process_xnr_table,
+ ia_css_process_xnr,
+ ia_css_process_xnr3,
+};
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_dp_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_dp_config *config){
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_dp_config() enter: "
+ "config=%p\n",config);
+
+ *config = params->dp_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_dp_config() leave\n");
+ ia_css_dp_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_dp_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_dp_config *config)
+{
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_dp_config() enter:\n");
+ ia_css_dp_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->dp_config = *config;
+ params->config_changed[IA_CSS_DP_ID] = true;
+#ifndef ISP2401
+ params->config_changed[IA_CSS_DP_ID] = true;
+
+#endif
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_dp_config() leave: "
+ "return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_wb_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_wb_config *config){
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_wb_config() enter: "
+ "config=%p\n",config);
+
+ *config = params->wb_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_wb_config() leave\n");
+ ia_css_wb_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_wb_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_wb_config *config)
+{
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_wb_config() enter:\n");
+ ia_css_wb_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->wb_config = *config;
+ params->config_changed[IA_CSS_WB_ID] = true;
+#ifndef ISP2401
+ params->config_changed[IA_CSS_WB_ID] = true;
+
+#endif
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_wb_config() leave: "
+ "return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_tnr_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_tnr_config *config){
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_tnr_config() enter: "
+ "config=%p\n",config);
+
+ *config = params->tnr_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_tnr_config() leave\n");
+ ia_css_tnr_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_tnr_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_tnr_config *config)
+{
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_tnr_config() enter:\n");
+ ia_css_tnr_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->tnr_config = *config;
+ params->config_changed[IA_CSS_TNR_ID] = true;
+#ifndef ISP2401
+ params->config_changed[IA_CSS_TNR_ID] = true;
+
+#endif
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_tnr_config() leave: "
+ "return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_ob_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_ob_config *config){
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_ob_config() enter: "
+ "config=%p\n",config);
+
+ *config = params->ob_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_ob_config() leave\n");
+ ia_css_ob_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_ob_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_ob_config *config)
+{
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_ob_config() enter:\n");
+ ia_css_ob_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->ob_config = *config;
+ params->config_changed[IA_CSS_OB_ID] = true;
+#ifndef ISP2401
+ params->config_changed[IA_CSS_OB_ID] = true;
+
+#endif
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_ob_config() leave: "
+ "return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_de_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_de_config *config){
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_de_config() enter: "
+ "config=%p\n",config);
+
+ *config = params->de_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_de_config() leave\n");
+ ia_css_de_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_de_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_de_config *config)
+{
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_de_config() enter:\n");
+ ia_css_de_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->de_config = *config;
+ params->config_changed[IA_CSS_DE_ID] = true;
+#ifndef ISP2401
+ params->config_changed[IA_CSS_DE_ID] = true;
+
+#endif
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_de_config() leave: "
+ "return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_anr_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_anr_config *config){
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_anr_config() enter: "
+ "config=%p\n",config);
+
+ *config = params->anr_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_anr_config() leave\n");
+ ia_css_anr_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_anr_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_anr_config *config)
+{
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_anr_config() enter:\n");
+ ia_css_anr_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->anr_config = *config;
+ params->config_changed[IA_CSS_ANR_ID] = true;
+#ifndef ISP2401
+ params->config_changed[IA_CSS_ANR_ID] = true;
+
+#endif
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_anr_config() leave: "
+ "return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_anr2_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_anr_thres *config){
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_anr2_config() enter: "
+ "config=%p\n",config);
+
+ *config = params->anr_thres;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_anr2_config() leave\n");
+ ia_css_anr2_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_anr2_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_anr_thres *config)
+{
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_anr2_config() enter:\n");
+ ia_css_anr2_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->anr_thres = *config;
+ params->config_changed[IA_CSS_ANR2_ID] = true;
+#ifndef ISP2401
+ params->config_changed[IA_CSS_ANR2_ID] = true;
+
+#endif
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_anr2_config() leave: "
+ "return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_ce_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_ce_config *config){
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_ce_config() enter: "
+ "config=%p\n",config);
+
+ *config = params->ce_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_ce_config() leave\n");
+ ia_css_ce_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_ce_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_ce_config *config)
+{
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_ce_config() enter:\n");
+ ia_css_ce_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->ce_config = *config;
+ params->config_changed[IA_CSS_CE_ID] = true;
+#ifndef ISP2401
+ params->config_changed[IA_CSS_CE_ID] = true;
+
+#endif
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_ce_config() leave: "
+ "return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_ecd_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_ecd_config *config){
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_ecd_config() enter: "
+ "config=%p\n",config);
+
+ *config = params->ecd_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_ecd_config() leave\n");
+ ia_css_ecd_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_ecd_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_ecd_config *config)
+{
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_ecd_config() enter:\n");
+ ia_css_ecd_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->ecd_config = *config;
+ params->config_changed[IA_CSS_ECD_ID] = true;
+#ifndef ISP2401
+ params->config_changed[IA_CSS_ECD_ID] = true;
+
+#endif
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_ecd_config() leave: "
+ "return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_ynr_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_ynr_config *config){
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_ynr_config() enter: "
+ "config=%p\n",config);
+
+ *config = params->ynr_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_ynr_config() leave\n");
+ ia_css_ynr_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_ynr_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_ynr_config *config)
+{
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_ynr_config() enter:\n");
+ ia_css_ynr_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->ynr_config = *config;
+ params->config_changed[IA_CSS_YNR_ID] = true;
+#ifndef ISP2401
+ params->config_changed[IA_CSS_YNR_ID] = true;
+
+#endif
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_ynr_config() leave: "
+ "return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_fc_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_fc_config *config){
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_fc_config() enter: "
+ "config=%p\n",config);
+
+ *config = params->fc_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_fc_config() leave\n");
+ ia_css_fc_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_fc_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_fc_config *config)
+{
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_fc_config() enter:\n");
+ ia_css_fc_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->fc_config = *config;
+ params->config_changed[IA_CSS_FC_ID] = true;
+#ifndef ISP2401
+ params->config_changed[IA_CSS_FC_ID] = true;
+
+#endif
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_fc_config() leave: "
+ "return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_cnr_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_cnr_config *config){
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_cnr_config() enter: "
+ "config=%p\n",config);
+
+ *config = params->cnr_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_cnr_config() leave\n");
+ ia_css_cnr_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_cnr_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_cnr_config *config)
+{
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_cnr_config() enter:\n");
+ ia_css_cnr_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->cnr_config = *config;
+ params->config_changed[IA_CSS_CNR_ID] = true;
+#ifndef ISP2401
+ params->config_changed[IA_CSS_CNR_ID] = true;
+
+#endif
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_cnr_config() leave: "
+ "return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_macc_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_macc_config *config){
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_macc_config() enter: "
+ "config=%p\n",config);
+
+ *config = params->macc_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_macc_config() leave\n");
+ ia_css_macc_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_macc_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_macc_config *config)
+{
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_macc_config() enter:\n");
+ ia_css_macc_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->macc_config = *config;
+ params->config_changed[IA_CSS_MACC_ID] = true;
+#ifndef ISP2401
+ params->config_changed[IA_CSS_MACC_ID] = true;
+
+#endif
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_macc_config() leave: "
+ "return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_ctc_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_ctc_config *config){
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_ctc_config() enter: "
+ "config=%p\n",config);
+
+ *config = params->ctc_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_ctc_config() leave\n");
+ ia_css_ctc_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_ctc_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_ctc_config *config)
+{
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_ctc_config() enter:\n");
+ ia_css_ctc_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->ctc_config = *config;
+ params->config_changed[IA_CSS_CTC_ID] = true;
+#ifndef ISP2401
+ params->config_changed[IA_CSS_CTC_ID] = true;
+
+#endif
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_ctc_config() leave: "
+ "return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_aa_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_aa_config *config){
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_aa_config() enter: "
+ "config=%p\n",config);
+
+ *config = params->aa_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_aa_config() leave\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_aa_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_aa_config *config)
+{
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_aa_config() enter:\n");
+ params->aa_config = *config;
+ params->config_changed[IA_CSS_AA_ID] = true;
+#ifndef ISP2401
+ params->config_changed[IA_CSS_AA_ID] = true;
+
+#endif
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_aa_config() leave: "
+ "return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_yuv2rgb_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_cc_config *config){
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_yuv2rgb_config() enter: "
+ "config=%p\n",config);
+
+ *config = params->yuv2rgb_cc_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_yuv2rgb_config() leave\n");
+ ia_css_yuv2rgb_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_yuv2rgb_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_cc_config *config)
+{
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_yuv2rgb_config() enter:\n");
+ ia_css_yuv2rgb_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->yuv2rgb_cc_config = *config;
+ params->config_changed[IA_CSS_YUV2RGB_ID] = true;
+#ifndef ISP2401
+ params->config_changed[IA_CSS_YUV2RGB_ID] = true;
+
+#endif
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_yuv2rgb_config() leave: "
+ "return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_rgb2yuv_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_cc_config *config){
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_rgb2yuv_config() enter: "
+ "config=%p\n",config);
+
+ *config = params->rgb2yuv_cc_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_rgb2yuv_config() leave\n");
+ ia_css_rgb2yuv_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_rgb2yuv_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_cc_config *config)
+{
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_rgb2yuv_config() enter:\n");
+ ia_css_rgb2yuv_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->rgb2yuv_cc_config = *config;
+ params->config_changed[IA_CSS_RGB2YUV_ID] = true;
+#ifndef ISP2401
+ params->config_changed[IA_CSS_RGB2YUV_ID] = true;
+
+#endif
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_rgb2yuv_config() leave: "
+ "return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_csc_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_cc_config *config){
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_csc_config() enter: "
+ "config=%p\n",config);
+
+ *config = params->cc_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_csc_config() leave\n");
+ ia_css_csc_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_csc_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_cc_config *config)
+{
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_csc_config() enter:\n");
+ ia_css_csc_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->cc_config = *config;
+ params->config_changed[IA_CSS_CSC_ID] = true;
+#ifndef ISP2401
+ params->config_changed[IA_CSS_CSC_ID] = true;
+
+#endif
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_csc_config() leave: "
+ "return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_nr_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_nr_config *config){
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_nr_config() enter: "
+ "config=%p\n",config);
+
+ *config = params->nr_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_nr_config() leave\n");
+ ia_css_nr_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_nr_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_nr_config *config)
+{
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_nr_config() enter:\n");
+ ia_css_nr_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->nr_config = *config;
+ params->config_changed[IA_CSS_BNR_ID] = true;
+ params->config_changed[IA_CSS_NR_ID] = true;
+#ifndef ISP2401
+ params->config_changed[IA_CSS_NR_ID] = true;
+
+#endif
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_nr_config() leave: "
+ "return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_gc_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_gc_config *config){
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_gc_config() enter: "
+ "config=%p\n",config);
+
+ *config = params->gc_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_gc_config() leave\n");
+ ia_css_gc_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_gc_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_gc_config *config)
+{
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_gc_config() enter:\n");
+ ia_css_gc_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->gc_config = *config;
+ params->config_changed[IA_CSS_GC_ID] = true;
+#ifndef ISP2401
+ params->config_changed[IA_CSS_GC_ID] = true;
+
+#endif
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_gc_config() leave: "
+ "return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_sdis_horicoef_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_dvs_coefficients *config){
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_sdis_horicoef_config() enter: "
+ "config=%p\n",config);
+
+ *config = params->dvs_coefs;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_sdis_horicoef_config() leave\n");
+ ia_css_sdis_horicoef_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_sdis_horicoef_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_dvs_coefficients *config)
+{
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_sdis_horicoef_config() enter:\n");
+ ia_css_sdis_horicoef_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->dvs_coefs = *config;
+ params->config_changed[IA_CSS_SDIS_HORICOEF_ID] = true;
+ params->config_changed[IA_CSS_SDIS_VERTCOEF_ID] = true;
+ params->config_changed[IA_CSS_SDIS_HORIPROJ_ID] = true;
+ params->config_changed[IA_CSS_SDIS_VERTPROJ_ID] = true;
+#ifndef ISP2401
+ params->config_changed[IA_CSS_SDIS_HORICOEF_ID] = true;
+
+#endif
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_sdis_horicoef_config() leave: "
+ "return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_sdis_vertcoef_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_dvs_coefficients *config){
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_sdis_vertcoef_config() enter: "
+ "config=%p\n",config);
+
+ *config = params->dvs_coefs;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_sdis_vertcoef_config() leave\n");
+ ia_css_sdis_vertcoef_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_sdis_vertcoef_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_dvs_coefficients *config)
+{
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_sdis_vertcoef_config() enter:\n");
+ ia_css_sdis_vertcoef_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->dvs_coefs = *config;
+ params->config_changed[IA_CSS_SDIS_HORICOEF_ID] = true;
+ params->config_changed[IA_CSS_SDIS_VERTCOEF_ID] = true;
+ params->config_changed[IA_CSS_SDIS_HORIPROJ_ID] = true;
+ params->config_changed[IA_CSS_SDIS_VERTPROJ_ID] = true;
+#ifndef ISP2401
+ params->config_changed[IA_CSS_SDIS_VERTCOEF_ID] = true;
+
+#endif
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_sdis_vertcoef_config() leave: "
+ "return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_sdis_horiproj_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_dvs_coefficients *config){
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_sdis_horiproj_config() enter: "
+ "config=%p\n",config);
+
+ *config = params->dvs_coefs;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_sdis_horiproj_config() leave\n");
+ ia_css_sdis_horiproj_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_sdis_horiproj_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_dvs_coefficients *config)
+{
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_sdis_horiproj_config() enter:\n");
+ ia_css_sdis_horiproj_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->dvs_coefs = *config;
+ params->config_changed[IA_CSS_SDIS_HORICOEF_ID] = true;
+ params->config_changed[IA_CSS_SDIS_VERTCOEF_ID] = true;
+ params->config_changed[IA_CSS_SDIS_HORIPROJ_ID] = true;
+ params->config_changed[IA_CSS_SDIS_VERTPROJ_ID] = true;
+#ifndef ISP2401
+ params->config_changed[IA_CSS_SDIS_HORIPROJ_ID] = true;
+
+#endif
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_sdis_horiproj_config() leave: "
+ "return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_sdis_vertproj_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_dvs_coefficients *config){
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_sdis_vertproj_config() enter: "
+ "config=%p\n",config);
+
+ *config = params->dvs_coefs;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_sdis_vertproj_config() leave\n");
+ ia_css_sdis_vertproj_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_sdis_vertproj_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_dvs_coefficients *config)
+{
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_sdis_vertproj_config() enter:\n");
+ ia_css_sdis_vertproj_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->dvs_coefs = *config;
+ params->config_changed[IA_CSS_SDIS_HORICOEF_ID] = true;
+ params->config_changed[IA_CSS_SDIS_VERTCOEF_ID] = true;
+ params->config_changed[IA_CSS_SDIS_HORIPROJ_ID] = true;
+ params->config_changed[IA_CSS_SDIS_VERTPROJ_ID] = true;
+#ifndef ISP2401
+ params->config_changed[IA_CSS_SDIS_VERTPROJ_ID] = true;
+
+#endif
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_sdis_vertproj_config() leave: "
+ "return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_sdis2_horicoef_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_dvs2_coefficients *config){
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_sdis2_horicoef_config() enter: "
+ "config=%p\n",config);
+
+ *config = params->dvs2_coefs;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_sdis2_horicoef_config() leave\n");
+ ia_css_sdis2_horicoef_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_sdis2_horicoef_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_dvs2_coefficients *config)
+{
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_sdis2_horicoef_config() enter:\n");
+ ia_css_sdis2_horicoef_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->dvs2_coefs = *config;
+ params->config_changed[IA_CSS_SDIS2_HORICOEF_ID] = true;
+ params->config_changed[IA_CSS_SDIS2_VERTCOEF_ID] = true;
+ params->config_changed[IA_CSS_SDIS2_HORIPROJ_ID] = true;
+ params->config_changed[IA_CSS_SDIS2_VERTPROJ_ID] = true;
+#ifndef ISP2401
+ params->config_changed[IA_CSS_SDIS2_HORICOEF_ID] = true;
+
+#endif
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_sdis2_horicoef_config() leave: "
+ "return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_sdis2_vertcoef_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_dvs2_coefficients *config){
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_sdis2_vertcoef_config() enter: "
+ "config=%p\n",config);
+
+ *config = params->dvs2_coefs;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_sdis2_vertcoef_config() leave\n");
+ ia_css_sdis2_vertcoef_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_sdis2_vertcoef_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_dvs2_coefficients *config)
+{
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_sdis2_vertcoef_config() enter:\n");
+ ia_css_sdis2_vertcoef_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->dvs2_coefs = *config;
+ params->config_changed[IA_CSS_SDIS2_HORICOEF_ID] = true;
+ params->config_changed[IA_CSS_SDIS2_VERTCOEF_ID] = true;
+ params->config_changed[IA_CSS_SDIS2_HORIPROJ_ID] = true;
+ params->config_changed[IA_CSS_SDIS2_VERTPROJ_ID] = true;
+#ifndef ISP2401
+ params->config_changed[IA_CSS_SDIS2_VERTCOEF_ID] = true;
+
+#endif
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_sdis2_vertcoef_config() leave: "
+ "return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_sdis2_horiproj_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_dvs2_coefficients *config){
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_sdis2_horiproj_config() enter: "
+ "config=%p\n",config);
+
+ *config = params->dvs2_coefs;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_sdis2_horiproj_config() leave\n");
+ ia_css_sdis2_horiproj_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_sdis2_horiproj_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_dvs2_coefficients *config)
+{
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_sdis2_horiproj_config() enter:\n");
+ ia_css_sdis2_horiproj_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->dvs2_coefs = *config;
+ params->config_changed[IA_CSS_SDIS2_HORICOEF_ID] = true;
+ params->config_changed[IA_CSS_SDIS2_VERTCOEF_ID] = true;
+ params->config_changed[IA_CSS_SDIS2_HORIPROJ_ID] = true;
+ params->config_changed[IA_CSS_SDIS2_VERTPROJ_ID] = true;
+#ifndef ISP2401
+ params->config_changed[IA_CSS_SDIS2_HORIPROJ_ID] = true;
+
+#endif
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_sdis2_horiproj_config() leave: "
+ "return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_sdis2_vertproj_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_dvs2_coefficients *config){
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_sdis2_vertproj_config() enter: "
+ "config=%p\n",config);
+
+ *config = params->dvs2_coefs;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_sdis2_vertproj_config() leave\n");
+ ia_css_sdis2_vertproj_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_sdis2_vertproj_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_dvs2_coefficients *config)
+{
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_sdis2_vertproj_config() enter:\n");
+ ia_css_sdis2_vertproj_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->dvs2_coefs = *config;
+ params->config_changed[IA_CSS_SDIS2_HORICOEF_ID] = true;
+ params->config_changed[IA_CSS_SDIS2_VERTCOEF_ID] = true;
+ params->config_changed[IA_CSS_SDIS2_HORIPROJ_ID] = true;
+ params->config_changed[IA_CSS_SDIS2_VERTPROJ_ID] = true;
+#ifndef ISP2401
+ params->config_changed[IA_CSS_SDIS2_VERTPROJ_ID] = true;
+
+#endif
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_sdis2_vertproj_config() leave: "
+ "return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_r_gamma_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_rgb_gamma_table *config){
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_r_gamma_config() enter: "
+ "config=%p\n",config);
+
+ *config = params->r_gamma_table;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_r_gamma_config() leave\n");
+ ia_css_r_gamma_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_r_gamma_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_rgb_gamma_table *config)
+{
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_r_gamma_config() enter:\n");
+ ia_css_r_gamma_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->r_gamma_table = *config;
+ params->config_changed[IA_CSS_R_GAMMA_ID] = true;
+#ifndef ISP2401
+ params->config_changed[IA_CSS_R_GAMMA_ID] = true;
+
+#endif
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_r_gamma_config() leave: "
+ "return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_g_gamma_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_rgb_gamma_table *config){
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_g_gamma_config() enter: "
+ "config=%p\n",config);
+
+ *config = params->g_gamma_table;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_g_gamma_config() leave\n");
+ ia_css_g_gamma_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_g_gamma_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_rgb_gamma_table *config)
+{
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_g_gamma_config() enter:\n");
+ ia_css_g_gamma_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->g_gamma_table = *config;
+ params->config_changed[IA_CSS_G_GAMMA_ID] = true;
+#ifndef ISP2401
+ params->config_changed[IA_CSS_G_GAMMA_ID] = true;
+
+#endif
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_g_gamma_config() leave: "
+ "return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_b_gamma_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_rgb_gamma_table *config){
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_b_gamma_config() enter: "
+ "config=%p\n",config);
+
+ *config = params->b_gamma_table;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_b_gamma_config() leave\n");
+ ia_css_b_gamma_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_b_gamma_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_rgb_gamma_table *config)
+{
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_b_gamma_config() enter:\n");
+ ia_css_b_gamma_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->b_gamma_table = *config;
+ params->config_changed[IA_CSS_B_GAMMA_ID] = true;
+#ifndef ISP2401
+ params->config_changed[IA_CSS_B_GAMMA_ID] = true;
+
+#endif
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_b_gamma_config() leave: "
+ "return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_xnr_table_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_xnr_table *config){
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_xnr_table_config() enter: "
+ "config=%p\n",config);
+
+ *config = params->xnr_table;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_xnr_table_config() leave\n");
+ ia_css_xnr_table_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_xnr_table_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_xnr_table *config)
+{
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_xnr_table_config() enter:\n");
+ ia_css_xnr_table_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->xnr_table = *config;
+ params->config_changed[IA_CSS_XNR_TABLE_ID] = true;
+#ifndef ISP2401
+ params->config_changed[IA_CSS_XNR_TABLE_ID] = true;
+
+#endif
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_xnr_table_config() leave: "
+ "return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_formats_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_formats_config *config){
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_formats_config() enter: "
+ "config=%p\n",config);
+
+ *config = params->formats_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_formats_config() leave\n");
+ ia_css_formats_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_formats_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_formats_config *config)
+{
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_formats_config() enter:\n");
+ ia_css_formats_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->formats_config = *config;
+ params->config_changed[IA_CSS_FORMATS_ID] = true;
+#ifndef ISP2401
+ params->config_changed[IA_CSS_FORMATS_ID] = true;
+
+#endif
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_formats_config() leave: "
+ "return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_xnr_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_xnr_config *config){
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_xnr_config() enter: "
+ "config=%p\n",config);
+
+ *config = params->xnr_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_xnr_config() leave\n");
+ ia_css_xnr_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_xnr_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_xnr_config *config)
+{
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_xnr_config() enter:\n");
+ ia_css_xnr_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->xnr_config = *config;
+ params->config_changed[IA_CSS_XNR_ID] = true;
+#ifndef ISP2401
+ params->config_changed[IA_CSS_XNR_ID] = true;
+
+#endif
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_xnr_config() leave: "
+ "return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_xnr3_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_xnr3_config *config){
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_xnr3_config() enter: "
+ "config=%p\n",config);
+
+ *config = params->xnr3_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_xnr3_config() leave\n");
+ ia_css_xnr3_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_xnr3_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_xnr3_config *config)
+{
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_xnr3_config() enter:\n");
+ ia_css_xnr3_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->xnr3_config = *config;
+ params->config_changed[IA_CSS_XNR3_ID] = true;
+#ifndef ISP2401
+ params->config_changed[IA_CSS_XNR3_ID] = true;
+
+#endif
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_xnr3_config() leave: "
+ "return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_s3a_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_3a_config *config){
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_s3a_config() enter: "
+ "config=%p\n",config);
+
+ *config = params->s3a_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_s3a_config() leave\n");
+ ia_css_s3a_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_s3a_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_3a_config *config)
+{
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_s3a_config() enter:\n");
+ ia_css_s3a_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->s3a_config = *config;
+ params->config_changed[IA_CSS_BH_ID] = true;
+ params->config_changed[IA_CSS_S3A_ID] = true;
+#ifndef ISP2401
+ params->config_changed[IA_CSS_S3A_ID] = true;
+
+#endif
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_s3a_config() leave: "
+ "return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_output_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_output_config *config){
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_output_config() enter: "
+ "config=%p\n",config);
+
+ *config = params->output_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_output_config() leave\n");
+ ia_css_output_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_output_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_output_config *config)
+{
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_output_config() enter:\n");
+ ia_css_output_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->output_config = *config;
+ params->config_changed[IA_CSS_OUTPUT_ID] = true;
+#ifndef ISP2401
+ params->config_changed[IA_CSS_OUTPUT_ID] = true;
+
+#endif
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_output_config() leave: "
+ "return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_global_access_function() */
+
+void
+ia_css_get_configs(struct ia_css_isp_parameters *params,
+ const struct ia_css_isp_config *config)
+{
+ ia_css_get_dp_config(params, config->dp_config);
+ ia_css_get_wb_config(params, config->wb_config);
+ ia_css_get_tnr_config(params, config->tnr_config);
+ ia_css_get_ob_config(params, config->ob_config);
+ ia_css_get_de_config(params, config->de_config);
+ ia_css_get_anr_config(params, config->anr_config);
+ ia_css_get_anr2_config(params, config->anr_thres);
+ ia_css_get_ce_config(params, config->ce_config);
+ ia_css_get_ecd_config(params, config->ecd_config);
+ ia_css_get_ynr_config(params, config->ynr_config);
+ ia_css_get_fc_config(params, config->fc_config);
+ ia_css_get_cnr_config(params, config->cnr_config);
+ ia_css_get_macc_config(params, config->macc_config);
+ ia_css_get_ctc_config(params, config->ctc_config);
+ ia_css_get_aa_config(params, config->aa_config);
+ ia_css_get_yuv2rgb_config(params, config->yuv2rgb_cc_config);
+ ia_css_get_rgb2yuv_config(params, config->rgb2yuv_cc_config);
+ ia_css_get_csc_config(params, config->cc_config);
+ ia_css_get_nr_config(params, config->nr_config);
+ ia_css_get_gc_config(params, config->gc_config);
+ ia_css_get_sdis_horicoef_config(params, config->dvs_coefs);
+ ia_css_get_sdis_vertcoef_config(params, config->dvs_coefs);
+ ia_css_get_sdis_horiproj_config(params, config->dvs_coefs);
+ ia_css_get_sdis_vertproj_config(params, config->dvs_coefs);
+ ia_css_get_sdis2_horicoef_config(params, config->dvs2_coefs);
+ ia_css_get_sdis2_vertcoef_config(params, config->dvs2_coefs);
+ ia_css_get_sdis2_horiproj_config(params, config->dvs2_coefs);
+ ia_css_get_sdis2_vertproj_config(params, config->dvs2_coefs);
+ ia_css_get_r_gamma_config(params, config->r_gamma_table);
+ ia_css_get_g_gamma_config(params, config->g_gamma_table);
+ ia_css_get_b_gamma_config(params, config->b_gamma_table);
+ ia_css_get_xnr_table_config(params, config->xnr_table);
+ ia_css_get_formats_config(params, config->formats_config);
+ ia_css_get_xnr_config(params, config->xnr_config);
+ ia_css_get_xnr3_config(params, config->xnr3_config);
+ ia_css_get_s3a_config(params, config->s3a_config);
+ ia_css_get_output_config(params, config->output_config);
+}
+
+/* Code generated by genparam/gencode.c:gen_global_access_function() */
+
+void
+ia_css_set_configs(struct ia_css_isp_parameters *params,
+ const struct ia_css_isp_config *config)
+{
+ ia_css_set_dp_config(params, config->dp_config);
+ ia_css_set_wb_config(params, config->wb_config);
+ ia_css_set_tnr_config(params, config->tnr_config);
+ ia_css_set_ob_config(params, config->ob_config);
+ ia_css_set_de_config(params, config->de_config);
+ ia_css_set_anr_config(params, config->anr_config);
+ ia_css_set_anr2_config(params, config->anr_thres);
+ ia_css_set_ce_config(params, config->ce_config);
+ ia_css_set_ecd_config(params, config->ecd_config);
+ ia_css_set_ynr_config(params, config->ynr_config);
+ ia_css_set_fc_config(params, config->fc_config);
+ ia_css_set_cnr_config(params, config->cnr_config);
+ ia_css_set_macc_config(params, config->macc_config);
+ ia_css_set_ctc_config(params, config->ctc_config);
+ ia_css_set_aa_config(params, config->aa_config);
+ ia_css_set_yuv2rgb_config(params, config->yuv2rgb_cc_config);
+ ia_css_set_rgb2yuv_config(params, config->rgb2yuv_cc_config);
+ ia_css_set_csc_config(params, config->cc_config);
+ ia_css_set_nr_config(params, config->nr_config);
+ ia_css_set_gc_config(params, config->gc_config);
+ ia_css_set_sdis_horicoef_config(params, config->dvs_coefs);
+ ia_css_set_sdis_vertcoef_config(params, config->dvs_coefs);
+ ia_css_set_sdis_horiproj_config(params, config->dvs_coefs);
+ ia_css_set_sdis_vertproj_config(params, config->dvs_coefs);
+ ia_css_set_sdis2_horicoef_config(params, config->dvs2_coefs);
+ ia_css_set_sdis2_vertcoef_config(params, config->dvs2_coefs);
+ ia_css_set_sdis2_horiproj_config(params, config->dvs2_coefs);
+ ia_css_set_sdis2_vertproj_config(params, config->dvs2_coefs);
+ ia_css_set_r_gamma_config(params, config->r_gamma_table);
+ ia_css_set_g_gamma_config(params, config->g_gamma_table);
+ ia_css_set_b_gamma_config(params, config->b_gamma_table);
+ ia_css_set_xnr_table_config(params, config->xnr_table);
+ ia_css_set_formats_config(params, config->formats_config);
+ ia_css_set_xnr_config(params, config->xnr_config);
+ ia_css_set_xnr3_config(params, config->xnr3_config);
+ ia_css_set_s3a_config(params, config->s3a_config);
+ ia_css_set_output_config(params, config->output_config);
+}
+
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hive_isp_css_2400_system_generated/ia_css_isp_params.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hive_isp_css_2400_system_generated/ia_css_isp_params.h
new file mode 100644
index 000000000000..5b3deb7f74ae
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hive_isp_css_2400_system_generated/ia_css_isp_params.h
@@ -0,0 +1,399 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+/* Generated code: do not edit or commit. */
+
+#ifndef _IA_CSS_ISP_PARAM_H
+#define _IA_CSS_ISP_PARAM_H
+
+/* Code generated by genparam/gencode.c:gen_param_enum() */
+
+enum ia_css_parameter_ids {
+ IA_CSS_AA_ID,
+ IA_CSS_ANR_ID,
+ IA_CSS_ANR2_ID,
+ IA_CSS_BH_ID,
+ IA_CSS_CNR_ID,
+ IA_CSS_CROP_ID,
+ IA_CSS_CSC_ID,
+ IA_CSS_DP_ID,
+ IA_CSS_BNR_ID,
+ IA_CSS_DE_ID,
+ IA_CSS_ECD_ID,
+ IA_CSS_FORMATS_ID,
+ IA_CSS_FPN_ID,
+ IA_CSS_GC_ID,
+ IA_CSS_CE_ID,
+ IA_CSS_YUV2RGB_ID,
+ IA_CSS_RGB2YUV_ID,
+ IA_CSS_R_GAMMA_ID,
+ IA_CSS_G_GAMMA_ID,
+ IA_CSS_B_GAMMA_ID,
+ IA_CSS_UDS_ID,
+ IA_CSS_RAA_ID,
+ IA_CSS_S3A_ID,
+ IA_CSS_OB_ID,
+ IA_CSS_OUTPUT_ID,
+ IA_CSS_SC_ID,
+ IA_CSS_BDS_ID,
+ IA_CSS_TNR_ID,
+ IA_CSS_MACC_ID,
+ IA_CSS_SDIS_HORICOEF_ID,
+ IA_CSS_SDIS_VERTCOEF_ID,
+ IA_CSS_SDIS_HORIPROJ_ID,
+ IA_CSS_SDIS_VERTPROJ_ID,
+ IA_CSS_SDIS2_HORICOEF_ID,
+ IA_CSS_SDIS2_VERTCOEF_ID,
+ IA_CSS_SDIS2_HORIPROJ_ID,
+ IA_CSS_SDIS2_VERTPROJ_ID,
+ IA_CSS_WB_ID,
+ IA_CSS_NR_ID,
+ IA_CSS_YEE_ID,
+ IA_CSS_YNR_ID,
+ IA_CSS_FC_ID,
+ IA_CSS_CTC_ID,
+ IA_CSS_XNR_TABLE_ID,
+ IA_CSS_XNR_ID,
+ IA_CSS_XNR3_ID,
+ IA_CSS_NUM_PARAMETER_IDS
+};
+
+/* Code generated by genparam/gencode.c:gen_param_offsets() */
+
+struct ia_css_memory_offsets {
+ struct {
+ struct ia_css_isp_parameter aa;
+ struct ia_css_isp_parameter anr;
+ struct ia_css_isp_parameter bh;
+ struct ia_css_isp_parameter cnr;
+ struct ia_css_isp_parameter crop;
+ struct ia_css_isp_parameter csc;
+ struct ia_css_isp_parameter dp;
+ struct ia_css_isp_parameter bnr;
+ struct ia_css_isp_parameter de;
+ struct ia_css_isp_parameter ecd;
+ struct ia_css_isp_parameter formats;
+ struct ia_css_isp_parameter fpn;
+ struct ia_css_isp_parameter gc;
+ struct ia_css_isp_parameter ce;
+ struct ia_css_isp_parameter yuv2rgb;
+ struct ia_css_isp_parameter rgb2yuv;
+ struct ia_css_isp_parameter uds;
+ struct ia_css_isp_parameter raa;
+ struct ia_css_isp_parameter s3a;
+ struct ia_css_isp_parameter ob;
+ struct ia_css_isp_parameter output;
+ struct ia_css_isp_parameter sc;
+ struct ia_css_isp_parameter bds;
+ struct ia_css_isp_parameter tnr;
+ struct ia_css_isp_parameter macc;
+ struct ia_css_isp_parameter sdis_horiproj;
+ struct ia_css_isp_parameter sdis_vertproj;
+ struct ia_css_isp_parameter sdis2_horiproj;
+ struct ia_css_isp_parameter sdis2_vertproj;
+ struct ia_css_isp_parameter wb;
+ struct ia_css_isp_parameter nr;
+ struct ia_css_isp_parameter yee;
+ struct ia_css_isp_parameter ynr;
+ struct ia_css_isp_parameter fc;
+ struct ia_css_isp_parameter ctc;
+ struct ia_css_isp_parameter xnr;
+ struct ia_css_isp_parameter xnr3;
+ struct ia_css_isp_parameter get;
+ struct ia_css_isp_parameter put;
+ } dmem;
+ struct {
+ struct ia_css_isp_parameter anr2;
+ struct ia_css_isp_parameter ob;
+ struct ia_css_isp_parameter sdis_horicoef;
+ struct ia_css_isp_parameter sdis_vertcoef;
+ struct ia_css_isp_parameter sdis2_horicoef;
+ struct ia_css_isp_parameter sdis2_vertcoef;
+#ifdef ISP2401
+ struct ia_css_isp_parameter xnr3;
+#endif
+ } vmem;
+ struct {
+ struct ia_css_isp_parameter bh;
+ } hmem0;
+ struct {
+ struct ia_css_isp_parameter gc;
+ struct ia_css_isp_parameter g_gamma;
+ struct ia_css_isp_parameter xnr_table;
+ } vamem1;
+ struct {
+ struct ia_css_isp_parameter r_gamma;
+ struct ia_css_isp_parameter ctc;
+ } vamem0;
+ struct {
+ struct ia_css_isp_parameter b_gamma;
+ } vamem2;
+};
+
+#if defined(IA_CSS_INCLUDE_PARAMETERS)
+
+#include "ia_css_stream.h" /* struct ia_css_stream */
+#include "ia_css_binary.h" /* struct ia_css_binary */
+/* Code generated by genparam/gencode.c:gen_param_process_table() */
+
+struct ia_css_pipeline_stage; /* forward declaration */
+
+extern void (* ia_css_kernel_process_param[IA_CSS_NUM_PARAMETER_IDS])(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_dp_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_dp_config *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_wb_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_wb_config *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_tnr_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_tnr_config *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_ob_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_ob_config *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_de_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_de_config *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_anr_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_anr_config *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_anr2_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_anr_thres *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_ce_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_ce_config *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_ecd_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_ecd_config *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_ynr_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_ynr_config *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_fc_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_fc_config *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_cnr_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_cnr_config *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_macc_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_macc_config *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_ctc_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_ctc_config *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_aa_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_aa_config *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_yuv2rgb_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_cc_config *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_rgb2yuv_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_cc_config *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_csc_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_cc_config *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_nr_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_nr_config *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_gc_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_gc_config *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_sdis_horicoef_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_dvs_coefficients *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_sdis_vertcoef_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_dvs_coefficients *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_sdis_horiproj_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_dvs_coefficients *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_sdis_vertproj_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_dvs_coefficients *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_sdis2_horicoef_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_dvs2_coefficients *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_sdis2_vertcoef_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_dvs2_coefficients *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_sdis2_horiproj_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_dvs2_coefficients *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_sdis2_vertproj_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_dvs2_coefficients *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_r_gamma_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_rgb_gamma_table *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_g_gamma_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_rgb_gamma_table *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_b_gamma_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_rgb_gamma_table *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_xnr_table_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_xnr_table *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_formats_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_formats_config *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_xnr_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_xnr_config *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_xnr3_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_xnr3_config *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_s3a_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_3a_config *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_output_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_output_config *config);
+
+/* Code generated by genparam/gencode.c:gen_global_access_function() */
+
+void
+ia_css_get_configs(struct ia_css_isp_parameters *params,
+ const struct ia_css_isp_config *config)
+;
+#ifdef ISP2401
+
+#endif
+/* Code generated by genparam/gencode.c:gen_global_access_function() */
+
+void
+ia_css_set_configs(struct ia_css_isp_parameters *params,
+ const struct ia_css_isp_config *config)
+;
+#ifdef ISP2401
+
+#endif
+#endif /* IA_CSS_INCLUDE_PARAMETERS */
+
+#endif /* _IA_CSS_ISP_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hive_isp_css_2400_system_generated/ia_css_isp_states.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hive_isp_css_2400_system_generated/ia_css_isp_states.c
new file mode 100644
index 000000000000..fb3ba08f69c1
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hive_isp_css_2400_system_generated/ia_css_isp_states.c
@@ -0,0 +1,214 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+/* Generated code: do not edit or commit. */
+
+#include "ia_css_pipeline.h"
+#include "ia_css_isp_states.h"
+#include "ia_css_debug.h"
+#include "assert_support.h"
+
+/* Code generated by genparam/genstate.c:gen_init_function() */
+
+static void
+ia_css_initialize_aa_state(
+ const struct ia_css_binary *binary)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_initialize_aa_state() enter:\n");
+
+ {
+ unsigned size = binary->info->mem_offsets.offsets.state->vmem.aa.size;
+
+ unsigned offset = binary->info->mem_offsets.offsets.state->vmem.aa.offset;
+
+ if (size)
+ memset(&binary->mem_params.params[IA_CSS_PARAM_CLASS_STATE][IA_CSS_ISP_VMEM].address[offset], 0, size);
+
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_initialize_aa_state() leave:\n");
+}
+
+/* Code generated by genparam/genstate.c:gen_init_function() */
+
+static void
+ia_css_initialize_cnr_state(
+ const struct ia_css_binary *binary)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_initialize_cnr_state() enter:\n");
+
+ {
+ unsigned size = binary->info->mem_offsets.offsets.state->vmem.cnr.size;
+
+ unsigned offset = binary->info->mem_offsets.offsets.state->vmem.cnr.offset;
+
+ if (size) {
+ ia_css_init_cnr_state(
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_STATE][IA_CSS_ISP_VMEM].address[offset],
+ size);
+ }
+
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_initialize_cnr_state() leave:\n");
+}
+
+/* Code generated by genparam/genstate.c:gen_init_function() */
+
+static void
+ia_css_initialize_cnr2_state(
+ const struct ia_css_binary *binary)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_initialize_cnr2_state() enter:\n");
+
+ {
+ unsigned size = binary->info->mem_offsets.offsets.state->vmem.cnr2.size;
+
+ unsigned offset = binary->info->mem_offsets.offsets.state->vmem.cnr2.offset;
+
+ if (size) {
+ ia_css_init_cnr2_state(
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_STATE][IA_CSS_ISP_VMEM].address[offset],
+ size);
+ }
+
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_initialize_cnr2_state() leave:\n");
+}
+
+/* Code generated by genparam/genstate.c:gen_init_function() */
+
+static void
+ia_css_initialize_dp_state(
+ const struct ia_css_binary *binary)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_initialize_dp_state() enter:\n");
+
+ {
+ unsigned size = binary->info->mem_offsets.offsets.state->vmem.dp.size;
+
+ unsigned offset = binary->info->mem_offsets.offsets.state->vmem.dp.offset;
+
+ if (size) {
+ ia_css_init_dp_state(
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_STATE][IA_CSS_ISP_VMEM].address[offset],
+ size);
+ }
+
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_initialize_dp_state() leave:\n");
+}
+
+/* Code generated by genparam/genstate.c:gen_init_function() */
+
+static void
+ia_css_initialize_de_state(
+ const struct ia_css_binary *binary)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_initialize_de_state() enter:\n");
+
+ {
+ unsigned size = binary->info->mem_offsets.offsets.state->vmem.de.size;
+
+ unsigned offset = binary->info->mem_offsets.offsets.state->vmem.de.offset;
+
+ if (size) {
+ ia_css_init_de_state(
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_STATE][IA_CSS_ISP_VMEM].address[offset],
+ size);
+ }
+
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_initialize_de_state() leave:\n");
+}
+
+/* Code generated by genparam/genstate.c:gen_init_function() */
+
+static void
+ia_css_initialize_tnr_state(
+ const struct ia_css_binary *binary)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_initialize_tnr_state() enter:\n");
+
+ {
+ unsigned size = binary->info->mem_offsets.offsets.state->dmem.tnr.size;
+
+ unsigned offset = binary->info->mem_offsets.offsets.state->dmem.tnr.offset;
+
+ if (size) {
+ ia_css_init_tnr_state((struct sh_css_isp_tnr_dmem_state *)
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_STATE][IA_CSS_ISP_DMEM].address[offset],
+ size);
+ }
+
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_initialize_tnr_state() leave:\n");
+}
+
+/* Code generated by genparam/genstate.c:gen_init_function() */
+
+static void
+ia_css_initialize_ref_state(
+ const struct ia_css_binary *binary)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_initialize_ref_state() enter:\n");
+
+ {
+ unsigned size = binary->info->mem_offsets.offsets.state->dmem.ref.size;
+
+ unsigned offset = binary->info->mem_offsets.offsets.state->dmem.ref.offset;
+
+ if (size) {
+ ia_css_init_ref_state((struct sh_css_isp_ref_dmem_state *)
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_STATE][IA_CSS_ISP_DMEM].address[offset],
+ size);
+ }
+
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_initialize_ref_state() leave:\n");
+}
+
+/* Code generated by genparam/genstate.c:gen_init_function() */
+
+static void
+ia_css_initialize_ynr_state(
+ const struct ia_css_binary *binary)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_initialize_ynr_state() enter:\n");
+
+ {
+ unsigned size = binary->info->mem_offsets.offsets.state->vmem.ynr.size;
+
+ unsigned offset = binary->info->mem_offsets.offsets.state->vmem.ynr.offset;
+
+ if (size) {
+ ia_css_init_ynr_state(
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_STATE][IA_CSS_ISP_VMEM].address[offset],
+ size);
+ }
+
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_initialize_ynr_state() leave:\n");
+}
+
+/* Code generated by genparam/genstate.c:gen_state_init_table() */
+
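+/*
+ * Note: this table is indexed by enum ia_css_state_ids (see
+ * ia_css_isp_states.h), so the entries must stay in the same order as
+ * that enum: AA, CNR, CNR2, DP, DE, TNR, REF, YNR.
+ */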
+void (* ia_css_kernel_init_state[IA_CSS_NUM_STATE_IDS])(const struct ia_css_binary *binary) = {
+ ia_css_initialize_aa_state,
+ ia_css_initialize_cnr_state,
+ ia_css_initialize_cnr2_state,
+ ia_css_initialize_dp_state,
+ ia_css_initialize_de_state,
+ ia_css_initialize_tnr_state,
+ ia_css_initialize_ref_state,
+ ia_css_initialize_ynr_state,
+};
+
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hive_isp_css_2400_system_generated/ia_css_isp_states.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hive_isp_css_2400_system_generated/ia_css_isp_states.h
new file mode 100644
index 000000000000..732adafb0a63
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hive_isp_css_2400_system_generated/ia_css_isp_states.h
@@ -0,0 +1,72 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#define IA_CSS_INCLUDE_STATES
+#include "isp/kernels/aa/aa_2/ia_css_aa2.host.h"
+#include "isp/kernels/cnr/cnr_1.0/ia_css_cnr.host.h"
+#include "isp/kernels/cnr/cnr_2/ia_css_cnr2.host.h"
+#include "isp/kernels/de/de_1.0/ia_css_de.host.h"
+#include "isp/kernels/dp/dp_1.0/ia_css_dp.host.h"
+#include "isp/kernels/ref/ref_1.0/ia_css_ref.host.h"
+#include "isp/kernels/tnr/tnr_1.0/ia_css_tnr.host.h"
+#include "isp/kernels/ynr/ynr_1.0/ia_css_ynr.host.h"
+#include "isp/kernels/dpc2/ia_css_dpc2.host.h"
+#include "isp/kernels/eed1_8/ia_css_eed1_8.host.h"
+/* Generated code: do not edit or commit. */
+
+#ifndef _IA_CSS_ISP_STATE_H
+#define _IA_CSS_ISP_STATE_H
+
+/* Code generated by genparam/gencode.c:gen_param_enum() */
+
+enum ia_css_state_ids {
+ IA_CSS_AA_STATE_ID,
+ IA_CSS_CNR_STATE_ID,
+ IA_CSS_CNR2_STATE_ID,
+ IA_CSS_DP_STATE_ID,
+ IA_CSS_DE_STATE_ID,
+ IA_CSS_TNR_STATE_ID,
+ IA_CSS_REF_STATE_ID,
+ IA_CSS_YNR_STATE_ID,
+ IA_CSS_NUM_STATE_IDS
+};
+
+/* Code generated by genparam/gencode.c:gen_param_offsets() */
+
+struct ia_css_state_memory_offsets {
+ struct {
+ struct ia_css_isp_parameter aa;
+ struct ia_css_isp_parameter cnr;
+ struct ia_css_isp_parameter cnr2;
+ struct ia_css_isp_parameter dp;
+ struct ia_css_isp_parameter de;
+ struct ia_css_isp_parameter ynr;
+ } vmem;
+ struct {
+ struct ia_css_isp_parameter tnr;
+ struct ia_css_isp_parameter ref;
+ } dmem;
+};
+
+#if defined(IA_CSS_INCLUDE_STATES)
+
+#include "ia_css_stream.h" /* struct ia_css_stream */
+#include "ia_css_binary.h" /* struct ia_css_binary */
+/* Code generated by genparam/genstate.c:gen_state_init_table() */
+
+extern void (* ia_css_kernel_init_state[IA_CSS_NUM_STATE_IDS])(const struct ia_css_binary *binary);
+
+#endif /* IA_CSS_INCLUDE_STATES */
+
+#endif /* _IA_CSS_ISP_STATE_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/bits.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/bits.h
new file mode 100644
index 000000000000..e71e33d9d143
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/bits.h
@@ -0,0 +1,104 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _HRT_BITS_H
+#define _HRT_BITS_H
+
+#include "defs.h"
+
+#define _hrt_ones(n) HRTCAT(_hrt_ones_, n)
+#define _hrt_ones_0x0 0x00000000U
+#define _hrt_ones_0x1 0x00000001U
+#define _hrt_ones_0x2 0x00000003U
+#define _hrt_ones_0x3 0x00000007U
+#define _hrt_ones_0x4 0x0000000FU
+#define _hrt_ones_0x5 0x0000001FU
+#define _hrt_ones_0x6 0x0000003FU
+#define _hrt_ones_0x7 0x0000007FU
+#define _hrt_ones_0x8 0x000000FFU
+#define _hrt_ones_0x9 0x000001FFU
+#define _hrt_ones_0xA 0x000003FFU
+#define _hrt_ones_0xB 0x000007FFU
+#define _hrt_ones_0xC 0x00000FFFU
+#define _hrt_ones_0xD 0x00001FFFU
+#define _hrt_ones_0xE 0x00003FFFU
+#define _hrt_ones_0xF 0x00007FFFU
+#define _hrt_ones_0x10 0x0000FFFFU
+#define _hrt_ones_0x11 0x0001FFFFU
+#define _hrt_ones_0x12 0x0003FFFFU
+#define _hrt_ones_0x13 0x0007FFFFU
+#define _hrt_ones_0x14 0x000FFFFFU
+#define _hrt_ones_0x15 0x001FFFFFU
+#define _hrt_ones_0x16 0x003FFFFFU
+#define _hrt_ones_0x17 0x007FFFFFU
+#define _hrt_ones_0x18 0x00FFFFFFU
+#define _hrt_ones_0x19 0x01FFFFFFU
+#define _hrt_ones_0x1A 0x03FFFFFFU
+#define _hrt_ones_0x1B 0x07FFFFFFU
+#define _hrt_ones_0x1C 0x0FFFFFFFU
+#define _hrt_ones_0x1D 0x1FFFFFFFU
+#define _hrt_ones_0x1E 0x3FFFFFFFU
+#define _hrt_ones_0x1F 0x7FFFFFFFU
+#define _hrt_ones_0x20 0xFFFFFFFFU
+
+#define _hrt_ones_0 _hrt_ones_0x0
+#define _hrt_ones_1 _hrt_ones_0x1
+#define _hrt_ones_2 _hrt_ones_0x2
+#define _hrt_ones_3 _hrt_ones_0x3
+#define _hrt_ones_4 _hrt_ones_0x4
+#define _hrt_ones_5 _hrt_ones_0x5
+#define _hrt_ones_6 _hrt_ones_0x6
+#define _hrt_ones_7 _hrt_ones_0x7
+#define _hrt_ones_8 _hrt_ones_0x8
+#define _hrt_ones_9 _hrt_ones_0x9
+#define _hrt_ones_10 _hrt_ones_0xA
+#define _hrt_ones_11 _hrt_ones_0xB
+#define _hrt_ones_12 _hrt_ones_0xC
+#define _hrt_ones_13 _hrt_ones_0xD
+#define _hrt_ones_14 _hrt_ones_0xE
+#define _hrt_ones_15 _hrt_ones_0xF
+#define _hrt_ones_16 _hrt_ones_0x10
+#define _hrt_ones_17 _hrt_ones_0x11
+#define _hrt_ones_18 _hrt_ones_0x12
+#define _hrt_ones_19 _hrt_ones_0x13
+#define _hrt_ones_20 _hrt_ones_0x14
+#define _hrt_ones_21 _hrt_ones_0x15
+#define _hrt_ones_22 _hrt_ones_0x16
+#define _hrt_ones_23 _hrt_ones_0x17
+#define _hrt_ones_24 _hrt_ones_0x18
+#define _hrt_ones_25 _hrt_ones_0x19
+#define _hrt_ones_26 _hrt_ones_0x1A
+#define _hrt_ones_27 _hrt_ones_0x1B
+#define _hrt_ones_28 _hrt_ones_0x1C
+#define _hrt_ones_29 _hrt_ones_0x1D
+#define _hrt_ones_30 _hrt_ones_0x1E
+#define _hrt_ones_31 _hrt_ones_0x1F
+#define _hrt_ones_32 _hrt_ones_0x20
+
+#define _hrt_mask(b, n) \
+ (_hrt_ones(n) << (b))
+#define _hrt_get_bits(w, b, n) \
+ (((w) >> (b)) & _hrt_ones(n))
+#define _hrt_set_bits(w, b, n, v) \
+ (((w) & ~_hrt_mask(b, n)) | (((v) & _hrt_ones(n)) << (b)))
+#define _hrt_get_bit(w, b) \
+ (((w) >> (b)) & 1)
+#define _hrt_set_bit(w, b, v) \
+ (((w) & (~(1 << (b)))) | (((v)&1) << (b)))
+#define _hrt_set_lower_half(w, v) \
+ _hrt_set_bits(w, 0, 16, v)
+#define _hrt_set_upper_half(w, v) \
+ _hrt_set_bits(w, 16, 16, v)
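+
+/*
+ * Usage notes (illustrative): the bit-count argument 'n' must be a literal
+ * in the range 0..32 (or 0x0..0x20), because _hrt_ones() builds the mask
+ * name by token pasting via HRTCAT. For example:
+ *   _hrt_get_bits(0xABCD1234, 8, 8)       evaluates to 0x12
+ *   _hrt_set_bits(0x00000000, 4, 8, 0xAB) evaluates to 0x00000AB0
+ */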
+
+#endif /* _HRT_BITS_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/cell_params.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/cell_params.h
new file mode 100644
index 000000000000..b5756bfe8eb6
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/cell_params.h
@@ -0,0 +1,42 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _cell_params_h
+#define _cell_params_h
+
+#define SP_PMEM_LOG_WIDTH_BITS 6 /*Width of PC, 64 bits, 8 bytes*/
+#define SP_ICACHE_TAG_BITS 4 /*size of tag*/
+#define SP_ICACHE_SET_BITS 8 /* 256 sets*/
+#define SP_ICACHE_BLOCKS_PER_SET_BITS 1 /* 2 way associative*/
+#define SP_ICACHE_BLOCK_ADDRESS_BITS 11 /* 2048 lines capacity*/
+
+#define SP_ICACHE_ADDRESS_BITS \
+ (SP_ICACHE_TAG_BITS+SP_ICACHE_BLOCK_ADDRESS_BITS)
+
+#define SP_PMEM_DEPTH (1<<SP_ICACHE_ADDRESS_BITS)
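+/*
+ * Derived values, for reference: SP_ICACHE_ADDRESS_BITS = 4 + 11 = 15,
+ * so SP_PMEM_DEPTH = 1 << 15 = 32768 program memory words.
+ */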
+
+#define SP_FIFO_0_DEPTH 0
+#define SP_FIFO_1_DEPTH 0
+#define SP_FIFO_2_DEPTH 0
+#define SP_FIFO_3_DEPTH 0
+#define SP_FIFO_4_DEPTH 0
+#define SP_FIFO_5_DEPTH 0
+#define SP_FIFO_6_DEPTH 0
+#define SP_FIFO_7_DEPTH 0
+
+
+#define SP_SLV_BUS_MAXBURSTSIZE 1
+
+#endif /* _cell_params_h */
+
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/css_receiver_2400_common_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/css_receiver_2400_common_defs.h
new file mode 100644
index 000000000000..f3054fe04d03
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/css_receiver_2400_common_defs.h
@@ -0,0 +1,200 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _css_receiver_2400_common_defs_h_
+#define _css_receiver_2400_common_defs_h_
+#ifndef _mipi_backend_common_defs_h_
+#define _mipi_backend_common_defs_h_
+
+#define _HRT_CSS_RECEIVER_2400_GEN_SHORT_DATA_WIDTH 16
+#define _HRT_CSS_RECEIVER_2400_GEN_SHORT_CH_ID_WIDTH 2
+#define _HRT_CSS_RECEIVER_2400_GEN_SHORT_FMT_TYPE_WIDTH 3
+#define _HRT_CSS_RECEIVER_2400_GEN_SHORT_STR_REAL_WIDTH (_HRT_CSS_RECEIVER_2400_GEN_SHORT_DATA_WIDTH + _HRT_CSS_RECEIVER_2400_GEN_SHORT_CH_ID_WIDTH + _HRT_CSS_RECEIVER_2400_GEN_SHORT_FMT_TYPE_WIDTH)
+#define _HRT_CSS_RECEIVER_2400_GEN_SHORT_STR_WIDTH           32 /* use 32 to be compatible with the streaming monitor; the MSBs of the interface are tied to '0' */
+
+/* Definition of data format ID at the interface CSS_receiver capture/acquisition units */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_YUV420_8 24 /* 01 1000 YUV420 8-bit */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_YUV420_10 25 /* 01 1001 YUV420 10-bit */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_YUV420_8L 26 /* 01 1010 YUV420 8-bit legacy */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_YUV422_8 30 /* 01 1110 YUV422 8-bit */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_YUV422_10 31 /* 01 1111 YUV422 10-bit */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RGB444 32 /* 10 0000 RGB444 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RGB555 33 /* 10 0001 RGB555 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RGB565 34 /* 10 0010 RGB565 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RGB666 35 /* 10 0011 RGB666 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RGB888 36 /* 10 0100 RGB888 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RAW6 40 /* 10 1000 RAW6 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RAW7 41 /* 10 1001 RAW7 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RAW8 42 /* 10 1010 RAW8 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RAW10 43 /* 10 1011 RAW10 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RAW12 44 /* 10 1100 RAW12 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RAW14 45 /* 10 1101 RAW14 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_USR_DEF_1 48 /* 11 0000 JPEG [User Defined 8-bit Data Type 1] */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_USR_DEF_2 49 /* 11 0001 User Defined 8-bit Data Type 2 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_USR_DEF_3 50 /* 11 0010 User Defined 8-bit Data Type 3 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_USR_DEF_4 51 /* 11 0011 User Defined 8-bit Data Type 4 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_USR_DEF_5 52 /* 11 0100 User Defined 8-bit Data Type 5 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_USR_DEF_6 53 /* 11 0101 User Defined 8-bit Data Type 6 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_USR_DEF_7 54 /* 11 0110 User Defined 8-bit Data Type 7 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_USR_DEF_8 55 /* 11 0111 User Defined 8-bit Data Type 8 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_Emb 18 /* 01 0010 embedded eight bit non image data */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_SOF 0 /* 00 0000 frame start */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_EOF 1 /* 00 0001 frame end */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_SOL 2 /* 00 0010 line start */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_EOL 3 /* 00 0011 line end */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_GEN_SH1 8 /* 00 1000 Generic Short Packet Code 1 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_GEN_SH2 9 /* 00 1001 Generic Short Packet Code 2 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_GEN_SH3 10 /* 00 1010 Generic Short Packet Code 3 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_GEN_SH4 11 /* 00 1011 Generic Short Packet Code 4 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_GEN_SH5 12 /* 00 1100 Generic Short Packet Code 5 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_GEN_SH6 13 /* 00 1101 Generic Short Packet Code 6 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_GEN_SH7 14 /* 00 1110 Generic Short Packet Code 7 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_GEN_SH8 15 /* 00 1111 Generic Short Packet Code 8 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_YUV420_8_CSPS 28 /* 01 1100 YUV420 8-bit (Chroma Shifted Pixel Sampling) */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_YUV420_10_CSPS 29 /* 01 1101 YUV420 10-bit (Chroma Shifted Pixel Sampling) */
+/* reserved MIPI positions are used for these */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RAW16 46
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RAW18 47
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RAW18_2 37
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RAW18_3 38
+
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_WIDTH 6
+
+/* Definition of format_types at the interface CSS --> input_selector*/
+/* !! Changes here should be copied to systems/isp/isp_css/bin/conv_transmitter_cmd.tcl !! */
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RGB888 0 // 36 'h24
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RGB555 1 // 33 'h
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RGB444 2 // 32
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RGB565 3 // 34
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RGB666 4 // 35
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RAW8 5 // 42
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RAW10 6 // 43
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RAW6 7 // 40
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RAW7 8 // 41
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RAW12          9  // 44
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RAW14 10 // 45
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_YUV420_8      11  // 24
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_YUV420_10 12 // 25
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_YUV422_8 13 // 30
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_YUV422_10 14 // 31
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_USR_DEF_1 15 // 48
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_YUV420_8L 16 // 26
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_Emb 17 // 18
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_USR_DEF_2 18 // 49
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_USR_DEF_3 19 // 50
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_USR_DEF_4 20 // 51
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_USR_DEF_5 21 // 52
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_USR_DEF_6 22 // 53
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_USR_DEF_7 23 // 54
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_USR_DEF_8 24 // 55
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_YUV420_8_CSPS 25 // 28
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_YUV420_10_CSPS 26 // 29
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RAW16 27 // ?
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RAW18 28 // ?
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RAW18_2 29 // ? Option 2 for depacketiser
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RAW18_3 30 // ? Option 3 for depacketiser
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_CUSTOM 31 // to signal custom decoding
+
+/* definition for state machine of data FIFO for decode different type of data */
+#define _HRT_CSS_RECEIVER_2400_YUV420_8_REPEAT_PTN 1
+#define _HRT_CSS_RECEIVER_2400_YUV420_10_REPEAT_PTN 5
+#define _HRT_CSS_RECEIVER_2400_YUV420_8L_REPEAT_PTN 1
+#define _HRT_CSS_RECEIVER_2400_YUV422_8_REPEAT_PTN 1
+#define _HRT_CSS_RECEIVER_2400_YUV422_10_REPEAT_PTN 5
+#define _HRT_CSS_RECEIVER_2400_RGB444_REPEAT_PTN 2
+#define _HRT_CSS_RECEIVER_2400_RGB555_REPEAT_PTN 2
+#define _HRT_CSS_RECEIVER_2400_RGB565_REPEAT_PTN 2
+#define _HRT_CSS_RECEIVER_2400_RGB666_REPEAT_PTN 9
+#define _HRT_CSS_RECEIVER_2400_RGB888_REPEAT_PTN 3
+#define _HRT_CSS_RECEIVER_2400_RAW6_REPEAT_PTN 3
+#define _HRT_CSS_RECEIVER_2400_RAW7_REPEAT_PTN 7
+#define _HRT_CSS_RECEIVER_2400_RAW8_REPEAT_PTN 1
+#define _HRT_CSS_RECEIVER_2400_RAW10_REPEAT_PTN 5
+#define _HRT_CSS_RECEIVER_2400_RAW12_REPEAT_PTN 3
+#define _HRT_CSS_RECEIVER_2400_RAW14_REPEAT_PTN 7
+
+#define _HRT_CSS_RECEIVER_2400_MAX_REPEAT_PTN _HRT_CSS_RECEIVER_2400_RGB666_REPEAT_PTN
+
+#define _HRT_CSS_RECEIVER_2400_BE_COMP_FMT_IDX 0
+#define _HRT_CSS_RECEIVER_2400_BE_COMP_FMT_WIDTH 3
+#define _HRT_CSS_RECEIVER_2400_BE_COMP_PRED_IDX 3
+#define _HRT_CSS_RECEIVER_2400_BE_COMP_PRED_WIDTH 1
+#define _HRT_CSS_RECEIVER_2400_BE_COMP_USD_BITS 4 /* bits per USD type */
+
+#define _HRT_CSS_RECEIVER_2400_BE_RAW16_DATAID_IDX 0
+#define _HRT_CSS_RECEIVER_2400_BE_RAW16_EN_IDX 6
+#define _HRT_CSS_RECEIVER_2400_BE_RAW18_DATAID_IDX 0
+#define _HRT_CSS_RECEIVER_2400_BE_RAW18_OPTION_IDX 6
+#define _HRT_CSS_RECEIVER_2400_BE_RAW18_EN_IDX 8
+
+#define _HRT_CSS_RECEIVER_2400_BE_COMP_NO_COMP 0
+#define _HRT_CSS_RECEIVER_2400_BE_COMP_10_6_10 1
+#define _HRT_CSS_RECEIVER_2400_BE_COMP_10_7_10 2
+#define _HRT_CSS_RECEIVER_2400_BE_COMP_10_8_10 3
+#define _HRT_CSS_RECEIVER_2400_BE_COMP_12_6_12 4
+#define _HRT_CSS_RECEIVER_2400_BE_COMP_12_7_12 5
+#define _HRT_CSS_RECEIVER_2400_BE_COMP_12_8_12 6
+
+
+/* packet bit definition */
+#define _HRT_CSS_RECEIVER_2400_PKT_SOP_IDX 32
+#define _HRT_CSS_RECEIVER_2400_PKT_SOP_BITS 1
+#define _HRT_CSS_RECEIVER_2400_PKT_CH_ID_IDX 22
+#define _HRT_CSS_RECEIVER_2400_PKT_CH_ID_BITS 2
+#define _HRT_CSS_RECEIVER_2400_PKT_FMT_ID_IDX 16
+#define _HRT_CSS_RECEIVER_2400_PKT_FMT_ID_BITS 6
+#define _HRT_CSS_RECEIVER_2400_PH_DATA_FIELD_IDX 0
+#define _HRT_CSS_RECEIVER_2400_PH_DATA_FIELD_BITS 16
+#define _HRT_CSS_RECEIVER_2400_PKT_PAYLOAD_IDX 0
+#define _HRT_CSS_RECEIVER_2400_PKT_PAYLOAD_BITS 32
+
+
+/*************************************************************************************************/
+/* Custom Decoding */
+/* These Custom Defs are defined based on design-time config in "csi_be_pixel_formatter.chdl" !! */
+/*************************************************************************************************/
+#define BE_CUST_EN_IDX 0 /* 2bits */
+#define BE_CUST_EN_DATAID_IDX 2 /* 6bits MIPI DATA ID */
+#define BE_CUST_EN_WIDTH 8
+#define BE_CUST_MODE_ALL 1 /* Enable Custom Decoding for all DATA IDs */
+#define BE_CUST_MODE_ONE 3 /* Enable Custom Decoding for ONE DATA ID, programmed in CUST_EN_DATA_ID */
+
+/* Data State config = {get_bits(6bits), valid(1bit)} */
+#define BE_CUST_DATA_STATE_S0_IDX 0 /* 7bits */
+#define BE_CUST_DATA_STATE_S1_IDX 7 /* 7bits */
+#define BE_CUST_DATA_STATE_S2_IDX 14 /* 7bits */
+#define BE_CUST_DATA_STATE_WIDTH 21
+#define BE_CUST_DATA_STATE_VALID_IDX 0 /* 1bits */
+#define BE_CUST_DATA_STATE_GETBITS_IDX 1 /* 6bits */
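+/*
+ * Resulting layout, for reference: the 21-bit data-state config holds three
+ * 7-bit fields, S0 in bits [6:0], S1 in bits [13:7] and S2 in bits [20:14];
+ * within each field, bit 0 is 'valid' and bits [6:1] are 'get_bits'.
+ */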
+
+/* Pixel Extractor config */
+#define BE_CUST_PIX_EXT_DATA_ALIGN_IDX 0 /* 5bits */
+#define BE_CUST_PIX_EXT_PIX_ALIGN_IDX 5 /* 5bits */
+#define BE_CUST_PIX_EXT_PIX_MASK_IDX 10 /* 18bits */
+#define BE_CUST_PIX_EXT_PIX_EN_IDX 28 /* 1bits */
+#define BE_CUST_PIX_EXT_WIDTH 29
+
+/* Pixel Valid & EoP config = {[eop,valid](special), [eop,valid](normal)} */
+#define BE_CUST_PIX_VALID_EOP_P0_IDX 0 /* 4bits */
+#define BE_CUST_PIX_VALID_EOP_P1_IDX 4 /* 4bits */
+#define BE_CUST_PIX_VALID_EOP_P2_IDX 8 /* 4bits */
+#define BE_CUST_PIX_VALID_EOP_P3_IDX 12 /* 4bits */
+#define BE_CUST_PIX_VALID_EOP_WIDTH 16
+#define BE_CUST_PIX_VALID_EOP_NOR_VALID_IDX 0 /* Normal (NO less get_bits case) Valid - 1bits */
+#define BE_CUST_PIX_VALID_EOP_NOR_EOP_IDX 1 /* Normal (NO less get_bits case) EoP - 1bits */
+#define BE_CUST_PIX_VALID_EOP_ESP_VALID_IDX   2    /* Special (less get_bits case) Valid - 1 bit */
+#define BE_CUST_PIX_VALID_EOP_ESP_EOP_IDX     3    /* Special (less get_bits case) EoP - 1 bit */
+
+#endif /* _mipi_backend_common_defs_h_ */
+#endif /* _css_receiver_2400_common_defs_h_ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/css_receiver_2400_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/css_receiver_2400_defs.h
new file mode 100644
index 000000000000..6f5b7d3d3715
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/css_receiver_2400_defs.h
@@ -0,0 +1,258 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _css_receiver_2400_defs_h_
+#define _css_receiver_2400_defs_h_
+
+#include "css_receiver_2400_common_defs.h"
+
+#define CSS_RECEIVER_DATA_WIDTH 8
+#define CSS_RECEIVER_RX_TRIG 4
+#define CSS_RECEIVER_RF_WORD 32
+#define CSS_RECEIVER_IMG_PROC_RF_ADDR 10
+#define CSS_RECEIVER_CSI_RF_ADDR 4
+#define CSS_RECEIVER_DATA_OUT 12
+#define CSS_RECEIVER_CHN_NO 2
+#define CSS_RECEIVER_DWORD_CNT 11
+#define CSS_RECEIVER_FORMAT_TYP 5
+#define CSS_RECEIVER_HRESPONSE 2
+#define CSS_RECEIVER_STATE_WIDTH 3
+#define CSS_RECEIVER_FIFO_DAT 32
+#define CSS_RECEIVER_CNT_VAL 2
+#define CSS_RECEIVER_PRED10_VAL 10
+#define CSS_RECEIVER_PRED12_VAL 12
+#define CSS_RECEIVER_CNT_WIDTH 8
+#define CSS_RECEIVER_WORD_CNT 16
+#define CSS_RECEIVER_PIXEL_LEN 6
+#define CSS_RECEIVER_PIXEL_CNT 5
+#define CSS_RECEIVER_COMP_8_BIT 8
+#define CSS_RECEIVER_COMP_7_BIT 7
+#define CSS_RECEIVER_COMP_6_BIT 6
+
+#define CSI_CONFIG_WIDTH 4
+
+/* division of gen_short data, ch_id and fmt_type over streaming data interface */
+#define _HRT_CSS_RECEIVER_2400_GEN_SHORT_STR_DATA_BIT_LSB 0
+#define _HRT_CSS_RECEIVER_2400_GEN_SHORT_STR_FMT_TYPE_BIT_LSB (_HRT_CSS_RECEIVER_2400_GEN_SHORT_STR_DATA_BIT_LSB + _HRT_CSS_RECEIVER_2400_GEN_SHORT_DATA_WIDTH)
+#define _HRT_CSS_RECEIVER_2400_GEN_SHORT_STR_CH_ID_BIT_LSB (_HRT_CSS_RECEIVER_2400_GEN_SHORT_STR_FMT_TYPE_BIT_LSB + _HRT_CSS_RECEIVER_2400_GEN_SHORT_FMT_TYPE_WIDTH)
+#define _HRT_CSS_RECEIVER_2400_GEN_SHORT_STR_DATA_BIT_MSB (_HRT_CSS_RECEIVER_2400_GEN_SHORT_STR_FMT_TYPE_BIT_LSB - 1)
+#define _HRT_CSS_RECEIVER_2400_GEN_SHORT_STR_FMT_TYPE_BIT_MSB (_HRT_CSS_RECEIVER_2400_GEN_SHORT_STR_CH_ID_BIT_LSB - 1)
+#define _HRT_CSS_RECEIVER_2400_GEN_SHORT_STR_CH_ID_BIT_MSB (_HRT_CSS_RECEIVER_2400_GEN_SHORT_STR_REAL_WIDTH - 1)
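+/*
+ * Resulting bit positions on the 32-bit gen_short streaming word, for
+ * reference: data in bits [15:0], fmt_type in bits [18:16], ch_id in
+ * bits [20:19]; the remaining MSBs up to GEN_SHORT_STR_WIDTH are tied to '0'.
+ */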
+
+#define _HRT_CSS_RECEIVER_2400_REG_ALIGN 4
+#define _HRT_CSS_RECEIVER_2400_BYTES_PER_PKT 4
+
+#define hrt_css_receiver_2400_4_lane_port_offset 0x100
+#define hrt_css_receiver_2400_1_lane_port_offset 0x200
+#define hrt_css_receiver_2400_2_lane_port_offset 0x300
+#define hrt_css_receiver_2400_backend_port_offset 0x100
+
+#define _HRT_CSS_RECEIVER_2400_DEVICE_READY_REG_IDX 0
+#define _HRT_CSS_RECEIVER_2400_IRQ_STATUS_REG_IDX 1
+#define _HRT_CSS_RECEIVER_2400_IRQ_ENABLE_REG_IDX 2
+#define _HRT_CSS_RECEIVER_2400_CSI2_FUNC_PROG_REG_IDX 3
+#define _HRT_CSS_RECEIVER_2400_INIT_COUNT_REG_IDX 4
+#define _HRT_CSS_RECEIVER_2400_FS_TO_LS_DELAY_REG_IDX 7
+#define _HRT_CSS_RECEIVER_2400_LS_TO_DATA_DELAY_REG_IDX 8
+#define _HRT_CSS_RECEIVER_2400_DATA_TO_LE_DELAY_REG_IDX 9
+#define _HRT_CSS_RECEIVER_2400_LE_TO_FE_DELAY_REG_IDX 10
+#define _HRT_CSS_RECEIVER_2400_FE_TO_FS_DELAY_REG_IDX 11
+#define _HRT_CSS_RECEIVER_2400_LE_TO_LS_DELAY_REG_IDX 12
+#define _HRT_CSS_RECEIVER_2400_TWO_PIXEL_EN_REG_IDX 13
+#define _HRT_CSS_RECEIVER_2400_RAW16_18_DATAID_REG_IDX 14
+#define _HRT_CSS_RECEIVER_2400_SYNC_COUNT_REG_IDX 15
+#define _HRT_CSS_RECEIVER_2400_RX_COUNT_REG_IDX 16
+#define _HRT_CSS_RECEIVER_2400_BACKEND_RST_REG_IDX 17
+#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC0_REG0_IDX 18
+#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC0_REG1_IDX 19
+#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC1_REG0_IDX 20
+#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC1_REG1_IDX 21
+#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC2_REG0_IDX 22
+#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC2_REG1_IDX 23
+#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC3_REG0_IDX 24
+#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC3_REG1_IDX 25
+#define _HRT_CSS_RECEIVER_2400_RAW18_REG_IDX 26
+#define _HRT_CSS_RECEIVER_2400_FORCE_RAW8_REG_IDX 27
+#define _HRT_CSS_RECEIVER_2400_RAW16_REG_IDX 28
+
+/* Interrupt bits for IRQ_STATUS and IRQ_ENABLE registers */
+#define _HRT_CSS_RECEIVER_2400_IRQ_OVERRUN_BIT 0
+#define _HRT_CSS_RECEIVER_2400_IRQ_RESERVED_BIT 1
+#define _HRT_CSS_RECEIVER_2400_IRQ_SLEEP_MODE_ENTRY_BIT 2
+#define _HRT_CSS_RECEIVER_2400_IRQ_SLEEP_MODE_EXIT_BIT 3
+#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_SOT_HS_BIT 4
+#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_SOT_SYNC_HS_BIT 5
+#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_CONTROL_BIT 6
+#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_ECC_DOUBLE_BIT 7
+#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_ECC_CORRECTED_BIT 8
+#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_ECC_NO_CORRECTION_BIT 9
+#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_CRC_BIT 10
+#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_ID_BIT 11
+#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_FRAME_SYNC_BIT 12
+#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_FRAME_DATA_BIT 13
+#define _HRT_CSS_RECEIVER_2400_IRQ_DATA_TIMEOUT_BIT 14
+#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_ESCAPE_BIT 15
+#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_LINE_SYNC_BIT 16
+
+#define _HRT_CSS_RECEIVER_2400_IRQ_OVERRUN_CAUSE_ "Fifo Overrun"
+#define _HRT_CSS_RECEIVER_2400_IRQ_RESERVED_CAUSE_ "Reserved"
+#define _HRT_CSS_RECEIVER_2400_IRQ_SLEEP_MODE_ENTRY_CAUSE_ "Sleep mode entry"
+#define _HRT_CSS_RECEIVER_2400_IRQ_SLEEP_MODE_EXIT_CAUSE_ "Sleep mode exit"
+#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_SOT_HS_CAUSE_ "Error high speed SOT"
+#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_SOT_SYNC_HS_CAUSE_ "Error high speed sync SOT"
+#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_CONTROL_CAUSE_ "Error control"
+#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_ECC_DOUBLE_CAUSE_ "Error correction double bit"
+#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_ECC_CORRECTED_CAUSE_ "Error correction single bit"
+#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_ECC_NO_CORRECTION_CAUSE_ "No error"
+#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_CRC_CAUSE_ "Error cyclic redundancy check"
+#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_ID_CAUSE_ "Error id"
+#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_FRAME_SYNC_CAUSE_ "Error frame sync"
+#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_FRAME_DATA_CAUSE_ "Error frame data"
+#define _HRT_CSS_RECEIVER_2400_IRQ_DATA_TIMEOUT_CAUSE_ "Data time-out"
+#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_ESCAPE_CAUSE_ "Error escape"
+#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_LINE_SYNC_CAUSE_ "Error line sync"
+
+/* Bits for CSI2_DEVICE_READY register */
+#define _HRT_CSS_RECEIVER_2400_CSI2_DEVICE_READY_IDX 0
+#define _HRT_CSS_RECEIVER_2400_CSI2_MASK_INIT_TIME_OUT_ERR_IDX 2
+#define _HRT_CSS_RECEIVER_2400_CSI2_MASK_OVER_RUN_ERR_IDX 3
+#define _HRT_CSS_RECEIVER_2400_CSI2_MASK_SOT_SYNC_ERR_IDX 4
+#define _HRT_CSS_RECEIVER_2400_CSI2_MASK_RECEIVE_DATA_TIME_OUT_ERR_IDX 5
+#define _HRT_CSS_RECEIVER_2400_CSI2_MASK_ECC_TWO_BIT_ERR_IDX 6
+#define _HRT_CSS_RECEIVER_2400_CSI2_MASK_DATA_ID_ERR_IDX 7
+
+
+/* Bits for CSI2_FUNC_PROG register */
+#define _HRT_CSS_RECEIVER_2400_CSI2_DATA_TIMEOUT_IDX 0
+#define _HRT_CSS_RECEIVER_2400_CSI2_DATA_TIMEOUT_BITS 19
+
+/* Bits for INIT_COUNT register */
+#define _HRT_CSS_RECEIVER_2400_INIT_TIMER_IDX 0
+#define _HRT_CSS_RECEIVER_2400_INIT_TIMER_BITS 16
+
+/* Bits for COUNT registers */
+#define _HRT_CSS_RECEIVER_2400_SYNC_COUNT_IDX 0
+#define _HRT_CSS_RECEIVER_2400_SYNC_COUNT_BITS 8
+#define _HRT_CSS_RECEIVER_2400_RX_COUNT_IDX 0
+#define _HRT_CSS_RECEIVER_2400_RX_COUNT_BITS 8
+
+/* Bits for RAW16_18_DATAID register */
+#define _HRT_CSS_RECEIVER_2400_RAW16_18_DATAID_RAW16_BITS_IDX 0
+#define _HRT_CSS_RECEIVER_2400_RAW16_18_DATAID_RAW16_BITS_BITS 6
+#define _HRT_CSS_RECEIVER_2400_RAW16_18_DATAID_RAW18_BITS_IDX 8
+#define _HRT_CSS_RECEIVER_2400_RAW16_18_DATAID_RAW18_BITS_BITS 6
+
+/* Bits for COMP_FORMAT register, this selects the compression data format */
+#define _HRT_CSS_RECEIVER_2400_COMP_RAW_BITS_IDX 0
+#define _HRT_CSS_RECEIVER_2400_COMP_RAW_BITS_BITS 8
+#define _HRT_CSS_RECEIVER_2400_COMP_NUM_BITS_IDX (_HRT_CSS_RECEIVER_2400_COMP_RAW_BITS_IDX + _HRT_CSS_RECEIVER_2400_COMP_RAW_BITS_BITS)
+#define _HRT_CSS_RECEIVER_2400_COMP_NUM_BITS_BITS 8
+
+/* Bits for COMP_PREDICT register, this selects the predictor algorithm */
+#define _HRT_CSS_RECEIVER_2400_PREDICT_NO_COMP 0
+#define _HRT_CSS_RECEIVER_2400_PREDICT_1 1
+#define _HRT_CSS_RECEIVER_2400_PREDICT_2 2
+
+/* Number of bits used for the delay registers */
+#define _HRT_CSS_RECEIVER_2400_DELAY_BITS 8
+
+/* Bits for COMP_SCHEME register, this selects the compression scheme for a VC */
+#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_USD1_BITS_IDX 0
+#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_USD2_BITS_IDX 5
+#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_USD3_BITS_IDX 10
+#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_USD4_BITS_IDX 15
+#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_USD5_BITS_IDX 20
+#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_USD6_BITS_IDX 25
+#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_USD7_BITS_IDX 0
+#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_USD8_BITS_IDX 5
+#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_USD_BITS_BITS 5
+#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_USD_FMT_BITS_IDX 0
+#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_USD_FMT_BITS_BITS 3
+#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_USD_PRED_BITS_IDX 3
+#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_USD_PRED_BITS_BITS 2
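+/*
+ * For reference: each USD sub-field is 5 bits wide, holding a 3-bit
+ * compression format in bits [2:0] and a 2-bit predictor in bits [4:3].
+ * The indices restart at 0 for USD7/USD8, which suggests USD1..USD6 live
+ * in the per-VC scheme REG0 and USD7..USD8 in REG1.
+ */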
+
+
+/* BITS for backend RAW16 and RAW 18 registers */
+
+#define _HRT_CSS_RECEIVER_2400_RAW18_DATAID_IDX 0
+#define _HRT_CSS_RECEIVER_2400_RAW18_DATAID_BITS 6
+#define _HRT_CSS_RECEIVER_2400_RAW18_OPTION_IDX 6
+#define _HRT_CSS_RECEIVER_2400_RAW18_OPTION_BITS 2
+#define _HRT_CSS_RECEIVER_2400_RAW18_EN_IDX 8
+#define _HRT_CSS_RECEIVER_2400_RAW18_EN_BITS 1
+
+#define _HRT_CSS_RECEIVER_2400_RAW16_DATAID_IDX 0
+#define _HRT_CSS_RECEIVER_2400_RAW16_DATAID_BITS 6
+#define _HRT_CSS_RECEIVER_2400_RAW16_OPTION_IDX 6
+#define _HRT_CSS_RECEIVER_2400_RAW16_OPTION_BITS 2
+#define _HRT_CSS_RECEIVER_2400_RAW16_EN_IDX 8
+#define _HRT_CSS_RECEIVER_2400_RAW16_EN_BITS 1
+
+/* These hsync and vsync values are for HSS simulation only */
+#define _HRT_CSS_RECEIVER_2400_HSYNC_VAL (1<<16)
+#define _HRT_CSS_RECEIVER_2400_VSYNC_VAL (1<<17)
+
+#define _HRT_CSS_RECEIVER_2400_BE_STREAMING_WIDTH 28
+#define _HRT_CSS_RECEIVER_2400_BE_STREAMING_PIX_A_LSB 0
+#define _HRT_CSS_RECEIVER_2400_BE_STREAMING_PIX_A_MSB (_HRT_CSS_RECEIVER_2400_BE_STREAMING_PIX_A_LSB + CSS_RECEIVER_DATA_OUT - 1)
+#define _HRT_CSS_RECEIVER_2400_BE_STREAMING_PIX_A_VAL_BIT (_HRT_CSS_RECEIVER_2400_BE_STREAMING_PIX_A_MSB + 1)
+#define _HRT_CSS_RECEIVER_2400_BE_STREAMING_PIX_B_LSB (_HRT_CSS_RECEIVER_2400_BE_STREAMING_PIX_A_VAL_BIT + 1)
+#define _HRT_CSS_RECEIVER_2400_BE_STREAMING_PIX_B_MSB (_HRT_CSS_RECEIVER_2400_BE_STREAMING_PIX_B_LSB + CSS_RECEIVER_DATA_OUT - 1)
+#define _HRT_CSS_RECEIVER_2400_BE_STREAMING_PIX_B_VAL_BIT (_HRT_CSS_RECEIVER_2400_BE_STREAMING_PIX_B_MSB + 1)
+#define _HRT_CSS_RECEIVER_2400_BE_STREAMING_SOP_BIT (_HRT_CSS_RECEIVER_2400_BE_STREAMING_PIX_B_VAL_BIT + 1)
+#define _HRT_CSS_RECEIVER_2400_BE_STREAMING_EOP_BIT (_HRT_CSS_RECEIVER_2400_BE_STREAMING_SOP_BIT + 1)
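+/*
+ * Resulting 28-bit backend streaming word, for reference:
+ *   pix_a in bits [11:0],  pix_a valid at bit 12,
+ *   pix_b in bits [24:13], pix_b valid at bit 25,
+ *   sop at bit 26, eop at bit 27.
+ */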
+
+// SH Backend Register IDs
+#define _HRT_CSS_RECEIVER_2400_BE_GSP_ACC_OVL_REG_IDX 0
+#define _HRT_CSS_RECEIVER_2400_BE_SRST_REG_IDX 1
+#define _HRT_CSS_RECEIVER_2400_BE_TWO_PPC_REG_IDX 2
+#define _HRT_CSS_RECEIVER_2400_BE_COMP_FORMAT_REG0_IDX 3
+#define _HRT_CSS_RECEIVER_2400_BE_COMP_FORMAT_REG1_IDX 4
+#define _HRT_CSS_RECEIVER_2400_BE_COMP_FORMAT_REG2_IDX 5
+#define _HRT_CSS_RECEIVER_2400_BE_COMP_FORMAT_REG3_IDX 6
+#define _HRT_CSS_RECEIVER_2400_BE_SEL_REG_IDX 7
+#define _HRT_CSS_RECEIVER_2400_BE_RAW16_CONFIG_REG_IDX 8
+#define _HRT_CSS_RECEIVER_2400_BE_RAW18_CONFIG_REG_IDX 9
+#define _HRT_CSS_RECEIVER_2400_BE_FORCE_RAW8_REG_IDX 10
+#define _HRT_CSS_RECEIVER_2400_BE_IRQ_STATUS_REG_IDX 11
+#define _HRT_CSS_RECEIVER_2400_BE_IRQ_CLEAR_REG_IDX 12
+#define _HRT_CSS_RECEIVER_2400_BE_CUST_EN_REG_IDX 13
+#define _HRT_CSS_RECEIVER_2400_BE_CUST_DATA_STATE_REG_IDX 14 /* Data State 0,1,2 config */
+#define _HRT_CSS_RECEIVER_2400_BE_CUST_PIX_EXT_S0P0_REG_IDX 15 /* Pixel Extractor config for Data State 0 & Pix 0 */
+#define _HRT_CSS_RECEIVER_2400_BE_CUST_PIX_EXT_S0P1_REG_IDX 16 /* Pixel Extractor config for Data State 0 & Pix 1 */
+#define _HRT_CSS_RECEIVER_2400_BE_CUST_PIX_EXT_S0P2_REG_IDX 17 /* Pixel Extractor config for Data State 0 & Pix 2 */
+#define _HRT_CSS_RECEIVER_2400_BE_CUST_PIX_EXT_S0P3_REG_IDX 18 /* Pixel Extractor config for Data State 0 & Pix 3 */
+#define _HRT_CSS_RECEIVER_2400_BE_CUST_PIX_EXT_S1P0_REG_IDX 19 /* Pixel Extractor config for Data State 1 & Pix 0 */
+#define _HRT_CSS_RECEIVER_2400_BE_CUST_PIX_EXT_S1P1_REG_IDX 20 /* Pixel Extractor config for Data State 1 & Pix 1 */
+#define _HRT_CSS_RECEIVER_2400_BE_CUST_PIX_EXT_S1P2_REG_IDX 21 /* Pixel Extractor config for Data State 1 & Pix 2 */
+#define _HRT_CSS_RECEIVER_2400_BE_CUST_PIX_EXT_S1P3_REG_IDX 22 /* Pixel Extractor config for Data State 1 & Pix 3 */
+#define _HRT_CSS_RECEIVER_2400_BE_CUST_PIX_EXT_S2P0_REG_IDX 23 /* Pixel Extractor config for Data State 2 & Pix 0 */
+#define _HRT_CSS_RECEIVER_2400_BE_CUST_PIX_EXT_S2P1_REG_IDX 24 /* Pixel Extractor config for Data State 2 & Pix 1 */
+#define _HRT_CSS_RECEIVER_2400_BE_CUST_PIX_EXT_S2P2_REG_IDX 25 /* Pixel Extractor config for Data State 2 & Pix 2 */
+#define _HRT_CSS_RECEIVER_2400_BE_CUST_PIX_EXT_S2P3_REG_IDX 26 /* Pixel Extractor config for Data State 2 & Pix 3 */
+#define _HRT_CSS_RECEIVER_2400_BE_CUST_PIX_VALID_EOP_REG_IDX 27 /* Pixel Valid & EoP config for Pix 0,1,2,3 */
+
+#define _HRT_CSS_RECEIVER_2400_BE_NOF_REGISTERS 28
+
+#define _HRT_CSS_RECEIVER_2400_BE_SRST_HE 0
+#define _HRT_CSS_RECEIVER_2400_BE_SRST_RCF 1
+#define _HRT_CSS_RECEIVER_2400_BE_SRST_PF 2
+#define _HRT_CSS_RECEIVER_2400_BE_SRST_SM 3
+#define _HRT_CSS_RECEIVER_2400_BE_SRST_PD 4
+#define _HRT_CSS_RECEIVER_2400_BE_SRST_SD 5
+#define _HRT_CSS_RECEIVER_2400_BE_SRST_OT 6
+#define _HRT_CSS_RECEIVER_2400_BE_SRST_BC 7
+#define _HRT_CSS_RECEIVER_2400_BE_SRST_WIDTH 8
+
+#endif /* _css_receiver_2400_defs_h_ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/defs.h
new file mode 100644
index 000000000000..47505f41790c
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/defs.h
@@ -0,0 +1,36 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _HRT_DEFS_H_
+#define _HRT_DEFS_H_
+
+#ifndef HRTCAT
+#define _HRTCAT(m, n) m##n
+#define HRTCAT(m, n) _HRTCAT(m, n)
+#endif
+
+#ifndef HRTSTR
+#define _HRTSTR(x) #x
+#define HRTSTR(x) _HRTSTR(x)
+#endif
+
+#ifndef HRTMIN
+#define HRTMIN(a, b) (((a) < (b)) ? (a) : (b))
+#endif
+
+#ifndef HRTMAX
+#define HRTMAX(a, b) (((a) > (b)) ? (a) : (b))
+#endif
+
+#endif /* _HRT_DEFS_H_ */
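The HRTCAT/HRTSTR pair above is the usual two-level token-pasting and stringizing idiom: the extra indirection forces macro arguments to be expanded before they are pasted or stringized. A minimal usage sketch in plain C (the names below are illustrative, not from the driver):

    #define UNIT 3
    /* HRTCAT(timer_, UNIT) expands to timer_3, not timer_UNIT,   */
    /* because UNIT is expanded before _HRTCAT pastes the tokens. */
    int HRTCAT(timer_, UNIT);          /* declares: int timer_3;  */
    const char *name = HRTSTR(UNIT);   /* yields the string "3"   */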
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/dma_v2_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/dma_v2_defs.h
new file mode 100644
index 000000000000..d184a8b313c9
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/dma_v2_defs.h
@@ -0,0 +1,199 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _dma_v2_defs_h
+#define _dma_v2_defs_h
+
+#define _DMA_V2_NUM_CHANNELS_ID MaxNumChannels
+#define _DMA_V2_CONNECTIONS_ID Connections
+#define _DMA_V2_DEV_ELEM_WIDTHS_ID DevElemWidths
+#define _DMA_V2_DEV_FIFO_DEPTH_ID DevFifoDepth
+#define _DMA_V2_DEV_FIFO_RD_LAT_ID DevFifoRdLat
+#define _DMA_V2_DEV_FIFO_LAT_BYPASS_ID DevFifoRdLatBypass
+#define _DMA_V2_DEV_NO_BURST_ID DevNoBurst
+#define _DMA_V2_DEV_RD_ACCEPT_ID DevRdAccept
+#define _DMA_V2_DEV_SRMD_ID DevSRMD
+#define _DMA_V2_DEV_HAS_CRUN_ID CRunMasters
+#define _DMA_V2_CTRL_ACK_FIFO_DEPTH_ID CtrlAckFifoDepth
+#define _DMA_V2_CMD_FIFO_DEPTH_ID CommandFifoDepth
+#define _DMA_V2_CMD_FIFO_RD_LAT_ID CommandFifoRdLat
+#define _DMA_V2_CMD_FIFO_LAT_BYPASS_ID CommandFifoRdLatBypass
+#define _DMA_V2_NO_PACK_ID has_no_pack
+
+#define _DMA_V2_REG_ALIGN 4
+#define _DMA_V2_REG_ADDR_BITS 2
+
+/* Command word */
+#define _DMA_V2_CMD_IDX 0
+#define _DMA_V2_CMD_BITS 6
+#define _DMA_V2_CHANNEL_IDX (_DMA_V2_CMD_IDX + _DMA_V2_CMD_BITS)
+#define _DMA_V2_CHANNEL_BITS 5
+
+/* The command to set a parameter contains the PARAM field next */
+#define _DMA_V2_PARAM_IDX (_DMA_V2_CHANNEL_IDX + _DMA_V2_CHANNEL_BITS)
+#define _DMA_V2_PARAM_BITS 4
+
+/* Commands to read, write or init specific blocks contain these
+ three values */
+#define _DMA_V2_SPEC_DEV_A_XB_IDX (_DMA_V2_CHANNEL_IDX + _DMA_V2_CHANNEL_BITS)
+#define _DMA_V2_SPEC_DEV_A_XB_BITS 8
+#define _DMA_V2_SPEC_DEV_B_XB_IDX (_DMA_V2_SPEC_DEV_A_XB_IDX + _DMA_V2_SPEC_DEV_A_XB_BITS)
+#define _DMA_V2_SPEC_DEV_B_XB_BITS 8
+#define _DMA_V2_SPEC_YB_IDX (_DMA_V2_SPEC_DEV_B_XB_IDX + _DMA_V2_SPEC_DEV_B_XB_BITS)
+#define _DMA_V2_SPEC_YB_BITS (32-_DMA_V2_SPEC_DEV_B_XB_BITS-_DMA_V2_SPEC_DEV_A_XB_BITS-_DMA_V2_CMD_BITS-_DMA_V2_CHANNEL_BITS)
+
+/* */
+#define _DMA_V2_CMD_CTRL_IDX 4
+#define _DMA_V2_CMD_CTRL_BITS 4
+
+/* Packing setup word */
+#define _DMA_V2_CONNECTION_IDX 0
+#define _DMA_V2_CONNECTION_BITS 4
+#define _DMA_V2_EXTENSION_IDX (_DMA_V2_CONNECTION_IDX + _DMA_V2_CONNECTION_BITS)
+#define _DMA_V2_EXTENSION_BITS 1
+
+/* Elements packing word */
+#define _DMA_V2_ELEMENTS_IDX 0
+#define _DMA_V2_ELEMENTS_BITS 8
+#define _DMA_V2_LEFT_CROPPING_IDX (_DMA_V2_ELEMENTS_IDX + _DMA_V2_ELEMENTS_BITS)
+#define _DMA_V2_LEFT_CROPPING_BITS 8
+
+#define _DMA_V2_WIDTH_IDX 0
+#define _DMA_V2_WIDTH_BITS 16
+
+#define _DMA_V2_HEIGHT_IDX 0
+#define _DMA_V2_HEIGHT_BITS 16
+
+#define _DMA_V2_STRIDE_IDX 0
+#define _DMA_V2_STRIDE_BITS 32
+
+/* Command IDs */
+#define _DMA_V2_MOVE_B2A_COMMAND 0
+#define _DMA_V2_MOVE_B2A_BLOCK_COMMAND 1
+#define _DMA_V2_MOVE_B2A_NO_SYNC_CHK_COMMAND 2
+#define _DMA_V2_MOVE_B2A_BLOCK_NO_SYNC_CHK_COMMAND 3
+#define _DMA_V2_MOVE_A2B_COMMAND 4
+#define _DMA_V2_MOVE_A2B_BLOCK_COMMAND 5
+#define _DMA_V2_MOVE_A2B_NO_SYNC_CHK_COMMAND 6
+#define _DMA_V2_MOVE_A2B_BLOCK_NO_SYNC_CHK_COMMAND 7
+#define _DMA_V2_INIT_A_COMMAND 8
+#define _DMA_V2_INIT_A_BLOCK_COMMAND 9
+#define _DMA_V2_INIT_A_NO_SYNC_CHK_COMMAND 10
+#define _DMA_V2_INIT_A_BLOCK_NO_SYNC_CHK_COMMAND 11
+#define _DMA_V2_INIT_B_COMMAND 12
+#define _DMA_V2_INIT_B_BLOCK_COMMAND 13
+#define _DMA_V2_INIT_B_NO_SYNC_CHK_COMMAND 14
+#define _DMA_V2_INIT_B_BLOCK_NO_SYNC_CHK_COMMAND 15
+#define _DMA_V2_NO_ACK_MOVE_B2A_NO_SYNC_CHK_COMMAND (_DMA_V2_MOVE_B2A_NO_SYNC_CHK_COMMAND + 16)
+#define _DMA_V2_NO_ACK_MOVE_B2A_BLOCK_NO_SYNC_CHK_COMMAND (_DMA_V2_MOVE_B2A_BLOCK_NO_SYNC_CHK_COMMAND + 16)
+#define _DMA_V2_NO_ACK_MOVE_A2B_NO_SYNC_CHK_COMMAND (_DMA_V2_MOVE_A2B_NO_SYNC_CHK_COMMAND + 16)
+#define _DMA_V2_NO_ACK_MOVE_A2B_BLOCK_NO_SYNC_CHK_COMMAND (_DMA_V2_MOVE_A2B_BLOCK_NO_SYNC_CHK_COMMAND + 16)
+#define _DMA_V2_NO_ACK_INIT_A_NO_SYNC_CHK_COMMAND (_DMA_V2_INIT_A_NO_SYNC_CHK_COMMAND + 16)
+#define _DMA_V2_NO_ACK_INIT_A_BLOCK_NO_SYNC_CHK_COMMAND (_DMA_V2_INIT_A_BLOCK_NO_SYNC_CHK_COMMAND + 16)
+#define _DMA_V2_NO_ACK_INIT_B_NO_SYNC_CHK_COMMAND (_DMA_V2_INIT_B_NO_SYNC_CHK_COMMAND + 16)
+#define _DMA_V2_NO_ACK_INIT_B_BLOCK_NO_SYNC_CHK_COMMAND (_DMA_V2_INIT_B_BLOCK_NO_SYNC_CHK_COMMAND + 16)
+#define _DMA_V2_CONFIG_CHANNEL_COMMAND 32
+#define _DMA_V2_SET_CHANNEL_PARAM_COMMAND 33
+#define _DMA_V2_SET_CRUN_COMMAND 62
+
+/* Channel Parameter IDs */
+#define _DMA_V2_PACKING_SETUP_PARAM 0
+#define _DMA_V2_STRIDE_A_PARAM 1
+#define _DMA_V2_ELEM_CROPPING_A_PARAM 2
+#define _DMA_V2_WIDTH_A_PARAM 3
+#define _DMA_V2_STRIDE_B_PARAM 4
+#define _DMA_V2_ELEM_CROPPING_B_PARAM 5
+#define _DMA_V2_WIDTH_B_PARAM 6
+#define _DMA_V2_HEIGHT_PARAM 7
+#define _DMA_V2_QUEUED_CMDS 8
+
+/* Parameter Constants */
+#define _DMA_V2_ZERO_EXTEND 0
+#define _DMA_V2_SIGN_EXTEND 1
+
+ /* SLAVE address map */
+#define _DMA_V2_SEL_FSM_CMD 0
+#define _DMA_V2_SEL_CH_REG 1
+#define _DMA_V2_SEL_CONN_GROUP 2
+#define _DMA_V2_SEL_DEV_INTERF 3
+
+#define _DMA_V2_ADDR_SEL_COMP_IDX 12
+#define _DMA_V2_ADDR_SEL_COMP_BITS 4
+#define _DMA_V2_ADDR_SEL_CH_REG_IDX 2
+#define _DMA_V2_ADDR_SEL_CH_REG_BITS 6
+#define _DMA_V2_ADDR_SEL_PARAM_IDX (_DMA_V2_ADDR_SEL_CH_REG_BITS+_DMA_V2_ADDR_SEL_CH_REG_IDX)
+#define _DMA_V2_ADDR_SEL_PARAM_BITS 4
+
+#define _DMA_V2_ADDR_SEL_GROUP_COMP_IDX 2
+#define _DMA_V2_ADDR_SEL_GROUP_COMP_BITS 6
+#define _DMA_V2_ADDR_SEL_GROUP_COMP_INFO_IDX (_DMA_V2_ADDR_SEL_GROUP_COMP_BITS + _DMA_V2_ADDR_SEL_GROUP_COMP_IDX)
+#define _DMA_V2_ADDR_SEL_GROUP_COMP_INFO_BITS 4
+
+#define _DMA_V2_ADDR_SEL_DEV_INTERF_IDX_IDX 2
+#define _DMA_V2_ADDR_SEL_DEV_INTERF_IDX_BITS 6
+#define _DMA_V2_ADDR_SEL_DEV_INTERF_INFO_IDX (_DMA_V2_ADDR_SEL_DEV_INTERF_IDX_IDX+_DMA_V2_ADDR_SEL_DEV_INTERF_IDX_BITS)
+#define _DMA_V2_ADDR_SEL_DEV_INTERF_INFO_BITS 4
+
+#define _DMA_V2_FSM_GROUP_CMD_IDX 0
+#define _DMA_V2_FSM_GROUP_ADDR_SRC_IDX 1
+#define _DMA_V2_FSM_GROUP_ADDR_DEST_IDX 2
+#define _DMA_V2_FSM_GROUP_CMD_CTRL_IDX 3
+#define _DMA_V2_FSM_GROUP_FSM_CTRL_IDX 4
+#define _DMA_V2_FSM_GROUP_FSM_PACK_IDX 5
+#define _DMA_V2_FSM_GROUP_FSM_REQ_IDX 6
+#define _DMA_V2_FSM_GROUP_FSM_WR_IDX 7
+
+#define _DMA_V2_FSM_GROUP_FSM_CTRL_STATE_IDX 0
+#define _DMA_V2_FSM_GROUP_FSM_CTRL_REQ_DEV_IDX 1
+#define _DMA_V2_FSM_GROUP_FSM_CTRL_REQ_ADDR_IDX 2
+#define _DMA_V2_FSM_GROUP_FSM_CTRL_REQ_STRIDE_IDX 3
+#define _DMA_V2_FSM_GROUP_FSM_CTRL_REQ_XB_IDX 4
+#define _DMA_V2_FSM_GROUP_FSM_CTRL_REQ_YB_IDX 5
+#define _DMA_V2_FSM_GROUP_FSM_CTRL_PACK_REQ_DEV_IDX 6
+#define _DMA_V2_FSM_GROUP_FSM_CTRL_PACK_WR_DEV_IDX 7
+#define _DMA_V2_FSM_GROUP_FSM_CTRL_WR_ADDR_IDX 8
+#define _DMA_V2_FSM_GROUP_FSM_CTRL_WR_STRIDE_IDX 9
+#define _DMA_V2_FSM_GROUP_FSM_CTRL_PACK_REQ_XB_IDX 10
+#define _DMA_V2_FSM_GROUP_FSM_CTRL_PACK_WR_YB_IDX 11
+#define _DMA_V2_FSM_GROUP_FSM_CTRL_PACK_WR_XB_IDX 12
+#define _DMA_V2_FSM_GROUP_FSM_CTRL_PACK_ELEM_REQ_IDX 13
+#define _DMA_V2_FSM_GROUP_FSM_CTRL_PACK_ELEM_WR_IDX 14
+#define _DMA_V2_FSM_GROUP_FSM_CTRL_PACK_S_Z_IDX 15
+#define _DMA_V2_FSM_GROUP_FSM_CTRL_CMD_CTRL_IDX 15
+
+#define _DMA_V2_FSM_GROUP_FSM_PACK_STATE_IDX 0
+#define _DMA_V2_FSM_GROUP_FSM_PACK_CNT_YB_IDX 1
+#define _DMA_V2_FSM_GROUP_FSM_PACK_CNT_XB_REQ_IDX 2
+#define _DMA_V2_FSM_GROUP_FSM_PACK_CNT_XB_WR_IDX 3
+
+#define _DMA_V2_FSM_GROUP_FSM_REQ_STATE_IDX 0
+#define _DMA_V2_FSM_GROUP_FSM_REQ_CNT_YB_IDX 1
+#define _DMA_V2_FSM_GROUP_FSM_REQ_CNT_XB_IDX 2
+#define _DMA_V2_FSM_GROUP_FSM_REQ_XB_REMAINING_IDX 3
+#define _DMA_V2_FSM_GROUP_FSM_REQ_CNT_BURST_IDX 4
+
+#define _DMA_V2_FSM_GROUP_FSM_WR_STATE_IDX 0
+#define _DMA_V2_FSM_GROUP_FSM_WR_CNT_YB_IDX 1
+#define _DMA_V2_FSM_GROUP_FSM_WR_CNT_XB_IDX 2
+#define _DMA_V2_FSM_GROUP_FSM_WR_XB_REMAINING_IDX 3
+#define _DMA_V2_FSM_GROUP_FSM_WR_CNT_BURST_IDX 4
+
+#define _DMA_V2_DEV_INTERF_REQ_SIDE_STATUS_IDX 0
+#define _DMA_V2_DEV_INTERF_SEND_SIDE_STATUS_IDX 1
+#define _DMA_V2_DEV_INTERF_FIFO_STATUS_IDX 2
+#define _DMA_V2_DEV_INTERF_REQ_ONLY_COMPLETE_BURST_IDX 3
+#define _DMA_V2_DEV_INTERF_MAX_BURST_IDX 4
+#define _DMA_V2_DEV_INTERF_CHK_ADDR_ALIGN 5
+
+#endif /* _dma_v2_defs_h */
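The command-word fields above (CMD in bits 0..5, CHANNEL in the next 5 bits, then either the PARAM field or the A/B/Y extents) describe how a request token is packed. A minimal sketch of that packing in plain C, using only the *_IDX/*_BITS macros from this header; the helper names are illustrative and not part of the driver:

    #include <stdint.h>

    /* Place 'value' into a field starting at bit 'idx', 'bits' wide (bits < 32 here). */
    static inline uint32_t dma_v2_field(uint32_t value, unsigned int idx, unsigned int bits)
    {
    	return (value & ((1u << bits) - 1u)) << idx;
    }

    /* Example: the command word for "set channel parameter" on channel 3,
     * selecting the HEIGHT parameter. How the parameter value itself is
     * delivered afterwards is not specified by this header. */
    static inline uint32_t dma_v2_set_height_cmd(void)
    {
    	return dma_v2_field(_DMA_V2_SET_CHANNEL_PARAM_COMMAND, _DMA_V2_CMD_IDX, _DMA_V2_CMD_BITS) |
    	       dma_v2_field(3, _DMA_V2_CHANNEL_IDX, _DMA_V2_CHANNEL_BITS) |
    	       dma_v2_field(_DMA_V2_HEIGHT_PARAM, _DMA_V2_PARAM_IDX, _DMA_V2_PARAM_BITS);
    }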
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/gdc_v2_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/gdc_v2_defs.h
new file mode 100644
index 000000000000..77722d205701
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/gdc_v2_defs.h
@@ -0,0 +1,170 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef HRT_GDC_v2_defs_h_
+#define HRT_GDC_v2_defs_h_
+
+#define HRT_GDC_IS_V2
+
+#define HRT_GDC_N 1024 /* Top-level design constant, equal to the number of entries in the LUT */
+#define HRT_GDC_FRAC_BITS 10 /* Number of fractional bits in the GDC block, driven by the size of the LUT */
+
+#define HRT_GDC_BLI_FRAC_BITS 4 /* Number of fractional bits for the bi-linear interpolation type */
+#define HRT_GDC_BLI_COEF_ONE (1 << HRT_GDC_BLI_FRAC_BITS)
+
+#define HRT_GDC_BCI_COEF_BITS 14 /* 14 bits per coefficient */
+#define HRT_GDC_BCI_COEF_ONE (1 << (HRT_GDC_BCI_COEF_BITS-2)) /* We represent signed 10 bit coefficients. */
+ /* The supported range is [-256, .., +256] */
+ /* in 14-bit signed notation, */
+ /* We need all ten bits (MSB must be zero). */
+ /* -s is inserted to solve this issue, and */
+ /* therefore "1" is equal to +256. */
+#define HRT_GDC_BCI_COEF_MASK ((1 << HRT_GDC_BCI_COEF_BITS) - 1)
+
+#define HRT_GDC_LUT_BYTES (HRT_GDC_N*4*2) /* 1024 addresses, 4 coefficients per address, */
+ /* 2 bytes per coefficient */
+
+#define _HRT_GDC_REG_ALIGN 4
+
+ // 31 30 29 25 24 0
+ // |-----|---|--------|------------------------|
+ // | CMD | C | Reg_ID | Value |
+
+
+ // There are just two commands possible for the GDC block:
+ // 1 - Configure reg
+ // 0 - Data token
+
+ // C - Reserved bit
+ // Used in protocol to indicate whether it is C-run or other type of runs
+ // In case of C-run, this bit has a value of 1, for all the other runs, it is 0.
+
+ // Reg_ID - Address of the register to be configured
+
+ // Value - Value to store to the addressed register, maximum of 24 bits
+
+ // Configure reg command is not followed by any other token.
+ // The address of the register and the data to be filled in is contained in the same token
+
+ // When the first data token is received, it must be:
+ // 1. FRX and FRY (device configured in one of the scaling modes) ***DEFAULT MODE***, or,
+ // 2. P0'X (device configured in one of the tetragon modes)
+ // After the first data token is received, pre-defined number of tokens with the following meaning follow:
+ // 1. two tokens: SRC address ; DST address
+ // 2. nine tokens: P0'Y, .., P3'Y ; SRC address ; DST address
+
+#define HRT_GDC_CONFIG_CMD 1
+#define HRT_GDC_DATA_CMD 0
+
+
+#define HRT_GDC_CMD_POS 31
+#define HRT_GDC_CMD_BITS 1
+#define HRT_GDC_CRUN_POS 30
+#define HRT_GDC_REG_ID_POS 25
+#define HRT_GDC_REG_ID_BITS 5
+#define HRT_GDC_DATA_POS 0
+#define HRT_GDC_DATA_BITS 25
+
+#define HRT_GDC_FRYIPXFRX_BITS 26
+#define HRT_GDC_P0X_BITS 23
+
+
+#define HRT_GDC_MAX_OXDIM (8192-64)
+#define HRT_GDC_MAX_OYDIM 4095
+#define HRT_GDC_MAX_IXDIM (8192-64)
+#define HRT_GDC_MAX_IYDIM 4095
+#define HRT_GDC_MAX_DS_FAC 16
+#define HRT_GDC_MAX_DX (HRT_GDC_MAX_DS_FAC*HRT_GDC_N - 1)
+#define HRT_GDC_MAX_DY HRT_GDC_MAX_DX
+
+
+/* GDC lookup table entries are 10-bit values, but they're
+ stored 2 by 2 as 32 bit values, yielding 16 bits per entry.
+ A GDC lookup table contains 64 * 4 elements */
+
+#define HRT_GDC_PERF_1_1_pix 0
+#define HRT_GDC_PERF_2_1_pix 1
+#define HRT_GDC_PERF_1_2_pix 2
+#define HRT_GDC_PERF_2_2_pix 3
+
+#define HRT_GDC_NND_MODE 0
+#define HRT_GDC_BLI_MODE 1
+#define HRT_GDC_BCI_MODE 2
+#define HRT_GDC_LUT_MODE 3
+
+#define HRT_GDC_SCAN_STB 0
+#define HRT_GDC_SCAN_STR 1
+
+#define HRT_GDC_MODE_SCALING 0
+#define HRT_GDC_MODE_TETRAGON 1
+
+#define HRT_GDC_LUT_COEFF_OFFSET 16
+#define HRT_GDC_FRY_BIT_OFFSET 16
+// FRYIPXFRX is the only register where we store two values in one field,
+// to save one token in the scaling protocol.
+// Like this, we have three tokens in the scaling protocol,
+// Otherwise, we would have had four.
+// The register bit-map is:
+// 31 26 25 16 15 10 9 0
+// |------|----------|------|----------|
+// | XXXX | FRY | IPX | FRX |
+
+
+#define HRT_GDC_CE_FSM0_POS 0
+#define HRT_GDC_CE_FSM0_LEN 2
+#define HRT_GDC_CE_OPY_POS 2
+#define HRT_GDC_CE_OPY_LEN 14
+#define HRT_GDC_CE_OPX_POS 16
+#define HRT_GDC_CE_OPX_LEN 16
+// CHK_ENGINE register bit-map:
+// 31 16 15 2 1 0
+// |----------------|-----------|----|
+// | OPX | OPY |FSM0|
+// However, for the time being at least,
+// this implementation is meaningless in hss model,
+// So, we just return 0
+
+
+#define HRT_GDC_CHK_ENGINE_IDX 0
+#define HRT_GDC_WOIX_IDX 1
+#define HRT_GDC_WOIY_IDX 2
+#define HRT_GDC_BPP_IDX 3
+#define HRT_GDC_FRYIPXFRX_IDX 4
+#define HRT_GDC_OXDIM_IDX 5
+#define HRT_GDC_OYDIM_IDX 6
+#define HRT_GDC_SRC_ADDR_IDX 7
+#define HRT_GDC_SRC_END_ADDR_IDX 8
+#define HRT_GDC_SRC_WRAP_ADDR_IDX 9
+#define HRT_GDC_SRC_STRIDE_IDX 10
+#define HRT_GDC_DST_ADDR_IDX 11
+#define HRT_GDC_DST_STRIDE_IDX 12
+#define HRT_GDC_DX_IDX 13
+#define HRT_GDC_DY_IDX 14
+#define HRT_GDC_P0X_IDX 15
+#define HRT_GDC_P0Y_IDX 16
+#define HRT_GDC_P1X_IDX 17
+#define HRT_GDC_P1Y_IDX 18
+#define HRT_GDC_P2X_IDX 19
+#define HRT_GDC_P2Y_IDX 20
+#define HRT_GDC_P3X_IDX 21
+#define HRT_GDC_P3Y_IDX 22
+#define HRT_GDC_PERF_POINT_IDX 23 // 1x1 ; 1x2 ; 2x1 ; 2x2 pixels per cc
+#define HRT_GDC_INTERP_TYPE_IDX 24 // NND ; BLI ; BCI ; LUT
+#define HRT_GDC_SCAN_IDX 25 // 0 = STB (Slide To Bottom) ; 1 = STR (Slide To Right)
+#define HRT_GDC_PROC_MODE_IDX 26 // 0 = Scaling ; 1 = Tetragon
+
+#define HRT_GDC_LUT_IDX 32
+
+
+#endif /* HRT_GDC_v2_defs_h_ */
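The token layout described in the comments above (CMD in bit 31, the C bit in bit 30, Reg_ID in bits 29..25, Value in the low 25 bits) maps directly onto the *_POS/*_BITS macros. A small illustrative sketch, assuming plain C; the helper name is not from the driver:

    #include <stdint.h>

    /* Build a GDC "configure reg" token: CMD = 1, C = 0, Reg_ID, 25-bit value. */
    static inline uint32_t gdc_cfg_token(uint32_t reg_id, uint32_t value)
    {
    	return ((uint32_t)HRT_GDC_CONFIG_CMD << HRT_GDC_CMD_POS) |
    	       ((reg_id & ((1u << HRT_GDC_REG_ID_BITS) - 1u)) << HRT_GDC_REG_ID_POS) |
    	       ((value  & ((1u << HRT_GDC_DATA_BITS)   - 1u)) << HRT_GDC_DATA_POS);
    }

    /* For instance, selecting bicubic interpolation would be
     *   gdc_cfg_token(HRT_GDC_INTERP_TYPE_IDX, HRT_GDC_BCI_MODE);
     */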
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/gp_regs_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/gp_regs_defs.h
new file mode 100644
index 000000000000..34e734f6648e
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/gp_regs_defs.h
@@ -0,0 +1,22 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _gp_regs_defs_h
+#define _gp_regs_defs_h
+
+#define _HRT_GP_REGS_IS_FWD_REG_IDX 0
+
+#define _HRT_GP_REGS_REG_ALIGN 4
+
+#endif /* _gp_regs_defs_h */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/gp_timer_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/gp_timer_defs.h
new file mode 100644
index 000000000000..3082e2f5e014
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/gp_timer_defs.h
@@ -0,0 +1,36 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _gp_timer_defs_h
+#define _gp_timer_defs_h
+
+#define _HRT_GP_TIMER_REG_ALIGN 4
+
+#define HIVE_GP_TIMER_RESET_REG_IDX 0
+#define HIVE_GP_TIMER_OVERALL_ENABLE_REG_IDX 1
+#define HIVE_GP_TIMER_ENABLE_REG_IDX(timer) (HIVE_GP_TIMER_OVERALL_ENABLE_REG_IDX + 1 + timer)
+#define HIVE_GP_TIMER_VALUE_REG_IDX(timer,timers) (HIVE_GP_TIMER_ENABLE_REG_IDX(timers) + timer)
+#define HIVE_GP_TIMER_COUNT_TYPE_REG_IDX(timer,timers) (HIVE_GP_TIMER_VALUE_REG_IDX(timers, timers) + timer)
+#define HIVE_GP_TIMER_SIGNAL_SELECT_REG_IDX(timer,timers) (HIVE_GP_TIMER_COUNT_TYPE_REG_IDX(timers, timers) + timer)
+#define HIVE_GP_TIMER_IRQ_TRIGGER_VALUE_REG_IDX(irq,timers) (HIVE_GP_TIMER_SIGNAL_SELECT_REG_IDX(timers, timers) + irq)
+#define HIVE_GP_TIMER_IRQ_TIMER_SELECT_REG_IDX(irq,timers,irqs) (HIVE_GP_TIMER_IRQ_TRIGGER_VALUE_REG_IDX(irqs, timers) + irq)
+#define HIVE_GP_TIMER_IRQ_ENABLE_REG_IDX(irq,timers,irqs) (HIVE_GP_TIMER_IRQ_TIMER_SELECT_REG_IDX(irqs, timers, irqs) + irq)
+
+#define HIVE_GP_TIMER_COUNT_TYPE_HIGH 0
+#define HIVE_GP_TIMER_COUNT_TYPE_LOW 1
+#define HIVE_GP_TIMER_COUNT_TYPE_POSEDGE 2
+#define HIVE_GP_TIMER_COUNT_TYPE_NEGEDGE 3
+#define HIVE_GP_TIMER_COUNT_TYPES 4
+
+#endif /* _gp_timer_defs_h */
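The index macros above chain the register banks back to back: reset, overall enable, one enable register per timer, then the per-timer value, count-type and signal-select banks, followed by the IRQ registers. A worked example, assuming 8 timers (HIVE_GP_TIMER_NUM_COUNTERS, defined further on in hive_isp_css_defs.h) and the usual HRT convention that the register index times _HRT_GP_TIMER_REG_ALIGN gives the byte offset:

    /* Illustration only:
     *   HIVE_GP_TIMER_ENABLE_REG_IDX(2)        == 1 + 1 + 2            == 4
     *   HIVE_GP_TIMER_VALUE_REG_IDX(2, 8)      == (1 + 1 + 8) + 2      == 12
     *   HIVE_GP_TIMER_COUNT_TYPE_REG_IDX(2, 8) == (1 + 1 + 8 + 8) + 2  == 20
     * so timer 2's value register would sit at byte offset 12 * 4 = 48
     * from the gp_timer block base.
     */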
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/gpio_block_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/gpio_block_defs.h
new file mode 100644
index 000000000000..a807d4c99041
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/gpio_block_defs.h
@@ -0,0 +1,42 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _gpio_block_defs_h_
+#define _gpio_block_defs_h_
+
+#define _HRT_GPIO_BLOCK_REG_ALIGN 4
+
+/* R/W registers */
+#define _gpio_block_reg_do_e 0
+#define _gpio_block_reg_do_select 1
+#define _gpio_block_reg_do_0 2
+#define _gpio_block_reg_do_1 3
+#define _gpio_block_reg_do_pwm_cnt_0 4
+#define _gpio_block_reg_do_pwm_cnt_1 5
+#define _gpio_block_reg_do_pwm_cnt_2 6
+#define _gpio_block_reg_do_pwm_cnt_3 7
+#define _gpio_block_reg_do_pwm_main_cnt 8
+#define _gpio_block_reg_do_pwm_enable 9
+#define _gpio_block_reg_di_debounce_sel 10
+#define _gpio_block_reg_di_debounce_cnt_0 11
+#define _gpio_block_reg_di_debounce_cnt_1 12
+#define _gpio_block_reg_di_debounce_cnt_2 13
+#define _gpio_block_reg_di_debounce_cnt_3 14
+#define _gpio_block_reg_di_active_level 15
+
+
+/* read-only registers */
+#define _gpio_block_reg_di 16
+
+#endif /* _gpio_block_defs_h_ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/hive_isp_css_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/hive_isp_css_defs.h
new file mode 100644
index 000000000000..39584996092e
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/hive_isp_css_defs.h
@@ -0,0 +1,416 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _hive_isp_css_defs_h__
+#define _hive_isp_css_defs_h__
+
+#define HIVE_ISP_CSS_IS_2400B0_SYSTEM
+
+#define HIVE_ISP_CTRL_DATA_WIDTH 32
+#define HIVE_ISP_CTRL_ADDRESS_WIDTH 32
+#define HIVE_ISP_CTRL_MAX_BURST_SIZE 1
+#define HIVE_ISP_DDR_ADDRESS_WIDTH 36
+
+#define HIVE_ISP_HOST_MAX_BURST_SIZE 8 /* host supports bursts in order to prevent repeating DDRAM accesses */
+#define HIVE_ISP_NUM_GPIO_PINS 12
+
+/* This list of vector num_elems/elem_bits pairs is valid both as a C initializer
+ and in the DMA parameter list */
+#define HIVE_ISP_DDR_DMA_SPECS {{32, 8}, {16, 16}, {18, 14}, {25, 10}, {21, 12}}
+#define HIVE_ISP_DDR_WORD_BITS 256
+#define HIVE_ISP_DDR_WORD_BYTES (HIVE_ISP_DDR_WORD_BITS/8)
+#define HIVE_ISP_DDR_BYTES (512 * 1024 * 1024) /* hss only */
+#define HIVE_ISP_DDR_BYTES_RTL (127 * 1024 * 1024) /* RTL only */
+#define HIVE_ISP_DDR_SMALL_BYTES (128 * 256 / 8)
+#define HIVE_ISP_PAGE_SHIFT 12
+#define HIVE_ISP_PAGE_SIZE (1<<HIVE_ISP_PAGE_SHIFT)
+
+#define CSS_DDR_WORD_BITS HIVE_ISP_DDR_WORD_BITS
+#define CSS_DDR_WORD_BYTES HIVE_ISP_DDR_WORD_BYTES
+
+/* If HIVE_ISP_DDR_BASE_OFFSET is set to a non-zero value, the wide bus just before the DDRAM gets an extra dummy port where */
+/* address range 0 .. HIVE_ISP_DDR_BASE_OFFSET-1 maps onto. This effectively creates an offset for the DDRAM from system perspective */
+#define HIVE_ISP_DDR_BASE_OFFSET 0x120000000 /* 0x200000 */
+
+#define HIVE_DMA_ISP_BUS_CONN 0
+#define HIVE_DMA_ISP_DDR_CONN 1
+#define HIVE_DMA_BUS_DDR_CONN 2
+#define HIVE_DMA_ISP_MASTER master_port0
+#define HIVE_DMA_BUS_MASTER master_port1
+#define HIVE_DMA_DDR_MASTER master_port2
+
+#define HIVE_DMA_NUM_CHANNELS 32 /* old value was 8 */
+#define HIVE_DMA_CMD_FIFO_DEPTH 24 /* old value was 12 */
+
+#define HIVE_IF_PIXEL_WIDTH 12
+
+#define HIVE_MMU_TLB_SETS 8
+#define HIVE_MMU_TLB_SET_BLOCKS 8
+#define HIVE_MMU_TLB_BLOCK_ELEMENTS 8
+#define HIVE_MMU_PAGE_TABLE_LEVELS 2
+#define HIVE_MMU_PAGE_BYTES HIVE_ISP_PAGE_SIZE
+
+#define HIVE_ISP_CH_ID_BITS 2
+#define HIVE_ISP_FMT_TYPE_BITS 5
+#define HIVE_ISP_ISEL_SEL_BITS 2
+
+#define HIVE_GP_REGS_SDRAM_WAKEUP_IDX 0
+#define HIVE_GP_REGS_IDLE_IDX 1
+#define HIVE_GP_REGS_IRQ_0_IDX 2
+#define HIVE_GP_REGS_IRQ_1_IDX 3
+#define HIVE_GP_REGS_SP_STREAM_STAT_IDX 4
+#define HIVE_GP_REGS_SP_STREAM_STAT_B_IDX 5
+#define HIVE_GP_REGS_ISP_STREAM_STAT_IDX 6
+#define HIVE_GP_REGS_MOD_STREAM_STAT_IDX 7
+#define HIVE_GP_REGS_SP_STREAM_STAT_IRQ_COND_IDX 8
+#define HIVE_GP_REGS_SP_STREAM_STAT_B_IRQ_COND_IDX 9
+#define HIVE_GP_REGS_ISP_STREAM_STAT_IRQ_COND_IDX 10
+#define HIVE_GP_REGS_MOD_STREAM_STAT_IRQ_COND_IDX 11
+#define HIVE_GP_REGS_SP_STREAM_STAT_IRQ_ENABLE_IDX 12
+#define HIVE_GP_REGS_SP_STREAM_STAT_B_IRQ_ENABLE_IDX 13
+#define HIVE_GP_REGS_ISP_STREAM_STAT_IRQ_ENABLE_IDX 14
+#define HIVE_GP_REGS_MOD_STREAM_STAT_IRQ_ENABLE_IDX 15
+#define HIVE_GP_REGS_SWITCH_PRIM_IF_IDX 16
+#define HIVE_GP_REGS_SWITCH_GDC1_IDX 17
+#define HIVE_GP_REGS_SWITCH_GDC2_IDX 18
+#define HIVE_GP_REGS_SRST_IDX 19
+#define HIVE_GP_REGS_SLV_REG_SRST_IDX 20
+#define HIVE_GP_REGS_VISA_REG_IDX 21
+
+/* Bit numbers of the soft reset register */
+#define HIVE_GP_REGS_SRST_ISYS_CBUS 0
+#define HIVE_GP_REGS_SRST_ISEL_CBUS 1
+#define HIVE_GP_REGS_SRST_IFMT_CBUS 2
+#define HIVE_GP_REGS_SRST_GPDEV_CBUS 3
+#define HIVE_GP_REGS_SRST_GPIO 4
+#define HIVE_GP_REGS_SRST_TC 5
+#define HIVE_GP_REGS_SRST_GPTIMER 6
+#define HIVE_GP_REGS_SRST_FACELLFIFOS 7
+#define HIVE_GP_REGS_SRST_D_OSYS 8
+#define HIVE_GP_REGS_SRST_IFT_SEC_PIPE 9
+#define HIVE_GP_REGS_SRST_GDC1 10
+#define HIVE_GP_REGS_SRST_GDC2 11
+#define HIVE_GP_REGS_SRST_VEC_BUS 12
+#define HIVE_GP_REGS_SRST_ISP 13
+#define HIVE_GP_REGS_SRST_SLV_GRP_BUS 14
+#define HIVE_GP_REGS_SRST_DMA 15
+#define HIVE_GP_REGS_SRST_SF_ISP_SP 16
+#define HIVE_GP_REGS_SRST_SF_PIF_CELLS 17
+#define HIVE_GP_REGS_SRST_SF_SIF_SP 18
+#define HIVE_GP_REGS_SRST_SF_MC_SP 19
+#define HIVE_GP_REGS_SRST_SF_ISYS_SP 20
+#define HIVE_GP_REGS_SRST_SF_DMA_CELLS 21
+#define HIVE_GP_REGS_SRST_SF_GDC1_CELLS 22
+#define HIVE_GP_REGS_SRST_SF_GDC2_CELLS 23
+#define HIVE_GP_REGS_SRST_SP 24
+#define HIVE_GP_REGS_SRST_OCP2CIO 25
+#define HIVE_GP_REGS_SRST_NBUS 26
+#define HIVE_GP_REGS_SRST_HOST12BUS 27
+#define HIVE_GP_REGS_SRST_WBUS 28
+#define HIVE_GP_REGS_SRST_IC_OSYS 29
+#define HIVE_GP_REGS_SRST_WBUS_IC 30
+
+/* Bit numbers of the slave register soft reset register */
+#define HIVE_GP_REGS_SLV_REG_SRST_DMA 0
+#define HIVE_GP_REGS_SLV_REG_SRST_GDC1 1
+#define HIVE_GP_REGS_SLV_REG_SRST_GDC2 2
+
+/* order of the input bits for the irq controller */
+#define HIVE_GP_DEV_IRQ_GPIO_PIN_0_BIT_ID 0
+#define HIVE_GP_DEV_IRQ_GPIO_PIN_1_BIT_ID 1
+#define HIVE_GP_DEV_IRQ_GPIO_PIN_2_BIT_ID 2
+#define HIVE_GP_DEV_IRQ_GPIO_PIN_3_BIT_ID 3
+#define HIVE_GP_DEV_IRQ_GPIO_PIN_4_BIT_ID 4
+#define HIVE_GP_DEV_IRQ_GPIO_PIN_5_BIT_ID 5
+#define HIVE_GP_DEV_IRQ_GPIO_PIN_6_BIT_ID 6
+#define HIVE_GP_DEV_IRQ_GPIO_PIN_7_BIT_ID 7
+#define HIVE_GP_DEV_IRQ_GPIO_PIN_8_BIT_ID 8
+#define HIVE_GP_DEV_IRQ_GPIO_PIN_9_BIT_ID 9
+#define HIVE_GP_DEV_IRQ_GPIO_PIN_10_BIT_ID 10
+#define HIVE_GP_DEV_IRQ_GPIO_PIN_11_BIT_ID 11
+#define HIVE_GP_DEV_IRQ_SP_BIT_ID 12
+#define HIVE_GP_DEV_IRQ_ISP_BIT_ID 13
+#define HIVE_GP_DEV_IRQ_ISYS_BIT_ID 14
+#define HIVE_GP_DEV_IRQ_ISEL_BIT_ID 15
+#define HIVE_GP_DEV_IRQ_IFMT_BIT_ID 16
+#define HIVE_GP_DEV_IRQ_SP_STREAM_MON_BIT_ID 17
+#define HIVE_GP_DEV_IRQ_ISP_STREAM_MON_BIT_ID 18
+#define HIVE_GP_DEV_IRQ_MOD_STREAM_MON_BIT_ID 19
+#define HIVE_GP_DEV_IRQ_ISP_PMEM_ERROR_BIT_ID 20
+#define HIVE_GP_DEV_IRQ_ISP_BAMEM_ERROR_BIT_ID 21
+#define HIVE_GP_DEV_IRQ_ISP_DMEM_ERROR_BIT_ID 22
+#define HIVE_GP_DEV_IRQ_SP_ICACHE_MEM_ERROR_BIT_ID 23
+#define HIVE_GP_DEV_IRQ_SP_DMEM_ERROR_BIT_ID 24
+#define HIVE_GP_DEV_IRQ_MMU_CACHE_MEM_ERROR_BIT_ID 25
+#define HIVE_GP_DEV_IRQ_GP_TIMER_0_BIT_ID 26
+#define HIVE_GP_DEV_IRQ_GP_TIMER_1_BIT_ID 27
+#define HIVE_GP_DEV_IRQ_SW_PIN_0_BIT_ID 28
+#define HIVE_GP_DEV_IRQ_SW_PIN_1_BIT_ID 29
+#define HIVE_GP_DEV_IRQ_DMA_BIT_ID 30
+#define HIVE_GP_DEV_IRQ_SP_STREAM_MON_B_BIT_ID 31
+
+#define HIVE_GP_REGS_NUM_SW_IRQ_REGS 2
+
+/* order of the input bits for the timed controller */
+#define HIVE_GP_DEV_TC_GPIO_PIN_0_BIT_ID 0
+#define HIVE_GP_DEV_TC_GPIO_PIN_1_BIT_ID 1
+#define HIVE_GP_DEV_TC_GPIO_PIN_2_BIT_ID 2
+#define HIVE_GP_DEV_TC_GPIO_PIN_3_BIT_ID 3
+#define HIVE_GP_DEV_TC_GPIO_PIN_4_BIT_ID 4
+#define HIVE_GP_DEV_TC_GPIO_PIN_5_BIT_ID 5
+#define HIVE_GP_DEV_TC_GPIO_PIN_6_BIT_ID 6
+#define HIVE_GP_DEV_TC_GPIO_PIN_7_BIT_ID 7
+#define HIVE_GP_DEV_TC_GPIO_PIN_8_BIT_ID 8
+#define HIVE_GP_DEV_TC_GPIO_PIN_9_BIT_ID 9
+#define HIVE_GP_DEV_TC_GPIO_PIN_10_BIT_ID 10
+#define HIVE_GP_DEV_TC_GPIO_PIN_11_BIT_ID 11
+#define HIVE_GP_DEV_TC_SP_BIT_ID 12
+#define HIVE_GP_DEV_TC_ISP_BIT_ID 13
+#define HIVE_GP_DEV_TC_ISYS_BIT_ID 14
+#define HIVE_GP_DEV_TC_ISEL_BIT_ID 15
+#define HIVE_GP_DEV_TC_IFMT_BIT_ID 16
+#define HIVE_GP_DEV_TC_GP_TIMER_0_BIT_ID 17
+#define HIVE_GP_DEV_TC_GP_TIMER_1_BIT_ID 18
+#define HIVE_GP_DEV_TC_MIPI_SOL_BIT_ID 19
+#define HIVE_GP_DEV_TC_MIPI_EOL_BIT_ID 20
+#define HIVE_GP_DEV_TC_MIPI_SOF_BIT_ID 21
+#define HIVE_GP_DEV_TC_MIPI_EOF_BIT_ID 22
+#define HIVE_GP_DEV_TC_INPSYS_SM 23
+
+/* definitions for the gp_timer block */
+#define HIVE_GP_TIMER_0 0
+#define HIVE_GP_TIMER_1 1
+#define HIVE_GP_TIMER_2 2
+#define HIVE_GP_TIMER_3 3
+#define HIVE_GP_TIMER_4 4
+#define HIVE_GP_TIMER_5 5
+#define HIVE_GP_TIMER_6 6
+#define HIVE_GP_TIMER_7 7
+#define HIVE_GP_TIMER_NUM_COUNTERS 8
+
+#define HIVE_GP_TIMER_IRQ_0 0
+#define HIVE_GP_TIMER_IRQ_1 1
+#define HIVE_GP_TIMER_NUM_IRQS 2
+
+#define HIVE_GP_TIMER_GPIO_0_BIT_ID 0
+#define HIVE_GP_TIMER_GPIO_1_BIT_ID 1
+#define HIVE_GP_TIMER_GPIO_2_BIT_ID 2
+#define HIVE_GP_TIMER_GPIO_3_BIT_ID 3
+#define HIVE_GP_TIMER_GPIO_4_BIT_ID 4
+#define HIVE_GP_TIMER_GPIO_5_BIT_ID 5
+#define HIVE_GP_TIMER_GPIO_6_BIT_ID 6
+#define HIVE_GP_TIMER_GPIO_7_BIT_ID 7
+#define HIVE_GP_TIMER_GPIO_8_BIT_ID 8
+#define HIVE_GP_TIMER_GPIO_9_BIT_ID 9
+#define HIVE_GP_TIMER_GPIO_10_BIT_ID 10
+#define HIVE_GP_TIMER_GPIO_11_BIT_ID 11
+#define HIVE_GP_TIMER_INP_SYS_IRQ 12
+#define HIVE_GP_TIMER_ISEL_IRQ 13
+#define HIVE_GP_TIMER_IFMT_IRQ 14
+#define HIVE_GP_TIMER_SP_STRMON_IRQ 15
+#define HIVE_GP_TIMER_SP_B_STRMON_IRQ 16
+#define HIVE_GP_TIMER_ISP_STRMON_IRQ 17
+#define HIVE_GP_TIMER_MOD_STRMON_IRQ 18
+#define HIVE_GP_TIMER_ISP_PMEM_ERROR_IRQ 19
+#define HIVE_GP_TIMER_ISP_BAMEM_ERROR_IRQ 20
+#define HIVE_GP_TIMER_ISP_DMEM_ERROR_IRQ 21
+#define HIVE_GP_TIMER_SP_ICACHE_MEM_ERROR_IRQ 22
+#define HIVE_GP_TIMER_SP_DMEM_ERROR_IRQ 23
+#define HIVE_GP_TIMER_SP_OUT_RUN_DP 24
+#define HIVE_GP_TIMER_SP_WIRE_DEBUG_LM_MSINK_RUN_I0_I0 25
+#define HIVE_GP_TIMER_SP_WIRE_DEBUG_LM_MSINK_RUN_I0_I1 26
+#define HIVE_GP_TIMER_SP_WIRE_DEBUG_LM_MSINK_RUN_I0_I2 27
+#define HIVE_GP_TIMER_SP_WIRE_DEBUG_LM_MSINK_RUN_I0_I3 28
+#define HIVE_GP_TIMER_SP_WIRE_DEBUG_LM_MSINK_RUN_I0_I4 29
+#define HIVE_GP_TIMER_SP_WIRE_DEBUG_LM_MSINK_RUN_I0_I5 30
+#define HIVE_GP_TIMER_SP_WIRE_DEBUG_LM_MSINK_RUN_I0_I6 31
+#define HIVE_GP_TIMER_SP_WIRE_DEBUG_LM_MSINK_RUN_I0_I7 32
+#define HIVE_GP_TIMER_SP_WIRE_DEBUG_LM_MSINK_RUN_I0_I8 33
+#define HIVE_GP_TIMER_SP_WIRE_DEBUG_LM_MSINK_RUN_I0_I9 34
+#define HIVE_GP_TIMER_SP_WIRE_DEBUG_LM_MSINK_RUN_I0_I10 35
+#define HIVE_GP_TIMER_SP_WIRE_DEBUG_LM_MSINK_RUN_I1_I0 36
+#define HIVE_GP_TIMER_SP_WIRE_DEBUG_LM_MSINK_RUN_I2_I0 37
+#define HIVE_GP_TIMER_SP_WIRE_DEBUG_LM_MSINK_RUN_I3_I0 38
+#define HIVE_GP_TIMER_ISP_OUT_RUN_DP 39
+#define HIVE_GP_TIMER_ISP_WIRE_DEBUG_LM_MSINK_RUN_I0_I0 40
+#define HIVE_GP_TIMER_ISP_WIRE_DEBUG_LM_MSINK_RUN_I0_I1 41
+#define HIVE_GP_TIMER_ISP_WIRE_DEBUG_LM_MSINK_RUN_I1_I0 42
+#define HIVE_GP_TIMER_ISP_WIRE_DEBUG_LM_MSINK_RUN_I2_I0 43
+#define HIVE_GP_TIMER_ISP_WIRE_DEBUG_LM_MSINK_RUN_I2_I1 44
+#define HIVE_GP_TIMER_ISP_WIRE_DEBUG_LM_MSINK_RUN_I2_I2 45
+#define HIVE_GP_TIMER_ISP_WIRE_DEBUG_LM_MSINK_RUN_I2_I3 46
+#define HIVE_GP_TIMER_ISP_WIRE_DEBUG_LM_MSINK_RUN_I2_I4 47
+#define HIVE_GP_TIMER_ISP_WIRE_DEBUG_LM_MSINK_RUN_I2_I5 48
+#define HIVE_GP_TIMER_ISP_WIRE_DEBUG_LM_MSINK_RUN_I2_I6 49
+#define HIVE_GP_TIMER_ISP_WIRE_DEBUG_LM_MSINK_RUN_I3_I0 50
+#define HIVE_GP_TIMER_ISP_WIRE_DEBUG_LM_MSINK_RUN_I4_I0 51
+#define HIVE_GP_TIMER_ISP_WIRE_DEBUG_LM_MSINK_RUN_I5_I0 52
+#define HIVE_GP_TIMER_ISP_WIRE_DEBUG_LM_MSINK_RUN_I6_I0 53
+#define HIVE_GP_TIMER_ISP_WIRE_DEBUG_LM_MSINK_RUN_I7_I0 54
+#define HIVE_GP_TIMER_MIPI_SOL_BIT_ID 55
+#define HIVE_GP_TIMER_MIPI_EOL_BIT_ID 56
+#define HIVE_GP_TIMER_MIPI_SOF_BIT_ID 57
+#define HIVE_GP_TIMER_MIPI_EOF_BIT_ID 58
+#define HIVE_GP_TIMER_INPSYS_SM 59
+
+/* port definitions for the streaming monitors */
+/* port definitions SP streaming monitor, monitors the status of streaming ports at the SP side of the streaming FIFO's */
+#define SP_STR_MON_PORT_SP2SIF 0
+#define SP_STR_MON_PORT_SIF2SP 1
+#define SP_STR_MON_PORT_SP2MC 2
+#define SP_STR_MON_PORT_MC2SP 3
+#define SP_STR_MON_PORT_SP2DMA 4
+#define SP_STR_MON_PORT_DMA2SP 5
+#define SP_STR_MON_PORT_SP2ISP 6
+#define SP_STR_MON_PORT_ISP2SP 7
+#define SP_STR_MON_PORT_SP2GPD 8
+#define SP_STR_MON_PORT_FA2SP 9
+#define SP_STR_MON_PORT_SP2ISYS 10
+#define SP_STR_MON_PORT_ISYS2SP 11
+#define SP_STR_MON_PORT_SP2PIFA 12
+#define SP_STR_MON_PORT_PIFA2SP 13
+#define SP_STR_MON_PORT_SP2PIFB 14
+#define SP_STR_MON_PORT_PIFB2SP 15
+
+#define SP_STR_MON_PORT_B_SP2GDC1 0
+#define SP_STR_MON_PORT_B_GDC12SP 1
+#define SP_STR_MON_PORT_B_SP2GDC2 2
+#define SP_STR_MON_PORT_B_GDC22SP 3
+
+/* previously used SP streaming monitor port identifiers, kept for backward compatibility */
+#define SP_STR_MON_PORT_SND_SIF SP_STR_MON_PORT_SP2SIF
+#define SP_STR_MON_PORT_RCV_SIF SP_STR_MON_PORT_SIF2SP
+#define SP_STR_MON_PORT_SND_MC SP_STR_MON_PORT_SP2MC
+#define SP_STR_MON_PORT_RCV_MC SP_STR_MON_PORT_MC2SP
+#define SP_STR_MON_PORT_SND_DMA SP_STR_MON_PORT_SP2DMA
+#define SP_STR_MON_PORT_RCV_DMA SP_STR_MON_PORT_DMA2SP
+#define SP_STR_MON_PORT_SND_ISP SP_STR_MON_PORT_SP2ISP
+#define SP_STR_MON_PORT_RCV_ISP SP_STR_MON_PORT_ISP2SP
+#define SP_STR_MON_PORT_SND_GPD SP_STR_MON_PORT_SP2GPD
+#define SP_STR_MON_PORT_RCV_GPD SP_STR_MON_PORT_FA2SP
+/* Deprecated */
+#define SP_STR_MON_PORT_SND_PIF SP_STR_MON_PORT_SP2PIFA
+#define SP_STR_MON_PORT_RCV_PIF SP_STR_MON_PORT_PIFA2SP
+#define SP_STR_MON_PORT_SND_PIFB SP_STR_MON_PORT_SP2PIFB
+#define SP_STR_MON_PORT_RCV_PIFB SP_STR_MON_PORT_PIFB2SP
+
+#define SP_STR_MON_PORT_SND_PIF_A SP_STR_MON_PORT_SP2PIFA
+#define SP_STR_MON_PORT_RCV_PIF_A SP_STR_MON_PORT_PIFA2SP
+#define SP_STR_MON_PORT_SND_PIF_B SP_STR_MON_PORT_SP2PIFB
+#define SP_STR_MON_PORT_RCV_PIF_B SP_STR_MON_PORT_PIFB2SP
+
+/* port definitions ISP streaming monitor, monitors the status of streaming ports at the ISP side of the streaming FIFO's */
+#define ISP_STR_MON_PORT_ISP2PIFA 0
+#define ISP_STR_MON_PORT_PIFA2ISP 1
+#define ISP_STR_MON_PORT_ISP2PIFB 2
+#define ISP_STR_MON_PORT_PIFB2ISP 3
+#define ISP_STR_MON_PORT_ISP2DMA 4
+#define ISP_STR_MON_PORT_DMA2ISP 5
+#define ISP_STR_MON_PORT_ISP2GDC1 6
+#define ISP_STR_MON_PORT_GDC12ISP 7
+#define ISP_STR_MON_PORT_ISP2GDC2 8
+#define ISP_STR_MON_PORT_GDC22ISP 9
+#define ISP_STR_MON_PORT_ISP2GPD 10
+#define ISP_STR_MON_PORT_FA2ISP 11
+#define ISP_STR_MON_PORT_ISP2SP 12
+#define ISP_STR_MON_PORT_SP2ISP 13
+
+/* previously used ISP streaming monitor port identifiers, kept for backward compatibility */
+#define ISP_STR_MON_PORT_SND_PIF_A ISP_STR_MON_PORT_ISP2PIFA
+#define ISP_STR_MON_PORT_RCV_PIF_A ISP_STR_MON_PORT_PIFA2ISP
+#define ISP_STR_MON_PORT_SND_PIF_B ISP_STR_MON_PORT_ISP2PIFB
+#define ISP_STR_MON_PORT_RCV_PIF_B ISP_STR_MON_PORT_PIFB2ISP
+#define ISP_STR_MON_PORT_SND_DMA ISP_STR_MON_PORT_ISP2DMA
+#define ISP_STR_MON_PORT_RCV_DMA ISP_STR_MON_PORT_DMA2ISP
+#define ISP_STR_MON_PORT_SND_GDC ISP_STR_MON_PORT_ISP2GDC1
+#define ISP_STR_MON_PORT_RCV_GDC ISP_STR_MON_PORT_GDC12ISP
+#define ISP_STR_MON_PORT_SND_GPD ISP_STR_MON_PORT_ISP2GPD
+#define ISP_STR_MON_PORT_RCV_GPD ISP_STR_MON_PORT_FA2ISP
+#define ISP_STR_MON_PORT_SND_SP ISP_STR_MON_PORT_ISP2SP
+#define ISP_STR_MON_PORT_RCV_SP ISP_STR_MON_PORT_SP2ISP
+
+/* port definitions MOD streaming monitor, monitors the status of streaming ports at the module side of the streaming FIFO's */
+
+#define MOD_STR_MON_PORT_PIFA2CELLS 0
+#define MOD_STR_MON_PORT_CELLS2PIFA 1
+#define MOD_STR_MON_PORT_PIFB2CELLS 2
+#define MOD_STR_MON_PORT_CELLS2PIFB 3
+#define MOD_STR_MON_PORT_SIF2SP 4
+#define MOD_STR_MON_PORT_SP2SIF 5
+#define MOD_STR_MON_PORT_MC2SP 6
+#define MOD_STR_MON_PORT_SP2MC 7
+#define MOD_STR_MON_PORT_DMA2ISP 8
+#define MOD_STR_MON_PORT_ISP2DMA 9
+#define MOD_STR_MON_PORT_DMA2SP 10
+#define MOD_STR_MON_PORT_SP2DMA 11
+#define MOD_STR_MON_PORT_GDC12CELLS 12
+#define MOD_STR_MON_PORT_CELLS2GDC1 13
+#define MOD_STR_MON_PORT_GDC22CELLS 14
+#define MOD_STR_MON_PORT_CELLS2GDC2 15
+
+#define MOD_STR_MON_PORT_SND_PIF_A 0
+#define MOD_STR_MON_PORT_RCV_PIF_A 1
+#define MOD_STR_MON_PORT_SND_PIF_B 2
+#define MOD_STR_MON_PORT_RCV_PIF_B 3
+#define MOD_STR_MON_PORT_SND_SIF 4
+#define MOD_STR_MON_PORT_RCV_SIF 5
+#define MOD_STR_MON_PORT_SND_MC 6
+#define MOD_STR_MON_PORT_RCV_MC 7
+#define MOD_STR_MON_PORT_SND_DMA2ISP 8
+#define MOD_STR_MON_PORT_RCV_DMA_FR_ISP 9
+#define MOD_STR_MON_PORT_SND_DMA2SP 10
+#define MOD_STR_MON_PORT_RCV_DMA_FR_SP 11
+#define MOD_STR_MON_PORT_SND_GDC 12
+#define MOD_STR_MON_PORT_RCV_GDC 13
+
+
+/* testbench signals: */
+
+/* testbench GP adapter register ids */
+#define HIVE_TESTBENCH_GPIO_DATA_OUT_REG_IDX 0
+#define HIVE_TESTBENCH_GPIO_DIR_OUT_REG_IDX 1
+#define HIVE_TESTBENCH_IRQ_REG_IDX 2
+#define HIVE_TESTBENCH_SDRAM_WAKEUP_REG_IDX 3
+#define HIVE_TESTBENCH_IDLE_REG_IDX 4
+#define HIVE_TESTBENCH_GPIO_DATA_IN_REG_IDX 5
+#define HIVE_TESTBENCH_MIPI_BFM_EN_REG_IDX 6
+#define HIVE_TESTBENCH_CSI_CONFIG_REG_IDX 7
+#define HIVE_TESTBENCH_DDR_STALL_EN_REG_IDX 8
+
+#define HIVE_TESTBENCH_ISP_PMEM_ERROR_IRQ_REG_IDX 9
+#define HIVE_TESTBENCH_ISP_BAMEM_ERROR_IRQ_REG_IDX 10
+#define HIVE_TESTBENCH_ISP_DMEM_ERROR_IRQ_REG_IDX 11
+#define HIVE_TESTBENCH_SP_ICACHE_MEM_ERROR_IRQ_REG_IDX 12
+#define HIVE_TESTBENCH_SP_DMEM_ERROR_IRQ_REG_IDX 13
+
+/* Signal monitor input bit ids */
+#define HIVE_TESTBENCH_SIG_MON_GPIO_PIN_O_BIT_ID 0
+#define HIVE_TESTBENCH_SIG_MON_GPIO_PIN_1_BIT_ID 1
+#define HIVE_TESTBENCH_SIG_MON_GPIO_PIN_2_BIT_ID 2
+#define HIVE_TESTBENCH_SIG_MON_GPIO_PIN_3_BIT_ID 3
+#define HIVE_TESTBENCH_SIG_MON_GPIO_PIN_4_BIT_ID 4
+#define HIVE_TESTBENCH_SIG_MON_GPIO_PIN_5_BIT_ID 5
+#define HIVE_TESTBENCH_SIG_MON_GPIO_PIN_6_BIT_ID 6
+#define HIVE_TESTBENCH_SIG_MON_GPIO_PIN_7_BIT_ID 7
+#define HIVE_TESTBENCH_SIG_MON_GPIO_PIN_8_BIT_ID 8
+#define HIVE_TESTBENCH_SIG_MON_GPIO_PIN_9_BIT_ID 9
+#define HIVE_TESTBENCH_SIG_MON_GPIO_PIN_10_BIT_ID 10
+#define HIVE_TESTBENCH_SIG_MON_GPIO_PIN_11_BIT_ID 11
+#define HIVE_TESTBENCH_SIG_MON_IRQ_PIN_BIT_ID 12
+#define HIVE_TESTBENCH_SIG_MON_SDRAM_WAKEUP_PIN_BIT_ID 13
+#define HIVE_TESTBENCH_SIG_MON_IDLE_PIN_BIT_ID 14
+
+#define ISP2400_DEBUG_NETWORK 1
+
+#endif /* _hive_isp_css_defs_h__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/hive_isp_css_host_ids_hrt.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/hive_isp_css_host_ids_hrt.h
new file mode 100644
index 000000000000..f4d033e221cc
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/hive_isp_css_host_ids_hrt.h
@@ -0,0 +1,84 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _hive_isp_css_host_ids_hrt_h_
+#define _hive_isp_css_host_ids_hrt_h_
+
+/* ISP_CSS identifiers */
+#define INP_SYS testbench_isp_inp_sys
+#define ISYS_GP_REGS testbench_isp_inp_sys_gpreg
+#define ISYS_IRQ_CTRL testbench_isp_inp_sys_irq_ctrl
+#define ISYS_CAP_A testbench_isp_inp_sys_capt_unit_a
+#define ISYS_CAP_B testbench_isp_inp_sys_capt_unit_b
+#define ISYS_CAP_C testbench_isp_inp_sys_capt_unit_c
+#define ISYS_INP_BUF testbench_isp_inp_sys_input_buffer
+#define ISYS_INP_CTRL testbench_isp_inp_sys_inp_ctrl
+#define ISYS_ACQ testbench_isp_inp_sys_acq_unit
+
+#define ISP testbench_isp_isp
+#define SP testbench_isp_scp
+
+#define IF_PRIM testbench_isp_ifmt_ift_prim
+#define IF_PRIM_B testbench_isp_ifmt_ift_prim_b
+#define IF_SEC testbench_isp_ifmt_ift_sec
+#define IF_SEC_MASTER testbench_isp_ifmt_ift_sec_mt_out
+#define STR_TO_MEM testbench_isp_ifmt_mem_cpy
+#define IFMT_GP_REGS testbench_isp_ifmt_gp_reg
+#define IFMT_IRQ_CTRL testbench_isp_ifmt_irq_ctrl
+
+#define CSS_RECEIVER testbench_isp_inp_sys_csi_receiver
+
+#define TC testbench_isp_gpd_tc
+#define GPTIMER testbench_isp_gpd_gptimer
+#define DMA testbench_isp_isp_dma
+#define GDC testbench_isp_gdc1
+#define GDC2 testbench_isp_gdc2
+#define IRQ_CTRL testbench_isp_gpd_irq_ctrl
+#define GPIO testbench_isp_gpd_c_gpio
+#define GP_REGS testbench_isp_gpd_gp_reg
+#define ISEL_GP_REGS testbench_isp_isel_gpr
+#define ISEL_IRQ_CTRL testbench_isp_isel_irq_ctrl
+#define DATA_MMU testbench_isp_data_out_sys_c_mmu
+#define ICACHE_MMU testbench_isp_icache_out_sys_c_mmu
+
+/* next is actually not FIFO but FIFO adapter, or slave to streaming adapter */
+#define ISP_SP_FIFO testbench_isp_fa_sp_isp
+#define ISEL_FIFO testbench_isp_isel_sf_fa_in
+
+#define FIFO_GPF_SP testbench_isp_sf_fa2sp_in
+#define FIFO_GPF_ISP testbench_isp_sf_fa2isp_in
+#define FIFO_SP_GPF testbench_isp_sf_sp2fa_in
+#define FIFO_ISP_GPF testbench_isp_sf_isp2fa_in
+
+#define DATA_OCP_MASTER testbench_isp_data_out_sys_cio2ocp_wide_data_out_mt
+#define ICACHE_OCP_MASTER testbench_isp_icache_out_sys_cio2ocp_wide_data_out_mt
+
+#define SP_IN_FIFO testbench_isp_sf_fa2sp_in
+#define SP_OUT_FIFO testbench_isp_sf_sp2fa_out
+#define ISP_IN_FIFO testbench_isp_sf_fa2isp_in
+#define ISP_OUT_FIFO testbench_isp_sf_isp2fa_out
+#define GEN_SHORT_PACK_PORT testbench_isp_inp_sys_csi_str_mon_fa_gensh_out
+#define ISYS_GP_REGS testbench_isp_inp_sys_gpreg
+
+/* Testbench identifiers */
+#define DDR testbench_ddram
+#define DDR_SMALL testbench_ddram_small
+#define XMEM DDR
+#define GPIO_ADAPTER testbench_gp_adapter
+#define SIG_MONITOR testbench_sig_mon
+#define DDR_SLAVE testbench_ddram_ip0
+#define DDR_SMALL_SLAVE testbench_ddram_small_ip0
+#define HOST_MASTER host_op0
+
+#endif /* _hive_isp_css_host_ids_hrt_h_ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/hive_isp_css_irq_types_hrt.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/hive_isp_css_irq_types_hrt.h
new file mode 100644
index 000000000000..04c237083835
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/hive_isp_css_irq_types_hrt.h
@@ -0,0 +1,72 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _HIVE_ISP_CSS_IRQ_TYPES_HRT_H_
+#define _HIVE_ISP_CSS_IRQ_TYPES_HRT_H_
+
+/*
+ * These are the indices of each interrupt in the interrupt
+ * controller's registers. These can be used as the irq_id
+ * argument to the hrt functions in irq_controller.h.
+ *
+ * The definitions are taken from <system>_defs.h
+ */
+typedef enum hrt_isp_css_irq {
+ hrt_isp_css_irq_gpio_pin_0 = HIVE_GP_DEV_IRQ_GPIO_PIN_0_BIT_ID ,
+ hrt_isp_css_irq_gpio_pin_1 = HIVE_GP_DEV_IRQ_GPIO_PIN_1_BIT_ID ,
+ hrt_isp_css_irq_gpio_pin_2 = HIVE_GP_DEV_IRQ_GPIO_PIN_2_BIT_ID ,
+ hrt_isp_css_irq_gpio_pin_3 = HIVE_GP_DEV_IRQ_GPIO_PIN_3_BIT_ID ,
+ hrt_isp_css_irq_gpio_pin_4 = HIVE_GP_DEV_IRQ_GPIO_PIN_4_BIT_ID ,
+ hrt_isp_css_irq_gpio_pin_5 = HIVE_GP_DEV_IRQ_GPIO_PIN_5_BIT_ID ,
+ hrt_isp_css_irq_gpio_pin_6 = HIVE_GP_DEV_IRQ_GPIO_PIN_6_BIT_ID ,
+ hrt_isp_css_irq_gpio_pin_7 = HIVE_GP_DEV_IRQ_GPIO_PIN_7_BIT_ID ,
+ hrt_isp_css_irq_gpio_pin_8 = HIVE_GP_DEV_IRQ_GPIO_PIN_8_BIT_ID ,
+ hrt_isp_css_irq_gpio_pin_9 = HIVE_GP_DEV_IRQ_GPIO_PIN_9_BIT_ID ,
+ hrt_isp_css_irq_gpio_pin_10 = HIVE_GP_DEV_IRQ_GPIO_PIN_10_BIT_ID ,
+ hrt_isp_css_irq_gpio_pin_11 = HIVE_GP_DEV_IRQ_GPIO_PIN_11_BIT_ID ,
+ hrt_isp_css_irq_sp = HIVE_GP_DEV_IRQ_SP_BIT_ID ,
+ hrt_isp_css_irq_isp = HIVE_GP_DEV_IRQ_ISP_BIT_ID ,
+ hrt_isp_css_irq_isys = HIVE_GP_DEV_IRQ_ISYS_BIT_ID ,
+ hrt_isp_css_irq_isel = HIVE_GP_DEV_IRQ_ISEL_BIT_ID ,
+ hrt_isp_css_irq_ifmt = HIVE_GP_DEV_IRQ_IFMT_BIT_ID ,
+ hrt_isp_css_irq_sp_stream_mon = HIVE_GP_DEV_IRQ_SP_STREAM_MON_BIT_ID ,
+ hrt_isp_css_irq_isp_stream_mon = HIVE_GP_DEV_IRQ_ISP_STREAM_MON_BIT_ID ,
+ hrt_isp_css_irq_mod_stream_mon = HIVE_GP_DEV_IRQ_MOD_STREAM_MON_BIT_ID ,
+#ifdef _HIVE_ISP_CSS_2401_SYSTEM
+ hrt_isp_css_irq_is2401 = HIVE_GP_DEV_IRQ_IS2401_BIT_ID ,
+#else
+ hrt_isp_css_irq_isp_pmem_error = HIVE_GP_DEV_IRQ_ISP_PMEM_ERROR_BIT_ID ,
+#endif
+ hrt_isp_css_irq_isp_bamem_error = HIVE_GP_DEV_IRQ_ISP_BAMEM_ERROR_BIT_ID ,
+ hrt_isp_css_irq_isp_dmem_error = HIVE_GP_DEV_IRQ_ISP_DMEM_ERROR_BIT_ID ,
+ hrt_isp_css_irq_sp_icache_mem_error = HIVE_GP_DEV_IRQ_SP_ICACHE_MEM_ERROR_BIT_ID ,
+ hrt_isp_css_irq_sp_dmem_error = HIVE_GP_DEV_IRQ_SP_DMEM_ERROR_BIT_ID ,
+ hrt_isp_css_irq_mmu_cache_mem_error = HIVE_GP_DEV_IRQ_MMU_CACHE_MEM_ERROR_BIT_ID ,
+ hrt_isp_css_irq_gp_timer_0 = HIVE_GP_DEV_IRQ_GP_TIMER_0_BIT_ID ,
+ hrt_isp_css_irq_gp_timer_1 = HIVE_GP_DEV_IRQ_GP_TIMER_1_BIT_ID ,
+ hrt_isp_css_irq_sw_pin_0 = HIVE_GP_DEV_IRQ_SW_PIN_0_BIT_ID ,
+ hrt_isp_css_irq_sw_pin_1 = HIVE_GP_DEV_IRQ_SW_PIN_1_BIT_ID ,
+ hrt_isp_css_irq_dma = HIVE_GP_DEV_IRQ_DMA_BIT_ID ,
+ hrt_isp_css_irq_sp_stream_mon_b = HIVE_GP_DEV_IRQ_SP_STREAM_MON_B_BIT_ID ,
+ /* this must (obviously) be the last one in the enum */
+ hrt_isp_css_irq_num_irqs
+} hrt_isp_css_irq_t;
+
+typedef enum hrt_isp_css_irq_status {
+ hrt_isp_css_irq_status_error,
+ hrt_isp_css_irq_status_more_irqs,
+ hrt_isp_css_irq_status_success
+} hrt_isp_css_irq_status_t;
+
+#endif /* _HIVE_ISP_CSS_IRQ_TYPES_HRT_H_ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/hive_isp_css_streaming_to_mipi_types_hrt.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/hive_isp_css_streaming_to_mipi_types_hrt.h
new file mode 100644
index 000000000000..b4211a0c631a
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/hive_isp_css_streaming_to_mipi_types_hrt.h
@@ -0,0 +1,26 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _hive_isp_css_streaming_to_mipi_types_hrt_h_
+#define _hive_isp_css_streaming_to_mipi_types_hrt_h_
+
+#include <streaming_to_mipi_defs.h>
+
+#define _HIVE_ISP_CH_ID_MASK ((1U << HIVE_ISP_CH_ID_BITS)-1)
+#define _HIVE_ISP_FMT_TYPE_MASK ((1U << HIVE_ISP_FMT_TYPE_BITS)-1)
+
+#define _HIVE_STR_TO_MIPI_FMT_TYPE_LSB (HIVE_STR_TO_MIPI_CH_ID_LSB + HIVE_ISP_CH_ID_BITS)
+#define _HIVE_STR_TO_MIPI_DATA_B_LSB (HIVE_STR_TO_MIPI_DATA_A_LSB + HIVE_IF_PIXEL_WIDTH)
+
+#endif /* _hive_isp_css_streaming_to_mipi_types_hrt_h_ */
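The two derived LSB values above simply place the format-type field directly after the channel-id field, and the second data sample directly after the first one. As a hedged sketch only (it leans on HIVE_STR_TO_MIPI_CH_ID_LSB from the included streaming_to_mipi_defs.h and on the CH_ID/FMT_TYPE widths from hive_isp_css_defs.h), the sideband bits of a streaming-to-MIPI word could be assembled as:

    /* Illustrative helper, not from the driver. */
    static inline unsigned int str_to_mipi_sideband(unsigned int ch_id, unsigned int fmt_type)
    {
    	return ((ch_id    & _HIVE_ISP_CH_ID_MASK)    << HIVE_STR_TO_MIPI_CH_ID_LSB) |
    	       ((fmt_type & _HIVE_ISP_FMT_TYPE_MASK) << _HIVE_STR_TO_MIPI_FMT_TYPE_LSB);
    }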
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/hive_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/hive_types.h
new file mode 100644
index 000000000000..58b0e6effbd0
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/hive_types.h
@@ -0,0 +1,128 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _HRT_HIVE_TYPES_H
+#define _HRT_HIVE_TYPES_H
+
+#include "version.h"
+#include "defs.h"
+
+#ifndef HRTCAT3
+#define _HRTCAT3(m,n,o) m##n##o
+#define HRTCAT3(m,n,o) _HRTCAT3(m,n,o)
+#endif
+
+#ifndef HRTCAT4
+#define _HRTCAT4(m,n,o,p) m##n##o##p
+#define HRTCAT4(m,n,o,p) _HRTCAT4(m,n,o,p)
+#endif
+
+#ifndef HRTMIN
+#define HRTMIN(a,b) (((a)<(b))?(a):(b))
+#endif
+
+#ifndef HRTMAX
+#define HRTMAX(a,b) (((a)>(b))?(a):(b))
+#endif
+
+/* boolean data type */
+typedef unsigned int hive_bool;
+#define hive_false 0
+#define hive_true 1
+
+typedef char hive_int8;
+typedef short hive_int16;
+typedef int hive_int32;
+typedef long long hive_int64;
+
+typedef unsigned char hive_uint8;
+typedef unsigned short hive_uint16;
+typedef unsigned int hive_uint32;
+typedef unsigned long long hive_uint64;
+
+/* by default assume 32 bit master port (both data and address) */
+#ifndef HRT_DATA_WIDTH
+#define HRT_DATA_WIDTH 32
+#endif
+#ifndef HRT_ADDRESS_WIDTH
+#define HRT_ADDRESS_WIDTH 32
+#endif
+
+#define HRT_DATA_BYTES (HRT_DATA_WIDTH/8)
+#define HRT_ADDRESS_BYTES (HRT_ADDRESS_WIDTH/8)
+
+#if HRT_DATA_WIDTH == 64
+typedef hive_uint64 hrt_data;
+#elif HRT_DATA_WIDTH == 32
+typedef hive_uint32 hrt_data;
+#else
+#error data width not supported
+#endif
+
+#if HRT_ADDRESS_WIDTH == 64
+typedef hive_uint64 hrt_address;
+#elif HRT_ADDRESS_WIDTH == 32
+typedef hive_uint32 hrt_address;
+#else
+#error address width not supported
+#endif
+
+/* The SP side representation of an HMM virtual address */
+typedef hive_uint32 hrt_vaddress;
+
+/* use 64 bit addresses in simulation, where possible */
+typedef hive_uint64 hive_sim_address;
+
+/* below is for csim, not for hrt, rename and move this elsewhere */
+
+typedef unsigned int hive_uint;
+typedef hive_uint32 hive_address;
+typedef hive_address hive_slave_address;
+typedef hive_address hive_mem_address;
+
+/* MMIO devices */
+typedef hive_uint hive_mmio_id;
+typedef hive_mmio_id hive_slave_id;
+typedef hive_mmio_id hive_port_id;
+typedef hive_mmio_id hive_master_id;
+typedef hive_mmio_id hive_mem_id;
+typedef hive_mmio_id hive_dev_id;
+typedef hive_mmio_id hive_fifo_id;
+
+typedef hive_uint hive_hier_id;
+typedef hive_hier_id hive_device_id;
+typedef hive_device_id hive_proc_id;
+typedef hive_device_id hive_cell_id;
+typedef hive_device_id hive_host_id;
+typedef hive_device_id hive_bus_id;
+typedef hive_device_id hive_bridge_id;
+typedef hive_device_id hive_fifo_adapter_id;
+typedef hive_device_id hive_custom_device_id;
+
+typedef hive_uint hive_slot_id;
+typedef hive_uint hive_fu_id;
+typedef hive_uint hive_reg_file_id;
+typedef hive_uint hive_reg_id;
+
+/* Streaming devices */
+typedef hive_uint hive_outport_id;
+typedef hive_uint hive_inport_id;
+
+typedef hive_uint hive_msink_id;
+
+/* HRT specific */
+typedef char* hive_program;
+typedef char* hive_function;
+
+#endif /* _HRT_HIVE_TYPES_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/if_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/if_defs.h
new file mode 100644
index 000000000000..7d39e45796ae
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/if_defs.h
@@ -0,0 +1,22 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _IF_DEFS_H
+#define _IF_DEFS_H
+
+#define HIVE_IF_FRAME_REQUEST 0xA000
+#define HIVE_IF_LINES_REQUEST 0xB000
+#define HIVE_IF_VECTORS_REQUEST 0xC000
+
+#endif /* _IF_DEFS_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/input_formatter_subsystem_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/input_formatter_subsystem_defs.h
new file mode 100644
index 000000000000..16bfe1d80bc9
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/input_formatter_subsystem_defs.h
@@ -0,0 +1,53 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _if_subsystem_defs_h
+#define _if_subsystem_defs_h
+
+#define HIVE_IFMT_GP_REGS_INPUT_SWITCH_LUT_REG_0 0
+#define HIVE_IFMT_GP_REGS_INPUT_SWITCH_LUT_REG_1 1
+#define HIVE_IFMT_GP_REGS_INPUT_SWITCH_LUT_REG_2 2
+#define HIVE_IFMT_GP_REGS_INPUT_SWITCH_LUT_REG_3 3
+#define HIVE_IFMT_GP_REGS_INPUT_SWITCH_LUT_REG_4 4
+#define HIVE_IFMT_GP_REGS_INPUT_SWITCH_LUT_REG_5 5
+#define HIVE_IFMT_GP_REGS_INPUT_SWITCH_LUT_REG_6 6
+#define HIVE_IFMT_GP_REGS_INPUT_SWITCH_LUT_REG_7 7
+#define HIVE_IFMT_GP_REGS_INPUT_SWITCH_FSYNC_LUT_REG 8
+#define HIVE_IFMT_GP_REGS_SRST_IDX 9
+#define HIVE_IFMT_GP_REGS_SLV_REG_SRST_IDX 10
+
+#define HIVE_IFMT_GP_REGS_CH_ID_FMT_TYPE_IDX 11
+
+#define HIVE_IFMT_GP_REGS_INPUT_SWITCH_LUT_REG_BASE HIVE_IFMT_GP_REGS_INPUT_SWITCH_LUT_REG_0
+
+/* order of the input bits for the ifmt irq controller */
+#define HIVE_IFMT_IRQ_IFT_PRIM_BIT_ID 0
+#define HIVE_IFMT_IRQ_IFT_PRIM_B_BIT_ID 1
+#define HIVE_IFMT_IRQ_IFT_SEC_BIT_ID 2
+#define HIVE_IFMT_IRQ_MEM_CPY_BIT_ID 3
+#define HIVE_IFMT_IRQ_SIDEBAND_CHANGED_BIT_ID 4
+
+/* order of the input bits for the ifmt Soft reset register */
+#define HIVE_IFMT_GP_REGS_SRST_IFT_PRIM_BIT_IDX 0
+#define HIVE_IFMT_GP_REGS_SRST_IFT_PRIM_B_BIT_IDX 1
+#define HIVE_IFMT_GP_REGS_SRST_IFT_SEC_BIT_IDX 2
+#define HIVE_IFMT_GP_REGS_SRST_MEM_CPY_BIT_IDX 3
+
+/* order of the input bits for the ifmt Soft reset register */
+#define HIVE_IFMT_GP_REGS_SLV_REG_SRST_IFT_PRIM_BIT_IDX 0
+#define HIVE_IFMT_GP_REGS_SLV_REG_SRST_IFT_PRIM_B_BIT_IDX 1
+#define HIVE_IFMT_GP_REGS_SLV_REG_SRST_IFT_SEC_BIT_IDX 2
+#define HIVE_IFMT_GP_REGS_SLV_REG_SRST_MEM_CPY_BIT_IDX 3
+
+#endif /* _if_subsystem_defs_h */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/input_selector_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/input_selector_defs.h
new file mode 100644
index 000000000000..87fbf82edb5b
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/input_selector_defs.h
@@ -0,0 +1,89 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _input_selector_defs_h
+#define _input_selector_defs_h
+
+#ifndef HIVE_ISP_ISEL_SEL_BITS
+#define HIVE_ISP_ISEL_SEL_BITS 2
+#endif
+
+#ifndef HIVE_ISP_CH_ID_BITS
+#define HIVE_ISP_CH_ID_BITS 2
+#endif
+
+#ifndef HIVE_ISP_FMT_TYPE_BITS
+#define HIVE_ISP_FMT_TYPE_BITS 5
+#endif
+
+/* gp_register register id's -- Outputs */
+#define HIVE_ISEL_GP_REGS_SYNCGEN_ENABLE_IDX 0
+#define HIVE_ISEL_GP_REGS_SYNCGEN_FREE_RUNNING_IDX 1
+#define HIVE_ISEL_GP_REGS_SYNCGEN_PAUSE_IDX 2
+#define HIVE_ISEL_GP_REGS_SYNCGEN_NR_FRAMES_IDX 3
+#define HIVE_ISEL_GP_REGS_SYNCGEN_NR_PIX_IDX 4
+#define HIVE_ISEL_GP_REGS_SYNCGEN_NR_LINES_IDX 5
+#define HIVE_ISEL_GP_REGS_SYNCGEN_HBLANK_CYCLES_IDX 6
+#define HIVE_ISEL_GP_REGS_SYNCGEN_VBLANK_CYCLES_IDX 7
+
+#define HIVE_ISEL_GP_REGS_SOF_IDX 8
+#define HIVE_ISEL_GP_REGS_EOF_IDX 9
+#define HIVE_ISEL_GP_REGS_SOL_IDX 10
+#define HIVE_ISEL_GP_REGS_EOL_IDX 11
+
+#define HIVE_ISEL_GP_REGS_PRBS_ENABLE 12
+#define HIVE_ISEL_GP_REGS_PRBS_ENABLE_PORT_B 13
+#define HIVE_ISEL_GP_REGS_PRBS_LFSR_RESET_VALUE 14
+
+#define HIVE_ISEL_GP_REGS_TPG_ENABLE 15
+#define HIVE_ISEL_GP_REGS_TPG_ENABLE_PORT_B 16
+#define HIVE_ISEL_GP_REGS_TPG_HOR_CNT_MASK_IDX 17
+#define HIVE_ISEL_GP_REGS_TPG_VER_CNT_MASK_IDX 18
+#define HIVE_ISEL_GP_REGS_TPG_XY_CNT_MASK_IDX 19
+#define HIVE_ISEL_GP_REGS_TPG_HOR_CNT_DELTA_IDX 20
+#define HIVE_ISEL_GP_REGS_TPG_VER_CNT_DELTA_IDX 21
+#define HIVE_ISEL_GP_REGS_TPG_MODE_IDX 22
+#define HIVE_ISEL_GP_REGS_TPG_R1_IDX 23
+#define HIVE_ISEL_GP_REGS_TPG_G1_IDX 24
+#define HIVE_ISEL_GP_REGS_TPG_B1_IDX 25
+#define HIVE_ISEL_GP_REGS_TPG_R2_IDX 26
+#define HIVE_ISEL_GP_REGS_TPG_G2_IDX 27
+#define HIVE_ISEL_GP_REGS_TPG_B2_IDX 28
+
+
+#define HIVE_ISEL_GP_REGS_CH_ID_IDX 29
+#define HIVE_ISEL_GP_REGS_FMT_TYPE_IDX 30
+#define HIVE_ISEL_GP_REGS_DATA_SEL_IDX 31
+#define HIVE_ISEL_GP_REGS_SBAND_SEL_IDX 32
+#define HIVE_ISEL_GP_REGS_SYNC_SEL_IDX 33
+#define HIVE_ISEL_GP_REGS_SRST_IDX 37
+
+#define HIVE_ISEL_GP_REGS_SRST_SYNCGEN_BIT 0
+#define HIVE_ISEL_GP_REGS_SRST_PRBS_BIT 1
+#define HIVE_ISEL_GP_REGS_SRST_TPG_BIT 2
+#define HIVE_ISEL_GP_REGS_SRST_FIFO_BIT 3
+
+/* gp_register register id's -- Inputs */
+#define HIVE_ISEL_GP_REGS_SYNCGEN_HOR_CNT_IDX 34
+#define HIVE_ISEL_GP_REGS_SYNCGEN_VER_CNT_IDX 35
+#define HIVE_ISEL_GP_REGS_SYNCGEN_FRAMES_CNT_IDX 36
+
+/* irq sources isel irq controller */
+#define HIVE_ISEL_IRQ_SYNC_GEN_SOF_BIT_ID 0
+#define HIVE_ISEL_IRQ_SYNC_GEN_EOF_BIT_ID 1
+#define HIVE_ISEL_IRQ_SYNC_GEN_SOL_BIT_ID 2
+#define HIVE_ISEL_IRQ_SYNC_GEN_EOL_BIT_ID 3
+#define HIVE_ISEL_IRQ_NUM_IRQS 4
+
+#endif /* _input_selector_defs_h */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/input_switch_2400_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/input_switch_2400_defs.h
new file mode 100644
index 000000000000..20a13c4cdb56
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/input_switch_2400_defs.h
@@ -0,0 +1,30 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _input_switch_2400_defs_h
+#define _input_switch_2400_defs_h
+
+#define _HIVE_INPUT_SWITCH_GET_LUT_REG_ID(ch_id, fmt_type) (((ch_id)*2) + ((fmt_type)>=16))
+#define _HIVE_INPUT_SWITCH_GET_LUT_REG_LSB(fmt_type) (((fmt_type)%16) * 2)
+
+#define HIVE_INPUT_SWITCH_SELECT_NO_OUTPUT 0
+#define HIVE_INPUT_SWITCH_SELECT_IF_PRIM 1
+#define HIVE_INPUT_SWITCH_SELECT_IF_SEC 2
+#define HIVE_INPUT_SWITCH_SELECT_STR_TO_MEM 3
+#define HIVE_INPUT_SWITCH_VSELECT_NO_OUTPUT 0
+#define HIVE_INPUT_SWITCH_VSELECT_IF_PRIM 1
+#define HIVE_INPUT_SWITCH_VSELECT_IF_SEC 2
+#define HIVE_INPUT_SWITCH_VSELECT_STR_TO_MEM 4
+
+#endif /* _input_switch_2400_defs_h */
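The two lookup macros above capture how the input switch LUT is addressed: each channel id owns two 32-bit LUT registers, and every format type maps to a 2-bit select field inside one of them. Below is a minimal host-side sketch of computing and programming a routing entry with these macros; the route_fmt_type_to() helper and its use of HIVE_INPUT_SWITCH_SELECT_IF_PRIM are illustrative assumptions, not part of the header or driver.

#include <stdint.h>
#include <stdio.h>

/* copied from input_switch_2400_defs.h */
#define _HIVE_INPUT_SWITCH_GET_LUT_REG_ID(ch_id, fmt_type) (((ch_id) * 2) + ((fmt_type) >= 16))
#define _HIVE_INPUT_SWITCH_GET_LUT_REG_LSB(fmt_type)       (((fmt_type) % 16) * 2)
#define HIVE_INPUT_SWITCH_SELECT_IF_PRIM 1

/* Program the 2-bit select field for one (channel id, format type) pair
 * in a shadow copy of the eight LUT registers. Illustrative only. */
static void route_fmt_type_to(uint32_t lut_regs[8], int ch_id, int fmt_type, uint32_t sel)
{
	int reg = _HIVE_INPUT_SWITCH_GET_LUT_REG_ID(ch_id, fmt_type); /* 0..7 */
	int lsb = _HIVE_INPUT_SWITCH_GET_LUT_REG_LSB(fmt_type);       /* 0, 2, ..., 30 */

	lut_regs[reg] &= ~(0x3u << lsb);      /* clear the old selection */
	lut_regs[reg] |= (sel & 0x3u) << lsb; /* install the new one */
}

int main(void)
{
	uint32_t lut[8] = { 0 };

	/* route channel 1, format type 18 to the primary input formatter */
	route_fmt_type_to(lut, 1, 18, HIVE_INPUT_SWITCH_SELECT_IF_PRIM);
	printf("reg %d, lsb %d, value 0x%08x\n",
	       _HIVE_INPUT_SWITCH_GET_LUT_REG_ID(1, 18),
	       _HIVE_INPUT_SWITCH_GET_LUT_REG_LSB(18),
	       lut[_HIVE_INPUT_SWITCH_GET_LUT_REG_ID(1, 18)]);
	return 0;
}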
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/input_system_ctrl_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/input_system_ctrl_defs.h
new file mode 100644
index 000000000000..a7f0ca80bc9b
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/input_system_ctrl_defs.h
@@ -0,0 +1,254 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _input_system_ctrl_defs_h
+#define _input_system_ctrl_defs_h
+
+#define _INPUT_SYSTEM_CTRL_REG_ALIGN 4 /* assuming 32 bit control bus width */
+
+/* --------------------------------------------------*/
+
+/* --------------------------------------------------*/
+/* REGISTER INFO */
+/* --------------------------------------------------*/
+
+// Number of registers
+#define ISYS_CTRL_NOF_REGS 23
+
+// Register IDs of MMIO slave-accessible registers
+#define ISYS_CTRL_CAPT_START_ADDR_A_REG_ID 0
+#define ISYS_CTRL_CAPT_START_ADDR_B_REG_ID 1
+#define ISYS_CTRL_CAPT_START_ADDR_C_REG_ID 2
+#define ISYS_CTRL_CAPT_MEM_REGION_SIZE_A_REG_ID 3
+#define ISYS_CTRL_CAPT_MEM_REGION_SIZE_B_REG_ID 4
+#define ISYS_CTRL_CAPT_MEM_REGION_SIZE_C_REG_ID 5
+#define ISYS_CTRL_CAPT_NUM_MEM_REGIONS_A_REG_ID 6
+#define ISYS_CTRL_CAPT_NUM_MEM_REGIONS_B_REG_ID 7
+#define ISYS_CTRL_CAPT_NUM_MEM_REGIONS_C_REG_ID 8
+#define ISYS_CTRL_ACQ_START_ADDR_REG_ID 9
+#define ISYS_CTRL_ACQ_MEM_REGION_SIZE_REG_ID 10
+#define ISYS_CTRL_ACQ_NUM_MEM_REGIONS_REG_ID 11
+#define ISYS_CTRL_INIT_REG_ID 12
+#define ISYS_CTRL_LAST_COMMAND_REG_ID 13
+#define ISYS_CTRL_NEXT_COMMAND_REG_ID 14
+#define ISYS_CTRL_LAST_ACKNOWLEDGE_REG_ID 15
+#define ISYS_CTRL_NEXT_ACKNOWLEDGE_REG_ID 16
+#define ISYS_CTRL_FSM_STATE_INFO_REG_ID 17
+#define ISYS_CTRL_CAPT_A_FSM_STATE_INFO_REG_ID 18
+#define ISYS_CTRL_CAPT_B_FSM_STATE_INFO_REG_ID 19
+#define ISYS_CTRL_CAPT_C_FSM_STATE_INFO_REG_ID 20
+#define ISYS_CTRL_ACQ_FSM_STATE_INFO_REG_ID 21
+#define ISYS_CTRL_CAPT_RESERVE_ONE_MEM_REGION_REG_ID 22
+
+
+/* register reset value */
+#define ISYS_CTRL_CAPT_START_ADDR_A_REG_RSTVAL 0
+#define ISYS_CTRL_CAPT_START_ADDR_B_REG_RSTVAL 0
+#define ISYS_CTRL_CAPT_START_ADDR_C_REG_RSTVAL 0
+#define ISYS_CTRL_CAPT_MEM_REGION_SIZE_A_REG_RSTVAL 128
+#define ISYS_CTRL_CAPT_MEM_REGION_SIZE_B_REG_RSTVAL 128
+#define ISYS_CTRL_CAPT_MEM_REGION_SIZE_C_REG_RSTVAL 128
+#define ISYS_CTRL_CAPT_NUM_MEM_REGIONS_A_REG_RSTVAL 3
+#define ISYS_CTRL_CAPT_NUM_MEM_REGIONS_B_REG_RSTVAL 3
+#define ISYS_CTRL_CAPT_NUM_MEM_REGIONS_C_REG_RSTVAL 3
+#define ISYS_CTRL_ACQ_START_ADDR_REG_RSTVAL 0
+#define ISYS_CTRL_ACQ_MEM_REGION_SIZE_REG_RSTVAL 128
+#define ISYS_CTRL_ACQ_NUM_MEM_REGIONS_REG_RSTVAL 3
+#define ISYS_CTRL_INIT_REG_RSTVAL 0
+#define ISYS_CTRL_LAST_COMMAND_REG_RSTVAL 15 //0x0000_000F (to signal non-valid cmd/ack after reset/soft-reset)
+#define ISYS_CTRL_NEXT_COMMAND_REG_RSTVAL 15 //0x0000_000F (to signal non-valid cmd/ack after reset/soft-reset)
+#define ISYS_CTRL_LAST_ACKNOWLEDGE_REG_RSTVAL 15 //0x0000_000F (to signal non-valid cmd/ack after reset/soft-reset)
+#define ISYS_CTRL_NEXT_ACKNOWLEDGE_REG_RSTVAL 15 //0x0000_000F (to signal non-valid cmd/ack after reset/soft-reset)
+#define ISYS_CTRL_FSM_STATE_INFO_REG_RSTVAL 0
+#define ISYS_CTRL_CAPT_A_FSM_STATE_INFO_REG_RSTVAL 0
+#define ISYS_CTRL_CAPT_B_FSM_STATE_INFO_REG_RSTVAL 0
+#define ISYS_CTRL_CAPT_C_FSM_STATE_INFO_REG_RSTVAL 0
+#define ISYS_CTRL_ACQ_FSM_STATE_INFO_REG_RSTVAL 0
+#define ISYS_CTRL_CAPT_RESERVE_ONE_MEM_REGION_REG_RSTVAL 0
+
+/* register width value */
+#define ISYS_CTRL_CAPT_START_ADDR_A_REG_WIDTH 9
+#define ISYS_CTRL_CAPT_START_ADDR_B_REG_WIDTH 9
+#define ISYS_CTRL_CAPT_START_ADDR_C_REG_WIDTH 9
+#define ISYS_CTRL_CAPT_MEM_REGION_SIZE_A_REG_WIDTH 9
+#define ISYS_CTRL_CAPT_MEM_REGION_SIZE_B_REG_WIDTH 9
+#define ISYS_CTRL_CAPT_MEM_REGION_SIZE_C_REG_WIDTH 9
+#define ISYS_CTRL_CAPT_NUM_MEM_REGIONS_A_REG_WIDTH 9
+#define ISYS_CTRL_CAPT_NUM_MEM_REGIONS_B_REG_WIDTH 9
+#define ISYS_CTRL_CAPT_NUM_MEM_REGIONS_C_REG_WIDTH 9
+#define ISYS_CTRL_ACQ_START_ADDR_REG_WIDTH 9
+#define ISYS_CTRL_ACQ_MEM_REGION_SIZE_REG_WIDTH 9
+#define ISYS_CTRL_ACQ_NUM_MEM_REGIONS_REG_WIDTH 9
+#define ISYS_CTRL_INIT_REG_WIDTH 3
+#define ISYS_CTRL_LAST_COMMAND_REG_WIDTH 32 /* slave data width */
+#define ISYS_CTRL_NEXT_COMMAND_REG_WIDTH 32
+#define ISYS_CTRL_LAST_ACKNOWLEDGE_REG_WIDTH 32
+#define ISYS_CTRL_NEXT_ACKNOWLEDGE_REG_WIDTH 32
+#define ISYS_CTRL_FSM_STATE_INFO_REG_WIDTH 32
+#define ISYS_CTRL_CAPT_A_FSM_STATE_INFO_REG_WIDTH 32
+#define ISYS_CTRL_CAPT_B_FSM_STATE_INFO_REG_WIDTH 32
+#define ISYS_CTRL_CAPT_C_FSM_STATE_INFO_REG_WIDTH 32
+#define ISYS_CTRL_ACQ_FSM_STATE_INFO_REG_WIDTH 32
+#define ISYS_CTRL_CAPT_RESERVE_ONE_MEM_REGION_REG_WIDTH 1
+
+/* bit definitions */
+
+/* --------------------------------------------------*/
+/* TOKEN INFO */
+/* --------------------------------------------------*/
+
+/*
+InpSysCaptFramesAcq 1/0 [3:0] - 'b0000
+[7:4] - CaptPortId,
+ CaptA-'b0000
+ CaptB-'b0001
+ CaptC-'b0010
+[31:16] - NOF_frames
+InpSysCaptFrameExt 2/0 [3:0] - 'b0001'
+[7:4] - CaptPortId,
+ 'b0000 - CaptA
+ 'b0001 - CaptB
+ 'b0010 - CaptC
+
+ 2/1 [31:0] - external capture address
+InpSysAcqFrame 2/0 [3:0] - 'b0010,
+[31:4] - NOF_ext_mem_words
+ 2/1 [31:0] - external memory read start address
+InpSysOverruleON 1/0 [3:0] - 'b0011,
+[7:4] - overrule port id (opid)
+ 'b0000 - CaptA
+ 'b0001 - CaptB
+ 'b0010 - CaptC
+ 'b0011 - Acq
+ 'b0100 - DMA
+
+
+InpSysOverruleOFF 1/0 [3:0] - 'b0100,
+[7:4] - overrule port id (opid)
+ 'b0000 - CaptA
+ 'b0001 - CaptB
+ 'b0010 - CaptC
+ 'b0011 - Acq
+ 'b0100 - DMA
+
+
+InpSysOverruleCmd 2/0 [3:0] - 'b0101,
+[7:4] - overrule port id (opid)
+ 'b0000 - CaptA
+ 'b0001 - CaptB
+ 'b0010 - CaptC
+ 'b0011 - Acq
+ 'b0100 - DMA
+
+
+ 2/1 [31:0] - command token value for port opid
+
+
+acknowledge tokens:
+
+InpSysAckCFA 1/0 [3:0] - 'b0000
+ [7:4] - CaptPortId,
+ CaptA-'b0000
+ CaptB- 'b0001
+ CaptC-'b0010
+ [31:16] - NOF_frames
+InpSysAckCFE 1/0 [3:0] - 'b0001'
+[7:4] - CaptPortId,
+ 'b0000 - CaptA
+ 'b0001 - CaptB
+ 'b0010 - CaptC
+
+InpSysAckAF 1/0 [3:0] - 'b0010
+InpSysAckOverruleON 1/0 [3:0] - 'b0011,
+[7:4] - overrule port id (opid)
+ 'b0000 - CaptA
+ 'b0001 - CaptB
+ 'b0010 - CaptC
+ 'b0011 - Acq
+ 'b0100 - DMA
+
+
+InpSysAckOverruleOFF 1/0 [3:0] - 'b0100,
+[7:4] - overrule port id (opid)
+ 'b0000 - CaptA
+ 'b0001 - CaptB
+ 'b0010 - CaptC
+ 'b0011 - Acq
+ 'b0100 - DMA
+
+
+InpSysAckOverrule 2/0 [3:0] - 'b0101,
+[7:4] - overrule port id (opid)
+ 'b0000 - CaptA
+ 'b0001 - CaptB
+ 'b0010 - CaptC
+ 'b0011 - Acq
+ 'b0100 - DMA
+
+
+ 2/1 [31:0] - acknowledge token value from port opid
+
+
+
+*/
+
+
+/* Command and acknowledge token IDs */
+#define ISYS_CTRL_CAPT_FRAMES_ACQ_TOKEN_ID 0 /* 0000b */
+#define ISYS_CTRL_CAPT_FRAME_EXT_TOKEN_ID 1 /* 0001b */
+#define ISYS_CTRL_ACQ_FRAME_TOKEN_ID 2 /* 0010b */
+#define ISYS_CTRL_OVERRULE_ON_TOKEN_ID 3 /* 0011b */
+#define ISYS_CTRL_OVERRULE_OFF_TOKEN_ID 4 /* 0100b */
+#define ISYS_CTRL_OVERRULE_TOKEN_ID 5 /* 0101b */
+
+#define ISYS_CTRL_ACK_CFA_TOKEN_ID 0
+#define ISYS_CTRL_ACK_CFE_TOKEN_ID 1
+#define ISYS_CTRL_ACK_AF_TOKEN_ID 2
+#define ISYS_CTRL_ACK_OVERRULE_ON_TOKEN_ID 3
+#define ISYS_CTRL_ACK_OVERRULE_OFF_TOKEN_ID 4
+#define ISYS_CTRL_ACK_OVERRULE_TOKEN_ID 5
+#define ISYS_CTRL_ACK_DEVICE_ERROR_TOKEN_ID 6
+
+#define ISYS_CTRL_TOKEN_ID_MSB 3
+#define ISYS_CTRL_TOKEN_ID_LSB 0
+#define ISYS_CTRL_PORT_ID_TOKEN_MSB 7
+#define ISYS_CTRL_PORT_ID_TOKEN_LSB 4
+#define ISYS_CTRL_NOF_CAPT_TOKEN_MSB 31
+#define ISYS_CTRL_NOF_CAPT_TOKEN_LSB 16
+#define ISYS_CTRL_NOF_EXT_TOKEN_MSB 31
+#define ISYS_CTRL_NOF_EXT_TOKEN_LSB 8
+
+#define ISYS_CTRL_TOKEN_ID_IDX 0
+#define ISYS_CTRL_TOKEN_ID_BITS (ISYS_CTRL_TOKEN_ID_MSB - ISYS_CTRL_TOKEN_ID_LSB + 1)
+#define ISYS_CTRL_PORT_ID_IDX (ISYS_CTRL_TOKEN_ID_IDX + ISYS_CTRL_TOKEN_ID_BITS)
+#define ISYS_CTRL_PORT_ID_BITS (ISYS_CTRL_PORT_ID_TOKEN_MSB - ISYS_CTRL_PORT_ID_TOKEN_LSB + 1)
+#define ISYS_CTRL_NOF_CAPT_IDX ISYS_CTRL_NOF_CAPT_TOKEN_LSB
+#define ISYS_CTRL_NOF_CAPT_BITS (ISYS_CTRL_NOF_CAPT_TOKEN_MSB - ISYS_CTRL_NOF_CAPT_TOKEN_LSB + 1)
+#define ISYS_CTRL_NOF_EXT_IDX ISYS_CTRL_NOF_EXT_TOKEN_LSB
+#define ISYS_CTRL_NOF_EXT_BITS (ISYS_CTRL_NOF_EXT_TOKEN_MSB - ISYS_CTRL_NOF_EXT_TOKEN_LSB + 1)
+
+#define ISYS_CTRL_PORT_ID_CAPT_A 0 /* device ID for capture unit A */
+#define ISYS_CTRL_PORT_ID_CAPT_B 1 /* device ID for capture unit B */
+#define ISYS_CTRL_PORT_ID_CAPT_C 2 /* device ID for capture unit C */
+#define ISYS_CTRL_PORT_ID_ACQUISITION 3 /* device ID for acquisition unit */
+#define ISYS_CTRL_PORT_ID_DMA_CAPT_A 4 /* device ID for dma unit */
+#define ISYS_CTRL_PORT_ID_DMA_CAPT_B 5 /* device ID for dma unit */
+#define ISYS_CTRL_PORT_ID_DMA_CAPT_C 6 /* device ID for dma unit */
+#define ISYS_CTRL_PORT_ID_DMA_ACQ 7 /* device ID for dma unit */
+
+#define ISYS_CTRL_NO_ACQ_ACK 16 /* no ack from acquisition unit */
+#define ISYS_CTRL_NO_DMA_ACK 0
+#define ISYS_CTRL_NO_CAPT_ACK 16
+
+#endif /* _input_system_ctrl_defs_h */
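The token description above, together with the ISYS_CTRL_*_IDX/_BITS defines, pins down how a command word is laid out. A hedged sketch of packing an InpSysCaptFramesAcq command follows; the pack_capt_frames_acq() helper is an illustrative assumption, and the resulting word would presumably be written to the next-command register, which is not shown here.

#include <stdint.h>

/* copied from input_system_ctrl_defs.h */
#define ISYS_CTRL_CAPT_FRAMES_ACQ_TOKEN_ID 0
#define ISYS_CTRL_PORT_ID_CAPT_A           0
#define ISYS_CTRL_TOKEN_ID_IDX             0
#define ISYS_CTRL_TOKEN_ID_BITS            4
#define ISYS_CTRL_PORT_ID_IDX              (ISYS_CTRL_TOKEN_ID_IDX + ISYS_CTRL_TOKEN_ID_BITS)
#define ISYS_CTRL_NOF_CAPT_IDX             16

/* Build an InpSysCaptFramesAcq command token:
 *   [3:0]   token id ('b0000)
 *   [7:4]   capture port id (CaptA/B/C)
 *   [31:16] number of frames to acquire
 * Field positions follow the *_IDX defines above. */
static uint32_t pack_capt_frames_acq(uint32_t port_id, uint32_t nof_frames)
{
	return ((uint32_t)ISYS_CTRL_CAPT_FRAMES_ACQ_TOKEN_ID << ISYS_CTRL_TOKEN_ID_IDX) |
	       ((port_id & 0xfu) << ISYS_CTRL_PORT_ID_IDX) |
	       ((nof_frames & 0xffffu) << ISYS_CTRL_NOF_CAPT_IDX);
}

/* e.g. pack_capt_frames_acq(ISYS_CTRL_PORT_ID_CAPT_A, 3) == 0x00030000 */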
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/input_system_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/input_system_defs.h
new file mode 100644
index 000000000000..ae62163034a6
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/input_system_defs.h
@@ -0,0 +1,126 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _input_system_defs_h
+#define _input_system_defs_h
+
+/* csi controller modes */
+#define HIVE_CSI_CONFIG_MAIN 0
+#define HIVE_CSI_CONFIG_STEREO1 4
+#define HIVE_CSI_CONFIG_STEREO2 8
+
+/* general purpose register IDs */
+
+/* Stream Multicast select modes */
+#define HIVE_ISYS_GPREG_MULTICAST_A_IDX 0
+#define HIVE_ISYS_GPREG_MULTICAST_B_IDX 1
+#define HIVE_ISYS_GPREG_MULTICAST_C_IDX 2
+
+/* Stream Mux select modes */
+#define HIVE_ISYS_GPREG_MUX_IDX 3
+
+/* streaming monitor status and control */
+#define HIVE_ISYS_GPREG_STRMON_STAT_IDX 4
+#define HIVE_ISYS_GPREG_STRMON_COND_IDX 5
+#define HIVE_ISYS_GPREG_STRMON_IRQ_EN_IDX 6
+#define HIVE_ISYS_GPREG_SRST_IDX 7
+#define HIVE_ISYS_GPREG_SLV_REG_SRST_IDX 8
+#define HIVE_ISYS_GPREG_REG_PORT_A_IDX 9
+#define HIVE_ISYS_GPREG_REG_PORT_B_IDX 10
+
+/* Bit numbers of the soft reset register */
+#define HIVE_ISYS_GPREG_SRST_CAPT_FIFO_A_BIT 0
+#define HIVE_ISYS_GPREG_SRST_CAPT_FIFO_B_BIT 1
+#define HIVE_ISYS_GPREG_SRST_CAPT_FIFO_C_BIT 2
+#define HIVE_ISYS_GPREG_SRST_MULTICAST_A_BIT 3
+#define HIVE_ISYS_GPREG_SRST_MULTICAST_B_BIT 4
+#define HIVE_ISYS_GPREG_SRST_MULTICAST_C_BIT 5
+#define HIVE_ISYS_GPREG_SRST_CAPT_A_BIT 6
+#define HIVE_ISYS_GPREG_SRST_CAPT_B_BIT 7
+#define HIVE_ISYS_GPREG_SRST_CAPT_C_BIT 8
+#define HIVE_ISYS_GPREG_SRST_ACQ_BIT 9
+/* For ISYS_CTRL, 5 bits are defined to allow a soft reset per sub-controller and of the top controller */
+#define HIVE_ISYS_GPREG_SRST_ISYS_CTRL_BIT 10 /* LSB of the 5-bit vector */
+#define HIVE_ISYS_GPREG_SRST_ISYS_CTRL_CAPT_A_BIT 10
+#define HIVE_ISYS_GPREG_SRST_ISYS_CTRL_CAPT_B_BIT 11
+#define HIVE_ISYS_GPREG_SRST_ISYS_CTRL_CAPT_C_BIT 12
+#define HIVE_ISYS_GPREG_SRST_ISYS_CTRL_ACQ_BIT 13
+#define HIVE_ISYS_GPREG_SRST_ISYS_CTRL_TOP_BIT 14
+/* -- */
+#define HIVE_ISYS_GPREG_SRST_STR_MUX_BIT 15
+#define HIVE_ISYS_GPREG_SRST_CIO2AHB_BIT 16
+#define HIVE_ISYS_GPREG_SRST_GEN_SHORT_FIFO_BIT 17
+#define HIVE_ISYS_GPREG_SRST_WIDE_BUS_BIT 18 // includes CIO conv
+#define HIVE_ISYS_GPREG_SRST_DMA_BIT 19
+#define HIVE_ISYS_GPREG_SRST_SF_CTRL_CAPT_A_BIT 20
+#define HIVE_ISYS_GPREG_SRST_SF_CTRL_CAPT_B_BIT 21
+#define HIVE_ISYS_GPREG_SRST_SF_CTRL_CAPT_C_BIT 22
+#define HIVE_ISYS_GPREG_SRST_SF_CTRL_ACQ_BIT 23
+#define HIVE_ISYS_GPREG_SRST_CSI_BE_OUT_BIT 24
+
+#define HIVE_ISYS_GPREG_SLV_REG_SRST_CAPT_A_BIT 0
+#define HIVE_ISYS_GPREG_SLV_REG_SRST_CAPT_B_BIT 1
+#define HIVE_ISYS_GPREG_SLV_REG_SRST_CAPT_C_BIT 2
+#define HIVE_ISYS_GPREG_SLV_REG_SRST_ACQ_BIT 3
+#define HIVE_ISYS_GPREG_SLV_REG_SRST_DMA_BIT 4
+#define HIVE_ISYS_GPREG_SLV_REG_SRST_ISYS_CTRL_BIT 5
+
+/* streaming monitor port IDs */
+#define HIVE_ISYS_STR_MON_PORT_CAPA 0
+#define HIVE_ISYS_STR_MON_PORT_CAPB 1
+#define HIVE_ISYS_STR_MON_PORT_CAPC 2
+#define HIVE_ISYS_STR_MON_PORT_ACQ 3
+#define HIVE_ISYS_STR_MON_PORT_CSS_GENSH 4
+#define HIVE_ISYS_STR_MON_PORT_SF_GENSH 5
+#define HIVE_ISYS_STR_MON_PORT_SP2ISYS 6
+#define HIVE_ISYS_STR_MON_PORT_ISYS2SP 7
+#define HIVE_ISYS_STR_MON_PORT_PIXA 8
+#define HIVE_ISYS_STR_MON_PORT_PIXB 9
+
+/* interrupt bit IDs */
+#define HIVE_ISYS_IRQ_CSI_SOF_BIT_ID 0
+#define HIVE_ISYS_IRQ_CSI_EOF_BIT_ID 1
+#define HIVE_ISYS_IRQ_CSI_SOL_BIT_ID 2
+#define HIVE_ISYS_IRQ_CSI_EOL_BIT_ID 3
+#define HIVE_ISYS_IRQ_CSI_RECEIVER_BIT_ID 4
+#define HIVE_ISYS_IRQ_CSI_RECEIVER_BE_BIT_ID 5
+#define HIVE_ISYS_IRQ_CAP_UNIT_A_NO_SOP 6
+#define HIVE_ISYS_IRQ_CAP_UNIT_A_LATE_SOP 7
+/*#define HIVE_ISYS_IRQ_CAP_UNIT_A_UNDEF_PH 7*/
+#define HIVE_ISYS_IRQ_CAP_UNIT_B_NO_SOP 8
+#define HIVE_ISYS_IRQ_CAP_UNIT_B_LATE_SOP 9
+/*#define HIVE_ISYS_IRQ_CAP_UNIT_B_UNDEF_PH 10*/
+#define HIVE_ISYS_IRQ_CAP_UNIT_C_NO_SOP 10
+#define HIVE_ISYS_IRQ_CAP_UNIT_C_LATE_SOP 11
+/*#define HIVE_ISYS_IRQ_CAP_UNIT_C_UNDEF_PH 13*/
+#define HIVE_ISYS_IRQ_ACQ_UNIT_SOP_MISMATCH 12
+/*#define HIVE_ISYS_IRQ_ACQ_UNIT_UNDEF_PH 15*/
+#define HIVE_ISYS_IRQ_INP_CTRL_CAPA 13
+#define HIVE_ISYS_IRQ_INP_CTRL_CAPB 14
+#define HIVE_ISYS_IRQ_INP_CTRL_CAPC 15
+#define HIVE_ISYS_IRQ_CIO2AHB 16
+#define HIVE_ISYS_IRQ_DMA_BIT_ID 17
+#define HIVE_ISYS_IRQ_STREAM_MON_BIT_ID 18
+#define HIVE_ISYS_IRQ_NUM_BITS 19
+
+/* DMA */
+#define HIVE_ISYS_DMA_CHANNEL 0
+#define HIVE_ISYS_DMA_IBUF_DDR_CONN 0
+#define HIVE_ISYS_DMA_HEIGHT 1
+#define HIVE_ISYS_DMA_ELEMS 1 /* both master buses of same width */
+#define HIVE_ISYS_DMA_STRIDE 0 /* no stride required as height is fixed to 1 */
+#define HIVE_ISYS_DMA_CROP 0 /* no cropping */
+#define HIVE_ISYS_DMA_EXTENSION 0 /* no extension as the element width is the same on both sides */
+
+#endif /* _input_system_defs_h */
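The HIVE_ISYS_GPREG_SRST_* values above are bit positions inside the input-system soft reset register (HIVE_ISYS_GPREG_SRST_IDX). A minimal sketch of composing a reset mask from them follows; the isys_srst_capture_mask() helper and the particular choice of units to reset are illustrative assumptions.

#include <stdint.h>

/* copied from input_system_defs.h */
#define HIVE_ISYS_GPREG_SRST_CAPT_FIFO_A_BIT 0
#define HIVE_ISYS_GPREG_SRST_CAPT_FIFO_B_BIT 1
#define HIVE_ISYS_GPREG_SRST_CAPT_FIFO_C_BIT 2
#define HIVE_ISYS_GPREG_SRST_CAPT_A_BIT      6
#define HIVE_ISYS_GPREG_SRST_CAPT_B_BIT      7
#define HIVE_ISYS_GPREG_SRST_CAPT_C_BIT      8

/* Value for the soft reset register that resets the three capture FIFOs
 * and the three capture units; writing it to the register is not shown. */
static uint32_t isys_srst_capture_mask(void)
{
	return (1u << HIVE_ISYS_GPREG_SRST_CAPT_FIFO_A_BIT) |
	       (1u << HIVE_ISYS_GPREG_SRST_CAPT_FIFO_B_BIT) |
	       (1u << HIVE_ISYS_GPREG_SRST_CAPT_FIFO_C_BIT) |
	       (1u << HIVE_ISYS_GPREG_SRST_CAPT_A_BIT) |
	       (1u << HIVE_ISYS_GPREG_SRST_CAPT_B_BIT) |
	       (1u << HIVE_ISYS_GPREG_SRST_CAPT_C_BIT);	/* == 0x1c7 */
}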
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/irq_controller_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/irq_controller_defs.h
new file mode 100644
index 000000000000..ec6dd4487158
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/irq_controller_defs.h
@@ -0,0 +1,28 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _irq_controller_defs_h
+#define _irq_controller_defs_h
+
+#define _HRT_IRQ_CONTROLLER_EDGE_REG_IDX 0
+#define _HRT_IRQ_CONTROLLER_MASK_REG_IDX 1
+#define _HRT_IRQ_CONTROLLER_STATUS_REG_IDX 2
+#define _HRT_IRQ_CONTROLLER_CLEAR_REG_IDX 3
+#define _HRT_IRQ_CONTROLLER_ENABLE_REG_IDX 4
+#define _HRT_IRQ_CONTROLLER_EDGE_NOT_PULSE_REG_IDX 5
+#define _HRT_IRQ_CONTROLLER_STR_OUT_ENABLE_REG_IDX 6
+
+#define _HRT_IRQ_CONTROLLER_REG_ALIGN 4
+
+#endif /* _irq_controller_defs_h */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/isp2400_mamoiada_params.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/isp2400_mamoiada_params.h
new file mode 100644
index 000000000000..669060d17c4f
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/isp2400_mamoiada_params.h
@@ -0,0 +1,254 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+/* Version */
+#define RTL_VERSION
+
+/* Cell name */
+#define ISP_CELL_TYPE isp2400_mamoiada
+#define ISP_VMEM simd_vmem
+#define _HRT_ISP_VMEM isp2400_mamoiada_simd_vmem
+
+/* instruction pipeline depth */
+#define ISP_BRANCHDELAY 5
+
+/* bus */
+#define ISP_BUS_WIDTH 32
+#define ISP_BUS_ADDR_WIDTH 32
+#define ISP_BUS_BURST_SIZE 1
+
+/* data-path */
+#define ISP_SCALAR_WIDTH 32
+#define ISP_SLICE_NELEMS 4
+#define ISP_VEC_NELEMS 64
+#define ISP_VEC_ELEMBITS 14
+#define ISP_VEC_ELEM8BITS 16
+#define ISP_CLONE_DATAPATH_IS_16 1
+
+/* memories */
+#define ISP_DMEM_DEPTH 4096
+#define ISP_DMEM_BSEL_DOWNSAMPLE 8
+#define ISP_VMEM_DEPTH 3072
+#define ISP_VMEM_BSEL_DOWNSAMPLE 8
+#define ISP_VMEM_ELEMBITS 14
+#define ISP_VMEM_ELEM_PRECISION 14
+#define ISP_VMEM_IS_BAMEM 1
+#if ISP_VMEM_IS_BAMEM
+ #define ISP_VMEM_BAMEM_MAX_BOI_HEIGHT 8
+ #define ISP_VMEM_BAMEM_LATENCY 5
+ #define ISP_VMEM_BAMEM_BANK_NARROWING_FACTOR 2
+ #define ISP_VMEM_BAMEM_NR_DATA_PLANES 8
+ #define ISP_VMEM_BAMEM_NR_CFG_REGISTERS 16
+ #define ISP_VMEM_BAMEM_LININT 0
+ #define ISP_VMEM_BAMEM_DAP_BITS 3
+ #define ISP_VMEM_BAMEM_LININT_FRAC_BITS 0
+ #define ISP_VMEM_BAMEM_PID_BITS 3
+ #define ISP_VMEM_BAMEM_OFFSET_BITS 19
+ #define ISP_VMEM_BAMEM_ADDRESS_BITS 25
+ #define ISP_VMEM_BAMEM_RID_BITS 4
+ #define ISP_VMEM_BAMEM_TRANSPOSITION 1
+ #define ISP_VMEM_BAMEM_VEC_PLUS_SLICE 1
+ #define ISP_VMEM_BAMEM_ARB_SERVICE_CYCLE_BITS 1
+ #define ISP_VMEM_BAMEM_LUT_ELEMS 16
+ #define ISP_VMEM_BAMEM_LUT_ADDR_WIDTH 14
+ #define ISP_VMEM_BAMEM_HALF_BLOCK_WRITE 1
+ #define ISP_VMEM_BAMEM_SMART_FETCH 1
+ #define ISP_VMEM_BAMEM_BIG_ENDIANNESS 0
+#endif /* ISP_VMEM_IS_BAMEM */
+#define ISP_PMEM_DEPTH 2048
+#define ISP_PMEM_WIDTH 640
+#define ISP_VAMEM_ADDRESS_BITS 12
+#define ISP_VAMEM_ELEMBITS 12
+#define ISP_VAMEM_DEPTH 2048
+#define ISP_VAMEM_ALIGNMENT 2
+#define ISP_VA_ADDRESS_WIDTH 896
+#define ISP_VEC_VALSU_LATENCY ISP_VEC_NELEMS
+#define ISP_HIST_ADDRESS_BITS 12
+#define ISP_HIST_ALIGNMENT 4
+#define ISP_HIST_COMP_IN_PREC 12
+#define ISP_HIST_DEPTH 1024
+#define ISP_HIST_WIDTH 24
+#define ISP_HIST_COMPONENTS 4
+
+/* program counter */
+#define ISP_PC_WIDTH 13
+
+/* Template switches */
+#define ISP_SHIELD_INPUT_DMEM 0
+#define ISP_SHIELD_OUTPUT_DMEM 1
+#define ISP_SHIELD_INPUT_VMEM 0
+#define ISP_SHIELD_OUTPUT_VMEM 0
+#define ISP_SHIELD_INPUT_PMEM 1
+#define ISP_SHIELD_OUTPUT_PMEM 1
+#define ISP_SHIELD_INPUT_HIST 1
+#define ISP_SHIELD_OUTPUT_HIST 1
+/* When the LUT is selected, shielding is always on */
+#define ISP_SHIELD_INPUT_VAMEM 1
+#define ISP_SHIELD_OUTPUT_VAMEM 1
+
+#define ISP_HAS_IRQ 1
+#define ISP_HAS_SOFT_RESET 1
+#define ISP_HAS_VEC_DIV 0
+#define ISP_HAS_VFU_W_2O 1
+#define ISP_HAS_DEINT3 1
+#define ISP_HAS_LUT 1
+#define ISP_HAS_HIST 1
+#define ISP_HAS_VALSU 1
+#define ISP_HAS_3rdVALSU 1
+#define ISP_VRF1_HAS_2P 1
+
+#define ISP_SRU_GUARDING 1
+#define ISP_VLSU_GUARDING 1
+
+#define ISP_VRF_RAM 1
+#define ISP_SRF_RAM 1
+
+#define ISP_SPLIT_VMUL_VADD_IS 0
+#define ISP_RFSPLIT_FPGA 0
+
+/* RSN or Bus pipelining */
+#define ISP_RSN_PIPE 1
+#define ISP_VSF_BUS_PIPE 0
+
+/* extra slave port to vmem */
+#define ISP_IF_VMEM 0
+#define ISP_GDC_VMEM 0
+
+/* Streaming ports */
+#define ISP_IF 1
+#define ISP_IF_B 1
+#define ISP_GDC 1
+#define ISP_SCL 1
+#define ISP_GPFIFO 1
+#define ISP_SP 1
+
+/* Removing Issue Slot(s) */
+#define ISP_HAS_NOT_SIMD_IS2 0
+#define ISP_HAS_NOT_SIMD_IS3 0
+#define ISP_HAS_NOT_SIMD_IS4 0
+#define ISP_HAS_NOT_SIMD_IS4_VADD 0
+#define ISP_HAS_NOT_SIMD_IS5 0
+#define ISP_HAS_NOT_SIMD_IS6 0
+#define ISP_HAS_NOT_SIMD_IS7 0
+#define ISP_HAS_NOT_SIMD_IS8 0
+
+/* ICache */
+#define ISP_ICACHE 1
+#define ISP_ICACHE_ONLY 0
+#define ISP_ICACHE_PREFETCH 1
+#define ISP_ICACHE_INDEX_BITS 8
+#define ISP_ICACHE_SET_BITS 5
+#define ISP_ICACHE_BLOCKS_PER_SET_BITS 1
+
+/* Experimental Flags */
+#define ISP_EXP_1 0
+#define ISP_EXP_2 0
+#define ISP_EXP_3 0
+#define ISP_EXP_4 0
+#define ISP_EXP_5 0
+#define ISP_EXP_6 0
+
+/* Derived values */
+#define ISP_LOG2_PMEM_WIDTH 10
+#define ISP_VEC_WIDTH 896
+#define ISP_SLICE_WIDTH 56
+#define ISP_VMEM_WIDTH 896
+#define ISP_VMEM_ALIGN 128
+#if ISP_VMEM_IS_BAMEM
+ #define ISP_VMEM_ALIGN_ELEM 2
+#endif /* ISP_VMEM_IS_BAMEM */
+#define ISP_SIMDLSU 1
+#define ISP_LSU_IMM_BITS 12
+
+/* convenient shortcuts for software */
+#define ISP_NWAY ISP_VEC_NELEMS
+#define NBITS ISP_VEC_ELEMBITS
+
+#define _isp_ceil_div(a,b) (((a)+(b)-1)/(b))
+
+#define ISP_VEC_ALIGN ISP_VMEM_ALIGN
+
+/* HRT specific vector support */
+#define isp2400_mamoiada_vector_alignment ISP_VEC_ALIGN
+#define isp2400_mamoiada_vector_elem_bits ISP_VMEM_ELEMBITS
+#define isp2400_mamoiada_vector_elem_precision ISP_VMEM_ELEM_PRECISION
+#define isp2400_mamoiada_vector_num_elems ISP_VEC_NELEMS
+
+/* register file sizes */
+#define ISP_RF0_SIZE 64
+#define ISP_RF1_SIZE 16
+#define ISP_RF2_SIZE 64
+#define ISP_RF3_SIZE 4
+#define ISP_RF4_SIZE 64
+#define ISP_RF5_SIZE 16
+#define ISP_RF6_SIZE 16
+#define ISP_RF7_SIZE 16
+#define ISP_RF8_SIZE 16
+#define ISP_RF9_SIZE 16
+#define ISP_RF10_SIZE 16
+#define ISP_RF11_SIZE 16
+#define ISP_VRF1_SIZE 24
+#define ISP_VRF2_SIZE 24
+#define ISP_VRF3_SIZE 24
+#define ISP_VRF4_SIZE 24
+#define ISP_VRF5_SIZE 24
+#define ISP_VRF6_SIZE 24
+#define ISP_VRF7_SIZE 24
+#define ISP_VRF8_SIZE 24
+#define ISP_SRF1_SIZE 4
+#define ISP_SRF2_SIZE 64
+#define ISP_SRF3_SIZE 64
+#define ISP_SRF4_SIZE 32
+#define ISP_SRF5_SIZE 64
+#define ISP_FRF0_SIZE 16
+#define ISP_FRF1_SIZE 4
+#define ISP_FRF2_SIZE 16
+#define ISP_FRF3_SIZE 4
+#define ISP_FRF4_SIZE 4
+#define ISP_FRF5_SIZE 8
+#define ISP_FRF6_SIZE 4
+/* register file read latency */
+#define ISP_VRF1_READ_LAT 1
+#define ISP_VRF2_READ_LAT 1
+#define ISP_VRF3_READ_LAT 1
+#define ISP_VRF4_READ_LAT 1
+#define ISP_VRF5_READ_LAT 1
+#define ISP_VRF6_READ_LAT 1
+#define ISP_VRF7_READ_LAT 1
+#define ISP_VRF8_READ_LAT 1
+#define ISP_SRF1_READ_LAT 1
+#define ISP_SRF2_READ_LAT 1
+#define ISP_SRF3_READ_LAT 1
+#define ISP_SRF4_READ_LAT 1
+#define ISP_SRF5_READ_LAT 1
+/* immediate sizes */
+#define ISP_IS1_IMM_BITS 14
+#define ISP_IS2_IMM_BITS 13
+#define ISP_IS3_IMM_BITS 14
+#define ISP_IS4_IMM_BITS 14
+#define ISP_IS5_IMM_BITS 9
+#define ISP_IS6_IMM_BITS 16
+#define ISP_IS7_IMM_BITS 9
+#define ISP_IS8_IMM_BITS 16
+#define ISP_IS9_IMM_BITS 11
+/* fifo depths */
+#define ISP_IF_FIFO_DEPTH 0
+#define ISP_IF_B_FIFO_DEPTH 0
+#define ISP_DMA_FIFO_DEPTH 0
+#define ISP_OF_FIFO_DEPTH 0
+#define ISP_GDC_FIFO_DEPTH 0
+#define ISP_SCL_FIFO_DEPTH 0
+#define ISP_GPFIFO_FIFO_DEPTH 0
+#define ISP_SP_FIFO_DEPTH 0
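The "Derived values" block is consistent with the data-path parameters declared earlier in this header (896 = 64 elements x 14 bits, 56 = 4 x 14), and _isp_ceil_div() is the usual ceiling division used when sizing buffers in whole vectors. A small compile-time sanity sketch using only those values follows; the static asserts and the vectors-per-line example are illustrative, not part of the header.

/* copied from isp2400_mamoiada_params.h */
#define ISP_VEC_NELEMS   64
#define ISP_VEC_ELEMBITS 14
#define ISP_SLICE_NELEMS 4
#define ISP_VEC_WIDTH    896
#define ISP_SLICE_WIDTH  56
#define _isp_ceil_div(a, b) (((a) + (b) - 1) / (b))

_Static_assert(ISP_VEC_WIDTH == ISP_VEC_NELEMS * ISP_VEC_ELEMBITS,
	       "vector width = elements * element bits");
_Static_assert(ISP_SLICE_WIDTH == ISP_SLICE_NELEMS * ISP_VEC_ELEMBITS,
	       "slice width = slice elements * element bits");

/* a 1920-pixel line needs ceil(1920 / 64) = 30 vectors */
_Static_assert(_isp_ceil_div(1920, ISP_VEC_NELEMS) == 30,
	       "ceiling division example");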
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/isp2400_support.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/isp2400_support.h
new file mode 100644
index 000000000000..e00bc841d0f0
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/isp2400_support.h
@@ -0,0 +1,38 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _isp2400_support_h
+#define _isp2400_support_h
+
+#ifndef ISP2400_VECTOR_TYPES
+/* These typedefs make it possible to include hive header files
+   in the host code, which is useful in crun */
+typedef char *tmemvectors, *tmemvectoru, *tvector;
+#endif
+
+#define hrt_isp_vamem1_store_16(cell, addr, val) hrt_mem_store_16(cell, HRT_PROC_TYPE_PROP(cell, _simd_vamem1), addr, val)
+#define hrt_isp_vamem2_store_16(cell, addr, val) hrt_mem_store_16(cell, HRT_PROC_TYPE_PROP(cell, _simd_vamem2), addr, val)
+
+#define hrt_isp_dmem(cell) HRT_PROC_TYPE_PROP(cell, _base_dmem)
+#define hrt_isp_vmem(cell) HRT_PROC_TYPE_PROP(cell, _simd_vmem)
+
+#define hrt_isp_dmem_master_port_address(cell) hrt_mem_master_port_address(cell, hrt_isp_dmem(cell))
+#define hrt_isp_vmem_master_port_address(cell) hrt_mem_master_port_address(cell, hrt_isp_vmem(cell))
+
+#if ISP_HAS_HIST
+ #define hrt_isp_hist(cell) HRT_PROC_TYPE_PROP(cell, _simd_histogram)
+ #define hrt_isp_hist_master_port_address(cell) hrt_mem_master_port_address(cell, hrt_isp_hist(cell))
+#endif
+
+#endif /* _isp2400_support_h */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/isp_acquisition_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/isp_acquisition_defs.h
new file mode 100644
index 000000000000..593620721627
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/isp_acquisition_defs.h
@@ -0,0 +1,234 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _isp_acquisition_defs_h
+#define _isp_acquisition_defs_h
+
+#define _ISP_ACQUISITION_REG_ALIGN 4 /* assuming 32 bit control bus width */
+#define _ISP_ACQUISITION_BYTES_PER_ELEM 4
+
+/* --------------------------------------------------*/
+
+#define NOF_ACQ_IRQS 1
+
+/* --------------------------------------------------*/
+/* FSM */
+/* --------------------------------------------------*/
+#define MEM2STREAM_FSM_STATE_BITS 2
+#define ACQ_SYNCHRONIZER_FSM_STATE_BITS 2
+
+/* --------------------------------------------------*/
+/* REGISTER INFO */
+/* --------------------------------------------------*/
+
+#define NOF_ACQ_REGS 12
+
+// Register IDs of MMIO slave-accessible registers
+#define ACQ_START_ADDR_REG_ID 0
+#define ACQ_MEM_REGION_SIZE_REG_ID 1
+#define ACQ_NUM_MEM_REGIONS_REG_ID 2
+#define ACQ_INIT_REG_ID 3
+#define ACQ_RECEIVED_SHORT_PACKETS_REG_ID 4
+#define ACQ_RECEIVED_LONG_PACKETS_REG_ID 5
+#define ACQ_LAST_COMMAND_REG_ID 6
+#define ACQ_NEXT_COMMAND_REG_ID 7
+#define ACQ_LAST_ACKNOWLEDGE_REG_ID 8
+#define ACQ_NEXT_ACKNOWLEDGE_REG_ID 9
+#define ACQ_FSM_STATE_INFO_REG_ID 10
+#define ACQ_INT_CNTR_INFO_REG_ID 11
+
+// Register width
+#define ACQ_START_ADDR_REG_WIDTH 9
+#define ACQ_MEM_REGION_SIZE_REG_WIDTH 9
+#define ACQ_NUM_MEM_REGIONS_REG_WIDTH 9
+#define ACQ_INIT_REG_WIDTH 3
+#define ACQ_RECEIVED_SHORT_PACKETS_REG_WIDTH 32
+#define ACQ_RECEIVED_LONG_PACKETS_REG_WIDTH 32
+#define ACQ_LAST_COMMAND_REG_WIDTH 32
+#define ACQ_NEXT_COMMAND_REG_WIDTH 32
+#define ACQ_LAST_ACKNOWLEDGE_REG_WIDTH 32
+#define ACQ_NEXT_ACKNOWLEDGE_REG_WIDTH 32
+#define ACQ_FSM_STATE_INFO_REG_WIDTH ((MEM2STREAM_FSM_STATE_BITS * 3) + (ACQ_SYNCHRONIZER_FSM_STATE_BITS * 3))
+#define ACQ_INT_CNTR_INFO_REG_WIDTH 32
+
+/* register reset value */
+#define ACQ_START_ADDR_REG_RSTVAL 0
+#define ACQ_MEM_REGION_SIZE_REG_RSTVAL 128
+#define ACQ_NUM_MEM_REGIONS_REG_RSTVAL 3
+#define ACQ_INIT_REG_RSTVAL 0
+#define ACQ_RECEIVED_SHORT_PACKETS_REG_RSTVAL 0
+#define ACQ_RECEIVED_LONG_PACKETS_REG_RSTVAL 0
+#define ACQ_LAST_COMMAND_REG_RSTVAL 0
+#define ACQ_NEXT_COMMAND_REG_RSTVAL 0
+#define ACQ_LAST_ACKNOWLEDGE_REG_RSTVAL 0
+#define ACQ_NEXT_ACKNOWLEDGE_REG_RSTVAL 0
+#define ACQ_FSM_STATE_INFO_REG_RSTVAL 0
+#define ACQ_INT_CNTR_INFO_REG_RSTVAL 0
+
+/* bit definitions */
+#define ACQ_INIT_RST_REG_BIT 0
+#define ACQ_INIT_RESYNC_BIT 2
+#define ACQ_INIT_RST_IDX ACQ_INIT_RST_REG_BIT
+#define ACQ_INIT_RST_BITS 1
+#define ACQ_INIT_RESYNC_IDX ACQ_INIT_RESYNC_BIT
+#define ACQ_INIT_RESYNC_BITS 1
+
+/* --------------------------------------------------*/
+/* TOKEN INFO */
+/* --------------------------------------------------*/
+#define ACQ_TOKEN_ID_LSB 0
+#define ACQ_TOKEN_ID_MSB 3
+#define ACQ_TOKEN_WIDTH (ACQ_TOKEN_ID_MSB - ACQ_TOKEN_ID_LSB + 1) // 4
+#define ACQ_TOKEN_ID_IDX 0
+#define ACQ_TOKEN_ID_BITS ACQ_TOKEN_WIDTH
+#define ACQ_INIT_CMD_INIT_IDX 4
+#define ACQ_INIT_CMD_INIT_BITS 3
+#define ACQ_CMD_START_ADDR_IDX 4
+#define ACQ_CMD_START_ADDR_BITS 9
+#define ACQ_CMD_NOFWORDS_IDX 13
+#define ACQ_CMD_NOFWORDS_BITS 9
+#define ACQ_MEM_REGION_ID_IDX 22
+#define ACQ_MEM_REGION_ID_BITS 9
+#define ACQ_PACKET_LENGTH_TOKEN_MSB 21
+#define ACQ_PACKET_LENGTH_TOKEN_LSB 13
+#define ACQ_PACKET_DATA_FORMAT_ID_TOKEN_MSB 9
+#define ACQ_PACKET_DATA_FORMAT_ID_TOKEN_LSB 4
+#define ACQ_PACKET_CH_ID_TOKEN_MSB 11
+#define ACQ_PACKET_CH_ID_TOKEN_LSB 10
+#define ACQ_PACKET_MEM_REGION_ID_TOKEN_MSB 12 /* only for capt_end_of_packet_written */
+#define ACQ_PACKET_MEM_REGION_ID_TOKEN_LSB 4 /* only for capt_end_of_packet_written */
+
+
+/* Command token IDs */
+#define ACQ_READ_REGION_AUTO_INCR_TOKEN_ID 0 //0000b
+#define ACQ_READ_REGION_TOKEN_ID 1 //0001b
+#define ACQ_READ_REGION_SOP_TOKEN_ID 2 //0010b
+#define ACQ_INIT_TOKEN_ID 8 //1000b
+
+/* Acknowledge token IDs */
+#define ACQ_READ_REGION_ACK_TOKEN_ID 0 //0000b
+#define ACQ_END_OF_PACKET_TOKEN_ID 4 //0100b
+#define ACQ_END_OF_REGION_TOKEN_ID 5 //0101b
+#define ACQ_SOP_MISMATCH_TOKEN_ID 6 //0110b
+#define ACQ_UNDEF_PH_TOKEN_ID 7 //0111b
+
+#define ACQ_TOKEN_MEMREGIONID_MSB 30
+#define ACQ_TOKEN_MEMREGIONID_LSB 22
+#define ACQ_TOKEN_NOFWORDS_MSB 21
+#define ACQ_TOKEN_NOFWORDS_LSB 13
+#define ACQ_TOKEN_STARTADDR_MSB 12
+#define ACQ_TOKEN_STARTADDR_LSB 4
+
+
+/* --------------------------------------------------*/
+/* MIPI */
+/* --------------------------------------------------*/
+
+#define WORD_COUNT_WIDTH 16
+#define PKT_CODE_WIDTH 6
+#define CHN_NO_WIDTH 2
+#define ERROR_INFO_WIDTH 8
+
+#define LONG_PKTCODE_MAX 63
+#define LONG_PKTCODE_MIN 16
+#define SHORT_PKTCODE_MAX 15
+
+#define EOF_CODE 1
+
+/* --------------------------------------------------*/
+/* Packet Info */
+/* --------------------------------------------------*/
+#define ACQ_START_OF_FRAME 0
+#define ACQ_END_OF_FRAME 1
+#define ACQ_START_OF_LINE 2
+#define ACQ_END_OF_LINE 3
+#define ACQ_LINE_PAYLOAD 4
+#define ACQ_GEN_SH_PKT 5
+
+
+/* bit definition */
+#define ACQ_PKT_TYPE_IDX 16
+#define ACQ_PKT_TYPE_BITS 6
+#define ACQ_PKT_SOP_IDX 32
+#define ACQ_WORD_CNT_IDX 0
+#define ACQ_WORD_CNT_BITS 16
+#define ACQ_PKT_INFO_IDX 16
+#define ACQ_PKT_INFO_BITS 8
+#define ACQ_HEADER_DATA_IDX 0
+#define ACQ_HEADER_DATA_BITS 16
+#define ACQ_ACK_TOKEN_ID_IDX ACQ_TOKEN_ID_IDX
+#define ACQ_ACK_TOKEN_ID_BITS ACQ_TOKEN_ID_BITS
+#define ACQ_ACK_NOFWORDS_IDX 13
+#define ACQ_ACK_NOFWORDS_BITS 9
+#define ACQ_ACK_PKT_LEN_IDX 4
+#define ACQ_ACK_PKT_LEN_BITS 16
+
+
+/* --------------------------------------------------*/
+/* Packet Data Type */
+/* --------------------------------------------------*/
+
+
+#define ACQ_YUV420_8_DATA 24 /* 01 1000 YUV420 8-bit */
+#define ACQ_YUV420_10_DATA 25 /* 01 1001 YUV420 10-bit */
+#define ACQ_YUV420_8L_DATA 26 /* 01 1010 YUV420 8-bit legacy */
+#define ACQ_YUV422_8_DATA 30 /* 01 1110 YUV422 8-bit */
+#define ACQ_YUV422_10_DATA 31 /* 01 1111 YUV422 10-bit */
+#define ACQ_RGB444_DATA 32 /* 10 0000 RGB444 */
+#define ACQ_RGB555_DATA 33 /* 10 0001 RGB555 */
+#define ACQ_RGB565_DATA 34 /* 10 0010 RGB565 */
+#define ACQ_RGB666_DATA 35 /* 10 0011 RGB666 */
+#define ACQ_RGB888_DATA 36 /* 10 0100 RGB888 */
+#define ACQ_RAW6_DATA 40 /* 10 1000 RAW6 */
+#define ACQ_RAW7_DATA 41 /* 10 1001 RAW7 */
+#define ACQ_RAW8_DATA 42 /* 10 1010 RAW8 */
+#define ACQ_RAW10_DATA 43 /* 10 1011 RAW10 */
+#define ACQ_RAW12_DATA 44 /* 10 1100 RAW12 */
+#define ACQ_RAW14_DATA 45 /* 10 1101 RAW14 */
+#define ACQ_USR_DEF_1_DATA 48 /* 11 0000 JPEG [User Defined 8-bit Data Type 1] */
+#define ACQ_USR_DEF_2_DATA 49 /* 11 0001 User Defined 8-bit Data Type 2 */
+#define ACQ_USR_DEF_3_DATA 50 /* 11 0010 User Defined 8-bit Data Type 3 */
+#define ACQ_USR_DEF_4_DATA 51 /* 11 0011 User Defined 8-bit Data Type 4 */
+#define ACQ_USR_DEF_5_DATA 52 /* 11 0100 User Defined 8-bit Data Type 5 */
+#define ACQ_USR_DEF_6_DATA 53 /* 11 0101 User Defined 8-bit Data Type 6 */
+#define ACQ_USR_DEF_7_DATA 54 /* 11 0110 User Defined 8-bit Data Type 7 */
+#define ACQ_USR_DEF_8_DATA 55 /* 11 0111 User Defined 8-bit Data Type 8 */
+#define ACQ_Emb_DATA 18 /* 01 0010 embedded eight-bit non-image data */
+#define ACQ_SOF_DATA 0 /* 00 0000 frame start */
+#define ACQ_EOF_DATA 1 /* 00 0001 frame end */
+#define ACQ_SOL_DATA 2 /* 00 0010 line start */
+#define ACQ_EOL_DATA 3 /* 00 0011 line end */
+#define ACQ_GEN_SH1_DATA 8 /* 00 1000 Generic Short Packet Code 1 */
+#define ACQ_GEN_SH2_DATA 9 /* 00 1001 Generic Short Packet Code 2 */
+#define ACQ_GEN_SH3_DATA 10 /* 00 1010 Generic Short Packet Code 3 */
+#define ACQ_GEN_SH4_DATA 11 /* 00 1011 Generic Short Packet Code 4 */
+#define ACQ_GEN_SH5_DATA 12 /* 00 1100 Generic Short Packet Code 5 */
+#define ACQ_GEN_SH6_DATA 13 /* 00 1101 Generic Short Packet Code 6 */
+#define ACQ_GEN_SH7_DATA 14 /* 00 1110 Generic Short Packet Code 7 */
+#define ACQ_GEN_SH8_DATA 15 /* 00 1111 Generic Short Packet Code 8 */
+#define ACQ_YUV420_8_CSPS_DATA 28 /* 01 1100 YUV420 8-bit (Chroma Shifted Pixel Sampling) */
+#define ACQ_YUV420_10_CSPS_DATA 29 /* 01 1101 YUV420 10-bit (Chroma Shifted Pixel Sampling) */
+#define ACQ_RESERVED_DATA_TYPE_MIN 56
+#define ACQ_RESERVED_DATA_TYPE_MAX 63
+#define ACQ_GEN_LONG_RESERVED_DATA_TYPE_MIN 19
+#define ACQ_GEN_LONG_RESERVED_DATA_TYPE_MAX 23
+#define ACQ_YUV_RESERVED_DATA_TYPE 27
+#define ACQ_RGB_RESERVED_DATA_TYPE_MIN 37
+#define ACQ_RGB_RESERVED_DATA_TYPE_MAX 39
+#define ACQ_RAW_RESERVED_DATA_TYPE_MIN 46
+#define ACQ_RAW_RESERVED_DATA_TYPE_MAX 47
+
+/* --------------------------------------------------*/
+
+#endif /* _isp_acquisition_defs_h */
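The ACQ_* index/width defines above describe how acquisition command tokens are packed. A hedged sketch of composing a "read region" command follows; acq_field() and pack_acq_read_region() are illustrative helpers, not part of the header.

#include <stdint.h>

/* copied from isp_acquisition_defs.h */
#define ACQ_READ_REGION_TOKEN_ID 1
#define ACQ_TOKEN_ID_IDX         0
#define ACQ_TOKEN_ID_BITS        4
#define ACQ_CMD_START_ADDR_IDX   4
#define ACQ_CMD_START_ADDR_BITS  9
#define ACQ_CMD_NOFWORDS_IDX     13
#define ACQ_CMD_NOFWORDS_BITS    9
#define ACQ_MEM_REGION_ID_IDX    22
#define ACQ_MEM_REGION_ID_BITS   9

/* place a value into a token field given its index and width */
static uint32_t acq_field(uint32_t val, unsigned int idx, unsigned int bits)
{
	return (val & ((1u << bits) - 1u)) << idx;
}

/* Compose an ACQ "read region" command token:
 * [3:0] token id, [12:4] start address, [21:13] number of words,
 * [30:22] memory region id, per the *_IDX / *_BITS defines above. */
static uint32_t pack_acq_read_region(uint32_t start_addr, uint32_t nof_words,
				     uint32_t region_id)
{
	return acq_field(ACQ_READ_REGION_TOKEN_ID, ACQ_TOKEN_ID_IDX, ACQ_TOKEN_ID_BITS) |
	       acq_field(start_addr, ACQ_CMD_START_ADDR_IDX, ACQ_CMD_START_ADDR_BITS) |
	       acq_field(nof_words, ACQ_CMD_NOFWORDS_IDX, ACQ_CMD_NOFWORDS_BITS) |
	       acq_field(region_id, ACQ_MEM_REGION_ID_IDX, ACQ_MEM_REGION_ID_BITS);
}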
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/isp_capture_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/isp_capture_defs.h
new file mode 100644
index 000000000000..0a249ce3e589
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/isp_capture_defs.h
@@ -0,0 +1,310 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _isp_capture_defs_h
+#define _isp_capture_defs_h
+
+#define _ISP_CAPTURE_REG_ALIGN 4 /* assuming 32 bit control bus width */
+#define _ISP_CAPTURE_BITS_PER_ELEM 32 /* only for data, not SOP */
+#define _ISP_CAPTURE_BYTES_PER_ELEM (_ISP_CAPTURE_BITS_PER_ELEM / 8)
+#define _ISP_CAPTURE_BYTES_PER_WORD 32 /* 256/8 */
+#define _ISP_CAPTURE_ELEM_PER_WORD (_ISP_CAPTURE_BYTES_PER_WORD / _ISP_CAPTURE_BYTES_PER_ELEM)
+
+//#define CAPT_RCV_ACK 1
+//#define CAPT_WRT_ACK 2
+//#define CAPT_IRQ_ACK 3
+
+/* --------------------------------------------------*/
+
+#define NOF_IRQS 2
+
+/* --------------------------------------------------*/
+/* REGISTER INFO */
+/* --------------------------------------------------*/
+
+// Number of registers
+#define CAPT_NOF_REGS 16
+
+// Register IDs of MMIO slave-accessible registers
+#define CAPT_START_MODE_REG_ID 0
+#define CAPT_START_ADDR_REG_ID 1
+#define CAPT_MEM_REGION_SIZE_REG_ID 2
+#define CAPT_NUM_MEM_REGIONS_REG_ID 3
+#define CAPT_INIT_REG_ID 4
+#define CAPT_START_REG_ID 5
+#define CAPT_STOP_REG_ID 6
+
+#define CAPT_PACKET_LENGTH_REG_ID 7
+#define CAPT_RECEIVED_LENGTH_REG_ID 8
+#define CAPT_RECEIVED_SHORT_PACKETS_REG_ID 9
+#define CAPT_RECEIVED_LONG_PACKETS_REG_ID 10
+#define CAPT_LAST_COMMAND_REG_ID 11
+#define CAPT_NEXT_COMMAND_REG_ID 12
+#define CAPT_LAST_ACKNOWLEDGE_REG_ID 13
+#define CAPT_NEXT_ACKNOWLEDGE_REG_ID 14
+#define CAPT_FSM_STATE_INFO_REG_ID 15
+
+// Register width
+#define CAPT_START_MODE_REG_WIDTH 1
+#define CAPT_START_ADDR_REG_WIDTH 9
+#define CAPT_MEM_REGION_SIZE_REG_WIDTH 9
+#define CAPT_NUM_MEM_REGIONS_REG_WIDTH 9
+#define CAPT_INIT_REG_WIDTH (18 + 4)
+
+#define CAPT_START_REG_WIDTH 1
+#define CAPT_STOP_REG_WIDTH 1
+
+/* --------------------------------------------------*/
+/* FSM */
+/* --------------------------------------------------*/
+#define CAPT_WRITE2MEM_FSM_STATE_BITS 2
+#define CAPT_SYNCHRONIZER_FSM_STATE_BITS 3
+
+
+#define CAPT_PACKET_LENGTH_REG_WIDTH 17
+#define CAPT_RECEIVED_LENGTH_REG_WIDTH 17
+#define CAPT_RECEIVED_SHORT_PACKETS_REG_WIDTH 32
+#define CAPT_RECEIVED_LONG_PACKETS_REG_WIDTH 32
+#define CAPT_LAST_COMMAND_REG_WIDTH 32
+/* #define CAPT_NEXT_COMMAND_REG_WIDTH 32 */
+#define CAPT_LAST_ACKNOWLEDGE_REG_WIDTH 32
+#define CAPT_NEXT_ACKNOWLEDGE_REG_WIDTH 32
+#define CAPT_FSM_STATE_INFO_REG_WIDTH ((CAPT_WRITE2MEM_FSM_STATE_BITS * 3) + (CAPT_SYNCHRONIZER_FSM_STATE_BITS * 3))
+
+#define CAPT_INIT_RESTART_MEM_ADDR_WIDTH 9
+#define CAPT_INIT_RESTART_MEM_REGION_WIDTH 9
+
+/* register reset value */
+#define CAPT_START_MODE_REG_RSTVAL 0
+#define CAPT_START_ADDR_REG_RSTVAL 0
+#define CAPT_MEM_REGION_SIZE_REG_RSTVAL 128
+#define CAPT_NUM_MEM_REGIONS_REG_RSTVAL 3
+#define CAPT_INIT_REG_RSTVAL 0
+
+#define CAPT_START_REG_RSTVAL 0
+#define CAPT_STOP_REG_RSTVAL 0
+
+#define CAPT_PACKET_LENGTH_REG_RSTVAL 0
+#define CAPT_RECEIVED_LENGTH_REG_RSTVAL 0
+#define CAPT_RECEIVED_SHORT_PACKETS_REG_RSTVAL 0
+#define CAPT_RECEIVED_LONG_PACKETS_REG_RSTVAL 0
+#define CAPT_LAST_COMMAND_REG_RSTVAL 0
+#define CAPT_NEXT_COMMAND_REG_RSTVAL 0
+#define CAPT_LAST_ACKNOWLEDGE_REG_RSTVAL 0
+#define CAPT_NEXT_ACKNOWLEDGE_REG_RSTVAL 0
+#define CAPT_FSM_STATE_INFO_REG_RSTVAL 0
+
+/* bit definitions */
+#define CAPT_INIT_RST_REG_BIT 0
+#define CAPT_INIT_FLUSH_BIT 1
+#define CAPT_INIT_RESYNC_BIT 2
+#define CAPT_INIT_RESTART_BIT 3
+#define CAPT_INIT_RESTART_MEM_ADDR_LSB 4
+#define CAPT_INIT_RESTART_MEM_ADDR_MSB 12
+#define CAPT_INIT_RESTART_MEM_REGION_LSB 13
+#define CAPT_INIT_RESTART_MEM_REGION_MSB 21
+
+
+#define CAPT_INIT_RST_REG_IDX CAPT_INIT_RST_REG_BIT
+#define CAPT_INIT_RST_REG_BITS 1
+#define CAPT_INIT_FLUSH_IDX CAPT_INIT_FLUSH_BIT
+#define CAPT_INIT_FLUSH_BITS 1
+#define CAPT_INIT_RESYNC_IDX CAPT_INIT_RESYNC_BIT
+#define CAPT_INIT_RESYNC_BITS 1
+#define CAPT_INIT_RESTART_IDX CAPT_INIT_RESTART_BIT
+#define CAPT_INIT_RESTART_BITS 1
+#define CAPT_INIT_RESTART_MEM_ADDR_IDX CAPT_INIT_RESTART_MEM_ADDR_LSB
+#define CAPT_INIT_RESTART_MEM_ADDR_BITS (CAPT_INIT_RESTART_MEM_ADDR_MSB - CAPT_INIT_RESTART_MEM_ADDR_LSB + 1)
+#define CAPT_INIT_RESTART_MEM_REGION_IDX CAPT_INIT_RESTART_MEM_REGION_LSB
+#define CAPT_INIT_RESTART_MEM_REGION_BITS (CAPT_INIT_RESTART_MEM_REGION_MSB - CAPT_INIT_RESTART_MEM_REGION_LSB + 1)
+
+
+
+/* --------------------------------------------------*/
+/* TOKEN INFO */
+/* --------------------------------------------------*/
+#define CAPT_TOKEN_ID_LSB 0
+#define CAPT_TOKEN_ID_MSB 3
+#define CAPT_TOKEN_WIDTH (CAPT_TOKEN_ID_MSB - CAPT_TOKEN_ID_LSB + 1) /* 4 */
+
+/* Command token IDs */
+#define CAPT_START_TOKEN_ID 0 /* 0000b */
+#define CAPT_STOP_TOKEN_ID 1 /* 0001b */
+#define CAPT_FREEZE_TOKEN_ID 2 /* 0010b */
+#define CAPT_RESUME_TOKEN_ID 3 /* 0011b */
+#define CAPT_INIT_TOKEN_ID 8 /* 1000b */
+
+#define CAPT_START_TOKEN_BIT 0
+#define CAPT_STOP_TOKEN_BIT 0
+#define CAPT_FREEZE_TOKEN_BIT 0
+#define CAPT_RESUME_TOKEN_BIT 0
+#define CAPT_INIT_TOKEN_BIT 0
+
+/* Acknowledge token IDs */
+#define CAPT_END_OF_PACKET_RECEIVED_TOKEN_ID 0 /* 0000b */
+#define CAPT_END_OF_PACKET_WRITTEN_TOKEN_ID 1 /* 0001b */
+#define CAPT_END_OF_REGION_WRITTEN_TOKEN_ID 2 /* 0010b */
+#define CAPT_FLUSH_DONE_TOKEN_ID 3 /* 0011b */
+#define CAPT_PREMATURE_SOP_TOKEN_ID 4 /* 0100b */
+#define CAPT_MISSING_SOP_TOKEN_ID 5 /* 0101b */
+#define CAPT_UNDEF_PH_TOKEN_ID 6 /* 0110b */
+#define CAPT_STOP_ACK_TOKEN_ID 7 /* 0111b */
+
+#define CAPT_PACKET_LENGTH_TOKEN_MSB 19
+#define CAPT_PACKET_LENGTH_TOKEN_LSB 4
+#define CAPT_SUPER_PACKET_LENGTH_TOKEN_MSB 20
+#define CAPT_SUPER_PACKET_LENGTH_TOKEN_LSB 4
+#define CAPT_PACKET_DATA_FORMAT_ID_TOKEN_MSB 25
+#define CAPT_PACKET_DATA_FORMAT_ID_TOKEN_LSB 20
+#define CAPT_PACKET_CH_ID_TOKEN_MSB 27
+#define CAPT_PACKET_CH_ID_TOKEN_LSB 26
+#define CAPT_PACKET_MEM_REGION_ID_TOKEN_MSB 29
+#define CAPT_PACKET_MEM_REGION_ID_TOKEN_LSB 21
+
+/* bit definition */
+#define CAPT_CMD_IDX CAPT_TOKEN_ID_LSB
+#define CAPT_CMD_BITS (CAPT_TOKEN_ID_MSB - CAPT_TOKEN_ID_LSB + 1)
+#define CAPT_SOP_IDX 32
+#define CAPT_SOP_BITS 1
+#define CAPT_PKT_INFO_IDX 16
+#define CAPT_PKT_INFO_BITS 8
+#define CAPT_PKT_TYPE_IDX 0
+#define CAPT_PKT_TYPE_BITS 6
+#define CAPT_HEADER_DATA_IDX 0
+#define CAPT_HEADER_DATA_BITS 16
+#define CAPT_PKT_DATA_IDX 0
+#define CAPT_PKT_DATA_BITS 32
+#define CAPT_WORD_CNT_IDX 0
+#define CAPT_WORD_CNT_BITS 16
+#define CAPT_ACK_TOKEN_ID_IDX 0
+#define CAPT_ACK_TOKEN_ID_BITS 4
+//#define CAPT_ACK_PKT_LEN_IDX CAPT_PACKET_LENGTH_TOKEN_LSB
+//#define CAPT_ACK_PKT_LEN_BITS (CAPT_PACKET_LENGTH_TOKEN_MSB - CAPT_PACKET_LENGTH_TOKEN_LSB + 1)
+//#define CAPT_ACK_PKT_INFO_IDX 20
+//#define CAPT_ACK_PKT_INFO_BITS 8
+//#define CAPT_ACK_MEM_REG_ID1_IDX 20 /* for capt_end_of_packet_written */
+//#define CAPT_ACK_MEM_REG_ID2_IDX 4 /* for capt_end_of_region_written */
+#define CAPT_ACK_PKT_LEN_IDX CAPT_PACKET_LENGTH_TOKEN_LSB
+#define CAPT_ACK_PKT_LEN_BITS (CAPT_PACKET_LENGTH_TOKEN_MSB - CAPT_PACKET_LENGTH_TOKEN_LSB + 1)
+#define CAPT_ACK_SUPER_PKT_LEN_IDX CAPT_SUPER_PACKET_LENGTH_TOKEN_LSB
+#define CAPT_ACK_SUPER_PKT_LEN_BITS (CAPT_SUPER_PACKET_LENGTH_TOKEN_MSB - CAPT_SUPER_PACKET_LENGTH_TOKEN_LSB + 1)
+#define CAPT_ACK_PKT_INFO_IDX CAPT_PACKET_DATA_FORMAT_ID_TOKEN_LSB
+#define CAPT_ACK_PKT_INFO_BITS (CAPT_PACKET_CH_ID_TOKEN_MSB - CAPT_PACKET_DATA_FORMAT_ID_TOKEN_LSB + 1)
+#define CAPT_ACK_MEM_REGION_ID_IDX CAPT_PACKET_MEM_REGION_ID_TOKEN_LSB
+#define CAPT_ACK_MEM_REGION_ID_BITS (CAPT_PACKET_MEM_REGION_ID_TOKEN_MSB - CAPT_PACKET_MEM_REGION_ID_TOKEN_LSB + 1)
+#define CAPT_ACK_PKT_TYPE_IDX CAPT_PACKET_DATA_FORMAT_ID_TOKEN_LSB
+#define CAPT_ACK_PKT_TYPE_BITS (CAPT_PACKET_DATA_FORMAT_ID_TOKEN_MSB - CAPT_PACKET_DATA_FORMAT_ID_TOKEN_LSB + 1)
+#define CAPT_INIT_TOKEN_INIT_IDX 4
+#define CAPT_INIT_TOKEN_INIT_BITS 22
+
+
+/* --------------------------------------------------*/
+/* MIPI */
+/* --------------------------------------------------*/
+
+#define CAPT_WORD_COUNT_WIDTH 16
+#define CAPT_PKT_CODE_WIDTH 6
+#define CAPT_CHN_NO_WIDTH 2
+#define CAPT_ERROR_INFO_WIDTH 8
+
+#define LONG_PKTCODE_MAX 63
+#define LONG_PKTCODE_MIN 16
+#define SHORT_PKTCODE_MAX 15
+
+
+/* --------------------------------------------------*/
+/* Packet Info */
+/* --------------------------------------------------*/
+#define CAPT_START_OF_FRAME 0
+#define CAPT_END_OF_FRAME 1
+#define CAPT_START_OF_LINE 2
+#define CAPT_END_OF_LINE 3
+#define CAPT_LINE_PAYLOAD 4
+#define CAPT_GEN_SH_PKT 5
+
+
+/* --------------------------------------------------*/
+/* Packet Data Type */
+/* --------------------------------------------------*/
+
+#define CAPT_YUV420_8_DATA 24 /* 01 1000 YUV420 8-bit */
+#define CAPT_YUV420_10_DATA 25 /* 01 1001 YUV420 10-bit */
+#define CAPT_YUV420_8L_DATA 26 /* 01 1010 YUV420 8-bit legacy */
+#define CAPT_YUV422_8_DATA 30 /* 01 1110 YUV422 8-bit */
+#define CAPT_YUV422_10_DATA 31 /* 01 1111 YUV422 10-bit */
+#define CAPT_RGB444_DATA 32 /* 10 0000 RGB444 */
+#define CAPT_RGB555_DATA 33 /* 10 0001 RGB555 */
+#define CAPT_RGB565_DATA 34 /* 10 0010 RGB565 */
+#define CAPT_RGB666_DATA 35 /* 10 0011 RGB666 */
+#define CAPT_RGB888_DATA 36 /* 10 0100 RGB888 */
+#define CAPT_RAW6_DATA 40 /* 10 1000 RAW6 */
+#define CAPT_RAW7_DATA 41 /* 10 1001 RAW7 */
+#define CAPT_RAW8_DATA 42 /* 10 1010 RAW8 */
+#define CAPT_RAW10_DATA 43 /* 10 1011 RAW10 */
+#define CAPT_RAW12_DATA 44 /* 10 1100 RAW12 */
+#define CAPT_RAW14_DATA 45 /* 10 1101 RAW14 */
+#define CAPT_USR_DEF_1_DATA 48 /* 11 0000 JPEG [User Defined 8-bit Data Type 1] */
+#define CAPT_USR_DEF_2_DATA 49 /* 11 0001 User Defined 8-bit Data Type 2 */
+#define CAPT_USR_DEF_3_DATA 50 /* 11 0010 User Defined 8-bit Data Type 3 */
+#define CAPT_USR_DEF_4_DATA 51 /* 11 0011 User Defined 8-bit Data Type 4 */
+#define CAPT_USR_DEF_5_DATA 52 /* 11 0100 User Defined 8-bit Data Type 5 */
+#define CAPT_USR_DEF_6_DATA 53 /* 11 0101 User Defined 8-bit Data Type 6 */
+#define CAPT_USR_DEF_7_DATA 54 /* 11 0110 User Defined 8-bit Data Type 7 */
+#define CAPT_USR_DEF_8_DATA 55 /* 11 0111 User Defined 8-bit Data Type 8 */
+#define CAPT_Emb_DATA 18 /* 01 0010 embedded eight-bit non-image data */
+#define CAPT_SOF_DATA 0 /* 00 0000 frame start */
+#define CAPT_EOF_DATA 1 /* 00 0001 frame end */
+#define CAPT_SOL_DATA 2 /* 00 0010 line start */
+#define CAPT_EOL_DATA 3 /* 00 0011 line end */
+#define CAPT_GEN_SH1_DATA 8 /* 00 1000 Generic Short Packet Code 1 */
+#define CAPT_GEN_SH2_DATA 9 /* 00 1001 Generic Short Packet Code 2 */
+#define CAPT_GEN_SH3_DATA 10 /* 00 1010 Generic Short Packet Code 3 */
+#define CAPT_GEN_SH4_DATA 11 /* 00 1011 Generic Short Packet Code 4 */
+#define CAPT_GEN_SH5_DATA 12 /* 00 1100 Generic Short Packet Code 5 */
+#define CAPT_GEN_SH6_DATA 13 /* 00 1101 Generic Short Packet Code 6 */
+#define CAPT_GEN_SH7_DATA 14 /* 00 1110 Generic Short Packet Code 7 */
+#define CAPT_GEN_SH8_DATA 15 /* 00 1111 Generic Short Packet Code 8 */
+#define CAPT_YUV420_8_CSPS_DATA 28 /* 01 1100 YUV420 8-bit (Chroma Shifted Pixel Sampling) */
+#define CAPT_YUV420_10_CSPS_DATA 29 /* 01 1101 YUV420 10-bit (Chroma Shifted Pixel Sampling) */
+#define CAPT_RESERVED_DATA_TYPE_MIN 56
+#define CAPT_RESERVED_DATA_TYPE_MAX 63
+#define CAPT_GEN_LONG_RESERVED_DATA_TYPE_MIN 19
+#define CAPT_GEN_LONG_RESERVED_DATA_TYPE_MAX 23
+#define CAPT_YUV_RESERVED_DATA_TYPE 27
+#define CAPT_RGB_RESERVED_DATA_TYPE_MIN 37
+#define CAPT_RGB_RESERVED_DATA_TYPE_MAX 39
+#define CAPT_RAW_RESERVED_DATA_TYPE_MIN 46
+#define CAPT_RAW_RESERVED_DATA_TYPE_MAX 47
+
+
+/* --------------------------------------------------*/
+/* Capture Unit State */
+/* --------------------------------------------------*/
+#define CAPT_FREE_RUN 0
+#define CAPT_NO_SYNC 1
+#define CAPT_SYNC_SWP 2
+#define CAPT_SYNC_MWP 3
+#define CAPT_SYNC_WAIT 4
+#define CAPT_FREEZE 5
+#define CAPT_RUN 6
+
+
+/* --------------------------------------------------*/
+
+#endif /* _isp_capture_defs_h */
+
+
+
+
+
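The CAPT_ACK_* index/width defines above determine how the capture unit's acknowledge tokens are split into fields. A minimal host-side decode sketch follows; struct capt_ack and capt_decode_ack() are illustrative assumptions, and only the three fields whose positions are unambiguous from the defines are extracted.

#include <stdint.h>

/* copied from isp_capture_defs.h */
#define CAPT_ACK_TOKEN_ID_IDX  0
#define CAPT_ACK_TOKEN_ID_BITS 4
#define CAPT_ACK_PKT_LEN_IDX   4	/* CAPT_PACKET_LENGTH_TOKEN_LSB */
#define CAPT_ACK_PKT_LEN_BITS  16	/* bits [19:4] */
#define CAPT_ACK_PKT_TYPE_IDX  20	/* CAPT_PACKET_DATA_FORMAT_ID_TOKEN_LSB */
#define CAPT_ACK_PKT_TYPE_BITS 6	/* bits [25:20] */

static uint32_t capt_bits(uint32_t token, unsigned int idx, unsigned int bits)
{
	return (token >> idx) & ((1u << bits) - 1u);
}

/* decoded acknowledge token, illustrative only */
struct capt_ack {
	uint32_t token_id;	/* e.g. CAPT_END_OF_PACKET_RECEIVED_TOKEN_ID */
	uint32_t pkt_len;	/* packet length */
	uint32_t pkt_type;	/* MIPI data type, e.g. CAPT_RAW10_DATA */
};

static struct capt_ack capt_decode_ack(uint32_t token)
{
	struct capt_ack ack = {
		.token_id = capt_bits(token, CAPT_ACK_TOKEN_ID_IDX, CAPT_ACK_TOKEN_ID_BITS),
		.pkt_len  = capt_bits(token, CAPT_ACK_PKT_LEN_IDX, CAPT_ACK_PKT_LEN_BITS),
		.pkt_type = capt_bits(token, CAPT_ACK_PKT_TYPE_IDX, CAPT_ACK_PKT_TYPE_BITS),
	};
	return ack;
}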
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/mmu_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/mmu_defs.h
new file mode 100644
index 000000000000..c038f39ffd25
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/mmu_defs.h
@@ -0,0 +1,23 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _mmu_defs_h
+#define _mmu_defs_h
+
+#define _HRT_MMU_INVALIDATE_TLB_REG_IDX 0
+#define _HRT_MMU_PAGE_TABLE_BASE_ADDRESS_REG_IDX 1
+
+#define _HRT_MMU_REG_ALIGN 4
+
+#endif /* _mmu_defs_h */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/scalar_processor_2400_params.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/scalar_processor_2400_params.h
new file mode 100644
index 000000000000..9b6c2893d950
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/scalar_processor_2400_params.h
@@ -0,0 +1,20 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _scalar_processor_2400_params_h
+#define _scalar_processor_2400_params_h
+
+#include "cell_params.h"
+
+#endif /* _scalar_processor_2400_params_h */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/sp_hrt.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/sp_hrt.h
new file mode 100644
index 000000000000..7ee4deba519a
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/sp_hrt.h
@@ -0,0 +1,24 @@
+#ifndef ISP2401
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _sp_hrt_h_
+#define _sp_hrt_h_
+
+#define hrt_sp_dmem(cell) HRT_PROC_TYPE_PROP(cell, _dmem)
+
+#define hrt_sp_dmem_master_port_address(cell) hrt_mem_master_port_address(cell, hrt_sp_dmem(cell))
+
+#endif /* _sp_hrt_h_ */
+#endif
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/str2mem_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/str2mem_defs.h
new file mode 100644
index 000000000000..1cb62444cf68
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/str2mem_defs.h
@@ -0,0 +1,39 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _ST2MEM_DEFS_H
+#define _ST2MEM_DEFS_H
+
+#define _STR2MEM_CRUN_BIT 0x100000
+#define _STR2MEM_CMD_BITS 0x0F0000
+#define _STR2MEM_COUNT_BITS 0x00FFFF
+
+#define _STR2MEM_BLOCKS_CMD 0xA0000
+#define _STR2MEM_PACKETS_CMD 0xB0000
+#define _STR2MEM_BYTES_CMD 0xC0000
+#define _STR2MEM_BYTES_FROM_PACKET_CMD 0xD0000
+
+#define _STR2MEM_SOFT_RESET_REG_ID 0
+#define _STR2MEM_INPUT_ENDIANNESS_REG_ID 1
+#define _STR2MEM_OUTPUT_ENDIANNESS_REG_ID 2
+#define _STR2MEM_BIT_SWAPPING_REG_ID 3
+#define _STR2MEM_BLOCK_SYNC_LEVEL_REG_ID 4
+#define _STR2MEM_PACKET_SYNC_LEVEL_REG_ID 5
+#define _STR2MEM_READ_POST_WRITE_SYNC_ENABLE_REG_ID 6
+#define _STR2MEM_DUAL_BYTE_INPUTS_ENABLED_REG_ID 7
+#define _STR2MEM_EN_STAT_UPDATE_ID 8
+
+#define _STR2MEM_REG_ALIGN 4
+
+#endif /* _ST2MEM_DEFS_H */
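The mask and command defines above imply a simple command word layout for the str2mem block: the command code sits in bits [19:16] and the transfer count in bits [15:0]. A hedged example of composing a "transfer N bytes" command word follows; str2mem_bytes_cmd() is an illustrative helper, and issuing the word to the block is not shown.

#include <stdint.h>

/* copied from str2mem_defs.h */
#define _STR2MEM_CMD_BITS   0x0F0000
#define _STR2MEM_COUNT_BITS 0x00FFFF
#define _STR2MEM_BYTES_CMD  0xC0000

/* command code in bits [19:16], transfer count in bits [15:0] */
static uint32_t str2mem_bytes_cmd(uint32_t byte_count)
{
	return (_STR2MEM_BYTES_CMD & _STR2MEM_CMD_BITS) |
	       (byte_count & _STR2MEM_COUNT_BITS);
}

/* e.g. str2mem_bytes_cmd(4096) == 0x0C1000 */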
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/streaming_to_mipi_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/streaming_to_mipi_defs.h
new file mode 100644
index 000000000000..60143b8743a2
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/streaming_to_mipi_defs.h
@@ -0,0 +1,28 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _streaming_to_mipi_defs_h
+#define _streaming_to_mipi_defs_h
+
+#define HIVE_STR_TO_MIPI_VALID_A_BIT 0
+#define HIVE_STR_TO_MIPI_VALID_B_BIT 1
+#define HIVE_STR_TO_MIPI_SOL_BIT 2
+#define HIVE_STR_TO_MIPI_EOL_BIT 3
+#define HIVE_STR_TO_MIPI_SOF_BIT 4
+#define HIVE_STR_TO_MIPI_EOF_BIT 5
+#define HIVE_STR_TO_MIPI_CH_ID_LSB 6
+
+#define HIVE_STR_TO_MIPI_DATA_A_LSB (HIVE_STR_TO_MIPI_VALID_B_BIT + 1)
+
+#endif /* _streaming_to_mipi_defs_h */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/timed_controller_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/timed_controller_defs.h
new file mode 100644
index 000000000000..d2b8972b0d9e
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/timed_controller_defs.h
@@ -0,0 +1,22 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _timed_controller_defs_h
+#define _timed_controller_defs_h
+
+#define _HRT_TIMED_CONTROLLER_CMD_REG_IDX 0
+
+#define _HRT_TIMED_CONTROLLER_REG_ALIGN 4
+
+#endif /* _timed_controller_defs_h */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/var.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/var.h
new file mode 100644
index 000000000000..5bc0ad34616e
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/var.h
@@ -0,0 +1,74 @@
+#ifndef ISP2401
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _HRT_VAR_H
+#define _HRT_VAR_H
+
+#include "version.h"
+#include "system_api.h"
+#include "hive_types.h"
+
+#define hrt_int_type_of_char char
+#define hrt_int_type_of_uchar unsigned char
+#define hrt_int_type_of_short short
+#define hrt_int_type_of_ushort unsigned short
+#define hrt_int_type_of_int int
+#define hrt_int_type_of_uint unsigned int
+#define hrt_int_type_of_long long
+#define hrt_int_type_of_ulong unsigned long
+#define hrt_int_type_of_ptr unsigned int
+
+#define hrt_host_type_of_char char
+#define hrt_host_type_of_uchar unsigned char
+#define hrt_host_type_of_short short
+#define hrt_host_type_of_ushort unsigned short
+#define hrt_host_type_of_int int
+#define hrt_host_type_of_uint unsigned int
+#define hrt_host_type_of_long long
+#define hrt_host_type_of_ulong unsigned long
+#define hrt_host_type_of_ptr void*
+
+#define HRT_TYPE_BYTES(cell, type) (HRT_TYPE_BITS(cell, type)/8)
+#define HRT_HOST_TYPE(cell_type) HRTCAT(hrt_host_type_of_, cell_type)
+#define HRT_INT_TYPE(type) HRTCAT(hrt_int_type_of_, type)
+
+#define hrt_scalar_store(cell, type, var, data) \
+ HRTCAT(hrt_mem_store_,HRT_TYPE_BITS(cell, type))(\
+ cell, \
+ HRTCAT(HIVE_MEM_,var), \
+ HRTCAT(HIVE_ADDR_,var), \
+ (HRT_INT_TYPE(type))(data))
+
+#define hrt_scalar_load(cell, type, var) \
+ (HRT_HOST_TYPE(type))(HRTCAT4(_hrt_mem_load_,HRT_PROC_TYPE(cell),_,type) ( \
+ cell, \
+ HRTCAT(HIVE_MEM_,var), \
+ HRTCAT(HIVE_ADDR_,var)))
+
+#define hrt_indexed_store(cell, type, array, index, data) \
+ HRTCAT(hrt_mem_store_,HRT_TYPE_BITS(cell, type))(\
+ cell, \
+ HRTCAT(HIVE_MEM_,array), \
+ (HRTCAT(HIVE_ADDR_,array))+((index)*HRT_TYPE_BYTES(cell, type)), \
+ (HRT_INT_TYPE(type))(data))
+
+#define hrt_indexed_load(cell, type, array, index) \
+ (HRT_HOST_TYPE(type))(HRTCAT4(_hrt_mem_load_,HRT_PROC_TYPE(cell),_,type) ( \
+ cell, \
+ HRTCAT(HIVE_MEM_,array), \
+ (HRTCAT(HIVE_ADDR_,array))+((index)*HRT_TYPE_BYTES(cell, type))))
+
+#endif /* _HRT_VAR_H */
+#endif
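
The data-access macros in var.h resolve their targets purely by token pasting: HIVE_MEM_<var> and HIVE_ADDR_<var> come from generated symbol maps such as the spmem_dump.c added further down, and the accessor name is built from the cell's processor type and word width. The self-contained sketch below is not part of the patch; HRTCAT/HRT_TYPE_BITS/HRT_PROC_TYPE, the hrt_mem_* accessors and the demo_var map entries are stubbed stand-ins, kept only so the expansion of hrt_scalar_store()/hrt_scalar_load() can be compiled and traced on the host.

#include <stdio.h>

/* Stand-ins for the real HRT glue; only the shapes matter here. */
#define _HRTCAT(a, b)		a##b
#define HRTCAT(a, b)		_HRTCAT(a, b)
#define _HRTCAT4(a, b, c, d)	a##b##c##d
#define HRTCAT4(a, b, c, d)	_HRTCAT4(a, b, c, d)
#define HRT_PROC_TYPE(cell)	sp	/* stub: processor type of the cell */
#define HRT_TYPE_BITS(cell, t)	32	/* stub: 32-bit DMEM scalars */

#define hrt_int_type_of_int	int
#define hrt_host_type_of_int	int
#define HRT_INT_TYPE(type)	HRTCAT(hrt_int_type_of_, type)
#define HRT_HOST_TYPE(type)	HRTCAT(hrt_host_type_of_, type)

/* Stub accessors; the real ones issue DMEM transactions to the SP. */
#define hrt_mem_store_32(cell, mem, addr, data) \
	printf("store %s @0x%x <- %d\n", mem, (unsigned)(addr), (int)(data))
#define _hrt_mem_load_sp_int(cell, mem, addr) \
	(printf("load  %s @0x%x\n", mem, (unsigned)(addr)), 0)

/* Same shape as the hrt_scalar_store()/hrt_scalar_load() macros in var.h. */
#define hrt_scalar_store(cell, type, var, data) \
	HRTCAT(hrt_mem_store_, HRT_TYPE_BITS(cell, type))(cell, \
		HRTCAT(HIVE_MEM_, var), HRTCAT(HIVE_ADDR_, var), \
		(HRT_INT_TYPE(type))(data))
#define hrt_scalar_load(cell, type, var) \
	(HRT_HOST_TYPE(type))(HRTCAT4(_hrt_mem_load_, HRT_PROC_TYPE(cell), _, type)(cell, \
		HRTCAT(HIVE_MEM_, var), HRTCAT(HIVE_ADDR_, var)))

/* Hypothetical generated-map entries, in the spmem_dump.c naming style. */
#define scalar_processor_2400_dmem	"sp.dmem"
#define HIVE_MEM_demo_var	scalar_processor_2400_dmem
#define HIVE_ADDR_demo_var	0x1234

int main(void)
{
	hrt_scalar_store(sp, int, demo_var, 7);		/* -> hrt_mem_store_32(...) */
	return hrt_scalar_load(sp, int, demo_var);	/* -> _hrt_mem_load_sp_int(...) */
}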
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/version.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/version.h
new file mode 100644
index 000000000000..bbc4948baea9
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/version.h
@@ -0,0 +1,20 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef HRT_VERSION_H
+#define HRT_VERSION_H
+#define HRT_VERSION_MAJOR 1
+#define HRT_VERSION_MINOR 4
+#define HRT_VERSION 1_4
+#endif
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/spmem_dump.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/spmem_dump.c
new file mode 100644
index 000000000000..ddc7a8f05153
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/spmem_dump.c
@@ -0,0 +1,3634 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _sp_map_h_
+#define _sp_map_h_
+
+
+#ifndef _hrt_dummy_use_blob_sp
+#define _hrt_dummy_use_blob_sp()
+#endif
+
+#define _hrt_cell_load_program_sp(proc) _hrt_cell_load_program_embedded(proc, sp)
+
+#ifndef ISP2401
+/* function input_system_acquisition_stop: ADE */
+#else
+/* function input_system_acquisition_stop: AD8 */
+#endif
+
+#ifndef ISP2401
+/* function longjmp: 684E */
+#else
+/* function longjmp: 69C1 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_HIVE_IF_SRST_MASK
+#define HIVE_MEM_HIVE_IF_SRST_MASK scalar_processor_2400_dmem
+#define HIVE_ADDR_HIVE_IF_SRST_MASK 0x1C8
+#define HIVE_SIZE_HIVE_IF_SRST_MASK 16
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_HIVE_IF_SRST_MASK scalar_processor_2400_dmem
+#define HIVE_ADDR_sp_HIVE_IF_SRST_MASK 0x1C8
+#define HIVE_SIZE_sp_HIVE_IF_SRST_MASK 16
+
+#ifndef ISP2401
+/* function tmpmem_init_dmem: 6580 */
+#else
+/* function tmpmem_init_dmem: 66BB */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_isys_sp_token_map_receive_ack: 5EC4 */
+#else
+/* function ia_css_isys_sp_token_map_receive_ack: 5FFF */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_dmaproxy_sp_set_addr_B: 332C */
+#else
+/* function ia_css_dmaproxy_sp_set_addr_B: 3520 */
+
+/* function ia_css_pipe_data_init_tagger_resources: A4F */
+#endif
+
+/* function debug_buffer_set_ddr_addr: DD */
+
+#ifndef ISP2401
+/* function receiver_port_reg_load: AC2 */
+#else
+/* function receiver_port_reg_load: ABC */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_vbuf_mipi
+#define HIVE_MEM_vbuf_mipi scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_vbuf_mipi 0x631C
+#else
+#define HIVE_ADDR_vbuf_mipi 0x6378
+#endif
+#define HIVE_SIZE_vbuf_mipi 12
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_vbuf_mipi scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_vbuf_mipi 0x631C
+#else
+#define HIVE_ADDR_sp_vbuf_mipi 0x6378
+#endif
+#define HIVE_SIZE_sp_vbuf_mipi 12
+
+#ifndef ISP2401
+/* function ia_css_event_sp_decode: 351D */
+#else
+/* function ia_css_event_sp_decode: 3711 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_queue_get_size: 48A5 */
+#else
+/* function ia_css_queue_get_size: 4B2D */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_queue_load: 4EE6 */
+#else
+/* function ia_css_queue_load: 5144 */
+#endif
+
+#ifndef ISP2401
+/* function setjmp: 6857 */
+#else
+/* function setjmp: 69CA */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_sem_for_sp2host_isys_event_queue
+#define HIVE_MEM_sem_for_sp2host_isys_event_queue scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sem_for_sp2host_isys_event_queue 0x4684
+#else
+#define HIVE_ADDR_sem_for_sp2host_isys_event_queue 0x46CC
+#endif
+#define HIVE_SIZE_sem_for_sp2host_isys_event_queue 20
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_sem_for_sp2host_isys_event_queue scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_sem_for_sp2host_isys_event_queue 0x4684
+#else
+#define HIVE_ADDR_sp_sem_for_sp2host_isys_event_queue 0x46CC
+#endif
+#define HIVE_SIZE_sp_sem_for_sp2host_isys_event_queue 20
+
+#ifndef ISP2401
+/* function ia_css_dmaproxy_sp_wait_for_ack: 6E07 */
+#else
+/* function ia_css_dmaproxy_sp_wait_for_ack: 6F4B */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_sp_rawcopy_func: 510B */
+#else
+/* function ia_css_sp_rawcopy_func: 5369 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_tagger_buf_sp_pop_marked: 29F7 */
+#else
+/* function ia_css_tagger_buf_sp_pop_marked: 2B99 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_isp_stage
+#define HIVE_MEM_isp_stage scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_isp_stage 0x5C00
+#else
+#define HIVE_ADDR_isp_stage 0x5C60
+#endif
+#define HIVE_SIZE_isp_stage 832
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_isp_stage scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_isp_stage 0x5C00
+#else
+#define HIVE_ADDR_sp_isp_stage 0x5C60
+#endif
+#define HIVE_SIZE_sp_isp_stage 832
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_vbuf_raw
+#define HIVE_MEM_vbuf_raw scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_vbuf_raw 0x2F4
+#else
+#define HIVE_ADDR_vbuf_raw 0x30C
+#endif
+#define HIVE_SIZE_vbuf_raw 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_vbuf_raw scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_vbuf_raw 0x2F4
+#else
+#define HIVE_ADDR_sp_vbuf_raw 0x30C
+#endif
+#define HIVE_SIZE_sp_vbuf_raw 4
+
+#ifndef ISP2401
+/* function ia_css_sp_bin_copy_func: 5032 */
+#else
+/* function ia_css_sp_bin_copy_func: 5290 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_queue_item_store: 4C34 */
+#else
+/* function ia_css_queue_item_store: 4E92 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_ia_css_bufq_sp_pipe_private_metadata_bufs
+#define HIVE_MEM_ia_css_bufq_sp_pipe_private_metadata_bufs scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_ia_css_bufq_sp_pipe_private_metadata_bufs 0x4AA0
+#else
+#define HIVE_ADDR_ia_css_bufq_sp_pipe_private_metadata_bufs 0x4AFC
+#endif
+#define HIVE_SIZE_ia_css_bufq_sp_pipe_private_metadata_bufs 20
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_ia_css_bufq_sp_pipe_private_metadata_bufs scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_ia_css_bufq_sp_pipe_private_metadata_bufs 0x4AA0
+#else
+#define HIVE_ADDR_sp_ia_css_bufq_sp_pipe_private_metadata_bufs 0x4AFC
+#endif
+#define HIVE_SIZE_sp_ia_css_bufq_sp_pipe_private_metadata_bufs 20
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_ia_css_bufq_sp_pipe_private_buffer_bufs
+#define HIVE_MEM_ia_css_bufq_sp_pipe_private_buffer_bufs scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_ia_css_bufq_sp_pipe_private_buffer_bufs 0x4AB4
+#else
+#define HIVE_ADDR_ia_css_bufq_sp_pipe_private_buffer_bufs 0x4B10
+#endif
+#define HIVE_SIZE_ia_css_bufq_sp_pipe_private_buffer_bufs 160
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_ia_css_bufq_sp_pipe_private_buffer_bufs scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_ia_css_bufq_sp_pipe_private_buffer_bufs 0x4AB4
+#else
+#define HIVE_ADDR_sp_ia_css_bufq_sp_pipe_private_buffer_bufs 0x4B10
+#endif
+#define HIVE_SIZE_sp_ia_css_bufq_sp_pipe_private_buffer_bufs 160
+
+/* function sp_start_isp: 45D */
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_sp_binary_group
+#define HIVE_MEM_sp_binary_group scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_binary_group 0x5FF0
+#else
+#define HIVE_ADDR_sp_binary_group 0x6050
+#endif
+#define HIVE_SIZE_sp_binary_group 32
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_sp_binary_group scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_sp_binary_group 0x5FF0
+#else
+#define HIVE_ADDR_sp_sp_binary_group 0x6050
+#endif
+#define HIVE_SIZE_sp_sp_binary_group 32
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_sp_sw_state
+#define HIVE_MEM_sp_sw_state scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_sw_state 0x62AC
+#else
+#define HIVE_ADDR_sp_sw_state 0x6308
+#endif
+#define HIVE_SIZE_sp_sw_state 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_sp_sw_state scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_sp_sw_state 0x62AC
+#else
+#define HIVE_ADDR_sp_sp_sw_state 0x6308
+#endif
+#define HIVE_SIZE_sp_sp_sw_state 4
+
+#ifndef ISP2401
+/* function ia_css_thread_sp_main: D5B */
+#else
+/* function ia_css_thread_sp_main: D50 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_ispctrl_sp_init_internal_buffers: 3723 */
+#else
+/* function ia_css_ispctrl_sp_init_internal_buffers: 3952 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_sp2host_psys_event_queue_handle
+#define HIVE_MEM_sp2host_psys_event_queue_handle scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp2host_psys_event_queue_handle 0x4B54
+#else
+#define HIVE_ADDR_sp2host_psys_event_queue_handle 0x4BB0
+#endif
+#define HIVE_SIZE_sp2host_psys_event_queue_handle 12
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_sp2host_psys_event_queue_handle scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_sp2host_psys_event_queue_handle 0x4B54
+#else
+#define HIVE_ADDR_sp_sp2host_psys_event_queue_handle 0x4BB0
+#endif
+#define HIVE_SIZE_sp_sp2host_psys_event_queue_handle 12
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_sem_for_sp2host_psys_event_queue
+#define HIVE_MEM_sem_for_sp2host_psys_event_queue scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sem_for_sp2host_psys_event_queue 0x4698
+#else
+#define HIVE_ADDR_sem_for_sp2host_psys_event_queue 0x46E0
+#endif
+#define HIVE_SIZE_sem_for_sp2host_psys_event_queue 20
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_sem_for_sp2host_psys_event_queue scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_sem_for_sp2host_psys_event_queue 0x4698
+#else
+#define HIVE_ADDR_sp_sem_for_sp2host_psys_event_queue 0x46E0
+#endif
+#define HIVE_SIZE_sp_sem_for_sp2host_psys_event_queue 20
+
+#ifndef ISP2401
+/* function ia_css_tagger_sp_propagate_frame: 2410 */
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_sp_stop_copy_preview
+#define HIVE_MEM_sp_stop_copy_preview scalar_processor_2400_dmem
+#define HIVE_ADDR_sp_stop_copy_preview 0x6290
+#define HIVE_SIZE_sp_stop_copy_preview 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_sp_stop_copy_preview scalar_processor_2400_dmem
+#define HIVE_ADDR_sp_sp_stop_copy_preview 0x6290
+#define HIVE_SIZE_sp_sp_stop_copy_preview 4
+#else
+/* function ia_css_tagger_sp_propagate_frame: 2460 */
+#endif
+
+#ifndef ISP2401
+/* function input_system_reg_load: B17 */
+#else
+/* function input_system_reg_load: B11 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_vbuf_handles
+#define HIVE_MEM_vbuf_handles scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_vbuf_handles 0x6328
+#else
+#define HIVE_ADDR_vbuf_handles 0x6384
+#endif
+#define HIVE_SIZE_vbuf_handles 960
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_vbuf_handles scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_vbuf_handles 0x6328
+#else
+#define HIVE_ADDR_sp_vbuf_handles 0x6384
+#endif
+#define HIVE_SIZE_sp_vbuf_handles 960
+
+#ifndef ISP2401
+/* function ia_css_queue_store: 4D9A */
+
+/* function ia_css_sp_flash_register: 2C2C */
+#else
+/* function ia_css_queue_store: 4FF8 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_sp_rawcopy_dummy_function: 5652 */
+#else
+/* function ia_css_sp_flash_register: 2DCE */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_isys_sp_backend_create: 5B37 */
+#else
+/* function ia_css_isys_sp_backend_create: 5C72 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_pipeline_sp_init: 1833 */
+#else
+/* function ia_css_pipeline_sp_init: 186D */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_tagger_sp_configure: 2300 */
+#else
+/* function ia_css_tagger_sp_configure: 2350 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_ispctrl_sp_end_binary: 3566 */
+#else
+/* function ia_css_ispctrl_sp_end_binary: 375A */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_ia_css_bufq_sp_h_pipe_private_per_frame_ddr_ptrs
+#define HIVE_MEM_ia_css_bufq_sp_h_pipe_private_per_frame_ddr_ptrs scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_ia_css_bufq_sp_h_pipe_private_per_frame_ddr_ptrs 0x4B60
+#else
+#define HIVE_ADDR_ia_css_bufq_sp_h_pipe_private_per_frame_ddr_ptrs 0x4BBC
+#endif
+#define HIVE_SIZE_ia_css_bufq_sp_h_pipe_private_per_frame_ddr_ptrs 20
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_ia_css_bufq_sp_h_pipe_private_per_frame_ddr_ptrs scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_ia_css_bufq_sp_h_pipe_private_per_frame_ddr_ptrs 0x4B60
+#else
+#define HIVE_ADDR_sp_ia_css_bufq_sp_h_pipe_private_per_frame_ddr_ptrs 0x4BBC
+#endif
+#define HIVE_SIZE_sp_ia_css_bufq_sp_h_pipe_private_per_frame_ddr_ptrs 20
+
+#ifndef ISP2401
+/* function receiver_port_reg_store: AC9 */
+#else
+/* function receiver_port_reg_store: AC3 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_event_is_pending_mask
+#define HIVE_MEM_event_is_pending_mask scalar_processor_2400_dmem
+#define HIVE_ADDR_event_is_pending_mask 0x5C
+#define HIVE_SIZE_event_is_pending_mask 44
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_event_is_pending_mask scalar_processor_2400_dmem
+#define HIVE_ADDR_sp_event_is_pending_mask 0x5C
+#define HIVE_SIZE_sp_event_is_pending_mask 44
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_sp_all_cb_elems_frame
+#define HIVE_MEM_sp_all_cb_elems_frame scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_all_cb_elems_frame 0x46AC
+#else
+#define HIVE_ADDR_sp_all_cb_elems_frame 0x46F4
+#endif
+#define HIVE_SIZE_sp_all_cb_elems_frame 16
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_sp_all_cb_elems_frame scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_sp_all_cb_elems_frame 0x46AC
+#else
+#define HIVE_ADDR_sp_sp_all_cb_elems_frame 0x46F4
+#endif
+#define HIVE_SIZE_sp_sp_all_cb_elems_frame 16
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_sp2host_isys_event_queue_handle
+#define HIVE_MEM_sp2host_isys_event_queue_handle scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp2host_isys_event_queue_handle 0x4B74
+#else
+#define HIVE_ADDR_sp2host_isys_event_queue_handle 0x4BD0
+#endif
+#define HIVE_SIZE_sp2host_isys_event_queue_handle 12
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_sp2host_isys_event_queue_handle scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_sp2host_isys_event_queue_handle 0x4B74
+#else
+#define HIVE_ADDR_sp_sp2host_isys_event_queue_handle 0x4BD0
+#endif
+#define HIVE_SIZE_sp_sp2host_isys_event_queue_handle 12
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_host_sp_com
+#define HIVE_MEM_host_sp_com scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_host_sp_com 0x4114
+#else
+#define HIVE_ADDR_host_sp_com 0x4134
+#endif
+#define HIVE_SIZE_host_sp_com 220
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_host_sp_com scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_host_sp_com 0x4114
+#else
+#define HIVE_ADDR_sp_host_sp_com 0x4134
+#endif
+#define HIVE_SIZE_sp_host_sp_com 220
+
+#ifndef ISP2401
+/* function ia_css_queue_get_free_space: 49F9 */
+#else
+/* function ia_css_queue_get_free_space: 4C57 */
+#endif
+
+#ifndef ISP2401
+/* function exec_image_pipe: 6C4 */
+#else
+/* function exec_image_pipe: 658 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_sp_init_dmem_data
+#define HIVE_MEM_sp_init_dmem_data scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_init_dmem_data 0x62B0
+#else
+#define HIVE_ADDR_sp_init_dmem_data 0x630C
+#endif
+#define HIVE_SIZE_sp_init_dmem_data 24
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_sp_init_dmem_data scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_sp_init_dmem_data 0x62B0
+#else
+#define HIVE_ADDR_sp_sp_init_dmem_data 0x630C
+#endif
+#define HIVE_SIZE_sp_sp_init_dmem_data 24
+
+#ifndef ISP2401
+/* function ia_css_sp_metadata_start: 5914 */
+#else
+/* function ia_css_sp_metadata_start: 5A4F */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_bufq_sp_init_buffer_queues: 2C9B */
+#else
+/* function ia_css_bufq_sp_init_buffer_queues: 2E3D */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_pipeline_sp_stop: 1816 */
+#else
+/* function ia_css_pipeline_sp_stop: 1850 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_tagger_sp_connect_pipes: 27EA */
+#else
+/* function ia_css_tagger_sp_connect_pipes: 283A */
+#endif
+
+#ifndef ISP2401
+/* function sp_isys_copy_wait: 70D */
+#else
+/* function sp_isys_copy_wait: 6A1 */
+#endif
+
+/* function is_isp_debug_buffer_full: 337 */
+
+#ifndef ISP2401
+/* function ia_css_dmaproxy_sp_configure_channel_from_info: 32AF */
+#else
+/* function ia_css_dmaproxy_sp_configure_channel_from_info: 3490 */
+#endif
+
+#ifndef ISP2401
+/* function encode_and_post_timer_event: A30 */
+#else
+/* function encode_and_post_timer_event: 9C4 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_sp_per_frame_data
+#define HIVE_MEM_sp_per_frame_data scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_per_frame_data 0x41F0
+#else
+#define HIVE_ADDR_sp_per_frame_data 0x4210
+#endif
+#define HIVE_SIZE_sp_per_frame_data 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_sp_per_frame_data scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_sp_per_frame_data 0x41F0
+#else
+#define HIVE_ADDR_sp_sp_per_frame_data 0x4210
+#endif
+#define HIVE_SIZE_sp_sp_per_frame_data 4
+
+#ifndef ISP2401
+/* function ia_css_rmgr_sp_vbuf_dequeue: 62D4 */
+#else
+/* function ia_css_rmgr_sp_vbuf_dequeue: 640F */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_host2sp_psys_event_queue_handle
+#define HIVE_MEM_host2sp_psys_event_queue_handle scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_host2sp_psys_event_queue_handle 0x4B80
+#else
+#define HIVE_ADDR_host2sp_psys_event_queue_handle 0x4BDC
+#endif
+#define HIVE_SIZE_host2sp_psys_event_queue_handle 12
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_host2sp_psys_event_queue_handle scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_host2sp_psys_event_queue_handle 0x4B80
+#else
+#define HIVE_ADDR_sp_host2sp_psys_event_queue_handle 0x4BDC
+#endif
+#define HIVE_SIZE_sp_host2sp_psys_event_queue_handle 12
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_xmem_bin_addr
+#define HIVE_MEM_xmem_bin_addr scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_xmem_bin_addr 0x41F4
+#else
+#define HIVE_ADDR_xmem_bin_addr 0x4214
+#endif
+#define HIVE_SIZE_xmem_bin_addr 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_xmem_bin_addr scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_xmem_bin_addr 0x41F4
+#else
+#define HIVE_ADDR_sp_xmem_bin_addr 0x4214
+#endif
+#define HIVE_SIZE_sp_xmem_bin_addr 4
+
+#ifndef ISP2401
+/* function tmr_clock_init: 65A0 */
+#else
+/* function tmr_clock_init: 66DB */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_pipeline_sp_run: 1403 */
+#else
+/* function ia_css_pipeline_sp_run: 1424 */
+#endif
+
+#ifndef ISP2401
+/* function memcpy: 68F7 */
+#else
+/* function memcpy: 6A6A */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_GP_DEVICE_BASE
+#define HIVE_MEM_GP_DEVICE_BASE scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_GP_DEVICE_BASE 0x2FC
+#else
+#define HIVE_ADDR_GP_DEVICE_BASE 0x314
+#endif
+#define HIVE_SIZE_GP_DEVICE_BASE 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_GP_DEVICE_BASE scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_GP_DEVICE_BASE 0x2FC
+#else
+#define HIVE_ADDR_sp_GP_DEVICE_BASE 0x314
+#endif
+#define HIVE_SIZE_sp_GP_DEVICE_BASE 4
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_ia_css_thread_sp_ready_queue
+#define HIVE_MEM_ia_css_thread_sp_ready_queue scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_ia_css_thread_sp_ready_queue 0x1E0
+#else
+#define HIVE_ADDR_ia_css_thread_sp_ready_queue 0x1E4
+#endif
+#define HIVE_SIZE_ia_css_thread_sp_ready_queue 12
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_ia_css_thread_sp_ready_queue scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_ia_css_thread_sp_ready_queue 0x1E0
+#else
+#define HIVE_ADDR_sp_ia_css_thread_sp_ready_queue 0x1E4
+#endif
+#define HIVE_SIZE_sp_ia_css_thread_sp_ready_queue 12
+
+#ifndef ISP2401
+/* function input_system_reg_store: B1E */
+#else
+/* function input_system_reg_store: B18 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_isys_sp_frontend_start: 5D4D */
+#else
+/* function ia_css_isys_sp_frontend_start: 5E88 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_uds_sp_scale_params: 6600 */
+#else
+/* function ia_css_uds_sp_scale_params: 6773 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_circbuf_increase_size: E40 */
+#else
+/* function ia_css_circbuf_increase_size: E35 */
+#endif
+
+#ifndef ISP2401
+/* function __divu: 6875 */
+#else
+/* function __divu: 69E8 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_thread_sp_get_state: C83 */
+#else
+/* function ia_css_thread_sp_get_state: C78 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_sem_for_cont_capt_stop
+#define HIVE_MEM_sem_for_cont_capt_stop scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sem_for_cont_capt_stop 0x46BC
+#else
+#define HIVE_ADDR_sem_for_cont_capt_stop 0x4704
+#endif
+#define HIVE_SIZE_sem_for_cont_capt_stop 20
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_sem_for_cont_capt_stop scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_sem_for_cont_capt_stop 0x46BC
+#else
+#define HIVE_ADDR_sp_sem_for_cont_capt_stop 0x4704
+#endif
+#define HIVE_SIZE_sp_sem_for_cont_capt_stop 20
+
+#ifndef ISP2401
+/* function thread_fiber_sp_main: E39 */
+#else
+/* function thread_fiber_sp_main: E2E */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_sp_isp_pipe_thread
+#define HIVE_MEM_sp_isp_pipe_thread scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_isp_pipe_thread 0x4800
+#define HIVE_SIZE_sp_isp_pipe_thread 340
+#else
+#define HIVE_ADDR_sp_isp_pipe_thread 0x4848
+#define HIVE_SIZE_sp_isp_pipe_thread 360
+#endif
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_sp_isp_pipe_thread scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_sp_isp_pipe_thread 0x4800
+#define HIVE_SIZE_sp_sp_isp_pipe_thread 340
+#else
+#define HIVE_ADDR_sp_sp_isp_pipe_thread 0x4848
+#define HIVE_SIZE_sp_sp_isp_pipe_thread 360
+#endif
+
+#ifndef ISP2401
+/* function ia_css_parambuf_sp_handle_parameter_sets: 128A */
+#else
+/* function ia_css_parambuf_sp_handle_parameter_sets: 127F */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_spctrl_sp_set_state: 5943 */
+#else
+/* function ia_css_spctrl_sp_set_state: 5A7E */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_thread_sem_sp_signal: 6AF7 */
+#else
+/* function ia_css_thread_sem_sp_signal: 6C6C */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_IRQ_BASE
+#define HIVE_MEM_IRQ_BASE scalar_processor_2400_dmem
+#define HIVE_ADDR_IRQ_BASE 0x2C
+#define HIVE_SIZE_IRQ_BASE 16
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_IRQ_BASE scalar_processor_2400_dmem
+#define HIVE_ADDR_sp_IRQ_BASE 0x2C
+#define HIVE_SIZE_sp_IRQ_BASE 16
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_TIMED_CTRL_BASE
+#define HIVE_MEM_TIMED_CTRL_BASE scalar_processor_2400_dmem
+#define HIVE_ADDR_TIMED_CTRL_BASE 0x40
+#define HIVE_SIZE_TIMED_CTRL_BASE 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_TIMED_CTRL_BASE scalar_processor_2400_dmem
+#define HIVE_ADDR_sp_TIMED_CTRL_BASE 0x40
+#define HIVE_SIZE_sp_TIMED_CTRL_BASE 4
+
+#ifndef ISP2401
+/* function ia_css_isys_sp_isr: 6FDC */
+
+/* function ia_css_isys_sp_generate_exp_id: 60E5 */
+#else
+/* function ia_css_isys_sp_isr: 7139 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_rmgr_sp_init: 61CF */
+#else
+/* function ia_css_isys_sp_generate_exp_id: 6220 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_thread_sem_sp_init: 6BC8 */
+#else
+/* function ia_css_rmgr_sp_init: 630A */
+#endif
+
+#ifndef ISP2401
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_is_isp_requested
+#define HIVE_MEM_is_isp_requested scalar_processor_2400_dmem
+#define HIVE_ADDR_is_isp_requested 0x308
+#define HIVE_SIZE_is_isp_requested 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_is_isp_requested scalar_processor_2400_dmem
+#define HIVE_ADDR_sp_is_isp_requested 0x308
+#define HIVE_SIZE_sp_is_isp_requested 4
+#else
+/* function ia_css_thread_sem_sp_init: 6D3B */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_sem_for_reading_cb_frame
+#define HIVE_MEM_sem_for_reading_cb_frame scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sem_for_reading_cb_frame 0x46D0
+#else
+#define HIVE_ADDR_sem_for_reading_cb_frame 0x4718
+#endif
+#define HIVE_SIZE_sem_for_reading_cb_frame 40
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_sem_for_reading_cb_frame scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_sem_for_reading_cb_frame 0x46D0
+#else
+#define HIVE_ADDR_sp_sem_for_reading_cb_frame 0x4718
+#endif
+#define HIVE_SIZE_sp_sem_for_reading_cb_frame 40
+
+#ifndef ISP2401
+/* function ia_css_dmaproxy_sp_execute: 3217 */
+#else
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_is_isp_requested
+#define HIVE_MEM_is_isp_requested scalar_processor_2400_dmem
+#define HIVE_ADDR_is_isp_requested 0x320
+#define HIVE_SIZE_is_isp_requested 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_is_isp_requested scalar_processor_2400_dmem
+#define HIVE_ADDR_sp_is_isp_requested 0x320
+#define HIVE_SIZE_sp_is_isp_requested 4
+
+/* function ia_css_dmaproxy_sp_execute: 33F6 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_queue_is_empty: 48E0 */
+#else
+/* function ia_css_queue_is_empty: 7098 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_pipeline_sp_has_stopped: 180C */
+#else
+/* function ia_css_pipeline_sp_has_stopped: 1846 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_circbuf_extract: F44 */
+#else
+/* function ia_css_circbuf_extract: F39 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_tagger_buf_sp_is_locked_from_start: 2B0D */
+#else
+/* function ia_css_tagger_buf_sp_is_locked_from_start: 2CAF */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_current_sp_thread
+#define HIVE_MEM_current_sp_thread scalar_processor_2400_dmem
+#define HIVE_ADDR_current_sp_thread 0x1DC
+#define HIVE_SIZE_current_sp_thread 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_current_sp_thread scalar_processor_2400_dmem
+#define HIVE_ADDR_sp_current_sp_thread 0x1DC
+#define HIVE_SIZE_sp_current_sp_thread 4
+
+#ifndef ISP2401
+/* function ia_css_spctrl_sp_get_spid: 594A */
+#else
+/* function ia_css_spctrl_sp_get_spid: 5A85 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_bufq_sp_reset_buffers: 2D22 */
+#else
+/* function ia_css_bufq_sp_reset_buffers: 2EC4 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_dmaproxy_sp_read_byte_addr: 6E35 */
+#else
+/* function ia_css_dmaproxy_sp_read_byte_addr: 6F79 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_rmgr_sp_uninit: 61C8 */
+#else
+/* function ia_css_rmgr_sp_uninit: 6303 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_sp_threads_stack
+#define HIVE_MEM_sp_threads_stack scalar_processor_2400_dmem
+#define HIVE_ADDR_sp_threads_stack 0x164
+#define HIVE_SIZE_sp_threads_stack 28
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_sp_threads_stack scalar_processor_2400_dmem
+#define HIVE_ADDR_sp_sp_threads_stack 0x164
+#define HIVE_SIZE_sp_sp_threads_stack 28
+
+#ifndef ISP2401
+/* function ia_css_circbuf_peek: F26 */
+#else
+/* function ia_css_circbuf_peek: F1B */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_parambuf_sp_wait_for_in_param: 1053 */
+#else
+/* function ia_css_parambuf_sp_wait_for_in_param: 1048 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_isys_sp_token_map_get_exp_id: 5FAD */
+#else
+/* function ia_css_isys_sp_token_map_get_exp_id: 60E8 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_sp_all_cb_elems_param
+#define HIVE_MEM_sp_all_cb_elems_param scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_all_cb_elems_param 0x46F8
+#else
+#define HIVE_ADDR_sp_all_cb_elems_param 0x4740
+#endif
+#define HIVE_SIZE_sp_all_cb_elems_param 16
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_sp_all_cb_elems_param scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_sp_all_cb_elems_param 0x46F8
+#else
+#define HIVE_ADDR_sp_sp_all_cb_elems_param 0x4740
+#endif
+#define HIVE_SIZE_sp_sp_all_cb_elems_param 16
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_pipeline_sp_curr_binary_id
+#define HIVE_MEM_pipeline_sp_curr_binary_id scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_pipeline_sp_curr_binary_id 0x1EC
+#else
+#define HIVE_ADDR_pipeline_sp_curr_binary_id 0x1F0
+#endif
+#define HIVE_SIZE_pipeline_sp_curr_binary_id 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_pipeline_sp_curr_binary_id scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_pipeline_sp_curr_binary_id 0x1EC
+#else
+#define HIVE_ADDR_sp_pipeline_sp_curr_binary_id 0x1F0
+#endif
+#define HIVE_SIZE_sp_pipeline_sp_curr_binary_id 4
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_sp_all_cbs_frame_desc
+#define HIVE_MEM_sp_all_cbs_frame_desc scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_all_cbs_frame_desc 0x4708
+#else
+#define HIVE_ADDR_sp_all_cbs_frame_desc 0x4750
+#endif
+#define HIVE_SIZE_sp_all_cbs_frame_desc 8
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_sp_all_cbs_frame_desc scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_sp_all_cbs_frame_desc 0x4708
+#else
+#define HIVE_ADDR_sp_sp_all_cbs_frame_desc 0x4750
+#endif
+#define HIVE_SIZE_sp_sp_all_cbs_frame_desc 8
+
+#ifndef ISP2401
+/* function sp_isys_copy_func_v2: 706 */
+#else
+/* function sp_isys_copy_func_v2: 69A */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_sem_for_reading_cb_param
+#define HIVE_MEM_sem_for_reading_cb_param scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sem_for_reading_cb_param 0x4710
+#else
+#define HIVE_ADDR_sem_for_reading_cb_param 0x4758
+#endif
+#define HIVE_SIZE_sem_for_reading_cb_param 40
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_sem_for_reading_cb_param scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_sem_for_reading_cb_param 0x4710
+#else
+#define HIVE_ADDR_sp_sem_for_reading_cb_param 0x4758
+#endif
+#define HIVE_SIZE_sp_sem_for_reading_cb_param 40
+
+#ifndef ISP2401
+/* function ia_css_queue_get_used_space: 49AD */
+#else
+/* function ia_css_queue_get_used_space: 4C0B */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_sem_for_cont_capt_start
+#define HIVE_MEM_sem_for_cont_capt_start scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sem_for_cont_capt_start 0x4738
+#else
+#define HIVE_ADDR_sem_for_cont_capt_start 0x4780
+#endif
+#define HIVE_SIZE_sem_for_cont_capt_start 20
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_sem_for_cont_capt_start scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_sem_for_cont_capt_start 0x4738
+#else
+#define HIVE_ADDR_sp_sem_for_cont_capt_start 0x4780
+#endif
+#define HIVE_SIZE_sp_sem_for_cont_capt_start 20
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_tmp_heap
+#define HIVE_MEM_tmp_heap scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_tmp_heap 0x6010
+#else
+#define HIVE_ADDR_tmp_heap 0x6070
+#endif
+#define HIVE_SIZE_tmp_heap 640
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_tmp_heap scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_tmp_heap 0x6010
+#else
+#define HIVE_ADDR_sp_tmp_heap 0x6070
+#endif
+#define HIVE_SIZE_sp_tmp_heap 640
+
+#ifndef ISP2401
+/* function ia_css_rmgr_sp_get_num_vbuf: 64D8 */
+#else
+/* function ia_css_rmgr_sp_get_num_vbuf: 6613 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_ispctrl_sp_output_compute_dma_info: 3F49 */
+#else
+/* function ia_css_ispctrl_sp_output_compute_dma_info: 418C */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_tagger_sp_lock_exp_id: 20CD */
+#else
+/* function ia_css_tagger_sp_lock_exp_id: 211D */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_ia_css_bufq_sp_pipe_private_s3a_bufs
+#define HIVE_MEM_ia_css_bufq_sp_pipe_private_s3a_bufs scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_ia_css_bufq_sp_pipe_private_s3a_bufs 0x4B8C
+#else
+#define HIVE_ADDR_ia_css_bufq_sp_pipe_private_s3a_bufs 0x4BE8
+#endif
+#define HIVE_SIZE_ia_css_bufq_sp_pipe_private_s3a_bufs 60
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_ia_css_bufq_sp_pipe_private_s3a_bufs scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_ia_css_bufq_sp_pipe_private_s3a_bufs 0x4B8C
+#else
+#define HIVE_ADDR_sp_ia_css_bufq_sp_pipe_private_s3a_bufs 0x4BE8
+#endif
+#define HIVE_SIZE_sp_ia_css_bufq_sp_pipe_private_s3a_bufs 60
+
+#ifndef ISP2401
+/* function ia_css_queue_is_full: 4A44 */
+#else
+/* function ia_css_queue_is_full: 4CA2 */
+#endif
+
+/* function debug_buffer_init_isp: E4 */
+
+#ifndef ISP2401
+/* function ia_css_isys_sp_frontend_uninit: 5D07 */
+#else
+/* function ia_css_isys_sp_frontend_uninit: 5E42 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_tagger_sp_exp_id_is_locked: 2003 */
+#else
+/* function ia_css_tagger_sp_exp_id_is_locked: 2053 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_ia_css_rmgr_sp_mipi_frame_sem
+#define HIVE_MEM_ia_css_rmgr_sp_mipi_frame_sem scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_ia_css_rmgr_sp_mipi_frame_sem 0x66E8
+#else
+#define HIVE_ADDR_ia_css_rmgr_sp_mipi_frame_sem 0x6744
+#endif
+#define HIVE_SIZE_ia_css_rmgr_sp_mipi_frame_sem 60
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_ia_css_rmgr_sp_mipi_frame_sem scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_ia_css_rmgr_sp_mipi_frame_sem 0x66E8
+#else
+#define HIVE_ADDR_sp_ia_css_rmgr_sp_mipi_frame_sem 0x6744
+#endif
+#define HIVE_SIZE_sp_ia_css_rmgr_sp_mipi_frame_sem 60
+
+#ifndef ISP2401
+/* function ia_css_rmgr_sp_refcount_dump: 62AF */
+#else
+/* function ia_css_rmgr_sp_refcount_dump: 63EA */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_ia_css_bufq_sp_pipe_private_isp_parameters_id
+#define HIVE_MEM_ia_css_bufq_sp_pipe_private_isp_parameters_id scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_ia_css_bufq_sp_pipe_private_isp_parameters_id 0x4BC8
+#else
+#define HIVE_ADDR_ia_css_bufq_sp_pipe_private_isp_parameters_id 0x4C24
+#endif
+#define HIVE_SIZE_ia_css_bufq_sp_pipe_private_isp_parameters_id 20
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_ia_css_bufq_sp_pipe_private_isp_parameters_id scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_ia_css_bufq_sp_pipe_private_isp_parameters_id 0x4BC8
+#else
+#define HIVE_ADDR_sp_ia_css_bufq_sp_pipe_private_isp_parameters_id 0x4C24
+#endif
+#define HIVE_SIZE_sp_ia_css_bufq_sp_pipe_private_isp_parameters_id 20
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_sp_pipe_threads
+#define HIVE_MEM_sp_pipe_threads scalar_processor_2400_dmem
+#define HIVE_ADDR_sp_pipe_threads 0x150
+#define HIVE_SIZE_sp_pipe_threads 20
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_sp_pipe_threads scalar_processor_2400_dmem
+#define HIVE_ADDR_sp_sp_pipe_threads 0x150
+#define HIVE_SIZE_sp_sp_pipe_threads 20
+
+#ifndef ISP2401
+/* function sp_event_proxy_func: 71B */
+#else
+/* function sp_event_proxy_func: 6AF */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_host2sp_isys_event_queue_handle
+#define HIVE_MEM_host2sp_isys_event_queue_handle scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_host2sp_isys_event_queue_handle 0x4BDC
+#else
+#define HIVE_ADDR_host2sp_isys_event_queue_handle 0x4C38
+#endif
+#define HIVE_SIZE_host2sp_isys_event_queue_handle 12
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_host2sp_isys_event_queue_handle scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_host2sp_isys_event_queue_handle 0x4BDC
+#else
+#define HIVE_ADDR_sp_host2sp_isys_event_queue_handle 0x4C38
+#endif
+#define HIVE_SIZE_sp_host2sp_isys_event_queue_handle 12
+
+#ifndef ISP2401
+/* function ia_css_thread_sp_yield: 6A70 */
+#else
+/* function ia_css_thread_sp_yield: 6BEA */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_sp_all_cbs_param_desc
+#define HIVE_MEM_sp_all_cbs_param_desc scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_all_cbs_param_desc 0x474C
+#else
+#define HIVE_ADDR_sp_all_cbs_param_desc 0x4794
+#endif
+#define HIVE_SIZE_sp_all_cbs_param_desc 8
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_sp_all_cbs_param_desc scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_sp_all_cbs_param_desc 0x474C
+#else
+#define HIVE_ADDR_sp_sp_all_cbs_param_desc 0x4794
+#endif
+#define HIVE_SIZE_sp_sp_all_cbs_param_desc 8
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_ia_css_dmaproxy_sp_invalidate_tlb
+#define HIVE_MEM_ia_css_dmaproxy_sp_invalidate_tlb scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_ia_css_dmaproxy_sp_invalidate_tlb 0x5BF4
+#else
+#define HIVE_ADDR_ia_css_dmaproxy_sp_invalidate_tlb 0x5C50
+#endif
+#define HIVE_SIZE_ia_css_dmaproxy_sp_invalidate_tlb 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_ia_css_dmaproxy_sp_invalidate_tlb scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_ia_css_dmaproxy_sp_invalidate_tlb 0x5BF4
+#else
+#define HIVE_ADDR_sp_ia_css_dmaproxy_sp_invalidate_tlb 0x5C50
+#endif
+#define HIVE_SIZE_sp_ia_css_dmaproxy_sp_invalidate_tlb 4
+
+#ifndef ISP2401
+/* function ia_css_thread_sp_fork: D10 */
+#else
+/* function ia_css_thread_sp_fork: D05 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_tagger_sp_destroy: 27F4 */
+#else
+/* function ia_css_tagger_sp_destroy: 2844 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_dmaproxy_sp_vmem_read: 31B7 */
+#else
+/* function ia_css_dmaproxy_sp_vmem_read: 3396 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_ifmtr_sp_init: 6136 */
+#else
+/* function ia_css_ifmtr_sp_init: 6271 */
+#endif
+
+#ifndef ISP2401
+/* function initialize_sp_group: 6D4 */
+#else
+/* function initialize_sp_group: 668 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_tagger_buf_sp_peek: 2919 */
+#else
+/* function ia_css_tagger_buf_sp_peek: 2ABB */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_thread_sp_init: D3C */
+#else
+/* function ia_css_thread_sp_init: D31 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_isys_sp_reset_exp_id: 60DD */
+#else
+/* function ia_css_isys_sp_reset_exp_id: 6218 */
+#endif
+
+#ifndef ISP2401
+/* function qos_scheduler_update_fps: 65F0 */
+#else
+/* function qos_scheduler_update_fps: 6763 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_ispctrl_sp_set_stream_base_addr: 461E */
+#else
+/* function ia_css_ispctrl_sp_set_stream_base_addr: 4879 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_ISP_DMEM_BASE
+#define HIVE_MEM_ISP_DMEM_BASE scalar_processor_2400_dmem
+#define HIVE_ADDR_ISP_DMEM_BASE 0x10
+#define HIVE_SIZE_ISP_DMEM_BASE 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_ISP_DMEM_BASE scalar_processor_2400_dmem
+#define HIVE_ADDR_sp_ISP_DMEM_BASE 0x10
+#define HIVE_SIZE_sp_ISP_DMEM_BASE 4
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_SP_DMEM_BASE
+#define HIVE_MEM_SP_DMEM_BASE scalar_processor_2400_dmem
+#define HIVE_ADDR_SP_DMEM_BASE 0x4
+#define HIVE_SIZE_SP_DMEM_BASE 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_SP_DMEM_BASE scalar_processor_2400_dmem
+#define HIVE_ADDR_sp_SP_DMEM_BASE 0x4
+#define HIVE_SIZE_sp_SP_DMEM_BASE 4
+
+#ifndef ISP2401
+/* function ia_css_dmaproxy_sp_read: 322D */
+#else
+/* function __ia_css_queue_is_empty_text: 4B68 */
+
+/* function ia_css_dmaproxy_sp_read: 340C */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_raw_copy_line_count
+#define HIVE_MEM_raw_copy_line_count scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_raw_copy_line_count 0x2C8
+#else
+#define HIVE_ADDR_raw_copy_line_count 0x2E0
+#endif
+#define HIVE_SIZE_raw_copy_line_count 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_raw_copy_line_count scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_raw_copy_line_count 0x2C8
+#else
+#define HIVE_ADDR_sp_raw_copy_line_count 0x2E0
+#endif
+#define HIVE_SIZE_sp_raw_copy_line_count 4
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_host2sp_tag_cmd_queue_handle
+#define HIVE_MEM_host2sp_tag_cmd_queue_handle scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_host2sp_tag_cmd_queue_handle 0x4BE8
+#else
+#define HIVE_ADDR_host2sp_tag_cmd_queue_handle 0x4C44
+#endif
+#define HIVE_SIZE_host2sp_tag_cmd_queue_handle 12
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_host2sp_tag_cmd_queue_handle scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_host2sp_tag_cmd_queue_handle 0x4BE8
+#else
+#define HIVE_ADDR_sp_host2sp_tag_cmd_queue_handle 0x4C44
+#endif
+#define HIVE_SIZE_sp_host2sp_tag_cmd_queue_handle 12
+
+#ifndef ISP2401
+/* function ia_css_queue_peek: 4923 */
+#else
+/* function ia_css_queue_peek: 4B81 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_ia_css_flash_sp_frame_cnt
+#define HIVE_MEM_ia_css_flash_sp_frame_cnt scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_ia_css_flash_sp_frame_cnt 0x4A94
+#else
+#define HIVE_ADDR_ia_css_flash_sp_frame_cnt 0x4AF0
+#endif
+#define HIVE_SIZE_ia_css_flash_sp_frame_cnt 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_ia_css_flash_sp_frame_cnt scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_ia_css_flash_sp_frame_cnt 0x4A94
+#else
+#define HIVE_ADDR_sp_ia_css_flash_sp_frame_cnt 0x4AF0
+#endif
+#define HIVE_SIZE_sp_ia_css_flash_sp_frame_cnt 4
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_event_can_send_token_mask
+#define HIVE_MEM_event_can_send_token_mask scalar_processor_2400_dmem
+#define HIVE_ADDR_event_can_send_token_mask 0x88
+#define HIVE_SIZE_event_can_send_token_mask 44
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_event_can_send_token_mask scalar_processor_2400_dmem
+#define HIVE_ADDR_sp_event_can_send_token_mask 0x88
+#define HIVE_SIZE_sp_event_can_send_token_mask 44
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_isp_thread
+#define HIVE_MEM_isp_thread scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_isp_thread 0x5F40
+#else
+#define HIVE_ADDR_isp_thread 0x5FA0
+#endif
+#define HIVE_SIZE_isp_thread 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_isp_thread scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_isp_thread 0x5F40
+#else
+#define HIVE_ADDR_sp_isp_thread 0x5FA0
+#endif
+#define HIVE_SIZE_sp_isp_thread 4
+
+#ifndef ISP2401
+/* function encode_and_post_sp_event_non_blocking: A78 */
+#else
+/* function encode_and_post_sp_event_non_blocking: A0C */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_isys_sp_frontend_destroy: 5DDF */
+#else
+/* function ia_css_isys_sp_frontend_destroy: 5F1A */
+#endif
+
+/* function is_ddr_debug_buffer_full: 2CC */
+
+#ifndef ISP2401
+/* function ia_css_isys_sp_frontend_stop: 5D1F */
+#else
+/* function ia_css_isys_sp_frontend_stop: 5E5A */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_isys_sp_token_map_init: 607B */
+#else
+/* function ia_css_isys_sp_token_map_init: 61B6 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_tagger_buf_sp_get_oldest_marked_offset: 2969 */
+#else
+/* function ia_css_tagger_buf_sp_get_oldest_marked_offset: 2B0B */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_sp_threads_fiber
+#define HIVE_MEM_sp_threads_fiber scalar_processor_2400_dmem
+#define HIVE_ADDR_sp_threads_fiber 0x19C
+#define HIVE_SIZE_sp_threads_fiber 28
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_sp_threads_fiber scalar_processor_2400_dmem
+#define HIVE_ADDR_sp_sp_threads_fiber 0x19C
+#define HIVE_SIZE_sp_sp_threads_fiber 28
+
+#ifndef ISP2401
+/* function encode_and_post_sp_event: A01 */
+#else
+/* function encode_and_post_sp_event: 995 */
+#endif
+
+/* function debug_enqueue_ddr: EE */
+
+#ifndef ISP2401
+/* function ia_css_rmgr_sp_refcount_init_vbuf: 626A */
+#else
+/* function ia_css_rmgr_sp_refcount_init_vbuf: 63A5 */
+#endif
+
+#ifndef ISP2401
+/* function dmaproxy_sp_read_write: 6EE4 */
+#else
+/* function dmaproxy_sp_read_write: 7017 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_ia_css_dmaproxy_isp_dma_cmd_buffer
+#define HIVE_MEM_ia_css_dmaproxy_isp_dma_cmd_buffer scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_ia_css_dmaproxy_isp_dma_cmd_buffer 0x5BF8
+#else
+#define HIVE_ADDR_ia_css_dmaproxy_isp_dma_cmd_buffer 0x5C54
+#endif
+#define HIVE_SIZE_ia_css_dmaproxy_isp_dma_cmd_buffer 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_ia_css_dmaproxy_isp_dma_cmd_buffer scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_ia_css_dmaproxy_isp_dma_cmd_buffer 0x5BF8
+#else
+#define HIVE_ADDR_sp_ia_css_dmaproxy_isp_dma_cmd_buffer 0x5C54
+#endif
+#define HIVE_SIZE_sp_ia_css_dmaproxy_isp_dma_cmd_buffer 4
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_host2sp_buffer_queue_handle
+#define HIVE_MEM_host2sp_buffer_queue_handle scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_host2sp_buffer_queue_handle 0x4BF4
+#else
+#define HIVE_ADDR_host2sp_buffer_queue_handle 0x4C50
+#endif
+#define HIVE_SIZE_host2sp_buffer_queue_handle 480
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_host2sp_buffer_queue_handle scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_host2sp_buffer_queue_handle 0x4BF4
+#else
+#define HIVE_ADDR_sp_host2sp_buffer_queue_handle 0x4C50
+#endif
+#define HIVE_SIZE_sp_host2sp_buffer_queue_handle 480
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_ia_css_flash_sp_in_service
+#define HIVE_MEM_ia_css_flash_sp_in_service scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_ia_css_flash_sp_in_service 0x3178
+#else
+#define HIVE_ADDR_ia_css_flash_sp_in_service 0x3198
+#endif
+#define HIVE_SIZE_ia_css_flash_sp_in_service 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_ia_css_flash_sp_in_service scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_ia_css_flash_sp_in_service 0x3178
+#else
+#define HIVE_ADDR_sp_ia_css_flash_sp_in_service 0x3198
+#endif
+#define HIVE_SIZE_sp_ia_css_flash_sp_in_service 4
+
+#ifndef ISP2401
+/* function ia_css_dmaproxy_sp_process: 6BF0 */
+#else
+/* function ia_css_dmaproxy_sp_process: 6D63 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_tagger_buf_sp_mark_from_end: 2BF1 */
+#else
+/* function ia_css_tagger_buf_sp_mark_from_end: 2D93 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_isys_sp_backend_rcv_acquire_ack: 59EC */
+#else
+/* function ia_css_isys_sp_backend_rcv_acquire_ack: 5B27 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_isys_sp_backend_pre_acquire_request: 5A02 */
+#else
+/* function ia_css_isys_sp_backend_pre_acquire_request: 5B3D */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_ispctrl_sp_init_cs: 3653 */
+#else
+/* function ia_css_ispctrl_sp_init_cs: 3855 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_spctrl_sp_init: 5958 */
+#else
+/* function ia_css_spctrl_sp_init: 5A93 */
+#endif
+
+#ifndef ISP2401
+/* function sp_event_proxy_init: 730 */
+#else
+/* function sp_event_proxy_init: 6C4 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_ia_css_bufq_sp_pipe_private_previous_clock_tick
+#define HIVE_MEM_ia_css_bufq_sp_pipe_private_previous_clock_tick scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_ia_css_bufq_sp_pipe_private_previous_clock_tick 0x4DD4
+#else
+#define HIVE_ADDR_ia_css_bufq_sp_pipe_private_previous_clock_tick 0x4E30
+#endif
+#define HIVE_SIZE_ia_css_bufq_sp_pipe_private_previous_clock_tick 40
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_ia_css_bufq_sp_pipe_private_previous_clock_tick scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_ia_css_bufq_sp_pipe_private_previous_clock_tick 0x4DD4
+#else
+#define HIVE_ADDR_sp_ia_css_bufq_sp_pipe_private_previous_clock_tick 0x4E30
+#endif
+#define HIVE_SIZE_sp_ia_css_bufq_sp_pipe_private_previous_clock_tick 40
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_sp_output
+#define HIVE_MEM_sp_output scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_output 0x41F8
+#else
+#define HIVE_ADDR_sp_output 0x4218
+#endif
+#define HIVE_SIZE_sp_output 16
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_sp_output scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_sp_output 0x41F8
+#else
+#define HIVE_ADDR_sp_sp_output 0x4218
+#endif
+#define HIVE_SIZE_sp_sp_output 16
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_ia_css_bufq_sp_sems_for_host2sp_buf_queues
+#define HIVE_MEM_ia_css_bufq_sp_sems_for_host2sp_buf_queues scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_ia_css_bufq_sp_sems_for_host2sp_buf_queues 0x4DFC
+#else
+#define HIVE_ADDR_ia_css_bufq_sp_sems_for_host2sp_buf_queues 0x4E58
+#endif
+#define HIVE_SIZE_ia_css_bufq_sp_sems_for_host2sp_buf_queues 800
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_ia_css_bufq_sp_sems_for_host2sp_buf_queues scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_ia_css_bufq_sp_sems_for_host2sp_buf_queues 0x4DFC
+#else
+#define HIVE_ADDR_sp_ia_css_bufq_sp_sems_for_host2sp_buf_queues 0x4E58
+#endif
+#define HIVE_SIZE_sp_ia_css_bufq_sp_sems_for_host2sp_buf_queues 800
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_ISP_CTRL_BASE
+#define HIVE_MEM_ISP_CTRL_BASE scalar_processor_2400_dmem
+#define HIVE_ADDR_ISP_CTRL_BASE 0x8
+#define HIVE_SIZE_ISP_CTRL_BASE 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_ISP_CTRL_BASE scalar_processor_2400_dmem
+#define HIVE_ADDR_sp_ISP_CTRL_BASE 0x8
+#define HIVE_SIZE_sp_ISP_CTRL_BASE 4
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_INPUT_FORMATTER_BASE
+#define HIVE_MEM_INPUT_FORMATTER_BASE scalar_processor_2400_dmem
+#define HIVE_ADDR_INPUT_FORMATTER_BASE 0x4C
+#define HIVE_SIZE_INPUT_FORMATTER_BASE 16
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_INPUT_FORMATTER_BASE scalar_processor_2400_dmem
+#define HIVE_ADDR_sp_INPUT_FORMATTER_BASE 0x4C
+#define HIVE_SIZE_sp_INPUT_FORMATTER_BASE 16
+
+#ifndef ISP2401
+/* function sp_dma_proxy_reset_channels: 3487 */
+#else
+/* function sp_dma_proxy_reset_channels: 367B */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_isys_sp_backend_acquire: 5B0D */
+#else
+/* function ia_css_isys_sp_backend_acquire: 5C48 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_tagger_sp_update_size: 28E8 */
+#else
+/* function ia_css_tagger_sp_update_size: 2A8A */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_ia_css_bufq_host_sp_queue
+#define HIVE_MEM_ia_css_bufq_host_sp_queue scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_ia_css_bufq_host_sp_queue 0x511C
+#else
+#define HIVE_ADDR_ia_css_bufq_host_sp_queue 0x5178
+#endif
+#define HIVE_SIZE_ia_css_bufq_host_sp_queue 2008
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_ia_css_bufq_host_sp_queue scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_ia_css_bufq_host_sp_queue 0x511C
+#else
+#define HIVE_ADDR_sp_ia_css_bufq_host_sp_queue 0x5178
+#endif
+#define HIVE_SIZE_sp_ia_css_bufq_host_sp_queue 2008
+
+#ifndef ISP2401
+/* function thread_fiber_sp_create: DA8 */
+#else
+/* function thread_fiber_sp_create: D9D */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_dmaproxy_sp_set_increments: 3319 */
+#else
+/* function ia_css_dmaproxy_sp_set_increments: 350D */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_sem_for_writing_cb_frame
+#define HIVE_MEM_sem_for_writing_cb_frame scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sem_for_writing_cb_frame 0x4754
+#else
+#define HIVE_ADDR_sem_for_writing_cb_frame 0x479C
+#endif
+#define HIVE_SIZE_sem_for_writing_cb_frame 20
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_sem_for_writing_cb_frame scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_sem_for_writing_cb_frame 0x4754
+#else
+#define HIVE_ADDR_sp_sem_for_writing_cb_frame 0x479C
+#endif
+#define HIVE_SIZE_sp_sem_for_writing_cb_frame 20
+
+#ifndef ISP2401
+/* function receiver_reg_store: AD7 */
+#else
+/* function receiver_reg_store: AD1 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_sem_for_writing_cb_param
+#define HIVE_MEM_sem_for_writing_cb_param scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sem_for_writing_cb_param 0x4768
+#else
+#define HIVE_ADDR_sem_for_writing_cb_param 0x47B0
+#endif
+#define HIVE_SIZE_sem_for_writing_cb_param 20
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_sem_for_writing_cb_param scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_sem_for_writing_cb_param 0x4768
+#else
+#define HIVE_ADDR_sp_sem_for_writing_cb_param 0x47B0
+#endif
+#define HIVE_SIZE_sp_sem_for_writing_cb_param 20
+
+/* function sp_start_isp_entry: 453 */
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifdef HIVE_ADDR_sp_start_isp_entry
+#endif
+#define HIVE_ADDR_sp_start_isp_entry 0x453
+#endif
+#define HIVE_ADDR_sp_sp_start_isp_entry 0x453
+
+#ifndef ISP2401
+/* function ia_css_tagger_buf_sp_unmark_all: 2B75 */
+#else
+/* function ia_css_tagger_buf_sp_unmark_all: 2D17 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_tagger_buf_sp_unmark_from_start: 2BB6 */
+#else
+/* function ia_css_tagger_buf_sp_unmark_from_start: 2D58 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_dmaproxy_sp_channel_acquire: 34B3 */
+#else
+/* function ia_css_dmaproxy_sp_channel_acquire: 36A7 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_rmgr_sp_add_num_vbuf: 64B4 */
+#else
+/* function ia_css_rmgr_sp_add_num_vbuf: 65EF */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_isys_sp_token_map_create: 60C4 */
+#else
+/* function ia_css_isys_sp_token_map_create: 61FF */
+#endif
+
+#ifndef ISP2401
+/* function __ia_css_dmaproxy_sp_wait_for_ack_text: 3183 */
+#else
+/* function __ia_css_dmaproxy_sp_wait_for_ack_text: 3362 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_tagger_sp_acquire_buf_elem: 1FDB */
+#else
+/* function ia_css_tagger_sp_acquire_buf_elem: 202B */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_bufq_sp_is_dynamic_buffer: 306C */
+#else
+/* function ia_css_bufq_sp_is_dynamic_buffer: 320E */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_sp_group
+#define HIVE_MEM_sp_group scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_group 0x4208
+#define HIVE_SIZE_sp_group 1144
+#else
+#define HIVE_ADDR_sp_group 0x4228
+#define HIVE_SIZE_sp_group 1184
+#endif
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_sp_group scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_sp_group 0x4208
+#define HIVE_SIZE_sp_sp_group 1144
+#else
+#define HIVE_ADDR_sp_sp_group 0x4228
+#define HIVE_SIZE_sp_sp_group 1184
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_sp_event_proxy_thread
+#define HIVE_MEM_sp_event_proxy_thread scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_event_proxy_thread 0x4954
+#define HIVE_SIZE_sp_event_proxy_thread 68
+#else
+#define HIVE_ADDR_sp_event_proxy_thread 0x49B0
+#define HIVE_SIZE_sp_event_proxy_thread 72
+#endif
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_sp_event_proxy_thread scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_sp_event_proxy_thread 0x4954
+#define HIVE_SIZE_sp_sp_event_proxy_thread 68
+#else
+#define HIVE_ADDR_sp_sp_event_proxy_thread 0x49B0
+#define HIVE_SIZE_sp_sp_event_proxy_thread 72
+#endif
+
+#ifndef ISP2401
+/* function ia_css_thread_sp_kill: CD6 */
+#else
+/* function ia_css_thread_sp_kill: CCB */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_tagger_sp_create: 28A2 */
+#else
+/* function ia_css_tagger_sp_create: 2A38 */
+#endif
+
+#ifndef ISP2401
+/* function tmpmem_acquire_dmem: 6561 */
+#else
+/* function tmpmem_acquire_dmem: 669C */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_MMU_BASE
+#define HIVE_MEM_MMU_BASE scalar_processor_2400_dmem
+#define HIVE_ADDR_MMU_BASE 0x24
+#define HIVE_SIZE_MMU_BASE 8
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_MMU_BASE scalar_processor_2400_dmem
+#define HIVE_ADDR_sp_MMU_BASE 0x24
+#define HIVE_SIZE_sp_MMU_BASE 8
+
+#ifndef ISP2401
+/* function ia_css_dmaproxy_sp_channel_release: 349F */
+#else
+/* function ia_css_dmaproxy_sp_channel_release: 3693 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_dmaproxy_sp_is_idle: 347F */
+#else
+/* function ia_css_dmaproxy_sp_is_idle: 3673 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_sem_for_qos_start
+#define HIVE_MEM_sem_for_qos_start scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sem_for_qos_start 0x477C
+#else
+#define HIVE_ADDR_sem_for_qos_start 0x47C4
+#endif
+#define HIVE_SIZE_sem_for_qos_start 20
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_sem_for_qos_start scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_sem_for_qos_start 0x477C
+#else
+#define HIVE_ADDR_sp_sem_for_qos_start 0x47C4
+#endif
+#define HIVE_SIZE_sp_sem_for_qos_start 20
+
+#ifndef ISP2401
+/* function isp_hmem_load: B55 */
+#else
+/* function isp_hmem_load: B4F */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_tagger_sp_release_buf_elem: 1FB7 */
+#else
+/* function ia_css_tagger_sp_release_buf_elem: 2007 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_eventq_sp_send: 34F5 */
+#else
+/* function ia_css_eventq_sp_send: 36E9 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_ia_css_isys_sp_error_cnt
+#define HIVE_MEM_ia_css_isys_sp_error_cnt scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_ia_css_isys_sp_error_cnt 0x62D4
+#else
+#define HIVE_ADDR_ia_css_isys_sp_error_cnt 0x6330
+#endif
+#define HIVE_SIZE_ia_css_isys_sp_error_cnt 16
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_ia_css_isys_sp_error_cnt scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_ia_css_isys_sp_error_cnt 0x62D4
+#else
+#define HIVE_ADDR_sp_ia_css_isys_sp_error_cnt 0x6330
+#endif
+#define HIVE_SIZE_sp_ia_css_isys_sp_error_cnt 16
+
+#ifndef ISP2401
+/* function ia_css_tagger_buf_sp_unlock_from_start: 2AA5 */
+#else
+/* function ia_css_tagger_buf_sp_unlock_from_start: 2C47 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_debug_buffer_ddr_address
+#define HIVE_MEM_debug_buffer_ddr_address scalar_processor_2400_dmem
+#define HIVE_ADDR_debug_buffer_ddr_address 0xBC
+#define HIVE_SIZE_debug_buffer_ddr_address 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_debug_buffer_ddr_address scalar_processor_2400_dmem
+#define HIVE_ADDR_sp_debug_buffer_ddr_address 0xBC
+#define HIVE_SIZE_sp_debug_buffer_ddr_address 4
+
+#ifndef ISP2401
+/* function sp_isys_copy_request: 714 */
+#else
+/* function sp_isys_copy_request: 6A8 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_rmgr_sp_refcount_retain_vbuf: 6344 */
+#else
+/* function ia_css_rmgr_sp_refcount_retain_vbuf: 647F */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_thread_sp_set_priority: CCE */
+#else
+/* function ia_css_thread_sp_set_priority: CC3 */
+#endif
+
+#ifndef ISP2401
+/* function sizeof_hmem: BFC */
+#else
+/* function sizeof_hmem: BF6 */
+#endif
+
+#ifndef ISP2401
+/* function tmpmem_release_dmem: 6550 */
+#else
+/* function tmpmem_release_dmem: 668B */
+#endif
+
+/* function cnd_input_system_cfg: 392 */
+
+#ifndef ISP2401
+/* function __ia_css_sp_rawcopy_func_critical: 6F65 */
+#else
+/* function __ia_css_sp_rawcopy_func_critical: 70C2 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_dmaproxy_sp_set_width_exception: 3304 */
+#else
+/* function __ia_css_dmaproxy_sp_process_text: 3306 */
+#endif
+
+#ifndef ISP2401
+/* function sp_event_assert: 8B1 */
+#else
+/* function ia_css_dmaproxy_sp_set_width_exception: 34F8 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_flash_sp_init_internal_params: 2C90 */
+#else
+/* function sp_event_assert: 845 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_tagger_buf_sp_pop_unmarked_and_unlocked: 29AB */
+#else
+/* function ia_css_flash_sp_init_internal_params: 2E32 */
+#endif
+
+#ifndef ISP2401
+/* function __modu: 68BB */
+#else
+/* function ia_css_tagger_buf_sp_pop_unmarked_and_unlocked: 2B4D */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_dmaproxy_sp_init_isp_vector: 3189 */
+#else
+/* function __modu: 6A2E */
+
+/* function ia_css_dmaproxy_sp_init_isp_vector: 3368 */
+#endif
+
+/* function isp_vamem_store: 0 */
+
+#ifdef ISP2401
+/* function ia_css_tagger_sp_set_copy_pipe: 2A2F */
+
+#endif
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_GDC_BASE
+#define HIVE_MEM_GDC_BASE scalar_processor_2400_dmem
+#define HIVE_ADDR_GDC_BASE 0x44
+#define HIVE_SIZE_GDC_BASE 8
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_GDC_BASE scalar_processor_2400_dmem
+#define HIVE_ADDR_sp_GDC_BASE 0x44
+#define HIVE_SIZE_sp_GDC_BASE 8
+
+#ifndef ISP2401
+/* function ia_css_queue_local_init: 4C0E */
+#else
+/* function ia_css_queue_local_init: 4E6C */
+#endif
+
+#ifndef ISP2401
+/* function sp_event_proxy_callout_func: 6988 */
+#else
+/* function sp_event_proxy_callout_func: 6AFB */
+#endif
+
+#ifndef ISP2401
+/* function qos_scheduler_schedule_stage: 65C1 */
+#else
+/* function qos_scheduler_schedule_stage: 670F */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_ia_css_thread_sp_num_ready_threads
+#define HIVE_MEM_ia_css_thread_sp_num_ready_threads scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_ia_css_thread_sp_num_ready_threads 0x49E0
+#else
+#define HIVE_ADDR_ia_css_thread_sp_num_ready_threads 0x4A40
+#endif
+#define HIVE_SIZE_ia_css_thread_sp_num_ready_threads 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_ia_css_thread_sp_num_ready_threads scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_ia_css_thread_sp_num_ready_threads 0x49E0
+#else
+#define HIVE_ADDR_sp_ia_css_thread_sp_num_ready_threads 0x4A40
+#endif
+#define HIVE_SIZE_sp_ia_css_thread_sp_num_ready_threads 4
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_sp_threads_stack_size
+#define HIVE_MEM_sp_threads_stack_size scalar_processor_2400_dmem
+#define HIVE_ADDR_sp_threads_stack_size 0x180
+#define HIVE_SIZE_sp_threads_stack_size 28
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_sp_threads_stack_size scalar_processor_2400_dmem
+#define HIVE_ADDR_sp_sp_threads_stack_size 0x180
+#define HIVE_SIZE_sp_sp_threads_stack_size 28
+
+#ifndef ISP2401
+/* function ia_css_ispctrl_sp_isp_done_row_striping: 3F2F */
+#else
+/* function ia_css_ispctrl_sp_isp_done_row_striping: 4172 */
+#endif
+
+#ifndef ISP2401
+/* function __ia_css_isys_sp_isr_text: 5E09 */
+#else
+/* function __ia_css_isys_sp_isr_text: 5F44 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_queue_dequeue: 4A8C */
+#else
+/* function ia_css_queue_dequeue: 4CEA */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_dmaproxy_sp_configure_channel: 6E4C */
+#else
+/* function is_qos_standalone_mode: 66EA */
+
+/* function ia_css_dmaproxy_sp_configure_channel: 6F90 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_current_thread_fiber_sp
+#define HIVE_MEM_current_thread_fiber_sp scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_current_thread_fiber_sp 0x49E8
+#else
+#define HIVE_ADDR_current_thread_fiber_sp 0x4A44
+#endif
+#define HIVE_SIZE_current_thread_fiber_sp 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_current_thread_fiber_sp scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_current_thread_fiber_sp 0x49E8
+#else
+#define HIVE_ADDR_sp_current_thread_fiber_sp 0x4A44
+#endif
+#define HIVE_SIZE_sp_current_thread_fiber_sp 4
+
+#ifndef ISP2401
+/* function ia_css_circbuf_pop: FD8 */
+#else
+/* function ia_css_circbuf_pop: FCD */
+#endif
+
+#ifndef ISP2401
+/* function memset: 693A */
+#else
+/* function memset: 6AAD */
+#endif
+
+/* function irq_raise_set_token: B6 */
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_GPIO_BASE
+#define HIVE_MEM_GPIO_BASE scalar_processor_2400_dmem
+#define HIVE_ADDR_GPIO_BASE 0x3C
+#define HIVE_SIZE_GPIO_BASE 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_GPIO_BASE scalar_processor_2400_dmem
+#define HIVE_ADDR_sp_GPIO_BASE 0x3C
+#define HIVE_SIZE_sp_GPIO_BASE 4
+
+#ifndef ISP2401
+/* function ia_css_pipeline_acc_stage_enable: 17D7 */
+#else
+/* function ia_css_pipeline_acc_stage_enable: 17FF */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_tagger_sp_unlock_exp_id: 2028 */
+#else
+/* function ia_css_tagger_sp_unlock_exp_id: 2078 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_isp_ph
+#define HIVE_MEM_isp_ph scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_isp_ph 0x62E4
+#else
+#define HIVE_ADDR_isp_ph 0x6340
+#endif
+#define HIVE_SIZE_isp_ph 28
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_isp_ph scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_isp_ph 0x62E4
+#else
+#define HIVE_ADDR_sp_isp_ph 0x6340
+#endif
+#define HIVE_SIZE_sp_isp_ph 28
+
+#ifndef ISP2401
+/* function ia_css_isys_sp_token_map_flush: 6009 */
+#else
+/* function ia_css_isys_sp_token_map_flush: 6144 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_ispctrl_sp_init_ds: 37B2 */
+#else
+/* function ia_css_ispctrl_sp_init_ds: 39E1 */
+#endif
+
+#ifndef ISP2401
+/* function get_xmem_base_addr_raw: 3B5F */
+#else
+/* function get_xmem_base_addr_raw: 3D9A */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_sp_all_cbs_param
+#define HIVE_MEM_sp_all_cbs_param scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_all_cbs_param 0x4790
+#else
+#define HIVE_ADDR_sp_all_cbs_param 0x47D8
+#endif
+#define HIVE_SIZE_sp_all_cbs_param 16
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_sp_all_cbs_param scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_sp_all_cbs_param 0x4790
+#else
+#define HIVE_ADDR_sp_sp_all_cbs_param 0x47D8
+#endif
+#define HIVE_SIZE_sp_sp_all_cbs_param 16
+
+#ifndef ISP2401
+/* function ia_css_circbuf_create: 1026 */
+#else
+/* function ia_css_circbuf_create: 101B */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_sem_for_sp_group
+#define HIVE_MEM_sem_for_sp_group scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sem_for_sp_group 0x47A0
+#else
+#define HIVE_ADDR_sem_for_sp_group 0x47E8
+#endif
+#define HIVE_SIZE_sem_for_sp_group 20
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_sem_for_sp_group scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_sem_for_sp_group 0x47A0
+#else
+#define HIVE_ADDR_sp_sem_for_sp_group 0x47E8
+#endif
+#define HIVE_SIZE_sp_sem_for_sp_group 20
+
+#ifndef ISP2401
+/* function ia_css_framebuf_sp_wait_for_in_frame: 64DF */
+#else
+/* function __ia_css_dmaproxy_sp_configure_channel_text: 34D7 */
+
+/* function ia_css_framebuf_sp_wait_for_in_frame: 661A */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_sp_rawcopy_tag_frame: 556F */
+#else
+/* function ia_css_sp_rawcopy_tag_frame: 57B0 */
+#endif
+
+#ifndef ISP2401
+/* function isp_hmem_clear: B25 */
+#else
+/* function isp_hmem_clear: B1F */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_framebuf_sp_release_in_frame: 6522 */
+#else
+/* function ia_css_framebuf_sp_release_in_frame: 665D */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_isys_sp_backend_snd_acquire_request: 5A5F */
+#else
+/* function ia_css_isys_sp_backend_snd_acquire_request: 5B9A */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_isys_sp_token_map_is_full: 5E90 */
+#else
+/* function ia_css_isys_sp_token_map_is_full: 5FCB */
+#endif
+
+#ifndef ISP2401
+/* function input_system_acquisition_run: AF9 */
+#else
+/* function input_system_acquisition_run: AF3 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_ispctrl_sp_start_binary: 3631 */
+#else
+/* function ia_css_ispctrl_sp_start_binary: 3833 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_ia_css_bufq_sp_h_pipe_private_ddr_ptrs
+#define HIVE_MEM_ia_css_bufq_sp_h_pipe_private_ddr_ptrs scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_ia_css_bufq_sp_h_pipe_private_ddr_ptrs 0x58F4
+#else
+#define HIVE_ADDR_ia_css_bufq_sp_h_pipe_private_ddr_ptrs 0x5950
+#endif
+#define HIVE_SIZE_ia_css_bufq_sp_h_pipe_private_ddr_ptrs 20
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_ia_css_bufq_sp_h_pipe_private_ddr_ptrs scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_ia_css_bufq_sp_h_pipe_private_ddr_ptrs 0x58F4
+#else
+#define HIVE_ADDR_sp_ia_css_bufq_sp_h_pipe_private_ddr_ptrs 0x5950
+#endif
+#define HIVE_SIZE_sp_ia_css_bufq_sp_h_pipe_private_ddr_ptrs 20
+
+#ifndef ISP2401
+/* function ia_css_eventq_sp_recv: 34C7 */
+#else
+/* function ia_css_eventq_sp_recv: 36BB */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_isp_pool
+#define HIVE_MEM_isp_pool scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_isp_pool 0x2E8
+#else
+#define HIVE_ADDR_isp_pool 0x300
+#endif
+#define HIVE_SIZE_isp_pool 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_isp_pool scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_isp_pool 0x2E8
+#else
+#define HIVE_ADDR_sp_isp_pool 0x300
+#endif
+#define HIVE_SIZE_sp_isp_pool 4
+
+#ifndef ISP2401
+/* function ia_css_rmgr_sp_rel_gen: 6211 */
+#else
+/* function ia_css_rmgr_sp_rel_gen: 634C */
+
+/* function ia_css_tagger_sp_unblock_clients: 2900 */
+#endif
+
+#ifndef ISP2401
+/* function css_get_frame_processing_time_end: 1FA7 */
+#else
+/* function css_get_frame_processing_time_end: 1FF7 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_event_any_pending_mask
+#define HIVE_MEM_event_any_pending_mask scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_event_any_pending_mask 0x300
+#else
+#define HIVE_ADDR_event_any_pending_mask 0x318
+#endif
+#define HIVE_SIZE_event_any_pending_mask 8
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_event_any_pending_mask scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_event_any_pending_mask 0x300
+#else
+#define HIVE_ADDR_sp_event_any_pending_mask 0x318
+#endif
+#define HIVE_SIZE_sp_event_any_pending_mask 8
+
+#ifndef ISP2401
+/* function ia_css_isys_sp_backend_push: 5A16 */
+#else
+/* function ia_css_isys_sp_backend_push: 5B51 */
+#endif
+
+/* function sh_css_decode_tag_descr: 352 */
+
+/* function debug_enqueue_isp: 27B */
+
+#ifndef ISP2401
+/* function qos_scheduler_update_stage_budget: 65AF */
+#else
+/* function qos_scheduler_update_stage_budget: 66F2 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_spctrl_sp_uninit: 5951 */
+#else
+/* function ia_css_spctrl_sp_uninit: 5A8C */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_HIVE_IF_SWITCH_CODE
+#define HIVE_MEM_HIVE_IF_SWITCH_CODE scalar_processor_2400_dmem
+#define HIVE_ADDR_HIVE_IF_SWITCH_CODE 0x1D8
+#define HIVE_SIZE_HIVE_IF_SWITCH_CODE 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_HIVE_IF_SWITCH_CODE scalar_processor_2400_dmem
+#define HIVE_ADDR_sp_HIVE_IF_SWITCH_CODE 0x1D8
+#define HIVE_SIZE_sp_HIVE_IF_SWITCH_CODE 4
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_ia_css_bufq_sp_pipe_private_dis_bufs
+#define HIVE_MEM_ia_css_bufq_sp_pipe_private_dis_bufs scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_ia_css_bufq_sp_pipe_private_dis_bufs 0x5908
+#else
+#define HIVE_ADDR_ia_css_bufq_sp_pipe_private_dis_bufs 0x5964
+#endif
+#define HIVE_SIZE_ia_css_bufq_sp_pipe_private_dis_bufs 140
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_ia_css_bufq_sp_pipe_private_dis_bufs scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_ia_css_bufq_sp_pipe_private_dis_bufs 0x5908
+#else
+#define HIVE_ADDR_sp_ia_css_bufq_sp_pipe_private_dis_bufs 0x5964
+#endif
+#define HIVE_SIZE_sp_ia_css_bufq_sp_pipe_private_dis_bufs 140
+
+#ifndef ISP2401
+/* function ia_css_tagger_buf_sp_lock_from_start: 2AD9 */
+#else
+/* function ia_css_tagger_buf_sp_lock_from_start: 2C7B */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_sem_for_isp_idle
+#define HIVE_MEM_sem_for_isp_idle scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sem_for_isp_idle 0x47B4
+#else
+#define HIVE_ADDR_sem_for_isp_idle 0x47FC
+#endif
+#define HIVE_SIZE_sem_for_isp_idle 20
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_sem_for_isp_idle scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_sem_for_isp_idle 0x47B4
+#else
+#define HIVE_ADDR_sp_sem_for_isp_idle 0x47FC
+#endif
+#define HIVE_SIZE_sp_sem_for_isp_idle 20
+
+#ifndef ISP2401
+/* function ia_css_dmaproxy_sp_write_byte_addr: 31E6 */
+#else
+/* function ia_css_dmaproxy_sp_write_byte_addr: 33C5 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_dmaproxy_sp_init: 315D */
+#else
+/* function ia_css_dmaproxy_sp_init: 333C */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_bufq_sp_release_dynamic_buf_clock_tick: 2D62 */
+#else
+/* function ia_css_bufq_sp_release_dynamic_buf_clock_tick: 2F04 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_ISP_VAMEM_BASE
+#define HIVE_MEM_ISP_VAMEM_BASE scalar_processor_2400_dmem
+#define HIVE_ADDR_ISP_VAMEM_BASE 0x14
+#define HIVE_SIZE_ISP_VAMEM_BASE 12
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_ISP_VAMEM_BASE scalar_processor_2400_dmem
+#define HIVE_ADDR_sp_ISP_VAMEM_BASE 0x14
+#define HIVE_SIZE_sp_ISP_VAMEM_BASE 12
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_ia_css_rawcopy_sp_tagger
+#define HIVE_MEM_ia_css_rawcopy_sp_tagger scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_ia_css_rawcopy_sp_tagger 0x6294
+#else
+#define HIVE_ADDR_ia_css_rawcopy_sp_tagger 0x62F0
+#endif
+#define HIVE_SIZE_ia_css_rawcopy_sp_tagger 24
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_ia_css_rawcopy_sp_tagger scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_ia_css_rawcopy_sp_tagger 0x6294
+#else
+#define HIVE_ADDR_sp_ia_css_rawcopy_sp_tagger 0x62F0
+#endif
+#define HIVE_SIZE_sp_ia_css_rawcopy_sp_tagger 24
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_ia_css_bufq_sp_pipe_private_exp_ids
+#define HIVE_MEM_ia_css_bufq_sp_pipe_private_exp_ids scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_ia_css_bufq_sp_pipe_private_exp_ids 0x5994
+#else
+#define HIVE_ADDR_ia_css_bufq_sp_pipe_private_exp_ids 0x59F0
+#endif
+#define HIVE_SIZE_ia_css_bufq_sp_pipe_private_exp_ids 70
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_ia_css_bufq_sp_pipe_private_exp_ids scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_ia_css_bufq_sp_pipe_private_exp_ids 0x5994
+#else
+#define HIVE_ADDR_sp_ia_css_bufq_sp_pipe_private_exp_ids 0x59F0
+#endif
+#define HIVE_SIZE_sp_ia_css_bufq_sp_pipe_private_exp_ids 70
+
+#ifndef ISP2401
+/* function ia_css_queue_item_load: 4D00 */
+#else
+/* function ia_css_queue_item_load: 4F5E */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_spctrl_sp_get_state: 593C */
+#else
+/* function ia_css_spctrl_sp_get_state: 5A77 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_isys_sp_token_map_uninit: 6026 */
+#else
+/* function ia_css_isys_sp_token_map_uninit: 6161 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_callout_sp_thread
+#define HIVE_MEM_callout_sp_thread scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_callout_sp_thread 0x49DC
+#else
+#define HIVE_ADDR_callout_sp_thread 0x1E0
+#endif
+#define HIVE_SIZE_callout_sp_thread 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_callout_sp_thread scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_callout_sp_thread 0x49DC
+#else
+#define HIVE_ADDR_sp_callout_sp_thread 0x1E0
+#endif
+#define HIVE_SIZE_sp_callout_sp_thread 4
+
+#ifndef ISP2401
+/* function thread_fiber_sp_init: E2F */
+#else
+/* function thread_fiber_sp_init: E24 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_SP_PMEM_BASE
+#define HIVE_MEM_SP_PMEM_BASE scalar_processor_2400_dmem
+#define HIVE_ADDR_SP_PMEM_BASE 0x0
+#define HIVE_SIZE_SP_PMEM_BASE 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_SP_PMEM_BASE scalar_processor_2400_dmem
+#define HIVE_ADDR_sp_SP_PMEM_BASE 0x0
+#define HIVE_SIZE_sp_SP_PMEM_BASE 4
+
+#ifndef ISP2401
+/* function ia_css_isys_sp_token_map_snd_acquire_req: 5F96 */
+#else
+/* function ia_css_isys_sp_token_map_snd_acquire_req: 60D1 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_sp_isp_input_stream_format
+#define HIVE_MEM_sp_isp_input_stream_format scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_isp_input_stream_format 0x40F8
+#else
+#define HIVE_ADDR_sp_isp_input_stream_format 0x4118
+#endif
+#define HIVE_SIZE_sp_isp_input_stream_format 20
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_sp_isp_input_stream_format scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_sp_isp_input_stream_format 0x40F8
+#else
+#define HIVE_ADDR_sp_sp_isp_input_stream_format 0x4118
+#endif
+#define HIVE_SIZE_sp_sp_isp_input_stream_format 20
+
+#ifndef ISP2401
+/* function __mod: 68A7 */
+#else
+/* function __mod: 6A1A */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_dmaproxy_sp_init_dmem_channel: 3247 */
+#else
+/* function ia_css_dmaproxy_sp_init_dmem_channel: 3426 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_thread_sp_join: CFF */
+#else
+/* function ia_css_thread_sp_join: CF4 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_dmaproxy_sp_add_command: 6F4F */
+#else
+/* function ia_css_dmaproxy_sp_add_command: 7082 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_sp_metadata_thread_func: 57F0 */
+#else
+/* function ia_css_sp_metadata_thread_func: 594F */
+#endif
+
+#ifndef ISP2401
+/* function __sp_event_proxy_func_critical: 6975 */
+#else
+/* function __sp_event_proxy_func_critical: 6AE8 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_sp_metadata_wait: 5903 */
+#else
+/* function ia_css_sp_metadata_wait: 5A3E */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_circbuf_peek_from_start: F08 */
+#else
+/* function ia_css_circbuf_peek_from_start: EFD */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_event_sp_encode: 3552 */
+#else
+/* function ia_css_event_sp_encode: 3746 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_thread_sp_run: D72 */
+#else
+/* function ia_css_thread_sp_run: D67 */
+#endif
+
+#ifndef ISP2401
+/* function sp_isys_copy_func: 6F6 */
+#else
+/* function sp_isys_copy_func: 68A */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_isys_sp_backend_flush: 5A7F */
+#else
+/* function ia_css_isys_sp_backend_flush: 5BBA */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_isys_sp_backend_frame_exists: 599B */
+#else
+/* function ia_css_isys_sp_backend_frame_exists: 5AD6 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_sp_isp_param_init_isp_memories: 4789 */
+#else
+/* function ia_css_sp_isp_param_init_isp_memories: 4A11 */
+#endif
+
+#ifndef ISP2401
+/* function register_isr: 8A9 */
+#else
+/* function register_isr: 83D */
+#endif
+
+/* function irq_raise: C8 */
+
+#ifndef ISP2401
+/* function ia_css_dmaproxy_sp_mmu_invalidate: 3124 */
+#else
+/* function ia_css_dmaproxy_sp_mmu_invalidate: 32CC */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_HIVE_IF_SRST_ADDRESS
+#define HIVE_MEM_HIVE_IF_SRST_ADDRESS scalar_processor_2400_dmem
+#define HIVE_ADDR_HIVE_IF_SRST_ADDRESS 0x1B8
+#define HIVE_SIZE_HIVE_IF_SRST_ADDRESS 16
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_HIVE_IF_SRST_ADDRESS scalar_processor_2400_dmem
+#define HIVE_ADDR_sp_HIVE_IF_SRST_ADDRESS 0x1B8
+#define HIVE_SIZE_sp_HIVE_IF_SRST_ADDRESS 16
+
+#ifndef ISP2401
+/* function pipeline_sp_initialize_stage: 190B */
+#else
+/* function pipeline_sp_initialize_stage: 1945 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_ia_css_isys_sp_frontend_states
+#define HIVE_MEM_ia_css_isys_sp_frontend_states scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_ia_css_isys_sp_frontend_states 0x62C8
+#else
+#define HIVE_ADDR_ia_css_isys_sp_frontend_states 0x6324
+#endif
+#define HIVE_SIZE_ia_css_isys_sp_frontend_states 12
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_ia_css_isys_sp_frontend_states scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_ia_css_isys_sp_frontend_states 0x62C8
+#else
+#define HIVE_ADDR_sp_ia_css_isys_sp_frontend_states 0x6324
+#endif
+#define HIVE_SIZE_sp_ia_css_isys_sp_frontend_states 12
+
+#ifndef ISP2401
+/* function ia_css_dmaproxy_sp_read_byte_addr_mmio: 6E1E */
+#else
+/* function ia_css_dmaproxy_sp_read_byte_addr_mmio: 6F62 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_ispctrl_sp_done_ds: 3799 */
+#else
+/* function ia_css_ispctrl_sp_done_ds: 39C8 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_sp_isp_param_get_mem_inits: 4764 */
+#else
+/* function ia_css_sp_isp_param_get_mem_inits: 49EC */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_parambuf_sp_init_buffer_queues: 13D0 */
+#else
+/* function ia_css_parambuf_sp_init_buffer_queues: 13F1 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_vbuf_pfp_spref
+#define HIVE_MEM_vbuf_pfp_spref scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_vbuf_pfp_spref 0x2F0
+#else
+#define HIVE_ADDR_vbuf_pfp_spref 0x308
+#endif
+#define HIVE_SIZE_vbuf_pfp_spref 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_vbuf_pfp_spref scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_vbuf_pfp_spref 0x2F0
+#else
+#define HIVE_ADDR_sp_vbuf_pfp_spref 0x308
+#endif
+#define HIVE_SIZE_sp_vbuf_pfp_spref 4
+
+#ifndef ISP2401
+/* function input_system_cfg: ABB */
+#else
+/* function input_system_cfg: AB5 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_ISP_HMEM_BASE
+#define HIVE_MEM_ISP_HMEM_BASE scalar_processor_2400_dmem
+#define HIVE_ADDR_ISP_HMEM_BASE 0x20
+#define HIVE_SIZE_ISP_HMEM_BASE 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_ISP_HMEM_BASE scalar_processor_2400_dmem
+#define HIVE_ADDR_sp_ISP_HMEM_BASE 0x20
+#define HIVE_SIZE_sp_ISP_HMEM_BASE 4
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_ia_css_bufq_sp_pipe_private_frames
+#define HIVE_MEM_ia_css_bufq_sp_pipe_private_frames scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_ia_css_bufq_sp_pipe_private_frames 0x59DC
+#else
+#define HIVE_ADDR_ia_css_bufq_sp_pipe_private_frames 0x5A38
+#endif
+#define HIVE_SIZE_ia_css_bufq_sp_pipe_private_frames 280
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_ia_css_bufq_sp_pipe_private_frames scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_ia_css_bufq_sp_pipe_private_frames 0x59DC
+#else
+#define HIVE_ADDR_sp_ia_css_bufq_sp_pipe_private_frames 0x5A38
+#endif
+#define HIVE_SIZE_sp_ia_css_bufq_sp_pipe_private_frames 280
+
+#ifndef ISP2401
+/* function qos_scheduler_init_stage_budget: 65E8 */
+#else
+/* function qos_scheduler_init_stage_budget: 6750 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_isys_sp_backend_release: 5AF4 */
+#else
+/* function ia_css_isys_sp_backend_release: 5C2F */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_isys_sp_backend_destroy: 5B1E */
+#else
+/* function ia_css_isys_sp_backend_destroy: 5C59 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_sp2host_buffer_queue_handle
+#define HIVE_MEM_sp2host_buffer_queue_handle scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp2host_buffer_queue_handle 0x5AF4
+#else
+#define HIVE_ADDR_sp2host_buffer_queue_handle 0x5B50
+#endif
+#define HIVE_SIZE_sp2host_buffer_queue_handle 96
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_sp2host_buffer_queue_handle scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_sp2host_buffer_queue_handle 0x5AF4
+#else
+#define HIVE_ADDR_sp_sp2host_buffer_queue_handle 0x5B50
+#endif
+#define HIVE_SIZE_sp_sp2host_buffer_queue_handle 96
+
+#ifndef ISP2401
+/* function ia_css_isys_sp_token_map_check_mipi_frame_size: 5F5A */
+#else
+/* function ia_css_isys_sp_token_map_check_mipi_frame_size: 6095 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_ispctrl_sp_init_isp_vars: 4483 */
+#else
+/* function ia_css_ispctrl_sp_init_isp_vars: 46DE */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_isys_sp_frontend_has_empty_mipi_buffer_cb: 5B70 */
+#else
+/* function ia_css_isys_sp_frontend_has_empty_mipi_buffer_cb: 5CAB */
+#endif
+
+#ifndef ISP2401
+/* function sp_warning: 8DC */
+#else
+/* function sp_warning: 870 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_rmgr_sp_vbuf_enqueue: 6304 */
+#else
+/* function ia_css_rmgr_sp_vbuf_enqueue: 643F */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_tagger_sp_tag_exp_id: 2142 */
+#else
+/* function ia_css_tagger_sp_tag_exp_id: 2192 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_dmaproxy_sp_write: 31FD */
+#else
+/* function ia_css_dmaproxy_sp_write: 33DC */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_parambuf_sp_release_in_param: 1250 */
+#else
+/* function ia_css_parambuf_sp_release_in_param: 1245 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_irq_sw_interrupt_token
+#define HIVE_MEM_irq_sw_interrupt_token scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_irq_sw_interrupt_token 0x40F4
+#else
+#define HIVE_ADDR_irq_sw_interrupt_token 0x4114
+#endif
+#define HIVE_SIZE_irq_sw_interrupt_token 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_irq_sw_interrupt_token scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_irq_sw_interrupt_token 0x40F4
+#else
+#define HIVE_ADDR_sp_irq_sw_interrupt_token 0x4114
+#endif
+#define HIVE_SIZE_sp_irq_sw_interrupt_token 4
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_sp_isp_addresses
+#define HIVE_MEM_sp_isp_addresses scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_isp_addresses 0x5F44
+#else
+#define HIVE_ADDR_sp_isp_addresses 0x5FA4
+#endif
+#define HIVE_SIZE_sp_isp_addresses 172
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_sp_isp_addresses scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_sp_isp_addresses 0x5F44
+#else
+#define HIVE_ADDR_sp_sp_isp_addresses 0x5FA4
+#endif
+#define HIVE_SIZE_sp_sp_isp_addresses 172
+
+#ifndef ISP2401
+/* function ia_css_rmgr_sp_acq_gen: 6229 */
+#else
+/* function ia_css_rmgr_sp_acq_gen: 6364 */
+#endif
+
+#ifndef ISP2401
+/* function receiver_reg_load: AD0 */
+#else
+/* function receiver_reg_load: ACA */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_isps
+#define HIVE_MEM_isps scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_isps 0x6300
+#else
+#define HIVE_ADDR_isps 0x635C
+#endif
+#define HIVE_SIZE_isps 28
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_isps scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_isps 0x6300
+#else
+#define HIVE_ADDR_sp_isps 0x635C
+#endif
+#define HIVE_SIZE_sp_isps 28
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_host_sp_queues_initialized
+#define HIVE_MEM_host_sp_queues_initialized scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_host_sp_queues_initialized 0x410C
+#else
+#define HIVE_ADDR_host_sp_queues_initialized 0x412C
+#endif
+#define HIVE_SIZE_host_sp_queues_initialized 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_host_sp_queues_initialized scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_host_sp_queues_initialized 0x410C
+#else
+#define HIVE_ADDR_sp_host_sp_queues_initialized 0x412C
+#endif
+#define HIVE_SIZE_sp_host_sp_queues_initialized 4
+
+#ifndef ISP2401
+/* function ia_css_queue_uninit: 4BCC */
+#else
+/* function ia_css_queue_uninit: 4E2A */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_ia_css_ispctrl_sp_isp_started
+#define HIVE_MEM_ia_css_ispctrl_sp_isp_started scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_ia_css_ispctrl_sp_isp_started 0x5BFC
+#else
+#define HIVE_ADDR_ia_css_ispctrl_sp_isp_started 0x5C58
+#endif
+#define HIVE_SIZE_ia_css_ispctrl_sp_isp_started 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_ia_css_ispctrl_sp_isp_started scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_ia_css_ispctrl_sp_isp_started 0x5BFC
+#else
+#define HIVE_ADDR_sp_ia_css_ispctrl_sp_isp_started 0x5C58
+#endif
+#define HIVE_SIZE_sp_ia_css_ispctrl_sp_isp_started 4
+
+#ifndef ISP2401
+/* function ia_css_bufq_sp_release_dynamic_buf: 2DCE */
+#else
+/* function ia_css_bufq_sp_release_dynamic_buf: 2F70 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_dmaproxy_sp_set_height_exception: 32F5 */
+#else
+/* function ia_css_dmaproxy_sp_set_height_exception: 34E9 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_dmaproxy_sp_init_vmem_channel: 327A */
+#else
+/* function ia_css_dmaproxy_sp_init_vmem_channel: 345A */
+#endif
+
+#ifndef ISP2401
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_num_ready_threads
+#define HIVE_MEM_num_ready_threads scalar_processor_2400_dmem
+#define HIVE_ADDR_num_ready_threads 0x49E4
+#define HIVE_SIZE_num_ready_threads 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_num_ready_threads scalar_processor_2400_dmem
+#define HIVE_ADDR_sp_num_ready_threads 0x49E4
+#define HIVE_SIZE_sp_num_ready_threads 4
+
+/* function ia_css_dmaproxy_sp_write_byte_addr_mmio: 31CF */
+#else
+/* function ia_css_dmaproxy_sp_write_byte_addr_mmio: 33AE */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_vbuf_spref
+#define HIVE_MEM_vbuf_spref scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_vbuf_spref 0x2EC
+#else
+#define HIVE_ADDR_vbuf_spref 0x304
+#endif
+#define HIVE_SIZE_vbuf_spref 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_vbuf_spref scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_vbuf_spref 0x2EC
+#else
+#define HIVE_ADDR_sp_vbuf_spref 0x304
+#endif
+#define HIVE_SIZE_sp_vbuf_spref 4
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_sp_metadata_thread
+#define HIVE_MEM_sp_metadata_thread scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_metadata_thread 0x4998
+#define HIVE_SIZE_sp_metadata_thread 68
+#else
+#define HIVE_ADDR_sp_metadata_thread 0x49F8
+#define HIVE_SIZE_sp_metadata_thread 72
+#endif
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_sp_metadata_thread scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_sp_metadata_thread 0x4998
+#define HIVE_SIZE_sp_sp_metadata_thread 68
+#else
+#define HIVE_ADDR_sp_sp_metadata_thread 0x49F8
+#define HIVE_SIZE_sp_sp_metadata_thread 72
+#endif
+
+#ifndef ISP2401
+/* function ia_css_queue_enqueue: 4B16 */
+#else
+/* function ia_css_queue_enqueue: 4D74 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_ia_css_flash_sp_request
+#define HIVE_MEM_ia_css_flash_sp_request scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_ia_css_flash_sp_request 0x4A98
+#else
+#define HIVE_ADDR_ia_css_flash_sp_request 0x4AF4
+#endif
+#define HIVE_SIZE_ia_css_flash_sp_request 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_ia_css_flash_sp_request scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_ia_css_flash_sp_request 0x4A98
+#else
+#define HIVE_ADDR_sp_ia_css_flash_sp_request 0x4AF4
+#endif
+#define HIVE_SIZE_sp_ia_css_flash_sp_request 4
+
+#ifndef ISP2401
+/* function ia_css_dmaproxy_sp_vmem_write: 31A0 */
+#else
+/* function ia_css_dmaproxy_sp_vmem_write: 337F */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_tagger_frames
+#define HIVE_MEM_tagger_frames scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_tagger_frames 0x49EC
+#else
+#define HIVE_ADDR_tagger_frames 0x4A48
+#endif
+#define HIVE_SIZE_tagger_frames 168
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_tagger_frames scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_tagger_frames 0x49EC
+#else
+#define HIVE_ADDR_sp_tagger_frames 0x4A48
+#endif
+#define HIVE_SIZE_sp_tagger_frames 168
+
+#ifndef ISP2401
+/* function ia_css_isys_sp_token_map_snd_capture_req: 5FB8 */
+#else
+/* function ia_css_isys_sp_token_map_snd_capture_req: 60F3 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_sem_for_reading_if
+#define HIVE_MEM_sem_for_reading_if scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sem_for_reading_if 0x47C8
+#else
+#define HIVE_ADDR_sem_for_reading_if 0x4810
+#endif
+#define HIVE_SIZE_sem_for_reading_if 20
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_sem_for_reading_if scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_sem_for_reading_if 0x47C8
+#else
+#define HIVE_ADDR_sp_sem_for_reading_if 0x4810
+#endif
+#define HIVE_SIZE_sp_sem_for_reading_if 20
+
+#ifndef ISP2401
+/* function sp_generate_interrupts: 95B */
+#else
+/* function sp_generate_interrupts: 8EF */
+
+/* function ia_css_pipeline_sp_start: 1858 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_pipeline_sp_start: 181E */
+#else
+/* function ia_css_thread_default_callout: 6BE3 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_sp_rawcopy_init: 50F3 */
+#else
+/* function ia_css_sp_rawcopy_init: 5351 */
+#endif
+
+#ifndef ISP2401
+/* function tmr_clock_read: 6596 */
+#else
+/* function tmr_clock_read: 66D1 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_ISP_BAMEM_BASE
+#define HIVE_MEM_ISP_BAMEM_BASE scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_ISP_BAMEM_BASE 0x2F8
+#else
+#define HIVE_ADDR_ISP_BAMEM_BASE 0x310
+#endif
+#define HIVE_SIZE_ISP_BAMEM_BASE 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_ISP_BAMEM_BASE scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_ISP_BAMEM_BASE 0x2F8
+#else
+#define HIVE_ADDR_sp_ISP_BAMEM_BASE 0x310
+#endif
+#define HIVE_SIZE_sp_ISP_BAMEM_BASE 4
+
+#ifndef ISP2401
+/* function ia_css_isys_sp_frontend_rcv_capture_ack: 5C1F */
+#else
+/* function ia_css_isys_sp_frontend_rcv_capture_ack: 5D5A */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_ia_css_bufq_sp_sems_for_sp2host_buf_queues
+#define HIVE_MEM_ia_css_bufq_sp_sems_for_sp2host_buf_queues scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_ia_css_bufq_sp_sems_for_sp2host_buf_queues 0x5B54
+#else
+#define HIVE_ADDR_ia_css_bufq_sp_sems_for_sp2host_buf_queues 0x5BB0
+#endif
+#define HIVE_SIZE_ia_css_bufq_sp_sems_for_sp2host_buf_queues 160
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_ia_css_bufq_sp_sems_for_sp2host_buf_queues scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_ia_css_bufq_sp_sems_for_sp2host_buf_queues 0x5B54
+#else
+#define HIVE_ADDR_sp_ia_css_bufq_sp_sems_for_sp2host_buf_queues 0x5BB0
+#endif
+#define HIVE_SIZE_sp_ia_css_bufq_sp_sems_for_sp2host_buf_queues 160
+
+#ifndef ISP2401
+/* function css_get_frame_processing_time_start: 1FAF */
+#else
+/* function css_get_frame_processing_time_start: 1FFF */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_sp_all_cbs_frame
+#define HIVE_MEM_sp_all_cbs_frame scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_all_cbs_frame 0x47DC
+#else
+#define HIVE_ADDR_sp_all_cbs_frame 0x4824
+#endif
+#define HIVE_SIZE_sp_all_cbs_frame 16
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_sp_all_cbs_frame scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_sp_all_cbs_frame 0x47DC
+#else
+#define HIVE_ADDR_sp_sp_all_cbs_frame 0x4824
+#endif
+#define HIVE_SIZE_sp_sp_all_cbs_frame 16
+
+#ifndef ISP2401
+/* function thread_sp_queue_print: D8F */
+#else
+/* function thread_sp_queue_print: D84 */
+#endif
+
+#ifndef ISP2401
+/* function sp_notify_eof: 907 */
+#else
+/* function sp_notify_eof: 89B */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_sem_for_str2mem
+#define HIVE_MEM_sem_for_str2mem scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sem_for_str2mem 0x47EC
+#else
+#define HIVE_ADDR_sem_for_str2mem 0x4834
+#endif
+#define HIVE_SIZE_sem_for_str2mem 20
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_sem_for_str2mem scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_sem_for_str2mem 0x47EC
+#else
+#define HIVE_ADDR_sp_sem_for_str2mem 0x4834
+#endif
+#define HIVE_SIZE_sp_sem_for_str2mem 20
+
+#ifndef ISP2401
+/* function ia_css_tagger_buf_sp_is_marked_from_start: 2B41 */
+#else
+/* function ia_css_tagger_buf_sp_is_marked_from_start: 2CE3 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_bufq_sp_acquire_dynamic_buf: 2F86 */
+#else
+/* function ia_css_bufq_sp_acquire_dynamic_buf: 3128 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_circbuf_destroy: 101D */
+#else
+/* function ia_css_circbuf_destroy: 1012 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_ISP_PMEM_BASE
+#define HIVE_MEM_ISP_PMEM_BASE scalar_processor_2400_dmem
+#define HIVE_ADDR_ISP_PMEM_BASE 0xC
+#define HIVE_SIZE_ISP_PMEM_BASE 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_ISP_PMEM_BASE scalar_processor_2400_dmem
+#define HIVE_ADDR_sp_ISP_PMEM_BASE 0xC
+#define HIVE_SIZE_sp_ISP_PMEM_BASE 4
+
+#ifndef ISP2401
+/* function ia_css_sp_isp_param_mem_load: 46F7 */
+#else
+/* function ia_css_sp_isp_param_mem_load: 497F */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_tagger_buf_sp_pop_from_start: 292D */
+#else
+/* function ia_css_tagger_buf_sp_pop_from_start: 2ACF */
+#endif
+
+#ifndef ISP2401
+/* function __div: 685F */
+#else
+/* function __div: 69D2 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_isys_sp_frontend_create: 5DF0 */
+#else
+/* function ia_css_isys_sp_frontend_create: 5F2B */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_rmgr_sp_refcount_release_vbuf: 6323 */
+#else
+/* function ia_css_rmgr_sp_refcount_release_vbuf: 645E */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_ia_css_flash_sp_in_use
+#define HIVE_MEM_ia_css_flash_sp_in_use scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_ia_css_flash_sp_in_use 0x4A9C
+#else
+#define HIVE_ADDR_ia_css_flash_sp_in_use 0x4AF8
+#endif
+#define HIVE_SIZE_ia_css_flash_sp_in_use 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_ia_css_flash_sp_in_use scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_ia_css_flash_sp_in_use 0x4A9C
+#else
+#define HIVE_ADDR_sp_ia_css_flash_sp_in_use 0x4AF8
+#endif
+#define HIVE_SIZE_sp_ia_css_flash_sp_in_use 4
+
+#ifndef ISP2401
+/* function ia_css_thread_sem_sp_wait: 6B42 */
+#else
+/* function ia_css_thread_sem_sp_wait: 6CB7 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_sp_sleep_mode
+#define HIVE_MEM_sp_sleep_mode scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_sleep_mode 0x4110
+#else
+#define HIVE_ADDR_sp_sleep_mode 0x4130
+#endif
+#define HIVE_SIZE_sp_sleep_mode 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_sp_sleep_mode scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_sp_sleep_mode 0x4110
+#else
+#define HIVE_ADDR_sp_sp_sleep_mode 0x4130
+#endif
+#define HIVE_SIZE_sp_sp_sleep_mode 4
+
+#ifndef ISP2401
+/* function ia_css_tagger_buf_sp_push: 2A3C */
+#else
+/* function ia_css_tagger_buf_sp_push: 2BDE */
+#endif
+
+/* function mmu_invalidate_cache: D3 */
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_sp_max_cb_elems
+#define HIVE_MEM_sp_max_cb_elems scalar_processor_2400_dmem
+#define HIVE_ADDR_sp_max_cb_elems 0x148
+#define HIVE_SIZE_sp_max_cb_elems 8
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_sp_max_cb_elems scalar_processor_2400_dmem
+#define HIVE_ADDR_sp_sp_max_cb_elems 0x148
+#define HIVE_SIZE_sp_sp_max_cb_elems 8
+
+#ifndef ISP2401
+/* function ia_css_queue_remote_init: 4BEE */
+#else
+/* function ia_css_queue_remote_init: 4E4C */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_isp_stop_req
+#define HIVE_MEM_isp_stop_req scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_isp_stop_req 0x4680
+#else
+#define HIVE_ADDR_isp_stop_req 0x46C8
+#endif
+#define HIVE_SIZE_isp_stop_req 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_isp_stop_req scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_isp_stop_req 0x4680
+#else
+#define HIVE_ADDR_sp_isp_stop_req 0x46C8
+#endif
+#define HIVE_SIZE_sp_isp_stop_req 4
+
+#ifndef ISP2401
+#define HIVE_ICACHE_sp_critical_SEGMENT_START 0
+#define HIVE_ICACHE_sp_critical_NUM_SEGMENTS 1
+#endif
+
+#endif /* _sp_map_h_ */
+#ifndef ISP2401
+extern void sh_css_dump_sp_dmem(void);
+void sh_css_dump_sp_dmem(void)
+{
+}
+#endif
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/csi_rx_global.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/csi_rx_global.h
new file mode 100644
index 000000000000..146a578b7c74
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/csi_rx_global.h
@@ -0,0 +1,63 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __CSI_RX_GLOBAL_H_INCLUDED__
+#define __CSI_RX_GLOBAL_H_INCLUDED__
+
+#include <type_support.h>
+
+typedef enum {
+ CSI_MIPI_PACKET_TYPE_UNDEFINED = 0,
+ CSI_MIPI_PACKET_TYPE_LONG,
+ CSI_MIPI_PACKET_TYPE_SHORT,
+ CSI_MIPI_PACKET_TYPE_RESERVED,
+ N_CSI_MIPI_PACKET_TYPE
+} csi_mipi_packet_type_t;
+
+typedef struct csi_rx_backend_lut_entry_s csi_rx_backend_lut_entry_t;
+struct csi_rx_backend_lut_entry_s {
+ uint32_t long_packet_entry;
+ uint32_t short_packet_entry;
+};
+
+typedef struct csi_rx_backend_cfg_s csi_rx_backend_cfg_t;
+struct csi_rx_backend_cfg_s {
+ /* LUT entry for the packet */
+ csi_rx_backend_lut_entry_t lut_entry;
+
+ /* can be derived from the Data Type */
+ csi_mipi_packet_type_t csi_mipi_packet_type;
+
+ struct {
+ bool comp_enable;
+ uint32_t virtual_channel;
+ uint32_t data_type;
+ uint32_t comp_scheme;
+ uint32_t comp_predictor;
+ uint32_t comp_bit_idx;
+ } csi_mipi_cfg;
+};
+
+typedef struct csi_rx_frontend_cfg_s csi_rx_frontend_cfg_t;
+struct csi_rx_frontend_cfg_s {
+ uint32_t active_lanes;
+};
+
+extern const uint32_t N_SHORT_PACKET_LUT_ENTRIES[N_CSI_RX_BACKEND_ID];
+extern const uint32_t N_LONG_PACKET_LUT_ENTRIES[N_CSI_RX_BACKEND_ID];
+extern const uint32_t N_CSI_RX_FE_CTRL_DLANES[N_CSI_RX_FRONTEND_ID];
+/* sid_width for CSI_RX_BACKEND<N>_ID */
+extern const uint32_t N_CSI_RX_BE_SID_WIDTH[N_CSI_RX_BACKEND_ID];
+
+#endif /* __CSI_RX_GLOBAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hive_isp_css_2401_system_csi2p_generated/ia_css_isp_configs.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hive_isp_css_2401_system_csi2p_generated/ia_css_isp_configs.c
new file mode 100644
index 000000000000..325b821f276c
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hive_isp_css_2401_system_csi2p_generated/ia_css_isp_configs.c
@@ -0,0 +1,360 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+/* Generated code: do not edit or commit. */
+
+#define IA_CSS_INCLUDE_CONFIGURATIONS
+#include "ia_css_pipeline.h"
+#include "ia_css_isp_configs.h"
+#include "ia_css_debug.h"
+#include "assert_support.h"
+
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+
+void
+ia_css_configure_iterator(
+ const struct ia_css_binary *binary,
+ const struct ia_css_iterator_configuration *config_dmem)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_iterator() enter:\n");
+
+ {
+ unsigned offset = 0;
+ unsigned size = 0;
+ if (binary->info->mem_offsets.offsets.config) {
+ size = binary->info->mem_offsets.offsets.config->dmem.iterator.size;
+ offset = binary->info->mem_offsets.offsets.config->dmem.iterator.offset;
+ }
+ if (size) {
+ ia_css_iterator_config((struct sh_css_isp_iterator_isp_config *)
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
+ config_dmem, size); }
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_iterator() leave:\n");
+}
+
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+
+void
+ia_css_configure_copy_output(
+ const struct ia_css_binary *binary,
+ const struct ia_css_copy_output_configuration *config_dmem)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_copy_output() enter:\n");
+
+ {
+ unsigned offset = 0;
+ unsigned size = 0;
+ if (binary->info->mem_offsets.offsets.config) {
+ size = binary->info->mem_offsets.offsets.config->dmem.copy_output.size;
+ offset = binary->info->mem_offsets.offsets.config->dmem.copy_output.offset;
+ }
+ if (size) {
+ ia_css_copy_output_config((struct sh_css_isp_copy_output_isp_config *)
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
+ config_dmem, size); }
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_copy_output() leave:\n");
+}
+
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+
+void
+ia_css_configure_crop(
+ const struct ia_css_binary *binary,
+ const struct ia_css_crop_configuration *config_dmem)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_crop() enter:\n");
+
+ {
+ unsigned offset = 0;
+ unsigned size = 0;
+ if (binary->info->mem_offsets.offsets.config) {
+ size = binary->info->mem_offsets.offsets.config->dmem.crop.size;
+ offset = binary->info->mem_offsets.offsets.config->dmem.crop.offset;
+ }
+ if (size) {
+ ia_css_crop_config((struct sh_css_isp_crop_isp_config *)
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
+ config_dmem, size); }
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_crop() leave:\n");
+}
+
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+
+void
+ia_css_configure_fpn(
+ const struct ia_css_binary *binary,
+ const struct ia_css_fpn_configuration *config_dmem)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_fpn() enter:\n");
+
+ {
+ unsigned offset = 0;
+ unsigned size = 0;
+ if (binary->info->mem_offsets.offsets.config) {
+ size = binary->info->mem_offsets.offsets.config->dmem.fpn.size;
+ offset = binary->info->mem_offsets.offsets.config->dmem.fpn.offset;
+ }
+ if (size) {
+ ia_css_fpn_config((struct sh_css_isp_fpn_isp_config *)
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
+ config_dmem, size); }
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_fpn() leave:\n");
+}
+
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+
+void
+ia_css_configure_dvs(
+ const struct ia_css_binary *binary,
+ const struct ia_css_dvs_configuration *config_dmem)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_dvs() enter:\n");
+
+ {
+ unsigned offset = 0;
+ unsigned size = 0;
+ if (binary->info->mem_offsets.offsets.config) {
+ size = binary->info->mem_offsets.offsets.config->dmem.dvs.size;
+ offset = binary->info->mem_offsets.offsets.config->dmem.dvs.offset;
+ }
+ if (size) {
+ ia_css_dvs_config((struct sh_css_isp_dvs_isp_config *)
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
+ config_dmem, size); }
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_dvs() leave:\n");
+}
+
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+
+void
+ia_css_configure_qplane(
+ const struct ia_css_binary *binary,
+ const struct ia_css_qplane_configuration *config_dmem)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_qplane() enter:\n");
+
+ {
+ unsigned offset = 0;
+ unsigned size = 0;
+ if (binary->info->mem_offsets.offsets.config) {
+ size = binary->info->mem_offsets.offsets.config->dmem.qplane.size;
+ offset = binary->info->mem_offsets.offsets.config->dmem.qplane.offset;
+ }
+ if (size) {
+ ia_css_qplane_config((struct sh_css_isp_qplane_isp_config *)
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
+ config_dmem, size); }
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_qplane() leave:\n");
+}
+
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+
+void
+ia_css_configure_output0(
+ const struct ia_css_binary *binary,
+ const struct ia_css_output0_configuration *config_dmem)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_output0() enter:\n");
+
+ {
+ unsigned offset = 0;
+ unsigned size = 0;
+ if (binary->info->mem_offsets.offsets.config) {
+ size = binary->info->mem_offsets.offsets.config->dmem.output0.size;
+ offset = binary->info->mem_offsets.offsets.config->dmem.output0.offset;
+ }
+ if (size) {
+ ia_css_output0_config((struct sh_css_isp_output_isp_config *)
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
+ config_dmem, size); }
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_output0() leave:\n");
+}
+
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+
+void
+ia_css_configure_output1(
+ const struct ia_css_binary *binary,
+ const struct ia_css_output1_configuration *config_dmem)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_output1() enter:\n");
+
+ {
+ unsigned offset = 0;
+ unsigned size = 0;
+ if (binary->info->mem_offsets.offsets.config) {
+ size = binary->info->mem_offsets.offsets.config->dmem.output1.size;
+ offset = binary->info->mem_offsets.offsets.config->dmem.output1.offset;
+ }
+ if (size) {
+ ia_css_output1_config((struct sh_css_isp_output_isp_config *)
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
+ config_dmem, size); }
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_output1() leave:\n");
+}
+
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+
+void
+ia_css_configure_output(
+ const struct ia_css_binary *binary,
+ const struct ia_css_output_configuration *config_dmem)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_output() enter:\n");
+
+ {
+ unsigned offset = 0;
+ unsigned size = 0;
+ if (binary->info->mem_offsets.offsets.config) {
+ size = binary->info->mem_offsets.offsets.config->dmem.output.size;
+ offset = binary->info->mem_offsets.offsets.config->dmem.output.offset;
+ }
+ if (size) {
+ ia_css_output_config((struct sh_css_isp_output_isp_config *)
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
+ config_dmem, size); }
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_output() leave:\n");
+}
+
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+#ifdef ISP2401
+
+void
+ia_css_configure_sc(
+ const struct ia_css_binary *binary,
+ const struct ia_css_sc_configuration *config_dmem)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_sc() enter:\n");
+
+ {
+ unsigned offset = 0;
+ unsigned size = 0;
+ if (binary->info->mem_offsets.offsets.config) {
+ size = binary->info->mem_offsets.offsets.config->dmem.sc.size;
+ offset = binary->info->mem_offsets.offsets.config->dmem.sc.offset;
+ }
+ if (size) {
+ ia_css_sc_config((struct sh_css_isp_sc_isp_config *)
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
+ config_dmem, size); }
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_sc() leave:\n");
+}
+
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+#endif
+
+void
+ia_css_configure_raw(
+ const struct ia_css_binary *binary,
+ const struct ia_css_raw_configuration *config_dmem)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_raw() enter:\n");
+
+ {
+ unsigned offset = 0;
+ unsigned size = 0;
+ if (binary->info->mem_offsets.offsets.config) {
+ size = binary->info->mem_offsets.offsets.config->dmem.raw.size;
+ offset = binary->info->mem_offsets.offsets.config->dmem.raw.offset;
+ }
+ if (size) {
+ ia_css_raw_config((struct sh_css_isp_raw_isp_config *)
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
+ config_dmem, size); }
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_raw() leave:\n");
+}
+
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+
+void
+ia_css_configure_tnr(
+ const struct ia_css_binary *binary,
+ const struct ia_css_tnr_configuration *config_dmem)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_tnr() enter:\n");
+
+ {
+ unsigned offset = 0;
+ unsigned size = 0;
+ if (binary->info->mem_offsets.offsets.config) {
+ size = binary->info->mem_offsets.offsets.config->dmem.tnr.size;
+ offset = binary->info->mem_offsets.offsets.config->dmem.tnr.offset;
+ }
+ if (size) {
+ ia_css_tnr_config((struct sh_css_isp_tnr_isp_config *)
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
+ config_dmem, size); }
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_tnr() leave:\n");
+}
+
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+
+void
+ia_css_configure_ref(
+ const struct ia_css_binary *binary,
+ const struct ia_css_ref_configuration *config_dmem)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_ref() enter:\n");
+
+ {
+ unsigned offset = 0;
+ unsigned size = 0;
+ if (binary->info->mem_offsets.offsets.config) {
+ size = binary->info->mem_offsets.offsets.config->dmem.ref.size;
+ offset = binary->info->mem_offsets.offsets.config->dmem.ref.offset;
+ }
+ if (size) {
+ ia_css_ref_config((struct sh_css_isp_ref_isp_config *)
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
+ config_dmem, size); }
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_ref() leave:\n");
+}
+
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+
+void
+ia_css_configure_vf(
+ const struct ia_css_binary *binary,
+ const struct ia_css_vf_configuration *config_dmem)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_vf() enter:\n");
+
+ {
+ unsigned offset = 0;
+ unsigned size = 0;
+ if (binary->info->mem_offsets.offsets.config) {
+ size = binary->info->mem_offsets.offsets.config->dmem.vf.size;
+ offset = binary->info->mem_offsets.offsets.config->dmem.vf.offset;
+ }
+ if (size) {
+ ia_css_vf_config((struct sh_css_isp_vf_isp_config *)
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
+ config_dmem, size); }
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_vf() leave:\n");
+}
+
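Editor's note: every generated ia_css_configure_*() function above follows the same shape -- look up the kernel's {offset, size} slot in the binary's CONFIG/DMEM offset table, and copy the supplied configuration into the binary's parameter image only when the slot is present (size != 0). The standalone sketch below illustrates that pattern only; it is not driver code, and names such as fake_binary, offset_table and configure_crop are simplified stand-ins for the real ia_css types.

/*
 * Illustrative sketch only (not part of the patch): a minimal, standalone
 * model of the generated configure pattern. All types and fields are
 * simplified stand-ins, not the driver's definitions.
 */
#include <stdio.h>
#include <string.h>

struct param_slot { unsigned offset; unsigned size; };

struct offset_table { struct param_slot crop; };    /* one slot per kernel */

struct fake_binary {
	const struct offset_table *config_offsets;   /* may be NULL */
	unsigned char dmem[256];                     /* CONFIG/DMEM image */
};

static void configure_crop(struct fake_binary *binary, const void *cfg)
{
	unsigned offset = 0, size = 0;

	if (binary->config_offsets) {
		size   = binary->config_offsets->crop.size;
		offset = binary->config_offsets->crop.offset;
	}
	if (size)                                    /* kernel present in this binary */
		memcpy(&binary->dmem[offset], cfg, size);
}

int main(void)
{
	static const struct offset_table table = { .crop = { 16, 8 } };
	struct fake_binary binary = { .config_offsets = &table };
	unsigned char crop_cfg[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };

	configure_crop(&binary, crop_cfg);
	printf("dmem[16] = %u\n", binary.dmem[16]);  /* prints 1 */
	return 0;
}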
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hive_isp_css_2401_system_csi2p_generated/ia_css_isp_configs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hive_isp_css_2401_system_csi2p_generated/ia_css_isp_configs.h
new file mode 100644
index 000000000000..8aacd3dbc05a
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hive_isp_css_2401_system_csi2p_generated/ia_css_isp_configs.h
@@ -0,0 +1,189 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifdef IA_CSS_INCLUDE_CONFIGURATIONS
+#include "isp/kernels/crop/crop_1.0/ia_css_crop.host.h"
+#include "isp/kernels/dvs/dvs_1.0/ia_css_dvs.host.h"
+#include "isp/kernels/fpn/fpn_1.0/ia_css_fpn.host.h"
+#include "isp/kernels/ob/ob_1.0/ia_css_ob.host.h"
+#include "isp/kernels/output/output_1.0/ia_css_output.host.h"
+#include "isp/kernels/qplane/qplane_2/ia_css_qplane.host.h"
+#include "isp/kernels/raw/raw_1.0/ia_css_raw.host.h"
+#include "isp/kernels/ref/ref_1.0/ia_css_ref.host.h"
+#include "isp/kernels/s3a/s3a_1.0/ia_css_s3a.host.h"
+#ifdef ISP2401
+#include "isp/kernels/sc/sc_1.0/ia_css_sc.host.h"
+#endif
+#include "isp/kernels/tnr/tnr_1.0/ia_css_tnr.host.h"
+#include "isp/kernels/vf/vf_1.0/ia_css_vf.host.h"
+#include "isp/kernels/iterator/iterator_1.0/ia_css_iterator.host.h"
+#include "isp/kernels/copy_output/copy_output_1.0/ia_css_copy_output.host.h"
+#endif /* IA_CSS_INCLUDE_CONFIGURATIONS */
+/* Generated code: do not edit or commit. */
+
+#ifndef _IA_CSS_ISP_CONFIG_H
+#define _IA_CSS_ISP_CONFIG_H
+
+/* Code generated by genparam/gencode.c:gen_param_enum() */
+
+enum ia_css_configuration_ids {
+ IA_CSS_ITERATOR_CONFIG_ID,
+ IA_CSS_COPY_OUTPUT_CONFIG_ID,
+ IA_CSS_CROP_CONFIG_ID,
+ IA_CSS_FPN_CONFIG_ID,
+ IA_CSS_DVS_CONFIG_ID,
+ IA_CSS_QPLANE_CONFIG_ID,
+ IA_CSS_OUTPUT0_CONFIG_ID,
+ IA_CSS_OUTPUT1_CONFIG_ID,
+ IA_CSS_OUTPUT_CONFIG_ID,
+#ifdef ISP2401
+ IA_CSS_SC_CONFIG_ID,
+#endif
+ IA_CSS_RAW_CONFIG_ID,
+ IA_CSS_TNR_CONFIG_ID,
+ IA_CSS_REF_CONFIG_ID,
+ IA_CSS_VF_CONFIG_ID,
+ IA_CSS_NUM_CONFIGURATION_IDS
+};
+
+/* Code generated by genparam/gencode.c:gen_param_offsets() */
+
+struct ia_css_config_memory_offsets {
+ struct {
+ struct ia_css_isp_parameter iterator;
+ struct ia_css_isp_parameter copy_output;
+ struct ia_css_isp_parameter crop;
+ struct ia_css_isp_parameter fpn;
+ struct ia_css_isp_parameter dvs;
+ struct ia_css_isp_parameter qplane;
+ struct ia_css_isp_parameter output0;
+ struct ia_css_isp_parameter output1;
+ struct ia_css_isp_parameter output;
+#ifdef ISP2401
+ struct ia_css_isp_parameter sc;
+#endif
+ struct ia_css_isp_parameter raw;
+ struct ia_css_isp_parameter tnr;
+ struct ia_css_isp_parameter ref;
+ struct ia_css_isp_parameter vf;
+ } dmem;
+};
+
+#if defined(IA_CSS_INCLUDE_CONFIGURATIONS)
+
+#include "ia_css_stream.h" /* struct ia_css_stream */
+#include "ia_css_binary.h" /* struct ia_css_binary */
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+
+void
+ia_css_configure_iterator(
+ const struct ia_css_binary *binary,
+ const struct ia_css_iterator_configuration *config_dmem);
+
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+
+void
+ia_css_configure_copy_output(
+ const struct ia_css_binary *binary,
+ const struct ia_css_copy_output_configuration *config_dmem);
+
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+
+void
+ia_css_configure_crop(
+ const struct ia_css_binary *binary,
+ const struct ia_css_crop_configuration *config_dmem);
+
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+
+void
+ia_css_configure_fpn(
+ const struct ia_css_binary *binary,
+ const struct ia_css_fpn_configuration *config_dmem);
+
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+
+void
+ia_css_configure_dvs(
+ const struct ia_css_binary *binary,
+ const struct ia_css_dvs_configuration *config_dmem);
+
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+
+void
+ia_css_configure_qplane(
+ const struct ia_css_binary *binary,
+ const struct ia_css_qplane_configuration *config_dmem);
+
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+
+void
+ia_css_configure_output0(
+ const struct ia_css_binary *binary,
+ const struct ia_css_output0_configuration *config_dmem);
+
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+
+void
+ia_css_configure_output1(
+ const struct ia_css_binary *binary,
+ const struct ia_css_output1_configuration *config_dmem);
+
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+
+void
+ia_css_configure_output(
+ const struct ia_css_binary *binary,
+ const struct ia_css_output_configuration *config_dmem);
+
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+
+#ifdef ISP2401
+void
+ia_css_configure_sc(
+ const struct ia_css_binary *binary,
+ const struct ia_css_sc_configuration *config_dmem);
+
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+
+#endif
+void
+ia_css_configure_raw(
+ const struct ia_css_binary *binary,
+ const struct ia_css_raw_configuration *config_dmem);
+
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+
+void
+ia_css_configure_tnr(
+ const struct ia_css_binary *binary,
+ const struct ia_css_tnr_configuration *config_dmem);
+
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+
+void
+ia_css_configure_ref(
+ const struct ia_css_binary *binary,
+ const struct ia_css_ref_configuration *config_dmem);
+
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+
+void
+ia_css_configure_vf(
+ const struct ia_css_binary *binary,
+ const struct ia_css_vf_configuration *config_dmem);
+
+#endif /* IA_CSS_INCLUDE_CONFIGURATIONS */
+
+#endif /* _IA_CSS_ISP_CONFIG_H */
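Editor's note: the enum of configuration IDs and the ia_css_config_memory_offsets struct above are emitted in lockstep by the generator (including the ISP2401-guarded sc entry), which is what lets a per-kernel slot be reached either by field name or, generically, by ID. The sketch below models that lockstep with a table-driven lookup; config_id, config_offsets and get_slot are invented for illustration and are not part of the driver.

/*
 * Illustrative sketch only (not part of the patch): map configuration IDs
 * onto {offset, size} slots using offsetof(), mirroring how the generated
 * enum and struct stay in sync. Simplified stand-in types throughout.
 */
#include <stdio.h>
#include <stddef.h>

enum config_id { CROP_CONFIG_ID, FPN_CONFIG_ID, NUM_CONFIG_IDS };

struct param_slot { unsigned offset; unsigned size; };

struct config_offsets {
	struct param_slot crop;
	struct param_slot fpn;
};

/* One entry per ID: where that kernel's slot lives inside config_offsets. */
static const size_t slot_offset[NUM_CONFIG_IDS] = {
	[CROP_CONFIG_ID] = offsetof(struct config_offsets, crop),
	[FPN_CONFIG_ID]  = offsetof(struct config_offsets, fpn),
};

static struct param_slot get_slot(const struct config_offsets *t, enum config_id id)
{
	return *(const struct param_slot *)((const char *)t + slot_offset[id]);
}

int main(void)
{
	static const struct config_offsets table = {
		.crop = { 0, 8 }, .fpn = { 8, 16 },
	};
	struct param_slot s = get_slot(&table, FPN_CONFIG_ID);

	printf("fpn: offset=%u size=%u\n", s.offset, s.size);
	return 0;
}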
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hive_isp_css_2401_system_csi2p_generated/ia_css_isp_params.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hive_isp_css_2401_system_csi2p_generated/ia_css_isp_params.c
new file mode 100644
index 000000000000..11e4463ebb50
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hive_isp_css_2401_system_csi2p_generated/ia_css_isp_params.c
@@ -0,0 +1,3220 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#define IA_CSS_INCLUDE_PARAMETERS
+#include "sh_css_params.h"
+#include "isp/kernels/aa/aa_2/ia_css_aa2.host.h"
+#include "isp/kernels/anr/anr_1.0/ia_css_anr.host.h"
+#include "isp/kernels/anr/anr_2/ia_css_anr2.host.h"
+#include "isp/kernels/bh/bh_2/ia_css_bh.host.h"
+#include "isp/kernels/bnr/bnr_1.0/ia_css_bnr.host.h"
+#include "isp/kernels/bnr/bnr2_2/ia_css_bnr2_2.host.h"
+#include "isp/kernels/cnr/cnr_2/ia_css_cnr2.host.h"
+#include "isp/kernels/crop/crop_1.0/ia_css_crop.host.h"
+#include "isp/kernels/csc/csc_1.0/ia_css_csc.host.h"
+#include "isp/kernels/ctc/ctc_1.0/ia_css_ctc.host.h"
+#include "isp/kernels/ctc/ctc1_5/ia_css_ctc1_5.host.h"
+#include "isp/kernels/ctc/ctc2/ia_css_ctc2.host.h"
+#include "isp/kernels/de/de_1.0/ia_css_de.host.h"
+#include "isp/kernels/de/de_2/ia_css_de2.host.h"
+#include "isp/kernels/dp/dp_1.0/ia_css_dp.host.h"
+#include "isp/kernels/fixedbds/fixedbds_1.0/ia_css_fixedbds_param.h"
+#include "isp/kernels/fpn/fpn_1.0/ia_css_fpn.host.h"
+#include "isp/kernels/gc/gc_1.0/ia_css_gc.host.h"
+#include "isp/kernels/gc/gc_2/ia_css_gc2.host.h"
+#include "isp/kernels/macc/macc_1.0/ia_css_macc.host.h"
+#include "isp/kernels/macc/macc1_5/ia_css_macc1_5.host.h"
+#include "isp/kernels/ob/ob_1.0/ia_css_ob.host.h"
+#include "isp/kernels/ob/ob2/ia_css_ob2.host.h"
+#include "isp/kernels/output/output_1.0/ia_css_output.host.h"
+#include "isp/kernels/raw_aa_binning/raw_aa_binning_1.0/ia_css_raa.host.h"
+#include "isp/kernels/s3a/s3a_1.0/ia_css_s3a.host.h"
+#include "isp/kernels/sc/sc_1.0/ia_css_sc.host.h"
+#include "isp/kernels/sdis/sdis_1.0/ia_css_sdis.host.h"
+#include "isp/kernels/sdis/sdis_2/ia_css_sdis2.host.h"
+#include "isp/kernels/tnr/tnr_1.0/ia_css_tnr.host.h"
+#include "isp/kernels/uds/uds_1.0/ia_css_uds_param.h"
+#include "isp/kernels/wb/wb_1.0/ia_css_wb.host.h"
+#include "isp/kernels/xnr/xnr_1.0/ia_css_xnr.host.h"
+#include "isp/kernels/xnr/xnr_3.0/ia_css_xnr3.host.h"
+#include "isp/kernels/ynr/ynr_1.0/ia_css_ynr.host.h"
+#include "isp/kernels/ynr/ynr_2/ia_css_ynr2.host.h"
+#include "isp/kernels/fc/fc_1.0/ia_css_formats.host.h"
+#include "isp/kernels/tdf/tdf_1.0/ia_css_tdf.host.h"
+#include "isp/kernels/dpc2/ia_css_dpc2.host.h"
+#include "isp/kernels/eed1_8/ia_css_eed1_8.host.h"
+#include "isp/kernels/bnlm/ia_css_bnlm.host.h"
+#include "isp/kernels/conversion/conversion_1.0/ia_css_conversion.host.h"
+/* Generated code: do not edit or commit. */
+
+#include "ia_css_pipeline.h"
+#include "ia_css_isp_params.h"
+#include "ia_css_debug.h"
+#include "assert_support.h"
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_aa(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.aa.size;
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.aa.offset;
+
+ if (size) {
+ struct sh_css_isp_aa_params *t = (struct sh_css_isp_aa_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset];
+ t->strength = params->aa_config.strength;
+ }
+}
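Editor's note: the generated ia_css_process_*() functions that follow all share the structure visible in ia_css_process_aa() above -- resolve the kernel's {offset, size} slot in the PARAM-class DMEM image, encode the host-side configuration into it when the slot exists, and mark the per-pipe/per-stage memory as changed so it is re-sent to the ISP. The minimal standalone model below is illustrative only; fake_stage, fake_params and process_aa are simplified stand-ins, not driver types.

/*
 * Illustrative sketch only (not part of the patch): standalone model of the
 * process pattern -- slot lookup, encode into the parameter blob, then set
 * the "changed" bookkeeping. Simplified stand-in types throughout.
 */
#include <stdbool.h>
#include <stdio.h>

struct param_slot { unsigned offset; unsigned size; };

struct fake_stage {
	struct param_slot aa_slot;   /* stands in for ...->dmem.aa */
	unsigned char dmem[64];      /* PARAM/DMEM image for this binary */
	unsigned stage_num;
};

struct fake_params {
	unsigned aa_strength;        /* stands in for params->aa_config */
	bool isp_params_changed;
	bool dmem_changed[4];        /* per stage, DMEM only in this model */
};

static void process_aa(struct fake_stage *stage, struct fake_params *params)
{
	unsigned size = stage->aa_slot.size;
	unsigned offset = stage->aa_slot.offset;

	if (size) {
		/* "encode": write the host config into the ISP parameter blob */
		stage->dmem[offset] = (unsigned char)params->aa_strength;
		params->isp_params_changed = true;
		params->dmem_changed[stage->stage_num] = true;
	}
}

int main(void)
{
	struct fake_stage stage = { .aa_slot = { 4, 1 }, .stage_num = 0 };
	struct fake_params params = { .aa_strength = 3 };

	process_aa(&stage, &params);
	printf("changed=%d dmem[4]=%u\n", params.isp_params_changed, stage.dmem[4]);
	return 0;
}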
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_anr(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.anr.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.anr.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_anr() enter:\n");
+
+ ia_css_anr_encode((struct sh_css_isp_anr_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->anr_config,
+size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_anr() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_anr2(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->vmem.anr2.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->vmem.anr2.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_anr2() enter:\n");
+
+ ia_css_anr2_vmem_encode((struct ia_css_isp_anr2_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VMEM].address[offset],
+ &params->anr_thres,
+size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_anr2() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_bh(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.bh.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.bh.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_bh() enter:\n");
+
+ ia_css_bh_encode((struct sh_css_isp_bh_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->s3a_config,
+size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_bh() leave:\n");
+ }
+
+ }
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->hmem0.bh.size;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_bh() enter:\n");
+
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_HMEM0] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_bh() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_cnr(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.cnr.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.cnr.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_cnr() enter:\n");
+
+ ia_css_cnr_encode((struct sh_css_isp_cnr_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->cnr_config,
+size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_cnr() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_crop(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.crop.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.crop.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_crop() enter:\n");
+
+ ia_css_crop_encode((struct sh_css_isp_crop_isp_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->crop_config,
+size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_crop() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_csc(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.csc.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.csc.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_csc() enter:\n");
+
+ ia_css_csc_encode((struct sh_css_isp_csc_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->cc_config,
+size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_csc() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_dp(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.dp.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.dp.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_dp() enter:\n");
+
+ ia_css_dp_encode((struct sh_css_isp_dp_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->dp_config,
+size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_dp() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_bnr(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.bnr.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.bnr.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_bnr() enter:\n");
+
+ ia_css_bnr_encode((struct sh_css_isp_bnr_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->nr_config,
+size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_bnr() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_de(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.de.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.de.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_de() enter:\n");
+
+ ia_css_de_encode((struct sh_css_isp_de_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->de_config,
+size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_de() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_ecd(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.ecd.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.ecd.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_ecd() enter:\n");
+
+ ia_css_ecd_encode((struct sh_css_isp_ecd_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->ecd_config,
+size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_ecd() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_formats(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.formats.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.formats.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_formats() enter:\n");
+
+ ia_css_formats_encode((struct sh_css_isp_formats_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->formats_config,
+size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_formats() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_fpn(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.fpn.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.fpn.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_fpn() enter:\n");
+
+ ia_css_fpn_encode((struct sh_css_isp_fpn_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->fpn_config,
+size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_fpn() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_gc(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.gc.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.gc.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_gc() enter:\n");
+
+ ia_css_gc_encode((struct sh_css_isp_gc_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->gc_config,
+size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_gc() leave:\n");
+ }
+
+ }
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->vamem1.gc.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->vamem1.gc.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_gc() enter:\n");
+
+ ia_css_gc_vamem_encode((struct sh_css_isp_gc_vamem_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VAMEM1].address[offset],
+ &params->gc_table,
+size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VAMEM1] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_gc() leave:\n");
+ }
+
+ }
+}
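Editor's note: kernels such as gc above spread their parameters over more than one ISP memory class (scalar parameters in DMEM plus a gamma lookup table in VAMEM1), and each class keeps its own {offset, size} slot and its own changed flag. The sketch below is a simplified, self-contained model of that multi-class layout; mem_class, fake_stage and process_gc are assumptions made for the example, not driver identifiers.

/*
 * Illustrative sketch only (not part of the patch): one process function
 * touching two memory classes, each with its own slot and changed flag.
 * Simplified stand-in types throughout.
 */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

enum mem_class { MEM_DMEM, MEM_VAMEM1, NUM_MEM_CLASSES };

struct param_slot { unsigned offset; unsigned size; };

struct fake_stage {
	struct param_slot gc_slot[NUM_MEM_CLASSES];  /* one slot per class */
	unsigned char mem[NUM_MEM_CLASSES][128];     /* PARAM images */
};

struct fake_params {
	unsigned char gc_params[8];                  /* stands in for gc_config */
	unsigned char gc_table[32];                  /* stands in for gc_table */
	bool mem_changed[NUM_MEM_CLASSES];
};

static void process_gc(struct fake_stage *stage, struct fake_params *params)
{
	const unsigned char *src[NUM_MEM_CLASSES] = {
		[MEM_DMEM]   = params->gc_params,
		[MEM_VAMEM1] = params->gc_table,
	};
	int c;

	for (c = 0; c < NUM_MEM_CLASSES; c++) {
		unsigned size = stage->gc_slot[c].size;
		unsigned offset = stage->gc_slot[c].offset;

		if (size) {
			memcpy(&stage->mem[c][offset], src[c], size);
			params->mem_changed[c] = true;
		}
	}
}

int main(void)
{
	struct fake_stage stage = {
		.gc_slot = { [MEM_DMEM] = { 0, 8 }, [MEM_VAMEM1] = { 16, 32 } },
	};
	struct fake_params params = { .gc_params = { 7 }, .gc_table = { 9 } };

	process_gc(&stage, &params);
	printf("dmem changed=%d vamem1 changed=%d\n",
	       params.mem_changed[MEM_DMEM], params.mem_changed[MEM_VAMEM1]);
	return 0;
}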
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_ce(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.ce.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.ce.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_ce() enter:\n");
+
+ ia_css_ce_encode((struct sh_css_isp_ce_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->ce_config,
+size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_ce() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_yuv2rgb(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.yuv2rgb.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.yuv2rgb.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_yuv2rgb() enter:\n");
+
+ ia_css_yuv2rgb_encode((struct sh_css_isp_csc_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->yuv2rgb_cc_config,
+size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_yuv2rgb() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_rgb2yuv(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.rgb2yuv.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.rgb2yuv.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_rgb2yuv() enter:\n");
+
+ ia_css_rgb2yuv_encode((struct sh_css_isp_csc_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->rgb2yuv_cc_config,
+size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_rgb2yuv() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_r_gamma(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->vamem0.r_gamma.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->vamem0.r_gamma.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_r_gamma() enter:\n");
+
+ ia_css_r_gamma_vamem_encode((struct sh_css_isp_rgb_gamma_vamem_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VAMEM0].address[offset],
+ &params->r_gamma_table,
+size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VAMEM0] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_r_gamma() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_g_gamma(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->vamem1.g_gamma.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->vamem1.g_gamma.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_g_gamma() enter:\n");
+
+ ia_css_g_gamma_vamem_encode((struct sh_css_isp_rgb_gamma_vamem_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VAMEM1].address[offset],
+ &params->g_gamma_table,
+size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VAMEM1] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_g_gamma() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_b_gamma(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->vamem2.b_gamma.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->vamem2.b_gamma.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_b_gamma() enter:\n");
+
+ ia_css_b_gamma_vamem_encode((struct sh_css_isp_rgb_gamma_vamem_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VAMEM2].address[offset],
+ &params->b_gamma_table,
+size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VAMEM2] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_b_gamma() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_uds(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.uds.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.uds.offset;
+
+ if (size) {
+ struct sh_css_sp_uds_params *p;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_uds() enter:\n");
+
+ p = (struct sh_css_sp_uds_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset];
+ p->crop_pos = params->uds_config.crop_pos;
+ p->uds = params->uds_config.uds;
+
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_uds() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_raa(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.raa.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.raa.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_raa() enter:\n");
+
+ ia_css_raa_encode((struct sh_css_isp_aa_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->raa_config,
+size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_raa() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_s3a(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.s3a.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.s3a.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_s3a() enter:\n");
+
+ ia_css_s3a_encode((struct sh_css_isp_s3a_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->s3a_config,
+size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_s3a() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_ob(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.ob.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.ob.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_ob() enter:\n");
+
+ ia_css_ob_encode((struct sh_css_isp_ob_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->ob_config,
+&params->stream_configs.ob, size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_ob() leave:\n");
+ }
+
+ }
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->vmem.ob.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->vmem.ob.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_ob() enter:\n");
+
+ ia_css_ob_vmem_encode((struct sh_css_isp_ob_vmem_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VMEM].address[offset],
+ &params->ob_config,
+&params->stream_configs.ob, size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_ob() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_output(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.output.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.output.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_output() enter:\n");
+
+ ia_css_output_encode((struct sh_css_isp_output_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->output_config,
+size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_output() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_sc(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.sc.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.sc.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_sc() enter:\n");
+
+ ia_css_sc_encode((struct sh_css_isp_sc_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->sc_config,
+size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_sc() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_bds(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.bds.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.bds.offset;
+
+ if (size) {
+ struct sh_css_isp_bds_params *p;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_bds() enter:\n");
+
+ p = (struct sh_css_isp_bds_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset];
+ p->baf_strength = params->bds_config.strength;
+
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_bds() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_tnr(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.tnr.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.tnr.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_tnr() enter:\n");
+
+ ia_css_tnr_encode((struct sh_css_isp_tnr_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->tnr_config,
+size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_tnr() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_macc(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.macc.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.macc.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_macc() enter:\n");
+
+ ia_css_macc_encode((struct sh_css_isp_macc_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->macc_config,
+size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_macc() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_sdis_horicoef(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->vmem.sdis_horicoef.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->vmem.sdis_horicoef.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_sdis_horicoef() enter:\n");
+
+ ia_css_sdis_horicoef_vmem_encode((struct sh_css_isp_sdis_hori_coef_tbl *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VMEM].address[offset],
+ &params->dvs_coefs,
+size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_sdis_horicoef() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_sdis_vertcoef(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->vmem.sdis_vertcoef.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->vmem.sdis_vertcoef.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_sdis_vertcoef() enter:\n");
+
+ ia_css_sdis_vertcoef_vmem_encode((struct sh_css_isp_sdis_vert_coef_tbl *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VMEM].address[offset],
+ &params->dvs_coefs,
+size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_sdis_vertcoef() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_sdis_horiproj(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.sdis_horiproj.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.sdis_horiproj.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_sdis_horiproj() enter:\n");
+
+ ia_css_sdis_horiproj_encode((struct sh_css_isp_sdis_hori_proj_tbl *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->dvs_coefs,
+size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_sdis_horiproj() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_sdis_vertproj(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.sdis_vertproj.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.sdis_vertproj.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_sdis_vertproj() enter:\n");
+
+ ia_css_sdis_vertproj_encode((struct sh_css_isp_sdis_vert_proj_tbl *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->dvs_coefs,
+size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_sdis_vertproj() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_sdis2_horicoef(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->vmem.sdis2_horicoef.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->vmem.sdis2_horicoef.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_sdis2_horicoef() enter:\n");
+
+ ia_css_sdis2_horicoef_vmem_encode((struct sh_css_isp_sdis_hori_coef_tbl *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VMEM].address[offset],
+ &params->dvs2_coefs,
+size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_sdis2_horicoef() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_sdis2_vertcoef(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->vmem.sdis2_vertcoef.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->vmem.sdis2_vertcoef.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_sdis2_vertcoef() enter:\n");
+
+ ia_css_sdis2_vertcoef_vmem_encode((struct sh_css_isp_sdis_vert_coef_tbl *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VMEM].address[offset],
+ &params->dvs2_coefs,
+size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_sdis2_vertcoef() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_sdis2_horiproj(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.sdis2_horiproj.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.sdis2_horiproj.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_sdis2_horiproj() enter:\n");
+
+ ia_css_sdis2_horiproj_encode((struct sh_css_isp_sdis_hori_proj_tbl *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->dvs2_coefs,
+size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_sdis2_horiproj() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_sdis2_vertproj(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.sdis2_vertproj.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.sdis2_vertproj.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_sdis2_vertproj() enter:\n");
+
+ ia_css_sdis2_vertproj_encode((struct sh_css_isp_sdis_vert_proj_tbl *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->dvs2_coefs,
+size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_sdis2_vertproj() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_wb(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.wb.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.wb.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_wb() enter:\n");
+
+ ia_css_wb_encode((struct sh_css_isp_wb_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->wb_config,
+size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_wb() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_nr(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.nr.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.nr.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_nr() enter:\n");
+
+ ia_css_nr_encode((struct sh_css_isp_ynr_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->nr_config,
+size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_nr() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_yee(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.yee.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.yee.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_yee() enter:\n");
+
+ ia_css_yee_encode((struct sh_css_isp_yee_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->yee_config,
+size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_yee() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_ynr(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.ynr.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.ynr.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_ynr() enter:\n");
+
+ ia_css_ynr_encode((struct sh_css_isp_yee2_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->ynr_config,
+size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_ynr() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_fc(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.fc.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.fc.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_fc() enter:\n");
+
+ ia_css_fc_encode((struct sh_css_isp_fc_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->fc_config,
+size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_fc() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_ctc(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.ctc.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.ctc.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_ctc() enter:\n");
+
+ ia_css_ctc_encode((struct sh_css_isp_ctc_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->ctc_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_ctc() leave:\n");
+ }
+
+ }
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->vamem0.ctc.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->vamem0.ctc.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_ctc() enter:\n");
+
+ ia_css_ctc_vamem_encode((struct sh_css_isp_ctc_vamem_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VAMEM0].address[offset],
+ &params->ctc_table,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VAMEM0] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_ctc() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_xnr_table(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->vamem1.xnr_table.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->vamem1.xnr_table.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_xnr_table() enter:\n");
+
+ ia_css_xnr_table_vamem_encode((struct sh_css_isp_xnr_vamem_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VAMEM1].address[offset],
+ &params->xnr_table,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VAMEM1] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_xnr_table() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_xnr(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.xnr.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.xnr.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_xnr() enter:\n");
+
+ ia_css_xnr_encode((struct sh_css_isp_xnr_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->xnr_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_xnr() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_xnr3(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.xnr3.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.xnr3.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_xnr3() enter:\n");
+
+ ia_css_xnr3_encode((struct sh_css_isp_xnr3_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->xnr3_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_xnr3() leave:\n");
+ }
+
+ }
+#ifdef ISP2401
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->vmem.xnr3.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->vmem.xnr3.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_xnr3() enter:\n");
+
+ ia_css_xnr3_vmem_encode((struct sh_css_isp_xnr3_vmem_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VMEM].address[offset],
+ &params->xnr3_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_xnr3() leave:\n");
+ }
+
+ }
+#endif
+}
+
+/* Code generated by genparam/gencode.c:gen_param_process_table() */
+
+void (*ia_css_kernel_process_param[IA_CSS_NUM_PARAMETER_IDS])(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params) = {
+ ia_css_process_aa,
+ ia_css_process_anr,
+ ia_css_process_anr2,
+ ia_css_process_bh,
+ ia_css_process_cnr,
+ ia_css_process_crop,
+ ia_css_process_csc,
+ ia_css_process_dp,
+ ia_css_process_bnr,
+ ia_css_process_de,
+ ia_css_process_ecd,
+ ia_css_process_formats,
+ ia_css_process_fpn,
+ ia_css_process_gc,
+ ia_css_process_ce,
+ ia_css_process_yuv2rgb,
+ ia_css_process_rgb2yuv,
+ ia_css_process_r_gamma,
+ ia_css_process_g_gamma,
+ ia_css_process_b_gamma,
+ ia_css_process_uds,
+ ia_css_process_raa,
+ ia_css_process_s3a,
+ ia_css_process_ob,
+ ia_css_process_output,
+ ia_css_process_sc,
+ ia_css_process_bds,
+ ia_css_process_tnr,
+ ia_css_process_macc,
+ ia_css_process_sdis_horicoef,
+ ia_css_process_sdis_vertcoef,
+ ia_css_process_sdis_horiproj,
+ ia_css_process_sdis_vertproj,
+ ia_css_process_sdis2_horicoef,
+ ia_css_process_sdis2_vertcoef,
+ ia_css_process_sdis2_horiproj,
+ ia_css_process_sdis2_vertproj,
+ ia_css_process_wb,
+ ia_css_process_nr,
+ ia_css_process_yee,
+ ia_css_process_ynr,
+ ia_css_process_fc,
+ ia_css_process_ctc,
+ ia_css_process_xnr_table,
+ ia_css_process_xnr,
+ ia_css_process_xnr3,
+};
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_dp_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_dp_config *config){
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_dp_config() enter: "
+ "config=%p\n",config);
+
+ *config = params->dp_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_dp_config() leave\n");
+ ia_css_dp_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_dp_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_dp_config *config)
+{
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_dp_config() enter:\n");
+ ia_css_dp_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->dp_config = *config;
+ params->config_changed[IA_CSS_DP_ID] = true;
+#ifndef ISP2401
+ params->config_changed[IA_CSS_DP_ID] = true;
+
+#endif
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_dp_config() leave: "
+ "return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_wb_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_wb_config *config){
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_wb_config() enter: "
+ "config=%p\n",config);
+
+ *config = params->wb_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_wb_config() leave\n");
+ ia_css_wb_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_wb_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_wb_config *config)
+{
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_wb_config() enter:\n");
+ ia_css_wb_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->wb_config = *config;
+ params->config_changed[IA_CSS_WB_ID] = true;
+#ifndef ISP2401
+ params->config_changed[IA_CSS_WB_ID] = true;
+
+#endif
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_wb_config() leave: "
+ "return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_tnr_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_tnr_config *config){
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_tnr_config() enter: "
+ "config=%p\n",config);
+
+ *config = params->tnr_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_tnr_config() leave\n");
+ ia_css_tnr_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_tnr_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_tnr_config *config)
+{
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_tnr_config() enter:\n");
+ ia_css_tnr_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->tnr_config = *config;
+ params->config_changed[IA_CSS_TNR_ID] = true;
+#ifndef ISP2401
+ params->config_changed[IA_CSS_TNR_ID] = true;
+
+#endif
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_tnr_config() leave: "
+ "return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_ob_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_ob_config *config){
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_ob_config() enter: "
+ "config=%p\n",config);
+
+ *config = params->ob_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_ob_config() leave\n");
+ ia_css_ob_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_ob_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_ob_config *config)
+{
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_ob_config() enter:\n");
+ ia_css_ob_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->ob_config = *config;
+ params->config_changed[IA_CSS_OB_ID] = true;
+#ifndef ISP2401
+ params->config_changed[IA_CSS_OB_ID] = true;
+
+#endif
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_ob_config() leave: "
+ "return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_de_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_de_config *config){
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_de_config() enter: "
+ "config=%p\n",config);
+
+ *config = params->de_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_de_config() leave\n");
+ ia_css_de_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_de_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_de_config *config)
+{
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_de_config() enter:\n");
+ ia_css_de_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->de_config = *config;
+ params->config_changed[IA_CSS_DE_ID] = true;
+#ifndef ISP2401
+ params->config_changed[IA_CSS_DE_ID] = true;
+
+#endif
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_de_config() leave: "
+ "return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_anr_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_anr_config *config){
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_anr_config() enter: "
+ "config=%p\n",config);
+
+ *config = params->anr_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_anr_config() leave\n");
+ ia_css_anr_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_anr_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_anr_config *config)
+{
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_anr_config() enter:\n");
+ ia_css_anr_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->anr_config = *config;
+ params->config_changed[IA_CSS_ANR_ID] = true;
+#ifndef ISP2401
+ params->config_changed[IA_CSS_ANR_ID] = true;
+
+#endif
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_anr_config() leave: "
+ "return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_anr2_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_anr_thres *config){
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_anr2_config() enter: "
+ "config=%p\n",config);
+
+ *config = params->anr_thres;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_anr2_config() leave\n");
+ ia_css_anr2_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_anr2_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_anr_thres *config)
+{
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_anr2_config() enter:\n");
+ ia_css_anr2_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->anr_thres = *config;
+ params->config_changed[IA_CSS_ANR2_ID] = true;
+#ifndef ISP2401
+ params->config_changed[IA_CSS_ANR2_ID] = true;
+
+#endif
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_anr2_config() leave: "
+ "return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_ce_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_ce_config *config){
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_ce_config() enter: "
+ "config=%p\n",config);
+
+ *config = params->ce_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_ce_config() leave\n");
+ ia_css_ce_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_ce_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_ce_config *config)
+{
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_ce_config() enter:\n");
+ ia_css_ce_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->ce_config = *config;
+ params->config_changed[IA_CSS_CE_ID] = true;
+#ifndef ISP2401
+ params->config_changed[IA_CSS_CE_ID] = true;
+
+#endif
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_ce_config() leave: "
+ "return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_ecd_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_ecd_config *config){
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_ecd_config() enter: "
+ "config=%p\n",config);
+
+ *config = params->ecd_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_ecd_config() leave\n");
+ ia_css_ecd_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_ecd_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_ecd_config *config)
+{
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_ecd_config() enter:\n");
+ ia_css_ecd_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->ecd_config = *config;
+ params->config_changed[IA_CSS_ECD_ID] = true;
+#ifndef ISP2401
+ params->config_changed[IA_CSS_ECD_ID] = true;
+
+#endif
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_ecd_config() leave: "
+ "return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_ynr_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_ynr_config *config){
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_ynr_config() enter: "
+ "config=%p\n",config);
+
+ *config = params->ynr_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_ynr_config() leave\n");
+ ia_css_ynr_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_ynr_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_ynr_config *config)
+{
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_ynr_config() enter:\n");
+ ia_css_ynr_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->ynr_config = *config;
+ params->config_changed[IA_CSS_YNR_ID] = true;
+#ifndef ISP2401
+ params->config_changed[IA_CSS_YNR_ID] = true;
+
+#endif
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_ynr_config() leave: "
+ "return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_fc_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_fc_config *config){
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_fc_config() enter: "
+ "config=%p\n",config);
+
+ *config = params->fc_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_fc_config() leave\n");
+ ia_css_fc_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_fc_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_fc_config *config)
+{
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_fc_config() enter:\n");
+ ia_css_fc_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->fc_config = *config;
+ params->config_changed[IA_CSS_FC_ID] = true;
+#ifndef ISP2401
+ params->config_changed[IA_CSS_FC_ID] = true;
+
+#endif
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_fc_config() leave: "
+ "return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_cnr_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_cnr_config *config){
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_cnr_config() enter: "
+ "config=%p\n",config);
+
+ *config = params->cnr_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_cnr_config() leave\n");
+ ia_css_cnr_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_cnr_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_cnr_config *config)
+{
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_cnr_config() enter:\n");
+ ia_css_cnr_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->cnr_config = *config;
+ params->config_changed[IA_CSS_CNR_ID] = true;
+#ifndef ISP2401
+ params->config_changed[IA_CSS_CNR_ID] = true;
+
+#endif
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_cnr_config() leave: "
+ "return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_macc_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_macc_config *config){
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_macc_config() enter: "
+ "config=%p\n",config);
+
+ *config = params->macc_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_macc_config() leave\n");
+ ia_css_macc_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_macc_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_macc_config *config)
+{
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_macc_config() enter:\n");
+ ia_css_macc_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->macc_config = *config;
+ params->config_changed[IA_CSS_MACC_ID] = true;
+#ifndef ISP2401
+ params->config_changed[IA_CSS_MACC_ID] = true;
+
+#endif
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_macc_config() leave: "
+ "return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_ctc_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_ctc_config *config){
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_ctc_config() enter: "
+ "config=%p\n",config);
+
+ *config = params->ctc_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_ctc_config() leave\n");
+ ia_css_ctc_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_ctc_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_ctc_config *config)
+{
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_ctc_config() enter:\n");
+ ia_css_ctc_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->ctc_config = *config;
+ params->config_changed[IA_CSS_CTC_ID] = true;
+#ifndef ISP2401
+ params->config_changed[IA_CSS_CTC_ID] = true;
+
+#endif
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_ctc_config() leave: "
+ "return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_aa_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_aa_config *config){
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_aa_config() enter: "
+ "config=%p\n",config);
+
+ *config = params->aa_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_aa_config() leave\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_aa_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_aa_config *config)
+{
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_aa_config() enter:\n");
+ params->aa_config = *config;
+ params->config_changed[IA_CSS_AA_ID] = true;
+#ifndef ISP2401
+ params->config_changed[IA_CSS_AA_ID] = true;
+
+#endif
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_aa_config() leave: "
+ "return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_yuv2rgb_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_cc_config *config){
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_yuv2rgb_config() enter: "
+ "config=%p\n",config);
+
+ *config = params->yuv2rgb_cc_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_yuv2rgb_config() leave\n");
+ ia_css_yuv2rgb_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_yuv2rgb_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_cc_config *config)
+{
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_yuv2rgb_config() enter:\n");
+ ia_css_yuv2rgb_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->yuv2rgb_cc_config = *config;
+ params->config_changed[IA_CSS_YUV2RGB_ID] = true;
+#ifndef ISP2401
+ params->config_changed[IA_CSS_YUV2RGB_ID] = true;
+
+#endif
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_yuv2rgb_config() leave: "
+ "return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_rgb2yuv_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_cc_config *config){
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_rgb2yuv_config() enter: "
+ "config=%p\n",config);
+
+ *config = params->rgb2yuv_cc_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_rgb2yuv_config() leave\n");
+ ia_css_rgb2yuv_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_rgb2yuv_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_cc_config *config)
+{
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_rgb2yuv_config() enter:\n");
+ ia_css_rgb2yuv_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->rgb2yuv_cc_config = *config;
+ params->config_changed[IA_CSS_RGB2YUV_ID] = true;
+#ifndef ISP2401
+ params->config_changed[IA_CSS_RGB2YUV_ID] = true;
+
+#endif
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_rgb2yuv_config() leave: "
+ "return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_csc_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_cc_config *config){
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_csc_config() enter: "
+ "config=%p\n",config);
+
+ *config = params->cc_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_csc_config() leave\n");
+ ia_css_csc_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_csc_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_cc_config *config)
+{
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_csc_config() enter:\n");
+ ia_css_csc_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->cc_config = *config;
+ params->config_changed[IA_CSS_CSC_ID] = true;
+#ifndef ISP2401
+ params->config_changed[IA_CSS_CSC_ID] = true;
+
+#endif
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_csc_config() leave: "
+ "return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_nr_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_nr_config *config){
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_nr_config() enter: "
+ "config=%p\n",config);
+
+ *config = params->nr_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_nr_config() leave\n");
+ ia_css_nr_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_nr_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_nr_config *config)
+{
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_nr_config() enter:\n");
+ ia_css_nr_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->nr_config = *config;
+ params->config_changed[IA_CSS_BNR_ID] = true;
+ params->config_changed[IA_CSS_NR_ID] = true;
+#ifndef ISP2401
+ params->config_changed[IA_CSS_NR_ID] = true;
+
+#endif
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_nr_config() leave: "
+ "return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_gc_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_gc_config *config){
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_gc_config() enter: "
+ "config=%p\n",config);
+
+ *config = params->gc_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_gc_config() leave\n");
+ ia_css_gc_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_gc_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_gc_config *config)
+{
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_gc_config() enter:\n");
+ ia_css_gc_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->gc_config = *config;
+ params->config_changed[IA_CSS_GC_ID] = true;
+#ifndef ISP2401
+ params->config_changed[IA_CSS_GC_ID] = true;
+
+#endif
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_gc_config() leave: "
+ "return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_sdis_horicoef_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_dvs_coefficients *config){
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_sdis_horicoef_config() enter: "
+ "config=%p\n",config);
+
+ *config = params->dvs_coefs;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_sdis_horicoef_config() leave\n");
+ ia_css_sdis_horicoef_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_sdis_horicoef_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_dvs_coefficients *config)
+{
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_sdis_horicoef_config() enter:\n");
+ ia_css_sdis_horicoef_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->dvs_coefs = *config;
+ params->config_changed[IA_CSS_SDIS_HORICOEF_ID] = true;
+ params->config_changed[IA_CSS_SDIS_VERTCOEF_ID] = true;
+ params->config_changed[IA_CSS_SDIS_HORIPROJ_ID] = true;
+ params->config_changed[IA_CSS_SDIS_VERTPROJ_ID] = true;
+#ifndef ISP2401
+ params->config_changed[IA_CSS_SDIS_HORICOEF_ID] = true;
+
+#endif
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_sdis_horicoef_config() leave: "
+ "return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_sdis_vertcoef_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_dvs_coefficients *config){
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_sdis_vertcoef_config() enter: "
+ "config=%p\n",config);
+
+ *config = params->dvs_coefs;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_sdis_vertcoef_config() leave\n");
+ ia_css_sdis_vertcoef_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_sdis_vertcoef_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_dvs_coefficients *config)
+{
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_sdis_vertcoef_config() enter:\n");
+ ia_css_sdis_vertcoef_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->dvs_coefs = *config;
+ params->config_changed[IA_CSS_SDIS_HORICOEF_ID] = true;
+ params->config_changed[IA_CSS_SDIS_VERTCOEF_ID] = true;
+ params->config_changed[IA_CSS_SDIS_HORIPROJ_ID] = true;
+ params->config_changed[IA_CSS_SDIS_VERTPROJ_ID] = true;
+#ifndef ISP2401
+ params->config_changed[IA_CSS_SDIS_VERTCOEF_ID] = true;
+
+#endif
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_sdis_vertcoef_config() leave: "
+ "return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_sdis_horiproj_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_dvs_coefficients *config){
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_sdis_horiproj_config() enter: "
+ "config=%p\n",config);
+
+ *config = params->dvs_coefs;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_sdis_horiproj_config() leave\n");
+ ia_css_sdis_horiproj_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_sdis_horiproj_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_dvs_coefficients *config)
+{
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_sdis_horiproj_config() enter:\n");
+ ia_css_sdis_horiproj_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->dvs_coefs = *config;
+ params->config_changed[IA_CSS_SDIS_HORICOEF_ID] = true;
+ params->config_changed[IA_CSS_SDIS_VERTCOEF_ID] = true;
+ params->config_changed[IA_CSS_SDIS_HORIPROJ_ID] = true;
+ params->config_changed[IA_CSS_SDIS_VERTPROJ_ID] = true;
+#ifndef ISP2401
+ params->config_changed[IA_CSS_SDIS_HORIPROJ_ID] = true;
+
+#endif
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_sdis_horiproj_config() leave: "
+ "return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_sdis_vertproj_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_dvs_coefficients *config){
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_sdis_vertproj_config() enter: "
+ "config=%p\n",config);
+
+ *config = params->dvs_coefs;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_sdis_vertproj_config() leave\n");
+ ia_css_sdis_vertproj_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_sdis_vertproj_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_dvs_coefficients *config)
+{
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_sdis_vertproj_config() enter:\n");
+ ia_css_sdis_vertproj_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->dvs_coefs = *config;
+ params->config_changed[IA_CSS_SDIS_HORICOEF_ID] = true;
+ params->config_changed[IA_CSS_SDIS_VERTCOEF_ID] = true;
+ params->config_changed[IA_CSS_SDIS_HORIPROJ_ID] = true;
+ params->config_changed[IA_CSS_SDIS_VERTPROJ_ID] = true;
+#ifndef ISP2401
+ params->config_changed[IA_CSS_SDIS_VERTPROJ_ID] = true;
+
+#endif
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_sdis_vertproj_config() leave: "
+ "return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_sdis2_horicoef_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_dvs2_coefficients *config){
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_sdis2_horicoef_config() enter: "
+ "config=%p\n",config);
+
+ *config = params->dvs2_coefs;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_sdis2_horicoef_config() leave\n");
+ ia_css_sdis2_horicoef_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_sdis2_horicoef_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_dvs2_coefficients *config)
+{
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_sdis2_horicoef_config() enter:\n");
+ ia_css_sdis2_horicoef_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->dvs2_coefs = *config;
+ params->config_changed[IA_CSS_SDIS2_HORICOEF_ID] = true;
+ params->config_changed[IA_CSS_SDIS2_VERTCOEF_ID] = true;
+ params->config_changed[IA_CSS_SDIS2_HORIPROJ_ID] = true;
+ params->config_changed[IA_CSS_SDIS2_VERTPROJ_ID] = true;
+#ifndef ISP2401
+ params->config_changed[IA_CSS_SDIS2_HORICOEF_ID] = true;
+
+#endif
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_sdis2_horicoef_config() leave: "
+ "return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_sdis2_vertcoef_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_dvs2_coefficients *config){
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_sdis2_vertcoef_config() enter: "
+ "config=%p\n",config);
+
+ *config = params->dvs2_coefs;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_sdis2_vertcoef_config() leave\n");
+ ia_css_sdis2_vertcoef_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_sdis2_vertcoef_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_dvs2_coefficients *config)
+{
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_sdis2_vertcoef_config() enter:\n");
+ ia_css_sdis2_vertcoef_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->dvs2_coefs = *config;
+ params->config_changed[IA_CSS_SDIS2_HORICOEF_ID] = true;
+ params->config_changed[IA_CSS_SDIS2_VERTCOEF_ID] = true;
+ params->config_changed[IA_CSS_SDIS2_HORIPROJ_ID] = true;
+ params->config_changed[IA_CSS_SDIS2_VERTPROJ_ID] = true;
+#ifndef ISP2401
+ params->config_changed[IA_CSS_SDIS2_VERTCOEF_ID] = true;
+
+#endif
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_sdis2_vertcoef_config() leave: "
+ "return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_sdis2_horiproj_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_dvs2_coefficients *config){
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_sdis2_horiproj_config() enter: "
+ "config=%p\n",config);
+
+ *config = params->dvs2_coefs;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_sdis2_horiproj_config() leave\n");
+ ia_css_sdis2_horiproj_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_sdis2_horiproj_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_dvs2_coefficients *config)
+{
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_sdis2_horiproj_config() enter:\n");
+ ia_css_sdis2_horiproj_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->dvs2_coefs = *config;
+ params->config_changed[IA_CSS_SDIS2_HORICOEF_ID] = true;
+ params->config_changed[IA_CSS_SDIS2_VERTCOEF_ID] = true;
+ params->config_changed[IA_CSS_SDIS2_HORIPROJ_ID] = true;
+ params->config_changed[IA_CSS_SDIS2_VERTPROJ_ID] = true;
+#ifndef ISP2401
+ params->config_changed[IA_CSS_SDIS2_HORIPROJ_ID] = true;
+
+#endif
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_sdis2_horiproj_config() leave: "
+ "return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_sdis2_vertproj_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_dvs2_coefficients *config){
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_sdis2_vertproj_config() enter: "
+ "config=%p\n",config);
+
+ *config = params->dvs2_coefs;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_sdis2_vertproj_config() leave\n");
+ ia_css_sdis2_vertproj_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_sdis2_vertproj_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_dvs2_coefficients *config)
+{
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_sdis2_vertproj_config() enter:\n");
+ ia_css_sdis2_vertproj_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->dvs2_coefs = *config;
+ params->config_changed[IA_CSS_SDIS2_HORICOEF_ID] = true;
+ params->config_changed[IA_CSS_SDIS2_VERTCOEF_ID] = true;
+ params->config_changed[IA_CSS_SDIS2_HORIPROJ_ID] = true;
+ params->config_changed[IA_CSS_SDIS2_VERTPROJ_ID] = true;
+#ifndef ISP2401
+ params->config_changed[IA_CSS_SDIS2_VERTPROJ_ID] = true;
+
+#endif
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_sdis2_vertproj_config() leave: "
+ "return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_r_gamma_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_rgb_gamma_table *config){
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_r_gamma_config() enter: "
+ "config=%p\n",config);
+
+ *config = params->r_gamma_table;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_r_gamma_config() leave\n");
+ ia_css_r_gamma_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_r_gamma_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_rgb_gamma_table *config)
+{
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_r_gamma_config() enter:\n");
+ ia_css_r_gamma_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->r_gamma_table = *config;
+ params->config_changed[IA_CSS_R_GAMMA_ID] = true;
+#ifndef ISP2401
+ params->config_changed[IA_CSS_R_GAMMA_ID] = true;
+
+#endif
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_r_gamma_config() leave: "
+ "return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_g_gamma_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_rgb_gamma_table *config){
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_g_gamma_config() enter: "
+ "config=%p\n",config);
+
+ *config = params->g_gamma_table;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_g_gamma_config() leave\n");
+ ia_css_g_gamma_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_g_gamma_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_rgb_gamma_table *config)
+{
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_g_gamma_config() enter:\n");
+ ia_css_g_gamma_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->g_gamma_table = *config;
+ params->config_changed[IA_CSS_G_GAMMA_ID] = true;
+#ifndef ISP2401
+ params->config_changed[IA_CSS_G_GAMMA_ID] = true;
+
+#endif
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_g_gamma_config() leave: "
+ "return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_b_gamma_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_rgb_gamma_table *config){
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_b_gamma_config() enter: "
+ "config=%p\n",config);
+
+ *config = params->b_gamma_table;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_b_gamma_config() leave\n");
+ ia_css_b_gamma_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_b_gamma_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_rgb_gamma_table *config)
+{
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_b_gamma_config() enter:\n");
+ ia_css_b_gamma_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->b_gamma_table = *config;
+ params->config_changed[IA_CSS_B_GAMMA_ID] = true;
+#ifndef ISP2401
+ params->config_changed[IA_CSS_B_GAMMA_ID] = true;
+
+#endif
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_b_gamma_config() leave: "
+ "return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_xnr_table_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_xnr_table *config){
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_xnr_table_config() enter: "
+ "config=%p\n",config);
+
+ *config = params->xnr_table;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_xnr_table_config() leave\n");
+ ia_css_xnr_table_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_xnr_table_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_xnr_table *config)
+{
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_xnr_table_config() enter:\n");
+ ia_css_xnr_table_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->xnr_table = *config;
+ params->config_changed[IA_CSS_XNR_TABLE_ID] = true;
+#ifndef ISP2401
+ params->config_changed[IA_CSS_XNR_TABLE_ID] = true;
+
+#endif
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_xnr_table_config() leave: "
+ "return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_formats_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_formats_config *config){
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_formats_config() enter: "
+ "config=%p\n",config);
+
+ *config = params->formats_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_formats_config() leave\n");
+ ia_css_formats_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_formats_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_formats_config *config)
+{
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_formats_config() enter:\n");
+ ia_css_formats_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->formats_config = *config;
+ params->config_changed[IA_CSS_FORMATS_ID] = true;
+#ifndef ISP2401
+ params->config_changed[IA_CSS_FORMATS_ID] = true;
+
+#endif
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_formats_config() leave: "
+ "return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_xnr_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_xnr_config *config){
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_xnr_config() enter: "
+ "config=%p\n",config);
+
+ *config = params->xnr_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_xnr_config() leave\n");
+ ia_css_xnr_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_xnr_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_xnr_config *config)
+{
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_xnr_config() enter:\n");
+ ia_css_xnr_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->xnr_config = *config;
+ params->config_changed[IA_CSS_XNR_ID] = true;
+#ifndef ISP2401
+ params->config_changed[IA_CSS_XNR_ID] = true;
+
+#endif
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_xnr_config() leave: "
+ "return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_xnr3_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_xnr3_config *config){
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_xnr3_config() enter: "
+ "config=%p\n",config);
+
+ *config = params->xnr3_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_xnr3_config() leave\n");
+ ia_css_xnr3_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_xnr3_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_xnr3_config *config)
+{
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_xnr3_config() enter:\n");
+ ia_css_xnr3_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->xnr3_config = *config;
+ params->config_changed[IA_CSS_XNR3_ID] = true;
+#ifndef ISP2401
+ params->config_changed[IA_CSS_XNR3_ID] = true;
+
+#endif
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_xnr3_config() leave: "
+ "return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_s3a_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_3a_config *config){
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_s3a_config() enter: "
+ "config=%p\n",config);
+
+ *config = params->s3a_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_s3a_config() leave\n");
+ ia_css_s3a_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_s3a_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_3a_config *config)
+{
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_s3a_config() enter:\n");
+ ia_css_s3a_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->s3a_config = *config;
+ params->config_changed[IA_CSS_BH_ID] = true;
+ params->config_changed[IA_CSS_S3A_ID] = true;
+#ifndef ISP2401
+ params->config_changed[IA_CSS_S3A_ID] = true;
+
+#endif
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_s3a_config() leave: "
+ "return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_output_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_output_config *config){
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_output_config() enter: "
+ "config=%p\n",config);
+
+ *config = params->output_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_output_config() leave\n");
+ ia_css_output_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_output_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_output_config *config)
+{
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_output_config() enter:\n");
+ ia_css_output_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->output_config = *config;
+ params->config_changed[IA_CSS_OUTPUT_ID] = true;
+#ifndef ISP2401
+ params->config_changed[IA_CSS_OUTPUT_ID] = true;
+
+#endif
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_output_config() leave: "
+ "return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_global_access_function() */
+
+void
+ia_css_get_configs(struct ia_css_isp_parameters *params,
+ const struct ia_css_isp_config *config)
+{
+ ia_css_get_dp_config(params, config->dp_config);
+ ia_css_get_wb_config(params, config->wb_config);
+ ia_css_get_tnr_config(params, config->tnr_config);
+ ia_css_get_ob_config(params, config->ob_config);
+ ia_css_get_de_config(params, config->de_config);
+ ia_css_get_anr_config(params, config->anr_config);
+ ia_css_get_anr2_config(params, config->anr_thres);
+ ia_css_get_ce_config(params, config->ce_config);
+ ia_css_get_ecd_config(params, config->ecd_config);
+ ia_css_get_ynr_config(params, config->ynr_config);
+ ia_css_get_fc_config(params, config->fc_config);
+ ia_css_get_cnr_config(params, config->cnr_config);
+ ia_css_get_macc_config(params, config->macc_config);
+ ia_css_get_ctc_config(params, config->ctc_config);
+ ia_css_get_aa_config(params, config->aa_config);
+ ia_css_get_yuv2rgb_config(params, config->yuv2rgb_cc_config);
+ ia_css_get_rgb2yuv_config(params, config->rgb2yuv_cc_config);
+ ia_css_get_csc_config(params, config->cc_config);
+ ia_css_get_nr_config(params, config->nr_config);
+ ia_css_get_gc_config(params, config->gc_config);
+ ia_css_get_sdis_horicoef_config(params, config->dvs_coefs);
+ ia_css_get_sdis_vertcoef_config(params, config->dvs_coefs);
+ ia_css_get_sdis_horiproj_config(params, config->dvs_coefs);
+ ia_css_get_sdis_vertproj_config(params, config->dvs_coefs);
+ ia_css_get_sdis2_horicoef_config(params, config->dvs2_coefs);
+ ia_css_get_sdis2_vertcoef_config(params, config->dvs2_coefs);
+ ia_css_get_sdis2_horiproj_config(params, config->dvs2_coefs);
+ ia_css_get_sdis2_vertproj_config(params, config->dvs2_coefs);
+ ia_css_get_r_gamma_config(params, config->r_gamma_table);
+ ia_css_get_g_gamma_config(params, config->g_gamma_table);
+ ia_css_get_b_gamma_config(params, config->b_gamma_table);
+ ia_css_get_xnr_table_config(params, config->xnr_table);
+ ia_css_get_formats_config(params, config->formats_config);
+ ia_css_get_xnr_config(params, config->xnr_config);
+ ia_css_get_xnr3_config(params, config->xnr3_config);
+ ia_css_get_s3a_config(params, config->s3a_config);
+ ia_css_get_output_config(params, config->output_config);
+}
+
+/* Code generated by genparam/gencode.c:gen_global_access_function() */
+
+void
+ia_css_set_configs(struct ia_css_isp_parameters *params,
+ const struct ia_css_isp_config *config)
+{
+ ia_css_set_dp_config(params, config->dp_config);
+ ia_css_set_wb_config(params, config->wb_config);
+ ia_css_set_tnr_config(params, config->tnr_config);
+ ia_css_set_ob_config(params, config->ob_config);
+ ia_css_set_de_config(params, config->de_config);
+ ia_css_set_anr_config(params, config->anr_config);
+ ia_css_set_anr2_config(params, config->anr_thres);
+ ia_css_set_ce_config(params, config->ce_config);
+ ia_css_set_ecd_config(params, config->ecd_config);
+ ia_css_set_ynr_config(params, config->ynr_config);
+ ia_css_set_fc_config(params, config->fc_config);
+ ia_css_set_cnr_config(params, config->cnr_config);
+ ia_css_set_macc_config(params, config->macc_config);
+ ia_css_set_ctc_config(params, config->ctc_config);
+ ia_css_set_aa_config(params, config->aa_config);
+ ia_css_set_yuv2rgb_config(params, config->yuv2rgb_cc_config);
+ ia_css_set_rgb2yuv_config(params, config->rgb2yuv_cc_config);
+ ia_css_set_csc_config(params, config->cc_config);
+ ia_css_set_nr_config(params, config->nr_config);
+ ia_css_set_gc_config(params, config->gc_config);
+ ia_css_set_sdis_horicoef_config(params, config->dvs_coefs);
+ ia_css_set_sdis_vertcoef_config(params, config->dvs_coefs);
+ ia_css_set_sdis_horiproj_config(params, config->dvs_coefs);
+ ia_css_set_sdis_vertproj_config(params, config->dvs_coefs);
+ ia_css_set_sdis2_horicoef_config(params, config->dvs2_coefs);
+ ia_css_set_sdis2_vertcoef_config(params, config->dvs2_coefs);
+ ia_css_set_sdis2_horiproj_config(params, config->dvs2_coefs);
+ ia_css_set_sdis2_vertproj_config(params, config->dvs2_coefs);
+ ia_css_set_r_gamma_config(params, config->r_gamma_table);
+ ia_css_set_g_gamma_config(params, config->g_gamma_table);
+ ia_css_set_b_gamma_config(params, config->b_gamma_table);
+ ia_css_set_xnr_table_config(params, config->xnr_table);
+ ia_css_set_formats_config(params, config->formats_config);
+ ia_css_set_xnr_config(params, config->xnr_config);
+ ia_css_set_xnr3_config(params, config->xnr3_config);
+ ia_css_set_s3a_config(params, config->s3a_config);
+ ia_css_set_output_config(params, config->output_config);
+}
+
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hive_isp_css_2401_system_csi2p_generated/ia_css_isp_params.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hive_isp_css_2401_system_csi2p_generated/ia_css_isp_params.h
new file mode 100644
index 000000000000..5b3deb7f74ae
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hive_isp_css_2401_system_csi2p_generated/ia_css_isp_params.h
@@ -0,0 +1,399 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+/* Generated code: do not edit or commit. */
+
+#ifndef _IA_CSS_ISP_PARAM_H
+#define _IA_CSS_ISP_PARAM_H
+
+/* Code generated by genparam/gencode.c:gen_param_enum() */
+
+enum ia_css_parameter_ids {
+ IA_CSS_AA_ID,
+ IA_CSS_ANR_ID,
+ IA_CSS_ANR2_ID,
+ IA_CSS_BH_ID,
+ IA_CSS_CNR_ID,
+ IA_CSS_CROP_ID,
+ IA_CSS_CSC_ID,
+ IA_CSS_DP_ID,
+ IA_CSS_BNR_ID,
+ IA_CSS_DE_ID,
+ IA_CSS_ECD_ID,
+ IA_CSS_FORMATS_ID,
+ IA_CSS_FPN_ID,
+ IA_CSS_GC_ID,
+ IA_CSS_CE_ID,
+ IA_CSS_YUV2RGB_ID,
+ IA_CSS_RGB2YUV_ID,
+ IA_CSS_R_GAMMA_ID,
+ IA_CSS_G_GAMMA_ID,
+ IA_CSS_B_GAMMA_ID,
+ IA_CSS_UDS_ID,
+ IA_CSS_RAA_ID,
+ IA_CSS_S3A_ID,
+ IA_CSS_OB_ID,
+ IA_CSS_OUTPUT_ID,
+ IA_CSS_SC_ID,
+ IA_CSS_BDS_ID,
+ IA_CSS_TNR_ID,
+ IA_CSS_MACC_ID,
+ IA_CSS_SDIS_HORICOEF_ID,
+ IA_CSS_SDIS_VERTCOEF_ID,
+ IA_CSS_SDIS_HORIPROJ_ID,
+ IA_CSS_SDIS_VERTPROJ_ID,
+ IA_CSS_SDIS2_HORICOEF_ID,
+ IA_CSS_SDIS2_VERTCOEF_ID,
+ IA_CSS_SDIS2_HORIPROJ_ID,
+ IA_CSS_SDIS2_VERTPROJ_ID,
+ IA_CSS_WB_ID,
+ IA_CSS_NR_ID,
+ IA_CSS_YEE_ID,
+ IA_CSS_YNR_ID,
+ IA_CSS_FC_ID,
+ IA_CSS_CTC_ID,
+ IA_CSS_XNR_TABLE_ID,
+ IA_CSS_XNR_ID,
+ IA_CSS_XNR3_ID,
+ IA_CSS_NUM_PARAMETER_IDS
+};
+
+/* Code generated by genparam/gencode.c:gen_param_offsets() */
+
+struct ia_css_memory_offsets {
+ struct {
+ struct ia_css_isp_parameter aa;
+ struct ia_css_isp_parameter anr;
+ struct ia_css_isp_parameter bh;
+ struct ia_css_isp_parameter cnr;
+ struct ia_css_isp_parameter crop;
+ struct ia_css_isp_parameter csc;
+ struct ia_css_isp_parameter dp;
+ struct ia_css_isp_parameter bnr;
+ struct ia_css_isp_parameter de;
+ struct ia_css_isp_parameter ecd;
+ struct ia_css_isp_parameter formats;
+ struct ia_css_isp_parameter fpn;
+ struct ia_css_isp_parameter gc;
+ struct ia_css_isp_parameter ce;
+ struct ia_css_isp_parameter yuv2rgb;
+ struct ia_css_isp_parameter rgb2yuv;
+ struct ia_css_isp_parameter uds;
+ struct ia_css_isp_parameter raa;
+ struct ia_css_isp_parameter s3a;
+ struct ia_css_isp_parameter ob;
+ struct ia_css_isp_parameter output;
+ struct ia_css_isp_parameter sc;
+ struct ia_css_isp_parameter bds;
+ struct ia_css_isp_parameter tnr;
+ struct ia_css_isp_parameter macc;
+ struct ia_css_isp_parameter sdis_horiproj;
+ struct ia_css_isp_parameter sdis_vertproj;
+ struct ia_css_isp_parameter sdis2_horiproj;
+ struct ia_css_isp_parameter sdis2_vertproj;
+ struct ia_css_isp_parameter wb;
+ struct ia_css_isp_parameter nr;
+ struct ia_css_isp_parameter yee;
+ struct ia_css_isp_parameter ynr;
+ struct ia_css_isp_parameter fc;
+ struct ia_css_isp_parameter ctc;
+ struct ia_css_isp_parameter xnr;
+ struct ia_css_isp_parameter xnr3;
+ struct ia_css_isp_parameter get;
+ struct ia_css_isp_parameter put;
+ } dmem;
+ struct {
+ struct ia_css_isp_parameter anr2;
+ struct ia_css_isp_parameter ob;
+ struct ia_css_isp_parameter sdis_horicoef;
+ struct ia_css_isp_parameter sdis_vertcoef;
+ struct ia_css_isp_parameter sdis2_horicoef;
+ struct ia_css_isp_parameter sdis2_vertcoef;
+#ifdef ISP2401
+ struct ia_css_isp_parameter xnr3;
+#endif
+ } vmem;
+ struct {
+ struct ia_css_isp_parameter bh;
+ } hmem0;
+ struct {
+ struct ia_css_isp_parameter gc;
+ struct ia_css_isp_parameter g_gamma;
+ struct ia_css_isp_parameter xnr_table;
+ } vamem1;
+ struct {
+ struct ia_css_isp_parameter r_gamma;
+ struct ia_css_isp_parameter ctc;
+ } vamem0;
+ struct {
+ struct ia_css_isp_parameter b_gamma;
+ } vamem2;
+};
+
+#if defined(IA_CSS_INCLUDE_PARAMETERS)
+
+#include "ia_css_stream.h" /* struct ia_css_stream */
+#include "ia_css_binary.h" /* struct ia_css_binary */
+/* Code generated by genparam/gencode.c:gen_param_process_table() */
+
+struct ia_css_pipeline_stage; /* forward declaration */
+
+extern void (* ia_css_kernel_process_param[IA_CSS_NUM_PARAMETER_IDS])(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_dp_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_dp_config *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_wb_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_wb_config *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_tnr_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_tnr_config *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_ob_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_ob_config *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_de_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_de_config *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_anr_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_anr_config *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_anr2_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_anr_thres *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_ce_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_ce_config *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_ecd_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_ecd_config *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_ynr_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_ynr_config *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_fc_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_fc_config *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_cnr_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_cnr_config *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_macc_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_macc_config *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_ctc_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_ctc_config *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_aa_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_aa_config *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_yuv2rgb_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_cc_config *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_rgb2yuv_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_cc_config *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_csc_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_cc_config *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_nr_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_nr_config *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_gc_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_gc_config *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_sdis_horicoef_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_dvs_coefficients *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_sdis_vertcoef_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_dvs_coefficients *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_sdis_horiproj_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_dvs_coefficients *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_sdis_vertproj_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_dvs_coefficients *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_sdis2_horicoef_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_dvs2_coefficients *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_sdis2_vertcoef_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_dvs2_coefficients *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_sdis2_horiproj_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_dvs2_coefficients *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_sdis2_vertproj_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_dvs2_coefficients *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_r_gamma_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_rgb_gamma_table *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_g_gamma_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_rgb_gamma_table *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_b_gamma_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_rgb_gamma_table *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_xnr_table_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_xnr_table *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_formats_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_formats_config *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_xnr_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_xnr_config *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_xnr3_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_xnr3_config *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_s3a_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_3a_config *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_output_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_output_config *config);
+
+/* Code generated by genparam/gencode.c:gen_global_access_function() */
+
+void
+ia_css_get_configs(struct ia_css_isp_parameters *params,
+ const struct ia_css_isp_config *config)
+;
+#ifdef ISP2401
+
+#endif
+/* Code generated by genparam/gencode.c:gen_global_access_function() */
+
+void
+ia_css_set_configs(struct ia_css_isp_parameters *params,
+ const struct ia_css_isp_config *config)
+;
+#ifdef ISP2401
+
+#endif
+#endif /* IA_CSS_INCLUDE_PARAMETERS */
+
+#endif /* _IA_CSS_ISP_PARAM_H */
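The ia_css_kernel_process_param[] table declared above is indexed by enum ia_css_parameter_ids. A rough dispatch sketch (pipe_id and stage stand in for values the pipeline code already carries; the NULL guard is defensive and not something this header guarantees):

        #define IA_CSS_INCLUDE_PARAMETERS
        #include "ia_css_isp_params.h"

        static void process_all_params(unsigned pipe_id,
                                       const struct ia_css_pipeline_stage *stage,
                                       struct ia_css_isp_parameters *params)
        {
                unsigned id;

                /* One table slot per ia_css_parameter_ids entry. */
                for (id = 0; id < IA_CSS_NUM_PARAMETER_IDS; id++)
                        if (ia_css_kernel_process_param[id])
                                ia_css_kernel_process_param[id](pipe_id, stage, params);
        }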
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hive_isp_css_2401_system_csi2p_generated/ia_css_isp_states.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hive_isp_css_2401_system_csi2p_generated/ia_css_isp_states.c
new file mode 100644
index 000000000000..e87d05bc73ae
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hive_isp_css_2401_system_csi2p_generated/ia_css_isp_states.c
@@ -0,0 +1,214 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+/* Generated code: do not edit or commit. */
+
+#include "ia_css_pipeline.h"
+#include "ia_css_isp_states.h"
+#include "ia_css_debug.h"
+#include "assert_support.h"
+
+/* Code generated by genparam/genstate.c:gen_init_function() */
+
+static void
+ia_css_initialize_aa_state(
+ const struct ia_css_binary *binary)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_initialize_aa_state() enter:\n");
+
+ {
+ unsigned size = binary->info->mem_offsets.offsets.state->vmem.aa.size;
+ unsigned offset = binary->info->mem_offsets.offsets.state->vmem.aa.offset;
+
+ if (size)
+ memset(&binary->mem_params.params[IA_CSS_PARAM_CLASS_STATE][IA_CSS_ISP_VMEM].address[offset], 0, size);
+
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_initialize_aa_state() leave:\n");
+}
+
+/* Code generated by genparam/genstate.c:gen_init_function() */
+
+static void
+ia_css_initialize_cnr_state(
+ const struct ia_css_binary *binary)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_initialize_cnr_state() enter:\n");
+
+ {
+ unsigned size = binary->info->mem_offsets.offsets.state->vmem.cnr.size;
+
+ unsigned offset = binary->info->mem_offsets.offsets.state->vmem.cnr.offset;
+
+ if (size) {
+ ia_css_init_cnr_state(
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_STATE][IA_CSS_ISP_VMEM].address[offset],
+ size);
+ }
+
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_initialize_cnr_state() leave:\n");
+}
+
+/* Code generated by genparam/genstate.c:gen_init_function() */
+
+static void
+ia_css_initialize_cnr2_state(
+ const struct ia_css_binary *binary)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_initialize_cnr2_state() enter:\n");
+
+ {
+ unsigned size = binary->info->mem_offsets.offsets.state->vmem.cnr2.size;
+
+ unsigned offset = binary->info->mem_offsets.offsets.state->vmem.cnr2.offset;
+
+ if (size) {
+ ia_css_init_cnr2_state(
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_STATE][IA_CSS_ISP_VMEM].address[offset],
+ size);
+ }
+
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_initialize_cnr2_state() leave:\n");
+}
+
+/* Code generated by genparam/genstate.c:gen_init_function() */
+
+static void
+ia_css_initialize_dp_state(
+ const struct ia_css_binary *binary)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_initialize_dp_state() enter:\n");
+
+ {
+ unsigned size = binary->info->mem_offsets.offsets.state->vmem.dp.size;
+
+ unsigned offset = binary->info->mem_offsets.offsets.state->vmem.dp.offset;
+
+ if (size) {
+ ia_css_init_dp_state(
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_STATE][IA_CSS_ISP_VMEM].address[offset],
+ size);
+ }
+
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_initialize_dp_state() leave:\n");
+}
+
+/* Code generated by genparam/genstate.c:gen_init_function() */
+
+static void
+ia_css_initialize_de_state(
+ const struct ia_css_binary *binary)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_initialize_de_state() enter:\n");
+
+ {
+ unsigned size = binary->info->mem_offsets.offsets.state->vmem.de.size;
+
+ unsigned offset = binary->info->mem_offsets.offsets.state->vmem.de.offset;
+
+ if (size) {
+ ia_css_init_de_state(
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_STATE][IA_CSS_ISP_VMEM].address[offset],
+ size);
+ }
+
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_initialize_de_state() leave:\n");
+}
+
+/* Code generated by genparam/genstate.c:gen_init_function() */
+
+static void
+ia_css_initialize_tnr_state(
+ const struct ia_css_binary *binary)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_initialize_tnr_state() enter:\n");
+
+ {
+ unsigned size = binary->info->mem_offsets.offsets.state->dmem.tnr.size;
+
+ unsigned offset = binary->info->mem_offsets.offsets.state->dmem.tnr.offset;
+
+ if (size) {
+ ia_css_init_tnr_state((struct sh_css_isp_tnr_dmem_state *)
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_STATE][IA_CSS_ISP_DMEM].address[offset],
+ size);
+ }
+
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_initialize_tnr_state() leave:\n");
+}
+
+/* Code generated by genparam/genstate.c:gen_init_function() */
+
+static void
+ia_css_initialize_ref_state(
+ const struct ia_css_binary *binary)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_initialize_ref_state() enter:\n");
+
+ {
+ unsigned size = binary->info->mem_offsets.offsets.state->dmem.ref.size;
+
+ unsigned offset = binary->info->mem_offsets.offsets.state->dmem.ref.offset;
+
+ if (size) {
+ ia_css_init_ref_state((struct sh_css_isp_ref_dmem_state *)
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_STATE][IA_CSS_ISP_DMEM].address[offset],
+ size);
+ }
+
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_initialize_ref_state() leave:\n");
+}
+
+/* Code generated by genparam/genstate.c:gen_init_function() */
+
+static void
+ia_css_initialize_ynr_state(
+ const struct ia_css_binary *binary)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_initialize_ynr_state() enter:\n");
+
+ {
+ unsigned size = binary->info->mem_offsets.offsets.state->vmem.ynr.size;
+
+ unsigned offset = binary->info->mem_offsets.offsets.state->vmem.ynr.offset;
+
+ if (size) {
+ ia_css_init_ynr_state(
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_STATE][IA_CSS_ISP_VMEM].address[offset],
+ size);
+ }
+
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_initialize_ynr_state() leave:\n");
+}
+
+/* Code generated by genparam/genstate.c:gen_state_init_table() */
+
+void (* ia_css_kernel_init_state[IA_CSS_NUM_STATE_IDS])(const struct ia_css_binary *binary) = {
+ ia_css_initialize_aa_state,
+ ia_css_initialize_cnr_state,
+ ia_css_initialize_cnr2_state,
+ ia_css_initialize_dp_state,
+ ia_css_initialize_de_state,
+ ia_css_initialize_tnr_state,
+ ia_css_initialize_ref_state,
+ ia_css_initialize_ynr_state,
+};
+
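The generated ia_css_kernel_init_state[] table above pairs one initializer with each enum ia_css_state_ids value, so a caller seeds all per-kernel state for a binary by walking it, roughly as sketched here (init_all_states is an illustrative name, not part of this patch):

        #include "ia_css_isp_states.h"

        static void init_all_states(const struct ia_css_binary *binary)
        {
                unsigned i;

                /* Indices follow enum ia_css_state_ids; every slot is
                 * populated by the table defined in this file. */
                for (i = 0; i < IA_CSS_NUM_STATE_IDS; i++)
                        ia_css_kernel_init_state[i](binary);
        }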
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hive_isp_css_2401_system_csi2p_generated/ia_css_isp_states.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hive_isp_css_2401_system_csi2p_generated/ia_css_isp_states.h
new file mode 100644
index 000000000000..732adafb0a63
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hive_isp_css_2401_system_csi2p_generated/ia_css_isp_states.h
@@ -0,0 +1,72 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#define IA_CSS_INCLUDE_STATES
+#include "isp/kernels/aa/aa_2/ia_css_aa2.host.h"
+#include "isp/kernels/cnr/cnr_1.0/ia_css_cnr.host.h"
+#include "isp/kernels/cnr/cnr_2/ia_css_cnr2.host.h"
+#include "isp/kernels/de/de_1.0/ia_css_de.host.h"
+#include "isp/kernels/dp/dp_1.0/ia_css_dp.host.h"
+#include "isp/kernels/ref/ref_1.0/ia_css_ref.host.h"
+#include "isp/kernels/tnr/tnr_1.0/ia_css_tnr.host.h"
+#include "isp/kernels/ynr/ynr_1.0/ia_css_ynr.host.h"
+#include "isp/kernels/dpc2/ia_css_dpc2.host.h"
+#include "isp/kernels/eed1_8/ia_css_eed1_8.host.h"
+/* Generated code: do not edit or commit. */
+
+#ifndef _IA_CSS_ISP_STATE_H
+#define _IA_CSS_ISP_STATE_H
+
+/* Code generated by genparam/gencode.c:gen_param_enum() */
+
+enum ia_css_state_ids {
+ IA_CSS_AA_STATE_ID,
+ IA_CSS_CNR_STATE_ID,
+ IA_CSS_CNR2_STATE_ID,
+ IA_CSS_DP_STATE_ID,
+ IA_CSS_DE_STATE_ID,
+ IA_CSS_TNR_STATE_ID,
+ IA_CSS_REF_STATE_ID,
+ IA_CSS_YNR_STATE_ID,
+ IA_CSS_NUM_STATE_IDS
+};
+
+/* Code generated by genparam/gencode.c:gen_param_offsets() */
+
+struct ia_css_state_memory_offsets {
+ struct {
+ struct ia_css_isp_parameter aa;
+ struct ia_css_isp_parameter cnr;
+ struct ia_css_isp_parameter cnr2;
+ struct ia_css_isp_parameter dp;
+ struct ia_css_isp_parameter de;
+ struct ia_css_isp_parameter ynr;
+ } vmem;
+ struct {
+ struct ia_css_isp_parameter tnr;
+ struct ia_css_isp_parameter ref;
+ } dmem;
+};
+
+#if defined(IA_CSS_INCLUDE_STATES)
+
+#include "ia_css_stream.h" /* struct ia_css_stream */
+#include "ia_css_binary.h" /* struct ia_css_binary */
+/* Code generated by genparam/genstate.c:gen_state_init_table() */
+
+extern void (* ia_css_kernel_init_state[IA_CSS_NUM_STATE_IDS])(const struct ia_css_binary *binary);
+
+#endif /* IA_CSS_INCLUDE_STATES */
+
+#endif /* _IA_CSS_ISP_STATE_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/csi_rx.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/csi_rx.c
new file mode 100644
index 000000000000..505e2b600beb
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/csi_rx.c
@@ -0,0 +1,41 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+
+#include "system_global.h"
+
+const uint32_t N_SHORT_PACKET_LUT_ENTRIES[N_CSI_RX_BACKEND_ID] = {
+ 4, /* 4 entries at CSI_RX_BACKEND0_ID*/
+ 4, /* 4 entries at CSI_RX_BACKEND1_ID*/
+ 4 /* 4 entries at CSI_RX_BACKEND2_ID*/
+};
+
+const uint32_t N_LONG_PACKET_LUT_ENTRIES[N_CSI_RX_BACKEND_ID] = {
+ 8, /* 8 entries at CSI_RX_BACKEND0_ID*/
+ 4, /* 4 entries at CSI_RX_BACKEND1_ID*/
+ 4 /* 4 entries at CSI_RX_BACKEND2_ID*/
+};
+
+const uint32_t N_CSI_RX_FE_CTRL_DLANES[N_CSI_RX_FRONTEND_ID] = {
+ N_CSI_RX_DLANE_ID, /* 4 dlanes for CSI_RX_FRONTEND0_ID */
+ N_CSI_RX_DLANE_ID, /* 4 dlanes for CSI_RX_FRONTEND1_ID */
+ N_CSI_RX_DLANE_ID /* 4 dlanes for CSI_RX_FRONTEND2_ID */
+};
+
+/* sid_width for CSI_RX_BACKEND<N>_ID */
+const uint32_t N_CSI_RX_BE_SID_WIDTH[N_CSI_RX_BACKEND_ID] = {
+ 3,
+ 2,
+ 2
+};
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/csi_rx_local.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/csi_rx_local.h
new file mode 100644
index 000000000000..a2e9d54a4a37
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/csi_rx_local.h
@@ -0,0 +1,61 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __CSI_RX_LOCAL_H_INCLUDED__
+#define __CSI_RX_LOCAL_H_INCLUDED__
+
+#include "csi_rx_global.h"
+#define N_CSI_RX_BE_MIPI_COMP_FMT_REG 4
+#define N_CSI_RX_BE_MIPI_CUSTOM_PEC 12
+#define N_CSI_RX_BE_SHORT_PKT_LUT 4
+#define N_CSI_RX_BE_LONG_PKT_LUT 8
+typedef struct csi_rx_fe_ctrl_state_s csi_rx_fe_ctrl_state_t;
+typedef struct csi_rx_fe_ctrl_lane_s csi_rx_fe_ctrl_lane_t;
+typedef struct csi_rx_be_ctrl_state_s csi_rx_be_ctrl_state_t;
+/*mipi_backend_custom_mode_pixel_extraction_config*/
+typedef struct csi_rx_be_ctrl_pec_s csi_rx_be_ctrl_pec_t;
+
+
+struct csi_rx_fe_ctrl_lane_s {
+ hrt_data termen;
+ hrt_data settle;
+};
+struct csi_rx_fe_ctrl_state_s {
+ hrt_data enable;
+ hrt_data nof_enable_lanes;
+ hrt_data error_handling;
+ hrt_data status;
+ hrt_data status_dlane_hs;
+ hrt_data status_dlane_lp;
+ csi_rx_fe_ctrl_lane_t clane;
+ csi_rx_fe_ctrl_lane_t dlane[N_CSI_RX_DLANE_ID];
+};
+struct csi_rx_be_ctrl_state_s {
+ hrt_data enable;
+ hrt_data status;
+ hrt_data comp_format_reg[N_CSI_RX_BE_MIPI_COMP_FMT_REG];
+ hrt_data raw16;
+ hrt_data raw18;
+ hrt_data force_raw8;
+ hrt_data irq_status;
+ hrt_data custom_mode_enable;
+ hrt_data custom_mode_data_state;
+ hrt_data pec[N_CSI_RX_BE_MIPI_CUSTOM_PEC];
+ hrt_data custom_mode_valid_eop_config;
+ hrt_data global_lut_disregard_reg;
+ hrt_data packet_status_stall;
+ hrt_data short_packet_lut_entry[N_CSI_RX_BE_SHORT_PKT_LUT];
+ hrt_data long_packet_lut_entry[N_CSI_RX_BE_LONG_PKT_LUT];
+};
+#endif /* __CSI_RX_LOCAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/csi_rx_private.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/csi_rx_private.h
new file mode 100644
index 000000000000..5819bcff5e55
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/csi_rx_private.h
@@ -0,0 +1,282 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __CSI_RX_PRIVATE_H_INCLUDED__
+#define __CSI_RX_PRIVATE_H_INCLUDED__
+
+#include "rx_csi_defs.h"
+#include "mipi_backend_defs.h"
+#include "csi_rx_public.h"
+
+#include "device_access.h" /* ia_css_device_load_uint32 */
+
+#include "assert_support.h" /* assert */
+#include "print_support.h" /* print */
+
+
+/*****************************************************
+ *
+ * Native command interface (NCI).
+ *
+ *****************************************************/
+/**
+ * @brief Get the csi rx fe state.
+ * Refer to "csi_rx_public.h" for details.
+ */
+STORAGE_CLASS_CSI_RX_C void csi_rx_fe_ctrl_get_state(
+ const csi_rx_frontend_ID_t ID,
+ csi_rx_fe_ctrl_state_t *state)
+{
+ uint32_t i;
+
+ state->enable =
+ csi_rx_fe_ctrl_reg_load(ID, _HRT_CSI_RX_ENABLE_REG_IDX);
+ state->nof_enable_lanes =
+ csi_rx_fe_ctrl_reg_load(ID, _HRT_CSI_RX_NOF_ENABLED_LANES_REG_IDX);
+ state->error_handling =
+ csi_rx_fe_ctrl_reg_load(ID, _HRT_CSI_RX_ERROR_HANDLING_REG_IDX);
+ state->status =
+ csi_rx_fe_ctrl_reg_load(ID, _HRT_CSI_RX_STATUS_REG_IDX);
+ state->status_dlane_hs =
+ csi_rx_fe_ctrl_reg_load(ID, _HRT_CSI_RX_STATUS_DLANE_HS_REG_IDX);
+ state->status_dlane_lp =
+ csi_rx_fe_ctrl_reg_load(ID, _HRT_CSI_RX_STATUS_DLANE_LP_REG_IDX);
+ state->clane.termen =
+ csi_rx_fe_ctrl_reg_load(ID, _HRT_CSI_RX_DLY_CNT_TERMEN_CLANE_REG_IDX);
+ state->clane.settle =
+ csi_rx_fe_ctrl_reg_load(ID, _HRT_CSI_RX_DLY_CNT_SETTLE_CLANE_REG_IDX);
+
+ /*
+ * Get the values of the register-set per
+ * dlane.
+ */
+ for (i = 0; i < N_CSI_RX_FE_CTRL_DLANES[ID]; i++) {
+ csi_rx_fe_ctrl_get_dlane_state(
+ ID,
+ i,
+ &(state->dlane[i]));
+ }
+}
+
+/**
+ * @brief Get the state of the csi rx fe dlane process.
+ * Refer to "csi_rx_public.h" for details.
+ */
+STORAGE_CLASS_CSI_RX_C void csi_rx_fe_ctrl_get_dlane_state(
+ const csi_rx_frontend_ID_t ID,
+ const uint32_t lane,
+ csi_rx_fe_ctrl_lane_t *dlane_state)
+{
+
+ dlane_state->termen =
+ csi_rx_fe_ctrl_reg_load(ID, _HRT_CSI_RX_DLY_CNT_TERMEN_DLANE_REG_IDX(lane));
+ dlane_state->settle =
+ csi_rx_fe_ctrl_reg_load(ID, _HRT_CSI_RX_DLY_CNT_SETTLE_DLANE_REG_IDX(lane));
+
+}
+/**
+ * @brief dump the csi rx fe state.
+ * Refer to "csi_rx_public.h" for details.
+ */
+STORAGE_CLASS_CSI_RX_C void csi_rx_fe_ctrl_dump_state(
+ const csi_rx_frontend_ID_t ID,
+ csi_rx_fe_ctrl_state_t *state)
+{
+ uint32_t i;
+
+ ia_css_print("CSI RX FE STATE Controller %d Enable state 0x%x \n", ID, state->enable);
+ ia_css_print("CSI RX FE STATE Controller %d No Of enable lanes 0x%x \n", ID, state->nof_enable_lanes);
+ ia_css_print("CSI RX FE STATE Controller %d Error handling 0x%x \n", ID, state->error_handling);
+ ia_css_print("CSI RX FE STATE Controller %d Status 0x%x \n", ID, state->status);
+ ia_css_print("CSI RX FE STATE Controller %d Status Dlane HS 0x%x \n", ID, state->status_dlane_hs);
+ ia_css_print("CSI RX FE STATE Controller %d Status Dlane LP 0x%x \n", ID, state->status_dlane_lp);
+ ia_css_print("CSI RX FE STATE Controller %d Status term enable LP 0x%x \n", ID, state->clane.termen);
+ ia_css_print("CSI RX FE STATE Controller %d Status term settle LP 0x%x \n", ID, state->clane.settle);
+
+ /*
+ * Dump the values of the register-set per
+ * dlane.
+ */
+ for (i = 0; i < N_CSI_RX_FE_CTRL_DLANES[ID]; i++) {
+ ia_css_print("CSI RX FE STATE Controller %d DLANE ID %d termen 0x%x \n", ID, i, state->dlane[i].termen);
+ ia_css_print("CSI RX FE STATE Controller %d DLANE ID %d settle 0x%x \n", ID, i, state->dlane[i].settle);
+ }
+}
+
+/**
+ * @brief Get the csi rx be state.
+ * Refer to "csi_rx_public.h" for details.
+ */
+STORAGE_CLASS_CSI_RX_C void csi_rx_be_ctrl_get_state(
+ const csi_rx_backend_ID_t ID,
+ csi_rx_be_ctrl_state_t *state)
+{
+ uint32_t i;
+
+ state->enable =
+ csi_rx_be_ctrl_reg_load(ID, _HRT_MIPI_BACKEND_ENABLE_REG_IDX);
+
+ state->status =
+ csi_rx_be_ctrl_reg_load(ID, _HRT_MIPI_BACKEND_STATUS_REG_IDX);
+
+ for (i = 0; i < N_CSI_RX_BE_MIPI_COMP_FMT_REG; i++) {
+ state->comp_format_reg[i] =
+ csi_rx_be_ctrl_reg_load(ID,
+ _HRT_MIPI_BACKEND_COMP_FORMAT_REG0_IDX + i);
+ }
+
+ state->raw16 =
+ csi_rx_be_ctrl_reg_load(ID, _HRT_MIPI_BACKEND_RAW16_CONFIG_REG_IDX);
+
+ state->raw18 =
+ csi_rx_be_ctrl_reg_load(ID, _HRT_MIPI_BACKEND_RAW18_CONFIG_REG_IDX);
+ state->force_raw8 =
+ csi_rx_be_ctrl_reg_load(ID, _HRT_MIPI_BACKEND_FORCE_RAW8_REG_IDX);
+ state->irq_status =
+ csi_rx_be_ctrl_reg_load(ID, _HRT_MIPI_BACKEND_IRQ_STATUS_REG_IDX);
+#if 0 /* device access error for these registers */
+ /* ToDo: rootcause this failure */
+ state->custom_mode_enable =
+ csi_rx_be_ctrl_reg_load(ID, _HRT_MIPI_BACKEND_CUST_EN_REG_IDX);
+
+ state->custom_mode_data_state =
+ csi_rx_be_ctrl_reg_load(ID, _HRT_MIPI_BACKEND_CUST_DATA_STATE_REG_IDX);
+ for (i = 0; i < N_CSI_RX_BE_MIPI_CUSTOM_PEC; i++) {
+ state->pec[i] =
+ csi_rx_be_ctrl_reg_load(ID, _HRT_MIPI_BACKEND_CUST_PIX_EXT_S0P0_REG_IDX + i);
+ }
+ state->custom_mode_valid_eop_config =
+ csi_rx_be_ctrl_reg_load(ID, _HRT_MIPI_BACKEND_CUST_PIX_VALID_EOP_REG_IDX);
+#endif
+ state->global_lut_disregard_reg =
+ csi_rx_be_ctrl_reg_load(ID, _HRT_MIPI_BACKEND_GLOBAL_LUT_DISREGARD_REG_IDX);
+ state->packet_status_stall =
+ csi_rx_be_ctrl_reg_load(ID, _HRT_MIPI_BACKEND_PKT_STALL_STATUS_REG_IDX);
+ /*
+ * Get the values of the register-set per
+ * lut.
+ */
+ for (i = 0; i < N_SHORT_PACKET_LUT_ENTRIES[ID]; i++) {
+ state->short_packet_lut_entry[i] =
+ csi_rx_be_ctrl_reg_load(ID, _HRT_MIPI_BACKEND_SP_LUT_ENTRY_0_REG_IDX + i);
+ }
+ for (i = 0; i < N_LONG_PACKET_LUT_ENTRIES[ID]; i++) {
+ state->long_packet_lut_entry[i] =
+ csi_rx_be_ctrl_reg_load(ID, _HRT_MIPI_BACKEND_LP_LUT_ENTRY_0_REG_IDX + i);
+ }
+}
+
+/**
+ * @brief Dump the csi rx be state.
+ * Refer to "csi_rx_public.h" for details.
+ */
+STORAGE_CLASS_CSI_RX_C void csi_rx_be_ctrl_dump_state(
+ const csi_rx_backend_ID_t ID,
+ csi_rx_be_ctrl_state_t *state)
+{
+ uint32_t i;
+
+ ia_css_print("CSI RX BE STATE Controller %d Enable 0x%x \n", ID, state->enable);
+ ia_css_print("CSI RX BE STATE Controller %d Status 0x%x \n", ID, state->status);
+
+ for (i = 0; i < N_CSI_RX_BE_MIPI_COMP_FMT_REG; i++) {
+ ia_css_print("CSI RX BE STATE Controller %d comp format reg vc%d value 0x%x\n", ID, i, state->comp_format_reg[i]);
+ }
+ ia_css_print("CSI RX BE STATE Controller %d RAW16 0x%x \n", ID, state->raw16);
+ ia_css_print("CSI RX BE STATE Controller %d RAW18 0x%x \n", ID, state->raw18);
+ ia_css_print("CSI RX BE STATE Controller %d Force RAW8 0x%x \n", ID, state->force_raw8);
+ ia_css_print("CSI RX BE STATE Controller %d IRQ state 0x%x \n", ID, state->irq_status);
+#if 0 /* ToDo:Getting device access error for this register */
+ for (i = 0; i < N_CSI_RX_BE_MIPI_CUSTOM_PEC; i++) {
+ ia_css_print("CSI RX BE STATE Controller %d PEC ID %d custom pec 0x%x \n", ID, i, state->pec[i]);
+ }
+#endif
+ ia_css_print("CSI RX BE STATE Controller %d Global LUT diregard reg 0x%x \n", ID, state->global_lut_disregard_reg);
+ ia_css_print("CSI RX BE STATE Controller %d packet stall reg 0x%x \n", ID, state->packet_status_stall);
+ /*
+ * Dump the values of the register-set per
+ * lut.
+ */
+ for (i = 0; i < N_SHORT_PACKET_LUT_ENTRIES[ID]; i++) {
+ ia_css_print("CSI RX BE STATE Controller ID %d Short packat entry %d shart packet lut id 0x%x \n", ID, i, state->short_packet_lut_entry[i]);
+ }
+ for (i = 0; i < N_LONG_PACKET_LUT_ENTRIES[ID]; i++) {
+ ia_css_print("CSI RX BE STATE Controller ID %d Long packat entry %d Long packet lut id 0x%x \n", ID, i, state->long_packet_lut_entry[i]);
+ }
+}
+/* end of NCI */
+/*****************************************************
+ *
+ * Device level interface (DLI).
+ *
+ *****************************************************/
+/**
+ * @brief Load the register value.
+ * Refer to "csi_rx_public.h" for details.
+ */
+STORAGE_CLASS_CSI_RX_C hrt_data csi_rx_fe_ctrl_reg_load(
+ const csi_rx_frontend_ID_t ID,
+ const hrt_address reg)
+{
+ assert(ID < N_CSI_RX_FRONTEND_ID);
+ assert(CSI_RX_FE_CTRL_BASE[ID] != (hrt_address)-1);
+ return ia_css_device_load_uint32(CSI_RX_FE_CTRL_BASE[ID] + reg*sizeof(hrt_data));
+}
+
+
+/**
+ * @brief Store a value to the register.
+ * Refer to "ibuf_ctrl_public.h" for details.
+ */
+STORAGE_CLASS_CSI_RX_C void csi_rx_fe_ctrl_reg_store(
+ const csi_rx_frontend_ID_t ID,
+ const hrt_address reg,
+ const hrt_data value)
+{
+ assert(ID < N_CSI_RX_FRONTEND_ID);
+ assert(CSI_RX_FE_CTRL_BASE[ID] != (hrt_address)-1);
+
+ ia_css_device_store_uint32(CSI_RX_FE_CTRL_BASE[ID] + reg*sizeof(hrt_data), value);
+}
+/**
+ * @brief Load the register value.
+ * Refer to "csi_rx_public.h" for details.
+ */
+STORAGE_CLASS_CSI_RX_C hrt_data csi_rx_be_ctrl_reg_load(
+ const csi_rx_backend_ID_t ID,
+ const hrt_address reg)
+{
+ assert(ID < N_CSI_RX_BACKEND_ID);
+ assert(CSI_RX_BE_CTRL_BASE[ID] != (hrt_address)-1);
+ return ia_css_device_load_uint32(CSI_RX_BE_CTRL_BASE[ID] + reg*sizeof(hrt_data));
+}
+
+
+/**
+ * @brief Store a value to the register.
+ * Refer to "ibuf_ctrl_public.h" for details.
+ */
+STORAGE_CLASS_CSI_RX_C void csi_rx_be_ctrl_reg_store(
+ const csi_rx_backend_ID_t ID,
+ const hrt_address reg,
+ const hrt_data value)
+{
+ assert(ID < N_CSI_RX_BACKEND_ID);
+ assert(CSI_RX_BE_CTRL_BASE[ID] != (hrt_address)-1);
+
+ ia_css_device_store_uint32(CSI_RX_BE_CTRL_BASE[ID] + reg*sizeof(hrt_data), value);
+}
+/** end of DLI */
+
+#endif /* __CSI_RX_PRIVATE_H_INCLUDED__ */
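In the DLI helpers above, reg is a word index rather than a byte offset: the byte address is base + reg * sizeof(hrt_data). A worked sketch (the 0x43000 base and the helper name are illustrative only, not values from this patch):

        /* Sketch: word-indexed register to byte address. With 32-bit
         * hrt_data, register index 3 on a block based at 0x43000
         * resolves to 0x4300C (0x43000 + 3 * 4). */
        static hrt_address csi_rx_reg_addr(hrt_address base, hrt_address reg)
        {
                return base + reg * sizeof(hrt_data);
        }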
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/ibuf_ctrl.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/ibuf_ctrl.c
new file mode 100644
index 000000000000..14973d1c2756
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/ibuf_ctrl.c
@@ -0,0 +1,22 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <type_support.h>
+#include "system_global.h"
+
+const uint32_t N_IBUF_CTRL_PROCS[N_IBUF_CTRL_ID] = {
+ 8, /* IBUF_CTRL0_ID supports at most 8 processes */
+ 4, /* IBUF_CTRL1_ID supports at most 4 processes */
+ 4 /* IBUF_CTRL2_ID supports at most 4 processes */
+};
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/ibuf_ctrl_local.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/ibuf_ctrl_local.h
new file mode 100644
index 000000000000..ea40284623d1
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/ibuf_ctrl_local.h
@@ -0,0 +1,58 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IBUF_CTRL_LOCAL_H_INCLUDED__
+#define __IBUF_CTRL_LOCAL_H_INCLUDED__
+
+#include "ibuf_ctrl_global.h"
+
+typedef struct ibuf_ctrl_proc_state_s ibuf_ctrl_proc_state_t;
+typedef struct ibuf_ctrl_state_s ibuf_ctrl_state_t;
+
+struct ibuf_ctrl_proc_state_s {
+ hrt_data num_items;
+ hrt_data num_stores;
+ hrt_data dma_channel;
+ hrt_data dma_command;
+ hrt_data ibuf_st_addr;
+ hrt_data ibuf_stride;
+ hrt_data ibuf_end_addr;
+ hrt_data dest_st_addr;
+ hrt_data dest_stride;
+ hrt_data dest_end_addr;
+ hrt_data sync_frame;
+ hrt_data sync_command;
+ hrt_data store_command;
+ hrt_data shift_returned_items;
+ hrt_data elems_ibuf;
+ hrt_data elems_dest;
+ hrt_data cur_stores;
+ hrt_data cur_acks;
+ hrt_data cur_s2m_ibuf_addr;
+ hrt_data cur_dma_ibuf_addr;
+ hrt_data cur_dma_dest_addr;
+ hrt_data cur_isp_dest_addr;
+ hrt_data dma_cmds_send;
+ hrt_data main_cntrl_state;
+ hrt_data dma_sync_state;
+ hrt_data isp_sync_state;
+};
+
+struct ibuf_ctrl_state_s {
+ hrt_data recalc_words;
+ hrt_data arbiters;
+ ibuf_ctrl_proc_state_t proc_state[N_STREAM2MMIO_SID_ID];
+};
+
+#endif /* __IBUF_CTRL_LOCAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/ibuf_ctrl_private.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/ibuf_ctrl_private.h
new file mode 100644
index 000000000000..470c92d287fe
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/ibuf_ctrl_private.h
@@ -0,0 +1,233 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IBUF_CTRL_PRIVATE_H_INCLUDED__
+#define __IBUF_CTRL_PRIVATE_H_INCLUDED__
+
+#include "ibuf_ctrl_public.h"
+
+#include "device_access.h" /* ia_css_device_load_uint32 */
+
+#include "assert_support.h" /* assert */
+#include "print_support.h" /* print */
+
+
+/*****************************************************
+ *
+ * Native command interface (NCI).
+ *
+ *****************************************************/
+/**
+ * @brief Get the ibuf-controller state.
+ * Refer to "ibuf_ctrl_public.h" for details.
+ */
+STORAGE_CLASS_IBUF_CTRL_C void ibuf_ctrl_get_state(
+ const ibuf_ctrl_ID_t ID,
+ ibuf_ctrl_state_t *state)
+{
+ uint32_t i;
+
+ state->recalc_words =
+ ibuf_ctrl_reg_load(ID, _IBUF_CNTRL_RECALC_WORDS_STATUS);
+ state->arbiters =
+ ibuf_ctrl_reg_load(ID, _IBUF_CNTRL_ARBITERS_STATUS);
+
+ /*
+ * Get the values of the register-set per
+ * ibuf-controller process.
+ */
+ for (i = 0; i < N_IBUF_CTRL_PROCS[ID]; i++) {
+ ibuf_ctrl_get_proc_state(
+ ID,
+ i,
+ &(state->proc_state[i]));
+ }
+}
+
+/**
+ * @brief Get the state of the ibuf-controller process.
+ * Refer to "ibuf_ctrl_public.h" for details.
+ */
+STORAGE_CLASS_IBUF_CTRL_C void ibuf_ctrl_get_proc_state(
+ const ibuf_ctrl_ID_t ID,
+ const uint32_t proc_id,
+ ibuf_ctrl_proc_state_t *state)
+{
+ hrt_address reg_bank_offset;
+
+ reg_bank_offset =
+ _IBUF_CNTRL_PROC_REG_ALIGN * (1 + proc_id);
+
+ state->num_items =
+ ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_NUM_ITEMS_PER_STORE);
+
+ state->num_stores =
+ ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_NUM_STORES_PER_FRAME);
+
+ state->dma_channel =
+ ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_DMA_CHANNEL);
+
+ state->dma_command =
+ ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_DMA_CMD);
+
+ state->ibuf_st_addr =
+ ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_BUFFER_START_ADDRESS);
+
+ state->ibuf_stride =
+ ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_BUFFER_STRIDE);
+
+ state->ibuf_end_addr =
+ ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_BUFFER_END_ADDRESS);
+
+ state->dest_st_addr =
+ ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_DEST_START_ADDRESS);
+
+ state->dest_stride =
+ ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_DEST_STRIDE);
+
+ state->dest_end_addr =
+ ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_DEST_END_ADDRESS);
+
+ state->sync_frame =
+ ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_SYNC_FRAME);
+
+ state->sync_command =
+ ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_STR2MMIO_SYNC_CMD);
+
+ state->store_command =
+ ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_STR2MMIO_STORE_CMD);
+
+ state->shift_returned_items =
+ ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_SHIFT_ITEMS);
+
+ state->elems_ibuf =
+ ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_ELEMS_P_WORD_IBUF);
+
+ state->elems_dest =
+ ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_ELEMS_P_WORD_DEST);
+
+ state->cur_stores =
+ ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_CUR_STORES);
+
+ state->cur_acks =
+ ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_CUR_ACKS);
+
+ state->cur_s2m_ibuf_addr =
+ ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_CUR_S2M_IBUF_ADDR);
+
+ state->cur_dma_ibuf_addr =
+ ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_CUR_DMA_IBUF_ADDR);
+
+ state->cur_dma_dest_addr =
+ ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_CUR_DMA_DEST_ADDR);
+
+ state->cur_isp_dest_addr =
+ ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_CUR_ISP_DEST_ADDR);
+
+ state->dma_cmds_send =
+ ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_CUR_NR_DMA_CMDS_SEND);
+
+ state->main_cntrl_state =
+ ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_MAIN_CNTRL_STATE);
+
+ state->dma_sync_state =
+ ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_DMA_SYNC_STATE);
+
+ state->isp_sync_state =
+ ibuf_ctrl_reg_load(ID, reg_bank_offset + _IBUF_CNTRL_ISP_SYNC_STATE);
+}
+/**
+ * @brief Dump the ibuf-controller state.
+ * Refer to "ibuf_ctrl_public.h" for details.
+ */
+STORAGE_CLASS_IBUF_CTRL_C void ibuf_ctrl_dump_state(
+ const ibuf_ctrl_ID_t ID,
+ ibuf_ctrl_state_t *state)
+{
+ uint32_t i;
+ ia_css_print("IBUF controller ID %d recalculate words 0x%x\n", ID, state->recalc_words);
+ ia_css_print("IBUF controller ID %d arbiters 0x%x\n", ID, state->arbiters);
+
+ /*
+ * Dump the values of the register-set per
+ * ibuf-controller process.
+ */
+ for (i = 0; i < N_IBUF_CTRL_PROCS[ID]; i++) {
+ ia_css_print("IBUF controller ID %d Process ID %d num_items 0x%x\n", ID, i, state->proc_state[i].num_items);
+ ia_css_print("IBUF controller ID %d Process ID %d num_stores 0x%x\n", ID, i, state->proc_state[i].num_stores);
+ ia_css_print("IBUF controller ID %d Process ID %d dma_channel 0x%x\n", ID, i, state->proc_state[i].dma_channel);
+ ia_css_print("IBUF controller ID %d Process ID %d dma_command 0x%x\n", ID, i, state->proc_state[i].dma_command);
+ ia_css_print("IBUF controller ID %d Process ID %d ibuf_st_addr 0x%x\n", ID, i, state->proc_state[i].ibuf_st_addr);
+ ia_css_print("IBUF controller ID %d Process ID %d ibuf_stride 0x%x\n", ID, i, state->proc_state[i].ibuf_stride);
+ ia_css_print("IBUF controller ID %d Process ID %d ibuf_end_addr 0x%x\n", ID, i, state->proc_state[i].ibuf_end_addr);
+ ia_css_print("IBUF controller ID %d Process ID %d dest_st_addr 0x%x\n", ID, i, state->proc_state[i].dest_st_addr);
+ ia_css_print("IBUF controller ID %d Process ID %d dest_stride 0x%x\n", ID, i, state->proc_state[i].dest_stride);
+ ia_css_print("IBUF controller ID %d Process ID %d dest_end_addr 0x%x\n", ID, i, state->proc_state[i].dest_end_addr);
+ ia_css_print("IBUF controller ID %d Process ID %d sync_frame 0x%x\n", ID, i, state->proc_state[i].sync_frame);
+ ia_css_print("IBUF controller ID %d Process ID %d sync_command 0x%x\n", ID, i, state->proc_state[i].sync_command);
+ ia_css_print("IBUF controller ID %d Process ID %d store_command 0x%x\n", ID, i, state->proc_state[i].store_command);
+ ia_css_print("IBUF controller ID %d Process ID %d shift_returned_items 0x%x\n", ID, i, state->proc_state[i].shift_returned_items);
+ ia_css_print("IBUF controller ID %d Process ID %d elems_ibuf 0x%x\n", ID, i, state->proc_state[i].elems_ibuf);
+ ia_css_print("IBUF controller ID %d Process ID %d elems_dest 0x%x\n", ID, i, state->proc_state[i].elems_dest);
+ ia_css_print("IBUF controller ID %d Process ID %d cur_stores 0x%x\n", ID, i, state->proc_state[i].cur_stores);
+ ia_css_print("IBUF controller ID %d Process ID %d cur_acks 0x%x\n", ID, i, state->proc_state[i].cur_acks);
+ ia_css_print("IBUF controller ID %d Process ID %d cur_s2m_ibuf_addr 0x%x\n", ID, i, state->proc_state[i].cur_s2m_ibuf_addr);
+ ia_css_print("IBUF controller ID %d Process ID %d cur_dma_ibuf_addr 0x%x\n", ID, i, state->proc_state[i].cur_dma_ibuf_addr);
+ ia_css_print("IBUF controller ID %d Process ID %d cur_dma_dest_addr 0x%x\n", ID, i, state->proc_state[i].cur_dma_dest_addr);
+ ia_css_print("IBUF controller ID %d Process ID %d cur_isp_dest_addr 0x%x\n", ID, i, state->proc_state[i].cur_isp_dest_addr);
+ ia_css_print("IBUF controller ID %d Process ID %d dma_cmds_send 0x%x\n", ID, i, state->proc_state[i].dma_cmds_send);
+ ia_css_print("IBUF controller ID %d Process ID %d main_cntrl_state 0x%x\n", ID, i, state->proc_state[i].main_cntrl_state);
+ ia_css_print("IBUF controller ID %d Process ID %d dma_sync_state 0x%x\n", ID, i, state->proc_state[i].dma_sync_state);
+ ia_css_print("IBUF controller ID %d Process ID %d isp_sync_state 0x%x\n", ID, i, state->proc_state[i].isp_sync_state);
+ }
+}
+/** end of NCI */
+
+/*****************************************************
+ *
+ * Device level interface (DLI).
+ *
+ *****************************************************/
+/**
+ * @brief Load the register value.
+ * Refer to "ibuf_ctrl_public.h" for details.
+ */
+STORAGE_CLASS_IBUF_CTRL_C hrt_data ibuf_ctrl_reg_load(
+ const ibuf_ctrl_ID_t ID,
+ const hrt_address reg)
+{
+ assert(ID < N_IBUF_CTRL_ID);
+ assert(IBUF_CTRL_BASE[ID] != (hrt_address)-1);
+ return ia_css_device_load_uint32(IBUF_CTRL_BASE[ID] + reg*sizeof(hrt_data));
+}
+
+
+/**
+ * @brief Store a value to the register.
+ * Refer to "ibuf_ctrl_public.h" for details.
+ */
+STORAGE_CLASS_IBUF_CTRL_C void ibuf_ctrl_reg_store(
+ const ibuf_ctrl_ID_t ID,
+ const hrt_address reg,
+ const hrt_data value)
+{
+ assert(ID < N_IBUF_CTRL_ID);
+ assert(IBUF_CTRL_BASE[ID] != (hrt_address)-1);
+
+ ia_css_device_store_uint32(IBUF_CTRL_BASE[ID] + reg*sizeof(hrt_data), value);
+}
+/** end of DLI */
+
+
+#endif /* __IBUF_CTRL_PRIVATE_H_INCLUDED__ */
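Each ibuf-controller process owns a register bank at _IBUF_CNTRL_PROC_REG_ALIGN * (1 + proc_id), with bank 0 left for the controller-wide registers read in ibuf_ctrl_get_state(). A small addressing sketch (the 0x100 alignment in the comment is an assumption for illustration, not a value taken from this patch):

        /* Sketch: per-process register addressing. Assuming an alignment
         * of 0x100 words, process 0 banks at 0x100, process 1 at 0x200,
         * and a per-process field is read at bank + field offset. */
        static hrt_data read_proc_dma_channel(const ibuf_ctrl_ID_t ID,
                                              uint32_t proc_id)
        {
                hrt_address bank = _IBUF_CNTRL_PROC_REG_ALIGN * (1 + proc_id);

                return ibuf_ctrl_reg_load(ID, bank + _IBUF_CNTRL_DMA_CHANNEL);
        }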
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/input_system_local.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/input_system_local.h
new file mode 100644
index 000000000000..f199423e28da
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/input_system_local.h
@@ -0,0 +1,106 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __INPUT_SYSTEM_LOCAL_H_INCLUDED__
+#define __INPUT_SYSTEM_LOCAL_H_INCLUDED__
+
+#include "type_support.h"
+#include "input_system_global.h"
+
+#include "ibuf_ctrl.h"
+#include "csi_rx.h"
+#include "pixelgen.h"
+#include "isys_stream2mmio.h"
+#include "isys_irq.h"
+
+typedef input_system_err_t input_system_error_t;
+
+typedef enum {
+ MIPI_FORMAT_SHORT1 = 0x08,
+ MIPI_FORMAT_SHORT2,
+ MIPI_FORMAT_SHORT3,
+ MIPI_FORMAT_SHORT4,
+ MIPI_FORMAT_SHORT5,
+ MIPI_FORMAT_SHORT6,
+ MIPI_FORMAT_SHORT7,
+ MIPI_FORMAT_SHORT8,
+ MIPI_FORMAT_EMBEDDED = 0x12,
+ MIPI_FORMAT_YUV420_8 = 0x18,
+ MIPI_FORMAT_YUV420_10,
+ MIPI_FORMAT_YUV420_8_LEGACY,
+ MIPI_FORMAT_YUV420_8_SHIFT = 0x1C,
+ MIPI_FORMAT_YUV420_10_SHIFT,
+ MIPI_FORMAT_YUV422_8 = 0x1E,
+ MIPI_FORMAT_YUV422_10,
+ MIPI_FORMAT_RGB444 = 0x20,
+ MIPI_FORMAT_RGB555,
+ MIPI_FORMAT_RGB565,
+ MIPI_FORMAT_RGB666,
+ MIPI_FORMAT_RGB888,
+ MIPI_FORMAT_RAW6 = 0x28,
+ MIPI_FORMAT_RAW7,
+ MIPI_FORMAT_RAW8,
+ MIPI_FORMAT_RAW10,
+ MIPI_FORMAT_RAW12,
+ MIPI_FORMAT_RAW14,
+ MIPI_FORMAT_CUSTOM0 = 0x30,
+ MIPI_FORMAT_CUSTOM1,
+ MIPI_FORMAT_CUSTOM2,
+ MIPI_FORMAT_CUSTOM3,
+ MIPI_FORMAT_CUSTOM4,
+ MIPI_FORMAT_CUSTOM5,
+ MIPI_FORMAT_CUSTOM6,
+ MIPI_FORMAT_CUSTOM7,
+ //MIPI_FORMAT_RAW16, /*not supported by 2401*/
+ //MIPI_FORMAT_RAW18,
+ N_MIPI_FORMAT
+} mipi_format_t;
+
+#define N_MIPI_FORMAT_CUSTOM 8
+
+/* The number of stores for compressed format types */
+#define N_MIPI_COMPRESSOR_CONTEXT (N_RX_CHANNEL_ID * N_MIPI_FORMAT_CUSTOM)
+#define UNCOMPRESSED_BITS_PER_PIXEL_10 10
+#define UNCOMPRESSED_BITS_PER_PIXEL_12 12
+#define COMPRESSED_BITS_PER_PIXEL_6 6
+#define COMPRESSED_BITS_PER_PIXEL_7 7
+#define COMPRESSED_BITS_PER_PIXEL_8 8
+enum mipi_compressor {
+ MIPI_COMPRESSOR_NONE = 0,
+ MIPI_COMPRESSOR_10_6_10,
+ MIPI_COMPRESSOR_10_7_10,
+ MIPI_COMPRESSOR_10_8_10,
+ MIPI_COMPRESSOR_12_6_12,
+ MIPI_COMPRESSOR_12_7_12,
+ MIPI_COMPRESSOR_12_8_12,
+ N_MIPI_COMPRESSOR_METHODS
+};
+
+typedef enum {
+ MIPI_PREDICTOR_NONE = 0,
+ MIPI_PREDICTOR_TYPE1,
+ MIPI_PREDICTOR_TYPE2,
+ N_MIPI_PREDICTOR_TYPES
+} mipi_predictor_t;
+
+typedef struct input_system_state_s input_system_state_t;
+struct input_system_state_s {
+ ibuf_ctrl_state_t ibuf_ctrl_state[N_IBUF_CTRL_ID];
+ csi_rx_fe_ctrl_state_t csi_rx_fe_ctrl_state[N_CSI_RX_FRONTEND_ID];
+ csi_rx_be_ctrl_state_t csi_rx_be_ctrl_state[N_CSI_RX_BACKEND_ID];
+ pixelgen_ctrl_state_t pixelgen_ctrl_state[N_PIXELGEN_ID];
+ stream2mmio_state_t stream2mmio_state[N_STREAM2MMIO_ID];
+ isys_irqc_state_t isys_irqc_state[N_ISYS_IRQ_ID];
+};
+#endif /* __INPUT_SYSTEM_LOCAL_H_INCLUDED__ */
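The mipi_compressor names encode the uncompressed and on-bus bit widths (10_6_10 means 10-bit pixels carried as 6 bits, for example). A hedged helper sketch using only the macros defined above (the function name is illustrative):

        /* Sketch: bits per pixel on the compressed bus for each scheme;
         * MIPI_COMPRESSOR_NONE yields 0. */
        static unsigned compressed_bits_per_pixel(enum mipi_compressor c)
        {
                switch (c) {
                case MIPI_COMPRESSOR_10_6_10:
                case MIPI_COMPRESSOR_12_6_12:
                        return COMPRESSED_BITS_PER_PIXEL_6;
                case MIPI_COMPRESSOR_10_7_10:
                case MIPI_COMPRESSOR_12_7_12:
                        return COMPRESSED_BITS_PER_PIXEL_7;
                case MIPI_COMPRESSOR_10_8_10:
                case MIPI_COMPRESSOR_12_8_12:
                        return COMPRESSED_BITS_PER_PIXEL_8;
                default:
                        return 0;
                }
        }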
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/input_system_private.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/input_system_private.h
new file mode 100644
index 000000000000..97505e436047
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/input_system_private.h
@@ -0,0 +1,128 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __INPUT_SYSTEM_PRIVATE_H_INCLUDED__
+#define __INPUT_SYSTEM_PRIVATE_H_INCLUDED__
+
+#include "input_system_public.h"
+
+STORAGE_CLASS_INPUT_SYSTEM_C input_system_err_t input_system_get_state(
+ const input_system_ID_t ID,
+ input_system_state_t *state)
+{
+ uint32_t i;
+
+ (void)(ID);
+
+ /* get the states of all CSI RX frontend devices */
+ for (i = 0; i < N_CSI_RX_FRONTEND_ID; i++) {
+ csi_rx_fe_ctrl_get_state(
+ (csi_rx_frontend_ID_t)i,
+ &(state->csi_rx_fe_ctrl_state[i]));
+ }
+
+ /* get the states of all CSI RX backend devices */
+ for (i = 0; i < N_CSI_RX_BACKEND_ID; i++) {
+ csi_rx_be_ctrl_get_state(
+ (csi_rx_backend_ID_t)i,
+ &(state->csi_rx_be_ctrl_state[i]));
+ }
+
+ /* get the states of all pixelgen devices */
+ for (i = 0; i < N_PIXELGEN_ID; i++) {
+ pixelgen_ctrl_get_state(
+ (pixelgen_ID_t)i,
+ &(state->pixelgen_ctrl_state[i]));
+ }
+
+ /* get the states of all stream2mmio devices */
+ for (i = 0; i < N_STREAM2MMIO_ID; i++) {
+ stream2mmio_get_state(
+ (stream2mmio_ID_t)i,
+ &(state->stream2mmio_state[i]));
+ }
+
+ /* get the states of all ibuf-controller devices */
+ for (i = 0; i < N_IBUF_CTRL_ID; i++) {
+ ibuf_ctrl_get_state(
+ (ibuf_ctrl_ID_t)i,
+ &(state->ibuf_ctrl_state[i]));
+ }
+
+ /* get the states of all isys irq controllers */
+ for (i = 0; i < N_ISYS_IRQ_ID; i++) {
+ isys_irqc_state_get((isys_irq_ID_t)i, &(state->isys_irqc_state[i]));
+ }
+
+ /* TODO: get the states of all ISYS2401 DMA devices */
+ for (i = 0; i < N_ISYS2401_DMA_ID; i++) {
+ }
+
+ return INPUT_SYSTEM_ERR_NO_ERROR;
+}
+STORAGE_CLASS_INPUT_SYSTEM_C void input_system_dump_state(
+ const input_system_ID_t ID,
+ input_system_state_t *state)
+{
+ uint32_t i;
+
+ (void)(ID);
+
+ /* dump the states of all CSI RX frontend devices */
+ for (i = 0; i < N_CSI_RX_FRONTEND_ID; i++) {
+ csi_rx_fe_ctrl_dump_state(
+ (csi_rx_frontend_ID_t)i,
+ &(state->csi_rx_fe_ctrl_state[i]));
+ }
+
+ /* dump the states of all CSI RX backend devices */
+ for (i = 0; i < N_CSI_RX_BACKEND_ID; i++) {
+ csi_rx_be_ctrl_dump_state(
+ (csi_rx_backend_ID_t)i,
+ &(state->csi_rx_be_ctrl_state[i]));
+ }
+
+ /* dump the states of all pixelgen devices */
+ for (i = 0; i < N_PIXELGEN_ID; i++) {
+ pixelgen_ctrl_dump_state(
+ (pixelgen_ID_t)i,
+ &(state->pixelgen_ctrl_state[i]));
+ }
+
+ /* dump the states of all stream2mmio devices */
+ for (i = 0; i < N_STREAM2MMIO_ID; i++) {
+ stream2mmio_dump_state(
+ (stream2mmio_ID_t)i,
+ &(state->stream2mmio_state[i]));
+ }
+
+ /* dump the states of all ibuf-controller devices */
+ for (i = 0; i < N_IBUF_CTRL_ID; i++) {
+ ibuf_ctrl_dump_state(
+ (ibuf_ctrl_ID_t)i,
+ &(state->ibuf_ctrl_state[i]));
+ }
+
+ /* dump the states of all isys irq controllers */
+ for (i = 0; i < N_ISYS_IRQ_ID; i++) {
+ isys_irqc_state_dump((isys_irq_ID_t)i, &(state->isys_irqc_state[i]));
+ }
+
+ /* TODO: dump the states of all ISYS2401 DMA devices */
+ for (i = 0; i < N_ISYS2401_DMA_ID; i++) {
+ }
+
+ return;
+}
+#endif /* __INPUT_SYSTEM_PRIVATE_H_INCLUDED__ */
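input_system_get_state() and input_system_dump_state() above aggregate every sub-block captured in input_system_state_t. A typical debug path looks roughly like this (INPUT_SYSTEM0_ID is assumed to be the single instance id from input_system_global.h, and the buffer is kept static because the state struct is large):

        static input_system_state_t debug_state;

        /* Sketch: capture and print the whole input-system state. */
        static void debug_dump_input_system(void)
        {
                if (input_system_get_state(INPUT_SYSTEM0_ID, &debug_state) ==
                    INPUT_SYSTEM_ERR_NO_ERROR)
                        input_system_dump_state(INPUT_SYSTEM0_ID, &debug_state);
        }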
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/isys_dma.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/isys_dma.c
new file mode 100644
index 000000000000..77767228985e
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/isys_dma.c
@@ -0,0 +1,40 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "isys_dma.h"
+#include "assert_support.h"
+
+#ifndef __INLINE_ISYS2401_DMA__
+/*
+ * Include definitions for isys dma register access functions. isys_dma.h
+ * includes declarations of these functions by including isys_dma_public.h.
+ */
+#include "isys_dma_private.h"
+#endif
+
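+/* Number of DMA channels serviced by each ISYS2401 DMA instance. */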
+const isys2401_dma_channel N_ISYS2401_DMA_CHANNEL_PROCS[N_ISYS2401_DMA_ID] = {
+ N_ISYS2401_DMA_CHANNEL
+};
+
+void isys2401_dma_set_max_burst_size(
+ const isys2401_dma_ID_t dma_id,
+ uint32_t max_burst_size)
+{
+ assert(dma_id < N_ISYS2401_DMA_ID);
+ assert((max_burst_size > 0x00) && (max_burst_size <= 0xFF));
+
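+ /* The burst-size field is programmed as (max_burst_size - 1). */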
+ isys2401_dma_reg_store(dma_id,
+ DMA_DEV_INFO_REG_IDX(_DMA_V2_DEV_INTERF_MAX_BURST_IDX, HIVE_DMA_BUS_DDR_CONN),
+ (max_burst_size - 1));
+}
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/isys_dma_local.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/isys_dma_local.h
new file mode 100644
index 000000000000..5c694a26386e
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/isys_dma_local.h
@@ -0,0 +1,20 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __ISYS_DMA_LOCAL_H_INCLUDED__
+#define __ISYS_DMA_LOCAL_H_INCLUDED__
+
+#include "isys_dma_global.h"
+
+#endif /* __ISYS_DMA_LOCAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/isys_dma_private.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/isys_dma_private.h
new file mode 100644
index 000000000000..2cd1aeecf617
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/isys_dma_private.h
@@ -0,0 +1,60 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __ISYS_DMA_PRIVATE_H_INCLUDED__
+#define __ISYS_DMA_PRIVATE_H_INCLUDED__
+
+#include "isys_dma_public.h"
+#include "device_access.h"
+#include "assert_support.h"
+#include "dma.h"
+#include "dma_v2_defs.h"
+#include "print_support.h"
+
+
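+/* DMA registers are word-indexed from ISYS2401_DMA_BASE[dma_id]. */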
+STORAGE_CLASS_ISYS2401_DMA_C void isys2401_dma_reg_store(
+ const isys2401_dma_ID_t dma_id,
+ const unsigned int reg,
+ const hrt_data value)
+{
+ unsigned int reg_loc;
+
+ assert(dma_id < N_ISYS2401_DMA_ID);
+ assert(ISYS2401_DMA_BASE[dma_id] != (hrt_address)-1);
+
+ reg_loc = ISYS2401_DMA_BASE[dma_id] + (reg * sizeof(hrt_data));
+
+ ia_css_print("isys dma store at addr(0x%x) val(%u)\n", reg_loc, (unsigned int)value);
+ ia_css_device_store_uint32(reg_loc, value);
+}
+
+STORAGE_CLASS_ISYS2401_DMA_C hrt_data isys2401_dma_reg_load(
+ const isys2401_dma_ID_t dma_id,
+ const unsigned int reg)
+{
+ unsigned int reg_loc;
+ hrt_data value;
+
+ assert(dma_id < N_ISYS2401_DMA_ID);
+ assert(ISYS2401_DMA_BASE[dma_id] != (hrt_address)-1);
+
+ reg_loc = ISYS2401_DMA_BASE[dma_id] + (reg * sizeof(hrt_data));
+
+ value = ia_css_device_load_uint32(reg_loc);
+ ia_css_print("isys dma load from addr(0x%x) val(%u)\n", reg_loc, (unsigned int)value);
+
+ return value;
+}
+
+#endif /* __ISYS_DMA_PRIVATE_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/isys_irq.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/isys_irq.c
new file mode 100644
index 000000000000..14d1d3b627a9
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/isys_irq.c
@@ -0,0 +1,39 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <system_local.h>
+#include "device_access.h"
+#include "assert_support.h"
+#include "ia_css_debug.h"
+#include "isys_irq.h"
+
+#ifndef __INLINE_ISYS2401_IRQ__
+/*
+ * Include definitions for isys irq private functions. isys_irq.h includes
+ * declarations of these functions by including isys_irq_public.h.
+ */
+#include "isys_irq_private.h"
+#endif
+
+/** Public interface */
+STORAGE_CLASS_ISYS2401_IRQ_C void isys_irqc_status_enable(
+ const isys_irq_ID_t isys_irqc_id)
+{
+ assert(isys_irqc_id < N_ISYS_IRQ_ID);
+
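+ /* Set up the mask, clear and enable registers for this IRQ controller. */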
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "Setting irq mask for port %u\n", isys_irqc_id);
+ isys_irqc_reg_store(isys_irqc_id, ISYS_IRQ_MASK_REG_IDX, ISYS_IRQ_MASK_REG_VALUE);
+ isys_irqc_reg_store(isys_irqc_id, ISYS_IRQ_CLEAR_REG_IDX, ISYS_IRQ_CLEAR_REG_VALUE);
+ isys_irqc_reg_store(isys_irqc_id, ISYS_IRQ_ENABLE_REG_IDX, ISYS_IRQ_ENABLE_REG_VALUE);
+}
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/isys_irq_local.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/isys_irq_local.h
new file mode 100644
index 000000000000..0bffb5680e25
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/isys_irq_local.h
@@ -0,0 +1,35 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __ISYS_IRQ_LOCAL_H__
+#define __ISYS_IRQ_LOCAL_H__
+
+#include <type_support.h>
+
+#if defined(USE_INPUT_SYSTEM_VERSION_2401)
+
+typedef struct isys_irqc_state_s isys_irqc_state_t;
+
+struct isys_irqc_state_s {
+ hrt_data edge;
+ hrt_data mask;
+ hrt_data status;
+ hrt_data enable;
+ hrt_data level_no;
+/*hrt_data clear; */ /* write-only register */
+};
+
+#endif /* defined(USE_INPUT_SYSTEM_VERSION_2401) */
+
+#endif /* __ISYS_IRQ_LOCAL_H__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/isys_irq_private.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/isys_irq_private.h
new file mode 100644
index 000000000000..c17ce131c9e1
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/isys_irq_private.h
@@ -0,0 +1,108 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __ISYS_IRQ_PRIVATE_H__
+#define __ISYS_IRQ_PRIVATE_H__
+
+#include "isys_irq_global.h"
+#include "isys_irq_local.h"
+
+#if defined(USE_INPUT_SYSTEM_VERSION_2401)
+
+/* -------------------------------------------------------+
+ | Native command interface (NCI) |
+ + -------------------------------------------------------*/
+
+/**
+* @brief Get the isys irq controller state.
+* Refer to "isys_irq.h" for details.
+*/
+STORAGE_CLASS_ISYS2401_IRQ_C void isys_irqc_state_get(
+ const isys_irq_ID_t isys_irqc_id,
+ isys_irqc_state_t *state)
+{
+ state->edge = isys_irqc_reg_load(isys_irqc_id, ISYS_IRQ_EDGE_REG_IDX);
+ state->mask = isys_irqc_reg_load(isys_irqc_id, ISYS_IRQ_MASK_REG_IDX);
+ state->status = isys_irqc_reg_load(isys_irqc_id, ISYS_IRQ_STATUS_REG_IDX);
+ state->enable = isys_irqc_reg_load(isys_irqc_id, ISYS_IRQ_ENABLE_REG_IDX);
+ state->level_no = isys_irqc_reg_load(isys_irqc_id, ISYS_IRQ_LEVEL_NO_REG_IDX);
+ /*
+ ** Invalid to read/load from write-only register 'clear'
+ ** state->clear = isys_irqc_reg_load(isys_irqc_id, ISYS_IRQ_CLEAR_REG_IDX);
+ */
+}
+
+/**
+* @brief Dump the isys irq controller state.
+* Refer to "isys_irq.h" for details.
+*/
+STORAGE_CLASS_ISYS2401_IRQ_C void isys_irqc_state_dump(
+ const isys_irq_ID_t isys_irqc_id,
+ const isys_irqc_state_t *state)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "isys irq controller id %d"
+ "\n\tstatus:0x%x\n\tedge:0x%x\n\tmask:0x%x"
+ "\n\tenable:0x%x\n\tlevel_not_pulse:0x%x\n",
+ isys_irqc_id,
+ state->status, state->edge, state->mask, state->enable, state->level_no);
+}
+
+/** end of NCI */
+
+/* -------------------------------------------------------+
+ | Device level interface (DLI) |
+ + -------------------------------------------------------*/
+
+/* Support functions */
+STORAGE_CLASS_ISYS2401_IRQ_C void isys_irqc_reg_store(
+ const isys_irq_ID_t isys_irqc_id,
+ const unsigned int reg_idx,
+ const hrt_data value)
+{
+ unsigned int reg_addr;
+
+ assert(isys_irqc_id < N_ISYS_IRQ_ID);
+ assert(reg_idx <= ISYS_IRQ_LEVEL_NO_REG_IDX);
+
+ reg_addr = ISYS_IRQ_BASE[isys_irqc_id] + (reg_idx * sizeof(hrt_data));
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "isys irq store at addr(0x%x) val(%u)\n", reg_addr, (unsigned int)value);
+
+ ia_css_device_store_uint32(reg_addr, value);
+}
+
+STORAGE_CLASS_ISYS2401_IRQ_C hrt_data isys_irqc_reg_load(
+ const isys_irq_ID_t isys_irqc_id,
+ const unsigned int reg_idx)
+{
+ unsigned int reg_addr;
+ hrt_data value;
+
+ assert(isys_irqc_id < N_ISYS_IRQ_ID);
+ assert(reg_idx <= ISYS_IRQ_LEVEL_NO_REG_IDX);
+
+ reg_addr = ISYS_IRQ_BASE[isys_irqc_id] + (reg_idx * sizeof(hrt_data));
+ value = ia_css_device_load_uint32(reg_addr);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "isys irq load from addr(0x%x) val(%u)\n", reg_addr, (unsigned int)value);
+
+ return value;
+}
+
+/** end of DLI */
+
+#endif /* defined(USE_INPUT_SYSTEM_VERSION_2401) */
+
+#endif /* __ISYS_IRQ_PRIVATE_H__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/isys_stream2mmio.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/isys_stream2mmio.c
new file mode 100644
index 000000000000..67570138ba24
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/isys_stream2mmio.c
@@ -0,0 +1,21 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "isys_stream2mmio.h"
+
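+/* Number of SIDs handled by each stream2mmio controller instance. */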
+const stream2mmio_sid_ID_t N_STREAM2MMIO_SID_PROCS[N_STREAM2MMIO_ID] = {
+ N_STREAM2MMIO_SID_ID,
+ STREAM2MMIO_SID4_ID,
+ STREAM2MMIO_SID4_ID
+};
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/isys_stream2mmio_local.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/isys_stream2mmio_local.h
new file mode 100644
index 000000000000..801523977e1d
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/isys_stream2mmio_local.h
@@ -0,0 +1,36 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __ISYS_STREAM2MMIO_LOCAL_H_INCLUDED__
+#define __ISYS_STREAM2MMIO_LOCAL_H_INCLUDED__
+
+#include "isys_stream2mmio_global.h"
+
+typedef struct stream2mmio_state_s stream2mmio_state_t;
+typedef struct stream2mmio_sid_state_s stream2mmio_sid_state_t;
+
+struct stream2mmio_sid_state_s {
+ hrt_data rcv_ack;
+ hrt_data pix_width_id;
+ hrt_data start_addr;
+ hrt_data end_addr;
+ hrt_data strides;
+ hrt_data num_items;
+ hrt_data block_when_no_cmd;
+};
+
+struct stream2mmio_state_s {
+ stream2mmio_sid_state_t sid_state[N_STREAM2MMIO_SID_ID];
+};
+#endif /* __ISYS_STREAM2MMIO_LOCAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/isys_stream2mmio_private.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/isys_stream2mmio_private.h
new file mode 100644
index 000000000000..1603a09b621a
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/isys_stream2mmio_private.h
@@ -0,0 +1,168 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __ISYS_STREAM2MMIO_PRIVATE_H_INCLUDED__
+#define __ISYS_STREAM2MMIO_PRIVATE_H_INCLUDED__
+
+#include "isys_stream2mmio_public.h"
+#include "device_access.h" /* ia_css_device_load_uint32 */
+#include "assert_support.h" /* assert */
+#include "print_support.h" /* print */
+
+#define STREAM2MMIO_COMMAND_REG_ID 0
+#define STREAM2MMIO_ACKNOWLEDGE_REG_ID 1
+#define STREAM2MMIO_PIX_WIDTH_ID_REG_ID 2
+#define STREAM2MMIO_START_ADDR_REG_ID 3 /* master port address, NOT byte address */
+#define STREAM2MMIO_END_ADDR_REG_ID 4 /* master port address, NOT byte address */
+#define STREAM2MMIO_STRIDE_REG_ID 5 /* stride in master port words; increment is per packet for long SIDs, stride is not used for short SIDs */
+#define STREAM2MMIO_NUM_ITEMS_REG_ID 6 /* number of packets for store packets cmd, number of words for store_words cmd */
+#define STREAM2MMIO_BLOCK_WHEN_NO_CMD_REG_ID 7 /* if this register is 1, input will be stalled if there is no pending command for this sid */
+#define STREAM2MMIO_REGS_PER_SID 8
+
+/*****************************************************
+ *
+ * Native command interface (NCI).
+ *
+ *****************************************************/
+/**
+ * @brief Get the stream2mmio-controller state.
+ * Refer to "stream2mmio_public.h" for details.
+ */
+STORAGE_CLASS_STREAM2MMIO_C void stream2mmio_get_state(
+ const stream2mmio_ID_t ID,
+ stream2mmio_state_t *state)
+{
+ stream2mmio_sid_ID_t i;
+
+ /*
+ * Get the register-set values for each SID of this
+ * stream2mmio controller.
+ */
+ for (i = STREAM2MMIO_SID0_ID; i < N_STREAM2MMIO_SID_PROCS[ID]; i++) {
+ stream2mmio_get_sid_state(ID, i, &(state->sid_state[i]));
+ }
+}
+
+/**
+ * @brief Get the state of a stream2mmio-controller SID.
+ * Refer to "stream2mmio_public.h" for details.
+ */
+STORAGE_CLASS_STREAM2MMIO_C void stream2mmio_get_sid_state(
+ const stream2mmio_ID_t ID,
+ const stream2mmio_sid_ID_t sid_id,
+ stream2mmio_sid_state_t *state)
+{
+
+ state->rcv_ack =
+ stream2mmio_reg_load(ID, sid_id, STREAM2MMIO_ACKNOWLEDGE_REG_ID);
+
+ state->pix_width_id =
+ stream2mmio_reg_load(ID, sid_id, STREAM2MMIO_PIX_WIDTH_ID_REG_ID);
+
+ state->start_addr =
+ stream2mmio_reg_load(ID, sid_id, STREAM2MMIO_START_ADDR_REG_ID);
+
+ state->end_addr =
+ stream2mmio_reg_load(ID, sid_id, STREAM2MMIO_END_ADDR_REG_ID);
+
+ state->strides =
+ stream2mmio_reg_load(ID, sid_id, STREAM2MMIO_STRIDE_REG_ID);
+
+ state->num_items =
+ stream2mmio_reg_load(ID, sid_id, STREAM2MMIO_NUM_ITEMS_REG_ID);
+
+ state->block_when_no_cmd =
+ stream2mmio_reg_load(ID, sid_id, STREAM2MMIO_BLOCK_WHEN_NO_CMD_REG_ID);
+
+}
+
+/**
+ * @brief Print the state of a stream2mmio-controller SID.
+ * Refer to "stream2mmio_public.h" for details.
+ */
+STORAGE_CLASS_STREAM2MMIO_C void stream2mmio_print_sid_state(
+ stream2mmio_sid_state_t *state)
+{
+ ia_css_print("\t \t Receive acks 0x%x\n", state->rcv_ack);
+ ia_css_print("\t \t Pixel width 0x%x\n", state->pix_width_id);
+ ia_css_print("\t \t Startaddr 0x%x\n", state->start_addr);
+ ia_css_print("\t \t Endaddr 0x%x\n", state->end_addr);
+ ia_css_print("\t \t Strides 0x%x\n", state->strides);
+ ia_css_print("\t \t Num Items 0x%x\n", state->num_items);
+ ia_css_print("\t \t block when no cmd 0x%x\n", state->block_when_no_cmd);
+
+}
+/**
+ * @brief Dump the stream2mmio-controller state.
+ * Refer to "stream2mmio_public.h" for details.
+ */
+STORAGE_CLASS_STREAM2MMIO_C void stream2mmio_dump_state(
+ const stream2mmio_ID_t ID,
+ stream2mmio_state_t *state)
+{
+ stream2mmio_sid_ID_t i;
+
+ /*
+ * Print the state of each SID of this
+ * stream2mmio controller.
+ */
+ for (i = STREAM2MMIO_SID0_ID; i < N_STREAM2MMIO_SID_PROCS[ID]; i++) {
+ ia_css_print("StREAM2MMIO ID %d SID %d\n", ID, i);
+ stream2mmio_print_sid_state(&(state->sid_state[i]));
+ }
+}
+/** end of NCI */
+
+/*****************************************************
+ *
+ * Device level interface (DLI).
+ *
+ *****************************************************/
+/**
+ * @brief Load the register value.
+ * Refer to "stream2mmio_public.h" for details.
+ */
+STORAGE_CLASS_STREAM2MMIO_C hrt_data stream2mmio_reg_load(
+ const stream2mmio_ID_t ID,
+ const stream2mmio_sid_ID_t sid_id,
+ const uint32_t reg_idx)
+{
+ uint32_t reg_bank_offset;
+
+ assert(ID < N_STREAM2MMIO_ID);
+
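+ /* Per-SID registers are banked back to back, STREAM2MMIO_REGS_PER_SID words apart. */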
+ reg_bank_offset = STREAM2MMIO_REGS_PER_SID * sid_id;
+ return ia_css_device_load_uint32(STREAM2MMIO_CTRL_BASE[ID] +
+ (reg_bank_offset + reg_idx) * sizeof(hrt_data));
+}
+
+
+/**
+ * @brief Store a value to the register.
+ * Refer to "stream2mmio_public.h" for details.
+ */
+STORAGE_CLASS_STREAM2MMIO_C void stream2mmio_reg_store(
+ const stream2mmio_ID_t ID,
+ const hrt_address reg,
+ const hrt_data value)
+{
+ assert(ID < N_STREAM2MMIO_ID);
+ assert(STREAM2MMIO_CTRL_BASE[ID] != (hrt_address)-1);
+
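+ /* 'reg' is a raw word index relative to the controller base; no SID banking is applied here. */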
+ ia_css_device_store_uint32(STREAM2MMIO_CTRL_BASE[ID] +
+ reg * sizeof(hrt_data), value);
+}
+/** end of DLI */
+
+#endif /* __ISYS_STREAM2MMIO_PRIVATE_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/pixelgen_local.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/pixelgen_local.h
new file mode 100644
index 000000000000..24f4da9aef40
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/pixelgen_local.h
@@ -0,0 +1,50 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __PIXELGEN_LOCAL_H_INCLUDED__
+#define __PIXELGEN_LOCAL_H_INCLUDED__
+
+#include "pixelgen_global.h"
+
+typedef struct pixelgen_ctrl_state_s pixelgen_ctrl_state_t;
+struct pixelgen_ctrl_state_s {
+ hrt_data com_enable;
+ hrt_data prbs_rstval0;
+ hrt_data prbs_rstval1;
+ hrt_data syng_sid;
+ hrt_data syng_free_run;
+ hrt_data syng_pause;
+ hrt_data syng_nof_frames;
+ hrt_data syng_nof_pixels;
+ hrt_data syng_nof_line;
+ hrt_data syng_hblank_cyc;
+ hrt_data syng_vblank_cyc;
+ hrt_data syng_stat_hcnt;
+ hrt_data syng_stat_vcnt;
+ hrt_data syng_stat_fcnt;
+ hrt_data syng_stat_done;
+ hrt_data tpg_mode;
+ hrt_data tpg_hcnt_mask;
+ hrt_data tpg_vcnt_mask;
+ hrt_data tpg_xycnt_mask;
+ hrt_data tpg_hcnt_delta;
+ hrt_data tpg_vcnt_delta;
+ hrt_data tpg_r1;
+ hrt_data tpg_g1;
+ hrt_data tpg_b1;
+ hrt_data tpg_r2;
+ hrt_data tpg_g2;
+ hrt_data tpg_b2;
+};
+#endif /* __PIXELGEN_LOCAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/pixelgen_private.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/pixelgen_private.h
new file mode 100644
index 000000000000..3f34b508f0bf
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/pixelgen_private.h
@@ -0,0 +1,164 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __PIXELGEN_PRIVATE_H_INCLUDED__
+#define __PIXELGEN_PRIVATE_H_INCLUDED__
+#include "pixelgen_public.h"
+#include "hive_isp_css_host_ids_hrt.h"
+#include "PixelGen_SysBlock_defs.h"
+#include "device_access.h" /* ia_css_device_load_uint32 */
+#include "assert_support.h" /* assert */
+
+
+/*****************************************************
+ *
+ * Native command interface (NCI).
+ *
+ *****************************************************/
+/**
+ * @brief Get the pixelgen state.
+ * Refer to "pixelgen_public.h" for details.
+ */
+STORAGE_CLASS_PIXELGEN_C void pixelgen_ctrl_get_state(
+ const pixelgen_ID_t ID,
+ pixelgen_ctrl_state_t *state)
+{
+
+ state->com_enable =
+ pixelgen_ctrl_reg_load(ID, _PXG_COM_ENABLE_REG_IDX);
+ state->prbs_rstval0 =
+ pixelgen_ctrl_reg_load(ID, _PXG_PRBS_RSTVAL_REG0_IDX);
+ state->prbs_rstval1 =
+ pixelgen_ctrl_reg_load(ID, _PXG_PRBS_RSTVAL_REG1_IDX);
+ state->syng_sid =
+ pixelgen_ctrl_reg_load(ID, _PXG_SYNG_SID_REG_IDX);
+ state->syng_free_run =
+ pixelgen_ctrl_reg_load(ID, _PXG_SYNG_FREE_RUN_REG_IDX);
+ state->syng_pause =
+ pixelgen_ctrl_reg_load(ID, _PXG_SYNG_PAUSE_REG_IDX);
+ state->syng_nof_frames =
+ pixelgen_ctrl_reg_load(ID, _PXG_SYNG_NOF_FRAME_REG_IDX);
+ state->syng_nof_pixels =
+ pixelgen_ctrl_reg_load(ID, _PXG_SYNG_NOF_PIXEL_REG_IDX);
+ state->syng_nof_line =
+ pixelgen_ctrl_reg_load(ID, _PXG_SYNG_NOF_LINE_REG_IDX);
+ state->syng_hblank_cyc =
+ pixelgen_ctrl_reg_load(ID, _PXG_SYNG_HBLANK_CYC_REG_IDX);
+ state->syng_vblank_cyc =
+ pixelgen_ctrl_reg_load(ID, _PXG_SYNG_VBLANK_CYC_REG_IDX);
+ state->syng_stat_hcnt =
+ pixelgen_ctrl_reg_load(ID, _PXG_SYNG_STAT_HCNT_REG_IDX);
+ state->syng_stat_vcnt =
+ pixelgen_ctrl_reg_load(ID, _PXG_SYNG_STAT_VCNT_REG_IDX);
+ state->syng_stat_fcnt =
+ pixelgen_ctrl_reg_load(ID, _PXG_SYNG_STAT_FCNT_REG_IDX);
+ state->syng_stat_done =
+ pixelgen_ctrl_reg_load(ID, _PXG_SYNG_STAT_DONE_REG_IDX);
+ state->tpg_mode =
+ pixelgen_ctrl_reg_load(ID, _PXG_TPG_MODE_REG_IDX);
+ state->tpg_hcnt_mask =
+ pixelgen_ctrl_reg_load(ID, _PXG_TPG_HCNT_MASK_REG_IDX);
+ state->tpg_vcnt_mask =
+ pixelgen_ctrl_reg_load(ID, _PXG_TPG_VCNT_MASK_REG_IDX);
+ state->tpg_xycnt_mask =
+ pixelgen_ctrl_reg_load(ID, _PXG_TPG_XYCNT_MASK_REG_IDX);
+ state->tpg_hcnt_delta =
+ pixelgen_ctrl_reg_load(ID, _PXG_TPG_HCNT_DELTA_REG_IDX);
+ state->tpg_vcnt_delta =
+ pixelgen_ctrl_reg_load(ID, _PXG_TPG_VCNT_DELTA_REG_IDX);
+ state->tpg_r1 =
+ pixelgen_ctrl_reg_load(ID, _PXG_TPG_R1_REG_IDX);
+ state->tpg_g1 =
+ pixelgen_ctrl_reg_load(ID, _PXG_TPG_G1_REG_IDX);
+ state->tpg_b1 =
+ pixelgen_ctrl_reg_load(ID, _PXG_TPG_B1_REG_IDX);
+ state->tpg_r2 =
+ pixelgen_ctrl_reg_load(ID, _PXG_TPG_R2_REG_IDX);
+ state->tpg_g2 =
+ pixelgen_ctrl_reg_load(ID, _PXG_TPG_G2_REG_IDX);
+ state->tpg_b2 =
+ pixelgen_ctrl_reg_load(ID, _PXG_TPG_B2_REG_IDX);
+}
+/**
+ * @brief Dump the pixelgen state.
+ * Refer to "pixelgen_public.h" for details.
+ */
+STORAGE_CLASS_PIXELGEN_C void pixelgen_ctrl_dump_state(
+ const pixelgen_ID_t ID,
+ pixelgen_ctrl_state_t *state)
+{
+ ia_css_print("Pixel Generator ID %d Enable 0x%x \n", ID, state->com_enable);
+ ia_css_print("Pixel Generator ID %d PRBS reset vlue 0 0x%x \n", ID, state->prbs_rstval0);
+ ia_css_print("Pixel Generator ID %d PRBS reset vlue 1 0x%x \n", ID, state->prbs_rstval1);
+ ia_css_print("Pixel Generator ID %d SYNC SID 0x%x \n", ID, state->syng_sid);
+ ia_css_print("Pixel Generator ID %d syng free run 0x%x \n", ID, state->syng_free_run);
+ ia_css_print("Pixel Generator ID %d syng pause 0x%x \n", ID, state->syng_pause);
+ ia_css_print("Pixel Generator ID %d syng no of frames 0x%x \n", ID, state->syng_nof_frames);
+ ia_css_print("Pixel Generator ID %d syng no of pixels 0x%x \n", ID, state->syng_nof_pixels);
+ ia_css_print("Pixel Generator ID %d syng no of line 0x%x \n", ID, state->syng_nof_line);
+ ia_css_print("Pixel Generator ID %d syng hblank cyc 0x%x \n", ID, state->syng_hblank_cyc);
+ ia_css_print("Pixel Generator ID %d syng vblank cyc 0x%x \n", ID, state->syng_vblank_cyc);
+ ia_css_print("Pixel Generator ID %d syng stat hcnt 0x%x \n", ID, state->syng_stat_hcnt);
+ ia_css_print("Pixel Generator ID %d syng stat vcnt 0x%x \n", ID, state->syng_stat_vcnt);
+ ia_css_print("Pixel Generator ID %d syng stat fcnt 0x%x \n", ID, state->syng_stat_fcnt);
+ ia_css_print("Pixel Generator ID %d syng stat done 0x%x \n", ID, state->syng_stat_done);
+ ia_css_print("Pixel Generator ID %d tpg modee 0x%x \n", ID, state->tpg_mode);
+ ia_css_print("Pixel Generator ID %d tpg hcnt mask 0x%x \n", ID, state->tpg_hcnt_mask);
+ ia_css_print("Pixel Generator ID %d tpg hcnt mask 0x%x \n", ID, state->tpg_hcnt_mask);
+ ia_css_print("Pixel Generator ID %d tpg xycnt mask 0x%x \n", ID, state->tpg_xycnt_mask);
+ ia_css_print("Pixel Generator ID %d tpg hcnt delta 0x%x \n", ID, state->tpg_hcnt_delta);
+ ia_css_print("Pixel Generator ID %d tpg vcnt delta 0x%x \n", ID, state->tpg_vcnt_delta);
+ ia_css_print("Pixel Generator ID %d tpg r1 0x%x \n", ID, state->tpg_r1);
+ ia_css_print("Pixel Generator ID %d tpg g1 0x%x \n", ID, state->tpg_g1);
+ ia_css_print("Pixel Generator ID %d tpg b1 0x%x \n", ID, state->tpg_b1);
+ ia_css_print("Pixel Generator ID %d tpg r2 0x%x \n", ID, state->tpg_r2);
+ ia_css_print("Pixel Generator ID %d tpg g2 0x%x \n", ID, state->tpg_g2);
+ ia_css_print("Pixel Generator ID %d tpg b2 0x%x \n", ID, state->tpg_b2);
+}
+/* end of NCI */
+/*****************************************************
+ *
+ * Device level interface (DLI).
+ *
+ *****************************************************/
+/**
+ * @brief Load the register value.
+ * Refer to "pixelgen_public.h" for details.
+ */
+STORAGE_CLASS_PIXELGEN_C hrt_data pixelgen_ctrl_reg_load(
+ const pixelgen_ID_t ID,
+ const hrt_address reg)
+{
+ assert(ID < N_PIXELGEN_ID);
+ assert(PIXELGEN_CTRL_BASE[ID] != (hrt_address)-1);
+ return ia_css_device_load_uint32(PIXELGEN_CTRL_BASE[ID] + reg*sizeof(hrt_data));
+}
+
+
+/**
+ * @brief Store a value to the register.
+ * Refer to "pixelgen_ctrl_public.h" for details.
+ */
+STORAGE_CLASS_PIXELGEN_C void pixelgen_ctrl_reg_store(
+ const pixelgen_ID_t ID,
+ const hrt_address reg,
+ const hrt_data value)
+{
+ assert(ID < N_PIXELGEN_ID);
+ assert(PIXELGEN_CTRL_BASE[ID] != (hrt_address)-1);
+
+ ia_css_device_store_uint32(PIXELGEN_CTRL_BASE[ID] + reg*sizeof(hrt_data), value);
+}
+/** end of DLI */
+#endif /* __PIXELGEN_PRIVATE_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/system_local.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/system_local.h
new file mode 100644
index 000000000000..c16670989702
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/system_local.h
@@ -0,0 +1,381 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __SYSTEM_LOCAL_H_INCLUDED__
+#define __SYSTEM_LOCAL_H_INCLUDED__
+
+#ifdef HRT_ISP_CSS_CUSTOM_HOST
+#ifndef HRT_USE_VIR_ADDRS
+#define HRT_USE_VIR_ADDRS
+#endif
+/* This interface is deprecated */
+/*#include "hive_isp_css_custom_host_hrt.h"*/
+#endif
+
+#include "system_global.h"
+
+#ifdef __FIST__
+#define HRT_ADDRESS_WIDTH 32 /* Surprise, this is a local property and even differs per platform */
+#else
+#define HRT_ADDRESS_WIDTH 64 /* Surprise, this is a local property */
+#endif
+
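+/* Note: the "(1 == 1)" term below currently forces the deprecated hrt/hive_types.h path. */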
+#if !defined(__KERNEL__) || (1 == 1)
+/* This interface is deprecated */
+#include "hrt/hive_types.h"
+#else /* __KERNEL__ */
+#include <type_support.h>
+
+#if HRT_ADDRESS_WIDTH == 64
+typedef uint64_t hrt_address;
+#elif HRT_ADDRESS_WIDTH == 32
+typedef uint32_t hrt_address;
+#else
+#error "system_local.h: HRT_ADDRESS_WIDTH must be one of {32,64}"
+#endif
+
+typedef uint32_t hrt_vaddress;
+typedef uint32_t hrt_data;
+#endif /* __KERNEL__ */
+
+/*
+ * Cell specific address maps
+ */
+#if HRT_ADDRESS_WIDTH == 64
+
+#define GP_FIFO_BASE ((hrt_address)0x0000000000090104) /* This is NOT a base address */
+
+/* DDR */
+static const hrt_address DDR_BASE[N_DDR_ID] = {
+ 0x0000000120000000ULL};
+
+/* ISP */
+static const hrt_address ISP_CTRL_BASE[N_ISP_ID] = {
+ 0x0000000000020000ULL};
+
+static const hrt_address ISP_DMEM_BASE[N_ISP_ID] = {
+ 0x0000000000200000ULL};
+
+static const hrt_address ISP_BAMEM_BASE[N_BAMEM_ID] = {
+ 0x0000000000100000ULL};
+
+static const hrt_address ISP_VAMEM_BASE[N_VAMEM_ID] = {
+ 0x00000000001C0000ULL,
+ 0x00000000001D0000ULL,
+ 0x00000000001E0000ULL};
+
+static const hrt_address ISP_HMEM_BASE[N_HMEM_ID] = {
+ 0x00000000001F0000ULL};
+
+/* SP */
+static const hrt_address SP_CTRL_BASE[N_SP_ID] = {
+ 0x0000000000010000ULL};
+
+static const hrt_address SP_DMEM_BASE[N_SP_ID] = {
+ 0x0000000000300000ULL};
+
+/* MMU */
+#if defined(IS_ISP_2400_MAMOIADA_SYSTEM) || defined(IS_ISP_2401_MAMOIADA_SYSTEM)
+/*
+ * MMU0_ID: The data MMU
+ * MMU1_ID: The icache MMU
+ */
+static const hrt_address MMU_BASE[N_MMU_ID] = {
+ 0x0000000000070000ULL,
+ 0x00000000000A0000ULL};
+#else
+#error "system_local.h: SYSTEM must be one of {2400, 2401 }"
+#endif
+
+/* DMA */
+static const hrt_address DMA_BASE[N_DMA_ID] = {
+ 0x0000000000040000ULL};
+
+static const hrt_address ISYS2401_DMA_BASE[N_ISYS2401_DMA_ID] = {
+ 0x00000000000CA000ULL};
+
+/* IRQ */
+static const hrt_address IRQ_BASE[N_IRQ_ID] = {
+ 0x0000000000000500ULL,
+ 0x0000000000030A00ULL,
+ 0x000000000008C000ULL,
+ 0x0000000000090200ULL};
+/*
+ 0x0000000000000500ULL};
+ */
+
+/* GDC */
+static const hrt_address GDC_BASE[N_GDC_ID] = {
+ 0x0000000000050000ULL,
+ 0x0000000000060000ULL};
+
+/* FIFO_MONITOR (not a subset of GP_DEVICE) */
+static const hrt_address FIFO_MONITOR_BASE[N_FIFO_MONITOR_ID] = {
+ 0x0000000000000000ULL};
+
+/*
+static const hrt_address GP_REGS_BASE[N_GP_REGS_ID] = {
+ 0x0000000000000000ULL};
+
+static const hrt_address GP_DEVICE_BASE[N_GP_DEVICE_ID] = {
+ 0x0000000000090000ULL};
+*/
+
+/* GP_DEVICE (single base for all separate GP_REG instances) */
+static const hrt_address GP_DEVICE_BASE[N_GP_DEVICE_ID] = {
+ 0x0000000000000000ULL};
+
+/* GP TIMER: all timer registers are intertwined, so having
+ * multiple base addresses for different timers does not help.
+ */
+static const hrt_address GP_TIMER_BASE =
+ (hrt_address)0x0000000000000600ULL;
+
+/* GPIO */
+static const hrt_address GPIO_BASE[N_GPIO_ID] = {
+ 0x0000000000000400ULL};
+
+/* TIMED_CTRL */
+static const hrt_address TIMED_CTRL_BASE[N_TIMED_CTRL_ID] = {
+ 0x0000000000000100ULL};
+
+
+/* INPUT_FORMATTER */
+static const hrt_address INPUT_FORMATTER_BASE[N_INPUT_FORMATTER_ID] = {
+ 0x0000000000030000ULL,
+ 0x0000000000030200ULL,
+ 0x0000000000030400ULL,
+ 0x0000000000030600ULL}; /* memcpy() */
+
+/* INPUT_SYSTEM */
+static const hrt_address INPUT_SYSTEM_BASE[N_INPUT_SYSTEM_ID] = {
+ 0x0000000000080000ULL};
+/* 0x0000000000081000ULL, */ /* capture A */
+/* 0x0000000000082000ULL, */ /* capture B */
+/* 0x0000000000083000ULL, */ /* capture C */
+/* 0x0000000000084000ULL, */ /* Acquisition */
+/* 0x0000000000085000ULL, */ /* DMA */
+/* 0x0000000000089000ULL, */ /* ctrl */
+/* 0x000000000008A000ULL, */ /* GP regs */
+/* 0x000000000008B000ULL, */ /* FIFO */
+/* 0x000000000008C000ULL, */ /* IRQ */
+
+/* RX, the MIPI lane control regs start at offset 0 */
+static const hrt_address RX_BASE[N_RX_ID] = {
+ 0x0000000000080100ULL};
+
+/* IBUF_CTRL, part of the Input System 2401 */
+static const hrt_address IBUF_CTRL_BASE[N_IBUF_CTRL_ID] = {
+ 0x00000000000C1800ULL, /* ibuf controller A */
+ 0x00000000000C3800ULL, /* ibuf controller B */
+ 0x00000000000C5800ULL /* ibuf controller C */
+};
+
+/* ISYS IRQ Controllers, part of the Input System 2401 */
+static const hrt_address ISYS_IRQ_BASE[N_ISYS_IRQ_ID] = {
+ 0x00000000000C1400ULL, /* port a */
+ 0x00000000000C3400ULL, /* port b */
+ 0x00000000000C5400ULL /* port c */
+};
+
+/* CSI FE, part of the Input System 2401 */
+static const hrt_address CSI_RX_FE_CTRL_BASE[N_CSI_RX_FRONTEND_ID] = {
+ 0x00000000000C0400ULL, /* csi fe controller A */
+ 0x00000000000C2400ULL, /* csi fe controller B */
+ 0x00000000000C4400ULL /* csi fe controller C */
+};
+/* CSI BE, part of the Input System 2401 */
+static const hrt_address CSI_RX_BE_CTRL_BASE[N_CSI_RX_BACKEND_ID] = {
+ 0x00000000000C0800ULL, /* csi be controller A */
+ 0x00000000000C2800ULL, /* csi be controller B */
+ 0x00000000000C4800ULL /* csi be controller C */
+};
+/* PIXEL Generator, part of the Input System 2401 */
+static const hrt_address PIXELGEN_CTRL_BASE[N_PIXELGEN_ID] = {
+ 0x00000000000C1000ULL, /* pixel gen controller A */
+ 0x00000000000C3000ULL, /* pixel gen controller B */
+ 0x00000000000C5000ULL /* pixel gen controller C */
+};
+/* Stream2MMIO, part of the Input System 2401 */
+static const hrt_address STREAM2MMIO_CTRL_BASE[N_STREAM2MMIO_ID] = {
+ 0x00000000000C0C00ULL, /* stream2mmio controller A */
+ 0x00000000000C2C00ULL, /* stream2mmio controller B */
+ 0x00000000000C4C00ULL /* stream2mmio controller C */
+};
+#elif HRT_ADDRESS_WIDTH == 32
+
+#define GP_FIFO_BASE ((hrt_address)0x00090104) /* This is NOT a base address */
+
+/* DDR: Attention, this value is not defined for 32-bit addressing */
+static const hrt_address DDR_BASE[N_DDR_ID] = {
+ 0x00000000UL};
+
+/* ISP */
+static const hrt_address ISP_CTRL_BASE[N_ISP_ID] = {
+ 0x00020000UL};
+
+static const hrt_address ISP_DMEM_BASE[N_ISP_ID] = {
+ 0xffffffffUL};
+
+static const hrt_address ISP_BAMEM_BASE[N_BAMEM_ID] = {
+ 0xffffffffUL};
+
+static const hrt_address ISP_VAMEM_BASE[N_VAMEM_ID] = {
+ 0xffffffffUL,
+ 0xffffffffUL,
+ 0xffffffffUL};
+
+static const hrt_address ISP_HMEM_BASE[N_HMEM_ID] = {
+ 0xffffffffUL};
+
+/* SP */
+static const hrt_address SP_CTRL_BASE[N_SP_ID] = {
+ 0x00010000UL};
+
+static const hrt_address SP_DMEM_BASE[N_SP_ID] = {
+ 0x00300000UL};
+
+/* MMU */
+#if defined(IS_ISP_2400_MAMOIADA_SYSTEM) || defined(IS_ISP_2401_MAMOIADA_SYSTEM)
+/*
+ * MMU0_ID: The data MMU
+ * MMU1_ID: The icache MMU
+ */
+static const hrt_address MMU_BASE[N_MMU_ID] = {
+ 0x00070000UL,
+ 0x000A0000UL};
+#else
+#error "system_local.h: SYSTEM must be one of {2400, 2401 }"
+#endif
+
+/* DMA */
+static const hrt_address DMA_BASE[N_DMA_ID] = {
+ 0x00040000UL};
+
+static const hrt_address ISYS2401_DMA_BASE[N_ISYS2401_DMA_ID] = {
+ 0x000CA000UL};
+
+/* IRQ */
+static const hrt_address IRQ_BASE[N_IRQ_ID] = {
+ 0x00000500UL,
+ 0x00030A00UL,
+ 0x0008C000UL,
+ 0x00090200UL};
+/*
+ 0x00000500UL};
+ */
+
+/* GDC */
+static const hrt_address GDC_BASE[N_GDC_ID] = {
+ 0x00050000UL,
+ 0x00060000UL};
+
+/* FIFO_MONITOR (not a subset of GP_DEVICE) */
+static const hrt_address FIFO_MONITOR_BASE[N_FIFO_MONITOR_ID] = {
+ 0x00000000UL};
+
+/*
+static const hrt_address GP_REGS_BASE[N_GP_REGS_ID] = {
+ 0x00000000UL};
+
+static const hrt_address GP_DEVICE_BASE[N_GP_DEVICE_ID] = {
+ 0x00090000UL};
+*/
+
+/* GP_DEVICE (single base for all separate GP_REG instances) */
+static const hrt_address GP_DEVICE_BASE[N_GP_DEVICE_ID] = {
+ 0x00000000UL};
+
+/* GP TIMER: all timer registers are intertwined, so having
+ * multiple base addresses for different timers does not help.
+ */
+static const hrt_address GP_TIMER_BASE =
+ (hrt_address)0x00000600UL;
+/* GPIO */
+static const hrt_address GPIO_BASE[N_GPIO_ID] = {
+ 0x00000400UL};
+
+/* TIMED_CTRL */
+static const hrt_address TIMED_CTRL_BASE[N_TIMED_CTRL_ID] = {
+ 0x00000100UL};
+
+
+/* INPUT_FORMATTER */
+static const hrt_address INPUT_FORMATTER_BASE[N_INPUT_FORMATTER_ID] = {
+ 0x00030000UL,
+ 0x00030200UL,
+ 0x00030400UL};
+/* 0x00030600UL, */ /* memcpy() */
+
+/* INPUT_SYSTEM */
+static const hrt_address INPUT_SYSTEM_BASE[N_INPUT_SYSTEM_ID] = {
+ 0x00080000UL};
+/* 0x00081000UL, */ /* capture A */
+/* 0x00082000UL, */ /* capture B */
+/* 0x00083000UL, */ /* capture C */
+/* 0x00084000UL, */ /* Acquisition */
+/* 0x00085000UL, */ /* DMA */
+/* 0x00089000UL, */ /* ctrl */
+/* 0x0008A000UL, */ /* GP regs */
+/* 0x0008B000UL, */ /* FIFO */
+/* 0x0008C000UL, */ /* IRQ */
+
+/* RX, the MIPI lane control regs start at offset 0 */
+static const hrt_address RX_BASE[N_RX_ID] = {
+ 0x00080100UL};
+
+/* IBUF_CTRL, part of the Input System 2401 */
+static const hrt_address IBUF_CTRL_BASE[N_IBUF_CTRL_ID] = {
+ 0x000C1800UL, /* ibuf controller A */
+ 0x000C3800UL, /* ibuf controller B */
+ 0x000C5800UL /* ibuf controller C */
+};
+
+/* ISYS IRQ Controllers, part of the Input System 2401 */
+static const hrt_address ISYS_IRQ_BASE[N_ISYS_IRQ_ID] = {
+ 0x000C1400UL, /* port a */
+ 0x000C3400UL, /* port b */
+ 0x000C5400UL /* port c */
+};
+
+/* CSI FE, part of the Input System 2401 */
+static const hrt_address CSI_RX_FE_CTRL_BASE[N_CSI_RX_FRONTEND_ID] = {
+ 0x000C0400UL, /* csi fe controller A */
+ 0x000C2400UL, /* csi fe controller B */
+ 0x000C4400UL /* csi fe controller C */
+};
+/* CSI BE, part of the Input System 2401 */
+static const hrt_address CSI_RX_BE_CTRL_BASE[N_CSI_RX_BACKEND_ID] = {
+ 0x000C0800UL, /* csi be controller A */
+ 0x000C2800UL, /* csi be controller B */
+ 0x000C4800UL /* csi be controller C */
+};
+/* PIXEL Generator, part of the Input System 2401 */
+static const hrt_address PIXELGEN_CTRL_BASE[N_PIXELGEN_ID] = {
+ 0x000C1000UL, /* pixel gen controller A */
+ 0x000C3000UL, /* pixel gen controller B */
+ 0x000C5000UL /* pixel gen controller C */
+};
+/* Stream2MMIO, part of the Input System 2401 */
+static const hrt_address STREAM2MMIO_CTRL_BASE[N_STREAM2MMIO_ID] = {
+ 0x000C0C00UL, /* stream2mmio controller A */
+ 0x000C2C00UL, /* stream2mmio controller B */
+ 0x000C4C00UL /* stream2mmio controller C */
+};
+
+#else
+#error "system_local.h: HRT_ADDRESS_WIDTH must be one of {32,64}"
+#endif
+
+#endif /* __SYSTEM_LOCAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/PixelGen_SysBlock_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/PixelGen_SysBlock_defs.h
new file mode 100644
index 000000000000..1b3391c242a3
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/PixelGen_SysBlock_defs.h
@@ -0,0 +1,126 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _PixelGen_SysBlock_defs_h
+#define _PixelGen_SysBlock_defs_h
+
+#ifdef ISYS2401_PXG_A
+#else
+#ifdef ISYS2401_PXG_B
+#else
+#ifdef ISYS2401_PXG_C
+#else
+#include <mipi_backend/hrt/include/mipi_backend_defs.h>
+#endif
+#endif
+#endif
+
+/* Parameters and User_Parameters for HSS */
+#define _PXG_PPC Ppc
+#define _PXG_PIXEL_BITS PixelWidth
+#define _PXG_MAX_NOF_SID MaxNofSids
+#define _PXG_DATA_BITS DataWidth
+#define _PXG_CNT_BITS CntWidth
+#define _PXG_FIFODEPTH FifoDepth
+#define _PXG_DBG Dbg_device_not_included
+
+/* ID's and Address */
+#define _PXG_ADRRESS_ALIGN_REG 4
+
+#define _PXG_COM_ENABLE_REG_IDX 0
+#define _PXG_PRBS_RSTVAL_REG0_IDX 1
+#define _PXG_PRBS_RSTVAL_REG1_IDX 2
+#define _PXG_SYNG_SID_REG_IDX 3
+#define _PXG_SYNG_FREE_RUN_REG_IDX 4
+#define _PXG_SYNG_PAUSE_REG_IDX 5
+#define _PXG_SYNG_NOF_FRAME_REG_IDX 6
+#define _PXG_SYNG_NOF_PIXEL_REG_IDX 7
+#define _PXG_SYNG_NOF_LINE_REG_IDX 8
+#define _PXG_SYNG_HBLANK_CYC_REG_IDX 9
+#define _PXG_SYNG_VBLANK_CYC_REG_IDX 10
+#define _PXG_SYNG_STAT_HCNT_REG_IDX 11
+#define _PXG_SYNG_STAT_VCNT_REG_IDX 12
+#define _PXG_SYNG_STAT_FCNT_REG_IDX 13
+#define _PXG_SYNG_STAT_DONE_REG_IDX 14
+#define _PXG_TPG_MODE_REG_IDX 15
+#define _PXG_TPG_HCNT_MASK_REG_IDX 16
+#define _PXG_TPG_VCNT_MASK_REG_IDX 17
+#define _PXG_TPG_XYCNT_MASK_REG_IDX 18
+#define _PXG_TPG_HCNT_DELTA_REG_IDX 19
+#define _PXG_TPG_VCNT_DELTA_REG_IDX 20
+#define _PXG_TPG_R1_REG_IDX 21
+#define _PXG_TPG_G1_REG_IDX 22
+#define _PXG_TPG_B1_REG_IDX 23
+#define _PXG_TPG_R2_REG_IDX 24
+#define _PXG_TPG_G2_REG_IDX 25
+#define _PXG_TPG_B2_REG_IDX 26
+/* */
+#define _PXG_SYNG_PAUSE_CYCLES 0
+/* Subblock ID's */
+#define _PXG_DISBALE_IDX 0
+#define _PXG_PRBS_IDX 0
+#define _PXG_TPG_IDX 1
+#define _PXG_SYNG_IDX 2
+#define _PXG_SMUX_IDX 3
+/* Register Widths */
+#define _PXG_COM_ENABLE_REG_WIDTH 2
+#define _PXG_COM_SRST_REG_WIDTH 4
+#define _PXG_PRBS_RSTVAL_REG0_WIDTH 31
+#define _PXG_PRBS_RSTVAL_REG1_WIDTH 31
+
+#define _PXG_SYNG_SID_REG_WIDTH 3
+
+#define _PXG_SYNG_FREE_RUN_REG_WIDTH 1
+#define _PXG_SYNG_PAUSE_REG_WIDTH 1
+/*
+#define _PXG_SYNG_NOF_FRAME_REG_WIDTH <sync_gen_cnt_width>
+#define _PXG_SYNG_NOF_PIXEL_REG_WIDTH <sync_gen_cnt_width>
+#define _PXG_SYNG_NOF_LINE_REG_WIDTH <sync_gen_cnt_width>
+#define _PXG_SYNG_HBLANK_CYC_REG_WIDTH <sync_gen_cnt_width>
+#define _PXG_SYNG_VBLANK_CYC_REG_WIDTH <sync_gen_cnt_width>
+#define _PXG_SYNG_STAT_HCNT_REG_WIDTH <sync_gen_cnt_width>
+#define _PXG_SYNG_STAT_VCNT_REG_WIDTH <sync_gen_cnt_width>
+#define _PXG_SYNG_STAT_FCNT_REG_WIDTH <sync_gen_cnt_width>
+*/
+#define _PXG_SYNG_STAT_DONE_REG_WIDTH 1
+#define _PXG_TPG_MODE_REG_WIDTH 2
+/*
+#define _PXG_TPG_HCNT_MASK_REG_WIDTH <sync_gen_cnt_width>
+#define _PXG_TPG_VCNT_MASK_REG_WIDTH <sync_gen_cnt_width>
+#define _PXG_TPG_XYCNT_MASK_REG_WIDTH <pixle_width>
+*/
+#define _PXG_TPG_HCNT_DELTA_REG_WIDTH 4
+#define _PXG_TPG_VCNT_DELTA_REG_WIDTH 4
+/*
+#define _PXG_TPG_R1_REG_WIDTH <pixle_width>
+#define _PXG_TPG_G1_REG_WIDTH <pixle_width>
+#define _PXG_TPG_B1_REG_WIDTH <pixle_width>
+#define _PXG_TPG_R2_REG_WIDTH <pixle_width>
+#define _PXG_TPG_G2_REG_WIDTH <pixle_width>
+#define _PXG_TPG_B2_REG_WIDTH <pixle_width>
+*/
+#define _PXG_FIFO_DEPTH 2
+/* MISC */
+#define _PXG_ENABLE_REG_VAL 1
+#define _PXG_PRBS_ENABLE_REG_VAL 1
+#define _PXG_TPG_ENABLE_REG_VAL 2
+#define _PXG_SYNG_ENABLE_REG_VAL 4
+#define _PXG_FIFO_ENABLE_REG_VAL 8
+#define _PXG_PXL_BITS 14
+#define _PXG_INVALID_FLAG 0xDEADBEEF
+#define _PXG_CAFE_FLAG 0xCAFEBABE
+
+
+#endif /* _PixelGen_SysBlock_defs_h */
+
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/bits.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/bits.h
new file mode 100644
index 000000000000..e71e33d9d143
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/bits.h
@@ -0,0 +1,104 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _HRT_BITS_H
+#define _HRT_BITS_H
+
+#include "defs.h"
+
+#define _hrt_ones(n) HRTCAT(_hrt_ones_, n)
+#define _hrt_ones_0x0 0x00000000U
+#define _hrt_ones_0x1 0x00000001U
+#define _hrt_ones_0x2 0x00000003U
+#define _hrt_ones_0x3 0x00000007U
+#define _hrt_ones_0x4 0x0000000FU
+#define _hrt_ones_0x5 0x0000001FU
+#define _hrt_ones_0x6 0x0000003FU
+#define _hrt_ones_0x7 0x0000007FU
+#define _hrt_ones_0x8 0x000000FFU
+#define _hrt_ones_0x9 0x000001FFU
+#define _hrt_ones_0xA 0x000003FFU
+#define _hrt_ones_0xB 0x000007FFU
+#define _hrt_ones_0xC 0x00000FFFU
+#define _hrt_ones_0xD 0x00001FFFU
+#define _hrt_ones_0xE 0x00003FFFU
+#define _hrt_ones_0xF 0x00007FFFU
+#define _hrt_ones_0x10 0x0000FFFFU
+#define _hrt_ones_0x11 0x0001FFFFU
+#define _hrt_ones_0x12 0x0003FFFFU
+#define _hrt_ones_0x13 0x0007FFFFU
+#define _hrt_ones_0x14 0x000FFFFFU
+#define _hrt_ones_0x15 0x001FFFFFU
+#define _hrt_ones_0x16 0x003FFFFFU
+#define _hrt_ones_0x17 0x007FFFFFU
+#define _hrt_ones_0x18 0x00FFFFFFU
+#define _hrt_ones_0x19 0x01FFFFFFU
+#define _hrt_ones_0x1A 0x03FFFFFFU
+#define _hrt_ones_0x1B 0x07FFFFFFU
+#define _hrt_ones_0x1C 0x0FFFFFFFU
+#define _hrt_ones_0x1D 0x1FFFFFFFU
+#define _hrt_ones_0x1E 0x3FFFFFFFU
+#define _hrt_ones_0x1F 0x7FFFFFFFU
+#define _hrt_ones_0x20 0xFFFFFFFFU
+
+#define _hrt_ones_0 _hrt_ones_0x0
+#define _hrt_ones_1 _hrt_ones_0x1
+#define _hrt_ones_2 _hrt_ones_0x2
+#define _hrt_ones_3 _hrt_ones_0x3
+#define _hrt_ones_4 _hrt_ones_0x4
+#define _hrt_ones_5 _hrt_ones_0x5
+#define _hrt_ones_6 _hrt_ones_0x6
+#define _hrt_ones_7 _hrt_ones_0x7
+#define _hrt_ones_8 _hrt_ones_0x8
+#define _hrt_ones_9 _hrt_ones_0x9
+#define _hrt_ones_10 _hrt_ones_0xA
+#define _hrt_ones_11 _hrt_ones_0xB
+#define _hrt_ones_12 _hrt_ones_0xC
+#define _hrt_ones_13 _hrt_ones_0xD
+#define _hrt_ones_14 _hrt_ones_0xE
+#define _hrt_ones_15 _hrt_ones_0xF
+#define _hrt_ones_16 _hrt_ones_0x10
+#define _hrt_ones_17 _hrt_ones_0x11
+#define _hrt_ones_18 _hrt_ones_0x12
+#define _hrt_ones_19 _hrt_ones_0x13
+#define _hrt_ones_20 _hrt_ones_0x14
+#define _hrt_ones_21 _hrt_ones_0x15
+#define _hrt_ones_22 _hrt_ones_0x16
+#define _hrt_ones_23 _hrt_ones_0x17
+#define _hrt_ones_24 _hrt_ones_0x18
+#define _hrt_ones_25 _hrt_ones_0x19
+#define _hrt_ones_26 _hrt_ones_0x1A
+#define _hrt_ones_27 _hrt_ones_0x1B
+#define _hrt_ones_28 _hrt_ones_0x1C
+#define _hrt_ones_29 _hrt_ones_0x1D
+#define _hrt_ones_30 _hrt_ones_0x1E
+#define _hrt_ones_31 _hrt_ones_0x1F
+#define _hrt_ones_32 _hrt_ones_0x20
+
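+/*
+ * Bit-field helpers: _hrt_mask() builds an n-bit mask starting at bit b,
+ * _hrt_get_bits()/_hrt_set_bits() extract or insert an n-bit field, and the
+ * single-bit and half-word variants below are shorthands built on these.
+ */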
+#define _hrt_mask(b, n) \
+ (_hrt_ones(n) << (b))
+#define _hrt_get_bits(w, b, n) \
+ (((w) >> (b)) & _hrt_ones(n))
+#define _hrt_set_bits(w, b, n, v) \
+ (((w) & ~_hrt_mask(b, n)) | (((v) & _hrt_ones(n)) << (b)))
+#define _hrt_get_bit(w, b) \
+ (((w) >> (b)) & 1)
+#define _hrt_set_bit(w, b, v) \
+ (((w) & (~(1 << (b)))) | (((v)&1) << (b)))
+#define _hrt_set_lower_half(w, v) \
+ _hrt_set_bits(w, 0, 16, v)
+#define _hrt_set_upper_half(w, v) \
+ _hrt_set_bits(w, 16, 16, v)
+
+#endif /* _HRT_BITS_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/cell_params.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/cell_params.h
new file mode 100644
index 000000000000..b5756bfe8eb6
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/cell_params.h
@@ -0,0 +1,42 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _cell_params_h
+#define _cell_params_h
+
+#define SP_PMEM_LOG_WIDTH_BITS 6 /*Width of PC, 64 bits, 8 bytes*/
+#define SP_ICACHE_TAG_BITS 4 /*size of tag*/
+#define SP_ICACHE_SET_BITS 8 /* 256 sets*/
+#define SP_ICACHE_BLOCKS_PER_SET_BITS 1 /* 2 way associative*/
+#define SP_ICACHE_BLOCK_ADDRESS_BITS 11 /* 2048 lines capacity*/
+
+#define SP_ICACHE_ADDRESS_BITS \
+ (SP_ICACHE_TAG_BITS+SP_ICACHE_BLOCK_ADDRESS_BITS)
+
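+/* PMEM depth follows from the icache address width: 2^(tag bits + block address bits) entries. */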
+#define SP_PMEM_DEPTH (1<<SP_ICACHE_ADDRESS_BITS)
+
+#define SP_FIFO_0_DEPTH 0
+#define SP_FIFO_1_DEPTH 0
+#define SP_FIFO_2_DEPTH 0
+#define SP_FIFO_3_DEPTH 0
+#define SP_FIFO_4_DEPTH 0
+#define SP_FIFO_5_DEPTH 0
+#define SP_FIFO_6_DEPTH 0
+#define SP_FIFO_7_DEPTH 0
+
+
+#define SP_SLV_BUS_MAXBURSTSIZE 1
+
+#endif /* _cell_params_h */
+
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/css_receiver_2400_common_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/css_receiver_2400_common_defs.h
new file mode 100644
index 000000000000..f3054fe04d03
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/css_receiver_2400_common_defs.h
@@ -0,0 +1,200 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _css_receiver_2400_common_defs_h_
+#define _css_receiver_2400_common_defs_h_
+#ifndef _mipi_backend_common_defs_h_
+#define _mipi_backend_common_defs_h_
+
+#define _HRT_CSS_RECEIVER_2400_GEN_SHORT_DATA_WIDTH 16
+#define _HRT_CSS_RECEIVER_2400_GEN_SHORT_CH_ID_WIDTH 2
+#define _HRT_CSS_RECEIVER_2400_GEN_SHORT_FMT_TYPE_WIDTH 3
+#define _HRT_CSS_RECEIVER_2400_GEN_SHORT_STR_REAL_WIDTH (_HRT_CSS_RECEIVER_2400_GEN_SHORT_DATA_WIDTH + _HRT_CSS_RECEIVER_2400_GEN_SHORT_CH_ID_WIDTH + _HRT_CSS_RECEIVER_2400_GEN_SHORT_FMT_TYPE_WIDTH)
+#define _HRT_CSS_RECEIVER_2400_GEN_SHORT_STR_WIDTH 32 /* use 32 to be compatible with the streaming monitor; MSBs of the interface are tied to '0' */
+
+/* Definition of data format ID at the interface CSS_receiver capture/acquisition units */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_YUV420_8 24 /* 01 1000 YUV420 8-bit */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_YUV420_10 25 /* 01 1001 YUV420 10-bit */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_YUV420_8L 26 /* 01 1010 YUV420 8-bit legacy */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_YUV422_8 30 /* 01 1110 YUV422 8-bit */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_YUV422_10 31 /* 01 1111 YUV422 10-bit */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RGB444 32 /* 10 0000 RGB444 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RGB555 33 /* 10 0001 RGB555 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RGB565 34 /* 10 0010 RGB565 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RGB666 35 /* 10 0011 RGB666 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RGB888 36 /* 10 0100 RGB888 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RAW6 40 /* 10 1000 RAW6 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RAW7 41 /* 10 1001 RAW7 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RAW8 42 /* 10 1010 RAW8 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RAW10 43 /* 10 1011 RAW10 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RAW12 44 /* 10 1100 RAW12 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RAW14 45 /* 10 1101 RAW14 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_USR_DEF_1 48 /* 11 0000 JPEG [User Defined 8-bit Data Type 1] */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_USR_DEF_2 49 /* 11 0001 User Defined 8-bit Data Type 2 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_USR_DEF_3 50 /* 11 0010 User Defined 8-bit Data Type 3 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_USR_DEF_4 51 /* 11 0011 User Defined 8-bit Data Type 4 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_USR_DEF_5 52 /* 11 0100 User Defined 8-bit Data Type 5 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_USR_DEF_6 53 /* 11 0101 User Defined 8-bit Data Type 6 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_USR_DEF_7 54 /* 11 0110 User Defined 8-bit Data Type 7 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_USR_DEF_8 55 /* 11 0111 User Defined 8-bit Data Type 8 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_Emb 18 /* 01 0010 embedded eight bit non image data */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_SOF 0 /* 00 0000 frame start */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_EOF 1 /* 00 0001 frame end */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_SOL 2 /* 00 0010 line start */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_EOL 3 /* 00 0011 line end */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_GEN_SH1 8 /* 00 1000 Generic Short Packet Code 1 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_GEN_SH2 9 /* 00 1001 Generic Short Packet Code 2 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_GEN_SH3 10 /* 00 1010 Generic Short Packet Code 3 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_GEN_SH4 11 /* 00 1011 Generic Short Packet Code 4 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_GEN_SH5 12 /* 00 1100 Generic Short Packet Code 5 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_GEN_SH6 13 /* 00 1101 Generic Short Packet Code 6 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_GEN_SH7 14 /* 00 1110 Generic Short Packet Code 7 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_GEN_SH8 15 /* 00 1111 Generic Short Packet Code 8 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_YUV420_8_CSPS 28 /* 01 1100 YUV420 8-bit (Chroma Shifted Pixel Sampling) */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_YUV420_10_CSPS 29 /* 01 1101 YUV420 10-bit (Chroma Shifted Pixel Sampling) */
+/* used reserved MIPI positions for these */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RAW16 46
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RAW18 47
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RAW18_2 37
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RAW18_3 38
+
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_WIDTH 6
+
+/* Definition of format_types at the interface CSS --> input_selector*/
+/* !! Changes here should be copied to systems/isp/isp_css/bin/conv_transmitter_cmd.tcl !! */
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RGB888 0 // 36 'h24
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RGB555 1 // 33 'h
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RGB444 2 // 32
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RGB565 3 // 34
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RGB666 4 // 35
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RAW8 5 // 42
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RAW10 6 // 43
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RAW6 7 // 40
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RAW7 8 // 41
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RAW12 9 // 44
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RAW14 10 // 45
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_YUV420_8 11 // 24
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_YUV420_10 12 // 25
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_YUV422_8 13 // 30
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_YUV422_10 14 // 31
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_USR_DEF_1 15 // 48
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_YUV420_8L 16 // 26
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_Emb 17 // 18
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_USR_DEF_2 18 // 49
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_USR_DEF_3 19 // 50
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_USR_DEF_4 20 // 51
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_USR_DEF_5 21 // 52
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_USR_DEF_6 22 // 53
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_USR_DEF_7 23 // 54
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_USR_DEF_8 24 // 55
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_YUV420_8_CSPS 25 // 28
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_YUV420_10_CSPS 26 // 29
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RAW16 27 // ?
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RAW18 28 // ?
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RAW18_2 29 // ? Option 2 for depacketiser
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RAW18_3 30 // ? Option 3 for depacketiser
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_CUSTOM 31 // to signal custom decoding
+
+/* definition for the state machine of the data FIFO used to decode the different types of data */
+#define _HRT_CSS_RECEIVER_2400_YUV420_8_REPEAT_PTN 1
+#define _HRT_CSS_RECEIVER_2400_YUV420_10_REPEAT_PTN 5
+#define _HRT_CSS_RECEIVER_2400_YUV420_8L_REPEAT_PTN 1
+#define _HRT_CSS_RECEIVER_2400_YUV422_8_REPEAT_PTN 1
+#define _HRT_CSS_RECEIVER_2400_YUV422_10_REPEAT_PTN 5
+#define _HRT_CSS_RECEIVER_2400_RGB444_REPEAT_PTN 2
+#define _HRT_CSS_RECEIVER_2400_RGB555_REPEAT_PTN 2
+#define _HRT_CSS_RECEIVER_2400_RGB565_REPEAT_PTN 2
+#define _HRT_CSS_RECEIVER_2400_RGB666_REPEAT_PTN 9
+#define _HRT_CSS_RECEIVER_2400_RGB888_REPEAT_PTN 3
+#define _HRT_CSS_RECEIVER_2400_RAW6_REPEAT_PTN 3
+#define _HRT_CSS_RECEIVER_2400_RAW7_REPEAT_PTN 7
+#define _HRT_CSS_RECEIVER_2400_RAW8_REPEAT_PTN 1
+#define _HRT_CSS_RECEIVER_2400_RAW10_REPEAT_PTN 5
+#define _HRT_CSS_RECEIVER_2400_RAW12_REPEAT_PTN 3
+#define _HRT_CSS_RECEIVER_2400_RAW14_REPEAT_PTN 7
+
+#define _HRT_CSS_RECEIVER_2400_MAX_REPEAT_PTN _HRT_CSS_RECEIVER_2400_RGB666_REPEAT_PTN
+
+#define _HRT_CSS_RECEIVER_2400_BE_COMP_FMT_IDX 0
+#define _HRT_CSS_RECEIVER_2400_BE_COMP_FMT_WIDTH 3
+#define _HRT_CSS_RECEIVER_2400_BE_COMP_PRED_IDX 3
+#define _HRT_CSS_RECEIVER_2400_BE_COMP_PRED_WIDTH 1
+#define _HRT_CSS_RECEIVER_2400_BE_COMP_USD_BITS 4 /* bits per USD type */
+
+#define _HRT_CSS_RECEIVER_2400_BE_RAW16_DATAID_IDX 0
+#define _HRT_CSS_RECEIVER_2400_BE_RAW16_EN_IDX 6
+#define _HRT_CSS_RECEIVER_2400_BE_RAW18_DATAID_IDX 0
+#define _HRT_CSS_RECEIVER_2400_BE_RAW18_OPTION_IDX 6
+#define _HRT_CSS_RECEIVER_2400_BE_RAW18_EN_IDX 8
+
+#define _HRT_CSS_RECEIVER_2400_BE_COMP_NO_COMP 0
+#define _HRT_CSS_RECEIVER_2400_BE_COMP_10_6_10 1
+#define _HRT_CSS_RECEIVER_2400_BE_COMP_10_7_10 2
+#define _HRT_CSS_RECEIVER_2400_BE_COMP_10_8_10 3
+#define _HRT_CSS_RECEIVER_2400_BE_COMP_12_6_12 4
+#define _HRT_CSS_RECEIVER_2400_BE_COMP_12_7_12 5
+#define _HRT_CSS_RECEIVER_2400_BE_COMP_12_8_12 6
+
+
+/* packet bit definition */
+#define _HRT_CSS_RECEIVER_2400_PKT_SOP_IDX 32
+#define _HRT_CSS_RECEIVER_2400_PKT_SOP_BITS 1
+#define _HRT_CSS_RECEIVER_2400_PKT_CH_ID_IDX 22
+#define _HRT_CSS_RECEIVER_2400_PKT_CH_ID_BITS 2
+#define _HRT_CSS_RECEIVER_2400_PKT_FMT_ID_IDX 16
+#define _HRT_CSS_RECEIVER_2400_PKT_FMT_ID_BITS 6
+#define _HRT_CSS_RECEIVER_2400_PH_DATA_FIELD_IDX 0
+#define _HRT_CSS_RECEIVER_2400_PH_DATA_FIELD_BITS 16
+#define _HRT_CSS_RECEIVER_2400_PKT_PAYLOAD_IDX 0
+#define _HRT_CSS_RECEIVER_2400_PKT_PAYLOAD_BITS 32
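
For illustration only (not part of the patch): the IDX/BITS pairs above describe where each field sits in a received packet word, so a field can be recovered by shifting and masking. The helper name below is hypothetical, and the word is taken as 64-bit because the SOP flag sits at bit 32.

#include <stdint.h>

/* Extract a field of 'bits' width starting at bit 'idx' from a packet word. */
static inline uint32_t css_rx_pkt_field(uint64_t word, unsigned int idx, unsigned int bits)
{
	return (uint32_t)((word >> idx) & ((1ULL << bits) - 1ULL));
}

/*
 * Example use (variable names are made up):
 *   fmt_id = css_rx_pkt_field(w, _HRT_CSS_RECEIVER_2400_PKT_FMT_ID_IDX,
 *                                _HRT_CSS_RECEIVER_2400_PKT_FMT_ID_BITS);
 *   ch_id  = css_rx_pkt_field(w, _HRT_CSS_RECEIVER_2400_PKT_CH_ID_IDX,
 *                                _HRT_CSS_RECEIVER_2400_PKT_CH_ID_BITS);
 */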
+
+
+/*************************************************************************************************/
+/* Custom Decoding */
+/* These Custom Defs are defined based on design-time config in "csi_be_pixel_formatter.chdl" !! */
+/*************************************************************************************************/
+#define BE_CUST_EN_IDX 0 /* 2bits */
+#define BE_CUST_EN_DATAID_IDX 2 /* 6bits MIPI DATA ID */
+#define BE_CUST_EN_WIDTH 8
+#define BE_CUST_MODE_ALL 1 /* Enable Custom Decoding for all DATA IDs */
+#define BE_CUST_MODE_ONE 3 /* Enable Custom Decoding for ONE DATA ID, programmed in CUST_EN_DATA_ID */
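
A minimal sketch, not part of the patch: composing the custom-decode enable register from the two fields above. The macro name and the 0x30 data ID are arbitrary examples.

/* Enable custom decoding for a single MIPI data ID (0x30 chosen arbitrarily). */
#define BE_CUST_EN_EXAMPLE_VALUE \
	((BE_CUST_MODE_ONE << BE_CUST_EN_IDX) | (0x30u << BE_CUST_EN_DATAID_IDX))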
+
+/* Data State config = {get_bits(6bits), valid(1bit)} */
+#define BE_CUST_DATA_STATE_S0_IDX 0 /* 7bits */
+#define BE_CUST_DATA_STATE_S1_IDX 7 /* 7bits */
+#define BE_CUST_DATA_STATE_S2_IDX 14 /* 7bits */
+#define BE_CUST_DATA_STATE_WIDTH 21
+#define BE_CUST_DATA_STATE_VALID_IDX 0 /* 1bits */
+#define BE_CUST_DATA_STATE_GETBITS_IDX 1 /* 6bits */
+
+/* Pixel Extractor config */
+#define BE_CUST_PIX_EXT_DATA_ALIGN_IDX 0 /* 5bits */
+#define BE_CUST_PIX_EXT_PIX_ALIGN_IDX 5 /* 5bits */
+#define BE_CUST_PIX_EXT_PIX_MASK_IDX 10 /* 18bits */
+#define BE_CUST_PIX_EXT_PIX_EN_IDX 28 /* 1bits */
+#define BE_CUST_PIX_EXT_WIDTH 29
+
+/* Pixel Valid & EoP config = {[eop,valid](especial), [eop,valid](normal)} */
+#define BE_CUST_PIX_VALID_EOP_P0_IDX 0 /* 4bits */
+#define BE_CUST_PIX_VALID_EOP_P1_IDX 4 /* 4bits */
+#define BE_CUST_PIX_VALID_EOP_P2_IDX 8 /* 4bits */
+#define BE_CUST_PIX_VALID_EOP_P3_IDX 12 /* 4bits */
+#define BE_CUST_PIX_VALID_EOP_WIDTH 16
+#define BE_CUST_PIX_VALID_EOP_NOR_VALID_IDX 0 /* Normal (NO less get_bits case) Valid - 1bits */
+#define BE_CUST_PIX_VALID_EOP_NOR_EOP_IDX 1 /* Normal (NO less get_bits case) EoP - 1bits */
+#define BE_CUST_PIX_VALID_EOP_ESP_VALID_IDX 2 /* Especial (less get_bits case) Valid - 1bits */
+#define BE_CUST_PIX_VALID_EOP_ESP_EOP_IDX 3 /* Especial (less get_bits case) EoP - 1bits */
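
Sketch only, following the straightforward reading of the offsets above: each pixel gets a 4-bit {especial EoP, especial valid, normal EoP, normal valid} nibble, placed at its P0..P3 offset in the 16-bit config word. The helper name is hypothetical.

/* Build one 4-bit valid/EoP nibble; shift it to BE_CUST_PIX_VALID_EOP_Px_IDX when used. */
#define BE_CUST_PIX_NIBBLE(nor_valid, nor_eop, esp_valid, esp_eop)	\
	(((nor_valid) << BE_CUST_PIX_VALID_EOP_NOR_VALID_IDX) |	\
	 ((nor_eop)   << BE_CUST_PIX_VALID_EOP_NOR_EOP_IDX)   |	\
	 ((esp_valid) << BE_CUST_PIX_VALID_EOP_ESP_VALID_IDX) |	\
	 ((esp_eop)   << BE_CUST_PIX_VALID_EOP_ESP_EOP_IDX))

/* e.g. cfg |= BE_CUST_PIX_NIBBLE(1, 0, 1, 1) << BE_CUST_PIX_VALID_EOP_P2_IDX; */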
+
+#endif /* _mipi_backend_common_defs_h_ */
+#endif /* _css_receiver_2400_common_defs_h_ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/css_receiver_2400_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/css_receiver_2400_defs.h
new file mode 100644
index 000000000000..6f5b7d3d3715
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/css_receiver_2400_defs.h
@@ -0,0 +1,258 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _css_receiver_2400_defs_h_
+#define _css_receiver_2400_defs_h_
+
+#include "css_receiver_2400_common_defs.h"
+
+#define CSS_RECEIVER_DATA_WIDTH 8
+#define CSS_RECEIVER_RX_TRIG 4
+#define CSS_RECEIVER_RF_WORD 32
+#define CSS_RECEIVER_IMG_PROC_RF_ADDR 10
+#define CSS_RECEIVER_CSI_RF_ADDR 4
+#define CSS_RECEIVER_DATA_OUT 12
+#define CSS_RECEIVER_CHN_NO 2
+#define CSS_RECEIVER_DWORD_CNT 11
+#define CSS_RECEIVER_FORMAT_TYP 5
+#define CSS_RECEIVER_HRESPONSE 2
+#define CSS_RECEIVER_STATE_WIDTH 3
+#define CSS_RECEIVER_FIFO_DAT 32
+#define CSS_RECEIVER_CNT_VAL 2
+#define CSS_RECEIVER_PRED10_VAL 10
+#define CSS_RECEIVER_PRED12_VAL 12
+#define CSS_RECEIVER_CNT_WIDTH 8
+#define CSS_RECEIVER_WORD_CNT 16
+#define CSS_RECEIVER_PIXEL_LEN 6
+#define CSS_RECEIVER_PIXEL_CNT 5
+#define CSS_RECEIVER_COMP_8_BIT 8
+#define CSS_RECEIVER_COMP_7_BIT 7
+#define CSS_RECEIVER_COMP_6_BIT 6
+
+#define CSI_CONFIG_WIDTH 4
+
+/* division of gen_short data, ch_id and fmt_type over streaming data interface */
+#define _HRT_CSS_RECEIVER_2400_GEN_SHORT_STR_DATA_BIT_LSB 0
+#define _HRT_CSS_RECEIVER_2400_GEN_SHORT_STR_FMT_TYPE_BIT_LSB (_HRT_CSS_RECEIVER_2400_GEN_SHORT_STR_DATA_BIT_LSB + _HRT_CSS_RECEIVER_2400_GEN_SHORT_DATA_WIDTH)
+#define _HRT_CSS_RECEIVER_2400_GEN_SHORT_STR_CH_ID_BIT_LSB (_HRT_CSS_RECEIVER_2400_GEN_SHORT_STR_FMT_TYPE_BIT_LSB + _HRT_CSS_RECEIVER_2400_GEN_SHORT_FMT_TYPE_WIDTH)
+#define _HRT_CSS_RECEIVER_2400_GEN_SHORT_STR_DATA_BIT_MSB (_HRT_CSS_RECEIVER_2400_GEN_SHORT_STR_FMT_TYPE_BIT_LSB - 1)
+#define _HRT_CSS_RECEIVER_2400_GEN_SHORT_STR_FMT_TYPE_BIT_MSB (_HRT_CSS_RECEIVER_2400_GEN_SHORT_STR_CH_ID_BIT_LSB - 1)
+#define _HRT_CSS_RECEIVER_2400_GEN_SHORT_STR_CH_ID_BIT_MSB (_HRT_CSS_RECEIVER_2400_GEN_SHORT_STR_REAL_WIDTH - 1)
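
Editorial sketch: the LSB/MSB macros above carve the gen_short streaming word into data, fmt_type and ch_id fields; a generic slice macro (hypothetical name) makes the intent explicit.

/* Take bits [msb:lsb] of a streaming word. */
#define GEN_SHORT_STR_SLICE(w, lsb, msb) \
	(((w) >> (lsb)) & ((1u << ((msb) - (lsb) + 1u)) - 1u))

/* e.g. fmt_type = GEN_SHORT_STR_SLICE(w,
 *        _HRT_CSS_RECEIVER_2400_GEN_SHORT_STR_FMT_TYPE_BIT_LSB,
 *        _HRT_CSS_RECEIVER_2400_GEN_SHORT_STR_FMT_TYPE_BIT_MSB); */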
+
+#define _HRT_CSS_RECEIVER_2400_REG_ALIGN 4
+#define _HRT_CSS_RECEIVER_2400_BYTES_PER_PKT 4
+
+#define hrt_css_receiver_2400_4_lane_port_offset 0x100
+#define hrt_css_receiver_2400_1_lane_port_offset 0x200
+#define hrt_css_receiver_2400_2_lane_port_offset 0x300
+#define hrt_css_receiver_2400_backend_port_offset 0x100
+
+#define _HRT_CSS_RECEIVER_2400_DEVICE_READY_REG_IDX 0
+#define _HRT_CSS_RECEIVER_2400_IRQ_STATUS_REG_IDX 1
+#define _HRT_CSS_RECEIVER_2400_IRQ_ENABLE_REG_IDX 2
+#define _HRT_CSS_RECEIVER_2400_CSI2_FUNC_PROG_REG_IDX 3
+#define _HRT_CSS_RECEIVER_2400_INIT_COUNT_REG_IDX 4
+#define _HRT_CSS_RECEIVER_2400_FS_TO_LS_DELAY_REG_IDX 7
+#define _HRT_CSS_RECEIVER_2400_LS_TO_DATA_DELAY_REG_IDX 8
+#define _HRT_CSS_RECEIVER_2400_DATA_TO_LE_DELAY_REG_IDX 9
+#define _HRT_CSS_RECEIVER_2400_LE_TO_FE_DELAY_REG_IDX 10
+#define _HRT_CSS_RECEIVER_2400_FE_TO_FS_DELAY_REG_IDX 11
+#define _HRT_CSS_RECEIVER_2400_LE_TO_LS_DELAY_REG_IDX 12
+#define _HRT_CSS_RECEIVER_2400_TWO_PIXEL_EN_REG_IDX 13
+#define _HRT_CSS_RECEIVER_2400_RAW16_18_DATAID_REG_IDX 14
+#define _HRT_CSS_RECEIVER_2400_SYNC_COUNT_REG_IDX 15
+#define _HRT_CSS_RECEIVER_2400_RX_COUNT_REG_IDX 16
+#define _HRT_CSS_RECEIVER_2400_BACKEND_RST_REG_IDX 17
+#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC0_REG0_IDX 18
+#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC0_REG1_IDX 19
+#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC1_REG0_IDX 20
+#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC1_REG1_IDX 21
+#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC2_REG0_IDX 22
+#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC2_REG1_IDX 23
+#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC3_REG0_IDX 24
+#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC3_REG1_IDX 25
+#define _HRT_CSS_RECEIVER_2400_RAW18_REG_IDX 26
+#define _HRT_CSS_RECEIVER_2400_FORCE_RAW8_REG_IDX 27
+#define _HRT_CSS_RECEIVER_2400_RAW16_REG_IDX 28
+
+/* Interrupt bits for IRQ_STATUS and IRQ_ENABLE registers */
+#define _HRT_CSS_RECEIVER_2400_IRQ_OVERRUN_BIT 0
+#define _HRT_CSS_RECEIVER_2400_IRQ_RESERVED_BIT 1
+#define _HRT_CSS_RECEIVER_2400_IRQ_SLEEP_MODE_ENTRY_BIT 2
+#define _HRT_CSS_RECEIVER_2400_IRQ_SLEEP_MODE_EXIT_BIT 3
+#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_SOT_HS_BIT 4
+#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_SOT_SYNC_HS_BIT 5
+#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_CONTROL_BIT 6
+#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_ECC_DOUBLE_BIT 7
+#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_ECC_CORRECTED_BIT 8
+#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_ECC_NO_CORRECTION_BIT 9
+#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_CRC_BIT 10
+#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_ID_BIT 11
+#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_FRAME_SYNC_BIT 12
+#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_FRAME_DATA_BIT 13
+#define _HRT_CSS_RECEIVER_2400_IRQ_DATA_TIMEOUT_BIT 14
+#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_ESCAPE_BIT 15
+#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_LINE_SYNC_BIT 16
+
+#define _HRT_CSS_RECEIVER_2400_IRQ_OVERRUN_CAUSE_ "Fifo Overrun"
+#define _HRT_CSS_RECEIVER_2400_IRQ_RESERVED_CAUSE_ "Reserved"
+#define _HRT_CSS_RECEIVER_2400_IRQ_SLEEP_MODE_ENTRY_CAUSE_ "Sleep mode entry"
+#define _HRT_CSS_RECEIVER_2400_IRQ_SLEEP_MODE_EXIT_CAUSE_ "Sleep mode exit"
+#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_SOT_HS_CAUSE_ "Error high speed SOT"
+#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_SOT_SYNC_HS_CAUSE_ "Error high speed sync SOT"
+#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_CONTROL_CAUSE_ "Error control"
+#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_ECC_DOUBLE_CAUSE_ "Error correction double bit"
+#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_ECC_CORRECTED_CAUSE_ "Error correction single bit"
+#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_ECC_NO_CORRECTION_CAUSE_ "No error"
+#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_CRC_CAUSE_ "Error cyclic redundancy check"
+#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_ID_CAUSE_ "Error id"
+#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_FRAME_SYNC_CAUSE_ "Error frame sync"
+#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_FRAME_DATA_CAUSE_ "Error frame data"
+#define _HRT_CSS_RECEIVER_2400_IRQ_DATA_TIMEOUT_CAUSE_ "Data time-out"
+#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_ESCAPE_CAUSE_ "Error escape"
+#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_LINE_SYNC_CAUSE_ "Error line sync"
+
+/* Bits for CSI2_DEVICE_READY register */
+#define _HRT_CSS_RECEIVER_2400_CSI2_DEVICE_READY_IDX 0
+#define _HRT_CSS_RECEIVER_2400_CSI2_MASK_INIT_TIME_OUT_ERR_IDX 2
+#define _HRT_CSS_RECEIVER_2400_CSI2_MASK_OVER_RUN_ERR_IDX 3
+#define _HRT_CSS_RECEIVER_2400_CSI2_MASK_SOT_SYNC_ERR_IDX 4
+#define _HRT_CSS_RECEIVER_2400_CSI2_MASK_RECEIVE_DATA_TIME_OUT_ERR_IDX 5
+#define _HRT_CSS_RECEIVER_2400_CSI2_MASK_ECC_TWO_BIT_ERR_IDX 6
+#define _HRT_CSS_RECEIVER_2400_CSI2_MASK_DATA_ID_ERR_IDX 7
+
+
+/* Bits for CSI2_FUNC_PROG register */
+#define _HRT_CSS_RECEIVER_2400_CSI2_DATA_TIMEOUT_IDX 0
+#define _HRT_CSS_RECEIVER_2400_CSI2_DATA_TIMEOUT_BITS 19
+
+/* Bits for INIT_COUNT register */
+#define _HRT_CSS_RECEIVER_2400_INIT_TIMER_IDX 0
+#define _HRT_CSS_RECEIVER_2400_INIT_TIMER_BITS 16
+
+/* Bits for COUNT registers */
+#define _HRT_CSS_RECEIVER_2400_SYNC_COUNT_IDX 0
+#define _HRT_CSS_RECEIVER_2400_SYNC_COUNT_BITS 8
+#define _HRT_CSS_RECEIVER_2400_RX_COUNT_IDX 0
+#define _HRT_CSS_RECEIVER_2400_RX_COUNT_BITS 8
+
+/* Bits for RAW16_18_DATAID register */
+#define _HRT_CSS_RECEIVER_2400_RAW16_18_DATAID_RAW16_BITS_IDX 0
+#define _HRT_CSS_RECEIVER_2400_RAW16_18_DATAID_RAW16_BITS_BITS 6
+#define _HRT_CSS_RECEIVER_2400_RAW16_18_DATAID_RAW18_BITS_IDX 8
+#define _HRT_CSS_RECEIVER_2400_RAW16_18_DATAID_RAW18_BITS_BITS 6
+
+/* Bits for COMP_FORMAT register, this selects the compression data format */
+#define _HRT_CSS_RECEIVER_2400_COMP_RAW_BITS_IDX 0
+#define _HRT_CSS_RECEIVER_2400_COMP_RAW_BITS_BITS 8
+#define _HRT_CSS_RECEIVER_2400_COMP_NUM_BITS_IDX (_HRT_CSS_RECEIVER_2400_COMP_RAW_BITS_IDX + _HRT_CSS_RECEIVER_2400_COMP_RAW_BITS_BITS)
+#define _HRT_CSS_RECEIVER_2400_COMP_NUM_BITS_BITS 8
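
For illustration only: a COMP_FORMAT value carries the raw bit depth in the low byte and the compressed bit count in the next byte; the 10-to-6 pairing below is just an example, and the macro name is made up.

#define CSS_RX_COMP_FORMAT_10_6_EXAMPLE \
	((10u << _HRT_CSS_RECEIVER_2400_COMP_RAW_BITS_IDX) | \
	 ( 6u << _HRT_CSS_RECEIVER_2400_COMP_NUM_BITS_IDX))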
+
+/* Bits for COMP_PREDICT register, this selects the predictor algorithm */
+#define _HRT_CSS_RECEIVER_2400_PREDICT_NO_COMP 0
+#define _HRT_CSS_RECEIVER_2400_PREDICT_1 1
+#define _HRT_CSS_RECEIVER_2400_PREDICT_2 2
+
+/* Number of bits used for the delay registers */
+#define _HRT_CSS_RECEIVER_2400_DELAY_BITS 8
+
+/* Bits for COMP_SCHEME register, this selects the compression scheme for a VC */
+#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_USD1_BITS_IDX 0
+#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_USD2_BITS_IDX 5
+#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_USD3_BITS_IDX 10
+#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_USD4_BITS_IDX 15
+#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_USD5_BITS_IDX 20
+#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_USD6_BITS_IDX 25
+#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_USD7_BITS_IDX 0
+#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_USD8_BITS_IDX 5
+#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_USD_BITS_BITS 5
+#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_USD_FMT_BITS_IDX 0
+#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_USD_FMT_BITS_BITS 3
+#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_USD_PRED_BITS_IDX 3
+#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_USD_PRED_BITS_BITS 2
+
+
+/* BITS for backend RAW16 and RAW 18 registers */
+
+#define _HRT_CSS_RECEIVER_2400_RAW18_DATAID_IDX 0
+#define _HRT_CSS_RECEIVER_2400_RAW18_DATAID_BITS 6
+#define _HRT_CSS_RECEIVER_2400_RAW18_OPTION_IDX 6
+#define _HRT_CSS_RECEIVER_2400_RAW18_OPTION_BITS 2
+#define _HRT_CSS_RECEIVER_2400_RAW18_EN_IDX 8
+#define _HRT_CSS_RECEIVER_2400_RAW18_EN_BITS 1
+
+#define _HRT_CSS_RECEIVER_2400_RAW16_DATAID_IDX 0
+#define _HRT_CSS_RECEIVER_2400_RAW16_DATAID_BITS 6
+#define _HRT_CSS_RECEIVER_2400_RAW16_OPTION_IDX 6
+#define _HRT_CSS_RECEIVER_2400_RAW16_OPTION_BITS 2
+#define _HRT_CSS_RECEIVER_2400_RAW16_EN_IDX 8
+#define _HRT_CSS_RECEIVER_2400_RAW16_EN_BITS 1
+
+/* These hsync and vsync values are for HSS simulation only */
+#define _HRT_CSS_RECEIVER_2400_HSYNC_VAL (1<<16)
+#define _HRT_CSS_RECEIVER_2400_VSYNC_VAL (1<<17)
+
+#define _HRT_CSS_RECEIVER_2400_BE_STREAMING_WIDTH 28
+#define _HRT_CSS_RECEIVER_2400_BE_STREAMING_PIX_A_LSB 0
+#define _HRT_CSS_RECEIVER_2400_BE_STREAMING_PIX_A_MSB (_HRT_CSS_RECEIVER_2400_BE_STREAMING_PIX_A_LSB + CSS_RECEIVER_DATA_OUT - 1)
+#define _HRT_CSS_RECEIVER_2400_BE_STREAMING_PIX_A_VAL_BIT (_HRT_CSS_RECEIVER_2400_BE_STREAMING_PIX_A_MSB + 1)
+#define _HRT_CSS_RECEIVER_2400_BE_STREAMING_PIX_B_LSB (_HRT_CSS_RECEIVER_2400_BE_STREAMING_PIX_A_VAL_BIT + 1)
+#define _HRT_CSS_RECEIVER_2400_BE_STREAMING_PIX_B_MSB (_HRT_CSS_RECEIVER_2400_BE_STREAMING_PIX_B_LSB + CSS_RECEIVER_DATA_OUT - 1)
+#define _HRT_CSS_RECEIVER_2400_BE_STREAMING_PIX_B_VAL_BIT (_HRT_CSS_RECEIVER_2400_BE_STREAMING_PIX_B_MSB + 1)
+#define _HRT_CSS_RECEIVER_2400_BE_STREAMING_SOP_BIT (_HRT_CSS_RECEIVER_2400_BE_STREAMING_PIX_B_VAL_BIT + 1)
+#define _HRT_CSS_RECEIVER_2400_BE_STREAMING_EOP_BIT (_HRT_CSS_RECEIVER_2400_BE_STREAMING_SOP_BIT + 1)
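
Working the expressions above through with CSS_RECEIVER_DATA_OUT = 12 gives: pixel A in bits [11:0], its valid flag at bit 12, pixel B in bits [24:13], its valid flag at bit 25, SOP at bit 26 and EOP at bit 27, which matches the declared streaming width of 28 bits.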
+
+// SH Backend Register IDs
+#define _HRT_CSS_RECEIVER_2400_BE_GSP_ACC_OVL_REG_IDX 0
+#define _HRT_CSS_RECEIVER_2400_BE_SRST_REG_IDX 1
+#define _HRT_CSS_RECEIVER_2400_BE_TWO_PPC_REG_IDX 2
+#define _HRT_CSS_RECEIVER_2400_BE_COMP_FORMAT_REG0_IDX 3
+#define _HRT_CSS_RECEIVER_2400_BE_COMP_FORMAT_REG1_IDX 4
+#define _HRT_CSS_RECEIVER_2400_BE_COMP_FORMAT_REG2_IDX 5
+#define _HRT_CSS_RECEIVER_2400_BE_COMP_FORMAT_REG3_IDX 6
+#define _HRT_CSS_RECEIVER_2400_BE_SEL_REG_IDX 7
+#define _HRT_CSS_RECEIVER_2400_BE_RAW16_CONFIG_REG_IDX 8
+#define _HRT_CSS_RECEIVER_2400_BE_RAW18_CONFIG_REG_IDX 9
+#define _HRT_CSS_RECEIVER_2400_BE_FORCE_RAW8_REG_IDX 10
+#define _HRT_CSS_RECEIVER_2400_BE_IRQ_STATUS_REG_IDX 11
+#define _HRT_CSS_RECEIVER_2400_BE_IRQ_CLEAR_REG_IDX 12
+#define _HRT_CSS_RECEIVER_2400_BE_CUST_EN_REG_IDX 13
+#define _HRT_CSS_RECEIVER_2400_BE_CUST_DATA_STATE_REG_IDX 14 /* Data State 0,1,2 config */
+#define _HRT_CSS_RECEIVER_2400_BE_CUST_PIX_EXT_S0P0_REG_IDX 15 /* Pixel Extractor config for Data State 0 & Pix 0 */
+#define _HRT_CSS_RECEIVER_2400_BE_CUST_PIX_EXT_S0P1_REG_IDX 16 /* Pixel Extractor config for Data State 0 & Pix 1 */
+#define _HRT_CSS_RECEIVER_2400_BE_CUST_PIX_EXT_S0P2_REG_IDX 17 /* Pixel Extractor config for Data State 0 & Pix 2 */
+#define _HRT_CSS_RECEIVER_2400_BE_CUST_PIX_EXT_S0P3_REG_IDX 18 /* Pixel Extractor config for Data State 0 & Pix 3 */
+#define _HRT_CSS_RECEIVER_2400_BE_CUST_PIX_EXT_S1P0_REG_IDX 19 /* Pixel Extractor config for Data State 1 & Pix 0 */
+#define _HRT_CSS_RECEIVER_2400_BE_CUST_PIX_EXT_S1P1_REG_IDX 20 /* Pixel Extractor config for Data State 1 & Pix 1 */
+#define _HRT_CSS_RECEIVER_2400_BE_CUST_PIX_EXT_S1P2_REG_IDX 21 /* Pixel Extractor config for Data State 1 & Pix 2 */
+#define _HRT_CSS_RECEIVER_2400_BE_CUST_PIX_EXT_S1P3_REG_IDX 22 /* Pixel Extractor config for Data State 1 & Pix 3 */
+#define _HRT_CSS_RECEIVER_2400_BE_CUST_PIX_EXT_S2P0_REG_IDX 23 /* Pixel Extractor config for Data State 2 & Pix 0 */
+#define _HRT_CSS_RECEIVER_2400_BE_CUST_PIX_EXT_S2P1_REG_IDX 24 /* Pixel Extractor config for Data State 2 & Pix 1 */
+#define _HRT_CSS_RECEIVER_2400_BE_CUST_PIX_EXT_S2P2_REG_IDX 25 /* Pixel Extractor config for Data State 2 & Pix 2 */
+#define _HRT_CSS_RECEIVER_2400_BE_CUST_PIX_EXT_S2P3_REG_IDX 26 /* Pixel Extractor config for Data State 2 & Pix 3 */
+#define _HRT_CSS_RECEIVER_2400_BE_CUST_PIX_VALID_EOP_REG_IDX 27 /* Pixel Valid & EoP config for Pix 0,1,2,3 */
+
+#define _HRT_CSS_RECEIVER_2400_BE_NOF_REGISTERS 28
+
+#define _HRT_CSS_RECEIVER_2400_BE_SRST_HE 0
+#define _HRT_CSS_RECEIVER_2400_BE_SRST_RCF 1
+#define _HRT_CSS_RECEIVER_2400_BE_SRST_PF 2
+#define _HRT_CSS_RECEIVER_2400_BE_SRST_SM 3
+#define _HRT_CSS_RECEIVER_2400_BE_SRST_PD 4
+#define _HRT_CSS_RECEIVER_2400_BE_SRST_SD 5
+#define _HRT_CSS_RECEIVER_2400_BE_SRST_OT 6
+#define _HRT_CSS_RECEIVER_2400_BE_SRST_BC 7
+#define _HRT_CSS_RECEIVER_2400_BE_SRST_WIDTH 8
+
+#endif /* _css_receiver_2400_defs_h_ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/defs.h
new file mode 100644
index 000000000000..47505f41790c
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/defs.h
@@ -0,0 +1,36 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _HRT_DEFS_H_
+#define _HRT_DEFS_H_
+
+#ifndef HRTCAT
+#define _HRTCAT(m, n) m##n
+#define HRTCAT(m, n) _HRTCAT(m, n)
+#endif
+
+#ifndef HRTSTR
+#define _HRTSTR(x) #x
+#define HRTSTR(x) _HRTSTR(x)
+#endif
+
+#ifndef HRTMIN
+#define HRTMIN(a, b) (((a) < (b)) ? (a) : (b))
+#endif
+
+#ifndef HRTMAX
+#define HRTMAX(a, b) (((a) > (b)) ? (a) : (b))
+#endif
+
+#endif /* _HRT_DEFS_H_ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/dma_v2_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/dma_v2_defs.h
new file mode 100644
index 000000000000..d184a8b313c9
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/dma_v2_defs.h
@@ -0,0 +1,199 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _dma_v2_defs_h
+#define _dma_v2_defs_h
+
+#define _DMA_V2_NUM_CHANNELS_ID MaxNumChannels
+#define _DMA_V2_CONNECTIONS_ID Connections
+#define _DMA_V2_DEV_ELEM_WIDTHS_ID DevElemWidths
+#define _DMA_V2_DEV_FIFO_DEPTH_ID DevFifoDepth
+#define _DMA_V2_DEV_FIFO_RD_LAT_ID DevFifoRdLat
+#define _DMA_V2_DEV_FIFO_LAT_BYPASS_ID DevFifoRdLatBypass
+#define _DMA_V2_DEV_NO_BURST_ID DevNoBurst
+#define _DMA_V2_DEV_RD_ACCEPT_ID DevRdAccept
+#define _DMA_V2_DEV_SRMD_ID DevSRMD
+#define _DMA_V2_DEV_HAS_CRUN_ID CRunMasters
+#define _DMA_V2_CTRL_ACK_FIFO_DEPTH_ID CtrlAckFifoDepth
+#define _DMA_V2_CMD_FIFO_DEPTH_ID CommandFifoDepth
+#define _DMA_V2_CMD_FIFO_RD_LAT_ID CommandFifoRdLat
+#define _DMA_V2_CMD_FIFO_LAT_BYPASS_ID CommandFifoRdLatBypass
+#define _DMA_V2_NO_PACK_ID has_no_pack
+
+#define _DMA_V2_REG_ALIGN 4
+#define _DMA_V2_REG_ADDR_BITS 2
+
+/* Command word */
+#define _DMA_V2_CMD_IDX 0
+#define _DMA_V2_CMD_BITS 6
+#define _DMA_V2_CHANNEL_IDX (_DMA_V2_CMD_IDX + _DMA_V2_CMD_BITS)
+#define _DMA_V2_CHANNEL_BITS 5
+
+/* The command to set a parameter contains the PARAM field next */
+#define _DMA_V2_PARAM_IDX (_DMA_V2_CHANNEL_IDX + _DMA_V2_CHANNEL_BITS)
+#define _DMA_V2_PARAM_BITS 4
+
+/* Commands to read, write or init specific blocks contain these
+ three values */
+#define _DMA_V2_SPEC_DEV_A_XB_IDX (_DMA_V2_CHANNEL_IDX + _DMA_V2_CHANNEL_BITS)
+#define _DMA_V2_SPEC_DEV_A_XB_BITS 8
+#define _DMA_V2_SPEC_DEV_B_XB_IDX (_DMA_V2_SPEC_DEV_A_XB_IDX + _DMA_V2_SPEC_DEV_A_XB_BITS)
+#define _DMA_V2_SPEC_DEV_B_XB_BITS 8
+#define _DMA_V2_SPEC_YB_IDX (_DMA_V2_SPEC_DEV_B_XB_IDX + _DMA_V2_SPEC_DEV_B_XB_BITS)
+#define _DMA_V2_SPEC_YB_BITS (32-_DMA_V2_SPEC_DEV_B_XB_BITS-_DMA_V2_SPEC_DEV_A_XB_BITS-_DMA_V2_CMD_BITS-_DMA_V2_CHANNEL_BITS)
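
A minimal sketch (the macro name is hypothetical): with the offsets above, the command occupies bits [5:0], the channel bits [10:6] and, for parameter commands, the parameter ID bits [14:11].

#define DMA_V2_PACK_PARAM_CMD(cmd, ch, param)	\
	(((cmd)   << _DMA_V2_CMD_IDX)     |	\
	 ((ch)    << _DMA_V2_CHANNEL_IDX) |	\
	 ((param) << _DMA_V2_PARAM_IDX))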
+
+/* */
+#define _DMA_V2_CMD_CTRL_IDX 4
+#define _DMA_V2_CMD_CTRL_BITS 4
+
+/* Packing setup word */
+#define _DMA_V2_CONNECTION_IDX 0
+#define _DMA_V2_CONNECTION_BITS 4
+#define _DMA_V2_EXTENSION_IDX (_DMA_V2_CONNECTION_IDX + _DMA_V2_CONNECTION_BITS)
+#define _DMA_V2_EXTENSION_BITS 1
+
+/* Elements packing word */
+#define _DMA_V2_ELEMENTS_IDX 0
+#define _DMA_V2_ELEMENTS_BITS 8
+#define _DMA_V2_LEFT_CROPPING_IDX (_DMA_V2_ELEMENTS_IDX + _DMA_V2_ELEMENTS_BITS)
+#define _DMA_V2_LEFT_CROPPING_BITS 8
+
+#define _DMA_V2_WIDTH_IDX 0
+#define _DMA_V2_WIDTH_BITS 16
+
+#define _DMA_V2_HEIGHT_IDX 0
+#define _DMA_V2_HEIGHT_BITS 16
+
+#define _DMA_V2_STRIDE_IDX 0
+#define _DMA_V2_STRIDE_BITS 32
+
+/* Command IDs */
+#define _DMA_V2_MOVE_B2A_COMMAND 0
+#define _DMA_V2_MOVE_B2A_BLOCK_COMMAND 1
+#define _DMA_V2_MOVE_B2A_NO_SYNC_CHK_COMMAND 2
+#define _DMA_V2_MOVE_B2A_BLOCK_NO_SYNC_CHK_COMMAND 3
+#define _DMA_V2_MOVE_A2B_COMMAND 4
+#define _DMA_V2_MOVE_A2B_BLOCK_COMMAND 5
+#define _DMA_V2_MOVE_A2B_NO_SYNC_CHK_COMMAND 6
+#define _DMA_V2_MOVE_A2B_BLOCK_NO_SYNC_CHK_COMMAND 7
+#define _DMA_V2_INIT_A_COMMAND 8
+#define _DMA_V2_INIT_A_BLOCK_COMMAND 9
+#define _DMA_V2_INIT_A_NO_SYNC_CHK_COMMAND 10
+#define _DMA_V2_INIT_A_BLOCK_NO_SYNC_CHK_COMMAND 11
+#define _DMA_V2_INIT_B_COMMAND 12
+#define _DMA_V2_INIT_B_BLOCK_COMMAND 13
+#define _DMA_V2_INIT_B_NO_SYNC_CHK_COMMAND 14
+#define _DMA_V2_INIT_B_BLOCK_NO_SYNC_CHK_COMMAND 15
+#define _DMA_V2_NO_ACK_MOVE_B2A_NO_SYNC_CHK_COMMAND (_DMA_V2_MOVE_B2A_NO_SYNC_CHK_COMMAND + 16)
+#define _DMA_V2_NO_ACK_MOVE_B2A_BLOCK_NO_SYNC_CHK_COMMAND (_DMA_V2_MOVE_B2A_BLOCK_NO_SYNC_CHK_COMMAND + 16)
+#define _DMA_V2_NO_ACK_MOVE_A2B_NO_SYNC_CHK_COMMAND (_DMA_V2_MOVE_A2B_NO_SYNC_CHK_COMMAND + 16)
+#define _DMA_V2_NO_ACK_MOVE_A2B_BLOCK_NO_SYNC_CHK_COMMAND (_DMA_V2_MOVE_A2B_BLOCK_NO_SYNC_CHK_COMMAND + 16)
+#define _DMA_V2_NO_ACK_INIT_A_NO_SYNC_CHK_COMMAND (_DMA_V2_INIT_A_NO_SYNC_CHK_COMMAND + 16)
+#define _DMA_V2_NO_ACK_INIT_A_BLOCK_NO_SYNC_CHK_COMMAND (_DMA_V2_INIT_A_BLOCK_NO_SYNC_CHK_COMMAND + 16)
+#define _DMA_V2_NO_ACK_INIT_B_NO_SYNC_CHK_COMMAND (_DMA_V2_INIT_B_NO_SYNC_CHK_COMMAND + 16)
+#define _DMA_V2_NO_ACK_INIT_B_BLOCK_NO_SYNC_CHK_COMMAND (_DMA_V2_INIT_B_BLOCK_NO_SYNC_CHK_COMMAND + 16)
+#define _DMA_V2_CONFIG_CHANNEL_COMMAND 32
+#define _DMA_V2_SET_CHANNEL_PARAM_COMMAND 33
+#define _DMA_V2_SET_CRUN_COMMAND 62
+
+/* Channel Parameter IDs */
+#define _DMA_V2_PACKING_SETUP_PARAM 0
+#define _DMA_V2_STRIDE_A_PARAM 1
+#define _DMA_V2_ELEM_CROPPING_A_PARAM 2
+#define _DMA_V2_WIDTH_A_PARAM 3
+#define _DMA_V2_STRIDE_B_PARAM 4
+#define _DMA_V2_ELEM_CROPPING_B_PARAM 5
+#define _DMA_V2_WIDTH_B_PARAM 6
+#define _DMA_V2_HEIGHT_PARAM 7
+#define _DMA_V2_QUEUED_CMDS 8
+
+/* Parameter Constants */
+#define _DMA_V2_ZERO_EXTEND 0
+#define _DMA_V2_SIGN_EXTEND 1
+
+ /* SLAVE address map */
+#define _DMA_V2_SEL_FSM_CMD 0
+#define _DMA_V2_SEL_CH_REG 1
+#define _DMA_V2_SEL_CONN_GROUP 2
+#define _DMA_V2_SEL_DEV_INTERF 3
+
+#define _DMA_V2_ADDR_SEL_COMP_IDX 12
+#define _DMA_V2_ADDR_SEL_COMP_BITS 4
+#define _DMA_V2_ADDR_SEL_CH_REG_IDX 2
+#define _DMA_V2_ADDR_SEL_CH_REG_BITS 6
+#define _DMA_V2_ADDR_SEL_PARAM_IDX (_DMA_V2_ADDR_SEL_CH_REG_BITS+_DMA_V2_ADDR_SEL_CH_REG_IDX)
+#define _DMA_V2_ADDR_SEL_PARAM_BITS 4
+
+#define _DMA_V2_ADDR_SEL_GROUP_COMP_IDX 2
+#define _DMA_V2_ADDR_SEL_GROUP_COMP_BITS 6
+#define _DMA_V2_ADDR_SEL_GROUP_COMP_INFO_IDX (_DMA_V2_ADDR_SEL_GROUP_COMP_BITS + _DMA_V2_ADDR_SEL_GROUP_COMP_IDX)
+#define _DMA_V2_ADDR_SEL_GROUP_COMP_INFO_BITS 4
+
+#define _DMA_V2_ADDR_SEL_DEV_INTERF_IDX_IDX 2
+#define _DMA_V2_ADDR_SEL_DEV_INTERF_IDX_BITS 6
+#define _DMA_V2_ADDR_SEL_DEV_INTERF_INFO_IDX (_DMA_V2_ADDR_SEL_DEV_INTERF_IDX_IDX+_DMA_V2_ADDR_SEL_DEV_INTERF_IDX_BITS)
+#define _DMA_V2_ADDR_SEL_DEV_INTERF_INFO_BITS 4
+
+#define _DMA_V2_FSM_GROUP_CMD_IDX 0
+#define _DMA_V2_FSM_GROUP_ADDR_SRC_IDX 1
+#define _DMA_V2_FSM_GROUP_ADDR_DEST_IDX 2
+#define _DMA_V2_FSM_GROUP_CMD_CTRL_IDX 3
+#define _DMA_V2_FSM_GROUP_FSM_CTRL_IDX 4
+#define _DMA_V2_FSM_GROUP_FSM_PACK_IDX 5
+#define _DMA_V2_FSM_GROUP_FSM_REQ_IDX 6
+#define _DMA_V2_FSM_GROUP_FSM_WR_IDX 7
+
+#define _DMA_V2_FSM_GROUP_FSM_CTRL_STATE_IDX 0
+#define _DMA_V2_FSM_GROUP_FSM_CTRL_REQ_DEV_IDX 1
+#define _DMA_V2_FSM_GROUP_FSM_CTRL_REQ_ADDR_IDX 2
+#define _DMA_V2_FSM_GROUP_FSM_CTRL_REQ_STRIDE_IDX 3
+#define _DMA_V2_FSM_GROUP_FSM_CTRL_REQ_XB_IDX 4
+#define _DMA_V2_FSM_GROUP_FSM_CTRL_REQ_YB_IDX 5
+#define _DMA_V2_FSM_GROUP_FSM_CTRL_PACK_REQ_DEV_IDX 6
+#define _DMA_V2_FSM_GROUP_FSM_CTRL_PACK_WR_DEV_IDX 7
+#define _DMA_V2_FSM_GROUP_FSM_CTRL_WR_ADDR_IDX 8
+#define _DMA_V2_FSM_GROUP_FSM_CTRL_WR_STRIDE_IDX 9
+#define _DMA_V2_FSM_GROUP_FSM_CTRL_PACK_REQ_XB_IDX 10
+#define _DMA_V2_FSM_GROUP_FSM_CTRL_PACK_WR_YB_IDX 11
+#define _DMA_V2_FSM_GROUP_FSM_CTRL_PACK_WR_XB_IDX 12
+#define _DMA_V2_FSM_GROUP_FSM_CTRL_PACK_ELEM_REQ_IDX 13
+#define _DMA_V2_FSM_GROUP_FSM_CTRL_PACK_ELEM_WR_IDX 14
+#define _DMA_V2_FSM_GROUP_FSM_CTRL_PACK_S_Z_IDX 15
+#define _DMA_V2_FSM_GROUP_FSM_CTRL_CMD_CTRL_IDX 15
+
+#define _DMA_V2_FSM_GROUP_FSM_PACK_STATE_IDX 0
+#define _DMA_V2_FSM_GROUP_FSM_PACK_CNT_YB_IDX 1
+#define _DMA_V2_FSM_GROUP_FSM_PACK_CNT_XB_REQ_IDX 2
+#define _DMA_V2_FSM_GROUP_FSM_PACK_CNT_XB_WR_IDX 3
+
+#define _DMA_V2_FSM_GROUP_FSM_REQ_STATE_IDX 0
+#define _DMA_V2_FSM_GROUP_FSM_REQ_CNT_YB_IDX 1
+#define _DMA_V2_FSM_GROUP_FSM_REQ_CNT_XB_IDX 2
+#define _DMA_V2_FSM_GROUP_FSM_REQ_XB_REMAINING_IDX 3
+#define _DMA_V2_FSM_GROUP_FSM_REQ_CNT_BURST_IDX 4
+
+#define _DMA_V2_FSM_GROUP_FSM_WR_STATE_IDX 0
+#define _DMA_V2_FSM_GROUP_FSM_WR_CNT_YB_IDX 1
+#define _DMA_V2_FSM_GROUP_FSM_WR_CNT_XB_IDX 2
+#define _DMA_V2_FSM_GROUP_FSM_WR_XB_REMAINING_IDX 3
+#define _DMA_V2_FSM_GROUP_FSM_WR_CNT_BURST_IDX 4
+
+#define _DMA_V2_DEV_INTERF_REQ_SIDE_STATUS_IDX 0
+#define _DMA_V2_DEV_INTERF_SEND_SIDE_STATUS_IDX 1
+#define _DMA_V2_DEV_INTERF_FIFO_STATUS_IDX 2
+#define _DMA_V2_DEV_INTERF_REQ_ONLY_COMPLETE_BURST_IDX 3
+#define _DMA_V2_DEV_INTERF_MAX_BURST_IDX 4
+#define _DMA_V2_DEV_INTERF_CHK_ADDR_ALIGN 5
+
+#endif /* _dma_v2_defs_h */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/gdc_v2_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/gdc_v2_defs.h
new file mode 100644
index 000000000000..77722d205701
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/gdc_v2_defs.h
@@ -0,0 +1,170 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef HRT_GDC_v2_defs_h_
+#define HRT_GDC_v2_defs_h_
+
+#define HRT_GDC_IS_V2
+
+#define HRT_GDC_N 1024 /* Top-level design constant, equal to the number of entries in the LUT */
+#define HRT_GDC_FRAC_BITS 10 /* Number of fractional bits in the GDC block, driven by the size of the LUT */
+
+#define HRT_GDC_BLI_FRAC_BITS 4 /* Number of fractional bits for the bi-linear interpolation type */
+#define HRT_GDC_BLI_COEF_ONE (1 << HRT_GDC_BLI_FRAC_BITS)
+
+#define HRT_GDC_BCI_COEF_BITS 14 /* 14 bits per coefficient */
+#define HRT_GDC_BCI_COEF_ONE (1 << (HRT_GDC_BCI_COEF_BITS-2)) /* We represent signed 10 bit coefficients. */
+ /* The supported range is [-256, .., +256] */
+ /* in 14-bit signed notation, */
+ /* We need all ten bits (MSB must be zero). */
+ /* -s is inserted to solve this issue, and */
+ /* therefore "1" is equal to +256. */
+#define HRT_GDC_BCI_COEF_MASK ((1 << HRT_GDC_BCI_COEF_BITS) - 1)
+
+#define HRT_GDC_LUT_BYTES (HRT_GDC_N*4*2) /* 1024 addresses, 4 coefficients per address, */
+ /* 2 bytes per coefficient */
+
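Spelled out: HRT_GDC_N = 1024 = 2^10, which is why HRT_GDC_FRAC_BITS is 10; HRT_GDC_BCI_COEF_ONE works out to 1 << 12 = 4096; and HRT_GDC_LUT_BYTES is 1024 * 4 * 2 = 8192 bytes.
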
+#define _HRT_GDC_REG_ALIGN 4
+
+ // 31 30 29 25 24 0
+ // |-----|---|--------|------------------------|
+ // | CMD | C | Reg_ID | Value |
+
+
+ // There are just two commands possible for the GDC block:
+ // 1 - Configure reg
+ // 0 - Data token
+
+ // C - Reserved bit
+ //     Used in the protocol to indicate whether this is a C-run or another type of run.
+ //     For a C-run this bit has the value 1; for all other runs it is 0.
+
+ // Reg_ID - Address of the register to be configured
+
+ // Value - Value to store to the addressed register, maximum of 24 bits
+
+ // Configure reg command is not followed by any other token.
+ // The address of the register and the data to be filled in are contained in the same token.
+
+ // When the first data token is received, it must be:
+ // 1. FRX and FRY (device configured in one of the scaling modes) ***DEFAULT MODE***, or,
+ // 2. P0'X (device configured in one of the tetragon modes)
+ // After the first data token is received, a pre-defined number of tokens follows, with the following meaning:
+ // 1. two tokens: SRC address ; DST address
+ // 2. nine tokens: P0'Y, .., P3'Y ; SRC address ; DST address
+
+#define HRT_GDC_CONFIG_CMD 1
+#define HRT_GDC_DATA_CMD 0
+
+
+#define HRT_GDC_CMD_POS 31
+#define HRT_GDC_CMD_BITS 1
+#define HRT_GDC_CRUN_POS 30
+#define HRT_GDC_REG_ID_POS 25
+#define HRT_GDC_REG_ID_BITS 5
+#define HRT_GDC_DATA_POS 0
+#define HRT_GDC_DATA_BITS 25
+
+#define HRT_GDC_FRYIPXFRX_BITS 26
+#define HRT_GDC_P0X_BITS 23
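
Editorial sketch, not part of the patch: a "configure register" token assembled per the layout described above, with the C-run bit left at 0 for a normal run. The macro name is hypothetical.

#define HRT_GDC_CFG_TOKEN(reg_id, value)				\
	(((unsigned int)HRT_GDC_CONFIG_CMD << HRT_GDC_CMD_POS)    |	\
	 ((unsigned int)(reg_id)           << HRT_GDC_REG_ID_POS) |	\
	 ((unsigned int)(value)            << HRT_GDC_DATA_POS))

/* e.g. HRT_GDC_CFG_TOKEN(HRT_GDC_WOIX_IDX, 128) -- register indices are listed further down. */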
+
+
+#define HRT_GDC_MAX_OXDIM (8192-64)
+#define HRT_GDC_MAX_OYDIM 4095
+#define HRT_GDC_MAX_IXDIM (8192-64)
+#define HRT_GDC_MAX_IYDIM 4095
+#define HRT_GDC_MAX_DS_FAC 16
+#define HRT_GDC_MAX_DX (HRT_GDC_MAX_DS_FAC*HRT_GDC_N - 1)
+#define HRT_GDC_MAX_DY HRT_GDC_MAX_DX
+
+
+/* GDC lookup table entries are 10-bit values, but they are
+   stored two by two as 32-bit values, yielding 16 bits per entry.
+   A GDC lookup table contains 64 * 4 elements */
+
+#define HRT_GDC_PERF_1_1_pix 0
+#define HRT_GDC_PERF_2_1_pix 1
+#define HRT_GDC_PERF_1_2_pix 2
+#define HRT_GDC_PERF_2_2_pix 3
+
+#define HRT_GDC_NND_MODE 0
+#define HRT_GDC_BLI_MODE 1
+#define HRT_GDC_BCI_MODE 2
+#define HRT_GDC_LUT_MODE 3
+
+#define HRT_GDC_SCAN_STB 0
+#define HRT_GDC_SCAN_STR 1
+
+#define HRT_GDC_MODE_SCALING 0
+#define HRT_GDC_MODE_TETRAGON 1
+
+#define HRT_GDC_LUT_COEFF_OFFSET 16
+#define HRT_GDC_FRY_BIT_OFFSET 16
+// FRYIPXFRX is the only register where we store two values in one field,
+// to save one token in the scaling protocol.
+// This way the scaling protocol needs three tokens instead of four.
+// The register bit-map is:
+// 31 26 25 16 15 10 9 0
+// |------|----------|------|----------|
+// | XXXX | FRY | IPX | FRX |
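
Sketch only: packing the three values per the bit-map above (FRX in [9:0], IPX in [15:10], FRY from HRT_GDC_FRY_BIT_OFFSET upward); the macro name is made up, and reusing HRT_GDC_FRAC_BITS as the IPX shift is an assumption based on the 10-bit FRX field.

#define HRT_GDC_PACK_FRYIPXFRX(fry, ipx, frx)		\
	(((fry) << HRT_GDC_FRY_BIT_OFFSET) |		\
	 ((ipx) << HRT_GDC_FRAC_BITS)      |		\
	 (frx))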
+
+
+#define HRT_GDC_CE_FSM0_POS 0
+#define HRT_GDC_CE_FSM0_LEN 2
+#define HRT_GDC_CE_OPY_POS 2
+#define HRT_GDC_CE_OPY_LEN 14
+#define HRT_GDC_CE_OPX_POS 16
+#define HRT_GDC_CE_OPX_LEN 16
+// CHK_ENGINE register bit-map:
+// 31 16 15 2 1 0
+// |----------------|-----------|----|
+// | OPX | OPY |FSM0|
+// However, for the time being at least, this implementation is
+// meaningless in the HSS model, so we just return 0.
+
+
+#define HRT_GDC_CHK_ENGINE_IDX 0
+#define HRT_GDC_WOIX_IDX 1
+#define HRT_GDC_WOIY_IDX 2
+#define HRT_GDC_BPP_IDX 3
+#define HRT_GDC_FRYIPXFRX_IDX 4
+#define HRT_GDC_OXDIM_IDX 5
+#define HRT_GDC_OYDIM_IDX 6
+#define HRT_GDC_SRC_ADDR_IDX 7
+#define HRT_GDC_SRC_END_ADDR_IDX 8
+#define HRT_GDC_SRC_WRAP_ADDR_IDX 9
+#define HRT_GDC_SRC_STRIDE_IDX 10
+#define HRT_GDC_DST_ADDR_IDX 11
+#define HRT_GDC_DST_STRIDE_IDX 12
+#define HRT_GDC_DX_IDX 13
+#define HRT_GDC_DY_IDX 14
+#define HRT_GDC_P0X_IDX 15
+#define HRT_GDC_P0Y_IDX 16
+#define HRT_GDC_P1X_IDX 17
+#define HRT_GDC_P1Y_IDX 18
+#define HRT_GDC_P2X_IDX 19
+#define HRT_GDC_P2Y_IDX 20
+#define HRT_GDC_P3X_IDX 21
+#define HRT_GDC_P3Y_IDX 22
+#define HRT_GDC_PERF_POINT_IDX 23 // 1x1 ; 1x2 ; 2x1 ; 2x2 pixels per cc
+#define HRT_GDC_INTERP_TYPE_IDX 24 // NND ; BLI ; BCI ; LUT
+#define HRT_GDC_SCAN_IDX 25 // 0 = STB (Slide To Bottom) ; 1 = STR (Slide To Right)
+#define HRT_GDC_PROC_MODE_IDX 26 // 0 = Scaling ; 1 = Tetragon
+
+#define HRT_GDC_LUT_IDX 32
+
+
+#endif /* HRT_GDC_v2_defs_h_ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/gp_regs_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/gp_regs_defs.h
new file mode 100644
index 000000000000..34e734f6648e
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/gp_regs_defs.h
@@ -0,0 +1,22 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _gp_regs_defs_h
+#define _gp_regs_defs_h
+
+#define _HRT_GP_REGS_IS_FWD_REG_IDX 0
+
+#define _HRT_GP_REGS_REG_ALIGN 4
+
+#endif /* _gp_regs_defs_h */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/gp_timer_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/gp_timer_defs.h
new file mode 100644
index 000000000000..3082e2f5e014
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/gp_timer_defs.h
@@ -0,0 +1,36 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _gp_timer_defs_h
+#define _gp_timer_defs_h
+
+#define _HRT_GP_TIMER_REG_ALIGN 4
+
+#define HIVE_GP_TIMER_RESET_REG_IDX 0
+#define HIVE_GP_TIMER_OVERALL_ENABLE_REG_IDX 1
+#define HIVE_GP_TIMER_ENABLE_REG_IDX(timer) (HIVE_GP_TIMER_OVERALL_ENABLE_REG_IDX + 1 + timer)
+#define HIVE_GP_TIMER_VALUE_REG_IDX(timer,timers) (HIVE_GP_TIMER_ENABLE_REG_IDX(timers) + timer)
+#define HIVE_GP_TIMER_COUNT_TYPE_REG_IDX(timer,timers) (HIVE_GP_TIMER_VALUE_REG_IDX(timers, timers) + timer)
+#define HIVE_GP_TIMER_SIGNAL_SELECT_REG_IDX(timer,timers) (HIVE_GP_TIMER_COUNT_TYPE_REG_IDX(timers, timers) + timer)
+#define HIVE_GP_TIMER_IRQ_TRIGGER_VALUE_REG_IDX(irq,timers) (HIVE_GP_TIMER_SIGNAL_SELECT_REG_IDX(timers, timers) + irq)
+#define HIVE_GP_TIMER_IRQ_TIMER_SELECT_REG_IDX(irq,timers,irqs) (HIVE_GP_TIMER_IRQ_TRIGGER_VALUE_REG_IDX(irqs, timers) + irq)
+#define HIVE_GP_TIMER_IRQ_ENABLE_REG_IDX(irq,timers,irqs) (HIVE_GP_TIMER_IRQ_TIMER_SELECT_REG_IDX(irqs, timers, irqs) + irq)
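
Worked example, assuming 8 timers and 2 IRQs (the counts defined in hive_isp_css_defs.h later in this series): reset and overall-enable sit at indices 0 and 1, per-timer enables at 2..9, timer values at 10..17, count types at 18..25, signal selects at 26..33, IRQ trigger values at 34..35, IRQ timer selects at 36..37 and IRQ enables at 38..39.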
+
+#define HIVE_GP_TIMER_COUNT_TYPE_HIGH 0
+#define HIVE_GP_TIMER_COUNT_TYPE_LOW 1
+#define HIVE_GP_TIMER_COUNT_TYPE_POSEDGE 2
+#define HIVE_GP_TIMER_COUNT_TYPE_NEGEDGE 3
+#define HIVE_GP_TIMER_COUNT_TYPES 4
+
+#endif /* _gp_timer_defs_h */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/gpio_block_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/gpio_block_defs.h
new file mode 100644
index 000000000000..a807d4c99041
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/gpio_block_defs.h
@@ -0,0 +1,42 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _gpio_block_defs_h_
+#define _gpio_block_defs_h_
+
+#define _HRT_GPIO_BLOCK_REG_ALIGN 4
+
+/* R/W registers */
+#define _gpio_block_reg_do_e 0
+#define _gpio_block_reg_do_select 1
+#define _gpio_block_reg_do_0 2
+#define _gpio_block_reg_do_1 3
+#define _gpio_block_reg_do_pwm_cnt_0 4
+#define _gpio_block_reg_do_pwm_cnt_1 5
+#define _gpio_block_reg_do_pwm_cnt_2 6
+#define _gpio_block_reg_do_pwm_cnt_3 7
+#define _gpio_block_reg_do_pwm_main_cnt 8
+#define _gpio_block_reg_do_pwm_enable 9
+#define _gpio_block_reg_di_debounce_sel 10
+#define _gpio_block_reg_di_debounce_cnt_0 11
+#define _gpio_block_reg_di_debounce_cnt_1 12
+#define _gpio_block_reg_di_debounce_cnt_2 13
+#define _gpio_block_reg_di_debounce_cnt_3 14
+#define _gpio_block_reg_di_active_level 15
+
+
+/* read-only registers */
+#define _gpio_block_reg_di 16
+
+#endif /* _gpio_block_defs_h_ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/hive_isp_css_2401_irq_types_hrt.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/hive_isp_css_2401_irq_types_hrt.h
new file mode 100644
index 000000000000..2f7cb2dff0e9
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/hive_isp_css_2401_irq_types_hrt.h
@@ -0,0 +1,68 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _HIVE_ISP_CSS_2401_IRQ_TYPES_HRT_H_
+#define _HIVE_ISP_CSS_2401_IRQ_TYPES_HRT_H_
+
+/*
+ * These are the indices of each interrupt in the interrupt
+ * controller's registers. These can be used as the irq_id
+ * argument to the HRT functions in irq_controller.h.
+ *
+ * The definitions are taken from <system>_defs.h
+ */
+typedef enum hrt_isp_css_irq {
+ hrt_isp_css_irq_gpio_pin_0 = HIVE_GP_DEV_IRQ_GPIO_PIN_0_BIT_ID ,
+ hrt_isp_css_irq_gpio_pin_1 = HIVE_GP_DEV_IRQ_GPIO_PIN_1_BIT_ID ,
+ hrt_isp_css_irq_gpio_pin_2 = HIVE_GP_DEV_IRQ_GPIO_PIN_2_BIT_ID ,
+ hrt_isp_css_irq_gpio_pin_3 = HIVE_GP_DEV_IRQ_GPIO_PIN_3_BIT_ID ,
+ hrt_isp_css_irq_gpio_pin_4 = HIVE_GP_DEV_IRQ_GPIO_PIN_4_BIT_ID ,
+ hrt_isp_css_irq_gpio_pin_5 = HIVE_GP_DEV_IRQ_GPIO_PIN_5_BIT_ID ,
+ hrt_isp_css_irq_gpio_pin_6 = HIVE_GP_DEV_IRQ_GPIO_PIN_6_BIT_ID ,
+ hrt_isp_css_irq_gpio_pin_7 = HIVE_GP_DEV_IRQ_GPIO_PIN_7_BIT_ID ,
+ hrt_isp_css_irq_gpio_pin_8 = HIVE_GP_DEV_IRQ_GPIO_PIN_8_BIT_ID ,
+ hrt_isp_css_irq_gpio_pin_9 = HIVE_GP_DEV_IRQ_GPIO_PIN_9_BIT_ID ,
+ hrt_isp_css_irq_gpio_pin_10 = HIVE_GP_DEV_IRQ_GPIO_PIN_10_BIT_ID ,
+ hrt_isp_css_irq_gpio_pin_11 = HIVE_GP_DEV_IRQ_GPIO_PIN_11_BIT_ID ,
+ hrt_isp_css_irq_sp = HIVE_GP_DEV_IRQ_SP_BIT_ID ,
+ hrt_isp_css_irq_isp = HIVE_GP_DEV_IRQ_ISP_BIT_ID ,
+ hrt_isp_css_irq_isys = HIVE_GP_DEV_IRQ_ISYS_BIT_ID ,
+ hrt_isp_css_irq_isel = HIVE_GP_DEV_IRQ_ISEL_BIT_ID ,
+ hrt_isp_css_irq_ifmt = HIVE_GP_DEV_IRQ_IFMT_BIT_ID ,
+ hrt_isp_css_irq_sp_stream_mon = HIVE_GP_DEV_IRQ_SP_STREAM_MON_BIT_ID ,
+ hrt_isp_css_irq_isp_stream_mon = HIVE_GP_DEV_IRQ_ISP_STREAM_MON_BIT_ID ,
+ hrt_isp_css_irq_mod_stream_mon = HIVE_GP_DEV_IRQ_MOD_STREAM_MON_BIT_ID ,
+ hrt_isp_css_irq_is2401 = HIVE_GP_DEV_IRQ_IS2401_BIT_ID ,
+ hrt_isp_css_irq_isp_bamem_error = HIVE_GP_DEV_IRQ_ISP_BAMEM_ERROR_BIT_ID ,
+ hrt_isp_css_irq_isp_dmem_error = HIVE_GP_DEV_IRQ_ISP_DMEM_ERROR_BIT_ID ,
+ hrt_isp_css_irq_sp_icache_mem_error = HIVE_GP_DEV_IRQ_SP_ICACHE_MEM_ERROR_BIT_ID ,
+ hrt_isp_css_irq_sp_dmem_error = HIVE_GP_DEV_IRQ_SP_DMEM_ERROR_BIT_ID ,
+ hrt_isp_css_irq_mmu_cache_mem_error = HIVE_GP_DEV_IRQ_MMU_CACHE_MEM_ERROR_BIT_ID ,
+ hrt_isp_css_irq_gp_timer_0 = HIVE_GP_DEV_IRQ_GP_TIMER_0_BIT_ID ,
+ hrt_isp_css_irq_gp_timer_1 = HIVE_GP_DEV_IRQ_GP_TIMER_1_BIT_ID ,
+ hrt_isp_css_irq_sw_pin_0 = HIVE_GP_DEV_IRQ_SW_PIN_0_BIT_ID ,
+ hrt_isp_css_irq_sw_pin_1 = HIVE_GP_DEV_IRQ_SW_PIN_1_BIT_ID ,
+ hrt_isp_css_irq_dma = HIVE_GP_DEV_IRQ_DMA_BIT_ID ,
+ hrt_isp_css_irq_sp_stream_mon_b = HIVE_GP_DEV_IRQ_SP_STREAM_MON_B_BIT_ID ,
+ /* this must (obviously) be the last one in the enum */
+ hrt_isp_css_irq_num_irqs
+} hrt_isp_css_irq_t;
+
+typedef enum hrt_isp_css_irq_status {
+ hrt_isp_css_irq_status_error,
+ hrt_isp_css_irq_status_more_irqs,
+ hrt_isp_css_irq_status_success
+} hrt_isp_css_irq_status_t;
+
+#endif /* _HIVE_ISP_CSS_2401_IRQ_TYPES_HRT_H_ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/hive_isp_css_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/hive_isp_css_defs.h
new file mode 100644
index 000000000000..5a2ce9108ae4
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/hive_isp_css_defs.h
@@ -0,0 +1,435 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _hive_isp_css_defs_h__
+#define _hive_isp_css_defs_h__
+
+#define _HIVE_ISP_CSS_2401_SYSTEM 1
+#define HIVE_ISP_CTRL_DATA_WIDTH 32
+#define HIVE_ISP_CTRL_ADDRESS_WIDTH 32
+#define HIVE_ISP_CTRL_MAX_BURST_SIZE 1
+#define HIVE_ISP_DDR_ADDRESS_WIDTH 36
+
+#define HIVE_ISP_HOST_MAX_BURST_SIZE 8 /* host supports bursts in order to prevent repeating DDRAM accesses */
+#define HIVE_ISP_NUM_GPIO_PINS 12
+
+/* This list of vector num_elems/elem_bits pairs is valid both as a C initializer
+   and in the DMA parameter list */
+#define HIVE_ISP_DDR_DMA_SPECS {{32, 8}, {16, 16}, {18, 14}, {25, 10}, {21, 12}}
+#define HIVE_ISP_DDR_WORD_BITS 256
+#define HIVE_ISP_DDR_WORD_BYTES (HIVE_ISP_DDR_WORD_BITS/8)
+#define HIVE_ISP_DDR_BYTES (512 * 1024 * 1024)
+#define HIVE_ISP_DDR_BYTES_RTL (127 * 1024 * 1024)
+#define HIVE_ISP_DDR_SMALL_BYTES (128 * 256 / 8)
+#define HIVE_ISP_PAGE_SHIFT 12
+#define HIVE_ISP_PAGE_SIZE (1<<HIVE_ISP_PAGE_SHIFT)
+
+#define CSS_DDR_WORD_BITS HIVE_ISP_DDR_WORD_BITS
+#define CSS_DDR_WORD_BYTES HIVE_ISP_DDR_WORD_BYTES
+
+/* settings used in applications */
+#define HIVE_XMEM_WIDTH HIVE_ISP_DDR_WORD_BITS
+#define HIVE_VMEM_VECTOR_ELEMENTS 64
+#define HIVE_VMEM_ELEMENT_BITS 14
+#define HIVE_XMEM_ELEMENT_BITS 16
+#define HIVE_VMEM_VECTOR_BYTES (HIVE_VMEM_VECTOR_ELEMENTS*HIVE_XMEM_ELEMENT_BITS/8) /* used for # addr bytes for one vector */
+#define HIVE_XMEM_PACKED_WORD_VMEM_ELEMENTS (HIVE_XMEM_WIDTH/HIVE_VMEM_ELEMENT_BITS)
+#define HIVE_XMEM_WORD_VMEM_ELEMENTS (HIVE_XMEM_WIDTH/HIVE_XMEM_ELEMENT_BITS)
+#define XMEM_INT_SIZE 4
+
+
+
+#define HIVE_ISYS_INP_BUFFER_BYTES (64*1024) /* 64 kByte = 2k words (of 256 bits) */
+
+/* If HIVE_ISP_DDR_BASE_OFFSET is set to a non-zero value, the wide bus just before the DDRAM gets an extra dummy port */
+/* onto which the address range 0 .. HIVE_ISP_DDR_BASE_OFFSET-1 maps. This effectively creates an offset for the DDRAM from the system perspective */
+#define HIVE_ISP_DDR_BASE_OFFSET 0x120000000 /* 0x200000 */
+
+#define HIVE_DMA_ISP_BUS_CONN 0
+#define HIVE_DMA_ISP_DDR_CONN 1
+#define HIVE_DMA_BUS_DDR_CONN 2
+#define HIVE_DMA_ISP_MASTER master_port0
+#define HIVE_DMA_BUS_MASTER master_port1
+#define HIVE_DMA_DDR_MASTER master_port2
+
+#define HIVE_DMA_NUM_CHANNELS 32 /* old value was 8 */
+#define HIVE_DMA_CMD_FIFO_DEPTH 24 /* old value was 12 */
+
+#define HIVE_IF_PIXEL_WIDTH 12
+
+#define HIVE_MMU_TLB_SETS 8
+#define HIVE_MMU_TLB_SET_BLOCKS 8
+#define HIVE_MMU_TLB_BLOCK_ELEMENTS 8
+#define HIVE_MMU_PAGE_TABLE_LEVELS 2
+#define HIVE_MMU_PAGE_BYTES HIVE_ISP_PAGE_SIZE
+
+#define HIVE_ISP_CH_ID_BITS 2
+#define HIVE_ISP_FMT_TYPE_BITS 5
+#define HIVE_ISP_ISEL_SEL_BITS 2
+
+#define HIVE_GP_REGS_SDRAM_WAKEUP_IDX 0
+#define HIVE_GP_REGS_IDLE_IDX 1
+#define HIVE_GP_REGS_IRQ_0_IDX 2
+#define HIVE_GP_REGS_IRQ_1_IDX 3
+#define HIVE_GP_REGS_SP_STREAM_STAT_IDX 4
+#define HIVE_GP_REGS_SP_STREAM_STAT_B_IDX 5
+#define HIVE_GP_REGS_ISP_STREAM_STAT_IDX 6
+#define HIVE_GP_REGS_MOD_STREAM_STAT_IDX 7
+#define HIVE_GP_REGS_SP_STREAM_STAT_IRQ_COND_IDX 8
+#define HIVE_GP_REGS_SP_STREAM_STAT_B_IRQ_COND_IDX 9
+#define HIVE_GP_REGS_ISP_STREAM_STAT_IRQ_COND_IDX 10
+#define HIVE_GP_REGS_MOD_STREAM_STAT_IRQ_COND_IDX 11
+#define HIVE_GP_REGS_SP_STREAM_STAT_IRQ_ENABLE_IDX 12
+#define HIVE_GP_REGS_SP_STREAM_STAT_B_IRQ_ENABLE_IDX 13
+#define HIVE_GP_REGS_ISP_STREAM_STAT_IRQ_ENABLE_IDX 14
+#define HIVE_GP_REGS_MOD_STREAM_STAT_IRQ_ENABLE_IDX 15
+#define HIVE_GP_REGS_SWITCH_PRIM_IF_IDX 16
+#define HIVE_GP_REGS_SWITCH_GDC1_IDX 17
+#define HIVE_GP_REGS_SWITCH_GDC2_IDX 18
+#define HIVE_GP_REGS_SRST_IDX 19
+#define HIVE_GP_REGS_SLV_REG_SRST_IDX 20
+#define HIVE_GP_REGS_SWITCH_ISYS_IDX 21
+
+/* Bit numbers of the soft reset register */
+#define HIVE_GP_REGS_SRST_ISYS_CBUS 0
+#define HIVE_GP_REGS_SRST_ISEL_CBUS 1
+#define HIVE_GP_REGS_SRST_IFMT_CBUS 2
+#define HIVE_GP_REGS_SRST_GPDEV_CBUS 3
+#define HIVE_GP_REGS_SRST_GPIO 4
+#define HIVE_GP_REGS_SRST_TC 5
+#define HIVE_GP_REGS_SRST_GPTIMER 6
+#define HIVE_GP_REGS_SRST_FACELLFIFOS 7
+#define HIVE_GP_REGS_SRST_D_OSYS 8
+#define HIVE_GP_REGS_SRST_IFT_SEC_PIPE 9
+#define HIVE_GP_REGS_SRST_GDC1 10
+#define HIVE_GP_REGS_SRST_GDC2 11
+#define HIVE_GP_REGS_SRST_VEC_BUS 12
+#define HIVE_GP_REGS_SRST_ISP 13
+#define HIVE_GP_REGS_SRST_SLV_GRP_BUS 14
+#define HIVE_GP_REGS_SRST_DMA 15
+#define HIVE_GP_REGS_SRST_SF_ISP_SP 16
+#define HIVE_GP_REGS_SRST_SF_PIF_CELLS 17
+#define HIVE_GP_REGS_SRST_SF_SIF_SP 18
+#define HIVE_GP_REGS_SRST_SF_MC_SP 19
+#define HIVE_GP_REGS_SRST_SF_ISYS_SP 20
+#define HIVE_GP_REGS_SRST_SF_DMA_CELLS 21
+#define HIVE_GP_REGS_SRST_SF_GDC1_CELLS 22
+#define HIVE_GP_REGS_SRST_SF_GDC2_CELLS 23
+#define HIVE_GP_REGS_SRST_SP 24
+#define HIVE_GP_REGS_SRST_OCP2CIO 25
+#define HIVE_GP_REGS_SRST_NBUS 26
+#define HIVE_GP_REGS_SRST_HOST12BUS 27
+#define HIVE_GP_REGS_SRST_WBUS 28
+#define HIVE_GP_REGS_SRST_IC_OSYS 29
+#define HIVE_GP_REGS_SRST_WBUS_IC 30
+#define HIVE_GP_REGS_SRST_ISYS_INP_BUF_BUS 31
+
+/* Bit numbers of the slave register soft reset register */
+#define HIVE_GP_REGS_SLV_REG_SRST_DMA 0
+#define HIVE_GP_REGS_SLV_REG_SRST_GDC1 1
+#define HIVE_GP_REGS_SLV_REG_SRST_GDC2 2
+
+/* order of the input bits for the irq controller */
+#define HIVE_GP_DEV_IRQ_GPIO_PIN_0_BIT_ID 0
+#define HIVE_GP_DEV_IRQ_GPIO_PIN_1_BIT_ID 1
+#define HIVE_GP_DEV_IRQ_GPIO_PIN_2_BIT_ID 2
+#define HIVE_GP_DEV_IRQ_GPIO_PIN_3_BIT_ID 3
+#define HIVE_GP_DEV_IRQ_GPIO_PIN_4_BIT_ID 4
+#define HIVE_GP_DEV_IRQ_GPIO_PIN_5_BIT_ID 5
+#define HIVE_GP_DEV_IRQ_GPIO_PIN_6_BIT_ID 6
+#define HIVE_GP_DEV_IRQ_GPIO_PIN_7_BIT_ID 7
+#define HIVE_GP_DEV_IRQ_GPIO_PIN_8_BIT_ID 8
+#define HIVE_GP_DEV_IRQ_GPIO_PIN_9_BIT_ID 9
+#define HIVE_GP_DEV_IRQ_GPIO_PIN_10_BIT_ID 10
+#define HIVE_GP_DEV_IRQ_GPIO_PIN_11_BIT_ID 11
+#define HIVE_GP_DEV_IRQ_SP_BIT_ID 12
+#define HIVE_GP_DEV_IRQ_ISP_BIT_ID 13
+#define HIVE_GP_DEV_IRQ_ISYS_BIT_ID 14
+#define HIVE_GP_DEV_IRQ_ISEL_BIT_ID 15
+#define HIVE_GP_DEV_IRQ_IFMT_BIT_ID 16
+#define HIVE_GP_DEV_IRQ_SP_STREAM_MON_BIT_ID 17
+#define HIVE_GP_DEV_IRQ_ISP_STREAM_MON_BIT_ID 18
+#define HIVE_GP_DEV_IRQ_MOD_STREAM_MON_BIT_ID 19
+#define HIVE_GP_DEV_IRQ_IS2401_BIT_ID 20
+#define HIVE_GP_DEV_IRQ_ISP_BAMEM_ERROR_BIT_ID 21
+#define HIVE_GP_DEV_IRQ_ISP_DMEM_ERROR_BIT_ID 22
+#define HIVE_GP_DEV_IRQ_SP_ICACHE_MEM_ERROR_BIT_ID 23
+#define HIVE_GP_DEV_IRQ_SP_DMEM_ERROR_BIT_ID 24
+#define HIVE_GP_DEV_IRQ_MMU_CACHE_MEM_ERROR_BIT_ID 25
+#define HIVE_GP_DEV_IRQ_GP_TIMER_0_BIT_ID 26
+#define HIVE_GP_DEV_IRQ_GP_TIMER_1_BIT_ID 27
+#define HIVE_GP_DEV_IRQ_SW_PIN_0_BIT_ID 28
+#define HIVE_GP_DEV_IRQ_SW_PIN_1_BIT_ID 29
+#define HIVE_GP_DEV_IRQ_DMA_BIT_ID 30
+#define HIVE_GP_DEV_IRQ_SP_STREAM_MON_B_BIT_ID 31
+
+#define HIVE_GP_REGS_NUM_SW_IRQ_REGS 2
+
+/* order of the input bits for the timed controller */
+#define HIVE_GP_DEV_TC_GPIO_PIN_0_BIT_ID 0
+#define HIVE_GP_DEV_TC_GPIO_PIN_1_BIT_ID 1
+#define HIVE_GP_DEV_TC_GPIO_PIN_2_BIT_ID 2
+#define HIVE_GP_DEV_TC_GPIO_PIN_3_BIT_ID 3
+#define HIVE_GP_DEV_TC_GPIO_PIN_4_BIT_ID 4
+#define HIVE_GP_DEV_TC_GPIO_PIN_5_BIT_ID 5
+#define HIVE_GP_DEV_TC_GPIO_PIN_6_BIT_ID 6
+#define HIVE_GP_DEV_TC_GPIO_PIN_7_BIT_ID 7
+#define HIVE_GP_DEV_TC_GPIO_PIN_8_BIT_ID 8
+#define HIVE_GP_DEV_TC_GPIO_PIN_9_BIT_ID 9
+#define HIVE_GP_DEV_TC_GPIO_PIN_10_BIT_ID 10
+#define HIVE_GP_DEV_TC_GPIO_PIN_11_BIT_ID 11
+#define HIVE_GP_DEV_TC_SP_BIT_ID 12
+#define HIVE_GP_DEV_TC_ISP_BIT_ID 13
+#define HIVE_GP_DEV_TC_ISYS_BIT_ID 14
+#define HIVE_GP_DEV_TC_ISEL_BIT_ID 15
+#define HIVE_GP_DEV_TC_IFMT_BIT_ID 16
+#define HIVE_GP_DEV_TC_GP_TIMER_0_BIT_ID 17
+#define HIVE_GP_DEV_TC_GP_TIMER_1_BIT_ID 18
+#define HIVE_GP_DEV_TC_MIPI_SOL_BIT_ID 19
+#define HIVE_GP_DEV_TC_MIPI_EOL_BIT_ID 20
+#define HIVE_GP_DEV_TC_MIPI_SOF_BIT_ID 21
+#define HIVE_GP_DEV_TC_MIPI_EOF_BIT_ID 22
+#define HIVE_GP_DEV_TC_INPSYS_SM 23
+
+/* definitions for the gp_timer block */
+#define HIVE_GP_TIMER_0 0
+#define HIVE_GP_TIMER_1 1
+#define HIVE_GP_TIMER_2 2
+#define HIVE_GP_TIMER_3 3
+#define HIVE_GP_TIMER_4 4
+#define HIVE_GP_TIMER_5 5
+#define HIVE_GP_TIMER_6 6
+#define HIVE_GP_TIMER_7 7
+#define HIVE_GP_TIMER_NUM_COUNTERS 8
+
+#define HIVE_GP_TIMER_IRQ_0 0
+#define HIVE_GP_TIMER_IRQ_1 1
+#define HIVE_GP_TIMER_NUM_IRQS 2
+
+#define HIVE_GP_TIMER_GPIO_0_BIT_ID 0
+#define HIVE_GP_TIMER_GPIO_1_BIT_ID 1
+#define HIVE_GP_TIMER_GPIO_2_BIT_ID 2
+#define HIVE_GP_TIMER_GPIO_3_BIT_ID 3
+#define HIVE_GP_TIMER_GPIO_4_BIT_ID 4
+#define HIVE_GP_TIMER_GPIO_5_BIT_ID 5
+#define HIVE_GP_TIMER_GPIO_6_BIT_ID 6
+#define HIVE_GP_TIMER_GPIO_7_BIT_ID 7
+#define HIVE_GP_TIMER_GPIO_8_BIT_ID 8
+#define HIVE_GP_TIMER_GPIO_9_BIT_ID 9
+#define HIVE_GP_TIMER_GPIO_10_BIT_ID 10
+#define HIVE_GP_TIMER_GPIO_11_BIT_ID 11
+#define HIVE_GP_TIMER_INP_SYS_IRQ 12
+#define HIVE_GP_TIMER_ISEL_IRQ 13
+#define HIVE_GP_TIMER_IFMT_IRQ 14
+#define HIVE_GP_TIMER_SP_STRMON_IRQ 15
+#define HIVE_GP_TIMER_SP_B_STRMON_IRQ 16
+#define HIVE_GP_TIMER_ISP_STRMON_IRQ 17
+#define HIVE_GP_TIMER_MOD_STRMON_IRQ 18
+#define HIVE_GP_TIMER_IS2401_IRQ 19
+#define HIVE_GP_TIMER_ISP_BAMEM_ERROR_IRQ 20
+#define HIVE_GP_TIMER_ISP_DMEM_ERROR_IRQ 21
+#define HIVE_GP_TIMER_SP_ICACHE_MEM_ERROR_IRQ 22
+#define HIVE_GP_TIMER_SP_DMEM_ERROR_IRQ 23
+#define HIVE_GP_TIMER_SP_OUT_RUN_DP 24
+#define HIVE_GP_TIMER_SP_WIRE_DEBUG_LM_MSINK_RUN_I0_I0 25
+#define HIVE_GP_TIMER_SP_WIRE_DEBUG_LM_MSINK_RUN_I0_I1 26
+#define HIVE_GP_TIMER_SP_WIRE_DEBUG_LM_MSINK_RUN_I0_I2 27
+#define HIVE_GP_TIMER_SP_WIRE_DEBUG_LM_MSINK_RUN_I0_I3 28
+#define HIVE_GP_TIMER_SP_WIRE_DEBUG_LM_MSINK_RUN_I0_I4 29
+#define HIVE_GP_TIMER_SP_WIRE_DEBUG_LM_MSINK_RUN_I0_I5 30
+#define HIVE_GP_TIMER_SP_WIRE_DEBUG_LM_MSINK_RUN_I0_I6 31
+#define HIVE_GP_TIMER_SP_WIRE_DEBUG_LM_MSINK_RUN_I0_I7 32
+#define HIVE_GP_TIMER_SP_WIRE_DEBUG_LM_MSINK_RUN_I0_I8 33
+#define HIVE_GP_TIMER_SP_WIRE_DEBUG_LM_MSINK_RUN_I0_I9 34
+#define HIVE_GP_TIMER_SP_WIRE_DEBUG_LM_MSINK_RUN_I0_I10 35
+#define HIVE_GP_TIMER_SP_WIRE_DEBUG_LM_MSINK_RUN_I1_I0 36
+#define HIVE_GP_TIMER_SP_WIRE_DEBUG_LM_MSINK_RUN_I2_I0 37
+#define HIVE_GP_TIMER_SP_WIRE_DEBUG_LM_MSINK_RUN_I3_I0 38
+#define HIVE_GP_TIMER_ISP_OUT_RUN_DP 39
+#define HIVE_GP_TIMER_ISP_WIRE_DEBUG_LM_MSINK_RUN_I0_I0 40
+#define HIVE_GP_TIMER_ISP_WIRE_DEBUG_LM_MSINK_RUN_I0_I1 41
+#define HIVE_GP_TIMER_ISP_WIRE_DEBUG_LM_MSINK_RUN_I1_I0 42
+#define HIVE_GP_TIMER_ISP_WIRE_DEBUG_LM_MSINK_RUN_I2_I0 43
+#define HIVE_GP_TIMER_ISP_WIRE_DEBUG_LM_MSINK_RUN_I2_I1 44
+#define HIVE_GP_TIMER_ISP_WIRE_DEBUG_LM_MSINK_RUN_I2_I2 45
+#define HIVE_GP_TIMER_ISP_WIRE_DEBUG_LM_MSINK_RUN_I2_I3 46
+#define HIVE_GP_TIMER_ISP_WIRE_DEBUG_LM_MSINK_RUN_I2_I4 47
+#define HIVE_GP_TIMER_ISP_WIRE_DEBUG_LM_MSINK_RUN_I2_I5 48
+#define HIVE_GP_TIMER_ISP_WIRE_DEBUG_LM_MSINK_RUN_I2_I6 49
+#define HIVE_GP_TIMER_ISP_WIRE_DEBUG_LM_MSINK_RUN_I3_I0 50
+#define HIVE_GP_TIMER_ISP_WIRE_DEBUG_LM_MSINK_RUN_I4_I0 51
+#define HIVE_GP_TIMER_ISP_WIRE_DEBUG_LM_MSINK_RUN_I5_I0 52
+#define HIVE_GP_TIMER_ISP_WIRE_DEBUG_LM_MSINK_RUN_I6_I0 53
+#define HIVE_GP_TIMER_ISP_WIRE_DEBUG_LM_MSINK_RUN_I7_I0 54
+#define HIVE_GP_TIMER_MIPI_SOL_BIT_ID 55
+#define HIVE_GP_TIMER_MIPI_EOL_BIT_ID 56
+#define HIVE_GP_TIMER_MIPI_SOF_BIT_ID 57
+#define HIVE_GP_TIMER_MIPI_EOF_BIT_ID 58
+#define HIVE_GP_TIMER_INPSYS_SM 59
+#define HIVE_GP_TIMER_ISP_PMEM_ERROR_IRQ 60
+
+/* port definitions for the streaming monitors */
+/* port definitions SP streaming monitor, monitors the status of streaming ports at the SP side of the streaming FIFOs */
+#define SP_STR_MON_PORT_SP2SIF 0
+#define SP_STR_MON_PORT_SIF2SP 1
+#define SP_STR_MON_PORT_SP2MC 2
+#define SP_STR_MON_PORT_MC2SP 3
+#define SP_STR_MON_PORT_SP2DMA 4
+#define SP_STR_MON_PORT_DMA2SP 5
+#define SP_STR_MON_PORT_SP2ISP 6
+#define SP_STR_MON_PORT_ISP2SP 7
+#define SP_STR_MON_PORT_SP2GPD 8
+#define SP_STR_MON_PORT_FA2SP 9
+#define SP_STR_MON_PORT_SP2ISYS 10
+#define SP_STR_MON_PORT_ISYS2SP 11
+#define SP_STR_MON_PORT_SP2PIFA 12
+#define SP_STR_MON_PORT_PIFA2SP 13
+#define SP_STR_MON_PORT_SP2PIFB 14
+#define SP_STR_MON_PORT_PIFB2SP 15
+
+#define SP_STR_MON_PORT_B_SP2GDC1 0
+#define SP_STR_MON_PORT_B_GDC12SP 1
+#define SP_STR_MON_PORT_B_SP2GDC2 2
+#define SP_STR_MON_PORT_B_GDC22SP 3
+
+/* previously used SP streaming monitor port identifiers, kept for backward compatibility */
+#define SP_STR_MON_PORT_SND_SIF SP_STR_MON_PORT_SP2SIF
+#define SP_STR_MON_PORT_RCV_SIF SP_STR_MON_PORT_SIF2SP
+#define SP_STR_MON_PORT_SND_MC SP_STR_MON_PORT_SP2MC
+#define SP_STR_MON_PORT_RCV_MC SP_STR_MON_PORT_MC2SP
+#define SP_STR_MON_PORT_SND_DMA SP_STR_MON_PORT_SP2DMA
+#define SP_STR_MON_PORT_RCV_DMA SP_STR_MON_PORT_DMA2SP
+#define SP_STR_MON_PORT_SND_ISP SP_STR_MON_PORT_SP2ISP
+#define SP_STR_MON_PORT_RCV_ISP SP_STR_MON_PORT_ISP2SP
+#define SP_STR_MON_PORT_SND_GPD SP_STR_MON_PORT_SP2GPD
+#define SP_STR_MON_PORT_RCV_GPD SP_STR_MON_PORT_FA2SP
+/* Deprecated */
+#define SP_STR_MON_PORT_SND_PIF SP_STR_MON_PORT_SP2PIFA
+#define SP_STR_MON_PORT_RCV_PIF SP_STR_MON_PORT_PIFA2SP
+#define SP_STR_MON_PORT_SND_PIFB SP_STR_MON_PORT_SP2PIFB
+#define SP_STR_MON_PORT_RCV_PIFB SP_STR_MON_PORT_PIFB2SP
+
+#define SP_STR_MON_PORT_SND_PIF_A SP_STR_MON_PORT_SP2PIFA
+#define SP_STR_MON_PORT_RCV_PIF_A SP_STR_MON_PORT_PIFA2SP
+#define SP_STR_MON_PORT_SND_PIF_B SP_STR_MON_PORT_SP2PIFB
+#define SP_STR_MON_PORT_RCV_PIF_B SP_STR_MON_PORT_PIFB2SP
+
+/* port definitions ISP streaming monitor, monitors the status of streaming ports at the ISP side of the streaming FIFOs */
+#define ISP_STR_MON_PORT_ISP2PIFA 0
+#define ISP_STR_MON_PORT_PIFA2ISP 1
+#define ISP_STR_MON_PORT_ISP2PIFB 2
+#define ISP_STR_MON_PORT_PIFB2ISP 3
+#define ISP_STR_MON_PORT_ISP2DMA 4
+#define ISP_STR_MON_PORT_DMA2ISP 5
+#define ISP_STR_MON_PORT_ISP2GDC1 6
+#define ISP_STR_MON_PORT_GDC12ISP 7
+#define ISP_STR_MON_PORT_ISP2GDC2 8
+#define ISP_STR_MON_PORT_GDC22ISP 9
+#define ISP_STR_MON_PORT_ISP2GPD 10
+#define ISP_STR_MON_PORT_FA2ISP 11
+#define ISP_STR_MON_PORT_ISP2SP 12
+#define ISP_STR_MON_PORT_SP2ISP 13
+
+/* previously used ISP streaming monitor port identifiers, kept for backward compatibility */
+#define ISP_STR_MON_PORT_SND_PIF_A ISP_STR_MON_PORT_ISP2PIFA
+#define ISP_STR_MON_PORT_RCV_PIF_A ISP_STR_MON_PORT_PIFA2ISP
+#define ISP_STR_MON_PORT_SND_PIF_B ISP_STR_MON_PORT_ISP2PIFB
+#define ISP_STR_MON_PORT_RCV_PIF_B ISP_STR_MON_PORT_PIFB2ISP
+#define ISP_STR_MON_PORT_SND_DMA ISP_STR_MON_PORT_ISP2DMA
+#define ISP_STR_MON_PORT_RCV_DMA ISP_STR_MON_PORT_DMA2ISP
+#define ISP_STR_MON_PORT_SND_GDC ISP_STR_MON_PORT_ISP2GDC1
+#define ISP_STR_MON_PORT_RCV_GDC ISP_STR_MON_PORT_GDC12ISP
+#define ISP_STR_MON_PORT_SND_GPD ISP_STR_MON_PORT_ISP2GPD
+#define ISP_STR_MON_PORT_RCV_GPD ISP_STR_MON_PORT_FA2ISP
+#define ISP_STR_MON_PORT_SND_SP ISP_STR_MON_PORT_ISP2SP
+#define ISP_STR_MON_PORT_RCV_SP ISP_STR_MON_PORT_SP2ISP
+
+/* port definitions MOD streaming monitor, monitors the status of streaming ports at the module side of the streaming FIFOs */
+
+#define MOD_STR_MON_PORT_PIFA2CELLS 0
+#define MOD_STR_MON_PORT_CELLS2PIFA 1
+#define MOD_STR_MON_PORT_PIFB2CELLS 2
+#define MOD_STR_MON_PORT_CELLS2PIFB 3
+#define MOD_STR_MON_PORT_SIF2SP 4
+#define MOD_STR_MON_PORT_SP2SIF 5
+#define MOD_STR_MON_PORT_MC2SP 6
+#define MOD_STR_MON_PORT_SP2MC 7
+#define MOD_STR_MON_PORT_DMA2ISP 8
+#define MOD_STR_MON_PORT_ISP2DMA 9
+#define MOD_STR_MON_PORT_DMA2SP 10
+#define MOD_STR_MON_PORT_SP2DMA 11
+#define MOD_STR_MON_PORT_GDC12CELLS 12
+#define MOD_STR_MON_PORT_CELLS2GDC1 13
+#define MOD_STR_MON_PORT_GDC22CELLS 14
+#define MOD_STR_MON_PORT_CELLS2GDC2 15
+
+#define MOD_STR_MON_PORT_SND_PIF_A 0
+#define MOD_STR_MON_PORT_RCV_PIF_A 1
+#define MOD_STR_MON_PORT_SND_PIF_B 2
+#define MOD_STR_MON_PORT_RCV_PIF_B 3
+#define MOD_STR_MON_PORT_SND_SIF 4
+#define MOD_STR_MON_PORT_RCV_SIF 5
+#define MOD_STR_MON_PORT_SND_MC 6
+#define MOD_STR_MON_PORT_RCV_MC 7
+#define MOD_STR_MON_PORT_SND_DMA2ISP 8
+#define MOD_STR_MON_PORT_RCV_DMA_FR_ISP 9
+#define MOD_STR_MON_PORT_SND_DMA2SP 10
+#define MOD_STR_MON_PORT_RCV_DMA_FR_SP 11
+#define MOD_STR_MON_PORT_SND_GDC 12
+#define MOD_STR_MON_PORT_RCV_GDC 13
+
+
+/* testbench signals: */
+
+/* testbench GP adapter register ids */
+#define HIVE_TESTBENCH_GPIO_DATA_OUT_REG_IDX 0
+#define HIVE_TESTBENCH_GPIO_DIR_OUT_REG_IDX 1
+#define HIVE_TESTBENCH_IRQ_REG_IDX 2
+#define HIVE_TESTBENCH_SDRAM_WAKEUP_REG_IDX 3
+#define HIVE_TESTBENCH_IDLE_REG_IDX 4
+#define HIVE_TESTBENCH_GPIO_DATA_IN_REG_IDX 5
+#define HIVE_TESTBENCH_MIPI_BFM_EN_REG_IDX 6
+#define HIVE_TESTBENCH_CSI_CONFIG_REG_IDX 7
+#define HIVE_TESTBENCH_DDR_STALL_EN_REG_IDX 8
+
+#define HIVE_TESTBENCH_ISP_PMEM_ERROR_IRQ_REG_IDX 9
+#define HIVE_TESTBENCH_ISP_BAMEM_ERROR_IRQ_REG_IDX 10
+#define HIVE_TESTBENCH_ISP_DMEM_ERROR_IRQ_REG_IDX 11
+#define HIVE_TESTBENCH_SP_ICACHE_MEM_ERROR_IRQ_REG_IDX 12
+#define HIVE_TESTBENCH_SP_DMEM_ERROR_IRQ_REG_IDX 13
+
+#define HIVE_TESTBENCH_MIPI_PARPATHEN_REG_IDX 14
+#define HIVE_TESTBENCH_FB_HPLL_FREQ_REG_IDX 15
+#define HIVE_TESTBENCH_ISCLK_RATIO_REG_IDX 16
+
+/* Signal monitor input bit ids */
+#define HIVE_TESTBENCH_SIG_MON_GPIO_PIN_O_BIT_ID 0
+#define HIVE_TESTBENCH_SIG_MON_GPIO_PIN_1_BIT_ID 1
+#define HIVE_TESTBENCH_SIG_MON_GPIO_PIN_2_BIT_ID 2
+#define HIVE_TESTBENCH_SIG_MON_GPIO_PIN_3_BIT_ID 3
+#define HIVE_TESTBENCH_SIG_MON_GPIO_PIN_4_BIT_ID 4
+#define HIVE_TESTBENCH_SIG_MON_GPIO_PIN_5_BIT_ID 5
+#define HIVE_TESTBENCH_SIG_MON_GPIO_PIN_6_BIT_ID 6
+#define HIVE_TESTBENCH_SIG_MON_GPIO_PIN_7_BIT_ID 7
+#define HIVE_TESTBENCH_SIG_MON_GPIO_PIN_8_BIT_ID 8
+#define HIVE_TESTBENCH_SIG_MON_GPIO_PIN_9_BIT_ID 9
+#define HIVE_TESTBENCH_SIG_MON_GPIO_PIN_10_BIT_ID 10
+#define HIVE_TESTBENCH_SIG_MON_GPIO_PIN_11_BIT_ID 11
+#define HIVE_TESTBENCH_SIG_MON_IRQ_PIN_BIT_ID 12
+#define HIVE_TESTBENCH_SIG_MON_SDRAM_WAKEUP_PIN_BIT_ID 13
+#define HIVE_TESTBENCH_SIG_MON_IDLE_PIN_BIT_ID 14
+
+#define ISP2400_DEBUG_NETWORK 1
+
+#endif /* _hive_isp_css_defs_h__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/hive_isp_css_host_ids_hrt.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/hive_isp_css_host_ids_hrt.h
new file mode 100644
index 000000000000..8d4c9d67c0e2
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/hive_isp_css_host_ids_hrt.h
@@ -0,0 +1,119 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _hive_isp_css_host_ids_hrt_h_
+#define _hive_isp_css_host_ids_hrt_h_
+
+/* ISP_CSS identifiers */
+#define INP_SYS testbench_isp_isp_css_part_is_2400_inp_sys
+#define ISYS_GP_REGS testbench_isp_isp_css_part_is_2400_inp_sys_gpreg
+#define ISYS_IRQ_CTRL testbench_isp_isp_css_part_is_2400_inp_sys_irq_ctrl
+#define ISYS_CAP_A testbench_isp_isp_css_part_is_2400_inp_sys_capt_unit_a
+#define ISYS_CAP_B testbench_isp_isp_css_part_is_2400_inp_sys_capt_unit_b
+#define ISYS_CAP_C testbench_isp_isp_css_part_is_2400_inp_sys_capt_unit_c
+#define ISYS_INP_BUF testbench_isp_isp_css_part_input_buffer
+#define ISYS_INP_CTRL testbench_isp_isp_css_part_is_2400_inp_sys_inp_ctrl
+#define ISYS_ACQ testbench_isp_isp_css_part_is_2400_inp_sys_acq_unit
+
+#define ISP testbench_isp_isp_css_sec_part_isp
+#define SP testbench_isp_isp_css_sec_part_scp
+
+#define IF_PRIM testbench_isp_isp_css_part_is_2400_ifmt_ift_prim
+#define IF_PRIM_B testbench_isp_isp_css_part_is_2400_ifmt_ift_prim_b
+#define IF_SEC testbench_isp_isp_css_part_is_2400_ifmt_ift_sec
+#define IF_SEC_MASTER testbench_isp_isp_css_part_is_2400_ifmt_ift_sec_mt_out
+#define STR_TO_MEM testbench_isp_isp_css_part_is_2400_ifmt_mem_cpy
+#define IFMT_GP_REGS testbench_isp_isp_css_part_is_2400_ifmt_gp_reg
+#define IFMT_IRQ_CTRL testbench_isp_isp_css_part_is_2400_ifmt_irq_ctrl
+
+#define CSS_RECEIVER testbench_isp_isp_css_part_is_2400_inp_sys_csi_receiver
+
+#define TC testbench_isp_isp_css_sec_part_gpd_tc
+#define GPTIMER testbench_isp_isp_css_sec_part_gpd_gptimer
+#define DMA testbench_isp_isp_css_sec_part_isp_dma
+#define GDC testbench_isp_isp_css_sec_part_gdc1
+#define GDC2 testbench_isp_isp_css_sec_part_gdc2
+#define IRQ_CTRL testbench_isp_isp_css_sec_part_gpd_irq_ctrl
+#define GPIO testbench_isp_isp_css_sec_part_gpd_c_gpio
+#define GP_REGS testbench_isp_isp_css_sec_part_gpd_gp_reg
+#define ISEL_GP_REGS testbench_isp_isp_css_part_is_2400_isel_gpr
+#define ISEL_IRQ_CTRL testbench_isp_isp_css_part_is_2400_isel_irq_ctrl
+#define DATA_MMU testbench_isp_isp_css_sec_part_data_out_sys_c_mmu
+#define ICACHE_MMU testbench_isp_isp_css_sec_part_icache_out_sys_c_mmu
+
+/* the next entries are actually FIFO adapters (slaves to the streaming adapter), not FIFOs */
+#define ISP_SP_FIFO testbench_isp_isp_css_sec_part_fa_sp_isp
+#define ISEL_FIFO testbench_isp_isp_css_part_is_2400_isel_sf_fa_in
+
+#define FIFO_GPF_SP testbench_isp_isp_css_sec_part_sf_fa2sp_in
+#define FIFO_GPF_ISP testbench_isp_isp_css_sec_part_sf_fa2isp_in
+#define FIFO_SP_GPF testbench_isp_isp_css_sec_part_sf_sp2fa_in
+#define FIFO_ISP_GPF testbench_isp_isp_css_sec_part_sf_isp2fa_in
+
+#define DATA_OCP_MASTER testbench_isp_isp_css_sec_part_data_out_sys_cio2ocp_wide_data_out_mt
+#define ICACHE_OCP_MASTER testbench_isp_isp_css_sec_part_icache_out_sys_cio2ocp_wide_data_out_mt
+
+#define SP_IN_FIFO testbench_isp_isp_css_sec_part_sf_fa2sp_in
+#define SP_OUT_FIFO testbench_isp_isp_css_sec_part_sf_sp2fa_out
+#define ISP_IN_FIFO testbench_isp_isp_css_sec_part_sf_fa2isp_in
+#define ISP_OUT_FIFO testbench_isp_isp_css_sec_part_sf_isp2fa_out
+#define GEN_SHORT_PACK_PORT testbench_isp_isp_css_part_is_2400_inp_sys_csi_str_mon_fa_gensh_out
+
+/* input_system_2401 identifiers */
+#define ISYS2401_GP_REGS testbench_isp_isp_css_part_is_2401_gpreg
+#define ISYS2401_DMA testbench_isp_isp_css_part_is_2401_dma
+#define ISYS2401_IRQ_CTRL testbench_isp_isp_css_part_is_2401_isys_irq_ctrl
+
+#define ISYS2401_CSI_RX_A testbench_isp_isp_css_part_is_2401_is_pipe_a_csi_rx
+#define ISYS2401_MIPI_BE_A testbench_isp_isp_css_part_is_2401_is_pipe_a_mipi_be
+#define ISYS2401_S2M_A testbench_isp_isp_css_part_is_2401_is_pipe_a_s2m
+#define ISYS2401_PXG_A testbench_isp_isp_css_part_is_2401_is_pipe_a_pxlgen
+#define ISYS2401_IBUF_CNTRL_A testbench_isp_isp_css_part_is_2401_is_pipe_a_ibuf_ctrl
+#define ISYS2401_IRQ_CTRL_A testbench_isp_isp_css_part_is_2401_is_pipe_a_irq_ctrl_pipe
+
+#define ISYS2401_CSI_RX_B testbench_isp_isp_css_part_is_2401_is_pipe_b_csi_rx
+#define ISYS2401_MIPI_BE_B testbench_isp_isp_css_part_is_2401_is_pipe_b_mipi_be
+#define ISYS2401_S2M_B testbench_isp_isp_css_part_is_2401_is_pipe_b_s2m
+#define ISYS2401_PXG_B testbench_isp_isp_css_part_is_2401_is_pipe_b_pxlgen
+#define ISYS2401_IBUF_CNTRL_B testbench_isp_isp_css_part_is_2401_is_pipe_b_ibuf_ctrl
+#define ISYS2401_IRQ_CTRL_B testbench_isp_isp_css_part_is_2401_is_pipe_b_irq_ctrl_pipe
+
+#define ISYS2401_CSI_RX_C testbench_isp_isp_css_part_is_2401_is_pipe_c_csi_rx
+#define ISYS2401_MIPI_BE_C testbench_isp_isp_css_part_is_2401_is_pipe_c_mipi_be
+#define ISYS2401_S2M_C testbench_isp_isp_css_part_is_2401_is_pipe_c_s2m
+#define ISYS2401_PXG_C testbench_isp_isp_css_part_is_2401_is_pipe_c_pxlgen
+#define ISYS2401_IBUF_CNTRL_C testbench_isp_isp_css_part_is_2401_is_pipe_c_ibuf_ctrl
+#define ISYS2401_IRQ_CTRL_C testbench_isp_isp_css_part_is_2401_is_pipe_c_irq_ctrl_pipe
+
+
+/* Testbench identifiers */
+#define DDR testbench_ddram
+#define DDR_SMALL testbench_ddram_small
+#define XMEM DDR
+#define GPIO_ADAPTER testbench_gp_adapter
+#define SIG_MONITOR testbench_sig_mon
+#define DDR_SLAVE testbench_ddram_ip0
+#define DDR_SMALL_SLAVE testbench_ddram_small_ip0
+#define HOST_MASTER host_op0
+
+#define CSI_SENSOR testbench_vied_sensor
+#define CSI_SENSOR_GP_REGS testbench_vied_sensor_gpreg
+#define CSI_STR_IN_A testbench_vied_sensor_tx_a_csi_tx_data_in
+#define CSI_STR_IN_B testbench_vied_sensor_tx_b_csi_tx_data_in
+#define CSI_STR_IN_C testbench_vied_sensor_tx_c_csi_tx_data_in
+#define CSI_SENSOR_TX_A testbench_vied_sensor_tx_a
+#define CSI_SENSOR_TX_B testbench_vied_sensor_tx_b
+#define CSI_SENSOR_TX_C testbench_vied_sensor_tx_c
+
+#endif /* _hive_isp_css_host_ids_hrt_h_ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/hive_isp_css_streaming_to_mipi_types_hrt.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/hive_isp_css_streaming_to_mipi_types_hrt.h
new file mode 100644
index 000000000000..b4211a0c631a
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/hive_isp_css_streaming_to_mipi_types_hrt.h
@@ -0,0 +1,26 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _hive_isp_css_streaming_to_mipi_types_hrt_h_
+#define _hive_isp_css_streaming_to_mipi_types_hrt_h_
+
+#include <streaming_to_mipi_defs.h>
+
+#define _HIVE_ISP_CH_ID_MASK ((1U << HIVE_ISP_CH_ID_BITS)-1)
+#define _HIVE_ISP_FMT_TYPE_MASK ((1U << HIVE_ISP_FMT_TYPE_BITS)-1)
+
+#define _HIVE_STR_TO_MIPI_FMT_TYPE_LSB (HIVE_STR_TO_MIPI_CH_ID_LSB + HIVE_ISP_CH_ID_BITS)
+#define _HIVE_STR_TO_MIPI_DATA_B_LSB (HIVE_STR_TO_MIPI_DATA_A_LSB + HIVE_IF_PIXEL_WIDTH)
+
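+/*
+ * Illustrative sketch of how these masks and LSB positions compose a
+ * streaming-to-MIPI header word (ch_id/fmt_type are caller-supplied values;
+ * HIVE_STR_TO_MIPI_CH_ID_LSB comes from streaming_to_mipi_defs.h):
+ *
+ *   hdr  = (ch_id    & _HIVE_ISP_CH_ID_MASK)    << HIVE_STR_TO_MIPI_CH_ID_LSB;
+ *   hdr |= (fmt_type & _HIVE_ISP_FMT_TYPE_MASK) << _HIVE_STR_TO_MIPI_FMT_TYPE_LSB;
+ */
+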
+#endif /* _hive_isp_css_streaming_to_mipi_types_hrt_h_ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/hive_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/hive_types.h
new file mode 100644
index 000000000000..58b0e6effbd0
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/hive_types.h
@@ -0,0 +1,128 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _HRT_HIVE_TYPES_H
+#define _HRT_HIVE_TYPES_H
+
+#include "version.h"
+#include "defs.h"
+
+#ifndef HRTCAT3
+#define _HRTCAT3(m,n,o) m##n##o
+#define HRTCAT3(m,n,o) _HRTCAT3(m,n,o)
+#endif
+
+#ifndef HRTCAT4
+#define _HRTCAT4(m,n,o,p) m##n##o##p
+#define HRTCAT4(m,n,o,p) _HRTCAT4(m,n,o,p)
+#endif
+
+#ifndef HRTMIN
+#define HRTMIN(a,b) (((a)<(b))?(a):(b))
+#endif
+
+#ifndef HRTMAX
+#define HRTMAX(a,b) (((a)>(b))?(a):(b))
+#endif
+
+/* boolean data type */
+typedef unsigned int hive_bool;
+#define hive_false 0
+#define hive_true 1
+
+typedef char hive_int8;
+typedef short hive_int16;
+typedef int hive_int32;
+typedef long long hive_int64;
+
+typedef unsigned char hive_uint8;
+typedef unsigned short hive_uint16;
+typedef unsigned int hive_uint32;
+typedef unsigned long long hive_uint64;
+
+/* by default assume 32 bit master port (both data and address) */
+#ifndef HRT_DATA_WIDTH
+#define HRT_DATA_WIDTH 32
+#endif
+#ifndef HRT_ADDRESS_WIDTH
+#define HRT_ADDRESS_WIDTH 32
+#endif
+
+#define HRT_DATA_BYTES (HRT_DATA_WIDTH/8)
+#define HRT_ADDRESS_BYTES (HRT_ADDRESS_WIDTH/8)
+
+#if HRT_DATA_WIDTH == 64
+typedef hive_uint64 hrt_data;
+#elif HRT_DATA_WIDTH == 32
+typedef hive_uint32 hrt_data;
+#else
+#error data width not supported
+#endif
+
+#if HRT_ADDRESS_WIDTH == 64
+typedef hive_uint64 hrt_address;
+#elif HRT_ADDRESS_WIDTH == 32
+typedef hive_uint32 hrt_address;
+#else
+#error address width not supported
+#endif
+
+/* The SP side representation of an HMM virtual address */
+typedef hive_uint32 hrt_vaddress;
+
+/* use 64 bit addresses in simulation, where possible */
+typedef hive_uint64 hive_sim_address;
+
+/* below is for csim, not for hrt, rename and move this elsewhere */
+
+typedef unsigned int hive_uint;
+typedef hive_uint32 hive_address;
+typedef hive_address hive_slave_address;
+typedef hive_address hive_mem_address;
+
+/* MMIO devices */
+typedef hive_uint hive_mmio_id;
+typedef hive_mmio_id hive_slave_id;
+typedef hive_mmio_id hive_port_id;
+typedef hive_mmio_id hive_master_id;
+typedef hive_mmio_id hive_mem_id;
+typedef hive_mmio_id hive_dev_id;
+typedef hive_mmio_id hive_fifo_id;
+
+typedef hive_uint hive_hier_id;
+typedef hive_hier_id hive_device_id;
+typedef hive_device_id hive_proc_id;
+typedef hive_device_id hive_cell_id;
+typedef hive_device_id hive_host_id;
+typedef hive_device_id hive_bus_id;
+typedef hive_device_id hive_bridge_id;
+typedef hive_device_id hive_fifo_adapter_id;
+typedef hive_device_id hive_custom_device_id;
+
+typedef hive_uint hive_slot_id;
+typedef hive_uint hive_fu_id;
+typedef hive_uint hive_reg_file_id;
+typedef hive_uint hive_reg_id;
+
+/* Streaming devices */
+typedef hive_uint hive_outport_id;
+typedef hive_uint hive_inport_id;
+
+typedef hive_uint hive_msink_id;
+
+/* HRT specific */
+typedef char* hive_program;
+typedef char* hive_function;
+
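+/*
+ * Illustrative sketch (hypothetical name, not an existing HRT facility):
+ * a compile-time check that the hrt_data typedef selected above really
+ * matches the configured HRT_DATA_WIDTH.
+ */
+typedef char hrt_data_width_check_t[(sizeof(hrt_data) * 8 == HRT_DATA_WIDTH) ? 1 : -1];
+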
+#endif /* _HRT_HIVE_TYPES_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/ibuf_cntrl_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/ibuf_cntrl_defs.h
new file mode 100644
index 000000000000..f82bb79785cf
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/ibuf_cntrl_defs.h
@@ -0,0 +1,138 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _ibuf_cntrl_defs_h_
+#define _ibuf_cntrl_defs_h_
+
+#include <stream2mmio_defs.h>
+#include <dma_v2_defs.h>
+
+#define _IBUF_CNTRL_REG_ALIGN 4
+ /* alignment of the register banks; the first bank holds the shared configuration and status registers: */
+#define _IBUF_CNTRL_PROC_REG_ALIGN 32
+
+ /* the actual number of configuration registers per proc: */
+#define _IBUF_CNTRL_CONFIG_REGS_PER_PROC 18
+ /* the actual number of shared configuration registers: */
+#define _IBUF_CNTRL_CONFIG_REGS_NO_PROC 0
+
+ /* the actual number of status registers per proc */
+#define _IBUF_CNTRL_STATUS_REGS_PER_PROC (_IBUF_CNTRL_CONFIG_REGS_PER_PROC + 10)
+ /* the actual number of shared status registers */
+#define _IBUF_CNTRL_STATUS_REGS_NO_PROC (_IBUF_CNTRL_CONFIG_REGS_NO_PROC + 2)
+
+ /* time out bits, maximum time out value is 2^_IBUF_CNTRL_TIME_OUT_BITS - 1 */
+#define _IBUF_CNTRL_TIME_OUT_BITS 5
+
+/* command token definition */
+#define _IBUF_CNTRL_CMD_TOKEN_LSB 0
+#define _IBUF_CNTRL_CMD_TOKEN_MSB 1
+
+/* Str2MMIO defines */
+#define _IBUF_CNTRL_STREAM2MMIO_CMD_TOKEN_MSB _STREAM2MMIO_CMD_TOKEN_CMD_MSB
+#define _IBUF_CNTRL_STREAM2MMIO_CMD_TOKEN_LSB _STREAM2MMIO_CMD_TOKEN_CMD_LSB
+#define _IBUF_CNTRL_STREAM2MMIO_NUM_ITEMS_BITS _STREAM2MMIO_PACK_NUM_ITEMS_BITS
+#define _IBUF_CNTRL_STREAM2MMIO_ACK_EOF_BIT _STREAM2MMIO_PACK_ACK_EOF_BIT
+#define _IBUF_CNTRL_STREAM2MMIO_ACK_TOKEN_VALID_BIT _STREAM2MMIO_ACK_TOKEN_VALID_BIT
+
+/* acknowledge token definition */
+#define _IBUF_CNTRL_ACK_TOKEN_STORES_IDX 0
+#define _IBUF_CNTRL_ACK_TOKEN_STORES_BITS 15
+#define _IBUF_CNTRL_ACK_TOKEN_ITEMS_IDX (_IBUF_CNTRL_ACK_TOKEN_STORES_BITS + _IBUF_CNTRL_ACK_TOKEN_STORES_IDX)
+#define _IBUF_CNTRL_ACK_TOKEN_ITEMS_BITS _STREAM2MMIO_PACK_NUM_ITEMS_BITS
+#define _IBUF_CNTRL_ACK_TOKEN_LSB _IBUF_CNTRL_ACK_TOKEN_STORES_IDX
+#define _IBUF_CNTRL_ACK_TOKEN_MSB (_IBUF_CNTRL_ACK_TOKEN_ITEMS_BITS + _IBUF_CNTRL_ACK_TOKEN_ITEMS_IDX - 1)
+ /* bit 31 indicates a valid ack: */
+#define _IBUF_CNTRL_ACK_TOKEN_VALID_BIT (_IBUF_CNTRL_ACK_TOKEN_ITEMS_BITS + _IBUF_CNTRL_ACK_TOKEN_ITEMS_IDX)
+
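+/*
+ * Illustrative sketch (hypothetical helpers, not part of the existing API):
+ * decoding an acknowledge token with the *_IDX/*_BITS fields defined above.
+ */
+#define _IBUF_CNTRL_ACK_GET_STORES(ack) \
+	(((ack) >> _IBUF_CNTRL_ACK_TOKEN_STORES_IDX) & \
+	 ((1U << _IBUF_CNTRL_ACK_TOKEN_STORES_BITS) - 1))
+#define _IBUF_CNTRL_ACK_GET_ITEMS(ack) \
+	(((ack) >> _IBUF_CNTRL_ACK_TOKEN_ITEMS_IDX) & \
+	 ((1U << _IBUF_CNTRL_ACK_TOKEN_ITEMS_BITS) - 1))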
+
+/* shared registers: */
+#define _IBUF_CNTRL_RECALC_WORDS_STATUS 0
+#define _IBUF_CNTRL_ARBITERS_STATUS 1
+
+#define _IBUF_CNTRL_SET_CRUN 2 /* NO PHYSICAL REGISTER!! Only used in HSS model */
+
+
+/* register addresses for each proc: */
+#define _IBUF_CNTRL_CMD 0
+#define _IBUF_CNTRL_ACK 1
+
+ /* number of items (packets or words) per store: */
+#define _IBUF_CNTRL_NUM_ITEMS_PER_STORE 2
+
+ /* number of stores (packets or words) per frame/buffer: */
+#define _IBUF_CNTRL_NUM_STORES_PER_FRAME 3
+
+ /* the channel and command in the DMA */
+#define _IBUF_CNTRL_DMA_CHANNEL 4
+#define _IBUF_CNTRL_DMA_CMD 5
+
+ /* the start address and stride of the buffers */
+#define _IBUF_CNTRL_BUFFER_START_ADDRESS 6
+#define _IBUF_CNTRL_BUFFER_STRIDE 7
+#define _IBUF_CNTRL_BUFFER_END_ADDRESS 8
+
+ /* destination start address, stride and end address; should be the same as in the DMA */
+#define _IBUF_CNTRL_DEST_START_ADDRESS 9
+#define _IBUF_CNTRL_DEST_STRIDE 10
+#define _IBUF_CNTRL_DEST_END_ADDRESS 11
+
+ /* send a frame sync or not, default 1 */
+#define _IBUF_CNTRL_SYNC_FRAME 12
+
+ /* str2mmio cmds */
+#define _IBUF_CNTRL_STR2MMIO_SYNC_CMD 13
+#define _IBUF_CNTRL_STR2MMIO_STORE_CMD 14
+
+ /* number of elements per word */
+#define _IBUF_CNTRL_SHIFT_ITEMS 15
+#define _IBUF_CNTRL_ELEMS_P_WORD_IBUF 16
+#define _IBUF_CNTRL_ELEMS_P_WORD_DEST 17
+
+
+ /* STATUS */
+ /* current frame and stores in buffer */
+#define _IBUF_CNTRL_CUR_STORES 18
+#define _IBUF_CNTRL_CUR_ACKS 19
+
+ /* current buffer and destination address for DMA cmd's */
+#define _IBUF_CNTRL_CUR_S2M_IBUF_ADDR 20
+#define _IBUF_CNTRL_CUR_DMA_IBUF_ADDR 21
+#define _IBUF_CNTRL_CUR_DMA_DEST_ADDR 22
+#define _IBUF_CNTRL_CUR_ISP_DEST_ADDR 23
+
+#define _IBUF_CNTRL_CUR_NR_DMA_CMDS_SEND 24
+
+#define _IBUF_CNTRL_MAIN_CNTRL_STATE 25
+#define _IBUF_CNTRL_DMA_SYNC_STATE 26
+#define _IBUF_CNTRL_ISP_SYNC_STATE 27
+
+
+/* Commands: */
+#define _IBUF_CNTRL_CMD_STORE_FRAME_IDX 0
+#define _IBUF_CNTRL_CMD_ONLINE_IDX 1
+
+ /* initialize, copy st_addr to cur_addr etc */
+#define _IBUF_CNTRL_CMD_INITIALIZE 0
+
+ /* store an online frame (sync with ISP; use the configured start, stride and end address): */
+#define _IBUF_CNTRL_CMD_STORE_ONLINE_FRAME ((1<<_IBUF_CNTRL_CMD_STORE_FRAME_IDX) | (1<<_IBUF_CNTRL_CMD_ONLINE_IDX))
+
+ /* store an offline frame (don't sync with ISP; requires the start address as 2nd token, no end address): */
+#define _IBUF_CNTRL_CMD_STORE_OFFLINE_FRAME (1<<_IBUF_CNTRL_CMD_STORE_FRAME_IDX)
+
+ /* false command token; must be different from the real commands. Uses the online bit, not the store-frame bit: */
+#define _IBUF_CNTRL_FALSE_ACK 2
+
+#endif
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/if_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/if_defs.h
new file mode 100644
index 000000000000..7d39e45796ae
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/if_defs.h
@@ -0,0 +1,22 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _IF_DEFS_H
+#define _IF_DEFS_H
+
+#define HIVE_IF_FRAME_REQUEST 0xA000
+#define HIVE_IF_LINES_REQUEST 0xB000
+#define HIVE_IF_VECTORS_REQUEST 0xC000
+
+#endif /* _IF_DEFS_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/input_formatter_subsystem_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/input_formatter_subsystem_defs.h
new file mode 100644
index 000000000000..16bfe1d80bc9
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/input_formatter_subsystem_defs.h
@@ -0,0 +1,53 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _if_subsystem_defs_h
+#define _if_subsystem_defs_h
+
+#define HIVE_IFMT_GP_REGS_INPUT_SWITCH_LUT_REG_0 0
+#define HIVE_IFMT_GP_REGS_INPUT_SWITCH_LUT_REG_1 1
+#define HIVE_IFMT_GP_REGS_INPUT_SWITCH_LUT_REG_2 2
+#define HIVE_IFMT_GP_REGS_INPUT_SWITCH_LUT_REG_3 3
+#define HIVE_IFMT_GP_REGS_INPUT_SWITCH_LUT_REG_4 4
+#define HIVE_IFMT_GP_REGS_INPUT_SWITCH_LUT_REG_5 5
+#define HIVE_IFMT_GP_REGS_INPUT_SWITCH_LUT_REG_6 6
+#define HIVE_IFMT_GP_REGS_INPUT_SWITCH_LUT_REG_7 7
+#define HIVE_IFMT_GP_REGS_INPUT_SWITCH_FSYNC_LUT_REG 8
+#define HIVE_IFMT_GP_REGS_SRST_IDX 9
+#define HIVE_IFMT_GP_REGS_SLV_REG_SRST_IDX 10
+
+#define HIVE_IFMT_GP_REGS_CH_ID_FMT_TYPE_IDX 11
+
+#define HIVE_IFMT_GP_REGS_INPUT_SWITCH_LUT_REG_BASE HIVE_IFMT_GP_REGS_INPUT_SWITCH_LUT_REG_0
+
+/* order of the input bits for the ifmt irq controller */
+#define HIVE_IFMT_IRQ_IFT_PRIM_BIT_ID 0
+#define HIVE_IFMT_IRQ_IFT_PRIM_B_BIT_ID 1
+#define HIVE_IFMT_IRQ_IFT_SEC_BIT_ID 2
+#define HIVE_IFMT_IRQ_MEM_CPY_BIT_ID 3
+#define HIVE_IFMT_IRQ_SIDEBAND_CHANGED_BIT_ID 4
+
+/* order of the input bits for the ifmt Soft reset register */
+#define HIVE_IFMT_GP_REGS_SRST_IFT_PRIM_BIT_IDX 0
+#define HIVE_IFMT_GP_REGS_SRST_IFT_PRIM_B_BIT_IDX 1
+#define HIVE_IFMT_GP_REGS_SRST_IFT_SEC_BIT_IDX 2
+#define HIVE_IFMT_GP_REGS_SRST_MEM_CPY_BIT_IDX 3
+
+/* order of the input bits for the ifmt slave register soft reset register */
+#define HIVE_IFMT_GP_REGS_SLV_REG_SRST_IFT_PRIM_BIT_IDX 0
+#define HIVE_IFMT_GP_REGS_SLV_REG_SRST_IFT_PRIM_B_BIT_IDX 1
+#define HIVE_IFMT_GP_REGS_SLV_REG_SRST_IFT_SEC_BIT_IDX 2
+#define HIVE_IFMT_GP_REGS_SLV_REG_SRST_MEM_CPY_BIT_IDX 3
+
+#endif /* _if_subsystem_defs_h */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/input_selector_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/input_selector_defs.h
new file mode 100644
index 000000000000..87fbf82edb5b
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/input_selector_defs.h
@@ -0,0 +1,89 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _input_selector_defs_h
+#define _input_selector_defs_h
+
+#ifndef HIVE_ISP_ISEL_SEL_BITS
+#define HIVE_ISP_ISEL_SEL_BITS 2
+#endif
+
+#ifndef HIVE_ISP_CH_ID_BITS
+#define HIVE_ISP_CH_ID_BITS 2
+#endif
+
+#ifndef HIVE_ISP_FMT_TYPE_BITS
+#define HIVE_ISP_FMT_TYPE_BITS 5
+#endif
+
+/* gp_register register IDs -- Outputs */
+#define HIVE_ISEL_GP_REGS_SYNCGEN_ENABLE_IDX 0
+#define HIVE_ISEL_GP_REGS_SYNCGEN_FREE_RUNNING_IDX 1
+#define HIVE_ISEL_GP_REGS_SYNCGEN_PAUSE_IDX 2
+#define HIVE_ISEL_GP_REGS_SYNCGEN_NR_FRAMES_IDX 3
+#define HIVE_ISEL_GP_REGS_SYNCGEN_NR_PIX_IDX 4
+#define HIVE_ISEL_GP_REGS_SYNCGEN_NR_LINES_IDX 5
+#define HIVE_ISEL_GP_REGS_SYNCGEN_HBLANK_CYCLES_IDX 6
+#define HIVE_ISEL_GP_REGS_SYNCGEN_VBLANK_CYCLES_IDX 7
+
+#define HIVE_ISEL_GP_REGS_SOF_IDX 8
+#define HIVE_ISEL_GP_REGS_EOF_IDX 9
+#define HIVE_ISEL_GP_REGS_SOL_IDX 10
+#define HIVE_ISEL_GP_REGS_EOL_IDX 11
+
+#define HIVE_ISEL_GP_REGS_PRBS_ENABLE 12
+#define HIVE_ISEL_GP_REGS_PRBS_ENABLE_PORT_B 13
+#define HIVE_ISEL_GP_REGS_PRBS_LFSR_RESET_VALUE 14
+
+#define HIVE_ISEL_GP_REGS_TPG_ENABLE 15
+#define HIVE_ISEL_GP_REGS_TPG_ENABLE_PORT_B 16
+#define HIVE_ISEL_GP_REGS_TPG_HOR_CNT_MASK_IDX 17
+#define HIVE_ISEL_GP_REGS_TPG_VER_CNT_MASK_IDX 18
+#define HIVE_ISEL_GP_REGS_TPG_XY_CNT_MASK_IDX 19
+#define HIVE_ISEL_GP_REGS_TPG_HOR_CNT_DELTA_IDX 20
+#define HIVE_ISEL_GP_REGS_TPG_VER_CNT_DELTA_IDX 21
+#define HIVE_ISEL_GP_REGS_TPG_MODE_IDX 22
+#define HIVE_ISEL_GP_REGS_TPG_R1_IDX 23
+#define HIVE_ISEL_GP_REGS_TPG_G1_IDX 24
+#define HIVE_ISEL_GP_REGS_TPG_B1_IDX 25
+#define HIVE_ISEL_GP_REGS_TPG_R2_IDX 26
+#define HIVE_ISEL_GP_REGS_TPG_G2_IDX 27
+#define HIVE_ISEL_GP_REGS_TPG_B2_IDX 28
+
+
+#define HIVE_ISEL_GP_REGS_CH_ID_IDX 29
+#define HIVE_ISEL_GP_REGS_FMT_TYPE_IDX 30
+#define HIVE_ISEL_GP_REGS_DATA_SEL_IDX 31
+#define HIVE_ISEL_GP_REGS_SBAND_SEL_IDX 32
+#define HIVE_ISEL_GP_REGS_SYNC_SEL_IDX 33
+#define HIVE_ISEL_GP_REGS_SRST_IDX 37
+
+#define HIVE_ISEL_GP_REGS_SRST_SYNCGEN_BIT 0
+#define HIVE_ISEL_GP_REGS_SRST_PRBS_BIT 1
+#define HIVE_ISEL_GP_REGS_SRST_TPG_BIT 2
+#define HIVE_ISEL_GP_REGS_SRST_FIFO_BIT 3
+
+/* gp_register register IDs -- Inputs */
+#define HIVE_ISEL_GP_REGS_SYNCGEN_HOR_CNT_IDX 34
+#define HIVE_ISEL_GP_REGS_SYNCGEN_VER_CNT_IDX 35
+#define HIVE_ISEL_GP_REGS_SYNCGEN_FRAMES_CNT_IDX 36
+
+/* irq sources isel irq controller */
+#define HIVE_ISEL_IRQ_SYNC_GEN_SOF_BIT_ID 0
+#define HIVE_ISEL_IRQ_SYNC_GEN_EOF_BIT_ID 1
+#define HIVE_ISEL_IRQ_SYNC_GEN_SOL_BIT_ID 2
+#define HIVE_ISEL_IRQ_SYNC_GEN_EOL_BIT_ID 3
+#define HIVE_ISEL_IRQ_NUM_IRQS 4
+
+#endif /* _input_selector_defs_h */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/input_switch_2400_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/input_switch_2400_defs.h
new file mode 100644
index 000000000000..20a13c4cdb56
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/input_switch_2400_defs.h
@@ -0,0 +1,30 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _input_switch_2400_defs_h
+#define _input_switch_2400_defs_h
+
+#define _HIVE_INPUT_SWITCH_GET_LUT_REG_ID(ch_id, fmt_type) (((ch_id)*2) + ((fmt_type)>=16))
+#define _HIVE_INPUT_SWITCH_GET_LUT_REG_LSB(fmt_type) (((fmt_type)%16) * 2)
+
+#define HIVE_INPUT_SWITCH_SELECT_NO_OUTPUT 0
+#define HIVE_INPUT_SWITCH_SELECT_IF_PRIM 1
+#define HIVE_INPUT_SWITCH_SELECT_IF_SEC 2
+#define HIVE_INPUT_SWITCH_SELECT_STR_TO_MEM 3
+#define HIVE_INPUT_SWITCH_VSELECT_NO_OUTPUT 0
+#define HIVE_INPUT_SWITCH_VSELECT_IF_PRIM 1
+#define HIVE_INPUT_SWITCH_VSELECT_IF_SEC 2
+#define HIVE_INPUT_SWITCH_VSELECT_STR_TO_MEM 4
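+
+/*
+ * Worked example (values chosen purely for illustration): for ch_id = 1 and
+ * fmt_type = 18,
+ *   _HIVE_INPUT_SWITCH_GET_LUT_REG_ID(1, 18) = 1*2 + (18 >= 16) = 3
+ *   _HIVE_INPUT_SWITCH_GET_LUT_REG_LSB(18)   = (18 % 16) * 2    = 4
+ * i.e. the LUT field for this (ch_id, fmt_type) pair starts at bit 4 of
+ * LUT register 3.
+ */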
+
+#endif /* _input_switch_2400_defs_h */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/input_system_ctrl_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/input_system_ctrl_defs.h
new file mode 100644
index 000000000000..a7f0ca80bc9b
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/input_system_ctrl_defs.h
@@ -0,0 +1,254 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _input_system_ctrl_defs_h
+#define _input_system_ctrl_defs_h
+
+#define _INPUT_SYSTEM_CTRL_REG_ALIGN 4 /* assuming 32 bit control bus width */
+
+/* --------------------------------------------------*/
+
+/* --------------------------------------------------*/
+/* REGISTER INFO */
+/* --------------------------------------------------*/
+
+/* Number of registers */
+#define ISYS_CTRL_NOF_REGS 23
+
+/* Register IDs of MMIO slave-accessible registers */
+#define ISYS_CTRL_CAPT_START_ADDR_A_REG_ID 0
+#define ISYS_CTRL_CAPT_START_ADDR_B_REG_ID 1
+#define ISYS_CTRL_CAPT_START_ADDR_C_REG_ID 2
+#define ISYS_CTRL_CAPT_MEM_REGION_SIZE_A_REG_ID 3
+#define ISYS_CTRL_CAPT_MEM_REGION_SIZE_B_REG_ID 4
+#define ISYS_CTRL_CAPT_MEM_REGION_SIZE_C_REG_ID 5
+#define ISYS_CTRL_CAPT_NUM_MEM_REGIONS_A_REG_ID 6
+#define ISYS_CTRL_CAPT_NUM_MEM_REGIONS_B_REG_ID 7
+#define ISYS_CTRL_CAPT_NUM_MEM_REGIONS_C_REG_ID 8
+#define ISYS_CTRL_ACQ_START_ADDR_REG_ID 9
+#define ISYS_CTRL_ACQ_MEM_REGION_SIZE_REG_ID 10
+#define ISYS_CTRL_ACQ_NUM_MEM_REGIONS_REG_ID 11
+#define ISYS_CTRL_INIT_REG_ID 12
+#define ISYS_CTRL_LAST_COMMAND_REG_ID 13
+#define ISYS_CTRL_NEXT_COMMAND_REG_ID 14
+#define ISYS_CTRL_LAST_ACKNOWLEDGE_REG_ID 15
+#define ISYS_CTRL_NEXT_ACKNOWLEDGE_REG_ID 16
+#define ISYS_CTRL_FSM_STATE_INFO_REG_ID 17
+#define ISYS_CTRL_CAPT_A_FSM_STATE_INFO_REG_ID 18
+#define ISYS_CTRL_CAPT_B_FSM_STATE_INFO_REG_ID 19
+#define ISYS_CTRL_CAPT_C_FSM_STATE_INFO_REG_ID 20
+#define ISYS_CTRL_ACQ_FSM_STATE_INFO_REG_ID 21
+#define ISYS_CTRL_CAPT_RESERVE_ONE_MEM_REGION_REG_ID 22
+
+
+/* register reset value */
+#define ISYS_CTRL_CAPT_START_ADDR_A_REG_RSTVAL 0
+#define ISYS_CTRL_CAPT_START_ADDR_B_REG_RSTVAL 0
+#define ISYS_CTRL_CAPT_START_ADDR_C_REG_RSTVAL 0
+#define ISYS_CTRL_CAPT_MEM_REGION_SIZE_A_REG_RSTVAL 128
+#define ISYS_CTRL_CAPT_MEM_REGION_SIZE_B_REG_RSTVAL 128
+#define ISYS_CTRL_CAPT_MEM_REGION_SIZE_C_REG_RSTVAL 128
+#define ISYS_CTRL_CAPT_NUM_MEM_REGIONS_A_REG_RSTVAL 3
+#define ISYS_CTRL_CAPT_NUM_MEM_REGIONS_B_REG_RSTVAL 3
+#define ISYS_CTRL_CAPT_NUM_MEM_REGIONS_C_REG_RSTVAL 3
+#define ISYS_CTRL_ACQ_START_ADDR_REG_RSTVAL 0
+#define ISYS_CTRL_ACQ_MEM_REGION_SIZE_REG_RSTVAL 128
+#define ISYS_CTRL_ACQ_NUM_MEM_REGIONS_REG_RSTVAL 3
+#define ISYS_CTRL_INIT_REG_RSTVAL 0
+#define ISYS_CTRL_LAST_COMMAND_REG_RSTVAL 15 /* 0x0000_000F (to signal non-valid cmd/ack after reset/soft-reset) */
+#define ISYS_CTRL_NEXT_COMMAND_REG_RSTVAL 15 /* 0x0000_000F (to signal non-valid cmd/ack after reset/soft-reset) */
+#define ISYS_CTRL_LAST_ACKNOWLEDGE_REG_RSTVAL 15 /* 0x0000_000F (to signal non-valid cmd/ack after reset/soft-reset) */
+#define ISYS_CTRL_NEXT_ACKNOWLEDGE_REG_RSTVAL 15 /* 0x0000_000F (to signal non-valid cmd/ack after reset/soft-reset) */
+#define ISYS_CTRL_FSM_STATE_INFO_REG_RSTVAL 0
+#define ISYS_CTRL_CAPT_A_FSM_STATE_INFO_REG_RSTVAL 0
+#define ISYS_CTRL_CAPT_B_FSM_STATE_INFO_REG_RSTVAL 0
+#define ISYS_CTRL_CAPT_C_FSM_STATE_INFO_REG_RSTVAL 0
+#define ISYS_CTRL_ACQ_FSM_STATE_INFO_REG_RSTVAL 0
+#define ISYS_CTRL_CAPT_RESERVE_ONE_MEM_REGION_REG_RSTVAL 0
+
+/* register width value */
+#define ISYS_CTRL_CAPT_START_ADDR_A_REG_WIDTH 9
+#define ISYS_CTRL_CAPT_START_ADDR_B_REG_WIDTH 9
+#define ISYS_CTRL_CAPT_START_ADDR_C_REG_WIDTH 9
+#define ISYS_CTRL_CAPT_MEM_REGION_SIZE_A_REG_WIDTH 9
+#define ISYS_CTRL_CAPT_MEM_REGION_SIZE_B_REG_WIDTH 9
+#define ISYS_CTRL_CAPT_MEM_REGION_SIZE_C_REG_WIDTH 9
+#define ISYS_CTRL_CAPT_NUM_MEM_REGIONS_A_REG_WIDTH 9
+#define ISYS_CTRL_CAPT_NUM_MEM_REGIONS_B_REG_WIDTH 9
+#define ISYS_CTRL_CAPT_NUM_MEM_REGIONS_C_REG_WIDTH 9
+#define ISYS_CTRL_ACQ_START_ADDR_REG_WIDTH 9
+#define ISYS_CTRL_ACQ_MEM_REGION_SIZE_REG_WIDTH 9
+#define ISYS_CTRL_ACQ_NUM_MEM_REGIONS_REG_WIDTH 9
+#define ISYS_CTRL_INIT_REG_WIDTH 3
+#define ISYS_CTRL_LAST_COMMAND_REG_WIDTH 32 /* slave data width */
+#define ISYS_CTRL_NEXT_COMMAND_REG_WIDTH 32
+#define ISYS_CTRL_LAST_ACKNOWLEDGE_REG_WIDTH 32
+#define ISYS_CTRL_NEXT_ACKNOWLEDGE_REG_WIDTH 32
+#define ISYS_CTRL_FSM_STATE_INFO_REG_WIDTH 32
+#define ISYS_CTRL_CAPT_A_FSM_STATE_INFO_REG_WIDTH 32
+#define ISYS_CTRL_CAPT_B_FSM_STATE_INFO_REG_WIDTH 32
+#define ISYS_CTRL_CAPT_C_FSM_STATE_INFO_REG_WIDTH 32
+#define ISYS_CTRL_ACQ_FSM_STATE_INFO_REG_WIDTH 32
+#define ISYS_CTRL_CAPT_RESERVE_ONE_MEM_REGION_REG_WIDTH 1
+
+/* bit definitions */
+
+/* --------------------------------------------------*/
+/* TOKEN INFO */
+/* --------------------------------------------------*/
+
+/*
+ * Command tokens.  Each layout is given as <nr of words>/<word index>,
+ * followed by the bit fields of that word.
+ *
+ * InpSysCaptFramesAcq   1/0  [3:0]   'b0000
+ *                            [7:4]   CaptPortId ('b0000 CaptA, 'b0001 CaptB, 'b0010 CaptC)
+ *                            [31:16] NOF_frames
+ *
+ * InpSysCaptFrameExt    2/0  [3:0]   'b0001
+ *                            [7:4]   CaptPortId ('b0000 CaptA, 'b0001 CaptB, 'b0010 CaptC)
+ *                       2/1  [31:0]  external capture address
+ *
+ * InpSysAcqFrame        2/0  [3:0]   'b0010
+ *                            [31:4]  NOF_ext_mem_words
+ *                       2/1  [31:0]  external memory read start address
+ *
+ * InpSysOverruleON      1/0  [3:0]   'b0011
+ *                            [7:4]   overrule port id (opid):
+ *                                    'b0000 CaptA, 'b0001 CaptB, 'b0010 CaptC,
+ *                                    'b0011 Acq, 'b0100 DMA
+ *
+ * InpSysOverruleOFF     1/0  [3:0]   'b0100
+ *                            [7:4]   overrule port id (opid), as above
+ *
+ * InpSysOverruleCmd     2/0  [3:0]   'b0101
+ *                            [7:4]   overrule port id (opid), as above
+ *                       2/1  [31:0]  command token value for port opid
+ *
+ * Acknowledge tokens:
+ *
+ * InpSysAckCFA          1/0  [3:0]   'b0000
+ *                            [7:4]   CaptPortId ('b0000 CaptA, 'b0001 CaptB, 'b0010 CaptC)
+ *                            [31:16] NOF_frames
+ *
+ * InpSysAckCFE          1/0  [3:0]   'b0001
+ *                            [7:4]   CaptPortId ('b0000 CaptA, 'b0001 CaptB, 'b0010 CaptC)
+ *
+ * InpSysAckAF           1/0  [3:0]   'b0010
+ *
+ * InpSysAckOverruleON   1/0  [3:0]   'b0011
+ *                            [7:4]   overrule port id (opid), as above
+ *
+ * InpSysAckOverruleOFF  1/0  [3:0]   'b0100
+ *                            [7:4]   overrule port id (opid), as above
+ *
+ * InpSysAckOverrule     2/0  [3:0]   'b0101
+ *                            [7:4]   overrule port id (opid), as above
+ *                       2/1  [31:0]  acknowledge token value from port opid
+ */
+
+
+/* Command and acknowledge token IDs */
+#define ISYS_CTRL_CAPT_FRAMES_ACQ_TOKEN_ID 0 /* 0000b */
+#define ISYS_CTRL_CAPT_FRAME_EXT_TOKEN_ID 1 /* 0001b */
+#define ISYS_CTRL_ACQ_FRAME_TOKEN_ID 2 /* 0010b */
+#define ISYS_CTRL_OVERRULE_ON_TOKEN_ID 3 /* 0011b */
+#define ISYS_CTRL_OVERRULE_OFF_TOKEN_ID 4 /* 0100b */
+#define ISYS_CTRL_OVERRULE_TOKEN_ID 5 /* 0101b */
+
+#define ISYS_CTRL_ACK_CFA_TOKEN_ID 0
+#define ISYS_CTRL_ACK_CFE_TOKEN_ID 1
+#define ISYS_CTRL_ACK_AF_TOKEN_ID 2
+#define ISYS_CTRL_ACK_OVERRULE_ON_TOKEN_ID 3
+#define ISYS_CTRL_ACK_OVERRULE_OFF_TOKEN_ID 4
+#define ISYS_CTRL_ACK_OVERRULE_TOKEN_ID 5
+#define ISYS_CTRL_ACK_DEVICE_ERROR_TOKEN_ID 6
+
+#define ISYS_CTRL_TOKEN_ID_MSB 3
+#define ISYS_CTRL_TOKEN_ID_LSB 0
+#define ISYS_CTRL_PORT_ID_TOKEN_MSB 7
+#define ISYS_CTRL_PORT_ID_TOKEN_LSB 4
+#define ISYS_CTRL_NOF_CAPT_TOKEN_MSB 31
+#define ISYS_CTRL_NOF_CAPT_TOKEN_LSB 16
+#define ISYS_CTRL_NOF_EXT_TOKEN_MSB 31
+#define ISYS_CTRL_NOF_EXT_TOKEN_LSB 8
+
+#define ISYS_CTRL_TOKEN_ID_IDX 0
+#define ISYS_CTRL_TOKEN_ID_BITS (ISYS_CTRL_TOKEN_ID_MSB - ISYS_CTRL_TOKEN_ID_LSB + 1)
+#define ISYS_CTRL_PORT_ID_IDX (ISYS_CTRL_TOKEN_ID_IDX + ISYS_CTRL_TOKEN_ID_BITS)
+#define ISYS_CTRL_PORT_ID_BITS (ISYS_CTRL_PORT_ID_TOKEN_MSB - ISYS_CTRL_PORT_ID_TOKEN_LSB +1)
+#define ISYS_CTRL_NOF_CAPT_IDX ISYS_CTRL_NOF_CAPT_TOKEN_LSB
+#define ISYS_CTRL_NOF_CAPT_BITS (ISYS_CTRL_NOF_CAPT_TOKEN_MSB - ISYS_CTRL_NOF_CAPT_TOKEN_LSB + 1)
+#define ISYS_CTRL_NOF_EXT_IDX ISYS_CTRL_NOF_EXT_TOKEN_LSB
+#define ISYS_CTRL_NOF_EXT_BITS (ISYS_CTRL_NOF_EXT_TOKEN_MSB - ISYS_CTRL_NOF_EXT_TOKEN_LSB + 1)
+
+#define ISYS_CTRL_PORT_ID_CAPT_A 0 /* device ID for capture unit A */
+#define ISYS_CTRL_PORT_ID_CAPT_B 1 /* device ID for capture unit B */
+#define ISYS_CTRL_PORT_ID_CAPT_C 2 /* device ID for capture unit C */
+#define ISYS_CTRL_PORT_ID_ACQUISITION 3 /* device ID for acquisition unit */
+#define ISYS_CTRL_PORT_ID_DMA_CAPT_A 4 /* device ID for dma unit */
+#define ISYS_CTRL_PORT_ID_DMA_CAPT_B 5 /* device ID for dma unit */
+#define ISYS_CTRL_PORT_ID_DMA_CAPT_C 6 /* device ID for dma unit */
+#define ISYS_CTRL_PORT_ID_DMA_ACQ 7 /* device ID for dma unit */
+
+#define ISYS_CTRL_NO_ACQ_ACK 16 /* no ack from acquisition unit */
+#define ISYS_CTRL_NO_DMA_ACK 0
+#define ISYS_CTRL_NO_CAPT_ACK 16
+
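+/*
+ * Illustrative sketch (hypothetical helper, not part of the existing API):
+ * assembling a command token word from the fields above, here a
+ * capture-frames command for capture port A.
+ */
+#define ISYS_CTRL_MAKE_CAPT_FRAMES_ACQ_TOKEN(nof_frames) \
+	((ISYS_CTRL_CAPT_FRAMES_ACQ_TOKEN_ID << ISYS_CTRL_TOKEN_ID_LSB) | \
+	 (ISYS_CTRL_PORT_ID_CAPT_A << ISYS_CTRL_PORT_ID_TOKEN_LSB) | \
+	 ((nof_frames) << ISYS_CTRL_NOF_CAPT_TOKEN_LSB))
+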
+#endif /* _input_system_ctrl_defs_h */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/input_system_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/input_system_defs.h
new file mode 100644
index 000000000000..ae62163034a6
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/input_system_defs.h
@@ -0,0 +1,126 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _input_system_defs_h
+#define _input_system_defs_h
+
+/* csi controller modes */
+#define HIVE_CSI_CONFIG_MAIN 0
+#define HIVE_CSI_CONFIG_STEREO1 4
+#define HIVE_CSI_CONFIG_STEREO2 8
+
+/* general purpose register IDs */
+
+/* Stream Multicast select modes */
+#define HIVE_ISYS_GPREG_MULTICAST_A_IDX 0
+#define HIVE_ISYS_GPREG_MULTICAST_B_IDX 1
+#define HIVE_ISYS_GPREG_MULTICAST_C_IDX 2
+
+/* Stream Mux select modes */
+#define HIVE_ISYS_GPREG_MUX_IDX 3
+
+/* streaming monitor status and control */
+#define HIVE_ISYS_GPREG_STRMON_STAT_IDX 4
+#define HIVE_ISYS_GPREG_STRMON_COND_IDX 5
+#define HIVE_ISYS_GPREG_STRMON_IRQ_EN_IDX 6
+#define HIVE_ISYS_GPREG_SRST_IDX 7
+#define HIVE_ISYS_GPREG_SLV_REG_SRST_IDX 8
+#define HIVE_ISYS_GPREG_REG_PORT_A_IDX 9
+#define HIVE_ISYS_GPREG_REG_PORT_B_IDX 10
+
+/* Bit numbers of the soft reset register */
+#define HIVE_ISYS_GPREG_SRST_CAPT_FIFO_A_BIT 0
+#define HIVE_ISYS_GPREG_SRST_CAPT_FIFO_B_BIT 1
+#define HIVE_ISYS_GPREG_SRST_CAPT_FIFO_C_BIT 2
+#define HIVE_ISYS_GPREG_SRST_MULTICAST_A_BIT 3
+#define HIVE_ISYS_GPREG_SRST_MULTICAST_B_BIT 4
+#define HIVE_ISYS_GPREG_SRST_MULTICAST_C_BIT 5
+#define HIVE_ISYS_GPREG_SRST_CAPT_A_BIT 6
+#define HIVE_ISYS_GPREG_SRST_CAPT_B_BIT 7
+#define HIVE_ISYS_GPREG_SRST_CAPT_C_BIT 8
+#define HIVE_ISYS_GPREG_SRST_ACQ_BIT 9
+/* For ISYS_CTRL, 5 bits are defined to allow soft reset per sub-controller and top-ctrl */
+#define HIVE_ISYS_GPREG_SRST_ISYS_CTRL_BIT 10 /* LSB of the 5-bit vector */
+#define HIVE_ISYS_GPREG_SRST_ISYS_CTRL_CAPT_A_BIT 10
+#define HIVE_ISYS_GPREG_SRST_ISYS_CTRL_CAPT_B_BIT 11
+#define HIVE_ISYS_GPREG_SRST_ISYS_CTRL_CAPT_C_BIT 12
+#define HIVE_ISYS_GPREG_SRST_ISYS_CTRL_ACQ_BIT 13
+#define HIVE_ISYS_GPREG_SRST_ISYS_CTRL_TOP_BIT 14
+/* -- */
+#define HIVE_ISYS_GPREG_SRST_STR_MUX_BIT 15
+#define HIVE_ISYS_GPREG_SRST_CIO2AHB_BIT 16
+#define HIVE_ISYS_GPREG_SRST_GEN_SHORT_FIFO_BIT 17
+#define HIVE_ISYS_GPREG_SRST_WIDE_BUS_BIT 18 /* includes CIO conv */
+#define HIVE_ISYS_GPREG_SRST_DMA_BIT 19
+#define HIVE_ISYS_GPREG_SRST_SF_CTRL_CAPT_A_BIT 20
+#define HIVE_ISYS_GPREG_SRST_SF_CTRL_CAPT_B_BIT 21
+#define HIVE_ISYS_GPREG_SRST_SF_CTRL_CAPT_C_BIT 22
+#define HIVE_ISYS_GPREG_SRST_SF_CTRL_ACQ_BIT 23
+#define HIVE_ISYS_GPREG_SRST_CSI_BE_OUT_BIT 24
+
+#define HIVE_ISYS_GPREG_SLV_REG_SRST_CAPT_A_BIT 0
+#define HIVE_ISYS_GPREG_SLV_REG_SRST_CAPT_B_BIT 1
+#define HIVE_ISYS_GPREG_SLV_REG_SRST_CAPT_C_BIT 2
+#define HIVE_ISYS_GPREG_SLV_REG_SRST_ACQ_BIT 3
+#define HIVE_ISYS_GPREG_SLV_REG_SRST_DMA_BIT 4
+#define HIVE_ISYS_GPREG_SLV_REG_SRST_ISYS_CTRL_BIT 5
+
+/* streaming monitor port IDs */
+#define HIVE_ISYS_STR_MON_PORT_CAPA 0
+#define HIVE_ISYS_STR_MON_PORT_CAPB 1
+#define HIVE_ISYS_STR_MON_PORT_CAPC 2
+#define HIVE_ISYS_STR_MON_PORT_ACQ 3
+#define HIVE_ISYS_STR_MON_PORT_CSS_GENSH 4
+#define HIVE_ISYS_STR_MON_PORT_SF_GENSH 5
+#define HIVE_ISYS_STR_MON_PORT_SP2ISYS 6
+#define HIVE_ISYS_STR_MON_PORT_ISYS2SP 7
+#define HIVE_ISYS_STR_MON_PORT_PIXA 8
+#define HIVE_ISYS_STR_MON_PORT_PIXB 9
+
+/* interrupt bit IDs */
+#define HIVE_ISYS_IRQ_CSI_SOF_BIT_ID 0
+#define HIVE_ISYS_IRQ_CSI_EOF_BIT_ID 1
+#define HIVE_ISYS_IRQ_CSI_SOL_BIT_ID 2
+#define HIVE_ISYS_IRQ_CSI_EOL_BIT_ID 3
+#define HIVE_ISYS_IRQ_CSI_RECEIVER_BIT_ID 4
+#define HIVE_ISYS_IRQ_CSI_RECEIVER_BE_BIT_ID 5
+#define HIVE_ISYS_IRQ_CAP_UNIT_A_NO_SOP 6
+#define HIVE_ISYS_IRQ_CAP_UNIT_A_LATE_SOP 7
+/*#define HIVE_ISYS_IRQ_CAP_UNIT_A_UNDEF_PH 7*/
+#define HIVE_ISYS_IRQ_CAP_UNIT_B_NO_SOP 8
+#define HIVE_ISYS_IRQ_CAP_UNIT_B_LATE_SOP 9
+/*#define HIVE_ISYS_IRQ_CAP_UNIT_B_UNDEF_PH 10*/
+#define HIVE_ISYS_IRQ_CAP_UNIT_C_NO_SOP 10
+#define HIVE_ISYS_IRQ_CAP_UNIT_C_LATE_SOP 11
+/*#define HIVE_ISYS_IRQ_CAP_UNIT_C_UNDEF_PH 13*/
+#define HIVE_ISYS_IRQ_ACQ_UNIT_SOP_MISMATCH 12
+/*#define HIVE_ISYS_IRQ_ACQ_UNIT_UNDEF_PH 15*/
+#define HIVE_ISYS_IRQ_INP_CTRL_CAPA 13
+#define HIVE_ISYS_IRQ_INP_CTRL_CAPB 14
+#define HIVE_ISYS_IRQ_INP_CTRL_CAPC 15
+#define HIVE_ISYS_IRQ_CIO2AHB 16
+#define HIVE_ISYS_IRQ_DMA_BIT_ID 17
+#define HIVE_ISYS_IRQ_STREAM_MON_BIT_ID 18
+#define HIVE_ISYS_IRQ_NUM_BITS 19
+
+/* DMA */
+#define HIVE_ISYS_DMA_CHANNEL 0
+#define HIVE_ISYS_DMA_IBUF_DDR_CONN 0
+#define HIVE_ISYS_DMA_HEIGHT 1
+#define HIVE_ISYS_DMA_ELEMS 1 /* both master buses of same width */
+#define HIVE_ISYS_DMA_STRIDE 0 /* no stride required as height is fixed to 1 */
+#define HIVE_ISYS_DMA_CROP 0 /* no cropping */
+#define HIVE_ISYS_DMA_EXTENSION 0 /* no extension as elem width is same on both sides */
+
+#endif /* _input_system_defs_h */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/irq_controller_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/irq_controller_defs.h
new file mode 100644
index 000000000000..ec6dd4487158
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/irq_controller_defs.h
@@ -0,0 +1,28 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _irq_controller_defs_h
+#define _irq_controller_defs_h
+
+#define _HRT_IRQ_CONTROLLER_EDGE_REG_IDX 0
+#define _HRT_IRQ_CONTROLLER_MASK_REG_IDX 1
+#define _HRT_IRQ_CONTROLLER_STATUS_REG_IDX 2
+#define _HRT_IRQ_CONTROLLER_CLEAR_REG_IDX 3
+#define _HRT_IRQ_CONTROLLER_ENABLE_REG_IDX 4
+#define _HRT_IRQ_CONTROLLER_EDGE_NOT_PULSE_REG_IDX 5
+#define _HRT_IRQ_CONTROLLER_STR_OUT_ENABLE_REG_IDX 6
+
+#define _HRT_IRQ_CONTROLLER_REG_ALIGN 4
+
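+/*
+ * Illustrative note: the *_REG_IDX values above appear to be word indices,
+ * so a register's byte offset is index * _HRT_IRQ_CONTROLLER_REG_ALIGN,
+ * e.g. the status register sits at 2 * 4 = byte offset 8 from the
+ * controller base.
+ */
+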
+#endif /* _irq_controller_defs_h */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/isp2400_support.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/isp2400_support.h
new file mode 100644
index 000000000000..e00bc841d0f0
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/isp2400_support.h
@@ -0,0 +1,38 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _isp2400_support_h
+#define _isp2400_support_h
+
+#ifndef ISP2400_VECTOR_TYPES
+/* These typedefs make it possible to include hive header files
+   in the host code, which is useful in crun */
+typedef char *tmemvectors, *tmemvectoru, *tvector;
+#endif
+
+#define hrt_isp_vamem1_store_16(cell, addr, val) hrt_mem_store_16(cell, HRT_PROC_TYPE_PROP(cell, _simd_vamem1), addr, val)
+#define hrt_isp_vamem2_store_16(cell, addr, val) hrt_mem_store_16(cell, HRT_PROC_TYPE_PROP(cell, _simd_vamem2), addr, val)
+
+#define hrt_isp_dmem(cell) HRT_PROC_TYPE_PROP(cell, _base_dmem)
+#define hrt_isp_vmem(cell) HRT_PROC_TYPE_PROP(cell, _simd_vmem)
+
+#define hrt_isp_dmem_master_port_address(cell) hrt_mem_master_port_address(cell, hrt_isp_dmem(cell))
+#define hrt_isp_vmem_master_port_address(cell) hrt_mem_master_port_address(cell, hrt_isp_vmem(cell))
+
+#if ISP_HAS_HIST
+ #define hrt_isp_hist(cell) HRT_PROC_TYPE_PROP(cell, _simd_histogram)
+ #define hrt_isp_hist_master_port_address(cell) hrt_mem_master_port_address(cell, hrt_isp_hist(cell))
+#endif
+
+#endif /* _isp2400_support_h */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/isp2401_mamoiada_params.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/isp2401_mamoiada_params.h
new file mode 100644
index 000000000000..033e23bcf672
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/isp2401_mamoiada_params.h
@@ -0,0 +1,258 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+/* Version */
+#define RTL_VERSION
+
+/* Cell name */
+#define ISP_CELL_TYPE isp2401_mamoiada
+#define ISP_VMEM simd_vmem
+#define _HRT_ISP_VMEM isp2401_mamoiada_simd_vmem
+
+/* instruction pipeline depth */
+#define ISP_BRANCHDELAY 5
+
+/* bus */
+#define ISP_BUS_WIDTH 32
+#define ISP_BUS_ADDR_WIDTH 32
+#define ISP_BUS_BURST_SIZE 1
+
+/* data-path */
+#define ISP_SCALAR_WIDTH 32
+#define ISP_SLICE_NELEMS 4
+#define ISP_VEC_NELEMS 64
+#define ISP_VEC_ELEMBITS 14
+#define ISP_VEC_ELEM8BITS 16
+#define ISP_CLONE_DATAPATH_IS_16 1
+
+/* memories */
+#define ISP_DMEM_DEPTH 4096
+#define ISP_DMEM_BSEL_DOWNSAMPLE 8
+#define ISP_VMEM_DEPTH 3072
+#define ISP_VMEM_BSEL_DOWNSAMPLE 8
+#define ISP_VMEM_ELEMBITS 14
+#define ISP_VMEM_ELEM_PRECISION 14
+#define ISP_VMEM_IS_BAMEM 1
+#if ISP_VMEM_IS_BAMEM
+ #define ISP_VMEM_BAMEM_MAX_BOI_HEIGHT 8
+ #define ISP_VMEM_BAMEM_LATENCY 5
+ #define ISP_VMEM_BAMEM_BANK_NARROWING_FACTOR 2
+ #define ISP_VMEM_BAMEM_NR_DATA_PLANES 8
+ #define ISP_VMEM_BAMEM_NR_CFG_REGISTERS 16
+ #define ISP_VMEM_BAMEM_LININT 0
+ #define ISP_VMEM_BAMEM_DAP_BITS 3
+ #define ISP_VMEM_BAMEM_LININT_FRAC_BITS 0
+ #define ISP_VMEM_BAMEM_PID_BITS 3
+ #define ISP_VMEM_BAMEM_OFFSET_BITS 19
+ #define ISP_VMEM_BAMEM_ADDRESS_BITS 25
+ #define ISP_VMEM_BAMEM_RID_BITS 4
+ #define ISP_VMEM_BAMEM_TRANSPOSITION 1
+ #define ISP_VMEM_BAMEM_VEC_PLUS_SLICE 1
+ #define ISP_VMEM_BAMEM_ARB_SERVICE_CYCLE_BITS 1
+ #define ISP_VMEM_BAMEM_LUT_ELEMS 16
+ #define ISP_VMEM_BAMEM_LUT_ADDR_WIDTH 14
+ #define ISP_VMEM_BAMEM_HALF_BLOCK_WRITE 1
+ #define ISP_VMEM_BAMEM_SMART_FETCH 1
+ #define ISP_VMEM_BAMEM_BIG_ENDIANNESS 0
+#endif /* ISP_VMEM_IS_BAMEM */
+#define ISP_PMEM_DEPTH 2048
+#define ISP_PMEM_WIDTH 640
+#define ISP_VAMEM_ADDRESS_BITS 12
+#define ISP_VAMEM_ELEMBITS 12
+#define ISP_VAMEM_DEPTH 2048
+#define ISP_VAMEM_ALIGNMENT 2
+#define ISP_VA_ADDRESS_WIDTH 896
+#define ISP_VEC_VALSU_LATENCY ISP_VEC_NELEMS
+#define ISP_HIST_ADDRESS_BITS 12
+#define ISP_HIST_ALIGNMENT 4
+#define ISP_HIST_COMP_IN_PREC 12
+#define ISP_HIST_DEPTH 1024
+#define ISP_HIST_WIDTH 24
+#define ISP_HIST_COMPONENTS 4
+
+/* program counter */
+#define ISP_PC_WIDTH 13
+
+/* Template switches */
+#define ISP_SHIELD_INPUT_DMEM 0
+#define ISP_SHIELD_OUTPUT_DMEM 1
+#define ISP_SHIELD_INPUT_VMEM 0
+#define ISP_SHIELD_OUTPUT_VMEM 0
+#define ISP_SHIELD_INPUT_PMEM 1
+#define ISP_SHIELD_OUTPUT_PMEM 1
+#define ISP_SHIELD_INPUT_HIST 1
+#define ISP_SHIELD_OUTPUT_HIST 1
+/* When the LUT is selected, the shielding is always on */
+#define ISP_SHIELD_INPUT_VAMEM 1
+#define ISP_SHIELD_OUTPUT_VAMEM 1
+
+#define ISP_HAS_IRQ 1
+#define ISP_HAS_SOFT_RESET 1
+#define ISP_HAS_VEC_DIV 0
+#define ISP_HAS_VFU_W_2O 1
+#define ISP_HAS_DEINT3 1
+#define ISP_HAS_LUT 1
+#define ISP_HAS_HIST 1
+#define ISP_HAS_VALSU 1
+#define ISP_HAS_3rdVALSU 1
+#define ISP_VRF1_HAS_2P 1
+
+#define ISP_SRU_GUARDING 1
+#define ISP_VLSU_GUARDING 1
+
+#define ISP_VRF_RAM 1
+#define ISP_SRF_RAM 1
+
+#define ISP_SPLIT_VMUL_VADD_IS 0
+#define ISP_RFSPLIT_FPGA 0
+
+/* RSN or Bus pipelining */
+#define ISP_RSN_PIPE 1
+#define ISP_VSF_BUS_PIPE 0
+
+/* extra slave port to vmem */
+#define ISP_IF_VMEM 0
+#define ISP_GDC_VMEM 0
+
+/* Streaming ports */
+#define ISP_IF 1
+#define ISP_IF_B 1
+#define ISP_GDC 1
+#define ISP_SCL 1
+#define ISP_GPFIFO 1
+#define ISP_SP 1
+
+/* Removing Issue Slot(s) */
+#define ISP_HAS_NOT_SIMD_IS2 0
+#define ISP_HAS_NOT_SIMD_IS3 0
+#define ISP_HAS_NOT_SIMD_IS4 0
+#define ISP_HAS_NOT_SIMD_IS4_VADD 0
+#define ISP_HAS_NOT_SIMD_IS5 0
+#define ISP_HAS_NOT_SIMD_IS6 0
+#define ISP_HAS_NOT_SIMD_IS7 0
+#define ISP_HAS_NOT_SIMD_IS8 0
+
+/* ICache */
+#define ISP_ICACHE 1
+#define ISP_ICACHE_ONLY 0
+#define ISP_ICACHE_PREFETCH 1
+#define ISP_ICACHE_INDEX_BITS 8
+#define ISP_ICACHE_SET_BITS 5
+#define ISP_ICACHE_BLOCKS_PER_SET_BITS 1
+
+/* Experimental Flags */
+#define ISP_EXP_1 0
+#define ISP_EXP_2 0
+#define ISP_EXP_3 0
+#define ISP_EXP_4 0
+#define ISP_EXP_5 0
+#define ISP_EXP_6 0
+
+/* Derived values */
+#define ISP_LOG2_PMEM_WIDTH 10
+#define ISP_VEC_WIDTH 896
+#define ISP_SLICE_WIDTH 56
+#define ISP_VMEM_WIDTH 896
+#define ISP_VMEM_ALIGN 128
+#if ISP_VMEM_IS_BAMEM
+ #define ISP_VMEM_ALIGN_ELEM 2
+#endif /* ISP_VMEM_IS_BAMEM */
+#define ISP_SIMDLSU 1
+#define ISP_LSU_IMM_BITS 12
+
+/* convenient shortcuts for software */
+#define ISP_NWAY ISP_VEC_NELEMS
+#define NBITS ISP_VEC_ELEMBITS
+
+#define _isp_ceil_div(a,b) (((a)+(b)-1)/(b))
+
+#ifdef C_RUN
+#define ISP_VEC_ALIGN (_isp_ceil_div(ISP_VEC_WIDTH, 64)*8)
+#else
+#define ISP_VEC_ALIGN ISP_VMEM_ALIGN
+#endif
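+
+/*
+ * Worked example: with ISP_VEC_WIDTH = 896, _isp_ceil_div(896, 64) =
+ * (896 + 64 - 1) / 64 = 14, so under C_RUN ISP_VEC_ALIGN = 14 * 8 = 112;
+ * otherwise ISP_VEC_ALIGN falls back to ISP_VMEM_ALIGN = 128.
+ */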
+
+/* HRT specific vector support */
+#define isp2401_mamoiada_vector_alignment ISP_VEC_ALIGN
+#define isp2401_mamoiada_vector_elem_bits ISP_VMEM_ELEMBITS
+#define isp2401_mamoiada_vector_elem_precision ISP_VMEM_ELEM_PRECISION
+#define isp2401_mamoiada_vector_num_elems ISP_VEC_NELEMS
+
+/* register file sizes */
+#define ISP_RF0_SIZE 64
+#define ISP_RF1_SIZE 16
+#define ISP_RF2_SIZE 64
+#define ISP_RF3_SIZE 4
+#define ISP_RF4_SIZE 64
+#define ISP_RF5_SIZE 16
+#define ISP_RF6_SIZE 16
+#define ISP_RF7_SIZE 16
+#define ISP_RF8_SIZE 16
+#define ISP_RF9_SIZE 16
+#define ISP_RF10_SIZE 16
+#define ISP_RF11_SIZE 16
+#define ISP_VRF1_SIZE 32
+#define ISP_VRF2_SIZE 32
+#define ISP_VRF3_SIZE 32
+#define ISP_VRF4_SIZE 32
+#define ISP_VRF5_SIZE 32
+#define ISP_VRF6_SIZE 32
+#define ISP_VRF7_SIZE 32
+#define ISP_VRF8_SIZE 32
+#define ISP_SRF1_SIZE 4
+#define ISP_SRF2_SIZE 64
+#define ISP_SRF3_SIZE 64
+#define ISP_SRF4_SIZE 32
+#define ISP_SRF5_SIZE 64
+#define ISP_FRF0_SIZE 16
+#define ISP_FRF1_SIZE 4
+#define ISP_FRF2_SIZE 16
+#define ISP_FRF3_SIZE 4
+#define ISP_FRF4_SIZE 4
+#define ISP_FRF5_SIZE 8
+#define ISP_FRF6_SIZE 4
+/* register file read latency */
+#define ISP_VRF1_READ_LAT 1
+#define ISP_VRF2_READ_LAT 1
+#define ISP_VRF3_READ_LAT 1
+#define ISP_VRF4_READ_LAT 1
+#define ISP_VRF5_READ_LAT 1
+#define ISP_VRF6_READ_LAT 1
+#define ISP_VRF7_READ_LAT 1
+#define ISP_VRF8_READ_LAT 1
+#define ISP_SRF1_READ_LAT 1
+#define ISP_SRF2_READ_LAT 1
+#define ISP_SRF3_READ_LAT 1
+#define ISP_SRF4_READ_LAT 1
+#define ISP_SRF5_READ_LAT 1
+/* immediate sizes */
+#define ISP_IS1_IMM_BITS 14
+#define ISP_IS2_IMM_BITS 13
+#define ISP_IS3_IMM_BITS 14
+#define ISP_IS4_IMM_BITS 14
+#define ISP_IS5_IMM_BITS 9
+#define ISP_IS6_IMM_BITS 16
+#define ISP_IS7_IMM_BITS 9
+#define ISP_IS8_IMM_BITS 16
+#define ISP_IS9_IMM_BITS 11
+/* fifo depths */
+#define ISP_IF_FIFO_DEPTH 0
+#define ISP_IF_B_FIFO_DEPTH 0
+#define ISP_DMA_FIFO_DEPTH 0
+#define ISP_OF_FIFO_DEPTH 0
+#define ISP_GDC_FIFO_DEPTH 0
+#define ISP_SCL_FIFO_DEPTH 0
+#define ISP_GPFIFO_FIFO_DEPTH 0
+#define ISP_SP_FIFO_DEPTH 0
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/isp_acquisition_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/isp_acquisition_defs.h
new file mode 100644
index 000000000000..593620721627
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/isp_acquisition_defs.h
@@ -0,0 +1,234 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _isp_acquisition_defs_h
+#define _isp_acquisition_defs_h
+
+#define _ISP_ACQUISITION_REG_ALIGN 4 /* assuming 32 bit control bus width */
+#define _ISP_ACQUISITION_BYTES_PER_ELEM 4
+
+/* --------------------------------------------------*/
+
+#define NOF_ACQ_IRQS 1
+
+/* --------------------------------------------------*/
+/* FSM */
+/* --------------------------------------------------*/
+#define MEM2STREAM_FSM_STATE_BITS 2
+#define ACQ_SYNCHRONIZER_FSM_STATE_BITS 2
+
+/* --------------------------------------------------*/
+/* REGISTER INFO */
+/* --------------------------------------------------*/
+
+#define NOF_ACQ_REGS 12
+
+// Register IDs of MMIO slave accessible registers
+#define ACQ_START_ADDR_REG_ID 0
+#define ACQ_MEM_REGION_SIZE_REG_ID 1
+#define ACQ_NUM_MEM_REGIONS_REG_ID 2
+#define ACQ_INIT_REG_ID 3
+#define ACQ_RECEIVED_SHORT_PACKETS_REG_ID 4
+#define ACQ_RECEIVED_LONG_PACKETS_REG_ID 5
+#define ACQ_LAST_COMMAND_REG_ID 6
+#define ACQ_NEXT_COMMAND_REG_ID 7
+#define ACQ_LAST_ACKNOWLEDGE_REG_ID 8
+#define ACQ_NEXT_ACKNOWLEDGE_REG_ID 9
+#define ACQ_FSM_STATE_INFO_REG_ID 10
+#define ACQ_INT_CNTR_INFO_REG_ID 11
+
+// Register width
+#define ACQ_START_ADDR_REG_WIDTH 9
+#define ACQ_MEM_REGION_SIZE_REG_WIDTH 9
+#define ACQ_NUM_MEM_REGIONS_REG_WIDTH 9
+#define ACQ_INIT_REG_WIDTH 3
+#define ACQ_RECEIVED_SHORT_PACKETS_REG_WIDTH 32
+#define ACQ_RECEIVED_LONG_PACKETS_REG_WIDTH 32
+#define ACQ_LAST_COMMAND_REG_WIDTH 32
+#define ACQ_NEXT_COMMAND_REG_WIDTH 32
+#define ACQ_LAST_ACKNOWLEDGE_REG_WIDTH 32
+#define ACQ_NEXT_ACKNOWLEDGE_REG_WIDTH 32
+#define ACQ_FSM_STATE_INFO_REG_WIDTH           ((MEM2STREAM_FSM_STATE_BITS * 3) + (ACQ_SYNCHRONIZER_FSM_STATE_BITS * 3))
+#define ACQ_INT_CNTR_INFO_REG_WIDTH 32
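+/* With the 2-bit FSM state widths above, ACQ_FSM_STATE_INFO_REG_WIDTH evaluates to (2 * 3) + (2 * 3) = 12 */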
+
+/* register reset value */
+#define ACQ_START_ADDR_REG_RSTVAL 0
+#define ACQ_MEM_REGION_SIZE_REG_RSTVAL 128
+#define ACQ_NUM_MEM_REGIONS_REG_RSTVAL 3
+#define ACQ_INIT_REG_RSTVAL 0
+#define ACQ_RECEIVED_SHORT_PACKETS_REG_RSTVAL 0
+#define ACQ_RECEIVED_LONG_PACKETS_REG_RSTVAL 0
+#define ACQ_LAST_COMMAND_REG_RSTVAL 0
+#define ACQ_NEXT_COMMAND_REG_RSTVAL 0
+#define ACQ_LAST_ACKNOWLEDGE_REG_RSTVAL 0
+#define ACQ_NEXT_ACKNOWLEDGE_REG_RSTVAL 0
+#define ACQ_FSM_STATE_INFO_REG_RSTVAL 0
+#define ACQ_INT_CNTR_INFO_REG_RSTVAL 0
+
+/* bit definitions */
+#define ACQ_INIT_RST_REG_BIT 0
+#define ACQ_INIT_RESYNC_BIT 2
+#define ACQ_INIT_RST_IDX ACQ_INIT_RST_REG_BIT
+#define ACQ_INIT_RST_BITS 1
+#define ACQ_INIT_RESYNC_IDX ACQ_INIT_RESYNC_BIT
+#define ACQ_INIT_RESYNC_BITS 1
+
+/* --------------------------------------------------*/
+/* TOKEN INFO */
+/* --------------------------------------------------*/
+#define ACQ_TOKEN_ID_LSB 0
+#define ACQ_TOKEN_ID_MSB 3
+#define ACQ_TOKEN_WIDTH (ACQ_TOKEN_ID_MSB - ACQ_TOKEN_ID_LSB + 1) // 4
+#define ACQ_TOKEN_ID_IDX 0
+#define ACQ_TOKEN_ID_BITS ACQ_TOKEN_WIDTH
+#define ACQ_INIT_CMD_INIT_IDX 4
+#define ACQ_INIT_CMD_INIT_BITS 3
+#define ACQ_CMD_START_ADDR_IDX 4
+#define ACQ_CMD_START_ADDR_BITS 9
+#define ACQ_CMD_NOFWORDS_IDX 13
+#define ACQ_CMD_NOFWORDS_BITS 9
+#define ACQ_MEM_REGION_ID_IDX 22
+#define ACQ_MEM_REGION_ID_BITS 9
+#define ACQ_PACKET_LENGTH_TOKEN_MSB 21
+#define ACQ_PACKET_LENGTH_TOKEN_LSB 13
+#define ACQ_PACKET_DATA_FORMAT_ID_TOKEN_MSB 9
+#define ACQ_PACKET_DATA_FORMAT_ID_TOKEN_LSB 4
+#define ACQ_PACKET_CH_ID_TOKEN_MSB 11
+#define ACQ_PACKET_CH_ID_TOKEN_LSB 10
+#define ACQ_PACKET_MEM_REGION_ID_TOKEN_MSB 12 /* only for capt_end_of_packet_written */
+#define ACQ_PACKET_MEM_REGION_ID_TOKEN_LSB 4 /* only for capt_end_of_packet_written */
+
+
+/* Command tokens IDs */
+#define ACQ_READ_REGION_AUTO_INCR_TOKEN_ID 0 //0000b
+#define ACQ_READ_REGION_TOKEN_ID 1 //0001b
+#define ACQ_READ_REGION_SOP_TOKEN_ID 2 //0010b
+#define ACQ_INIT_TOKEN_ID 8 //1000b
+
+/* Acknowledge token IDs */
+#define ACQ_READ_REGION_ACK_TOKEN_ID 0 //0000b
+#define ACQ_END_OF_PACKET_TOKEN_ID 4 //0100b
+#define ACQ_END_OF_REGION_TOKEN_ID 5 //0101b
+#define ACQ_SOP_MISMATCH_TOKEN_ID 6 //0110b
+#define ACQ_UNDEF_PH_TOKEN_ID 7 //0111b
+
+#define ACQ_TOKEN_MEMREGIONID_MSB 30
+#define ACQ_TOKEN_MEMREGIONID_LSB 22
+#define ACQ_TOKEN_NOFWORDS_MSB 21
+#define ACQ_TOKEN_NOFWORDS_LSB 13
+#define ACQ_TOKEN_STARTADDR_MSB 12
+#define ACQ_TOKEN_STARTADDR_LSB 4
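+
+/*
+ * Illustrative command-token layout, derived from the index/width
+ * definitions above (not an extra hardware definition):
+ *   bits [3:0]   token ID
+ *   bits [12:4]  start address
+ *   bits [21:13] number of words
+ *   bits [30:22] memory region ID
+ */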
+
+
+/* --------------------------------------------------*/
+/* MIPI */
+/* --------------------------------------------------*/
+
+#define WORD_COUNT_WIDTH 16
+#define PKT_CODE_WIDTH 6
+#define CHN_NO_WIDTH 2
+#define ERROR_INFO_WIDTH 8
+
+#define LONG_PKTCODE_MAX 63
+#define LONG_PKTCODE_MIN 16
+#define SHORT_PKTCODE_MAX 15
+
+#define EOF_CODE 1
+
+/* --------------------------------------------------*/
+/* Packet Info */
+/* --------------------------------------------------*/
+#define ACQ_START_OF_FRAME 0
+#define ACQ_END_OF_FRAME 1
+#define ACQ_START_OF_LINE 2
+#define ACQ_END_OF_LINE 3
+#define ACQ_LINE_PAYLOAD 4
+#define ACQ_GEN_SH_PKT 5
+
+
+/* bit definition */
+#define ACQ_PKT_TYPE_IDX 16
+#define ACQ_PKT_TYPE_BITS 6
+#define ACQ_PKT_SOP_IDX 32
+#define ACQ_WORD_CNT_IDX 0
+#define ACQ_WORD_CNT_BITS 16
+#define ACQ_PKT_INFO_IDX 16
+#define ACQ_PKT_INFO_BITS 8
+#define ACQ_HEADER_DATA_IDX 0
+#define ACQ_HEADER_DATA_BITS 16
+#define ACQ_ACK_TOKEN_ID_IDX ACQ_TOKEN_ID_IDX
+#define ACQ_ACK_TOKEN_ID_BITS ACQ_TOKEN_ID_BITS
+#define ACQ_ACK_NOFWORDS_IDX 13
+#define ACQ_ACK_NOFWORDS_BITS 9
+#define ACQ_ACK_PKT_LEN_IDX 4
+#define ACQ_ACK_PKT_LEN_BITS 16
+
+
+/* --------------------------------------------------*/
+/* Packet Data Type */
+/* --------------------------------------------------*/
+
+
+#define ACQ_YUV420_8_DATA 24 /* 01 1000 YUV420 8-bit */
+#define ACQ_YUV420_10_DATA 25 /* 01 1001 YUV420 10-bit */
+#define ACQ_YUV420_8L_DATA 26 /* 01 1010 YUV420 8-bit legacy */
+#define ACQ_YUV422_8_DATA 30 /* 01 1110 YUV422 8-bit */
+#define ACQ_YUV422_10_DATA 31 /* 01 1111 YUV422 10-bit */
+#define ACQ_RGB444_DATA 32 /* 10 0000 RGB444 */
+#define ACQ_RGB555_DATA 33 /* 10 0001 RGB555 */
+#define ACQ_RGB565_DATA 34 /* 10 0010 RGB565 */
+#define ACQ_RGB666_DATA 35 /* 10 0011 RGB666 */
+#define ACQ_RGB888_DATA 36 /* 10 0100 RGB888 */
+#define ACQ_RAW6_DATA 40 /* 10 1000 RAW6 */
+#define ACQ_RAW7_DATA 41 /* 10 1001 RAW7 */
+#define ACQ_RAW8_DATA 42 /* 10 1010 RAW8 */
+#define ACQ_RAW10_DATA 43 /* 10 1011 RAW10 */
+#define ACQ_RAW12_DATA 44 /* 10 1100 RAW12 */
+#define ACQ_RAW14_DATA 45 /* 10 1101 RAW14 */
+#define ACQ_USR_DEF_1_DATA 48 /* 11 0000 JPEG [User Defined 8-bit Data Type 1] */
+#define ACQ_USR_DEF_2_DATA 49 /* 11 0001 User Defined 8-bit Data Type 2 */
+#define ACQ_USR_DEF_3_DATA 50 /* 11 0010 User Defined 8-bit Data Type 3 */
+#define ACQ_USR_DEF_4_DATA 51 /* 11 0011 User Defined 8-bit Data Type 4 */
+#define ACQ_USR_DEF_5_DATA 52 /* 11 0100 User Defined 8-bit Data Type 5 */
+#define ACQ_USR_DEF_6_DATA 53 /* 11 0101 User Defined 8-bit Data Type 6 */
+#define ACQ_USR_DEF_7_DATA 54 /* 11 0110 User Defined 8-bit Data Type 7 */
+#define ACQ_USR_DEF_8_DATA 55 /* 11 0111 User Defined 8-bit Data Type 8 */
+#define ACQ_Emb_DATA 18 /* 01 0010 embedded eight bit non image data */
+#define ACQ_SOF_DATA 0 /* 00 0000 frame start */
+#define ACQ_EOF_DATA 1 /* 00 0001 frame end */
+#define ACQ_SOL_DATA 2 /* 00 0010 line start */
+#define ACQ_EOL_DATA 3 /* 00 0011 line end */
+#define ACQ_GEN_SH1_DATA 8 /* 00 1000 Generic Short Packet Code 1 */
+#define ACQ_GEN_SH2_DATA 9 /* 00 1001 Generic Short Packet Code 2 */
+#define ACQ_GEN_SH3_DATA 10 /* 00 1010 Generic Short Packet Code 3 */
+#define ACQ_GEN_SH4_DATA 11 /* 00 1011 Generic Short Packet Code 4 */
+#define ACQ_GEN_SH5_DATA 12 /* 00 1100 Generic Short Packet Code 5 */
+#define ACQ_GEN_SH6_DATA 13 /* 00 1101 Generic Short Packet Code 6 */
+#define ACQ_GEN_SH7_DATA 14 /* 00 1110 Generic Short Packet Code 7 */
+#define ACQ_GEN_SH8_DATA 15 /* 00 1111 Generic Short Packet Code 8 */
+#define ACQ_YUV420_8_CSPS_DATA 28 /* 01 1100 YUV420 8-bit (Chroma Shifted Pixel Sampling) */
+#define ACQ_YUV420_10_CSPS_DATA 29 /* 01 1101 YUV420 10-bit (Chroma Shifted Pixel Sampling) */
+#define ACQ_RESERVED_DATA_TYPE_MIN 56
+#define ACQ_RESERVED_DATA_TYPE_MAX 63
+#define ACQ_GEN_LONG_RESERVED_DATA_TYPE_MIN 19
+#define ACQ_GEN_LONG_RESERVED_DATA_TYPE_MAX 23
+#define ACQ_YUV_RESERVED_DATA_TYPE 27
+#define ACQ_RGB_RESERVED_DATA_TYPE_MIN 37
+#define ACQ_RGB_RESERVED_DATA_TYPE_MAX 39
+#define ACQ_RAW_RESERVED_DATA_TYPE_MIN 46
+#define ACQ_RAW_RESERVED_DATA_TYPE_MAX 47
+
+/* --------------------------------------------------*/
+
+#endif /* _isp_acquisition_defs_h */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/isp_capture_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/isp_capture_defs.h
new file mode 100644
index 000000000000..aa413df022f2
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/isp_capture_defs.h
@@ -0,0 +1,310 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _isp_capture_defs_h
+#define _isp_capture_defs_h
+
+#define _ISP_CAPTURE_REG_ALIGN 4 /* assuming 32 bit control bus width */
+#define _ISP_CAPTURE_BITS_PER_ELEM 32 /* only for data, not SOP */
+#define _ISP_CAPTURE_BYTES_PER_ELEM     (_ISP_CAPTURE_BITS_PER_ELEM / 8)
+#define _ISP_CAPTURE_BYTES_PER_WORD     32 /* 256/8 */
+#define _ISP_CAPTURE_ELEM_PER_WORD      (_ISP_CAPTURE_BYTES_PER_WORD / _ISP_CAPTURE_BYTES_PER_ELEM)
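+/* With the values above, _ISP_CAPTURE_ELEM_PER_WORD evaluates to 32 / 4 = 8 elements per 256-bit word */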
+
+//#define CAPT_RCV_ACK 1
+//#define CAPT_WRT_ACK 2
+//#define CAPT_IRQ_ACK 3
+
+/* --------------------------------------------------*/
+
+#define NOF_IRQS 2
+
+/* --------------------------------------------------*/
+/* REGISTER INFO */
+/* --------------------------------------------------*/
+
+// Number of registers
+#define CAPT_NOF_REGS 16
+
+// Register IDs of MMIO slave accessible registers
+#define CAPT_START_MODE_REG_ID 0
+#define CAPT_START_ADDR_REG_ID 1
+#define CAPT_MEM_REGION_SIZE_REG_ID 2
+#define CAPT_NUM_MEM_REGIONS_REG_ID 3
+#define CAPT_INIT_REG_ID 4
+#define CAPT_START_REG_ID 5
+#define CAPT_STOP_REG_ID 6
+
+#define CAPT_PACKET_LENGTH_REG_ID 7
+#define CAPT_RECEIVED_LENGTH_REG_ID 8
+#define CAPT_RECEIVED_SHORT_PACKETS_REG_ID 9
+#define CAPT_RECEIVED_LONG_PACKETS_REG_ID 10
+#define CAPT_LAST_COMMAND_REG_ID 11
+#define CAPT_NEXT_COMMAND_REG_ID 12
+#define CAPT_LAST_ACKNOWLEDGE_REG_ID 13
+#define CAPT_NEXT_ACKNOWLEDGE_REG_ID 14
+#define CAPT_FSM_STATE_INFO_REG_ID 15
+
+// Register width
+#define CAPT_START_MODE_REG_WIDTH 1
+//#define CAPT_START_ADDR_REG_WIDTH 9
+//#define CAPT_MEM_REGION_SIZE_REG_WIDTH 9
+//#define CAPT_NUM_MEM_REGIONS_REG_WIDTH 9
+#define CAPT_INIT_REG_WIDTH (22 + 4)
+
+#define CAPT_START_REG_WIDTH 1
+#define CAPT_STOP_REG_WIDTH 1
+
+/* --------------------------------------------------*/
+/* FSM */
+/* --------------------------------------------------*/
+#define CAPT_WRITE2MEM_FSM_STATE_BITS 2
+#define CAPT_SYNCHRONIZER_FSM_STATE_BITS 3
+
+
+#define CAPT_PACKET_LENGTH_REG_WIDTH 17
+#define CAPT_RECEIVED_LENGTH_REG_WIDTH 17
+#define CAPT_RECEIVED_SHORT_PACKETS_REG_WIDTH 32
+#define CAPT_RECEIVED_LONG_PACKETS_REG_WIDTH 32
+#define CAPT_LAST_COMMAND_REG_WIDTH 32
+/* #define CAPT_NEXT_COMMAND_REG_WIDTH 32 */
+#define CAPT_LAST_ACKNOWLEDGE_REG_WIDTH 32
+#define CAPT_NEXT_ACKNOWLEDGE_REG_WIDTH 32
+#define CAPT_FSM_STATE_INFO_REG_WIDTH ((CAPT_WRITE2MEM_FSM_STATE_BITS * 3) + (CAPT_SYNCHRONIZER_FSM_STATE_BITS * 3))
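+/* With the FSM state widths above, CAPT_FSM_STATE_INFO_REG_WIDTH evaluates to (2 * 3) + (3 * 3) = 15 */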
+
+//#define CAPT_INIT_RESTART_MEM_ADDR_WIDTH 9
+//#define CAPT_INIT_RESTART_MEM_REGION_WIDTH 9
+
+/* register reset value */
+#define CAPT_START_MODE_REG_RSTVAL 0
+#define CAPT_START_ADDR_REG_RSTVAL 0
+#define CAPT_MEM_REGION_SIZE_REG_RSTVAL 128
+#define CAPT_NUM_MEM_REGIONS_REG_RSTVAL 3
+#define CAPT_INIT_REG_RSTVAL 0
+
+#define CAPT_START_REG_RSTVAL 0
+#define CAPT_STOP_REG_RSTVAL 0
+
+#define CAPT_PACKET_LENGTH_REG_RSTVAL 0
+#define CAPT_RECEIVED_LENGTH_REG_RSTVAL 0
+#define CAPT_RECEIVED_SHORT_PACKETS_REG_RSTVAL 0
+#define CAPT_RECEIVED_LONG_PACKETS_REG_RSTVAL 0
+#define CAPT_LAST_COMMAND_REG_RSTVAL 0
+#define CAPT_NEXT_COMMAND_REG_RSTVAL 0
+#define CAPT_LAST_ACKNOWLEDGE_REG_RSTVAL 0
+#define CAPT_NEXT_ACKNOWLEDGE_REG_RSTVAL 0
+#define CAPT_FSM_STATE_INFO_REG_RSTVAL 0
+
+/* bit definitions */
+#define CAPT_INIT_RST_REG_BIT 0
+#define CAPT_INIT_FLUSH_BIT 1
+#define CAPT_INIT_RESYNC_BIT 2
+#define CAPT_INIT_RESTART_BIT 3
+#define CAPT_INIT_RESTART_MEM_ADDR_LSB 4
+#define CAPT_INIT_RESTART_MEM_ADDR_MSB 14
+#define CAPT_INIT_RESTART_MEM_REGION_LSB 15
+#define CAPT_INIT_RESTART_MEM_REGION_MSB 25
+
+
+#define CAPT_INIT_RST_REG_IDX CAPT_INIT_RST_REG_BIT
+#define CAPT_INIT_RST_REG_BITS 1
+#define CAPT_INIT_FLUSH_IDX CAPT_INIT_FLUSH_BIT
+#define CAPT_INIT_FLUSH_BITS 1
+#define CAPT_INIT_RESYNC_IDX CAPT_INIT_RESYNC_BIT
+#define CAPT_INIT_RESYNC_BITS 1
+#define CAPT_INIT_RESTART_IDX CAPT_INIT_RESTART_BIT
+#define CAPT_INIT_RESTART_BITS 1
+#define CAPT_INIT_RESTART_MEM_ADDR_IDX CAPT_INIT_RESTART_MEM_ADDR_LSB
+#define CAPT_INIT_RESTART_MEM_ADDR_BITS (CAPT_INIT_RESTART_MEM_ADDR_MSB - CAPT_INIT_RESTART_MEM_ADDR_LSB + 1)
+#define CAPT_INIT_RESTART_MEM_REGION_IDX CAPT_INIT_RESTART_MEM_REGION_LSB
+#define CAPT_INIT_RESTART_MEM_REGION_BITS (CAPT_INIT_RESTART_MEM_REGION_MSB - CAPT_INIT_RESTART_MEM_REGION_LSB + 1)
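+
+/*
+ * Resulting INIT register layout: rst (bit 0), flush (1), resync (2),
+ * restart (3), restart mem addr [14:4], restart mem region [25:15];
+ * 26 bits in total, matching CAPT_INIT_REG_WIDTH = (22 + 4).
+ */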
+
+
+
+/* --------------------------------------------------*/
+/* TOKEN INFO */
+/* --------------------------------------------------*/
+#define CAPT_TOKEN_ID_LSB 0
+#define CAPT_TOKEN_ID_MSB 3
+#define CAPT_TOKEN_WIDTH (CAPT_TOKEN_ID_MSB - CAPT_TOKEN_ID_LSB + 1) /* 4 */
+
+/* Command tokens IDs */
+#define CAPT_START_TOKEN_ID 0 /* 0000b */
+#define CAPT_STOP_TOKEN_ID 1 /* 0001b */
+#define CAPT_FREEZE_TOKEN_ID 2 /* 0010b */
+#define CAPT_RESUME_TOKEN_ID 3 /* 0011b */
+#define CAPT_INIT_TOKEN_ID 8 /* 1000b */
+
+#define CAPT_START_TOKEN_BIT 0
+#define CAPT_STOP_TOKEN_BIT 0
+#define CAPT_FREEZE_TOKEN_BIT 0
+#define CAPT_RESUME_TOKEN_BIT 0
+#define CAPT_INIT_TOKEN_BIT 0
+
+/* Acknowledge token IDs */
+#define CAPT_END_OF_PACKET_RECEIVED_TOKEN_ID 0 /* 0000b */
+#define CAPT_END_OF_PACKET_WRITTEN_TOKEN_ID 1 /* 0001b */
+#define CAPT_END_OF_REGION_WRITTEN_TOKEN_ID 2 /* 0010b */
+#define CAPT_FLUSH_DONE_TOKEN_ID 3 /* 0011b */
+#define CAPT_PREMATURE_SOP_TOKEN_ID 4 /* 0100b */
+#define CAPT_MISSING_SOP_TOKEN_ID 5 /* 0101b */
+#define CAPT_UNDEF_PH_TOKEN_ID 6 /* 0110b */
+#define CAPT_STOP_ACK_TOKEN_ID 7 /* 0111b */
+
+#define CAPT_PACKET_LENGTH_TOKEN_MSB 19
+#define CAPT_PACKET_LENGTH_TOKEN_LSB 4
+#define CAPT_SUPER_PACKET_LENGTH_TOKEN_MSB 20
+#define CAPT_SUPER_PACKET_LENGTH_TOKEN_LSB 4
+#define CAPT_PACKET_DATA_FORMAT_ID_TOKEN_MSB 25
+#define CAPT_PACKET_DATA_FORMAT_ID_TOKEN_LSB 20
+#define CAPT_PACKET_CH_ID_TOKEN_MSB 27
+#define CAPT_PACKET_CH_ID_TOKEN_LSB 26
+#define CAPT_PACKET_MEM_REGION_ID_TOKEN_MSB 29
+#define CAPT_PACKET_MEM_REGION_ID_TOKEN_LSB 21
+
+/* bit definition */
+#define CAPT_CMD_IDX CAPT_TOKEN_ID_LSB
+#define CAPT_CMD_BITS (CAPT_TOKEN_ID_MSB - CAPT_TOKEN_ID_LSB + 1)
+#define CAPT_SOP_IDX 32
+#define CAPT_SOP_BITS 1
+#define CAPT_PKT_INFO_IDX 16
+#define CAPT_PKT_INFO_BITS 8
+#define CAPT_PKT_TYPE_IDX 0
+#define CAPT_PKT_TYPE_BITS 6
+#define CAPT_HEADER_DATA_IDX 0
+#define CAPT_HEADER_DATA_BITS 16
+#define CAPT_PKT_DATA_IDX 0
+#define CAPT_PKT_DATA_BITS 32
+#define CAPT_WORD_CNT_IDX 0
+#define CAPT_WORD_CNT_BITS 16
+#define CAPT_ACK_TOKEN_ID_IDX 0
+#define CAPT_ACK_TOKEN_ID_BITS 4
+//#define CAPT_ACK_PKT_LEN_IDX CAPT_PACKET_LENGTH_TOKEN_LSB
+//#define CAPT_ACK_PKT_LEN_BITS (CAPT_PACKET_LENGTH_TOKEN_MSB - CAPT_PACKET_LENGTH_TOKEN_LSB + 1)
+//#define CAPT_ACK_PKT_INFO_IDX 20
+//#define CAPT_ACK_PKT_INFO_BITS 8
+//#define CAPT_ACK_MEM_REG_ID1_IDX 20 /* for capt_end_of_packet_written */
+//#define CAPT_ACK_MEM_REG_ID2_IDX 4 /* for capt_end_of_region_written */
+#define CAPT_ACK_PKT_LEN_IDX CAPT_PACKET_LENGTH_TOKEN_LSB
+#define CAPT_ACK_PKT_LEN_BITS (CAPT_PACKET_LENGTH_TOKEN_MSB - CAPT_PACKET_LENGTH_TOKEN_LSB + 1)
+#define CAPT_ACK_SUPER_PKT_LEN_IDX CAPT_SUPER_PACKET_LENGTH_TOKEN_LSB
+#define CAPT_ACK_SUPER_PKT_LEN_BITS (CAPT_SUPER_PACKET_LENGTH_TOKEN_MSB - CAPT_SUPER_PACKET_LENGTH_TOKEN_LSB + 1)
+#define CAPT_ACK_PKT_INFO_IDX CAPT_PACKET_DATA_FORMAT_ID_TOKEN_LSB
+#define CAPT_ACK_PKT_INFO_BITS (CAPT_PACKET_CH_ID_TOKEN_MSB - CAPT_PACKET_DATA_FORMAT_ID_TOKEN_LSB + 1)
+#define CAPT_ACK_MEM_REGION_ID_IDX CAPT_PACKET_MEM_REGION_ID_TOKEN_LSB
+#define CAPT_ACK_MEM_REGION_ID_BITS (CAPT_PACKET_MEM_REGION_ID_TOKEN_MSB - CAPT_PACKET_MEM_REGION_ID_TOKEN_LSB + 1)
+#define CAPT_ACK_PKT_TYPE_IDX CAPT_PACKET_DATA_FORMAT_ID_TOKEN_LSB
+#define CAPT_ACK_PKT_TYPE_BITS (CAPT_PACKET_DATA_FORMAT_ID_TOKEN_MSB - CAPT_PACKET_DATA_FORMAT_ID_TOKEN_LSB + 1)
+#define CAPT_INIT_TOKEN_INIT_IDX 4
+#define CAPT_INIT_TOKEN_INIT_BITS 22
+
+
+/* --------------------------------------------------*/
+/* MIPI */
+/* --------------------------------------------------*/
+
+#define CAPT_WORD_COUNT_WIDTH 16
+#define CAPT_PKT_CODE_WIDTH 6
+#define CAPT_CHN_NO_WIDTH 2
+#define CAPT_ERROR_INFO_WIDTH 8
+
+#define LONG_PKTCODE_MAX 63
+#define LONG_PKTCODE_MIN 16
+#define SHORT_PKTCODE_MAX 15
+
+
+/* --------------------------------------------------*/
+/* Packet Info */
+/* --------------------------------------------------*/
+#define CAPT_START_OF_FRAME 0
+#define CAPT_END_OF_FRAME 1
+#define CAPT_START_OF_LINE 2
+#define CAPT_END_OF_LINE 3
+#define CAPT_LINE_PAYLOAD 4
+#define CAPT_GEN_SH_PKT 5
+
+
+/* --------------------------------------------------*/
+/* Packet Data Type */
+/* --------------------------------------------------*/
+
+#define CAPT_YUV420_8_DATA 24 /* 01 1000 YUV420 8-bit */
+#define CAPT_YUV420_10_DATA 25 /* 01 1001 YUV420 10-bit */
+#define CAPT_YUV420_8L_DATA 26 /* 01 1010 YUV420 8-bit legacy */
+#define CAPT_YUV422_8_DATA 30 /* 01 1110 YUV422 8-bit */
+#define CAPT_YUV422_10_DATA 31 /* 01 1111 YUV422 10-bit */
+#define CAPT_RGB444_DATA 32 /* 10 0000 RGB444 */
+#define CAPT_RGB555_DATA 33 /* 10 0001 RGB555 */
+#define CAPT_RGB565_DATA 34 /* 10 0010 RGB565 */
+#define CAPT_RGB666_DATA 35 /* 10 0011 RGB666 */
+#define CAPT_RGB888_DATA 36 /* 10 0100 RGB888 */
+#define CAPT_RAW6_DATA 40 /* 10 1000 RAW6 */
+#define CAPT_RAW7_DATA 41 /* 10 1001 RAW7 */
+#define CAPT_RAW8_DATA 42 /* 10 1010 RAW8 */
+#define CAPT_RAW10_DATA 43 /* 10 1011 RAW10 */
+#define CAPT_RAW12_DATA 44 /* 10 1100 RAW12 */
+#define CAPT_RAW14_DATA 45 /* 10 1101 RAW14 */
+#define CAPT_USR_DEF_1_DATA 48 /* 11 0000 JPEG [User Defined 8-bit Data Type 1] */
+#define CAPT_USR_DEF_2_DATA 49 /* 11 0001 User Defined 8-bit Data Type 2 */
+#define CAPT_USR_DEF_3_DATA 50 /* 11 0010 User Defined 8-bit Data Type 3 */
+#define CAPT_USR_DEF_4_DATA 51 /* 11 0011 User Defined 8-bit Data Type 4 */
+#define CAPT_USR_DEF_5_DATA 52 /* 11 0100 User Defined 8-bit Data Type 5 */
+#define CAPT_USR_DEF_6_DATA 53 /* 11 0101 User Defined 8-bit Data Type 6 */
+#define CAPT_USR_DEF_7_DATA 54 /* 11 0110 User Defined 8-bit Data Type 7 */
+#define CAPT_USR_DEF_8_DATA 55 /* 11 0111 User Defined 8-bit Data Type 8 */
+#define CAPT_Emb_DATA 18 /* 01 0010 embedded eight bit non image data */
+#define CAPT_SOF_DATA 0 /* 00 0000 frame start */
+#define CAPT_EOF_DATA 1 /* 00 0001 frame end */
+#define CAPT_SOL_DATA 2 /* 00 0010 line start */
+#define CAPT_EOL_DATA 3 /* 00 0011 line end */
+#define CAPT_GEN_SH1_DATA 8 /* 00 1000 Generic Short Packet Code 1 */
+#define CAPT_GEN_SH2_DATA 9 /* 00 1001 Generic Short Packet Code 2 */
+#define CAPT_GEN_SH3_DATA 10 /* 00 1010 Generic Short Packet Code 3 */
+#define CAPT_GEN_SH4_DATA 11 /* 00 1011 Generic Short Packet Code 4 */
+#define CAPT_GEN_SH5_DATA 12 /* 00 1100 Generic Short Packet Code 5 */
+#define CAPT_GEN_SH6_DATA 13 /* 00 1101 Generic Short Packet Code 6 */
+#define CAPT_GEN_SH7_DATA 14 /* 00 1110 Generic Short Packet Code 7 */
+#define CAPT_GEN_SH8_DATA 15 /* 00 1111 Generic Short Packet Code 8 */
+#define CAPT_YUV420_8_CSPS_DATA 28 /* 01 1100 YUV420 8-bit (Chroma Shifted Pixel Sampling) */
+#define CAPT_YUV420_10_CSPS_DATA 29 /* 01 1101 YUV420 10-bit (Chroma Shifted Pixel Sampling) */
+#define CAPT_RESERVED_DATA_TYPE_MIN 56
+#define CAPT_RESERVED_DATA_TYPE_MAX 63
+#define CAPT_GEN_LONG_RESERVED_DATA_TYPE_MIN 19
+#define CAPT_GEN_LONG_RESERVED_DATA_TYPE_MAX 23
+#define CAPT_YUV_RESERVED_DATA_TYPE 27
+#define CAPT_RGB_RESERVED_DATA_TYPE_MIN 37
+#define CAPT_RGB_RESERVED_DATA_TYPE_MAX 39
+#define CAPT_RAW_RESERVED_DATA_TYPE_MIN 46
+#define CAPT_RAW_RESERVED_DATA_TYPE_MAX 47
+
+
+/* --------------------------------------------------*/
+/* Capture Unit State */
+/* --------------------------------------------------*/
+#define CAPT_FREE_RUN 0
+#define CAPT_NO_SYNC 1
+#define CAPT_SYNC_SWP 2
+#define CAPT_SYNC_MWP 3
+#define CAPT_SYNC_WAIT 4
+#define CAPT_FREEZE 5
+#define CAPT_RUN 6
+
+
+/* --------------------------------------------------*/
+
+#endif /* _isp_capture_defs_h */
+
+
+
+
+
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/mipi_backend_common_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/mipi_backend_common_defs.h
new file mode 100644
index 000000000000..76705d7a2b44
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/mipi_backend_common_defs.h
@@ -0,0 +1,210 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _css_receiver_2400_common_defs_h_
+#define _css_receiver_2400_common_defs_h_
+#ifndef _mipi_backend_common_defs_h_
+#define _mipi_backend_common_defs_h_
+
+#define _HRT_CSS_RECEIVER_2400_GEN_SHORT_DATA_WIDTH 16
+#define _HRT_CSS_RECEIVER_2400_GEN_SHORT_CH_ID_WIDTH 2
+#define _HRT_CSS_RECEIVER_2400_GEN_SHORT_FMT_TYPE_WIDTH 3
+#define _HRT_CSS_RECEIVER_2400_GEN_SHORT_STR_REAL_WIDTH (_HRT_CSS_RECEIVER_2400_GEN_SHORT_DATA_WIDTH + _HRT_CSS_RECEIVER_2400_GEN_SHORT_CH_ID_WIDTH + _HRT_CSS_RECEIVER_2400_GEN_SHORT_FMT_TYPE_WIDTH)
+#define _HRT_CSS_RECEIVER_2400_GEN_SHORT_STR_WIDTH     32 /* use 32 to be compatible with the streaming monitor; the MSBs of the interface are tied to '0' */
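+/* _HRT_CSS_RECEIVER_2400_GEN_SHORT_STR_REAL_WIDTH evaluates to 16 + 2 + 3 = 21 bits, padded up to the 32-bit STR_WIDTH above */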
+
+/* Definition of data format ID at the interface CSS_receiver capture/acquisition units */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_YUV420_8 24 /* 01 1000 YUV420 8-bit */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_YUV420_10 25 /* 01 1001 YUV420 10-bit */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_YUV420_8L 26 /* 01 1010 YUV420 8-bit legacy */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_YUV422_8 30 /* 01 1110 YUV422 8-bit */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_YUV422_10 31 /* 01 1111 YUV422 10-bit */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RGB444 32 /* 10 0000 RGB444 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RGB555 33 /* 10 0001 RGB555 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RGB565 34 /* 10 0010 RGB565 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RGB666 35 /* 10 0011 RGB666 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RGB888 36 /* 10 0100 RGB888 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RAW6 40 /* 10 1000 RAW6 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RAW7 41 /* 10 1001 RAW7 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RAW8 42 /* 10 1010 RAW8 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RAW10 43 /* 10 1011 RAW10 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RAW12 44 /* 10 1100 RAW12 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RAW14 45 /* 10 1101 RAW14 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_USR_DEF_1 48 /* 11 0000 JPEG [User Defined 8-bit Data Type 1] */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_USR_DEF_2 49 /* 11 0001 User Defined 8-bit Data Type 2 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_USR_DEF_3 50 /* 11 0010 User Defined 8-bit Data Type 3 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_USR_DEF_4 51 /* 11 0011 User Defined 8-bit Data Type 4 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_USR_DEF_5 52 /* 11 0100 User Defined 8-bit Data Type 5 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_USR_DEF_6 53 /* 11 0101 User Defined 8-bit Data Type 6 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_USR_DEF_7 54 /* 11 0110 User Defined 8-bit Data Type 7 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_USR_DEF_8 55 /* 11 0111 User Defined 8-bit Data Type 8 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_Emb 18 /* 01 0010 embedded eight bit non image data */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_SOF 0 /* 00 0000 frame start */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_EOF 1 /* 00 0001 frame end */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_SOL 2 /* 00 0010 line start */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_EOL 3 /* 00 0011 line end */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_GEN_SH1 8 /* 00 1000 Generic Short Packet Code 1 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_GEN_SH2 9 /* 00 1001 Generic Short Packet Code 2 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_GEN_SH3 10 /* 00 1010 Generic Short Packet Code 3 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_GEN_SH4 11 /* 00 1011 Generic Short Packet Code 4 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_GEN_SH5 12 /* 00 1100 Generic Short Packet Code 5 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_GEN_SH6 13 /* 00 1101 Generic Short Packet Code 6 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_GEN_SH7 14 /* 00 1110 Generic Short Packet Code 7 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_GEN_SH8 15 /* 00 1111 Generic Short Packet Code 8 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_YUV420_8_CSPS 28 /* 01 1100 YUV420 8-bit (Chroma Shifted Pixel Sampling) */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_YUV420_10_CSPS 29 /* 01 1101 YUV420 10-bit (Chroma Shifted Pixel Sampling) */
+/* reserved MIPI positions are used for these */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RAW16 46
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RAW18 47
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RAW18_2 37
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RAW18_3 38
+
+//_HRT_CSS_RECEIVER_2400_FMT_TYPE_CUSTOM 63
+#define _HRT_MIPI_BACKEND_FMT_TYPE_CUSTOM 63
+
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_WIDTH 6
+
+/* Definition of format_types at the interface CSS --> input_selector */
+/* !! Changes here should be copied to systems/isp/isp_css/bin/conv_transmitter_cmd.tcl !! */
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RGB888 0 // 36 'h24
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RGB555 1 // 33 'h
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RGB444 2 // 32
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RGB565 3 // 34
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RGB666 4 // 35
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RAW8 5 // 42
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RAW10 6 // 43
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RAW6 7 // 40
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RAW7 8 // 41
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RAW12 9 // 43
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RAW14 10 // 45
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_YUV420_8 11 // 30
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_YUV420_10 12 // 25
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_YUV422_8 13 // 30
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_YUV422_10 14 // 31
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_USR_DEF_1 15 // 48
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_YUV420_8L 16 // 26
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_Emb 17 // 18
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_USR_DEF_2 18 // 49
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_USR_DEF_3 19 // 50
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_USR_DEF_4 20 // 51
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_USR_DEF_5 21 // 52
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_USR_DEF_6 22 // 53
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_USR_DEF_7 23 // 54
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_USR_DEF_8 24 // 55
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_YUV420_8_CSPS 25 // 28
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_YUV420_10_CSPS 26 // 29
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RAW16 27 // ?
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RAW18 28 // ?
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RAW18_2 29 // ? Option 2 for depacketiser
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RAW18_3 30 // ? Option 3 for depacketiser
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_CUSTOM 31 // to signal custom decoding
+
+/* definition for the state machine of the data FIFO used to decode the different data types */
+#define _HRT_CSS_RECEIVER_2400_YUV420_8_REPEAT_PTN 1
+#define _HRT_CSS_RECEIVER_2400_YUV420_10_REPEAT_PTN 5
+#define _HRT_CSS_RECEIVER_2400_YUV420_8L_REPEAT_PTN 1
+#define _HRT_CSS_RECEIVER_2400_YUV422_8_REPEAT_PTN 1
+#define _HRT_CSS_RECEIVER_2400_YUV422_10_REPEAT_PTN 5
+#define _HRT_CSS_RECEIVER_2400_RGB444_REPEAT_PTN 2
+#define _HRT_CSS_RECEIVER_2400_RGB555_REPEAT_PTN 2
+#define _HRT_CSS_RECEIVER_2400_RGB565_REPEAT_PTN 2
+#define _HRT_CSS_RECEIVER_2400_RGB666_REPEAT_PTN 9
+#define _HRT_CSS_RECEIVER_2400_RGB888_REPEAT_PTN 3
+#define _HRT_CSS_RECEIVER_2400_RAW6_REPEAT_PTN 3
+#define _HRT_CSS_RECEIVER_2400_RAW7_REPEAT_PTN 7
+#define _HRT_CSS_RECEIVER_2400_RAW8_REPEAT_PTN 1
+#define _HRT_CSS_RECEIVER_2400_RAW10_REPEAT_PTN 5
+#define _HRT_CSS_RECEIVER_2400_RAW12_REPEAT_PTN 3
+#define _HRT_CSS_RECEIVER_2400_RAW14_REPEAT_PTN 7
+
+#define _HRT_CSS_RECEIVER_2400_MAX_REPEAT_PTN _HRT_CSS_RECEIVER_2400_RGB666_REPEAT_PTN
+
+#define _HRT_CSS_RECEIVER_2400_BE_COMP_FMT_IDX 0
+#define _HRT_CSS_RECEIVER_2400_BE_COMP_FMT_WIDTH 3
+#define _HRT_CSS_RECEIVER_2400_BE_COMP_PRED_IDX 3
+#define _HRT_CSS_RECEIVER_2400_BE_COMP_PRED_WIDTH 1
+#define _HRT_CSS_RECEIVER_2400_BE_COMP_USD_BITS 4 /* bits per USD type */
+
+#define _HRT_CSS_RECEIVER_2400_BE_RAW16_DATAID_IDX 0
+#define _HRT_CSS_RECEIVER_2400_BE_RAW16_EN_IDX 6
+#define _HRT_CSS_RECEIVER_2400_BE_RAW18_DATAID_IDX 0
+#define _HRT_CSS_RECEIVER_2400_BE_RAW18_OPTION_IDX 6
+#define _HRT_CSS_RECEIVER_2400_BE_RAW18_EN_IDX 8
+
+#define _HRT_CSS_RECEIVER_2400_BE_COMP_NO_COMP 0
+#define _HRT_CSS_RECEIVER_2400_BE_COMP_10_6_10 1
+#define _HRT_CSS_RECEIVER_2400_BE_COMP_10_7_10 2
+#define _HRT_CSS_RECEIVER_2400_BE_COMP_10_8_10 3
+#define _HRT_CSS_RECEIVER_2400_BE_COMP_12_6_12 4
+#define _HRT_CSS_RECEIVER_2400_BE_COMP_12_7_12 5
+#define _HRT_CSS_RECEIVER_2400_BE_COMP_12_8_12 6
+
+
+/* packet bit definition */
+#define _HRT_CSS_RECEIVER_2400_PKT_SOP_IDX 32
+#define _HRT_CSS_RECEIVER_2400_PKT_SOP_BITS 1
+#define _HRT_CSS_RECEIVER_2400_PKT_CH_ID_IDX 22
+#define _HRT_CSS_RECEIVER_2400_PKT_CH_ID_BITS 2
+#define _HRT_CSS_RECEIVER_2400_PKT_FMT_ID_IDX 16
+#define _HRT_CSS_RECEIVER_2400_PKT_FMT_ID_BITS 6
+#define _HRT_CSS_RECEIVER_2400_PH_DATA_FIELD_IDX 0
+#define _HRT_CSS_RECEIVER_2400_PH_DATA_FIELD_BITS 16
+#define _HRT_CSS_RECEIVER_2400_PKT_PAYLOAD_IDX 0
+#define _HRT_CSS_RECEIVER_2400_PKT_PAYLOAD_BITS 32
+
+
+/*************************************************************************************************/
+/* Custom Decoding */
+/* These custom definitions are based on the design-time configuration in "mipi_backend_pixel_formatter.chdl" !! */
+/*************************************************************************************************/
+/*
+#define BE_CUST_EN_IDX 0 // 2bits
+#define BE_CUST_EN_DATAID_IDX 2 // 6bits MIPI DATA ID
+#define BE_CUST_EN_WIDTH 8
+#define BE_CUST_MODE_ALL 1 // Enable Custom Decoding for all DATA IDs
+#define BE_CUST_MODE_ONE 3 // Enable Custom Decoding for ONE DATA ID, programmed in CUST_EN_DATA_ID
+
+// Data State config = {get_bits(6bits), valid(1bit)} //
+#define BE_CUST_DATA_STATE_S0_IDX 0 // 7bits
+#define BE_CUST_DATA_STATE_S1_IDX 8 //7 // 7bits
+#define BE_CUST_DATA_STATE_S2_IDX 16//14 // 7bits /
+#define BE_CUST_DATA_STATE_WIDTH 24//21
+#define BE_CUST_DATA_STATE_VALID_IDX 0 // 1bits
+#define BE_CUST_DATA_STATE_GETBITS_IDX 1 // 6bits
+
+
+
+
+// Pixel Extractor config
+#define BE_CUST_PIX_EXT_DATA_ALIGN_IDX 0 // 6bits
+#define BE_CUST_PIX_EXT_PIX_ALIGN_IDX 6//5 // 5bits
+#define BE_CUST_PIX_EXT_PIX_MASK_IDX 11//10 // 18bits
+#define BE_CUST_PIX_EXT_PIX_EN_IDX 29 //28 // 1bits
+
+#define BE_CUST_PIX_EXT_WIDTH 30//29
+
+// Pixel Valid & EoP config = {[eop,valid](especial), [eop,valid](normal)}
+#define BE_CUST_PIX_VALID_EOP_P0_IDX 0 // 4bits
+#define BE_CUST_PIX_VALID_EOP_P1_IDX 4 // 4bits
+#define BE_CUST_PIX_VALID_EOP_P2_IDX 8 // 4bits
+#define BE_CUST_PIX_VALID_EOP_P3_IDX 12 // 4bits
+#define BE_CUST_PIX_VALID_EOP_WIDTH 16
+#define BE_CUST_PIX_VALID_EOP_NOR_VALID_IDX 0 // Normal (NO less get_bits case) Valid - 1bits
+#define BE_CUST_PIX_VALID_EOP_NOR_EOP_IDX 1 // Normal (NO less get_bits case) EoP - 1bits
+#define BE_CUST_PIX_VALID_EOP_ESP_VALID_IDX 2 // Especial (less get_bits case) Valid - 1bits
+#define BE_CUST_PIX_VALID_EOP_ESP_EOP_IDX 3 // Especial (less get_bits case) EoP - 1bits
+
+*/
+
+#endif /* _mipi_backend_common_defs_h_ */
+#endif /* _css_receiver_2400_common_defs_h_ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/mipi_backend_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/mipi_backend_defs.h
new file mode 100644
index 000000000000..db5a1d2caba0
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/mipi_backend_defs.h
@@ -0,0 +1,215 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _mipi_backend_defs_h
+#define _mipi_backend_defs_h
+
+#include "mipi_backend_common_defs.h"
+
+#define MIPI_BACKEND_REG_ALIGN 4 // assuming 32 bit control bus width
+
+#define _HRT_MIPI_BACKEND_NOF_IRQS 3 // sid_lut
+
+// SH Backend Register IDs
+#define _HRT_MIPI_BACKEND_ENABLE_REG_IDX 0
+#define _HRT_MIPI_BACKEND_STATUS_REG_IDX 1
+//#define _HRT_MIPI_BACKEND_HIGH_PREC_REG_IDX 2
+#define _HRT_MIPI_BACKEND_COMP_FORMAT_REG0_IDX 2
+#define _HRT_MIPI_BACKEND_COMP_FORMAT_REG1_IDX 3
+#define _HRT_MIPI_BACKEND_COMP_FORMAT_REG2_IDX 4
+#define _HRT_MIPI_BACKEND_COMP_FORMAT_REG3_IDX 5
+#define _HRT_MIPI_BACKEND_RAW16_CONFIG_REG_IDX 6
+#define _HRT_MIPI_BACKEND_RAW18_CONFIG_REG_IDX 7
+#define _HRT_MIPI_BACKEND_FORCE_RAW8_REG_IDX 8
+#define _HRT_MIPI_BACKEND_IRQ_STATUS_REG_IDX 9
+#define _HRT_MIPI_BACKEND_IRQ_CLEAR_REG_IDX 10
+////
+#define _HRT_MIPI_BACKEND_CUST_EN_REG_IDX 11
+#define _HRT_MIPI_BACKEND_CUST_DATA_STATE_REG_IDX 12
+#define _HRT_MIPI_BACKEND_CUST_PIX_EXT_S0P0_REG_IDX 13
+#define _HRT_MIPI_BACKEND_CUST_PIX_EXT_S0P1_REG_IDX 14
+#define _HRT_MIPI_BACKEND_CUST_PIX_EXT_S0P2_REG_IDX 15
+#define _HRT_MIPI_BACKEND_CUST_PIX_EXT_S0P3_REG_IDX 16
+#define _HRT_MIPI_BACKEND_CUST_PIX_EXT_S1P0_REG_IDX 17
+#define _HRT_MIPI_BACKEND_CUST_PIX_EXT_S1P1_REG_IDX 18
+#define _HRT_MIPI_BACKEND_CUST_PIX_EXT_S1P2_REG_IDX 19
+#define _HRT_MIPI_BACKEND_CUST_PIX_EXT_S1P3_REG_IDX 20
+#define _HRT_MIPI_BACKEND_CUST_PIX_EXT_S2P0_REG_IDX 21
+#define _HRT_MIPI_BACKEND_CUST_PIX_EXT_S2P1_REG_IDX 22
+#define _HRT_MIPI_BACKEND_CUST_PIX_EXT_S2P2_REG_IDX 23
+#define _HRT_MIPI_BACKEND_CUST_PIX_EXT_S2P3_REG_IDX 24
+#define _HRT_MIPI_BACKEND_CUST_PIX_VALID_EOP_REG_IDX 25
+////
+#define _HRT_MIPI_BACKEND_GLOBAL_LUT_DISREGARD_REG_IDX 26
+#define _HRT_MIPI_BACKEND_PKT_STALL_STATUS_REG_IDX 27
+//#define _HRT_MIPI_BACKEND_SP_LUT_ENABLE_REG_IDX 28
+#define _HRT_MIPI_BACKEND_SP_LUT_ENTRY_0_REG_IDX 28
+#define _HRT_MIPI_BACKEND_SP_LUT_ENTRY_1_REG_IDX 29
+#define _HRT_MIPI_BACKEND_SP_LUT_ENTRY_2_REG_IDX 30
+#define _HRT_MIPI_BACKEND_SP_LUT_ENTRY_3_REG_IDX 31
+
+#define _HRT_MIPI_BACKEND_NOF_REGISTERS 32 // excluding the LP LUT entries
+
+#define _HRT_MIPI_BACKEND_LP_LUT_ENTRY_0_REG_IDX 32
+
+
+/////////////////////////////////////////////////////////////////////////////////////////////////////
+#define _HRT_MIPI_BACKEND_ENABLE_REG_WIDTH 1
+#define _HRT_MIPI_BACKEND_STATUS_REG_WIDTH 1
+//#define _HRT_MIPI_BACKEND_HIGH_PREC_REG_WIDTH 1
+#define _HRT_MIPI_BACKEND_COMP_FORMAT_REG_WIDTH 32
+#define _HRT_MIPI_BACKEND_RAW16_CONFIG_REG_WIDTH 7
+#define _HRT_MIPI_BACKEND_RAW18_CONFIG_REG_WIDTH 9
+#define _HRT_MIPI_BACKEND_FORCE_RAW8_REG_WIDTH 8
+#define _HRT_MIPI_BACKEND_IRQ_STATUS_REG_WIDTH _HRT_MIPI_BACKEND_NOF_IRQS
+#define _HRT_MIPI_BACKEND_IRQ_CLEAR_REG_WIDTH 0
+#define _HRT_MIPI_BACKEND_GLOBAL_LUT_DISREGARD_REG_WIDTH 1
+#define _HRT_MIPI_BACKEND_PKT_STALL_STATUS_REG_WIDTH       (1 + 2 + 6)
+//#define _HRT_MIPI_BACKEND_SP_LUT_ENABLE_REG_WIDTH 1
+//#define _HRT_MIPI_BACKEND_SP_LUT_ENTRY_0_REG_WIDTH 7
+//#define _HRT_MIPI_BACKEND_SP_LUT_ENTRY_1_REG_WIDTH 7
+//#define _HRT_MIPI_BACKEND_SP_LUT_ENTRY_2_REG_WIDTH 7
+//#define _HRT_MIPI_BACKEND_SP_LUT_ENTRY_3_REG_WIDTH 7
+
+/////////////////////////////////////////////////////////////////////////////////////////////////////
+
+#define _HRT_MIPI_BACKEND_NOF_SP_LUT_ENTRIES 4
+
+//#define _HRT_MIPI_BACKEND_MAX_NOF_LP_LUT_ENTRIES 16 // to satisfy hss model static array declaration
+
+
+#define _HRT_MIPI_BACKEND_CHANNEL_ID_WIDTH 2
+#define _HRT_MIPI_BACKEND_FORMAT_TYPE_WIDTH 6
+#define _HRT_MIPI_BACKEND_PACKET_ID_WIDTH                  (_HRT_MIPI_BACKEND_CHANNEL_ID_WIDTH + _HRT_MIPI_BACKEND_FORMAT_TYPE_WIDTH)
+
+#define _HRT_MIPI_BACKEND_STREAMING_PIX_A_LSB 0
+#define _HRT_MIPI_BACKEND_STREAMING_PIX_A_MSB(pix_width) (_HRT_MIPI_BACKEND_STREAMING_PIX_A_LSB + (pix_width) - 1)
+#define _HRT_MIPI_BACKEND_STREAMING_PIX_A_VAL_BIT(pix_width) (_HRT_MIPI_BACKEND_STREAMING_PIX_A_MSB(pix_width) + 1)
+#define _HRT_MIPI_BACKEND_STREAMING_PIX_B_LSB(pix_width) (_HRT_MIPI_BACKEND_STREAMING_PIX_A_VAL_BIT(pix_width) + 1)
+#define _HRT_MIPI_BACKEND_STREAMING_PIX_B_MSB(pix_width) (_HRT_MIPI_BACKEND_STREAMING_PIX_B_LSB(pix_width) + (pix_width) - 1)
+#define _HRT_MIPI_BACKEND_STREAMING_PIX_B_VAL_BIT(pix_width) (_HRT_MIPI_BACKEND_STREAMING_PIX_B_MSB(pix_width) + 1)
+#define _HRT_MIPI_BACKEND_STREAMING_SOP_BIT(pix_width) (_HRT_MIPI_BACKEND_STREAMING_PIX_B_VAL_BIT(pix_width) + 1)
+#define _HRT_MIPI_BACKEND_STREAMING_EOP_BIT(pix_width) (_HRT_MIPI_BACKEND_STREAMING_SOP_BIT(pix_width) + 1)
+#define _HRT_MIPI_BACKEND_STREAMING_WIDTH(pix_width) (_HRT_MIPI_BACKEND_STREAMING_EOP_BIT(pix_width) + 1)
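+
+/*
+ * Illustrative layout, assuming pix_width = 14: pix A data [13:0], pix A
+ * valid bit 14, pix B data [28:15], pix B valid bit 29, SOP bit 30,
+ * EOP bit 31, giving a streaming width of 32 bits.
+ */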
+
+/*************************************************************************************************/
+/* Custom Decoding */
+/* These custom definitions are based on the design-time configuration in "mipi_backend_pixel_formatter.chdl" !! */
+/*************************************************************************************************/
+#define _HRT_MIPI_BACKEND_CUST_EN_IDX 0 /* 2bits */
+#define _HRT_MIPI_BACKEND_CUST_EN_DATAID_IDX 2 /* 6bits MIPI DATA ID */
+#define _HRT_MIPI_BACKEND_CUST_EN_HIGH_PREC_IDX 8 // 1 bit
+#define _HRT_MIPI_BACKEND_CUST_EN_WIDTH 9
+#define _HRT_MIPI_BACKEND_CUST_MODE_ALL 1 /* Enable Custom Decoding for all DATA IDs */
+#define _HRT_MIPI_BACKEND_CUST_MODE_ONE 3 /* Enable Custom Decoding for ONE DATA ID, programmed in CUST_EN_DATA_ID */
+
+#define _HRT_MIPI_BACKEND_CUST_EN_OPTION_IDX 1
+
+/* Data State config = {get_bits(6bits), valid(1bit)} */
+#define _HRT_MIPI_BACKEND_CUST_DATA_STATE_S0_IDX 0 /* 7bits */
+#define _HRT_MIPI_BACKEND_CUST_DATA_STATE_S1_IDX 8 /* 7bits */
+#define _HRT_MIPI_BACKEND_CUST_DATA_STATE_S2_IDX 16 /* was 14 7bits */
+#define _HRT_MIPI_BACKEND_CUST_DATA_STATE_WIDTH 24 /* was 21*/
+#define _HRT_MIPI_BACKEND_CUST_DATA_STATE_VALID_IDX 0 /* 1bits */
+#define _HRT_MIPI_BACKEND_CUST_DATA_STATE_GETBITS_IDX 1 /* 6bits */
+
+/* Pixel Extractor config */
+#define _HRT_MIPI_BACKEND_CUST_PIX_EXT_DATA_ALIGN_IDX 0 /* 6bits */
+#define _HRT_MIPI_BACKEND_CUST_PIX_EXT_PIX_ALIGN_IDX 6 /* 5bits */
+#define _HRT_MIPI_BACKEND_CUST_PIX_EXT_PIX_MASK_IDX 11 /* was 10 18bits */
+#define _HRT_MIPI_BACKEND_CUST_PIX_EXT_PIX_EN_IDX 29 /* was 28 1bits */
+
+#define _HRT_MIPI_BACKEND_CUST_PIX_EXT_WIDTH 30 /* was 29 */
+
+/* Pixel Valid & EoP config = {[eop,valid](especial), [eop,valid](normal)} */
+#define _HRT_MIPI_BACKEND_CUST_PIX_VALID_EOP_P0_IDX 0 /* 4bits */
+#define _HRT_MIPI_BACKEND_CUST_PIX_VALID_EOP_P1_IDX 4 /* 4bits */
+#define _HRT_MIPI_BACKEND_CUST_PIX_VALID_EOP_P2_IDX 8 /* 4bits */
+#define _HRT_MIPI_BACKEND_CUST_PIX_VALID_EOP_P3_IDX 12 /* 4bits */
+#define _HRT_MIPI_BACKEND_CUST_PIX_VALID_EOP_WIDTH 16
+#define _HRT_MIPI_BACKEND_CUST_PIX_VALID_EOP_NOR_VALID_IDX 0 /* Normal (NO less get_bits case) Valid - 1bits */
+#define _HRT_MIPI_BACKEND_CUST_PIX_VALID_EOP_NOR_EOP_IDX 1 /* Normal (NO less get_bits case) EoP - 1bits */
+#define _HRT_MIPI_BACKEND_CUST_PIX_VALID_EOP_ESP_VALID_IDX 2 /* Especial (less get_bits case) Valid - 1bits */
+#define _HRT_MIPI_BACKEND_CUST_PIX_VALID_EOP_ESP_EOP_IDX 3 /* Especial (less get_bits case) EoP - 1bits */
+
+/*************************************************************************************************/
+/* MIPI backend output streaming interface definition */
+/* These parameters define the fields within the streaming bus and should also be used by the */
+/* subsequent block, i.e. stream2mmio. */
+/*************************************************************************************************/
+/* The backend-to-stream2mmio pipe should be design-time configurable in: */
+/*     PixWidth - number of bits per pixel */
+/*     PPC      - pixels per clock */
+/*     NumSids  - max number of source IDs (ifc's), and derived from that: */
+/*     SidWidth - number of bits required for the sid parameter */
+/* To keep this configurability, the macros below take these as parameters. */
+/*************************************************************************************************/
+
+#define HRT_MIPI_BACKEND_STREAM_EOP_BIT 0
+#define HRT_MIPI_BACKEND_STREAM_SOP_BIT 1
+#define HRT_MIPI_BACKEND_STREAM_EOF_BIT 2
+#define HRT_MIPI_BACKEND_STREAM_SOF_BIT 3
+#define HRT_MIPI_BACKEND_STREAM_CHID_LS_BIT 4
+#define HRT_MIPI_BACKEND_STREAM_CHID_MS_BIT(sid_width) (HRT_MIPI_BACKEND_STREAM_CHID_LS_BIT+(sid_width)-1)
+#define HRT_MIPI_BACKEND_STREAM_PIX_VAL_BIT(sid_width,p) (HRT_MIPI_BACKEND_STREAM_CHID_MS_BIT(sid_width)+1+p)
+
+#define HRT_MIPI_BACKEND_STREAM_PIX_LS_BIT(sid_width,ppc,pix_width,p) (HRT_MIPI_BACKEND_STREAM_PIX_VAL_BIT(sid_width,ppc)+ ((pix_width)*p))
+#define HRT_MIPI_BACKEND_STREAM_PIX_MS_BIT(sid_width,ppc,pix_width,p) (HRT_MIPI_BACKEND_STREAM_PIX_LS_BIT(sid_width,ppc,pix_width,p) + (pix_width) - 1)
+
+#if 0
+//#define HRT_MIPI_BACKEND_STREAM_PIX_BITS 14
+//#define HRT_MIPI_BACKEND_STREAM_CHID_BITS 4
+//#define HRT_MIPI_BACKEND_STREAM_PPC 4
+#endif
+
+#define HRT_MIPI_BACKEND_STREAM_BITS(sid_width,ppc,pix_width) (HRT_MIPI_BACKEND_STREAM_PIX_MS_BIT(sid_width,ppc,pix_width,(ppc-1))+1)
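+
+/*
+ * Worked example with sid_width = 4, ppc = 4, pix_width = 14 (matching the
+ * commented-out values above and the "temp solution" bit numbers below):
+ * EOP/SOP/EOF/SOF occupy bits 0..3, the channel id sits in bits [7:4], the
+ * four pixel-valid flags in bits 8..11, and the pixel data in [25:12],
+ * [39:26], [53:40] and [67:54], so HRT_MIPI_BACKEND_STREAM_BITS(4, 4, 14) = 68.
+ */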
+
+
+/* SP and LP LUT BIT POSITIONS */
+#define HRT_MIPI_BACKEND_LUT_PKT_DISREGARD_BIT 0 // 0
+#define HRT_MIPI_BACKEND_LUT_SID_LS_BIT HRT_MIPI_BACKEND_LUT_PKT_DISREGARD_BIT + 1 // 1
+#define HRT_MIPI_BACKEND_LUT_SID_MS_BIT(sid_width) (HRT_MIPI_BACKEND_LUT_SID_LS_BIT+(sid_width)-1) // 1 + (4) - 1 = 4
+#define HRT_MIPI_BACKEND_LUT_MIPI_CH_ID_LS_BIT(sid_width) HRT_MIPI_BACKEND_LUT_SID_MS_BIT(sid_width) + 1 // 5
+#define HRT_MIPI_BACKEND_LUT_MIPI_CH_ID_MS_BIT(sid_width) HRT_MIPI_BACKEND_LUT_MIPI_CH_ID_LS_BIT(sid_width) + _HRT_MIPI_BACKEND_CHANNEL_ID_WIDTH - 1 // 6
+#define HRT_MIPI_BACKEND_LUT_MIPI_FMT_LS_BIT(sid_width) HRT_MIPI_BACKEND_LUT_MIPI_CH_ID_MS_BIT(sid_width) + 1 // 7
+#define HRT_MIPI_BACKEND_LUT_MIPI_FMT_MS_BIT(sid_width) HRT_MIPI_BACKEND_LUT_MIPI_FMT_LS_BIT(sid_width) + _HRT_MIPI_BACKEND_FORMAT_TYPE_WIDTH - 1 // 12
+
+/* #define HRT_MIPI_BACKEND_SP_LUT_BITS(sid_width) HRT_MIPI_BACKEND_LUT_MIPI_CH_ID_MS_BIT(sid_width) + 1 // 7 */
+
+#define HRT_MIPI_BACKEND_SP_LUT_BITS(sid_width) HRT_MIPI_BACKEND_LUT_SID_MS_BIT(sid_width) + 1
+#define HRT_MIPI_BACKEND_LP_LUT_BITS(sid_width) HRT_MIPI_BACKEND_LUT_MIPI_FMT_MS_BIT(sid_width) + 1 // 13
+
+
+// temp solution
+//#define HRT_MIPI_BACKEND_STREAM_PIXA_VAL_BIT HRT_MIPI_BACKEND_STREAM_CHID_MS_BIT + 1 // 8
+//#define HRT_MIPI_BACKEND_STREAM_PIXB_VAL_BIT HRT_MIPI_BACKEND_STREAM_PIXA_VAL_BIT + 1 // 9
+//#define HRT_MIPI_BACKEND_STREAM_PIXC_VAL_BIT HRT_MIPI_BACKEND_STREAM_PIXB_VAL_BIT + 1 // 10
+//#define HRT_MIPI_BACKEND_STREAM_PIXD_VAL_BIT HRT_MIPI_BACKEND_STREAM_PIXC_VAL_BIT + 1 // 11
+//#define HRT_MIPI_BACKEND_STREAM_PIXA_LS_BIT HRT_MIPI_BACKEND_STREAM_PIXD_VAL_BIT + 1 // 12
+//#define HRT_MIPI_BACKEND_STREAM_PIXA_MS_BIT HRT_MIPI_BACKEND_STREAM_PIXA_LS_BIT + HRT_MIPI_BACKEND_STREAM_PIX_BITS - 1 // 25
+//#define HRT_MIPI_BACKEND_STREAM_PIXB_LS_BIT HRT_MIPI_BACKEND_STREAM_PIXA_MS_BIT + 1 // 26
+//#define HRT_MIPI_BACKEND_STREAM_PIXB_MS_BIT HRT_MIPI_BACKEND_STREAM_PIXB_LS_BIT + HRT_MIPI_BACKEND_STREAM_PIX_BITS - 1 // 39
+//#define HRT_MIPI_BACKEND_STREAM_PIXC_LS_BIT HRT_MIPI_BACKEND_STREAM_PIXB_MS_BIT + 1 // 40
+//#define HRT_MIPI_BACKEND_STREAM_PIXC_MS_BIT HRT_MIPI_BACKEND_STREAM_PIXC_LS_BIT + HRT_MIPI_BACKEND_STREAM_PIX_BITS - 1 // 53
+//#define HRT_MIPI_BACKEND_STREAM_PIXD_LS_BIT HRT_MIPI_BACKEND_STREAM_PIXC_MS_BIT + 1 // 54
+//#define HRT_MIPI_BACKEND_STREAM_PIXD_MS_BIT HRT_MIPI_BACKEND_STREAM_PIXD_LS_BIT + HRT_MIPI_BACKEND_STREAM_PIX_BITS - 1 // 67
+
+// VC hidden in pixb data (passed as RAW12 to the pipe)
+#define HRT_MIPI_BACKEND_STREAM_VC_LS_BIT(sid_width,ppc,pix_width) HRT_MIPI_BACKEND_STREAM_PIX_LS_BIT(sid_width,ppc,pix_width,1) + 10 //HRT_MIPI_BACKEND_STREAM_PIXB_LS_BIT + 10 // 36
+#define HRT_MIPI_BACKEND_STREAM_VC_MS_BIT(sid_width,ppc,pix_width) HRT_MIPI_BACKEND_STREAM_VC_LS_BIT(sid_width,ppc,pix_width) + 1 // 37
+
+
+
+
+#endif /* _mipi_backend_defs_h */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/mmu_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/mmu_defs.h
new file mode 100644
index 000000000000..c038f39ffd25
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/mmu_defs.h
@@ -0,0 +1,23 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _mmu_defs_h
+#define _mmu_defs_h
+
+#define _HRT_MMU_INVALIDATE_TLB_REG_IDX 0
+#define _HRT_MMU_PAGE_TABLE_BASE_ADDRESS_REG_IDX 1
+
+#define _HRT_MMU_REG_ALIGN 4
+
+#endif /* _mmu_defs_h */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/rx_csi_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/rx_csi_defs.h
new file mode 100644
index 000000000000..0aad86e2e914
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/rx_csi_defs.h
@@ -0,0 +1,175 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _csi_rx_defs_h
+#define _csi_rx_defs_h
+
+//#include "rx_csi_common_defs.h"
+
+
+
+#define MIPI_PKT_DATA_WIDTH 32
+//#define CLK_CROSSING_FIFO_DEPTH 16
+#define _CSI_RX_REG_ALIGN 4
+
+// define the number of IRQs (see below for the definition of each IRQ bit)
+#define CSI_RX_NOF_IRQS_BYTE_DOMAIN 11
+#define CSI_RX_NOF_IRQS_ISP_DOMAIN     15 // CSI_RX_NOF_IRQS_BYTE_DOMAIN + the remaining Dphy_rx IRQs already on the ISP clock domain
+
+// REGISTER DESCRIPTION
+//#define _HRT_CSI_RX_SOFTRESET_REG_IDX 0
+#define _HRT_CSI_RX_ENABLE_REG_IDX 0
+#define _HRT_CSI_RX_NOF_ENABLED_LANES_REG_IDX 1
+#define _HRT_CSI_RX_ERROR_HANDLING_REG_IDX 2
+#define _HRT_CSI_RX_STATUS_REG_IDX 3
+#define _HRT_CSI_RX_STATUS_DLANE_HS_REG_IDX 4
+#define _HRT_CSI_RX_STATUS_DLANE_LP_REG_IDX 5
+//#define _HRT_CSI_RX_IRQ_CONFIG_REG_IDX 6
+#define _HRT_CSI_RX_DLY_CNT_TERMEN_CLANE_REG_IDX 6
+#define _HRT_CSI_RX_DLY_CNT_SETTLE_CLANE_REG_IDX 7
+#define _HRT_CSI_RX_DLY_CNT_TERMEN_DLANE_REG_IDX(lane_idx) (8+(2*lane_idx))
+#define _HRT_CSI_RX_DLY_CNT_SETTLE_DLANE_REG_IDX(lane_idx) (8+(2*lane_idx)+1)
+
+#define _HRT_CSI_RX_NOF_REGISTERS(nof_dlanes) (8+2*(nof_dlanes))
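+
+/*
+ * Example, assuming four enabled data lanes: the TERMEN/SETTLE delay-count
+ * registers for lanes 0..3 occupy indices 8..15, so
+ * _HRT_CSI_RX_NOF_REGISTERS(4) = 16.
+ */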
+
+
+//#define _HRT_CSI_RX_SOFTRESET_REG_WIDTH 1
+#define _HRT_CSI_RX_ENABLE_REG_WIDTH 1
+#define _HRT_CSI_RX_NOF_ENABLED_LANES_REG_WIDTH 3
+#define _HRT_CSI_RX_ERROR_HANDLING_REG_WIDTH 4
+#define _HRT_CSI_RX_STATUS_REG_WIDTH 1
+#define _HRT_CSI_RX_STATUS_DLANE_HS_REG_WIDTH 8
+#define _HRT_CSI_RX_STATUS_DLANE_LP_REG_WIDTH 24
+#define _HRT_CSI_RX_IRQ_CONFIG_REG_WIDTH (CSI_RX_NOF_IRQS_ISP_DOMAIN)
+#define _HRT_CSI_RX_DLY_CNT_REG_WIDTH 24
+//#define _HRT_CSI_RX_IRQ_STATUS_REG_WIDTH NOF_IRQS
+//#define _HRT_CSI_RX_IRQ_CLEAR_REG_WIDTH 0
+
+
+#define ONE_LANE_ENABLED 0
+#define TWO_LANES_ENABLED 1
+#define THREE_LANES_ENABLED 2
+#define FOUR_LANES_ENABLED 3
+
+// Error handling reg bit positions
+#define ERR_DECISION_BIT 0
+#define DISC_RESERVED_SP_BIT 1
+#define DISC_RESERVED_LP_BIT 2
+#define DIS_INCOMP_PKT_CHK_BIT 3
+
+#define _HRT_CSI_RX_IRQ_CONFIG_REG_VAL_POSEDGE 0
+#define _HRT_CSI_RX_IRQ_CONFIG_REG_VAL_ORIGINAL 1
+
+// Interrupt bits
+#define _HRT_RX_CSI_IRQ_SINGLE_PH_ERROR_CORRECTED 0
+#define _HRT_RX_CSI_IRQ_MULTIPLE_PH_ERROR_DETECTED 1
+#define _HRT_RX_CSI_IRQ_PAYLOAD_CHECKSUM_ERROR 2
+#define _HRT_RX_CSI_IRQ_FIFO_FULL_ERROR 3
+#define _HRT_RX_CSI_IRQ_RESERVED_SP_DETECTED 4
+#define _HRT_RX_CSI_IRQ_RESERVED_LP_DETECTED 5
+//#define _HRT_RX_CSI_IRQ_PREMATURE_SOP 6
+#define _HRT_RX_CSI_IRQ_INCOMPLETE_PACKET 6
+#define _HRT_RX_CSI_IRQ_FRAME_SYNC_ERROR 7
+#define _HRT_RX_CSI_IRQ_LINE_SYNC_ERROR 8
+#define _HRT_RX_CSI_IRQ_DLANE_HS_SOT_ERROR 9
+#define _HRT_RX_CSI_IRQ_DLANE_HS_SOT_SYNC_ERROR 10
+
+#define _HRT_RX_CSI_IRQ_DLANE_ESC_ERROR 11
+#define _HRT_RX_CSI_IRQ_DLANE_TRIGGERESC 12
+#define _HRT_RX_CSI_IRQ_DLANE_ULPSESC 13
+#define _HRT_RX_CSI_IRQ_CLANE_ULPSCLKNOT 14
+
+/* OLD ARASAN FRONTEND IRQs
+#define _HRT_RX_CSI_IRQ_OVERRUN_BIT 0
+#define _HRT_RX_CSI_IRQ_RESERVED_BIT 1
+#define _HRT_RX_CSI_IRQ_SLEEP_MODE_ENTRY_BIT 2
+#define _HRT_RX_CSI_IRQ_SLEEP_MODE_EXIT_BIT 3
+#define _HRT_RX_CSI_IRQ_ERR_SOT_HS_BIT 4
+#define _HRT_RX_CSI_IRQ_ERR_SOT_SYNC_HS_BIT 5
+#define _HRT_RX_CSI_IRQ_ERR_CONTROL_BIT 6
+#define _HRT_RX_CSI_IRQ_ERR_ECC_DOUBLE_BIT 7
+#define _HRT_RX_CSI_IRQ_ERR_ECC_CORRECTED_BIT 8
+#define _HRT_RX_CSI_IRQ_ERR_ECC_NO_CORRECTION_BIT 9
+#define _HRT_RX_CSI_IRQ_ERR_CRC_BIT 10
+#define _HRT_RX_CSI_IRQ_ERR_ID_BIT 11
+#define _HRT_RX_CSI_IRQ_ERR_FRAME_SYNC_BIT 12
+#define _HRT_RX_CSI_IRQ_ERR_FRAME_DATA_BIT 13
+#define _HRT_RX_CSI_IRQ_DATA_TIMEOUT_BIT 14
+#define _HRT_RX_CSI_IRQ_ERR_ESCAPE_BIT 15
+#define _HRT_RX_CSI_IRQ_ERR_LINE_SYNC_BIT 16
+*/
+
+
+// Bit description for reg _HRT_CSI_RX_STATUS_DLANE_HS_REG_IDX
+#define _HRT_CSI_RX_STATUS_DLANE_HS_SOT_ERR_LANE0 0
+#define _HRT_CSI_RX_STATUS_DLANE_HS_SOT_ERR_LANE1 1
+#define _HRT_CSI_RX_STATUS_DLANE_HS_SOT_ERR_LANE2 2
+#define _HRT_CSI_RX_STATUS_DLANE_HS_SOT_ERR_LANE3 3
+#define _HRT_CSI_RX_STATUS_DLANE_HS_SOT_SYNC_ERR_LANE0 4
+#define _HRT_CSI_RX_STATUS_DLANE_HS_SOT_SYNC_ERR_LANE1 5
+#define _HRT_CSI_RX_STATUS_DLANE_HS_SOT_SYNC_ERR_LANE2 6
+#define _HRT_CSI_RX_STATUS_DLANE_HS_SOT_SYNC_ERR_LANE3 7
+
+// Bit description for reg _HRT_CSI_RX_STATUS_DLANE_LP_REG_IDX
+#define _HRT_CSI_RX_STATUS_DLANE_LP_ESC_ERR_LANE0 0
+#define _HRT_CSI_RX_STATUS_DLANE_LP_ESC_ERR_LANE1 1
+#define _HRT_CSI_RX_STATUS_DLANE_LP_ESC_ERR_LANE2 2
+#define _HRT_CSI_RX_STATUS_DLANE_LP_ESC_ERR_LANE3 3
+#define _HRT_CSI_RX_STATUS_DLANE_LP_TRIGGERESC0_LANE0 4
+#define _HRT_CSI_RX_STATUS_DLANE_LP_TRIGGERESC1_LANE0 5
+#define _HRT_CSI_RX_STATUS_DLANE_LP_TRIGGERESC2_LANE0 6
+#define _HRT_CSI_RX_STATUS_DLANE_LP_TRIGGERESC3_LANE0 7
+#define _HRT_CSI_RX_STATUS_DLANE_LP_TRIGGERESC0_LANE1 8
+#define _HRT_CSI_RX_STATUS_DLANE_LP_TRIGGERESC1_LANE1 9
+#define _HRT_CSI_RX_STATUS_DLANE_LP_TRIGGERESC2_LANE1 10
+#define _HRT_CSI_RX_STATUS_DLANE_LP_TRIGGERESC3_LANE1 11
+#define _HRT_CSI_RX_STATUS_DLANE_LP_TRIGGERESC0_LANE2 12
+#define _HRT_CSI_RX_STATUS_DLANE_LP_TRIGGERESC1_LANE2 13
+#define _HRT_CSI_RX_STATUS_DLANE_LP_TRIGGERESC2_LANE2 14
+#define _HRT_CSI_RX_STATUS_DLANE_LP_TRIGGERESC3_LANE2 15
+#define _HRT_CSI_RX_STATUS_DLANE_LP_TRIGGERESC0_LANE3 16
+#define _HRT_CSI_RX_STATUS_DLANE_LP_TRIGGERESC1_LANE3 17
+#define _HRT_CSI_RX_STATUS_DLANE_LP_TRIGGERESC2_LANE3 18
+#define _HRT_CSI_RX_STATUS_DLANE_LP_TRIGGERESC3_LANE3 19
+#define _HRT_CSI_RX_STATUS_DLANE_LP_ULPSESC_LANE0 20
+#define _HRT_CSI_RX_STATUS_DLANE_LP_ULPSESC_LANE1 21
+#define _HRT_CSI_RX_STATUS_DLANE_LP_ULPSESC_LANE2 22
+#define _HRT_CSI_RX_STATUS_DLANE_LP_ULPSESC_LANE3 23
+
+/*********************************************************/
+/*** Relevant declarations from rx_csi_common_defs.h *****/
+/*********************************************************/
+/* packet bit definition */
+#define _HRT_RX_CSI_PKT_SOP_BITPOS 32
+#define _HRT_RX_CSI_PKT_EOP_BITPOS 33
+#define _HRT_RX_CSI_PKT_PAYLOAD_BITPOS 0
+#define _HRT_RX_CSI_PH_CH_ID_BITPOS 22
+#define _HRT_RX_CSI_PH_FMT_ID_BITPOS 16
+#define _HRT_RX_CSI_PH_DATA_FIELD_BITPOS 0
+
+#define _HRT_RX_CSI_PKT_SOP_BITS 1
+#define _HRT_RX_CSI_PKT_EOP_BITS 1
+#define _HRT_RX_CSI_PKT_PAYLOAD_BITS 32
+#define _HRT_RX_CSI_PH_CH_ID_BITS 2
+#define _HRT_RX_CSI_PH_FMT_ID_BITS 6
+#define _HRT_RX_CSI_PH_DATA_FIELD_BITS 16
+
+/* Definition of data format ID at the interface CSS_receiver units */
+#define _HRT_RX_CSI_DATA_FORMAT_ID_SOF 0 /* 00 0000 frame start */
+#define _HRT_RX_CSI_DATA_FORMAT_ID_EOF 1 /* 00 0001 frame end */
+#define _HRT_RX_CSI_DATA_FORMAT_ID_SOL 2 /* 00 0010 line start */
+#define _HRT_RX_CSI_DATA_FORMAT_ID_EOL 3 /* 00 0011 line end */
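+
+/*
+ * Editorial sketch, not part of the original header: decoding a packet
+ * header word with the bit positions/widths defined above.  Only the
+ * _HRT_RX_CSI_* macros come from this file; the helper names are made up.
+ */
+static inline unsigned int _hrt_rx_csi_ph_fmt_id(unsigned int header)
+{
+	/* format ID: _HRT_RX_CSI_PH_FMT_ID_BITS (6) bits starting at bit 16 */
+	return (header >> _HRT_RX_CSI_PH_FMT_ID_BITPOS) &
+	       ((1U << _HRT_RX_CSI_PH_FMT_ID_BITS) - 1);
+}
+
+static inline int _hrt_rx_csi_ph_is_frame_start(unsigned int header)
+{
+	return _hrt_rx_csi_ph_fmt_id(header) == _HRT_RX_CSI_DATA_FORMAT_ID_SOF;
+}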
+
+
+#endif /* _csi_rx_defs_h */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/scalar_processor_2400_params.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/scalar_processor_2400_params.h
new file mode 100644
index 000000000000..9b6c2893d950
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/scalar_processor_2400_params.h
@@ -0,0 +1,20 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _scalar_processor_2400_params_h
+#define _scalar_processor_2400_params_h
+
+#include "cell_params.h"
+
+#endif /* _scalar_processor_2400_params_h */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/sp_hrt.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/sp_hrt.h
new file mode 100644
index 000000000000..7ee4deba519a
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/sp_hrt.h
@@ -0,0 +1,24 @@
+#ifndef ISP2401
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _sp_hrt_h_
+#define _sp_hrt_h_
+
+#define hrt_sp_dmem(cell) HRT_PROC_TYPE_PROP(cell, _dmem)
+
+#define hrt_sp_dmem_master_port_address(cell) hrt_mem_master_port_address(cell, hrt_sp_dmem(cell))
+
+#endif /* _sp_hrt_h_ */
+#endif
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/str2mem_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/str2mem_defs.h
new file mode 100644
index 000000000000..1cb62444cf68
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/str2mem_defs.h
@@ -0,0 +1,39 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _ST2MEM_DEFS_H
+#define _ST2MEM_DEFS_H
+
+#define _STR2MEM_CRUN_BIT 0x100000
+#define _STR2MEM_CMD_BITS 0x0F0000
+#define _STR2MEM_COUNT_BITS 0x00FFFF
+
+#define _STR2MEM_BLOCKS_CMD 0xA0000
+#define _STR2MEM_PACKETS_CMD 0xB0000
+#define _STR2MEM_BYTES_CMD 0xC0000
+#define _STR2MEM_BYTES_FROM_PACKET_CMD 0xD0000
+
+#define _STR2MEM_SOFT_RESET_REG_ID 0
+#define _STR2MEM_INPUT_ENDIANNESS_REG_ID 1
+#define _STR2MEM_OUTPUT_ENDIANNESS_REG_ID 2
+#define _STR2MEM_BIT_SWAPPING_REG_ID 3
+#define _STR2MEM_BLOCK_SYNC_LEVEL_REG_ID 4
+#define _STR2MEM_PACKET_SYNC_LEVEL_REG_ID 5
+#define _STR2MEM_READ_POST_WRITE_SYNC_ENABLE_REG_ID 6
+#define _STR2MEM_DUAL_BYTE_INPUTS_ENABLED_REG_ID 7
+#define _STR2MEM_EN_STAT_UPDATE_ID 8
+
+#define _STR2MEM_REG_ALIGN 4
+
+#endif /* _ST2MEM_DEFS_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/stream2mmio_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/stream2mmio_defs.h
new file mode 100644
index 000000000000..46b52fe5ae99
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/stream2mmio_defs.h
@@ -0,0 +1,71 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _STREAM2MMIO_DEFS_H
+#define _STREAM2MMIO_DEFS_H
+
+#include <mipi_backend_defs.h>
+
+#define _STREAM2MMIO_REG_ALIGN 4
+
+#define _STREAM2MMIO_COMMAND_REG_ID 0
+#define _STREAM2MMIO_ACKNOWLEDGE_REG_ID 1
+#define _STREAM2MMIO_PIX_WIDTH_ID_REG_ID 2
+#define _STREAM2MMIO_START_ADDR_REG_ID 3 /* master port address, NOT byte address */
+#define _STREAM2MMIO_END_ADDR_REG_ID 4 /* master port address, NOT byte address */
+#define _STREAM2MMIO_STRIDE_REG_ID 5 /* stride in master port words; increment is per packet for long SIDs, stride is not used for short SIDs */
+#define _STREAM2MMIO_NUM_ITEMS_REG_ID 6 /* number of packets for the store_packets cmd, number of words for the store_words cmd */
+#define _STREAM2MMIO_BLOCK_WHEN_NO_CMD_REG_ID 7 /* if this register is 1, input will be stalled if there is no pending command for this SID */
+#define _STREAM2MMIO_REGS_PER_SID 8
+
+#define _STREAM2MMIO_SID_REG_OFFSET 8
+#define _STREAM2MMIO_MAX_NOF_SIDS 64 /* value used in hss model */
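+
+/*
+ * Editorial sketch, not part of the original header: how a register in a
+ * SID block is typically addressed, assuming the usual
+ * "sid * _STREAM2MMIO_SID_REG_OFFSET + register id" indexing with
+ * _STREAM2MMIO_REG_ALIGN bytes per register.  The helper name is made up.
+ */
+static inline unsigned int
+_stream2mmio_sid_reg_byte_offset(unsigned int sid, unsigned int reg_id)
+{
+	/* e.g. (sid 2, _STREAM2MMIO_COMMAND_REG_ID) -> index 16 -> offset 64 */
+	return (sid * _STREAM2MMIO_SID_REG_OFFSET + reg_id) *
+	       _STREAM2MMIO_REG_ALIGN;
+}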
+
+/* command token definition */
+#define _STREAM2MMIO_CMD_TOKEN_CMD_LSB 0 /* bits 1..0 hold the command field */
+#define _STREAM2MMIO_CMD_TOKEN_CMD_MSB 1
+
+#define _STREAM2MMIO_CMD_TOKEN_WIDTH (_STREAM2MMIO_CMD_TOKEN_CMD_MSB+1-_STREAM2MMIO_CMD_TOKEN_CMD_LSB)
+
+#define _STREAM2MMIO_CMD_TOKEN_STORE_WORDS 0 /* command for storing the number of output words given by reg _STREAM2MMIO_NUM_ITEMS_REG_ID */
+#define _STREAM2MMIO_CMD_TOKEN_STORE_PACKETS 1 /* command for storing the number of packets given by reg _STREAM2MMIO_NUM_ITEMS_REG_ID */
+#define _STREAM2MMIO_CMD_TOKEN_SYNC_FRAME 2 /* command for waiting for a frame start */
+
+/* acknowledges from packer module */
+/* fields: eof - indicates whether last (short) packet received was an eof packet */
+/* eop - indicates whether the command ended because of a packet end or because the requested number of words was received */
+/* count - indicates number of words stored */
+#define _STREAM2MMIO_PACK_NUM_ITEMS_BITS 16
+#define _STREAM2MMIO_PACK_ACK_EOP_BIT _STREAM2MMIO_PACK_NUM_ITEMS_BITS
+#define _STREAM2MMIO_PACK_ACK_EOF_BIT (_STREAM2MMIO_PACK_ACK_EOP_BIT+1)
+
+/* acknowledge token definition */
+#define _STREAM2MMIO_ACK_TOKEN_NUM_ITEMS_LSB 0 /* bits 15..0 hold the num_items field */
+#define _STREAM2MMIO_ACK_TOKEN_NUM_ITEMS_MSB (_STREAM2MMIO_PACK_NUM_ITEMS_BITS-1)
+#define _STREAM2MMIO_ACK_TOKEN_EOP_BIT _STREAM2MMIO_PACK_ACK_EOP_BIT
+#define _STREAM2MMIO_ACK_TOKEN_EOF_BIT _STREAM2MMIO_PACK_ACK_EOF_BIT
+#define _STREAM2MMIO_ACK_TOKEN_VALID_BIT (_STREAM2MMIO_ACK_TOKEN_EOF_BIT+1) /* this bit indicates a valid ack */
+ /* if there is no valid ack, a read */
+ /* on the ack register returns 0 */
+#define _STREAM2MMIO_ACK_TOKEN_WIDTH (_STREAM2MMIO_ACK_TOKEN_VALID_BIT+1)
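+
+/*
+ * Editorial sketch, not part of the original header: unpacking an
+ * acknowledge token according to the bit layout above.  The struct and
+ * helper are illustrative only; the bit positions come from this file.
+ */
+struct _stream2mmio_ack_fields {
+	unsigned int num_items;
+	unsigned int eop;
+	unsigned int eof;
+	unsigned int valid;	/* a read of 0 means "no valid ack" */
+};
+
+static inline struct _stream2mmio_ack_fields
+_stream2mmio_unpack_ack(unsigned int token)
+{
+	struct _stream2mmio_ack_fields ack;
+
+	ack.num_items = token & ((1U << _STREAM2MMIO_PACK_NUM_ITEMS_BITS) - 1);
+	ack.eop = (token >> _STREAM2MMIO_ACK_TOKEN_EOP_BIT) & 1;
+	ack.eof = (token >> _STREAM2MMIO_ACK_TOKEN_EOF_BIT) & 1;
+	ack.valid = (token >> _STREAM2MMIO_ACK_TOKEN_VALID_BIT) & 1;
+	return ack;
+}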
+
+/* commands for packer module */
+#define _STREAM2MMIO_PACK_CMD_STORE_WORDS 0
+#define _STREAM2MMIO_PACK_CMD_STORE_LONG_PACKET 1
+#define _STREAM2MMIO_PACK_CMD_STORE_SHORT_PACKET 2
+
+
+
+
+#endif /* _STREAM2MMIO_DEFS_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/streaming_to_mipi_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/streaming_to_mipi_defs.h
new file mode 100644
index 000000000000..60143b8743a2
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/streaming_to_mipi_defs.h
@@ -0,0 +1,28 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _streaming_to_mipi_defs_h
+#define _streaming_to_mipi_defs_h
+
+#define HIVE_STR_TO_MIPI_VALID_A_BIT 0
+#define HIVE_STR_TO_MIPI_VALID_B_BIT 1
+#define HIVE_STR_TO_MIPI_SOL_BIT 2
+#define HIVE_STR_TO_MIPI_EOL_BIT 3
+#define HIVE_STR_TO_MIPI_SOF_BIT 4
+#define HIVE_STR_TO_MIPI_EOF_BIT 5
+#define HIVE_STR_TO_MIPI_CH_ID_LSB 6
+
+#define HIVE_STR_TO_MIPI_DATA_A_LSB (HIVE_STR_TO_MIPI_VALID_B_BIT + 1)
+
+#endif /* _streaming_to_mipi_defs_h */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/timed_controller_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/timed_controller_defs.h
new file mode 100644
index 000000000000..d2b8972b0d9e
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/timed_controller_defs.h
@@ -0,0 +1,22 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _timed_controller_defs_h
+#define _timed_controller_defs_h
+
+#define _HRT_TIMED_CONTROLLER_CMD_REG_IDX 0
+
+#define _HRT_TIMED_CONTROLLER_REG_ALIGN 4
+
+#endif /* _timed_controller_defs_h */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/var.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/var.h
new file mode 100644
index 000000000000..19b19ef484f9
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/var.h
@@ -0,0 +1,99 @@
+#ifndef ISP2401
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _HRT_VAR_H
+#define _HRT_VAR_H
+
+#include "version.h"
+#include "system_api.h"
+#include "hive_types.h"
+
+#define hrt_int_type_of_char char
+#define hrt_int_type_of_uchar unsigned char
+#define hrt_int_type_of_short short
+#define hrt_int_type_of_ushort unsigned short
+#define hrt_int_type_of_int int
+#define hrt_int_type_of_uint unsigned int
+#define hrt_int_type_of_long long
+#define hrt_int_type_of_ulong unsigned long
+#define hrt_int_type_of_ptr unsigned int
+
+#define hrt_host_type_of_char char
+#define hrt_host_type_of_uchar unsigned char
+#define hrt_host_type_of_short short
+#define hrt_host_type_of_ushort unsigned short
+#define hrt_host_type_of_int int
+#define hrt_host_type_of_uint unsigned int
+#define hrt_host_type_of_long long
+#define hrt_host_type_of_ulong unsigned long
+#define hrt_host_type_of_ptr void*
+
+#define HRT_TYPE_BYTES(cell, type) (HRT_TYPE_BITS(cell, type)/8)
+#define HRT_HOST_TYPE(cell_type) HRTCAT(hrt_host_type_of_, cell_type)
+#define HRT_INT_TYPE(type) HRTCAT(hrt_int_type_of_, type)
+
+#ifdef C_RUN
+
+#ifdef C_RUN_DYNAMIC_LINK_PROGRAMS
+extern void *csim_processor_get_crun_symbol(hive_proc_id p, const char *sym);
+#define _hrt_cell_get_crun_symbol(cell,sym) csim_processor_get_crun_symbol(cell,HRTSTR(sym))
+#define _hrt_cell_get_crun_indexed_symbol(cell,sym) csim_processor_get_crun_symbol(cell,HRTSTR(sym))
+#else
+#define _hrt_cell_get_crun_symbol(cell,sym) (&sym)
+#define _hrt_cell_get_crun_indexed_symbol(cell,sym) (sym)
+#endif // C_RUN_DYNAMIC_LINK_PROGRAMS
+
+#define hrt_scalar_store(cell, type, var, data) \
+ ((*(HRT_HOST_TYPE(type)*)_hrt_cell_get_crun_symbol(cell,var)) = (data))
+#define hrt_scalar_load(cell, type, var) \
+ ((*(HRT_HOST_TYPE(type)*)_hrt_cell_get_crun_symbol(cell,var)))
+
+#define hrt_indexed_store(cell, type, array, index, data) \
+ ((((HRT_HOST_TYPE(type)*)_hrt_cell_get_crun_indexed_symbol(cell,array))[index]) = (data))
+#define hrt_indexed_load(cell, type, array, index) \
+ (((HRT_HOST_TYPE(type)*)_hrt_cell_get_crun_indexed_symbol(cell,array))[index])
+
+#else /* C_RUN */
+
+#define hrt_scalar_store(cell, type, var, data) \
+ HRTCAT(hrt_mem_store_,HRT_TYPE_BITS(cell, type))(\
+ cell, \
+ HRTCAT(HIVE_MEM_,var), \
+ HRTCAT(HIVE_ADDR_,var), \
+ (HRT_INT_TYPE(type))(data))
+
+#define hrt_scalar_load(cell, type, var) \
+ (HRT_HOST_TYPE(type))(HRTCAT4(_hrt_mem_load_,HRT_PROC_TYPE(cell),_,type) ( \
+ cell, \
+ HRTCAT(HIVE_MEM_,var), \
+ HRTCAT(HIVE_ADDR_,var)))
+
+#define hrt_indexed_store(cell, type, array, index, data) \
+ HRTCAT(hrt_mem_store_,HRT_TYPE_BITS(cell, type))(\
+ cell, \
+ HRTCAT(HIVE_MEM_,array), \
+ (HRTCAT(HIVE_ADDR_,array))+((index)*HRT_TYPE_BYTES(cell, type)), \
+ (HRT_INT_TYPE(type))(data))
+
+#define hrt_indexed_load(cell, type, array, index) \
+ (HRT_HOST_TYPE(type))(HRTCAT4(_hrt_mem_load_,HRT_PROC_TYPE(cell),_,type) ( \
+ cell, \
+ HRTCAT(HIVE_MEM_,array), \
+ (HRTCAT(HIVE_ADDR_,array))+((index)*HRT_TYPE_BYTES(cell, type))))
+
+#endif /* C_RUN */
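+
+/*
+ * Editorial sketch, not part of the original header: typical use of the
+ * accessors above.  They rely on per-symbol HIVE_MEM_<var>/HIVE_ADDR_<var>
+ * macros (generated into files such as spmem_dump.c) plus the HRT cell
+ * infrastructure; the cell name "sp" and the symbol used below are taken
+ * from elsewhere in this patch, and the call sites are illustrative only:
+ *
+ *	hrt_scalar_store(sp, uint, sp_sw_state, 1);
+ *	state = hrt_scalar_load(sp, uint, sp_sw_state);
+ */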
+
+#endif /* _HRT_VAR_H */
+#endif
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/version.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/version.h
new file mode 100644
index 000000000000..bbc4948baea9
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/version.h
@@ -0,0 +1,20 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef HRT_VERSION_H
+#define HRT_VERSION_H
+#define HRT_VERSION_MAJOR 1
+#define HRT_VERSION_MINOR 4
+#define HRT_VERSION 1_4
+#endif
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/ibuf_ctrl_global.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/ibuf_ctrl_global.h
new file mode 100644
index 000000000000..edb23252c48e
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/ibuf_ctrl_global.h
@@ -0,0 +1,80 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IBUF_CTRL_GLOBAL_H_INCLUDED__
+#define __IBUF_CTRL_GLOBAL_H_INCLUDED__
+
+#include <type_support.h>
+
+#include <ibuf_cntrl_defs.h> /* _IBUF_CNTRL_RECALC_WORDS_STATUS,
+ * _IBUF_CNTRL_ARBITERS_STATUS,
+ * _IBUF_CNTRL_PROC_REG_ALIGN,
+ * etc.
+ */
+
+/* The definition of the contents of the main controller state register
+ * is missing from ibuf_cntrl_defs.h, so define them here:
+ */
+#define _IBUF_CNTRL_MAIN_CNTRL_FSM_MASK 0xf
+#define _IBUF_CNTRL_MAIN_CNTRL_FSM_NEXT_COMMAND_CHECK 0x9
+#define _IBUF_CNTRL_MAIN_CNTRL_MEM_INP_BUF_ALLOC (1 << 8)
+#define _IBUF_CNTRL_DMA_SYNC_WAIT_FOR_SYNC 1
+#define _IBUF_CNTRL_DMA_SYNC_FSM_WAIT_FOR_ACK (0x3 << 1)
+
+typedef struct ib_buffer_s ib_buffer_t;
+struct ib_buffer_s {
+ uint32_t start_addr; /* start address of the buffer in the
+ * "input-buffer hardware block"
+ */
+
+ uint32_t stride; /* stride per buffer line (in bytes) */
+ uint32_t lines; /* lines in the buffer */
+};
+
+typedef struct ibuf_ctrl_cfg_s ibuf_ctrl_cfg_t;
+struct ibuf_ctrl_cfg_s {
+
+ bool online;
+
+ struct {
+ /* DMA configuration */
+ uint32_t channel;
+ uint32_t cmd; /* must be _DMA_V2_MOVE_A2B_NO_SYNC_CHK_COMMAND */
+
+ /* DMA reconfiguration */
+ uint32_t shift_returned_items;
+ uint32_t elems_per_word_in_ibuf;
+ uint32_t elems_per_word_in_dest;
+ } dma_cfg;
+
+ ib_buffer_t ib_buffer;
+
+ struct {
+ uint32_t stride;
+ uint32_t start_addr;
+ uint32_t lines;
+ } dest_buf_cfg;
+
+ uint32_t items_per_store;
+ uint32_t stores_per_frame;
+
+ struct {
+ uint32_t sync_cmd; /* must be _STREAM2MMIO_CMD_TOKEN_SYNC_FRAME */
+ uint32_t store_cmd; /* must be _STREAM2MMIO_CMD_TOKEN_STORE_PACKETS */
+ } stream2mmio_cfg;
+};
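+
+/*
+ * Editorial sketch, not part of the original header: the field comments
+ * above pin three members to fixed command values.  A caller would set
+ * them roughly as follows (assuming the corresponding *_defs.h headers
+ * are in scope; all other fields are left out of this sketch):
+ *
+ *	cfg.dma_cfg.cmd               = _DMA_V2_MOVE_A2B_NO_SYNC_CHK_COMMAND;
+ *	cfg.stream2mmio_cfg.sync_cmd  = _STREAM2MMIO_CMD_TOKEN_SYNC_FRAME;
+ *	cfg.stream2mmio_cfg.store_cmd = _STREAM2MMIO_CMD_TOKEN_STORE_PACKETS;
+ */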
+
+extern const uint32_t N_IBUF_CTRL_PROCS[N_IBUF_CTRL_ID];
+
+#endif /* __IBUF_CTRL_GLOBAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/input_system_global.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/input_system_global.h
new file mode 100644
index 000000000000..25e3f04f374b
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/input_system_global.h
@@ -0,0 +1,206 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __INPUT_SYSTEM_GLOBAL_H_INCLUDED__
+#define __INPUT_SYSTEM_GLOBAL_H_INCLUDED__
+
+#define IS_INPUT_SYSTEM_VERSION_VERSION_2401
+
+/* The CSI receiver has 3 ports. */
+#define N_CSI_PORTS (3)
+
+#include "isys_dma.h" /* isys2401_dma_channel,
+ * isys2401_dma_cfg_t
+ */
+
+#include "ibuf_ctrl.h" /* ibuf_cfg_t,
+ * ibuf_ctrl_cfg_t
+ */
+
+#include "isys_stream2mmio.h" /* stream2mmio_cfg_t */
+
+#include "csi_rx.h" /* csi_rx_frontend_cfg_t,
+ * csi_rx_backend_cfg_t,
+ * csi_rx_backend_lut_entry_t
+ */
+#include "pixelgen.h"
+
+
+#define INPUT_SYSTEM_N_STREAM_ID 6 /* maximum number of simultaneous
+ virtual channels supported */
+
+typedef enum {
+ INPUT_SYSTEM_ERR_NO_ERROR = 0,
+ INPUT_SYSTEM_ERR_CREATE_CHANNEL_FAIL,
+ INPUT_SYSTEM_ERR_CONFIGURE_CHANNEL_FAIL,
+ INPUT_SYSTEM_ERR_OPEN_CHANNEL_FAIL,
+ INPUT_SYSTEM_ERR_TRANSFER_FAIL,
+ INPUT_SYSTEM_ERR_CREATE_INPUT_PORT_FAIL,
+ INPUT_SYSTEM_ERR_CONFIGURE_INPUT_PORT_FAIL,
+ INPUT_SYSTEM_ERR_OPEN_INPUT_PORT_FAIL,
+ N_INPUT_SYSTEM_ERR
+} input_system_err_t;
+
+typedef enum {
+ INPUT_SYSTEM_SOURCE_TYPE_UNDEFINED = 0,
+ INPUT_SYSTEM_SOURCE_TYPE_SENSOR,
+ INPUT_SYSTEM_SOURCE_TYPE_TPG,
+ INPUT_SYSTEM_SOURCE_TYPE_PRBS,
+ N_INPUT_SYSTEM_SOURCE_TYPE
+} input_system_source_type_t;
+
+typedef enum {
+ INPUT_SYSTEM_POLL_ON_WAIT_FOR_FRAME,
+ INPUT_SYSTEM_POLL_ON_CAPTURE_REQUEST,
+} input_system_polling_mode_t;
+
+typedef struct input_system_channel_s input_system_channel_t;
+struct input_system_channel_s {
+ stream2mmio_ID_t stream2mmio_id;
+ stream2mmio_sid_ID_t stream2mmio_sid_id;
+
+ ibuf_ctrl_ID_t ibuf_ctrl_id;
+ ib_buffer_t ib_buffer;
+
+ isys2401_dma_ID_t dma_id;
+ isys2401_dma_channel dma_channel;
+};
+
+typedef struct input_system_channel_cfg_s input_system_channel_cfg_t;
+struct input_system_channel_cfg_s {
+ stream2mmio_cfg_t stream2mmio_cfg;
+ ibuf_ctrl_cfg_t ibuf_ctrl_cfg;
+ isys2401_dma_cfg_t dma_cfg;
+ isys2401_dma_port_cfg_t dma_src_port_cfg;
+ isys2401_dma_port_cfg_t dma_dest_port_cfg;
+};
+
+typedef struct input_system_input_port_s input_system_input_port_t;
+struct input_system_input_port_s {
+ input_system_source_type_t source_type;
+
+ struct {
+ csi_rx_frontend_ID_t frontend_id;
+ csi_rx_backend_ID_t backend_id;
+ csi_mipi_packet_type_t packet_type;
+ csi_rx_backend_lut_entry_t backend_lut_entry;
+ } csi_rx;
+
+ struct {
+ csi_mipi_packet_type_t packet_type;
+ csi_rx_backend_lut_entry_t backend_lut_entry;
+ } metadata;
+
+ struct {
+ pixelgen_ID_t pixelgen_id;
+ } pixelgen;
+};
+
+typedef struct input_system_input_port_cfg_s input_system_input_port_cfg_t;
+struct input_system_input_port_cfg_s {
+ struct {
+ csi_rx_frontend_cfg_t frontend_cfg;
+ csi_rx_backend_cfg_t backend_cfg;
+ csi_rx_backend_cfg_t md_backend_cfg;
+ } csi_rx_cfg;
+
+ struct {
+ pixelgen_tpg_cfg_t tpg_cfg;
+ pixelgen_prbs_cfg_t prbs_cfg;
+ } pixelgen_cfg;
+};
+
+typedef struct input_system_cfg_s input_system_cfg_t;
+struct input_system_cfg_s {
+ input_system_input_port_ID_t input_port_id;
+
+ input_system_source_type_t mode;
+#ifdef ISP2401
+ input_system_polling_mode_t polling_mode;
+#endif
+
+ bool online;
+ bool raw_packed;
+ int8_t linked_isys_stream_id;
+
+ struct {
+ bool comp_enable;
+ int32_t active_lanes;
+ int32_t fmt_type;
+ int32_t ch_id;
+ int32_t comp_predictor;
+ int32_t comp_scheme;
+ } csi_port_attr;
+
+ pixelgen_tpg_cfg_t tpg_port_attr;
+
+ pixelgen_prbs_cfg_t prbs_port_attr;
+
+ struct {
+ int32_t align_req_in_bytes;
+ int32_t bits_per_pixel;
+ int32_t pixels_per_line;
+ int32_t lines_per_frame;
+ } input_port_resolution;
+
+ struct {
+ int32_t left_padding;
+ int32_t max_isp_input_width;
+ } output_port_attr;
+
+ struct {
+ bool enable;
+ int32_t fmt_type;
+ int32_t align_req_in_bytes;
+ int32_t bits_per_pixel;
+ int32_t pixels_per_line;
+ int32_t lines_per_frame;
+ } metadata;
+};
+
+typedef struct virtual_input_system_stream_s virtual_input_system_stream_t;
+struct virtual_input_system_stream_s {
+ uint32_t id; /* Used when multiple MIPI data types and/or virtual channels are used.
+ Must be unique within one CSI RX
+ and lower than SH_CSS_MAX_ISYS_CHANNEL_NODES */
+ uint8_t enable_metadata;
+ input_system_input_port_t input_port;
+ input_system_channel_t channel;
+ input_system_channel_t md_channel; /* metadata channel */
+ uint8_t online;
+ int8_t linked_isys_stream_id;
+ uint8_t valid;
+#ifdef ISP2401
+ input_system_polling_mode_t polling_mode;
+ int32_t subscr_index;
+#endif
+};
+
+typedef struct virtual_input_system_stream_cfg_s virtual_input_system_stream_cfg_t;
+struct virtual_input_system_stream_cfg_s {
+ uint8_t enable_metadata;
+ input_system_input_port_cfg_t input_port_cfg;
+ input_system_channel_cfg_t channel_cfg;
+ input_system_channel_cfg_t md_channel_cfg;
+ uint8_t valid;
+};
+
+#define ISP_INPUT_BUF_START_ADDR 0
+#define NUM_OF_INPUT_BUF 2
+#define NUM_OF_LINES_PER_BUF 2
+#define LINES_OF_ISP_INPUT_BUF (NUM_OF_INPUT_BUF * NUM_OF_LINES_PER_BUF)
+#define ISP_INPUT_BUF_STRIDE SH_CSS_MAX_SENSOR_WIDTH
+
+
+#endif /* __INPUT_SYSTEM_GLOBAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/isys_dma_global.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/isys_dma_global.h
new file mode 100644
index 000000000000..e7a734a9fc43
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/isys_dma_global.h
@@ -0,0 +1,87 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __ISYS_DMA_GLOBAL_H_INCLUDED__
+#define __ISYS_DMA_GLOBAL_H_INCLUDED__
+
+#include <type_support.h>
+
+#define HIVE_ISYS2401_DMA_IBUF_DDR_CONN 0
+#define HIVE_ISYS2401_DMA_IBUF_VMEM_CONN 1
+#define _DMA_V2_ZERO_EXTEND 0
+#define _DMA_V2_SIGN_EXTEND 1
+
+#define _DMA_ZERO_EXTEND _DMA_V2_ZERO_EXTEND
+#define _DMA_SIGN_EXTEND _DMA_V2_SIGN_EXTEND
+
+/********************************************************
+ *
+ * DMA Port.
+ *
+ * The DMA port definition for the input system
+ * 2401 DMA duplicates the DMA port definition for
+ * the CSS system DMA. It is duplicated here only as
+ * a temporary step until the device library becomes
+ * available; the device library is supposed to allow
+ * reuse of the control interface of the same device
+ * prototypes.
+ *
+ ********************************************************/
+typedef struct isys2401_dma_port_cfg_s isys2401_dma_port_cfg_t;
+struct isys2401_dma_port_cfg_s {
+ uint32_t stride;
+ uint32_t elements;
+ uint32_t cropping;
+ uint32_t width;
+};
+/** end of DMA Port */
+
+/************************************************
+ *
+ * DMA Device.
+ *
+ * The DMA device definition for the input system
+ * 2401 DMA duplicates the DMA device definition for
+ * the CSS system DMA. It is duplicated here only as
+ * a temporary step until the device library becomes
+ * available; the device library is supposed to allow
+ * reuse of the control interface of the same device
+ * prototypes.
+ *
+ ************************************************/
+typedef enum {
+ isys2401_dma_ibuf_to_ddr_connection = HIVE_ISYS2401_DMA_IBUF_DDR_CONN,
+ isys2401_dma_ibuf_to_vmem_connection = HIVE_ISYS2401_DMA_IBUF_VMEM_CONN
+} isys2401_dma_connection;
+
+typedef enum {
+ isys2401_dma_zero_extension = _DMA_ZERO_EXTEND,
+ isys2401_dma_sign_extension = _DMA_SIGN_EXTEND
+} isys2401_dma_extension;
+
+typedef struct isys2401_dma_cfg_s isys2401_dma_cfg_t;
+struct isys2401_dma_cfg_s {
+ isys2401_dma_channel channel;
+ isys2401_dma_connection connection;
+ isys2401_dma_extension extension;
+ uint32_t height;
+};
+/** end of DMA Device */
+
+/* isys2401_dma_channel limits per DMA ID */
+extern const isys2401_dma_channel N_ISYS2401_DMA_CHANNEL_PROCS[N_ISYS2401_DMA_ID];
+
+#endif /* __ISYS_DMA_GLOBAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/isys_irq_global.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/isys_irq_global.h
new file mode 100644
index 000000000000..41d051db3987
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/isys_irq_global.h
@@ -0,0 +1,35 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __ISYS_IRQ_GLOBAL_H__
+#define __ISYS_IRQ_GLOBAL_H__
+
+#if defined(USE_INPUT_SYSTEM_VERSION_2401)
+
+/* Register offset/index from base location */
+#define ISYS_IRQ_EDGE_REG_IDX (0)
+#define ISYS_IRQ_MASK_REG_IDX (ISYS_IRQ_EDGE_REG_IDX + 1)
+#define ISYS_IRQ_STATUS_REG_IDX (ISYS_IRQ_EDGE_REG_IDX + 2)
+#define ISYS_IRQ_CLEAR_REG_IDX (ISYS_IRQ_EDGE_REG_IDX + 3)
+#define ISYS_IRQ_ENABLE_REG_IDX (ISYS_IRQ_EDGE_REG_IDX + 4)
+#define ISYS_IRQ_LEVEL_NO_REG_IDX (ISYS_IRQ_EDGE_REG_IDX + 5)
+
+/* Register values */
+#define ISYS_IRQ_MASK_REG_VALUE (0xFFFF)
+#define ISYS_IRQ_CLEAR_REG_VALUE (0xFFFF)
+#define ISYS_IRQ_ENABLE_REG_VALUE (0xFFFF)
+
+#endif /* defined(USE_INPUT_SYSTEM_VERSION_2401) */
+
+#endif /* __ISYS_IRQ_GLOBAL_H__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/isys_stream2mmio_global.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/isys_stream2mmio_global.h
new file mode 100644
index 000000000000..649f44fd2408
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/isys_stream2mmio_global.h
@@ -0,0 +1,39 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __ISYS_STREAM2MMIO_GLOBAL_H_INCLUDED__
+#define __ISYS_STREAM2MMIO_GLOBAL_H_INCLUDED__
+
+#include <type_support.h>
+
+typedef struct stream2mmio_cfg_s stream2mmio_cfg_t;
+struct stream2mmio_cfg_s {
+ uint32_t bits_per_pixel;
+ uint32_t enable_blocking;
+};
+
+/* Stream2MMIO limits per ID*/
+/*
+ * Stream2MMIO 0 has 8 SIDs that are indexed by
+ * [STREAM2MMIO_SID0_ID...STREAM2MMIO_SID7_ID].
+ *
+ * Stream2MMIO 1 has 4 SIDs that are indexed by
+ * [STREAM2MMIO_SID0_ID...STREAM2MMIO_SID3_ID].
+ *
+ * Stream2MMIO 2 has 4 SIDs that are indexed by
+ * [STREAM2MMIO_SID0_ID...STREAM2MMIO_SID3_ID].
+ */
+extern const stream2mmio_sid_ID_t N_STREAM2MMIO_SID_PROCS[N_STREAM2MMIO_ID];
+
+#endif /* __ISYS_STREAM2MMIO_GLOBAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/pixelgen_global.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/pixelgen_global.h
new file mode 100644
index 000000000000..216813e42a0a
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/pixelgen_global.h
@@ -0,0 +1,91 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __PIXELGEN_GLOBAL_H_INCLUDED__
+#define __PIXELGEN_GLOBAL_H_INCLUDED__
+
+#include <type_support.h>
+
+/**
+ * Pixel-generator. ("pixelgen_global.h")
+ */
+/*
+ * Duplicates "sync_generator_cfg_t" in "input_system_global.h".
+ */
+typedef struct sync_generator_cfg_s sync_generator_cfg_t;
+struct sync_generator_cfg_s {
+ uint32_t hblank_cycles;
+ uint32_t vblank_cycles;
+ uint32_t pixels_per_clock;
+ uint32_t nr_of_frames;
+ uint32_t pixels_per_line;
+ uint32_t lines_per_frame;
+};
+
+typedef enum {
+ PIXELGEN_TPG_MODE_RAMP = 0,
+ PIXELGEN_TPG_MODE_CHBO,
+ PIXELGEN_TPG_MODE_MONO,
+ N_PIXELGEN_TPG_MODE
+} pixelgen_tpg_mode_t;
+
+/*
+ * "pixelgen_tpg_cfg_t" duplicates parts of
+ * "tpg_cfg_t" in "input_system_global.h".
+ */
+typedef struct pixelgen_tpg_cfg_s pixelgen_tpg_cfg_t;
+struct pixelgen_tpg_cfg_s {
+ pixelgen_tpg_mode_t mode; /* CHBO, MONO */
+
+ struct {
+ /* used by both CHBO and MONO */
+ uint32_t R1;
+ uint32_t G1;
+ uint32_t B1;
+
+ /* used by CHBO only */
+ uint32_t R2;
+ uint32_t G2;
+ uint32_t B2;
+ } color_cfg;
+
+ struct {
+ uint32_t h_mask; /* horizontal mask */
+ uint32_t v_mask; /* vertical mask */
+ uint32_t hv_mask; /* horizontal+vertical mask? */
+ } mask_cfg;
+
+ struct {
+ int32_t h_delta; /* horizontal delta? */
+ int32_t v_delta; /* vertical delta? */
+ } delta_cfg;
+
+ sync_generator_cfg_t sync_gen_cfg;
+};
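+
+/*
+ * Editorial sketch, not part of the original header: a minimal
+ * checkerboard (CHBO) test-pattern configuration using the struct above.
+ * All numeric values are placeholders chosen for illustration only:
+ *
+ *	pixelgen_tpg_cfg_t cfg = {
+ *		.mode = PIXELGEN_TPG_MODE_CHBO,
+ *		.color_cfg = { .R1 = 51, .G1 = 102, .B1 = 153,
+ *			       .R2 = 204, .G2 = 153, .B2 = 102 },
+ *		.sync_gen_cfg = {
+ *			.hblank_cycles    = 100,
+ *			.vblank_cycles    = 100,
+ *			.pixels_per_clock = 1,
+ *			.nr_of_frames     = 1,
+ *			.pixels_per_line  = 1920,
+ *			.lines_per_frame  = 1080,
+ *		},
+ *	};
+ */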
+
+/*
+ * "pixelgen_prbs_cfg_t" duplicates parts of
+ * prbs_cfg_t" in "input_system_global.h".
+ */
+typedef struct pixelgen_prbs_cfg_s pixelgen_prbs_cfg_t;
+struct pixelgen_prbs_cfg_s {
+ int32_t seed0;
+ int32_t seed1;
+
+ sync_generator_cfg_t sync_gen_cfg;
+};
+
+/** end of Pixel-generator: TPG. ("pixelgen_global.h") */
+#endif /* __PIXELGEN_GLOBAL_H_INCLUDED__ */
+
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/spmem_dump.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/spmem_dump.c
new file mode 100644
index 000000000000..d733a3503a20
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/spmem_dump.c
@@ -0,0 +1,3686 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _sp_map_h_
+#define _sp_map_h_
+
+
+#ifndef _hrt_dummy_use_blob_sp
+#define _hrt_dummy_use_blob_sp()
+#endif
+
+#define _hrt_cell_load_program_sp(proc) _hrt_cell_load_program_embedded(proc, sp)
+
+#ifndef ISP2401
+/* function longjmp: 680D */
+#else
+/* function longjmp: 6A0B */
+#endif
+
+#ifndef ISP2401
+/* function tmpmem_init_dmem: 6558 */
+#else
+/* function tmpmem_init_dmem: 671E */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_dmaproxy_sp_set_addr_B: 3C50 */
+#else
+/* function ia_css_dmaproxy_sp_set_addr_B: 3DC5 */
+
+/* function ia_css_pipe_data_init_tagger_resources: AC7 */
+#endif
+
+/* function debug_buffer_set_ddr_addr: DD */
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_vbuf_mipi
+#define HIVE_MEM_vbuf_mipi scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_vbuf_mipi 0x7398
+#else
+#define HIVE_ADDR_vbuf_mipi 0x7444
+#endif
+#define HIVE_SIZE_vbuf_mipi 12
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_vbuf_mipi scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_vbuf_mipi 0x7398
+#else
+#define HIVE_ADDR_sp_vbuf_mipi 0x7444
+#endif
+#define HIVE_SIZE_sp_vbuf_mipi 12
+
+#ifndef ISP2401
+/* function ia_css_event_sp_decode: 3E41 */
+#else
+/* function ia_css_event_sp_decode: 3FB6 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_queue_get_size: 51BF */
+#else
+/* function ia_css_queue_get_size: 53C8 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_queue_load: 5800 */
+#else
+/* function ia_css_queue_load: 59DF */
+#endif
+
+#ifndef ISP2401
+/* function setjmp: 6816 */
+#else
+/* function setjmp: 6A14 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_pipeline_sp_sfi_get_current_frame: 27BF */
+#else
+/* function ia_css_pipeline_sp_sfi_get_current_frame: 2790 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_sem_for_sp2host_isys_event_queue
+#define HIVE_MEM_sem_for_sp2host_isys_event_queue scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sem_for_sp2host_isys_event_queue 0x5760
+#else
+#define HIVE_ADDR_sem_for_sp2host_isys_event_queue 0x57FC
+#endif
+#define HIVE_SIZE_sem_for_sp2host_isys_event_queue 20
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_sem_for_sp2host_isys_event_queue scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_sem_for_sp2host_isys_event_queue 0x5760
+#else
+#define HIVE_ADDR_sp_sem_for_sp2host_isys_event_queue 0x57FC
+#endif
+#define HIVE_SIZE_sp_sem_for_sp2host_isys_event_queue 20
+
+#ifndef ISP2401
+/* function ia_css_dmaproxy_sp_wait_for_ack: 6DA9 */
+#else
+/* function ia_css_dmaproxy_sp_wait_for_ack: 6FF7 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_sp_rawcopy_func: 596B */
+#else
+/* function ia_css_sp_rawcopy_func: 5B4A */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_tagger_buf_sp_pop_marked: 3339 */
+#else
+/* function ia_css_tagger_buf_sp_pop_marked: 345C */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_N_CSI_RX_BE_SID_WIDTH
+#define HIVE_MEM_N_CSI_RX_BE_SID_WIDTH scalar_processor_2400_dmem
+#define HIVE_ADDR_N_CSI_RX_BE_SID_WIDTH 0x1D0
+#define HIVE_SIZE_N_CSI_RX_BE_SID_WIDTH 12
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_N_CSI_RX_BE_SID_WIDTH scalar_processor_2400_dmem
+#define HIVE_ADDR_sp_N_CSI_RX_BE_SID_WIDTH 0x1D0
+#define HIVE_SIZE_sp_N_CSI_RX_BE_SID_WIDTH 12
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_isp_stage
+#define HIVE_MEM_isp_stage scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_isp_stage 0x6C98
+#else
+#define HIVE_ADDR_isp_stage 0x6D48
+#endif
+#define HIVE_SIZE_isp_stage 832
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_isp_stage scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_isp_stage 0x6C98
+#else
+#define HIVE_ADDR_sp_isp_stage 0x6D48
+#endif
+#define HIVE_SIZE_sp_isp_stage 832
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_vbuf_raw
+#define HIVE_MEM_vbuf_raw scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_vbuf_raw 0x37C
+#else
+#define HIVE_ADDR_vbuf_raw 0x394
+#endif
+#define HIVE_SIZE_vbuf_raw 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_vbuf_raw scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_vbuf_raw 0x37C
+#else
+#define HIVE_ADDR_sp_vbuf_raw 0x394
+#endif
+#define HIVE_SIZE_sp_vbuf_raw 4
+
+#ifndef ISP2401
+/* function ia_css_sp_bin_copy_func: 594C */
+#else
+/* function ia_css_sp_bin_copy_func: 5B2B */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_queue_item_store: 554E */
+#else
+/* function ia_css_queue_item_store: 572D */
+#endif
+
+#ifndef ISP2401
+/* function input_system_reset: 1286 */
+#else
+/* function input_system_reset: 1201 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_ia_css_bufq_sp_pipe_private_metadata_bufs
+#define HIVE_MEM_ia_css_bufq_sp_pipe_private_metadata_bufs scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_ia_css_bufq_sp_pipe_private_metadata_bufs 0x5B38
+#else
+#define HIVE_ADDR_ia_css_bufq_sp_pipe_private_metadata_bufs 0x5BE4
+#endif
+#define HIVE_SIZE_ia_css_bufq_sp_pipe_private_metadata_bufs 20
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_ia_css_bufq_sp_pipe_private_metadata_bufs scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_ia_css_bufq_sp_pipe_private_metadata_bufs 0x5B38
+#else
+#define HIVE_ADDR_sp_ia_css_bufq_sp_pipe_private_metadata_bufs 0x5BE4
+#endif
+#define HIVE_SIZE_sp_ia_css_bufq_sp_pipe_private_metadata_bufs 20
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_ia_css_bufq_sp_pipe_private_buffer_bufs
+#define HIVE_MEM_ia_css_bufq_sp_pipe_private_buffer_bufs scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_ia_css_bufq_sp_pipe_private_buffer_bufs 0x5B4C
+#else
+#define HIVE_ADDR_ia_css_bufq_sp_pipe_private_buffer_bufs 0x5BF8
+#endif
+#define HIVE_SIZE_ia_css_bufq_sp_pipe_private_buffer_bufs 160
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_ia_css_bufq_sp_pipe_private_buffer_bufs scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_ia_css_bufq_sp_pipe_private_buffer_bufs 0x5B4C
+#else
+#define HIVE_ADDR_sp_ia_css_bufq_sp_pipe_private_buffer_bufs 0x5BF8
+#endif
+#define HIVE_SIZE_sp_ia_css_bufq_sp_pipe_private_buffer_bufs 160
+
+/* function sp_start_isp: 39C */
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_sp_binary_group
+#define HIVE_MEM_sp_binary_group scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_binary_group 0x7088
+#else
+#define HIVE_ADDR_sp_binary_group 0x7138
+#endif
+#define HIVE_SIZE_sp_binary_group 32
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_sp_binary_group scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_sp_binary_group 0x7088
+#else
+#define HIVE_ADDR_sp_sp_binary_group 0x7138
+#endif
+#define HIVE_SIZE_sp_sp_binary_group 32
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_sp_sw_state
+#define HIVE_MEM_sp_sw_state scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_sw_state 0x7344
+#else
+#define HIVE_ADDR_sp_sw_state 0x73F0
+#endif
+#define HIVE_SIZE_sp_sw_state 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_sp_sw_state scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_sp_sw_state 0x7344
+#else
+#define HIVE_ADDR_sp_sp_sw_state 0x73F0
+#endif
+#define HIVE_SIZE_sp_sp_sw_state 4
+
+#ifndef ISP2401
+/* function ia_css_thread_sp_main: 13F7 */
+#else
+/* function ia_css_thread_sp_main: 136D */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_ispctrl_sp_init_internal_buffers: 4047 */
+#else
+/* function ia_css_ispctrl_sp_init_internal_buffers: 41F7 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_sp2host_psys_event_queue_handle
+#define HIVE_MEM_sp2host_psys_event_queue_handle scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp2host_psys_event_queue_handle 0x5BEC
+#else
+#define HIVE_ADDR_sp2host_psys_event_queue_handle 0x5C98
+#endif
+#define HIVE_SIZE_sp2host_psys_event_queue_handle 12
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_sp2host_psys_event_queue_handle scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_sp2host_psys_event_queue_handle 0x5BEC
+#else
+#define HIVE_ADDR_sp_sp2host_psys_event_queue_handle 0x5C98
+#endif
+#define HIVE_SIZE_sp_sp2host_psys_event_queue_handle 12
+
+#ifndef ISP2401
+/* function pixelgen_unit_test: E68 */
+#else
+/* function pixelgen_unit_test: E62 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_sem_for_sp2host_psys_event_queue
+#define HIVE_MEM_sem_for_sp2host_psys_event_queue scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sem_for_sp2host_psys_event_queue 0x5774
+#else
+#define HIVE_ADDR_sem_for_sp2host_psys_event_queue 0x5810
+#endif
+#define HIVE_SIZE_sem_for_sp2host_psys_event_queue 20
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_sem_for_sp2host_psys_event_queue scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_sem_for_sp2host_psys_event_queue 0x5774
+#else
+#define HIVE_ADDR_sp_sem_for_sp2host_psys_event_queue 0x5810
+#endif
+#define HIVE_SIZE_sp_sem_for_sp2host_psys_event_queue 20
+
+#ifndef ISP2401
+/* function ia_css_tagger_sp_propagate_frame: 2D52 */
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_sp_stop_copy_preview
+#define HIVE_MEM_sp_stop_copy_preview scalar_processor_2400_dmem
+#define HIVE_ADDR_sp_stop_copy_preview 0x7328
+#define HIVE_SIZE_sp_stop_copy_preview 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_sp_stop_copy_preview scalar_processor_2400_dmem
+#define HIVE_ADDR_sp_sp_stop_copy_preview 0x7328
+#define HIVE_SIZE_sp_sp_stop_copy_preview 4
+#else
+/* function ia_css_tagger_sp_propagate_frame: 2D23 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_vbuf_handles
+#define HIVE_MEM_vbuf_handles scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_vbuf_handles 0x73A4
+#else
+#define HIVE_ADDR_vbuf_handles 0x7450
+#endif
+#define HIVE_SIZE_vbuf_handles 960
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_vbuf_handles scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_vbuf_handles 0x73A4
+#else
+#define HIVE_ADDR_sp_vbuf_handles 0x7450
+#endif
+#define HIVE_SIZE_sp_vbuf_handles 960
+
+#ifndef ISP2401
+/* function ia_css_queue_store: 56B4 */
+
+/* function ia_css_sp_flash_register: 356E */
+#else
+/* function ia_css_queue_store: 5893 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_sp_rawcopy_dummy_function: 5CF7 */
+#else
+/* function ia_css_sp_flash_register: 3691 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_pipeline_sp_init: 201C */
+#else
+/* function ia_css_pipeline_sp_init: 1FD7 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_tagger_sp_configure: 2C42 */
+#else
+/* function ia_css_tagger_sp_configure: 2C13 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_ispctrl_sp_end_binary: 3E8A */
+#else
+/* function ia_css_ispctrl_sp_end_binary: 3FFF */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_ia_css_bufq_sp_h_pipe_private_per_frame_ddr_ptrs
+#define HIVE_MEM_ia_css_bufq_sp_h_pipe_private_per_frame_ddr_ptrs scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_ia_css_bufq_sp_h_pipe_private_per_frame_ddr_ptrs 0x5BF8
+#else
+#define HIVE_ADDR_ia_css_bufq_sp_h_pipe_private_per_frame_ddr_ptrs 0x5CA4
+#endif
+#define HIVE_SIZE_ia_css_bufq_sp_h_pipe_private_per_frame_ddr_ptrs 20
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_ia_css_bufq_sp_h_pipe_private_per_frame_ddr_ptrs scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_ia_css_bufq_sp_h_pipe_private_per_frame_ddr_ptrs 0x5BF8
+#else
+#define HIVE_ADDR_sp_ia_css_bufq_sp_h_pipe_private_per_frame_ddr_ptrs 0x5CA4
+#endif
+#define HIVE_SIZE_sp_ia_css_bufq_sp_h_pipe_private_per_frame_ddr_ptrs 20
+
+#ifndef ISP2401
+/* function pixelgen_tpg_run: F1E */
+#else
+/* function pixelgen_tpg_run: F18 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_event_is_pending_mask
+#define HIVE_MEM_event_is_pending_mask scalar_processor_2400_dmem
+#define HIVE_ADDR_event_is_pending_mask 0x5C
+#define HIVE_SIZE_event_is_pending_mask 44
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_event_is_pending_mask scalar_processor_2400_dmem
+#define HIVE_ADDR_sp_event_is_pending_mask 0x5C
+#define HIVE_SIZE_sp_event_is_pending_mask 44
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_sp_all_cb_elems_frame
+#define HIVE_MEM_sp_all_cb_elems_frame scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_all_cb_elems_frame 0x5788
+#else
+#define HIVE_ADDR_sp_all_cb_elems_frame 0x5824
+#endif
+#define HIVE_SIZE_sp_all_cb_elems_frame 16
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_sp_all_cb_elems_frame scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_sp_all_cb_elems_frame 0x5788
+#else
+#define HIVE_ADDR_sp_sp_all_cb_elems_frame 0x5824
+#endif
+#define HIVE_SIZE_sp_sp_all_cb_elems_frame 16
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_sp2host_isys_event_queue_handle
+#define HIVE_MEM_sp2host_isys_event_queue_handle scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp2host_isys_event_queue_handle 0x5C0C
+#else
+#define HIVE_ADDR_sp2host_isys_event_queue_handle 0x5CB8
+#endif
+#define HIVE_SIZE_sp2host_isys_event_queue_handle 12
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_sp2host_isys_event_queue_handle scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_sp2host_isys_event_queue_handle 0x5C0C
+#else
+#define HIVE_ADDR_sp_sp2host_isys_event_queue_handle 0x5CB8
+#endif
+#define HIVE_SIZE_sp_sp2host_isys_event_queue_handle 12
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_host_sp_com
+#define HIVE_MEM_host_sp_com scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_host_sp_com 0x3E48
+#else
+#define HIVE_ADDR_host_sp_com 0x3E6C
+#endif
+#define HIVE_SIZE_host_sp_com 220
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_host_sp_com scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_host_sp_com 0x3E48
+#else
+#define HIVE_ADDR_sp_host_sp_com 0x3E6C
+#endif
+#define HIVE_SIZE_sp_host_sp_com 220
+
+#ifndef ISP2401
+/* function ia_css_queue_get_free_space: 5313 */
+#else
+/* function ia_css_queue_get_free_space: 54F2 */
+#endif
+
+#ifndef ISP2401
+/* function exec_image_pipe: 5E6 */
+#else
+/* function exec_image_pipe: 57A */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_sp_init_dmem_data
+#define HIVE_MEM_sp_init_dmem_data scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_init_dmem_data 0x7348
+#else
+#define HIVE_ADDR_sp_init_dmem_data 0x73F4
+#endif
+#define HIVE_SIZE_sp_init_dmem_data 24
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_sp_init_dmem_data scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_sp_init_dmem_data 0x7348
+#else
+#define HIVE_ADDR_sp_sp_init_dmem_data 0x73F4
+#endif
+#define HIVE_SIZE_sp_sp_init_dmem_data 24
+
+#ifndef ISP2401
+/* function ia_css_sp_metadata_start: 5DD1 */
+#else
+/* function ia_css_sp_metadata_start: 5EB3 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_bufq_sp_init_buffer_queues: 35BF */
+#else
+/* function ia_css_bufq_sp_init_buffer_queues: 36E2 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_pipeline_sp_stop: 1FFF */
+#else
+/* function ia_css_pipeline_sp_stop: 1FBA */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_tagger_sp_connect_pipes: 312C */
+#else
+/* function ia_css_tagger_sp_connect_pipes: 30FD */
+#endif
+
+#ifndef ISP2401
+/* function sp_isys_copy_wait: 644 */
+#else
+/* function sp_isys_copy_wait: 5D8 */
+#endif
+
+/* function is_isp_debug_buffer_full: 337 */
+
+#ifndef ISP2401
+/* function ia_css_dmaproxy_sp_configure_channel_from_info: 3BD3 */
+#else
+/* function ia_css_dmaproxy_sp_configure_channel_from_info: 3D35 */
+#endif
+
+#ifndef ISP2401
+/* function encode_and_post_timer_event: AA8 */
+#else
+/* function encode_and_post_timer_event: A3C */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_input_system_bz2788_active
+#define HIVE_MEM_input_system_bz2788_active scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_input_system_bz2788_active 0x250C
+#else
+#define HIVE_ADDR_input_system_bz2788_active 0x2524
+#endif
+#define HIVE_SIZE_input_system_bz2788_active 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_input_system_bz2788_active scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_input_system_bz2788_active 0x250C
+#else
+#define HIVE_ADDR_sp_input_system_bz2788_active 0x2524
+#endif
+#define HIVE_SIZE_sp_input_system_bz2788_active 4
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_N_IBUF_CTRL_PROCS
+#define HIVE_MEM_N_IBUF_CTRL_PROCS scalar_processor_2400_dmem
+#define HIVE_ADDR_N_IBUF_CTRL_PROCS 0x1FC
+#define HIVE_SIZE_N_IBUF_CTRL_PROCS 12
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_N_IBUF_CTRL_PROCS scalar_processor_2400_dmem
+#define HIVE_ADDR_sp_N_IBUF_CTRL_PROCS 0x1FC
+#define HIVE_SIZE_sp_N_IBUF_CTRL_PROCS 12
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_sp_per_frame_data
+#define HIVE_MEM_sp_per_frame_data scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_per_frame_data 0x3F24
+#else
+#define HIVE_ADDR_sp_per_frame_data 0x3F48
+#endif
+#define HIVE_SIZE_sp_per_frame_data 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_sp_per_frame_data scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_sp_per_frame_data 0x3F24
+#else
+#define HIVE_ADDR_sp_sp_per_frame_data 0x3F48
+#endif
+#define HIVE_SIZE_sp_sp_per_frame_data 4
+
+#ifndef ISP2401
+/* function ia_css_rmgr_sp_vbuf_dequeue: 62AC */
+#else
+/* function ia_css_rmgr_sp_vbuf_dequeue: 6472 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_host2sp_psys_event_queue_handle
+#define HIVE_MEM_host2sp_psys_event_queue_handle scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_host2sp_psys_event_queue_handle 0x5C18
+#else
+#define HIVE_ADDR_host2sp_psys_event_queue_handle 0x5CC4
+#endif
+#define HIVE_SIZE_host2sp_psys_event_queue_handle 12
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_host2sp_psys_event_queue_handle scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_host2sp_psys_event_queue_handle 0x5C18
+#else
+#define HIVE_ADDR_sp_host2sp_psys_event_queue_handle 0x5CC4
+#endif
+#define HIVE_SIZE_sp_host2sp_psys_event_queue_handle 12
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_xmem_bin_addr
+#define HIVE_MEM_xmem_bin_addr scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_xmem_bin_addr 0x3F28
+#else
+#define HIVE_ADDR_xmem_bin_addr 0x3F4C
+#endif
+#define HIVE_SIZE_xmem_bin_addr 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_xmem_bin_addr scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_xmem_bin_addr 0x3F28
+#else
+#define HIVE_ADDR_sp_xmem_bin_addr 0x3F4C
+#endif
+#define HIVE_SIZE_sp_xmem_bin_addr 4
+
+#ifndef ISP2401
+/* function tmr_clock_init: 16F9 */
+#else
+/* function tmr_clock_init: 166F */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_pipeline_sp_run: 1ABF */
+#else
+/* function ia_css_pipeline_sp_run: 1A61 */
+#endif
+
+#ifndef ISP2401
+/* function memcpy: 68B6 */
+#else
+/* function memcpy: 6AB4 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_N_ISYS2401_DMA_CHANNEL_PROCS
+#define HIVE_MEM_N_ISYS2401_DMA_CHANNEL_PROCS scalar_processor_2400_dmem
+#define HIVE_ADDR_N_ISYS2401_DMA_CHANNEL_PROCS 0x214
+#define HIVE_SIZE_N_ISYS2401_DMA_CHANNEL_PROCS 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_N_ISYS2401_DMA_CHANNEL_PROCS scalar_processor_2400_dmem
+#define HIVE_ADDR_sp_N_ISYS2401_DMA_CHANNEL_PROCS 0x214
+#define HIVE_SIZE_sp_N_ISYS2401_DMA_CHANNEL_PROCS 4
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_GP_DEVICE_BASE
+#define HIVE_MEM_GP_DEVICE_BASE scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_GP_DEVICE_BASE 0x384
+#else
+#define HIVE_ADDR_GP_DEVICE_BASE 0x39C
+#endif
+#define HIVE_SIZE_GP_DEVICE_BASE 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_GP_DEVICE_BASE scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_GP_DEVICE_BASE 0x384
+#else
+#define HIVE_ADDR_sp_GP_DEVICE_BASE 0x39C
+#endif
+#define HIVE_SIZE_sp_GP_DEVICE_BASE 4
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_ia_css_thread_sp_ready_queue
+#define HIVE_MEM_ia_css_thread_sp_ready_queue scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_ia_css_thread_sp_ready_queue 0x278
+#else
+#define HIVE_ADDR_ia_css_thread_sp_ready_queue 0x27C
+#endif
+#define HIVE_SIZE_ia_css_thread_sp_ready_queue 12
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_ia_css_thread_sp_ready_queue scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_ia_css_thread_sp_ready_queue 0x278
+#else
+#define HIVE_ADDR_sp_ia_css_thread_sp_ready_queue 0x27C
+#endif
+#define HIVE_SIZE_sp_ia_css_thread_sp_ready_queue 12
+
+#ifndef ISP2401
+/* function stream2mmio_send_command: E0A */
+#else
+/* function stream2mmio_send_command: E04 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_uds_sp_scale_params: 65BF */
+#else
+/* function ia_css_uds_sp_scale_params: 67BD */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_circbuf_increase_size: 14DC */
+#else
+/* function ia_css_circbuf_increase_size: 1452 */
+#endif
+
+#ifndef ISP2401
+/* function __divu: 6834 */
+#else
+/* function __divu: 6A32 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_thread_sp_get_state: 131F */
+#else
+/* function ia_css_thread_sp_get_state: 1295 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_sem_for_cont_capt_stop
+#define HIVE_MEM_sem_for_cont_capt_stop scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sem_for_cont_capt_stop 0x5798
+#else
+#define HIVE_ADDR_sem_for_cont_capt_stop 0x5834
+#endif
+#define HIVE_SIZE_sem_for_cont_capt_stop 20
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_sem_for_cont_capt_stop scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_sem_for_cont_capt_stop 0x5798
+#else
+#define HIVE_ADDR_sp_sem_for_cont_capt_stop 0x5834
+#endif
+#define HIVE_SIZE_sp_sem_for_cont_capt_stop 20
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_N_SHORT_PACKET_LUT_ENTRIES
+#define HIVE_MEM_N_SHORT_PACKET_LUT_ENTRIES scalar_processor_2400_dmem
+#define HIVE_ADDR_N_SHORT_PACKET_LUT_ENTRIES 0x1AC
+#define HIVE_SIZE_N_SHORT_PACKET_LUT_ENTRIES 12
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_N_SHORT_PACKET_LUT_ENTRIES scalar_processor_2400_dmem
+#define HIVE_ADDR_sp_N_SHORT_PACKET_LUT_ENTRIES 0x1AC
+#define HIVE_SIZE_sp_N_SHORT_PACKET_LUT_ENTRIES 12
+
+#ifndef ISP2401
+/* function thread_fiber_sp_main: 14D5 */
+#else
+/* function thread_fiber_sp_main: 144B */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_sp_isp_pipe_thread
+#define HIVE_MEM_sp_isp_pipe_thread scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_isp_pipe_thread 0x58DC
+#define HIVE_SIZE_sp_isp_pipe_thread 340
+#else
+#define HIVE_ADDR_sp_isp_pipe_thread 0x5978
+#define HIVE_SIZE_sp_isp_pipe_thread 360
+#endif
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_sp_isp_pipe_thread scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_sp_isp_pipe_thread 0x58DC
+#define HIVE_SIZE_sp_sp_isp_pipe_thread 340
+#else
+#define HIVE_ADDR_sp_sp_isp_pipe_thread 0x5978
+#define HIVE_SIZE_sp_sp_isp_pipe_thread 360
+#endif
+
+#ifndef ISP2401
+/* function ia_css_parambuf_sp_handle_parameter_sets: 193F */
+#else
+/* function ia_css_parambuf_sp_handle_parameter_sets: 18B5 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_spctrl_sp_set_state: 5DED */
+#else
+/* function ia_css_spctrl_sp_set_state: 5ECF */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_thread_sem_sp_signal: 6A99 */
+#else
+/* function ia_css_thread_sem_sp_signal: 6D18 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_IRQ_BASE
+#define HIVE_MEM_IRQ_BASE scalar_processor_2400_dmem
+#define HIVE_ADDR_IRQ_BASE 0x2C
+#define HIVE_SIZE_IRQ_BASE 16
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_IRQ_BASE scalar_processor_2400_dmem
+#define HIVE_ADDR_sp_IRQ_BASE 0x2C
+#define HIVE_SIZE_sp_IRQ_BASE 16
+
+#ifndef ISP2401
+/* function ia_css_virtual_isys_sp_isr_init: 5E8C */
+#else
+/* function ia_css_virtual_isys_sp_isr_init: 5F70 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_TIMED_CTRL_BASE
+#define HIVE_MEM_TIMED_CTRL_BASE scalar_processor_2400_dmem
+#define HIVE_ADDR_TIMED_CTRL_BASE 0x40
+#define HIVE_SIZE_TIMED_CTRL_BASE 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_TIMED_CTRL_BASE scalar_processor_2400_dmem
+#define HIVE_ADDR_sp_TIMED_CTRL_BASE 0x40
+#define HIVE_SIZE_sp_TIMED_CTRL_BASE 4
+
+#ifndef ISP2401
+/* function ia_css_isys_sp_generate_exp_id: 613C */
+
+/* function ia_css_rmgr_sp_init: 61A7 */
+#else
+/* function ia_css_isys_sp_generate_exp_id: 6302 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_thread_sem_sp_init: 6B6A */
+#else
+/* function ia_css_rmgr_sp_init: 636D */
+#endif
+
+#ifndef ISP2401
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_is_isp_requested
+#define HIVE_MEM_is_isp_requested scalar_processor_2400_dmem
+#define HIVE_ADDR_is_isp_requested 0x390
+#define HIVE_SIZE_is_isp_requested 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_is_isp_requested scalar_processor_2400_dmem
+#define HIVE_ADDR_sp_is_isp_requested 0x390
+#define HIVE_SIZE_sp_is_isp_requested 4
+#else
+/* function ia_css_thread_sem_sp_init: 6DE7 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_sem_for_reading_cb_frame
+#define HIVE_MEM_sem_for_reading_cb_frame scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sem_for_reading_cb_frame 0x57AC
+#else
+#define HIVE_ADDR_sem_for_reading_cb_frame 0x5848
+#endif
+#define HIVE_SIZE_sem_for_reading_cb_frame 40
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_sem_for_reading_cb_frame scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_sem_for_reading_cb_frame 0x57AC
+#else
+#define HIVE_ADDR_sp_sem_for_reading_cb_frame 0x5848
+#endif
+#define HIVE_SIZE_sp_sem_for_reading_cb_frame 40
+
+#ifndef ISP2401
+/* function ia_css_dmaproxy_sp_execute: 3B3B */
+#else
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_is_isp_requested
+#define HIVE_MEM_is_isp_requested scalar_processor_2400_dmem
+#define HIVE_ADDR_is_isp_requested 0x3A8
+#define HIVE_SIZE_is_isp_requested 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_is_isp_requested scalar_processor_2400_dmem
+#define HIVE_ADDR_sp_is_isp_requested 0x3A8
+#define HIVE_SIZE_sp_is_isp_requested 4
+
+/* function ia_css_dmaproxy_sp_execute: 3C9B */
+#endif
+
+#ifndef ISP2401
+/* function csi_rx_backend_rst: CE6 */
+#else
+/* function csi_rx_backend_rst: CE0 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_queue_is_empty: 51FA */
+#else
+/* function ia_css_queue_is_empty: 7144 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_pipeline_sp_has_stopped: 1FF5 */
+#else
+/* function ia_css_pipeline_sp_has_stopped: 1FB0 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_circbuf_extract: 15E0 */
+#else
+/* function ia_css_circbuf_extract: 1556 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_tagger_buf_sp_is_locked_from_start: 344F */
+#else
+/* function ia_css_tagger_buf_sp_is_locked_from_start: 3572 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_current_sp_thread
+#define HIVE_MEM_current_sp_thread scalar_processor_2400_dmem
+#define HIVE_ADDR_current_sp_thread 0x274
+#define HIVE_SIZE_current_sp_thread 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_current_sp_thread scalar_processor_2400_dmem
+#define HIVE_ADDR_sp_current_sp_thread 0x274
+#define HIVE_SIZE_sp_current_sp_thread 4
+
+#ifndef ISP2401
+/* function ia_css_spctrl_sp_get_spid: 5DF4 */
+#else
+/* function ia_css_spctrl_sp_get_spid: 5ED6 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_bufq_sp_reset_buffers: 3646 */
+#else
+/* function ia_css_bufq_sp_reset_buffers: 3769 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_dmaproxy_sp_read_byte_addr: 6DD7 */
+#else
+/* function ia_css_dmaproxy_sp_read_byte_addr: 7025 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_rmgr_sp_uninit: 61A0 */
+#else
+/* function ia_css_rmgr_sp_uninit: 6366 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_sp_threads_stack
+#define HIVE_MEM_sp_threads_stack scalar_processor_2400_dmem
+#define HIVE_ADDR_sp_threads_stack 0x164
+#define HIVE_SIZE_sp_threads_stack 24
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_sp_threads_stack scalar_processor_2400_dmem
+#define HIVE_ADDR_sp_sp_threads_stack 0x164
+#define HIVE_SIZE_sp_sp_threads_stack 24
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_N_STREAM2MMIO_SID_PROCS
+#define HIVE_MEM_N_STREAM2MMIO_SID_PROCS scalar_processor_2400_dmem
+#define HIVE_ADDR_N_STREAM2MMIO_SID_PROCS 0x218
+#define HIVE_SIZE_N_STREAM2MMIO_SID_PROCS 12
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_N_STREAM2MMIO_SID_PROCS scalar_processor_2400_dmem
+#define HIVE_ADDR_sp_N_STREAM2MMIO_SID_PROCS 0x218
+#define HIVE_SIZE_sp_N_STREAM2MMIO_SID_PROCS 12
+
+#ifndef ISP2401
+/* function ia_css_circbuf_peek: 15C2 */
+#else
+/* function ia_css_circbuf_peek: 1538 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_parambuf_sp_wait_for_in_param: 1708 */
+#else
+/* function ia_css_parambuf_sp_wait_for_in_param: 167E */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_sp_all_cb_elems_param
+#define HIVE_MEM_sp_all_cb_elems_param scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_all_cb_elems_param 0x57D4
+#else
+#define HIVE_ADDR_sp_all_cb_elems_param 0x5870
+#endif
+#define HIVE_SIZE_sp_all_cb_elems_param 16
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_sp_all_cb_elems_param scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_sp_all_cb_elems_param 0x57D4
+#else
+#define HIVE_ADDR_sp_sp_all_cb_elems_param 0x5870
+#endif
+#define HIVE_SIZE_sp_sp_all_cb_elems_param 16
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_pipeline_sp_curr_binary_id
+#define HIVE_MEM_pipeline_sp_curr_binary_id scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_pipeline_sp_curr_binary_id 0x284
+#else
+#define HIVE_ADDR_pipeline_sp_curr_binary_id 0x288
+#endif
+#define HIVE_SIZE_pipeline_sp_curr_binary_id 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_pipeline_sp_curr_binary_id scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_pipeline_sp_curr_binary_id 0x284
+#else
+#define HIVE_ADDR_sp_pipeline_sp_curr_binary_id 0x288
+#endif
+#define HIVE_SIZE_sp_pipeline_sp_curr_binary_id 4
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_sp_all_cbs_frame_desc
+#define HIVE_MEM_sp_all_cbs_frame_desc scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_all_cbs_frame_desc 0x57E4
+#else
+#define HIVE_ADDR_sp_all_cbs_frame_desc 0x5880
+#endif
+#define HIVE_SIZE_sp_all_cbs_frame_desc 8
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_sp_all_cbs_frame_desc scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_sp_all_cbs_frame_desc 0x57E4
+#else
+#define HIVE_ADDR_sp_sp_all_cbs_frame_desc 0x5880
+#endif
+#define HIVE_SIZE_sp_sp_all_cbs_frame_desc 8
+
+#ifndef ISP2401
+/* function sp_isys_copy_func_v2: 629 */
+#else
+/* function sp_isys_copy_func_v2: 5BD */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_sem_for_reading_cb_param
+#define HIVE_MEM_sem_for_reading_cb_param scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sem_for_reading_cb_param 0x57EC
+#else
+#define HIVE_ADDR_sem_for_reading_cb_param 0x5888
+#endif
+#define HIVE_SIZE_sem_for_reading_cb_param 40
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_sem_for_reading_cb_param scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_sem_for_reading_cb_param 0x57EC
+#else
+#define HIVE_ADDR_sp_sem_for_reading_cb_param 0x5888
+#endif
+#define HIVE_SIZE_sp_sem_for_reading_cb_param 40
+
+#ifndef ISP2401
+/* function ia_css_queue_get_used_space: 52C7 */
+#else
+/* function ia_css_queue_get_used_space: 54A6 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_sem_for_cont_capt_start
+#define HIVE_MEM_sem_for_cont_capt_start scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sem_for_cont_capt_start 0x5814
+#else
+#define HIVE_ADDR_sem_for_cont_capt_start 0x58B0
+#endif
+#define HIVE_SIZE_sem_for_cont_capt_start 20
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_sem_for_cont_capt_start scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_sem_for_cont_capt_start 0x5814
+#else
+#define HIVE_ADDR_sp_sem_for_cont_capt_start 0x58B0
+#endif
+#define HIVE_SIZE_sp_sem_for_cont_capt_start 20
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_tmp_heap
+#define HIVE_MEM_tmp_heap scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_tmp_heap 0x70A8
+#else
+#define HIVE_ADDR_tmp_heap 0x7158
+#endif
+#define HIVE_SIZE_tmp_heap 640
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_tmp_heap scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_tmp_heap 0x70A8
+#else
+#define HIVE_ADDR_sp_tmp_heap 0x7158
+#endif
+#define HIVE_SIZE_sp_tmp_heap 640
+
+#ifndef ISP2401
+/* function ia_css_rmgr_sp_get_num_vbuf: 64B0 */
+#else
+/* function ia_css_rmgr_sp_get_num_vbuf: 6676 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_ispctrl_sp_output_compute_dma_info: 4863 */
+#else
+/* function ia_css_ispctrl_sp_output_compute_dma_info: 4A27 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_tagger_sp_lock_exp_id: 2A0F */
+#else
+/* function ia_css_tagger_sp_lock_exp_id: 29E0 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_ia_css_bufq_sp_pipe_private_s3a_bufs
+#define HIVE_MEM_ia_css_bufq_sp_pipe_private_s3a_bufs scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_ia_css_bufq_sp_pipe_private_s3a_bufs 0x5C24
+#else
+#define HIVE_ADDR_ia_css_bufq_sp_pipe_private_s3a_bufs 0x5CD0
+#endif
+#define HIVE_SIZE_ia_css_bufq_sp_pipe_private_s3a_bufs 60
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_ia_css_bufq_sp_pipe_private_s3a_bufs scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_ia_css_bufq_sp_pipe_private_s3a_bufs 0x5C24
+#else
+#define HIVE_ADDR_sp_ia_css_bufq_sp_pipe_private_s3a_bufs 0x5CD0
+#endif
+#define HIVE_SIZE_sp_ia_css_bufq_sp_pipe_private_s3a_bufs 60
+
+#ifndef ISP2401
+/* function ia_css_queue_is_full: 535E */
+#else
+/* function ia_css_queue_is_full: 553D */
+#endif
+
+/* function debug_buffer_init_isp: E4 */
+
+#ifndef ISP2401
+/* function ia_css_tagger_sp_exp_id_is_locked: 2945 */
+#else
+/* function ia_css_tagger_sp_exp_id_is_locked: 2916 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_ia_css_rmgr_sp_mipi_frame_sem
+#define HIVE_MEM_ia_css_rmgr_sp_mipi_frame_sem scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_ia_css_rmgr_sp_mipi_frame_sem 0x7764
+#else
+#define HIVE_ADDR_ia_css_rmgr_sp_mipi_frame_sem 0x7810
+#endif
+#define HIVE_SIZE_ia_css_rmgr_sp_mipi_frame_sem 60
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_ia_css_rmgr_sp_mipi_frame_sem scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_ia_css_rmgr_sp_mipi_frame_sem 0x7764
+#else
+#define HIVE_ADDR_sp_ia_css_rmgr_sp_mipi_frame_sem 0x7810
+#endif
+#define HIVE_SIZE_sp_ia_css_rmgr_sp_mipi_frame_sem 60
+
+#ifndef ISP2401
+/* function ia_css_rmgr_sp_refcount_dump: 6287 */
+#else
+/* function ia_css_rmgr_sp_refcount_dump: 644D */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_ia_css_bufq_sp_pipe_private_isp_parameters_id
+#define HIVE_MEM_ia_css_bufq_sp_pipe_private_isp_parameters_id scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_ia_css_bufq_sp_pipe_private_isp_parameters_id 0x5C60
+#else
+#define HIVE_ADDR_ia_css_bufq_sp_pipe_private_isp_parameters_id 0x5D0C
+#endif
+#define HIVE_SIZE_ia_css_bufq_sp_pipe_private_isp_parameters_id 20
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_ia_css_bufq_sp_pipe_private_isp_parameters_id scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_ia_css_bufq_sp_pipe_private_isp_parameters_id 0x5C60
+#else
+#define HIVE_ADDR_sp_ia_css_bufq_sp_pipe_private_isp_parameters_id 0x5D0C
+#endif
+#define HIVE_SIZE_sp_ia_css_bufq_sp_pipe_private_isp_parameters_id 20
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_sp_pipe_threads
+#define HIVE_MEM_sp_pipe_threads scalar_processor_2400_dmem
+#define HIVE_ADDR_sp_pipe_threads 0x150
+#define HIVE_SIZE_sp_pipe_threads 20
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_sp_pipe_threads scalar_processor_2400_dmem
+#define HIVE_ADDR_sp_sp_pipe_threads 0x150
+#define HIVE_SIZE_sp_sp_pipe_threads 20
+
+#ifndef ISP2401
+/* function sp_event_proxy_func: 78D */
+#else
+/* function sp_event_proxy_func: 721 */
+#endif
+
+#ifndef ISP2401
+/* function ibuf_ctrl_run: D7F */
+#else
+/* function ibuf_ctrl_run: D79 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_host2sp_isys_event_queue_handle
+#define HIVE_MEM_host2sp_isys_event_queue_handle scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_host2sp_isys_event_queue_handle 0x5C74
+#else
+#define HIVE_ADDR_host2sp_isys_event_queue_handle 0x5D20
+#endif
+#define HIVE_SIZE_host2sp_isys_event_queue_handle 12
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_host2sp_isys_event_queue_handle scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_host2sp_isys_event_queue_handle 0x5C74
+#else
+#define HIVE_ADDR_sp_host2sp_isys_event_queue_handle 0x5D20
+#endif
+#define HIVE_SIZE_sp_host2sp_isys_event_queue_handle 12
+
+#ifndef ISP2401
+/* function ia_css_thread_sp_yield: 6A12 */
+#else
+/* function ia_css_thread_sp_yield: 6C96 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_sp_all_cbs_param_desc
+#define HIVE_MEM_sp_all_cbs_param_desc scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_all_cbs_param_desc 0x5828
+#else
+#define HIVE_ADDR_sp_all_cbs_param_desc 0x58C4
+#endif
+#define HIVE_SIZE_sp_all_cbs_param_desc 8
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_sp_all_cbs_param_desc scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_sp_all_cbs_param_desc 0x5828
+#else
+#define HIVE_ADDR_sp_sp_all_cbs_param_desc 0x58C4
+#endif
+#define HIVE_SIZE_sp_sp_all_cbs_param_desc 8
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_ia_css_dmaproxy_sp_invalidate_tlb
+#define HIVE_MEM_ia_css_dmaproxy_sp_invalidate_tlb scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_ia_css_dmaproxy_sp_invalidate_tlb 0x6C8C
+#else
+#define HIVE_ADDR_ia_css_dmaproxy_sp_invalidate_tlb 0x6D38
+#endif
+#define HIVE_SIZE_ia_css_dmaproxy_sp_invalidate_tlb 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_ia_css_dmaproxy_sp_invalidate_tlb scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_ia_css_dmaproxy_sp_invalidate_tlb 0x6C8C
+#else
+#define HIVE_ADDR_sp_ia_css_dmaproxy_sp_invalidate_tlb 0x6D38
+#endif
+#define HIVE_SIZE_sp_ia_css_dmaproxy_sp_invalidate_tlb 4
+
+#ifndef ISP2401
+/* function ia_css_thread_sp_fork: 13AC */
+#else
+/* function ia_css_thread_sp_fork: 1322 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_tagger_sp_destroy: 3136 */
+#else
+/* function ia_css_tagger_sp_destroy: 3107 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_dmaproxy_sp_vmem_read: 3ADB */
+#else
+/* function ia_css_dmaproxy_sp_vmem_read: 3C3B */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_N_LONG_PACKET_LUT_ENTRIES
+#define HIVE_MEM_N_LONG_PACKET_LUT_ENTRIES scalar_processor_2400_dmem
+#define HIVE_ADDR_N_LONG_PACKET_LUT_ENTRIES 0x1B8
+#define HIVE_SIZE_N_LONG_PACKET_LUT_ENTRIES 12
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_N_LONG_PACKET_LUT_ENTRIES scalar_processor_2400_dmem
+#define HIVE_ADDR_sp_N_LONG_PACKET_LUT_ENTRIES 0x1B8
+#define HIVE_SIZE_sp_N_LONG_PACKET_LUT_ENTRIES 12
+
+#ifndef ISP2401
+/* function initialize_sp_group: 5F6 */
+#else
+/* function initialize_sp_group: 58A */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_tagger_buf_sp_peek: 325B */
+#else
+/* function ia_css_tagger_buf_sp_peek: 337E */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_thread_sp_init: 13D8 */
+#else
+/* function ia_css_thread_sp_init: 134E */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_isys_sp_reset_exp_id: 6133 */
+#else
+/* function qos_scheduler_update_fps: 67AD */
+#endif
+
+#ifndef ISP2401
+/* function qos_scheduler_update_fps: 65AF */
+#else
+/* function ia_css_isys_sp_reset_exp_id: 62F9 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_ispctrl_sp_set_stream_base_addr: 4F38 */
+#else
+/* function ia_css_ispctrl_sp_set_stream_base_addr: 5114 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_ISP_DMEM_BASE
+#define HIVE_MEM_ISP_DMEM_BASE scalar_processor_2400_dmem
+#define HIVE_ADDR_ISP_DMEM_BASE 0x10
+#define HIVE_SIZE_ISP_DMEM_BASE 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_ISP_DMEM_BASE scalar_processor_2400_dmem
+#define HIVE_ADDR_sp_ISP_DMEM_BASE 0x10
+#define HIVE_SIZE_sp_ISP_DMEM_BASE 4
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_SP_DMEM_BASE
+#define HIVE_MEM_SP_DMEM_BASE scalar_processor_2400_dmem
+#define HIVE_ADDR_SP_DMEM_BASE 0x4
+#define HIVE_SIZE_SP_DMEM_BASE 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_SP_DMEM_BASE scalar_processor_2400_dmem
+#define HIVE_ADDR_sp_SP_DMEM_BASE 0x4
+#define HIVE_SIZE_sp_SP_DMEM_BASE 4
+
+#ifndef ISP2401
+/* function ibuf_ctrl_transfer: D67 */
+#else
+/* function ibuf_ctrl_transfer: D61 */
+
+/* function __ia_css_queue_is_empty_text: 5403 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_dmaproxy_sp_read: 3B51 */
+#else
+/* function ia_css_dmaproxy_sp_read: 3CB1 */
+#endif
+
+#ifndef ISP2401
+/* function virtual_isys_stream_is_capture_done: 5EB0 */
+#else
+/* function virtual_isys_stream_is_capture_done: 5F94 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_raw_copy_line_count
+#define HIVE_MEM_raw_copy_line_count scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_raw_copy_line_count 0x360
+#else
+#define HIVE_ADDR_raw_copy_line_count 0x378
+#endif
+#define HIVE_SIZE_raw_copy_line_count 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_raw_copy_line_count scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_raw_copy_line_count 0x360
+#else
+#define HIVE_ADDR_sp_raw_copy_line_count 0x378
+#endif
+#define HIVE_SIZE_sp_raw_copy_line_count 4
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_host2sp_tag_cmd_queue_handle
+#define HIVE_MEM_host2sp_tag_cmd_queue_handle scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_host2sp_tag_cmd_queue_handle 0x5C80
+#else
+#define HIVE_ADDR_host2sp_tag_cmd_queue_handle 0x5D2C
+#endif
+#define HIVE_SIZE_host2sp_tag_cmd_queue_handle 12
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_host2sp_tag_cmd_queue_handle scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_host2sp_tag_cmd_queue_handle 0x5C80
+#else
+#define HIVE_ADDR_sp_host2sp_tag_cmd_queue_handle 0x5D2C
+#endif
+#define HIVE_SIZE_sp_host2sp_tag_cmd_queue_handle 12
+
+#ifndef ISP2401
+/* function ia_css_queue_peek: 523D */
+#else
+/* function ia_css_queue_peek: 541C */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_ia_css_flash_sp_frame_cnt
+#define HIVE_MEM_ia_css_flash_sp_frame_cnt scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_ia_css_flash_sp_frame_cnt 0x5B2C
+#else
+#define HIVE_ADDR_ia_css_flash_sp_frame_cnt 0x5BD8
+#endif
+#define HIVE_SIZE_ia_css_flash_sp_frame_cnt 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_ia_css_flash_sp_frame_cnt scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_ia_css_flash_sp_frame_cnt 0x5B2C
+#else
+#define HIVE_ADDR_sp_ia_css_flash_sp_frame_cnt 0x5BD8
+#endif
+#define HIVE_SIZE_sp_ia_css_flash_sp_frame_cnt 4
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_event_can_send_token_mask
+#define HIVE_MEM_event_can_send_token_mask scalar_processor_2400_dmem
+#define HIVE_ADDR_event_can_send_token_mask 0x88
+#define HIVE_SIZE_event_can_send_token_mask 44
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_event_can_send_token_mask scalar_processor_2400_dmem
+#define HIVE_ADDR_sp_event_can_send_token_mask 0x88
+#define HIVE_SIZE_sp_event_can_send_token_mask 44
+
+#ifndef ISP2401
+/* function csi_rx_frontend_stop: C11 */
+#else
+/* function csi_rx_frontend_stop: C0B */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_isp_thread
+#define HIVE_MEM_isp_thread scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_isp_thread 0x6FD8
+#else
+#define HIVE_ADDR_isp_thread 0x7088
+#endif
+#define HIVE_SIZE_isp_thread 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_isp_thread scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_isp_thread 0x6FD8
+#else
+#define HIVE_ADDR_sp_isp_thread 0x7088
+#endif
+#define HIVE_SIZE_sp_isp_thread 4
+
+#ifndef ISP2401
+/* function encode_and_post_sp_event_non_blocking: AF0 */
+#else
+/* function encode_and_post_sp_event_non_blocking: A84 */
+#endif
+
+/* function is_ddr_debug_buffer_full: 2CC */
+
+#ifndef ISP2401
+/* function ia_css_tagger_buf_sp_get_oldest_marked_offset: 32AB */
+#else
+/* function ia_css_tagger_buf_sp_get_oldest_marked_offset: 33CE */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_sp_threads_fiber
+#define HIVE_MEM_sp_threads_fiber scalar_processor_2400_dmem
+#define HIVE_ADDR_sp_threads_fiber 0x194
+#define HIVE_SIZE_sp_threads_fiber 24
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_sp_threads_fiber scalar_processor_2400_dmem
+#define HIVE_ADDR_sp_sp_threads_fiber 0x194
+#define HIVE_SIZE_sp_sp_threads_fiber 24
+
+#ifndef ISP2401
+/* function encode_and_post_sp_event: A79 */
+#else
+/* function encode_and_post_sp_event: A0D */
+#endif
+
+/* function debug_enqueue_ddr: EE */
+
+#ifndef ISP2401
+/* function ia_css_rmgr_sp_refcount_init_vbuf: 6242 */
+#else
+/* function ia_css_rmgr_sp_refcount_init_vbuf: 6408 */
+#endif
+
+#ifndef ISP2401
+/* function dmaproxy_sp_read_write: 6E86 */
+#else
+/* function dmaproxy_sp_read_write: 70C3 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_ia_css_dmaproxy_isp_dma_cmd_buffer
+#define HIVE_MEM_ia_css_dmaproxy_isp_dma_cmd_buffer scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_ia_css_dmaproxy_isp_dma_cmd_buffer 0x6C90
+#else
+#define HIVE_ADDR_ia_css_dmaproxy_isp_dma_cmd_buffer 0x6D3C
+#endif
+#define HIVE_SIZE_ia_css_dmaproxy_isp_dma_cmd_buffer 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_ia_css_dmaproxy_isp_dma_cmd_buffer scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_ia_css_dmaproxy_isp_dma_cmd_buffer 0x6C90
+#else
+#define HIVE_ADDR_sp_ia_css_dmaproxy_isp_dma_cmd_buffer 0x6D3C
+#endif
+#define HIVE_SIZE_sp_ia_css_dmaproxy_isp_dma_cmd_buffer 4
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_host2sp_buffer_queue_handle
+#define HIVE_MEM_host2sp_buffer_queue_handle scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_host2sp_buffer_queue_handle 0x5C8C
+#else
+#define HIVE_ADDR_host2sp_buffer_queue_handle 0x5D38
+#endif
+#define HIVE_SIZE_host2sp_buffer_queue_handle 480
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_host2sp_buffer_queue_handle scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_host2sp_buffer_queue_handle 0x5C8C
+#else
+#define HIVE_ADDR_sp_host2sp_buffer_queue_handle 0x5D38
+#endif
+#define HIVE_SIZE_sp_host2sp_buffer_queue_handle 480
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_ia_css_flash_sp_in_service
+#define HIVE_MEM_ia_css_flash_sp_in_service scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_ia_css_flash_sp_in_service 0x3054
+#else
+#define HIVE_ADDR_ia_css_flash_sp_in_service 0x3074
+#endif
+#define HIVE_SIZE_ia_css_flash_sp_in_service 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_ia_css_flash_sp_in_service scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_ia_css_flash_sp_in_service 0x3054
+#else
+#define HIVE_ADDR_sp_ia_css_flash_sp_in_service 0x3074
+#endif
+#define HIVE_SIZE_sp_ia_css_flash_sp_in_service 4
+
+#ifndef ISP2401
+/* function ia_css_dmaproxy_sp_process: 6B92 */
+#else
+/* function ia_css_dmaproxy_sp_process: 6E0F */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_tagger_buf_sp_mark_from_end: 3533 */
+#else
+/* function ia_css_tagger_buf_sp_mark_from_end: 3656 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_ispctrl_sp_init_cs: 3F77 */
+#else
+/* function ia_css_ispctrl_sp_init_cs: 40FA */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_spctrl_sp_init: 5E02 */
+#else
+/* function ia_css_spctrl_sp_init: 5EE4 */
+#endif
+
+#ifndef ISP2401
+/* function sp_event_proxy_init: 7A2 */
+#else
+/* function sp_event_proxy_init: 736 */
+#endif
+
+#ifndef ISP2401
+/* function input_system_input_port_close: 109B */
+#else
+/* function input_system_input_port_close: 1095 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_ia_css_bufq_sp_pipe_private_previous_clock_tick
+#define HIVE_MEM_ia_css_bufq_sp_pipe_private_previous_clock_tick scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_ia_css_bufq_sp_pipe_private_previous_clock_tick 0x5E6C
+#else
+#define HIVE_ADDR_ia_css_bufq_sp_pipe_private_previous_clock_tick 0x5F18
+#endif
+#define HIVE_SIZE_ia_css_bufq_sp_pipe_private_previous_clock_tick 40
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_ia_css_bufq_sp_pipe_private_previous_clock_tick scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_ia_css_bufq_sp_pipe_private_previous_clock_tick 0x5E6C
+#else
+#define HIVE_ADDR_sp_ia_css_bufq_sp_pipe_private_previous_clock_tick 0x5F18
+#endif
+#define HIVE_SIZE_sp_ia_css_bufq_sp_pipe_private_previous_clock_tick 40
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_sp_output
+#define HIVE_MEM_sp_output scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_output 0x3F2C
+#else
+#define HIVE_ADDR_sp_output 0x3F50
+#endif
+#define HIVE_SIZE_sp_output 16
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_sp_output scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_sp_output 0x3F2C
+#else
+#define HIVE_ADDR_sp_sp_output 0x3F50
+#endif
+#define HIVE_SIZE_sp_sp_output 16
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_ia_css_bufq_sp_sems_for_host2sp_buf_queues
+#define HIVE_MEM_ia_css_bufq_sp_sems_for_host2sp_buf_queues scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_ia_css_bufq_sp_sems_for_host2sp_buf_queues 0x5E94
+#else
+#define HIVE_ADDR_ia_css_bufq_sp_sems_for_host2sp_buf_queues 0x5F40
+#endif
+#define HIVE_SIZE_ia_css_bufq_sp_sems_for_host2sp_buf_queues 800
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_ia_css_bufq_sp_sems_for_host2sp_buf_queues scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_ia_css_bufq_sp_sems_for_host2sp_buf_queues 0x5E94
+#else
+#define HIVE_ADDR_sp_ia_css_bufq_sp_sems_for_host2sp_buf_queues 0x5F40
+#endif
+#define HIVE_SIZE_sp_ia_css_bufq_sp_sems_for_host2sp_buf_queues 800
+
+#ifndef ISP2401
+/* function pixelgen_prbs_config: E93 */
+#else
+/* function pixelgen_prbs_config: E8D */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_ISP_CTRL_BASE
+#define HIVE_MEM_ISP_CTRL_BASE scalar_processor_2400_dmem
+#define HIVE_ADDR_ISP_CTRL_BASE 0x8
+#define HIVE_SIZE_ISP_CTRL_BASE 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_ISP_CTRL_BASE scalar_processor_2400_dmem
+#define HIVE_ADDR_sp_ISP_CTRL_BASE 0x8
+#define HIVE_SIZE_sp_ISP_CTRL_BASE 4
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_INPUT_FORMATTER_BASE
+#define HIVE_MEM_INPUT_FORMATTER_BASE scalar_processor_2400_dmem
+#define HIVE_ADDR_INPUT_FORMATTER_BASE 0x4C
+#define HIVE_SIZE_INPUT_FORMATTER_BASE 16
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_INPUT_FORMATTER_BASE scalar_processor_2400_dmem
+#define HIVE_ADDR_sp_INPUT_FORMATTER_BASE 0x4C
+#define HIVE_SIZE_sp_INPUT_FORMATTER_BASE 16
+
+#ifndef ISP2401
+/* function sp_dma_proxy_reset_channels: 3DAB */
+#else
+/* function sp_dma_proxy_reset_channels: 3F20 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_tagger_sp_update_size: 322A */
+#else
+/* function ia_css_tagger_sp_update_size: 334D */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_ia_css_bufq_host_sp_queue
+#define HIVE_MEM_ia_css_bufq_host_sp_queue scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_ia_css_bufq_host_sp_queue 0x61B4
+#else
+#define HIVE_ADDR_ia_css_bufq_host_sp_queue 0x6260
+#endif
+#define HIVE_SIZE_ia_css_bufq_host_sp_queue 2008
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_ia_css_bufq_host_sp_queue scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_ia_css_bufq_host_sp_queue 0x61B4
+#else
+#define HIVE_ADDR_sp_ia_css_bufq_host_sp_queue 0x6260
+#endif
+#define HIVE_SIZE_sp_ia_css_bufq_host_sp_queue 2008
+
+#ifndef ISP2401
+/* function thread_fiber_sp_create: 1444 */
+#else
+/* function thread_fiber_sp_create: 13BA */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_dmaproxy_sp_set_increments: 3C3D */
+#else
+/* function ia_css_dmaproxy_sp_set_increments: 3DB2 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_sem_for_writing_cb_frame
+#define HIVE_MEM_sem_for_writing_cb_frame scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sem_for_writing_cb_frame 0x5830
+#else
+#define HIVE_ADDR_sem_for_writing_cb_frame 0x58CC
+#endif
+#define HIVE_SIZE_sem_for_writing_cb_frame 20
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_sem_for_writing_cb_frame scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_sem_for_writing_cb_frame 0x5830
+#else
+#define HIVE_ADDR_sp_sem_for_writing_cb_frame 0x58CC
+#endif
+#define HIVE_SIZE_sp_sem_for_writing_cb_frame 20
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_sem_for_writing_cb_param
+#define HIVE_MEM_sem_for_writing_cb_param scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sem_for_writing_cb_param 0x5844
+#else
+#define HIVE_ADDR_sem_for_writing_cb_param 0x58E0
+#endif
+#define HIVE_SIZE_sem_for_writing_cb_param 20
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_sem_for_writing_cb_param scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_sem_for_writing_cb_param 0x5844
+#else
+#define HIVE_ADDR_sp_sem_for_writing_cb_param 0x58E0
+#endif
+#define HIVE_SIZE_sp_sem_for_writing_cb_param 20
+
+#ifndef ISP2401
+/* function pixelgen_tpg_is_done: F0D */
+#else
+/* function pixelgen_tpg_is_done: F07 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_isys_stream_capture_indication: 5FB6 */
+#else
+/* function ia_css_isys_stream_capture_indication: 60D7 */
+#endif
+
+/* function sp_start_isp_entry: 392 */
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifdef HIVE_ADDR_sp_start_isp_entry
+#endif
+#define HIVE_ADDR_sp_start_isp_entry 0x392
+#endif
+#define HIVE_ADDR_sp_sp_start_isp_entry 0x392
+
+#ifndef ISP2401
+/* function ia_css_tagger_buf_sp_unmark_all: 34B7 */
+#else
+/* function ia_css_tagger_buf_sp_unmark_all: 35DA */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_tagger_buf_sp_unmark_from_start: 34F8 */
+#else
+/* function ia_css_tagger_buf_sp_unmark_from_start: 361B */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_dmaproxy_sp_channel_acquire: 3DD7 */
+#else
+/* function ia_css_dmaproxy_sp_channel_acquire: 3F4C */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_rmgr_sp_add_num_vbuf: 648C */
+#else
+/* function ia_css_rmgr_sp_add_num_vbuf: 6652 */
+#endif
+
+#ifndef ISP2401
+/* function ibuf_ctrl_config: D8B */
+#else
+/* function ibuf_ctrl_config: D85 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_isys_stream_stop: 602E */
+#else
+/* function ia_css_isys_stream_stop: 61F4 */
+#endif
+
+#ifndef ISP2401
+/* function __ia_css_dmaproxy_sp_wait_for_ack_text: 3AA7 */
+#else
+/* function __ia_css_dmaproxy_sp_wait_for_ack_text: 3C07 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_tagger_sp_acquire_buf_elem: 291D */
+#else
+/* function ia_css_tagger_sp_acquire_buf_elem: 28EE */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_bufq_sp_is_dynamic_buffer: 3990 */
+#else
+/* function ia_css_bufq_sp_is_dynamic_buffer: 3AB3 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_sp_group
+#define HIVE_MEM_sp_group scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_group 0x3F3C
+#define HIVE_SIZE_sp_group 6176
+#else
+#define HIVE_ADDR_sp_group 0x3F60
+#define HIVE_SIZE_sp_group 6296
+#endif
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_sp_group scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_sp_group 0x3F3C
+#define HIVE_SIZE_sp_sp_group 6176
+#else
+#define HIVE_ADDR_sp_sp_group 0x3F60
+#define HIVE_SIZE_sp_sp_group 6296
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_sp_event_proxy_thread
+#define HIVE_MEM_sp_event_proxy_thread scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_event_proxy_thread 0x5A30
+#define HIVE_SIZE_sp_event_proxy_thread 68
+#else
+#define HIVE_ADDR_sp_event_proxy_thread 0x5AE0
+#define HIVE_SIZE_sp_event_proxy_thread 72
+#endif
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_sp_event_proxy_thread scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_sp_event_proxy_thread 0x5A30
+#define HIVE_SIZE_sp_sp_event_proxy_thread 68
+#else
+#define HIVE_ADDR_sp_sp_event_proxy_thread 0x5AE0
+#define HIVE_SIZE_sp_sp_event_proxy_thread 72
+#endif
+
+#ifndef ISP2401
+/* function ia_css_thread_sp_kill: 1372 */
+#else
+/* function ia_css_thread_sp_kill: 12E8 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_tagger_sp_create: 31E4 */
+#else
+/* function ia_css_tagger_sp_create: 32FB */
+#endif
+
+#ifndef ISP2401
+/* function tmpmem_acquire_dmem: 6539 */
+#else
+/* function tmpmem_acquire_dmem: 66FF */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_MMU_BASE
+#define HIVE_MEM_MMU_BASE scalar_processor_2400_dmem
+#define HIVE_ADDR_MMU_BASE 0x24
+#define HIVE_SIZE_MMU_BASE 8
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_MMU_BASE scalar_processor_2400_dmem
+#define HIVE_ADDR_sp_MMU_BASE 0x24
+#define HIVE_SIZE_sp_MMU_BASE 8
+
+#ifndef ISP2401
+/* function ia_css_dmaproxy_sp_channel_release: 3DC3 */
+#else
+/* function ia_css_dmaproxy_sp_channel_release: 3F38 */
+#endif
+
+#ifndef ISP2401
+/* function pixelgen_prbs_run: E81 */
+#else
+/* function pixelgen_prbs_run: E7B */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_dmaproxy_sp_is_idle: 3DA3 */
+#else
+/* function ia_css_dmaproxy_sp_is_idle: 3F18 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_sem_for_qos_start
+#define HIVE_MEM_sem_for_qos_start scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sem_for_qos_start 0x5858
+#else
+#define HIVE_ADDR_sem_for_qos_start 0x58F4
+#endif
+#define HIVE_SIZE_sem_for_qos_start 20
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_sem_for_qos_start scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_sem_for_qos_start 0x5858
+#else
+#define HIVE_ADDR_sp_sem_for_qos_start 0x58F4
+#endif
+#define HIVE_SIZE_sp_sem_for_qos_start 20
+
+#ifndef ISP2401
+/* function isp_hmem_load: B63 */
+#else
+/* function isp_hmem_load: B5D */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_tagger_sp_release_buf_elem: 28F9 */
+#else
+/* function ia_css_tagger_sp_release_buf_elem: 28CA */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_eventq_sp_send: 3E19 */
+#else
+/* function ia_css_eventq_sp_send: 3F8E */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_tagger_buf_sp_unlock_from_start: 33E7 */
+#else
+/* function ia_css_tagger_buf_sp_unlock_from_start: 350A */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_debug_buffer_ddr_address
+#define HIVE_MEM_debug_buffer_ddr_address scalar_processor_2400_dmem
+#define HIVE_ADDR_debug_buffer_ddr_address 0xBC
+#define HIVE_SIZE_debug_buffer_ddr_address 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_debug_buffer_ddr_address scalar_processor_2400_dmem
+#define HIVE_ADDR_sp_debug_buffer_ddr_address 0xBC
+#define HIVE_SIZE_sp_debug_buffer_ddr_address 4
+
+#ifndef ISP2401
+/* function sp_isys_copy_request: 6ED */
+#else
+/* function sp_isys_copy_request: 681 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_rmgr_sp_refcount_retain_vbuf: 631C */
+#else
+/* function ia_css_rmgr_sp_refcount_retain_vbuf: 64E2 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_thread_sp_set_priority: 136A */
+#else
+/* function ia_css_thread_sp_set_priority: 12E0 */
+#endif
+
+#ifndef ISP2401
+/* function sizeof_hmem: C0A */
+#else
+/* function sizeof_hmem: C04 */
+#endif
+
+#ifndef ISP2401
+/* function input_system_channel_open: 1241 */
+#else
+/* function input_system_channel_open: 11BC */
+#endif
+
+#ifndef ISP2401
+/* function pixelgen_tpg_stop: EFB */
+#else
+/* function pixelgen_tpg_stop: EF5 */
+#endif
+
+#ifndef ISP2401
+/* function tmpmem_release_dmem: 6528 */
+#else
+/* function tmpmem_release_dmem: 66EE */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_dmaproxy_sp_set_width_exception: 3C28 */
+#else
+/* function __ia_css_dmaproxy_sp_process_text: 3BAB */
+#endif
+
+#ifndef ISP2401
+/* function sp_event_assert: 929 */
+#else
+/* function ia_css_dmaproxy_sp_set_width_exception: 3D9D */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_flash_sp_init_internal_params: 35B4 */
+#else
+/* function sp_event_assert: 8BD */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_tagger_buf_sp_pop_unmarked_and_unlocked: 32ED */
+#else
+/* function ia_css_flash_sp_init_internal_params: 36D7 */
+#endif
+
+#ifndef ISP2401
+/* function __modu: 687A */
+#else
+/* function ia_css_tagger_buf_sp_pop_unmarked_and_unlocked: 3410 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_dmaproxy_sp_init_isp_vector: 3AAD */
+#else
+/* function __modu: 6A78 */
+#endif
+
+#ifndef ISP2401
+/* function input_system_channel_transfer: 122A */
+#else
+/* function ia_css_dmaproxy_sp_init_isp_vector: 3C0D */
+
+/* function input_system_channel_transfer: 11A5 */
+#endif
+
+/* function isp_vamem_store: 0 */
+
+#ifdef ISP2401
+/* function ia_css_tagger_sp_set_copy_pipe: 32F2 */
+
+#endif
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_GDC_BASE
+#define HIVE_MEM_GDC_BASE scalar_processor_2400_dmem
+#define HIVE_ADDR_GDC_BASE 0x44
+#define HIVE_SIZE_GDC_BASE 8
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_GDC_BASE scalar_processor_2400_dmem
+#define HIVE_ADDR_sp_GDC_BASE 0x44
+#define HIVE_SIZE_sp_GDC_BASE 8
+
+#ifndef ISP2401
+/* function ia_css_queue_local_init: 5528 */
+#else
+/* function ia_css_queue_local_init: 5707 */
+#endif
+
+#ifndef ISP2401
+/* function sp_event_proxy_callout_func: 6947 */
+#else
+/* function sp_event_proxy_callout_func: 6B45 */
+#endif
+
+#ifndef ISP2401
+/* function qos_scheduler_schedule_stage: 6580 */
+#else
+/* function qos_scheduler_schedule_stage: 6759 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_ia_css_thread_sp_num_ready_threads
+#define HIVE_MEM_ia_css_thread_sp_num_ready_threads scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_ia_css_thread_sp_num_ready_threads 0x5A78
+#else
+#define HIVE_ADDR_ia_css_thread_sp_num_ready_threads 0x5B28
+#endif
+#define HIVE_SIZE_ia_css_thread_sp_num_ready_threads 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_ia_css_thread_sp_num_ready_threads scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_ia_css_thread_sp_num_ready_threads 0x5A78
+#else
+#define HIVE_ADDR_sp_ia_css_thread_sp_num_ready_threads 0x5B28
+#endif
+#define HIVE_SIZE_sp_ia_css_thread_sp_num_ready_threads 4
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_sp_threads_stack_size
+#define HIVE_MEM_sp_threads_stack_size scalar_processor_2400_dmem
+#define HIVE_ADDR_sp_threads_stack_size 0x17C
+#define HIVE_SIZE_sp_threads_stack_size 24
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_sp_threads_stack_size scalar_processor_2400_dmem
+#define HIVE_ADDR_sp_sp_threads_stack_size 0x17C
+#define HIVE_SIZE_sp_sp_threads_stack_size 24
+
+#ifndef ISP2401
+/* function ia_css_ispctrl_sp_isp_done_row_striping: 4849 */
+#else
+/* function ia_css_ispctrl_sp_isp_done_row_striping: 4A0D */
+#endif
+
+#ifndef ISP2401
+/* function __ia_css_virtual_isys_sp_isr_text: 5E45 */
+#else
+/* function __ia_css_virtual_isys_sp_isr_text: 5F4E */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_queue_dequeue: 53A6 */
+#else
+/* function ia_css_queue_dequeue: 5585 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_dmaproxy_sp_configure_channel: 6DEE */
+#else
+/* function is_qos_standalone_mode: 6734 */
+
+/* function ia_css_dmaproxy_sp_configure_channel: 703C */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_current_thread_fiber_sp
+#define HIVE_MEM_current_thread_fiber_sp scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_current_thread_fiber_sp 0x5A80
+#else
+#define HIVE_ADDR_current_thread_fiber_sp 0x5B2C
+#endif
+#define HIVE_SIZE_current_thread_fiber_sp 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_current_thread_fiber_sp scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_current_thread_fiber_sp 0x5A80
+#else
+#define HIVE_ADDR_sp_current_thread_fiber_sp 0x5B2C
+#endif
+#define HIVE_SIZE_sp_current_thread_fiber_sp 4
+
+#ifndef ISP2401
+/* function ia_css_circbuf_pop: 1674 */
+#else
+/* function ia_css_circbuf_pop: 15EA */
+#endif
+
+#ifndef ISP2401
+/* function memset: 68F9 */
+#else
+/* function memset: 6AF7 */
+#endif
+
+/* function irq_raise_set_token: B6 */
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_GPIO_BASE
+#define HIVE_MEM_GPIO_BASE scalar_processor_2400_dmem
+#define HIVE_ADDR_GPIO_BASE 0x3C
+#define HIVE_SIZE_GPIO_BASE 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_GPIO_BASE scalar_processor_2400_dmem
+#define HIVE_ADDR_sp_GPIO_BASE 0x3C
+#define HIVE_SIZE_sp_GPIO_BASE 4
+
+#ifndef ISP2401
+/* function pixelgen_prbs_stop: E6F */
+#else
+/* function pixelgen_prbs_stop: E69 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_pipeline_acc_stage_enable: 1FC0 */
+#else
+/* function ia_css_pipeline_acc_stage_enable: 1F69 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_tagger_sp_unlock_exp_id: 296A */
+#else
+/* function ia_css_tagger_sp_unlock_exp_id: 293B */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_isp_ph
+#define HIVE_MEM_isp_ph scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_isp_ph 0x7360
+#else
+#define HIVE_ADDR_isp_ph 0x740C
+#endif
+#define HIVE_SIZE_isp_ph 28
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_isp_ph scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_isp_ph 0x7360
+#else
+#define HIVE_ADDR_sp_isp_ph 0x740C
+#endif
+#define HIVE_SIZE_sp_isp_ph 28
+
+#ifndef ISP2401
+/* function ia_css_ispctrl_sp_init_ds: 40D6 */
+#else
+/* function ia_css_ispctrl_sp_init_ds: 4286 */
+#endif
+
+#ifndef ISP2401
+/* function get_xmem_base_addr_raw: 4479 */
+#else
+/* function get_xmem_base_addr_raw: 4635 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_sp_all_cbs_param
+#define HIVE_MEM_sp_all_cbs_param scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_all_cbs_param 0x586C
+#else
+#define HIVE_ADDR_sp_all_cbs_param 0x5908
+#endif
+#define HIVE_SIZE_sp_all_cbs_param 16
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_sp_all_cbs_param scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_sp_all_cbs_param 0x586C
+#else
+#define HIVE_ADDR_sp_sp_all_cbs_param 0x5908
+#endif
+#define HIVE_SIZE_sp_sp_all_cbs_param 16
+
+#ifndef ISP2401
+/* function pixelgen_tpg_config: F30 */
+#else
+/* function pixelgen_tpg_config: F2A */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_circbuf_create: 16C2 */
+#else
+/* function ia_css_circbuf_create: 1638 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_sem_for_sp_group
+#define HIVE_MEM_sem_for_sp_group scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sem_for_sp_group 0x587C
+#else
+#define HIVE_ADDR_sem_for_sp_group 0x5918
+#endif
+#define HIVE_SIZE_sem_for_sp_group 20
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_sem_for_sp_group scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_sem_for_sp_group 0x587C
+#else
+#define HIVE_ADDR_sp_sem_for_sp_group 0x5918
+#endif
+#define HIVE_SIZE_sp_sem_for_sp_group 20
+
+#ifndef ISP2401
+/* function csi_rx_frontend_run: C22 */
+#else
+/* function csi_rx_frontend_run: C1C */
+
+/* function __ia_css_dmaproxy_sp_configure_channel_text: 3D7C */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_framebuf_sp_wait_for_in_frame: 64B7 */
+#else
+/* function ia_css_framebuf_sp_wait_for_in_frame: 667D */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_isys_stream_open: 60E3 */
+#else
+/* function ia_css_isys_stream_open: 62A9 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_sp_rawcopy_tag_frame: 5C71 */
+#else
+/* function ia_css_sp_rawcopy_tag_frame: 5E35 */
+#endif
+
+#ifndef ISP2401
+/* function input_system_channel_configure: 125D */
+#else
+/* function input_system_channel_configure: 11D8 */
+#endif
+
+#ifndef ISP2401
+/* function isp_hmem_clear: B33 */
+#else
+/* function isp_hmem_clear: B2D */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_framebuf_sp_release_in_frame: 64FA */
+#else
+/* function ia_css_framebuf_sp_release_in_frame: 66C0 */
+#endif
+
+#ifndef ISP2401
+/* function stream2mmio_config: E1B */
+#else
+/* function stream2mmio_config: E15 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_ispctrl_sp_start_binary: 3F55 */
+#else
+/* function ia_css_ispctrl_sp_start_binary: 40D8 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_ia_css_bufq_sp_h_pipe_private_ddr_ptrs
+#define HIVE_MEM_ia_css_bufq_sp_h_pipe_private_ddr_ptrs scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_ia_css_bufq_sp_h_pipe_private_ddr_ptrs 0x698C
+#else
+#define HIVE_ADDR_ia_css_bufq_sp_h_pipe_private_ddr_ptrs 0x6A38
+#endif
+#define HIVE_SIZE_ia_css_bufq_sp_h_pipe_private_ddr_ptrs 20
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_ia_css_bufq_sp_h_pipe_private_ddr_ptrs scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_ia_css_bufq_sp_h_pipe_private_ddr_ptrs 0x698C
+#else
+#define HIVE_ADDR_sp_ia_css_bufq_sp_h_pipe_private_ddr_ptrs 0x6A38
+#endif
+#define HIVE_SIZE_sp_ia_css_bufq_sp_h_pipe_private_ddr_ptrs 20
+
+#ifndef ISP2401
+/* function ia_css_eventq_sp_recv: 3DEB */
+#else
+/* function ia_css_eventq_sp_recv: 3F60 */
+#endif
+
+#ifndef ISP2401
+/* function csi_rx_frontend_config: C7A */
+#else
+/* function csi_rx_frontend_config: C74 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_isp_pool
+#define HIVE_MEM_isp_pool scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_isp_pool 0x370
+#else
+#define HIVE_ADDR_isp_pool 0x388
+#endif
+#define HIVE_SIZE_isp_pool 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_isp_pool scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_isp_pool 0x370
+#else
+#define HIVE_ADDR_sp_isp_pool 0x388
+#endif
+#define HIVE_SIZE_sp_isp_pool 4
+
+#ifndef ISP2401
+/* function ia_css_rmgr_sp_rel_gen: 61E9 */
+#else
+/* function ia_css_rmgr_sp_rel_gen: 63AF */
+
+/* function ia_css_tagger_sp_unblock_clients: 31C3 */
+#endif
+
+#ifndef ISP2401
+/* function css_get_frame_processing_time_end: 28E9 */
+#else
+/* function css_get_frame_processing_time_end: 28BA */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_event_any_pending_mask
+#define HIVE_MEM_event_any_pending_mask scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_event_any_pending_mask 0x388
+#else
+#define HIVE_ADDR_event_any_pending_mask 0x3A0
+#endif
+#define HIVE_SIZE_event_any_pending_mask 8
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_event_any_pending_mask scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_event_any_pending_mask 0x388
+#else
+#define HIVE_ADDR_sp_event_any_pending_mask 0x3A0
+#endif
+#define HIVE_SIZE_sp_event_any_pending_mask 8
+
+#ifndef ISP2401
+/* function ia_css_pipeline_sp_get_pipe_io_status: 1AB8 */
+#else
+/* function ia_css_pipeline_sp_get_pipe_io_status: 1A5A */
+#endif
+
+/* function sh_css_decode_tag_descr: 352 */
+
+/* function debug_enqueue_isp: 27B */
+
+#ifndef ISP2401
+/* function qos_scheduler_update_stage_budget: 656E */
+#else
+/* function qos_scheduler_update_stage_budget: 673C */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_spctrl_sp_uninit: 5DFB */
+#else
+/* function ia_css_spctrl_sp_uninit: 5EDD */
+#endif
+
+#ifndef ISP2401
+/* function csi_rx_backend_run: C68 */
+#else
+/* function csi_rx_backend_run: C62 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_ia_css_bufq_sp_pipe_private_dis_bufs
+#define HIVE_MEM_ia_css_bufq_sp_pipe_private_dis_bufs scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_ia_css_bufq_sp_pipe_private_dis_bufs 0x69A0
+#else
+#define HIVE_ADDR_ia_css_bufq_sp_pipe_private_dis_bufs 0x6A4C
+#endif
+#define HIVE_SIZE_ia_css_bufq_sp_pipe_private_dis_bufs 140
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_ia_css_bufq_sp_pipe_private_dis_bufs scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_ia_css_bufq_sp_pipe_private_dis_bufs 0x69A0
+#else
+#define HIVE_ADDR_sp_ia_css_bufq_sp_pipe_private_dis_bufs 0x6A4C
+#endif
+#define HIVE_SIZE_sp_ia_css_bufq_sp_pipe_private_dis_bufs 140
+
+#ifndef ISP2401
+/* function ia_css_tagger_buf_sp_lock_from_start: 341B */
+#else
+/* function ia_css_tagger_buf_sp_lock_from_start: 353E */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_sem_for_isp_idle
+#define HIVE_MEM_sem_for_isp_idle scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sem_for_isp_idle 0x5890
+#else
+#define HIVE_ADDR_sem_for_isp_idle 0x592C
+#endif
+#define HIVE_SIZE_sem_for_isp_idle 20
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_sem_for_isp_idle scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_sem_for_isp_idle 0x5890
+#else
+#define HIVE_ADDR_sp_sem_for_isp_idle 0x592C
+#endif
+#define HIVE_SIZE_sp_sem_for_isp_idle 20
+
+#ifndef ISP2401
+/* function ia_css_dmaproxy_sp_write_byte_addr: 3B0A */
+#else
+/* function ia_css_dmaproxy_sp_write_byte_addr: 3C6A */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_dmaproxy_sp_init: 3A81 */
+#else
+/* function ia_css_dmaproxy_sp_init: 3BE1 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_bufq_sp_release_dynamic_buf_clock_tick: 3686 */
+#else
+/* function ia_css_bufq_sp_release_dynamic_buf_clock_tick: 37A9 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_ISP_VAMEM_BASE
+#define HIVE_MEM_ISP_VAMEM_BASE scalar_processor_2400_dmem
+#define HIVE_ADDR_ISP_VAMEM_BASE 0x14
+#define HIVE_SIZE_ISP_VAMEM_BASE 12
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_ISP_VAMEM_BASE scalar_processor_2400_dmem
+#define HIVE_ADDR_sp_ISP_VAMEM_BASE 0x14
+#define HIVE_SIZE_sp_ISP_VAMEM_BASE 12
+
+#ifndef ISP2401
+/* function input_system_channel_sync: 11A4 */
+#else
+/* function input_system_channel_sync: 6C10 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_ia_css_rawcopy_sp_tagger
+#define HIVE_MEM_ia_css_rawcopy_sp_tagger scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_ia_css_rawcopy_sp_tagger 0x732C
+#else
+#define HIVE_ADDR_ia_css_rawcopy_sp_tagger 0x73D8
+#endif
+#define HIVE_SIZE_ia_css_rawcopy_sp_tagger 24
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_ia_css_rawcopy_sp_tagger scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_ia_css_rawcopy_sp_tagger 0x732C
+#else
+#define HIVE_ADDR_sp_ia_css_rawcopy_sp_tagger 0x73D8
+#endif
+#define HIVE_SIZE_sp_ia_css_rawcopy_sp_tagger 24
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_ia_css_bufq_sp_pipe_private_exp_ids
+#define HIVE_MEM_ia_css_bufq_sp_pipe_private_exp_ids scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_ia_css_bufq_sp_pipe_private_exp_ids 0x6A2C
+#else
+#define HIVE_ADDR_ia_css_bufq_sp_pipe_private_exp_ids 0x6AD8
+#endif
+#define HIVE_SIZE_ia_css_bufq_sp_pipe_private_exp_ids 70
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_ia_css_bufq_sp_pipe_private_exp_ids scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_ia_css_bufq_sp_pipe_private_exp_ids 0x6A2C
+#else
+#define HIVE_ADDR_sp_ia_css_bufq_sp_pipe_private_exp_ids 0x6AD8
+#endif
+#define HIVE_SIZE_sp_ia_css_bufq_sp_pipe_private_exp_ids 70
+
+#ifndef ISP2401
+/* function ia_css_queue_item_load: 561A */
+#else
+/* function ia_css_queue_item_load: 57F9 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_spctrl_sp_get_state: 5DE6 */
+#else
+/* function ia_css_spctrl_sp_get_state: 5EC8 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_callout_sp_thread
+#define HIVE_MEM_callout_sp_thread scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_callout_sp_thread 0x5A74
+#else
+#define HIVE_ADDR_callout_sp_thread 0x278
+#endif
+#define HIVE_SIZE_callout_sp_thread 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_callout_sp_thread scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_callout_sp_thread 0x5A74
+#else
+#define HIVE_ADDR_sp_callout_sp_thread 0x278
+#endif
+#define HIVE_SIZE_sp_callout_sp_thread 4
+
+#ifndef ISP2401
+/* function thread_fiber_sp_init: 14CB */
+#else
+/* function thread_fiber_sp_init: 1441 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_SP_PMEM_BASE
+#define HIVE_MEM_SP_PMEM_BASE scalar_processor_2400_dmem
+#define HIVE_ADDR_SP_PMEM_BASE 0x0
+#define HIVE_SIZE_SP_PMEM_BASE 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_SP_PMEM_BASE scalar_processor_2400_dmem
+#define HIVE_ADDR_sp_SP_PMEM_BASE 0x0
+#define HIVE_SIZE_sp_SP_PMEM_BASE 4
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_sp_isp_input_stream_format
+#define HIVE_MEM_sp_isp_input_stream_format scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_isp_input_stream_format 0x3E2C
+#else
+#define HIVE_ADDR_sp_isp_input_stream_format 0x3E50
+#endif
+#define HIVE_SIZE_sp_isp_input_stream_format 20
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_sp_isp_input_stream_format scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_sp_isp_input_stream_format 0x3E2C
+#else
+#define HIVE_ADDR_sp_sp_isp_input_stream_format 0x3E50
+#endif
+#define HIVE_SIZE_sp_sp_isp_input_stream_format 20
+
+#ifndef ISP2401
+/* function __mod: 6866 */
+#else
+/* function __mod: 6A64 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_dmaproxy_sp_init_dmem_channel: 3B6B */
+#else
+/* function ia_css_dmaproxy_sp_init_dmem_channel: 3CCB */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_thread_sp_join: 139B */
+#else
+/* function ia_css_thread_sp_join: 1311 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_dmaproxy_sp_add_command: 6EF1 */
+#else
+/* function ia_css_dmaproxy_sp_add_command: 712E */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_sp_metadata_thread_func: 5DDF */
+#else
+/* function ia_css_sp_metadata_thread_func: 5EC1 */
+#endif
+
+#ifndef ISP2401
+/* function __sp_event_proxy_func_critical: 6934 */
+#else
+/* function __sp_event_proxy_func_critical: 6B32 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_pipeline_sp_wait_for_isys_stream_N: 5F53 */
+#else
+/* function ia_css_pipeline_sp_wait_for_isys_stream_N: 6074 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_sp_metadata_wait: 5DD8 */
+#else
+/* function ia_css_sp_metadata_wait: 5EBA */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_circbuf_peek_from_start: 15A4 */
+#else
+/* function ia_css_circbuf_peek_from_start: 151A */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_event_sp_encode: 3E76 */
+#else
+/* function ia_css_event_sp_encode: 3FEB */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_thread_sp_run: 140E */
+#else
+/* function ia_css_thread_sp_run: 1384 */
+#endif
+
+#ifndef ISP2401
+/* function sp_isys_copy_func: 618 */
+#else
+/* function sp_isys_copy_func: 5AC */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_sp_isp_param_init_isp_memories: 50A3 */
+#else
+/* function ia_css_sp_isp_param_init_isp_memories: 52AC */
+#endif
+
+#ifndef ISP2401
+/* function register_isr: 921 */
+#else
+/* function register_isr: 8B5 */
+#endif
+
+/* function irq_raise: C8 */
+
+#ifndef ISP2401
+/* function ia_css_dmaproxy_sp_mmu_invalidate: 3A48 */
+#else
+/* function ia_css_dmaproxy_sp_mmu_invalidate: 3B71 */
+#endif
+
+#ifndef ISP2401
+/* function csi_rx_backend_disable: C34 */
+#else
+/* function csi_rx_backend_disable: C2E */
+#endif
+
+#ifndef ISP2401
+/* function pipeline_sp_initialize_stage: 2104 */
+#else
+/* function pipeline_sp_initialize_stage: 20BF */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_N_CSI_RX_FE_CTRL_DLANES
+#define HIVE_MEM_N_CSI_RX_FE_CTRL_DLANES scalar_processor_2400_dmem
+#define HIVE_ADDR_N_CSI_RX_FE_CTRL_DLANES 0x1C4
+#define HIVE_SIZE_N_CSI_RX_FE_CTRL_DLANES 12
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_N_CSI_RX_FE_CTRL_DLANES scalar_processor_2400_dmem
+#define HIVE_ADDR_sp_N_CSI_RX_FE_CTRL_DLANES 0x1C4
+#define HIVE_SIZE_sp_N_CSI_RX_FE_CTRL_DLANES 12
+
+#ifndef ISP2401
+/* function ia_css_dmaproxy_sp_read_byte_addr_mmio: 6DC0 */
+#else
+/* function ia_css_dmaproxy_sp_read_byte_addr_mmio: 700E */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_ispctrl_sp_done_ds: 40BD */
+#else
+/* function ia_css_ispctrl_sp_done_ds: 426D */
+#endif
+
+#ifndef ISP2401
+/* function csi_rx_backend_config: C8B */
+#else
+/* function csi_rx_backend_config: C85 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_sp_isp_param_get_mem_inits: 507E */
+#else
+/* function ia_css_sp_isp_param_get_mem_inits: 5287 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_parambuf_sp_init_buffer_queues: 1A85 */
+#else
+/* function ia_css_parambuf_sp_init_buffer_queues: 1A27 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_vbuf_pfp_spref
+#define HIVE_MEM_vbuf_pfp_spref scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_vbuf_pfp_spref 0x378
+#else
+#define HIVE_ADDR_vbuf_pfp_spref 0x390
+#endif
+#define HIVE_SIZE_vbuf_pfp_spref 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_vbuf_pfp_spref scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_vbuf_pfp_spref 0x378
+#else
+#define HIVE_ADDR_sp_vbuf_pfp_spref 0x390
+#endif
+#define HIVE_SIZE_sp_vbuf_pfp_spref 4
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_ISP_HMEM_BASE
+#define HIVE_MEM_ISP_HMEM_BASE scalar_processor_2400_dmem
+#define HIVE_ADDR_ISP_HMEM_BASE 0x20
+#define HIVE_SIZE_ISP_HMEM_BASE 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_ISP_HMEM_BASE scalar_processor_2400_dmem
+#define HIVE_ADDR_sp_ISP_HMEM_BASE 0x20
+#define HIVE_SIZE_sp_ISP_HMEM_BASE 4
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_ia_css_bufq_sp_pipe_private_frames
+#define HIVE_MEM_ia_css_bufq_sp_pipe_private_frames scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_ia_css_bufq_sp_pipe_private_frames 0x6A74
+#else
+#define HIVE_ADDR_ia_css_bufq_sp_pipe_private_frames 0x6B20
+#endif
+#define HIVE_SIZE_ia_css_bufq_sp_pipe_private_frames 280
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_ia_css_bufq_sp_pipe_private_frames scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_ia_css_bufq_sp_pipe_private_frames 0x6A74
+#else
+#define HIVE_ADDR_sp_ia_css_bufq_sp_pipe_private_frames 0x6B20
+#endif
+#define HIVE_SIZE_sp_ia_css_bufq_sp_pipe_private_frames 280
+
+#ifndef ISP2401
+/* function qos_scheduler_init_stage_budget: 65A7 */
+#else
+/* function qos_scheduler_init_stage_budget: 679A */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_sp2host_buffer_queue_handle
+#define HIVE_MEM_sp2host_buffer_queue_handle scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp2host_buffer_queue_handle 0x6B8C
+#else
+#define HIVE_ADDR_sp2host_buffer_queue_handle 0x6C38
+#endif
+#define HIVE_SIZE_sp2host_buffer_queue_handle 96
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_sp2host_buffer_queue_handle scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_sp2host_buffer_queue_handle 0x6B8C
+#else
+#define HIVE_ADDR_sp_sp2host_buffer_queue_handle 0x6C38
+#endif
+#define HIVE_SIZE_sp_sp2host_buffer_queue_handle 96
+
+#ifndef ISP2401
+/* function ia_css_ispctrl_sp_init_isp_vars: 4D9D */
+#else
+/* function ia_css_ispctrl_sp_init_isp_vars: 4F79 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_isys_stream_start: 6010 */
+#else
+/* function ia_css_isys_stream_start: 6187 */
+#endif
+
+#ifndef ISP2401
+/* function sp_warning: 954 */
+#else
+/* function sp_warning: 8E8 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_rmgr_sp_vbuf_enqueue: 62DC */
+#else
+/* function ia_css_rmgr_sp_vbuf_enqueue: 64A2 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_tagger_sp_tag_exp_id: 2A84 */
+#else
+/* function ia_css_tagger_sp_tag_exp_id: 2A55 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_pipeline_sp_sfi_release_current_frame: 276B */
+#else
+/* function ia_css_pipeline_sp_sfi_release_current_frame: 273C */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_dmaproxy_sp_write: 3B21 */
+#else
+/* function ia_css_dmaproxy_sp_write: 3C81 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_isys_stream_start_async: 608A */
+#else
+/* function ia_css_isys_stream_start_async: 6250 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_parambuf_sp_release_in_param: 1905 */
+#else
+/* function ia_css_parambuf_sp_release_in_param: 187B */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_irq_sw_interrupt_token
+#define HIVE_MEM_irq_sw_interrupt_token scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_irq_sw_interrupt_token 0x3E28
+#else
+#define HIVE_ADDR_irq_sw_interrupt_token 0x3E4C
+#endif
+#define HIVE_SIZE_irq_sw_interrupt_token 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_irq_sw_interrupt_token scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_irq_sw_interrupt_token 0x3E28
+#else
+#define HIVE_ADDR_sp_irq_sw_interrupt_token 0x3E4C
+#endif
+#define HIVE_SIZE_sp_irq_sw_interrupt_token 4
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_sp_isp_addresses
+#define HIVE_MEM_sp_isp_addresses scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_isp_addresses 0x6FDC
+#else
+#define HIVE_ADDR_sp_isp_addresses 0x708C
+#endif
+#define HIVE_SIZE_sp_isp_addresses 172
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_sp_isp_addresses scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_sp_isp_addresses 0x6FDC
+#else
+#define HIVE_ADDR_sp_sp_isp_addresses 0x708C
+#endif
+#define HIVE_SIZE_sp_sp_isp_addresses 172
+
+#ifndef ISP2401
+/* function ia_css_rmgr_sp_acq_gen: 6201 */
+#else
+/* function ia_css_rmgr_sp_acq_gen: 63C7 */
+#endif
+
+#ifndef ISP2401
+/* function input_system_input_port_open: 10ED */
+#else
+/* function input_system_input_port_open: 10E7 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_isps
+#define HIVE_MEM_isps scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_isps 0x737C
+#else
+#define HIVE_ADDR_isps 0x7428
+#endif
+#define HIVE_SIZE_isps 28
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_isps scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_isps 0x737C
+#else
+#define HIVE_ADDR_sp_isps 0x7428
+#endif
+#define HIVE_SIZE_sp_isps 28
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_host_sp_queues_initialized
+#define HIVE_MEM_host_sp_queues_initialized scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_host_sp_queues_initialized 0x3E40
+#else
+#define HIVE_ADDR_host_sp_queues_initialized 0x3E64
+#endif
+#define HIVE_SIZE_host_sp_queues_initialized 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_host_sp_queues_initialized scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_host_sp_queues_initialized 0x3E40
+#else
+#define HIVE_ADDR_sp_host_sp_queues_initialized 0x3E64
+#endif
+#define HIVE_SIZE_sp_host_sp_queues_initialized 4
+
+#ifndef ISP2401
+/* function ia_css_queue_uninit: 54E6 */
+#else
+/* function ia_css_queue_uninit: 56C5 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_ia_css_ispctrl_sp_isp_started
+#define HIVE_MEM_ia_css_ispctrl_sp_isp_started scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_ia_css_ispctrl_sp_isp_started 0x6C94
+#else
+#define HIVE_ADDR_ia_css_ispctrl_sp_isp_started 0x6D40
+#endif
+#define HIVE_SIZE_ia_css_ispctrl_sp_isp_started 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_ia_css_ispctrl_sp_isp_started scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_ia_css_ispctrl_sp_isp_started 0x6C94
+#else
+#define HIVE_ADDR_sp_ia_css_ispctrl_sp_isp_started 0x6D40
+#endif
+#define HIVE_SIZE_sp_ia_css_ispctrl_sp_isp_started 4
+
+#ifndef ISP2401
+/* function ia_css_bufq_sp_release_dynamic_buf: 36F2 */
+#else
+/* function ia_css_bufq_sp_release_dynamic_buf: 3815 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_dmaproxy_sp_set_height_exception: 3C19 */
+#else
+/* function ia_css_dmaproxy_sp_set_height_exception: 3D8E */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_dmaproxy_sp_init_vmem_channel: 3B9E */
+#else
+/* function ia_css_dmaproxy_sp_init_vmem_channel: 3CFF */
+#endif
+
+#ifndef ISP2401
+/* function csi_rx_backend_stop: C57 */
+#else
+/* function csi_rx_backend_stop: C51 */
+#endif
+
+#ifndef ISP2401
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_num_ready_threads
+#define HIVE_MEM_num_ready_threads scalar_processor_2400_dmem
+#define HIVE_ADDR_num_ready_threads 0x5A7C
+#define HIVE_SIZE_num_ready_threads 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_num_ready_threads scalar_processor_2400_dmem
+#define HIVE_ADDR_sp_num_ready_threads 0x5A7C
+#define HIVE_SIZE_sp_num_ready_threads 4
+
+/* function ia_css_dmaproxy_sp_write_byte_addr_mmio: 3AF3 */
+#else
+/* function ia_css_dmaproxy_sp_write_byte_addr_mmio: 3C53 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_vbuf_spref
+#define HIVE_MEM_vbuf_spref scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_vbuf_spref 0x374
+#else
+#define HIVE_ADDR_vbuf_spref 0x38C
+#endif
+#define HIVE_SIZE_vbuf_spref 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_vbuf_spref scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_vbuf_spref 0x374
+#else
+#define HIVE_ADDR_sp_vbuf_spref 0x38C
+#endif
+#define HIVE_SIZE_sp_vbuf_spref 4
+
+#ifndef ISP2401
+/* function ia_css_queue_enqueue: 5430 */
+#else
+/* function ia_css_queue_enqueue: 560F */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_ia_css_flash_sp_request
+#define HIVE_MEM_ia_css_flash_sp_request scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_ia_css_flash_sp_request 0x5B30
+#else
+#define HIVE_ADDR_ia_css_flash_sp_request 0x5BDC
+#endif
+#define HIVE_SIZE_ia_css_flash_sp_request 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_ia_css_flash_sp_request scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_ia_css_flash_sp_request 0x5B30
+#else
+#define HIVE_ADDR_sp_ia_css_flash_sp_request 0x5BDC
+#endif
+#define HIVE_SIZE_sp_ia_css_flash_sp_request 4
+
+#ifndef ISP2401
+/* function ia_css_dmaproxy_sp_vmem_write: 3AC4 */
+#else
+/* function ia_css_dmaproxy_sp_vmem_write: 3C24 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_tagger_frames
+#define HIVE_MEM_tagger_frames scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_tagger_frames 0x5A84
+#else
+#define HIVE_ADDR_tagger_frames 0x5B30
+#endif
+#define HIVE_SIZE_tagger_frames 168
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_tagger_frames scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_tagger_frames 0x5A84
+#else
+#define HIVE_ADDR_sp_tagger_frames 0x5B30
+#endif
+#define HIVE_SIZE_sp_tagger_frames 168
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_sem_for_reading_if
+#define HIVE_MEM_sem_for_reading_if scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sem_for_reading_if 0x58A4
+#else
+#define HIVE_ADDR_sem_for_reading_if 0x5940
+#endif
+#define HIVE_SIZE_sem_for_reading_if 20
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_sem_for_reading_if scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_sem_for_reading_if 0x58A4
+#else
+#define HIVE_ADDR_sp_sem_for_reading_if 0x5940
+#endif
+#define HIVE_SIZE_sp_sem_for_reading_if 20
+
+#ifndef ISP2401
+/* function sp_generate_interrupts: 9D3 */
+#else
+/* function sp_generate_interrupts: 967 */
+
+/* function ia_css_pipeline_sp_start: 1FC2 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_pipeline_sp_start: 2007 */
+#else
+/* function ia_css_thread_default_callout: 6C8F */
+#endif
+
+#ifndef ISP2401
+/* function csi_rx_backend_enable: C45 */
+#else
+/* function csi_rx_backend_enable: C3F */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_sp_rawcopy_init: 5953 */
+#else
+/* function ia_css_sp_rawcopy_init: 5B32 */
+#endif
+
+#ifndef ISP2401
+/* function input_system_input_port_configure: 113F */
+#else
+/* function input_system_input_port_configure: 1139 */
+#endif
+
+#ifndef ISP2401
+/* function tmr_clock_read: 16EF */
+#else
+/* function tmr_clock_read: 1665 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_ISP_BAMEM_BASE
+#define HIVE_MEM_ISP_BAMEM_BASE scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_ISP_BAMEM_BASE 0x380
+#else
+#define HIVE_ADDR_ISP_BAMEM_BASE 0x398
+#endif
+#define HIVE_SIZE_ISP_BAMEM_BASE 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_ISP_BAMEM_BASE scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_ISP_BAMEM_BASE 0x380
+#else
+#define HIVE_ADDR_sp_ISP_BAMEM_BASE 0x398
+#endif
+#define HIVE_SIZE_sp_ISP_BAMEM_BASE 4
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_ia_css_bufq_sp_sems_for_sp2host_buf_queues
+#define HIVE_MEM_ia_css_bufq_sp_sems_for_sp2host_buf_queues scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_ia_css_bufq_sp_sems_for_sp2host_buf_queues 0x6BEC
+#else
+#define HIVE_ADDR_ia_css_bufq_sp_sems_for_sp2host_buf_queues 0x6C98
+#endif
+#define HIVE_SIZE_ia_css_bufq_sp_sems_for_sp2host_buf_queues 160
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_ia_css_bufq_sp_sems_for_sp2host_buf_queues scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_ia_css_bufq_sp_sems_for_sp2host_buf_queues 0x6BEC
+#else
+#define HIVE_ADDR_sp_ia_css_bufq_sp_sems_for_sp2host_buf_queues 0x6C98
+#endif
+#define HIVE_SIZE_sp_ia_css_bufq_sp_sems_for_sp2host_buf_queues 160
+
+#ifndef ISP2401
+/* function isys2401_dma_config_legacy: DE0 */
+#else
+/* function isys2401_dma_config_legacy: DDA */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_ibuf_ctrl_master_ports
+#define HIVE_MEM_ibuf_ctrl_master_ports scalar_processor_2400_dmem
+#define HIVE_ADDR_ibuf_ctrl_master_ports 0x208
+#define HIVE_SIZE_ibuf_ctrl_master_ports 12
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_ibuf_ctrl_master_ports scalar_processor_2400_dmem
+#define HIVE_ADDR_sp_ibuf_ctrl_master_ports 0x208
+#define HIVE_SIZE_sp_ibuf_ctrl_master_ports 12
+
+#ifndef ISP2401
+/* function css_get_frame_processing_time_start: 28F1 */
+#else
+/* function css_get_frame_processing_time_start: 28C2 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_sp_all_cbs_frame
+#define HIVE_MEM_sp_all_cbs_frame scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_all_cbs_frame 0x58B8
+#else
+#define HIVE_ADDR_sp_all_cbs_frame 0x5954
+#endif
+#define HIVE_SIZE_sp_all_cbs_frame 16
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_sp_all_cbs_frame scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_sp_all_cbs_frame 0x58B8
+#else
+#define HIVE_ADDR_sp_sp_all_cbs_frame 0x5954
+#endif
+#define HIVE_SIZE_sp_sp_all_cbs_frame 16
+
+#ifndef ISP2401
+/* function ia_css_virtual_isys_sp_isr: 6F07 */
+#else
+/* function ia_css_virtual_isys_sp_isr: 716E */
+#endif
+
+#ifndef ISP2401
+/* function thread_sp_queue_print: 142B */
+#else
+/* function thread_sp_queue_print: 13A1 */
+#endif
+
+#ifndef ISP2401
+/* function sp_notify_eof: 97F */
+#else
+/* function sp_notify_eof: 913 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_sem_for_str2mem
+#define HIVE_MEM_sem_for_str2mem scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sem_for_str2mem 0x58C8
+#else
+#define HIVE_ADDR_sem_for_str2mem 0x5964
+#endif
+#define HIVE_SIZE_sem_for_str2mem 20
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_sem_for_str2mem scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_sem_for_str2mem 0x58C8
+#else
+#define HIVE_ADDR_sp_sem_for_str2mem 0x5964
+#endif
+#define HIVE_SIZE_sp_sem_for_str2mem 20
+
+#ifndef ISP2401
+/* function ia_css_tagger_buf_sp_is_marked_from_start: 3483 */
+#else
+/* function ia_css_tagger_buf_sp_is_marked_from_start: 35A6 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_bufq_sp_acquire_dynamic_buf: 38AA */
+#else
+/* function ia_css_bufq_sp_acquire_dynamic_buf: 39CD */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_pipeline_sp_sfi_mode_is_enabled: 28BF */
+#else
+/* function ia_css_pipeline_sp_sfi_mode_is_enabled: 2890 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_circbuf_destroy: 16B9 */
+#else
+/* function ia_css_circbuf_destroy: 162F */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_ISP_PMEM_BASE
+#define HIVE_MEM_ISP_PMEM_BASE scalar_processor_2400_dmem
+#define HIVE_ADDR_ISP_PMEM_BASE 0xC
+#define HIVE_SIZE_ISP_PMEM_BASE 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_ISP_PMEM_BASE scalar_processor_2400_dmem
+#define HIVE_ADDR_sp_ISP_PMEM_BASE 0xC
+#define HIVE_SIZE_sp_ISP_PMEM_BASE 4
+
+#ifndef ISP2401
+/* function ia_css_sp_isp_param_mem_load: 5011 */
+#else
+/* function ia_css_sp_isp_param_mem_load: 521A */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_tagger_buf_sp_pop_from_start: 326F */
+#else
+/* function ia_css_tagger_buf_sp_pop_from_start: 3392 */
+#endif
+
+#ifndef ISP2401
+/* function __div: 681E */
+#else
+/* function __div: 6A1C */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_rmgr_sp_refcount_release_vbuf: 62FB */
+#else
+/* function ia_css_rmgr_sp_refcount_release_vbuf: 64C1 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_ia_css_flash_sp_in_use
+#define HIVE_MEM_ia_css_flash_sp_in_use scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_ia_css_flash_sp_in_use 0x5B34
+#else
+#define HIVE_ADDR_ia_css_flash_sp_in_use 0x5BE0
+#endif
+#define HIVE_SIZE_ia_css_flash_sp_in_use 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_ia_css_flash_sp_in_use scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_ia_css_flash_sp_in_use 0x5B34
+#else
+#define HIVE_ADDR_sp_ia_css_flash_sp_in_use 0x5BE0
+#endif
+#define HIVE_SIZE_sp_ia_css_flash_sp_in_use 4
+
+#ifndef ISP2401
+/* function ia_css_thread_sem_sp_wait: 6AE4 */
+#else
+/* function ia_css_thread_sem_sp_wait: 6D63 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_sp_sleep_mode
+#define HIVE_MEM_sp_sleep_mode scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_sleep_mode 0x3E44
+#else
+#define HIVE_ADDR_sp_sleep_mode 0x3E68
+#endif
+#define HIVE_SIZE_sp_sleep_mode 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_sp_sleep_mode scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_sp_sleep_mode 0x3E44
+#else
+#define HIVE_ADDR_sp_sp_sleep_mode 0x3E68
+#endif
+#define HIVE_SIZE_sp_sp_sleep_mode 4
+
+#ifndef ISP2401
+/* function ia_css_tagger_buf_sp_push: 337E */
+#else
+/* function ia_css_tagger_buf_sp_push: 34A1 */
+#endif
+
+/* function mmu_invalidate_cache: D3 */
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_sp_max_cb_elems
+#define HIVE_MEM_sp_max_cb_elems scalar_processor_2400_dmem
+#define HIVE_ADDR_sp_max_cb_elems 0x148
+#define HIVE_SIZE_sp_max_cb_elems 8
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_sp_max_cb_elems scalar_processor_2400_dmem
+#define HIVE_ADDR_sp_sp_max_cb_elems 0x148
+#define HIVE_SIZE_sp_sp_max_cb_elems 8
+
+#ifndef ISP2401
+/* function ia_css_queue_remote_init: 5508 */
+#else
+/* function ia_css_queue_remote_init: 56E7 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_isp_stop_req
+#define HIVE_MEM_isp_stop_req scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_isp_stop_req 0x575C
+#else
+#define HIVE_ADDR_isp_stop_req 0x57F8
+#endif
+#define HIVE_SIZE_isp_stop_req 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_isp_stop_req scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_isp_stop_req 0x575C
+#else
+#define HIVE_ADDR_sp_isp_stop_req 0x57F8
+#endif
+#define HIVE_SIZE_sp_isp_stop_req 4
+
+#ifndef ISP2401
+/* function ia_css_pipeline_sp_sfi_request_next_frame: 2781 */
+#else
+/* function ia_css_pipeline_sp_sfi_request_next_frame: 2752 */
+#endif
+
+#ifndef ISP2401
+#define HIVE_ICACHE_sp_critical_SEGMENT_START 0
+#define HIVE_ICACHE_sp_critical_NUM_SEGMENTS 1
+#endif
+
+#endif /* _sp_map_h_ */
+#ifndef ISP2401
+extern void sh_css_dump_sp_dmem(void);
+void sh_css_dump_sp_dmem(void)
+{
+}
+#endif
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/system_global.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/system_global.h
new file mode 100644
index 000000000000..9f7ecac46273
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/system_global.h
@@ -0,0 +1,458 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __SYSTEM_GLOBAL_H_INCLUDED__
+#define __SYSTEM_GLOBAL_H_INCLUDED__
+
+#include <hive_isp_css_defs.h>
+#include <type_support.h>
+
+/*
+ * The longest allowed (uninterruptible) bus transfer; this does not
+ * take stalling into account.
+ */
+#define HIVE_ISP_MAX_BURST_LENGTH 1024
+
+/*
+ * Maximum allowed burst length in words for the ISP DMA
+ * This value is set to 2 to prevent the ISP DMA from blocking
+ * the bus for too long; as the input system can only buffer
+ * 2 lines on Moorefield and Cherrytrail, the input system buffers
+ * may overflow if blocked for too long (BZ 2726).
+ */
+#define ISP_DMA_MAX_BURST_LENGTH 2
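To make the rationale above concrete, here is a minimal sketch (a hypothetical helper, not part of this patch) of chunking a transfer so that no single bus burst exceeds ISP_DMA_MAX_BURST_LENGTH words, letting the two-line input-system buffers drain between bursts:

/* Hypothetical illustration of the burst limit above; the real DMA
 * programming lives elsewhere in the driver. */
static void isp_dma_copy_words(unsigned int n_words)
{
	unsigned int done = 0;

	while (done < n_words) {
		unsigned int burst = n_words - done;

		if (burst > ISP_DMA_MAX_BURST_LENGTH)
			burst = ISP_DMA_MAX_BURST_LENGTH;
		/* issue one bus burst of 'burst' words starting at word 'done' */
		done += burst;
	}
}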
+
+/*
+ * Create a list of HAS and IS properties that defines the system
+ *
+ * The configuration assumes the following
+ * - The system is heterogeneous; multiple cell and device classes
+ * - The cell and device instances are homogeneous, each device type
+ * belongs to the same class
+ * - Device instances supporting a subset of the class capabilities are
+ * allowed
+ *
+ * We could manage different device classes through the enumerated
+ * lists (C) or the use of classes (C++), but that is presently not
+ * fully supported
+ *
+ * N.B. the 3 input formatters are of 2 different classes
+ */
+
+#define USE_INPUT_SYSTEM_VERSION_2401
+
+#define IS_ISP_2400_SYSTEM
+/*
+ * Since this file is visible everywhere and the system definition
+ * macros are not, detect the separate definitions for {host, SP, ISP}
+ *
+ * The 2401 system has the nice property that it uses a vanilla 2400 SP
+ * so the SP will believe it is a 2400 system rather than 2401...
+ */
+/* #if defined(SYSTEM_hive_isp_css_2401_system) || defined(__isp2401_mamoiada) || defined(__scalar_processor_2401) */
+#if defined(SYSTEM_hive_isp_css_2401_system) || defined(__isp2401_mamoiada)
+#define IS_ISP_2401_MAMOIADA_SYSTEM
+#define HAS_ISP_2401_MAMOIADA
+#define HAS_SP_2400
+/* #elif defined(SYSTEM_hive_isp_css_2400_system) || defined(__isp2400_mamoiada) || defined(__scalar_processor_2400)*/
+#elif defined(SYSTEM_hive_isp_css_2400_system) || defined(__isp2400_mamoiada)
+#define IS_ISP_2400_MAMOIADA_SYSTEM
+#define HAS_ISP_2400_MAMOIADA
+#define HAS_SP_2400
+#else
+#error "system_global.h: 2400_SYSTEM must be one of {2400, 2401 }"
+#endif
+
+#define HAS_MMU_VERSION_2
+#define HAS_DMA_VERSION_2
+#define HAS_GDC_VERSION_2
+#define HAS_VAMEM_VERSION_2
+#define HAS_HMEM_VERSION_1
+#define HAS_BAMEM_VERSION_2
+#define HAS_IRQ_VERSION_2
+#define HAS_IRQ_MAP_VERSION_2
+#define HAS_INPUT_FORMATTER_VERSION_2
+/* 2401: HAS_INPUT_SYSTEM_VERSION_3 */
+/* 2400: HAS_INPUT_SYSTEM_VERSION_2 */
+#define HAS_INPUT_SYSTEM_VERSION_2
+#define HAS_INPUT_SYSTEM_VERSION_2401
+#define HAS_BUFFERED_SENSOR
+#define HAS_FIFO_MONITORS_VERSION_2
+/* #define HAS_GP_REGS_VERSION_2 */
+#define HAS_GP_DEVICE_VERSION_2
+#define HAS_GPIO_VERSION_1
+#define HAS_TIMED_CTRL_VERSION_1
+#define HAS_RX_VERSION_2
+#define HAS_NO_INPUT_FORMATTER
+/*#define HAS_NO_PACKED_RAW_PIXELS*/
+/*#define HAS_NO_DVS_6AXIS_CONFIG_UPDATE*/
+
+#define DMA_DDR_TO_VAMEM_WORKAROUND
+#define DMA_DDR_TO_HMEM_WORKAROUND
+
+
+/*
+ * Semi global. "HRT" is accessible from SP, but
+ * the HRT types do not fully apply
+ */
+#define HRT_VADDRESS_WIDTH 32
+/* Surprise, this is a local property */
+/*#define HRT_ADDRESS_WIDTH 64 */
+#define HRT_DATA_WIDTH 32
+
+#define SIZEOF_HRT_REG (HRT_DATA_WIDTH>>3)
+#define HIVE_ISP_CTRL_DATA_BYTES (HIVE_ISP_CTRL_DATA_WIDTH/8)
+
+/* The main bus connecting all devices */
+#define HRT_BUS_WIDTH HIVE_ISP_CTRL_DATA_WIDTH
+#define HRT_BUS_BYTES HIVE_ISP_CTRL_DATA_BYTES
+
+#define CSI2P_DISABLE_ISYS2401_ONLINE_MODE
+
+/* per-frame parameter handling support */
+#define SH_CSS_ENABLE_PER_FRAME_PARAMS
+
+typedef uint32_t hrt_bus_align_t;
+
+/*
+ * Enumerate the devices; device access through the API is by ID,
+ * through the DLI by address. The enumerator terminators are used
+ * to size the wiring arrays and as an exception value.
+ */
+typedef enum {
+ DDR0_ID = 0,
+ N_DDR_ID
+} ddr_ID_t;
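The enumerator-terminator convention described in the comment above can be illustrated with ddr_ID_t: the N_DDR_ID terminator both sizes a wiring array and serves as the exception ("not found") value. A minimal sketch, not part of the patch (the wiring array and lookup helper are hypothetical):

/* Hypothetical use of the terminator convention. */
static uint32_t ddr_wiring[N_DDR_ID];	/* terminator sizes the wiring array */

static ddr_ID_t ddr_lookup(uint32_t base_addr)
{
	ddr_ID_t id;

	for (id = DDR0_ID; id < N_DDR_ID; id++)
		if (ddr_wiring[id] == base_addr)
			return id;
	return N_DDR_ID;	/* terminator doubles as the exception value */
}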
+
+typedef enum {
+ ISP0_ID = 0,
+ N_ISP_ID
+} isp_ID_t;
+
+typedef enum {
+ SP0_ID = 0,
+ N_SP_ID
+} sp_ID_t;
+
+#if defined(IS_ISP_2401_MAMOIADA_SYSTEM)
+typedef enum {
+ MMU0_ID = 0,
+ MMU1_ID,
+ N_MMU_ID
+} mmu_ID_t;
+#elif defined(IS_ISP_2400_MAMOIADA_SYSTEM)
+typedef enum {
+ MMU0_ID = 0,
+ MMU1_ID,
+ N_MMU_ID
+} mmu_ID_t;
+#else
+#error "system_global.h: SYSTEM must be one of {2400, 2401}"
+#endif
+
+typedef enum {
+ DMA0_ID = 0,
+ N_DMA_ID
+} dma_ID_t;
+
+typedef enum {
+ GDC0_ID = 0,
+ GDC1_ID,
+ N_GDC_ID
+} gdc_ID_t;
+
+/* this extra define is needed because we want to use it also
+ in the preprocessor, and that doesn't work with enums.
+ */
+#define N_GDC_ID_CPP 2
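The reason for the extra define is that enum constants are invisible to the preprocessor: an identifier that is not a macro evaluates to 0 inside #if, so a test against N_GDC_ID would silently misfire, while N_GDC_ID_CPP behaves as intended. A minimal illustration (not part of the patch):

#if N_GDC_ID > 1	/* enum constant: the preprocessor sees 0, branch never taken */
#error "not reached"
#endif

#if N_GDC_ID_CPP > 1	/* macro: visible to the preprocessor, branch taken as intended */
/* code that only applies when more than one GDC instance exists */
#endif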
+
+typedef enum {
+ VAMEM0_ID = 0,
+ VAMEM1_ID,
+ VAMEM2_ID,
+ N_VAMEM_ID
+} vamem_ID_t;
+
+typedef enum {
+ BAMEM0_ID = 0,
+ N_BAMEM_ID
+} bamem_ID_t;
+
+typedef enum {
+ HMEM0_ID = 0,
+ N_HMEM_ID
+} hmem_ID_t;
+
+typedef enum {
+ ISYS_IRQ0_ID = 0, /* port a */
+ ISYS_IRQ1_ID, /* port b */
+ ISYS_IRQ2_ID, /* port c */
+ N_ISYS_IRQ_ID
+} isys_irq_ID_t;
+
+typedef enum {
+ IRQ0_ID = 0, /* GP IRQ block */
+ IRQ1_ID, /* Input formatter */
+ IRQ2_ID, /* input system */
+ IRQ3_ID, /* input selector */
+ N_IRQ_ID
+} irq_ID_t;
+
+typedef enum {
+ FIFO_MONITOR0_ID = 0,
+ N_FIFO_MONITOR_ID
+} fifo_monitor_ID_t;
+
+/*
+ * Deprecated: Since all gp_reg instances are different
+ * and put in the address maps of other devices we cannot
+ * enumerate them as that assumes the instrances are the
+ * same.
+ *
+ * We define a single GP_DEVICE containing all gp_regs
+ * w.r.t. a single base address
+ *
+typedef enum {
+ GP_REGS0_ID = 0,
+ N_GP_REGS_ID
+} gp_regs_ID_t;
+ */
+typedef enum {
+ GP_DEVICE0_ID = 0,
+ N_GP_DEVICE_ID
+} gp_device_ID_t;
+
+typedef enum {
+ GP_TIMER0_ID = 0,
+ GP_TIMER1_ID,
+ GP_TIMER2_ID,
+ GP_TIMER3_ID,
+ GP_TIMER4_ID,
+ GP_TIMER5_ID,
+ GP_TIMER6_ID,
+ GP_TIMER7_ID,
+ N_GP_TIMER_ID
+} gp_timer_ID_t;
+
+typedef enum {
+ GPIO0_ID = 0,
+ N_GPIO_ID
+} gpio_ID_t;
+
+typedef enum {
+ TIMED_CTRL0_ID = 0,
+ N_TIMED_CTRL_ID
+} timed_ctrl_ID_t;
+
+typedef enum {
+ INPUT_FORMATTER0_ID = 0,
+ INPUT_FORMATTER1_ID,
+ INPUT_FORMATTER2_ID,
+ INPUT_FORMATTER3_ID,
+ N_INPUT_FORMATTER_ID
+} input_formatter_ID_t;
+
+/* The IF RST is outside the IF */
+#define INPUT_FORMATTER0_SRST_OFFSET 0x0824
+#define INPUT_FORMATTER1_SRST_OFFSET 0x0624
+#define INPUT_FORMATTER2_SRST_OFFSET 0x0424
+#define INPUT_FORMATTER3_SRST_OFFSET 0x0224
+
+#define INPUT_FORMATTER0_SRST_MASK 0x0001
+#define INPUT_FORMATTER1_SRST_MASK 0x0002
+#define INPUT_FORMATTER2_SRST_MASK 0x0004
+#define INPUT_FORMATTER3_SRST_MASK 0x0008
+
+typedef enum {
+ INPUT_SYSTEM0_ID = 0,
+ N_INPUT_SYSTEM_ID
+} input_system_ID_t;
+
+typedef enum {
+ RX0_ID = 0,
+ N_RX_ID
+} rx_ID_t;
+
+typedef enum {
+ MIPI_PORT0_ID = 0,
+ MIPI_PORT1_ID,
+ MIPI_PORT2_ID,
+ N_MIPI_PORT_ID
+} mipi_port_ID_t;
+
+#define N_RX_CHANNEL_ID 4
+
+/* Generic port enumeration with an internal port type ID */
+typedef enum {
+ CSI_PORT0_ID = 0,
+ CSI_PORT1_ID,
+ CSI_PORT2_ID,
+ TPG_PORT0_ID,
+ PRBS_PORT0_ID,
+ FIFO_PORT0_ID,
+ MEMORY_PORT0_ID,
+ N_INPUT_PORT_ID
+} input_port_ID_t;
+
+typedef enum {
+ CAPTURE_UNIT0_ID = 0,
+ CAPTURE_UNIT1_ID,
+ CAPTURE_UNIT2_ID,
+ ACQUISITION_UNIT0_ID,
+ DMA_UNIT0_ID,
+ CTRL_UNIT0_ID,
+ GPREGS_UNIT0_ID,
+ FIFO_UNIT0_ID,
+ IRQ_UNIT0_ID,
+ N_SUB_SYSTEM_ID
+} sub_system_ID_t;
+
+#define N_CAPTURE_UNIT_ID 3
+#define N_ACQUISITION_UNIT_ID 1
+#define N_CTRL_UNIT_ID 1
+
+/*
+ * Input-buffer Controller.
+ */
+typedef enum {
+ IBUF_CTRL0_ID = 0, /* map to ISYS2401_IBUF_CNTRL_A */
+ IBUF_CTRL1_ID, /* map to ISYS2401_IBUF_CNTRL_B */
+	IBUF_CTRL2_ID,		/* map to ISYS2401_IBUF_CNTRL_C */
+ N_IBUF_CTRL_ID
+} ibuf_ctrl_ID_t;
+/** end of Input-buffer Controller */
+
+/*
+ * Stream2MMIO.
+ */
+typedef enum {
+ STREAM2MMIO0_ID = 0, /* map to ISYS2401_S2M_A */
+ STREAM2MMIO1_ID, /* map to ISYS2401_S2M_B */
+ STREAM2MMIO2_ID, /* map to ISYS2401_S2M_C */
+ N_STREAM2MMIO_ID
+} stream2mmio_ID_t;
+
+typedef enum {
+ /*
+ * Stream2MMIO 0 has 8 SIDs that are indexed by
+ * [STREAM2MMIO_SID0_ID...STREAM2MMIO_SID7_ID].
+ *
+ * Stream2MMIO 1 has 4 SIDs that are indexed by
+	 * [STREAM2MMIO_SID0_ID...STREAM2MMIO_SID3_ID].
+ *
+ * Stream2MMIO 2 has 4 SIDs that are indexed by
+ * [STREAM2MMIO_SID0_ID...STREAM2MMIO_SID3_ID].
+ */
+ STREAM2MMIO_SID0_ID = 0,
+ STREAM2MMIO_SID1_ID,
+ STREAM2MMIO_SID2_ID,
+ STREAM2MMIO_SID3_ID,
+ STREAM2MMIO_SID4_ID,
+ STREAM2MMIO_SID5_ID,
+ STREAM2MMIO_SID6_ID,
+ STREAM2MMIO_SID7_ID,
+ N_STREAM2MMIO_SID_ID
+} stream2mmio_sid_ID_t;
+/** end of Stream2MMIO */
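The SID layout spelled out in the stream2mmio_sid_ID_t comment above (8 SIDs on instance 0, 4 each on instances 1 and 2) can be captured in a small helper; the sketch below is hypothetical and not part of the patch:

/* Hypothetical helper reflecting the SID counts documented above. */
static unsigned int stream2mmio_num_sids(stream2mmio_ID_t id)
{
	switch (id) {
	case STREAM2MMIO0_ID:
		return 8;
	case STREAM2MMIO1_ID:
	case STREAM2MMIO2_ID:
		return 4;
	default:
		return 0;
	}
}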
+
+/**
+ * Input System 2401: CSI-MIPI receiver.
+ */
+typedef enum {
+ CSI_RX_BACKEND0_ID = 0, /* map to ISYS2401_MIPI_BE_A */
+ CSI_RX_BACKEND1_ID, /* map to ISYS2401_MIPI_BE_B */
+ CSI_RX_BACKEND2_ID, /* map to ISYS2401_MIPI_BE_C */
+ N_CSI_RX_BACKEND_ID
+} csi_rx_backend_ID_t;
+
+typedef enum {
+ CSI_RX_FRONTEND0_ID = 0, /* map to ISYS2401_CSI_RX_A */
+ CSI_RX_FRONTEND1_ID, /* map to ISYS2401_CSI_RX_B */
+ CSI_RX_FRONTEND2_ID, /* map to ISYS2401_CSI_RX_C */
+#define N_CSI_RX_FRONTEND_ID (CSI_RX_FRONTEND2_ID+1)
+} csi_rx_frontend_ID_t;
+
+typedef enum {
+ CSI_RX_DLANE0_ID = 0, /* map to DLANE0 in CSI RX */
+ CSI_RX_DLANE1_ID, /* map to DLANE1 in CSI RX */
+ CSI_RX_DLANE2_ID, /* map to DLANE2 in CSI RX */
+ CSI_RX_DLANE3_ID, /* map to DLANE3 in CSI RX */
+ N_CSI_RX_DLANE_ID
+} csi_rx_fe_dlane_ID_t;
+/** end of CSI-MIPI receiver */
+
+typedef enum {
+ ISYS2401_DMA0_ID = 0,
+ N_ISYS2401_DMA_ID
+} isys2401_dma_ID_t;
+
+/**
+ * Pixel-generator. ("system_global.h")
+ */
+typedef enum {
+ PIXELGEN0_ID = 0,
+ PIXELGEN1_ID,
+ PIXELGEN2_ID,
+ N_PIXELGEN_ID
+} pixelgen_ID_t;
+/** end of pixel-generator. ("system_global.h") */
+
+typedef enum {
+ INPUT_SYSTEM_CSI_PORT0_ID = 0,
+ INPUT_SYSTEM_CSI_PORT1_ID,
+ INPUT_SYSTEM_CSI_PORT2_ID,
+
+ INPUT_SYSTEM_PIXELGEN_PORT0_ID,
+ INPUT_SYSTEM_PIXELGEN_PORT1_ID,
+ INPUT_SYSTEM_PIXELGEN_PORT2_ID,
+
+ N_INPUT_SYSTEM_INPUT_PORT_ID
+} input_system_input_port_ID_t;
+
+#define N_INPUT_SYSTEM_CSI_PORT 3
+
+typedef enum {
+ ISYS2401_DMA_CHANNEL_0 = 0,
+ ISYS2401_DMA_CHANNEL_1,
+ ISYS2401_DMA_CHANNEL_2,
+ ISYS2401_DMA_CHANNEL_3,
+ ISYS2401_DMA_CHANNEL_4,
+ ISYS2401_DMA_CHANNEL_5,
+ ISYS2401_DMA_CHANNEL_6,
+ ISYS2401_DMA_CHANNEL_7,
+ ISYS2401_DMA_CHANNEL_8,
+ ISYS2401_DMA_CHANNEL_9,
+ ISYS2401_DMA_CHANNEL_10,
+ ISYS2401_DMA_CHANNEL_11,
+ N_ISYS2401_DMA_CHANNEL
+} isys2401_dma_channel;
+
+enum ia_css_isp_memories {
+ IA_CSS_ISP_PMEM0 = 0,
+ IA_CSS_ISP_DMEM0,
+ IA_CSS_ISP_VMEM0,
+ IA_CSS_ISP_VAMEM0,
+ IA_CSS_ISP_VAMEM1,
+ IA_CSS_ISP_VAMEM2,
+ IA_CSS_ISP_HMEM0,
+ IA_CSS_SP_DMEM0,
+ IA_CSS_DDR,
+ N_IA_CSS_MEMORIES
+};
+#define IA_CSS_NUM_MEMORIES 9
+/* For driver compatibility */
+#define N_IA_CSS_ISP_MEMORIES IA_CSS_NUM_MEMORIES
+#define IA_CSS_NUM_ISP_MEMORIES IA_CSS_NUM_MEMORIES
+
+#endif /* __SYSTEM_GLOBAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hive_isp_css_2401_system_generated/ia_css_isp_configs.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hive_isp_css_2401_system_generated/ia_css_isp_configs.c
new file mode 100644
index 000000000000..325b821f276c
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hive_isp_css_2401_system_generated/ia_css_isp_configs.c
@@ -0,0 +1,360 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+/* Generated code: do not edit or commit. */
+
+#define IA_CSS_INCLUDE_CONFIGURATIONS
+#include "ia_css_pipeline.h"
+#include "ia_css_isp_configs.h"
+#include "ia_css_debug.h"
+#include "assert_support.h"
+
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+
+void
+ia_css_configure_iterator(
+ const struct ia_css_binary *binary,
+ const struct ia_css_iterator_configuration *config_dmem)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_iterator() enter:\n");
+
+ {
+ unsigned offset = 0;
+ unsigned size = 0;
+ if (binary->info->mem_offsets.offsets.config) {
+ size = binary->info->mem_offsets.offsets.config->dmem.iterator.size;
+ offset = binary->info->mem_offsets.offsets.config->dmem.iterator.offset;
+ }
+ if (size) {
+ ia_css_iterator_config((struct sh_css_isp_iterator_isp_config *)
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
+ config_dmem, size); }
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_iterator() leave:\n");
+}
+
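Every generated ia_css_configure_<kernel>() in this file, starting with ia_css_configure_iterator() above, follows the same shape: look up the kernel's size/offset record under mem_offsets.offsets.config (after checking that the table exists) and, only when the kernel is present in the binary (size != 0), encode the configuration into the CONFIG-class DMEM parameter copy. A condensed sketch of that pattern; the generic function and its parameters are hypothetical, since the real file emits one specialized copy per kernel:

/* Hypothetical generic form of the generated configure functions.
 * 'par' would be e.g. &binary->info->mem_offsets.offsets.config->dmem.crop,
 * and 'encode' stands for the kernel-specific encoder. */
static void configure_generic(const struct ia_css_binary *binary,
			      const struct ia_css_isp_parameter *par,
			      const void *config_dmem,
			      void (*encode)(void *to, const void *from,
					     unsigned int size))
{
	if (par->size)
		encode((void *)&binary->mem_params.params
				[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM]
				.address[par->offset],
		       config_dmem, par->size);
}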
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+
+void
+ia_css_configure_copy_output(
+ const struct ia_css_binary *binary,
+ const struct ia_css_copy_output_configuration *config_dmem)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_copy_output() enter:\n");
+
+ {
+ unsigned offset = 0;
+ unsigned size = 0;
+ if (binary->info->mem_offsets.offsets.config) {
+ size = binary->info->mem_offsets.offsets.config->dmem.copy_output.size;
+ offset = binary->info->mem_offsets.offsets.config->dmem.copy_output.offset;
+ }
+ if (size) {
+ ia_css_copy_output_config((struct sh_css_isp_copy_output_isp_config *)
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
+ config_dmem, size); }
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_copy_output() leave:\n");
+}
+
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+
+void
+ia_css_configure_crop(
+ const struct ia_css_binary *binary,
+ const struct ia_css_crop_configuration *config_dmem)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_crop() enter:\n");
+
+ {
+ unsigned offset = 0;
+ unsigned size = 0;
+ if (binary->info->mem_offsets.offsets.config) {
+ size = binary->info->mem_offsets.offsets.config->dmem.crop.size;
+ offset = binary->info->mem_offsets.offsets.config->dmem.crop.offset;
+ }
+ if (size) {
+ ia_css_crop_config((struct sh_css_isp_crop_isp_config *)
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
+ config_dmem, size); }
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_crop() leave:\n");
+}
+
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+
+void
+ia_css_configure_fpn(
+ const struct ia_css_binary *binary,
+ const struct ia_css_fpn_configuration *config_dmem)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_fpn() enter:\n");
+
+ {
+ unsigned offset = 0;
+ unsigned size = 0;
+ if (binary->info->mem_offsets.offsets.config) {
+ size = binary->info->mem_offsets.offsets.config->dmem.fpn.size;
+ offset = binary->info->mem_offsets.offsets.config->dmem.fpn.offset;
+ }
+ if (size) {
+ ia_css_fpn_config((struct sh_css_isp_fpn_isp_config *)
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
+ config_dmem, size); }
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_fpn() leave:\n");
+}
+
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+
+void
+ia_css_configure_dvs(
+ const struct ia_css_binary *binary,
+ const struct ia_css_dvs_configuration *config_dmem)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_dvs() enter:\n");
+
+ {
+ unsigned offset = 0;
+ unsigned size = 0;
+ if (binary->info->mem_offsets.offsets.config) {
+ size = binary->info->mem_offsets.offsets.config->dmem.dvs.size;
+ offset = binary->info->mem_offsets.offsets.config->dmem.dvs.offset;
+ }
+ if (size) {
+ ia_css_dvs_config((struct sh_css_isp_dvs_isp_config *)
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
+ config_dmem, size); }
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_dvs() leave:\n");
+}
+
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+
+void
+ia_css_configure_qplane(
+ const struct ia_css_binary *binary,
+ const struct ia_css_qplane_configuration *config_dmem)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_qplane() enter:\n");
+
+ {
+ unsigned offset = 0;
+ unsigned size = 0;
+ if (binary->info->mem_offsets.offsets.config) {
+ size = binary->info->mem_offsets.offsets.config->dmem.qplane.size;
+ offset = binary->info->mem_offsets.offsets.config->dmem.qplane.offset;
+ }
+ if (size) {
+ ia_css_qplane_config((struct sh_css_isp_qplane_isp_config *)
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
+ config_dmem, size); }
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_qplane() leave:\n");
+}
+
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+
+void
+ia_css_configure_output0(
+ const struct ia_css_binary *binary,
+ const struct ia_css_output0_configuration *config_dmem)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_output0() enter:\n");
+
+ {
+ unsigned offset = 0;
+ unsigned size = 0;
+ if (binary->info->mem_offsets.offsets.config) {
+ size = binary->info->mem_offsets.offsets.config->dmem.output0.size;
+ offset = binary->info->mem_offsets.offsets.config->dmem.output0.offset;
+ }
+ if (size) {
+ ia_css_output0_config((struct sh_css_isp_output_isp_config *)
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
+ config_dmem, size); }
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_output0() leave:\n");
+}
+
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+
+void
+ia_css_configure_output1(
+ const struct ia_css_binary *binary,
+ const struct ia_css_output1_configuration *config_dmem)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_output1() enter:\n");
+
+ {
+ unsigned offset = 0;
+ unsigned size = 0;
+ if (binary->info->mem_offsets.offsets.config) {
+ size = binary->info->mem_offsets.offsets.config->dmem.output1.size;
+ offset = binary->info->mem_offsets.offsets.config->dmem.output1.offset;
+ }
+ if (size) {
+ ia_css_output1_config((struct sh_css_isp_output_isp_config *)
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
+ config_dmem, size); }
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_output1() leave:\n");
+}
+
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+
+void
+ia_css_configure_output(
+ const struct ia_css_binary *binary,
+ const struct ia_css_output_configuration *config_dmem)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_output() enter:\n");
+
+ {
+ unsigned offset = 0;
+ unsigned size = 0;
+ if (binary->info->mem_offsets.offsets.config) {
+ size = binary->info->mem_offsets.offsets.config->dmem.output.size;
+ offset = binary->info->mem_offsets.offsets.config->dmem.output.offset;
+ }
+ if (size) {
+ ia_css_output_config((struct sh_css_isp_output_isp_config *)
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
+ config_dmem, size); }
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_output() leave:\n");
+}
+
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+#ifdef ISP2401
+
+void
+ia_css_configure_sc(
+ const struct ia_css_binary *binary,
+ const struct ia_css_sc_configuration *config_dmem)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_sc() enter:\n");
+
+ {
+ unsigned offset = 0;
+ unsigned size = 0;
+ if (binary->info->mem_offsets.offsets.config) {
+ size = binary->info->mem_offsets.offsets.config->dmem.sc.size;
+ offset = binary->info->mem_offsets.offsets.config->dmem.sc.offset;
+ }
+ if (size) {
+ ia_css_sc_config((struct sh_css_isp_sc_isp_config *)
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
+ config_dmem, size); }
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_sc() leave:\n");
+}
+
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+#endif
+
+void
+ia_css_configure_raw(
+ const struct ia_css_binary *binary,
+ const struct ia_css_raw_configuration *config_dmem)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_raw() enter:\n");
+
+ {
+ unsigned offset = 0;
+ unsigned size = 0;
+ if (binary->info->mem_offsets.offsets.config) {
+ size = binary->info->mem_offsets.offsets.config->dmem.raw.size;
+ offset = binary->info->mem_offsets.offsets.config->dmem.raw.offset;
+ }
+ if (size) {
+ ia_css_raw_config((struct sh_css_isp_raw_isp_config *)
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
+ config_dmem, size); }
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_raw() leave:\n");
+}
+
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+
+void
+ia_css_configure_tnr(
+ const struct ia_css_binary *binary,
+ const struct ia_css_tnr_configuration *config_dmem)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_tnr() enter:\n");
+
+ {
+ unsigned offset = 0;
+ unsigned size = 0;
+ if (binary->info->mem_offsets.offsets.config) {
+ size = binary->info->mem_offsets.offsets.config->dmem.tnr.size;
+ offset = binary->info->mem_offsets.offsets.config->dmem.tnr.offset;
+ }
+ if (size) {
+ ia_css_tnr_config((struct sh_css_isp_tnr_isp_config *)
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
+ config_dmem, size); }
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_tnr() leave:\n");
+}
+
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+
+void
+ia_css_configure_ref(
+ const struct ia_css_binary *binary,
+ const struct ia_css_ref_configuration *config_dmem)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_ref() enter:\n");
+
+ {
+ unsigned offset = 0;
+ unsigned size = 0;
+ if (binary->info->mem_offsets.offsets.config) {
+ size = binary->info->mem_offsets.offsets.config->dmem.ref.size;
+ offset = binary->info->mem_offsets.offsets.config->dmem.ref.offset;
+ }
+ if (size) {
+ ia_css_ref_config((struct sh_css_isp_ref_isp_config *)
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
+ config_dmem, size); }
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_ref() leave:\n");
+}
+
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+
+void
+ia_css_configure_vf(
+ const struct ia_css_binary *binary,
+ const struct ia_css_vf_configuration *config_dmem)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_vf() enter:\n");
+
+ {
+ unsigned offset = 0;
+ unsigned size = 0;
+ if (binary->info->mem_offsets.offsets.config) {
+ size = binary->info->mem_offsets.offsets.config->dmem.vf.size;
+ offset = binary->info->mem_offsets.offsets.config->dmem.vf.offset;
+ }
+ if (size) {
+ ia_css_vf_config((struct sh_css_isp_vf_isp_config *)
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
+ config_dmem, size); }
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_configure_vf() leave:\n");
+}
+
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hive_isp_css_2401_system_generated/ia_css_isp_configs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hive_isp_css_2401_system_generated/ia_css_isp_configs.h
new file mode 100644
index 000000000000..8aacd3dbc05a
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hive_isp_css_2401_system_generated/ia_css_isp_configs.h
@@ -0,0 +1,189 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifdef IA_CSS_INCLUDE_CONFIGURATIONS
+#include "isp/kernels/crop/crop_1.0/ia_css_crop.host.h"
+#include "isp/kernels/dvs/dvs_1.0/ia_css_dvs.host.h"
+#include "isp/kernels/fpn/fpn_1.0/ia_css_fpn.host.h"
+#include "isp/kernels/ob/ob_1.0/ia_css_ob.host.h"
+#include "isp/kernels/output/output_1.0/ia_css_output.host.h"
+#include "isp/kernels/qplane/qplane_2/ia_css_qplane.host.h"
+#include "isp/kernels/raw/raw_1.0/ia_css_raw.host.h"
+#include "isp/kernels/ref/ref_1.0/ia_css_ref.host.h"
+#include "isp/kernels/s3a/s3a_1.0/ia_css_s3a.host.h"
+#ifdef ISP2401
+#include "isp/kernels/sc/sc_1.0/ia_css_sc.host.h"
+#endif
+#include "isp/kernels/tnr/tnr_1.0/ia_css_tnr.host.h"
+#include "isp/kernels/vf/vf_1.0/ia_css_vf.host.h"
+#include "isp/kernels/iterator/iterator_1.0/ia_css_iterator.host.h"
+#include "isp/kernels/copy_output/copy_output_1.0/ia_css_copy_output.host.h"
+#endif /* IA_CSS_INCLUDE_CONFIGURATIONS */
+/* Generated code: do not edit or commit. */
+
+#ifndef _IA_CSS_ISP_CONFIG_H
+#define _IA_CSS_ISP_CONFIG_H
+
+/* Code generated by genparam/gencode.c:gen_param_enum() */
+
+enum ia_css_configuration_ids {
+ IA_CSS_ITERATOR_CONFIG_ID,
+ IA_CSS_COPY_OUTPUT_CONFIG_ID,
+ IA_CSS_CROP_CONFIG_ID,
+ IA_CSS_FPN_CONFIG_ID,
+ IA_CSS_DVS_CONFIG_ID,
+ IA_CSS_QPLANE_CONFIG_ID,
+ IA_CSS_OUTPUT0_CONFIG_ID,
+ IA_CSS_OUTPUT1_CONFIG_ID,
+ IA_CSS_OUTPUT_CONFIG_ID,
+#ifdef ISP2401
+ IA_CSS_SC_CONFIG_ID,
+#endif
+ IA_CSS_RAW_CONFIG_ID,
+ IA_CSS_TNR_CONFIG_ID,
+ IA_CSS_REF_CONFIG_ID,
+ IA_CSS_VF_CONFIG_ID,
+ IA_CSS_NUM_CONFIGURATION_IDS
+};
+
+/* Code generated by genparam/gencode.c:gen_param_offsets() */
+
+struct ia_css_config_memory_offsets {
+ struct {
+ struct ia_css_isp_parameter iterator;
+ struct ia_css_isp_parameter copy_output;
+ struct ia_css_isp_parameter crop;
+ struct ia_css_isp_parameter fpn;
+ struct ia_css_isp_parameter dvs;
+ struct ia_css_isp_parameter qplane;
+ struct ia_css_isp_parameter output0;
+ struct ia_css_isp_parameter output1;
+ struct ia_css_isp_parameter output;
+#ifdef ISP2401
+ struct ia_css_isp_parameter sc;
+#endif
+ struct ia_css_isp_parameter raw;
+ struct ia_css_isp_parameter tnr;
+ struct ia_css_isp_parameter ref;
+ struct ia_css_isp_parameter vf;
+ } dmem;
+};
+
+#if defined(IA_CSS_INCLUDE_CONFIGURATIONS)
+
+#include "ia_css_stream.h" /* struct ia_css_stream */
+#include "ia_css_binary.h" /* struct ia_css_binary */
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+
+void
+ia_css_configure_iterator(
+ const struct ia_css_binary *binary,
+ const struct ia_css_iterator_configuration *config_dmem);
+
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+
+void
+ia_css_configure_copy_output(
+ const struct ia_css_binary *binary,
+ const struct ia_css_copy_output_configuration *config_dmem);
+
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+
+void
+ia_css_configure_crop(
+ const struct ia_css_binary *binary,
+ const struct ia_css_crop_configuration *config_dmem);
+
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+
+void
+ia_css_configure_fpn(
+ const struct ia_css_binary *binary,
+ const struct ia_css_fpn_configuration *config_dmem);
+
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+
+void
+ia_css_configure_dvs(
+ const struct ia_css_binary *binary,
+ const struct ia_css_dvs_configuration *config_dmem);
+
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+
+void
+ia_css_configure_qplane(
+ const struct ia_css_binary *binary,
+ const struct ia_css_qplane_configuration *config_dmem);
+
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+
+void
+ia_css_configure_output0(
+ const struct ia_css_binary *binary,
+ const struct ia_css_output0_configuration *config_dmem);
+
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+
+void
+ia_css_configure_output1(
+ const struct ia_css_binary *binary,
+ const struct ia_css_output1_configuration *config_dmem);
+
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+
+void
+ia_css_configure_output(
+ const struct ia_css_binary *binary,
+ const struct ia_css_output_configuration *config_dmem);
+
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+
+#ifdef ISP2401
+void
+ia_css_configure_sc(
+ const struct ia_css_binary *binary,
+ const struct ia_css_sc_configuration *config_dmem);
+
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+
+#endif
+void
+ia_css_configure_raw(
+ const struct ia_css_binary *binary,
+ const struct ia_css_raw_configuration *config_dmem);
+
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+
+void
+ia_css_configure_tnr(
+ const struct ia_css_binary *binary,
+ const struct ia_css_tnr_configuration *config_dmem);
+
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+
+void
+ia_css_configure_ref(
+ const struct ia_css_binary *binary,
+ const struct ia_css_ref_configuration *config_dmem);
+
+/* Code generated by genparam/genconfig.c:gen_configure_function() */
+
+void
+ia_css_configure_vf(
+ const struct ia_css_binary *binary,
+ const struct ia_css_vf_configuration *config_dmem);
+
+#endif /* IA_CSS_INCLUDE_CONFIGURATIONS */
+
+#endif /* _IA_CSS_ISP_CONFIG_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hive_isp_css_2401_system_generated/ia_css_isp_params.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hive_isp_css_2401_system_generated/ia_css_isp_params.c
new file mode 100644
index 000000000000..11e4463ebb50
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hive_isp_css_2401_system_generated/ia_css_isp_params.c
@@ -0,0 +1,3220 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#define IA_CSS_INCLUDE_PARAMETERS
+#include "sh_css_params.h"
+#include "isp/kernels/aa/aa_2/ia_css_aa2.host.h"
+#include "isp/kernels/anr/anr_1.0/ia_css_anr.host.h"
+#include "isp/kernels/anr/anr_2/ia_css_anr2.host.h"
+#include "isp/kernels/bh/bh_2/ia_css_bh.host.h"
+#include "isp/kernels/bnr/bnr_1.0/ia_css_bnr.host.h"
+#include "isp/kernels/bnr/bnr2_2/ia_css_bnr2_2.host.h"
+#include "isp/kernels/cnr/cnr_2/ia_css_cnr2.host.h"
+#include "isp/kernels/crop/crop_1.0/ia_css_crop.host.h"
+#include "isp/kernels/csc/csc_1.0/ia_css_csc.host.h"
+#include "isp/kernels/ctc/ctc_1.0/ia_css_ctc.host.h"
+#include "isp/kernels/ctc/ctc1_5/ia_css_ctc1_5.host.h"
+#include "isp/kernels/ctc/ctc2/ia_css_ctc2.host.h"
+#include "isp/kernels/de/de_1.0/ia_css_de.host.h"
+#include "isp/kernels/de/de_2/ia_css_de2.host.h"
+#include "isp/kernels/dp/dp_1.0/ia_css_dp.host.h"
+#include "isp/kernels/fixedbds/fixedbds_1.0/ia_css_fixedbds_param.h"
+#include "isp/kernels/fpn/fpn_1.0/ia_css_fpn.host.h"
+#include "isp/kernels/gc/gc_1.0/ia_css_gc.host.h"
+#include "isp/kernels/gc/gc_2/ia_css_gc2.host.h"
+#include "isp/kernels/macc/macc_1.0/ia_css_macc.host.h"
+#include "isp/kernels/macc/macc1_5/ia_css_macc1_5.host.h"
+#include "isp/kernels/ob/ob_1.0/ia_css_ob.host.h"
+#include "isp/kernels/ob/ob2/ia_css_ob2.host.h"
+#include "isp/kernels/output/output_1.0/ia_css_output.host.h"
+#include "isp/kernels/raw_aa_binning/raw_aa_binning_1.0/ia_css_raa.host.h"
+#include "isp/kernels/s3a/s3a_1.0/ia_css_s3a.host.h"
+#include "isp/kernels/sc/sc_1.0/ia_css_sc.host.h"
+#include "isp/kernels/sdis/sdis_1.0/ia_css_sdis.host.h"
+#include "isp/kernels/sdis/sdis_2/ia_css_sdis2.host.h"
+#include "isp/kernels/tnr/tnr_1.0/ia_css_tnr.host.h"
+#include "isp/kernels/uds/uds_1.0/ia_css_uds_param.h"
+#include "isp/kernels/wb/wb_1.0/ia_css_wb.host.h"
+#include "isp/kernels/xnr/xnr_1.0/ia_css_xnr.host.h"
+#include "isp/kernels/xnr/xnr_3.0/ia_css_xnr3.host.h"
+#include "isp/kernels/ynr/ynr_1.0/ia_css_ynr.host.h"
+#include "isp/kernels/ynr/ynr_2/ia_css_ynr2.host.h"
+#include "isp/kernels/fc/fc_1.0/ia_css_formats.host.h"
+#include "isp/kernels/tdf/tdf_1.0/ia_css_tdf.host.h"
+#include "isp/kernels/dpc2/ia_css_dpc2.host.h"
+#include "isp/kernels/eed1_8/ia_css_eed1_8.host.h"
+#include "isp/kernels/bnlm/ia_css_bnlm.host.h"
+#include "isp/kernels/conversion/conversion_1.0/ia_css_conversion.host.h"
+/* Generated code: do not edit or commit. */
+
+#include "ia_css_pipeline.h"
+#include "ia_css_isp_params.h"
+#include "ia_css_debug.h"
+#include "assert_support.h"
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_aa(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.aa.size;
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.aa.offset;
+
+ if (size) {
+ struct sh_css_isp_aa_params *t = (struct sh_css_isp_aa_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset];
+ t->strength = params->aa_config.strength;
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_anr(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.anr.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.anr.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_anr() enter:\n");
+
+ ia_css_anr_encode((struct sh_css_isp_anr_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->anr_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_anr() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_anr2(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->vmem.anr2.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->vmem.anr2.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_anr2() enter:\n");
+
+ ia_css_anr2_vmem_encode((struct ia_css_isp_anr2_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VMEM].address[offset],
+ &params->anr_thres,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_anr2() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_bh(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.bh.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.bh.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_bh() enter:\n");
+
+ ia_css_bh_encode((struct sh_css_isp_bh_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->s3a_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_bh() leave:\n");
+ }
+
+ }
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->hmem0.bh.size;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_bh() enter:\n");
+
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_HMEM0] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_bh() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_cnr(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.cnr.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.cnr.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_cnr() enter:\n");
+
+ ia_css_cnr_encode((struct sh_css_isp_cnr_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->cnr_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_cnr() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_crop(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.crop.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.crop.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_crop() enter:\n");
+
+ ia_css_crop_encode((struct sh_css_isp_crop_isp_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->crop_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_crop() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_csc(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.csc.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.csc.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_csc() enter:\n");
+
+ ia_css_csc_encode((struct sh_css_isp_csc_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->cc_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_csc() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_dp(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.dp.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.dp.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_dp() enter:\n");
+
+ ia_css_dp_encode((struct sh_css_isp_dp_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->dp_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_dp() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_bnr(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.bnr.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.bnr.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_bnr() enter:\n");
+
+ ia_css_bnr_encode((struct sh_css_isp_bnr_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->nr_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_bnr() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_de(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.de.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.de.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_de() enter:\n");
+
+ ia_css_de_encode((struct sh_css_isp_de_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->de_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_de() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_ecd(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.ecd.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.ecd.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_ecd() enter:\n");
+
+ ia_css_ecd_encode((struct sh_css_isp_ecd_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->ecd_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_ecd() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_formats(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.formats.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.formats.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_formats() enter:\n");
+
+ ia_css_formats_encode((struct sh_css_isp_formats_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->formats_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_formats() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_fpn(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.fpn.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.fpn.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_fpn() enter:\n");
+
+ ia_css_fpn_encode((struct sh_css_isp_fpn_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->fpn_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_fpn() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_gc(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.gc.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.gc.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_gc() enter:\n");
+
+ ia_css_gc_encode((struct sh_css_isp_gc_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->gc_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_gc() leave:\n");
+ }
+
+ }
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->vamem1.gc.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->vamem1.gc.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_gc() enter:\n");
+
+ ia_css_gc_vamem_encode((struct sh_css_isp_gc_vamem_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VAMEM1].address[offset],
+ &params->gc_table,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VAMEM1] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_gc() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_ce(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.ce.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.ce.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_ce() enter:\n");
+
+ ia_css_ce_encode((struct sh_css_isp_ce_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->ce_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_ce() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_yuv2rgb(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.yuv2rgb.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.yuv2rgb.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_yuv2rgb() enter:\n");
+
+ ia_css_yuv2rgb_encode((struct sh_css_isp_csc_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->yuv2rgb_cc_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_yuv2rgb() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_rgb2yuv(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.rgb2yuv.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.rgb2yuv.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_rgb2yuv() enter:\n");
+
+ ia_css_rgb2yuv_encode((struct sh_css_isp_csc_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->rgb2yuv_cc_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_rgb2yuv() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_r_gamma(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->vamem0.r_gamma.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->vamem0.r_gamma.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_r_gamma() enter:\n");
+
+ ia_css_r_gamma_vamem_encode((struct sh_css_isp_rgb_gamma_vamem_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VAMEM0].address[offset],
+ &params->r_gamma_table,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VAMEM0] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_r_gamma() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_g_gamma(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->vamem1.g_gamma.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->vamem1.g_gamma.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_g_gamma() enter:\n");
+
+ ia_css_g_gamma_vamem_encode((struct sh_css_isp_rgb_gamma_vamem_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VAMEM1].address[offset],
+ &params->g_gamma_table,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VAMEM1] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_g_gamma() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_b_gamma(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->vamem2.b_gamma.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->vamem2.b_gamma.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_b_gamma() enter:\n");
+
+ ia_css_b_gamma_vamem_encode((struct sh_css_isp_rgb_gamma_vamem_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VAMEM2].address[offset],
+ &params->b_gamma_table,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VAMEM2] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_b_gamma() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_uds(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.uds.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.uds.offset;
+
+ if (size) {
+ struct sh_css_sp_uds_params *p;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_uds() enter:\n");
+
+ p = (struct sh_css_sp_uds_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset];
+ p->crop_pos = params->uds_config.crop_pos;
+ p->uds = params->uds_config.uds;
+
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_uds() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_raa(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.raa.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.raa.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_raa() enter:\n");
+
+ ia_css_raa_encode((struct sh_css_isp_aa_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->raa_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_raa() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_s3a(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.s3a.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.s3a.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_s3a() enter:\n");
+
+ ia_css_s3a_encode((struct sh_css_isp_s3a_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->s3a_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_s3a() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_ob(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.ob.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.ob.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_ob() enter:\n");
+
+ ia_css_ob_encode((struct sh_css_isp_ob_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->ob_config,
+ &params->stream_configs.ob, size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_ob() leave:\n");
+ }
+
+ }
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->vmem.ob.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->vmem.ob.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_ob() enter:\n");
+
+ ia_css_ob_vmem_encode((struct sh_css_isp_ob_vmem_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VMEM].address[offset],
+ &params->ob_config,
+ &params->stream_configs.ob, size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_ob() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_output(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.output.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.output.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_output() enter:\n");
+
+ ia_css_output_encode((struct sh_css_isp_output_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->output_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_output() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_sc(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.sc.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.sc.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_sc() enter:\n");
+
+ ia_css_sc_encode((struct sh_css_isp_sc_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->sc_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_sc() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_bds(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.bds.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.bds.offset;
+
+ if (size) {
+ struct sh_css_isp_bds_params *p;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_bds() enter:\n");
+
+ p = (struct sh_css_isp_bds_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset];
+ p->baf_strength = params->bds_config.strength;
+
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_bds() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_tnr(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.tnr.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.tnr.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_tnr() enter:\n");
+
+ ia_css_tnr_encode((struct sh_css_isp_tnr_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->tnr_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_tnr() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_macc(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.macc.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.macc.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_macc() enter:\n");
+
+ ia_css_macc_encode((struct sh_css_isp_macc_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->macc_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_macc() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_sdis_horicoef(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->vmem.sdis_horicoef.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->vmem.sdis_horicoef.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_sdis_horicoef() enter:\n");
+
+ ia_css_sdis_horicoef_vmem_encode((struct sh_css_isp_sdis_hori_coef_tbl *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VMEM].address[offset],
+ &params->dvs_coefs,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_sdis_horicoef() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_sdis_vertcoef(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->vmem.sdis_vertcoef.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->vmem.sdis_vertcoef.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_sdis_vertcoef() enter:\n");
+
+ ia_css_sdis_vertcoef_vmem_encode((struct sh_css_isp_sdis_vert_coef_tbl *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VMEM].address[offset],
+ &params->dvs_coefs,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_sdis_vertcoef() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_sdis_horiproj(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.sdis_horiproj.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.sdis_horiproj.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_sdis_horiproj() enter:\n");
+
+ ia_css_sdis_horiproj_encode((struct sh_css_isp_sdis_hori_proj_tbl *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->dvs_coefs,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_sdis_horiproj() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_sdis_vertproj(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.sdis_vertproj.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.sdis_vertproj.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_sdis_vertproj() enter:\n");
+
+ ia_css_sdis_vertproj_encode((struct sh_css_isp_sdis_vert_proj_tbl *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->dvs_coefs,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_sdis_vertproj() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_sdis2_horicoef(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->vmem.sdis2_horicoef.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->vmem.sdis2_horicoef.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_sdis2_horicoef() enter:\n");
+
+ ia_css_sdis2_horicoef_vmem_encode((struct sh_css_isp_sdis_hori_coef_tbl *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VMEM].address[offset],
+ &params->dvs2_coefs,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_sdis2_horicoef() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_sdis2_vertcoef(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->vmem.sdis2_vertcoef.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->vmem.sdis2_vertcoef.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_sdis2_vertcoef() enter:\n");
+
+ ia_css_sdis2_vertcoef_vmem_encode((struct sh_css_isp_sdis_vert_coef_tbl *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VMEM].address[offset],
+ &params->dvs2_coefs,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_sdis2_vertcoef() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_sdis2_horiproj(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.sdis2_horiproj.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.sdis2_horiproj.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_sdis2_horiproj() enter:\n");
+
+ ia_css_sdis2_horiproj_encode((struct sh_css_isp_sdis_hori_proj_tbl *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->dvs2_coefs,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_sdis2_horiproj() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_sdis2_vertproj(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.sdis2_vertproj.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.sdis2_vertproj.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_sdis2_vertproj() enter:\n");
+
+ ia_css_sdis2_vertproj_encode((struct sh_css_isp_sdis_vert_proj_tbl *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->dvs2_coefs,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_sdis2_vertproj() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_wb(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.wb.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.wb.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_wb() enter:\n");
+
+ ia_css_wb_encode((struct sh_css_isp_wb_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->wb_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_wb() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_nr(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.nr.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.nr.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_nr() enter:\n");
+
+ ia_css_nr_encode((struct sh_css_isp_ynr_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->nr_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_nr() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_yee(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.yee.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.yee.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_yee() enter:\n");
+
+ ia_css_yee_encode((struct sh_css_isp_yee_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->yee_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_yee() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_ynr(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.ynr.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.ynr.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_ynr() enter:\n");
+
+ ia_css_ynr_encode((struct sh_css_isp_yee2_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->ynr_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_ynr() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_fc(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.fc.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.fc.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_fc() enter:\n");
+
+ ia_css_fc_encode((struct sh_css_isp_fc_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->fc_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_fc() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_ctc(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.ctc.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.ctc.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_ctc() enter:\n");
+
+ ia_css_ctc_encode((struct sh_css_isp_ctc_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->ctc_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_ctc() leave:\n");
+ }
+
+ }
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->vamem0.ctc.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->vamem0.ctc.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_ctc() enter:\n");
+
+ ia_css_ctc_vamem_encode((struct sh_css_isp_ctc_vamem_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VAMEM0].address[offset],
+ &params->ctc_table,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VAMEM0] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_ctc() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_xnr_table(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->vamem1.xnr_table.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->vamem1.xnr_table.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_xnr_table() enter:\n");
+
+ ia_css_xnr_table_vamem_encode((struct sh_css_isp_xnr_vamem_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VAMEM1].address[offset],
+ &params->xnr_table,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VAMEM1] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_xnr_table() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_xnr(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.xnr.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.xnr.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_xnr() enter:\n");
+
+ ia_css_xnr_encode((struct sh_css_isp_xnr_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->xnr_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_xnr() leave:\n");
+ }
+
+ }
+}
+
+/* Code generated by genparam/gencode.c:gen_process_function() */
+
+static void
+ia_css_process_xnr3(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.xnr3.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.xnr3.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_xnr3() enter:\n");
+
+ ia_css_xnr3_encode((struct sh_css_isp_xnr3_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->xnr3_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_xnr3() leave:\n");
+ }
+
+ }
+#ifdef ISP2401
+ {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->vmem.xnr3.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->vmem.xnr3.offset;
+
+ if (size) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_xnr3() enter:\n");
+
+ ia_css_xnr3_vmem_encode((struct sh_css_isp_xnr3_vmem_params *)
+ &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VMEM].address[offset],
+ &params->xnr3_config,
+ size);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VMEM] = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_xnr3() leave:\n");
+ }
+
+ }
+#endif
+}
+
+/* Code generated by genparam/gencode.c:gen_param_process_table() */
+
+void (* ia_css_kernel_process_param[IA_CSS_NUM_PARAMETER_IDS])(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params) = {
+ ia_css_process_aa,
+ ia_css_process_anr,
+ ia_css_process_anr2,
+ ia_css_process_bh,
+ ia_css_process_cnr,
+ ia_css_process_crop,
+ ia_css_process_csc,
+ ia_css_process_dp,
+ ia_css_process_bnr,
+ ia_css_process_de,
+ ia_css_process_ecd,
+ ia_css_process_formats,
+ ia_css_process_fpn,
+ ia_css_process_gc,
+ ia_css_process_ce,
+ ia_css_process_yuv2rgb,
+ ia_css_process_rgb2yuv,
+ ia_css_process_r_gamma,
+ ia_css_process_g_gamma,
+ ia_css_process_b_gamma,
+ ia_css_process_uds,
+ ia_css_process_raa,
+ ia_css_process_s3a,
+ ia_css_process_ob,
+ ia_css_process_output,
+ ia_css_process_sc,
+ ia_css_process_bds,
+ ia_css_process_tnr,
+ ia_css_process_macc,
+ ia_css_process_sdis_horicoef,
+ ia_css_process_sdis_vertcoef,
+ ia_css_process_sdis_horiproj,
+ ia_css_process_sdis_vertproj,
+ ia_css_process_sdis2_horicoef,
+ ia_css_process_sdis2_vertcoef,
+ ia_css_process_sdis2_horiproj,
+ ia_css_process_sdis2_vertproj,
+ ia_css_process_wb,
+ ia_css_process_nr,
+ ia_css_process_yee,
+ ia_css_process_ynr,
+ ia_css_process_fc,
+ ia_css_process_ctc,
+ ia_css_process_xnr_table,
+ ia_css_process_xnr,
+ ia_css_process_xnr3,
+};
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_dp_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_dp_config *config){
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_dp_config() enter: "
+ "config=%p\n",config);
+
+ *config = params->dp_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_dp_config() leave\n");
+ ia_css_dp_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_dp_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_dp_config *config)
+{
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_dp_config() enter:\n");
+ ia_css_dp_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->dp_config = *config;
+ params->config_changed[IA_CSS_DP_ID] = true;
+#ifndef ISP2401
+ params->config_changed[IA_CSS_DP_ID] = true;
+
+#endif
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_dp_config() leave: "
+ "return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_wb_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_wb_config *config){
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_wb_config() enter: "
+ "config=%p\n",config);
+
+ *config = params->wb_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_wb_config() leave\n");
+ ia_css_wb_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_wb_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_wb_config *config)
+{
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_wb_config() enter:\n");
+ ia_css_wb_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->wb_config = *config;
+ params->config_changed[IA_CSS_WB_ID] = true;
+#ifndef ISP2401
+ params->config_changed[IA_CSS_WB_ID] = true;
+
+#endif
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_wb_config() leave: "
+ "return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_tnr_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_tnr_config *config){
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_tnr_config() enter: "
+ "config=%p\n",config);
+
+ *config = params->tnr_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_tnr_config() leave\n");
+ ia_css_tnr_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_tnr_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_tnr_config *config)
+{
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_tnr_config() enter:\n");
+ ia_css_tnr_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->tnr_config = *config;
+ params->config_changed[IA_CSS_TNR_ID] = true;
+#ifndef ISP2401
+ params->config_changed[IA_CSS_TNR_ID] = true;
+
+#endif
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_tnr_config() leave: "
+ "return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_ob_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_ob_config *config){
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_ob_config() enter: "
+ "config=%p\n",config);
+
+ *config = params->ob_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_ob_config() leave\n");
+ ia_css_ob_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_ob_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_ob_config *config)
+{
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_ob_config() enter:\n");
+ ia_css_ob_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->ob_config = *config;
+ params->config_changed[IA_CSS_OB_ID] = true;
+#ifndef ISP2401
+ params->config_changed[IA_CSS_OB_ID] = true;
+
+#endif
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_ob_config() leave: "
+ "return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_de_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_de_config *config){
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_de_config() enter: "
+ "config=%p\n",config);
+
+ *config = params->de_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_de_config() leave\n");
+ ia_css_de_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_de_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_de_config *config)
+{
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_de_config() enter:\n");
+ ia_css_de_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->de_config = *config;
+ params->config_changed[IA_CSS_DE_ID] = true;
+#ifndef ISP2401
+ params->config_changed[IA_CSS_DE_ID] = true;
+
+#endif
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_de_config() leave: "
+ "return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_anr_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_anr_config *config){
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_anr_config() enter: "
+ "config=%p\n",config);
+
+ *config = params->anr_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_anr_config() leave\n");
+ ia_css_anr_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_anr_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_anr_config *config)
+{
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_anr_config() enter:\n");
+ ia_css_anr_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->anr_config = *config;
+ params->config_changed[IA_CSS_ANR_ID] = true;
+#ifndef ISP2401
+ params->config_changed[IA_CSS_ANR_ID] = true;
+
+#endif
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_anr_config() leave: "
+ "return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_anr2_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_anr_thres *config){
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_anr2_config() enter: "
+ "config=%p\n",config);
+
+ *config = params->anr_thres;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_anr2_config() leave\n");
+ ia_css_anr2_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_anr2_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_anr_thres *config)
+{
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_anr2_config() enter:\n");
+ ia_css_anr2_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->anr_thres = *config;
+ params->config_changed[IA_CSS_ANR2_ID] = true;
+#ifndef ISP2401
+ params->config_changed[IA_CSS_ANR2_ID] = true;
+
+#endif
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_anr2_config() leave: "
+ "return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_ce_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_ce_config *config){
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_ce_config() enter: "
+ "config=%p\n",config);
+
+ *config = params->ce_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_ce_config() leave\n");
+ ia_css_ce_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_ce_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_ce_config *config)
+{
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_ce_config() enter:\n");
+ ia_css_ce_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->ce_config = *config;
+ params->config_changed[IA_CSS_CE_ID] = true;
+#ifndef ISP2401
+ params->config_changed[IA_CSS_CE_ID] = true;
+
+#endif
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_ce_config() leave: "
+ "return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_ecd_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_ecd_config *config){
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_ecd_config() enter: "
+ "config=%p\n",config);
+
+ *config = params->ecd_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_ecd_config() leave\n");
+ ia_css_ecd_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_ecd_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_ecd_config *config)
+{
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_ecd_config() enter:\n");
+ ia_css_ecd_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->ecd_config = *config;
+ params->config_changed[IA_CSS_ECD_ID] = true;
+#ifndef ISP2401
+ params->config_changed[IA_CSS_ECD_ID] = true;
+
+#endif
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_ecd_config() leave: "
+ "return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_ynr_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_ynr_config *config){
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_ynr_config() enter: "
+ "config=%p\n",config);
+
+ *config = params->ynr_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_ynr_config() leave\n");
+ ia_css_ynr_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_ynr_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_ynr_config *config)
+{
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_ynr_config() enter:\n");
+ ia_css_ynr_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->ynr_config = *config;
+ params->config_changed[IA_CSS_YNR_ID] = true;
+#ifndef ISP2401
+ params->config_changed[IA_CSS_YNR_ID] = true;
+
+#endif
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_ynr_config() leave: "
+ "return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_fc_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_fc_config *config){
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_fc_config() enter: "
+ "config=%p\n",config);
+
+ *config = params->fc_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_fc_config() leave\n");
+ ia_css_fc_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_fc_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_fc_config *config)
+{
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_fc_config() enter:\n");
+ ia_css_fc_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->fc_config = *config;
+ params->config_changed[IA_CSS_FC_ID] = true;
+#ifndef ISP2401
+ params->config_changed[IA_CSS_FC_ID] = true;
+
+#endif
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_fc_config() leave: "
+ "return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_cnr_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_cnr_config *config){
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_cnr_config() enter: "
+ "config=%p\n",config);
+
+ *config = params->cnr_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_cnr_config() leave\n");
+ ia_css_cnr_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_cnr_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_cnr_config *config)
+{
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_cnr_config() enter:\n");
+ ia_css_cnr_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->cnr_config = *config;
+ params->config_changed[IA_CSS_CNR_ID] = true;
+#ifndef ISP2401
+ params->config_changed[IA_CSS_CNR_ID] = true;
+
+#endif
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_cnr_config() leave: "
+ "return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_macc_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_macc_config *config){
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_macc_config() enter: "
+ "config=%p\n",config);
+
+ *config = params->macc_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_macc_config() leave\n");
+ ia_css_macc_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_macc_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_macc_config *config)
+{
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_macc_config() enter:\n");
+ ia_css_macc_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->macc_config = *config;
+ params->config_changed[IA_CSS_MACC_ID] = true;
+#ifndef ISP2401
+ params->config_changed[IA_CSS_MACC_ID] = true;
+
+#endif
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_macc_config() leave: "
+ "return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_ctc_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_ctc_config *config){
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_ctc_config() enter: "
+ "config=%p\n",config);
+
+ *config = params->ctc_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_ctc_config() leave\n");
+ ia_css_ctc_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_ctc_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_ctc_config *config)
+{
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_ctc_config() enter:\n");
+ ia_css_ctc_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->ctc_config = *config;
+ params->config_changed[IA_CSS_CTC_ID] = true;
+#ifndef ISP2401
+ params->config_changed[IA_CSS_CTC_ID] = true;
+
+#endif
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_ctc_config() leave: "
+ "return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_aa_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_aa_config *config){
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_aa_config() enter: "
+ "config=%p\n",config);
+
+ *config = params->aa_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_aa_config() leave\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_aa_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_aa_config *config)
+{
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_aa_config() enter:\n");
+ params->aa_config = *config;
+ params->config_changed[IA_CSS_AA_ID] = true;
+#ifndef ISP2401
+ params->config_changed[IA_CSS_AA_ID] = true;
+
+#endif
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_aa_config() leave: "
+ "return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_yuv2rgb_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_cc_config *config){
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_yuv2rgb_config() enter: "
+ "config=%p\n",config);
+
+ *config = params->yuv2rgb_cc_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_yuv2rgb_config() leave\n");
+ ia_css_yuv2rgb_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_yuv2rgb_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_cc_config *config)
+{
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_yuv2rgb_config() enter:\n");
+ ia_css_yuv2rgb_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->yuv2rgb_cc_config = *config;
+ params->config_changed[IA_CSS_YUV2RGB_ID] = true;
+#ifndef ISP2401
+ params->config_changed[IA_CSS_YUV2RGB_ID] = true;
+
+#endif
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_yuv2rgb_config() leave: "
+ "return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_rgb2yuv_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_cc_config *config){
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_rgb2yuv_config() enter: "
+ "config=%p\n",config);
+
+ *config = params->rgb2yuv_cc_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_rgb2yuv_config() leave\n");
+ ia_css_rgb2yuv_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_rgb2yuv_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_cc_config *config)
+{
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_rgb2yuv_config() enter:\n");
+ ia_css_rgb2yuv_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->rgb2yuv_cc_config = *config;
+ params->config_changed[IA_CSS_RGB2YUV_ID] = true;
+#ifndef ISP2401
+ params->config_changed[IA_CSS_RGB2YUV_ID] = true;
+
+#endif
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_rgb2yuv_config() leave: "
+ "return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_csc_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_cc_config *config){
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_csc_config() enter: "
+ "config=%p\n",config);
+
+ *config = params->cc_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_csc_config() leave\n");
+ ia_css_csc_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_csc_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_cc_config *config)
+{
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_csc_config() enter:\n");
+ ia_css_csc_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->cc_config = *config;
+ params->config_changed[IA_CSS_CSC_ID] = true;
+#ifndef ISP2401
+ params->config_changed[IA_CSS_CSC_ID] = true;
+
+#endif
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_csc_config() leave: "
+ "return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_nr_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_nr_config *config){
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_nr_config() enter: "
+ "config=%p\n",config);
+
+ *config = params->nr_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_nr_config() leave\n");
+ ia_css_nr_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_nr_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_nr_config *config)
+{
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_nr_config() enter:\n");
+ ia_css_nr_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->nr_config = *config;
+ params->config_changed[IA_CSS_BNR_ID] = true;
+ params->config_changed[IA_CSS_NR_ID] = true;
+#ifndef ISP2401
+ params->config_changed[IA_CSS_NR_ID] = true;
+
+#endif
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_nr_config() leave: "
+ "return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_gc_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_gc_config *config){
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_gc_config() enter: "
+ "config=%p\n",config);
+
+ *config = params->gc_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_gc_config() leave\n");
+ ia_css_gc_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_gc_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_gc_config *config)
+{
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_gc_config() enter:\n");
+ ia_css_gc_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->gc_config = *config;
+ params->config_changed[IA_CSS_GC_ID] = true;
+#ifndef ISP2401
+ params->config_changed[IA_CSS_GC_ID] = true;
+
+#endif
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_gc_config() leave: "
+ "return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_sdis_horicoef_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_dvs_coefficients *config){
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_sdis_horicoef_config() enter: "
+ "config=%p\n",config);
+
+ *config = params->dvs_coefs;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_sdis_horicoef_config() leave\n");
+ ia_css_sdis_horicoef_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_sdis_horicoef_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_dvs_coefficients *config)
+{
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_sdis_horicoef_config() enter:\n");
+ ia_css_sdis_horicoef_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->dvs_coefs = *config;
+ params->config_changed[IA_CSS_SDIS_HORICOEF_ID] = true;
+ params->config_changed[IA_CSS_SDIS_VERTCOEF_ID] = true;
+ params->config_changed[IA_CSS_SDIS_HORIPROJ_ID] = true;
+ params->config_changed[IA_CSS_SDIS_VERTPROJ_ID] = true;
+#ifndef ISP2401
+ params->config_changed[IA_CSS_SDIS_HORICOEF_ID] = true;
+
+#endif
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_sdis_horicoef_config() leave: "
+ "return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_sdis_vertcoef_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_dvs_coefficients *config){
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_sdis_vertcoef_config() enter: "
+ "config=%p\n",config);
+
+ *config = params->dvs_coefs;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_sdis_vertcoef_config() leave\n");
+ ia_css_sdis_vertcoef_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_sdis_vertcoef_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_dvs_coefficients *config)
+{
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_sdis_vertcoef_config() enter:\n");
+ ia_css_sdis_vertcoef_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->dvs_coefs = *config;
+ params->config_changed[IA_CSS_SDIS_HORICOEF_ID] = true;
+ params->config_changed[IA_CSS_SDIS_VERTCOEF_ID] = true;
+ params->config_changed[IA_CSS_SDIS_HORIPROJ_ID] = true;
+ params->config_changed[IA_CSS_SDIS_VERTPROJ_ID] = true;
+#ifndef ISP2401
+ params->config_changed[IA_CSS_SDIS_VERTCOEF_ID] = true;
+
+#endif
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_sdis_vertcoef_config() leave: "
+ "return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_sdis_horiproj_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_dvs_coefficients *config){
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_sdis_horiproj_config() enter: "
+ "config=%p\n",config);
+
+ *config = params->dvs_coefs;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_sdis_horiproj_config() leave\n");
+ ia_css_sdis_horiproj_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_sdis_horiproj_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_dvs_coefficients *config)
+{
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_sdis_horiproj_config() enter:\n");
+ ia_css_sdis_horiproj_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->dvs_coefs = *config;
+ params->config_changed[IA_CSS_SDIS_HORICOEF_ID] = true;
+ params->config_changed[IA_CSS_SDIS_VERTCOEF_ID] = true;
+ params->config_changed[IA_CSS_SDIS_HORIPROJ_ID] = true;
+ params->config_changed[IA_CSS_SDIS_VERTPROJ_ID] = true;
+#ifndef ISP2401
+ params->config_changed[IA_CSS_SDIS_HORIPROJ_ID] = true;
+
+#endif
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_sdis_horiproj_config() leave: "
+ "return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_sdis_vertproj_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_dvs_coefficients *config){
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_sdis_vertproj_config() enter: "
+ "config=%p\n",config);
+
+ *config = params->dvs_coefs;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_sdis_vertproj_config() leave\n");
+ ia_css_sdis_vertproj_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_sdis_vertproj_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_dvs_coefficients *config)
+{
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_sdis_vertproj_config() enter:\n");
+ ia_css_sdis_vertproj_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->dvs_coefs = *config;
+ params->config_changed[IA_CSS_SDIS_HORICOEF_ID] = true;
+ params->config_changed[IA_CSS_SDIS_VERTCOEF_ID] = true;
+ params->config_changed[IA_CSS_SDIS_HORIPROJ_ID] = true;
+ params->config_changed[IA_CSS_SDIS_VERTPROJ_ID] = true;
+#ifndef ISP2401
+ params->config_changed[IA_CSS_SDIS_VERTPROJ_ID] = true;
+
+#endif
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_sdis_vertproj_config() leave: "
+ "return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_sdis2_horicoef_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_dvs2_coefficients *config){
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_sdis2_horicoef_config() enter: "
+ "config=%p\n",config);
+
+ *config = params->dvs2_coefs;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_sdis2_horicoef_config() leave\n");
+ ia_css_sdis2_horicoef_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_sdis2_horicoef_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_dvs2_coefficients *config)
+{
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_sdis2_horicoef_config() enter:\n");
+ ia_css_sdis2_horicoef_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->dvs2_coefs = *config;
+ params->config_changed[IA_CSS_SDIS2_HORICOEF_ID] = true;
+ params->config_changed[IA_CSS_SDIS2_VERTCOEF_ID] = true;
+ params->config_changed[IA_CSS_SDIS2_HORIPROJ_ID] = true;
+ params->config_changed[IA_CSS_SDIS2_VERTPROJ_ID] = true;
+#ifndef ISP2401
+ params->config_changed[IA_CSS_SDIS2_HORICOEF_ID] = true;
+
+#endif
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_sdis2_horicoef_config() leave: "
+ "return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_sdis2_vertcoef_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_dvs2_coefficients *config){
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_sdis2_vertcoef_config() enter: "
+ "config=%p\n",config);
+
+ *config = params->dvs2_coefs;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_sdis2_vertcoef_config() leave\n");
+ ia_css_sdis2_vertcoef_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_sdis2_vertcoef_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_dvs2_coefficients *config)
+{
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_sdis2_vertcoef_config() enter:\n");
+ ia_css_sdis2_vertcoef_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->dvs2_coefs = *config;
+ params->config_changed[IA_CSS_SDIS2_HORICOEF_ID] = true;
+ params->config_changed[IA_CSS_SDIS2_VERTCOEF_ID] = true;
+ params->config_changed[IA_CSS_SDIS2_HORIPROJ_ID] = true;
+ params->config_changed[IA_CSS_SDIS2_VERTPROJ_ID] = true;
+#ifndef ISP2401
+ params->config_changed[IA_CSS_SDIS2_VERTCOEF_ID] = true;
+
+#endif
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_sdis2_vertcoef_config() leave: "
+ "return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_sdis2_horiproj_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_dvs2_coefficients *config){
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_sdis2_horiproj_config() enter: "
+ "config=%p\n",config);
+
+ *config = params->dvs2_coefs;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_sdis2_horiproj_config() leave\n");
+ ia_css_sdis2_horiproj_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_sdis2_horiproj_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_dvs2_coefficients *config)
+{
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_sdis2_horiproj_config() enter:\n");
+ ia_css_sdis2_horiproj_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->dvs2_coefs = *config;
+ params->config_changed[IA_CSS_SDIS2_HORICOEF_ID] = true;
+ params->config_changed[IA_CSS_SDIS2_VERTCOEF_ID] = true;
+ params->config_changed[IA_CSS_SDIS2_HORIPROJ_ID] = true;
+ params->config_changed[IA_CSS_SDIS2_VERTPROJ_ID] = true;
+#ifndef ISP2401
+ params->config_changed[IA_CSS_SDIS2_HORIPROJ_ID] = true;
+
+#endif
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_sdis2_horiproj_config() leave: "
+ "return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_sdis2_vertproj_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_dvs2_coefficients *config){
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_sdis2_vertproj_config() enter: "
+ "config=%p\n",config);
+
+ *config = params->dvs2_coefs;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_sdis2_vertproj_config() leave\n");
+ ia_css_sdis2_vertproj_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_sdis2_vertproj_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_dvs2_coefficients *config)
+{
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_sdis2_vertproj_config() enter:\n");
+ ia_css_sdis2_vertproj_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->dvs2_coefs = *config;
+ params->config_changed[IA_CSS_SDIS2_HORICOEF_ID] = true;
+ params->config_changed[IA_CSS_SDIS2_VERTCOEF_ID] = true;
+ params->config_changed[IA_CSS_SDIS2_HORIPROJ_ID] = true;
+ params->config_changed[IA_CSS_SDIS2_VERTPROJ_ID] = true;
+#ifndef ISP2401
+ params->config_changed[IA_CSS_SDIS2_VERTPROJ_ID] = true;
+
+#endif
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_sdis2_vertproj_config() leave: "
+ "return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_r_gamma_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_rgb_gamma_table *config){
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_r_gamma_config() enter: "
+ "config=%p\n",config);
+
+ *config = params->r_gamma_table;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_r_gamma_config() leave\n");
+ ia_css_r_gamma_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_r_gamma_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_rgb_gamma_table *config)
+{
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_r_gamma_config() enter:\n");
+ ia_css_r_gamma_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->r_gamma_table = *config;
+ params->config_changed[IA_CSS_R_GAMMA_ID] = true;
+#ifndef ISP2401
+ params->config_changed[IA_CSS_R_GAMMA_ID] = true;
+
+#endif
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_r_gamma_config() leave: "
+ "return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_g_gamma_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_rgb_gamma_table *config){
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_g_gamma_config() enter: "
+ "config=%p\n",config);
+
+ *config = params->g_gamma_table;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_g_gamma_config() leave\n");
+ ia_css_g_gamma_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_g_gamma_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_rgb_gamma_table *config)
+{
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_g_gamma_config() enter:\n");
+ ia_css_g_gamma_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->g_gamma_table = *config;
+ params->config_changed[IA_CSS_G_GAMMA_ID] = true;
+#ifndef ISP2401
+ params->config_changed[IA_CSS_G_GAMMA_ID] = true;
+
+#endif
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_g_gamma_config() leave: "
+ "return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_b_gamma_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_rgb_gamma_table *config){
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_b_gamma_config() enter: "
+ "config=%p\n",config);
+
+ *config = params->b_gamma_table;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_b_gamma_config() leave\n");
+ ia_css_b_gamma_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_b_gamma_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_rgb_gamma_table *config)
+{
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_b_gamma_config() enter:\n");
+ ia_css_b_gamma_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->b_gamma_table = *config;
+ params->config_changed[IA_CSS_B_GAMMA_ID] = true;
+#ifndef ISP2401
+ params->config_changed[IA_CSS_B_GAMMA_ID] = true;
+
+#endif
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_b_gamma_config() leave: "
+ "return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_xnr_table_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_xnr_table *config){
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_xnr_table_config() enter: "
+ "config=%p\n",config);
+
+ *config = params->xnr_table;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_xnr_table_config() leave\n");
+ ia_css_xnr_table_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_xnr_table_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_xnr_table *config)
+{
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_xnr_table_config() enter:\n");
+ ia_css_xnr_table_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->xnr_table = *config;
+ params->config_changed[IA_CSS_XNR_TABLE_ID] = true;
+#ifndef ISP2401
+ params->config_changed[IA_CSS_XNR_TABLE_ID] = true;
+
+#endif
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_xnr_table_config() leave: "
+ "return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_formats_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_formats_config *config){
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_formats_config() enter: "
+ "config=%p\n",config);
+
+ *config = params->formats_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_formats_config() leave\n");
+ ia_css_formats_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_formats_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_formats_config *config)
+{
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_formats_config() enter:\n");
+ ia_css_formats_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->formats_config = *config;
+ params->config_changed[IA_CSS_FORMATS_ID] = true;
+#ifndef ISP2401
+ params->config_changed[IA_CSS_FORMATS_ID] = true;
+
+#endif
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_formats_config() leave: "
+ "return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_xnr_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_xnr_config *config){
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_xnr_config() enter: "
+ "config=%p\n",config);
+
+ *config = params->xnr_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_xnr_config() leave\n");
+ ia_css_xnr_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_xnr_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_xnr_config *config)
+{
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_xnr_config() enter:\n");
+ ia_css_xnr_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->xnr_config = *config;
+ params->config_changed[IA_CSS_XNR_ID] = true;
+#ifndef ISP2401
+ params->config_changed[IA_CSS_XNR_ID] = true;
+
+#endif
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_xnr_config() leave: "
+ "return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_xnr3_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_xnr3_config *config){
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_xnr3_config() enter: "
+ "config=%p\n",config);
+
+ *config = params->xnr3_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_xnr3_config() leave\n");
+ ia_css_xnr3_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_xnr3_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_xnr3_config *config)
+{
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_xnr3_config() enter:\n");
+ ia_css_xnr3_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->xnr3_config = *config;
+ params->config_changed[IA_CSS_XNR3_ID] = true;
+#ifndef ISP2401
+ params->config_changed[IA_CSS_XNR3_ID] = true;
+
+#endif
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_xnr3_config() leave: "
+ "return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_s3a_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_3a_config *config){
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_s3a_config() enter: "
+ "config=%p\n",config);
+
+ *config = params->s3a_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_s3a_config() leave\n");
+ ia_css_s3a_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_s3a_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_3a_config *config)
+{
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_s3a_config() enter:\n");
+ ia_css_s3a_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->s3a_config = *config;
+ params->config_changed[IA_CSS_BH_ID] = true;
+ params->config_changed[IA_CSS_S3A_ID] = true;
+#ifndef ISP2401
+ params->config_changed[IA_CSS_S3A_ID] = true;
+
+#endif
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_s3a_config() leave: "
+ "return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_get_function() */
+
+static void
+ia_css_get_output_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_output_config *config){
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_output_config() enter: "
+ "config=%p\n",config);
+
+ *config = params->output_config;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_get_output_config() leave\n");
+ ia_css_output_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+}
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_output_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_output_config *config)
+{
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_output_config() enter:\n");
+ ia_css_output_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
+ params->output_config = *config;
+ params->config_changed[IA_CSS_OUTPUT_ID] = true;
+#ifndef ISP2401
+ params->config_changed[IA_CSS_OUTPUT_ID] = true;
+
+#endif
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_set_output_config() leave: "
+ "return_void\n");
+}
+
+/* Code generated by genparam/gencode.c:gen_global_access_function() */
+
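+/*
+ * Fan-out getter: copies each per-kernel parameter block from @params into
+ * the corresponding (non-NULL) pointer in @config.
+ */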
+void
+ia_css_get_configs(struct ia_css_isp_parameters *params,
+ const struct ia_css_isp_config *config)
+{
+ ia_css_get_dp_config(params, config->dp_config);
+ ia_css_get_wb_config(params, config->wb_config);
+ ia_css_get_tnr_config(params, config->tnr_config);
+ ia_css_get_ob_config(params, config->ob_config);
+ ia_css_get_de_config(params, config->de_config);
+ ia_css_get_anr_config(params, config->anr_config);
+ ia_css_get_anr2_config(params, config->anr_thres);
+ ia_css_get_ce_config(params, config->ce_config);
+ ia_css_get_ecd_config(params, config->ecd_config);
+ ia_css_get_ynr_config(params, config->ynr_config);
+ ia_css_get_fc_config(params, config->fc_config);
+ ia_css_get_cnr_config(params, config->cnr_config);
+ ia_css_get_macc_config(params, config->macc_config);
+ ia_css_get_ctc_config(params, config->ctc_config);
+ ia_css_get_aa_config(params, config->aa_config);
+ ia_css_get_yuv2rgb_config(params, config->yuv2rgb_cc_config);
+ ia_css_get_rgb2yuv_config(params, config->rgb2yuv_cc_config);
+ ia_css_get_csc_config(params, config->cc_config);
+ ia_css_get_nr_config(params, config->nr_config);
+ ia_css_get_gc_config(params, config->gc_config);
+ ia_css_get_sdis_horicoef_config(params, config->dvs_coefs);
+ ia_css_get_sdis_vertcoef_config(params, config->dvs_coefs);
+ ia_css_get_sdis_horiproj_config(params, config->dvs_coefs);
+ ia_css_get_sdis_vertproj_config(params, config->dvs_coefs);
+ ia_css_get_sdis2_horicoef_config(params, config->dvs2_coefs);
+ ia_css_get_sdis2_vertcoef_config(params, config->dvs2_coefs);
+ ia_css_get_sdis2_horiproj_config(params, config->dvs2_coefs);
+ ia_css_get_sdis2_vertproj_config(params, config->dvs2_coefs);
+ ia_css_get_r_gamma_config(params, config->r_gamma_table);
+ ia_css_get_g_gamma_config(params, config->g_gamma_table);
+ ia_css_get_b_gamma_config(params, config->b_gamma_table);
+ ia_css_get_xnr_table_config(params, config->xnr_table);
+ ia_css_get_formats_config(params, config->formats_config);
+ ia_css_get_xnr_config(params, config->xnr_config);
+ ia_css_get_xnr3_config(params, config->xnr3_config);
+ ia_css_get_s3a_config(params, config->s3a_config);
+ ia_css_get_output_config(params, config->output_config);
+}
+
+/* Code generated by genparam/gencode.c:gen_global_access_function() */
+
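+/*
+ * Fan-out setter: applies every non-NULL per-kernel config in @config to
+ * @params and marks the matching config_changed[] entries.
+ */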
+void
+ia_css_set_configs(struct ia_css_isp_parameters *params,
+ const struct ia_css_isp_config *config)
+{
+ ia_css_set_dp_config(params, config->dp_config);
+ ia_css_set_wb_config(params, config->wb_config);
+ ia_css_set_tnr_config(params, config->tnr_config);
+ ia_css_set_ob_config(params, config->ob_config);
+ ia_css_set_de_config(params, config->de_config);
+ ia_css_set_anr_config(params, config->anr_config);
+ ia_css_set_anr2_config(params, config->anr_thres);
+ ia_css_set_ce_config(params, config->ce_config);
+ ia_css_set_ecd_config(params, config->ecd_config);
+ ia_css_set_ynr_config(params, config->ynr_config);
+ ia_css_set_fc_config(params, config->fc_config);
+ ia_css_set_cnr_config(params, config->cnr_config);
+ ia_css_set_macc_config(params, config->macc_config);
+ ia_css_set_ctc_config(params, config->ctc_config);
+ ia_css_set_aa_config(params, config->aa_config);
+ ia_css_set_yuv2rgb_config(params, config->yuv2rgb_cc_config);
+ ia_css_set_rgb2yuv_config(params, config->rgb2yuv_cc_config);
+ ia_css_set_csc_config(params, config->cc_config);
+ ia_css_set_nr_config(params, config->nr_config);
+ ia_css_set_gc_config(params, config->gc_config);
+ ia_css_set_sdis_horicoef_config(params, config->dvs_coefs);
+ ia_css_set_sdis_vertcoef_config(params, config->dvs_coefs);
+ ia_css_set_sdis_horiproj_config(params, config->dvs_coefs);
+ ia_css_set_sdis_vertproj_config(params, config->dvs_coefs);
+ ia_css_set_sdis2_horicoef_config(params, config->dvs2_coefs);
+ ia_css_set_sdis2_vertcoef_config(params, config->dvs2_coefs);
+ ia_css_set_sdis2_horiproj_config(params, config->dvs2_coefs);
+ ia_css_set_sdis2_vertproj_config(params, config->dvs2_coefs);
+ ia_css_set_r_gamma_config(params, config->r_gamma_table);
+ ia_css_set_g_gamma_config(params, config->g_gamma_table);
+ ia_css_set_b_gamma_config(params, config->b_gamma_table);
+ ia_css_set_xnr_table_config(params, config->xnr_table);
+ ia_css_set_formats_config(params, config->formats_config);
+ ia_css_set_xnr_config(params, config->xnr_config);
+ ia_css_set_xnr3_config(params, config->xnr3_config);
+ ia_css_set_s3a_config(params, config->s3a_config);
+ ia_css_set_output_config(params, config->output_config);
+}
+
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hive_isp_css_2401_system_generated/ia_css_isp_params.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hive_isp_css_2401_system_generated/ia_css_isp_params.h
new file mode 100644
index 000000000000..5b3deb7f74ae
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hive_isp_css_2401_system_generated/ia_css_isp_params.h
@@ -0,0 +1,399 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+/* Generated code: do not edit or commit. */
+
+#ifndef _IA_CSS_ISP_PARAM_H
+#define _IA_CSS_ISP_PARAM_H
+
+/* Code generated by genparam/gencode.c:gen_param_enum() */
+
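+/* One ID per ISP kernel; used to index config_changed[] and ia_css_kernel_process_param[]. */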
+enum ia_css_parameter_ids {
+ IA_CSS_AA_ID,
+ IA_CSS_ANR_ID,
+ IA_CSS_ANR2_ID,
+ IA_CSS_BH_ID,
+ IA_CSS_CNR_ID,
+ IA_CSS_CROP_ID,
+ IA_CSS_CSC_ID,
+ IA_CSS_DP_ID,
+ IA_CSS_BNR_ID,
+ IA_CSS_DE_ID,
+ IA_CSS_ECD_ID,
+ IA_CSS_FORMATS_ID,
+ IA_CSS_FPN_ID,
+ IA_CSS_GC_ID,
+ IA_CSS_CE_ID,
+ IA_CSS_YUV2RGB_ID,
+ IA_CSS_RGB2YUV_ID,
+ IA_CSS_R_GAMMA_ID,
+ IA_CSS_G_GAMMA_ID,
+ IA_CSS_B_GAMMA_ID,
+ IA_CSS_UDS_ID,
+ IA_CSS_RAA_ID,
+ IA_CSS_S3A_ID,
+ IA_CSS_OB_ID,
+ IA_CSS_OUTPUT_ID,
+ IA_CSS_SC_ID,
+ IA_CSS_BDS_ID,
+ IA_CSS_TNR_ID,
+ IA_CSS_MACC_ID,
+ IA_CSS_SDIS_HORICOEF_ID,
+ IA_CSS_SDIS_VERTCOEF_ID,
+ IA_CSS_SDIS_HORIPROJ_ID,
+ IA_CSS_SDIS_VERTPROJ_ID,
+ IA_CSS_SDIS2_HORICOEF_ID,
+ IA_CSS_SDIS2_VERTCOEF_ID,
+ IA_CSS_SDIS2_HORIPROJ_ID,
+ IA_CSS_SDIS2_VERTPROJ_ID,
+ IA_CSS_WB_ID,
+ IA_CSS_NR_ID,
+ IA_CSS_YEE_ID,
+ IA_CSS_YNR_ID,
+ IA_CSS_FC_ID,
+ IA_CSS_CTC_ID,
+ IA_CSS_XNR_TABLE_ID,
+ IA_CSS_XNR_ID,
+ IA_CSS_XNR3_ID,
+ IA_CSS_NUM_PARAMETER_IDS
+};
+
+/* Code generated by genparam/gencode.c:gen_param_offsets() */
+
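+/*
+ * One struct ia_css_isp_parameter entry per kernel, grouped by the ISP
+ * memory that holds it (dmem, vmem, hmem0, vamem0-2).
+ */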
+struct ia_css_memory_offsets {
+ struct {
+ struct ia_css_isp_parameter aa;
+ struct ia_css_isp_parameter anr;
+ struct ia_css_isp_parameter bh;
+ struct ia_css_isp_parameter cnr;
+ struct ia_css_isp_parameter crop;
+ struct ia_css_isp_parameter csc;
+ struct ia_css_isp_parameter dp;
+ struct ia_css_isp_parameter bnr;
+ struct ia_css_isp_parameter de;
+ struct ia_css_isp_parameter ecd;
+ struct ia_css_isp_parameter formats;
+ struct ia_css_isp_parameter fpn;
+ struct ia_css_isp_parameter gc;
+ struct ia_css_isp_parameter ce;
+ struct ia_css_isp_parameter yuv2rgb;
+ struct ia_css_isp_parameter rgb2yuv;
+ struct ia_css_isp_parameter uds;
+ struct ia_css_isp_parameter raa;
+ struct ia_css_isp_parameter s3a;
+ struct ia_css_isp_parameter ob;
+ struct ia_css_isp_parameter output;
+ struct ia_css_isp_parameter sc;
+ struct ia_css_isp_parameter bds;
+ struct ia_css_isp_parameter tnr;
+ struct ia_css_isp_parameter macc;
+ struct ia_css_isp_parameter sdis_horiproj;
+ struct ia_css_isp_parameter sdis_vertproj;
+ struct ia_css_isp_parameter sdis2_horiproj;
+ struct ia_css_isp_parameter sdis2_vertproj;
+ struct ia_css_isp_parameter wb;
+ struct ia_css_isp_parameter nr;
+ struct ia_css_isp_parameter yee;
+ struct ia_css_isp_parameter ynr;
+ struct ia_css_isp_parameter fc;
+ struct ia_css_isp_parameter ctc;
+ struct ia_css_isp_parameter xnr;
+ struct ia_css_isp_parameter xnr3;
+ struct ia_css_isp_parameter get;
+ struct ia_css_isp_parameter put;
+ } dmem;
+ struct {
+ struct ia_css_isp_parameter anr2;
+ struct ia_css_isp_parameter ob;
+ struct ia_css_isp_parameter sdis_horicoef;
+ struct ia_css_isp_parameter sdis_vertcoef;
+ struct ia_css_isp_parameter sdis2_horicoef;
+ struct ia_css_isp_parameter sdis2_vertcoef;
+#ifdef ISP2401
+ struct ia_css_isp_parameter xnr3;
+#endif
+ } vmem;
+ struct {
+ struct ia_css_isp_parameter bh;
+ } hmem0;
+ struct {
+ struct ia_css_isp_parameter gc;
+ struct ia_css_isp_parameter g_gamma;
+ struct ia_css_isp_parameter xnr_table;
+ } vamem1;
+ struct {
+ struct ia_css_isp_parameter r_gamma;
+ struct ia_css_isp_parameter ctc;
+ } vamem0;
+ struct {
+ struct ia_css_isp_parameter b_gamma;
+ } vamem2;
+};
+
+#if defined(IA_CSS_INCLUDE_PARAMETERS)
+
+#include "ia_css_stream.h" /* struct ia_css_stream */
+#include "ia_css_binary.h" /* struct ia_css_binary */
+/* Code generated by genparam/gencode.c:gen_param_process_table() */
+
+struct ia_css_pipeline_stage; /* forward declaration */
+
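+/* Per-kernel parameter-processing hooks, indexed by enum ia_css_parameter_ids. */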
+extern void (* ia_css_kernel_process_param[IA_CSS_NUM_PARAMETER_IDS])(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_dp_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_dp_config *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_wb_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_wb_config *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_tnr_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_tnr_config *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_ob_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_ob_config *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_de_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_de_config *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_anr_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_anr_config *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_anr2_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_anr_thres *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_ce_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_ce_config *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_ecd_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_ecd_config *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_ynr_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_ynr_config *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_fc_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_fc_config *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_cnr_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_cnr_config *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_macc_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_macc_config *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_ctc_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_ctc_config *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_aa_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_aa_config *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_yuv2rgb_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_cc_config *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_rgb2yuv_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_cc_config *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_csc_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_cc_config *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_nr_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_nr_config *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_gc_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_gc_config *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_sdis_horicoef_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_dvs_coefficients *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_sdis_vertcoef_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_dvs_coefficients *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_sdis_horiproj_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_dvs_coefficients *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_sdis_vertproj_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_dvs_coefficients *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_sdis2_horicoef_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_dvs2_coefficients *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_sdis2_vertcoef_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_dvs2_coefficients *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_sdis2_horiproj_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_dvs2_coefficients *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_sdis2_vertproj_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_dvs2_coefficients *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_r_gamma_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_rgb_gamma_table *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_g_gamma_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_rgb_gamma_table *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_b_gamma_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_rgb_gamma_table *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_xnr_table_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_xnr_table *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_formats_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_formats_config *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_xnr_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_xnr_config *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_xnr3_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_xnr3_config *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_s3a_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_3a_config *config);
+
+/* Code generated by genparam/gencode.c:gen_set_function() */
+
+void
+ia_css_set_output_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_output_config *config);
+
+/* Code generated by genparam/gencode.c:gen_global_access_function() */
+
+void
+ia_css_get_configs(struct ia_css_isp_parameters *params,
+ const struct ia_css_isp_config *config);
+#ifdef ISP2401
+
+#endif
+/* Code generated by genparam/gencode.c:gen_global_access_function() */
+
+void
+ia_css_set_configs(struct ia_css_isp_parameters *params,
+ const struct ia_css_isp_config *config);
+#ifdef ISP2401
+
+#endif
+#endif /* IA_CSS_INCLUDE_PARAMETERS */
+
+#endif /* _IA_CSS_ISP_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hive_isp_css_2401_system_generated/ia_css_isp_states.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hive_isp_css_2401_system_generated/ia_css_isp_states.c
new file mode 100644
index 000000000000..e87d05bc73ae
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hive_isp_css_2401_system_generated/ia_css_isp_states.c
@@ -0,0 +1,214 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+/* Generated code: do not edit or commit. */
+
+#include "ia_css_pipeline.h"
+#include "ia_css_isp_states.h"
+#include "ia_css_debug.h"
+#include "assert_support.h"
+
+/* Code generated by genparam/genstate.c:gen_init_function() */
+
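+/*
+ * Each initializer below looks up the kernel's state offset and size in the
+ * binary info's mem_offsets and clears or re-initializes that region of the
+ * STATE parameter class.
+ */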
+static void
+ia_css_initialize_aa_state(
+ const struct ia_css_binary *binary)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_initialize_aa_state() enter:\n");
+
+ {
+ unsigned size = binary->info->mem_offsets.offsets.state->vmem.aa.size;
+ unsigned offset = binary->info->mem_offsets.offsets.state->vmem.aa.offset;
+
+ if (size)
+ memset(&binary->mem_params.params[IA_CSS_PARAM_CLASS_STATE][IA_CSS_ISP_VMEM].address[offset], 0, size);
+
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_initialize_aa_state() leave:\n");
+}
+
+/* Code generated by genparam/genstate.c:gen_init_function() */
+
+static void
+ia_css_initialize_cnr_state(
+ const struct ia_css_binary *binary)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_initialize_cnr_state() enter:\n");
+
+ {
+ unsigned size = binary->info->mem_offsets.offsets.state->vmem.cnr.size;
+
+ unsigned offset = binary->info->mem_offsets.offsets.state->vmem.cnr.offset;
+
+ if (size) {
+ ia_css_init_cnr_state(
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_STATE][IA_CSS_ISP_VMEM].address[offset],
+ size);
+ }
+
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_initialize_cnr_state() leave:\n");
+}
+
+/* Code generated by genparam/genstate.c:gen_init_function() */
+
+static void
+ia_css_initialize_cnr2_state(
+ const struct ia_css_binary *binary)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_initialize_cnr2_state() enter:\n");
+
+ {
+ unsigned size = binary->info->mem_offsets.offsets.state->vmem.cnr2.size;
+
+ unsigned offset = binary->info->mem_offsets.offsets.state->vmem.cnr2.offset;
+
+ if (size) {
+ ia_css_init_cnr2_state(
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_STATE][IA_CSS_ISP_VMEM].address[offset],
+ size);
+ }
+
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_initialize_cnr2_state() leave:\n");
+}
+
+/* Code generated by genparam/genstate.c:gen_init_function() */
+
+static void
+ia_css_initialize_dp_state(
+ const struct ia_css_binary *binary)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_initialize_dp_state() enter:\n");
+
+ {
+ unsigned size = binary->info->mem_offsets.offsets.state->vmem.dp.size;
+
+ unsigned offset = binary->info->mem_offsets.offsets.state->vmem.dp.offset;
+
+ if (size) {
+ ia_css_init_dp_state(
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_STATE][IA_CSS_ISP_VMEM].address[offset],
+ size);
+ }
+
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_initialize_dp_state() leave:\n");
+}
+
+/* Code generated by genparam/genstate.c:gen_init_function() */
+
+static void
+ia_css_initialize_de_state(
+ const struct ia_css_binary *binary)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_initialize_de_state() enter:\n");
+
+ {
+ unsigned size = binary->info->mem_offsets.offsets.state->vmem.de.size;
+
+ unsigned offset = binary->info->mem_offsets.offsets.state->vmem.de.offset;
+
+ if (size) {
+ ia_css_init_de_state(
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_STATE][IA_CSS_ISP_VMEM].address[offset],
+ size);
+ }
+
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_initialize_de_state() leave:\n");
+}
+
+/* Code generated by genparam/genstate.c:gen_init_function() */
+
+static void
+ia_css_initialize_tnr_state(
+ const struct ia_css_binary *binary)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_initialize_tnr_state() enter:\n");
+
+ {
+ unsigned size = binary->info->mem_offsets.offsets.state->dmem.tnr.size;
+
+ unsigned offset = binary->info->mem_offsets.offsets.state->dmem.tnr.offset;
+
+ if (size) {
+ ia_css_init_tnr_state((struct sh_css_isp_tnr_dmem_state *)
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_STATE][IA_CSS_ISP_DMEM].address[offset],
+ size);
+ }
+
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_initialize_tnr_state() leave:\n");
+}
+
+/* Code generated by genparam/genstate.c:gen_init_function() */
+
+static void
+ia_css_initialize_ref_state(
+ const struct ia_css_binary *binary)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_initialize_ref_state() enter:\n");
+
+ {
+ unsigned size = binary->info->mem_offsets.offsets.state->dmem.ref.size;
+
+ unsigned offset = binary->info->mem_offsets.offsets.state->dmem.ref.offset;
+
+ if (size) {
+ ia_css_init_ref_state((struct sh_css_isp_ref_dmem_state *)
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_STATE][IA_CSS_ISP_DMEM].address[offset],
+ size);
+ }
+
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_initialize_ref_state() leave:\n");
+}
+
+/* Code generated by genparam/genstate.c:gen_init_function() */
+
+static void
+ia_css_initialize_ynr_state(
+ const struct ia_css_binary *binary)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_initialize_ynr_state() enter:\n");
+
+ {
+ unsigned size = binary->info->mem_offsets.offsets.state->vmem.ynr.size;
+
+ unsigned offset = binary->info->mem_offsets.offsets.state->vmem.ynr.offset;
+
+ if (size) {
+ ia_css_init_ynr_state(
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_STATE][IA_CSS_ISP_VMEM].address[offset],
+ size);
+ }
+
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_initialize_ynr_state() leave:\n");
+}
+
+/* Code generated by genparam/genstate.c:gen_state_init_table() */
+
+void (* ia_css_kernel_init_state[IA_CSS_NUM_STATE_IDS])(const struct ia_css_binary *binary) = {
+ ia_css_initialize_aa_state,
+ ia_css_initialize_cnr_state,
+ ia_css_initialize_cnr2_state,
+ ia_css_initialize_dp_state,
+ ia_css_initialize_de_state,
+ ia_css_initialize_tnr_state,
+ ia_css_initialize_ref_state,
+ ia_css_initialize_ynr_state,
+};
+
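The generated table above is indexed by enum ia_css_state_ids from the companion header that follows. A minimal, hypothetical usage sketch (the helper name and the assumption that the binary's mem_offsets are fully populated are illustrative, not taken from this patch):

	/* Hypothetical helper: re-initialize every ISP state class of one binary
	 * by walking the generated per-kernel table. */
	static void reset_all_isp_states(const struct ia_css_binary *binary)
	{
		unsigned int i;

		for (i = 0; i < IA_CSS_NUM_STATE_IDS; i++)
			ia_css_kernel_init_state[i](binary);
	}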
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hive_isp_css_2401_system_generated/ia_css_isp_states.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hive_isp_css_2401_system_generated/ia_css_isp_states.h
new file mode 100644
index 000000000000..732adafb0a63
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hive_isp_css_2401_system_generated/ia_css_isp_states.h
@@ -0,0 +1,72 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#define IA_CSS_INCLUDE_STATES
+#include "isp/kernels/aa/aa_2/ia_css_aa2.host.h"
+#include "isp/kernels/cnr/cnr_1.0/ia_css_cnr.host.h"
+#include "isp/kernels/cnr/cnr_2/ia_css_cnr2.host.h"
+#include "isp/kernels/de/de_1.0/ia_css_de.host.h"
+#include "isp/kernels/dp/dp_1.0/ia_css_dp.host.h"
+#include "isp/kernels/ref/ref_1.0/ia_css_ref.host.h"
+#include "isp/kernels/tnr/tnr_1.0/ia_css_tnr.host.h"
+#include "isp/kernels/ynr/ynr_1.0/ia_css_ynr.host.h"
+#include "isp/kernels/dpc2/ia_css_dpc2.host.h"
+#include "isp/kernels/eed1_8/ia_css_eed1_8.host.h"
+/* Generated code: do not edit or commit. */
+
+#ifndef _IA_CSS_ISP_STATE_H
+#define _IA_CSS_ISP_STATE_H
+
+/* Code generated by genparam/gencode.c:gen_param_enum() */
+
+enum ia_css_state_ids {
+ IA_CSS_AA_STATE_ID,
+ IA_CSS_CNR_STATE_ID,
+ IA_CSS_CNR2_STATE_ID,
+ IA_CSS_DP_STATE_ID,
+ IA_CSS_DE_STATE_ID,
+ IA_CSS_TNR_STATE_ID,
+ IA_CSS_REF_STATE_ID,
+ IA_CSS_YNR_STATE_ID,
+ IA_CSS_NUM_STATE_IDS
+};
+
+/* Code generated by genparam/gencode.c:gen_param_offsets() */
+
+struct ia_css_state_memory_offsets {
+ struct {
+ struct ia_css_isp_parameter aa;
+ struct ia_css_isp_parameter cnr;
+ struct ia_css_isp_parameter cnr2;
+ struct ia_css_isp_parameter dp;
+ struct ia_css_isp_parameter de;
+ struct ia_css_isp_parameter ynr;
+ } vmem;
+ struct {
+ struct ia_css_isp_parameter tnr;
+ struct ia_css_isp_parameter ref;
+ } dmem;
+};
+
+#if defined(IA_CSS_INCLUDE_STATES)
+
+#include "ia_css_stream.h" /* struct ia_css_stream */
+#include "ia_css_binary.h" /* struct ia_css_binary */
+/* Code generated by genparam/genstate.c:gen_state_init_table() */
+
+extern void (* ia_css_kernel_init_state[IA_CSS_NUM_STATE_IDS])(const struct ia_css_binary *binary);
+
+#endif /* IA_CSS_INCLUDE_STATES */
+
+#endif /* _IA_CSS_ISP_STATE_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/bits.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/bits.h
new file mode 100644
index 000000000000..e71e33d9d143
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/bits.h
@@ -0,0 +1,104 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _HRT_BITS_H
+#define _HRT_BITS_H
+
+#include "defs.h"
+
+#define _hrt_ones(n) HRTCAT(_hrt_ones_, n)
+#define _hrt_ones_0x0 0x00000000U
+#define _hrt_ones_0x1 0x00000001U
+#define _hrt_ones_0x2 0x00000003U
+#define _hrt_ones_0x3 0x00000007U
+#define _hrt_ones_0x4 0x0000000FU
+#define _hrt_ones_0x5 0x0000001FU
+#define _hrt_ones_0x6 0x0000003FU
+#define _hrt_ones_0x7 0x0000007FU
+#define _hrt_ones_0x8 0x000000FFU
+#define _hrt_ones_0x9 0x000001FFU
+#define _hrt_ones_0xA 0x000003FFU
+#define _hrt_ones_0xB 0x000007FFU
+#define _hrt_ones_0xC 0x00000FFFU
+#define _hrt_ones_0xD 0x00001FFFU
+#define _hrt_ones_0xE 0x00003FFFU
+#define _hrt_ones_0xF 0x00007FFFU
+#define _hrt_ones_0x10 0x0000FFFFU
+#define _hrt_ones_0x11 0x0001FFFFU
+#define _hrt_ones_0x12 0x0003FFFFU
+#define _hrt_ones_0x13 0x0007FFFFU
+#define _hrt_ones_0x14 0x000FFFFFU
+#define _hrt_ones_0x15 0x001FFFFFU
+#define _hrt_ones_0x16 0x003FFFFFU
+#define _hrt_ones_0x17 0x007FFFFFU
+#define _hrt_ones_0x18 0x00FFFFFFU
+#define _hrt_ones_0x19 0x01FFFFFFU
+#define _hrt_ones_0x1A 0x03FFFFFFU
+#define _hrt_ones_0x1B 0x07FFFFFFU
+#define _hrt_ones_0x1C 0x0FFFFFFFU
+#define _hrt_ones_0x1D 0x1FFFFFFFU
+#define _hrt_ones_0x1E 0x3FFFFFFFU
+#define _hrt_ones_0x1F 0x7FFFFFFFU
+#define _hrt_ones_0x20 0xFFFFFFFFU
+
+#define _hrt_ones_0 _hrt_ones_0x0
+#define _hrt_ones_1 _hrt_ones_0x1
+#define _hrt_ones_2 _hrt_ones_0x2
+#define _hrt_ones_3 _hrt_ones_0x3
+#define _hrt_ones_4 _hrt_ones_0x4
+#define _hrt_ones_5 _hrt_ones_0x5
+#define _hrt_ones_6 _hrt_ones_0x6
+#define _hrt_ones_7 _hrt_ones_0x7
+#define _hrt_ones_8 _hrt_ones_0x8
+#define _hrt_ones_9 _hrt_ones_0x9
+#define _hrt_ones_10 _hrt_ones_0xA
+#define _hrt_ones_11 _hrt_ones_0xB
+#define _hrt_ones_12 _hrt_ones_0xC
+#define _hrt_ones_13 _hrt_ones_0xD
+#define _hrt_ones_14 _hrt_ones_0xE
+#define _hrt_ones_15 _hrt_ones_0xF
+#define _hrt_ones_16 _hrt_ones_0x10
+#define _hrt_ones_17 _hrt_ones_0x11
+#define _hrt_ones_18 _hrt_ones_0x12
+#define _hrt_ones_19 _hrt_ones_0x13
+#define _hrt_ones_20 _hrt_ones_0x14
+#define _hrt_ones_21 _hrt_ones_0x15
+#define _hrt_ones_22 _hrt_ones_0x16
+#define _hrt_ones_23 _hrt_ones_0x17
+#define _hrt_ones_24 _hrt_ones_0x18
+#define _hrt_ones_25 _hrt_ones_0x19
+#define _hrt_ones_26 _hrt_ones_0x1A
+#define _hrt_ones_27 _hrt_ones_0x1B
+#define _hrt_ones_28 _hrt_ones_0x1C
+#define _hrt_ones_29 _hrt_ones_0x1D
+#define _hrt_ones_30 _hrt_ones_0x1E
+#define _hrt_ones_31 _hrt_ones_0x1F
+#define _hrt_ones_32 _hrt_ones_0x20
+
+#define _hrt_mask(b, n) \
+ (_hrt_ones(n) << (b))
+#define _hrt_get_bits(w, b, n) \
+ (((w) >> (b)) & _hrt_ones(n))
+#define _hrt_set_bits(w, b, n, v) \
+ (((w) & ~_hrt_mask(b, n)) | (((v) & _hrt_ones(n)) << (b)))
+#define _hrt_get_bit(w, b) \
+ (((w) >> (b)) & 1)
+#define _hrt_set_bit(w, b, v) \
+ (((w) & (~(1 << (b)))) | (((v)&1) << (b)))
+#define _hrt_set_lower_half(w, v) \
+ _hrt_set_bits(w, 0, 16, v)
+#define _hrt_set_upper_half(w, v) \
+ _hrt_set_bits(w, 16, 16, v)
+
+#endif /* _HRT_BITS_H */
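The helpers above rely on token pasting: the width argument must expand to one of the literal suffixes defined for _hrt_ones (0..32 or their 0x forms). A hedged usage sketch, with the field position and width chosen purely for illustration:

	#include "bits.h"

	/* Illustrative read-modify-write of a 6-bit field starting at bit 8 of a
	 * register image; _hrt_ones(6) token-pastes to _hrt_ones_6 (0x3F). */
	static unsigned int bump_6bit_field_at_8(unsigned int reg)
	{
		unsigned int field = _hrt_get_bits(reg, 8, 6);

		return _hrt_set_bits(reg, 8, 6, field + 1);
	}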
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/cell_params.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/cell_params.h
new file mode 100644
index 000000000000..b5756bfe8eb6
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/cell_params.h
@@ -0,0 +1,42 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _cell_params_h
+#define _cell_params_h
+
+#define SP_PMEM_LOG_WIDTH_BITS 6 /*Width of PC, 64 bits, 8 bytes*/
+#define SP_ICACHE_TAG_BITS 4 /*size of tag*/
+#define SP_ICACHE_SET_BITS 8 /* 256 sets*/
+#define SP_ICACHE_BLOCKS_PER_SET_BITS 1 /* 2 way associative*/
+#define SP_ICACHE_BLOCK_ADDRESS_BITS 11 /* 2048 lines capacity*/
+
+#define SP_ICACHE_ADDRESS_BITS \
+ (SP_ICACHE_TAG_BITS+SP_ICACHE_BLOCK_ADDRESS_BITS)
+
+#define SP_PMEM_DEPTH (1<<SP_ICACHE_ADDRESS_BITS)
+
+#define SP_FIFO_0_DEPTH 0
+#define SP_FIFO_1_DEPTH 0
+#define SP_FIFO_2_DEPTH 0
+#define SP_FIFO_3_DEPTH 0
+#define SP_FIFO_4_DEPTH 0
+#define SP_FIFO_5_DEPTH 0
+#define SP_FIFO_6_DEPTH 0
+#define SP_FIFO_7_DEPTH 0
+
+
+#define SP_SLV_BUS_MAXBURSTSIZE 1
+
+#endif /* _cell_params_h */
+
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/css_receiver_2400_common_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/css_receiver_2400_common_defs.h
new file mode 100644
index 000000000000..f3054fe04d03
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/css_receiver_2400_common_defs.h
@@ -0,0 +1,200 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _css_receiver_2400_common_defs_h_
+#define _css_receiver_2400_common_defs_h_
+#ifndef _mipi_backend_common_defs_h_
+#define _mipi_backend_common_defs_h_
+
+#define _HRT_CSS_RECEIVER_2400_GEN_SHORT_DATA_WIDTH 16
+#define _HRT_CSS_RECEIVER_2400_GEN_SHORT_CH_ID_WIDTH 2
+#define _HRT_CSS_RECEIVER_2400_GEN_SHORT_FMT_TYPE_WIDTH 3
+#define _HRT_CSS_RECEIVER_2400_GEN_SHORT_STR_REAL_WIDTH (_HRT_CSS_RECEIVER_2400_GEN_SHORT_DATA_WIDTH + _HRT_CSS_RECEIVER_2400_GEN_SHORT_CH_ID_WIDTH + _HRT_CSS_RECEIVER_2400_GEN_SHORT_FMT_TYPE_WIDTH)
+#define _HRT_CSS_RECEIVER_2400_GEN_SHORT_STR_WIDTH 32 /* use 32 to be compatible with the streaming monitor; MSBs of the interface are tied to '0' */
+
+/* Definition of data format ID at the interface CSS_receiver capture/acquisition units */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_YUV420_8 24 /* 01 1000 YUV420 8-bit */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_YUV420_10 25 /* 01 1001 YUV420 10-bit */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_YUV420_8L 26 /* 01 1010 YUV420 8-bit legacy */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_YUV422_8 30 /* 01 1110 YUV422 8-bit */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_YUV422_10 31 /* 01 1111 YUV422 10-bit */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RGB444 32 /* 10 0000 RGB444 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RGB555 33 /* 10 0001 RGB555 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RGB565 34 /* 10 0010 RGB565 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RGB666 35 /* 10 0011 RGB666 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RGB888 36 /* 10 0100 RGB888 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RAW6 40 /* 10 1000 RAW6 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RAW7 41 /* 10 1001 RAW7 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RAW8 42 /* 10 1010 RAW8 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RAW10 43 /* 10 1011 RAW10 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RAW12 44 /* 10 1100 RAW12 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RAW14 45 /* 10 1101 RAW14 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_USR_DEF_1 48 /* 11 0000 JPEG [User Defined 8-bit Data Type 1] */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_USR_DEF_2 49 /* 11 0001 User Defined 8-bit Data Type 2 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_USR_DEF_3 50 /* 11 0010 User Defined 8-bit Data Type 3 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_USR_DEF_4 51 /* 11 0011 User Defined 8-bit Data Type 4 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_USR_DEF_5 52 /* 11 0100 User Defined 8-bit Data Type 5 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_USR_DEF_6 53 /* 11 0101 User Defined 8-bit Data Type 6 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_USR_DEF_7 54 /* 11 0110 User Defined 8-bit Data Type 7 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_USR_DEF_8 55 /* 11 0111 User Defined 8-bit Data Type 8 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_Emb 18 /* 01 0010 embedded eight bit non image data */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_SOF 0 /* 00 0000 frame start */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_EOF 1 /* 00 0001 frame end */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_SOL 2 /* 00 0010 line start */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_EOL 3 /* 00 0011 line end */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_GEN_SH1 8 /* 00 1000 Generic Short Packet Code 1 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_GEN_SH2 9 /* 00 1001 Generic Short Packet Code 2 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_GEN_SH3 10 /* 00 1010 Generic Short Packet Code 3 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_GEN_SH4 11 /* 00 1011 Generic Short Packet Code 4 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_GEN_SH5 12 /* 00 1100 Generic Short Packet Code 5 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_GEN_SH6 13 /* 00 1101 Generic Short Packet Code 6 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_GEN_SH7 14 /* 00 1110 Generic Short Packet Code 7 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_GEN_SH8 15 /* 00 1111 Generic Short Packet Code 8 */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_YUV420_8_CSPS 28 /* 01 1100 YUV420 8-bit (Chroma Shifted Pixel Sampling) */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_YUV420_10_CSPS 29 /* 01 1101 YUV420 10-bit (Chroma Shifted Pixel Sampling) */
+/* used reserved MIPI positions for these */
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RAW16 46
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RAW18 47
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RAW18_2 37
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_RAW18_3 38
+
+#define _HRT_CSS_RECEIVER_2400_DATA_FORMAT_ID_WIDTH 6
+
+/* Definition of format_types at the interface CSS --> input_selector*/
+/* !! Changes here should be copied to systems/isp/isp_css/bin/conv_transmitter_cmd.tcl !! */
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RGB888 0 // 36 'h24
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RGB555 1 // 33 'h
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RGB444 2 // 32
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RGB565 3 // 34
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RGB666 4 // 35
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RAW8 5 // 42
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RAW10 6 // 43
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RAW6 7 // 40
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RAW7 8 // 41
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RAW12 9 // 43
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RAW14 10 // 45
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_YUV420_8 11 // 30
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_YUV420_10 12 // 25
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_YUV422_8 13 // 30
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_YUV422_10 14 // 31
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_USR_DEF_1 15 // 48
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_YUV420_8L 16 // 26
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_Emb 17 // 18
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_USR_DEF_2 18 // 49
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_USR_DEF_3 19 // 50
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_USR_DEF_4 20 // 51
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_USR_DEF_5 21 // 52
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_USR_DEF_6 22 // 53
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_USR_DEF_7 23 // 54
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_USR_DEF_8 24 // 55
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_YUV420_8_CSPS 25 // 28
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_YUV420_10_CSPS 26 // 29
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RAW16 27 // ?
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RAW18 28 // ?
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RAW18_2 29 // ? Option 2 for depacketiser
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_RAW18_3 30 // ? Option 3 for depacketiser
+#define _HRT_CSS_RECEIVER_2400_FMT_TYPE_CUSTOM 31 // to signal custom decoding
+
+/* definition for state machine of data FIFO for decode different type of data */
+#define _HRT_CSS_RECEIVER_2400_YUV420_8_REPEAT_PTN 1
+#define _HRT_CSS_RECEIVER_2400_YUV420_10_REPEAT_PTN 5
+#define _HRT_CSS_RECEIVER_2400_YUV420_8L_REPEAT_PTN 1
+#define _HRT_CSS_RECEIVER_2400_YUV422_8_REPEAT_PTN 1
+#define _HRT_CSS_RECEIVER_2400_YUV422_10_REPEAT_PTN 5
+#define _HRT_CSS_RECEIVER_2400_RGB444_REPEAT_PTN 2
+#define _HRT_CSS_RECEIVER_2400_RGB555_REPEAT_PTN 2
+#define _HRT_CSS_RECEIVER_2400_RGB565_REPEAT_PTN 2
+#define _HRT_CSS_RECEIVER_2400_RGB666_REPEAT_PTN 9
+#define _HRT_CSS_RECEIVER_2400_RGB888_REPEAT_PTN 3
+#define _HRT_CSS_RECEIVER_2400_RAW6_REPEAT_PTN 3
+#define _HRT_CSS_RECEIVER_2400_RAW7_REPEAT_PTN 7
+#define _HRT_CSS_RECEIVER_2400_RAW8_REPEAT_PTN 1
+#define _HRT_CSS_RECEIVER_2400_RAW10_REPEAT_PTN 5
+#define _HRT_CSS_RECEIVER_2400_RAW12_REPEAT_PTN 3
+#define _HRT_CSS_RECEIVER_2400_RAW14_REPEAT_PTN 7
+
+#define _HRT_CSS_RECEIVER_2400_MAX_REPEAT_PTN _HRT_CSS_RECEIVER_2400_RGB666_REPEAT_PTN
+
+#define _HRT_CSS_RECEIVER_2400_BE_COMP_FMT_IDX 0
+#define _HRT_CSS_RECEIVER_2400_BE_COMP_FMT_WIDTH 3
+#define _HRT_CSS_RECEIVER_2400_BE_COMP_PRED_IDX 3
+#define _HRT_CSS_RECEIVER_2400_BE_COMP_PRED_WIDTH 1
+#define _HRT_CSS_RECEIVER_2400_BE_COMP_USD_BITS 4 /* bits per USD type */
+
+#define _HRT_CSS_RECEIVER_2400_BE_RAW16_DATAID_IDX 0
+#define _HRT_CSS_RECEIVER_2400_BE_RAW16_EN_IDX 6
+#define _HRT_CSS_RECEIVER_2400_BE_RAW18_DATAID_IDX 0
+#define _HRT_CSS_RECEIVER_2400_BE_RAW18_OPTION_IDX 6
+#define _HRT_CSS_RECEIVER_2400_BE_RAW18_EN_IDX 8
+
+#define _HRT_CSS_RECEIVER_2400_BE_COMP_NO_COMP 0
+#define _HRT_CSS_RECEIVER_2400_BE_COMP_10_6_10 1
+#define _HRT_CSS_RECEIVER_2400_BE_COMP_10_7_10 2
+#define _HRT_CSS_RECEIVER_2400_BE_COMP_10_8_10 3
+#define _HRT_CSS_RECEIVER_2400_BE_COMP_12_6_12 4
+#define _HRT_CSS_RECEIVER_2400_BE_COMP_12_7_12 5
+#define _HRT_CSS_RECEIVER_2400_BE_COMP_12_8_12 6
+
+
+/* packet bit definition */
+#define _HRT_CSS_RECEIVER_2400_PKT_SOP_IDX 32
+#define _HRT_CSS_RECEIVER_2400_PKT_SOP_BITS 1
+#define _HRT_CSS_RECEIVER_2400_PKT_CH_ID_IDX 22
+#define _HRT_CSS_RECEIVER_2400_PKT_CH_ID_BITS 2
+#define _HRT_CSS_RECEIVER_2400_PKT_FMT_ID_IDX 16
+#define _HRT_CSS_RECEIVER_2400_PKT_FMT_ID_BITS 6
+#define _HRT_CSS_RECEIVER_2400_PH_DATA_FIELD_IDX 0
+#define _HRT_CSS_RECEIVER_2400_PH_DATA_FIELD_BITS 16
+#define _HRT_CSS_RECEIVER_2400_PKT_PAYLOAD_IDX 0
+#define _HRT_CSS_RECEIVER_2400_PKT_PAYLOAD_BITS 32
+
+
+/*************************************************************************************************/
+/* Custom Decoding */
+/* These Custom Defs are defined based on design-time config in "csi_be_pixel_formatter.chdl" !! */
+/*************************************************************************************************/
+#define BE_CUST_EN_IDX 0 /* 2bits */
+#define BE_CUST_EN_DATAID_IDX 2 /* 6bits MIPI DATA ID */
+#define BE_CUST_EN_WIDTH 8
+#define BE_CUST_MODE_ALL 1 /* Enable Custom Decoding for all DATA IDs */
+#define BE_CUST_MODE_ONE 3 /* Enable Custom Decoding for ONE DATA ID, programmed in CUST_EN_DATA_ID */
+
+/* Data State config = {get_bits(6bits), valid(1bit)} */
+#define BE_CUST_DATA_STATE_S0_IDX 0 /* 7bits */
+#define BE_CUST_DATA_STATE_S1_IDX 7 /* 7bits */
+#define BE_CUST_DATA_STATE_S2_IDX 14 /* 7bits */
+#define BE_CUST_DATA_STATE_WIDTH 21
+#define BE_CUST_DATA_STATE_VALID_IDX 0 /* 1bits */
+#define BE_CUST_DATA_STATE_GETBITS_IDX 1 /* 6bits */
+
+/* Pixel Extractor config */
+#define BE_CUST_PIX_EXT_DATA_ALIGN_IDX 0 /* 5bits */
+#define BE_CUST_PIX_EXT_PIX_ALIGN_IDX 5 /* 5bits */
+#define BE_CUST_PIX_EXT_PIX_MASK_IDX 10 /* 18bits */
+#define BE_CUST_PIX_EXT_PIX_EN_IDX 28 /* 1bits */
+#define BE_CUST_PIX_EXT_WIDTH 29
+
+/* Pixel Valid & EoP config = {[eop,valid](especial), [eop,valid](normal)} */
+#define BE_CUST_PIX_VALID_EOP_P0_IDX 0 /* 4bits */
+#define BE_CUST_PIX_VALID_EOP_P1_IDX 4 /* 4bits */
+#define BE_CUST_PIX_VALID_EOP_P2_IDX 8 /* 4bits */
+#define BE_CUST_PIX_VALID_EOP_P3_IDX 12 /* 4bits */
+#define BE_CUST_PIX_VALID_EOP_WIDTH 16
+#define BE_CUST_PIX_VALID_EOP_NOR_VALID_IDX 0 /* Normal (NO less get_bits case) Valid - 1bits */
+#define BE_CUST_PIX_VALID_EOP_NOR_EOP_IDX 1 /* Normal (NO less get_bits case) EoP - 1bits */
+#define BE_CUST_PIX_VALID_EOP_ESP_VALID_IDX 2 /* Especial (less get_bits case) Valid - 1bits */
+#define BE_CUST_PIX_VALID_EOP_ESP_EOP_IDX 3 /* Especial (less get_bits case) EoP - 1bits */
+
+#endif /* _mipi_backend_common_defs_h_ */
+#endif /* _css_receiver_2400_common_defs_h_ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/css_receiver_2400_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/css_receiver_2400_defs.h
new file mode 100644
index 000000000000..6f5b7d3d3715
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/css_receiver_2400_defs.h
@@ -0,0 +1,258 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _css_receiver_2400_defs_h_
+#define _css_receiver_2400_defs_h_
+
+#include "css_receiver_2400_common_defs.h"
+
+#define CSS_RECEIVER_DATA_WIDTH 8
+#define CSS_RECEIVER_RX_TRIG 4
+#define CSS_RECEIVER_RF_WORD 32
+#define CSS_RECEIVER_IMG_PROC_RF_ADDR 10
+#define CSS_RECEIVER_CSI_RF_ADDR 4
+#define CSS_RECEIVER_DATA_OUT 12
+#define CSS_RECEIVER_CHN_NO 2
+#define CSS_RECEIVER_DWORD_CNT 11
+#define CSS_RECEIVER_FORMAT_TYP 5
+#define CSS_RECEIVER_HRESPONSE 2
+#define CSS_RECEIVER_STATE_WIDTH 3
+#define CSS_RECEIVER_FIFO_DAT 32
+#define CSS_RECEIVER_CNT_VAL 2
+#define CSS_RECEIVER_PRED10_VAL 10
+#define CSS_RECEIVER_PRED12_VAL 12
+#define CSS_RECEIVER_CNT_WIDTH 8
+#define CSS_RECEIVER_WORD_CNT 16
+#define CSS_RECEIVER_PIXEL_LEN 6
+#define CSS_RECEIVER_PIXEL_CNT 5
+#define CSS_RECEIVER_COMP_8_BIT 8
+#define CSS_RECEIVER_COMP_7_BIT 7
+#define CSS_RECEIVER_COMP_6_BIT 6
+
+#define CSI_CONFIG_WIDTH 4
+
+/* division of gen_short data, ch_id and fmt_type over streaming data interface */
+#define _HRT_CSS_RECEIVER_2400_GEN_SHORT_STR_DATA_BIT_LSB 0
+#define _HRT_CSS_RECEIVER_2400_GEN_SHORT_STR_FMT_TYPE_BIT_LSB (_HRT_CSS_RECEIVER_2400_GEN_SHORT_STR_DATA_BIT_LSB + _HRT_CSS_RECEIVER_2400_GEN_SHORT_DATA_WIDTH)
+#define _HRT_CSS_RECEIVER_2400_GEN_SHORT_STR_CH_ID_BIT_LSB (_HRT_CSS_RECEIVER_2400_GEN_SHORT_STR_FMT_TYPE_BIT_LSB + _HRT_CSS_RECEIVER_2400_GEN_SHORT_FMT_TYPE_WIDTH)
+#define _HRT_CSS_RECEIVER_2400_GEN_SHORT_STR_DATA_BIT_MSB (_HRT_CSS_RECEIVER_2400_GEN_SHORT_STR_FMT_TYPE_BIT_LSB - 1)
+#define _HRT_CSS_RECEIVER_2400_GEN_SHORT_STR_FMT_TYPE_BIT_MSB (_HRT_CSS_RECEIVER_2400_GEN_SHORT_STR_CH_ID_BIT_LSB - 1)
+#define _HRT_CSS_RECEIVER_2400_GEN_SHORT_STR_CH_ID_BIT_MSB (_HRT_CSS_RECEIVER_2400_GEN_SHORT_STR_REAL_WIDTH - 1)
+
+#define _HRT_CSS_RECEIVER_2400_REG_ALIGN 4
+#define _HRT_CSS_RECEIVER_2400_BYTES_PER_PKT 4
+
+#define hrt_css_receiver_2400_4_lane_port_offset 0x100
+#define hrt_css_receiver_2400_1_lane_port_offset 0x200
+#define hrt_css_receiver_2400_2_lane_port_offset 0x300
+#define hrt_css_receiver_2400_backend_port_offset 0x100
+
+#define _HRT_CSS_RECEIVER_2400_DEVICE_READY_REG_IDX 0
+#define _HRT_CSS_RECEIVER_2400_IRQ_STATUS_REG_IDX 1
+#define _HRT_CSS_RECEIVER_2400_IRQ_ENABLE_REG_IDX 2
+#define _HRT_CSS_RECEIVER_2400_CSI2_FUNC_PROG_REG_IDX 3
+#define _HRT_CSS_RECEIVER_2400_INIT_COUNT_REG_IDX 4
+#define _HRT_CSS_RECEIVER_2400_FS_TO_LS_DELAY_REG_IDX 7
+#define _HRT_CSS_RECEIVER_2400_LS_TO_DATA_DELAY_REG_IDX 8
+#define _HRT_CSS_RECEIVER_2400_DATA_TO_LE_DELAY_REG_IDX 9
+#define _HRT_CSS_RECEIVER_2400_LE_TO_FE_DELAY_REG_IDX 10
+#define _HRT_CSS_RECEIVER_2400_FE_TO_FS_DELAY_REG_IDX 11
+#define _HRT_CSS_RECEIVER_2400_LE_TO_LS_DELAY_REG_IDX 12
+#define _HRT_CSS_RECEIVER_2400_TWO_PIXEL_EN_REG_IDX 13
+#define _HRT_CSS_RECEIVER_2400_RAW16_18_DATAID_REG_IDX 14
+#define _HRT_CSS_RECEIVER_2400_SYNC_COUNT_REG_IDX 15
+#define _HRT_CSS_RECEIVER_2400_RX_COUNT_REG_IDX 16
+#define _HRT_CSS_RECEIVER_2400_BACKEND_RST_REG_IDX 17
+#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC0_REG0_IDX 18
+#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC0_REG1_IDX 19
+#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC1_REG0_IDX 20
+#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC1_REG1_IDX 21
+#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC2_REG0_IDX 22
+#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC2_REG1_IDX 23
+#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC3_REG0_IDX 24
+#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC3_REG1_IDX 25
+#define _HRT_CSS_RECEIVER_2400_RAW18_REG_IDX 26
+#define _HRT_CSS_RECEIVER_2400_FORCE_RAW8_REG_IDX 27
+#define _HRT_CSS_RECEIVER_2400_RAW16_REG_IDX 28
+
+/* Interrupt bits for IRQ_STATUS and IRQ_ENABLE registers */
+#define _HRT_CSS_RECEIVER_2400_IRQ_OVERRUN_BIT 0
+#define _HRT_CSS_RECEIVER_2400_IRQ_RESERVED_BIT 1
+#define _HRT_CSS_RECEIVER_2400_IRQ_SLEEP_MODE_ENTRY_BIT 2
+#define _HRT_CSS_RECEIVER_2400_IRQ_SLEEP_MODE_EXIT_BIT 3
+#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_SOT_HS_BIT 4
+#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_SOT_SYNC_HS_BIT 5
+#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_CONTROL_BIT 6
+#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_ECC_DOUBLE_BIT 7
+#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_ECC_CORRECTED_BIT 8
+#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_ECC_NO_CORRECTION_BIT 9
+#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_CRC_BIT 10
+#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_ID_BIT 11
+#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_FRAME_SYNC_BIT 12
+#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_FRAME_DATA_BIT 13
+#define _HRT_CSS_RECEIVER_2400_IRQ_DATA_TIMEOUT_BIT 14
+#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_ESCAPE_BIT 15
+#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_LINE_SYNC_BIT 16
+
+#define _HRT_CSS_RECEIVER_2400_IRQ_OVERRUN_CAUSE_ "Fifo Overrun"
+#define _HRT_CSS_RECEIVER_2400_IRQ_RESERVED_CAUSE_ "Reserved"
+#define _HRT_CSS_RECEIVER_2400_IRQ_SLEEP_MODE_ENTRY_CAUSE_ "Sleep mode entry"
+#define _HRT_CSS_RECEIVER_2400_IRQ_SLEEP_MODE_EXIT_CAUSE_ "Sleep mode exit"
+#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_SOT_HS_CAUSE_ "Error high speed SOT"
+#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_SOT_SYNC_HS_CAUSE_ "Error high speed sync SOT"
+#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_CONTROL_CAUSE_ "Error control"
+#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_ECC_DOUBLE_CAUSE_ "Error correction double bit"
+#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_ECC_CORRECTED_CAUSE_ "Error correction single bit"
+#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_ECC_NO_CORRECTION_CAUSE_ "No error"
+#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_CRC_CAUSE_ "Error cyclic redundancy check"
+#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_ID_CAUSE_ "Error id"
+#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_FRAME_SYNC_CAUSE_ "Error frame sync"
+#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_FRAME_DATA_CAUSE_ "Error frame data"
+#define _HRT_CSS_RECEIVER_2400_IRQ_DATA_TIMEOUT_CAUSE_ "Data time-out"
+#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_ESCAPE_CAUSE_ "Error escape"
+#define _HRT_CSS_RECEIVER_2400_IRQ_ERR_LINE_SYNC_CAUSE_ "Error line sync"
+
+/* Bits for CSI2_DEVICE_READY register */
+#define _HRT_CSS_RECEIVER_2400_CSI2_DEVICE_READY_IDX 0
+#define _HRT_CSS_RECEIVER_2400_CSI2_MASK_INIT_TIME_OUT_ERR_IDX 2
+#define _HRT_CSS_RECEIVER_2400_CSI2_MASK_OVER_RUN_ERR_IDX 3
+#define _HRT_CSS_RECEIVER_2400_CSI2_MASK_SOT_SYNC_ERR_IDX 4
+#define _HRT_CSS_RECEIVER_2400_CSI2_MASK_RECEIVE_DATA_TIME_OUT_ERR_IDX 5
+#define _HRT_CSS_RECEIVER_2400_CSI2_MASK_ECC_TWO_BIT_ERR_IDX 6
+#define _HRT_CSS_RECEIVER_2400_CSI2_MASK_DATA_ID_ERR_IDX 7
+
+
+/* Bits for CSI2_FUNC_PROG register */
+#define _HRT_CSS_RECEIVER_2400_CSI2_DATA_TIMEOUT_IDX 0
+#define _HRT_CSS_RECEIVER_2400_CSI2_DATA_TIMEOUT_BITS 19
+
+/* Bits for INIT_COUNT register */
+#define _HRT_CSS_RECEIVER_2400_INIT_TIMER_IDX 0
+#define _HRT_CSS_RECEIVER_2400_INIT_TIMER_BITS 16
+
+/* Bits for COUNT registers */
+#define _HRT_CSS_RECEIVER_2400_SYNC_COUNT_IDX 0
+#define _HRT_CSS_RECEIVER_2400_SYNC_COUNT_BITS 8
+#define _HRT_CSS_RECEIVER_2400_RX_COUNT_IDX 0
+#define _HRT_CSS_RECEIVER_2400_RX_COUNT_BITS 8
+
+/* Bits for RAW16_18_DATAID register */
+#define _HRT_CSS_RECEIVER_2400_RAW16_18_DATAID_RAW16_BITS_IDX 0
+#define _HRT_CSS_RECEIVER_2400_RAW16_18_DATAID_RAW16_BITS_BITS 6
+#define _HRT_CSS_RECEIVER_2400_RAW16_18_DATAID_RAW18_BITS_IDX 8
+#define _HRT_CSS_RECEIVER_2400_RAW16_18_DATAID_RAW18_BITS_BITS 6
+
+/* Bits for COMP_FORMAT register, this selects the compression data format */
+#define _HRT_CSS_RECEIVER_2400_COMP_RAW_BITS_IDX 0
+#define _HRT_CSS_RECEIVER_2400_COMP_RAW_BITS_BITS 8
+#define _HRT_CSS_RECEIVER_2400_COMP_NUM_BITS_IDX (_HRT_CSS_RECEIVER_2400_COMP_RAW_BITS_IDX + _HRT_CSS_RECEIVER_2400_COMP_RAW_BITS_BITS)
+#define _HRT_CSS_RECEIVER_2400_COMP_NUM_BITS_BITS 8
+
+/* Bits for COMP_PREDICT register, this selects the predictor algorithm */
+#define _HRT_CSS_RECEIVER_2400_PREDICT_NO_COMP 0
+#define _HRT_CSS_RECEIVER_2400_PREDICT_1 1
+#define _HRT_CSS_RECEIVER_2400_PREDICT_2 2
+
+/* Number of bits used for the delay registers */
+#define _HRT_CSS_RECEIVER_2400_DELAY_BITS 8
+
+/* Bits for COMP_SCHEME register, this selects the compression scheme for a VC */
+#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_USD1_BITS_IDX 0
+#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_USD2_BITS_IDX 5
+#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_USD3_BITS_IDX 10
+#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_USD4_BITS_IDX 15
+#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_USD5_BITS_IDX 20
+#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_USD6_BITS_IDX 25
+#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_USD7_BITS_IDX 0
+#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_USD8_BITS_IDX 5
+#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_USD_BITS_BITS 5
+#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_USD_FMT_BITS_IDX 0
+#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_USD_FMT_BITS_BITS 3
+#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_USD_PRED_BITS_IDX 3
+#define _HRT_CSS_RECEIVER_2400_COMP_SCHEME_USD_PRED_BITS_BITS 2
+
+
+/* Bits for backend RAW16 and RAW18 registers */
+
+#define _HRT_CSS_RECEIVER_2400_RAW18_DATAID_IDX 0
+#define _HRT_CSS_RECEIVER_2400_RAW18_DATAID_BITS 6
+#define _HRT_CSS_RECEIVER_2400_RAW18_OPTION_IDX 6
+#define _HRT_CSS_RECEIVER_2400_RAW18_OPTION_BITS 2
+#define _HRT_CSS_RECEIVER_2400_RAW18_EN_IDX 8
+#define _HRT_CSS_RECEIVER_2400_RAW18_EN_BITS 1
+
+#define _HRT_CSS_RECEIVER_2400_RAW16_DATAID_IDX 0
+#define _HRT_CSS_RECEIVER_2400_RAW16_DATAID_BITS 6
+#define _HRT_CSS_RECEIVER_2400_RAW16_OPTION_IDX 6
+#define _HRT_CSS_RECEIVER_2400_RAW16_OPTION_BITS 2
+#define _HRT_CSS_RECEIVER_2400_RAW16_EN_IDX 8
+#define _HRT_CSS_RECEIVER_2400_RAW16_EN_BITS 1
+
+/* These hsync and vsync values are for HSS simulation only */
+#define _HRT_CSS_RECEIVER_2400_HSYNC_VAL (1<<16)
+#define _HRT_CSS_RECEIVER_2400_VSYNC_VAL (1<<17)
+
+#define _HRT_CSS_RECEIVER_2400_BE_STREAMING_WIDTH 28
+#define _HRT_CSS_RECEIVER_2400_BE_STREAMING_PIX_A_LSB 0
+#define _HRT_CSS_RECEIVER_2400_BE_STREAMING_PIX_A_MSB (_HRT_CSS_RECEIVER_2400_BE_STREAMING_PIX_A_LSB + CSS_RECEIVER_DATA_OUT - 1)
+#define _HRT_CSS_RECEIVER_2400_BE_STREAMING_PIX_A_VAL_BIT (_HRT_CSS_RECEIVER_2400_BE_STREAMING_PIX_A_MSB + 1)
+#define _HRT_CSS_RECEIVER_2400_BE_STREAMING_PIX_B_LSB (_HRT_CSS_RECEIVER_2400_BE_STREAMING_PIX_A_VAL_BIT + 1)
+#define _HRT_CSS_RECEIVER_2400_BE_STREAMING_PIX_B_MSB (_HRT_CSS_RECEIVER_2400_BE_STREAMING_PIX_B_LSB + CSS_RECEIVER_DATA_OUT - 1)
+#define _HRT_CSS_RECEIVER_2400_BE_STREAMING_PIX_B_VAL_BIT (_HRT_CSS_RECEIVER_2400_BE_STREAMING_PIX_B_MSB + 1)
+#define _HRT_CSS_RECEIVER_2400_BE_STREAMING_SOP_BIT (_HRT_CSS_RECEIVER_2400_BE_STREAMING_PIX_B_VAL_BIT + 1)
+#define _HRT_CSS_RECEIVER_2400_BE_STREAMING_EOP_BIT (_HRT_CSS_RECEIVER_2400_BE_STREAMING_SOP_BIT + 1)
+
+// SH Backend Register IDs
+#define _HRT_CSS_RECEIVER_2400_BE_GSP_ACC_OVL_REG_IDX 0
+#define _HRT_CSS_RECEIVER_2400_BE_SRST_REG_IDX 1
+#define _HRT_CSS_RECEIVER_2400_BE_TWO_PPC_REG_IDX 2
+#define _HRT_CSS_RECEIVER_2400_BE_COMP_FORMAT_REG0_IDX 3
+#define _HRT_CSS_RECEIVER_2400_BE_COMP_FORMAT_REG1_IDX 4
+#define _HRT_CSS_RECEIVER_2400_BE_COMP_FORMAT_REG2_IDX 5
+#define _HRT_CSS_RECEIVER_2400_BE_COMP_FORMAT_REG3_IDX 6
+#define _HRT_CSS_RECEIVER_2400_BE_SEL_REG_IDX 7
+#define _HRT_CSS_RECEIVER_2400_BE_RAW16_CONFIG_REG_IDX 8
+#define _HRT_CSS_RECEIVER_2400_BE_RAW18_CONFIG_REG_IDX 9
+#define _HRT_CSS_RECEIVER_2400_BE_FORCE_RAW8_REG_IDX 10
+#define _HRT_CSS_RECEIVER_2400_BE_IRQ_STATUS_REG_IDX 11
+#define _HRT_CSS_RECEIVER_2400_BE_IRQ_CLEAR_REG_IDX 12
+#define _HRT_CSS_RECEIVER_2400_BE_CUST_EN_REG_IDX 13
+#define _HRT_CSS_RECEIVER_2400_BE_CUST_DATA_STATE_REG_IDX 14 /* Data State 0,1,2 config */
+#define _HRT_CSS_RECEIVER_2400_BE_CUST_PIX_EXT_S0P0_REG_IDX 15 /* Pixel Extractor config for Data State 0 & Pix 0 */
+#define _HRT_CSS_RECEIVER_2400_BE_CUST_PIX_EXT_S0P1_REG_IDX 16 /* Pixel Extractor config for Data State 0 & Pix 1 */
+#define _HRT_CSS_RECEIVER_2400_BE_CUST_PIX_EXT_S0P2_REG_IDX 17 /* Pixel Extractor config for Data State 0 & Pix 2 */
+#define _HRT_CSS_RECEIVER_2400_BE_CUST_PIX_EXT_S0P3_REG_IDX 18 /* Pixel Extractor config for Data State 0 & Pix 3 */
+#define _HRT_CSS_RECEIVER_2400_BE_CUST_PIX_EXT_S1P0_REG_IDX 19 /* Pixel Extractor config for Data State 1 & Pix 0 */
+#define _HRT_CSS_RECEIVER_2400_BE_CUST_PIX_EXT_S1P1_REG_IDX 20 /* Pixel Extractor config for Data State 1 & Pix 1 */
+#define _HRT_CSS_RECEIVER_2400_BE_CUST_PIX_EXT_S1P2_REG_IDX 21 /* Pixel Extractor config for Data State 1 & Pix 2 */
+#define _HRT_CSS_RECEIVER_2400_BE_CUST_PIX_EXT_S1P3_REG_IDX 22 /* Pixel Extractor config for Data State 1 & Pix 3 */
+#define _HRT_CSS_RECEIVER_2400_BE_CUST_PIX_EXT_S2P0_REG_IDX 23 /* Pixel Extractor config for Data State 2 & Pix 0 */
+#define _HRT_CSS_RECEIVER_2400_BE_CUST_PIX_EXT_S2P1_REG_IDX 24 /* Pixel Extractor config for Data State 2 & Pix 1 */
+#define _HRT_CSS_RECEIVER_2400_BE_CUST_PIX_EXT_S2P2_REG_IDX 25 /* Pixel Extractor config for Data State 2 & Pix 2 */
+#define _HRT_CSS_RECEIVER_2400_BE_CUST_PIX_EXT_S2P3_REG_IDX 26 /* Pixel Extractor config for Data State 2 & Pix 3 */
+#define _HRT_CSS_RECEIVER_2400_BE_CUST_PIX_VALID_EOP_REG_IDX 27 /* Pixel Valid & EoP config for Pix 0,1,2,3 */
+
+#define _HRT_CSS_RECEIVER_2400_BE_NOF_REGISTERS 28
+
+#define _HRT_CSS_RECEIVER_2400_BE_SRST_HE 0
+#define _HRT_CSS_RECEIVER_2400_BE_SRST_RCF 1
+#define _HRT_CSS_RECEIVER_2400_BE_SRST_PF 2
+#define _HRT_CSS_RECEIVER_2400_BE_SRST_SM 3
+#define _HRT_CSS_RECEIVER_2400_BE_SRST_PD 4
+#define _HRT_CSS_RECEIVER_2400_BE_SRST_SD 5
+#define _HRT_CSS_RECEIVER_2400_BE_SRST_OT 6
+#define _HRT_CSS_RECEIVER_2400_BE_SRST_BC 7
+#define _HRT_CSS_RECEIVER_2400_BE_SRST_WIDTH 8
+
+#endif /* _css_receiver_2400_defs_h_ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/defs.h
new file mode 100644
index 000000000000..47505f41790c
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/defs.h
@@ -0,0 +1,36 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _HRT_DEFS_H_
+#define _HRT_DEFS_H_
+
+#ifndef HRTCAT
+#define _HRTCAT(m, n) m##n
+#define HRTCAT(m, n) _HRTCAT(m, n)
+#endif
+
+#ifndef HRTSTR
+#define _HRTSTR(x) #x
+#define HRTSTR(x) _HRTSTR(x)
+#endif
+
+#ifndef HRTMIN
+#define HRTMIN(a, b) (((a) < (b)) ? (a) : (b))
+#endif
+
+#ifndef HRTMAX
+#define HRTMAX(a, b) (((a) > (b)) ? (a) : (b))
+#endif
+
+#endif /* _HRT_DEFS_H_ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/dma_v2_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/dma_v2_defs.h
new file mode 100644
index 000000000000..d184a8b313c9
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/dma_v2_defs.h
@@ -0,0 +1,199 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _dma_v2_defs_h
+#define _dma_v2_defs_h
+
+#define _DMA_V2_NUM_CHANNELS_ID MaxNumChannels
+#define _DMA_V2_CONNECTIONS_ID Connections
+#define _DMA_V2_DEV_ELEM_WIDTHS_ID DevElemWidths
+#define _DMA_V2_DEV_FIFO_DEPTH_ID DevFifoDepth
+#define _DMA_V2_DEV_FIFO_RD_LAT_ID DevFifoRdLat
+#define _DMA_V2_DEV_FIFO_LAT_BYPASS_ID DevFifoRdLatBypass
+#define _DMA_V2_DEV_NO_BURST_ID DevNoBurst
+#define _DMA_V2_DEV_RD_ACCEPT_ID DevRdAccept
+#define _DMA_V2_DEV_SRMD_ID DevSRMD
+#define _DMA_V2_DEV_HAS_CRUN_ID CRunMasters
+#define _DMA_V2_CTRL_ACK_FIFO_DEPTH_ID CtrlAckFifoDepth
+#define _DMA_V2_CMD_FIFO_DEPTH_ID CommandFifoDepth
+#define _DMA_V2_CMD_FIFO_RD_LAT_ID CommandFifoRdLat
+#define _DMA_V2_CMD_FIFO_LAT_BYPASS_ID CommandFifoRdLatBypass
+#define _DMA_V2_NO_PACK_ID has_no_pack
+
+#define _DMA_V2_REG_ALIGN 4
+#define _DMA_V2_REG_ADDR_BITS 2
+
+/* Command word */
+#define _DMA_V2_CMD_IDX 0
+#define _DMA_V2_CMD_BITS 6
+#define _DMA_V2_CHANNEL_IDX (_DMA_V2_CMD_IDX + _DMA_V2_CMD_BITS)
+#define _DMA_V2_CHANNEL_BITS 5
+
+/* The command to set a parameter contains the PARAM field next */
+#define _DMA_V2_PARAM_IDX (_DMA_V2_CHANNEL_IDX + _DMA_V2_CHANNEL_BITS)
+#define _DMA_V2_PARAM_BITS 4
+
+/* Commands to read, write or init specific blocks contain these
+ three values */
+#define _DMA_V2_SPEC_DEV_A_XB_IDX (_DMA_V2_CHANNEL_IDX + _DMA_V2_CHANNEL_BITS)
+#define _DMA_V2_SPEC_DEV_A_XB_BITS 8
+#define _DMA_V2_SPEC_DEV_B_XB_IDX (_DMA_V2_SPEC_DEV_A_XB_IDX + _DMA_V2_SPEC_DEV_A_XB_BITS)
+#define _DMA_V2_SPEC_DEV_B_XB_BITS 8
+#define _DMA_V2_SPEC_YB_IDX (_DMA_V2_SPEC_DEV_B_XB_IDX + _DMA_V2_SPEC_DEV_B_XB_BITS)
+#define _DMA_V2_SPEC_YB_BITS (32-_DMA_V2_SPEC_DEV_B_XB_BITS-_DMA_V2_SPEC_DEV_A_XB_BITS-_DMA_V2_CMD_BITS-_DMA_V2_CHANNEL_BITS)
+
+/* */
+#define _DMA_V2_CMD_CTRL_IDX 4
+#define _DMA_V2_CMD_CTRL_BITS 4
+
+/* Packing setup word */
+#define _DMA_V2_CONNECTION_IDX 0
+#define _DMA_V2_CONNECTION_BITS 4
+#define _DMA_V2_EXTENSION_IDX (_DMA_V2_CONNECTION_IDX + _DMA_V2_CONNECTION_BITS)
+#define _DMA_V2_EXTENSION_BITS 1
+
+/* Elements packing word */
+#define _DMA_V2_ELEMENTS_IDX 0
+#define _DMA_V2_ELEMENTS_BITS 8
+#define _DMA_V2_LEFT_CROPPING_IDX (_DMA_V2_ELEMENTS_IDX + _DMA_V2_ELEMENTS_BITS)
+#define _DMA_V2_LEFT_CROPPING_BITS 8
+
+#define _DMA_V2_WIDTH_IDX 0
+#define _DMA_V2_WIDTH_BITS 16
+
+#define _DMA_V2_HEIGHT_IDX 0
+#define _DMA_V2_HEIGHT_BITS 16
+
+#define _DMA_V2_STRIDE_IDX 0
+#define _DMA_V2_STRIDE_BITS 32
+
+/* Command IDs */
+#define _DMA_V2_MOVE_B2A_COMMAND 0
+#define _DMA_V2_MOVE_B2A_BLOCK_COMMAND 1
+#define _DMA_V2_MOVE_B2A_NO_SYNC_CHK_COMMAND 2
+#define _DMA_V2_MOVE_B2A_BLOCK_NO_SYNC_CHK_COMMAND 3
+#define _DMA_V2_MOVE_A2B_COMMAND 4
+#define _DMA_V2_MOVE_A2B_BLOCK_COMMAND 5
+#define _DMA_V2_MOVE_A2B_NO_SYNC_CHK_COMMAND 6
+#define _DMA_V2_MOVE_A2B_BLOCK_NO_SYNC_CHK_COMMAND 7
+#define _DMA_V2_INIT_A_COMMAND 8
+#define _DMA_V2_INIT_A_BLOCK_COMMAND 9
+#define _DMA_V2_INIT_A_NO_SYNC_CHK_COMMAND 10
+#define _DMA_V2_INIT_A_BLOCK_NO_SYNC_CHK_COMMAND 11
+#define _DMA_V2_INIT_B_COMMAND 12
+#define _DMA_V2_INIT_B_BLOCK_COMMAND 13
+#define _DMA_V2_INIT_B_NO_SYNC_CHK_COMMAND 14
+#define _DMA_V2_INIT_B_BLOCK_NO_SYNC_CHK_COMMAND 15
+#define _DMA_V2_NO_ACK_MOVE_B2A_NO_SYNC_CHK_COMMAND (_DMA_V2_MOVE_B2A_NO_SYNC_CHK_COMMAND + 16)
+#define _DMA_V2_NO_ACK_MOVE_B2A_BLOCK_NO_SYNC_CHK_COMMAND (_DMA_V2_MOVE_B2A_BLOCK_NO_SYNC_CHK_COMMAND + 16)
+#define _DMA_V2_NO_ACK_MOVE_A2B_NO_SYNC_CHK_COMMAND (_DMA_V2_MOVE_A2B_NO_SYNC_CHK_COMMAND + 16)
+#define _DMA_V2_NO_ACK_MOVE_A2B_BLOCK_NO_SYNC_CHK_COMMAND (_DMA_V2_MOVE_A2B_BLOCK_NO_SYNC_CHK_COMMAND + 16)
+#define _DMA_V2_NO_ACK_INIT_A_NO_SYNC_CHK_COMMAND (_DMA_V2_INIT_A_NO_SYNC_CHK_COMMAND + 16)
+#define _DMA_V2_NO_ACK_INIT_A_BLOCK_NO_SYNC_CHK_COMMAND (_DMA_V2_INIT_A_BLOCK_NO_SYNC_CHK_COMMAND + 16)
+#define _DMA_V2_NO_ACK_INIT_B_NO_SYNC_CHK_COMMAND (_DMA_V2_INIT_B_NO_SYNC_CHK_COMMAND + 16)
+#define _DMA_V2_NO_ACK_INIT_B_BLOCK_NO_SYNC_CHK_COMMAND (_DMA_V2_INIT_B_BLOCK_NO_SYNC_CHK_COMMAND + 16)
+#define _DMA_V2_CONFIG_CHANNEL_COMMAND 32
+#define _DMA_V2_SET_CHANNEL_PARAM_COMMAND 33
+#define _DMA_V2_SET_CRUN_COMMAND 62
+
+/* Channel Parameter IDs */
+#define _DMA_V2_PACKING_SETUP_PARAM 0
+#define _DMA_V2_STRIDE_A_PARAM 1
+#define _DMA_V2_ELEM_CROPPING_A_PARAM 2
+#define _DMA_V2_WIDTH_A_PARAM 3
+#define _DMA_V2_STRIDE_B_PARAM 4
+#define _DMA_V2_ELEM_CROPPING_B_PARAM 5
+#define _DMA_V2_WIDTH_B_PARAM 6
+#define _DMA_V2_HEIGHT_PARAM 7
+#define _DMA_V2_QUEUED_CMDS 8
+
+/* Parameter Constants */
+#define _DMA_V2_ZERO_EXTEND 0
+#define _DMA_V2_SIGN_EXTEND 1
+
+ /* SLAVE address map */
+#define _DMA_V2_SEL_FSM_CMD 0
+#define _DMA_V2_SEL_CH_REG 1
+#define _DMA_V2_SEL_CONN_GROUP 2
+#define _DMA_V2_SEL_DEV_INTERF 3
+
+#define _DMA_V2_ADDR_SEL_COMP_IDX 12
+#define _DMA_V2_ADDR_SEL_COMP_BITS 4
+#define _DMA_V2_ADDR_SEL_CH_REG_IDX 2
+#define _DMA_V2_ADDR_SEL_CH_REG_BITS 6
+#define _DMA_V2_ADDR_SEL_PARAM_IDX (_DMA_V2_ADDR_SEL_CH_REG_BITS+_DMA_V2_ADDR_SEL_CH_REG_IDX)
+#define _DMA_V2_ADDR_SEL_PARAM_BITS 4
+
+#define _DMA_V2_ADDR_SEL_GROUP_COMP_IDX 2
+#define _DMA_V2_ADDR_SEL_GROUP_COMP_BITS 6
+#define _DMA_V2_ADDR_SEL_GROUP_COMP_INFO_IDX (_DMA_V2_ADDR_SEL_GROUP_COMP_BITS + _DMA_V2_ADDR_SEL_GROUP_COMP_IDX)
+#define _DMA_V2_ADDR_SEL_GROUP_COMP_INFO_BITS 4
+
+#define _DMA_V2_ADDR_SEL_DEV_INTERF_IDX_IDX 2
+#define _DMA_V2_ADDR_SEL_DEV_INTERF_IDX_BITS 6
+#define _DMA_V2_ADDR_SEL_DEV_INTERF_INFO_IDX (_DMA_V2_ADDR_SEL_DEV_INTERF_IDX_IDX+_DMA_V2_ADDR_SEL_DEV_INTERF_IDX_BITS)
+#define _DMA_V2_ADDR_SEL_DEV_INTERF_INFO_BITS 4
+
+#define _DMA_V2_FSM_GROUP_CMD_IDX 0
+#define _DMA_V2_FSM_GROUP_ADDR_SRC_IDX 1
+#define _DMA_V2_FSM_GROUP_ADDR_DEST_IDX 2
+#define _DMA_V2_FSM_GROUP_CMD_CTRL_IDX 3
+#define _DMA_V2_FSM_GROUP_FSM_CTRL_IDX 4
+#define _DMA_V2_FSM_GROUP_FSM_PACK_IDX 5
+#define _DMA_V2_FSM_GROUP_FSM_REQ_IDX 6
+#define _DMA_V2_FSM_GROUP_FSM_WR_IDX 7
+
+#define _DMA_V2_FSM_GROUP_FSM_CTRL_STATE_IDX 0
+#define _DMA_V2_FSM_GROUP_FSM_CTRL_REQ_DEV_IDX 1
+#define _DMA_V2_FSM_GROUP_FSM_CTRL_REQ_ADDR_IDX 2
+#define _DMA_V2_FSM_GROUP_FSM_CTRL_REQ_STRIDE_IDX 3
+#define _DMA_V2_FSM_GROUP_FSM_CTRL_REQ_XB_IDX 4
+#define _DMA_V2_FSM_GROUP_FSM_CTRL_REQ_YB_IDX 5
+#define _DMA_V2_FSM_GROUP_FSM_CTRL_PACK_REQ_DEV_IDX 6
+#define _DMA_V2_FSM_GROUP_FSM_CTRL_PACK_WR_DEV_IDX 7
+#define _DMA_V2_FSM_GROUP_FSM_CTRL_WR_ADDR_IDX 8
+#define _DMA_V2_FSM_GROUP_FSM_CTRL_WR_STRIDE_IDX 9
+#define _DMA_V2_FSM_GROUP_FSM_CTRL_PACK_REQ_XB_IDX 10
+#define _DMA_V2_FSM_GROUP_FSM_CTRL_PACK_WR_YB_IDX 11
+#define _DMA_V2_FSM_GROUP_FSM_CTRL_PACK_WR_XB_IDX 12
+#define _DMA_V2_FSM_GROUP_FSM_CTRL_PACK_ELEM_REQ_IDX 13
+#define _DMA_V2_FSM_GROUP_FSM_CTRL_PACK_ELEM_WR_IDX 14
+#define _DMA_V2_FSM_GROUP_FSM_CTRL_PACK_S_Z_IDX 15
+#define _DMA_V2_FSM_GROUP_FSM_CTRL_CMD_CTRL_IDX 15
+
+#define _DMA_V2_FSM_GROUP_FSM_PACK_STATE_IDX 0
+#define _DMA_V2_FSM_GROUP_FSM_PACK_CNT_YB_IDX 1
+#define _DMA_V2_FSM_GROUP_FSM_PACK_CNT_XB_REQ_IDX 2
+#define _DMA_V2_FSM_GROUP_FSM_PACK_CNT_XB_WR_IDX 3
+
+#define _DMA_V2_FSM_GROUP_FSM_REQ_STATE_IDX 0
+#define _DMA_V2_FSM_GROUP_FSM_REQ_CNT_YB_IDX 1
+#define _DMA_V2_FSM_GROUP_FSM_REQ_CNT_XB_IDX 2
+#define _DMA_V2_FSM_GROUP_FSM_REQ_XB_REMAINING_IDX 3
+#define _DMA_V2_FSM_GROUP_FSM_REQ_CNT_BURST_IDX 4
+
+#define _DMA_V2_FSM_GROUP_FSM_WR_STATE_IDX 0
+#define _DMA_V2_FSM_GROUP_FSM_WR_CNT_YB_IDX 1
+#define _DMA_V2_FSM_GROUP_FSM_WR_CNT_XB_IDX 2
+#define _DMA_V2_FSM_GROUP_FSM_WR_XB_REMAINING_IDX 3
+#define _DMA_V2_FSM_GROUP_FSM_WR_CNT_BURST_IDX 4
+
+#define _DMA_V2_DEV_INTERF_REQ_SIDE_STATUS_IDX 0
+#define _DMA_V2_DEV_INTERF_SEND_SIDE_STATUS_IDX 1
+#define _DMA_V2_DEV_INTERF_FIFO_STATUS_IDX 2
+#define _DMA_V2_DEV_INTERF_REQ_ONLY_COMPLETE_BURST_IDX 3
+#define _DMA_V2_DEV_INTERF_MAX_BURST_IDX 4
+#define _DMA_V2_DEV_INTERF_CHK_ADDR_ALIGN 5
+
+#endif /* _dma_v2_defs_h */
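The command-word fields above are contiguous: the command ID occupies bits [0..5], the channel bits [6..10] and, for parameter commands, the parameter ID bits [11..14]. A hedged packing sketch (how the parameter value itself is transferred is not specified by this header and is left out):

	#include "dma_v2_defs.h"

	/* Illustrative composition of a "set channel parameter" command word from
	 * the _DMA_V2_*_IDX field positions defined above. */
	static unsigned int dma_v2_set_param_cmd(unsigned int channel,
						 unsigned int param)
	{
		return (_DMA_V2_SET_CHANNEL_PARAM_COMMAND << _DMA_V2_CMD_IDX) |
		       (channel << _DMA_V2_CHANNEL_IDX) |
		       (param << _DMA_V2_PARAM_IDX);
	}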
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/gdc_v2_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/gdc_v2_defs.h
new file mode 100644
index 000000000000..77722d205701
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/gdc_v2_defs.h
@@ -0,0 +1,170 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef HRT_GDC_v2_defs_h_
+#define HRT_GDC_v2_defs_h_
+
+#define HRT_GDC_IS_V2
+
+#define HRT_GDC_N 1024 /* Top-level design constant, equal to the number of entries in the LUT */
+#define HRT_GDC_FRAC_BITS 10 /* Number of fractional bits in the GDC block, driven by the size of the LUT */
+
+#define HRT_GDC_BLI_FRAC_BITS 4 /* Number of fractional bits for the bi-linear interpolation type */
+#define HRT_GDC_BLI_COEF_ONE (1 << HRT_GDC_BLI_FRAC_BITS)
+
+#define HRT_GDC_BCI_COEF_BITS 14 /* 14 bits per coefficient */
+#define HRT_GDC_BCI_COEF_ONE (1 << (HRT_GDC_BCI_COEF_BITS-2)) /* We represent signed 10 bit coefficients. */
+ /* The supported range is [-256, .., +256] */
+ /* in 14-bit signed notation, */
+ /* We need all ten bits (MSB must be zero). */
+ /* -s is inserted to solve this issue, and */
+ /* therefore "1" is equal to +256. */
+#define HRT_GDC_BCI_COEF_MASK ((1 << HRT_GDC_BCI_COEF_BITS) - 1)
+
+#define HRT_GDC_LUT_BYTES (HRT_GDC_N*4*2) /* 1024 addresses, 4 coefficients per address, */
+ /* 2 bytes per coefficient */
+
+#define _HRT_GDC_REG_ALIGN 4
+
+ // 31 30 29 25 24 0
+ // |-----|---|--------|------------------------|
+ // | CMD | C | Reg_ID | Value |
+
+
+ // There are just two commands possible for the GDC block:
+ // 1 - Configure reg
+ // 0 - Data token
+
+ // C - Reserved bit
+ // Used in protocol to indicate whether it is C-run or other type of runs
+ // In case of C-run, this bit has a value of 1, for all the other runs, it is 0.
+
+ // Reg_ID - Address of the register to be configured
+
+ // Value - Value to store to the addressed register, maximum of 24 bits
+
+ // Configure reg command is not followed by any other token.
+ // The address of the register and the data to be filled in is contained in the same token
+
+ // When the first data token is received, it must be:
+ // 1. FRX and FRY (device configured in one of the scaling modes) ***DEFAULT MODE***, or,
+ // 2. P0'X (device configured in one of the tetragon modes)
+ // After the first data token is received, pre-defined number of tokens with the following meaning follow:
+ // 1. two tokens: SRC address ; DST address
+ // 2. nine tokens: P0'Y, .., P3'Y ; SRC address ; DST address
+
+#define HRT_GDC_CONFIG_CMD 1
+#define HRT_GDC_DATA_CMD 0
+
+
+#define HRT_GDC_CMD_POS 31
+#define HRT_GDC_CMD_BITS 1
+#define HRT_GDC_CRUN_POS 30
+#define HRT_GDC_REG_ID_POS 25
+#define HRT_GDC_REG_ID_BITS 5
+#define HRT_GDC_DATA_POS 0
+#define HRT_GDC_DATA_BITS 25
+
+#define HRT_GDC_FRYIPXFRX_BITS 26
+#define HRT_GDC_P0X_BITS 23
+
+
+#define HRT_GDC_MAX_OXDIM (8192-64)
+#define HRT_GDC_MAX_OYDIM 4095
+#define HRT_GDC_MAX_IXDIM (8192-64)
+#define HRT_GDC_MAX_IYDIM 4095
+#define HRT_GDC_MAX_DS_FAC 16
+#define HRT_GDC_MAX_DX (HRT_GDC_MAX_DS_FAC*HRT_GDC_N - 1)
+#define HRT_GDC_MAX_DY HRT_GDC_MAX_DX
+
+
+/* GDC lookup table entries are 10-bit values, but they are
+   stored two by two as 32-bit values, yielding 16 bits per entry.
+   A GDC lookup table contains 64 * 4 elements */
+
+#define HRT_GDC_PERF_1_1_pix 0
+#define HRT_GDC_PERF_2_1_pix 1
+#define HRT_GDC_PERF_1_2_pix 2
+#define HRT_GDC_PERF_2_2_pix 3
+
+#define HRT_GDC_NND_MODE 0
+#define HRT_GDC_BLI_MODE 1
+#define HRT_GDC_BCI_MODE 2
+#define HRT_GDC_LUT_MODE 3
+
+#define HRT_GDC_SCAN_STB 0
+#define HRT_GDC_SCAN_STR 1
+
+#define HRT_GDC_MODE_SCALING 0
+#define HRT_GDC_MODE_TETRAGON 1
+
+#define HRT_GDC_LUT_COEFF_OFFSET 16
+#define HRT_GDC_FRY_BIT_OFFSET 16
+// FRYIPXFRX is the only register where we store two values in one field,
+// to save one token in the scaling protocol.
+// This way, the scaling protocol needs three tokens;
+// otherwise, it would have needed four.
+// The register bit-map is:
+// 31 26 25 16 15 10 9 0
+// |------|----------|------|----------|
+// | XXXX | FRY | IPX | FRX |
+
+
+#define HRT_GDC_CE_FSM0_POS 0
+#define HRT_GDC_CE_FSM0_LEN 2
+#define HRT_GDC_CE_OPY_POS 2
+#define HRT_GDC_CE_OPY_LEN 14
+#define HRT_GDC_CE_OPX_POS 16
+#define HRT_GDC_CE_OPX_LEN 16
+// CHK_ENGINE register bit-map:
+// 31 16 15 2 1 0
+// |----------------|-----------|----|
+// | OPX | OPY |FSM0|
+// However, for the time being at least,
+// this implementation is meaningless in the HSS model,
+// so we just return 0.
+
+
+#define HRT_GDC_CHK_ENGINE_IDX 0
+#define HRT_GDC_WOIX_IDX 1
+#define HRT_GDC_WOIY_IDX 2
+#define HRT_GDC_BPP_IDX 3
+#define HRT_GDC_FRYIPXFRX_IDX 4
+#define HRT_GDC_OXDIM_IDX 5
+#define HRT_GDC_OYDIM_IDX 6
+#define HRT_GDC_SRC_ADDR_IDX 7
+#define HRT_GDC_SRC_END_ADDR_IDX 8
+#define HRT_GDC_SRC_WRAP_ADDR_IDX 9
+#define HRT_GDC_SRC_STRIDE_IDX 10
+#define HRT_GDC_DST_ADDR_IDX 11
+#define HRT_GDC_DST_STRIDE_IDX 12
+#define HRT_GDC_DX_IDX 13
+#define HRT_GDC_DY_IDX 14
+#define HRT_GDC_P0X_IDX 15
+#define HRT_GDC_P0Y_IDX 16
+#define HRT_GDC_P1X_IDX 17
+#define HRT_GDC_P1Y_IDX 18
+#define HRT_GDC_P2X_IDX 19
+#define HRT_GDC_P2Y_IDX 20
+#define HRT_GDC_P3X_IDX 21
+#define HRT_GDC_P3Y_IDX 22
+#define HRT_GDC_PERF_POINT_IDX 23 // 1x1 ; 1x2 ; 2x1 ; 2x2 pixels per cc
+#define HRT_GDC_INTERP_TYPE_IDX 24 // NND ; BLI ; BCI ; LUT
+#define HRT_GDC_SCAN_IDX 25 // 0 = STB (Slide To Bottom) ; 1 = STR (Slide To Right)
+#define HRT_GDC_PROC_MODE_IDX 26 // 0 = Scaling ; 1 = Tetragon
+
+#define HRT_GDC_LUT_IDX 32
+
+
+#endif /* HRT_GDC_v2_defs_h_ */
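The token layout documented above (CMD in bit 31, the C-run flag in bit 30, Reg_ID in bits [25..29], Value in bits [0..24]) can be composed directly from the HRT_GDC_* positions. A hedged sketch; the 32-bit unsigned int token width is an assumption of the example:

	#include "gdc_v2_defs.h"

	/* Illustrative "configure register" token: command bit set, C-run bit left
	 * at 0, the register ID and its (up to) 25-bit value packed per the layout
	 * comment above. */
	static unsigned int gdc_cfg_token(unsigned int reg_id, unsigned int value)
	{
		return ((unsigned int)HRT_GDC_CONFIG_CMD << HRT_GDC_CMD_POS) |
		       (reg_id << HRT_GDC_REG_ID_POS) |
		       ((value & ((1U << HRT_GDC_DATA_BITS) - 1)) << HRT_GDC_DATA_POS);
	}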
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/gp_regs_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/gp_regs_defs.h
new file mode 100644
index 000000000000..34e734f6648e
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/gp_regs_defs.h
@@ -0,0 +1,22 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _gp_regs_defs_h
+#define _gp_regs_defs_h
+
+#define _HRT_GP_REGS_IS_FWD_REG_IDX 0
+
+#define _HRT_GP_REGS_REG_ALIGN 4
+
+#endif /* _gp_regs_defs_h */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/gp_timer_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/gp_timer_defs.h
new file mode 100644
index 000000000000..3082e2f5e014
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/gp_timer_defs.h
@@ -0,0 +1,36 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _gp_timer_defs_h
+#define _gp_timer_defs_h
+
+#define _HRT_GP_TIMER_REG_ALIGN 4
+
+#define HIVE_GP_TIMER_RESET_REG_IDX 0
+#define HIVE_GP_TIMER_OVERALL_ENABLE_REG_IDX 1
+#define HIVE_GP_TIMER_ENABLE_REG_IDX(timer) (HIVE_GP_TIMER_OVERALL_ENABLE_REG_IDX + 1 + timer)
+#define HIVE_GP_TIMER_VALUE_REG_IDX(timer,timers) (HIVE_GP_TIMER_ENABLE_REG_IDX(timers) + timer)
+#define HIVE_GP_TIMER_COUNT_TYPE_REG_IDX(timer,timers) (HIVE_GP_TIMER_VALUE_REG_IDX(timers, timers) + timer)
+#define HIVE_GP_TIMER_SIGNAL_SELECT_REG_IDX(timer,timers) (HIVE_GP_TIMER_COUNT_TYPE_REG_IDX(timers, timers) + timer)
+#define HIVE_GP_TIMER_IRQ_TRIGGER_VALUE_REG_IDX(irq,timers) (HIVE_GP_TIMER_SIGNAL_SELECT_REG_IDX(timers, timers) + irq)
+#define HIVE_GP_TIMER_IRQ_TIMER_SELECT_REG_IDX(irq,timers,irqs) (HIVE_GP_TIMER_IRQ_TRIGGER_VALUE_REG_IDX(irqs, timers) + irq)
+#define HIVE_GP_TIMER_IRQ_ENABLE_REG_IDX(irq,timers,irqs) (HIVE_GP_TIMER_IRQ_TIMER_SELECT_REG_IDX(irqs, timers, irqs) + irq)
+
+#define HIVE_GP_TIMER_COUNT_TYPE_HIGH 0
+#define HIVE_GP_TIMER_COUNT_TYPE_LOW 1
+#define HIVE_GP_TIMER_COUNT_TYPE_POSEDGE 2
+#define HIVE_GP_TIMER_COUNT_TYPE_NEGEDGE 3
+#define HIVE_GP_TIMER_COUNT_TYPES 4
+
+#endif /* _gp_timer_defs_h */
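
The HIVE_GP_TIMER_*_REG_IDX macros above build one cumulative register map, parameterized by how many timers and IRQs the block instance has. As a sketch only, expanding them for an instance with 8 timers and 2 IRQs (the counts given by HIVE_GP_TIMER_NUM_COUNTERS and HIVE_GP_TIMER_NUM_IRQS in hive_isp_css_defs.h further down) gives:

/* With timers = 8, irqs = 2 the map expands to:
 *   HIVE_GP_TIMER_ENABLE_REG_IDX(0)                 ->  2
 *   HIVE_GP_TIMER_VALUE_REG_IDX(0, 8)               -> 10
 *   HIVE_GP_TIMER_COUNT_TYPE_REG_IDX(0, 8)          -> 18
 *   HIVE_GP_TIMER_SIGNAL_SELECT_REG_IDX(0, 8)       -> 26
 *   HIVE_GP_TIMER_IRQ_TRIGGER_VALUE_REG_IDX(0, 8)   -> 34
 *   HIVE_GP_TIMER_IRQ_TIMER_SELECT_REG_IDX(0, 8, 2) -> 36
 *   HIVE_GP_TIMER_IRQ_ENABLE_REG_IDX(0, 8, 2)       -> 38
 */

/* byte offset of a timer's value register = index * _HRT_GP_TIMER_REG_ALIGN */
static inline unsigned int gp_timer_value_offset(unsigned int timer)
{
        return HIVE_GP_TIMER_VALUE_REG_IDX(timer, 8) * _HRT_GP_TIMER_REG_ALIGN;
}
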
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/gpio_block_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/gpio_block_defs.h
new file mode 100644
index 000000000000..a807d4c99041
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/gpio_block_defs.h
@@ -0,0 +1,42 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _gpio_block_defs_h_
+#define _gpio_block_defs_h_
+
+#define _HRT_GPIO_BLOCK_REG_ALIGN 4
+
+/* R/W registers */
+#define _gpio_block_reg_do_e 0
+#define _gpio_block_reg_do_select 1
+#define _gpio_block_reg_do_0 2
+#define _gpio_block_reg_do_1 3
+#define _gpio_block_reg_do_pwm_cnt_0 4
+#define _gpio_block_reg_do_pwm_cnt_1 5
+#define _gpio_block_reg_do_pwm_cnt_2 6
+#define _gpio_block_reg_do_pwm_cnt_3 7
+#define _gpio_block_reg_do_pwm_main_cnt 8
+#define _gpio_block_reg_do_pwm_enable 9
+#define _gpio_block_reg_di_debounce_sel 10
+#define _gpio_block_reg_di_debounce_cnt_0 11
+#define _gpio_block_reg_di_debounce_cnt_1 12
+#define _gpio_block_reg_di_debounce_cnt_2 13
+#define _gpio_block_reg_di_debounce_cnt_3 14
+#define _gpio_block_reg_di_active_level 15
+
+
+/* read-only registers */
+#define _gpio_block_reg_di 16
+
+#endif /* _gpio_block_defs_h_ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/hive_isp_css_2401_irq_types_hrt.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/hive_isp_css_2401_irq_types_hrt.h
new file mode 100644
index 000000000000..7a94c1d85b08
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/hive_isp_css_2401_irq_types_hrt.h
@@ -0,0 +1,69 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _HIVE_ISP_CSS_2401_IRQ_TYPES_HRT_H_
+#define _HIVE_ISP_CSS_2401_IRQ_TYPES_HRT_H_
+
+/*
+ * These are the indices of each interrupt in the interrupt
+ * controller's registers. These can be used as the irq_id
+ * argument to the hrt functions in irq_controller.h.
+ *
+ * The definitions are taken from <system>_defs.h
+ */
+typedef enum hrt_isp_css_irq {
+ hrt_isp_css_irq_gpio_pin_0 = HIVE_GP_DEV_IRQ_GPIO_PIN_0_BIT_ID ,
+ hrt_isp_css_irq_gpio_pin_1 = HIVE_GP_DEV_IRQ_GPIO_PIN_1_BIT_ID ,
+ hrt_isp_css_irq_gpio_pin_2 = HIVE_GP_DEV_IRQ_GPIO_PIN_2_BIT_ID ,
+ hrt_isp_css_irq_gpio_pin_3 = HIVE_GP_DEV_IRQ_GPIO_PIN_3_BIT_ID ,
+ hrt_isp_css_irq_gpio_pin_4 = HIVE_GP_DEV_IRQ_GPIO_PIN_4_BIT_ID ,
+ hrt_isp_css_irq_gpio_pin_5 = HIVE_GP_DEV_IRQ_GPIO_PIN_5_BIT_ID ,
+ hrt_isp_css_irq_gpio_pin_6 = HIVE_GP_DEV_IRQ_GPIO_PIN_6_BIT_ID ,
+ hrt_isp_css_irq_gpio_pin_7 = HIVE_GP_DEV_IRQ_GPIO_PIN_7_BIT_ID ,
+ hrt_isp_css_irq_gpio_pin_8 = HIVE_GP_DEV_IRQ_GPIO_PIN_8_BIT_ID ,
+ hrt_isp_css_irq_gpio_pin_9 = HIVE_GP_DEV_IRQ_GPIO_PIN_9_BIT_ID ,
+ hrt_isp_css_irq_gpio_pin_10 = HIVE_GP_DEV_IRQ_GPIO_PIN_10_BIT_ID ,
+ hrt_isp_css_irq_gpio_pin_11 = HIVE_GP_DEV_IRQ_GPIO_PIN_11_BIT_ID ,
+ hrt_isp_css_irq_sp = HIVE_GP_DEV_IRQ_SP_BIT_ID ,
+ hrt_isp_css_irq_isp = HIVE_GP_DEV_IRQ_ISP_BIT_ID ,
+ hrt_isp_css_irq_isys = HIVE_GP_DEV_IRQ_ISYS_BIT_ID ,
+ hrt_isp_css_irq_isel = HIVE_GP_DEV_IRQ_ISEL_BIT_ID ,
+ hrt_isp_css_irq_ifmt = HIVE_GP_DEV_IRQ_IFMT_BIT_ID ,
+ hrt_isp_css_irq_sp_stream_mon = HIVE_GP_DEV_IRQ_SP_STREAM_MON_BIT_ID ,
+ hrt_isp_css_irq_isp_stream_mon = HIVE_GP_DEV_IRQ_ISP_STREAM_MON_BIT_ID ,
+ hrt_isp_css_irq_mod_stream_mon = HIVE_GP_DEV_IRQ_MOD_STREAM_MON_BIT_ID ,
+ hrt_isp_css_irq_is2401 = HIVE_GP_DEV_IRQ_IS2401_BIT_ID ,
+ hrt_isp_css_irq_isp_bamem_error = HIVE_GP_DEV_IRQ_ISP_BAMEM_ERROR_BIT_ID ,
+ hrt_isp_css_irq_isp_dmem_error = HIVE_GP_DEV_IRQ_ISP_DMEM_ERROR_BIT_ID ,
+ hrt_isp_css_irq_sp_icache_mem_error = HIVE_GP_DEV_IRQ_SP_ICACHE_MEM_ERROR_BIT_ID ,
+ hrt_isp_css_irq_sp_dmem_error = HIVE_GP_DEV_IRQ_SP_DMEM_ERROR_BIT_ID ,
+ hrt_isp_css_irq_mmu_cache_mem_error = HIVE_GP_DEV_IRQ_MMU_CACHE_MEM_ERROR_BIT_ID ,
+ hrt_isp_css_irq_gp_timer_0 = HIVE_GP_DEV_IRQ_GP_TIMER_0_BIT_ID ,
+ hrt_isp_css_irq_gp_timer_1 = HIVE_GP_DEV_IRQ_GP_TIMER_1_BIT_ID ,
+ hrt_isp_css_irq_sw_pin_0 = HIVE_GP_DEV_IRQ_SW_PIN_0_BIT_ID ,
+ hrt_isp_css_irq_sw_pin_1 = HIVE_GP_DEV_IRQ_SW_PIN_1_BIT_ID ,
+ hrt_isp_css_irq_dma = HIVE_GP_DEV_IRQ_DMA_BIT_ID ,
+ hrt_isp_css_irq_sp_stream_mon_b = HIVE_GP_DEV_IRQ_SP_STREAM_MON_B_BIT_ID ,
+ /* this must (obviously) be the last one in the enum */
+ hrt_isp_css_irq_num_irqs
+} hrt_isp_css_irq_t;
+
+typedef enum hrt_isp_css_irq_status {
+ hrt_isp_css_irq_status_error,
+ hrt_isp_css_irq_status_more_irqs,
+ hrt_isp_css_irq_status_success
+} hrt_isp_css_irq_status_t;
+
+#endif /* _HIVE_ISP_CSS_2401_IRQ_TYPES_HRT_H_ */
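
A hedged sketch of how these enum values might be used to decode a pending-interrupt word, assuming the controller exposes a plain 32-bit bitmask indexed by the *_BIT_ID positions; css_handle_irq() is a made-up callback, declared here only to keep the sketch self-contained:

extern void css_handle_irq(hrt_isp_css_irq_t irq);

static void report_pending_irqs(unsigned int status)
{
        unsigned int irq;

        for (irq = 0; irq < hrt_isp_css_irq_num_irqs; irq++)
                if (status & (1U << irq))
                        css_handle_irq((hrt_isp_css_irq_t)irq); /* pending */
}
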
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/hive_isp_css_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/hive_isp_css_defs.h
new file mode 100644
index 000000000000..5a2ce9108ae4
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/hive_isp_css_defs.h
@@ -0,0 +1,435 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _hive_isp_css_defs_h__
+#define _hive_isp_css_defs_h__
+
+#define _HIVE_ISP_CSS_2401_SYSTEM 1
+#define HIVE_ISP_CTRL_DATA_WIDTH 32
+#define HIVE_ISP_CTRL_ADDRESS_WIDTH 32
+#define HIVE_ISP_CTRL_MAX_BURST_SIZE 1
+#define HIVE_ISP_DDR_ADDRESS_WIDTH 36
+
+#define HIVE_ISP_HOST_MAX_BURST_SIZE 8 /* host supports bursts in order to prevent repeating DDRAM accesses */
+#define HIVE_ISP_NUM_GPIO_PINS 12
+
+/* This list of vector num_elems/elem_bits pairs is valid both in C as initializer
+ and in the DMA parameter list */
+#define HIVE_ISP_DDR_DMA_SPECS {{32, 8}, {16, 16}, {18, 14}, {25, 10}, {21, 12}}
+#define HIVE_ISP_DDR_WORD_BITS 256
+#define HIVE_ISP_DDR_WORD_BYTES (HIVE_ISP_DDR_WORD_BITS/8)
+#define HIVE_ISP_DDR_BYTES (512 * 1024 * 1024)
+#define HIVE_ISP_DDR_BYTES_RTL (127 * 1024 * 1024)
+#define HIVE_ISP_DDR_SMALL_BYTES (128 * 256 / 8)
+#define HIVE_ISP_PAGE_SHIFT 12
+#define HIVE_ISP_PAGE_SIZE (1<<HIVE_ISP_PAGE_SHIFT)
+
+#define CSS_DDR_WORD_BITS HIVE_ISP_DDR_WORD_BITS
+#define CSS_DDR_WORD_BYTES HIVE_ISP_DDR_WORD_BYTES
+
+/* settings used in applications */
+#define HIVE_XMEM_WIDTH HIVE_ISP_DDR_WORD_BITS
+#define HIVE_VMEM_VECTOR_ELEMENTS 64
+#define HIVE_VMEM_ELEMENT_BITS 14
+#define HIVE_XMEM_ELEMENT_BITS 16
+#define HIVE_VMEM_VECTOR_BYTES (HIVE_VMEM_VECTOR_ELEMENTS*HIVE_XMEM_ELEMENT_BITS/8) /* used for # addr bytes for one vector */
+#define HIVE_XMEM_PACKED_WORD_VMEM_ELEMENTS (HIVE_XMEM_WIDTH/HIVE_VMEM_ELEMENT_BITS)
+#define HIVE_XMEM_WORD_VMEM_ELEMENTS (HIVE_XMEM_WIDTH/HIVE_XMEM_ELEMENT_BITS)
+#define XMEM_INT_SIZE 4
+
+
+
+#define HIVE_ISYS_INP_BUFFER_BYTES (64*1024) /* 64 kByte = 2k words (of 256 bits) */
+
+/* If HIVE_ISP_DDR_BASE_OFFSET is set to a non-zero value, the wide bus just before the DDRAM gets an extra dummy port */
+/* onto which the address range 0 .. HIVE_ISP_DDR_BASE_OFFSET-1 maps. This effectively creates an offset for the DDRAM from the system perspective */
+#define HIVE_ISP_DDR_BASE_OFFSET 0x120000000 /* 0x200000 */
+
+#define HIVE_DMA_ISP_BUS_CONN 0
+#define HIVE_DMA_ISP_DDR_CONN 1
+#define HIVE_DMA_BUS_DDR_CONN 2
+#define HIVE_DMA_ISP_MASTER master_port0
+#define HIVE_DMA_BUS_MASTER master_port1
+#define HIVE_DMA_DDR_MASTER master_port2
+
+#define HIVE_DMA_NUM_CHANNELS 32 /* old value was 8 */
+#define HIVE_DMA_CMD_FIFO_DEPTH 24 /* old value was 12 */
+
+#define HIVE_IF_PIXEL_WIDTH 12
+
+#define HIVE_MMU_TLB_SETS 8
+#define HIVE_MMU_TLB_SET_BLOCKS 8
+#define HIVE_MMU_TLB_BLOCK_ELEMENTS 8
+#define HIVE_MMU_PAGE_TABLE_LEVELS 2
+#define HIVE_MMU_PAGE_BYTES HIVE_ISP_PAGE_SIZE
+
+#define HIVE_ISP_CH_ID_BITS 2
+#define HIVE_ISP_FMT_TYPE_BITS 5
+#define HIVE_ISP_ISEL_SEL_BITS 2
+
+#define HIVE_GP_REGS_SDRAM_WAKEUP_IDX 0
+#define HIVE_GP_REGS_IDLE_IDX 1
+#define HIVE_GP_REGS_IRQ_0_IDX 2
+#define HIVE_GP_REGS_IRQ_1_IDX 3
+#define HIVE_GP_REGS_SP_STREAM_STAT_IDX 4
+#define HIVE_GP_REGS_SP_STREAM_STAT_B_IDX 5
+#define HIVE_GP_REGS_ISP_STREAM_STAT_IDX 6
+#define HIVE_GP_REGS_MOD_STREAM_STAT_IDX 7
+#define HIVE_GP_REGS_SP_STREAM_STAT_IRQ_COND_IDX 8
+#define HIVE_GP_REGS_SP_STREAM_STAT_B_IRQ_COND_IDX 9
+#define HIVE_GP_REGS_ISP_STREAM_STAT_IRQ_COND_IDX 10
+#define HIVE_GP_REGS_MOD_STREAM_STAT_IRQ_COND_IDX 11
+#define HIVE_GP_REGS_SP_STREAM_STAT_IRQ_ENABLE_IDX 12
+#define HIVE_GP_REGS_SP_STREAM_STAT_B_IRQ_ENABLE_IDX 13
+#define HIVE_GP_REGS_ISP_STREAM_STAT_IRQ_ENABLE_IDX 14
+#define HIVE_GP_REGS_MOD_STREAM_STAT_IRQ_ENABLE_IDX 15
+#define HIVE_GP_REGS_SWITCH_PRIM_IF_IDX 16
+#define HIVE_GP_REGS_SWITCH_GDC1_IDX 17
+#define HIVE_GP_REGS_SWITCH_GDC2_IDX 18
+#define HIVE_GP_REGS_SRST_IDX 19
+#define HIVE_GP_REGS_SLV_REG_SRST_IDX 20
+#define HIVE_GP_REGS_SWITCH_ISYS_IDX 21
+
+/* Bit numbers of the soft reset register */
+#define HIVE_GP_REGS_SRST_ISYS_CBUS 0
+#define HIVE_GP_REGS_SRST_ISEL_CBUS 1
+#define HIVE_GP_REGS_SRST_IFMT_CBUS 2
+#define HIVE_GP_REGS_SRST_GPDEV_CBUS 3
+#define HIVE_GP_REGS_SRST_GPIO 4
+#define HIVE_GP_REGS_SRST_TC 5
+#define HIVE_GP_REGS_SRST_GPTIMER 6
+#define HIVE_GP_REGS_SRST_FACELLFIFOS 7
+#define HIVE_GP_REGS_SRST_D_OSYS 8
+#define HIVE_GP_REGS_SRST_IFT_SEC_PIPE 9
+#define HIVE_GP_REGS_SRST_GDC1 10
+#define HIVE_GP_REGS_SRST_GDC2 11
+#define HIVE_GP_REGS_SRST_VEC_BUS 12
+#define HIVE_GP_REGS_SRST_ISP 13
+#define HIVE_GP_REGS_SRST_SLV_GRP_BUS 14
+#define HIVE_GP_REGS_SRST_DMA 15
+#define HIVE_GP_REGS_SRST_SF_ISP_SP 16
+#define HIVE_GP_REGS_SRST_SF_PIF_CELLS 17
+#define HIVE_GP_REGS_SRST_SF_SIF_SP 18
+#define HIVE_GP_REGS_SRST_SF_MC_SP 19
+#define HIVE_GP_REGS_SRST_SF_ISYS_SP 20
+#define HIVE_GP_REGS_SRST_SF_DMA_CELLS 21
+#define HIVE_GP_REGS_SRST_SF_GDC1_CELLS 22
+#define HIVE_GP_REGS_SRST_SF_GDC2_CELLS 23
+#define HIVE_GP_REGS_SRST_SP 24
+#define HIVE_GP_REGS_SRST_OCP2CIO 25
+#define HIVE_GP_REGS_SRST_NBUS 26
+#define HIVE_GP_REGS_SRST_HOST12BUS 27
+#define HIVE_GP_REGS_SRST_WBUS 28
+#define HIVE_GP_REGS_SRST_IC_OSYS 29
+#define HIVE_GP_REGS_SRST_WBUS_IC 30
+#define HIVE_GP_REGS_SRST_ISYS_INP_BUF_BUS 31
+
+/* Bit numbers of the slave register soft reset register */
+#define HIVE_GP_REGS_SLV_REG_SRST_DMA 0
+#define HIVE_GP_REGS_SLV_REG_SRST_GDC1 1
+#define HIVE_GP_REGS_SLV_REG_SRST_GDC2 2
+
+/* order of the input bits for the irq controller */
+#define HIVE_GP_DEV_IRQ_GPIO_PIN_0_BIT_ID 0
+#define HIVE_GP_DEV_IRQ_GPIO_PIN_1_BIT_ID 1
+#define HIVE_GP_DEV_IRQ_GPIO_PIN_2_BIT_ID 2
+#define HIVE_GP_DEV_IRQ_GPIO_PIN_3_BIT_ID 3
+#define HIVE_GP_DEV_IRQ_GPIO_PIN_4_BIT_ID 4
+#define HIVE_GP_DEV_IRQ_GPIO_PIN_5_BIT_ID 5
+#define HIVE_GP_DEV_IRQ_GPIO_PIN_6_BIT_ID 6
+#define HIVE_GP_DEV_IRQ_GPIO_PIN_7_BIT_ID 7
+#define HIVE_GP_DEV_IRQ_GPIO_PIN_8_BIT_ID 8
+#define HIVE_GP_DEV_IRQ_GPIO_PIN_9_BIT_ID 9
+#define HIVE_GP_DEV_IRQ_GPIO_PIN_10_BIT_ID 10
+#define HIVE_GP_DEV_IRQ_GPIO_PIN_11_BIT_ID 11
+#define HIVE_GP_DEV_IRQ_SP_BIT_ID 12
+#define HIVE_GP_DEV_IRQ_ISP_BIT_ID 13
+#define HIVE_GP_DEV_IRQ_ISYS_BIT_ID 14
+#define HIVE_GP_DEV_IRQ_ISEL_BIT_ID 15
+#define HIVE_GP_DEV_IRQ_IFMT_BIT_ID 16
+#define HIVE_GP_DEV_IRQ_SP_STREAM_MON_BIT_ID 17
+#define HIVE_GP_DEV_IRQ_ISP_STREAM_MON_BIT_ID 18
+#define HIVE_GP_DEV_IRQ_MOD_STREAM_MON_BIT_ID 19
+#define HIVE_GP_DEV_IRQ_IS2401_BIT_ID 20
+#define HIVE_GP_DEV_IRQ_ISP_BAMEM_ERROR_BIT_ID 21
+#define HIVE_GP_DEV_IRQ_ISP_DMEM_ERROR_BIT_ID 22
+#define HIVE_GP_DEV_IRQ_SP_ICACHE_MEM_ERROR_BIT_ID 23
+#define HIVE_GP_DEV_IRQ_SP_DMEM_ERROR_BIT_ID 24
+#define HIVE_GP_DEV_IRQ_MMU_CACHE_MEM_ERROR_BIT_ID 25
+#define HIVE_GP_DEV_IRQ_GP_TIMER_0_BIT_ID 26
+#define HIVE_GP_DEV_IRQ_GP_TIMER_1_BIT_ID 27
+#define HIVE_GP_DEV_IRQ_SW_PIN_0_BIT_ID 28
+#define HIVE_GP_DEV_IRQ_SW_PIN_1_BIT_ID 29
+#define HIVE_GP_DEV_IRQ_DMA_BIT_ID 30
+#define HIVE_GP_DEV_IRQ_SP_STREAM_MON_B_BIT_ID 31
+
+#define HIVE_GP_REGS_NUM_SW_IRQ_REGS 2
+
+/* order of the input bits for the timed controller */
+#define HIVE_GP_DEV_TC_GPIO_PIN_0_BIT_ID 0
+#define HIVE_GP_DEV_TC_GPIO_PIN_1_BIT_ID 1
+#define HIVE_GP_DEV_TC_GPIO_PIN_2_BIT_ID 2
+#define HIVE_GP_DEV_TC_GPIO_PIN_3_BIT_ID 3
+#define HIVE_GP_DEV_TC_GPIO_PIN_4_BIT_ID 4
+#define HIVE_GP_DEV_TC_GPIO_PIN_5_BIT_ID 5
+#define HIVE_GP_DEV_TC_GPIO_PIN_6_BIT_ID 6
+#define HIVE_GP_DEV_TC_GPIO_PIN_7_BIT_ID 7
+#define HIVE_GP_DEV_TC_GPIO_PIN_8_BIT_ID 8
+#define HIVE_GP_DEV_TC_GPIO_PIN_9_BIT_ID 9
+#define HIVE_GP_DEV_TC_GPIO_PIN_10_BIT_ID 10
+#define HIVE_GP_DEV_TC_GPIO_PIN_11_BIT_ID 11
+#define HIVE_GP_DEV_TC_SP_BIT_ID 12
+#define HIVE_GP_DEV_TC_ISP_BIT_ID 13
+#define HIVE_GP_DEV_TC_ISYS_BIT_ID 14
+#define HIVE_GP_DEV_TC_ISEL_BIT_ID 15
+#define HIVE_GP_DEV_TC_IFMT_BIT_ID 16
+#define HIVE_GP_DEV_TC_GP_TIMER_0_BIT_ID 17
+#define HIVE_GP_DEV_TC_GP_TIMER_1_BIT_ID 18
+#define HIVE_GP_DEV_TC_MIPI_SOL_BIT_ID 19
+#define HIVE_GP_DEV_TC_MIPI_EOL_BIT_ID 20
+#define HIVE_GP_DEV_TC_MIPI_SOF_BIT_ID 21
+#define HIVE_GP_DEV_TC_MIPI_EOF_BIT_ID 22
+#define HIVE_GP_DEV_TC_INPSYS_SM 23
+
+/* definitions for the gp_timer block */
+#define HIVE_GP_TIMER_0 0
+#define HIVE_GP_TIMER_1 1
+#define HIVE_GP_TIMER_2 2
+#define HIVE_GP_TIMER_3 3
+#define HIVE_GP_TIMER_4 4
+#define HIVE_GP_TIMER_5 5
+#define HIVE_GP_TIMER_6 6
+#define HIVE_GP_TIMER_7 7
+#define HIVE_GP_TIMER_NUM_COUNTERS 8
+
+#define HIVE_GP_TIMER_IRQ_0 0
+#define HIVE_GP_TIMER_IRQ_1 1
+#define HIVE_GP_TIMER_NUM_IRQS 2
+
+#define HIVE_GP_TIMER_GPIO_0_BIT_ID 0
+#define HIVE_GP_TIMER_GPIO_1_BIT_ID 1
+#define HIVE_GP_TIMER_GPIO_2_BIT_ID 2
+#define HIVE_GP_TIMER_GPIO_3_BIT_ID 3
+#define HIVE_GP_TIMER_GPIO_4_BIT_ID 4
+#define HIVE_GP_TIMER_GPIO_5_BIT_ID 5
+#define HIVE_GP_TIMER_GPIO_6_BIT_ID 6
+#define HIVE_GP_TIMER_GPIO_7_BIT_ID 7
+#define HIVE_GP_TIMER_GPIO_8_BIT_ID 8
+#define HIVE_GP_TIMER_GPIO_9_BIT_ID 9
+#define HIVE_GP_TIMER_GPIO_10_BIT_ID 10
+#define HIVE_GP_TIMER_GPIO_11_BIT_ID 11
+#define HIVE_GP_TIMER_INP_SYS_IRQ 12
+#define HIVE_GP_TIMER_ISEL_IRQ 13
+#define HIVE_GP_TIMER_IFMT_IRQ 14
+#define HIVE_GP_TIMER_SP_STRMON_IRQ 15
+#define HIVE_GP_TIMER_SP_B_STRMON_IRQ 16
+#define HIVE_GP_TIMER_ISP_STRMON_IRQ 17
+#define HIVE_GP_TIMER_MOD_STRMON_IRQ 18
+#define HIVE_GP_TIMER_IS2401_IRQ 19
+#define HIVE_GP_TIMER_ISP_BAMEM_ERROR_IRQ 20
+#define HIVE_GP_TIMER_ISP_DMEM_ERROR_IRQ 21
+#define HIVE_GP_TIMER_SP_ICACHE_MEM_ERROR_IRQ 22
+#define HIVE_GP_TIMER_SP_DMEM_ERROR_IRQ 23
+#define HIVE_GP_TIMER_SP_OUT_RUN_DP 24
+#define HIVE_GP_TIMER_SP_WIRE_DEBUG_LM_MSINK_RUN_I0_I0 25
+#define HIVE_GP_TIMER_SP_WIRE_DEBUG_LM_MSINK_RUN_I0_I1 26
+#define HIVE_GP_TIMER_SP_WIRE_DEBUG_LM_MSINK_RUN_I0_I2 27
+#define HIVE_GP_TIMER_SP_WIRE_DEBUG_LM_MSINK_RUN_I0_I3 28
+#define HIVE_GP_TIMER_SP_WIRE_DEBUG_LM_MSINK_RUN_I0_I4 29
+#define HIVE_GP_TIMER_SP_WIRE_DEBUG_LM_MSINK_RUN_I0_I5 30
+#define HIVE_GP_TIMER_SP_WIRE_DEBUG_LM_MSINK_RUN_I0_I6 31
+#define HIVE_GP_TIMER_SP_WIRE_DEBUG_LM_MSINK_RUN_I0_I7 32
+#define HIVE_GP_TIMER_SP_WIRE_DEBUG_LM_MSINK_RUN_I0_I8 33
+#define HIVE_GP_TIMER_SP_WIRE_DEBUG_LM_MSINK_RUN_I0_I9 34
+#define HIVE_GP_TIMER_SP_WIRE_DEBUG_LM_MSINK_RUN_I0_I10 35
+#define HIVE_GP_TIMER_SP_WIRE_DEBUG_LM_MSINK_RUN_I1_I0 36
+#define HIVE_GP_TIMER_SP_WIRE_DEBUG_LM_MSINK_RUN_I2_I0 37
+#define HIVE_GP_TIMER_SP_WIRE_DEBUG_LM_MSINK_RUN_I3_I0 38
+#define HIVE_GP_TIMER_ISP_OUT_RUN_DP 39
+#define HIVE_GP_TIMER_ISP_WIRE_DEBUG_LM_MSINK_RUN_I0_I0 40
+#define HIVE_GP_TIMER_ISP_WIRE_DEBUG_LM_MSINK_RUN_I0_I1 41
+#define HIVE_GP_TIMER_ISP_WIRE_DEBUG_LM_MSINK_RUN_I1_I0 42
+#define HIVE_GP_TIMER_ISP_WIRE_DEBUG_LM_MSINK_RUN_I2_I0 43
+#define HIVE_GP_TIMER_ISP_WIRE_DEBUG_LM_MSINK_RUN_I2_I1 44
+#define HIVE_GP_TIMER_ISP_WIRE_DEBUG_LM_MSINK_RUN_I2_I2 45
+#define HIVE_GP_TIMER_ISP_WIRE_DEBUG_LM_MSINK_RUN_I2_I3 46
+#define HIVE_GP_TIMER_ISP_WIRE_DEBUG_LM_MSINK_RUN_I2_I4 47
+#define HIVE_GP_TIMER_ISP_WIRE_DEBUG_LM_MSINK_RUN_I2_I5 48
+#define HIVE_GP_TIMER_ISP_WIRE_DEBUG_LM_MSINK_RUN_I2_I6 49
+#define HIVE_GP_TIMER_ISP_WIRE_DEBUG_LM_MSINK_RUN_I3_I0 50
+#define HIVE_GP_TIMER_ISP_WIRE_DEBUG_LM_MSINK_RUN_I4_I0 51
+#define HIVE_GP_TIMER_ISP_WIRE_DEBUG_LM_MSINK_RUN_I5_I0 52
+#define HIVE_GP_TIMER_ISP_WIRE_DEBUG_LM_MSINK_RUN_I6_I0 53
+#define HIVE_GP_TIMER_ISP_WIRE_DEBUG_LM_MSINK_RUN_I7_I0 54
+#define HIVE_GP_TIMER_MIPI_SOL_BIT_ID 55
+#define HIVE_GP_TIMER_MIPI_EOL_BIT_ID 56
+#define HIVE_GP_TIMER_MIPI_SOF_BIT_ID 57
+#define HIVE_GP_TIMER_MIPI_EOF_BIT_ID 58
+#define HIVE_GP_TIMER_INPSYS_SM 59
+#define HIVE_GP_TIMER_ISP_PMEM_ERROR_IRQ 60
+
+/* port definitions for the streaming monitors */
+/* port definitions for the SP streaming monitor, which monitors the status of the streaming ports at the SP side of the streaming FIFOs */
+#define SP_STR_MON_PORT_SP2SIF 0
+#define SP_STR_MON_PORT_SIF2SP 1
+#define SP_STR_MON_PORT_SP2MC 2
+#define SP_STR_MON_PORT_MC2SP 3
+#define SP_STR_MON_PORT_SP2DMA 4
+#define SP_STR_MON_PORT_DMA2SP 5
+#define SP_STR_MON_PORT_SP2ISP 6
+#define SP_STR_MON_PORT_ISP2SP 7
+#define SP_STR_MON_PORT_SP2GPD 8
+#define SP_STR_MON_PORT_FA2SP 9
+#define SP_STR_MON_PORT_SP2ISYS 10
+#define SP_STR_MON_PORT_ISYS2SP 11
+#define SP_STR_MON_PORT_SP2PIFA 12
+#define SP_STR_MON_PORT_PIFA2SP 13
+#define SP_STR_MON_PORT_SP2PIFB 14
+#define SP_STR_MON_PORT_PIFB2SP 15
+
+#define SP_STR_MON_PORT_B_SP2GDC1 0
+#define SP_STR_MON_PORT_B_GDC12SP 1
+#define SP_STR_MON_PORT_B_SP2GDC2 2
+#define SP_STR_MON_PORT_B_GDC22SP 3
+
+/* previously used SP streaming monitor port identifiers, kept for backward compatibility */
+#define SP_STR_MON_PORT_SND_SIF SP_STR_MON_PORT_SP2SIF
+#define SP_STR_MON_PORT_RCV_SIF SP_STR_MON_PORT_SIF2SP
+#define SP_STR_MON_PORT_SND_MC SP_STR_MON_PORT_SP2MC
+#define SP_STR_MON_PORT_RCV_MC SP_STR_MON_PORT_MC2SP
+#define SP_STR_MON_PORT_SND_DMA SP_STR_MON_PORT_SP2DMA
+#define SP_STR_MON_PORT_RCV_DMA SP_STR_MON_PORT_DMA2SP
+#define SP_STR_MON_PORT_SND_ISP SP_STR_MON_PORT_SP2ISP
+#define SP_STR_MON_PORT_RCV_ISP SP_STR_MON_PORT_ISP2SP
+#define SP_STR_MON_PORT_SND_GPD SP_STR_MON_PORT_SP2GPD
+#define SP_STR_MON_PORT_RCV_GPD SP_STR_MON_PORT_FA2SP
+/* Deprecated */
+#define SP_STR_MON_PORT_SND_PIF SP_STR_MON_PORT_SP2PIFA
+#define SP_STR_MON_PORT_RCV_PIF SP_STR_MON_PORT_PIFA2SP
+#define SP_STR_MON_PORT_SND_PIFB SP_STR_MON_PORT_SP2PIFB
+#define SP_STR_MON_PORT_RCV_PIFB SP_STR_MON_PORT_PIFB2SP
+
+#define SP_STR_MON_PORT_SND_PIF_A SP_STR_MON_PORT_SP2PIFA
+#define SP_STR_MON_PORT_RCV_PIF_A SP_STR_MON_PORT_PIFA2SP
+#define SP_STR_MON_PORT_SND_PIF_B SP_STR_MON_PORT_SP2PIFB
+#define SP_STR_MON_PORT_RCV_PIF_B SP_STR_MON_PORT_PIFB2SP
+
+/* port definitions for the ISP streaming monitor, which monitors the status of the streaming ports at the ISP side of the streaming FIFOs */
+#define ISP_STR_MON_PORT_ISP2PIFA 0
+#define ISP_STR_MON_PORT_PIFA2ISP 1
+#define ISP_STR_MON_PORT_ISP2PIFB 2
+#define ISP_STR_MON_PORT_PIFB2ISP 3
+#define ISP_STR_MON_PORT_ISP2DMA 4
+#define ISP_STR_MON_PORT_DMA2ISP 5
+#define ISP_STR_MON_PORT_ISP2GDC1 6
+#define ISP_STR_MON_PORT_GDC12ISP 7
+#define ISP_STR_MON_PORT_ISP2GDC2 8
+#define ISP_STR_MON_PORT_GDC22ISP 9
+#define ISP_STR_MON_PORT_ISP2GPD 10
+#define ISP_STR_MON_PORT_FA2ISP 11
+#define ISP_STR_MON_PORT_ISP2SP 12
+#define ISP_STR_MON_PORT_SP2ISP 13
+
+/* previously used ISP streaming monitor port identifiers, kept for backward compatibility */
+#define ISP_STR_MON_PORT_SND_PIF_A ISP_STR_MON_PORT_ISP2PIFA
+#define ISP_STR_MON_PORT_RCV_PIF_A ISP_STR_MON_PORT_PIFA2ISP
+#define ISP_STR_MON_PORT_SND_PIF_B ISP_STR_MON_PORT_ISP2PIFB
+#define ISP_STR_MON_PORT_RCV_PIF_B ISP_STR_MON_PORT_PIFB2ISP
+#define ISP_STR_MON_PORT_SND_DMA ISP_STR_MON_PORT_ISP2DMA
+#define ISP_STR_MON_PORT_RCV_DMA ISP_STR_MON_PORT_DMA2ISP
+#define ISP_STR_MON_PORT_SND_GDC ISP_STR_MON_PORT_ISP2GDC1
+#define ISP_STR_MON_PORT_RCV_GDC ISP_STR_MON_PORT_GDC12ISP
+#define ISP_STR_MON_PORT_SND_GPD ISP_STR_MON_PORT_ISP2GPD
+#define ISP_STR_MON_PORT_RCV_GPD ISP_STR_MON_PORT_FA2ISP
+#define ISP_STR_MON_PORT_SND_SP ISP_STR_MON_PORT_ISP2SP
+#define ISP_STR_MON_PORT_RCV_SP ISP_STR_MON_PORT_SP2ISP
+
+/* port definitions for the MOD streaming monitor, which monitors the status of the streaming ports at the module side of the streaming FIFOs */
+
+#define MOD_STR_MON_PORT_PIFA2CELLS 0
+#define MOD_STR_MON_PORT_CELLS2PIFA 1
+#define MOD_STR_MON_PORT_PIFB2CELLS 2
+#define MOD_STR_MON_PORT_CELLS2PIFB 3
+#define MOD_STR_MON_PORT_SIF2SP 4
+#define MOD_STR_MON_PORT_SP2SIF 5
+#define MOD_STR_MON_PORT_MC2SP 6
+#define MOD_STR_MON_PORT_SP2MC 7
+#define MOD_STR_MON_PORT_DMA2ISP 8
+#define MOD_STR_MON_PORT_ISP2DMA 9
+#define MOD_STR_MON_PORT_DMA2SP 10
+#define MOD_STR_MON_PORT_SP2DMA 11
+#define MOD_STR_MON_PORT_GDC12CELLS 12
+#define MOD_STR_MON_PORT_CELLS2GDC1 13
+#define MOD_STR_MON_PORT_GDC22CELLS 14
+#define MOD_STR_MON_PORT_CELLS2GDC2 15
+
+#define MOD_STR_MON_PORT_SND_PIF_A 0
+#define MOD_STR_MON_PORT_RCV_PIF_A 1
+#define MOD_STR_MON_PORT_SND_PIF_B 2
+#define MOD_STR_MON_PORT_RCV_PIF_B 3
+#define MOD_STR_MON_PORT_SND_SIF 4
+#define MOD_STR_MON_PORT_RCV_SIF 5
+#define MOD_STR_MON_PORT_SND_MC 6
+#define MOD_STR_MON_PORT_RCV_MC 7
+#define MOD_STR_MON_PORT_SND_DMA2ISP 8
+#define MOD_STR_MON_PORT_RCV_DMA_FR_ISP 9
+#define MOD_STR_MON_PORT_SND_DMA2SP 10
+#define MOD_STR_MON_PORT_RCV_DMA_FR_SP 11
+#define MOD_STR_MON_PORT_SND_GDC 12
+#define MOD_STR_MON_PORT_RCV_GDC 13
+
+
+/* testbench signals: */
+
+/* testbench GP adapter register ids */
+#define HIVE_TESTBENCH_GPIO_DATA_OUT_REG_IDX 0
+#define HIVE_TESTBENCH_GPIO_DIR_OUT_REG_IDX 1
+#define HIVE_TESTBENCH_IRQ_REG_IDX 2
+#define HIVE_TESTBENCH_SDRAM_WAKEUP_REG_IDX 3
+#define HIVE_TESTBENCH_IDLE_REG_IDX 4
+#define HIVE_TESTBENCH_GPIO_DATA_IN_REG_IDX 5
+#define HIVE_TESTBENCH_MIPI_BFM_EN_REG_IDX 6
+#define HIVE_TESTBENCH_CSI_CONFIG_REG_IDX 7
+#define HIVE_TESTBENCH_DDR_STALL_EN_REG_IDX 8
+
+#define HIVE_TESTBENCH_ISP_PMEM_ERROR_IRQ_REG_IDX 9
+#define HIVE_TESTBENCH_ISP_BAMEM_ERROR_IRQ_REG_IDX 10
+#define HIVE_TESTBENCH_ISP_DMEM_ERROR_IRQ_REG_IDX 11
+#define HIVE_TESTBENCH_SP_ICACHE_MEM_ERROR_IRQ_REG_IDX 12
+#define HIVE_TESTBENCH_SP_DMEM_ERROR_IRQ_REG_IDX 13
+
+#define HIVE_TESTBENCH_MIPI_PARPATHEN_REG_IDX 14
+#define HIVE_TESTBENCH_FB_HPLL_FREQ_REG_IDX 15
+#define HIVE_TESTBENCH_ISCLK_RATIO_REG_IDX 16
+
+/* Signal monitor input bit ids */
+#define HIVE_TESTBENCH_SIG_MON_GPIO_PIN_O_BIT_ID 0
+#define HIVE_TESTBENCH_SIG_MON_GPIO_PIN_1_BIT_ID 1
+#define HIVE_TESTBENCH_SIG_MON_GPIO_PIN_2_BIT_ID 2
+#define HIVE_TESTBENCH_SIG_MON_GPIO_PIN_3_BIT_ID 3
+#define HIVE_TESTBENCH_SIG_MON_GPIO_PIN_4_BIT_ID 4
+#define HIVE_TESTBENCH_SIG_MON_GPIO_PIN_5_BIT_ID 5
+#define HIVE_TESTBENCH_SIG_MON_GPIO_PIN_6_BIT_ID 6
+#define HIVE_TESTBENCH_SIG_MON_GPIO_PIN_7_BIT_ID 7
+#define HIVE_TESTBENCH_SIG_MON_GPIO_PIN_8_BIT_ID 8
+#define HIVE_TESTBENCH_SIG_MON_GPIO_PIN_9_BIT_ID 9
+#define HIVE_TESTBENCH_SIG_MON_GPIO_PIN_10_BIT_ID 10
+#define HIVE_TESTBENCH_SIG_MON_GPIO_PIN_11_BIT_ID 11
+#define HIVE_TESTBENCH_SIG_MON_IRQ_PIN_BIT_ID 12
+#define HIVE_TESTBENCH_SIG_MON_SDRAM_WAKEUP_PIN_BIT_ID 13
+#define HIVE_TESTBENCH_SIG_MON_IDLE_PIN_BIT_ID 14
+
+#define ISP2400_DEBUG_NETWORK 1
+
+#endif /* _hive_isp_css_defs_h__ */
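
The HIVE_GP_REGS_SRST_* names above are bit positions in the soft-reset register (HIVE_GP_REGS_SRST_IDX), not ready-made masks. A small sketch of composing a reset mask, here for both GDC instances plus the DMA; actually writing it to the register is left out, only the bit packing is shown:

static inline unsigned int srst_mask_gdc_and_dma(void)
{
        return (1U << HIVE_GP_REGS_SRST_GDC1) |
               (1U << HIVE_GP_REGS_SRST_GDC2) |
               (1U << HIVE_GP_REGS_SRST_DMA);
}
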
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/hive_isp_css_host_ids_hrt.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/hive_isp_css_host_ids_hrt.h
new file mode 100644
index 000000000000..8d4c9d67c0e2
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/hive_isp_css_host_ids_hrt.h
@@ -0,0 +1,119 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _hive_isp_css_host_ids_hrt_h_
+#define _hive_isp_css_host_ids_hrt_h_
+
+/* ISP_CSS identifiers */
+#define INP_SYS testbench_isp_isp_css_part_is_2400_inp_sys
+#define ISYS_GP_REGS testbench_isp_isp_css_part_is_2400_inp_sys_gpreg
+#define ISYS_IRQ_CTRL testbench_isp_isp_css_part_is_2400_inp_sys_irq_ctrl
+#define ISYS_CAP_A testbench_isp_isp_css_part_is_2400_inp_sys_capt_unit_a
+#define ISYS_CAP_B testbench_isp_isp_css_part_is_2400_inp_sys_capt_unit_b
+#define ISYS_CAP_C testbench_isp_isp_css_part_is_2400_inp_sys_capt_unit_c
+#define ISYS_INP_BUF testbench_isp_isp_css_part_input_buffer
+#define ISYS_INP_CTRL testbench_isp_isp_css_part_is_2400_inp_sys_inp_ctrl
+#define ISYS_ACQ testbench_isp_isp_css_part_is_2400_inp_sys_acq_unit
+
+#define ISP testbench_isp_isp_css_sec_part_isp
+#define SP testbench_isp_isp_css_sec_part_scp
+
+#define IF_PRIM testbench_isp_isp_css_part_is_2400_ifmt_ift_prim
+#define IF_PRIM_B testbench_isp_isp_css_part_is_2400_ifmt_ift_prim_b
+#define IF_SEC testbench_isp_isp_css_part_is_2400_ifmt_ift_sec
+#define IF_SEC_MASTER testbench_isp_isp_css_part_is_2400_ifmt_ift_sec_mt_out
+#define STR_TO_MEM testbench_isp_isp_css_part_is_2400_ifmt_mem_cpy
+#define IFMT_GP_REGS testbench_isp_isp_css_part_is_2400_ifmt_gp_reg
+#define IFMT_IRQ_CTRL testbench_isp_isp_css_part_is_2400_ifmt_irq_ctrl
+
+#define CSS_RECEIVER testbench_isp_isp_css_part_is_2400_inp_sys_csi_receiver
+
+#define TC testbench_isp_isp_css_sec_part_gpd_tc
+#define GPTIMER testbench_isp_isp_css_sec_part_gpd_gptimer
+#define DMA testbench_isp_isp_css_sec_part_isp_dma
+#define GDC testbench_isp_isp_css_sec_part_gdc1
+#define GDC2 testbench_isp_isp_css_sec_part_gdc2
+#define IRQ_CTRL testbench_isp_isp_css_sec_part_gpd_irq_ctrl
+#define GPIO testbench_isp_isp_css_sec_part_gpd_c_gpio
+#define GP_REGS testbench_isp_isp_css_sec_part_gpd_gp_reg
+#define ISEL_GP_REGS testbench_isp_isp_css_part_is_2400_isel_gpr
+#define ISEL_IRQ_CTRL testbench_isp_isp_css_part_is_2400_isel_irq_ctrl
+#define DATA_MMU testbench_isp_isp_css_sec_part_data_out_sys_c_mmu
+#define ICACHE_MMU testbench_isp_isp_css_sec_part_icache_out_sys_c_mmu
+
+/* the next is actually not a FIFO but a FIFO adapter, or slave to streaming adapter */
+#define ISP_SP_FIFO testbench_isp_isp_css_sec_part_fa_sp_isp
+#define ISEL_FIFO testbench_isp_isp_css_part_is_2400_isel_sf_fa_in
+
+#define FIFO_GPF_SP testbench_isp_isp_css_sec_part_sf_fa2sp_in
+#define FIFO_GPF_ISP testbench_isp_isp_css_sec_part_sf_fa2isp_in
+#define FIFO_SP_GPF testbench_isp_isp_css_sec_part_sf_sp2fa_in
+#define FIFO_ISP_GPF testbench_isp_isp_css_sec_part_sf_isp2fa_in
+
+#define DATA_OCP_MASTER testbench_isp_isp_css_sec_part_data_out_sys_cio2ocp_wide_data_out_mt
+#define ICACHE_OCP_MASTER testbench_isp_isp_css_sec_part_icache_out_sys_cio2ocp_wide_data_out_mt
+
+#define SP_IN_FIFO testbench_isp_isp_css_sec_part_sf_fa2sp_in
+#define SP_OUT_FIFO testbench_isp_isp_css_sec_part_sf_sp2fa_out
+#define ISP_IN_FIFO testbench_isp_isp_css_sec_part_sf_fa2isp_in
+#define ISP_OUT_FIFO testbench_isp_isp_css_sec_part_sf_isp2fa_out
+#define GEN_SHORT_PACK_PORT testbench_isp_isp_css_part_is_2400_inp_sys_csi_str_mon_fa_gensh_out
+
+/* input_system_2401 identifiers */
+#define ISYS2401_GP_REGS testbench_isp_isp_css_part_is_2401_gpreg
+#define ISYS2401_DMA testbench_isp_isp_css_part_is_2401_dma
+#define ISYS2401_IRQ_CTRL testbench_isp_isp_css_part_is_2401_isys_irq_ctrl
+
+#define ISYS2401_CSI_RX_A testbench_isp_isp_css_part_is_2401_is_pipe_a_csi_rx
+#define ISYS2401_MIPI_BE_A testbench_isp_isp_css_part_is_2401_is_pipe_a_mipi_be
+#define ISYS2401_S2M_A testbench_isp_isp_css_part_is_2401_is_pipe_a_s2m
+#define ISYS2401_PXG_A testbench_isp_isp_css_part_is_2401_is_pipe_a_pxlgen
+#define ISYS2401_IBUF_CNTRL_A testbench_isp_isp_css_part_is_2401_is_pipe_a_ibuf_ctrl
+#define ISYS2401_IRQ_CTRL_A testbench_isp_isp_css_part_is_2401_is_pipe_a_irq_ctrl_pipe
+
+#define ISYS2401_CSI_RX_B testbench_isp_isp_css_part_is_2401_is_pipe_b_csi_rx
+#define ISYS2401_MIPI_BE_B testbench_isp_isp_css_part_is_2401_is_pipe_b_mipi_be
+#define ISYS2401_S2M_B testbench_isp_isp_css_part_is_2401_is_pipe_b_s2m
+#define ISYS2401_PXG_B testbench_isp_isp_css_part_is_2401_is_pipe_b_pxlgen
+#define ISYS2401_IBUF_CNTRL_B testbench_isp_isp_css_part_is_2401_is_pipe_b_ibuf_ctrl
+#define ISYS2401_IRQ_CTRL_B testbench_isp_isp_css_part_is_2401_is_pipe_b_irq_ctrl_pipe
+
+#define ISYS2401_CSI_RX_C testbench_isp_isp_css_part_is_2401_is_pipe_c_csi_rx
+#define ISYS2401_MIPI_BE_C testbench_isp_isp_css_part_is_2401_is_pipe_c_mipi_be
+#define ISYS2401_S2M_C testbench_isp_isp_css_part_is_2401_is_pipe_c_s2m
+#define ISYS2401_PXG_C testbench_isp_isp_css_part_is_2401_is_pipe_c_pxlgen
+#define ISYS2401_IBUF_CNTRL_C testbench_isp_isp_css_part_is_2401_is_pipe_c_ibuf_ctrl
+#define ISYS2401_IRQ_CTRL_C testbench_isp_isp_css_part_is_2401_is_pipe_c_irq_ctrl_pipe
+
+
+/* Testbench identifiers */
+#define DDR testbench_ddram
+#define DDR_SMALL testbench_ddram_small
+#define XMEM DDR
+#define GPIO_ADAPTER testbench_gp_adapter
+#define SIG_MONITOR testbench_sig_mon
+#define DDR_SLAVE testbench_ddram_ip0
+#define DDR_SMALL_SLAVE testbench_ddram_small_ip0
+#define HOST_MASTER host_op0
+
+#define CSI_SENSOR testbench_vied_sensor
+#define CSI_SENSOR_GP_REGS testbench_vied_sensor_gpreg
+#define CSI_STR_IN_A testbench_vied_sensor_tx_a_csi_tx_data_in
+#define CSI_STR_IN_B testbench_vied_sensor_tx_b_csi_tx_data_in
+#define CSI_STR_IN_C testbench_vied_sensor_tx_c_csi_tx_data_in
+#define CSI_SENSOR_TX_A testbench_vied_sensor_tx_a
+#define CSI_SENSOR_TX_B testbench_vied_sensor_tx_b
+#define CSI_SENSOR_TX_C testbench_vied_sensor_tx_c
+
+#endif /* _hive_isp_css_host_ids_hrt_h_ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/hive_isp_css_streaming_to_mipi_types_hrt.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/hive_isp_css_streaming_to_mipi_types_hrt.h
new file mode 100644
index 000000000000..b4211a0c631a
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/hive_isp_css_streaming_to_mipi_types_hrt.h
@@ -0,0 +1,26 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _hive_isp_css_streaming_to_mipi_types_hrt_h_
+#define _hive_isp_css_streaming_to_mipi_types_hrt_h_
+
+#include <streaming_to_mipi_defs.h>
+
+#define _HIVE_ISP_CH_ID_MASK ((1U << HIVE_ISP_CH_ID_BITS)-1)
+#define _HIVE_ISP_FMT_TYPE_MASK ((1U << HIVE_ISP_FMT_TYPE_BITS)-1)
+
+#define _HIVE_STR_TO_MIPI_FMT_TYPE_LSB (HIVE_STR_TO_MIPI_CH_ID_LSB + HIVE_ISP_CH_ID_BITS)
+#define _HIVE_STR_TO_MIPI_DATA_B_LSB (HIVE_STR_TO_MIPI_DATA_A_LSB + HIVE_IF_PIXEL_WIDTH)
+
+#endif /* _hive_isp_css_streaming_to_mipi_types_hrt_h_ */
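
The masks and LSB positions above describe how the channel id and format type are packed next to each other in a streaming-to-MIPI word; the base HIVE_STR_TO_MIPI_CH_ID_LSB and HIVE_STR_TO_MIPI_DATA_A_LSB positions come from the included streaming_to_mipi_defs.h, which is not shown here. A sketch of the packing under that assumption (the function name is illustrative only):

static inline unsigned int str_to_mipi_sideband(unsigned int ch_id,
                                                unsigned int fmt_type)
{
        /* ch_id sits at HIVE_STR_TO_MIPI_CH_ID_LSB, fmt_type directly above it */
        return ((ch_id & _HIVE_ISP_CH_ID_MASK) << HIVE_STR_TO_MIPI_CH_ID_LSB) |
               ((fmt_type & _HIVE_ISP_FMT_TYPE_MASK) <<
                _HIVE_STR_TO_MIPI_FMT_TYPE_LSB);
}
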
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/hive_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/hive_types.h
new file mode 100644
index 000000000000..58b0e6effbd0
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/hive_types.h
@@ -0,0 +1,128 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _HRT_HIVE_TYPES_H
+#define _HRT_HIVE_TYPES_H
+
+#include "version.h"
+#include "defs.h"
+
+#ifndef HRTCAT3
+#define _HRTCAT3(m,n,o) m##n##o
+#define HRTCAT3(m,n,o) _HRTCAT3(m,n,o)
+#endif
+
+#ifndef HRTCAT4
+#define _HRTCAT4(m,n,o,p) m##n##o##p
+#define HRTCAT4(m,n,o,p) _HRTCAT4(m,n,o,p)
+#endif
+
+#ifndef HRTMIN
+#define HRTMIN(a,b) (((a)<(b))?(a):(b))
+#endif
+
+#ifndef HRTMAX
+#define HRTMAX(a,b) (((a)>(b))?(a):(b))
+#endif
+
+/* boolean data type */
+typedef unsigned int hive_bool;
+#define hive_false 0
+#define hive_true 1
+
+typedef char hive_int8;
+typedef short hive_int16;
+typedef int hive_int32;
+typedef long long hive_int64;
+
+typedef unsigned char hive_uint8;
+typedef unsigned short hive_uint16;
+typedef unsigned int hive_uint32;
+typedef unsigned long long hive_uint64;
+
+/* by default assume 32 bit master port (both data and address) */
+#ifndef HRT_DATA_WIDTH
+#define HRT_DATA_WIDTH 32
+#endif
+#ifndef HRT_ADDRESS_WIDTH
+#define HRT_ADDRESS_WIDTH 32
+#endif
+
+#define HRT_DATA_BYTES (HRT_DATA_WIDTH/8)
+#define HRT_ADDRESS_BYTES (HRT_ADDRESS_WIDTH/8)
+
+#if HRT_DATA_WIDTH == 64
+typedef hive_uint64 hrt_data;
+#elif HRT_DATA_WIDTH == 32
+typedef hive_uint32 hrt_data;
+#else
+#error data width not supported
+#endif
+
+#if HRT_ADDRESS_WIDTH == 64
+typedef hive_uint64 hrt_address;
+#elif HRT_ADDRESS_WIDTH == 32
+typedef hive_uint32 hrt_address;
+#else
+#error address width not supported
+#endif
+
+/* The SP side representation of an HMM virtual address */
+typedef hive_uint32 hrt_vaddress;
+
+/* use 64 bit addresses in simulation, where possible */
+typedef hive_uint64 hive_sim_address;
+
+/* below is for csim, not for hrt, rename and move this elsewhere */
+
+typedef unsigned int hive_uint;
+typedef hive_uint32 hive_address;
+typedef hive_address hive_slave_address;
+typedef hive_address hive_mem_address;
+
+/* MMIO devices */
+typedef hive_uint hive_mmio_id;
+typedef hive_mmio_id hive_slave_id;
+typedef hive_mmio_id hive_port_id;
+typedef hive_mmio_id hive_master_id;
+typedef hive_mmio_id hive_mem_id;
+typedef hive_mmio_id hive_dev_id;
+typedef hive_mmio_id hive_fifo_id;
+
+typedef hive_uint hive_hier_id;
+typedef hive_hier_id hive_device_id;
+typedef hive_device_id hive_proc_id;
+typedef hive_device_id hive_cell_id;
+typedef hive_device_id hive_host_id;
+typedef hive_device_id hive_bus_id;
+typedef hive_device_id hive_bridge_id;
+typedef hive_device_id hive_fifo_adapter_id;
+typedef hive_device_id hive_custom_device_id;
+
+typedef hive_uint hive_slot_id;
+typedef hive_uint hive_fu_id;
+typedef hive_uint hive_reg_file_id;
+typedef hive_uint hive_reg_id;
+
+/* Streaming devices */
+typedef hive_uint hive_outport_id;
+typedef hive_uint hive_inport_id;
+
+typedef hive_uint hive_msink_id;
+
+/* HRT specific */
+typedef char* hive_program;
+typedef char* hive_function;
+
+#endif /* _HRT_HIVE_TYPES_H */
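
HRTCAT3/HRTCAT4 are two-level token-pasting helpers: the inner _HRTCAT* does the paste, and the outer wrapper forces its arguments to be macro-expanded first. A tiny illustration with made-up names:

/* MY_CELL and the resulting testbench_isp_dmem_size identifier are
 * examples only; the point is that MY_CELL is expanded before pasting. */
#define MY_CELL testbench_isp
#define MY_CELL_DMEM_SIZE HRTCAT3(MY_CELL, _dmem, _size)
/* MY_CELL_DMEM_SIZE now expands to: testbench_isp_dmem_size */
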
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/if_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/if_defs.h
new file mode 100644
index 000000000000..7d39e45796ae
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/if_defs.h
@@ -0,0 +1,22 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _IF_DEFS_H
+#define _IF_DEFS_H
+
+#define HIVE_IF_FRAME_REQUEST 0xA000
+#define HIVE_IF_LINES_REQUEST 0xB000
+#define HIVE_IF_VECTORS_REQUEST 0xC000
+
+#endif /* _IF_DEFS_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/input_formatter_subsystem_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/input_formatter_subsystem_defs.h
new file mode 100644
index 000000000000..16bfe1d80bc9
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/input_formatter_subsystem_defs.h
@@ -0,0 +1,53 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _if_subsystem_defs_h__
+#define _if_subsystem_defs_h__
+
+#define HIVE_IFMT_GP_REGS_INPUT_SWITCH_LUT_REG_0 0
+#define HIVE_IFMT_GP_REGS_INPUT_SWITCH_LUT_REG_1 1
+#define HIVE_IFMT_GP_REGS_INPUT_SWITCH_LUT_REG_2 2
+#define HIVE_IFMT_GP_REGS_INPUT_SWITCH_LUT_REG_3 3
+#define HIVE_IFMT_GP_REGS_INPUT_SWITCH_LUT_REG_4 4
+#define HIVE_IFMT_GP_REGS_INPUT_SWITCH_LUT_REG_5 5
+#define HIVE_IFMT_GP_REGS_INPUT_SWITCH_LUT_REG_6 6
+#define HIVE_IFMT_GP_REGS_INPUT_SWITCH_LUT_REG_7 7
+#define HIVE_IFMT_GP_REGS_INPUT_SWITCH_FSYNC_LUT_REG 8
+#define HIVE_IFMT_GP_REGS_SRST_IDX 9
+#define HIVE_IFMT_GP_REGS_SLV_REG_SRST_IDX 10
+
+#define HIVE_IFMT_GP_REGS_CH_ID_FMT_TYPE_IDX 11
+
+#define HIVE_IFMT_GP_REGS_INPUT_SWITCH_LUT_REG_BASE HIVE_IFMT_GP_REGS_INPUT_SWITCH_LUT_REG_0
+
+/* order of the input bits for the ifmt irq controller */
+#define HIVE_IFMT_IRQ_IFT_PRIM_BIT_ID 0
+#define HIVE_IFMT_IRQ_IFT_PRIM_B_BIT_ID 1
+#define HIVE_IFMT_IRQ_IFT_SEC_BIT_ID 2
+#define HIVE_IFMT_IRQ_MEM_CPY_BIT_ID 3
+#define HIVE_IFMT_IRQ_SIDEBAND_CHANGED_BIT_ID 4
+
+/* order of the input bits for the ifmt Soft reset register */
+#define HIVE_IFMT_GP_REGS_SRST_IFT_PRIM_BIT_IDX 0
+#define HIVE_IFMT_GP_REGS_SRST_IFT_PRIM_B_BIT_IDX 1
+#define HIVE_IFMT_GP_REGS_SRST_IFT_SEC_BIT_IDX 2
+#define HIVE_IFMT_GP_REGS_SRST_MEM_CPY_BIT_IDX 3
+
+/* order of the input bits for the ifmt Soft reset register */
+#define HIVE_IFMT_GP_REGS_SLV_REG_SRST_IFT_PRIM_BIT_IDX 0
+#define HIVE_IFMT_GP_REGS_SLV_REG_SRST_IFT_PRIM_B_BIT_IDX 1
+#define HIVE_IFMT_GP_REGS_SLV_REG_SRST_IFT_SEC_BIT_IDX 2
+#define HIVE_IFMT_GP_REGS_SLV_REG_SRST_MEM_CPY_BIT_IDX 3
+
+#endif /* _if_subsystem_defs_h__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/input_selector_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/input_selector_defs.h
new file mode 100644
index 000000000000..87fbf82edb5b
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/input_selector_defs.h
@@ -0,0 +1,89 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _input_selector_defs_h
+#define _input_selector_defs_h
+
+#ifndef HIVE_ISP_ISEL_SEL_BITS
+#define HIVE_ISP_ISEL_SEL_BITS 2
+#endif
+
+#ifndef HIVE_ISP_CH_ID_BITS
+#define HIVE_ISP_CH_ID_BITS 2
+#endif
+
+#ifndef HIVE_ISP_FMT_TYPE_BITS
+#define HIVE_ISP_FMT_TYPE_BITS 5
+#endif
+
+/* gp_register register id's -- Outputs */
+#define HIVE_ISEL_GP_REGS_SYNCGEN_ENABLE_IDX 0
+#define HIVE_ISEL_GP_REGS_SYNCGEN_FREE_RUNNING_IDX 1
+#define HIVE_ISEL_GP_REGS_SYNCGEN_PAUSE_IDX 2
+#define HIVE_ISEL_GP_REGS_SYNCGEN_NR_FRAMES_IDX 3
+#define HIVE_ISEL_GP_REGS_SYNCGEN_NR_PIX_IDX 4
+#define HIVE_ISEL_GP_REGS_SYNCGEN_NR_LINES_IDX 5
+#define HIVE_ISEL_GP_REGS_SYNCGEN_HBLANK_CYCLES_IDX 6
+#define HIVE_ISEL_GP_REGS_SYNCGEN_VBLANK_CYCLES_IDX 7
+
+#define HIVE_ISEL_GP_REGS_SOF_IDX 8
+#define HIVE_ISEL_GP_REGS_EOF_IDX 9
+#define HIVE_ISEL_GP_REGS_SOL_IDX 10
+#define HIVE_ISEL_GP_REGS_EOL_IDX 11
+
+#define HIVE_ISEL_GP_REGS_PRBS_ENABLE 12
+#define HIVE_ISEL_GP_REGS_PRBS_ENABLE_PORT_B 13
+#define HIVE_ISEL_GP_REGS_PRBS_LFSR_RESET_VALUE 14
+
+#define HIVE_ISEL_GP_REGS_TPG_ENABLE 15
+#define HIVE_ISEL_GP_REGS_TPG_ENABLE_PORT_B 16
+#define HIVE_ISEL_GP_REGS_TPG_HOR_CNT_MASK_IDX 17
+#define HIVE_ISEL_GP_REGS_TPG_VER_CNT_MASK_IDX 18
+#define HIVE_ISEL_GP_REGS_TPG_XY_CNT_MASK_IDX 19
+#define HIVE_ISEL_GP_REGS_TPG_HOR_CNT_DELTA_IDX 20
+#define HIVE_ISEL_GP_REGS_TPG_VER_CNT_DELTA_IDX 21
+#define HIVE_ISEL_GP_REGS_TPG_MODE_IDX 22
+#define HIVE_ISEL_GP_REGS_TPG_R1_IDX 23
+#define HIVE_ISEL_GP_REGS_TPG_G1_IDX 24
+#define HIVE_ISEL_GP_REGS_TPG_B1_IDX 25
+#define HIVE_ISEL_GP_REGS_TPG_R2_IDX 26
+#define HIVE_ISEL_GP_REGS_TPG_G2_IDX 27
+#define HIVE_ISEL_GP_REGS_TPG_B2_IDX 28
+
+
+#define HIVE_ISEL_GP_REGS_CH_ID_IDX 29
+#define HIVE_ISEL_GP_REGS_FMT_TYPE_IDX 30
+#define HIVE_ISEL_GP_REGS_DATA_SEL_IDX 31
+#define HIVE_ISEL_GP_REGS_SBAND_SEL_IDX 32
+#define HIVE_ISEL_GP_REGS_SYNC_SEL_IDX 33
+#define HIVE_ISEL_GP_REGS_SRST_IDX 37
+
+#define HIVE_ISEL_GP_REGS_SRST_SYNCGEN_BIT 0
+#define HIVE_ISEL_GP_REGS_SRST_PRBS_BIT 1
+#define HIVE_ISEL_GP_REGS_SRST_TPG_BIT 2
+#define HIVE_ISEL_GP_REGS_SRST_FIFO_BIT 3
+
+/* gp_register register id's -- Inputs */
+#define HIVE_ISEL_GP_REGS_SYNCGEN_HOR_CNT_IDX 34
+#define HIVE_ISEL_GP_REGS_SYNCGEN_VER_CNT_IDX 35
+#define HIVE_ISEL_GP_REGS_SYNCGEN_FRAMES_CNT_IDX 36
+
+/* irq sources isel irq controller */
+#define HIVE_ISEL_IRQ_SYNC_GEN_SOF_BIT_ID 0
+#define HIVE_ISEL_IRQ_SYNC_GEN_EOF_BIT_ID 1
+#define HIVE_ISEL_IRQ_SYNC_GEN_SOL_BIT_ID 2
+#define HIVE_ISEL_IRQ_SYNC_GEN_EOL_BIT_ID 3
+#define HIVE_ISEL_IRQ_NUM_IRQS 4
+
+#endif /* _input_selector_defs_h */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/input_switch_2400_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/input_switch_2400_defs.h
new file mode 100644
index 000000000000..20a13c4cdb56
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/input_switch_2400_defs.h
@@ -0,0 +1,30 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _input_switch_2400_defs_h
+#define _input_switch_2400_defs_h
+
+#define _HIVE_INPUT_SWITCH_GET_LUT_REG_ID(ch_id, fmt_type) (((ch_id)*2) + ((fmt_type)>=16))
+#define _HIVE_INPUT_SWITCH_GET_LUT_REG_LSB(fmt_type) (((fmt_type)%16) * 2)
+
+#define HIVE_INPUT_SWITCH_SELECT_NO_OUTPUT 0
+#define HIVE_INPUT_SWITCH_SELECT_IF_PRIM 1
+#define HIVE_INPUT_SWITCH_SELECT_IF_SEC 2
+#define HIVE_INPUT_SWITCH_SELECT_STR_TO_MEM 3
+#define HIVE_INPUT_SWITCH_VSELECT_NO_OUTPUT 0
+#define HIVE_INPUT_SWITCH_VSELECT_IF_PRIM 1
+#define HIVE_INPUT_SWITCH_VSELECT_IF_SEC 2
+#define HIVE_INPUT_SWITCH_VSELECT_STR_TO_MEM 4
+
+#endif /* _input_switch_2400_defs_h */
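
The two _HIVE_INPUT_SWITCH_GET_LUT_* macros locate the 2-bit output select for a given (ch_id, fmt_type) pair: for example, ch_id 1 with fmt_type 18 lands in LUT register 3 at bit 4. A sketch of a read-modify-write update, with read_lut_reg()/write_lut_reg() as assumed helpers that are not part of this header:

extern unsigned int read_lut_reg(unsigned int reg);
extern void write_lut_reg(unsigned int reg, unsigned int val);

static void input_switch_select(unsigned int ch_id, unsigned int fmt_type,
                                unsigned int sel /* HIVE_INPUT_SWITCH_SELECT_* */)
{
        unsigned int reg = _HIVE_INPUT_SWITCH_GET_LUT_REG_ID(ch_id, fmt_type);
        unsigned int lsb = _HIVE_INPUT_SWITCH_GET_LUT_REG_LSB(fmt_type);
        unsigned int val = read_lut_reg(reg);

        val &= ~(0x3U << lsb);          /* clear the 2-bit select field */
        val |= (sel & 0x3U) << lsb;     /* install the new select */
        write_lut_reg(reg, val);
}
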
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/input_system_ctrl_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/input_system_ctrl_defs.h
new file mode 100644
index 000000000000..a7f0ca80bc9b
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/input_system_ctrl_defs.h
@@ -0,0 +1,254 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _input_system_ctrl_defs_h
+#define _input_system_ctrl_defs_h
+
+#define _INPUT_SYSTEM_CTRL_REG_ALIGN 4 /* assuming 32 bit control bus width */
+
+/* --------------------------------------------------*/
+
+/* --------------------------------------------------*/
+/* REGISTER INFO */
+/* --------------------------------------------------*/
+
+// Number of registers
+#define ISYS_CTRL_NOF_REGS 23
+
+// Register IDs of the MMIO slave accessible registers
+#define ISYS_CTRL_CAPT_START_ADDR_A_REG_ID 0
+#define ISYS_CTRL_CAPT_START_ADDR_B_REG_ID 1
+#define ISYS_CTRL_CAPT_START_ADDR_C_REG_ID 2
+#define ISYS_CTRL_CAPT_MEM_REGION_SIZE_A_REG_ID 3
+#define ISYS_CTRL_CAPT_MEM_REGION_SIZE_B_REG_ID 4
+#define ISYS_CTRL_CAPT_MEM_REGION_SIZE_C_REG_ID 5
+#define ISYS_CTRL_CAPT_NUM_MEM_REGIONS_A_REG_ID 6
+#define ISYS_CTRL_CAPT_NUM_MEM_REGIONS_B_REG_ID 7
+#define ISYS_CTRL_CAPT_NUM_MEM_REGIONS_C_REG_ID 8
+#define ISYS_CTRL_ACQ_START_ADDR_REG_ID 9
+#define ISYS_CTRL_ACQ_MEM_REGION_SIZE_REG_ID 10
+#define ISYS_CTRL_ACQ_NUM_MEM_REGIONS_REG_ID 11
+#define ISYS_CTRL_INIT_REG_ID 12
+#define ISYS_CTRL_LAST_COMMAND_REG_ID 13
+#define ISYS_CTRL_NEXT_COMMAND_REG_ID 14
+#define ISYS_CTRL_LAST_ACKNOWLEDGE_REG_ID 15
+#define ISYS_CTRL_NEXT_ACKNOWLEDGE_REG_ID 16
+#define ISYS_CTRL_FSM_STATE_INFO_REG_ID 17
+#define ISYS_CTRL_CAPT_A_FSM_STATE_INFO_REG_ID 18
+#define ISYS_CTRL_CAPT_B_FSM_STATE_INFO_REG_ID 19
+#define ISYS_CTRL_CAPT_C_FSM_STATE_INFO_REG_ID 20
+#define ISYS_CTRL_ACQ_FSM_STATE_INFO_REG_ID 21
+#define ISYS_CTRL_CAPT_RESERVE_ONE_MEM_REGION_REG_ID 22
+
+
+/* register reset value */
+#define ISYS_CTRL_CAPT_START_ADDR_A_REG_RSTVAL 0
+#define ISYS_CTRL_CAPT_START_ADDR_B_REG_RSTVAL 0
+#define ISYS_CTRL_CAPT_START_ADDR_C_REG_RSTVAL 0
+#define ISYS_CTRL_CAPT_MEM_REGION_SIZE_A_REG_RSTVAL 128
+#define ISYS_CTRL_CAPT_MEM_REGION_SIZE_B_REG_RSTVAL 128
+#define ISYS_CTRL_CAPT_MEM_REGION_SIZE_C_REG_RSTVAL 128
+#define ISYS_CTRL_CAPT_NUM_MEM_REGIONS_A_REG_RSTVAL 3
+#define ISYS_CTRL_CAPT_NUM_MEM_REGIONS_B_REG_RSTVAL 3
+#define ISYS_CTRL_CAPT_NUM_MEM_REGIONS_C_REG_RSTVAL 3
+#define ISYS_CTRL_ACQ_START_ADDR_REG_RSTVAL 0
+#define ISYS_CTRL_ACQ_MEM_REGION_SIZE_REG_RSTVAL 128
+#define ISYS_CTRL_ACQ_NUM_MEM_REGIONS_REG_RSTVAL 3
+#define ISYS_CTRL_INIT_REG_RSTVAL 0
+#define ISYS_CTRL_LAST_COMMAND_REG_RSTVAL 15 //0x0000_000F (to signal non-valid cmd/ack after reset/soft-reset)
+#define ISYS_CTRL_NEXT_COMMAND_REG_RSTVAL 15 //0x0000_000F (to signal non-valid cmd/ack after reset/soft-reset)
+#define ISYS_CTRL_LAST_ACKNOWLEDGE_REG_RSTVAL 15 //0x0000_000F (to signal non-valid cmd/ack after reset/soft-reset)
+#define ISYS_CTRL_NEXT_ACKNOWLEDGE_REG_RSTVAL 15 //0x0000_000F (to signal non-valid cmd/ack after reset/soft-reset)
+#define ISYS_CTRL_FSM_STATE_INFO_REG_RSTVAL 0
+#define ISYS_CTRL_CAPT_A_FSM_STATE_INFO_REG_RSTVAL 0
+#define ISYS_CTRL_CAPT_B_FSM_STATE_INFO_REG_RSTVAL 0
+#define ISYS_CTRL_CAPT_C_FSM_STATE_INFO_REG_RSTVAL 0
+#define ISYS_CTRL_ACQ_FSM_STATE_INFO_REG_RSTVAL 0
+#define ISYS_CTRL_CAPT_RESERVE_ONE_MEM_REGION_REG_RSTVAL 0
+
+/* register width value */
+#define ISYS_CTRL_CAPT_START_ADDR_A_REG_WIDTH 9
+#define ISYS_CTRL_CAPT_START_ADDR_B_REG_WIDTH 9
+#define ISYS_CTRL_CAPT_START_ADDR_C_REG_WIDTH 9
+#define ISYS_CTRL_CAPT_MEM_REGION_SIZE_A_REG_WIDTH 9
+#define ISYS_CTRL_CAPT_MEM_REGION_SIZE_B_REG_WIDTH 9
+#define ISYS_CTRL_CAPT_MEM_REGION_SIZE_C_REG_WIDTH 9
+#define ISYS_CTRL_CAPT_NUM_MEM_REGIONS_A_REG_WIDTH 9
+#define ISYS_CTRL_CAPT_NUM_MEM_REGIONS_B_REG_WIDTH 9
+#define ISYS_CTRL_CAPT_NUM_MEM_REGIONS_C_REG_WIDTH 9
+#define ISYS_CTRL_ACQ_START_ADDR_REG_WIDTH 9
+#define ISYS_CTRL_ACQ_MEM_REGION_SIZE_REG_WIDTH 9
+#define ISYS_CTRL_ACQ_NUM_MEM_REGIONS_REG_WIDTH 9
+#define ISYS_CTRL_INIT_REG_WIDTH 3
+#define ISYS_CTRL_LAST_COMMAND_REG_WIDTH 32 /* slave data width */
+#define ISYS_CTRL_NEXT_COMMAND_REG_WIDTH 32
+#define ISYS_CTRL_LAST_ACKNOWLEDGE_REG_WIDTH 32
+#define ISYS_CTRL_NEXT_ACKNOWLEDGE_REG_WIDTH 32
+#define ISYS_CTRL_FSM_STATE_INFO_REG_WIDTH 32
+#define ISYS_CTRL_CAPT_A_FSM_STATE_INFO_REG_WIDTH 32
+#define ISYS_CTRL_CAPT_B_FSM_STATE_INFO_REG_WIDTH 32
+#define ISYS_CTRL_CAPT_C_FSM_STATE_INFO_REG_WIDTH 32
+#define ISYS_CTRL_ACQ_FSM_STATE_INFO_REG_WIDTH 32
+#define ISYS_CTRL_CAPT_RESERVE_ONE_MEM_REGION_REG_WIDTH 1
+
+/* bit definitions */
+
+/* --------------------------------------------------*/
+/* TOKEN INFO */
+/* --------------------------------------------------*/
+
+/*
+InpSysCaptFramesAcq 1/0 [3:0] - 'b0000
+[7:4] - CaptPortId,
+ CaptA-'b0000
+ CaptB-'b0001
+ CaptC-'b0010
+[31:16] - NOF_frames
+InpSysCaptFrameExt 2/0 [3:0] - 'b0001'
+[7:4] - CaptPortId,
+ 'b0000 - CaptA
+ 'b0001 - CaptB
+ 'b0010 - CaptC
+
+ 2/1 [31:0] - external capture address
+InpSysAcqFrame 2/0 [3:0] - 'b0010,
+[31:4] - NOF_ext_mem_words
+ 2/1 [31:0] - external memory read start address
+InpSysOverruleON 1/0 [3:0] - 'b0011,
+[7:4] - overrule port id (opid)
+ 'b0000 - CaptA
+ 'b0001 - CaptB
+ 'b0010 - CaptC
+ 'b0011 - Acq
+ 'b0100 - DMA
+
+
+InpSysOverruleOFF 1/0 [3:0] - 'b0100,
+[7:4] - overrule port id (opid)
+ 'b0000 - CaptA
+ 'b0001 - CaptB
+ 'b0010 - CaptC
+ 'b0011 - Acq
+ 'b0100 - DMA
+
+
+InpSysOverruleCmd 2/0 [3:0] - 'b0101,
+[7:4] - overrule port id (opid)
+ 'b0000 - CaptA
+ 'b0001 - CaptB
+ 'b0010 - CaptC
+ 'b0011 - Acq
+ 'b0100 - DMA
+
+
+ 2/1 [31:0] - command token value for port opid
+
+
+acknowledge tokens:
+
+InpSysAckCFA 1/0 [3:0] - 'b0000
+ [7:4] - CaptPortId,
+ CaptA-'b0000
+ CaptB- 'b0001
+ CaptC-'b0010
+ [31:16] - NOF_frames
+InpSysAckCFE 1/0 [3:0] - 'b0001'
+[7:4] - CaptPortId,
+ 'b0000 - CaptA
+ 'b0001 - CaptB
+ 'b0010 - CaptC
+
+InpSysAckAF 1/0 [3:0] - 'b0010
+InpSysAckOverruleON 1/0 [3:0] - 'b0011,
+[7:4] - overrule port id (opid)
+ 'b0000 - CaptA
+ 'b0001 - CaptB
+ 'b0010 - CaptC
+ 'b0011 - Acq
+ 'b0100 - DMA
+
+
+InpSysAckOverruleOFF 1/0 [3:0] - 'b0100,
+[7:4] - overrule port id (opid)
+ 'b0000 - CaptA
+ 'b0001 - CaptB
+ 'b0010 - CaptC
+ 'b0011 - Acq
+ 'b0100 - DMA
+
+
+InpSysAckOverrule 2/0 [3:0] - 'b0101,
+[7:4] - overrule port id (opid)
+ 'b0000 - CaptA
+ 'b0001 - CaptB
+ 'b0010 - CaptC
+ 'b0011 - Acq
+ 'b0100 - DMA
+
+
+ 2/1 [31:0] - acknowledge token value from port opid
+
+
+
+*/
+
+
+/* Command and acknowledge tokens IDs */
+#define ISYS_CTRL_CAPT_FRAMES_ACQ_TOKEN_ID 0 /* 0000b */
+#define ISYS_CTRL_CAPT_FRAME_EXT_TOKEN_ID 1 /* 0001b */
+#define ISYS_CTRL_ACQ_FRAME_TOKEN_ID 2 /* 0010b */
+#define ISYS_CTRL_OVERRULE_ON_TOKEN_ID 3 /* 0011b */
+#define ISYS_CTRL_OVERRULE_OFF_TOKEN_ID 4 /* 0100b */
+#define ISYS_CTRL_OVERRULE_TOKEN_ID 5 /* 0101b */
+
+#define ISYS_CTRL_ACK_CFA_TOKEN_ID 0
+#define ISYS_CTRL_ACK_CFE_TOKEN_ID 1
+#define ISYS_CTRL_ACK_AF_TOKEN_ID 2
+#define ISYS_CTRL_ACK_OVERRULE_ON_TOKEN_ID 3
+#define ISYS_CTRL_ACK_OVERRULE_OFF_TOKEN_ID 4
+#define ISYS_CTRL_ACK_OVERRULE_TOKEN_ID 5
+#define ISYS_CTRL_ACK_DEVICE_ERROR_TOKEN_ID 6
+
+#define ISYS_CTRL_TOKEN_ID_MSB 3
+#define ISYS_CTRL_TOKEN_ID_LSB 0
+#define ISYS_CTRL_PORT_ID_TOKEN_MSB 7
+#define ISYS_CTRL_PORT_ID_TOKEN_LSB 4
+#define ISYS_CTRL_NOF_CAPT_TOKEN_MSB 31
+#define ISYS_CTRL_NOF_CAPT_TOKEN_LSB 16
+#define ISYS_CTRL_NOF_EXT_TOKEN_MSB 31
+#define ISYS_CTRL_NOF_EXT_TOKEN_LSB 8
+
+#define ISYS_CTRL_TOKEN_ID_IDX 0
+#define ISYS_CTRL_TOKEN_ID_BITS (ISYS_CTRL_TOKEN_ID_MSB - ISYS_CTRL_TOKEN_ID_LSB + 1)
+#define ISYS_CTRL_PORT_ID_IDX (ISYS_CTRL_TOKEN_ID_IDX + ISYS_CTRL_TOKEN_ID_BITS)
+#define ISYS_CTRL_PORT_ID_BITS (ISYS_CTRL_PORT_ID_TOKEN_MSB - ISYS_CTRL_PORT_ID_TOKEN_LSB +1)
+#define ISYS_CTRL_NOF_CAPT_IDX ISYS_CTRL_NOF_CAPT_TOKEN_LSB
+#define ISYS_CTRL_NOF_CAPT_BITS (ISYS_CTRL_NOF_CAPT_TOKEN_MSB - ISYS_CTRL_NOF_CAPT_TOKEN_LSB + 1)
+#define ISYS_CTRL_NOF_EXT_IDX ISYS_CTRL_NOF_EXT_TOKEN_LSB
+#define ISYS_CTRL_NOF_EXT_BITS (ISYS_CTRL_NOF_EXT_TOKEN_MSB - ISYS_CTRL_NOF_EXT_TOKEN_LSB + 1)
+
+#define ISYS_CTRL_PORT_ID_CAPT_A 0 /* device ID for capture unit A */
+#define ISYS_CTRL_PORT_ID_CAPT_B 1 /* device ID for capture unit B */
+#define ISYS_CTRL_PORT_ID_CAPT_C 2 /* device ID for capture unit C */
+#define ISYS_CTRL_PORT_ID_ACQUISITION 3 /* device ID for acquisition unit */
+#define ISYS_CTRL_PORT_ID_DMA_CAPT_A 4 /* device ID for dma unit */
+#define ISYS_CTRL_PORT_ID_DMA_CAPT_B 5 /* device ID for dma unit */
+#define ISYS_CTRL_PORT_ID_DMA_CAPT_C 6 /* device ID for dma unit */
+#define ISYS_CTRL_PORT_ID_DMA_ACQ 7 /* device ID for dma unit */
+
+#define ISYS_CTRL_NO_ACQ_ACK 16 /* no ack from acquisition unit */
+#define ISYS_CTRL_NO_DMA_ACK 0
+#define ISYS_CTRL_NO_CAPT_ACK 16
+
+#endif /* _input_system_ctrl_defs_h */
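
The TOKEN INFO comment above fixes the command layout: token id in [3:0], port id in [7:4], frame count in [31:16], which is exactly what the *_IDX/*_LSB macros encode. A sketch that only shows the bit packing of an InpSysCaptFramesAcq command (delivering it through ISYS_CTRL_NEXT_COMMAND_REG_ID is not shown):

static inline unsigned int isys_ctrl_capt_frames_acq_token(unsigned int capt_port,
                                                           unsigned int nof_frames)
{
        /* capt_port is one of ISYS_CTRL_PORT_ID_CAPT_A/B/C */
        return (ISYS_CTRL_CAPT_FRAMES_ACQ_TOKEN_ID << ISYS_CTRL_TOKEN_ID_IDX) |
               (capt_port << ISYS_CTRL_PORT_ID_IDX) |
               (nof_frames << ISYS_CTRL_NOF_CAPT_IDX);
}
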
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/input_system_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/input_system_defs.h
new file mode 100644
index 000000000000..ae62163034a6
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/input_system_defs.h
@@ -0,0 +1,126 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _input_system_defs_h
+#define _input_system_defs_h
+
+/* csi controller modes */
+#define HIVE_CSI_CONFIG_MAIN 0
+#define HIVE_CSI_CONFIG_STEREO1 4
+#define HIVE_CSI_CONFIG_STEREO2 8
+
+/* general purpose register IDs */
+
+/* Stream Multicast select modes */
+#define HIVE_ISYS_GPREG_MULTICAST_A_IDX 0
+#define HIVE_ISYS_GPREG_MULTICAST_B_IDX 1
+#define HIVE_ISYS_GPREG_MULTICAST_C_IDX 2
+
+/* Stream Mux select modes */
+#define HIVE_ISYS_GPREG_MUX_IDX 3
+
+/* streaming monitor status and control */
+#define HIVE_ISYS_GPREG_STRMON_STAT_IDX 4
+#define HIVE_ISYS_GPREG_STRMON_COND_IDX 5
+#define HIVE_ISYS_GPREG_STRMON_IRQ_EN_IDX 6
+#define HIVE_ISYS_GPREG_SRST_IDX 7
+#define HIVE_ISYS_GPREG_SLV_REG_SRST_IDX 8
+#define HIVE_ISYS_GPREG_REG_PORT_A_IDX 9
+#define HIVE_ISYS_GPREG_REG_PORT_B_IDX 10
+
+/* Bit numbers of the soft reset register */
+#define HIVE_ISYS_GPREG_SRST_CAPT_FIFO_A_BIT 0
+#define HIVE_ISYS_GPREG_SRST_CAPT_FIFO_B_BIT 1
+#define HIVE_ISYS_GPREG_SRST_CAPT_FIFO_C_BIT 2
+#define HIVE_ISYS_GPREG_SRST_MULTICAST_A_BIT 3
+#define HIVE_ISYS_GPREG_SRST_MULTICAST_B_BIT 4
+#define HIVE_ISYS_GPREG_SRST_MULTICAST_C_BIT 5
+#define HIVE_ISYS_GPREG_SRST_CAPT_A_BIT 6
+#define HIVE_ISYS_GPREG_SRST_CAPT_B_BIT 7
+#define HIVE_ISYS_GPREG_SRST_CAPT_C_BIT 8
+#define HIVE_ISYS_GPREG_SRST_ACQ_BIT 9
+/* For ISYS_CTRL, 5 bits are defined to allow soft reset per sub-controller and top-ctrl */
+#define HIVE_ISYS_GPREG_SRST_ISYS_CTRL_BIT 10 /* LSB of the 5-bit vector */
+#define HIVE_ISYS_GPREG_SRST_ISYS_CTRL_CAPT_A_BIT 10
+#define HIVE_ISYS_GPREG_SRST_ISYS_CTRL_CAPT_B_BIT 11
+#define HIVE_ISYS_GPREG_SRST_ISYS_CTRL_CAPT_C_BIT 12
+#define HIVE_ISYS_GPREG_SRST_ISYS_CTRL_ACQ_BIT 13
+#define HIVE_ISYS_GPREG_SRST_ISYS_CTRL_TOP_BIT 14
+/* -- */
+#define HIVE_ISYS_GPREG_SRST_STR_MUX_BIT 15
+#define HIVE_ISYS_GPREG_SRST_CIO2AHB_BIT 16
+#define HIVE_ISYS_GPREG_SRST_GEN_SHORT_FIFO_BIT 17
+#define HIVE_ISYS_GPREG_SRST_WIDE_BUS_BIT 18 /* includes CIO conv */
+#define HIVE_ISYS_GPREG_SRST_DMA_BIT 19
+#define HIVE_ISYS_GPREG_SRST_SF_CTRL_CAPT_A_BIT 20
+#define HIVE_ISYS_GPREG_SRST_SF_CTRL_CAPT_B_BIT 21
+#define HIVE_ISYS_GPREG_SRST_SF_CTRL_CAPT_C_BIT 22
+#define HIVE_ISYS_GPREG_SRST_SF_CTRL_ACQ_BIT 23
+#define HIVE_ISYS_GPREG_SRST_CSI_BE_OUT_BIT 24
+
+#define HIVE_ISYS_GPREG_SLV_REG_SRST_CAPT_A_BIT 0
+#define HIVE_ISYS_GPREG_SLV_REG_SRST_CAPT_B_BIT 1
+#define HIVE_ISYS_GPREG_SLV_REG_SRST_CAPT_C_BIT 2
+#define HIVE_ISYS_GPREG_SLV_REG_SRST_ACQ_BIT 3
+#define HIVE_ISYS_GPREG_SLV_REG_SRST_DMA_BIT 4
+#define HIVE_ISYS_GPREG_SLV_REG_SRST_ISYS_CTRL_BIT 5
+
+/* streaming monitor port id's */
+#define HIVE_ISYS_STR_MON_PORT_CAPA 0
+#define HIVE_ISYS_STR_MON_PORT_CAPB 1
+#define HIVE_ISYS_STR_MON_PORT_CAPC 2
+#define HIVE_ISYS_STR_MON_PORT_ACQ 3
+#define HIVE_ISYS_STR_MON_PORT_CSS_GENSH 4
+#define HIVE_ISYS_STR_MON_PORT_SF_GENSH 5
+#define HIVE_ISYS_STR_MON_PORT_SP2ISYS 6
+#define HIVE_ISYS_STR_MON_PORT_ISYS2SP 7
+#define HIVE_ISYS_STR_MON_PORT_PIXA 8
+#define HIVE_ISYS_STR_MON_PORT_PIXB 9
+
+/* interrupt bit ID's */
+#define HIVE_ISYS_IRQ_CSI_SOF_BIT_ID 0
+#define HIVE_ISYS_IRQ_CSI_EOF_BIT_ID 1
+#define HIVE_ISYS_IRQ_CSI_SOL_BIT_ID 2
+#define HIVE_ISYS_IRQ_CSI_EOL_BIT_ID 3
+#define HIVE_ISYS_IRQ_CSI_RECEIVER_BIT_ID 4
+#define HIVE_ISYS_IRQ_CSI_RECEIVER_BE_BIT_ID 5
+#define HIVE_ISYS_IRQ_CAP_UNIT_A_NO_SOP 6
+#define HIVE_ISYS_IRQ_CAP_UNIT_A_LATE_SOP 7
+/*#define HIVE_ISYS_IRQ_CAP_UNIT_A_UNDEF_PH 7*/
+#define HIVE_ISYS_IRQ_CAP_UNIT_B_NO_SOP 8
+#define HIVE_ISYS_IRQ_CAP_UNIT_B_LATE_SOP 9
+/*#define HIVE_ISYS_IRQ_CAP_UNIT_B_UNDEF_PH 10*/
+#define HIVE_ISYS_IRQ_CAP_UNIT_C_NO_SOP 10
+#define HIVE_ISYS_IRQ_CAP_UNIT_C_LATE_SOP 11
+/*#define HIVE_ISYS_IRQ_CAP_UNIT_C_UNDEF_PH 13*/
+#define HIVE_ISYS_IRQ_ACQ_UNIT_SOP_MISMATCH 12
+/*#define HIVE_ISYS_IRQ_ACQ_UNIT_UNDEF_PH 15*/
+#define HIVE_ISYS_IRQ_INP_CTRL_CAPA 13
+#define HIVE_ISYS_IRQ_INP_CTRL_CAPB 14
+#define HIVE_ISYS_IRQ_INP_CTRL_CAPC 15
+#define HIVE_ISYS_IRQ_CIO2AHB 16
+#define HIVE_ISYS_IRQ_DMA_BIT_ID 17
+#define HIVE_ISYS_IRQ_STREAM_MON_BIT_ID 18
+#define HIVE_ISYS_IRQ_NUM_BITS 19
+
+/* DMA */
+#define HIVE_ISYS_DMA_CHANNEL 0
+#define HIVE_ISYS_DMA_IBUF_DDR_CONN 0
+#define HIVE_ISYS_DMA_HEIGHT 1
+#define HIVE_ISYS_DMA_ELEMS 1 /* both master buses of same width */
+#define HIVE_ISYS_DMA_STRIDE 0 /* no stride required as height is fixed to 1 */
+#define HIVE_ISYS_DMA_CROP 0 /* no cropping */
+#define HIVE_ISYS_DMA_EXTENSION 0 /* no extension as elem width is the same on both sides */
+
+#endif /* _input_system_defs_h */
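Editorial note: a minimal sketch (not part of the patch) of how the HIVE_ISYS_GPREG_SRST_*_BIT numbers above might be combined into a soft-reset mask; the helper name is hypothetical and only the bit positions come from the header.

#include <stdio.h>

/* bit positions copied from input_system_defs.h above */
#define HIVE_ISYS_GPREG_SRST_CAPT_FIFO_A_BIT 0
#define HIVE_ISYS_GPREG_SRST_CAPT_FIFO_B_BIT 1
#define HIVE_ISYS_GPREG_SRST_CAPT_FIFO_C_BIT 2

/* hypothetical helper: mask that resets all three capture FIFOs */
static unsigned int isys_capt_fifo_srst_mask(void)
{
	return (1u << HIVE_ISYS_GPREG_SRST_CAPT_FIFO_A_BIT) |
	       (1u << HIVE_ISYS_GPREG_SRST_CAPT_FIFO_B_BIT) |
	       (1u << HIVE_ISYS_GPREG_SRST_CAPT_FIFO_C_BIT);
}

int main(void)
{
	printf("0x%X\n", isys_capt_fifo_srst_mask()); /* prints 0x7 */
	return 0;
}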
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/irq_controller_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/irq_controller_defs.h
new file mode 100644
index 000000000000..ec6dd4487158
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/irq_controller_defs.h
@@ -0,0 +1,28 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _irq_controller_defs_h
+#define _irq_controller_defs_h
+
+#define _HRT_IRQ_CONTROLLER_EDGE_REG_IDX 0
+#define _HRT_IRQ_CONTROLLER_MASK_REG_IDX 1
+#define _HRT_IRQ_CONTROLLER_STATUS_REG_IDX 2
+#define _HRT_IRQ_CONTROLLER_CLEAR_REG_IDX 3
+#define _HRT_IRQ_CONTROLLER_ENABLE_REG_IDX 4
+#define _HRT_IRQ_CONTROLLER_EDGE_NOT_PULSE_REG_IDX 5
+#define _HRT_IRQ_CONTROLLER_STR_OUT_ENABLE_REG_IDX 6
+
+#define _HRT_IRQ_CONTROLLER_REG_ALIGN 4
+
+#endif /* _irq_controller_defs_h */
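Editorial note: _HRT_IRQ_CONTROLLER_REG_ALIGN is most naturally read as the byte stride between consecutive registers, so a register's byte offset is its index times the alignment. A small illustrative sketch under that assumption (not part of the patch; the helper is hypothetical):

#include <stdio.h>

/* values copied from irq_controller_defs.h above */
#define _HRT_IRQ_CONTROLLER_STATUS_REG_IDX 2
#define _HRT_IRQ_CONTROLLER_REG_ALIGN      4

/* hypothetical helper: byte offset of a register inside the controller */
static unsigned int irq_reg_byte_offset(unsigned int reg_idx)
{
	return reg_idx * _HRT_IRQ_CONTROLLER_REG_ALIGN;
}

int main(void)
{
	/* status register sits 8 bytes into the register space */
	printf("%u\n", irq_reg_byte_offset(_HRT_IRQ_CONTROLLER_STATUS_REG_IDX));
	return 0;
}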
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/isp2400_support.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/isp2400_support.h
new file mode 100644
index 000000000000..e00bc841d0f0
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/isp2400_support.h
@@ -0,0 +1,38 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _isp2400_support_h
+#define _isp2400_support_h
+
+#ifndef ISP2400_VECTOR_TYPES
+/* This typedef makes it possible to include hive header files
+   in the host code, which is useful in crun */
+typedef char *tmemvectors, *tmemvectoru, *tvector;
+#endif
+
+#define hrt_isp_vamem1_store_16(cell, addr, val) hrt_mem_store_16(cell, HRT_PROC_TYPE_PROP(cell, _simd_vamem1), addr, val)
+#define hrt_isp_vamem2_store_16(cell, addr, val) hrt_mem_store_16(cell, HRT_PROC_TYPE_PROP(cell, _simd_vamem2), addr, val)
+
+#define hrt_isp_dmem(cell) HRT_PROC_TYPE_PROP(cell, _base_dmem)
+#define hrt_isp_vmem(cell) HRT_PROC_TYPE_PROP(cell, _simd_vmem)
+
+#define hrt_isp_dmem_master_port_address(cell) hrt_mem_master_port_address(cell, hrt_isp_dmem(cell))
+#define hrt_isp_vmem_master_port_address(cell) hrt_mem_master_port_address(cell, hrt_isp_vmem(cell))
+
+#if ISP_HAS_HIST
+ #define hrt_isp_hist(cell) HRT_PROC_TYPE_PROP(cell, _simd_histogram)
+ #define hrt_isp_hist_master_port_address(cell) hrt_mem_master_port_address(cell, hrt_isp_hist(cell))
+#endif
+
+#endif /* _isp2400_support_h */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/isp2401_mamoiada_params.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/isp2401_mamoiada_params.h
new file mode 100644
index 000000000000..033e23bcf672
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/isp2401_mamoiada_params.h
@@ -0,0 +1,258 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+/* Version */
+#define RTL_VERSION
+
+/* Cell name */
+#define ISP_CELL_TYPE isp2401_mamoiada
+#define ISP_VMEM simd_vmem
+#define _HRT_ISP_VMEM isp2401_mamoiada_simd_vmem
+
+/* instruction pipeline depth */
+#define ISP_BRANCHDELAY 5
+
+/* bus */
+#define ISP_BUS_WIDTH 32
+#define ISP_BUS_ADDR_WIDTH 32
+#define ISP_BUS_BURST_SIZE 1
+
+/* data-path */
+#define ISP_SCALAR_WIDTH 32
+#define ISP_SLICE_NELEMS 4
+#define ISP_VEC_NELEMS 64
+#define ISP_VEC_ELEMBITS 14
+#define ISP_VEC_ELEM8BITS 16
+#define ISP_CLONE_DATAPATH_IS_16 1
+
+/* memories */
+#define ISP_DMEM_DEPTH 4096
+#define ISP_DMEM_BSEL_DOWNSAMPLE 8
+#define ISP_VMEM_DEPTH 3072
+#define ISP_VMEM_BSEL_DOWNSAMPLE 8
+#define ISP_VMEM_ELEMBITS 14
+#define ISP_VMEM_ELEM_PRECISION 14
+#define ISP_VMEM_IS_BAMEM 1
+#if ISP_VMEM_IS_BAMEM
+ #define ISP_VMEM_BAMEM_MAX_BOI_HEIGHT 8
+ #define ISP_VMEM_BAMEM_LATENCY 5
+ #define ISP_VMEM_BAMEM_BANK_NARROWING_FACTOR 2
+ #define ISP_VMEM_BAMEM_NR_DATA_PLANES 8
+ #define ISP_VMEM_BAMEM_NR_CFG_REGISTERS 16
+ #define ISP_VMEM_BAMEM_LININT 0
+ #define ISP_VMEM_BAMEM_DAP_BITS 3
+ #define ISP_VMEM_BAMEM_LININT_FRAC_BITS 0
+ #define ISP_VMEM_BAMEM_PID_BITS 3
+ #define ISP_VMEM_BAMEM_OFFSET_BITS 19
+ #define ISP_VMEM_BAMEM_ADDRESS_BITS 25
+ #define ISP_VMEM_BAMEM_RID_BITS 4
+ #define ISP_VMEM_BAMEM_TRANSPOSITION 1
+ #define ISP_VMEM_BAMEM_VEC_PLUS_SLICE 1
+ #define ISP_VMEM_BAMEM_ARB_SERVICE_CYCLE_BITS 1
+ #define ISP_VMEM_BAMEM_LUT_ELEMS 16
+ #define ISP_VMEM_BAMEM_LUT_ADDR_WIDTH 14
+ #define ISP_VMEM_BAMEM_HALF_BLOCK_WRITE 1
+ #define ISP_VMEM_BAMEM_SMART_FETCH 1
+ #define ISP_VMEM_BAMEM_BIG_ENDIANNESS 0
+#endif /* ISP_VMEM_IS_BAMEM */
+#define ISP_PMEM_DEPTH 2048
+#define ISP_PMEM_WIDTH 640
+#define ISP_VAMEM_ADDRESS_BITS 12
+#define ISP_VAMEM_ELEMBITS 12
+#define ISP_VAMEM_DEPTH 2048
+#define ISP_VAMEM_ALIGNMENT 2
+#define ISP_VA_ADDRESS_WIDTH 896
+#define ISP_VEC_VALSU_LATENCY ISP_VEC_NELEMS
+#define ISP_HIST_ADDRESS_BITS 12
+#define ISP_HIST_ALIGNMENT 4
+#define ISP_HIST_COMP_IN_PREC 12
+#define ISP_HIST_DEPTH 1024
+#define ISP_HIST_WIDTH 24
+#define ISP_HIST_COMPONENTS 4
+
+/* program counter */
+#define ISP_PC_WIDTH 13
+
+/* Template switches */
+#define ISP_SHIELD_INPUT_DMEM 0
+#define ISP_SHIELD_OUTPUT_DMEM 1
+#define ISP_SHIELD_INPUT_VMEM 0
+#define ISP_SHIELD_OUTPUT_VMEM 0
+#define ISP_SHIELD_INPUT_PMEM 1
+#define ISP_SHIELD_OUTPUT_PMEM 1
+#define ISP_SHIELD_INPUT_HIST 1
+#define ISP_SHIELD_OUTPUT_HIST 1
+/* When the LUT is selected, shielding is always on */
+#define ISP_SHIELD_INPUT_VAMEM 1
+#define ISP_SHIELD_OUTPUT_VAMEM 1
+
+#define ISP_HAS_IRQ 1
+#define ISP_HAS_SOFT_RESET 1
+#define ISP_HAS_VEC_DIV 0
+#define ISP_HAS_VFU_W_2O 1
+#define ISP_HAS_DEINT3 1
+#define ISP_HAS_LUT 1
+#define ISP_HAS_HIST 1
+#define ISP_HAS_VALSU 1
+#define ISP_HAS_3rdVALSU 1
+#define ISP_VRF1_HAS_2P 1
+
+#define ISP_SRU_GUARDING 1
+#define ISP_VLSU_GUARDING 1
+
+#define ISP_VRF_RAM 1
+#define ISP_SRF_RAM 1
+
+#define ISP_SPLIT_VMUL_VADD_IS 0
+#define ISP_RFSPLIT_FPGA 0
+
+/* RSN or Bus pipelining */
+#define ISP_RSN_PIPE 1
+#define ISP_VSF_BUS_PIPE 0
+
+/* extra slave port to vmem */
+#define ISP_IF_VMEM 0
+#define ISP_GDC_VMEM 0
+
+/* Streaming ports */
+#define ISP_IF 1
+#define ISP_IF_B 1
+#define ISP_GDC 1
+#define ISP_SCL 1
+#define ISP_GPFIFO 1
+#define ISP_SP 1
+
+/* Removing Issue Slot(s) */
+#define ISP_HAS_NOT_SIMD_IS2 0
+#define ISP_HAS_NOT_SIMD_IS3 0
+#define ISP_HAS_NOT_SIMD_IS4 0
+#define ISP_HAS_NOT_SIMD_IS4_VADD 0
+#define ISP_HAS_NOT_SIMD_IS5 0
+#define ISP_HAS_NOT_SIMD_IS6 0
+#define ISP_HAS_NOT_SIMD_IS7 0
+#define ISP_HAS_NOT_SIMD_IS8 0
+
+/* ICache */
+#define ISP_ICACHE 1
+#define ISP_ICACHE_ONLY 0
+#define ISP_ICACHE_PREFETCH 1
+#define ISP_ICACHE_INDEX_BITS 8
+#define ISP_ICACHE_SET_BITS 5
+#define ISP_ICACHE_BLOCKS_PER_SET_BITS 1
+
+/* Experimental Flags */
+#define ISP_EXP_1 0
+#define ISP_EXP_2 0
+#define ISP_EXP_3 0
+#define ISP_EXP_4 0
+#define ISP_EXP_5 0
+#define ISP_EXP_6 0
+
+/* Derived values */
+#define ISP_LOG2_PMEM_WIDTH 10
+#define ISP_VEC_WIDTH 896
+#define ISP_SLICE_WIDTH 56
+#define ISP_VMEM_WIDTH 896
+#define ISP_VMEM_ALIGN 128
+#if ISP_VMEM_IS_BAMEM
+ #define ISP_VMEM_ALIGN_ELEM 2
+#endif /* ISP_VMEM_IS_BAMEM */
+#define ISP_SIMDLSU 1
+#define ISP_LSU_IMM_BITS 12
+
+/* convenient shortcuts for software */
+#define ISP_NWAY ISP_VEC_NELEMS
+#define NBITS ISP_VEC_ELEMBITS
+
+#define _isp_ceil_div(a,b) (((a)+(b)-1)/(b))
+
+#ifdef C_RUN
+#define ISP_VEC_ALIGN (_isp_ceil_div(ISP_VEC_WIDTH, 64)*8)
+#else
+#define ISP_VEC_ALIGN ISP_VMEM_ALIGN
+#endif
+
+/* HRT specific vector support */
+#define isp2401_mamoiada_vector_alignment ISP_VEC_ALIGN
+#define isp2401_mamoiada_vector_elem_bits ISP_VMEM_ELEMBITS
+#define isp2401_mamoiada_vector_elem_precision ISP_VMEM_ELEM_PRECISION
+#define isp2401_mamoiada_vector_num_elems ISP_VEC_NELEMS
+
+/* register file sizes */
+#define ISP_RF0_SIZE 64
+#define ISP_RF1_SIZE 16
+#define ISP_RF2_SIZE 64
+#define ISP_RF3_SIZE 4
+#define ISP_RF4_SIZE 64
+#define ISP_RF5_SIZE 16
+#define ISP_RF6_SIZE 16
+#define ISP_RF7_SIZE 16
+#define ISP_RF8_SIZE 16
+#define ISP_RF9_SIZE 16
+#define ISP_RF10_SIZE 16
+#define ISP_RF11_SIZE 16
+#define ISP_VRF1_SIZE 32
+#define ISP_VRF2_SIZE 32
+#define ISP_VRF3_SIZE 32
+#define ISP_VRF4_SIZE 32
+#define ISP_VRF5_SIZE 32
+#define ISP_VRF6_SIZE 32
+#define ISP_VRF7_SIZE 32
+#define ISP_VRF8_SIZE 32
+#define ISP_SRF1_SIZE 4
+#define ISP_SRF2_SIZE 64
+#define ISP_SRF3_SIZE 64
+#define ISP_SRF4_SIZE 32
+#define ISP_SRF5_SIZE 64
+#define ISP_FRF0_SIZE 16
+#define ISP_FRF1_SIZE 4
+#define ISP_FRF2_SIZE 16
+#define ISP_FRF3_SIZE 4
+#define ISP_FRF4_SIZE 4
+#define ISP_FRF5_SIZE 8
+#define ISP_FRF6_SIZE 4
+/* register file read latency */
+#define ISP_VRF1_READ_LAT 1
+#define ISP_VRF2_READ_LAT 1
+#define ISP_VRF3_READ_LAT 1
+#define ISP_VRF4_READ_LAT 1
+#define ISP_VRF5_READ_LAT 1
+#define ISP_VRF6_READ_LAT 1
+#define ISP_VRF7_READ_LAT 1
+#define ISP_VRF8_READ_LAT 1
+#define ISP_SRF1_READ_LAT 1
+#define ISP_SRF2_READ_LAT 1
+#define ISP_SRF3_READ_LAT 1
+#define ISP_SRF4_READ_LAT 1
+#define ISP_SRF5_READ_LAT 1
+/* immediate sizes */
+#define ISP_IS1_IMM_BITS 14
+#define ISP_IS2_IMM_BITS 13
+#define ISP_IS3_IMM_BITS 14
+#define ISP_IS4_IMM_BITS 14
+#define ISP_IS5_IMM_BITS 9
+#define ISP_IS6_IMM_BITS 16
+#define ISP_IS7_IMM_BITS 9
+#define ISP_IS8_IMM_BITS 16
+#define ISP_IS9_IMM_BITS 11
+/* fifo depths */
+#define ISP_IF_FIFO_DEPTH 0
+#define ISP_IF_B_FIFO_DEPTH 0
+#define ISP_DMA_FIFO_DEPTH 0
+#define ISP_OF_FIFO_DEPTH 0
+#define ISP_GDC_FIFO_DEPTH 0
+#define ISP_SCL_FIFO_DEPTH 0
+#define ISP_GPFIFO_FIFO_DEPTH 0
+#define ISP_SP_FIFO_DEPTH 0
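Editorial note: the "Derived values" above are consistent with the data-path parameters at the top of the file; the compile-time checks below (not part of the patch, C11) only make that arithmetic explicit.

#include <assert.h>

/* constants copied from isp2401_mamoiada_params.h above */
#define ISP_SLICE_NELEMS  4
#define ISP_VEC_NELEMS   64
#define ISP_VEC_ELEMBITS 14
#define ISP_VEC_WIDTH   896
#define ISP_SLICE_WIDTH  56

#define _isp_ceil_div(a, b) (((a) + (b) - 1) / (b))

/* 64 elements x 14 bits = 896-bit vectors, 4 x 14 = 56-bit slices */
static_assert(ISP_VEC_WIDTH   == ISP_VEC_NELEMS   * ISP_VEC_ELEMBITS, "vector width");
static_assert(ISP_SLICE_WIDTH == ISP_SLICE_NELEMS * ISP_VEC_ELEMBITS, "slice width");
/* the C_RUN branch of ISP_VEC_ALIGN evaluates to 14 * 8 = 112 bytes */
static_assert(_isp_ceil_div(ISP_VEC_WIDTH, 64) * 8 == 112, "C_RUN vector alignment");

int main(void) { return 0; }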
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/isp_acquisition_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/isp_acquisition_defs.h
new file mode 100644
index 000000000000..593620721627
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/isp_acquisition_defs.h
@@ -0,0 +1,234 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _isp_acquisition_defs_h
+#define _isp_acquisition_defs_h
+
+#define _ISP_ACQUISITION_REG_ALIGN 4 /* assuming 32 bit control bus width */
+#define _ISP_ACQUISITION_BYTES_PER_ELEM 4
+
+/* --------------------------------------------------*/
+
+#define NOF_ACQ_IRQS 1
+
+/* --------------------------------------------------*/
+/* FSM */
+/* --------------------------------------------------*/
+#define MEM2STREAM_FSM_STATE_BITS 2
+#define ACQ_SYNCHRONIZER_FSM_STATE_BITS 2
+
+/* --------------------------------------------------*/
+/* REGISTER INFO */
+/* --------------------------------------------------*/
+
+#define NOF_ACQ_REGS 12
+
+// Register IDs of MMIO slave accessible registers
+#define ACQ_START_ADDR_REG_ID 0
+#define ACQ_MEM_REGION_SIZE_REG_ID 1
+#define ACQ_NUM_MEM_REGIONS_REG_ID 2
+#define ACQ_INIT_REG_ID 3
+#define ACQ_RECEIVED_SHORT_PACKETS_REG_ID 4
+#define ACQ_RECEIVED_LONG_PACKETS_REG_ID 5
+#define ACQ_LAST_COMMAND_REG_ID 6
+#define ACQ_NEXT_COMMAND_REG_ID 7
+#define ACQ_LAST_ACKNOWLEDGE_REG_ID 8
+#define ACQ_NEXT_ACKNOWLEDGE_REG_ID 9
+#define ACQ_FSM_STATE_INFO_REG_ID 10
+#define ACQ_INT_CNTR_INFO_REG_ID 11
+
+// Register width
+#define ACQ_START_ADDR_REG_WIDTH 9
+#define ACQ_MEM_REGION_SIZE_REG_WIDTH 9
+#define ACQ_NUM_MEM_REGIONS_REG_WIDTH 9
+#define ACQ_INIT_REG_WIDTH 3
+#define ACQ_RECEIVED_SHORT_PACKETS_REG_WIDTH 32
+#define ACQ_RECEIVED_LONG_PACKETS_REG_WIDTH 32
+#define ACQ_LAST_COMMAND_REG_WIDTH 32
+#define ACQ_NEXT_COMMAND_REG_WIDTH 32
+#define ACQ_LAST_ACKNOWLEDGE_REG_WIDTH 32
+#define ACQ_NEXT_ACKNOWLEDGE_REG_WIDTH 32
+#define ACQ_FSM_STATE_INFO_REG_WIDTH ((MEM2STREAM_FSM_STATE_BITS * 3) + (ACQ_SYNCHRONIZER_FSM_STATE_BITS * 3))
+#define ACQ_INT_CNTR_INFO_REG_WIDTH 32
+
+/* register reset value */
+#define ACQ_START_ADDR_REG_RSTVAL 0
+#define ACQ_MEM_REGION_SIZE_REG_RSTVAL 128
+#define ACQ_NUM_MEM_REGIONS_REG_RSTVAL 3
+#define ACQ_INIT_REG_RSTVAL 0
+#define ACQ_RECEIVED_SHORT_PACKETS_REG_RSTVAL 0
+#define ACQ_RECEIVED_LONG_PACKETS_REG_RSTVAL 0
+#define ACQ_LAST_COMMAND_REG_RSTVAL 0
+#define ACQ_NEXT_COMMAND_REG_RSTVAL 0
+#define ACQ_LAST_ACKNOWLEDGE_REG_RSTVAL 0
+#define ACQ_NEXT_ACKNOWLEDGE_REG_RSTVAL 0
+#define ACQ_FSM_STATE_INFO_REG_RSTVAL 0
+#define ACQ_INT_CNTR_INFO_REG_RSTVAL 0
+
+/* bit definitions */
+#define ACQ_INIT_RST_REG_BIT 0
+#define ACQ_INIT_RESYNC_BIT 2
+#define ACQ_INIT_RST_IDX ACQ_INIT_RST_REG_BIT
+#define ACQ_INIT_RST_BITS 1
+#define ACQ_INIT_RESYNC_IDX ACQ_INIT_RESYNC_BIT
+#define ACQ_INIT_RESYNC_BITS 1
+
+/* --------------------------------------------------*/
+/* TOKEN INFO */
+/* --------------------------------------------------*/
+#define ACQ_TOKEN_ID_LSB 0
+#define ACQ_TOKEN_ID_MSB 3
+#define ACQ_TOKEN_WIDTH (ACQ_TOKEN_ID_MSB - ACQ_TOKEN_ID_LSB + 1) // 4
+#define ACQ_TOKEN_ID_IDX 0
+#define ACQ_TOKEN_ID_BITS ACQ_TOKEN_WIDTH
+#define ACQ_INIT_CMD_INIT_IDX 4
+#define ACQ_INIT_CMD_INIT_BITS 3
+#define ACQ_CMD_START_ADDR_IDX 4
+#define ACQ_CMD_START_ADDR_BITS 9
+#define ACQ_CMD_NOFWORDS_IDX 13
+#define ACQ_CMD_NOFWORDS_BITS 9
+#define ACQ_MEM_REGION_ID_IDX 22
+#define ACQ_MEM_REGION_ID_BITS 9
+#define ACQ_PACKET_LENGTH_TOKEN_MSB 21
+#define ACQ_PACKET_LENGTH_TOKEN_LSB 13
+#define ACQ_PACKET_DATA_FORMAT_ID_TOKEN_MSB 9
+#define ACQ_PACKET_DATA_FORMAT_ID_TOKEN_LSB 4
+#define ACQ_PACKET_CH_ID_TOKEN_MSB 11
+#define ACQ_PACKET_CH_ID_TOKEN_LSB 10
+#define ACQ_PACKET_MEM_REGION_ID_TOKEN_MSB 12 /* only for capt_end_of_packet_written */
+#define ACQ_PACKET_MEM_REGION_ID_TOKEN_LSB 4 /* only for capt_end_of_packet_written */
+
+
+/* Command tokens IDs */
+#define ACQ_READ_REGION_AUTO_INCR_TOKEN_ID 0 //0000b
+#define ACQ_READ_REGION_TOKEN_ID 1 //0001b
+#define ACQ_READ_REGION_SOP_TOKEN_ID 2 //0010b
+#define ACQ_INIT_TOKEN_ID 8 //1000b
+
+/* Acknowledge token IDs */
+#define ACQ_READ_REGION_ACK_TOKEN_ID 0 //0000b
+#define ACQ_END_OF_PACKET_TOKEN_ID 4 //0100b
+#define ACQ_END_OF_REGION_TOKEN_ID 5 //0101b
+#define ACQ_SOP_MISMATCH_TOKEN_ID 6 //0110b
+#define ACQ_UNDEF_PH_TOKEN_ID 7 //0111b
+
+#define ACQ_TOKEN_MEMREGIONID_MSB 30
+#define ACQ_TOKEN_MEMREGIONID_LSB 22
+#define ACQ_TOKEN_NOFWORDS_MSB 21
+#define ACQ_TOKEN_NOFWORDS_LSB 13
+#define ACQ_TOKEN_STARTADDR_MSB 12
+#define ACQ_TOKEN_STARTADDR_LSB 4
+
+
+/* --------------------------------------------------*/
+/* MIPI */
+/* --------------------------------------------------*/
+
+#define WORD_COUNT_WIDTH 16
+#define PKT_CODE_WIDTH 6
+#define CHN_NO_WIDTH 2
+#define ERROR_INFO_WIDTH 8
+
+#define LONG_PKTCODE_MAX 63
+#define LONG_PKTCODE_MIN 16
+#define SHORT_PKTCODE_MAX 15
+
+#define EOF_CODE 1
+
+/* --------------------------------------------------*/
+/* Packet Info */
+/* --------------------------------------------------*/
+#define ACQ_START_OF_FRAME 0
+#define ACQ_END_OF_FRAME 1
+#define ACQ_START_OF_LINE 2
+#define ACQ_END_OF_LINE 3
+#define ACQ_LINE_PAYLOAD 4
+#define ACQ_GEN_SH_PKT 5
+
+
+/* bit definition */
+#define ACQ_PKT_TYPE_IDX 16
+#define ACQ_PKT_TYPE_BITS 6
+#define ACQ_PKT_SOP_IDX 32
+#define ACQ_WORD_CNT_IDX 0
+#define ACQ_WORD_CNT_BITS 16
+#define ACQ_PKT_INFO_IDX 16
+#define ACQ_PKT_INFO_BITS 8
+#define ACQ_HEADER_DATA_IDX 0
+#define ACQ_HEADER_DATA_BITS 16
+#define ACQ_ACK_TOKEN_ID_IDX ACQ_TOKEN_ID_IDX
+#define ACQ_ACK_TOKEN_ID_BITS ACQ_TOKEN_ID_BITS
+#define ACQ_ACK_NOFWORDS_IDX 13
+#define ACQ_ACK_NOFWORDS_BITS 9
+#define ACQ_ACK_PKT_LEN_IDX 4
+#define ACQ_ACK_PKT_LEN_BITS 16
+
+
+/* --------------------------------------------------*/
+/* Packet Data Type */
+/* --------------------------------------------------*/
+
+
+#define ACQ_YUV420_8_DATA 24 /* 01 1000 YUV420 8-bit */
+#define ACQ_YUV420_10_DATA 25 /* 01 1001 YUV420 10-bit */
+#define ACQ_YUV420_8L_DATA 26 /* 01 1010 YUV420 8-bit legacy */
+#define ACQ_YUV422_8_DATA 30 /* 01 1110 YUV422 8-bit */
+#define ACQ_YUV422_10_DATA 31 /* 01 1111 YUV422 10-bit */
+#define ACQ_RGB444_DATA 32 /* 10 0000 RGB444 */
+#define ACQ_RGB555_DATA 33 /* 10 0001 RGB555 */
+#define ACQ_RGB565_DATA 34 /* 10 0010 RGB565 */
+#define ACQ_RGB666_DATA 35 /* 10 0011 RGB666 */
+#define ACQ_RGB888_DATA 36 /* 10 0100 RGB888 */
+#define ACQ_RAW6_DATA 40 /* 10 1000 RAW6 */
+#define ACQ_RAW7_DATA 41 /* 10 1001 RAW7 */
+#define ACQ_RAW8_DATA 42 /* 10 1010 RAW8 */
+#define ACQ_RAW10_DATA 43 /* 10 1011 RAW10 */
+#define ACQ_RAW12_DATA 44 /* 10 1100 RAW12 */
+#define ACQ_RAW14_DATA 45 /* 10 1101 RAW14 */
+#define ACQ_USR_DEF_1_DATA 48 /* 11 0000 JPEG [User Defined 8-bit Data Type 1] */
+#define ACQ_USR_DEF_2_DATA 49 /* 11 0001 User Defined 8-bit Data Type 2 */
+#define ACQ_USR_DEF_3_DATA 50 /* 11 0010 User Defined 8-bit Data Type 3 */
+#define ACQ_USR_DEF_4_DATA 51 /* 11 0011 User Defined 8-bit Data Type 4 */
+#define ACQ_USR_DEF_5_DATA 52 /* 11 0100 User Defined 8-bit Data Type 5 */
+#define ACQ_USR_DEF_6_DATA 53 /* 11 0101 User Defined 8-bit Data Type 6 */
+#define ACQ_USR_DEF_7_DATA 54 /* 11 0110 User Defined 8-bit Data Type 7 */
+#define ACQ_USR_DEF_8_DATA 55 /* 11 0111 User Defined 8-bit Data Type 8 */
+#define ACQ_Emb_DATA 18 /* 01 0010 embedded eight bit non image data */
+#define ACQ_SOF_DATA 0 /* 00 0000 frame start */
+#define ACQ_EOF_DATA 1 /* 00 0001 frame end */
+#define ACQ_SOL_DATA 2 /* 00 0010 line start */
+#define ACQ_EOL_DATA 3 /* 00 0011 line end */
+#define ACQ_GEN_SH1_DATA 8 /* 00 1000 Generic Short Packet Code 1 */
+#define ACQ_GEN_SH2_DATA 9 /* 00 1001 Generic Short Packet Code 2 */
+#define ACQ_GEN_SH3_DATA 10 /* 00 1010 Generic Short Packet Code 3 */
+#define ACQ_GEN_SH4_DATA 11 /* 00 1011 Generic Short Packet Code 4 */
+#define ACQ_GEN_SH5_DATA 12 /* 00 1100 Generic Short Packet Code 5 */
+#define ACQ_GEN_SH6_DATA 13 /* 00 1101 Generic Short Packet Code 6 */
+#define ACQ_GEN_SH7_DATA 14 /* 00 1110 Generic Short Packet Code 7 */
+#define ACQ_GEN_SH8_DATA 15 /* 00 1111 Generic Short Packet Code 8 */
+#define ACQ_YUV420_8_CSPS_DATA 28 /* 01 1100 YUV420 8-bit (Chroma Shifted Pixel Sampling) */
+#define ACQ_YUV420_10_CSPS_DATA 29 /* 01 1101 YUV420 10-bit (Chroma Shifted Pixel Sampling) */
+#define ACQ_RESERVED_DATA_TYPE_MIN 56
+#define ACQ_RESERVED_DATA_TYPE_MAX 63
+#define ACQ_GEN_LONG_RESERVED_DATA_TYPE_MIN 19
+#define ACQ_GEN_LONG_RESERVED_DATA_TYPE_MAX 23
+#define ACQ_YUV_RESERVED_DATA_TYPE 27
+#define ACQ_RGB_RESERVED_DATA_TYPE_MIN 37
+#define ACQ_RGB_RESERVED_DATA_TYPE_MAX 39
+#define ACQ_RAW_RESERVED_DATA_TYPE_MIN 46
+#define ACQ_RAW_RESERVED_DATA_TYPE_MAX 47
+
+/* --------------------------------------------------*/
+
+#endif /* _isp_acquisition_defs_h */
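Editorial note: the ACQ_TOKEN_*_MSB/LSB pairs above carve sub-fields out of a 32-bit acquisition token. The sketch below is not part of the patch; the extractor and the sample token value are invented, only the field boundaries come from the header.

#include <stdio.h>
#include <stdint.h>

/* field boundaries copied from isp_acquisition_defs.h above */
#define ACQ_TOKEN_NOFWORDS_MSB 21
#define ACQ_TOKEN_NOFWORDS_LSB 13

/* hypothetical helper: extract bits [msb:lsb] from a token word */
static uint32_t acq_token_field(uint32_t token, unsigned int msb, unsigned int lsb)
{
	return (token >> lsb) & ((1u << (msb - lsb + 1)) - 1u);
}

int main(void)
{
	uint32_t token = 0x00A02000u; /* made-up value, for illustration only */

	printf("nofwords = %u\n",
	       (unsigned int)acq_token_field(token, ACQ_TOKEN_NOFWORDS_MSB,
	                                     ACQ_TOKEN_NOFWORDS_LSB));
	return 0;
}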
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/isp_capture_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/isp_capture_defs.h
new file mode 100644
index 000000000000..aa413df022f2
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/isp_capture_defs.h
@@ -0,0 +1,310 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _isp_capture_defs_h
+#define _isp_capture_defs_h
+
+#define _ISP_CAPTURE_REG_ALIGN 4 /* assuming 32 bit control bus width */
+#define _ISP_CAPTURE_BITS_PER_ELEM 32 /* only for data, not SOP */
+#define _ISP_CAPTURE_BYTES_PER_ELEM (_ISP_CAPTURE_BITS_PER_ELEM / 8)
+#define _ISP_CAPTURE_BYTES_PER_WORD 32 /* 256/8 */
+#define _ISP_CAPTURE_ELEM_PER_WORD (_ISP_CAPTURE_BYTES_PER_WORD / _ISP_CAPTURE_BYTES_PER_ELEM)
+
+//#define CAPT_RCV_ACK 1
+//#define CAPT_WRT_ACK 2
+//#define CAPT_IRQ_ACK 3
+
+/* --------------------------------------------------*/
+
+#define NOF_IRQS 2
+
+/* --------------------------------------------------*/
+/* REGISTER INFO */
+/* --------------------------------------------------*/
+
+// Number of registers
+#define CAPT_NOF_REGS 16
+
+// Register IDs of MMIO slave accessible registers
+#define CAPT_START_MODE_REG_ID 0
+#define CAPT_START_ADDR_REG_ID 1
+#define CAPT_MEM_REGION_SIZE_REG_ID 2
+#define CAPT_NUM_MEM_REGIONS_REG_ID 3
+#define CAPT_INIT_REG_ID 4
+#define CAPT_START_REG_ID 5
+#define CAPT_STOP_REG_ID 6
+
+#define CAPT_PACKET_LENGTH_REG_ID 7
+#define CAPT_RECEIVED_LENGTH_REG_ID 8
+#define CAPT_RECEIVED_SHORT_PACKETS_REG_ID 9
+#define CAPT_RECEIVED_LONG_PACKETS_REG_ID 10
+#define CAPT_LAST_COMMAND_REG_ID 11
+#define CAPT_NEXT_COMMAND_REG_ID 12
+#define CAPT_LAST_ACKNOWLEDGE_REG_ID 13
+#define CAPT_NEXT_ACKNOWLEDGE_REG_ID 14
+#define CAPT_FSM_STATE_INFO_REG_ID 15
+
+// Register width
+#define CAPT_START_MODE_REG_WIDTH 1
+//#define CAPT_START_ADDR_REG_WIDTH 9
+//#define CAPT_MEM_REGION_SIZE_REG_WIDTH 9
+//#define CAPT_NUM_MEM_REGIONS_REG_WIDTH 9
+#define CAPT_INIT_REG_WIDTH (22 + 4)
+
+#define CAPT_START_REG_WIDTH 1
+#define CAPT_STOP_REG_WIDTH 1
+
+/* --------------------------------------------------*/
+/* FSM */
+/* --------------------------------------------------*/
+#define CAPT_WRITE2MEM_FSM_STATE_BITS 2
+#define CAPT_SYNCHRONIZER_FSM_STATE_BITS 3
+
+
+#define CAPT_PACKET_LENGTH_REG_WIDTH 17
+#define CAPT_RECEIVED_LENGTH_REG_WIDTH 17
+#define CAPT_RECEIVED_SHORT_PACKETS_REG_WIDTH 32
+#define CAPT_RECEIVED_LONG_PACKETS_REG_WIDTH 32
+#define CAPT_LAST_COMMAND_REG_WIDTH 32
+/* #define CAPT_NEXT_COMMAND_REG_WIDTH 32 */
+#define CAPT_LAST_ACKNOWLEDGE_REG_WIDTH 32
+#define CAPT_NEXT_ACKNOWLEDGE_REG_WIDTH 32
+#define CAPT_FSM_STATE_INFO_REG_WIDTH ((CAPT_WRITE2MEM_FSM_STATE_BITS * 3) + (CAPT_SYNCHRONIZER_FSM_STATE_BITS * 3))
+
+//#define CAPT_INIT_RESTART_MEM_ADDR_WIDTH 9
+//#define CAPT_INIT_RESTART_MEM_REGION_WIDTH 9
+
+/* register reset value */
+#define CAPT_START_MODE_REG_RSTVAL 0
+#define CAPT_START_ADDR_REG_RSTVAL 0
+#define CAPT_MEM_REGION_SIZE_REG_RSTVAL 128
+#define CAPT_NUM_MEM_REGIONS_REG_RSTVAL 3
+#define CAPT_INIT_REG_RSTVAL 0
+
+#define CAPT_START_REG_RSTVAL 0
+#define CAPT_STOP_REG_RSTVAL 0
+
+#define CAPT_PACKET_LENGTH_REG_RSTVAL 0
+#define CAPT_RECEIVED_LENGTH_REG_RSTVAL 0
+#define CAPT_RECEIVED_SHORT_PACKETS_REG_RSTVAL 0
+#define CAPT_RECEIVED_LONG_PACKETS_REG_RSTVAL 0
+#define CAPT_LAST_COMMAND_REG_RSTVAL 0
+#define CAPT_NEXT_COMMAND_REG_RSTVAL 0
+#define CAPT_LAST_ACKNOWLEDGE_REG_RSTVAL 0
+#define CAPT_NEXT_ACKNOWLEDGE_REG_RSTVAL 0
+#define CAPT_FSM_STATE_INFO_REG_RSTVAL 0
+
+/* bit definitions */
+#define CAPT_INIT_RST_REG_BIT 0
+#define CAPT_INIT_FLUSH_BIT 1
+#define CAPT_INIT_RESYNC_BIT 2
+#define CAPT_INIT_RESTART_BIT 3
+#define CAPT_INIT_RESTART_MEM_ADDR_LSB 4
+#define CAPT_INIT_RESTART_MEM_ADDR_MSB 14
+#define CAPT_INIT_RESTART_MEM_REGION_LSB 15
+#define CAPT_INIT_RESTART_MEM_REGION_MSB 25
+
+
+#define CAPT_INIT_RST_REG_IDX CAPT_INIT_RST_REG_BIT
+#define CAPT_INIT_RST_REG_BITS 1
+#define CAPT_INIT_FLUSH_IDX CAPT_INIT_FLUSH_BIT
+#define CAPT_INIT_FLUSH_BITS 1
+#define CAPT_INIT_RESYNC_IDX CAPT_INIT_RESYNC_BIT
+#define CAPT_INIT_RESYNC_BITS 1
+#define CAPT_INIT_RESTART_IDX CAPT_INIT_RESTART_BIT
+#define CAPT_INIT_RESTART_BITS 1
+#define CAPT_INIT_RESTART_MEM_ADDR_IDX CAPT_INIT_RESTART_MEM_ADDR_LSB
+#define CAPT_INIT_RESTART_MEM_ADDR_BITS (CAPT_INIT_RESTART_MEM_ADDR_MSB - CAPT_INIT_RESTART_MEM_ADDR_LSB + 1)
+#define CAPT_INIT_RESTART_MEM_REGION_IDX CAPT_INIT_RESTART_MEM_REGION_LSB
+#define CAPT_INIT_RESTART_MEM_REGION_BITS (CAPT_INIT_RESTART_MEM_REGION_MSB - CAPT_INIT_RESTART_MEM_REGION_LSB + 1)
+
+
+
+/* --------------------------------------------------*/
+/* TOKEN INFO */
+/* --------------------------------------------------*/
+#define CAPT_TOKEN_ID_LSB 0
+#define CAPT_TOKEN_ID_MSB 3
+#define CAPT_TOKEN_WIDTH (CAPT_TOKEN_ID_MSB - CAPT_TOKEN_ID_LSB + 1) /* 4 */
+
+/* Command tokens IDs */
+#define CAPT_START_TOKEN_ID 0 /* 0000b */
+#define CAPT_STOP_TOKEN_ID 1 /* 0001b */
+#define CAPT_FREEZE_TOKEN_ID 2 /* 0010b */
+#define CAPT_RESUME_TOKEN_ID 3 /* 0011b */
+#define CAPT_INIT_TOKEN_ID 8 /* 1000b */
+
+#define CAPT_START_TOKEN_BIT 0
+#define CAPT_STOP_TOKEN_BIT 0
+#define CAPT_FREEZE_TOKEN_BIT 0
+#define CAPT_RESUME_TOKEN_BIT 0
+#define CAPT_INIT_TOKEN_BIT 0
+
+/* Acknowledge token IDs */
+#define CAPT_END_OF_PACKET_RECEIVED_TOKEN_ID 0 /* 0000b */
+#define CAPT_END_OF_PACKET_WRITTEN_TOKEN_ID 1 /* 0001b */
+#define CAPT_END_OF_REGION_WRITTEN_TOKEN_ID 2 /* 0010b */
+#define CAPT_FLUSH_DONE_TOKEN_ID 3 /* 0011b */
+#define CAPT_PREMATURE_SOP_TOKEN_ID 4 /* 0100b */
+#define CAPT_MISSING_SOP_TOKEN_ID 5 /* 0101b */
+#define CAPT_UNDEF_PH_TOKEN_ID 6 /* 0110b */
+#define CAPT_STOP_ACK_TOKEN_ID 7 /* 0111b */
+
+#define CAPT_PACKET_LENGTH_TOKEN_MSB 19
+#define CAPT_PACKET_LENGTH_TOKEN_LSB 4
+#define CAPT_SUPER_PACKET_LENGTH_TOKEN_MSB 20
+#define CAPT_SUPER_PACKET_LENGTH_TOKEN_LSB 4
+#define CAPT_PACKET_DATA_FORMAT_ID_TOKEN_MSB 25
+#define CAPT_PACKET_DATA_FORMAT_ID_TOKEN_LSB 20
+#define CAPT_PACKET_CH_ID_TOKEN_MSB 27
+#define CAPT_PACKET_CH_ID_TOKEN_LSB 26
+#define CAPT_PACKET_MEM_REGION_ID_TOKEN_MSB 29
+#define CAPT_PACKET_MEM_REGION_ID_TOKEN_LSB 21
+
+/* bit definition */
+#define CAPT_CMD_IDX CAPT_TOKEN_ID_LSB
+#define CAPT_CMD_BITS (CAPT_TOKEN_ID_MSB - CAPT_TOKEN_ID_LSB + 1)
+#define CAPT_SOP_IDX 32
+#define CAPT_SOP_BITS 1
+#define CAPT_PKT_INFO_IDX 16
+#define CAPT_PKT_INFO_BITS 8
+#define CAPT_PKT_TYPE_IDX 0
+#define CAPT_PKT_TYPE_BITS 6
+#define CAPT_HEADER_DATA_IDX 0
+#define CAPT_HEADER_DATA_BITS 16
+#define CAPT_PKT_DATA_IDX 0
+#define CAPT_PKT_DATA_BITS 32
+#define CAPT_WORD_CNT_IDX 0
+#define CAPT_WORD_CNT_BITS 16
+#define CAPT_ACK_TOKEN_ID_IDX 0
+#define CAPT_ACK_TOKEN_ID_BITS 4
+//#define CAPT_ACK_PKT_LEN_IDX CAPT_PACKET_LENGTH_TOKEN_LSB
+//#define CAPT_ACK_PKT_LEN_BITS (CAPT_PACKET_LENGTH_TOKEN_MSB - CAPT_PACKET_LENGTH_TOKEN_LSB + 1)
+//#define CAPT_ACK_PKT_INFO_IDX 20
+//#define CAPT_ACK_PKT_INFO_BITS 8
+//#define CAPT_ACK_MEM_REG_ID1_IDX 20 /* for capt_end_of_packet_written */
+//#define CAPT_ACK_MEM_REG_ID2_IDX 4 /* for capt_end_of_region_written */
+#define CAPT_ACK_PKT_LEN_IDX CAPT_PACKET_LENGTH_TOKEN_LSB
+#define CAPT_ACK_PKT_LEN_BITS (CAPT_PACKET_LENGTH_TOKEN_MSB - CAPT_PACKET_LENGTH_TOKEN_LSB + 1)
+#define CAPT_ACK_SUPER_PKT_LEN_IDX CAPT_SUPER_PACKET_LENGTH_TOKEN_LSB
+#define CAPT_ACK_SUPER_PKT_LEN_BITS (CAPT_SUPER_PACKET_LENGTH_TOKEN_MSB - CAPT_SUPER_PACKET_LENGTH_TOKEN_LSB + 1)
+#define CAPT_ACK_PKT_INFO_IDX CAPT_PACKET_DATA_FORMAT_ID_TOKEN_LSB
+#define CAPT_ACK_PKT_INFO_BITS (CAPT_PACKET_CH_ID_TOKEN_MSB - CAPT_PACKET_DATA_FORMAT_ID_TOKEN_LSB + 1)
+#define CAPT_ACK_MEM_REGION_ID_IDX CAPT_PACKET_MEM_REGION_ID_TOKEN_LSB
+#define CAPT_ACK_MEM_REGION_ID_BITS (CAPT_PACKET_MEM_REGION_ID_TOKEN_MSB - CAPT_PACKET_MEM_REGION_ID_TOKEN_LSB + 1)
+#define CAPT_ACK_PKT_TYPE_IDX CAPT_PACKET_DATA_FORMAT_ID_TOKEN_LSB
+#define CAPT_ACK_PKT_TYPE_BITS (CAPT_PACKET_DATA_FORMAT_ID_TOKEN_MSB - CAPT_PACKET_DATA_FORMAT_ID_TOKEN_LSB + 1)
+#define CAPT_INIT_TOKEN_INIT_IDX 4
+#define CAPT_INIT_TOKEN_INIT_BITS 22
+
+
+/* --------------------------------------------------*/
+/* MIPI */
+/* --------------------------------------------------*/
+
+#define CAPT_WORD_COUNT_WIDTH 16
+#define CAPT_PKT_CODE_WIDTH 6
+#define CAPT_CHN_NO_WIDTH 2
+#define CAPT_ERROR_INFO_WIDTH 8
+
+#define LONG_PKTCODE_MAX 63
+#define LONG_PKTCODE_MIN 16
+#define SHORT_PKTCODE_MAX 15
+
+
+/* --------------------------------------------------*/
+/* Packet Info */
+/* --------------------------------------------------*/
+#define CAPT_START_OF_FRAME 0
+#define CAPT_END_OF_FRAME 1
+#define CAPT_START_OF_LINE 2
+#define CAPT_END_OF_LINE 3
+#define CAPT_LINE_PAYLOAD 4
+#define CAPT_GEN_SH_PKT 5
+
+
+/* --------------------------------------------------*/
+/* Packet Data Type */
+/* --------------------------------------------------*/
+
+#define CAPT_YUV420_8_DATA 24 /* 01 1000 YUV420 8-bit */
+#define CAPT_YUV420_10_DATA 25 /* 01 1001 YUV420 10-bit */
+#define CAPT_YUV420_8L_DATA 26 /* 01 1010 YUV420 8-bit legacy */
+#define CAPT_YUV422_8_DATA 30 /* 01 1110 YUV422 8-bit */
+#define CAPT_YUV422_10_DATA 31 /* 01 1111 YUV422 10-bit */
+#define CAPT_RGB444_DATA 32 /* 10 0000 RGB444 */
+#define CAPT_RGB555_DATA 33 /* 10 0001 RGB555 */
+#define CAPT_RGB565_DATA 34 /* 10 0010 RGB565 */
+#define CAPT_RGB666_DATA 35 /* 10 0011 RGB666 */
+#define CAPT_RGB888_DATA 36 /* 10 0100 RGB888 */
+#define CAPT_RAW6_DATA 40 /* 10 1000 RAW6 */
+#define CAPT_RAW7_DATA 41 /* 10 1001 RAW7 */
+#define CAPT_RAW8_DATA 42 /* 10 1010 RAW8 */
+#define CAPT_RAW10_DATA 43 /* 10 1011 RAW10 */
+#define CAPT_RAW12_DATA 44 /* 10 1100 RAW12 */
+#define CAPT_RAW14_DATA 45 /* 10 1101 RAW14 */
+#define CAPT_USR_DEF_1_DATA 48 /* 11 0000 JPEG [User Defined 8-bit Data Type 1] */
+#define CAPT_USR_DEF_2_DATA 49 /* 11 0001 User Defined 8-bit Data Type 2 */
+#define CAPT_USR_DEF_3_DATA 50 /* 11 0010 User Defined 8-bit Data Type 3 */
+#define CAPT_USR_DEF_4_DATA 51 /* 11 0011 User Defined 8-bit Data Type 4 */
+#define CAPT_USR_DEF_5_DATA 52 /* 11 0100 User Defined 8-bit Data Type 5 */
+#define CAPT_USR_DEF_6_DATA 53 /* 11 0101 User Defined 8-bit Data Type 6 */
+#define CAPT_USR_DEF_7_DATA 54 /* 11 0110 User Defined 8-bit Data Type 7 */
+#define CAPT_USR_DEF_8_DATA 55 /* 11 0111 User Defined 8-bit Data Type 8 */
+#define CAPT_Emb_DATA 18 /* 01 0010 embedded eight bit non image data */
+#define CAPT_SOF_DATA 0 /* 00 0000 frame start */
+#define CAPT_EOF_DATA 1 /* 00 0001 frame end */
+#define CAPT_SOL_DATA 2 /* 00 0010 line start */
+#define CAPT_EOL_DATA 3 /* 00 0011 line end */
+#define CAPT_GEN_SH1_DATA 8 /* 00 1000 Generic Short Packet Code 1 */
+#define CAPT_GEN_SH2_DATA 9 /* 00 1001 Generic Short Packet Code 2 */
+#define CAPT_GEN_SH3_DATA 10 /* 00 1010 Generic Short Packet Code 3 */
+#define CAPT_GEN_SH4_DATA 11 /* 00 1011 Generic Short Packet Code 4 */
+#define CAPT_GEN_SH5_DATA 12 /* 00 1100 Generic Short Packet Code 5 */
+#define CAPT_GEN_SH6_DATA 13 /* 00 1101 Generic Short Packet Code 6 */
+#define CAPT_GEN_SH7_DATA 14 /* 00 1110 Generic Short Packet Code 7 */
+#define CAPT_GEN_SH8_DATA 15 /* 00 1111 Generic Short Packet Code 8 */
+#define CAPT_YUV420_8_CSPS_DATA 28 /* 01 1100 YUV420 8-bit (Chroma Shifted Pixel Sampling) */
+#define CAPT_YUV420_10_CSPS_DATA 29 /* 01 1101 YUV420 10-bit (Chroma Shifted Pixel Sampling) */
+#define CAPT_RESERVED_DATA_TYPE_MIN 56
+#define CAPT_RESERVED_DATA_TYPE_MAX 63
+#define CAPT_GEN_LONG_RESERVED_DATA_TYPE_MIN 19
+#define CAPT_GEN_LONG_RESERVED_DATA_TYPE_MAX 23
+#define CAPT_YUV_RESERVED_DATA_TYPE 27
+#define CAPT_RGB_RESERVED_DATA_TYPE_MIN 37
+#define CAPT_RGB_RESERVED_DATA_TYPE_MAX 39
+#define CAPT_RAW_RESERVED_DATA_TYPE_MIN 46
+#define CAPT_RAW_RESERVED_DATA_TYPE_MAX 47
+
+
+/* --------------------------------------------------*/
+/* Capture Unit State */
+/* --------------------------------------------------*/
+#define CAPT_FREE_RUN 0
+#define CAPT_NO_SYNC 1
+#define CAPT_SYNC_SWP 2
+#define CAPT_SYNC_MWP 3
+#define CAPT_SYNC_WAIT 4
+#define CAPT_FREEZE 5
+#define CAPT_RUN 6
+
+
+/* --------------------------------------------------*/
+
+#endif /* _isp_capture_defs_h */
+
+
+
+
+
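Editorial note: LONG_PKTCODE_MIN/MAX and SHORT_PKTCODE_MAX above partition the 6-bit MIPI packet codes. The small classifier below is not part of the patch and the helper name is invented; it only illustrates how the data-type codes in this file fall into the two ranges.

#include <stdio.h>

/* values copied from isp_capture_defs.h above */
#define LONG_PKTCODE_MAX  63
#define LONG_PKTCODE_MIN  16
#define CAPT_RAW10_DATA   43
#define CAPT_SOF_DATA      0

/* hypothetical helper: long packets carry payload, short packets do not */
static int is_long_packet(unsigned int pkt_code)
{
	return pkt_code >= LONG_PKTCODE_MIN && pkt_code <= LONG_PKTCODE_MAX;
}

int main(void)
{
	printf("RAW10: %s\n", is_long_packet(CAPT_RAW10_DATA) ? "long" : "short");
	printf("SOF:   %s\n", is_long_packet(CAPT_SOF_DATA)   ? "long" : "short");
	return 0;
}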
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/mmu_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/mmu_defs.h
new file mode 100644
index 000000000000..c038f39ffd25
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/mmu_defs.h
@@ -0,0 +1,23 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _mmu_defs_h
+#define _mmu_defs_h
+
+#define _HRT_MMU_INVALIDATE_TLB_REG_IDX 0
+#define _HRT_MMU_PAGE_TABLE_BASE_ADDRESS_REG_IDX 1
+
+#define _HRT_MMU_REG_ALIGN 4
+
+#endif /* _mmu_defs_h */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/scalar_processor_2400_params.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/scalar_processor_2400_params.h
new file mode 100644
index 000000000000..9b6c2893d950
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/scalar_processor_2400_params.h
@@ -0,0 +1,20 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _scalar_processor_2400_params_h
+#define _scalar_processor_2400_params_h
+
+#include "cell_params.h"
+
+#endif /* _scalar_processor_2400_params_h */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/sp_hrt.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/sp_hrt.h
new file mode 100644
index 000000000000..7ee4deba519a
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/sp_hrt.h
@@ -0,0 +1,24 @@
+#ifndef ISP2401
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _sp_hrt_h_
+#define _sp_hrt_h_
+
+#define hrt_sp_dmem(cell) HRT_PROC_TYPE_PROP(cell, _dmem)
+
+#define hrt_sp_dmem_master_port_address(cell) hrt_mem_master_port_address(cell, hrt_sp_dmem(cell))
+
+#endif /* _sp_hrt_h_ */
+#endif
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/str2mem_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/str2mem_defs.h
new file mode 100644
index 000000000000..1cb62444cf68
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/str2mem_defs.h
@@ -0,0 +1,39 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _ST2MEM_DEFS_H
+#define _ST2MEM_DEFS_H
+
+#define _STR2MEM_CRUN_BIT 0x100000
+#define _STR2MEM_CMD_BITS 0x0F0000
+#define _STR2MEM_COUNT_BITS 0x00FFFF
+
+#define _STR2MEM_BLOCKS_CMD 0xA0000
+#define _STR2MEM_PACKETS_CMD 0xB0000
+#define _STR2MEM_BYTES_CMD 0xC0000
+#define _STR2MEM_BYTES_FROM_PACKET_CMD 0xD0000
+
+#define _STR2MEM_SOFT_RESET_REG_ID 0
+#define _STR2MEM_INPUT_ENDIANNESS_REG_ID 1
+#define _STR2MEM_OUTPUT_ENDIANNESS_REG_ID 2
+#define _STR2MEM_BIT_SWAPPING_REG_ID 3
+#define _STR2MEM_BLOCK_SYNC_LEVEL_REG_ID 4
+#define _STR2MEM_PACKET_SYNC_LEVEL_REG_ID 5
+#define _STR2MEM_READ_POST_WRITE_SYNC_ENABLE_REG_ID 6
+#define _STR2MEM_DUAL_BYTE_INPUTS_ENABLED_REG_ID 7
+#define _STR2MEM_EN_STAT_UPDATE_ID 8
+
+#define _STR2MEM_REG_ALIGN 4
+
+#endif /* _ST2MEM_DEFS_H */
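Editorial note: the masks above suggest a command word with the command code in bits 19:16 and the transfer count in bits 15:0; the packing helper below is a sketch under that assumption (not part of the patch, helper name invented).

#include <stdio.h>

/* values copied from str2mem_defs.h above */
#define _STR2MEM_CMD_BITS   0x0F0000
#define _STR2MEM_COUNT_BITS 0x00FFFF
#define _STR2MEM_BYTES_CMD  0xC0000

/* hypothetical helper: combine a command code with a transfer count */
static unsigned int str2mem_cmd_word(unsigned int cmd, unsigned int count)
{
	return (cmd & _STR2MEM_CMD_BITS) | (count & _STR2MEM_COUNT_BITS);
}

int main(void)
{
	/* "transfer 256 bytes" -> 0x000C0100 */
	printf("0x%08X\n", str2mem_cmd_word(_STR2MEM_BYTES_CMD, 256));
	return 0;
}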
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/streaming_to_mipi_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/streaming_to_mipi_defs.h
new file mode 100644
index 000000000000..60143b8743a2
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/streaming_to_mipi_defs.h
@@ -0,0 +1,28 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _streaming_to_mipi_defs_h
+#define _streaming_to_mipi_defs_h
+
+#define HIVE_STR_TO_MIPI_VALID_A_BIT 0
+#define HIVE_STR_TO_MIPI_VALID_B_BIT 1
+#define HIVE_STR_TO_MIPI_SOL_BIT 2
+#define HIVE_STR_TO_MIPI_EOL_BIT 3
+#define HIVE_STR_TO_MIPI_SOF_BIT 4
+#define HIVE_STR_TO_MIPI_EOF_BIT 5
+#define HIVE_STR_TO_MIPI_CH_ID_LSB 6
+
+#define HIVE_STR_TO_MIPI_DATA_A_LSB (HIVE_STR_TO_MIPI_VALID_B_BIT + 1)
+
+#endif /* _streaming_to_mipi_defs_h */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/timed_controller_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/timed_controller_defs.h
new file mode 100644
index 000000000000..d2b8972b0d9e
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/timed_controller_defs.h
@@ -0,0 +1,22 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _timed_controller_defs_h
+#define _timed_controller_defs_h
+
+#define _HRT_TIMED_CONTROLLER_CMD_REG_IDX 0
+
+#define _HRT_TIMED_CONTROLLER_REG_ALIGN 4
+
+#endif /* _timed_controller_defs_h */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/var.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/var.h
new file mode 100644
index 000000000000..19b19ef484f9
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/var.h
@@ -0,0 +1,99 @@
+#ifndef ISP2401
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _HRT_VAR_H
+#define _HRT_VAR_H
+
+#include "version.h"
+#include "system_api.h"
+#include "hive_types.h"
+
+#define hrt_int_type_of_char char
+#define hrt_int_type_of_uchar unsigned char
+#define hrt_int_type_of_short short
+#define hrt_int_type_of_ushort unsigned short
+#define hrt_int_type_of_int int
+#define hrt_int_type_of_uint unsigned int
+#define hrt_int_type_of_long long
+#define hrt_int_type_of_ulong unsigned long
+#define hrt_int_type_of_ptr unsigned int
+
+#define hrt_host_type_of_char char
+#define hrt_host_type_of_uchar unsigned char
+#define hrt_host_type_of_short short
+#define hrt_host_type_of_ushort unsigned short
+#define hrt_host_type_of_int int
+#define hrt_host_type_of_uint unsigned int
+#define hrt_host_type_of_long long
+#define hrt_host_type_of_ulong unsigned long
+#define hrt_host_type_of_ptr void*
+
+#define HRT_TYPE_BYTES(cell, type) (HRT_TYPE_BITS(cell, type)/8)
+#define HRT_HOST_TYPE(cell_type) HRTCAT(hrt_host_type_of_, cell_type)
+#define HRT_INT_TYPE(type) HRTCAT(hrt_int_type_of_, type)
+
+#ifdef C_RUN
+
+#ifdef C_RUN_DYNAMIC_LINK_PROGRAMS
+extern void *csim_processor_get_crun_symbol(hive_proc_id p, const char *sym);
+#define _hrt_cell_get_crun_symbol(cell,sym) csim_processor_get_crun_symbol(cell,HRTSTR(sym))
+#define _hrt_cell_get_crun_indexed_symbol(cell,sym) csim_processor_get_crun_symbol(cell,HRTSTR(sym))
+#else
+#define _hrt_cell_get_crun_symbol(cell,sym) (&sym)
+#define _hrt_cell_get_crun_indexed_symbol(cell,sym) (sym)
+#endif // C_RUN_DYNAMIC_LINK_PROGRAMS
+
+#define hrt_scalar_store(cell, type, var, data) \
+ ((*(HRT_HOST_TYPE(type)*)_hrt_cell_get_crun_symbol(cell,var)) = (data))
+#define hrt_scalar_load(cell, type, var) \
+ ((*(HRT_HOST_TYPE(type)*)_hrt_cell_get_crun_symbol(cell,var)))
+
+#define hrt_indexed_store(cell, type, array, index, data) \
+ ((((HRT_HOST_TYPE(type)*)_hrt_cell_get_crun_indexed_symbol(cell,array))[index]) = (data))
+#define hrt_indexed_load(cell, type, array, index) \
+ (((HRT_HOST_TYPE(type)*)_hrt_cell_get_crun_indexed_symbol(cell,array))[index])
+
+#else /* C_RUN */
+
+#define hrt_scalar_store(cell, type, var, data) \
+ HRTCAT(hrt_mem_store_,HRT_TYPE_BITS(cell, type))(\
+ cell, \
+ HRTCAT(HIVE_MEM_,var), \
+ HRTCAT(HIVE_ADDR_,var), \
+ (HRT_INT_TYPE(type))(data))
+
+#define hrt_scalar_load(cell, type, var) \
+ (HRT_HOST_TYPE(type))(HRTCAT4(_hrt_mem_load_,HRT_PROC_TYPE(cell),_,type) ( \
+ cell, \
+ HRTCAT(HIVE_MEM_,var), \
+ HRTCAT(HIVE_ADDR_,var)))
+
+#define hrt_indexed_store(cell, type, array, index, data) \
+ HRTCAT(hrt_mem_store_,HRT_TYPE_BITS(cell, type))(\
+ cell, \
+ HRTCAT(HIVE_MEM_,array), \
+ (HRTCAT(HIVE_ADDR_,array))+((index)*HRT_TYPE_BYTES(cell, type)), \
+ (HRT_INT_TYPE(type))(data))
+
+#define hrt_indexed_load(cell, type, array, index) \
+ (HRT_HOST_TYPE(type))(HRTCAT4(_hrt_mem_load_,HRT_PROC_TYPE(cell),_,type) ( \
+ cell, \
+ HRTCAT(HIVE_MEM_,array), \
+ (HRTCAT(HIVE_ADDR_,array))+((index)*HRT_TYPE_BYTES(cell, type))))
+
+#endif /* C_RUN */
+
+#endif /* _HRT_VAR_H */
+#endif
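Editorial note: hrt_scalar_store/load above resolve a variable's DMEM location purely by pasting the variable name onto the HIVE_MEM_/HIVE_ADDR_ prefixes, which is what the generated map in spmem_dump.c further down provides. The stand-alone sketch below is not part of the patch; HRTCAT and the map entries are local stand-ins that demonstrate only the token-pasting mechanism.

#include <stdio.h>

/* local stand-in for the HRTCAT helper used by var.h above */
#define HRTCAT(a, b) a##b

/* stand-ins for entries a generated DMEM map would provide */
#define HIVE_MEM_demo_var  "scalar_processor_2400_dmem"
#define HIVE_ADDR_demo_var 0x1234

/* hypothetical wrappers: look up memory name and address by variable name */
#define demo_scalar_mem(var)  HRTCAT(HIVE_MEM_, var)
#define demo_scalar_addr(var) HRTCAT(HIVE_ADDR_, var)

int main(void)
{
	printf("%s @ 0x%X\n", demo_scalar_mem(demo_var), demo_scalar_addr(demo_var));
	return 0;
}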
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/version.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/version.h
new file mode 100644
index 000000000000..bbc4948baea9
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/version.h
@@ -0,0 +1,20 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef HRT_VERSION_H
+#define HRT_VERSION_H
+#define HRT_VERSION_MAJOR 1
+#define HRT_VERSION_MINOR 4
+#define HRT_VERSION 1_4
+#endif
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/spmem_dump.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/spmem_dump.c
new file mode 100644
index 000000000000..09f0780f0c80
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/spmem_dump.c
@@ -0,0 +1,3634 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _sp_map_h_
+#define _sp_map_h_
+
+
+#ifndef _hrt_dummy_use_blob_sp
+#define _hrt_dummy_use_blob_sp()
+#endif
+
+#define _hrt_cell_load_program_sp(proc) _hrt_cell_load_program_embedded(proc, sp)
+
+#ifndef ISP2401
+/* function input_system_acquisition_stop: ADE */
+#else
+/* function input_system_acquisition_stop: AD8 */
+#endif
+
+#ifndef ISP2401
+/* function longjmp: 684E */
+#else
+/* function longjmp: 69C1 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_HIVE_IF_SRST_MASK
+#define HIVE_MEM_HIVE_IF_SRST_MASK scalar_processor_2400_dmem
+#define HIVE_ADDR_HIVE_IF_SRST_MASK 0x1C8
+#define HIVE_SIZE_HIVE_IF_SRST_MASK 16
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_HIVE_IF_SRST_MASK scalar_processor_2400_dmem
+#define HIVE_ADDR_sp_HIVE_IF_SRST_MASK 0x1C8
+#define HIVE_SIZE_sp_HIVE_IF_SRST_MASK 16
+
+#ifndef ISP2401
+/* function tmpmem_init_dmem: 6599 */
+#else
+/* function tmpmem_init_dmem: 66D4 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_isys_sp_token_map_receive_ack: 5EDD */
+#else
+/* function ia_css_isys_sp_token_map_receive_ack: 6018 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_dmaproxy_sp_set_addr_B: 3345 */
+#else
+/* function ia_css_dmaproxy_sp_set_addr_B: 3539 */
+
+/* function ia_css_pipe_data_init_tagger_resources: A4F */
+#endif
+
+/* function debug_buffer_set_ddr_addr: DD */
+
+#ifndef ISP2401
+/* function receiver_port_reg_load: AC2 */
+#else
+/* function receiver_port_reg_load: ABC */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_vbuf_mipi
+#define HIVE_MEM_vbuf_mipi scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_vbuf_mipi 0x631C
+#else
+#define HIVE_ADDR_vbuf_mipi 0x6378
+#endif
+#define HIVE_SIZE_vbuf_mipi 12
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_vbuf_mipi scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_vbuf_mipi 0x631C
+#else
+#define HIVE_ADDR_sp_vbuf_mipi 0x6378
+#endif
+#define HIVE_SIZE_sp_vbuf_mipi 12
+
+#ifndef ISP2401
+/* function ia_css_event_sp_decode: 3536 */
+#else
+/* function ia_css_event_sp_decode: 372A */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_queue_get_size: 48BE */
+#else
+/* function ia_css_queue_get_size: 4B46 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_queue_load: 4EFF */
+#else
+/* function ia_css_queue_load: 515D */
+#endif
+
+#ifndef ISP2401
+/* function setjmp: 6857 */
+#else
+/* function setjmp: 69CA */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_sem_for_sp2host_isys_event_queue
+#define HIVE_MEM_sem_for_sp2host_isys_event_queue scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sem_for_sp2host_isys_event_queue 0x4684
+#else
+#define HIVE_ADDR_sem_for_sp2host_isys_event_queue 0x46CC
+#endif
+#define HIVE_SIZE_sem_for_sp2host_isys_event_queue 20
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_sem_for_sp2host_isys_event_queue scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_sem_for_sp2host_isys_event_queue 0x4684
+#else
+#define HIVE_ADDR_sp_sem_for_sp2host_isys_event_queue 0x46CC
+#endif
+#define HIVE_SIZE_sp_sem_for_sp2host_isys_event_queue 20
+
+#ifndef ISP2401
+/* function ia_css_dmaproxy_sp_wait_for_ack: 6E07 */
+#else
+/* function ia_css_dmaproxy_sp_wait_for_ack: 6F4B */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_sp_rawcopy_func: 5124 */
+#else
+/* function ia_css_sp_rawcopy_func: 5382 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_tagger_buf_sp_pop_marked: 2A10 */
+#else
+/* function ia_css_tagger_buf_sp_pop_marked: 2BB2 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_isp_stage
+#define HIVE_MEM_isp_stage scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_isp_stage 0x5C00
+#else
+#define HIVE_ADDR_isp_stage 0x5C60
+#endif
+#define HIVE_SIZE_isp_stage 832
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_isp_stage scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_isp_stage 0x5C00
+#else
+#define HIVE_ADDR_sp_isp_stage 0x5C60
+#endif
+#define HIVE_SIZE_sp_isp_stage 832
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_vbuf_raw
+#define HIVE_MEM_vbuf_raw scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_vbuf_raw 0x2F4
+#else
+#define HIVE_ADDR_vbuf_raw 0x30C
+#endif
+#define HIVE_SIZE_vbuf_raw 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_vbuf_raw scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_vbuf_raw 0x2F4
+#else
+#define HIVE_ADDR_sp_vbuf_raw 0x30C
+#endif
+#define HIVE_SIZE_sp_vbuf_raw 4
+
+#ifndef ISP2401
+/* function ia_css_sp_bin_copy_func: 504B */
+#else
+/* function ia_css_sp_bin_copy_func: 52A9 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_queue_item_store: 4C4D */
+#else
+/* function ia_css_queue_item_store: 4EAB */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_ia_css_bufq_sp_pipe_private_metadata_bufs
+#define HIVE_MEM_ia_css_bufq_sp_pipe_private_metadata_bufs scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_ia_css_bufq_sp_pipe_private_metadata_bufs 0x4AA0
+#else
+#define HIVE_ADDR_ia_css_bufq_sp_pipe_private_metadata_bufs 0x4AFC
+#endif
+#define HIVE_SIZE_ia_css_bufq_sp_pipe_private_metadata_bufs 20
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_ia_css_bufq_sp_pipe_private_metadata_bufs scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_ia_css_bufq_sp_pipe_private_metadata_bufs 0x4AA0
+#else
+#define HIVE_ADDR_sp_ia_css_bufq_sp_pipe_private_metadata_bufs 0x4AFC
+#endif
+#define HIVE_SIZE_sp_ia_css_bufq_sp_pipe_private_metadata_bufs 20
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_ia_css_bufq_sp_pipe_private_buffer_bufs
+#define HIVE_MEM_ia_css_bufq_sp_pipe_private_buffer_bufs scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_ia_css_bufq_sp_pipe_private_buffer_bufs 0x4AB4
+#else
+#define HIVE_ADDR_ia_css_bufq_sp_pipe_private_buffer_bufs 0x4B10
+#endif
+#define HIVE_SIZE_ia_css_bufq_sp_pipe_private_buffer_bufs 160
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_ia_css_bufq_sp_pipe_private_buffer_bufs scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_ia_css_bufq_sp_pipe_private_buffer_bufs 0x4AB4
+#else
+#define HIVE_ADDR_sp_ia_css_bufq_sp_pipe_private_buffer_bufs 0x4B10
+#endif
+#define HIVE_SIZE_sp_ia_css_bufq_sp_pipe_private_buffer_bufs 160
+
+/* function sp_start_isp: 45D */
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_sp_binary_group
+#define HIVE_MEM_sp_binary_group scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_binary_group 0x5FF0
+#else
+#define HIVE_ADDR_sp_binary_group 0x6050
+#endif
+#define HIVE_SIZE_sp_binary_group 32
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_sp_binary_group scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_sp_binary_group 0x5FF0
+#else
+#define HIVE_ADDR_sp_sp_binary_group 0x6050
+#endif
+#define HIVE_SIZE_sp_sp_binary_group 32
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_sp_sw_state
+#define HIVE_MEM_sp_sw_state scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_sw_state 0x62AC
+#else
+#define HIVE_ADDR_sp_sw_state 0x6308
+#endif
+#define HIVE_SIZE_sp_sw_state 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_sp_sw_state scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_sp_sw_state 0x62AC
+#else
+#define HIVE_ADDR_sp_sp_sw_state 0x6308
+#endif
+#define HIVE_SIZE_sp_sp_sw_state 4
+
+#ifndef ISP2401
+/* function ia_css_thread_sp_main: D5B */
+#else
+/* function ia_css_thread_sp_main: D50 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_ispctrl_sp_init_internal_buffers: 373C */
+#else
+/* function ia_css_ispctrl_sp_init_internal_buffers: 396B */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_sp2host_psys_event_queue_handle
+#define HIVE_MEM_sp2host_psys_event_queue_handle scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp2host_psys_event_queue_handle 0x4B54
+#else
+#define HIVE_ADDR_sp2host_psys_event_queue_handle 0x4BB0
+#endif
+#define HIVE_SIZE_sp2host_psys_event_queue_handle 12
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_sp2host_psys_event_queue_handle scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_sp2host_psys_event_queue_handle 0x4B54
+#else
+#define HIVE_ADDR_sp_sp2host_psys_event_queue_handle 0x4BB0
+#endif
+#define HIVE_SIZE_sp_sp2host_psys_event_queue_handle 12
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_sem_for_sp2host_psys_event_queue
+#define HIVE_MEM_sem_for_sp2host_psys_event_queue scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sem_for_sp2host_psys_event_queue 0x4698
+#else
+#define HIVE_ADDR_sem_for_sp2host_psys_event_queue 0x46E0
+#endif
+#define HIVE_SIZE_sem_for_sp2host_psys_event_queue 20
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_sem_for_sp2host_psys_event_queue scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_sem_for_sp2host_psys_event_queue 0x4698
+#else
+#define HIVE_ADDR_sp_sem_for_sp2host_psys_event_queue 0x46E0
+#endif
+#define HIVE_SIZE_sp_sem_for_sp2host_psys_event_queue 20
+
+#ifndef ISP2401
+/* function ia_css_tagger_sp_propagate_frame: 2429 */
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_sp_stop_copy_preview
+#define HIVE_MEM_sp_stop_copy_preview scalar_processor_2400_dmem
+#define HIVE_ADDR_sp_stop_copy_preview 0x6290
+#define HIVE_SIZE_sp_stop_copy_preview 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_sp_stop_copy_preview scalar_processor_2400_dmem
+#define HIVE_ADDR_sp_sp_stop_copy_preview 0x6290
+#define HIVE_SIZE_sp_sp_stop_copy_preview 4
+#else
+/* function ia_css_tagger_sp_propagate_frame: 2479 */
+#endif
+
+#ifndef ISP2401
+/* function input_system_reg_load: B17 */
+#else
+/* function input_system_reg_load: B11 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_vbuf_handles
+#define HIVE_MEM_vbuf_handles scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_vbuf_handles 0x6328
+#else
+#define HIVE_ADDR_vbuf_handles 0x6384
+#endif
+#define HIVE_SIZE_vbuf_handles 960
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_vbuf_handles scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_vbuf_handles 0x6328
+#else
+#define HIVE_ADDR_sp_vbuf_handles 0x6384
+#endif
+#define HIVE_SIZE_sp_vbuf_handles 960
+
+#ifndef ISP2401
+/* function ia_css_queue_store: 4DB3 */
+
+/* function ia_css_sp_flash_register: 2C45 */
+#else
+/* function ia_css_queue_store: 5011 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_sp_rawcopy_dummy_function: 566B */
+#else
+/* function ia_css_sp_flash_register: 2DE7 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_isys_sp_backend_create: 5B50 */
+#else
+/* function ia_css_isys_sp_backend_create: 5C8B */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_pipeline_sp_init: 184C */
+#else
+/* function ia_css_pipeline_sp_init: 1886 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_tagger_sp_configure: 2319 */
+#else
+/* function ia_css_tagger_sp_configure: 2369 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_ispctrl_sp_end_binary: 357F */
+#else
+/* function ia_css_ispctrl_sp_end_binary: 3773 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_ia_css_bufq_sp_h_pipe_private_per_frame_ddr_ptrs
+#define HIVE_MEM_ia_css_bufq_sp_h_pipe_private_per_frame_ddr_ptrs scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_ia_css_bufq_sp_h_pipe_private_per_frame_ddr_ptrs 0x4B60
+#else
+#define HIVE_ADDR_ia_css_bufq_sp_h_pipe_private_per_frame_ddr_ptrs 0x4BBC
+#endif
+#define HIVE_SIZE_ia_css_bufq_sp_h_pipe_private_per_frame_ddr_ptrs 20
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_ia_css_bufq_sp_h_pipe_private_per_frame_ddr_ptrs scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_ia_css_bufq_sp_h_pipe_private_per_frame_ddr_ptrs 0x4B60
+#else
+#define HIVE_ADDR_sp_ia_css_bufq_sp_h_pipe_private_per_frame_ddr_ptrs 0x4BBC
+#endif
+#define HIVE_SIZE_sp_ia_css_bufq_sp_h_pipe_private_per_frame_ddr_ptrs 20
+
+#ifndef ISP2401
+/* function receiver_port_reg_store: AC9 */
+#else
+/* function receiver_port_reg_store: AC3 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_event_is_pending_mask
+#define HIVE_MEM_event_is_pending_mask scalar_processor_2400_dmem
+#define HIVE_ADDR_event_is_pending_mask 0x5C
+#define HIVE_SIZE_event_is_pending_mask 44
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_event_is_pending_mask scalar_processor_2400_dmem
+#define HIVE_ADDR_sp_event_is_pending_mask 0x5C
+#define HIVE_SIZE_sp_event_is_pending_mask 44
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_sp_all_cb_elems_frame
+#define HIVE_MEM_sp_all_cb_elems_frame scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_all_cb_elems_frame 0x46AC
+#else
+#define HIVE_ADDR_sp_all_cb_elems_frame 0x46F4
+#endif
+#define HIVE_SIZE_sp_all_cb_elems_frame 16
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_sp_all_cb_elems_frame scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_sp_all_cb_elems_frame 0x46AC
+#else
+#define HIVE_ADDR_sp_sp_all_cb_elems_frame 0x46F4
+#endif
+#define HIVE_SIZE_sp_sp_all_cb_elems_frame 16
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_sp2host_isys_event_queue_handle
+#define HIVE_MEM_sp2host_isys_event_queue_handle scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp2host_isys_event_queue_handle 0x4B74
+#else
+#define HIVE_ADDR_sp2host_isys_event_queue_handle 0x4BD0
+#endif
+#define HIVE_SIZE_sp2host_isys_event_queue_handle 12
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_sp2host_isys_event_queue_handle scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_sp2host_isys_event_queue_handle 0x4B74
+#else
+#define HIVE_ADDR_sp_sp2host_isys_event_queue_handle 0x4BD0
+#endif
+#define HIVE_SIZE_sp_sp2host_isys_event_queue_handle 12
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_host_sp_com
+#define HIVE_MEM_host_sp_com scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_host_sp_com 0x4114
+#else
+#define HIVE_ADDR_host_sp_com 0x4134
+#endif
+#define HIVE_SIZE_host_sp_com 220
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_host_sp_com scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_host_sp_com 0x4114
+#else
+#define HIVE_ADDR_sp_host_sp_com 0x4134
+#endif
+#define HIVE_SIZE_sp_host_sp_com 220
+
+#ifndef ISP2401
+/* function ia_css_queue_get_free_space: 4A12 */
+#else
+/* function ia_css_queue_get_free_space: 4C70 */
+#endif
+
+#ifndef ISP2401
+/* function exec_image_pipe: 6C4 */
+#else
+/* function exec_image_pipe: 658 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_sp_init_dmem_data
+#define HIVE_MEM_sp_init_dmem_data scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_init_dmem_data 0x62B0
+#else
+#define HIVE_ADDR_sp_init_dmem_data 0x630C
+#endif
+#define HIVE_SIZE_sp_init_dmem_data 24
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_sp_init_dmem_data scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_sp_init_dmem_data 0x62B0
+#else
+#define HIVE_ADDR_sp_sp_init_dmem_data 0x630C
+#endif
+#define HIVE_SIZE_sp_sp_init_dmem_data 24
+
+#ifndef ISP2401
+/* function ia_css_sp_metadata_start: 592D */
+#else
+/* function ia_css_sp_metadata_start: 5A68 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_bufq_sp_init_buffer_queues: 2CB4 */
+#else
+/* function ia_css_bufq_sp_init_buffer_queues: 2E56 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_pipeline_sp_stop: 182F */
+#else
+/* function ia_css_pipeline_sp_stop: 1869 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_tagger_sp_connect_pipes: 2803 */
+#else
+/* function ia_css_tagger_sp_connect_pipes: 2853 */
+#endif
+
+#ifndef ISP2401
+/* function sp_isys_copy_wait: 70D */
+#else
+/* function sp_isys_copy_wait: 6A1 */
+#endif
+
+/* function is_isp_debug_buffer_full: 337 */
+
+#ifndef ISP2401
+/* function ia_css_dmaproxy_sp_configure_channel_from_info: 32C8 */
+#else
+/* function ia_css_dmaproxy_sp_configure_channel_from_info: 34A9 */
+#endif
+
+#ifndef ISP2401
+/* function encode_and_post_timer_event: A30 */
+#else
+/* function encode_and_post_timer_event: 9C4 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_sp_per_frame_data
+#define HIVE_MEM_sp_per_frame_data scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_per_frame_data 0x41F0
+#else
+#define HIVE_ADDR_sp_per_frame_data 0x4210
+#endif
+#define HIVE_SIZE_sp_per_frame_data 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_sp_per_frame_data scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_sp_per_frame_data 0x41F0
+#else
+#define HIVE_ADDR_sp_sp_per_frame_data 0x4210
+#endif
+#define HIVE_SIZE_sp_sp_per_frame_data 4
+
+#ifndef ISP2401
+/* function ia_css_rmgr_sp_vbuf_dequeue: 62ED */
+#else
+/* function ia_css_rmgr_sp_vbuf_dequeue: 6428 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_host2sp_psys_event_queue_handle
+#define HIVE_MEM_host2sp_psys_event_queue_handle scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_host2sp_psys_event_queue_handle 0x4B80
+#else
+#define HIVE_ADDR_host2sp_psys_event_queue_handle 0x4BDC
+#endif
+#define HIVE_SIZE_host2sp_psys_event_queue_handle 12
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_host2sp_psys_event_queue_handle scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_host2sp_psys_event_queue_handle 0x4B80
+#else
+#define HIVE_ADDR_sp_host2sp_psys_event_queue_handle 0x4BDC
+#endif
+#define HIVE_SIZE_sp_host2sp_psys_event_queue_handle 12
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_xmem_bin_addr
+#define HIVE_MEM_xmem_bin_addr scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_xmem_bin_addr 0x41F4
+#else
+#define HIVE_ADDR_xmem_bin_addr 0x4214
+#endif
+#define HIVE_SIZE_xmem_bin_addr 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_xmem_bin_addr scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_xmem_bin_addr 0x41F4
+#else
+#define HIVE_ADDR_sp_xmem_bin_addr 0x4214
+#endif
+#define HIVE_SIZE_sp_xmem_bin_addr 4
+
+#ifndef ISP2401
+/* function tmr_clock_init: 13FB */
+#else
+/* function tmr_clock_init: 141C */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_pipeline_sp_run: 141C */
+#else
+/* function ia_css_pipeline_sp_run: 143D */
+#endif
+
+#ifndef ISP2401
+/* function memcpy: 68F7 */
+#else
+/* function memcpy: 6A6A */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_GP_DEVICE_BASE
+#define HIVE_MEM_GP_DEVICE_BASE scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_GP_DEVICE_BASE 0x2FC
+#else
+#define HIVE_ADDR_GP_DEVICE_BASE 0x314
+#endif
+#define HIVE_SIZE_GP_DEVICE_BASE 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_GP_DEVICE_BASE scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_GP_DEVICE_BASE 0x2FC
+#else
+#define HIVE_ADDR_sp_GP_DEVICE_BASE 0x314
+#endif
+#define HIVE_SIZE_sp_GP_DEVICE_BASE 4
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_ia_css_thread_sp_ready_queue
+#define HIVE_MEM_ia_css_thread_sp_ready_queue scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_ia_css_thread_sp_ready_queue 0x1E0
+#else
+#define HIVE_ADDR_ia_css_thread_sp_ready_queue 0x1E4
+#endif
+#define HIVE_SIZE_ia_css_thread_sp_ready_queue 12
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_ia_css_thread_sp_ready_queue scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_ia_css_thread_sp_ready_queue 0x1E0
+#else
+#define HIVE_ADDR_sp_ia_css_thread_sp_ready_queue 0x1E4
+#endif
+#define HIVE_SIZE_sp_ia_css_thread_sp_ready_queue 12
+
+#ifndef ISP2401
+/* function input_system_reg_store: B1E */
+#else
+/* function input_system_reg_store: B18 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_isys_sp_frontend_start: 5D66 */
+#else
+/* function ia_css_isys_sp_frontend_start: 5EA1 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_uds_sp_scale_params: 6600 */
+#else
+/* function ia_css_uds_sp_scale_params: 6773 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_circbuf_increase_size: E40 */
+#else
+/* function ia_css_circbuf_increase_size: E35 */
+#endif
+
+#ifndef ISP2401
+/* function __divu: 6875 */
+#else
+/* function __divu: 69E8 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_thread_sp_get_state: C83 */
+#else
+/* function ia_css_thread_sp_get_state: C78 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_sem_for_cont_capt_stop
+#define HIVE_MEM_sem_for_cont_capt_stop scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sem_for_cont_capt_stop 0x46BC
+#else
+#define HIVE_ADDR_sem_for_cont_capt_stop 0x4704
+#endif
+#define HIVE_SIZE_sem_for_cont_capt_stop 20
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_sem_for_cont_capt_stop scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_sem_for_cont_capt_stop 0x46BC
+#else
+#define HIVE_ADDR_sp_sem_for_cont_capt_stop 0x4704
+#endif
+#define HIVE_SIZE_sp_sem_for_cont_capt_stop 20
+
+#ifndef ISP2401
+/* function thread_fiber_sp_main: E39 */
+#else
+/* function thread_fiber_sp_main: E2E */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_sp_isp_pipe_thread
+#define HIVE_MEM_sp_isp_pipe_thread scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_isp_pipe_thread 0x4800
+#define HIVE_SIZE_sp_isp_pipe_thread 340
+#else
+#define HIVE_ADDR_sp_isp_pipe_thread 0x4848
+#define HIVE_SIZE_sp_isp_pipe_thread 360
+#endif
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_sp_isp_pipe_thread scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_sp_isp_pipe_thread 0x4800
+#define HIVE_SIZE_sp_sp_isp_pipe_thread 340
+#else
+#define HIVE_ADDR_sp_sp_isp_pipe_thread 0x4848
+#define HIVE_SIZE_sp_sp_isp_pipe_thread 360
+#endif
+
+#ifndef ISP2401
+/* function ia_css_parambuf_sp_handle_parameter_sets: 128A */
+#else
+/* function ia_css_parambuf_sp_handle_parameter_sets: 127F */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_spctrl_sp_set_state: 595C */
+#else
+/* function ia_css_spctrl_sp_set_state: 5A97 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_thread_sem_sp_signal: 6AF7 */
+#else
+/* function ia_css_thread_sem_sp_signal: 6C6C */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_IRQ_BASE
+#define HIVE_MEM_IRQ_BASE scalar_processor_2400_dmem
+#define HIVE_ADDR_IRQ_BASE 0x2C
+#define HIVE_SIZE_IRQ_BASE 16
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_IRQ_BASE scalar_processor_2400_dmem
+#define HIVE_ADDR_sp_IRQ_BASE 0x2C
+#define HIVE_SIZE_sp_IRQ_BASE 16
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_TIMED_CTRL_BASE
+#define HIVE_MEM_TIMED_CTRL_BASE scalar_processor_2400_dmem
+#define HIVE_ADDR_TIMED_CTRL_BASE 0x40
+#define HIVE_SIZE_TIMED_CTRL_BASE 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_TIMED_CTRL_BASE scalar_processor_2400_dmem
+#define HIVE_ADDR_sp_TIMED_CTRL_BASE 0x40
+#define HIVE_SIZE_sp_TIMED_CTRL_BASE 4
+
+#ifndef ISP2401
+/* function ia_css_isys_sp_isr: 6FDC */
+
+/* function ia_css_isys_sp_generate_exp_id: 60FE */
+#else
+/* function ia_css_isys_sp_isr: 7139 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_rmgr_sp_init: 61E8 */
+#else
+/* function ia_css_isys_sp_generate_exp_id: 6239 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_thread_sem_sp_init: 6BC8 */
+#else
+/* function ia_css_rmgr_sp_init: 6323 */
+#endif
+
+#ifndef ISP2401
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_is_isp_requested
+#define HIVE_MEM_is_isp_requested scalar_processor_2400_dmem
+#define HIVE_ADDR_is_isp_requested 0x308
+#define HIVE_SIZE_is_isp_requested 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_is_isp_requested scalar_processor_2400_dmem
+#define HIVE_ADDR_sp_is_isp_requested 0x308
+#define HIVE_SIZE_sp_is_isp_requested 4
+#else
+/* function ia_css_thread_sem_sp_init: 6D3B */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_sem_for_reading_cb_frame
+#define HIVE_MEM_sem_for_reading_cb_frame scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sem_for_reading_cb_frame 0x46D0
+#else
+#define HIVE_ADDR_sem_for_reading_cb_frame 0x4718
+#endif
+#define HIVE_SIZE_sem_for_reading_cb_frame 40
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_sem_for_reading_cb_frame scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_sem_for_reading_cb_frame 0x46D0
+#else
+#define HIVE_ADDR_sp_sem_for_reading_cb_frame 0x4718
+#endif
+#define HIVE_SIZE_sp_sem_for_reading_cb_frame 40
+
+#ifndef ISP2401
+/* function ia_css_dmaproxy_sp_execute: 3230 */
+#else
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_is_isp_requested
+#define HIVE_MEM_is_isp_requested scalar_processor_2400_dmem
+#define HIVE_ADDR_is_isp_requested 0x320
+#define HIVE_SIZE_is_isp_requested 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_is_isp_requested scalar_processor_2400_dmem
+#define HIVE_ADDR_sp_is_isp_requested 0x320
+#define HIVE_SIZE_sp_is_isp_requested 4
+
+/* function ia_css_dmaproxy_sp_execute: 340F */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_queue_is_empty: 48F9 */
+#else
+/* function ia_css_queue_is_empty: 7098 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_pipeline_sp_has_stopped: 1825 */
+#else
+/* function ia_css_pipeline_sp_has_stopped: 185F */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_circbuf_extract: F44 */
+#else
+/* function ia_css_circbuf_extract: F39 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_tagger_buf_sp_is_locked_from_start: 2B26 */
+#else
+/* function ia_css_tagger_buf_sp_is_locked_from_start: 2CC8 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_current_sp_thread
+#define HIVE_MEM_current_sp_thread scalar_processor_2400_dmem
+#define HIVE_ADDR_current_sp_thread 0x1DC
+#define HIVE_SIZE_current_sp_thread 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_current_sp_thread scalar_processor_2400_dmem
+#define HIVE_ADDR_sp_current_sp_thread 0x1DC
+#define HIVE_SIZE_sp_current_sp_thread 4
+
+#ifndef ISP2401
+/* function ia_css_spctrl_sp_get_spid: 5963 */
+#else
+/* function ia_css_spctrl_sp_get_spid: 5A9E */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_bufq_sp_reset_buffers: 2D3B */
+#else
+/* function ia_css_bufq_sp_reset_buffers: 2EDD */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_dmaproxy_sp_read_byte_addr: 6E35 */
+#else
+/* function ia_css_dmaproxy_sp_read_byte_addr: 6F79 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_rmgr_sp_uninit: 61E1 */
+#else
+/* function ia_css_rmgr_sp_uninit: 631C */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_sp_threads_stack
+#define HIVE_MEM_sp_threads_stack scalar_processor_2400_dmem
+#define HIVE_ADDR_sp_threads_stack 0x164
+#define HIVE_SIZE_sp_threads_stack 28
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_sp_threads_stack scalar_processor_2400_dmem
+#define HIVE_ADDR_sp_sp_threads_stack 0x164
+#define HIVE_SIZE_sp_sp_threads_stack 28
+
+#ifndef ISP2401
+/* function ia_css_circbuf_peek: F26 */
+#else
+/* function ia_css_circbuf_peek: F1B */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_parambuf_sp_wait_for_in_param: 1053 */
+#else
+/* function ia_css_parambuf_sp_wait_for_in_param: 1048 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_isys_sp_token_map_get_exp_id: 5FC6 */
+#else
+/* function ia_css_isys_sp_token_map_get_exp_id: 6101 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_sp_all_cb_elems_param
+#define HIVE_MEM_sp_all_cb_elems_param scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_all_cb_elems_param 0x46F8
+#else
+#define HIVE_ADDR_sp_all_cb_elems_param 0x4740
+#endif
+#define HIVE_SIZE_sp_all_cb_elems_param 16
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_sp_all_cb_elems_param scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_sp_all_cb_elems_param 0x46F8
+#else
+#define HIVE_ADDR_sp_sp_all_cb_elems_param 0x4740
+#endif
+#define HIVE_SIZE_sp_sp_all_cb_elems_param 16
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_pipeline_sp_curr_binary_id
+#define HIVE_MEM_pipeline_sp_curr_binary_id scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_pipeline_sp_curr_binary_id 0x1EC
+#else
+#define HIVE_ADDR_pipeline_sp_curr_binary_id 0x1F0
+#endif
+#define HIVE_SIZE_pipeline_sp_curr_binary_id 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_pipeline_sp_curr_binary_id scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_pipeline_sp_curr_binary_id 0x1EC
+#else
+#define HIVE_ADDR_sp_pipeline_sp_curr_binary_id 0x1F0
+#endif
+#define HIVE_SIZE_sp_pipeline_sp_curr_binary_id 4
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_sp_all_cbs_frame_desc
+#define HIVE_MEM_sp_all_cbs_frame_desc scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_all_cbs_frame_desc 0x4708
+#else
+#define HIVE_ADDR_sp_all_cbs_frame_desc 0x4750
+#endif
+#define HIVE_SIZE_sp_all_cbs_frame_desc 8
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_sp_all_cbs_frame_desc scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_sp_all_cbs_frame_desc 0x4708
+#else
+#define HIVE_ADDR_sp_sp_all_cbs_frame_desc 0x4750
+#endif
+#define HIVE_SIZE_sp_sp_all_cbs_frame_desc 8
+
+#ifndef ISP2401
+/* function sp_isys_copy_func_v2: 706 */
+#else
+/* function sp_isys_copy_func_v2: 69A */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_sem_for_reading_cb_param
+#define HIVE_MEM_sem_for_reading_cb_param scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sem_for_reading_cb_param 0x4710
+#else
+#define HIVE_ADDR_sem_for_reading_cb_param 0x4758
+#endif
+#define HIVE_SIZE_sem_for_reading_cb_param 40
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_sem_for_reading_cb_param scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_sem_for_reading_cb_param 0x4710
+#else
+#define HIVE_ADDR_sp_sem_for_reading_cb_param 0x4758
+#endif
+#define HIVE_SIZE_sp_sem_for_reading_cb_param 40
+
+#ifndef ISP2401
+/* function ia_css_queue_get_used_space: 49C6 */
+#else
+/* function ia_css_queue_get_used_space: 4C24 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_sem_for_cont_capt_start
+#define HIVE_MEM_sem_for_cont_capt_start scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sem_for_cont_capt_start 0x4738
+#else
+#define HIVE_ADDR_sem_for_cont_capt_start 0x4780
+#endif
+#define HIVE_SIZE_sem_for_cont_capt_start 20
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_sem_for_cont_capt_start scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_sem_for_cont_capt_start 0x4738
+#else
+#define HIVE_ADDR_sp_sem_for_cont_capt_start 0x4780
+#endif
+#define HIVE_SIZE_sp_sem_for_cont_capt_start 20
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_tmp_heap
+#define HIVE_MEM_tmp_heap scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_tmp_heap 0x6010
+#else
+#define HIVE_ADDR_tmp_heap 0x6070
+#endif
+#define HIVE_SIZE_tmp_heap 640
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_tmp_heap scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_tmp_heap 0x6010
+#else
+#define HIVE_ADDR_sp_tmp_heap 0x6070
+#endif
+#define HIVE_SIZE_sp_tmp_heap 640
+
+#ifndef ISP2401
+/* function ia_css_rmgr_sp_get_num_vbuf: 64F1 */
+#else
+/* function ia_css_rmgr_sp_get_num_vbuf: 662C */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_ispctrl_sp_output_compute_dma_info: 3F62 */
+#else
+/* function ia_css_ispctrl_sp_output_compute_dma_info: 41A5 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_tagger_sp_lock_exp_id: 20E6 */
+#else
+/* function ia_css_tagger_sp_lock_exp_id: 2136 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_ia_css_bufq_sp_pipe_private_s3a_bufs
+#define HIVE_MEM_ia_css_bufq_sp_pipe_private_s3a_bufs scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_ia_css_bufq_sp_pipe_private_s3a_bufs 0x4B8C
+#else
+#define HIVE_ADDR_ia_css_bufq_sp_pipe_private_s3a_bufs 0x4BE8
+#endif
+#define HIVE_SIZE_ia_css_bufq_sp_pipe_private_s3a_bufs 60
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_ia_css_bufq_sp_pipe_private_s3a_bufs scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_ia_css_bufq_sp_pipe_private_s3a_bufs 0x4B8C
+#else
+#define HIVE_ADDR_sp_ia_css_bufq_sp_pipe_private_s3a_bufs 0x4BE8
+#endif
+#define HIVE_SIZE_sp_ia_css_bufq_sp_pipe_private_s3a_bufs 60
+
+#ifndef ISP2401
+/* function ia_css_queue_is_full: 4A5D */
+#else
+/* function ia_css_queue_is_full: 4CBB */
+#endif
+
+/* function debug_buffer_init_isp: E4 */
+
+#ifndef ISP2401
+/* function ia_css_isys_sp_frontend_uninit: 5D20 */
+#else
+/* function ia_css_isys_sp_frontend_uninit: 5E5B */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_tagger_sp_exp_id_is_locked: 201C */
+#else
+/* function ia_css_tagger_sp_exp_id_is_locked: 206C */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_ia_css_rmgr_sp_mipi_frame_sem
+#define HIVE_MEM_ia_css_rmgr_sp_mipi_frame_sem scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_ia_css_rmgr_sp_mipi_frame_sem 0x66E8
+#else
+#define HIVE_ADDR_ia_css_rmgr_sp_mipi_frame_sem 0x6744
+#endif
+#define HIVE_SIZE_ia_css_rmgr_sp_mipi_frame_sem 60
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_ia_css_rmgr_sp_mipi_frame_sem scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_ia_css_rmgr_sp_mipi_frame_sem 0x66E8
+#else
+#define HIVE_ADDR_sp_ia_css_rmgr_sp_mipi_frame_sem 0x6744
+#endif
+#define HIVE_SIZE_sp_ia_css_rmgr_sp_mipi_frame_sem 60
+
+#ifndef ISP2401
+/* function ia_css_rmgr_sp_refcount_dump: 62C8 */
+#else
+/* function ia_css_rmgr_sp_refcount_dump: 6403 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_ia_css_bufq_sp_pipe_private_isp_parameters_id
+#define HIVE_MEM_ia_css_bufq_sp_pipe_private_isp_parameters_id scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_ia_css_bufq_sp_pipe_private_isp_parameters_id 0x4BC8
+#else
+#define HIVE_ADDR_ia_css_bufq_sp_pipe_private_isp_parameters_id 0x4C24
+#endif
+#define HIVE_SIZE_ia_css_bufq_sp_pipe_private_isp_parameters_id 20
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_ia_css_bufq_sp_pipe_private_isp_parameters_id scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_ia_css_bufq_sp_pipe_private_isp_parameters_id 0x4BC8
+#else
+#define HIVE_ADDR_sp_ia_css_bufq_sp_pipe_private_isp_parameters_id 0x4C24
+#endif
+#define HIVE_SIZE_sp_ia_css_bufq_sp_pipe_private_isp_parameters_id 20
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_sp_pipe_threads
+#define HIVE_MEM_sp_pipe_threads scalar_processor_2400_dmem
+#define HIVE_ADDR_sp_pipe_threads 0x150
+#define HIVE_SIZE_sp_pipe_threads 20
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_sp_pipe_threads scalar_processor_2400_dmem
+#define HIVE_ADDR_sp_sp_pipe_threads 0x150
+#define HIVE_SIZE_sp_sp_pipe_threads 20
+
+#ifndef ISP2401
+/* function sp_event_proxy_func: 71B */
+#else
+/* function sp_event_proxy_func: 6AF */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_host2sp_isys_event_queue_handle
+#define HIVE_MEM_host2sp_isys_event_queue_handle scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_host2sp_isys_event_queue_handle 0x4BDC
+#else
+#define HIVE_ADDR_host2sp_isys_event_queue_handle 0x4C38
+#endif
+#define HIVE_SIZE_host2sp_isys_event_queue_handle 12
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_host2sp_isys_event_queue_handle scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_host2sp_isys_event_queue_handle 0x4BDC
+#else
+#define HIVE_ADDR_sp_host2sp_isys_event_queue_handle 0x4C38
+#endif
+#define HIVE_SIZE_sp_host2sp_isys_event_queue_handle 12
+
+#ifndef ISP2401
+/* function ia_css_thread_sp_yield: 6A70 */
+#else
+/* function ia_css_thread_sp_yield: 6BEA */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_sp_all_cbs_param_desc
+#define HIVE_MEM_sp_all_cbs_param_desc scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_all_cbs_param_desc 0x474C
+#else
+#define HIVE_ADDR_sp_all_cbs_param_desc 0x4794
+#endif
+#define HIVE_SIZE_sp_all_cbs_param_desc 8
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_sp_all_cbs_param_desc scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_sp_all_cbs_param_desc 0x474C
+#else
+#define HIVE_ADDR_sp_sp_all_cbs_param_desc 0x4794
+#endif
+#define HIVE_SIZE_sp_sp_all_cbs_param_desc 8
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_ia_css_dmaproxy_sp_invalidate_tlb
+#define HIVE_MEM_ia_css_dmaproxy_sp_invalidate_tlb scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_ia_css_dmaproxy_sp_invalidate_tlb 0x5BF4
+#else
+#define HIVE_ADDR_ia_css_dmaproxy_sp_invalidate_tlb 0x5C50
+#endif
+#define HIVE_SIZE_ia_css_dmaproxy_sp_invalidate_tlb 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_ia_css_dmaproxy_sp_invalidate_tlb scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_ia_css_dmaproxy_sp_invalidate_tlb 0x5BF4
+#else
+#define HIVE_ADDR_sp_ia_css_dmaproxy_sp_invalidate_tlb 0x5C50
+#endif
+#define HIVE_SIZE_sp_ia_css_dmaproxy_sp_invalidate_tlb 4
+
+#ifndef ISP2401
+/* function ia_css_thread_sp_fork: D10 */
+#else
+/* function ia_css_thread_sp_fork: D05 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_tagger_sp_destroy: 280D */
+#else
+/* function ia_css_tagger_sp_destroy: 285D */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_dmaproxy_sp_vmem_read: 31D0 */
+#else
+/* function ia_css_dmaproxy_sp_vmem_read: 33AF */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_ifmtr_sp_init: 614F */
+#else
+/* function ia_css_ifmtr_sp_init: 628A */
+#endif
+
+#ifndef ISP2401
+/* function initialize_sp_group: 6D4 */
+#else
+/* function initialize_sp_group: 668 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_tagger_buf_sp_peek: 2932 */
+#else
+/* function ia_css_tagger_buf_sp_peek: 2AD4 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_thread_sp_init: D3C */
+#else
+/* function ia_css_thread_sp_init: D31 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_isys_sp_reset_exp_id: 60F6 */
+#else
+/* function ia_css_isys_sp_reset_exp_id: 6231 */
+#endif
+
+#ifndef ISP2401
+/* function qos_scheduler_update_fps: 65F0 */
+#else
+/* function qos_scheduler_update_fps: 6763 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_ispctrl_sp_set_stream_base_addr: 4637 */
+#else
+/* function ia_css_ispctrl_sp_set_stream_base_addr: 4892 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_ISP_DMEM_BASE
+#define HIVE_MEM_ISP_DMEM_BASE scalar_processor_2400_dmem
+#define HIVE_ADDR_ISP_DMEM_BASE 0x10
+#define HIVE_SIZE_ISP_DMEM_BASE 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_ISP_DMEM_BASE scalar_processor_2400_dmem
+#define HIVE_ADDR_sp_ISP_DMEM_BASE 0x10
+#define HIVE_SIZE_sp_ISP_DMEM_BASE 4
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_SP_DMEM_BASE
+#define HIVE_MEM_SP_DMEM_BASE scalar_processor_2400_dmem
+#define HIVE_ADDR_SP_DMEM_BASE 0x4
+#define HIVE_SIZE_SP_DMEM_BASE 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_SP_DMEM_BASE scalar_processor_2400_dmem
+#define HIVE_ADDR_sp_SP_DMEM_BASE 0x4
+#define HIVE_SIZE_sp_SP_DMEM_BASE 4
+
+#ifndef ISP2401
+/* function ia_css_dmaproxy_sp_read: 3246 */
+#else
+/* function __ia_css_queue_is_empty_text: 4B81 */
+
+/* function ia_css_dmaproxy_sp_read: 3425 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_raw_copy_line_count
+#define HIVE_MEM_raw_copy_line_count scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_raw_copy_line_count 0x2C8
+#else
+#define HIVE_ADDR_raw_copy_line_count 0x2E0
+#endif
+#define HIVE_SIZE_raw_copy_line_count 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_raw_copy_line_count scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_raw_copy_line_count 0x2C8
+#else
+#define HIVE_ADDR_sp_raw_copy_line_count 0x2E0
+#endif
+#define HIVE_SIZE_sp_raw_copy_line_count 4
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_host2sp_tag_cmd_queue_handle
+#define HIVE_MEM_host2sp_tag_cmd_queue_handle scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_host2sp_tag_cmd_queue_handle 0x4BE8
+#else
+#define HIVE_ADDR_host2sp_tag_cmd_queue_handle 0x4C44
+#endif
+#define HIVE_SIZE_host2sp_tag_cmd_queue_handle 12
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_host2sp_tag_cmd_queue_handle scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_host2sp_tag_cmd_queue_handle 0x4BE8
+#else
+#define HIVE_ADDR_sp_host2sp_tag_cmd_queue_handle 0x4C44
+#endif
+#define HIVE_SIZE_sp_host2sp_tag_cmd_queue_handle 12
+
+#ifndef ISP2401
+/* function ia_css_queue_peek: 493C */
+#else
+/* function ia_css_queue_peek: 4B9A */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_ia_css_flash_sp_frame_cnt
+#define HIVE_MEM_ia_css_flash_sp_frame_cnt scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_ia_css_flash_sp_frame_cnt 0x4A94
+#else
+#define HIVE_ADDR_ia_css_flash_sp_frame_cnt 0x4AF0
+#endif
+#define HIVE_SIZE_ia_css_flash_sp_frame_cnt 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_ia_css_flash_sp_frame_cnt scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_ia_css_flash_sp_frame_cnt 0x4A94
+#else
+#define HIVE_ADDR_sp_ia_css_flash_sp_frame_cnt 0x4AF0
+#endif
+#define HIVE_SIZE_sp_ia_css_flash_sp_frame_cnt 4
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_event_can_send_token_mask
+#define HIVE_MEM_event_can_send_token_mask scalar_processor_2400_dmem
+#define HIVE_ADDR_event_can_send_token_mask 0x88
+#define HIVE_SIZE_event_can_send_token_mask 44
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_event_can_send_token_mask scalar_processor_2400_dmem
+#define HIVE_ADDR_sp_event_can_send_token_mask 0x88
+#define HIVE_SIZE_sp_event_can_send_token_mask 44
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_isp_thread
+#define HIVE_MEM_isp_thread scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_isp_thread 0x5F40
+#else
+#define HIVE_ADDR_isp_thread 0x5FA0
+#endif
+#define HIVE_SIZE_isp_thread 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_isp_thread scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_isp_thread 0x5F40
+#else
+#define HIVE_ADDR_sp_isp_thread 0x5FA0
+#endif
+#define HIVE_SIZE_sp_isp_thread 4
+
+#ifndef ISP2401
+/* function encode_and_post_sp_event_non_blocking: A78 */
+#else
+/* function encode_and_post_sp_event_non_blocking: A0C */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_isys_sp_frontend_destroy: 5DF8 */
+#else
+/* function ia_css_isys_sp_frontend_destroy: 5F33 */
+#endif
+
+/* function is_ddr_debug_buffer_full: 2CC */
+
+#ifndef ISP2401
+/* function ia_css_isys_sp_frontend_stop: 5D38 */
+#else
+/* function ia_css_isys_sp_frontend_stop: 5E73 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_isys_sp_token_map_init: 6094 */
+#else
+/* function ia_css_isys_sp_token_map_init: 61CF */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_tagger_buf_sp_get_oldest_marked_offset: 2982 */
+#else
+/* function ia_css_tagger_buf_sp_get_oldest_marked_offset: 2B24 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_sp_threads_fiber
+#define HIVE_MEM_sp_threads_fiber scalar_processor_2400_dmem
+#define HIVE_ADDR_sp_threads_fiber 0x19C
+#define HIVE_SIZE_sp_threads_fiber 28
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_sp_threads_fiber scalar_processor_2400_dmem
+#define HIVE_ADDR_sp_sp_threads_fiber 0x19C
+#define HIVE_SIZE_sp_sp_threads_fiber 28
+
+#ifndef ISP2401
+/* function encode_and_post_sp_event: A01 */
+#else
+/* function encode_and_post_sp_event: 995 */
+#endif
+
+/* function debug_enqueue_ddr: EE */
+
+#ifndef ISP2401
+/* function ia_css_rmgr_sp_refcount_init_vbuf: 6283 */
+#else
+/* function ia_css_rmgr_sp_refcount_init_vbuf: 63BE */
+#endif
+
+#ifndef ISP2401
+/* function dmaproxy_sp_read_write: 6EE4 */
+#else
+/* function dmaproxy_sp_read_write: 7017 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_ia_css_dmaproxy_isp_dma_cmd_buffer
+#define HIVE_MEM_ia_css_dmaproxy_isp_dma_cmd_buffer scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_ia_css_dmaproxy_isp_dma_cmd_buffer 0x5BF8
+#else
+#define HIVE_ADDR_ia_css_dmaproxy_isp_dma_cmd_buffer 0x5C54
+#endif
+#define HIVE_SIZE_ia_css_dmaproxy_isp_dma_cmd_buffer 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_ia_css_dmaproxy_isp_dma_cmd_buffer scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_ia_css_dmaproxy_isp_dma_cmd_buffer 0x5BF8
+#else
+#define HIVE_ADDR_sp_ia_css_dmaproxy_isp_dma_cmd_buffer 0x5C54
+#endif
+#define HIVE_SIZE_sp_ia_css_dmaproxy_isp_dma_cmd_buffer 4
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_host2sp_buffer_queue_handle
+#define HIVE_MEM_host2sp_buffer_queue_handle scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_host2sp_buffer_queue_handle 0x4BF4
+#else
+#define HIVE_ADDR_host2sp_buffer_queue_handle 0x4C50
+#endif
+#define HIVE_SIZE_host2sp_buffer_queue_handle 480
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_host2sp_buffer_queue_handle scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_host2sp_buffer_queue_handle 0x4BF4
+#else
+#define HIVE_ADDR_sp_host2sp_buffer_queue_handle 0x4C50
+#endif
+#define HIVE_SIZE_sp_host2sp_buffer_queue_handle 480
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_ia_css_flash_sp_in_service
+#define HIVE_MEM_ia_css_flash_sp_in_service scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_ia_css_flash_sp_in_service 0x3178
+#else
+#define HIVE_ADDR_ia_css_flash_sp_in_service 0x3198
+#endif
+#define HIVE_SIZE_ia_css_flash_sp_in_service 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_ia_css_flash_sp_in_service scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_ia_css_flash_sp_in_service 0x3178
+#else
+#define HIVE_ADDR_sp_ia_css_flash_sp_in_service 0x3198
+#endif
+#define HIVE_SIZE_sp_ia_css_flash_sp_in_service 4
+
+#ifndef ISP2401
+/* function ia_css_dmaproxy_sp_process: 6BF0 */
+#else
+/* function ia_css_dmaproxy_sp_process: 6D63 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_tagger_buf_sp_mark_from_end: 2C0A */
+#else
+/* function ia_css_tagger_buf_sp_mark_from_end: 2DAC */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_isys_sp_backend_rcv_acquire_ack: 5A05 */
+#else
+/* function ia_css_isys_sp_backend_rcv_acquire_ack: 5B40 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_isys_sp_backend_pre_acquire_request: 5A1B */
+#else
+/* function ia_css_isys_sp_backend_pre_acquire_request: 5B56 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_ispctrl_sp_init_cs: 366C */
+#else
+/* function ia_css_ispctrl_sp_init_cs: 386E */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_spctrl_sp_init: 5971 */
+#else
+/* function ia_css_spctrl_sp_init: 5AAC */
+#endif
+
+#ifndef ISP2401
+/* function sp_event_proxy_init: 730 */
+#else
+/* function sp_event_proxy_init: 6C4 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_ia_css_bufq_sp_pipe_private_previous_clock_tick
+#define HIVE_MEM_ia_css_bufq_sp_pipe_private_previous_clock_tick scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_ia_css_bufq_sp_pipe_private_previous_clock_tick 0x4DD4
+#else
+#define HIVE_ADDR_ia_css_bufq_sp_pipe_private_previous_clock_tick 0x4E30
+#endif
+#define HIVE_SIZE_ia_css_bufq_sp_pipe_private_previous_clock_tick 40
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_ia_css_bufq_sp_pipe_private_previous_clock_tick scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_ia_css_bufq_sp_pipe_private_previous_clock_tick 0x4DD4
+#else
+#define HIVE_ADDR_sp_ia_css_bufq_sp_pipe_private_previous_clock_tick 0x4E30
+#endif
+#define HIVE_SIZE_sp_ia_css_bufq_sp_pipe_private_previous_clock_tick 40
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_sp_output
+#define HIVE_MEM_sp_output scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_output 0x41F8
+#else
+#define HIVE_ADDR_sp_output 0x4218
+#endif
+#define HIVE_SIZE_sp_output 16
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_sp_output scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_sp_output 0x41F8
+#else
+#define HIVE_ADDR_sp_sp_output 0x4218
+#endif
+#define HIVE_SIZE_sp_sp_output 16
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_ia_css_bufq_sp_sems_for_host2sp_buf_queues
+#define HIVE_MEM_ia_css_bufq_sp_sems_for_host2sp_buf_queues scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_ia_css_bufq_sp_sems_for_host2sp_buf_queues 0x4DFC
+#else
+#define HIVE_ADDR_ia_css_bufq_sp_sems_for_host2sp_buf_queues 0x4E58
+#endif
+#define HIVE_SIZE_ia_css_bufq_sp_sems_for_host2sp_buf_queues 800
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_ia_css_bufq_sp_sems_for_host2sp_buf_queues scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_ia_css_bufq_sp_sems_for_host2sp_buf_queues 0x4DFC
+#else
+#define HIVE_ADDR_sp_ia_css_bufq_sp_sems_for_host2sp_buf_queues 0x4E58
+#endif
+#define HIVE_SIZE_sp_ia_css_bufq_sp_sems_for_host2sp_buf_queues 800
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_ISP_CTRL_BASE
+#define HIVE_MEM_ISP_CTRL_BASE scalar_processor_2400_dmem
+#define HIVE_ADDR_ISP_CTRL_BASE 0x8
+#define HIVE_SIZE_ISP_CTRL_BASE 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_ISP_CTRL_BASE scalar_processor_2400_dmem
+#define HIVE_ADDR_sp_ISP_CTRL_BASE 0x8
+#define HIVE_SIZE_sp_ISP_CTRL_BASE 4
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_INPUT_FORMATTER_BASE
+#define HIVE_MEM_INPUT_FORMATTER_BASE scalar_processor_2400_dmem
+#define HIVE_ADDR_INPUT_FORMATTER_BASE 0x4C
+#define HIVE_SIZE_INPUT_FORMATTER_BASE 16
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_INPUT_FORMATTER_BASE scalar_processor_2400_dmem
+#define HIVE_ADDR_sp_INPUT_FORMATTER_BASE 0x4C
+#define HIVE_SIZE_sp_INPUT_FORMATTER_BASE 16
+
+#ifndef ISP2401
+/* function sp_dma_proxy_reset_channels: 34A0 */
+#else
+/* function sp_dma_proxy_reset_channels: 3694 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_isys_sp_backend_acquire: 5B26 */
+#else
+/* function ia_css_isys_sp_backend_acquire: 5C61 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_tagger_sp_update_size: 2901 */
+#else
+/* function ia_css_tagger_sp_update_size: 2AA3 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_ia_css_bufq_host_sp_queue
+#define HIVE_MEM_ia_css_bufq_host_sp_queue scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_ia_css_bufq_host_sp_queue 0x511C
+#else
+#define HIVE_ADDR_ia_css_bufq_host_sp_queue 0x5178
+#endif
+#define HIVE_SIZE_ia_css_bufq_host_sp_queue 2008
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_ia_css_bufq_host_sp_queue scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_ia_css_bufq_host_sp_queue 0x511C
+#else
+#define HIVE_ADDR_sp_ia_css_bufq_host_sp_queue 0x5178
+#endif
+#define HIVE_SIZE_sp_ia_css_bufq_host_sp_queue 2008
+
+#ifndef ISP2401
+/* function thread_fiber_sp_create: DA8 */
+#else
+/* function thread_fiber_sp_create: D9D */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_dmaproxy_sp_set_increments: 3332 */
+#else
+/* function ia_css_dmaproxy_sp_set_increments: 3526 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_sem_for_writing_cb_frame
+#define HIVE_MEM_sem_for_writing_cb_frame scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sem_for_writing_cb_frame 0x4754
+#else
+#define HIVE_ADDR_sem_for_writing_cb_frame 0x479C
+#endif
+#define HIVE_SIZE_sem_for_writing_cb_frame 20
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_sem_for_writing_cb_frame scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_sem_for_writing_cb_frame 0x4754
+#else
+#define HIVE_ADDR_sp_sem_for_writing_cb_frame 0x479C
+#endif
+#define HIVE_SIZE_sp_sem_for_writing_cb_frame 20
+
+#ifndef ISP2401
+/* function receiver_reg_store: AD7 */
+#else
+/* function receiver_reg_store: AD1 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_sem_for_writing_cb_param
+#define HIVE_MEM_sem_for_writing_cb_param scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sem_for_writing_cb_param 0x4768
+#else
+#define HIVE_ADDR_sem_for_writing_cb_param 0x47B0
+#endif
+#define HIVE_SIZE_sem_for_writing_cb_param 20
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_sem_for_writing_cb_param scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_sem_for_writing_cb_param 0x4768
+#else
+#define HIVE_ADDR_sp_sem_for_writing_cb_param 0x47B0
+#endif
+#define HIVE_SIZE_sp_sem_for_writing_cb_param 20
+
+/* function sp_start_isp_entry: 453 */
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifdef HIVE_ADDR_sp_start_isp_entry
+#endif
+#define HIVE_ADDR_sp_start_isp_entry 0x453
+#endif
+#define HIVE_ADDR_sp_sp_start_isp_entry 0x453
+
+#ifndef ISP2401
+/* function ia_css_tagger_buf_sp_unmark_all: 2B8E */
+#else
+/* function ia_css_tagger_buf_sp_unmark_all: 2D30 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_tagger_buf_sp_unmark_from_start: 2BCF */
+#else
+/* function ia_css_tagger_buf_sp_unmark_from_start: 2D71 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_dmaproxy_sp_channel_acquire: 34CC */
+#else
+/* function ia_css_dmaproxy_sp_channel_acquire: 36C0 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_rmgr_sp_add_num_vbuf: 64CD */
+#else
+/* function ia_css_rmgr_sp_add_num_vbuf: 6608 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_isys_sp_token_map_create: 60DD */
+#else
+/* function ia_css_isys_sp_token_map_create: 6218 */
+#endif
+
+#ifndef ISP2401
+/* function __ia_css_dmaproxy_sp_wait_for_ack_text: 319C */
+#else
+/* function __ia_css_dmaproxy_sp_wait_for_ack_text: 337B */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_tagger_sp_acquire_buf_elem: 1FF4 */
+#else
+/* function ia_css_tagger_sp_acquire_buf_elem: 2044 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_bufq_sp_is_dynamic_buffer: 3085 */
+#else
+/* function ia_css_bufq_sp_is_dynamic_buffer: 3227 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_sp_group
+#define HIVE_MEM_sp_group scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_group 0x4208
+#define HIVE_SIZE_sp_group 1144
+#else
+#define HIVE_ADDR_sp_group 0x4228
+#define HIVE_SIZE_sp_group 1184
+#endif
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_sp_group scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_sp_group 0x4208
+#define HIVE_SIZE_sp_sp_group 1144
+#else
+#define HIVE_ADDR_sp_sp_group 0x4228
+#define HIVE_SIZE_sp_sp_group 1184
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_sp_event_proxy_thread
+#define HIVE_MEM_sp_event_proxy_thread scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_event_proxy_thread 0x4954
+#define HIVE_SIZE_sp_event_proxy_thread 68
+#else
+#define HIVE_ADDR_sp_event_proxy_thread 0x49B0
+#define HIVE_SIZE_sp_event_proxy_thread 72
+#endif
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_sp_event_proxy_thread scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_sp_event_proxy_thread 0x4954
+#define HIVE_SIZE_sp_sp_event_proxy_thread 68
+#else
+#define HIVE_ADDR_sp_sp_event_proxy_thread 0x49B0
+#define HIVE_SIZE_sp_sp_event_proxy_thread 72
+#endif
+
+#ifndef ISP2401
+/* function ia_css_thread_sp_kill: CD6 */
+#else
+/* function ia_css_thread_sp_kill: CCB */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_tagger_sp_create: 28BB */
+#else
+/* function ia_css_tagger_sp_create: 2A51 */
+#endif
+
+#ifndef ISP2401
+/* function tmpmem_acquire_dmem: 657A */
+#else
+/* function tmpmem_acquire_dmem: 66B5 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_MMU_BASE
+#define HIVE_MEM_MMU_BASE scalar_processor_2400_dmem
+#define HIVE_ADDR_MMU_BASE 0x24
+#define HIVE_SIZE_MMU_BASE 8
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_MMU_BASE scalar_processor_2400_dmem
+#define HIVE_ADDR_sp_MMU_BASE 0x24
+#define HIVE_SIZE_sp_MMU_BASE 8
+
+#ifndef ISP2401
+/* function ia_css_dmaproxy_sp_channel_release: 34B8 */
+#else
+/* function ia_css_dmaproxy_sp_channel_release: 36AC */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_dmaproxy_sp_is_idle: 3498 */
+#else
+/* function ia_css_dmaproxy_sp_is_idle: 368C */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_sem_for_qos_start
+#define HIVE_MEM_sem_for_qos_start scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sem_for_qos_start 0x477C
+#else
+#define HIVE_ADDR_sem_for_qos_start 0x47C4
+#endif
+#define HIVE_SIZE_sem_for_qos_start 20
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_sem_for_qos_start scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_sem_for_qos_start 0x477C
+#else
+#define HIVE_ADDR_sp_sem_for_qos_start 0x47C4
+#endif
+#define HIVE_SIZE_sp_sem_for_qos_start 20
+
+#ifndef ISP2401
+/* function isp_hmem_load: B55 */
+#else
+/* function isp_hmem_load: B4F */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_tagger_sp_release_buf_elem: 1FD0 */
+#else
+/* function ia_css_tagger_sp_release_buf_elem: 2020 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_eventq_sp_send: 350E */
+#else
+/* function ia_css_eventq_sp_send: 3702 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_ia_css_isys_sp_error_cnt
+#define HIVE_MEM_ia_css_isys_sp_error_cnt scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_ia_css_isys_sp_error_cnt 0x62D4
+#else
+#define HIVE_ADDR_ia_css_isys_sp_error_cnt 0x6330
+#endif
+#define HIVE_SIZE_ia_css_isys_sp_error_cnt 16
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_ia_css_isys_sp_error_cnt scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_ia_css_isys_sp_error_cnt 0x62D4
+#else
+#define HIVE_ADDR_sp_ia_css_isys_sp_error_cnt 0x6330
+#endif
+#define HIVE_SIZE_sp_ia_css_isys_sp_error_cnt 16
+
+#ifndef ISP2401
+/* function ia_css_tagger_buf_sp_unlock_from_start: 2ABE */
+#else
+/* function ia_css_tagger_buf_sp_unlock_from_start: 2C60 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_debug_buffer_ddr_address
+#define HIVE_MEM_debug_buffer_ddr_address scalar_processor_2400_dmem
+#define HIVE_ADDR_debug_buffer_ddr_address 0xBC
+#define HIVE_SIZE_debug_buffer_ddr_address 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_debug_buffer_ddr_address scalar_processor_2400_dmem
+#define HIVE_ADDR_sp_debug_buffer_ddr_address 0xBC
+#define HIVE_SIZE_sp_debug_buffer_ddr_address 4
+
+#ifndef ISP2401
+/* function sp_isys_copy_request: 714 */
+#else
+/* function sp_isys_copy_request: 6A8 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_rmgr_sp_refcount_retain_vbuf: 635D */
+#else
+/* function ia_css_rmgr_sp_refcount_retain_vbuf: 6498 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_thread_sp_set_priority: CCE */
+#else
+/* function ia_css_thread_sp_set_priority: CC3 */
+#endif
+
+#ifndef ISP2401
+/* function sizeof_hmem: BFC */
+#else
+/* function sizeof_hmem: BF6 */
+#endif
+
+#ifndef ISP2401
+/* function tmpmem_release_dmem: 6569 */
+#else
+/* function tmpmem_release_dmem: 66A4 */
+#endif
+
+/* function cnd_input_system_cfg: 392 */
+
+#ifndef ISP2401
+/* function __ia_css_sp_rawcopy_func_critical: 6F65 */
+#else
+/* function __ia_css_sp_rawcopy_func_critical: 70C2 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_dmaproxy_sp_set_width_exception: 331D */
+#else
+/* function __ia_css_dmaproxy_sp_process_text: 331F */
+#endif
+
+#ifndef ISP2401
+/* function sp_event_assert: 8B1 */
+#else
+/* function ia_css_dmaproxy_sp_set_width_exception: 3511 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_flash_sp_init_internal_params: 2CA9 */
+#else
+/* function sp_event_assert: 845 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_tagger_buf_sp_pop_unmarked_and_unlocked: 29C4 */
+#else
+/* function ia_css_flash_sp_init_internal_params: 2E4B */
+#endif
+
+#ifndef ISP2401
+/* function __modu: 68BB */
+#else
+/* function ia_css_tagger_buf_sp_pop_unmarked_and_unlocked: 2B66 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_dmaproxy_sp_init_isp_vector: 31A2 */
+#else
+/* function __modu: 6A2E */
+
+/* function ia_css_dmaproxy_sp_init_isp_vector: 3381 */
+#endif
+
+/* function isp_vamem_store: 0 */
+
+#ifdef ISP2401
+/* function ia_css_tagger_sp_set_copy_pipe: 2A48 */
+
+#endif
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_GDC_BASE
+#define HIVE_MEM_GDC_BASE scalar_processor_2400_dmem
+#define HIVE_ADDR_GDC_BASE 0x44
+#define HIVE_SIZE_GDC_BASE 8
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_GDC_BASE scalar_processor_2400_dmem
+#define HIVE_ADDR_sp_GDC_BASE 0x44
+#define HIVE_SIZE_sp_GDC_BASE 8
+
+#ifndef ISP2401
+/* function ia_css_queue_local_init: 4C27 */
+#else
+/* function ia_css_queue_local_init: 4E85 */
+#endif
+
+#ifndef ISP2401
+/* function sp_event_proxy_callout_func: 6988 */
+#else
+/* function sp_event_proxy_callout_func: 6AFB */
+#endif
+
+#ifndef ISP2401
+/* function qos_scheduler_schedule_stage: 65C1 */
+#else
+/* function qos_scheduler_schedule_stage: 670F */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_ia_css_thread_sp_num_ready_threads
+#define HIVE_MEM_ia_css_thread_sp_num_ready_threads scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_ia_css_thread_sp_num_ready_threads 0x49E0
+#else
+#define HIVE_ADDR_ia_css_thread_sp_num_ready_threads 0x4A40
+#endif
+#define HIVE_SIZE_ia_css_thread_sp_num_ready_threads 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_ia_css_thread_sp_num_ready_threads scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_ia_css_thread_sp_num_ready_threads 0x49E0
+#else
+#define HIVE_ADDR_sp_ia_css_thread_sp_num_ready_threads 0x4A40
+#endif
+#define HIVE_SIZE_sp_ia_css_thread_sp_num_ready_threads 4
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_sp_threads_stack_size
+#define HIVE_MEM_sp_threads_stack_size scalar_processor_2400_dmem
+#define HIVE_ADDR_sp_threads_stack_size 0x180
+#define HIVE_SIZE_sp_threads_stack_size 28
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_sp_threads_stack_size scalar_processor_2400_dmem
+#define HIVE_ADDR_sp_sp_threads_stack_size 0x180
+#define HIVE_SIZE_sp_sp_threads_stack_size 28
+
+#ifndef ISP2401
+/* function ia_css_ispctrl_sp_isp_done_row_striping: 3F48 */
+#else
+/* function ia_css_ispctrl_sp_isp_done_row_striping: 418B */
+#endif
+
+#ifndef ISP2401
+/* function __ia_css_isys_sp_isr_text: 5E22 */
+#else
+/* function __ia_css_isys_sp_isr_text: 5F5D */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_queue_dequeue: 4AA5 */
+#else
+/* function ia_css_queue_dequeue: 4D03 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_dmaproxy_sp_configure_channel: 6E4C */
+#else
+/* function is_qos_standalone_mode: 66EA */
+
+/* function ia_css_dmaproxy_sp_configure_channel: 6F90 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_current_thread_fiber_sp
+#define HIVE_MEM_current_thread_fiber_sp scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_current_thread_fiber_sp 0x49E8
+#else
+#define HIVE_ADDR_current_thread_fiber_sp 0x4A44
+#endif
+#define HIVE_SIZE_current_thread_fiber_sp 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_current_thread_fiber_sp scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_current_thread_fiber_sp 0x49E8
+#else
+#define HIVE_ADDR_sp_current_thread_fiber_sp 0x4A44
+#endif
+#define HIVE_SIZE_sp_current_thread_fiber_sp 4
+
+#ifndef ISP2401
+/* function ia_css_circbuf_pop: FD8 */
+#else
+/* function ia_css_circbuf_pop: FCD */
+#endif
+
+#ifndef ISP2401
+/* function memset: 693A */
+#else
+/* function memset: 6AAD */
+#endif
+
+/* function irq_raise_set_token: B6 */
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_GPIO_BASE
+#define HIVE_MEM_GPIO_BASE scalar_processor_2400_dmem
+#define HIVE_ADDR_GPIO_BASE 0x3C
+#define HIVE_SIZE_GPIO_BASE 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_GPIO_BASE scalar_processor_2400_dmem
+#define HIVE_ADDR_sp_GPIO_BASE 0x3C
+#define HIVE_SIZE_sp_GPIO_BASE 4
+
+#ifndef ISP2401
+/* function ia_css_pipeline_acc_stage_enable: 17F0 */
+#else
+/* function ia_css_pipeline_acc_stage_enable: 1818 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_tagger_sp_unlock_exp_id: 2041 */
+#else
+/* function ia_css_tagger_sp_unlock_exp_id: 2091 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_isp_ph
+#define HIVE_MEM_isp_ph scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_isp_ph 0x62E4
+#else
+#define HIVE_ADDR_isp_ph 0x6340
+#endif
+#define HIVE_SIZE_isp_ph 28
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_isp_ph scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_isp_ph 0x62E4
+#else
+#define HIVE_ADDR_sp_isp_ph 0x6340
+#endif
+#define HIVE_SIZE_sp_isp_ph 28
+
+#ifndef ISP2401
+/* function ia_css_isys_sp_token_map_flush: 6022 */
+#else
+/* function ia_css_isys_sp_token_map_flush: 615D */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_ispctrl_sp_init_ds: 37CB */
+#else
+/* function ia_css_ispctrl_sp_init_ds: 39FA */
+#endif
+
+#ifndef ISP2401
+/* function get_xmem_base_addr_raw: 3B78 */
+#else
+/* function get_xmem_base_addr_raw: 3DB3 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_sp_all_cbs_param
+#define HIVE_MEM_sp_all_cbs_param scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_all_cbs_param 0x4790
+#else
+#define HIVE_ADDR_sp_all_cbs_param 0x47D8
+#endif
+#define HIVE_SIZE_sp_all_cbs_param 16
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_sp_all_cbs_param scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_sp_all_cbs_param 0x4790
+#else
+#define HIVE_ADDR_sp_sp_all_cbs_param 0x47D8
+#endif
+#define HIVE_SIZE_sp_sp_all_cbs_param 16
+
+#ifndef ISP2401
+/* function ia_css_circbuf_create: 1026 */
+#else
+/* function ia_css_circbuf_create: 101B */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_sem_for_sp_group
+#define HIVE_MEM_sem_for_sp_group scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sem_for_sp_group 0x47A0
+#else
+#define HIVE_ADDR_sem_for_sp_group 0x47E8
+#endif
+#define HIVE_SIZE_sem_for_sp_group 20
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_sem_for_sp_group scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_sem_for_sp_group 0x47A0
+#else
+#define HIVE_ADDR_sp_sem_for_sp_group 0x47E8
+#endif
+#define HIVE_SIZE_sp_sem_for_sp_group 20
+
+#ifndef ISP2401
+/* function ia_css_framebuf_sp_wait_for_in_frame: 64F8 */
+#else
+/* function __ia_css_dmaproxy_sp_configure_channel_text: 34F0 */
+
+/* function ia_css_framebuf_sp_wait_for_in_frame: 6633 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_sp_rawcopy_tag_frame: 5588 */
+#else
+/* function ia_css_sp_rawcopy_tag_frame: 57C9 */
+#endif
+
+#ifndef ISP2401
+/* function isp_hmem_clear: B25 */
+#else
+/* function isp_hmem_clear: B1F */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_framebuf_sp_release_in_frame: 653B */
+#else
+/* function ia_css_framebuf_sp_release_in_frame: 6676 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_isys_sp_backend_snd_acquire_request: 5A78 */
+#else
+/* function ia_css_isys_sp_backend_snd_acquire_request: 5BB3 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_isys_sp_token_map_is_full: 5EA9 */
+#else
+/* function ia_css_isys_sp_token_map_is_full: 5FE4 */
+#endif
+
+#ifndef ISP2401
+/* function input_system_acquisition_run: AF9 */
+#else
+/* function input_system_acquisition_run: AF3 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_ispctrl_sp_start_binary: 364A */
+#else
+/* function ia_css_ispctrl_sp_start_binary: 384C */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_ia_css_bufq_sp_h_pipe_private_ddr_ptrs
+#define HIVE_MEM_ia_css_bufq_sp_h_pipe_private_ddr_ptrs scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_ia_css_bufq_sp_h_pipe_private_ddr_ptrs 0x58F4
+#else
+#define HIVE_ADDR_ia_css_bufq_sp_h_pipe_private_ddr_ptrs 0x5950
+#endif
+#define HIVE_SIZE_ia_css_bufq_sp_h_pipe_private_ddr_ptrs 20
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_ia_css_bufq_sp_h_pipe_private_ddr_ptrs scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_ia_css_bufq_sp_h_pipe_private_ddr_ptrs 0x58F4
+#else
+#define HIVE_ADDR_sp_ia_css_bufq_sp_h_pipe_private_ddr_ptrs 0x5950
+#endif
+#define HIVE_SIZE_sp_ia_css_bufq_sp_h_pipe_private_ddr_ptrs 20
+
+#ifndef ISP2401
+/* function ia_css_eventq_sp_recv: 34E0 */
+#else
+/* function ia_css_eventq_sp_recv: 36D4 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_isp_pool
+#define HIVE_MEM_isp_pool scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_isp_pool 0x2E8
+#else
+#define HIVE_ADDR_isp_pool 0x300
+#endif
+#define HIVE_SIZE_isp_pool 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_isp_pool scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_isp_pool 0x2E8
+#else
+#define HIVE_ADDR_sp_isp_pool 0x300
+#endif
+#define HIVE_SIZE_sp_isp_pool 4
+
+#ifndef ISP2401
+/* function ia_css_rmgr_sp_rel_gen: 622A */
+#else
+/* function ia_css_rmgr_sp_rel_gen: 6365 */
+
+/* function ia_css_tagger_sp_unblock_clients: 2919 */
+#endif
+
+#ifndef ISP2401
+/* function css_get_frame_processing_time_end: 1FC0 */
+#else
+/* function css_get_frame_processing_time_end: 2010 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_event_any_pending_mask
+#define HIVE_MEM_event_any_pending_mask scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_event_any_pending_mask 0x300
+#else
+#define HIVE_ADDR_event_any_pending_mask 0x318
+#endif
+#define HIVE_SIZE_event_any_pending_mask 8
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_event_any_pending_mask scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_event_any_pending_mask 0x300
+#else
+#define HIVE_ADDR_sp_event_any_pending_mask 0x318
+#endif
+#define HIVE_SIZE_sp_event_any_pending_mask 8
+
+#ifndef ISP2401
+/* function ia_css_isys_sp_backend_push: 5A2F */
+#else
+/* function ia_css_isys_sp_backend_push: 5B6A */
+#endif
+
+/* function sh_css_decode_tag_descr: 352 */
+
+/* function debug_enqueue_isp: 27B */
+
+#ifndef ISP2401
+/* function qos_scheduler_update_stage_budget: 65AF */
+#else
+/* function qos_scheduler_update_stage_budget: 66F2 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_spctrl_sp_uninit: 596A */
+#else
+/* function ia_css_spctrl_sp_uninit: 5AA5 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_HIVE_IF_SWITCH_CODE
+#define HIVE_MEM_HIVE_IF_SWITCH_CODE scalar_processor_2400_dmem
+#define HIVE_ADDR_HIVE_IF_SWITCH_CODE 0x1D8
+#define HIVE_SIZE_HIVE_IF_SWITCH_CODE 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_HIVE_IF_SWITCH_CODE scalar_processor_2400_dmem
+#define HIVE_ADDR_sp_HIVE_IF_SWITCH_CODE 0x1D8
+#define HIVE_SIZE_sp_HIVE_IF_SWITCH_CODE 4
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_ia_css_bufq_sp_pipe_private_dis_bufs
+#define HIVE_MEM_ia_css_bufq_sp_pipe_private_dis_bufs scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_ia_css_bufq_sp_pipe_private_dis_bufs 0x5908
+#else
+#define HIVE_ADDR_ia_css_bufq_sp_pipe_private_dis_bufs 0x5964
+#endif
+#define HIVE_SIZE_ia_css_bufq_sp_pipe_private_dis_bufs 140
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_ia_css_bufq_sp_pipe_private_dis_bufs scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_ia_css_bufq_sp_pipe_private_dis_bufs 0x5908
+#else
+#define HIVE_ADDR_sp_ia_css_bufq_sp_pipe_private_dis_bufs 0x5964
+#endif
+#define HIVE_SIZE_sp_ia_css_bufq_sp_pipe_private_dis_bufs 140
+
+#ifndef ISP2401
+/* function ia_css_tagger_buf_sp_lock_from_start: 2AF2 */
+#else
+/* function ia_css_tagger_buf_sp_lock_from_start: 2C94 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_sem_for_isp_idle
+#define HIVE_MEM_sem_for_isp_idle scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sem_for_isp_idle 0x47B4
+#else
+#define HIVE_ADDR_sem_for_isp_idle 0x47FC
+#endif
+#define HIVE_SIZE_sem_for_isp_idle 20
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_sem_for_isp_idle scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_sem_for_isp_idle 0x47B4
+#else
+#define HIVE_ADDR_sp_sem_for_isp_idle 0x47FC
+#endif
+#define HIVE_SIZE_sp_sem_for_isp_idle 20
+
+#ifndef ISP2401
+/* function ia_css_dmaproxy_sp_write_byte_addr: 31FF */
+#else
+/* function ia_css_dmaproxy_sp_write_byte_addr: 33DE */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_dmaproxy_sp_init: 3176 */
+#else
+/* function ia_css_dmaproxy_sp_init: 3355 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_bufq_sp_release_dynamic_buf_clock_tick: 2D7B */
+#else
+/* function ia_css_bufq_sp_release_dynamic_buf_clock_tick: 2F1D */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_ISP_VAMEM_BASE
+#define HIVE_MEM_ISP_VAMEM_BASE scalar_processor_2400_dmem
+#define HIVE_ADDR_ISP_VAMEM_BASE 0x14
+#define HIVE_SIZE_ISP_VAMEM_BASE 12
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_ISP_VAMEM_BASE scalar_processor_2400_dmem
+#define HIVE_ADDR_sp_ISP_VAMEM_BASE 0x14
+#define HIVE_SIZE_sp_ISP_VAMEM_BASE 12
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_ia_css_rawcopy_sp_tagger
+#define HIVE_MEM_ia_css_rawcopy_sp_tagger scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_ia_css_rawcopy_sp_tagger 0x6294
+#else
+#define HIVE_ADDR_ia_css_rawcopy_sp_tagger 0x62F0
+#endif
+#define HIVE_SIZE_ia_css_rawcopy_sp_tagger 24
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_ia_css_rawcopy_sp_tagger scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_ia_css_rawcopy_sp_tagger 0x6294
+#else
+#define HIVE_ADDR_sp_ia_css_rawcopy_sp_tagger 0x62F0
+#endif
+#define HIVE_SIZE_sp_ia_css_rawcopy_sp_tagger 24
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_ia_css_bufq_sp_pipe_private_exp_ids
+#define HIVE_MEM_ia_css_bufq_sp_pipe_private_exp_ids scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_ia_css_bufq_sp_pipe_private_exp_ids 0x5994
+#else
+#define HIVE_ADDR_ia_css_bufq_sp_pipe_private_exp_ids 0x59F0
+#endif
+#define HIVE_SIZE_ia_css_bufq_sp_pipe_private_exp_ids 70
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_ia_css_bufq_sp_pipe_private_exp_ids scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_ia_css_bufq_sp_pipe_private_exp_ids 0x5994
+#else
+#define HIVE_ADDR_sp_ia_css_bufq_sp_pipe_private_exp_ids 0x59F0
+#endif
+#define HIVE_SIZE_sp_ia_css_bufq_sp_pipe_private_exp_ids 70
+
+#ifndef ISP2401
+/* function ia_css_queue_item_load: 4D19 */
+#else
+/* function ia_css_queue_item_load: 4F77 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_spctrl_sp_get_state: 5955 */
+#else
+/* function ia_css_spctrl_sp_get_state: 5A90 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_isys_sp_token_map_uninit: 603F */
+#else
+/* function ia_css_isys_sp_token_map_uninit: 617A */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_callout_sp_thread
+#define HIVE_MEM_callout_sp_thread scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_callout_sp_thread 0x49DC
+#else
+#define HIVE_ADDR_callout_sp_thread 0x1E0
+#endif
+#define HIVE_SIZE_callout_sp_thread 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_callout_sp_thread scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_callout_sp_thread 0x49DC
+#else
+#define HIVE_ADDR_sp_callout_sp_thread 0x1E0
+#endif
+#define HIVE_SIZE_sp_callout_sp_thread 4
+
+#ifndef ISP2401
+/* function thread_fiber_sp_init: E2F */
+#else
+/* function thread_fiber_sp_init: E24 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_SP_PMEM_BASE
+#define HIVE_MEM_SP_PMEM_BASE scalar_processor_2400_dmem
+#define HIVE_ADDR_SP_PMEM_BASE 0x0
+#define HIVE_SIZE_SP_PMEM_BASE 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_SP_PMEM_BASE scalar_processor_2400_dmem
+#define HIVE_ADDR_sp_SP_PMEM_BASE 0x0
+#define HIVE_SIZE_sp_SP_PMEM_BASE 4
+
+#ifndef ISP2401
+/* function ia_css_isys_sp_token_map_snd_acquire_req: 5FAF */
+#else
+/* function ia_css_isys_sp_token_map_snd_acquire_req: 60EA */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_sp_isp_input_stream_format
+#define HIVE_MEM_sp_isp_input_stream_format scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_isp_input_stream_format 0x40F8
+#else
+#define HIVE_ADDR_sp_isp_input_stream_format 0x4118
+#endif
+#define HIVE_SIZE_sp_isp_input_stream_format 20
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_sp_isp_input_stream_format scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_sp_isp_input_stream_format 0x40F8
+#else
+#define HIVE_ADDR_sp_sp_isp_input_stream_format 0x4118
+#endif
+#define HIVE_SIZE_sp_sp_isp_input_stream_format 20
+
+#ifndef ISP2401
+/* function __mod: 68A7 */
+#else
+/* function __mod: 6A1A */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_dmaproxy_sp_init_dmem_channel: 3260 */
+#else
+/* function ia_css_dmaproxy_sp_init_dmem_channel: 343F */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_thread_sp_join: CFF */
+#else
+/* function ia_css_thread_sp_join: CF4 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_dmaproxy_sp_add_command: 6F4F */
+#else
+/* function ia_css_dmaproxy_sp_add_command: 7082 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_sp_metadata_thread_func: 5809 */
+#else
+/* function ia_css_sp_metadata_thread_func: 5968 */
+#endif
+
+#ifndef ISP2401
+/* function __sp_event_proxy_func_critical: 6975 */
+#else
+/* function __sp_event_proxy_func_critical: 6AE8 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_sp_metadata_wait: 591C */
+#else
+/* function ia_css_sp_metadata_wait: 5A57 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_circbuf_peek_from_start: F08 */
+#else
+/* function ia_css_circbuf_peek_from_start: EFD */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_event_sp_encode: 356B */
+#else
+/* function ia_css_event_sp_encode: 375F */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_thread_sp_run: D72 */
+#else
+/* function ia_css_thread_sp_run: D67 */
+#endif
+
+#ifndef ISP2401
+/* function sp_isys_copy_func: 6F6 */
+#else
+/* function sp_isys_copy_func: 68A */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_isys_sp_backend_flush: 5A98 */
+#else
+/* function ia_css_isys_sp_backend_flush: 5BD3 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_isys_sp_backend_frame_exists: 59B4 */
+#else
+/* function ia_css_isys_sp_backend_frame_exists: 5AEF */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_sp_isp_param_init_isp_memories: 47A2 */
+#else
+/* function ia_css_sp_isp_param_init_isp_memories: 4A2A */
+#endif
+
+#ifndef ISP2401
+/* function register_isr: 8A9 */
+#else
+/* function register_isr: 83D */
+#endif
+
+/* function irq_raise: C8 */
+
+#ifndef ISP2401
+/* function ia_css_dmaproxy_sp_mmu_invalidate: 313D */
+#else
+/* function ia_css_dmaproxy_sp_mmu_invalidate: 32E5 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_HIVE_IF_SRST_ADDRESS
+#define HIVE_MEM_HIVE_IF_SRST_ADDRESS scalar_processor_2400_dmem
+#define HIVE_ADDR_HIVE_IF_SRST_ADDRESS 0x1B8
+#define HIVE_SIZE_HIVE_IF_SRST_ADDRESS 16
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_HIVE_IF_SRST_ADDRESS scalar_processor_2400_dmem
+#define HIVE_ADDR_sp_HIVE_IF_SRST_ADDRESS 0x1B8
+#define HIVE_SIZE_sp_HIVE_IF_SRST_ADDRESS 16
+
+#ifndef ISP2401
+/* function pipeline_sp_initialize_stage: 1924 */
+#else
+/* function pipeline_sp_initialize_stage: 195E */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_ia_css_isys_sp_frontend_states
+#define HIVE_MEM_ia_css_isys_sp_frontend_states scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_ia_css_isys_sp_frontend_states 0x62C8
+#else
+#define HIVE_ADDR_ia_css_isys_sp_frontend_states 0x6324
+#endif
+#define HIVE_SIZE_ia_css_isys_sp_frontend_states 12
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_ia_css_isys_sp_frontend_states scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_ia_css_isys_sp_frontend_states 0x62C8
+#else
+#define HIVE_ADDR_sp_ia_css_isys_sp_frontend_states 0x6324
+#endif
+#define HIVE_SIZE_sp_ia_css_isys_sp_frontend_states 12
+
+#ifndef ISP2401
+/* function ia_css_dmaproxy_sp_read_byte_addr_mmio: 6E1E */
+#else
+/* function ia_css_dmaproxy_sp_read_byte_addr_mmio: 6F62 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_ispctrl_sp_done_ds: 37B2 */
+#else
+/* function ia_css_ispctrl_sp_done_ds: 39E1 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_sp_isp_param_get_mem_inits: 477D */
+#else
+/* function ia_css_sp_isp_param_get_mem_inits: 4A05 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_parambuf_sp_init_buffer_queues: 13D0 */
+#else
+/* function ia_css_parambuf_sp_init_buffer_queues: 13F1 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_vbuf_pfp_spref
+#define HIVE_MEM_vbuf_pfp_spref scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_vbuf_pfp_spref 0x2F0
+#else
+#define HIVE_ADDR_vbuf_pfp_spref 0x308
+#endif
+#define HIVE_SIZE_vbuf_pfp_spref 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_vbuf_pfp_spref scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_vbuf_pfp_spref 0x2F0
+#else
+#define HIVE_ADDR_sp_vbuf_pfp_spref 0x308
+#endif
+#define HIVE_SIZE_sp_vbuf_pfp_spref 4
+
+#ifndef ISP2401
+/* function input_system_cfg: ABB */
+#else
+/* function input_system_cfg: AB5 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_ISP_HMEM_BASE
+#define HIVE_MEM_ISP_HMEM_BASE scalar_processor_2400_dmem
+#define HIVE_ADDR_ISP_HMEM_BASE 0x20
+#define HIVE_SIZE_ISP_HMEM_BASE 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_ISP_HMEM_BASE scalar_processor_2400_dmem
+#define HIVE_ADDR_sp_ISP_HMEM_BASE 0x20
+#define HIVE_SIZE_sp_ISP_HMEM_BASE 4
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_ia_css_bufq_sp_pipe_private_frames
+#define HIVE_MEM_ia_css_bufq_sp_pipe_private_frames scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_ia_css_bufq_sp_pipe_private_frames 0x59DC
+#else
+#define HIVE_ADDR_ia_css_bufq_sp_pipe_private_frames 0x5A38
+#endif
+#define HIVE_SIZE_ia_css_bufq_sp_pipe_private_frames 280
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_ia_css_bufq_sp_pipe_private_frames scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_ia_css_bufq_sp_pipe_private_frames 0x59DC
+#else
+#define HIVE_ADDR_sp_ia_css_bufq_sp_pipe_private_frames 0x5A38
+#endif
+#define HIVE_SIZE_sp_ia_css_bufq_sp_pipe_private_frames 280
+
+#ifndef ISP2401
+/* function qos_scheduler_init_stage_budget: 65E8 */
+#else
+/* function qos_scheduler_init_stage_budget: 6750 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_isys_sp_backend_release: 5B0D */
+#else
+/* function ia_css_isys_sp_backend_release: 5C48 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_isys_sp_backend_destroy: 5B37 */
+#else
+/* function ia_css_isys_sp_backend_destroy: 5C72 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_sp2host_buffer_queue_handle
+#define HIVE_MEM_sp2host_buffer_queue_handle scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp2host_buffer_queue_handle 0x5AF4
+#else
+#define HIVE_ADDR_sp2host_buffer_queue_handle 0x5B50
+#endif
+#define HIVE_SIZE_sp2host_buffer_queue_handle 96
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_sp2host_buffer_queue_handle scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_sp2host_buffer_queue_handle 0x5AF4
+#else
+#define HIVE_ADDR_sp_sp2host_buffer_queue_handle 0x5B50
+#endif
+#define HIVE_SIZE_sp_sp2host_buffer_queue_handle 96
+
+#ifndef ISP2401
+/* function ia_css_isys_sp_token_map_check_mipi_frame_size: 5F73 */
+#else
+/* function ia_css_isys_sp_token_map_check_mipi_frame_size: 60AE */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_ispctrl_sp_init_isp_vars: 449C */
+#else
+/* function ia_css_ispctrl_sp_init_isp_vars: 46F7 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_isys_sp_frontend_has_empty_mipi_buffer_cb: 5B89 */
+#else
+/* function ia_css_isys_sp_frontend_has_empty_mipi_buffer_cb: 5CC4 */
+#endif
+
+#ifndef ISP2401
+/* function sp_warning: 8DC */
+#else
+/* function sp_warning: 870 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_rmgr_sp_vbuf_enqueue: 631D */
+#else
+/* function ia_css_rmgr_sp_vbuf_enqueue: 6458 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_tagger_sp_tag_exp_id: 215B */
+#else
+/* function ia_css_tagger_sp_tag_exp_id: 21AB */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_dmaproxy_sp_write: 3216 */
+#else
+/* function ia_css_dmaproxy_sp_write: 33F5 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_parambuf_sp_release_in_param: 1250 */
+#else
+/* function ia_css_parambuf_sp_release_in_param: 1245 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_irq_sw_interrupt_token
+#define HIVE_MEM_irq_sw_interrupt_token scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_irq_sw_interrupt_token 0x40F4
+#else
+#define HIVE_ADDR_irq_sw_interrupt_token 0x4114
+#endif
+#define HIVE_SIZE_irq_sw_interrupt_token 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_irq_sw_interrupt_token scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_irq_sw_interrupt_token 0x40F4
+#else
+#define HIVE_ADDR_sp_irq_sw_interrupt_token 0x4114
+#endif
+#define HIVE_SIZE_sp_irq_sw_interrupt_token 4
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_sp_isp_addresses
+#define HIVE_MEM_sp_isp_addresses scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_isp_addresses 0x5F44
+#else
+#define HIVE_ADDR_sp_isp_addresses 0x5FA4
+#endif
+#define HIVE_SIZE_sp_isp_addresses 172
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_sp_isp_addresses scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_sp_isp_addresses 0x5F44
+#else
+#define HIVE_ADDR_sp_sp_isp_addresses 0x5FA4
+#endif
+#define HIVE_SIZE_sp_sp_isp_addresses 172
+
+#ifndef ISP2401
+/* function ia_css_rmgr_sp_acq_gen: 6242 */
+#else
+/* function ia_css_rmgr_sp_acq_gen: 637D */
+#endif
+
+#ifndef ISP2401
+/* function receiver_reg_load: AD0 */
+#else
+/* function receiver_reg_load: ACA */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_isps
+#define HIVE_MEM_isps scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_isps 0x6300
+#else
+#define HIVE_ADDR_isps 0x635C
+#endif
+#define HIVE_SIZE_isps 28
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_isps scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_isps 0x6300
+#else
+#define HIVE_ADDR_sp_isps 0x635C
+#endif
+#define HIVE_SIZE_sp_isps 28
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_host_sp_queues_initialized
+#define HIVE_MEM_host_sp_queues_initialized scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_host_sp_queues_initialized 0x410C
+#else
+#define HIVE_ADDR_host_sp_queues_initialized 0x412C
+#endif
+#define HIVE_SIZE_host_sp_queues_initialized 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_host_sp_queues_initialized scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_host_sp_queues_initialized 0x410C
+#else
+#define HIVE_ADDR_sp_host_sp_queues_initialized 0x412C
+#endif
+#define HIVE_SIZE_sp_host_sp_queues_initialized 4
+
+#ifndef ISP2401
+/* function ia_css_queue_uninit: 4BE5 */
+#else
+/* function ia_css_queue_uninit: 4E43 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_ia_css_ispctrl_sp_isp_started
+#define HIVE_MEM_ia_css_ispctrl_sp_isp_started scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_ia_css_ispctrl_sp_isp_started 0x5BFC
+#else
+#define HIVE_ADDR_ia_css_ispctrl_sp_isp_started 0x5C58
+#endif
+#define HIVE_SIZE_ia_css_ispctrl_sp_isp_started 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_ia_css_ispctrl_sp_isp_started scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_ia_css_ispctrl_sp_isp_started 0x5BFC
+#else
+#define HIVE_ADDR_sp_ia_css_ispctrl_sp_isp_started 0x5C58
+#endif
+#define HIVE_SIZE_sp_ia_css_ispctrl_sp_isp_started 4
+
+#ifndef ISP2401
+/* function ia_css_bufq_sp_release_dynamic_buf: 2DE7 */
+#else
+/* function ia_css_bufq_sp_release_dynamic_buf: 2F89 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_dmaproxy_sp_set_height_exception: 330E */
+#else
+/* function ia_css_dmaproxy_sp_set_height_exception: 3502 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_dmaproxy_sp_init_vmem_channel: 3293 */
+#else
+/* function ia_css_dmaproxy_sp_init_vmem_channel: 3473 */
+#endif
+
+#ifndef ISP2401
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_num_ready_threads
+#define HIVE_MEM_num_ready_threads scalar_processor_2400_dmem
+#define HIVE_ADDR_num_ready_threads 0x49E4
+#define HIVE_SIZE_num_ready_threads 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_num_ready_threads scalar_processor_2400_dmem
+#define HIVE_ADDR_sp_num_ready_threads 0x49E4
+#define HIVE_SIZE_sp_num_ready_threads 4
+
+/* function ia_css_dmaproxy_sp_write_byte_addr_mmio: 31E8 */
+#else
+/* function ia_css_dmaproxy_sp_write_byte_addr_mmio: 33C7 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_vbuf_spref
+#define HIVE_MEM_vbuf_spref scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_vbuf_spref 0x2EC
+#else
+#define HIVE_ADDR_vbuf_spref 0x304
+#endif
+#define HIVE_SIZE_vbuf_spref 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_vbuf_spref scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_vbuf_spref 0x2EC
+#else
+#define HIVE_ADDR_sp_vbuf_spref 0x304
+#endif
+#define HIVE_SIZE_sp_vbuf_spref 4
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_sp_metadata_thread
+#define HIVE_MEM_sp_metadata_thread scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_metadata_thread 0x4998
+#define HIVE_SIZE_sp_metadata_thread 68
+#else
+#define HIVE_ADDR_sp_metadata_thread 0x49F8
+#define HIVE_SIZE_sp_metadata_thread 72
+#endif
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_sp_metadata_thread scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_sp_metadata_thread 0x4998
+#define HIVE_SIZE_sp_sp_metadata_thread 68
+#else
+#define HIVE_ADDR_sp_sp_metadata_thread 0x49F8
+#define HIVE_SIZE_sp_sp_metadata_thread 72
+#endif
+
+#ifndef ISP2401
+/* function ia_css_queue_enqueue: 4B2F */
+#else
+/* function ia_css_queue_enqueue: 4D8D */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_ia_css_flash_sp_request
+#define HIVE_MEM_ia_css_flash_sp_request scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_ia_css_flash_sp_request 0x4A98
+#else
+#define HIVE_ADDR_ia_css_flash_sp_request 0x4AF4
+#endif
+#define HIVE_SIZE_ia_css_flash_sp_request 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_ia_css_flash_sp_request scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_ia_css_flash_sp_request 0x4A98
+#else
+#define HIVE_ADDR_sp_ia_css_flash_sp_request 0x4AF4
+#endif
+#define HIVE_SIZE_sp_ia_css_flash_sp_request 4
+
+#ifndef ISP2401
+/* function ia_css_dmaproxy_sp_vmem_write: 31B9 */
+#else
+/* function ia_css_dmaproxy_sp_vmem_write: 3398 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_tagger_frames
+#define HIVE_MEM_tagger_frames scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_tagger_frames 0x49EC
+#else
+#define HIVE_ADDR_tagger_frames 0x4A48
+#endif
+#define HIVE_SIZE_tagger_frames 168
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_tagger_frames scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_tagger_frames 0x49EC
+#else
+#define HIVE_ADDR_sp_tagger_frames 0x4A48
+#endif
+#define HIVE_SIZE_sp_tagger_frames 168
+
+#ifndef ISP2401
+/* function ia_css_isys_sp_token_map_snd_capture_req: 5FD1 */
+#else
+/* function ia_css_isys_sp_token_map_snd_capture_req: 610C */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_sem_for_reading_if
+#define HIVE_MEM_sem_for_reading_if scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sem_for_reading_if 0x47C8
+#else
+#define HIVE_ADDR_sem_for_reading_if 0x4810
+#endif
+#define HIVE_SIZE_sem_for_reading_if 20
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_sem_for_reading_if scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_sem_for_reading_if 0x47C8
+#else
+#define HIVE_ADDR_sp_sem_for_reading_if 0x4810
+#endif
+#define HIVE_SIZE_sp_sem_for_reading_if 20
+
+#ifndef ISP2401
+/* function sp_generate_interrupts: 95B */
+#else
+/* function sp_generate_interrupts: 8EF */
+
+/* function ia_css_pipeline_sp_start: 1871 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_pipeline_sp_start: 1837 */
+#else
+/* function ia_css_thread_default_callout: 6BE3 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_sp_rawcopy_init: 510C */
+#else
+/* function ia_css_sp_rawcopy_init: 536A */
+#endif
+
+#ifndef ISP2401
+/* function tmr_clock_read: 13F1 */
+#else
+/* function tmr_clock_read: 1412 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_ISP_BAMEM_BASE
+#define HIVE_MEM_ISP_BAMEM_BASE scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_ISP_BAMEM_BASE 0x2F8
+#else
+#define HIVE_ADDR_ISP_BAMEM_BASE 0x310
+#endif
+#define HIVE_SIZE_ISP_BAMEM_BASE 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_ISP_BAMEM_BASE scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_ISP_BAMEM_BASE 0x2F8
+#else
+#define HIVE_ADDR_sp_ISP_BAMEM_BASE 0x310
+#endif
+#define HIVE_SIZE_sp_ISP_BAMEM_BASE 4
+
+#ifndef ISP2401
+/* function ia_css_isys_sp_frontend_rcv_capture_ack: 5C38 */
+#else
+/* function ia_css_isys_sp_frontend_rcv_capture_ack: 5D73 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_ia_css_bufq_sp_sems_for_sp2host_buf_queues
+#define HIVE_MEM_ia_css_bufq_sp_sems_for_sp2host_buf_queues scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_ia_css_bufq_sp_sems_for_sp2host_buf_queues 0x5B54
+#else
+#define HIVE_ADDR_ia_css_bufq_sp_sems_for_sp2host_buf_queues 0x5BB0
+#endif
+#define HIVE_SIZE_ia_css_bufq_sp_sems_for_sp2host_buf_queues 160
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_ia_css_bufq_sp_sems_for_sp2host_buf_queues scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_ia_css_bufq_sp_sems_for_sp2host_buf_queues 0x5B54
+#else
+#define HIVE_ADDR_sp_ia_css_bufq_sp_sems_for_sp2host_buf_queues 0x5BB0
+#endif
+#define HIVE_SIZE_sp_ia_css_bufq_sp_sems_for_sp2host_buf_queues 160
+
+#ifndef ISP2401
+/* function css_get_frame_processing_time_start: 1FC8 */
+#else
+/* function css_get_frame_processing_time_start: 2018 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_sp_all_cbs_frame
+#define HIVE_MEM_sp_all_cbs_frame scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_all_cbs_frame 0x47DC
+#else
+#define HIVE_ADDR_sp_all_cbs_frame 0x4824
+#endif
+#define HIVE_SIZE_sp_all_cbs_frame 16
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_sp_all_cbs_frame scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_sp_all_cbs_frame 0x47DC
+#else
+#define HIVE_ADDR_sp_sp_all_cbs_frame 0x4824
+#endif
+#define HIVE_SIZE_sp_sp_all_cbs_frame 16
+
+#ifndef ISP2401
+/* function thread_sp_queue_print: D8F */
+#else
+/* function thread_sp_queue_print: D84 */
+#endif
+
+#ifndef ISP2401
+/* function sp_notify_eof: 907 */
+#else
+/* function sp_notify_eof: 89B */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_sem_for_str2mem
+#define HIVE_MEM_sem_for_str2mem scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sem_for_str2mem 0x47EC
+#else
+#define HIVE_ADDR_sem_for_str2mem 0x4834
+#endif
+#define HIVE_SIZE_sem_for_str2mem 20
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_sem_for_str2mem scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_sem_for_str2mem 0x47EC
+#else
+#define HIVE_ADDR_sp_sem_for_str2mem 0x4834
+#endif
+#define HIVE_SIZE_sp_sem_for_str2mem 20
+
+#ifndef ISP2401
+/* function ia_css_tagger_buf_sp_is_marked_from_start: 2B5A */
+#else
+/* function ia_css_tagger_buf_sp_is_marked_from_start: 2CFC */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_bufq_sp_acquire_dynamic_buf: 2F9F */
+#else
+/* function ia_css_bufq_sp_acquire_dynamic_buf: 3141 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_circbuf_destroy: 101D */
+#else
+/* function ia_css_circbuf_destroy: 1012 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_ISP_PMEM_BASE
+#define HIVE_MEM_ISP_PMEM_BASE scalar_processor_2400_dmem
+#define HIVE_ADDR_ISP_PMEM_BASE 0xC
+#define HIVE_SIZE_ISP_PMEM_BASE 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_ISP_PMEM_BASE scalar_processor_2400_dmem
+#define HIVE_ADDR_sp_ISP_PMEM_BASE 0xC
+#define HIVE_SIZE_sp_ISP_PMEM_BASE 4
+
+#ifndef ISP2401
+/* function ia_css_sp_isp_param_mem_load: 4710 */
+#else
+/* function ia_css_sp_isp_param_mem_load: 4998 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_tagger_buf_sp_pop_from_start: 2946 */
+#else
+/* function ia_css_tagger_buf_sp_pop_from_start: 2AE8 */
+#endif
+
+#ifndef ISP2401
+/* function __div: 685F */
+#else
+/* function __div: 69D2 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_isys_sp_frontend_create: 5E09 */
+#else
+/* function ia_css_isys_sp_frontend_create: 5F44 */
+#endif
+
+#ifndef ISP2401
+/* function ia_css_rmgr_sp_refcount_release_vbuf: 633C */
+#else
+/* function ia_css_rmgr_sp_refcount_release_vbuf: 6477 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_ia_css_flash_sp_in_use
+#define HIVE_MEM_ia_css_flash_sp_in_use scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_ia_css_flash_sp_in_use 0x4A9C
+#else
+#define HIVE_ADDR_ia_css_flash_sp_in_use 0x4AF8
+#endif
+#define HIVE_SIZE_ia_css_flash_sp_in_use 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_ia_css_flash_sp_in_use scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_ia_css_flash_sp_in_use 0x4A9C
+#else
+#define HIVE_ADDR_sp_ia_css_flash_sp_in_use 0x4AF8
+#endif
+#define HIVE_SIZE_sp_ia_css_flash_sp_in_use 4
+
+#ifndef ISP2401
+/* function ia_css_thread_sem_sp_wait: 6B42 */
+#else
+/* function ia_css_thread_sem_sp_wait: 6CB7 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_sp_sleep_mode
+#define HIVE_MEM_sp_sleep_mode scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_sleep_mode 0x4110
+#else
+#define HIVE_ADDR_sp_sleep_mode 0x4130
+#endif
+#define HIVE_SIZE_sp_sleep_mode 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_sp_sleep_mode scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_sp_sleep_mode 0x4110
+#else
+#define HIVE_ADDR_sp_sp_sleep_mode 0x4130
+#endif
+#define HIVE_SIZE_sp_sp_sleep_mode 4
+
+#ifndef ISP2401
+/* function ia_css_tagger_buf_sp_push: 2A55 */
+#else
+/* function ia_css_tagger_buf_sp_push: 2BF7 */
+#endif
+
+/* function mmu_invalidate_cache: D3 */
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_sp_max_cb_elems
+#define HIVE_MEM_sp_max_cb_elems scalar_processor_2400_dmem
+#define HIVE_ADDR_sp_max_cb_elems 0x148
+#define HIVE_SIZE_sp_max_cb_elems 8
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_sp_max_cb_elems scalar_processor_2400_dmem
+#define HIVE_ADDR_sp_sp_max_cb_elems 0x148
+#define HIVE_SIZE_sp_sp_max_cb_elems 8
+
+#ifndef ISP2401
+/* function ia_css_queue_remote_init: 4C07 */
+#else
+/* function ia_css_queue_remote_init: 4E65 */
+#endif
+
+#ifndef HIVE_MULTIPLE_PROGRAMS
+#ifndef HIVE_MEM_isp_stop_req
+#define HIVE_MEM_isp_stop_req scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_isp_stop_req 0x4680
+#else
+#define HIVE_ADDR_isp_stop_req 0x46C8
+#endif
+#define HIVE_SIZE_isp_stop_req 4
+#else
+#endif
+#endif
+#define HIVE_MEM_sp_isp_stop_req scalar_processor_2400_dmem
+#ifndef ISP2401
+#define HIVE_ADDR_sp_isp_stop_req 0x4680
+#else
+#define HIVE_ADDR_sp_isp_stop_req 0x46C8
+#endif
+#define HIVE_SIZE_sp_isp_stop_req 4
+
+#ifndef ISP2401
+#define HIVE_ICACHE_sp_critical_SEGMENT_START 0
+#define HIVE_ICACHE_sp_critical_NUM_SEGMENTS 1
+#endif
+
+#endif /* _sp_map_h_ */
+#ifndef ISP2401
+extern void sh_css_dump_sp_dmem(void);
+void sh_css_dump_sp_dmem(void)
+{
+}
+#endif
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_api_version.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_api_version.h
new file mode 100644
index 000000000000..1f6a55ff5db8
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_api_version.h
@@ -0,0 +1,673 @@
+/*
+#ifndef ISP2401
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#else
+Support for Intel Camera Imaging ISP subsystem.
+Copyright (c) 2010 - 2015, Intel Corporation.
+#endif
+
+#ifdef ISP2401
+This program is free software; you can redistribute it and/or modify it
+under the terms and conditions of the GNU General Public License,
+version 2, as published by the Free Software Foundation.
+
+This program is distributed in the hope it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+more details.
+*/
+#endif
+#ifndef __CSS_API_VERSION_H
+#define __CSS_API_VERSION_H
+
+/** @file
+ * CSS API version file. This file contains the version number of the CSS-API.
+ *
+ * This file is generated from a set of input files describing the CSS-API
+ * changes. Don't edit this file directly.
+ */
+
+
+/**
+
+The version string has four dot-separated numbers, read left to right:
+ The first two are the API version, and should not be changed.
+ The third number is incremented by a CSS firmware developer when the
+ API change is not backwards compatible.
+ The fourth number is incremented by a CSS firmware developer for
+ every API change.
+ It should be zeroed when the third number changes.
+
+*/
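+
+/*
+ * Illustrative sketch only (not generated from the CSS-API change files,
+ * variable names made up for this example): assuming the four-field
+ * convention described above, a caller could split the version string
+ * with the standard C library, for example:
+ *
+ *   #include <stdio.h>
+ *
+ *   unsigned int api_hi, api_lo, breaking, change;
+ *
+ *   if (sscanf(CSS_API_VERSION_STRING, "%u.%u.%u.%u",
+ *              &api_hi, &api_lo, &breaking, &change) == 4) {
+ *           // api_hi.api_lo : fixed API version
+ *           // breaking      : bumped on non-backwards-compatible changes
+ *           // change        : bumped on every change, reset to zero when
+ *           //                 "breaking" is bumped
+ *   }
+ */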
+
+#ifndef ISP2401
+#define CSS_API_VERSION_STRING "2.1.15.3"
+#else
+#define CSS_API_VERSION_STRING "2.1.20.9"
+#endif
+
+/*
+Change log
+
+v2.0.1.0, initial version:
+- added API versioning
+
+v2.0.1.1, activate CSS-API versioning:
+- added description of major and minor version numbers
+
+v2.0.1.2, modified struct ia_css_frame_info:
+- added new member ia_css_crop_info
+
+v2.0.1.3, added IA_CSS_ERR_NOT_SUPPORTED
+
+v2.1.0.0
+- moved version number to 2.1.0.0
+- created new files for refactoring the code
+
+v2.1.1.0, modified struct ia_css_pipe_config and struct ia_css_pipe_info and struct ia_css_pipe:
+- use array to handle multiple output ports
+
+v2.1.1.1
+- added API to lock/unlock RAW buffers to support the HALv3 feature
+
+v2.1.1.2, modified struct ia_css_stream_config:
+- to support multiple isys streams in one virtual channel, keep the old one for backward compatibility
+
+v2.1.2.0, modify ia_css_stream_config:
+- add isys_config and input_config to support multiple isys stream within one virtual channel
+
+v2.1.2.1, add IA_CSS_STREAM_FORMAT_NUM
+- add IA_CSS_STREAM_FORMAT_NUM definition to reflect the number of ia_css_stream_format enums
+
+v2.1.2.2, modified enum ia_css_stream_format
+- Add 16bit YUV formats to ia_css_stream_format enum:
+- IA_CSS_STREAM_FORMAT_YUV420_16 (directly after IA_CSS_STREAM_FORMAT_YUV420_10)
+- IA_CSS_STREAM_FORMAT_YUV422_16 (directly after IA_CSS_STREAM_FORMAT_YUV422_10)
+
+v2.1.2.3
+- added api to enable/disable digital zoom for capture pipe.
+
+v2.1.2.4, change CSS API to generate the shading table which should be directly sent to ISP:
+- keep the old CSS API (which uses the conversion of the shading table in CSS) for backward compatibility
+
+v2.1.2.5
+- Added SP frame time measurement (in ticks); the result is sent via a new member
+- in ia_css_buffer.h.
+
+v2.1.2.6, add function ia_css_check_firmware_version()
+- the function ia_css_check_firmware_version() returns true when the firmware version matches and returns false otherwise.
+
+v2.1.2.7
+- rename dynamic_data_index to dynamic_queue_id in struct ia_css_frame.
+- update IA_CSS_PIPE_MODE_NUM
+
+v2.1.2.8
+- added flag for video full range
+
+v2.1.2.9
+- add public parameters for xnr3 kernel
+
+v2.1.2.10
+- add new interface to enable output mirroring
+
+v2.1.2.11, MIPI buffers optimization
+- modified struct ia_css_mipi_buffer_config, added number of MIPI buffers needed for the stream
+- backwards compatible, need another patch to remove legacy function and code
+
+v2.1.2.12
+- create consolidated firmware package for 2400, 2401, csi2p, bxtpoc
+
+v2.1.3.0
+- rename ia_css_output_config.enable_mirror
+- add new interface to enable vertical output flipping
+
+v2.1.3.1
+- deprecated ia_css_rx_get_irq_info and ia_css_rx_clear_irq_info because both are hardcoded to work on CSI port 1.
+- added new functions ia_css_rx_port_get_irq_info and ia_css_rx_port_clear_irq_info, both have a port ID as extra argument.
+
+v2.1.3.2
+- reverted v2.1.3.0 change
+
+v2.1.3.3
+- Added isys event queue.
+- Renamed ia_css_dequeue_event to ia_css_dequeue_psys_event
+- Made ia_css_dequeue_event deprecated
+
+v2.1.3.4
+- added new interface to support ACC extension QoS feature.
+- added IA_CSS_EVENT_TYPE_ACC_STAGE_COMPLETE.
+
+v2.1.3.5
+- added tiled frame format IA_CSS_FRAME_FORMAT_NV12_TILEY
+
+v2.1.3.6
+- added functions ia_css_host_data_allocate and ia_css_host_data_free
+
+v2.1.4.0, default pipe config change
+- disable enable_dz param by default
+
+v2.1.5.0
+- removed mix_range field from yuvp1_y_ee_nr_frng_public_config
+
+v2.1.5.1, exposure IDs per stream
+- added MIN/MAX exposure ID macros
+- made exposure ID sequence per-stream instead of global (across all streams)
+
+#ifdef ISP2401
+v2.1.5.1, Add parameters to mmgr routines via a macro.
+- Replaced mmgr functions with macros to add caller func name + line #.
+- This is done to help debug memory access issues, allocation issues, etc.
+
+#endif
+v2.1.6.0, Interface for vertical output flip
+- add new interface to enable vertical output flipping
+- rename ia_css_output_config.enable_mirror
+
+#ifndef ISP2401
+v2.1.6.1, Effective res on pipe
+#else
+v2.1.6.2 (2 changes parallel), Effective res on pipe
+#endif
+- Added input_effective_res to struct ia_css_pipe_config in ia_css_pipe_public.h.
+
+#ifndef ISP2401
+v2.1.6.2, CSS-API version file generated from individual changes
+#else
+v2.1.6.3 (2 changes parallel), CSS-API version file generated from individual changes
+#endif
+- Avoid merge-conflicts by generating version file from individual CSS-API changes.
+- Parallel CSS-API changes can map to the same version number after this change.
+- Version numbers for a change could increase due to parallel changes being merged.
+- The version number would not decrease for a change.
+
+#ifndef ISP2401
+v2.1.6.5 (2 changes parallel), Add SP FW error event
+#else
+v2.1.6.6 (4 changes parallel), Add SP FW error event
+#endif
+- Added FW error event. This gets raised when the SP FW runs into an
+- error situation from which it cannot recover.
+
+#ifndef ISP2401
+v2.1.6.5 (2 changes parallel), expose bnr FF enable bits in bnr public API
+#else
+v2.1.6.6 (4 changes parallel), expose bnr FF enable bits in bnr public API
+#endif
+- Added ff enable bits to bnr_public_config_dn_detect_ctrl_config_t struct
+
+#ifndef ISP2401
+v2.1.6.5 (2 changes parallel), ISP configuration per pipe
+#else
+v2.1.6.6 (4 changes parallel), ISP configuration per pipe
+#endif
+- Added ISP configuration per pipe support: p_isp_config field in
+- struct ia_css_pipe_config and ia_css_pipe_set_isp_config_on_pipe
+- and ia_css_pipe_set_isp_config functions
+
+#ifndef ISP2401
+v2.1.7.0, removed css_version.h
+#else
+v2.1.7.0 (2 changes parallel), removed css_version.h
+#endif
+- Removed css_version.h that was used for versioning in manual (non-CI) releases.
+
+#ifndef ISP2401
+v2.1.7.1, Add helpers (get and set) for ISP cfg per pipe
+#else
+v2.1.7.2 (2 changes parallel), Add helpers (get and set) for ISP cfg per pipe
+#endif
+- Add helpers (get and set) for ISP configuration per pipe
+
+#ifndef ISP2401
+v2.1.7.2, Add feature to lock all RAW buffers
+#else
+v2.1.7.3 (2 changes parallel), Add feature to lock all RAW buffers
+#endif
+- This API change adds a boolean flag (lock_all) in the stream_config struct.
+- If this flag is set to true, then all frames will be locked if locking is
+- enabled. By default this flag is set to false.
+- When this flag is false, then only buffers that are sent to the preview pipe
+- will be locked. If continuous viewfinder is disabled, the flag should be set
+- to true.
+
+#ifndef ISP2401
+v2.1.8.0 (2 changes parallel), Various changes to support ACC configuration per pipe
+#else
+v2.1.8.0 (4 changes parallel), Various changes to support ACC configuration per pipe
+#endif
+- Add ia_css_pipe_get_isp_config()
+- Remove ia_css_pipe_set_isp_config_on_pipe (duplicated
+- by ia_css_pipe_set_isp_config)
+- Add isp configuration as parameter for
+- ia_css_pipe_set_isp_config
+- Remove ia_css_pipe_isp_config_set()
+- Remove ia_css_pipe_isp_config_get()
+
+#ifndef ISP2401
+v2.1.8.2 (2 changes parallel), Added member num_invalid_frames to ia_css_pipe_info structure.
+#else
+v2.1.8.3 (4 changes parallel), Added member num_invalid_frames to ia_css_pipe_info structure.
+#endif
+- Added member num_invalid_frames to ia_css_pipe_info structure.
+- This helps the driver make sure that the first valid output
+- frame goes into the first user-supplied output buffer.
+
+#ifndef ISP2401
+v2.1.8.4 (2 changes parallel), ISYS EOF timestamp for output buffers
+#else
+v2.1.8.5 (4 changes parallel), ISYS EOF timestamp for output buffers
+#endif
+- the driver gets an EOF timestamp for every output frame; ia_css_buffer was modified to accommodate this.
+
+#ifndef ISP2401
+v2.1.8.4 (4 changes parallel), display_config
+#else
+v2.1.8.5 (6 changes parallel), display_config
+#endif
+- Added format and output config parameters for configuring the (optional) display output.
+
+#ifndef ISP2401
+v2.1.8.4 (2 changes parallel), Adding zoom region parameters to CSS API
+#else
+v2.1.8.5 (4 changes parallel), Adding zoom region parameters to CSS API
+#endif
+- Adding ia_css_point and ia_css_region structures to css-api.
+- Adding zoom_region(type ia_css_region) parameter to ia_css_dz_config structure.
+- By using this user can do the zoom based on zoom region and
+- the center of the zoom region is not restricted at the center of the input frame.
+
+#ifndef ISP2401
+v2.1.8.6 (1 changes parallel), Add new ia_css_fw_warning type
+#else
+v2.1.8.7 (3 changes parallel), Add new ia_css_fw_warning type
+#endif
+- Add IA_CSS_FW_WARNING_TAG_EXP_ID_FAILED enum to ia_css_fw_warning type
+- Extend sp_warning() with exp_id parameter
+
+#ifndef ISP2401
+v2.1.8.6 (1 changes parallel), Add includes in GC, GC2 kernel interface files
+#else
+v2.1.8.7 (3 changes parallel), Add includes in GC, GC2 kernel interface files
+#endif
+- add ia_css_ctc_types.h includes in ia_css_gc_types.h and ia_css_gc2_types.h. Needed to get ia_css_vamem_type.
+
+#ifndef ISP2401
+v2.1.9.0 (1 changes parallel), Introduce sp assert event.
+#else
+v2.1.9.0 (3 changes parallel), Introduce sp assert event.
+#endif
+- Add IA_CSS_EVENT_TYPE_FW_ASSERT. The FW sends the event in case an assert goes off.
+
+#ifndef ISP2401
+v2.1.9.1 (1 changes parallel), Exclude driver part from ia_css_buffer.h as it is also used by SP
+#else
+v2.1.9.2 (3 changes parallel), Exclude driver part from ia_css_buffer.h as it is also used by SP
+#endif
+- Excluded driver part of the interface from SP/ISP code
+- Driver I/F is not affected
+
+#ifndef ISP2401
+v2.1.9.2, added IA_CSS_EVENT_TYPE_TIMER
+#else
+v2.1.9.3 (2 changes parallel), added IA_CSS_EVENT_TYPE_TIMER
+#endif
+- Added a new event called IA_CSS_EVENT_TYPE_TIMER
+
+#ifndef ISP2401
+v2.1.10.0 (4 changes parallel), Add a flag "enable_dpc" to "struct ia_css_pipe_config"
+#else
+v2.1.10.0 (6 changes parallel), Add a flag "enable_dpc" to "struct ia_css_pipe_config"
+#endif
+- Add a flag "enable_dpc" to "struct ia_css_pipe_config"
+
+#ifndef ISP2401
+v2.1.10.6 (6 changes parallel), change the pipe version type from integer to enum
+#else
+v2.1.10.8 (9 changes parallel), change the pipe version type from integer to enum
+#endif
+- add new enum to enumerate ISP pipe version
+- change the pipe version type in pipe_config from integer to enum
+
+#ifndef ISP2401
+v2.1.13.0 (8 changes parallel), Stop Support for Skycam B0
+#else
+v2.1.14.0 (12 changes parallel), Stop Support for Skycam B0
+#endif
+- Remove a few pre-processor defines for Skycam B0/C0 as support for them is stopped
+
+#ifndef ISP2401
+v2.1.14.0 (24 changes parallel), change the pipe version type from integer to enum
+#else
+v2.1.15.0 (28 changes parallel), change the pipe version type from integer to enum
+#endif
+- remove the temporary workaround for backward compatibility
+
+#ifndef ISP2401
+v2.1.14.0 (13 changes parallel), expose_gamma_enable_option
+#else
+v2.1.15.0 (17 changes parallel), expose_gamma_enable_option
+#endif
+- added enable param to gamma_corr_public_config
+- added documentation to rgbpp_public.h
+
+#ifndef ISP2401
+v2.1.14.0 (12 changes parallel), Remove deprecated FW_ERROR event.
+#else
+v2.1.15.0 (16 changes parallel), Remove deprecated FW_ERROR event.
+#endif
+- Remove code for deprecated FW_ERROR event.
+
+#ifndef ISP2401
+v2.1.14.3 (5 changes parallel), fix IEFD's public API types
+#else
+v2.1.15.5 (8 changes parallel), fix IEFD's public API types
+#endif
+- fix IEFD public API member types: rad_cu6_x1, rad_cu_unsharp_x1 & unsharp_amount
+
+#ifndef ISP2401
+v2.1.14.3 (5 changes parallel), Add IA_CSS_FW_WARNING_FRAME_PARAM_MISMATCH
+#else
+v2.1.15.5 (8 changes parallel), Add IA_CSS_FW_WARNING_FRAME_PARAM_MISMATCH
+#endif
+- Add IA_CSS_FW_WARNING_FRAME_PARAM_MISMATCH enum to ia_css_fw_warning type
+
+#ifndef ISP2401
+v2.1.14.4 (5 changes parallel), new API getter functions for gdc in buffer information
+#else
+v2.1.15.8 (11 changes parallel), add_flag_to_disable_continous_viewfinder
+- add a new flag in stream_config to disable continuous viewfinder
+- in ZSL use case.
+
+v2.1.16.0 (8 changes parallel), revert ia_css_skc_dvs_statistics field size change
+- Reverted field size change, change was not ready for driver yet.
+
+v2.1.17.0 (7 changes parallel), change CSS API to fix the shading correction off-center issue
+- update the ia_css_shading_info structure in ia_css_types.h
+
+v2.1.17.0 (32 changes parallel), add_flag_to_disable_continous_viewfinder_part2
+- remove the old interfaces
+
+v2.1.17.4 (8 changes parallel), Added public interface for setting the scaler LUT.
+- Added the public struct to output system and modified the global config struct.
+
+v2.1.17.5 (7 changes parallel), Add parameters for new TNR3 component
+- Add new parameters for new TNR3 component
+
+v2.1.17.6 (9 changes parallel), Update skycam DPC_MAX_NUMBER_OF_DP
+- Automated tooling requires an API change request
+- This change changes the implementation of #define DPC_MAX_NUMBER_OF_DP
+- it now returns a different number
+
+v2.1.17.6 (8 changes parallel), Return an error when both DPC and BDS are enabled in a pipe config
+- Return an error when both DPC and BDS are enabled in a pipe config
+
+v2.1.17.6 (9 changes parallel), new API getter functions for gdc in buffer information
+#endif
+- ia_css_pipe_get_dvs_filter() added
+- ia_css_pipe_get_gdc_in_buffer_info() added
+
+#ifndef ISP2401
+v2.1.14.5 (8 changes parallel), Update CNR2 ineffective values
+#else
+v2.1.17.7 (12 changes parallel), Update CNR2 ineffective values
+#endif
+- Fixed Incorrect ineffective values listed in ia_css_cnr_config
+- Correct Ineffective value is 8191
+
+#ifndef ISP2401
+v2.1.14.5 (8 changes parallel), af_roi_api
+#else
+v2.1.17.7 (12 changes parallel), af_roi_api
+#endif
+- added a new function to set AF ROI ia_css_set_af_roi
+- added a new struct ia_css_s3a_roi_offset
+
+#ifndef ISP2401
+v2.1.14.5 (8 changes parallel), remove x_y_end_from_ae_and_awb
+#else
+v2.1.17.7 (12 changes parallel), Enlarge AF AWB_FR stats buffers
+- Enlarge AF and AWB_FR stats buffers to support max grid width per stripe as opposed to per frame
+
+v2.1.17.7 (12 changes parallel), remove x_y_end_from_ae_and_awb
+#endif
+- added a flag to prepare removal of x_end and y_end from ae grid public config
+- added a flag to prepare removal of x_end and y_end from awb grid public config
+
+#ifndef ISP2401
+v2.1.14.5 (4 changes parallel), Added public interface for setting the scaler LUT.
+- Added the public struct to output system and modified the global config struct.
+#else
+v2.1.17.8 (5 changes parallel)
+- added input_yuv, input_raw to ia_css_binary_info.enable
+- struct, these attributes were always there but not saved
+- in the binary_info struct
+#endif
+
+#ifndef ISP2401
+v2.1.14.6 (8 changes parallel), add_flag_to_disable_continous_viewfinder
+- add a new flag in stream_config to disable continuous viewfinder
+- in ZSL use case.
+#else
+v2.1.17.9 (6 changes parallel), cleanup_awb_ae_rgb_integration_flags
+- this change only cleans up an approved API CR; see wikis below
+#endif
+
+#ifndef ISP2401
+v2.1.14.6 (8 changes parallel), Enlarge AF AWB_FR stats buffers
+- Enlarge AF and AWB_FR stats buffers to support max grid width per stripe as opposed to per frame
+#else
+v2.1.17.10 (6 changes parallel), output_system_input_resolution
+- added gdc_output_system_in_resolution to pipe config struct
+#endif
+
+#ifndef ISP2401
+v2.1.14.8 (6 changes parallel), pipe config option for vf output bci mode downscaling
+#else
+v2.1.17.10 (5 changes parallel), Per pipe DPC configuration is added to ia_css_isp_parameters
+- Per pipe DPC configuration is added to ia_css_isp_parameters
+
+v2.1.17.10 (10 changes parallel), pipe config option for vf output bci mode downscaling
+#endif
+- vf downscaling using yuv_scale binary.
+
+#ifndef ISP2401
+v2.1.14.10 (7 changes parallel), Add scale mode GDC V2 LUT to CSS API
+#else
+v2.1.17.12 (11 changes parallel), Add scale mode GDC V2 LUT to CSS API
+#endif
+- Allow client to set global LUT for gdc v2 (First step in this change. See wiki page for more details)
+
+#ifndef ISP2401
+v2.1.14.10 (8 changes parallel), Include added to type-support.h.
+#else
+v2.1.17.12 (12 changes parallel), Include added to type-support.h.
+#endif
+- Include of hive/cell_support.h was added to type-support.h, in order to
+- have access to define HAVE_STDINT.
+
+#ifndef ISP2401
+v2.1.14.11 (7 changes parallel), Pipe configuration to enable BLI mode downscaling for
+#else
+v2.1.17.13 (11 changes parallel), Pipe configuration to enable BLI mode downscaling for
+#endif
+- BLI mode downscaling for capture post-processing
+
+#ifndef ISP2401
+v2.1.14.14 (9 changes parallel), Fix copyright headers (no functional change)
+#else
+v2.1.17.15 (8 changes parallel), Add copyright headers to css files
+- Add copyright headers to css API files
+
+v2.1.17.15 (8 changes parallel), add copyright header to include files
+- add copyright header to include files
+
+v2.1.17.15 (8 changes parallel), add copyright header to isp files
+- add copyright header to isp files
+
+v2.1.17.15 (8 changes parallel), add copyright header to refactored code
+- add copyright header to refactored code
+- (base, camera, runtime directories)
+
+v2.1.17.16 (13 changes parallel), Fix copyright headers (no functional change)
+#endif
+- No functional change; only fixes copyright headers
+
+#ifndef ISP2401
+v2.1.14.14 (6 changes parallel), Remove continuous mode special case handling in ia_css_pipe_set_isp_config
+#else
+v2.1.17.16 (10 changes parallel), Remove continuous mode special case handling in ia_css_pipe_set_isp_config
+#endif
+- For continuous mode, isp_config was being sent to all pipes,
+- even though API ia_css_pipe_set_isp_config is for single pipe
+- Removed incorrect case
+
+#ifndef ISP2401
+v2.1.14.14 (6 changes parallel), DVS statistics grid produced by accelerator
+#else
+v2.1.17.16 (5 changes parallel), Added documentation to formats_config header file
+- Added description of ranges for full-range flag
+
+v2.1.17.16 (10 changes parallel), DVS statistics grid produced by accelerator
+#endif
+- Add DVS statistics produced by accelerator grid to pipe info
+- Add ia_css_pipe_has_dvs_stats function
+
+#ifndef ISP2401
+v2.1.14.15 (7 changes parallel), cont_remove_x_y_end_from_ae_and_awb
+#else
+v2.1.17.17 (5 changes parallel), Provide the CSS interface to select the luma only binaries
+- Add a flag "enable_luma_only" to "struct ia_css_pipe_config"
+
+v2.1.17.17 (11 changes parallel), cont_remove_x_y_end_from_ae_and_awb
+#endif
+- this patch doesn't introduce any new API change; it only fixes a recently
+- merged API change (#31938). To get a successful CI run, an API change request had to be uploaded
+
+#ifndef ISP2401
+v2.1.14.17 (6 changes parallel), Add XNR3 blending strength to kernel interface
+- Added a blending strength field to the XNR3 kernel interface to add
+- support for blending.
+#else
+v2.1.17.17 (10 changes parallel), GAC state dump for debug
+- added ia_css_dump_gac_state function
+
+v2.1.17.18 (23 changes parallel), output_format_nv12_16
+- added new output format nv12_16
+#endif
+
+#ifndef ISP2401
+v2.1.14.18 (22 changes parallel), eliminate two_pixels_per_clock field
+#else
+v2.1.17.18 (4 changes parallel), Remove author details from SKC src code
+- remove author details from skc src code
+
+v2.1.17.19 (26 changes parallel), eliminate two_pixels_per_clock field
+#endif
+- remove obsolete field two_pixels_per_clock
+
+#ifndef ISP2401
+v2.1.14.19 (3 changes parallel), Fix copyright headers (no functional change)
+#else
+v2.1.17.20 (7 changes parallel), Fix copyright headers (no functional change)
+#endif
+- No functional change; only fixes copyright headers
+
+#ifndef ISP2401
+v2.1.14.21 (3 changes parallel), ia_css_skc_dvs_statistics field size change
+- ia_css_skc_dvs_statistics field size change
+#else
+v2.1.17.20 (11 changes parallel), Add XNR3 blending strength to kernel interface
+- Added a blending strength field to the XNR3 kernel interface to add
+- support for blending.
+#endif
+
+#ifndef ISP2401
+v2.1.15.0 (3 changes parallel), revert ia_css_skc_dvs_statistics field size change
+- Reverted field size change, change was not ready for driver yet.
+#else
+v2.1.17.21 (24 changes parallel), Add N_CSS_PRBS_IDS and N_CSS_TPG_IDS
+- Add N_CSS_PRBS_IDS to reflect the number of ia_css_prbs_id enum
+- Add N_CSS_TPG_IDS to reflect the number of ia_css_tpg_id enum
+#endif
+
+#ifndef ISP2401
+v2.1.15.2 (3 changes parallel), Return an error when both DPC and BDS are enabled in a pipe config
+- Return an error when both DPC and BDS are enabled in a pipe config
+#else
+v2.1.17.23 (8 changes parallel), ia_css_skc_dvs_statistics field size change
+- ia_css_skc_dvs_statistics field size change
+#endif
+
+#ifndef ISP2401
+v2.1.15.3 (2 changes parallel), Update skycam DPC_MAX_NUMBER_OF_DP
+- Automated tooling requires an API change request
+- This changes the implementation of #define DPC_MAX_NUMBER_OF_DP;
+- it now returns a different number
+#else
+v2.1.19.0 (6 changes parallel)
+- Added code to calculate input_res using the Windows specification of binning
+#endif
+
+#ifndef ISP2401
+v2.1.15.3 (18 changes parallel), output_format_nv12_16
+- added new output format nv12_16
+#else
+v2.1.20.0 (7 changes parallel), Add interface to select TNR enabled binaries
+- Add a bool "enable_tnr" to "struct ia_css_pipe_config"
+
+v2.1.20.0 (6 changes parallel), OSYS & GDC Debug dump function addition
+- add GDC state dump function
+- add OSYS state dump function
+
+v2.1.20.4 (7 changes parallel), Add ref_buf_select parameter for TNR3 to kernel interface
+- Added a ref_buf_select parameter to the TNR3 kernel interface to add
+- support for multiple reference buffers.
+
+v2.1.20.4 (6 changes parallel), DVS MAX grid dimensions to cover maximal resolution
+- rename DVS_TABLE_HEIGHT/WIDTH to MAX_DVS_COORDS_TABLE_HEIGHT/WIDTH
+- modify value of the above macros to cover max resolution
+
+v2.1.20.5 (54 changes parallel), add input feeder calculations getter
+- add input_feeder_config public struct
+- add get_input_feeder_config getter
+
+v2.1.20.5 (4 changes parallel), Enable runtime updating mapped args for QoS extension pipe
+- added ia_css_pipe_update_qos_ext_mapped_arg()
+
+v2.1.20.7 (77 changes parallel), Add parameters to CPU routines via a macro.
+- Replaced CPU memory allocation functions with macros to add caller func name + line number.
+- This is done to help debug memory access issues, allocation issues, etc.
+- Changed API: only ia_css_env.h
+
+v2.1.20.7 (2 changes parallel), Frame format override
+- Added a function call to the pipe interface for overriding
+- the frame format as set in the pipe.
+- This is an optional interface that can be used under
+- some strict conditions.
+
+v2.1.20.7 (2 changes parallel), Output_system_in_res Information
+- Output_system_in_res_info field added to pipe_info struct
+
+v2.1.20.8, Temporarily disable memory debug features for SVOS.
+- Temporarily commented out the additions to allow SKC testing until the root cause is found
+- Changed files ia_css_env.h and sh_css.c.
+
+v2.1.20.9, Enable ISP 2.7 naming
+- Add IA_CSS_PIPE_VERSION_2_7 to enum ia_css_pipe_version
+- Add #define SH_CSS_ISP_PIPE_VERSION_2_7 4
+#endif
+
+*/
+
+#endif /*__CSS_API_VERSION_H*/
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_trace.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_trace.h
new file mode 100644
index 000000000000..01f7c33b5b40
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_trace.h
@@ -0,0 +1,388 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __CSS_TRACE_H_
+#define __CSS_TRACE_H_
+
+#include <type_support.h>
+#ifdef ISP2401
+#include "sh_css_internal.h" /* for SH_CSS_MAX_SP_THREADS */
+#endif
+
+/*
+ structs and constants for tracing
+*/
+
+/* one tracer item: major, minor and counter. The counter value can be used for GP data */
+struct trace_item_t {
+ uint8_t major;
+ uint8_t minor;
+ uint16_t counter;
+};
+
+#ifdef ISP2401
+#define MAX_SCRATCH_DATA 4
+#define MAX_CMD_DATA 2
+
+#endif
+/* trace header: holds the version and the topology of the tracer. */
+struct trace_header_t {
+#ifndef ISP2401
+ /* 1st dword */
+#else
+ /* 1st dword: descriptor */
+#endif
+ uint8_t version;
+ uint8_t max_threads;
+ uint16_t max_tracer_points;
+#ifdef ISP2401
+ /* 2nd field: command + data */
+#endif
+ /* 2nd dword */
+ uint32_t command;
+ /* 3rd & 4th dword */
+#ifndef ISP2401
+ uint32_t data[2];
+#else
+ uint32_t data[MAX_CMD_DATA];
+ /* 3rd field: debug pointer */
+#endif
+ /* 5th & 6th dword: debug pointer mechanism */
+ uint32_t debug_ptr_signature;
+ uint32_t debug_ptr_value;
+#ifdef ISP2401
+ /* Rest of the header: status & scratch data */
+ uint8_t thr_status_byte[SH_CSS_MAX_SP_THREADS];
+ uint16_t thr_status_word[SH_CSS_MAX_SP_THREADS];
+ uint32_t thr_status_dword[SH_CSS_MAX_SP_THREADS];
+ uint32_t scratch_debug[MAX_SCRATCH_DATA];
+#endif
+};
+
+#ifndef ISP2401
+#define TRACER_VER 2
+#else
+/* offsets for master_port read/write */
+#define HDR_HDR_OFFSET 0 /* offset of the header */
+#define HDR_COMMAND_OFFSET offsetof(struct trace_header_t, command)
+#define HDR_DATA_OFFSET offsetof(struct trace_header_t, data)
+#define HDR_DEBUG_SIGNATURE_OFFSET offsetof(struct trace_header_t, debug_ptr_signature)
+#define HDR_DEBUG_POINTER_OFFSET offsetof(struct trace_header_t, debug_ptr_value)
+#define HDR_STATUS_OFFSET offsetof(struct trace_header_t, thr_status_byte)
+#define HDR_STATUS_OFFSET_BYTE offsetof(struct trace_header_t, thr_status_byte)
+#define HDR_STATUS_OFFSET_WORD offsetof(struct trace_header_t, thr_status_word)
+#define HDR_STATUS_OFFSET_DWORD offsetof(struct trace_header_t, thr_status_dword)
+#define HDR_STATUS_OFFSET_SCRATCH offsetof(struct trace_header_t, scratch_debug)
+
+/*
+Trace version history:
+ 1: initial version, hdr = descr, command & ptr.
+ 2: added ISP + 24-bit fields.
+ 3: added thread ID.
+ 4: added status in header.
+*/
+#define TRACER_VER 4
+
+#endif
+#define TRACE_BUFF_ADDR 0xA000
+#define TRACE_BUFF_SIZE 0x1000 /* 4K allocated */
+
+#define TRACE_ENABLE_SP0 0
+#define TRACE_ENABLE_SP1 0
+#define TRACE_ENABLE_ISP 0
+
+#ifndef ISP2401
+typedef enum {
+#else
+enum TRACE_CORE_ID {
+#endif
+ TRACE_SP0_ID,
+ TRACE_SP1_ID,
+ TRACE_ISP_ID
+#ifndef ISP2401
+} TRACE_CORE_ID;
+#else
+};
+#endif
+
+/* TODO: add timing format? */
+#ifndef ISP2401
+typedef enum {
+ TRACE_DUMP_FORMAT_POINT,
+ TRACE_DUMP_FORMAT_VALUE24_HEX,
+ TRACE_DUMP_FORMAT_VALUE24_DEC,
+#else
+enum TRACE_DUMP_FORMAT {
+ TRACE_DUMP_FORMAT_POINT_NO_TID,
+ TRACE_DUMP_FORMAT_VALUE24,
+#endif
+ TRACE_DUMP_FORMAT_VALUE24_TIMING,
+#ifndef ISP2401
+ TRACE_DUMP_FORMAT_VALUE24_TIMING_DELTA
+} TRACE_DUMP_FORMAT;
+#else
+ TRACE_DUMP_FORMAT_VALUE24_TIMING_DELTA,
+ TRACE_DUMP_FORMAT_POINT
+};
+#endif
+
+
+/* currently divided as follows:*/
+#if (TRACE_ENABLE_SP0 + TRACE_ENABLE_SP1 + TRACE_ENABLE_ISP == 3)
+/* can be divided as needed */
+#define TRACE_SP0_SIZE (TRACE_BUFF_SIZE/4)
+#define TRACE_SP1_SIZE (TRACE_BUFF_SIZE/4)
+#define TRACE_ISP_SIZE (TRACE_BUFF_SIZE/2)
+#elif (TRACE_ENABLE_SP0 + TRACE_ENABLE_SP1 + TRACE_ENABLE_ISP == 2)
+#if TRACE_ENABLE_SP0
+#define TRACE_SP0_SIZE (TRACE_BUFF_SIZE/2)
+#else
+#define TRACE_SP0_SIZE (0)
+#endif
+#if TRACE_ENABLE_SP1
+#define TRACE_SP1_SIZE (TRACE_BUFF_SIZE/2)
+#else
+#define TRACE_SP1_SIZE (0)
+#endif
+#if TRACE_ENABLE_ISP
+#define TRACE_ISP_SIZE (TRACE_BUFF_SIZE/2)
+#else
+#define TRACE_ISP_SIZE (0)
+#endif
+#elif (TRACE_ENABLE_SP0 + TRACE_ENABLE_SP1 + TRACE_ENABLE_ISP == 1)
+#if TRACE_ENABLE_SP0
+#define TRACE_SP0_SIZE (TRACE_BUFF_SIZE)
+#else
+#define TRACE_SP0_SIZE (0)
+#endif
+#if TRACE_ENABLE_SP1
+#define TRACE_SP1_SIZE (TRACE_BUFF_SIZE)
+#else
+#define TRACE_SP1_SIZE (0)
+#endif
+#if TRACE_ENABLE_ISP
+#define TRACE_ISP_SIZE (TRACE_BUFF_SIZE)
+#else
+#define TRACE_ISP_SIZE (0)
+#endif
+#else
+#define TRACE_SP0_SIZE (0)
+#define TRACE_SP1_SIZE (0)
+#define TRACE_ISP_SIZE (0)
+#endif
+
+#define TRACE_SP0_ADDR (TRACE_BUFF_ADDR)
+#define TRACE_SP1_ADDR (TRACE_SP0_ADDR + TRACE_SP0_SIZE)
+#define TRACE_ISP_ADDR (TRACE_SP1_ADDR + TRACE_SP1_SIZE)
+
+/* check if it's a legal division */
+#if (TRACE_BUFF_SIZE < TRACE_SP0_SIZE + TRACE_SP1_SIZE + TRACE_ISP_SIZE)
+#error trace sizes are not divided correctly and are above limit
+#endif
+
+#define TRACE_SP0_HEADER_ADDR (TRACE_SP0_ADDR)
+#define TRACE_SP0_HEADER_SIZE (sizeof(struct trace_header_t))
+#ifndef ISP2401
+#define TRACE_SP0_ITEM_SIZE (sizeof(struct trace_item_t))
+#define TRACE_SP0_DATA_ADDR (TRACE_SP0_HEADER_ADDR + TRACE_SP0_HEADER_SIZE)
+#define TRACE_SP0_DATA_SIZE (TRACE_SP0_SIZE - TRACE_SP0_HEADER_SIZE)
+#define TRACE_SP0_MAX_POINTS (TRACE_SP0_DATA_SIZE / TRACE_SP0_ITEM_SIZE)
+#else
+#define TRACE_SP0_ITEM_SIZE (sizeof(struct trace_item_t))
+#define TRACE_SP0_DATA_ADDR (TRACE_SP0_HEADER_ADDR + TRACE_SP0_HEADER_SIZE)
+#define TRACE_SP0_DATA_SIZE (TRACE_SP0_SIZE - TRACE_SP0_HEADER_SIZE)
+#define TRACE_SP0_MAX_POINTS (TRACE_SP0_DATA_SIZE / TRACE_SP0_ITEM_SIZE)
+#endif
+
+#define TRACE_SP1_HEADER_ADDR (TRACE_SP1_ADDR)
+#define TRACE_SP1_HEADER_SIZE (sizeof(struct trace_header_t))
+#ifndef ISP2401
+#define TRACE_SP1_ITEM_SIZE (sizeof(struct trace_item_t))
+#define TRACE_SP1_DATA_ADDR (TRACE_SP1_HEADER_ADDR + TRACE_SP1_HEADER_SIZE)
+#define TRACE_SP1_DATA_SIZE (TRACE_SP1_SIZE - TRACE_SP1_HEADER_SIZE)
+#define TRACE_SP1_MAX_POINTS (TRACE_SP1_DATA_SIZE / TRACE_SP1_ITEM_SIZE)
+#else
+#define TRACE_SP1_ITEM_SIZE (sizeof(struct trace_item_t))
+#define TRACE_SP1_DATA_ADDR (TRACE_SP1_HEADER_ADDR + TRACE_SP1_HEADER_SIZE)
+#define TRACE_SP1_DATA_SIZE (TRACE_SP1_SIZE - TRACE_SP1_HEADER_SIZE)
+#define TRACE_SP1_MAX_POINTS (TRACE_SP1_DATA_SIZE / TRACE_SP1_ITEM_SIZE)
+#endif
+
+#define TRACE_ISP_HEADER_ADDR (TRACE_ISP_ADDR)
+#define TRACE_ISP_HEADER_SIZE (sizeof(struct trace_header_t))
+#ifndef ISP2401
+#define TRACE_ISP_ITEM_SIZE (sizeof(struct trace_item_t))
+#define TRACE_ISP_DATA_ADDR (TRACE_ISP_HEADER_ADDR + TRACE_ISP_HEADER_SIZE)
+#define TRACE_ISP_DATA_SIZE (TRACE_ISP_SIZE - TRACE_ISP_HEADER_SIZE)
+#define TRACE_ISP_MAX_POINTS (TRACE_ISP_DATA_SIZE / TRACE_ISP_ITEM_SIZE)
+
+#else
+#define TRACE_ISP_ITEM_SIZE (sizeof(struct trace_item_t))
+#define TRACE_ISP_DATA_ADDR (TRACE_ISP_HEADER_ADDR + TRACE_ISP_HEADER_SIZE)
+#define TRACE_ISP_DATA_SIZE (TRACE_ISP_SIZE - TRACE_ISP_HEADER_SIZE)
+#define TRACE_ISP_MAX_POINTS (TRACE_ISP_DATA_SIZE / TRACE_ISP_ITEM_SIZE)
+#endif
+
+#ifndef ISP2401
+/* offsets for master_port read/write */
+#define HDR_HDR_OFFSET 0 /* offset of the header */
+#define HDR_COMMAND_OFFSET 4 /* offset of the command */
+#define HDR_DATA_OFFSET 8 /* offset of the command data */
+#define HDR_DEBUG_SIGNATURE_OFFSET 16 /* offset of the param debug signature in trace_header_t */
+#define HDR_DEBUG_POINTER_OFFSET 20 /* offset of the param debug pointer in trace_header_t */
+#endif
+
+/* common majors */
+#ifdef ISP2401
+/* SP0 */
+#endif
+#define MAJOR_MAIN 1
+#define MAJOR_ISP_STAGE_ENTRY 2
+#define MAJOR_DMA_PRXY 3
+#define MAJOR_START_ISP 4
+#ifdef ISP2401
+/* SP1 */
+#define MAJOR_OBSERVER_ISP0_EVENT 21
+#define MAJOR_OBSERVER_OUTPUT_FORM_EVENT 22
+#define MAJOR_OBSERVER_OUTPUT_SCAL_EVENT 23
+#define MAJOR_OBSERVER_IF_ACK 24
+#define MAJOR_OBSERVER_SP0_EVENT 25
+#define MAJOR_OBSERVER_SP_TERMINATE_EVENT 26
+#define MAJOR_OBSERVER_DMA_ACK 27
+#define MAJOR_OBSERVER_ACC_ACK 28
+#endif
+
+#define DEBUG_PTR_SIGNATURE 0xABCD /* signature for the debug parameter pointer */
+
+/* command codes (1st byte) */
+typedef enum {
+ CMD_SET_ONE_MAJOR = 1, /* mask in one major. 2nd byte in the command is the major code */
+ CMD_UNSET_ONE_MAJOR = 2, /* mask out one major. 2nd byte in the command is the major code */
+ CMD_SET_ALL_MAJORS = 3, /* set the major print mask. the full mask is in the data DWORD */
+ CMD_SET_VERBOSITY = 4 /* set verbosity level */
+} DBG_commands;
+
+/* command signature */
+#define CMD_SIGNATURE 0xAABBCC00
+
+/* shared macros in traces infrastructure */
+/* increment the pointer cyclically */
+#define DBG_NEXT_ITEM(x, max_items) (((x+1) >= max_items) ? 0 : x+1)
+#define DBG_PREV_ITEM(x, max_items) ((x) ? x-1 : max_items-1)
+
+#define FIELD_MASK(width) (((1 << (width)) - 1))
+#define FIELD_PACK(value,mask,offset) (((value) & (mask)) << (offset))
+#define FIELD_UNPACK(value,mask,offset) (((value) >> (offset)) & (mask))
+
+
+#define FIELD_VALUE_OFFSET (0)
+#define FIELD_VALUE_WIDTH (16)
+#define FIELD_VALUE_MASK FIELD_MASK(FIELD_VALUE_WIDTH)
+#define FIELD_VALUE_PACK(f) FIELD_PACK(f,FIELD_VALUE_MASK,FIELD_VALUE_OFFSET)
+#ifndef ISP2401
+#define FIELD_VALUE_UNPACK(f) FIELD_UNPACK(f,FIELD_VALUE_MASK,FIELD_VALUE_OFFSET)
+#else
+#define FIELD_VALUE_UNPACK(f) FIELD_UNPACK(f,FIELD_VALUE_MASK,FIELD_VALUE_OFFSET)
+#endif
+
+#define FIELD_MINOR_OFFSET (FIELD_VALUE_OFFSET + FIELD_VALUE_WIDTH)
+#define FIELD_MINOR_WIDTH (8)
+#define FIELD_MINOR_MASK FIELD_MASK(FIELD_MINOR_WIDTH)
+#define FIELD_MINOR_PACK(f) FIELD_PACK(f,FIELD_MINOR_MASK,FIELD_MINOR_OFFSET)
+#ifndef ISP2401
+#define FIELD_MINOR_UNPACK(f) FIELD_UNPACK(f,FIELD_MINOR_MASK,FIELD_MINOR_OFFSET)
+#else
+#define FIELD_MINOR_UNPACK(f) FIELD_UNPACK(f,FIELD_MINOR_MASK,FIELD_MINOR_OFFSET)
+#endif
+
+#define FIELD_MAJOR_OFFSET (FIELD_MINOR_OFFSET + FIELD_MINOR_WIDTH)
+#define FIELD_MAJOR_WIDTH (5)
+#define FIELD_MAJOR_MASK FIELD_MASK(FIELD_MAJOR_WIDTH)
+#define FIELD_MAJOR_PACK(f) FIELD_PACK(f,FIELD_MAJOR_MASK,FIELD_MAJOR_OFFSET)
+#ifndef ISP2401
+#define FIELD_MAJOR_UNPACK(f) FIELD_UNPACK(f,FIELD_MAJOR_MASK,FIELD_MAJOR_OFFSET)
+#else
+#define FIELD_MAJOR_UNPACK(f) FIELD_UNPACK(f,FIELD_MAJOR_MASK,FIELD_MAJOR_OFFSET)
+#endif
+
+#ifndef ISP2401
+#define FIELD_FORMAT_OFFSET (FIELD_MAJOR_OFFSET + FIELD_MAJOR_WIDTH)
+#define FIELD_FORMAT_WIDTH (3)
+#define FIELD_FORMAT_MASK FIELD_MASK(FIELD_FORMAT_WIDTH)
+#define FIELD_FORMAT_PACK(f) FIELD_PACK(f,FIELD_FORMAT_MASK,FIELD_FORMAT_OFFSET)
+#define FIELD_FORMAT_UNPACK(f) FIELD_UNPACK(f,FIELD_FORMAT_MASK,FIELD_FORMAT_OFFSET)
+#else
+/* for quick traces - only insertion, compatible with the regular point */
+#define FIELD_FULL_MAJOR_WIDTH (8)
+#define FIELD_FULL_MAJOR_MASK FIELD_MASK(FIELD_FULL_MAJOR_WIDTH)
+#define FIELD_FULL_MAJOR_PACK(f) FIELD_PACK(f,FIELD_FULL_MAJOR_MASK,FIELD_MAJOR_OFFSET)
+
+/* The following two fields are used only when the FIELD_TID value is 111b.
+ * That means we don't want to use a thread ID, but a format. In this case,
+ * the two MSBs of the major field indicate the format.
+ */
+#define FIELD_MAJOR_W_FMT_OFFSET FIELD_MAJOR_OFFSET
+#define FIELD_MAJOR_W_FMT_WIDTH (3)
+#define FIELD_MAJOR_W_FMT_MASK FIELD_MASK(FIELD_MAJOR_W_FMT_WIDTH)
+#define FIELD_MAJOR_W_FMT_PACK(f) FIELD_PACK(f,FIELD_MAJOR_W_FMT_MASK,FIELD_MAJOR_W_FMT_OFFSET)
+#define FIELD_MAJOR_W_FMT_UNPACK(f) FIELD_UNPACK(f,FIELD_MAJOR_W_FMT_MASK,FIELD_MAJOR_W_FMT_OFFSET)
+
+#define FIELD_FORMAT_OFFSET (FIELD_MAJOR_OFFSET + FIELD_MAJOR_W_FMT_WIDTH)
+#define FIELD_FORMAT_WIDTH (2)
+#define FIELD_FORMAT_MASK FIELD_MASK(FIELD_MAJOR_W_FMT_WIDTH)
+#define FIELD_FORMAT_PACK(f) FIELD_PACK(f,FIELD_FORMAT_MASK,FIELD_FORMAT_OFFSET)
+#define FIELD_FORMAT_UNPACK(f) FIELD_UNPACK(f,FIELD_FORMAT_MASK,FIELD_FORMAT_OFFSET)
+
+#define FIELD_TID_SEL_FORMAT_PAT (7)
+
+#define FIELD_TID_OFFSET (FIELD_MAJOR_OFFSET + FIELD_MAJOR_WIDTH)
+#define FIELD_TID_WIDTH (3)
+#define FIELD_TID_MASK FIELD_MASK(FIELD_TID_WIDTH)
+#define FIELD_TID_PACK(f) FIELD_PACK(f,FIELD_TID_MASK,FIELD_TID_OFFSET)
+#define FIELD_TID_UNPACK(f) FIELD_UNPACK(f,FIELD_TID_MASK,FIELD_TID_OFFSET)
+#endif
+
+#define FIELD_VALUE_24_OFFSET (0)
+#define FIELD_VALUE_24_WIDTH (24)
+#ifndef ISP2401
+#define FIELD_VALUE_24_MASK FIELD_MASK(FIELD_VALUE_24_WIDTH)
+#else
+#define FIELD_VALUE_24_MASK FIELD_MASK(FIELD_VALUE_24_WIDTH)
+#endif
+#define FIELD_VALUE_24_PACK(f) FIELD_PACK(f,FIELD_VALUE_24_MASK,FIELD_VALUE_24_OFFSET)
+#define FIELD_VALUE_24_UNPACK(f) FIELD_UNPACK(f,FIELD_VALUE_24_MASK,FIELD_VALUE_24_OFFSET)
+
+#ifndef ISP2401
+#define PACK_TRACEPOINT(format,major, minor, value) \
+ (FIELD_FORMAT_PACK(format) | FIELD_MAJOR_PACK(major) | FIELD_MINOR_PACK(minor) | FIELD_VALUE_PACK(value))
+#else
+#define PACK_TRACEPOINT(tid, major, minor, value) \
+ (FIELD_TID_PACK(tid) | FIELD_MAJOR_PACK(major) | FIELD_MINOR_PACK(minor) | FIELD_VALUE_PACK(value))
+
+#define PACK_QUICK_TRACEPOINT(major, minor) \
+ (FIELD_FULL_MAJOR_PACK(major) | FIELD_MINOR_PACK(minor))
+
+#define PACK_FORMATTED_TRACEPOINT(format, major, minor, value) \
+ (FIELD_TID_PACK(FIELD_TID_SEL_FORMAT_PAT) | FIELD_FORMAT_PACK(format) | FIELD_MAJOR_PACK(major) | FIELD_MINOR_PACK(minor) | FIELD_VALUE_PACK(value))
+#endif
+
+#ifndef ISP2401
+#define PACK_TRACE_VALUE24(format, major, value) \
+ (FIELD_FORMAT_PACK(format) | FIELD_MAJOR_PACK(major) | FIELD_VALUE_24_PACK(value))
+#else
+#define PACK_TRACE_VALUE24(major, value) \
+ (FIELD_TID_PACK(FIELD_TID_SEL_FORMAT_PAT) | FIELD_MAJOR_PACK(major) | FIELD_VALUE_24_PACK(value))
+#endif
+
+#endif /* __CSS_TRACE_H_ */
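For the non-ISP2401 layout above, a trace word is value[15:0] | minor[23:16] | major[28:24] | format[31:29]. The following standalone sketch shows how the generic FIELD_MASK/FIELD_PACK/FIELD_UNPACK helpers compose into and decompose such a word; it re-states the field offsets locally and uses example values (major = MAJOR_DMA_PRXY), so it illustrates the packing scheme rather than being a drop-in use of this header.

#include <stdint.h>
#include <stdio.h>

/* Same helpers as in css_trace.h */
#define FIELD_MASK(width)                 (((1 << (width)) - 1))
#define FIELD_PACK(value, mask, offset)   (((value) & (mask)) << (offset))
#define FIELD_UNPACK(value, mask, offset) (((value) >> (offset)) & (mask))

int main(void)
{
	/* Offsets per the non-ISP2401 layout: value@0/16b, minor@16/8b,
	 * major@24/5b, format@29/3b. */
	uint32_t word = FIELD_PACK(1u, FIELD_MASK(3), 29) |     /* format */
			FIELD_PACK(3u, FIELD_MASK(5), 24) |     /* major = MAJOR_DMA_PRXY */
			FIELD_PACK(7u, FIELD_MASK(8), 16) |     /* minor */
			FIELD_PACK(0x1234u, FIELD_MASK(16), 0); /* value */

	printf("major=%u minor=%u value=0x%x\n",
	       (unsigned)FIELD_UNPACK(word, FIELD_MASK(5), 24),
	       (unsigned)FIELD_UNPACK(word, FIELD_MASK(8), 16),
	       (unsigned)FIELD_UNPACK(word, FIELD_MASK(16), 0));
	return 0;
}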
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/debug_global.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/debug_global.h
new file mode 100644
index 000000000000..076c4ba76175
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/debug_global.h
@@ -0,0 +1,83 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __DEBUG_GLOBAL_H_INCLUDED__
+#define __DEBUG_GLOBAL_H_INCLUDED__
+
+#include <type_support.h>
+
+#define DEBUG_BUF_SIZE 1024
+#define DEBUG_BUF_MASK (DEBUG_BUF_SIZE - 1)
+
+#define DEBUG_DATA_ENABLE_ADDR 0x00
+#define DEBUG_DATA_BUF_MODE_ADDR 0x04
+#define DEBUG_DATA_HEAD_ADDR 0x08
+#define DEBUG_DATA_TAIL_ADDR 0x0C
+#define DEBUG_DATA_BUF_ADDR 0x10
+
+#define DEBUG_DATA_ENABLE_DDR_ADDR 0x00
+#define DEBUG_DATA_BUF_MODE_DDR_ADDR HIVE_ISP_DDR_WORD_BYTES
+#define DEBUG_DATA_HEAD_DDR_ADDR (2 * HIVE_ISP_DDR_WORD_BYTES)
+#define DEBUG_DATA_TAIL_DDR_ADDR (3 * HIVE_ISP_DDR_WORD_BYTES)
+#define DEBUG_DATA_BUF_DDR_ADDR (4 * HIVE_ISP_DDR_WORD_BYTES)
+
+#define DEBUG_BUFFER_ISP_DMEM_ADDR 0x0
+
+/*
+ * Enable HAS_WATCHDOG_SP_THREAD_DEBUG for additional SP thread and
+ * pipe information on watchdog output
+ * #undef HAS_WATCHDOG_SP_THREAD_DEBUG
+ * #define HAS_WATCHDOG_SP_THREAD_DEBUG
+ */
+
+
+/*
+ * The linear buffer mode will accept data until the first
+ * overflow and then stop accepting new data.
+ * The circular buffer mode will accept data if there is room
+ * and discard it if the buffer is full.
+ */
+typedef enum {
+ DEBUG_BUFFER_MODE_LINEAR = 0,
+ DEBUG_BUFFER_MODE_CIRCULAR,
+ N_DEBUG_BUFFER_MODE
+} debug_buf_mode_t;
+
+struct debug_data_s {
+ uint32_t enable;
+ uint32_t bufmode;
+ uint32_t head;
+ uint32_t tail;
+ uint32_t buf[DEBUG_BUF_SIZE];
+};
+
+/* thread.sp.c doesn't have a notion of HIVE_ISP_DDR_WORD_BYTES;
+   still, one point of control is needed for debug purposes */
+
+#ifdef HIVE_ISP_DDR_WORD_BYTES
+struct debug_data_ddr_s {
+ uint32_t enable;
+ int8_t padding1[HIVE_ISP_DDR_WORD_BYTES-sizeof(uint32_t)];
+ uint32_t bufmode;
+ int8_t padding2[HIVE_ISP_DDR_WORD_BYTES-sizeof(uint32_t)];
+ uint32_t head;
+ int8_t padding3[HIVE_ISP_DDR_WORD_BYTES-sizeof(uint32_t)];
+ uint32_t tail;
+ int8_t padding4[HIVE_ISP_DDR_WORD_BYTES-sizeof(uint32_t)];
+ uint32_t buf[DEBUG_BUF_SIZE];
+};
+#endif
+
+#endif /* __DEBUG_GLOBAL_H_INCLUDED__ */
+
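struct debug_data_s above is a ring buffer: head is the consumer index, tail the producer index, and since DEBUG_BUF_SIZE is a power of two the wrap-around is done by AND-ing with DEBUG_BUF_MASK instead of a modulo (the same masking the host-side dequeue code uses later in this patch). A minimal host-only sketch of that indexing, with no SP/ISP memory accessors involved:

#include <stdbool.h>
#include <stdint.h>

#define DEBUG_BUF_SIZE 1024
#define DEBUG_BUF_MASK (DEBUG_BUF_SIZE - 1)

struct ring {
	uint32_t head;                  /* consumer index */
	uint32_t tail;                  /* producer index */
	uint32_t buf[DEBUG_BUF_SIZE];
};

bool ring_empty(const struct ring *r)
{
	return r->head == r->tail;
}

/* Producer side; a linear-mode buffer would stop accepting data at the first
 * overflow, while circular mode keeps wrapping as shown here. */
void ring_push(struct ring *r, uint32_t v)
{
	r->buf[r->tail] = v;
	r->tail = (r->tail + 1) & DEBUG_BUF_MASK;
}

/* Consumer side, mirroring what debug_dequeue() does with the local copy. */
bool ring_pop(struct ring *r, uint32_t *v)
{
	if (ring_empty(r))
		return false;
	*v = r->buf[r->head];
	r->head = (r->head + 1) & DEBUG_BUF_MASK;
	return true;
}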
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/dma_global.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/dma_global.h
new file mode 100644
index 000000000000..60d6de1332cd
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/dma_global.h
@@ -0,0 +1,255 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __DMA_GLOBAL_H_INCLUDED__
+#define __DMA_GLOBAL_H_INCLUDED__
+
+#include <type_support.h>
+
+#define IS_DMA_VERSION_2
+
+#define HIVE_ISP_NUM_DMA_CONNS 3
+#define HIVE_ISP_NUM_DMA_CHANNELS 32
+
+#define N_DMA_CHANNEL_ID HIVE_ISP_NUM_DMA_CHANNELS
+
+#include "dma_v2_defs.h"
+
+/*
+ * Command token bit mappings
+ *
+ * transfer / config
+ * param id[4] channel id[5] cmd id[6]
+ * | b14 .. b11 | b10 ... b6 | b5 ... b0 |
+ *
+ *
+ * fast transfer:
+ * height[5] width[8] width[8] channel id[5] cmd id[6]
+ * | b31 .. b26 | b25 .. b18 | b17 .. b11 | b10 ... b6 | b5 ... b0 |
+ *
+ */
+
+#define _DMA_PACKING_SETUP_PARAM _DMA_V2_PACKING_SETUP_PARAM
+#define _DMA_HEIGHT_PARAM _DMA_V2_HEIGHT_PARAM
+#define _DMA_STRIDE_A_PARAM _DMA_V2_STRIDE_A_PARAM
+#define _DMA_ELEM_CROPPING_A_PARAM _DMA_V2_ELEM_CROPPING_A_PARAM
+#define _DMA_WIDTH_A_PARAM _DMA_V2_WIDTH_A_PARAM
+#define _DMA_STRIDE_B_PARAM _DMA_V2_STRIDE_B_PARAM
+#define _DMA_ELEM_CROPPING_B_PARAM _DMA_V2_ELEM_CROPPING_B_PARAM
+#define _DMA_WIDTH_B_PARAM _DMA_V2_WIDTH_B_PARAM
+
+#define _DMA_ZERO_EXTEND _DMA_V2_ZERO_EXTEND
+#define _DMA_SIGN_EXTEND _DMA_V2_SIGN_EXTEND
+
+
+typedef unsigned int dma_channel;
+
+typedef enum {
+ dma_isp_to_bus_connection = HIVE_DMA_ISP_BUS_CONN,
+ dma_isp_to_ddr_connection = HIVE_DMA_ISP_DDR_CONN,
+ dma_bus_to_ddr_connection = HIVE_DMA_BUS_DDR_CONN,
+} dma_connection;
+
+typedef enum {
+ dma_zero_extension = _DMA_ZERO_EXTEND,
+ dma_sign_extension = _DMA_SIGN_EXTEND
+} dma_extension;
+
+
+#define DMA_PROP_SHIFT(val, param) ((val) << _DMA_V2_ ## param ## _IDX)
+#define DMA_PROP_MASK(param) ((1U << _DMA_V2_ ## param ## _BITS)-1)
+#define DMA_PACK(val, param) DMA_PROP_SHIFT((val) & DMA_PROP_MASK(param), param)
+
+#define DMA_PACK_COMMAND(cmd) DMA_PACK(cmd, CMD)
+#define DMA_PACK_CHANNEL(ch) DMA_PACK(ch, CHANNEL)
+#define DMA_PACK_PARAM(par) DMA_PACK(par, PARAM)
+#define DMA_PACK_EXTENSION(ext) DMA_PACK(ext, EXTENSION)
+#define DMA_PACK_LEFT_CROPPING(lc) DMA_PACK(lc, LEFT_CROPPING)
+#define DMA_PACK_WIDTH_A(w) DMA_PACK(w, SPEC_DEV_A_XB)
+#define DMA_PACK_WIDTH_B(w) DMA_PACK(w, SPEC_DEV_B_XB)
+#define DMA_PACK_HEIGHT(h) DMA_PACK(h, SPEC_YB)
+
+#define DMA_PACK_CMD_CHANNEL(cmd, ch) (DMA_PACK_COMMAND(cmd) | DMA_PACK_CHANNEL(ch))
+#define DMA_PACK_SETUP(conn, ext) ((conn) | DMA_PACK_EXTENSION(ext))
+#define DMA_PACK_CROP_ELEMS(elems, crop) ((elems) | DMA_PACK_LEFT_CROPPING(crop))
+
+#define hive_dma_snd(dma_id, token) OP_std_snd(dma_id, (unsigned int)(token))
+
+#define DMA_PACK_BLOCK_CMD(cmd, ch, width_a, width_b, height) \
+ (DMA_PACK_COMMAND(cmd) | \
+ DMA_PACK_CHANNEL(ch) | \
+ DMA_PACK_WIDTH_A(width_a) | \
+ DMA_PACK_WIDTH_B(width_b) | \
+ DMA_PACK_HEIGHT(height))
+
+#define hive_dma_move_data(dma_id, read, channel, addr_a, addr_b, to_is_var, from_is_var) \
+{ \
+ hive_dma_snd(dma_id, DMA_PACK(_DMA_V2_SET_CRUN_COMMAND, CMD)); \
+ hive_dma_snd(dma_id, DMA_PACK_CMD_CHANNEL(read?_DMA_V2_MOVE_B2A_COMMAND:_DMA_V2_MOVE_A2B_COMMAND, channel)); \
+ hive_dma_snd(dma_id, read?(unsigned)(addr_b):(unsigned)(addr_a)); \
+ hive_dma_snd(dma_id, read?(unsigned)(addr_a):(unsigned)(addr_b)); \
+ hive_dma_snd(dma_id, to_is_var); \
+ hive_dma_snd(dma_id, from_is_var); \
+}
+#define hive_dma_move_data_no_ack(dma_id, read, channel, addr_a, addr_b, to_is_var, from_is_var) \
+{ \
+ hive_dma_snd(dma_id, DMA_PACK(_DMA_V2_SET_CRUN_COMMAND, CMD)); \
+ hive_dma_snd(dma_id, DMA_PACK_CMD_CHANNEL(read?_DMA_V2_NO_ACK_MOVE_B2A_NO_SYNC_CHK_COMMAND:_DMA_V2_NO_ACK_MOVE_A2B_NO_SYNC_CHK_COMMAND, channel)); \
+ hive_dma_snd(dma_id, read?(unsigned)(addr_b):(unsigned)(addr_a)); \
+ hive_dma_snd(dma_id, read?(unsigned)(addr_a):(unsigned)(addr_b)); \
+ hive_dma_snd(dma_id, to_is_var); \
+ hive_dma_snd(dma_id, from_is_var); \
+}
+
+#define hive_dma_move_b2a_data(dma_id, channel, to_addr, from_addr, to_is_var, from_is_var) \
+{ \
+ hive_dma_move_data(dma_id, true, channel, to_addr, from_addr, to_is_var, from_is_var) \
+}
+
+#define hive_dma_move_a2b_data(dma_id, channel, from_addr, to_addr, from_is_var, to_is_var) \
+{ \
+ hive_dma_move_data(dma_id, false, channel, from_addr, to_addr, from_is_var, to_is_var) \
+}
+
+#define hive_dma_set_data(dma_id, channel, address, value, is_var) \
+{ \
+ hive_dma_snd(dma_id, DMA_PACK(_DMA_V2_SET_CRUN_COMMAND, CMD)); \
+ hive_dma_snd(dma_id, DMA_PACK_CMD_CHANNEL(_DMA_V2_INIT_A_COMMAND, channel)); \
+ hive_dma_snd(dma_id, value); \
+ hive_dma_snd(dma_id, address); \
+ hive_dma_snd(dma_id, is_var); \
+}
+
+#define hive_dma_clear_data(dma_id, channel, address, is_var) hive_dma_set_data(dma_id, channel, address, 0, is_var)
+
+#define hive_dma_configure(dma_id, channel, connection, extension, height, \
+ stride_A, elems_A, cropping_A, width_A, \
+ stride_B, elems_B, cropping_B, width_B) \
+{ \
+ hive_dma_snd(dma_id, DMA_PACK_CMD_CHANNEL(_DMA_V2_CONFIG_CHANNEL_COMMAND, channel)); \
+ hive_dma_snd(dma_id, DMA_PACK_SETUP(connection, extension)); \
+ hive_dma_snd(dma_id, stride_A); \
+ hive_dma_snd(dma_id, DMA_PACK_CROP_ELEMS(elems_A, cropping_A)); \
+ hive_dma_snd(dma_id, width_A); \
+ hive_dma_snd(dma_id, stride_B); \
+ hive_dma_snd(dma_id, DMA_PACK_CROP_ELEMS(elems_B, cropping_B)); \
+ hive_dma_snd(dma_id, width_B); \
+ hive_dma_snd(dma_id, height); \
+}
+
+#define hive_dma_execute(dma_id, channel, cmd, to_addr, from_addr_value, to_is_var, from_is_var) \
+{ \
+ hive_dma_snd(dma_id, DMA_PACK(_DMA_V2_SET_CRUN_COMMAND, CMD)); \
+ hive_dma_snd(dma_id, DMA_PACK_CMD_CHANNEL(cmd, channel)); \
+ hive_dma_snd(dma_id, to_addr); \
+ hive_dma_snd(dma_id, from_addr_value); \
+ hive_dma_snd(dma_id, to_is_var); \
+ if ((cmd & DMA_CLEAR_CMDBIT) == 0) { \
+ hive_dma_snd(dma_id, from_is_var); \
+ } \
+}
+
+#define hive_dma_configure_fast(dma_id, channel, connection, extension, elems_A, elems_B) \
+{ \
+ hive_dma_snd(dma_id, DMA_PACK_CMD_CHANNEL(_DMA_V2_CONFIG_CHANNEL_COMMAND, channel)); \
+ hive_dma_snd(dma_id, DMA_PACK_SETUP(connection, extension)); \
+ hive_dma_snd(dma_id, 0); \
+ hive_dma_snd(dma_id, DMA_PACK_CROP_ELEMS(elems_A, 0)); \
+ hive_dma_snd(dma_id, 0); \
+ hive_dma_snd(dma_id, 0); \
+ hive_dma_snd(dma_id, DMA_PACK_CROP_ELEMS(elems_B, 0)); \
+ hive_dma_snd(dma_id, 0); \
+ hive_dma_snd(dma_id, 1); \
+}
+
+#define hive_dma_set_parameter(dma_id, channel, param, value) \
+{ \
+ hive_dma_snd(dma_id, _DMA_V2_SET_CHANNEL_PARAM_COMMAND | DMA_PACK_CHANNEL(channel) | DMA_PACK_PARAM(param)); \
+ hive_dma_snd(dma_id, value); \
+}
+
+#define DMA_SPECIFIC_CMDBIT 0x01
+#define DMA_CHECK_CMDBIT 0x02
+#define DMA_RW_CMDBIT 0x04
+#define DMA_CLEAR_CMDBIT 0x08
+#define DMA_ACK_CMDBIT 0x10
+#define DMA_CFG_CMDBIT 0x20
+#define DMA_PARAM_CMDBIT 0x01
+
+/* Write complete check not necessary if there's no ack */
+#define DMA_NOACK_CMD (DMA_ACK_CMDBIT | DMA_CHECK_CMDBIT)
+#define DMA_CFG_CMD (DMA_CFG_CMDBIT)
+#define DMA_CFGPARAM_CMD (DMA_CFG_CMDBIT | DMA_PARAM_CMDBIT)
+
+#define DMA_CMD_NEEDS_ACK(cmd) ((cmd & DMA_NOACK_CMD) == 0)
+#define DMA_CMD_IS_TRANSFER(cmd) ((cmd & DMA_CFG_CMDBIT) == 0)
+#define DMA_CMD_IS_WR(cmd) ((cmd & DMA_RW_CMDBIT) != 0)
+#define DMA_CMD_IS_RD(cmd) ((cmd & DMA_RW_CMDBIT) == 0)
+#define DMA_CMD_IS_CLR(cmd) ((cmd & DMA_CLEAR_CMDBIT) != 0)
+#define DMA_CMD_IS_CFG(cmd) ((cmd & DMA_CFG_CMDBIT) != 0)
+#define DMA_CMD_IS_PARAMCFG(cmd) ((cmd & DMA_CFGPARAM_CMD) == DMA_CFGPARAM_CMD)
+
+/* As a matter of convention */
+#define DMA_TRANSFER_READ DMA_TRANSFER_B2A
+#define DMA_TRANSFER_WRITE DMA_TRANSFER_A2B
+/* store/load from the PoV of the system (memory) */
+#define DMA_TRANSFER_STORE DMA_TRANSFER_B2A
+#define DMA_TRANSFER_LOAD DMA_TRANSFER_A2B
+#define DMA_TRANSFER_CLEAR DMA_TRANSFER_CLEAR_A
+
+typedef enum {
+ DMA_TRANSFER_CLEAR_A = DMA_CLEAR_CMDBIT, /* 8 */
+ DMA_TRANSFER_CLEAR_B = DMA_CLEAR_CMDBIT | DMA_RW_CMDBIT, /* 12 */
+ DMA_TRANSFER_A2B = DMA_RW_CMDBIT, /* 4 */
+ DMA_TRANSFER_B2A = 0, /* 0 */
+ DMA_TRANSFER_CLEAR_A_NOACK = DMA_CLEAR_CMDBIT | DMA_NOACK_CMD, /* 26 */
+ DMA_TRANSFER_CLEAR_B_NOACK = DMA_CLEAR_CMDBIT | DMA_RW_CMDBIT | DMA_NOACK_CMD, /* 30 */
+ DMA_TRANSFER_A2B_NOACK = DMA_RW_CMDBIT | DMA_NOACK_CMD, /* 22 */
+ DMA_TRANSFER_B2A_NOACK = DMA_NOACK_CMD, /* 18 */
+ DMA_FASTTRANSFER_CLEAR_A = DMA_CLEAR_CMDBIT | DMA_SPECIFIC_CMDBIT,
+ DMA_FASTTRANSFER_CLEAR_B = DMA_CLEAR_CMDBIT | DMA_RW_CMDBIT | DMA_SPECIFIC_CMDBIT,
+ DMA_FASTTRANSFER_A2B = DMA_RW_CMDBIT | DMA_SPECIFIC_CMDBIT,
+ DMA_FASTTRANSFER_B2A = DMA_SPECIFIC_CMDBIT,
+ DMA_FASTTRANSFER_CLEAR_A_NOACK = DMA_CLEAR_CMDBIT | DMA_NOACK_CMD | DMA_SPECIFIC_CMDBIT,
+ DMA_FASTTRANSFER_CLEAR_B_NOACK = DMA_CLEAR_CMDBIT | DMA_RW_CMDBIT | DMA_NOACK_CMD | DMA_SPECIFIC_CMDBIT,
+ DMA_FASTTRANSFER_A2B_NOACK = DMA_RW_CMDBIT | DMA_NOACK_CMD | DMA_SPECIFIC_CMDBIT,
+ DMA_FASTTRANSFER_B2A_NOACK = DMA_NOACK_CMD | DMA_SPECIFIC_CMDBIT,
+} dma_transfer_type_t;
+
+typedef enum {
+ DMA_CONFIG_SETUP = _DMA_V2_PACKING_SETUP_PARAM,
+ DMA_CONFIG_HEIGHT = _DMA_V2_HEIGHT_PARAM,
+ DMA_CONFIG_STRIDE_A_ = _DMA_V2_STRIDE_A_PARAM,
+ DMA_CONFIG_CROP_ELEM_A = _DMA_V2_ELEM_CROPPING_A_PARAM,
+ DMA_CONFIG_WIDTH_A = _DMA_V2_WIDTH_A_PARAM,
+ DMA_CONFIG_STRIDE_B_ = _DMA_V2_STRIDE_B_PARAM,
+ DMA_CONFIG_CROP_ELEM_B = _DMA_V2_ELEM_CROPPING_B_PARAM,
+ DMA_CONFIG_WIDTH_B = _DMA_V2_WIDTH_B_PARAM,
+} dma_config_type_t;
+
+struct dma_port_config {
+ uint8_t crop, elems;
+ uint16_t width;
+ uint32_t stride;
+};
+
+/* Descriptor for dma configuration */
+struct dma_channel_config {
+ uint8_t connection;
+ uint8_t extension;
+ uint8_t height;
+ struct dma_port_config a, b;
+};
+
+#endif /* __DMA_GLOBAL_H_INCLUDED__ */
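The DMA_PACK_* macros above build command tokens according to the bit mapping documented at the top of this header (cmd id in bits 5..0, channel id in bits 10..6, param id in bits 14..11), with the actual index/width constants coming from dma_v2_defs.h. The sketch below re-creates that packing with the bit positions written out explicitly; the positions are assumed from the comment rather than read from dma_v2_defs.h, and the example command/channel/param values are arbitrary.

#include <stdio.h>

/* Assumed from the "Command token bit mappings" comment above. */
#define CMD_IDX       0
#define CMD_BITS      6
#define CHANNEL_IDX   6
#define CHANNEL_BITS  5
#define PARAM_IDX     11
#define PARAM_BITS    4

#define PROP_MASK(bits)      ((1U << (bits)) - 1)
#define PACK(val, idx, bits) (((val) & PROP_MASK(bits)) << (idx))

int main(void)
{
	/* cmd = 3, channel = 12, param = 5 (arbitrary example values) */
	unsigned int token = PACK(3u, CMD_IDX, CMD_BITS) |
			     PACK(12u, CHANNEL_IDX, CHANNEL_BITS) |
			     PACK(5u, PARAM_IDX, PARAM_BITS);

	printf("token = 0x%x\n", token);	/* prints 0x2b03 */
	return 0;
}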
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/event_fifo_global.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/event_fifo_global.h
new file mode 100644
index 000000000000..4df7a405cdcf
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/event_fifo_global.h
@@ -0,0 +1,20 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __EVENT_FIFO_GLOBAL_H
+#define __EVENT_FIFO_GLOBAL_H
+
+/*#error "event_global.h: No global event information permitted"*/
+
+#endif /* __EVENT_FIFO_GLOBAL_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/fifo_monitor_global.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/fifo_monitor_global.h
new file mode 100644
index 000000000000..f43bf0ad2468
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/fifo_monitor_global.h
@@ -0,0 +1,32 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __FIFO_MONITOR_GLOBAL_H_INCLUDED__
+#define __FIFO_MONITOR_GLOBAL_H_INCLUDED__
+
+#define IS_FIFO_MONITOR_VERSION_2
+
+/*
+#define HIVE_ISP_CSS_STREAM_SWITCH_NONE 0
+#define HIVE_ISP_CSS_STREAM_SWITCH_SP 1
+#define HIVE_ISP_CSS_STREAM_SWITCH_ISP 2
+ *
+ * Actually, "HIVE_ISP_CSS_STREAM_SWITCH_SP = 1" and "HIVE_ISP_CSS_STREAM_SWITCH_ISP = 0"
+ * in "hive_isp_css_stream_switch_hrt.h"
+ */
+#define HIVE_ISP_CSS_STREAM_SWITCH_ISP 0
+#define HIVE_ISP_CSS_STREAM_SWITCH_SP 1
+#define HIVE_ISP_CSS_STREAM_SWITCH_NONE 2
+
+#endif /* __FIFO_MONITOR_GLOBAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/gdc_global.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/gdc_global.h
new file mode 100644
index 000000000000..4505775b224c
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/gdc_global.h
@@ -0,0 +1,90 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __GDC_GLOBAL_H_INCLUDED__
+#define __GDC_GLOBAL_H_INCLUDED__
+
+#define IS_GDC_VERSION_2
+
+#include <type_support.h>
+#include "gdc_v2_defs.h"
+
+/*
+ * Storage addresses for packed data transfer
+ */
+#define GDC_PARAM_ICX_LEFT_ROUNDED_IDX 0
+#define GDC_PARAM_OXDIM_FLOORED_IDX 1
+#define GDC_PARAM_OXDIM_LAST_IDX 2
+#define GDC_PARAM_WOIX_LAST_IDX 3
+#define GDC_PARAM_IY_TOPLEFT_IDX 4
+#define GDC_PARAM_CHUNK_CNT_IDX 5
+/*#define GDC_PARAM_ELEMENTS_PER_XMEM_ADDR_IDX 6 */ /* Derived from bpp */
+#define GDC_PARAM_BPP_IDX 6
+#define GDC_PARAM_BLOCK_HEIGHT_IDX 7
+/*#define GDC_PARAM_DMA_CHANNEL_STRIDE_A_IDX 8*/ /* The DMA stride == the GDC buffer stride */
+#define GDC_PARAM_WOIX_IDX 8
+#define GDC_PARAM_DMA_CHANNEL_STRIDE_B_IDX 9
+#define GDC_PARAM_DMA_CHANNEL_WIDTH_A_IDX 10
+#define GDC_PARAM_DMA_CHANNEL_WIDTH_B_IDX 11
+#define GDC_PARAM_VECTORS_PER_LINE_IN_IDX 12
+#define GDC_PARAM_VECTORS_PER_LINE_OUT_IDX 13
+#define GDC_PARAM_VMEM_IN_DIMY_IDX 14
+#define GDC_PARAM_COMMAND_IDX 15
+#define N_GDC_PARAM 16
+
+/* Because of the packed parameter transfer max(params) == max(fragments) */
+#define N_GDC_FRAGMENTS N_GDC_PARAM
+
+/* The GDC is capable of higher internal precision than the parameter data structures */
+#define HRT_GDC_COORD_SCALE_BITS 6
+#define HRT_GDC_COORD_SCALE (1 << HRT_GDC_COORD_SCALE_BITS)
+
+typedef enum {
+ GDC_CH0_ID = 0,
+ N_GDC_CHANNEL_ID
+} gdc_channel_ID_t;
+
+typedef enum {
+ gdc_8_bpp = 8,
+ gdc_10_bpp = 10,
+ gdc_12_bpp = 12,
+ gdc_14_bpp = 14
+} gdc_bits_per_pixel_t;
+
+typedef struct gdc_scale_param_mem_s {
+ uint16_t params[N_GDC_PARAM];
+ uint16_t ipx_start_array[N_GDC_PARAM];
+ uint16_t ibuf_offset[N_GDC_PARAM];
+ uint16_t obuf_offset[N_GDC_PARAM];
+} gdc_scale_param_mem_t;
+
+typedef struct gdc_warp_param_mem_s {
+ uint32_t origin_x;
+ uint32_t origin_y;
+ uint32_t in_addr_offset;
+ uint32_t in_block_width;
+ uint32_t in_block_height;
+ uint32_t p0_x;
+ uint32_t p0_y;
+ uint32_t p1_x;
+ uint32_t p1_y;
+ uint32_t p2_x;
+ uint32_t p2_y;
+ uint32_t p3_x;
+ uint32_t p3_y;
+ uint32_t padding[3];
+} gdc_warp_param_mem_t;
+
+
+#endif /* __GDC_GLOBAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/gp_device_global.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/gp_device_global.h
new file mode 100644
index 000000000000..30ad77059d93
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/gp_device_global.h
@@ -0,0 +1,85 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __GP_DEVICE_GLOBAL_H_INCLUDED__
+#define __GP_DEVICE_GLOBAL_H_INCLUDED__
+
+#define IS_GP_DEVICE_VERSION_2
+
+#define _REG_GP_IRQ_REQ0_ADDR 0x08
+#define _REG_GP_IRQ_REQ1_ADDR 0x0C
+/* The SP sends SW interrupt info to this register */
+#define _REG_GP_IRQ_REQUEST0_ADDR _REG_GP_IRQ_REQ0_ADDR
+#define _REG_GP_IRQ_REQUEST1_ADDR _REG_GP_IRQ_REQ1_ADDR
+
+/* The SP configures FIFO switches in these registers */
+#define _REG_GP_SWITCH_IF_ADDR 0x40
+#define _REG_GP_SWITCH_GDC1_ADDR 0x44
+#define _REG_GP_SWITCH_GDC2_ADDR 0x48
+/* @ INPUT_FORMATTER_BASE -> GP_DEVICE_BASE */
+#define _REG_GP_IFMT_input_switch_lut_reg0 0x00030800
+#define _REG_GP_IFMT_input_switch_lut_reg1 0x00030804
+#define _REG_GP_IFMT_input_switch_lut_reg2 0x00030808
+#define _REG_GP_IFMT_input_switch_lut_reg3 0x0003080C
+#define _REG_GP_IFMT_input_switch_lut_reg4 0x00030810
+#define _REG_GP_IFMT_input_switch_lut_reg5 0x00030814
+#define _REG_GP_IFMT_input_switch_lut_reg6 0x00030818
+#define _REG_GP_IFMT_input_switch_lut_reg7 0x0003081C
+#define _REG_GP_IFMT_input_switch_fsync_lut 0x00030820
+#define _REG_GP_IFMT_srst 0x00030824
+#define _REG_GP_IFMT_slv_reg_srst 0x00030828
+#define _REG_GP_IFMT_input_switch_ch_id_fmt_type 0x0003082C
+
+/* @ GP_DEVICE_BASE */
+#define _REG_GP_SYNCGEN_ENABLE_ADDR 0x00090000
+#define _REG_GP_SYNCGEN_FREE_RUNNING_ADDR 0x00090004
+#define _REG_GP_SYNCGEN_PAUSE_ADDR 0x00090008
+#define _REG_GP_NR_FRAMES_ADDR 0x0009000C
+#define _REG_GP_SYNGEN_NR_PIX_ADDR 0x00090010
+#define _REG_GP_SYNGEN_NR_LINES_ADDR 0x00090014
+#define _REG_GP_SYNGEN_HBLANK_CYCLES_ADDR 0x00090018
+#define _REG_GP_SYNGEN_VBLANK_CYCLES_ADDR 0x0009001C
+#define _REG_GP_ISEL_SOF_ADDR 0x00090020
+#define _REG_GP_ISEL_EOF_ADDR 0x00090024
+#define _REG_GP_ISEL_SOL_ADDR 0x00090028
+#define _REG_GP_ISEL_EOL_ADDR 0x0009002C
+#define _REG_GP_ISEL_LFSR_ENABLE_ADDR 0x00090030
+#define _REG_GP_ISEL_LFSR_ENABLE_B_ADDR 0x00090034
+#define _REG_GP_ISEL_LFSR_RESET_VALUE_ADDR 0x00090038
+#define _REG_GP_ISEL_TPG_ENABLE_ADDR 0x0009003C
+#define _REG_GP_ISEL_TPG_ENABLE_B_ADDR 0x00090040
+#define _REG_GP_ISEL_HOR_CNT_MASK_ADDR 0x00090044
+#define _REG_GP_ISEL_VER_CNT_MASK_ADDR 0x00090048
+#define _REG_GP_ISEL_XY_CNT_MASK_ADDR 0x0009004C
+#define _REG_GP_ISEL_HOR_CNT_DELTA_ADDR 0x00090050
+#define _REG_GP_ISEL_VER_CNT_DELTA_ADDR 0x00090054
+#define _REG_GP_ISEL_TPG_MODE_ADDR 0x00090058
+#define _REG_GP_ISEL_TPG_RED1_ADDR 0x0009005C
+#define _REG_GP_ISEL_TPG_GREEN1_ADDR 0x00090060
+#define _REG_GP_ISEL_TPG_BLUE1_ADDR 0x00090064
+#define _REG_GP_ISEL_TPG_RED2_ADDR 0x00090068
+#define _REG_GP_ISEL_TPG_GREEN2_ADDR 0x0009006C
+#define _REG_GP_ISEL_TPG_BLUE2_ADDR 0x00090070
+#define _REG_GP_ISEL_CH_ID_ADDR 0x00090074
+#define _REG_GP_ISEL_FMT_TYPE_ADDR 0x00090078
+#define _REG_GP_ISEL_DATA_SEL_ADDR 0x0009007C
+#define _REG_GP_ISEL_SBAND_SEL_ADDR 0x00090080
+#define _REG_GP_ISEL_SYNC_SEL_ADDR 0x00090084
+#define _REG_GP_SYNCGEN_HOR_CNT_ADDR 0x00090088
+#define _REG_GP_SYNCGEN_VER_CNT_ADDR 0x0009008C
+#define _REG_GP_SYNCGEN_FRAME_CNT_ADDR 0x00090090
+#define _REG_GP_SOFT_RESET_ADDR 0x00090094
+
+
+#endif /* __GP_DEVICE_GLOBAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/gp_timer_global.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/gp_timer_global.h
new file mode 100644
index 000000000000..ee636ad6c5b3
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/gp_timer_global.h
@@ -0,0 +1,33 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __GP_TIMER_GLOBAL_H_INCLUDED__
+#define __GP_TIMER_GLOBAL_H_INCLUDED__
+
+#include "hive_isp_css_defs.h" /*HIVE_GP_TIMER_SP_DMEM_ERROR_IRQ */
+
+/* from gp_timer_defs.h*/
+#define GP_TIMER_COUNT_TYPE_HIGH 0
+#define GP_TIMER_COUNT_TYPE_LOW 1
+#define GP_TIMER_COUNT_TYPE_POSEDGE 2
+#define GP_TIMER_COUNT_TYPE_NEGEDGE 3
+#define GP_TIMER_COUNT_TYPE_TYPES 4
+
+/* timer 3 is selected */
+#define GP_TIMER_SEL 3
+
+/*HIVE_GP_TIMER_SP_DMEM_ERROR_IRQ is selected*/
+#define GP_TIMER_SIGNAL_SELECT HIVE_GP_TIMER_SP_DMEM_ERROR_IRQ
+
+#endif /* __GP_TIMER_GLOBAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/gpio_global.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/gpio_global.h
new file mode 100644
index 000000000000..a82ca2a8cada
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/gpio_global.h
@@ -0,0 +1,45 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __GPIO_GLOBAL_H_INCLUDED__
+#define __GPIO_GLOBAL_H_INCLUDED__
+
+#define IS_GPIO_VERSION_1
+
+#include <gpio_block_defs.h>
+
+/* pqiao: the following part is only defined in hive_isp_css_defs.h in the FPGA system;
+ port it here
+*/
+
+/* GPIO pin defines */
+/*#define HIVE_GPIO_CAMERA_BOARD_RESET_PIN_NR 0
+#define HIVE_GPIO_LCD_CLOCK_SELECT_PIN_NR 7
+#define HIVE_GPIO_HDMI_CLOCK_SELECT_PIN_NR 8
+#define HIVE_GPIO_LCD_VERT_FLIP_PIN_NR 8
+#define HIVE_GPIO_LCD_HOR_FLIP_PIN_NR 9
+#define HIVE_GPIO_AS3683_GPIO_P0_PIN_NR 1
+#define HIVE_GPIO_AS3683_DATA_P1_PIN_NR 2
+#define HIVE_GPIO_AS3683_CLK_P2_PIN_NR 3
+#define HIVE_GPIO_AS3683_T1_F0_PIN_NR 4
+#define HIVE_GPIO_AS3683_SFL_F1_PIN_NR 5
+#define HIVE_GPIO_AS3683_STROBE_F2_PIN_NR 6
+#define HIVE_GPIO_MAX1577_EN1_PIN_NR 1
+#define HIVE_GPIO_MAX1577_EN2_PIN_NR 2
+#define HIVE_GPIO_MAX8685A_EN_PIN_NR 3
+#define HIVE_GPIO_MAX8685A_TRIG_PIN_NR 4*/
+
+#define HIVE_GPIO_STROBE_TRIGGER_PIN 2
+
+#endif /* __GPIO_GLOBAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/hmem_global.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/hmem_global.h
new file mode 100644
index 000000000000..7e05d7d880d1
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/hmem_global.h
@@ -0,0 +1,45 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __HMEM_GLOBAL_H_INCLUDED__
+#define __HMEM_GLOBAL_H_INCLUDED__
+
+#include <type_support.h>
+
+#define IS_HMEM_VERSION_1
+
+#include "isp.h"
+
+/*
+#define ISP_HIST_ADDRESS_BITS 12
+#define ISP_HIST_ALIGNMENT 4
+#define ISP_HIST_COMP_IN_PREC 12
+#define ISP_HIST_DEPTH 1024
+#define ISP_HIST_WIDTH 24
+#define ISP_HIST_COMPONENTS 4
+*/
+#define ISP_HIST_ALIGNMENT_LOG2 2
+
+#define HMEM_SIZE_LOG2 (ISP_HIST_ADDRESS_BITS-ISP_HIST_ALIGNMENT_LOG2)
+#define HMEM_SIZE ISP_HIST_DEPTH
+
+#define HMEM_UNIT_SIZE (HMEM_SIZE/ISP_HIST_COMPONENTS)
+#define HMEM_UNIT_COUNT ISP_HIST_COMPONENTS
+
+#define HMEM_RANGE_LOG2 ISP_HIST_WIDTH
+#define HMEM_RANGE (1UL<<HMEM_RANGE_LOG2)
+
+typedef uint32_t hmem_data_t;
+
+#endif /* __HMEM_GLOBAL_H_INCLUDED__ */
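As a worked example of the derived constants: using the histogram parameters listed in the commented-out block above (ISP_HIST_ADDRESS_BITS = 12, ISP_HIST_DEPTH = 1024, ISP_HIST_WIDTH = 24, ISP_HIST_COMPONENTS = 4 — the real values come from isp.h, so this is only illustrative), they evaluate to:

HMEM_SIZE_LOG2  = 12 - 2   = 10
HMEM_SIZE       = 1024              /* total histogram bins */
HMEM_UNIT_SIZE  = 1024 / 4 = 256    /* bins per component */
HMEM_UNIT_COUNT = 4
HMEM_RANGE      = 1UL << 24         /* maximum count per bin */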
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/debug.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/debug.c
new file mode 100644
index 000000000000..c412810887b3
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/debug.c
@@ -0,0 +1,72 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2016, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "debug.h"
+
+#ifndef __INLINE_DEBUG__
+#include "debug_private.h"
+#endif /* __INLINE_DEBUG__ */
+
+#include "memory_access.h"
+
+#define __INLINE_SP__
+#include "sp.h"
+
+#include "assert_support.h"
+
+/* The address of the remote copy */
+hrt_address debug_buffer_address = (hrt_address)-1;
+hrt_vaddress debug_buffer_ddr_address = (hrt_vaddress)-1;
+/* The local copy */
+debug_data_t debug_data;
+debug_data_t *debug_data_ptr = &debug_data;
+
+void debug_buffer_init(const hrt_address addr)
+{
+ debug_buffer_address = addr;
+
+ debug_data.head = 0;
+ debug_data.tail = 0;
+}
+
+void debug_buffer_ddr_init(const hrt_vaddress addr)
+{
+ debug_buf_mode_t mode = DEBUG_BUFFER_MODE_LINEAR;
+ uint32_t enable = 1;
+ uint32_t head = 0;
+ uint32_t tail = 0;
+ /* set the ddr queue */
+ debug_buffer_ddr_address = addr;
+ mmgr_store(addr + DEBUG_DATA_BUF_MODE_DDR_ADDR,
+ &mode, sizeof(debug_buf_mode_t));
+ mmgr_store(addr + DEBUG_DATA_HEAD_DDR_ADDR,
+ &head, sizeof(uint32_t));
+ mmgr_store(addr + DEBUG_DATA_TAIL_DDR_ADDR,
+ &tail, sizeof(uint32_t));
+ mmgr_store(addr + DEBUG_DATA_ENABLE_DDR_ADDR,
+ &enable, sizeof(uint32_t));
+
+ /* set the local copy */
+ debug_data.head = 0;
+ debug_data.tail = 0;
+}
+
+void debug_buffer_setmode(const debug_buf_mode_t mode)
+{
+ assert(debug_buffer_address != ((hrt_address)-1));
+
+ sp_dmem_store_uint32(SP0_ID,
+ debug_buffer_address + DEBUG_DATA_BUF_MODE_ADDR, mode);
+}
+
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/debug_local.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/debug_local.h
new file mode 100644
index 000000000000..2b0c5f48c0e7
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/debug_local.h
@@ -0,0 +1,21 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __DEBUG_LOCAL_H_INCLUDED__
+#define __DEBUG_LOCAL_H_INCLUDED__
+
+#include "debug_global.h"
+
+#endif /* __DEBUG_LOCAL_H_INCLUDED__ */
+
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/debug_private.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/debug_private.h
new file mode 100644
index 000000000000..a047aadc6619
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/debug_private.h
@@ -0,0 +1,99 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __DEBUG_PRIVATE_H_INCLUDED__
+#define __DEBUG_PRIVATE_H_INCLUDED__
+
+#include "debug_public.h"
+
+#include "sp.h"
+
+#define __INLINE_ISP__
+#include "isp.h"
+
+#include "memory_access.h"
+
+#include "assert_support.h"
+
+STORAGE_CLASS_DEBUG_C bool is_debug_buffer_empty(void)
+{
+ return (debug_data_ptr->head == debug_data_ptr->tail);
+}
+
+STORAGE_CLASS_DEBUG_C hrt_data debug_dequeue(void)
+{
+ hrt_data value = 0;
+
+ assert(debug_buffer_address != ((hrt_address)-1));
+
+ debug_synch_queue();
+
+ if (!is_debug_buffer_empty()) {
+ value = debug_data_ptr->buf[debug_data_ptr->head];
+ debug_data_ptr->head = (debug_data_ptr->head + 1) & DEBUG_BUF_MASK;
+ sp_dmem_store_uint32(SP0_ID, debug_buffer_address + DEBUG_DATA_HEAD_ADDR, debug_data_ptr->head);
+ }
+
+ return value;
+}
+
+STORAGE_CLASS_DEBUG_C void debug_synch_queue(void)
+{
+ uint32_t remote_tail = sp_dmem_load_uint32(SP0_ID, debug_buffer_address + DEBUG_DATA_TAIL_ADDR);
+/* We could move the remote head after the upload, but we would have to limit the upload w.r.t. the local head. This is easier */
+ if (remote_tail > debug_data_ptr->tail) {
+ size_t delta = remote_tail - debug_data_ptr->tail;
+ sp_dmem_load(SP0_ID, debug_buffer_address + DEBUG_DATA_BUF_ADDR + debug_data_ptr->tail*sizeof(uint32_t), (void *)&(debug_data_ptr->buf[debug_data_ptr->tail]), delta*sizeof(uint32_t));
+ } else if (remote_tail < debug_data_ptr->tail) {
+ size_t delta = DEBUG_BUF_SIZE - debug_data_ptr->tail;
+ sp_dmem_load(SP0_ID, debug_buffer_address + DEBUG_DATA_BUF_ADDR + debug_data_ptr->tail*sizeof(uint32_t), (void *)&(debug_data_ptr->buf[debug_data_ptr->tail]), delta*sizeof(uint32_t));
+ sp_dmem_load(SP0_ID, debug_buffer_address + DEBUG_DATA_BUF_ADDR, (void *)&(debug_data_ptr->buf[0]), remote_tail*sizeof(uint32_t));
+ } /* else we are up to date */
+ debug_data_ptr->tail = remote_tail;
+}
+
+STORAGE_CLASS_DEBUG_C void debug_synch_queue_isp(void)
+{
+ uint32_t remote_tail = isp_dmem_load_uint32(ISP0_ID, DEBUG_BUFFER_ISP_DMEM_ADDR + DEBUG_DATA_TAIL_ADDR);
+/* We could move the remote head after the upload, but we would have to limit the upload w.r.t. the local head. This is easier */
+ if (remote_tail > debug_data_ptr->tail) {
+ size_t delta = remote_tail - debug_data_ptr->tail;
+ isp_dmem_load(ISP0_ID, DEBUG_BUFFER_ISP_DMEM_ADDR + DEBUG_DATA_BUF_ADDR + debug_data_ptr->tail*sizeof(uint32_t), (void *)&(debug_data_ptr->buf[debug_data_ptr->tail]), delta*sizeof(uint32_t));
+ } else if (remote_tail < debug_data_ptr->tail) {
+ size_t delta = DEBUG_BUF_SIZE - debug_data_ptr->tail;
+ isp_dmem_load(ISP0_ID, DEBUG_BUFFER_ISP_DMEM_ADDR + DEBUG_DATA_BUF_ADDR + debug_data_ptr->tail*sizeof(uint32_t), (void *)&(debug_data_ptr->buf[debug_data_ptr->tail]), delta*sizeof(uint32_t));
+ isp_dmem_load(ISP0_ID, DEBUG_BUFFER_ISP_DMEM_ADDR + DEBUG_DATA_BUF_ADDR, (void *)&(debug_data_ptr->buf[0]), remote_tail*sizeof(uint32_t));
+ } /* else we are up to date */
+ debug_data_ptr->tail = remote_tail;
+}
+
+STORAGE_CLASS_DEBUG_C void debug_synch_queue_ddr(void)
+{
+ uint32_t remote_tail;
+
+ mmgr_load(debug_buffer_ddr_address + DEBUG_DATA_TAIL_DDR_ADDR, &remote_tail, sizeof(uint32_t));
+/* We could move the remote head after the upload, but we would have to limit the upload w.r.t. the local head. This is easier */
+ if (remote_tail > debug_data_ptr->tail) {
+ size_t delta = remote_tail - debug_data_ptr->tail;
+ mmgr_load(debug_buffer_ddr_address + DEBUG_DATA_BUF_DDR_ADDR + debug_data_ptr->tail*sizeof(uint32_t), (void *)&(debug_data_ptr->buf[debug_data_ptr->tail]), delta*sizeof(uint32_t));
+ } else if (remote_tail < debug_data_ptr->tail) {
+ size_t delta = DEBUG_BUF_SIZE - debug_data_ptr->tail;
+ mmgr_load(debug_buffer_ddr_address + DEBUG_DATA_BUF_DDR_ADDR + debug_data_ptr->tail*sizeof(uint32_t), (void *)&(debug_data_ptr->buf[debug_data_ptr->tail]), delta*sizeof(uint32_t));
+ mmgr_load(debug_buffer_ddr_address + DEBUG_DATA_BUF_DDR_ADDR, (void *)&(debug_data_ptr->buf[0]), remote_tail*sizeof(uint32_t));
+ } /* else we are up to date */
+ debug_data_ptr->tail = remote_tail;
+}
+
+#endif /* __DEBUG_PRIVATE_H_INCLUDED__ */
+
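Taken together, debug.c and the inline accessors above give the host a simple polling model: point the module at the remote debug_data_s in SP DMEM, synchronise the remote tail into the local copy, then drain the queue. A usage sketch under the assumption that debug.h exposes debug_buffer_init(), debug_synch_queue(), is_debug_buffer_empty() and debug_dequeue() as seen in this patch; the DMEM address and the consume() callback are hypothetical.

#include "debug.h"	/* assumed to declare the debug buffer API used below */

#define MY_SP_DEBUG_BUFFER_ADDR 0x1000	/* HYPOTHETICAL address of debug_data_s in SP0 DMEM */

extern void consume(hrt_data value);	/* user-supplied sink for trace words */

void poll_debug_trace(void)
{
	static int initialised;

	if (!initialised) {
		debug_buffer_init((hrt_address)MY_SP_DEBUG_BUFFER_ADDR);
		initialised = 1;
	}

	/* Copy the remote producer tail (and any new words) into debug_data... */
	debug_synch_queue();

	/* ...then drain whatever arrived. debug_dequeue() re-syncs internally,
	 * which is redundant here but harmless. */
	while (!is_debug_buffer_empty())
		consume(debug_dequeue());
}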
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/dma.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/dma.c
new file mode 100644
index 000000000000..87a25d4289ec
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/dma.c
@@ -0,0 +1,299 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2016, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <stddef.h> /* NULL */
+
+#include "dma.h"
+
+#include "assert_support.h"
+
+#ifndef __INLINE_DMA__
+#include "dma_private.h"
+#endif /* __INLINE_DMA__ */
+
+void dma_get_state(const dma_ID_t ID, dma_state_t *state)
+{
+ int i;
+ hrt_data tmp;
+
+ assert(ID < N_DMA_ID);
+ assert(state != NULL);
+
+ tmp = dma_reg_load(ID, DMA_COMMAND_FSM_REG_IDX);
+ //reg [3:0] : flags error [3], stall, run, idle [0]
+ //reg [9:4] : command
+ //reg[14:10] : channel
+ //reg [23:15] : param
+ state->fsm_command_idle = tmp & 0x1;
+ state->fsm_command_run = tmp & 0x2;
+ state->fsm_command_stalling = tmp & 0x4;
+ state->fsm_command_error = tmp & 0x8;
+ state->last_command_channel = (tmp>>10 & 0x1F);
+ state->last_command_param = (tmp>>15 & 0x0F);
+ tmp = (tmp>>4) & 0x3F;
+/* state->last_command = (dma_commands_t)tmp; */
+/* if the enumerator is made non-linear */
+	/* AM: the list below does not cover all the cases */
+	/* and these are not correct, */
+	/* so this command is just dumped as a raw value */
+ state->last_command = tmp;
+
+/*
+ if (tmp == 0)
+ state->last_command = DMA_COMMAND_READ;
+ if (tmp == 1)
+ state->last_command = DMA_COMMAND_WRITE;
+ if (tmp == 2)
+ state->last_command = DMA_COMMAND_SET_CHANNEL;
+ if (tmp == 3)
+ state->last_command = DMA_COMMAND_SET_PARAM;
+ if (tmp == 4)
+ state->last_command = DMA_COMMAND_READ_SPECIFIC;
+ if (tmp == 5)
+ state->last_command = DMA_COMMAND_WRITE_SPECIFIC;
+ if (tmp == 8)
+ state->last_command = DMA_COMMAND_INIT;
+ if (tmp == 12)
+ state->last_command = DMA_COMMAND_INIT_SPECIFIC;
+ if (tmp == 15)
+ state->last_command = DMA_COMMAND_RST;
+*/
+
+/* No sub-fields, idx = 0 */
+ state->current_command = dma_reg_load(ID,
+ DMA_CG_INFO_REG_IDX(0, _DMA_FSM_GROUP_CMD_IDX));
+ state->current_addr_a = dma_reg_load(ID,
+ DMA_CG_INFO_REG_IDX(0, _DMA_FSM_GROUP_ADDR_A_IDX));
+ state->current_addr_b = dma_reg_load(ID,
+ DMA_CG_INFO_REG_IDX(0, _DMA_FSM_GROUP_ADDR_B_IDX));
+
+ tmp = dma_reg_load(ID,
+ DMA_CG_INFO_REG_IDX(
+ _DMA_FSM_GROUP_FSM_CTRL_STATE_IDX,
+ _DMA_FSM_GROUP_FSM_CTRL_IDX));
+ state->fsm_ctrl_idle = tmp & 0x1;
+ state->fsm_ctrl_run = tmp & 0x2;
+ state->fsm_ctrl_stalling = tmp & 0x4;
+ state->fsm_ctrl_error = tmp & 0x8;
+ tmp = tmp >> 4;
+/* state->fsm_ctrl_state = (dma_ctrl_states_t)tmp; */
+ if (tmp == 0)
+ state->fsm_ctrl_state = DMA_CTRL_STATE_IDLE;
+ if (tmp == 1)
+ state->fsm_ctrl_state = DMA_CTRL_STATE_REQ_RCV;
+ if (tmp == 2)
+ state->fsm_ctrl_state = DMA_CTRL_STATE_RCV;
+ if (tmp == 3)
+ state->fsm_ctrl_state = DMA_CTRL_STATE_RCV_REQ;
+ if (tmp == 4)
+ state->fsm_ctrl_state = DMA_CTRL_STATE_INIT;
+ state->fsm_ctrl_source_dev = dma_reg_load(ID,
+ DMA_CG_INFO_REG_IDX(
+ _DMA_FSM_GROUP_FSM_CTRL_REQ_DEV_IDX,
+ _DMA_FSM_GROUP_FSM_CTRL_IDX));
+ state->fsm_ctrl_source_addr = dma_reg_load(ID,
+ DMA_CG_INFO_REG_IDX(
+ _DMA_FSM_GROUP_FSM_CTRL_REQ_ADDR_IDX,
+ _DMA_FSM_GROUP_FSM_CTRL_IDX));
+ state->fsm_ctrl_source_stride = dma_reg_load(ID,
+ DMA_CG_INFO_REG_IDX(
+ _DMA_FSM_GROUP_FSM_CTRL_REQ_STRIDE_IDX,
+ _DMA_FSM_GROUP_FSM_CTRL_IDX));
+ state->fsm_ctrl_source_width = dma_reg_load(ID,
+ DMA_CG_INFO_REG_IDX(
+ _DMA_FSM_GROUP_FSM_CTRL_REQ_XB_IDX,
+ _DMA_FSM_GROUP_FSM_CTRL_IDX));
+ state->fsm_ctrl_source_height = dma_reg_load(ID,
+ DMA_CG_INFO_REG_IDX(
+ _DMA_FSM_GROUP_FSM_CTRL_REQ_YB_IDX,
+ _DMA_FSM_GROUP_FSM_CTRL_IDX));
+ state->fsm_ctrl_pack_source_dev = dma_reg_load(ID,
+ DMA_CG_INFO_REG_IDX(
+ _DMA_FSM_GROUP_FSM_CTRL_PACK_REQ_DEV_IDX,
+ _DMA_FSM_GROUP_FSM_CTRL_IDX));
+ state->fsm_ctrl_pack_dest_dev = dma_reg_load(ID,
+ DMA_CG_INFO_REG_IDX(
+ _DMA_FSM_GROUP_FSM_CTRL_PACK_WR_DEV_IDX,
+ _DMA_FSM_GROUP_FSM_CTRL_IDX));
+ state->fsm_ctrl_dest_addr = dma_reg_load(ID,
+ DMA_CG_INFO_REG_IDX(
+ _DMA_FSM_GROUP_FSM_CTRL_WR_ADDR_IDX,
+ _DMA_FSM_GROUP_FSM_CTRL_IDX));
+ state->fsm_ctrl_dest_stride = dma_reg_load(ID,
+ DMA_CG_INFO_REG_IDX(
+ _DMA_FSM_GROUP_FSM_CTRL_WR_STRIDE_IDX,
+ _DMA_FSM_GROUP_FSM_CTRL_IDX));
+ state->fsm_ctrl_pack_source_width = dma_reg_load(ID,
+ DMA_CG_INFO_REG_IDX(
+ _DMA_FSM_GROUP_FSM_CTRL_PACK_REQ_XB_IDX,
+ _DMA_FSM_GROUP_FSM_CTRL_IDX));
+ state->fsm_ctrl_pack_dest_height = dma_reg_load(ID,
+ DMA_CG_INFO_REG_IDX(
+ _DMA_FSM_GROUP_FSM_CTRL_PACK_WR_YB_IDX,
+ _DMA_FSM_GROUP_FSM_CTRL_IDX));
+ state->fsm_ctrl_pack_dest_width = dma_reg_load(ID,
+ DMA_CG_INFO_REG_IDX(
+ _DMA_FSM_GROUP_FSM_CTRL_PACK_WR_XB_IDX,
+ _DMA_FSM_GROUP_FSM_CTRL_IDX));
+ state->fsm_ctrl_pack_source_elems = dma_reg_load(ID,
+ DMA_CG_INFO_REG_IDX(
+ _DMA_FSM_GROUP_FSM_CTRL_PACK_ELEM_REQ_IDX,
+ _DMA_FSM_GROUP_FSM_CTRL_IDX));
+ state->fsm_ctrl_pack_dest_elems = dma_reg_load(ID,
+ DMA_CG_INFO_REG_IDX(
+ _DMA_FSM_GROUP_FSM_CTRL_PACK_ELEM_WR_IDX,
+ _DMA_FSM_GROUP_FSM_CTRL_IDX));
+ state->fsm_ctrl_pack_extension = dma_reg_load(ID,
+ DMA_CG_INFO_REG_IDX(
+ _DMA_FSM_GROUP_FSM_CTRL_PACK_S_Z_IDX,
+ _DMA_FSM_GROUP_FSM_CTRL_IDX));
+
+ tmp = dma_reg_load(ID,
+ DMA_CG_INFO_REG_IDX(
+ _DMA_FSM_GROUP_FSM_PACK_STATE_IDX,
+ _DMA_FSM_GROUP_FSM_PACK_IDX));
+ state->pack_idle = tmp & 0x1;
+ state->pack_run = tmp & 0x2;
+ state->pack_stalling = tmp & 0x4;
+ state->pack_error = tmp & 0x8;
+ state->pack_cnt_height = dma_reg_load(ID,
+ DMA_CG_INFO_REG_IDX(
+ _DMA_FSM_GROUP_FSM_PACK_CNT_YB_IDX,
+ _DMA_FSM_GROUP_FSM_PACK_IDX));
+ state->pack_src_cnt_width = dma_reg_load(ID,
+ DMA_CG_INFO_REG_IDX(
+ _DMA_FSM_GROUP_FSM_PACK_CNT_XB_REQ_IDX,
+ _DMA_FSM_GROUP_FSM_PACK_IDX));
+ state->pack_dest_cnt_width = dma_reg_load(ID,
+ DMA_CG_INFO_REG_IDX(
+ _DMA_FSM_GROUP_FSM_PACK_CNT_XB_WR_IDX,
+ _DMA_FSM_GROUP_FSM_PACK_IDX));
+
+ tmp = dma_reg_load(ID,
+ DMA_CG_INFO_REG_IDX(
+ _DMA_FSM_GROUP_FSM_REQ_STATE_IDX,
+ _DMA_FSM_GROUP_FSM_REQ_IDX));
+/* state->read_state = (dma_rw_states_t)tmp; */
+ if (tmp == 0)
+ state->read_state = DMA_RW_STATE_IDLE;
+ if (tmp == 1)
+ state->read_state = DMA_RW_STATE_REQ;
+ if (tmp == 2)
+ state->read_state = DMA_RW_STATE_NEXT_LINE;
+ if (tmp == 3)
+ state->read_state = DMA_RW_STATE_UNLOCK_CHANNEL;
+ state->read_cnt_height = dma_reg_load(ID,
+ DMA_CG_INFO_REG_IDX(
+ _DMA_FSM_GROUP_FSM_REQ_CNT_YB_IDX,
+ _DMA_FSM_GROUP_FSM_REQ_IDX));
+ state->read_cnt_width = dma_reg_load(ID,
+ DMA_CG_INFO_REG_IDX(
+ _DMA_FSM_GROUP_FSM_REQ_CNT_XB_IDX,
+ _DMA_FSM_GROUP_FSM_REQ_IDX));
+
+ tmp = dma_reg_load(ID,
+ DMA_CG_INFO_REG_IDX(
+ _DMA_FSM_GROUP_FSM_WR_STATE_IDX,
+ _DMA_FSM_GROUP_FSM_WR_IDX));
+/* state->write_state = (dma_rw_states_t)tmp; */
+ if (tmp == 0)
+ state->write_state = DMA_RW_STATE_IDLE;
+ if (tmp == 1)
+ state->write_state = DMA_RW_STATE_REQ;
+ if (tmp == 2)
+ state->write_state = DMA_RW_STATE_NEXT_LINE;
+ if (tmp == 3)
+ state->write_state = DMA_RW_STATE_UNLOCK_CHANNEL;
+ state->write_height = dma_reg_load(ID,
+ DMA_CG_INFO_REG_IDX(
+ _DMA_FSM_GROUP_FSM_WR_CNT_YB_IDX,
+ _DMA_FSM_GROUP_FSM_WR_IDX));
+ state->write_width = dma_reg_load(ID,
+ DMA_CG_INFO_REG_IDX(
+ _DMA_FSM_GROUP_FSM_WR_CNT_XB_IDX,
+ _DMA_FSM_GROUP_FSM_WR_IDX));
+
+ for (i = 0; i < HIVE_ISP_NUM_DMA_CONNS; i++) {
+ dma_port_state_t *port = &(state->port_states[i]);
+
+ tmp = dma_reg_load(ID, DMA_DEV_INFO_REG_IDX(0, i));
+ port->req_cs = ((tmp & 0x1) != 0);
+ port->req_we_n = ((tmp & 0x2) != 0);
+ port->req_run = ((tmp & 0x4) != 0);
+ port->req_ack = ((tmp & 0x8) != 0);
+
+ tmp = dma_reg_load(ID, DMA_DEV_INFO_REG_IDX(1, i));
+ port->send_cs = ((tmp & 0x1) != 0);
+ port->send_we_n = ((tmp & 0x2) != 0);
+ port->send_run = ((tmp & 0x4) != 0);
+ port->send_ack = ((tmp & 0x8) != 0);
+
+ tmp = dma_reg_load(ID, DMA_DEV_INFO_REG_IDX(2, i));
+ if (tmp & 0x1)
+ port->fifo_state = DMA_FIFO_STATE_WILL_BE_FULL;
+ if (tmp & 0x2)
+ port->fifo_state = DMA_FIFO_STATE_FULL;
+ if (tmp & 0x4)
+ port->fifo_state = DMA_FIFO_STATE_EMPTY;
+ port->fifo_counter = tmp >> 3;
+ }
+
+ for (i = 0; i < HIVE_DMA_NUM_CHANNELS; i++) {
+ dma_channel_state_t *ch = &(state->channel_states[i]);
+
+ ch->connection = DMA_GET_CONNECTION(dma_reg_load(ID,
+ DMA_CHANNEL_PARAM_REG_IDX(i,
+ _DMA_PACKING_SETUP_PARAM)));
+ ch->sign_extend = DMA_GET_EXTENSION(dma_reg_load(ID,
+ DMA_CHANNEL_PARAM_REG_IDX(i,
+ _DMA_PACKING_SETUP_PARAM)));
+ ch->height = dma_reg_load(ID,
+ DMA_CHANNEL_PARAM_REG_IDX(i,
+ _DMA_HEIGHT_PARAM));
+ ch->stride_a = dma_reg_load(ID,
+ DMA_CHANNEL_PARAM_REG_IDX(i,
+ _DMA_STRIDE_A_PARAM));
+ ch->elems_a = DMA_GET_ELEMENTS(dma_reg_load(ID,
+ DMA_CHANNEL_PARAM_REG_IDX(i,
+ _DMA_ELEM_CROPPING_A_PARAM)));
+ ch->cropping_a = DMA_GET_CROPPING(dma_reg_load(ID,
+ DMA_CHANNEL_PARAM_REG_IDX(i,
+ _DMA_ELEM_CROPPING_A_PARAM)));
+ ch->width_a = dma_reg_load(ID,
+ DMA_CHANNEL_PARAM_REG_IDX(i,
+ _DMA_WIDTH_A_PARAM));
+ ch->stride_b = dma_reg_load(ID,
+ DMA_CHANNEL_PARAM_REG_IDX(i,
+ _DMA_STRIDE_B_PARAM));
+ ch->elems_b = DMA_GET_ELEMENTS(dma_reg_load(ID,
+ DMA_CHANNEL_PARAM_REG_IDX(i,
+ _DMA_ELEM_CROPPING_B_PARAM)));
+ ch->cropping_b = DMA_GET_CROPPING(dma_reg_load(ID,
+ DMA_CHANNEL_PARAM_REG_IDX(i,
+ _DMA_ELEM_CROPPING_B_PARAM)));
+ ch->width_b = dma_reg_load(ID,
+ DMA_CHANNEL_PARAM_REG_IDX(i,
+ _DMA_WIDTH_B_PARAM));
+ }
+}
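+
+/*
+ * Usage sketch (illustrative only, not part of the original code): the state
+ * dump is typically taken on the host while debugging a stalled pipeline.
+ * The DMA instance ID below is an assumption for the example.
+ *
+ *	dma_state_t dma_state;
+ *
+ *	dma_get_state(DMA0_ID, &dma_state);
+ *	if (dma_state.fsm_ctrl_stalling)
+ *		... inspect dma_state.fsm_ctrl_state and the port/channel states ...
+ */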
+
+void
+dma_set_max_burst_size(const dma_ID_t ID, dma_connection conn,
+ uint32_t max_burst_size)
+{
+ assert(ID < N_DMA_ID);
+ assert(max_burst_size > 0);
+ dma_reg_store(ID, DMA_DEV_INFO_REG_IDX(_DMA_DEV_INTERF_MAX_BURST_IDX, conn),
+ max_burst_size - 1);
+}
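+
+/*
+ * Usage sketch (illustrative only, not part of the original code): the
+ * register holds "max_burst_size - 1", so the burst size is given in words
+ * and must be larger than 0. The connection ID below is an assumption for
+ * the example; the call would store 31 in the max-burst field of that
+ * device interface:
+ *
+ *	dma_set_max_burst_size(DMA0_ID, HIVE_DMA_BUS_DDR_CONN, 32);
+ */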
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/dma_local.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/dma_local.h
new file mode 100644
index 000000000000..ab631e6f64b5
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/dma_local.h
@@ -0,0 +1,207 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __DMA_LOCAL_H_INCLUDED__
+#define __DMA_LOCAL_H_INCLUDED__
+
+#include <type_support.h>
+#include "dma_global.h"
+
+#include <hrt/defs.h> /* HRTCAT() */
+#include <hrt/bits.h> /* _hrt_get_bits() */
+#include <hive_isp_css_defs.h> /* HIVE_DMA_NUM_CHANNELS */
+#include <dma_v2_defs.h>
+
+#define _DMA_FSM_GROUP_CMD_IDX _DMA_V2_FSM_GROUP_CMD_IDX
+#define _DMA_FSM_GROUP_ADDR_A_IDX _DMA_V2_FSM_GROUP_ADDR_SRC_IDX
+#define _DMA_FSM_GROUP_ADDR_B_IDX _DMA_V2_FSM_GROUP_ADDR_DEST_IDX
+
+#define _DMA_FSM_GROUP_CMD_CTRL_IDX _DMA_V2_FSM_GROUP_CMD_CTRL_IDX
+
+#define _DMA_FSM_GROUP_FSM_CTRL_IDX _DMA_V2_FSM_GROUP_FSM_CTRL_IDX
+#define _DMA_FSM_GROUP_FSM_CTRL_STATE_IDX _DMA_V2_FSM_GROUP_FSM_CTRL_STATE_IDX
+#define _DMA_FSM_GROUP_FSM_CTRL_REQ_DEV_IDX _DMA_V2_FSM_GROUP_FSM_CTRL_REQ_DEV_IDX
+#define _DMA_FSM_GROUP_FSM_CTRL_REQ_ADDR_IDX _DMA_V2_FSM_GROUP_FSM_CTRL_REQ_ADDR_IDX
+#define _DMA_FSM_GROUP_FSM_CTRL_REQ_STRIDE_IDX _DMA_V2_FSM_GROUP_FSM_CTRL_REQ_STRIDE_IDX
+#define _DMA_FSM_GROUP_FSM_CTRL_REQ_XB_IDX _DMA_V2_FSM_GROUP_FSM_CTRL_REQ_XB_IDX
+#define _DMA_FSM_GROUP_FSM_CTRL_REQ_YB_IDX _DMA_V2_FSM_GROUP_FSM_CTRL_REQ_YB_IDX
+#define _DMA_FSM_GROUP_FSM_CTRL_PACK_REQ_DEV_IDX _DMA_V2_FSM_GROUP_FSM_CTRL_PACK_REQ_DEV_IDX
+#define _DMA_FSM_GROUP_FSM_CTRL_PACK_WR_DEV_IDX _DMA_V2_FSM_GROUP_FSM_CTRL_PACK_WR_DEV_IDX
+#define _DMA_FSM_GROUP_FSM_CTRL_WR_ADDR_IDX _DMA_V2_FSM_GROUP_FSM_CTRL_WR_ADDR_IDX
+#define _DMA_FSM_GROUP_FSM_CTRL_WR_STRIDE_IDX _DMA_V2_FSM_GROUP_FSM_CTRL_WR_STRIDE_IDX
+#define _DMA_FSM_GROUP_FSM_CTRL_PACK_REQ_XB_IDX _DMA_V2_FSM_GROUP_FSM_CTRL_PACK_REQ_XB_IDX
+#define _DMA_FSM_GROUP_FSM_CTRL_PACK_WR_YB_IDX _DMA_V2_FSM_GROUP_FSM_CTRL_PACK_WR_YB_IDX
+#define _DMA_FSM_GROUP_FSM_CTRL_PACK_WR_XB_IDX _DMA_V2_FSM_GROUP_FSM_CTRL_PACK_WR_XB_IDX
+#define _DMA_FSM_GROUP_FSM_CTRL_PACK_ELEM_REQ_IDX _DMA_V2_FSM_GROUP_FSM_CTRL_PACK_ELEM_REQ_IDX
+#define _DMA_FSM_GROUP_FSM_CTRL_PACK_ELEM_WR_IDX _DMA_V2_FSM_GROUP_FSM_CTRL_PACK_ELEM_WR_IDX
+#define _DMA_FSM_GROUP_FSM_CTRL_PACK_S_Z_IDX _DMA_V2_FSM_GROUP_FSM_CTRL_PACK_S_Z_IDX
+
+#define _DMA_FSM_GROUP_FSM_PACK_IDX _DMA_V2_FSM_GROUP_FSM_PACK_IDX
+#define _DMA_FSM_GROUP_FSM_PACK_STATE_IDX _DMA_V2_FSM_GROUP_FSM_PACK_STATE_IDX
+#define _DMA_FSM_GROUP_FSM_PACK_CNT_YB_IDX _DMA_V2_FSM_GROUP_FSM_PACK_CNT_YB_IDX
+#define _DMA_FSM_GROUP_FSM_PACK_CNT_XB_REQ_IDX _DMA_V2_FSM_GROUP_FSM_PACK_CNT_XB_REQ_IDX
+#define _DMA_FSM_GROUP_FSM_PACK_CNT_XB_WR_IDX _DMA_V2_FSM_GROUP_FSM_PACK_CNT_XB_WR_IDX
+
+#define _DMA_FSM_GROUP_FSM_REQ_IDX _DMA_V2_FSM_GROUP_FSM_REQ_IDX
+#define _DMA_FSM_GROUP_FSM_REQ_STATE_IDX _DMA_V2_FSM_GROUP_FSM_REQ_STATE_IDX
+#define _DMA_FSM_GROUP_FSM_REQ_CNT_YB_IDX _DMA_V2_FSM_GROUP_FSM_REQ_CNT_YB_IDX
+#define _DMA_FSM_GROUP_FSM_REQ_CNT_XB_IDX _DMA_V2_FSM_GROUP_FSM_REQ_CNT_XB_IDX
+
+#define _DMA_FSM_GROUP_FSM_WR_IDX _DMA_V2_FSM_GROUP_FSM_WR_IDX
+#define _DMA_FSM_GROUP_FSM_WR_STATE_IDX _DMA_V2_FSM_GROUP_FSM_WR_STATE_IDX
+#define _DMA_FSM_GROUP_FSM_WR_CNT_YB_IDX _DMA_V2_FSM_GROUP_FSM_WR_CNT_YB_IDX
+#define _DMA_FSM_GROUP_FSM_WR_CNT_XB_IDX _DMA_V2_FSM_GROUP_FSM_WR_CNT_XB_IDX
+
+#define _DMA_DEV_INTERF_MAX_BURST_IDX _DMA_V2_DEV_INTERF_MAX_BURST_IDX
+
+/*
+ * Macros to compute the DMA parameter register indices
+ */
+#define DMA_SEL_COMP(comp) (((comp) & _hrt_ones(_DMA_V2_ADDR_SEL_COMP_BITS)) << _DMA_V2_ADDR_SEL_COMP_IDX)
+#define DMA_SEL_CH(ch) (((ch) & _hrt_ones(_DMA_V2_ADDR_SEL_CH_REG_BITS)) << _DMA_V2_ADDR_SEL_CH_REG_IDX)
+#define DMA_SEL_PARAM(param) (((param) & _hrt_ones(_DMA_V2_ADDR_SEL_PARAM_BITS)) << _DMA_V2_ADDR_SEL_PARAM_IDX)
+/* CG = Connection Group */
+#define DMA_SEL_CG_INFO(info) (((info) & _hrt_ones(_DMA_V2_ADDR_SEL_GROUP_COMP_INFO_BITS)) << _DMA_V2_ADDR_SEL_GROUP_COMP_INFO_IDX)
+#define DMA_SEL_CG_COMP(comp) (((comp) & _hrt_ones(_DMA_V2_ADDR_SEL_GROUP_COMP_BITS)) << _DMA_V2_ADDR_SEL_GROUP_COMP_IDX)
+#define DMA_SEL_DEV_INFO(info) (((info) & _hrt_ones(_DMA_V2_ADDR_SEL_DEV_INTERF_INFO_BITS)) << _DMA_V2_ADDR_SEL_DEV_INTERF_INFO_IDX)
+#define DMA_SEL_DEV_ID(dev) (((dev) & _hrt_ones(_DMA_V2_ADDR_SEL_DEV_INTERF_IDX_BITS)) << _DMA_V2_ADDR_SEL_DEV_INTERF_IDX_IDX)
+
+#define DMA_COMMAND_FSM_REG_IDX (DMA_SEL_COMP(_DMA_V2_SEL_FSM_CMD) >> 2)
+#define DMA_CHANNEL_PARAM_REG_IDX(ch, param) ((DMA_SEL_COMP(_DMA_V2_SEL_CH_REG) | DMA_SEL_CH(ch) | DMA_SEL_PARAM(param)) >> 2)
+#define DMA_CG_INFO_REG_IDX(info_id, comp_id) ((DMA_SEL_COMP(_DMA_V2_SEL_CONN_GROUP) | DMA_SEL_CG_INFO(info_id) | DMA_SEL_CG_COMP(comp_id)) >> 2)
+#define DMA_DEV_INFO_REG_IDX(info_id, dev_id) ((DMA_SEL_COMP(_DMA_V2_SEL_DEV_INTERF) | DMA_SEL_DEV_INFO(info_id) | DMA_SEL_DEV_ID(dev_id)) >> 2)
+#define DMA_RST_REG_IDX (DMA_SEL_COMP(_DMA_V2_SEL_RESET) >> 2)
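+
+/*
+ * Worked example (illustrative; the _DMA_V2_* field positions come from
+ * dma_v2_defs.h and are not repeated here): each DMA_SEL_* macro masks its
+ * argument and shifts it into its address field, the OR of the fields forms
+ * a byte offset into the DMA slave address space, and the trailing ">> 2"
+ * converts that byte offset into a 32-bit register index as expected by
+ * dma_reg_load()/dma_reg_store():
+ *
+ *	reg = DMA_CHANNEL_PARAM_REG_IDX(ch, _DMA_HEIGHT_PARAM);
+ *	height = dma_reg_load(ID, reg);
+ */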
+
+#define DMA_GET_CONNECTION(val) _hrt_get_bits(val, _DMA_V2_CONNECTION_IDX, _DMA_V2_CONNECTION_BITS)
+#define DMA_GET_EXTENSION(val) _hrt_get_bits(val, _DMA_V2_EXTENSION_IDX, _DMA_V2_EXTENSION_BITS)
+#define DMA_GET_ELEMENTS(val) _hrt_get_bits(val, _DMA_V2_ELEMENTS_IDX, _DMA_V2_ELEMENTS_BITS)
+#define DMA_GET_CROPPING(val) _hrt_get_bits(val, _DMA_V2_LEFT_CROPPING_IDX, _DMA_V2_LEFT_CROPPING_BITS)
+
+typedef enum {
+ DMA_CTRL_STATE_IDLE,
+ DMA_CTRL_STATE_REQ_RCV,
+ DMA_CTRL_STATE_RCV,
+ DMA_CTRL_STATE_RCV_REQ,
+ DMA_CTRL_STATE_INIT,
+ N_DMA_CTRL_STATES
+} dma_ctrl_states_t;
+
+typedef enum {
+ DMA_COMMAND_READ,
+ DMA_COMMAND_WRITE,
+ DMA_COMMAND_SET_CHANNEL,
+ DMA_COMMAND_SET_PARAM,
+ DMA_COMMAND_READ_SPECIFIC,
+ DMA_COMMAND_WRITE_SPECIFIC,
+ DMA_COMMAND_INIT,
+ DMA_COMMAND_INIT_SPECIFIC,
+ DMA_COMMAND_RST,
+ N_DMA_COMMANDS
+} dma_commands_t;
+
+typedef enum {
+ DMA_RW_STATE_IDLE,
+ DMA_RW_STATE_REQ,
+ DMA_RW_STATE_NEXT_LINE,
+ DMA_RW_STATE_UNLOCK_CHANNEL,
+ N_DMA_RW_STATES
+} dma_rw_states_t;
+
+typedef enum {
+ DMA_FIFO_STATE_WILL_BE_FULL,
+ DMA_FIFO_STATE_FULL,
+ DMA_FIFO_STATE_EMPTY,
+ N_DMA_FIFO_STATES
+} dma_fifo_states_t;
+
+/* typedef struct dma_state_s dma_state_t; */
+typedef struct dma_channel_state_s dma_channel_state_t;
+typedef struct dma_port_state_s dma_port_state_t;
+
+struct dma_port_state_s {
+ bool req_cs;
+ bool req_we_n;
+ bool req_run;
+ bool req_ack;
+ bool send_cs;
+ bool send_we_n;
+ bool send_run;
+ bool send_ack;
+ dma_fifo_states_t fifo_state;
+ int fifo_counter;
+};
+
+struct dma_channel_state_s {
+ int connection;
+ bool sign_extend;
+ int height;
+ int stride_a;
+ int elems_a;
+ int cropping_a;
+ int width_a;
+ int stride_b;
+ int elems_b;
+ int cropping_b;
+ int width_b;
+};
+
+struct dma_state_s {
+ bool fsm_command_idle;
+ bool fsm_command_run;
+ bool fsm_command_stalling;
+ bool fsm_command_error;
+ dma_commands_t last_command;
+ int last_command_channel;
+ int last_command_param;
+ dma_commands_t current_command;
+ int current_addr_a;
+ int current_addr_b;
+ bool fsm_ctrl_idle;
+ bool fsm_ctrl_run;
+ bool fsm_ctrl_stalling;
+ bool fsm_ctrl_error;
+ dma_ctrl_states_t fsm_ctrl_state;
+ int fsm_ctrl_source_dev;
+ int fsm_ctrl_source_addr;
+ int fsm_ctrl_source_stride;
+ int fsm_ctrl_source_width;
+ int fsm_ctrl_source_height;
+ int fsm_ctrl_pack_source_dev;
+ int fsm_ctrl_pack_dest_dev;
+ int fsm_ctrl_dest_addr;
+ int fsm_ctrl_dest_stride;
+ int fsm_ctrl_pack_source_width;
+ int fsm_ctrl_pack_dest_height;
+ int fsm_ctrl_pack_dest_width;
+ int fsm_ctrl_pack_source_elems;
+ int fsm_ctrl_pack_dest_elems;
+ int fsm_ctrl_pack_extension;
+ int pack_idle;
+ int pack_run;
+ int pack_stalling;
+ int pack_error;
+ int pack_cnt_height;
+ int pack_src_cnt_width;
+ int pack_dest_cnt_width;
+ dma_rw_states_t read_state;
+ int read_cnt_height;
+ int read_cnt_width;
+ dma_rw_states_t write_state;
+ int write_height;
+ int write_width;
+ dma_port_state_t port_states[HIVE_ISP_NUM_DMA_CONNS];
+ dma_channel_state_t channel_states[HIVE_DMA_NUM_CHANNELS];
+};
+
+#endif /* __DMA_LOCAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/dma_private.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/dma_private.h
new file mode 100644
index 000000000000..ba54b1f0467b
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/dma_private.h
@@ -0,0 +1,41 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __DMA_PRIVATE_H_INCLUDED__
+#define __DMA_PRIVATE_H_INCLUDED__
+
+#include "dma_public.h"
+
+#include "device_access.h"
+
+#include "assert_support.h"
+
+STORAGE_CLASS_DMA_C void dma_reg_store(const dma_ID_t ID,
+ const unsigned int reg,
+ const hrt_data value)
+{
+ assert(ID < N_DMA_ID);
+ assert(DMA_BASE[ID] != (hrt_address)-1);
+ ia_css_device_store_uint32(DMA_BASE[ID] + reg*sizeof(hrt_data), value);
+}
+
+STORAGE_CLASS_DMA_C hrt_data dma_reg_load(const dma_ID_t ID,
+ const unsigned int reg)
+{
+ assert(ID < N_DMA_ID);
+ assert(DMA_BASE[ID] != (hrt_address)-1);
+ return ia_css_device_load_uint32(DMA_BASE[ID] + reg*sizeof(hrt_data));
+}
+
+#endif /* __DMA_PRIVATE_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/event_fifo.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/event_fifo.c
new file mode 100644
index 000000000000..777670948d6f
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/event_fifo.c
@@ -0,0 +1,19 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "event_fifo.h"
+
+#ifndef __INLINE_EVENT__
+#include "event_fifo_private.h"
+#endif /* __INLINE_EVENT__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/event_fifo_local.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/event_fifo_local.h
new file mode 100644
index 000000000000..c595692c6ea9
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/event_fifo_local.h
@@ -0,0 +1,57 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _EVENT_FIFO_LOCAL_H
+#define _EVENT_FIFO_LOCAL_H
+
+/*
+ * All events come from connections mapped on the system
+ * bus but do not use a global IRQ
+ */
+#include "event_fifo_global.h"
+
+typedef enum {
+ SP0_EVENT_ID,
+ ISP0_EVENT_ID,
+ STR2MIPI_EVENT_ID,
+ N_EVENT_ID
+} event_ID_t;
+
+#define EVENT_QUERY_BIT 0
+
+/* Events are read from FIFO */
+static const hrt_address event_source_addr[N_EVENT_ID] = {
+ 0x0000000000380000ULL,
+ 0x0000000000380004ULL,
+ 0xffffffffffffffffULL};
+
+/* Reads from the FIFO are blocking; query data availability first */
+static const hrt_address event_source_query_addr[N_EVENT_ID] = {
+ 0x0000000000380010ULL,
+ 0x0000000000380014ULL,
+ 0xffffffffffffffffULL};
+
+/* Events are written to FIFO */
+static const hrt_address event_sink_addr[N_EVENT_ID] = {
+ 0x0000000000380008ULL,
+ 0x000000000038000CULL,
+ 0x0000000000090104ULL};
+
+/* Writes to the FIFO are blocking; query free space first */
+static const hrt_address event_sink_query_addr[N_EVENT_ID] = {
+ 0x0000000000380018ULL,
+ 0x000000000038001CULL,
+ 0x000000000009010CULL};
+
+#endif /* _EVENT_FIFO_LOCAL_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/event_fifo_private.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/event_fifo_private.h
new file mode 100644
index 000000000000..9d3a29696094
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/event_fifo_private.h
@@ -0,0 +1,75 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __EVENT_FIFO_PRIVATE_H
+#define __EVENT_FIFO_PRIVATE_H
+
+#include "event_fifo_public.h"
+
+#include "device_access.h"
+
+#include "assert_support.h"
+
+#include <hrt/bits.h> /* _hrt_get_bits() */
+
+STORAGE_CLASS_EVENT_C void event_wait_for(const event_ID_t ID)
+{
+ assert(ID < N_EVENT_ID);
+ assert(event_source_addr[ID] != ((hrt_address)-1));
+ (void)ia_css_device_load_uint32(event_source_addr[ID]);
+ return;
+}
+
+STORAGE_CLASS_EVENT_C void cnd_event_wait_for(const event_ID_t ID,
+ const bool cnd)
+{
+ if (cnd) {
+ event_wait_for(ID);
+ }
+}
+
+STORAGE_CLASS_EVENT_C hrt_data event_receive_token(const event_ID_t ID)
+{
+ assert(ID < N_EVENT_ID);
+ assert(event_source_addr[ID] != ((hrt_address)-1));
+ return ia_css_device_load_uint32(event_source_addr[ID]);
+}
+
+STORAGE_CLASS_EVENT_C void event_send_token(const event_ID_t ID,
+ const hrt_data token)
+{
+ assert(ID < N_EVENT_ID);
+ assert(event_sink_addr[ID] != ((hrt_address)-1));
+ ia_css_device_store_uint32(event_sink_addr[ID], token);
+}
+
+STORAGE_CLASS_EVENT_C bool is_event_pending(const event_ID_t ID)
+{
+ hrt_data value;
+ assert(ID < N_EVENT_ID);
+ assert(event_source_query_addr[ID] != ((hrt_address)-1));
+ value = ia_css_device_load_uint32(event_source_query_addr[ID]);
+ return !_hrt_get_bit(value, EVENT_QUERY_BIT);
+}
+
+STORAGE_CLASS_EVENT_C bool can_event_send_token(const event_ID_t ID)
+{
+ hrt_data value;
+ assert(ID < N_EVENT_ID);
+ assert(event_sink_query_addr[ID] != ((hrt_address)-1));
+ value = ia_css_device_load_uint32(event_sink_query_addr[ID]);
+ return !_hrt_get_bit(value, EVENT_QUERY_BIT);
+}
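+
+/*
+ * Usage sketch (illustrative only, not part of the original code): check the
+ * query address before touching the blocking FIFO address, e.g. for a
+ * non-blocking receive on the SP event FIFO:
+ *
+ *	if (is_event_pending(SP0_EVENT_ID)) {
+ *		hrt_data token = event_receive_token(SP0_EVENT_ID);
+ *		...
+ *	}
+ *
+ * and, symmetrically, check can_event_send_token() before event_send_token(),
+ * since loads/stores on the FIFO addresses themselves stall until data or
+ * space is available.
+ */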
+
+#endif /* __EVENT_FIFO_PRIVATE_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/fifo_monitor.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/fifo_monitor.c
new file mode 100644
index 000000000000..1087944d637f
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/fifo_monitor.c
@@ -0,0 +1,567 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "fifo_monitor.h"
+
+#include <type_support.h>
+#include "device_access.h"
+
+#include <hrt/bits.h>
+
+#include "gp_device.h"
+
+#include "assert_support.h"
+
+#ifndef __INLINE_FIFO_MONITOR__
+#define STORAGE_CLASS_FIFO_MONITOR_DATA static const
+#else
+#define STORAGE_CLASS_FIFO_MONITOR_DATA const
+#endif /* __INLINE_FIFO_MONITOR__ */
+
+STORAGE_CLASS_FIFO_MONITOR_DATA unsigned int FIFO_SWITCH_ADDR[N_FIFO_SWITCH] = {
+ _REG_GP_SWITCH_IF_ADDR,
+ _REG_GP_SWITCH_GDC1_ADDR,
+ _REG_GP_SWITCH_GDC2_ADDR};
+
+#ifndef __INLINE_FIFO_MONITOR__
+#include "fifo_monitor_private.h"
+#endif /* __INLINE_FIFO_MONITOR__ */
+
+STORAGE_CLASS_INLINE bool fifo_monitor_status_valid (
+ const fifo_monitor_ID_t ID,
+ const unsigned int reg,
+ const unsigned int port_id);
+
+STORAGE_CLASS_INLINE bool fifo_monitor_status_accept(
+ const fifo_monitor_ID_t ID,
+ const unsigned int reg,
+ const unsigned int port_id);
+
+
+void fifo_channel_get_state(
+ const fifo_monitor_ID_t ID,
+ const fifo_channel_t channel_id,
+ fifo_channel_state_t *state)
+{
+ assert(channel_id < N_FIFO_CHANNEL);
+ assert(state != NULL);
+
+ switch (channel_id) {
+ case FIFO_CHANNEL_ISP0_TO_SP0:
+ state->src_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_ISP_STREAM_STAT_IDX,
+ ISP_STR_MON_PORT_SND_SP); /* ISP_STR_MON_PORT_ISP2SP */
+ state->fifo_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_ISP_STREAM_STAT_IDX,
+ ISP_STR_MON_PORT_SND_SP);
+ state->fifo_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_SP_STREAM_STAT_IDX,
+ SP_STR_MON_PORT_RCV_ISP); /* ISP_STR_MON_PORT_SP2ISP */
+ state->sink_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_SP_STREAM_STAT_IDX,
+ SP_STR_MON_PORT_RCV_ISP);
+ break;
+ case FIFO_CHANNEL_SP0_TO_ISP0:
+ state->src_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_SP_STREAM_STAT_IDX,
+ SP_STR_MON_PORT_SND_ISP); /* ISP_STR_MON_PORT_SP2ISP */
+ state->fifo_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_SP_STREAM_STAT_IDX,
+ SP_STR_MON_PORT_SND_ISP);
+ state->fifo_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_ISP_STREAM_STAT_IDX,
+ ISP_STR_MON_PORT_RCV_SP); /* ISP_STR_MON_PORT_ISP2SP */
+ state->sink_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_ISP_STREAM_STAT_IDX,
+ ISP_STR_MON_PORT_RCV_SP);
+ break;
+ case FIFO_CHANNEL_ISP0_TO_IF0:
+ state->src_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_ISP_STREAM_STAT_IDX,
+ ISP_STR_MON_PORT_SND_PIF_A); /* ISP_STR_MON_PORT_ISP2PIFA */
+ state->fifo_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_ISP_STREAM_STAT_IDX,
+ ISP_STR_MON_PORT_SND_PIF_A);
+ state->fifo_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_RCV_PIF_A); /* MOD_STR_MON_PORT_CELLS2PIFA */
+ state->sink_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_RCV_PIF_A);
+ break;
+ case FIFO_CHANNEL_IF0_TO_ISP0:
+ state->src_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_SND_PIF_A); /* MOD_STR_MON_PORT_PIFA2CELLS */
+ state->fifo_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_SND_PIF_A);
+ state->fifo_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_ISP_STREAM_STAT_IDX,
+ ISP_STR_MON_PORT_RCV_PIF_A); /* ISP_STR_MON_PORT_PIFA2ISP */
+ state->sink_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_ISP_STREAM_STAT_IDX,
+ ISP_STR_MON_PORT_RCV_PIF_A);
+ break;
+ case FIFO_CHANNEL_ISP0_TO_IF1:
+ state->src_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_ISP_STREAM_STAT_IDX,
+ ISP_STR_MON_PORT_SND_PIF_B); /* ISP_STR_MON_PORT_ISP2PIFA */
+ state->fifo_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_ISP_STREAM_STAT_IDX,
+ ISP_STR_MON_PORT_SND_PIF_B);
+ state->fifo_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_RCV_PIF_B); /* MOD_STR_MON_PORT_CELLS2PIFB */
+ state->sink_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_RCV_PIF_B);
+ break;
+ case FIFO_CHANNEL_IF1_TO_ISP0:
+ state->src_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_SND_PIF_B); /* MOD_STR_MON_PORT_PIFB2CELLS */
+ state->fifo_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_SND_PIF_B);
+ state->fifo_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_ISP_STREAM_STAT_IDX,
+ ISP_STR_MON_PORT_RCV_PIF_B); /* ISP_STR_MON_PORT_PIFB2ISP */
+ state->sink_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_ISP_STREAM_STAT_IDX,
+ ISP_STR_MON_PORT_RCV_PIF_B);
+ break;
+ case FIFO_CHANNEL_ISP0_TO_DMA0:
+ state->src_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_ISP_STREAM_STAT_IDX,
+ ISP_STR_MON_PORT_SND_DMA); /* ISP_STR_MON_PORT_ISP2DMA */
+ state->fifo_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_ISP_STREAM_STAT_IDX,
+ ISP_STR_MON_PORT_SND_DMA);
+ state->fifo_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_RCV_DMA_FR_ISP); /* MOD_STR_MON_PORT_ISP2DMA */
+ state->sink_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_RCV_DMA_FR_ISP);
+ break;
+ case FIFO_CHANNEL_DMA0_TO_ISP0:
+ state->src_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_SND_DMA2ISP); /* MOD_STR_MON_PORT_DMA2ISP */
+ state->fifo_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_SND_DMA2ISP);
+ state->fifo_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_ISP_STREAM_STAT_IDX,
+ ISP_STR_MON_PORT_RCV_DMA); /* ISP_STR_MON_PORT_DMA2ISP */
+ state->sink_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_ISP_STREAM_STAT_IDX,
+ ISP_STR_MON_PORT_RCV_DMA);
+ break;
+ case FIFO_CHANNEL_ISP0_TO_GDC0:
+ state->src_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_ISP_STREAM_STAT_IDX,
+ ISP_STR_MON_PORT_SND_GDC); /* ISP_STR_MON_PORT_ISP2GDC1 */
+ state->fifo_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_ISP_STREAM_STAT_IDX,
+ ISP_STR_MON_PORT_SND_GDC);
+ state->fifo_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_RCV_GDC); /* MOD_STR_MON_PORT_CELLS2GDC1 */
+ state->sink_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_RCV_GDC);
+ break;
+ case FIFO_CHANNEL_GDC0_TO_ISP0:
+ state->fifo_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_SND_GDC); /* MOD_STR_MON_PORT_GDC12CELLS */
+ state->sink_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_SND_GDC);
+ state->src_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_ISP_STREAM_STAT_IDX,
+ ISP_STR_MON_PORT_RCV_GDC); /* ISP_STR_MON_PORT_GDC12ISP */
+ state->fifo_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_ISP_STREAM_STAT_IDX,
+ ISP_STR_MON_PORT_RCV_GDC);
+ break;
+ case FIFO_CHANNEL_ISP0_TO_GDC1:
+ state->src_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_ISP_STREAM_STAT_IDX,
+ ISP_STR_MON_PORT_ISP2GDC2);
+ state->fifo_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_ISP_STREAM_STAT_IDX,
+ ISP_STR_MON_PORT_ISP2GDC2);
+ state->fifo_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_CELLS2GDC2);
+ state->sink_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_CELLS2GDC2);
+ break;
+ case FIFO_CHANNEL_GDC1_TO_ISP0:
+ state->fifo_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_GDC22CELLS);
+ state->sink_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_GDC22CELLS);
+ state->src_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_ISP_STREAM_STAT_IDX,
+ ISP_STR_MON_PORT_GDC22ISP);
+ state->fifo_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_ISP_STREAM_STAT_IDX,
+ ISP_STR_MON_PORT_GDC22ISP);
+ break;
+ case FIFO_CHANNEL_ISP0_TO_HOST0:
+ state->src_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_ISP_STREAM_STAT_IDX,
+ ISP_STR_MON_PORT_SND_GPD); /* ISP_STR_MON_PORT_ISP2GPD */
+ state->fifo_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_ISP_STREAM_STAT_IDX,
+ ISP_STR_MON_PORT_SND_GPD);
+ {
+ hrt_data value = ia_css_device_load_uint32(0x0000000000380014ULL);
+ state->fifo_valid = !_hrt_get_bit(value, 0);
+ state->sink_accept = false; /* no monitor connected */
+ }
+ break;
+ case FIFO_CHANNEL_HOST0_TO_ISP0:
+ {
+ hrt_data value = ia_css_device_load_uint32(0x000000000038001CULL);
+ state->fifo_valid = false; /* no monitor connected */
+ state->sink_accept = !_hrt_get_bit(value, 0);
+ }
+ state->src_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_ISP_STREAM_STAT_IDX,
+ ISP_STR_MON_PORT_RCV_GPD); /* ISP_STR_MON_PORT_FA2ISP */
+ state->fifo_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_ISP_STREAM_STAT_IDX,
+ ISP_STR_MON_PORT_RCV_GPD);
+ break;
+ case FIFO_CHANNEL_SP0_TO_IF0:
+ state->src_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_SP_STREAM_STAT_IDX,
+ SP_STR_MON_PORT_SND_PIF_A); /* SP_STR_MON_PORT_SP2PIFA */
+ state->fifo_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_SP_STREAM_STAT_IDX,
+ SP_STR_MON_PORT_SND_PIF_A);
+ state->fifo_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_RCV_PIF_A); /* MOD_STR_MON_PORT_CELLS2PIFA */
+ state->sink_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_RCV_PIF_A);
+ break;
+ case FIFO_CHANNEL_IF0_TO_SP0:
+ state->src_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_SND_PIF_A); /* MOD_STR_MON_PORT_PIFA2CELLS */
+ state->fifo_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_SND_PIF_A);
+ state->fifo_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_SP_STREAM_STAT_IDX,
+ SP_STR_MON_PORT_RCV_PIF_A); /* SP_STR_MON_PORT_PIFA2SP */
+ state->sink_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_SP_STREAM_STAT_IDX,
+ SP_STR_MON_PORT_RCV_PIF_A);
+ break;
+ case FIFO_CHANNEL_SP0_TO_IF1:
+ state->src_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_SP_STREAM_STAT_IDX,
+ SP_STR_MON_PORT_SND_PIF_B); /* SP_STR_MON_PORT_SP2PIFB */
+ state->fifo_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_SP_STREAM_STAT_IDX,
+ SP_STR_MON_PORT_SND_PIF_B);
+ state->fifo_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_RCV_PIF_B); /* MOD_STR_MON_PORT_CELLS2PIFB */
+ state->sink_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_RCV_PIF_B);
+ break;
+ case FIFO_CHANNEL_IF1_TO_SP0:
+ state->src_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_SND_PIF_B); /* MOD_STR_MON_PORT_PIFB2CELLS */
+ state->fifo_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_SND_PIF_B);
+ state->fifo_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_SP_STREAM_STAT_IDX,
+ ISP_STR_MON_PORT_RCV_PIF_B); /* SP_STR_MON_PORT_PIFB2SP */
+ state->sink_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_SP_STREAM_STAT_IDX,
+ ISP_STR_MON_PORT_RCV_PIF_B);
+ break;
+ case FIFO_CHANNEL_SP0_TO_IF2:
+ state->src_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_SP_STREAM_STAT_IDX,
+ SP_STR_MON_PORT_SND_SIF); /* SP_STR_MON_PORT_SP2SIF */
+ state->fifo_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_SP_STREAM_STAT_IDX,
+ SP_STR_MON_PORT_SND_SIF);
+ state->fifo_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_RCV_SIF); /* MOD_STR_MON_PORT_SP2SIF */
+ state->sink_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_RCV_SIF);
+ break;
+ case FIFO_CHANNEL_IF2_TO_SP0:
+ state->src_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_SND_SIF); /* MOD_STR_MON_PORT_SIF2SP */
+ state->fifo_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_SND_SIF);
+ state->fifo_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_SP_STREAM_STAT_IDX,
+ SP_STR_MON_PORT_RCV_SIF); /* SP_STR_MON_PORT_SIF2SP */
+ state->sink_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_SP_STREAM_STAT_IDX,
+ SP_STR_MON_PORT_RCV_SIF);
+ break;
+ case FIFO_CHANNEL_SP0_TO_DMA0:
+ state->src_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_SP_STREAM_STAT_IDX,
+ SP_STR_MON_PORT_SND_DMA); /* SP_STR_MON_PORT_SP2DMA */
+ state->fifo_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_SP_STREAM_STAT_IDX,
+ SP_STR_MON_PORT_SND_DMA);
+ state->fifo_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_RCV_DMA_FR_SP); /* MOD_STR_MON_PORT_SP2DMA */
+ state->sink_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_RCV_DMA_FR_SP);
+ break;
+ case FIFO_CHANNEL_DMA0_TO_SP0:
+ state->src_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_SND_DMA2SP); /* MOD_STR_MON_PORT_DMA2SP */
+ state->fifo_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_SND_DMA2SP);
+ state->fifo_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_SP_STREAM_STAT_IDX,
+ SP_STR_MON_PORT_RCV_DMA); /* SP_STR_MON_PORT_DMA2SP */
+ state->sink_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_SP_STREAM_STAT_IDX,
+ SP_STR_MON_PORT_RCV_DMA);
+ break;
+ case FIFO_CHANNEL_SP0_TO_GDC0:
+ state->src_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_SP_STREAM_STAT_B_IDX,
+ SP_STR_MON_PORT_B_SP2GDC1);
+ state->fifo_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_SP_STREAM_STAT_B_IDX,
+ SP_STR_MON_PORT_B_SP2GDC1);
+ state->fifo_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_CELLS2GDC1);
+ state->sink_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_CELLS2GDC1);
+ break;
+ case FIFO_CHANNEL_GDC0_TO_SP0:
+ state->fifo_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_GDC12CELLS);
+ state->sink_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_GDC12CELLS);
+ state->src_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_SP_STREAM_STAT_B_IDX,
+ SP_STR_MON_PORT_B_GDC12SP);
+ state->fifo_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_SP_STREAM_STAT_B_IDX,
+ SP_STR_MON_PORT_B_GDC12SP);
+ break;
+ case FIFO_CHANNEL_SP0_TO_GDC1:
+ state->src_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_SP_STREAM_STAT_B_IDX,
+ SP_STR_MON_PORT_B_SP2GDC2);
+ state->fifo_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_SP_STREAM_STAT_B_IDX,
+ SP_STR_MON_PORT_B_SP2GDC2);
+ state->fifo_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_CELLS2GDC2);
+ state->sink_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_CELLS2GDC2);
+ break;
+ case FIFO_CHANNEL_GDC1_TO_SP0:
+ state->fifo_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_GDC22CELLS);
+ state->sink_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_GDC22CELLS);
+ state->src_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_SP_STREAM_STAT_B_IDX,
+ SP_STR_MON_PORT_B_GDC22SP);
+ state->fifo_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_SP_STREAM_STAT_B_IDX,
+ SP_STR_MON_PORT_B_GDC22SP);
+ break;
+ case FIFO_CHANNEL_SP0_TO_HOST0:
+ state->src_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_SP_STREAM_STAT_IDX,
+ SP_STR_MON_PORT_SND_GPD); /* SP_STR_MON_PORT_SP2GPD */
+ state->fifo_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_SP_STREAM_STAT_IDX,
+ SP_STR_MON_PORT_SND_GPD);
+ {
+ hrt_data value = ia_css_device_load_uint32(0x0000000000380010ULL);
+ state->fifo_valid = !_hrt_get_bit(value, 0);
+ state->sink_accept = false; /* no monitor connected */
+ }
+ break;
+ case FIFO_CHANNEL_HOST0_TO_SP0:
+ {
+ hrt_data value = ia_css_device_load_uint32(0x0000000000380018ULL);
+ state->fifo_valid = false; /* no monitor connected */
+ state->sink_accept = !_hrt_get_bit(value, 0);
+ }
+ state->src_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_SP_STREAM_STAT_IDX,
+ SP_STR_MON_PORT_RCV_GPD); /* SP_STR_MON_PORT_FA2SP */
+ state->fifo_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_SP_STREAM_STAT_IDX,
+ SP_STR_MON_PORT_RCV_GPD);
+ break;
+ case FIFO_CHANNEL_SP0_TO_STREAM2MEM0:
+ state->src_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_SP_STREAM_STAT_IDX,
+ SP_STR_MON_PORT_SND_MC); /* SP_STR_MON_PORT_SP2MC */
+ state->fifo_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_SP_STREAM_STAT_IDX,
+ SP_STR_MON_PORT_SND_MC);
+ state->fifo_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_RCV_MC); /* MOD_STR_MON_PORT_SP2MC */
+ state->sink_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_RCV_MC);
+ break;
+ case FIFO_CHANNEL_STREAM2MEM0_TO_SP0:
+ state->fifo_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_SND_MC); /* SP_STR_MON_PORT_MC2SP */
+ state->sink_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_MOD_STREAM_STAT_IDX,
+ MOD_STR_MON_PORT_SND_MC);
+ state->src_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_SP_STREAM_STAT_IDX,
+ SP_STR_MON_PORT_RCV_MC); /* MOD_STR_MON_PORT_MC2SP */
+ state->fifo_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_SP_STREAM_STAT_IDX,
+ SP_STR_MON_PORT_RCV_MC);
+ break;
+ case FIFO_CHANNEL_SP0_TO_INPUT_SYSTEM0:
+ state->src_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_SP_STREAM_STAT_IDX,
+ SP_STR_MON_PORT_SP2ISYS);
+ state->fifo_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_SP_STREAM_STAT_IDX,
+ SP_STR_MON_PORT_SP2ISYS);
+ state->fifo_valid = false;
+ state->sink_accept = false;
+ break;
+ case FIFO_CHANNEL_INPUT_SYSTEM0_TO_SP0:
+ state->fifo_valid = false;
+ state->sink_accept = false;
+ state->src_valid = fifo_monitor_status_valid(ID,
+ HIVE_GP_REGS_SP_STREAM_STAT_IDX,
+ SP_STR_MON_PORT_ISYS2SP);
+ state->fifo_accept = fifo_monitor_status_accept(ID,
+ HIVE_GP_REGS_SP_STREAM_STAT_IDX,
+ SP_STR_MON_PORT_ISYS2SP);
+ break;
+ default:
+ assert(0);
+ break;
+ }
+
+ return;
+}
+
+void fifo_switch_get_state(
+ const fifo_monitor_ID_t ID,
+ const fifo_switch_t switch_id,
+ fifo_switch_state_t *state)
+{
+ hrt_data data = (hrt_data)-1;
+
+ assert(ID == FIFO_MONITOR0_ID);
+ assert(switch_id < N_FIFO_SWITCH);
+ assert(state != NULL);
+
+ (void)ID;
+
+ data = gp_device_reg_load(GP_DEVICE0_ID, FIFO_SWITCH_ADDR[switch_id]);
+
+ state->is_none = (data == HIVE_ISP_CSS_STREAM_SWITCH_NONE);
+ state->is_sp = (data == HIVE_ISP_CSS_STREAM_SWITCH_SP);
+ state->is_isp = (data == HIVE_ISP_CSS_STREAM_SWITCH_ISP);
+
+ return;
+}
+
+void fifo_monitor_get_state(
+ const fifo_monitor_ID_t ID,
+ fifo_monitor_state_t *state)
+{
+ fifo_channel_t ch_id;
+ fifo_switch_t sw_id;
+
+ assert(ID < N_FIFO_MONITOR_ID);
+ assert(state != NULL);
+
+ for (ch_id = 0; ch_id < N_FIFO_CHANNEL; ch_id++) {
+ fifo_channel_get_state(ID, ch_id,
+ &(state->fifo_channels[ch_id]));
+ }
+
+ for (sw_id = 0; sw_id < N_FIFO_SWITCH; sw_id++) {
+ fifo_switch_get_state(ID, sw_id,
+ &(state->fifo_switches[sw_id]));
+ }
+ return;
+}
+
+STORAGE_CLASS_INLINE bool fifo_monitor_status_valid (
+ const fifo_monitor_ID_t ID,
+ const unsigned int reg,
+ const unsigned int port_id)
+{
+ hrt_data data = fifo_monitor_reg_load(ID, reg);
+
+ return (data >> (((port_id * 2) + _hive_str_mon_valid_offset))) & 0x1;
+}
+
+STORAGE_CLASS_INLINE bool fifo_monitor_status_accept(
+ const fifo_monitor_ID_t ID,
+ const unsigned int reg,
+ const unsigned int port_id)
+{
+ hrt_data data = fifo_monitor_reg_load(ID, reg);
+
+ return (data >> (((port_id * 2) + _hive_str_mon_accept_offset))) & 0x1;
+}
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/fifo_monitor_local.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/fifo_monitor_local.h
new file mode 100644
index 000000000000..ed2f86181788
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/fifo_monitor_local.h
@@ -0,0 +1,99 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __FIFO_MONITOR_LOCAL_H_INCLUDED__
+#define __FIFO_MONITOR_LOCAL_H_INCLUDED__
+
+#include <type_support.h>
+#include "fifo_monitor_global.h"
+
+#include "hive_isp_css_defs.h" /* ISP_STR_MON_PORT_SND_SP, ... */
+
+#define _hive_str_mon_valid_offset 0
+#define _hive_str_mon_accept_offset 1
+
+#define FIFO_CHANNEL_SP_VALID_MASK 0x55555555
+#define FIFO_CHANNEL_SP_VALID_B_MASK 0x00000055
+#define FIFO_CHANNEL_ISP_VALID_MASK 0x15555555
+#define FIFO_CHANNEL_MOD_VALID_MASK 0x55555555
+
+typedef enum fifo_switch {
+ FIFO_SWITCH_IF,
+ FIFO_SWITCH_GDC0,
+ FIFO_SWITCH_GDC1,
+ N_FIFO_SWITCH
+} fifo_switch_t;
+
+typedef enum fifo_channel {
+ FIFO_CHANNEL_ISP0_TO_SP0,
+ FIFO_CHANNEL_SP0_TO_ISP0,
+ FIFO_CHANNEL_ISP0_TO_IF0,
+ FIFO_CHANNEL_IF0_TO_ISP0,
+ FIFO_CHANNEL_ISP0_TO_IF1,
+ FIFO_CHANNEL_IF1_TO_ISP0,
+ FIFO_CHANNEL_ISP0_TO_DMA0,
+ FIFO_CHANNEL_DMA0_TO_ISP0,
+ FIFO_CHANNEL_ISP0_TO_GDC0,
+ FIFO_CHANNEL_GDC0_TO_ISP0,
+ FIFO_CHANNEL_ISP0_TO_GDC1,
+ FIFO_CHANNEL_GDC1_TO_ISP0,
+ FIFO_CHANNEL_ISP0_TO_HOST0,
+ FIFO_CHANNEL_HOST0_TO_ISP0,
+ FIFO_CHANNEL_SP0_TO_IF0,
+ FIFO_CHANNEL_IF0_TO_SP0,
+ FIFO_CHANNEL_SP0_TO_IF1,
+ FIFO_CHANNEL_IF1_TO_SP0,
+ FIFO_CHANNEL_SP0_TO_IF2,
+ FIFO_CHANNEL_IF2_TO_SP0,
+ FIFO_CHANNEL_SP0_TO_DMA0,
+ FIFO_CHANNEL_DMA0_TO_SP0,
+ FIFO_CHANNEL_SP0_TO_GDC0,
+ FIFO_CHANNEL_GDC0_TO_SP0,
+ FIFO_CHANNEL_SP0_TO_GDC1,
+ FIFO_CHANNEL_GDC1_TO_SP0,
+ FIFO_CHANNEL_SP0_TO_HOST0,
+ FIFO_CHANNEL_HOST0_TO_SP0,
+ FIFO_CHANNEL_SP0_TO_STREAM2MEM0,
+ FIFO_CHANNEL_STREAM2MEM0_TO_SP0,
+ FIFO_CHANNEL_SP0_TO_INPUT_SYSTEM0,
+ FIFO_CHANNEL_INPUT_SYSTEM0_TO_SP0,
+/*
+ * No clue what this is
+ *
+ FIFO_CHANNEL_SP0_TO_IRQ0,
+ FIFO_CHANNEL_IRQ0_TO_SP0,
+ */
+ N_FIFO_CHANNEL
+} fifo_channel_t;
+
+struct fifo_channel_state_s {
+ bool src_valid;
+ bool fifo_accept;
+ bool fifo_valid;
+ bool sink_accept;
+};
+
+/* The switch is tri-state */
+struct fifo_switch_state_s {
+ bool is_none;
+ bool is_isp;
+ bool is_sp;
+};
+
+struct fifo_monitor_state_s {
+ struct fifo_channel_state_s fifo_channels[N_FIFO_CHANNEL];
+ struct fifo_switch_state_s fifo_switches[N_FIFO_SWITCH];
+};
+
+#endif /* __FIFO_MONITOR_LOCAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/fifo_monitor_private.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/fifo_monitor_private.h
new file mode 100644
index 000000000000..618b2f7e9c75
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/fifo_monitor_private.h
@@ -0,0 +1,79 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __FIFO_MONITOR_PRIVATE_H_INCLUDED__
+#define __FIFO_MONITOR_PRIVATE_H_INCLUDED__
+
+#include "fifo_monitor_public.h"
+
+#define __INLINE_GP_DEVICE__
+#include "gp_device.h"
+
+#include "device_access.h"
+
+#include "assert_support.h"
+
+#ifdef __INLINE_FIFO_MONITOR__
+extern const unsigned int FIFO_SWITCH_ADDR[N_FIFO_SWITCH];
+#endif
+
+STORAGE_CLASS_FIFO_MONITOR_C void fifo_switch_set(
+ const fifo_monitor_ID_t ID,
+ const fifo_switch_t switch_id,
+ const hrt_data sel)
+{
+ assert(ID == FIFO_MONITOR0_ID);
+ assert(FIFO_MONITOR_BASE[ID] != (hrt_address)-1);
+ assert(switch_id < N_FIFO_SWITCH);
+ (void)ID;
+
+ gp_device_reg_store(GP_DEVICE0_ID, FIFO_SWITCH_ADDR[switch_id], sel);
+
+ return;
+}
+
+STORAGE_CLASS_FIFO_MONITOR_C hrt_data fifo_switch_get(
+ const fifo_monitor_ID_t ID,
+ const fifo_switch_t switch_id)
+{
+ assert(ID == FIFO_MONITOR0_ID);
+ assert(FIFO_MONITOR_BASE[ID] != (hrt_address)-1);
+ assert(switch_id < N_FIFO_SWITCH);
+ (void)ID;
+
+ return gp_device_reg_load(GP_DEVICE0_ID, FIFO_SWITCH_ADDR[switch_id]);
+}
+
+
+STORAGE_CLASS_FIFO_MONITOR_C void fifo_monitor_reg_store(
+ const fifo_monitor_ID_t ID,
+ const unsigned int reg,
+ const hrt_data value)
+{
+ assert(ID < N_FIFO_MONITOR_ID);
+ assert(FIFO_MONITOR_BASE[ID] != (hrt_address)-1);
+ ia_css_device_store_uint32(FIFO_MONITOR_BASE[ID] + reg*sizeof(hrt_data), value);
+ return;
+}
+
+STORAGE_CLASS_FIFO_MONITOR_C hrt_data fifo_monitor_reg_load(
+ const fifo_monitor_ID_t ID,
+ const unsigned int reg)
+{
+ assert(ID < N_FIFO_MONITOR_ID);
+ assert(FIFO_MONITOR_BASE[ID] != (hrt_address)-1);
+ return ia_css_device_load_uint32(FIFO_MONITOR_BASE[ID] + reg*sizeof(hrt_data));
+}
+
+#endif /* __FIFO_MONITOR_PRIVATE_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gdc.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gdc.c
new file mode 100644
index 000000000000..69fa616889b1
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gdc.c
@@ -0,0 +1,127 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+/* The name "gdc.h" is already taken */
+#include "gdc_device.h"
+
+#include "device_access.h"
+
+#include "assert_support.h"
+
+/*
+ * Local function declarations
+ */
+STORAGE_CLASS_INLINE void gdc_reg_store(
+ const gdc_ID_t ID,
+ const unsigned int reg,
+ const hrt_data value);
+
+STORAGE_CLASS_INLINE hrt_data gdc_reg_load(
+ const gdc_ID_t ID,
+ const unsigned int reg);
+
+
+#ifndef __INLINE_GDC__
+#include "gdc_private.h"
+#endif /* __INLINE_GDC__ */
+
+/*
+ * Exported function implementations
+ */
+void gdc_lut_store(
+ const gdc_ID_t ID,
+ const int data[4][HRT_GDC_N])
+{
+ unsigned int i, lut_offset = HRT_GDC_LUT_IDX;
+
+ assert(ID < N_GDC_ID);
+ assert(HRT_GDC_LUT_COEFF_OFFSET <= (4*sizeof(hrt_data)));
+
+ for (i = 0; i < HRT_GDC_N; i++) {
+ hrt_data entry_0 = data[0][i] & HRT_GDC_BCI_COEF_MASK;
+ hrt_data entry_1 = data[1][i] & HRT_GDC_BCI_COEF_MASK;
+ hrt_data entry_2 = data[2][i] & HRT_GDC_BCI_COEF_MASK;
+ hrt_data entry_3 = data[3][i] & HRT_GDC_BCI_COEF_MASK;
+
+ hrt_data word_0 = entry_0 |
+ (entry_1 << HRT_GDC_LUT_COEFF_OFFSET);
+ hrt_data word_1 = entry_2 |
+ (entry_3 << HRT_GDC_LUT_COEFF_OFFSET);
+
+ gdc_reg_store(ID, lut_offset++, word_0);
+ gdc_reg_store(ID, lut_offset++, word_1);
+ }
+ return;
+}
+
+/*
+ * Input LUT format:
+ * c0[0-1023], c1[0-1023], c2[0-1023], c3[0-1023]
+ *
+ * Output LUT format (interleaved):
+ * c0[0], c1[0], c2[0], c3[0], c0[1], c1[1], c2[1], c3[1], ....
+ * c0[1023], c1[1023], c2[1023], c3[1023]
+ *
+ * The first format needs c0[0] and c1[0] (which are 1024 words apart)
+ * to program the GDC LUT registers. This makes it difficult to do
+ * piecemeal reads in the SP-side gdc_lut_store.
+ *
+ * The interleaved format allows the use of contiguous bytes to store
+ * into the GDC LUT registers.
+ *
+ * See gdc_lut_store() definition in host/gdc.c vs sp/gdc_private.h
+ *
+ */
+void gdc_lut_convert_to_isp_format(const int in_lut[4][HRT_GDC_N],
+ int out_lut[4][HRT_GDC_N])
+{
+ unsigned int i;
+ int *out = (int *)out_lut;
+
+ for (i = 0; i < HRT_GDC_N; i++) {
+ out[0] = in_lut[0][i];
+ out[1] = in_lut[1][i];
+ out[2] = in_lut[2][i];
+ out[3] = in_lut[3][i];
+ out += 4;
+ }
+}
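+
+/*
+ * Illustrative note (not part of the original code): after the conversion the
+ * interleaved table satisfies
+ *
+ *	((int *)out_lut)[4 * i + k] == in_lut[k][i]   for k = 0..3, i = 0..HRT_GDC_N-1
+ *
+ * so a host-side caller converts once and hands out_lut to whatever code
+ * programs the LUT (host_lut below is a hypothetical caller-owned table):
+ *
+ *	int isp_lut[4][HRT_GDC_N];
+ *
+ *	gdc_lut_convert_to_isp_format(host_lut, isp_lut);
+ */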
+
+int gdc_get_unity(
+ const gdc_ID_t ID)
+{
+ assert(ID < N_GDC_ID);
+ (void)ID;
+ return (int)(1UL << HRT_GDC_FRAC_BITS);
+}
+
+
+/*
+ * Local function implementations
+ */
+STORAGE_CLASS_INLINE void gdc_reg_store(
+ const gdc_ID_t ID,
+ const unsigned int reg,
+ const hrt_data value)
+{
+ ia_css_device_store_uint32(GDC_BASE[ID] + reg*sizeof(hrt_data), value);
+ return;
+}
+
+STORAGE_CLASS_INLINE hrt_data gdc_reg_load(
+ const gdc_ID_t ID,
+ const unsigned int reg)
+{
+ return ia_css_device_load_uint32(GDC_BASE[ID] + reg*sizeof(hrt_data));
+}
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gdc_local.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gdc_local.h
new file mode 100644
index 000000000000..0c6de867e012
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gdc_local.h
@@ -0,0 +1,20 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __GDC_LOCAL_H_INCLUDED__
+#define __GDC_LOCAL_H_INCLUDED__
+
+#include "gdc_global.h"
+
+#endif /* __GDC_LOCAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gdc_private.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gdc_private.h
new file mode 100644
index 000000000000..f7dec75adf78
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gdc_private.h
@@ -0,0 +1,20 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __GDC_PRIVATE_H_INCLUDED__
+#define __GDC_PRIVATE_H_INCLUDED__
+
+#include "gdc_public.h"
+
+#endif /* __GDC_PRIVATE_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gp_device.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gp_device.c
new file mode 100644
index 000000000000..9a34ac052adf
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gp_device.c
@@ -0,0 +1,108 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "assert_support.h"
+#include "gp_device.h"
+
+#ifndef __INLINE_GP_DEVICE__
+#include "gp_device_private.h"
+#endif /* __INLINE_GP_DEVICE__ */
+
+void gp_device_get_state(
+ const gp_device_ID_t ID,
+ gp_device_state_t *state)
+{
+ assert(ID < N_GP_DEVICE_ID);
+ assert(state != NULL);
+
+ state->syncgen_enable = gp_device_reg_load(ID,
+ _REG_GP_SYNCGEN_ENABLE_ADDR);
+ state->syncgen_free_running = gp_device_reg_load(ID,
+ _REG_GP_SYNCGEN_FREE_RUNNING_ADDR);
+ state->syncgen_pause = gp_device_reg_load(ID,
+ _REG_GP_SYNCGEN_PAUSE_ADDR);
+ state->nr_frames = gp_device_reg_load(ID,
+ _REG_GP_NR_FRAMES_ADDR);
+ state->syngen_nr_pix = gp_device_reg_load(ID,
+ _REG_GP_SYNGEN_NR_PIX_ADDR);
+ state->syngen_nr_lines = gp_device_reg_load(ID,
+ _REG_GP_SYNGEN_NR_LINES_ADDR);
+ state->syngen_hblank_cycles = gp_device_reg_load(ID,
+ _REG_GP_SYNGEN_HBLANK_CYCLES_ADDR);
+ state->syngen_vblank_cycles = gp_device_reg_load(ID,
+ _REG_GP_SYNGEN_VBLANK_CYCLES_ADDR);
+ state->isel_sof = gp_device_reg_load(ID,
+ _REG_GP_ISEL_SOF_ADDR);
+ state->isel_eof = gp_device_reg_load(ID,
+ _REG_GP_ISEL_EOF_ADDR);
+ state->isel_sol = gp_device_reg_load(ID,
+ _REG_GP_ISEL_SOL_ADDR);
+ state->isel_eol = gp_device_reg_load(ID,
+ _REG_GP_ISEL_EOL_ADDR);
+ state->isel_lfsr_enable = gp_device_reg_load(ID,
+ _REG_GP_ISEL_LFSR_ENABLE_ADDR);
+ state->isel_lfsr_enable_b = gp_device_reg_load(ID,
+ _REG_GP_ISEL_LFSR_ENABLE_B_ADDR);
+ state->isel_lfsr_reset_value = gp_device_reg_load(ID,
+ _REG_GP_ISEL_LFSR_RESET_VALUE_ADDR);
+ state->isel_tpg_enable = gp_device_reg_load(ID,
+ _REG_GP_ISEL_TPG_ENABLE_ADDR);
+ state->isel_tpg_enable_b = gp_device_reg_load(ID,
+ _REG_GP_ISEL_TPG_ENABLE_B_ADDR);
+ state->isel_hor_cnt_mask = gp_device_reg_load(ID,
+ _REG_GP_ISEL_HOR_CNT_MASK_ADDR);
+ state->isel_ver_cnt_mask = gp_device_reg_load(ID,
+ _REG_GP_ISEL_VER_CNT_MASK_ADDR);
+ state->isel_xy_cnt_mask = gp_device_reg_load(ID,
+ _REG_GP_ISEL_XY_CNT_MASK_ADDR);
+ state->isel_hor_cnt_delta = gp_device_reg_load(ID,
+ _REG_GP_ISEL_HOR_CNT_DELTA_ADDR);
+ state->isel_ver_cnt_delta = gp_device_reg_load(ID,
+ _REG_GP_ISEL_VER_CNT_DELTA_ADDR);
+ state->isel_tpg_mode = gp_device_reg_load(ID,
+ _REG_GP_ISEL_TPG_MODE_ADDR);
+ state->isel_tpg_red1 = gp_device_reg_load(ID,
+ _REG_GP_ISEL_TPG_RED1_ADDR);
+ state->isel_tpg_green1 = gp_device_reg_load(ID,
+ _REG_GP_ISEL_TPG_GREEN1_ADDR);
+ state->isel_tpg_blue1 = gp_device_reg_load(ID,
+ _REG_GP_ISEL_TPG_BLUE1_ADDR);
+ state->isel_tpg_red2 = gp_device_reg_load(ID,
+ _REG_GP_ISEL_TPG_RED2_ADDR);
+ state->isel_tpg_green2 = gp_device_reg_load(ID,
+ _REG_GP_ISEL_TPG_GREEN2_ADDR);
+ state->isel_tpg_blue2 = gp_device_reg_load(ID,
+ _REG_GP_ISEL_TPG_BLUE2_ADDR);
+ state->isel_ch_id = gp_device_reg_load(ID,
+ _REG_GP_ISEL_CH_ID_ADDR);
+ state->isel_fmt_type = gp_device_reg_load(ID,
+ _REG_GP_ISEL_FMT_TYPE_ADDR);
+ state->isel_data_sel = gp_device_reg_load(ID,
+ _REG_GP_ISEL_DATA_SEL_ADDR);
+ state->isel_sband_sel = gp_device_reg_load(ID,
+ _REG_GP_ISEL_SBAND_SEL_ADDR);
+ state->isel_sync_sel = gp_device_reg_load(ID,
+ _REG_GP_ISEL_SYNC_SEL_ADDR);
+ state->syncgen_hor_cnt = gp_device_reg_load(ID,
+ _REG_GP_SYNCGEN_HOR_CNT_ADDR);
+ state->syncgen_ver_cnt = gp_device_reg_load(ID,
+ _REG_GP_SYNCGEN_VER_CNT_ADDR);
+ state->syncgen_frame_cnt = gp_device_reg_load(ID,
+ _REG_GP_SYNCGEN_FRAME_CNT_ADDR);
+ state->soft_reset = gp_device_reg_load(ID,
+ _REG_GP_SOFT_RESET_ADDR);
+ return;
+}
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gp_device_local.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gp_device_local.h
new file mode 100644
index 000000000000..113d5ed32d42
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gp_device_local.h
@@ -0,0 +1,143 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __GP_DEVICE_LOCAL_H_INCLUDED__
+#define __GP_DEVICE_LOCAL_H_INCLUDED__
+
+#include "gp_device_global.h"
+
+/* @ GP_REGS_BASE -> GP_DEVICE_BASE */
+#define _REG_GP_SDRAM_WAKEUP_ADDR 0x00
+#define _REG_GP_IDLE_ADDR 0x04
+/* #define _REG_GP_IRQ_REQ0_ADDR 0x08 */
+/* #define _REG_GP_IRQ_REQ1_ADDR 0x0C */
+#define _REG_GP_SP_STREAM_STAT_ADDR 0x10
+#define _REG_GP_SP_STREAM_STAT_B_ADDR 0x14
+#define _REG_GP_ISP_STREAM_STAT_ADDR 0x18
+#define _REG_GP_MOD_STREAM_STAT_ADDR 0x1C
+#define _REG_GP_SP_STREAM_STAT_IRQ_COND_ADDR 0x20
+#define _REG_GP_SP_STREAM_STAT_B_IRQ_COND_ADDR 0x24
+#define _REG_GP_ISP_STREAM_STAT_IRQ_COND_ADDR 0x28
+#define _REG_GP_MOD_STREAM_STAT_IRQ_COND_ADDR 0x2C
+#define _REG_GP_SP_STREAM_STAT_IRQ_ENABLE_ADDR 0x30
+#define _REG_GP_SP_STREAM_STAT_B_IRQ_ENABLE_ADDR 0x34
+#define _REG_GP_ISP_STREAM_STAT_IRQ_ENABLE_ADDR 0x38
+#define _REG_GP_MOD_STREAM_STAT_IRQ_ENABLE_ADDR 0x3C
+/*
+#define _REG_GP_SWITCH_IF_ADDR 0x40
+#define _REG_GP_SWITCH_GDC1_ADDR 0x44
+#define _REG_GP_SWITCH_GDC2_ADDR 0x48
+*/
+#define _REG_GP_SLV_REG_RST_ADDR 0x50
+#define _REG_GP_SWITCH_ISYS2401_ADDR 0x54
+
+/* @ INPUT_FORMATTER_BASE -> GP_DEVICE_BASE */
+/*
+#define _REG_GP_IFMT_input_switch_lut_reg0 0x00030800
+#define _REG_GP_IFMT_input_switch_lut_reg1 0x00030804
+#define _REG_GP_IFMT_input_switch_lut_reg2 0x00030808
+#define _REG_GP_IFMT_input_switch_lut_reg3 0x0003080C
+#define _REG_GP_IFMT_input_switch_lut_reg4 0x00030810
+#define _REG_GP_IFMT_input_switch_lut_reg5 0x00030814
+#define _REG_GP_IFMT_input_switch_lut_reg6 0x00030818
+#define _REG_GP_IFMT_input_switch_lut_reg7 0x0003081C
+#define _REG_GP_IFMT_input_switch_fsync_lut 0x00030820
+#define _REG_GP_IFMT_srst 0x00030824
+#define _REG_GP_IFMT_slv_reg_srst 0x00030828
+#define _REG_GP_IFMT_input_switch_ch_id_fmt_type 0x0003082C
+*/
+/* @ GP_DEVICE_BASE */
+/*
+#define _REG_GP_SYNCGEN_ENABLE_ADDR 0x00090000
+#define _REG_GP_SYNCGEN_FREE_RUNNING_ADDR 0x00090004
+#define _REG_GP_SYNCGEN_PAUSE_ADDR 0x00090008
+#define _REG_GP_NR_FRAMES_ADDR 0x0009000C
+#define _REG_GP_SYNGEN_NR_PIX_ADDR 0x00090010
+#define _REG_GP_SYNGEN_NR_LINES_ADDR 0x00090014
+#define _REG_GP_SYNGEN_HBLANK_CYCLES_ADDR 0x00090018
+#define _REG_GP_SYNGEN_VBLANK_CYCLES_ADDR 0x0009001C
+#define _REG_GP_ISEL_SOF_ADDR 0x00090020
+#define _REG_GP_ISEL_EOF_ADDR 0x00090024
+#define _REG_GP_ISEL_SOL_ADDR 0x00090028
+#define _REG_GP_ISEL_EOL_ADDR 0x0009002C
+#define _REG_GP_ISEL_LFSR_ENABLE_ADDR 0x00090030
+#define _REG_GP_ISEL_LFSR_ENABLE_B_ADDR 0x00090034
+#define _REG_GP_ISEL_LFSR_RESET_VALUE_ADDR 0x00090038
+#define _REG_GP_ISEL_TPG_ENABLE_ADDR 0x0009003C
+#define _REG_GP_ISEL_TPG_ENABLE_B_ADDR 0x00090040
+#define _REG_GP_ISEL_HOR_CNT_MASK_ADDR 0x00090044
+#define _REG_GP_ISEL_VER_CNT_MASK_ADDR 0x00090048
+#define _REG_GP_ISEL_XY_CNT_MASK_ADDR 0x0009004C
+#define _REG_GP_ISEL_HOR_CNT_DELTA_ADDR 0x00090050
+#define _REG_GP_ISEL_VER_CNT_DELTA_ADDR 0x00090054
+#define _REG_GP_ISEL_TPG_MODE_ADDR 0x00090058
+#define _REG_GP_ISEL_TPG_RED1_ADDR 0x0009005C
+#define _REG_GP_ISEL_TPG_GREEN1_ADDR 0x00090060
+#define _REG_GP_ISEL_TPG_BLUE1_ADDR 0x00090064
+#define _REG_GP_ISEL_TPG_RED2_ADDR 0x00090068
+#define _REG_GP_ISEL_TPG_GREEN2_ADDR 0x0009006C
+#define _REG_GP_ISEL_TPG_BLUE2_ADDR 0x00090070
+#define _REG_GP_ISEL_CH_ID_ADDR 0x00090074
+#define _REG_GP_ISEL_FMT_TYPE_ADDR 0x00090078
+#define _REG_GP_ISEL_DATA_SEL_ADDR 0x0009007C
+#define _REG_GP_ISEL_SBAND_SEL_ADDR 0x00090080
+#define _REG_GP_ISEL_SYNC_SEL_ADDR 0x00090084
+#define _REG_GP_SYNCGEN_HOR_CNT_ADDR 0x00090088
+#define _REG_GP_SYNCGEN_VER_CNT_ADDR 0x0009008C
+#define _REG_GP_SYNCGEN_FRAME_CNT_ADDR 0x00090090
+#define _REG_GP_SOFT_RESET_ADDR 0x00090094
+*/
+
+struct gp_device_state_s {
+ int syncgen_enable;
+ int syncgen_free_running;
+ int syncgen_pause;
+ int nr_frames;
+ int syngen_nr_pix;
+ int syngen_nr_lines;
+ int syngen_hblank_cycles;
+ int syngen_vblank_cycles;
+ int isel_sof;
+ int isel_eof;
+ int isel_sol;
+ int isel_eol;
+ int isel_lfsr_enable;
+ int isel_lfsr_enable_b;
+ int isel_lfsr_reset_value;
+ int isel_tpg_enable;
+ int isel_tpg_enable_b;
+ int isel_hor_cnt_mask;
+ int isel_ver_cnt_mask;
+ int isel_xy_cnt_mask;
+ int isel_hor_cnt_delta;
+ int isel_ver_cnt_delta;
+ int isel_tpg_mode;
+ int isel_tpg_red1;
+ int isel_tpg_green1;
+ int isel_tpg_blue1;
+ int isel_tpg_red2;
+ int isel_tpg_green2;
+ int isel_tpg_blue2;
+ int isel_ch_id;
+ int isel_fmt_type;
+ int isel_data_sel;
+ int isel_sband_sel;
+ int isel_sync_sel;
+ int syncgen_hor_cnt;
+ int syncgen_ver_cnt;
+ int syncgen_frame_cnt;
+ int soft_reset;
+};
+
+#endif /* __GP_DEVICE_LOCAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gp_device_private.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gp_device_private.h
new file mode 100644
index 000000000000..bce1fdf79114
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gp_device_private.h
@@ -0,0 +1,46 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __GP_DEVICE_PRIVATE_H_INCLUDED__
+#define __GP_DEVICE_PRIVATE_H_INCLUDED__
+
+#include "gp_device_public.h"
+
+#include "device_access.h"
+
+#include "assert_support.h"
+
+STORAGE_CLASS_GP_DEVICE_C void gp_device_reg_store(
+ const gp_device_ID_t ID,
+ const unsigned int reg_addr,
+ const hrt_data value)
+{
+ assert(ID < N_GP_DEVICE_ID);
+ assert(GP_DEVICE_BASE[ID] != (hrt_address)-1);
+ assert((reg_addr % sizeof(hrt_data)) == 0);
+ ia_css_device_store_uint32(GP_DEVICE_BASE[ID] + reg_addr, value);
+ return;
+}
+
+STORAGE_CLASS_GP_DEVICE_C hrt_data gp_device_reg_load(
+ const gp_device_ID_t ID,
+ const hrt_address reg_addr)
+{
+ assert(ID < N_GP_DEVICE_ID);
+ assert(GP_DEVICE_BASE[ID] != (hrt_address)-1);
+ assert((reg_addr % sizeof(hrt_data)) == 0);
+ return ia_css_device_load_uint32(GP_DEVICE_BASE[ID] + reg_addr);
+}
+
+#endif /* __GP_DEVICE_PRIVATE_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gp_timer.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gp_timer.c
new file mode 100644
index 000000000000..5a4eabf79ee2
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gp_timer.c
@@ -0,0 +1,70 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <type_support.h> /*uint32_t */
+#include "gp_timer.h" /*system_local.h,
+ gp_timer_public.h*/
+
+#ifndef __INLINE_GP_TIMER__
+#include "gp_timer_private.h" /*device_access.h*/
+#endif /* __INLINE_GP_TIMER__ */
+#include "system_local.h"
+
+/** FIXME: not sure if reg_load(), reg_store() should be API.
+ */
+static uint32_t
+gp_timer_reg_load(uint32_t reg);
+
+static void
+gp_timer_reg_store(uint32_t reg, uint32_t value);
+
+static uint32_t
+gp_timer_reg_load(uint32_t reg)
+{
+ return ia_css_device_load_uint32(
+ GP_TIMER_BASE +
+ (reg * sizeof(uint32_t)));
+}
+
+static void
+gp_timer_reg_store(uint32_t reg, uint32_t value)
+{
+ ia_css_device_store_uint32((GP_TIMER_BASE +
+ (reg * sizeof(uint32_t))),
+ value);
+}
+
+void gp_timer_init(gp_timer_ID_t ID)
+{
+ /* set_overall_enable*/
+ gp_timer_reg_store(_REG_GP_TIMER_OVERALL_ENABLE, 1);
+
+ /*set enable*/
+ gp_timer_reg_store(_REG_GP_TIMER_ENABLE_ID(ID), 1);
+
+ /* set signal select */
+ gp_timer_reg_store(_REG_GP_TIMER_SIGNAL_SELECT_ID(ID), GP_TIMER_SIGNAL_SELECT);
+
+ /*set count type */
+ gp_timer_reg_store(_REG_GP_TIMER_COUNT_TYPE_ID(ID), GP_TIMER_COUNT_TYPE_LOW);
+
+ /*reset gp timer */
+ gp_timer_reg_store(_REG_GP_TIMER_RESET_REG, 0xFF);
+}
+
+uint32_t
+gp_timer_read(gp_timer_ID_t ID)
+{
+ return gp_timer_reg_load(_REG_GP_TIMER_VALUE_ID(ID));
+}
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gp_timer_local.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gp_timer_local.h
new file mode 100644
index 000000000000..19ce35d87291
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gp_timer_local.h
@@ -0,0 +1,45 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __GP_TIMER_LOCAL_H_INCLUDED__
+#define __GP_TIMER_LOCAL_H_INCLUDED__
+
+#include "gp_timer_global.h" /*GP_TIMER_SEL
+ GP_TIMER_SIGNAL_SELECT*/
+
+#include "gp_timer_defs.h" /*HIVE_GP_TIMER_xxx registers*/
+#include "hive_isp_css_defs.h" /*HIVE_GP_TIMER_NUM_COUNTERS
+ HIVE_GP_TIMER_NUM_IRQS*/
+
+#define _REG_GP_TIMER_RESET_REG HIVE_GP_TIMER_RESET_REG_IDX
+#define _REG_GP_TIMER_OVERALL_ENABLE HIVE_GP_TIMER_OVERALL_ENABLE_REG_IDX
+
+/*Register offsets for timers [1,7] can be obtained
+ * by adding (GP_TIMERx_ID * sizeof(uint32_t))*/
+#define _REG_GP_TIMER_ENABLE_ID(timer_id) HIVE_GP_TIMER_ENABLE_REG_IDX(timer_id)
+#define _REG_GP_TIMER_VALUE_ID(timer_id) HIVE_GP_TIMER_VALUE_REG_IDX(timer_id, HIVE_GP_TIMER_NUM_COUNTERS)
+#define _REG_GP_TIMER_COUNT_TYPE_ID(timer_id) HIVE_GP_TIMER_COUNT_TYPE_REG_IDX(timer_id, HIVE_GP_TIMER_NUM_COUNTERS)
+#define _REG_GP_TIMER_SIGNAL_SELECT_ID(timer_id) HIVE_GP_TIMER_SIGNAL_SELECT_REG_IDX(timer_id, HIVE_GP_TIMER_NUM_COUNTERS)
+
+
+#define _REG_GP_TIMER_IRQ_TRIGGER_VALUE_ID(irq_id) HIVE_GP_TIMER_IRQ_TRIGGER_VALUE_REG_IDX(irq_id, HIVE_GP_TIMER_NUM_COUNTERS)
+
+#define _REG_GP_TIMER_IRQ_TIMER_SELECT_ID(irq_id) \
+ HIVE_GP_TIMER_IRQ_TIMER_SELECT_REG_IDX(irq_id, HIVE_GP_TIMER_NUM_COUNTERS, HIVE_GP_TIMER_NUM_IRQS)
+
+#define _REG_GP_TIMER_IRQ_ENABLE_ID(irq_id) \
+ HIVE_GP_TIMER_IRQ_ENABLE_REG_IDX(irq_id, HIVE_GP_TIMER_NUM_COUNTERS, HIVE_GP_TIMER_NUM_IRQS)
+
+
+#endif /*__GP_TIMER_LOCAL_H_INCLUDED__*/
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gp_timer_private.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gp_timer_private.h
new file mode 100644
index 000000000000..705be5e5cc70
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gp_timer_private.h
@@ -0,0 +1,22 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __GP_TIMER_PRIVATE_H_INCLUDED__
+#define __GP_TIMER_PRIVATE_H_INCLUDED__
+
+#include "gp_timer_public.h"
+#include "device_access.h"
+#include "assert_support.h"
+
+#endif /* __GP_TIMER_PRIVATE_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gpio_local.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gpio_local.h
new file mode 100644
index 000000000000..f4652b79734d
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gpio_local.h
@@ -0,0 +1,20 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __GPIO_LOCAL_H_INCLUDED__
+#define __GPIO_LOCAL_H_INCLUDED__
+
+#include "gpio_global.h"
+
+#endif /* __GPIO_LOCAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gpio_private.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gpio_private.h
new file mode 100644
index 000000000000..6ace2184b522
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gpio_private.h
@@ -0,0 +1,44 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __GPIO_PRIVATE_H_INCLUDED__
+#define __GPIO_PRIVATE_H_INCLUDED__
+
+#include "gpio_public.h"
+
+#include "device_access.h"
+
+#include "assert_support.h"
+
+STORAGE_CLASS_GPIO_C void gpio_reg_store(
+ const gpio_ID_t ID,
+ const unsigned int reg,
+ const hrt_data value)
+{
+ OP___assert(ID < N_GPIO_ID);
+ OP___assert(GPIO_BASE[ID] != (hrt_address)-1);
+ ia_css_device_store_uint32(GPIO_BASE[ID] + reg * sizeof(hrt_data), value);
+ return;
+}
+
+STORAGE_CLASS_GPIO_C hrt_data gpio_reg_load(
+ const gpio_ID_t ID,
+ const unsigned int reg)
+{
+ OP___assert(ID < N_GPIO_ID);
+ OP___assert(GPIO_BASE[ID] != (hrt_address)-1);
+ return ia_css_device_load_uint32(GPIO_BASE[ID] + reg * sizeof(hrt_data));
+}
+
+#endif /* __GPIO_PRIVATE_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/hive_isp_css_ddr_hrt_modified.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/hive_isp_css_ddr_hrt_modified.h
new file mode 100644
index 000000000000..39785aa21459
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/hive_isp_css_ddr_hrt_modified.h
@@ -0,0 +1,148 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _hive_isp_css_ddr_hrt_modified_h_
+#define _hive_isp_css_ddr_hrt_modified_h_
+
+#include <hmm_64/hmm.h>
+
+/* This function reads an image from DDR and stores it in the img_buf array
+ that has been allocated by the caller.
+ The specifics of how the pixels are stored into DDR by the DMA are taken
+ into account (bits padded to a width of 256, depending on the number of
+ elements per ddr word).
+ The DMA-specific parameters given to this function (elems_per_xword and sign_extend)
+ should correspond to those given to the DMA engine.
+ The address is a virtual address which will be translated to a physical address before
+ data is loaded from or stored to that address.
+
+ The return value is 0 in case of success and 1 in case of failure.
+ */
+unsigned int
+hrt_isp_css_read_image_from_ddr(
+ unsigned short *img_buf,
+ unsigned int width,
+ unsigned int height,
+ unsigned int elems_per_xword,
+ unsigned int sign_extend,
+ hmm_ptr virt_addr);
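+
+/* Illustrative usage sketch only (an assumption of this text, not part of the
+ * interface): reading back a width x height frame that the DMA stored as
+ * 16-bit elements, i.e. 16 elements per 256-bit ddr word, without sign
+ * extension. The buffer "img" and the virtual address "frame" are
+ * hypothetical names used only for this example.
+ *
+ *	unsigned short *img = malloc(width * height * sizeof(*img));
+ *	if (hrt_isp_css_read_image_from_ddr(img, width, height, 16, 0, frame))
+ *		printf("reading the image back from DDR failed\n");
+ */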
+
+/* This function writes an image to DDR, taking the same aspects into account as the read_image function
+ above. */
+unsigned int
+hrt_isp_css_write_image_to_ddr(
+ const unsigned short *img_buf,
+ unsigned int width,
+ unsigned int height,
+ unsigned int elems_per_xword,
+ unsigned int sign_extend,
+ hmm_ptr virt_addr);
+
+/* return the size in bytes of an image (frame or plane). */
+unsigned int
+hrt_isp_css_sizeof_image_in_ddr(
+ unsigned int width,
+ unsigned int height,
+ unsigned int bits_per_element);
+
+unsigned int
+hrt_isp_css_stride_of_image_in_ddr(
+ unsigned int width,
+ unsigned int bits_per_element);
+
+hmm_ptr
+hrt_isp_css_alloc_image_in_ddr(
+ unsigned int width,
+ unsigned int height,
+ unsigned int elems_per_xword);
+
+hmm_ptr
+hrt_isp_css_calloc_image_in_ddr(
+ unsigned int width,
+ unsigned int height,
+ unsigned int elems_per_xword);
+
+#ifndef HIVE_ISP_NO_GDC
+#include "gdc_v2_defs.h"
+
+hmm_ptr
+hrt_isp_css_alloc_gdc_lut_in_ddr(void);
+
+void
+hrt_isp_css_write_gdc_lut_to_ddr(
+ short values[4][HRT_GDC_N],
+ hmm_ptr virt_addr);
+#endif
+
+#ifdef _HIVE_ISP_CSS_FPGA_SYSTEM
+hmm_ptr
+hrt_isp_css_alloc_image_for_display(
+ unsigned int width,
+ unsigned int height,
+ unsigned int elems_per_xword);
+
+hmm_ptr
+hrt_isp_css_calloc_image_for_display(
+ unsigned int width,
+ unsigned int height,
+ unsigned int elems_per_xword);
+#endif
+
+/* New set of functions: these do not require elems_per_xword but use bits_per_element instead,
+ so the user does not need to know the width of a DDR word. A short illustrative sketch follows
+ the first declaration below. */
+unsigned int
+hrt_isp_css_read_unsigned(
+ unsigned short *target,
+ unsigned int width,
+ unsigned int height,
+ unsigned int source_bits_per_element,
+ hmm_ptr source);
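+
+/* Illustrative sketch only: for 8-bit elements the call below passes the
+ * element width directly (8 bits per element), whereas the older interface
+ * above would need elems_per_xword = 256 / 8 = 32. "img" and "frame" are the
+ * same hypothetical names as in the earlier sketch.
+ *
+ *	(void)hrt_isp_css_read_unsigned(img, width, height, 8, frame);
+ */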
+
+unsigned int
+hrt_isp_css_read_signed(
+ short *target,
+ unsigned int width,
+ unsigned int height,
+ unsigned int source_bits_per_element,
+ hmm_ptr source);
+
+unsigned int
+hrt_isp_css_write_unsigned(
+ const unsigned short *source,
+ unsigned int width,
+ unsigned int height,
+ unsigned int target_bits_per_element,
+ hmm_ptr target);
+
+unsigned int
+hrt_isp_css_write_signed(
+ const short *source,
+ unsigned int width,
+ unsigned int height,
+ unsigned int target_bits_per_element,
+ hmm_ptr target);
+
+hmm_ptr
+hrt_isp_css_alloc(
+ unsigned int width,
+ unsigned int height,
+ unsigned int bits_per_element);
+
+hmm_ptr
+hrt_isp_css_calloc(
+ unsigned int width,
+ unsigned int height,
+ unsigned int bits_per_element);
+
+#endif /* _hive_isp_css_ddr_hrt_modified_h_ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/hive_isp_css_hrt_modified.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/hive_isp_css_hrt_modified.h
new file mode 100644
index 000000000000..342553d10e08
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/hive_isp_css_hrt_modified.h
@@ -0,0 +1,79 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _hive_isp_css_hrt_h
+#define _hive_isp_css_hrt_h
+
+#include "system_types.h"
+
+#include "hive_isp_css_host_ids_hrt.h"
+#include "hive_isp_css_defs.h"
+
+#ifdef HRT_ISP_CSS_CUSTOM_HOST
+#ifndef HRT_USE_VIR_ADDRS
+#define HRT_USE_VIR_ADDRS
+#endif
+/*#include "hive_isp_css_custom_host_hrt.h"*/
+#endif
+
+#include <gpio_block.h>
+#include <gp_regs.h>
+#include <gp_timer_hrt.h>
+#include <css_receiver_2400_hrt.h>
+/* #include <isp2400_mamoiada_params.h> */
+/* #include <isp2400_support.h> */
+/* insert idle signal clearing and setting around hrt_main */
+#if !defined(HRT_HW) || defined(HRT_ISP_CSS_INSERT_IDLE_SIGNAL)
+#define hrt_main _hrt_isp_css_main
+#endif
+#ifdef _HIVE_ISP_CSS_SPECMAN_SYSTEM
+#include "hive_isp_css_2400_specman_system.h"
+#else
+#if defined(IS_ISP_2400_MAMOIADA_SYSTEM)
+#include "hive_isp_css_2400_system.h"
+#elif defined(IS_ISP_2401_MAMOIADA_SYSTEM)
+#include "hive_isp_css_2401_system.h"
+#else
+#error "hive_isp_css_hrt_modified.h: SYSTEM must be one of {2400_MAMOIADA_SYSTEM, 2401_MAMOIADA_SYSTEM}"
+#endif
+#endif
+#include <sp_hrt.h>
+#include <input_system_hrt.h>
+#include <input_selector_hrt.h>
+#include <sig_monitor_hrt.h>
+
+#include "hive_isp_css_sdram_wakeup_hrt.h"
+#include "hive_isp_css_idle_signal_hrt.h"
+#include "hive_isp_css_sp_hrt.h"
+#include "hive_isp_css_isp_hrt.h"
+#include "hive_isp_css_streaming_to_mipi_hrt.h"
+#include "hive_isp_css_testbench_hrt.h"
+#include "hive_isp_css_streaming_monitors_hrt.h"
+#include "hive_isp_css_gp_regs_hrt.h"
+#if defined(IS_ISP_2400_MAMOIADA_SYSTEM)
+#include "hive_isp_css_irq_hrt.h"
+#elif defined(IS_ISP_2401_MAMOIADA_SYSTEM)
+#include "hive_isp_css_2401_irq_hrt.h"
+#else
+#error "hive_isp_css_hrt_modified.h: SYSTEM must be one of {2400_MAMOIADA_SYSTEM, 2401_MAMOIADA_SYSTEM}"
+#endif
+
+#include "hive_isp_css_stream_switch_hrt.h"
+
+#include "hive_isp_css_ddr_hrt_modified.h"
+#include "hive_isp_css_dma_set_hrt.h"
+
+#define HIVE_ISP_CSS_NO_STREAM_SWITCH 1
+
+#endif /* _hive_isp_css_hrt_h */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/hmem.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/hmem.c
new file mode 100644
index 000000000000..e48f180c9507
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/hmem.c
@@ -0,0 +1,19 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "hmem.h"
+
+#ifndef __INLINE_HMEM__
+#include "hmem_private.h"
+#endif /* __INLINE_HMEM__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/hmem_local.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/hmem_local.h
new file mode 100644
index 000000000000..499f55f07253
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/hmem_local.h
@@ -0,0 +1,20 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __HMEM_LOCAL_H_INCLUDED__
+#define __HMEM_LOCAL_H_INCLUDED__
+
+#include "hmem_global.h"
+
+#endif /* __HMEM_LOCAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/hmem_private.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/hmem_private.h
new file mode 100644
index 000000000000..2b636e0e6482
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/hmem_private.h
@@ -0,0 +1,30 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __HMEM_PRIVATE_H_INCLUDED__
+#define __HMEM_PRIVATE_H_INCLUDED__
+
+#include "hmem_public.h"
+
+#include "assert_support.h"
+
+STORAGE_CLASS_HMEM_C size_t sizeof_hmem(
+ const hmem_ID_t ID)
+{
+ assert(ID < N_HMEM_ID);
+ (void)ID;
+ return HMEM_SIZE * sizeof(hmem_data_t);
+}
+
+#endif /* __HMEM_PRIVATE_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/input_formatter.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/input_formatter.c
new file mode 100644
index 000000000000..a8997e45738e
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/input_formatter.c
@@ -0,0 +1,227 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "system_global.h"
+
+#ifdef USE_INPUT_SYSTEM_VERSION_2
+
+#include "input_formatter.h"
+#include <type_support.h>
+#include "gp_device.h"
+
+#include "assert_support.h"
+
+#ifndef __INLINE_INPUT_FORMATTER__
+#include "input_formatter_private.h"
+#endif /* __INLINE_INPUT_FORMATTER__ */
+
+const hrt_address HIVE_IF_SRST_ADDRESS[N_INPUT_FORMATTER_ID] = {
+ INPUT_FORMATTER0_SRST_OFFSET,
+ INPUT_FORMATTER1_SRST_OFFSET,
+ INPUT_FORMATTER2_SRST_OFFSET,
+ INPUT_FORMATTER3_SRST_OFFSET};
+
+const hrt_data HIVE_IF_SRST_MASK[N_INPUT_FORMATTER_ID] = {
+ INPUT_FORMATTER0_SRST_MASK,
+ INPUT_FORMATTER1_SRST_MASK,
+ INPUT_FORMATTER2_SRST_MASK,
+ INPUT_FORMATTER3_SRST_MASK};
+
+const uint8_t HIVE_IF_SWITCH_CODE[N_INPUT_FORMATTER_ID] = {
+ HIVE_INPUT_SWITCH_SELECT_IF_PRIM,
+ HIVE_INPUT_SWITCH_SELECT_IF_PRIM,
+ HIVE_INPUT_SWITCH_SELECT_IF_SEC,
+ HIVE_INPUT_SWITCH_SELECT_STR_TO_MEM};
+
+/* MW: Should be part of system_global.h, where we have the main enumeration */
+const bool HIVE_IF_BIN_COPY[N_INPUT_FORMATTER_ID] = {
+ false, false, false, true};
+
+void input_formatter_rst(
+ const input_formatter_ID_t ID)
+{
+ hrt_address addr;
+ hrt_data rst;
+
+ assert(ID < N_INPUT_FORMATTER_ID);
+
+ addr = HIVE_IF_SRST_ADDRESS[ID];
+ rst = HIVE_IF_SRST_MASK[ID];
+
+ /* TEMPORARY HACK: THIS RESET BREAKS THE METADATA FEATURE
+ * WHICH USES THE STREAM2MEMORY BLOCK.
+ * MUST BE FIXED PROPERLY
+ */
+ if (!HIVE_IF_BIN_COPY[ID]) {
+ input_formatter_reg_store(ID, addr, rst);
+ }
+
+ return;
+}
+
+unsigned int input_formatter_get_alignment(
+ const input_formatter_ID_t ID)
+{
+ assert(ID < N_INPUT_FORMATTER_ID);
+
+ return input_formatter_alignment[ID];
+}
+
+void input_formatter_set_fifo_blocking_mode(
+ const input_formatter_ID_t ID,
+ const bool enable)
+{
+ assert(ID < N_INPUT_FORMATTER_ID);
+
+ /* cnd_input_formatter_reg_store() */
+ if (!HIVE_IF_BIN_COPY[ID]) {
+ input_formatter_reg_store(ID,
+ HIVE_IF_BLOCK_FIFO_NO_REQ_ADDRESS, enable);
+ }
+ return;
+}
+
+void input_formatter_get_switch_state(
+ const input_formatter_ID_t ID,
+ input_formatter_switch_state_t *state)
+{
+ assert(ID < N_INPUT_FORMATTER_ID);
+ assert(state != NULL);
+
+ /* We'll change this into an intelligent function to get switch info per IF */
+ (void)ID;
+
+ state->if_input_switch_lut_reg[0] = gp_device_reg_load(GP_DEVICE0_ID, _REG_GP_IFMT_input_switch_lut_reg0);
+ state->if_input_switch_lut_reg[1] = gp_device_reg_load(GP_DEVICE0_ID, _REG_GP_IFMT_input_switch_lut_reg1);
+ state->if_input_switch_lut_reg[2] = gp_device_reg_load(GP_DEVICE0_ID, _REG_GP_IFMT_input_switch_lut_reg2);
+ state->if_input_switch_lut_reg[3] = gp_device_reg_load(GP_DEVICE0_ID, _REG_GP_IFMT_input_switch_lut_reg3);
+ state->if_input_switch_lut_reg[4] = gp_device_reg_load(GP_DEVICE0_ID, _REG_GP_IFMT_input_switch_lut_reg4);
+ state->if_input_switch_lut_reg[5] = gp_device_reg_load(GP_DEVICE0_ID, _REG_GP_IFMT_input_switch_lut_reg5);
+ state->if_input_switch_lut_reg[6] = gp_device_reg_load(GP_DEVICE0_ID, _REG_GP_IFMT_input_switch_lut_reg6);
+ state->if_input_switch_lut_reg[7] = gp_device_reg_load(GP_DEVICE0_ID, _REG_GP_IFMT_input_switch_lut_reg7);
+ state->if_input_switch_fsync_lut = gp_device_reg_load(GP_DEVICE0_ID, _REG_GP_IFMT_input_switch_fsync_lut);
+ state->if_input_switch_ch_id_fmt_type = gp_device_reg_load(GP_DEVICE0_ID, _REG_GP_IFMT_input_switch_ch_id_fmt_type);
+
+ return;
+}
+
+void input_formatter_get_state(
+ const input_formatter_ID_t ID,
+ input_formatter_state_t *state)
+{
+ assert(ID < N_INPUT_FORMATTER_ID);
+ assert(state != NULL);
+/*
+ state->reset = input_formatter_reg_load(ID,
+ HIVE_IF_RESET_ADDRESS);
+ */
+ state->start_line = input_formatter_reg_load(ID,
+ HIVE_IF_START_LINE_ADDRESS);
+ state->start_column = input_formatter_reg_load(ID,
+ HIVE_IF_START_COLUMN_ADDRESS);
+ state->cropped_height = input_formatter_reg_load(ID,
+ HIVE_IF_CROPPED_HEIGHT_ADDRESS);
+ state->cropped_width = input_formatter_reg_load(ID,
+ HIVE_IF_CROPPED_WIDTH_ADDRESS);
+ state->ver_decimation = input_formatter_reg_load(ID,
+ HIVE_IF_VERTICAL_DECIMATION_ADDRESS);
+ state->hor_decimation = input_formatter_reg_load(ID,
+ HIVE_IF_HORIZONTAL_DECIMATION_ADDRESS);
+ state->hor_deinterleaving = input_formatter_reg_load(ID,
+ HIVE_IF_H_DEINTERLEAVING_ADDRESS);
+ state->left_padding = input_formatter_reg_load(ID,
+ HIVE_IF_LEFTPADDING_WIDTH_ADDRESS);
+ state->eol_offset = input_formatter_reg_load(ID,
+ HIVE_IF_END_OF_LINE_OFFSET_ADDRESS);
+ state->vmem_start_address = input_formatter_reg_load(ID,
+ HIVE_IF_VMEM_START_ADDRESS_ADDRESS);
+ state->vmem_end_address = input_formatter_reg_load(ID,
+ HIVE_IF_VMEM_END_ADDRESS_ADDRESS);
+ state->vmem_increment = input_formatter_reg_load(ID,
+ HIVE_IF_VMEM_INCREMENT_ADDRESS);
+ state->is_yuv420 = input_formatter_reg_load(ID,
+ HIVE_IF_YUV_420_FORMAT_ADDRESS);
+ state->vsync_active_low = input_formatter_reg_load(ID,
+ HIVE_IF_VSYNCK_ACTIVE_LOW_ADDRESS);
+ state->hsync_active_low = input_formatter_reg_load(ID,
+ HIVE_IF_HSYNCK_ACTIVE_LOW_ADDRESS);
+ state->allow_fifo_overflow = input_formatter_reg_load(ID,
+ HIVE_IF_ALLOW_FIFO_OVERFLOW_ADDRESS);
+ state->block_fifo_when_no_req = input_formatter_reg_load(ID,
+ HIVE_IF_BLOCK_FIFO_NO_REQ_ADDRESS);
+ state->ver_deinterleaving = input_formatter_reg_load(ID,
+ HIVE_IF_V_DEINTERLEAVING_ADDRESS);
+/* FSM */
+ state->fsm_sync_status = input_formatter_reg_load(ID,
+ HIVE_IF_FSM_SYNC_STATUS);
+ state->fsm_sync_counter = input_formatter_reg_load(ID,
+ HIVE_IF_FSM_SYNC_COUNTER);
+ state->fsm_crop_status = input_formatter_reg_load(ID,
+ HIVE_IF_FSM_CROP_STATUS);
+ state->fsm_crop_line_counter = input_formatter_reg_load(ID,
+ HIVE_IF_FSM_CROP_LINE_COUNTER);
+ state->fsm_crop_pixel_counter = input_formatter_reg_load(ID,
+ HIVE_IF_FSM_CROP_PIXEL_COUNTER);
+ state->fsm_deinterleaving_index = input_formatter_reg_load(ID,
+ HIVE_IF_FSM_DEINTERLEAVING_IDX);
+ state->fsm_dec_h_counter = input_formatter_reg_load(ID,
+ HIVE_IF_FSM_DECIMATION_H_COUNTER);
+ state->fsm_dec_v_counter = input_formatter_reg_load(ID,
+ HIVE_IF_FSM_DECIMATION_V_COUNTER);
+ state->fsm_dec_block_v_counter = input_formatter_reg_load(ID,
+ HIVE_IF_FSM_DECIMATION_BLOCK_V_COUNTER);
+ state->fsm_padding_status = input_formatter_reg_load(ID,
+ HIVE_IF_FSM_PADDING_STATUS);
+ state->fsm_padding_elem_counter = input_formatter_reg_load(ID,
+ HIVE_IF_FSM_PADDING_ELEMENT_COUNTER);
+ state->fsm_vector_support_error = input_formatter_reg_load(ID,
+ HIVE_IF_FSM_VECTOR_SUPPORT_ERROR);
+ state->fsm_vector_buffer_full = input_formatter_reg_load(ID,
+ HIVE_IF_FSM_VECTOR_SUPPORT_BUFF_FULL);
+ state->vector_support = input_formatter_reg_load(ID,
+ HIVE_IF_FSM_VECTOR_SUPPORT);
+ state->sensor_data_lost = input_formatter_reg_load(ID,
+ HIVE_IF_FIFO_SENSOR_STATUS);
+
+ return;
+}
+
+void input_formatter_bin_get_state(
+ const input_formatter_ID_t ID,
+ input_formatter_bin_state_t *state)
+{
+ assert(ID < N_INPUT_FORMATTER_ID);
+ assert(state != NULL);
+
+ state->reset = input_formatter_reg_load(ID,
+ HIVE_STR2MEM_SOFT_RESET_REG_ADDRESS);
+ state->input_endianness = input_formatter_reg_load(ID,
+ HIVE_STR2MEM_INPUT_ENDIANNESS_REG_ADDRESS);
+ state->output_endianness = input_formatter_reg_load(ID,
+ HIVE_STR2MEM_OUTPUT_ENDIANNESS_REG_ADDRESS);
+ state->bitswap = input_formatter_reg_load(ID,
+ HIVE_STR2MEM_BIT_SWAPPING_REG_ADDRESS);
+ state->block_synch = input_formatter_reg_load(ID,
+ HIVE_STR2MEM_BLOCK_SYNC_LEVEL_REG_ADDRESS);
+ state->packet_synch = input_formatter_reg_load(ID,
+ HIVE_STR2MEM_PACKET_SYNC_LEVEL_REG_ADDRESS);
+ state->readpostwrite_synch = input_formatter_reg_load(ID,
+ HIVE_STR2MEM_READ_POST_WRITE_SYNC_ENABLE_REG_ADDRESS);
+ state->is_2ppc = input_formatter_reg_load(ID,
+ HIVE_STR2MEM_DUAL_BYTE_INPUTS_ENABLED_REG_ADDRESS);
+ state->en_status_update = input_formatter_reg_load(ID,
+ HIVE_STR2MEM_EN_STAT_UPDATE_ADDRESS);
+ return;
+}
+#endif
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/input_formatter_local.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/input_formatter_local.h
new file mode 100644
index 000000000000..3e00b5e6bad7
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/input_formatter_local.h
@@ -0,0 +1,120 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __INPUT_FORMATTER_LOCAL_H_INCLUDED__
+#define __INPUT_FORMATTER_LOCAL_H_INCLUDED__
+
+#include "input_formatter_global.h"
+
+#include "isp.h" /* ISP_VEC_ALIGN */
+
+typedef struct input_formatter_switch_state_s input_formatter_switch_state_t;
+typedef struct input_formatter_state_s input_formatter_state_t;
+typedef struct input_formatter_bin_state_s input_formatter_bin_state_t;
+
+#define HIVE_IF_FSM_SYNC_STATUS 0x100
+#define HIVE_IF_FSM_SYNC_COUNTER 0x104
+#define HIVE_IF_FSM_DEINTERLEAVING_IDX 0x114
+#define HIVE_IF_FSM_DECIMATION_H_COUNTER 0x118
+#define HIVE_IF_FSM_DECIMATION_V_COUNTER 0x11C
+#define HIVE_IF_FSM_DECIMATION_BLOCK_V_COUNTER 0x120
+#define HIVE_IF_FSM_PADDING_STATUS 0x124
+#define HIVE_IF_FSM_PADDING_ELEMENT_COUNTER 0x128
+#define HIVE_IF_FSM_VECTOR_SUPPORT_ERROR 0x12C
+#define HIVE_IF_FSM_VECTOR_SUPPORT_BUFF_FULL 0x130
+#define HIVE_IF_FSM_VECTOR_SUPPORT 0x134
+#define HIVE_IF_FIFO_SENSOR_STATUS 0x138
+
+/*
+ * The switch LUT's coding defines a sink for each
+ * single channel ID + channel format type. Conversely
+ * the sink (i.e. an input formatter) can be reached
+ * from multiple channel & format type combinations
+ *
+ * LUT[0,1] channel=0, format type {0,1,...31}
+ * LUT[2,3] channel=1, format type {0,1,...31}
+ * LUT[4,5] channel=2, format type {0,1,...31}
+ * LUT[6,7] channel=3, format type {0,1,...31}
+ *
+ * Each register holds 16 2-bit fields encoding the sink
+ * {0,1,2,3}; "0" means unconnected.
+ *
+ * The single FSYNCH register uses four 3-bit fields of 1-hot
+ * encoded sink information; "0" means unconnected.
+ *
+ * The encoding is redundant. The FSYNCH setting connects
+ * a channel to a sink. After that, the LUTs belonging to
+ * that channel can still be directed to another sink, so the
+ * data can go to a different place than the sync. An
+ * illustrative decode sketch follows the switch state
+ * structure below.
+ */
+struct input_formatter_switch_state_s {
+ int if_input_switch_lut_reg[8];
+ int if_input_switch_fsync_lut;
+ int if_input_switch_ch_id_fmt_type;
+ bool if_input_switch_map[HIVE_SWITCH_N_CHANNELS][HIVE_SWITCH_N_FORMATTYPES];
+};
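+
+/* Illustrative decode sketch only (not used by the driver): with the LUT
+ * layout described above (two registers per channel, each holding 16 2-bit
+ * sink fields), and assuming the fields are packed LSB first, the sink for a
+ * (channel, format type) pair could be read from a captured switch state as:
+ *
+ *	int reg  = 2 * channel + (fmt_type >> 4);
+ *	int sink = (state.if_input_switch_lut_reg[reg] >>
+ *		    ((fmt_type & 0xf) * 2)) & 0x3;
+ *
+ * where "state" is a hypothetical input_formatter_switch_state_t variable and
+ * sink == 0 means unconnected.
+ */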
+
+struct input_formatter_state_s {
+/* int reset; */
+ int start_line;
+ int start_column;
+ int cropped_height;
+ int cropped_width;
+ int ver_decimation;
+ int hor_decimation;
+ int ver_deinterleaving;
+ int hor_deinterleaving;
+ int left_padding;
+ int eol_offset;
+ int vmem_start_address;
+ int vmem_end_address;
+ int vmem_increment;
+ int is_yuv420;
+ int vsync_active_low;
+ int hsync_active_low;
+ int allow_fifo_overflow;
+ int block_fifo_when_no_req;
+ int fsm_sync_status;
+ int fsm_sync_counter;
+ int fsm_crop_status;
+ int fsm_crop_line_counter;
+ int fsm_crop_pixel_counter;
+ int fsm_deinterleaving_index;
+ int fsm_dec_h_counter;
+ int fsm_dec_v_counter;
+ int fsm_dec_block_v_counter;
+ int fsm_padding_status;
+ int fsm_padding_elem_counter;
+ int fsm_vector_support_error;
+ int fsm_vector_buffer_full;
+ int vector_support;
+ int sensor_data_lost;
+};
+
+struct input_formatter_bin_state_s {
+ uint32_t reset;
+ uint32_t input_endianness;
+ uint32_t output_endianness;
+ uint32_t bitswap;
+ uint32_t block_synch;
+ uint32_t packet_synch;
+ uint32_t readpostwrite_synch;
+ uint32_t is_2ppc;
+ uint32_t en_status_update;
+};
+
+static const unsigned int input_formatter_alignment[N_INPUT_FORMATTER_ID] = {
+ ISP_VEC_ALIGN, ISP_VEC_ALIGN, HIVE_ISP_CTRL_DATA_BYTES};
+
+#endif /* __INPUT_FORMATTER_LOCAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/input_formatter_private.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/input_formatter_private.h
new file mode 100644
index 000000000000..d34933e44aa9
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/input_formatter_private.h
@@ -0,0 +1,46 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __INPUT_FORMATTER_PRIVATE_H_INCLUDED__
+#define __INPUT_FORMATTER_PRIVATE_H_INCLUDED__
+
+#include "input_formatter_public.h"
+
+#include "device_access.h"
+
+#include "assert_support.h"
+
+STORAGE_CLASS_INPUT_FORMATTER_C void input_formatter_reg_store(
+ const input_formatter_ID_t ID,
+ const hrt_address reg_addr,
+ const hrt_data value)
+{
+ assert(ID < N_INPUT_FORMATTER_ID);
+ assert(INPUT_FORMATTER_BASE[ID] != (hrt_address)-1);
+ assert((reg_addr % sizeof(hrt_data)) == 0);
+ ia_css_device_store_uint32(INPUT_FORMATTER_BASE[ID] + reg_addr, value);
+ return;
+}
+
+STORAGE_CLASS_INPUT_FORMATTER_C hrt_data input_formatter_reg_load(
+ const input_formatter_ID_t ID,
+ const unsigned int reg_addr)
+{
+ assert(ID < N_INPUT_FORMATTER_ID);
+ assert(INPUT_FORMATTER_BASE[ID] != (hrt_address)-1);
+ assert((reg_addr % sizeof(hrt_data)) == 0);
+ return ia_css_device_load_uint32(INPUT_FORMATTER_BASE[ID] + reg_addr);
+}
+
+#endif /* __INPUT_FORMATTER_PRIVATE_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/input_system.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/input_system.c
new file mode 100644
index 000000000000..f35e18987b67
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/input_system.c
@@ -0,0 +1,1823 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "system_global.h"
+
+#ifdef USE_INPUT_SYSTEM_VERSION_2
+
+#include "input_system.h"
+#include <type_support.h>
+#include "gp_device.h"
+
+#include "assert_support.h"
+
+#ifndef __INLINE_INPUT_SYSTEM__
+#include "input_system_private.h"
+#endif /* __INLINE_INPUT_SYSTEM__ */
+
+#define ZERO (0x0)
+#define ONE (1U)
+
+const ib_buffer_t IB_BUFFER_NULL = {0, 0, 0};
+
+static input_system_error_t input_system_configure_channel(
+ const channel_cfg_t channel);
+
+static input_system_error_t input_system_configure_channel_sensor(
+ const channel_cfg_t channel);
+
+static input_system_error_t input_buffer_configuration(void);
+
+static input_system_error_t configuration_to_registers(void);
+
+static void receiver_rst(const rx_ID_t ID);
+static void input_system_network_rst(const input_system_ID_t ID);
+
+static void capture_unit_configure(
+ const input_system_ID_t ID,
+ const sub_system_ID_t sub_id,
+ const ib_buffer_t* const cfg);
+
+static void acquisition_unit_configure(
+ const input_system_ID_t ID,
+ const sub_system_ID_t sub_id,
+ const ib_buffer_t* const cfg);
+
+static void ctrl_unit_configure(
+ const input_system_ID_t ID,
+ const sub_system_ID_t sub_id,
+ const ctrl_unit_cfg_t* const cfg);
+
+static void input_system_network_configure(
+ const input_system_ID_t ID,
+ const input_system_network_cfg_t * const cfg);
+
+/* MW: CSI was previously named "rx", short for "receiver" */
+static input_system_error_t set_csi_cfg(
+ csi_cfg_t* const lhs,
+ const csi_cfg_t* const rhs,
+ input_system_config_flags_t* const flags);
+
+static input_system_error_t set_source_type(
+ input_system_source_t* const lhs,
+ const input_system_source_t rhs,
+ input_system_config_flags_t* const flags);
+
+static input_system_error_t input_system_multiplexer_cfg(
+ input_system_multiplex_t* const lhs,
+ const input_system_multiplex_t rhs,
+ input_system_config_flags_t* const flags);
+
+
+
+STORAGE_CLASS_INLINE void capture_unit_get_state(
+ const input_system_ID_t ID,
+ const sub_system_ID_t sub_id,
+ capture_unit_state_t *state);
+
+STORAGE_CLASS_INLINE void acquisition_unit_get_state(
+ const input_system_ID_t ID,
+ const sub_system_ID_t sub_id,
+ acquisition_unit_state_t *state);
+
+STORAGE_CLASS_INLINE void ctrl_unit_get_state(
+ const input_system_ID_t ID,
+ const sub_system_ID_t sub_id,
+ ctrl_unit_state_t *state);
+
+STORAGE_CLASS_INLINE void mipi_port_get_state(
+ const rx_ID_t ID,
+ const mipi_port_ID_t port_ID,
+ mipi_port_state_t *state);
+
+STORAGE_CLASS_INLINE void rx_channel_get_state(
+ const rx_ID_t ID,
+ const unsigned int ch_id,
+ rx_channel_state_t *state);
+
+static void gp_device_rst(const gp_device_ID_t ID);
+
+static void input_selector_cfg_for_sensor(const gp_device_ID_t ID);
+
+static void input_switch_rst(const gp_device_ID_t ID);
+
+static void input_switch_cfg(
+ const gp_device_ID_t ID,
+ const input_switch_cfg_t * const cfg
+);
+
+void input_system_get_state(
+ const input_system_ID_t ID,
+ input_system_state_t *state)
+{
+ sub_system_ID_t sub_id;
+
+ assert(ID < N_INPUT_SYSTEM_ID);
+ assert(state != NULL);
+
+ state->str_multicastA_sel = input_system_sub_system_reg_load(ID,
+ GPREGS_UNIT0_ID,
+ HIVE_ISYS_GPREG_MULTICAST_A_IDX);
+ state->str_multicastB_sel = input_system_sub_system_reg_load(ID,
+ GPREGS_UNIT0_ID,
+ HIVE_ISYS_GPREG_MULTICAST_B_IDX);
+ state->str_multicastC_sel = input_system_sub_system_reg_load(ID,
+ GPREGS_UNIT0_ID,
+ HIVE_ISYS_GPREG_MULTICAST_C_IDX);
+ state->str_mux_sel = input_system_sub_system_reg_load(ID,
+ GPREGS_UNIT0_ID,
+ HIVE_ISYS_GPREG_MUX_IDX);
+ state->str_mon_status = input_system_sub_system_reg_load(ID,
+ GPREGS_UNIT0_ID,
+ HIVE_ISYS_GPREG_STRMON_STAT_IDX);
+ state->str_mon_irq_cond = input_system_sub_system_reg_load(ID,
+ GPREGS_UNIT0_ID,
+ HIVE_ISYS_GPREG_STRMON_COND_IDX);
+ state->str_mon_irq_en = input_system_sub_system_reg_load(ID,
+ GPREGS_UNIT0_ID,
+ HIVE_ISYS_GPREG_STRMON_IRQ_EN_IDX);
+ state->isys_srst = input_system_sub_system_reg_load(ID,
+ GPREGS_UNIT0_ID,
+ HIVE_ISYS_GPREG_SRST_IDX);
+ state->isys_slv_reg_srst = input_system_sub_system_reg_load(ID,
+ GPREGS_UNIT0_ID,
+ HIVE_ISYS_GPREG_SLV_REG_SRST_IDX);
+ state->str_deint_portA_cnt = input_system_sub_system_reg_load(ID,
+ GPREGS_UNIT0_ID,
+ HIVE_ISYS_GPREG_REG_PORT_A_IDX);
+ state->str_deint_portB_cnt = input_system_sub_system_reg_load(ID,
+ GPREGS_UNIT0_ID,
+ HIVE_ISYS_GPREG_REG_PORT_B_IDX);
+
+ for (sub_id = CAPTURE_UNIT0_ID; sub_id < CAPTURE_UNIT0_ID + N_CAPTURE_UNIT_ID; sub_id++) {
+ capture_unit_get_state(ID, sub_id,
+ &(state->capture_unit[sub_id - CAPTURE_UNIT0_ID]));
+ }
+ for (sub_id = ACQUISITION_UNIT0_ID; sub_id < ACQUISITION_UNIT0_ID + N_ACQUISITION_UNIT_ID; sub_id++) {
+ acquisition_unit_get_state(ID, sub_id,
+ &(state->acquisition_unit[sub_id - ACQUISITION_UNIT0_ID]));
+ }
+ for (sub_id = CTRL_UNIT0_ID; sub_id < CTRL_UNIT0_ID + N_CTRL_UNIT_ID; sub_id++) {
+ ctrl_unit_get_state(ID, sub_id,
+ &(state->ctrl_unit_state[sub_id - CTRL_UNIT0_ID]));
+ }
+
+ return;
+}
+
+void receiver_get_state(
+ const rx_ID_t ID,
+ receiver_state_t *state)
+{
+ mipi_port_ID_t port_id;
+ unsigned int ch_id;
+
+ assert(ID < N_RX_ID);
+ assert(state != NULL);
+
+ state->fs_to_ls_delay = (uint8_t)receiver_reg_load(ID,
+ _HRT_CSS_RECEIVER_FS_TO_LS_DELAY_REG_IDX);
+ state->ls_to_data_delay = (uint8_t)receiver_reg_load(ID,
+ _HRT_CSS_RECEIVER_LS_TO_DATA_DELAY_REG_IDX);
+ state->data_to_le_delay = (uint8_t)receiver_reg_load(ID,
+ _HRT_CSS_RECEIVER_DATA_TO_LE_DELAY_REG_IDX);
+ state->le_to_fe_delay = (uint8_t)receiver_reg_load(ID,
+ _HRT_CSS_RECEIVER_LE_TO_FE_DELAY_REG_IDX);
+ state->fe_to_fs_delay = (uint8_t)receiver_reg_load(ID,
+ _HRT_CSS_RECEIVER_FE_TO_FS_DELAY_REG_IDX);
+ state->le_to_fs_delay = (uint8_t)receiver_reg_load(ID,
+ _HRT_CSS_RECEIVER_LE_TO_LS_DELAY_REG_IDX);
+ state->is_two_ppc = (bool)receiver_reg_load(ID,
+ _HRT_CSS_RECEIVER_TWO_PIXEL_EN_REG_IDX);
+ state->backend_rst = receiver_reg_load(ID,
+ _HRT_CSS_RECEIVER_BACKEND_RST_REG_IDX);
+ state->raw18 = (uint16_t)receiver_reg_load(ID,
+ _HRT_CSS_RECEIVER_RAW18_REG_IDX);
+ state->force_raw8 = (bool)receiver_reg_load(ID,
+ _HRT_CSS_RECEIVER_FORCE_RAW8_REG_IDX);
+ state->raw16 = (uint16_t)receiver_reg_load(ID,
+ _HRT_CSS_RECEIVER_RAW16_REG_IDX);
+
+ for (port_id = (mipi_port_ID_t)0; port_id < N_MIPI_PORT_ID; port_id++) {
+ mipi_port_get_state(ID, port_id,
+ &(state->mipi_port_state[port_id]));
+ }
+ for (ch_id = (unsigned int)0; ch_id < N_RX_CHANNEL_ID; ch_id++) {
+ rx_channel_get_state(ID, ch_id,
+ &(state->rx_channel_state[ch_id]));
+ }
+
+ state->be_gsp_acc_ovl = receiver_reg_load(ID,
+ _HRT_CSS_RECEIVER_BE_GSP_ACC_OVL_REG_IDX);
+ state->be_srst = receiver_reg_load(ID,
+ _HRT_CSS_RECEIVER_BE_SRST_REG_IDX);
+ state->be_is_two_ppc = receiver_reg_load(ID,
+ _HRT_CSS_RECEIVER_BE_TWO_PPC_REG_IDX);
+ state->be_comp_format0 = receiver_reg_load(ID,
+ _HRT_CSS_RECEIVER_BE_COMP_FORMAT_REG0_IDX);
+ state->be_comp_format1 = receiver_reg_load(ID,
+ _HRT_CSS_RECEIVER_BE_COMP_FORMAT_REG1_IDX);
+ state->be_comp_format2 = receiver_reg_load(ID,
+ _HRT_CSS_RECEIVER_BE_COMP_FORMAT_REG2_IDX);
+ state->be_comp_format3 = receiver_reg_load(ID,
+ _HRT_CSS_RECEIVER_BE_COMP_FORMAT_REG3_IDX);
+ state->be_sel = receiver_reg_load(ID,
+ _HRT_CSS_RECEIVER_BE_SEL_REG_IDX);
+ state->be_raw16_config = receiver_reg_load(ID,
+ _HRT_CSS_RECEIVER_BE_RAW16_CONFIG_REG_IDX);
+ state->be_raw18_config = receiver_reg_load(ID,
+ _HRT_CSS_RECEIVER_BE_RAW18_CONFIG_REG_IDX);
+ state->be_force_raw8 = receiver_reg_load(ID,
+ _HRT_CSS_RECEIVER_BE_FORCE_RAW8_REG_IDX);
+ state->be_irq_status = receiver_reg_load(ID,
+ _HRT_CSS_RECEIVER_BE_IRQ_STATUS_REG_IDX);
+ state->be_irq_clear = receiver_reg_load(ID,
+ _HRT_CSS_RECEIVER_BE_IRQ_CLEAR_REG_IDX);
+
+ return;
+}
+
+bool is_mipi_format_yuv420(
+ const mipi_format_t mipi_format)
+{
+ bool is_yuv420 = (
+ (mipi_format == MIPI_FORMAT_YUV420_8) ||
+ (mipi_format == MIPI_FORMAT_YUV420_10) ||
+ (mipi_format == MIPI_FORMAT_YUV420_8_SHIFT) ||
+ (mipi_format == MIPI_FORMAT_YUV420_10_SHIFT));
+/* MIPI_FORMAT_YUV420_8_LEGACY is not YUV420 */
+
+ return is_yuv420;
+}
+
+void receiver_set_compression(
+ const rx_ID_t ID,
+ const unsigned int cfg_ID,
+ const mipi_compressor_t comp,
+ const mipi_predictor_t pred)
+{
+ const unsigned int field_id = cfg_ID % N_MIPI_FORMAT_CUSTOM;
+ const unsigned int ch_id = cfg_ID / N_MIPI_FORMAT_CUSTOM;
+ hrt_data val;
+ hrt_address addr = 0;
+ hrt_data reg;
+
+ assert(ID < N_RX_ID);
+ assert(cfg_ID < N_MIPI_COMPRESSOR_CONTEXT);
+ assert(field_id < N_MIPI_FORMAT_CUSTOM);
+ assert(ch_id < N_RX_CHANNEL_ID);
+ assert(comp < N_MIPI_COMPRESSOR_METHODS);
+ assert(pred < N_MIPI_PREDICTOR_TYPES);
+
+ val = (((uint8_t)pred) << 3) | comp;
+
+ switch (ch_id) {
+ case 0: addr = ((field_id < 6) ? _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC0_REG0_IDX : _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC0_REG1_IDX);
+ break;
+ case 1: addr = ((field_id < 6) ? _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC1_REG0_IDX : _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC1_REG1_IDX);
+ break;
+ case 2: addr = ((field_id < 6) ? _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC2_REG0_IDX : _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC2_REG1_IDX);
+ break;
+ case 3: addr = ((field_id < 6) ? _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC3_REG0_IDX : _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC3_REG1_IDX);
+ break;
+ default:
+ /* should not happen */
+ assert(false);
+ return;
+ }
+
+ reg = ((field_id < 6) ? (val << (field_id * 5)) : (val << ((field_id - 6) * 5)));
+ receiver_reg_store(ID, addr, reg);
+
+ return;
+}
+
+void receiver_port_enable(
+ const rx_ID_t ID,
+ const mipi_port_ID_t port_ID,
+ const bool cnd)
+{
+ hrt_data reg = receiver_port_reg_load(ID, port_ID,
+ _HRT_CSS_RECEIVER_DEVICE_READY_REG_IDX);
+
+ if (cnd) {
+ reg |= 0x01;
+ } else {
+ reg &= ~0x01;
+ }
+
+ receiver_port_reg_store(ID, port_ID,
+ _HRT_CSS_RECEIVER_DEVICE_READY_REG_IDX, reg);
+ return;
+}
+
+bool is_receiver_port_enabled(
+ const rx_ID_t ID,
+ const mipi_port_ID_t port_ID)
+{
+ hrt_data reg = receiver_port_reg_load(ID, port_ID,
+ _HRT_CSS_RECEIVER_DEVICE_READY_REG_IDX);
+ return ((reg & 0x01) != 0);
+}
+
+void receiver_irq_enable(
+ const rx_ID_t ID,
+ const mipi_port_ID_t port_ID,
+ const rx_irq_info_t irq_info)
+{
+ receiver_port_reg_store(ID,
+ port_ID, _HRT_CSS_RECEIVER_IRQ_ENABLE_REG_IDX, irq_info);
+ return;
+}
+
+rx_irq_info_t receiver_get_irq_info(
+ const rx_ID_t ID,
+ const mipi_port_ID_t port_ID)
+{
+ return receiver_port_reg_load(ID,
+ port_ID, _HRT_CSS_RECEIVER_IRQ_STATUS_REG_IDX);
+}
+
+void receiver_irq_clear(
+ const rx_ID_t ID,
+ const mipi_port_ID_t port_ID,
+ const rx_irq_info_t irq_info)
+{
+ receiver_port_reg_store(ID,
+ port_ID, _HRT_CSS_RECEIVER_IRQ_STATUS_REG_IDX, irq_info);
+	return;
+}
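+
+/*
+ * Typical interrupt handling sequence with the helpers above (sketch;
+ * "irq_mask" stands for whichever rx_irq_info_t bits the caller cares about):
+ *
+ *	receiver_irq_enable(RX0_ID, MIPI_PORT0_ID, irq_mask);
+ *	...
+ *	rx_irq_info_t info = receiver_get_irq_info(RX0_ID, MIPI_PORT0_ID);
+ *	// act on the reported conditions, then acknowledge them
+ *	receiver_irq_clear(RX0_ID, MIPI_PORT0_ID, info);
+ */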
+
+STORAGE_CLASS_INLINE void capture_unit_get_state(
+ const input_system_ID_t ID,
+ const sub_system_ID_t sub_id,
+ capture_unit_state_t *state)
+{
+ assert(/*(sub_id >= CAPTURE_UNIT0_ID) &&*/ (sub_id <= CAPTURE_UNIT2_ID));
+ assert(state != NULL);
+
+ state->StartMode = input_system_sub_system_reg_load(ID,
+ sub_id,
+ CAPT_START_MODE_REG_ID);
+ state->Start_Addr = input_system_sub_system_reg_load(ID,
+ sub_id,
+ CAPT_START_ADDR_REG_ID);
+ state->Mem_Region_Size = input_system_sub_system_reg_load(ID,
+ sub_id,
+ CAPT_MEM_REGION_SIZE_REG_ID);
+ state->Num_Mem_Regions = input_system_sub_system_reg_load(ID,
+ sub_id,
+ CAPT_NUM_MEM_REGIONS_REG_ID);
+// AM: Illegal read from the following registers.
+/* state->Init = input_system_sub_system_reg_load(ID,
+ sub_id,
+ CAPT_INIT_REG_ID);
+ state->Start = input_system_sub_system_reg_load(ID,
+ sub_id,
+ CAPT_START_REG_ID);
+ state->Stop = input_system_sub_system_reg_load(ID,
+ sub_id,
+ CAPT_STOP_REG_ID);
+*/
+ state->Packet_Length = input_system_sub_system_reg_load(ID,
+ sub_id,
+ CAPT_PACKET_LENGTH_REG_ID);
+ state->Received_Length = input_system_sub_system_reg_load(ID,
+ sub_id,
+ CAPT_RECEIVED_LENGTH_REG_ID);
+ state->Received_Short_Packets = input_system_sub_system_reg_load(ID,
+ sub_id,
+ CAPT_RECEIVED_SHORT_PACKETS_REG_ID);
+ state->Received_Long_Packets = input_system_sub_system_reg_load(ID,
+ sub_id,
+ CAPT_RECEIVED_LONG_PACKETS_REG_ID);
+ state->Last_Command = input_system_sub_system_reg_load(ID,
+ sub_id,
+ CAPT_LAST_COMMAND_REG_ID);
+ state->Next_Command = input_system_sub_system_reg_load(ID,
+ sub_id,
+ CAPT_NEXT_COMMAND_REG_ID);
+ state->Last_Acknowledge = input_system_sub_system_reg_load(ID,
+ sub_id,
+ CAPT_LAST_ACKNOWLEDGE_REG_ID);
+ state->Next_Acknowledge = input_system_sub_system_reg_load(ID,
+ sub_id,
+ CAPT_NEXT_ACKNOWLEDGE_REG_ID);
+ state->FSM_State_Info = input_system_sub_system_reg_load(ID,
+ sub_id,
+ CAPT_FSM_STATE_INFO_REG_ID);
+
+	return;
+}
+
+STORAGE_CLASS_INLINE void acquisition_unit_get_state(
+ const input_system_ID_t ID,
+ const sub_system_ID_t sub_id,
+ acquisition_unit_state_t *state)
+{
+ assert(sub_id == ACQUISITION_UNIT0_ID);
+ assert(state != NULL);
+
+ state->Start_Addr = input_system_sub_system_reg_load(ID,
+ sub_id,
+ ACQ_START_ADDR_REG_ID);
+ state->Mem_Region_Size = input_system_sub_system_reg_load(ID,
+ sub_id,
+ ACQ_MEM_REGION_SIZE_REG_ID);
+ state->Num_Mem_Regions = input_system_sub_system_reg_load(ID,
+ sub_id,
+ ACQ_NUM_MEM_REGIONS_REG_ID);
+// AM: Illegal read from the following registers.
+/* state->Init = input_system_sub_system_reg_load(ID,
+ sub_id,
+ ACQ_INIT_REG_ID);
+*/
+ state->Received_Short_Packets = input_system_sub_system_reg_load(ID,
+ sub_id,
+ ACQ_RECEIVED_SHORT_PACKETS_REG_ID);
+ state->Received_Long_Packets = input_system_sub_system_reg_load(ID,
+ sub_id,
+ ACQ_RECEIVED_LONG_PACKETS_REG_ID);
+ state->Last_Command = input_system_sub_system_reg_load(ID,
+ sub_id,
+ ACQ_LAST_COMMAND_REG_ID);
+ state->Next_Command = input_system_sub_system_reg_load(ID,
+ sub_id,
+ ACQ_NEXT_COMMAND_REG_ID);
+ state->Last_Acknowledge = input_system_sub_system_reg_load(ID,
+ sub_id,
+ ACQ_LAST_ACKNOWLEDGE_REG_ID);
+ state->Next_Acknowledge = input_system_sub_system_reg_load(ID,
+ sub_id,
+ ACQ_NEXT_ACKNOWLEDGE_REG_ID);
+ state->FSM_State_Info = input_system_sub_system_reg_load(ID,
+ sub_id,
+ ACQ_FSM_STATE_INFO_REG_ID);
+ state->Int_Cntr_Info = input_system_sub_system_reg_load(ID,
+ sub_id,
+ ACQ_INT_CNTR_INFO_REG_ID);
+
+	return;
+}
+
+STORAGE_CLASS_INLINE void ctrl_unit_get_state(
+ const input_system_ID_t ID,
+ const sub_system_ID_t sub_id,
+ ctrl_unit_state_t *state)
+{
+ assert(sub_id == CTRL_UNIT0_ID);
+ assert(state != NULL);
+
+ state->captA_start_addr = input_system_sub_system_reg_load(ID,
+ sub_id,
+ ISYS_CTRL_CAPT_START_ADDR_A_REG_ID);
+ state->captB_start_addr = input_system_sub_system_reg_load(ID,
+ sub_id,
+ ISYS_CTRL_CAPT_START_ADDR_B_REG_ID);
+ state->captC_start_addr = input_system_sub_system_reg_load(ID,
+ sub_id,
+ ISYS_CTRL_CAPT_START_ADDR_C_REG_ID);
+ state->captA_mem_region_size = input_system_sub_system_reg_load(ID,
+ sub_id,
+ ISYS_CTRL_CAPT_MEM_REGION_SIZE_A_REG_ID);
+ state->captB_mem_region_size = input_system_sub_system_reg_load(ID,
+ sub_id,
+ ISYS_CTRL_CAPT_MEM_REGION_SIZE_B_REG_ID);
+ state->captC_mem_region_size = input_system_sub_system_reg_load(ID,
+ sub_id,
+ ISYS_CTRL_CAPT_MEM_REGION_SIZE_C_REG_ID);
+ state->captA_num_mem_regions = input_system_sub_system_reg_load(ID,
+ sub_id,
+ ISYS_CTRL_CAPT_NUM_MEM_REGIONS_A_REG_ID);
+ state->captB_num_mem_regions = input_system_sub_system_reg_load(ID,
+ sub_id,
+ ISYS_CTRL_CAPT_NUM_MEM_REGIONS_B_REG_ID);
+ state->captC_num_mem_regions = input_system_sub_system_reg_load(ID,
+ sub_id,
+ ISYS_CTRL_CAPT_NUM_MEM_REGIONS_C_REG_ID);
+ state->acq_start_addr = input_system_sub_system_reg_load(ID,
+ sub_id,
+ ISYS_CTRL_ACQ_START_ADDR_REG_ID);
+ state->acq_mem_region_size = input_system_sub_system_reg_load(ID,
+ sub_id,
+ ISYS_CTRL_ACQ_MEM_REGION_SIZE_REG_ID);
+ state->acq_num_mem_regions = input_system_sub_system_reg_load(ID,
+ sub_id,
+ ISYS_CTRL_ACQ_NUM_MEM_REGIONS_REG_ID);
+// AM: Illegal read from the following registers.
+/* state->ctrl_init = input_system_sub_system_reg_load(ID,
+ sub_id,
+ ISYS_CTRL_INIT_REG_ID);
+*/
+ state->last_cmd = input_system_sub_system_reg_load(ID,
+ sub_id,
+ ISYS_CTRL_LAST_COMMAND_REG_ID);
+ state->next_cmd = input_system_sub_system_reg_load(ID,
+ sub_id,
+ ISYS_CTRL_NEXT_COMMAND_REG_ID);
+ state->last_ack = input_system_sub_system_reg_load(ID,
+ sub_id,
+ ISYS_CTRL_LAST_ACKNOWLEDGE_REG_ID);
+ state->next_ack = input_system_sub_system_reg_load(ID,
+ sub_id,
+ ISYS_CTRL_NEXT_ACKNOWLEDGE_REG_ID);
+ state->top_fsm_state = input_system_sub_system_reg_load(ID,
+ sub_id,
+ ISYS_CTRL_FSM_STATE_INFO_REG_ID);
+ state->captA_fsm_state = input_system_sub_system_reg_load(ID,
+ sub_id,
+ ISYS_CTRL_CAPT_A_FSM_STATE_INFO_REG_ID);
+ state->captB_fsm_state = input_system_sub_system_reg_load(ID,
+ sub_id,
+ ISYS_CTRL_CAPT_B_FSM_STATE_INFO_REG_ID);
+ state->captC_fsm_state = input_system_sub_system_reg_load(ID,
+ sub_id,
+ ISYS_CTRL_CAPT_C_FSM_STATE_INFO_REG_ID);
+ state->acq_fsm_state = input_system_sub_system_reg_load(ID,
+ sub_id,
+ ISYS_CTRL_ACQ_FSM_STATE_INFO_REG_ID);
+ state->capt_reserve_one_mem_region = input_system_sub_system_reg_load(ID,
+ sub_id,
+ ISYS_CTRL_CAPT_RESERVE_ONE_MEM_REGION_REG_ID);
+
+	return;
+}
+
+STORAGE_CLASS_INLINE void mipi_port_get_state(
+ const rx_ID_t ID,
+ const mipi_port_ID_t port_ID,
+ mipi_port_state_t *state)
+{
+ int i;
+
+ assert(ID < N_RX_ID);
+ assert(port_ID < N_MIPI_PORT_ID);
+ assert(state != NULL);
+
+ state->device_ready = receiver_port_reg_load(ID,
+ port_ID, _HRT_CSS_RECEIVER_DEVICE_READY_REG_IDX);
+ state->irq_status = receiver_port_reg_load(ID,
+ port_ID, _HRT_CSS_RECEIVER_IRQ_STATUS_REG_IDX);
+ state->irq_enable = receiver_port_reg_load(ID,
+ port_ID, _HRT_CSS_RECEIVER_IRQ_ENABLE_REG_IDX);
+ state->timeout_count = receiver_port_reg_load(ID,
+ port_ID, _HRT_CSS_RECEIVER_TIMEOUT_COUNT_REG_IDX);
+ state->init_count = (uint16_t)receiver_port_reg_load(ID,
+ port_ID, _HRT_CSS_RECEIVER_INIT_COUNT_REG_IDX);
+ state->raw16_18 = (uint16_t)receiver_port_reg_load(ID,
+ port_ID, _HRT_CSS_RECEIVER_RAW16_18_DATAID_REG_IDX);
+ state->sync_count = receiver_port_reg_load(ID,
+ port_ID, _HRT_CSS_RECEIVER_SYNC_COUNT_REG_IDX);
+ state->rx_count = receiver_port_reg_load(ID,
+ port_ID, _HRT_CSS_RECEIVER_RX_COUNT_REG_IDX);
+
+	for (i = 0; i < MIPI_4LANE_CFG; i++) {
+ state->lane_sync_count[i] = (uint8_t)((state->sync_count)>>(i*8));
+ state->lane_rx_count[i] = (uint8_t)((state->rx_count)>>(i*8));
+ }
+
+	return;
+}
+
+STORAGE_CLASS_INLINE void rx_channel_get_state(
+ const rx_ID_t ID,
+ const unsigned int ch_id,
+ rx_channel_state_t *state)
+{
+ int i;
+
+ assert(ID < N_RX_ID);
+ assert(ch_id < N_RX_CHANNEL_ID);
+ assert(state != NULL);
+
+ switch (ch_id) {
+ case 0:
+ state->comp_scheme0 = receiver_reg_load(ID,
+ _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC0_REG0_IDX);
+ state->comp_scheme1 = receiver_reg_load(ID,
+ _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC0_REG1_IDX);
+ break;
+ case 1:
+ state->comp_scheme0 = receiver_reg_load(ID,
+ _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC1_REG0_IDX);
+ state->comp_scheme1 = receiver_reg_load(ID,
+ _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC1_REG1_IDX);
+ break;
+ case 2:
+ state->comp_scheme0 = receiver_reg_load(ID,
+ _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC2_REG0_IDX);
+ state->comp_scheme1 = receiver_reg_load(ID,
+ _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC2_REG1_IDX);
+ break;
+ case 3:
+ state->comp_scheme0 = receiver_reg_load(ID,
+ _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC3_REG0_IDX);
+ state->comp_scheme1 = receiver_reg_load(ID,
+ _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC3_REG1_IDX);
+ break;
+ }
+
+/* See Table 7.1.17,..., 7.1.24 */
+ for (i = 0; i < 6; i++) {
+ uint8_t val = (uint8_t)((state->comp_scheme0)>>(i*5)) & 0x1f;
+ state->comp[i] = (mipi_compressor_t)(val & 0x07);
+ state->pred[i] = (mipi_predictor_t)((val & 0x18) >> 3);
+ }
+	/* Fields 6 and up are packed into the second register (REG1) */
+	for (i = 6; i < N_MIPI_FORMAT_CUSTOM; i++) {
+		uint8_t val = (uint8_t)((state->comp_scheme1)>>((i-6)*5)) & 0x1f;
+ state->comp[i] = (mipi_compressor_t)(val & 0x07);
+ state->pred[i] = (mipi_predictor_t)((val & 0x18) >> 3);
+ }
+
+	return;
+}
+
+// MW: "2400" in the name is not good, but this is to avoid a naming conflict
+input_system_cfg2400_t config;
+
+static void receiver_rst(
+ const rx_ID_t ID)
+{
+ mipi_port_ID_t port_id;
+
+ assert(ID < N_RX_ID);
+
+// Disable all ports.
+ for (port_id = MIPI_PORT0_ID; port_id < N_MIPI_PORT_ID; port_id++) {
+ receiver_port_enable(ID, port_id, false);
+ }
+
+ // AM: Additional actions for stopping receiver?
+
+ return;
+}
+
+// Single function to reset all the devices mapped via GP_DEVICE.
+static void gp_device_rst(const gp_device_ID_t ID)
+{
+ assert(ID < N_GP_DEVICE_ID);
+
+ gp_device_reg_store(ID, _REG_GP_SYNCGEN_ENABLE_ADDR, ZERO);
+ // gp_device_reg_store(ID, _REG_GP_SYNCGEN_FREE_RUNNING_ADDR, ZERO);
+ // gp_device_reg_store(ID, _REG_GP_SYNCGEN_PAUSE_ADDR, ONE);
+ // gp_device_reg_store(ID, _REG_GP_NR_FRAMES_ADDR, ZERO);
+ // gp_device_reg_store(ID, _REG_GP_SYNGEN_NR_PIX_ADDR, ZERO);
+ // gp_device_reg_store(ID, _REG_GP_SYNGEN_NR_PIX_ADDR, ZERO);
+ // gp_device_reg_store(ID, _REG_GP_SYNGEN_NR_LINES_ADDR, ZERO);
+ // gp_device_reg_store(ID, _REG_GP_SYNGEN_HBLANK_CYCLES_ADDR, ZERO);
+ // gp_device_reg_store(ID, _REG_GP_SYNGEN_VBLANK_CYCLES_ADDR, ZERO);
+// AM: The following calls cause strange warnings; probably these registers should not be initialized here.
+// gp_device_reg_store(ID, _REG_GP_ISEL_SOF_ADDR, ZERO);
+// gp_device_reg_store(ID, _REG_GP_ISEL_EOF_ADDR, ZERO);
+// gp_device_reg_store(ID, _REG_GP_ISEL_SOL_ADDR, ZERO);
+// gp_device_reg_store(ID, _REG_GP_ISEL_EOL_ADDR, ZERO);
+ gp_device_reg_store(ID, _REG_GP_ISEL_LFSR_ENABLE_ADDR, ZERO);
+ gp_device_reg_store(ID, _REG_GP_ISEL_LFSR_ENABLE_B_ADDR, ZERO);
+ gp_device_reg_store(ID, _REG_GP_ISEL_LFSR_RESET_VALUE_ADDR, ZERO);
+ gp_device_reg_store(ID, _REG_GP_ISEL_TPG_ENABLE_ADDR, ZERO);
+ gp_device_reg_store(ID, _REG_GP_ISEL_TPG_ENABLE_B_ADDR, ZERO);
+ gp_device_reg_store(ID, _REG_GP_ISEL_HOR_CNT_MASK_ADDR, ZERO);
+ gp_device_reg_store(ID, _REG_GP_ISEL_VER_CNT_MASK_ADDR, ZERO);
+ gp_device_reg_store(ID, _REG_GP_ISEL_XY_CNT_MASK_ADDR, ZERO);
+ gp_device_reg_store(ID, _REG_GP_ISEL_HOR_CNT_DELTA_ADDR, ZERO);
+ gp_device_reg_store(ID, _REG_GP_ISEL_VER_CNT_DELTA_ADDR, ZERO);
+ gp_device_reg_store(ID, _REG_GP_ISEL_TPG_MODE_ADDR, ZERO);
+ gp_device_reg_store(ID, _REG_GP_ISEL_TPG_RED1_ADDR, ZERO);
+ gp_device_reg_store(ID, _REG_GP_ISEL_TPG_GREEN1_ADDR, ZERO);
+ gp_device_reg_store(ID, _REG_GP_ISEL_TPG_BLUE1_ADDR, ZERO);
+ gp_device_reg_store(ID, _REG_GP_ISEL_TPG_RED2_ADDR, ZERO);
+ gp_device_reg_store(ID, _REG_GP_ISEL_TPG_GREEN2_ADDR, ZERO);
+ gp_device_reg_store(ID, _REG_GP_ISEL_TPG_BLUE2_ADDR, ZERO);
+ //gp_device_reg_store(ID, _REG_GP_ISEL_CH_ID_ADDR, ZERO);
+ //gp_device_reg_store(ID, _REG_GP_ISEL_FMT_TYPE_ADDR, ZERO);
+ gp_device_reg_store(ID, _REG_GP_ISEL_DATA_SEL_ADDR, ZERO);
+ gp_device_reg_store(ID, _REG_GP_ISEL_SBAND_SEL_ADDR, ZERO);
+ gp_device_reg_store(ID, _REG_GP_ISEL_SYNC_SEL_ADDR, ZERO);
+ // gp_device_reg_store(ID, _REG_GP_SYNCGEN_HOR_CNT_ADDR, ZERO);
+ // gp_device_reg_store(ID, _REG_GP_SYNCGEN_VER_CNT_ADDR, ZERO);
+ // gp_device_reg_store(ID, _REG_GP_SYNCGEN_FRAME_CNT_ADDR, ZERO);
+ gp_device_reg_store(ID, _REG_GP_SOFT_RESET_ADDR, ZERO); // AM: Maybe this soft reset is not safe.
+
+ return;
+}
+
+static void input_selector_cfg_for_sensor(const gp_device_ID_t ID)
+{
+ assert(ID < N_GP_DEVICE_ID);
+
+ gp_device_reg_store(ID, _REG_GP_ISEL_SOF_ADDR, ONE);
+ gp_device_reg_store(ID, _REG_GP_ISEL_EOF_ADDR, ONE);
+ gp_device_reg_store(ID, _REG_GP_ISEL_SOL_ADDR, ONE);
+ gp_device_reg_store(ID, _REG_GP_ISEL_EOL_ADDR, ONE);
+ gp_device_reg_store(ID, _REG_GP_ISEL_CH_ID_ADDR, ZERO);
+ gp_device_reg_store(ID, _REG_GP_ISEL_FMT_TYPE_ADDR, ZERO);
+ gp_device_reg_store(ID, _REG_GP_ISEL_DATA_SEL_ADDR, ZERO);
+ gp_device_reg_store(ID, _REG_GP_ISEL_SBAND_SEL_ADDR, ZERO);
+ gp_device_reg_store(ID, _REG_GP_ISEL_SYNC_SEL_ADDR, ZERO);
+ gp_device_reg_store(ID, _REG_GP_SOFT_RESET_ADDR, ZERO);
+
+ return;
+}
+
+static void input_switch_rst(const gp_device_ID_t ID)
+{
+ int addr;
+
+ assert(ID < N_GP_DEVICE_ID);
+
+ // Initialize the data&hsync LUT.
+ for (addr = _REG_GP_IFMT_input_switch_lut_reg0;
+ addr <= _REG_GP_IFMT_input_switch_lut_reg7; addr += SIZEOF_HRT_REG) {
+
+ gp_device_reg_store(ID, addr, ZERO);
+ }
+
+ // Initialize the vsync LUT.
+ gp_device_reg_store(ID,
+ _REG_GP_IFMT_input_switch_fsync_lut,
+ ZERO);
+
+ return;
+}
+
+static void input_switch_cfg(
+ const gp_device_ID_t ID,
+ const input_switch_cfg_t * const cfg)
+{
+ int addr_offset;
+
+ assert(ID < N_GP_DEVICE_ID);
+ assert(cfg != NULL);
+
+ // Initialize the data&hsync LUT.
+ for (addr_offset = 0; addr_offset < N_RX_CHANNEL_ID * 2; addr_offset++) {
+ assert(addr_offset * SIZEOF_HRT_REG + _REG_GP_IFMT_input_switch_lut_reg0 <= _REG_GP_IFMT_input_switch_lut_reg7);
+ gp_device_reg_store(ID,
+ _REG_GP_IFMT_input_switch_lut_reg0 + addr_offset * SIZEOF_HRT_REG,
+ cfg->hsync_data_reg[addr_offset]);
+ }
+
+ // Initialize the vsync LUT.
+ gp_device_reg_store(ID,
+ _REG_GP_IFMT_input_switch_fsync_lut,
+ cfg->vsync_data_reg);
+
+ return;
+}
+
+
+static void input_system_network_rst(const input_system_ID_t ID)
+{
+ unsigned int sub_id;
+
+ // Reset all 3 multicasts.
+ input_system_sub_system_reg_store(ID,
+ GPREGS_UNIT0_ID,
+ HIVE_ISYS_GPREG_MULTICAST_A_IDX,
+ INPUT_SYSTEM_DISCARD_ALL);
+ input_system_sub_system_reg_store(ID,
+ GPREGS_UNIT0_ID,
+ HIVE_ISYS_GPREG_MULTICAST_B_IDX,
+ INPUT_SYSTEM_DISCARD_ALL);
+ input_system_sub_system_reg_store(ID,
+ GPREGS_UNIT0_ID,
+ HIVE_ISYS_GPREG_MULTICAST_C_IDX,
+ INPUT_SYSTEM_DISCARD_ALL);
+
+ // Reset stream mux.
+ input_system_sub_system_reg_store(ID,
+ GPREGS_UNIT0_ID,
+ HIVE_ISYS_GPREG_MUX_IDX,
+ N_INPUT_SYSTEM_MULTIPLEX);
+
+ // Reset 3 capture units.
+ for (sub_id = CAPTURE_UNIT0_ID; sub_id < CAPTURE_UNIT0_ID + N_CAPTURE_UNIT_ID; sub_id++) {
+ input_system_sub_system_reg_store(ID,
+ sub_id,
+ CAPT_INIT_REG_ID,
+ 1U << CAPT_INIT_RST_REG_BIT);
+ }
+
+ // Reset acquisition unit.
+ for (sub_id = ACQUISITION_UNIT0_ID; sub_id < ACQUISITION_UNIT0_ID + N_ACQUISITION_UNIT_ID; sub_id++) {
+ input_system_sub_system_reg_store(ID,
+ sub_id,
+ ACQ_INIT_REG_ID,
+ 1U << ACQ_INIT_RST_REG_BIT);
+ }
+
+ // DMA unit reset is not needed.
+
+ // Reset controller units.
+ // NB: In future we need to keep part of ctrl_state for split capture and
+ for (sub_id = CTRL_UNIT0_ID; sub_id < CTRL_UNIT0_ID + N_CTRL_UNIT_ID; sub_id++) {
+ input_system_sub_system_reg_store(ID,
+ sub_id,
+ ISYS_CTRL_INIT_REG_ID,
+ 1U); //AM: Is there any named constant?
+ }
+
+ return;
+}
+
+// Function that resets the current configuration.
+input_system_error_t input_system_configuration_reset(void)
+{
+ unsigned int i;
+
+ receiver_rst(RX0_ID);
+
+ input_system_network_rst(INPUT_SYSTEM0_ID);
+
+ gp_device_rst(INPUT_SYSTEM0_ID);
+
+ input_switch_rst(INPUT_SYSTEM0_ID);
+
+ //target_rst();
+
+ // Reset IRQ_CTRLs.
+
+ // Reset configuration data structures.
+ for (i = 0; i < N_CHANNELS; i++ ) {
+ config.ch_flags[i] = INPUT_SYSTEM_CFG_FLAG_RESET;
+ config.target_isp_flags[i] = INPUT_SYSTEM_CFG_FLAG_RESET;
+ config.target_sp_flags[i] = INPUT_SYSTEM_CFG_FLAG_RESET;
+ config.target_strm2mem_flags[i] = INPUT_SYSTEM_CFG_FLAG_RESET;
+ }
+
+ for (i = 0; i < N_CSI_PORTS; i++ ) {
+ config.csi_buffer_flags[i] = INPUT_SYSTEM_CFG_FLAG_RESET;
+ config.multicast[i] = INPUT_SYSTEM_CFG_FLAG_RESET;
+ }
+
+ config.source_type_flags = INPUT_SYSTEM_CFG_FLAG_RESET;
+ config.acquisition_buffer_unique_flags = INPUT_SYSTEM_CFG_FLAG_RESET;
+ config.unallocated_ib_mem_words = IB_CAPACITY_IN_WORDS;
+ //config.acq_allocated_ib_mem_words = 0;
+
+	// Set the start of the session configuration.
+ config.session_flags = INPUT_SYSTEM_CFG_FLAG_REQUIRED;
+
+ return INPUT_SYSTEM_ERR_NO_ERROR;
+}
+
+// MW: Comments are good, but doxygen is required; place it at the declaration.
+// Function that appends the channel to the current configuration.
+static input_system_error_t input_system_configure_channel(
+ const channel_cfg_t channel)
+{
+ input_system_error_t error = INPUT_SYSTEM_ERR_NO_ERROR;
+ // Check if channel is not already configured.
+ if (config.ch_flags[channel.ch_id] & INPUT_SYSTEM_CFG_FLAG_SET){
+ return INPUT_SYSTEM_ERR_CHANNEL_ALREADY_SET;
+ } else {
+ switch (channel.source_type){
+ case INPUT_SYSTEM_SOURCE_SENSOR :
+ error = input_system_configure_channel_sensor(channel);
+ break;
+ case INPUT_SYSTEM_SOURCE_TPG :
+ return INPUT_SYSTEM_ERR_PARAMETER_NOT_SUPPORTED;
+ break;
+ case INPUT_SYSTEM_SOURCE_PRBS :
+ return INPUT_SYSTEM_ERR_PARAMETER_NOT_SUPPORTED;
+ break;
+ case INPUT_SYSTEM_SOURCE_FIFO :
+ return INPUT_SYSTEM_ERR_PARAMETER_NOT_SUPPORTED;
+ break;
+ default :
+ return INPUT_SYSTEM_ERR_PARAMETER_NOT_SUPPORTED;
+ break;
+ }
+
+ if (error != INPUT_SYSTEM_ERR_NO_ERROR) return error;
+	// Input switch channel configurations must be combined into the unified config.
+ config.input_switch_cfg.hsync_data_reg[channel.source_cfg.csi_cfg.csi_port * 2] =
+ channel.target_cfg.input_switch_channel_cfg.hsync_data_reg[0];
+ config.input_switch_cfg.hsync_data_reg[channel.source_cfg.csi_cfg.csi_port * 2 + 1] =
+ channel.target_cfg.input_switch_channel_cfg.hsync_data_reg[1];
+ config.input_switch_cfg.vsync_data_reg |=
+ (channel.target_cfg.input_switch_channel_cfg.vsync_data_reg & 0x7) << (channel.source_cfg.csi_cfg.csi_port * 3);
+
+ // Other targets are just copied and marked as set.
+ config.target_isp[channel.source_cfg.csi_cfg.csi_port] = channel.target_cfg.target_isp_cfg;
+ config.target_sp[channel.source_cfg.csi_cfg.csi_port] = channel.target_cfg.target_sp_cfg;
+ config.target_strm2mem[channel.source_cfg.csi_cfg.csi_port] = channel.target_cfg.target_strm2mem_cfg;
+ config.target_isp_flags[channel.source_cfg.csi_cfg.csi_port] |= INPUT_SYSTEM_CFG_FLAG_SET;
+ config.target_sp_flags[channel.source_cfg.csi_cfg.csi_port] |= INPUT_SYSTEM_CFG_FLAG_SET;
+ config.target_strm2mem_flags[channel.source_cfg.csi_cfg.csi_port] |= INPUT_SYSTEM_CFG_FLAG_SET;
+
+ config.ch_flags[channel.ch_id] = INPUT_SYSTEM_CFG_FLAG_SET;
+ }
+ return INPUT_SYSTEM_ERR_NO_ERROR;
+}
+
+// Function that partitions the input buffer space and determines the buffer addresses.
+static input_system_error_t input_buffer_configuration(void)
+{
+ uint32_t current_address = 0;
+ uint32_t unallocated_memory = IB_CAPACITY_IN_WORDS;
+
+ ib_buffer_t candidate_buffer_acq = IB_BUFFER_NULL;
+ uint32_t size_requested;
+ input_system_config_flags_t acq_already_specified = INPUT_SYSTEM_CFG_FLAG_RESET;
+ input_system_csi_port_t port;
+ for (port = INPUT_SYSTEM_PORT_A; port < N_INPUT_SYSTEM_PORTS; port++) {
+
+ csi_cfg_t source = config.csi_value[port];//.csi_cfg;
+
+ if ( config.csi_flags[port] & INPUT_SYSTEM_CFG_FLAG_SET) {
+
+ // Check and set csi buffer in input buffer.
+ switch (source.buffering_mode) {
+ case INPUT_SYSTEM_FIFO_CAPTURE :
+ case INPUT_SYSTEM_XMEM_ACQUIRE :
+ config.csi_buffer_flags[port] = INPUT_SYSTEM_CFG_FLAG_BLOCKED; // Well, not used.
+ break;
+
+ case INPUT_SYSTEM_FIFO_CAPTURE_WITH_COUNTING :
+ case INPUT_SYSTEM_SRAM_BUFFERING :
+ case INPUT_SYSTEM_XMEM_BUFFERING :
+ case INPUT_SYSTEM_XMEM_CAPTURE :
+ size_requested = source.csi_buffer.mem_reg_size * source.csi_buffer.nof_mem_regs;
+ if (source.csi_buffer.mem_reg_size > 0
+ && source.csi_buffer.nof_mem_regs >0
+ && size_requested <= unallocated_memory
+ ) {
+ config.csi_buffer[port].mem_reg_addr = current_address;
+ config.csi_buffer[port].mem_reg_size = source.csi_buffer.mem_reg_size;
+ config.csi_buffer[port].nof_mem_regs = source.csi_buffer.nof_mem_regs;
+ current_address += size_requested;
+ unallocated_memory -= size_requested;
+ config.csi_buffer_flags[port] = INPUT_SYSTEM_CFG_FLAG_SET;
+ } else {
+ config.csi_buffer_flags[port] |= INPUT_SYSTEM_CFG_FLAG_CONFLICT;
+ return INPUT_SYSTEM_ERR_CONFLICT_ON_RESOURCE;
+ }
+ break;
+
+ default :
+ config.csi_buffer_flags[port] |= INPUT_SYSTEM_CFG_FLAG_CONFLICT;
+ return INPUT_SYSTEM_ERR_PARAMETER_NOT_SUPPORTED;
+ break;
+ }
+
+			// Check whether an acquisition buffer is specified, but set it later since it has to be unique.
+ switch (source.buffering_mode) {
+ case INPUT_SYSTEM_FIFO_CAPTURE :
+ case INPUT_SYSTEM_SRAM_BUFFERING :
+ case INPUT_SYSTEM_XMEM_CAPTURE :
+ // Nothing to do.
+ break;
+
+ case INPUT_SYSTEM_FIFO_CAPTURE_WITH_COUNTING :
+ case INPUT_SYSTEM_XMEM_BUFFERING :
+ case INPUT_SYSTEM_XMEM_ACQUIRE :
+ if (acq_already_specified == INPUT_SYSTEM_CFG_FLAG_RESET) {
+ size_requested = source.acquisition_buffer.mem_reg_size
+ * source.acquisition_buffer.nof_mem_regs;
+ if (source.acquisition_buffer.mem_reg_size > 0
+ && source.acquisition_buffer.nof_mem_regs >0
+ && size_requested <= unallocated_memory
+ ) {
+ candidate_buffer_acq = source.acquisition_buffer;
+ acq_already_specified = INPUT_SYSTEM_CFG_FLAG_SET;
+ }
+ } else {
+ // Check if specified acquisition buffer is the same as specified before.
+ if (source.acquisition_buffer.mem_reg_size != candidate_buffer_acq.mem_reg_size
+ || source.acquisition_buffer.nof_mem_regs != candidate_buffer_acq.nof_mem_regs
+ ) {
+ config.acquisition_buffer_unique_flags |= INPUT_SYSTEM_CFG_FLAG_CONFLICT;
+ return INPUT_SYSTEM_ERR_CONFLICT_ON_RESOURCE;
+ }
+ }
+ break;
+
+ default :
+ return INPUT_SYSTEM_ERR_PARAMETER_NOT_SUPPORTED;
+ break;
+ }
+ } else {
+ config.csi_buffer_flags[port] = INPUT_SYSTEM_CFG_FLAG_BLOCKED;
+ }
+ } // end of for ( port )
+
+ // Set the acquisition buffer at the end.
+ size_requested = candidate_buffer_acq.mem_reg_size * candidate_buffer_acq.nof_mem_regs;
+ if (acq_already_specified == INPUT_SYSTEM_CFG_FLAG_SET
+ && size_requested <= unallocated_memory) {
+ config.acquisition_buffer_unique.mem_reg_addr = current_address;
+ config.acquisition_buffer_unique.mem_reg_size = candidate_buffer_acq.mem_reg_size;
+ config.acquisition_buffer_unique.nof_mem_regs = candidate_buffer_acq.nof_mem_regs;
+ current_address += size_requested;
+ unallocated_memory -= size_requested;
+ config.acquisition_buffer_unique_flags = INPUT_SYSTEM_CFG_FLAG_SET;
+
+ assert(current_address <= IB_CAPACITY_IN_WORDS);
+ }
+
+ return INPUT_SYSTEM_ERR_NO_ERROR;
+}
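+
+/*
+ * Worked example of the partitioning above (arbitrary sizes, assuming they
+ * fit within IB_CAPACITY_IN_WORDS): one buffered port requesting 3 regions
+ * of 512 words plus a shared acquisition buffer of 2 regions of 1024 words
+ * is laid out back to back:
+ *
+ *	csi_buffer[port]          : mem_reg_addr 0,    3 x 512  = 1536 words
+ *	acquisition_buffer_unique : mem_reg_addr 1536, 2 x 1024 = 2048 words
+ *
+ * leaving current_address at 3584 words and the remainder unallocated.
+ */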
+
+static void capture_unit_configure(
+ const input_system_ID_t ID,
+ const sub_system_ID_t sub_id,
+ const ib_buffer_t* const cfg)
+{
+ assert(ID < N_INPUT_SYSTEM_ID);
+ assert(/*(sub_id >= CAPTURE_UNIT0_ID) &&*/ (sub_id <= CAPTURE_UNIT2_ID)); // Commented part is always true.
+ assert(cfg != NULL);
+
+ input_system_sub_system_reg_store(ID,
+ sub_id,
+ CAPT_START_ADDR_REG_ID,
+ cfg->mem_reg_addr);
+ input_system_sub_system_reg_store(ID,
+ sub_id,
+ CAPT_MEM_REGION_SIZE_REG_ID,
+ cfg->mem_reg_size);
+ input_system_sub_system_reg_store(ID,
+ sub_id,
+ CAPT_NUM_MEM_REGIONS_REG_ID,
+ cfg->nof_mem_regs);
+
+ return;
+}
+
+
+static void acquisition_unit_configure(
+ const input_system_ID_t ID,
+ const sub_system_ID_t sub_id,
+ const ib_buffer_t* const cfg)
+{
+ assert(ID < N_INPUT_SYSTEM_ID);
+ assert(sub_id == ACQUISITION_UNIT0_ID);
+ assert(cfg != NULL);
+
+ input_system_sub_system_reg_store(ID,
+ sub_id,
+ ACQ_START_ADDR_REG_ID,
+ cfg->mem_reg_addr);
+ input_system_sub_system_reg_store(ID,
+ sub_id,
+ ACQ_NUM_MEM_REGIONS_REG_ID,
+ cfg->nof_mem_regs);
+ input_system_sub_system_reg_store(ID,
+ sub_id,
+ ACQ_MEM_REGION_SIZE_REG_ID,
+ cfg->mem_reg_size);
+
+ return;
+}
+
+
+static void ctrl_unit_configure(
+ const input_system_ID_t ID,
+ const sub_system_ID_t sub_id,
+ const ctrl_unit_cfg_t* const cfg)
+{
+ assert(ID < N_INPUT_SYSTEM_ID);
+ assert(sub_id == CTRL_UNIT0_ID);
+ assert(cfg != NULL);
+
+ input_system_sub_system_reg_store(ID,
+ sub_id,
+ ISYS_CTRL_CAPT_START_ADDR_A_REG_ID,
+ cfg->buffer_mipi[CAPTURE_UNIT0_ID].mem_reg_addr);
+ input_system_sub_system_reg_store(ID,
+ sub_id,
+ ISYS_CTRL_CAPT_MEM_REGION_SIZE_A_REG_ID,
+ cfg->buffer_mipi[CAPTURE_UNIT0_ID].mem_reg_size);
+ input_system_sub_system_reg_store(ID,
+ sub_id,
+ ISYS_CTRL_CAPT_NUM_MEM_REGIONS_A_REG_ID,
+ cfg->buffer_mipi[CAPTURE_UNIT0_ID].nof_mem_regs);
+
+ input_system_sub_system_reg_store(ID,
+ sub_id,
+ ISYS_CTRL_CAPT_START_ADDR_B_REG_ID,
+ cfg->buffer_mipi[CAPTURE_UNIT1_ID].mem_reg_addr);
+ input_system_sub_system_reg_store(ID,
+ sub_id,
+ ISYS_CTRL_CAPT_MEM_REGION_SIZE_B_REG_ID,
+ cfg->buffer_mipi[CAPTURE_UNIT1_ID].mem_reg_size);
+ input_system_sub_system_reg_store(ID,
+ sub_id,
+ ISYS_CTRL_CAPT_NUM_MEM_REGIONS_B_REG_ID,
+ cfg->buffer_mipi[CAPTURE_UNIT1_ID].nof_mem_regs);
+
+ input_system_sub_system_reg_store(ID,
+ sub_id,
+ ISYS_CTRL_CAPT_START_ADDR_C_REG_ID,
+ cfg->buffer_mipi[CAPTURE_UNIT2_ID].mem_reg_addr);
+ input_system_sub_system_reg_store(ID,
+ sub_id,
+ ISYS_CTRL_CAPT_MEM_REGION_SIZE_C_REG_ID,
+ cfg->buffer_mipi[CAPTURE_UNIT2_ID].mem_reg_size);
+ input_system_sub_system_reg_store(ID,
+ sub_id,
+ ISYS_CTRL_CAPT_NUM_MEM_REGIONS_C_REG_ID,
+ cfg->buffer_mipi[CAPTURE_UNIT2_ID].nof_mem_regs);
+
+ input_system_sub_system_reg_store(ID,
+ sub_id,
+ ISYS_CTRL_ACQ_START_ADDR_REG_ID,
+ cfg->buffer_acquire[ACQUISITION_UNIT0_ID - ACQUISITION_UNIT0_ID].mem_reg_addr);
+ input_system_sub_system_reg_store(ID,
+ sub_id,
+ ISYS_CTRL_ACQ_MEM_REGION_SIZE_REG_ID,
+ cfg->buffer_acquire[ACQUISITION_UNIT0_ID - ACQUISITION_UNIT0_ID].mem_reg_size);
+ input_system_sub_system_reg_store(ID,
+ sub_id,
+ ISYS_CTRL_ACQ_NUM_MEM_REGIONS_REG_ID,
+ cfg->buffer_acquire[ACQUISITION_UNIT0_ID - ACQUISITION_UNIT0_ID].nof_mem_regs);
+ input_system_sub_system_reg_store(ID,
+ sub_id,
+ ISYS_CTRL_CAPT_RESERVE_ONE_MEM_REGION_REG_ID,
+ 0);
+ return;
+}
+
+static void input_system_network_configure(
+ const input_system_ID_t ID,
+ const input_system_network_cfg_t * const cfg)
+{
+ uint32_t sub_id;
+
+ assert(ID < N_INPUT_SYSTEM_ID);
+ assert(cfg != NULL);
+
+ // Set all 3 multicasts.
+ input_system_sub_system_reg_store(ID,
+ GPREGS_UNIT0_ID,
+ HIVE_ISYS_GPREG_MULTICAST_A_IDX,
+ cfg->multicast_cfg[CAPTURE_UNIT0_ID]);
+ input_system_sub_system_reg_store(ID,
+ GPREGS_UNIT0_ID,
+ HIVE_ISYS_GPREG_MULTICAST_B_IDX,
+ cfg->multicast_cfg[CAPTURE_UNIT1_ID]);
+ input_system_sub_system_reg_store(ID,
+ GPREGS_UNIT0_ID,
+ HIVE_ISYS_GPREG_MULTICAST_C_IDX,
+ cfg->multicast_cfg[CAPTURE_UNIT2_ID]);
+
+ // Set stream mux.
+ input_system_sub_system_reg_store(ID,
+ GPREGS_UNIT0_ID,
+ HIVE_ISYS_GPREG_MUX_IDX,
+ cfg->mux_cfg);
+
+ // Set capture units.
+ for (sub_id = CAPTURE_UNIT0_ID; sub_id < CAPTURE_UNIT0_ID + N_CAPTURE_UNIT_ID; sub_id++) {
+ capture_unit_configure(ID,
+ sub_id,
+ &(cfg->ctrl_unit_cfg[ID].buffer_mipi[sub_id - CAPTURE_UNIT0_ID]));
+ }
+
+ // Set acquisition units.
+ for (sub_id = ACQUISITION_UNIT0_ID; sub_id < ACQUISITION_UNIT0_ID + N_ACQUISITION_UNIT_ID; sub_id++) {
+ acquisition_unit_configure(ID,
+ sub_id,
+ &(cfg->ctrl_unit_cfg[sub_id - ACQUISITION_UNIT0_ID].buffer_acquire[sub_id - ACQUISITION_UNIT0_ID]));
+ }
+
+ // No DMA configuration needed. Ctrl_unit will fully control it.
+
+ // Set controller units.
+ for (sub_id = CTRL_UNIT0_ID; sub_id < CTRL_UNIT0_ID + N_CTRL_UNIT_ID; sub_id++) {
+ ctrl_unit_configure(ID,
+ sub_id,
+ &(cfg->ctrl_unit_cfg[sub_id - CTRL_UNIT0_ID]));
+ }
+
+ return;
+}
+
+static input_system_error_t configuration_to_registers(void)
+{
+ input_system_network_cfg_t input_system_network_cfg;
+ int i;
+
+ assert(config.source_type_flags & INPUT_SYSTEM_CFG_FLAG_SET);
+
+ switch (config.source_type) {
+ case INPUT_SYSTEM_SOURCE_SENSOR :
+
+		// Determine the stream multicast settings based on the buffering mode in csi_cfg_t.
+		// AM: This should be moved to an earlier function call, e.g. the
+		// commit function.
+ for (i = MIPI_PORT0_ID; i < N_MIPI_PORT_ID; i++) {
+ if (config.csi_flags[i] & INPUT_SYSTEM_CFG_FLAG_SET) {
+
+ switch (config.csi_value[i].buffering_mode) {
+
+ case INPUT_SYSTEM_FIFO_CAPTURE:
+ config.multicast[i] = INPUT_SYSTEM_CSI_BACKEND;
+ break;
+
+ case INPUT_SYSTEM_XMEM_CAPTURE:
+ case INPUT_SYSTEM_SRAM_BUFFERING:
+ case INPUT_SYSTEM_XMEM_BUFFERING:
+ config.multicast[i] = INPUT_SYSTEM_INPUT_BUFFER;
+ break;
+
+ case INPUT_SYSTEM_FIFO_CAPTURE_WITH_COUNTING:
+ config.multicast[i] = INPUT_SYSTEM_MULTICAST;
+ break;
+
+ case INPUT_SYSTEM_XMEM_ACQUIRE:
+ config.multicast[i] = INPUT_SYSTEM_DISCARD_ALL;
+ break;
+
+ default:
+ config.multicast[i] = INPUT_SYSTEM_DISCARD_ALL;
+ return INPUT_SYSTEM_ERR_PARAMETER_NOT_SUPPORTED;
+ //break;
+ }
+ } else {
+ config.multicast[i]= INPUT_SYSTEM_DISCARD_ALL;
+ }
+
+ input_system_network_cfg.multicast_cfg[i] = config.multicast[i];
+
+ } // for
+
+ input_system_network_cfg.mux_cfg = config.multiplexer;
+
+ input_system_network_cfg.ctrl_unit_cfg[CTRL_UNIT0_ID - CTRL_UNIT0_ID].buffer_mipi[CAPTURE_UNIT0_ID] = config.csi_buffer[MIPI_PORT0_ID];
+ input_system_network_cfg.ctrl_unit_cfg[CTRL_UNIT0_ID - CTRL_UNIT0_ID].buffer_mipi[CAPTURE_UNIT1_ID] = config.csi_buffer[MIPI_PORT1_ID];
+ input_system_network_cfg.ctrl_unit_cfg[CTRL_UNIT0_ID - CTRL_UNIT0_ID].buffer_mipi[CAPTURE_UNIT2_ID] = config.csi_buffer[MIPI_PORT2_ID];
+ input_system_network_cfg.ctrl_unit_cfg[CTRL_UNIT0_ID - CTRL_UNIT0_ID].buffer_acquire[ACQUISITION_UNIT0_ID - ACQUISITION_UNIT0_ID] =
+ config.acquisition_buffer_unique;
+
+ // First set input network around CSI receiver.
+ input_system_network_configure(INPUT_SYSTEM0_ID, &input_system_network_cfg);
+
+ // Set the CSI receiver.
+ //...
+ break;
+
+ case INPUT_SYSTEM_SOURCE_TPG :
+
+ break;
+
+ case INPUT_SYSTEM_SOURCE_PRBS :
+
+ break;
+
+ case INPUT_SYSTEM_SOURCE_FIFO :
+ break;
+
+ default :
+ return INPUT_SYSTEM_ERR_PARAMETER_NOT_SUPPORTED;
+ break;
+
+ } // end of switch (source_type)
+
+ // Set input selector.
+ input_selector_cfg_for_sensor(INPUT_SYSTEM0_ID);
+
+ // Set input switch.
+ input_switch_cfg(INPUT_SYSTEM0_ID, &config.input_switch_cfg);
+
+ // Set input formatters.
+ // AM: IF are set dynamically.
+ return INPUT_SYSTEM_ERR_NO_ERROR;
+}
+
+
+// Function that applies the whole configuration.
+input_system_error_t input_system_configuration_commit(void)
+{
+ // The last configuration step is to configure the input buffer.
+ input_system_error_t error = input_buffer_configuration();
+ if (error != INPUT_SYSTEM_ERR_NO_ERROR) {
+ return error;
+ }
+
+ // Translate the whole configuration into registers.
+ error = configuration_to_registers();
+ if (error != INPUT_SYSTEM_ERR_NO_ERROR) {
+ return error;
+ }
+
+ // Translate the whole configuration into ctrl commands etc.
+
+ return INPUT_SYSTEM_ERR_NO_ERROR;
+}
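+
+/*
+ * End-to-end usage sketch (illustrative; "backend_ch" and "target" are
+ * assumed to have been filled in by the caller):
+ *
+ *	input_system_configuration_reset();
+ *	input_system_csi_fifo_channel_cfg(0, INPUT_SYSTEM_PORT_A,
+ *					  backend_ch, target);
+ *	input_system_configuration_commit();
+ */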
+
+
+
+// FIFO
+
+input_system_error_t input_system_csi_fifo_channel_cfg(
+ uint32_t ch_id,
+ input_system_csi_port_t port,
+ backend_channel_cfg_t backend_ch,
+ target_cfg2400_t target
+)
+{
+ channel_cfg_t channel;
+
+ channel.ch_id = ch_id;
+ channel.backend_ch = backend_ch;
+ channel.source_type = INPUT_SYSTEM_SOURCE_SENSOR;
+ //channel.source
+ channel.source_cfg.csi_cfg.csi_port = port;
+ channel.source_cfg.csi_cfg.buffering_mode = INPUT_SYSTEM_FIFO_CAPTURE;
+ channel.source_cfg.csi_cfg.csi_buffer = IB_BUFFER_NULL;
+ channel.source_cfg.csi_cfg.acquisition_buffer = IB_BUFFER_NULL;
+ channel.source_cfg.csi_cfg.nof_xmem_buffers = 0;
+
+ channel.target_cfg = target;
+ return input_system_configure_channel(channel);
+}
+
+
+input_system_error_t input_system_csi_fifo_channel_with_counting_cfg(
+ uint32_t ch_id,
+ uint32_t nof_frames,
+ input_system_csi_port_t port,
+ backend_channel_cfg_t backend_ch,
+ uint32_t csi_mem_reg_size,
+ uint32_t csi_nof_mem_regs,
+ target_cfg2400_t target
+)
+{
+ channel_cfg_t channel;
+
+ channel.ch_id = ch_id;
+ channel.backend_ch = backend_ch;
+ channel.source_type = INPUT_SYSTEM_SOURCE_SENSOR;
+ //channel.source
+ channel.source_cfg.csi_cfg.csi_port = port;
+ channel.source_cfg.csi_cfg.buffering_mode = INPUT_SYSTEM_FIFO_CAPTURE_WITH_COUNTING;
+ channel.source_cfg.csi_cfg.csi_buffer.mem_reg_size = csi_mem_reg_size;
+ channel.source_cfg.csi_cfg.csi_buffer.nof_mem_regs = csi_nof_mem_regs;
+ channel.source_cfg.csi_cfg.csi_buffer.mem_reg_addr = 0;
+ channel.source_cfg.csi_cfg.acquisition_buffer = IB_BUFFER_NULL;
+ channel.source_cfg.csi_cfg.nof_xmem_buffers = nof_frames;
+
+ channel.target_cfg = target;
+ return input_system_configure_channel(channel);
+}
+
+
+// SRAM
+
+input_system_error_t input_system_csi_sram_channel_cfg(
+ uint32_t ch_id,
+ input_system_csi_port_t port,
+ backend_channel_cfg_t backend_ch,
+ uint32_t csi_mem_reg_size,
+ uint32_t csi_nof_mem_regs,
+ // uint32_t acq_mem_reg_size,
+ // uint32_t acq_nof_mem_regs,
+ target_cfg2400_t target
+)
+{
+ channel_cfg_t channel;
+
+ channel.ch_id = ch_id;
+ channel.backend_ch = backend_ch;
+ channel.source_type = INPUT_SYSTEM_SOURCE_SENSOR;
+ //channel.source
+ channel.source_cfg.csi_cfg.csi_port = port;
+ channel.source_cfg.csi_cfg.buffering_mode = INPUT_SYSTEM_SRAM_BUFFERING;
+ channel.source_cfg.csi_cfg.csi_buffer.mem_reg_size = csi_mem_reg_size;
+ channel.source_cfg.csi_cfg.csi_buffer.nof_mem_regs = csi_nof_mem_regs;
+ channel.source_cfg.csi_cfg.csi_buffer.mem_reg_addr = 0;
+ channel.source_cfg.csi_cfg.acquisition_buffer = IB_BUFFER_NULL;
+ channel.source_cfg.csi_cfg.nof_xmem_buffers = 0;
+
+ channel.target_cfg = target;
+ return input_system_configure_channel(channel);
+}
+
+
+//XMEM
+
+// Collects all parameters and puts them in channel_cfg_t.
+input_system_error_t input_system_csi_xmem_channel_cfg(
+ uint32_t ch_id,
+ input_system_csi_port_t port,
+ backend_channel_cfg_t backend_ch,
+ uint32_t csi_mem_reg_size,
+ uint32_t csi_nof_mem_regs,
+ uint32_t acq_mem_reg_size,
+ uint32_t acq_nof_mem_regs,
+ target_cfg2400_t target,
+ uint32_t nof_xmem_buffers
+)
+{
+ channel_cfg_t channel;
+
+ channel.ch_id = ch_id;
+ channel.backend_ch = backend_ch;
+ channel.source_type = INPUT_SYSTEM_SOURCE_SENSOR;
+ //channel.source
+ channel.source_cfg.csi_cfg.csi_port = port;
+ channel.source_cfg.csi_cfg.buffering_mode = INPUT_SYSTEM_XMEM_BUFFERING;
+ channel.source_cfg.csi_cfg.csi_buffer.mem_reg_size = csi_mem_reg_size;
+ channel.source_cfg.csi_cfg.csi_buffer.nof_mem_regs = csi_nof_mem_regs;
+ channel.source_cfg.csi_cfg.csi_buffer.mem_reg_addr = 0;
+ channel.source_cfg.csi_cfg.acquisition_buffer.mem_reg_size = acq_mem_reg_size;
+ channel.source_cfg.csi_cfg.acquisition_buffer.nof_mem_regs = acq_nof_mem_regs;
+ channel.source_cfg.csi_cfg.acquisition_buffer.mem_reg_addr = 0;
+ channel.source_cfg.csi_cfg.nof_xmem_buffers = nof_xmem_buffers;
+
+ channel.target_cfg = target;
+ return input_system_configure_channel(channel);
+}
+
+
+
+
+input_system_error_t input_system_csi_xmem_acquire_only_channel_cfg(
+ uint32_t ch_id,
+ uint32_t nof_frames,
+ input_system_csi_port_t port,
+ backend_channel_cfg_t backend_ch,
+ uint32_t acq_mem_reg_size,
+ uint32_t acq_nof_mem_regs,
+ target_cfg2400_t target)
+{
+ channel_cfg_t channel;
+
+ channel.ch_id = ch_id;
+ channel.backend_ch = backend_ch;
+ channel.source_type = INPUT_SYSTEM_SOURCE_SENSOR;
+ //channel.source
+ channel.source_cfg.csi_cfg.csi_port = port;
+ channel.source_cfg.csi_cfg.buffering_mode = INPUT_SYSTEM_XMEM_ACQUIRE;
+ channel.source_cfg.csi_cfg.csi_buffer = IB_BUFFER_NULL;
+ channel.source_cfg.csi_cfg.acquisition_buffer.mem_reg_size = acq_mem_reg_size;
+ channel.source_cfg.csi_cfg.acquisition_buffer.nof_mem_regs = acq_nof_mem_regs;
+ channel.source_cfg.csi_cfg.acquisition_buffer.mem_reg_addr = 0;
+ channel.source_cfg.csi_cfg.nof_xmem_buffers = nof_frames;
+
+ channel.target_cfg = target;
+ return input_system_configure_channel(channel);
+}
+
+
+input_system_error_t input_system_csi_xmem_capture_only_channel_cfg(
+ uint32_t ch_id,
+ uint32_t nof_frames,
+ input_system_csi_port_t port,
+ uint32_t csi_mem_reg_size,
+ uint32_t csi_nof_mem_regs,
+ uint32_t acq_mem_reg_size,
+ uint32_t acq_nof_mem_regs,
+ target_cfg2400_t target)
+{
+ channel_cfg_t channel;
+
+ channel.ch_id = ch_id;
+ //channel.backend_ch = backend_ch;
+ channel.source_type = INPUT_SYSTEM_SOURCE_SENSOR;
+ //channel.source
+ channel.source_cfg.csi_cfg.csi_port = port;
+ //channel.source_cfg.csi_cfg.backend_ch = backend_ch;
+ channel.source_cfg.csi_cfg.buffering_mode = INPUT_SYSTEM_XMEM_CAPTURE;
+ channel.source_cfg.csi_cfg.csi_buffer.mem_reg_size = csi_mem_reg_size;
+ channel.source_cfg.csi_cfg.csi_buffer.nof_mem_regs = csi_nof_mem_regs;
+ channel.source_cfg.csi_cfg.csi_buffer.mem_reg_addr = 0;
+ channel.source_cfg.csi_cfg.acquisition_buffer.mem_reg_size = acq_mem_reg_size;
+ channel.source_cfg.csi_cfg.acquisition_buffer.nof_mem_regs = acq_nof_mem_regs;
+ channel.source_cfg.csi_cfg.acquisition_buffer.mem_reg_addr = 0;
+ channel.source_cfg.csi_cfg.nof_xmem_buffers = nof_frames;
+
+ channel.target_cfg = target;
+ return input_system_configure_channel(channel);
+}
+
+
+
+// Non - CSI
+
+input_system_error_t input_system_prbs_channel_cfg(
+ uint32_t ch_id,
+ uint32_t nof_frames,//not used yet
+ uint32_t seed,
+ uint32_t sync_gen_width,
+ uint32_t sync_gen_height,
+ uint32_t sync_gen_hblank_cycles,
+ uint32_t sync_gen_vblank_cycles,
+ target_cfg2400_t target
+)
+{
+ channel_cfg_t channel;
+
+ (void)nof_frames;
+
+ channel.ch_id = ch_id;
+ channel.source_type= INPUT_SYSTEM_SOURCE_PRBS;
+
+ channel.source_cfg.prbs_cfg.seed = seed;
+ channel.source_cfg.prbs_cfg.sync_gen_cfg.width = sync_gen_width;
+ channel.source_cfg.prbs_cfg.sync_gen_cfg.height = sync_gen_height;
+ channel.source_cfg.prbs_cfg.sync_gen_cfg.hblank_cycles = sync_gen_hblank_cycles;
+ channel.source_cfg.prbs_cfg.sync_gen_cfg.vblank_cycles = sync_gen_vblank_cycles;
+
+ channel.target_cfg = target;
+
+ return input_system_configure_channel(channel);
+}
+
+
+
+input_system_error_t input_system_tpg_channel_cfg(
+ uint32_t ch_id,
+ uint32_t nof_frames,//not used yet
+ uint32_t x_mask,
+ uint32_t y_mask,
+ uint32_t x_delta,
+ uint32_t y_delta,
+ uint32_t xy_mask,
+ uint32_t sync_gen_width,
+ uint32_t sync_gen_height,
+ uint32_t sync_gen_hblank_cycles,
+ uint32_t sync_gen_vblank_cycles,
+ target_cfg2400_t target
+)
+{
+ channel_cfg_t channel;
+
+ (void)nof_frames;
+
+ channel.ch_id = ch_id;
+ channel.source_type = INPUT_SYSTEM_SOURCE_TPG;
+
+ channel.source_cfg.tpg_cfg.x_mask = x_mask;
+ channel.source_cfg.tpg_cfg.y_mask = y_mask;
+ channel.source_cfg.tpg_cfg.x_delta = x_delta;
+ channel.source_cfg.tpg_cfg.y_delta = y_delta;
+ channel.source_cfg.tpg_cfg.xy_mask = xy_mask;
+ channel.source_cfg.tpg_cfg.sync_gen_cfg.width = sync_gen_width;
+ channel.source_cfg.tpg_cfg.sync_gen_cfg.height = sync_gen_height;
+ channel.source_cfg.tpg_cfg.sync_gen_cfg.hblank_cycles = sync_gen_hblank_cycles;
+ channel.source_cfg.tpg_cfg.sync_gen_cfg.vblank_cycles = sync_gen_vblank_cycles;
+
+ channel.target_cfg = target;
+ return input_system_configure_channel(channel);
+}
+
+// MW: Don't use system-specific names (even in system-specific files): "cfg2400" -> cfg
+input_system_error_t input_system_gpfifo_channel_cfg(
+ uint32_t ch_id,
+ uint32_t nof_frames, //not used yet
+ target_cfg2400_t target)
+{
+ channel_cfg_t channel;
+
+ (void)nof_frames;
+
+ channel.ch_id = ch_id;
+ channel.source_type = INPUT_SYSTEM_SOURCE_FIFO;
+
+ channel.target_cfg = target;
+ return input_system_configure_channel(channel);
+}
+
+///////////////////////////////////////////////////////////////////////////
+//
+// Private specialized functions for channel setting.
+//
+///////////////////////////////////////////////////////////////////////////
+
+// Fills the parameters into config.csi_value[port].
+static input_system_error_t input_system_configure_channel_sensor(
+ const channel_cfg_t channel)
+{
+ const uint32_t port = channel.source_cfg.csi_cfg.csi_port;
+ input_system_error_t status = INPUT_SYSTEM_ERR_NO_ERROR;
+
+ input_system_multiplex_t mux;
+
+ if (port >= N_INPUT_SYSTEM_PORTS)
+ return INPUT_SYSTEM_ERR_GENERIC;
+
+ //check if port > N_INPUT_SYSTEM_MULTIPLEX
+
+ status = set_source_type(&(config.source_type), channel.source_type, &config.source_type_flags);
+ if (status != INPUT_SYSTEM_ERR_NO_ERROR) return status;
+
+ // Check for conflicts on source (implicitly on multicast, capture unit and input buffer).
+
+ status = set_csi_cfg(&(config.csi_value[port]), &channel.source_cfg.csi_cfg, &(config.csi_flags[port]));
+ if (status != INPUT_SYSTEM_ERR_NO_ERROR) return status;
+
+
+ switch (channel.source_cfg.csi_cfg.buffering_mode){
+ case INPUT_SYSTEM_FIFO_CAPTURE:
+
+ // Check for conflicts on mux.
+ mux = INPUT_SYSTEM_MIPI_PORT0 + port;
+ status = input_system_multiplexer_cfg(&config.multiplexer, mux, &config.multiplexer_flags);
+ if (status != INPUT_SYSTEM_ERR_NO_ERROR) return status;
+ config.multicast[port] = INPUT_SYSTEM_CSI_BACKEND;
+
+ // Shared resource, so it should be blocked.
+ //config.mux_flags |= INPUT_SYSTEM_CFG_FLAG_BLOCKED;
+ //config.csi_buffer_flags[port] |= INPUT_SYSTEM_CFG_FLAG_BLOCKED;
+ //config.acquisition_buffer_unique_flags |= INPUT_SYSTEM_CFG_FLAG_BLOCKED;
+
+ break;
+ case INPUT_SYSTEM_SRAM_BUFFERING :
+
+ // Check for conflicts on mux.
+ mux = INPUT_SYSTEM_ACQUISITION_UNIT;
+ status = input_system_multiplexer_cfg(&config.multiplexer, mux, &config.multiplexer_flags);
+ if (status != INPUT_SYSTEM_ERR_NO_ERROR) return status;
+ config.multicast[port] = INPUT_SYSTEM_INPUT_BUFFER;
+
+ // Shared resource, so it should be blocked.
+ //config.mux_flags |= INPUT_SYSTEM_CFG_FLAG_BLOCKED;
+ //config.csi_buffer_flags[port] |= INPUT_SYSTEM_CFG_FLAG_BLOCKED;
+ //config.acquisition_buffer_unique_flags |= INPUT_SYSTEM_CFG_FLAG_BLOCKED;
+
+ break;
+ case INPUT_SYSTEM_XMEM_BUFFERING :
+
+ // Check for conflicts on mux.
+ mux = INPUT_SYSTEM_ACQUISITION_UNIT;
+ status = input_system_multiplexer_cfg(&config.multiplexer, mux, &config.multiplexer_flags);
+ if (status != INPUT_SYSTEM_ERR_NO_ERROR) return status;
+ config.multicast[port] = INPUT_SYSTEM_INPUT_BUFFER;
+
+ // Shared resource, so it should be blocked.
+ //config.mux_flags |= INPUT_SYSTEM_CFG_FLAG_BLOCKED;
+ //config.csi_buffer_flags[port] |= INPUT_SYSTEM_CFG_FLAG_BLOCKED;
+ //config.acquisition_buffer_unique_flags |= INPUT_SYSTEM_CFG_FLAG_BLOCKED;
+
+ break;
+ case INPUT_SYSTEM_FIFO_CAPTURE_WITH_COUNTING :
+ return INPUT_SYSTEM_ERR_PARAMETER_NOT_SUPPORTED;
+ break;
+ case INPUT_SYSTEM_XMEM_CAPTURE :
+ return INPUT_SYSTEM_ERR_PARAMETER_NOT_SUPPORTED;
+ break;
+ case INPUT_SYSTEM_XMEM_ACQUIRE :
+ return INPUT_SYSTEM_ERR_PARAMETER_NOT_SUPPORTED;
+ break;
+ default :
+ return INPUT_SYSTEM_ERR_PARAMETER_NOT_SUPPORTED;
+ break;
+ }
+ return INPUT_SYSTEM_ERR_NO_ERROR;
+}
+
+// Test flags and set structure.
+static input_system_error_t set_source_type(
+ input_system_source_t * const lhs,
+ const input_system_source_t rhs,
+ input_system_config_flags_t * const flags)
+{
+ // MW: Not enough asserts
+ assert(lhs != NULL);
+ assert(flags != NULL);
+
+ if ((*flags) & INPUT_SYSTEM_CFG_FLAG_BLOCKED) {
+ *flags |= INPUT_SYSTEM_CFG_FLAG_CONFLICT;
+ return INPUT_SYSTEM_ERR_CONFLICT_ON_RESOURCE;
+ }
+
+ if ((*flags) & INPUT_SYSTEM_CFG_FLAG_SET) {
+ // Check for consistency with already set value.
+ if ((*lhs) == (rhs)) {
+ return INPUT_SYSTEM_ERR_NO_ERROR;
+ }
+ else {
+ *flags |= INPUT_SYSTEM_CFG_FLAG_CONFLICT;
+ return INPUT_SYSTEM_ERR_CONFLICT_ON_RESOURCE;
+ }
+ }
+ // Check the value (individually).
+ if (rhs >= N_INPUT_SYSTEM_SOURCE) {
+ *flags |= INPUT_SYSTEM_CFG_FLAG_CONFLICT;
+ return INPUT_SYSTEM_ERR_CONFLICT_ON_RESOURCE;
+ }
+ // Set the value.
+ *lhs = rhs;
+
+ *flags |= INPUT_SYSTEM_CFG_FLAG_SET;
+ return INPUT_SYSTEM_ERR_NO_ERROR;
+}
+
+
+// Test flags and set structure.
+static input_system_error_t set_csi_cfg(
+ csi_cfg_t* const lhs,
+ const csi_cfg_t* const rhs,
+ input_system_config_flags_t * const flags)
+{
+ uint32_t memory_required;
+ uint32_t acq_memory_required;
+
+ assert(lhs != NULL);
+ assert(flags != NULL);
+
+ if ((*flags) & INPUT_SYSTEM_CFG_FLAG_BLOCKED) {
+ *flags |= INPUT_SYSTEM_CFG_FLAG_CONFLICT;
+ return INPUT_SYSTEM_ERR_CONFLICT_ON_RESOURCE;
+ }
+
+ if (*flags & INPUT_SYSTEM_CFG_FLAG_SET) {
+ // check for consistency with already set value.
+ if (/*lhs->backend_ch == rhs.backend_ch
+ &&*/ lhs->buffering_mode == rhs->buffering_mode
+ && lhs->csi_buffer.mem_reg_size == rhs->csi_buffer.mem_reg_size
+ && lhs->csi_buffer.nof_mem_regs == rhs->csi_buffer.nof_mem_regs
+ && lhs->acquisition_buffer.mem_reg_size == rhs->acquisition_buffer.mem_reg_size
+ && lhs->acquisition_buffer.nof_mem_regs == rhs->acquisition_buffer.nof_mem_regs
+ && lhs->nof_xmem_buffers == rhs->nof_xmem_buffers
+ ) {
+ return INPUT_SYSTEM_ERR_NO_ERROR;
+ }
+ else {
+ *flags |= INPUT_SYSTEM_CFG_FLAG_CONFLICT;
+ return INPUT_SYSTEM_ERR_CONFLICT_ON_RESOURCE;
+ }
+ }
+ // Check the value (individually).
+ // no check for backend_ch
+ // no check for nof_xmem_buffers
+ memory_required = rhs->csi_buffer.mem_reg_size * rhs->csi_buffer.nof_mem_regs;
+ acq_memory_required = rhs->acquisition_buffer.mem_reg_size * rhs->acquisition_buffer.nof_mem_regs;
+ if (rhs->buffering_mode >= N_INPUT_SYSTEM_BUFFERING_MODE
+ ||
+ // Check if required memory is available in input buffer (SRAM).
+ (memory_required + acq_memory_required )> config.unallocated_ib_mem_words
+
+ ) {
+ *flags |= INPUT_SYSTEM_CFG_FLAG_CONFLICT;
+ return INPUT_SYSTEM_ERR_CONFLICT_ON_RESOURCE;
+ }
+ // Set the value.
+ //lhs[port]->backend_ch = rhs.backend_ch;
+ lhs->buffering_mode = rhs->buffering_mode;
+ lhs->nof_xmem_buffers = rhs->nof_xmem_buffers;
+
+ lhs->csi_buffer.mem_reg_size = rhs->csi_buffer.mem_reg_size;
+ lhs->csi_buffer.nof_mem_regs = rhs->csi_buffer.nof_mem_regs;
+ lhs->acquisition_buffer.mem_reg_size = rhs->acquisition_buffer.mem_reg_size;
+ lhs->acquisition_buffer.nof_mem_regs = rhs->acquisition_buffer.nof_mem_regs;
+ // ALX: NB: Here we just set buffer parameters, but still not allocate it
+ // (no addresses determined). That will be done during commit.
+
+ // FIXIT: acq_memory_required is not deducted, since it can be allocated multiple times.
+ config.unallocated_ib_mem_words -= memory_required;
+//assert(config.unallocated_ib_mem_words >=0);
+ *flags |= INPUT_SYSTEM_CFG_FLAG_SET;
+ return INPUT_SYSTEM_ERR_NO_ERROR;
+}
+
+
+// Test flags and set structure.
+static input_system_error_t input_system_multiplexer_cfg(
+ input_system_multiplex_t* const lhs,
+ const input_system_multiplex_t rhs,
+ input_system_config_flags_t* const flags)
+{
+ assert(lhs != NULL);
+ assert(flags != NULL);
+
+ if ((*flags) & INPUT_SYSTEM_CFG_FLAG_BLOCKED) {
+ *flags |= INPUT_SYSTEM_CFG_FLAG_CONFLICT;
+ return INPUT_SYSTEM_ERR_CONFLICT_ON_RESOURCE;
+ }
+
+ if ((*flags) & INPUT_SYSTEM_CFG_FLAG_SET) {
+ // Check for consistency with already set value.
+ if ((*lhs) == (rhs)) {
+ return INPUT_SYSTEM_ERR_NO_ERROR;
+ }
+ else {
+ *flags |= INPUT_SYSTEM_CFG_FLAG_CONFLICT;
+ return INPUT_SYSTEM_ERR_CONFLICT_ON_RESOURCE;
+ }
+ }
+ // Check the value (individually).
+ if (rhs >= N_INPUT_SYSTEM_MULTIPLEX) {
+ *flags |= INPUT_SYSTEM_CFG_FLAG_CONFLICT;
+ return INPUT_SYSTEM_ERR_PARAMETER_NOT_SUPPORTED;
+ }
+ // Set the value.
+ *lhs = rhs;
+
+ *flags |= INPUT_SYSTEM_CFG_FLAG_SET;
+ return INPUT_SYSTEM_ERR_NO_ERROR;
+}
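+
+/*
+ * The three helpers above share the same flag protocol (sketch of the
+ * transitions applied to *flags):
+ *
+ *	RESET   -- valid request --> SET              value stored
+ *	SET     -- same value    --> SET              accepted, nothing changes
+ *	SET     -- other value   --> SET | CONFLICT   rejected
+ *	BLOCKED -- any request   --> BLOCKED | CONFLICT
+ *
+ * Every conflict case also returns the matching INPUT_SYSTEM_ERR_* code.
+ */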
+#endif
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/input_system_local.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/input_system_local.h
new file mode 100644
index 000000000000..3e8bd00082dc
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/input_system_local.h
@@ -0,0 +1,533 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __INPUT_SYSTEM_LOCAL_H_INCLUDED__
+#define __INPUT_SYSTEM_LOCAL_H_INCLUDED__
+
+#include <type_support.h>
+
+#include "input_system_global.h"
+
+#include "input_system_defs.h" /* HIVE_ISYS_GPREG_MULTICAST_A_IDX,... */
+#include "css_receiver_2400_defs.h" /* _HRT_CSS_RECEIVER_2400_TWO_PIXEL_EN_REG_IDX, _HRT_CSS_RECEIVER_2400_CSI2_FUNC_PROG_REG_IDX,... */
+#if defined(IS_ISP_2400_MAMOIADA_SYSTEM)
+#include "isp_capture_defs.h"
+#elif defined(IS_ISP_2401_MAMOIADA_SYSTEM)
+/* Same name, but keep the distinction; it is a different device */
+#include "isp_capture_defs.h"
+#else
+#error "input_system_local.h: 2400_SYSTEM must be one of {2400, 2401}"
+#endif
+#include "isp_acquisition_defs.h"
+#include "input_system_ctrl_defs.h"
+
+
+typedef enum {
+ INPUT_SYSTEM_ERR_NO_ERROR = 0,
+ INPUT_SYSTEM_ERR_GENERIC,
+ INPUT_SYSTEM_ERR_CHANNEL_ALREADY_SET,
+ INPUT_SYSTEM_ERR_CONFLICT_ON_RESOURCE,
+ INPUT_SYSTEM_ERR_PARAMETER_NOT_SUPPORTED,
+ N_INPUT_SYSTEM_ERR
+} input_system_error_t;
+
+typedef enum {
+ INPUT_SYSTEM_PORT_A = 0,
+ INPUT_SYSTEM_PORT_B,
+ INPUT_SYSTEM_PORT_C,
+ N_INPUT_SYSTEM_PORTS
+} input_system_csi_port_t;
+
+typedef struct ctrl_unit_cfg_s ctrl_unit_cfg_t;
+typedef struct input_system_network_cfg_s input_system_network_cfg_t;
+typedef struct target_cfg2400_s target_cfg2400_t;
+typedef struct channel_cfg_s channel_cfg_t;
+typedef struct backend_channel_cfg_s backend_channel_cfg_t;
+typedef struct input_system_cfg2400_s input_system_cfg2400_t;
+typedef struct mipi_port_state_s mipi_port_state_t;
+typedef struct rx_channel_state_s rx_channel_state_t;
+typedef struct input_switch_cfg_channel_s input_switch_cfg_channel_t;
+typedef struct input_switch_cfg_s input_switch_cfg_t;
+
+struct ctrl_unit_cfg_s {
+ ib_buffer_t buffer_mipi[N_CAPTURE_UNIT_ID];
+ ib_buffer_t buffer_acquire[N_ACQUISITION_UNIT_ID];
+};
+
+struct input_system_network_cfg_s {
+ input_system_connection_t multicast_cfg[N_CAPTURE_UNIT_ID];
+ input_system_multiplex_t mux_cfg;
+ ctrl_unit_cfg_t ctrl_unit_cfg[N_CTRL_UNIT_ID];
+};
+
+typedef struct {
+// TBD.
+ uint32_t dummy_parameter;
+} target_isp_cfg_t;
+
+
+typedef struct {
+// TBD.
+ uint32_t dummy_parameter;
+} target_sp_cfg_t;
+
+
+typedef struct {
+// TBD.
+ uint32_t dummy_parameter;
+} target_strm2mem_cfg_t;
+
+struct input_switch_cfg_channel_s {
+ uint32_t hsync_data_reg[2];
+ uint32_t vsync_data_reg;
+};
+
+struct target_cfg2400_s {
+ input_switch_cfg_channel_t input_switch_channel_cfg;
+ target_isp_cfg_t target_isp_cfg;
+ target_sp_cfg_t target_sp_cfg;
+ target_strm2mem_cfg_t target_strm2mem_cfg;
+};
+
+struct backend_channel_cfg_s {
+ uint32_t fmt_control_word_1; // Format config.
+ uint32_t fmt_control_word_2;
+ uint32_t no_side_band;
+};
+
+typedef union {
+ csi_cfg_t csi_cfg;
+ tpg_cfg_t tpg_cfg;
+ prbs_cfg_t prbs_cfg;
+ gpfifo_cfg_t gpfifo_cfg;
+} source_cfg_t;
+
+
+struct input_switch_cfg_s {
+ uint32_t hsync_data_reg[N_RX_CHANNEL_ID * 2];
+ uint32_t vsync_data_reg;
+};
+
+// Configuration of a channel.
+struct channel_cfg_s {
+ uint32_t ch_id;
+ backend_channel_cfg_t backend_ch;
+ input_system_source_t source_type;
+ source_cfg_t source_cfg;
+ target_cfg2400_t target_cfg;
+};
+
+
+// Complete configuration for input system.
+struct input_system_cfg2400_s {
+
+ input_system_source_t source_type; input_system_config_flags_t source_type_flags;
+ //channel_cfg_t channel[N_CHANNELS];
+ input_system_config_flags_t ch_flags[N_CHANNELS];
+ // This is the place where the buffers' settings are collected, as given.
+ csi_cfg_t csi_value[N_CSI_PORTS]; input_system_config_flags_t csi_flags[N_CSI_PORTS];
+
+	// Possibly another struct for ib.
+	// These buffers are set at the end, based on all the configurations.
+ ib_buffer_t csi_buffer[N_CSI_PORTS]; input_system_config_flags_t csi_buffer_flags[N_CSI_PORTS];
+ ib_buffer_t acquisition_buffer_unique; input_system_config_flags_t acquisition_buffer_unique_flags;
+	uint32_t	unallocated_ib_mem_words; // Used for check. DEFAULT = IB_CAPACITY_IN_WORDS.
+ //uint32_t acq_allocated_ib_mem_words;
+
+ input_system_connection_t multicast[N_CSI_PORTS];
+ input_system_multiplex_t multiplexer; input_system_config_flags_t multiplexer_flags;
+
+
+ tpg_cfg_t tpg_value; input_system_config_flags_t tpg_flags;
+ prbs_cfg_t prbs_value; input_system_config_flags_t prbs_flags;
+ gpfifo_cfg_t gpfifo_value; input_system_config_flags_t gpfifo_flags;
+
+
+ input_switch_cfg_t input_switch_cfg;
+
+
+ target_isp_cfg_t target_isp [N_CHANNELS]; input_system_config_flags_t target_isp_flags [N_CHANNELS];
+ target_sp_cfg_t target_sp [N_CHANNELS]; input_system_config_flags_t target_sp_flags [N_CHANNELS];
+ target_strm2mem_cfg_t target_strm2mem [N_CHANNELS]; input_system_config_flags_t target_strm2mem_flags [N_CHANNELS];
+
+ input_system_config_flags_t session_flags;
+
+};
+
+/*
+ * For each MIPI port
+ */
+#define _HRT_CSS_RECEIVER_DEVICE_READY_REG_IDX _HRT_CSS_RECEIVER_2400_DEVICE_READY_REG_IDX
+#define _HRT_CSS_RECEIVER_IRQ_STATUS_REG_IDX _HRT_CSS_RECEIVER_2400_IRQ_STATUS_REG_IDX
+#define _HRT_CSS_RECEIVER_IRQ_ENABLE_REG_IDX _HRT_CSS_RECEIVER_2400_IRQ_ENABLE_REG_IDX
+#define _HRT_CSS_RECEIVER_TIMEOUT_COUNT_REG_IDX _HRT_CSS_RECEIVER_2400_CSI2_FUNC_PROG_REG_IDX
+#define _HRT_CSS_RECEIVER_INIT_COUNT_REG_IDX _HRT_CSS_RECEIVER_2400_INIT_COUNT_REG_IDX
+/* new regs for each MIPI port w.r.t. 2300 */
+#define _HRT_CSS_RECEIVER_RAW16_18_DATAID_REG_IDX _HRT_CSS_RECEIVER_2400_RAW16_18_DATAID_REG_IDX
+#define _HRT_CSS_RECEIVER_SYNC_COUNT_REG_IDX _HRT_CSS_RECEIVER_2400_SYNC_COUNT_REG_IDX
+#define _HRT_CSS_RECEIVER_RX_COUNT_REG_IDX _HRT_CSS_RECEIVER_2400_RX_COUNT_REG_IDX
+
+/* _HRT_CSS_RECEIVER_2400_COMP_FORMAT_REG_IDX is not defined per MIPI port but per channel */
+/* _HRT_CSS_RECEIVER_2400_COMP_PREDICT_REG_IDX is not defined per MIPI port but per channel */
+#define _HRT_CSS_RECEIVER_FS_TO_LS_DELAY_REG_IDX _HRT_CSS_RECEIVER_2400_FS_TO_LS_DELAY_REG_IDX
+#define _HRT_CSS_RECEIVER_LS_TO_DATA_DELAY_REG_IDX _HRT_CSS_RECEIVER_2400_LS_TO_DATA_DELAY_REG_IDX
+#define _HRT_CSS_RECEIVER_DATA_TO_LE_DELAY_REG_IDX _HRT_CSS_RECEIVER_2400_DATA_TO_LE_DELAY_REG_IDX
+#define _HRT_CSS_RECEIVER_LE_TO_FE_DELAY_REG_IDX _HRT_CSS_RECEIVER_2400_LE_TO_FE_DELAY_REG_IDX
+#define _HRT_CSS_RECEIVER_FE_TO_FS_DELAY_REG_IDX _HRT_CSS_RECEIVER_2400_FE_TO_FS_DELAY_REG_IDX
+#define _HRT_CSS_RECEIVER_LE_TO_LS_DELAY_REG_IDX _HRT_CSS_RECEIVER_2400_LE_TO_LS_DELAY_REG_IDX
+#define _HRT_CSS_RECEIVER_TWO_PIXEL_EN_REG_IDX _HRT_CSS_RECEIVER_2400_TWO_PIXEL_EN_REG_IDX
+#define _HRT_CSS_RECEIVER_BACKEND_RST_REG_IDX _HRT_CSS_RECEIVER_2400_BACKEND_RST_REG_IDX
+#define _HRT_CSS_RECEIVER_RAW18_REG_IDX _HRT_CSS_RECEIVER_2400_RAW18_REG_IDX
+#define _HRT_CSS_RECEIVER_FORCE_RAW8_REG_IDX _HRT_CSS_RECEIVER_2400_FORCE_RAW8_REG_IDX
+#define _HRT_CSS_RECEIVER_RAW16_REG_IDX _HRT_CSS_RECEIVER_2400_RAW16_REG_IDX
+
+/* Previously MIPI port regs, now 2x2 logical channel regs */
+#define _HRT_CSS_RECEIVER_COMP_SCHEME_VC0_REG0_IDX _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC0_REG0_IDX
+#define _HRT_CSS_RECEIVER_COMP_SCHEME_VC0_REG1_IDX _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC0_REG1_IDX
+#define _HRT_CSS_RECEIVER_COMP_SCHEME_VC1_REG0_IDX _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC1_REG0_IDX
+#define _HRT_CSS_RECEIVER_COMP_SCHEME_VC1_REG1_IDX _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC1_REG1_IDX
+#define _HRT_CSS_RECEIVER_COMP_SCHEME_VC2_REG0_IDX _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC2_REG0_IDX
+#define _HRT_CSS_RECEIVER_COMP_SCHEME_VC2_REG1_IDX _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC2_REG1_IDX
+#define _HRT_CSS_RECEIVER_COMP_SCHEME_VC3_REG0_IDX _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC3_REG0_IDX
+#define _HRT_CSS_RECEIVER_COMP_SCHEME_VC3_REG1_IDX _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC3_REG1_IDX
+
+/* Second backend is at offset 0x0700 w.r.t. the first port at offset 0x0100 */
+#define _HRT_CSS_BE_OFFSET 448
+#define _HRT_CSS_RECEIVER_BE_GSP_ACC_OVL_REG_IDX (_HRT_CSS_RECEIVER_2400_BE_GSP_ACC_OVL_REG_IDX + _HRT_CSS_BE_OFFSET)
+#define _HRT_CSS_RECEIVER_BE_SRST_REG_IDX (_HRT_CSS_RECEIVER_2400_BE_SRST_REG_IDX + _HRT_CSS_BE_OFFSET)
+#define _HRT_CSS_RECEIVER_BE_TWO_PPC_REG_IDX (_HRT_CSS_RECEIVER_2400_BE_TWO_PPC_REG_IDX + _HRT_CSS_BE_OFFSET)
+#define _HRT_CSS_RECEIVER_BE_COMP_FORMAT_REG0_IDX (_HRT_CSS_RECEIVER_2400_BE_COMP_FORMAT_REG0_IDX + _HRT_CSS_BE_OFFSET)
+#define _HRT_CSS_RECEIVER_BE_COMP_FORMAT_REG1_IDX (_HRT_CSS_RECEIVER_2400_BE_COMP_FORMAT_REG1_IDX + _HRT_CSS_BE_OFFSET)
+#define _HRT_CSS_RECEIVER_BE_COMP_FORMAT_REG2_IDX (_HRT_CSS_RECEIVER_2400_BE_COMP_FORMAT_REG2_IDX + _HRT_CSS_BE_OFFSET)
+#define _HRT_CSS_RECEIVER_BE_COMP_FORMAT_REG3_IDX (_HRT_CSS_RECEIVER_2400_BE_COMP_FORMAT_REG3_IDX + _HRT_CSS_BE_OFFSET)
+#define _HRT_CSS_RECEIVER_BE_SEL_REG_IDX (_HRT_CSS_RECEIVER_2400_BE_SEL_REG_IDX + _HRT_CSS_BE_OFFSET)
+#define _HRT_CSS_RECEIVER_BE_RAW16_CONFIG_REG_IDX (_HRT_CSS_RECEIVER_2400_BE_RAW16_CONFIG_REG_IDX + _HRT_CSS_BE_OFFSET)
+#define _HRT_CSS_RECEIVER_BE_RAW18_CONFIG_REG_IDX (_HRT_CSS_RECEIVER_2400_BE_RAW18_CONFIG_REG_IDX + _HRT_CSS_BE_OFFSET)
+#define _HRT_CSS_RECEIVER_BE_FORCE_RAW8_REG_IDX (_HRT_CSS_RECEIVER_2400_BE_FORCE_RAW8_REG_IDX + _HRT_CSS_BE_OFFSET)
+#define _HRT_CSS_RECEIVER_BE_IRQ_STATUS_REG_IDX (_HRT_CSS_RECEIVER_2400_BE_IRQ_STATUS_REG_IDX + _HRT_CSS_BE_OFFSET)
+#define _HRT_CSS_RECEIVER_BE_IRQ_CLEAR_REG_IDX (_HRT_CSS_RECEIVER_2400_BE_IRQ_CLEAR_REG_IDX + _HRT_CSS_BE_OFFSET)
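For reference, the _HRT_CSS_BE_OFFSET value of 448 above is a register-index offset rather than a byte offset: assuming hrt_data is 32 bits wide on this platform, it matches the 0x0700-byte distance mentioned in the comment. A minimal standalone sketch of that arithmetic (the hrt_data width is an assumption, not taken from this patch):

/* Sketch only: 448 register words == 0x700 bytes, assuming 32-bit hrt_data. */
#include <assert.h>
#include <stdint.h>

typedef uint32_t hrt_data;	/* assumption: hrt_data is 32 bits wide */

int main(void)
{
	assert(448u * sizeof(hrt_data) == 0x700u);
	return 0;
}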
+
+
+#define _HRT_CSS_RECEIVER_IRQ_OVERRUN_BIT _HRT_CSS_RECEIVER_2400_IRQ_OVERRUN_BIT
+#define _HRT_CSS_RECEIVER_IRQ_INIT_TIMEOUT_BIT _HRT_CSS_RECEIVER_2400_IRQ_RESERVED_BIT
+#define _HRT_CSS_RECEIVER_IRQ_SLEEP_MODE_ENTRY_BIT _HRT_CSS_RECEIVER_2400_IRQ_SLEEP_MODE_ENTRY_BIT
+#define _HRT_CSS_RECEIVER_IRQ_SLEEP_MODE_EXIT_BIT _HRT_CSS_RECEIVER_2400_IRQ_SLEEP_MODE_EXIT_BIT
+#define _HRT_CSS_RECEIVER_IRQ_ERR_SOT_HS_BIT _HRT_CSS_RECEIVER_2400_IRQ_ERR_SOT_HS_BIT
+#define _HRT_CSS_RECEIVER_IRQ_ERR_SOT_SYNC_HS_BIT _HRT_CSS_RECEIVER_2400_IRQ_ERR_SOT_SYNC_HS_BIT
+#define _HRT_CSS_RECEIVER_IRQ_ERR_CONTROL_BIT _HRT_CSS_RECEIVER_2400_IRQ_ERR_CONTROL_BIT
+#define _HRT_CSS_RECEIVER_IRQ_ERR_ECC_DOUBLE_BIT _HRT_CSS_RECEIVER_2400_IRQ_ERR_ECC_DOUBLE_BIT
+#define _HRT_CSS_RECEIVER_IRQ_ERR_ECC_CORRECTED_BIT _HRT_CSS_RECEIVER_2400_IRQ_ERR_ECC_CORRECTED_BIT
+#define _HRT_CSS_RECEIVER_IRQ_ERR_ECC_NO_CORRECTION_BIT _HRT_CSS_RECEIVER_2400_IRQ_ERR_ECC_NO_CORRECTION_BIT
+#define _HRT_CSS_RECEIVER_IRQ_ERR_CRC_BIT _HRT_CSS_RECEIVER_2400_IRQ_ERR_CRC_BIT
+#define _HRT_CSS_RECEIVER_IRQ_ERR_ID_BIT _HRT_CSS_RECEIVER_2400_IRQ_ERR_ID_BIT
+#define _HRT_CSS_RECEIVER_IRQ_ERR_FRAME_SYNC_BIT _HRT_CSS_RECEIVER_2400_IRQ_ERR_FRAME_SYNC_BIT
+#define _HRT_CSS_RECEIVER_IRQ_ERR_FRAME_DATA_BIT _HRT_CSS_RECEIVER_2400_IRQ_ERR_FRAME_DATA_BIT
+#define _HRT_CSS_RECEIVER_IRQ_DATA_TIMEOUT_BIT _HRT_CSS_RECEIVER_2400_IRQ_DATA_TIMEOUT_BIT
+#define _HRT_CSS_RECEIVER_IRQ_ERR_ESCAPE_BIT _HRT_CSS_RECEIVER_2400_IRQ_ERR_ESCAPE_BIT
+#define _HRT_CSS_RECEIVER_IRQ_ERR_LINE_SYNC_BIT _HRT_CSS_RECEIVER_2400_IRQ_ERR_LINE_SYNC_BIT
+
+#define _HRT_CSS_RECEIVER_FUNC_PROG_REG_IDX _HRT_CSS_RECEIVER_2400_CSI2_FUNC_PROG_REG_IDX
+#define _HRT_CSS_RECEIVER_DATA_TIMEOUT_IDX _HRT_CSS_RECEIVER_2400_CSI2_DATA_TIMEOUT_IDX
+#define _HRT_CSS_RECEIVER_DATA_TIMEOUT_BITS _HRT_CSS_RECEIVER_2400_CSI2_DATA_TIMEOUT_BITS
+
+typedef struct capture_unit_state_s capture_unit_state_t;
+typedef struct acquisition_unit_state_s acquisition_unit_state_t;
+typedef struct ctrl_unit_state_s ctrl_unit_state_t;
+
+/*
+ * In 2300 ports can be configured independently and stream
+ * formats need to be specified. In 2400, there are only 8
+ * supported configurations, but the HW is fused to support
+ * only a single one of them.
+ *
+ * In 2300 the compressed format types are programmed by the
+ * user. In 2400 all stream formats are encoded on the stream.
+ *
+ * Use this enum to check the validity of a user configuration.
+ */
+typedef enum {
+ MONO_4L_1L_0L = 0,
+ MONO_3L_1L_0L,
+ MONO_2L_1L_0L,
+ MONO_1L_1L_0L,
+ STEREO_2L_1L_2L,
+ STEREO_3L_1L_1L,
+ STEREO_2L_1L_1L,
+ STEREO_1L_1L_1L,
+ N_RX_MODE
+} rx_mode_t;
+
+typedef enum {
+ MIPI_PREDICTOR_NONE = 0,
+ MIPI_PREDICTOR_TYPE1,
+ MIPI_PREDICTOR_TYPE2,
+ N_MIPI_PREDICTOR_TYPES
+} mipi_predictor_t;
+
+typedef enum {
+ MIPI_COMPRESSOR_NONE = 0,
+ MIPI_COMPRESSOR_10_6_10,
+ MIPI_COMPRESSOR_10_7_10,
+ MIPI_COMPRESSOR_10_8_10,
+ MIPI_COMPRESSOR_12_6_12,
+ MIPI_COMPRESSOR_12_7_12,
+ MIPI_COMPRESSOR_12_8_12,
+ N_MIPI_COMPRESSOR_METHODS
+} mipi_compressor_t;
+
+typedef enum {
+ MIPI_FORMAT_RGB888 = 0,
+ MIPI_FORMAT_RGB555,
+ MIPI_FORMAT_RGB444,
+ MIPI_FORMAT_RGB565,
+ MIPI_FORMAT_RGB666,
+ MIPI_FORMAT_RAW8, /* 5 */
+ MIPI_FORMAT_RAW10,
+ MIPI_FORMAT_RAW6,
+ MIPI_FORMAT_RAW7,
+ MIPI_FORMAT_RAW12,
+ MIPI_FORMAT_RAW14, /* 10 */
+ MIPI_FORMAT_YUV420_8,
+ MIPI_FORMAT_YUV420_10,
+ MIPI_FORMAT_YUV422_8,
+ MIPI_FORMAT_YUV422_10,
+ MIPI_FORMAT_CUSTOM0, /* 15 */
+ MIPI_FORMAT_YUV420_8_LEGACY,
+ MIPI_FORMAT_EMBEDDED,
+ MIPI_FORMAT_CUSTOM1,
+ MIPI_FORMAT_CUSTOM2,
+ MIPI_FORMAT_CUSTOM3, /* 20 */
+ MIPI_FORMAT_CUSTOM4,
+ MIPI_FORMAT_CUSTOM5,
+ MIPI_FORMAT_CUSTOM6,
+ MIPI_FORMAT_CUSTOM7,
+ MIPI_FORMAT_YUV420_8_SHIFT, /* 25 */
+ MIPI_FORMAT_YUV420_10_SHIFT,
+ MIPI_FORMAT_RAW16,
+ MIPI_FORMAT_RAW18,
+ N_MIPI_FORMAT,
+} mipi_format_t;
+
+#define MIPI_FORMAT_JPEG MIPI_FORMAT_CUSTOM0
+#define MIPI_FORMAT_BINARY_8 MIPI_FORMAT_CUSTOM0
+#define N_MIPI_FORMAT_CUSTOM 8
+
+/* The number of stores for compressed format types */
+#define N_MIPI_COMPRESSOR_CONTEXT (N_RX_CHANNEL_ID * N_MIPI_FORMAT_CUSTOM)
+
+typedef enum {
+ RX_IRQ_INFO_BUFFER_OVERRUN = 1UL << _HRT_CSS_RECEIVER_IRQ_OVERRUN_BIT,
+ RX_IRQ_INFO_INIT_TIMEOUT = 1UL << _HRT_CSS_RECEIVER_IRQ_INIT_TIMEOUT_BIT,
+ RX_IRQ_INFO_ENTER_SLEEP_MODE = 1UL << _HRT_CSS_RECEIVER_IRQ_SLEEP_MODE_ENTRY_BIT,
+ RX_IRQ_INFO_EXIT_SLEEP_MODE = 1UL << _HRT_CSS_RECEIVER_IRQ_SLEEP_MODE_EXIT_BIT,
+ RX_IRQ_INFO_ECC_CORRECTED = 1UL << _HRT_CSS_RECEIVER_IRQ_ERR_ECC_CORRECTED_BIT,
+ RX_IRQ_INFO_ERR_SOT = 1UL << _HRT_CSS_RECEIVER_IRQ_ERR_SOT_HS_BIT,
+ RX_IRQ_INFO_ERR_SOT_SYNC = 1UL << _HRT_CSS_RECEIVER_IRQ_ERR_SOT_SYNC_HS_BIT,
+ RX_IRQ_INFO_ERR_CONTROL = 1UL << _HRT_CSS_RECEIVER_IRQ_ERR_CONTROL_BIT,
+ RX_IRQ_INFO_ERR_ECC_DOUBLE = 1UL << _HRT_CSS_RECEIVER_IRQ_ERR_ECC_DOUBLE_BIT,
+/* RX_IRQ_INFO_NO_ERR = 1UL << _HRT_CSS_RECEIVER_IRQ_ERR_ECC_NO_CORRECTION_BIT, */
+ RX_IRQ_INFO_ERR_CRC = 1UL << _HRT_CSS_RECEIVER_IRQ_ERR_CRC_BIT,
+ RX_IRQ_INFO_ERR_UNKNOWN_ID = 1UL << _HRT_CSS_RECEIVER_IRQ_ERR_ID_BIT,
+ RX_IRQ_INFO_ERR_FRAME_SYNC = 1UL << _HRT_CSS_RECEIVER_IRQ_ERR_FRAME_SYNC_BIT,
+ RX_IRQ_INFO_ERR_FRAME_DATA = 1UL << _HRT_CSS_RECEIVER_IRQ_ERR_FRAME_DATA_BIT,
+ RX_IRQ_INFO_ERR_DATA_TIMEOUT = 1UL << _HRT_CSS_RECEIVER_IRQ_DATA_TIMEOUT_BIT,
+ RX_IRQ_INFO_ERR_UNKNOWN_ESC = 1UL << _HRT_CSS_RECEIVER_IRQ_ERR_ESCAPE_BIT,
+ RX_IRQ_INFO_ERR_LINE_SYNC = 1UL << _HRT_CSS_RECEIVER_IRQ_ERR_LINE_SYNC_BIT,
+} rx_irq_info_t;
+
+typedef struct rx_cfg_s rx_cfg_t;
+
+/*
+ * Applied per port
+ */
+struct rx_cfg_s {
+ rx_mode_t mode; /* The HW config */
+ mipi_port_ID_t port; /* The port ID to apply the control on */
+ unsigned int timeout;
+ unsigned int initcount;
+ unsigned int synccount;
+ unsigned int rxcount;
+ mipi_predictor_t comp; /* Just for backward compatibility */
+ bool is_two_ppc;
+};
+
+/* NOTE: The base has already an offset of 0x0100 */
+static const hrt_address MIPI_PORT_OFFSET[N_MIPI_PORT_ID] = {
+ 0x00000000UL,
+ 0x00000100UL,
+ 0x00000200UL};
+
+static const mipi_lane_cfg_t MIPI_PORT_MAXLANES[N_MIPI_PORT_ID] = {
+ MIPI_4LANE_CFG,
+ MIPI_1LANE_CFG,
+ MIPI_2LANE_CFG};
+
+static const bool MIPI_PORT_ACTIVE[N_RX_MODE][N_MIPI_PORT_ID] = {
+ {true, true, false},
+ {true, true, false},
+ {true, true, false},
+ {true, true, false},
+ {true, true, true},
+ {true, true, true},
+ {true, true, true},
+ {true, true, true}};
+
+static const mipi_lane_cfg_t MIPI_PORT_LANES[N_RX_MODE][N_MIPI_PORT_ID] = {
+ {MIPI_4LANE_CFG, MIPI_1LANE_CFG, MIPI_0LANE_CFG},
+ {MIPI_3LANE_CFG, MIPI_1LANE_CFG, MIPI_0LANE_CFG},
+ {MIPI_2LANE_CFG, MIPI_1LANE_CFG, MIPI_0LANE_CFG},
+ {MIPI_1LANE_CFG, MIPI_1LANE_CFG, MIPI_0LANE_CFG},
+ {MIPI_2LANE_CFG, MIPI_1LANE_CFG, MIPI_2LANE_CFG},
+ {MIPI_3LANE_CFG, MIPI_1LANE_CFG, MIPI_1LANE_CFG},
+ {MIPI_2LANE_CFG, MIPI_1LANE_CFG, MIPI_1LANE_CFG},
+ {MIPI_1LANE_CFG, MIPI_1LANE_CFG, MIPI_1LANE_CFG}};
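As a hedged illustration of how the fused rx_mode_t and the two tables above might be used to validate a user-supplied port configuration: the helper below is hypothetical (not part of this patch) and assumes that mipi_lane_cfg_t values compare ordinally by lane count.

/* Hypothetical validity check built on the tables above. */
static bool rx_cfg_lanes_valid(rx_mode_t mode, mipi_port_ID_t port,
			       mipi_lane_cfg_t lanes)
{
	if (mode >= N_RX_MODE || port >= N_MIPI_PORT_ID)
		return false;
	if (!MIPI_PORT_ACTIVE[mode][port])
		return false;	/* port unused in this fused mode */
	return lanes <= MIPI_PORT_LANES[mode][port];
}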
+
+static const hrt_address SUB_SYSTEM_OFFSET[N_SUB_SYSTEM_ID] = {
+ 0x00001000UL,
+ 0x00002000UL,
+ 0x00003000UL,
+ 0x00004000UL,
+ 0x00005000UL,
+ 0x00009000UL,
+ 0x0000A000UL,
+ 0x0000B000UL,
+ 0x0000C000UL};
+
+struct capture_unit_state_s {
+ int Packet_Length;
+ int Received_Length;
+ int Received_Short_Packets;
+ int Received_Long_Packets;
+ int Last_Command;
+ int Next_Command;
+ int Last_Acknowledge;
+ int Next_Acknowledge;
+ int FSM_State_Info;
+ int StartMode;
+ int Start_Addr;
+ int Mem_Region_Size;
+ int Num_Mem_Regions;
+/* int Init; write-only registers
+ int Start;
+ int Stop; */
+};
+
+struct acquisition_unit_state_s {
+/* int Init; write-only register */
+ int Received_Short_Packets;
+ int Received_Long_Packets;
+ int Last_Command;
+ int Next_Command;
+ int Last_Acknowledge;
+ int Next_Acknowledge;
+ int FSM_State_Info;
+ int Int_Cntr_Info;
+ int Start_Addr;
+ int Mem_Region_Size;
+ int Num_Mem_Regions;
+};
+
+struct ctrl_unit_state_s {
+ int last_cmd;
+ int next_cmd;
+ int last_ack;
+ int next_ack;
+ int top_fsm_state;
+ int captA_fsm_state;
+ int captB_fsm_state;
+ int captC_fsm_state;
+ int acq_fsm_state;
+ int captA_start_addr;
+ int captB_start_addr;
+ int captC_start_addr;
+ int captA_mem_region_size;
+ int captB_mem_region_size;
+ int captC_mem_region_size;
+ int captA_num_mem_regions;
+ int captB_num_mem_regions;
+ int captC_num_mem_regions;
+ int acq_start_addr;
+ int acq_mem_region_size;
+ int acq_num_mem_regions;
+/* int ctrl_init; write only register */
+ int capt_reserve_one_mem_region;
+};
+
+struct input_system_state_s {
+ int str_multicastA_sel;
+ int str_multicastB_sel;
+ int str_multicastC_sel;
+ int str_mux_sel;
+ int str_mon_status;
+ int str_mon_irq_cond;
+ int str_mon_irq_en;
+ int isys_srst;
+ int isys_slv_reg_srst;
+ int str_deint_portA_cnt;
+ int str_deint_portB_cnt;
+ struct capture_unit_state_s capture_unit[N_CAPTURE_UNIT_ID];
+ struct acquisition_unit_state_s acquisition_unit[N_ACQUISITION_UNIT_ID];
+ struct ctrl_unit_state_s ctrl_unit_state[N_CTRL_UNIT_ID];
+};
+
+struct mipi_port_state_s {
+ int device_ready;
+ int irq_status;
+ int irq_enable;
+ uint32_t timeout_count;
+ uint16_t init_count;
+ uint16_t raw16_18;
+ uint32_t sync_count; /*4 x uint8_t */
+ uint32_t rx_count; /*4 x uint8_t */
+ uint8_t lane_sync_count[MIPI_4LANE_CFG];
+ uint8_t lane_rx_count[MIPI_4LANE_CFG];
+};
+
+struct rx_channel_state_s {
+ uint32_t comp_scheme0;
+ uint32_t comp_scheme1;
+ mipi_predictor_t pred[N_MIPI_FORMAT_CUSTOM];
+ mipi_compressor_t comp[N_MIPI_FORMAT_CUSTOM];
+};
+
+struct receiver_state_s {
+ uint8_t fs_to_ls_delay;
+ uint8_t ls_to_data_delay;
+ uint8_t data_to_le_delay;
+ uint8_t le_to_fe_delay;
+ uint8_t fe_to_fs_delay;
+ uint8_t le_to_fs_delay;
+ bool is_two_ppc;
+ int backend_rst;
+ uint16_t raw18;
+ bool force_raw8;
+ uint16_t raw16;
+ struct mipi_port_state_s mipi_port_state[N_MIPI_PORT_ID];
+ struct rx_channel_state_s rx_channel_state[N_RX_CHANNEL_ID];
+ int be_gsp_acc_ovl;
+ int be_srst;
+ int be_is_two_ppc;
+ int be_comp_format0;
+ int be_comp_format1;
+ int be_comp_format2;
+ int be_comp_format3;
+ int be_sel;
+ int be_raw16_config;
+ int be_raw18_config;
+ int be_force_raw8;
+ int be_irq_status;
+ int be_irq_clear;
+};
+
+#endif /* __INPUT_SYSTEM_LOCAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/input_system_private.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/input_system_private.h
new file mode 100644
index 000000000000..ed1b947b00f9
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/input_system_private.h
@@ -0,0 +1,116 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __INPUT_SYSTEM_PRIVATE_H_INCLUDED__
+#define __INPUT_SYSTEM_PRIVATE_H_INCLUDED__
+
+#include "input_system_public.h"
+
+#include "device_access.h"
+
+#include "assert_support.h"
+
+STORAGE_CLASS_INPUT_SYSTEM_C void input_system_reg_store(
+ const input_system_ID_t ID,
+ const hrt_address reg,
+ const hrt_data value)
+{
+assert(ID < N_INPUT_SYSTEM_ID);
+assert(INPUT_SYSTEM_BASE[ID] != (hrt_address)-1);
+ ia_css_device_store_uint32(INPUT_SYSTEM_BASE[ID] + reg*sizeof(hrt_data), value);
+return;
+}
+
+STORAGE_CLASS_INPUT_SYSTEM_C hrt_data input_system_reg_load(
+ const input_system_ID_t ID,
+ const hrt_address reg)
+{
+assert(ID < N_INPUT_SYSTEM_ID);
+assert(INPUT_SYSTEM_BASE[ID] != (hrt_address)-1);
+return ia_css_device_load_uint32(INPUT_SYSTEM_BASE[ID] + reg*sizeof(hrt_data));
+}
+
+STORAGE_CLASS_INPUT_SYSTEM_C void receiver_reg_store(
+ const rx_ID_t ID,
+ const hrt_address reg,
+ const hrt_data value)
+{
+assert(ID < N_RX_ID);
+assert(RX_BASE[ID] != (hrt_address)-1);
+ ia_css_device_store_uint32(RX_BASE[ID] + reg*sizeof(hrt_data), value);
+return;
+}
+
+STORAGE_CLASS_INPUT_SYSTEM_C hrt_data receiver_reg_load(
+ const rx_ID_t ID,
+ const hrt_address reg)
+{
+assert(ID < N_RX_ID);
+assert(RX_BASE[ID] != (hrt_address)-1);
+return ia_css_device_load_uint32(RX_BASE[ID] + reg*sizeof(hrt_data));
+}
+
+STORAGE_CLASS_INPUT_SYSTEM_C void receiver_port_reg_store(
+ const rx_ID_t ID,
+ const mipi_port_ID_t port_ID,
+ const hrt_address reg,
+ const hrt_data value)
+{
+assert(ID < N_RX_ID);
+assert(port_ID < N_MIPI_PORT_ID);
+assert(RX_BASE[ID] != (hrt_address)-1);
+assert(MIPI_PORT_OFFSET[port_ID] != (hrt_address)-1);
+ ia_css_device_store_uint32(RX_BASE[ID] + MIPI_PORT_OFFSET[port_ID] + reg*sizeof(hrt_data), value);
+return;
+}
+
+STORAGE_CLASS_INPUT_SYSTEM_C hrt_data receiver_port_reg_load(
+ const rx_ID_t ID,
+ const mipi_port_ID_t port_ID,
+ const hrt_address reg)
+{
+assert(ID < N_RX_ID);
+assert(port_ID < N_MIPI_PORT_ID);
+assert(RX_BASE[ID] != (hrt_address)-1);
+assert(MIPI_PORT_OFFSET[port_ID] != (hrt_address)-1);
+return ia_css_device_load_uint32(RX_BASE[ID] + MIPI_PORT_OFFSET[port_ID] + reg*sizeof(hrt_data));
+}
+
+STORAGE_CLASS_INPUT_SYSTEM_C void input_system_sub_system_reg_store(
+ const input_system_ID_t ID,
+ const sub_system_ID_t sub_ID,
+ const hrt_address reg,
+ const hrt_data value)
+{
+assert(ID < N_INPUT_SYSTEM_ID);
+assert(sub_ID < N_SUB_SYSTEM_ID);
+assert(INPUT_SYSTEM_BASE[ID] != (hrt_address)-1);
+assert(SUB_SYSTEM_OFFSET[sub_ID] != (hrt_address)-1);
+ ia_css_device_store_uint32(INPUT_SYSTEM_BASE[ID] + SUB_SYSTEM_OFFSET[sub_ID] + reg*sizeof(hrt_data), value);
+return;
+}
+
+STORAGE_CLASS_INPUT_SYSTEM_C hrt_data input_system_sub_system_reg_load(
+ const input_system_ID_t ID,
+ const sub_system_ID_t sub_ID,
+ const hrt_address reg)
+{
+assert(ID < N_INPUT_SYSTEM_ID);
+assert(sub_ID < N_SUB_SYSTEM_ID);
+assert(INPUT_SYSTEM_BASE[ID] != (hrt_address)-1);
+assert(SUB_SYSTEM_OFFSET[sub_ID] != (hrt_address)-1);
+return ia_css_device_load_uint32(INPUT_SYSTEM_BASE[ID] + SUB_SYSTEM_OFFSET[sub_ID] + reg*sizeof(hrt_data));
+}
+
+#endif /* __INPUT_SYSTEM_PRIVATE_H_INCLUDED__ */
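A small usage sketch for the accessors above: programming the per-port timeout register defined earlier in input_system_local.h. The timeout value itself is platform tuning and assumed to come from the caller.

/* Sketch: program the CSI receiver timeout on one MIPI port. */
static void example_set_rx_timeout(rx_ID_t rx, mipi_port_ID_t port,
				   hrt_data timeout)
{
	receiver_port_reg_store(rx, port,
				_HRT_CSS_RECEIVER_TIMEOUT_COUNT_REG_IDX,
				timeout);
}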
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/irq.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/irq.c
new file mode 100644
index 000000000000..6b58bc13dc1b
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/irq.c
@@ -0,0 +1,448 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "assert_support.h"
+#include "irq.h"
+
+#ifndef __INLINE_GP_DEVICE__
+#define __INLINE_GP_DEVICE__
+#endif
+#include "gp_device.h" /* _REG_GP_IRQ_REQUEST_ADDR */
+
+#include "platform_support.h" /* hrt_sleep() */
+
+STORAGE_CLASS_INLINE void irq_wait_for_write_complete(
+ const irq_ID_t ID);
+
+STORAGE_CLASS_INLINE bool any_irq_channel_enabled(
+ const irq_ID_t ID);
+
+STORAGE_CLASS_INLINE irq_ID_t virq_get_irq_id(
+ const virq_id_t irq_ID,
+ unsigned int *channel_ID);
+
+#ifndef __INLINE_IRQ__
+#include "irq_private.h"
+#endif /* __INLINE_IRQ__ */
+
+static unsigned short IRQ_N_CHANNEL[N_IRQ_ID] = {
+ IRQ0_ID_N_CHANNEL,
+ IRQ1_ID_N_CHANNEL,
+ IRQ2_ID_N_CHANNEL,
+ IRQ3_ID_N_CHANNEL};
+
+static unsigned short IRQ_N_ID_OFFSET[N_IRQ_ID + 1] = {
+ IRQ0_ID_OFFSET,
+ IRQ1_ID_OFFSET,
+ IRQ2_ID_OFFSET,
+ IRQ3_ID_OFFSET,
+ IRQ_END_OFFSET};
+
+static virq_id_t IRQ_NESTING_ID[N_IRQ_ID] = {
+ N_virq_id,
+ virq_ifmt,
+ virq_isys,
+ virq_isel};
+
+void irq_clear_all(
+ const irq_ID_t ID)
+{
+ hrt_data mask = 0xFFFFFFFF;
+
+ assert(ID < N_IRQ_ID);
+ assert(IRQ_N_CHANNEL[ID] <= HRT_DATA_WIDTH);
+
+ if (IRQ_N_CHANNEL[ID] < HRT_DATA_WIDTH) {
+ mask = ~((~(hrt_data)0)>>IRQ_N_CHANNEL[ID]);
+ }
+
+ irq_reg_store(ID,
+ _HRT_IRQ_CONTROLLER_CLEAR_REG_IDX, mask);
+return;
+}
+
+/*
+ * Do we want the user to be able to set the signalling method?
+ */
+void irq_enable_channel(
+ const irq_ID_t ID,
+ const unsigned int irq_id)
+{
+ unsigned int mask = irq_reg_load(ID,
+ _HRT_IRQ_CONTROLLER_MASK_REG_IDX);
+ unsigned int enable = irq_reg_load(ID,
+ _HRT_IRQ_CONTROLLER_ENABLE_REG_IDX);
+ unsigned int edge_in = irq_reg_load(ID,
+ _HRT_IRQ_CONTROLLER_EDGE_REG_IDX);
+ unsigned int me = 1U << irq_id;
+
+ assert(ID < N_IRQ_ID);
+ assert(irq_id < IRQ_N_CHANNEL[ID]);
+
+ mask |= me;
+ enable |= me;
+ edge_in |= me; /* rising edge */
+
+/* To avoid mishaps, the configuration must be applied in the following order: */
+
+/* mask this interrupt */
+ irq_reg_store(ID,
+ _HRT_IRQ_CONTROLLER_MASK_REG_IDX, mask & ~me);
+/* rising edge at input */
+ irq_reg_store(ID,
+ _HRT_IRQ_CONTROLLER_EDGE_REG_IDX, edge_in);
+/* enable interrupt to output */
+ irq_reg_store(ID,
+ _HRT_IRQ_CONTROLLER_ENABLE_REG_IDX, enable);
+/* clear current irq only */
+ irq_reg_store(ID,
+ _HRT_IRQ_CONTROLLER_CLEAR_REG_IDX, me);
+/* unmask interrupt from input */
+ irq_reg_store(ID,
+ _HRT_IRQ_CONTROLLER_MASK_REG_IDX, mask);
+
+ irq_wait_for_write_complete(ID);
+
+return;
+}
+
+void irq_enable_pulse(
+ const irq_ID_t ID,
+ bool pulse)
+{
+ unsigned int edge_out = 0x0;
+
+ if (pulse) {
+ edge_out = 0xffffffff;
+ }
+ /* output is given as edge, not pulse */
+ irq_reg_store(ID,
+ _HRT_IRQ_CONTROLLER_EDGE_NOT_PULSE_REG_IDX, edge_out);
+return;
+}
+
+void irq_disable_channel(
+ const irq_ID_t ID,
+ const unsigned int irq_id)
+{
+ unsigned int mask = irq_reg_load(ID,
+ _HRT_IRQ_CONTROLLER_MASK_REG_IDX);
+ unsigned int enable = irq_reg_load(ID,
+ _HRT_IRQ_CONTROLLER_ENABLE_REG_IDX);
+ unsigned int me = 1U << irq_id;
+
+ assert(ID < N_IRQ_ID);
+ assert(irq_id < IRQ_N_CHANNEL[ID]);
+
+ mask &= ~me;
+ enable &= ~me;
+
+/* enable interrupt to output */
+ irq_reg_store(ID,
+ _HRT_IRQ_CONTROLLER_ENABLE_REG_IDX, enable);
+/* unmask interrupt from input */
+ irq_reg_store(ID,
+ _HRT_IRQ_CONTROLLER_MASK_REG_IDX, mask);
+/* clear current irq only */
+ irq_reg_store(ID,
+ _HRT_IRQ_CONTROLLER_CLEAR_REG_IDX, me);
+
+ irq_wait_for_write_complete(ID);
+
+return;
+}
+
+enum hrt_isp_css_irq_status irq_get_channel_id(
+ const irq_ID_t ID,
+ unsigned int *irq_id)
+{
+ unsigned int irq_status = irq_reg_load(ID,
+ _HRT_IRQ_CONTROLLER_STATUS_REG_IDX);
+ unsigned int idx;
+ enum hrt_isp_css_irq_status status = hrt_isp_css_irq_status_success;
+
+ assert(ID < N_IRQ_ID);
+ assert(irq_id != NULL);
+
+/* find the first irq bit */
+ for (idx = 0; idx < IRQ_N_CHANNEL[ID]; idx++) {
+ if (irq_status & (1U << idx))
+ break;
+ }
+ if (idx == IRQ_N_CHANNEL[ID])
+ return hrt_isp_css_irq_status_error;
+
+/* now check whether there are more bits set */
+ if (irq_status != (1U << idx))
+ status = hrt_isp_css_irq_status_more_irqs;
+
+ irq_reg_store(ID,
+ _HRT_IRQ_CONTROLLER_CLEAR_REG_IDX, 1U << idx);
+
+ irq_wait_for_write_complete(ID);
+
+ if (irq_id != NULL)
+ *irq_id = (unsigned int)idx;
+
+return status;
+}
+
+static const hrt_address IRQ_REQUEST_ADDR[N_IRQ_SW_CHANNEL_ID] = {
+ _REG_GP_IRQ_REQUEST0_ADDR,
+ _REG_GP_IRQ_REQUEST1_ADDR};
+
+void irq_raise(
+ const irq_ID_t ID,
+ const irq_sw_channel_id_t irq_id)
+{
+ hrt_address addr;
+
+ OP___assert(ID == IRQ0_ID);
+ OP___assert(IRQ_BASE[ID] != (hrt_address)-1);
+ OP___assert(irq_id < N_IRQ_SW_CHANNEL_ID);
+
+ (void)ID;
+
+ addr = IRQ_REQUEST_ADDR[irq_id];
+/* The SW IRQ pins are remapped to offset zero */
+ gp_device_reg_store(GP_DEVICE0_ID,
+ (unsigned int)addr, 1);
+ gp_device_reg_store(GP_DEVICE0_ID,
+ (unsigned int)addr, 0);
+return;
+}
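A hedged sketch of host-side use: a software interrupt is first routed through IRQ controller 0 with irq_enable_channel() and then pulsed with irq_raise(). The irq_sw_channel_id_t enumerator name below is assumed, since its definition lies outside this hunk.

/* Sketch: signal SW pin 0 to the host (channel enumerator name assumed). */
static void example_signal_sw_irq(void)
{
	irq_enable_channel(IRQ0_ID, virq_sw_pin_0 - IRQ0_ID_OFFSET);
	irq_raise(IRQ0_ID, IRQ_SW_CHANNEL0_ID);
}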
+
+void irq_controller_get_state(
+ const irq_ID_t ID,
+ irq_controller_state_t *state)
+{
+ assert(ID < N_IRQ_ID);
+ assert(state != NULL);
+
+ state->irq_edge = irq_reg_load(ID,
+ _HRT_IRQ_CONTROLLER_EDGE_REG_IDX);
+ state->irq_mask = irq_reg_load(ID,
+ _HRT_IRQ_CONTROLLER_MASK_REG_IDX);
+ state->irq_status = irq_reg_load(ID,
+ _HRT_IRQ_CONTROLLER_STATUS_REG_IDX);
+ state->irq_enable = irq_reg_load(ID,
+ _HRT_IRQ_CONTROLLER_ENABLE_REG_IDX);
+ state->irq_level_not_pulse = irq_reg_load(ID,
+ _HRT_IRQ_CONTROLLER_EDGE_NOT_PULSE_REG_IDX);
+return;
+}
+
+bool any_virq_signal(void)
+{
+ unsigned int irq_status = irq_reg_load(IRQ0_ID,
+ _HRT_IRQ_CONTROLLER_STATUS_REG_IDX);
+
+return (irq_status != 0);
+}
+
+void cnd_virq_enable_channel(
+ const virq_id_t irq_ID,
+ const bool en)
+{
+ irq_ID_t i;
+ unsigned int channel_ID;
+ irq_ID_t ID = virq_get_irq_id(irq_ID, &channel_ID);
+
+ assert(ID < N_IRQ_ID);
+
+ for (i = IRQ1_ID; i < N_IRQ_ID; i++) {
+ /* It is not allowed to enable the pin of a nested IRQ directly */
+ assert(irq_ID != IRQ_NESTING_ID[i]);
+ }
+
+ if (en) {
+ irq_enable_channel(ID, channel_ID);
+ if (IRQ_NESTING_ID[ID] != N_virq_id) {
+/* Single level nesting, otherwise we'd need to recurse */
+ irq_enable_channel(IRQ0_ID, IRQ_NESTING_ID[ID]);
+ }
+ } else {
+ irq_disable_channel(ID, channel_ID);
+ if ((IRQ_NESTING_ID[ID] != N_virq_id) && !any_irq_channel_enabled(ID)) {
+/* Only disable the top if the nested ones are empty */
+ irq_disable_channel(IRQ0_ID, IRQ_NESTING_ID[ID]);
+ }
+ }
+return;
+}
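A brief sketch of the intended call pattern: enabling one input-system virtual interrupt through cnd_virq_enable_channel() also enables the nested virq_isys pin on controller 0, per the IRQ_NESTING_ID table above.

/* Sketch: enable start-of-frame interrupts from the input system. */
static void example_enable_isys_sof(void)
{
	cnd_virq_enable_channel(virq_isys_sof, true);
}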
+
+
+void virq_clear_all(void)
+{
+ irq_ID_t irq_id;
+
+ for (irq_id = (irq_ID_t)0; irq_id < N_IRQ_ID; irq_id++) {
+ irq_clear_all(irq_id);
+ }
+return;
+}
+
+enum hrt_isp_css_irq_status virq_get_channel_signals(
+ virq_info_t *irq_info)
+{
+ enum hrt_isp_css_irq_status irq_status = hrt_isp_css_irq_status_error;
+ irq_ID_t ID;
+
+ assert(irq_info != NULL);
+
+ for (ID = (irq_ID_t)0 ; ID < N_IRQ_ID; ID++) {
+ if (any_irq_channel_enabled(ID)) {
+ hrt_data irq_data = irq_reg_load(ID,
+ _HRT_IRQ_CONTROLLER_STATUS_REG_IDX);
+
+ if (irq_data != 0) {
+/* The error condition is an IRQ pulse received with no IRQ status written */
+ irq_status = hrt_isp_css_irq_status_success;
+ }
+
+ irq_info->irq_status_reg[ID] |= irq_data;
+
+ irq_reg_store(ID,
+ _HRT_IRQ_CONTROLLER_CLEAR_REG_IDX, irq_data);
+
+ irq_wait_for_write_complete(ID);
+ }
+ }
+
+return irq_status;
+}
+
+void virq_clear_info(
+ virq_info_t *irq_info)
+{
+ irq_ID_t ID;
+
+ assert(irq_info != NULL);
+
+ for (ID = (irq_ID_t)0 ; ID < N_IRQ_ID; ID++) {
+ irq_info->irq_status_reg[ID] = 0;
+ }
+return;
+}
+
+enum hrt_isp_css_irq_status virq_get_channel_id(
+ virq_id_t *irq_id)
+{
+ unsigned int irq_status = irq_reg_load(IRQ0_ID,
+ _HRT_IRQ_CONTROLLER_STATUS_REG_IDX);
+ unsigned int idx;
+ enum hrt_isp_css_irq_status status = hrt_isp_css_irq_status_success;
+ irq_ID_t ID;
+
+ assert(irq_id != NULL);
+
+/* find the first irq bit on device 0 */
+ for (idx = 0; idx < IRQ_N_CHANNEL[IRQ0_ID]; idx++) {
+ if (irq_status & (1U << idx))
+ break;
+ }
+
+ if (idx == IRQ_N_CHANNEL[IRQ0_ID]) {
+ return hrt_isp_css_irq_status_error;
+ }
+
+/* Check whether there are more bits set on device 0 */
+ if (irq_status != (1U << idx)) {
+ status = hrt_isp_css_irq_status_more_irqs;
+ }
+
+/* Check whether we have an IRQ on one of the nested devices */
+ for (ID = N_IRQ_ID - 1; ID > (irq_ID_t)0; ID--) {
+ if (IRQ_NESTING_ID[ID] == (virq_id_t)idx) {
+ break;
+ }
+ }
+
+/* If we have a nested IRQ, load that state, discard the device 0 state */
+ if (ID != IRQ0_ID) {
+ irq_status = irq_reg_load(ID,
+ _HRT_IRQ_CONTROLLER_STATUS_REG_IDX);
+/* find the first irq bit on device "id" */
+ for (idx = 0; idx < IRQ_N_CHANNEL[ID]; idx++) {
+ if (irq_status & (1U << idx))
+ break;
+ }
+
+ if (idx == IRQ_N_CHANNEL[ID]) {
+ return hrt_isp_css_irq_status_error;
+ }
+
+/* Alternatively check whether there are more bits set on this device */
+ if (irq_status != (1U << idx)) {
+ status = hrt_isp_css_irq_status_more_irqs;
+ } else {
+/* If this device is empty, clear the state on device 0 */
+ irq_reg_store(IRQ0_ID,
+ _HRT_IRQ_CONTROLLER_CLEAR_REG_IDX, 1U << IRQ_NESTING_ID[ID]);
+ }
+ } /* if (ID != IRQ0_ID) */
+
+/* Here we proceed to clear the IRQ on detected device, if no nested IRQ, this is device 0 */
+ irq_reg_store(ID,
+ _HRT_IRQ_CONTROLLER_CLEAR_REG_IDX, 1U << idx);
+
+ irq_wait_for_write_complete(ID);
+
+ idx += IRQ_N_ID_OFFSET[ID];
+ if (irq_id != NULL)
+ *irq_id = (virq_id_t)idx;
+
+return status;
+}
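A sketch of the dispatch loop this function is written for: it is called repeatedly while it reports more pending interrupts. The handle_virq() callback is hypothetical.

/* Sketch: drain and dispatch all pending virtual IRQs. */
static void example_dispatch_virqs(void (*handle_virq)(virq_id_t))
{
	enum hrt_isp_css_irq_status status;
	virq_id_t id;

	do {
		status = virq_get_channel_id(&id);
		if (status == hrt_isp_css_irq_status_error)
			break;	/* spurious pulse: no status bit was set */
		handle_virq(id);
	} while (status == hrt_isp_css_irq_status_more_irqs);
}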
+
+STORAGE_CLASS_INLINE void irq_wait_for_write_complete(
+ const irq_ID_t ID)
+{
+ assert(ID < N_IRQ_ID);
+ assert(IRQ_BASE[ID] != (hrt_address)-1);
+ (void)ia_css_device_load_uint32(IRQ_BASE[ID] +
+ _HRT_IRQ_CONTROLLER_ENABLE_REG_IDX*sizeof(hrt_data));
+}
+
+STORAGE_CLASS_INLINE bool any_irq_channel_enabled(
+ const irq_ID_t ID)
+{
+ hrt_data en_reg;
+
+ assert(ID < N_IRQ_ID);
+
+ en_reg = irq_reg_load(ID,
+ _HRT_IRQ_CONTROLLER_ENABLE_REG_IDX);
+
+return (en_reg != 0);
+}
+
+STORAGE_CLASS_INLINE irq_ID_t virq_get_irq_id(
+ const virq_id_t irq_ID,
+ unsigned int *channel_ID)
+{
+ irq_ID_t ID;
+
+ assert(channel_ID != NULL);
+
+ for (ID = (irq_ID_t)0 ; ID < N_IRQ_ID; ID++) {
+ if (irq_ID < IRQ_N_ID_OFFSET[ID + 1]) {
+ break;
+ }
+ }
+
+ *channel_ID = (unsigned int)irq_ID - IRQ_N_ID_OFFSET[ID];
+
+return ID;
+}
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/irq_local.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/irq_local.h
new file mode 100644
index 000000000000..f522dfd1a9f1
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/irq_local.h
@@ -0,0 +1,136 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IRQ_LOCAL_H_INCLUDED__
+#define __IRQ_LOCAL_H_INCLUDED__
+
+#include "irq_global.h"
+
+#include <irq_controller_defs.h>
+
+/* IRQ0_ID */
+#include "hive_isp_css_defs.h"
+#define HIVE_GP_DEV_IRQ_NUM_IRQS 32
+/* IRQ1_ID */
+#include "input_formatter_subsystem_defs.h"
+#define HIVE_IFMT_IRQ_NUM_IRQS 5
+/* IRQ2_ID */
+#include "input_system_defs.h"
+/* IRQ3_ID */
+#include "input_selector_defs.h"
+
+
+#define IRQ_ID_OFFSET 32
+#define IRQ0_ID_OFFSET 0
+#define IRQ1_ID_OFFSET IRQ_ID_OFFSET
+#define IRQ2_ID_OFFSET (2*IRQ_ID_OFFSET)
+#define IRQ3_ID_OFFSET (3*IRQ_ID_OFFSET)
+#define IRQ_END_OFFSET (4*IRQ_ID_OFFSET)
+
+#define IRQ0_ID_N_CHANNEL HIVE_GP_DEV_IRQ_NUM_IRQS
+#define IRQ1_ID_N_CHANNEL HIVE_IFMT_IRQ_NUM_IRQS
+#define IRQ2_ID_N_CHANNEL HIVE_ISYS_IRQ_NUM_BITS
+#define IRQ3_ID_N_CHANNEL HIVE_ISEL_IRQ_NUM_IRQS
+
+typedef struct virq_info_s virq_info_t;
+typedef struct irq_controller_state_s irq_controller_state_t;
+
+
+typedef enum {
+ virq_gpio_pin_0 = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_GPIO_PIN_0_BIT_ID,
+ virq_gpio_pin_1 = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_GPIO_PIN_1_BIT_ID,
+ virq_gpio_pin_2 = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_GPIO_PIN_2_BIT_ID,
+ virq_gpio_pin_3 = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_GPIO_PIN_3_BIT_ID,
+ virq_gpio_pin_4 = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_GPIO_PIN_4_BIT_ID,
+ virq_gpio_pin_5 = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_GPIO_PIN_5_BIT_ID,
+ virq_gpio_pin_6 = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_GPIO_PIN_6_BIT_ID,
+ virq_gpio_pin_7 = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_GPIO_PIN_7_BIT_ID,
+ virq_gpio_pin_8 = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_GPIO_PIN_8_BIT_ID,
+ virq_gpio_pin_9 = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_GPIO_PIN_9_BIT_ID,
+ virq_gpio_pin_10 = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_GPIO_PIN_10_BIT_ID,
+ virq_gpio_pin_11 = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_GPIO_PIN_11_BIT_ID,
+ virq_sp = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_SP_BIT_ID,
+ virq_isp = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_ISP_BIT_ID,
+ virq_isys = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_ISYS_BIT_ID,
+ virq_isel = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_ISEL_BIT_ID,
+ virq_ifmt = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_IFMT_BIT_ID,
+ virq_sp_stream_mon = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_SP_STREAM_MON_BIT_ID,
+ virq_isp_stream_mon = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_ISP_STREAM_MON_BIT_ID,
+ virq_mod_stream_mon = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_MOD_STREAM_MON_BIT_ID,
+#if defined(IS_ISP_2400_MAMOIADA_SYSTEM)
+ virq_isp_pmem_error = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_ISP_PMEM_ERROR_BIT_ID,
+#elif defined(IS_ISP_2401_MAMOIADA_SYSTEM)
+ virq_isys_2401 = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_IS2401_BIT_ID,
+#else
+#error "irq_local.h: 2400_SYSTEM must be one of {2400, 2401}"
+#endif
+ virq_isp_bamem_error = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_ISP_BAMEM_ERROR_BIT_ID,
+ virq_isp_dmem_error = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_ISP_DMEM_ERROR_BIT_ID,
+ virq_sp_icache_mem_error = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_SP_ICACHE_MEM_ERROR_BIT_ID,
+ virq_sp_dmem_error = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_SP_DMEM_ERROR_BIT_ID,
+ virq_mmu_cache_mem_error = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_MMU_CACHE_MEM_ERROR_BIT_ID,
+ virq_gp_timer_0 = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_GP_TIMER_0_BIT_ID,
+ virq_gp_timer_1 = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_GP_TIMER_1_BIT_ID,
+ virq_sw_pin_0 = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_SW_PIN_0_BIT_ID,
+ virq_sw_pin_1 = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_SW_PIN_1_BIT_ID,
+ virq_dma = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_DMA_BIT_ID,
+ virq_sp_stream_mon_b = IRQ0_ID_OFFSET + HIVE_GP_DEV_IRQ_SP_STREAM_MON_B_BIT_ID,
+
+ virq_ifmt0_id = IRQ1_ID_OFFSET + HIVE_IFMT_IRQ_IFT_PRIM_BIT_ID,
+ virq_ifmt1_id = IRQ1_ID_OFFSET + HIVE_IFMT_IRQ_IFT_PRIM_B_BIT_ID,
+ virq_ifmt2_id = IRQ1_ID_OFFSET + HIVE_IFMT_IRQ_IFT_SEC_BIT_ID,
+ virq_ifmt3_id = IRQ1_ID_OFFSET + HIVE_IFMT_IRQ_MEM_CPY_BIT_ID,
+ virq_ifmt_sideband_changed = IRQ1_ID_OFFSET + HIVE_IFMT_IRQ_SIDEBAND_CHANGED_BIT_ID,
+
+ virq_isys_sof = IRQ2_ID_OFFSET + HIVE_ISYS_IRQ_CSI_SOF_BIT_ID,
+ virq_isys_eof = IRQ2_ID_OFFSET + HIVE_ISYS_IRQ_CSI_EOF_BIT_ID,
+ virq_isys_sol = IRQ2_ID_OFFSET + HIVE_ISYS_IRQ_CSI_SOL_BIT_ID,
+ virq_isys_eol = IRQ2_ID_OFFSET + HIVE_ISYS_IRQ_CSI_EOL_BIT_ID,
+ virq_isys_csi = IRQ2_ID_OFFSET + HIVE_ISYS_IRQ_CSI_RECEIVER_BIT_ID,
+ virq_isys_csi_be = IRQ2_ID_OFFSET + HIVE_ISYS_IRQ_CSI_RECEIVER_BE_BIT_ID,
+ virq_isys_capt0_id_no_sop = IRQ2_ID_OFFSET + HIVE_ISYS_IRQ_CAP_UNIT_A_NO_SOP,
+ virq_isys_capt0_id_late_sop = IRQ2_ID_OFFSET + HIVE_ISYS_IRQ_CAP_UNIT_A_LATE_SOP,
+ virq_isys_capt1_id_no_sop = IRQ2_ID_OFFSET + HIVE_ISYS_IRQ_CAP_UNIT_B_NO_SOP,
+ virq_isys_capt1_id_late_sop = IRQ2_ID_OFFSET + HIVE_ISYS_IRQ_CAP_UNIT_B_LATE_SOP,
+ virq_isys_capt2_id_no_sop = IRQ2_ID_OFFSET + HIVE_ISYS_IRQ_CAP_UNIT_C_NO_SOP,
+ virq_isys_capt2_id_late_sop = IRQ2_ID_OFFSET + HIVE_ISYS_IRQ_CAP_UNIT_C_LATE_SOP,
+ virq_isys_acq_sop_mismatch = IRQ2_ID_OFFSET + HIVE_ISYS_IRQ_ACQ_UNIT_SOP_MISMATCH,
+ virq_isys_ctrl_capt0 = IRQ2_ID_OFFSET + HIVE_ISYS_IRQ_INP_CTRL_CAPA,
+ virq_isys_ctrl_capt1 = IRQ2_ID_OFFSET + HIVE_ISYS_IRQ_INP_CTRL_CAPB,
+ virq_isys_ctrl_capt2 = IRQ2_ID_OFFSET + HIVE_ISYS_IRQ_INP_CTRL_CAPC,
+ virq_isys_cio_to_ahb = IRQ2_ID_OFFSET + HIVE_ISYS_IRQ_CIO2AHB,
+ virq_isys_dma = IRQ2_ID_OFFSET + HIVE_ISYS_IRQ_DMA_BIT_ID,
+ virq_isys_fifo_monitor = IRQ2_ID_OFFSET + HIVE_ISYS_IRQ_STREAM_MON_BIT_ID,
+
+ virq_isel_sof = IRQ3_ID_OFFSET + HIVE_ISEL_IRQ_SYNC_GEN_SOF_BIT_ID,
+ virq_isel_eof = IRQ3_ID_OFFSET + HIVE_ISEL_IRQ_SYNC_GEN_EOF_BIT_ID,
+ virq_isel_sol = IRQ3_ID_OFFSET + HIVE_ISEL_IRQ_SYNC_GEN_SOL_BIT_ID,
+ virq_isel_eol = IRQ3_ID_OFFSET + HIVE_ISEL_IRQ_SYNC_GEN_EOL_BIT_ID,
+
+ N_virq_id = IRQ_END_OFFSET
+} virq_id_t;
+
+struct virq_info_s {
+ hrt_data irq_status_reg[N_IRQ_ID];
+};
+
+struct irq_controller_state_s {
+ unsigned int irq_edge;
+ unsigned int irq_mask;
+ unsigned int irq_status;
+ unsigned int irq_enable;
+ unsigned int irq_level_not_pulse;
+};
+
+#endif /* __IRQ_LOCAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/irq_private.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/irq_private.h
new file mode 100644
index 000000000000..eb325e870e88
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/irq_private.h
@@ -0,0 +1,44 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IRQ_PRIVATE_H_INCLUDED__
+#define __IRQ_PRIVATE_H_INCLUDED__
+
+#include "irq_public.h"
+
+#include "device_access.h"
+
+#include "assert_support.h"
+
+STORAGE_CLASS_IRQ_C void irq_reg_store(
+ const irq_ID_t ID,
+ const unsigned int reg,
+ const hrt_data value)
+{
+assert(ID < N_IRQ_ID);
+assert(IRQ_BASE[ID] != (hrt_address)-1);
+ ia_css_device_store_uint32(IRQ_BASE[ID] + reg*sizeof(hrt_data), value);
+return;
+}
+
+STORAGE_CLASS_IRQ_C hrt_data irq_reg_load(
+ const irq_ID_t ID,
+ const unsigned int reg)
+{
+assert(ID < N_IRQ_ID);
+assert(IRQ_BASE[ID] != (hrt_address)-1);
+return ia_css_device_load_uint32(IRQ_BASE[ID] + reg*sizeof(hrt_data));
+}
+
+#endif /* __IRQ_PRIVATE_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/isp.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/isp.c
new file mode 100644
index 000000000000..47c21e486c25
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/isp.c
@@ -0,0 +1,129 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <system_global.h>
+#include "isp.h"
+
+#ifndef __INLINE_ISP__
+#include "isp_private.h"
+#endif /* __INLINE_ISP__ */
+
+#include "assert_support.h"
+#include "platform_support.h" /* hrt_sleep() */
+
+void cnd_isp_irq_enable(
+ const isp_ID_t ID,
+ const bool cnd)
+{
+ if (cnd) {
+ isp_ctrl_setbit(ID, ISP_IRQ_READY_REG, ISP_IRQ_READY_BIT);
+/* Enabling the IRQ immediately triggers an interrupt, clear it */
+ isp_ctrl_setbit(ID, ISP_IRQ_CLEAR_REG, ISP_IRQ_CLEAR_BIT);
+ } else {
+ isp_ctrl_clearbit(ID, ISP_IRQ_READY_REG,
+ ISP_IRQ_READY_BIT);
+ }
+return;
+}
+
+void isp_get_state(
+ const isp_ID_t ID,
+ isp_state_t *state,
+ isp_stall_t *stall)
+{
+ hrt_data sc = isp_ctrl_load(ID, ISP_SC_REG);
+
+ assert(state != NULL);
+ assert(stall != NULL);
+
+#if defined(_hrt_sysmem_ident_address)
+ /* Patch to avoid compiler unused symbol warning in C_RUN build */
+ (void)__hrt_sysmem_ident_address;
+ (void)_hrt_sysmem_map_var;
+#endif
+
+ state->pc = isp_ctrl_load(ID, ISP_PC_REG);
+ state->status_register = sc;
+ state->is_broken = isp_ctrl_getbit(ID, ISP_SC_REG, ISP_BROKEN_BIT);
+ state->is_idle = isp_ctrl_getbit(ID, ISP_SC_REG, ISP_IDLE_BIT);
+ state->is_sleeping = isp_ctrl_getbit(ID, ISP_SC_REG, ISP_SLEEPING_BIT);
+ state->is_stalling = isp_ctrl_getbit(ID, ISP_SC_REG, ISP_STALLING_BIT);
+ stall->stat_ctrl =
+ !isp_ctrl_getbit(ID, ISP_CTRL_SINK_REG, ISP_CTRL_SINK_BIT);
+ stall->pmem =
+ !isp_ctrl_getbit(ID, ISP_PMEM_SINK_REG, ISP_PMEM_SINK_BIT);
+ stall->dmem =
+ !isp_ctrl_getbit(ID, ISP_DMEM_SINK_REG, ISP_DMEM_SINK_BIT);
+ stall->vmem =
+ !isp_ctrl_getbit(ID, ISP_VMEM_SINK_REG, ISP_VMEM_SINK_BIT);
+ stall->fifo0 =
+ !isp_ctrl_getbit(ID, ISP_FIFO0_SINK_REG, ISP_FIFO0_SINK_BIT);
+ stall->fifo1 =
+ !isp_ctrl_getbit(ID, ISP_FIFO1_SINK_REG, ISP_FIFO1_SINK_BIT);
+ stall->fifo2 =
+ !isp_ctrl_getbit(ID, ISP_FIFO2_SINK_REG, ISP_FIFO2_SINK_BIT);
+ stall->fifo3 =
+ !isp_ctrl_getbit(ID, ISP_FIFO3_SINK_REG, ISP_FIFO3_SINK_BIT);
+ stall->fifo4 =
+ !isp_ctrl_getbit(ID, ISP_FIFO4_SINK_REG, ISP_FIFO4_SINK_BIT);
+ stall->fifo5 =
+ !isp_ctrl_getbit(ID, ISP_FIFO5_SINK_REG, ISP_FIFO5_SINK_BIT);
+ stall->fifo6 =
+ !isp_ctrl_getbit(ID, ISP_FIFO6_SINK_REG, ISP_FIFO6_SINK_BIT);
+ stall->vamem1 =
+ !isp_ctrl_getbit(ID, ISP_VAMEM1_SINK_REG, ISP_VAMEM1_SINK_BIT);
+ stall->vamem2 =
+ !isp_ctrl_getbit(ID, ISP_VAMEM2_SINK_REG, ISP_VAMEM2_SINK_BIT);
+ stall->vamem3 =
+ !isp_ctrl_getbit(ID, ISP_VAMEM3_SINK_REG, ISP_VAMEM3_SINK_BIT);
+ stall->hmem =
+ !isp_ctrl_getbit(ID, ISP_HMEM_SINK_REG, ISP_HMEM_SINK_BIT);
+/*
+ stall->icache_master =
+ !isp_ctrl_getbit(ID, ISP_ICACHE_MT_SINK_REG,
+ ISP_ICACHE_MT_SINK_BIT);
+ */
+return;
+}
+
+/* ISP functions to control the ISP state from the host, even in crun. */
+
+/* Inspect readiness of an ISP indexed by ID */
+unsigned isp_is_ready(isp_ID_t ID)
+{
+ assert (ID < N_ISP_ID);
+ return isp_ctrl_getbit(ID, ISP_SC_REG, ISP_IDLE_BIT);
+}
+
+/* Inspect sleeping of an ISP indexed by ID */
+unsigned isp_is_sleeping(isp_ID_t ID)
+{
+ assert (ID < N_ISP_ID);
+ return isp_ctrl_getbit(ID, ISP_SC_REG, ISP_SLEEPING_BIT);
+}
+
+/* To be called by the host immediately before starting ISP ID. */
+void isp_start(isp_ID_t ID)
+{
+ assert (ID < N_ISP_ID);
+}
+
+/* Wake up ISP ID. */
+void isp_wake(isp_ID_t ID)
+{
+ assert (ID < N_ISP_ID);
+ isp_ctrl_setbit(ID, ISP_SC_REG, ISP_START_BIT);
+ hrt_sleep();
+}
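A hedged sketch of the host-side start sequence these helpers support; loading the ISP binary is outside the scope of this file and assumed to have happened already.

/* Sketch: wait for the core to go idle, then kick it. */
static void example_start_isp(isp_ID_t id)
{
	while (!isp_is_ready(id))
		hrt_sleep();
	isp_start(id);	/* host bookkeeping hook before the run */
	isp_wake(id);	/* sets ISP_START_BIT and yields briefly */
}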
+
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/isp_local.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/isp_local.h
new file mode 100644
index 000000000000..5dcc52dff3dd
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/isp_local.h
@@ -0,0 +1,57 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __ISP_LOCAL_H_INCLUDED__
+#define __ISP_LOCAL_H_INCLUDED__
+
+#include <stdbool.h>
+
+#include "isp_global.h"
+
+#include <isp2400_support.h>
+
+#define HIVE_ISP_VMEM_MASK ((1U<<ISP_VMEM_ELEMBITS)-1)
+
+typedef struct isp_state_s isp_state_t;
+typedef struct isp_stall_s isp_stall_t;
+
+struct isp_state_s {
+ int pc;
+ int status_register;
+ bool is_broken;
+ bool is_idle;
+ bool is_sleeping;
+ bool is_stalling;
+};
+
+struct isp_stall_s {
+ bool fifo0;
+ bool fifo1;
+ bool fifo2;
+ bool fifo3;
+ bool fifo4;
+ bool fifo5;
+ bool fifo6;
+ bool stat_ctrl;
+ bool dmem;
+ bool vmem;
+ bool vamem1;
+ bool vamem2;
+ bool vamem3;
+ bool hmem;
+ bool pmem;
+ bool icache_master;
+};
+
+#endif /* __ISP_LOCAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/isp_private.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/isp_private.h
new file mode 100644
index 000000000000..7f63255d3fc5
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/isp_private.h
@@ -0,0 +1,157 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __ISP_PRIVATE_H_INCLUDED__
+#define __ISP_PRIVATE_H_INCLUDED__
+
+#ifdef HRT_MEMORY_ACCESS
+#include <hrt/api.h>
+#endif
+
+#include "isp_public.h"
+
+#include "device_access.h"
+
+#include "assert_support.h"
+#include "type_support.h"
+
+STORAGE_CLASS_ISP_C void isp_ctrl_store(
+ const isp_ID_t ID,
+ const unsigned int reg,
+ const hrt_data value)
+{
+ assert(ID < N_ISP_ID);
+ assert(ISP_CTRL_BASE[ID] != (hrt_address)-1);
+#if !defined(HRT_MEMORY_ACCESS)
+ ia_css_device_store_uint32(ISP_CTRL_BASE[ID] + reg*sizeof(hrt_data), value);
+#else
+ hrt_master_port_store_32(ISP_CTRL_BASE[ID] + reg*sizeof(hrt_data), value);
+#endif
+ return;
+}
+
+STORAGE_CLASS_ISP_C hrt_data isp_ctrl_load(
+ const isp_ID_t ID,
+ const unsigned int reg)
+{
+ assert(ID < N_ISP_ID);
+ assert(ISP_CTRL_BASE[ID] != (hrt_address)-1);
+#if !defined(HRT_MEMORY_ACCESS)
+ return ia_css_device_load_uint32(ISP_CTRL_BASE[ID] + reg*sizeof(hrt_data));
+#else
+ return hrt_master_port_uload_32(ISP_CTRL_BASE[ID] + reg*sizeof(hrt_data));
+#endif
+}
+
+STORAGE_CLASS_ISP_C bool isp_ctrl_getbit(
+ const isp_ID_t ID,
+ const unsigned int reg,
+ const unsigned int bit)
+{
+ hrt_data val = isp_ctrl_load(ID, reg);
+ return (val & (1UL << bit)) != 0;
+}
+
+STORAGE_CLASS_ISP_C void isp_ctrl_setbit(
+ const isp_ID_t ID,
+ const unsigned int reg,
+ const unsigned int bit)
+{
+ hrt_data data = isp_ctrl_load(ID, reg);
+ isp_ctrl_store(ID, reg, (data | (1UL << bit)));
+ return;
+}
+
+STORAGE_CLASS_ISP_C void isp_ctrl_clearbit(
+ const isp_ID_t ID,
+ const unsigned int reg,
+ const unsigned int bit)
+{
+ hrt_data data = isp_ctrl_load(ID, reg);
+ isp_ctrl_store(ID, reg, (data & ~(1UL << bit)));
+ return;
+}
+
+STORAGE_CLASS_ISP_C void isp_dmem_store(
+ const isp_ID_t ID,
+ unsigned int addr,
+ const void *data,
+ const size_t size)
+{
+ assert(ID < N_ISP_ID);
+ assert(ISP_DMEM_BASE[ID] != (hrt_address)-1);
+#if !defined(HRT_MEMORY_ACCESS)
+ ia_css_device_store(ISP_DMEM_BASE[ID] + addr, data, size);
+#else
+ hrt_master_port_store(ISP_DMEM_BASE[ID] + addr, data, size);
+#endif
+ return;
+}
+
+STORAGE_CLASS_ISP_C void isp_dmem_load(
+ const isp_ID_t ID,
+ const unsigned int addr,
+ void *data,
+ const size_t size)
+{
+ assert(ID < N_ISP_ID);
+ assert(ISP_DMEM_BASE[ID] != (hrt_address)-1);
+#if !defined(HRT_MEMORY_ACCESS)
+ ia_css_device_load(ISP_DMEM_BASE[ID] + addr, data, size);
+#else
+ hrt_master_port_load(ISP_DMEM_BASE[ID] + addr, data, size);
+#endif
+ return;
+}
+
+STORAGE_CLASS_ISP_C void isp_dmem_store_uint32(
+ const isp_ID_t ID,
+ unsigned int addr,
+ const uint32_t data)
+{
+ assert(ID < N_ISP_ID);
+ assert(ISP_DMEM_BASE[ID] != (hrt_address)-1);
+ (void)ID;
+#if !defined(HRT_MEMORY_ACCESS)
+ ia_css_device_store_uint32(ISP_DMEM_BASE[ID] + addr, data);
+#else
+ hrt_master_port_store_32(ISP_DMEM_BASE[ID] + addr, data);
+#endif
+ return;
+}
+
+STORAGE_CLASS_ISP_C uint32_t isp_dmem_load_uint32(
+ const isp_ID_t ID,
+ const unsigned int addr)
+{
+ assert(ID < N_ISP_ID);
+ assert(ISP_DMEM_BASE[ID] != (hrt_address)-1);
+ (void)ID;
+#if !defined(HRT_MEMORY_ACCESS)
+ return ia_css_device_load_uint32(ISP_DMEM_BASE[ID] + addr);
+#else
+ return hrt_master_port_uload_32(ISP_DMEM_BASE[ID] + addr);
+#endif
+}
+
+STORAGE_CLASS_ISP_C uint32_t isp_2w_cat_1w(
+ const uint16_t x0,
+ const uint16_t x1)
+{
+ uint32_t out = ((uint32_t)(x1 & HIVE_ISP_VMEM_MASK) << ISP_VMEM_ELEMBITS)
+ | (x0 & HIVE_ISP_VMEM_MASK);
+ return out;
+}
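A worked example of the packing above, assuming ISP_VMEM_ELEMBITS is 14 (so HIVE_ISP_VMEM_MASK is 0x3fff); the element width is an assumption about this ISP generation, not taken from this patch.

/* Sketch: with 14-bit VMEM elements,
 * isp_2w_cat_1w(0x0123, 0x2abc) == (0x2abc << 14) | 0x0123 == 0x0aaf0123.
 */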
+
+#endif /* __ISP_PRIVATE_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/mmu.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/mmu.c
new file mode 100644
index 000000000000..b75d0f85d524
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/mmu.c
@@ -0,0 +1,50 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+/* The name "mmu.h" is already taken */
+#include "mmu_device.h"
+
+#ifndef __INLINE_MMU__
+#include "mmu_private.h"
+#endif /* __INLINE_MMU__ */
+
+void mmu_set_page_table_base_index(
+ const mmu_ID_t ID,
+ const hrt_data base_index)
+{
+ mmu_reg_store(ID, _HRT_MMU_PAGE_TABLE_BASE_ADDRESS_REG_IDX, base_index);
+return;
+}
+
+hrt_data mmu_get_page_table_base_index(
+ const mmu_ID_t ID)
+{
+return mmu_reg_load(ID, _HRT_MMU_PAGE_TABLE_BASE_ADDRESS_REG_IDX);
+}
+
+void mmu_invalidate_cache(
+ const mmu_ID_t ID)
+{
+ mmu_reg_store(ID, _HRT_MMU_INVALIDATE_TLB_REG_IDX, 1);
+return;
+}
+
+void mmu_invalidate_cache_all(void)
+{
+ mmu_ID_t mmu_id;
+ for (mmu_id = (mmu_ID_t)0; mmu_id < N_MMU_ID; mmu_id++) {
+ mmu_invalidate_cache(mmu_id);
+ }
+}
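A sketch of the usual sequence after the host rewrites the ISP page tables; computing base_index (the page-table base expressed in the MMU's own addressing) is assumed to happen elsewhere.

/* Sketch: point every MMU at a new page table and flush the TLBs. */
static void example_switch_page_table(hrt_data base_index)
{
	mmu_ID_t id;

	for (id = (mmu_ID_t)0; id < N_MMU_ID; id++)
		mmu_set_page_table_base_index(id, base_index);
	mmu_invalidate_cache_all();
}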
+
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/mmu_local.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/mmu_local.h
new file mode 100644
index 000000000000..7c3ad157189f
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/mmu_local.h
@@ -0,0 +1,20 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __MMU_LOCAL_H_INCLUDED__
+#define __MMU_LOCAL_H_INCLUDED__
+
+#include "mmu_global.h"
+
+#endif /* __MMU_LOCAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/mmu_private.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/mmu_private.h
new file mode 100644
index 000000000000..392b6cc24e8f
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/mmu_private.h
@@ -0,0 +1,44 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __MMU_PRIVATE_H_INCLUDED__
+#define __MMU_PRIVATE_H_INCLUDED__
+
+#include "mmu_public.h"
+
+#include "device_access.h"
+
+#include "assert_support.h"
+
+STORAGE_CLASS_MMU_H void mmu_reg_store(
+ const mmu_ID_t ID,
+ const unsigned int reg,
+ const hrt_data value)
+{
+assert(ID < N_MMU_ID);
+assert(MMU_BASE[ID] != (hrt_address)-1);
+ ia_css_device_store_uint32(MMU_BASE[ID] + reg*sizeof(hrt_data), value);
+return;
+}
+
+STORAGE_CLASS_MMU_H hrt_data mmu_reg_load(
+ const mmu_ID_t ID,
+ const unsigned int reg)
+{
+assert(ID < N_MMU_ID);
+assert(MMU_BASE[ID] != (hrt_address)-1);
+return ia_css_device_load_uint32(MMU_BASE[ID] + reg*sizeof(hrt_data));
+}
+
+#endif /* __MMU_PRIVATE_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/sp.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/sp.c
new file mode 100644
index 000000000000..db694d3a6fbb
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/sp.c
@@ -0,0 +1,81 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "sp.h"
+
+#ifndef __INLINE_SP__
+#include "sp_private.h"
+#endif /* __INLINE_SP__ */
+
+#include "assert_support.h"
+
+void cnd_sp_irq_enable(
+ const sp_ID_t ID,
+ const bool cnd)
+{
+ if (cnd) {
+ sp_ctrl_setbit(ID, SP_IRQ_READY_REG, SP_IRQ_READY_BIT);
+/* Enabling the IRQ immediately triggers an interrupt, clear it */
+ sp_ctrl_setbit(ID, SP_IRQ_CLEAR_REG, SP_IRQ_CLEAR_BIT);
+ } else {
+ sp_ctrl_clearbit(ID, SP_IRQ_READY_REG, SP_IRQ_READY_BIT);
+ }
+}
+
+void sp_get_state(
+ const sp_ID_t ID,
+ sp_state_t *state,
+ sp_stall_t *stall)
+{
+ hrt_data sc = sp_ctrl_load(ID, SP_SC_REG);
+
+ assert(state != NULL);
+ assert(stall != NULL);
+
+ state->pc = sp_ctrl_load(ID, SP_PC_REG);
+ state->status_register = sc;
+ state->is_broken = (sc & (1U << SP_BROKEN_BIT)) != 0;
+ state->is_idle = (sc & (1U << SP_IDLE_BIT)) != 0;
+ state->is_sleeping = (sc & (1U << SP_SLEEPING_BIT)) != 0;
+ state->is_stalling = (sc & (1U << SP_STALLING_BIT)) != 0;
+ stall->fifo0 =
+ !sp_ctrl_getbit(ID, SP_FIFO0_SINK_REG, SP_FIFO0_SINK_BIT);
+ stall->fifo1 =
+ !sp_ctrl_getbit(ID, SP_FIFO1_SINK_REG, SP_FIFO1_SINK_BIT);
+ stall->fifo2 =
+ !sp_ctrl_getbit(ID, SP_FIFO2_SINK_REG, SP_FIFO2_SINK_BIT);
+ stall->fifo3 =
+ !sp_ctrl_getbit(ID, SP_FIFO3_SINK_REG, SP_FIFO3_SINK_BIT);
+ stall->fifo4 =
+ !sp_ctrl_getbit(ID, SP_FIFO4_SINK_REG, SP_FIFO4_SINK_BIT);
+ stall->fifo5 =
+ !sp_ctrl_getbit(ID, SP_FIFO5_SINK_REG, SP_FIFO5_SINK_BIT);
+ stall->fifo6 =
+ !sp_ctrl_getbit(ID, SP_FIFO6_SINK_REG, SP_FIFO6_SINK_BIT);
+ stall->fifo7 =
+ !sp_ctrl_getbit(ID, SP_FIFO7_SINK_REG, SP_FIFO7_SINK_BIT);
+ stall->fifo8 =
+ !sp_ctrl_getbit(ID, SP_FIFO8_SINK_REG, SP_FIFO8_SINK_BIT);
+ stall->fifo9 =
+ !sp_ctrl_getbit(ID, SP_FIFO9_SINK_REG, SP_FIFO9_SINK_BIT);
+ stall->fifoa =
+ !sp_ctrl_getbit(ID, SP_FIFOA_SINK_REG, SP_FIFOA_SINK_BIT);
+ stall->dmem =
+ !sp_ctrl_getbit(ID, SP_DMEM_SINK_REG, SP_DMEM_SINK_BIT);
+ stall->control_master =
+ !sp_ctrl_getbit(ID, SP_CTRL_MT_SINK_REG, SP_CTRL_MT_SINK_BIT);
+ stall->icache_master =
+ !sp_ctrl_getbit(ID, SP_ICACHE_MT_SINK_REG,
+ SP_ICACHE_MT_SINK_BIT);
+}
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/sp_local.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/sp_local.h
new file mode 100644
index 000000000000..3c70b8fdb532
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/sp_local.h
@@ -0,0 +1,101 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __SP_LOCAL_H_INCLUDED__
+#define __SP_LOCAL_H_INCLUDED__
+
+#include <type_support.h>
+#include "sp_global.h"
+
+struct sp_state_s {
+ int pc;
+ int status_register;
+ bool is_broken;
+ bool is_idle;
+ bool is_sleeping;
+ bool is_stalling;
+};
+
+struct sp_stall_s {
+ bool fifo0;
+ bool fifo1;
+ bool fifo2;
+ bool fifo3;
+ bool fifo4;
+ bool fifo5;
+ bool fifo6;
+ bool fifo7;
+ bool fifo8;
+ bool fifo9;
+ bool fifoa;
+ bool dmem;
+ bool control_master;
+ bool icache_master;
+};
+
+#define sp_address_of(var) (HIVE_ADDR_ ## var)
+
+/*
+ * deprecated
+ */
+#define store_sp_int(var, value) \
+ sp_dmem_store_uint32(SP0_ID, (unsigned)sp_address_of(var), \
+ (uint32_t)(value))
+
+#define store_sp_ptr(var, value) \
+ sp_dmem_store_uint32(SP0_ID, (unsigned)sp_address_of(var), \
+ (uint32_t)(value))
+
+#define load_sp_uint(var) \
+ sp_dmem_load_uint32(SP0_ID, (unsigned)sp_address_of(var))
+
+#define load_sp_array_uint8(array_name, index) \
+ sp_dmem_load_uint8(SP0_ID, (unsigned)sp_address_of(array_name) + \
+ (index)*sizeof(uint8_t))
+
+#define load_sp_array_uint16(array_name, index) \
+ sp_dmem_load_uint16(SP0_ID, (unsigned)sp_address_of(array_name) + \
+ (index)*sizeof(uint16_t))
+
+#define load_sp_array_uint(array_name, index) \
+ sp_dmem_load_uint32(SP0_ID, (unsigned)sp_address_of(array_name) + \
+ (index)*sizeof(uint32_t))
+
+#define store_sp_var(var, data, bytes) \
+ sp_dmem_store(SP0_ID, (unsigned)sp_address_of(var), data, bytes)
+
+#define store_sp_array_uint8(array_name, index, value) \
+ sp_dmem_store_uint8(SP0_ID, (unsigned)sp_address_of(array_name) + \
+ (index)*sizeof(uint8_t), value)
+
+#define store_sp_array_uint16(array_name, index, value) \
+ sp_dmem_store_uint16(SP0_ID, (unsigned)sp_address_of(array_name) + \
+ (index)*sizeof(uint16_t), value)
+
+#define store_sp_array_uint(array_name, index, value) \
+ sp_dmem_store_uint32(SP0_ID, (unsigned)sp_address_of(array_name) + \
+ (index)*sizeof(uint32_t), value)
+
+#define store_sp_var_with_offset(var, offset, data, bytes) \
+ sp_dmem_store(SP0_ID, (unsigned)sp_address_of(var) + \
+ offset, data, bytes)
+
+#define load_sp_var(var, data, bytes) \
+ sp_dmem_load(SP0_ID, (unsigned)sp_address_of(var), data, bytes)
+
+#define load_sp_var_with_offset(var, offset, data, bytes) \
+ sp_dmem_load(SP0_ID, (unsigned)sp_address_of(var) + offset, \
+ data, bytes)
+
+#endif /* __SP_LOCAL_H_INCLUDED__ */
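As a hedged illustration of the (deprecated) accessors above: they resolve a firmware DMEM variable through its generated HIVE_ADDR_* symbol. The variable name host2sp_command below is hypothetical and stands in for whatever symbol the firmware build produces.

/* Sketch: write a command word into SP DMEM and read it back. */
static uint32_t example_poke_sp_command(uint32_t cmd)
{
	/* expands to sp_dmem_store_uint32(SP0_ID, HIVE_ADDR_host2sp_command, cmd) */
	store_sp_int(host2sp_command, cmd);
	return load_sp_uint(host2sp_command);
}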
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/sp_private.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/sp_private.h
new file mode 100644
index 000000000000..e6283bf67ad3
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/sp_private.h
@@ -0,0 +1,163 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __SP_PRIVATE_H_INCLUDED__
+#define __SP_PRIVATE_H_INCLUDED__
+
+#include "sp_public.h"
+
+#include "device_access.h"
+
+#include "assert_support.h"
+
+STORAGE_CLASS_SP_C void sp_ctrl_store(
+ const sp_ID_t ID,
+ const hrt_address reg,
+ const hrt_data value)
+{
+	assert(ID < N_SP_ID);
+	assert(SP_CTRL_BASE[ID] != (hrt_address)-1);
+	ia_css_device_store_uint32(SP_CTRL_BASE[ID] + reg*sizeof(hrt_data), value);
+	return;
+}
+
+STORAGE_CLASS_SP_C hrt_data sp_ctrl_load(
+ const sp_ID_t ID,
+ const hrt_address reg)
+{
+	assert(ID < N_SP_ID);
+	assert(SP_CTRL_BASE[ID] != (hrt_address)-1);
+	return ia_css_device_load_uint32(SP_CTRL_BASE[ID] + reg*sizeof(hrt_data));
+}
+
+STORAGE_CLASS_SP_C bool sp_ctrl_getbit(
+ const sp_ID_t ID,
+ const hrt_address reg,
+ const unsigned int bit)
+{
+ hrt_data val = sp_ctrl_load(ID, reg);
+	return (val & (1UL << bit)) != 0;
+}
+
+STORAGE_CLASS_SP_C void sp_ctrl_setbit(
+ const sp_ID_t ID,
+ const hrt_address reg,
+ const unsigned int bit)
+{
+ hrt_data data = sp_ctrl_load(ID, reg);
+ sp_ctrl_store(ID, reg, (data | (1UL << bit)));
+	return;
+}
+
+STORAGE_CLASS_SP_C void sp_ctrl_clearbit(
+ const sp_ID_t ID,
+ const hrt_address reg,
+ const unsigned int bit)
+{
+ hrt_data data = sp_ctrl_load(ID, reg);
+ sp_ctrl_store(ID, reg, (data & ~(1UL << bit)));
+	return;
+}
+
+STORAGE_CLASS_SP_C void sp_dmem_store(
+ const sp_ID_t ID,
+ hrt_address addr,
+ const void *data,
+ const size_t size)
+{
+	assert(ID < N_SP_ID);
+	assert(SP_DMEM_BASE[ID] != (hrt_address)-1);
+	ia_css_device_store(SP_DMEM_BASE[ID] + addr, data, size);
+	return;
+}
+
+STORAGE_CLASS_SP_C void sp_dmem_load(
+ const sp_ID_t ID,
+ const hrt_address addr,
+ void *data,
+ const size_t size)
+{
+	assert(ID < N_SP_ID);
+	assert(SP_DMEM_BASE[ID] != (hrt_address)-1);
+	ia_css_device_load(SP_DMEM_BASE[ID] + addr, data, size);
+	return;
+}
+
+STORAGE_CLASS_SP_C void sp_dmem_store_uint8(
+ const sp_ID_t ID,
+ hrt_address addr,
+ const uint8_t data)
+{
+	assert(ID < N_SP_ID);
+	assert(SP_DMEM_BASE[ID] != (hrt_address)-1);
+	(void)ID;
+	ia_css_device_store_uint8(SP_DMEM_BASE[SP0_ID] + addr, data);
+	return;
+}
+
+STORAGE_CLASS_SP_C void sp_dmem_store_uint16(
+ const sp_ID_t ID,
+ hrt_address addr,
+ const uint16_t data)
+{
+	assert(ID < N_SP_ID);
+	assert(SP_DMEM_BASE[ID] != (hrt_address)-1);
+	(void)ID;
+	ia_css_device_store_uint16(SP_DMEM_BASE[SP0_ID] + addr, data);
+	return;
+}
+
+STORAGE_CLASS_SP_C void sp_dmem_store_uint32(
+ const sp_ID_t ID,
+ hrt_address addr,
+ const uint32_t data)
+{
+	assert(ID < N_SP_ID);
+	assert(SP_DMEM_BASE[ID] != (hrt_address)-1);
+	(void)ID;
+	ia_css_device_store_uint32(SP_DMEM_BASE[SP0_ID] + addr, data);
+	return;
+}
+
+STORAGE_CLASS_SP_C uint8_t sp_dmem_load_uint8(
+ const sp_ID_t ID,
+ const hrt_address addr)
+{
+	assert(ID < N_SP_ID);
+	assert(SP_DMEM_BASE[ID] != (hrt_address)-1);
+ (void)ID;
+ return ia_css_device_load_uint8(SP_DMEM_BASE[SP0_ID] + addr);
+}
+
+STORAGE_CLASS_SP_C uint16_t sp_dmem_load_uint16(
+ const sp_ID_t ID,
+ const hrt_address addr)
+{
+	assert(ID < N_SP_ID);
+	assert(SP_DMEM_BASE[ID] != (hrt_address)-1);
+ (void)ID;
+ return ia_css_device_load_uint16(SP_DMEM_BASE[SP0_ID] + addr);
+}
+
+STORAGE_CLASS_SP_C uint32_t sp_dmem_load_uint32(
+ const sp_ID_t ID,
+ const hrt_address addr)
+{
+	assert(ID < N_SP_ID);
+	assert(SP_DMEM_BASE[ID] != (hrt_address)-1);
+ (void)ID;
+ return ia_css_device_load_uint32(SP_DMEM_BASE[SP0_ID] + addr);
+}
+
+#endif /* __SP_PRIVATE_H_INCLUDED__ */
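The sp_ctrl_*bit() helpers above implement a plain read-modify-write on the SP control registers. A minimal sketch of the calling pattern, assuming the SP_SC_REG, SP_START_BIT and SP_IDLE_BIT values defined in sp_global.h elsewhere in this patch; this illustrates the pattern only and is not the driver's actual start/idle code:

/* Illustration only: raise the start bit and test the idle bit of SP0. */
static void example_sp_start(void)
{
	sp_ctrl_setbit(SP0_ID, SP_SC_REG, SP_START_BIT);
}

static bool example_sp_is_idle(void)
{
	return sp_ctrl_getbit(SP0_ID, SP_SC_REG, SP_IDLE_BIT);
}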
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/system_local.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/system_local.h
new file mode 100644
index 000000000000..111b346dfafb
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/system_local.h
@@ -0,0 +1,306 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __SYSTEM_LOCAL_H_INCLUDED__
+#define __SYSTEM_LOCAL_H_INCLUDED__
+
+#ifdef HRT_ISP_CSS_CUSTOM_HOST
+#ifndef HRT_USE_VIR_ADDRS
+#define HRT_USE_VIR_ADDRS
+#endif
+/* This interface is deprecated */
+/*#include "hive_isp_css_custom_host_hrt.h"*/
+#endif
+
+#include "system_global.h"
+
+#ifdef __FIST__
+#define HRT_ADDRESS_WIDTH 32 /* Surprise, this is a local property and even differs per platform */
+#else
+/* HRT assumes 32 bits by default (see Linux/include/hrt/hive_types.h); override it here in case the platform differs */
+#undef HRT_ADDRESS_WIDTH
+#define HRT_ADDRESS_WIDTH 64 /* Surprise, this is a local property */
+#endif
+
+#if !defined(__KERNEL__) || (1==1)
+/* This interface is deprecated */
+#include "hrt/hive_types.h"
+#else /* __KERNEL__ */
+#include <linux/types.h>
+
+#if HRT_ADDRESS_WIDTH==64
+typedef uint64_t hrt_address;
+#elif HRT_ADDRESS_WIDTH==32
+typedef uint32_t hrt_address;
+#else
+#error "system_local.h: HRT_ADDRESS_WIDTH must be one of {32,64}"
+#endif
+
+typedef uint32_t hrt_vaddress;
+typedef uint32_t hrt_data;
+#endif /* __KERNEL__ */
+
+/*
+ * Cell specific address maps
+ */
+#if HRT_ADDRESS_WIDTH==64
+
+#define GP_FIFO_BASE ((hrt_address)0x0000000000090104) /* This is NOT a base address */
+
+/* DDR */
+static const hrt_address DDR_BASE[N_DDR_ID] = {
+ (hrt_address)0x0000000120000000ULL};
+
+/* ISP */
+static const hrt_address ISP_CTRL_BASE[N_ISP_ID] = {
+ (hrt_address)0x0000000000020000ULL};
+
+static const hrt_address ISP_DMEM_BASE[N_ISP_ID] = {
+ (hrt_address)0x0000000000200000ULL};
+
+static const hrt_address ISP_BAMEM_BASE[N_BAMEM_ID] = {
+ (hrt_address)0x0000000000100000ULL};
+
+static const hrt_address ISP_VAMEM_BASE[N_VAMEM_ID] = {
+ (hrt_address)0x00000000001C0000ULL,
+ (hrt_address)0x00000000001D0000ULL,
+ (hrt_address)0x00000000001E0000ULL};
+
+static const hrt_address ISP_HMEM_BASE[N_HMEM_ID] = {
+ (hrt_address)0x00000000001F0000ULL};
+
+/* SP */
+static const hrt_address SP_CTRL_BASE[N_SP_ID] = {
+ (hrt_address)0x0000000000010000ULL};
+
+static const hrt_address SP_DMEM_BASE[N_SP_ID] = {
+ (hrt_address)0x0000000000300000ULL};
+
+static const hrt_address SP_PMEM_BASE[N_SP_ID] = {
+ (hrt_address)0x00000000000B0000ULL};
+
+/* MMU */
+#if defined (IS_ISP_2400_MAMOIADA_SYSTEM) || defined (IS_ISP_2401_MAMOIADA_SYSTEM)
+/*
+ * MMU0_ID: The data MMU
+ * MMU1_ID: The icache MMU
+ */
+static const hrt_address MMU_BASE[N_MMU_ID] = {
+ (hrt_address)0x0000000000070000ULL,
+ (hrt_address)0x00000000000A0000ULL};
+#else
+#error "system_local.h: SYSTEM must be one of {2400, 2401 }"
+#endif
+
+/* DMA */
+static const hrt_address DMA_BASE[N_DMA_ID] = {
+ (hrt_address)0x0000000000040000ULL};
+
+/* IRQ */
+static const hrt_address IRQ_BASE[N_IRQ_ID] = {
+ (hrt_address)0x0000000000000500ULL,
+ (hrt_address)0x0000000000030A00ULL,
+ (hrt_address)0x000000000008C000ULL,
+ (hrt_address)0x0000000000090200ULL};
+/*
+ (hrt_address)0x0000000000000500ULL};
+ */
+
+/* GDC */
+static const hrt_address GDC_BASE[N_GDC_ID] = {
+ (hrt_address)0x0000000000050000ULL,
+ (hrt_address)0x0000000000060000ULL};
+
+/* FIFO_MONITOR (not a subset of GP_DEVICE) */
+static const hrt_address FIFO_MONITOR_BASE[N_FIFO_MONITOR_ID] = {
+ (hrt_address)0x0000000000000000ULL};
+
+/*
+static const hrt_address GP_REGS_BASE[N_GP_REGS_ID] = {
+ (hrt_address)0x0000000000000000ULL};
+
+static const hrt_address GP_DEVICE_BASE[N_GP_DEVICE_ID] = {
+ (hrt_address)0x0000000000090000ULL};
+*/
+
+/* GP_DEVICE (single base for all separate GP_REG instances) */
+static const hrt_address GP_DEVICE_BASE[N_GP_DEVICE_ID] = {
+ (hrt_address)0x0000000000000000ULL};
+
+/* GP TIMER: all timer registers are intertwined,
+ * so having multiple base addresses for
+ * different timers does not help */
+static const hrt_address GP_TIMER_BASE =
+ (hrt_address)0x0000000000000600ULL;
+/* GPIO */
+static const hrt_address GPIO_BASE[N_GPIO_ID] = {
+ (hrt_address)0x0000000000000400ULL};
+
+/* TIMED_CTRL */
+static const hrt_address TIMED_CTRL_BASE[N_TIMED_CTRL_ID] = {
+ (hrt_address)0x0000000000000100ULL};
+
+
+/* INPUT_FORMATTER */
+static const hrt_address INPUT_FORMATTER_BASE[N_INPUT_FORMATTER_ID] = {
+ (hrt_address)0x0000000000030000ULL,
+ (hrt_address)0x0000000000030200ULL,
+ (hrt_address)0x0000000000030400ULL,
+ (hrt_address)0x0000000000030600ULL}; /* memcpy() */
+
+/* INPUT_SYSTEM */
+static const hrt_address INPUT_SYSTEM_BASE[N_INPUT_SYSTEM_ID] = {
+ (hrt_address)0x0000000000080000ULL};
+/* (hrt_address)0x0000000000081000ULL, */ /* capture A */
+/* (hrt_address)0x0000000000082000ULL, */ /* capture B */
+/* (hrt_address)0x0000000000083000ULL, */ /* capture C */
+/* (hrt_address)0x0000000000084000ULL, */ /* Acquisition */
+/* (hrt_address)0x0000000000085000ULL, */ /* DMA */
+/* (hrt_address)0x0000000000089000ULL, */ /* ctrl */
+/* (hrt_address)0x000000000008A000ULL, */ /* GP regs */
+/* (hrt_address)0x000000000008B000ULL, */ /* FIFO */
+/* (hrt_address)0x000000000008C000ULL, */ /* IRQ */
+
+/* RX, the MIPI lane control regs start at offset 0 */
+static const hrt_address RX_BASE[N_RX_ID] = {
+ (hrt_address)0x0000000000080100ULL};
+
+#elif HRT_ADDRESS_WIDTH==32
+
+#define GP_FIFO_BASE ((hrt_address)0x00090104) /* This is NOT a base address */
+
+/* DDR: Attention, this value is not defined for 32-bit */
+static const hrt_address DDR_BASE[N_DDR_ID] = {
+ (hrt_address)0x00000000UL};
+
+/* ISP */
+static const hrt_address ISP_CTRL_BASE[N_ISP_ID] = {
+ (hrt_address)0x00020000UL};
+
+static const hrt_address ISP_DMEM_BASE[N_ISP_ID] = {
+ (hrt_address)0x00200000UL};
+
+static const hrt_address ISP_BAMEM_BASE[N_BAMEM_ID] = {
+ (hrt_address)0x100000UL};
+
+static const hrt_address ISP_VAMEM_BASE[N_VAMEM_ID] = {
+ (hrt_address)0xffffffffUL,
+ (hrt_address)0xffffffffUL,
+ (hrt_address)0xffffffffUL};
+
+static const hrt_address ISP_HMEM_BASE[N_HMEM_ID] = {
+ (hrt_address)0xffffffffUL};
+
+/* SP */
+static const hrt_address SP_CTRL_BASE[N_SP_ID] = {
+ (hrt_address)0x00010000UL};
+
+static const hrt_address SP_DMEM_BASE[N_SP_ID] = {
+ (hrt_address)0x00300000UL};
+
+static const hrt_address SP_PMEM_BASE[N_SP_ID] = {
+ (hrt_address)0x000B0000UL};
+
+/* MMU */
+#if defined (IS_ISP_2400_MAMOIADA_SYSTEM) || defined (IS_ISP_2401_MAMOIADA_SYSTEM)
+/*
+ * MMU0_ID: The data MMU
+ * MMU1_ID: The icache MMU
+ */
+static const hrt_address MMU_BASE[N_MMU_ID] = {
+ (hrt_address)0x00070000UL,
+ (hrt_address)0x000A0000UL};
+#else
+#error "system_local.h: SYSTEM must be one of {2400, 2401 }"
+#endif
+
+/* DMA */
+static const hrt_address DMA_BASE[N_DMA_ID] = {
+ (hrt_address)0x00040000UL};
+
+/* IRQ */
+static const hrt_address IRQ_BASE[N_IRQ_ID] = {
+ (hrt_address)0x00000500UL,
+ (hrt_address)0x00030A00UL,
+ (hrt_address)0x0008C000UL,
+ (hrt_address)0x00090200UL};
+/*
+ (hrt_address)0x00000500UL};
+ */
+
+/* GDC */
+static const hrt_address GDC_BASE[N_GDC_ID] = {
+ (hrt_address)0x00050000UL,
+ (hrt_address)0x00060000UL};
+
+/* FIFO_MONITOR (not a subset of GP_DEVICE) */
+static const hrt_address FIFO_MONITOR_BASE[N_FIFO_MONITOR_ID] = {
+ (hrt_address)0x00000000UL};
+
+/*
+static const hrt_address GP_REGS_BASE[N_GP_REGS_ID] = {
+ (hrt_address)0x00000000UL};
+
+static const hrt_address GP_DEVICE_BASE[N_GP_DEVICE_ID] = {
+ (hrt_address)0x00090000UL};
+*/
+
+/* GP_DEVICE (single base for all separate GP_REG instances) */
+static const hrt_address GP_DEVICE_BASE[N_GP_DEVICE_ID] = {
+ (hrt_address)0x00000000UL};
+
+/* GP TIMER: all timer registers are intertwined,
+ * so having multiple base addresses for
+ * different timers does not help */
+static const hrt_address GP_TIMER_BASE =
+ (hrt_address)0x00000600UL;
+
+/* GPIO */
+static const hrt_address GPIO_BASE[N_GPIO_ID] = {
+ (hrt_address)0x00000400UL};
+
+/* TIMED_CTRL */
+static const hrt_address TIMED_CTRL_BASE[N_TIMED_CTRL_ID] = {
+ (hrt_address)0x00000100UL};
+
+
+/* INPUT_FORMATTER */
+static const hrt_address INPUT_FORMATTER_BASE[N_INPUT_FORMATTER_ID] = {
+ (hrt_address)0x00030000UL,
+ (hrt_address)0x00030200UL,
+ (hrt_address)0x00030400UL};
+/* (hrt_address)0x00030600UL, */ /* memcpy() */
+
+/* INPUT_SYSTEM */
+static const hrt_address INPUT_SYSTEM_BASE[N_INPUT_SYSTEM_ID] = {
+ (hrt_address)0x00080000UL};
+/* (hrt_address)0x00081000UL, */ /* capture A */
+/* (hrt_address)0x00082000UL, */ /* capture B */
+/* (hrt_address)0x00083000UL, */ /* capture C */
+/* (hrt_address)0x00084000UL, */ /* Acquisition */
+/* (hrt_address)0x00085000UL, */ /* DMA */
+/* (hrt_address)0x00089000UL, */ /* ctrl */
+/* (hrt_address)0x0008A000UL, */ /* GP regs */
+/* (hrt_address)0x0008B000UL, */ /* FIFO */
+/* (hrt_address)0x0008C000UL, */ /* IRQ */
+
+/* RX, the MIPI lane control regs start at offset 0 */
+static const hrt_address RX_BASE[N_RX_ID] = {
+ (hrt_address)0x00080100UL};
+
+#else
+#error "system_local.h: HRT_ADDRESS_WIDTH must be one of {32,64}"
+#endif
+
+#endif /* __SYSTEM_LOCAL_H_INCLUDED__ */
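Both halves of the address map above are simple lookup tables indexed by device ID, with (hrt_address)-1 as the sentinel for entries that are not wired up; the accessors in the *_private.h headers assert against that sentinel before touching a register. A small sketch of such a guard, using only identifiers declared here or in system_global.h:

/* Sketch: validate a device base address before register access. */
static inline bool example_mmu_base_is_valid(mmu_ID_t id)
{
	return (id < N_MMU_ID) && (MMU_BASE[id] != (hrt_address)-1);
}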
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/timed_ctrl.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/timed_ctrl.c
new file mode 100644
index 000000000000..cd12d74024f7
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/timed_ctrl.c
@@ -0,0 +1,74 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "timed_ctrl.h"
+
+#ifndef __INLINE_TIMED_CTRL__
+#include "timed_ctrl_private.h"
+#endif /* __INLINE_TIMED_CTRL__ */
+
+#include "assert_support.h"
+
+void timed_ctrl_snd_commnd(
+ const timed_ctrl_ID_t ID,
+ hrt_data mask,
+ hrt_data condition,
+ hrt_data counter,
+ hrt_address addr,
+ hrt_data value)
+{
+ OP___assert(ID == TIMED_CTRL0_ID);
+ OP___assert(TIMED_CTRL_BASE[ID] != (hrt_address)-1);
+
+ timed_ctrl_reg_store(ID, _HRT_TIMED_CONTROLLER_CMD_REG_IDX, mask);
+ timed_ctrl_reg_store(ID, _HRT_TIMED_CONTROLLER_CMD_REG_IDX, condition);
+ timed_ctrl_reg_store(ID, _HRT_TIMED_CONTROLLER_CMD_REG_IDX, counter);
+ timed_ctrl_reg_store(ID, _HRT_TIMED_CONTROLLER_CMD_REG_IDX, (hrt_data)addr);
+ timed_ctrl_reg_store(ID, _HRT_TIMED_CONTROLLER_CMD_REG_IDX, value);
+}
+
+/* pqiao TODO: make sure the following commands get the
+ * correct BASE address for both csim and android */
+
+void timed_ctrl_snd_sp_commnd(
+ const timed_ctrl_ID_t ID,
+ hrt_data mask,
+ hrt_data condition,
+ hrt_data counter,
+ const sp_ID_t SP_ID,
+ hrt_address offset,
+ hrt_data value)
+{
+ OP___assert(SP_ID < N_SP_ID);
+ OP___assert(SP_DMEM_BASE[SP_ID] != (hrt_address)-1);
+
+ timed_ctrl_snd_commnd(ID, mask, condition, counter,
+ SP_DMEM_BASE[SP_ID]+offset, value);
+}
+
+void timed_ctrl_snd_gpio_commnd(
+ const timed_ctrl_ID_t ID,
+ hrt_data mask,
+ hrt_data condition,
+ hrt_data counter,
+ const gpio_ID_t GPIO_ID,
+ hrt_address offset,
+ hrt_data value)
+{
+ OP___assert(GPIO_ID < N_GPIO_ID);
+ OP___assert(GPIO_BASE[GPIO_ID] != (hrt_address)-1);
+
+ timed_ctrl_snd_commnd(ID, mask, condition, counter,
+ GPIO_BASE[GPIO_ID]+offset, value);
+}
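timed_ctrl_snd_commnd() above drives the timed controller by writing five words to the same command register in a fixed order: mask, condition, counter, target address, value; the SP and GPIO wrappers differ only in how the target address is computed. A hedged usage sketch follows; the mask/condition/counter values, the GPIO offset and the GPIO0_ID enumerator are illustrative assumptions, not values taken from this patch:

/* Hypothetical: once (event & mask) matches the condition, wait 'counter'
 * ticks and then write 0x1 to offset 0x0 of GPIO block 0. */
static void example_arm_timed_gpio_write(void)
{
	timed_ctrl_snd_gpio_commnd(TIMED_CTRL0_ID,
				   0x0000000f,		/* mask      */
				   0x00000001,		/* condition */
				   100,			/* counter   */
				   GPIO0_ID,		/* assumed   */
				   (hrt_address)0x0,	/* offset    */
				   0x1);		/* value     */
}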
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/timed_ctrl_local.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/timed_ctrl_local.h
new file mode 100644
index 000000000000..e570813af28d
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/timed_ctrl_local.h
@@ -0,0 +1,20 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __TIMED_CTRL_LOCAL_H_INCLUDED__
+#define __TIMED_CTRL_LOCAL_H_INCLUDED__
+
+#include "timed_ctrl_global.h"
+
+#endif /* __TIMED_CTRL_LOCAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/timed_ctrl_private.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/timed_ctrl_private.h
new file mode 100644
index 000000000000..fb0fdbb88435
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/timed_ctrl_private.h
@@ -0,0 +1,34 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __TIMED_CTRL_PRIVATE_H_INCLUDED__
+#define __TIMED_CTRL_PRIVATE_H_INCLUDED__
+
+#include "timed_ctrl_public.h"
+
+#include "device_access.h"
+
+#include "assert_support.h"
+
+STORAGE_CLASS_TIMED_CTRL_C void timed_ctrl_reg_store(
+ const timed_ctrl_ID_t ID,
+ const unsigned int reg,
+ const hrt_data value)
+{
+	OP___assert(ID < N_TIMED_CTRL_ID);
+	OP___assert(TIMED_CTRL_BASE[ID] != (hrt_address)-1);
+ ia_css_device_store_uint32(TIMED_CTRL_BASE[ID] + reg*sizeof(hrt_data), value);
+}
+
+#endif /* __TIMED_CTRL_PRIVATE_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/vamem_local.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/vamem_local.h
new file mode 100644
index 000000000000..c4e99afe0d29
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/vamem_local.h
@@ -0,0 +1,20 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __VAMEM_LOCAL_H_INCLUDED__
+#define __VAMEM_LOCAL_H_INCLUDED__
+
+#include "vamem_global.h"
+
+#endif /* __VAMEM_LOCAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/vamem_private.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/vamem_private.h
new file mode 100644
index 000000000000..5e05258673d5
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/vamem_private.h
@@ -0,0 +1,37 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __VAMEM_PRIVATE_H_INCLUDED__
+#define __VAMEM_PRIVATE_H_INCLUDED__
+
+#include "vamem_public.h"
+
+#include <hrt/api.h>
+
+#include "assert_support.h"
+
+
+STORAGE_CLASS_ISP_C void isp_vamem_store(
+ const vamem_ID_t ID,
+ vamem_data_t *addr,
+ const vamem_data_t *data,
+ const size_t size) /* in vamem_data_t */
+{
+ assert(ID < N_VAMEM_ID);
+ assert(ISP_VAMEM_BASE[ID] != (hrt_address)-1);
+ hrt_master_port_store(ISP_VAMEM_BASE[ID] + (unsigned)addr, data, size * sizeof(vamem_data_t));
+}
+
+
+#endif /* __VAMEM_PRIVATE_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/vmem.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/vmem.c
new file mode 100644
index 000000000000..ea22c23fc7a4
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/vmem.c
@@ -0,0 +1,258 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2016, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "isp.h"
+#include "vmem.h"
+#include "vmem_local.h"
+
+#if !defined(HRT_MEMORY_ACCESS)
+#include "ia_css_device_access.h"
+#endif
+#include "assert_support.h"
+#include "platform_support.h" /* hrt_sleep() */
+
+typedef unsigned long long hive_uedge;
+typedef hive_uedge *hive_wide;
+
+/* Copied from SDK: sim_semantics.c */
+
+/* subword bits move like this: MSB[____xxxx____]LSB -> MSB[00000000xxxx]LSB */
+#define SUBWORD(w, start, end) (((w) & (((1ULL << ((end)-1))-1) << 1 | 1)) >> (start))
+
+/* inverse subword bits move like this: MSB[xxxx____xxxx]LSB -> MSB[xxxx0000xxxx]LSB */
+#define INV_SUBWORD(w, start, end) ((w) & (~(((1ULL << ((end)-1))-1) << 1 | 1) | ((1ULL << (start))-1)) )
+
+#define uedge_bits (8*sizeof(hive_uedge))
+#define move_lower_bits(target, target_bit, src, src_bit) move_subword(target, target_bit, src, 0, src_bit)
+#define move_upper_bits(target, target_bit, src, src_bit) move_subword(target, target_bit, src, src_bit, uedge_bits)
+#define move_word(target, target_bit, src) move_subword(target, target_bit, src, 0, uedge_bits)
+
+static void
+move_subword (
+ hive_uedge *target,
+ unsigned target_bit,
+ hive_uedge src,
+ unsigned src_start,
+ unsigned src_end)
+{
+ unsigned int start_elem = target_bit / uedge_bits;
+ unsigned int start_bit = target_bit % uedge_bits;
+ unsigned subword_width = src_end - src_start;
+
+ hive_uedge src_subword = SUBWORD(src, src_start, src_end);
+
+ if (subword_width + start_bit > uedge_bits) { /* overlap */
+ hive_uedge old_val1;
+ hive_uedge old_val0 = INV_SUBWORD(target[start_elem], start_bit, uedge_bits);
+ target[start_elem] = old_val0 | (src_subword << start_bit);
+ old_val1 = INV_SUBWORD(target[start_elem+1], 0, subword_width + start_bit - uedge_bits);
+ target[start_elem+1] = old_val1 | (src_subword >> ( uedge_bits - start_bit));
+ } else {
+ hive_uedge old_val = INV_SUBWORD(target[start_elem], start_bit, start_bit + subword_width);
+ target[start_elem] = old_val | (src_subword << start_bit);
+ }
+}
+
+static void
+hive_sim_wide_unpack(
+ hive_wide vector,
+ hive_wide elem,
+ hive_uint elem_bits,
+ hive_uint index)
+{
+ /* pointers into wide_type: */
+ unsigned int start_elem = (elem_bits * index) / uedge_bits;
+ unsigned int start_bit = (elem_bits * index) % uedge_bits;
+ unsigned int end_elem = (elem_bits * (index + 1) - 1) / uedge_bits;
+ unsigned int end_bit = ((elem_bits * (index + 1) - 1) % uedge_bits) + 1;
+
+ if (elem_bits == uedge_bits) {
+ /* easy case for speedup: */
+ elem[0] = vector[index];
+ } else if (start_elem == end_elem) {
+ /* only one (<=64 bits) element needs to be (partly) copied: */
+ move_subword(elem, 0, vector[start_elem], start_bit, end_bit);
+ } else {
+ /* general case: handles edge spanning cases (includes >64bit elements) */
+ unsigned int bits_written = 0;
+ unsigned int i;
+ move_upper_bits(elem, bits_written, vector[start_elem], start_bit);
+ bits_written += (64 - start_bit);
+ for(i = start_elem+1; i < end_elem; i++) {
+ move_word(elem, bits_written, vector[i]);
+ bits_written += uedge_bits;
+ }
+ move_lower_bits(elem, bits_written, vector[end_elem], end_bit);
+ }
+}
+
+static void
+hive_sim_wide_pack(
+ hive_wide vector,
+ hive_wide elem,
+ hive_uint elem_bits,
+ hive_uint index)
+{
+ /* pointers into wide_type: */
+ unsigned int start_elem = (elem_bits * index) / uedge_bits;
+
+ /* easy case for speedup: */
+ if (elem_bits == uedge_bits) {
+ vector[start_elem] = elem[0];
+ } else if (elem_bits > uedge_bits) {
+ unsigned bits_to_write = elem_bits;
+ unsigned start_bit = elem_bits * index;
+ unsigned i = 0;
+ for(; bits_to_write > uedge_bits; bits_to_write -= uedge_bits, i++, start_bit += uedge_bits) {
+ move_word(vector, start_bit, elem[i]);
+ }
+ move_lower_bits(vector, start_bit, elem[i], bits_to_write);
+ } else {
+ /* only one element needs to be (partly) copied: */
+ move_lower_bits(vector, elem_bits * index, elem[0], elem_bits);
+ }
+}
+
+static void load_vector (
+ const isp_ID_t ID,
+ t_vmem_elem *to,
+ const t_vmem_elem *from)
+{
+ unsigned i;
+ hive_uedge *data;
+ unsigned size = sizeof(short)*ISP_NWAY;
+ VMEM_ARRAY(v, 2*ISP_NWAY); /* Need 2 vectors to work around vmem hss bug */
+ assert(ISP_BAMEM_BASE[ID] != (hrt_address)-1);
+#if !defined(HRT_MEMORY_ACCESS)
+ ia_css_device_load(ISP_BAMEM_BASE[ID] + (unsigned long)from, &v[0][0], size);
+#else
+ hrt_master_port_load(ISP_BAMEM_BASE[ID] + (unsigned long)from, &v[0][0], size);
+#endif
+ data = (hive_uedge *)v;
+ for (i = 0; i < ISP_NWAY; i++) {
+ hive_uedge elem = 0;
+ hive_sim_wide_unpack(data, &elem, ISP_VEC_ELEMBITS, i);
+ to[i] = elem;
+ }
+ hrt_sleep(); /* Spend at least 1 cycle per vector */
+}
+
+static void store_vector (
+ const isp_ID_t ID,
+ t_vmem_elem *to,
+ const t_vmem_elem *from)
+{
+ unsigned i;
+ unsigned size = sizeof(short)*ISP_NWAY;
+ VMEM_ARRAY(v, 2*ISP_NWAY); /* Need 2 vectors to work around vmem hss bug */
+ //load_vector (&v[1][0], &to[ISP_NWAY]); /* Fetch the next vector, since it will be overwritten. */
+ hive_uedge *data = (hive_uedge *)v;
+ for (i = 0; i < ISP_NWAY; i++) {
+ hive_sim_wide_pack(data, (hive_wide)&from[i], ISP_VEC_ELEMBITS, i);
+ }
+ assert(ISP_BAMEM_BASE[ID] != (hrt_address)-1);
+#if !defined(HRT_MEMORY_ACCESS)
+ ia_css_device_store(ISP_BAMEM_BASE[ID] + (unsigned long)to, &v, size);
+#else
+ //hrt_mem_store (ISP, VMEM, (unsigned)to, &v, siz); /* This will overwrite the next vector as well */
+ hrt_master_port_store(ISP_BAMEM_BASE[ID] + (unsigned long)to, &v, size);
+#endif
+ hrt_sleep(); /* Spend at least 1 cycle per vector */
+}
+
+void isp_vmem_load(
+ const isp_ID_t ID,
+ const t_vmem_elem *from,
+ t_vmem_elem *to,
+ unsigned elems) /* In t_vmem_elem */
+{
+ unsigned c;
+ const t_vmem_elem *vp = from;
+ assert(ID < N_ISP_ID);
+ assert((unsigned long)from % ISP_VEC_ALIGN == 0);
+ assert(elems % ISP_NWAY == 0);
+ for (c = 0; c < elems; c += ISP_NWAY) {
+ load_vector(ID, &to[c], vp);
+ vp = (t_vmem_elem *)((char*)vp + ISP_VEC_ALIGN);
+ }
+}
+
+void isp_vmem_store(
+ const isp_ID_t ID,
+ t_vmem_elem *to,
+ const t_vmem_elem *from,
+ unsigned elems) /* In t_vmem_elem */
+{
+ unsigned c;
+ t_vmem_elem *vp = to;
+ assert(ID < N_ISP_ID);
+ assert((unsigned long)to % ISP_VEC_ALIGN == 0);
+ assert(elems % ISP_NWAY == 0);
+ for (c = 0; c < elems; c += ISP_NWAY) {
+ store_vector (ID, vp, &from[c]);
+ vp = (t_vmem_elem *)((char*)vp + ISP_VEC_ALIGN);
+ }
+}
+
+void isp_vmem_2d_load (
+ const isp_ID_t ID,
+ const t_vmem_elem *from,
+ t_vmem_elem *to,
+ unsigned height,
+ unsigned width,
+ unsigned stride_to, /* In t_vmem_elem */
+ unsigned stride_from /* In t_vmem_elem */)
+{
+ unsigned h;
+
+ assert(ID < N_ISP_ID);
+ assert((unsigned long)from % ISP_VEC_ALIGN == 0);
+ assert(width % ISP_NWAY == 0);
+ assert(stride_from % ISP_NWAY == 0);
+ for (h = 0; h < height; h++) {
+ unsigned c;
+ const t_vmem_elem *vp = from;
+ for (c = 0; c < width; c += ISP_NWAY) {
+ load_vector(ID, &to[stride_to*h + c], vp);
+ vp = (t_vmem_elem *)((char*)vp + ISP_VEC_ALIGN);
+ }
+ from = (const t_vmem_elem *)((const char *)from + stride_from/ISP_NWAY*ISP_VEC_ALIGN);
+ }
+}
+
+void isp_vmem_2d_store (
+ const isp_ID_t ID,
+ t_vmem_elem *to,
+ const t_vmem_elem *from,
+ unsigned height,
+ unsigned width,
+ unsigned stride_to, /* In t_vmem_elem */
+ unsigned stride_from /* In t_vmem_elem */)
+{
+ unsigned h;
+
+ assert(ID < N_ISP_ID);
+ assert((unsigned long)to % ISP_VEC_ALIGN == 0);
+ assert(width % ISP_NWAY == 0);
+ assert(stride_to % ISP_NWAY == 0);
+ for (h = 0; h < height; h++) {
+ unsigned c;
+ t_vmem_elem *vp = to;
+ for (c = 0; c < width; c += ISP_NWAY) {
+ store_vector (ID, vp, &from[stride_from*h + c]);
+ vp = (t_vmem_elem *)((char*)vp + ISP_VEC_ALIGN);
+ }
+ to = (t_vmem_elem *)((char *)to + stride_to/ISP_NWAY*ISP_VEC_ALIGN);
+ }
+}
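The packing helpers at the top of this file are easiest to follow with a concrete case. The self-contained sketch below copies the shape of the SUBWORD() macro (as EX_SUBWORD, so it does not depend on the ISP headers) and checks one example: bits 4..11 of 0xABC are 0xAB, which is exactly the value that move_subword() shifts into the target word; when start_bit plus the field width exceeds 64, the remainder spills into the next element, which is the "overlap" branch above.

#include <assert.h>

/* Local copy of the SUBWORD() shape: keep bits [start, end) and shift
 * them down to bit 0. */
#define EX_SUBWORD(w, start, end) \
	(((w) & (((1ULL << ((end) - 1)) - 1) << 1 | 1)) >> (start))

static void example_subword(void)
{
	unsigned long long w = 0xABCULL;

	/* Bits 4..11 of 0x...ABC are 0xAB. */
	assert(EX_SUBWORD(w, 4, 12) == 0xABULL);
}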
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/vmem_local.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/vmem_local.h
new file mode 100644
index 000000000000..de85644b885e
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/vmem_local.h
@@ -0,0 +1,55 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __VMEM_LOCAL_H_INCLUDED__
+#define __VMEM_LOCAL_H_INCLUDED__
+
+#include "type_support.h"
+#include "vmem_global.h"
+
+typedef uint16_t t_vmem_elem;
+
+#define VMEM_ARRAY(x,s) t_vmem_elem x[s/ISP_NWAY][ISP_NWAY]
+
+void isp_vmem_load(
+ const isp_ID_t ID,
+ const t_vmem_elem *from,
+ t_vmem_elem *to,
+ unsigned elems); /* In t_vmem_elem */
+
+void isp_vmem_store(
+ const isp_ID_t ID,
+ t_vmem_elem *to,
+ const t_vmem_elem *from,
+ unsigned elems); /* In t_vmem_elem */
+
+void isp_vmem_2d_load (
+ const isp_ID_t ID,
+ const t_vmem_elem *from,
+ t_vmem_elem *to,
+ unsigned height,
+ unsigned width,
+ unsigned stride_to, /* In t_vmem_elem */
+ unsigned stride_from /* In t_vmem_elem */);
+
+void isp_vmem_2d_store (
+ const isp_ID_t ID,
+ t_vmem_elem *to,
+ const t_vmem_elem *from,
+ unsigned height,
+ unsigned width,
+ unsigned stride_to, /* In t_vmem_elem */
+ unsigned stride_from /* In t_vmem_elem */);
+
+#endif /* __VMEM_LOCAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/vmem_private.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/vmem_private.h
new file mode 100644
index 000000000000..f48d1281b5a7
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/vmem_private.h
@@ -0,0 +1,20 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __VMEM_PRIVATE_H_INCLUDED__
+#define __VMEM_PRIVATE_H_INCLUDED__
+
+#include "vmem_public.h"
+
+#endif /* __VMEM_PRIVATE_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/input_formatter_global.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/input_formatter_global.h
new file mode 100644
index 000000000000..5654d911db65
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/input_formatter_global.h
@@ -0,0 +1,130 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __INPUT_FORMATTER_GLOBAL_H_INCLUDED__
+#define __INPUT_FORMATTER_GLOBAL_H_INCLUDED__
+
+#define IS_INPUT_FORMATTER_VERSION2
+#define IS_INPUT_SWITCH_VERSION2
+
+#include <type_support.h>
+#include <system_types.h>
+#include "if_defs.h"
+#include "str2mem_defs.h"
+#include "input_switch_2400_defs.h"
+
+#define _HIVE_INPUT_SWITCH_GET_FSYNC_REG_LSB(ch_id) ((ch_id) * 3)
+
+#define HIVE_SWITCH_N_CHANNELS 4
+#define HIVE_SWITCH_N_FORMATTYPES 32
+#define HIVE_SWITCH_N_SWITCH_CODE 4
+#define HIVE_SWITCH_M_CHANNELS 0x00000003
+#define HIVE_SWITCH_M_FORMATTYPES 0x0000001f
+#define HIVE_SWITCH_M_SWITCH_CODE 0x00000003
+#define HIVE_SWITCH_M_FSYNC 0x00000007
+
+#define HIVE_SWITCH_ENCODE_FSYNC(x) \
+ (1U<<(((x)-1)&HIVE_SWITCH_M_CHANNELS))
+
+#define _HIVE_INPUT_SWITCH_GET_LUT_FIELD(reg, bit_index) \
+ (((reg) >> (bit_index)) & HIVE_SWITCH_M_SWITCH_CODE)
+#define _HIVE_INPUT_SWITCH_SET_LUT_FIELD(reg, bit_index, val) \
+ (((reg) & ~(HIVE_SWITCH_M_SWITCH_CODE<<(bit_index))) | (((hrt_data)(val)&HIVE_SWITCH_M_SWITCH_CODE)<<(bit_index)))
+#define _HIVE_INPUT_SWITCH_GET_FSYNC_FIELD(reg, bit_index) \
+ (((reg) >> (bit_index)) & HIVE_SWITCH_M_FSYNC)
+#define _HIVE_INPUT_SWITCH_SET_FSYNC_FIELD(reg, bit_index, val) \
+ (((reg) & ~(HIVE_SWITCH_M_FSYNC<<(bit_index))) | (((hrt_data)(val)&HIVE_SWITCH_M_FSYNC)<<(bit_index)))
+
+typedef struct input_formatter_cfg_s input_formatter_cfg_t;
+
+/* Hardware registers */
+/*#define HIVE_IF_RESET_ADDRESS 0x000*/ /* deprecated */
+#define HIVE_IF_START_LINE_ADDRESS 0x004
+#define HIVE_IF_START_COLUMN_ADDRESS 0x008
+#define HIVE_IF_CROPPED_HEIGHT_ADDRESS 0x00C
+#define HIVE_IF_CROPPED_WIDTH_ADDRESS 0x010
+#define HIVE_IF_VERTICAL_DECIMATION_ADDRESS 0x014
+#define HIVE_IF_HORIZONTAL_DECIMATION_ADDRESS 0x018
+#define HIVE_IF_H_DEINTERLEAVING_ADDRESS 0x01C
+#define HIVE_IF_LEFTPADDING_WIDTH_ADDRESS 0x020
+#define HIVE_IF_END_OF_LINE_OFFSET_ADDRESS 0x024
+#define HIVE_IF_VMEM_START_ADDRESS_ADDRESS 0x028
+#define HIVE_IF_VMEM_END_ADDRESS_ADDRESS 0x02C
+#define HIVE_IF_VMEM_INCREMENT_ADDRESS 0x030
+#define HIVE_IF_YUV_420_FORMAT_ADDRESS 0x034
+#define HIVE_IF_VSYNCK_ACTIVE_LOW_ADDRESS 0x038
+#define HIVE_IF_HSYNCK_ACTIVE_LOW_ADDRESS 0x03C
+#define HIVE_IF_ALLOW_FIFO_OVERFLOW_ADDRESS 0x040
+#define HIVE_IF_BLOCK_FIFO_NO_REQ_ADDRESS 0x044
+#define HIVE_IF_V_DEINTERLEAVING_ADDRESS 0x048
+#define HIVE_IF_FSM_CROP_PIXEL_COUNTER 0x110
+#define HIVE_IF_FSM_CROP_LINE_COUNTER 0x10C
+#define HIVE_IF_FSM_CROP_STATUS 0x108
+
+/* Registers only for simulation */
+#define HIVE_IF_CRUN_MODE_ADDRESS 0x04C
+#define HIVE_IF_DUMP_OUTPUT_ADDRESS 0x050
+
+/* Follow the DMA syntax, "cmd" last */
+#define IF_PACK(val, cmd) ((val & 0x0fff) | (cmd /*& 0xf000*/))
+
+#define HIVE_STR2MEM_SOFT_RESET_REG_ADDRESS (_STR2MEM_SOFT_RESET_REG_ID * _STR2MEM_REG_ALIGN)
+#define HIVE_STR2MEM_INPUT_ENDIANNESS_REG_ADDRESS (_STR2MEM_INPUT_ENDIANNESS_REG_ID * _STR2MEM_REG_ALIGN)
+#define HIVE_STR2MEM_OUTPUT_ENDIANNESS_REG_ADDRESS (_STR2MEM_OUTPUT_ENDIANNESS_REG_ID * _STR2MEM_REG_ALIGN)
+#define HIVE_STR2MEM_BIT_SWAPPING_REG_ADDRESS (_STR2MEM_BIT_SWAPPING_REG_ID * _STR2MEM_REG_ALIGN)
+#define HIVE_STR2MEM_BLOCK_SYNC_LEVEL_REG_ADDRESS (_STR2MEM_BLOCK_SYNC_LEVEL_REG_ID * _STR2MEM_REG_ALIGN)
+#define HIVE_STR2MEM_PACKET_SYNC_LEVEL_REG_ADDRESS (_STR2MEM_PACKET_SYNC_LEVEL_REG_ID * _STR2MEM_REG_ALIGN)
+#define HIVE_STR2MEM_READ_POST_WRITE_SYNC_ENABLE_REG_ADDRESS (_STR2MEM_READ_POST_WRITE_SYNC_ENABLE_REG_ID * _STR2MEM_REG_ALIGN)
+#define HIVE_STR2MEM_DUAL_BYTE_INPUTS_ENABLED_REG_ADDRESS (_STR2MEM_DUAL_BYTE_INPUTS_ENABLED_REG_ID * _STR2MEM_REG_ALIGN)
+#define HIVE_STR2MEM_EN_STAT_UPDATE_ADDRESS (_STR2MEM_EN_STAT_UPDATE_ID * _STR2MEM_REG_ALIGN)
+
+/*
+ * This data structure is shared between host and SP
+ */
+struct input_formatter_cfg_s {
+ uint32_t start_line;
+ uint32_t start_column;
+ uint32_t left_padding;
+ uint32_t cropped_height;
+ uint32_t cropped_width;
+ uint32_t deinterleaving;
+ uint32_t buf_vecs;
+ uint32_t buf_start_index;
+ uint32_t buf_increment;
+ uint32_t buf_eol_offset;
+ uint32_t is_yuv420_format;
+ uint32_t block_no_reqs;
+};
+
+#define DEFAULT_IF_CONFIG \
+{ \
+ 0, /* start_line */\
+ 0, /* start_column */\
+ 0, /* left_padding */\
+ 0, /* cropped_height */\
+ 0, /* cropped_width */\
+ 0, /* deinterleaving */\
+ 0, /*.buf_vecs */\
+ 0, /* buf_start_index */\
+ 0, /* buf_increment */\
+ 0, /* buf_eol_offset */\
+ false, /* is_yuv420_format */\
+ false /* block_no_reqs */\
+}
+
+extern const hrt_address HIVE_IF_SRST_ADDRESS[N_INPUT_FORMATTER_ID];
+extern const hrt_data HIVE_IF_SRST_MASK[N_INPUT_FORMATTER_ID];
+extern const uint8_t HIVE_IF_SWITCH_CODE[N_INPUT_FORMATTER_ID];
+
+#endif /* __INPUT_FORMATTER_GLOBAL_H_INCLUDED__ */
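The _HIVE_INPUT_SWITCH_*_LUT_FIELD() macros above are ordinary field insert/extract helpers: clear the 2-bit HIVE_SWITCH_M_SWITCH_CODE mask at bit_index, then OR in the new code. A hedged sketch of a caller; the assumption that each format type occupies two consecutive bits (bit_index = fmt_type * 2) is for illustration only and is not stated in this header:

/* Sketch: set switch code 'code' for format-type slot 'fmt_type' in a
 * local copy of a LUT register value (no hardware access here). */
static hrt_data example_set_switch_code(hrt_data lut, unsigned fmt_type,
					unsigned code)
{
	const unsigned bit_index = fmt_type * 2;	/* assumed layout */

	return _HIVE_INPUT_SWITCH_SET_LUT_FIELD(lut, bit_index, code);
}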
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/input_system_global.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/input_system_global.h
new file mode 100644
index 000000000000..9ba36525e8d3
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/input_system_global.h
@@ -0,0 +1,155 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __INPUT_SYSTEM_GLOBAL_H_INCLUDED__
+#define __INPUT_SYSTEM_GLOBAL_H_INCLUDED__
+
+#define IS_INPUT_SYSTEM_VERSION_2
+
+#include <type_support.h>
+
+//CSI receiver has 3 ports.
+#define N_CSI_PORTS (3)
+//AM: Use previous define for this.
+
+//MIPI allows up to 4 channels.
+#define N_CHANNELS (4)
+// 12KB = 256bit x 384 words
+#define IB_CAPACITY_IN_WORDS (384)
+
+typedef enum {
+ MIPI_0LANE_CFG = 0,
+ MIPI_1LANE_CFG = 1,
+ MIPI_2LANE_CFG = 2,
+ MIPI_3LANE_CFG = 3,
+ MIPI_4LANE_CFG = 4
+} mipi_lane_cfg_t;
+
+typedef enum {
+ INPUT_SYSTEM_SOURCE_SENSOR = 0,
+ INPUT_SYSTEM_SOURCE_FIFO,
+ INPUT_SYSTEM_SOURCE_TPG,
+ INPUT_SYSTEM_SOURCE_PRBS,
+ INPUT_SYSTEM_SOURCE_MEMORY,
+ N_INPUT_SYSTEM_SOURCE
+} input_system_source_t;
+
+/* internal routing configuration */
+typedef enum {
+ INPUT_SYSTEM_DISCARD_ALL = 0,
+ INPUT_SYSTEM_CSI_BACKEND = 1,
+ INPUT_SYSTEM_INPUT_BUFFER = 2,
+ INPUT_SYSTEM_MULTICAST = 3,
+ N_INPUT_SYSTEM_CONNECTION
+} input_system_connection_t;
+
+typedef enum {
+ INPUT_SYSTEM_MIPI_PORT0,
+ INPUT_SYSTEM_MIPI_PORT1,
+ INPUT_SYSTEM_MIPI_PORT2,
+ INPUT_SYSTEM_ACQUISITION_UNIT,
+ N_INPUT_SYSTEM_MULTIPLEX
+} input_system_multiplex_t;
+
+typedef enum {
+ INPUT_SYSTEM_SINK_MEMORY = 0,
+ INPUT_SYSTEM_SINK_ISP,
+ INPUT_SYSTEM_SINK_SP,
+ N_INPUT_SYSTEM_SINK
+} input_system_sink_t;
+
+typedef enum {
+ INPUT_SYSTEM_FIFO_CAPTURE = 0,
+ INPUT_SYSTEM_FIFO_CAPTURE_WITH_COUNTING,
+ INPUT_SYSTEM_SRAM_BUFFERING,
+ INPUT_SYSTEM_XMEM_BUFFERING,
+ INPUT_SYSTEM_XMEM_CAPTURE,
+ INPUT_SYSTEM_XMEM_ACQUIRE,
+ N_INPUT_SYSTEM_BUFFERING_MODE
+} buffering_mode_t;
+
+typedef struct input_system_cfg_s input_system_cfg_t;
+typedef struct sync_generator_cfg_s sync_generator_cfg_t;
+typedef struct tpg_cfg_s tpg_cfg_t;
+typedef struct prbs_cfg_s prbs_cfg_t;
+
+/* MW: uint16_t should be sufficient */
+struct input_system_cfg_s {
+ uint32_t no_side_band;
+ uint32_t fmt_type;
+ uint32_t ch_id;
+ uint32_t input_mode;
+};
+
+struct sync_generator_cfg_s {
+ uint32_t width;
+ uint32_t height;
+ uint32_t hblank_cycles;
+ uint32_t vblank_cycles;
+};
+
+/* MW: tpg & prbs are exclusive */
+struct tpg_cfg_s {
+ uint32_t x_mask;
+ uint32_t y_mask;
+ uint32_t x_delta;
+ uint32_t y_delta;
+ uint32_t xy_mask;
+ sync_generator_cfg_t sync_gen_cfg;
+};
+
+struct prbs_cfg_s {
+ uint32_t seed;
+ sync_generator_cfg_t sync_gen_cfg;
+};
+
+struct gpfifo_cfg_s {
+// TBD.
+ sync_generator_cfg_t sync_gen_cfg;
+};
+
+typedef struct gpfifo_cfg_s gpfifo_cfg_t;
+
+//ALX: Commented out to make compilation pass.
+//typedef struct input_system_cfg_s input_system_cfg_t;
+
+struct ib_buffer_s {
+ uint32_t mem_reg_size;
+ uint32_t nof_mem_regs;
+ uint32_t mem_reg_addr;
+};
+
+typedef struct ib_buffer_s ib_buffer_t;
+
+struct csi_cfg_s {
+ uint32_t csi_port;
+ buffering_mode_t buffering_mode;
+ ib_buffer_t csi_buffer;
+ ib_buffer_t acquisition_buffer;
+ uint32_t nof_xmem_buffers;
+};
+
+typedef struct csi_cfg_s csi_cfg_t;
+
+typedef enum {
+ INPUT_SYSTEM_CFG_FLAG_RESET = 0,
+ INPUT_SYSTEM_CFG_FLAG_SET = 1U << 0,
+ INPUT_SYSTEM_CFG_FLAG_BLOCKED = 1U << 1,
+ INPUT_SYSTEM_CFG_FLAG_REQUIRED = 1U << 2,
+ INPUT_SYSTEM_CFG_FLAG_CONFLICT = 1U << 3 // To mark a conflicting configuration.
+} input_system_cfg_flag_t;
+
+typedef uint32_t input_system_config_flags_t;
+
+#endif /* __INPUT_SYSTEM_GLOBAL_H_INCLUDED__ */
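input_system_config_flags_t above is a plain bit mask assembled from input_system_cfg_flag_t values. A minimal sketch of the intended use (the helper names are invented for illustration):

/* Invented helpers: mark a resource configuration as set, and test for a
 * conflicting configuration. */
static inline input_system_config_flags_t
example_cfg_mark_set(input_system_config_flags_t flags)
{
	return flags | INPUT_SYSTEM_CFG_FLAG_SET;
}

static inline bool
example_cfg_has_conflict(input_system_config_flags_t flags)
{
	return (flags & INPUT_SYSTEM_CFG_FLAG_CONFLICT) != 0;
}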
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/irq_global.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/irq_global.h
new file mode 100644
index 000000000000..64554d80dc0b
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/irq_global.h
@@ -0,0 +1,45 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IRQ_GLOBAL_H_INCLUDED__
+#define __IRQ_GLOBAL_H_INCLUDED__
+
+#include <system_types.h>
+
+#define IS_IRQ_VERSION_2
+#define IS_IRQ_MAP_VERSION_2
+
+/* We cannot include the (hrt host ID) file defining the "CSS_RECEIVER" property without side effects */
+#ifndef HAS_NO_RX
+#if defined(IS_ISP_2400_MAMOIADA_SYSTEM)
+/*#define CSS_RECEIVER testbench_isp_inp_sys_csi_receiver*/
+#include "hive_isp_css_irq_types_hrt.h" /* enum hrt_isp_css_irq */
+#elif defined(IS_ISP_2401_MAMOIADA_SYSTEM)
+/*#define CSS_RECEIVER testbench_isp_is_2400_inp_sys_csi_receiver*/
+#include "hive_isp_css_2401_irq_types_hrt.h" /* enum hrt_isp_css_irq */
+#else
+#error "irq_global.h: 2400_SYSTEM must be one of {2400, 2401 }"
+#endif
+#endif
+
+/* The IRQ is not mapped uniformly on its related interfaces */
+#define IRQ_SW_CHANNEL_OFFSET hrt_isp_css_irq_sw_pin_0
+
+typedef enum {
+ IRQ_SW_CHANNEL0_ID = hrt_isp_css_irq_sw_pin_0 - IRQ_SW_CHANNEL_OFFSET,
+ IRQ_SW_CHANNEL1_ID = hrt_isp_css_irq_sw_pin_1 - IRQ_SW_CHANNEL_OFFSET,
+ N_IRQ_SW_CHANNEL_ID
+} irq_sw_channel_id_t;
+
+#endif /* __IRQ_GLOBAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/isp_global.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/isp_global.h
new file mode 100644
index 000000000000..14d574849a5b
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/isp_global.h
@@ -0,0 +1,115 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __ISP_GLOBAL_H_INCLUDED__
+#define __ISP_GLOBAL_H_INCLUDED__
+
+#include <system_types.h>
+
+#if defined (HAS_ISP_2401_MAMOIADA)
+#define IS_ISP_2401_MAMOIADA
+
+#include "isp2401_mamoiada_params.h"
+#elif defined (HAS_ISP_2400_MAMOIADA)
+#define IS_ISP_2400_MAMOIADA
+
+#include "isp2400_mamoiada_params.h"
+#else
+#error "isp_global_h: ISP_2400_MAMOIDA must be one of {2400, 2401 }"
+#endif
+
+#define ISP_PMEM_WIDTH_LOG2 ISP_LOG2_PMEM_WIDTH
+#define ISP_PMEM_SIZE ISP_PMEM_DEPTH
+
+#define ISP_NWAY_LOG2 6
+#define ISP_VEC_NELEMS_LOG2 ISP_NWAY_LOG2
+
+#ifdef ISP2401
+#ifdef PIPE_GENERATION
+#define PIPEMEM(x) MEM(x)
+#define ISP_NWAY (1<<ISP_NWAY_LOG2)
+#else
+#define PIPEMEM(x)
+#endif
+
+#endif
+/* The number of data bytes in a vector disregarding the reduced precision */
+#define ISP_VEC_BYTES (ISP_VEC_NELEMS*sizeof(uint16_t))
+
+/* ISP SC Registers */
+#define ISP_SC_REG 0x00
+#define ISP_PC_REG 0x07
+#define ISP_IRQ_READY_REG 0x00
+#define ISP_IRQ_CLEAR_REG 0x00
+
+/* ISP SC Register bits */
+#define ISP_RST_BIT 0x00
+#define ISP_START_BIT 0x01
+#define ISP_BREAK_BIT 0x02
+#define ISP_RUN_BIT 0x03
+#define ISP_BROKEN_BIT 0x04
+#define ISP_IDLE_BIT 0x05 /* READY */
+#define ISP_SLEEPING_BIT 0x06
+#define ISP_STALLING_BIT 0x07
+#define ISP_IRQ_CLEAR_BIT 0x08
+#define ISP_IRQ_READY_BIT 0x0A
+#define ISP_IRQ_SLEEPING_BIT 0x0B
+
+/* ISP Register bits */
+#define ISP_CTRL_SINK_BIT 0x00
+#define ISP_PMEM_SINK_BIT 0x01
+#define ISP_DMEM_SINK_BIT 0x02
+#define ISP_FIFO0_SINK_BIT 0x03
+#define ISP_FIFO1_SINK_BIT 0x04
+#define ISP_FIFO2_SINK_BIT 0x05
+#define ISP_FIFO3_SINK_BIT 0x06
+#define ISP_FIFO4_SINK_BIT 0x07
+#define ISP_FIFO5_SINK_BIT 0x08
+#define ISP_FIFO6_SINK_BIT 0x09
+#define ISP_VMEM_SINK_BIT 0x0A
+#define ISP_VAMEM1_SINK_BIT 0x0B
+#define ISP_VAMEM2_SINK_BIT 0x0C
+#define ISP_VAMEM3_SINK_BIT 0x0D
+#define ISP_HMEM_SINK_BIT 0x0E
+
+#define ISP_CTRL_SINK_REG 0x08
+#define ISP_PMEM_SINK_REG 0x08
+#define ISP_DMEM_SINK_REG 0x08
+#define ISP_FIFO0_SINK_REG 0x08
+#define ISP_FIFO1_SINK_REG 0x08
+#define ISP_FIFO2_SINK_REG 0x08
+#define ISP_FIFO3_SINK_REG 0x08
+#define ISP_FIFO4_SINK_REG 0x08
+#define ISP_FIFO5_SINK_REG 0x08
+#define ISP_FIFO6_SINK_REG 0x08
+#define ISP_VMEM_SINK_REG 0x08
+#define ISP_VAMEM1_SINK_REG 0x08
+#define ISP_VAMEM2_SINK_REG 0x08
+#define ISP_VAMEM3_SINK_REG 0x08
+#define ISP_HMEM_SINK_REG 0x08
+
+#ifdef ISP2401
+#define ISP_BAMEM_ALIGN_ELEM ISP_VMEM_ALIGN_ELEM
+#define BAMEM VMEM
+
+#define XNR3_DOWN_BAMEM_BASE_ADDRESS (0x16880)
+#define XNR3_UP_BAMEM_BASE_ADDRESS (0x12880)
+
+#define bmem_ldrow(fu, pid, offset, data) bmem_ldrow_s(fu, pid, offset, data)
+#define bmem_strow(fu, pid, offset, data) bmem_strow_s(fu, pid, offset, data)
+#define bmem_ldblk(fu, pid, offset, data) bmem_ldblk_s(fu, pid, offset, data)
+#define bmem_stblk(fu, pid, offset, data) bmem_stblk_s(fu, pid, offset, data)
+
+#endif
+#endif /* __ISP_GLOBAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/mmu_global.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/mmu_global.h
new file mode 100644
index 000000000000..83ca418c8ff2
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/mmu_global.h
@@ -0,0 +1,22 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __MMU_GLOBAL_H_INCLUDED__
+#define __MMU_GLOBAL_H_INCLUDED__
+
+#define IS_MMU_VERSION_2
+
+#include <mmu_defs.h>
+
+#endif /* __MMU_GLOBAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/resource_global.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/resource_global.h
new file mode 100644
index 000000000000..01c915c033a9
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/resource_global.h
@@ -0,0 +1,35 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __RESOURCE_GLOBAL_H_INCLUDED__
+#define __RESOURCE_GLOBAL_H_INCLUDED__
+
+#define IS_RESOURCE_VERSION_1
+
+typedef enum {
+ DMA_CHANNEL_RESOURCE_TYPE,
+ IRQ_CHANNEL_RESOURCE_TYPE,
+ MEM_SECTION_RESOURCE_TYPE,
+ N_RESOURCE_TYPE
+} resource_type_ID_t;
+
+typedef enum {
+ PERMANENT_RESOURCE_RESERVATION,
+ PERSISTENT_RESOURCE_RESERVATION,
+ DEDICTATED_RESOURCE_RESERVATION,
+ SHARED_RESOURCE_RESERVATION,
+ N_RESOURCE_RESERVATION
+} resource_reservation_t;
+
+#endif /* __RESOURCE_GLOBAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/sp_global.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/sp_global.h
new file mode 100644
index 000000000000..6ec4e590e3b4
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/sp_global.h
@@ -0,0 +1,93 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __SP_GLOBAL_H_INCLUDED__
+#define __SP_GLOBAL_H_INCLUDED__
+
+#include <system_types.h>
+
+#if defined(HAS_SP_2401)
+#define IS_SP_2401
+/* 2401 uses 2400 */
+#include <scalar_processor_2400_params.h>
+#elif defined(HAS_SP_2400)
+#define IS_SP_2400
+
+#include <scalar_processor_2400_params.h>
+#else
+#error "sp_global.h: SP_2400 must be one of {2400, 2401 }"
+#endif
+
+#define SP_PMEM_WIDTH_LOG2 SP_PMEM_LOG_WIDTH_BITS
+#define SP_PMEM_SIZE SP_PMEM_DEPTH
+
+#define SP_DMEM_SIZE 0x4000
+
+/* SP Registers */
+#define SP_PC_REG 0x09
+#define SP_SC_REG 0x00
+#define SP_START_ADDR_REG 0x01
+#define SP_ICACHE_ADDR_REG 0x05
+#define SP_IRQ_READY_REG 0x00
+#define SP_IRQ_CLEAR_REG 0x00
+#define SP_ICACHE_INV_REG 0x00
+#define SP_CTRL_SINK_REG 0x0A
+
+/* SP Register bits */
+#define SP_RST_BIT 0x00
+#define SP_START_BIT 0x01
+#define SP_BREAK_BIT 0x02
+#define SP_RUN_BIT 0x03
+#define SP_BROKEN_BIT 0x04
+#define SP_IDLE_BIT 0x05 /* READY */
+#define SP_SLEEPING_BIT 0x06
+#define SP_STALLING_BIT 0x07
+#define SP_IRQ_CLEAR_BIT 0x08
+#define SP_IRQ_READY_BIT 0x0A
+#define SP_IRQ_SLEEPING_BIT 0x0B
+
+#define SP_ICACHE_INV_BIT 0x0C
+#define SP_IPREFETCH_EN_BIT 0x0D
+
+#define SP_FIFO0_SINK_BIT 0x00
+#define SP_FIFO1_SINK_BIT 0x01
+#define SP_FIFO2_SINK_BIT 0x02
+#define SP_FIFO3_SINK_BIT 0x03
+#define SP_FIFO4_SINK_BIT 0x04
+#define SP_FIFO5_SINK_BIT 0x05
+#define SP_FIFO6_SINK_BIT 0x06
+#define SP_FIFO7_SINK_BIT 0x07
+#define SP_FIFO8_SINK_BIT 0x08
+#define SP_FIFO9_SINK_BIT 0x09
+#define SP_FIFOA_SINK_BIT 0x0A
+#define SP_DMEM_SINK_BIT 0x0B
+#define SP_CTRL_MT_SINK_BIT 0x0C
+#define SP_ICACHE_MT_SINK_BIT 0x0D
+
+#define SP_FIFO0_SINK_REG 0x0A
+#define SP_FIFO1_SINK_REG 0x0A
+#define SP_FIFO2_SINK_REG 0x0A
+#define SP_FIFO3_SINK_REG 0x0A
+#define SP_FIFO4_SINK_REG 0x0A
+#define SP_FIFO5_SINK_REG 0x0A
+#define SP_FIFO6_SINK_REG 0x0A
+#define SP_FIFO7_SINK_REG 0x0A
+#define SP_FIFO8_SINK_REG 0x0A
+#define SP_FIFO9_SINK_REG 0x0A
+#define SP_FIFOA_SINK_REG 0x0A
+#define SP_DMEM_SINK_REG 0x0A
+#define SP_CTRL_MT_SINK_REG 0x0A
+#define SP_ICACHE_MT_SINK_REG 0x0A
+
+#endif /* __SP_GLOBAL_H_INCLUDED__ */
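The SP_*_BIT values above index into the word read back from SP_SC_REG; the sp_state_s structure in sp_local.h earlier in this patch is essentially a decoded view of that word. A hedged sketch of such a decode (illustrative only, not the driver's actual state readout):

/* Illustration: test one status/control bit in a value obtained from
 * sp_ctrl_load(SP0_ID, SP_SC_REG). */
static inline bool example_sp_status_bit(hrt_data status, unsigned bit)
{
	return (status & (1U << bit)) != 0;
}

/* e.g. example_sp_status_bit(status, SP_IDLE_BIT) reports the READY/idle
 * state, matching sp_state_s::is_idle. */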
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/system_global.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/system_global.h
new file mode 100644
index 000000000000..d803efd7400a
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/system_global.h
@@ -0,0 +1,348 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __SYSTEM_GLOBAL_H_INCLUDED__
+#define __SYSTEM_GLOBAL_H_INCLUDED__
+
+#include <hive_isp_css_defs.h>
+#include <type_support.h>
+
+/*
+ * The longest allowed (uninterruptible) bus transfer; this does not
+ * take stalling into account
+ */
+#define HIVE_ISP_MAX_BURST_LENGTH 1024
+
+/*
+ * Maximum allowed burst length in words for the ISP DMA
+ */
+#define ISP_DMA_MAX_BURST_LENGTH 128
+
+/*
+ * Create a list of HAS and IS properties that defines the system
+ *
+ * The configuration assumes the following:
+ * - The system is heterogeneous; there are multiple cell and device classes
+ * - The cell and device instances are homogeneous; each device type
+ * belongs to the same class
+ * - Device instances supporting a subset of the class capabilities are
+ * allowed
+ *
+ * We could manage different device classes through the enumerated
+ * lists (C) or the use of classes (C++), but that is presently not
+ * fully supported
+ *
+ * N.B. the 3 input formatters are of 2 different classes
+ */
+
+#define IS_ISP_2400_SYSTEM
+/*
+ * Since this file is visible everywhere and the system definition
+ * macros are not, detect the separate definitions for {host, SP, ISP}
+ *
+ * The 2401 system has the nice property that it uses a vanilla 2400 SP,
+ * so the SP will believe it is a 2400 system rather than a 2401...
+ */
+//#if defined(SYSTEM_hive_isp_css_2401_system) || defined(__isp2401_mamoiada) || defined(__scalar_processor_2401)
+#if defined(SYSTEM_hive_isp_css_2401_system) || defined(__isp2401_mamoiada)
+#define IS_ISP_2401_MAMOIADA_SYSTEM
+#define HAS_ISP_2401_MAMOIADA
+#define HAS_SP_2400
+//#elif defined(SYSTEM_hive_isp_css_2400_system) || defined(__isp2400_mamoiada) || defined(__scalar_processor_2400)
+#elif defined(SYSTEM_hive_isp_css_2400_system) || defined(__isp2400_mamoiada)
+#define IS_ISP_2400_MAMOIADA_SYSTEM
+#define HAS_ISP_2400_MAMOIADA
+#define HAS_SP_2400
+#else
+#error "system_global.h: 2400_SYSTEM must be one of {2400, 2401 }"
+#endif
+
+#define USE_INPUT_SYSTEM_VERSION_2
+
+#define HAS_MMU_VERSION_2
+#define HAS_DMA_VERSION_2
+#define HAS_GDC_VERSION_2
+#define HAS_VAMEM_VERSION_2
+#define HAS_HMEM_VERSION_1
+#define HAS_BAMEM_VERSION_2
+#define HAS_IRQ_VERSION_2
+#define HAS_IRQ_MAP_VERSION_2
+#define HAS_INPUT_FORMATTER_VERSION_2
+/* 2401: HAS_INPUT_SYSTEM_VERSION_2401 */
+#define HAS_INPUT_SYSTEM_VERSION_2
+#define HAS_BUFFERED_SENSOR
+#define HAS_FIFO_MONITORS_VERSION_2
+/* #define HAS_GP_REGS_VERSION_2 */
+#define HAS_GP_DEVICE_VERSION_2
+#define HAS_GPIO_VERSION_1
+#define HAS_TIMED_CTRL_VERSION_1
+#define HAS_RX_VERSION_2
+
+#define DMA_DDR_TO_VAMEM_WORKAROUND
+#define DMA_DDR_TO_HMEM_WORKAROUND
+
+/*
+ * Semi global. "HRT" is accessible from SP, but the HRT types do not fully apply
+ */
+#define HRT_VADDRESS_WIDTH 32
+//#define HRT_ADDRESS_WIDTH 64 /* Surprise, this is a local property*/
+#define HRT_DATA_WIDTH 32
+
+#define SIZEOF_HRT_REG (HRT_DATA_WIDTH>>3)
+#define HIVE_ISP_CTRL_DATA_BYTES (HIVE_ISP_CTRL_DATA_WIDTH/8)
+
+/* The main bus connecting all devices */
+#define HRT_BUS_WIDTH HIVE_ISP_CTRL_DATA_WIDTH
+#define HRT_BUS_BYTES HIVE_ISP_CTRL_DATA_BYTES
+
+/* per-frame parameter handling support */
+#define SH_CSS_ENABLE_PER_FRAME_PARAMS
+
+typedef uint32_t hrt_bus_align_t;
+
+/*
+ * Enumerate the devices; device access through the API is by ID, through the DLI by address.
+ * The enumerator terminators are used to size the wiring arrays and as an exception value.
+ */
+typedef enum {
+ DDR0_ID = 0,
+ N_DDR_ID
+} ddr_ID_t;
+
+typedef enum {
+ ISP0_ID = 0,
+ N_ISP_ID
+} isp_ID_t;
+
+typedef enum {
+ SP0_ID = 0,
+ N_SP_ID
+} sp_ID_t;
+
+#if defined (IS_ISP_2401_MAMOIADA_SYSTEM)
+typedef enum {
+ MMU0_ID = 0,
+ MMU1_ID,
+ N_MMU_ID
+} mmu_ID_t;
+#elif defined (IS_ISP_2400_MAMOIADA_SYSTEM)
+typedef enum {
+ MMU0_ID = 0,
+ MMU1_ID,
+ N_MMU_ID
+} mmu_ID_t;
+#else
+#error "system_global.h: SYSTEM must be one of {2400, 2401}"
+#endif
+
+typedef enum {
+ DMA0_ID = 0,
+ N_DMA_ID
+} dma_ID_t;
+
+typedef enum {
+ GDC0_ID = 0,
+ GDC1_ID,
+ N_GDC_ID
+} gdc_ID_t;
+
+#define N_GDC_ID_CPP 2 // this extra define is needed because we want to use it also in the preprocessor, and that doesn't work with enums.
+
+typedef enum {
+ VAMEM0_ID = 0,
+ VAMEM1_ID,
+ VAMEM2_ID,
+ N_VAMEM_ID
+} vamem_ID_t;
+
+typedef enum {
+ BAMEM0_ID = 0,
+ N_BAMEM_ID
+} bamem_ID_t;
+
+typedef enum {
+ HMEM0_ID = 0,
+ N_HMEM_ID
+} hmem_ID_t;
+
+/*
+typedef enum {
+ IRQ0_ID = 0,
+ N_IRQ_ID
+} irq_ID_t;
+*/
+
+typedef enum {
+ IRQ0_ID = 0, // GP IRQ block
+ IRQ1_ID, // Input formatter
+ IRQ2_ID, // input system
+ IRQ3_ID, // input selector
+ N_IRQ_ID
+} irq_ID_t;
+
+typedef enum {
+ FIFO_MONITOR0_ID = 0,
+ N_FIFO_MONITOR_ID
+} fifo_monitor_ID_t;
+
+/*
+ * Deprecated: Since all gp_reg instances are different
+ * and put in the address maps of other devices we cannot
+ * enumerate them as that assumes the instances are the
+ * same.
+ *
+ * We define a single GP_DEVICE containing all gp_regs
+ * w.r.t. a single base address
+ *
+typedef enum {
+ GP_REGS0_ID = 0,
+ N_GP_REGS_ID
+} gp_regs_ID_t;
+ */
+typedef enum {
+ GP_DEVICE0_ID = 0,
+ N_GP_DEVICE_ID
+} gp_device_ID_t;
+
+typedef enum {
+ GP_TIMER0_ID = 0,
+ GP_TIMER1_ID,
+ GP_TIMER2_ID,
+ GP_TIMER3_ID,
+ GP_TIMER4_ID,
+ GP_TIMER5_ID,
+ GP_TIMER6_ID,
+ GP_TIMER7_ID,
+ N_GP_TIMER_ID
+} gp_timer_ID_t;
+
+typedef enum {
+ GPIO0_ID = 0,
+ N_GPIO_ID
+} gpio_ID_t;
+
+typedef enum {
+ TIMED_CTRL0_ID = 0,
+ N_TIMED_CTRL_ID
+} timed_ctrl_ID_t;
+
+typedef enum {
+ INPUT_FORMATTER0_ID = 0,
+ INPUT_FORMATTER1_ID,
+ INPUT_FORMATTER2_ID,
+ INPUT_FORMATTER3_ID,
+ N_INPUT_FORMATTER_ID
+} input_formatter_ID_t;
+
+/* The IF RST is outside the IF */
+#define INPUT_FORMATTER0_SRST_OFFSET 0x0824
+#define INPUT_FORMATTER1_SRST_OFFSET 0x0624
+#define INPUT_FORMATTER2_SRST_OFFSET 0x0424
+#define INPUT_FORMATTER3_SRST_OFFSET 0x0224
+
+#define INPUT_FORMATTER0_SRST_MASK 0x0001
+#define INPUT_FORMATTER1_SRST_MASK 0x0002
+#define INPUT_FORMATTER2_SRST_MASK 0x0004
+#define INPUT_FORMATTER3_SRST_MASK 0x0008
+
+typedef enum {
+ INPUT_SYSTEM0_ID = 0,
+ N_INPUT_SYSTEM_ID
+} input_system_ID_t;
+
+typedef enum {
+ RX0_ID = 0,
+ N_RX_ID
+} rx_ID_t;
+
+typedef enum {
+ MIPI_PORT0_ID = 0,
+ MIPI_PORT1_ID,
+ MIPI_PORT2_ID,
+ N_MIPI_PORT_ID
+} mipi_port_ID_t;
+
+#define N_RX_CHANNEL_ID 4
+
+/* Generic port enumeration with an internal port type ID */
+typedef enum {
+ CSI_PORT0_ID = 0,
+ CSI_PORT1_ID,
+ CSI_PORT2_ID,
+ TPG_PORT0_ID,
+ PRBS_PORT0_ID,
+ FIFO_PORT0_ID,
+ MEMORY_PORT0_ID,
+ N_INPUT_PORT_ID
+} input_port_ID_t;
+
+typedef enum {
+ CAPTURE_UNIT0_ID = 0,
+ CAPTURE_UNIT1_ID,
+ CAPTURE_UNIT2_ID,
+ ACQUISITION_UNIT0_ID,
+ DMA_UNIT0_ID,
+ CTRL_UNIT0_ID,
+ GPREGS_UNIT0_ID,
+ FIFO_UNIT0_ID,
+ IRQ_UNIT0_ID,
+ N_SUB_SYSTEM_ID
+} sub_system_ID_t;
+
+#define N_CAPTURE_UNIT_ID 3
+#define N_ACQUISITION_UNIT_ID 1
+#define N_CTRL_UNIT_ID 1
+
+enum ia_css_isp_memories {
+ IA_CSS_ISP_PMEM0 = 0,
+ IA_CSS_ISP_DMEM0,
+ IA_CSS_ISP_VMEM0,
+ IA_CSS_ISP_VAMEM0,
+ IA_CSS_ISP_VAMEM1,
+ IA_CSS_ISP_VAMEM2,
+ IA_CSS_ISP_HMEM0,
+ IA_CSS_SP_DMEM0,
+ IA_CSS_DDR,
+ N_IA_CSS_MEMORIES
+};
+#define IA_CSS_NUM_MEMORIES 9
+/* For driver compatibility */
+#define N_IA_CSS_ISP_MEMORIES IA_CSS_NUM_MEMORIES
+#define IA_CSS_NUM_ISP_MEMORIES IA_CSS_NUM_MEMORIES
+
+#if 0
+typedef enum {
+ dev_chn, /* device channels, external resource */
+ ext_mem, /* external memories */
+ int_mem, /* internal memories */
+ int_chn /* internal channels, user defined */
+} resource_type_t;
+
+/* if this enum is extended with other memory resources, please also extend the function resource_to_memptr() */
+typedef enum {
+ vied_nci_dev_chn_dma_ext0,
+ int_mem_vmem0,
+ int_mem_dmem0
+} resource_id_t;
+
+/* enum listing the different memories within a program group.
+ This enum is used in the mem_ptr_t type */
+typedef enum {
+ buf_mem_invalid = 0,
+ buf_mem_vmem_prog0,
+ buf_mem_dmem_prog0
+} buf_mem_t;
+
+#endif
+#endif /* __SYSTEM_GLOBAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/timed_ctrl_global.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/timed_ctrl_global.h
new file mode 100644
index 000000000000..c3e8a0104092
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/timed_ctrl_global.h
@@ -0,0 +1,56 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __TIMED_CTRL_GLOBAL_H_INCLUDED__
+#define __TIMED_CTRL_GLOBAL_H_INCLUDED__
+
+#define IS_TIMED_CTRL_VERSION_1
+
+#include <timed_controller_defs.h>
+
+/**
+ * Order of the input bits for the timed controller, taken from the
+ * ISP_CSS_2401 System Architecture Description; valid for
+ * 2400 and 2401.
+ *
+ * Check for other systems.
+ */
+#define HIVE_TIMED_CTRL_GPIO_PIN_0_BIT_ID 0
+#define HIVE_TIMED_CTRL_GPIO_PIN_1_BIT_ID 1
+#define HIVE_TIMED_CTRL_GPIO_PIN_2_BIT_ID 2
+#define HIVE_TIMED_CTRL_GPIO_PIN_3_BIT_ID 3
+#define HIVE_TIMED_CTRL_GPIO_PIN_4_BIT_ID 4
+#define HIVE_TIMED_CTRL_GPIO_PIN_5_BIT_ID 5
+#define HIVE_TIMED_CTRL_GPIO_PIN_6_BIT_ID 6
+#define HIVE_TIMED_CTRL_GPIO_PIN_7_BIT_ID 7
+#define HIVE_TIMED_CTRL_GPIO_PIN_8_BIT_ID 8
+#define HIVE_TIMED_CTRL_GPIO_PIN_9_BIT_ID 9
+#define HIVE_TIMED_CTRL_GPIO_PIN_10_BIT_ID 10
+#define HIVE_TIMED_CTRL_GPIO_PIN_11_BIT_ID 11
+#define HIVE_TIMED_CTRL_IRQ_SP_BIT_ID 12
+#define HIVE_TIMED_CTRL_IRQ_ISP_BIT_ID 13
+#define HIVE_TIMED_CTRL_IRQ_INPUT_SYSTEM_BIT_ID 14
+#define HIVE_TIMED_CTRL_IRQ_INPUT_SELECTOR_BIT_ID 15
+#define HIVE_TIMED_CTRL_IRQ_IF_BLOCK_BIT_ID 16
+#define HIVE_TIMED_CTRL_IRQ_GP_TIMER_0_BIT_ID 17
+#define HIVE_TIMED_CTRL_IRQ_GP_TIMER_1_BIT_ID 18
+#define HIVE_TIMED_CTRL_CSI_SOL_BIT_ID 19
+#define HIVE_TIMED_CTRL_CSI_EOL_BIT_ID 20
+#define HIVE_TIMED_CTRL_CSI_SOF_BIT_ID 21
+#define HIVE_TIMED_CTRL_CSI_EOF_BIT_ID 22
+#define HIVE_TIMED_CTRL_IRQ_IS_STREAMING_MONITOR_BIT_ID 23
+
+
+
+#endif /* __TIMED_CTRL_GLOBAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/vamem_global.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/vamem_global.h
new file mode 100644
index 000000000000..58713c6583b9
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/vamem_global.h
@@ -0,0 +1,34 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __VAMEM_GLOBAL_H_INCLUDED__
+#define __VAMEM_GLOBAL_H_INCLUDED__
+
+#include <type_support.h>
+
+#define IS_VAMEM_VERSION_2
+
+/* (log) stepsize of linear interpolation */
+#define VAMEM_INTERP_STEP_LOG2 4
+#define VAMEM_INTERP_STEP (1<<VAMEM_INTERP_STEP_LOG2)
+/* (physical) size of the tables */
+#define VAMEM_TABLE_UNIT_SIZE ((1<<(ISP_VAMEM_ADDRESS_BITS-VAMEM_INTERP_STEP_LOG2)) + 1)
+/* (logical) size of the tables */
+#define VAMEM_TABLE_UNIT_STEP ((VAMEM_TABLE_UNIT_SIZE-1)<<1)
+/* Number of tables */
+#define VAMEM_TABLE_UNIT_COUNT (ISP_VAMEM_DEPTH/VAMEM_TABLE_UNIT_STEP)
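+
+/*
+ * Worked example (illustrative only; the real ISP_VAMEM_ADDRESS_BITS and
+ * ISP_VAMEM_DEPTH come from the system definition headers and may differ).
+ * Assuming ISP_VAMEM_ADDRESS_BITS == 12 and ISP_VAMEM_DEPTH == 2048:
+ *
+ *	VAMEM_INTERP_STEP      = 1 << 4              = 16
+ *	VAMEM_TABLE_UNIT_SIZE  = (1 << (12 - 4)) + 1 = 257 entries
+ *	VAMEM_TABLE_UNIT_STEP  = (257 - 1) << 1      = 512
+ *	VAMEM_TABLE_UNIT_COUNT = 2048 / 512          = 4 tables
+ */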
+
+typedef uint16_t vamem_data_t;
+
+#endif /* __VAMEM_GLOBAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/vmem_global.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/vmem_global.h
new file mode 100644
index 000000000000..7867cd137f3f
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/vmem_global.h
@@ -0,0 +1,28 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __VMEM_GLOBAL_H_INCLUDED__
+#define __VMEM_GLOBAL_H_INCLUDED__
+
+#include "isp.h"
+
+#define VMEM_SIZE ISP_VMEM_DEPTH
+#define VMEM_ELEMBITS ISP_VMEM_ELEMBITS
+#define VMEM_ALIGN ISP_VMEM_ALIGN
+
+#ifndef PIPE_GENERATION
+typedef tvector *pvector;
+#endif
+
+#endif /* __VMEM_GLOBAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/xmem_global.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/xmem_global.h
new file mode 100644
index 000000000000..1d3a43abe55d
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/xmem_global.h
@@ -0,0 +1,20 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __XMEM_GLOBAL_H_INCLUDED__
+#define __XMEM_GLOBAL_H_INCLUDED__
+
+#include "isp.h"
+
+#endif /* __XMEM_GLOBAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/assert_support.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/assert_support.h
new file mode 100644
index 000000000000..92fb15d04703
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/assert_support.h
@@ -0,0 +1,103 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __ASSERT_SUPPORT_H_INCLUDED__
+#define __ASSERT_SUPPORT_H_INCLUDED__
+
+#include "storage_class.h"
+
+/**
+ * The following macro can help to test the size of a struct at compile
+ * time rather than at run-time. It does not work for all compilers; see
+ * below.
+ *
+ * Depending on the value of 'condition', the following macro is expanded to:
+ * - condition==true:
+ * an expression containing an array declaration with negative size,
+ * usually resulting in a compilation error
+ * - condition==false:
+ * (void) 1; // C statement with no effect
+ *
+ * example:
+ * COMPILATION_ERROR_IF( sizeof(struct host_sp_queues) != SIZE_OF_HOST_SP_QUEUES_STRUCT);
+ *
+ * verify that the macro indeed triggers a compilation error with your compiler:
+ * COMPILATION_ERROR_IF( sizeof(struct host_sp_queues) != (sizeof(struct host_sp_queues)+1) );
+ *
+ * Not all compilers will trigger an error with this macro; use a search engine to search for
+ * BUILD_BUG_ON to find other methods.
+ */
+#define COMPILATION_ERROR_IF(condition) ((void)sizeof(char[1 - 2*!!(condition)]))
+
+/* Compile time assertion */
+#ifndef CT_ASSERT
+#define CT_ASSERT(cnd) ((void)sizeof(char[(cnd)?1:-1]))
+#endif /* CT_ASSERT */
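+
+/*
+ * Usage sketch (illustrative only; the struct and its expected size are
+ * hypothetical, not part of this interface):
+ *
+ *	struct hypothetical_token { uint32_t id; uint32_t payload; };
+ *	CT_ASSERT(sizeof(struct hypothetical_token) == 8);
+ *
+ * Both CT_ASSERT() and COMPILATION_ERROR_IF() expand to an expression,
+ * so they must be placed where a statement or expression is allowed,
+ * e.g. inside a function body.
+ */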
+
+#ifdef NDEBUG
+
+#define assert(cnd) ((void)0)
+
+#else
+
+#if defined(_MSC_VER)
+#ifdef _KERNEL_MODE
+/* Windows kernel mode compilation */
+#include <wdm.h>
+#define assert(cnd) ASSERT(cnd)
+#else
+/* Windows usermode compilation */
+#include <assert.h>
+#endif
+
+#elif defined(__KERNEL__)
+#include <linux/bug.h>
+
+/* TODO: it would be cleaner to use this:
+ * #define assert(cnd) BUG_ON(!(cnd))
+ * but that causes many compiler warnings (==errors) under Android
+ * because it seems that the BUG_ON() macro is not seen as a check by
+ * gcc like the BUG() macro is. */
+#define assert(cnd) \
+ do { \
+ if (!(cnd)) \
+ BUG(); \
+ } while (0)
+
+#elif defined(__FIST__) || defined(__GNUC__)
+
+/* enable assert for crun */
+#include "assert.h"
+
+#else /* default for unknown environments */
+#define assert(cnd) ((void)0)
+#endif
+
+#endif /* NDEBUG */
+
+#ifndef PIPE_GENERATION
+/* Deprecated OP___assert; this is still used in ~1000 places
+ * in the code and will be removed over time.
+ * The implementation for the pipe generation tool is in support.isp.h */
+#define OP___assert(cnd) assert(cnd)
+
+STORAGE_CLASS_INLINE void compile_time_assert (unsigned cond)
+{
+ /* Call undefined function if cond is false */
+ extern void _compile_time_assert (void);
+ if (!cond) _compile_time_assert();
+}
+#endif /* PIPE_GENERATION */
+
+#endif /* __ASSERT_SUPPORT_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/bamem.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/bamem.h
new file mode 100644
index 000000000000..d71e08f27a42
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/bamem.h
@@ -0,0 +1,47 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __BAMEM_H_INCLUDED__
+#define __BAMEM_H_INCLUDED__
+
+/*
+ * This file is included on every cell {SP,ISP,host} and on every system
+ * that uses the BAMEM device. It defines the API to the DLI bridge.
+ *
+ * System and cell specific interfaces and inline code are included
+ * conditionally through Makefile path settings.
+ *
+ * - system and cell agnostic interfaces, constants and identifiers
+ * - public: system agnostic, cell specific interfaces
+ * - private: system dependent, cell specific interfaces & inline implementations
+ * - global: system specific constants and identifiers
+ * - local: system and cell specific constants and identifiers
+ */
+
+#include "storage_class.h"
+
+#include "system_local.h"
+#include "bamem_local.h"
+
+#ifndef __INLINE_BAMEM__
+#define STORAGE_CLASS_BAMEM_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_BAMEM_C
+#include "bamem_public.h"
+#else /* __INLINE_BAMEM__ */
+#define STORAGE_CLASS_BAMEM_H STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_BAMEM_C STORAGE_CLASS_INLINE
+#include "bamem_private.h"
+#endif /* __INLINE_BAMEM__ */
+
+#endif /* __BAMEM_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/bbb_config.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/bbb_config.h
new file mode 100644
index 000000000000..18bc5ef3d0bf
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/bbb_config.h
@@ -0,0 +1,27 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __BBB_CONFIG_H_INCLUDED__
+#define __BBB_CONFIG_H_INCLUDED__
+/* This header contains BBB defines common to ISP and host */
+
+#define BFA_MAX_KWAY (49)
+#define BFA_RW_LUT_SIZE (7)
+
+#define SAD3x3_IN_SHIFT (2) /* input right shift value for SAD3x3 */
+#define SAD3x3_OUT_SHIFT (2) /* output right shift value for SAD3x3 */
+
+/* XCU and BMA related defines shared between host and ISP
+ * also need to be moved here */
+#endif /* __BBB_CONFIG_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/bitop_support.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/bitop_support.h
new file mode 100644
index 000000000000..1b271c3c6a25
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/bitop_support.h
@@ -0,0 +1,25 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __BITOP_SUPPORT_H_INCLUDED__
+#define __BITOP_SUPPORT_H_INCLUDED__
+
+#define bitop_setbit(a, b) ((a) |= (1UL << (b)))
+
+#define bitop_getbit(a, b) (((a) & (1UL << (b))) != 0)
+
+#define bitop_clearbit(a, b) ((a) &= ~(1UL << (b)))
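+
+/*
+ * Usage sketch (illustrative only; 'mask' and the bit index are
+ * hypothetical). The first argument is modified in place by
+ * bitop_setbit()/bitop_clearbit(), so pass an lvalue:
+ *
+ *	unsigned long mask = 0;
+ *	bitop_setbit(mask, 3);            now mask == 0x08
+ *	if (bitop_getbit(mask, 3))
+ *		bitop_clearbit(mask, 3);  now mask == 0x00
+ */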
+
+#endif /* __BITOP_SUPPORT_H_INCLUDED__ */
+
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/cpu_mem_support.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/cpu_mem_support.h
new file mode 100644
index 000000000000..6d014fafb713
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/cpu_mem_support.h
@@ -0,0 +1,59 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __CPU_MEM_SUPPORT_H_INCLUDED__
+#define __CPU_MEM_SUPPORT_H_INCLUDED__
+
+#if defined (__KERNEL__)
+#include <linux/string.h> /* memset */
+#else
+#include <string.h> /* memset */
+#endif
+
+#include "sh_css_internal.h" /* sh_css_malloc and sh_css_free */
+
+static inline void*
+ia_css_cpu_mem_alloc(unsigned int size)
+{
+ return sh_css_malloc(size);
+}
+
+static inline void*
+ia_css_cpu_mem_copy(void* dst, const void* src, unsigned int size)
+{
+ if(!src || !dst)
+ return NULL;
+
+ return memcpy(dst, src, size);
+}
+
+static inline void*
+ia_css_cpu_mem_set_zero(void* dst, unsigned int size)
+{
+ if(!dst)
+ return NULL;
+
+ return memset(dst, 0, size);
+}
+
+static inline void
+ia_css_cpu_mem_free(void* ptr)
+{
+ if(!ptr)
+ return;
+
+ sh_css_free(ptr);
+}
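+
+/*
+ * Usage sketch (illustrative only; 'struct hypothetical_ctx' is not part
+ * of this interface). The wrappers simply forward to sh_css_malloc() and
+ * sh_css_free(), so the usual NULL check still applies:
+ *
+ *	struct hypothetical_ctx *ctx = ia_css_cpu_mem_alloc(sizeof(*ctx));
+ *	if (ctx) {
+ *		ia_css_cpu_mem_set_zero(ctx, sizeof(*ctx));
+ *		... use ctx ...
+ *		ia_css_cpu_mem_free(ctx);
+ *	}
+ */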
+
+#endif /* __CPU_MEM_SUPPORT_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/csi_rx.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/csi_rx.h
new file mode 100644
index 000000000000..0398f5802f05
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/csi_rx.h
@@ -0,0 +1,48 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __CSI_RX_H_INCLUDED__
+#define __CSI_RX_H_INCLUDED__
+
+/*
+ * This file is included on every cell {SP,ISP,host} and on every system
+ * that uses the input system device(s). It defines the API to the DLI bridge.
+ *
+ * System and cell specific interfaces and inline code are included
+ * conditionally through Makefile path settings.
+ *
+ * - system and cell agnostic interfaces, constants and identifiers
+ * - public: system agnostic, cell specific interfaces
+ * - private: system dependent, cell specific interfaces &
+ * inline implementations
+ * - global: system specific constants and identifiers
+ * - local: system and cell specific constants and identifiers
+ */
+
+#include "storage_class.h"
+
+#include "system_local.h"
+#include "csi_rx_local.h"
+
+#ifndef __INLINE_CSI_RX__
+#define STORAGE_CLASS_CSI_RX_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_CSI_RX_C
+#include "csi_rx_public.h"
+#else /* __INLINE_CSI_RX__ */
+#define STORAGE_CLASS_CSI_RX_H STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_CSI_RX_C STORAGE_CLASS_INLINE
+#include "csi_rx_private.h"
+#endif /* __INLINE_CSI_RX__ */
+
+#endif /* __CSI_RX_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/debug.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/debug.h
new file mode 100644
index 000000000000..7d8011735033
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/debug.h
@@ -0,0 +1,48 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __DEBUG_H_INCLUDED__
+#define __DEBUG_H_INCLUDED__
+
+/*
+ * This file is included on every cell {SP,ISP,host} and on every system
+ * that uses the DMA device. It defines the API to the DLI bridge.
+ *
+ * System and cell specific interfaces and inline code are included
+ * conditionally through Makefile path settings.
+ *
+ * - system and cell agnostic interfaces, constants and identifiers
+ * - public: system agnostic, cell specific interfaces
+ * - private: system dependent, cell specific interfaces & inline implementations
+ * - global: system specific constants and identifiers
+ * - local: system and cell specific constants and identifiers
+ *
+ */
+
+#include "storage_class.h"
+
+#include "system_local.h"
+#include "debug_local.h"
+
+#ifndef __INLINE_DEBUG__
+#define STORAGE_CLASS_DEBUG_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_DEBUG_C
+#include "debug_public.h"
+#else /* __INLINE_DEBUG__ */
+#define STORAGE_CLASS_DEBUG_H STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_DEBUG_C STORAGE_CLASS_INLINE
+#include "debug_private.h"
+#endif /* __INLINE_DEBUG__ */
+
+#endif /* __DEBUG_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/device_access/device_access.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/device_access/device_access.h
new file mode 100644
index 000000000000..834e7c3e0814
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/device_access/device_access.h
@@ -0,0 +1,194 @@
+#ifndef ISP2401
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#else
+/**
+Support for Intel Camera Imaging ISP subsystem.
+Copyright (c) 2010 - 2015, Intel Corporation.
+
+This program is free software; you can redistribute it and/or modify it
+under the terms and conditions of the GNU General Public License,
+version 2, as published by the Free Software Foundation.
+
+This program is distributed in the hope it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+more details.
+*/
+#endif
+
+#ifndef __DEVICE_ACCESS_H_INCLUDED__
+#define __DEVICE_ACCESS_H_INCLUDED__
+
+/*!
+ * \brief
+ * Define the public interface for physical system
+ * access functions to SRAM and registers. Access
+ * types are limited to those defined in <stdint.h>.
+ * All accesses are aligned.
+ *
+ * The address representation is private to the system
+ * and represented as/stored in "hrt_address".
+ *
+ * The system global address can differ by an offset:
+ * the device base address. This offset must be added
+ * by the implementation of the access functions.
+ *
+ * "store" is a transfer to the device
+ * "load" is a transfer from the device
+ */
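+
+/*
+ * Usage sketch (illustrative only; the base address and register offsets
+ * below are hypothetical values, not taken from any real memory map):
+ *
+ *	device_set_base_address((sys_address)0x10000000);
+ *	uint32_t status = ia_css_device_load_uint32(0x0040);
+ *	ia_css_device_store_uint32(0x0044, status | 0x1);
+ */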
+
+#include <type_support.h>
+
+/*
+ * User provided file that defines the system address types:
+ * - hrt_address a type that can hold the (sub)system address range
+ */
+#include "system_types.h"
+/*
+ * We cannot assume that the global system address size is the size of
+ * a pointer because a (say) 64-bit host can be simulated in a 32-bit
+ * environment. Only if the host environment is modelled as on the target
+ * could we use a pointer. Even then, prototyping may need to be done
+ * before the target environment is available. As we cannot wait for that,
+ * we are stuck with integer addresses.
+ */
+
+/*typedef char *sys_address;*/
+typedef hrt_address sys_address;
+
+/*! Set the (sub)system base address
+
+ \param base_addr[in] The offset on which the (sub)system is located
+ in the global address map
+
+ \return none,
+ */
+extern void device_set_base_address(
+ const sys_address base_addr);
+
+
+/*! Get the (sub)system base address
+
+ \return base_address,
+ */
+extern sys_address device_get_base_address(void);
+
+/*! Read an 8-bit value from a device register or memory in the device
+
+ \param addr[in] Local address
+
+ \return device[addr]
+ */
+extern uint8_t ia_css_device_load_uint8(
+ const hrt_address addr);
+
+/*! Read a 16-bit value from a device register or memory in the device
+
+ \param addr[in] Local address
+
+ \return device[addr]
+ */
+extern uint16_t ia_css_device_load_uint16(
+ const hrt_address addr);
+
+/*! Read a 32-bit value from a device register or memory in the device
+
+ \param addr[in] Local address
+
+ \return device[addr]
+ */
+extern uint32_t ia_css_device_load_uint32(
+ const hrt_address addr);
+
+/*! Read a 64-bit value from a device register or memory in the device
+
+ \param addr[in] Local address
+
+ \return device[addr]
+ */
+extern uint64_t ia_css_device_load_uint64(
+ const hrt_address addr);
+
+/*! Write an 8-bit value to a device register or memory in the device
+
+ \param addr[in] Local address
+ \param data[in] value
+
+ \return none, device[addr] = value
+ */
+extern void ia_css_device_store_uint8(
+ const hrt_address addr,
+ const uint8_t data);
+
+/*! Write a 16-bit value to a device register or memory in the device
+
+ \param addr[in] Local address
+ \param data[in] value
+
+ \return none, device[addr] = value
+ */
+extern void ia_css_device_store_uint16(
+ const hrt_address addr,
+ const uint16_t data);
+
+/*! Write a 32-bit value to a device register or memory in the device
+
+ \param addr[in] Local address
+ \param data[in] value
+
+ \return none, device[addr] = value
+ */
+extern void ia_css_device_store_uint32(
+ const hrt_address addr,
+ const uint32_t data);
+
+/*! Write a 64-bit value to a device register or memory in the device
+
+ \param addr[in] Local address
+ \param data[in] value
+
+ \return none, device[addr] = value
+ */
+extern void ia_css_device_store_uint64(
+ const hrt_address addr,
+ const uint64_t data);
+
+/*! Read an array of bytes from device registers or memory in the device
+
+ \param addr[in] Local address
+ \param data[out] pointer to the destination array
+ \param size[in] number of bytes to read
+
+ \return none
+ */
+extern void ia_css_device_load(
+ const hrt_address addr,
+ void *data,
+ const size_t size);
+
+/*! Write an array of bytes to device registers or memory in the device
+
+ \param addr[in] Local address
+ \param data[in] pointer to the source array
+ \param size[in] number of bytes to write
+
+ \return none
+ */
+extern void ia_css_device_store(
+ const hrt_address addr,
+ const void *data,
+ const size_t size);
+
+#endif /* __DEVICE_ACCESS_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/dma.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/dma.h
new file mode 100644
index 000000000000..b266191f21ef
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/dma.h
@@ -0,0 +1,48 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __DMA_H_INCLUDED__
+#define __DMA_H_INCLUDED__
+
+/*
+ * This file is included on every cell {SP,ISP,host} and on every system
+ * that uses the DMA device. It defines the API to the DLI bridge.
+ *
+ * System and cell specific interfaces and inline code are included
+ * conditionally through Makefile path settings.
+ *
+ * - system and cell agnostic interfaces, constants and identifiers
+ * - public: system agnostic, cell specific interfaces
+ * - private: system dependent, cell specific interfaces & inline implementations
+ * - global: system specific constants and identifiers
+ * - local: system and cell specific constants and identifiers
+ *
+ */
+
+#include "storage_class.h"
+
+#include "system_local.h"
+#include "dma_local.h"
+
+#ifndef __INLINE_DMA__
+#define STORAGE_CLASS_DMA_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_DMA_C
+#include "dma_public.h"
+#else /* __INLINE_DMA__ */
+#define STORAGE_CLASS_DMA_H STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_DMA_C STORAGE_CLASS_INLINE
+#include "dma_private.h"
+#endif /* __INLINE_DMA__ */
+
+#endif /* __DMA_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/error_support.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/error_support.h
new file mode 100644
index 000000000000..6e5e5dd4107d
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/error_support.h
@@ -0,0 +1,70 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __ERROR_SUPPORT_H_INCLUDED__
+#define __ERROR_SUPPORT_H_INCLUDED__
+
+#if defined(_MSC_VER)
+#include <errno.h>
+/*
+ * Put here everything _MSC_VER specific not covered in
+ * "errno.h"
+ */
+#define EINVAL 22
+#define EBADE 52
+#define ENODATA 61
+#define ENOTCONN 107
+#define ENOTSUP 252
+#define ENOBUFS 233
+
+
+#elif defined(__KERNEL__)
+#include <linux/errno.h>
+/*
+ * Put here everything __KERNEL__ specific not covered in
+ * "errno.h"
+ */
+#define ENOTSUP 252
+
+#elif defined(__GNUC__)
+#include <errno.h>
+/*
+ * Put here everything __GNUC__ specific not covered in
+ * "errno.h"
+ */
+
+#else /* default is for the FIST environment */
+#include <errno.h>
+/*
+ * Put here everything FIST specific not covered in
+ * "errno.h"
+ */
+
+#endif
+
+#define verifexit(cond,error_tag) \
+do { \
+ if (!(cond)){ \
+ goto EXIT; \
+ } \
+} while(0)
+
+#define verifjmpexit(cond) \
+do { \
+ if (!(cond)){ \
+ goto EXIT; \
+ } \
+} while(0)
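+
+/*
+ * Usage sketch (illustrative only; hypothetical_step() and the error code
+ * are hypothetical). Both macros jump to a local EXIT label, which the
+ * caller must provide; note that the error_tag argument of verifexit()
+ * is currently unused:
+ *
+ *	int hypothetical_init(void)
+ *	{
+ *		int err = -EINVAL;
+ *
+ *		verifexit(hypothetical_step() == 0, "init");
+ *		err = 0;
+ *	EXIT:
+ *		return err;
+ *	}
+ */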
+
+#endif /* __ERROR_SUPPORT_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/event_fifo.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/event_fifo.h
new file mode 100644
index 000000000000..78827c554cc3
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/event_fifo.h
@@ -0,0 +1,47 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __EVENT_FIFO_H
+#define __EVENT_FIFO_H
+
+/*
+ * This file is included on every cell {SP,ISP,host} and on every system
+ * that uses the IRQ device. It defines the API to the DLI bridge.
+ *
+ * System and cell specific interfaces and inline code are included
+ * conditionally through Makefile path settings.
+ *
+ * - system and cell agnostic interfaces, constants and identifiers
+ * - public: system agnostic, cell specific interfaces
+ * - private: system dependent, cell specific interfaces & inline implementations
+ * - global: system specific constants and identifiers
+ * - local: system and cell specific constants and identifiers
+ */
+
+#include "storage_class.h"
+
+#include "system_local.h"
+#include "event_fifo_local.h"
+
+#ifndef __INLINE_EVENT__
+#define STORAGE_CLASS_EVENT_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_EVENT_C
+#include "event_fifo_public.h"
+#else /* __INLINE_EVENT__ */
+#define STORAGE_CLASS_EVENT_H STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_EVENT_C STORAGE_CLASS_INLINE
+#include "event_fifo_private.h"
+#endif /* __INLINE_EVENT__ */
+
+#endif /* __EVENT_FIFO_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/fifo_monitor.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/fifo_monitor.h
new file mode 100644
index 000000000000..3bdd260bcaa5
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/fifo_monitor.h
@@ -0,0 +1,47 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __FIFO_MONITOR_H_INCLUDED__
+#define __FIFO_MONITOR_H_INCLUDED__
+
+/*
+ * This file is included on every cell {SP,ISP,host} and on every system
+ * that uses the input system device(s). It defines the API to the DLI bridge.
+ *
+ * System and cell specific interfaces and inline code are included
+ * conditionally through Makefile path settings.
+ *
+ * - system and cell agnostic interfaces, constants and identifiers
+ * - public: system agnostic, cell specific interfaces
+ * - private: system dependent, cell specific interfaces & inline implementations
+ * - global: system specific constants and identifiers
+ * - local: system and cell specific constants and identifiers
+ */
+
+#include "storage_class.h"
+
+#include "system_local.h"
+#include "fifo_monitor_local.h"
+
+#ifndef __INLINE_FIFO_MONITOR__
+#define STORAGE_CLASS_FIFO_MONITOR_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_FIFO_MONITOR_C
+#include "fifo_monitor_public.h"
+#else /* __INLINE_FIFO_MONITOR__ */
+#define STORAGE_CLASS_FIFO_MONITOR_H STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_FIFO_MONITOR_C STORAGE_CLASS_INLINE
+#include "fifo_monitor_private.h"
+#endif /* __INLINE_FIFO_MONITOR__ */
+
+#endif /* __FIFO_MONITOR_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/gdc_device.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/gdc_device.h
new file mode 100644
index 000000000000..016132ba0b7f
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/gdc_device.h
@@ -0,0 +1,49 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __GDC_DEVICE_H_INCLUDED__
+#define __GDC_DEVICE_H_INCLUDED__
+
+/* The file gdc.h already exists */
+
+/*
+ * This file is included on every cell {SP,ISP,host} and on every system
+ * that uses the GDC device. It defines the API to the DLI bridge.
+ *
+ * System and cell specific interfaces and inline code are included
+ * conditionally through Makefile path settings.
+ *
+ * - system and cell agnostic interfaces, constants and identifiers
+ * - public: system agnostic, cell specific interfaces
+ * - private: system dependent, cell specific interfaces & inline implementations
+ * - global: system specific constants and identifiers
+ * - local: system and cell specific constants and identifiers
+ */
+
+#include "storage_class.h"
+
+#include "system_local.h"
+#include "gdc_local.h"
+
+#ifndef __INLINE_GDC__
+#define STORAGE_CLASS_GDC_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_GDC_C
+#include "gdc_public.h"
+#else /* __INLINE_GDC__ */
+#define STORAGE_CLASS_GDC_H STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_GDC_C STORAGE_CLASS_INLINE
+#include "gdc_private.h"
+#endif /* __INLINE_GDC__ */
+
+#endif /* __GDC_DEVICE_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/gp_device.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/gp_device.h
new file mode 100644
index 000000000000..766d2532d8f9
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/gp_device.h
@@ -0,0 +1,47 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __GP_DEVICE_H_INCLUDED__
+#define __GP_DEVICE_H_INCLUDED__
+
+/*
+ * This file is included on every cell {SP,ISP,host} and on every system
+ * that uses the input system device(s). It defines the API to the DLI bridge.
+ *
+ * System and cell specific interfaces and inline code are included
+ * conditionally through Makefile path settings.
+ *
+ * - system and cell agnostic interfaces, constants and identifiers
+ * - public: system agnostic, cell specific interfaces
+ * - private: system dependent, cell specific interfaces & inline implementations
+ * - global: system specific constants and identifiers
+ * - local: system and cell specific constants and identifiers
+ */
+
+#include "storage_class.h"
+
+#include "system_local.h"
+#include "gp_device_local.h"
+
+#ifndef __INLINE_GP_DEVICE__
+#define STORAGE_CLASS_GP_DEVICE_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_GP_DEVICE_C
+#include "gp_device_public.h"
+#else /* __INLINE_GP_DEVICE__ */
+#define STORAGE_CLASS_GP_DEVICE_H STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_GP_DEVICE_C STORAGE_CLASS_INLINE
+#include "gp_device_private.h"
+#endif /* __INLINE_GP_DEVICE__ */
+
+#endif /* __GP_DEVICE_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/gp_timer.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/gp_timer.h
new file mode 100644
index 000000000000..ca70f5603bf8
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/gp_timer.h
@@ -0,0 +1,47 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __GP_TIMER_H_INCLUDED__
+#define __GP_TIMER_H_INCLUDED__
+
+/*
+ * This file is included on every cell {SP,ISP,host} and on every system
+ * that uses the input system device(s). It defines the API to the DLI bridge.
+ *
+ * System and cell specific interfaces and inline code are included
+ * conditionally through Makefile path settings.
+ *
+ * - system and cell agnostic interfaces, constants and identifiers
+ * - public: system agnostic, cell specific interfaces
+ * - private: system dependent, cell specific interfaces & inline implementations
+ * - global: system specific constants and identifiers
+ * - local: system and cell specific constants and identifiers
+ */
+
+#include "storage_class.h"
+
+#include "system_local.h" /*GP_TIMER_BASE address */
+#include "gp_timer_local.h" /*GP_TIMER register offsets */
+
+#ifndef __INLINE_GP_TIMER__
+#define STORAGE_CLASS_GP_TIMER_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_GP_TIMER_C
+#include "gp_timer_public.h" /* functions*/
+#else /* __INLINE_GP_TIMER__ */
+#define STORAGE_CLASS_GP_TIMER_H STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_GP_TIMER_C STORAGE_CLASS_INLINE
+#include "gp_timer_private.h" /* inline functions*/
+#endif /* __INLINE_GP_TIMER__ */
+
+#endif /* __GP_TIMER_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/gpio.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/gpio.h
new file mode 100644
index 000000000000..dec21bcb6f47
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/gpio.h
@@ -0,0 +1,47 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __GPIO_H_INCLUDED__
+#define __GPIO_H_INCLUDED__
+
+/*
+ * This file is included on every cell {SP,ISP,host} and on every system
+ * that uses the input system device(s). It defines the API to the DLI bridge.
+ *
+ * System and cell specific interfaces and inline code are included
+ * conditionally through Makefile path settings.
+ *
+ * - system and cell agnostic interfaces, constants and identifiers
+ * - public: system agnostic, cell specific interfaces
+ * - private: system dependent, cell specific interfaces & inline implementations
+ * - global: system specific constants and identifiers
+ * - local: system and cell specific constants and identifiers
+ */
+
+#include "storage_class.h"
+
+#include "system_local.h"
+#include "gpio_local.h"
+
+#ifndef __INLINE_GPIO__
+#define STORAGE_CLASS_GPIO_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_GPIO_C
+#include "gpio_public.h"
+#else /* __INLINE_GPIO__ */
+#define STORAGE_CLASS_GPIO_H STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_GPIO_C STORAGE_CLASS_INLINE
+#include "gpio_private.h"
+#endif /* __INLINE_GPIO__ */
+
+#endif /* __GPIO_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/hmem.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/hmem.h
new file mode 100644
index 000000000000..671dd5b5fca6
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/hmem.h
@@ -0,0 +1,47 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __HMEM_H_INCLUDED__
+#define __HMEM_H_INCLUDED__
+
+/*
+ * This file is included on every cell {SP,ISP,host} and on every system
+ * that uses the HMEM device. It defines the API to the DLI bridge.
+ *
+ * System and cell specific interfaces and inline code are included
+ * conditionally through Makefile path settings.
+ *
+ * - system and cell agnostic interfaces, constants and identifiers
+ * - public: system agnostic, cell specific interfaces
+ * - private: system dependent, cell specific interfaces & inline implementations
+ * - global: system specific constants and identifiers
+ * - local: system and cell specific constants and identifiers
+ */
+
+#include "storage_class.h"
+
+#include "system_local.h"
+#include "hmem_local.h"
+
+#ifndef __INLINE_HMEM__
+#define STORAGE_CLASS_HMEM_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_HMEM_C
+#include "hmem_public.h"
+#else /* __INLINE_HMEM__ */
+#define STORAGE_CLASS_HMEM_H STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_HMEM_C STORAGE_CLASS_INLINE
+#include "hmem_private.h"
+#endif /* __INLINE_HMEM__ */
+
+#endif /* __HMEM_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/csi_rx_public.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/csi_rx_public.h
new file mode 100644
index 000000000000..396240954bed
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/csi_rx_public.h
@@ -0,0 +1,135 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __CSI_RX_PUBLIC_H_INCLUDED__
+#define __CSI_RX_PUBLIC_H_INCLUDED__
+
+#ifdef USE_INPUT_SYSTEM_VERSION_2401
+/*****************************************************
+ *
+ * Native command interface (NCI).
+ *
+ *****************************************************/
+/**
+ * @brief Get the csi rx frontend state.
+ * Get the state of the csi rx frontend register-set.
+ *
+ * @param[in] id The global unique ID of the csi rx fe controller.
+ * @param[out] state Point to the register-state.
+ */
+STORAGE_CLASS_CSI_RX_H void csi_rx_fe_ctrl_get_state(
+ const csi_rx_frontend_ID_t ID,
+ csi_rx_fe_ctrl_state_t *state);
+/**
+ * @brief Dump the csi rx frontend state.
+ * Dump the state of the csi rx frontend register-set.
+ *
+ * @param[in] id The global unique ID of the csi rx fe controller.
+ * @param[in] state Point to the register-state.
+ */
+STORAGE_CLASS_CSI_RX_H void csi_rx_fe_ctrl_dump_state(
+ const csi_rx_frontend_ID_t ID,
+ csi_rx_fe_ctrl_state_t *state);
+/**
+ * @brief Get the state of the csi rx fe dlane.
+ * Get the state of the register set per dlane process.
+ *
+ * @param[in] id The global unique ID of the input-buffer controller.
+ * @param[in] lane The lane ID.
+ * @param[out] state Point to the dlane state.
+ */
+STORAGE_CLASS_CSI_RX_H void csi_rx_fe_ctrl_get_dlane_state(
+ const csi_rx_frontend_ID_t ID,
+ const uint32_t lane,
+ csi_rx_fe_ctrl_lane_t *dlane_state);
+/**
+ * @brief Get the csi rx backend state.
+ * Get the state of the csi rx backend register-set.
+ *
+ * @param[in] id The global unique ID of the csi rx be controller.
+ * @param[out] state Point to the register-state.
+ */
+STORAGE_CLASS_CSI_RX_H void csi_rx_be_ctrl_get_state(
+ const csi_rx_backend_ID_t ID,
+ csi_rx_be_ctrl_state_t *state);
+/**
+ * @brief Dump the csi rx backend state.
+ * Dump the state of the csi rx backend register-set.
+ *
+ * @param[in] id The global unique ID of the csi rx be controller.
+ * @param[in] state Point to the register-state.
+ */
+STORAGE_CLASS_CSI_RX_H void csi_rx_be_ctrl_dump_state(
+ const csi_rx_backend_ID_t ID,
+ csi_rx_be_ctrl_state_t *state);
+/** end of NCI */
+
+/*****************************************************
+ *
+ * Device level interface (DLI).
+ *
+ *****************************************************/
+/**
+ * @brief Load the register value.
+ * Load the value of the register of the csi rx fe.
+ *
+ * @param[in] ID The global unique ID for the ibuf-controller instance.
+ * @param[in] reg The offset address of the register.
+ *
+ * @return the value of the register.
+ */
+STORAGE_CLASS_CSI_RX_H hrt_data csi_rx_fe_ctrl_reg_load(
+ const csi_rx_frontend_ID_t ID,
+ const hrt_address reg);
+/**
+ * @brief Store a value to the register.
+ * Store a value to the register of the csi rx fe.
+ *
+ * @param[in] ID The global unique ID for the ibuf-controller instance.
+ * @param[in] reg The offset address of the register.
+ * @param[in] value The value to be stored.
+ *
+ */
+STORAGE_CLASS_CSI_RX_H void csi_rx_fe_ctrl_reg_store(
+ const csi_rx_frontend_ID_t ID,
+ const hrt_address reg,
+ const hrt_data value);
+/**
+ * @brief Load the register value.
+ * Load the value of the register of the csi rx be.
+ *
+ * @param[in] ID The global unique ID for the ibuf-controller instance.
+ * @param[in] reg The offset address of the register.
+ *
+ * @return the value of the register.
+ */
+STORAGE_CLASS_CSI_RX_H hrt_data csi_rx_be_ctrl_reg_load(
+ const csi_rx_backend_ID_t ID,
+ const hrt_address reg);
+/**
+ * @brief Store a value to the register.
+ * Store a value to the register of the csi rx be.
+ *
+ * @param[in] ID The global unique ID for the ibuf-controller instance.
+ * @param[in] reg The offset address of the register.
+ * @param[in] value The value to be stored.
+ *
+ */
+STORAGE_CLASS_CSI_RX_H void csi_rx_be_ctrl_reg_store(
+ const csi_rx_backend_ID_t ID,
+ const hrt_address reg,
+ const hrt_data value);
+/** end of DLI */
+#endif /* USE_INPUT_SYSTEM_VERSION_2401 */
+#endif /* __CSI_RX_PUBLIC_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/debug_public.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/debug_public.h
new file mode 100644
index 000000000000..90b4ba7e023f
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/debug_public.h
@@ -0,0 +1,99 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __DEBUG_PUBLIC_H_INCLUDED__
+#define __DEBUG_PUBLIC_H_INCLUDED__
+
+#include <type_support.h>
+#include "system_types.h"
+
+/*! brief
+ *
+ * Simple queuing trace buffer for debug data
+ * instantiatable in SP DMEM
+ *
+ * The buffer has a remote and a local store
+ * which contain duplicate data (when in sync).
+ * The buffers are automatically synched when the
+ * user dequeues, or manually using the synch function
+ *
+ * An alternative (storage efficient) implementation
+ * could manage the buffers to contain unique data
+ *
+ * The buffer empty status is computed from local
+ * state which does not reflect the presence of data
+ * in the remote buffer (unless the alternative
+ * implementation is followed)
+ */
+
+typedef struct debug_data_s debug_data_t;
+typedef struct debug_data_ddr_s debug_data_ddr_t;
+
+extern debug_data_t *debug_data_ptr;
+extern hrt_address debug_buffer_address;
+extern hrt_vaddress debug_buffer_ddr_address;
+
+/*! Check the empty state of the local debug data buffer
+
+ \return isEmpty(buffer)
+ */
+STORAGE_CLASS_DEBUG_H bool is_debug_buffer_empty(void);
+
+/*! Dequeue a token from the debug data buffer
+
+ \return isEmpty(buffer)?0:buffer[head]
+ */
+STORAGE_CLASS_DEBUG_H hrt_data debug_dequeue(void);
+
+/*! Synchronise the remote buffer to the local buffer
+
+ \return none
+ */
+STORAGE_CLASS_DEBUG_H void debug_synch_queue(void);
+
+/*! Synchronise the remote buffer to the local buffer
+
+ \return none
+ */
+STORAGE_CLASS_DEBUG_H void debug_synch_queue_isp(void);
+
+
+/*! Synchronise the remote buffer to the local buffer
+
+ \return none
+ */
+STORAGE_CLASS_DEBUG_H void debug_synch_queue_ddr(void);
+
+/*! Set the offset/address of the (remote) debug buffer
+
+ \return none
+ */
+extern void debug_buffer_init(
+ const hrt_address addr);
+
+/*! Set the offset/address of the (remote) debug buffer
+
+ \return none
+ */
+extern void debug_buffer_ddr_init(
+ const hrt_vaddress addr);
+
+/*! Set the (remote) operating mode of the debug buffer
+
+ \return none
+ */
+extern void debug_buffer_setmode(
+ const debug_buf_mode_t mode);
+
+#endif /* __DEBUG_PUBLIC_H_INCLUDED__ */
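A hedged host-side sketch of the queueing model described above: sync the remote store first, then drain the local buffer; the consumer callback is a placeholder supplied by the caller.

#include "debug_public.h"

/* Drain the trace buffer; debug_synch_queue() refreshes the local copy so
 * the empty check below also covers recently queued remote data. */
static void debug_drain(void (*consume_token)(hrt_data))
{
	debug_synch_queue();
	while (!is_debug_buffer_empty())
		consume_token(debug_dequeue());
}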
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/dma_public.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/dma_public.h
new file mode 100644
index 000000000000..1d5e38ffe938
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/dma_public.h
@@ -0,0 +1,73 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __DMA_PUBLIC_H_INCLUDED__
+#define __DMA_PUBLIC_H_INCLUDED__
+
+#include "system_types.h"
+
+typedef struct dma_state_s dma_state_t;
+
+/*! Read the control registers of DMA[ID]
+
+ \param ID[in] DMA identifier
+ \param state[out] DMA state structure
+
+ \return none, state = DMA[ID].state
+ */
+extern void dma_get_state(
+ const dma_ID_t ID,
+ dma_state_t *state);
+
+/*! Write to a control register of DMA[ID]
+
+ \param ID[in] DMA identifier
+ \param reg[in] register index
+ \param value[in] The data to be written
+
+ \return none, DMA[ID].ctrl[reg] = value
+ */
+STORAGE_CLASS_DMA_H void dma_reg_store(
+ const dma_ID_t ID,
+ const unsigned int reg,
+ const hrt_data value);
+
+/*! Read from a control register of DMA[ID]
+
+ \param ID[in] DMA identifier
+ \param reg[in] register index
+
+ \return DMA[ID].ctrl[reg]
+ */
+STORAGE_CLASS_DMA_H hrt_data dma_reg_load(
+ const dma_ID_t ID,
+ const unsigned int reg);
+
+
+/*! Set maximum burst size of DMA[ID]
+
+ \param ID[in] DMA identifier
+ \param conn[in] Connection to set max burst size for
+ \param max_burst_size[in] Maximum burst size in words
+
+ \return none
+*/
+void
+dma_set_max_burst_size(
+ dma_ID_t ID,
+ dma_connection conn,
+ uint32_t max_burst_size);
+
+#endif /* __DMA_PUBLIC_H_INCLUDED__ */
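A short sketch of the DMA helpers above; DMA0_ID, the connection selector and the burst size are illustrative assumptions, not values taken from this header.

#include "dma_public.h"

static void dma_tune_example(void)
{
	dma_state_t state;

	/* Snapshot the control registers, then cap the burst size on one
	 * connection (DMA_EXAMPLE_CONNECTION and 16 are placeholders). */
	dma_get_state(DMA0_ID, &state);
	dma_set_max_burst_size(DMA0_ID, DMA_EXAMPLE_CONNECTION, 16);
}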
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/event_fifo_public.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/event_fifo_public.h
new file mode 100644
index 000000000000..d95bc7070f4c
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/event_fifo_public.h
@@ -0,0 +1,79 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __EVENT_FIFO_PUBLIC_H
+#define __EVENT_FIFO_PUBLIC_H
+
+#include <type_support.h>
+#include "system_types.h"
+
+/*! Blocking read from an event source EVENT[ID]
+
+ \param ID[in] EVENT identifier
+
+ \return none, dequeue(event_queue[ID])
+ */
+STORAGE_CLASS_EVENT_H void event_wait_for(
+ const event_ID_t ID);
+
+/*! Conditional blocking wait for an event source EVENT[ID]
+
+ \param ID[in] EVENT identifier
+ \param cnd[in] predicate
+
+ \return none, if(cnd) dequeue(event_queue[ID])
+ */
+STORAGE_CLASS_EVENT_H void cnd_event_wait_for(
+ const event_ID_t ID,
+ const bool cnd);
+
+/*! Blocking read from an event source EVENT[ID]
+
+ \param ID[in] EVENT identifier
+
+ \return dequeue(event_queue[ID])
+ */
+STORAGE_CLASS_EVENT_H hrt_data event_receive_token(
+ const event_ID_t ID);
+
+/*! Blocking write to an event sink EVENT[ID]
+
+ \param ID[in] EVENT identifier
+ \param token[in] token to be written on the event
+
+ \return none, enqueue(event_queue[ID])
+ */
+STORAGE_CLASS_EVENT_H void event_send_token(
+ const event_ID_t ID,
+ const hrt_data token);
+
+/*! Query an event source EVENT[ID]
+
+ \param ID[in] EVENT identifier
+
+ \return !isempty(event_queue[ID])
+ */
+STORAGE_CLASS_EVENT_H bool is_event_pending(
+ const event_ID_t ID);
+
+/*! Query an event sink EVENT[ID]
+
+ \param ID[in] EVENT identifier
+
+ \return !isfull(event_queue[ID])
+ */
+STORAGE_CLASS_EVENT_H bool can_event_send_token(
+ const event_ID_t ID);
+
+#endif /* __EVENT_FIFO_PUBLIC_H */
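A non-blocking echo sketch built from the queries above; EVENT0_ID is a placeholder identifier.

#include "event_fifo_public.h"

/* Forward one token only if it can be done without blocking: the predicates
 * guard the blocking receive/send calls. */
static bool event_try_echo(void)
{
	hrt_data token;

	if (!is_event_pending(EVENT0_ID) || !can_event_send_token(EVENT0_ID))
		return false;

	token = event_receive_token(EVENT0_ID);
	event_send_token(EVENT0_ID, token);
	return true;
}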
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/fifo_monitor_public.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/fifo_monitor_public.h
new file mode 100644
index 000000000000..329f5d5049f2
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/fifo_monitor_public.h
@@ -0,0 +1,110 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __FIFO_MONITOR_PUBLIC_H_INCLUDED__
+#define __FIFO_MONITOR_PUBLIC_H_INCLUDED__
+
+#include "system_types.h"
+
+typedef struct fifo_channel_state_s fifo_channel_state_t;
+typedef struct fifo_switch_state_s fifo_switch_state_t;
+typedef struct fifo_monitor_state_s fifo_monitor_state_t;
+
+/*! Set a fifo switch multiplex
+
+ \param ID[in] FIFO_MONITOR identifier
+ \param switch_id[in] fifo switch identifier
+ \param sel[in] fifo switch selector
+
+ \return none, fifo_switch[switch_id].sel = sel
+ */
+STORAGE_CLASS_FIFO_MONITOR_H void fifo_switch_set(
+ const fifo_monitor_ID_t ID,
+ const fifo_switch_t switch_id,
+ const hrt_data sel);
+
+/*! Get a fifo switch multiplex
+
+ \param ID[in] FIFO_MONITOR identifier
+ \param switch_id[in] fifo switch identifier
+
+ \return fifo_switch[switch_id].sel
+ */
+STORAGE_CLASS_FIFO_MONITOR_H hrt_data fifo_switch_get(
+ const fifo_monitor_ID_t ID,
+ const fifo_switch_t switch_id);
+
+/*! Read the state of FIFO_MONITOR[ID]
+
+ \param ID[in] FIFO_MONITOR identifier
+ \param state[out] fifo monitor state structure
+
+ \return none, state = FIFO_MONITOR[ID].state
+ */
+extern void fifo_monitor_get_state(
+ const fifo_monitor_ID_t ID,
+ fifo_monitor_state_t *state);
+
+/*! Read the state of a fifo channel
+
+ \param ID[in] FIFO_MONITOR identifier
+ \param channel_id[in] fifo channel identifier
+ \param state[out] fifo channel state structure
+
+ \return none, state = fifo_channel[channel_id].state
+ */
+extern void fifo_channel_get_state(
+ const fifo_monitor_ID_t ID,
+ const fifo_channel_t channel_id,
+ fifo_channel_state_t *state);
+
+/*! Read the state of a fifo switch
+
+ \param ID[in] FIFO_MONITOR identifier
+ \param switch_id[in] fifo switch identifier
+ \param state[out] fifo switch state structure
+
+ \return none, state = fifo_switch[switch_id].state
+ */
+extern void fifo_switch_get_state(
+ const fifo_monitor_ID_t ID,
+ const fifo_switch_t switch_id,
+ fifo_switch_state_t *state);
+
+/*! Write to a control register of FIFO_MONITOR[ID]
+
+ \param ID[in] FIFO_MONITOR identifier
+ \param reg[in] register index
+ \param value[in] The data to be written
+
+ \return none, FIFO_MONITOR[ID].ctrl[reg] = value
+ */
+STORAGE_CLASS_FIFO_MONITOR_H void fifo_monitor_reg_store(
+ const fifo_monitor_ID_t ID,
+ const unsigned int reg,
+ const hrt_data value);
+
+/*! Read from a control register of FIFO_MONITOR[ID]
+
+ \param ID[in] FIFO_MONITOR identifier
+ \param reg[in] register index
+
+ \return FIFO_MONITOR[ID].ctrl[reg]
+ */
+STORAGE_CLASS_FIFO_MONITOR_H hrt_data fifo_monitor_reg_load(
+ const fifo_monitor_ID_t ID,
+ const unsigned int reg);
+
+#endif /* __FIFO_MONITOR_PUBLIC_H_INCLUDED__ */
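A brief sketch pairing the switch setter/getter with a state read; FIFO_MONITOR0_ID and FIFO_SWITCH_EXAMPLE are placeholder identifiers.

#include "fifo_monitor_public.h"

static void fifo_switch_roundtrip(const hrt_data sel)
{
	fifo_switch_state_t sw_state;

	fifo_switch_set(FIFO_MONITOR0_ID, FIFO_SWITCH_EXAMPLE, sel);
	if (fifo_switch_get(FIFO_MONITOR0_ID, FIFO_SWITCH_EXAMPLE) != sel) {
		/* Unexpected readback: capture the switch state for debugging. */
		fifo_switch_get_state(FIFO_MONITOR0_ID, FIFO_SWITCH_EXAMPLE,
				      &sw_state);
	}
}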
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/gdc_public.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/gdc_public.h
new file mode 100644
index 000000000000..d27f87a719db
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/gdc_public.h
@@ -0,0 +1,59 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __GDC_PUBLIC_H_INCLUDED__
+#define __GDC_PUBLIC_H_INCLUDED__
+
+/*! Write the bicubic interpolation table of GDC[ID]
+
+ \param ID[in] GDC identifier
+ \param data[in] The data matrix to be written
+
+ \pre
+ - data must point to a matrix[4][HRT_GDC_N]
+
+ \implementation dependent
+ - The value of "HRT_GDC_N" is device specific
+ - The LUT should not be partially written
+ - The LUT format is a quadri-phase interpolation
+ table. The layout is device specific
+ - The range of the values data[n][m] is device
+ specific
+
+ \return none, GDC[ID].lut[0...3][0...HRT_GDC_N-1] = data
+ */
+STORAGE_CLASS_EXTERN void gdc_lut_store(
+ const gdc_ID_t ID,
+ const int data[4][HRT_GDC_N]);
+
+/*! Convert the bicubic interpolation table of GDC[ID] to the ISP-specific format
+
+ \param ID[in] GDC identifier
+ \param in_lut[in] The data matrix to be converted
+ \param out_lut[out] The data matrix as the output of conversion
+ */
+STORAGE_CLASS_EXTERN void gdc_lut_convert_to_isp_format(
+ const int in_lut[4][HRT_GDC_N],
+ int out_lut[4][HRT_GDC_N]);
+
+/*! Return the integer representation of 1.0 of GDC[ID]
+
+ \param ID[in] GDC identifier
+
+ \return unity
+ */
+STORAGE_CLASS_EXTERN int gdc_get_unity(
+ const gdc_ID_t ID);
+
+#endif /* __GDC_PUBLIC_H_INCLUDED__ */
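A sketch of the intended LUT flow: convert a host-built bicubic table to the ISP layout, then store it whole (the comment above warns against partial writes); GDC0_ID is a placeholder.

#include "gdc_public.h"

static void gdc_program_lut(const int host_lut[4][HRT_GDC_N])
{
	int isp_lut[4][HRT_GDC_N];

	gdc_lut_convert_to_isp_format(host_lut, isp_lut);
	gdc_lut_store(GDC0_ID, isp_lut);
}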
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/gp_device_public.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/gp_device_public.h
new file mode 100644
index 000000000000..acbce0fd658f
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/gp_device_public.h
@@ -0,0 +1,58 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __GP_DEVICE_PUBLIC_H_INCLUDED__
+#define __GP_DEVICE_PUBLIC_H_INCLUDED__
+
+#include "system_types.h"
+
+typedef struct gp_device_state_s gp_device_state_t;
+
+/*! Read the state of GP_DEVICE[ID]
+
+ \param ID[in] GP_DEVICE identifier
+ \param state[out] gp device state structure
+
+ \return none, state = GP_DEVICE[ID].state
+ */
+extern void gp_device_get_state(
+ const gp_device_ID_t ID,
+ gp_device_state_t *state);
+
+/*! Write to a control register of GP_DEVICE[ID]
+
+ \param ID[in] GP_DEVICE identifier
+ \param reg_addr[in] register byte address
+ \param value[in] The data to be written
+
+ \return none, GP_DEVICE[ID].ctrl[reg] = value
+ */
+STORAGE_CLASS_GP_DEVICE_H void gp_device_reg_store(
+ const gp_device_ID_t ID,
+ const unsigned int reg_addr,
+ const hrt_data value);
+
+/*! Read from a control register of GP_DEVICE[ID]
+
+ \param ID[in] GP_DEVICE identifier
+ \param reg_addr[in] register byte address
+
+ \return GP_DEVICE[ID].ctrl[reg]
+ */
+STORAGE_CLASS_GP_DEVICE_H hrt_data gp_device_reg_load(
+ const gp_device_ID_t ID,
+ const hrt_address reg_addr);
+
+#endif /* __GP_DEVICE_PUBLIC_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/gp_timer_public.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/gp_timer_public.h
new file mode 100644
index 000000000000..276e2fa9b1e7
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/gp_timer_public.h
@@ -0,0 +1,34 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __GP_TIMER_PUBLIC_H_INCLUDED__
+#define __GP_TIMER_PUBLIC_H_INCLUDED__
+
+#include "system_types.h"
+
+/*! Initialize the selected timer
+param ID timer_id
+*/
+extern void
+gp_timer_init(gp_timer_ID_t ID);
+
+
+/*! Read the timer value of the selected (platform-specific) timer.
+param ID timer_id
+ \return uint32_t 32 bit timer value
+*/
+extern uint32_t
+gp_timer_read(gp_timer_ID_t ID);
+
+#endif /* __GP_TIMER_PUBLIC_H_INCLUDED__ */
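A small sketch measuring an interval in timer ticks; GP_TIMER1_ID is a placeholder and the subtraction relies on 32-bit unsigned wrap-around.

#include "gp_timer_public.h"

static uint32_t gp_timer_measure(void (*workload)(void))
{
	uint32_t start, stop;

	gp_timer_init(GP_TIMER1_ID);
	start = gp_timer_read(GP_TIMER1_ID);
	workload();
	stop = gp_timer_read(GP_TIMER1_ID);
	return stop - start;
}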
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/gpio_public.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/gpio_public.h
new file mode 100644
index 000000000000..82eaa0d48bee
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/gpio_public.h
@@ -0,0 +1,45 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __GPIO_PUBLIC_H_INCLUDED__
+#define __GPIO_PUBLIC_H_INCLUDED__
+
+#include "system_types.h"
+
+/*! Write to a control register of GPIO[ID]
+
+ \param ID[in] GPIO identifier
+ \param reg_addr[in] register byte address
+ \param value[in] The data to be written
+
+ \return none, GPIO[ID].ctrl[reg] = value
+ */
+STORAGE_CLASS_GPIO_H void gpio_reg_store(
+ const gpio_ID_t ID,
+ const unsigned int reg_addr,
+ const hrt_data value);
+
+/*! Read from a control register of GPIO[ID]
+
+ \param ID[in] GPIO identifier
+ \param reg_addr[in] register byte address
+
+ \return GPIO[ID].ctrl[reg]
+ */
+STORAGE_CLASS_GPIO_H hrt_data gpio_reg_load(
+ const gpio_ID_t ID,
+ const unsigned int reg_addr);
+
+#endif /* __GPIO_PUBLIC_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/hmem_public.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/hmem_public.h
new file mode 100644
index 000000000000..9b8e7c92442d
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/hmem_public.h
@@ -0,0 +1,32 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __HMEM_PUBLIC_H_INCLUDED__
+#define __HMEM_PUBLIC_H_INCLUDED__
+
+#include <stddef.h> /* size_t */
+
+/*! Return the size of HMEM[ID]
+
+ \param ID[in] HMEM identifier
+
+ \Note: The size is the byte size of the area it occupies
+ in the address map. I.e. disregarding internal structure
+
+ \return sizeof(HMEM[ID])
+ */
+STORAGE_CLASS_HMEM_H size_t sizeof_hmem(
+ const hmem_ID_t ID);
+
+#endif /* __HMEM_PUBLIC_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/ibuf_ctrl_public.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/ibuf_ctrl_public.h
new file mode 100644
index 000000000000..1ac0e64e539c
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/ibuf_ctrl_public.h
@@ -0,0 +1,93 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IBUF_CTRL_PUBLIC_H_INCLUDED__
+#define __IBUF_CTRL_PUBLIC_H_INCLUDED__
+
+#ifdef USE_INPUT_SYSTEM_VERSION_2401
+/*****************************************************
+ *
+ * Native command interface (NCI).
+ *
+ *****************************************************/
+/**
+ * @brief Get the ibuf-controller state.
+ * Get the state of the ibuf-controller register-set.
+ *
+ * @param[in] ID The global unique ID of the input-buffer controller.
+ * @param[out] state Pointer to the register-state.
+ */
+STORAGE_CLASS_IBUF_CTRL_H void ibuf_ctrl_get_state(
+ const ibuf_ctrl_ID_t ID,
+ ibuf_ctrl_state_t *state);
+
+/**
+ * @brief Get the state of the ibuf-controller process.
+ * Get the state of the register set per ibuf-controller process.
+ *
+ * @param[in] ID The global unique ID of the input-buffer controller.
+ * @param[in] proc_id The process ID.
+ * @param[out] state Pointer to the process state.
+ */
+STORAGE_CLASS_IBUF_CTRL_H void ibuf_ctrl_get_proc_state(
+ const ibuf_ctrl_ID_t ID,
+ const uint32_t proc_id,
+ ibuf_ctrl_proc_state_t *state);
+/**
+ * @brief Dump the ibuf-controller state.
+ * Dump the state of the ibuf-controller register-set.
+ *
+ * @param[in] ID The global unique ID of the input-buffer controller.
+ * @param[in] state Pointer to the register-state.
+ */
+STORAGE_CLASS_IBUF_CTRL_H void ibuf_ctrl_dump_state(
+ const ibuf_ctrl_ID_t ID,
+ ibuf_ctrl_state_t *state);
+/** end of NCI */
+
+/*****************************************************
+ *
+ * Device level interface (DLI).
+ *
+ *****************************************************/
+/**
+ * @brief Load the register value.
+ * Load the value of the register of the ibuf-controller.
+ *
+ * @param[in] ID The global unique ID for the ibuf-controller instance.
+ * @param[in] reg The offset address of the register.
+ *
+ * @return the value of the register.
+ */
+STORAGE_CLASS_IBUF_CTRL_H hrt_data ibuf_ctrl_reg_load(
+ const ibuf_ctrl_ID_t ID,
+ const hrt_address reg);
+
+/**
+ * @brief Store a value to the register.
+ * Store a value to the register of the ibuf-controller.
+ *
+ * @param[in] ID The global unique ID for the ibuf-controller instance.
+ * @param[in] reg The offset address of the register.
+ * @param[in] value The value to be stored.
+ *
+ */
+STORAGE_CLASS_IBUF_CTRL_H void ibuf_ctrl_reg_store(
+ const ibuf_ctrl_ID_t ID,
+ const hrt_address reg,
+ const hrt_data value);
+/** end of DLI */
+
+#endif /* USE_INPUT_SYSTEM_VERSION_2401 */
+#endif /* __IBUF_CTRL_PUBLIC_H_INCLUDED__ */
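A hedged sketch of the NCI calls above, reading both the controller state and one process state before dumping; IBUF_CTRL0_ID is a placeholder.

#include "ibuf_ctrl_public.h"

static void ibuf_ctrl_debug(const uint32_t proc_id)
{
	ibuf_ctrl_state_t state;
	ibuf_ctrl_proc_state_t proc_state;

	ibuf_ctrl_get_state(IBUF_CTRL0_ID, &state);
	ibuf_ctrl_get_proc_state(IBUF_CTRL0_ID, proc_id, &proc_state);
	ibuf_ctrl_dump_state(IBUF_CTRL0_ID, &state);
}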
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/input_formatter_public.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/input_formatter_public.h
new file mode 100644
index 000000000000..2db70893daf9
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/input_formatter_public.h
@@ -0,0 +1,115 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __INPUT_FORMATTER_PUBLIC_H_INCLUDED__
+#define __INPUT_FORMATTER_PUBLIC_H_INCLUDED__
+
+#include <type_support.h>
+#include "system_types.h"
+
+/*! Reset INPUT_FORMATTER[ID]
+
+ \param ID[in] INPUT_FORMATTER identifier
+
+ \return none, reset(INPUT_FORMATTER[ID])
+ */
+extern void input_formatter_rst(
+ const input_formatter_ID_t ID);
+
+/*! Set the blocking mode of INPUT_FORMATTER[ID]
+
+ \param ID[in] INPUT_FORMATTER identifier
+ \param enable[in] blocking enable flag
+
+ \use
+ - In HW, the capture unit will deliver an infinite stream of frames,
+ the input formatter will synchronise on the first SOF. In simulation
+ there are only a fixed number of frames, presented only once. By
+   enabling blocking the input formatter will wait on the first presented
+   frame, thus avoiding a race in the simulation setup.
+
+ \return none, INPUT_FORMATTER[ID].blocking_mode = enable
+ */
+extern void input_formatter_set_fifo_blocking_mode(
+ const input_formatter_ID_t ID,
+ const bool enable);
+
+/*! Return the data alignment of INPUT_FORMATTER[ID]
+
+ \param ID[in] INPUT_FORMATTER identifier
+
+ \return alignment(INPUT_FORMATTER[ID].data)
+ */
+extern unsigned int input_formatter_get_alignment(
+ const input_formatter_ID_t ID);
+
+/*! Read the source switch state into INPUT_FORMATTER[ID]
+
+ \param ID[in] INPUT_FORMATTER identifier
+ \param state[out] input formatter switch state structure
+
+ \return none, state = INPUT_FORMATTER[ID].switch_state
+ */
+extern void input_formatter_get_switch_state(
+ const input_formatter_ID_t ID,
+ input_formatter_switch_state_t *state);
+
+/*! Read the control registers of INPUT_FORMATTER[ID]
+
+ \param ID[in] INPUT_FORMATTER identifier
+ \param state[out] input formatter state structure
+
+ \return none, state = INPUT_FORMATTER[ID].state
+ */
+extern void input_formatter_get_state(
+ const input_formatter_ID_t ID,
+ input_formatter_state_t *state);
+
+/*! Read the control registers of bin copy INPUT_FORMATTER[ID]
+
+ \param ID[in] INPUT_FORMATTER identifier
+ \param state[out] input formatter state structure
+
+ \return none, state = INPUT_FORMATTER[ID].state
+ */
+extern void input_formatter_bin_get_state(
+ const input_formatter_ID_t ID,
+ input_formatter_bin_state_t *state);
+
+/*! Write to a control register of INPUT_FORMATTER[ID]
+
+ \param ID[in] INPUT_FORMATTER identifier
+ \param reg_addr[in] register byte address
+ \param value[in] The data to be written
+
+ \return none, INPUT_FORMATTER[ID].ctrl[reg] = value
+ */
+STORAGE_CLASS_INPUT_FORMATTER_H void input_formatter_reg_store(
+ const input_formatter_ID_t ID,
+ const hrt_address reg_addr,
+ const hrt_data value);
+
+/*! Read from a control register of INPUT_FORMATTER[ID]
+
+ \param ID[in] INPUT_FORMATTER identifier
+ \param reg_addr[in] register byte address
+
+ \return INPUT_FORMATTER[ID].ctrl[reg]
+ */
+STORAGE_CLASS_INPUT_FORMATTER_H hrt_data input_formatter_reg_load(
+ const input_formatter_ID_t ID,
+ const unsigned int reg_addr);
+
+#endif /* __INPUT_FORMATTER_PUBLIC_H_INCLUDED__ */
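A sketch of the bring-up order implied by the comments above: reset the formatter, then enable FIFO blocking only for simulation runs; INPUT_FORMATTER0_ID is a placeholder.

#include "input_formatter_public.h"

static void input_formatter_setup(const bool is_simulation)
{
	input_formatter_rst(INPUT_FORMATTER0_ID);
	input_formatter_set_fifo_blocking_mode(INPUT_FORMATTER0_ID,
					       is_simulation);
}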
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/input_system_public.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/input_system_public.h
new file mode 100644
index 000000000000..1596757fe9ef
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/input_system_public.h
@@ -0,0 +1,376 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __INPUT_SYSTEM_PUBLIC_H_INCLUDED__
+#define __INPUT_SYSTEM_PUBLIC_H_INCLUDED__
+
+#include <type_support.h>
+#ifdef USE_INPUT_SYSTEM_VERSION_2401
+#include "isys_public.h"
+#else
+
+typedef struct input_system_state_s input_system_state_t;
+typedef struct receiver_state_s receiver_state_t;
+
+/*! Read the state of INPUT_SYSTEM[ID]
+
+ \param ID[in] INPUT_SYSTEM identifier
+ \param state[out] input system state structure
+
+ \return none, state = INPUT_SYSTEM[ID].state
+ */
+extern void input_system_get_state(
+ const input_system_ID_t ID,
+ input_system_state_t *state);
+
+/*! Read the state of RECEIVER[ID]
+
+ \param ID[in] RECEIVER identifier
+ \param state[out] receiver state structure
+
+ \return none, state = RECEIVER[ID].state
+ */
+extern void receiver_get_state(
+ const rx_ID_t ID,
+ receiver_state_t *state);
+
+/*! Flag whether a MIPI format is YUV420
+
+ \param mipi_format[in] MIPI format
+
+ \return mipi_format == YUV420
+ */
+extern bool is_mipi_format_yuv420(
+ const mipi_format_t mipi_format);
+
+/*! Set compression parameters for cfg[cfg_ID] of RECEIVER[ID]
+
+ \param ID[in] RECEIVER identifier
+ \param cfg_ID[in] Configuration identifier
+ \param comp[in] Compression method
+ \param pred[in] Predictor method
+
+ \NOTE: the storage of compression configuration is
+ implementation specific. The config can be
+ carried either on MIPI ports or on MIPI channels
+
+ \return none, RECEIVER[ID].cfg[cfg_ID] = {comp, pred}
+ */
+extern void receiver_set_compression(
+ const rx_ID_t ID,
+ const unsigned int cfg_ID,
+ const mipi_compressor_t comp,
+ const mipi_predictor_t pred);
+
+/*! Enable PORT[port_ID] of RECEIVER[ID]
+
+ \param ID[in] RECEIVER identifier
+ \param port_ID[in] mipi PORT identifier
+ \param cnd[in] irq predicate
+
+ \return None, enable(RECEIVER[ID].PORT[port_ID])
+ */
+extern void receiver_port_enable(
+ const rx_ID_t ID,
+ const mipi_port_ID_t port_ID,
+ const bool cnd);
+
+/*! Flag if PORT[port_ID] of RECEIVER[ID] is enabled
+
+ \param ID[in] RECEIVER identifier
+ \param port_ID[in] mipi PORT identifier
+
+ \return enable(RECEIVER[ID].PORT[port_ID]) == true
+ */
+extern bool is_receiver_port_enabled(
+ const rx_ID_t ID,
+ const mipi_port_ID_t port_ID);
+
+/*! Enable the IRQ channels of PORT[port_ID] of RECEIVER[ID]
+
+ \param ID[in] RECEIVER identifier
+ \param port_ID[in] mipi PORT identifier
+ \param irq_info[in] irq channels
+
+ \return None, enable(RECEIVER[ID].PORT[port_ID].irq_info)
+ */
+extern void receiver_irq_enable(
+ const rx_ID_t ID,
+ const mipi_port_ID_t port_ID,
+ const rx_irq_info_t irq_info);
+
+/*! Return the IRQ status of PORT[port_ID] of RECEIVER[ID]
+
+ \param ID[in] RECEIVER identifier
+ \param port_ID[in] mipi PORT identifier
+
+ \return RECEIVER[ID].PORT[port_ID].irq_info
+ */
+extern rx_irq_info_t receiver_get_irq_info(
+ const rx_ID_t ID,
+ const mipi_port_ID_t port_ID);
+
+/*! Clear the IRQ status of PORT[port_ID] of RECEIVER[ID]
+
+ \param ID[in] RECEIVER identifier
+ \param port_ID[in] mipi PORT identifier
+ \param irq_info[in] irq status
+
+ \return None, clear(RECEIVER[ID].PORT[port_ID].irq_info)
+ */
+extern void receiver_irq_clear(
+ const rx_ID_t ID,
+ const mipi_port_ID_t port_ID,
+ const rx_irq_info_t irq_info);
+
+/*! Write to a control register of INPUT_SYSTEM[ID]
+
+ \param ID[in] INPUT_SYSTEM identifier
+ \param reg[in] register index
+ \param value[in] The data to be written
+
+ \return none, INPUT_SYSTEM[ID].ctrl[reg] = value
+ */
+STORAGE_CLASS_INPUT_SYSTEM_H void input_system_reg_store(
+ const input_system_ID_t ID,
+ const hrt_address reg,
+ const hrt_data value);
+
+/*! Read from a control register of INPUT_SYSTEM[ID]
+
+ \param ID[in] INPUT_SYSTEM identifier
+ \param reg[in] register index
+
+ \return INPUT_SYSTEM[ID].ctrl[reg]
+ */
+STORAGE_CLASS_INPUT_SYSTEM_H hrt_data input_system_reg_load(
+ const input_system_ID_t ID,
+ const hrt_address reg);
+
+/*! Write to a control register of RECEIVER[ID]
+
+ \param ID[in] RECEIVER identifier
+ \param reg[in] register index
+ \param value[in] The data to be written
+
+ \return none, RECEIVER[ID].ctrl[reg] = value
+ */
+STORAGE_CLASS_INPUT_SYSTEM_H void receiver_reg_store(
+ const rx_ID_t ID,
+ const hrt_address reg,
+ const hrt_data value);
+
+/*! Read from a control register of RECEIVER[ID]
+
+ \param ID[in] RECEIVER identifier
+ \param reg[in] register index
+
+ \return RECEIVER[ID].ctrl[reg]
+ */
+STORAGE_CLASS_INPUT_SYSTEM_H hrt_data receiver_reg_load(
+ const rx_ID_t ID,
+ const hrt_address reg);
+
+/*! Write to a control register of PORT[port_ID] of RECEIVER[ID]
+
+ \param ID[in] RECEIVER identifier
+ \param port_ID[in] mipi PORT identifier
+ \param reg[in] register index
+ \param value[in] The data to be written
+
+ \return none, RECEIVER[ID].PORT[port_ID].ctrl[reg] = value
+ */
+STORAGE_CLASS_INPUT_SYSTEM_H void receiver_port_reg_store(
+ const rx_ID_t ID,
+ const mipi_port_ID_t port_ID,
+ const hrt_address reg,
+ const hrt_data value);
+
+/*! Read from a control register of PORT[port_ID] of RECEIVER[ID]
+
+ \param ID[in] RECEIVER identifier
+ \param port_ID[in] mipi PORT identifier
+ \param reg[in] register index
+
+ \return RECEIVER[ID].PORT[port_ID].ctrl[reg]
+ */
+STORAGE_CLASS_INPUT_SYSTEM_H hrt_data receiver_port_reg_load(
+ const rx_ID_t ID,
+ const mipi_port_ID_t port_ID,
+ const hrt_address reg);
+
+/*! Write to a control register of SUB_SYSTEM[sub_ID] of INPUT_SYSTEM[ID]
+
+ \param ID[in] INPUT_SYSTEM identifier
+ \param sub_ID[in] sub system identifier
+ \param reg[in] register index
+ \param value[in] The data to be written
+
+ \return none, INPUT_SYSTEM[ID].SUB_SYSTEM[sub_ID].ctrl[reg] = value
+ */
+STORAGE_CLASS_INPUT_SYSTEM_H void input_system_sub_system_reg_store(
+ const input_system_ID_t ID,
+ const sub_system_ID_t sub_ID,
+ const hrt_address reg,
+ const hrt_data value);
+
+/*! Read from a control register of SUB_SYSTEM[sub_ID] of INPUT_SYSTEM[ID]
+
+ \param ID[in] INPUT_SYSTEM identifier
+ \param sub_ID[in] sub system identifier
+ \param reg[in] register index
+
+ \return INPUT_SYSTEM[ID].SUB_SYSTEM[sub_ID].ctrl[reg]
+ */
+STORAGE_CLASS_INPUT_SYSTEM_H hrt_data input_system_sub_system_reg_load(
+ const input_system_ID_t ID,
+ const sub_system_ID_t sub_ID,
+ const hrt_address reg);
+
+
+
+///////////////////////////////////////////////////////////////////////////
+//
+// Functions for configuration phase on input system.
+//
+///////////////////////////////////////////////////////////////////////////
+
+// Function that resets current configuration.
+// remove the argument since it should be private.
+input_system_error_t input_system_configuration_reset(void);
+
+// Function that commits current configuration.
+// remove the argument since it should be private.
+input_system_error_t input_system_configuration_commit(void);
+
+///////////////////////////////////////////////////////////////////////////
+//
+// User functions:
+// (encoded generic function)
+// - no checking
+//   - decoding name and arguments into the generic (channel) configuration
+// function.
+//
+///////////////////////////////////////////////////////////////////////////
+
+
+// FIFO channel config function user
+
+input_system_error_t input_system_csi_fifo_channel_cfg(
+ uint32_t ch_id,
+ input_system_csi_port_t port,
+ backend_channel_cfg_t backend_ch,
+ target_cfg2400_t target
+);
+
+input_system_error_t input_system_csi_fifo_channel_with_counting_cfg(
+ uint32_t ch_id,
+ uint32_t nof_frame,
+ input_system_csi_port_t port,
+ backend_channel_cfg_t backend_ch,
+ uint32_t mem_region_size,
+ uint32_t nof_mem_regions,
+ target_cfg2400_t target
+);
+
+
+// SRAM channel config function user
+
+input_system_error_t input_system_csi_sram_channel_cfg(
+ uint32_t ch_id,
+ input_system_csi_port_t port,
+ backend_channel_cfg_t backend_ch,
+ uint32_t csi_mem_region_size,
+ uint32_t csi_nof_mem_regions,
+ target_cfg2400_t target
+);
+
+
+//XMEM channel config function user
+
+input_system_error_t input_system_csi_xmem_channel_cfg(
+ uint32_t ch_id,
+ input_system_csi_port_t port,
+ backend_channel_cfg_t backend_ch,
+ uint32_t mem_region_size,
+ uint32_t nof_mem_regions,
+ uint32_t acq_mem_region_size,
+ uint32_t acq_nof_mem_regions,
+ target_cfg2400_t target,
+ uint32_t nof_xmem_buffers
+);
+
+input_system_error_t input_system_csi_xmem_capture_only_channel_cfg(
+ uint32_t ch_id,
+ uint32_t nof_frames,
+ input_system_csi_port_t port,
+ uint32_t csi_mem_region_size,
+ uint32_t csi_nof_mem_regions,
+ uint32_t acq_mem_region_size,
+ uint32_t acq_nof_mem_regions,
+ target_cfg2400_t target
+);
+
+input_system_error_t input_system_csi_xmem_acquire_only_channel_cfg(
+ uint32_t ch_id,
+ uint32_t nof_frames,
+ input_system_csi_port_t port,
+ backend_channel_cfg_t backend_ch,
+ uint32_t acq_mem_region_size,
+ uint32_t acq_nof_mem_regions,
+ target_cfg2400_t target
+);
+
+// Non - CSI channel config function user
+
+input_system_error_t input_system_prbs_channel_cfg(
+ uint32_t ch_id,
+ uint32_t nof_frames,
+ uint32_t seed,
+ uint32_t sync_gen_width,
+ uint32_t sync_gen_height,
+ uint32_t sync_gen_hblank_cycles,
+ uint32_t sync_gen_vblank_cycles,
+ target_cfg2400_t target
+);
+
+
+input_system_error_t input_system_tpg_channel_cfg(
+ uint32_t ch_id,
+ uint32_t nof_frames,//not used yet
+ uint32_t x_mask,
+ uint32_t y_mask,
+ uint32_t x_delta,
+ uint32_t y_delta,
+ uint32_t xy_mask,
+ uint32_t sync_gen_width,
+ uint32_t sync_gen_height,
+ uint32_t sync_gen_hblank_cycles,
+ uint32_t sync_gen_vblank_cycles,
+ target_cfg2400_t target
+);
+
+
+input_system_error_t input_system_gpfifo_channel_cfg(
+ uint32_t ch_id,
+ uint32_t nof_frames,
+ target_cfg2400_t target
+);
+#endif /* #ifdef USE_INPUT_SYSTEM_VERSION_2401 */
+
+#endif /* __INPUT_SYSTEM_PUBLIC_H_INCLUDED__ */
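A hedged sketch of the configuration phase for the 2400 input system: reset, describe one PRBS channel, then commit. The numeric parameters and the INPUT_SYSTEM_ERR_NO_ERROR enumerator are assumptions used only for illustration.

#include "input_system_public.h"

static input_system_error_t input_system_prbs_example(target_cfg2400_t target)
{
	input_system_error_t err;

	err = input_system_configuration_reset();
	if (err != INPUT_SYSTEM_ERR_NO_ERROR)
		return err;

	err = input_system_prbs_channel_cfg(
		0,		/* ch_id */
		1,		/* nof_frames */
		42,		/* seed */
		640, 480,	/* sync gen width, height */
		16, 8,		/* hblank, vblank cycles */
		target);
	if (err != INPUT_SYSTEM_ERR_NO_ERROR)
		return err;

	return input_system_configuration_commit();
}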
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/irq_public.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/irq_public.h
new file mode 100644
index 000000000000..9aeaf8f082d2
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/irq_public.h
@@ -0,0 +1,184 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IRQ_PUBLIC_H_INCLUDED__
+#define __IRQ_PUBLIC_H_INCLUDED__
+
+#include <type_support.h>
+#include "system_types.h"
+
+/*! Read the control registers of IRQ[ID]
+
+ \param ID[in] IRQ identifier
+ \param state[out] irq controller state structure
+
+ \return none, state = IRQ[ID].state
+ */
+extern void irq_controller_get_state(
+ const irq_ID_t ID,
+ irq_controller_state_t *state);
+
+/*! Write to a control register of IRQ[ID]
+
+ \param ID[in] IRQ identifier
+ \param reg[in] register index
+ \param value[in] The data to be written
+
+ \return none, IRQ[ID].ctrl[reg] = value
+ */
+STORAGE_CLASS_IRQ_H void irq_reg_store(
+ const irq_ID_t ID,
+ const unsigned int reg,
+ const hrt_data value);
+
+/*! Read from a control register of IRQ[ID]
+
+ \param ID[in] IRQ identifier
+ \param reg[in] register index
+
+ \return IRQ[ID].ctrl[reg]
+ */
+STORAGE_CLASS_IRQ_H hrt_data irq_reg_load(
+ const irq_ID_t ID,
+ const unsigned int reg);
+
+/*! Enable an IRQ channel of IRQ[ID] with a mode
+
+ \param ID[in] IRQ (device) identifier
+ \param irq_ID[in] IRQ (channel) identifier
+
+ \return none, enable(IRQ[ID].channel[irq_ID])
+ */
+extern void irq_enable_channel(
+ const irq_ID_t ID,
+ const unsigned int irq_ID);
+
+/*! Enable pulse interrupts for IRQ[ID] with a mode
+
+ \param ID[in] IRQ (device) identifier
+ \param pulse[in] enable/disable pulse interrupts
+
+ \return none
+ */
+extern void irq_enable_pulse(
+ const irq_ID_t ID,
+ bool pulse);
+
+/*! Disable an IRQ channel of IRQ[ID]
+
+ \param ID[in] IRQ (device) identifier
+ \param irq[in] IRQ (channel) identifier
+
+ \return none, disable(IRQ[ID].channel[irq_ID])
+ */
+extern void irq_disable_channel(
+ const irq_ID_t ID,
+ const unsigned int irq);
+
+/*! Clear the state of all IRQ channels of IRQ[ID]
+
+ \param ID[in] IRQ (device) identifier
+
+ \return none, clear(IRQ[ID].channel[])
+ */
+extern void irq_clear_all(
+ const irq_ID_t ID);
+
+/*! Return the ID of a signalling IRQ channel of IRQ[ID]
+
+ \param ID[in] IRQ (device) identifier
+ \param irq_id[out] active IRQ (channel) identifier
+
+ \Note: This function operates like strtok(); based on the return
+ state the user is informed whether there are additional signalling
+ channels
+
+ \return state(IRQ[ID])
+ */
+extern enum hrt_isp_css_irq_status irq_get_channel_id(
+ const irq_ID_t ID,
+ unsigned int *irq_id);
+
+/*! Raise an interrupt on channel irq_id of device IRQ[ID]
+
+ \param ID[in] IRQ (device) identifier
+ \param irq_id[in] IRQ (channel) identifier
+
+ \return none, signal(IRQ[ID].channel[irq_id])
+ */
+extern void irq_raise(
+ const irq_ID_t ID,
+ const irq_sw_channel_id_t irq_id);
+
+/*! Test if any IRQ channel of the virtual super IRQ has raised a signal
+
+ \return any(VIRQ.channel[irq_ID] != 0)
+ */
+extern bool any_virq_signal(void);
+
+/*! Enable an IRQ channel of the virtual super IRQ
+
+ \param irq[in] IRQ (channel) identifier
+ \param en[in] predicate channel enable
+
+ \return none, VIRQ.channel[irq_ID].enable = en
+ */
+extern void cnd_virq_enable_channel(
+ const virq_id_t irq_ID,
+ const bool en);
+
+/*! Clear the state of all IRQ channels of the virtual super IRQ
+
+ \return none, clear(VIRQ.channel[])
+ */
+extern void virq_clear_all(void);
+
+/*! Clear the IRQ info state of the virtual super IRQ
+
+ \param irq_info[in/out] The IRQ (channel) state
+
+ \return none
+ */
+extern void virq_clear_info(
+ virq_info_t *irq_info);
+
+/*! Return the ID of a signalling IRQ channel of the virtual super IRQ
+
+ \param irq_id[out] active IRQ (channel) identifier
+
+ \Note: This function operates like strtok(); based on the return
+ state the user is informed whether there are additional signalling
+ channels
+
+ \return state(IRQ[...])
+ */
+extern enum hrt_isp_css_irq_status virq_get_channel_id(
+ virq_id_t *irq_id);
+
+/*! Return the IDs of all signaling IRQ channels of the virtual super IRQ
+
+ \param irq_info[out] all active IRQ (channel) identifiers
+
+ \Note: Unlike "irq_get_channel_id()" this function returns all
+ channel signaling info. The new info is OR'd with the current
+ info state. N.B. this is the same as repeatedly calling the function
+ "irq_get_channel_id()" in a (non-blocked) handler routine
+
+ \return error(state(IRQ[...]))
+ */
+extern enum hrt_isp_css_irq_status virq_get_channel_signals(
+ virq_info_t *irq_info);
+
+#endif /* __IRQ_PUBLIC_H_INCLUDED__ */
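A short dispatch sketch for the virtual super IRQ, following the strtok()-style contract noted above; the status enumerator name and the callback are assumptions.

#include "irq_public.h"

/* Call the handler for every signalling channel; repeated calls return the
 * next channel until no more are pending. */
static void virq_dispatch(void (*handle_virq)(virq_id_t))
{
	virq_id_t irq_id;

	while (virq_get_channel_id(&irq_id) == hrt_isp_css_irq_status_more_irqs)
		handle_virq(irq_id);
}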
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isp2400_config.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isp2400_config.h
new file mode 100644
index 000000000000..ab3391716c82
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isp2400_config.h
@@ -0,0 +1,24 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __ISP2400_CONFIG_H_INCLUDED__
+#define __ISP2400_CONFIG_H_INCLUDED__
+
+#define NUM_BITS 14
+#define NUM_SLICE_ELEMS 4
+#define ROUNDMODE ROUND_NEAREST_EVEN
+#define MAX_SHIFT_1W (NUM_BITS-1) /* Max number of bits a 1w input can be shifted */
+#define MAX_SHIFT_2W (2*NUM_BITS-1) /* Max number of bits a 2w input can be shifted */
+
+#endif /* __ISP2400_CONFIG_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isp2500_config.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isp2500_config.h
new file mode 100644
index 000000000000..4fae856f5a23
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isp2500_config.h
@@ -0,0 +1,29 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __ISP2500_CONFIG_H_INCLUDED__
+#define __ISP2500_CONFIG_H_INCLUDED__
+
+#define NUM_BITS 12
+#define NUM_SLICE_ELEMS 4
+#define ROUNDMODE ROUND_NEAREST_EVEN
+#define MAX_SHIFT_1W (NUM_BITS-1) /* Max number of bits a 1w input can be shifted */
+#define MAX_SHIFT_2W (2*NUM_BITS-1) /* Max number of bits a 2w input can be shifted */
+
+
+#define HAS_div_unit
+
+#define HAS_vec_sub
+
+#endif /* __ISP2500_CONFIG_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isp2600_config.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isp2600_config.h
new file mode 100644
index 000000000000..6086be8cb0d3
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isp2600_config.h
@@ -0,0 +1,34 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __ISP2600_CONFIG_H_INCLUDED__
+#define __ISP2600_CONFIG_H_INCLUDED__
+
+
+#define NUM_BITS 16
+
+
+#define NUM_SLICE_ELEMS 8
+#define ROUNDMODE ROUND_NEAREST_EVEN
+#define MAX_SHIFT_1W (NUM_BITS-1) /* Max number of bits a 1w input can be shifted */
+#define MAX_SHIFT_2W (2*NUM_BITS-1) /* Max number of bits a 2w input can be shifted */
+#define ISP_NWAY 32 /* Number of elements in a vector in ISP 2600 */
+
+#define HAS_div_unit
+#define HAS_1w_sqrt_u_unit
+#define HAS_2w_sqrt_u_unit
+
+#define HAS_vec_sub
+
+#endif /* __ISP2600_CONFIG_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isp2601_config.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isp2601_config.h
new file mode 100644
index 000000000000..beceefa24ca0
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isp2601_config.h
@@ -0,0 +1,70 @@
+#ifndef ISP2401
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __ISP2601_CONFIG_H_INCLUDED__
+#define __ISP2601_CONFIG_H_INCLUDED__
+
+#define NUM_BITS 16
+#define ISP_VEC_ELEMBITS NUM_BITS
+#define ISP_NWAY 32
+#define NUM_SLICE_ELEMS 4
+#define ROUNDMODE ROUND_NEAREST_EVEN
+#define MAX_SHIFT_1W (NUM_BITS-1) /* Max number of bits a 1w input can be shifted */
+#define MAX_SHIFT_2W (2*NUM_BITS-1) /* Max number of bits a 2w input can be shifted */
+
+#define HAS_div_unit
+#define HAS_bfa_unit
+#define HAS_1w_sqrt_u_unit
+#define HAS_2w_sqrt_u_unit
+
+#define HAS_vec_sub
+
+/* Bit widths and element widths defined in HW implementation of BFA */
+#define BFA_THRESHOLD_BIT_CNT (8)
+#define BFA_THRESHOLD_MASK ((1<<BFA_THRESHOLD_BIT_CNT)-1)
+#define BFA_SW_BIT_CNT (7)
+#define BFA_SW_MASK ((1<<BFA_SW_BIT_CNT)-1)
+
+#define BFA_RW_BIT_CNT (7)
+#define BFA_RW_MASK ((1<<BFA_RW_BIT_CNT)-1)
+#define BFA_RW_SLOPE_BIT_POS (8)
+#define BFA_RW_SLOPE_BIT_SHIFT (5)
+
+#define BFA_RW_IDX_BIT_CNT (3)
+#define BFA_RW_FRAC_BIT_CNT (5)
+#define BFA_RW_LUT0_FRAC_START_BIT (0)
+#define BFA_RW_LUT0_FRAC_END_BIT (BFA_RW_LUT0_FRAC_START_BIT+BFA_RW_FRAC_BIT_CNT-1) /* 4 */
+#define BFA_RW_LUT1_FRAC_START_BIT (2)
+#define BFA_RW_LUT1_FRAC_END_BIT (BFA_RW_LUT1_FRAC_START_BIT+BFA_RW_FRAC_BIT_CNT-1) /* 6 */
+/* LUT IDX end bit computation, start+idx_bit_cnt-2, one -1 comes as we count
+ * bits from 0, another -1 comes as we use 2 LUT tables, so idx_bit_cnt is one
+ * bit more */
+#define BFA_RW_LUT0_IDX_START_BIT (BFA_RW_LUT0_FRAC_END_BIT+1) /* 5 */
+#define BFA_RW_LUT0_IDX_END_BIT (BFA_RW_LUT0_IDX_START_BIT+BFA_RW_IDX_BIT_CNT-2) /* 6 */
+#define BFA_RW_LUT1_IDX_START_BIT (BFA_RW_LUT1_FRAC_END_BIT + 1) /* 7 */
+#define BFA_RW_LUT1_IDX_END_BIT (BFA_RW_LUT1_IDX_START_BIT+BFA_RW_IDX_BIT_CNT-2) /* 8 */
+#define BFA_RW_LUT_THRESHOLD (1<<(BFA_RW_LUT1_IDX_END_BIT-1)) /* 0x80 : next bit after lut1 end is set */
+#define BFA_RW_LUT1_IDX_OFFSET ((1<<(BFA_RW_IDX_BIT_CNT-1))-1) /* 3 */
+
+#define BFA_CP_MASK (0xFFFFFF80)
+#define BFA_SUBABS_SHIFT (6)
+#define BFA_SUBABS_BIT_CNT (8)
+#define BFA_SUBABS_MAX ((1<<BFA_SUBABS_BIT_CNT)-1)
+#define BFA_SUBABSSAT_BIT_CNT (9)
+#define BFA_SUBABSSAT_MAX ((1<<BFA_SUBABSSAT_BIT_CNT)-1)
+#define BFA_WEIGHT_SHIFT (6)
+
+#endif /* __ISP2601_CONFIG_H_INCLUDED__ */
+#endif
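A small host-side sketch decoding the BFA LUT0 fields with the bit-position macros above, assuming those macros are in scope; it mirrors the layout comments and does not correspond to any function in this header.

/* Extract the LUT0 fraction and index fields from a range-weight word. */
static void bfa_decode_lut0(unsigned int rw, unsigned int *idx, unsigned int *frac)
{
	*frac = (rw >> BFA_RW_LUT0_FRAC_START_BIT) &
		((1u << BFA_RW_FRAC_BIT_CNT) - 1);
	*idx = (rw >> BFA_RW_LUT0_IDX_START_BIT) &
	       ((1u << (BFA_RW_IDX_BIT_CNT - 1)) - 1);
}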
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isp_config.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isp_config.h
new file mode 100644
index 000000000000..80506f2419a8
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isp_config.h
@@ -0,0 +1,24 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __ISP_CONFIG_H_INCLUDED__
+#define __ISP_CONFIG_H_INCLUDED__
+
+#if defined(ISP2400) || defined(ISP2401)
+#include "isp2400_config.h"
+#else
+#error "Please define a core {ISP2400, ISP2401}"
+#endif
+
+#endif /* __ISP_CONFIG_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isp_op1w.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isp_op1w.h
new file mode 100644
index 000000000000..2251f372145b
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isp_op1w.h
@@ -0,0 +1,845 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __ISP_OP1W_H_INCLUDED__
+#define __ISP_OP1W_H_INCLUDED__
+
+/*
+ * This file is part of the Multi-precision vector operations extension package.
+ */
+
+/*
+ * Single-precision vector operations
+ */
+
+/*
+ * Prerequisites:
+ *
+ */
+#include "storage_class.h"
+
+#ifdef INLINE_ISP_OP1W
+#define STORAGE_CLASS_ISP_OP1W_FUNC_H STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_ISP_OP1W_DATA_H STORAGE_CLASS_INLINE_DATA
+#else /* INLINE_ISP_OP1W */
+#define STORAGE_CLASS_ISP_OP1W_FUNC_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_ISP_OP1W_DATA_H STORAGE_CLASS_EXTERN_DATA
+#endif /* INLINE_ISP_OP1W */
+
+/*
+ * Single-precision data type specification
+ */
+
+#include "isp_op1w_types.h"
+#include "isp_op2w_types.h" // for doubling operations.
+
+/*
+ * Single-precision prototype specification
+ */
+
+/* Arithmetic */
+
+/** @brief bitwise AND
+ *
+ * @param[in] _a first argument
+ * @param[in] _b second argument
+ *
+ * @return bitwise and of both input arguments
+ *
+ * This function will calculate the bitwise and.
+ * result = _a & _b
+ */
+STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_and(
+ const tvector1w _a,
+ const tvector1w _b);
+
+/** @brief bitwise OR
+ *
+ * @param[in] _a first argument
+ * @param[in] _b second argument
+ *
+ * @return bitwise or of both input arguments
+ *
+ * This function will calculate the bitwise or.
+ * result = _a | _b
+ */
+STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_or(
+ const tvector1w _a,
+ const tvector1w _b);
+
+/** @brief bitwise XOR
+ *
+ * @param[in] _a first argument
+ * @param[in] _b second argument
+ *
+ * @return bitwise xor of both input arguments
+ *
+ * This function will calculate the bitwise xor.
+ * result = _a ^ _b
+ */
+STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_xor(
+ const tvector1w _a,
+ const tvector1w _b);
+
+/** @brief bitwise inverse
+ *
+ * @param[in] _a first argument
+ *
+ * @return bitwise inverse of the input argument
+ *
+ * This function will calculate the bitwise inverse.
+ * result = ~_a
+ */
+STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_inv(
+ const tvector1w _a);
+
+/* Additive */
+
+/** @brief addition
+ *
+ * @param[in] _a first argument
+ * @param[in] _b second argument
+ *
+ * @return sum of both input arguments
+ *
+ * This function will calculate the sum of the input arguments.
+ * in case of overflow it will wrap around.
+ * result = _a + _b
+ */
+STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_add(
+ const tvector1w _a,
+ const tvector1w _b);
+
+/** @brief subtraction
+ *
+ * @param[in] _a first argument
+ * @param[in] _b second argument
+ *
+ * @return _b subtracted from _a.
+ *
+ * This function will subtract _b from _a.
+ * in case of overflow it will wrap around.
+ * result = _a - _b
+ */
+STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_sub(
+ const tvector1w _a,
+ const tvector1w _b);
+
+/** @brief saturated addition
+ *
+ * @param[in] _a first argument
+ * @param[in] _b second argument
+ *
+ * @return saturated sum of both input arguments
+ *
+ * This function will calculate the sum of the input arguments.
+ * in case of overflow it will saturate.
+ * result = CLIP(_a + _b, MIN_RANGE, MAX_RANGE);
+ */
+STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_addsat(
+ const tvector1w _a,
+ const tvector1w _b);
+
+/** @brief saturated subtraction
+ *
+ * @param[in] _a first argument
+ * @param[in] _b second argument
+ *
+ * @return saturated subtraction of both input arguments
+ *
+ * This function will subtract _b from _a.
+ * in case of overflow it will saturate.
+ * result = CLIP(_a - _b, MIN_RANGE, MAX_RANGE);
+ */
+STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_subsat(
+ const tvector1w _a,
+ const tvector1w _b);
+
+#ifdef ISP2401
+/** @brief Unsigned saturated subtraction
+ *
+ * @param[in] _a first argument
+ * @param[in] _b second argument
+ *
+ * @return saturated subtraction of both input arguments
+ *
+ * This function will subtract _b from _a.
+ * in case of overflow it will saturate.
+ * result = CLIP(_a - _b, 0, MAX_RANGE);
+ */
+STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w_unsigned OP_1w_subsat_u(
+ const tvector1w_unsigned _a,
+ const tvector1w_unsigned _b);
+
+#endif
+/** @brief subtraction with shift right and rounding
+ *
+ * @param[in] _a first argument
+ * @param[in] _b second argument
+ *
+ * @return (a - b) >> 1
+ *
+ * This function subtracts _b from _a and right shifts
+ * the result by 1 bit with rounding.
+ * No overflow can occur.
+ * result = (_a - _b) >> 1
+ *
+ * Note: This function will be deprecated because of its
+ * confusing name; it will be replaced
+ * by "OP_1w_subhalfrnd".
+ */
+STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_subasr1(
+ const tvector1w _a,
+ const tvector1w _b);
+
+/** @brief Subtraction with shift right and rounding
+ *
+ * @param[in] _a first operand
+ * @param[in] _b second operand
+ *
+ * @return (_a - _b) >> 1
+ *
+ * This function subtracts _b from _a and right shifts
+ * the result by 1 bit with rounding.
+ * No overflow can occur.
+ */
+STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_subhalfrnd(
+ const tvector1w _a,
+ const tvector1w _b);
+
+/** @brief Subtraction with shift right and no rounding
+ *
+ * @param[in] _a first operand
+ * @param[in] _b second operand
+ *
+ * @return (_a - _b) >> 1
+ *
+ * This function subtracts _b from _a and right shifts
+ * the result by 1 bit without rounding (i.e. truncation).
+ * No overflow can occur.
+ */
+STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_subhalf(
+ const tvector1w _a,
+ const tvector1w _b);
+
+
+/** @brief saturated absolute value
+ *
+ * @param[in] _a input
+ *
+ * @return saturated absolute value of the input
+ *
+ * This function will calculate the saturated absolute value of the input.
+ * in case of overflow it will saturate.
+ * if (_a > 0) return _a;<br>
+ * else return CLIP(-_a, MIN_RANGE, MAX_RANGE);<br>
+ */
+STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_abs(
+ const tvector1w _a);
+
+/** @brief saturated absolute difference
+ *
+ * @param[in] _a first argument
+ * @param[in] _b second argument
+ *
+ * @return sat(abs(sat(_a - _b)));
+ *
+ * This function will calculate the saturated absolute value
+ * of the saturated difference of both inputs.
+ * result = sat(abs(sat(_a - _b)));
+ */
+STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_subabssat(
+ const tvector1w _a,
+ const tvector1w _b);
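The nested saturation in sat(abs(sat(_a - _b))) only matters at the extremes, e.g. _a = MIN_RANGE and _b = MAX_RANGE. A per-element sketch, again assuming a hypothetical 16-bit element:

#include <stdint.h>

/* Sketch of sat(abs(sat(a - b))) for an assumed 16-bit element. */
static int16_t model_1w_subabssat(int16_t a, int16_t b)
{
	int32_t d = (int32_t)a - (int32_t)b;

	if (d > INT16_MAX)			/* inner saturation of the difference */
		d = INT16_MAX;
	if (d < INT16_MIN)
		d = INT16_MIN;
	if (d < 0)				/* absolute value ... */
		d = -d;
	return (d > INT16_MAX) ? INT16_MAX : (int16_t)d; /* ... saturated again for |INT16_MIN| */
}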
+
+/* Multiplicative */
+
+/** @brief doubling multiply
+ *
+ * @param[in] _a first argument
+ * @param[in] _b second argument
+ *
+ * @return product of _a and _b
+ *
+ * This function will calculate the product
+ * of the input arguments and returns a double
+ * precision result.
+ * No overflow can occur.
+ * result = _a * _b;
+ */
+STORAGE_CLASS_ISP_OP1W_FUNC_H tvector2w OP_1w_muld(
+ const tvector1w _a,
+ const tvector1w _b);
+
+/** @brief integer multiply
+ *
+ * @param[in] _a first argument
+ * @param[in] _b second argument
+ *
+ * @return product of _a and _b
+ *
+ * This function will calculate the product
+ * of the input arguments and returns the LSB
+ * aligned single precision result.
+ * In case of overflow it will wrap around.
+ * result = _a * _b;
+ */
+STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_mul(
+ const tvector1w _a,
+ const tvector1w _b);
+
+/** @brief fractional saturating multiply
+ *
+ * @param[in] _a first argument
+ * @param[in] _b second argument
+ *
+ * @return saturated product of _a and _b
+ *
+ * This function will calculate the fixed point
+ * product of the input arguments
+ * and returns a single precision result.
+ * In case of overflow it will saturate.
+ * FP_UNITY * FP_UNITY => FP_UNITY.
+ * result = CLIP(_a * _b >> (NUM_BITS-1), MIN_RANGE, MAX_RANGE);
+ */
+STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_qmul(
+ const tvector1w _a,
+ const tvector1w _b);
+
+/** @brief fractional saturating multiply with rounding
+ *
+ * @param[in] _a first argument
+ * @param[in] _b second argument
+ *
+ * @return product of _a and _b
+ *
+ * This function will calculate the fixed point
+ * product of the input arguments
+ * and returns a single precision result.
+ * FP_UNITY * FP_UNITY => FP_UNITY.
+ * Depending on the rounding mode of the core
+ * it will round to nearest or to nearest even.
+ * result = CLIP(_a * _b >> (NUM_BITS-1), MIN_RANGE, MAX_RANGE);
+ */
+STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_qrmul(
+ const tvector1w _a,
+ const tvector1w _b);
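With the usual fixed-point convention this is a Q-format multiply: the product is shifted back by NUM_BITS-1 so that FP_UNITY * FP_UNITY stays FP_UNITY. A per-element sketch for an assumed 16-bit (Q15) element, using round-half-up where the core may round to nearest even:

#include <stdint.h>

/* Sketch of a Q15 fractional multiply with rounding and saturation.
 * Only 0x8000 * 0x8000 ("-1.0 * -1.0") overflows after the shift. */
static int16_t model_1w_qrmul(int16_t a, int16_t b)
{
	int32_t p = (int32_t)a * (int32_t)b;	/* at most 30 significant bits */

	p = (p + (1 << 14)) >> 15;		/* assumes arithmetic >> for negative values */
	if (p > INT16_MAX)
		return INT16_MAX;
	if (p < INT16_MIN)
		return INT16_MIN;
	return (int16_t)p;
}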
+
+/* Comparative */
+
+/** @brief equal
+ *
+ * @param[in] _a first argument
+ * @param[in] _b second argument
+ *
+ * @return _a == _b
+ *
+ * This function will return true if both inputs
+ * are equal, and false if not equal.
+ */
+STORAGE_CLASS_ISP_OP1W_FUNC_H tflags OP_1w_eq(
+ const tvector1w _a,
+ const tvector1w _b);
+
+/** @brief not equal
+ *
+ * @param[in] _a first argument
+ * @param[in] _b second argument
+ *
+ * @return _a != _b
+ *
+ * This function will return false if both inputs
+ * are equal, and true if not equal.
+ */
+STORAGE_CLASS_ISP_OP1W_FUNC_H tflags OP_1w_ne(
+ const tvector1w _a,
+ const tvector1w _b);
+
+/** @brief less or equal
+ *
+ * @param[in] _a first argument
+ * @param[in] _b second argument
+ *
+ * @return _a <= _b
+ *
+ * This function will return true if _a is less
+ * than or equal to _b.
+ */
+STORAGE_CLASS_ISP_OP1W_FUNC_H tflags OP_1w_le(
+ const tvector1w _a,
+ const tvector1w _b);
+
+/** @brief less than
+ *
+ * @param[in] _a first argument
+ * @param[in] _b second argument
+ *
+ * @return _a < _b
+ *
+ * This function will return true if _a is smaller
+ * than _b.
+ */
+STORAGE_CLASS_ISP_OP1W_FUNC_H tflags OP_1w_lt(
+ const tvector1w _a,
+ const tvector1w _b);
+
+/** @brief greater or equal
+ *
+ * @param[in] _a first argument
+ * @param[in] _b second argument
+ *
+ * @return _a >= _b
+ *
+ * This function will return true if _a is greater
+ * than or equal to _b.
+ */
+STORAGE_CLASS_ISP_OP1W_FUNC_H tflags OP_1w_ge(
+ const tvector1w _a,
+ const tvector1w _b);
+
+/** @brief greater than
+ *
+ * @param[in] _a first argument
+ * @param[in] _b second argument
+ *
+ * @return _a > _b
+ *
+ * This function will return true if _a is greater
+ * than _b.
+ */
+STORAGE_CLASS_ISP_OP1W_FUNC_H tflags OP_1w_gt(
+ const tvector1w _a,
+ const tvector1w _b);
+
+/* Shift */
+
+/** @brief arithmetic shift right
+ *
+ * @param[in] _a input
+ * @param[in] _b shift amount
+ *
+ * @return _a >> _b
+ *
+ * This function will shift _a with _b bits to the right,
+ * preserving the sign bit.
+ * It asserts 0 <= _b <= MAX_SHIFT_1W.
+ *
+ * The operation count for this function assumes that
+ * the shift amount is a cloned scalar input.
+ */
+STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_asr(
+ const tvector1w _a,
+ const tvector1w _b);
+
+/** @brief arithmetic shift right with rounding
+ *
+ * @param[in] _a input
+ * @param[in] _b shift amount
+ *
+ * @return _a >> _b
+ *
+ * If _b < NUM_BITS, this function will shift _a with _b bits to the right,
+ * preserving the sign bit, and depending on the rounding mode of the core
+ * it will round to nearest or to nearest even.
+ * If _b >= NUM_BITS, this function will return 0.
+ * It asserts 0 <= _b <= MAX_SHIFT_1W.
+ * The operation count for this function assumes that
+ * the shift amount is a cloned scalar input.
+ */
+STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_asrrnd(
+ const tvector1w _a,
+ const tvector1w _b);
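A per-element sketch of the rounded arithmetic shift right, assuming a 16-bit element (NUM_BITS = 16) and round-half-up where the core may round to nearest even:

#include <stdint.h>

/* Sketch: add half an output LSB, then shift.  Returns 0 for shifts of a
 * full element width or more, as documented above. */
static int16_t model_1w_asrrnd(int16_t a, unsigned int sh)
{
	int32_t v = a;

	if (sh >= 16)
		return 0;
	if (sh == 0)
		return a;
	v += 1 << (sh - 1);
	return (int16_t)(v >> sh);		/* assumes arithmetic >> for negative values */
}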
+
+/** @brief saturating arithmetic shift left
+ *
+ * @param[in] _a input
+ * @param[in] _b shift amount
+ *
+ * @return _a << _b
+ *
+ * If _b < MAX_BITDEPTH, this function will shift _a with _b bits to the left,
+ * saturating at MIN_RANGE/MAX_RANGE in case of overflow.
+ * If _b >= MAX_BITDEPTH, this function will return MIN_RANGE if _a < 0,
+ * MAX_RANGE if _a > 0, 0 if _a == 0.
+ * (with MAX_BITDEPTH=64)
+ * It asserts 0 <= _b <= MAX_SHIFT_1W.
+ * The operation count for this function assumes that
+ * the shift amount is a cloned scalar input.
+ */
+STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_asl(
+ const tvector1w _a,
+ const tvector1w _b);
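A per-element sketch of the saturating left shift for an assumed 16-bit element; the multiply avoids left-shifting a negative value, which C leaves undefined:

#include <stdint.h>

/* Sketch: any non-zero value shifted by 16 or more saturates, otherwise the
 * widened product is clamped to the single-precision range. */
static int16_t model_1w_asl(int16_t a, unsigned int sh)
{
	int32_t v;

	if (a == 0)
		return 0;
	if (sh >= 16)
		return (a < 0) ? INT16_MIN : INT16_MAX;
	v = (int32_t)a * (int32_t)(1 << sh);	/* |a| < 2^15 and sh <= 15, so this fits */
	if (v > INT16_MAX)
		return INT16_MAX;
	if (v < INT16_MIN)
		return INT16_MIN;
	return (int16_t)v;
}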
+
+/** @brief saturating arithmetic shift left
+ *
+ * @param[in] _a input
+ * @param[in] _b shift amount
+ *
+ * @return _a << _b
+ *
+ * This function is identical to OP_1w_asl( )
+ */
+STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_aslsat(
+ const tvector1w _a,
+ const tvector1w _b);
+
+/** @brief logical shift left
+ *
+ * @param[in] _a input
+ * @param[in] _b shift amount
+ *
+ * @return _a << _b
+ *
+ * This function will shift _a with _b bits to the left.
+ * It will insert zeroes on the right.
+ * It asserts 0 <= _b <= MAX_SHIFT_1W.
+ * The operation count for this function assumes that
+ * the shift amount is a cloned scalar input.
+ */
+STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_lsl(
+ const tvector1w _a,
+ const tvector1w _b);
+
+/** @brief logical shift right
+ *
+ * @param[in] _a input
+ * @param[in] _b shift amount
+ *
+ * @return _a >> _b
+ *
+ * This function will shift _a with _b bits to the right.
+ * It will insert zeroes on the left.
+ * It asserts 0 <= _b <= MAX_SHIFT_1W.
+ * The operation count for this function assumes that
+ * the shift amount is a cloned scalar input.
+ */
+STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_lsr(
+ const tvector1w _a,
+ const tvector1w _b);
+
+#ifdef ISP2401
+/** @brief bidirectional saturating arithmetic shift
+ *
+ * @param[in] _a input
+ * @param[in] _b shift amount
+ *
+ * @return _a << |_b| if _b is positive
+ * _a >> |_b| if _b is negative
+ *
+ * If _b > 0, this function will shift _a with _b bits to the left,
+ * saturating at MIN_RANGE/MAX_RANGE in case of overflow.
+ * If _b < 0, this function will shift _a by |_b| bits to the right.
+ * It asserts -MAX_SHIFT_1W <= _b <= MAX_SHIFT_1W.
+ * If _b = 0, it returns _a.
+ */
+STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_ashift_sat(
+ const tvector1w _a,
+ const tvector1w _b);
+
+/** @brief bidirectional non-saturating arithmetic shift
+ *
+ * @param[in] _a input
+ * @param[in] _b shift amount
+ *
+ * @return _a << |_b| if _b is positive
+ * _a >> |_b| if _b is negative
+ *
+ * If _b > 0, this function will shift _a with _b bits to the left,
+ * no saturation is performed in case of overflow.
+ * If _b < 0, this function will shift _a by |_b| bits to the right.
+ * It asserts -MAX_SHIFT_1W <= _b <= MAX_SHIFT_1W.
+ * If _b = 0, it returns _a.
+ */
+STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_ashift(
+ const tvector1w _a,
+ const tvector1w _b);
+
+
+/** @brief bidirectional logical shift
+ *
+ * @param[in] _a input
+ * @param[in] _b shift amount
+ *
+ * @return _a << |_b| if _b is positive
+ * _a >> |_b| if _b is negative
+ *
+ * This function will shift _a by |_b| bits to the left if _b is positive.
+ * This function will shift _a by |_b| bits to the right if _b is negative.
+ * It asserts -MAX_SHIFT_1W <= _b <= MAX_SHIFT_1W.
+ * It inserts zeros at the vacated bit positions: on the right for a
+ * left shift, on the left for a right shift.
+ * The operation count for this function assumes that
+ * the shift amount is a cloned scalar input.
+ */
+STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_lshift(
+ const tvector1w _a,
+ const tvector1w _b);
+
+#endif
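For the bidirectional shifts above, the sign of the amount selects the direction. A per-element sketch of the logical variant for an assumed 16-bit element, operating on the raw bit pattern:

#include <stdint.h>

/* Sketch of a bidirectional logical shift: left for sh > 0, zero-filling
 * right shift for sh < 0.  Amounts of a full element width or more clear
 * the result (an assumption; the header only asserts |_b| <= MAX_SHIFT_1W). */
static int16_t model_1w_lshift(int16_t a, int sh)
{
	uint16_t bits = (uint16_t)a;

	if (sh >= 16 || sh <= -16)
		return 0;
	if (sh >= 0)
		return (int16_t)(uint16_t)(bits << sh);	/* high bits drop off; two's complement assumed */
	return (int16_t)(bits >> -sh);
}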
+/* Cast */
+
+/** @brief Cast from int to 1w
+ *
+ * @param[in] _a input
+ *
+ * @return _a
+ *
+ * This function casts the input from integer type to
+ * single precision. It asserts there is no overflow.
+ *
+ */
+STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_int_cast_to_1w(
+ const int _a);
+
+/** @brief Cast from 1w to int
+ *
+ * @param[in] _a input
+ *
+ * @return _a
+ *
+ * This function casts the input from single precision type to
+ * integer, preserving value and sign.
+ *
+ */
+STORAGE_CLASS_ISP_OP1W_FUNC_H int OP_1w_cast_to_int(
+ const tvector1w _a);
+
+/** @brief Cast from 1w to 2w
+ *
+ * @param[in] _a input
+ *
+ * @return _a
+ *
+ * This function casts the input from single precision type to
+ * double precision, preserving value and sign.
+ *
+ */
+STORAGE_CLASS_ISP_OP1W_FUNC_H tvector2w OP_1w_cast_to_2w(
+ const tvector1w _a);
+
+/** @brief Cast from 2w to 1w
+ *
+ * @param[in] _a input
+ *
+ * @return _a
+ *
+ * This function casts the input from double precision type to
+ * single precision. In case of overflow it will wrap around.
+ *
+ */
+STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_2w_cast_to_1w(
+ const tvector2w _a);
+
+
+/** @brief Cast from 2w to 1w with saturation
+ *
+ * @param[in] _a input
+ *
+ * @return _a
+ *
+ * This function casts the input from double precision type to
+ * single precision after saturating it to the range of single
+ * precision.
+ *
+ */
+STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_2w_sat_cast_to_1w(
+ const tvector2w _a);
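The difference between OP_2w_cast_to_1w and OP_2w_sat_cast_to_1w is exactly wrap versus clamp. A sketch of the saturating variant, assuming 32-bit double-precision and 16-bit single-precision elements:

#include <stdint.h>

/* Sketch of the saturating narrowing cast. */
static int16_t model_2w_sat_cast_to_1w(int32_t a)
{
	if (a > INT16_MAX)
		return INT16_MAX;
	if (a < INT16_MIN)
		return INT16_MIN;
	return (int16_t)a;
}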
+
+/* clipping */
+
+/** @brief Clip asymmetrical
+ *
+ * @param[in] _a first argument
+ * @param[in] _b second argument
+ *
+ * @return _a clipped between (-_b - 1) and _b
+ *
+ * This function will clip the first argument between
+ * (-_b - 1) and _b.
+ * It asserts _b >= 0.
+ *
+ */
+STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_clip_asym(
+ const tvector1w _a,
+ const tvector1w _b);
+
+/** @brief Clip zero
+ *
+ * @param[in] _a first argument
+ * @param[in] _b second argument
+ *
+ * @return _a clipped between 0 and _b
+ *
+ * This function will clip the first argument between
+ * zero and _b.
+ * It asserts _b >= 0.
+ *
+ */
+STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_clipz(
+ const tvector1w _a,
+ const tvector1w _b);
+
+/* division */
+
+/** @brief Truncated division
+ *
+ * @param[in] _a first argument
+ * @param[in] _b second argument
+ *
+ * @return trunc( _a / _b )
+ *
+ * This function will divide the first argument by
+ * the second argument, with rounding toward 0.
+ * If _b == 0 and _a < 0, the function will return MIN_RANGE.
+ * If _b == 0 and _a == 0, the function will return 0.
+ * If _b == 0 and _a > 0, the function will return MAX_RANGE.
+ */
+STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_div(
+ const tvector1w _a,
+ const tvector1w _b);
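C's / operator already truncates toward zero, so the only extra work in a reference model is the documented divide-by-zero convention (plus, as an assumption not covered by the text above, the single overflowing quotient MIN_RANGE / -1). A sketch for an assumed 16-bit element:

#include <stdint.h>

/* Sketch of the truncated division with the documented _b == 0 behaviour. */
static int16_t model_1w_div(int16_t a, int16_t b)
{
	if (b == 0) {
		if (a == 0)
			return 0;
		return (a < 0) ? INT16_MIN : INT16_MAX;
	}
	if (a == INT16_MIN && b == -1)
		return INT16_MAX;		/* assumed saturation of the one overflowing case */
	return (int16_t)(a / b);
}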
+
+/** @brief Fractional saturating divide
+ *
+ * @param[in] _a first argument
+ * @param[in] _b second argument
+ *
+ * @return _a / _b
+ *
+ * This function will perform fixed point division of
+ * the first argument by the second argument, with rounding toward 0.
+ * In case of overflow it will saturate.
+ * If _b == 0 and _a < 0, the function will return MIN_RANGE.
+ * If _b == 0 and _a == 0, the function will return 0.
+ * If _b == 0 and _a > 0, the function will return MAX_RANGE.
+ */
+STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_qdiv(
+ const tvector1w _a,
+ const tvector1w _b);
+
+/** @brief Modulo
+ *
+ * @param[in] _a first argument
+ * @param[in] _b second argument
+ *
+ * @return _a % _b
+ *
+ * This function will return the remainder r = _a - _b * trunc( _a / _b ).
+ * Note that the sign of the remainder is always equal to the sign of _a.
+ * If _b == 0 the function will return _a.
+ */
+STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_mod(
+ const tvector1w _a,
+ const tvector1w _b);
+
+/** @brief Unsigned integer Square root
+ *
+ * @param[in] _a input
+ *
+ * @return Integer square root of _a
+ *
+ * This function will calculate the Integer square root of _a
+ */
+STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w_unsigned OP_1w_sqrt_u(
+ const tvector1w_unsigned _a);
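A common reference model for an unsigned integer square root is the digit-by-digit method; the sketch below assumes a 16-bit unsigned element, which is an assumption rather than something stated in this header:

#include <stdint.h>

/* Digit-by-digit integer square root: returns floor(sqrt(a)). */
static uint16_t model_1w_sqrt_u(uint16_t a)
{
	uint32_t x = a;
	uint32_t res = 0;
	uint32_t bit = 1u << 14;		/* highest power of four within 16 bits */

	while (bit > x)
		bit >>= 2;
	while (bit != 0) {
		if (x >= res + bit) {
			x -= res + bit;
			res = (res >> 1) + bit;
		} else {
			res >>= 1;
		}
		bit >>= 2;
	}
	return (uint16_t)res;
}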
+
+/* Miscellaneous */
+
+/** @brief Multiplexer
+ *
+ * @param[in] _a first argument
+ * @param[in] _b second argument
+ * @param[in] _c condition
+ *
+ * @return _c ? _a : _b
+ *
+ * This function will return _a if the condition _c
+ * is true and _b otherwise.
+ */
+STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_mux(
+ const tvector1w _a,
+ const tvector1w _b,
+ const tflags _c);
+
+/** @brief Average without rounding
+ *
+ * @param[in] _a first operand
+ * @param[in] _b second operand
+ *
+ * @return (_a + _b) >> 1
+ *
+ * This function will add _a and _b, and right shift
+ * the result by one without rounding. No overflow
+ * will occur because addition is performed in the
+ * proper precision.
+ */
+STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_avg(
+ const tvector1w _a,
+ const tvector1w _b);
+
+/** @brief Average with rounding
+ *
+ * @param[in] _a first argument
+ * @param[in] _b second argument
+ *
+ * @return (_a + _b) >> 1
+ *
+ * This function will add _a and _b at full precision,
+ * and right shift with rounding the result with 1 bit.
+ * Depending on the rounding mode of the core
+ * it will round to nearest or to nearest even.
+ */
+STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_avgrnd(
+ const tvector1w _a,
+ const tvector1w _b);
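Because the addition is done at full precision before the halving, neither average can overflow. A per-element sketch of both variants for an assumed 16-bit element, with round-half-up standing in for the core's rounding mode:

#include <stdint.h>

/* Sketches of the truncating and rounding averages. */
static int16_t model_1w_avg(int16_t a, int16_t b)
{
	int32_t s = (int32_t)a + (int32_t)b;

	return (int16_t)(s >> 1);		/* truncating; arithmetic >> assumed */
}

static int16_t model_1w_avgrnd(int16_t a, int16_t b)
{
	int32_t s = (int32_t)a + (int32_t)b;

	return (int16_t)((s + 1) >> 1);		/* round-half-up stand-in for the core's mode */
}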
+
+/** @brief Minimum
+ *
+ * @param[in] _a first argument
+ * @param[in] _b second argument
+ *
+ * @return (_a < _b) ? _a : _b;
+ *
+ * This function will return the smallest of both
+ * input arguments.
+ */
+STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_min(
+ const tvector1w _a,
+ const tvector1w _b);
+
+/** @brief Maximum
+ *
+ * @param[in] _a first argument
+ * @param[in] _b second argument
+ *
+ * @return (_a > _b) ? _a : _b;
+ *
+ * This function will return the largest of both
+ * input arguments.
+ */
+STORAGE_CLASS_ISP_OP1W_FUNC_H tvector1w OP_1w_max(
+ const tvector1w _a,
+ const tvector1w _b);
+
+#ifndef INLINE_ISP_OP1W
+#define STORAGE_CLASS_ISP_OP1W_FUNC_C
+#define STORAGE_CLASS_ISP_OP1W_DATA_C const
+#else /* INLINE_ISP_OP1W */
+#define STORAGE_CLASS_ISP_OP1W_FUNC_C STORAGE_CLASS_ISP_OP1W_FUNC_H
+#define STORAGE_CLASS_ISP_OP1W_DATA_C STORAGE_CLASS_ISP_OP1W_DATA_H
+#include "isp_op1w.c"
+#define ISP_OP1W_INLINED
+#endif /* INLINE_ISP_OP1W */
+
+#endif /* __ISP_OP1W_H_INCLUDED__ */
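The STORAGE_CLASS_ISP_OP1W_* block above follows a common single-header idiom: by default the prototypes compile as extern declarations with the bodies built separately, but defining INLINE_ISP_OP1W turns them into inline definitions by pulling isp_op1w.c into the including translation unit. A stripped-down, hypothetical illustration of the same idiom (none of these names come from the header):

/* mylib.h-style sketch: one macro decides extern declaration vs. inline definition. */
#ifdef INLINE_MYLIB
#define MYLIB_FUNC_H static inline
#else
#define MYLIB_FUNC_H extern
#endif

MYLIB_FUNC_H int mylib_add(int a, int b);

#ifdef INLINE_MYLIB
static inline int mylib_add(int a, int b)
{
	return a + b;		/* in the real header the bodies come from isp_op1w.c */
}
#endif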
+
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isp_op1w_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isp_op1w_types.h
new file mode 100644
index 000000000000..c81e587509a1
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isp_op1w_types.h
@@ -0,0 +1,54 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __ISP_OP1W_TYPES_H_INCLUDED__
+#define __ISP_OP1W_TYPES_H_INCLUDED__
+
+/*
+ * This file is part of the Multi-precision vector operations extension package.
+ */
+
+/*
+ * Single-precision vector operations
+ */
+
+/*
+ * Prerequisites:
+ *
+ */
+
+#include "mpmath.h"
+
+/*
+ * Single-precision data type specification
+ */
+
+
+typedef mpsdata_t tvector1w;
+typedef mpsdata_t tscalar1w;
+typedef spsdata_t tflags;
+typedef mpudata_t tvector1w_unsigned;
+typedef mpsdata_t tscalar1w_weight;
+typedef mpsdata_t tvector1w_signed_positive;
+typedef mpsdata_t tvector1w_weight;
+#ifdef ISP2401
+typedef bool tscalar_bool;
+#endif
+
+typedef struct {
+ tvector1w d;
+ tflags f;
+} tvector1w_tflags1w;
+
+#endif /* __ISP_OP1W_TYPES_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isp_op2w.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isp_op2w.h
new file mode 100644
index 000000000000..1cfe6d717283
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isp_op2w.h
@@ -0,0 +1,675 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __ISP_OP2W_H_INCLUDED__
+#define __ISP_OP2W_H_INCLUDED__
+
+/*
+ * This file is part of the Multi-precision vector operations extension package.
+ */
+
+/*
+ * Double-precision vector operations
+ */
+
+/*
+ * Prerequisites:
+ *
+ */
+#include "storage_class.h"
+
+#ifdef INLINE_ISP_OP2W
+#define STORAGE_CLASS_ISP_OP2W_FUNC_H STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_ISP_OP2W_DATA_H STORAGE_CLASS_INLINE_DATA
+#else /* INLINE_ISP_OP2W */
+#define STORAGE_CLASS_ISP_OP2W_FUNC_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_ISP_OP2W_DATA_H STORAGE_CLASS_EXTERN_DATA
+#endif /* INLINE_ISP_OP2W */
+
+/*
+ * Double-precision data type specification
+ */
+
+#include "isp_op2w_types.h"
+
+/*
+ * Double-precision prototype specification
+ */
+
+/* Arithmetic */
+
+/** @brief bitwise AND
+ *
+ * @param[in] _a first argument
+ * @param[in] _b second argument
+ *
+ * @return bitwise and of both input arguments
+ *
+ * This function will calculate the bitwise and.
+ * result = _a & _b
+ */
+STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_and(
+ const tvector2w _a,
+ const tvector2w _b);
+
+/** @brief bitwise OR
+ *
+ * @param[in] _a first argument
+ * @param[in] _b second argument
+ *
+ * @return bitwise or of both input arguments
+ *
+ * This function will calculate the bitwise or.
+ * result = _a | _b
+ */
+STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_or(
+ const tvector2w _a,
+ const tvector2w _b);
+
+/** @brief bitwise XOR
+ *
+ * @param[in] _a first argument
+ * @param[in] _b second argument
+ *
+ * @return bitwise xor of both input arguments
+ *
+ * This function will calculate the bitwise xor.
+ * result = _a ^ _b
+ */
+STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_xor(
+ const tvector2w _a,
+ const tvector2w _b);
+
+/** @brief bitwise inverse
+ *
+ * @param[in] _a first argument
+ *
+ * @return bitwise inverse of the input argument
+ *
+ * This function will calculate the bitwise inverse.
+ * result = ~_a
+ */
+STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_inv(
+ const tvector2w _a);
+
+/* Additive */
+
+/** @brief addition
+ *
+ * @param[in] _a first argument
+ * @param[in] _b second argument
+ *
+ * @return sum of both input arguments
+ *
+ * This function will calculate the sum of the input arguments.
+ * in case of overflow it will wrap around.
+ * result = _a + _b
+ */
+STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_add(
+ const tvector2w _a,
+ const tvector2w _b);
+
+/** @brief subtraction
+ *
+ * @param[in] _a first argument
+ * @param[in] _b second argument
+ *
+ * @return _b subtracted from _a.
+ *
+ * This function will subtract _b from _a.
+ * in case of overflow it will wrap around.
+ * result = _a - _b
+ */
+STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_sub(
+ const tvector2w _a,
+ const tvector2w _b);
+
+/** @brief saturated addition
+ *
+ * @param[in] _a first argument
+ * @param[in] _b second argument
+ *
+ * @return saturated sum of both input arguments
+ *
+ * This function will calculate the sum of the input arguments.
+ * in case of overflow it will saturate.
+ * result = CLIP(_a + _b, MIN_RANGE, MAX_RANGE);
+ */
+STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_addsat(
+ const tvector2w _a,
+ const tvector2w _b);
+
+/** @brief saturated subtraction
+ *
+ * @param[in] _a first argument
+ * @param[in] _b second argument
+ *
+ * @return saturated subtraction of both input arguments
+ *
+ * This function will subtract _b from _a.
+ * in case of overflow it will saturate.
+ * result = CLIP(_a - _b, MIN_RANGE, MAX_RANGE);
+ */
+STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_subsat(
+ const tvector2w _a,
+ const tvector2w _b);
+
+/** @brief subtraction with shift right and rounding
+ *
+ * @param[in] _a first argument
+ * @param[in] _b second argument
+ *
+ * @return (a - b) >> 1
+ *
+ * This function subtracts _b from _a and right shifts
+ * the result by 1 bit with rounding.
+ * No overflow can occur.
+ * result = (_a - _b) >> 1
+ *
+ * Note: This function will be deprecated because of its
+ * confusing name; it will be replaced
+ * by "OP_2w_subhalfrnd".
+ */
+STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_subasr1(
+ const tvector2w _a,
+ const tvector2w _b);
+
+/** @brief Subtraction with shift right and rounding
+ *
+ * @param[in] _a first operand
+ * @param[in] _b second operand
+ *
+ * @return (_a - _b) >> 1
+ *
+ * This function subtracts _b from _a and right shifts
+ * the result by 1 bit with rounding.
+ * No overflow can occur.
+ */
+STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_subhalfrnd(
+ const tvector2w _a,
+ const tvector2w _b);
+
+/** @brief Subtraction with shift right and no rounding
+ *
+ * @param[in] _a first operand
+ * @param[in] _b second operand
+ *
+ * @return (_a - _b) >> 1
+ *
+ * This function subtracts _b from _a and right shifts
+ * the result by 1 bit without rounding (i.e. truncation).
+ * No overflow can occur.
+ */
+STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_subhalf(
+ const tvector2w _a,
+ const tvector2w _b);
+
+/** @brief saturated absolute value
+ *
+ * @param[in] _a input
+ *
+ * @return saturated absolute value of the input
+ *
+ * This function will calculate the saturated absolute value of the input.
+ * In case of overflow it will saturate.
+ * if (_a > 0) return _a;<br>
+ * else return CLIP(-_a, MIN_RANGE, MAX_RANGE);<br>
+ */
+STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_abs(
+ const tvector2w _a);
+
+/** @brief saturated absolute difference
+ *
+ * @param[in] _a first argument
+ * @param[in] _b second argument
+ *
+ * @return sat(abs(sat(a-b)));
+ *
+ * This function will calculate the saturated absolute value
+ * of the saturated difference of both inputs.
+ * result = sat(abs(sat(_a - _b)));
+ */
+STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_subabssat(
+ const tvector2w _a,
+ const tvector2w _b);
+
+/* Multiplicative */
+
+/** @brief integer multiply
+ *
+ * @param[in] _a first argument
+ * @param[in] _b second argument
+ *
+ * @return product of _a and _b
+ *
+ * This function will calculate the product
+ * of the input arguments and returns the LSB
+ * aligned double precision result.
+ * In case of overflow it will wrap around.
+ * result = _a * _b;
+ */
+STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_mul(
+ const tvector2w _a,
+ const tvector2w _b);
+
+/** @brief fractional saturating multiply
+ *
+ * @param[in] _a first argument
+ * @param[in] _b second argument
+ *
+ * @return saturated product of _a and _b
+ *
+ * This function will calculate the fixed point
+ * product of the input arguments
+ * and returns a double precision result.
+ * In case of overflow it will saturate.
+ * result = ((_a * _b) << 1) >> (2*NUM_BITS);
+ */
+STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_qmul(
+ const tvector2w _a,
+ const tvector2w _b);
+
+/** @brief fractional saturating multiply with rounding
+ *
+ * @param[in] _a first argument
+ * @param[in] _b second argument
+ *
+ * @return product of _a and _b
+ *
+ * This function will calculate the fixed point
+ * product of the input arguments
+ * and returns a double precision result.
+ * Depending on the rounding mode of the core
+ * it will round to nearest or to nearest even.
+ * In case of overflow it will saturate.
+ * result = ((_a * _b) << 1) >> (2*NUM_BITS);
+ */
+
+STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_qrmul(
+ const tvector2w _a,
+ const tvector2w _b);
+
+/* Comparative */
+
+/** @brief equal
+ *
+ * @param[in] _a first argument
+ * @param[in] _b second argument
+ *
+ * @return _a == _b
+ *
+ * This function will return true if both inputs
+ * are equal, and false if not equal.
+ */
+STORAGE_CLASS_ISP_OP2W_FUNC_H tflags OP_2w_eq(
+ const tvector2w _a,
+ const tvector2w _b);
+
+/** @brief not equal
+ *
+ * @param[in] _a first argument
+ * @param[in] _b second argument
+ *
+ * @return _a != _b
+ *
+ * This function will return false if both inputs
+ * are equal, and true if not equal.
+ */
+STORAGE_CLASS_ISP_OP2W_FUNC_H tflags OP_2w_ne(
+ const tvector2w _a,
+ const tvector2w _b);
+
+/** @brief less or equal
+ *
+ * @param[in] _a first argument
+ * @param[in] _b second argument
+ *
+ * @return _a <= _b
+ *
+ * This function will return true if _a is less
+ * than or equal to _b.
+ */
+STORAGE_CLASS_ISP_OP2W_FUNC_H tflags OP_2w_le(
+ const tvector2w _a,
+ const tvector2w _b);
+
+/** @brief less than
+ *
+ * @param[in] _a first argument
+ * @param[in] _b second argument
+ *
+ * @return _a < _b
+ *
+ * This function will return true if _a is smaller
+ * than _b.
+ */
+STORAGE_CLASS_ISP_OP2W_FUNC_H tflags OP_2w_lt(
+ const tvector2w _a,
+ const tvector2w _b);
+
+/** @brief greater or equal
+ *
+ * @param[in] _a first argument
+ * @param[in] _b second argument
+ *
+ * @return _a >= _b
+ *
+ * This function will return true if _a is greater
+ * than or equal to _b.
+ */
+STORAGE_CLASS_ISP_OP2W_FUNC_H tflags OP_2w_ge(
+ const tvector2w _a,
+ const tvector2w _b);
+
+/** @brief greater than
+ *
+ * @param[in] _a first argument
+ * @param[in] _b second argument
+ *
+ * @return _a > _b
+ *
+ * This function will return true if _a is greater
+ * than _b.
+ */
+STORAGE_CLASS_ISP_OP2W_FUNC_H tflags OP_2w_gt(
+ const tvector2w _a,
+ const tvector2w _b);
+
+/* Shift */
+
+/** @brief arithmetic shift right
+ *
+ * @param[in] _a input
+ * @param[in] _b shift amount
+ *
+ * @return _a >> _b
+ *
+ * This function will shift _a with _b bits to the right,
+ * preserving the sign bit.
+ * It asserts 0 <= _b <= MAX_SHIFT_2W.
+ * The operation count for this function assumes that
+ * the shift amount is a cloned scalar input.
+ */
+STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_asr(
+ const tvector2w _a,
+ const tvector2w _b);
+
+/** @brief arithmetic shift right with rounding
+ *
+ * @param[in] _a input
+ * @param[in] _b shift amount
+ *
+ * @return _a >> _b
+ *
+ * If _b < 2*NUM_BITS, this function will shift _a with _b bits to the right,
+ * preserving the sign bit, and depending on the rounding mode of the core
+ * it will round to nearest or to nearest even.
+ * If _b >= 2*NUM_BITS, this function will return 0.
+ * It asserts 0 <= _b <= MAX_SHIFT_2W.
+ * The operation count for this function assumes that
+ * the shift amount is a cloned scalar input.
+ */
+STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_asrrnd(
+ const tvector2w _a,
+ const tvector2w _b);
+
+/** @brief saturating arithmetic shift left
+ *
+ * @param[in] _a input
+ * @param[in] _b shift amount
+ *
+ * @return _a << _b
+ *
+ * If _b < MAX_BITDEPTH, this function will shift _a with _b bits to the left,
+ * saturating at MIN_RANGE/MAX_RANGE in case of overflow.
+ * If _b >= MAX_BITDEPTH, this function will return MIN_RANGE if _a < 0,
+ * MAX_RANGE if _a > 0, 0 if _a == 0.
+ * (with MAX_BITDEPTH=64)
+ * It asserts 0 <= _b <= MAX_SHIFT_2W.
+ * The operation count for this function assumes that
+ * the shift amount is a cloned scalar input.
+ */
+STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_asl(
+ const tvector2w _a,
+ const tvector2w _b);
+
+/** @brief saturating arithmetic shift left
+ *
+ * @param[in] _a input
+ * @param[in] _b shift amount
+ *
+ * @return _a << _b
+ *
+ * This function is identical to OP_2w_asl( )
+ */
+STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_aslsat(
+ const tvector2w _a,
+ const tvector2w _b);
+
+/** @brief logical shift left
+ *
+ * @param[in] _a input
+ * @param[in] _b shift amount
+ *
+ * @return _a << _b
+ *
+ * This function will shift _a with _b bits to the left.
+ * It will insert zeroes on the right.
+ * It asserts 0 <= _b <= MAX_SHIFT_2W.
+ * The operation count for this function assumes that
+ * the shift amount is a cloned scalar input.
+ */
+STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_lsl(
+ const tvector2w _a,
+ const tvector2w _b);
+
+/** @brief logical shift right
+ *
+ * @param[in] _a input
+ * @param[in] _b shift amount
+ *
+ * @return _a >> _b
+ *
+ * This function will shift _a with _b bits to the right.
+ * It will insert zeroes on the left.
+ * It asserts 0 <= _b <= MAX_SHIFT_2W.
+ * The operation count for this function assumes that
+ * the shift amount is a cloned scalar input.
+ */
+STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_lsr(
+ const tvector2w _a,
+ const tvector2w _b);
+
+/* clipping */
+
+/** @brief Clip asymmetrical
+ *
+ * @param[in] _a first argument
+ * @param[in] _b second argument
+ *
+ * @return _a clipped between (-_b - 1) and _b
+ *
+ * This function will clip the first argument between
+ * (-_b - 1) and _b.
+ * It asserts _b >= 0.
+ */
+STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_clip_asym(
+ const tvector2w _a,
+ const tvector2w _b);
+
+/** @brief Clip zero
+ *
+ * @param[in] _a first argument
+ * @param[in] _b second argument
+ *
+ * @return _a clipped between 0 and _b
+ *
+ * This function will clip the first argument between
+ * zero and _b.
+ * It asserts _b >= 0.
+ */
+STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_clipz(
+ const tvector2w _a,
+ const tvector2w _b);
+
+/* division */
+
+/** @brief Truncated division
+ *
+ * @param[in] _a first argument
+ * @param[in] _b second argument
+ *
+ * @return trunc( _a / _b )
+ *
+ * This function will divide the first argument by
+ * the second argument, with rounding toward 0.
+ * If _b == 0 and _a < 0, the function will return MIN_RANGE.
+ * If _b == 0 and _a == 0, the function will return 0.
+ * If _b == 0 and _a > 0, the function will return MAX_RANGE.
+ */
+STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_div(
+ const tvector2w _a,
+ const tvector2w _b);
+
+/** @brief Saturating truncated division
+ *
+ * @param[in] _a first argument
+ * @param[in] _b second argument
+ *
+ * @return CLIP( trunc( _a / _b ), MIN_RANGE1w, MAX_RANGE1w )
+ *
+ * This function will divide the first argument by
+ * the second argument, with rounding toward 0, and
+ * saturate the result to the range of single precision.
+ * If _b == 0 and _a < 0, the function will return MIN_RANGE.
+ * If _b == 0 and _a == 0, the function will return 0.
+ * If _b == 0 and _a > 0, the function will return MAX_RANGE.
+ */
+STORAGE_CLASS_ISP_OP2W_FUNC_H tvector1w OP_2w_divh(
+ const tvector2w _a,
+ const tvector1w _b);
+
+/** @brief Modulo
+ *
+ * @param[in] _a first argument
+ * @param[in] _b second argument
+ *
+ * @return n/a
+ *
+ * This function has not yet been implemented.
+ */
+STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_mod(
+ const tvector2w _a,
+ const tvector2w _b);
+
+/** @brief Unsigned Integer Square root
+ *
+ * @param[in] _a input
+ *
+ * @return square root of _a
+ *
+ * This function will calculate the unsigned integer square root of _a
+ */
+STORAGE_CLASS_ISP_OP2W_FUNC_H tvector1w_unsigned OP_2w_sqrt_u(
+ const tvector2w_unsigned _a);
+
+/* Miscellaneous */
+
+/** @brief Multiplexer
+ *
+ * @param[in] _a first argument
+ * @param[in] _b second argument
+ * @param[in] _c condition
+ *
+ * @return _c ? _a : _b
+ *
+ * This function will return _a if the condition _c
+ * is true and _b otherwise.
+ */
+STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_mux(
+ const tvector2w _a,
+ const tvector2w _b,
+ const tflags _c);
+
+/** @brief Average without rounding
+ *
+ * @param[in] _a first operand
+ * @param[in] _b second operand
+ *
+ * @return (_a + _b) >> 1
+ *
+ * This function will add _a and _b, and right shift
+ * the result by one without rounding. No overflow
+ * will occur because addition is performed in the
+ * proper precision.
+ */
+STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_avg(
+ const tvector2w _a,
+ const tvector2w _b);
+
+/** @brief Average with rounding
+ *
+ * @param[in] _a first argument
+ * @param[in] _b second argument
+ *
+ * @return (_a + _b) >> 1
+ *
+ * This function will add _a and _b at full precision,
+ * and right shift with rounding the result with 1 bit.
+ * Depending on the rounding mode of the core
+ * it will round to nearest or to nearest even.
+ */
+STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_avgrnd(
+ const tvector2w _a,
+ const tvector2w _b);
+
+/** @brief Minimum
+ *
+ * @param[in] _a first argument
+ * @param[in] _b second argument
+ *
+ * @return (_a < _b) ? _a : _b;
+ *
+ * This function will return the smallest of both
+ * input arguments.
+ */
+STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_min(
+ const tvector2w _a,
+ const tvector2w _b);
+
+/** @brief Maximum
+ *
+ * @param[in] _a first argument
+ * @param[in] _b second argument
+ *
+ * @return (_a > _b) ? _a : _b;
+ *
+ * This function will return the largest of both
+ * input arguments.
+ */
+STORAGE_CLASS_ISP_OP2W_FUNC_H tvector2w OP_2w_max(
+ const tvector2w _a,
+ const tvector2w _b);
+
+#ifndef INLINE_ISP_OP2W
+#define STORAGE_CLASS_ISP_OP2W_FUNC_C
+#define STORAGE_CLASS_ISP_OP2W_DATA_C const
+#else /* INLINE_ISP_OP2W */
+#define STORAGE_CLASS_ISP_OP2W_FUNC_C STORAGE_CLASS_ISP_OP2W_FUNC_H
+#define STORAGE_CLASS_ISP_OP2W_DATA_C STORAGE_CLASS_ISP_OP2W_DATA_H
+#include "isp_op2w.c"
+#define ISP_OP2W_INLINED
+#endif /* INLINE_ISP_OP2W */
+
+#endif /* __ISP_OP2W_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isp_op2w_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isp_op2w_types.h
new file mode 100644
index 000000000000..7e86083a8a33
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isp_op2w_types.h
@@ -0,0 +1,49 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __ISP_OP2W_TYPES_H_INCLUDED__
+#define __ISP_OP2W_TYPES_H_INCLUDED__
+
+/*
+ * This file is part of the Multi-precision vector operations extension package.
+ */
+
+/*
+ * Double-precision vector operations
+ */
+
+/*
+ * Prerequisites:
+ *
+ */
+#include "mpmath.h"
+#include "isp_op1w_types.h"
+
+/*
+ * Double-precision data type specification
+ */
+
+
+typedef mpsdata_t tvector2w;
+typedef mpsdata_t tscalar2w;
+typedef mpsdata_t tvector2w_signed_positive;
+typedef mpudata_t tvector2w_unsigned;
+
+
+typedef struct {
+ tvector2w d;
+ tflags f;
+} tvector2w_tflags;
+
+#endif /* __ISP_OP2W_TYPES_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isp_op_count.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isp_op_count.h
new file mode 100644
index 000000000000..8e7b48d026b0
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isp_op_count.h
@@ -0,0 +1,226 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __ISP_OP_COUNT_H_INCLUDED__
+#define __ISP_OP_COUNT_H_INCLUDED__
+
+#include <stdio.h>
+
+typedef struct {
+ long long bbb_cnt; /* number of bbb */
+ int bbb_op; /* operations per bbb */
+ long long total_cnt; /* bbb_cnt * bbb_op */
+} bbb_stat_t;
+
+typedef enum {
+ bbb_func_OP_1w_and,
+ bbb_func_OP_1w_or,
+ bbb_func_OP_1w_xor,
+ bbb_func_OP_1w_inv,
+ bbb_func_OP_1w_add,
+ bbb_func_OP_1w_sub,
+ bbb_func_OP_1w_addsat,
+ bbb_func_OP_1w_subsat,
+ bbb_func_OP_1w_subasr1,
+ bbb_func_OP_1w_subhalf,
+ bbb_func_OP_1w_subhalfrnd,
+ bbb_func_OP_1w_abs,
+ bbb_func_OP_1w_subabssat,
+#ifdef ISP2401
+ bbb_func_OP_1w_subsat_u,
+#endif
+ bbb_func_OP_1w_muld,
+ bbb_func_OP_1w_mul,
+ bbb_func_OP_1w_qmul,
+ bbb_func_OP_1w_qrmul,
+ bbb_func_OP_1w_eq,
+ bbb_func_OP_1w_ne,
+ bbb_func_OP_1w_le,
+ bbb_func_OP_1w_lt,
+ bbb_func_OP_1w_ge,
+ bbb_func_OP_1w_gt,
+ bbb_func_OP_1w_asr,
+ bbb_func_OP_1w_asrrnd,
+ bbb_func_OP_1w_asl,
+ bbb_func_OP_1w_aslsat,
+ bbb_func_OP_1w_lsl,
+ bbb_func_OP_1w_lsr,
+#ifdef ISP2401
+ bbb_func_OP_1w_ashift,
+ bbb_func_OP_1w_lshift,
+#endif
+	bbb_func_OP_int_cast_to_1w,
+	bbb_func_OP_1w_cast_to_int,
+	bbb_func_OP_1w_cast_to_2w,
+	bbb_func_OP_2w_cast_to_1w,
+	bbb_func_OP_2w_sat_cast_to_1w,
+ bbb_func_OP_1w_clip_asym,
+ bbb_func_OP_1w_clipz,
+ bbb_func_OP_1w_div,
+ bbb_func_OP_1w_qdiv,
+ bbb_func_OP_1w_mod,
+ bbb_func_OP_1w_sqrt_u,
+ bbb_func_OP_1w_mux,
+ bbb_func_OP_1w_avg,
+ bbb_func_OP_1w_avgrnd,
+ bbb_func_OP_1w_min,
+ bbb_func_OP_1w_max,
+ bbb_func_OP_2w_and,
+ bbb_func_OP_2w_or,
+ bbb_func_OP_2w_xor,
+ bbb_func_OP_2w_inv,
+ bbb_func_OP_2w_add,
+ bbb_func_OP_2w_sub,
+ bbb_func_OP_2w_addsat,
+ bbb_func_OP_2w_subsat,
+ bbb_func_OP_2w_subasr1,
+ bbb_func_OP_2w_subhalf,
+ bbb_func_OP_2w_subhalfrnd,
+ bbb_func_OP_2w_abs,
+ bbb_func_OP_2w_subabssat,
+ bbb_func_OP_2w_mul,
+ bbb_func_OP_2w_qmul,
+ bbb_func_OP_2w_qrmul,
+ bbb_func_OP_2w_eq,
+ bbb_func_OP_2w_ne,
+ bbb_func_OP_2w_le,
+ bbb_func_OP_2w_lt,
+ bbb_func_OP_2w_ge,
+ bbb_func_OP_2w_gt,
+ bbb_func_OP_2w_asr,
+ bbb_func_OP_2w_asrrnd,
+ bbb_func_OP_2w_asl,
+ bbb_func_OP_2w_aslsat,
+ bbb_func_OP_2w_lsl,
+ bbb_func_OP_2w_lsr,
+ bbb_func_OP_2w_clip_asym,
+ bbb_func_OP_2w_clipz,
+ bbb_func_OP_2w_div,
+ bbb_func_OP_2w_divh,
+ bbb_func_OP_2w_mod,
+ bbb_func_OP_2w_sqrt_u,
+ bbb_func_OP_2w_mux,
+ bbb_func_OP_2w_avg,
+ bbb_func_OP_2w_avgrnd,
+ bbb_func_OP_2w_min,
+ bbb_func_OP_2w_max,
+ bbb_func_OP_1w_mul_realigning,
+#ifdef ISP2401
+ bbb_func_OP_1w_imax32,
+ bbb_func_OP_1w_imaxidx32,
+ bbb_func_OP_1w_cond_add,
+#endif
+
+ bbb_func_num_functions
+} bbb_functions_t;
+
+typedef enum {
+ core_func_OP_and,
+ core_func_OP_or,
+ core_func_OP_xor,
+ core_func_OP_inv,
+ core_func_OP_add,
+ core_func_OP_sub,
+ core_func_OP_addsat,
+ core_func_OP_subsat,
+ core_func_OP_subasr1,
+ core_func_OP_abs,
+ core_func_OP_subabssat,
+#ifdef ISP2401
+ core_func_OP_subsat_u,
+#endif
+ core_func_OP_muld,
+ core_func_OP_mul,
+ core_func_OP_qrmul,
+ core_func_OP_eq,
+ core_func_OP_ne,
+ core_func_OP_le,
+ core_func_OP_lt,
+ core_func_OP_ge,
+ core_func_OP_gt,
+ core_func_OP_asr,
+ core_func_OP_asl,
+ core_func_OP_asrrnd,
+ core_func_OP_lsl,
+ core_func_OP_lslsat,
+ core_func_OP_lsr,
+ core_func_OP_lsrrnd,
+ core_func_OP_clip_asym,
+ core_func_OP_clipz,
+ core_func_OP_div,
+ core_func_OP_mod,
+ core_func_OP_sqrt,
+ core_func_OP_mux,
+ core_func_OP_avgrnd,
+ core_func_OP_min,
+ core_func_OP_max,
+
+ core_func_num_functions
+
+} core_functions_t;
+
+/* inc_bbb_count() can be used for building blocks that are implemented with one operation.
+   inc_bbb_count_ext() is used in case the operation count is not known or greater than one.
+
+   For some operations there is a difference in operation count between the cloned version and the
+   non-cloned version. This difference is not visible on the reference code side.
+   We could add a min and max operation count for those operations, and keep track of those counts
+   separately. That way the impact can be seen in the report. */
+
+#ifdef DISABLE_OPCNT
+#define inc_bbb_count(func)
+#define inc_bbb_count_ext(func, cnt)
+#define enable_bbb_count()
+#define disable_bbb_count()
+#else
+#define inc_bbb_count(func) _inc_bbb_count(func)
+#define inc_bbb_count_ext(func, cnt) _inc_bbb_count_ext(func, cnt)
+#define enable_bbb_count() _enable_bbb_count()
+#define disable_bbb_count() _disable_bbb_count()
+#endif
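A hedged usage sketch of the counting hooks above, as a host-side reference implementation might apply them; the model function, its 16-bit element and the wrap-around cast are illustrative assumptions, while the macro, enumerator and print-function names come from this header:

#include <stdint.h>
#include <stdio.h>
#include "isp_op_count.h"

/* Reference building block instrumented with one count per call. */
static int16_t model_OP_1w_add(int16_t a, int16_t b)
{
	inc_bbb_count(bbb_func_OP_1w_add);
	return (int16_t)((int32_t)a + (int32_t)b);	/* wrap-around on overflow assumed */
}

/* Print only the building blocks that were actually exercised. */
static void report(FILE *fp)
{
	bbb_func_print_totals(fp, 1 /* non_zero_only */);
}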
+
+void
+inc_core_count_n(
+ core_functions_t func,
+ unsigned n);
+
+void
+_enable_bbb_count(void);
+
+void
+_disable_bbb_count(void);
+
+void
+_inc_bbb_count(
+ bbb_functions_t func);
+
+void
+_inc_bbb_count_ext(
+ bbb_functions_t func,
+ int op_count);
+
+void
+bbb_func_reset_count(void);
+
+void
+bbb_func_print_totals(
+	FILE *fp,
+ unsigned non_zero_only);
+
+void
+core_func_print_totals(
+	FILE *fp,
+ unsigned non_zero_only);
+
+#endif
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isp_public.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isp_public.h
new file mode 100644
index 000000000000..808ec050efc0
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isp_public.h
@@ -0,0 +1,186 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __ISP_PUBLIC_H_INCLUDED__
+#define __ISP_PUBLIC_H_INCLUDED__
+
+#include <type_support.h>
+#include "system_types.h"
+
+/*! Enable or disable the program complete irq signal of ISP[ID]
+
+	\param	ID[in]				ISP identifier
+ \param cnd[in] predicate
+
+ \return none, if(cnd) enable(ISP[ID].irq) else disable(ISP[ID].irq)
+ */
+extern void cnd_isp_irq_enable(
+ const isp_ID_t ID,
+ const bool cnd);
+
+/*! Read the state of cell ISP[ID]
+
+ \param ID[in] ISP identifier
+ \param state[out] isp state structure
+ \param stall[out] isp stall conditions
+
+ \return none, state = ISP[ID].state, stall = ISP[ID].stall
+ */
+extern void isp_get_state(
+ const isp_ID_t ID,
+ isp_state_t *state,
+ isp_stall_t *stall);
+
+
+/*! Write to the status and control register of ISP[ID]
+
+ \param ID[in] ISP identifier
+ \param reg[in] register index
+ \param value[in] The data to be written
+
+ \return none, ISP[ID].sc[reg] = value
+ */
+STORAGE_CLASS_ISP_H void isp_ctrl_store(
+ const isp_ID_t ID,
+ const unsigned int reg,
+ const hrt_data value);
+
+/*! Read from the status and control register of ISP[ID]
+
+ \param ID[in] ISP identifier
+ \param reg[in] register index
+
+ \return ISP[ID].sc[reg]
+ */
+STORAGE_CLASS_ISP_H hrt_data isp_ctrl_load(
+ const isp_ID_t ID,
+ const unsigned int reg);
+
+/*! Get the status of a bitfield in the control register of ISP[ID]
+
+ \param ID[in] ISP identifier
+ \param reg[in] register index
+ \param bit[in] The bit index to be checked
+
+ \return (ISP[ID].sc[reg] & (1<<bit)) != 0
+ */
+STORAGE_CLASS_ISP_H bool isp_ctrl_getbit(
+ const isp_ID_t ID,
+ const unsigned int reg,
+ const unsigned int bit);
+
+/*! Set a bitfield in the control register of ISP[ID]
+
+ \param ID[in] ISP identifier
+ \param reg[in] register index
+ \param bit[in] The bit index to be set
+
+ \return none, ISP[ID].sc[reg] |= (1<<bit)
+ */
+STORAGE_CLASS_ISP_H void isp_ctrl_setbit(
+ const isp_ID_t ID,
+ const unsigned int reg,
+ const unsigned int bit);
+
+/*! Clear a bitfield in the control register of ISP[ID]
+
+ \param ID[in] ISP identifier
+ \param reg[in] register index
+	\param	bit[in]				The bit index to be cleared
+
+ \return none, ISP[ID].sc[reg] &= ~(1<<bit)
+ */
+STORAGE_CLASS_ISP_H void isp_ctrl_clearbit(
+ const isp_ID_t ID,
+ const unsigned int reg,
+ const unsigned int bit);
+
+/*! Write to the DMEM of ISP[ID]
+
+ \param ID[in] ISP identifier
+ \param addr[in] the address in DMEM
+ \param data[in] The data to be written
+ \param size[in] The size(in bytes) of the data to be written
+
+ \return none, ISP[ID].dmem[addr...addr+size-1] = data
+ */
+STORAGE_CLASS_ISP_H void isp_dmem_store(
+ const isp_ID_t ID,
+ unsigned int addr,
+ const void *data,
+ const size_t size);
+
+/*! Read from the DMEM of ISP[ID]
+
+ \param ID[in] ISP identifier
+ \param addr[in] the address in DMEM
+	\param	data[out]			The data to be read
+ \param size[in] The size(in bytes) of the data to be read
+
+ \return none, data = ISP[ID].dmem[addr...addr+size-1]
+ */
+STORAGE_CLASS_ISP_H void isp_dmem_load(
+ const isp_ID_t ID,
+ const unsigned int addr,
+ void *data,
+ const size_t size);
+
+/*! Write a 32-bit datum to the DMEM of ISP[ID]
+
+ \param ID[in] ISP identifier
+ \param addr[in] the address in DMEM
+ \param data[in] The data to be written
+
+ \return none, ISP[ID].dmem[addr] = data
+ */
+STORAGE_CLASS_ISP_H void isp_dmem_store_uint32(
+ const isp_ID_t ID,
+ unsigned int addr,
+ const uint32_t data);
+
+/*! Load a 32-bit datum from the DMEM of ISP[ID]
+
+ \param ID[in] ISP identifier
+ \param addr[in] the address in DMEM
+
+ \return none, data = ISP[ID].dmem[addr]
+ */
+STORAGE_CLASS_ISP_H uint32_t isp_dmem_load_uint32(
+ const isp_ID_t ID,
+ const unsigned int addr);
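A hedged usage sketch of the scalar DMEM accessors declared above; the ISP0_ID enumerator and the DMEM offset are placeholders chosen for illustration, not values defined in this header:

#include <stdint.h>
#include "isp_public.h"

/* Store one 32-bit word into ISP DMEM and read it back. */
static void dmem_word_roundtrip(void)
{
	const isp_ID_t id = ISP0_ID;		/* assumed enumerator for the first ISP cell */
	const unsigned int addr = 0x100;	/* arbitrary example offset */
	uint32_t readback;

	isp_dmem_store_uint32(id, addr, 0xcafef00d);
	readback = isp_dmem_load_uint32(id, addr);
	(void)readback;				/* equals 0xcafef00d per the contracts above */
}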
+
+/*! Concatenate the LSW and MSW into a double precision word
+
+ \param x0[in] Integer containing the LSW
+ \param x1[in] Integer containing the MSW
+
+ \return x0 | (x1 << bits_per_vector_element)
+ */
+STORAGE_CLASS_ISP_H uint32_t isp_2w_cat_1w(
+ const uint16_t x0,
+ const uint16_t x1);
+
+unsigned isp_is_ready(isp_ID_t ID);
+
+unsigned isp_is_sleeping(isp_ID_t ID);
+
+void isp_start(isp_ID_t ID);
+
+void isp_wake(isp_ID_t ID);
+
+#endif /* __ISP_PUBLIC_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isys_dma_public.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isys_dma_public.h
new file mode 100644
index 000000000000..4b1603895f06
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isys_dma_public.h
@@ -0,0 +1,38 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __ISYS_DMA_PUBLIC_H_INCLUDED__
+#define __ISYS_DMA_PUBLIC_H_INCLUDED__
+
+#ifdef USE_INPUT_SYSTEM_VERSION_2401
+
+#include "system_types.h"
+#include "type_support.h"
+
+STORAGE_CLASS_ISYS2401_DMA_H void isys2401_dma_reg_store(
+ const isys2401_dma_ID_t dma_id,
+ const unsigned int reg,
+ const hrt_data value);
+
+STORAGE_CLASS_ISYS2401_DMA_H hrt_data isys2401_dma_reg_load(
+ const isys2401_dma_ID_t dma_id,
+ const unsigned int reg);
+
+extern void isys2401_dma_set_max_burst_size(
+ const isys2401_dma_ID_t dma_id,
+ uint32_t max_burst_size);
+
+#endif /* USE_INPUT_SYSTEM_VERSION_2401 */
+
+#endif /* __ISYS_DMA_PUBLIC_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isys_irq_public.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isys_irq_public.h
new file mode 100644
index 000000000000..c3e6f76f8c4d
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isys_irq_public.h
@@ -0,0 +1,45 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __ISYS_IRQ_PUBLIC_H__
+#define __ISYS_IRQ_PUBLIC_H__
+
+#include "isys_irq_global.h"
+#include "isys_irq_local.h"
+
+#if defined(USE_INPUT_SYSTEM_VERSION_2401)
+
+STORAGE_CLASS_ISYS2401_IRQ_H void isys_irqc_state_get(
+ const isys_irq_ID_t isys_irqc_id,
+ isys_irqc_state_t *state);
+
+STORAGE_CLASS_ISYS2401_IRQ_H void isys_irqc_state_dump(
+ const isys_irq_ID_t isys_irqc_id,
+ const isys_irqc_state_t *state);
+
+STORAGE_CLASS_ISYS2401_IRQ_H void isys_irqc_reg_store(
+ const isys_irq_ID_t isys_irqc_id,
+ const unsigned int reg_idx,
+ const hrt_data value);
+
+STORAGE_CLASS_ISYS2401_IRQ_H hrt_data isys_irqc_reg_load(
+ const isys_irq_ID_t isys_irqc_id,
+ const unsigned int reg_idx);
+
+STORAGE_CLASS_ISYS2401_IRQ_H void isys_irqc_status_enable(
+ const isys_irq_ID_t isys_irqc_id);
+
+#endif /* defined(USE_INPUT_SYSTEM_VERSION_2401) */
+
+#endif /* __ISYS_IRQ_PUBLIC_H__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isys_public.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isys_public.h
new file mode 100644
index 000000000000..097dde852c8a
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isys_public.h
@@ -0,0 +1,37 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __ISYS_PUBLIC_H_INCLUDED__
+#define __ISYS_PUBLIC_H_INCLUDED__
+
+#ifdef USE_INPUT_SYSTEM_VERSION_2401
+/*! Read the state of INPUT_SYSTEM[ID]
+ \param ID[in] INPUT_SYSTEM identifier
+ \param state[out] pointer to input system state structure
+ \return none, state = INPUT_SYSTEM[ID].state
+ */
+STORAGE_CLASS_INPUT_SYSTEM_H input_system_err_t input_system_get_state(
+ const input_system_ID_t ID,
+ input_system_state_t *state);
+/*! Dump the state of INPUT_SYSTEM[ID]
+ \param ID[in] INPUT_SYSTEM identifier
+ \param state[in] pointer to input system state structure
+ \return none
+	\depends on a host-supplied print function registered as part of ia_css_init()
+ */
+STORAGE_CLASS_INPUT_SYSTEM_H void input_system_dump_state(
+ const input_system_ID_t ID,
+ input_system_state_t *state);
+#endif /* USE_INPUT_SYSTEM_VERSION_2401 */
+#endif /* __ISYS_PUBLIC_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isys_stream2mmio_public.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isys_stream2mmio_public.h
new file mode 100644
index 000000000000..5624cfcfa015
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isys_stream2mmio_public.h
@@ -0,0 +1,101 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __ISYS_STREAM2MMIO_PUBLIC_H_INCLUDED__
+#define __ISYS_STREAM2MMIO_PUBLIC_H_INCLUDED__
+
+/*****************************************************
+ *
+ * Native command interface (NCI).
+ *
+ *****************************************************/
+/**
+ * @brief Get the stream2mmio-controller state.
+ * Get the state of the stream2mmio-controller register set.
+ *
+ * @param[in]	id		The global unique ID of the stream2mmio controller.
+ * @param[out]	state	Pointer to the register state.
+ */
+STORAGE_CLASS_STREAM2MMIO_H void stream2mmio_get_state(
+ const stream2mmio_ID_t ID,
+ stream2mmio_state_t *state);
+
+/**
+ * @brief Get the state of a stream2mmio-controller SID.
+ * Get the state of the register set of the selected SID.
+ *
+ * @param[in]	id		The global unique ID of the stream2mmio controller.
+ * @param[in]	sid_id		The SID ID.
+ * @param[out]	state	Pointer to the SID state.
+ */
+STORAGE_CLASS_STREAM2MMIO_H void stream2mmio_get_sid_state(
+ const stream2mmio_ID_t ID,
+ const stream2mmio_sid_ID_t sid_id,
+ stream2mmio_sid_state_t *state);
+/** end of NCI */
+
+/*****************************************************
+ *
+ * Device level interface (DLI).
+ *
+ *****************************************************/
+/**
+ * @brief Load the register value.
+ * Load the value of the register of the stream2mmio-controller.
+ *
+ * @param[in] ID The global unique ID for the stream2mmio-controller instance.
+ * @param[in] sid_id The SID in question.
+ * @param[in] reg_idx The offset address of the register.
+ *
+ * @return the value of the register.
+ */
+STORAGE_CLASS_STREAM2MMIO_H hrt_data stream2mmio_reg_load(
+ const stream2mmio_ID_t ID,
+ const stream2mmio_sid_ID_t sid_id,
+ const uint32_t reg_idx);
+
+/**
+ * @brief Dump the SID processor state.
+ * Dump the state of the SID register-set.
+ *
+ * @param[in] state Pointer to the register-state.
+ */
+STORAGE_CLASS_STREAM2MMIO_H void stream2mmio_print_sid_state(
+ stream2mmio_sid_state_t *state);
+/**
+ * @brief Dump the stream2mmio state.
+ * Dump the state of the stream2mmio-controller register-set.
+ *
+ * @param[in] id The global unique ID of the stream2mmio controller.
+ * @param[in] state Pointer to the register-state.
+ */
+STORAGE_CLASS_STREAM2MMIO_H void stream2mmio_dump_state(
+ const stream2mmio_ID_t ID,
+ stream2mmio_state_t *state);
+/**
+ * @brief Store a value to the register.
+ * Store a value to the register of the stream2mmio-controller.
+ *
+ * @param[in] ID The global unique ID for the stream2mmio-controller instance.
+ * @param[in] reg The offset address of the register.
+ * @param[in] value The value to be stored.
+ *
+ */
+STORAGE_CLASS_STREAM2MMIO_H void stream2mmio_reg_store(
+ const stream2mmio_ID_t ID,
+ const hrt_address reg,
+ const hrt_data value);
+/** end of DLI */
+
+#endif /* __ISYS_STREAM2MMIO_PUBLIC_H_INCLUDED__ */
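+
+/* A minimal DLI usage sketch (illustrative only; STREAM2MMIO0_ID,
+ * STREAM2MMIO_SID0_ID and the zero register offset are assumed names/values):
+ *
+ *	hrt_data val;
+ *
+ *	val = stream2mmio_reg_load(STREAM2MMIO0_ID, STREAM2MMIO_SID0_ID, 0);
+ *	stream2mmio_reg_store(STREAM2MMIO0_ID, 0, val);
+ */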
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/mmu_public.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/mmu_public.h
new file mode 100644
index 000000000000..4258fa872087
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/mmu_public.h
@@ -0,0 +1,82 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __MMU_PUBLIC_H_INCLUDED__
+#define __MMU_PUBLIC_H_INCLUDED__
+
+#include "system_types.h"
+
+/*! Set the page table base index of MMU[ID]
+
+ \param ID[in] MMU identifier
+ \param base_index[in] page table base index
+
+ \return none, MMU[ID].page_table_base_index = base_index
+ */
+STORAGE_CLASS_EXTERN void mmu_set_page_table_base_index(
+ const mmu_ID_t ID,
+ const hrt_data base_index);
+
+/*! Get the page table base index of MMU[ID]
+
+ \param ID[in] MMU identifier
+
+ \return MMU[ID].page_table_base_index
+ */
+STORAGE_CLASS_EXTERN hrt_data mmu_get_page_table_base_index(
+ const mmu_ID_t ID);
+
+/*! Invalidate the page table cache of MMU[ID]
+
+ \param ID[in] MMU identifier
+
+ \return none
+ */
+STORAGE_CLASS_EXTERN void mmu_invalidate_cache(
+ const mmu_ID_t ID);
+
+
+/*! Invalidate the page table cache of all MMUs
+
+ \return none
+ */
+STORAGE_CLASS_EXTERN void mmu_invalidate_cache_all(void);
+
+/*! Write to a control register of MMU[ID]
+
+ \param ID[in] MMU identifier
+ \param reg[in] register index
+ \param value[in] The data to be written
+
+ \return none, MMU[ID].ctrl[reg] = value
+ */
+STORAGE_CLASS_MMU_H void mmu_reg_store(
+ const mmu_ID_t ID,
+ const unsigned int reg,
+ const hrt_data value);
+
+/*! Read from a control register of MMU[ID]
+
+ \param ID[in] MMU identifier
+ \param reg[in] register index
+
+ \return MMU[ID].ctrl[reg]
+ */
+STORAGE_CLASS_MMU_H hrt_data mmu_reg_load(
+ const mmu_ID_t ID,
+ const unsigned int reg);
+
+#endif /* __MMU_PUBLIC_H_INCLUDED__ */
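+
+/* A minimal usage sketch (illustrative only; MMU0_ID and base_index are
+ * assumed values). After changing the page table base index the page table
+ * cache is typically invalidated so that no stale translations remain:
+ *
+ *	mmu_set_page_table_base_index(MMU0_ID, base_index);
+ *	mmu_invalidate_cache(MMU0_ID);
+ */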
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/osys_public.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/osys_public.h
new file mode 100644
index 000000000000..8695e3c01fa6
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/osys_public.h
@@ -0,0 +1,20 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __OSYS_PUBLIC_H_INCLUDED__
+#define __OSYS_PUBLIC_H_INCLUDED__
+
+#include "system_types.h"
+
+#endif /* __OSYS_PUBLIC_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/pipeline_public.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/pipeline_public.h
new file mode 100644
index 000000000000..32cea582b4c4
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/pipeline_public.h
@@ -0,0 +1,18 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __PIPELINE_PUBLIC_H_INCLUDED__
+#define __PIPELINE_PUBLIC_H_INCLUDED__
+
+#endif /* __PIPELINE_PUBLIC_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/pixelgen_public.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/pixelgen_public.h
new file mode 100644
index 000000000000..c0f3f3ea32d7
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/pixelgen_public.h
@@ -0,0 +1,79 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __PIXELGEN_PUBLIC_H_INCLUDED__
+#define __PIXELGEN_PUBLIC_H_INCLUDED__
+
+#ifdef USE_INPUT_SYSTEM_VERSION_2401
+/*****************************************************
+ *
+ * Native command interface (NCI).
+ *
+ *****************************************************/
+/**
+ * @brief Get the pixelgen state.
+ * Get the state of the pixelgen register-set.
+ *
+ * @param[in] id The global unique ID of the pixelgen controller.
+ * @param[out] state Pointer to the register-state.
+ */
+STORAGE_CLASS_PIXELGEN_H void pixelgen_ctrl_get_state(
+ const pixelgen_ID_t ID,
+ pixelgen_ctrl_state_t *state);
+/**
+ * @brief Dump the pixelgen state.
+ * Dump the state of the pixelgen register-set.
+ *
+ * @param[in] id The global unique ID of the pixelgen controller.
+ * @param[in] state Pointer to the register-state.
+ */
+STORAGE_CLASS_PIXELGEN_H void pixelgen_ctrl_dump_state(
+ const pixelgen_ID_t ID,
+ pixelgen_ctrl_state_t *state);
+/** end of NCI */
+
+/*****************************************************
+ *
+ * Device level interface (DLI).
+ *
+ *****************************************************/
+/**
+ * @brief Load the register value.
+ * Load the value of a register of the pixelgen.
+ *
+ * @param[in] ID The global unique ID for the pixelgen instance.
+ * @param[in] reg The offset address of the register.
+ *
+ * @return the value of the register.
+ */
+STORAGE_CLASS_PIXELGEN_H hrt_data pixelgen_ctrl_reg_load(
+ const pixelgen_ID_t ID,
+ const hrt_address reg);
+/**
+ * @brief Store a value to the register.
+ * Store a value to a register of the pixelgen.
+ *
+ * @param[in] ID The global unique ID for the pixelgen.
+ * @param[in] reg The offset address of the register.
+ * @param[in] value The value to be stored.
+ *
+ */
+STORAGE_CLASS_PIXELGEN_H void pixelgen_ctrl_reg_store(
+ const pixelgen_ID_t ID,
+ const hrt_address reg,
+ const hrt_data value);
+/** end of DLI */
+
+#endif /* USE_INPUT_SYSTEM_VERSION_2401 */
+#endif /* __PIXELGEN_PUBLIC_H_INCLUDED__ */
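+
+/* A minimal DLI usage sketch (illustrative only; PIXELGEN0_ID and the zero
+ * register offset are assumed names/values):
+ *
+ *	hrt_data val;
+ *
+ *	val = pixelgen_ctrl_reg_load(PIXELGEN0_ID, 0);
+ *	pixelgen_ctrl_reg_store(PIXELGEN0_ID, 0, val);
+ */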
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/ref_vector_func.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/ref_vector_func.h
new file mode 100644
index 000000000000..3e955fca2a94
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/ref_vector_func.h
@@ -0,0 +1,1222 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _REF_VECTOR_FUNC_H_INCLUDED_
+#define _REF_VECTOR_FUNC_H_INCLUDED_
+
+#include "storage_class.h"
+
+#ifdef INLINE_VECTOR_FUNC
+#define STORAGE_CLASS_REF_VECTOR_FUNC_H STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_REF_VECTOR_DATA_H STORAGE_CLASS_INLINE_DATA
+#else /* INLINE_VECTOR_FUNC */
+#define STORAGE_CLASS_REF_VECTOR_FUNC_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_REF_VECTOR_DATA_H STORAGE_CLASS_EXTERN_DATA
+#endif /* INLINE_VECTOR_FUNC */
+
+
+#include "ref_vector_func_types.h"
+
+/** @brief Doubling multiply accumulate with saturation
+ *
+ * @param[in] acc accumulator
+ * @param[in] a multiply input
+ * @param[in] b multiply input
+ *
+ * @return acc + (a*b)
+ *
+ * This function will do a doubling multiply on the
+ * inputs a and b, and will add the result to acc.
+ * In case of an overflow of acc, it will saturate.
+ */
+STORAGE_CLASS_REF_VECTOR_FUNC_H tvector2w OP_1w_maccd_sat(
+ tvector2w acc,
+ tvector1w a,
+ tvector1w b );
+
+/** @brief Doubling multiply accumulate
+ *
+ * @param[in] acc accumulator
+ * @param[in] a multiply input
+ * @param[in] b multiply input
+ *
+ * @return acc + (a*b)
+ *
+ * This function will do a doubling multiply on the
+ * inputs a and b, and will add the result to acc.
+ * In case of overflow it will not saturate but wrap around.
+ */
+STORAGE_CLASS_REF_VECTOR_FUNC_H tvector2w OP_1w_maccd(
+ tvector2w acc,
+ tvector1w a,
+ tvector1w b );
+
+/** @brief Re-aligning multiply
+ *
+ * @param[in] a multiply input
+ * @param[in] b multiply input
+ * @param[in] shift shift amount
+ *
+ * @return (a*b)>>shift
+ *
+ * This function will multiply a with b, followed by a right
+ * shift with rounding. The result is saturated and cast
+ * to single precision.
+ */
+STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w OP_1w_mul_realigning(
+ tvector1w a,
+ tvector1w b,
+ tscalar1w shift );
+
+/** @brief Leading bit index
+ *
+ * @param[in] a input
+ *
+ * @return index of the leading bit of each element
+ *
+ * This function finds the index of leading one (set) bit of the
+ * input. The index starts with 0 for the LSB and can go up to
+ * ISP_VEC_ELEMBITS-1 for the MSB. For an input equal to zero,
+ * the returned index is -1.
+ */
+STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w OP_1w_lod(
+ tvector1w a);
+
+/** @brief Config Unit Input Processing
+ *
+ * @param[in] a input
+ * @param[in] input_scale input scaling factor
+ * @param[in] input_offset input offset factor
+ *
+ * @return scaled and offset-added input, clamped to MAXVALUE
+ *
+ * As part of input processing for the piecewise linear estimation config unit,
+ * this function will perform scaling, followed by adding the offset and
+ * then clamping to MAXVALUE.
+ * It asserts -MAX_SHIFT_1W <= input_scale <= MAX_SHIFT_1W, and
+ * -MAX_SHIFT_1W <= input_offset <= MAX_SHIFT_1W
+ */
+STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w OP_1w_input_scaling_offset_clamping(
+ tvector1w a,
+ tscalar1w_5bit_signed input_scale,
+ tscalar1w_5bit_signed input_offset);
+
+/** @brief Config Unit Output Processing
+ *
+ * @param[in] a output
+ * @param[in] output_scale output scaling factor
+ *
+ * @return scaled & clamped output value
+ *
+ * As part of output processing for piecewise linear estimation config unit,
+ * this function will perform scaling and then clamping to the output
+ * MAX value.
+ * It asserts -MAX_SHIFT_1W <= output_scale <= MAX_SHIFT_1W
+ */
+STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w OP_1w_output_scaling_clamping(
+ tvector1w a,
+ tscalar1w_5bit_signed output_scale);
+
+/** @brief Config Unit Piecewiselinear estimation
+ *
+ * @param[in] a input
+ * @param[in] config_points config parameter structure
+ *
+ * @return piecewise linear estimated output
+ *
+ * Given a set of N points {(x1,y1), (x2,y2), ..., (xn,yn)}, to find
+ * the functional value at an arbitrary point around the input set,
+ * this function will perform input processing followed by piecewise
+ * linear estimation and then output processing to yield the final value.
+ */
+STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w OP_1w_piecewise_estimation(
+ tvector1w a,
+ ref_config_points config_points);
+
+/** @brief Fast Config Unit
+ *
+ * @param[in] x input
+ * @param[in] init_vectors LUT data structure
+ *
+ * @return piecewise linear estimated output
+ * This block gets an input x and a set of input configuration points stored in a look-up
+ * table of 32 elements. First, the x input is clipped to be within the range [x1, xn+1].
+ * Then, it computes the interval in which the input lies. Finally, the output is computed
+ * by performing linear interpolation based on the interval properties (i.e. x_prev, slope,
+ * and offset). This block assumes that the points are equally spaced and that the interval
+ * size is a power of 2.
+ **/
+STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w OP_1w_XCU(
+ tvector1w x,
+ xcu_ref_init_vectors init_vectors);
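+
+/* A scalar sketch of the interpolation step described above (illustrative
+ * only; x_first, x_last, interval_size_log2 and the per-interval x_prev,
+ * slope and offset arrays, as well as the use of SLOPE_A_RESOLUTION as the
+ * shift, are assumptions about the LUT layout):
+ *
+ *	x   = min(max(x, x_first), x_last);
+ *	i   = (x - x_first) >> interval_size_log2;
+ *	out = offset[i] + ((slope[i] * (x - x_prev[i])) >> SLOPE_A_RESOLUTION);
+ */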
+
+
+/** @brief LXCU
+ *
+ * @param[in] x input
+ * @param[in] init_vectors LUT data structure
+ *
+ * @return logarithmic piecewise linear estimated output.
+ * This block gets an input x and a set of input configuration points stored in a look-up
+ * table of 32 elements. It computes the interval in which the input lies.
+ * Then output is computed by performing linear interpolation based on the interval
+ * properties (i.e. x_prev, slope, and offset).
+ * This BBB assumes the spacing of the x-coordinates of the "init vectors" increases exponentially as
+ * shown below.
+ * interval size : 2^0 2^1 2^2 2^3
+ * x-coordinates: x0<--->x1<---->x2<---->x3<---->
+ **/
+STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w OP_1w_LXCU(
+ tvector1w x,
+ xcu_ref_init_vectors init_vectors);
+
+/** @brief Coring
+ *
+ * @param[in] coring_vec Amount of coring based on brightness level
+ * @param[in] filt_input Vector of input pixels on which Coring is applied
+ * @param[in] m_CnrCoring0 Coring Level0
+ *
+ * @return vector of filtered pixels after coring is applied
+ *
+ * This function will perform adaptive coring based on brightness level to
+ * remove noise
+ */
+STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w coring(
+ tvector1w coring_vec,
+ tvector1w filt_input,
+ tscalar1w m_CnrCoring0 );
+
+/** @brief Normalised FIR with coefficients [3,4,1]
+ *
+ * @param[in] m 1x3 matrix with pixels
+ *
+ * @return filtered output
+ *
+ * This function will calculate the
+ * Normalised FIR with coefficients [3,4,1],
+ * -5dB at Fs/2, -90 degree phase shift (quarter pixel)
+ */
+STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir1x3m_5dB_m90_nrm (
+ const s_1w_1x3_matrix m);
+
+/** @brief Normalised FIR with coefficients [1,4,3]
+ *
+ * @param[in] m 1x3 matrix with pixels
+ *
+ * @return filtered output
+ *
+ * This function will calculate the
+ * Normalised FIR with coefficients [1,4,3],
+ * -5dB at Fs/2, +90 degree phase shift (quarter pixel)
+ */
+STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir1x3m_5dB_p90_nrm (
+ const s_1w_1x3_matrix m);
+
+/** @brief Normalised FIR with coefficients [1,2,1]
+ *
+ * @param[in] m 1x3 matrix with pixels
+ *
+ * @return filtered output
+ *
+ * This function will calculate the
+ * Normalised FIR with coefficients [1,2,1], -6dB at Fs/2
+ */
+STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir1x3m_6dB_nrm (
+ const s_1w_1x3_matrix m);
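+
+/* A scalar model of one output pixel for the [1,2,1] kernel (illustrative
+ * only; p0..p2 are the three input pixels, and the "+2" rounding term is an
+ * assumption, the vector implementation may round differently):
+ *
+ *	out = (p0 + 2 * p1 + p2 + 2) >> 2;
+ */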
+
+/** @brief Normalised FIR with coefficients [13,16,3]
+ *
+ * @param[in] m 1x3 matrix with pixels
+ *
+ * @return filtered output
+ *
+ * This function will calculate the
+ * Normalised FIR with coefficients [13,16,3],
+ */
+STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir1x3m_6dB_nrm_ph0 (
+ const s_1w_1x3_matrix m);
+
+/** @brief Normalised FIR with coefficients [9,16,7]
+ *
+ * @param[in] m 1x3 matrix with pixels
+ *
+ * @return filtered output
+ *
+ * This function will calculate the
+ * Normalised FIR with coefficients [9,16,7],
+ */
+STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir1x3m_6dB_nrm_ph1 (
+ const s_1w_1x3_matrix m);
+
+/** @brief Normalised FIR with coefficients [5,16,11]
+ *
+ * @param[in] m 1x3 matrix with pixels
+ *
+ * @return filtered output
+ *
+ * This function will calculate the
+ * Normalised FIR with coefficients [5,16,11],
+ */
+STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir1x3m_6dB_nrm_ph2 (
+ const s_1w_1x3_matrix m);
+
+/** @brief Normalised FIR with coefficients [1,16,15]
+ *
+ * @param[in] m 1x3 matrix with pixels
+ *
+ * @return filtered output
+ *
+ * This function will calculate the
+ * Normalised FIR with coefficients [1,16,15],
+ */
+STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir1x3m_6dB_nrm_ph3 (
+ const s_1w_1x3_matrix m);
+
+/** @brief Normalised FIR with programable phase shift
+ *
+ * @param[in] m 1x3 matrix with pixels
+ * @param[in] coeff phase shift
+ *
+ * @return filtered output
+ *
+ * This function will calculate the
+ * Normalised FIR with coefficients [8-coeff,16,8+coeff],
+ */
+STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir1x3m_6dB_nrm_calc_coeff (
+ const s_1w_1x3_matrix m, tscalar1w_3bit coeff);
+
+/** @brief 3 tap FIR with coefficients [1,1,1]
+ *
+ * @param[in] m 1x3 matrix with pixels
+ *
+ * @return filtered output
+ *
+ * This function will calculate the
+ * FIR with coefficients [1,1,1], -9dB at Fs/2 normalized with factor 1/2
+ */
+STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir1x3m_9dB_nrm (
+ const s_1w_1x3_matrix m);
+
+#ifdef ISP2401
+/** @brief symmetric 3 tap FIR acts as LPF or BSF
+ *
+ * @param[in] m 1x3 matrix with pixels
+ * @param[in] k filter coefficient shift
+ * @param[in] bsf_flag 1 for BSF and 0 for LPF
+ *
+ * @return filtered output
+ *
+ * This function performs variable coefficient symmetric 3 tap filter which can
+ * be either used as Low Pass Filter or Band Stop Filter.
+ * Symmetric 3-tap filter with DC gain 1 has filter coefficients [a, 1-2a, a]
+ * For LPF 'a' can be approximated as (1 - 2^(-k))/4, k = 0, 1, 2, ...
+ * and filter output can be approximated as:
+ * out_LPF = ((v00 + v02) - ((v00 + v02) >> k) + (2 * (v01 + (v01 >> k)))) >> 2
+ * For BSF 'a' can be approximated as (1 + 2^(-k))/4, k = 0, 1, 2, ...
+ * and filter output can be approximated as:
+ * out_BSF = ((v00 + v02) + ((v00 + v02) >> k) + (2 * (v01 - (v01 >> k)))) >> 2
+ * For a given filter coefficient shift 'k' and bsf_flag this function
+ * behaves either as LPF or BSF.
+ * All computation is done using 1w arithmetic and implementation does not use
+ * any multiplication.
+ */
+STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w
+sym_fir1x3m_lpf_bsf(s_1w_1x3_matrix m,
+ tscalar1w k,
+ tscalar_bool bsf_flag);
+#endif
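+
+/* A scalar model of the two approximations documented above (illustrative
+ * only; v00..v02 are the three input pixels and k the coefficient shift):
+ *
+ *	s       = v00 + v02;
+ *	out_lpf = ((s - (s >> k)) + 2 * (v01 + (v01 >> k))) >> 2;
+ *	out_bsf = ((s + (s >> k)) + 2 * (v01 - (v01 >> k))) >> 2;
+ */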
+
+/** @brief Normalised 2D FIR with coefficients [1;2;1] * [1,2,1]
+ *
+ * @param[in] m 3x3 matrix with pixels
+ *
+ * @return filtered output
+ *
+ * This function will calculate the
+ * Normalised FIR with coefficients [1;2;1] * [1,2,1]
+ * Unity gain filter through repeated scaling and rounding
+ * - 6 rotate operations per output
+ * - 8 vector operations per output
+ * _______
+ * 14 total operations
+ */
+STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir3x3m_6dB_nrm (
+ const s_1w_3x3_matrix m);
+
+/** @brief Normalised 2D FIR with coefficients [1;1;1] * [1,1,1]
+ *
+ * @param[in] m 3x3 matrix with pixels
+ *
+ * @return filtered output
+ *
+ * This function will calculate the
+ * Normalised FIR with coefficients [1;1;1] * [1,1,1]
+ *
+ * (near) Unity gain filter through repeated scaling and rounding
+ * - 6 rotate operations per output
+ * - 8 vector operations per output
+ * _______
+ * 14 operations
+ */
+STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir3x3m_9dB_nrm (
+ const s_1w_3x3_matrix m);
+
+/** @brief Normalised dual output 2D FIR with coefficients [1;2;1] * [1,2,1]
+ *
+ * @param[in] m 4x3 matrix with pixels
+ *
+ * @return two filtered outputs (2x1 matrix)
+ *
+ * This function will calculate the
+ * Normalised FIR with coefficients [1;2;1] * [1,2,1]
+ * and produce two outputs (vertical)
+ * Unity gain filter through repeated scaling and rounding
+ * compute two outputs per call to re-use common intermediates
+ * - 4 rotate operations per output
+ * - 6 vector operations per output (alternative possible, but in this
+ * form it's not obvious to re-use variables)
+ * _______
+ * 10 total operations
+ */
+ STORAGE_CLASS_REF_VECTOR_FUNC_H s_1w_2x1_matrix fir3x3m_6dB_out2x1_nrm (
+ const s_1w_4x3_matrix m);
+
+/** @brief Normalised dual output 2D FIR with coefficients [1;1;1] * [1,1,1]
+ *
+ * @param[in] m 4x3 matrix with pixels
+ *
+ * @return two filtered outputs (2x1 matrix)
+ *
+ * This function will calculate the
+ * Normalised FIR with coefficients [1;1;1] * [1,1,1]
+ * and produce two outputs (vertical)
+ * (near) Unity gain filter through repeated scaling and rounding
+ * compute two outputs per call to re-use common intermediates
+ * - 4 rotate operations per output
+ * - 7 vector operations per output (alternative possible, but in this
+ * form it's not obvious to re-use variables)
+ * _______
+ * 11 total operations
+ */
+STORAGE_CLASS_REF_VECTOR_FUNC_H s_1w_2x1_matrix fir3x3m_9dB_out2x1_nrm (
+ const s_1w_4x3_matrix m);
+
+/** @brief Normalised 2D FIR 5x5
+ *
+ * @param[in] m 5x5 matrix with pixels
+ *
+ * @return filtered output
+ *
+ * This function will calculate the
+ * Normalised FIR with coefficients [1;1;1] * [1;2;1] * [1,2,1] * [1,1,1]
+ * and produce a filtered output
+ * (near) Unity gain filter through repeated scaling and rounding
+ * - 20 rotate operations per output
+ * - 28 vector operations per output
+ * _______
+ * 48 total operations
+*/
+STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir5x5m_15dB_nrm (
+ const s_1w_5x5_matrix m);
+
+/** @brief Normalised FIR 1x5
+ *
+ * @param[in] m 1x5 matrix with pixels
+ *
+ * @return filtered output
+ *
+ * This function will calculate the
+ * Normalised FIR with coefficients [1,2,1] * [1,1,1] = [1,4,6,4,1]
+ * and produce a filtered output
+ * (near) Unity gain filter through repeated scaling and rounding
+ * - 4 rotate operations per output
+ * - 5 vector operations per output
+ * _______
+ * 9 total operations
+*/
+STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir1x5m_12dB_nrm (
+ const s_1w_1x5_matrix m);
+
+/** @brief Normalised 2D FIR 5x5
+ *
+ * @param[in] m 5x5 matrix with pixels
+ *
+ * @return filtered output
+ *
+ * This function will calculate the
+ * Normalised FIR with coefficients [1;2;1] * [1;2;1] * [1,2,1] * [1,2,1]
+ * and produce a filtered output
+ * (near) Unity gain filter through repeated scaling and rounding
+ * - 20 rotate operations per output
+ * - 30 vector operations per output
+ * _______
+ * 50 total operations
+*/
+STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir5x5m_12dB_nrm (
+ const s_1w_5x5_matrix m);
+
+/** @brief Approximate averaging FIR 1x5
+ *
+ * @param[in] m 1x5 matrix with pixels
+ *
+ * @return filtered output
+ *
+ * This function will produce filtered output by
+ * applying the filter coefficients (1/8) * [1,1,1,1,1]
+ * _______
+ * 5 vector operations
+*/
+STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir1x5m_box (
+ s_1w_1x5_matrix m);
+
+/** @brief Approximate averaging FIR 1x9
+ *
+ * @param[in] m 1x9 matrix with pixels
+ *
+ * @return filtered output
+ *
+ * This function will produce filtered output by
+ * applying the filter coefficients (1/16) * [1,1,1,1,1,1,1,1,1]
+ * _______
+ * 9 vector operations
+*/
+STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir1x9m_box (
+ s_1w_1x9_matrix m);
+
+/** @brief Approximate averaging FIR 1x11
+ *
+ * @param[in] m 1x11 matrix with pixels
+ *
+ * @return filtered output
+ *
+ * This function will produce filtered output by
+ * applying the filter coefficients (1/16) * [1,1,1,1,1,1,1,1,1,1,1]
+ * _______
+ * 12 vector operations
+*/
+STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w fir1x11m_box (
+ s_1w_1x11_matrix m);
+
+/** @brief Symmetric 7 tap filter with normalization
+ *
+ * @param[in] in 1x7 matrix with pixels
+ * @param[in] coeff 1x4 matrix with coefficients
+ * @param[in] out_shift output pixel shift value for normalization
+ *
+ * @return symmetric 7 tap filter output
+ *
+ * This function performs symmetric 7 tap filter over input pixels.
+ * Filter sum is normalized by shifting out_shift bits.
+ * Filter sum: p0*c3 + p1*c2 + p2*c1 + p3*c0 + p4*c1 + p5*c2 + p6*c3
+ * is implemented as: (p0 + p6)*c3 + (p1 + p5)*c2 + (p2 + p4)*c1 + p3*c0 to
+ * reduce multiplication.
+ * Input pixels should be scaled, otherwise overflow is possible during
+ * addition.
+*/
+STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w
+fir1x7m_sym_nrm(s_1w_1x7_matrix in,
+ s_1w_1x4_matrix coeff,
+ tvector1w out_shift);
+
+/** @brief Symmetric 7 tap filter with normalization at input side
+ *
+ * @param[in] in 1x7 matrix with pixels
+ * @param[in] coeff 1x4 matrix with coefficients
+ *
+ * @return symmetric 7 tap filter output
+ *
+ * This function performs symmetric 7 tap filter over input pixels.
+ * Filter sum: p0*c3 + p1*c2 + p2*c1 + p3*c0 + p4*c1 + p5*c2 + p6*c3
+ * = (p0 + p6)*c3 + (p1 + p5)*c2 + (p2 + p4)*c1 + p3*c0
+ * Input pixels and coefficients are in Qn format, where n =
+ * ISP_VEC_ELEMBITS - 1 (i.e. Q15 for Broxton).
+ * To avoid double precision arithmetic, the input pixel sum and the final sum
+ * are implemented using avgrnd and the coefficient multiplication using qrmul.
+ * The final result is in Qm format, where m = ISP_VEC_ELEMBITS - 2 (i.e. Q14
+ * for Broxton).
+*/
+STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w
+fir1x7m_sym_innrm_approx(s_1w_1x7_matrix in,
+ s_1w_1x4_matrix coeff);
+
+/** @brief Symmetric 7 tap filter with normalization at output side
+ *
+ * @param[in] in 1x7 matrix with pixels
+ * @param[in] coeff 1x4 matrix with coefficients
+ *
+ * @return symmetric 7 tap filter output
+ *
+ * This function performs symmetric 7 tap filter over input pixels.
+ * Filter sum: p0*c3 + p1*c2 + p2*c1 + p3*c0 + p4*c1 + p5*c2 + p6*c3
+ * = (p0 + p6)*c3 + (p1 + p5)*c2 + (p2 + p4)*c1 + p3*c0
+ * Input pixels are in Qn and coefficients are in Qm format, where n =
+ * ISP_VEC_ELEMBITS - 2 and m = ISP_VEC_ELEMBITS - 1 (i.e. Q14 and Q15
+ * respectively for Broxton).
+ * To avoid double precision arithmetic, the input pixel sum and the final sum
+ * are implemented using addsat and the coefficient multiplication using qrmul.
+ * The final sum is left shifted by 2 and saturated to produce the result in Qm
+ * format (i.e. Q15 for Broxton).
+*/
+STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w
+fir1x7m_sym_outnrm_approx(s_1w_1x7_matrix in,
+ s_1w_1x4_matrix coeff);
+
+/** @brief 4 tap filter with normalization
+ *
+ * @param[in] in 1x4 matrix with pixels
+ * @param[in] coeff 1x4 matrix with coefficients
+ * @param[in] out_shift output pixel shift value for normalization
+ *
+ * @return 4 tap filter output
+ *
+ * This function performs 4 tap filter over input pixels.
+ * Filter sum is normalized by shifting out_shift bits.
+ * Filter sum: p0*c0 + p1*c1 + p2*c2 + p3*c3
+*/
+STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w
+fir1x4m_nrm(s_1w_1x4_matrix in,
+ s_1w_1x4_matrix coeff,
+ tvector1w out_shift);
+
+/** @brief 4 tap filter with normalization for half pixel interpolation
+ *
+ * @param[in] in 1x4 matrix with pixels
+ *
+ * @return 4 tap filter output with filter tap [-1 9 9 -1]/16
+ *
+ * This function performs 4 tap filter over input pixels.
+ * Filter sum: -p0 + 9*p1 + 9*p2 - p3
+ * This filter implementation is completely free from multiplication and double
+ * precision arithmetic.
+ * Typical usage of this filter is to half pixel interpolation of Bezier
+ * surface
+ * */
+STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w
+fir1x4m_bicubic_bezier_half(s_1w_1x4_matrix in);
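+
+/* A scalar model of the documented [-1 9 9 -1]/16 filter sum (illustrative
+ * only; p0..p3 are the four input pixels and the "+8" rounding term is an
+ * assumption about how the /16 normalisation rounds):
+ *
+ *	out = (-p0 + 9 * p1 + 9 * p2 - p3 + 8) >> 4;
+ */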
+
+/** @brief 4 tap filter with normalization for quarter pixel interpolation
+ *
+ * @param[in] in 1x4 matrix with pixels
+ * @param[in] coeff 1x4 matrix with coefficients
+ *
+ * @return 4 tap filter output
+ *
+ * This function performs 4 tap filter over input pixels.
+ * Filter sum: p0*c0 + p1*c1 + p2*c2 + p3*c3
+ * To avoid double precision arithmetic we implemented multiplication using
+ * qrmul and addition using avgrnd. Coefficient (c0 to c3) formats are assumed
+ * to be: Qm, Qn, Qo, Qm, where m = n + 2 and o = n + 1.
+ * Typical usage of this filter is to quarter pixel interpolation of Bezier
+ * surface with filter coefficients:[-9 111 29 -3]/128. For which coefficient
+ * values should be: [-9216/2^17 28416/2^15 1484/2^16 -3072/2^17] for
+ * ISP_VEC_ELEMBITS = 16.
+*/
+STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w
+fir1x4m_bicubic_bezier_quarter(s_1w_1x4_matrix in,
+ s_1w_1x4_matrix coeff);
+
+
+/** @brief Symmetric 3 tap filter with normalization
+ *
+ * @param[in] in 1x3 matrix with pixels
+ * @param[in] coeff 1x2 matrix with coefficients
+ * @param[in] out_shift output pixel shift value for normalization
+ *
+ * @return symmetric 3 tap filter output
+ *
+ * This function performs a symmetric 3 tap filter over input pixels.
+ * Filter sum is normalized by shifting out_shift bits.
+ * Filter sum: p0*c1 + p1*c0 + p2*c1
+ * is implemented as: (p0 + p2)*c1 + p1*c0 to reduce multiplication.
+ * Input pixels should be scaled, otherwise overflow is possible during
+ * addition.
+*/
+STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w
+fir1x3m_sym_nrm(s_1w_1x3_matrix in,
+ s_1w_1x2_matrix coeff,
+ tvector1w out_shift);
+
+/** @brief Symmetric 3 tap filter with normalization
+ *
+ * @param[in] in 1x3 matrix with pixels
+ * @param[in] coeff 1x2 matrix with coefficients
+ *
+ * @return symmetric 3 tap filter output
+ *
+ * This function performs symmetric 3 tap filter over input pixels.
+ * Filter sum: p0*c1 + p1*c0 + p2*c1 = (p0 + p2)*c1 + p1*c0
+ * Input pixels are in Qn and coefficient c0 is in Qm and c1 is in Qn format,
+ * where n = ISP_VEC_ELEMBITS - 1 and m = ISP_VEC_ELEMBITS - 2 (i.e. Q15 and Q14
+ * respectively for Broxton).
+ * To avoid double precision arithmetic, the input pixel sum is implemented using
+ * avgrnd, the coefficient multiplication using qrmul and the final sum using addsat.
+ * The final sum is in Qm format (i.e. Q14 for Broxton).
+*/
+STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w
+fir1x3m_sym_nrm_approx(s_1w_1x3_matrix in,
+ s_1w_1x2_matrix coeff);
+
+/** @brief Mean of 1x3 matrix
+ *
+ * @param[in] m 1x3 matrix with pixels
+ *
+ * @return mean of 1x3 matrix
+ *
+ * This function calculates the mean of 1x3 pixels,
+ * with a factor of 4/3.
+*/
+STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w mean1x3m(
+ s_1w_1x3_matrix m);
+
+/** @brief Mean of 3x3 matrix
+ *
+ * @param[in] m 3x3 matrix with pixels
+ *
+ * @return mean of 3x3 matrix
+ *
+ * This function calculates the mean of 3x3 pixels,
+ * with a factor of 16/9.
+*/
+STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w mean3x3m(
+ s_1w_3x3_matrix m);
+
+/** @brief Mean of 1x4 matrix
+ *
+ * @param[in] m 1x4 matrix with pixels
+ *
+ * @return mean of 1x4 matrix
+ *
+ * This function calculates the mean of 1x4 pixels
+*/
+STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w mean1x4m(
+ s_1w_1x4_matrix m);
+
+/** @brief Mean of 4x4 matrix
+ *
+ * @param[in] m 4x4 matrix with pixels
+ *
+ * @return mean of 4x4 matrix
+ *
+ * This function calculates the mean of 4x4 matrix with pixels
+*/
+STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w mean4x4m(
+ s_1w_4x4_matrix m);
+
+/** @brief Mean of 2x3 matrix
+ *
+ * @param[in] m 2x3 matrix with pixels
+ *
+ * @return mean of 2x3 matrix
+ *
+ * This function calculates the mean of 2x3 matrix with pixels
+ * with a factor of 8/6.
+*/
+STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w mean2x3m(
+ s_1w_2x3_matrix m);
+
+/** @brief Mean of 1x5 matrix
+ *
+ * @param[in] m 1x5 matrix with pixels
+ *
+ * @return mean of 1x5 matrix
+ *
+ * This function calculates the mean of 1x5 matrix with pixels
+ * with a factor of 8/5.
+*/
+STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w mean1x5m(s_1w_1x5_matrix m);
+
+/** @brief Mean of 1x6 matrix
+ *
+ * @param[in] m 1x6 matrix with pixels
+ *
+ * @return mean of 1x6 matrix
+ *
+ * This function calculates the mean of 1x6 matrix with pixels
+ * with a factor of 8/6.
+*/
+STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w mean1x6m(
+ s_1w_1x6_matrix m);
+
+/** @brief Mean of 5x5 matrix
+ *
+ * @param[in] m 5x5 matrix with pixels
+ *
+ * @return mean of 5x5 matrix
+ *
+ * This function calculates the mean of 5x5 matrix with pixels
+ * with a factor of 32/25.
+*/
+STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w mean5x5m(
+ s_1w_5x5_matrix m);
+
+/** @brief Mean of 6x6 matrix
+ *
+ * @param[in] m 6x6 matrix with pixels
+ *
+ * @return mean of 6x6 matrix
+ *
+ * This function calculates the mean of 6x6 matrix with pixels
+ * with a factor of 64/36.
+*/
+STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w mean6x6m(
+ s_1w_6x6_matrix m);
+
+/** @brief Minimum of 4x4 matrix
+ *
+ * @param[in] m 4x4 matrix with pixels
+ *
+ * @return minimum of 4x4 matrix
+ *
+ * This function calculates the minimum of
+ * 4x4 matrix with pixels.
+*/
+STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w min4x4m(
+ s_1w_4x4_matrix m);
+
+/** @brief Maximum of 4x4 matrix
+ *
+ * @param[in] m 4x4 matrix with pixels
+ *
+ * @return maximum of 4x4 matrix
+ *
+ * This function calculates the maximum of
+ * 4x4 matrix with pixels.
+*/
+STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w max4x4m(
+ s_1w_4x4_matrix m);
+
+/** @brief SAD between two 3x3 matrices
+ *
+ * @param[in] a 3x3 matrix with pixels
+ *
+ * @param[in] b 3x3 matrix with pixels
+ *
+ * @return 3x3 matrix SAD
+ *
+ * This function calculates the sum of absolute differences between two matrices.
+ * Both input pixels and the SAD are normalized by a factor of SAD3x3_IN_SHIFT and
+ * SAD3x3_OUT_SHIFT respectively.
+ * Computed SAD is a 1/(2 ^ (SAD3x3_IN_SHIFT + SAD3x3_OUT_SHIFT)), i.e. 1/16, factor
+ * of the original SAD and it is more precise than sad3x3m().
+*/
+STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w sad3x3m_precise(
+ s_1w_3x3_matrix a,
+ s_1w_3x3_matrix b);
+
+/** @brief SAD between two 3x3 matrices
+ *
+ * @param[in] a 3x3 matrix with pixels
+ *
+ * @param[in] b 3x3 matrix with pixels
+ *
+ * @return 3x3 matrix SAD
+ *
+ * This function calculates the sum of absolute differences between two matrices.
+ * This version saves cycles by avoiding input normalization and wide vector
+ * operations during sum computation.
+ * Input pixel differences are computed by absolute of rounded, halved
+ * subtraction. Normalized sum is computed by rounded averages.
+ * Computed SAD is (1/2)*(1/16) = 1/32 factor of original SAD. Factor 1/2 comes
+ * from input halving operation and factor 1/16 comes from mean operation
+*/
+STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w sad3x3m(
+ s_1w_3x3_matrix a,
+ s_1w_3x3_matrix b);
+
+/** @brief SAD between two 5x5 matrices
+ *
+ * @param[in] a 5x5 matrix with pixels
+ *
+ * @param[in] b 5x5 matrix with pixels
+ *
+ * @return 5x5 matrix SAD
+ *
+ * Computed SAD is = 1/32 factor of original SAD.
+*/
+STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w sad5x5m(
+ s_1w_5x5_matrix a,
+ s_1w_5x5_matrix b);
+
+/** @brief Absolute gradient between two sets of 1x5 matrices
+ *
+ * @param[in] m0 first set of 1x5 matrix with pixels
+ * @param[in] m1 second set of 1x5 matrix with pixels
+ *
+ * @return absolute gradient between two 1x5 matrices
+ *
+ * This function computes mean of two input 1x5 matrices and returns
+ * absolute difference between two mean values.
+ */
+STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w
+absgrad1x5m(s_1w_1x5_matrix m0, s_1w_1x5_matrix m1);
+
+/** @brief Bi-linear Interpolation optimized(approximate)
+ *
+ * @param[in] a input0
+ * @param[in] b input1
+ * @param[in] c cloned weight factor
+ *
+ * @return (a-b)*c + b
+ *
+ * This function will do bi-linear Interpolation on
+ * inputs a and b using constant weight factor c
+ *
+ * Inputs a,b are assumed in S1.15 format
+ * Weight factor has to be in range [0,1] and is assumed to be in S2.14 format
+ *
+ * The bilinear interpolation equation is (a*c) + b*(1-c),
+ * But this is implemented as (a-b)*c + b for optimization
+ */
+STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w OP_1w_bilinear_interpol_approx_c(
+ tvector1w a,
+ tvector1w b,
+ tscalar1w_weight c);
+
+/** @brief Bi-linear Interpolation optimized(approximate)
+ *
+ * @param[in] a input0
+ * @param[in] b input1
+ * @param[in] c weight factor
+ *
+ * @return (a-b)*c + b
+ *
+ * This function will do bi-linear Interpolation on
+ * inputs a and b using weight factor c
+ *
+ * Inputs a,b are assumed in S1.15 format
+ * Weight factor has to be in range [0,1] and is assumed to be in S2.14 format
+ *
+ * The bilinear interpolation equation is (a*c) + b*(1-c),
+ * But this is implemented as (a-b)*c + b for optimization
+ */
+STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w OP_1w_bilinear_interpol_approx(
+ tvector1w a,
+ tvector1w b,
+ tvector1w_weight c);
+
+/** @brief Bi-linear Interpolation
+ *
+ * @param[in] a input0
+ * @param[in] b input1
+ * @param[in] c weight factor
+ *
+ * @return (a*c) + b*(1-c)
+ *
+ * This function will do bi-linear Interpolation on
+ * inputs a and b using weight factor c
+ *
+ * Inputs a,b are assumed in S1.15 format
+ * Weight factor has to be in range [0,1] and is assumed to be in S2.14 format
+ *
+ * The bilinear interpolation equation is (a*c) + b*(1-c),
+ */
+STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w OP_1w_bilinear_interpol(
+ tvector1w a,
+ tvector1w b,
+ tscalar1w_weight c);
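+
+/* A scalar per-element model of the optimised form (a-b)*c + b (illustrative
+ * only; a and b are S1.15 samples, c an S2.14 weight in [0,1], and the
+ * widening to int avoids overflow of a-b):
+ *
+ *	int diff = (int)a - (int)b;
+ *
+ *	out = b + (tvector1w)((diff * (int)c) >> 14);
+ */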
+
+/** @brief Generic Block Matching Algorithm
+ * @param[in] search_window pointer to input search window of 16x16 pixels
+ * @param[in] ref_block pointer to input reference block of 8x8 pixels, where N<=M
+ * @param[in] output pointer to output sads
+ * @param[in] search_sz search size for SAD computation
+ * @param[in] ref_sz block size
+ * @param[in] pixel_shift pixel shift to search the data
+ * @param[in] search_block_sz search window block size
+ * @param[in] shift shift value, with which the output is shifted right
+ *
+ * @return 0 when the computation is successful.
+
+ * This function compares the reference block with a block of size NxN in the search
+ * window. The sum of absolute differences between each pixel in the reference block and
+ * the corresponding pixel in the search block is computed. The whole search window is
+ * traversed with the reference block with the given pixel shift.
+ *
+ */
+STORAGE_CLASS_REF_VECTOR_FUNC_H int generic_block_matching_algorithm(
+ tscalar1w **search_window,
+ tscalar1w **ref_block,
+ tscalar1w *output,
+ int search_sz,
+ int ref_sz,
+ int pixel_shift,
+ int search_block_sz,
+ tscalar1w_4bit_bma_shift shift);
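+
+/* A scalar sketch of the SAD sweep described above (illustrative only and
+ * simplified; the loop bounds follow the NUM_OF_SADS formula in
+ * ref_vector_func_types.h and abs() stands for the absolute difference):
+ *
+ *	n = 0;
+ *	for (y = 0; y + ref_sz <= search_sz; y += pixel_shift)
+ *		for (x = 0; x + ref_sz <= search_sz; x += pixel_shift) {
+ *			sad = 0;
+ *			for (i = 0; i < ref_sz; i++)
+ *				for (j = 0; j < ref_sz; j++)
+ *					sad += abs(search_window[y + i][x + j] -
+ *						   ref_block[i][j]);
+ *			output[n++] = sad >> shift;
+ *		}
+ */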
+
+#ifndef ISP2401
+/** @brief OP_1w_asp_bma_16_1_32way
+#else
+/** @brief OP_1w_asp_bma_16_1_32way_nomask
+#endif
+ *
+ * @param[in] search_area input search window of 16x16 pixels
+ * @param[in] input_block input reference block of 8x8 pixels, where N<=M
+ * @param[in] shift shift value, with which the output is shifted right
+ *
+ * @return 81 SADs for all the search blocks.
+
+ * This function compares the reference block with a block of size 8x8 pixels in the
+ * search window of 16x16 pixels. Sum of absolute differences for each pixel in the
+ * reference block and the corresponding pixel in the search block is calculated.
+ * Whole search window is traversed with the reference block with a pixel shift of 1
+ * pixel. The output is right shifted with the given shift value. The shift value is
+ * a 4 bit value.
+ *
+ */
+
+#ifndef ISP2401
+STORAGE_CLASS_REF_VECTOR_FUNC_H bma_output_16_1 OP_1w_asp_bma_16_1_32way(
+#else
+STORAGE_CLASS_REF_VECTOR_FUNC_H bma_output_16_1 OP_1w_asp_bma_16_1_32way_nomask(
+#endif
+ bma_16x16_search_window search_area,
+ ref_block_8x8 input_block,
+ tscalar1w_4bit_bma_shift shift);
+
+#ifndef ISP2401
+/** @brief OP_1w_asp_bma_16_2_32way
+#else
+/** @brief OP_1w_asp_bma_16_2_32way_nomask
+#endif
+ *
+ * @param[in] search_area input search window of 16x16 pixels
+ * @param[in] input_block input reference block of 8x8 pixels, where N<=M
+ * @param[in] shift shift value, with which the output is shifted right
+ *
+ * @return 25 SADs for all the search blocks.
+ * This function compares the reference block with a block of size 8x8 in the search
+ * window of 16x16. Sum of absolute differences for each pixel in the reference block
+ * and the corresponding pixel in the search block is computed. Whole search window is
+ * traversed with the reference block with the given pixel shift of 2 pixels. The output
+ * is right shifted with the given shift value. The shift value is a 4 bit value.
+ *
+ */
+
+#ifndef ISP2401
+STORAGE_CLASS_REF_VECTOR_FUNC_H bma_output_16_2 OP_1w_asp_bma_16_2_32way(
+#else
+STORAGE_CLASS_REF_VECTOR_FUNC_H bma_output_16_2 OP_1w_asp_bma_16_2_32way_nomask(
+#endif
+ bma_16x16_search_window search_area,
+ ref_block_8x8 input_block,
+ tscalar1w_4bit_bma_shift shift);
+#ifndef ISP2401
+/** @brief OP_1w_asp_bma_14_1_32way
+#else
+/** @brief OP_1w_asp_bma_14_1_32way_nomask
+#endif
+ *
+ * @param[in] search_area input search block of 16x16 pixels with search window of 14x14 pixels
+ * @param[in] input_block input reference block of 8x8 pixels, where N<=M
+ * @param[in] shift shift value, with which the output is shifted right
+ *
+ * @return 49 SADs for all the search blocks.
+ * This function compares the reference block with a block of size 8x8 in the search
+ * window of 14x14. The sum of absolute differences between each pixel in the reference
+ * block and the corresponding pixel in the search block is computed. The whole search
+ * window is traversed with the reference block with a 1 pixel shift. The output is
+ * right shifted with the given shift value. The shift value is a 4 bit value. Input is
+ * always a 16x16 block but the search window is 14x14, with the last 2 pixels of each
+ * row and column not used for computation.
+ *
+ */
+
+#ifndef ISP2401
+STORAGE_CLASS_REF_VECTOR_FUNC_H bma_output_14_1 OP_1w_asp_bma_14_1_32way(
+#else
+STORAGE_CLASS_REF_VECTOR_FUNC_H bma_output_14_1 OP_1w_asp_bma_14_1_32way_nomask(
+#endif
+ bma_16x16_search_window search_area,
+ ref_block_8x8 input_block,
+ tscalar1w_4bit_bma_shift shift);
+
+#ifndef ISP2401
+/** @brief OP_1w_asp_bma_14_2_32way
+#else
+/** @brief OP_1w_asp_bma_14_2_32way_nomask
+#endif
+ *
+ * @param[in] search_area input search block of 16x16 pixels with search window of 14x14 pixels
+ * @param[in] input_block input reference block of 8x8 pixels, where N<=M
+ * @param[in] shift shift value, with which the output is shifted right
+ *
+ * @return 16 SADs for all the search blocks.
+ * This function compares the reference block with a block of size 8x8 in the search
+ * window of 14x14. The sum of absolute differences between each pixel in the reference block
+ * and the corresponding pixel in the search block is computed. The whole search window is
+ * traversed with the reference block with a 2 pixel shift. The output is right shifted with the
+ * given shift value. The shift value is a 4 bit value.
+ *
+ */
+
+#ifndef ISP2401
+STORAGE_CLASS_REF_VECTOR_FUNC_H bma_output_14_2 OP_1w_asp_bma_14_2_32way(
+#else
+STORAGE_CLASS_REF_VECTOR_FUNC_H bma_output_14_2 OP_1w_asp_bma_14_2_32way_nomask(
+#endif
+ bma_16x16_search_window search_area,
+ ref_block_8x8 input_block,
+ tscalar1w_4bit_bma_shift shift);
+
+#ifdef ISP2401
+/** @brief multiplex addition and passing
+ *
+ * @param[in] _a first pixel
+ * @param[in] _b second pixel
+ * @param[in] _c condition flag
+ *
+ * @return (_a + _b) if condition flag is true
+ * _a if condition flag is false
+ *
+ * This function does multiplex addition depending on the input condition flag
+ */
+STORAGE_CLASS_REF_VECTOR_FUNC_H tvector1w OP_1w_cond_add(
+ tvector1w _a,
+ tvector1w _b,
+ tflags _c);
+
+#endif
+#ifdef HAS_bfa_unit
+/** @brief OP_1w_single_bfa_7x7
+ *
+ * @param[in] weights - spatial and range weight lut
+ * @param[in] threshold - threshold plane, for range weight scaling
+ * @param[in] central_pix - central pixel plane
+ * @param[in] src_plane - src pixel plane
+ *
+ * @return Bilateral filter output
+ *
+ * This function implements a 7x7 single bilateral filter.
+ * Output = {sum(pixel * weight), sum(weight)}
+ * Where sum is summation over 7x7 block set.
+ * weight = spatial weight * range weight
+ * spatial weights are loaded from spatial_weight_lut depending on src pixel
+ * position in the 7x7 block
+ * range weights are computed by table look up from range_weight_lut depending
+ * on scaled absolute difference between src and central pixels.
+ * threshold is used as scaling factor. range_weight_lut consists of
+ * BFA_RW_LUT_SIZE numbers of LUT entries to model any distribution function.
+ * Piecewise linear approximation technique is used to compute the range weight.
+ * It computes the absolute difference between the central pixel and 61 src pixels.
+ */
+STORAGE_CLASS_REF_VECTOR_FUNC_H bfa_7x7_output OP_1w_single_bfa_7x7(
+ bfa_weights weights,
+ tvector1w threshold,
+ tvector1w central_pix,
+ s_1w_7x7_matrix src_plane);
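+
+/* A scalar per-pixel sketch of the weighting described above (illustrative
+ * only; the spatial and range look-ups, the threshold scaling helper and the
+ * accumulator names are abstractions of the real LUT handling):
+ *
+ *	w       = spatial_weight[pos] *
+ *		  range_weight[scale(abs(src - central_pix), threshold)];
+ *	sum_wp += src * w;
+ *	sum_w  += w;
+ *
+ * The returned pair then corresponds to {sum_wp, sum_w}.
+ */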
+
+/** @brief OP_1w_joint_bfa_7x7
+ *
+ * @param[in] weights - spatial and range weight lut
+ * @param[in] threshold0 - 1st threshold plane, for range weight scaling
+ * @param[in] central_pix0 - 1st central pixel plane
+ * @param[in] src0_plane - 1st pixel plane
+ * @param[in] threshold1 - 2nd threshold plane, for range weight scaling
+ * @param[in] central_pix1 - 2nd central pixel plane
+ * @param[in] src1_plane - 2nd pixel plane
+ *
+ * @return Joint bilateral filter output
+ *
+ * This function implements a 7x7 joint bilateral filter.
+ * Output = {sum(pixel * weight), sum(weight)}
+ * Where sum is summation over 7x7 block set.
+ * weight = spatial weight * range weight
+ * spatial weights are loaded from spatial_weight_lut depending on src pixel
+ * position in the 7x7 block
+ * range weights are computed by table look up from range_weight_lut depending
+ * on sum of scaled absolute difference between central pixel and two src pixel
+ * planes. threshold is used as scaling factor. range_weight_lut consists of
+ * BFA_RW_LUT_SIZE numbers of LUT entries to model any distribution function.
+ * Piecewise linear approximation technique is used to compute the range weight.
+ * It computes the absolute difference between the central pixel and 61 src pixels.
+ */
+STORAGE_CLASS_REF_VECTOR_FUNC_H bfa_7x7_output OP_1w_joint_bfa_7x7(
+ bfa_weights weights,
+ tvector1w threshold0,
+ tvector1w central_pix0,
+ s_1w_7x7_matrix src0_plane,
+ tvector1w threshold1,
+ tvector1w central_pix1,
+ s_1w_7x7_matrix src1_plane);
+
+/** @brief bbb_bfa_gen_spatial_weight_lut
+ *
+ * @param[in] in - 7x7 matrix of spatial weights
+ * @param[out] out - generated LUT
+ *
+ * @return None
+ *
+ * This function creates the spatial weight look-up table used
+ * for the bilateral filter instruction.
+ */
+STORAGE_CLASS_REF_VECTOR_FUNC_H void bbb_bfa_gen_spatial_weight_lut(
+ s_1w_7x7_matrix in,
+ tvector1w out[BFA_MAX_KWAY]);
+
+/** @brief bbb_bfa_gen_range_weight_lut
+ *
+ * @param[in] in - input range weight,
+ * @param[out] out - generated LUT
+ *
+ * @return None
+ *
+ * This function creates the range weight look-up table used
+ * for the bilateral filter instruction.
+ * 8 unsigned 7-bit weights are represented in 7 16-bit LUT entries.
+ * LUT formation is done as follows:
+ * higher 8 bit: Point(N) = Point(N+1) - Point(N)
+ * lower 8 bit: Point(N) = Point(N)
+ * Weight function can be any monotonic decreasing function for x >= 0
+ */
+STORAGE_CLASS_REF_VECTOR_FUNC_H void bbb_bfa_gen_range_weight_lut(
+ tvector1w in[BFA_RW_LUT_SIZE+1],
+ tvector1w out[BFA_RW_LUT_SIZE]);
+#endif
+
+#ifdef ISP2401
+/** @brief OP_1w_imax32
+ *
+ * @param[in] src - structure that holds an array of 32 elements.
+ *
+ * @return maximum element among input array.
+ *
+ * This function gets the maximum element from an array of 32 elements.
+ */
+STORAGE_CLASS_REF_VECTOR_FUNC_H int OP_1w_imax32(
+ imax32_ref_in_vector src);
+
+/** @brief OP_1w_imaxidx32
+ *
+ * @param[in] src - structure that holds a vector of elements.
+ *
+ * @return index of the first element with maximum value in the array.
+ *
+ * This function gets index of first element with maximum value
+ * from 32 elements.
+ */
+STORAGE_CLASS_REF_VECTOR_FUNC_H int OP_1w_imaxidx32(
+ imax32_ref_in_vector src);
+
+#endif
+#ifndef INLINE_VECTOR_FUNC
+#define STORAGE_CLASS_REF_VECTOR_FUNC_C
+#define STORAGE_CLASS_REF_VECTOR_DATA_C const
+#else /* INLINE_VECTOR_FUNC */
+#define STORAGE_CLASS_REF_VECTOR_FUNC_C STORAGE_CLASS_REF_VECTOR_FUNC_H
+#define STORAGE_CLASS_REF_VECTOR_DATA_C STORAGE_CLASS_REF_VECTOR_DATA_H
+#include "ref_vector_func.c"
+#define VECTOR_FUNC_INLINED
+#endif /* INLINE_VECTOR_FUNC */
+
+#endif /*_REF_VECTOR_FUNC_H_INCLUDED_*/
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/ref_vector_func_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/ref_vector_func_types.h
new file mode 100644
index 000000000000..4dd05eba852e
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/ref_vector_func_types.h
@@ -0,0 +1,385 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __REF_VECTOR_FUNC_TYPES_H_INCLUDED__
+#define __REF_VECTOR_FUNC_TYPES_H_INCLUDED__
+
+
+/*
+ * Prerequisites:
+ *
+ */
+#include "mpmath.h"
+#include "bbb_config.h"
+#include "isp_op1w_types.h"
+#include "isp_op2w_types.h"
+
+/* Defines for the Config Unit */
+#define MAX_CONFIG_POINTS 5
+#define INPUT_OFFSET_FACTOR 10
+#define INPUT_SCALE_FACTOR 10
+#define OUTPUT_SCALE_FACTOR 10
+#define SLOPE_A_RESOLUTION 10
+#define CONFIG_UNIT_LUT_SIZE_32 32 /*XCU works for ISP_NWAY = 32 */
+#define LXCU_LUT_SIZE 16
+#ifdef ISP2401
+#define IMAX32_ELEM_SIZE 32
+#endif
+
+#define ONE_IN_Q14 (1<<(NUM_BITS-2))
+#define Q29_TO_Q15_SHIFT_VAL (NUM_BITS-2)
+#define Q28_TO_Q15_SHIFT_VAL (NUM_BITS-3)
+#define MAX_ELEM(width_in_bits) ((1<<(width_in_bits))-1)
+
+/* Block matching algorithm related data */
+/* NUM_OF_SADS = ((SEARCH_AREA_HEIGHT - REF_BLOCK_HEIGHT)/PIXEL_SHIFT + 1)* \
+ ((SEARCH_AREA_WIDTH - REF_BLOCK_WIDTH)/PIXEL_SHIFT + 1) */
+
+#define SADS(sw_h,sw_w, ref_h, ref_w, p_sh) (((sw_h - ref_h)/p_sh + 1)*((sw_w - ref_w)/p_sh + 1))
+#define SADS_16x16_1 SADS(16, 16, 8, 8, 1)
+#define SADS_16x16_2 SADS(16, 16, 8, 8, 2)
+#define SADS_14x14_1 SADS(14, 14, 8, 8, 1)
+#define SADS_14x14_2 SADS(14, 14, 8, 8, 2)
+
+#define BMA_OUTPUT_MATRIX_DIM(sw_h, ref_h, p_sh) ((sw_h - ref_h)/p_sh + 1)
+#define BMA_OUT_16x16_2_32 BMA_OUTPUT_MATRIX_DIM(16, 8, 2)
+#define BMA_OUT_14x14_2_32 BMA_OUTPUT_MATRIX_DIM(14, 8, 2)
+#define BMA_OUT_16x16_1_32 BMA_OUTPUT_MATRIX_DIM(16, 8, 1)
+#define BMA_OUT_14x14_1_32 BMA_OUTPUT_MATRIX_DIM(14, 8, 1)
+#define BMA_SEARCH_BLOCK_SZ_16 16
+#define BMA_REF_BLOCK_SZ_8 8
+#define PIXEL_SHIFT_2 2
+#define PIXEL_SHIFT_1 1
+#define BMA_SEARCH_WIN_SZ_16 16
+#define BMA_SEARCH_WIN_SZ_14 14
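+
+/* For the block sizes used by the BMA BBBs, the SADS() macro evaluates to the
+ * SAD counts quoted in ref_vector_func.h:
+ *
+ *	SADS_16x16_1 = ((16 - 8)/1 + 1) * ((16 - 8)/1 + 1) = 81
+ *	SADS_16x16_2 = ((16 - 8)/2 + 1) * ((16 - 8)/2 + 1) = 25
+ *	SADS_14x14_1 = ((14 - 8)/1 + 1) * ((14 - 8)/1 + 1) = 49
+ *	SADS_14x14_2 = ((14 - 8)/2 + 1) * ((14 - 8)/2 + 1) = 16
+ */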
+
+
+/*
+ * Struct type specification
+ */
+
+typedef unsigned short tscalar1w_3bit; /* tscalar1w in interval [0, 2^3) */
+typedef short tscalar1w_5bit_signed; /* tscalar1w in interval [-2^(5-1), 2^(5-1)) */
+typedef unsigned short tscalar1w_5bit; /* tscalar1w in interval [0, 2^5) */
+typedef short tscalar1w_range1wbit; /* tscalar1w in interval [-NUM_BITS, NUM_BITS] */
+typedef short tscalar1w_unsigned_range1wbit; /* tscalar1w in interval [0, NUM_BITS] */
+typedef unsigned short tvector_8bit; /* 8 bit positive number */
+typedef unsigned short tvector_5bit;
+typedef unsigned short tvector_4bit;
+typedef unsigned short tscalar1w_16bit;
+typedef unsigned short tscalar1w_4bit_bma_shift;
+
+typedef struct {
+ tvector1w v0 ;
+ tvector1w v1 ;
+} s_1w_2x1_matrix;
+
+#define S_1W_2X1_MATRIX_DEFAULT ((s_1w_2x1_matrix)\
+ { 0, 0 })
+
+typedef struct {
+ tvector1w v00;
+ tvector1w v01;
+} s_1w_1x2_matrix;
+
+#define S_1W_1X2_MATRIX_DEFAULT ((s_1w_1x2_matrix)\
+ { 0, 0 })
+
+typedef struct {
+ tvector1w v00 ;
+ tvector1w v01 ;
+ tvector1w v02 ;
+} s_1w_1x3_matrix;
+
+#define S_1W_1X3_MATRIX_DEFAULT ((s_1w_1x3_matrix)\
+ { 0, 0, 0, })
+
+typedef struct {
+ tvector1w v00; tvector1w v01; tvector1w v02;
+ tvector1w v10; tvector1w v11; tvector1w v12;
+} s_1w_2x3_matrix;
+
+#define S_1W_2X3_MATRIX_DEFAULT ((s_1w_2x3_matrix)\
+ { 0, 0, 0, \
+ 0, 0, 0 })
+
+typedef struct {
+ tvector1w v00 ; tvector1w v01 ; tvector1w v02 ;
+ tvector1w v10 ; tvector1w v11 ; tvector1w v12 ;
+ tvector1w v20 ; tvector1w v21 ; tvector1w v22 ;
+} s_1w_3x3_matrix;
+
+#define S_1W_3X3_MATRIX_DEFAULT ((s_1w_3x3_matrix)\
+ { 0, 0, 0, \
+ 0, 0, 0, \
+ 0, 0, 0 })
+
+typedef struct {
+ tvector1w v00 ; tvector1w v01 ; tvector1w v02 ;
+ tvector1w v10 ; tvector1w v11 ; tvector1w v12 ;
+ tvector1w v20 ; tvector1w v21 ; tvector1w v22 ;
+ tvector1w v30 ; tvector1w v31 ; tvector1w v32 ;
+} s_1w_4x3_matrix;
+
+#define S_1W_4X3_MATRIX_DEFAULT ((s_1w_4x3_matrix)\
+ { 0, 0, 0, \
+ 0, 0, 0, \
+ 0, 0, 0, \
+ 0, 0, 0 })
+
+typedef struct {
+ tvector1w v00 ;
+ tvector1w v01 ;
+ tvector1w v02 ;
+ tvector1w v03 ;
+ tvector1w v04 ;
+} s_1w_1x5_matrix;
+
+#define S_1W_1X5_MATRIX_DEFAULT ((s_1w_1x5_matrix)\
+ { 0, 0, 0, 0, 0 })
+
+typedef struct {
+ tvector1w v00 ; tvector1w v01 ; tvector1w v02 ; tvector1w v03 ; tvector1w v04 ;
+ tvector1w v10 ; tvector1w v11 ; tvector1w v12 ; tvector1w v13 ; tvector1w v14 ;
+ tvector1w v20 ; tvector1w v21 ; tvector1w v22 ; tvector1w v23 ; tvector1w v24 ;
+ tvector1w v30 ; tvector1w v31 ; tvector1w v32 ; tvector1w v33 ; tvector1w v34 ;
+ tvector1w v40 ; tvector1w v41 ; tvector1w v42 ; tvector1w v43 ; tvector1w v44 ;
+} s_1w_5x5_matrix;
+
+#define S_1W_5X5_MATRIX_DEFAULT ((s_1w_5x5_matrix)\
+ { 0, 0, 0, 0, 0, \
+ 0, 0, 0, 0, 0, \
+ 0, 0, 0, 0, 0, \
+ 0, 0, 0, 0, 0, \
+ 0, 0, 0, 0, 0 })
+#ifndef ISP2401
+
+#else
+
+#endif
+typedef struct {
+ tvector1w v00;
+ tvector1w v01;
+ tvector1w v02;
+ tvector1w v03;
+ tvector1w v04;
+ tvector1w v05;
+ tvector1w v06;
+} s_1w_1x7_matrix;
+
+#define S_1W_1X7_MATRIX_DEFAULT ((s_1w_1x7_matrix)\
+ { 0, 0, 0, 0, 0, 0, 0 })
+
+typedef struct {
+ tvector1w v00;
+ tvector1w v01;
+ tvector1w v02;
+ tvector1w v03;
+ tvector1w v04;
+ tvector1w v05;
+ tvector1w v06;
+ tvector1w v07;
+ tvector1w v08;
+} s_1w_1x9_matrix;
+
+#define S_1W_1X9_MATRIX_DEFAULT ((s_1w_1x9_matrix)\
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0 })
+
+typedef struct {
+ tvector1w v00;
+ tvector1w v01;
+ tvector1w v02;
+ tvector1w v03;
+} s_1w_1x4_matrix;
+
+#define S_1W_1X4_MATRIX ((s_1w_1x4_matrix)\
+ { 0, 0, 0, 0 })
+
+typedef struct {
+ tvector1w v00; tvector1w v01; tvector1w v02; tvector1w v03;
+ tvector1w v10; tvector1w v11; tvector1w v12; tvector1w v13;
+ tvector1w v20; tvector1w v21; tvector1w v22; tvector1w v23;
+ tvector1w v30; tvector1w v31; tvector1w v32; tvector1w v33;
+} s_1w_4x4_matrix;
+
+#define S_1W_4X4_MATRIX_DEFAULT ((s_1w_4x4_matrix)\
+ { 0, 0, 0, 0, \
+ 0, 0, 0, 0, \
+ 0, 0, 0, 0, \
+ 0, 0, 0, 0 })
+
+typedef struct {
+ tvector1w v00;
+ tvector1w v01;
+ tvector1w v02;
+ tvector1w v03;
+ tvector1w v04;
+ tvector1w v05;
+} s_1w_1x6_matrix;
+
+#define S_1W_1X6_MATRIX_DEFAULT ((s_1w_1x6_matrix)\
+ { 0, 0, 0, 0, 0, 0 })
+
+typedef struct {
+ tvector1w v00; tvector1w v01; tvector1w v02; tvector1w v03; tvector1w v04; tvector1w v05;
+ tvector1w v10; tvector1w v11; tvector1w v12; tvector1w v13; tvector1w v14; tvector1w v15;
+ tvector1w v20; tvector1w v21; tvector1w v22; tvector1w v23; tvector1w v24; tvector1w v25;
+ tvector1w v30; tvector1w v31; tvector1w v32; tvector1w v33; tvector1w v34; tvector1w v35;
+ tvector1w v40; tvector1w v41; tvector1w v42; tvector1w v43; tvector1w v44; tvector1w v45;
+ tvector1w v50; tvector1w v51; tvector1w v52; tvector1w v53; tvector1w v54; tvector1w v55;
+} s_1w_6x6_matrix;
+
+#define S_1W_6X6_MATRIX_DEFAULT ((s_1w_6x6_matrix)\
+ { 0, 0, 0, 0, 0, 0, \
+ 0, 0, 0, 0, 0, 0, \
+ 0, 0, 0, 0, 0, 0, \
+ 0, 0, 0, 0, 0, 0, \
+ 0, 0, 0, 0, 0, 0, \
+ 0, 0, 0, 0, 0, 0 })
+
+typedef struct {
+ tvector1w v00; tvector1w v01; tvector1w v02; tvector1w v03; tvector1w v04;
+ tvector1w v05; tvector1w v06; tvector1w v07; tvector1w v08;
+ tvector1w v10; tvector1w v11; tvector1w v12; tvector1w v13; tvector1w v14;
+ tvector1w v15; tvector1w v16; tvector1w v17; tvector1w v18;
+ tvector1w v20; tvector1w v21; tvector1w v22; tvector1w v23; tvector1w v24;
+ tvector1w v25; tvector1w v26; tvector1w v27; tvector1w v28;
+ tvector1w v30; tvector1w v31; tvector1w v32; tvector1w v33; tvector1w v34;
+ tvector1w v35; tvector1w v36; tvector1w v37; tvector1w v38;
+ tvector1w v40; tvector1w v41; tvector1w v42; tvector1w v43; tvector1w v44;
+ tvector1w v45; tvector1w v46; tvector1w v47; tvector1w v48;
+ tvector1w v50; tvector1w v51; tvector1w v52; tvector1w v53; tvector1w v54;
+ tvector1w v55; tvector1w v56; tvector1w v57; tvector1w v58;
+ tvector1w v60; tvector1w v61; tvector1w v62; tvector1w v63; tvector1w v64;
+ tvector1w v65; tvector1w v66; tvector1w v67; tvector1w v68;
+ tvector1w v70; tvector1w v71; tvector1w v72; tvector1w v73; tvector1w v74;
+ tvector1w v75; tvector1w v76; tvector1w v77; tvector1w v78;
+ tvector1w v80; tvector1w v81; tvector1w v82; tvector1w v83; tvector1w v84;
+ tvector1w v85; tvector1w v86; tvector1w v87; tvector1w v88;
+} s_1w_9x9_matrix;
+
+#define S_1W_9X9_MATRIX_DEFAULT ((s_1w_9x9_matrix)\
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, \
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, \
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, \
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, \
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, \
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, \
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, \
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, \
+ 0, 0, 0, 0, 0, 0, 0, 0, 0 })
+
+typedef struct {
+ tvector1w v00; tvector1w v01; tvector1w v02; tvector1w v03; tvector1w v04;
+ tvector1w v05; tvector1w v06;
+ tvector1w v10; tvector1w v11; tvector1w v12; tvector1w v13; tvector1w v14;
+ tvector1w v15; tvector1w v16;
+ tvector1w v20; tvector1w v21; tvector1w v22; tvector1w v23; tvector1w v24;
+ tvector1w v25; tvector1w v26;
+ tvector1w v30; tvector1w v31; tvector1w v32; tvector1w v33; tvector1w v34;
+ tvector1w v35; tvector1w v36;
+ tvector1w v40; tvector1w v41; tvector1w v42; tvector1w v43; tvector1w v44;
+ tvector1w v45; tvector1w v46;
+ tvector1w v50; tvector1w v51; tvector1w v52; tvector1w v53; tvector1w v54;
+ tvector1w v55; tvector1w v56;
+ tvector1w v60; tvector1w v61; tvector1w v62; tvector1w v63; tvector1w v64;
+ tvector1w v65; tvector1w v66;
+} s_1w_7x7_matrix;
+
+#define S_1W_7X7_MATRIX_DEFAULT ((s_1w_7x7_matrix)\
+ { 0, 0, 0, 0, 0, 0, 0, \
+ 0, 0, 0, 0, 0, 0, 0, \
+ 0, 0, 0, 0, 0, 0, 0, \
+ 0, 0, 0, 0, 0, 0, 0, \
+ 0, 0, 0, 0, 0, 0, 0, \
+ 0, 0, 0, 0, 0, 0, 0, \
+ 0, 0, 0, 0, 0, 0, 0 })
+
+typedef struct {
+ tvector1w v0_0;
+ tvector1w v0_1;
+ tvector1w v0_2;
+ tvector1w v0_3;
+ tvector1w v0_4;
+ tvector1w v0_5;
+ tvector1w v0_6;
+ tvector1w v0_7;
+ tvector1w v0_8;
+ tvector1w v0_9;
+ tvector1w v0_10;
+} s_1w_1x11_matrix;
+
+#define S_1W_1X11_MATRIX_DEFAULT ((s_1w_1x11_matrix)\
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 })
+
+typedef struct {
+ tvector1w x_cord[MAX_CONFIG_POINTS];
+ tvector1w slope[MAX_CONFIG_POINTS-1];
+ tvector1w y_offset[MAX_CONFIG_POINTS-1];
+} ref_config_points;
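
A piecewise-linear curve described by ref_config_points could be evaluated as sketched below; this is only an illustration of how the three arrays relate (segment i runs from x_cord[i] to x_cord[i+1]), and the fixed-point scaling actually used by the firmware is not defined in this header:

	static tvector1w eval_config_points(const ref_config_points *p, tvector1w x)
	{
		unsigned int i;

		/* find the segment that contains x (clamp to the last segment) */
		for (i = 0; i < MAX_CONFIG_POINTS - 2; i++) {
			if (x < p->x_cord[i + 1])
				break;
		}
		/* y = y_offset[i] + slope[i] * (x - x_cord[i]) */
		return p->y_offset[i] + p->slope[i] * (x - p->x_cord[i]);
	}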
+
+typedef struct {
+ tscalar1w_range1wbit slope_vec[CONFIG_UNIT_LUT_SIZE_32];
+ tscalar1w_range1wbit offset_vec[CONFIG_UNIT_LUT_SIZE_32];
+ tscalar1w_16bit x_cord_vec[CONFIG_UNIT_LUT_SIZE_32];
+ tscalar1w_16bit x_cord_max;
+ tscalar1w_5bit exponent;
+ tscalar1w_5bit slope_resolution;
+} xcu_ref_init_vectors;
+
+typedef struct {
+#ifdef ISP2401
+ tvector1w elem[IMAX32_ELEM_SIZE];
+} imax32_ref_in_vector;
+
+typedef struct {
+#endif
+ tscalar1w search[BMA_SEARCH_BLOCK_SZ_16][BMA_SEARCH_BLOCK_SZ_16];
+} bma_16x16_search_window;
+
+typedef struct {
+ tscalar1w ref[BMA_REF_BLOCK_SZ_8][BMA_REF_BLOCK_SZ_8];
+} ref_block_8x8;
+
+typedef struct {
+ tscalar1w sads[SADS_16x16_1];
+} bma_output_16_1;
+
+typedef struct {
+ tscalar1w sads[SADS_16x16_2];
+} bma_output_16_2;
+
+typedef struct {
+ tscalar1w sads[SADS_14x14_2];
+} bma_output_14_2;
+
+typedef struct {
+ tscalar1w sads[SADS_14x14_1];
+} bma_output_14_1;
+
+typedef struct {
+ tvector1w spatial_weight_lut[BFA_MAX_KWAY]; /* spatial weight LUT */
+ /* range weight LUT: (BFA_RW_LUT_SIZE + 1) LUT values are compressed into a buffer of BFA_RW_LUT_SIZE entries.
+ * range_weight_lut[k] = pack(drop[k], range_weight[k])
+ * where drop[k] = range_weight[k+1] - range_weight[k]
+ * pack(msb, lsb): two 8-bit numbers packed into one 16-bit number */
+ tvector1w range_weight_lut[BFA_RW_LUT_SIZE];
+} bfa_weights;
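
Given the packing rule in the comment above, the LUT entries can be built and decoded as sketched here; the helper names are illustrative, and pack(msb, lsb) = (msb << 8) | lsb is an assumption based on that comment:

	/* pack a drop value and a range weight into one 16-bit LUT entry */
	static inline tvector1w bfa_pack_rw(uint8_t drop, uint8_t range_weight)
	{
		return (tvector1w)(((uint16_t)drop << 8) | range_weight);
	}

	static inline uint8_t bfa_unpack_range_weight(tvector1w entry)
	{
		return (uint8_t)(entry & 0xff);
	}

	static inline uint8_t bfa_unpack_drop(tvector1w entry)
	{
		return (uint8_t)((entry >> 8) & 0xff);
	}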
+
+/* Return type for BFA BBBs */
+typedef struct {
+ tvector2w sop; /* weighted sum of pixels */
+ tvector1w sow; /* sum of weights */
+} bfa_7x7_output;
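
As a usage note (illustrative, not from this header; res is a placeholder bfa_7x7_output variable), the consumer typically recovers the filtered pixel as a weighted average:

	/* normalised bilateral filter output (guard against a zero weight sum) */
	tvector1w out_pix = (res.sow != 0) ? (tvector1w)(res.sop / res.sow) : 0;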
+#endif /* __REF_VECTOR_FUNC_TYPES_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/sp_public.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/sp_public.h
new file mode 100644
index 000000000000..974ce6a33b4b
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/sp_public.h
@@ -0,0 +1,223 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __SP_PUBLIC_H_INCLUDED__
+#define __SP_PUBLIC_H_INCLUDED__
+
+#include <type_support.h>
+#include "system_types.h"
+
+typedef struct sp_state_s sp_state_t;
+typedef struct sp_stall_s sp_stall_t;
+
+/*! Enable or disable the program complete irq signal of SP[ID]
+
+ \param ID[in] SP identifier
+ \param cnd[in] predicate
+
+ \return none, if(cnd) enable(SP[ID].irq) else disable(SP[ID].irq)
+ */
+extern void cnd_sp_irq_enable(
+ const sp_ID_t ID,
+ const bool cnd);
+
+/*! Read the state of cell SP[ID]
+
+ \param ID[in] SP identifier
+ \param state[out] sp state structure
+ \param stall[out] sp stall conditions
+
+ \return none, state = SP[ID].state, stall = SP[ID].stall
+ */
+extern void sp_get_state(
+ const sp_ID_t ID,
+ sp_state_t *state,
+ sp_stall_t *stall);
+
+/*! Write to the status and control register of SP[ID]
+
+ \param ID[in] SP identifier
+ \param reg[in] register index
+ \param value[in] The data to be written
+
+ \return none, SP[ID].sc[reg] = value
+ */
+STORAGE_CLASS_SP_H void sp_ctrl_store(
+ const sp_ID_t ID,
+ const hrt_address reg,
+ const hrt_data value);
+
+/*! Read from the status and control register of SP[ID]
+
+ \param ID[in] SP identifier
+ \param reg[in] register index
+
+ \return SP[ID].sc[reg]
+ */
+STORAGE_CLASS_SP_H hrt_data sp_ctrl_load(
+ const sp_ID_t ID,
+ const hrt_address reg);
+
+/*! Get the status of a bitfield in the control register of SP[ID]
+
+ \param ID[in] SP identifier
+ \param reg[in] register index
+ \param bit[in] The bit index to be checked
+
+ \return (SP[ID].sc[reg] & (1<<bit)) != 0
+ */
+STORAGE_CLASS_SP_H bool sp_ctrl_getbit(
+ const sp_ID_t ID,
+ const hrt_address reg,
+ const unsigned int bit);
+
+/*! Set a bitfield in the control register of SP[ID]
+
+ \param ID[in] SP identifier
+ \param reg[in] register index
+ \param bit[in] The bit index to be set
+
+ \return none, SP[ID].sc[reg] |= (1<<bit)
+ */
+STORAGE_CLASS_SP_H void sp_ctrl_setbit(
+ const sp_ID_t ID,
+ const hrt_address reg,
+ const unsigned int bit);
+
+/*! Clear a bitfield in the control register of SP[ID]
+
+ \param ID[in] SP identifier
+ \param reg[in] register index
+ \param bit[in] The bit index to be cleared
+
+ \return none, SP[ID].sc[reg] &= ~(1<<bit)
+ */
+STORAGE_CLASS_SP_H void sp_ctrl_clearbit(
+ const sp_ID_t ID,
+ const hrt_address reg,
+ const unsigned int bit);
+
+/*! Write to the DMEM of SP[ID]
+
+ \param ID[in] SP identifier
+ \param addr[in] the address in DMEM
+ \param data[in] The data to be written
+ \param size[in] The size(in bytes) of the data to be written
+
+ \return none, SP[ID].dmem[addr...addr+size-1] = data
+ */
+STORAGE_CLASS_SP_H void sp_dmem_store(
+ const sp_ID_t ID,
+ hrt_address addr,
+ const void *data,
+ const size_t size);
+
+/*! Read from the DMEM of SP[ID]
+
+ \param ID[in] SP identifier
+ \param addr[in] the address in DMEM
+ \param data[out] Pointer to the destination buffer
+ \param size[in] The size(in bytes) of the data to be read
+
+ \return none, data = SP[ID].dmem[addr...addr+size-1]
+ */
+STORAGE_CLASS_SP_H void sp_dmem_load(
+ const sp_ID_t ID,
+ const hrt_address addr,
+ void *data,
+ const size_t size);
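
A minimal host-side sketch of the DMEM accessors declared above; SP0_ID, PARAM_ADDR and the parameter struct are placeholders, not definitions from this header:

	struct my_params {
		uint32_t width;
		uint32_t height;
	} params = { 1920, 1080 }, check;

	/* copy the parameter block into SP DMEM, then read it back */
	sp_dmem_store(SP0_ID, PARAM_ADDR, &params, sizeof(params));
	sp_dmem_load(SP0_ID, PARAM_ADDR, &check, sizeof(check));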
+
+/*! Write an 8-bit datum to the DMEM of SP[ID]
+
+ \param ID[in] SP identifier
+ \param addr[in] the address in DMEM
+ \param data[in] The data to be written
+
+ \return none, SP[ID].dmem[addr] = data
+ */
+STORAGE_CLASS_SP_H void sp_dmem_store_uint8(
+ const sp_ID_t ID,
+ hrt_address addr,
+ const uint8_t data);
+
+/*! Write a 16-bit datum to the DMEM of SP[ID]
+
+ \param ID[in] SP identifier
+ \param addr[in] the address in DMEM
+ \param data[in] The data to be written
+
+ \return none, SP[ID].dmem[addr...addr+1] = data
+ */
+STORAGE_CLASS_SP_H void sp_dmem_store_uint16(
+ const sp_ID_t ID,
+ hrt_address addr,
+ const uint16_t data);
+
+/*! Write a 32-bit datum to the DMEM of SP[ID]
+
+ \param ID[in] SP identifier
+ \param addr[in] the address in DMEM
+ \param data[in] The data to be written
+
+ \return none, SP[ID].dmem[addr...addr+3] = data
+ */
+STORAGE_CLASS_SP_H void sp_dmem_store_uint32(
+ const sp_ID_t ID,
+ hrt_address addr,
+ const uint32_t data);
+
+/*! Load an 8-bit datum from the DMEM of SP[ID]
+
+ \param ID[in] SP identifier
+ \param addr[in] the address in DMEM
+
+ \return SP[ID].dmem[addr]
+ */
+STORAGE_CLASS_SP_H uint8_t sp_dmem_load_uint8(
+ const sp_ID_t ID,
+ const hrt_address addr);
+
+/*! Load a 16-bit datum from the DMEM of SP[ID]
+
+ \param ID[in] SP identifier
+ \param addr[in] the address in DMEM
+
+ \return SP[ID].dmem[addr...addr+1]
+ */
+STORAGE_CLASS_SP_H uint16_t sp_dmem_load_uint16(
+ const sp_ID_t ID,
+ const hrt_address addr);
+
+/*! Load a 32-bit datum from the DMEM of SP[ID]
+
+ \param ID[in] SP identifier
+ \param addr[in] the address in DMEM
+
+ \return SP[ID].dmem[addr...addr+3]
+ */
+STORAGE_CLASS_SP_H uint32_t sp_dmem_load_uint32(
+ const sp_ID_t ID,
+ const hrt_address addr);
+
+#endif /* __SP_PUBLIC_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/tag_public.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/tag_public.h
new file mode 100644
index 000000000000..22ef747f3d4a
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/tag_public.h
@@ -0,0 +1,41 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __TAG_PUBLIC_H_INCLUDED__
+#define __TAG_PUBLIC_H_INCLUDED__
+
+/**
+ * @brief Creates the tag description from the given parameters.
+ * @param[in] num_captures
+ * @param[in] skip
+ * @param[in] offset
+ * @param[in] exp_id
+ * @param[out] tag_descr
+ */
+void
+sh_css_create_tag_descr(int num_captures,
+ unsigned int skip,
+ int offset,
+ unsigned int exp_id,
+ struct sh_css_tag_descr *tag_descr);
+
+/**
+ * @brief Encodes the members of tag description into a 32-bit value.
+ * @param[in] tag Pointer to the tag description
+ * @return (unsigned int) Encoded 32-bit tag-info
+ */
+unsigned int
+sh_css_encode_tag_descr(struct sh_css_tag_descr *tag);
+
+#endif /* __TAG_PUBLIC_H_INCLUDED__ */
+
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/timed_ctrl_public.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/timed_ctrl_public.h
new file mode 100644
index 000000000000..b3becac16f49
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/timed_ctrl_public.h
@@ -0,0 +1,59 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __TIMED_CTRL_PUBLIC_H_INCLUDED__
+#define __TIMED_CTRL_PUBLIC_H_INCLUDED__
+
+#include "system_types.h"
+
+/*! Write to a control register of TIMED_CTRL[ID]
+
+ \param ID[in] TIMED_CTRL identifier
+ \param reg_addr[in] register byte address
+ \param value[in] The data to be written
+
+ \return none, TIMED_CTRL[ID].ctrl[reg] = value
+ */
+STORAGE_CLASS_TIMED_CTRL_H void timed_ctrl_reg_store(
+ const timed_ctrl_ID_t ID,
+ const unsigned int reg_addr,
+ const hrt_data value);
+
+extern void timed_ctrl_snd_commnd(
+ const timed_ctrl_ID_t ID,
+ hrt_data mask,
+ hrt_data condition,
+ hrt_data counter,
+ hrt_address addr,
+ hrt_data value);
+
+extern void timed_ctrl_snd_sp_commnd(
+ const timed_ctrl_ID_t ID,
+ hrt_data mask,
+ hrt_data condition,
+ hrt_data counter,
+ const sp_ID_t SP_ID,
+ hrt_address offset,
+ hrt_data value);
+
+extern void timed_ctrl_snd_gpio_commnd(
+ const timed_ctrl_ID_t ID,
+ hrt_data mask,
+ hrt_data condition,
+ hrt_data counter,
+ const gpio_ID_t GPIO_ID,
+ hrt_address offset,
+ hrt_data value);
+
+#endif /* __TIMED_CTRL_PUBLIC_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/vamem_public.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/vamem_public.h
new file mode 100644
index 000000000000..cee15d0ab2d8
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/vamem_public.h
@@ -0,0 +1,20 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __VAMEM_PUBLIC_H_INCLUDED__
+#define __VAMEM_PUBLIC_H_INCLUDED__
+
+
+
+#endif /* __VAMEM_PUBLIC_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/vmem_public.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/vmem_public.h
new file mode 100644
index 000000000000..e9801c0fe147
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/vmem_public.h
@@ -0,0 +1,20 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __VMEM_PUBLIC_H_INCLUDED__
+#define __VMEM_PUBLIC_H_INCLUDED__
+
+#include "isp.h" /* tmemvectoru */
+
+#endif /* __VMEM_PUBLIC_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/ibuf_ctrl.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/ibuf_ctrl.h
new file mode 100644
index 000000000000..f5de0df7981e
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/ibuf_ctrl.h
@@ -0,0 +1,49 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IBUF_CTRL_H_INCLUDED__
+#define __IBUF_CTRL_H_INCLUDED__
+
+
+/*
+ * This file is included on every cell {SP,ISP,host} and on every system
+ * that uses the input system device(s). It defines the API to the DLI bridge
+ *
+ * System and cell specific interfaces and inline code are included
+ * conditionally through Makefile path settings.
+ *
+ * - system and cell agnostic interfaces, constants and identifiers
+ * - public: system agnostic, cell specific interfaces
+ * - private: system dependent, cell specific interfaces &
+ * inline implementations
+ * - global: system specific constants and identifiers
+ * - local: system and cell specific constants and identifiers
+ */
+
+#include "storage_class.h"
+
+#include "system_local.h"
+#include "ibuf_ctrl_local.h"
+
+#ifndef __INLINE_IBUF_CTRL__
+#define STORAGE_CLASS_IBUF_CTRL_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_IBUF_CTRL_C
+#include "ibuf_ctrl_public.h"
+#else /* __INLINE_IBUF_CTRL__ */
+#define STORAGE_CLASS_IBUF_CTRL_H STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_IBUF_CTRL_C STORAGE_CLASS_INLINE
+#include "ibuf_ctrl_private.h"
+#endif /* __INLINE_IBUF_CTRL__ */
+
+#endif /* __IBUF_CTRL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/input_formatter.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/input_formatter.h
new file mode 100644
index 000000000000..041c8b660aa4
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/input_formatter.h
@@ -0,0 +1,47 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __INPUT_FORMATTER_H_INCLUDED__
+#define __INPUT_FORMATTER_H_INCLUDED__
+
+/*
+ * This file is included on every cell {SP,ISP,host} and on every system
+ * that uses the input system device(s). It defines the API to the DLI bridge
+ *
+ * System and cell specific interfaces and inline code are included
+ * conditionally through Makefile path settings.
+ *
+ * - system and cell agnostic interfaces, constants and identifiers
+ * - public: system agnostic, cell specific interfaces
+ * - private: system dependent, cell specific interfaces & inline implementations
+ * - global: system specific constants and identifiers
+ * - local: system and cell specific constants and identifiers
+ */
+
+#include "storage_class.h"
+
+#include "system_local.h"
+#include "input_formatter_local.h"
+
+#ifndef __INLINE_INPUT_FORMATTER__
+#define STORAGE_CLASS_INPUT_FORMATTER_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_INPUT_FORMATTER_C
+#include "input_formatter_public.h"
+#else /* __INLINE_INPUT_FORMATTER__ */
+#define STORAGE_CLASS_INPUT_FORMATTER_H STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_INPUT_FORMATTER_C STORAGE_CLASS_INLINE
+#include "input_formatter_private.h"
+#endif /* __INLINE_INPUT_FORMATTER__ */
+
+#endif /* __INPUT_FORMATTER_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/input_system.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/input_system.h
new file mode 100644
index 000000000000..182867367b48
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/input_system.h
@@ -0,0 +1,47 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __INPUT_SYSTEM_H_INCLUDED__
+#define __INPUT_SYSTEM_H_INCLUDED__
+
+/*
+ * This file is included on every cell {SP,ISP,host} and on every system
+ * that uses the input system device(s). It defines the API to the DLI bridge
+ *
+ * System and cell specific interfaces and inline code are included
+ * conditionally through Makefile path settings.
+ *
+ * - system and cell agnostic interfaces, constants and identifiers
+ * - public: system agnostic, cell specific interfaces
+ * - private: system dependent, cell specific interfaces & inline implementations
+ * - global: system specific constants and identifiers
+ * - local: system and cell specific constants and identifiers
+ */
+
+#include "storage_class.h"
+
+#include "system_local.h"
+#include "input_system_local.h"
+
+#ifndef __INLINE_INPUT_SYSTEM__
+#define STORAGE_CLASS_INPUT_SYSTEM_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_INPUT_SYSTEM_C
+#include "input_system_public.h"
+#else /* __INLINE_INPUT_SYSTEM__ */
+#define STORAGE_CLASS_INPUT_SYSTEM_H STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_INPUT_SYSTEM_C STORAGE_CLASS_INLINE
+#include "input_system_private.h"
+#endif /* __INLINE_INPUT_SYSTEM__ */
+
+#endif /* __INPUT_SYSTEM_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/irq.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/irq.h
new file mode 100644
index 000000000000..1dc443892cc5
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/irq.h
@@ -0,0 +1,47 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IRQ_H_INCLUDED__
+#define __IRQ_H_INCLUDED__
+
+/*
+ * This file is included on every cell {SP,ISP,host} and on every system
+ * that uses the IRQ device. It defines the API to the DLI bridge
+ *
+ * System and cell specific interfaces and inline code are included
+ * conditionally through Makefile path settings.
+ *
+ * - system and cell agnostic interfaces, constants and identifiers
+ * - public: system agnostic, cell specific interfaces
+ * - private: system dependent, cell specific interfaces & inline implementations
+ * - global: system specific constants and identifiers
+ * - local: system and cell specific constants and identifiers
+ */
+
+#include "storage_class.h"
+
+#include "system_local.h"
+#include "irq_local.h"
+
+#ifndef __INLINE_IRQ__
+#define STORAGE_CLASS_IRQ_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_IRQ_C
+#include "irq_public.h"
+#else /* __INLINE_IRQ__ */
+#define STORAGE_CLASS_IRQ_H STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_IRQ_C STORAGE_CLASS_INLINE
+#include "irq_private.h"
+#endif /* __INLINE_IRQ__ */
+
+#endif /* __IRQ_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/isp.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/isp.h
new file mode 100644
index 000000000000..49190d0abc30
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/isp.h
@@ -0,0 +1,47 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __ISP_H_INCLUDED__
+#define __ISP_H_INCLUDED__
+
+/*
+ * This file is included on every cell {SP,ISP,host} and on every system
+ * that uses the ISP cell. It defines the API to the DLI bridge
+ *
+ * System and cell specific interfaces and inline code are included
+ * conditionally through Makefile path settings.
+ *
+ * - system and cell agnostic interfaces, constants and identifiers
+ * - public: system agnostic, cell specific interfaces
+ * - private: system dependent, cell specific interfaces & inline implementations
+ * - global: system specific constants and identifiers
+ * - local: system and cell specific constants and identifiers
+ */
+
+#include "storage_class.h"
+
+#include "system_local.h"
+#include "isp_local.h"
+
+#ifndef __INLINE_ISP__
+#define STORAGE_CLASS_ISP_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_ISP_C
+#include "isp_public.h"
+#else /* __INLINE_ISP__ */
+#define STORAGE_CLASS_ISP_H STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_ISP_C STORAGE_CLASS_INLINE
+#include "isp_private.h"
+#endif /* __INLINE_ISP__ */
+
+#endif /* __ISP_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/isys_dma.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/isys_dma.h
new file mode 100644
index 000000000000..9a608f07adcb
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/isys_dma.h
@@ -0,0 +1,49 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __ISYS_DMA_H_INCLUDED__
+#define __ISYS_DMA_H_INCLUDED__
+
+
+/*
+ * This file is included on every cell {SP,ISP,host} and on every system
+ * that uses the input system device(s). It defines the API to the DLI bridge
+ *
+ * System and cell specific interfaces and inline code are included
+ * conditionally through Makefile path settings.
+ *
+ * - system and cell agnostic interfaces, constants and identifiers
+ * - public: system agnostic, cell specific interfaces
+ * - private: system dependent, cell specific interfaces &
+ * inline implementations
+ * - global: system specific constants and identifiers
+ * - local: system and cell specific constants and identifiers
+ */
+
+#include "storage_class.h"
+
+#include "system_local.h"
+#include "isys_dma_local.h"
+
+#ifndef __INLINE_ISYS2401_DMA__
+#define STORAGE_CLASS_ISYS2401_DMA_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_ISYS2401_DMA_C
+#include "isys_dma_public.h"
+#else /* __INLINE_ISYS2401_DMA__ */
+#define STORAGE_CLASS_ISYS2401_DMA_H STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_ISYS2401_DMA_C STORAGE_CLASS_INLINE
+#include "isys_dma_private.h"
+#endif /* __INLINE_ISYS2401_DMA__ */
+
+#endif /* __ISYS_DMA_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/isys_irq.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/isys_irq.h
new file mode 100644
index 000000000000..cf858bcc8e45
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/isys_irq.h
@@ -0,0 +1,40 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_ISYS_IRQ_H__
+#define __IA_CSS_ISYS_IRQ_H__
+
+#include <type_support.h>
+#include <storage_class.h>
+#include <system_local.h>
+
+#if defined(USE_INPUT_SYSTEM_VERSION_2401)
+
+#ifndef __INLINE_ISYS2401_IRQ__
+
+#define STORAGE_CLASS_ISYS2401_IRQ_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_ISYS2401_IRQ_C STORAGE_CLASS_EXTERN
+#include "isys_irq_public.h"
+
+#else /* __INLINE_ISYS2401_IRQ__ */
+
+#define STORAGE_CLASS_ISYS2401_IRQ_H STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_ISYS2401_IRQ_C STORAGE_CLASS_INLINE
+#include "isys_irq_private.h"
+
+#endif /* __INLINE_ISYS2401_IRQ__ */
+
+#endif /* defined(USE_INPUT_SYSTEM_VERSION_2401) */
+
+#endif /* __IA_CSS_ISYS_IRQ_H__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/isys_stream2mmio.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/isys_stream2mmio.h
new file mode 100644
index 000000000000..3e8cfe555ad5
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/isys_stream2mmio.h
@@ -0,0 +1,49 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __ISYS_STREAM2MMIO_H_INCLUDED__
+#define __ISYS_STREAM2MMIO_H_INCLUDED__
+
+
+/*
+ * This file is included on every cell {SP,ISP,host} and on every system
+ * that uses the input system device(s). It defines the API to the DLI bridge
+ *
+ * System and cell specific interfaces and inline code are included
+ * conditionally through Makefile path settings.
+ *
+ * - system and cell agnostic interfaces, constants and identifiers
+ * - public: system agnostic, cell specific interfaces
+ * - private: system dependent, cell specific interfaces &
+ * inline implementations
+ * - global: system specific constants and identifiers
+ * - local: system and cell specific constants and identifiers
+ */
+
+#include "storage_class.h"
+
+#include "system_local.h"
+#include "isys_stream2mmio_local.h"
+
+#ifndef __INLINE_STREAM2MMIO__
+#define STORAGE_CLASS_STREAM2MMIO_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_STREAM2MMIO_C
+#include "isys_stream2mmio_public.h"
+#else /* __INLINE_STREAM2MMIO__ */
+#define STORAGE_CLASS_STREAM2MMIO_H STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_STREAM2MMIO_C STORAGE_CLASS_INLINE
+#include "isys_stream2mmio_private.h"
+#endif /* __INLINE_STREAM2MMIO__ */
+
+#endif /* __ISYS_STREAM2MMIO_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/math_support.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/math_support.h
new file mode 100644
index 000000000000..48d84bc0ad9e
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/math_support.h
@@ -0,0 +1,224 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __MATH_SUPPORT_H
+#define __MATH_SUPPORT_H
+
+#include "storage_class.h" /* for STORAGE_CLASS_INLINE */
+#if defined(__KERNEL__)
+#include <linux/kernel.h> /* Override the definitions of max/min from the linux kernel */
+#endif /*__KERNEL__*/
+
+#if defined(_MSC_VER)
+#include <stdlib.h> /* Override the definition of max/min from stdlib.h*/
+#endif /* _MSC_VER */
+
+/* in case we have min/max/MIN/MAX macro's undefine them */
+#ifdef min
+#undef min
+#endif
+#ifdef max
+#undef max
+#endif
+#ifdef MIN /* also defined in include/hrt/numeric.h from SDK */
+#undef MIN
+#endif
+#ifdef MAX
+#undef MAX
+#endif
+#ifdef ABS
+#undef ABS
+#endif
+
+#define IS_ODD(a) ((a) & 0x1)
+#define IS_EVEN(a) (!IS_ODD(a))
+
+/* force a value to a lower even value */
+#define EVEN_FLOOR(x) ((x) & ~1)
+
+#ifdef ISP2401
+/* If the number is odd, find the next even number */
+#define EVEN_CEIL(x) ((IS_ODD(x)) ? ((x) + 1) : (x))
+
+#endif
+/* A => B */
+#define IMPLIES(a, b) (!(a) || (b))
+
+#define ABS(a) ((a) >= 0 ? (a) : -(a))
+
+/* for preprocessor and array sizing use MIN and MAX
+ otherwise use min and max */
+#define MAX(a, b) (((a) > (b)) ? (a) : (b))
+#define MIN(a, b) (((a) < (b)) ? (a) : (b))
+#ifdef ISP2401
+#define ROUND_DIV(a, b) ((b) ? ((a) + ((b) >> 1)) / (b) : 0)
+#endif
+#define CEIL_DIV(a, b) ((b) ? ((a) + (b) - 1) / (b) : 0)
+#define CEIL_MUL(a, b) (CEIL_DIV(a, b) * (b))
+#define CEIL_MUL2(a, b) (((a) + (b) - 1) & ~((b) - 1))
+#define CEIL_SHIFT(a, b) (((a) + (1 << (b)) - 1)>>(b))
+#define CEIL_SHIFT_MUL(a, b) (CEIL_SHIFT(a, b) << (b))
+#ifdef ISP2401
+#define ROUND_HALF_DOWN_DIV(a, b) ((b) ? ((a) + (b / 2) - 1) / (b) : 0)
+#define ROUND_HALF_DOWN_MUL(a, b) (ROUND_HALF_DOWN_DIV(a, b) * (b))
+#endif
+
+
+/* Find the next power-of-2 number >= x */
+#define bit2(x) ((x) | ((x) >> 1))
+#define bit4(x) (bit2(x) | (bit2(x) >> 2))
+#define bit8(x) (bit4(x) | (bit4(x) >> 4))
+#define bit16(x) (bit8(x) | (bit8(x) >> 8))
+#define bit32(x) (bit16(x) | (bit16(x) >> 16))
+#define NEXT_POWER_OF_2(x) (bit32(x-1) + 1)
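
A few worked values, derived directly from the macro definitions above (assuming plain unsigned integer arguments; these macros evaluate their arguments more than once, so the arguments must be free of side effects):

	/*
	 * CEIL_DIV(10, 4)       == 3
	 * CEIL_MUL(10, 4)       == 12
	 * CEIL_MUL2(10, 4)      == 12   (b must be a power of two)
	 * CEIL_SHIFT(10, 2)     == 3    (divide by 4, rounding up)
	 * CEIL_SHIFT_MUL(10, 2) == 12
	 * NEXT_POWER_OF_2(1)    == 1
	 * NEXT_POWER_OF_2(5)    == 8
	 * NEXT_POWER_OF_2(64)   == 64
	 */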
+
+
+/* min and max should not be macros because macros evaluate their arguments
+ twice. If you really need a macro (e.g. for the preprocessor or for
+ initializing an array) use MIN() and MAX(), otherwise use min() and max().
+*/
+
+#if !defined(PIPE_GENERATION)
+
+#ifndef INLINE_MATH_SUPPORT_UTILS
+/*
+These macro versions are added back because callers mix types; with the
+inline functions, corner cases of the calculations become incorrect due to
+conversions between signed and unsigned variables or due to overflow.
+Before the inline functions were introduced, max, min and ceil_div were
+macros, so they are restored as macros here.
+
+The other math utility functions are left as inline functions, as they are
+newly added.
+*/
+
+#define max(a, b) (MAX(a, b))
+#define min(a, b) (MIN(a, b))
+#define ceil_div(a, b) (CEIL_DIV(a, b))
+
+#else /* !defined(INLINE_MATH_SUPPORT_UTILS) */
+
+STORAGE_CLASS_INLINE int max(int a, int b)
+{
+ return MAX(a, b);
+}
+
+STORAGE_CLASS_INLINE int min(int a, int b)
+{
+ return MIN(a, b);
+}
+
+STORAGE_CLASS_INLINE unsigned int ceil_div(unsigned int a, unsigned int b)
+{
+ return CEIL_DIV(a, b);
+}
+#endif /* !defined(INLINE_MATH_SUPPORT_UTILS) */
+
+STORAGE_CLASS_INLINE unsigned int umax(unsigned int a, unsigned int b)
+{
+ return MAX(a, b);
+}
+
+STORAGE_CLASS_INLINE unsigned int umin(unsigned int a, unsigned int b)
+{
+ return MIN(a, b);
+}
+
+
+STORAGE_CLASS_INLINE unsigned int ceil_mul(unsigned int a, unsigned int b)
+{
+ return CEIL_MUL(a, b);
+}
+
+STORAGE_CLASS_INLINE unsigned int ceil_mul2(unsigned int a, unsigned int b)
+{
+ return CEIL_MUL2(a, b);
+}
+
+STORAGE_CLASS_INLINE unsigned int ceil_shift(unsigned int a, unsigned int b)
+{
+ return CEIL_SHIFT(a, b);
+}
+
+STORAGE_CLASS_INLINE unsigned int ceil_shift_mul(unsigned int a, unsigned int b)
+{
+ return CEIL_SHIFT_MUL(a, b);
+}
+
+#ifdef ISP2401
+STORAGE_CLASS_INLINE unsigned int round_half_down_div(unsigned int a, unsigned int b)
+{
+ return ROUND_HALF_DOWN_DIV(a, b);
+}
+
+STORAGE_CLASS_INLINE unsigned int round_half_down_mul(unsigned int a, unsigned int b)
+{
+ return ROUND_HALF_DOWN_MUL(a, b);
+}
+#endif
+
+/** @brief Next Power of Two
+ *
+ * @param[in] unsigned number
+ *
+ * @return next power of two
+ *
+ * This function rounds the input up to the nearest power of 2 (2^x),
+ * i.e. towards infinity
+ *
+ * Input Range: 0 .. 2^(8*sizeof(int)-1)
+ *
+ * IF input is a power of 2
+ * out = in
+ * OTHERWISE
+ * out = 2^(ceil(log2(in)))
+ *
+ */
+
+STORAGE_CLASS_INLINE unsigned int ceil_pow2(unsigned int a)
+{
+ if (a == 0) {
+ return 1;
+ }
+ /* IF input is already a power of two*/
+ else if ((!((a)&((a)-1)))) {
+ return a;
+ }
+ else {
+ unsigned int v = a;
+ v |= v>>1;
+ v |= v>>2;
+ v |= v>>4;
+ v |= v>>8;
+ v |= v>>16;
+ return (v+1);
+ }
+}
+
+#endif /* !defined(PIPE_GENERATION) */
+
+#if !defined(__ISP)
+/*
+ * For SP and ISP, SDK provides the definition of OP_std_modadd.
+ * We need it only for host
+ */
+#define OP_std_modadd(base, offset, size) ((base+offset)%(size))
+#endif /* !defined(__ISP) */
+
+#if !defined(__KERNEL__)
+#define clamp(a, min_val, max_val) MIN(MAX((a), (min_val)), (max_val))
+#endif /* !defined(__KERNEL__) */
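
For illustration, two typical host-side uses of the helpers above (cur and QUEUE_SIZE are placeholder names):

	/* advance a circular-buffer index with wrap-around */
	unsigned int next = OP_std_modadd(cur, 1, QUEUE_SIZE);

	/* clamp(5, 0, 3) == 3, clamp(-1, 0, 3) == 0 */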
+
+#endif /* __MATH_SUPPORT_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/memory_access/memory_access.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/memory_access/memory_access.h
new file mode 100644
index 000000000000..195c4a5bceeb
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/memory_access/memory_access.h
@@ -0,0 +1,174 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015-2017, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __MEMORY_ACCESS_H_INCLUDED__
+#define __MEMORY_ACCESS_H_INCLUDED__
+
+/*!
+ * \brief
+ * Define the public interface for virtual memory
+ * access functions. Access types are limited to
+ * those defined in <stdint.h>
+ *
+ * The address representation is private to the system
+ * and represented as "hrt_vaddress" rather than a
+ * pointer, as the memory allocation cannot be accessed
+ * by dereferencing but requires load and store access
+ * functions
+ *
+ * The page table selection (virtual memory context), i.e. the
+ * page table base index, is implicit. It must be set by the
+ * implementation of the access functions
+ *
+ * "store" is a transfer to the system
+ * "load" is a transfer from the system
+ *
+ * Allocation properties can be specified by setting
+ * attributes (see below). In case of multiple physical
+ * memories, the memory ID is encoded in the attribute
+ *
+ * Allocations in the same physical memory, but in a
+ * different (set of) page tables can be shared through
+ * a page table information mapping function
+ */
+
+#include <type_support.h>
+#include "platform_support.h" /* for __func__ */
+
+/*
+ * User provided file that defines the (sub)system address types:
+ * - hrt_vaddress a type that can hold the (sub)system virtual address range
+ */
+#include "system_types.h"
+
+/*
+ * The MMU base address is a physical address, thus the same type is used
+ * as for the device base address
+ */
+#include "device_access.h"
+
+#include "hmm/hmm.h"
+
+/*!
+ * \brief
+ * Bit masks for specialised allocation functions
+ * the default is "uncached", "not contiguous",
+ * "not page aligned" and "not cleared"
+ *
+ * Forcing alignment (usually) returns a pointer
+ * at an alignment boundary that is offset from
+ * the allocated pointer. Without storing this
+ * pointer/offset, we cannot free it. The memory
+ * manager is responsible for the bookkeeping, e.g.
+ * the allocation function creates a sentinel
+ * within the allocation referencable from the
+ * returned pointer/address.
+ */
+#define MMGR_ATTRIBUTE_MASK 0x000f
+#define MMGR_ATTRIBUTE_CACHED 0x0001
+#define MMGR_ATTRIBUTE_CONTIGUOUS 0x0002
+#define MMGR_ATTRIBUTE_PAGEALIGN 0x0004
+#define MMGR_ATTRIBUTE_CLEARED 0x0008
+#define MMGR_ATTRIBUTE_UNUSED 0xfff0
+
+/* #define MMGR_ATTRIBUTE_DEFAULT (MMGR_ATTRIBUTE_CACHED) */
+#define MMGR_ATTRIBUTE_DEFAULT 0
+
+extern const hrt_vaddress mmgr_NULL;
+extern const hrt_vaddress mmgr_EXCEPTION;
+
+/*! Return the address of an allocation in memory
+
+ \param size[in] Size in bytes of the allocation
+
+ \return vaddress
+ */
+extern hrt_vaddress mmgr_malloc(const size_t size);
+
+/*! Return the address of a zero initialised allocation in memory
+
+ \param N[in] Number of elements
+ \param size[in] Size in bytes of one element; the total size is N*size
+
+ \return vaddress
+ */
+extern hrt_vaddress mmgr_calloc(const size_t N, const size_t size);
+
+/*! Return the address of an allocation in memory
+
+ \param size[in] Size in bytes of the allocation
+ \param attribute[in] Bit vector specifying the properties
+ of the allocation including zero initialisation
+
+ \return vaddress
+ */
+
+extern hrt_vaddress mmgr_alloc_attr(const size_t size, const uint16_t attribute);
+
+/*! Return the address of a mapped existing allocation in memory
+
+ \param ptr[in] Pointer to an allocation in a different
+ virtual memory page table, but the same
+ physical memory
+ \param size[in] Size of the memory of the pointer
+ \param attribute[in] Bit vector specifying the properties
+ of the allocation
+ \param context Pointer to a context provided by the
+ client/driver for additional parameters
+ needed by the implementation
+ \Note
+ This interface is tentative, limited to the desired functionality;
+ the actual interface may require further parameters
+
+ \return vaddress
+ */
+extern hrt_vaddress mmgr_mmap(
+ const void *ptr,
+ const size_t size,
+ uint16_t attribute,
+ void *context);
+
+/*! Zero initialise an allocation in memory
+
+ \param vaddr[in] Address of an allocation
+ \param size[in] Size in bytes of the area to be cleared
+
+ \return none
+ */
+extern void mmgr_clear(hrt_vaddress vaddr, const size_t size);
+
+/*! Read an array of bytes from a virtual memory address
+
+ \param vaddr[in] Address of an allocation
+ \param data[out] pointer to the destination array
+ \param size[in] number of bytes to read
+
+ \return none
+ */
+extern void mmgr_load(const hrt_vaddress vaddr, void *data, const size_t size);
+
+/*! Write an array of bytes to device registers or memory in the device
+
+ \param vaddr[in] Address of an allocation
+ \param data[in] pointer to the source array
+ \param size[in] number of bytes to write
+
+ \return none
+ */
+extern void mmgr_store(const hrt_vaddress vaddr, const void *data, const size_t size);
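
A minimal host-side sketch of the allocation and transfer functions declared above; TABLE_SIZE, host_table and host_copy are placeholders, and no free/unmap counterpart is declared in this header:

	/* allocate a cleared buffer in ISP virtual memory */
	hrt_vaddress vbuf = mmgr_alloc_attr(TABLE_SIZE, MMGR_ATTRIBUTE_CLEARED);

	if (vbuf != mmgr_NULL) {
		/* write a host table into the buffer and read it back */
		mmgr_store(vbuf, host_table, TABLE_SIZE);
		mmgr_load(vbuf, host_copy, TABLE_SIZE);
	}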
+
+#endif /* __MEMORY_ACCESS_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/memory_realloc.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/memory_realloc.h
new file mode 100644
index 000000000000..f3b7273fed1b
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/memory_realloc.h
@@ -0,0 +1,38 @@
+/**
+Support for Intel Camera Imaging ISP subsystem.
+Copyright (c) 2010 - 2015, Intel Corporation.
+
+This program is free software; you can redistribute it and/or modify it
+under the terms and conditions of the GNU General Public License,
+version 2, as published by the Free Software Foundation.
+
+This program is distributed in the hope it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+more details.
+*/
+#ifndef __MEMORY_REALLOC_H_INCLUDED__
+#define __MEMORY_REALLOC_H_INCLUDED__
+
+/*!
+ * \brief
+ * Define the internal reallocation of private css memory
+ *
+ */
+
+#include <type_support.h>
+/*
+ * User provided file that defines the (sub)system address types:
+ * - hrt_vaddress a type that can hold the (sub)system virtual address range
+ */
+#include "system_types.h"
+#include "ia_css_err.h"
+
+bool reallocate_buffer(
+ hrt_vaddress *curr_buf,
+ size_t *curr_size,
+ size_t needed_size,
+ bool force,
+ enum ia_css_err *err);
+
+#endif /*__MEMORY_REALLOC_H_INCLUDED__*/
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/misc_support.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/misc_support.h
new file mode 100644
index 000000000000..38db1ecef3c8
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/misc_support.h
@@ -0,0 +1,26 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __MISC_SUPPORT_H_INCLUDED__
+#define __MISC_SUPPORT_H_INCLUDED__
+
+/* suppress compiler warnings on unused variables */
+#ifndef NOT_USED
+#define NOT_USED(a) ((void)(a))
+#endif
+
+/* Calculate the total bytes for pow(2) byte alignment */
+#define tot_bytes_for_pow2_align(pow2, cur_bytes) ((cur_bytes + (pow2 - 1)) & ~(pow2 - 1))
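
A worked value, following directly from the definition (pow2 must be a power of two):

	/* tot_bytes_for_pow2_align(32, 70) == 96, i.e. 70 bytes rounded up to a multiple of 32 */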
+
+#endif /* __MISC_SUPPORT_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/mmu_device.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/mmu_device.h
new file mode 100644
index 000000000000..1b2017b029f2
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/mmu_device.h
@@ -0,0 +1,49 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __MMU_DEVICE_H_INCLUDED__
+#define __MMU_DEVICE_H_INCLUDED__
+
+/* The file mmu.h already exists */
+
+/*
+ * This file is included on every cell {SP,ISP,host} and on every system
+ * that uses the MMU device. It defines the API to the DLI bridge
+ *
+ * System and cell specific interfaces and inline code are included
+ * conditionally through Makefile path settings.
+ *
+ * - system and cell agnostic interfaces, constants and identifiers
+ * - public: system agnostic, cell specific interfaces
+ * - private: system dependent, cell specific interfaces & inline implementations
+ * - global: system specific constants and identifiers
+ * - local: system and cell specific constants and identifiers
+ */
+
+#include "storage_class.h"
+
+#include "system_local.h"
+#include "mmu_local.h"
+
+#ifndef __INLINE_MMU__
+#define STORAGE_CLASS_MMU_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_MMU_C
+#include "mmu_public.h"
+#else /* __INLINE_MMU__ */
+#define STORAGE_CLASS_MMU_H STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_MMU_C STORAGE_CLASS_INLINE
+#include "mmu_private.h"
+#endif /* __INLINE_MMU__ */
+
+#endif /* __MMU_DEVICE_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/mpmath.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/mpmath.h
new file mode 100644
index 000000000000..565983aafa4d
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/mpmath.h
@@ -0,0 +1,330 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __MPMATH_H_INCLUDED__
+#define __MPMATH_H_INCLUDED__
+
+#include "storage_class.h"
+
+#ifdef INLINE_MPMATH
+#define STORAGE_CLASS_MPMATH_FUNC_H STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_MPMATH_DATA_H STORAGE_CLASS_INLINE_DATA
+#else /* INLINE_MPMATH */
+#define STORAGE_CLASS_MPMATH_FUNC_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_MPMATH_DATA_H STORAGE_CLASS_EXTERN_DATA
+#endif /* INLINE_MPMATH */
+
+#include <type_support.h>
+
+/*
+ * Implementation limits
+ */
+#define MIN_BITDEPTH 1
+#define MAX_BITDEPTH 64
+
+#define ROUND_NEAREST_EVEN 0
+#define ROUND_NEAREST 1
+
+/*
+ * The MP types
+ *
+ * "vector lane data" is scalar. With "scalar data" for limited range shift and address values
+ */
+typedef unsigned long long mpudata_t; /* Type of reference MP scalar / vector lane data; unsigned */
+typedef long long mpsdata_t; /* Type of reference MP scalar / vector lane data; signed */
+typedef unsigned short spudata_t; /* Type of reference SP scalar / vector lane data; unsigned */
+typedef short spsdata_t; /* Type of reference SP scalar / vector lane data; signed */
+typedef unsigned short bitdepth_t;
+
+typedef enum {
+ mp_zero_ID,
+ mp_one_ID,
+ mp_mone_ID,
+ mp_smin_ID,
+ mp_smax_ID,
+ mp_umin_ID,
+ mp_umax_ID,
+ N_mp_const_ID
+} mp_const_ID_t;
+
+#ifdef ISP2401
+/* _isValidMpudata is for internal use by mpmath and bbb's.
+ * isValidMpudata is for external use by functions on top.
+ */
+#ifndef ENABLE_VALID_MP_DATA_CHECK
+#define _isValidMpsdata(data,bitdepth) (1)
+#define _isValidMpudata(data,bitdepth) (1)
+#else
+#define _isValidMpsdata(data,bitdepth) isValidMpsdata(data,bitdepth)
+#define _isValidMpudata(data,bitdepth) isValidMpudata(data,bitdepth)
+
+#endif
+#endif
+STORAGE_CLASS_MPMATH_FUNC_H bool isValidMpsdata(
+ const mpsdata_t data,
+ const bitdepth_t bitdepth);
+
+STORAGE_CLASS_MPMATH_FUNC_H bool isValidMpudata(
+ const mpudata_t data,
+ const bitdepth_t bitdepth);
+
+STORAGE_CLASS_MPMATH_FUNC_H mpsdata_t mp_castd (
+ const mpsdata_t in0,
+ const bitdepth_t bitdepth);
+
+STORAGE_CLASS_MPMATH_FUNC_H mpsdata_t mp_casth (
+ const mpsdata_t in0,
+ const bitdepth_t bitdepth);
+
+STORAGE_CLASS_MPMATH_FUNC_H mpsdata_t mp_scasth (
+ const mpsdata_t in0,
+ const bitdepth_t bitdepth);
+
+STORAGE_CLASS_MPMATH_FUNC_H mpsdata_t mp_qcastd (
+ const mpsdata_t in0,
+ const bitdepth_t bitdepth);
+
+STORAGE_CLASS_MPMATH_FUNC_H mpsdata_t mp_qcasth (
+ const mpsdata_t in0,
+ const bitdepth_t bitdepth);
+
+STORAGE_CLASS_MPMATH_FUNC_H mpsdata_t mp_qrcasth (
+ const mpsdata_t in0,
+ const bitdepth_t bitdepth);
+
+STORAGE_CLASS_MPMATH_FUNC_H mpsdata_t mp_abs (
+ const mpsdata_t in0,
+ const bitdepth_t bitdepth);
+
+STORAGE_CLASS_MPMATH_FUNC_H mpsdata_t mp_limit (
+ const mpsdata_t bnd_low,
+ const mpsdata_t in0,
+ const mpsdata_t bnd_high,
+ const bitdepth_t bitdepth);
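
As an illustrative call (sample is a placeholder; the exact saturation rules are defined by the implementation, this only shows the calling convention of clamping a value at a given bit depth):

	/* clamp a 14-bit signed sample to the window [-4096, 4095] */
	mpsdata_t clamped = mp_limit(-4096, sample, 4095, 14);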
+
+STORAGE_CLASS_MPMATH_FUNC_H mpsdata_t mp_max (
+ const mpsdata_t in0,
+ const mpsdata_t in1,
+ const bitdepth_t bitdepth);
+
+STORAGE_CLASS_MPMATH_FUNC_H mpsdata_t mp_min (
+ const mpsdata_t in0,
+ const mpsdata_t in1,
+ const bitdepth_t bitdepth);
+
+STORAGE_CLASS_MPMATH_FUNC_H mpsdata_t mp_mux (
+ const spudata_t sel,
+ const mpsdata_t in0,
+ const mpsdata_t in1,
+ const bitdepth_t bitdepth);
+
+STORAGE_CLASS_MPMATH_FUNC_H mpsdata_t mp_rmux (
+ const spudata_t sel,
+ const mpsdata_t in0,
+ const mpsdata_t in1,
+ const bitdepth_t bitdepth);
+
+STORAGE_CLASS_MPMATH_FUNC_H mpsdata_t mp_add (
+ const mpsdata_t in0,
+ const mpsdata_t in1,
+ const bitdepth_t bitdepth);
+
+STORAGE_CLASS_MPMATH_FUNC_H mpsdata_t mp_sadd (
+ const mpsdata_t in0,
+ const mpsdata_t in1,
+ const bitdepth_t bitdepth);
+
+STORAGE_CLASS_MPMATH_FUNC_H mpsdata_t mp_sub (
+ const mpsdata_t in0,
+ const mpsdata_t in1,
+ const bitdepth_t bitdepth);
+
+STORAGE_CLASS_MPMATH_FUNC_H mpsdata_t mp_ssub (
+ const mpsdata_t in0,
+ const mpsdata_t in1,
+ const bitdepth_t bitdepth);
+
+STORAGE_CLASS_MPMATH_FUNC_H mpsdata_t mp_addasr1 (
+ const mpsdata_t in0,
+ const mpsdata_t in1,
+ const bitdepth_t bitdepth);
+
+STORAGE_CLASS_MPMATH_FUNC_H mpsdata_t mp_subasr1 (
+ const mpsdata_t in0,
+ const mpsdata_t in1,
+ const bitdepth_t bitdepth);
+
+STORAGE_CLASS_MPMATH_FUNC_H mpsdata_t mp_lsr (
+ const mpsdata_t in0,
+ const spsdata_t shft,
+ const bitdepth_t bitdepth);
+
+STORAGE_CLASS_MPMATH_FUNC_H mpsdata_t mp_asr (
+ const mpsdata_t in0,
+ const spsdata_t shft,
+ const bitdepth_t bitdepth);
+
+STORAGE_CLASS_MPMATH_FUNC_H mpsdata_t mp_rasr (
+ const mpsdata_t in0,
+ const spsdata_t shft,
+ const bitdepth_t bitdepth);
+
+/* "mp_rasr_u()" is implemented by "mp_rasr()" */
+STORAGE_CLASS_MPMATH_FUNC_H mpudata_t mp_rasr_u (
+ const mpudata_t in0,
+ const spsdata_t shft,
+ const bitdepth_t bitdepth);
+
+STORAGE_CLASS_MPMATH_FUNC_H mpsdata_t mp_lsl (
+ const mpsdata_t in0,
+ const spsdata_t shft,
+ const bitdepth_t bitdepth);
+
+STORAGE_CLASS_MPMATH_FUNC_H mpsdata_t mp_asl (
+ const mpsdata_t in0,
+ const spsdata_t shft,
+ const bitdepth_t bitdepth);
+
+STORAGE_CLASS_MPMATH_FUNC_H mpsdata_t mp_muld (
+ const mpsdata_t in0,
+ const mpsdata_t in1,
+ const bitdepth_t bitdepth);
+
+STORAGE_CLASS_MPMATH_FUNC_H mpsdata_t mp_mul (
+ const mpsdata_t in0,
+ const mpsdata_t in1,
+ const bitdepth_t bitdepth);
+
+STORAGE_CLASS_MPMATH_FUNC_H mpsdata_t mp_qmul (
+ const mpsdata_t in0,
+ const mpsdata_t in1,
+ const bitdepth_t bitdepth);
+
+STORAGE_CLASS_MPMATH_FUNC_H mpsdata_t mp_qrmul (
+ const mpsdata_t in0,
+ const mpsdata_t in1,
+ const bitdepth_t bitdepth);
+
+STORAGE_CLASS_MPMATH_FUNC_H mpsdata_t mp_qdiv (
+ const mpsdata_t in0,
+ const mpsdata_t in1,
+ const bitdepth_t bitdepth);
+
+STORAGE_CLASS_MPMATH_FUNC_H mpsdata_t mp_qdivh (
+ const mpsdata_t in0,
+ const mpsdata_t in1,
+ const bitdepth_t bitdepth);
+
+STORAGE_CLASS_MPMATH_FUNC_H mpsdata_t mp_div (
+ const mpsdata_t in0,
+ const mpsdata_t in1,
+ const bitdepth_t bitdepth);
+
+STORAGE_CLASS_MPMATH_FUNC_H mpsdata_t mp_divh (
+ const mpsdata_t in0,
+ const mpsdata_t in1,
+ const bitdepth_t bitdepth);
+
+STORAGE_CLASS_MPMATH_FUNC_H mpsdata_t mp_and (
+ const mpsdata_t in0,
+ const mpsdata_t in1,
+ const bitdepth_t bitdepth);
+
+STORAGE_CLASS_MPMATH_FUNC_H mpsdata_t mp_compl (
+ const mpsdata_t in0,
+ const bitdepth_t bitdepth);
+
+STORAGE_CLASS_MPMATH_FUNC_H mpsdata_t mp_or (
+ const mpsdata_t in0,
+ const mpsdata_t in1,
+ const bitdepth_t bitdepth);
+
+STORAGE_CLASS_MPMATH_FUNC_H mpsdata_t mp_xor (
+ const mpsdata_t in0,
+ const mpsdata_t in1,
+ const bitdepth_t bitdepth);
+
+STORAGE_CLASS_MPMATH_FUNC_H spudata_t mp_isEQ (
+ const mpsdata_t in0,
+ const mpsdata_t in1,
+ const bitdepth_t bitdepth);
+
+STORAGE_CLASS_MPMATH_FUNC_H spudata_t mp_isNE (
+ const mpsdata_t in0,
+ const mpsdata_t in1,
+ const bitdepth_t bitdepth);
+
+STORAGE_CLASS_MPMATH_FUNC_H spudata_t mp_isGT (
+ const mpsdata_t in0,
+ const mpsdata_t in1,
+ const bitdepth_t bitdepth);
+
+STORAGE_CLASS_MPMATH_FUNC_H spudata_t mp_isGE (
+ const mpsdata_t in0,
+ const mpsdata_t in1,
+ const bitdepth_t bitdepth);
+
+STORAGE_CLASS_MPMATH_FUNC_H spudata_t mp_isLT (
+ const mpsdata_t in0,
+ const mpsdata_t in1,
+ const bitdepth_t bitdepth);
+
+STORAGE_CLASS_MPMATH_FUNC_H spudata_t mp_isLE (
+ const mpsdata_t in0,
+ const mpsdata_t in1,
+ const bitdepth_t bitdepth);
+
+STORAGE_CLASS_MPMATH_FUNC_H spudata_t mp_isEQZ (
+ const mpsdata_t in0,
+ const bitdepth_t bitdepth);
+
+STORAGE_CLASS_MPMATH_FUNC_H spudata_t mp_isNEZ (
+ const mpsdata_t in0,
+ const bitdepth_t bitdepth);
+
+STORAGE_CLASS_MPMATH_FUNC_H spudata_t mp_isGTZ (
+ const mpsdata_t in0,
+ const bitdepth_t bitdepth);
+
+STORAGE_CLASS_MPMATH_FUNC_H spudata_t mp_isGEZ (
+ const mpsdata_t in0,
+ const bitdepth_t bitdepth);
+
+STORAGE_CLASS_MPMATH_FUNC_H spudata_t mp_isLTZ (
+ const mpsdata_t in0,
+ const bitdepth_t bitdepth);
+
+STORAGE_CLASS_MPMATH_FUNC_H spudata_t mp_isLEZ (
+ const mpsdata_t in0,
+ const bitdepth_t bitdepth);
+
+STORAGE_CLASS_MPMATH_FUNC_H mpsdata_t mp_const (
+ const mp_const_ID_t ID,
+ const bitdepth_t bitdepth);
+
+STORAGE_CLASS_MPMATH_FUNC_H mpudata_t mp_sqrt_u(
+ const mpudata_t in0,
+ const bitdepth_t bitdepth);
+
+#ifndef INLINE_MPMATH
+#define STORAGE_CLASS_MPMATH_FUNC_C
+#define STORAGE_CLASS_MPMATH_DATA_C const
+#else /* INLINE_MPMATH */
+#define STORAGE_CLASS_MPMATH_FUNC_C STORAGE_CLASS_MPMATH_FUNC_H
+#define STORAGE_CLASS_MPMATH_DATA_C STORAGE_CLASS_MPMATH_DATA_H
+#include "mpmath.c"
+#define MPMATH_INLINED
+#endif /* INLINE_MPMATH */
+
+#endif /* __MPMATH_H_INCLUDED__ */
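The header above only declares the fixed-point helpers, so a tiny usage sketch may help show how the bitdepth parameter threads through them. The function names and types come from mpmath.h, but the 12-bit depth, the sample values and the clamp-then-average intent are illustrative assumptions rather than code from this patch.

#include "mpmath.h"

/* Illustrative only: clamp a signed sample to the representable range and
 * average it with a reference value at an assumed 12-bit working precision. */
static mpsdata_t example_clamp_and_average(mpsdata_t sample, mpsdata_t ref)
{
	const bitdepth_t bd = 12;                 /* assumed working bit depth */
	mpsdata_t lo = mp_const(mp_smin_ID, bd);  /* smallest representable value */
	mpsdata_t hi = mp_const(mp_smax_ID, bd);  /* largest representable value */
	mpsdata_t clamped = mp_limit(lo, sample, hi, bd);

	/* mp_addasr1() presumably adds and arithmetic-shifts right by one,
	 * i.e. an overflow-safe average of its two operands. */
	return mp_addasr1(clamped, ref, bd);
}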
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/osys.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/osys.h
new file mode 100644
index 000000000000..6e48ea9afc29
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/osys.h
@@ -0,0 +1,48 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __OSYS_H_INCLUDED__
+#define __OSYS_H_INCLUDED__
+
+/*
+ * This file is included on every cell {SP,ISP,host} and on every system
+ * that uses the OSYS device. It defines the API to the DLI bridge.
+ *
+ * System and cell specific interfaces and inline code are included
+ * conditionally through Makefile path settings.
+ *
+ * - system and cell agnostic interfaces, constants and identifiers
+ * - public: system agnostic, cell specific interfaces
+ * - private: system dependent, cell specific interfaces & inline implementations
+ * - global: system specific constants and identifiers
+ * - local: system and cell specific constants and identifiers
+ *
+ */
+
+#include "storage_class.h"
+
+#include "system_local.h"
+#include "osys_local.h"
+
+#ifndef __INLINE_OSYS__
+#define STORAGE_CLASS_OSYS_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_OSYS_C
+#include "osys_public.h"
+#else /* __INLINE_OSYS__ */
+#define STORAGE_CLASS_OSYS_H STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_OSYS_C STORAGE_CLASS_INLINE
+#include "osys_private.h"
+#endif /* __INLINE_OSYS__ */
+
+#endif /* __OSYS_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/pixelgen.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/pixelgen.h
new file mode 100644
index 000000000000..67f7f3a14231
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/pixelgen.h
@@ -0,0 +1,49 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __PIXELGEN_H_INCLUDED__
+#define __PIXELGEN_H_INCLUDED__
+
+
+/*
+ * This file is included on every cell {SP,ISP,host} and on every system
+ * that uses the input system device(s). It defines the API to the DLI bridge.
+ *
+ * System and cell specific interfaces and inline code are included
+ * conditionally through Makefile path settings.
+ *
+ * - system and cell agnostic interfaces, constants and identifiers
+ * - public: system agnostic, cell specific interfaces
+ * - private: system dependent, cell specific interfaces &
+ * inline implementations
+ * - global: system specific constants and identifiers
+ * - local: system and cell specific constants and identifiers
+ */
+
+#include "storage_class.h"
+
+#include "system_local.h"
+#include "pixelgen_local.h"
+
+#ifndef __INLINE_PIXELGEN__
+#define STORAGE_CLASS_PIXELGEN_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_PIXELGEN_C
+#include "pixelgen_public.h"
+#else /* __INLINE_PIXELGEN__ */
+#define STORAGE_CLASS_PIXELGEN_H STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_PIXELGEN_C STORAGE_CLASS_INLINE
+#include "pixelgen_private.h"
+#endif /* __INLINE_PIXELGEN__ */
+
+#endif /* __PIXELGEN_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/platform_support.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/platform_support.h
new file mode 100644
index 000000000000..02f9eee67ff3
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/platform_support.h
@@ -0,0 +1,42 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __PLATFORM_SUPPORT_H_INCLUDED__
+#define __PLATFORM_SUPPORT_H_INCLUDED__
+
+/**
+* @file
+* Platform specific includes and functionality.
+*/
+
+#include "storage_class.h"
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+
+/* For definition of hrt_sleep() */
+#include "hive_isp_css_custom_host_hrt.h"
+
+#define UINT16_MAX USHRT_MAX
+#define UINT32_MAX UINT_MAX
+#define UCHAR_MAX (255)
+
+#define CSS_ALIGN(d, a) d __attribute__((aligned(a)))
+
+/*
+ * Put here everything __KERNEL__ specific not covered in
+ * "assert_support.h", "math_support.h", etc
+ */
+
+#endif /* __PLATFORM_SUPPORT_H_INCLUDED__ */
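CSS_ALIGN() above is just a thin wrapper around the GCC aligned attribute; a minimal, hypothetical use follows (the descriptor struct and the 32-byte boundary are invented for illustration).

/* Illustrative only: place a table of descriptors on a 32-byte boundary. */
struct example_desc {
	uint32_t addr;
	uint32_t size;
};

static CSS_ALIGN(struct example_desc example_table[4], 32);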
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/print_support.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/print_support.h
new file mode 100644
index 000000000000..cfbc222ea0c1
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/print_support.h
@@ -0,0 +1,45 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __PRINT_SUPPORT_H_INCLUDED__
+#define __PRINT_SUPPORT_H_INCLUDED__
+
+#include "storage_class.h"
+
+#include <stdarg.h>
+#if !defined(__KERNEL__)
+#include <stdio.h>
+#endif
+
+extern int (*sh_css_printf) (const char *fmt, va_list args);
+/* depends on the host-supplied print function set via ia_css_init() */
+STORAGE_CLASS_INLINE void ia_css_print(const char *fmt, ...)
+{
+ va_list ap;
+ if (sh_css_printf) {
+ va_start(ap, fmt);
+ sh_css_printf(fmt, ap);
+ va_end(ap);
+ }
+}
+
+/* Tracing macros for the BXT proof of concept, taken from
+ * bxt_sandbox/support/print_support.h. */
+/* TODO: support these macros in userspace. */
+#define PWARN(format, ...) ia_css_print("warning: " format, ##__VA_ARGS__)
+#define PRINT(format, ...) ia_css_print(format, ##__VA_ARGS__)
+#define PERROR(format, ...) ia_css_print("error: " format, ##__VA_ARGS__)
+#define PDEBUG(format, ...) ia_css_print("debug: " format, ##__VA_ARGS__)
+
+#endif /* __PRINT_SUPPORT_H_INCLUDED__ */
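ia_css_print() is a no-op until the host installs a handler through sh_css_printf; the sketch below shows one plausible kernel-side hookup. The vprintk-based handler and the function that installs it are assumptions for illustration, not part of this patch.

/* Illustrative host-side hookup (assumed, not defined by this patch). */
static int example_print_handler(const char *fmt, va_list args)
{
	return vprintk(fmt, args);   /* route CSS messages to the kernel log */
}

static void example_enable_css_printing(void)
{
	sh_css_printf = example_print_handler;
	PDEBUG("CSS printing enabled\n");
}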
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/queue.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/queue.h
new file mode 100644
index 000000000000..a3d874b9516a
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/queue.h
@@ -0,0 +1,47 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __QUEUE_H_INCLUDED__
+#define __QUEUE_H_INCLUDED__
+
+/*
+ * This file is included on every cell {SP,ISP,host} and is system agnostic
+ *
+ * System and cell specific interfaces and inline code are included
+ * conditionally through Makefile path settings.
+ *
+ * - system and cell agnostic interfaces, constants and identifiers
+ * - public: cell specific interfaces
+ * - private: cell specific inline implementations
+ * - global: inter cell constants and identifiers
+ * - local: cell specific constants and identifiers
+ *
+ */
+
+#include <storage_class.h>
+
+#include "queue_local.h"
+
+#ifndef __INLINE_QUEUE__
+#define STORAGE_CLASS_QUEUE_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_QUEUE_C
+/* #include "queue_public.h" */
+#include "ia_css_queue.h"
+#else /* __INLINE_QUEUE__ */
+#define STORAGE_CLASS_QUEUE_H STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_QUEUE_C STORAGE_CLASS_INLINE
+#include "queue_private.h"
+#endif /* __INLINE_QUEUE__ */
+
+#endif /* __QUEUE_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/resource.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/resource.h
new file mode 100644
index 000000000000..82c55acd0380
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/resource.h
@@ -0,0 +1,48 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __RESOURCE_H_INCLUDED__
+#define __RESOURCE_H_INCLUDED__
+
+/*
+ * This file is included on every cell {SP,ISP,host} and on every system
+ * that uses a RESOURCE manager. It defines the API to the DLI bridge.
+ *
+ * System and cell specific interfaces and inline code are included
+ * conditionally through Makefile path settings.
+ *
+ * - system and cell agnostic interfaces, constants and identifiers
+ * - public: system agnostic, cell specific interfaces
+ * - private: system dependent, cell specific interfaces & inline implementations
+ * - global: system specific constants and identifiers
+ * - local: system and cell specific constants and identifiers
+ *
+ */
+
+#include "storage_class.h"
+
+#include "system_local.h"
+#include "resource_local.h"
+
+#ifndef __INLINE_RESOURCE__
+#define STORAGE_CLASS_RESOURCE_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_RESOURCE_C
+#include "resource_public.h"
+#else /* __INLINE_RESOURCE__ */
+#define STORAGE_CLASS_RESOURCE_H STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_RESOURCE_C STORAGE_CLASS_INLINE
+#include "resource_private.h"
+#endif /* __INLINE_RESOURCE__ */
+
+#endif /* __RESOURCE_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/socket.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/socket.h
new file mode 100644
index 000000000000..c34c2e75c51f
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/socket.h
@@ -0,0 +1,48 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __SOCKET_H_INCLUDED__
+#define __SOCKET_H_INCLUDED__
+
+/*
+ * This file is included on every cell {SP,ISP,host} and on every system
+ * that uses the DMA device. It defines the API to the DLI bridge.
+ *
+ * System and cell specific interfaces and inline code are included
+ * conditionally through Makefile path settings.
+ *
+ * - system and cell agnostic interfaces, constants and identifiers
+ * - public: system agnostic, cell specific interfaces
+ * - private: system dependent, cell specific interfaces & inline implementations
+ * - global: system specific constants and identifiers
+ * - local: system and cell specific constants and identifiers
+ *
+ */
+
+#include "storage_class.h"
+
+#include "system_local.h"
+#include "socket_local.h"
+
+#ifndef __INLINE_SOCKET__
+#define STORAGE_CLASS_SOCKET_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_SOCKET_C
+#include "socket_public.h"
+#else /* __INLINE_SOCKET__ */
+#define STORAGE_CLASS_SOCKET_H STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_SOCKET_C STORAGE_CLASS_INLINE
+#include "socket_private.h"
+#endif /* __INLINE_SOCKET__ */
+
+#endif /* __SOCKET_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/sp.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/sp.h
new file mode 100644
index 000000000000..150fc2f6129b
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/sp.h
@@ -0,0 +1,47 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __SP_H_INCLUDED__
+#define __SP_H_INCLUDED__
+
+/*
+ * This file is included on every cell {SP,ISP,host} and on every system
+ * that uses the SP cell. It defines the API to the DLI bridge.
+ *
+ * System and cell specific interfaces and inline code are included
+ * conditionally through Makefile path settings.
+ *
+ * - system and cell agnostic interfaces, constants and identifiers
+ * - public: system agnostic, cell specific interfaces
+ * - private: system dependent, cell specific interfaces & inline implementations
+ * - global: system specific constants and identifiers
+ * - local: system and cell specific constants and identifiers
+ */
+
+#include "storage_class.h"
+
+#include "system_local.h"
+#include "sp_local.h"
+
+#ifndef __INLINE_SP__
+#define STORAGE_CLASS_SP_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_SP_C
+#include "sp_public.h"
+#else /* __INLINE_SP__ */
+#define STORAGE_CLASS_SP_H STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_SP_C STORAGE_CLASS_INLINE
+#include "sp_private.h"
+#endif /* __INLINE_SP__ */
+
+#endif /* __SP_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/storage_class.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/storage_class.h
new file mode 100644
index 000000000000..3908e668dacd
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/storage_class.h
@@ -0,0 +1,34 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __STORAGE_CLASS_H_INCLUDED__
+#define __STORAGE_CLASS_H_INCLUDED__
+
+/**
+* @file
+* Storage class qualifiers (extern vs. static inline) shared by all cells.
+*/
+
+#define STORAGE_CLASS_EXTERN extern
+
+#if defined(_MSC_VER)
+#define STORAGE_CLASS_INLINE static __inline
+#else
+#define STORAGE_CLASS_INLINE static inline
+#endif
+
+#define STORAGE_CLASS_EXTERN_DATA extern const
+#define STORAGE_CLASS_INLINE_DATA static const
+
+#endif /* __STORAGE_CLASS_H_INCLUDED__ */
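These qualifiers drive the extern-versus-inline pattern that every header in this series repeats: a module header picks a storage class for its function declarations and then includes either its public declarations or its private inline bodies. A condensed, hypothetical rendering of that pattern for a made-up module "foo":

/* Hypothetical module header following the pattern used throughout this series. */
#ifndef __INLINE_FOO__
#define STORAGE_CLASS_FOO_H STORAGE_CLASS_EXTERN
#include "foo_public.h"   /* declarations only, e.g. STORAGE_CLASS_FOO_H void foo_init(void); */
#else
#define STORAGE_CLASS_FOO_H STORAGE_CLASS_INLINE
#include "foo_private.h"  /* full definitions, inlined into every includer */
#endif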
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/stream_buffer.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/stream_buffer.h
new file mode 100644
index 000000000000..8e41f60b5d39
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/stream_buffer.h
@@ -0,0 +1,48 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __STREAM_BUFFER_H_INCLUDED__
+#define __STREAM_BUFFER_H_INCLUDED__
+
+/*
+ * This file is included on every cell {SP,ISP,host} and on every system
+ * that uses the DMA device. It defines the API to the DLI bridge.
+ *
+ * System and cell specific interfaces and inline code are included
+ * conditionally through Makefile path settings.
+ *
+ * - system and cell agnostic interfaces, constants and identifiers
+ * - public: system agnostic, cell specific interfaces
+ * - private: system dependent, cell specific interfaces & inline implementations
+ * - global: system specific constants and identifiers
+ * - local: system and cell specific constants and identifiers
+ *
+ */
+
+#include "storage_class.h"
+
+#include "system_local.h"
+#include "stream_buffer_local.h"
+
+#ifndef __INLINE_STREAM_BUFFER__
+#define STORAGE_CLASS_STREAM_BUFFER_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_STREAM_BUFFER_C
+#include "stream_buffer_public.h"
+#else /* __INLINE_STREAM_BUFFER__ */
+#define STORAGE_CLASS_STREAM_BUFFER_H STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_STREAM_BUFFER_C STORAGE_CLASS_INLINE
+#include "stream_buffer_private.h"
+#endif /* __INLINE_STREAM_BUFFER__ */
+
+#endif /* __STREAM_BUFFER_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/string_support.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/string_support.h
new file mode 100644
index 000000000000..568631698a3d
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/string_support.h
@@ -0,0 +1,167 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __STRING_SUPPORT_H_INCLUDED__
+#define __STRING_SUPPORT_H_INCLUDED__
+#include <platform_support.h>
+#include <type_support.h>
+#include <storage_class.h>
+
+#if !defined(_MSC_VER)
+/*
+ * For all non microsoft cases, we need the following functions
+ */
+
+
+/** @brief Copy from src_buf to dest_buf.
+ *
+ * @param[out] dest_buf. Destination buffer to copy to
+ * @param[in] dest_size. The size of the destination buffer in bytes
+ * @param[in] src_buf. The source buffer
+ * @param[in] src_size. The size of the source buffer in bytes
+ * @return 0 on success, error code on failure
+ * @return EINVAL on invalid arguments
+ * @return ERANGE when the destination is too small
+ */
+STORAGE_CLASS_INLINE int memcpy_s(
+ void* dest_buf,
+ size_t dest_size,
+ const void* src_buf,
+ size_t src_size)
+{
+ if ((src_buf == NULL) || (dest_buf == NULL)) {
+ /* Invalid arguments*/
+ return EINVAL;
+ }
+
+ if ((dest_size < src_size) || (src_size == 0)) {
+ /* Destination too small or nothing to copy */
+ return ERANGE;
+ }
+
+ memcpy(dest_buf, src_buf, src_size);
+ return 0;
+}
+
+/** @brief Get the length of the string, excluding the null terminator
+ *
+ * @param[in] src_str. The source string
+ * @param[in] max_len. Look only for max_len bytes in the string
+ * @return Returns the string length, excluding the null character
+ * @return Returns max_len if no null character is found in the first max_len bytes
+ * @return Returns 0 if src_str is NULL
+ */
+STORAGE_CLASS_INLINE size_t strnlen_s(
+ const char* src_str,
+ size_t max_len)
+{
+ size_t ix;
+ if (src_str == NULL) {
+ /* Invalid arguments*/
+ return 0;
+ }
+
+ for (ix = 0; (src_str[ix] != '\0') && (ix < max_len); ++ix)
+ ; /* nothing else to do */
+
+ /* If no null character was found, ix == max_len */
+ return ix;
+}
+
+/** @brief Copy string from src_str to dest_str
+ *
+ * @param[out] dest_str. Destination buffer to copy to
+ * @param[in] dest_size. The size of the destination buffer in bytes
+ * @param[in] src_str. The source buffer
+ * @param[in] src_size. The maximum number of characters to copy from src_str
+ * @return Returns 0 on success
+ * @return Returns EINVAL on invalid arguments
+ * @return Returns ERANGE on destination size too small
+ */
+STORAGE_CLASS_INLINE int strncpy_s(
+ char* dest_str,
+ size_t dest_size,
+ const char* src_str,
+ size_t src_size)
+{
+ size_t len;
+ if (dest_str == NULL) {
+ /* Invalid arguments*/
+ return EINVAL;
+ }
+
+ if ((src_str == NULL) || (dest_size == 0)) {
+ /* Invalid arguments*/
+ dest_str[0] = '\0';
+ return EINVAL;
+ }
+
+ len = strnlen_s(src_str, src_size);
+
+ if (len >= dest_size) {
+ /* Destination too small*/
+ dest_str[0] = '\0';
+ return ERANGE;
+ }
+
+ /* dest_str is big enough for the len */
+ strncpy(dest_str, src_str, len);
+ dest_str[len] = '\0';
+ return 0;
+}
+
+/** @brief Copy string from src_str to dest_str
+ *
+ * @param[out] dest_str. Destination buffer to copy to
+ * @param[in] dest_size. The size of the destination buffer in bytes
+ * @param[in] src_str. The source buffer
+ * @return Returns 0 on success
+ * @return Returns EINVAL on invalid arguments
+ * @return Returns ERANGE on destination size too small
+ */
+STORAGE_CLASS_INLINE int strcpy_s(
+ char* dest_str,
+ size_t dest_size,
+ const char* src_str)
+{
+ size_t len;
+ if (dest_str == NULL) {
+ /* Invalid arguments*/
+ return EINVAL;
+ }
+
+ if ((src_str == NULL) || (dest_size == 0)) {
+ /* Invalid arguments*/
+ dest_str[0] = '\0';
+ return EINVAL;
+ }
+
+ len = strnlen_s(src_str, dest_size);
+
+ if (len >= dest_size) {
+ /* Destination too small*/
+ dest_str[0] = '\0';
+ return ERANGE;
+ }
+
+ /* dest_str is big enough for the len */
+ strncpy(dest_str, src_str, len);
+ dest_str[len] = '\0';
+ return 0;
+}
+
+#endif /*!defined(_MSC_VER)*/
+
+#endif /* __STRING_SUPPORT_H_INCLUDED__ */
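Because these helpers return errno-style codes rather than pointers, callers are expected to check the result; a small, hypothetical caller follows (the fallback-to-truncation policy is an illustrative choice, not something this header mandates).

/* Illustrative only: copy a name defensively, truncating when it does not fit. */
static int example_copy_name(char *dst, size_t dst_size, const char *src)
{
	int err = strcpy_s(dst, dst_size, src);

	if (err == ERANGE && dst_size > 1) {
		/* destination too small: fall back to a truncating copy */
		err = strncpy_s(dst, dst_size, src, dst_size - 1);
	}
	return err;   /* 0 on success, EINVAL/ERANGE otherwise */
}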
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/system_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/system_types.h
new file mode 100644
index 000000000000..a8c19cee17da
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/system_types.h
@@ -0,0 +1,25 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#ifndef __SYSTEM_TYPES_H_INCLUDED__
+#define __SYSTEM_TYPES_H_INCLUDED__
+
+/**
+* @file
+* Platform specific types.
+*/
+
+
+#include "system_local.h"
+
+#endif /* __SYSTEM_TYPES_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/tag.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/tag.h
new file mode 100644
index 000000000000..7385fd11c95f
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/tag.h
@@ -0,0 +1,46 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __TAG_H_INCLUDED__
+#define __TAG_H_INCLUDED__
+
+/*
+ * This file is included on every cell {SP,ISP,host} and is system agnostic
+ *
+ * System and cell specific interfaces and inline code are included
+ * conditionally through Makefile path settings.
+ *
+ * - system and cell agnostic interfaces, constants and identifiers
+ * - public: cell specific interfaces
+ * - private: cell specific inline implementations
+ * - global: inter cell constants and identifiers
+ * - local: cell specific constants and identifiers
+ *
+ */
+
+#include "storage_class.h"
+
+#include "tag_local.h"
+
+#ifndef __INLINE_TAG__
+#define STORAGE_CLASS_TAG_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_TAG_C
+#include "tag_public.h"
+#else /* __INLINE_TAG__ */
+#define STORAGE_CLASS_TAG_H STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_TAG_C STORAGE_CLASS_INLINE
+#include "tag_private.h"
+#endif /* __INLINE_TAG__ */
+
+#endif /* __TAG_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/timed_ctrl.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/timed_ctrl.h
new file mode 100644
index 000000000000..ed13451c9261
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/timed_ctrl.h
@@ -0,0 +1,47 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __TIMED_CTRL_H_INCLUDED__
+#define __TIMED_CTRL_H_INCLUDED__
+
+/*
+ * This file is included on every cell {SP,ISP,host} and on every system
+ * that uses the input system device(s). It defines the API to the DLI bridge.
+ *
+ * System and cell specific interfaces and inline code are included
+ * conditionally through Makefile path settings.
+ *
+ * - system and cell agnostic interfaces, constants and identifiers
+ * - public: system agnostic, cell specific interfaces
+ * - private: system dependent, cell specific interfaces & inline implementations
+ * - global: system specific constants and identifiers
+ * - local: system and cell specific constants and identifiers
+ */
+
+#include "storage_class.h"
+
+#include "system_local.h"
+#include "timed_ctrl_local.h"
+
+#ifndef __INLINE_TIMED_CTRL__
+#define STORAGE_CLASS_TIMED_CTRL_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_TIMED_CTRL_C
+#include "timed_ctrl_public.h"
+#else /* __INLINE_TIMED_CTRL__ */
+#define STORAGE_CLASS_TIMED_CTRL_H STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_TIMED_CTRL_C STORAGE_CLASS_INLINE
+#include "timed_ctrl_private.h"
+#endif /* __INLINE_TIMED_CTRL__ */
+
+#endif /* __TIMED_CTRL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/type_support.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/type_support.h
new file mode 100644
index 000000000000..b82fa3eba79f
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/type_support.h
@@ -0,0 +1,82 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __TYPE_SUPPORT_H_INCLUDED__
+#define __TYPE_SUPPORT_H_INCLUDED__
+
+/**
+* @file
+* Platform specific types.
+*
+* Per the DLI spec, types are in "type_support.h" and
+* "platform_support.h" is for unclassified/to be refactored
+* platform specific definitions.
+*/
+
+#define IA_CSS_UINT8_T_BITS 8
+#define IA_CSS_UINT16_T_BITS 16
+#define IA_CSS_UINT32_T_BITS 32
+#define IA_CSS_INT32_T_BITS 32
+#define IA_CSS_UINT64_T_BITS 64
+
+#if defined(_MSC_VER)
+#include <stdint.h>
+/* For ATE compilation, define the bool type ourselves */
+#if defined(_ATE_)
+#define bool int
+#define true 1
+#define false 0
+#else
+#include <stdbool.h>
+#endif
+#include <stddef.h>
+#include <limits.h>
+#include <errno.h>
+#if defined(_M_X64)
+#define HOST_ADDRESS(x) (unsigned long long)(x)
+#else
+#define HOST_ADDRESS(x) (unsigned long)(x)
+#endif
+
+#elif defined(__KERNEL__)
+
+#define CHAR_BIT (8)
+
+#include <linux/types.h>
+#include <linux/limits.h>
+#include <linux/errno.h>
+#define HOST_ADDRESS(x) (unsigned long)(x)
+
+#elif defined(__GNUC__)
+#ifndef __STDC_LIMIT_MACROS
+#define __STDC_LIMIT_MACROS 1
+#endif
+#include <stdint.h>
+#include <stdbool.h>
+#include <stddef.h>
+#include <limits.h>
+#include <errno.h>
+#define HOST_ADDRESS(x) (unsigned long)(x)
+
+#else /* default is for the FIST environment */
+#include <stdint.h>
+#include <stdbool.h>
+#include <stddef.h>
+#include <limits.h>
+#include <errno.h>
+#define HOST_ADDRESS(x) (unsigned long)(x)
+
+#endif
+
+#endif /* __TYPE_SUPPORT_H_INCLUDED__ */
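HOST_ADDRESS() exists so that pointer values can be carried in integer bookkeeping that is safe on both 32-bit and 64-bit hosts; a hedged one-liner of the kind of use made of it (the offset computation is invented for illustration):

/* Illustrative only: a byte offset between two host pointers, computed in the
 * integer width HOST_ADDRESS() selects for this build. */
static unsigned long example_offset(const void *base, const void *elem)
{
	return HOST_ADDRESS(elem) - HOST_ADDRESS(base);
}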
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/vamem.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/vamem.h
new file mode 100644
index 000000000000..acf932e1f563
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/vamem.h
@@ -0,0 +1,47 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __VAMEM_H_INCLUDED__
+#define __VAMEM_H_INCLUDED__
+
+/*
+ * This file is included on every cell {SP,ISP,host} and on every system
+ * that uses the VAMEM device. It defines the API to the DLI bridge.
+ *
+ * System and cell specific interfaces and inline code are included
+ * conditionally through Makefile path settings.
+ *
+ * - system and cell agnostic interfaces, constants and identifiers
+ * - public: system agnostic, cell specific interfaces
+ * - private: system dependent, cell specific interfaces & inline implementations
+ * - global: system specific constants and identifiers
+ * - local: system and cell specific constants and identifiers
+ */
+
+#include "storage_class.h"
+
+#include "system_local.h"
+#include "vamem_local.h"
+
+#ifndef __INLINE_VAMEM__
+#define STORAGE_CLASS_VAMEM_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_VAMEM_C
+#include "vamem_public.h"
+#else /* __INLINE_VAMEM__ */
+#define STORAGE_CLASS_VAMEM_H STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_VAMEM_C STORAGE_CLASS_INLINE
+#include "vamem_private.h"
+#endif /* __INLINE_VAMEM__ */
+
+#endif /* __VAMEM_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/vector_func.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/vector_func.h
new file mode 100644
index 000000000000..5d3be31759e4
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/vector_func.h
@@ -0,0 +1,39 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __VECTOR_FUNC_H_INCLUDED__
+#define __VECTOR_FUNC_H_INCLUDED__
+
+#include "storage_class.h"
+
+/* TODO: Later filters will be moved to types directory,
+ * and we should only include matrix_MxN types */
+#include "filters/filters_1.0/filter_2x2.h"
+#include "filters/filters_1.0/filter_3x3.h"
+#include "filters/filters_1.0/filter_4x4.h"
+#include "filters/filters_1.0/filter_5x5.h"
+
+#include "vector_func_local.h"
+
+#ifndef __INLINE_VECTOR_FUNC__
+#define STORAGE_CLASS_VECTOR_FUNC_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_VECTOR_FUNC_C
+#include "vector_func_public.h"
+#else /* __INLINE_VECTOR_FUNC__ */
+#define STORAGE_CLASS_VECTOR_FUNC_H STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_VECTOR_FUNC_C STORAGE_CLASS_INLINE
+#include "vector_func_private.h"
+#endif /* __INLINE_VECTOR_FUNC__ */
+
+#endif /* __VECTOR_FUNC_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/vector_ops.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/vector_ops.h
new file mode 100644
index 000000000000..261f87378ce5
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/vector_ops.h
@@ -0,0 +1,32 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __VECTOR_OPS_H_INCLUDED__
+#define __VECTOR_OPS_H_INCLUDED__
+
+#include "storage_class.h"
+
+#include "vector_ops_local.h"
+
+#ifndef __INLINE_VECTOR_OPS__
+#define STORAGE_CLASS_VECTOR_OPS_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_VECTOR_OPS_C
+#include "vector_ops_public.h"
+#else /* __INLINE_VECTOR_OPS__ */
+#define STORAGE_CLASS_VECTOR_OPS_H STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_VECTOR_OPS_C STORAGE_CLASS_INLINE
+#include "vector_ops_private.h"
+#endif /* __INLINE_VECTOR_OPS__ */
+
+#endif /* __VECTOR_OPS_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/vmem.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/vmem.h
new file mode 100644
index 000000000000..79a36755bfd9
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/vmem.h
@@ -0,0 +1,47 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __VMEM_H_INCLUDED__
+#define __VMEM_H_INCLUDED__
+
+/*
+ * This file is included on every cell {SP,ISP,host} and on every system
+ * that uses the VMEM device. It defines the API to the DLI bridge.
+ *
+ * System and cell specific interfaces and inline code are included
+ * conditionally through Makefile path settings.
+ *
+ * - system and cell agnostic interfaces, constants and identifiers
+ * - public: system agnostic, cell specific interfaces
+ * - private: system dependent, cell specific interfaces & inline implementations
+ * - global: system specific constants and identifiers
+ * - local: system and cell specific constants and identifiers
+ */
+
+#include "storage_class.h"
+
+#include "system_local.h"
+#include "vmem_local.h"
+
+#ifndef __INLINE_VMEM__
+#define STORAGE_CLASS_VMEM_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_VMEM_C
+#include "vmem_public.h"
+#else /* __INLINE_VMEM__ */
+#define STORAGE_CLASS_VMEM_H STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_VMEM_C STORAGE_CLASS_INLINE
+#include "vmem_private.h"
+#endif /* __INLINE_VMEM__ */
+
+#endif /* __VMEM_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/xmem.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/xmem.h
new file mode 100644
index 000000000000..9169e04f9b4b
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/xmem.h
@@ -0,0 +1,47 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __XMEM_H_INCLUDED__
+#define __XMEM_H_INCLUDED__
+
+/*
+ * This file is included on every cell {SP,ISP,host} and on every system
+ * that uses the XMEM device. It defines the API to the DLI bridge.
+ *
+ * System and cell specific interfaces and inline code are included
+ * conditionally through Makefile path settings.
+ *
+ * - system and cell agnostic interfaces, constants and identifiers
+ * - public: system agnostic, cell specific interfaces
+ * - private: system dependent, cell specific interfaces & inline implementations
+ * - global: system specific constants and identifiers
+ * - local: system and cell specific constants and identifiers
+ */
+
+#include "storage_class.h"
+
+#include "system_local.h"
+#include "xmem_local.h"
+
+#ifndef __INLINE_XMEM__
+#define STORAGE_CLASS_XMEM_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_XMEM_C
+#include "xmem_public.h"
+#else /* __INLINE_XMEM__ */
+#define STORAGE_CLASS_XMEM_H STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_XMEM_C STORAGE_CLASS_INLINE
+#include "xmem_private.h"
+#endif /* __INLINE_XMEM__ */
+
+#endif /* __XMEM_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_shared/host/queue_local.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_shared/host/queue_local.h
new file mode 100644
index 000000000000..9f4060319b4b
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_shared/host/queue_local.h
@@ -0,0 +1,20 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __QUEUE_LOCAL_H_INCLUDED__
+#define __QUEUE_LOCAL_H_INCLUDED__
+
+#include "queue_global.h"
+
+#endif /* __QUEUE_LOCAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_shared/host/queue_private.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_shared/host/queue_private.h
new file mode 100644
index 000000000000..2b396955cdad
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_shared/host/queue_private.h
@@ -0,0 +1,18 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __QUEUE_PRIVATE_H_INCLUDED__
+#define __QUEUE_PRIVATE_H_INCLUDED__
+
+#endif /* __QUEUE_PRIVATE_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_shared/host/tag.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_shared/host/tag.c
new file mode 100644
index 000000000000..9aa8c168a803
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_shared/host/tag.c
@@ -0,0 +1,95 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "tag.h"
+#include <platform_support.h> /* NULL */
+#include <assert_support.h>
+#include "tag_local.h"
+
+/**
+ * @brief Creates the tag description from the given parameters.
+ * @param[in] num_captures Number of RAW frames to capture (-1 for continuous)
+ * @param[in] skip Number of frames to skip between captures
+ * @param[in] offset Offset into the RAW buffer to start processing from
+ * @param[in] exp_id Exposure id of the RAW frame to tag
+ * @param[out] tag_descr The tag description to fill in
+ */
+void
+sh_css_create_tag_descr(int num_captures,
+ unsigned int skip,
+ int offset,
+ unsigned int exp_id,
+ struct sh_css_tag_descr *tag_descr)
+{
+ assert(tag_descr != NULL);
+
+ tag_descr->num_captures = num_captures;
+ tag_descr->skip = skip;
+ tag_descr->offset = offset;
+ tag_descr->exp_id = exp_id;
+}
+
+/**
+ * @brief Encodes the members of tag description into a 32-bit value.
+ * @param[in] tag Pointer to the tag description
+ * @return (unsigned int) Encoded 32-bit tag-info
+ */
+unsigned int
+sh_css_encode_tag_descr(struct sh_css_tag_descr *tag)
+{
+ int num_captures;
+ unsigned int num_captures_sign;
+ unsigned int skip;
+ int offset;
+ unsigned int offset_sign;
+ unsigned int exp_id;
+ unsigned int encoded_tag;
+
+ assert(tag != NULL);
+
+ if (tag->num_captures < 0) {
+ num_captures = -tag->num_captures;
+ num_captures_sign = 1;
+ } else {
+ num_captures = tag->num_captures;
+ num_captures_sign = 0;
+ }
+ skip = tag->skip;
+ if (tag->offset < 0) {
+ offset = -tag->offset;
+ offset_sign = 1;
+ } else {
+ offset = tag->offset;
+ offset_sign = 0;
+ }
+ exp_id = tag->exp_id;
+
+ if (exp_id != 0) {
+ /* we encode either an exp_id or capture data */
+ assert((num_captures == 0) && (skip == 0) && (offset == 0));
+
+ encoded_tag = TAG_EXP | ((exp_id & 0xFF) << TAG_EXP_ID_SHIFT);
+ } else {
+ encoded_tag = TAG_CAP
+ | ((num_captures_sign & 0x00000001) << TAG_NUM_CAPTURES_SIGN_SHIFT)
+ | ((offset_sign & 0x00000001) << TAG_OFFSET_SIGN_SHIFT)
+ | ((num_captures & 0x000000FF) << TAG_NUM_CAPTURES_SHIFT)
+ | ((skip & 0x000000FF) << TAG_OFFSET_SHIFT)
+ | ((offset & 0x000000FF) << TAG_SKIP_SHIFT);
+ }
+ return encoded_tag;
+}
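A short usage sketch for the two functions above: build a descriptor for a three-frame capture and pack it for the SP. The particular values (three captures, skip one, offset zero) are invented; only the functions and the struct come from this patch.

/* Illustrative only: tag three RAW frames, skipping every other frame. */
static unsigned int example_build_capture_tag(void)
{
	struct sh_css_tag_descr descr;

	sh_css_create_tag_descr(3 /* num_captures */, 1 /* skip */,
				0 /* offset */, 0 /* exp_id */, &descr);
	return sh_css_encode_tag_descr(&descr);
}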
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_shared/host/tag_local.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_shared/host/tag_local.h
new file mode 100644
index 000000000000..01a8977c189e
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_shared/host/tag_local.h
@@ -0,0 +1,22 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __TAG_LOCAL_H_INCLUDED__
+#define __TAG_LOCAL_H_INCLUDED__
+
+#include "tag_global.h"
+
+#define SH_CSS_MINIMUM_TAG_ID (-1)
+
+#endif /* __TAG_LOCAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_shared/host/tag_private.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_shared/host/tag_private.h
new file mode 100644
index 000000000000..0570a95ec5bf
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_shared/host/tag_private.h
@@ -0,0 +1,18 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __TAG_PRIVATE_H_INCLUDED__
+#define __TAG_PRIVATE_H_INCLUDED__
+
+#endif /* __TAG_PRIVATE_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_shared/queue_global.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_shared/queue_global.h
new file mode 100644
index 000000000000..61330daab734
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_shared/queue_global.h
@@ -0,0 +1,19 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __QUEUE_GLOBAL_H_INCLUDED__
+#define __QUEUE_GLOBAL_H_INCLUDED__
+
+#endif /* __QUEUE_GLOBAL_H_INCLUDED__ */
+
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_shared/socket_global.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_shared/socket_global.h
new file mode 100644
index 000000000000..2b7025e90250
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_shared/socket_global.h
@@ -0,0 +1,53 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __SOCKET_GLOBAL_H_INCLUDED__
+#define __SOCKET_GLOBAL_H_INCLUDED__
+
+#include "stream_buffer.h"
+
+/* define the socket port direction */
+typedef enum {
+ SOCKET_PORT_DIRECTION_NULL,
+ SOCKET_PORT_DIRECTION_IN,
+ SOCKET_PORT_DIRECTION_OUT
+} socket_port_direction_t;
+
+/* pointer to the port's callout function */
+typedef void (*socket_port_callout_fp)(void);
+typedef struct socket_port_s socket_port_t;
+typedef struct socket_s socket_t;
+
+/* data structure of the socket port */
+struct socket_port_s {
+ unsigned channel; /* the port entity */
+ socket_port_direction_t direction; /* the port direction */
+ socket_port_callout_fp callout; /* the port callout function */
+
+ socket_t *socket; /* point to the socket */
+
+ struct {
+ unsigned data;
+ } buf; /* the buffer at the port */
+};
+
+/* data structure of the socket */
+struct socket_s {
+ socket_port_t *in; /* the in-direction port */
+ socket_port_t *out; /* the out-direction port */
+ stream_buffer_t buf; /* the buffer between in-ports and out-ports */
+};
+
+#endif /* __SOCKET_GLOBAL_H_INCLUDED__ */
+
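The structures above only describe the wiring; a hedged sketch of how one socket might be tied to an in-port and an out-port (the callout, the channel numbers and the wiring function are illustrative assumptions):

/* Illustrative wiring only (names and channel numbers are invented). */
static void example_port_callout(void) { /* service the port's buffer */ }

static socket_t example_sock;

static socket_port_t example_in = {
	.channel   = 0,
	.direction = SOCKET_PORT_DIRECTION_IN,
	.callout   = example_port_callout,
	.socket    = &example_sock,
};

static socket_port_t example_out = {
	.channel   = 1,
	.direction = SOCKET_PORT_DIRECTION_OUT,
	.callout   = example_port_callout,
	.socket    = &example_sock,
};

static void example_wire_socket(void)
{
	example_sock.in  = &example_in;
	example_sock.out = &example_out;
}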
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_shared/stream_buffer_global.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_shared/stream_buffer_global.h
new file mode 100644
index 000000000000..b9664b9608dc
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_shared/stream_buffer_global.h
@@ -0,0 +1,26 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __STREAM_BUFFER_GLOBAL_H_INCLUDED__
+#define __STREAM_BUFFER_GLOBAL_H_INCLUDED__
+
+typedef struct stream_buffer_s stream_buffer_t;
+struct stream_buffer_s {
+ unsigned base;
+ unsigned limit;
+ unsigned top;
+};
+
+#endif /* __STREAM_BUFFER_GLOBAL_H_INCLUDED__ */
+
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_shared/sw_event_global.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_shared/sw_event_global.h
new file mode 100644
index 000000000000..c0d2efadbbe3
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_shared/sw_event_global.h
@@ -0,0 +1,36 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __SW_EVENT_GLOBAL_H_INCLUDED__
+#define __SW_EVENT_GLOBAL_H_INCLUDED__
+
+#define MAX_NR_OF_PAYLOADS_PER_SW_EVENT 4
+
+enum ia_css_psys_sw_event {
+ IA_CSS_PSYS_SW_EVENT_BUFFER_ENQUEUED, /* from host to SP */
+ IA_CSS_PSYS_SW_EVENT_BUFFER_DEQUEUED, /* from SP to host */
+ IA_CSS_PSYS_SW_EVENT_EVENT_DEQUEUED, /* from SP to host, one way only */
+ IA_CSS_PSYS_SW_EVENT_START_STREAM,
+ IA_CSS_PSYS_SW_EVENT_STOP_STREAM,
+ IA_CSS_PSYS_SW_EVENT_MIPI_BUFFERS_READY,
+ IA_CSS_PSYS_SW_EVENT_UNLOCK_RAW_BUFFER,
+ IA_CSS_PSYS_SW_EVENT_STAGE_ENABLE_DISABLE /* for extension state change enable/disable */
+};
+
+enum ia_css_isys_sw_event {
+ IA_CSS_ISYS_SW_EVENT_EVENT_DEQUEUED
+};
+
+#endif /* __SW_EVENT_GLOBAL_H_INCLUDED__ */
+
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_shared/tag_global.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_shared/tag_global.h
new file mode 100644
index 000000000000..fda457792c9c
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_shared/tag_global.h
@@ -0,0 +1,56 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __TAG_GLOBAL_H_INCLUDED__
+#define __TAG_GLOBAL_H_INCLUDED__
+
+/* offsets for encoding/decoding the tag into a uint32_t */
+
+#define TAG_CAP 1
+#define TAG_EXP 2
+
+#define TAG_NUM_CAPTURES_SIGN_SHIFT 6
+#define TAG_OFFSET_SIGN_SHIFT 7
+#define TAG_NUM_CAPTURES_SHIFT 8
+#define TAG_OFFSET_SHIFT 16
+#define TAG_SKIP_SHIFT 24
+
+#define TAG_EXP_ID_SHIFT 8
+
+/* Data structure containing the tagging information which is used in
+ * continuous mode to specify which frames should be captured.
+ * num_captures The number of RAW frames to be processed to
+ * YUV. Setting this to -1 will make continuous
+ * capture run until it is stopped.
+ * skip Skip N frames in between captures. This can be
+ * used to select a slower capture frame rate than
+ * the sensor output frame rate.
+ * offset Start the RAW-to-YUV processing at RAW buffer
+ * with this offset. This allows the user to
+ * process RAW frames that were captured in the
+ * past or future.
+ * exp_id Exposure id of the RAW frame to tag.
+ *
+ * NOTE: either exp_id is 0, or all other fields are 0
+ * (i.e. the structure could equally well be a union).
+ */
+
+struct sh_css_tag_descr {
+ int num_captures;
+ unsigned int skip;
+ int offset;
+ unsigned int exp_id;
+};
+
+#endif /* __TAG_GLOBAL_H_INCLUDED__ */
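
The header only defines the shift constants; as an illustration of how they might be combined, here is a hypothetical host-side helper, pack_tag(), that packs a sh_css_tag_descr into a single uint32_t. The exact bit layout used by the firmware is not shown in this patch, so treat this purely as a sketch of the encoding idea based on the macro names.

    #include <stdint.h>
    #include "tag_global.h"

    /* Hypothetical packing based only on the *_SHIFT names above: magnitudes of
     * num_captures and offset in bits 8..15 and 16..23, their sign bits in
     * bits 6 and 7, and skip in bits 24..31. TAG_CAP marks a capture command. */
    static uint32_t pack_tag(const struct sh_css_tag_descr *d)
    {
        uint32_t num_mag = (uint32_t)(d->num_captures < 0 ? -d->num_captures
                                                          : d->num_captures);
        uint32_t off_mag = (uint32_t)(d->offset < 0 ? -d->offset : d->offset);
        uint32_t tag = TAG_CAP;

        tag |= (uint32_t)(d->num_captures < 0) << TAG_NUM_CAPTURES_SIGN_SHIFT;
        tag |= (uint32_t)(d->offset < 0) << TAG_OFFSET_SIGN_SHIFT;
        tag |= num_mag << TAG_NUM_CAPTURES_SHIFT;
        tag |= off_mag << TAG_OFFSET_SHIFT;
        tag |= d->skip << TAG_SKIP_SHIFT;
        return tag;
    }
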
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css.h
new file mode 100644
index 000000000000..2458b3767c90
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css.h
@@ -0,0 +1,57 @@
+/* Release Version: irci_stable_candrpv_0415_20150521_0458 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _IA_CSS_H_
+#define _IA_CSS_H_
+
+/** @file
+ * This file is the starting point of the CSS-API. It includes all CSS-API
+ * header files.
+ */
+
+#include "ia_css_3a.h"
+#include "ia_css_acc_types.h"
+#include "ia_css_buffer.h"
+#include "ia_css_control.h"
+#include "ia_css_device_access.h"
+#include "ia_css_dvs.h"
+#include "ia_css_env.h"
+#include "ia_css_err.h"
+#include "ia_css_event_public.h"
+#include "ia_css_firmware.h"
+#include "ia_css_frame_public.h"
+#include "ia_css_input_port.h"
+#include "ia_css_irq.h"
+#include "ia_css_metadata.h"
+#include "ia_css_mipi.h"
+#include "ia_css_pipe_public.h"
+#include "ia_css_prbs.h"
+#include "ia_css_properties.h"
+#include "ia_css_stream_format.h"
+#include "ia_css_stream_public.h"
+#include "ia_css_tpg.h"
+#include "ia_css_version.h"
+#include "ia_css_mmu.h"
+#include "ia_css_morph.h"
+#include "ia_css_shading.h"
+#include "ia_css_timer.h"
+
+/*
+ Please do not add code to this file. Public functionality is to be
+ exposed in a function/data type specific header file.
+ Please add to the appropriate header file or create a new one.
+ */
+
+#endif /* _IA_CSS_H_ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_3a.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_3a.h
new file mode 100644
index 000000000000..a80a7dbaf712
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_3a.h
@@ -0,0 +1,188 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_3A_H
+#define __IA_CSS_3A_H
+
+/** @file
+ * This file contains types used for 3A statistics
+ */
+
+#include <type_support.h>
+#include "ia_css_types.h"
+#include "ia_css_err.h"
+#include "system_global.h"
+
+enum ia_css_3a_tables {
+ IA_CSS_S3A_TBL_HI,
+ IA_CSS_S3A_TBL_LO,
+ IA_CSS_RGBY_TBL,
+ IA_CSS_NUM_3A_TABLES
+};
+
+/** Structure that holds 3A statistics in the ISP internal
+ * format. Use ia_css_get_3a_statistics() to translate
+ * this to the format used on the host (3A library).
+ * */
+struct ia_css_isp_3a_statistics {
+ union {
+ struct {
+ ia_css_ptr s3a_tbl;
+ } dmem;
+ struct {
+ ia_css_ptr s3a_tbl_hi;
+ ia_css_ptr s3a_tbl_lo;
+ } vmem;
+ } data;
+ struct {
+ ia_css_ptr rgby_tbl;
+ } data_hmem;
+ uint32_t exp_id; /**< exposure id, to match statistics to a frame,
+ see ia_css_event_public.h for more detail. */
+ uint32_t isp_config_id;/**< Unique ID to track which config was actually applied to a particular frame */
+ ia_css_ptr data_ptr; /**< pointer to base of all data */
+ uint32_t size; /**< total size of all data */
+ uint32_t dmem_size;
+ uint32_t vmem_size; /**< both lo and hi have this size */
+ uint32_t hmem_size;
+};
+#define SIZE_OF_DMEM_STRUCT \
+ (SIZE_OF_IA_CSS_PTR)
+
+#define SIZE_OF_VMEM_STRUCT \
+ (2 * SIZE_OF_IA_CSS_PTR)
+
+#define SIZE_OF_DATA_UNION \
+ (MAX(SIZE_OF_DMEM_STRUCT, SIZE_OF_VMEM_STRUCT))
+
+#define SIZE_OF_DATA_HMEM_STRUCT \
+ (SIZE_OF_IA_CSS_PTR)
+
+#define SIZE_OF_IA_CSS_ISP_3A_STATISTICS_STRUCT \
+ (SIZE_OF_DATA_UNION + \
+ SIZE_OF_DATA_HMEM_STRUCT + \
+ sizeof(uint32_t) + \
+ sizeof(uint32_t) + \
+ SIZE_OF_IA_CSS_PTR + \
+ 4 * sizeof(uint32_t))
+
+/** Map with host-side pointers to ISP-format statistics.
+ * These pointers can either be copies of ISP data or memory mapped
+ * ISP pointers.
+ * All of the data behind these pointers is allocated contiguously, the
+ * allocated pointer is stored in the data_ptr field. The other fields
+ * point into this one block of data.
+ */
+struct ia_css_isp_3a_statistics_map {
+ void *data_ptr; /**< Pointer to start of memory */
+ struct ia_css_3a_output *dmem_stats;
+ uint16_t *vmem_stats_hi;
+ uint16_t *vmem_stats_lo;
+ struct ia_css_bh_table *hmem_stats;
+ uint32_t size; /**< total size in bytes of data_ptr */
+ uint32_t data_allocated; /**< indicate whether data_ptr
+ was allocated or not. */
+};
+
+/** @brief Copy and translate 3A statistics from an ISP buffer to a host buffer
+ * @param[out] host_stats Host buffer.
+ * @param[in] isp_stats ISP buffer.
+ * @return error value if temporary memory cannot be allocated
+ *
+ * This copies 3a statistics from an ISP pointer to a host pointer and then
+ * translates some of the statistics, details depend on which ISP binary is
+ * used.
+ * Always use this function, never copy the buffer directly.
+ */
+enum ia_css_err
+ia_css_get_3a_statistics(struct ia_css_3a_statistics *host_stats,
+ const struct ia_css_isp_3a_statistics *isp_stats);
+
+/** @brief Translate 3A statistics from ISP format to host format.
+ * @param[out] host_stats host-format statistics
+ * @param[in] isp_stats ISP-format statistics
+ * @return None
+ *
+ * This function translates statistics from the internal ISP-format to
+ * the host-format. This function does not include an additional copy
+ * step.
+ * */
+void
+ia_css_translate_3a_statistics(
+ struct ia_css_3a_statistics *host_stats,
+ const struct ia_css_isp_3a_statistics_map *isp_stats);
+
+/* Convenience functions for alloc/free of certain datatypes */
+
+/** @brief Allocate memory for the 3a statistics on the ISP
+ * @param[in] grid The grid.
+ * @return Pointer to the allocated 3a statistics buffer on the ISP
+*/
+struct ia_css_isp_3a_statistics *
+ia_css_isp_3a_statistics_allocate(const struct ia_css_3a_grid_info *grid);
+
+/** @brief Free the 3a statistics memory on the isp
+ * @param[in] me Pointer to the 3a statistics buffer on the ISP.
+ * @return None
+*/
+void
+ia_css_isp_3a_statistics_free(struct ia_css_isp_3a_statistics *me);
+
+/** @brief Allocate memory for the 3a statistics on the host
+ * @param[in] grid The grid.
+ * @return Pointer to the allocated 3a statistics buffer on the host
+*/
+struct ia_css_3a_statistics *
+ia_css_3a_statistics_allocate(const struct ia_css_3a_grid_info *grid);
+
+/** @brief Free the 3a statistics memory on the host
+ * @param[in] me Pointer to the 3a statistics buffer on the host.
+ * @return None
+ */
+void
+ia_css_3a_statistics_free(struct ia_css_3a_statistics *me);
+
+/** @brief Allocate a 3a statistics map structure
+ * @param[in] isp_stats pointer to ISP 3a statistics struct
+ * @param[in] data_ptr host-side pointer to ISP 3a statistics.
+ * @return Pointer to the allocated 3a statistics map
+ *
+ * This function allocates the ISP 3a statistics map structure
+ * and uses the data_ptr as base pointer to set the appropriate
+ * pointers to all relevant subsets of the 3a statistics (dmem,
+ * vmem, hmem).
+ * If the data_ptr is NULL, this function will allocate the host-side
+ * memory. This information is stored in the struct and used in the
+ * ia_css_isp_3a_statistics_map_free() function to determine whether
+ * the memory should be freed or not.
+ * Note that this function does not allocate or map any ISP
+ * memory.
+*/
+struct ia_css_isp_3a_statistics_map *
+ia_css_isp_3a_statistics_map_allocate(
+ const struct ia_css_isp_3a_statistics *isp_stats,
+ void *data_ptr);
+
+/** @brief Free the 3a statistics map
+ * @param[in] me Pointer to the 3a statistics map
+ * @return None
+ *
+ * This function frees the map struct. If the data_ptr inside it
+ * was allocated inside ia_css_isp_3a_statistics_map_allocate(), it
+ * will be freed in this function. Otherwise it will not be freed.
+ */
+void
+ia_css_isp_3a_statistics_map_free(struct ia_css_isp_3a_statistics_map *me);
+
+#endif /* __IA_CSS_3A_H */
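
To make the intended call sequence concrete, here is a hedged sketch using only the allocation and copy/translate functions declared above. In a real driver the ISP-side buffer would come back through the buffer queue after the ISP has filled it; here it is merely allocated and freed to show ownership, and the grid is assumed to be provided by the caller.

    #include "ia_css_3a.h"

    static void collect_3a_example(const struct ia_css_3a_grid_info *grid)
    {
        struct ia_css_isp_3a_statistics *isp_stats;
        struct ia_css_3a_statistics *host_stats;

        isp_stats  = ia_css_isp_3a_statistics_allocate(grid);
        host_stats = ia_css_3a_statistics_allocate(grid);

        if (isp_stats && host_stats &&
            ia_css_get_3a_statistics(host_stats, isp_stats) == IA_CSS_SUCCESS) {
            /* host_stats now holds host-format 3A data for the 3A library */
        }

        if (host_stats)
            ia_css_3a_statistics_free(host_stats);
        if (isp_stats)
            ia_css_isp_3a_statistics_free(isp_stats);
    }
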
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_acc_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_acc_types.h
new file mode 100644
index 000000000000..a2a1873aca83
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_acc_types.h
@@ -0,0 +1,468 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _IA_CSS_ACC_TYPES_H
+#define _IA_CSS_ACC_TYPES_H
+
+/** @file
+ * This file contains types used for acceleration
+ */
+
+#include <system_types.h> /* HAS_IRQ_MAP_VERSION_# */
+#include <type_support.h>
+#include <platform_support.h>
+#include <debug_global.h>
+
+#include "ia_css_types.h"
+#include "ia_css_frame_format.h"
+
+/* Should be included without the path.
+ However, that requires adding the path to numerous makefiles
+ that have nothing to do with isp parameters.
+ */
+#include "runtime/isp_param/interface/ia_css_isp_param_types.h"
+
+/* Types for the acceleration API.
+ * These should be moved to sh_css_internal.h once the old acceleration
+ * argument handling has been completed.
+ * After that, interpretation of these structures is no longer needed
+ * in the kernel and HAL.
+*/
+
+/** Type of acceleration.
+ */
+enum ia_css_acc_type {
+ IA_CSS_ACC_NONE, /**< Normal binary */
+ IA_CSS_ACC_OUTPUT, /**< Accelerator stage on output frame */
+ IA_CSS_ACC_VIEWFINDER, /**< Accelerator stage on viewfinder frame */
+ IA_CSS_ACC_STANDALONE, /**< Stand-alone acceleration */
+};
+
+/** Cells types
+ */
+enum ia_css_cell_type {
+ IA_CSS_SP0 = 0,
+ IA_CSS_SP1,
+ IA_CSS_ISP,
+ MAX_NUM_OF_CELLS
+};
+
+/** Firmware types.
+ */
+enum ia_css_fw_type {
+ ia_css_sp_firmware, /**< Firmware for the SP */
+ ia_css_isp_firmware, /**< Firmware for the ISP */
+ ia_css_bootloader_firmware, /**< Firmware for the BootLoader */
+ ia_css_acc_firmware /**< Firmware for acceleration */
+};
+
+struct ia_css_blob_descr;
+
+/** Blob descriptor.
+ * This structure describes an SP or ISP blob.
+ * It describes the text, data and bss sections as well as its position in a
+ * firmware file.
+ * For convenience, it contains dynamic data after loading.
+ */
+struct ia_css_blob_info {
+ /**< Static blob data */
+ uint32_t offset; /**< Blob offset in fw file */
+ struct ia_css_isp_param_memory_offsets memory_offsets; /**< offset wrt hdr in bytes */
+ uint32_t prog_name_offset; /**< offset wrt hdr in bytes */
+ uint32_t size; /**< Size of blob */
+ uint32_t padding_size; /**< total cumulative number of bytes added due to section alignment */
+ uint32_t icache_source; /**< Position of icache in blob */
+ uint32_t icache_size; /**< Size of icache section */
+ uint32_t icache_padding;/**< bytes added due to icache section alignment */
+ uint32_t text_source; /**< Position of text in blob */
+ uint32_t text_size; /**< Size of text section */
+ uint32_t text_padding; /**< bytes added due to text section alignment */
+ uint32_t data_source; /**< Position of data in blob */
+ uint32_t data_target; /**< Start of data in SP dmem */
+ uint32_t data_size; /**< Size of data section */
+ uint32_t data_padding; /**< bytes added due to data section alignment */
+ uint32_t bss_target; /**< Start position of bss in SP dmem */
+ uint32_t bss_size; /**< Size of bss section */
+ /**< Dynamic data filled by loader */
+ CSS_ALIGN(const void *code, 8); /**< Code section absolute pointer within fw, code = icache + text */
+ CSS_ALIGN(const void *data, 8); /**< Data section absolute pointer within fw, data = data + bss */
+};
+
+struct ia_css_binary_input_info {
+ uint32_t min_width;
+ uint32_t min_height;
+ uint32_t max_width;
+ uint32_t max_height;
+ uint32_t source; /* memory, sensor, variable */
+};
+
+struct ia_css_binary_output_info {
+ uint32_t min_width;
+ uint32_t min_height;
+ uint32_t max_width;
+ uint32_t max_height;
+ uint32_t num_chunks;
+ uint32_t variable_format;
+};
+
+struct ia_css_binary_internal_info {
+ uint32_t max_width;
+ uint32_t max_height;
+};
+
+struct ia_css_binary_bds_info {
+ uint32_t supported_bds_factors;
+};
+
+struct ia_css_binary_dvs_info {
+ uint32_t max_envelope_width;
+ uint32_t max_envelope_height;
+};
+
+struct ia_css_binary_vf_dec_info {
+ uint32_t is_variable;
+ uint32_t max_log_downscale;
+};
+
+struct ia_css_binary_s3a_info {
+ uint32_t s3atbl_use_dmem;
+ uint32_t fixed_s3a_deci_log;
+};
+
+/** DPC related binary info */
+struct ia_css_binary_dpc_info {
+ uint32_t bnr_lite; /**< bnr lite enable flag */
+};
+
+struct ia_css_binary_iterator_info {
+ uint32_t num_stripes;
+ uint32_t row_stripes_height;
+ uint32_t row_stripes_overlap_lines;
+};
+
+struct ia_css_binary_address_info {
+ uint32_t isp_addresses; /* Address in ISP dmem */
+ uint32_t main_entry; /* Address of entry fct */
+ uint32_t in_frame; /* Address in ISP dmem */
+ uint32_t out_frame; /* Address in ISP dmem */
+ uint32_t in_data; /* Address in ISP dmem */
+ uint32_t out_data; /* Address in ISP dmem */
+ uint32_t sh_dma_cmd_ptr; /* In ISP dmem */
+};
+
+struct ia_css_binary_uds_info {
+ uint16_t bpp;
+ uint16_t use_bci;
+ uint16_t use_str;
+ uint16_t woix;
+ uint16_t woiy;
+ uint16_t extra_out_vecs;
+ uint16_t vectors_per_line_in;
+ uint16_t vectors_per_line_out;
+ uint16_t vectors_c_per_line_in;
+ uint16_t vectors_c_per_line_out;
+ uint16_t vmem_gdc_in_block_height_y;
+ uint16_t vmem_gdc_in_block_height_c;
+ /* uint16_t padding; */
+};
+
+struct ia_css_binary_pipeline_info {
+ uint32_t mode;
+ uint32_t isp_pipe_version;
+ uint32_t pipelining;
+ uint32_t c_subsampling;
+ uint32_t top_cropping;
+ uint32_t left_cropping;
+ uint32_t variable_resolution;
+};
+
+struct ia_css_binary_block_info {
+ uint32_t block_width;
+ uint32_t block_height;
+ uint32_t output_block_height;
+};
+
+/** Structure describing an ISP binary.
+ * It describes the capabilities of a binary, like the maximum resolution,
+ * support features, dma channels, uds features, etc.
+ * This part is to be used by the SP.
+ * Future refactoring should move binary properties to ia_css_binary_xinfo,
+ * thereby making the SP code more binary independent.
+ */
+struct ia_css_binary_info {
+ CSS_ALIGN(uint32_t id, 8); /* IA_CSS_BINARY_ID_* */
+ struct ia_css_binary_pipeline_info pipeline;
+ struct ia_css_binary_input_info input;
+ struct ia_css_binary_output_info output;
+ struct ia_css_binary_internal_info internal;
+ struct ia_css_binary_bds_info bds;
+ struct ia_css_binary_dvs_info dvs;
+ struct ia_css_binary_vf_dec_info vf_dec;
+ struct ia_css_binary_s3a_info s3a;
+ struct ia_css_binary_dpc_info dpc_bnr; /**< DPC related binary info */
+ struct ia_css_binary_iterator_info iterator;
+ struct ia_css_binary_address_info addresses;
+ struct ia_css_binary_uds_info uds;
+ struct ia_css_binary_block_info block;
+ struct ia_css_isp_param_isp_segments mem_initializers;
+/* MW: Packing (related) bools in an integer ?? */
+ struct {
+#ifdef ISP2401
+ uint8_t luma_only;
+ uint8_t input_yuv;
+ uint8_t input_raw;
+#endif
+ uint8_t reduced_pipe;
+ uint8_t vf_veceven;
+ uint8_t dis;
+ uint8_t dvs_envelope;
+ uint8_t uds;
+ uint8_t dvs_6axis;
+ uint8_t block_output;
+ uint8_t streaming_dma;
+ uint8_t ds;
+ uint8_t bayer_fir_6db;
+ uint8_t raw_binning;
+ uint8_t continuous;
+ uint8_t s3a;
+ uint8_t fpnr;
+ uint8_t sc;
+ uint8_t macc;
+ uint8_t output;
+ uint8_t ref_frame;
+ uint8_t tnr;
+ uint8_t xnr;
+ uint8_t params;
+ uint8_t ca_gdc;
+ uint8_t isp_addresses;
+ uint8_t in_frame;
+ uint8_t out_frame;
+ uint8_t high_speed;
+ uint8_t dpc;
+ uint8_t padding[2];
+ } enable;
+ struct {
+/* DMA channel ID: [0,...,HIVE_ISP_NUM_DMA_CHANNELS> */
+ uint8_t ref_y_channel;
+ uint8_t ref_c_channel;
+ uint8_t tnr_channel;
+ uint8_t tnr_out_channel;
+ uint8_t dvs_coords_channel;
+ uint8_t output_channel;
+ uint8_t c_channel;
+ uint8_t vfout_channel;
+ uint8_t vfout_c_channel;
+ uint8_t vfdec_bits_per_pixel;
+ uint8_t claimed_by_isp;
+ uint8_t padding[2];
+ } dma;
+};
+
+/** Structure describing an ISP binary.
+ * It describes the capabilities of a binary, like the maximum resolution,
+ * support features, dma channels, uds features, etc.
+ */
+struct ia_css_binary_xinfo {
+ /* Part that is of interest to the SP. */
+ struct ia_css_binary_info sp;
+
+ /* Rest of the binary info, only interesting to the host. */
+ enum ia_css_acc_type type;
+ CSS_ALIGN(int32_t num_output_formats, 8);
+ enum ia_css_frame_format output_formats[IA_CSS_FRAME_FORMAT_NUM];
+ CSS_ALIGN(int32_t num_vf_formats, 8); /**< number of supported vf formats */
+ enum ia_css_frame_format vf_formats[IA_CSS_FRAME_FORMAT_NUM]; /**< types of supported vf formats */
+ uint8_t num_output_pins;
+ ia_css_ptr xmem_addr;
+ CSS_ALIGN(const struct ia_css_blob_descr *blob, 8);
+ CSS_ALIGN(uint32_t blob_index, 8);
+ CSS_ALIGN(union ia_css_all_memory_offsets mem_offsets, 8);
+ CSS_ALIGN(struct ia_css_binary_xinfo *next, 8);
+};
+
+/** Structure describing the Bootloader (an ISP binary).
+ * It contains several addresses, either in ddr or isp_dmem, as well as
+ * the entry function in icache.
+ */
+struct ia_css_bl_info {
+ uint32_t num_dma_cmds; /**< Number of cmds sent by CSS */
+ uint32_t dma_cmd_list; /**< Dma command list sent by CSS */
+ uint32_t sw_state; /**< Polled from css */
+ /* Entry functions */
+ uint32_t bl_entry; /**< The SP entry function */
+};
+
+/** Structure describing the SP binary.
+ * It contains several addresses, either in ddr or sp_dmem, as well as
+ * the entry function in pmem.
+ */
+struct ia_css_sp_info {
+ uint32_t init_dmem_data; /**< data sect config, stored to dmem */
+ uint32_t per_frame_data; /**< Per frame data, stored to dmem */
+ uint32_t group; /**< Per pipeline data, loaded by dma */
+ uint32_t output; /**< SP output data, loaded by dmem */
+ uint32_t host_sp_queue; /**< Host <-> SP queues */
+ uint32_t host_sp_com;/**< Host <-> SP commands */
+ uint32_t isp_started; /**< Polled from sensor thread, csim only */
+ uint32_t sw_state; /**< Polled from css */
+ uint32_t host_sp_queues_initialized; /**< Polled from the SP */
+ uint32_t sleep_mode; /**< different mode to halt SP */
+ uint32_t invalidate_tlb; /**< inform SP to invalidate mmu TLB */
+#ifndef ISP2401
+ uint32_t stop_copy_preview; /**< suspend copy and preview pipe when capture */
+#endif
+ uint32_t debug_buffer_ddr_address; /**< inform SP the address
+ of DDR debug queue */
+ uint32_t perf_counter_input_system_error; /**< input system perf
+ counter array */
+#ifdef HAS_WATCHDOG_SP_THREAD_DEBUG
+ uint32_t debug_wait; /**< thread/pipe post mortem debug */
+ uint32_t debug_stage; /**< thread/pipe post mortem debug */
+ uint32_t debug_stripe; /**< thread/pipe post mortem debug */
+#endif
+ uint32_t threads_stack; /**< sp thread's stack pointers */
+ uint32_t threads_stack_size; /**< sp thread's stack sizes */
+ uint32_t curr_binary_id; /**< current binary id */
+ uint32_t raw_copy_line_count; /**< raw copy line counter */
+ uint32_t ddr_parameter_address; /**< acc param ddrptr, sp dmem */
+ uint32_t ddr_parameter_size; /**< acc param size, sp dmem */
+ /* Entry functions */
+ uint32_t sp_entry; /**< The SP entry function */
+ uint32_t tagger_frames_addr; /**< Base address of tagger state */
+};
+
+/* The following #if is there because this header file is also included
+ by SP and ISP code, which do not need this data, and because HIVECC has
+ alignment issues with the firmware structs/unions.
+ A more permanent solution would be to refactor this include.
+*/
+#if !defined(__ISP)
+/** Accelerator firmware information.
+ */
+struct ia_css_acc_info {
+ uint32_t per_frame_data; /**< Dummy for now */
+};
+
+/** Firmware information.
+ */
+union ia_css_fw_union {
+ struct ia_css_binary_xinfo isp; /**< ISP info */
+ struct ia_css_sp_info sp; /**< SP info */
+ struct ia_css_bl_info bl; /**< Bootloader info */
+ struct ia_css_acc_info acc; /**< Accelerator info */
+};
+
+/** Firmware information.
+ */
+struct ia_css_fw_info {
+ size_t header_size; /**< size of fw header */
+ CSS_ALIGN(uint32_t type, 8);
+ union ia_css_fw_union info; /**< Binary info */
+ struct ia_css_blob_info blob; /**< Blob info */
+ /* Dynamic part */
+ struct ia_css_fw_info *next;
+ CSS_ALIGN(uint32_t loaded, 8); /**< Firmware has been loaded */
+ CSS_ALIGN(const uint8_t *isp_code, 8); /**< ISP pointer to code */
+ /**< Firmware handle between user space and kernel */
+ CSS_ALIGN(uint32_t handle, 8);
+ /**< Sections to copy from/to ISP */
+ struct ia_css_isp_param_css_segments mem_initializers;
+ /**< Initializer for local ISP memories */
+};
+
+struct ia_css_blob_descr {
+ const unsigned char *blob;
+ struct ia_css_fw_info header;
+ const char *name;
+ union ia_css_all_memory_offsets mem_offsets;
+};
+
+struct ia_css_acc_fw;
+
+/** Structure describing the SP binary of a stand-alone accelerator.
+ */
+struct ia_css_acc_sp {
+ void (*init)(struct ia_css_acc_fw *); /**< init for crun */
+ uint32_t sp_prog_name_offset; /**< program name offset wrt hdr in bytes */
+ uint32_t sp_blob_offset; /**< blob offset wrt hdr in bytes */
+ void *entry; /**< Address of sp entry point */
+ uint32_t *css_abort; /**< SP dmem abort flag */
+ void *isp_code; /**< SP dmem address holding xmem
+ address of isp code */
+ struct ia_css_fw_info fw; /**< SP fw descriptor */
+ const uint8_t *code; /**< ISP pointer of allocated SP code */
+};
+
+/** Acceleration firmware descriptor.
+ * This descriptor describes either SP code (stand-alone), or
+ * ISP code (a separate pipeline stage).
+ */
+struct ia_css_acc_fw_hdr {
+ enum ia_css_acc_type type; /**< Type of accelerator */
+ uint32_t isp_prog_name_offset; /**< program name offset wrt
+ header in bytes */
+ uint32_t isp_blob_offset; /**< blob offset wrt header
+ in bytes */
+ uint32_t isp_size; /**< Size of isp blob */
+ const uint8_t *isp_code; /**< ISP pointer to code */
+ struct ia_css_acc_sp sp; /**< Standalone sp code */
+ /**< Firmware handle between user space and kernel */
+ uint32_t handle;
+ struct ia_css_data parameters; /**< Current SP parameters */
+};
+
+/** Firmware structure.
+ * This contains the header and actual blobs.
+ * For standalone, it contains SP and ISP blob.
+ * For a pipeline stage accelerator, it contains ISP code only.
+ * Since its members are variable size, their offsets are described in the
+ * header and computed using the access macros below.
+ */
+struct ia_css_acc_fw {
+ struct ia_css_acc_fw_hdr header; /**< firmware header */
+ /*
+ int8_t isp_progname[]; **< ISP program name
+ int8_t sp_progname[]; **< SP program name, stand-alone only
+ uint8_t sp_code[]; **< SP blob, stand-alone only
+ uint8_t isp_code[]; **< ISP blob
+ */
+};
+
+/* Access macros for firmware */
+#define IA_CSS_ACC_OFFSET(t, f, n) ((t)((uint8_t *)(f)+(f->header.n)))
+#define IA_CSS_ACC_SP_PROG_NAME(f) IA_CSS_ACC_OFFSET(const char *, f, \
+ sp.sp_prog_name_offset)
+#define IA_CSS_ACC_ISP_PROG_NAME(f) IA_CSS_ACC_OFFSET(const char *, f, \
+ isp_prog_name_offset)
+#define IA_CSS_ACC_SP_CODE(f) IA_CSS_ACC_OFFSET(uint8_t *, f, \
+ sp.sp_blob_offset)
+#define IA_CSS_ACC_SP_DATA(f) (IA_CSS_ACC_SP_CODE(f) + \
+ (f)->header.sp.fw.blob.data_source)
+#define IA_CSS_ACC_ISP_CODE(f) IA_CSS_ACC_OFFSET(uint8_t*, f,\
+ isp_blob_offset)
+#define IA_CSS_ACC_ISP_SIZE(f) ((f)->header.isp_size)
+
+/* Binary name follows header immediately */
+#define IA_CSS_EXT_ISP_PROG_NAME(f) ((const char *)(f)+(f)->blob.prog_name_offset)
+#define IA_CSS_EXT_ISP_MEM_OFFSETS(f) \
+ ((const struct ia_css_memory_offsets *)((const char *)(f)+(f)->blob.mem_offsets))
+
+#endif /* !defined(__ISP) */
+
+enum ia_css_sp_sleep_mode {
+ SP_DISABLE_SLEEP_MODE = 0,
+ SP_SLEEP_AFTER_FRAME = 1 << 0,
+ SP_SLEEP_AFTER_IRQ = 1 << 1
+};
+#endif /* _IA_CSS_ACC_TYPES_H */
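
As an illustration of the access macros defined above, the sketch below walks a loaded acceleration firmware image. The helper inspect_acc_fw() is illustrative only, and fw is assumed to point at a complete image already resident in memory.

    #include "ia_css_acc_types.h"

    static void inspect_acc_fw(struct ia_css_acc_fw *fw)
    {
        const char *isp_name = IA_CSS_ACC_ISP_PROG_NAME(fw);
        const uint8_t *isp_code = IA_CSS_ACC_ISP_CODE(fw);
        uint32_t isp_size = IA_CSS_ACC_ISP_SIZE(fw);

        /* e.g. validate or log the ISP blob layout here */
        (void)isp_name; (void)isp_code; (void)isp_size;

        if (fw->header.type == IA_CSS_ACC_STANDALONE) {
            /* stand-alone accelerators additionally carry an SP blob */
            const char *sp_name = IA_CSS_ACC_SP_PROG_NAME(fw);
            const uint8_t *sp_code = IA_CSS_ACC_SP_CODE(fw);

            (void)sp_name; (void)sp_code;
        }
    }
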
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_buffer.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_buffer.h
new file mode 100644
index 000000000000..b2ecf3618c15
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_buffer.h
@@ -0,0 +1,84 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_BUFFER_H
+#define __IA_CSS_BUFFER_H
+
+/** @file
+ * This file contains datastructures and types for buffers used in CSS
+ */
+
+#include <type_support.h>
+#include "ia_css_types.h"
+#include "ia_css_timer.h"
+
+/** Enumeration of buffer types. Buffers can be queued and de-queued
+ * to hand them over between IA and ISP.
+ */
+enum ia_css_buffer_type {
+ IA_CSS_BUFFER_TYPE_INVALID = -1,
+ IA_CSS_BUFFER_TYPE_3A_STATISTICS = 0,
+ IA_CSS_BUFFER_TYPE_DIS_STATISTICS,
+ IA_CSS_BUFFER_TYPE_LACE_STATISTICS,
+ IA_CSS_BUFFER_TYPE_INPUT_FRAME,
+ IA_CSS_BUFFER_TYPE_OUTPUT_FRAME,
+ IA_CSS_BUFFER_TYPE_SEC_OUTPUT_FRAME,
+ IA_CSS_BUFFER_TYPE_VF_OUTPUT_FRAME,
+ IA_CSS_BUFFER_TYPE_SEC_VF_OUTPUT_FRAME,
+ IA_CSS_BUFFER_TYPE_RAW_OUTPUT_FRAME,
+ IA_CSS_BUFFER_TYPE_CUSTOM_INPUT,
+ IA_CSS_BUFFER_TYPE_CUSTOM_OUTPUT,
+ IA_CSS_BUFFER_TYPE_METADATA,
+ IA_CSS_BUFFER_TYPE_PARAMETER_SET,
+ IA_CSS_BUFFER_TYPE_PER_FRAME_PARAMETER_SET,
+ IA_CSS_NUM_DYNAMIC_BUFFER_TYPE,
+ IA_CSS_NUM_BUFFER_TYPE
+};
+
+/* Driver API is not SP/ISP visible, 64 bit types not supported on hivecc */
+#if !defined(__ISP)
+/** Buffer structure. This is a container structure that enables content
+ * independent buffer queues and access functions.
+ */
+struct ia_css_buffer {
+ enum ia_css_buffer_type type; /**< Buffer type. */
+ unsigned int exp_id;
+ /**< exposure id for this buffer; 0 = not available,
+ see ia_css_event_public.h for more detail. */
+ union {
+ struct ia_css_isp_3a_statistics *stats_3a; /**< 3A statistics & optionally RGBY statistics. */
+ struct ia_css_isp_dvs_statistics *stats_dvs; /**< DVS statistics. */
+ struct ia_css_isp_skc_dvs_statistics *stats_skc_dvs; /**< SKC DVS statistics. */
+ struct ia_css_frame *frame; /**< Frame buffer. */
+ struct ia_css_acc_param *custom_data; /**< Custom buffer. */
+ struct ia_css_metadata *metadata; /**< Sensor metadata. */
+ } data; /**< Buffer data pointer. */
+ uint64_t driver_cookie; /**< cookie for the driver */
+ struct ia_css_time_meas timing_data; /**< timing data (readings from the timer) */
+ struct ia_css_clock_tick isys_eof_clock_tick; /**< ISYS's end of frame timer tick*/
+};
+
+/** @brief Dequeue param buffers from sp2host_queue
+ *
+ * @return None
+ *
+ * This function must be called at every driver interrupt handler to prevent
+ * overflow of sp2host_queue.
+ */
+void
+ia_css_dequeue_param_buffers(void);
+
+#endif /* !__ISP */
+
+#endif /* __IA_CSS_BUFFER_H */
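
A hedged sketch of how this container structure might be filled for an output frame before being handed to a queueing function (not declared in this header). The frame pointer is assumed to come from the frame API elsewhere in the CSS-API.

    #include "ia_css_buffer.h"

    static struct ia_css_buffer make_output_frame_buffer(struct ia_css_frame *frame)
    {
        struct ia_css_buffer buf = {
            .type   = IA_CSS_BUFFER_TYPE_OUTPUT_FRAME, /* selects the union member */
            .exp_id = 0,                               /* 0 = exposure id not available */
        };

        buf.data.frame = frame; /* frame buffer payload */
        return buf;
    }
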
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_control.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_control.h
new file mode 100644
index 000000000000..a15d3e368341
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_control.h
@@ -0,0 +1,157 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_CONTROL_H
+#define __IA_CSS_CONTROL_H
+
+/** @file
+ * This file contains functionality for starting and controlling CSS
+ */
+
+#include <type_support.h>
+#include <ia_css_env.h>
+#include <ia_css_firmware.h>
+#include <ia_css_irq.h>
+
+/** @brief Initialize the CSS API.
+ * @param[in] env Environment, provides functions to access the
+ * environment in which the CSS code runs. This is
+ * used for host side memory access and message
+ * printing. May not be NULL.
+ * @param[in] fw Firmware package containing the firmware for all
+ * predefined ISP binaries.
+ * If fw is NULL, the firmware must have been loaded
+ * beforehand through a call to ia_css_load_firmware().
+ * @param[in] l1_base Base index (isp2400)
+ * of the L1 page table. This is a physical
+ * address or index.
+ * @param[in] irq_type The type of interrupt to be used (edge or level)
+ * @return Returns IA_CSS_ERR_INTERNAL_ERROR in case of any
+ * errors and IA_CSS_SUCCESS otherwise.
+ *
+ * This function initializes the API, which includes allocating and initializing
+ * internal data structures. It also interprets the firmware package. All
+ * contents of the firmware package are copied into local data structures, so
+ * the fw pointer can be freed after this function completes.
+ */
+enum ia_css_err ia_css_init(
+ const struct ia_css_env *env,
+ const struct ia_css_fw *fw,
+ uint32_t l1_base,
+ enum ia_css_irq_type irq_type);
+
+/** @brief Un-initialize the CSS API.
+ * @return None
+ *
+ * This function deallocates all memory that has been allocated by the CSS API.
+ * Exception: if you explicitly loaded firmware through ia_css_load_firmware(),
+ * you need to call ia_css_unload_firmware() to deallocate the memory reserved
+ * for the firmware.
+ * After this function is called, no other CSS functions should be called,
+ * with the exception of ia_css_init() (which re-initializes the CSS code),
+ * ia_css_unload_firmware() (to unload the firmware) and ia_css_load_firmware()
+ * (to load new firmware).
+ */
+void
+ia_css_uninit(void);
+
+/** @brief Suspend CSS API for power down
+ * @return success or failure code
+ *
+ * suspend shuts down the system by:
+ * - unloading all the streams
+ * - stopping the SP
+ * - performing uninit
+ *
+ * Currently stream memory is deallocated because of rmmgr issues.
+ * A bypass that leaves the streams intact still needs to be devised.
+ */
+enum ia_css_err
+ia_css_suspend(void);
+
+/** @brief Resume CSS API from power down
+ * @return success or failure code
+ *
+ * After a power cycle, this function will bring the CSS API back into
+ * a state where it can be started.
+ * This will re-initialize the hardware and all the streams.
+ * Call this function only after ia_css_suspend() has been called.
+ */
+enum ia_css_err
+ia_css_resume(void);
+
+/** @brief Enable use of a separate queue for ISYS events.
+ *
+ * @param[in] enable: enable or disable use of separate ISYS event queues.
+ * @return error if called when SP is running.
+ *
+ * @deprecated{This is a temporary function that allows drivers to migrate to
+ * the use of the separate ISYS event queue. Once all drivers support this, it
+ * will be made the default and this function will be removed.
+ * This function should only be called when the SP is not running, calling it
+ * when the SP is running will result in an error value being returned. }
+ */
+enum ia_css_err
+ia_css_enable_isys_event_queue(bool enable);
+
+/** @brief Test whether the ISP has started.
+ *
+ * @return Boolean flag true if the ISP has started or false otherwise.
+ *
+ * Temporary function to poll whether the ISP has been started. Once it has,
+ * the sensor can also be started. */
+bool
+ia_css_isp_has_started(void);
+
+/** @brief Test whether the SP has initialized.
+ *
+ * @return Boolean flag true if the SP has initialized or false otherwise.
+ *
+ * Temporary function to poll whether the SP has been initialized. Once it has,
+ * we can enqueue buffers. */
+bool
+ia_css_sp_has_initialized(void);
+
+/** @brief Test whether the SP has terminated.
+ *
+ * @return Boolean flag true if the SP has terminated or false otherwise.
+ *
+ * Temporary function to poll whether the SP has been terminated. Once it has,
+ * we can switch mode. */
+bool
+ia_css_sp_has_terminated(void);
+
+/** @brief start SP hardware
+ *
+ * @return IA_CSS_SUCCESS or error code upon error.
+ *
+ * It will boot the SP hardware and start the multi-threading infrastructure.
+ * All threads will be started and blocked by a semaphore. This function should
+ * be called before any ia_css_stream_start().
+ */
+enum ia_css_err
+ia_css_start_sp(void);
+
+
+/** @brief stop SP hardware
+ *
+ * @return IA_CSS_SUCCESS or error code upon error.
+ *
+ * This function will terminate all threads and shut down SP. It should be
+ * called after all ia_css_stream_stop().
+ */
+enum ia_css_err
+ia_css_stop_sp(void);
+
+#endif /* __IA_CSS_CONTROL_H */
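
Pulling the calls above together, a hedged bring-up sketch. env and fw are assumed to be prepared by the caller (see ia_css_env.h and ia_css_firmware.h), and l1_base and irq_type are platform specific; teardown would mirror this with ia_css_stop_sp() followed by ia_css_uninit().

    #include "ia_css_control.h"

    static enum ia_css_err bring_up_css(const struct ia_css_env *env,
                                        const struct ia_css_fw *fw,
                                        uint32_t l1_base,
                                        enum ia_css_irq_type irq_type)
    {
        enum ia_css_err err;

        err = ia_css_init(env, fw, l1_base, irq_type);
        if (err != IA_CSS_SUCCESS)
            return err;

        /* Boot the SP before any stream is started. */
        err = ia_css_start_sp();
        if (err != IA_CSS_SUCCESS)
            ia_css_uninit();

        return err;
    }
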
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_device_access.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_device_access.c
new file mode 100644
index 000000000000..21b842379acc
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_device_access.c
@@ -0,0 +1,95 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "ia_css_device_access.h"
+#include <type_support.h> /* for uint*, size_t */
+#include <system_types.h> /* for hrt_address */
+#include <ia_css_env.h> /* for ia_css_hw_access_env */
+#include <assert_support.h> /* for assert */
+
+static struct ia_css_hw_access_env my_env;
+
+void
+ia_css_device_access_init(const struct ia_css_hw_access_env *env)
+{
+ assert(env != NULL);
+
+ my_env = *env;
+}
+
+uint8_t
+ia_css_device_load_uint8(const hrt_address addr)
+{
+ return my_env.load_8(addr);
+}
+
+uint16_t
+ia_css_device_load_uint16(const hrt_address addr)
+{
+ return my_env.load_16(addr);
+}
+
+uint32_t
+ia_css_device_load_uint32(const hrt_address addr)
+{
+ return my_env.load_32(addr);
+}
+
+uint64_t
+ia_css_device_load_uint64(const hrt_address addr)
+{
+ assert(0);
+
+ (void)addr;
+ return 0;
+}
+
+void
+ia_css_device_store_uint8(const hrt_address addr, const uint8_t data)
+{
+ my_env.store_8(addr, data);
+}
+
+void
+ia_css_device_store_uint16(const hrt_address addr, const uint16_t data)
+{
+ my_env.store_16(addr, data);
+}
+
+void
+ia_css_device_store_uint32(const hrt_address addr, const uint32_t data)
+{
+ my_env.store_32(addr, data);
+}
+
+void
+ia_css_device_store_uint64(const hrt_address addr, const uint64_t data)
+{
+ assert(0);
+
+ (void)addr;
+ (void)data;
+}
+
+void
+ia_css_device_load(const hrt_address addr, void *data, const size_t size)
+{
+ my_env.load(addr, data, (uint32_t)size);
+}
+
+void
+ia_css_device_store(const hrt_address addr, const void *data, const size_t size)
+{
+ my_env.store(addr, data, (uint32_t)size);
+}
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_device_access.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_device_access.h
new file mode 100644
index 000000000000..59459f7a9876
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_device_access.h
@@ -0,0 +1,59 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _IA_CSS_DEVICE_ACCESS_H
+#define _IA_CSS_DEVICE_ACCESS_H
+
+/** @file
+ * File containing internal functions for the CSS-API to access the CSS device.
+ */
+
+#include <type_support.h> /* for uint*, size_t */
+#include <system_types.h> /* for hrt_address */
+#include <ia_css_env.h> /* for ia_css_hw_access_env */
+
+void
+ia_css_device_access_init(const struct ia_css_hw_access_env *env);
+
+uint8_t
+ia_css_device_load_uint8(const hrt_address addr);
+
+uint16_t
+ia_css_device_load_uint16(const hrt_address addr);
+
+uint32_t
+ia_css_device_load_uint32(const hrt_address addr);
+
+uint64_t
+ia_css_device_load_uint64(const hrt_address addr);
+
+void
+ia_css_device_store_uint8(const hrt_address addr, const uint8_t data);
+
+void
+ia_css_device_store_uint16(const hrt_address addr, const uint16_t data);
+
+void
+ia_css_device_store_uint32(const hrt_address addr, const uint32_t data);
+
+void
+ia_css_device_store_uint64(const hrt_address addr, const uint64_t data);
+
+void
+ia_css_device_load(const hrt_address addr, void *data, const size_t size);
+
+void
+ia_css_device_store(const hrt_address addr, const void *data, const size_t size);
+
+#endif /* _IA_CSS_DEVICE_ACCESS_H */
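
A small sketch of the indirection this file provides: once the host registers its accessors, all loads and stores are routed through them. The hw_env argument is assumed to be filled in by platform code (see struct ia_css_hw_access_env in ia_css_env.h); the register address and the bit manipulation are purely illustrative.

    #include "ia_css_device_access.h"

    static void css_mmio_example(const struct ia_css_hw_access_env *hw_env,
                                 hrt_address reg)
    {
        uint32_t val;

        /* Register the host callbacks once, typically at driver init time. */
        ia_css_device_access_init(hw_env);

        /* Subsequent accesses go through the registered callbacks. */
        val = ia_css_device_load_uint32(reg);
        ia_css_device_store_uint32(reg, val | 0x1); /* illustrative bit set */
    }
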
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_dvs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_dvs.h
new file mode 100644
index 000000000000..147bf81959d3
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_dvs.h
@@ -0,0 +1,299 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_DVS_H
+#define __IA_CSS_DVS_H
+
+/** @file
+ * This file contains types for DVS statistics
+ */
+
+#include <type_support.h>
+#include "ia_css_types.h"
+#include "ia_css_err.h"
+#include "ia_css_stream_public.h"
+
+enum dvs_statistics_type {
+ DVS_STATISTICS,
+ DVS2_STATISTICS,
+ SKC_DVS_STATISTICS
+};
+
+
+/** Structure that holds DVS statistics in the ISP internal
+ * format. Use ia_css_get_dvs_statistics() to translate
+ * this to the format used on the host (DVS engine).
+ * */
+struct ia_css_isp_dvs_statistics {
+ ia_css_ptr hor_proj;
+ ia_css_ptr ver_proj;
+ uint32_t hor_size;
+ uint32_t ver_size;
+ uint32_t exp_id; /**< see ia_css_event_public.h for more detail */
+ ia_css_ptr data_ptr; /* base pointer containing all memory */
+ uint32_t size; /* size of allocated memory in data_ptr */
+};
+
+/** Structure that holds SKC DVS statistics in the ISP internal
+ * format. Use ia_css_dvs_statistics_get() to translate this to
+ * the format used on the host.
+ * */
+struct ia_css_isp_skc_dvs_statistics;
+
+
+#define SIZE_OF_IA_CSS_ISP_DVS_STATISTICS_STRUCT \
+ ((3 * SIZE_OF_IA_CSS_PTR) + \
+ (4 * sizeof(uint32_t)))
+
+/* Map with host-side pointers to ISP-format statistics.
+ * These pointers can either be copies of ISP data or memory mapped
+ * ISP pointers.
+ * All of the data behind these pointers is allocated contiguously; the
+ * allocated pointer is stored in the data_ptr field. The other fields
+ * point into this one block of data.
+ */
+struct ia_css_isp_dvs_statistics_map {
+ void *data_ptr;
+ int32_t *hor_proj;
+ int32_t *ver_proj;
+ uint32_t size; /* total size in bytes */
+ uint32_t data_allocated; /* indicate whether data was allocated */
+};
+
+union ia_css_dvs_statistics_isp {
+ struct ia_css_isp_dvs_statistics *p_dvs_statistics_isp;
+ struct ia_css_isp_skc_dvs_statistics *p_skc_dvs_statistics_isp;
+};
+
+union ia_css_dvs_statistics_host {
+ struct ia_css_dvs_statistics *p_dvs_statistics_host;
+ struct ia_css_dvs2_statistics *p_dvs2_statistics_host;
+ struct ia_css_skc_dvs_statistics *p_skc_dvs_statistics_host;
+};
+
+/** @brief Copy DVS statistics from an ISP buffer to a host buffer.
+ * @param[in] host_stats Host buffer
+ * @param[in] isp_stats ISP buffer
+ * @return error value if temporary memory cannot be allocated
+ *
+ * This may include a translation step as well depending
+ * on the ISP version.
+ * Always use this function, never copy the buffer directly.
+ * Note that this function uses the mem_load function from the CSS
+ * environment struct.
+ * In certain environments this may be slow. In those cases it is
+ * advised to map the ISP memory into a host-side pointer and use
+ * the ia_css_translate_dvs_statistics() function instead.
+ */
+enum ia_css_err
+ia_css_get_dvs_statistics(struct ia_css_dvs_statistics *host_stats,
+ const struct ia_css_isp_dvs_statistics *isp_stats);
+
+/** @brief Translate DVS statistics from ISP format to host format
+ * @param[in] host_stats Host buffer
+ * @param[in] isp_stats ISP buffer
+ * @return None
+ *
+ * This function translates the dvs statistics from the ISP-internal
+ * format to the format used by the DVS library on the CPU.
+ * This function takes a host-side pointer as input. This can either
+ * point to a copy of the data or be a memory mapped pointer to the
+ * ISP memory pages.
+ */
+void
+ia_css_translate_dvs_statistics(
+ struct ia_css_dvs_statistics *host_stats,
+ const struct ia_css_isp_dvs_statistics_map *isp_stats);
+
+/** @brief Copy DVS 2.0 statistics from an ISP buffer to a host buffer.
+ * @param[in] host_stats Host buffer
+ * @param[in] isp_stats ISP buffer
+ * @return error value if temporary memory cannot be allocated
+ *
+ * This may include a translation step as well depending
+ * on the ISP version.
+ * Always use this function, never copy the buffer directly.
+ * Note that this function uses the mem_load function from the CSS
+ * environment struct.
+ * In certain environments this may be slow. In those cases it is
+ * advised to map the ISP memory into a host-side pointer and use
+ * the ia_css_translate_dvs2_statistics() function instead.
+ */
+enum ia_css_err
+ia_css_get_dvs2_statistics(struct ia_css_dvs2_statistics *host_stats,
+ const struct ia_css_isp_dvs_statistics *isp_stats);
+
+/** @brief Translate DVS2 statistics from ISP format to host format
+ * @param[in] host_stats Host buffer
+ * @param[in] isp_stats ISP buffer
+ * @return None
+ *
+ * This function translates the dvs2 statistics from the ISP-internal
+ * format to the format used by the DVS2 library on the CPU.
+ * This function takes a host-side pointer as input. This can either
+ * point to a copy of the data or be a memory mapped pointer to the
+ * ISP memory pages.
+ */
+void
+ia_css_translate_dvs2_statistics(
+ struct ia_css_dvs2_statistics *host_stats,
+ const struct ia_css_isp_dvs_statistics_map *isp_stats);
+
+/** @brief Copy DVS statistics from an ISP buffer to a host buffer.
+ * @param[in] type - DVS statistics type
+ * @param[in] host_stats Host buffer
+ * @param[in] isp_stats ISP buffer
+ * @return None
+ */
+void
+ia_css_dvs_statistics_get(enum dvs_statistics_type type,
+ union ia_css_dvs_statistics_host *host_stats,
+ const union ia_css_dvs_statistics_isp *isp_stats);
+
+/** @brief Allocate the DVS statistics memory on the ISP
+ * @param[in] grid The grid.
+ * @return Pointer to the allocated DVS statistics buffer on the ISP
+*/
+struct ia_css_isp_dvs_statistics *
+ia_css_isp_dvs_statistics_allocate(const struct ia_css_dvs_grid_info *grid);
+
+/** @brief Free the DVS statistics memory on the ISP
+ * @param[in] me Pointer to the DVS statistics buffer on the ISP.
+ * @return None
+*/
+void
+ia_css_isp_dvs_statistics_free(struct ia_css_isp_dvs_statistics *me);
+
+/** @brief Allocate the DVS 2.0 statistics memory
+ * @param[in] grid The grid.
+ * @return Pointer to the allocated DVS statistics buffer on the ISP
+*/
+struct ia_css_isp_dvs_statistics *
+ia_css_isp_dvs2_statistics_allocate(const struct ia_css_dvs_grid_info *grid);
+
+/** @brief Free the DVS 2.0 statistics memory
+ * @param[in] me Pointer to the DVS statistics buffer on the ISP.
+ * @return None
+*/
+void
+ia_css_isp_dvs2_statistics_free(struct ia_css_isp_dvs_statistics *me);
+
+/** @brief Allocate the DVS statistics memory on the host
+ * @param[in] grid The grid.
+ * @return Pointer to the allocated DVS statistics buffer on the host
+*/
+struct ia_css_dvs_statistics *
+ia_css_dvs_statistics_allocate(const struct ia_css_dvs_grid_info *grid);
+
+/** @brief Free the DVS statistics memory on the host
+ * @param[in] me Pointer to the DVS statistics buffer on the host.
+ * @return None
+*/
+void
+ia_css_dvs_statistics_free(struct ia_css_dvs_statistics *me);
+
+/** @brief Allocate the DVS coefficients memory
+ * @param[in] grid The grid.
+ * @return Pointer to the allocated DVS coefficients buffer
+*/
+struct ia_css_dvs_coefficients *
+ia_css_dvs_coefficients_allocate(const struct ia_css_dvs_grid_info *grid);
+
+/** @brief Free the DVS coefficients memory
+ * @param[in] me Pointer to the DVS coefficients buffer.
+ * @return None
+ */
+void
+ia_css_dvs_coefficients_free(struct ia_css_dvs_coefficients *me);
+
+/** @brief Allocate the DVS 2.0 statistics memory on the host
+ * @param[in] grid The grid.
+ * @return Pointer to the allocated DVS 2.0 statistics buffer on the host
+ */
+struct ia_css_dvs2_statistics *
+ia_css_dvs2_statistics_allocate(const struct ia_css_dvs_grid_info *grid);
+
+/** @brief Free the DVS 2.0 statistics memory
+ * @param[in] me Pointer to the DVS 2.0 statistics buffer on the host.
+ * @return None
+*/
+void
+ia_css_dvs2_statistics_free(struct ia_css_dvs2_statistics *me);
+
+/** @brief Allocate the DVS 2.0 coefficients memory
+ * @param[in] grid The grid.
+ * @return Pointer to the allocated DVS 2.0 coefficients buffer
+*/
+struct ia_css_dvs2_coefficients *
+ia_css_dvs2_coefficients_allocate(const struct ia_css_dvs_grid_info *grid);
+
+/** @brief Free the DVS 2.0 coefficients memory
+ * @param[in] me Pointer to the DVS 2.0 coefficients buffer.
+ * @return None
+*/
+void
+ia_css_dvs2_coefficients_free(struct ia_css_dvs2_coefficients *me);
+
+/** @brief Allocate the DVS 2.0 6-axis config memory
+ * @param[in] stream The stream.
+ * @return Pointer to the allocated DVS 6axis configuration buffer
+*/
+struct ia_css_dvs_6axis_config *
+ia_css_dvs2_6axis_config_allocate(const struct ia_css_stream *stream);
+
+/** @brief Free the DVS 2.0 6-axis config memory
+ * @param[in] dvs_6axis_config Pointer to the DVS 6axis configuration buffer
+ * @return None
+ */
+void
+ia_css_dvs2_6axis_config_free(struct ia_css_dvs_6axis_config *dvs_6axis_config);
+
+/** @brief Allocate a dvs statistics map structure
+ * @param[in] isp_stats pointer to ISP dvs statistics struct
+ * @param[in] data_ptr host-side pointer to ISP dvs statistics.
+ * @return Pointer to the allocated dvs statistics map
+ *
+ * This function allocates the ISP dvs statistics map structure
+ * and uses the data_ptr as base pointer to set the appropriate
+ * pointers to all relevant subsets of the dvs statistics (dmem,
+ * vmem, hmem).
+ * If the data_ptr is NULL, this function will allocate the host-side
+ * memory. This information is stored in the struct and used in the
+ * ia_css_isp_dvs_statistics_map_free() function to determine whether
+ * the memory should be freed or not.
+ * Note that this function does not allocate or map any ISP
+ * memory.
+*/
+struct ia_css_isp_dvs_statistics_map *
+ia_css_isp_dvs_statistics_map_allocate(
+ const struct ia_css_isp_dvs_statistics *isp_stats,
+ void *data_ptr);
+
+/** @brief Free the dvs statistics map
+ * @param[in] me Pointer to the dvs statistics map
+ * @return None
+ *
+ * This function frees the map struct. If the data_ptr inside it
+ * was allocated inside ia_css_isp_dvs_statistics_map_allocate(), it
+ * will be freed in this function. Otherwise it will not be freed.
+ */
+void
+ia_css_isp_dvs_statistics_map_free(struct ia_css_isp_dvs_statistics_map *me);
+
+/** @brief Allocate memory for the SKC DVS statistics on the ISP
+ * @return Pointer to the allocated ACC DVS statistics buffer on the ISP
+*/
+struct ia_css_isp_skc_dvs_statistics *ia_css_skc_dvs_statistics_allocate(void);
+
+#endif /* __IA_CSS_DVS_H */
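
The DVS 2.0 flow mirrors the 3A one; below is a hedged sketch using only functions declared above, with the grid assumed to be provided by the caller and the ISP-side buffer merely allocated and freed here to show ownership.

    #include "ia_css_dvs.h"

    static void collect_dvs2_example(const struct ia_css_dvs_grid_info *grid)
    {
        struct ia_css_isp_dvs_statistics *isp_stats;
        struct ia_css_dvs2_statistics *host_stats;

        isp_stats  = ia_css_isp_dvs2_statistics_allocate(grid);
        host_stats = ia_css_dvs2_statistics_allocate(grid);

        if (isp_stats && host_stats &&
            ia_css_get_dvs2_statistics(host_stats, isp_stats) == IA_CSS_SUCCESS) {
            /* host_stats now holds host-format DVS 2.0 data */
        }

        if (host_stats)
            ia_css_dvs2_statistics_free(host_stats);
        if (isp_stats)
            ia_css_isp_dvs2_statistics_free(isp_stats);
    }
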
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_env.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_env.h
new file mode 100644
index 000000000000..1ae9daf0be76
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_env.h
@@ -0,0 +1,94 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_ENV_H
+#define __IA_CSS_ENV_H
+
+#include <type_support.h>
+#include <stdarg.h> /* va_list */
+#include "ia_css_types.h"
+#include "ia_css_acc_types.h"
+
+/** @file
+ * This file contains prototypes for functions that need to be provided to the
+ * CSS-API host-code by the environment in which the CSS-API code runs.
+ */
+
+/** Memory allocation attributes, for use in ia_css_css_mem_env. */
+enum ia_css_mem_attr {
+ IA_CSS_MEM_ATTR_CACHED = 1 << 0,
+ IA_CSS_MEM_ATTR_ZEROED = 1 << 1,
+ IA_CSS_MEM_ATTR_PAGEALIGN = 1 << 2,
+ IA_CSS_MEM_ATTR_CONTIGUOUS = 1 << 3,
+};
+
+/** Environment with function pointers for local IA memory allocation.
+ * This provides the CSS code with environment specific functionality
+ * for memory allocation of small local buffers such as local data structures.
+ * This is never expected to allocate more than one page of memory (4K bytes).
+ */
+struct ia_css_cpu_mem_env {
+ void (*flush)(struct ia_css_acc_fw *fw);
+ /**< Flush function to flush the cache for given accelerator. */
+};
+
+/** Environment with function pointers to access the CSS hardware. This includes
+ * registers and local memories.
+ */
+struct ia_css_hw_access_env {
+ void (*store_8)(hrt_address addr, uint8_t data);
+ /**< Store an 8 bit value into an address in the CSS HW address space.
+ The address must be an 8 bit aligned address. */
+ void (*store_16)(hrt_address addr, uint16_t data);
+ /**< Store a 16 bit value into an address in the CSS HW address space.
+ The address must be a 16 bit aligned address. */
+ void (*store_32)(hrt_address addr, uint32_t data);
+ /**< Store a 32 bit value into an address in the CSS HW address space.
+ The address must be a 32 bit aligned address. */
+ uint8_t (*load_8)(hrt_address addr);
+ /**< Load an 8 bit value from an address in the CSS HW address
+ space. The address must be an 8 bit aligned address. */
+ uint16_t (*load_16)(hrt_address addr);
+ /**< Load a 16 bit value from an address in the CSS HW address
+ space. The address must be a 16 bit aligned address. */
+ uint32_t (*load_32)(hrt_address addr);
+ /**< Load a 32 bit value from an address in the CSS HW address
+ space. The address must be a 32 bit aligned address. */
+ void (*store)(hrt_address addr, const void *data, uint32_t bytes);
+ /**< Store a number of bytes into a byte-aligned address in the CSS HW address space. */
+ void (*load)(hrt_address addr, void *data, uint32_t bytes);
+ /**< Load a number of bytes from a byte-aligned address in the CSS HW address space. */
+};
+
+/** Environment with function pointers to print error and debug messages.
+ */
+struct ia_css_print_env {
+ int (*debug_print)(const char *fmt, va_list args);
+ /**< Print a debug message. */
+ int (*error_print)(const char *fmt, va_list args);
+ /**< Print an error message.*/
+};
+
+/** Environment structure. This includes function pointers to access several
+ * features provided by the environment in which the CSS API is used.
+ * This is used to run the camera IP in multiple platforms such as Linux,
+ * Windows and several simulation environments.
+ */
+struct ia_css_env {
+ struct ia_css_cpu_mem_env cpu_mem_env; /**< local flush. */
+ struct ia_css_hw_access_env hw_access_env; /**< CSS HW access functions */
+ struct ia_css_print_env print_env; /**< Message printing env. */
+};
+
+#endif /* __IA_CSS_ENV_H */
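
To show how the environment is meant to be populated, a hedged sketch with illustrative host callbacks; a real driver would hook these up to its MMIO accessors and logging facilities, and would fill in the remaining 8/16-bit and bulk accessors the same way. The resulting host_env would then be passed to ia_css_init() (see ia_css_control.h).

    #include "ia_css_env.h"

    static void host_store_32(hrt_address addr, uint32_t data)
    {
        /* illustrative: MMIO write would go here */
        (void)addr; (void)data;
    }

    static uint32_t host_load_32(hrt_address addr)
    {
        /* illustrative: MMIO read would go here */
        (void)addr;
        return 0;
    }

    static int host_dbg_print(const char *fmt, va_list args)
    {
        /* illustrative: route to the host logging facility */
        (void)fmt; (void)args;
        return 0;
    }

    static struct ia_css_env host_env = {
        .hw_access_env = {
            .store_32 = host_store_32,
            .load_32  = host_load_32,
            /* store_8/16, load_8/16, store and load filled in the same way */
        },
        .print_env = {
            .debug_print = host_dbg_print,
            .error_print = host_dbg_print,
        },
    };
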
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_err.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_err.h
new file mode 100644
index 000000000000..572e4e55c69e
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_err.h
@@ -0,0 +1,63 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_ERR_H
+#define __IA_CSS_ERR_H
+
+/** @file
+ * This file contains possible return values for most
+ * functions in the CSS-API.
+ */
+
+/** Errors, these values are used as the return value for most
+ * functions in this API.
+ */
+enum ia_css_err {
+ IA_CSS_SUCCESS,
+ IA_CSS_ERR_INTERNAL_ERROR,
+ IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY,
+ IA_CSS_ERR_INVALID_ARGUMENTS,
+ IA_CSS_ERR_SYSTEM_NOT_IDLE,
+ IA_CSS_ERR_MODE_HAS_NO_VIEWFINDER,
+ IA_CSS_ERR_QUEUE_IS_FULL,
+ IA_CSS_ERR_QUEUE_IS_EMPTY,
+ IA_CSS_ERR_RESOURCE_NOT_AVAILABLE,
+ IA_CSS_ERR_RESOURCE_LIST_TO_SMALL,
+ IA_CSS_ERR_RESOURCE_ITEMS_STILL_ALLOCATED,
+ IA_CSS_ERR_RESOURCE_EXHAUSTED,
+ IA_CSS_ERR_RESOURCE_ALREADY_ALLOCATED,
+ IA_CSS_ERR_VERSION_MISMATCH,
+ IA_CSS_ERR_NOT_SUPPORTED
+};
+
+/** FW warnings. This enum contains a value for each warning that
+ * the SP FW can issue to indicate a potential performance issue.
+ */
+enum ia_css_fw_warning {
+ IA_CSS_FW_WARNING_NONE,
+ IA_CSS_FW_WARNING_ISYS_QUEUE_FULL, /**< CSS system delayed because of insufficient space in the ISys queue.
+ This warning can be avoided by dequeuing ISYS buffers in a more timely manner. */
+ IA_CSS_FW_WARNING_PSYS_QUEUE_FULL, /**< CSS system delayed because of insufficient space in the PSys queue.
+ This warning can be avoided by dequeuing PSYS buffers in a more timely manner. */
+ IA_CSS_FW_WARNING_CIRCBUF_ALL_LOCKED, /**< CSS system delayed because of insufficient available buffers.
+ This warning can be avoided by unlocking locked frame-buffers in a more timely manner. */
+ IA_CSS_FW_WARNING_EXP_ID_LOCKED, /**< Exposure ID skipped because the frame associated with it was still locked.
+ This warning can be avoided by unlocking locked frame-buffers in a more timely manner. */
+ IA_CSS_FW_WARNING_TAG_EXP_ID_FAILED, /**< Exposure ID cannot be found in the circular buffer.
+ This warning can be avoided by unlocking locked frame-buffers in a more timely manner. */
+ IA_CSS_FW_WARNING_FRAME_PARAM_MISMATCH, /**< Frame and param pair mismatched in tagger.
+ This warning can be avoided by providing a param set for each frame. */
+};
+
+#endif /* __IA_CSS_ERR_H */
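As a usage note, drivers typically reduce these codes to log strings; a small, hypothetical helper (not part of the patch) covering a few of the values defined above could look like this:

static const char *css_err_str(enum ia_css_err err)
{
        switch (err) {
        case IA_CSS_SUCCESS:                    return "success";
        case IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY: return "out of memory";
        case IA_CSS_ERR_QUEUE_IS_EMPTY:         return "queue empty";
        case IA_CSS_ERR_QUEUE_IS_FULL:          return "queue full";
        case IA_CSS_ERR_VERSION_MISMATCH:       return "firmware version mismatch";
        default:                                return "other CSS error";
        }
}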
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_event_public.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_event_public.h
new file mode 100644
index 000000000000..aaf349772abe
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_event_public.h
@@ -0,0 +1,196 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_EVENT_PUBLIC_H
+#define __IA_CSS_EVENT_PUBLIC_H
+
+/** @file
+ * This file contains CSS-API events functionality
+ */
+
+#include <type_support.h> /* uint8_t */
+#include <ia_css_err.h> /* ia_css_err */
+#include <ia_css_types.h> /* ia_css_pipe */
+#include <ia_css_timer.h> /* ia_css_timer */
+
+/** The event type distinguishes the kinds of events that
+ * can be generated by the CSS system.
+ *
+ * !!!IMPORTANT!!! KEEP THE FOLLOWING IN SYNC:
+ * 1) "enum ia_css_event_type" (ia_css_event_public.h)
+ * 2) "enum sh_css_sp_event_type" (sh_css_internal.h)
+ * 3) "enum ia_css_event_type event_id_2_event_mask" (event_handler.sp.c)
+ * 4) "enum ia_css_event_type convert_event_sp_to_host_domain" (sh_css.c)
+ */
+enum ia_css_event_type {
+ IA_CSS_EVENT_TYPE_OUTPUT_FRAME_DONE = 1 << 0,
+ /**< Output frame ready. */
+ IA_CSS_EVENT_TYPE_SECOND_OUTPUT_FRAME_DONE = 1 << 1,
+ /**< Second output frame ready. */
+ IA_CSS_EVENT_TYPE_VF_OUTPUT_FRAME_DONE = 1 << 2,
+ /**< Viewfinder Output frame ready. */
+ IA_CSS_EVENT_TYPE_SECOND_VF_OUTPUT_FRAME_DONE = 1 << 3,
+ /**< Second viewfinder Output frame ready. */
+ IA_CSS_EVENT_TYPE_3A_STATISTICS_DONE = 1 << 4,
+ /**< Indication that 3A statistics are available. */
+ IA_CSS_EVENT_TYPE_DIS_STATISTICS_DONE = 1 << 5,
+ /**< Indication that DIS statistics are available. */
+ IA_CSS_EVENT_TYPE_PIPELINE_DONE = 1 << 6,
+ /**< Pipeline Done event, sent after last pipeline stage. */
+ IA_CSS_EVENT_TYPE_FRAME_TAGGED = 1 << 7,
+ /**< Frame tagged. */
+ IA_CSS_EVENT_TYPE_INPUT_FRAME_DONE = 1 << 8,
+ /**< Input frame ready. */
+ IA_CSS_EVENT_TYPE_METADATA_DONE = 1 << 9,
+ /**< Metadata ready. */
+ IA_CSS_EVENT_TYPE_LACE_STATISTICS_DONE = 1 << 10,
+ /**< Indication that LACE statistics are available. */
+ IA_CSS_EVENT_TYPE_ACC_STAGE_COMPLETE = 1 << 11,
+ /**< Extension stage complete. */
+ IA_CSS_EVENT_TYPE_TIMER = 1 << 12,
+ /**< Timer event for measuring the SP side latencies. It contains the
+ 32-bit timer value from the SP */
+ IA_CSS_EVENT_TYPE_PORT_EOF = 1 << 13,
+ /**< End Of Frame event, sent when in buffered sensor mode. */
+ IA_CSS_EVENT_TYPE_FW_WARNING = 1 << 14,
+ /**< Performance warning encountered by FW */
+ IA_CSS_EVENT_TYPE_FW_ASSERT = 1 << 15,
+ /**< Assertion hit by FW */
+};
+
+#define IA_CSS_EVENT_TYPE_NONE 0
+
+/** IA_CSS_EVENT_TYPE_ALL is a mask for all pipe related events.
+ * The other events (such as PORT_EOF) cannot be enabled/disabled
+ * and are hence excluded from this macro.
+ */
+#define IA_CSS_EVENT_TYPE_ALL \
+ (IA_CSS_EVENT_TYPE_OUTPUT_FRAME_DONE | \
+ IA_CSS_EVENT_TYPE_SECOND_OUTPUT_FRAME_DONE | \
+ IA_CSS_EVENT_TYPE_VF_OUTPUT_FRAME_DONE | \
+ IA_CSS_EVENT_TYPE_SECOND_VF_OUTPUT_FRAME_DONE | \
+ IA_CSS_EVENT_TYPE_3A_STATISTICS_DONE | \
+ IA_CSS_EVENT_TYPE_DIS_STATISTICS_DONE | \
+ IA_CSS_EVENT_TYPE_PIPELINE_DONE | \
+ IA_CSS_EVENT_TYPE_FRAME_TAGGED | \
+ IA_CSS_EVENT_TYPE_INPUT_FRAME_DONE | \
+ IA_CSS_EVENT_TYPE_METADATA_DONE | \
+ IA_CSS_EVENT_TYPE_LACE_STATISTICS_DONE | \
+ IA_CSS_EVENT_TYPE_ACC_STAGE_COMPLETE)
+
+/** The event struct, container for the event type and its related values.
+ * Depending on the event type, either pipe or port will be filled.
+ * Pipeline-related events (such as buffer/frame events) return a valid, filled pipe handle.
+ * For non-pipeline-related events that are stream specific (such as the EOF event), the port
+ * will be filled instead.
+ */
+struct ia_css_event {
+ struct ia_css_pipe *pipe;
+ /**< Pipe handle on which event happened, NULL for non pipe related
+ events. */
+ enum ia_css_event_type type;
+ /**< Type of Event, always valid/filled. */
+ uint8_t port;
+ /**< Port number for EOF event (not valid for other events). */
+ uint8_t exp_id;
+ /**< Exposure id for EOF/FRAME_TAGGED/FW_WARNING event (not valid for other events)
+ The exposure ID is unique only within a logical stream and it is
+ only generated on systems that have an input system (such as 2400
+ and 2401).
+ Most outputs produced by the CSS are tagged with an exposure ID.
+ This allows users of the CSS API to keep track of which buffer
+ was generated from which sensor output frame. This includes:
+ EOF event, output frames, 3A statistics, DVS statistics and
+ sensor metadata.
+ Exposure IDs start at IA_CSS_MIN_EXPOSURE_ID, increment by one
+ until IA_CSS_MAX_EXPOSURE_ID is reached, after that they wrap
+ around to IA_CSS_MIN_EXPOSURE_ID again.
+ Note that in case frames are dropped, this will not be reflected
+ in the exposure IDs. Therefore, applications should not use this
+ to detect frame drops. */
+ uint32_t fw_handle;
+ /**< Firmware Handle for ACC_STAGE_COMPLETE event (not valid for other
+ events). */
+ enum ia_css_fw_warning fw_warning;
+ /**< Firmware warning code, only for WARNING events. */
+ uint8_t fw_assert_module_id;
+ /**< Firmware module id, only for ASSERT events, should be logged by driver. */
+ uint16_t fw_assert_line_no;
+ /**< Firmware line number, only for ASSERT events, should be logged by driver. */
+ clock_value_t timer_data;
+ /**< For storing the full 32-bit of the timer value. Valid only for TIMER
+ event */
+ uint8_t timer_code;
+ /**< For storing the code of the TIMER event. Valid only for
+ TIMER event */
+ uint8_t timer_subcode;
+ /**< For storing the subcode of the TIMER event. Valid only
+ for TIMER event */
+};
+
+/** @brief Dequeue a PSYS event from the CSS system.
+ *
+ * @param[out] event Pointer to the event struct which will be filled by
+ * this function if an event is available.
+ * @return IA_CSS_ERR_QUEUE_IS_EMPTY if no events are
+ * available or
+ * IA_CSS_SUCCESS otherwise.
+ *
+ * This function dequeues an event from the PSYS event queue. The queue is
+ * between the Host CPU and the CSS system. This function can be
+ * called after an interrupt has been generated that signals that a new event
+ * is available, and it can be used in a polling-like fashion where the
+ * IA_CSS_ERR_QUEUE_IS_EMPTY return value indicates that no event was available.
+ */
+enum ia_css_err
+ia_css_dequeue_psys_event(struct ia_css_event *event);
+
+/** @brief Dequeue an event from the CSS system.
+ *
+ * @param[out] event Pointer to the event struct which will be filled by
+ * this function if an event is available.
+ * @return IA_CSS_ERR_QUEUE_IS_EMPTY if no events are
+ * available or
+ * IA_CSS_SUCCESS otherwise.
+ *
+ * deprecated{Use ia_css_dequeue_psys_event instead}.
+ * Unless the isys event queue is explicitly enabled, this function will
+ * dequeue both isys (EOF) and psys events (all others).
+ */
+enum ia_css_err
+ia_css_dequeue_event(struct ia_css_event *event);
+
+/** @brief Dequeue an ISYS event from the CSS system.
+ *
+ * @param[out] event Pointer to the event struct which will be filled by
+ * this function if an event is available.
+ * @return IA_CSS_ERR_QUEUE_IS_EMPTY if no events are
+ * available or
+ * IA_CSS_SUCCESS otherwise.
+ *
+ * This function dequeues an event from the ISYS event queue. The queue is
+ * between host and the CSS system.
+ * Unlike the ia_css_dequeue_event() function, this function can be called
+ * directly from an interrupt service routine (ISR) and it is safe to call
+ * this function in parallel with other CSS API functions (but only one
+ * call to this function should be in flight at any point in time).
+ *
+ * The reason for having the ISYS events separate is to prevent them from
+ * incurring additional latency due to locks being held by other CSS API
+ * functions.
+ */
+enum ia_css_err
+ia_css_dequeue_isys_event(struct ia_css_event *event);
+
+#endif /* __IA_CSS_EVENT_PUBLIC_H */
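A minimal sketch of the dequeue pattern described above: drain the PSYS queue until IA_CSS_ERR_QUEUE_IS_EMPTY is returned and switch on the event type. The function name and handler bodies are placeholders; the API call, return codes and event fields come from this header.

static void drain_psys_events(void)
{
        struct ia_css_event event;

        while (ia_css_dequeue_psys_event(&event) == IA_CSS_SUCCESS) {
                switch (event.type) {
                case IA_CSS_EVENT_TYPE_OUTPUT_FRAME_DONE:
                        /* event.pipe identifies the pipe that produced the frame */
                        break;
                case IA_CSS_EVENT_TYPE_FW_WARNING:
                        /* event.fw_warning and event.exp_id are valid here */
                        break;
                default:
                        break;
                }
        }
}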
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_firmware.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_firmware.h
new file mode 100644
index 000000000000..06d375a09be2
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_firmware.h
@@ -0,0 +1,74 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_FIRMWARE_H
+#define __IA_CSS_FIRMWARE_H
+
+/** @file
+ * This file contains firmware loading/unloading support functionality
+ */
+
+#include "ia_css_err.h"
+#include "ia_css_env.h"
+
+/** CSS firmware package structure.
+ */
+struct ia_css_fw {
+ void *data; /**< pointer to the firmware data */
+ unsigned int bytes; /**< length in bytes of firmware data */
+};
+
+/** @brief Loads the firmware
+ * @param[in] env Environment, provides functions to access the
+ * environment in which the CSS code runs. This is
+ * used for host side memory access and message
+ * printing.
+ * @param[in] fw Firmware package containing the firmware for all
+ * predefined ISP binaries.
+ * @return Returns IA_CSS_ERR_INTERNAL_ERROR in case of any
+ * errors and IA_CSS_SUCCESS otherwise.
+ *
+ * This function interprets the firmware package. All
+ * contents of this firmware package are copied into local data structures, so
+ * the fw pointer could be freed after this function completes.
+ *
+ * The rationale for this function is that it can be called before ia_css_init, and thus
+ * speeds up ia_css_init (ia_css_init is called each time a stream is created but the
+ * firmware only needs to be loaded once).
+ */
+enum ia_css_err
+ia_css_load_firmware(const struct ia_css_env *env,
+ const struct ia_css_fw *fw);
+
+/** @brief Unloads the firmware
+ * @return None
+ *
+ * This function unloads the firmware loaded by ia_css_load_firmware.
+ * Calling it when no firmware is loaded is harmless and has no effect.
+ * Use this to deallocate all memory associated with the firmware.
+ */
+void
+ia_css_unload_firmware(void);
+
+/** @brief Checks firmware version
+ * @param[in] fw Firmware package containing the firmware for all
+ * predefined ISP binaries.
+ * @return Returns true when the firmware version matches the CSS
+ * host code version, and false otherwise.
+ *
+ * This function checks whether the firmware package version matches the CSS host code version.
+ */
+bool
+ia_css_check_firmware_version(const struct ia_css_fw *fw);
+
+#endif /* __IA_CSS_FIRMWARE_H */
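A hedged sketch of the expected call order for this API: verify the package version, then load it; per the comment above, the firmware blob may be released once ia_css_load_firmware() returns. The wrapper name and its arguments are illustrative only.

static enum ia_css_err bring_up_firmware(const struct ia_css_env *env,
                                         void *blob, unsigned int bytes)
{
        struct ia_css_fw fw = { .data = blob, .bytes = bytes };

        if (!ia_css_check_firmware_version(&fw))
                return IA_CSS_ERR_VERSION_MISMATCH;
        /* the package contents are copied internally, so blob may be freed afterwards */
        return ia_css_load_firmware(env, &fw);
}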
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_frac.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_frac.h
new file mode 100644
index 000000000000..da9c60144c6d
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_frac.h
@@ -0,0 +1,37 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _IA_CSS_FRAC_H
+#define _IA_CSS_FRAC_H
+
+/** @file
+ * This file contains typedefs used for fractional numbers
+ */
+
+#include <type_support.h>
+
+/* Fixed point types.
+ * NOTE: the 16 bit fixed point types actually occupy 32 bits
+ * to save on extension operations in the ISP code.
+ */
+/** Unsigned fixed point value, 0 integer bits, 16 fractional bits */
+typedef uint32_t ia_css_u0_16;
+/** Unsigned fixed point value, 5 integer bits, 11 fractional bits */
+typedef uint32_t ia_css_u5_11;
+/** Unsigned fixed point value, 8 integer bits, 8 fractional bits */
+typedef uint32_t ia_css_u8_8;
+/** Signed fixed point value, 0 integer bits, 15 fractional bits */
+typedef int32_t ia_css_s0_15;
+
+#endif /* _IA_CSS_FRAC_H */
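For illustration, encoding a value in one of these Qm.n formats amounts to scaling by 2^n; a small sketch, assuming truncation of excess fractional bits is acceptable to the caller:

/* 1.5 in u8.8 is (1 << 8) | 0x80 = 0x0180; 0.5 in u0.16 is 1 << 15 */
static ia_css_u8_8 encode_u8_8(unsigned int integer, unsigned int frac_256ths)
{
        return (ia_css_u8_8)((integer << 8) | (frac_256ths & 0xff));
}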
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_frame_format.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_frame_format.h
new file mode 100644
index 000000000000..d534fbd91380
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_frame_format.h
@@ -0,0 +1,101 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_FRAME_FORMAT_H
+#define __IA_CSS_FRAME_FORMAT_H
+
+/** @file
+ * This file contains information about formats supported in the ISP
+ */
+
+/** Frame formats, some of these come from fourcc.org, others are
+ better explained by video4linux2. The NV11 seems to be described only
+ on MSDN pages, but even those seem to be gone now.
+ Frames can come in many forms, the main categories are RAW, RGB and YUV
+ (or YCbCr). The YUV frames come in 4 flavors, determined by how the U and V
+ values are subsampled:
+ 1. YUV420: hor = 2, ver = 2
+ 2. YUV411: hor = 4, ver = 1
+ 3. YUV422: hor = 2, ver = 1
+ 4. YUV444: hor = 1, ver = 1
+
+ Warning: not all frame formats are supported as input or output to/from ISP.
+ Some of these formats are therefore not defined in the output table module.
+ Modifications in below frame format enum can require modifications in the
+ output table module.
+
+ Warning2: Throughout the CSS code assumptions are made on the order
+ of formats in this enumeration type, or some sort of copy is maintained.
+ The following files are identified:
+ - FileSupport.h
+ - css/isp/kernels/fc/fc_1.0/formats.isp.c
+ - css/isp/kernels/output/output_1.0/output_table.isp.c
+ - css/isp/kernels/output/sc_output_1.0/formats.hive.c
+ - css/isp/modes/interface/isp_formats.isp.h
+ - css/bxt_sandbox/psyspoc/interface/ia_css_pg_info.h
+ - css/bxt_sandbox/psysapi/data/interface/ia_css_program_group_data.h
+ - css/bxt_sandbox/isysapi/interface/ia_css_isysapi_fw_types.h
+*/
+enum ia_css_frame_format {
+ IA_CSS_FRAME_FORMAT_NV11 = 0, /**< 12 bit YUV 411, Y, UV plane */
+ IA_CSS_FRAME_FORMAT_NV12, /**< 12 bit YUV 420, Y, UV plane */
+ IA_CSS_FRAME_FORMAT_NV12_16, /**< 16 bit YUV 420, Y, UV plane */
+ IA_CSS_FRAME_FORMAT_NV12_TILEY, /**< 12 bit YUV 420, Intel proprietary tiled format, TileY */
+ IA_CSS_FRAME_FORMAT_NV16, /**< 16 bit YUV 422, Y, UV plane */
+ IA_CSS_FRAME_FORMAT_NV21, /**< 12 bit YUV 420, Y, VU plane */
+ IA_CSS_FRAME_FORMAT_NV61, /**< 16 bit YUV 422, Y, VU plane */
+ IA_CSS_FRAME_FORMAT_YV12, /**< 12 bit YUV 420, Y, V, U plane */
+ IA_CSS_FRAME_FORMAT_YV16, /**< 16 bit YUV 422, Y, V, U plane */
+ IA_CSS_FRAME_FORMAT_YUV420, /**< 12 bit YUV 420, Y, U, V plane */
+ IA_CSS_FRAME_FORMAT_YUV420_16, /**< yuv420, 16 bits per subpixel */
+ IA_CSS_FRAME_FORMAT_YUV422, /**< 16 bit YUV 422, Y, U, V plane */
+ IA_CSS_FRAME_FORMAT_YUV422_16, /**< yuv422, 16 bits per subpixel */
+ IA_CSS_FRAME_FORMAT_UYVY, /**< 16 bit YUV 422, UYVY interleaved */
+ IA_CSS_FRAME_FORMAT_YUYV, /**< 16 bit YUV 422, YUYV interleaved */
+ IA_CSS_FRAME_FORMAT_YUV444, /**< 24 bit YUV 444, Y, U, V plane */
+ IA_CSS_FRAME_FORMAT_YUV_LINE, /**< Internal format, 2 y lines followed
+ by a uvinterleaved line */
+ IA_CSS_FRAME_FORMAT_RAW, /**< RAW, 1 plane */
+ IA_CSS_FRAME_FORMAT_RGB565, /**< 16 bit RGB, 1 plane. Each 3 sub
+ pixels are packed into one 16 bit
+ value, 5 bits for R, 6 bits for G
+ and 5 bits for B. */
+ IA_CSS_FRAME_FORMAT_PLANAR_RGB888, /**< 24 bit RGB, 3 planes */
+ IA_CSS_FRAME_FORMAT_RGBA888, /**< 32 bit RGBA, 1 plane, A=Alpha
+ (alpha is unused) */
+ IA_CSS_FRAME_FORMAT_QPLANE6, /**< Internal, for advanced ISP */
+ IA_CSS_FRAME_FORMAT_BINARY_8, /**< byte stream, used for jpeg. For
+ frames of this type, we set the
+ height to 1 and the width to the
+ number of allocated bytes. */
+ IA_CSS_FRAME_FORMAT_MIPI, /**< MIPI frame, 1 plane */
+ IA_CSS_FRAME_FORMAT_RAW_PACKED, /**< RAW, 1 plane, packed */
+ IA_CSS_FRAME_FORMAT_CSI_MIPI_YUV420_8, /**< 8 bit per Y/U/V.
+ Y odd line; UYVY
+ interleaved even line */
+ IA_CSS_FRAME_FORMAT_CSI_MIPI_LEGACY_YUV420_8, /**< Legacy YUV420. UY odd
+ line; VY even line */
+ IA_CSS_FRAME_FORMAT_CSI_MIPI_YUV420_10 /**< 10 bit per Y/U/V. Y odd
+ line; UYVY interleaved
+ even line */
+};
+
+/* NOTE: IA_CSS_FRAME_FORMAT_NUM was purposely defined outside of enum type ia_css_frame_format, */
+/* because of issues this would cause with the Clockwork code checking tool. */
+#define IA_CSS_FRAME_FORMAT_NUM (IA_CSS_FRAME_FORMAT_CSI_MIPI_YUV420_10 + 1)
+
+/** Number of valid output frame formats for ISP **/
+#define IA_CSS_FRAME_OUT_FORMAT_NUM (IA_CSS_FRAME_FORMAT_RGBA888 + 1)
+
+#endif /* __IA_CSS_FRAME_FORMAT_H */
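The subsampling factors listed in the comment above translate directly into chroma plane dimensions; a hypothetical helper (not part of the patch) covering the planar YUV cases:

static unsigned int chroma_width(enum ia_css_frame_format fmt,
                                 unsigned int luma_width)
{
        switch (fmt) {
        case IA_CSS_FRAME_FORMAT_YUV420: return luma_width / 2; /* hor = 2 */
        case IA_CSS_FRAME_FORMAT_YUV422: return luma_width / 2; /* hor = 2 */
        case IA_CSS_FRAME_FORMAT_YUV444: return luma_width;     /* hor = 1 */
        default:                         return luma_width;     /* not covered here */
        }
}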
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_frame_public.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_frame_public.h
new file mode 100644
index 000000000000..92f2389176b2
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_frame_public.h
@@ -0,0 +1,365 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_FRAME_PUBLIC_H
+#define __IA_CSS_FRAME_PUBLIC_H
+
+/** @file
+ * This file contains structs to describe various frame-formats supported by the ISP.
+ */
+
+#include <type_support.h>
+#include "ia_css_err.h"
+#include "ia_css_types.h"
+#include "ia_css_frame_format.h"
+#include "ia_css_buffer.h"
+
+/** For RAW input, the bayer order needs to be specified separately. There
+ * are 4 possible orders. The name is constructed by taking the first two
+ * colors on the first line and the first two colors from the second line.
+ */
+enum ia_css_bayer_order {
+ IA_CSS_BAYER_ORDER_GRBG, /**< GRGRGRGRGR .. BGBGBGBGBG */
+ IA_CSS_BAYER_ORDER_RGGB, /**< RGRGRGRGRG .. GBGBGBGBGB */
+ IA_CSS_BAYER_ORDER_BGGR, /**< BGBGBGBGBG .. GRGRGRGRGR */
+ IA_CSS_BAYER_ORDER_GBRG, /**< GBGBGBGBGB .. RGRGRGRGRG */
+};
+#define IA_CSS_BAYER_ORDER_NUM (IA_CSS_BAYER_ORDER_GBRG + 1)
+
+/** Frame plane structure. This describes one plane in an image
+ * frame buffer.
+ */
+struct ia_css_frame_plane {
+ unsigned int height; /**< height of a plane in lines */
+ unsigned int width; /**< width of a line, in DMA elements, note that
+ for RGB565 the three subpixels are stored in
+ one element. For all other formats this is
+ the number of subpixels per line. */
+ unsigned int stride; /**< stride of a line in bytes */
+ unsigned int offset; /**< offset in bytes to start of frame data.
+ offset is wrt data field in ia_css_frame */
+};
+
+/** Binary "plane". This is used to store binary streams such as jpeg
+ * images. This is not actually a real plane.
+ */
+struct ia_css_frame_binary_plane {
+ unsigned int size; /**< number of bytes in the stream */
+ struct ia_css_frame_plane data; /**< plane */
+};
+
+/** Container for planar YUV frames. This contains 3 planes.
+ */
+struct ia_css_frame_yuv_planes {
+ struct ia_css_frame_plane y; /**< Y plane */
+ struct ia_css_frame_plane u; /**< U plane */
+ struct ia_css_frame_plane v; /**< V plane */
+};
+
+/** Container for semi-planar YUV frames.
+ */
+struct ia_css_frame_nv_planes {
+ struct ia_css_frame_plane y; /**< Y plane */
+ struct ia_css_frame_plane uv; /**< UV plane */
+};
+
+/** Container for planar RGB frames. Each color has its own plane.
+ */
+struct ia_css_frame_rgb_planes {
+ struct ia_css_frame_plane r; /**< Red plane */
+ struct ia_css_frame_plane g; /**< Green plane */
+ struct ia_css_frame_plane b; /**< Blue plane */
+};
+
+/** Container for 6-plane frames. These frames are used internally
+ * in the advanced ISP only.
+ */
+struct ia_css_frame_plane6_planes {
+ struct ia_css_frame_plane r; /**< Red plane */
+ struct ia_css_frame_plane r_at_b; /**< Red at blue plane */
+ struct ia_css_frame_plane gr; /**< Red-green plane */
+ struct ia_css_frame_plane gb; /**< Blue-green plane */
+ struct ia_css_frame_plane b; /**< Blue plane */
+ struct ia_css_frame_plane b_at_r; /**< Blue at red plane */
+};
+
+/* Crop info struct - stores the lines to be cropped in isp */
+struct ia_css_crop_info {
+ /* the final start column and start line
+ * sum of lines to be cropped + bayer offset
+ */
+ unsigned int start_column;
+ unsigned int start_line;
+};
+
+/** Frame info struct. This describes the contents of an image frame buffer.
+ */
+struct ia_css_frame_info {
+ struct ia_css_resolution res; /**< Frame resolution (valid data) */
+ unsigned int padded_width; /**< stride of line in memory (in pixels) */
+ enum ia_css_frame_format format; /**< format of the frame data */
+ unsigned int raw_bit_depth; /**< number of valid bits per pixel,
+ only valid for RAW bayer frames */
+ enum ia_css_bayer_order raw_bayer_order; /**< bayer order, only valid
+ for RAW bayer frames */
+ /* the params below are computed based on bayer_order
+ * we can remove the raw_bayer_order if it is redundant
+ * keeping it for now as bxt and fpn code seem to use it
+ */
+ struct ia_css_crop_info crop_info;
+};
+
+#define IA_CSS_BINARY_DEFAULT_FRAME_INFO \
+{ \
+ {0, /* width */ \
+ 0}, /* height */ \
+ 0, /* padded_width */ \
+ IA_CSS_FRAME_FORMAT_NUM, /* format */ \
+ 0, /* raw_bit_depth */ \
+ IA_CSS_BAYER_ORDER_NUM, /* raw_bayer_order */ \
+ {0, /*start col */ \
+ 0}, /*start line */ \
+}
+
+/**
+ * Specifies the DVS loop delay in "frame periods"
+ */
+enum ia_css_frame_delay {
+ IA_CSS_FRAME_DELAY_0, /**< Frame delay = 0 */
+ IA_CSS_FRAME_DELAY_1, /**< Frame delay = 1 */
+ IA_CSS_FRAME_DELAY_2 /**< Frame delay = 2 */
+};
+
+enum ia_css_frame_flash_state {
+ IA_CSS_FRAME_FLASH_STATE_NONE,
+ IA_CSS_FRAME_FLASH_STATE_PARTIAL,
+ IA_CSS_FRAME_FLASH_STATE_FULL
+};
+
+/** Frame structure. This structure describes an image buffer or frame.
+ * This is the main structure used for all input and output images.
+ */
+struct ia_css_frame {
+ struct ia_css_frame_info info; /**< info struct describing the frame */
+ ia_css_ptr data; /**< pointer to start of image data */
+ unsigned int data_bytes; /**< size of image data in bytes */
+ /* LA: move this to ia_css_buffer */
+ /*
+ * -1 if data address is static during life time of pipeline
+ * >=0 if data address can change per pipeline/frame iteration
+ * index to dynamic data: ia_css_frame_in, ia_css_frame_out
+ * ia_css_frame_out_vf
+ * index to host-sp queue id: queue_0, queue_1 etc.
+ */
+ int dynamic_queue_id;
+ /*
+ * if it is dynamic frame, buf_type indicates which buffer type it
+ * should use for event generation. we have this because in vf_pp
+ * binary, we use output port, but we expect VF_OUTPUT_DONE event
+ */
+ enum ia_css_buffer_type buf_type;
+ enum ia_css_frame_flash_state flash_state;
+ unsigned int exp_id;
+ /**< exposure id, see ia_css_event_public.h for more detail */
+ uint32_t isp_config_id; /**< Unique ID to track which config was actually applied to a particular frame */
+ bool valid; /**< First video output frame is not valid */
+ bool contiguous; /**< memory is allocated physically contiguously */
+ union {
+ unsigned int _initialisation_dummy;
+ struct ia_css_frame_plane raw;
+ struct ia_css_frame_plane rgb;
+ struct ia_css_frame_rgb_planes planar_rgb;
+ struct ia_css_frame_plane yuyv;
+ struct ia_css_frame_yuv_planes yuv;
+ struct ia_css_frame_nv_planes nv;
+ struct ia_css_frame_plane6_planes plane6;
+ struct ia_css_frame_binary_plane binary;
+ } planes; /**< frame planes, select the right one based on
+ info.format */
+};
+
+#define DEFAULT_FRAME \
+{ \
+ IA_CSS_BINARY_DEFAULT_FRAME_INFO, /* info */ \
+ 0, /* data */ \
+ 0, /* data_bytes */ \
+ SH_CSS_INVALID_QUEUE_ID, /* dynamic_data_index */ \
+ IA_CSS_BUFFER_TYPE_INVALID, /* buf_type */ \
+ IA_CSS_FRAME_FLASH_STATE_NONE, /* flash_state */ \
+ 0, /* exp_id */ \
+ 0, /* isp_config_id */ \
+ false, /* valid */ \
+ false, /* contiguous */ \
+ { 0 } /* planes */ \
+}
+
+/** @brief Fill a frame with zeros
+ *
+ * @param frame The frame.
+ * @return None
+ *
+ * Fill a frame with pixel values of zero
+ */
+void ia_css_frame_zero(struct ia_css_frame *frame);
+
+/** @brief Allocate a CSS frame structure
+ *
+ * @param frame The allocated frame.
+ * @param width The width (in pixels) of the frame.
+ * @param height The height (in lines) of the frame.
+ * @param format The frame format.
+ * @param stride The padded stride, in pixels.
+ * @param raw_bit_depth The raw bit depth, in bits.
+ * @return The error code.
+ *
+ * Allocate a CSS frame structure. The memory for the frame data will be
+ * allocated in the CSS address space.
+ */
+enum ia_css_err
+ia_css_frame_allocate(struct ia_css_frame **frame,
+ unsigned int width,
+ unsigned int height,
+ enum ia_css_frame_format format,
+ unsigned int stride,
+ unsigned int raw_bit_depth);
+
+/** @brief Allocate a CSS frame structure using a frame info structure.
+ *
+ * @param frame The allocated frame.
+ * @param[in] info The frame info structure.
+ * @return The error code.
+ *
+ * Allocate a frame using the resolution and format from a frame info struct.
+ * This is a convenience function, implemented on top of
+ * ia_css_frame_allocate().
+ */
+enum ia_css_err
+ia_css_frame_allocate_from_info(struct ia_css_frame **frame,
+ const struct ia_css_frame_info *info);
+/** @brief Free a CSS frame structure.
+ *
+ * @param[in] frame Pointer to the frame.
+ * @return None
+ *
+ * Free a CSS frame structure. This will free both the frame structure
+ * and the pixel data pointer contained within the frame structure.
+ */
+void
+ia_css_frame_free(struct ia_css_frame *frame);
+
+/** @brief Allocate a contiguous CSS frame structure
+ *
+ * @param frame The allocated frame.
+ * @param width The width (in pixels) of the frame.
+ * @param height The height (in lines) of the frame.
+ * @param format The frame format.
+ * @param stride The padded stride, in pixels.
+ * @param raw_bit_depth The raw bit depth, in bits.
+ * @return The error code.
+ *
+ * Contiguous frame allocation, only for FPGA display driver which needs
+ * physically contiguous memory.
+ * Deprecated.
+ */
+enum ia_css_err
+ia_css_frame_allocate_contiguous(struct ia_css_frame **frame,
+ unsigned int width,
+ unsigned int height,
+ enum ia_css_frame_format format,
+ unsigned int stride,
+ unsigned int raw_bit_depth);
+
+/** @brief Allocate a contiguous CSS frame from a frame info structure.
+ *
+ * @param frame The allocated frame.
+ * @param[in] info The frame info structure.
+ * @return The error code.
+ *
+ * Allocate a frame using the resolution and format from a frame info struct.
+ * This is a convenience function, implemented on top of
+ * ia_css_frame_allocate_contiguous().
+ * Only for FPGA display driver which needs physically contiguous memory.
+ * Deprecated.
+ */
+enum ia_css_err
+ia_css_frame_allocate_contiguous_from_info(struct ia_css_frame **frame,
+ const struct ia_css_frame_info *info);
+
+/** @brief Allocate a CSS frame structure using a frame info structure.
+ *
+ * @param frame The allocated frame.
+ * @param[in] info The frame info structure.
+ * @return The error code.
+ *
+ * Allocate an empty CSS frame with no data buffer using the parameters
+ * in the frame info.
+ */
+enum ia_css_err
+ia_css_frame_create_from_info(struct ia_css_frame **frame,
+ const struct ia_css_frame_info *info);
+
+/** @brief Set a mapped data buffer to a CSS frame
+ *
+ * @param[in] frame Valid CSS frame pointer
+ * @param[in] mapped_data Mapped data buffer to be assigned to the CSS frame
+ * @param[in] data_size_bytes Size of the mapped_data in bytes
+ * @return The error code.
+ *
+ * Sets a mapped data buffer for this frame. This function can be called multiple
+ * times with different buffers, or with NULL to reset the data pointer. This API
+ * will not attempt to free the mapped_data; it is the caller's responsibility to
+ * free the mapped_data buffer. However, if ia_css_frame_free() is called and
+ * the frame has a valid data buffer, it will be freed along with the frame.
+ */
+enum ia_css_err
+ia_css_frame_set_data(struct ia_css_frame *frame,
+ const ia_css_ptr mapped_data,
+ size_t data_size_bytes);
+
+/** @brief Map an existing frame data pointer to a CSS frame.
+ *
+ * @param frame Pointer to the frame to be initialized
+ * @param[in] info The frame info.
+ * @param[in] data Pointer to the allocated frame data.
+ * @param[in] attribute Attributes to be passed to mmgr_mmap.
+ * @param[in] context Pointer to a context to be passed to mmgr_mmap.
+ * @return The error code.
+ *
+ * This function maps a pre-allocated pointer into a CSS frame. This can be
+ * used when an upper software layer is responsible for allocating the frame
+ * data and it wants to share that frame pointer with the CSS code.
+ * This function will fill the CSS frame structure just like
+ * ia_css_frame_allocate() does, but instead of allocating the memory, it will
+ * map the pre-allocated memory into the CSS address space.
+ */
+enum ia_css_err
+ia_css_frame_map(struct ia_css_frame **frame,
+ const struct ia_css_frame_info *info,
+ const void *data,
+ uint16_t attribute,
+ void *context);
+
+/** @brief Unmap a CSS frame structure.
+ *
+ * @param[in] frame Pointer to the CSS frame.
+ * @return None
+ *
+ * This function unmaps the frame data pointer within a CSS frame and
+ * then frees the CSS frame structure. Use this for frame pointers created
+ * using ia_css_frame_map().
+ */
+void
+ia_css_frame_unmap(struct ia_css_frame *frame);
+
+#endif /* __IA_CSS_FRAME_PUBLIC_H */
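A short sketch of the typical allocate/free pairing for this API; the wrapper name is illustrative and the frame info is assumed to have been filled in by the caller.

static enum ia_css_err alloc_output_frame(const struct ia_css_frame_info *info,
                                          struct ia_css_frame **out)
{
        enum ia_css_err err = ia_css_frame_allocate_from_info(out, info);

        if (err != IA_CSS_SUCCESS)
                return err;
        /* hand *out to the pipeline; release later with ia_css_frame_free(*out) */
        return IA_CSS_SUCCESS;
}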
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_host_data.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_host_data.h
new file mode 100644
index 000000000000..4557e66891df
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_host_data.h
@@ -0,0 +1,46 @@
+/* Release Version: irci_stable_candrpv_0415_20150521_0458 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __SH_CSS_HOST_DATA_H
+#define __SH_CSS_HOST_DATA_H
+
+#include <ia_css_types.h> /* ia_css_pipe */
+
+/**
+ * @brief Allocate structure ia_css_host_data.
+ *
+ * @param[in] size Size of the requested host data
+ *
+ * @return
+ * - NULL, can't allocate requested size
+ * - pointer to structure, field address points to host data with size bytes
+ */
+struct ia_css_host_data *
+ia_css_host_data_allocate(size_t size);
+
+/**
+ * @brief Free structure ia_css_host_data.
+ *
+ * @param[in] me Pointer to the structure. If NULL is passed, the function
+ * returns without error. Otherwise a valid pointer to the
+ * structure must be passed and the related memory
+ * is freed.
+ *
+ * @return None
+ */
+void ia_css_host_data_free(struct ia_css_host_data *me);
+
+#endif /* __SH_CSS_HOST_DATA_H */
+
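A minimal sketch of the allocate/free pairing documented above; the wrapper is hypothetical and only the two API calls come from this header.

static struct ia_css_host_data *make_host_blob(size_t size)
{
        struct ia_css_host_data *blob = ia_css_host_data_allocate(size);

        if (!blob)
                return NULL; /* requested size could not be allocated */
        /* per the comment above, the structure's address field points to 'size' bytes */
        return blob;
}
/* release with ia_css_host_data_free(blob); passing NULL is accepted and ignored */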
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_input_port.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_input_port.h
new file mode 100644
index 000000000000..8a17c3346caa
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_input_port.h
@@ -0,0 +1,66 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_INPUT_PORT_H
+#define __IA_CSS_INPUT_PORT_H
+
+/** @file
+ * This file contains information about the possible input ports for CSS
+ */
+
+/** Enumeration of the physical input ports on the CSS hardware.
+ * There are 3 MIPI CSI-2 ports.
+ */
+enum ia_css_csi2_port {
+ IA_CSS_CSI2_PORT0, /* Implicitly map to MIPI_PORT0_ID */
+ IA_CSS_CSI2_PORT1, /* Implicitly map to MIPI_PORT1_ID */
+ IA_CSS_CSI2_PORT2 /* Implicitly map to MIPI_PORT2_ID */
+};
+
+/** Backward compatible for CSS API 2.0 only
+ * TO BE REMOVED when all drivers move to CSS API 2.1
+ */
+#define IA_CSS_CSI2_PORT_4LANE IA_CSS_CSI2_PORT0
+#define IA_CSS_CSI2_PORT_1LANE IA_CSS_CSI2_PORT1
+#define IA_CSS_CSI2_PORT_2LANE IA_CSS_CSI2_PORT2
+
+/** The CSI2 interface supports 2 types of compression or can
+ * be run without compression.
+ */
+enum ia_css_csi2_compression_type {
+ IA_CSS_CSI2_COMPRESSION_TYPE_NONE, /**< No compression */
+ IA_CSS_CSI2_COMPRESSION_TYPE_1, /**< Compression scheme 1 */
+ IA_CSS_CSI2_COMPRESSION_TYPE_2 /**< Compression scheme 2 */
+};
+
+struct ia_css_csi2_compression {
+ enum ia_css_csi2_compression_type type;
+ /**< Compression used */
+ unsigned int compressed_bits_per_pixel;
+ /**< Compressed bits per pixel (only when compression is enabled) */
+ unsigned int uncompressed_bits_per_pixel;
+ /**< Uncompressed bits per pixel (only when compression is enabled) */
+};
+
+/** Input port structure.
+ */
+struct ia_css_input_port {
+ enum ia_css_csi2_port port; /**< Physical CSI-2 port */
+ unsigned int num_lanes; /**< Number of lanes used (4-lane port only) */
+ unsigned int timeout; /**< Timeout value */
+ unsigned int rxcount; /**< Register value, should include all lanes */
+ struct ia_css_csi2_compression compression; /**< Compression used */
+};
+
+#endif /* __IA_CSS_INPUT_PORT_H */
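As an illustration, a sketch of filling in struct ia_css_input_port for an uncompressed 4-lane sensor on port 0. The numeric timeout and rxcount values are placeholders, not recommended settings.

static void init_input_port(struct ia_css_input_port *port)
{
        port->port      = IA_CSS_CSI2_PORT0;
        port->num_lanes = 4;
        port->timeout   = 0x1000;     /* placeholder timeout value */
        port->rxcount   = 0x04040404; /* placeholder, must cover all lanes */
        port->compression.type = IA_CSS_CSI2_COMPRESSION_TYPE_NONE;
}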
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_irq.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_irq.h
new file mode 100644
index 000000000000..416ca4d28732
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_irq.h
@@ -0,0 +1,235 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_IRQ_H
+#define __IA_CSS_IRQ_H
+
+/** @file
+ * This file contains information for Interrupts/IRQs from CSS
+ */
+
+#include "ia_css_err.h"
+#include "ia_css_pipe_public.h"
+#include "ia_css_input_port.h"
+
+/** Interrupt types, these enumerate all supported interrupt types.
+ */
+enum ia_css_irq_type {
+ IA_CSS_IRQ_TYPE_EDGE, /**< Edge (level) sensitive interrupt */
+ IA_CSS_IRQ_TYPE_PULSE /**< Pulse-shaped interrupt */
+};
+
+/** Interrupt request type.
+ * When the CSS hardware generates an interrupt, a function in this API
+ * needs to be called to retrieve information about the interrupt.
+ * This interrupt type is part of this information and indicates what
+ * type of information the interrupt signals.
+ *
+ * Note that one interrupt can carry multiple interrupt types. For
+ * example: the online video ISP will generate only 2 interrupts, one to
+ * signal that the statistics (3a and DIS) are ready and one to signal
+ * that all output frames are done (output and viewfinder).
+ *
+ * DEPRECATED, this interface is not portable it should only define user
+ * (SW) interrupts
+ */
+enum ia_css_irq_info {
+ IA_CSS_IRQ_INFO_CSS_RECEIVER_ERROR = 1 << 0,
+ /**< the css receiver has encountered an error */
+ IA_CSS_IRQ_INFO_CSS_RECEIVER_FIFO_OVERFLOW = 1 << 1,
+ /**< the FIFO in the csi receiver has overflown */
+ IA_CSS_IRQ_INFO_CSS_RECEIVER_SOF = 1 << 2,
+ /**< the css receiver received the start of frame */
+ IA_CSS_IRQ_INFO_CSS_RECEIVER_EOF = 1 << 3,
+ /**< the css receiver received the end of frame */
+ IA_CSS_IRQ_INFO_CSS_RECEIVER_SOL = 1 << 4,
+ /**< the css receiver received the start of line */
+ IA_CSS_IRQ_INFO_PSYS_EVENTS_READY = 1 << 5,
+ /**< One or more events are available in the PSYS event queue */
+ IA_CSS_IRQ_INFO_EVENTS_READY = IA_CSS_IRQ_INFO_PSYS_EVENTS_READY,
+ /**< deprecated{obsolete version of IA_CSS_IRQ_INFO_PSYS_EVENTS_READY,
+ * same functionality.} */
+ IA_CSS_IRQ_INFO_CSS_RECEIVER_EOL = 1 << 6,
+ /**< the css receiver received the end of line */
+ IA_CSS_IRQ_INFO_CSS_RECEIVER_SIDEBAND_CHANGED = 1 << 7,
+ /**< the css receiver received a change in side band signals */
+ IA_CSS_IRQ_INFO_CSS_RECEIVER_GEN_SHORT_0 = 1 << 8,
+ /**< generic short packets (0) */
+ IA_CSS_IRQ_INFO_CSS_RECEIVER_GEN_SHORT_1 = 1 << 9,
+ /**< generic short packets (1) */
+ IA_CSS_IRQ_INFO_IF_PRIM_ERROR = 1 << 10,
+ /**< the primary input formatter (A) has encountered an error */
+ IA_CSS_IRQ_INFO_IF_PRIM_B_ERROR = 1 << 11,
+ /**< the primary input formatter (B) has encountered an error */
+ IA_CSS_IRQ_INFO_IF_SEC_ERROR = 1 << 12,
+ /**< the secondary input formatter has encountered an error */
+ IA_CSS_IRQ_INFO_STREAM_TO_MEM_ERROR = 1 << 13,
+ /**< the stream-to-memory device has encountered an error */
+ IA_CSS_IRQ_INFO_SW_0 = 1 << 14,
+ /**< software interrupt 0 */
+ IA_CSS_IRQ_INFO_SW_1 = 1 << 15,
+ /**< software interrupt 1 */
+ IA_CSS_IRQ_INFO_SW_2 = 1 << 16,
+ /**< software interrupt 2 */
+ IA_CSS_IRQ_INFO_ISP_BINARY_STATISTICS_READY = 1 << 17,
+ /**< ISP binary statistics are ready */
+ IA_CSS_IRQ_INFO_INPUT_SYSTEM_ERROR = 1 << 18,
+ /**< the input system is in error */
+ IA_CSS_IRQ_INFO_IF_ERROR = 1 << 19,
+ /**< the input formatter is in error */
+ IA_CSS_IRQ_INFO_DMA_ERROR = 1 << 20,
+ /**< the DMA is in error */
+ IA_CSS_IRQ_INFO_ISYS_EVENTS_READY = 1 << 21,
+ /**< end-of-frame events are ready in the isys_event queue */
+};
+
+/** CSS receiver error types. Whenever the CSS receiver has encountered
+ * an error, this enumeration is used to indicate which errors have occurred.
+ *
+ * Note that multiple error flags can be enabled at once and that this is in
+ * fact common (whenever an error occurs, it usually results in multiple
+ * errors).
+ *
+ * DEPRECATED: This interface is not portable, different systems have
+ * different receiver types, or possibly none in case of tests systems.
+ */
+enum ia_css_rx_irq_info {
+ IA_CSS_RX_IRQ_INFO_BUFFER_OVERRUN = 1U << 0, /**< buffer overrun */
+ IA_CSS_RX_IRQ_INFO_ENTER_SLEEP_MODE = 1U << 1, /**< entering sleep mode */
+ IA_CSS_RX_IRQ_INFO_EXIT_SLEEP_MODE = 1U << 2, /**< exited sleep mode */
+ IA_CSS_RX_IRQ_INFO_ECC_CORRECTED = 1U << 3, /**< ECC corrected */
+ IA_CSS_RX_IRQ_INFO_ERR_SOT = 1U << 4,
+ /**< Start of transmission */
+ IA_CSS_RX_IRQ_INFO_ERR_SOT_SYNC = 1U << 5, /**< SOT sync (??) */
+ IA_CSS_RX_IRQ_INFO_ERR_CONTROL = 1U << 6, /**< Control (??) */
+ IA_CSS_RX_IRQ_INFO_ERR_ECC_DOUBLE = 1U << 7, /**< Double ECC */
+ IA_CSS_RX_IRQ_INFO_ERR_CRC = 1U << 8, /**< CRC error */
+ IA_CSS_RX_IRQ_INFO_ERR_UNKNOWN_ID = 1U << 9, /**< Unknown ID */
+ IA_CSS_RX_IRQ_INFO_ERR_FRAME_SYNC = 1U << 10,/**< Frame sync error */
+ IA_CSS_RX_IRQ_INFO_ERR_FRAME_DATA = 1U << 11,/**< Frame data error */
+ IA_CSS_RX_IRQ_INFO_ERR_DATA_TIMEOUT = 1U << 12,/**< Timeout occurred */
+ IA_CSS_RX_IRQ_INFO_ERR_UNKNOWN_ESC = 1U << 13,/**< Unknown escape seq. */
+ IA_CSS_RX_IRQ_INFO_ERR_LINE_SYNC = 1U << 14,/**< Line Sync error */
+ IA_CSS_RX_IRQ_INFO_INIT_TIMEOUT = 1U << 15,
+};
+
+/** Interrupt info structure. This structure contains information about an
+ * interrupt. This needs to be used after an interrupt is received on the IA
+ * to perform the correct action.
+ */
+struct ia_css_irq {
+ enum ia_css_irq_info type; /**< Interrupt type. */
+ unsigned int sw_irq_0_val; /**< In case of SW interrupt 0, value. */
+ unsigned int sw_irq_1_val; /**< In case of SW interrupt 1, value. */
+ unsigned int sw_irq_2_val; /**< In case of SW interrupt 2, value. */
+ struct ia_css_pipe *pipe;
+ /**< The image pipe that generated the interrupt. */
+};
+
+/** @brief Obtain interrupt information.
+ *
+ * @param[out] info Pointer to the interrupt info. The interrupt
+ * information will be written to this parameter.
+ * @return If an error is encountered while retrieving the interrupt
+ * info and no interrupt could be translated successfully, this
+ * will return IA_CSS_ERR_INTERNAL_ERROR. Otherwise
+ * IA_CSS_SUCCESS.
+ *
+ * This function is expected to be executed after an interrupt has been sent
+ * to the IA from the CSS. This function returns information about the interrupt
+ * which is needed by the IA code to properly handle the interrupt. This
+ * information includes the image pipe, buffer type etc.
+ */
+enum ia_css_err
+ia_css_irq_translate(unsigned int *info);
+
+/** @brief Get CSI receiver error info.
+ *
+ * @param[out] irq_bits Pointer to the interrupt bits. The interrupt
+ * bits will be written to this parameter.
+ * This will be the error bits that are enabled in the CSI
+ * receiver error register.
+ * @return None
+ *
+ * This function should be used whenever a CSI receiver error interrupt is
+ * generated. It provides the detailed information (bits) on the exact error
+ * that occurred.
+ *
+ * @deprecated{this function is DEPRECATED since it only works on CSI port 1.
+ * Use the function below instead and specify the appropriate port.}
+ */
+void
+ia_css_rx_get_irq_info(unsigned int *irq_bits);
+
+/** @brief Get CSI receiver error info.
+ *
+ * @param[in] port Input port identifier.
+ * @param[out] irq_bits Pointer to the interrupt bits. The interrupt
+ * bits will be written to this parameter.
+ * This will be the error bits that are enabled in the CSI
+ * receiver error register.
+ * @return None
+ *
+ * This function should be used whenever a CSI receiver error interrupt is
+ * generated. It provides the detailed information (bits) on the exact error
+ * that occurred.
+ */
+void
+ia_css_rx_port_get_irq_info(enum ia_css_csi2_port port, unsigned int *irq_bits);
+
+/** @brief Clear CSI receiver error info.
+ *
+ * @param[in] irq_bits The bits that should be cleared from the CSI receiver
+ * interrupt bits register.
+ * @return None
+ *
+ * This function should be called after ia_css_rx_get_irq_info has been called
+ * and the error bits have been interpreted. It is advised to use the return
+ * value of that function as the argument to this function to make sure no new
+ * error bits get overwritten.
+ *
+ * @deprecated{this function is DEPRECATED since it only works on CSI port 1.
+ * Use the function below instead and specify the appropriate port.}
+ */
+void
+ia_css_rx_clear_irq_info(unsigned int irq_bits);
+
+/** @brief Clear CSI receiver error info.
+ *
+ * @param[in] port Input port identifier.
+ * @param[in] irq_bits The bits that should be cleared from the CSI receiver
+ * interrupt bits register.
+ * @return None
+ *
+ * This function should be called after ia_css_rx_get_irq_info has been called
+ * and the error bits have been interpreted. It is advised to use the return
+ * value of that function as the argument to this function to make sure no new
+ * error bits get overwritten.
+ */
+void
+ia_css_rx_port_clear_irq_info(enum ia_css_csi2_port port, unsigned int irq_bits);
+
+/** @brief Enable or disable specific interrupts.
+ *
+ * @param[in] type The interrupt type that will be enabled/disabled.
+ * @param[in] enable enable or disable.
+ * @return Returns IA_CSS_ERR_INTERNAL_ERROR if this interrupt
+ * type cannot be enabled/disabled, which is the case for
+ * CSS internal interrupts. Otherwise returns
+ * IA_CSS_SUCCESS.
+ */
+enum ia_css_err
+ia_css_irq_enable(enum ia_css_irq_info type, bool enable);
+
+#endif /* __IA_CSS_IRQ_H */
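A sketch of the top-half pattern the comments above imply: translate the interrupt, then act on the returned info bits. The function name is illustrative; the calls, bits and port identifier come from this header and ia_css_input_port.h.

static void handle_css_interrupt(void)
{
        unsigned int info = 0;

        if (ia_css_irq_translate(&info) != IA_CSS_SUCCESS)
                return;
        if (info & IA_CSS_IRQ_INFO_PSYS_EVENTS_READY) {
                /* schedule PSYS event dequeuing, see ia_css_event_public.h */
        }
        if (info & IA_CSS_IRQ_INFO_CSS_RECEIVER_ERROR) {
                unsigned int rx_bits;

                ia_css_rx_port_get_irq_info(IA_CSS_CSI2_PORT0, &rx_bits);
                ia_css_rx_port_clear_irq_info(IA_CSS_CSI2_PORT0, rx_bits);
        }
}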
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_memory_access.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_memory_access.c
new file mode 100644
index 000000000000..282075942ba6
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_memory_access.c
@@ -0,0 +1,83 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015-2017, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <type_support.h>
+#include <system_types.h>
+#include <assert_support.h>
+#include <memory_access.h>
+#include <ia_css_env.h>
+#include <hrt/hive_isp_css_mm_hrt.h>
+
+const hrt_vaddress mmgr_NULL = (hrt_vaddress)0;
+const hrt_vaddress mmgr_EXCEPTION = (hrt_vaddress)-1;
+
+hrt_vaddress
+mmgr_malloc(const size_t size)
+{
+ return mmgr_alloc_attr(size, 0);
+}
+
+hrt_vaddress mmgr_alloc_attr(const size_t size, const uint16_t attrs)
+{
+ uint16_t masked_attrs = attrs & MMGR_ATTRIBUTE_MASK;
+ WARN_ON(attrs & MMGR_ATTRIBUTE_CONTIGUOUS);
+
+ if (masked_attrs & MMGR_ATTRIBUTE_CLEARED) {
+ if (masked_attrs & MMGR_ATTRIBUTE_CACHED)
+ return (ia_css_ptr) hrt_isp_css_mm_calloc_cached(size);
+ else
+ return (ia_css_ptr) hrt_isp_css_mm_calloc(size);
+ } else {
+ if (masked_attrs & MMGR_ATTRIBUTE_CACHED)
+ return (ia_css_ptr) hrt_isp_css_mm_alloc_cached(size);
+ else
+ return (ia_css_ptr) hrt_isp_css_mm_alloc(size);
+ }
+}
+
+hrt_vaddress
+mmgr_calloc(const size_t N, const size_t size)
+{
+ return mmgr_alloc_attr(size * N, MMGR_ATTRIBUTE_CLEARED);
+}
+
+void mmgr_clear(hrt_vaddress vaddr, const size_t size)
+{
+ if (vaddr)
+ hmm_set(vaddr, 0, size);
+}
+
+void mmgr_load(const hrt_vaddress vaddr, void *data, const size_t size)
+{
+ if (vaddr && data)
+ hmm_load(vaddr, data, size);
+}
+
+void
+mmgr_store(const hrt_vaddress vaddr, const void *data, const size_t size)
+{
+ if (vaddr && data)
+ hmm_store(vaddr, data, size);
+}
+
+hrt_vaddress
+mmgr_mmap(const void *ptr, const size_t size,
+ uint16_t attribute, void *context)
+{
+ struct hrt_userbuffer_attr *userbuffer_attr = context;
+ return hrt_isp_css_mm_alloc_user_ptr(
+ size, (void *)ptr, userbuffer_attr->pgnr,
+ userbuffer_attr->type,
+ attribute & HRT_BUF_FLAG_CACHED);
+}
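For context, a sketch of how callers are expected to combine the helpers above: allocate cleared ISP memory and copy host data into it. The wrapper name is illustrative.

static hrt_vaddress copy_to_isp(const void *host_buf, size_t bytes)
{
        hrt_vaddress v = mmgr_calloc(1, bytes);

        if (v == mmgr_NULL)
                return mmgr_NULL; /* allocation failed */
        mmgr_store(v, host_buf, bytes);
        return v;
}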
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_metadata.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_metadata.h
new file mode 100644
index 000000000000..c40c5a19bfe1
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_metadata.h
@@ -0,0 +1,71 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_METADATA_H
+#define __IA_CSS_METADATA_H
+
+/** @file
+ * This file contains structure for processing sensor metadata.
+ */
+
+#include <type_support.h>
+#include "ia_css_types.h"
+#include "ia_css_stream_format.h"
+
+/** Metadata configuration. This data structure contains necessary info
+ * to process sensor metadata.
+ */
+struct ia_css_metadata_config {
+ enum ia_css_stream_format data_type; /**< Data type of CSI-2 embedded
+ data. The default value is IA_CSS_STREAM_FORMAT_EMBEDDED. For
+ certain sensors, the user can choose a non-default data type for embedded
+ data. */
+ struct ia_css_resolution resolution; /**< Resolution */
+};
+
+struct ia_css_metadata_info {
+ struct ia_css_resolution resolution; /**< Resolution */
+ uint32_t stride; /**< Stride in bytes */
+ uint32_t size; /**< Total size in bytes */
+};
+
+struct ia_css_metadata {
+ struct ia_css_metadata_info info; /**< Layout info */
+ ia_css_ptr address; /**< CSS virtual address */
+ uint32_t exp_id;
+ /**< Exposure ID, see ia_css_event_public.h for more detail */
+};
+#define SIZE_OF_IA_CSS_METADATA_STRUCT sizeof(struct ia_css_metadata)
+
+/** @brief Allocate a metadata buffer.
+ * @param[in] metadata_info Metadata info struct, contains details on metadata buffers.
+ * @return Pointer of metadata buffer or NULL (if error)
+ *
+ * This function allocates a metadata buffer according to the properties
+ * specified in the metadata_info struct.
+ */
+struct ia_css_metadata *
+ia_css_metadata_allocate(const struct ia_css_metadata_info *metadata_info);
+
+/** @brief Free a metadata buffer.
+ *
+ * @param[in] metadata Pointer of metadata buffer.
+ * @return None
+ *
+ * This function frees a metadata buffer.
+ */
+void
+ia_css_metadata_free(struct ia_css_metadata *metadata);
+
+#endif /* __IA_CSS_METADATA_H */
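A short sketch of the allocate/free pairing for the metadata API above; the wrapper is hypothetical.

static struct ia_css_metadata *get_metadata_buffer(const struct ia_css_metadata_info *info)
{
        struct ia_css_metadata *md = ia_css_metadata_allocate(info);

        if (!md)
                return NULL; /* allocation failed */
        /* md->address is the CSS virtual address; md->exp_id ties it to a frame */
        return md;
}
/* release with ia_css_metadata_free(md) once the buffer is no longer needed */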
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_mipi.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_mipi.h
new file mode 100644
index 000000000000..fd2c01b60b28
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_mipi.h
@@ -0,0 +1,82 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_MIPI_H
+#define __IA_CSS_MIPI_H
+
+/** @file
+ * This file contains MIPI support functionality
+ */
+
+#include <type_support.h>
+#include "ia_css_err.h"
+#include "ia_css_stream_format.h"
+#include "ia_css_input_port.h"
+
+/** Backward compatible for CSS API 2.0 only
+ * TO BE REMOVED when all drivers move to CSS API 2.1.
+ */
+/** @brief Specify a CSS MIPI frame buffer.
+ *
+ * @param[in] size_mem_words The frame size in memory words (32B).
+ * @param[in] contiguous Allocate memory physically contiguously or not.
+ * @return The error code.
+ *
+ * \deprecated{Use ia_css_mipi_buffer_config instead.}
+ *
+ * Specifies a CSS MIPI frame buffer: size in memory words (32B).
+ */
+enum ia_css_err
+ia_css_mipi_frame_specify(const unsigned int size_mem_words,
+ const bool contiguous);
+
+#if !defined(HAS_NO_INPUT_SYSTEM)
+/** @brief Register size of a CSS MIPI frame for check during capturing.
+ *
+ * @param[in] port CSI-2 port for which this check is registered.
+ * @param[in] size_mem_words The frame size in memory words (32B).
+ * @return Return the error in case of failure. E.g. MAX_NOF_ENTRIES REACHED
+ *
+ * Register size of a CSS MIPI frame to check during capturing. Up to
+ * IA_CSS_MIPI_SIZE_CHECK_MAX_NOF_ENTRIES entries per port allowed. Entries are reset
+ * when stream is stopped.
+ *
+ *
+ */
+enum ia_css_err
+ia_css_mipi_frame_enable_check_on_size(const enum ia_css_csi2_port port,
+ const unsigned int size_mem_words);
+#endif
+
+/** @brief Calculate the size of a mipi frame.
+ *
+ * @param[in] width The width (in pixels) of the frame.
+ * @param[in] height The height (in lines) of the frame.
+ * @param[in] format The frame (MIPI) format.
+ * @param[in] hasSOLandEOL Whether the (MIPI) frame contains (optional) SOL and EOL packets.
+ * @param[in] embedded_data_size_words Embedded data size in memory words.
+ * @param size_mem_words The mipi frame size in memory words (32B).
+ * @return The error code.
+ *
+ * Calculate the size of a mipi frame, based on the resolution and format.
+ */
+enum ia_css_err
+ia_css_mipi_frame_calculate_size(const unsigned int width,
+ const unsigned int height,
+ const enum ia_css_stream_format format,
+ const bool hasSOLandEOL,
+ const unsigned int embedded_data_size_words,
+ unsigned int *size_mem_words);
+
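+/* Illustrative usage sketch (not part of the original interface). The
+ * 1920x1080 RAW10 values are example assumptions only, and
+ * ia_css_mipi_frame_specify() is the (deprecated) CSS API 2.0 path shown
+ * above:
+ *
+ * @code
+ * unsigned int words = 0;
+ * enum ia_css_err err;
+ *
+ * err = ia_css_mipi_frame_calculate_size(1920, 1080,
+ *                                        IA_CSS_STREAM_FORMAT_RAW_10,
+ *                                        false, 0, &words);
+ * if (err == IA_CSS_SUCCESS)
+ *         err = ia_css_mipi_frame_specify(words, false);
+ * @endcode
+ */
+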
+#endif /* __IA_CSS_MIPI_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_mmu.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_mmu.h
new file mode 100644
index 000000000000..48f8855d61f6
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_mmu.h
@@ -0,0 +1,32 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_MMU_H
+#define __IA_CSS_MMU_H
+
+/** @file
+ * This file contains one support function for invalidating the CSS MMU cache
+ */
+
+/** @brief Invalidate the MMU internal cache.
+ * @return None
+ *
+ * This function triggers an invalidation of the translation look-aside
+ * buffer (TLB) inside the CSS MMU. This function should be called
+ * every time the page tables used by the MMU change.
+ */
+void
+ia_css_mmu_invalidate_cache(void);
+
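+/* Illustrative usage sketch (not part of the original interface):
+ *
+ * @code
+ * // ... the host updates the page tables used by the CSS MMU ...
+ * ia_css_mmu_invalidate_cache();
+ * @endcode
+ */
+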
+#endif /* __IA_CSS_MMU_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_mmu_private.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_mmu_private.h
new file mode 100644
index 000000000000..7c8500903b5c
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_mmu_private.h
@@ -0,0 +1,31 @@
+#ifdef ISP2401
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_MMU_PRIVATE_H
+#define __IA_CSS_MMU_PRIVATE_H
+
+#include "system_local.h"
+
+/*
+ * This function sets the L1 page table address.
+ * After power-up of the ISP the L1 page table can be set.
+ * Once set, the L1 page table is protected against
+ * further modification.
+ */
+void
+sh_css_mmu_set_page_table_base_index(hrt_data base_index);
+
+#endif /* __IA_CSS_MMU_PRIVATE_H */
+#endif
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_morph.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_morph.h
new file mode 100644
index 000000000000..969840da52b2
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_morph.h
@@ -0,0 +1,39 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_MORPH_H
+#define __IA_CSS_MORPH_H
+
+/** @file
+ * This file contains support for the morphing table
+ */
+
+#include <ia_css_types.h>
+
+/** @brief Allocate a morphing table
+ * @param[in]	width		Width of the morphing table.
+ * @param[in]	height	Height of the morphing table.
+ * @return		Pointer to the allocated morphing table
+ */
+struct ia_css_morph_table *
+ia_css_morph_table_allocate(unsigned int width, unsigned int height);
+
+/** @brief Free the morph table
+ * @param[in] me Pointer to the morph table.
+ * @return None
+ */
+void
+ia_css_morph_table_free(struct ia_css_morph_table *me);
+
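+/* Illustrative usage sketch (not part of the original interface). The table
+ * dimensions are example assumptions only:
+ *
+ * @code
+ * struct ia_css_morph_table *table;
+ *
+ * table = ia_css_morph_table_allocate(64, 48);
+ * if (table != NULL) {
+ *         // ... fill in the morphing coordinates ...
+ *         ia_css_morph_table_free(table);
+ * }
+ * @endcode
+ */
+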
+#endif /* __IA_CSS_MORPH_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_pipe.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_pipe.h
new file mode 100644
index 000000000000..d0c0e6b92025
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_pipe.h
@@ -0,0 +1,228 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_PIPE_H__
+#define __IA_CSS_PIPE_H__
+
+#include <type_support.h>
+#include "ia_css_stream.h"
+#include "ia_css_frame.h"
+#include "ia_css_pipeline.h"
+#include "ia_css_binary.h"
+#include "sh_css_legacy.h"
+
+#define PIPE_ENTRY_EMPTY_TOKEN (~0U)
+#define PIPE_ENTRY_RESERVED_TOKEN (0x1)
+
+struct ia_css_preview_settings {
+ struct ia_css_binary copy_binary;
+ struct ia_css_binary preview_binary;
+ struct ia_css_binary vf_pp_binary;
+
+	/* ISP2401 only for these two; it is unclear whether they are actually used */
+ struct ia_css_frame *delay_frames[MAX_NUM_DELAY_FRAMES];
+ struct ia_css_frame *tnr_frames[NUM_TNR_FRAMES];
+
+ struct ia_css_pipe *copy_pipe;
+ struct ia_css_pipe *capture_pipe;
+ struct ia_css_pipe *acc_pipe;
+};
+
+#define IA_CSS_DEFAULT_PREVIEW_SETTINGS \
+{ \
+ IA_CSS_BINARY_DEFAULT_SETTINGS, /* copy_binary */\
+ IA_CSS_BINARY_DEFAULT_SETTINGS, /* preview_binary */\
+ IA_CSS_BINARY_DEFAULT_SETTINGS, /* vf_pp_binary */\
+	{ NULL },			/* delay_frames */ \
+ { NULL }, /* tnr_frames */ \
+ NULL, /* copy_pipe */\
+ NULL, /* capture_pipe */\
+ NULL, /* acc_pipe */\
+}
+
+struct ia_css_capture_settings {
+ struct ia_css_binary copy_binary;
+	/* the primary binary is extended to multiple stages because in ISP2.6.1
+	 * the computational load is too high to fit in a single binary. */
+ struct ia_css_binary primary_binary[MAX_NUM_PRIMARY_STAGES];
+ unsigned int num_primary_stage;
+ struct ia_css_binary pre_isp_binary;
+ struct ia_css_binary anr_gdc_binary;
+ struct ia_css_binary post_isp_binary;
+ struct ia_css_binary capture_pp_binary;
+ struct ia_css_binary vf_pp_binary;
+ struct ia_css_binary capture_ldc_binary;
+ struct ia_css_binary *yuv_scaler_binary;
+ struct ia_css_frame *delay_frames[MAX_NUM_VIDEO_DELAY_FRAMES];
+ bool *is_output_stage;
+ unsigned int num_yuv_scaler;
+};
+
+#define IA_CSS_DEFAULT_CAPTURE_SETTINGS \
+{ \
+ IA_CSS_BINARY_DEFAULT_SETTINGS, /* copy_binary */\
+ {IA_CSS_BINARY_DEFAULT_SETTINGS}, /* primary_binary */\
+ 0, /* num_primary_stage */\
+ IA_CSS_BINARY_DEFAULT_SETTINGS, /* pre_isp_binary */\
+ IA_CSS_BINARY_DEFAULT_SETTINGS, /* anr_gdc_binary */\
+ IA_CSS_BINARY_DEFAULT_SETTINGS, /* post_isp_binary */\
+ IA_CSS_BINARY_DEFAULT_SETTINGS, /* capture_pp_binary */\
+ IA_CSS_BINARY_DEFAULT_SETTINGS, /* vf_pp_binary */\
+ IA_CSS_BINARY_DEFAULT_SETTINGS, /* capture_ldc_binary */\
+ NULL, /* yuv_scaler_binary */ \
+ { NULL }, /* delay_frames[ref_frames] */ \
+ NULL, /* is_output_stage */ \
+ 0, /* num_yuv_scaler */ \
+}
+
+struct ia_css_video_settings {
+ struct ia_css_binary copy_binary;
+ struct ia_css_binary video_binary;
+ struct ia_css_binary vf_pp_binary;
+ struct ia_css_binary *yuv_scaler_binary;
+ struct ia_css_frame *delay_frames[MAX_NUM_VIDEO_DELAY_FRAMES];
+#ifndef ISP2401
+ struct ia_css_frame *tnr_frames[NUM_VIDEO_TNR_FRAMES];
+#else
+ struct ia_css_frame *tnr_frames[NUM_TNR_FRAMES];
+#endif
+ struct ia_css_frame *vf_pp_in_frame;
+ struct ia_css_pipe *copy_pipe;
+ struct ia_css_pipe *capture_pipe;
+ bool *is_output_stage;
+ unsigned int num_yuv_scaler;
+};
+
+#define IA_CSS_DEFAULT_VIDEO_SETTINGS \
+{ \
+ IA_CSS_BINARY_DEFAULT_SETTINGS, /* copy_binary */ \
+ IA_CSS_BINARY_DEFAULT_SETTINGS, /* video_binary */ \
+ IA_CSS_BINARY_DEFAULT_SETTINGS, /* vf_pp_binary */ \
+ NULL, /* yuv_scaler_binary */ \
+ { NULL }, /* delay_frames */ \
+ { NULL }, /* tnr_frames */ \
+ NULL, /* vf_pp_in_frame */ \
+ NULL, /* copy_pipe */ \
+ NULL, /* capture_pipe */ \
+ NULL, /* is_output_stage */ \
+ 0, /* num_yuv_scaler */ \
+}
+
+struct ia_css_yuvpp_settings {
+ struct ia_css_binary copy_binary;
+ struct ia_css_binary *yuv_scaler_binary;
+ struct ia_css_binary *vf_pp_binary;
+ bool *is_output_stage;
+ unsigned int num_yuv_scaler;
+ unsigned int num_vf_pp;
+ unsigned int num_output;
+};
+
+#define IA_CSS_DEFAULT_YUVPP_SETTINGS \
+{ \
+ IA_CSS_BINARY_DEFAULT_SETTINGS, /* copy_binary */ \
+ NULL, /* yuv_scaler_binary */ \
+ NULL, /* vf_pp_binary */ \
+ NULL, /* is_output_stage */ \
+ 0, /* num_yuv_scaler */ \
+ 0, /* num_vf_pp */ \
+ 0, /* num_output */ \
+}
+
+struct osys_object;
+
+struct ia_css_pipe {
+ /* TODO: Remove stop_requested and use stop_requested in the pipeline */
+ bool stop_requested;
+ struct ia_css_pipe_config config;
+ struct ia_css_pipe_extra_config extra_config;
+ struct ia_css_pipe_info info;
+ enum ia_css_pipe_id mode;
+ struct ia_css_shading_table *shading_table;
+ struct ia_css_pipeline pipeline;
+ struct ia_css_frame_info output_info[IA_CSS_PIPE_MAX_OUTPUT_STAGE];
+ struct ia_css_frame_info bds_output_info;
+ struct ia_css_frame_info vf_output_info[IA_CSS_PIPE_MAX_OUTPUT_STAGE];
+ struct ia_css_frame_info out_yuv_ds_input_info;
+ struct ia_css_frame_info vf_yuv_ds_input_info;
+ struct ia_css_fw_info *output_stage; /* extra output stage */
+ struct ia_css_fw_info *vf_stage; /* extra vf_stage */
+ unsigned int required_bds_factor;
+ unsigned int dvs_frame_delay;
+ int num_invalid_frames;
+ bool enable_viewfinder[IA_CSS_PIPE_MAX_OUTPUT_STAGE];
+ struct ia_css_stream *stream;
+ struct ia_css_frame in_frame_struct;
+ struct ia_css_frame out_frame_struct;
+ struct ia_css_frame vf_frame_struct;
+ struct ia_css_frame *continuous_frames[NUM_CONTINUOUS_FRAMES];
+ struct ia_css_metadata *cont_md_buffers[NUM_CONTINUOUS_FRAMES];
+ union {
+ struct ia_css_preview_settings preview;
+ struct ia_css_video_settings video;
+ struct ia_css_capture_settings capture;
+ struct ia_css_yuvpp_settings yuvpp;
+ } pipe_settings;
+ hrt_vaddress scaler_pp_lut;
+ struct osys_object *osys_obj;
+
+	/* This number is unique per pipe in each instance of the CSS. It is
+	 * also reused as the pipeline number. There is a 1:1 mapping between
+	 * pipe_num and the SP thread id. The current logic limits pipe_num to
+	 * SH_CSS_MAX_SP_THREADS. */
+ unsigned int pipe_num;
+};
+
+#define IA_CSS_DEFAULT_PIPE \
+{ \
+ false, /* stop_requested */ \
+ DEFAULT_PIPE_CONFIG, /* config */ \
+ DEFAULT_PIPE_EXTRA_CONFIG, /* extra_config */ \
+ DEFAULT_PIPE_INFO, /* info */ \
+ IA_CSS_PIPE_ID_ACC, /* mode (pipe_id) */ \
+ NULL, /* shading_table */ \
+ DEFAULT_PIPELINE, /* pipeline */ \
+ {IA_CSS_BINARY_DEFAULT_FRAME_INFO}, /* output_info */ \
+ IA_CSS_BINARY_DEFAULT_FRAME_INFO, /* bds_output_info */ \
+ {IA_CSS_BINARY_DEFAULT_FRAME_INFO}, /* vf_output_info */ \
+ IA_CSS_BINARY_DEFAULT_FRAME_INFO, /* out_yuv_ds_input_info */ \
+ IA_CSS_BINARY_DEFAULT_FRAME_INFO, /* vf_yuv_ds_input_info */ \
+ NULL, /* output_stage */ \
+ NULL, /* vf_stage */ \
+ SH_CSS_BDS_FACTOR_1_00, /* required_bds_factor */ \
+ 1, /* dvs_frame_delay */ \
+ 0, /* num_invalid_frames */ \
+ {true}, /* enable_viewfinder */ \
+ NULL, /* stream */ \
+ DEFAULT_FRAME, /* in_frame_struct */ \
+ DEFAULT_FRAME, /* out_frame_struct */ \
+ DEFAULT_FRAME, /* vf_frame_struct */ \
+ { NULL }, /* continuous_frames */ \
+ { NULL }, /* cont_md_buffers */ \
+ { IA_CSS_DEFAULT_PREVIEW_SETTINGS }, /* pipe_settings */ \
+ 0, /* scaler_pp_lut */ \
+ NULL, /* osys object */ \
+ PIPE_ENTRY_EMPTY_TOKEN, /* pipe_num */\
+}
+
+void ia_css_pipe_map_queue(struct ia_css_pipe *pipe, bool map);
+
+enum ia_css_err
+sh_css_param_update_isp_params(struct ia_css_pipe *curr_pipe,
+ struct ia_css_isp_parameters *params,
+ bool commit, struct ia_css_pipe *pipe);
+
+
+
+#endif /* __IA_CSS_PIPE_H__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_pipe_public.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_pipe_public.h
new file mode 100644
index 000000000000..733e0ef3afe8
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_pipe_public.h
@@ -0,0 +1,659 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_PIPE_PUBLIC_H
+#define __IA_CSS_PIPE_PUBLIC_H
+
+/** @file
+ * This file contains the public interface for CSS pipes.
+ */
+
+#include <type_support.h>
+#include <ia_css_err.h>
+#include <ia_css_types.h>
+#include <ia_css_frame_public.h>
+#include <ia_css_buffer.h>
+#ifdef ISP2401
+#include <ia_css_acc_types.h>
+#endif
+
+enum {
+ IA_CSS_PIPE_OUTPUT_STAGE_0 = 0,
+ IA_CSS_PIPE_OUTPUT_STAGE_1,
+ IA_CSS_PIPE_MAX_OUTPUT_STAGE,
+};
+
+/** Enumeration of pipe modes. A mode is used to create an image pipe
+ *  of that mode. These pipes can be combined
+ *  to configure and run streams on the ISP.
+ *
+ *  For example, one can create a preview pipe and a capture pipe to
+ *  create a continuous capture stream.
+ */
+enum ia_css_pipe_mode {
+ IA_CSS_PIPE_MODE_PREVIEW, /**< Preview pipe */
+ IA_CSS_PIPE_MODE_VIDEO, /**< Video pipe */
+ IA_CSS_PIPE_MODE_CAPTURE, /**< Still capture pipe */
+ IA_CSS_PIPE_MODE_ACC, /**< Accelerated pipe */
+ IA_CSS_PIPE_MODE_COPY, /**< Copy pipe, only used for embedded/image data copying */
+ IA_CSS_PIPE_MODE_YUVPP, /**< YUV post processing pipe, used for all use cases with YUV input,
+ for SoC sensor and external ISP */
+};
+/* Temporary define */
+#define IA_CSS_PIPE_MODE_NUM (IA_CSS_PIPE_MODE_YUVPP + 1)
+
+/**
+ * Enumeration of pipe versions.
+ * The order should match the definitions in sh_css_defs.h.
+ */
+enum ia_css_pipe_version {
+ IA_CSS_PIPE_VERSION_1 = 1, /**< ISP1.0 pipe */
+ IA_CSS_PIPE_VERSION_2_2 = 2, /**< ISP2.2 pipe */
+ IA_CSS_PIPE_VERSION_2_6_1 = 3, /**< ISP2.6.1 pipe */
+ IA_CSS_PIPE_VERSION_2_7 = 4 /**< ISP2.7 pipe */
+};
+
+/**
+ * Pipe configuration structure.
+ * Resolution properties are filled in by the driver; kernel configurations
+ * are set by the AIC.
+ */
+struct ia_css_pipe_config {
+ enum ia_css_pipe_mode mode;
+ /**< mode, indicates which mode the pipe should use. */
+ enum ia_css_pipe_version isp_pipe_version;
+ /**< pipe version, indicates which imaging pipeline the pipe should use. */
+ struct ia_css_resolution input_effective_res;
+ /**< input effective resolution */
+ struct ia_css_resolution bayer_ds_out_res;
+ /**< bayer down scaling */
+ struct ia_css_resolution capt_pp_in_res;
+#ifndef ISP2401
+	/**< capture post processing input resolution */
+#else
+ /**< capture post processing input resolution */
+#endif
+ struct ia_css_resolution vf_pp_in_res;
+#ifndef ISP2401
+	/**< view finder post processing input resolution */
+#else
+ /**< view finder post processing input resolution */
+ struct ia_css_resolution output_system_in_res;
+	/**< For IPU3 only: use output_system_in_res to specify the input resolution
+	     that OSYS will receive; this resolution is equal to the output resolution
+	     of the GDC. If it is not set, CSS will set output_system_in_res to the main
+	     OSYS output pin resolution. All other IPUs may ignore this property. */
+#endif
+ struct ia_css_resolution dvs_crop_out_res;
+ /**< dvs crop, video only, not in use yet. Use dvs_envelope below. */
+ struct ia_css_frame_info output_info[IA_CSS_PIPE_MAX_OUTPUT_STAGE];
+ /**< output of YUV scaling */
+ struct ia_css_frame_info vf_output_info[IA_CSS_PIPE_MAX_OUTPUT_STAGE];
+ /**< output of VF YUV scaling */
+ struct ia_css_fw_info *acc_extension;
+ /**< Pipeline extension accelerator */
+ struct ia_css_fw_info **acc_stages;
+ /**< Standalone accelerator stages */
+ uint32_t num_acc_stages;
+ /**< Number of standalone accelerator stages */
+ struct ia_css_capture_config default_capture_config;
+ /**< Default capture config for initial capture pipe configuration. */
+ struct ia_css_resolution dvs_envelope; /**< temporary */
+ enum ia_css_frame_delay dvs_frame_delay;
+ /**< indicates the DVS loop delay in frame periods */
+ int acc_num_execs;
+ /**< For acceleration pipes only: determine how many times the pipe
+ should be run. Setting this to -1 means it will run until
+ stopped. */
+ bool enable_dz;
+	/**< Digital zoom enable for a pipeline. If this is set to false,
+	     setting a zoom factor will have no effect.
+	     In some use cases disabling digital zoom provides better performance. */
+	bool enable_dpc;
+	/**< "Defect Pixel Correction" enable for a pipeline. If this is set to
+	     false, DPC is disabled; in some use cases this provides better performance. */
+ bool enable_vfpp_bci;
+ /**< Enabling BCI mode will cause yuv_scale binary to be picked up
+ instead of vf_pp. This only applies to viewfinder post
+ processing stages. */
+#ifdef ISP2401
+ bool enable_luma_only;
+	/**< Enabling of monochrome mode for a pipeline. If enabled, only luma
+	     processing will be done. */
+	bool enable_tnr;
+	/**< Enabling of TNR (temporal noise reduction). This is only applicable to video
+	     pipes. Non-video pipes should always set this parameter to false. */
+#endif
+ struct ia_css_isp_config *p_isp_config;
+ /**< Pointer to ISP configuration */
+ struct ia_css_resolution gdc_in_buffer_res;
+ /**< GDC in buffer resolution. */
+ struct ia_css_point gdc_in_buffer_offset;
+ /**< GDC in buffer offset - indicates the pixel coordinates of the first valid pixel inside the buffer */
+#ifdef ISP2401
+ struct ia_css_coordinate internal_frame_origin_bqs_on_sctbl;
+	/**< Origin of the internal frame positioned on the shading table, used for shading correction in the ISP.
+	     NOTE: The shading table is larger than or equal to the internal frame.
+	     The shading table holds shading gains and the internal frame holds Bayer data.
+	     The origin of the internal frame is used during shading correction in the ISP
+	     to retrieve the shading gains that correspond to the Bayer data. */
+#endif
+};
+
+
+#ifdef ISP2401
+/**
+ * Default origin of internal frame positioned on shading table.
+ */
+#define IA_CSS_PIPE_DEFAULT_INTERNAL_FRAME_ORIGIN_BQS_ON_SCTBL \
+{ \
+ 0, /* x [bqs] */ \
+ 0 /* y [bqs] */ \
+}
+
+/**
+ * Default settings for newly created pipe configurations.
+ */
+#define DEFAULT_PIPE_CONFIG \
+{ \
+ IA_CSS_PIPE_MODE_PREVIEW, /* mode */ \
+ 1, /* isp_pipe_version */ \
+	{ 0, 0 },		/* input_effective_res */ \
+	{ 0, 0 },		/* bayer_ds_out_res */ \
+	{ 0, 0 },		/* capt_pp_in_res */ \
+	{ 0, 0 },		/* vf_pp_in_res */ \
+ { 0, 0 }, /* output_system_in_res */ \
+ { 0, 0 }, /* dvs_crop_out_res */ \
+ {IA_CSS_BINARY_DEFAULT_FRAME_INFO}, /* output_info */ \
+ {IA_CSS_BINARY_DEFAULT_FRAME_INFO}, /* vf_output_info */ \
+ NULL, /* acc_extension */ \
+ NULL, /* acc_stages */ \
+ 0, /* num_acc_stages */ \
+ DEFAULT_CAPTURE_CONFIG, /* default_capture_config */ \
+ { 0, 0 }, /* dvs_envelope */ \
+ IA_CSS_FRAME_DELAY_1, /* dvs_frame_delay */ \
+ -1, /* acc_num_execs */ \
+ false, /* enable_dz */ \
+ false, /* enable_dpc */ \
+ false, /* enable_vfpp_bci */ \
+ false, /* enable_luma_only */ \
+ false, /* enable_tnr */ \
+ NULL, /* p_isp_config */\
+ { 0, 0 }, /* gdc_in_buffer_res */ \
+ { 0, 0 }, /* gdc_in_buffer_offset */ \
+ IA_CSS_PIPE_DEFAULT_INTERNAL_FRAME_ORIGIN_BQS_ON_SCTBL /* internal_frame_origin_bqs_on_sctbl */ \
+}
+
+#else
+
+/**
+ * Default settings for newly created pipe configurations.
+ */
+#define DEFAULT_PIPE_CONFIG \
+{ \
+ IA_CSS_PIPE_MODE_PREVIEW, /* mode */ \
+ 1, /* isp_pipe_version */ \
+	{ 0, 0 },		/* input_effective_res */ \
+	{ 0, 0 },		/* bayer_ds_out_res */ \
+	{ 0, 0 },		/* capt_pp_in_res */ \
+	{ 0, 0 },		/* vf_pp_in_res */ \
+ { 0, 0 }, /* dvs_crop_out_res */ \
+ {IA_CSS_BINARY_DEFAULT_FRAME_INFO}, /* output_info */ \
+ {IA_CSS_BINARY_DEFAULT_FRAME_INFO}, /* vf_output_info */ \
+ NULL, /* acc_extension */ \
+ NULL, /* acc_stages */ \
+ 0, /* num_acc_stages */ \
+ DEFAULT_CAPTURE_CONFIG, /* default_capture_config */ \
+ { 0, 0 }, /* dvs_envelope */ \
+ IA_CSS_FRAME_DELAY_1, /* dvs_frame_delay */ \
+ -1, /* acc_num_execs */ \
+ false, /* enable_dz */ \
+ false, /* enable_dpc */ \
+ false, /* enable_vfpp_bci */ \
+ NULL, /* p_isp_config */\
+ { 0, 0 }, /* gdc_in_buffer_res */ \
+ { 0, 0 } /* gdc_in_buffer_offset */ \
+}
+
+#endif
+
+/** Pipe info, this struct describes properties of a pipe after its stream has
+ * been created.
+ * ~~~** DO NOT ADD NEW FIELDS **~~~ This structure will be deprecated.
+ *           - On behalf of the CSS-API Committee.
+ */
+struct ia_css_pipe_info {
+ struct ia_css_frame_info output_info[IA_CSS_PIPE_MAX_OUTPUT_STAGE];
+ /**< Info about output resolution. This contains the stride which
+ should be used for memory allocation. */
+ struct ia_css_frame_info vf_output_info[IA_CSS_PIPE_MAX_OUTPUT_STAGE];
+ /**< Info about viewfinder output resolution (optional). This contains
+ the stride that should be used for memory allocation. */
+ struct ia_css_frame_info raw_output_info;
+ /**< Raw output resolution. This indicates the resolution of the
+ RAW bayer output for pipes that support this. Currently, only the
+ still capture pipes support this feature. When this resolution is
+ smaller than the input resolution, cropping will be performed by
+ the ISP. The first cropping that will be performed is on the upper
+ left corner where we crop 8 lines and 8 columns to remove the
+ pixels normally used to initialize the ISP filters.
+ This is why the raw output resolution should normally be set to
+ the input resolution - 8x8. */
+#ifdef ISP2401
+ struct ia_css_resolution output_system_in_res_info;
+	/**< For IPU3 only. Info about the output system input resolution, which is
+	     considered to be the GDC output resolution. */
+#endif
+ struct ia_css_shading_info shading_info;
+ /**< After an image pipe is created, this field will contain the info
+ for the shading correction. */
+ struct ia_css_grid_info grid_info;
+ /**< After an image pipe is created, this field will contain the grid
+ info for 3A and DVS. */
+ int num_invalid_frames;
+ /**< The very first frames in a started stream do not contain valid data.
+ In this field, the CSS-firmware communicates to the host-driver how
+ many initial frames will contain invalid data; this allows the
+	     host-driver to discard those initial invalid frames and start its
+ output at the first valid frame. */
+};
+
+/**
+ * Defaults for ia_css_pipe_info structs.
+ */
+#ifdef ISP2401
+
+#define DEFAULT_PIPE_INFO \
+{ \
+ {IA_CSS_BINARY_DEFAULT_FRAME_INFO}, /* output_info */ \
+ {IA_CSS_BINARY_DEFAULT_FRAME_INFO}, /* vf_output_info */ \
+ IA_CSS_BINARY_DEFAULT_FRAME_INFO, /* raw_output_info */ \
+ { 0, 0}, /* output system in res */ \
+ DEFAULT_SHADING_INFO, /* shading_info */ \
+ DEFAULT_GRID_INFO, /* grid_info */ \
+ 0 /* num_invalid_frames */ \
+}
+
+#else
+
+#define DEFAULT_PIPE_INFO \
+{ \
+ {IA_CSS_BINARY_DEFAULT_FRAME_INFO}, /* output_info */ \
+ {IA_CSS_BINARY_DEFAULT_FRAME_INFO}, /* vf_output_info */ \
+ IA_CSS_BINARY_DEFAULT_FRAME_INFO, /* raw_output_info */ \
+ DEFAULT_SHADING_INFO, /* shading_info */ \
+ DEFAULT_GRID_INFO, /* grid_info */ \
+ 0 /* num_invalid_frames */ \
+}
+
+#endif
+
+/** @brief Load default pipe configuration
+ * @param[out] pipe_config The pipe configuration.
+ * @return None
+ *
+ * This function will load the default pipe configuration:
+@code
+ struct ia_css_pipe_config def_config = {
+ IA_CSS_PIPE_MODE_PREVIEW, // mode
+ 1, // isp_pipe_version
+ {0, 0}, // bayer_ds_out_res
+ {0, 0}, // capt_pp_in_res
+ {0, 0}, // vf_pp_in_res
+ {0, 0}, // dvs_crop_out_res
+ {{0, 0}, 0, 0, 0, 0}, // output_info
+ {{0, 0}, 0, 0, 0, 0}, // second_output_info
+ {{0, 0}, 0, 0, 0, 0}, // vf_output_info
+ {{0, 0}, 0, 0, 0, 0}, // second_vf_output_info
+ NULL, // acc_extension
+ NULL, // acc_stages
+ 0, // num_acc_stages
+ {
+ IA_CSS_CAPTURE_MODE_RAW, // mode
+ false, // enable_xnr
+ false // enable_raw_output
+ }, // default_capture_config
+ {0, 0}, // dvs_envelope
+ 1, // dvs_frame_delay
+ -1, // acc_num_execs
+ true, // enable_dz
+ NULL, // p_isp_config
+ };
+@endcode
+ */
+void ia_css_pipe_config_defaults(struct ia_css_pipe_config *pipe_config);
+
+/** @brief Create a pipe
+ * @param[in] config The pipe configuration.
+ * @param[out] pipe The pipe.
+ * @return IA_CSS_SUCCESS or the error code.
+ *
+ * This function will create a pipe with the given
+ * configuration.
+ */
+enum ia_css_err
+ia_css_pipe_create(const struct ia_css_pipe_config *config,
+ struct ia_css_pipe **pipe);
+
+/** @brief Destroy a pipe
+ * @param[in] pipe The pipe.
+ * @return IA_CSS_SUCCESS or the error code.
+ *
+ * This function will destroy a given pipe.
+ */
+enum ia_css_err
+ia_css_pipe_destroy(struct ia_css_pipe *pipe);
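+
+/* Illustrative usage sketch (not part of the original interface), combining
+ * ia_css_pipe_config_defaults(), ia_css_pipe_create() and
+ * ia_css_pipe_destroy():
+ *
+ * @code
+ * struct ia_css_pipe_config cfg;
+ * struct ia_css_pipe *pipe = NULL;
+ * enum ia_css_err err;
+ *
+ * ia_css_pipe_config_defaults(&cfg);
+ * cfg.mode = IA_CSS_PIPE_MODE_PREVIEW;
+ * err = ia_css_pipe_create(&cfg, &pipe);
+ * if (err == IA_CSS_SUCCESS) {
+ *         // ... use the pipe in a stream ...
+ *         err = ia_css_pipe_destroy(pipe);
+ * }
+ * @endcode
+ */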
+
+/** @brief Provides information about a pipe
+ * @param[in] pipe The pipe.
+ * @param[out] pipe_info The pipe information.
+ * @return IA_CSS_SUCCESS or IA_CSS_ERR_INVALID_ARGUMENTS.
+ *
+ * This function will provide information about a given pipe.
+ */
+enum ia_css_err
+ia_css_pipe_get_info(const struct ia_css_pipe *pipe,
+ struct ia_css_pipe_info *pipe_info);
+
+/** @brief Configure a pipe with filter coefficients.
+ * @param[in] pipe The pipe.
+ * @param[in] config The pointer to ISP configuration.
+ * @return IA_CSS_SUCCESS or error code upon error.
+ *
+ * This function configures the filter coefficients for an image
+ * pipe.
+ */
+enum ia_css_err
+ia_css_pipe_set_isp_config(struct ia_css_pipe *pipe,
+ struct ia_css_isp_config *config);
+
+/** @brief Controls when the Event generator raises an IRQ to the Host.
+ *
+ * @param[in] pipe The pipe.
+ * @param[in]	or_mask		Bitwise OR of enum ia_css_event_irq_mask_type values. Each pipe
+ related event that is part of this mask will directly
+ raise an IRQ to the Host when the event occurs in the
+ CSS.
+ * @param[in]	and_mask	Bitwise OR of enum ia_css_event_irq_mask_type values. An event
+ IRQ for the Host is only raised after all pipe related
+ events have occurred at least once for all the active
+			pipes. Events are remembered and don't need to occur
+ at the same moment in time. There is no control over
+ the order of these events. Once an IRQ has been raised
+ all remembered events are reset.
+ * @return IA_CSS_SUCCESS.
+ *
+ Controls when the Event generator in the CSS raises an IRQ to the Host.
+ The main purpose of this function is to reduce the number of interrupts
+ between the CSS and the Host. This helps save power, as it wakes up the
+ Host less often. In case both or_mask and and_mask are
+ IA_CSS_EVENT_TYPE_NONE for all pipes, no event IRQs will be raised. An
+ exception holds for IA_CSS_EVENT_TYPE_PORT_EOF; for this event an IRQ is always
+ raised.
+ Note that events are still queued and the Host can poll for them. The
+ or_mask and and_mask may be active at the same time\n
+ \n
+ Default values, for all pipe id's, after ia_css_init:\n
+ or_mask = IA_CSS_EVENT_TYPE_ALL\n
+ and_mask = IA_CSS_EVENT_TYPE_NONE\n
+ \n
+ Examples\n
+ \code
+ ia_css_pipe_set_irq_mask(h_pipe,
+ IA_CSS_EVENT_TYPE_3A_STATISTICS_DONE |
+ IA_CSS_EVENT_TYPE_DIS_STATISTICS_DONE ,
+ IA_CSS_EVENT_TYPE_NONE);
+ \endcode
+ The event generator will only raise an interrupt to the Host when there are
+ 3A or DIS statistics available from the preview pipe. It will not generate
+ an interrupt for any other event of the preview pipe e.g when there is an
+ output frame available.
+
+ \code
+ ia_css_pipe_set_irq_mask(h_pipe_preview,
+ IA_CSS_EVENT_TYPE_NONE,
+ IA_CSS_EVENT_TYPE_OUTPUT_FRAME_DONE |
+ IA_CSS_EVENT_TYPE_3A_STATISTICS_DONE );
+
+ ia_css_pipe_set_irq_mask(h_pipe_capture,
+ IA_CSS_EVENT_TYPE_NONE,
+ IA_CSS_EVENT_TYPE_OUTPUT_FRAME_DONE );
+ \endcode
+ The event generator will only raise an interrupt to the Host when there is
+ both a frame done and 3A event available from the preview pipe AND when there
+ is a frame done available from the capture pipe. Note that these events
+ may occur at different moments in time. Also the order of the events is not
+ relevant.
+
+ \code
+ ia_css_pipe_set_irq_mask(h_pipe_preview,
+ IA_CSS_EVENT_TYPE_OUTPUT_FRAME_DONE,
+ IA_CSS_EVENT_TYPE_ALL );
+
+ ia_css_pipe_set_irq_mask(h_pipe_capture,
+ IA_CSS_EVENT_TYPE_OUTPUT_FRAME_DONE,
+ IA_CSS_EVENT_TYPE_ALL );
+ \endcode
+ The event generator will only raise an interrupt to the Host when there is an
+ output frame from the preview pipe OR an output frame from the capture pipe.
+ All other events (3A, VF output, pipeline done) will not raise an interrupt
+ to the Host. These events are not lost but always stored in the event queue.
+ */
+enum ia_css_err
+ia_css_pipe_set_irq_mask(struct ia_css_pipe *pipe,
+ unsigned int or_mask,
+ unsigned int and_mask);
+
+/** @brief Reads the current event IRQ mask from the CSS.
+ *
+ * @param[in] pipe The pipe.
+ * @param[out]	or_mask		Current or_mask. The bits in this mask are a bitwise OR
+ *				of enum ia_css_event_irq_mask_type values. The pointer may be NULL.
+ * @param[out]	and_mask	Current and_mask. The bits in this mask are a bitwise OR
+ *				of enum ia_css_event_irq_mask_type values. The pointer may be NULL.
+ * @return IA_CSS_SUCCESS.
+ *
+ Reads the current event IRQ mask from the CSS. Reading returns the actual
+ values as used by the SP and not any mirrored values stored at the Host.\n
+\n
+Precondition:\n
+SP must be running.\n
+
+*/
+enum ia_css_err
+ia_css_event_get_irq_mask(const struct ia_css_pipe *pipe,
+ unsigned int *or_mask,
+ unsigned int *and_mask);
+
+/** @brief Queue a buffer for an image pipe.
+ *
+ * @param[in] pipe The pipe that will own the buffer.
+ * @param[in] buffer Pointer to the buffer.
+ * Note that the caller remains owner of the buffer
+ * structure. Only the data pointer within it will
+ * be passed into the internal queues.
+ * @return IA_CSS_INTERNAL_ERROR in case of unexpected errors,
+ * IA_CSS_SUCCESS otherwise.
+ *
+ * This function adds a buffer (which has a certain buffer type) to the queue
+ * for this type. This queue is owned by the image pipe. After this function
+ * completes successfully, the buffer is now owned by the image pipe and should
+ * no longer be accessed by any other code until it gets dequeued. The image
+ * pipe will dequeue buffers from this queue, use them and return them to the
+ * host code via an interrupt. Buffers will be consumed in the same order they
+ * get queued, but may be returned to the host out of order.
+ */
+enum ia_css_err
+ia_css_pipe_enqueue_buffer(struct ia_css_pipe *pipe,
+ const struct ia_css_buffer *buffer);
+
+/** @brief Dequeue a buffer from an image pipe.
+ *
+ * @param[in] pipe The pipeline that the buffer queue belongs to.
+ * @param[in,out] buffer The buffer is used to lookup the type which determines
+ * which internal queue to use.
+ *			The resulting buffer pointer is written into the data
+ * field.
+ * @return IA_CSS_ERR_NO_BUFFER if the queue is empty or
+ * IA_CSS_SUCCESS otherwise.
+ *
+ * This function dequeues a buffer from a buffer queue. The queue is indicated
+ * by the buffer type argument. This function can be called after an interrupt
+ * has been generated that signalled that a new buffer was available. It can
+ * also be used in a polling-like situation where the NO_BUFFER return value is
+ * used to determine whether a buffer was available or not.
+ */
+enum ia_css_err
+ia_css_pipe_dequeue_buffer(struct ia_css_pipe *pipe,
+ struct ia_css_buffer *buffer);
+
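+/* Illustrative usage sketch (not part of the original interface). The buffer
+ * set-up is abbreviated; the exact fields of struct ia_css_buffer are defined
+ * in ia_css_buffer.h and are assumptions here:
+ *
+ * @code
+ * struct ia_css_buffer buf = { 0 };
+ * enum ia_css_err err;
+ *
+ * // ... set the buffer type and data pointer in buf ...
+ * err = ia_css_pipe_enqueue_buffer(pipe, &buf);
+ *
+ * // later, after an interrupt or while polling:
+ * err = ia_css_pipe_dequeue_buffer(pipe, &buf);
+ * if (err == IA_CSS_ERR_NO_BUFFER) {
+ *         // queue was empty, try again later
+ * }
+ * @endcode
+ */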
+
+/** @brief Set the state (Enable or Disable) of the Extension stage in the
+ * given pipe.
+ * @param[in] pipe Pipe handle.
+ * @param[in] fw_handle Extension firmware Handle (ia_css_fw_info.handle)
+ * @param[in] enable Enable Flag (1 to enable ; 0 to disable)
+ *
+ * @return
+ * IA_CSS_SUCCESS : Success
+ * IA_CSS_ERR_INVALID_ARGUMENTS : Invalid Parameters
+ * IA_CSS_ERR_RESOURCE_NOT_AVAILABLE : Inactive QOS Pipe
+ * (No active stream with this pipe)
+ *
+ * This function will request state change (enable or disable) for the Extension
+ * stage (firmware handle) in the given pipe.
+ *
+ * Note:
+ * 1. Extension can be enabled/disabled only on QOS Extensions
+ * 2. Extension can be enabled/disabled only with an active QOS Pipe
+ *	3. Initial (default) state of QOS Extensions is Disabled
+ *	4. State change cannot be guaranteed immediately or on a frame boundary
+ *
+ */
+enum ia_css_err
+ia_css_pipe_set_qos_ext_state (struct ia_css_pipe *pipe,
+ uint32_t fw_handle,
+ bool enable);
+
+/** @brief Get the state (Enable or Disable) of the Extension stage in the
+ * given pipe.
+ * @param[in] pipe Pipe handle.
+ * @param[in] fw_handle Extension firmware Handle (ia_css_fw_info.handle)
+ * @param[out] *enable Enable Flag
+ *
+ * @return
+ * IA_CSS_SUCCESS : Success
+ * IA_CSS_ERR_INVALID_ARGUMENTS : Invalid Parameters
+ * IA_CSS_ERR_RESOURCE_NOT_AVAILABLE : Inactive QOS Pipe
+ * (No active stream with this pipe)
+ *
+ * This function will query the state of the Extension stage (firmware handle)
+ * in the given Pipe.
+ *
+ * Note:
+ * 1. Extension state can be queried only on QOS Extensions
+ * 2. Extension can be enabled/disabled only with an active QOS Pipe
+ *	3. Initial (default) state of QOS Extensions is Disabled.
+ *
+ */
+enum ia_css_err
+ia_css_pipe_get_qos_ext_state (struct ia_css_pipe *pipe,
+ uint32_t fw_handle,
+ bool * enable);
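+
+/* Illustrative usage sketch (not part of the original interface); fw_handle
+ * is assumed to be the handle of a loaded QOS extension
+ * (ia_css_fw_info.handle):
+ *
+ * @code
+ * bool enabled = false;
+ *
+ * if (ia_css_pipe_set_qos_ext_state(pipe, fw_handle, true) == IA_CSS_SUCCESS)
+ *         (void)ia_css_pipe_get_qos_ext_state(pipe, fw_handle, &enabled);
+ * @endcode
+ */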
+
+#ifdef ISP2401
+/** @brief Update mapped CSS and ISP arguments for QoS pipe during SP runtime.
+ * @param[in] pipe Pipe handle.
+ * @param[in] fw_handle Extension firmware Handle (ia_css_fw_info.handle).
+ * @param[in] css_seg Parameter memory descriptors for CSS segments.
+ * @param[in] isp_seg Parameter memory descriptors for ISP segments.
+ *
+ * @return
+ * IA_CSS_SUCCESS : Success
+ * IA_CSS_ERR_INVALID_ARGUMENTS : Invalid Parameters
+ * IA_CSS_ERR_RESOURCE_NOT_AVAILABLE : Inactive QOS Pipe
+ * (No active stream with this pipe)
+ *
+ * \deprecated{This interface is used to temporarily support a late-developed,
+ * specific use-case on a specific IPU2 platform. It will not be supported or
+ * maintained on IPU3 or further.}
+ */
+enum ia_css_err
+ia_css_pipe_update_qos_ext_mapped_arg(struct ia_css_pipe *pipe, uint32_t fw_handle,
+ struct ia_css_isp_param_css_segments *css_seg,
+ struct ia_css_isp_param_isp_segments *isp_seg);
+
+#endif
+/** @brief Get selected configuration settings
+ * @param[in] pipe The pipe.
+ * @param[out] config Configuration settings.
+ * @return None
+ */
+void
+ia_css_pipe_get_isp_config(struct ia_css_pipe *pipe,
+ struct ia_css_isp_config *config);
+
+/** @brief Set the scaler LUT on this pipe. A copy of the LUT is made
+ *	   internally, so the LUT can be freed by the caller.
+ * @param[in]  pipe        Pipe handle.
+ * @param[in]  lut         Look-up table
+ *
+ * @return
+ * IA_CSS_SUCCESS : Success
+ * IA_CSS_ERR_INVALID_ARGUMENTS : Invalid Parameters
+ *
+ * Note:
+ * 1) Note that both GDCs are programmed with the same table.
+ * 2) The current implementation ignores the pipe and overrides the
+ *	global LUT. This will be fixed in the future.
+ * 3) This function must be called before stream start.
+ *
+ */
+enum ia_css_err
+ia_css_pipe_set_bci_scaler_lut( struct ia_css_pipe *pipe,
+ const void *lut);
+/** @brief Check whether the pipe supports DVS statistics
+ * @param[in] pipe_info The pipe info.
+ * @return true - has DVS statistics ability
+ * false - otherwise
+ */
+bool ia_css_pipe_has_dvs_stats(struct ia_css_pipe_info *pipe_info);
+
+#ifdef ISP2401
+/** @brief Override the frame format set on the output pins.
+ * @param[in] pipe Pipe handle.
+ * @param[in] output_pin Pin index to set the format on
+ * 0 - main output pin
+ * 1 - display output pin
+ * @param[in] format Format to set
+ *
+ * @return
+ * IA_CSS_SUCCESS : Success
+ * IA_CSS_ERR_INVALID_ARGUMENTS : Invalid Parameters
+ * IA_CSS_ERR_INTERNAL_ERROR : Pipe misses binary info
+ *
+ * Note:
+ * 1) This is an optional function to override the formats set in the pipe.
+ * 2) Only overriding with IA_CSS_FRAME_FORMAT_NV12_TILEY is currently allowed.
+ * 3) This function is only to be used on pipes that use the output system.
+ * 4) If this function is used, it MUST be called after ia_css_pipe_create.
+ * 5) If this function is used, this function MUST be called before ia_css_stream_start.
+ */
+enum ia_css_err
+ia_css_pipe_override_frame_format(struct ia_css_pipe *pipe,
+ int output_pin,
+ enum ia_css_frame_format format);
+
+#endif
+#endif /* __IA_CSS_PIPE_PUBLIC_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_prbs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_prbs.h
new file mode 100644
index 000000000000..9b0eeb08ca04
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_prbs.h
@@ -0,0 +1,53 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_PRBS_H
+#define __IA_CSS_PRBS_H
+
+/** @file
+ * This file contains support for Pseudo Random Bit Sequence (PRBS) inputs
+ */
+
+/** Enumerate the PRBS IDs.
+ */
+enum ia_css_prbs_id {
+ IA_CSS_PRBS_ID0,
+ IA_CSS_PRBS_ID1,
+ IA_CSS_PRBS_ID2
+};
+
+/**
+ * Maximum number of PRBS IDs.
+ *
+ * Make sure the value of this define is updated to reflect the correct
+ * number of ia_css_prbs_id enum entries if you add or delete an item in the enum.
+ */
+#define N_CSS_PRBS_IDS (IA_CSS_PRBS_ID2+1)
+
+/**
+ * PRBS configuration structure.
+ *
+ * Seed for the Pseudo Random Bit Sequence.
+ *
+ * @deprecated{This interface is deprecated, it is not portable -> move to input system API}
+ */
+struct ia_css_prbs_config {
+ enum ia_css_prbs_id id;
+ unsigned int h_blank; /**< horizontal blank */
+ unsigned int v_blank; /**< vertical blank */
+ int seed; /**< random seed for the 1st 2-pixel-components/clock */
+ int seed1; /**< random seed for the 2nd 2-pixel-components/clock */
+};
+
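+/* Illustrative initialization sketch (not part of the original interface).
+ * The blanking and seed values are example assumptions only:
+ *
+ * @code
+ * struct ia_css_prbs_config prbs = {
+ *         .id      = IA_CSS_PRBS_ID0,
+ *         .h_blank = 100,
+ *         .v_blank = 100,
+ *         .seed    = 0x12345678,
+ *         .seed1   = 0x87654321,
+ * };
+ *
+ * // typically assigned to ia_css_stream_config::source.prbs together with
+ * // IA_CSS_INPUT_MODE_PRBS (see ia_css_stream_public.h)
+ * @endcode
+ */
+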
+#endif /* __IA_CSS_PRBS_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_properties.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_properties.h
new file mode 100644
index 000000000000..19af4021b24c
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_properties.h
@@ -0,0 +1,41 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_PROPERTIES_H
+#define __IA_CSS_PROPERTIES_H
+
+/** @file
+ * This file contains support for retrieving properties of some hardware in the CSS system
+ */
+
+#include <type_support.h> /* bool */
+#include <ia_css_types.h> /* ia_css_vamem_type */
+
+struct ia_css_properties {
+ int gdc_coord_one;
+ bool l1_base_is_index; /**< Indicate whether the L1 page base
+ is a page index or a byte address. */
+ enum ia_css_vamem_type vamem_type;
+};
+
+/** @brief Get hardware properties
+ * @param[in,out] properties The hardware properties
+ * @return None
+ *
+ * This function returns a number of hardware properties.
+ */
+void
+ia_css_get_properties(struct ia_css_properties *properties);
+
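+/* Illustrative usage sketch (not part of the original interface):
+ *
+ * @code
+ * struct ia_css_properties props;
+ *
+ * ia_css_get_properties(&props);
+ * if (props.l1_base_is_index) {
+ *         // program the MMU with a page index rather than a byte address
+ * }
+ * @endcode
+ */
+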
+#endif /* __IA_CSS_PROPERTIES_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_shading.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_shading.h
new file mode 100644
index 000000000000..cb0f249e98c8
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_shading.h
@@ -0,0 +1,40 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_SHADING_H
+#define __IA_CSS_SHADING_H
+
+/** @file
+ * This file contains support for setting the shading table for CSS
+ */
+
+#include <ia_css_types.h>
+
+/** @brief Allocate a shading table
+ * @param[in]	width	Width of the shading table.
+ * @param[in]	height	Height of the shading table.
+ * @return		Pointer to the allocated shading table
+ */
+struct ia_css_shading_table *
+ia_css_shading_table_alloc(unsigned int width,
+ unsigned int height);
+
+/** @brief Free shading table
+ * @param[in] table Pointer to the shading table.
+ * @return None
+ */
+void
+ia_css_shading_table_free(struct ia_css_shading_table *table);
+
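+/* Illustrative usage sketch (not part of the original interface). The table
+ * dimensions are example assumptions only:
+ *
+ * @code
+ * struct ia_css_shading_table *table;
+ *
+ * table = ia_css_shading_table_alloc(64, 48);
+ * if (table != NULL) {
+ *         // ... fill in the shading gains and attach the table to the stream ...
+ *         ia_css_shading_table_free(table);
+ * }
+ * @endcode
+ */
+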
+#endif /* __IA_CSS_SHADING_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_stream.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_stream.h
new file mode 100644
index 000000000000..453fe4db0133
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_stream.h
@@ -0,0 +1,110 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _IA_CSS_STREAM_H_
+#define _IA_CSS_STREAM_H_
+
+#include <type_support.h>
+#include <system_local.h>
+#if !defined(HAS_NO_INPUT_SYSTEM) && !defined(USE_INPUT_SYSTEM_VERSION_2401)
+#include <input_system.h>
+#endif
+#include "ia_css_types.h"
+#include "ia_css_stream_public.h"
+
+/**
+ * structure to hold all internal stream related information
+ */
+struct ia_css_stream {
+ struct ia_css_stream_config config;
+ struct ia_css_stream_info info;
+#if !defined(HAS_NO_INPUT_SYSTEM) && !defined(USE_INPUT_SYSTEM_VERSION_2401)
+ rx_cfg_t csi_rx_config;
+#endif
+ bool reconfigure_css_rx;
+ struct ia_css_pipe *last_pipe;
+ int num_pipes;
+ struct ia_css_pipe **pipes;
+ struct ia_css_pipe *continuous_pipe;
+ struct ia_css_isp_parameters *isp_params_configs;
+ struct ia_css_isp_parameters *per_frame_isp_params_configs;
+
+ bool cont_capt;
+ bool disable_cont_vf;
+#ifndef ISP2401
+ bool stop_copy_preview;
+#endif
+ bool started;
+};
+
+/** @brief Get the binary in the stream that performs the shading correction.
+ *
+ * @param[in] stream: The stream.
+ * @return	The binary that performs the shading correction.
+ *
+ */
+struct ia_css_binary *
+ia_css_stream_get_shading_correction_binary(const struct ia_css_stream *stream);
+
+struct ia_css_binary *
+ia_css_stream_get_dvs_binary(const struct ia_css_stream *stream);
+
+struct ia_css_binary *
+ia_css_stream_get_3a_binary(const struct ia_css_stream *stream);
+
+unsigned int
+ia_css_stream_input_format_bits_per_pixel(struct ia_css_stream *stream);
+
+bool
+sh_css_params_set_binning_factor(struct ia_css_stream *stream, unsigned int sensor_binning);
+
+void
+sh_css_invalidate_params(struct ia_css_stream *stream);
+
+/* The following functions are used for testing purposes only */
+const struct ia_css_fpn_table *
+ia_css_get_fpn_table(struct ia_css_stream *stream);
+
+/** @brief Get a pointer to the shading table.
+ *
+ * @param[in] stream: The stream.
+ * @return The pointer to the shading table.
+ *
+ */
+struct ia_css_shading_table *
+ia_css_get_shading_table(struct ia_css_stream *stream);
+
+void
+ia_css_get_isp_dis_coefficients(struct ia_css_stream *stream,
+ short *horizontal_coefficients,
+ short *vertical_coefficients);
+
+void
+ia_css_get_isp_dvs2_coefficients(struct ia_css_stream *stream,
+ short *hor_coefs_odd_real,
+ short *hor_coefs_odd_imag,
+ short *hor_coefs_even_real,
+ short *hor_coefs_even_imag,
+ short *ver_coefs_odd_real,
+ short *ver_coefs_odd_imag,
+ short *ver_coefs_even_real,
+ short *ver_coefs_even_imag);
+
+enum ia_css_err
+ia_css_stream_isp_parameters_init(struct ia_css_stream *stream);
+
+void
+ia_css_stream_isp_parameters_uninit(struct ia_css_stream *stream);
+
+#endif /*_IA_CSS_STREAM_H_*/
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_stream_format.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_stream_format.h
new file mode 100644
index 000000000000..ae608a9c9051
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_stream_format.h
@@ -0,0 +1,94 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_STREAM_FORMAT_H
+#define __IA_CSS_STREAM_FORMAT_H
+
+/** @file
+ * This file contains formats usable for ISP streaming input
+ */
+
+#include <type_support.h> /* bool */
+
+/** The ISP streaming input interface supports the following formats.
+ * These match the corresponding MIPI formats.
+ */
+enum ia_css_stream_format {
+ IA_CSS_STREAM_FORMAT_YUV420_8_LEGACY, /**< 8 bits per subpixel */
+ IA_CSS_STREAM_FORMAT_YUV420_8, /**< 8 bits per subpixel */
+ IA_CSS_STREAM_FORMAT_YUV420_10, /**< 10 bits per subpixel */
+ IA_CSS_STREAM_FORMAT_YUV420_16, /**< 16 bits per subpixel */
+ IA_CSS_STREAM_FORMAT_YUV422_8, /**< UYVY..UYVY, 8 bits per subpixel */
+ IA_CSS_STREAM_FORMAT_YUV422_10, /**< UYVY..UYVY, 10 bits per subpixel */
+ IA_CSS_STREAM_FORMAT_YUV422_16, /**< UYVY..UYVY, 16 bits per subpixel */
+ IA_CSS_STREAM_FORMAT_RGB_444, /**< BGR..BGR, 4 bits per subpixel */
+ IA_CSS_STREAM_FORMAT_RGB_555, /**< BGR..BGR, 5 bits per subpixel */
+ IA_CSS_STREAM_FORMAT_RGB_565, /**< BGR..BGR, 5 bits B and R, 6 bits G */
+ IA_CSS_STREAM_FORMAT_RGB_666, /**< BGR..BGR, 6 bits per subpixel */
+ IA_CSS_STREAM_FORMAT_RGB_888, /**< BGR..BGR, 8 bits per subpixel */
+ IA_CSS_STREAM_FORMAT_RAW_6, /**< RAW data, 6 bits per pixel */
+ IA_CSS_STREAM_FORMAT_RAW_7, /**< RAW data, 7 bits per pixel */
+ IA_CSS_STREAM_FORMAT_RAW_8, /**< RAW data, 8 bits per pixel */
+ IA_CSS_STREAM_FORMAT_RAW_10, /**< RAW data, 10 bits per pixel */
+ IA_CSS_STREAM_FORMAT_RAW_12, /**< RAW data, 12 bits per pixel */
+ IA_CSS_STREAM_FORMAT_RAW_14, /**< RAW data, 14 bits per pixel */
+ IA_CSS_STREAM_FORMAT_RAW_16, /**< RAW data, 16 bits per pixel, which is
+ not specified in CSI-MIPI standard*/
+	IA_CSS_STREAM_FORMAT_BINARY_8,	      /**< Binary byte stream, which is targeted at
+ JPEG. */
+
+ /** CSI2-MIPI specific format: Generic short packet data. It is used to
+ * keep the timing information for the opening/closing of shutters,
+	 *  triggering of flashes, etc.
+ */
+ IA_CSS_STREAM_FORMAT_GENERIC_SHORT1, /**< Generic Short Packet Code 1 */
+ IA_CSS_STREAM_FORMAT_GENERIC_SHORT2, /**< Generic Short Packet Code 2 */
+ IA_CSS_STREAM_FORMAT_GENERIC_SHORT3, /**< Generic Short Packet Code 3 */
+ IA_CSS_STREAM_FORMAT_GENERIC_SHORT4, /**< Generic Short Packet Code 4 */
+ IA_CSS_STREAM_FORMAT_GENERIC_SHORT5, /**< Generic Short Packet Code 5 */
+ IA_CSS_STREAM_FORMAT_GENERIC_SHORT6, /**< Generic Short Packet Code 6 */
+ IA_CSS_STREAM_FORMAT_GENERIC_SHORT7, /**< Generic Short Packet Code 7 */
+ IA_CSS_STREAM_FORMAT_GENERIC_SHORT8, /**< Generic Short Packet Code 8 */
+
+ /** CSI2-MIPI specific format: YUV data.
+ */
+ IA_CSS_STREAM_FORMAT_YUV420_8_SHIFT, /**< YUV420 8-bit (Chroma Shifted Pixel Sampling) */
+	IA_CSS_STREAM_FORMAT_YUV420_10_SHIFT,  /**< YUV420 10-bit (Chroma Shifted Pixel Sampling) */
+
+ /** CSI2-MIPI specific format: Generic long packet data
+ */
+ IA_CSS_STREAM_FORMAT_EMBEDDED, /**< Embedded 8-bit non Image Data */
+
+ /** CSI2-MIPI specific format: User defined byte-based data. For example,
+ * the data transmitter (e.g. the SoC sensor) can keep the JPEG data as
+ * the User Defined Data Type 4 and the MPEG data as the
+ * User Defined Data Type 7.
+ */
+ IA_CSS_STREAM_FORMAT_USER_DEF1, /**< User defined 8-bit data type 1 */
+ IA_CSS_STREAM_FORMAT_USER_DEF2, /**< User defined 8-bit data type 2 */
+ IA_CSS_STREAM_FORMAT_USER_DEF3, /**< User defined 8-bit data type 3 */
+ IA_CSS_STREAM_FORMAT_USER_DEF4, /**< User defined 8-bit data type 4 */
+ IA_CSS_STREAM_FORMAT_USER_DEF5, /**< User defined 8-bit data type 5 */
+ IA_CSS_STREAM_FORMAT_USER_DEF6, /**< User defined 8-bit data type 6 */
+ IA_CSS_STREAM_FORMAT_USER_DEF7, /**< User defined 8-bit data type 7 */
+ IA_CSS_STREAM_FORMAT_USER_DEF8, /**< User defined 8-bit data type 8 */
+};
+
+#define IA_CSS_STREAM_FORMAT_NUM IA_CSS_STREAM_FORMAT_USER_DEF8
+
+unsigned int ia_css_util_input_format_bpp(
+ enum ia_css_stream_format format,
+ bool two_ppc);
+
+#endif /* __IA_CSS_STREAM_FORMAT_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_stream_public.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_stream_public.h
new file mode 100644
index 000000000000..2c8d9de10a59
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_stream_public.h
@@ -0,0 +1,582 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_STREAM_PUBLIC_H
+#define __IA_CSS_STREAM_PUBLIC_H
+
+/** @file
+ * This file contains support for configuring and controlling streams
+ */
+
+#include <type_support.h>
+#include "ia_css_types.h"
+#include "ia_css_pipe_public.h"
+#include "ia_css_metadata.h"
+#include "ia_css_tpg.h"
+#include "ia_css_prbs.h"
+#include "ia_css_input_port.h"
+
+/** Input modes, these enumerate all supported input modes.
+ * Note that not all ISP modes support all input modes.
+ */
+enum ia_css_input_mode {
+ IA_CSS_INPUT_MODE_SENSOR, /**< data from sensor */
+ IA_CSS_INPUT_MODE_FIFO, /**< data from input-fifo */
+ IA_CSS_INPUT_MODE_TPG, /**< data from test-pattern generator */
+ IA_CSS_INPUT_MODE_PRBS, /**< data from pseudo-random bit stream */
+ IA_CSS_INPUT_MODE_MEMORY, /**< data from a frame in memory */
+ IA_CSS_INPUT_MODE_BUFFERED_SENSOR /**< data is sent through mipi buffer */
+};
+
+/** Structure of the MIPI buffer configuration
+ */
+struct ia_css_mipi_buffer_config {
+ unsigned int size_mem_words; /**< The frame size in the system memory
+ words (32B) */
+	bool contiguous;	       /**< Whether the memory is allocated physically
+					    contiguously or not. \deprecated{Will always be false.}*/
+ unsigned int nof_mipi_buffers; /**< The number of MIPI buffers required for this
+ stream */
+};
+
+enum {
+ IA_CSS_STREAM_ISYS_STREAM_0 = 0,
+ IA_CSS_STREAM_DEFAULT_ISYS_STREAM_IDX = IA_CSS_STREAM_ISYS_STREAM_0,
+ IA_CSS_STREAM_ISYS_STREAM_1,
+ IA_CSS_STREAM_MAX_ISYS_STREAM_PER_CH
+};
+
+/** This is input data configuration for one MIPI data type. We can have
+ * multiple of this in one virtual channel.
+ */
+struct ia_css_stream_isys_stream_config {
+ struct ia_css_resolution input_res; /**< Resolution of input data */
+ enum ia_css_stream_format format; /**< Format of input stream. This data
+ format will be mapped to MIPI data
+ type internally. */
+	int linked_isys_stream_id; /**< default value is -1; any other value means
+							the current isys_stream shares the same buffer with
+							the indicated isys_stream */
+	bool valid; /**< indicates whether the other fields have valid values */
+};
+
+struct ia_css_stream_input_config {
+ struct ia_css_resolution input_res; /**< Resolution of input data */
+ struct ia_css_resolution effective_res; /**< Resolution of input data.
+ Used for CSS 2400/1 System and deprecated for other
+ systems (replaced by input_effective_res in
+ ia_css_pipe_config) */
+ enum ia_css_stream_format format; /**< Format of input stream. This data
+ format will be mapped to MIPI data
+ type internally. */
+ enum ia_css_bayer_order bayer_order; /**< Bayer order for RAW streams */
+};
+
+
+/** Input stream description. This describes how input will flow into the
+ * CSS. This is used to program the CSS hardware.
+ */
+struct ia_css_stream_config {
+ enum ia_css_input_mode mode; /**< Input mode */
+ union {
+ struct ia_css_input_port port; /**< Port, for sensor only. */
+ struct ia_css_tpg_config tpg; /**< TPG configuration */
+ struct ia_css_prbs_config prbs; /**< PRBS configuration */
+ } source; /**< Source of input data */
+ unsigned int channel_id; /**< Channel on which input data
+ will arrive. Use this field
+ to specify virtual channel id.
+ Valid values are: 0, 1, 2, 3 */
+ struct ia_css_stream_isys_stream_config isys_config[IA_CSS_STREAM_MAX_ISYS_STREAM_PER_CH];
+ struct ia_css_stream_input_config input_config;
+
+#ifdef ISP2401
+ /* Currently, Android and Windows platforms interpret the binning_factor parameter
+ * differently. In Android, the binning factor is expressed in the form
+	 * 2^N * 2^N, whereas on the Windows platform the binning factor is N*N.
+	 * To use the Windows method of specification, the caller has to define
+	 * the macro USE_WINDOWS_BINNING_FACTOR. This is for backward compatibility only
+	 * and will be deprecated. In the future, all platforms will use the N*N method.
+ */
+#endif
+ unsigned int sensor_binning_factor; /**< Binning factor used by sensor
+ to produce image data. This is
+ used for shading correction. */
+ unsigned int pixels_per_clock; /**< Number of pixels per clock, which can be
+ 1, 2 or 4. */
+	bool online; /**< When false (offline), RAW copy on the SP is activated;
+			  use this for continuous capture. */
+ /* ISYS2401 usage: ISP receives data directly from sensor, no copy. */
+ unsigned init_num_cont_raw_buf; /**< initial number of raw buffers to
+ allocate */
+ unsigned target_num_cont_raw_buf; /**< total number of raw buffers to
+ allocate */
+ bool pack_raw_pixels; /**< Pack pixels in the raw buffers */
+ bool continuous; /**< Use SP copy feature to continuously capture frames
+ to system memory and run pipes in offline mode */
+	bool disable_cont_viewfinder; /**< disable continuous viewfinder for ZSL use case */
+ int32_t flash_gpio_pin; /**< pin on which the flash is connected, -1 for no flash */
+	int left_padding; /**< The number of input-formatter left-padding pixels; -1 for the default from the binary. */
+ struct ia_css_mipi_buffer_config mipi_buffer_config; /**< mipi buffer configuration */
+ struct ia_css_metadata_config metadata_config; /**< Metadata configuration. */
+ bool ia_css_enable_raw_buffer_locking; /**< Enable Raw Buffer Locking for HALv3 Support */
+ bool lock_all;
+ /**< Lock all RAW buffers (true) or lock only buffers processed by
+ video or preview pipe (false).
+ This setting needs to be enabled to allow raw buffer locking
+ without continuous viewfinder. */
+};
+
+struct ia_css_stream;
+
+/** Stream info, this struct describes properties of a stream after it has been
+ * created.
+ */
+struct ia_css_stream_info {
+ struct ia_css_metadata_info metadata_info;
+ /**< Info about the metadata layout, this contains the stride. */
+};
+
+/** @brief Load default stream configuration
+ * @param[in,out] stream_config The stream configuration.
+ * @return None
+ *
+ * This function will reset the stream configuration to the default state:
+@code
+ memset(stream_config, 0, sizeof(*stream_config));
+ stream_config->online = true;
+ stream_config->left_padding = -1;
+@endcode
+ */
+void ia_css_stream_config_defaults(struct ia_css_stream_config *stream_config);
+
+/*
+ * create the internal structures and fill in the configuration data and pipes
+ */
+
+ /** @brief Creates a stream
+ * @param[in] stream_config The stream configuration.
+ * @param[in] num_pipes The number of pipes to incorporate in the stream.
+ * @param[in] pipes The pipes.
+ * @param[out] stream The stream.
+ * @return IA_CSS_SUCCESS or the error code.
+ *
+ * This function will create a stream with a given configuration and given pipes.
+ */
+enum ia_css_err
+ia_css_stream_create(const struct ia_css_stream_config *stream_config,
+ int num_pipes,
+ struct ia_css_pipe *pipes[],
+ struct ia_css_stream **stream);
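+
+/* A minimal usage sketch (not normative): typical creation and start of a
+ * stream. It assumes a pipe `pipe` has already been built (e.g. with
+ * ia_css_pipe_create()) and that the remaining fields of `my_config`
+ * (mode, source, input_config) are filled in by the caller.
+@code
+ struct ia_css_stream *stream = NULL;
+ struct ia_css_stream_config my_config;
+ struct ia_css_pipe *pipes[1] = { pipe };
+ enum ia_css_err err;
+
+ ia_css_stream_config_defaults(&my_config);
+ /* fill in my_config.mode, my_config.source and my_config.input_config here */
+ err = ia_css_stream_create(&my_config, 1, pipes, &stream);
+ if (err == IA_CSS_SUCCESS)
+ err = ia_css_stream_start(stream);
+@endcode
+ */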
+
+/** @brief Destroys a stream
+ * @param[in] stream The stream.
+ * @return IA_CSS_SUCCESS or the error code.
+ *
+ * This function will destroy a given stream.
+ */
+enum ia_css_err
+ia_css_stream_destroy(struct ia_css_stream *stream);
+
+/** @brief Provides information about a stream
+ * @param[in] stream The stream.
+ * @param[out] stream_info The information about the stream.
+ * @return IA_CSS_SUCCESS or the error code.
+ *
+ * This function will return information about the given stream.
+ */
+enum ia_css_err
+ia_css_stream_get_info(const struct ia_css_stream *stream,
+ struct ia_css_stream_info *stream_info);
+
+/** @brief load (rebuild) a stream that was unloaded.
+ * @param[in] stream The stream
+ * @return IA_CSS_SUCCESS or the error code
+ *
+ * Rebuild a stream, including allocating structs, setting configuration and
+ * building the required pipes.
+ */
+enum ia_css_err
+ia_css_stream_load(struct ia_css_stream *stream);
+
+/** @brief Starts the stream.
+ * @param[in] stream The stream.
+ * @return IA_CSS_SUCCESS or the error code.
+ *
+ * The dynamic data in the buffers is not used and needs to be queued with a
+ * separate call to ia_css_pipe_enqueue_buffer.
+ * NOTE: this function only sends a start event to the corresponding
+ * thread; it no longer starts the SP itself.
+ */
+enum ia_css_err
+ia_css_stream_start(struct ia_css_stream *stream);
+
+/** @brief Stop the stream.
+ * @param[in] stream The stream.
+ * @return IA_CSS_SUCCESS or the error code.
+ *
+ * NOTE: this function sends a stop event to the pipes belonging to this
+ * stream but does not terminate their threads.
+ */
+enum ia_css_err
+ia_css_stream_stop(struct ia_css_stream *stream);
+
+/** @brief Check if a stream has stopped
+ * @param[in] stream The stream.
+ * @return boolean flag
+ *
+ * This function checks whether the stream has stopped and returns the corresponding boolean flag.
+ */
+bool
+ia_css_stream_has_stopped(struct ia_css_stream *stream);
+
+/** @brief Destroy a stream according to the stream seed previously saved in the seed array.
+ * @param[in] stream The stream.
+ * @return IA_CSS_SUCCESS (no other errors are generated now)
+ *
+ * Destroy the stream and all the pipes related to it.
+ */
+enum ia_css_err
+ia_css_stream_unload(struct ia_css_stream *stream);
+
+/** @brief Returns stream format
+ * @param[in] stream The stream.
+ * @return format of the stream
+ *
+ * This function will return the stream format.
+ */
+enum ia_css_stream_format
+ia_css_stream_get_format(const struct ia_css_stream *stream);
+
+/** @brief Check if the stream is configured for 2 pixels per clock
+ * @param[in] stream The stream.
+ * @return boolean flag
+ *
+ * This function checks whether the stream is configured for 2 pixels per clock and
+ * returns the corresponding boolean flag.
+ */
+bool
+ia_css_stream_get_two_pixels_per_clock(const struct ia_css_stream *stream);
+
+/** @brief Sets the output frame stride (at the last pipe)
+ * @param[in] stream The stream
+ * @param[in] output_padded_width - the output buffer stride.
+ * @return ia_css_err
+ *
+ * This function will set the output frame stride (at the last pipe).
+ */
+enum ia_css_err
+ia_css_stream_set_output_padded_width(struct ia_css_stream *stream, unsigned int output_padded_width);
+
+/** @brief Return max number of continuous RAW frames.
+ * @param[in] stream The stream.
+ * @param[out] buffer_depth The maximum number of continuous RAW frames.
+ * @return IA_CSS_SUCCESS or IA_CSS_ERR_INVALID_ARGUMENTS
+ *
+ * This function will return the maximum number of continuous RAW frames
+ * the system can support.
+ */
+enum ia_css_err
+ia_css_stream_get_max_buffer_depth(struct ia_css_stream *stream, int *buffer_depth);
+
+/** @brief Set nr of continuous RAW frames to use.
+ *
+ * @param[in] stream The stream.
+ * @param[in] buffer_depth Number of frames to set.
+ * @return IA_CSS_SUCCESS or error code upon error.
+ *
+ * Set the number of continuous frames to use during continuous modes.
+ */
+enum ia_css_err
+ia_css_stream_set_buffer_depth(struct ia_css_stream *stream, int buffer_depth);
+
+/** @brief Get number of continuous RAW frames to use.
+ * @param[in] stream The stream.
+ * @param[out] buffer_depth The number of frames to use
+ * @return IA_CSS_SUCCESS or IA_CSS_ERR_INVALID_ARGUMENTS
+ *
+ * Get the currently set number of continuous frames
+ * to use during continuous modes.
+ */
+enum ia_css_err
+ia_css_stream_get_buffer_depth(struct ia_css_stream *stream, int *buffer_depth);
+
+/* ===== CAPTURE ===== */
+
+/** @brief Configure the continuous capture
+ *
+ * @param[in] stream The stream.
+ * @param[in] num_captures The number of RAW frames to be processed to
+ * YUV. Setting this to -1 will make continuous
+ * capture run until it is stopped.
+ * This number will also be used to allocate RAW
+ * buffers. To allow the viewfinder to also
+ * keep operating, 2 extra buffers will always be
+ * allocated.
+ * If the offset is negative and the skip setting
+ * is greater than 0, additional buffers may be
+ * needed.
+ * @param[in] skip Skip N frames in between captures. This can be
+ * used to select a slower capture frame rate than
+ * the sensor output frame rate.
+ * @param[in] offset Start the RAW-to-YUV processing at RAW buffer
+ * with this offset. This allows the user to
+ * process RAW frames that were captured in the
+ * past or future.
+ * @return IA_CSS_SUCCESS or error code upon error.
+ *
+ * For example, to capture the current frame plus the 2 previous
+ * frames and 2 subsequent frames, you would call
+ * ia_css_stream_capture(5, 0, -2).
+ */
+enum ia_css_err
+ia_css_stream_capture(struct ia_css_stream *stream,
+ int num_captures,
+ unsigned int skip,
+ int offset);
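+
+/* A minimal usage sketch (not normative) of the "(5, 0, -2)" case described
+ * above: process the current RAW frame plus the two previous and the two
+ * subsequent frames. It assumes `stream` is a running continuous stream.
+@code
+ enum ia_css_err err;
+
+ err = ia_css_stream_capture(stream, 5, 0, -2);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+@endcode
+ */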
+
+/** @brief Specify which raw frame to tag based on exp_id found in frame info
+ *
+ * @param[in] stream The stream.
+ * @param[in] exp_id The exposure id of the raw frame to tag.
+ *
+ * @return IA_CSS_SUCCESS or error code upon error.
+ *
+ * This function allows the user to tag a raw frame based on the exposure id
+ * found in the viewfinder frames' frame info.
+ */
+enum ia_css_err
+ia_css_stream_capture_frame(struct ia_css_stream *stream,
+ unsigned int exp_id);
+
+/* ===== VIDEO ===== */
+
+/** @brief Send streaming data into the css input FIFO
+ *
+ * @param[in] stream The stream.
+ * @param[in] data Pointer to the pixels to be sent.
+ * @param[in] width Width of the input frame.
+ * @param[in] height Height of the input frame.
+ * @return None
+ *
+ * Send streaming data into the css input FIFO. This is for testing purposes
+ * only. This uses the channel ID and input format as set by the user with
+ * the regular functions for this.
+ * This function blocks until the entire frame has been written into the
+ * input FIFO.
+ *
+ * Note:
+ * For higher flexibility the ia_css_stream_send_input_frame is replaced by
+ * three separate functions:
+ * 1) ia_css_stream_start_input_frame
+ * 2) ia_css_stream_send_input_line
+ * 3) ia_css_stream_end_input_frame
+ * In this way it is possible to stream multiple frames on different
+ * channel IDs on a line basis, which makes it possible to simulate
+ * line-interleaved stereo 3D muxed on one MIPI port.
+ * These 3 functions are for testing purposes only and can be used in
+ * conjunction with ia_css_stream_send_input_frame; a usage sketch follows
+ * ia_css_stream_end_input_frame() below.
+ */
+void
+ia_css_stream_send_input_frame(const struct ia_css_stream *stream,
+ const unsigned short *data,
+ unsigned int width,
+ unsigned int height);
+
+/** @brief Start an input frame on the CSS input FIFO.
+ *
+ * @param[in] stream The stream.
+ * @return None
+ *
+ * Starts streaming a MIPI frame by sending an SoF for channel channel_id.
+ * It will use the input_format and two_pixels_per_clock as provided by
+ * the user.
+ * For the "correct" use-case, input_format and two_pixels_per_clock must match
+ * with the values as set by the user with the regular functions.
+ * To simulate an error, the user can provide "incorrect" values for
+ * input_format and/or two_pixels_per_clock.
+ */
+void
+ia_css_stream_start_input_frame(const struct ia_css_stream *stream);
+
+/** @brief Send a line of input data into the CSS input FIFO.
+ *
+ * @param[in] stream The stream.
+ * @param[in] data Array of the first line of image data.
+ * @param[in] width The width (in pixels) of the first line.
+ * @param[in] data2 Array of the second line of image data.
+ * @param[in] width2 The width (in pixels) of the second line.
+ * @return None
+ *
+ * Sends 1 frame line: an SoL, followed by width bytes of data, followed
+ * by width2 bytes of data2, followed by an EoL.
+ * It will use the input_format and two_pixels_per_clock settings as provided
+ * with the ia_css_stream_start_input_frame function call.
+ *
+ * This function blocks until the entire line has been written into the
+ * input FIFO.
+ */
+void
+ia_css_stream_send_input_line(const struct ia_css_stream *stream,
+ const unsigned short *data,
+ unsigned int width,
+ const unsigned short *data2,
+ unsigned int width2);
+
+/** @brief Send a line of input embedded data into the CSS input FIFO.
+ *
+ * @param[in] stream Pointer to the stream.
+ * @param[in] format Format of the embedded data.
+ * @param[in] data Pointer to the embedded data line.
+ * @param[in] width The width (in pixels) of the line.
+ * @return None
+ *
+ * Sends one embedded data line to the input FIFO: an SoL, followed by
+ * width bytes of data, followed by an EoL.
+ * It will use the two_pixels_per_clock settings as provided with the
+ * ia_css_stream_start_input_frame function call.
+ *
+ * This function blocks until the entire line has been written into the
+ * input FIFO.
+ */
+void
+ia_css_stream_send_input_embedded_line(const struct ia_css_stream *stream,
+ enum ia_css_stream_format format,
+ const unsigned short *data,
+ unsigned int width);
+
+/** @brief End an input frame on the CSS input FIFO.
+ *
+ * @param[in] stream The stream.
+ * @return None
+ *
+ * Send the end-of-frame signal into the CSS input FIFO.
+ */
+void
+ia_css_stream_end_input_frame(const struct ia_css_stream *stream);
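+
+/* A minimal sketch (not normative) of the line-based streaming sequence
+ * mentioned above, assuming `stream` is configured for input FIFO testing and
+ * that `line`, `line2`, `width`, `width2` and `height` (all caller-provided)
+ * describe the test frame.
+@code
+ unsigned int row;
+
+ ia_css_stream_start_input_frame(stream);
+ for (row = 0; row < height; row++)
+ ia_css_stream_send_input_line(stream, line, width, line2, width2);
+ ia_css_stream_end_input_frame(stream);
+@endcode
+ */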
+
+/** @brief Send a flash request command to the SP
+ *
+ * @param[in] stream The stream.
+ * @return None
+ *
+ * The driver needs to call this function to send a flash request command
+ * to the SP; the SP is responsible for switching the flash on/off at the
+ * proper time. Due to the SP multi-threading environment, this request may be
+ * delayed by one frame, so the driver needs to check the flashed flag in the
+ * frame info to determine which frame was actually flashed.
+ */
+void
+ia_css_stream_request_flash(struct ia_css_stream *stream);
+
+/** @brief Configure a stream with filter coefficients.
+ * @deprecated {Replaced by
+ * ia_css_pipe_set_isp_config_on_pipe()}
+ *
+ * @param[in] stream The stream.
+ * @param[in] config The set of filter coefficients.
+ * @param[in] pipe Pipe to be updated when set isp config, NULL means to
+ * update all pipes in the stream.
+ * @return IA_CSS_SUCCESS or error code upon error.
+ *
+ * This function configures the filter coefficients for an image
+ * stream. For image pipes that do not execute any ISP filters, this
+ * function will have no effect.
+ * It is safe to call this function while the image stream is running,
+ * in fact this is the expected behavior most of the time. Proper
+ * resource locking and double buffering is in place to allow for this.
+ */
+enum ia_css_err
+ia_css_stream_set_isp_config_on_pipe(struct ia_css_stream *stream,
+ const struct ia_css_isp_config *config,
+ struct ia_css_pipe *pipe);
+
+/** @brief Configure a stream with filter coefficients.
+ * @deprecated {Replaced by
+ * ia_css_pipe_set_isp_config()}
+ * @param[in] stream The stream.
+ * @param[in] config The set of filter coefficients.
+ * @return IA_CSS_SUCCESS or error code upon error.
+ *
+ * This function configures the filter coefficients for an image
+ * stream. For image pipes that do not execute any ISP filters, this
+ * function will have no effect. All pipes of a stream will be updated.
+ * See ::ia_css_stream_set_isp_config_on_pipe() for the per-pipe alternative.
+ * It is safe to call this function while the image stream is running,
+ * in fact this is the expected behaviour most of the time. Proper
+ * resource locking and double buffering is in place to allow for this.
+ */
+enum ia_css_err
+ia_css_stream_set_isp_config(
+ struct ia_css_stream *stream,
+ const struct ia_css_isp_config *config);
+
+/** @brief Get selected configuration settings
+ * @param[in] stream The stream.
+ * @param[out] config Configuration settings.
+ * @return None
+ */
+void
+ia_css_stream_get_isp_config(const struct ia_css_stream *stream,
+ struct ia_css_isp_config *config);
+
+/** @brief allocate continuous raw frames for continuous capture
+ * @param[in] stream The stream.
+ * @return IA_CSS_SUCCESS or error code.
+ *
+ * Because this allocation takes a long time (around 120 ms per frame),
+ * the allocation part and the update part are separated so that the driver
+ * can call this function without locking. This function is the allocation
+ * part; the next one is the update part.
+ */
+enum ia_css_err
+ia_css_alloc_continuous_frame_remain(struct ia_css_stream *stream);
+
+/** @brief allocate continuous raw frames for continuous capture
+ * @param[in] stream The stream.
+ * @return IA_CSS_SUCCESS or error code.
+ *
+ * Because this allocation takes a long time (around 120 ms per frame),
+ * the allocation part and the update part are separated so that the driver
+ * can call this function without locking. This function is the update part.
+ */
+enum ia_css_err
+ia_css_update_continuous_frames(struct ia_css_stream *stream);
+
+/** @brief Unlock a raw frame (HALv3 support)
+ * @param[in] stream The stream.
+ * @param[in] exp_id exposure id that uniquely identifies the locked Raw Frame Buffer
+ * @return ia_css_err IA_CSS_SUCCESS or error code
+ *
+ * As part of the HALv3 feature requirement, the SP locks a raw buffer until the
+ * application releases its reference to it (raw buffers are managed by the SP).
+ * This function allows the application to explicitly unlock that buffer in the SP.
+ */
+enum ia_css_err
+ia_css_unlock_raw_frame(struct ia_css_stream *stream, uint32_t exp_id);
+
+/** @brief Enable/disable digital zoom for the capture pipe
+ * @param[in] stream The stream.
+ * @param[in] enable True to enable digital zoom, false to disable it.
+ * @return None
+ *
+ * Enables or disables digital zoom for capture pipe in provided stream, if capture pipe
+ * exists. This function sets enable_zoom flag in CAPTURE_PP stage of the capture pipe.
+ * In process_zoom_and_motion(), decision to enable or disable zoom for every stage depends
+ * on this flag.
+ */
+void
+ia_css_en_dz_capt_pipe(struct ia_css_stream *stream, bool enable);
+#endif /* __IA_CSS_STREAM_PUBLIC_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_timer.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_timer.h
new file mode 100644
index 000000000000..575bb28b4bec
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_timer.h
@@ -0,0 +1,84 @@
+#ifndef ISP2401
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#else
+/**
+Support for Intel Camera Imaging ISP subsystem.
+Copyright (c) 2010 - 2015, Intel Corporation.
+
+This program is free software; you can redistribute it and/or modify it
+under the terms and conditions of the GNU General Public License,
+version 2, as published by the Free Software Foundation.
+
+This program is distributed in the hope it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+more details.
+*/
+#endif
+
+#ifndef __IA_CSS_TIMER_H
+#define __IA_CSS_TIMER_H
+
+/** @file
+ * Timer interface definitions
+ */
+#include <type_support.h> /* for uint32_t */
+#include "ia_css_err.h"
+
+/** @brief timer reading definition */
+typedef uint32_t clock_value_t;
+
+/** @brief 32-bit clock tick (timestamp based on the timer value of the CSS-internal timer) */
+struct ia_css_clock_tick {
+ clock_value_t ticks; /**< measured time in ticks.*/
+};
+
+/** @brief TIMER event codes */
+enum ia_css_tm_event {
+ IA_CSS_TM_EVENT_AFTER_INIT,
+ /**< Timer Event after Initialization */
+ IA_CSS_TM_EVENT_MAIN_END,
+ /**< Timer Event after end of Main */
+ IA_CSS_TM_EVENT_THREAD_START,
+ /**< Timer Event after thread start */
+ IA_CSS_TM_EVENT_FRAME_PROC_START,
+ /**< Timer Event after Frame Process Start */
+ IA_CSS_TM_EVENT_FRAME_PROC_END
+ /**< Timer Event after Frame Process End */
+};
+
+/** @brief code measurement common struct */
+struct ia_css_time_meas {
+ clock_value_t start_timer_value; /**< measured time in ticks */
+ clock_value_t end_timer_value; /**< measured time in ticks */
+};
+
+/**@brief SIZE_OF_IA_CSS_CLOCK_TICK_STRUCT checks to ensure correct alignment for struct ia_css_clock_tick. */
+#define SIZE_OF_IA_CSS_CLOCK_TICK_STRUCT sizeof(clock_value_t)
+/** @brief checks to ensure correct alignment for ia_css_time_meas. */
+#define SIZE_OF_IA_CSS_TIME_MEAS_STRUCT (sizeof(clock_value_t) \
+ + sizeof(clock_value_t))
+
+/** @brief API to fetch the timer count directly
+ *
+ * @param[out] curr_ts Measured count value.
+ * @return IA_CSS_SUCCESS on success, or an error code otherwise.
+ */
+enum ia_css_err
+ia_css_timer_get_current_tick(
+ struct ia_css_clock_tick *curr_ts);
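+
+/* A minimal usage sketch (not normative): measuring a code section with two
+ * tick reads and storing the result in struct ia_css_time_meas defined above.
+ * code_under_measurement() is a placeholder for the code being timed; the
+ * tick-to-time conversion factor is platform specific and not shown here.
+@code
+ struct ia_css_clock_tick t0, t1;
+ struct ia_css_time_meas meas;
+
+ if (ia_css_timer_get_current_tick(&t0) != IA_CSS_SUCCESS)
+ return;
+ code_under_measurement();
+ if (ia_css_timer_get_current_tick(&t1) != IA_CSS_SUCCESS)
+ return;
+ meas.start_timer_value = t0.ticks;
+ meas.end_timer_value = t1.ticks;
+@endcode
+ */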
+
+#endif /* __IA_CSS_TIMER_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_tpg.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_tpg.h
new file mode 100644
index 000000000000..9238a3317a46
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_tpg.h
@@ -0,0 +1,78 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_TPG_H
+#define __IA_CSS_TPG_H
+
+/** @file
+ * This file contains support for the test pattern generator (TPG)
+ */
+
+/** Enumerate the TPG IDs.
+ */
+enum ia_css_tpg_id {
+ IA_CSS_TPG_ID0,
+ IA_CSS_TPG_ID1,
+ IA_CSS_TPG_ID2
+};
+
+/**
+ * Maximum number of TPG IDs.
+ *
+ * Make sure the value of this define is updated to reflect the correct
+ * number of ia_css_tpg_id enum values whenever an item is added to or
+ * deleted from the enum.
+ */
+#define N_CSS_TPG_IDS (IA_CSS_TPG_ID2+1)
+
+/** Enumerate the TPG modes.
+ */
+enum ia_css_tpg_mode {
+ IA_CSS_TPG_MODE_RAMP,
+ IA_CSS_TPG_MODE_CHECKERBOARD,
+ IA_CSS_TPG_MODE_FRAME_BASED_COLOR,
+ IA_CSS_TPG_MODE_MONO
+};
+
+/** @brief Configure the test pattern generator.
+ *
+ * Configure the Test Pattern Generator, the way these values are used to
+ * generate the pattern can be seen in the HRT extension for the test pattern
+ * generator:
+ * devices/test_pat_gen/hrt/include/test_pat_gen.h: hrt_calc_tpg_data().
+ *
+ * This interface is deprecated because it is not portable; move to the input system API instead.
+ *
+@code
+unsigned int test_pattern_value(unsigned int x, unsigned int y)
+{
+ unsigned int x_val, y_val;
+ if (x_delta > 0) x_val = (x << x_delta) & x_mask;
+ else x_val = (x >> -x_delta) & x_mask;
+ if (y_delta > 0) y_val = (y << y_delta) & y_mask;
+ else y_val = (y >> -y_delta) & y_mask;
+ return (x_val + y_val) & xy_mask;
+}
+@endcode
+ */
+struct ia_css_tpg_config {
+ enum ia_css_tpg_id id;
+ enum ia_css_tpg_mode mode;
+ unsigned int x_mask;
+ int x_delta;
+ unsigned int y_mask;
+ int y_delta;
+ unsigned int xy_mask;
+};
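+
+/* A minimal configuration sketch (not normative): a checkerboard test pattern
+ * on TPG 0. The mask/delta values are arbitrary illustrative numbers, not
+ * recommended settings.
+@code
+ struct ia_css_tpg_config tpg_config = {
+ .id = IA_CSS_TPG_ID0,
+ .mode = IA_CSS_TPG_MODE_CHECKERBOARD,
+ .x_mask = (1 << 4) - 1,
+ .x_delta = -2,
+ .y_mask = (1 << 4) - 1,
+ .y_delta = 3,
+ .xy_mask = (1 << 8) - 1,
+ };
+@endcode
+ */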
+
+#endif /* __IA_CSS_TPG_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_types.h
new file mode 100644
index 000000000000..5fec3d5c89d8
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_types.h
@@ -0,0 +1,654 @@
+/* Release Version: irci_stable_candrpv_0415_20150521_0458 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _IA_CSS_TYPES_H
+#define _IA_CSS_TYPES_H
+
+/** @file
+ * This file contains types used for the ia_css parameters.
+ * These types are in a separate file because they are expected
+ * to be used in software layers that do not access the CSS API
+ * directly but still need to forward parameters for it.
+ */
+
+#include <type_support.h>
+
+#include "ia_css_frac.h"
+
+#include "isp/kernels/aa/aa_2/ia_css_aa2_types.h"
+#include "isp/kernels/anr/anr_1.0/ia_css_anr_types.h"
+#include "isp/kernels/anr/anr_2/ia_css_anr2_types.h"
+#include "isp/kernels/cnr/cnr_2/ia_css_cnr2_types.h"
+#include "isp/kernels/csc/csc_1.0/ia_css_csc_types.h"
+#include "isp/kernels/ctc/ctc_1.0/ia_css_ctc_types.h"
+#include "isp/kernels/dp/dp_1.0/ia_css_dp_types.h"
+#include "isp/kernels/de/de_1.0/ia_css_de_types.h"
+#include "isp/kernels/de/de_2/ia_css_de2_types.h"
+#include "isp/kernels/fc/fc_1.0/ia_css_formats_types.h"
+#include "isp/kernels/fpn/fpn_1.0/ia_css_fpn_types.h"
+#include "isp/kernels/gc/gc_1.0/ia_css_gc_types.h"
+#include "isp/kernels/gc/gc_2/ia_css_gc2_types.h"
+#include "isp/kernels/macc/macc_1.0/ia_css_macc_types.h"
+#include "isp/kernels/ob/ob_1.0/ia_css_ob_types.h"
+#include "isp/kernels/s3a/s3a_1.0/ia_css_s3a_types.h"
+#include "isp/kernels/sc/sc_1.0/ia_css_sc_types.h"
+#include "isp/kernels/sdis/sdis_1.0/ia_css_sdis_types.h"
+#include "isp/kernels/sdis/sdis_2/ia_css_sdis2_types.h"
+#include "isp/kernels/tnr/tnr_1.0/ia_css_tnr_types.h"
+#include "isp/kernels/wb/wb_1.0/ia_css_wb_types.h"
+#include "isp/kernels/xnr/xnr_1.0/ia_css_xnr_types.h"
+#include "isp/kernels/xnr/xnr_3.0/ia_css_xnr3_types.h"
+#ifdef ISP2401
+#include "isp/kernels/tnr/tnr3/ia_css_tnr3_types.h"
+#endif
+#include "isp/kernels/ynr/ynr_1.0/ia_css_ynr_types.h"
+#include "isp/kernels/ynr/ynr_2/ia_css_ynr2_types.h"
+#include "isp/kernels/output/output_1.0/ia_css_output_types.h"
+
+#define IA_CSS_DVS_STAT_GRID_INFO_SUPPORTED
+/**< Should be removed after the driver adaptation is done */
+
+#define IA_CSS_VERSION_MAJOR 2
+#define IA_CSS_VERSION_MINOR 0
+#define IA_CSS_VERSION_REVISION 2
+
+#define IA_CSS_MORPH_TABLE_NUM_PLANES 6
+
+/* Min and max exposure IDs. These macros are here to allow
+ * the drivers to get this information. Changing these macros
+ * constitutes a CSS API change. */
+#define IA_CSS_ISYS_MIN_EXPOSURE_ID 1 /**< Minimum exposure ID */
+#define IA_CSS_ISYS_MAX_EXPOSURE_ID 250 /**< Maximum exposure ID */
+
+/* opaque types */
+struct ia_css_isp_parameters;
+struct ia_css_pipe;
+struct ia_css_memory_offsets;
+struct ia_css_config_memory_offsets;
+struct ia_css_state_memory_offsets;
+
+/** Virtual address within the CSS address space. */
+typedef uint32_t ia_css_ptr;
+
+/** Generic resolution structure.
+ */
+struct ia_css_resolution {
+ uint32_t width; /**< Width */
+ uint32_t height; /**< Height */
+};
+
+/** Generic coordinate structure.
+ */
+struct ia_css_coordinate {
+ int32_t x; /**< Value of a coordinate on the horizontal axis */
+ int32_t y; /**< Value of a coordinate on the vertical axis */
+};
+
+/** Vector with signed values. This is used to indicate motion for
+ * Digital Image Stabilization.
+ */
+struct ia_css_vector {
+ int32_t x; /**< horizontal motion (in pixels) */
+ int32_t y; /**< vertical motion (in pixels) */
+};
+
+/* Short hands */
+#define IA_CSS_ISP_DMEM IA_CSS_ISP_DMEM0
+#define IA_CSS_ISP_VMEM IA_CSS_ISP_VMEM0
+
+/** CSS data descriptor */
+struct ia_css_data {
+ ia_css_ptr address; /**< CSS virtual address */
+ uint32_t size; /**< Disabled if 0 */
+};
+
+/** Host data descriptor */
+struct ia_css_host_data {
+ char *address; /**< Host address */
+ uint32_t size; /**< Disabled if 0 */
+};
+
+/** ISP data descriptor */
+struct ia_css_isp_data {
+ uint32_t address; /**< ISP address */
+ uint32_t size; /**< Disabled if 0 */
+};
+
+/** Shading Correction types. */
+enum ia_css_shading_correction_type {
+#ifndef ISP2401
+ IA_CSS_SHADING_CORRECTION_TYPE_1 /**< Shading Correction 1.0 (pipe 1.0 on ISP2300, pipe 2.2 on ISP2400) */
+#else
+ IA_CSS_SHADING_CORRECTION_NONE, /**< Shading Correction is not processed in the pipe. */
+ IA_CSS_SHADING_CORRECTION_TYPE_1 /**< Shading Correction 1.0 (pipe 1.0 on ISP2300, pipe 2.2 on ISP2400/2401) */
+#endif
+
+ /**< More shading correction types can be added in the future. */
+};
+
+/** Shading Correction information. */
+struct ia_css_shading_info {
+ enum ia_css_shading_correction_type type; /**< Shading Correction type. */
+
+ union { /** Shading Correction information of each Shading Correction types. */
+
+ /** Shading Correction information of IA_CSS_SHADING_CORRECTION_TYPE_1.
+ *
+ * This structure contains the information necessary to generate
+ * the shading table required in the isp.
+ * This structure is filled in the css,
+ * and the driver needs to get it to generate the shading table.
+ *
+ * Before the shading correction is applied, NxN-filter and/or scaling
+ * are applied in the isp, depending on the isp binaries.
+ * Then, these should be considered in generating the shading table.
+ * - Bad pixels on left/top sides generated by NxN-filter
+ * (Bad pixels are NOT considered currently,
+ * because they are subtle.)
+ * - Down-scaling/Up-scaling factor
+ *
+ * Shading correction is applied to the area
+ * which has real sensor data and margin.
+ * Then, the shading table should cover the area including margin.
+ * This structure has this information.
+ * - Origin coordinate of bayer (real sensor data)
+ * on the shading table
+ *
+ * ------------------------ISP 2401-----------------------
+ *
+ * This structure contains the information necessary to generate the shading table
+ * directly required by the ISP.
+ * This structure is filled in by the CSS, and the driver needs to get it to generate the shading table.
+ *
+ * The shading correction is applied to the bayer area which contains sensor data and padding data.
+ * The shading table should cover this bayer area.
+ *
+ * The shading table size directly required from ISP is expressed by these parameters.
+ * 1. uint32_t num_hor_grids;
+ * 2. uint32_t num_ver_grids;
+ * 3. uint32_t bqs_per_grid_cell;
+ *
+ * In some isp binaries, the bayer scaling is applied before the shading correction is applied.
+ * Then, this scaling factor should be considered in generating the shading table.
+ * The scaling factor is expressed by these parameters.
+ * 4. uint32_t bayer_scale_hor_ratio_in;
+ * 5. uint32_t bayer_scale_hor_ratio_out;
+ * 6. uint32_t bayer_scale_ver_ratio_in;
+ * 7. uint32_t bayer_scale_ver_ratio_out;
+ *
+ * The sensor data size inputted to ISP is expressed by this parameter.
+ * This is the size BEFORE the bayer scaling is applied.
+ * 8. struct ia_css_resolution isp_input_sensor_data_res_bqs;
+ *
+ * The origin of the sensor data area positioned on the shading table at the shading correction
+ * is expressed by this parameter.
+ * The size of this area assumes the size AFTER the bayer scaling is applied
+ * to the isp_input_sensor_data_res_bqs.
+ * 9. struct ia_css_coordinate sensor_data_origin_bqs_on_sctbl;
+ *
+ * ****** Definitions of the shading table and the sensor data at the shading correction ******
+ *
+ * (0,0)--------------------- TW -------------------------------
+ * | shading table |
+ * | (ox,oy)---------- W -------------------------- |
+ * | | sensor data | |
+ * | | | |
+ * TH H sensor data center | |
+ * | | (cx,cy) | |
+ * | | | |
+ * | | | |
+ * | | | |
+ * | ------------------------------------------- |
+ * | |
+ * ----------------------------------------------------------
+ *
+ * Example of still mode for output 1080p:
+ *
+ * num_hor_grids = 66
+ * num_ver_grids = 37
+ * bqs_per_grid_cell = 16
+ * bayer_scale_hor_ratio_in = 1
+ * bayer_scale_hor_ratio_out = 1
+ * bayer_scale_ver_ratio_in = 1
+ * bayer_scale_ver_ratio_out = 1
+ * isp_input_sensor_data_res_bqs = {966, 546}
+ * sensor_data_origin_bqs_on_sctbl = {61, 15}
+ *
+ * TW, TH [bqs]: width and height of shading table
+ * TW = (num_hor_grids - 1) * bqs_per_grid_cell = (66 - 1) * 16 = 1040
+ * TH = (num_ver_grids - 1) * bqs_per_grid_cell = (37 - 1) * 16 = 576
+ *
+ * W, H [bqs]: width and height of sensor data at shading correction
+ * W = sensor_data_res_bqs.width
+ * = isp_input_sensor_data_res_bqs.width
+ * * bayer_scale_hor_ratio_out / bayer_scale_hor_ratio_in + 0.5 = 966
+ * H = sensor_data_res_bqs.height
+ * = isp_input_sensor_data_res_bqs.height
+ * * bayer_scale_ver_ratio_out / bayer_scale_ver_ratio_in + 0.5 = 546
+ *
+ * (ox, oy) [bqs]: origin of sensor data positioned on shading table at shading correction
+ * ox = sensor_data_origin_bqs_on_sctbl.x = 61
+ * oy = sensor_data_origin_bqs_on_sctbl.y = 15
+ *
+ * (cx, cy) [bqs]: center of sensor data positioned on shading table at shading correction
+ * cx = ox + W/2 = 61 + 966/2 = 544
+ * cy = oy + H/2 = 15 + 546/2 = 288
+ *
+ * ****** Relation between the shading table and the sensor data ******
+ *
+ * The origin of the sensor data should be on the shading table.
+ * 0 <= ox < TW, 0 <= oy < TH
+ *
+ * ****** How to center the shading table on the sensor data ******
+ *
+ * To center the shading table on the sensor data,
+ * CSS decides the shading table size so that a certain grid point is positioned
+ * on the center of the sensor data at the shading correction.
+ * CSS expects the shading center is set on this grid point
+ * when the shading table data is calculated in AIC.
+ *
+ * W, H [bqs]: width and height of sensor data at shading correction
+ * W = sensor_data_res_bqs.width
+ * H = sensor_data_res_bqs.height
+ *
+ * (cx, cy) [bqs]: center of sensor data positioned on shading table at shading correction
+ * cx = sensor_data_origin_bqs_on_sctbl.x + W/2
+ * cy = sensor_data_origin_bqs_on_sctbl.y + H/2
+ *
+ * CSS decides the shading table size and the sensor data position
+ * so that the (cx, cy) satisfies this condition.
+ * mod(cx, bqs_per_grid_cell) = 0
+ * mod(cy, bqs_per_grid_cell) = 0
+ *
+ * ****** How to change the sensor data size by processes in the driver and ISP ******
+ *
+ * 1. sensor data size: Physical sensor size
+ * (The struct ia_css_shading_info does not have this information.)
+ * 2. process: Driver applies the sensor cropping/binning/scaling to physical sensor size.
+ * 3. sensor data size: ISP input size (== shading_info.isp_input_sensor_data_res_bqs)
+ * (ISP assumes the ISP input sensor data is centered on the physical sensor.)
+ * 4. process: ISP applies the bayer scaling by the factor of shading_info.bayer_scale_*.
+ * 5. sensor data size: Scaling factor * ISP input size (== shading_info.sensor_data_res_bqs)
+ * 6. process: ISP applies the shading correction.
+ *
+ * ISP block: SC1
+ * ISP1: SC1 is used.
+ * ISP2: SC1 is used.
+ */
+ struct {
+#ifndef ISP2401
+ uint32_t enable; /**< Shading correction enabled.
+ 0:disabled, 1:enabled */
+ uint32_t num_hor_grids; /**< Number of data points per line
+ per color on shading table. */
+ uint32_t num_ver_grids; /**< Number of lines of data points
+ per color on shading table. */
+ uint32_t bqs_per_grid_cell; /**< Grid cell size
+ in BQ(Bayer Quad) unit.
+ (1BQ means {Gr,R,B,Gb}(2x2 pixels).)
+ Valid values are 8,16,32,64. */
+#else
+ uint32_t num_hor_grids; /**< Number of data points per line per color on shading table. */
+ uint32_t num_ver_grids; /**< Number of lines of data points per color on shading table. */
+ uint32_t bqs_per_grid_cell; /**< Grid cell size in BQ unit.
+ NOTE: bqs = size in BQ(Bayer Quad) unit.
+ 1BQ means {Gr,R,B,Gb} (2x2 pixels).
+ Horizontal 1 bqs corresponds to horizontal 2 pixels.
+ Vertical 1 bqs corresponds to vertical 2 pixels. */
+#endif
+ uint32_t bayer_scale_hor_ratio_in;
+ uint32_t bayer_scale_hor_ratio_out;
+#ifndef ISP2401
+ /**< Horizontal ratio of bayer scaling
+ between input width and output width, for the scaling
+ which should be done before shading correction.
+ output_width = input_width * bayer_scale_hor_ratio_out
+ / bayer_scale_hor_ratio_in */
+#else
+ /**< Horizontal ratio of bayer scaling between input width and output width,
+ for the scaling which should be done before shading correction.
+ output_width = input_width * bayer_scale_hor_ratio_out
+ / bayer_scale_hor_ratio_in + 0.5 */
+#endif
+ uint32_t bayer_scale_ver_ratio_in;
+ uint32_t bayer_scale_ver_ratio_out;
+#ifndef ISP2401
+ /**< Vertical ratio of bayer scaling
+ between input height and output height, for the scaling
+ which should be done before shading correction.
+ output_height = input_height * bayer_scale_ver_ratio_out
+ / bayer_scale_ver_ratio_in */
+ uint32_t sc_bayer_origin_x_bqs_on_shading_table;
+ /**< X coordinate (in bqs) of bayer origin on shading table.
+ This indicates the left-most pixel of bayer
+ (not include margin) inputted to the shading correction.
+ This corresponds to the left-most pixel of bayer
+ inputted to isp from sensor. */
+ uint32_t sc_bayer_origin_y_bqs_on_shading_table;
+ /**< Y coordinate (in bqs) of bayer origin on shading table.
+ This indicates the top pixel of bayer
+ (not include margin) inputted to the shading correction.
+ This corresponds to the top pixel of bayer
+ inputted to isp from sensor. */
+#else
+ /**< Vertical ratio of bayer scaling between input height and output height,
+ for the scaling which should be done before shading correction.
+ output_height = input_height * bayer_scale_ver_ratio_out
+ / bayer_scale_ver_ratio_in + 0.5 */
+ struct ia_css_resolution isp_input_sensor_data_res_bqs;
+ /**< Sensor data size (in bqs) inputted to ISP. This is the size BEFORE bayer scaling.
+ NOTE: This is NOT the size of the physical sensor size.
+ CSS requests the driver that ISP inputs sensor data
+ by the size of isp_input_sensor_data_res_bqs.
+ The driver sends the sensor data to ISP,
+ after the adequate cropping/binning/scaling
+ are applied to the physical sensor data area.
+ ISP assumes the area of isp_input_sensor_data_res_bqs
+ is centered on the physical sensor. */
+ struct ia_css_resolution sensor_data_res_bqs;
+ /**< Sensor data size (in bqs) at shading correction.
+ This is the size AFTER bayer scaling. */
+ struct ia_css_coordinate sensor_data_origin_bqs_on_sctbl;
+ /**< Origin of sensor data area positioned on shading table at shading correction.
+ The coordinate x,y should be positive values. */
+#endif
+ } type_1;
+
+ /**< More structures can be added here when more shading correction types will be added
+ in the future. */
+ } info;
+};
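+
+/* A minimal sketch (not normative): deriving the shading table size (in bqs)
+ * from the type_1 information, following the TW/TH formulas in the comment
+ * above. It assumes `info` points to a struct ia_css_shading_info that was
+ * filled in by the CSS.
+@code
+ uint32_t tbl_width_bqs, tbl_height_bqs;
+
+ tbl_width_bqs = (info->info.type_1.num_hor_grids - 1) *
+ info->info.type_1.bqs_per_grid_cell;
+ tbl_height_bqs = (info->info.type_1.num_ver_grids - 1) *
+ info->info.type_1.bqs_per_grid_cell;
+@endcode
+ */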
+
+#ifndef ISP2401
+
+/** Default Shading Correction information of Shading Correction Type 1. */
+#define DEFAULT_SHADING_INFO_TYPE_1 \
+{ \
+ IA_CSS_SHADING_CORRECTION_TYPE_1, /* type */ \
+ { /* info */ \
+ { \
+ 0, /* enable */ \
+ 0, /* num_hor_grids */ \
+ 0, /* num_ver_grids */ \
+ 0, /* bqs_per_grid_cell */ \
+ 1, /* bayer_scale_hor_ratio_in */ \
+ 1, /* bayer_scale_hor_ratio_out */ \
+ 1, /* bayer_scale_ver_ratio_in */ \
+ 1, /* bayer_scale_ver_ratio_out */ \
+ 0, /* sc_bayer_origin_x_bqs_on_shading_table */ \
+ 0 /* sc_bayer_origin_y_bqs_on_shading_table */ \
+ } \
+ } \
+}
+
+#else
+
+/** Default Shading Correction information of Shading Correction Type 1. */
+#define DEFAULT_SHADING_INFO_TYPE_1 \
+{ \
+ IA_CSS_SHADING_CORRECTION_TYPE_1, /* type */ \
+ { /* info */ \
+ { \
+ 0, /* num_hor_grids */ \
+ 0, /* num_ver_grids */ \
+ 0, /* bqs_per_grid_cell */ \
+ 1, /* bayer_scale_hor_ratio_in */ \
+ 1, /* bayer_scale_hor_ratio_out */ \
+ 1, /* bayer_scale_ver_ratio_in */ \
+ 1, /* bayer_scale_ver_ratio_out */ \
+ {0, 0}, /* isp_input_sensor_data_res_bqs */ \
+ {0, 0}, /* sensor_data_res_bqs */ \
+ {0, 0} /* sensor_data_origin_bqs_on_sctbl */ \
+ } \
+ } \
+}
+
+#endif
+
+/** Default Shading Correction information. */
+#define DEFAULT_SHADING_INFO DEFAULT_SHADING_INFO_TYPE_1
+
+/** structure that describes the 3A and DIS grids */
+struct ia_css_grid_info {
+ /** \name ISP input size
+ * that is visible to the user
+ * @{
+ */
+ uint32_t isp_in_width;
+ uint32_t isp_in_height;
+ /** @}*/
+
+ struct ia_css_3a_grid_info s3a_grid; /**< 3A grid info */
+ union ia_css_dvs_grid_u dvs_grid;
+ /**< All types of DVS statistics grid info union */
+
+ enum ia_css_vamem_type vamem_type;
+};
+
+/** defaults for ia_css_grid_info structs */
+#define DEFAULT_GRID_INFO \
+{ \
+ 0, /* isp_in_width */ \
+ 0, /* isp_in_height */ \
+ DEFAULT_3A_GRID_INFO, /* s3a_grid */ \
+ DEFAULT_DVS_GRID_INFO, /* dvs_grid */ \
+ IA_CSS_VAMEM_TYPE_1 /* vamem_type */ \
+}
+
+/** Morphing table, used for geometric distortion and chromatic aberration
+ * correction (GDCAC, also called GDC).
+ * This table describes the imperfections introduced by the lens, the
+ * advanced ISP can correct for these imperfections using this table.
+ */
+struct ia_css_morph_table {
+ uint32_t enable; /**< To disable GDC, set this field to false. The
+ coordinates fields can be set to NULL in this case. */
+ uint32_t height; /**< Table height */
+ uint32_t width; /**< Table width */
+ uint16_t *coordinates_x[IA_CSS_MORPH_TABLE_NUM_PLANES];
+ /**< X coordinates that describe the sensor imperfection */
+ uint16_t *coordinates_y[IA_CSS_MORPH_TABLE_NUM_PLANES];
+ /**< Y coordinates that describe the sensor imperfection */
+};
+
+struct ia_css_dvs_6axis_config {
+ unsigned int exp_id;
+ /**< Exposure ID, see ia_css_event_public.h for more detail */
+ uint32_t width_y;
+ uint32_t height_y;
+ uint32_t width_uv;
+ uint32_t height_uv;
+ uint32_t *xcoords_y;
+ uint32_t *ycoords_y;
+ uint32_t *xcoords_uv;
+ uint32_t *ycoords_uv;
+};
+
+/**
+ * This specifies the coordinates (x,y)
+ */
+struct ia_css_point {
+ int32_t x; /**< x coordinate */
+ int32_t y; /**< y coordinate */
+};
+
+/**
+ * This specifies the region
+ */
+struct ia_css_region {
+ struct ia_css_point origin; /**< Starting point coordinates for the region */
+ struct ia_css_resolution resolution; /**< Region resolution */
+};
+
+/**
+ * Digital zoom:
+ * This feature is currently available only for video, but will become
+ * available for preview and capture as well.
+ * Set the digital zoom factor; the value is a divider, so the actual zoom
+ * factor is 64/x.
+ * Setting dx or dy to 0 disables digital zoom for that direction.
+ * New API change for digital zoom: (added struct ia_css_region zoom_region)
+ * zoom_region specifies the origin of the zoom region and the width and
+ * height of that region.
+ * origin : the coordinate (x,y) within the effective input resolution
+ * of the stream, where x >= 0 and y >= 0. (0,0) maps to the upper left of the
+ * effective input resolution.
+ * resolution : the resolution of the zoom region,
+ * where x + width <= effective input width
+ * and y + height <= effective input height
+ */
+struct ia_css_dz_config {
+ uint32_t dx; /**< Horizontal zoom factor */
+ uint32_t dy; /**< Vertical zoom factor */
+ struct ia_css_region zoom_region; /**< region for zoom */
+};
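+
+/* A minimal configuration sketch (not normative): since the effective zoom
+ * factor is 64/x, dx = dy = 32 gives a 2x digital zoom. The zoom_region is
+ * left zeroed here, which is assumed to mean no explicit zoom-region cropping.
+@code
+ struct ia_css_dz_config dz_config = {
+ .dx = 32,
+ .dy = 32,
+ };
+@endcode
+ */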
+
+/** The still capture mode; this can be RAW (simply copy sensor input to DDR),
+ * the Primary ISP, the Advanced ISP (GDC) or the low-light ISP (ANR).
+ */
+enum ia_css_capture_mode {
+ IA_CSS_CAPTURE_MODE_RAW, /**< no processing, copy data only */
+ IA_CSS_CAPTURE_MODE_BAYER, /**< bayer processing, up to demosaic */
+ IA_CSS_CAPTURE_MODE_PRIMARY, /**< primary ISP */
+ IA_CSS_CAPTURE_MODE_ADVANCED, /**< advanced ISP (GDC) */
+ IA_CSS_CAPTURE_MODE_LOW_LIGHT /**< low light ISP (ANR) */
+};
+
+struct ia_css_capture_config {
+ enum ia_css_capture_mode mode; /**< Still capture mode */
+ uint32_t enable_xnr; /**< Enable/disable XNR */
+ uint32_t enable_raw_output;
+ bool enable_capture_pp_bli; /**< Enable capture_pp_bli mode */
+};
+
+/** default settings for ia_css_capture_config structs */
+#define DEFAULT_CAPTURE_CONFIG \
+{ \
+ IA_CSS_CAPTURE_MODE_PRIMARY, /* mode (capture) */ \
+ false, /* enable_xnr */ \
+ false, /* enable_raw_output */ \
+ false /* enable_capture_pp_bli */ \
+}
+
+
+/** ISP filter configuration. This is a collection of configurations
+ * for each of the ISP filters (modules).
+ *
+ * NOTE! The contents of all pointers are copied on get or set, with the
+ * exception of the shading and morph tables. For these, only the pointer is
+ * copied, so the caller must make sure the memory these pointers refer to
+ * remains valid as long as it is used by the CSS. This will be fixed in the
+ * future by copying the contents instead of just the pointer.
+ *
+ * Comment:
+ * ["ISP block", 1&2] : ISP block is used both for ISP1 and ISP2.
+ * ["ISP block", 1only] : ISP block is used only for ISP1.
+ * ["ISP block", 2only] : ISP block is used only for ISP2.
+ */
+struct ia_css_isp_config {
+ struct ia_css_wb_config *wb_config; /**< White Balance
+ [WB1, 1&2] */
+ struct ia_css_cc_config *cc_config; /**< Color Correction
+ [CSC1, 1only] */
+ struct ia_css_tnr_config *tnr_config; /**< Temporal Noise Reduction
+ [TNR1, 1&2] */
+ struct ia_css_ecd_config *ecd_config; /**< Eigen Color Demosaicing
+ [DE2, 2only] */
+ struct ia_css_ynr_config *ynr_config; /**< Y(Luma) Noise Reduction
+ [YNR2&YEE2, 2only] */
+ struct ia_css_fc_config *fc_config; /**< Fringe Control
+ [FC2, 2only] */
+ struct ia_css_formats_config *formats_config; /**< Formats Control for main output
+ [FORMATS, 1&2] */
+ struct ia_css_cnr_config *cnr_config; /**< Chroma Noise Reduction
+ [CNR2, 2only] */
+ struct ia_css_macc_config *macc_config; /**< MACC
+ [MACC2, 2only] */
+ struct ia_css_ctc_config *ctc_config; /**< Chroma Tone Control
+ [CTC2, 2only] */
+ struct ia_css_aa_config *aa_config; /**< YUV Anti-Aliasing
+ [AA2, 2only]
+ (not used currently) */
+ struct ia_css_aa_config *baa_config; /**< Bayer Anti-Aliasing
+ [BAA2, 1&2] */
+ struct ia_css_ce_config *ce_config; /**< Chroma Enhancement
+ [CE1, 1only] */
+ struct ia_css_dvs_6axis_config *dvs_6axis_config;
+ struct ia_css_ob_config *ob_config; /**< Optical Black
+ [OB1, 1&2] */
+ struct ia_css_dp_config *dp_config; /**< Defect Pixel Correction
+ [DPC1/DPC2, 1&2] */
+ struct ia_css_nr_config *nr_config; /**< Noise Reduction
+ [BNR1&YNR1&CNR1, 1&2]*/
+ struct ia_css_ee_config *ee_config; /**< Edge Enhancement
+ [YEE1, 1&2] */
+ struct ia_css_de_config *de_config; /**< Demosaic
+ [DE1, 1only] */
+ struct ia_css_gc_config *gc_config; /**< Gamma Correction (for YUV)
+ [GC1, 1only] */
+ struct ia_css_anr_config *anr_config; /**< Advanced Noise Reduction */
+ struct ia_css_3a_config *s3a_config; /**< 3A Statistics config */
+ struct ia_css_xnr_config *xnr_config; /**< eXtra Noise Reduction */
+ struct ia_css_dz_config *dz_config; /**< Digital Zoom */
+ struct ia_css_cc_config *yuv2rgb_cc_config; /**< Color Correction
+ [CCM2, 2only] */
+ struct ia_css_cc_config *rgb2yuv_cc_config; /**< Color Correction
+ [CSC2, 2only] */
+ struct ia_css_macc_table *macc_table; /**< MACC
+ [MACC1/MACC2, 1&2]*/
+ struct ia_css_gamma_table *gamma_table; /**< Gamma Correction (for YUV)
+ [GC1, 1only] */
+ struct ia_css_ctc_table *ctc_table; /**< Chroma Tone Control
+ [CTC1, 1only] */
+
+ /** \deprecated */
+ struct ia_css_xnr_table *xnr_table; /**< eXtra Noise Reduction
+ [XNR1, 1&2] */
+ struct ia_css_rgb_gamma_table *r_gamma_table;/**< sRGB Gamma Correction
+ [GC2, 2only] */
+ struct ia_css_rgb_gamma_table *g_gamma_table;/**< sRGB Gamma Correction
+ [GC2, 2only] */
+ struct ia_css_rgb_gamma_table *b_gamma_table;/**< sRGB Gamma Correction
+ [GC2, 2only] */
+ struct ia_css_vector *motion_vector; /**< For 2-axis DVS */
+ struct ia_css_shading_table *shading_table;
+ struct ia_css_morph_table *morph_table;
+ struct ia_css_dvs_coefficients *dvs_coefs; /**< DVS 1.0 coefficients */
+ struct ia_css_dvs2_coefficients *dvs2_coefs; /**< DVS 2.0 coefficients */
+ struct ia_css_capture_config *capture_config;
+ struct ia_css_anr_thres *anr_thres;
+ /** @deprecated{Old shading settings, see bugzilla bz675 for details} */
+ struct ia_css_shading_settings *shading_settings;
+ struct ia_css_xnr3_config *xnr3_config; /**< eXtreme Noise Reduction v3 */
+ /** Comment from Lasse: Be aware of how this feature will affect coordinate
+ * normalization in different parts of the system (e.g. face detection,
+ * touch focus, 3A statistics and windows of interest, shading correction,
+ * DVS, GDC) from the IQ tool level and application level down to the ISP FW level.
+ * The risk for regression is not in the individual blocks, but in how they
+ * integrate together. */
+ struct ia_css_output_config *output_config; /**< Main Output Mirroring, flipping */
+
+#ifdef ISP2401
+ struct ia_css_tnr3_kernel_config *tnr3_config; /**< TNR3 config */
+#endif
+ struct ia_css_scaler_config *scaler_config; /**< Skylake: scaler config (optional) */
+ struct ia_css_formats_config *formats_config_display;/**< Formats control for viewfinder/display output (optional)
+ [OSYS, n/a] */
+ struct ia_css_output_config *output_config_display; /**< Viewfinder/display output mirroring, flipping (optional) */
+
+ struct ia_css_frame *output_frame; /**< Output frame the config is to be applied to (optional) */
+ uint32_t isp_config_id; /**< Unique ID to track which config was actually applied to a particular frame */
+};
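+
+/* A minimal usage sketch (not normative): updating a single filter (white
+ * balance) on all pipes of a running stream via ia_css_stream_set_isp_config().
+ * It assumes `stream` was created earlier, `my_wb` holds caller-provided white
+ * balance settings, and that NULL pointers leave the other filters untouched.
+@code
+ struct ia_css_isp_config isp_config;
+ enum ia_css_err err;
+
+ memset(&isp_config, 0, sizeof(isp_config)); /* all other pointers stay NULL */
+ isp_config.wb_config = &my_wb;
+ err = ia_css_stream_set_isp_config(stream, &isp_config);
+@endcode
+ */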
+
+#endif /* _IA_CSS_TYPES_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_version.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_version.h
new file mode 100644
index 000000000000..48c59896e847
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_version.h
@@ -0,0 +1,40 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_VERSION_H
+#define __IA_CSS_VERSION_H
+
+/** @file
+ * This file contains functions to retrieve CSS-API version information
+ */
+
+#include <ia_css_err.h>
+
+/** a common size for the version arrays */
+#define MAX_VERSION_SIZE 500
+
+/** @brief Retrieves the current CSS version
+ * @param[out] version A pointer to a buffer in which to store the generated
+ * version string. NULL is ignored.
+ * @param[in] max_size Size of the version buffer. If the version string
+ * would be larger than max_size, an error is
+ * returned by this function.
+ *
+ * This function generates and returns the version string. If FW is loaded, it
+ * attaches the FW version.
+ */
+enum ia_css_err
+ia_css_get_version(char *version, int max_size);
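+
+/* A minimal usage sketch (not normative): retrieving the version string into
+ * a buffer of the recommended size.
+@code
+ char css_version[MAX_VERSION_SIZE];
+
+ if (ia_css_get_version(css_version, sizeof(css_version)) == IA_CSS_SUCCESS) {
+ /* css_version now holds the NUL-terminated version string */
+ }
+@endcode
+ */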
+
+#endif /* __IA_CSS_VERSION_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_version_data.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_version_data.h
new file mode 100644
index 000000000000..aad592cb86ef
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_version_data.h
@@ -0,0 +1,33 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+//
+// This file contains the version data for the CSS
+//
+// === Do not change - automatically generated ===
+//
+
+#ifndef __IA_CSS_VERSION_DATA_H
+#define __IA_CSS_VERSION_DATA_H
+
+
+#ifndef ISP2401
+#define CSS_VERSION_STRING "REL:20150521_21.4_0539; API:2.1.15.3; GIT:irci_candrpv_0415_20150504_35b345#35b345be52ac575f8934abb3a88fea26a94e7343; SDK:/nfs/iir/disks/iir_hivepackages_003/iir_hivepkgs_disk017/Css_Mizuchi/packages/Css_Mizuchi/int_css_mizuchi_20140829_1053; USER:viedifw; "
+#else
+#define CSS_VERSION_STRING "REL:20150911_37.5_1652; API:2.1.20.9; GIT:irci___#ebf437d53a8951bb7ff6d13fdb7270dab393a92a; SDK:; USER:viedifw; "
+#endif
+
+
+#endif
+
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/aa/aa_2/ia_css_aa2.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/aa/aa_2/ia_css_aa2.host.c
new file mode 100644
index 000000000000..f7dd256b6f7a
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/aa/aa_2/ia_css_aa2.host.c
@@ -0,0 +1,32 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "ia_css_types.h"
+#include "sh_css_defs.h"
+#ifndef IA_CSS_NO_DEBUG
+#include "ia_css_debug.h"
+#endif
+
+#include "ia_css_aa2.host.h"
+
+/* YUV Anti-Aliasing configuration. */
+const struct ia_css_aa_config default_aa_config = {
+ 8191 /* default should be 0 */
+};
+
+/* Bayer Anti-Aliasing configuration. */
+const struct ia_css_aa_config default_baa_config = {
+ 8191 /* default should be 0 */
+};
+
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/aa/aa_2/ia_css_aa2.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/aa/aa_2/ia_css_aa2.host.h
new file mode 100644
index 000000000000..71587d85ff2d
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/aa/aa_2/ia_css_aa2.host.h
@@ -0,0 +1,27 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_AA_HOST_H
+#define __IA_CSS_AA_HOST_H
+
+#include "ia_css_aa2_types.h"
+#include "ia_css_aa2_param.h"
+
+/* YUV Anti-Aliasing configuration. */
+extern const struct ia_css_aa_config default_aa_config;
+
+/* Bayer Anti-Aliasing configuration. */
+extern const struct ia_css_aa_config default_baa_config;
+
+#endif /* __IA_CSS_AA_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/aa/aa_2/ia_css_aa2_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/aa/aa_2/ia_css_aa2_param.h
new file mode 100644
index 000000000000..dbab4d6c6cd5
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/aa/aa_2/ia_css_aa2_param.h
@@ -0,0 +1,24 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_AA_PARAM_H
+#define __IA_CSS_AA_PARAM_H
+
+#include "type_support.h"
+
+struct sh_css_isp_aa_params {
+ int32_t strength;
+};
+
+#endif /* __IA_CSS_AA_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/aa/aa_2/ia_css_aa2_state.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/aa/aa_2/ia_css_aa2_state.h
new file mode 100644
index 000000000000..cc404018b112
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/aa/aa_2/ia_css_aa2_state.h
@@ -0,0 +1,41 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_AA2_STATE_H
+#define __IA_CSS_AA2_STATE_H
+
+#include "type_support.h"
+#include "vmem.h" /* for VMEM_ARRAY*/
+
+/* Denotes the maximum number of pixels per line that can be processed:
+* MAX_AA_VECTORS_PER_LINE = maximum_line_width / ISP_NWAY */
+#ifndef MAX_AA_VECTORS_PER_LINE
+#error Please define MAX_AA_VECTORS_PER_LINE.
+#endif
+
+/* This uses 2 history lines for each of y, u and v */
+#define AA_STATE_Y_BUFFER_HEIGHT 2
+#define AA_STATE_UV_BUFFER_HEIGHT 2
+#define AA_STATE_Y_BUFFER_WIDTH MAX_AA_VECTORS_PER_LINE
+/* The number of u and v elements is half y due to yuv420 downsampling. */
+#define AA_STATE_UV_BUFFER_WIDTH (AA_STATE_Y_BUFFER_WIDTH/2)
+
+
+struct ia_css_isp_aa_vmem_state {
+ VMEM_ARRAY(y[AA_STATE_Y_BUFFER_HEIGHT], AA_STATE_Y_BUFFER_WIDTH*ISP_NWAY);
+ VMEM_ARRAY(u[AA_STATE_UV_BUFFER_HEIGHT], AA_STATE_UV_BUFFER_WIDTH*ISP_NWAY);
+ VMEM_ARRAY(v[AA_STATE_UV_BUFFER_HEIGHT], AA_STATE_UV_BUFFER_WIDTH*ISP_NWAY);
+};
+
+#endif /* __IA_CSS_AA2_STATE_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/aa/aa_2/ia_css_aa2_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/aa/aa_2/ia_css_aa2_types.h
new file mode 100644
index 000000000000..834eedbbeeff
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/aa/aa_2/ia_css_aa2_types.h
@@ -0,0 +1,48 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_AA2_TYPES_H
+#define __IA_CSS_AA2_TYPES_H
+
+/** @file
+ * CSS-API header file for Anti-Aliasing parameters.
+ */
+
+
+/** Anti-Aliasing configuration.
+ *
+ * This structure is used both for YUV AA and Bayer AA.
+ *
+ * 1. YUV Anti-Aliasing
+ * struct ia_css_aa_config *aa_config
+ *
+ * ISP block: AA2
+ * (ISP1: AA2 is not used.)
+ *    ISP2: AA2 should be used, but it is currently unused.
+ *
+ * 2. Bayer Anti-Aliasing
+ * struct ia_css_aa_config *baa_config
+ *
+ * ISP block: BAA2
+ * ISP1: BAA2 is used.
+ * ISP2: BAA2 is used.
+ */
+struct ia_css_aa_config {
+ uint16_t strength; /**< Strength of the filter.
+ u0.13, [0,8191],
+ default/ineffective 0 */
+};
+
+#endif /* __IA_CSS_AA2_TYPES_H */
+
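The strength field above is documented as u0.13 in [0, 8191]. As an illustration of that fixed-point format only (the helper name and its float-based interface are not part of the CSS API), a conversion from a floating-point strength in [0.0, 1.0) could look like:

#include <stdint.h>

/* Hypothetical helper: map a float in [0.0, 1.0) to u0.13 (13 fractional
 * bits), clamped to the documented [0, 8191] range of ia_css_aa_config. */
uint16_t aa_strength_to_u0_13(double strength)
{
	double scaled = strength * (1 << 13);	/* 1.0 would map to 8192 */

	if (scaled < 0.0)
		return 0;
	if (scaled > 8191.0)
		return 8191;
	return (uint16_t)(scaled + 0.5);	/* round to nearest */
}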
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/anr/anr_1.0/ia_css_anr.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/anr/anr_1.0/ia_css_anr.host.c
new file mode 100644
index 000000000000..edc4f1ae6d5e
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/anr/anr_1.0/ia_css_anr.host.c
@@ -0,0 +1,60 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "ia_css_types.h"
+#include "sh_css_defs.h"
+#include "ia_css_debug.h"
+
+#include "ia_css_anr.host.h"
+
+const struct ia_css_anr_config default_anr_config = {
+ 10,
+ { 0, 3, 1, 2, 3, 6, 4, 5, 1, 4, 2, 3, 2, 5, 3, 4,
+ 0, 3, 1, 2, 3, 6, 4, 5, 1, 4, 2, 3, 2, 5, 3, 4,
+ 0, 3, 1, 2, 3, 6, 4, 5, 1, 4, 2, 3, 2, 5, 3, 4,
+ 0, 3, 1, 2, 3, 6, 4, 5, 1, 4, 2, 3, 2, 5, 3, 4},
+ {10, 20, 30}
+};
+
+void
+ia_css_anr_encode(
+ struct sh_css_isp_anr_params *to,
+ const struct ia_css_anr_config *from,
+ unsigned size)
+{
+ (void)size;
+ to->threshold = from->threshold;
+}
+
+void
+ia_css_anr_dump(
+ const struct sh_css_isp_anr_params *anr,
+ unsigned level)
+{
+ if (!anr) return;
+	ia_css_debug_dtrace(level, "Advanced Noise Reduction:\n");
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "anr_threshold", anr->threshold);
+}
+
+void
+ia_css_anr_debug_dtrace(
+ const struct ia_css_anr_config *config,
+ unsigned level)
+{
+ ia_css_debug_dtrace(level,
+ "config.threshold=%d\n",
+ config->threshold);
+}
+
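A usage sketch of the encoder defined above, assuming the surrounding driver headers are available (this is not a standalone program): only the threshold member of the public configuration is consumed by this encoder; thresholds[] and factors[] are not copied.

#include "ia_css_anr.host.h"	/* declares ia_css_anr_encode() and default_anr_config */

void anr_encode_example(void)
{
	struct sh_css_isp_anr_params isp_params;

	ia_css_anr_encode(&isp_params, &default_anr_config, sizeof(isp_params));
	/* isp_params.threshold is now 10, the threshold of default_anr_config. */
}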
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/anr/anr_1.0/ia_css_anr.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/anr/anr_1.0/ia_css_anr.host.h
new file mode 100644
index 000000000000..29566c07653c
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/anr/anr_1.0/ia_css_anr.host.h
@@ -0,0 +1,39 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_ANR_HOST_H
+#define __IA_CSS_ANR_HOST_H
+
+#include "ia_css_anr_types.h"
+#include "ia_css_anr_param.h"
+
+extern const struct ia_css_anr_config default_anr_config;
+
+void
+ia_css_anr_encode(
+ struct sh_css_isp_anr_params *to,
+ const struct ia_css_anr_config *from,
+ unsigned size);
+
+void
+ia_css_anr_dump(
+ const struct sh_css_isp_anr_params *anr,
+ unsigned level);
+
+void
+ia_css_anr_debug_dtrace(
+	const struct ia_css_anr_config *config, unsigned level);
+
+#endif /* __IA_CSS_ANR_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/anr/anr_1.0/ia_css_anr_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/anr/anr_1.0/ia_css_anr_param.h
new file mode 100644
index 000000000000..2621b920c3dc
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/anr/anr_1.0/ia_css_anr_param.h
@@ -0,0 +1,25 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_ANR_PARAM_H
+#define __IA_CSS_ANR_PARAM_H
+
+#include "type_support.h"
+
+/* ANR (Advanced Noise Reduction) */
+struct sh_css_isp_anr_params {
+ int32_t threshold;
+};
+
+#endif /* __IA_CSS_ANR_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/anr/anr_1.0/ia_css_anr_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/anr/anr_1.0/ia_css_anr_types.h
new file mode 100644
index 000000000000..e205574098f2
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/anr/anr_1.0/ia_css_anr_types.h
@@ -0,0 +1,36 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_ANR_TYPES_H
+#define __IA_CSS_ANR_TYPES_H
+
+/** @file
+ * CSS-API header file for Advanced Noise Reduction kernel v1
+ */
+
+/* Application specific DMA settings */
+#define ANR_BPP 10
+#define ANR_ELEMENT_BITS ((CEIL_DIV(ANR_BPP, 8))*8)
+
+/** Advanced Noise Reduction configuration.
+ * This is also known as Low-Light.
+ */
+struct ia_css_anr_config {
+ int32_t threshold; /**< Threshold */
+ int32_t thresholds[4*4*4];
+ int32_t factors[3];
+};
+
+#endif /* __IA_CSS_ANR_TYPES_H */
+
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/anr/anr_2/ia_css_anr2.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/anr/anr_2/ia_css_anr2.host.c
new file mode 100644
index 000000000000..b338c434453e
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/anr/anr_2/ia_css_anr2.host.c
@@ -0,0 +1,46 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "ia_css_types.h"
+#include "sh_css_defs.h"
+#include "ia_css_debug.h"
+
+#include "ia_css_anr2.host.h"
+
+void
+ia_css_anr2_vmem_encode(
+ struct ia_css_isp_anr2_params *to,
+ const struct ia_css_anr_thres *from,
+ size_t size)
+{
+ unsigned i;
+
+ (void)size;
+ for (i = 0; i < ANR_PARAM_SIZE; i++) {
+ unsigned j;
+ for (j = 0; j < ISP_VEC_NELEMS; j++) {
+ to->data[i][j] = from->data[i*ISP_VEC_NELEMS+j];
+ }
+ }
+}
+
+void
+ia_css_anr2_debug_dtrace(
+ const struct ia_css_anr_thres *config,
+ unsigned level)
+{
+ (void)config;
+ (void)level;
+}
+
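The encoder above reshapes the flat threshold array data[ANR_PARAM_SIZE * ISP_VEC_NELEMS] into ANR_PARAM_SIZE rows of ISP_VEC_NELEMS lanes. A standalone sketch of that indexing with small assumed sizes (the real vector width is hardware-defined and larger):

#include <stdint.h>
#include <stdio.h>

#define DEMO_PARAM_SIZE 13	/* matches ANR_PARAM_SIZE */
#define DEMO_VEC_NELEMS 4	/* assumed demo width, not the real ISP value */

int main(void)
{
	int16_t flat[DEMO_PARAM_SIZE * DEMO_VEC_NELEMS];
	int16_t rows[DEMO_PARAM_SIZE][DEMO_VEC_NELEMS];
	unsigned i, j;

	for (i = 0; i < DEMO_PARAM_SIZE * DEMO_VEC_NELEMS; i++)
		flat[i] = (int16_t)i;

	/* Same indexing as ia_css_anr2_vmem_encode(): row i, lane j <- flat[i*N + j]. */
	for (i = 0; i < DEMO_PARAM_SIZE; i++)
		for (j = 0; j < DEMO_VEC_NELEMS; j++)
			rows[i][j] = flat[i * DEMO_VEC_NELEMS + j];

	printf("rows[2][3] = %d (expected %d)\n", rows[2][3], 2 * DEMO_VEC_NELEMS + 3);
	return 0;
}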
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/anr/anr_2/ia_css_anr2.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/anr/anr_2/ia_css_anr2.host.h
new file mode 100644
index 000000000000..83c37e328591
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/anr/anr_2/ia_css_anr2.host.h
@@ -0,0 +1,35 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_ANR2_HOST_H
+#define __IA_CSS_ANR2_HOST_H
+
+#include "sh_css_params.h"
+
+#include "ia_css_anr2_types.h"
+#include "ia_css_anr_param.h"
+#include "ia_css_anr2_table.host.h"
+
+void
+ia_css_anr2_vmem_encode(
+ struct ia_css_isp_anr2_params *to,
+ const struct ia_css_anr_thres *from,
+ size_t size);
+
+void
+ia_css_anr2_debug_dtrace(
+	const struct ia_css_anr_thres *config, unsigned level);
+
+#endif /* __IA_CSS_ANR2_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/anr/anr_2/ia_css_anr2_table.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/anr/anr_2/ia_css_anr2_table.host.c
new file mode 100644
index 000000000000..2de51fe45623
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/anr/anr_2/ia_css_anr2_table.host.c
@@ -0,0 +1,52 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "system_global.h"
+#include "ia_css_types.h"
+#include "ia_css_anr2_table.host.h"
+
+#if 1
+const struct ia_css_anr_thres default_anr_thres = {
+{128, 384, 640, 896, 896, 640, 384, 128, 384, 1152, 1920, 2688, 2688, 1920, 1152, 384, 640, 1920, 3200, 4480, 4480, 3200, 1920, 640, 896, 2688, 4480, 6272, 6272, 4480, 2688, 896, 896, 2688, 4480, 6272, 6272, 4480, 2688, 896, 640, 1920, 3200, 4480, 4480, 3200, 1920, 640, 384, 1152, 1920, 2688, 2688, 1920, 1152, 384, 128, 384, 640, 896, 896, 640, 384, 128,
+0, 0, 30, 30, 10, 10, 20, 20, 0, 0, 30, 30, 10, 10, 20, 20, 0, 0, 30, 30, 10, 10, 20, 20, 0, 0, 30, 30, 10, 10, 20, 20, 0, 0, 30, 30, 10, 10, 20, 20, 0, 0, 30, 30, 10, 10, 20, 20, 0, 0, 30, 30, 10, 10, 20, 20, 0, 0, 30, 30, 10, 10, 20, 20,
+0, 0, 60, 60, 20, 20, 40, 40, 0, 0, 60, 60, 20, 20, 40, 40, 0, 0, 60, 60, 20, 20, 40, 40, 0, 0, 60, 60, 20, 20, 40, 40, 0, 0, 60, 60, 20, 20, 40, 40, 0, 0, 60, 60, 20, 20, 40, 40, 0, 0, 60, 60, 20, 20, 40, 40, 0, 0, 60, 60, 20, 20, 40, 40,
+0, 0, 90, 90, 30, 30, 60, 60, 0, 0, 90, 90, 30, 30, 60, 60, 0, 0, 90, 90, 30, 30, 60, 60, 0, 0, 90, 90, 30, 30, 60, 60, 0, 0, 90, 90, 30, 30, 60, 60, 0, 0, 90, 90, 30, 30, 60, 60, 0, 0, 90, 90, 30, 30, 60, 60, 0, 0, 90, 90, 30, 30, 60, 60,
+30, 30, 60, 60, 40, 40, 50, 50, 30, 30, 60, 60, 40, 40, 50, 50, 30, 30, 60, 60, 40, 40, 50, 50, 30, 30, 60, 60, 40, 40, 50, 50, 30, 30, 60, 60, 40, 40, 50, 50, 30, 30, 60, 60, 40, 40, 50, 50, 30, 30, 60, 60, 40, 40, 50, 50, 30, 30, 60, 60, 40, 40, 50, 50,
+60, 60, 120, 120, 80, 80, 100, 100, 60, 60, 120, 120, 80, 80, 100, 100, 60, 60, 120, 120, 80, 80, 100, 100, 60, 60, 120, 120, 80, 80, 100, 100, 60, 60, 120, 120, 80, 80, 100, 100, 60, 60, 120, 120, 80, 80, 100, 100, 60, 60, 120, 120, 80, 80, 100, 100, 60, 60, 120, 120, 80, 80, 100, 100,
+90, 90, 180, 180, 120, 120, 150, 150, 90, 90, 180, 180, 120, 120, 150, 150, 90, 90, 180, 180, 120, 120, 150, 150, 90, 90, 180, 180, 120, 120, 150, 150, 90, 90, 180, 180, 120, 120, 150, 150, 90, 90, 180, 180, 120, 120, 150, 150, 90, 90, 180, 180, 120, 120, 150, 150, 90, 90, 180, 180, 120, 120, 150, 150,
+10, 10, 40, 40, 20, 20, 30, 30, 10, 10, 40, 40, 20, 20, 30, 30, 10, 10, 40, 40, 20, 20, 30, 30, 10, 10, 40, 40, 20, 20, 30, 30, 10, 10, 40, 40, 20, 20, 30, 30, 10, 10, 40, 40, 20, 20, 30, 30, 10, 10, 40, 40, 20, 20, 30, 30, 10, 10, 40, 40, 20, 20, 30, 30,
+20, 20, 80, 80, 40, 40, 60, 60, 20, 20, 80, 80, 40, 40, 60, 60, 20, 20, 80, 80, 40, 40, 60, 60, 20, 20, 80, 80, 40, 40, 60, 60, 20, 20, 80, 80, 40, 40, 60, 60, 20, 20, 80, 80, 40, 40, 60, 60, 20, 20, 80, 80, 40, 40, 60, 60, 20, 20, 80, 80, 40, 40, 60, 60,
+30, 30, 120, 120, 60, 60, 90, 90, 30, 30, 120, 120, 60, 60, 90, 90, 30, 30, 120, 120, 60, 60, 90, 90, 30, 30, 120, 120, 60, 60, 90, 90, 30, 30, 120, 120, 60, 60, 90, 90, 30, 30, 120, 120, 60, 60, 90, 90, 30, 30, 120, 120, 60, 60, 90, 90, 30, 30, 120, 120, 60, 60, 90, 90,
+20, 20, 50, 50, 30, 30, 40, 40, 20, 20, 50, 50, 30, 30, 40, 40, 20, 20, 50, 50, 30, 30, 40, 40, 20, 20, 50, 50, 30, 30, 40, 40, 20, 20, 50, 50, 30, 30, 40, 40, 20, 20, 50, 50, 30, 30, 40, 40, 20, 20, 50, 50, 30, 30, 40, 40, 20, 20, 50, 50, 30, 30, 40, 40,
+40, 40, 100, 100, 60, 60, 80, 80, 40, 40, 100, 100, 60, 60, 80, 80, 40, 40, 100, 100, 60, 60, 80, 80, 40, 40, 100, 100, 60, 60, 80, 80, 40, 40, 100, 100, 60, 60, 80, 80, 40, 40, 100, 100, 60, 60, 80, 80, 40, 40, 100, 100, 60, 60, 80, 80, 40, 40, 100, 100, 60, 60, 80, 80,
+60, 60, 150, 150, 90, 90, 120, 120, 60, 60, 150, 150, 90, 90, 120, 120, 60, 60, 150, 150, 90, 90, 120, 120, 60, 60, 150, 150, 90, 90, 120, 120, 60, 60, 150, 150, 90, 90, 120, 120, 60, 60, 150, 150, 90, 90, 120, 120, 60, 60, 150, 150, 90, 90, 120, 120, 60, 60, 150, 150, 90, 90, 120, 120}
+};
+#else
+const struct ia_css_anr_thres default_anr_thres = {
+{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+};
+#endif
+
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/anr/anr_2/ia_css_anr2_table.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/anr/anr_2/ia_css_anr2_table.host.h
new file mode 100644
index 000000000000..534119e064c1
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/anr/anr_2/ia_css_anr2_table.host.h
@@ -0,0 +1,22 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_ANR2_TABLE_HOST_H
+#define __IA_CSS_ANR2_TABLE_HOST_H
+
+#include "ia_css_anr2_types.h"
+
+extern const struct ia_css_anr_thres default_anr_thres;
+
+#endif /* __IA_CSS_ANR2_TABLE_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/anr/anr_2/ia_css_anr2_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/anr/anr_2/ia_css_anr2_types.h
new file mode 100644
index 000000000000..3832ada433ec
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/anr/anr_2/ia_css_anr2_types.h
@@ -0,0 +1,32 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_ANR2_TYPES_H
+#define __IA_CSS_ANR2_TYPES_H
+
+/** @file
+ * CSS-API header file for Advanced Noise Reduction kernel v2
+ */
+
+#include "type_support.h"
+
+#define ANR_PARAM_SIZE 13
+
+/** Advanced Noise Reduction (ANR) thresholds */
+struct ia_css_anr_thres {
+ int16_t data[13*64];
+};
+
+#endif /* __IA_CSS_ANR2_TYPES_H */
+
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/anr/anr_2/ia_css_anr_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/anr/anr_2/ia_css_anr_param.h
new file mode 100644
index 000000000000..4a289853367a
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/anr/anr_2/ia_css_anr_param.h
@@ -0,0 +1,27 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_ANR2_PARAM_H
+#define __IA_CSS_ANR2_PARAM_H
+
+#include "vmem.h"
+#include "ia_css_anr2_types.h"
+
+/** Advanced Noise Reduction (ANR) thresholds */
+
+struct ia_css_isp_anr2_params {
+ VMEM_ARRAY(data, ANR_PARAM_SIZE*ISP_VEC_NELEMS);
+};
+
+#endif /* __IA_CSS_ANR2_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bayer_ls/bayer_ls_1.0/ia_css_bayer_load_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bayer_ls/bayer_ls_1.0/ia_css_bayer_load_param.h
new file mode 100644
index 000000000000..8e1f300bcd39
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bayer_ls/bayer_ls_1.0/ia_css_bayer_load_param.h
@@ -0,0 +1,20 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_BAYER_LOAD_PARAM_H
+#define __IA_CSS_BAYER_LOAD_PARAM_H
+
+#include "ia_css_bayer_ls_param.h"
+
+#endif /* __IA_CSS_BAYER_LOAD_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bayer_ls/bayer_ls_1.0/ia_css_bayer_ls_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bayer_ls/bayer_ls_1.0/ia_css_bayer_ls_param.h
new file mode 100644
index 000000000000..75ca7606b95c
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bayer_ls/bayer_ls_1.0/ia_css_bayer_ls_param.h
@@ -0,0 +1,42 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_BAYER_LS_PARAM_H
+#define __IA_CSS_BAYER_LS_PARAM_H
+
+#include "type_support.h"
+#ifndef ISP2401
+
+#define NUM_BAYER_LS 2
+#define BAYER_IDX_GR 0
+#define BAYER_IDX_R 1
+#define BAYER_IDX_B 2
+#define BAYER_IDX_GB 3
+#define BAYER_QUAD_WIDTH 2
+#define BAYER_QUAD_HEIGHT 2
+#define NOF_BAYER_VECTORS 4
+
+/** bayer load/store */
+struct sh_css_isp_bayer_ls_isp_config {
+ uint32_t base_address[NUM_BAYER_LS];
+ uint32_t width[NUM_BAYER_LS];
+ uint32_t height[NUM_BAYER_LS];
+ uint32_t stride[NUM_BAYER_LS];
+};
+
+#else
+#include "../../io_ls/common/ia_css_common_io_types.h"
+#endif
+
+#endif /* __IA_CSS_BAYER_LS_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bayer_ls/bayer_ls_1.0/ia_css_bayer_store_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bayer_ls/bayer_ls_1.0/ia_css_bayer_store_param.h
new file mode 100644
index 000000000000..f330be80efa6
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bayer_ls/bayer_ls_1.0/ia_css_bayer_store_param.h
@@ -0,0 +1,21 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_BAYER_STORE_PARAM_H
+#define __IA_CSS_BAYER_STORE_PARAM_H
+
+#include "ia_css_bayer_ls_param.h"
+
+
+#endif /* __IA_CSS_BAYER_STORE_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bh/bh_2/ia_css_bh.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bh/bh_2/ia_css_bh.host.c
new file mode 100644
index 000000000000..99c80d2d8f11
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bh/bh_2/ia_css_bh.host.c
@@ -0,0 +1,66 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#if !defined(HAS_NO_HMEM)
+
+#include "memory_access.h"
+#include "ia_css_types.h"
+#include "sh_css_internal.h"
+#include "assert_support.h"
+#include "sh_css_frac.h"
+
+#include "ia_css_bh.host.h"
+
+void
+ia_css_bh_hmem_decode(
+ struct ia_css_3a_rgby_output *out_ptr,
+ const struct ia_css_bh_table *hmem_buf)
+{
+ int i;
+
+ /*
+ * No weighted histogram, hence no grid definition
+ */
+	if (!hmem_buf)
+ return;
+ assert(sizeof_hmem(HMEM0_ID) == sizeof(*hmem_buf));
+
+ /* Deinterleave */
+ for (i = 0; i < HMEM_UNIT_SIZE; i++) {
+ out_ptr[i].r = hmem_buf->hmem[BH_COLOR_R][i];
+ out_ptr[i].g = hmem_buf->hmem[BH_COLOR_G][i];
+ out_ptr[i].b = hmem_buf->hmem[BH_COLOR_B][i];
+ out_ptr[i].y = hmem_buf->hmem[BH_COLOR_Y][i];
+ /* sh_css_print ("hmem[%d] = %d, %d, %d, %d\n",
+ i, out_ptr[i].r, out_ptr[i].g, out_ptr[i].b, out_ptr[i].y); */
+ }
+}
+
+void
+ia_css_bh_encode(
+ struct sh_css_isp_bh_params *to,
+ const struct ia_css_3a_config *from,
+ unsigned size)
+{
+ (void)size;
+ /* coefficients to calculate Y */
+ to->y_coef_r =
+ uDIGIT_FITTING(from->ae_y_coef_r, 16, SH_CSS_AE_YCOEF_SHIFT);
+ to->y_coef_g =
+ uDIGIT_FITTING(from->ae_y_coef_g, 16, SH_CSS_AE_YCOEF_SHIFT);
+ to->y_coef_b =
+ uDIGIT_FITTING(from->ae_y_coef_b, 16, SH_CSS_AE_YCOEF_SHIFT);
+}
+
+#endif
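ia_css_bh_hmem_decode() above turns the per-color histogram planes into one {r, g, b, y} record per bin. A standalone sketch of that deinterleave with assumed demo sizes (the real ISP_HIST_* dimensions come from the ISP configuration, not from here):

#include <stdint.h>

#define DEMO_COMPONENTS 4	/* R, G, B, Y planes */
#define DEMO_UNIT_SIZE  8	/* bins per plane, assumed demo value */

struct demo_rgby { uint32_t r, g, b, y; };

void demo_hist_decode(struct demo_rgby out[DEMO_UNIT_SIZE],
		      const uint32_t hist[DEMO_COMPONENTS][DEMO_UNIT_SIZE])
{
	int i;

	/* Plane-major input, record-major output: one RGBY record per bin. */
	for (i = 0; i < DEMO_UNIT_SIZE; i++) {
		out[i].r = hist[0][i];
		out[i].g = hist[1][i];
		out[i].b = hist[2][i];
		out[i].y = hist[3][i];
	}
}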
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bh/bh_2/ia_css_bh.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bh/bh_2/ia_css_bh.host.h
new file mode 100644
index 000000000000..cbb09299cf21
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bh/bh_2/ia_css_bh.host.h
@@ -0,0 +1,32 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_BH_HOST_H
+#define __IA_CSS_BH_HOST_H
+
+#include "ia_css_bh_param.h"
+#include "s3a/s3a_1.0/ia_css_s3a_types.h"
+
+void
+ia_css_bh_hmem_decode(
+ struct ia_css_3a_rgby_output *out_ptr,
+ const struct ia_css_bh_table *hmem_buf);
+
+void
+ia_css_bh_encode(
+ struct sh_css_isp_bh_params *to,
+ const struct ia_css_3a_config *from,
+ unsigned size);
+
+#endif /* __IA_CSS_BH_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bh/bh_2/ia_css_bh_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bh/bh_2/ia_css_bh_param.h
new file mode 100644
index 000000000000..b0a8ef3862e0
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bh/bh_2/ia_css_bh_param.h
@@ -0,0 +1,40 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_BH_PARAM_H
+#define __IA_CSS_BH_PARAM_H
+
+#include "type_support.h"
+
+#ifndef PIPE_GENERATION
+#define __INLINE_HMEM__
+#include "hmem.h"
+#endif
+
+#include "ia_css_bh_types.h"
+
+/* AE (3A Support) */
+struct sh_css_isp_bh_params {
+ /* coefficients to calculate Y */
+ int32_t y_coef_r;
+ int32_t y_coef_g;
+ int32_t y_coef_b;
+};
+
+/* This should be hmem_data_t, but that breaks the pipe generator */
+struct sh_css_isp_bh_hmem_params {
+ uint32_t bh[ISP_HIST_COMPONENTS][IA_CSS_HMEM_BH_UNIT_SIZE];
+};
+
+#endif /* __IA_CSS_BH_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bh/bh_2/ia_css_bh_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bh/bh_2/ia_css_bh_types.h
new file mode 100644
index 000000000000..9ae27a9e0baa
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bh/bh_2/ia_css_bh_types.h
@@ -0,0 +1,37 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_BH_TYPES_H
+#define __IA_CSS_BH_TYPES_H
+
+/** Number of elements in the BH table.
+ * Should be consistent with hmem.h
+ */
+#define IA_CSS_HMEM_BH_TABLE_SIZE ISP_HIST_DEPTH
+#define IA_CSS_HMEM_BH_UNIT_SIZE (ISP_HIST_DEPTH/ISP_HIST_COMPONENTS)
+
+#define BH_COLOR_R (0)
+#define BH_COLOR_G (1)
+#define BH_COLOR_B (2)
+#define BH_COLOR_Y (3)
+#define BH_COLOR_NUM (4)
+
+/** BH table */
+struct ia_css_bh_table {
+ uint32_t hmem[ISP_HIST_COMPONENTS][IA_CSS_HMEM_BH_UNIT_SIZE];
+};
+
+#endif /* __IA_CSS_BH_TYPES_H */
+
+
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnlm/ia_css_bnlm.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnlm/ia_css_bnlm.host.c
new file mode 100644
index 000000000000..6d12e031e6fc
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnlm/ia_css_bnlm.host.c
@@ -0,0 +1,183 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "type_support.h"
+#include "ia_css_bnlm.host.h"
+
+#ifndef IA_CSS_NO_DEBUG
+#include "ia_css_debug.h" /* ia_css_debug_dtrace() */
+#endif
+#include <assert_support.h>
+
+#define BNLM_DIV_LUT_SIZE (12)
+static const int32_t div_lut_nearests[BNLM_DIV_LUT_SIZE] = {
+ 0, 454, 948, 1484, 2070, 2710, 3412, 4184, 5035, 5978, 7025, 8191
+};
+
+static const int32_t div_lut_slopes[BNLM_DIV_LUT_SIZE] = {
+ -7760, -6960, -6216, -5536, -4912, -4344, -3832, -3360, -2936, -2552, -2208, -2208
+};
+
+static const int32_t div_lut_intercepts[BNLM_DIV_LUT_SIZE] = {
+ 8184, 7752, 7336, 6928, 6536, 6152, 5776, 5416, 5064, 4728, 4408, 4408
+};
+
+/* Encodes a look-up table from BNLM public parameters to VMEM parameters.
+ * Parameters:
+ *	lut     : output; bnlm_lut struct that receives the encoded VMEM look-up table
+ *	lut_thr : input; array of threshold values for the LUT
+ *	lut_val : input; array of output values related to lut_thr
+ *	lut_size: input; size of the lut_val array
+ */
+static inline void
+bnlm_lut_encode(struct bnlm_lut *lut, const int32_t *lut_thr, const int32_t *lut_val, const uint32_t lut_size)
+{
+ u32 blk, i;
+ const u32 block_size = 16;
+ const u32 total_blocks = ISP_VEC_NELEMS / block_size;
+
+ /* Create VMEM LUTs from the threshold and value arrays.
+ *
+ * Min size of the LUT is 2 entries.
+ *
+ * Max size of the LUT is 16 entries, so that the LUT can fit into a
+ * single group of 16 elements inside a vector.
+ * Then these elements are copied into other groups inside the same
+ * vector. If the LUT size is less than 16, then remaining elements are
+ * set to 0.
+ */
+ assert((lut_size >= 2) && (lut_size <= block_size));
+ /* array lut_thr has (lut_size-1) entries */
+ for (i = 0; i < lut_size-2; i++) {
+ /* Check if the lut_thr is monotonically increasing */
+ assert(lut_thr[i] <= lut_thr[i+1]);
+ }
+
+ /* Initialize */
+ for (i = 0; i < total_blocks * block_size; i++) {
+ lut->thr[0][i] = 0;
+ lut->val[0][i] = 0;
+ }
+
+ /* Copy all data */
+ for (i = 0; i < lut_size - 1; i++) {
+ lut->thr[0][i] = lut_thr[i];
+ lut->val[0][i] = lut_val[i];
+ }
+ lut->val[0][i] = lut_val[i]; /* val has one more element than thr */
+
+ /* Copy data from first block to all blocks */
+ for (blk = 1; blk < total_blocks; blk++) {
+ u32 blk_offset = blk * block_size;
+ for (i = 1; i < lut_size; i++) {
+ lut->thr[0][blk_offset + i] = lut->thr[0][i];
+ lut->val[0][blk_offset + i] = lut->val[0][i];
+ }
+ }
+}
+
+/*
+ * - Encodes BNLM public parameters into VMEM parameters
+ * - Generates VMEM parameters that are needed internally by the ISP
+ */
+void
+ia_css_bnlm_vmem_encode(
+ struct bnlm_vmem_params *to,
+ const struct ia_css_bnlm_config *from,
+ size_t size)
+{
+ int i;
+ (void)size;
+
+ /* Initialize LUTs in VMEM parameters */
+ bnlm_lut_encode(&to->mu_root_lut, from->mu_root_lut_thr, from->mu_root_lut_val, 16);
+ bnlm_lut_encode(&to->sad_norm_lut, from->sad_norm_lut_thr, from->sad_norm_lut_val, 16);
+ bnlm_lut_encode(&to->sig_detail_lut, from->sig_detail_lut_thr, from->sig_detail_lut_val, 16);
+ bnlm_lut_encode(&to->sig_rad_lut, from->sig_rad_lut_thr, from->sig_rad_lut_val, 16);
+ bnlm_lut_encode(&to->rad_pow_lut, from->rad_pow_lut_thr, from->rad_pow_lut_val, 16);
+ bnlm_lut_encode(&to->nl_0_lut, from->nl_0_lut_thr, from->nl_0_lut_val, 16);
+ bnlm_lut_encode(&to->nl_1_lut, from->nl_1_lut_thr, from->nl_1_lut_val, 16);
+ bnlm_lut_encode(&to->nl_2_lut, from->nl_2_lut_thr, from->nl_2_lut_val, 16);
+ bnlm_lut_encode(&to->nl_3_lut, from->nl_3_lut_thr, from->nl_3_lut_val, 16);
+
+ /* Initialize arrays in VMEM parameters */
+ memset(to->nl_th, 0, sizeof(to->nl_th));
+ to->nl_th[0][0] = from->nl_th[0];
+ to->nl_th[0][1] = from->nl_th[1];
+ to->nl_th[0][2] = from->nl_th[2];
+
+ memset(to->match_quality_max_idx, 0, sizeof(to->match_quality_max_idx));
+ to->match_quality_max_idx[0][0] = from->match_quality_max_idx[0];
+ to->match_quality_max_idx[0][1] = from->match_quality_max_idx[1];
+ to->match_quality_max_idx[0][2] = from->match_quality_max_idx[2];
+ to->match_quality_max_idx[0][3] = from->match_quality_max_idx[3];
+
+ bnlm_lut_encode(&to->div_lut, div_lut_nearests, div_lut_slopes, BNLM_DIV_LUT_SIZE);
+ memset(to->div_lut_intercepts, 0, sizeof(to->div_lut_intercepts));
+	for (i = 0; i < BNLM_DIV_LUT_SIZE; i++) {
+ to->div_lut_intercepts[0][i] = div_lut_intercepts[i];
+ }
+
+ memset(to->power_of_2, 0, sizeof(to->power_of_2));
+ for (i = 0; i < (ISP_VEC_ELEMBITS-1); i++) {
+ to->power_of_2[0][i] = 1 << i;
+ }
+}
+
+/* - Encodes BNLM public parameters into DMEM parameters */
+void
+ia_css_bnlm_encode(
+ struct bnlm_dmem_params *to,
+ const struct ia_css_bnlm_config *from,
+ size_t size)
+{
+ (void)size;
+ to->rad_enable = from->rad_enable;
+ to->rad_x_origin = from->rad_x_origin;
+ to->rad_y_origin = from->rad_y_origin;
+ to->avg_min_th = from->avg_min_th;
+ to->max_min_th = from->max_min_th;
+
+ to->exp_coeff_a = from->exp_coeff_a;
+ to->exp_coeff_b = from->exp_coeff_b;
+ to->exp_coeff_c = from->exp_coeff_c;
+ to->exp_exponent = from->exp_exponent;
+}
+
+/* Prints debug traces for BNLM public parameters */
+void
+ia_css_bnlm_debug_trace(
+ const struct ia_css_bnlm_config *config,
+ unsigned level)
+{
+ if (!config)
+ return;
+
+#ifndef IA_CSS_NO_DEBUG
+ ia_css_debug_dtrace(level, "BNLM:\n");
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "rad_enable", config->rad_enable);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "rad_x_origin", config->rad_x_origin);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "rad_y_origin", config->rad_y_origin);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "avg_min_th", config->avg_min_th);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "max_min_th", config->max_min_th);
+
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "exp_coeff_a", config->exp_coeff_a);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "exp_coeff_b", config->exp_coeff_b);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "exp_coeff_c", config->exp_coeff_c);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "exp_exponent", config->exp_exponent);
+
+ /* ToDo: print traces for LUTs */
+#endif /* IA_CSS_NO_DEBUG */
+
+}
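bnlm_lut_encode() above packs at most 16 threshold/value pairs into the first 16-lane block of a vector and replicates that block across the rest of the vector (replication starts at index 1, so lane 0 of the later blocks keeps its cleared value). A standalone sketch of that layout with an assumed 64-lane vector:

#include <stdint.h>
#include <string.h>

#define DEMO_BLOCK_SIZE 16
#define DEMO_VEC_NELEMS 64	/* assumed vector width: 4 blocks of 16 lanes */

void demo_lut_replicate(int32_t vec[DEMO_VEC_NELEMS],
			const int32_t *lut, uint32_t lut_size)
{
	uint32_t blk, i;

	/* Clear the whole vector, then write the LUT into block 0. */
	memset(vec, 0, DEMO_VEC_NELEMS * sizeof(vec[0]));
	for (i = 0; i < lut_size; i++)
		vec[i] = lut[i];

	/* Copy block 0 into blocks 1..N-1, starting at lane 1 as the driver does. */
	for (blk = 1; blk < DEMO_VEC_NELEMS / DEMO_BLOCK_SIZE; blk++)
		for (i = 1; i < lut_size; i++)
			vec[blk * DEMO_BLOCK_SIZE + i] = vec[i];
}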
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnlm/ia_css_bnlm.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnlm/ia_css_bnlm.host.h
new file mode 100644
index 000000000000..b99c0644ab38
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnlm/ia_css_bnlm.host.h
@@ -0,0 +1,41 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_BNLM_HOST_H
+#define __IA_CSS_BNLM_HOST_H
+
+#include "ia_css_bnlm_types.h"
+#include "ia_css_bnlm_param.h"
+#include "ia_css_bnlm_default.host.h"
+
+void
+ia_css_bnlm_vmem_encode(
+ struct bnlm_vmem_params *to,
+ const struct ia_css_bnlm_config *from,
+ size_t size);
+
+void
+ia_css_bnlm_encode(
+ struct bnlm_dmem_params *to,
+ const struct ia_css_bnlm_config *from,
+ size_t size);
+
+#ifndef IA_CSS_NO_DEBUG
+void
+ia_css_bnlm_debug_trace(
+ const struct ia_css_bnlm_config *config,
+ unsigned level);
+#endif
+
+#endif /* __IA_CSS_BNLM_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnlm/ia_css_bnlm_default.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnlm/ia_css_bnlm_default.host.c
new file mode 100644
index 000000000000..e2eb88c0f123
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnlm/ia_css_bnlm_default.host.c
@@ -0,0 +1,71 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "ia_css_bnlm_types.h"
+
+const struct ia_css_bnlm_config default_bnlm_config = {
+
+ .rad_enable = true,
+ .rad_x_origin = 0,
+ .rad_y_origin = 0,
+ .avg_min_th = 127,
+ .max_min_th = 2047,
+
+ .exp_coeff_a = 6048,
+ .exp_coeff_b = 7828,
+ .exp_coeff_c = 0,
+ .exp_exponent = 3,
+
+ .nl_th = {2252, 2251, 2250},
+ .match_quality_max_idx = {2, 3, 3, 1},
+
+ .mu_root_lut_thr = {
+ 26, 56, 128, 216, 462, 626, 932, 1108, 1480, 1564, 1824, 1896, 2368, 3428, 4560},
+ .mu_root_lut_val = {
+ 384, 320, 320, 264, 248, 240, 224, 192, 192, 160, 160, 160, 136, 130, 96, 80},
+ .sad_norm_lut_thr = {
+ 236, 328, 470, 774, 964, 1486, 2294, 3244, 4844, 6524, 6524, 6524, 6524, 6524, 6524},
+ .sad_norm_lut_val = {
+ 8064, 7680, 7168, 6144, 5120, 3840, 2560, 2304, 1984, 1792, 1792, 1792, 1792, 1792, 1792, 1792},
+ .sig_detail_lut_thr = {
+ 2936, 3354, 3943, 4896, 5230, 5682, 5996, 7299, 7299, 7299, 7299, 7299, 7299, 7299, 7299},
+ .sig_detail_lut_val = {
+ 8191, 7680, 7168, 6144, 5120, 4608, 4224, 4032, 4032, 4032, 4032, 4032, 4032, 4032, 4032, 4032},
+ .sig_rad_lut_thr = {
+ 18, 19, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20},
+ .sig_rad_lut_val = {
+ 2560, 7168, 8188, 8188, 8188, 8188, 8188, 8188, 8188, 8188, 8188, 8188, 8188, 8188, 8188, 8188},
+ .rad_pow_lut_thr = {
+ 0, 7013, 7013, 7013, 7013, 7013, 7013, 7013, 7013, 7013, 7013, 7013, 7013, 7013, 7013},
+ .rad_pow_lut_val = {
+ 8191, 8191, 8191, 8191, 8191, 8191, 8191, 8191, 8191, 8191, 8191, 8191, 8191, 8191, 8191, 8191},
+ .nl_0_lut_thr = {
+ 1072, 7000, 8000, 8000, 8000, 8000, 8000, 8000, 8000, 8000, 8000, 8000, 8000, 8000, 8000},
+ .nl_0_lut_val = {
+ 2560, 3072, 5120, 5120, 5120, 5120, 5120, 5120, 5120, 5120, 5120, 5120, 5120, 5120, 5120, 5120},
+ .nl_1_lut_thr = {
+ 624, 3224, 3392, 7424, 7424, 7424, 7424, 7424, 7424, 7424, 7424, 7424, 7424, 7424, 7424},
+ .nl_1_lut_val = {
+ 3584, 4608, 5120, 6144, 6144, 6144, 6144, 6144, 6144, 6144, 6144, 6144, 6144, 6144, 6144, 6144},
+ .nl_2_lut_thr = {
+ 745, 2896, 3720, 6535, 7696, 8040, 8040, 8040, 8040, 8040, 8040, 8040, 8040, 8040, 8040},
+ .nl_2_lut_val = {
+ 3584, 4608, 6144, 7168, 7936, 8191, 8191, 8191, 8191, 8191, 8191, 8191, 8191, 8191, 8191, 8191},
+ .nl_3_lut_thr = {
+ 4848, 4984, 5872, 6000, 6517, 6960, 7944, 8088, 8161, 8161, 8161, 8161, 8161, 8161, 8161},
+ .nl_3_lut_val = {
+ 3072, 4104, 4608, 5120, 6144, 7168, 7680, 8128, 8191, 8191, 8191, 8191, 8191, 8191, 8191, 8191},
+
+};
+
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnlm/ia_css_bnlm_default.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnlm/ia_css_bnlm_default.host.h
new file mode 100644
index 000000000000..f18c8070abba
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnlm/ia_css_bnlm_default.host.h
@@ -0,0 +1,22 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_BNLM_DEFAULT_HOST_H
+#define __IA_CSS_BNLM_DEFAULT_HOST_H
+
+#include "ia_css_bnlm_types.h"
+extern const struct ia_css_bnlm_config default_bnlm_config;
+
+#endif /* __IA_CSS_BNLM_DEFAULT_HOST_H */
+
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnlm/ia_css_bnlm_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnlm/ia_css_bnlm_param.h
new file mode 100644
index 000000000000..2f4be43e594e
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnlm/ia_css_bnlm_param.h
@@ -0,0 +1,63 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_BNLM_PARAM_H
+#define __IA_CSS_BNLM_PARAM_H
+
+#include "type_support.h"
+#include "vmem.h" /* needed for VMEM_ARRAY */
+
+struct bnlm_lut {
+ VMEM_ARRAY(thr, ISP_VEC_NELEMS); /* thresholds */
+ VMEM_ARRAY(val, ISP_VEC_NELEMS); /* values */
+};
+
+struct bnlm_vmem_params {
+ VMEM_ARRAY(nl_th, ISP_VEC_NELEMS);
+ VMEM_ARRAY(match_quality_max_idx, ISP_VEC_NELEMS);
+ struct bnlm_lut mu_root_lut;
+ struct bnlm_lut sad_norm_lut;
+ struct bnlm_lut sig_detail_lut;
+ struct bnlm_lut sig_rad_lut;
+ struct bnlm_lut rad_pow_lut;
+ struct bnlm_lut nl_0_lut;
+ struct bnlm_lut nl_1_lut;
+ struct bnlm_lut nl_2_lut;
+ struct bnlm_lut nl_3_lut;
+
+	/* LUTs used for division approximation */
+ struct bnlm_lut div_lut;
+ VMEM_ARRAY(div_lut_intercepts, ISP_VEC_NELEMS);
+
+	/* The 240x ISP has no instruction to left-shift each element of a
+	 * vector by a different shift value, so the shift is emulated by
+	 * multiplying each element by the required power of two (2^shift). */
+ VMEM_ARRAY(power_of_2, ISP_VEC_NELEMS);
+};
+
+/* BNLM ISP parameters */
+struct bnlm_dmem_params {
+ bool rad_enable;
+ int32_t rad_x_origin;
+ int32_t rad_y_origin;
+ int32_t avg_min_th;
+ int32_t max_min_th;
+
+ int32_t exp_coeff_a;
+ uint32_t exp_coeff_b;
+ int32_t exp_coeff_c;
+ uint32_t exp_exponent;
+};
+
+#endif /* __IA_CSS_BNLM_PARAM_H */
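The power_of_2[] member exists because, as the comment above notes, the ISP has no per-lane variable left shift; x << s is instead computed as x * 2^s with the multiplier looked up per lane. A standalone scalar illustration of that equivalence:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t pow2[16];
	uint32_t x = 5;
	unsigned s;

	for (s = 0; s < 16; s++)
		pow2[s] = 1u << s;	/* the lookup table of multipliers */

	for (s = 0; s < 16; s++)
		assert((x << s) == x * pow2[s]);	/* shift == multiply by 2^s */
	return 0;
}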
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnlm/ia_css_bnlm_state.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnlm/ia_css_bnlm_state.h
new file mode 100644
index 000000000000..79cce0e40e82
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnlm/ia_css_bnlm_state.h
@@ -0,0 +1,31 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_BNLM_STATE_H
+#define __IA_CSS_BNLM_STATE_H
+
+
+#include "type_support.h"
+#include "vmem.h" /* for VMEM_ARRAY*/
+#include "bnlm.isp.h"
+
+struct bnlm_vmem_state {
+ /* State buffers required for BNLM */
+ VMEM_ARRAY(buf[BNLM_STATE_BUF_HEIGHT], BNLM_STATE_BUF_WIDTH*ISP_NWAY);
+};
+
+
+
+#endif /* __IA_CSS_BNLM_STATE_H */
+
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnlm/ia_css_bnlm_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnlm/ia_css_bnlm_types.h
new file mode 100644
index 000000000000..219fb835cb26
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnlm/ia_css_bnlm_types.h
@@ -0,0 +1,106 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_BNLM_TYPES_H
+#define __IA_CSS_BNLM_TYPES_H
+
+/** @file
+ * CSS-API header file for Bayer Non-Linear Mean parameters.
+ */
+
+#include "type_support.h" /* int32_t */
+
+/** Bayer Non-Linear Mean configuration
+ *
+ * \brief BNLM public parameters.
+ * \details Struct with all parameters for the BNLM kernel that can be set
+ * from the CSS API.
+ *
+ * ISP2.6.1: BNLM is used.
+ */
+struct ia_css_bnlm_config {
+ bool rad_enable; /**< Enable a radial dependency in a weight calculation */
+ int32_t rad_x_origin; /**< Initial x coordinate for a radius calculation */
+	int32_t rad_y_origin;	/**< Initial y coordinate for a radius calculation */
+	/* If the average of the weights is below this threshold, the pixel is not denoised */
+	int32_t avg_min_th;
+	/* If the maximum weight is below this threshold, the pixel is not denoised */
+	int32_t max_min_th;
+
+ /**@{*/
+	/** Coefficients for the (1 + x / N)^N approximation that fits a
+	 * first-order exp() to the default exp_lut in the BNLM sheet.
+	 */
+ int32_t exp_coeff_a;
+ uint32_t exp_coeff_b;
+ int32_t exp_coeff_c;
+ uint32_t exp_exponent;
+ /**@}*/
+
+ int32_t nl_th[3]; /**< Detail thresholds */
+
+ /** Index for n-th maximum candidate weight for each detail group */
+ int32_t match_quality_max_idx[4];
+
+ /**@{*/
+ /** A lookup table for 1/sqrt(1+mu) approximation */
+ int32_t mu_root_lut_thr[15];
+ int32_t mu_root_lut_val[16];
+ /**@}*/
+ /**@{*/
+ /** A lookup table for SAD normalization */
+ int32_t sad_norm_lut_thr[15];
+ int32_t sad_norm_lut_val[16];
+ /**@}*/
+ /**@{*/
+ /** A lookup table that models a weight's dependency on textures */
+ int32_t sig_detail_lut_thr[15];
+ int32_t sig_detail_lut_val[16];
+ /**@}*/
+ /**@{*/
+ /** A lookup table that models a weight's dependency on a pixel's radial distance */
+ int32_t sig_rad_lut_thr[15];
+ int32_t sig_rad_lut_val[16];
+ /**@}*/
+ /**@{*/
+ /** A lookup table to control denoise power depending on a pixel's radial distance */
+ int32_t rad_pow_lut_thr[15];
+ int32_t rad_pow_lut_val[16];
+ /**@}*/
+ /**@{*/
+	/** Non-linear transfer functions to calculate the blending coefficient, one per detail group */
+ /** detail group 0 */
+ /**@{*/
+ int32_t nl_0_lut_thr[15];
+ int32_t nl_0_lut_val[16];
+ /**@}*/
+ /**@{*/
+ /** detail group 1 */
+ int32_t nl_1_lut_thr[15];
+ int32_t nl_1_lut_val[16];
+ /**@}*/
+ /**@{*/
+ /** detail group 2 */
+ int32_t nl_2_lut_thr[15];
+ int32_t nl_2_lut_val[16];
+ /**@}*/
+ /**@{*/
+ /** detail group 3 */
+ int32_t nl_3_lut_thr[15];
+ int32_t nl_3_lut_val[16];
+ /**@}*/
+ /**@}*/
+};
+
+#endif /* __IA_CSS_BNLM_TYPES_H */
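The exp_coeff_*/exp_exponent fields refer to a (1 + x / N)^N approximation of exp(); how the coefficients are combined in fixed point is not spelled out here. As a generic floating-point illustration only, with N = 2^exponent the power can be evaluated by repeated squaring (link with -lm):

#include <math.h>
#include <stdio.h>

/* Generic (1 + x/N)^N approximation to exp(x); N = 2^exponent, evaluated by
 * squaring the base 'exponent' times. Not the ISP's fixed-point formulation. */
double exp_approx(double x, unsigned exponent)
{
	double base = 1.0 + x / (double)(1u << exponent);
	unsigned i;

	for (i = 0; i < exponent; i++)
		base *= base;
	return base;
}

int main(void)
{
	printf("exp(1) = %f, (1 + 1/8)^8 = %f\n", exp(1.0), exp_approx(1.0, 3));
	return 0;
}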
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnr/bnr2_2/ia_css_bnr2_2.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnr/bnr2_2/ia_css_bnr2_2.host.c
new file mode 100644
index 000000000000..a7de6ecb950d
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnr/bnr2_2/ia_css_bnr2_2.host.c
@@ -0,0 +1,122 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "type_support.h"
+#include "ia_css_bnr2_2.host.h"
+
+#ifndef IA_CSS_NO_DEBUG
+#include "ia_css_debug.h" /* ia_css_debug_dtrace() */
+#endif
+
+/* Default kernel parameters. */
+const struct ia_css_bnr2_2_config default_bnr2_2_config = {
+ 200,
+ 200,
+ 200,
+ 0,
+ 0,
+ 0,
+ 200,
+ 200,
+ 200,
+ 0,
+ 0,
+ 0,
+ 0,
+ 4096,
+ 8191,
+ 128,
+ 1,
+ 0,
+ 0,
+ 0,
+ 8191,
+ 0,
+ 8191
+};
+
+void
+ia_css_bnr2_2_encode(
+ struct sh_css_isp_bnr2_2_params *to,
+ const struct ia_css_bnr2_2_config *from,
+ size_t size)
+{
+ (void)size;
+ to->d_var_gain_r = from->d_var_gain_r;
+ to->d_var_gain_g = from->d_var_gain_g;
+ to->d_var_gain_b = from->d_var_gain_b;
+ to->d_var_gain_slope_r = from->d_var_gain_slope_r;
+ to->d_var_gain_slope_g = from->d_var_gain_slope_g;
+ to->d_var_gain_slope_b = from->d_var_gain_slope_b;
+
+ to->n_var_gain_r = from->n_var_gain_r;
+ to->n_var_gain_g = from->n_var_gain_g;
+ to->n_var_gain_b = from->n_var_gain_b;
+ to->n_var_gain_slope_r = from->n_var_gain_slope_r;
+ to->n_var_gain_slope_g = from->n_var_gain_slope_g;
+ to->n_var_gain_slope_b = from->n_var_gain_slope_b;
+
+ to->dir_thres = from->dir_thres;
+ to->dir_thres_w = from->dir_thres_w;
+ to->var_offset_coef = from->var_offset_coef;
+
+ to->dir_gain = from->dir_gain;
+ to->detail_gain = from->detail_gain;
+ to->detail_gain_divisor = from->detail_gain_divisor;
+ to->detail_level_offset = from->detail_level_offset;
+
+ to->d_var_th_min = from->d_var_th_min;
+ to->d_var_th_max = from->d_var_th_max;
+ to->n_var_th_min = from->n_var_th_min;
+ to->n_var_th_max = from->n_var_th_max;
+}
+
+#ifndef IA_CSS_NO_DEBUG
+void
+ia_css_bnr2_2_debug_dtrace(
+ const struct ia_css_bnr2_2_config *bnr,
+ unsigned level)
+{
+ if (!bnr)
+ return;
+
+ ia_css_debug_dtrace(level, "Bayer Noise Reduction 2.2:\n");
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "d_var_gain_r", bnr->d_var_gain_r);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "d_var_gain_g", bnr->d_var_gain_g);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "d_var_gain_b", bnr->d_var_gain_b);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "d_var_gain_slope_r", bnr->d_var_gain_slope_r);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "d_var_gain_slope_g", bnr->d_var_gain_slope_g);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "d_var_gain_slope_b", bnr->d_var_gain_slope_b);
+
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "n_var_gain_r", bnr->n_var_gain_r);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "n_var_gain_g", bnr->n_var_gain_g);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "n_var_gain_b", bnr->n_var_gain_b);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "n_var_gain_slope_r", bnr->n_var_gain_slope_r);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "n_var_gain_slope_g", bnr->n_var_gain_slope_g);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "n_var_gain_slope_b", bnr->n_var_gain_slope_b);
+
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "dir_thres", bnr->dir_thres);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "dir_thres_w", bnr->dir_thres_w);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "var_offset_coef", bnr->var_offset_coef);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "dir_gain", bnr->dir_gain);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "detail_gain", bnr->detail_gain);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "detail_gain_divisor", bnr->detail_gain_divisor);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "detail_level_offset", bnr->detail_level_offset);
+
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "d_var_th_min", bnr->d_var_th_min);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "d_var_th_max", bnr->d_var_th_max);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "n_var_th_min", bnr->n_var_th_min);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "n_var_th_max", bnr->n_var_th_max);
+}
+#endif /* IA_CSS_NO_DEBUG */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnr/bnr2_2/ia_css_bnr2_2.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnr/bnr2_2/ia_css_bnr2_2.host.h
new file mode 100644
index 000000000000..c94b366b8142
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnr/bnr2_2/ia_css_bnr2_2.host.h
@@ -0,0 +1,35 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#ifndef __IA_CSS_BNR2_2_HOST_H
+#define __IA_CSS_BNR2_2_HOST_H
+
+#include "ia_css_bnr2_2_types.h"
+#include "ia_css_bnr2_2_param.h"
+
+extern const struct ia_css_bnr2_2_config default_bnr2_2_config;
+
+void
+ia_css_bnr2_2_encode(
+ struct sh_css_isp_bnr2_2_params *to,
+ const struct ia_css_bnr2_2_config *from,
+ size_t size);
+
+#ifndef IA_CSS_NO_DEBUG
+void
+ia_css_bnr2_2_debug_dtrace(
+ const struct ia_css_bnr2_2_config *config,
+ unsigned level);
+#endif
+
+#endif /* __IA_CSS_BNR2_2_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnr/bnr2_2/ia_css_bnr2_2_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnr/bnr2_2/ia_css_bnr2_2_param.h
new file mode 100644
index 000000000000..6dec27a99d8f
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnr/bnr2_2/ia_css_bnr2_2_param.h
@@ -0,0 +1,47 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_BNR2_2_PARAM_H
+#define __IA_CSS_BNR2_2_PARAM_H
+
+#include "type_support.h"
+
+/* BNR (Bayer Noise Reduction) ISP parameters */
+struct sh_css_isp_bnr2_2_params {
+ int32_t d_var_gain_r;
+ int32_t d_var_gain_g;
+ int32_t d_var_gain_b;
+ int32_t d_var_gain_slope_r;
+ int32_t d_var_gain_slope_g;
+ int32_t d_var_gain_slope_b;
+ int32_t n_var_gain_r;
+ int32_t n_var_gain_g;
+ int32_t n_var_gain_b;
+ int32_t n_var_gain_slope_r;
+ int32_t n_var_gain_slope_g;
+ int32_t n_var_gain_slope_b;
+ int32_t dir_thres;
+ int32_t dir_thres_w;
+ int32_t var_offset_coef;
+ int32_t dir_gain;
+ int32_t detail_gain;
+ int32_t detail_gain_divisor;
+ int32_t detail_level_offset;
+ int32_t d_var_th_min;
+ int32_t d_var_th_max;
+ int32_t n_var_th_min;
+ int32_t n_var_th_max;
+};
+
+#endif /* __IA_CSS_BNR2_2_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnr/bnr2_2/ia_css_bnr2_2_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnr/bnr2_2/ia_css_bnr2_2_types.h
new file mode 100644
index 000000000000..be80f705d8a1
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnr/bnr2_2/ia_css_bnr2_2_types.h
@@ -0,0 +1,71 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_BNR2_2_TYPES_H
+#define __IA_CSS_BNR2_2_TYPES_H
+
+/** @file
+* CSS-API header file for Bayer Noise Reduction parameters.
+*/
+
+#include "type_support.h" /* int32_t */
+
+/** Bayer Noise Reduction 2.2 configuration
+ *
+ * \brief BNR2_2 public parameters.
+ * \details Struct with all parameters for the BNR2.2 kernel that can be set
+ * from the CSS API.
+ *
+ * ISP2.6.1: BNR2.2 is used.
+ */
+struct ia_css_bnr2_2_config {
+ /**@{*/
+ /** Directional variance gain for R/G/B components in dark region */
+ int32_t d_var_gain_r;
+ int32_t d_var_gain_g;
+ int32_t d_var_gain_b;
+ /**@}*/
+ /**@{*/
+ /** Slope of Directional variance gain between dark and bright region */
+ int32_t d_var_gain_slope_r;
+ int32_t d_var_gain_slope_g;
+ int32_t d_var_gain_slope_b;
+ /**@}*/
+ /**@{*/
+ /** Non-Directional variance gain for R/G/B components in dark region */
+ int32_t n_var_gain_r;
+ int32_t n_var_gain_g;
+ int32_t n_var_gain_b;
+ /**@}*/
+ /**@{*/
+ /** Slope of Non-Directional variance gain between dark and bright region */
+ int32_t n_var_gain_slope_r;
+ int32_t n_var_gain_slope_g;
+ int32_t n_var_gain_slope_b;
+ /**@}*/
+
+ int32_t dir_thres; /**< Threshold for directional filtering */
+ int32_t dir_thres_w; /**< Threshold width for directional filtering */
+ int32_t var_offset_coef; /**< Variance offset coefficient */
+ int32_t dir_gain; /**< Gain for directional coefficient */
+ int32_t detail_gain; /**< Gain for low contrast texture control */
+ int32_t detail_gain_divisor; /**< Gain divisor for low contrast texture control */
+ int32_t detail_level_offset; /**< Bias value for low contrast texture control */
+ int32_t d_var_th_min; /**< Minimum clipping value for directional variance */
+ int32_t d_var_th_max; /**< Maximum clipping value for directional variance */
+ int32_t n_var_th_min; /**< Minimum clipping value for non-directional variance */
+ int32_t n_var_th_max; /**< Maximum clipping value for non-directional variance */
+};
+
+#endif /* __IA_CSS_BNR2_2_TYPES_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnr/bnr_1.0/ia_css_bnr.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnr/bnr_1.0/ia_css_bnr.host.c
new file mode 100644
index 000000000000..d1baca54c3ad
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnr/bnr_1.0/ia_css_bnr.host.c
@@ -0,0 +1,64 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "ia_css_types.h"
+#include "sh_css_defs.h"
+#include "ia_css_debug.h"
+#include "sh_css_frac.h"
+
+#include "ia_css_bnr.host.h"
+
+void
+ia_css_bnr_encode(
+ struct sh_css_isp_bnr_params *to,
+ const struct ia_css_nr_config *from,
+ unsigned size)
+{
+ (void)size;
+ /* BNR (Bayer Noise Reduction) */
+ to->threshold_low =
+ uDIGIT_FITTING(from->direction, 16, SH_CSS_BAYER_BITS);
+ to->threshold_width_log2 = uFRACTION_BITS_FITTING(8);
+ to->threshold_width =
+ 1 << to->threshold_width_log2;
+ to->gain_all =
+ uDIGIT_FITTING(from->bnr_gain, 16, SH_CSS_BNR_GAIN_SHIFT);
+ to->gain_dir =
+ uDIGIT_FITTING(from->bnr_gain, 16, SH_CSS_BNR_GAIN_SHIFT);
+ to->clip = uDIGIT_FITTING((unsigned)16384, 16, SH_CSS_BAYER_BITS);
+}
+
+void
+ia_css_bnr_dump(
+ const struct sh_css_isp_bnr_params *bnr,
+ unsigned level)
+{
+ if (!bnr)
+ return;
+ ia_css_debug_dtrace(level, "Bayer Noise Reduction:\n");
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "bnr_gain_all", bnr->gain_all);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "bnr_gain_dir", bnr->gain_dir);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "bnr_threshold_low",
+ bnr->threshold_low);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "bnr_threshold_width_log2",
+ bnr->threshold_width_log2);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "bnr_threshold_width",
+ bnr->threshold_width);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "bnr_clip", bnr->clip);
+}
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnr/bnr_1.0/ia_css_bnr.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnr/bnr_1.0/ia_css_bnr.host.h
new file mode 100644
index 000000000000..ccd2abc60537
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnr/bnr_1.0/ia_css_bnr.host.h
@@ -0,0 +1,34 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_BNR_HOST_H
+#define __IA_CSS_BNR_HOST_H
+
+#include "sh_css_params.h"
+
+#include "ynr/ynr_1.0/ia_css_ynr_types.h"
+#include "ia_css_bnr_param.h"
+
+void
+ia_css_bnr_encode(
+ struct sh_css_isp_bnr_params *to,
+ const struct ia_css_nr_config *from,
+ unsigned size);
+
+void
+ia_css_bnr_dump(
+ const struct sh_css_isp_bnr_params *bnr,
+ unsigned level);
+
+#endif /* __IA_CSS_BNR_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnr/bnr_1.0/ia_css_bnr_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnr/bnr_1.0/ia_css_bnr_param.h
new file mode 100644
index 000000000000..331e05885ef4
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/bnr/bnr_1.0/ia_css_bnr_param.h
@@ -0,0 +1,30 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_BNR_PARAM_H
+#define __IA_CSS_BNR_PARAM_H
+
+#include "type_support.h"
+
+/* BNR (Bayer Noise Reduction) */
+struct sh_css_isp_bnr_params {
+ int32_t gain_all;
+ int32_t gain_dir;
+ int32_t threshold_low;
+ int32_t threshold_width_log2;
+ int32_t threshold_width;
+ int32_t clip;
+};
+
+#endif /* __IA_CSS_BNR_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/cnr/cnr_1.0/ia_css_cnr.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/cnr/cnr_1.0/ia_css_cnr.host.c
new file mode 100644
index 000000000000..d14fd8fc08b1
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/cnr/cnr_1.0/ia_css_cnr.host.c
@@ -0,0 +1,28 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "ia_css_types.h"
+#include "sh_css_defs.h"
+#include "ia_css_debug.h"
+
+#include "ia_css_cnr.host.h"
+
+/* Keep the interface here; it is not enabled yet because the host does not know the size of the individual state. */
+void
+ia_css_init_cnr_state(
+ void/*struct sh_css_isp_cnr_vmem_state*/ *state,
+ size_t size)
+{
+ memset(state, 0, size);
+}
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/cnr/cnr_1.0/ia_css_cnr.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/cnr/cnr_1.0/ia_css_cnr.host.h
new file mode 100644
index 000000000000..6f00d280b7d6
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/cnr/cnr_1.0/ia_css_cnr.host.h
@@ -0,0 +1,25 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_CNR_HOST_H
+#define __IA_CSS_CNR_HOST_H
+
+#include "ia_css_cnr_param.h"
+
+void
+ia_css_init_cnr_state(
+ void/*struct sh_css_isp_cnr_vmem_state*/ *state,
+ size_t size);
+
+#endif /* __IA_CSS_CNR_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/cnr/cnr_1.0/ia_css_cnr_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/cnr/cnr_1.0/ia_css_cnr_param.h
new file mode 100644
index 000000000000..c1af207cbf9a
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/cnr/cnr_1.0/ia_css_cnr_param.h
@@ -0,0 +1,24 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_CNR_PARAM_H
+#define __IA_CSS_CNR_PARAM_H
+
+#include "type_support.h"
+
+/* CNR (Chroma Noise Reduction) */
+/* Reuse YNR1 param structure */
+#include "../../ynr/ynr_1.0/ia_css_ynr_param.h"
+
+#endif /* __IA_CSS_CNR_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/cnr/cnr_1.0/ia_css_cnr_state.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/cnr/cnr_1.0/ia_css_cnr_state.h
new file mode 100644
index 000000000000..795fba76bb20
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/cnr/cnr_1.0/ia_css_cnr_state.h
@@ -0,0 +1,33 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_CNR_STATE_H
+#define __IA_CSS_CNR_STATE_H
+
+#include "type_support.h"
+
+#include "vmem.h"
+
+typedef struct
+{
+ VMEM_ARRAY(u, ISP_NWAY);
+ VMEM_ARRAY(v, ISP_NWAY);
+} s_cnr_buf;
+
+/* CNR (color noise reduction) */
+struct sh_css_isp_cnr_vmem_state {
+ s_cnr_buf cnr_buf[2][MAX_VECTORS_PER_BUF_LINE/2];
+};
+
+#endif /* __IA_CSS_CNR_STATE_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/cnr/cnr_2/ia_css_cnr2.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/cnr/cnr_2/ia_css_cnr2.host.c
new file mode 100644
index 000000000000..4b4b2b715407
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/cnr/cnr_2/ia_css_cnr2.host.c
@@ -0,0 +1,76 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "ia_css_types.h"
+#include "sh_css_defs.h"
+#include "ia_css_debug.h"
+
+#include "ia_css_cnr2.host.h"
+
+const struct ia_css_cnr_config default_cnr_config = {
+ 0,
+ 0,
+ 100,
+ 100,
+ 100,
+ 50,
+ 50,
+ 50
+};
+
+void
+ia_css_cnr_encode(
+ struct sh_css_isp_cnr_params *to,
+ const struct ia_css_cnr_config *from,
+ unsigned size)
+{
+ (void)size;
+ to->coring_u = from->coring_u;
+ to->coring_v = from->coring_v;
+ to->sense_gain_vy = from->sense_gain_vy;
+ to->sense_gain_vu = from->sense_gain_vu;
+ to->sense_gain_vv = from->sense_gain_vv;
+ to->sense_gain_hy = from->sense_gain_hy;
+ to->sense_gain_hu = from->sense_gain_hu;
+ to->sense_gain_hv = from->sense_gain_hv;
+}
+
+void
+ia_css_cnr_dump(
+ const struct sh_css_isp_cnr_params *cnr,
+ unsigned level);
+
+void
+ia_css_cnr_debug_dtrace(
+ const struct ia_css_cnr_config *config,
+ unsigned level)
+{
+ ia_css_debug_dtrace(level,
+ "config.coring_u=%d, config.coring_v=%d, "
+ "config.sense_gain_vy=%d, config.sense_gain_hy=%d, "
+ "config.sense_gain_vu=%d, config.sense_gain_hu=%d, "
+ "config.sense_gain_vv=%d, config.sense_gain_hv=%d\n",
+ config->coring_u, config->coring_v,
+ config->sense_gain_vy, config->sense_gain_hy,
+ config->sense_gain_vu, config->sense_gain_hu,
+ config->sense_gain_vv, config->sense_gain_hv);
+}
+
+void
+ia_css_init_cnr2_state(
+ void/*struct sh_css_isp_cnr_vmem_state*/ *state,
+ size_t size)
+{
+ memset(state, 0, size);
+}
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/cnr/cnr_2/ia_css_cnr2.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/cnr/cnr_2/ia_css_cnr2.host.h
new file mode 100644
index 000000000000..abcf0eba706f
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/cnr/cnr_2/ia_css_cnr2.host.h
@@ -0,0 +1,43 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_CNR2_HOST_H
+#define __IA_CSS_CNR2_HOST_H
+
+#include "ia_css_cnr2_types.h"
+#include "ia_css_cnr2_param.h"
+
+extern const struct ia_css_cnr_config default_cnr_config;
+
+void
+ia_css_cnr_encode(
+ struct sh_css_isp_cnr_params *to,
+ const struct ia_css_cnr_config *from,
+ unsigned size);
+
+void
+ia_css_cnr_dump(
+ const struct sh_css_isp_cnr_params *cnr,
+ unsigned level);
+
+void
+ia_css_cnr_debug_dtrace(
+ const struct ia_css_cnr_config *config,
+ unsigned level);
+
+void
+ia_css_init_cnr2_state(
+ void/*struct sh_css_isp_cnr_vmem_state*/ *state,
+ size_t size);
+#endif /* __IA_CSS_CNR2_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/cnr/cnr_2/ia_css_cnr2_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/cnr/cnr_2/ia_css_cnr2_param.h
new file mode 100644
index 000000000000..d6f490e26c94
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/cnr/cnr_2/ia_css_cnr2_param.h
@@ -0,0 +1,32 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_CNR2_PARAM_H
+#define __IA_CSS_CNR2_PARAM_H
+
+#include "type_support.h"
+
+/* CNR (Chroma Noise Reduction) */
+struct sh_css_isp_cnr_params {
+ int32_t coring_u;
+ int32_t coring_v;
+ int32_t sense_gain_vy;
+ int32_t sense_gain_vu;
+ int32_t sense_gain_vv;
+ int32_t sense_gain_hy;
+ int32_t sense_gain_hu;
+ int32_t sense_gain_hv;
+};
+
+#endif /* __IA_CSS_CNR2_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/cnr/cnr_2/ia_css_cnr2_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/cnr/cnr_2/ia_css_cnr2_types.h
new file mode 100644
index 000000000000..6df6c2be9a70
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/cnr/cnr_2/ia_css_cnr2_types.h
@@ -0,0 +1,55 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_CNR2_TYPES_H
+#define __IA_CSS_CNR2_TYPES_H
+
+/** @file
+* CSS-API header file for Chroma Noise Reduction (CNR) parameters
+*/
+
+/** Chroma Noise Reduction configuration.
+ *
+ * A small edge sensitivity means stronger smoothing and noise reduction.
+ * If color looks blurred across vertical edges,
+ * raise the sense_gain_h* values.
+ * If color looks blurred across horizontal edges,
+ * raise the sense_gain_v* values.
+ *
+ * ISP block: CNR2
+ * (ISP1: CNR1 is used.)
+ * (ISP2: CNR1 is used for Preview/Video.)
+ * ISP2: CNR2 is used for Still.
+ */
+struct ia_css_cnr_config {
+ uint16_t coring_u; /**< Coring level of U.
+ u0.13, [0,8191], default/ineffective 0 */
+ uint16_t coring_v; /**< Coring level of V.
+ u0.13, [0,8191], default/ineffective 0 */
+ uint16_t sense_gain_vy; /**< Sensitivity of horizontal edge of Y.
+ u13.0, [0,8191], default 100, ineffective 8191 */
+ uint16_t sense_gain_vu; /**< Sensitivity of horizontal edge of U.
+ u13.0, [0,8191], default 100, ineffective 8191 */
+ uint16_t sense_gain_vv; /**< Sensitivity of horizontal edge of V.
+ u13.0, [0,8191], default 100, ineffective 8191 */
+ uint16_t sense_gain_hy; /**< Sensitivity of vertical edge of Y.
+ u13.0, [0,8191], default 50, ineffective 8191 */
+ uint16_t sense_gain_hu; /**< Sensitivity of vertical edge of U.
+ u13.0, [0,8191], default 50, ineffective 8191 */
+ uint16_t sense_gain_hv; /**< Sensitivity of vertical edge of V.
+ u13.0, [0,8191], default 50, ineffective 8191 */
+};
+
+#endif /* __IA_CSS_CNR2_TYPES_H */
+
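The tuning guidance in the ia_css_cnr_config comment above can be made concrete with a small sketch. The snippet below is illustrative only and not part of this patch: it starts from default_cnr_config (defined in ia_css_cnr2.host.c above) and raises the horizontal sense gains, as the comment suggests for color bleeding across vertical edges. The value 200 is a hypothetical tuning choice, chosen only to stay inside the documented [0, 8191] range.

    /* Sketch: tune CNR2 per the guidance above (values are hypothetical). */
    struct ia_css_cnr_config cnr = default_cnr_config; /* {0, 0, 100, 100, 100, 50, 50, 50} */

    /* Blurred color across vertical edges -> raise sense_gain_h*. */
    cnr.sense_gain_hy = 200;
    cnr.sense_gain_hu = 200;
    cnr.sense_gain_hv = 200;

    /* The resulting config would then be handed to the CSS parameter
     * interface; how that is done is outside the scope of this file.
     */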
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/cnr/cnr_2/ia_css_cnr_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/cnr/cnr_2/ia_css_cnr_param.h
new file mode 100644
index 000000000000..56651ba62598
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/cnr/cnr_2/ia_css_cnr_param.h
@@ -0,0 +1,20 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_CNRX_PARAM_H
+#define __IA_CSS_CNRX_PARAM_H
+
+#include "ia_css_cnr2_param.h"
+
+#endif /* __IA_CSS_CNRX_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/cnr/cnr_2/ia_css_cnr_state.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/cnr/cnr_2/ia_css_cnr_state.h
new file mode 100644
index 000000000000..e533e2fa8cd5
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/cnr/cnr_2/ia_css_cnr_state.h
@@ -0,0 +1,33 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_CNR2_STATE_H
+#define __IA_CSS_CNR2_STATE_H
+
+#include "type_support.h"
+#include "vmem.h"
+
+typedef struct
+{
+ VMEM_ARRAY(y, (MAX_VECTORS_PER_BUF_LINE/2)*ISP_NWAY);
+ VMEM_ARRAY(u, (MAX_VECTORS_PER_BUF_LINE/2)*ISP_NWAY);
+ VMEM_ARRAY(v, (MAX_VECTORS_PER_BUF_LINE/2)*ISP_NWAY);
+} s_cnr_buf;
+
+/* CNR (color noise reduction) */
+struct sh_css_isp_cnr_vmem_state {
+ s_cnr_buf cnr_buf;
+};
+
+#endif /* __IA_CSS_CNR2_STATE_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/conversion/conversion_1.0/ia_css_conversion.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/conversion/conversion_1.0/ia_css_conversion.host.c
new file mode 100644
index 000000000000..8f25ee180cda
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/conversion/conversion_1.0/ia_css_conversion.host.c
@@ -0,0 +1,36 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "ia_css_types.h"
+#include "ia_css_conversion.host.h"
+
+const struct ia_css_conversion_config default_conversion_config = {
+ 0,
+ 0,
+ 0,
+ 0,
+};
+
+void
+ia_css_conversion_encode(
+ struct sh_css_isp_conversion_params *to,
+ const struct ia_css_conversion_config *from,
+ unsigned size)
+{
+ (void)size;
+ to->en = from->en;
+ to->dummy0 = from->dummy0;
+ to->dummy1 = from->dummy1;
+ to->dummy2 = from->dummy2;
+}
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/conversion/conversion_1.0/ia_css_conversion.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/conversion/conversion_1.0/ia_css_conversion.host.h
new file mode 100644
index 000000000000..da7a0a034a71
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/conversion/conversion_1.0/ia_css_conversion.host.h
@@ -0,0 +1,33 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_CONVERSION_HOST_H
+#define __IA_CSS_CONVERSION_HOST_H
+
+#include "ia_css_conversion_types.h"
+#include "ia_css_conversion_param.h"
+
+extern const struct ia_css_conversion_config default_conversion_config;
+
+void
+ia_css_conversion_encode(
+ struct sh_css_isp_conversion_params *to,
+ const struct ia_css_conversion_config *from,
+ unsigned size);
+
+#ifdef ISP2401
+/* workaround until code generation in isp_kernelparameters.host.c is fixed */
+#define ia_css_conversion_par_encode(to, from, size) ia_css_conversion_encode(to, from, size)
+#endif
+#endif /* __IA_CSS_CONVERSION_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/conversion/conversion_1.0/ia_css_conversion_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/conversion/conversion_1.0/ia_css_conversion_param.h
new file mode 100644
index 000000000000..301d506f447e
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/conversion/conversion_1.0/ia_css_conversion_param.h
@@ -0,0 +1,28 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_CONVERSION_PARAM_H
+#define __IA_CSS_CONVERSION_PARAM_H
+
+#include "type_support.h"
+
+/* CONVERSION */
+struct sh_css_isp_conversion_params {
+ uint32_t en;
+ uint32_t dummy0;
+ uint32_t dummy1;
+ uint32_t dummy2;
+};
+
+#endif /* __IA_CSS_CONVERSION_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/conversion/conversion_1.0/ia_css_conversion_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/conversion/conversion_1.0/ia_css_conversion_types.h
new file mode 100644
index 000000000000..3f11442500f0
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/conversion/conversion_1.0/ia_css_conversion_types.h
@@ -0,0 +1,32 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_CONVERSION_TYPES_H
+#define __IA_CSS_CONVERSION_TYPES_H
+
+/**
+ * Conversion Kernel parameters.
+ * Deinterleaves a Bayer quad into the ISYS format.
+ *
+ * ISP block: CONVERSION
+ *
+ */
+struct ia_css_conversion_config {
+ uint32_t en; /**< Enable flag for the conversion kernel */
+ uint32_t dummy0; /**< Dummy parameter 0 */
+ uint32_t dummy1; /**< Dummy parameter 1 */
+ uint32_t dummy2; /**< Dummy parameter 2 */
+};
+
+#endif /* __IA_CSS_CONVERSION_TYPES_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/copy_output/copy_output_1.0/ia_css_copy_output.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/copy_output/copy_output_1.0/ia_css_copy_output.host.c
new file mode 100644
index 000000000000..45e1ea8b1fb0
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/copy_output/copy_output_1.0/ia_css_copy_output.host.c
@@ -0,0 +1,47 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "ia_css_copy_output.host.h"
+#include "ia_css_binary.h"
+#include "type_support.h"
+#define IA_CSS_INCLUDE_CONFIGURATIONS
+#include "ia_css_isp_configs.h"
+#include "isp.h"
+
+static const struct ia_css_copy_output_configuration default_config = {
+ .enable = false,
+};
+
+void
+ia_css_copy_output_config(
+ struct sh_css_isp_copy_output_isp_config *to,
+ const struct ia_css_copy_output_configuration *from,
+ unsigned size)
+{
+ (void)size;
+ to->enable = from->enable;
+}
+
+void
+ia_css_copy_output_configure(
+ const struct ia_css_binary *binary,
+ bool enable)
+{
+ struct ia_css_copy_output_configuration config = default_config;
+
+ config.enable = enable;
+
+ ia_css_configure_copy_output(binary, &config);
+}
+
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/copy_output/copy_output_1.0/ia_css_copy_output.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/copy_output/copy_output_1.0/ia_css_copy_output.host.h
new file mode 100644
index 000000000000..3eb77365f8d0
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/copy_output/copy_output_1.0/ia_css_copy_output.host.h
@@ -0,0 +1,34 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_COPY_OUTPUT_HOST_H
+#define __IA_CSS_COPY_OUTPUT_HOST_H
+
+#include "type_support.h"
+#include "ia_css_binary.h"
+
+#include "ia_css_copy_output_param.h"
+
+void
+ia_css_copy_output_config(
+ struct sh_css_isp_copy_output_isp_config *to,
+ const struct ia_css_copy_output_configuration *from,
+ unsigned size);
+
+void
+ia_css_copy_output_configure(
+ const struct ia_css_binary *binary,
+ bool enable);
+
+#endif /* __IA_CSS_COPY_OUTPUT_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/copy_output/copy_output_1.0/ia_css_copy_output_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/copy_output/copy_output_1.0/ia_css_copy_output_param.h
new file mode 100644
index 000000000000..622d9181e13f
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/copy_output/copy_output_1.0/ia_css_copy_output_param.h
@@ -0,0 +1,26 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_COPY_PARAM_H
+#define __IA_CSS_COPY_PARAM_H
+
+struct ia_css_copy_output_configuration {
+ bool enable;
+};
+
+struct sh_css_isp_copy_output_isp_config {
+ uint32_t enable;
+};
+
+#endif /* __IA_CSS_COPY_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/crop/crop_1.0/ia_css_crop.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/crop/crop_1.0/ia_css_crop.host.c
new file mode 100644
index 000000000000..92905220d862
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/crop/crop_1.0/ia_css_crop.host.c
@@ -0,0 +1,64 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <assert_support.h>
+#include <ia_css_frame_public.h>
+#include <ia_css_frame.h>
+#include <ia_css_binary.h>
+#define IA_CSS_INCLUDE_CONFIGURATIONS
+#include "ia_css_isp_configs.h"
+#include "isp.h"
+#include "ia_css_crop.host.h"
+
+static const struct ia_css_crop_configuration default_config = {
+ .info = NULL,
+};
+
+void
+ia_css_crop_encode(
+ struct sh_css_isp_crop_isp_params *to,
+ const struct ia_css_crop_config *from,
+ unsigned size)
+{
+ (void)size;
+ to->crop_pos = from->crop_pos;
+}
+
+void
+ia_css_crop_config(
+ struct sh_css_isp_crop_isp_config *to,
+ const struct ia_css_crop_configuration *from,
+ unsigned size)
+{
+ unsigned elems_a = ISP_VEC_NELEMS;
+
+ (void)size;
+ ia_css_dma_configure_from_info(&to->port_b, from->info);
+ to->width_a_over_b = elems_a / to->port_b.elems;
+
+ /* Assume divisibility here; may need to generalize to fixed point. */
+ assert(elems_a % to->port_b.elems == 0);
+}
+
+void
+ia_css_crop_configure(
+ const struct ia_css_binary *binary,
+ const struct ia_css_frame_info *info)
+{
+ struct ia_css_crop_configuration config = default_config;
+
+ config.info = info;
+
+ ia_css_configure_crop(binary, &config);
+}
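As a hypothetical illustration of the divisibility assumption in ia_css_crop_config() above (the numbers are invented here, not taken from this patch):

    /* If ISP_VEC_NELEMS were 64 and the DMA port reported
     * to->port_b.elems == 32, then
     *     width_a_over_b = 64 / 32 = 2
     * and assert(elems_a % to->port_b.elems == 0) holds.  A non-integer
     * ratio would need the fixed-point generalization mentioned in the
     * comment.
     */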
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/crop/crop_1.0/ia_css_crop.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/crop/crop_1.0/ia_css_crop.host.h
new file mode 100644
index 000000000000..9c1a4c7cac98
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/crop/crop_1.0/ia_css_crop.host.h
@@ -0,0 +1,41 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_CROP_HOST_H
+#define __IA_CSS_CROP_HOST_H
+
+#include <ia_css_frame_public.h>
+#include <ia_css_binary.h>
+
+#include "ia_css_crop_types.h"
+#include "ia_css_crop_param.h"
+
+void
+ia_css_crop_encode(
+ struct sh_css_isp_crop_isp_params *to,
+ const struct ia_css_crop_config *from,
+ unsigned size);
+
+void
+ia_css_crop_config(
+ struct sh_css_isp_crop_isp_config *to,
+ const struct ia_css_crop_configuration *from,
+ unsigned size);
+
+void
+ia_css_crop_configure(
+ const struct ia_css_binary *binary,
+ const struct ia_css_frame_info *from);
+
+#endif /* __IA_CSS_CROP_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/crop/crop_1.0/ia_css_crop_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/crop/crop_1.0/ia_css_crop_param.h
new file mode 100644
index 000000000000..8bfc8dad37a8
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/crop/crop_1.0/ia_css_crop_param.h
@@ -0,0 +1,32 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_CROP_PARAM_H
+#define __IA_CSS_CROP_PARAM_H
+
+#include <type_support.h>
+#include "dma.h"
+#include "sh_css_internal.h" /* sh_css_crop_pos */
+
+/** Crop frame */
+struct sh_css_isp_crop_isp_config {
+ uint32_t width_a_over_b;
+ struct dma_port_config port_b;
+};
+
+struct sh_css_isp_crop_isp_params {
+ struct sh_css_crop_pos crop_pos;
+};
+
+#endif /* __IA_CSS_CROP_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/crop/crop_1.0/ia_css_crop_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/crop/crop_1.0/ia_css_crop_types.h
new file mode 100644
index 000000000000..8091ad4d4602
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/crop/crop_1.0/ia_css_crop_types.h
@@ -0,0 +1,35 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_CROP_TYPES_H
+#define __IA_CSS_CROP_TYPES_H
+
+/** Crop frame
+ *
+ * ISP block: crop frame
+ */
+
+#include <ia_css_frame_public.h>
+#include "sh_css_uds.h" /* sh_css_crop_pos */
+
+struct ia_css_crop_config {
+ struct sh_css_crop_pos crop_pos;
+};
+
+struct ia_css_crop_configuration {
+ const struct ia_css_frame_info *info;
+};
+
+#endif /* __IA_CSS_CROP_TYPES_H */
+
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/csc/csc_1.0/ia_css_csc.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/csc/csc_1.0/ia_css_csc.host.c
new file mode 100644
index 000000000000..9f94ef1de572
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/csc/csc_1.0/ia_css_csc.host.c
@@ -0,0 +1,132 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "ia_css_types.h"
+#include "sh_css_defs.h"
+#ifndef IA_CSS_NO_DEBUG
+/* FIXME: See BZ 4427 */
+#include "ia_css_debug.h"
+#endif
+
+#include "ia_css_csc.host.h"
+
+const struct ia_css_cc_config default_cc_config = {
+ 8,
+ {255, 29, 120, 0, -374, -342, 0, -672, 301},
+};
+
+void
+ia_css_encode_cc(
+ struct sh_css_isp_csc_params *to,
+ const struct ia_css_cc_config *from,
+ unsigned size)
+{
+ (void)size;
+#ifndef IA_CSS_NO_DEBUG
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_encode_cc() enter:\n");
+#endif
+
+ to->m_shift = (int16_t) from->fraction_bits;
+ to->m00 = (int16_t) from->matrix[0];
+ to->m01 = (int16_t) from->matrix[1];
+ to->m02 = (int16_t) from->matrix[2];
+ to->m10 = (int16_t) from->matrix[3];
+ to->m11 = (int16_t) from->matrix[4];
+ to->m12 = (int16_t) from->matrix[5];
+ to->m20 = (int16_t) from->matrix[6];
+ to->m21 = (int16_t) from->matrix[7];
+ to->m22 = (int16_t) from->matrix[8];
+
+#ifndef IA_CSS_NO_DEBUG
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_encode_cc() leave:\n");
+#endif
+}
+
+void
+ia_css_csc_encode(
+ struct sh_css_isp_csc_params *to,
+ const struct ia_css_cc_config *from,
+ unsigned size)
+{
+ ia_css_encode_cc(to, from, size);
+}
+
+#ifndef IA_CSS_NO_DEBUG
+void
+ia_css_cc_dump(
+ const struct sh_css_isp_csc_params *csc,
+ unsigned level,
+ const char *name)
+{
+ if (!csc)
+ return;
+ ia_css_debug_dtrace(level, "%s\n", name);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "m_shift",
+ csc->m_shift);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "m00",
+ csc->m00);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "m01",
+ csc->m01);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "m02",
+ csc->m02);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "m10",
+ csc->m10);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "m11",
+ csc->m11);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "m12",
+ csc->m12);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "m20",
+ csc->m20);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "m21",
+ csc->m21);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "m22",
+ csc->m22);
+}
+
+void
+ia_css_csc_dump(
+ const struct sh_css_isp_csc_params *csc,
+ unsigned level)
+{
+ ia_css_cc_dump(csc, level, "Color Space Conversion");
+}
+
+void
+ia_css_cc_config_debug_dtrace(
+ const struct ia_css_cc_config *config,
+ unsigned level)
+{
+ ia_css_debug_dtrace(level,
+ "config.m[0]=%d, "
+ "config.m[1]=%d, config.m[2]=%d, "
+ "config.m[3]=%d, config.m[4]=%d, "
+ "config.m[5]=%d, config.m[6]=%d, "
+ "config.m[7]=%d, config.m[8]=%d\n",
+ config->matrix[0],
+ config->matrix[1], config->matrix[2],
+ config->matrix[3], config->matrix[4],
+ config->matrix[5], config->matrix[6],
+ config->matrix[7], config->matrix[8]);
+}
+#endif
+
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/csc/csc_1.0/ia_css_csc.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/csc/csc_1.0/ia_css_csc.host.h
new file mode 100644
index 000000000000..eb10d8a5709d
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/csc/csc_1.0/ia_css_csc.host.h
@@ -0,0 +1,54 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_CSC_HOST_H
+#define __IA_CSS_CSC_HOST_H
+
+#include "ia_css_csc_types.h"
+#include "ia_css_csc_param.h"
+
+extern const struct ia_css_cc_config default_cc_config;
+
+void
+ia_css_encode_cc(
+ struct sh_css_isp_csc_params *to,
+ const struct ia_css_cc_config *from,
+ unsigned size);
+
+void
+ia_css_csc_encode(
+ struct sh_css_isp_csc_params *to,
+ const struct ia_css_cc_config *from,
+ unsigned size);
+
+#ifndef IA_CSS_NO_DEBUG
+void
+ia_css_cc_dump(
+ const struct sh_css_isp_csc_params *csc, unsigned level,
+ const char *name);
+
+void
+ia_css_csc_dump(
+ const struct sh_css_isp_csc_params *csc,
+ unsigned level);
+
+void
+ia_css_cc_config_debug_dtrace(
+ const struct ia_css_cc_config *config,
+ unsigned level);
+
+#define ia_css_csc_debug_dtrace ia_css_cc_config_debug_dtrace
+#endif
+
+#endif /* __IA_CSS_CSC_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/csc/csc_1.0/ia_css_csc_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/csc/csc_1.0/ia_css_csc_param.h
new file mode 100644
index 000000000000..0b054a939baf
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/csc/csc_1.0/ia_css_csc_param.h
@@ -0,0 +1,34 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_CSC_PARAM_H
+#define __IA_CSS_CSC_PARAM_H
+
+#include "type_support.h"
+/* CSC (Color Space Conversion) */
+struct sh_css_isp_csc_params {
+ uint16_t m_shift;
+ int16_t m00;
+ int16_t m01;
+ int16_t m02;
+ int16_t m10;
+ int16_t m11;
+ int16_t m12;
+ int16_t m20;
+ int16_t m21;
+ int16_t m22;
+};
+
+
+#endif /* __IA_CSS_CSC_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/csc/csc_1.0/ia_css_csc_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/csc/csc_1.0/ia_css_csc_types.h
new file mode 100644
index 000000000000..54ced072467f
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/csc/csc_1.0/ia_css_csc_types.h
@@ -0,0 +1,78 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_CSC_TYPES_H
+#define __IA_CSS_CSC_TYPES_H
+
+/** @file
+* CSS-API header file for Color Space Conversion parameters.
+*/
+
+/** Color Correction configuration.
+ *
+ * This structure is used for 3 cases.
+ * ("YCgCo" is the output format of Demosaic.)
+ *
+ * 1. Color Space Conversion (YCgCo to YUV) for ISP1.
+ * ISP block: CSC1 (Color Space Conversion)
+ * struct ia_css_cc_config *cc_config
+ *
+ * 2. Color Correction Matrix (YCgCo to RGB) for ISP2.
+ * ISP block: CCM2 (Color Correction Matrix)
+ * struct ia_css_cc_config *yuv2rgb_cc_config
+ *
+ * 3. Color Space Conversion (RGB to YUV) for ISP2.
+ * ISP block: CSC2 (Color Space Conversion)
+ * struct ia_css_cc_config *rgb2yuv_cc_config
+ *
+ * default/ineffective:
+ * 1. YCgCo -> YUV
+ * 1 0.174 0.185
+ * 0 -0.66252 -0.66874
+ * 0 -0.83738 0.58131
+ *
+ * fraction_bits = 12
+ * 4096 713 758
+ * 0 -2714 -2739
+ * 0 -3430 2381
+ *
+ * 2. YCgCo -> RGB
+ * 1 -1 1
+ * 1 1 0
+ * 1 -1 -1
+ *
+ * fraction_bits = 12
+ * 4096 -4096 4096
+ * 4096 4096 0
+ * 4096 -4096 -4096
+ *
+ * 3. RGB -> YUV
+ * 0.299 0.587 0.114
+ * -0.16874 -0.33126 0.5
+ * 0.5 -0.41869 -0.08131
+ *
+ * fraction_bits = 13
+ * 2449 4809 934
+ * -1382 -2714 4096
+ * 4096 -3430 -666
+ */
+struct ia_css_cc_config {
+ uint32_t fraction_bits;/**< Fractional bits of matrix.
+ u8.0, [0,13] */
+ int32_t matrix[3 * 3]; /**< Conversion matrix.
+ s[13-fraction_bits].[fraction_bits],
+ [-8192,8191] */
+};
+
+#endif /* __IA_CSS_CSC_TYPES_H */
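The worked matrices in the ia_css_cc_config comment above follow from one fixed-point relation: each entry is the floating-point coefficient scaled by 2^fraction_bits and rounded. A minimal host-side sketch of that relation (illustration only, in standard C; the driver code above only handles the already-encoded integers):

    #include <math.h>
    #include <stdint.h>

    /* Encode a coefficient in the s[13-f].[f] format used by
     * ia_css_cc_config::matrix, with f = fraction_bits.
     */
    static int32_t cc_coef_to_fixed(double coef, uint32_t fraction_bits)
    {
            return (int32_t)lround(coef * (double)(1 << fraction_bits));
    }

    /* RGB -> YUV example from the comment above, fraction_bits = 13:
     *   cc_coef_to_fixed(0.587, 13)    ==  4809
     *   cc_coef_to_fixed(-0.16874, 13) == -1382
     */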
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc1_5/ia_css_ctc1_5.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc1_5/ia_css_ctc1_5.host.c
new file mode 100644
index 000000000000..e27648c46a25
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc1_5/ia_css_ctc1_5.host.c
@@ -0,0 +1,120 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "ia_css_types.h"
+#include "sh_css_defs.h"
+#include "ia_css_debug.h"
+#include "assert_support.h"
+
+#include "ctc/ctc_1.0/ia_css_ctc.host.h"
+#include "ia_css_ctc1_5.host.h"
+
+static void ctc_gradient(
+ int *dydx, int *shift,
+ int y1, int y0, int x1, int x0)
+{
+ int frc_bits = max(IA_CSS_CTC_COEF_SHIFT, 16);
+ int dy = y1 - y0;
+ int dx = x1 - x0;
+ int dydx_int;
+ int dydx_frc;
+ int sft;
+ /* max_dydx = the maximum gradient = the maximum y (gain) */
+ int max_dydx = (1 << IA_CSS_CTC_COEF_SHIFT) - 1;
+
+ if (dx == 0) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ctc_gradient() error, illegal division operation\n");
+ return;
+ } else {
+ dydx_int = dy / dx;
+ dydx_frc = ((dy - dydx_int * dx) << frc_bits) / dx;
+ }
+
+ assert(y0 >= 0 && y0 <= max_dydx);
+ assert(y1 >= 0 && y1 <= max_dydx);
+ assert(x0 < x1);
+ assert(dydx != NULL);
+ assert(shift != NULL);
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ctc_gradient() enter:\n");
+
+ /* search "sft" which meets this condition:
+ (1 << (IA_CSS_CTC_COEF_SHIFT - 1))
+ <= (((float)dy / (float)dx) * (1 << sft))
+ <= ((1 << IA_CSS_CTC_COEF_SHIFT) - 1) */
+ for (sft = 0; sft <= IA_CSS_CTC_COEF_SHIFT; sft++) {
+ int tmp_dydx = (dydx_int << sft)
+ + (dydx_frc >> (frc_bits - sft));
+ if (tmp_dydx <= max_dydx) {
+ *dydx = tmp_dydx;
+ *shift = sft;
+ }
+ if (tmp_dydx >= max_dydx)
+ break;
+ }
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ctc_gradient() leave:\n");
+}
+
+void
+ia_css_ctc_encode(
+ struct sh_css_isp_ctc_params *to,
+ const struct ia_css_ctc_config *from,
+ unsigned size)
+{
+ (void)size;
+ to->y0 = from->y0;
+ to->y1 = from->y1;
+ to->y2 = from->y2;
+ to->y3 = from->y3;
+ to->y4 = from->y4;
+ to->y5 = from->y5;
+
+ to->ce_gain_exp = from->ce_gain_exp;
+
+ to->x1 = from->x1;
+ to->x2 = from->x2;
+ to->x3 = from->x3;
+ to->x4 = from->x4;
+
+ ctc_gradient(&(to->dydx0),
+ &(to->dydx0_shift),
+ from->y1, from->y0,
+ from->x1, 0);
+
+ ctc_gradient(&(to->dydx1),
+ &(to->dydx1_shift),
+ from->y2, from->y1,
+ from->x2, from->x1);
+
+ ctc_gradient(&to->dydx2,
+ &to->dydx2_shift,
+ from->y3, from->y2,
+ from->x3, from->x2);
+
+ ctc_gradient(&to->dydx3,
+ &to->dydx3_shift,
+ from->y4, from->y3,
+ from->x4, from->x3);
+
+ ctc_gradient(&(to->dydx4),
+ &(to->dydx4_shift),
+ from->y5, from->y4,
+ SH_CSS_BAYER_MAXVAL, from->x4);
+}
+
+void
+ia_css_ctc_dump(
+ const struct sh_css_isp_ctc_params *ctc,
+ unsigned level);
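A worked example of the shift search in ctc_gradient() above may help. Assume, for illustration only, that IA_CSS_CTC_COEF_SHIFT is 13 (so max_dydx = 8191 and frc_bits = 16); the constant's actual value is defined elsewhere in the tree:

    /* Illustration (assumed IA_CSS_CTC_COEF_SHIFT == 13):
     *   dy = y1 - y0 = 100, dx = x1 - x0 = 400  ->  slope dy/dx = 0.25
     *   dydx_int = 0, dydx_frc = (100 << 16) / 400 = 16384  (0.25 in Q16)
     *   the loop keeps the largest sft with 0.25 * 2^sft <= 8191, so it
     *   runs to sft = 13:  *dydx = 16384 >> 3 = 2048, *shift = 13
     * In short, the slope is returned as dydx ~= (dy/dx) * 2^shift with
     * the largest shift the coefficient range allows, capped at
     * IA_CSS_CTC_COEF_SHIFT.
     */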
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc1_5/ia_css_ctc1_5.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc1_5/ia_css_ctc1_5.host.h
new file mode 100644
index 000000000000..d943aff28152
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc1_5/ia_css_ctc1_5.host.h
@@ -0,0 +1,33 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_CTC1_5_HOST_H
+#define __IA_CSS_CTC1_5_HOST_H
+
+#include "sh_css_params.h"
+
+#include "ia_css_ctc1_5_param.h"
+
+void
+ia_css_ctc_encode(
+ struct sh_css_isp_ctc_params *to,
+ const struct ia_css_ctc_config *from,
+ unsigned size);
+
+void
+ia_css_ctc_dump(
+ const struct sh_css_isp_ctc_params *ctc,
+ unsigned level);
+
+#endif /* __IA_CSS_CTC1_5_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc1_5/ia_css_ctc1_5_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc1_5/ia_css_ctc1_5_param.h
new file mode 100644
index 000000000000..8d9ac2b1832c
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc1_5/ia_css_ctc1_5_param.h
@@ -0,0 +1,46 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_CTC1_5_PARAM_H
+#define __IA_CSS_CTC1_5_PARAM_H
+
+#include "type_support.h"
+#include "ctc/ctc_1.0/ia_css_ctc_param.h" /* vamem params */
+
+/* CTC (Color Tone Control) */
+struct sh_css_isp_ctc_params {
+ int32_t y0;
+ int32_t y1;
+ int32_t y2;
+ int32_t y3;
+ int32_t y4;
+ int32_t y5;
+ int32_t ce_gain_exp;
+ int32_t x1;
+ int32_t x2;
+ int32_t x3;
+ int32_t x4;
+ int32_t dydx0;
+ int32_t dydx0_shift;
+ int32_t dydx1;
+ int32_t dydx1_shift;
+ int32_t dydx2;
+ int32_t dydx2_shift;
+ int32_t dydx3;
+ int32_t dydx3_shift;
+ int32_t dydx4;
+ int32_t dydx4_shift;
+};
+
+#endif /* __IA_CSS_CTC1_5_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc1_5/ia_css_ctc_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc1_5/ia_css_ctc_param.h
new file mode 100644
index 000000000000..dcd471f9bd66
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc1_5/ia_css_ctc_param.h
@@ -0,0 +1,20 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_CTCX_PARAM_H
+#define __IA_CSS_CTCX_PARAM_H
+
+#include "ia_css_ctc1_5_param.h"
+
+#endif /* __IA_CSS_CTCX_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc2/ia_css_ctc2.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc2/ia_css_ctc2.host.c
new file mode 100644
index 000000000000..07bd24edc7bf
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc2/ia_css_ctc2.host.c
@@ -0,0 +1,156 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "ia_css_types.h"
+#include "sh_css_defs.h"
+#include "assert_support.h"
+
+#include "ia_css_ctc2.host.h"
+
+#define INEFFECTIVE_VAL 4096
+#define BASIC_VAL 819
+
+/* Default configuration of parameters for CTC2 */
+const struct ia_css_ctc2_config default_ctc2_config = {
+ INEFFECTIVE_VAL, INEFFECTIVE_VAL, INEFFECTIVE_VAL,
+ INEFFECTIVE_VAL, INEFFECTIVE_VAL, INEFFECTIVE_VAL,
+ BASIC_VAL * 2, BASIC_VAL * 4, BASIC_VAL * 6,
+ BASIC_VAL * 8, INEFFECTIVE_VAL, INEFFECTIVE_VAL,
+ BASIC_VAL >> 1, BASIC_VAL};
+
+/* (dydx) = ctc2_slope(y1, y0, x1, x0)
+ * -----------------------------------------------
+ * Calculation of the slope of a line = ((y1 - y0) << 8) / (x1 - x0)
+ *
+ * Note: y1, y0, x1 and x0 must lie within the range 0 <-> 8191
+ */
+static int ctc2_slope(int y1, int y0, int x1, int x0)
+{
+ const int shift_val = 8;
+ const int max_slope = (1 << IA_CSS_CTC_COEF_SHIFT) - 1;
+ int dy = y1 - y0;
+ int dx = x1 - x0;
+ int rounding = (dx + 1) >> 1;
+ int dy_shift = dy << shift_val;
+ int slope, dydx;
+
+ /* Protection for parameter values and avoiding zero divisions */
+ assert(y0 >= 0 && y0 <= max_slope);
+ assert(y1 >= 0 && y1 <= max_slope);
+ assert(x0 >= 0 && x0 <= max_slope);
+ assert(x1 > 0 && x1 <= max_slope);
+ assert(dx > 0);
+
+ if (dy < 0)
+ rounding = -rounding;
+ slope = (int) (dy_shift + rounding) / dx;
+
+ /* the slope must lie within the range
+ * (-max_slope - 1) <= dydx <= max_slope
+ */
+ if (slope <= -max_slope-1) {
+ dydx = -max_slope-1;
+ } else if (slope >= max_slope) {
+ dydx = max_slope;
+ } else {
+ dydx = slope;
+ }
+
+ return dydx;
+}
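
ctc2_slope() therefore returns a slope with 8 fractional bits, ((y1 - y0) << 8) / (x1 - x0), rounded to nearest and clamped to the range implied by IA_CSS_CTC_COEF_SHIFT. A small stand-alone check of the rounding step, illustrative only:

/* Illustrative sketch only: same round-to-nearest division as ctc2_slope(). */
#include <stdio.h>

static int rounded_slope(int y1, int y0, int x1, int x0)
{
        int dy = y1 - y0, dx = x1 - x0;
        int rounding = (dx + 1) >> 1;

        if (dy < 0)
                rounding = -rounding;
        return ((dy << 8) + rounding) / dx;
}

int main(void)
{
        printf("%d\n", rounded_slope(101, 0, 3, 0));  /* (25856 + 2) / 3 = 8619 */
        printf("%d\n", (101 << 8) / 3);               /* plain truncation:  8618 */
        return 0;
}
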
+
+/* (void) = ia_css_ctc2_vmem_encode(*to, *from)
+ * -----------------------------------------------
+ * VMEM Encode Function to translate Y parameters from userspace into ISP space
+ */
+void ia_css_ctc2_vmem_encode(struct ia_css_isp_ctc2_vmem_params *to,
+ const struct ia_css_ctc2_config *from,
+ size_t size)
+{
+ unsigned i, j;
+ const unsigned shffl_blck = 4;
+ const unsigned length_zeros = 11;
+ short dydx0, dydx1, dydx2, dydx3, dydx4;
+
+ (void)size;
+ /*
+ * Calculation of slopes of lines interconnecting
+ * 0.0 -> y_x1 -> y_x2 -> y_x3 -> y_x4 -> 1.0
+ */
+ dydx0 = ctc2_slope(from->y_y1, from->y_y0,
+ from->y_x1, 0);
+ dydx1 = ctc2_slope(from->y_y2, from->y_y1,
+ from->y_x2, from->y_x1);
+ dydx2 = ctc2_slope(from->y_y3, from->y_y2,
+ from->y_x3, from->y_x2);
+ dydx3 = ctc2_slope(from->y_y4, from->y_y3,
+ from->y_x4, from->y_x3);
+ dydx4 = ctc2_slope(from->y_y5, from->y_y4,
+ SH_CSS_BAYER_MAXVAL, from->y_x4);
+
+ /*Fill 3 arrays with:
+ * - Luma input gain values y_y0, y_y1, y_y2, y_y3, y_y4
+ * - Luma kneepoints 0, y_x1, y_x2, y_x3, y_x4
+ * - Calculated slopes dydx0, dydx1, dydx2, dydx3, dydx4
+ *
+ * - Each 64-element array is divided in blocks of 16 elements:
+ * the 5 parameters + zeros in the remaining 11 positions
+ * - All blocks of the same array will contain the same data
+ */
+ for (i = 0; i < shffl_blck; i++) {
+ to->y_x[0][(i << shffl_blck)] = 0;
+ to->y_x[0][(i << shffl_blck) + 1] = from->y_x1;
+ to->y_x[0][(i << shffl_blck) + 2] = from->y_x2;
+ to->y_x[0][(i << shffl_blck) + 3] = from->y_x3;
+ to->y_x[0][(i << shffl_blck) + 4] = from->y_x4;
+
+ to->y_y[0][(i << shffl_blck)] = from->y_y0;
+ to->y_y[0][(i << shffl_blck) + 1] = from->y_y1;
+ to->y_y[0][(i << shffl_blck) + 2] = from->y_y2;
+ to->y_y[0][(i << shffl_blck) + 3] = from->y_y3;
+ to->y_y[0][(i << shffl_blck) + 4] = from->y_y4;
+
+ to->e_y_slope[0][(i << shffl_blck)] = dydx0;
+ to->e_y_slope[0][(i << shffl_blck) + 1] = dydx1;
+ to->e_y_slope[0][(i << shffl_blck) + 2] = dydx2;
+ to->e_y_slope[0][(i << shffl_blck) + 3] = dydx3;
+ to->e_y_slope[0][(i << shffl_blck) + 4] = dydx4;
+
+ for (j = 0; j < length_zeros; j++) {
+ to->y_x[0][(i << shffl_blck) + 5 + j] = 0;
+ to->y_y[0][(i << shffl_blck) + 5 + j] = 0;
+ to->e_y_slope[0][(i << shffl_blck) + 5 + j] = 0;
+ }
+ }
+}
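
Each of the three VMEM arrays above is written as four identical 16-element blocks: five meaningful entries followed by eleven zeros. The stride works out to 16 because shffl_blck happens to be 4 and is used as a shift count (i << 4). A plain-array sketch of the same layout, illustrative only:

/* Illustrative sketch only: 4 blocks of (5 values + 11 zeros) in a 64-entry array. */
#include <stdio.h>

int main(void)
{
        short buf[64] = {0};
        const short vals[5] = {10, 20, 30, 40, 50};   /* placeholder data */
        int i, j;

        for (i = 0; i < 4; i++)                 /* four identical blocks */
                for (j = 0; j < 5; j++)         /* remaining 11 entries stay zero */
                        buf[(i << 4) + j] = vals[j];

        for (i = 0; i < 64; i++)
                printf("%d%c", buf[i], (i % 16 == 15) ? '\n' : ' ');
        return 0;
}
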
+
+/* (void) = ia_css_ctc2_encode(*to, *from)
+ * -----------------------------------------------
+ * DMEM Encode Function to translate UV parameters from userspace into ISP space
+ */
+void ia_css_ctc2_encode(struct ia_css_isp_ctc2_dmem_params *to,
+ struct ia_css_ctc2_config *from,
+ size_t size)
+{
+ (void)size;
+
+ to->uv_y0 = from->uv_y0;
+ to->uv_y1 = from->uv_y1;
+ to->uv_x0 = from->uv_x0;
+ to->uv_x1 = from->uv_x1;
+
+ /*Slope Calculation*/
+ to->uv_dydx = ctc2_slope(from->uv_y1, from->uv_y0,
+ from->uv_x1, from->uv_x0);
+}
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc2/ia_css_ctc2.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc2/ia_css_ctc2.host.h
new file mode 100644
index 000000000000..3733aee24dcd
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc2/ia_css_ctc2.host.h
@@ -0,0 +1,33 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_CTC2_HOST_H
+#define __IA_CSS_CTC2_HOST_H
+
+#include "ia_css_ctc2_param.h"
+#include "ia_css_ctc2_types.h"
+
+extern const struct ia_css_ctc2_config default_ctc2_config;
+
+/*Encode Functions to translate parameters from userspace into ISP space*/
+
+void ia_css_ctc2_vmem_encode(struct ia_css_isp_ctc2_vmem_params *to,
+ const struct ia_css_ctc2_config *from,
+ size_t size);
+
+void ia_css_ctc2_encode(struct ia_css_isp_ctc2_dmem_params *to,
+ struct ia_css_ctc2_config *from,
+ size_t size);
+
+#endif /* __IA_CSS_CTC2_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc2/ia_css_ctc2_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc2/ia_css_ctc2_param.h
new file mode 100644
index 000000000000..c66e823618f6
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc2/ia_css_ctc2_param.h
@@ -0,0 +1,49 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_CTC2_PARAM_H
+#define __IA_CSS_CTC2_PARAM_H
+
+#define IA_CSS_CTC_COEF_SHIFT 13
+#include "vmem.h" /* needed for VMEM_ARRAY */
+
+/* CTC (Chroma Tone Control) ISP Parameters */
+
+/*VMEM Luma params*/
+struct ia_css_isp_ctc2_vmem_params {
+ /** Kneepoints by Y(Luma): 0.0, y_x1, y_x2, y_x3, y_x4 */
+ VMEM_ARRAY(y_x, ISP_VEC_NELEMS);
+ /** Gains by Y(Luma) at Y = 0.0, Y_X1, Y_X2, Y_X3, Y_X4 */
+ VMEM_ARRAY(y_y, ISP_VEC_NELEMS);
+ /** Slopes of lines interconnecting
+ * 0.0 -> y_x1 -> y_x2 -> y_x3 -> y_x4 -> 1.0 */
+ VMEM_ARRAY(e_y_slope, ISP_VEC_NELEMS);
+};
+
+/*DMEM Chroma params*/
+struct ia_css_isp_ctc2_dmem_params {
+
+ /** Gains by UV(Chroma) under kneepoints uv_x0 and uv_x1*/
+ int32_t uv_y0;
+ int32_t uv_y1;
+
+ /** Kneepoints by UV(Chroma)- uv_x0 and uv_x1*/
+ int32_t uv_x0;
+ int32_t uv_x1;
+
+ /** Slope of line interconnecting uv_x0 -> uv_x1*/
+ int32_t uv_dydx;
+
+};
+#endif /* __IA_CSS_CTC2_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc2/ia_css_ctc2_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc2/ia_css_ctc2_types.h
new file mode 100644
index 000000000000..7b75f01e2ad2
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc2/ia_css_ctc2_types.h
@@ -0,0 +1,55 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_CTC2_TYPES_H
+#define __IA_CSS_CTC2_TYPES_H
+
+/** Chroma Tone Control configuration.
+*
+* ISP block: CTC2 (CTC by polygonal approximation)
+* (ISP1: CTC1 (CTC by look-up table) is used.)
+* ISP2: CTC2 is used.
+* ISP261: CTC2 (CTC by Fast Approximate Distance)
+*/
+struct ia_css_ctc2_config {
+
+ /** Gains by Y(Luma) at Y = 0.0, Y_X1, Y_X2, Y_X3, Y_X4 and 1.0
+ * --default/ineffective value: 4096(0.5f)
+ */
+ int32_t y_y0;
+ int32_t y_y1;
+ int32_t y_y2;
+ int32_t y_y3;
+ int32_t y_y4;
+ int32_t y_y5;
+ /** 1st-4th kneepoints by Y(Luma) --default/ineffective value: n/a
+ * requirement: 0.0 < y_x1 < y_x2 < y_x3 < y_x4 < 1.0
+ */
+ int32_t y_x1;
+ int32_t y_x2;
+ int32_t y_x3;
+ int32_t y_x4;
+ /** Gains by UV(Chroma) under thresholds uv_x0 and uv_x1
+ * --default/ineffective value: 4096(0.5f)
+ */
+ int32_t uv_y0;
+ int32_t uv_y1;
+ /** Minimum and Maximum Thresholds by UV(Chroma)- uv_x0 and uv_x1
+ * --default/ineffective value: n/a
+ */
+ int32_t uv_x0;
+ int32_t uv_x1;
+};
+
+#endif /* __IA_CSS_CTC2_TYPES_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc_1.0/ia_css_ctc.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc_1.0/ia_css_ctc.host.c
new file mode 100644
index 000000000000..7c1a367918a0
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc_1.0/ia_css_ctc.host.c
@@ -0,0 +1,63 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "ia_css_types.h"
+#include "sh_css_defs.h"
+#include "ia_css_debug.h"
+#include "assert_support.h"
+
+#include "ia_css_ctc.host.h"
+
+const struct ia_css_ctc_config default_ctc_config = {
+ ((1 << IA_CSS_CTC_COEF_SHIFT) + 1) / 2, /* 0.5 */
+ ((1 << IA_CSS_CTC_COEF_SHIFT) + 1) / 2, /* 0.5 */
+ ((1 << IA_CSS_CTC_COEF_SHIFT) + 1) / 2, /* 0.5 */
+ ((1 << IA_CSS_CTC_COEF_SHIFT) + 1) / 2, /* 0.5 */
+ ((1 << IA_CSS_CTC_COEF_SHIFT) + 1) / 2, /* 0.5 */
+ ((1 << IA_CSS_CTC_COEF_SHIFT) + 1) / 2, /* 0.5 */
+ 1,
+ SH_CSS_BAYER_MAXVAL / 5, /* To be implemented */
+ SH_CSS_BAYER_MAXVAL * 2 / 5, /* To be implemented */
+ SH_CSS_BAYER_MAXVAL * 3 / 5, /* To be implemented */
+ SH_CSS_BAYER_MAXVAL * 4 / 5, /* To be implemented */
+};
+
+void
+ia_css_ctc_vamem_encode(
+ struct sh_css_isp_ctc_vamem_params *to,
+ const struct ia_css_ctc_table *from,
+ unsigned size)
+{
+ (void)size;
+ memcpy(&to->ctc, &from->data, sizeof(to->ctc));
+}
+
+void
+ia_css_ctc_debug_dtrace(
+ const struct ia_css_ctc_config *config,
+ unsigned level)
+{
+ ia_css_debug_dtrace(level,
+ "config.ce_gain_exp=%d, config.y0=%d, "
+ "config.x1=%d, config.y1=%d, "
+ "config.x2=%d, config.y2=%d, "
+ "config.x3=%d, config.y3=%d, "
+ "config.x4=%d, config.y4=%d\n",
+ config->ce_gain_exp, config->y0,
+ config->x1, config->y1,
+ config->x2, config->y2,
+ config->x3, config->y3,
+ config->x4, config->y4);
+}
+
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc_1.0/ia_css_ctc.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc_1.0/ia_css_ctc.host.h
new file mode 100644
index 000000000000..bec52a6519f9
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc_1.0/ia_css_ctc.host.h
@@ -0,0 +1,36 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_CTC_HOST_H
+#define __IA_CSS_CTC_HOST_H
+
+#include "sh_css_params.h"
+
+#include "ia_css_ctc_param.h"
+#include "ia_css_ctc_table.host.h"
+
+extern const struct ia_css_ctc_config default_ctc_config;
+
+void
+ia_css_ctc_vamem_encode(
+ struct sh_css_isp_ctc_vamem_params *to,
+ const struct ia_css_ctc_table *from,
+ unsigned size);
+
+void
+ia_css_ctc_debug_dtrace(
+ const struct ia_css_ctc_config *config,
+ unsigned level);
+
+#endif /* __IA_CSS_CTC_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc_1.0/ia_css_ctc_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc_1.0/ia_css_ctc_param.h
new file mode 100644
index 000000000000..6e88ad3d2420
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc_1.0/ia_css_ctc_param.h
@@ -0,0 +1,44 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_CTC_PARAM_H
+#define __IA_CSS_CTC_PARAM_H
+
+#include "type_support.h"
+#include <system_global.h>
+
+#include "ia_css_ctc_types.h"
+
+#ifndef PIPE_GENERATION
+#if defined(HAS_VAMEM_VERSION_2)
+#define SH_CSS_ISP_CTC_TABLE_SIZE_LOG2 IA_CSS_VAMEM_2_CTC_TABLE_SIZE_LOG2
+#define SH_CSS_ISP_CTC_TABLE_SIZE IA_CSS_VAMEM_2_CTC_TABLE_SIZE
+#elif defined(HAS_VAMEM_VERSION_1)
+#define SH_CSS_ISP_CTC_TABLE_SIZE_LOG2 IA_CSS_VAMEM_1_CTC_TABLE_SIZE_LOG2
+#define SH_CSS_ISP_CTC_TABLE_SIZE IA_CSS_VAMEM_1_CTC_TABLE_SIZE
+#else
+#error "VAMEM should be {VERSION1, VERSION2}"
+#endif
+
+#else
+/* For pipe generation, the size is not relevant */
+#define SH_CSS_ISP_CTC_TABLE_SIZE 0
+#endif
+
+/* This should be vamem_data_t, but that breaks the pipe generator */
+struct sh_css_isp_ctc_vamem_params {
+ uint16_t ctc[SH_CSS_ISP_CTC_TABLE_SIZE];
+};
+
+#endif /* __IA_CSS_CTC_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc_1.0/ia_css_ctc_table.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc_1.0/ia_css_ctc_table.host.c
new file mode 100644
index 000000000000..edf85aba7716
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc_1.0/ia_css_ctc_table.host.c
@@ -0,0 +1,215 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <type_support.h>
+#include <string_support.h> /* memcpy */
+#include "system_global.h"
+#include "vamem.h"
+#include "ia_css_types.h"
+#include "ia_css_ctc_table.host.h"
+
+struct ia_css_ctc_table default_ctc_table;
+
+#if defined(HAS_VAMEM_VERSION_2)
+
+static const uint16_t
+default_ctc_table_data[IA_CSS_VAMEM_2_CTC_TABLE_SIZE] = {
+ 0, 384, 837, 957, 1011, 1062, 1083, 1080,
+1078, 1077, 1053, 1039, 1012, 992, 969, 951,
+ 929, 906, 886, 866, 845, 823, 809, 790,
+ 772, 758, 741, 726, 711, 701, 688, 675,
+ 666, 656, 648, 639, 633, 626, 618, 612,
+ 603, 594, 582, 572, 557, 545, 529, 516,
+ 504, 491, 480, 467, 459, 447, 438, 429,
+ 419, 412, 404, 397, 389, 382, 376, 368,
+ 363, 357, 351, 345, 340, 336, 330, 326,
+ 321, 318, 312, 308, 304, 300, 297, 294,
+ 291, 286, 284, 281, 278, 275, 271, 268,
+ 261, 257, 251, 245, 240, 235, 232, 225,
+ 223, 218, 213, 209, 206, 204, 199, 197,
+ 193, 189, 186, 185, 183, 179, 177, 175,
+ 172, 170, 169, 167, 164, 164, 162, 160,
+ 158, 157, 156, 154, 154, 152, 151, 150,
+ 149, 148, 146, 147, 146, 144, 143, 143,
+ 142, 141, 140, 141, 139, 138, 138, 138,
+ 137, 136, 136, 135, 134, 134, 134, 133,
+ 132, 132, 131, 130, 131, 130, 129, 128,
+ 129, 127, 127, 127, 127, 125, 125, 125,
+ 123, 123, 122, 120, 118, 115, 114, 111,
+ 110, 108, 106, 105, 103, 102, 100, 99,
+ 97, 97, 96, 95, 94, 93, 93, 91,
+ 91, 91, 90, 90, 89, 89, 88, 88,
+ 89, 88, 88, 87, 87, 87, 87, 86,
+ 87, 87, 86, 87, 86, 86, 84, 84,
+ 82, 80, 78, 76, 74, 72, 70, 68,
+ 67, 65, 62, 60, 58, 56, 55, 54,
+ 53, 51, 49, 49, 47, 45, 45, 45,
+ 41, 40, 39, 39, 34, 33, 34, 32,
+ 25, 23, 24, 20, 13, 9, 12, 0,
+ 0
+};
+
+#elif defined(HAS_VAMEM_VERSION_1)
+
+/* Default Parameters */
+static const uint16_t
+default_ctc_table_data[IA_CSS_VAMEM_1_CTC_TABLE_SIZE] = {
+ 0, 0, 256, 384, 384, 497, 765, 806,
+ 837, 851, 888, 901, 957, 981, 993, 1001,
+ 1011, 1029, 1028, 1039, 1062, 1059, 1073, 1080,
+ 1083, 1085, 1085, 1098, 1080, 1084, 1085, 1093,
+ 1078, 1073, 1070, 1069, 1077, 1066, 1072, 1063,
+ 1053, 1044, 1046, 1053, 1039, 1028, 1025, 1024,
+ 1012, 1013, 1016, 996, 992, 990, 990, 980,
+ 969, 968, 961, 955, 951, 949, 933, 930,
+ 929, 925, 921, 916, 906, 901, 895, 893,
+ 886, 877, 872, 869, 866, 861, 857, 849,
+ 845, 838, 836, 832, 823, 821, 815, 813,
+ 809, 805, 796, 793, 790, 785, 784, 778,
+ 772, 768, 766, 763, 758, 752, 749, 745,
+ 741, 740, 736, 730, 726, 724, 723, 718,
+ 711, 709, 706, 704, 701, 698, 691, 689,
+ 688, 683, 683, 678, 675, 673, 671, 669,
+ 666, 663, 661, 660, 656, 656, 653, 650,
+ 648, 647, 646, 643, 639, 638, 637, 635,
+ 633, 632, 629, 627, 626, 625, 622, 621,
+ 618, 618, 614, 614, 612, 609, 606, 606,
+ 603, 600, 600, 597, 594, 591, 590, 586,
+ 582, 581, 578, 575, 572, 569, 563, 560,
+ 557, 554, 551, 548, 545, 539, 536, 533,
+ 529, 527, 524, 519, 516, 513, 510, 507,
+ 504, 501, 498, 493, 491, 488, 485, 484,
+ 480, 476, 474, 471, 467, 466, 464, 460,
+ 459, 455, 453, 449, 447, 446, 443, 441,
+ 438, 435, 432, 432, 429, 427, 426, 422,
+ 419, 418, 416, 414, 412, 410, 408, 406,
+ 404, 402, 401, 398, 397, 395, 393, 390,
+ 389, 388, 387, 384, 382, 380, 378, 377,
+ 376, 375, 372, 370, 368, 368, 366, 364,
+ 363, 361, 360, 358, 357, 355, 354, 352,
+ 351, 350, 349, 346, 345, 344, 344, 342,
+ 340, 339, 337, 337, 336, 335, 333, 331,
+ 330, 329, 328, 326, 326, 324, 324, 322,
+ 321, 320, 318, 318, 318, 317, 315, 313,
+ 312, 311, 311, 310, 308, 307, 306, 306,
+ 304, 304, 302, 301, 300, 300, 299, 297,
+ 297, 296, 296, 294, 294, 292, 291, 291,
+ 291, 290, 288, 287, 286, 286, 287, 285,
+ 284, 283, 282, 282, 281, 281, 279, 278,
+ 278, 278, 276, 276, 275, 274, 274, 273,
+ 271, 270, 269, 268, 268, 267, 265, 262,
+ 261, 260, 260, 259, 257, 254, 252, 252,
+ 251, 251, 249, 246, 245, 244, 243, 242,
+ 240, 239, 239, 237, 235, 235, 233, 231,
+ 232, 230, 229, 226, 225, 224, 225, 224,
+ 223, 220, 219, 219, 218, 217, 217, 214,
+ 213, 213, 212, 211, 209, 209, 209, 208,
+ 206, 205, 204, 203, 204, 203, 201, 200,
+ 199, 197, 198, 198, 197, 195, 194, 194,
+ 193, 192, 192, 191, 189, 190, 189, 188,
+ 186, 187, 186, 185, 185, 184, 183, 181,
+ 183, 182, 181, 180, 179, 178, 178, 178,
+ 177, 176, 175, 176, 175, 174, 174, 173,
+ 172, 173, 172, 171, 170, 170, 169, 169,
+ 169, 168, 167, 166, 167, 167, 166, 165,
+ 164, 164, 164, 163, 164, 163, 162, 163,
+ 162, 161, 160, 161, 160, 160, 160, 159,
+ 158, 157, 158, 158, 157, 157, 156, 156,
+ 156, 156, 155, 155, 154, 154, 154, 154,
+ 154, 153, 152, 153, 152, 152, 151, 152,
+ 151, 152, 151, 150, 150, 149, 149, 150,
+ 149, 149, 148, 148, 148, 149, 148, 147,
+ 146, 146, 147, 146, 147, 146, 145, 146,
+ 146, 145, 144, 145, 144, 145, 144, 144,
+ 143, 143, 143, 144, 143, 142, 142, 142,
+ 142, 142, 142, 141, 141, 141, 141, 140,
+ 140, 141, 140, 140, 141, 140, 139, 139,
+ 139, 140, 139, 139, 138, 138, 137, 139,
+ 138, 138, 138, 137, 138, 137, 137, 137,
+ 137, 136, 137, 136, 136, 136, 136, 135,
+ 136, 135, 135, 135, 135, 136, 135, 135,
+ 134, 134, 133, 135, 134, 134, 134, 133,
+ 134, 133, 134, 133, 133, 132, 133, 133,
+ 132, 133, 132, 132, 132, 132, 131, 131,
+ 131, 132, 131, 131, 130, 131, 130, 132,
+ 131, 130, 130, 129, 130, 129, 130, 129,
+ 129, 129, 130, 129, 128, 128, 128, 128,
+ 129, 128, 128, 127, 127, 128, 128, 127,
+ 127, 126, 126, 127, 127, 126, 126, 126,
+ 127, 126, 126, 126, 125, 125, 126, 125,
+ 125, 124, 124, 124, 125, 125, 124, 124,
+ 123, 124, 124, 123, 123, 122, 122, 122,
+ 122, 122, 121, 120, 120, 119, 118, 118,
+ 118, 117, 117, 116, 115, 115, 115, 114,
+ 114, 113, 113, 112, 111, 111, 111, 110,
+ 110, 109, 109, 108, 108, 108, 107, 107,
+ 106, 106, 105, 105, 105, 104, 104, 103,
+ 103, 102, 102, 102, 102, 101, 101, 100,
+ 100, 99, 99, 99, 99, 99, 99, 98,
+ 97, 98, 97, 97, 97, 96, 96, 95,
+ 96, 95, 96, 95, 95, 94, 94, 95,
+ 94, 94, 94, 93, 93, 92, 93, 93,
+ 93, 93, 92, 92, 91, 92, 92, 92,
+ 91, 91, 90, 90, 91, 91, 91, 90,
+ 90, 90, 90, 91, 90, 90, 90, 89,
+ 89, 89, 90, 89, 89, 89, 89, 89,
+ 88, 89, 89, 88, 88, 88, 88, 87,
+ 89, 88, 88, 88, 88, 88, 87, 88,
+ 88, 88, 87, 87, 87, 87, 87, 88,
+ 87, 87, 87, 87, 87, 87, 88, 87,
+ 87, 87, 87, 86, 86, 87, 87, 87,
+ 87, 86, 86, 86, 87, 87, 86, 87,
+ 86, 86, 86, 87, 87, 86, 86, 86,
+ 86, 86, 87, 87, 86, 85, 85, 85,
+ 84, 85, 85, 84, 84, 83, 83, 82,
+ 82, 82, 81, 81, 80, 79, 79, 79,
+ 78, 77, 77, 76, 76, 76, 75, 74,
+ 74, 74, 73, 73, 72, 71, 71, 71,
+ 70, 70, 69, 69, 68, 68, 67, 67,
+ 67, 66, 66, 65, 65, 64, 64, 63,
+ 62, 62, 62, 61, 60, 60, 59, 59,
+ 58, 58, 57, 57, 56, 56, 56, 55,
+ 55, 54, 55, 55, 54, 53, 53, 52,
+ 53, 53, 52, 51, 51, 50, 51, 50,
+ 49, 49, 50, 49, 49, 48, 48, 47,
+ 47, 48, 46, 45, 45, 45, 46, 45,
+ 45, 44, 45, 45, 45, 43, 42, 42,
+ 41, 43, 41, 40, 40, 39, 40, 41,
+ 39, 39, 39, 39, 39, 38, 35, 35,
+ 34, 37, 36, 34, 33, 33, 33, 35,
+ 34, 32, 32, 31, 32, 30, 29, 26,
+ 25, 25, 27, 26, 23, 23, 23, 25,
+ 24, 24, 22, 21, 20, 19, 16, 14,
+ 13, 13, 13, 10, 9, 7, 7, 7,
+ 12, 12, 12, 7, 0, 0, 0, 0
+};
+
+#else
+#error "VAMEM version must be one of {VAMEM_VERSION_1, VAMEM_VERSION_2}"
+#endif
+
+void
+ia_css_config_ctc_table(void)
+{
+#if defined(HAS_VAMEM_VERSION_2)
+ memcpy(default_ctc_table.data.vamem_2, default_ctc_table_data,
+ sizeof(default_ctc_table_data));
+ default_ctc_table.vamem_type = IA_CSS_VAMEM_TYPE_2;
+#else
+ memcpy(default_ctc_table.data.vamem_1, default_ctc_table_data,
+ sizeof(default_ctc_table_data));
+ default_ctc_table.vamem_type = IA_CSS_VAMEM_TYPE_1;
+#endif
+}
+
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc_1.0/ia_css_ctc_table.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc_1.0/ia_css_ctc_table.host.h
new file mode 100644
index 000000000000..a350dec8b4ad
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc_1.0/ia_css_ctc_table.host.h
@@ -0,0 +1,24 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_CTC_TABLE_HOST_H
+#define __IA_CSS_CTC_TABLE_HOST_H
+
+#include "ia_css_ctc_types.h"
+
+extern struct ia_css_ctc_table default_ctc_table;
+
+void ia_css_config_ctc_table(void);
+
+#endif /* __IA_CSS_CTC_TABLE_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc_1.0/ia_css_ctc_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc_1.0/ia_css_ctc_types.h
new file mode 100644
index 000000000000..1da215bb966d
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ctc/ctc_1.0/ia_css_ctc_types.h
@@ -0,0 +1,110 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_CTC_TYPES_H
+#define __IA_CSS_CTC_TYPES_H
+
+/** @file
+* CSS-API header file for Chroma Tone Control parameters.
+*/
+
+/** Fractional bits for CTC gain (used only for ISP1).
+ *
+ * IA_CSS_CTC_COEF_SHIFT(=13) includes not only the fractional bits
+ * of gain(=8), but also the bits(=5) to convert chroma
+ * from 13bit precision to 8bit precision.
+ *
+ * Gain (struct ia_css_ctc_table) : u5.8
+ * Input(Chroma) : s0.12 (13bit precision)
+ * Output(Chroma): s0.7 (8bit precision)
+ * Output = (Input * Gain) >> IA_CSS_CTC_COEF_SHIFT
+ */
+#define IA_CSS_CTC_COEF_SHIFT 13
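
The comment above pins down the fixed-point contract for ISP1: the gain is u5.8, the chroma input has 13-bit precision, and the product is shifted down by IA_CSS_CTC_COEF_SHIFT (8 fractional gain bits plus 5 bits of precision reduction). A tiny worked example of that formula, illustrative only:

/* Illustrative sketch only: out = (in * gain) >> IA_CSS_CTC_COEF_SHIFT */
#include <stdio.h>

#define COEF_SHIFT 13                     /* IA_CSS_CTC_COEF_SHIFT */

int main(void)
{
        int chroma_in = 4095;             /* near full scale of the 13-bit input */
        int gain = 1 << 8;                /* 1.0 in u5.8 */
        int chroma_out = (chroma_in * gain) >> COEF_SHIFT;

        printf("%d\n", chroma_out);       /* 127: mapped into the 8-bit output range */
        return 0;
}
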
+
+/** Log2 of the number of elements in the CTC table. */
+#define IA_CSS_VAMEM_1_CTC_TABLE_SIZE_LOG2 10
+/** Number of elements in the CTC table. */
+#define IA_CSS_VAMEM_1_CTC_TABLE_SIZE (1U<<IA_CSS_VAMEM_1_CTC_TABLE_SIZE_LOG2)
+
+/** Log2 of the number of elements in the CTC table. */
+#define IA_CSS_VAMEM_2_CTC_TABLE_SIZE_LOG2 8
+/** Number of elements in the CTC table. */
+#define IA_CSS_VAMEM_2_CTC_TABLE_SIZE ((1U<<IA_CSS_VAMEM_2_CTC_TABLE_SIZE_LOG2) + 1)
+
+enum ia_css_vamem_type {
+ IA_CSS_VAMEM_TYPE_1,
+ IA_CSS_VAMEM_TYPE_2
+};
+
+/** Chroma Tone Control configuration.
+ *
+ * ISP block: CTC2 (CTC by polygonal line approximation)
+ * (ISP1: CTC1 (CTC by look-up table) is used.)
+ * ISP2: CTC2 is used.
+ */
+struct ia_css_ctc_config {
+ uint16_t y0; /**< 1st kneepoint gain.
+ u[ce_gain_exp].[13-ce_gain_exp], [0,8191],
+ default/ineffective 4096(0.5) */
+ uint16_t y1; /**< 2nd kneepoint gain.
+ u[ce_gain_exp].[13-ce_gain_exp], [0,8191],
+ default/ineffective 4096(0.5) */
+ uint16_t y2; /**< 3rd kneepoint gain.
+ u[ce_gain_exp].[13-ce_gain_exp], [0,8191],
+ default/ineffective 4096(0.5) */
+ uint16_t y3; /**< 4th kneepoint gain.
+ u[ce_gain_exp].[13-ce_gain_exp], [0,8191],
+ default/ineffective 4096(0.5) */
+ uint16_t y4; /**< 5th kneepoint gain.
+ u[ce_gain_exp].[13-ce_gain_exp], [0,8191],
+ default/ineffective 4096(0.5) */
+ uint16_t y5; /**< 6th kneepoint gain.
+ u[ce_gain_exp].[13-ce_gain_exp], [0,8191],
+ default/ineffective 4096(0.5) */
+ uint16_t ce_gain_exp; /**< Common exponent of y-axis gain.
+ u8.0, [0,13],
+ default/ineffective 1 */
+ uint16_t x1; /**< 2nd kneepoint luma.
+ u0.13, [0,8191], constraints: 0<x1<x2,
+ default/ineffective 1024 */
+ uint16_t x2; /**< 3rd kneepoint luma.
+ u0.13, [0,8191], constraints: x1<x2<x3,
+ default/ineffective 2048 */
+ uint16_t x3; /**< 4th kneepoint luma.
+ u0.13, [0,8191], constraints: x2<x3<x4,
+ default/ineffective 6144 */
+ uint16_t x4; /**< 5th kneepoint luma.
+ u0.13, [0,8191], constraints: x3<x4<8191,
+ default/ineffective 7168 */
+};
+
+union ia_css_ctc_data {
+ uint16_t vamem_1[IA_CSS_VAMEM_1_CTC_TABLE_SIZE];
+ uint16_t vamem_2[IA_CSS_VAMEM_2_CTC_TABLE_SIZE];
+};
+
+/** CTC table, used for Chroma Tone Control.
+ *
+ * ISP block: CTC1 (CTC by look-up table)
+ * ISP1: CTC1 is used.
+ * (ISP2: CTC2 (CTC by polygonal line approximation) is used.)
+ */
+struct ia_css_ctc_table {
+ enum ia_css_vamem_type vamem_type;
+ union ia_css_ctc_data data;
+};
+
+#endif /* __IA_CSS_CTC_TYPES_H */
+
+
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/de/de_1.0/ia_css_de.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/de/de_1.0/ia_css_de.host.c
new file mode 100644
index 000000000000..fbab2f1c396c
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/de/de_1.0/ia_css_de.host.c
@@ -0,0 +1,79 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "ia_css_types.h"
+#include "sh_css_defs.h"
+#include "ia_css_debug.h"
+#include "sh_css_frac.h"
+#include "ia_css_de.host.h"
+
+const struct ia_css_de_config default_de_config = {
+ 0,
+ 0,
+ 0
+};
+
+void
+ia_css_de_encode(
+ struct sh_css_isp_de_params *to,
+ const struct ia_css_de_config *from,
+ unsigned size)
+{
+ (void)size;
+ to->pixelnoise =
+ uDIGIT_FITTING(from->pixelnoise, 16, SH_CSS_BAYER_BITS);
+ to->c1_coring_threshold =
+ uDIGIT_FITTING(from->c1_coring_threshold, 16,
+ SH_CSS_BAYER_BITS);
+ to->c2_coring_threshold =
+ uDIGIT_FITTING(from->c2_coring_threshold, 16,
+ SH_CSS_BAYER_BITS);
+}
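
ia_css_de_encode() only rescales the three u0.16 user parameters into the ISP's bayer-bit precision through uDIGIT_FITTING, which is defined in sh_css_defs.h outside this hunk. As a hedged sketch, assuming that fitting amounts to dropping the excess fractional bits, and taking a bayer width of 11 purely as an example value:

/* Illustrative sketch only; both the shift and the bayer width are assumptions. */
#define EXAMPLE_BAYER_BITS 11

static unsigned int fit_u0_16(unsigned int v)
{
        return v >> (16 - EXAMPLE_BAYER_BITS);   /* keep the most significant bits */
}

/* e.g. the default c1_coring_threshold of 128 (0.001953125 in u0.16)
 * would become 128 >> 5 = 4 under this assumed scaling. */
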
+
+void
+ia_css_de_dump(
+ const struct sh_css_isp_de_params *de,
+ unsigned level)
+{
+ if (!de) return;
+ ia_css_debug_dtrace(level, "Demosaic:\n");
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "de_pixelnoise", de->pixelnoise);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "de_c1_coring_threshold",
+ de->c1_coring_threshold);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "de_c2_coring_threshold",
+ de->c2_coring_threshold);
+}
+
+void
+ia_css_de_debug_dtrace(
+ const struct ia_css_de_config *config,
+ unsigned level)
+{
+ ia_css_debug_dtrace(level,
+ "config.pixelnoise=%d, "
+ "config.c1_coring_threshold=%d, config.c2_coring_threshold=%d\n",
+ config->pixelnoise,
+ config->c1_coring_threshold, config->c2_coring_threshold);
+}
+
+void
+ia_css_init_de_state(
+ void/*struct sh_css_isp_de_vmem_state*/ *state,
+ size_t size)
+{
+ memset(state, 0, size);
+}
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/de/de_1.0/ia_css_de.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/de/de_1.0/ia_css_de.host.h
new file mode 100644
index 000000000000..5dd6f06f2bf1
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/de/de_1.0/ia_css_de.host.h
@@ -0,0 +1,44 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_DE_HOST_H
+#define __IA_CSS_DE_HOST_H
+
+#include "ia_css_de_types.h"
+#include "ia_css_de_param.h"
+
+extern const struct ia_css_de_config default_de_config;
+
+void
+ia_css_de_encode(
+ struct sh_css_isp_de_params *to,
+ const struct ia_css_de_config *from,
+ unsigned size);
+
+void
+ia_css_de_dump(
+ const struct sh_css_isp_de_params *de,
+ unsigned level);
+
+void
+ia_css_de_debug_dtrace(
+ const struct ia_css_de_config *config,
+ unsigned level);
+
+void
+ia_css_init_de_state(
+ void/*struct sh_css_isp_de_vmem_state*/ *state,
+ size_t size);
+
+#endif /* __IA_CSS_DE_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/de/de_1.0/ia_css_de_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/de/de_1.0/ia_css_de_param.h
new file mode 100644
index 000000000000..833c80afc7a8
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/de/de_1.0/ia_css_de_param.h
@@ -0,0 +1,27 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_DE_PARAM_H
+#define __IA_CSS_DE_PARAM_H
+
+#include "type_support.h"
+
+/* DE (Demosaic) */
+struct sh_css_isp_de_params {
+ int32_t pixelnoise;
+ int32_t c1_coring_threshold;
+ int32_t c2_coring_threshold;
+};
+
+#endif /* __IA_CSS_DE_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/de/de_1.0/ia_css_de_state.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/de/de_1.0/ia_css_de_state.h
new file mode 100644
index 000000000000..d64511763436
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/de/de_1.0/ia_css_de_state.h
@@ -0,0 +1,26 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_DE_STATE_H
+#define __IA_CSS_DE_STATE_H
+
+#include "type_support.h"
+#include "vmem.h"
+
+/* DE (Demosaic) */
+struct sh_css_isp_de_vmem_state {
+ VMEM_ARRAY(de_buf[4], MAX_VECTORS_PER_BUF_LINE*ISP_NWAY);
+};
+
+#endif /* __IA_CSS_DE_STATE_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/de/de_1.0/ia_css_de_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/de/de_1.0/ia_css_de_types.h
new file mode 100644
index 000000000000..525c838d5a99
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/de/de_1.0/ia_css_de_types.h
@@ -0,0 +1,43 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_DE_TYPES_H
+#define __IA_CSS_DE_TYPES_H
+
+/** @file
+* CSS-API header file for Demosaic (bayer-to-YCgCo) parameters.
+*/
+
+/** Demosaic (bayer-to-YCgCo) configuration.
+ *
+ * ISP block: DE1
+ * ISP1: DE1 is used.
+ * (ISP2: DE2 is used.)
+ */
+struct ia_css_de_config {
+ ia_css_u0_16 pixelnoise; /**< Pixel noise used in moire elimination.
+ u0.16, [0,65535],
+ default 0, ineffective 0 */
+ ia_css_u0_16 c1_coring_threshold; /**< Coring threshold for C1.
+ This is the same as nr_config.threshold_cb.
+ u0.16, [0,65535],
+ default 128(0.001953125), ineffective 0 */
+ ia_css_u0_16 c2_coring_threshold; /**< Coring threshold for C2.
+ This is the same as nr_config.threshold_cr.
+ u0.16, [0,65535],
+ default 128(0.001953125), ineffective 0 */
+};
+
+#endif /* __IA_CSS_DE_TYPES_H */
+
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/de/de_2/ia_css_de2.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/de/de_2/ia_css_de2.host.c
new file mode 100644
index 000000000000..a5247a57bafb
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/de/de_2/ia_css_de2.host.c
@@ -0,0 +1,54 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "ia_css_types.h"
+#include "sh_css_defs.h"
+#include "ia_css_debug.h"
+
+#include "ia_css_de2.host.h"
+
+const struct ia_css_ecd_config default_ecd_config = {
+ (1 << (ISP_VEC_ELEMBITS - 1)) * 2 / 3, /* 2/3 */
+ (1 << (ISP_VEC_ELEMBITS - 1)) - 1, /* 1.0 */
+ 0, /* 0.0 */
+};
+
+void
+ia_css_ecd_encode(
+ struct sh_css_isp_ecd_params *to,
+ const struct ia_css_ecd_config *from,
+ unsigned size)
+{
+ (void)size;
+ to->zip_strength = from->zip_strength;
+ to->fc_strength = from->fc_strength;
+ to->fc_debias = from->fc_debias;
+}
+
+void
+ia_css_ecd_dump(
+ const struct sh_css_isp_ecd_params *ecd,
+ unsigned level);
+
+void
+ia_css_ecd_debug_dtrace(
+ const struct ia_css_ecd_config *config,
+ unsigned level)
+{
+ ia_css_debug_dtrace(level,
+ "config.zip_strength=%d, "
+ "config.fc_strength=%d, config.fc_debias=%d\n",
+ config->zip_strength,
+ config->fc_strength, config->fc_debias);
+}
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/de/de_2/ia_css_de2.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/de/de_2/ia_css_de2.host.h
new file mode 100644
index 000000000000..f7cd8448cb30
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/de/de_2/ia_css_de2.host.h
@@ -0,0 +1,38 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_DE2_HOST_H
+#define __IA_CSS_DE2_HOST_H
+
+#include "ia_css_de2_types.h"
+#include "ia_css_de2_param.h"
+
+extern const struct ia_css_ecd_config default_ecd_config;
+
+void
+ia_css_ecd_encode(
+ struct sh_css_isp_ecd_params *to,
+ const struct ia_css_ecd_config *from,
+ unsigned size);
+
+void
+ia_css_ecd_dump(
+ const struct sh_css_isp_ecd_params *ecd,
+ unsigned level);
+
+void
+ia_css_ecd_debug_dtrace(
+ const struct ia_css_ecd_config *config, unsigned level);
+
+#endif /* __IA_CSS_DE2_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/de/de_2/ia_css_de2_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/de/de_2/ia_css_de2_param.h
new file mode 100644
index 000000000000..ea2da73a4927
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/de/de_2/ia_css_de2_param.h
@@ -0,0 +1,30 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_DE2_PARAM_H
+#define __IA_CSS_DE2_PARAM_H
+
+#include "type_support.h"
+
+/* Reuse DE1 params and extend them */
+#include "../de_1.0/ia_css_de_param.h"
+
+/* DE (Demosaic) */
+struct sh_css_isp_ecd_params {
+ int32_t zip_strength;
+ int32_t fc_strength;
+ int32_t fc_debias;
+};
+
+#endif /* __IA_CSS_DE2_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/de/de_2/ia_css_de2_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/de/de_2/ia_css_de2_types.h
new file mode 100644
index 000000000000..eac1b2779857
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/de/de_2/ia_css_de2_types.h
@@ -0,0 +1,42 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_DE2_TYPES_H
+#define __IA_CSS_DE2_TYPES_H
+
+/** @file
+* CSS-API header file for Demosaicing parameters.
+*/
+
+/** Eigen Color Demosaicing configuration.
+ *
+ * ISP block: DE2
+ * (ISP1: DE1 is used.)
+ * ISP2: DE2 is used.
+ */
+struct ia_css_ecd_config {
+ uint16_t zip_strength; /**< Strength of zipper reduction.
+ u0.13, [0,8191],
+ default 5489(0.67), ineffective 0 */
+ uint16_t fc_strength; /**< Strength of false color reduction.
+ u0.13, [0,8191],
+ default 8191(almost 1.0), ineffective 0 */
+ uint16_t fc_debias; /**< Prevent color change
+ on noise or Gr/Gb imbalance.
+ u0.13, [0,8191],
+ default 0, ineffective 0 */
+};
+
+#endif /* __IA_CSS_DE2_TYPES_H */
+
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/de/de_2/ia_css_de_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/de/de_2/ia_css_de_param.h
new file mode 100644
index 000000000000..59af9523604d
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/de/de_2/ia_css_de_param.h
@@ -0,0 +1,20 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_DEX_PARAM_H
+#define __IA_CSS_DEX_PARAM_H
+
+#include "ia_css_de2_param.h"
+
+#endif /* __IA_CSS_DEX_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/de/de_2/ia_css_de_state.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/de/de_2/ia_css_de_state.h
new file mode 100644
index 000000000000..f2c65ba58983
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/de/de_2/ia_css_de_state.h
@@ -0,0 +1,21 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_DE2_STATE_H
+#define __IA_CSS_DE2_STATE_H
+
+/* Reuse DE1 states */
+#include "../de_1.0/ia_css_de_state.h"
+
+#endif /* __IA_CSS_DE2_STATE_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dp/dp_1.0/ia_css_dp.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dp/dp_1.0/ia_css_dp.host.c
new file mode 100644
index 000000000000..b1f9dc8d662d
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dp/dp_1.0/ia_css_dp.host.c
@@ -0,0 +1,132 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "ia_css_types.h"
+#include "sh_css_defs.h"
+#include "ia_css_debug.h"
+#include "sh_css_frac.h"
+
+#include "ia_css_dp.host.h"
+
+#ifdef ISP2401
+/* We use a different set of DPC configuration parameters when
+ * DPC is used before OBC and NORM. Currently these parameters
+ * are used in use cases that select both BDS and DPC.
+ */
+const struct ia_css_dp_config default_dp_10bpp_config = {
+ 1024,
+ 2048,
+ 32768,
+ 32768,
+ 32768,
+ 32768
+};
+#endif
+const struct ia_css_dp_config default_dp_config = {
+ 8192,
+ 2048,
+ 32768,
+ 32768,
+ 32768,
+ 32768
+};
+
+void
+ia_css_dp_encode(
+ struct sh_css_isp_dp_params *to,
+ const struct ia_css_dp_config *from,
+ unsigned size)
+{
+ int gain = from->gain;
+ int gr = from->gr;
+ int r = from->r;
+ int b = from->b;
+ int gb = from->gb;
+
+ (void)size;
+ to->threshold_single =
+ SH_CSS_BAYER_MAXVAL;
+ to->threshold_2adjacent =
+ uDIGIT_FITTING(from->threshold, 16, SH_CSS_BAYER_BITS);
+ to->gain =
+ uDIGIT_FITTING(from->gain, 8, SH_CSS_DP_GAIN_SHIFT);
+
+ to->coef_rr_gr =
+ uDIGIT_FITTING (gain * gr / r, 8, SH_CSS_DP_GAIN_SHIFT);
+ to->coef_rr_gb =
+ uDIGIT_FITTING (gain * gb / r, 8, SH_CSS_DP_GAIN_SHIFT);
+ to->coef_bb_gb =
+ uDIGIT_FITTING (gain * gb / b, 8, SH_CSS_DP_GAIN_SHIFT);
+ to->coef_bb_gr =
+ uDIGIT_FITTING (gain * gr / b, 8, SH_CSS_DP_GAIN_SHIFT);
+ to->coef_gr_rr =
+ uDIGIT_FITTING (gain * r / gr, 8, SH_CSS_DP_GAIN_SHIFT);
+ to->coef_gr_bb =
+ uDIGIT_FITTING (gain * b / gr, 8, SH_CSS_DP_GAIN_SHIFT);
+ to->coef_gb_bb =
+ uDIGIT_FITTING (gain * b / gb, 8, SH_CSS_DP_GAIN_SHIFT);
+ to->coef_gb_rr =
+ uDIGIT_FITTING (gain * r / gb, 8, SH_CSS_DP_GAIN_SHIFT);
+}
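
The eight coef_* values are cross-channel ratios scaled by the user gain: coef_rr_gr, for instance, relates an R pixel to its Gr neighbours, hence gain * gr / r. A short hedged sketch of that arithmetic follows; the four white-balance gains below are invented example values (u8.8, 256 meaning 1.0), not defaults from this patch, and the real code additionally passes each ratio through uDIGIT_FITTING.

/* Illustrative sketch only: raw cross-channel ratios before uDIGIT_FITTING. */
#include <stdio.h>

int main(void)
{
        int gain = 256;                           /* DPC gain, 1.0 in u8.8 */
        int gr = 256, r = 512, b = 384, gb = 256; /* invented WB gains */

        printf("coef_rr_gr = %d\n", gain * gr / r);   /* 128 */
        printf("coef_bb_gr = %d\n", gain * gr / b);   /* 170 */
        printf("coef_gb_rr = %d\n", gain * r / gb);   /* 512 */
        return 0;
}
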
+
+void
+ia_css_dp_dump(
+ const struct sh_css_isp_dp_params *dp,
+ unsigned level)
+{
+ if (!dp) return;
+ ia_css_debug_dtrace(level, "Defect Pixel Correction:\n");
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "dp_threshold_single_w_2adj_on",
+ dp->threshold_single);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "dp_threshold_2adj_w_2adj_on",
+ dp->threshold_2adjacent);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "dp_gain", dp->gain);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "dpc_coef_rr_gr", dp->coef_rr_gr);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "dpc_coef_rr_gb", dp->coef_rr_gb);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "dpc_coef_bb_gb", dp->coef_bb_gb);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "dpc_coef_bb_gr", dp->coef_bb_gr);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "dpc_coef_gr_rr", dp->coef_gr_rr);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "dpc_coef_gr_bb", dp->coef_gr_bb);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "dpc_coef_gb_bb", dp->coef_gb_bb);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "dpc_coef_gb_rr", dp->coef_gb_rr);
+}
+
+void
+ia_css_dp_debug_dtrace(
+ const struct ia_css_dp_config *config,
+ unsigned level)
+{
+ ia_css_debug_dtrace(level,
+ "config.threshold=%d, config.gain=%d\n",
+ config->threshold, config->gain);
+}
+
+void
+ia_css_init_dp_state(
+ void/*struct sh_css_isp_dp_vmem_state*/ *state,
+ size_t size)
+{
+ memset(state, 0, size);
+}
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dp/dp_1.0/ia_css_dp.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dp/dp_1.0/ia_css_dp.host.h
new file mode 100644
index 000000000000..db21814ad3db
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dp/dp_1.0/ia_css_dp.host.h
@@ -0,0 +1,47 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_DP_HOST_H
+#define __IA_CSS_DP_HOST_H
+
+#include "ia_css_dp_types.h"
+#include "ia_css_dp_param.h"
+
+extern const struct ia_css_dp_config default_dp_config;
+#ifdef ISP2401
+extern const struct ia_css_dp_config default_dp_10bpp_config;
+#endif
+
+void
+ia_css_dp_encode(
+ struct sh_css_isp_dp_params *to,
+ const struct ia_css_dp_config *from,
+ unsigned size);
+
+void
+ia_css_dp_dump(
+ const struct sh_css_isp_dp_params *dp,
+ unsigned level);
+
+void
+ia_css_dp_debug_dtrace(
+ const struct ia_css_dp_config *config,
+ unsigned level);
+
+void
+ia_css_init_dp_state(
+ void/*struct sh_css_isp_dp_vmem_state*/ *state,
+ size_t size);
+
+#endif /* __IA_CSS_DP_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dp/dp_1.0/ia_css_dp_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dp/dp_1.0/ia_css_dp_param.h
new file mode 100644
index 000000000000..fc9035a98d92
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dp/dp_1.0/ia_css_dp_param.h
@@ -0,0 +1,36 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_DP_PARAM_H
+#define __IA_CSS_DP_PARAM_H
+
+#include "type_support.h"
+#include "bnr/bnr_1.0/ia_css_bnr_param.h"
+
+/* DP (Defect Pixel Correction) */
+struct sh_css_isp_dp_params {
+ int32_t threshold_single;
+ int32_t threshold_2adjacent;
+ int32_t gain;
+ int32_t coef_rr_gr;
+ int32_t coef_rr_gb;
+ int32_t coef_bb_gb;
+ int32_t coef_bb_gr;
+ int32_t coef_gr_rr;
+ int32_t coef_gr_bb;
+ int32_t coef_gb_bb;
+ int32_t coef_gb_rr;
+};
+
+#endif /* __IA_CSS_DP_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dp/dp_1.0/ia_css_dp_state.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dp/dp_1.0/ia_css_dp_state.h
new file mode 100644
index 000000000000..f832b3697908
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dp/dp_1.0/ia_css_dp_state.h
@@ -0,0 +1,36 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_DP_STATE_H
+#define __IA_CSS_DP_STATE_H
+
+#include "type_support.h"
+
+#include "vmem.h"
+#ifndef ISP2401
+#if NEED_BDS_OTHER_THAN_1_00
+#else
+#if ENABLE_FIXED_BAYER_DS
+#endif
+#define MAX_VECTORS_PER_DP_LINE MAX_VECTORS_PER_BUF_INPUT_LINE
+#else
+#define MAX_VECTORS_PER_DP_LINE MAX_VECTORS_PER_BUF_LINE
+#endif
+
+/* DP (Defect Pixel Correction) */
+struct sh_css_isp_dp_vmem_state {
+ VMEM_ARRAY(dp_buf[4], MAX_VECTORS_PER_DP_LINE*ISP_NWAY);
+};
+
+#endif /* __IA_CSS_DP_STATE_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dp/dp_1.0/ia_css_dp_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dp/dp_1.0/ia_css_dp_types.h
new file mode 100644
index 000000000000..b5d7b6b175b6
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dp/dp_1.0/ia_css_dp_types.h
@@ -0,0 +1,50 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_DP_TYPES_H
+#define __IA_CSS_DP_TYPES_H
+
+/** @file
+* CSS-API header file for Defect Pixel Correction (DPC) parameters.
+*/
+
+
+/** Defect Pixel Correction configuration.
+ *
+ * ISP block: DPC1 (DPC after WB)
+ * DPC2 (DPC before WB)
+ * ISP1: DPC1 is used.
+ * ISP2: DPC2 is used.
+ */
+struct ia_css_dp_config {
+ ia_css_u0_16 threshold; /**< The threshold of defect pixel correction,
+ representing the permissible difference of
+ intensity between one pixel and its
+ surrounding pixels. Smaller values result
+ in more frequent pixel corrections.
+ u0.16, [0,65535],
+ default 8192, ineffective 65535 */
+	ia_css_u8_8 gain;	    /**< The sensitivity of mis-correction. The
+					 ISP will miss many defects if the value
+					 is set too large.
+ u8.8, [0,65535],
+ default 4096, ineffective 65535 */
+ uint32_t gr; /* unsigned <integer_bits>.<16-integer_bits> */
+ uint32_t r; /* unsigned <integer_bits>.<16-integer_bits> */
+ uint32_t b; /* unsigned <integer_bits>.<16-integer_bits> */
+ uint32_t gb; /* unsigned <integer_bits>.<16-integer_bits> */
+};
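+
+/* Informative example: threshold is u0.16, so the default of 8192 corresponds
+ * to 8192/65536 = 0.125; gain is u8.8, so the default of 4096 corresponds to
+ * 4096/256 = 16.0.
+ */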
+
+#endif /* __IA_CSS_DP_TYPES_H */
+
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dpc2/ia_css_dpc2.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dpc2/ia_css_dpc2.host.c
new file mode 100644
index 000000000000..bc14b85cf952
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dpc2/ia_css_dpc2.host.c
@@ -0,0 +1,65 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "ia_css_dpc2.host.h"
+#include "assert_support.h"
+
+void
+ia_css_dpc2_encode(
+ struct ia_css_isp_dpc2_params *to,
+ const struct ia_css_dpc2_config *from,
+ size_t size)
+{
+ (void)size;
+
+	assert((from->metric1 >= 0) && (from->metric1 <= METRIC1_ONE_FP));
+	assert((from->metric3 >= 0) && (from->metric3 <= METRIC3_ONE_FP));
+	assert((from->metric2 >= METRIC2_ONE_FP) &&
+			(from->metric2 < 256*METRIC2_ONE_FP));
+	assert((from->wb_gain_gr > 0) && (from->wb_gain_gr < 16*WBGAIN_ONE_FP));
+	assert((from->wb_gain_r > 0) && (from->wb_gain_r < 16*WBGAIN_ONE_FP));
+	assert((from->wb_gain_b > 0) && (from->wb_gain_b < 16*WBGAIN_ONE_FP));
+	assert((from->wb_gain_gb > 0) && (from->wb_gain_gb < 16*WBGAIN_ONE_FP));
+
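+	/* The values are already in the ISP fixed-point formats checked above
+	 * (see the *_ONE_FP constants in ia_css_dpc2_types.h), so they are
+	 * copied through unmodified.
+	 */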
+ to->metric1 = from->metric1;
+ to->metric2 = from->metric2;
+ to->metric3 = from->metric3;
+
+ to->wb_gain_gr = from->wb_gain_gr;
+ to->wb_gain_r = from->wb_gain_r;
+ to->wb_gain_b = from->wb_gain_b;
+ to->wb_gain_gb = from->wb_gain_gb;
+}
+
+/* TODO: AM: This needs a proper implementation. */
+void
+ia_css_init_dpc2_state(
+ void *state,
+ size_t size)
+{
+ (void)state;
+ (void)size;
+}
+
+#ifndef IA_CSS_NO_DEBUG
+/* TODO: AM: This needs a proper implementation. */
+void
+ia_css_dpc2_debug_dtrace(
+ const struct ia_css_dpc2_config *config,
+ unsigned level)
+{
+ (void)config;
+ (void)level;
+}
+#endif
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dpc2/ia_css_dpc2.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dpc2/ia_css_dpc2.host.h
new file mode 100644
index 000000000000..641564b4af8e
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dpc2/ia_css_dpc2.host.h
@@ -0,0 +1,40 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_DPC2_HOST_H
+#define __IA_CSS_DPC2_HOST_H
+
+#include "ia_css_dpc2_types.h"
+#include "ia_css_dpc2_param.h"
+#include "ia_css_dpc2_default.host.h"
+
+void
+ia_css_dpc2_encode(
+ struct ia_css_isp_dpc2_params *to,
+ const struct ia_css_dpc2_config *from,
+ size_t size);
+
+void
+ia_css_init_dpc2_state(
+ void *state,
+ size_t size);
+
+#ifndef IA_CSS_NO_DEBUG
+void
+ia_css_dpc2_debug_dtrace(
+ const struct ia_css_dpc2_config *config,
+ unsigned level);
+#endif
+
+#endif /* __IA_CSS_DPC2_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dpc2/ia_css_dpc2_default.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dpc2/ia_css_dpc2_default.host.c
new file mode 100644
index 000000000000..c102601cc635
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dpc2/ia_css_dpc2_default.host.c
@@ -0,0 +1,26 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "ia_css_dpc2_types.h"
+
+const struct ia_css_dpc2_config default_dpc2_config = {
+ .metric1 = 1638,
+ .metric2 = 128,
+ .metric3 = 1638,
+ .wb_gain_gr = 512,
+ .wb_gain_r = 512,
+ .wb_gain_b = 512,
+ .wb_gain_gb = 512
+};
+
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dpc2/ia_css_dpc2_default.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dpc2/ia_css_dpc2_default.host.h
new file mode 100644
index 000000000000..a1527ce3eddc
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dpc2/ia_css_dpc2_default.host.h
@@ -0,0 +1,23 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_DPC2_DEFAULT_HOST_H
+#define __IA_CSS_DPC2_DEFAULT_HOST_H
+
+#include "ia_css_dpc2_types.h"
+
+extern const struct ia_css_dpc2_config default_dpc2_config;
+
+#endif /* __IA_CSS_DPC2_DEFAULT_HOST_H */
+
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dpc2/ia_css_dpc2_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dpc2/ia_css_dpc2_param.h
new file mode 100644
index 000000000000..ef668d54fe16
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dpc2/ia_css_dpc2_param.h
@@ -0,0 +1,53 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_DPC2_PARAM_H
+#define __IA_CSS_DPC2_PARAM_H
+
+#include "type_support.h"
+#include "vmem.h" /* for VMEM_ARRAY*/
+
+
+/* 4 planes : GR, R, B, GB */
+#define NUM_PLANES 4
+
+/* ToDo: Move this to testsetup */
+#define MAX_FRAME_SIMDWIDTH 30
+
+/* 3 lines state per color plane input_line_state */
+#define DPC2_STATE_INPUT_BUFFER_HEIGHT (3 * NUM_PLANES)
+/* Each plane has width equal to half frame line */
+#define DPC2_STATE_INPUT_BUFFER_WIDTH CEIL_DIV(MAX_FRAME_SIMDWIDTH, 2)
+
+/* 1 line state per color plane for local deviation state*/
+#define DPC2_STATE_LOCAL_DEVIATION_BUFFER_HEIGHT (1 * NUM_PLANES)
+/* Each plane has width equal to half frame line */
+#define DPC2_STATE_LOCAL_DEVIATION_BUFFER_WIDTH CEIL_DIV(MAX_FRAME_SIMDWIDTH, 2)
+
+/* MINMAX state buffer stores 1 full input line (GR-R color line) */
+#define DPC2_STATE_SECOND_MINMAX_BUFFER_HEIGHT 1
+#define DPC2_STATE_SECOND_MINMAX_BUFFER_WIDTH MAX_FRAME_SIMDWIDTH
+
+
+struct ia_css_isp_dpc2_params {
+ int32_t metric1;
+ int32_t metric2;
+ int32_t metric3;
+ int32_t wb_gain_gr;
+ int32_t wb_gain_r;
+ int32_t wb_gain_b;
+ int32_t wb_gain_gb;
+};
+
+#endif /* __IA_CSS_DPC2_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dpc2/ia_css_dpc2_state.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dpc2/ia_css_dpc2_state.h
new file mode 100644
index 000000000000..cbf1e81e83a6
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dpc2/ia_css_dpc2_state.h
@@ -0,0 +1,30 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_DPC2_STATE_H
+#define __IA_CSS_DPC2_STATE_H
+
+#include "type_support.h"
+#include "vmem.h" /* for VMEM_ARRAY*/
+
+#include "ia_css_dpc2_param.h"
+
+struct sh_css_isp_dpc2_vmem_state {
+ VMEM_ARRAY(dpc2_input_lines[DPC2_STATE_INPUT_BUFFER_HEIGHT], DPC2_STATE_INPUT_BUFFER_WIDTH*ISP_NWAY);
+ VMEM_ARRAY(dpc2_local_deviations[DPC2_STATE_LOCAL_DEVIATION_BUFFER_HEIGHT], DPC2_STATE_LOCAL_DEVIATION_BUFFER_WIDTH*ISP_NWAY);
+ VMEM_ARRAY(dpc2_second_min[DPC2_STATE_SECOND_MINMAX_BUFFER_HEIGHT], DPC2_STATE_SECOND_MINMAX_BUFFER_WIDTH*ISP_NWAY);
+ VMEM_ARRAY(dpc2_second_max[DPC2_STATE_SECOND_MINMAX_BUFFER_HEIGHT], DPC2_STATE_SECOND_MINMAX_BUFFER_WIDTH*ISP_NWAY);
+};
+
+#endif /* __IA_CSS_DPC2_STATE_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dpc2/ia_css_dpc2_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dpc2/ia_css_dpc2_types.h
new file mode 100644
index 000000000000..b2c974196ce8
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dpc2/ia_css_dpc2_types.h
@@ -0,0 +1,59 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_DPC2_TYPES_H
+#define __IA_CSS_DPC2_TYPES_H
+
+/** @file
+* CSS-API header file for Defect Pixel Correction 2 (DPC2) parameters.
+*/
+
+#include "type_support.h"
+
+/**@{*/
+/** Floating point constants for different metrics. */
+#define METRIC1_ONE_FP (1<<12)
+#define METRIC2_ONE_FP (1<<5)
+#define METRIC3_ONE_FP (1<<12)
+#define WBGAIN_ONE_FP (1<<9)
+/**@}*/
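+
+/* For example, with WBGAIN_ONE_FP = (1<<9) a white-balance gain of 1.0 is
+ * encoded as 512, and with METRIC2_ONE_FP = (1<<5) a metric2 value of 1.0 is
+ * encoded as 32.
+ */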
+
+/**@{*/
+/** Defect Pixel Correction 2 configuration.
+ *
+ * \brief DPC2 public parameters.
+ * \details Struct with all parameters for the Defect Pixel Correction 2
+ * kernel that can be set from the CSS API.
+ *
+ * ISP block: DPC1 (DPC after WB)
+ * DPC2 (DPC before WB)
+ * ISP1: DPC1 is used.
+ * ISP2: DPC2 is used.
+ *
+ */
+struct ia_css_dpc2_config {
+ /**@{*/
+ int32_t metric1;
+ int32_t metric2;
+ int32_t metric3;
+ int32_t wb_gain_gr;
+ int32_t wb_gain_r;
+ int32_t wb_gain_b;
+ int32_t wb_gain_gb;
+ /**@}*/
+};
+/**@}*/
+
+#endif /* __IA_CSS_DPC2_TYPES_H */
+
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dvs/dvs_1.0/ia_css_dvs.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dvs/dvs_1.0/ia_css_dvs.host.c
new file mode 100644
index 000000000000..955adc4d6ab0
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dvs/dvs_1.0/ia_css_dvs.host.c
@@ -0,0 +1,306 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "ia_css_frame_public.h"
+#define IA_CSS_INCLUDE_CONFIGURATIONS
+#include "ia_css_isp_configs.h"
+
+#include "ia_css_types.h"
+#include "ia_css_host_data.h"
+#include "sh_css_param_dvs.h"
+#include "sh_css_params.h"
+#include "ia_css_binary.h"
+#include "ia_css_debug.h"
+#include "memory_access.h"
+#include "assert_support.h"
+
+#include "ia_css_dvs.host.h"
+
+static const struct ia_css_dvs_configuration default_config = {
+ .info = (struct ia_css_frame_info *)NULL,
+};
+
+void
+ia_css_dvs_config(
+ struct sh_css_isp_dvs_isp_config *to,
+ const struct ia_css_dvs_configuration *from,
+ unsigned size)
+{
+ (void)size;
+ to->num_horizontal_blocks =
+ DVS_NUM_BLOCKS_X(from->info->res.width);
+ to->num_vertical_blocks =
+ DVS_NUM_BLOCKS_Y(from->info->res.height);
+}
+
+void
+ia_css_dvs_configure(
+ const struct ia_css_binary *binary,
+ const struct ia_css_frame_info *info)
+{
+ struct ia_css_dvs_configuration config = default_config;
+
+ config.info = info;
+
+ ia_css_configure_dvs(binary, &config);
+}
+
+static void
+convert_coords_to_ispparams(
+ struct ia_css_host_data *gdc_warp_table,
+ const struct ia_css_dvs_6axis_config *config,
+ unsigned int i_stride,
+ unsigned int o_width,
+ unsigned int o_height,
+ unsigned int uv_flag)
+{
+ unsigned int i, j;
+#ifndef ISP2401
+ /* Coverity CID 298073 - initialize */
+#endif
+ gdc_warp_param_mem_t s = { 0 };
+ unsigned int x00, x01, x10, x11,
+ y00, y01, y10, y11;
+
+ unsigned int xmin, ymin, xmax, ymax;
+ unsigned int topleft_x, topleft_y, bottom_x, bottom_y,
+ topleft_x_frac, topleft_y_frac;
+ unsigned int dvs_interp_envelope = (DVS_GDC_INTERP_METHOD == HRT_GDC_BLI_MODE ?
+ DVS_GDC_BLI_INTERP_ENVELOPE : DVS_GDC_BCI_INTERP_ENVELOPE);
+
+	/* Number of blocks per height and width. Round num_x up to blockdim_x;
+	 * for the Y0Y1 block (uv_flag == 0) round it up to an even value. */
+	unsigned int num_blocks_y = (uv_flag ? DVS_NUM_BLOCKS_Y_CHROMA(o_height) : DVS_NUM_BLOCKS_Y(o_height));
+	unsigned int num_blocks_x = (uv_flag ? DVS_NUM_BLOCKS_X_CHROMA(o_width) : DVS_NUM_BLOCKS_X(o_width));
+
+
+ unsigned int in_stride = i_stride * DVS_INPUT_BYTES_PER_PIXEL;
+ unsigned width, height;
+ unsigned int *xbuff = NULL;
+ unsigned int *ybuff = NULL;
+ struct gdc_warp_param_mem_s *ptr;
+
+ assert(config != NULL);
+ assert(gdc_warp_table != NULL);
+ assert(gdc_warp_table->address != NULL);
+
+ ptr = (struct gdc_warp_param_mem_s *)gdc_warp_table->address;
+
+ ptr += (2 * uv_flag); /* format is Y0 Y1 UV, so UV starts at 3rd position */
+
+	if (uv_flag == 0) {
+		xbuff = config->xcoords_y;
+		ybuff = config->ycoords_y;
+		width = config->width_y;
+		height = config->height_y;
+	} else {
+		xbuff = config->xcoords_uv;
+		ybuff = config->ycoords_uv;
+		width = config->width_uv;
+		height = config->height_uv;
+	}
+
+ IA_CSS_LOG("blockdim_x %d blockdim_y %d",
+ DVS_BLOCKDIM_X, DVS_BLOCKDIM_Y_LUMA >> uv_flag);
+	IA_CSS_LOG("num_blocks_x %d num_blocks_y %d", num_blocks_x, num_blocks_y);
+ IA_CSS_LOG("width %d height %d", width, height);
+
+	/* The width and height of the provided morphing table should be 1 more
+	 * than the number of blocks. */
+	assert(width == num_blocks_x + 1);
+	assert(height == num_blocks_y + 1);
+
+ for (j = 0; j < num_blocks_y; j++) {
+ for (i = 0; i < num_blocks_x; i++) {
+
+ x00 = xbuff[j * width + i];
+ x01 = xbuff[j * width + (i+1)];
+ x10 = xbuff[(j+1) * width + i];
+ x11 = xbuff[(j+1) * width + (i+1)];
+
+ y00 = ybuff[j * width + i];
+ y01 = ybuff[j * width + (i+1)];
+ y10 = ybuff[(j+1) * width + i];
+ y11 = ybuff[(j+1) * width + (i+1)];
+
+ xmin = min(x00, x10);
+ xmax = max(x01, x11);
+ ymin = min(y00, y01);
+ ymax = max(y10, y11);
+
+ /* Assert that right column's X is greater */
+			assert(x01 >= xmin);
+			assert(x11 >= xmin);
+			/* Assert that bottom row's Y is greater */
+			assert(y10 >= ymin);
+			assert(y11 >= ymin);
+
+ topleft_y = ymin >> DVS_COORD_FRAC_BITS;
+ topleft_x = ((xmin >> DVS_COORD_FRAC_BITS)
+ >> XMEM_ALIGN_LOG2)
+ << (XMEM_ALIGN_LOG2);
+ s.in_addr_offset = topleft_y * in_stride + topleft_x;
+
+ /* similar to topleft_y calculation, but round up if ymax
+ * has any fraction bits */
+ bottom_y = CEIL_DIV(ymax, 1 << DVS_COORD_FRAC_BITS);
+ s.in_block_height = bottom_y - topleft_y + dvs_interp_envelope;
+
+ bottom_x = CEIL_DIV(xmax, 1 << DVS_COORD_FRAC_BITS);
+ s.in_block_width = bottom_x - topleft_x + dvs_interp_envelope;
+
+ topleft_x_frac = topleft_x << (DVS_COORD_FRAC_BITS);
+ topleft_y_frac = topleft_y << (DVS_COORD_FRAC_BITS);
+
+ s.p0_x = x00 - topleft_x_frac;
+ s.p1_x = x01 - topleft_x_frac;
+ s.p2_x = x10 - topleft_x_frac;
+ s.p3_x = x11 - topleft_x_frac;
+
+ s.p0_y = y00 - topleft_y_frac;
+ s.p1_y = y01 - topleft_y_frac;
+ s.p2_y = y10 - topleft_y_frac;
+ s.p3_y = y11 - topleft_y_frac;
+
+			// The block should fit within the bounding box.
+ assert(s.p0_x < (s.in_block_width << DVS_COORD_FRAC_BITS));
+ assert(s.p1_x < (s.in_block_width << DVS_COORD_FRAC_BITS));
+ assert(s.p2_x < (s.in_block_width << DVS_COORD_FRAC_BITS));
+ assert(s.p3_x < (s.in_block_width << DVS_COORD_FRAC_BITS));
+ assert(s.p0_y < (s.in_block_height << DVS_COORD_FRAC_BITS));
+ assert(s.p1_y < (s.in_block_height << DVS_COORD_FRAC_BITS));
+ assert(s.p2_y < (s.in_block_height << DVS_COORD_FRAC_BITS));
+ assert(s.p3_y < (s.in_block_height << DVS_COORD_FRAC_BITS));
+
+ // block size should be greater than zero.
+ assert(s.p0_x < s.p1_x);
+ assert(s.p2_x < s.p3_x);
+ assert(s.p0_y < s.p2_y);
+ assert(s.p1_y < s.p3_y);
+
+#if 0
+ printf("j: %d\ti:%d\n", j, i);
+ printf("offset: %d\n", s.in_addr_offset);
+ printf("p0_x: %d\n", s.p0_x);
+ printf("p0_y: %d\n", s.p0_y);
+ printf("p1_x: %d\n", s.p1_x);
+ printf("p1_y: %d\n", s.p1_y);
+ printf("p2_x: %d\n", s.p2_x);
+ printf("p2_y: %d\n", s.p2_y);
+ printf("p3_x: %d\n", s.p3_x);
+ printf("p3_y: %d\n", s.p3_y);
+
+ printf("p0_x_nofrac[0]: %d\n", s.p0_x>>DVS_COORD_FRAC_BITS);
+ printf("p0_y_nofrac[1]: %d\n", s.p0_y>>DVS_COORD_FRAC_BITS);
+ printf("p1_x_nofrac[2]: %d\n", s.p1_x>>DVS_COORD_FRAC_BITS);
+ printf("p1_y_nofrac[3]: %d\n", s.p1_y>>DVS_COORD_FRAC_BITS);
+ printf("p2_x_nofrac[0]: %d\n", s.p2_x>>DVS_COORD_FRAC_BITS);
+ printf("p2_y_nofrac[1]: %d\n", s.p2_y>>DVS_COORD_FRAC_BITS);
+ printf("p3_x_nofrac[2]: %d\n", s.p3_x>>DVS_COORD_FRAC_BITS);
+ printf("p3_y_nofrac[3]: %d\n", s.p3_y>>DVS_COORD_FRAC_BITS);
+ printf("\n");
+#endif
+
+ *ptr = s;
+
+ // storage format:
+ // Y0 Y1 UV0 Y2 Y3 UV1
+			/* For chroma (uv_flag set) advance by 3 to the next UV
+			 * entry; for luma advance by 1, or by 2 when i is odd,
+			 * to skip over the UV position. */
+ if (uv_flag)
+ ptr += 3;
+ else
+ ptr += (1 + (i&1));
+ }
+ }
+}
+
+struct ia_css_host_data *
+convert_allocate_dvs_6axis_config(
+ const struct ia_css_dvs_6axis_config *dvs_6axis_config,
+ const struct ia_css_binary *binary,
+ const struct ia_css_frame_info *dvs_in_frame_info)
+{
+ unsigned int i_stride;
+ unsigned int o_width;
+ unsigned int o_height;
+ struct ia_css_host_data *me;
+ struct gdc_warp_param_mem_s *isp_data_ptr;
+
+ assert(binary != NULL);
+ assert(dvs_6axis_config != NULL);
+ assert(dvs_in_frame_info != NULL);
+
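+	/* The warp table is stored as Y0 Y1 UV triplets (see
+	 * convert_coords_to_ispparams()), i.e. 3 entries for every 2 luma
+	 * entries, which is assumed to be the reason for the (size / 2) * 3
+	 * allocation below.
+	 */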
+ me = ia_css_host_data_allocate((size_t)((DVS_6AXIS_BYTES(binary) / 2) * 3));
+
+ if (!me)
+ return NULL;
+
+	/* DVS only supports input frames of YUV420 or NV12. Fail for all other cases. */
+ assert((dvs_in_frame_info->format == IA_CSS_FRAME_FORMAT_NV12)
+ || (dvs_in_frame_info->format == IA_CSS_FRAME_FORMAT_YUV420));
+
+ isp_data_ptr = (struct gdc_warp_param_mem_s *)me->address;
+
+ i_stride = dvs_in_frame_info->padded_width;
+
+ o_width = binary->out_frame_info[0].res.width;
+ o_height = binary->out_frame_info[0].res.height;
+
+ /* Y plane */
+ convert_coords_to_ispparams(me, dvs_6axis_config,
+ i_stride, o_width, o_height, 0);
+
+ if (dvs_in_frame_info->format == IA_CSS_FRAME_FORMAT_YUV420) {
+		/* YUV420 has half the stride for the U/V plane */
+		i_stride /= 2;
+ }
+
+ /* UV plane (packed inside the y plane) */
+ convert_coords_to_ispparams(me, dvs_6axis_config,
+ i_stride, o_width/2, o_height/2, 1);
+
+ return me;
+}
+
+enum ia_css_err
+store_dvs_6axis_config(
+ const struct ia_css_dvs_6axis_config *dvs_6axis_config,
+ const struct ia_css_binary *binary,
+ const struct ia_css_frame_info *dvs_in_frame_info,
+ hrt_vaddress ddr_addr_y)
+{
+
+ struct ia_css_host_data *me;
+ assert(dvs_6axis_config != NULL);
+ assert(ddr_addr_y != mmgr_NULL);
+ assert(dvs_in_frame_info != NULL);
+
+ me = convert_allocate_dvs_6axis_config(dvs_6axis_config,
+ binary,
+ dvs_in_frame_info);
+
+ if (!me) {
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY);
+ return IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+ }
+
+ ia_css_params_store_ia_css_host_data(
+ ddr_addr_y,
+ me);
+ ia_css_host_data_free(me);
+
+ return IA_CSS_SUCCESS;
+}
+
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dvs/dvs_1.0/ia_css_dvs.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dvs/dvs_1.0/ia_css_dvs.host.h
new file mode 100644
index 000000000000..2f513e29d88c
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dvs/dvs_1.0/ia_css_dvs.host.h
@@ -0,0 +1,60 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_DVS_HOST_H
+#define __IA_CSS_DVS_HOST_H
+
+#include "ia_css_frame_public.h"
+#include "ia_css_binary.h"
+#include "sh_css_params.h"
+
+#include "ia_css_types.h"
+#include "ia_css_dvs_types.h"
+#include "ia_css_dvs_param.h"
+
+/* For bilinear interpolation, we need to add +1 to the input block height calculation.
+ * For bicubic interpolation, we need to add +3 instead. */
+#define DVS_GDC_BLI_INTERP_ENVELOPE 1
+#define DVS_GDC_BCI_INTERP_ENVELOPE 3
+
+void
+ia_css_dvs_config(
+ struct sh_css_isp_dvs_isp_config *to,
+ const struct ia_css_dvs_configuration *from,
+ unsigned size);
+
+void
+ia_css_dvs_configure(
+ const struct ia_css_binary *binary,
+ const struct ia_css_frame_info *from);
+
+void
+convert_dvs_6axis_config(
+ struct ia_css_isp_parameters *params,
+ const struct ia_css_binary *binary);
+
+struct ia_css_host_data *
+convert_allocate_dvs_6axis_config(
+ const struct ia_css_dvs_6axis_config *dvs_6axis_config,
+ const struct ia_css_binary *binary,
+ const struct ia_css_frame_info *dvs_in_frame_info);
+
+enum ia_css_err
+store_dvs_6axis_config(
+ const struct ia_css_dvs_6axis_config *dvs_6axis_config,
+ const struct ia_css_binary *binary,
+ const struct ia_css_frame_info *dvs_in_frame_info,
+ hrt_vaddress ddr_addr_y);
+
+#endif /* __IA_CSS_DVS_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dvs/dvs_1.0/ia_css_dvs_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dvs/dvs_1.0/ia_css_dvs_param.h
new file mode 100644
index 000000000000..4d0abfe4d0fd
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dvs/dvs_1.0/ia_css_dvs_param.h
@@ -0,0 +1,39 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_DVS_PARAM_H
+#define __IA_CSS_DVS_PARAM_H
+
+#include <type_support.h>
+#ifdef ISP2401
+
+#if !defined(ENABLE_TPROXY) && !defined(ENABLE_CRUN_FOR_TD) && !defined(PARAMBIN_GENERATION)
+#endif
+#include "dma.h"
+#ifdef ISP2401
+#endif /* !defined(ENABLE_TPROXY) && !defined(ENABLE_CRUN_FOR_TD) */
+
+#endif
+#include "uds/uds_1.0/ia_css_uds_param.h"
+
+#ifdef ISP2401
+
+#endif
+/** DVS ISP configuration */
+struct sh_css_isp_dvs_isp_config {
+ uint32_t num_horizontal_blocks;
+ uint32_t num_vertical_blocks;
+};
+
+#endif /* __IA_CSS_DVS_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dvs/dvs_1.0/ia_css_dvs_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dvs/dvs_1.0/ia_css_dvs_types.h
new file mode 100644
index 000000000000..216c54a21ea5
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/dvs/dvs_1.0/ia_css_dvs_types.h
@@ -0,0 +1,30 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_DVS_TYPES_H
+#define __IA_CSS_DVS_TYPES_H
+
+/** DVS frame
+ *
+ * ISP block: dvs frame
+ */
+
+#include "ia_css_frame_public.h"
+
+struct ia_css_dvs_configuration {
+ const struct ia_css_frame_info *info;
+};
+
+#endif /* __IA_CSS_DVS_TYPES_H */
+
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/eed1_8/ia_css_eed1_8.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/eed1_8/ia_css_eed1_8.host.c
new file mode 100644
index 000000000000..682f8b709ff9
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/eed1_8/ia_css_eed1_8.host.c
@@ -0,0 +1,321 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef IA_CSS_NO_DEBUG
+#include "ia_css_debug.h"
+#endif
+
+#include "type_support.h"
+#include "assert_support.h"
+#include "math_support.h" /* for min and max */
+
+#include "ia_css_eed1_8.host.h"
+
+/* WARNING1: The number of inv points should be less than or equal to 16,
+ * due to implementation limitation. See kernel design document
+ * for more details.
+ * WARNING2: Do not modify the number of inv points without correcting
+ * the EED1_8 kernel implementation assumptions.
+ */
+#define NUMBER_OF_CHGRINV_POINTS 15
+#define NUMBER_OF_TCINV_POINTS 9
+#define NUMBER_OF_FCINV_POINTS 9
+
+const int16_t chgrinv_x[NUMBER_OF_CHGRINV_POINTS] = {
+0, 16, 64, 144, 272, 448, 672, 976,
+1376, 1888, 2528, 3312, 4256, 5376, 6688};
+
+const int16_t chgrinv_a[NUMBER_OF_CHGRINV_POINTS] = {
+-7171, -256, -29, -3456, -1071, -475, -189, -102,
+-48, -38, -10, -9, -7, -6, 0};
+
+const int16_t chgrinv_b[NUMBER_OF_CHGRINV_POINTS] = {
+8191, 1021, 256, 114, 60, 37, 24, 17,
+12, 9, 6, 5, 4, 3, 2};
+
+const int16_t chgrinv_c[NUMBER_OF_CHGRINV_POINTS] = {
+1, 1, 1, 0, 0, 0, 0, 0,
+0, 0, 0, 0, 0, 0, 0};
+
+const int16_t tcinv_x[NUMBER_OF_TCINV_POINTS] = {
+0, 4, 11, 23, 42, 68, 102, 148, 205};
+
+const int16_t tcinv_a[NUMBER_OF_TCINV_POINTS] = {
+-6364, -631, -126, -34, -13, -6, -4452, -2156, 0};
+
+const int16_t tcinv_b[NUMBER_OF_TCINV_POINTS] = {
+8191, 1828, 726, 352, 197, 121, 80, 55, 40};
+
+const int16_t tcinv_c[NUMBER_OF_TCINV_POINTS] = {
+1, 1, 1, 1, 1, 1, 0, 0, 0};
+
+const int16_t fcinv_x[NUMBER_OF_FCINV_POINTS] = {
+0, 80, 216, 456, 824, 1344, 2040, 2952, 4096};
+
+const int16_t fcinv_a[NUMBER_OF_FCINV_POINTS] = {
+-5244, -486, -86, -2849, -961, -400, -180, -86, 0};
+
+const int16_t fcinv_b[NUMBER_OF_FCINV_POINTS] = {
+8191, 1637, 607, 287, 159, 98, 64, 44, 32};
+
+const int16_t fcinv_c[NUMBER_OF_FCINV_POINTS] = {
+1, 1, 1, 0, 0, 0, 0, 0, 0};
+
+
+void
+ia_css_eed1_8_vmem_encode(
+ struct eed1_8_vmem_params *to,
+ const struct ia_css_eed1_8_config *from,
+ size_t size)
+{
+ unsigned i, j, base;
+ const unsigned total_blocks = 4;
+ const unsigned shuffle_block = 16;
+
+ (void)size;
+
+ /* Init */
+ for (i = 0; i < ISP_VEC_NELEMS; i++) {
+ to->e_dew_enh_x[0][i] = 0;
+ to->e_dew_enh_y[0][i] = 0;
+ to->e_dew_enh_a[0][i] = 0;
+ to->e_dew_enh_f[0][i] = 0;
+ to->chgrinv_x[0][i] = 0;
+ to->chgrinv_a[0][i] = 0;
+ to->chgrinv_b[0][i] = 0;
+ to->chgrinv_c[0][i] = 0;
+ to->tcinv_x[0][i] = 0;
+ to->tcinv_a[0][i] = 0;
+ to->tcinv_b[0][i] = 0;
+ to->tcinv_c[0][i] = 0;
+ to->fcinv_x[0][i] = 0;
+ to->fcinv_a[0][i] = 0;
+ to->fcinv_b[0][i] = 0;
+ to->fcinv_c[0][i] = 0;
+ }
+
+ /* Constraints on dew_enhance_seg_x and dew_enhance_seg_y:
+	 * - values should be greater than or equal to 0.
+ * - values should be ascending.
+ * - value of index zero is equal to 0.
+ */
+
+ /* Checking constraints: */
+ /* TODO: investigate if an assert is the right way to report that
+ * the constraints are violated.
+ */
+ for (j = 0; j < IA_CSS_NUMBER_OF_DEW_ENHANCE_SEGMENTS; j++) {
+ assert(from->dew_enhance_seg_x[j] > -1);
+ assert(from->dew_enhance_seg_y[j] > -1);
+ }
+
+ for (j = 1; j < IA_CSS_NUMBER_OF_DEW_ENHANCE_SEGMENTS; j++) {
+ assert(from->dew_enhance_seg_x[j] > from->dew_enhance_seg_x[j-1]);
+ assert(from->dew_enhance_seg_y[j] > from->dew_enhance_seg_y[j-1]);
+ }
+
+ assert(from->dew_enhance_seg_x[0] == 0);
+ assert(from->dew_enhance_seg_y[0] == 0);
+
+ /* Constraints on chgrinv_x, tcinv_x and fcinv_x:
+	 * - values should be greater than or equal to 0.
+ * - values should be ascending.
+ * - value of index zero is equal to 0.
+ */
+ assert(chgrinv_x[0] == 0);
+ assert(tcinv_x[0] == 0);
+ assert(fcinv_x[0] == 0);
+
+ for (j = 1; j < NUMBER_OF_CHGRINV_POINTS; j++) {
+ assert(chgrinv_x[j] > chgrinv_x[j-1]);
+ }
+
+ for (j = 1; j < NUMBER_OF_TCINV_POINTS; j++) {
+ assert(tcinv_x[j] > tcinv_x[j-1]);
+ }
+
+ for (j = 1; j < NUMBER_OF_FCINV_POINTS; j++) {
+ assert(fcinv_x[j] > fcinv_x[j-1]);
+ }
+
+	/* The implementation of the 1/x calculation is based on the availability
+	 * of the OP_vec_shuffle16 operation.
+	 * A 64-element vector is split up into 4 blocks of 16 elements. Each array is copied to
+	 * the vector 4 times (starting at offsets 0, 16, 32 and 48). All array elements are copied or
+	 * initialised as described in the KFS. The remaining elements of the vector are set to 0.
+ */
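+	/* For example, with ISP_VEC_NELEMS == 64 the 15 chgrinv points are
+	 * written to lanes 0..14, 16..30, 32..46 and 48..62; the remaining
+	 * lanes keep the zeros written in the init loop above.
+	 */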
+ /* TODO: guard this code with above assumptions */
+ for(i = 0; i < total_blocks; i++) {
+ base = shuffle_block * i;
+
+ for (j = 0; j < IA_CSS_NUMBER_OF_DEW_ENHANCE_SEGMENTS; j++) {
+ to->e_dew_enh_x[0][base + j] = min(max(from->dew_enhance_seg_x[j], 0), 8191);
+ to->e_dew_enh_y[0][base + j] = min(max(from->dew_enhance_seg_y[j], -8192), 8191);
+ }
+
+ for (j = 0; j < (IA_CSS_NUMBER_OF_DEW_ENHANCE_SEGMENTS - 1); j++) {
+ to->e_dew_enh_a[0][base + j] = min(max(from->dew_enhance_seg_slope[j], -8192), 8191);
+ /* Convert dew_enhance_seg_exp to flag:
+ * 0 -> 0
+ * 1...13 -> 1
+ */
+ to->e_dew_enh_f[0][base + j] = (min(max(from->dew_enhance_seg_exp[j], 0), 13) > 0);
+ }
+
+ /* Hard-coded to 0, in order to be able to handle out of
+ * range input in the same way as the other segments.
+ * See KFS for more details.
+ */
+ to->e_dew_enh_a[0][base + (IA_CSS_NUMBER_OF_DEW_ENHANCE_SEGMENTS - 1)] = 0;
+ to->e_dew_enh_f[0][base + (IA_CSS_NUMBER_OF_DEW_ENHANCE_SEGMENTS - 1)] = 0;
+
+ for (j = 0; j < NUMBER_OF_CHGRINV_POINTS; j++) {
+ to->chgrinv_x[0][base + j] = chgrinv_x[j];
+ to->chgrinv_a[0][base + j] = chgrinv_a[j];
+ to->chgrinv_b[0][base + j] = chgrinv_b[j];
+ to->chgrinv_c[0][base + j] = chgrinv_c[j];
+ }
+
+ for (j = 0; j < NUMBER_OF_TCINV_POINTS; j++) {
+ to->tcinv_x[0][base + j] = tcinv_x[j];
+ to->tcinv_a[0][base + j] = tcinv_a[j];
+ to->tcinv_b[0][base + j] = tcinv_b[j];
+ to->tcinv_c[0][base + j] = tcinv_c[j];
+ }
+
+ for (j = 0; j < NUMBER_OF_FCINV_POINTS; j++) {
+ to->fcinv_x[0][base + j] = fcinv_x[j];
+ to->fcinv_a[0][base + j] = fcinv_a[j];
+ to->fcinv_b[0][base + j] = fcinv_b[j];
+ to->fcinv_c[0][base + j] = fcinv_c[j];
+ }
+ }
+}
+
+
+void
+ia_css_eed1_8_encode(
+ struct eed1_8_dmem_params *to,
+ const struct ia_css_eed1_8_config *from,
+ size_t size)
+{
+ int i;
+ int min_exp = 0;
+
+ (void)size;
+
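+	/* Copy the absolute parameters and precompute the differences
+	 * (fc_slope and the *_diff fields) that the ISP kernel consumes.
+	 */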
+ to->rbzp_strength = from->rbzp_strength;
+
+ to->fcstrength = from->fcstrength;
+ to->fcthres_0 = from->fcthres_0;
+ to->fc_sat_coef = from->fc_sat_coef;
+ to->fc_coring_prm = from->fc_coring_prm;
+ to->fc_slope = from->fcthres_1 - from->fcthres_0;
+
+ to->aerel_thres0 = from->aerel_thres0;
+ to->aerel_gain0 = from->aerel_gain0;
+ to->aerel_thres_diff = from->aerel_thres1 - from->aerel_thres0;
+ to->aerel_gain_diff = from->aerel_gain1 - from->aerel_gain0;
+
+ to->derel_thres0 = from->derel_thres0;
+ to->derel_gain0 = from->derel_gain0;
+ to->derel_thres_diff = (from->derel_thres1 - from->derel_thres0);
+ to->derel_gain_diff = (from->derel_gain1 - from->derel_gain0);
+
+ to->coring_pos0 = from->coring_pos0;
+ to->coring_pos_diff = (from->coring_pos1 - from->coring_pos0);
+ to->coring_neg0 = from->coring_neg0;
+ to->coring_neg_diff = (from->coring_neg1 - from->coring_neg0);
+
+ /* Note: (ISP_VEC_ELEMBITS -1)
+	 * TODO: currently the testbench does not support the use of
+	 * ISP_VEC_ELEMBITS. Investigate how to fix this.
+ */
+ to->gain_exp = (13 - from->gain_exp);
+ to->gain_pos0 = from->gain_pos0;
+ to->gain_pos_diff = (from->gain_pos1 - from->gain_pos0);
+ to->gain_neg0 = from->gain_neg0;
+ to->gain_neg_diff = (from->gain_neg1 - from->gain_neg0);
+
+ to->margin_pos0 = from->pos_margin0;
+ to->margin_pos_diff = (from->pos_margin1 - from->pos_margin0);
+ to->margin_neg0 = from->neg_margin0;
+ to->margin_neg_diff = (from->neg_margin1 - from->neg_margin0);
+
+ /* Encode DEWEnhance exp (e_dew_enh_asr) */
+ for (i = 0; i < (IA_CSS_NUMBER_OF_DEW_ENHANCE_SEGMENTS - 1); i++) {
+ min_exp = max(min_exp, from->dew_enhance_seg_exp[i]);
+ }
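+	/* Note: despite its name, min_exp now holds the largest segment
+	 * exponent found in the loop above.
+	 */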
+ to->e_dew_enh_asr = 13 - min(max(min_exp, 0), 13);
+
+ to->dedgew_max = from->dedgew_max;
+}
+
+
+void
+ia_css_init_eed1_8_state(
+ void *state,
+ size_t size)
+{
+ memset(state, 0, size);
+}
+
+
+#ifndef IA_CSS_NO_DEBUG
+void
+ia_css_eed1_8_debug_dtrace(
+ const struct ia_css_eed1_8_config *eed,
+ unsigned level)
+{
+ if (!eed)
+ return;
+
+ ia_css_debug_dtrace(level, "Edge Enhancing Demosaic 1.8:\n");
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "rbzp_strength", eed->rbzp_strength);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "fcstrength", eed->fcstrength);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "fcthres_0", eed->fcthres_0);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "fcthres_1", eed->fcthres_1);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "fc_sat_coef", eed->fc_sat_coef);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "fc_coring_prm", eed->fc_coring_prm);
+
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "aerel_thres0", eed->aerel_thres0);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "aerel_gain0", eed->aerel_gain0);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "aerel_thres1", eed->aerel_thres1);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "aerel_gain1", eed->aerel_gain1);
+
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "derel_thres0", eed->derel_thres0);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "derel_gain0", eed->derel_gain0);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "derel_thres1", eed->derel_thres1);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "derel_gain1", eed->derel_gain1);
+
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "coring_pos0", eed->coring_pos0);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "coring_pos1", eed->coring_pos1);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "coring_neg0", eed->coring_neg0);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "coring_neg1", eed->coring_neg1);
+
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "gain_exp", eed->gain_exp);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "gain_pos0", eed->gain_pos0);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "gain_pos1", eed->gain_pos1);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "gain_neg0", eed->gain_neg0);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "gain_neg1", eed->gain_neg1);
+
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "pos_margin0", eed->pos_margin0);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "pos_margin1", eed->pos_margin1);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "neg_margin0", eed->neg_margin0);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "neg_margin1", eed->neg_margin1);
+
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n", "dedgew_max", eed->dedgew_max);
+}
+#endif
+
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/eed1_8/ia_css_eed1_8.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/eed1_8/ia_css_eed1_8.host.h
new file mode 100644
index 000000000000..355ff13273b0
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/eed1_8/ia_css_eed1_8.host.h
@@ -0,0 +1,46 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_EED1_8_HOST_H
+#define __IA_CSS_EED1_8_HOST_H
+
+#include "ia_css_eed1_8_types.h"
+#include "ia_css_eed1_8_param.h"
+#include "ia_css_eed1_8_default.host.h"
+
+void
+ia_css_eed1_8_vmem_encode(
+ struct eed1_8_vmem_params *to,
+ const struct ia_css_eed1_8_config *from,
+ size_t size);
+
+void
+ia_css_eed1_8_encode(
+ struct eed1_8_dmem_params *to,
+ const struct ia_css_eed1_8_config *from,
+ size_t size);
+
+void
+ia_css_init_eed1_8_state(
+ void *state,
+ size_t size);
+
+#ifndef IA_CSS_NO_DEBUG
+void
+ia_css_eed1_8_debug_dtrace(
+ const struct ia_css_eed1_8_config *config,
+ unsigned level);
+#endif
+
+#endif /* __IA_CSS_EED1_8_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/eed1_8/ia_css_eed1_8_default.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/eed1_8/ia_css_eed1_8_default.host.c
new file mode 100644
index 000000000000..3622719dafa5
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/eed1_8/ia_css_eed1_8_default.host.c
@@ -0,0 +1,94 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "ia_css_eed1_8_types.h"
+
+/* The default values for the kernel parameters are based on
+ * ISP261 CSS API public parameter list_all.xlsx from 12-09-2014
+ * The parameter list is available on the ISP261 sharepoint
+ */
+
+/* Default kernel parameters. */
+const struct ia_css_eed1_8_config default_eed1_8_config = {
+ .rbzp_strength = 5489,
+ .fcstrength = 6554,
+ .fcthres_0 = 0,
+ .fcthres_1 = 0,
+ .fc_sat_coef = 8191,
+ .fc_coring_prm = 128,
+ .aerel_thres0 = 0,
+ .aerel_gain0 = 8191,
+ .aerel_thres1 = 16,
+ .aerel_gain1 = 20,
+ .derel_thres0 = 1229,
+ .derel_gain0 = 1,
+ .derel_thres1 = 819,
+ .derel_gain1 = 1,
+ .coring_pos0 = 0,
+ .coring_pos1 = 0,
+ .coring_neg0 = 0,
+ .coring_neg1 = 0,
+ .gain_exp = 2,
+ .gain_pos0 = 6144,
+ .gain_pos1 = 2048,
+ .gain_neg0 = 2048,
+ .gain_neg1 = 6144,
+ .pos_margin0 = 1475,
+ .pos_margin1 = 1475,
+ .neg_margin0 = 1475,
+ .neg_margin1 = 1475,
+ .dew_enhance_seg_x = {
+ 0,
+ 64,
+ 272,
+ 688,
+ 1376,
+ 2400,
+ 3840,
+ 5744,
+ 8191
+ },
+ .dew_enhance_seg_y = {
+ 0,
+ 144,
+ 480,
+ 1040,
+ 1852,
+ 2945,
+ 4357,
+ 6094,
+ 8191
+ },
+ .dew_enhance_seg_slope = {
+ 4608,
+ 3308,
+ 2757,
+ 2417,
+ 2186,
+ 8033,
+ 7473,
+ 7020
+ },
+ .dew_enhance_seg_exp = {
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 0,
+ 0,
+ 0
+ },
+ .dedgew_max = 6144
+};
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/eed1_8/ia_css_eed1_8_default.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/eed1_8/ia_css_eed1_8_default.host.h
new file mode 100644
index 000000000000..782f739ca8b5
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/eed1_8/ia_css_eed1_8_default.host.h
@@ -0,0 +1,22 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_EED1_8_DEFAULT_HOST_H
+#define __IA_CSS_EED1_8_DEFAULT_HOST_H
+
+#include "ia_css_eed1_8_types.h"
+
+extern const struct ia_css_eed1_8_config default_eed1_8_config;
+
+#endif /* __IA_CSS_EED1_8_DEFAULT_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/eed1_8/ia_css_eed1_8_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/eed1_8/ia_css_eed1_8_param.h
new file mode 100644
index 000000000000..bc3a07fd07eb
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/eed1_8/ia_css_eed1_8_param.h
@@ -0,0 +1,154 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_EED1_8_PARAM_H
+#define __IA_CSS_EED1_8_PARAM_H
+
+#include "type_support.h"
+#include "vmem.h" /* needed for VMEM_ARRAY */
+
+#include "ia_css_eed1_8_types.h" /* IA_CSS_NUMBER_OF_DEW_ENHANCE_SEGMENTS */
+
+
+/* Configuration parameters: */
+
+/* Enable median for false color correction
+ * 0: Do not use median
+ * 1: Use median
+ * Default: 1
+ */
+#define EED1_8_FC_ENABLE_MEDIAN 1
+
+/* Coring Threshold minima
+ * Used in Tint color suppression.
+ * Default: 1
+ */
+#define EED1_8_CORINGTHMIN 1
+
+/* Define the size of the state. TODO: check if this is the correct place. */
+/* 4 planes : GR, R, B, GB */
+#define NUM_PLANES 4
+
+/* 5 lines state per color plane input_line_state */
+#define EED1_8_STATE_INPUT_BUFFER_HEIGHT (5 * NUM_PLANES)
+
+/* Each plane has width equal to half frame line */
+#define EED1_8_STATE_INPUT_BUFFER_WIDTH CEIL_DIV(MAX_FRAME_SIMDWIDTH, 2)
+
+/* 1 line state per color plane LD_H state */
+#define EED1_8_STATE_LD_H_HEIGHT (1 * NUM_PLANES)
+#define EED1_8_STATE_LD_H_WIDTH CEIL_DIV(MAX_FRAME_SIMDWIDTH, 2)
+
+/* 1 line state per color plane LD_V state */
+#define EED1_8_STATE_LD_V_HEIGHT (1 * NUM_PLANES)
+#define EED1_8_STATE_LD_V_WIDTH CEIL_DIV(MAX_FRAME_SIMDWIDTH, 2)
+
+/* 1 line (single plane) state for D_Hr state */
+#define EED1_8_STATE_D_HR_HEIGHT 1
+#define EED1_8_STATE_D_HR_WIDTH CEIL_DIV(MAX_FRAME_SIMDWIDTH, 2)
+
+/* 1 line (single plane) state for D_Hb state */
+#define EED1_8_STATE_D_HB_HEIGHT 1
+#define EED1_8_STATE_D_HB_WIDTH CEIL_DIV(MAX_FRAME_SIMDWIDTH, 2)
+
+/* 2 lines (single plane) state for D_Vr state */
+#define EED1_8_STATE_D_VR_HEIGHT 2
+#define EED1_8_STATE_D_VR_WIDTH CEIL_DIV(MAX_FRAME_SIMDWIDTH, 2)
+
+/* 2 line (single plane) state for D_Vb state */
+#define EED1_8_STATE_D_VB_HEIGHT 2
+#define EED1_8_STATE_D_VB_WIDTH CEIL_DIV(MAX_FRAME_SIMDWIDTH, 2)
+
+/* 2 lines state for R and B (= 2 planes) rb_zipped_state */
+#define EED1_8_STATE_RB_ZIPPED_HEIGHT (2 * 2)
+#define EED1_8_STATE_RB_ZIPPED_WIDTH CEIL_DIV(MAX_FRAME_SIMDWIDTH, 2)
+
+#if EED1_8_FC_ENABLE_MEDIAN
+/* 1 full input line (GR-R color line) for Yc state */
+#define EED1_8_STATE_YC_HEIGHT 1
+#define EED1_8_STATE_YC_WIDTH MAX_FRAME_SIMDWIDTH
+
+/* 1 line state per color plane Cg_state */
+#define EED1_8_STATE_CG_HEIGHT (1 * NUM_PLANES)
+#define EED1_8_STATE_CG_WIDTH CEIL_DIV(MAX_FRAME_SIMDWIDTH, 2)
+
+/* 1 line state per color plane Co_state */
+#define EED1_8_STATE_CO_HEIGHT (1 * NUM_PLANES)
+#define EED1_8_STATE_CO_WIDTH CEIL_DIV(MAX_FRAME_SIMDWIDTH, 2)
+
+/* 1 full input line (GR-R color line) for AbsK state */
+#define EED1_8_STATE_ABSK_HEIGHT 1
+#define EED1_8_STATE_ABSK_WIDTH MAX_FRAME_SIMDWIDTH
+#endif
+
+struct eed1_8_vmem_params {
+ VMEM_ARRAY(e_dew_enh_x, ISP_VEC_NELEMS);
+ VMEM_ARRAY(e_dew_enh_y, ISP_VEC_NELEMS);
+ VMEM_ARRAY(e_dew_enh_a, ISP_VEC_NELEMS);
+ VMEM_ARRAY(e_dew_enh_f, ISP_VEC_NELEMS);
+ VMEM_ARRAY(chgrinv_x, ISP_VEC_NELEMS);
+ VMEM_ARRAY(chgrinv_a, ISP_VEC_NELEMS);
+ VMEM_ARRAY(chgrinv_b, ISP_VEC_NELEMS);
+ VMEM_ARRAY(chgrinv_c, ISP_VEC_NELEMS);
+ VMEM_ARRAY(fcinv_x, ISP_VEC_NELEMS);
+ VMEM_ARRAY(fcinv_a, ISP_VEC_NELEMS);
+ VMEM_ARRAY(fcinv_b, ISP_VEC_NELEMS);
+ VMEM_ARRAY(fcinv_c, ISP_VEC_NELEMS);
+ VMEM_ARRAY(tcinv_x, ISP_VEC_NELEMS);
+ VMEM_ARRAY(tcinv_a, ISP_VEC_NELEMS);
+ VMEM_ARRAY(tcinv_b, ISP_VEC_NELEMS);
+ VMEM_ARRAY(tcinv_c, ISP_VEC_NELEMS);
+};
+
+/* EED (Edge Enhancing Demosaic) ISP parameters */
+struct eed1_8_dmem_params {
+ int32_t rbzp_strength;
+
+ int32_t fcstrength;
+ int32_t fcthres_0;
+ int32_t fc_sat_coef;
+ int32_t fc_coring_prm;
+ int32_t fc_slope;
+
+ int32_t aerel_thres0;
+ int32_t aerel_gain0;
+ int32_t aerel_thres_diff;
+ int32_t aerel_gain_diff;
+
+ int32_t derel_thres0;
+ int32_t derel_gain0;
+ int32_t derel_thres_diff;
+ int32_t derel_gain_diff;
+
+ int32_t coring_pos0;
+ int32_t coring_pos_diff;
+ int32_t coring_neg0;
+ int32_t coring_neg_diff;
+
+ int32_t gain_exp;
+ int32_t gain_pos0;
+ int32_t gain_pos_diff;
+ int32_t gain_neg0;
+ int32_t gain_neg_diff;
+
+ int32_t margin_pos0;
+ int32_t margin_pos_diff;
+ int32_t margin_neg0;
+ int32_t margin_neg_diff;
+
+ int32_t e_dew_enh_asr;
+ int32_t dedgew_max;
+};
+
+#endif /* __IA_CSS_EED1_8_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/eed1_8/ia_css_eed1_8_state.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/eed1_8/ia_css_eed1_8_state.h
new file mode 100644
index 000000000000..47e451b15044
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/eed1_8/ia_css_eed1_8_state.h
@@ -0,0 +1,40 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_EED1_8_STATE_H
+#define __IA_CSS_EED1_8_STATE_H
+
+#include "type_support.h"
+#include "vmem.h" /* for VMEM_ARRAY*/
+
+#include "ia_css_eed1_8_param.h"
+
+struct eed1_8_vmem_state {
+ VMEM_ARRAY(eed1_8_input_lines[EED1_8_STATE_INPUT_BUFFER_HEIGHT], EED1_8_STATE_INPUT_BUFFER_WIDTH*ISP_NWAY);
+ VMEM_ARRAY(eed1_8_LD_H[EED1_8_STATE_LD_H_HEIGHT], EED1_8_STATE_LD_H_WIDTH*ISP_NWAY);
+ VMEM_ARRAY(eed1_8_LD_V[EED1_8_STATE_LD_V_HEIGHT], EED1_8_STATE_LD_V_WIDTH*ISP_NWAY);
+ VMEM_ARRAY(eed1_8_D_Hr[EED1_8_STATE_D_HR_HEIGHT], EED1_8_STATE_D_HR_WIDTH*ISP_NWAY);
+ VMEM_ARRAY(eed1_8_D_Hb[EED1_8_STATE_D_HB_HEIGHT], EED1_8_STATE_D_HB_WIDTH*ISP_NWAY);
+ VMEM_ARRAY(eed1_8_D_Vr[EED1_8_STATE_D_VR_HEIGHT], EED1_8_STATE_D_VR_WIDTH*ISP_NWAY);
+ VMEM_ARRAY(eed1_8_D_Vb[EED1_8_STATE_D_VB_HEIGHT], EED1_8_STATE_D_VB_WIDTH*ISP_NWAY);
+ VMEM_ARRAY(eed1_8_rb_zipped[EED1_8_STATE_RB_ZIPPED_HEIGHT], EED1_8_STATE_RB_ZIPPED_WIDTH*ISP_NWAY);
+#if EED1_8_FC_ENABLE_MEDIAN
+ VMEM_ARRAY(eed1_8_Yc[EED1_8_STATE_YC_HEIGHT], EED1_8_STATE_YC_WIDTH*ISP_NWAY);
+ VMEM_ARRAY(eed1_8_Cg[EED1_8_STATE_CG_HEIGHT], EED1_8_STATE_CG_WIDTH*ISP_NWAY);
+ VMEM_ARRAY(eed1_8_Co[EED1_8_STATE_CO_HEIGHT], EED1_8_STATE_CO_WIDTH*ISP_NWAY);
+ VMEM_ARRAY(eed1_8_AbsK[EED1_8_STATE_ABSK_HEIGHT], EED1_8_STATE_ABSK_WIDTH*ISP_NWAY);
+#endif
+};
+
+#endif /* __IA_CSS_EED1_8_STATE_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/eed1_8/ia_css_eed1_8_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/eed1_8/ia_css_eed1_8_types.h
new file mode 100644
index 000000000000..07651f0ac558
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/eed1_8/ia_css_eed1_8_types.h
@@ -0,0 +1,86 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_EED1_8_TYPES_H
+#define __IA_CSS_EED1_8_TYPES_H
+
+/** @file
+* CSS-API header file for Edge Enhanced Demosaic parameters.
+*/
+
+
+#include "type_support.h"
+
+/**
+ * \brief EED1_8 public parameters.
+ * \details Struct with all parameters for the EED1.8 kernel that can be set
+ * from the CSS API.
+ */
+
+/* parameter list is based on ISP261 CSS API public parameter list_all.xlsx from 28-01-2015 */
+
+/* Number of segments + 1 segment used in edge reliability enhancement
+ * Ineffective: N/A
+ * Default: 9
+ */
+#define IA_CSS_NUMBER_OF_DEW_ENHANCE_SEGMENTS 9
+
+/** Edge Enhanced Demosaic configuration
+ *
+ * ISP2.6.1: EED1_8 is used.
+ */
+
+struct ia_css_eed1_8_config {
+ int32_t rbzp_strength; /**< Strength of zipper reduction. */
+
+ int32_t fcstrength; /**< Strength of false color reduction. */
+ int32_t fcthres_0; /**< Threshold to prevent chroma coring due to noise or green disparity in dark region. */
+ int32_t fcthres_1; /**< Threshold to prevent chroma coring due to noise or green disparity in bright region. */
+ int32_t fc_sat_coef; /**< How much color saturation to maintain in high color saturation region. */
+ int32_t fc_coring_prm; /**< Chroma coring coefficient for tint color suppression. */
+
+ int32_t aerel_thres0; /**< Threshold for Non-Directional Reliability at dark region. */
+ int32_t aerel_gain0; /**< Gain for Non-Directional Reliability at dark region. */
+ int32_t aerel_thres1; /**< Threshold for Non-Directional Reliability at bright region. */
+ int32_t aerel_gain1; /**< Gain for Non-Directional Reliability at bright region. */
+
+ int32_t derel_thres0; /**< Threshold for Directional Reliability at dark region. */
+ int32_t derel_gain0; /**< Gain for Directional Reliability at dark region. */
+ int32_t derel_thres1; /**< Threshold for Directional Reliability at bright region. */
+ int32_t derel_gain1; /**< Gain for Directional Reliability at bright region. */
+
+ int32_t coring_pos0; /**< Positive Edge Coring Threshold in dark region. */
+ int32_t coring_pos1; /**< Positive Edge Coring Threshold in bright region. */
+ int32_t coring_neg0; /**< Negative Edge Coring Threshold in dark region. */
+ int32_t coring_neg1; /**< Negative Edge Coring Threshold in bright region. */
+
+ int32_t gain_exp; /**< Common Exponent of Gain. */
+ int32_t gain_pos0; /**< Gain for Positive Edge in dark region. */
+ int32_t gain_pos1; /**< Gain for Positive Edge in bright region. */
+ int32_t gain_neg0; /**< Gain for Negative Edge in dark region. */
+ int32_t gain_neg1; /**< Gain for Negative Edge in bright region. */
+
+ int32_t pos_margin0; /**< Margin for Positive Edge in dark region. */
+ int32_t pos_margin1; /**< Margin for Positive Edge in bright region. */
+ int32_t neg_margin0; /**< Margin for Negative Edge in dark region. */
+ int32_t neg_margin1; /**< Margin for Negative Edge in bright region. */
+
+ int32_t dew_enhance_seg_x[IA_CSS_NUMBER_OF_DEW_ENHANCE_SEGMENTS]; /**< Segment data for directional edge weight: X. */
+ int32_t dew_enhance_seg_y[IA_CSS_NUMBER_OF_DEW_ENHANCE_SEGMENTS]; /**< Segment data for directional edge weight: Y. */
+ int32_t dew_enhance_seg_slope[(IA_CSS_NUMBER_OF_DEW_ENHANCE_SEGMENTS - 1)]; /**< Segment data for directional edge weight: Slope. */
+ int32_t dew_enhance_seg_exp[(IA_CSS_NUMBER_OF_DEW_ENHANCE_SEGMENTS - 1)]; /**< Segment data for directional edge weight: Exponent. */
+ int32_t dedgew_max; /**< Max Weight for Directional Edge. */
+};
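+
+/* Illustrative sketch (not part of the CSS API): the dew_enhance_seg_* arrays
+ * above describe a piecewise-linear curve with
+ * IA_CSS_NUMBER_OF_DEW_ENHANCE_SEGMENTS points and one slope/exponent entry
+ * per segment between consecutive points. The values below are placeholders
+ * chosen only to show the expected array shapes; real tunings come from the
+ * ISP parameter set. The guard macro is hypothetical.
+ */
+#ifdef IA_CSS_EED1_8_EXAMPLE_CONFIG
+static const struct ia_css_eed1_8_config example_eed1_8_config = {
+	.dew_enhance_seg_x     = { 0, 64, 128, 192, 256, 320, 384, 448, 512 },
+	.dew_enhance_seg_y     = { 0, 64, 128, 192, 256, 320, 384, 448, 512 },
+	.dew_enhance_seg_slope = { 1, 1, 1, 1, 1, 1, 1, 1 },
+	.dew_enhance_seg_exp   = { 0, 0, 0, 0, 0, 0, 0, 0 },
+	.dedgew_max            = 255,	/* placeholder */
+};
+#endif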
+
+#endif /* __IA_CSS_EED1_8_TYPES_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/fc/fc_1.0/ia_css_formats.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/fc/fc_1.0/ia_css_formats.host.c
new file mode 100644
index 000000000000..94631eee8614
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/fc/fc_1.0/ia_css_formats.host.c
@@ -0,0 +1,62 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "ia_css_formats.host.h"
+#include "ia_css_types.h"
+#include "sh_css_defs.h"
+
+/*#include "sh_css_frac.h"*/
+#ifndef IA_CSS_NO_DEBUG
+/* FIXME: See BZ 4427 */
+#include "ia_css_debug.h"
+#endif
+
+const struct ia_css_formats_config default_formats_config = {
+ 1
+};
+
+void
+ia_css_formats_encode(
+ struct sh_css_isp_formats_params *to,
+ const struct ia_css_formats_config *from,
+ unsigned size)
+{
+ (void)size;
+ to->video_full_range_flag = from->video_full_range_flag;
+}
+#ifndef IA_CSS_NO_DEBUG
+/* FIXME: See BZ 4427 */
+void
+ia_css_formats_dump(
+ const struct sh_css_isp_formats_params *formats,
+ unsigned level)
+{
+	if (!formats)
+		return;
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "video_full_range_flag", formats->video_full_range_flag);
+}
+#endif
+
+#ifndef IA_CSS_NO_DEBUG
+/* FIXME: See BZ 4427 */
+void
+ia_css_formats_debug_dtrace(
+ const struct ia_css_formats_config *config,
+ unsigned level)
+{
+ ia_css_debug_dtrace(level,
+ "config.video_full_range_flag=%d\n",
+ config->video_full_range_flag);
+}
+#endif
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/fc/fc_1.0/ia_css_formats.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/fc/fc_1.0/ia_css_formats.host.h
new file mode 100644
index 000000000000..8a90cd83b248
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/fc/fc_1.0/ia_css_formats.host.h
@@ -0,0 +1,45 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_FORMATS_HOST_H
+#define __IA_CSS_FORMATS_HOST_H
+
+#include "ia_css_formats_types.h"
+#include "ia_css_formats_param.h"
+
+extern const struct ia_css_formats_config default_formats_config;
+
+void
+ia_css_formats_encode(
+ struct sh_css_isp_formats_params *to,
+ const struct ia_css_formats_config *from,
+ unsigned size);
+#ifndef IA_CSS_NO_DEBUG
+/* FIXME: See BZ 4427 */
+void
+ia_css_formats_dump(
+ const struct sh_css_isp_formats_params *formats,
+ unsigned level);
+#endif
+
+#ifndef IA_CSS_NO_DEBUG
+/* FIXME: See BZ 4427 */
+void
+ia_css_formats_debug_dtrace(
+ const struct ia_css_formats_config *formats,
+ unsigned level);
+#endif /*IA_CSS_NO_DEBUG*/
+
+#endif /* __IA_CSS_FORMATS_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/fc/fc_1.0/ia_css_formats_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/fc/fc_1.0/ia_css_formats_param.h
new file mode 100644
index 000000000000..2eb6030b6081
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/fc/fc_1.0/ia_css_formats_param.h
@@ -0,0 +1,25 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_FORMATS_PARAM_H
+#define __IA_CSS_FORMATS_PARAM_H
+
+#include "type_support.h"
+
+/* FORMATS (Format conversion) */
+struct sh_css_isp_formats_params {
+ int32_t video_full_range_flag;
+};
+
+#endif /* __IA_CSS_FORMATS_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/fc/fc_1.0/ia_css_formats_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/fc/fc_1.0/ia_css_formats_types.h
new file mode 100644
index 000000000000..df1565a5914c
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/fc/fc_1.0/ia_css_formats_types.h
@@ -0,0 +1,38 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_FORMATS_TYPES_H
+#define __IA_CSS_FORMATS_TYPES_H
+
+/** @file
+ * CSS-API header file for output format parameters.
+ */
+
+#include "type_support.h"
+
+/** Formats configuration.
+ *
+ * ISP block: FORMATS
+ * ISP1: FORMATS is used.
+ * ISP2: FORMATS is used.
+ */
+struct ia_css_formats_config {
+ uint32_t video_full_range_flag; /**< selects the range of YUV output.
+ u8.0, [0,1],
+ default 1, ineffective n/a\n
+ 1 - full range, luma 0-255, chroma 0-255\n
+ 0 - reduced range, luma 16-235, chroma 16-240 */
+};
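+
+/* Usage sketch (illustrative, based only on the field documented above; the
+ * guard macro is hypothetical): selecting full range versus BT.601-style
+ * reduced range output.
+ */
+#ifdef IA_CSS_FORMATS_EXAMPLE
+static const struct ia_css_formats_config example_full_range = {
+	.video_full_range_flag = 1	/* luma 0-255, chroma 0-255 */
+};
+static const struct ia_css_formats_config example_reduced_range = {
+	.video_full_range_flag = 0	/* luma 16-235, chroma 16-240 */
+};
+#endif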
+
+#endif /* __IA_CSS_FORMATS_TYPES_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/fixedbds/fixedbds_1.0/ia_css_fixedbds_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/fixedbds/fixedbds_1.0/ia_css_fixedbds_param.h
new file mode 100644
index 000000000000..cc8dd1a7007f
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/fixedbds/fixedbds_1.0/ia_css_fixedbds_param.h
@@ -0,0 +1,33 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_FIXEDBDS_PARAM_H
+#define __IA_CSS_FIXEDBDS_PARAM_H
+
+#include "type_support.h"
+
+#ifdef ISP2401
+#define BDS_UNIT 8
+#define FRAC_LOG 3
+#define FRAC_ACC (1<<FRAC_LOG)
+#if FRAC_ACC != BDS_UNIT
+#error "FRAC_ACC and BDS_UNIT need to be merged into one define"
+#endif
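+
+/* Informative note (an inference from the definitions above, not taken from
+ * the original sources): FRAC_LOG == 3 gives FRAC_ACC == 8, i.e. fractional
+ * bayer downscale factors are expressed in 1/8 steps (for example, a factor
+ * of 1.5 corresponds to 12 in FRAC_ACC units).
+ */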
+
+#endif
+struct sh_css_isp_bds_params {
+ int baf_strength;
+};
+
+#endif /* __IA_CSS_FIXEDBDS_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/fixedbds/fixedbds_1.0/ia_css_fixedbds_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/fixedbds/fixedbds_1.0/ia_css_fixedbds_types.h
new file mode 100644
index 000000000000..5b59d9dec4d0
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/fixedbds/fixedbds_1.0/ia_css_fixedbds_types.h
@@ -0,0 +1,26 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_FIXEDBDS_TYPES_H
+#define __IA_CSS_FIXEDBDS_TYPES_H
+
+
+struct sh_css_bds_factor {
+	unsigned int numerator;
+	unsigned int denominator;
+	unsigned int bds_factor;
+};
+
+
+#endif /*__IA_CSS_FIXEDBDS_TYPES_H*/
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/fpn/fpn_1.0/ia_css_fpn.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/fpn/fpn_1.0/ia_css_fpn.host.c
new file mode 100644
index 000000000000..1fb9f27540f3
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/fpn/fpn_1.0/ia_css_fpn.host.c
@@ -0,0 +1,89 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <assert_support.h>
+#include <ia_css_frame_public.h>
+#include <ia_css_frame.h>
+#include <ia_css_binary.h>
+#include <ia_css_types.h>
+#include <sh_css_defs.h>
+#include <ia_css_debug.h>
+
+#define IA_CSS_INCLUDE_CONFIGURATIONS
+#include "ia_css_isp_configs.h"
+#include "isp.h"
+
+#include "ia_css_fpn.host.h"
+
+void
+ia_css_fpn_encode(
+ struct sh_css_isp_fpn_params *to,
+ const struct ia_css_fpn_table *from,
+ unsigned size)
+{
+ (void)size;
+ to->shift = from->shift;
+ to->enabled = from->data != NULL;
+}
+
+void
+ia_css_fpn_dump(
+ const struct sh_css_isp_fpn_params *fpn,
+ unsigned level)
+{
+	if (!fpn)
+		return;
+ ia_css_debug_dtrace(level, "Fixed Pattern Noise Reduction:\n");
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "fpn_shift", fpn->shift);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "fpn_enabled", fpn->enabled);
+}
+
+void
+ia_css_fpn_config(
+ struct sh_css_isp_fpn_isp_config *to,
+ const struct ia_css_fpn_configuration *from,
+ unsigned size)
+{
+ unsigned elems_a = ISP_VEC_NELEMS;
+
+ (void)size;
+ ia_css_dma_configure_from_info(&to->port_b, from->info);
+ to->width_a_over_b = elems_a / to->port_b.elems;
+
+	/* Assume divisibility here, may need to generalize to fixed point. */
+	assert(elems_a % to->port_b.elems == 0);
+}
+
+void
+ia_css_fpn_configure(
+ const struct ia_css_binary *binary,
+ const struct ia_css_frame_info *info)
+{
+ struct ia_css_frame_info my_info = IA_CSS_BINARY_DEFAULT_FRAME_INFO;
+ const struct ia_css_fpn_configuration config = {
+ &my_info
+ };
+
+ my_info.res.width = CEIL_DIV(info->res.width, 2); /* Packed by 2x */
+ my_info.res.height = info->res.height;
+ my_info.padded_width = CEIL_DIV(info->padded_width, 2); /* Packed by 2x */
+ my_info.format = info->format;
+ my_info.raw_bit_depth = FPN_BITS_PER_PIXEL;
+ my_info.raw_bayer_order = info->raw_bayer_order;
+ my_info.crop_info = info->crop_info;
+
+ ia_css_configure_fpn(binary, &config);
+}
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/fpn/fpn_1.0/ia_css_fpn.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/fpn/fpn_1.0/ia_css_fpn.host.h
new file mode 100644
index 000000000000..bb905c8db8c8
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/fpn/fpn_1.0/ia_css_fpn.host.h
@@ -0,0 +1,44 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_FPN_HOST_H
+#define __IA_CSS_FPN_HOST_H
+
+#include "ia_css_binary.h"
+#include "ia_css_fpn_types.h"
+#include "ia_css_fpn_param.h"
+
+void
+ia_css_fpn_encode(
+ struct sh_css_isp_fpn_params *to,
+ const struct ia_css_fpn_table *from,
+ unsigned size);
+
+void
+ia_css_fpn_dump(
+ const struct sh_css_isp_fpn_params *fpn,
+ unsigned level);
+
+void
+ia_css_fpn_config(
+ struct sh_css_isp_fpn_isp_config *to,
+ const struct ia_css_fpn_configuration *from,
+ unsigned size);
+
+void
+ia_css_fpn_configure(
+ const struct ia_css_binary *binary,
+ const struct ia_css_frame_info *from);
+
+#endif /* __IA_CSS_FPN_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/fpn/fpn_1.0/ia_css_fpn_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/fpn/fpn_1.0/ia_css_fpn_param.h
new file mode 100644
index 000000000000..68765c3f3bf7
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/fpn/fpn_1.0/ia_css_fpn_param.h
@@ -0,0 +1,35 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_FPN_PARAM_H
+#define __IA_CSS_FPN_PARAM_H
+
+#include "type_support.h"
+
+#include "dma.h"
+
+#define FPN_BITS_PER_PIXEL 16
+
+/* FPNR (Fixed Pattern Noise Reduction) */
+struct sh_css_isp_fpn_params {
+ int32_t shift;
+ int32_t enabled;
+};
+
+struct sh_css_isp_fpn_isp_config {
+ uint32_t width_a_over_b;
+ struct dma_port_config port_b;
+};
+
+#endif /* __IA_CSS_FPN_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/fpn/fpn_1.0/ia_css_fpn_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/fpn/fpn_1.0/ia_css_fpn_types.h
new file mode 100644
index 000000000000..5a2f0c06a80d
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/fpn/fpn_1.0/ia_css_fpn_types.h
@@ -0,0 +1,52 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_FPN_TYPES_H
+#define __IA_CSS_FPN_TYPES_H
+
+/** @file
+ * CSS-API header file for Fixed Pattern Noise parameters.
+ */
+
+/** Fixed Pattern Noise table.
+ *
+ * This contains the fixed pattern noise values
+ * obtained from a black frame capture.
+ *
+ * "shift" should be set to the smallest value
+ * that satisfies the requirement that the maximum data value is less than 64.
+ *
+ * ISP block: FPN1
+ * ISP1: FPN1 is used.
+ * ISP2: FPN1 is used.
+ */
+
+struct ia_css_fpn_table {
+	int16_t *data; /**< Table content (fixed pattern noise).
+ u0.[13-shift], [0,63] */
+ uint32_t width; /**< Table width (in pixels).
+ This is the input frame width. */
+ uint32_t height; /**< Table height (in pixels).
+ This is the input frame height. */
+ uint32_t shift; /**< Common exponent of table content.
+ u8.0, [0,13] */
+ uint32_t enabled; /**< Fpn is enabled.
+ bool */
+};
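+
+/* Illustrative helper (not part of the CSS API; the function name and guard
+ * macro are made up for this example): the comment above asks for the
+ * smallest "shift" such that every table entry fits in [0,63]. Given the
+ * maximum value found in a black-frame capture, that shift can be derived as
+ * follows.
+ */
+#ifdef IA_CSS_FPN_EXAMPLE
+static inline uint32_t example_fpn_shift_for_max(uint16_t max_black_level)
+{
+	uint32_t shift = 0;
+
+	/* Drop LSBs until the largest sample fits in 6 bits; shift <= 13. */
+	while ((max_black_level >> shift) > 63 && shift < 13)
+		shift++;
+	return shift;
+}
+#endif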
+
+struct ia_css_fpn_configuration {
+ const struct ia_css_frame_info *info;
+};
+
+#endif /* __IA_CSS_FPN_TYPES_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/gc/gc_1.0/ia_css_gc.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/gc/gc_1.0/ia_css_gc.host.c
new file mode 100644
index 000000000000..0cfb5c94447f
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/gc/gc_1.0/ia_css_gc.host.c
@@ -0,0 +1,118 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "ia_css_types.h"
+#include "sh_css_defs.h"
+#ifndef IA_CSS_NO_DEBUG
+/* FIXME: See BZ 4427 */
+#include "ia_css_debug.h"
+#endif
+#include "sh_css_frac.h"
+#include "vamem.h"
+
+#include "ia_css_gc.host.h"
+
+const struct ia_css_gc_config default_gc_config = {
+ 0,
+ 0
+};
+
+const struct ia_css_ce_config default_ce_config = {
+ 0,
+ 255
+};
+
+void
+ia_css_gc_encode(
+ struct sh_css_isp_gc_params *to,
+ const struct ia_css_gc_config *from,
+ unsigned size)
+{
+ (void)size;
+ to->gain_k1 =
+ uDIGIT_FITTING((int)from->gain_k1, 16,
+ IA_CSS_GAMMA_GAIN_K_SHIFT);
+ to->gain_k2 =
+ uDIGIT_FITTING((int)from->gain_k2, 16,
+ IA_CSS_GAMMA_GAIN_K_SHIFT);
+}
+
+void
+ia_css_ce_encode(
+ struct sh_css_isp_ce_params *to,
+ const struct ia_css_ce_config *from,
+ unsigned size)
+{
+ (void)size;
+ to->uv_level_min = from->uv_level_min;
+ to->uv_level_max = from->uv_level_max;
+}
+
+void
+ia_css_gc_vamem_encode(
+ struct sh_css_isp_gc_vamem_params *to,
+ const struct ia_css_gamma_table *from,
+ unsigned size)
+{
+ (void)size;
+	memcpy(&to->gc, &from->data, sizeof(to->gc));
+}
+
+#ifndef IA_CSS_NO_DEBUG
+void
+ia_css_gc_dump(
+ const struct sh_css_isp_gc_params *gc,
+ unsigned level)
+{
+	if (!gc)
+		return;
+ ia_css_debug_dtrace(level, "Gamma Correction:\n");
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "gamma_gain_k1", gc->gain_k1);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "gamma_gain_k2", gc->gain_k2);
+}
+
+void
+ia_css_ce_dump(
+ const struct sh_css_isp_ce_params *ce,
+ unsigned level)
+{
+ ia_css_debug_dtrace(level, "Chroma Enhancement:\n");
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "ce_uv_level_min", ce->uv_level_min);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "ce_uv_level_max", ce->uv_level_max);
+}
+
+void
+ia_css_gc_debug_dtrace(
+ const struct ia_css_gc_config *config,
+ unsigned level)
+{
+ ia_css_debug_dtrace(level,
+ "config.gain_k1=%d, config.gain_k2=%d\n",
+ config->gain_k1, config->gain_k2);
+}
+
+void
+ia_css_ce_debug_dtrace(
+ const struct ia_css_ce_config *config,
+ unsigned level)
+{
+ ia_css_debug_dtrace(level,
+ "config.uv_level_min=%d, config.uv_level_max=%d\n",
+ config->uv_level_min, config->uv_level_max);
+}
+#endif
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/gc/gc_1.0/ia_css_gc.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/gc/gc_1.0/ia_css_gc.host.h
new file mode 100644
index 000000000000..06f08840563e
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/gc/gc_1.0/ia_css_gc.host.h
@@ -0,0 +1,65 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_GC_HOST_H
+#define __IA_CSS_GC_HOST_H
+
+#include "ia_css_gc_param.h"
+#include "ia_css_gc_table.host.h"
+
+extern const struct ia_css_gc_config default_gc_config;
+extern const struct ia_css_ce_config default_ce_config;
+
+void
+ia_css_gc_encode(
+ struct sh_css_isp_gc_params *to,
+ const struct ia_css_gc_config *from,
+ unsigned size);
+
+void
+ia_css_gc_vamem_encode(
+ struct sh_css_isp_gc_vamem_params *to,
+ const struct ia_css_gamma_table *from,
+ unsigned size);
+
+void
+ia_css_ce_encode(
+ struct sh_css_isp_ce_params *to,
+ const struct ia_css_ce_config *from,
+ unsigned size);
+
+#ifndef IA_CSS_NO_DEBUG
+void
+ia_css_gc_dump(
+ const struct sh_css_isp_gc_params *gc,
+ unsigned level);
+
+void
+ia_css_ce_dump(
+ const struct sh_css_isp_ce_params *ce,
+ unsigned level);
+
+void
+ia_css_gc_debug_dtrace(
+ const struct ia_css_gc_config *config,
+ unsigned level);
+
+void
+ia_css_ce_debug_dtrace(
+ const struct ia_css_ce_config *config,
+ unsigned level);
+
+#endif
+
+#endif /* __IA_CSS_GC_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/gc/gc_1.0/ia_css_gc_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/gc/gc_1.0/ia_css_gc_param.h
new file mode 100644
index 000000000000..52972b1a07ff
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/gc/gc_1.0/ia_css_gc_param.h
@@ -0,0 +1,61 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_GC_PARAM_H
+#define __IA_CSS_GC_PARAM_H
+
+#include "type_support.h"
+#ifndef PIPE_GENERATION
+#ifdef __ISP
+#define __INLINE_VAMEM__
+#endif
+#include "vamem.h"
+#include "ia_css_gc_types.h"
+
+#if defined(IS_VAMEM_VERSION_1)
+#define SH_CSS_ISP_GAMMA_TABLE_SIZE_LOG2 IA_CSS_VAMEM_1_GAMMA_TABLE_SIZE_LOG2
+#define SH_CSS_ISP_GC_TABLE_SIZE IA_CSS_VAMEM_1_GAMMA_TABLE_SIZE
+#elif defined(IS_VAMEM_VERSION_2)
+#define SH_CSS_ISP_GAMMA_TABLE_SIZE_LOG2 IA_CSS_VAMEM_2_GAMMA_TABLE_SIZE_LOG2
+#define SH_CSS_ISP_GC_TABLE_SIZE IA_CSS_VAMEM_2_GAMMA_TABLE_SIZE
+#else
+#error "Undefined vamem version"
+#endif
+
+#else
+/* For pipe generation, the size is not relevant */
+#define SH_CSS_ISP_GC_TABLE_SIZE 0
+#endif
+
+#define GAMMA_OUTPUT_BITS 8
+#define GAMMA_OUTPUT_MAX_VAL ((1<<GAMMA_OUTPUT_BITS)-1)
+
+/* GC (Gamma Correction) */
+struct sh_css_isp_gc_params {
+ int32_t gain_k1;
+ int32_t gain_k2;
+};
+
+/* CE (Chroma Enhancement) */
+struct sh_css_isp_ce_params {
+ int32_t uv_level_min;
+ int32_t uv_level_max;
+};
+
+/* This should be vamem_data_t, but that breaks the pipe generator */
+struct sh_css_isp_gc_vamem_params {
+ uint16_t gc[SH_CSS_ISP_GC_TABLE_SIZE];
+};
+
+#endif /* __IA_CSS_GC_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/gc/gc_1.0/ia_css_gc_table.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/gc/gc_1.0/ia_css_gc_table.host.c
new file mode 100644
index 000000000000..082db22a5329
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/gc/gc_1.0/ia_css_gc_table.host.c
@@ -0,0 +1,214 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <type_support.h>
+#include <string_support.h> /* memcpy */
+#include "system_global.h"
+#include "vamem.h"
+#include "ia_css_types.h"
+#include "ia_css_gc_table.host.h"
+
+struct ia_css_gamma_table default_gamma_table;
+
+#if defined(HAS_VAMEM_VERSION_2)
+
+static const uint16_t
+default_gamma_table_data[IA_CSS_VAMEM_2_GAMMA_TABLE_SIZE] = {
+ 0, 4, 8, 12, 17, 21, 27, 32,
+ 38, 44, 49, 55, 61, 66, 71, 76,
+ 80, 84, 88, 92, 95, 98, 102, 105,
+108, 110, 113, 116, 118, 121, 123, 126,
+128, 130, 132, 135, 137, 139, 141, 143,
+145, 146, 148, 150, 152, 153, 155, 156,
+158, 160, 161, 162, 164, 165, 166, 168,
+169, 170, 171, 172, 174, 175, 176, 177,
+178, 179, 180, 181, 182, 183, 184, 184,
+185, 186, 187, 188, 189, 189, 190, 191,
+192, 192, 193, 194, 195, 195, 196, 197,
+197, 198, 198, 199, 200, 200, 201, 201,
+202, 203, 203, 204, 204, 205, 205, 206,
+206, 207, 207, 208, 208, 209, 209, 210,
+210, 210, 211, 211, 212, 212, 213, 213,
+214, 214, 214, 215, 215, 216, 216, 216,
+217, 217, 218, 218, 218, 219, 219, 220,
+220, 220, 221, 221, 222, 222, 222, 223,
+223, 223, 224, 224, 225, 225, 225, 226,
+226, 226, 227, 227, 227, 228, 228, 228,
+229, 229, 229, 230, 230, 230, 231, 231,
+231, 232, 232, 232, 233, 233, 233, 234,
+234, 234, 234, 235, 235, 235, 236, 236,
+236, 237, 237, 237, 237, 238, 238, 238,
+239, 239, 239, 239, 240, 240, 240, 241,
+241, 241, 241, 242, 242, 242, 242, 243,
+243, 243, 243, 244, 244, 244, 245, 245,
+245, 245, 246, 246, 246, 246, 247, 247,
+247, 247, 248, 248, 248, 248, 249, 249,
+249, 249, 250, 250, 250, 250, 251, 251,
+251, 251, 252, 252, 252, 252, 253, 253,
+253, 253, 254, 254, 254, 254, 255, 255,
+255
+};
+
+#elif defined(HAS_VAMEM_VERSION_1)
+
+static const uint16_t
+default_gamma_table_data[IA_CSS_VAMEM_1_GAMMA_TABLE_SIZE] = {
+ 0, 1, 2, 3, 4, 5, 6, 7,
+ 8, 9, 10, 11, 12, 13, 14, 16,
+ 17, 18, 19, 20, 21, 23, 24, 25,
+ 27, 28, 29, 31, 32, 33, 35, 36,
+ 38, 39, 41, 42, 44, 45, 47, 48,
+ 49, 51, 52, 54, 55, 57, 58, 60,
+ 61, 62, 64, 65, 66, 68, 69, 70,
+ 71, 72, 74, 75, 76, 77, 78, 79,
+ 80, 81, 82, 83, 84, 85, 86, 87,
+ 88, 89, 90, 91, 92, 93, 93, 94,
+ 95, 96, 97, 98, 98, 99, 100, 101,
+ 102, 102, 103, 104, 105, 105, 106, 107,
+ 108, 108, 109, 110, 110, 111, 112, 112,
+ 113, 114, 114, 115, 116, 116, 117, 118,
+ 118, 119, 120, 120, 121, 121, 122, 123,
+ 123, 124, 125, 125, 126, 126, 127, 127, /* 128 */
+ 128, 129, 129, 130, 130, 131, 131, 132,
+ 132, 133, 134, 134, 135, 135, 136, 136,
+ 137, 137, 138, 138, 139, 139, 140, 140,
+ 141, 141, 142, 142, 143, 143, 144, 144,
+ 145, 145, 145, 146, 146, 147, 147, 148,
+ 148, 149, 149, 150, 150, 150, 151, 151,
+ 152, 152, 152, 153, 153, 154, 154, 155,
+ 155, 155, 156, 156, 156, 157, 157, 158,
+ 158, 158, 159, 159, 160, 160, 160, 161,
+ 161, 161, 162, 162, 162, 163, 163, 163,
+ 164, 164, 164, 165, 165, 165, 166, 166,
+ 166, 167, 167, 167, 168, 168, 168, 169,
+ 169, 169, 170, 170, 170, 170, 171, 171,
+ 171, 172, 172, 172, 172, 173, 173, 173,
+ 174, 174, 174, 174, 175, 175, 175, 176,
+ 176, 176, 176, 177, 177, 177, 177, 178, /* 256 */
+ 178, 178, 178, 179, 179, 179, 179, 180,
+ 180, 180, 180, 181, 181, 181, 181, 182,
+ 182, 182, 182, 182, 183, 183, 183, 183,
+ 184, 184, 184, 184, 184, 185, 185, 185,
+ 185, 186, 186, 186, 186, 186, 187, 187,
+ 187, 187, 187, 188, 188, 188, 188, 188,
+ 189, 189, 189, 189, 189, 190, 190, 190,
+ 190, 190, 191, 191, 191, 191, 191, 192,
+ 192, 192, 192, 192, 192, 193, 193, 193,
+ 193, 193, 194, 194, 194, 194, 194, 194,
+ 195, 195, 195, 195, 195, 195, 196, 196,
+ 196, 196, 196, 196, 197, 197, 197, 197,
+ 197, 197, 198, 198, 198, 198, 198, 198,
+ 198, 199, 199, 199, 199, 199, 199, 200,
+ 200, 200, 200, 200, 200, 200, 201, 201,
+ 201, 201, 201, 201, 201, 202, 202, 202, /* 384 */
+ 202, 202, 202, 202, 203, 203, 203, 203,
+ 203, 203, 203, 204, 204, 204, 204, 204,
+ 204, 204, 204, 205, 205, 205, 205, 205,
+ 205, 205, 205, 206, 206, 206, 206, 206,
+ 206, 206, 206, 207, 207, 207, 207, 207,
+ 207, 207, 207, 208, 208, 208, 208, 208,
+ 208, 208, 208, 209, 209, 209, 209, 209,
+ 209, 209, 209, 209, 210, 210, 210, 210,
+ 210, 210, 210, 210, 210, 211, 211, 211,
+ 211, 211, 211, 211, 211, 211, 212, 212,
+ 212, 212, 212, 212, 212, 212, 212, 213,
+ 213, 213, 213, 213, 213, 213, 213, 213,
+ 214, 214, 214, 214, 214, 214, 214, 214,
+ 214, 214, 215, 215, 215, 215, 215, 215,
+ 215, 215, 215, 216, 216, 216, 216, 216,
+ 216, 216, 216, 216, 216, 217, 217, 217, /* 512 */
+ 217, 217, 217, 217, 217, 217, 217, 218,
+ 218, 218, 218, 218, 218, 218, 218, 218,
+ 218, 219, 219, 219, 219, 219, 219, 219,
+ 219, 219, 219, 220, 220, 220, 220, 220,
+ 220, 220, 220, 220, 220, 221, 221, 221,
+ 221, 221, 221, 221, 221, 221, 221, 221,
+ 222, 222, 222, 222, 222, 222, 222, 222,
+ 222, 222, 223, 223, 223, 223, 223, 223,
+ 223, 223, 223, 223, 223, 224, 224, 224,
+ 224, 224, 224, 224, 224, 224, 224, 224,
+ 225, 225, 225, 225, 225, 225, 225, 225,
+ 225, 225, 225, 226, 226, 226, 226, 226,
+ 226, 226, 226, 226, 226, 226, 226, 227,
+ 227, 227, 227, 227, 227, 227, 227, 227,
+ 227, 227, 228, 228, 228, 228, 228, 228,
+ 228, 228, 228, 228, 228, 228, 229, 229,
+ 229, 229, 229, 229, 229, 229, 229, 229,
+ 229, 229, 230, 230, 230, 230, 230, 230,
+ 230, 230, 230, 230, 230, 230, 231, 231,
+ 231, 231, 231, 231, 231, 231, 231, 231,
+ 231, 231, 231, 232, 232, 232, 232, 232,
+ 232, 232, 232, 232, 232, 232, 232, 233,
+ 233, 233, 233, 233, 233, 233, 233, 233,
+ 233, 233, 233, 233, 234, 234, 234, 234,
+ 234, 234, 234, 234, 234, 234, 234, 234,
+ 234, 235, 235, 235, 235, 235, 235, 235,
+ 235, 235, 235, 235, 235, 235, 236, 236,
+ 236, 236, 236, 236, 236, 236, 236, 236,
+ 236, 236, 236, 236, 237, 237, 237, 237,
+ 237, 237, 237, 237, 237, 237, 237, 237,
+ 237, 237, 238, 238, 238, 238, 238, 238,
+ 238, 238, 238, 238, 238, 238, 238, 238,
+ 239, 239, 239, 239, 239, 239, 239, 239,
+ 239, 239, 239, 239, 239, 239, 240, 240,
+ 240, 240, 240, 240, 240, 240, 240, 240,
+ 240, 240, 240, 240, 241, 241, 241, 241,
+ 241, 241, 241, 241, 241, 241, 241, 241,
+ 241, 241, 241, 242, 242, 242, 242, 242,
+ 242, 242, 242, 242, 242, 242, 242, 242,
+ 242, 242, 243, 243, 243, 243, 243, 243,
+ 243, 243, 243, 243, 243, 243, 243, 243,
+ 243, 244, 244, 244, 244, 244, 244, 244,
+ 244, 244, 244, 244, 244, 244, 244, 244,
+ 245, 245, 245, 245, 245, 245, 245, 245,
+ 245, 245, 245, 245, 245, 245, 245, 246,
+ 246, 246, 246, 246, 246, 246, 246, 246,
+ 246, 246, 246, 246, 246, 246, 246, 247,
+ 247, 247, 247, 247, 247, 247, 247, 247,
+ 247, 247, 247, 247, 247, 247, 247, 248,
+ 248, 248, 248, 248, 248, 248, 248, 248,
+ 248, 248, 248, 248, 248, 248, 248, 249,
+ 249, 249, 249, 249, 249, 249, 249, 249,
+ 249, 249, 249, 249, 249, 249, 249, 250,
+ 250, 250, 250, 250, 250, 250, 250, 250,
+ 250, 250, 250, 250, 250, 250, 250, 251,
+ 251, 251, 251, 251, 251, 251, 251, 251,
+ 251, 251, 251, 251, 251, 251, 251, 252,
+ 252, 252, 252, 252, 252, 252, 252, 252,
+ 252, 252, 252, 252, 252, 252, 252, 253,
+ 253, 253, 253, 253, 253, 253, 253, 253,
+ 253, 253, 253, 253, 253, 253, 253, 253,
+ 254, 254, 254, 254, 254, 254, 254, 254,
+ 254, 254, 254, 254, 254, 254, 254, 254,
+ 255, 255, 255, 255, 255, 255, 255, 255
+};
+
+#else
+#error "VAMEM version must be one of {VAMEM_VERSION_1, VAMEM_VERSION_2}"
+#endif
+
+void
+ia_css_config_gamma_table(void)
+{
+#if defined(HAS_VAMEM_VERSION_2)
+ memcpy(default_gamma_table.data.vamem_2, default_gamma_table_data,
+ sizeof(default_gamma_table_data));
+ default_gamma_table.vamem_type = IA_CSS_VAMEM_TYPE_2;
+#else
+ memcpy(default_gamma_table.data.vamem_1, default_gamma_table_data,
+ sizeof(default_gamma_table_data));
+ default_gamma_table.vamem_type = IA_CSS_VAMEM_TYPE_1;
+#endif
+}
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/gc/gc_1.0/ia_css_gc_table.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/gc/gc_1.0/ia_css_gc_table.host.h
new file mode 100644
index 000000000000..9686623d9cdd
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/gc/gc_1.0/ia_css_gc_table.host.h
@@ -0,0 +1,24 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_GC_TABLE_HOST_H
+#define __IA_CSS_GC_TABLE_HOST_H
+
+#include "ia_css_gc_types.h"
+
+extern struct ia_css_gamma_table default_gamma_table;
+
+void ia_css_config_gamma_table(void);
+
+#endif /* __IA_CSS_GC_TABLE_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/gc/gc_1.0/ia_css_gc_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/gc/gc_1.0/ia_css_gc_types.h
new file mode 100644
index 000000000000..dd9f0eda3353
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/gc/gc_1.0/ia_css_gc_types.h
@@ -0,0 +1,97 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_GC_TYPES_H
+#define __IA_CSS_GC_TYPES_H
+
+/** @file
+ * CSS-API header file for Gamma Correction parameters.
+ */
+
+#include "isp/kernels/ctc/ctc_1.0/ia_css_ctc_types.h" /* FIXME: Needed for ia_css_vamem_type */
+
+/** Fractional bits for GAMMA gain */
+#define IA_CSS_GAMMA_GAIN_K_SHIFT 13
+
+/** Number of elements in the gamma table. */
+#define IA_CSS_VAMEM_1_GAMMA_TABLE_SIZE_LOG2 10
+#define IA_CSS_VAMEM_1_GAMMA_TABLE_SIZE (1U<<IA_CSS_VAMEM_1_GAMMA_TABLE_SIZE_LOG2)
+
+/** Number of elements in the gamma table. */
+#define IA_CSS_VAMEM_2_GAMMA_TABLE_SIZE_LOG2 8
+#define IA_CSS_VAMEM_2_GAMMA_TABLE_SIZE ((1U<<IA_CSS_VAMEM_2_GAMMA_TABLE_SIZE_LOG2) + 1)
+
+/** Gamma table, used for Y(Luma) Gamma Correction.
+ *
+ * ISP block: GC1 (YUV Gamma Correction)
+ * ISP1: GC1 is used.
+ * (ISP2: GC2(sRGB Gamma Correction) is used.)
+ */
+/** IA_CSS_VAMEM_TYPE_1 (ISP2300) or
+ *  IA_CSS_VAMEM_TYPE_2 (ISP2400) */
+union ia_css_gc_data {
+ uint16_t vamem_1[IA_CSS_VAMEM_1_GAMMA_TABLE_SIZE];
+ /**< Y(Luma) Gamma table on vamem type 1. u0.8, [0,255] */
+ uint16_t vamem_2[IA_CSS_VAMEM_2_GAMMA_TABLE_SIZE];
+ /**< Y(Luma) Gamma table on vamem type 2. u0.8, [0,255] */
+};
+
+struct ia_css_gamma_table {
+ enum ia_css_vamem_type vamem_type;
+ union ia_css_gc_data data;
+};
+
+/** Gamma Correction configuration (used only for YUV Gamma Correction).
+ *
+ * ISP block: GC1 (YUV Gamma Correction)
+ * ISP1: GC1 is used.
+ * (ISP2: GC2 (sRGB Gamma Correction) is used.)
+ */
+struct ia_css_gc_config {
+ uint16_t gain_k1; /**< Gain to adjust U after YUV Gamma Correction.
+ u0.16, [0,65535],
+ default/ineffective 19000(0.29) */
+ uint16_t gain_k2; /**< Gain to adjust V after YUV Gamma Correction.
+ u0.16, [0,65535],
+ default/ineffective 19000(0.29) */
+};
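+
+/* Worked example (informative; the guard macro is hypothetical): gain_k1 and
+ * gain_k2 are u0.16 fixed point, so a gain of 0.29 is round(0.29 * 65536) =
+ * 19005, which matches the documented default of about 19000.
+ * ia_css_gc_encode() converts these 16-bit fractions down to
+ * IA_CSS_GAMMA_GAIN_K_SHIFT fractional bits via uDIGIT_FITTING().
+ */
+#ifdef IA_CSS_GC_EXAMPLE
+static const struct ia_css_gc_config example_gc_config = {
+	.gain_k1 = 19000,	/* ~0.29 in u0.16 */
+	.gain_k2 = 19000,	/* ~0.29 in u0.16 */
+};
+#endif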
+
+/** Chroma Enhancement configuration.
+ *
+ * This parameter specifies range of chroma output level.
+ * The standard range is [0,255] or [16,240].
+ *
+ * ISP block: CE1
+ * ISP1: CE1 is used.
+ * (ISP2: CE1 is not used.)
+ */
+struct ia_css_ce_config {
+ uint8_t uv_level_min; /**< Minimum of chroma output level.
+ u0.8, [0,255], default/ineffective 0 */
+ uint8_t uv_level_max; /**< Maximum of chroma output level.
+ u0.8, [0,255], default/ineffective 255 */
+};
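+
+/* Example (informative; the guard macro is hypothetical): the two standard
+ * chroma output ranges mentioned above, in the u0.8 encoding documented per
+ * field.
+ */
+#ifdef IA_CSS_CE_EXAMPLE
+static const struct ia_css_ce_config example_ce_full_range = {
+	.uv_level_min = 0,
+	.uv_level_max = 255
+};
+static const struct ia_css_ce_config example_ce_reduced_range = {
+	.uv_level_min = 16,
+	.uv_level_max = 240
+};
+#endif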
+
+/** Multi-Axes Color Correction (MACC) configuration.
+ *
+ * ISP block: MACC2 (MACC by matrix and exponent (ia_css_macc_config))
+ * (ISP1: MACC1 (MACC by only matrix) is used.)
+ * ISP2: MACC2 is used.
+ */
+struct ia_css_macc_config {
+ uint8_t exp; /**< Common exponent of ia_css_macc_table.
+ u8.0, [0,13], default 1, ineffective 1 */
+};
+
+#endif /* __IA_CSS_GC_TYPES_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/gc/gc_2/ia_css_gc2.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/gc/gc_2/ia_css_gc2.host.c
new file mode 100644
index 000000000000..0fb1a919fccd
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/gc/gc_2/ia_css_gc2.host.c
@@ -0,0 +1,110 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "ia_css_types.h"
+#include "sh_css_defs.h"
+#ifndef IA_CSS_NO_DEBUG
+/* FIXME: See BZ 4427 */
+#include "ia_css_debug.h"
+#endif
+#include "csc/csc_1.0/ia_css_csc.host.h"
+#include "vamem.h"
+
+#include "ia_css_gc2.host.h"
+
+const struct ia_css_cc_config default_yuv2rgb_cc_config = {
+ 12,
+ {4096, -4096, 4096, 4096, 4096, 0, 4096, -4096, -4096}
+};
+
+const struct ia_css_cc_config default_rgb2yuv_cc_config = {
+ 13,
+ {2449, 4809, 934, -1382, -2714, 4096, 4096, -3430, -666}
+};
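+
+/* Informative note (the meaning of the first initialiser is an assumption,
+ * since struct ia_css_cc_config is defined elsewhere): reading it as the
+ * number of fractional bits of the 3x3 matrix, the rgb2yuv default above
+ * (13 fractional bits) works out to rows of roughly (0.299, 0.587, 0.114),
+ * (-0.169, -0.331, 0.5) and (0.5, -0.419, -0.081), i.e. the BT.601
+ * RGB-to-YUV coefficients, while 4096 in the yuv2rgb matrix (12 fractional
+ * bits) represents 1.0.
+ */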
+
+void
+ia_css_yuv2rgb_encode(
+ struct sh_css_isp_csc_params *to,
+ const struct ia_css_cc_config *from,
+ unsigned size)
+{
+ ia_css_encode_cc(to, from, size);
+}
+
+void
+ia_css_rgb2yuv_encode(
+ struct sh_css_isp_csc_params *to,
+ const struct ia_css_cc_config *from,
+ unsigned size)
+{
+ ia_css_encode_cc(to, from, size);
+}
+
+void
+ia_css_r_gamma_vamem_encode(
+ struct sh_css_isp_rgb_gamma_vamem_params *to,
+ const struct ia_css_rgb_gamma_table *from,
+ unsigned size)
+{
+ (void)size;
+	memcpy(&to->gc, &from->data, sizeof(to->gc));
+}
+
+void
+ia_css_g_gamma_vamem_encode(
+ struct sh_css_isp_rgb_gamma_vamem_params *to,
+ const struct ia_css_rgb_gamma_table *from,
+ unsigned size)
+{
+ (void)size;
+	memcpy(&to->gc, &from->data, sizeof(to->gc));
+}
+
+void
+ia_css_b_gamma_vamem_encode(
+ struct sh_css_isp_rgb_gamma_vamem_params *to,
+ const struct ia_css_rgb_gamma_table *from,
+ unsigned size)
+{
+ (void)size;
+	memcpy(&to->gc, &from->data, sizeof(to->gc));
+}
+
+#ifndef IA_CSS_NO_DEBUG
+void
+ia_css_yuv2rgb_dump(
+ const struct sh_css_isp_csc_params *yuv2rgb,
+ unsigned level)
+{
+ ia_css_cc_dump(yuv2rgb, level, "YUV to RGB Conversion");
+}
+
+void
+ia_css_rgb2yuv_dump(
+ const struct sh_css_isp_csc_params *rgb2yuv,
+ unsigned level)
+{
+ ia_css_cc_dump(rgb2yuv, level, "RGB to YUV Conversion");
+}
+
+void
+ia_css_rgb_gamma_table_debug_dtrace(
+ const struct ia_css_rgb_gamma_table *config,
+ unsigned level)
+{
+ (void)config;
+ (void)level;
+}
+#endif
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/gc/gc_2/ia_css_gc2.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/gc/gc_2/ia_css_gc2.host.h
new file mode 100644
index 000000000000..ba140eefd525
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/gc/gc_2/ia_css_gc2.host.h
@@ -0,0 +1,79 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_GC2_HOST_H
+#define __IA_CSS_GC2_HOST_H
+
+#include "ia_css_gc2_types.h"
+#include "ia_css_gc2_param.h"
+#include "ia_css_gc2_table.host.h"
+
+extern const struct ia_css_cc_config default_yuv2rgb_cc_config;
+extern const struct ia_css_cc_config default_rgb2yuv_cc_config;
+
+void
+ia_css_yuv2rgb_encode(
+ struct sh_css_isp_csc_params *to,
+ const struct ia_css_cc_config *from,
+ unsigned size);
+
+void
+ia_css_rgb2yuv_encode(
+ struct sh_css_isp_csc_params *to,
+ const struct ia_css_cc_config *from,
+ unsigned size);
+
+void
+ia_css_r_gamma_vamem_encode(
+ struct sh_css_isp_rgb_gamma_vamem_params *to,
+ const struct ia_css_rgb_gamma_table *from,
+ unsigned size);
+
+void
+ia_css_g_gamma_vamem_encode(
+ struct sh_css_isp_rgb_gamma_vamem_params *to,
+ const struct ia_css_rgb_gamma_table *from,
+ unsigned size);
+
+void
+ia_css_b_gamma_vamem_encode(
+ struct sh_css_isp_rgb_gamma_vamem_params *to,
+ const struct ia_css_rgb_gamma_table *from,
+ unsigned size);
+
+#ifndef IA_CSS_NO_DEBUG
+void
+ia_css_yuv2rgb_dump(
+ const struct sh_css_isp_csc_params *yuv2rgb,
+ unsigned level);
+
+void
+ia_css_rgb2yuv_dump(
+ const struct sh_css_isp_csc_params *rgb2yuv,
+ unsigned level);
+
+void
+ia_css_rgb_gamma_table_debug_dtrace(
+ const struct ia_css_rgb_gamma_table *config,
+ unsigned level);
+
+#define ia_css_yuv2rgb_debug_dtrace ia_css_cc_config_debug_dtrace
+#define ia_css_rgb2yuv_debug_dtrace ia_css_cc_config_debug_dtrace
+#define ia_css_r_gamma_debug_dtrace ia_css_rgb_gamma_table_debug_dtrace
+#define ia_css_g_gamma_debug_dtrace ia_css_rgb_gamma_table_debug_dtrace
+#define ia_css_b_gamma_debug_dtrace ia_css_rgb_gamma_table_debug_dtrace
+
+#endif
+
+#endif /* __IA_CSS_GC2_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/gc/gc_2/ia_css_gc2_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/gc/gc_2/ia_css_gc2_param.h
new file mode 100644
index 000000000000..d25239f4d86f
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/gc/gc_2/ia_css_gc2_param.h
@@ -0,0 +1,43 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_GC2_PARAM_H
+#define __IA_CSS_GC2_PARAM_H
+
+#include "type_support.h"
+/* Extend GC1 */
+#include "ia_css_gc2_types.h"
+#include "gc/gc_1.0/ia_css_gc_param.h"
+#include "csc/csc_1.0/ia_css_csc_param.h"
+
+#ifndef PIPE_GENERATION
+#if defined(IS_VAMEM_VERSION_1)
+#define SH_CSS_ISP_RGB_GAMMA_TABLE_SIZE IA_CSS_VAMEM_1_RGB_GAMMA_TABLE_SIZE
+#elif defined(IS_VAMEM_VERSION_2)
+#define SH_CSS_ISP_RGB_GAMMA_TABLE_SIZE IA_CSS_VAMEM_2_RGB_GAMMA_TABLE_SIZE
+#else
+#error "Undefined vamem version"
+#endif
+
+#else
+/* For pipe generation, the size is not relevant */
+#define SH_CSS_ISP_RGB_GAMMA_TABLE_SIZE 0
+#endif
+
+/* This should be vamem_data_t, but that breaks the pipe generator */
+struct sh_css_isp_rgb_gamma_vamem_params {
+ uint16_t gc[SH_CSS_ISP_RGB_GAMMA_TABLE_SIZE];
+};
+
+#endif /* __IA_CSS_GC2_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/gc/gc_2/ia_css_gc2_table.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/gc/gc_2/ia_css_gc2_table.host.c
new file mode 100644
index 000000000000..f14a66b78714
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/gc/gc_2/ia_css_gc2_table.host.c
@@ -0,0 +1,132 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <type_support.h>
+#include <string_support.h> /* memcpy */
+#include "system_global.h"
+#include "vamem.h"
+#include "ia_css_types.h"
+#include "ia_css_gc2_table.host.h"
+
+struct ia_css_rgb_gamma_table default_r_gamma_table;
+struct ia_css_rgb_gamma_table default_g_gamma_table;
+struct ia_css_rgb_gamma_table default_b_gamma_table;
+
+/* Identical default gamma table for R, G, and B. */
+
+#if defined(HAS_VAMEM_VERSION_2)
+
+static const uint16_t
+default_gamma_table_data[IA_CSS_VAMEM_2_RGB_GAMMA_TABLE_SIZE] = {
+ 0, 72, 144, 216, 288, 360, 426, 486,
+ 541, 592, 641, 687, 730, 772, 812, 850,
+ 887, 923, 958, 991, 1024, 1055, 1086, 1117,
+1146, 1175, 1203, 1230, 1257, 1284, 1310, 1335,
+1360, 1385, 1409, 1433, 1457, 1480, 1502, 1525,
+1547, 1569, 1590, 1612, 1632, 1653, 1674, 1694,
+1714, 1734, 1753, 1772, 1792, 1811, 1829, 1848,
+1866, 1884, 1902, 1920, 1938, 1955, 1973, 1990,
+2007, 2024, 2040, 2057, 2074, 2090, 2106, 2122,
+2138, 2154, 2170, 2185, 2201, 2216, 2231, 2247,
+2262, 2277, 2291, 2306, 2321, 2335, 2350, 2364,
+2378, 2393, 2407, 2421, 2435, 2449, 2462, 2476,
+2490, 2503, 2517, 2530, 2543, 2557, 2570, 2583,
+2596, 2609, 2622, 2634, 2647, 2660, 2673, 2685,
+2698, 2710, 2722, 2735, 2747, 2759, 2771, 2783,
+2795, 2807, 2819, 2831, 2843, 2855, 2867, 2878,
+2890, 2901, 2913, 2924, 2936, 2947, 2958, 2970,
+2981, 2992, 3003, 3014, 3025, 3036, 3047, 3058,
+3069, 3080, 3091, 3102, 3112, 3123, 3134, 3144,
+3155, 3165, 3176, 3186, 3197, 3207, 3217, 3228,
+3238, 3248, 3258, 3268, 3279, 3289, 3299, 3309,
+3319, 3329, 3339, 3349, 3358, 3368, 3378, 3388,
+3398, 3407, 3417, 3427, 3436, 3446, 3455, 3465,
+3474, 3484, 3493, 3503, 3512, 3521, 3531, 3540,
+3549, 3559, 3568, 3577, 3586, 3595, 3605, 3614,
+3623, 3632, 3641, 3650, 3659, 3668, 3677, 3686,
+3694, 3703, 3712, 3721, 3730, 3739, 3747, 3756,
+3765, 3773, 3782, 3791, 3799, 3808, 3816, 3825,
+3833, 3842, 3850, 3859, 3867, 3876, 3884, 3893,
+3901, 3909, 3918, 3926, 3934, 3942, 3951, 3959,
+3967, 3975, 3984, 3992, 4000, 4008, 4016, 4024,
+4032, 4040, 4048, 4056, 4064, 4072, 4080, 4088,
+4095
+};
+#elif defined(HAS_VAMEM_VERSION_1)
+
+static const uint16_t
+default_gamma_table_data[IA_CSS_VAMEM_1_RGB_GAMMA_TABLE_SIZE] = {
+ 0, 72, 144, 216, 288, 360, 426, 486,
+ 541, 592, 641, 687, 730, 772, 812, 850,
+ 887, 923, 958, 991, 1024, 1055, 1086, 1117,
+1146, 1175, 1203, 1230, 1257, 1284, 1310, 1335,
+1360, 1385, 1409, 1433, 1457, 1480, 1502, 1525,
+1547, 1569, 1590, 1612, 1632, 1653, 1674, 1694,
+1714, 1734, 1753, 1772, 1792, 1811, 1829, 1848,
+1866, 1884, 1902, 1920, 1938, 1955, 1973, 1990,
+2007, 2024, 2040, 2057, 2074, 2090, 2106, 2122,
+2138, 2154, 2170, 2185, 2201, 2216, 2231, 2247,
+2262, 2277, 2291, 2306, 2321, 2335, 2350, 2364,
+2378, 2393, 2407, 2421, 2435, 2449, 2462, 2476,
+2490, 2503, 2517, 2530, 2543, 2557, 2570, 2583,
+2596, 2609, 2622, 2634, 2647, 2660, 2673, 2685,
+2698, 2710, 2722, 2735, 2747, 2759, 2771, 2783,
+2795, 2807, 2819, 2831, 2843, 2855, 2867, 2878,
+2890, 2901, 2913, 2924, 2936, 2947, 2958, 2970,
+2981, 2992, 3003, 3014, 3025, 3036, 3047, 3058,
+3069, 3080, 3091, 3102, 3112, 3123, 3134, 3144,
+3155, 3165, 3176, 3186, 3197, 3207, 3217, 3228,
+3238, 3248, 3258, 3268, 3279, 3289, 3299, 3309,
+3319, 3329, 3339, 3349, 3358, 3368, 3378, 3388,
+3398, 3407, 3417, 3427, 3436, 3446, 3455, 3465,
+3474, 3484, 3493, 3503, 3512, 3521, 3531, 3540,
+3549, 3559, 3568, 3577, 3586, 3595, 3605, 3614,
+3623, 3632, 3641, 3650, 3659, 3668, 3677, 3686,
+3694, 3703, 3712, 3721, 3730, 3739, 3747, 3756,
+3765, 3773, 3782, 3791, 3799, 3808, 3816, 3825,
+3833, 3842, 3850, 3859, 3867, 3876, 3884, 3893,
+3901, 3909, 3918, 3926, 3934, 3942, 3951, 3959,
+3967, 3975, 3984, 3992, 4000, 4008, 4016, 4024,
+4032, 4040, 4048, 4056, 4064, 4072, 4080, 4088
+};
+#else
+#error "VAMEM version must be one of {VAMEM_VERSION_1, VAMEM_VERSION_2}"
+#endif
+
+void
+ia_css_config_rgb_gamma_tables(void)
+{
+#if defined(HAS_VAMEM_VERSION_2)
+ default_r_gamma_table.vamem_type = IA_CSS_VAMEM_TYPE_2;
+ default_g_gamma_table.vamem_type = IA_CSS_VAMEM_TYPE_2;
+ default_b_gamma_table.vamem_type = IA_CSS_VAMEM_TYPE_2;
+ memcpy(default_r_gamma_table.data.vamem_2, default_gamma_table_data,
+ sizeof(default_gamma_table_data));
+ memcpy(default_g_gamma_table.data.vamem_2, default_gamma_table_data,
+ sizeof(default_gamma_table_data));
+ memcpy(default_b_gamma_table.data.vamem_2, default_gamma_table_data,
+ sizeof(default_gamma_table_data));
+#else
+ memcpy(default_r_gamma_table.data.vamem_1, default_gamma_table_data,
+ sizeof(default_gamma_table_data));
+ memcpy(default_g_gamma_table.data.vamem_1, default_gamma_table_data,
+ sizeof(default_gamma_table_data));
+ memcpy(default_b_gamma_table.data.vamem_1, default_gamma_table_data,
+ sizeof(default_gamma_table_data));
+ default_r_gamma_table.vamem_type = IA_CSS_VAMEM_TYPE_1;
+ default_g_gamma_table.vamem_type = IA_CSS_VAMEM_TYPE_1;
+ default_b_gamma_table.vamem_type = IA_CSS_VAMEM_TYPE_1;
+#endif
+}
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/gc/gc_2/ia_css_gc2_table.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/gc/gc_2/ia_css_gc2_table.host.h
new file mode 100644
index 000000000000..8686e6e3586c
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/gc/gc_2/ia_css_gc2_table.host.h
@@ -0,0 +1,26 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_GC2_TABLE_HOST_H
+#define __IA_CSS_GC2_TABLE_HOST_H
+
+#include "ia_css_gc2_types.h"
+
+extern struct ia_css_rgb_gamma_table default_r_gamma_table;
+extern struct ia_css_rgb_gamma_table default_g_gamma_table;
+extern struct ia_css_rgb_gamma_table default_b_gamma_table;
+
+void ia_css_config_rgb_gamma_tables(void);
+
+#endif /* __IA_CSS_GC2_TABLE_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/gc/gc_2/ia_css_gc2_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/gc/gc_2/ia_css_gc2_types.h
new file mode 100644
index 000000000000..e439583bdfb6
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/gc/gc_2/ia_css_gc2_types.h
@@ -0,0 +1,54 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_GC2_TYPES_H
+#define __IA_CSS_GC2_TYPES_H
+
+#include "isp/kernels/ctc/ctc_1.0/ia_css_ctc_types.h" /* FIXME: needed for ia_css_vamem_type */
+
+/** @file
+ * CSS-API header file for sRGB Gamma Correction (GC2) parameters.
+ */
+
+/** sRGB Gamma table, used for sRGB Gamma Correction.
+ *
+ * ISP block: GC2 (sRGB Gamma Correction)
+ * (ISP1: GC1(YUV Gamma Correction) is used.)
+ * ISP2: GC2 is used.
+ */
+
+/** Number of elements in the sRGB gamma table. */
+#define IA_CSS_VAMEM_1_RGB_GAMMA_TABLE_SIZE_LOG2 8
+#define IA_CSS_VAMEM_1_RGB_GAMMA_TABLE_SIZE (1U<<IA_CSS_VAMEM_1_RGB_GAMMA_TABLE_SIZE_LOG2)
+
+/** Number of elements in the sRGB gamma table. */
+#define IA_CSS_VAMEM_2_RGB_GAMMA_TABLE_SIZE_LOG2 8
+#define IA_CSS_VAMEM_2_RGB_GAMMA_TABLE_SIZE ((1U<<IA_CSS_VAMEM_2_RGB_GAMMA_TABLE_SIZE_LOG2) + 1)
+
+/** IA_CSS_VAMEM_TYPE_1 (ISP2300) or
+ *  IA_CSS_VAMEM_TYPE_2 (ISP2400) */
+union ia_css_rgb_gamma_data {
+ uint16_t vamem_1[IA_CSS_VAMEM_1_RGB_GAMMA_TABLE_SIZE];
+ /**< RGB Gamma table on vamem type1. This table is not used,
+ because sRGB Gamma Correction is not implemented for ISP2300. */
+ uint16_t vamem_2[IA_CSS_VAMEM_2_RGB_GAMMA_TABLE_SIZE];
+ /**< RGB Gamma table on vamem type2. u0.12, [0,4095] */
+};
+
+struct ia_css_rgb_gamma_table {
+ enum ia_css_vamem_type vamem_type;
+ union ia_css_rgb_gamma_data data;
+};
+
+#endif /* __IA_CSS_GC2_TYPES_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/hdr/ia_css_hdr.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/hdr/ia_css_hdr.host.c
new file mode 100644
index 000000000000..8215ae47d384
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/hdr/ia_css_hdr.host.c
@@ -0,0 +1,41 @@
+/* Release Version: irci_stable_candrpv_0415_20150521_0458 */
+/* Release Version: irci_ecr-master_20150911_0724 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "ia_css_hdr.host.h"
+
+void
+ia_css_hdr_init_config(
+ struct sh_css_isp_hdr_params *to,
+ const struct ia_css_hdr_config *from,
+ unsigned size)
+{
+ int i;
+ (void)size;
+
+ for (i = 0; i < HDR_NUM_INPUT_FRAMES - 1; i++) {
+ to->irradiance.match_shift[i] = from->irradiance.match_shift[i];
+ to->irradiance.match_mul[i] = from->irradiance.match_mul[i];
+ to->irradiance.thr_low[i] = from->irradiance.thr_low[i];
+ to->irradiance.thr_high[i] = from->irradiance.thr_high[i];
+ to->irradiance.thr_coeff[i] = from->irradiance.thr_coeff[i];
+ to->irradiance.thr_shift[i] = from->irradiance.thr_shift[i];
+ }
+ to->irradiance.test_irr = from->irradiance.test_irr;
+ to->irradiance.weight_bpp = from->irradiance.weight_bpp;
+
+ to->deghost.test_deg = from->deghost.test_deg;
+ to->exclusion.test_excl = from->exclusion.test_excl;
+}
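+
+/* Usage sketch (illustrative only; the guard macro, function name and the
+ * fully initialised "my_hdr_config" argument are hypothetical): translating a
+ * host-side HDR configuration into the ISP parameter layout.
+ */
+#ifdef IA_CSS_HDR_EXAMPLE
+static void example_hdr_usage(const struct ia_css_hdr_config *my_hdr_config)
+{
+	struct sh_css_isp_hdr_params isp_params;
+
+	ia_css_hdr_init_config(&isp_params, my_hdr_config,
+			       sizeof(isp_params));
+}
+#endif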
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/hdr/ia_css_hdr.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/hdr/ia_css_hdr.host.h
new file mode 100644
index 000000000000..8f89bc8f1ca2
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/hdr/ia_css_hdr.host.h
@@ -0,0 +1,31 @@
+/* Release Version: irci_stable_candrpv_0415_20150521_0458 */
+/* Release Version: irci_ecr-master_20150911_0724 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_HDR_HOST_H
+#define __IA_CSS_HDR_HOST_H
+
+#include "ia_css_hdr_param.h"
+#include "ia_css_hdr_types.h"
+
+extern const struct ia_css_hdr_config default_hdr_config;
+
+void
+ia_css_hdr_init_config(
+ struct sh_css_isp_hdr_params *to,
+ const struct ia_css_hdr_config *from,
+ unsigned size);
+
+#endif /* __IA_CSS_HDR_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/hdr/ia_css_hdr_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/hdr/ia_css_hdr_param.h
new file mode 100644
index 000000000000..1c053af7d0d3
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/hdr/ia_css_hdr_param.h
@@ -0,0 +1,53 @@
+/* Release Version: irci_stable_candrpv_0415_20150521_0458 */
+/* Release Version: irci_ecr-master_20150911_0724 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_HDR_PARAMS_H
+#define __IA_CSS_HDR_PARAMS_H
+
+#include "type_support.h"
+
+#define HDR_NUM_INPUT_FRAMES (3)
+
+/* HDR irradiance map parameters on ISP. */
+struct sh_css_hdr_irradiance_params {
+ int32_t test_irr;
+ int32_t match_shift[HDR_NUM_INPUT_FRAMES - 1]; /* Histogram matching shift parameter */
+ int32_t match_mul[HDR_NUM_INPUT_FRAMES - 1]; /* Histogram matching multiplication parameter */
+ int32_t thr_low[HDR_NUM_INPUT_FRAMES - 1]; /* Weight map soft threshold low bound parameter */
+ int32_t thr_high[HDR_NUM_INPUT_FRAMES - 1]; /* Weight map soft threshold high bound parameter */
+ int32_t thr_coeff[HDR_NUM_INPUT_FRAMES - 1]; /* Soft threshold linear function coefficient */
+ int32_t thr_shift[HDR_NUM_INPUT_FRAMES - 1]; /* Soft threshold precision shift parameter */
+ int32_t weight_bpp; /* Weight map bits per pixel */
+};
+
+/* HDR deghosting parameters on ISP */
+struct sh_css_hdr_deghost_params {
+ int32_t test_deg;
+};
+
+/* HDR exclusion parameters on ISP */
+struct sh_css_hdr_exclusion_params {
+ int32_t test_excl;
+};
+
+/* HDR ISP parameters */
+struct sh_css_isp_hdr_params {
+ struct sh_css_hdr_irradiance_params irradiance;
+ struct sh_css_hdr_deghost_params deghost;
+ struct sh_css_hdr_exclusion_params exclusion;
+};
+
+#endif /* __IA_CSS_HDR_PARAMS_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/hdr/ia_css_hdr_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/hdr/ia_css_hdr_types.h
new file mode 100644
index 000000000000..c3345b32e3e6
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/hdr/ia_css_hdr_types.h
@@ -0,0 +1,64 @@
+/* Release Version: irci_stable_candrpv_0415_20150521_0458 */
+/* Release Version: irci_ecr-master_20150911_0724 */
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_HDR_TYPES_H
+#define __IA_CSS_HDR_TYPES_H
+
+#define IA_CSS_HDR_MAX_NUM_INPUT_FRAMES (3)
+
+/**
+ * \brief HDR Irradiance Parameters
+ * \details Currently HDR parameters are used only for testing purposes
+ */
+struct ia_css_hdr_irradiance_params {
+ int test_irr; /**< Test parameter */
+ int match_shift[IA_CSS_HDR_MAX_NUM_INPUT_FRAMES - 1]; /**< Histogram matching shift parameter */
+ int match_mul[IA_CSS_HDR_MAX_NUM_INPUT_FRAMES - 1]; /**< Histogram matching multiplication parameter */
+ int thr_low[IA_CSS_HDR_MAX_NUM_INPUT_FRAMES - 1]; /**< Weight map soft threshold low bound parameter */
+ int thr_high[IA_CSS_HDR_MAX_NUM_INPUT_FRAMES - 1]; /**< Weight map soft threshold high bound parameter */
+ int thr_coeff[IA_CSS_HDR_MAX_NUM_INPUT_FRAMES - 1]; /**< Soft threshold linear function coefficient */
+ int thr_shift[IA_CSS_HDR_MAX_NUM_INPUT_FRAMES - 1]; /**< Soft threshold precision shift parameter */
+ int weight_bpp; /**< Weight map bits per pixel */
+};
+
+/**
+ * \brief HDR Deghosting Parameters
+ * \details Currently HDR parameters are used only for testing purposes
+ */
+struct ia_css_hdr_deghost_params {
+ int test_deg; /**< Test parameter */
+};
+
+/**
+ * \brief HDR Exclusion Parameters
+ * \details Currently HDR parameters are used only for testing purposes
+ */
+struct ia_css_hdr_exclusion_params {
+ int test_excl; /**< Test parameter */
+};
+
+/**
+ * \brief HDR public parameters.
+ * \details Struct with all parameters for HDR that can be set from
+ * the CSS API. Currently, only test parameters are defined.
+ */
+struct ia_css_hdr_config {
+ struct ia_css_hdr_irradiance_params irradiance; /**< HDR irradiance parameters */
+ struct ia_css_hdr_deghost_params deghost; /**< HDR deghosting parameters */
+ struct ia_css_hdr_exclusion_params exclusion; /**< HDR exclusion parameters */
+};
+
+#endif /* __IA_CSS_HDR_TYPES_H */
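For orientation, a minimal sketch of how the two sides fit together: the host-visible struct ia_css_hdr_config defined above is translated into the ISP-side struct sh_css_isp_hdr_params by ia_css_hdr_init_config() from earlier in this patch. All numeric values below are placeholders, not meaningful tuning data.

#include "ia_css_hdr.host.h"

static void hdr_config_sketch(void)
{
	struct ia_css_hdr_config cfg = { 0 };      /* host-side parameters */
	struct sh_css_isp_hdr_params isp_params;   /* ISP-side layout */
	int i;

	cfg.irradiance.test_irr = 1;               /* placeholder values */
	cfg.irradiance.weight_bpp = 8;
	for (i = 0; i < IA_CSS_HDR_MAX_NUM_INPUT_FRAMES - 1; i++) {
		cfg.irradiance.match_shift[i] = 0;
		cfg.irradiance.match_mul[i] = 1;
	}

	/* Field-by-field copy into the ISP parameter struct. */
	ia_css_hdr_init_config(&isp_params, &cfg, sizeof(isp_params));
}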
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/io_ls/bayer_io_ls/ia_css_bayer_io.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/io_ls/bayer_io_ls/ia_css_bayer_io.host.c
new file mode 100644
index 000000000000..a31c9e828e22
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/io_ls/bayer_io_ls/ia_css_bayer_io.host.c
@@ -0,0 +1,86 @@
+#ifndef ISP2401
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "ia_css_bayer_io.host.h"
+#include "dma.h"
+#include "math_support.h"
+#ifndef IA_CSS_NO_DEBUG
+#include "ia_css_debug.h"
+#endif
+#include "ia_css_isp_params.h"
+#include "ia_css_frame.h"
+
+void
+ia_css_bayer_io_config(
+ const struct ia_css_binary *binary,
+ const struct sh_css_binary_args *args)
+{
+ const struct ia_css_frame *in_frame = args->in_frame;
+ const struct ia_css_frame **out_frames = (const struct ia_css_frame **)& args->out_frame;
+ const struct ia_css_frame_info *in_frame_info = (in_frame) ? &in_frame->info : &binary->in_frame_info;
+
+ const unsigned ddr_bits_per_element = sizeof(short) * 8;
+ const unsigned ddr_elems_per_word = ceil_div(HIVE_ISP_DDR_WORD_BITS, ddr_bits_per_element);
+ unsigned size_get = 0, size_put = 0;
+ unsigned offset = 0;
+
+ if (binary->info->mem_offsets.offsets.param) {
+ size_get = binary->info->mem_offsets.offsets.param->dmem.get.size;
+ offset = binary->info->mem_offsets.offsets.param->dmem.get.offset;
+ }
+
+ if (size_get) {
+ struct ia_css_common_io_config *to = (struct ia_css_common_io_config *)&binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset];
+ struct dma_port_config config;
+#ifndef IA_CSS_NO_DEBUG
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_bayer_io_config() get part enter:\n");
+#endif
+
+ ia_css_dma_configure_from_info(&config, in_frame_info);
+ // The base_address of the input frame will be set in the ISP
+ to->width = in_frame_info->res.width;
+ to->height = in_frame_info->res.height;
+ to->stride = config.stride;
+ to->ddr_elems_per_word = ddr_elems_per_word;
+#ifndef IA_CSS_NO_DEBUG
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_bayer_io_config() get part leave:\n");
+#endif
+ }
+
+ if (binary->info->mem_offsets.offsets.param) {
+ size_put = binary->info->mem_offsets.offsets.param->dmem.put.size;
+ offset = binary->info->mem_offsets.offsets.param->dmem.put.offset;
+ }
+
+ if (size_put) {
+ struct ia_css_common_io_config *to = (struct ia_css_common_io_config *)&binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset];
+ struct dma_port_config config;
+#ifndef IA_CSS_NO_DEBUG
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_bayer_io_config() put part enter:\n");
+#endif
+
+ ia_css_dma_configure_from_info(&config, &out_frames[0]->info);
+ to->base_address = out_frames[0]->data;
+ to->width = out_frames[0]->info.res.width;
+ to->height = out_frames[0]->info.res.height;
+ to->stride = config.stride;
+ to->ddr_elems_per_word = ddr_elems_per_word;
+
+#ifndef IA_CSS_NO_DEBUG
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_bayer_io_config() put part leave:\n");
+#endif
+ }
+}
+#endif
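The ddr_elems_per_word value computed in ia_css_bayer_io_config() above is simply a ceiling division of the DDR word width by the 16-bit element width. A worked standalone sketch follows; the 256-bit word width is an assumed example value, since HIVE_ISP_DDR_WORD_BITS is defined elsewhere in the driver.

#include <stdio.h>

/* Same rounding-up behaviour as the ceil_div() helper used above. */
static unsigned ceil_div(unsigned a, unsigned b)
{
	return (a + b - 1) / b;
}

int main(void)
{
	const unsigned ddr_word_bits = 256;                      /* assumption for illustration */
	const unsigned ddr_bits_per_element = sizeof(short) * 8; /* 16 bits per bayer element */
	const unsigned elems = ceil_div(ddr_word_bits, ddr_bits_per_element);

	printf("%u elements per DDR word\n", elems);             /* prints 16 */
	return 0;
}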
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/io_ls/bayer_io_ls/ia_css_bayer_io.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/io_ls/bayer_io_ls/ia_css_bayer_io.host.h
new file mode 100644
index 000000000000..7e5d4cfe3454
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/io_ls/bayer_io_ls/ia_css_bayer_io.host.h
@@ -0,0 +1,31 @@
+#ifndef ISP2401
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __BAYER_IO_HOST_H
+#define __BAYER_IO_HOST_H
+
+#include "ia_css_bayer_io_param.h"
+#include "ia_css_bayer_io_types.h"
+#include "ia_css_binary.h"
+#include "sh_css_internal.h"
+
+
+void
+ia_css_bayer_io_config(
+ const struct ia_css_binary *binary,
+ const struct sh_css_binary_args *args);
+
+#endif /*__BAYER_IO_HOST_H */
+#endif
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/io_ls/bayer_io_ls/ia_css_bayer_io_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/io_ls/bayer_io_ls/ia_css_bayer_io_param.h
new file mode 100644
index 000000000000..7b6f581c4a80
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/io_ls/bayer_io_ls/ia_css_bayer_io_param.h
@@ -0,0 +1,22 @@
+#ifndef ISP2401
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_BAYER_IO_PARAM
+#define __IA_CSS_BAYER_IO_PARAM
+
+#include "../common/ia_css_common_io_param.h"
+
+#endif /* __IA_CSS_BAYER_IO_PARAM */
+#endif
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/io_ls/bayer_io_ls/ia_css_bayer_io_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/io_ls/bayer_io_ls/ia_css_bayer_io_types.h
new file mode 100644
index 000000000000..2291b01452f8
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/io_ls/bayer_io_ls/ia_css_bayer_io_types.h
@@ -0,0 +1,22 @@
+#ifndef ISP2401
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_BAYER_IO_TYPES_H
+#define __IA_CSS_BAYER_IO_TYPES_H
+
+#include "../common/ia_css_common_io_types.h"
+
+#endif /* __IA_CSS_BAYER_IO_TYPES_H */
+#endif
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/io_ls/common/ia_css_common_io_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/io_ls/common/ia_css_common_io_param.h
new file mode 100644
index 000000000000..f1ce03aa7951
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/io_ls/common/ia_css_common_io_param.h
@@ -0,0 +1,22 @@
+#ifndef ISP2401
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_COMMON_IO_PARAM
+#define __IA_CSS_COMMON_IO_PARAM
+
+#include "../common/ia_css_common_io_types.h"
+
+#endif /* __IA_CSS_COMMON_IO_PARAM */
+#endif
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/io_ls/common/ia_css_common_io_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/io_ls/common/ia_css_common_io_types.h
new file mode 100644
index 000000000000..8a9a97063264
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/io_ls/common/ia_css_common_io_types.h
@@ -0,0 +1,31 @@
+#ifndef ISP2401
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_COMMON_IO_TYPES
+#define __IA_CSS_COMMON_IO_TYPES
+
+#define MAX_IO_DMA_CHANNELS 2
+
+struct ia_css_common_io_config {
+ unsigned base_address;
+ unsigned width;
+ unsigned height;
+ unsigned stride;
+ unsigned ddr_elems_per_word;
+ unsigned dma_channel[MAX_IO_DMA_CHANNELS];
+};
+
+#endif /* __IA_CSS_COMMON_IO_TYPES */
+#endif
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/io_ls/plane_io_ls/ia_css_plane_io_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/io_ls/plane_io_ls/ia_css_plane_io_param.h
new file mode 100644
index 000000000000..213ef3b385aa
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/io_ls/plane_io_ls/ia_css_plane_io_param.h
@@ -0,0 +1,22 @@
+#ifndef ISP2401
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_PLANE_IO_PARAM_H
+#define __IA_CSS_PLANE_IO_PARAM_H
+
+#include "../common/ia_css_common_io_param.h"
+
+#endif /* __IA_CSS_PLANE_IO_PARAM_H */
+#endif
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/io_ls/plane_io_ls/ia_css_plane_io_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/io_ls/plane_io_ls/ia_css_plane_io_types.h
new file mode 100644
index 000000000000..d635741505e2
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/io_ls/plane_io_ls/ia_css_plane_io_types.h
@@ -0,0 +1,30 @@
+#ifndef ISP2401
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_PLANE_IO_TYPES_H
+#define __IA_CSS_PLANE_IO_TYPES_H
+
+#include "../common/ia_css_common_io_types.h"
+
+#define PLANE_IO_LS_NUM_PLANES 3
+
+struct ia_css_plane_io_config {
+ struct ia_css_common_io_config get_plane_io_config[PLANE_IO_LS_NUM_PLANES];
+ struct ia_css_common_io_config put_plane_io_config[PLANE_IO_LS_NUM_PLANES];
+};
+
+#endif /* __IA_CSS_PLANE_IO_TYPES_H */
+
+#endif
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/io_ls/yuv420_io_ls/ia_css_yuv420_io_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/io_ls/yuv420_io_ls/ia_css_yuv420_io_param.h
new file mode 100644
index 000000000000..52450a9a55a1
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/io_ls/yuv420_io_ls/ia_css_yuv420_io_param.h
@@ -0,0 +1,22 @@
+#ifndef ISP2401
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_YUV420_IO_PARAM
+#define __IA_CSS_YUV420_IO_PARAM
+
+#include "../common/ia_css_common_io_param.h"
+
+#endif
+#endif
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/io_ls/yuv420_io_ls/ia_css_yuv420_io_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/io_ls/yuv420_io_ls/ia_css_yuv420_io_types.h
new file mode 100644
index 000000000000..99ec1143b214
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/io_ls/yuv420_io_ls/ia_css_yuv420_io_types.h
@@ -0,0 +1,22 @@
+#ifndef ISP2401
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_YUV420_IO_TYPES
+#define __IA_CSS_YUV420_IO_TYPES
+
+#include "../common/ia_css_common_io_types.h"
+
+#endif
+#endif
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/io_ls/yuv444_io_ls/ia_css_yuv444_io_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/io_ls/yuv444_io_ls/ia_css_yuv444_io_param.h
new file mode 100644
index 000000000000..91fb5168c357
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/io_ls/yuv444_io_ls/ia_css_yuv444_io_param.h
@@ -0,0 +1,22 @@
+#ifndef ISP2401
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_YUV444_IO_PARAM
+#define __IA_CSS_YUV444_IO_PARAM
+
+#include "../common/ia_css_common_io_param.h"
+
+#endif
+#endif
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/io_ls/yuv444_io_ls/ia_css_yuv444_io_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/io_ls/yuv444_io_ls/ia_css_yuv444_io_types.h
new file mode 100644
index 000000000000..dac440309394
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/io_ls/yuv444_io_ls/ia_css_yuv444_io_types.h
@@ -0,0 +1,22 @@
+#ifndef ISP2401
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_YUV444_IO_TYPES
+#define __IA_CSS_YUV444_IO_TYPES
+
+#include "../common/ia_css_common_io_types.h"
+
+#endif
+#endif
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io.host.c
new file mode 100644
index 000000000000..78e159c04851
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io.host.c
@@ -0,0 +1,86 @@
+#ifdef ISP2401
+/**
+Support for Intel Camera Imaging ISP subsystem.
+Copyright (c) 2010 - 2015, Intel Corporation.
+
+This program is free software; you can redistribute it and/or modify it
+under the terms and conditions of the GNU General Public License,
+version 2, as published by the Free Software Foundation.
+
+This program is distributed in the hope it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+more details.
+*/
+
+#include "ia_css_bayer_io.host.h"
+#include "dma.h"
+#include "math_support.h"
+#ifndef IA_CSS_NO_DEBUG
+#include "ia_css_debug.h"
+#endif
+#include "ia_css_isp_params.h"
+#include "ia_css_frame.h"
+
+void
+ia_css_bayer_io_config(
+ const struct ia_css_binary *binary,
+ const struct sh_css_binary_args *args)
+{
+ const struct ia_css_frame *in_frame = args->in_frame;
+ const struct ia_css_frame **out_frames = (const struct ia_css_frame **)& args->out_frame;
+ const struct ia_css_frame_info *in_frame_info = (in_frame) ? &in_frame->info : &binary->in_frame_info;
+
+ const unsigned ddr_bits_per_element = sizeof(short) * 8;
+ const unsigned ddr_elems_per_word = ceil_div(HIVE_ISP_DDR_WORD_BITS, ddr_bits_per_element);
+ unsigned size_get = 0, size_put = 0;
+ unsigned offset = 0;
+
+ if (binary->info->mem_offsets.offsets.param) {
+ size_get = binary->info->mem_offsets.offsets.param->dmem.get.size;
+ offset = binary->info->mem_offsets.offsets.param->dmem.get.offset;
+ }
+
+ if (size_get) {
+ struct ia_css_common_io_config *to = (struct ia_css_common_io_config *)&binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset];
+ struct dma_port_config config;
+#ifndef IA_CSS_NO_DEBUG
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_bayer_io_config() get part enter:\n");
+#endif
+
+ ia_css_dma_configure_from_info(&config, in_frame_info);
+ // The base_address of the input frame will be set in the ISP
+ to->width = in_frame_info->res.width;
+ to->height = in_frame_info->res.height;
+ to->stride = config.stride;
+ to->ddr_elems_per_word = ddr_elems_per_word;
+#ifndef IA_CSS_NO_DEBUG
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_bayer_io_config() get part leave:\n");
+#endif
+ }
+
+ if (binary->info->mem_offsets.offsets.param) {
+ size_put = binary->info->mem_offsets.offsets.param->dmem.put.size;
+ offset = binary->info->mem_offsets.offsets.param->dmem.put.offset;
+ }
+
+ if (size_put) {
+ struct ia_css_common_io_config *to = (struct ia_css_common_io_config *)&binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset];
+ struct dma_port_config config;
+#ifndef IA_CSS_NO_DEBUG
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_bayer_io_config() put part enter:\n");
+#endif
+
+ ia_css_dma_configure_from_info(&config, &out_frames[0]->info);
+ to->base_address = out_frames[0]->data;
+ to->width = out_frames[0]->info.res.width;
+ to->height = out_frames[0]->info.res.height;
+ to->stride = config.stride;
+ to->ddr_elems_per_word = ddr_elems_per_word;
+
+#ifndef IA_CSS_NO_DEBUG
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_bayer_io_config() put part leave:\n");
+#endif
+ }
+}
+#endif
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io.host.h
new file mode 100644
index 000000000000..ab9fa31bfc5e
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io.host.h
@@ -0,0 +1,31 @@
+#ifdef ISP2401
+/**
+Support for Intel Camera Imaging ISP subsystem.
+Copyright (c) 2010 - 2015, Intel Corporation.
+
+This program is free software; you can redistribute it and/or modify it
+under the terms and conditions of the GNU General Public License,
+version 2, as published by the Free Software Foundation.
+
+This program is distributed in the hope it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+more details.
+*/
+
+#ifndef __BAYER_IO_HOST_H
+#define __BAYER_IO_HOST_H
+
+#include "ia_css_bayer_io_param.h"
+#include "ia_css_bayer_io_types.h"
+#include "ia_css_binary.h"
+#include "sh_css_internal.h"
+
+
+void
+ia_css_bayer_io_config(
+ const struct ia_css_binary *binary,
+ const struct sh_css_binary_args *args);
+
+#endif /*__BAYER_IO_HOST_H */
+#endif
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io_param.h
new file mode 100644
index 000000000000..bf5a3eccb330
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io_param.h
@@ -0,0 +1,22 @@
+#ifdef ISP2401
+/**
+Support for Intel Camera Imaging ISP subsystem.
+Copyright (c) 2010 - 2015, Intel Corporation.
+
+This program is free software; you can redistribute it and/or modify it
+under the terms and conditions of the GNU General Public License,
+version 2, as published by the Free Software Foundation.
+
+This program is distributed in the hope it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+more details.
+*/
+
+#ifndef __IA_CSS_BAYER_IO_PARAM
+#define __IA_CSS_BAYER_IO_PARAM
+
+#include "../common/ia_css_common_io_param.h"
+
+#endif /* __IA_CSS_BAYER_IO_PARAM */
+#endif
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io_types.h
new file mode 100644
index 000000000000..9e3c622db4d4
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io_types.h
@@ -0,0 +1,22 @@
+#ifdef ISP2401
+/**
+Support for Intel Camera Imaging ISP subsystem.
+Copyright (c) 2010 - 2015, Intel Corporation.
+
+This program is free software; you can redistribute it and/or modify it
+under the terms and conditions of the GNU General Public License,
+version 2, as published by the Free Software Foundation.
+
+This program is distributed in the hope it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+more details.
+*/
+
+#ifndef __IA_CSS_BAYER_IO_TYPES_H
+#define __IA_CSS_BAYER_IO_TYPES_H
+
+#include "../common/ia_css_common_io_types.h"
+
+#endif /* __IA_CSS_BAYER_IO_TYPES_H */
+#endif
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ipu2_io_ls/common/ia_css_common_io_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ipu2_io_ls/common/ia_css_common_io_param.h
new file mode 100644
index 000000000000..e5fdcfff0cf7
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ipu2_io_ls/common/ia_css_common_io_param.h
@@ -0,0 +1,22 @@
+#ifdef ISP2401
+/**
+Support for Intel Camera Imaging ISP subsystem.
+Copyright (c) 2010 - 2015, Intel Corporation.
+
+This program is free software; you can redistribute it and/or modify it
+under the terms and conditions of the GNU General Public License,
+version 2, as published by the Free Software Foundation.
+
+This program is distributed in the hope it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+more details.
+*/
+
+#ifndef __IA_CSS_COMMON_IO_PARAM
+#define __IA_CSS_COMMON_IO_PARAM
+
+#include "../common/ia_css_common_io_types.h"
+
+#endif /* __IA_CSS_COMMON_IO_PARAM */
+#endif
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ipu2_io_ls/common/ia_css_common_io_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ipu2_io_ls/common/ia_css_common_io_types.h
new file mode 100644
index 000000000000..0a19e2d1aff4
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ipu2_io_ls/common/ia_css_common_io_types.h
@@ -0,0 +1,31 @@
+#ifdef ISP2401
+/**
+Support for Intel Camera Imaging ISP subsystem.
+Copyright (c) 2010 - 2015, Intel Corporation.
+
+This program is free software; you can redistribute it and/or modify it
+under the terms and conditions of the GNU General Public License,
+version 2, as published by the Free Software Foundation.
+
+This program is distributed in the hope it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+more details.
+*/
+
+#ifndef __IA_CSS_COMMON_IO_TYPES
+#define __IA_CSS_COMMON_IO_TYPES
+
+#define MAX_IO_DMA_CHANNELS 3
+
+struct ia_css_common_io_config {
+ unsigned base_address;
+ unsigned width;
+ unsigned height;
+ unsigned stride;
+ unsigned ddr_elems_per_word;
+ unsigned dma_channel[MAX_IO_DMA_CHANNELS];
+};
+
+#endif /* __IA_CSS_COMMON_IO_TYPES */
+#endif
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ipu2_io_ls/plane_io_ls/ia_css_plane_io_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ipu2_io_ls/plane_io_ls/ia_css_plane_io_param.h
new file mode 100644
index 000000000000..881b7e5236dc
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ipu2_io_ls/plane_io_ls/ia_css_plane_io_param.h
@@ -0,0 +1,22 @@
+#ifdef ISP2401
+/**
+Support for Intel Camera Imaging ISP subsystem.
+Copyright (c) 2010 - 2015, Intel Corporation.
+
+This program is free software; you can redistribute it and/or modify it
+under the terms and conditions of the GNU General Public License,
+version 2, as published by the Free Software Foundation.
+
+This program is distributed in the hope it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+more details.
+*/
+
+#ifndef __IA_CSS_PLANE_IO_PARAM_H
+#define __IA_CSS_PLANE_IO_PARAM_H
+
+#include "../common/ia_css_common_io_param.h"
+
+#endif /* __IA_CSS_PLANE_IO_PARAM_H */
+#endif
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ipu2_io_ls/plane_io_ls/ia_css_plane_io_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ipu2_io_ls/plane_io_ls/ia_css_plane_io_types.h
new file mode 100644
index 000000000000..f4b9e8de3d8e
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ipu2_io_ls/plane_io_ls/ia_css_plane_io_types.h
@@ -0,0 +1,30 @@
+#ifdef ISP2401
+/**
+Support for Intel Camera Imaging ISP subsystem.
+Copyright (c) 2010 - 2015, Intel Corporation.
+
+This program is free software; you can redistribute it and/or modify it
+under the terms and conditions of the GNU General Public License,
+version 2, as published by the Free Software Foundation.
+
+This program is distributed in the hope it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+more details.
+*/
+
+#ifndef __IA_CSS_PLANE_IO_TYPES_H
+#define __IA_CSS_PLANE_IO_TYPES_H
+
+#include "../common/ia_css_common_io_types.h"
+
+#define PLANE_IO_LS_NUM_PLANES 3
+
+struct ia_css_plane_io_config {
+ struct ia_css_common_io_config get_plane_io_config[PLANE_IO_LS_NUM_PLANES];
+ struct ia_css_common_io_config put_plane_io_config[PLANE_IO_LS_NUM_PLANES];
+};
+
+#endif /* __IA_CSS_PLANE_IO_TYPES_H */
+
+#endif
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ipu2_io_ls/yuv420_io_ls/ia_css_yuv420_io_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ipu2_io_ls/yuv420_io_ls/ia_css_yuv420_io_param.h
new file mode 100644
index 000000000000..86184b545fed
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ipu2_io_ls/yuv420_io_ls/ia_css_yuv420_io_param.h
@@ -0,0 +1,22 @@
+#ifdef ISP2401
+/**
+Support for Intel Camera Imaging ISP subsystem.
+Copyright (c) 2010 - 2015, Intel Corporation.
+
+This program is free software; you can redistribute it and/or modify it
+under the terms and conditions of the GNU General Public License,
+version 2, as published by the Free Software Foundation.
+
+This program is distributed in the hope it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+more details.
+*/
+
+#ifndef __IA_CSS_YUV420_IO_PARAM
+#define __IA_CSS_YUV420_IO_PARAM
+
+#include "../common/ia_css_common_io_param.h"
+
+#endif
+#endif
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ipu2_io_ls/yuv420_io_ls/ia_css_yuv420_io_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ipu2_io_ls/yuv420_io_ls/ia_css_yuv420_io_types.h
new file mode 100644
index 000000000000..ad750f530013
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ipu2_io_ls/yuv420_io_ls/ia_css_yuv420_io_types.h
@@ -0,0 +1,22 @@
+#ifdef ISP2401
+/**
+Support for Intel Camera Imaging ISP subsystem.
+Copyright (c) 2010 - 2015, Intel Corporation.
+
+This program is free software; you can redistribute it and/or modify it
+under the terms and conditions of the GNU General Public License,
+version 2, as published by the Free Software Foundation.
+
+This program is distributed in the hope it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+more details.
+*/
+
+#ifndef __IA_CSS_YUV420_IO_TYPES
+#define __IA_CSS_YUV420_IO_TYPES
+
+#include "../common/ia_css_common_io_types.h"
+
+#endif
+#endif
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ipu2_io_ls/yuv444_io_ls/ia_css_yuv444_io.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ipu2_io_ls/yuv444_io_ls/ia_css_yuv444_io.host.c
new file mode 100644
index 000000000000..f7e1a632c47e
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ipu2_io_ls/yuv444_io_ls/ia_css_yuv444_io.host.c
@@ -0,0 +1,86 @@
+#ifdef ISP2401
+/**
+Support for Intel Camera Imaging ISP subsystem.
+Copyright (c) 2010 - 2015, Intel Corporation.
+
+This program is free software; you can redistribute it and/or modify it
+under the terms and conditions of the GNU General Public License,
+version 2, as published by the Free Software Foundation.
+
+This program is distributed in the hope it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+more details.
+*/
+
+#include "ia_css_yuv444_io.host.h"
+#include "dma.h"
+#include "math_support.h"
+#ifndef IA_CSS_NO_DEBUG
+#include "ia_css_debug.h"
+#endif
+#include "ia_css_isp_params.h"
+#include "ia_css_frame.h"
+
+void
+ia_css_yuv444_io_config(
+ const struct ia_css_binary *binary,
+ const struct sh_css_binary_args *args)
+{
+ const struct ia_css_frame *in_frame = args->in_frame;
+ const struct ia_css_frame **out_frames = (const struct ia_css_frame **)& args->out_frame;
+ const struct ia_css_frame_info *in_frame_info = (in_frame) ? &in_frame->info : &binary->in_frame_info;
+
+ const unsigned ddr_bits_per_element = sizeof(short) * 8;
+ const unsigned ddr_elems_per_word = ceil_div(HIVE_ISP_DDR_WORD_BITS, ddr_bits_per_element);
+ unsigned size_get = 0, size_put = 0;
+ unsigned offset = 0;
+
+ if (binary->info->mem_offsets.offsets.param) {
+ size_get = binary->info->mem_offsets.offsets.param->dmem.get.size;
+ offset = binary->info->mem_offsets.offsets.param->dmem.get.offset;
+ }
+
+ if (size_get) {
+ struct ia_css_common_io_config *to = (struct ia_css_common_io_config *)&binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset];
+ struct dma_port_config config;
+#ifndef IA_CSS_NO_DEBUG
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_yuv444_io_config() get part enter:\n");
+#endif
+
+ ia_css_dma_configure_from_info(&config, in_frame_info);
+ // The base_address of the input frame will be set in the ISP
+ to->width = in_frame_info->res.width;
+ to->height = in_frame_info->res.height;
+ to->stride = config.stride;
+ to->ddr_elems_per_word = ddr_elems_per_word;
+#ifndef IA_CSS_NO_DEBUG
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_yuv444_io_config() get part leave:\n");
+#endif
+ }
+
+ if (binary->info->mem_offsets.offsets.param) {
+ size_put = binary->info->mem_offsets.offsets.param->dmem.put.size;
+ offset = binary->info->mem_offsets.offsets.param->dmem.put.offset;
+ }
+
+ if (size_put) {
+ struct ia_css_common_io_config *to = (struct ia_css_common_io_config *)&binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset];
+ struct dma_port_config config;
+#ifndef IA_CSS_NO_DEBUG
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_yuv444_io_config() put part enter:\n");
+#endif
+
+ ia_css_dma_configure_from_info(&config, &out_frames[0]->info);
+ to->base_address = out_frames[0]->data;
+ to->width = out_frames[0]->info.res.width;
+ to->height = out_frames[0]->info.res.height;
+ to->stride = config.stride;
+ to->ddr_elems_per_word = ddr_elems_per_word;
+
+#ifndef IA_CSS_NO_DEBUG
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_yuv444_io_config() put part leave:\n");
+#endif
+ }
+}
+#endif
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ipu2_io_ls/yuv444_io_ls/ia_css_yuv444_io.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ipu2_io_ls/yuv444_io_ls/ia_css_yuv444_io.host.h
new file mode 100644
index 000000000000..480172d39aee
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ipu2_io_ls/yuv444_io_ls/ia_css_yuv444_io.host.h
@@ -0,0 +1,31 @@
+#ifdef ISP2401
+/**
+Support for Intel Camera Imaging ISP subsystem.
+Copyright (c) 2010 - 2015, Intel Corporation.
+
+This program is free software; you can redistribute it and/or modify it
+under the terms and conditions of the GNU General Public License,
+version 2, as published by the Free Software Foundation.
+
+This program is distributed in the hope it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+more details.
+*/
+
+#ifndef __YUV444_IO_HOST_H
+#define __YUV444_IO_HOST_H
+
+#include "ia_css_yuv444_io_param.h"
+#include "ia_css_yuv444_io_types.h"
+#include "ia_css_binary.h"
+#include "sh_css_internal.h"
+
+
+void
+ia_css_yuv444_io_config(
+ const struct ia_css_binary *binary,
+ const struct sh_css_binary_args *args);
+
+#endif /* __YUV444_IO_HOST_H */
+#endif
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ipu2_io_ls/yuv444_io_ls/ia_css_yuv444_io_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ipu2_io_ls/yuv444_io_ls/ia_css_yuv444_io_param.h
new file mode 100644
index 000000000000..cc8eda19c6e8
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ipu2_io_ls/yuv444_io_ls/ia_css_yuv444_io_param.h
@@ -0,0 +1,22 @@
+#ifdef ISP2401
+/**
+Support for Intel Camera Imaging ISP subsystem.
+Copyright (c) 2010 - 2015, Intel Corporation.
+
+This program is free software; you can redistribute it and/or modify it
+under the terms and conditions of the GNU General Public License,
+version 2, as published by the Free Software Foundation.
+
+This program is distributed in the hope it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+more details.
+*/
+
+#ifndef __IA_CSS_YUV444_IO_PARAM
+#define __IA_CSS_YUV444_IO_PARAM
+
+#include "../common/ia_css_common_io_param.h"
+
+#endif
+#endif
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ipu2_io_ls/yuv444_io_ls/ia_css_yuv444_io_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ipu2_io_ls/yuv444_io_ls/ia_css_yuv444_io_types.h
new file mode 100644
index 000000000000..343325a111e1
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ipu2_io_ls/yuv444_io_ls/ia_css_yuv444_io_types.h
@@ -0,0 +1,22 @@
+#ifdef ISP2401
+/**
+Support for Intel Camera Imaging ISP subsystem.
+Copyright (c) 2010 - 2015, Intel Corporation.
+
+This program is free software; you can redistribute it and/or modify it
+under the terms and conditions of the GNU General Public License,
+version 2, as published by the Free Software Foundation.
+
+This program is distributed in the hope it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+more details.
+*/
+
+#ifndef __IA_CSS_YUV444_IO_TYPES
+#define __IA_CSS_YUV444_IO_TYPES
+
+#include "../common/ia_css_common_io_types.h"
+
+#endif
+#endif
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/iterator/iterator_1.0/ia_css_iterator.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/iterator/iterator_1.0/ia_css_iterator.host.c
new file mode 100644
index 000000000000..9e41cc0a307f
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/iterator/iterator_1.0/ia_css_iterator.host.c
@@ -0,0 +1,80 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "ia_css_iterator.host.h"
+#include "ia_css_frame_public.h"
+#include "ia_css_binary.h"
+#include "ia_css_err.h"
+#define IA_CSS_INCLUDE_CONFIGURATIONS
+#include "ia_css_isp_configs.h"
+
+static const struct ia_css_iterator_configuration default_config = {
+ .input_info = (struct ia_css_frame_info *)NULL,
+};
+
+void
+ia_css_iterator_config(
+ struct sh_css_isp_iterator_isp_config *to,
+ const struct ia_css_iterator_configuration *from,
+ unsigned size)
+{
+ (void)size;
+ ia_css_frame_info_to_frame_sp_info(&to->input_info, from->input_info);
+ ia_css_frame_info_to_frame_sp_info(&to->internal_info, from->internal_info);
+ ia_css_frame_info_to_frame_sp_info(&to->output_info, from->output_info);
+ ia_css_frame_info_to_frame_sp_info(&to->vf_info, from->vf_info);
+ ia_css_resolution_to_sp_resolution(&to->dvs_envelope, from->dvs_envelope);
+}
+
+enum ia_css_err
+ia_css_iterator_configure(
+ const struct ia_css_binary *binary,
+ const struct ia_css_frame_info *in_info)
+{
+ struct ia_css_frame_info my_info = IA_CSS_BINARY_DEFAULT_FRAME_INFO;
+ struct ia_css_iterator_configuration config = default_config;
+
+ config.input_info = &binary->in_frame_info;
+ config.internal_info = &binary->internal_frame_info;
+ config.output_info = &binary->out_frame_info[0];
+ config.vf_info = &binary->vf_frame_info;
+ config.dvs_envelope = &binary->dvs_envelope;
+
+ /* Use in_info instead of binary->in_frame_info.
+ * They can differ in padded width in case of scaling, e.g. for capture_pp.
+ * Find out why.
+ */
+ if (in_info)
+ config.input_info = in_info;
+ if (binary->out_frame_info[0].res.width == 0)
+ config.output_info = &binary->out_frame_info[1];
+ my_info = *config.output_info;
+ config.output_info = &my_info;
+ /* We do this only for the preview pipe because fill_binary_info()
+ * assigns the vf_out resolution to the out resolution, while ISP-internal
+ * processing needs the original out resolution. The video pipe has two
+ * output pins (out and vf_out), so it already keeps both resolutions. */
+ if (binary->info->sp.pipeline.mode == IA_CSS_BINARY_MODE_PREVIEW &&
+ binary->vf_downscale_log2 > 0) {
+ /* TODO: Remove this after preview output decimation is fixed
+ * by configuring out&vf info files properly */
+ my_info.padded_width <<= binary->vf_downscale_log2;
+ my_info.res.width <<= binary->vf_downscale_log2;
+ my_info.res.height <<= binary->vf_downscale_log2;
+ }
+
+ ia_css_configure_iterator(binary, &config);
+
+ return IA_CSS_SUCCESS;
+}
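To make the preview-pipe branch in ia_css_iterator_configure() concrete: when vf_downscale_log2 is non-zero, the copied output info is shifted back up so the ISP works with the pre-decimation resolution. A toy example of that arithmetic, with made-up numbers:

#include <stdio.h>

int main(void)
{
	/* Made-up example resolution; only the shift arithmetic matters. */
	unsigned width = 640, height = 360, padded_width = 640;
	unsigned vf_downscale_log2 = 1;    /* 2x viewfinder decimation */

	padded_width <<= vf_downscale_log2;
	width <<= vf_downscale_log2;
	height <<= vf_downscale_log2;

	printf("%ux%u (padded %u)\n", width, height, padded_width);  /* 1280x720 (padded 1280) */
	return 0;
}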
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/iterator/iterator_1.0/ia_css_iterator.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/iterator/iterator_1.0/ia_css_iterator.host.h
new file mode 100644
index 000000000000..d8f249c5a53b
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/iterator/iterator_1.0/ia_css_iterator.host.h
@@ -0,0 +1,34 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_ITERATOR_HOST_H
+#define __IA_CSS_ITERATOR_HOST_H
+
+#include "ia_css_frame_public.h"
+#include "ia_css_binary.h"
+#include "ia_css_err.h"
+#include "ia_css_iterator_param.h"
+
+void
+ia_css_iterator_config(
+ struct sh_css_isp_iterator_isp_config *to,
+ const struct ia_css_iterator_configuration *from,
+ unsigned size);
+
+enum ia_css_err
+ia_css_iterator_configure(
+ const struct ia_css_binary *binary,
+ const struct ia_css_frame_info *in_info);
+
+#endif /* __IA_CSS_ITERATOR_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/iterator/iterator_1.0/ia_css_iterator_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/iterator/iterator_1.0/ia_css_iterator_param.h
new file mode 100644
index 000000000000..d308126e41d3
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/iterator/iterator_1.0/ia_css_iterator_param.h
@@ -0,0 +1,38 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_ITERATOR_PARAM_H
+#define __IA_CSS_ITERATOR_PARAM_H
+
+#include "ia_css_types.h" /* ia_css_resolution */
+#include "ia_css_frame_public.h" /* ia_css_frame_info */
+#include "ia_css_frame_comm.h" /* ia_css_frame_sp_info */
+
+struct ia_css_iterator_configuration {
+ const struct ia_css_frame_info *input_info;
+ const struct ia_css_frame_info *internal_info;
+ const struct ia_css_frame_info *output_info;
+ const struct ia_css_frame_info *vf_info;
+ const struct ia_css_resolution *dvs_envelope;
+};
+
+struct sh_css_isp_iterator_isp_config {
+ struct ia_css_frame_sp_info input_info;
+ struct ia_css_frame_sp_info internal_info;
+ struct ia_css_frame_sp_info output_info;
+ struct ia_css_frame_sp_info vf_info;
+ struct ia_css_sp_resolution dvs_envelope;
+};
+
+#endif /* __IA_CSS_ITERATOR_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/macc/macc1_5/ia_css_macc1_5.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/macc/macc1_5/ia_css_macc1_5.host.c
new file mode 100644
index 000000000000..5ddf61fc95fa
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/macc/macc1_5/ia_css_macc1_5.host.c
@@ -0,0 +1,74 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "ia_css_types.h"
+#include "sh_css_defs.h"
+
+#ifndef IA_CSS_NO_DEBUG
+/* FIXME: See BZ 4427 */
+#include "ia_css_debug.h"
+#endif
+
+#include "ia_css_macc1_5.host.h"
+
+const struct ia_css_macc1_5_config default_macc1_5_config = {
+ 1
+};
+
+void
+ia_css_macc1_5_encode(
+ struct sh_css_isp_macc1_5_params *to,
+ const struct ia_css_macc1_5_config *from,
+ unsigned int size)
+{
+ (void)size;
+ to->exp = from->exp;
+}
+
+void
+ia_css_macc1_5_vmem_encode(
+ struct sh_css_isp_macc1_5_vmem_params *params,
+ const struct ia_css_macc1_5_table *from,
+ unsigned int size)
+{
+ unsigned int i, j, k, idx;
+ unsigned int idx_map[] = {
+ 0, 1, 3, 2, 6, 7, 5, 4, 12, 13, 15, 14, 10, 11, 9, 8};
+
+ (void)size;
+
+ for (k = 0; k < 4; k++)
+ for (i = 0; i < IA_CSS_MACC_NUM_AXES; i++) {
+ idx = idx_map[i] + (k * IA_CSS_MACC_NUM_AXES);
+ j = 4 * i;
+
+ params->data[0][(idx)] = from->data[j];
+ params->data[1][(idx)] = from->data[j + 1];
+ params->data[2][(idx)] = from->data[j + 2];
+ params->data[3][(idx)] = from->data[j + 3];
+ }
+
+}
+
+#ifndef IA_CSS_NO_DEBUG
+void
+ia_css_macc1_5_debug_dtrace(
+ const struct ia_css_macc1_5_config *config,
+ unsigned int level)
+{
+ ia_css_debug_dtrace(level,
+ "config.exp=%d\n",
+ config->exp);
+}
+#endif
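A standalone sketch of the data reordering performed by ia_css_macc1_5_vmem_encode() above: the flat axis-major host table (4 coefficients per axis) is split into four coefficient planes whose axis order is permuted by idx_map, with the permuted 16-entry block repeated four times. Treating IA_CSS_MACC_NUM_AXES as 16 matches the 16-entry idx_map but is an assumption here, as are the plain C arrays standing in for VMEM_ARRAY.

#define NUM_AXES 16 /* assumed; mirrors the 16-entry idx_map above */

static const unsigned idx_map[NUM_AXES] = {
	0, 1, 3, 2, 6, 7, 5, 4, 12, 13, 15, 14, 10, 11, 9, 8
};

/* from: 64 axis-major values (axis 0 coeffs 0..3, axis 1 coeffs 0..3, ...)
 * data: 4 coefficient planes of 4 * NUM_AXES entries each
 */
static void macc_vmem_reorder_sketch(const int from[NUM_AXES * 4],
				     int data[4][NUM_AXES * 4])
{
	unsigned i, j, k, idx;

	for (k = 0; k < 4; k++)
		for (i = 0; i < NUM_AXES; i++) {
			idx = idx_map[i] + k * NUM_AXES;
			j = 4 * i;
			data[0][idx] = from[j];
			data[1][idx] = from[j + 1];
			data[2][idx] = from[j + 2];
			data[3][idx] = from[j + 3];
		}
}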
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/macc/macc1_5/ia_css_macc1_5.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/macc/macc1_5/ia_css_macc1_5.host.h
new file mode 100644
index 000000000000..53ef18f7e912
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/macc/macc1_5/ia_css_macc1_5.host.h
@@ -0,0 +1,41 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_MACC1_5_HOST_H
+#define __IA_CSS_MACC1_5_HOST_H
+
+#include "ia_css_macc1_5_param.h"
+#include "ia_css_macc1_5_table.host.h"
+
+extern const struct ia_css_macc1_5_config default_macc1_5_config;
+
+void
+ia_css_macc1_5_encode(
+ struct sh_css_isp_macc1_5_params *to,
+ const struct ia_css_macc1_5_config *from,
+ unsigned int size);
+
+void
+ia_css_macc1_5_vmem_encode(
+ struct sh_css_isp_macc1_5_vmem_params *params,
+ const struct ia_css_macc1_5_table *from,
+ unsigned int size);
+
+#ifndef IA_CSS_NO_DEBUG
+void
+ia_css_macc1_5_debug_dtrace(
+ const struct ia_css_macc1_5_config *config,
+ unsigned int level);
+#endif
+#endif /* __IA_CSS_MACC1_5_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/macc/macc1_5/ia_css_macc1_5_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/macc/macc1_5/ia_css_macc1_5_param.h
new file mode 100644
index 000000000000..41a2da460dcf
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/macc/macc1_5/ia_css_macc1_5_param.h
@@ -0,0 +1,31 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_MACC1_5_PARAM_H
+#define __IA_CSS_MACC1_5_PARAM_H
+
+#include "type_support.h"
+#include "vmem.h"
+#include "ia_css_macc1_5_types.h"
+
+/* MACC */
+struct sh_css_isp_macc1_5_params {
+ int32_t exp;
+};
+
+struct sh_css_isp_macc1_5_vmem_params {
+ VMEM_ARRAY(data, IA_CSS_MACC_NUM_COEFS*ISP_NWAY);
+};
+
+#endif /* __IA_CSS_MACC1_5_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/macc/macc1_5/ia_css_macc1_5_table.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/macc/macc1_5/ia_css_macc1_5_table.host.c
new file mode 100644
index 000000000000..89714bf87b52
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/macc/macc1_5/ia_css_macc1_5_table.host.c
@@ -0,0 +1,32 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "system_global.h"
+#include "ia_css_types.h"
+#include "ia_css_macc1_5_table.host.h"
+
+/* Multi-Axes Color Correction table for ISP2.
+ * 64 values = 2x2 matrix for 16 areas, [s1.12]
+ * ineffective: 16 of the identity 2x2 matrix {4096,0,0,4096}
+ */
+const struct ia_css_macc1_5_table default_macc1_5_table = {
+ { 4096, 0, 0, 4096, 4096, 0, 0, 4096,
+ 4096, 0, 0, 4096, 4096, 0, 0, 4096,
+ 4096, 0, 0, 4096, 4096, 0, 0, 4096,
+ 4096, 0, 0, 4096, 4096, 0, 0, 4096,
+ 4096, 0, 0, 4096, 4096, 0, 0, 4096,
+ 4096, 0, 0, 4096, 4096, 0, 0, 4096,
+ 4096, 0, 0, 4096, 4096, 0, 0, 4096,
+ 4096, 0, 0, 4096, 4096, 0, 0, 4096 }
+};
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/macc/macc1_5/ia_css_macc1_5_table.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/macc/macc1_5/ia_css_macc1_5_table.host.h
new file mode 100644
index 000000000000..10a50aa82be8
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/macc/macc1_5/ia_css_macc1_5_table.host.h
@@ -0,0 +1,22 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_MACC1_5_TABLE_HOST_H
+#define __IA_CSS_MACC1_5_TABLE_HOST_H
+
+#include "macc/macc1_5/ia_css_macc1_5_types.h"
+
+extern const struct ia_css_macc1_5_table default_macc1_5_table;
+
+#endif /* __IA_CSS_MACC1_5_TABLE_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/macc/macc1_5/ia_css_macc1_5_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/macc/macc1_5/ia_css_macc1_5_types.h
new file mode 100644
index 000000000000..3d510bf5886a
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/macc/macc1_5/ia_css_macc1_5_types.h
@@ -0,0 +1,74 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_MACC1_5_TYPES_H
+#define __IA_CSS_MACC1_5_TYPES_H
+
+/** @file
+* CSS-API header file for Multi-Axis Color Correction (MACC) algorithm parameters.
+*/
+
+/** Multi-Axis Color Correction configuration
+ *
+ * ISP2.6.1: MACC1_5 is used.
+ */
+
+
+/** Number of axes in the MACC table. */
+#define IA_CSS_MACC_NUM_AXES 16
+/** Number of coefficients per MACC axis. */
+#define IA_CSS_MACC_NUM_COEFS 4
+
+/** Multi-Axes Color Correction (MACC) table.
+ *
+ * ISP block: MACC (MACC by only matrix)
+ * MACC1_5 (MACC by matrix and exponent(ia_css_macc_config))
+ * ISP1: MACC is used.
+ * ISP2: MACC1_5 is used.
+ *
+ * [MACC]
+ * OutU = (data00 * InU + data01 * InV) >> 13
+ * OutV = (data10 * InU + data11 * InV) >> 13
+ *
+ * default/ineffective:
+ * OutU = (8192 * InU + 0 * InV) >> 13
+ * OutV = ( 0 * InU + 8192 * InV) >> 13
+ *
+ * [MACC1_5]
+ * OutU = (data00 * InU + data01 * InV) >> (13 - exp)
+ * OutV = (data10 * InU + data11 * InV) >> (13 - exp)
+ *
+ * default/ineffective: (exp=1)
+ * OutU = (4096 * InU + 0 * InV) >> (13 - 1)
+ * OutV = ( 0 * InU + 4096 * InV) >> (13 - 1)
+ */
+struct ia_css_macc1_5_table {
+ int16_t data[IA_CSS_MACC_NUM_COEFS * IA_CSS_MACC_NUM_AXES];
+ /**< 16 of 2x2 matrix
+ MACC1_5: s[macc_config.exp].[13-macc_config.exp], [-8192,8191]
+ default/ineffective: (s1.12)
+ 16 of the identity 2x2 matrix {4096,0,0,4096} */
+};
+
+/** Multi-Axes Color Correction (MACC) configuration.
+ *
+ * ISP block: MACC1_5 (MACC by matrix and exponent(ia_css_macc_config))
+ * ISP2: MACC1_5 is used.
+ */
+struct ia_css_macc1_5_config {
+ uint8_t exp; /**< Common exponent of ia_css_macc_table.
+ u8.0, [0,13], default 1, ineffective 1 */
+};
+
+#endif /* __IA_CSS_MACC1_5_TYPES_H */
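
Worked example of the [MACC1_5] formula documented above, using the default/ineffective 2x2 matrix {4096, 0, 0, 4096} and exp = 1; with these values the transform reduces to the identity. Illustrative only, not driver code.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int16_t data00 = 4096, data01 = 0, data10 = 0, data11 = 4096;
	uint8_t exp = 1;
	int in_u = 100, in_v = 50;

	/* OutU/OutV as defined in the comment block above. */
	int out_u = (data00 * in_u + data01 * in_v) >> (13 - exp);
	int out_v = (data10 * in_u + data11 * in_v) >> (13 - exp);

	printf("OutU=%d OutV=%d\n", out_u, out_v); /* prints OutU=100 OutV=50 */
	return 0;
}
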
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/macc/macc_1.0/ia_css_macc.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/macc/macc_1.0/ia_css_macc.host.c
new file mode 100644
index 000000000000..1f7e9e4eec3c
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/macc/macc_1.0/ia_css_macc.host.c
@@ -0,0 +1,49 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "ia_css_types.h"
+#include "sh_css_defs.h"
+#include "ia_css_debug.h"
+#include "sh_css_frac.h"
+
+#include "ia_css_macc.host.h"
+
+const struct ia_css_macc_config default_macc_config = {
+ 1,
+};
+
+void
+ia_css_macc_encode(
+ struct sh_css_isp_macc_params *to,
+ const struct ia_css_macc_config *from,
+ unsigned size)
+{
+ (void)size;
+ to->exp = from->exp;
+}
+
+void
+ia_css_macc_dump(
+ const struct sh_css_isp_macc_params *macc,
+ unsigned level);
+
+void
+ia_css_macc_debug_dtrace(
+ const struct ia_css_macc_config *config,
+ unsigned level)
+{
+ ia_css_debug_dtrace(level,
+ "config.exp=%d\n",
+ config->exp);
+}
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/macc/macc_1.0/ia_css_macc.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/macc/macc_1.0/ia_css_macc.host.h
new file mode 100644
index 000000000000..044b01d38ad6
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/macc/macc_1.0/ia_css_macc.host.h
@@ -0,0 +1,42 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_MACC_HOST_H
+#define __IA_CSS_MACC_HOST_H
+
+#include "sh_css_params.h"
+
+#include "ia_css_macc_param.h"
+#include "ia_css_macc_table.host.h"
+
+extern const struct ia_css_macc_config default_macc_config;
+
+void
+ia_css_macc_encode(
+ struct sh_css_isp_macc_params *to,
+ const struct ia_css_macc_config *from,
+ unsigned size);
+
+
+void
+ia_css_macc_dump(
+ const struct sh_css_isp_macc_params *macc,
+ unsigned level);
+
+void
+ia_css_macc_debug_dtrace(
+ const struct ia_css_macc_config *config,
+ unsigned level);
+
+#endif /* __IA_CSS_MACC_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/macc/macc_1.0/ia_css_macc_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/macc/macc_1.0/ia_css_macc_param.h
new file mode 100644
index 000000000000..6a12b922c485
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/macc/macc_1.0/ia_css_macc_param.h
@@ -0,0 +1,25 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_MACC_PARAM_H
+#define __IA_CSS_MACC_PARAM_H
+
+#include "type_support.h"
+
+/* MACC */
+struct sh_css_isp_macc_params {
+ int32_t exp;
+};
+
+#endif /* __IA_CSS_MACC_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/macc/macc_1.0/ia_css_macc_table.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/macc/macc_1.0/ia_css_macc_table.host.c
new file mode 100644
index 000000000000..8a6c3cafabdc
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/macc/macc_1.0/ia_css_macc_table.host.c
@@ -0,0 +1,47 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "system_global.h"
+#include "ia_css_types.h"
+#include "ia_css_macc_table.host.h"
+
+/* Multi-Axes Color Correction table for ISP1.
+ * 64 values = 2x2 matrix for 16 areas, [s2.13]
+ * ineffective: 16 of the identity 2x2 matrix {8192,0,0,8192}
+ */
+const struct ia_css_macc_table default_macc_table = {
+ { 8192, 0, 0, 8192, 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192, 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192, 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192, 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192, 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192, 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192, 8192, 0, 0, 8192,
+ 8192, 0, 0, 8192, 8192, 0, 0, 8192 }
+};
+
+/* Multi-Axes Color Correction table for ISP2.
+ * 64 values = 2x2 matrix for 16 areas, [s1.12]
+ * ineffective: 16 of the identity 2x2 matrix {4096,0,0,4096}
+ */
+const struct ia_css_macc_table default_macc2_table = {
+ { 4096, 0, 0, 4096, 4096, 0, 0, 4096,
+ 4096, 0, 0, 4096, 4096, 0, 0, 4096,
+ 4096, 0, 0, 4096, 4096, 0, 0, 4096,
+ 4096, 0, 0, 4096, 4096, 0, 0, 4096,
+ 4096, 0, 0, 4096, 4096, 0, 0, 4096,
+ 4096, 0, 0, 4096, 4096, 0, 0, 4096,
+ 4096, 0, 0, 4096, 4096, 0, 0, 4096,
+ 4096, 0, 0, 4096, 4096, 0, 0, 4096 }
+};
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/macc/macc_1.0/ia_css_macc_table.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/macc/macc_1.0/ia_css_macc_table.host.h
new file mode 100644
index 000000000000..96d62c9912b8
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/macc/macc_1.0/ia_css_macc_table.host.h
@@ -0,0 +1,23 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_MACC_TABLE_HOST_H
+#define __IA_CSS_MACC_TABLE_HOST_H
+
+#include "ia_css_macc_types.h"
+
+extern const struct ia_css_macc_table default_macc_table;
+extern const struct ia_css_macc_table default_macc2_table;
+
+#endif /* __IA_CSS_MACC_TABLE_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/macc/macc_1.0/ia_css_macc_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/macc/macc_1.0/ia_css_macc_types.h
new file mode 100644
index 000000000000..a25581c6f3ac
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/macc/macc_1.0/ia_css_macc_types.h
@@ -0,0 +1,63 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_MACC_TYPES_H
+#define __IA_CSS_MACC_TYPES_H
+
+/** @file
+* CSS-API header file for Multi-Axis Color Correction (MACC) parameters.
+*/
+
+/** Number of axes in the MACC table. */
+#define IA_CSS_MACC_NUM_AXES 16
+/** Number of coefficients per MACC axis. */
+#define IA_CSS_MACC_NUM_COEFS 4
+/** The number of planes in the morphing table. */
+
+/** Multi-Axis Color Correction (MACC) table.
+ *
+ * ISP block: MACC1 (MACC by only matrix)
+ * MACC2 (MACC by matrix and exponent(ia_css_macc_config))
+ * ISP1: MACC1 is used.
+ * ISP2: MACC2 is used.
+ *
+ * [MACC1]
+ * OutU = (data00 * InU + data01 * InV) >> 13
+ * OutV = (data10 * InU + data11 * InV) >> 13
+ *
+ * default/ineffective:
+ * OutU = (8192 * InU + 0 * InV) >> 13
+ * OutV = ( 0 * InU + 8192 * InV) >> 13
+ *
+ * [MACC2]
+ * OutU = (data00 * InU + data01 * InV) >> (13 - exp)
+ * OutV = (data10 * InU + data11 * InV) >> (13 - exp)
+ *
+ * default/ineffective: (exp=1)
+ * OutU = (4096 * InU + 0 * InV) >> (13 - 1)
+ * OutV = ( 0 * InU + 4096 * InV) >> (13 - 1)
+ */
+
+struct ia_css_macc_table {
+ int16_t data[IA_CSS_MACC_NUM_COEFS * IA_CSS_MACC_NUM_AXES];
+ /**< 16 of 2x2 matrix
+ MACC1: s2.13, [-65536,65535]
+ default/ineffective:
+ 16 of the identity 2x2 matrix {8192,0,0,8192}
+ MACC2: s[macc_config.exp].[13-macc_config.exp], [-8192,8191]
+ default/ineffective: (s1.12)
+ 16 of the identity 2x2 matrix {4096,0,0,4096} */
+};
+
+#endif /* __IA_CSS_MACC_TYPES_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/norm/norm_1.0/ia_css_norm.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/norm/norm_1.0/ia_css_norm.host.c
new file mode 100644
index 000000000000..2c2c5a5854a0
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/norm/norm_1.0/ia_css_norm.host.c
@@ -0,0 +1,16 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "ia_css_norm.host.h"
+
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/norm/norm_1.0/ia_css_norm.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/norm/norm_1.0/ia_css_norm.host.h
new file mode 100644
index 000000000000..42b5143ef78f
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/norm/norm_1.0/ia_css_norm.host.h
@@ -0,0 +1,20 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_NORM_HOST_H
+#define __IA_CSS_NORM_HOST_H
+
+#include "ia_css_norm_param.h"
+
+#endif /* __IA_CSS_NORM_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/norm/norm_1.0/ia_css_norm_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/norm/norm_1.0/ia_css_norm_param.h
new file mode 100644
index 000000000000..85dc6fc0a56b
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/norm/norm_1.0/ia_css_norm_param.h
@@ -0,0 +1,19 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_NORM_PARAM_H
+#define __IA_CSS_NORM_PARAM_H
+
+
+#endif /* __IA_CSS_NORM_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/norm/norm_1.0/ia_css_norm_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/norm/norm_1.0/ia_css_norm_types.h
new file mode 100644
index 000000000000..5581bddf9f9b
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/norm/norm_1.0/ia_css_norm_types.h
@@ -0,0 +1,21 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_NORM_TYPES_H
+#define __IA_CSS_NORM_TYPES_H
+
+
+#endif /* __IA_CSS_NORM_TYPES_H */
+
+
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ob/ob2/ia_css_ob2.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ob/ob2/ia_css_ob2.host.c
new file mode 100644
index 000000000000..f77aff13f8e3
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ob/ob2/ia_css_ob2.host.c
@@ -0,0 +1,79 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "ia_css_types.h"
+#include "sh_css_defs.h"
+#include "sh_css_frac.h"
+#ifndef IA_CSS_NO_DEBUG
+#include "ia_css_debug.h"
+#endif
+#include "isp.h"
+#include "ia_css_ob2.host.h"
+
+const struct ia_css_ob2_config default_ob2_config = {
+ 0,
+ 0,
+ 0,
+ 0
+};
+
+void
+ia_css_ob2_encode(
+ struct sh_css_isp_ob2_params *to,
+ const struct ia_css_ob2_config *from,
+ unsigned size)
+{
+ (void)size;
+
+ /* Black level types are u0_16 */
+ to->blacklevel_gr = uDIGIT_FITTING(from->level_gr, 16, SH_CSS_BAYER_BITS);
+ to->blacklevel_r = uDIGIT_FITTING(from->level_r, 16, SH_CSS_BAYER_BITS);
+ to->blacklevel_b = uDIGIT_FITTING(from->level_b, 16, SH_CSS_BAYER_BITS);
+ to->blacklevel_gb = uDIGIT_FITTING(from->level_gb, 16, SH_CSS_BAYER_BITS);
+}
+
+#ifndef IA_CSS_NO_DEBUG
+void
+ia_css_ob2_dump(
+ const struct sh_css_isp_ob2_params *ob2,
+ unsigned level)
+{
+ if (!ob2)
+ return;
+
+ ia_css_debug_dtrace(level, "Optical Black 2:\n");
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "ob2_blacklevel_gr", ob2->blacklevel_gr);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "ob2_blacklevel_r", ob2->blacklevel_r);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "ob2_blacklevel_b", ob2->blacklevel_b);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "ob2_blacklevel_gb", ob2->blacklevel_gb);
+
+}
+
+
+void
+ia_css_ob2_debug_dtrace(
+ const struct ia_css_ob2_config *config,
+ unsigned level)
+{
+ ia_css_debug_dtrace(level,
+ "config.level_gr=%d, config.level_r=%d, "
+ "config.level_b=%d, config.level_gb=%d, ",
+ config->level_gr, config->level_r,
+ config->level_b, config->level_gb);
+}
+#endif
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ob/ob2/ia_css_ob2.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ob/ob2/ia_css_ob2.host.h
new file mode 100644
index 000000000000..06846502eca3
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ob/ob2/ia_css_ob2.host.h
@@ -0,0 +1,40 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_OB2_HOST_H
+#define __IA_CSS_OB2_HOST_H
+
+#include "ia_css_ob2_types.h"
+#include "ia_css_ob2_param.h"
+
+extern const struct ia_css_ob2_config default_ob2_config;
+
+void
+ia_css_ob2_encode(
+ struct sh_css_isp_ob2_params *to,
+ const struct ia_css_ob2_config *from,
+ unsigned size);
+
+#ifndef IA_CSS_NO_DEBUG
+void
+ia_css_ob2_dump(
+ const struct sh_css_isp_ob2_params *ob2,
+ unsigned level);
+
+void
+ia_css_ob2_debug_dtrace(
+ const struct ia_css_ob2_config *config, unsigned level);
+#endif
+
+#endif /* __IA_CSS_OB2_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ob/ob2/ia_css_ob2_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ob/ob2/ia_css_ob2_param.h
new file mode 100644
index 000000000000..5c21d6a3911b
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ob/ob2/ia_css_ob2_param.h
@@ -0,0 +1,29 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_OB2_PARAM_H
+#define __IA_CSS_OB2_PARAM_H
+
+#include "type_support.h"
+
+
+/* OB2 (Optical Black) */
+struct sh_css_isp_ob2_params {
+ int32_t blacklevel_gr;
+ int32_t blacklevel_r;
+ int32_t blacklevel_b;
+ int32_t blacklevel_gb;
+};
+
+#endif /* __IA_CSS_OB2_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ob/ob2/ia_css_ob2_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ob/ob2/ia_css_ob2_types.h
new file mode 100644
index 000000000000..eeaadfeb5a1e
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ob/ob2/ia_css_ob2_types.h
@@ -0,0 +1,45 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_OB2_TYPES_H
+#define __IA_CSS_OB2_TYPES_H
+
+/** @file
+* CSS-API header file for Optical Black algorithm parameters.
+*/
+
+/** Optical Black configuration
+ *
+ * ISP2.6.1: OB2 is used.
+ */
+
+#include "ia_css_frac.h"
+
+struct ia_css_ob2_config {
+ ia_css_u0_16 level_gr; /**< Black level for GR pixels.
+ u0.16, [0,65535],
+ default/ineffective 0 */
+ ia_css_u0_16 level_r; /**< Black level for R pixels.
+ u0.16, [0,65535],
+ default/ineffective 0 */
+ ia_css_u0_16 level_b; /**< Black level for B pixels.
+ u0.16, [0,65535],
+ default/ineffective 0 */
+ ia_css_u0_16 level_gb; /**< Black level for GB pixels.
+ u0.16, [0,65535],
+ default/ineffective 0 */
+};
+
+#endif /* __IA_CSS_OB2_TYPES_H */
+
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ob/ob_1.0/ia_css_ob.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ob/ob_1.0/ia_css_ob.host.c
new file mode 100644
index 000000000000..fd891ac092ed
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ob/ob_1.0/ia_css_ob.host.c
@@ -0,0 +1,159 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "ia_css_types.h"
+#include "sh_css_defs.h"
+#include "ia_css_debug.h"
+#include "isp.h"
+
+#include "ia_css_ob.host.h"
+
+const struct ia_css_ob_config default_ob_config = {
+ IA_CSS_OB_MODE_NONE,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0
+};
+
+/* TODO: include ob.isp.h to get ISP knowledge and
+ add asserts on platform restrictions */
+
+void
+ia_css_ob_configure(
+ struct sh_css_isp_ob_stream_config *config,
+ unsigned int isp_pipe_version,
+ unsigned int raw_bit_depth)
+{
+ config->isp_pipe_version = isp_pipe_version;
+ config->raw_bit_depth = raw_bit_depth;
+}
+
+void
+ia_css_ob_encode(
+ struct sh_css_isp_ob_params *to,
+ const struct ia_css_ob_config *from,
+ const struct sh_css_isp_ob_stream_config *config,
+ unsigned size)
+{
+ unsigned int ob_bit_depth
+ = config->isp_pipe_version == 2 ? SH_CSS_BAYER_BITS : config->raw_bit_depth;
+ unsigned int scale = 16 - ob_bit_depth;
+
+ (void)size;
+ switch (from->mode) {
+ case IA_CSS_OB_MODE_FIXED:
+ to->blacklevel_gr = from->level_gr >> scale;
+ to->blacklevel_r = from->level_r >> scale;
+ to->blacklevel_b = from->level_b >> scale;
+ to->blacklevel_gb = from->level_gb >> scale;
+ to->area_start_bq = 0;
+ to->area_length_bq = 0;
+ to->area_length_bq_inverse = 0;
+ break;
+ case IA_CSS_OB_MODE_RASTER:
+ to->blacklevel_gr = 0;
+ to->blacklevel_r = 0;
+ to->blacklevel_b = 0;
+ to->blacklevel_gb = 0;
+ to->area_start_bq = from->start_position;
+ to->area_length_bq =
+ (from->end_position - from->start_position) + 1;
+ to->area_length_bq_inverse = AREA_LENGTH_UNIT / to->area_length_bq;
+ break;
+ default:
+ to->blacklevel_gr = 0;
+ to->blacklevel_r = 0;
+ to->blacklevel_b = 0;
+ to->blacklevel_gb = 0;
+ to->area_start_bq = 0;
+ to->area_length_bq = 0;
+ to->area_length_bq_inverse = 0;
+ break;
+ }
+}
+
+void
+ia_css_ob_vmem_encode(
+ struct sh_css_isp_ob_vmem_params *to,
+ const struct ia_css_ob_config *from,
+ const struct sh_css_isp_ob_stream_config *config,
+ unsigned size)
+{
+ struct sh_css_isp_ob_params tmp;
+ struct sh_css_isp_ob_params *ob = &tmp;
+
+ (void)size;
+ ia_css_ob_encode(&tmp, from, config, sizeof(tmp));
+
+ {
+ unsigned i;
+ unsigned sp_obarea_start_bq = ob->area_start_bq;
+ unsigned sp_obarea_length_bq = ob->area_length_bq;
+ unsigned low = sp_obarea_start_bq;
+ unsigned high = low + sp_obarea_length_bq;
+ uint16_t all_ones = ~0;
+
+ for (i = 0; i < OBAREA_MASK_SIZE; i++) {
+ if (i >= low && i < high)
+ to->vmask[i/ISP_VEC_NELEMS][i%ISP_VEC_NELEMS] = all_ones;
+ else
+ to->vmask[i/ISP_VEC_NELEMS][i%ISP_VEC_NELEMS] = 0;
+ }
+ }
+}
+
+void
+ia_css_ob_dump(
+ const struct sh_css_isp_ob_params *ob,
+ unsigned level)
+{
+ if (!ob)
+ return;
+ ia_css_debug_dtrace(level, "Optical Black:\n");
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "ob_blacklevel_gr", ob->blacklevel_gr);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "ob_blacklevel_r", ob->blacklevel_r);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "ob_blacklevel_b", ob->blacklevel_b);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "ob_blacklevel_gb", ob->blacklevel_gb);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "obarea_start_bq", ob->area_start_bq);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "obarea_length_bq", ob->area_length_bq);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "obarea_length_bq_inverse",
+ ob->area_length_bq_inverse);
+}
+
+
+void
+ia_css_ob_debug_dtrace(
+ const struct ia_css_ob_config *config,
+ unsigned level)
+{
+ ia_css_debug_dtrace(level,
+ "config.mode=%d, "
+ "config.level_gr=%d, config.level_r=%d, "
+ "config.level_b=%d, config.level_gb=%d, "
+ "config.start_position=%d, config.end_position=%d\n",
+ config->mode,
+ config->level_gr, config->level_r,
+ config->level_b, config->level_gb,
+ config->start_position, config->end_position);
+}
+
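
Illustrative walk-through of ia_css_ob_encode() above for both modes. The 10-bit working depth and the start/end positions are example values only; in the driver the depth comes from SH_CSS_BAYER_BITS or config->raw_bit_depth.

#include <stdio.h>

#define AREA_LENGTH_UNIT (1 << 12) /* as defined in ia_css_ob_param.h */

int main(void)
{
	/* IA_CSS_OB_MODE_FIXED: a u0.16 black level is shifted down to the
	 * working Bayer bit depth. */
	unsigned int ob_bit_depth = 10;          /* assumed example depth */
	unsigned int scale = 16 - ob_bit_depth;
	unsigned int level_gr = 4096;            /* u0.16, ~6.25% of full scale */

	printf("fixed: blacklevel_gr = %u\n", level_gr >> scale); /* 64 */

	/* IA_CSS_OB_MODE_RASTER: the OB area length and its fixed-point
	 * inverse are derived from the start/end positions (BQ units). */
	unsigned int start_position = 2, end_position = 9;
	unsigned int area_length_bq = (end_position - start_position) + 1;

	printf("raster: length_bq = %u, length_bq_inverse = %u\n",
	       area_length_bq, AREA_LENGTH_UNIT / area_length_bq); /* 8, 512 */
	return 0;
}
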
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ob/ob_1.0/ia_css_ob.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ob/ob_1.0/ia_css_ob.host.h
new file mode 100644
index 000000000000..4af181470f8d
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ob/ob_1.0/ia_css_ob.host.h
@@ -0,0 +1,53 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_OB_HOST_H
+#define __IA_CSS_OB_HOST_H
+
+#include "ia_css_ob_types.h"
+#include "ia_css_ob_param.h"
+
+extern const struct ia_css_ob_config default_ob_config;
+
+void
+ia_css_ob_configure(
+ struct sh_css_isp_ob_stream_config *config,
+ unsigned int isp_pipe_version,
+ unsigned int raw_bit_depth);
+
+void
+ia_css_ob_encode(
+ struct sh_css_isp_ob_params *to,
+ const struct ia_css_ob_config *from,
+ const struct sh_css_isp_ob_stream_config *config,
+ unsigned size);
+
+void
+ia_css_ob_vmem_encode(
+ struct sh_css_isp_ob_vmem_params *to,
+ const struct ia_css_ob_config *from,
+ const struct sh_css_isp_ob_stream_config *config,
+ unsigned size);
+
+void
+ia_css_ob_dump(
+ const struct sh_css_isp_ob_params *ob,
+ unsigned level);
+
+void
+ia_css_ob_debug_dtrace(
+ const struct ia_css_ob_config *config, unsigned level);
+
+#endif /* __IA_CSS_OB_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ob/ob_1.0/ia_css_ob_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ob/ob_1.0/ia_css_ob_param.h
new file mode 100644
index 000000000000..a60a644bb4ff
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ob/ob_1.0/ia_css_ob_param.h
@@ -0,0 +1,48 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_OB_PARAM_H
+#define __IA_CSS_OB_PARAM_H
+
+#include "type_support.h"
+#include "vmem.h"
+
+#define OBAREA_MASK_SIZE 64
+#define OBAREA_LENGTHBQ_INVERSE_SHIFT 12
+
+/* AREA_LENGTH_UNIT is dependent on NWAY, requires rewrite */
+#define AREA_LENGTH_UNIT (1<<12)
+
+
+/* OB (Optical Black) */
+struct sh_css_isp_ob_stream_config {
+ unsigned isp_pipe_version;
+ unsigned raw_bit_depth;
+};
+
+struct sh_css_isp_ob_params {
+ int32_t blacklevel_gr;
+ int32_t blacklevel_r;
+ int32_t blacklevel_b;
+ int32_t blacklevel_gb;
+ int32_t area_start_bq;
+ int32_t area_length_bq;
+ int32_t area_length_bq_inverse;
+};
+
+struct sh_css_isp_ob_vmem_params {
+ VMEM_ARRAY(vmask, OBAREA_MASK_SIZE);
+};
+
+#endif /* __IA_CSS_OB_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ob/ob_1.0/ia_css_ob_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ob/ob_1.0/ia_css_ob_types.h
new file mode 100644
index 000000000000..88459b6c003d
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ob/ob_1.0/ia_css_ob_types.h
@@ -0,0 +1,69 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_OB_TYPES_H
+#define __IA_CSS_OB_TYPES_H
+
+/** @file
+* CSS-API header file for Optical Black level parameters.
+*/
+
+#include "ia_css_frac.h"
+
+/** Optical black mode.
+ */
+enum ia_css_ob_mode {
+ IA_CSS_OB_MODE_NONE, /**< OB has no effect. */
+ IA_CSS_OB_MODE_FIXED, /**< Fixed OB */
+ IA_CSS_OB_MODE_RASTER /**< Raster OB */
+};
+
+/** Optical Black level configuration.
+ *
+ * ISP block: OB1
+ * ISP1: OB1 is used.
+ * ISP2: OB1 is used.
+ */
+struct ia_css_ob_config {
+ enum ia_css_ob_mode mode; /**< Mode (None / Fixed / Raster).
+ enum, [0,2],
+ default 1, ineffective 0 */
+ ia_css_u0_16 level_gr; /**< Black level for GR pixels
+ (used for Fixed Mode only).
+ u0.16, [0,65535],
+ default/ineffective 0 */
+ ia_css_u0_16 level_r; /**< Black level for R pixels
+ (used for Fixed Mode only).
+ u0.16, [0,65535],
+ default/ineffective 0 */
+ ia_css_u0_16 level_b; /**< Black level for B pixels
+ (used for Fixed Mode only).
+ u0.16, [0,65535],
+ default/ineffective 0 */
+ ia_css_u0_16 level_gb; /**< Black level for GB pixels
+ (used for Fixed Mode only).
+ u0.16, [0,65535],
+ default/ineffective 0 */
+ uint16_t start_position; /**< Start position of OB area
+ (used for Raster Mode only).
+ u16.0, [0,63],
+ default/ineffective 0 */
+ uint16_t end_position; /**< End position of OB area
+ (used for Raster Mode only).
+ u16.0, [0,63],
+ default/ineffective 0 */
+};
+
+#endif /* __IA_CSS_OB_TYPES_H */
+
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/output/output_1.0/ia_css_output.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/output/output_1.0/ia_css_output.host.c
new file mode 100644
index 000000000000..8fdf47c9310c
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/output/output_1.0/ia_css_output.host.c
@@ -0,0 +1,162 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "ia_css_frame.h"
+#include "ia_css_debug.h"
+#define IA_CSS_INCLUDE_CONFIGURATIONS
+#include "ia_css_isp_configs.h"
+#include "ia_css_output.host.h"
+#include "isp.h"
+
+#include "assert_support.h"
+
+const struct ia_css_output_config default_output_config = {
+ 0,
+ 0
+};
+
+static const struct ia_css_output_configuration default_output_configuration = {
+ .info = (struct ia_css_frame_info *)NULL,
+};
+
+static const struct ia_css_output0_configuration default_output0_configuration = {
+ .info = (struct ia_css_frame_info *)NULL,
+};
+
+static const struct ia_css_output1_configuration default_output1_configuration = {
+ .info = (struct ia_css_frame_info *)NULL,
+};
+
+void
+ia_css_output_encode(
+ struct sh_css_isp_output_params *to,
+ const struct ia_css_output_config *from,
+ unsigned size)
+{
+ (void)size;
+ to->enable_hflip = from->enable_hflip;
+ to->enable_vflip = from->enable_vflip;
+}
+
+void
+ia_css_output_config(
+ struct sh_css_isp_output_isp_config *to,
+ const struct ia_css_output_configuration *from,
+ unsigned size)
+{
+ unsigned elems_a = ISP_VEC_NELEMS;
+
+ (void)size;
+ ia_css_dma_configure_from_info(&to->port_b, from->info);
+ to->width_a_over_b = elems_a / to->port_b.elems;
+ to->height = from->info->res.height;
+ to->enable = from->info != NULL;
+ ia_css_frame_info_to_frame_sp_info(&to->info, from->info);
+
+ /* Assume divisibility here, may need to generalize to fixed point. */
+ assert(elems_a % to->port_b.elems == 0);
+}
+
+void
+ia_css_output0_config(
+ struct sh_css_isp_output_isp_config *to,
+ const struct ia_css_output0_configuration *from,
+ unsigned size)
+{
+ ia_css_output_config(
+ to, (const struct ia_css_output_configuration *)from, size);
+}
+
+void
+ia_css_output1_config(
+ struct sh_css_isp_output_isp_config *to,
+ const struct ia_css_output1_configuration *from,
+ unsigned size)
+{
+ ia_css_output_config(
+ to, (const struct ia_css_output_configuration *)from, size);
+}
+
+void
+ia_css_output_configure(
+ const struct ia_css_binary *binary,
+ const struct ia_css_frame_info *info)
+{
+ if (NULL != info) {
+ struct ia_css_output_configuration config =
+ default_output_configuration;
+
+ config.info = info;
+
+ ia_css_configure_output(binary, &config);
+ }
+}
+
+void
+ia_css_output0_configure(
+ const struct ia_css_binary *binary,
+ const struct ia_css_frame_info *info)
+{
+ if (NULL != info) {
+ struct ia_css_output0_configuration config =
+ default_output0_configuration;
+
+ config.info = info;
+
+ ia_css_configure_output0(binary, &config);
+ }
+}
+
+void
+ia_css_output1_configure(
+ const struct ia_css_binary *binary,
+ const struct ia_css_frame_info *info)
+{
+
+ if (NULL != info) {
+ struct ia_css_output1_configuration config =
+ default_output1_configuration;
+
+ config.info = info;
+
+ ia_css_configure_output1(binary, &config);
+ }
+}
+
+void
+ia_css_output_dump(
+ const struct sh_css_isp_output_params *output,
+ unsigned level)
+{
+ if (!output)
+ return;
+ ia_css_debug_dtrace(level, "Horizontal Output Flip:\n");
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "enable", output->enable_hflip);
+ ia_css_debug_dtrace(level, "Vertical Output Flip:\n");
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "enable", output->enable_vflip);
+}
+
+void
+ia_css_output_debug_dtrace(
+ const struct ia_css_output_config *config,
+ unsigned level)
+{
+ ia_css_debug_dtrace(level,
+ "config.enable_hflip=%d",
+ config->enable_hflip);
+ ia_css_debug_dtrace(level,
+ "config.enable_vflip=%d",
+ config->enable_vflip);
+}
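
Rough sketch of the width_a_over_b computation shared by ia_css_output_config() above and by the qplane and raw config functions further below: the ISP vector width must be an integer multiple of the DMA port B element count. The values 64 and 32 are stand-ins, not the actual ISP_VEC_NELEMS or port configuration of any particular binary.

#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned int elems_a = 64;      /* stand-in for ISP_VEC_NELEMS */
	unsigned int port_b_elems = 32; /* stand-in for to->port_b.elems */

	/* Same divisibility assumption as asserted in the driver. */
	assert(elems_a % port_b_elems == 0);
	printf("width_a_over_b = %u\n", elems_a / port_b_elems); /* 2 */
	return 0;
}
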
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/output/output_1.0/ia_css_output.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/output/output_1.0/ia_css_output.host.h
new file mode 100644
index 000000000000..530f934ce81e
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/output/output_1.0/ia_css_output.host.h
@@ -0,0 +1,75 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_OUTPUT_HOST_H
+#define __IA_CSS_OUTPUT_HOST_H
+
+#include "ia_css_frame_public.h"
+#include "ia_css_binary.h"
+
+#include "ia_css_output_types.h"
+#include "ia_css_output_param.h"
+
+extern const struct ia_css_output_config default_output_config;
+
+void
+ia_css_output_encode(
+ struct sh_css_isp_output_params *to,
+ const struct ia_css_output_config *from,
+ unsigned size);
+
+void
+ia_css_output_config(
+ struct sh_css_isp_output_isp_config *to,
+ const struct ia_css_output_configuration *from,
+ unsigned size);
+
+void
+ia_css_output0_config(
+ struct sh_css_isp_output_isp_config *to,
+ const struct ia_css_output0_configuration *from,
+ unsigned size);
+
+void
+ia_css_output1_config(
+ struct sh_css_isp_output_isp_config *to,
+ const struct ia_css_output1_configuration *from,
+ unsigned size);
+
+void
+ia_css_output_configure(
+ const struct ia_css_binary *binary,
+ const struct ia_css_frame_info *from);
+
+void
+ia_css_output0_configure(
+ const struct ia_css_binary *binary,
+ const struct ia_css_frame_info *from);
+
+void
+ia_css_output1_configure(
+ const struct ia_css_binary *binary,
+ const struct ia_css_frame_info *from);
+
+void
+ia_css_output_dump(
+ const struct sh_css_isp_output_params *output,
+ unsigned level);
+
+void
+ia_css_output_debug_dtrace(
+ const struct ia_css_output_config *config,
+ unsigned level);
+
+#endif /* __IA_CSS_OUTPUT_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/output/output_1.0/ia_css_output_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/output/output_1.0/ia_css_output_param.h
new file mode 100644
index 000000000000..26ec27e085c1
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/output/output_1.0/ia_css_output_param.h
@@ -0,0 +1,36 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_OUTPUT_PARAM_H
+#define __IA_CSS_OUTPUT_PARAM_H
+
+#include <type_support.h>
+#include "dma.h"
+#include "ia_css_frame_comm.h" /* ia_css_frame_sp_info */
+
+/** output frame */
+struct sh_css_isp_output_isp_config {
+ uint32_t width_a_over_b;
+ uint32_t height;
+ uint32_t enable;
+ struct ia_css_frame_sp_info info;
+ struct dma_port_config port_b;
+};
+
+struct sh_css_isp_output_params {
+ uint8_t enable_hflip;
+ uint8_t enable_vflip;
+};
+
+#endif /* __IA_CSS_OUTPUT_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/output/output_1.0/ia_css_output_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/output/output_1.0/ia_css_output_types.h
new file mode 100644
index 000000000000..4335ac28b31d
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/output/output_1.0/ia_css_output_types.h
@@ -0,0 +1,48 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_OUTPUT_TYPES_H
+#define __IA_CSS_OUTPUT_TYPES_H
+
+/** @file
+* CSS-API header file for parameters of output frames.
+*/
+
+/** Output frame
+ *
+ * ISP block: output frame
+ */
+
+//#include "ia_css_frame_public.h"
+struct ia_css_frame_info;
+
+struct ia_css_output_configuration {
+ const struct ia_css_frame_info *info;
+};
+
+struct ia_css_output0_configuration {
+ const struct ia_css_frame_info *info;
+};
+
+struct ia_css_output1_configuration {
+ const struct ia_css_frame_info *info;
+};
+
+struct ia_css_output_config {
+ uint8_t enable_hflip; /**< enable horizontal output mirroring */
+ uint8_t enable_vflip; /**< enable vertical output mirroring */
+};
+
+#endif /* __IA_CSS_OUTPUT_TYPES_H */
+
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/qplane/qplane_2/ia_css_qplane.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/qplane/qplane_2/ia_css_qplane.host.c
new file mode 100644
index 000000000000..d1fb4b116003
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/qplane/qplane_2/ia_css_qplane.host.c
@@ -0,0 +1,61 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "ia_css_frame.h"
+#include "ia_css_types.h"
+#include "sh_css_defs.h"
+#include "ia_css_debug.h"
+#include "assert_support.h"
+#define IA_CSS_INCLUDE_CONFIGURATIONS
+#include "ia_css_isp_configs.h"
+#include "isp.h"
+
+#include "ia_css_qplane.host.h"
+
+static const struct ia_css_qplane_configuration default_config = {
+ .pipe = (struct sh_css_sp_pipeline *)NULL,
+};
+
+void
+ia_css_qplane_config(
+ struct sh_css_isp_qplane_isp_config *to,
+ const struct ia_css_qplane_configuration *from,
+ unsigned size)
+{
+ unsigned elems_a = ISP_VEC_NELEMS;
+
+ (void)size;
+ ia_css_dma_configure_from_info(&to->port_b, from->info);
+ to->width_a_over_b = elems_a / to->port_b.elems;
+
+ /* Assume divisibility here, may need to generalize to fixed point. */
+ assert(elems_a % to->port_b.elems == 0);
+
+ to->inout_port_config = from->pipe->inout_port_config;
+ to->format = from->info->format;
+}
+
+void
+ia_css_qplane_configure(
+ const struct sh_css_sp_pipeline *pipe,
+ const struct ia_css_binary *binary,
+ const struct ia_css_frame_info *info)
+{
+ struct ia_css_qplane_configuration config = default_config;
+
+ config.pipe = pipe;
+ config.info = info;
+
+ ia_css_configure_qplane(binary, &config);
+}
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/qplane/qplane_2/ia_css_qplane.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/qplane/qplane_2/ia_css_qplane.host.h
new file mode 100644
index 000000000000..c41e9e5e0fd7
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/qplane/qplane_2/ia_css_qplane.host.h
@@ -0,0 +1,43 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_QPLANE_HOST_H
+#define __IA_CSS_QPLANE_HOST_H
+
+#include <ia_css_frame_public.h>
+#include <ia_css_binary.h>
+
+#if 0
+/* Cannot be included, since sh_css_internal.h is too generic
+ * e.g. for FW generation.
+*/
+#include "sh_css_internal.h" /* sh_css_sp_pipeline */
+#endif
+
+#include "ia_css_qplane_types.h"
+#include "ia_css_qplane_param.h"
+
+void
+ia_css_qplane_config(
+ struct sh_css_isp_qplane_isp_config *to,
+ const struct ia_css_qplane_configuration *from,
+ unsigned size);
+
+void
+ia_css_qplane_configure(
+ const struct sh_css_sp_pipeline *pipe,
+ const struct ia_css_binary *binary,
+ const struct ia_css_frame_info *from);
+
+#endif /* __IA_CSS_QPLANE_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/qplane/qplane_2/ia_css_qplane_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/qplane/qplane_2/ia_css_qplane_param.h
new file mode 100644
index 000000000000..5885f621de88
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/qplane/qplane_2/ia_css_qplane_param.h
@@ -0,0 +1,30 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_QPLANE_PARAM_H
+#define __IA_CSS_QPLANE_PARAM_H
+
+#include <type_support.h>
+#include "dma.h"
+
+/* qplane channel */
+struct sh_css_isp_qplane_isp_config {
+ uint32_t width_a_over_b;
+ struct dma_port_config port_b;
+ uint32_t inout_port_config;
+ uint32_t input_needs_raw_binning;
+ uint32_t format; /* enum ia_css_frame_format */
+};
+
+#endif /* __IA_CSS_QPLANE_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/qplane/qplane_2/ia_css_qplane_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/qplane/qplane_2/ia_css_qplane_types.h
new file mode 100644
index 000000000000..955fd472a241
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/qplane/qplane_2/ia_css_qplane_types.h
@@ -0,0 +1,33 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_QPLANE_TYPES_H
+#define __IA_CSS_QPLANE_TYPES_H
+
+#include <ia_css_frame_public.h>
+#include "sh_css_internal.h"
+
+/** qplane frame
+ *
+ * ISP block: qplane frame
+ */
+
+
+struct ia_css_qplane_configuration {
+ const struct sh_css_sp_pipeline *pipe;
+ const struct ia_css_frame_info *info;
+};
+
+#endif /* __IA_CSS_QPLANE_TYPES_H */
+
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/raw/raw_1.0/ia_css_raw.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/raw/raw_1.0/ia_css_raw.host.c
new file mode 100644
index 000000000000..68a27f0cfba0
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/raw/raw_1.0/ia_css_raw.host.c
@@ -0,0 +1,136 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "ia_css_frame.h"
+#include "ia_css_types.h"
+#include "sh_css_defs.h"
+#include "ia_css_debug.h"
+#include "assert_support.h"
+#define IA_CSS_INCLUDE_CONFIGURATIONS
+#include "ia_css_isp_configs.h"
+#include "isp.h"
+#include "isp/modes/interface/isp_types.h"
+
+#include "ia_css_raw.host.h"
+
+
+static const struct ia_css_raw_configuration default_config = {
+ .pipe = (struct sh_css_sp_pipeline *)NULL,
+};
+
+static inline unsigned
+sh_css_elems_bytes_from_info(unsigned raw_bit_depth)
+{
+ return CEIL_DIV(raw_bit_depth, 8);
+}
+
+/* MW: These are MIPI / ISYS properties, not camera function properties */
+static enum sh_stream_format
+css2isp_stream_format(enum ia_css_stream_format from)
+{
+ switch (from) {
+ case IA_CSS_STREAM_FORMAT_YUV420_8_LEGACY:
+ return sh_stream_format_yuv420_legacy;
+ case IA_CSS_STREAM_FORMAT_YUV420_8:
+ case IA_CSS_STREAM_FORMAT_YUV420_10:
+ case IA_CSS_STREAM_FORMAT_YUV420_16:
+ return sh_stream_format_yuv420;
+ case IA_CSS_STREAM_FORMAT_YUV422_8:
+ case IA_CSS_STREAM_FORMAT_YUV422_10:
+ case IA_CSS_STREAM_FORMAT_YUV422_16:
+ return sh_stream_format_yuv422;
+ case IA_CSS_STREAM_FORMAT_RGB_444:
+ case IA_CSS_STREAM_FORMAT_RGB_555:
+ case IA_CSS_STREAM_FORMAT_RGB_565:
+ case IA_CSS_STREAM_FORMAT_RGB_666:
+ case IA_CSS_STREAM_FORMAT_RGB_888:
+ return sh_stream_format_rgb;
+ case IA_CSS_STREAM_FORMAT_RAW_6:
+ case IA_CSS_STREAM_FORMAT_RAW_7:
+ case IA_CSS_STREAM_FORMAT_RAW_8:
+ case IA_CSS_STREAM_FORMAT_RAW_10:
+ case IA_CSS_STREAM_FORMAT_RAW_12:
+ case IA_CSS_STREAM_FORMAT_RAW_14:
+ case IA_CSS_STREAM_FORMAT_RAW_16:
+ return sh_stream_format_raw;
+ case IA_CSS_STREAM_FORMAT_BINARY_8:
+ default:
+ return sh_stream_format_raw;
+ }
+}
+
+void
+ia_css_raw_config(
+ struct sh_css_isp_raw_isp_config *to,
+ const struct ia_css_raw_configuration *from,
+ unsigned size)
+{
+ unsigned elems_a = ISP_VEC_NELEMS;
+ const struct ia_css_frame_info *in_info = from->in_info;
+ const struct ia_css_frame_info *internal_info = from->internal_info;
+
+ (void)size;
+#if !defined(USE_INPUT_SYSTEM_VERSION_2401)
+ /* 2401 input system uses input width */
+ in_info = internal_info;
+#else
+ /* in some cases, in_info is NULL */
+ if (in_info)
+ (void)internal_info;
+ else
+ in_info = internal_info;
+
+#endif
+ ia_css_dma_configure_from_info(&to->port_b, in_info);
+
+ /* Assume divisibility here, may need to generalize to fixed point. */
+ assert((in_info->format == IA_CSS_FRAME_FORMAT_RAW_PACKED) ||
+ (elems_a % to->port_b.elems == 0));
+
+ to->width_a_over_b = elems_a / to->port_b.elems;
+ to->inout_port_config = from->pipe->inout_port_config;
+ to->format = in_info->format;
+ to->required_bds_factor = from->pipe->required_bds_factor;
+ to->two_ppc = from->two_ppc;
+ to->stream_format = css2isp_stream_format(from->stream_format);
+ to->deinterleaved = from->deinterleaved;
+#if (defined(USE_INPUT_SYSTEM_VERSION_2401) || defined(CONFIG_CSI2_PLUS))
+ to->start_column = in_info->crop_info.start_column;
+ to->start_line = in_info->crop_info.start_line;
+ to->enable_left_padding = from->enable_left_padding;
+#endif
+}
+
+void
+ia_css_raw_configure(
+ const struct sh_css_sp_pipeline *pipe,
+ const struct ia_css_binary *binary,
+ const struct ia_css_frame_info *in_info,
+ const struct ia_css_frame_info *internal_info,
+ bool two_ppc,
+ bool deinterleaved)
+{
+ uint8_t enable_left_padding = (uint8_t)((binary->left_padding) ? 1 : 0);
+ struct ia_css_raw_configuration config = default_config;
+
+ config.pipe = pipe;
+ config.in_info = in_info;
+ config.internal_info = internal_info;
+ config.two_ppc = two_ppc;
+ config.stream_format = binary->input_format;
+ config.deinterleaved = deinterleaved;
+ config.enable_left_padding = enable_left_padding;
+
+ ia_css_configure_raw(binary, &config);
+}
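
A rough usage sketch of the raw kernel API added above; the example_setup_raw_kernel() wrapper and its arguments are hypothetical, only ia_css_raw_configure() comes from ia_css_raw.host.c, and the two_ppc/deinterleaved values would normally be derived from the sensor/ISYS configuration.

/* Hypothetical caller; only ia_css_raw_configure() is from the file above. */
static void example_setup_raw_kernel(const struct ia_css_binary *binary,
                                     const struct sh_css_sp_pipeline *pipe,
                                     const struct ia_css_frame_info *in_info,
                                     const struct ia_css_frame_info *internal_info)
{
        /* two_ppc/deinterleaved are placeholders for the real ISYS settings */
        ia_css_raw_configure(pipe, binary, in_info, internal_info,
                             false /* two_ppc */, false /* deinterleaved */);
}
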
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/raw/raw_1.0/ia_css_raw.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/raw/raw_1.0/ia_css_raw.host.h
new file mode 100644
index 000000000000..ac6b7f6b59c6
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/raw/raw_1.0/ia_css_raw.host.h
@@ -0,0 +1,38 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_RAW_HOST_H
+#define __IA_CSS_RAW_HOST_H
+
+#include "ia_css_binary.h"
+
+#include "ia_css_raw_types.h"
+#include "ia_css_raw_param.h"
+
+void
+ia_css_raw_config(
+ struct sh_css_isp_raw_isp_config *to,
+ const struct ia_css_raw_configuration *from,
+ unsigned size);
+
+void
+ia_css_raw_configure(
+ const struct sh_css_sp_pipeline *pipe,
+ const struct ia_css_binary *binary,
+ const struct ia_css_frame_info *in_info,
+ const struct ia_css_frame_info *internal_info,
+ bool two_ppc,
+ bool deinterleaved);
+
+#endif /* __IA_CSS_RAW_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/raw/raw_1.0/ia_css_raw_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/raw/raw_1.0/ia_css_raw_param.h
new file mode 100644
index 000000000000..12168b2dec2d
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/raw/raw_1.0/ia_css_raw_param.h
@@ -0,0 +1,38 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_RAW_PARAM_H
+#define __IA_CSS_RAW_PARAM_H
+
+#include "type_support.h"
+
+#include "dma.h"
+
+/* Raw channel */
+struct sh_css_isp_raw_isp_config {
+ uint32_t width_a_over_b;
+ struct dma_port_config port_b;
+ uint32_t inout_port_config;
+ uint32_t input_needs_raw_binning;
+ uint32_t format; /* enum ia_css_frame_format */
+ uint32_t required_bds_factor;
+ uint32_t two_ppc;
+ uint32_t stream_format; /* enum sh_stream_format */
+ uint32_t deinterleaved;
+ uint32_t start_column; /* left crop offset */
+ uint32_t start_line; /* top crop offset */
+ uint8_t enable_left_padding; /* need this for multiple binary case */
+};
+
+#endif /* __IA_CSS_RAW_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/raw/raw_1.0/ia_css_raw_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/raw/raw_1.0/ia_css_raw_types.h
new file mode 100644
index 000000000000..54f8c299d227
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/raw/raw_1.0/ia_css_raw_types.h
@@ -0,0 +1,37 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_RAW_TYPES_H
+#define __IA_CSS_RAW_TYPES_H
+
+#include <ia_css_frame_public.h>
+#include "sh_css_internal.h"
+
+/** Raw frame
+ *
+ * ISP block: Raw frame
+ */
+
+struct ia_css_raw_configuration {
+ const struct sh_css_sp_pipeline *pipe;
+ const struct ia_css_frame_info *in_info;
+ const struct ia_css_frame_info *internal_info;
+ bool two_ppc;
+ enum ia_css_stream_format stream_format;
+ bool deinterleaved;
+ uint8_t enable_left_padding;
+};
+
+#endif /* __IA_CSS_RAW_TYPES_H */
+
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/raw_aa_binning/raw_aa_binning_1.0/ia_css_raa.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/raw_aa_binning/raw_aa_binning_1.0/ia_css_raa.host.c
new file mode 100644
index 000000000000..92168211683d
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/raw_aa_binning/raw_aa_binning_1.0/ia_css_raa.host.c
@@ -0,0 +1,35 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#if !defined(HAS_NO_HMEM)
+
+#include "memory_access.h"
+#include "ia_css_types.h"
+#include "sh_css_internal.h"
+#include "sh_css_frac.h"
+
+#include "ia_css_raa.host.h"
+
+void
+ia_css_raa_encode(
+ struct sh_css_isp_aa_params *to,
+ const struct ia_css_aa_config *from,
+ unsigned size)
+{
+ (void)size;
+ (void)to;
+ (void)from;
+}
+
+#endif
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/raw_aa_binning/raw_aa_binning_1.0/ia_css_raa.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/raw_aa_binning/raw_aa_binning_1.0/ia_css_raa.host.h
new file mode 100644
index 000000000000..b4f245c19f18
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/raw_aa_binning/raw_aa_binning_1.0/ia_css_raa.host.h
@@ -0,0 +1,27 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_RAA_HOST_H
+#define __IA_CSS_RAA_HOST_H
+
+#include "aa/aa_2/ia_css_aa2_types.h"
+#include "aa/aa_2/ia_css_aa2_param.h"
+
+void
+ia_css_raa_encode(
+ struct sh_css_isp_aa_params *to,
+ const struct ia_css_aa_config *from,
+ unsigned size);
+
+#endif /* __IA_CSS_RAA_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ref/ref_1.0/ia_css_ref.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ref/ref_1.0/ia_css_ref.host.c
new file mode 100644
index 000000000000..4c0ed5d4d971
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ref/ref_1.0/ia_css_ref.host.c
@@ -0,0 +1,74 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <assert_support.h>
+#include <ia_css_frame_public.h>
+#include <ia_css_frame.h>
+#include <ia_css_binary.h>
+#define IA_CSS_INCLUDE_CONFIGURATIONS
+#include "ia_css_isp_configs.h"
+#include "isp.h"
+#include "ia_css_ref.host.h"
+
+void
+ia_css_ref_config(
+ struct sh_css_isp_ref_isp_config *to,
+ const struct ia_css_ref_configuration *from,
+ unsigned size)
+{
+ unsigned elems_a = ISP_VEC_NELEMS, i;
+
+ (void)size;
+ ia_css_dma_configure_from_info(&to->port_b, &(from->ref_frames[0]->info));
+ to->width_a_over_b = elems_a / to->port_b.elems;
+ to->dvs_frame_delay = from->dvs_frame_delay;
+ for (i = 0; i < MAX_NUM_VIDEO_DELAY_FRAMES; i++) {
+ if (from->ref_frames[i]) {
+ to->ref_frame_addr_y[i] = from->ref_frames[i]->data + from->ref_frames[i]->planes.yuv.y.offset;
+ to->ref_frame_addr_c[i] = from->ref_frames[i]->data + from->ref_frames[i]->planes.yuv.u.offset;
+ } else {
+ to->ref_frame_addr_y[i] = 0;
+ to->ref_frame_addr_c[i] = 0;
+ }
+ }
+
+ /* Assume divisibility here, may need to generalize to fixed point. */
+ assert(elems_a % to->port_b.elems == 0);
+}
+
+void
+ia_css_ref_configure(
+ const struct ia_css_binary *binary,
+ const struct ia_css_frame **ref_frames,
+ const uint32_t dvs_frame_delay)
+{
+ struct ia_css_ref_configuration config;
+ unsigned i;
+
+ for (i = 0; i < MAX_NUM_VIDEO_DELAY_FRAMES; i++)
+ config.ref_frames[i] = ref_frames[i];
+ config.dvs_frame_delay = dvs_frame_delay;
+ ia_css_configure_ref(binary, &config);
+}
+
+void
+ia_css_init_ref_state(
+ struct sh_css_isp_ref_dmem_state *state,
+ unsigned size)
+{
+ (void)size;
+ assert(MAX_NUM_VIDEO_DELAY_FRAMES >= 2);
+ state->ref_in_buf_idx = 0;
+ state->ref_out_buf_idx = 1;
+}
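
A hedged sketch of how a caller might bind the delay frames to a binary with the API above; example_bind_ref_frames() and the dvs_frame_delay value of 1 are illustrative, only ia_css_ref_configure() comes from this file, and the array must supply MAX_NUM_VIDEO_DELAY_FRAMES entries (NULL slots become zero DMA addresses in ia_css_ref_config()).

/* Hypothetical caller; only ia_css_ref_configure() is from the file above. */
static void example_bind_ref_frames(const struct ia_css_binary *binary,
                                    const struct ia_css_frame **delay_frames)
{
        /* delay_frames must hold MAX_NUM_VIDEO_DELAY_FRAMES entries */
        ia_css_ref_configure(binary, delay_frames, 1 /* dvs_frame_delay */);
}
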
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ref/ref_1.0/ia_css_ref.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ref/ref_1.0/ia_css_ref.host.h
new file mode 100644
index 000000000000..3c6d728d49ec
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ref/ref_1.0/ia_css_ref.host.h
@@ -0,0 +1,41 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_REF_HOST_H
+#define __IA_CSS_REF_HOST_H
+
+#include <ia_css_frame_public.h>
+#include <ia_css_binary.h>
+
+#include "ia_css_ref_types.h"
+#include "ia_css_ref_param.h"
+#include "ia_css_ref_state.h"
+
+void
+ia_css_ref_config(
+ struct sh_css_isp_ref_isp_config *to,
+ const struct ia_css_ref_configuration *from,
+ unsigned size);
+
+void
+ia_css_ref_configure(
+ const struct ia_css_binary *binary,
+ const struct ia_css_frame **ref_frames,
+ const uint32_t dvs_frame_delay);
+
+void
+ia_css_init_ref_state(
+ struct sh_css_isp_ref_dmem_state *state,
+ unsigned size);
+#endif /* __IA_CSS_REF_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ref/ref_1.0/ia_css_ref_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ref/ref_1.0/ia_css_ref_param.h
new file mode 100644
index 000000000000..1f1b72a417d1
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ref/ref_1.0/ia_css_ref_param.h
@@ -0,0 +1,36 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_REF_PARAM_H
+#define __IA_CSS_REF_PARAM_H
+
+#include <type_support.h>
+#include "sh_css_defs.h"
+#include "dma.h"
+
+/** Reference frame */
+struct ia_css_ref_configuration {
+ const struct ia_css_frame *ref_frames[MAX_NUM_VIDEO_DELAY_FRAMES];
+ uint32_t dvs_frame_delay;
+};
+
+struct sh_css_isp_ref_isp_config {
+ uint32_t width_a_over_b;
+ struct dma_port_config port_b;
+ hrt_vaddress ref_frame_addr_y[MAX_NUM_VIDEO_DELAY_FRAMES];
+ hrt_vaddress ref_frame_addr_c[MAX_NUM_VIDEO_DELAY_FRAMES];
+ uint32_t dvs_frame_delay;
+};
+
+#endif /* __IA_CSS_REF_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ref/ref_1.0/ia_css_ref_state.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ref/ref_1.0/ia_css_ref_state.h
new file mode 100644
index 000000000000..7867be8a7958
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ref/ref_1.0/ia_css_ref_state.h
@@ -0,0 +1,26 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_REF_STATE_H
+#define __IA_CSS_REF_STATE_H
+
+#include "type_support.h"
+
+/* REF (temporal noise reduction) */
+struct sh_css_isp_ref_dmem_state {
+ int32_t ref_in_buf_idx;
+ int32_t ref_out_buf_idx;
+};
+
+#endif /* __IA_CSS_REF_STATE_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ref/ref_1.0/ia_css_ref_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ref/ref_1.0/ia_css_ref_types.h
new file mode 100644
index 000000000000..ce0eaeeee9c6
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ref/ref_1.0/ia_css_ref_types.h
@@ -0,0 +1,28 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_REF_TYPES_H
+#define __IA_CSS_REF_TYPES_H
+
+/** Reference frame
+ *
+ * ISP block: reference frame
+ */
+
+#include <ia_css_frame_public.h>
+
+
+
+#endif /* __IA_CSS_REF_TYPES_H */
+
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/s3a/s3a_1.0/ia_css_s3a.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/s3a/s3a_1.0/ia_css_s3a.host.c
new file mode 100644
index 000000000000..8ef6c54ee813
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/s3a/s3a_1.0/ia_css_s3a.host.c
@@ -0,0 +1,386 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "ia_css_types.h"
+#include "sh_css_defs.h"
+#ifndef IA_CSS_NO_DEBUG
+#include "ia_css_debug.h"
+#endif
+#include "sh_css_frac.h"
+#include "assert_support.h"
+
+#include "bh/bh_2/ia_css_bh.host.h"
+#include "ia_css_s3a.host.h"
+
+const struct ia_css_3a_config default_3a_config = {
+ 25559,
+ 32768,
+ 7209,
+ 65535,
+ 0,
+ 65535,
+ {-3344, -6104, -19143, 19143, 6104, 3344, 0},
+ {1027, 0, -9219, 16384, -9219, 1027, 0}
+};
+
+static unsigned int s3a_raw_bit_depth;
+
+void
+ia_css_s3a_configure(unsigned int raw_bit_depth)
+{
+ s3a_raw_bit_depth = raw_bit_depth;
+}
+
+static void
+ia_css_ae_encode(
+ struct sh_css_isp_ae_params *to,
+ const struct ia_css_3a_config *from,
+ unsigned size)
+{
+ (void)size;
+ /* coefficients to calculate Y */
+ to->y_coef_r =
+ uDIGIT_FITTING(from->ae_y_coef_r, 16, SH_CSS_AE_YCOEF_SHIFT);
+ to->y_coef_g =
+ uDIGIT_FITTING(from->ae_y_coef_g, 16, SH_CSS_AE_YCOEF_SHIFT);
+ to->y_coef_b =
+ uDIGIT_FITTING(from->ae_y_coef_b, 16, SH_CSS_AE_YCOEF_SHIFT);
+}
+
+static void
+ia_css_awb_encode(
+ struct sh_css_isp_awb_params *to,
+ const struct ia_css_3a_config *from,
+ unsigned size)
+{
+ (void)size;
+ /* AWB level gate */
+ to->lg_high_raw =
+ uDIGIT_FITTING(from->awb_lg_high_raw, 16, s3a_raw_bit_depth);
+ to->lg_low =
+ uDIGIT_FITTING(from->awb_lg_low, 16, SH_CSS_BAYER_BITS);
+ to->lg_high =
+ uDIGIT_FITTING(from->awb_lg_high, 16, SH_CSS_BAYER_BITS);
+}
+
+static void
+ia_css_af_encode(
+ struct sh_css_isp_af_params *to,
+ const struct ia_css_3a_config *from,
+ unsigned size)
+{
+ unsigned int i;
+ (void)size;
+
+ /* af fir coefficients */
+ for (i = 0; i < 7; ++i) {
+ to->fir1[i] =
+ sDIGIT_FITTING(from->af_fir1_coef[i], 15,
+ SH_CSS_AF_FIR_SHIFT);
+ to->fir2[i] =
+ sDIGIT_FITTING(from->af_fir2_coef[i], 15,
+ SH_CSS_AF_FIR_SHIFT);
+ }
+}
+
+void
+ia_css_s3a_encode(
+ struct sh_css_isp_s3a_params *to,
+ const struct ia_css_3a_config *from,
+ unsigned size)
+{
+ (void)size;
+
+ ia_css_ae_encode(&to->ae, from, sizeof(to->ae));
+ ia_css_awb_encode(&to->awb, from, sizeof(to->awb));
+ ia_css_af_encode(&to->af, from, sizeof(to->af));
+}
+
+#if 0
+void
+ia_css_process_s3a(
+ unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params)
+{
+ short dmem_offset = stage->binary->info->mem_offsets->dmem.s3a;
+
+ assert(params != NULL);
+
+ if (dmem_offset >= 0) {
+ ia_css_s3a_encode((struct sh_css_isp_s3a_params *)
+ &stage->isp_mem_params[IA_CSS_ISP_DMEM0].address[dmem_offset],
+ &params->s3a_config);
+ ia_css_bh_encode((struct sh_css_isp_bh_params *)
+ &stage->isp_mem_params[IA_CSS_ISP_DMEM0].address[dmem_offset],
+ &params->s3a_config);
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM0] = true;
+ }
+
+ params->isp_params_changed = true;
+}
+#endif
+
+#ifndef IA_CSS_NO_DEBUG
+void
+ia_css_ae_dump(
+ const struct sh_css_isp_ae_params *ae,
+ unsigned level)
+{
+ if (!ae) return;
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "ae_y_coef_r", ae->y_coef_r);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "ae_y_coef_g", ae->y_coef_g);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "ae_y_coef_b", ae->y_coef_b);
+}
+
+void
+ia_css_awb_dump(
+ const struct sh_css_isp_awb_params *awb,
+ unsigned level)
+{
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "awb_lg_high_raw", awb->lg_high_raw);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "awb_lg_low", awb->lg_low);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "awb_lg_high", awb->lg_high);
+}
+
+void
+ia_css_af_dump(
+ const struct sh_css_isp_af_params *af,
+ unsigned level)
+{
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "af_fir1[0]", af->fir1[0]);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "af_fir1[1]", af->fir1[1]);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "af_fir1[2]", af->fir1[2]);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "af_fir1[3]", af->fir1[3]);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "af_fir1[4]", af->fir1[4]);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "af_fir1[5]", af->fir1[5]);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "af_fir1[6]", af->fir1[6]);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "af_fir2[0]", af->fir2[0]);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "af_fir2[1]", af->fir2[1]);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "af_fir2[2]", af->fir2[2]);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "af_fir2[3]", af->fir2[3]);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "af_fir2[4]", af->fir2[4]);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "af_fir2[5]", af->fir2[5]);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "af_fir2[6]", af->fir2[6]);
+}
+
+void
+ia_css_s3a_dump(
+ const struct sh_css_isp_s3a_params *s3a,
+ unsigned level)
+{
+ ia_css_debug_dtrace(level, "S3A Support:\n");
+ ia_css_ae_dump(&s3a->ae, level);
+ ia_css_awb_dump(&s3a->awb, level);
+ ia_css_af_dump(&s3a->af, level);
+}
+
+void
+ia_css_s3a_debug_dtrace(
+ const struct ia_css_3a_config *config,
+ unsigned level)
+{
+ ia_css_debug_dtrace(level,
+ "config.ae_y_coef_r=%d, config.ae_y_coef_g=%d, "
+ "config.ae_y_coef_b=%d, config.awb_lg_high_raw=%d, "
+ "config.awb_lg_low=%d, config.awb_lg_high=%d\n",
+ config->ae_y_coef_r, config->ae_y_coef_g,
+ config->ae_y_coef_b, config->awb_lg_high_raw,
+ config->awb_lg_low, config->awb_lg_high);
+}
+#endif
+
+void
+ia_css_s3a_hmem_decode(
+ struct ia_css_3a_statistics *host_stats,
+ const struct ia_css_bh_table *hmem_buf)
+{
+#if defined(HAS_NO_HMEM)
+ (void)host_stats;
+ (void)hmem_buf;
+#else
+ struct ia_css_3a_rgby_output *out_ptr;
+ int i;
+
+ /* pixel counts(BQ) for 3A area */
+ int count_for_3a;
+ int sum_r, diff;
+
+ assert(host_stats != NULL);
+ assert(host_stats->rgby_data != NULL);
+ assert(hmem_buf != NULL);
+
+ count_for_3a = host_stats->grid.width * host_stats->grid.height
+ * host_stats->grid.bqs_per_grid_cell
+ * host_stats->grid.bqs_per_grid_cell;
+
+ out_ptr = host_stats->rgby_data;
+
+ ia_css_bh_hmem_decode(out_ptr, hmem_buf);
+
+ /* Calculate sum of histogram of R,
+ which should not be less than count_for_3a */
+ sum_r = 0;
+ for (i = 0; i < HMEM_UNIT_SIZE; i++) {
+ sum_r += out_ptr[i].r;
+ }
+ if (sum_r < count_for_3a) {
+ /* histogram is invalid */
+ return;
+ }
+
+ /* Verify for sum of histogram of R/G/B/Y */
+#if 0
+ {
+ int sum_g = 0;
+ int sum_b = 0;
+ int sum_y = 0;
+ for (i = 0; i < HMEM_UNIT_SIZE; i++) {
+ sum_g += out_ptr[i].g;
+ sum_b += out_ptr[i].b;
+ sum_y += out_ptr[i].y;
+ }
+ if (sum_g != sum_r || sum_b != sum_r || sum_y != sum_r) {
+ /* histogram is invalid */
+ return;
+ }
+ }
+#endif
+
+ /*
+ * Limit the histogram area only to 3A area.
+ * In DSP, the histogram of 0 is incremented for pixels
+ * which are outside of 3A area. That amount should be subtracted here.
+ * hist[0] = hist[0] - ((sum of all hist[]) - (pixel count for 3A area))
+ */
+ diff = sum_r - count_for_3a;
+ out_ptr[0].r -= diff;
+ out_ptr[0].g -= diff;
+ out_ptr[0].b -= diff;
+ out_ptr[0].y -= diff;
+#endif
+}
+
+void
+ia_css_s3a_dmem_decode(
+ struct ia_css_3a_statistics *host_stats,
+ const struct ia_css_3a_output *isp_stats)
+{
+ int isp_width, host_width, height, i;
+ struct ia_css_3a_output *host_ptr;
+
+ assert(host_stats != NULL);
+ assert(host_stats->data != NULL);
+ assert(isp_stats != NULL);
+
+ isp_width = host_stats->grid.aligned_width;
+ host_width = host_stats->grid.width;
+ height = host_stats->grid.height;
+ host_ptr = host_stats->data;
+
+ /* Unlike the VMEM version, getting 3A statistics from DMEM does not
+ * involve any transformation; we just copy the data using a
+ * different output width. */
+ for (i = 0; i < height; i++) {
+ memcpy(host_ptr, isp_stats, host_width * sizeof(*host_ptr));
+ isp_stats += isp_width;
+ host_ptr += host_width;
+ }
+}
+
+/* MW: this is an ISP function */
+STORAGE_CLASS_INLINE int
+merge_hi_lo_14(unsigned short hi, unsigned short lo)
+{
+ int val = (int) ((((unsigned int) hi << 14) & 0xfffc000) |
+ ((unsigned int) lo & 0x3fff));
+ return val;
+}
+
+void
+ia_css_s3a_vmem_decode(
+ struct ia_css_3a_statistics *host_stats,
+ const uint16_t *isp_stats_hi,
+ const uint16_t *isp_stats_lo)
+{
+ int out_width, out_height, chunk, rest, kmax, y, x, k, elm_start, elm, ofs;
+ const uint16_t *hi, *lo;
+ struct ia_css_3a_output *output;
+
+ assert(host_stats != NULL);
+ assert(host_stats->data != NULL);
+ assert(isp_stats_hi != NULL);
+ assert(isp_stats_lo != NULL);
+
+ output = host_stats->data;
+ out_width = host_stats->grid.width;
+ out_height = host_stats->grid.height;
+ hi = isp_stats_hi;
+ lo = isp_stats_lo;
+
+ chunk = ISP_VEC_NELEMS >> host_stats->grid.deci_factor_log2;
+ chunk = max(chunk, 1);
+
+ for (y = 0; y < out_height; y++) {
+ elm_start = y * ISP_S3ATBL_HI_LO_STRIDE;
+ rest = out_width;
+ x = 0;
+ while (x < out_width) {
+ kmax = (rest > chunk) ? chunk : rest;
+ ofs = y * out_width + x;
+ elm = elm_start + x * sizeof(*output) / sizeof(int32_t);
+ for (k = 0; k < kmax; k++, elm++) {
+ output[ofs + k].ae_y = merge_hi_lo_14(
+ hi[elm + chunk * 0], lo[elm + chunk * 0]);
+ output[ofs + k].awb_cnt = merge_hi_lo_14(
+ hi[elm + chunk * 1], lo[elm + chunk * 1]);
+ output[ofs + k].awb_gr = merge_hi_lo_14(
+ hi[elm + chunk * 2], lo[elm + chunk * 2]);
+ output[ofs + k].awb_r = merge_hi_lo_14(
+ hi[elm + chunk * 3], lo[elm + chunk * 3]);
+ output[ofs + k].awb_b = merge_hi_lo_14(
+ hi[elm + chunk * 4], lo[elm + chunk * 4]);
+ output[ofs + k].awb_gb = merge_hi_lo_14(
+ hi[elm + chunk * 5], lo[elm + chunk * 5]);
+ output[ofs + k].af_hpf1 = merge_hi_lo_14(
+ hi[elm + chunk * 6], lo[elm + chunk * 6]);
+ output[ofs + k].af_hpf2 = merge_hi_lo_14(
+ hi[elm + chunk * 7], lo[elm + chunk * 7]);
+ }
+ x += chunk;
+ rest -= chunk;
+ }
+ }
+}
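
For clarity, a small self-contained illustration of the 14-bit hi/lo packing that ia_css_s3a_vmem_decode() relies on; the demo names are made up, and the packing itself matches merge_hi_lo_14() above.

#include <assert.h>
#include <stdint.h>

/* Same packing as merge_hi_lo_14(): value = (hi14 << 14) | lo14. */
static int demo_merge_hi_lo_14(uint16_t hi, uint16_t lo)
{
        return (int)((((unsigned int)hi << 14) & 0xfffc000) |
                     ((unsigned int)lo & 0x3fff));
}

static void demo_merge(void)
{
        /* 0x0005 in the hi plane and 0x1234 in the lo plane -> 0x15234 */
        assert(demo_merge_hi_lo_14(0x0005, 0x1234) == 0x15234);
}
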
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/s3a/s3a_1.0/ia_css_s3a.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/s3a/s3a_1.0/ia_css_s3a.host.h
new file mode 100644
index 000000000000..4bc6c0bf478f
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/s3a/s3a_1.0/ia_css_s3a.host.h
@@ -0,0 +1,77 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_S3A_HOST_H
+#define __IA_CSS_S3A_HOST_H
+
+#include "ia_css_s3a_types.h"
+#include "ia_css_s3a_param.h"
+#include "bh/bh_2/ia_css_bh.host.h"
+
+extern const struct ia_css_3a_config default_3a_config;
+
+void
+ia_css_s3a_configure(
+ unsigned int raw_bit_depth);
+
+void
+ia_css_s3a_encode(
+ struct sh_css_isp_s3a_params *to,
+ const struct ia_css_3a_config *from,
+ unsigned size);
+
+#ifndef IA_CSS_NO_DEBUG
+void
+ia_css_ae_dump(
+ const struct sh_css_isp_ae_params *ae,
+ unsigned level);
+
+void
+ia_css_awb_dump(
+ const struct sh_css_isp_awb_params *awb,
+ unsigned level);
+
+void
+ia_css_af_dump(
+ const struct sh_css_isp_af_params *af,
+ unsigned level);
+
+void
+ia_css_s3a_dump(
+ const struct sh_css_isp_s3a_params *s3a,
+ unsigned level);
+
+void
+ia_css_s3a_debug_dtrace(
+ const struct ia_css_3a_config *config,
+ unsigned level);
+#endif
+
+void
+ia_css_s3a_hmem_decode(
+ struct ia_css_3a_statistics *host_stats,
+ const struct ia_css_bh_table *hmem_buf);
+
+void
+ia_css_s3a_dmem_decode(
+ struct ia_css_3a_statistics *host_stats,
+ const struct ia_css_3a_output *isp_stats);
+
+void
+ia_css_s3a_vmem_decode(
+ struct ia_css_3a_statistics *host_stats,
+ const uint16_t *isp_stats_hi,
+ const uint16_t *isp_stats_lo);
+
+#endif /* __IA_CSS_S3A_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/s3a/s3a_1.0/ia_css_s3a_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/s3a/s3a_1.0/ia_css_s3a_param.h
new file mode 100644
index 000000000000..35fb0a2c921a
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/s3a/s3a_1.0/ia_css_s3a_param.h
@@ -0,0 +1,54 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_S3A_PARAM_H
+#define __IA_CSS_S3A_PARAM_H
+
+#include "type_support.h"
+
+/* AE (3A Support) */
+struct sh_css_isp_ae_params {
+ /* coefficients to calculate Y */
+ int32_t y_coef_r;
+ int32_t y_coef_g;
+ int32_t y_coef_b;
+};
+
+/* AWB (3A Support) */
+struct sh_css_isp_awb_params {
+ int32_t lg_high_raw;
+ int32_t lg_low;
+ int32_t lg_high;
+};
+
+/* AF (3A Support) */
+struct sh_css_isp_af_params {
+ int32_t fir1[7];
+ int32_t fir2[7];
+};
+
+/* S3A (3A Support) */
+struct sh_css_isp_s3a_params {
+ /* coefficients to calculate Y */
+ struct sh_css_isp_ae_params ae;
+
+ /* AWB level gate */
+ struct sh_css_isp_awb_params awb;
+
+ /* af fir coefficients */
+ struct sh_css_isp_af_params af;
+};
+
+
+#endif /* __IA_CSS_S3A_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/s3a/s3a_1.0/ia_css_s3a_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/s3a/s3a_1.0/ia_css_s3a_types.h
new file mode 100644
index 000000000000..f57ed1ec5981
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/s3a/s3a_1.0/ia_css_s3a_types.h
@@ -0,0 +1,266 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_S3A_TYPES_H
+#define __IA_CSS_S3A_TYPES_H
+
+/** @file
+* CSS-API header file for 3A statistics parameters.
+*/
+
+#include <ia_css_frac.h>
+
+#if defined(SYSTEM_css_skycam_c0_system) && !defined(PIPE_GENERATION)
+#include "../../../../components/stats_3a/src/stats_3a_public.h"
+#endif
+
+/** 3A configuration. This configures the 3A statistics collection
+ * module.
+ */
+
+/** 3A statistics grid
+ *
+ * ISP block: S3A1 (3A Support for 3A ver.1 (Histogram is not used for AE))
+ * S3A2 (3A Support for 3A ver.2 (Histogram is used for AE))
+ * ISP1: S3A1 is used.
+ * ISP2: S3A2 is used.
+ */
+struct ia_css_3a_grid_info {
+
+#if defined(SYSTEM_css_skycam_c0_system)
+ uint32_t ae_enable; /**< ae enabled in binary,
+ 0:disabled, 1:enabled */
+ struct ae_public_config_grid_config ae_grd_info; /**< see description in ae_public.h*/
+
+ uint32_t awb_enable; /**< awb enabled in binary,
+ 0:disabled, 1:enabled */
+ struct awb_public_config_grid_config awb_grd_info; /**< see description in awb_public.h*/
+
+ uint32_t af_enable; /**< af enabled in binary,
+ 0:disabled, 1:enabled */
+ struct af_public_grid_config af_grd_info; /**< see description in af_public.h*/
+
+ uint32_t awb_fr_enable; /**< awb_fr enabled in binary,
+ 0:disabled, 1:enabled */
+ struct awb_fr_public_grid_config awb_fr_grd_info;/**< see description in awb_fr_public.h*/
+
+ uint32_t elem_bit_depth; /**< TODO:Taken from BYT - need input from AIQ
+ if needed for SKC
+ Bit depth of element used
+ to calculate 3A statistics.
+ This is 13, which is the normalized
+ bayer bit depth in DSP. */
+
+#else
+ uint32_t enable; /**< 3A statistics enabled.
+ 0:disabled, 1:enabled */
+ uint32_t use_dmem; /**< DMEM or VMEM determines layout.
+ 0:3A statistics are stored to VMEM,
+ 1:3A statistics are stored to DMEM */
+ uint32_t has_histogram; /**< Statistics include histogram.
+ 0:no histogram, 1:has histogram */
+ uint32_t width; /**< Width of 3A grid table.
+ (= Horizontal number of grid cells
+ in table, which cells have effective
+ statistics.) */
+ uint32_t height; /**< Height of 3A grid table.
+ (= Vertical number of grid cells
+ in table, which cells have effective
+ statistics.) */
+ uint32_t aligned_width; /**< Horizontal stride (for alloc).
+ (= Horizontal number of grid cells
+ in table, which means
+ the allocated width.) */
+ uint32_t aligned_height; /**< Vertical stride (for alloc).
+ (= Vertical number of grid cells
+ in table, which means
+ the allocated height.) */
+ uint32_t bqs_per_grid_cell; /**< Grid cell size in BQ(Bayer Quad) unit.
+ (1BQ means {Gr,R,B,Gb}(2x2 pixels).)
+ Valid values are 8,16,32,64. */
+ uint32_t deci_factor_log2; /**< log2 of bqs_per_grid_cell. */
+ uint32_t elem_bit_depth; /**< Bit depth of element used
+ to calculate 3A statistics.
+ This is 13, which is the normalized
+ bayer bit depth in DSP. */
+#endif
+};
+
+
+#if defined(SYSTEM_css_skycam_c0_system)
+#if defined USE_NEW_AE_STRUCT || defined USE_NEW_AWB_STRUCT
+#define DEFAULT_3A_GRID_INFO \
+{ \
+ 0, /* ae_enable */ \
+ {0,0,0,0,0,0,0}, /* AE: width,height,b_width,b_height,x_start,y_start*/ \
+ 0, /* awb_enable */ \
+ {0,0,0,0,0,0}, /* AWB: width,height,b_width,b_height,x_start,y_start*/ \
+ 0, /* af_enable */ \
+ {0,0,0,0,0,0,0}, /* AF: width,height,b_width,b_height,x_start,y_start,ff_en*/ \
+ 0, /* awb_fr_enable */ \
+ {0,0,0,0,0,0,0}, /* AWB_FR: width,height,b_width,b_height,x_start,y_start,ff_en*/ \
+ 0, /* elem_bit_depth */ \
+}
+#else
+#define DEFAULT_3A_GRID_INFO \
+{ \
+ 0, /* ae_enable */ \
+ {0,0,0,0,0,0,0,0,0}, /* AE: width,height,b_width,b_height,x_start,y_start,x_end,y_end*/ \
+ 0, /* awb_enable */ \
+ {0,0,0,0,0,0,0,0}, /* AWB: width,height,b_width,b_height,x_start,y_start,x_end,y_end*/ \
+ 0, /* af_enable */ \
+ {0,0,0,0,0,0,0}, /* AF: width,height,b_width,b_height,x_start,y_start,ff_en*/ \
+ 0, /* awb_fr_enable */ \
+ {0,0,0,0,0,0,0}, /* AWB_FR: width,height,b_width,b_height,x_start,y_start,ff_en*/ \
+ 0, /* elem_bit_depth */ \
+}
+#endif /* USE_NEW_AE_STRUCT || defined USE_NEW_AWB_STRUCT */
+
+#else
+#define DEFAULT_3A_GRID_INFO \
+{ \
+ 0, /* enable */ \
+ 0, /* use_dmem */ \
+ 0, /* has_histogram */ \
+ 0, /* width */ \
+ 0, /* height */ \
+ 0, /* aligned_width */ \
+ 0, /* aligned_height */ \
+ 0, /* bqs_per_grid_cell */ \
+ 0, /* deci_factor_log2 */ \
+ 0, /* elem_bit_depth */ \
+}
+
+#endif
+
+/* This struct should be split into 3, for AE, AWB and AF.
+ * However, that will require driver / 3A library modifications.
+ */
+
+/** 3A configuration. This configures the 3A statistics collection
+ * module.
+ *
+ * ae_y_*: Coefficients to calculate luminance from bayer.
+ * awb_lg_*: Thresholds to check the saturated bayer pixels for AWB.
+ * Condition of effective pixel for AWB level gate check:
+ * bayer(sensor) <= awb_lg_high_raw &&
+ * bayer(when AWB statistics are calculated) >= awb_lg_low &&
+ * bayer(when AWB statistics are calculated) <= awb_lg_high
+ * af_fir*: Coefficients of high pass filter to calculate AF statistics.
+ *
+ * ISP block: S3A1(ae_y_* for AE/AF, awb_lg_* for AWB)
+ * S3A2(ae_y_* for AF, awb_lg_* for AWB)
+ * SDVS1(ae_y_*)
+ * SDVS2(ae_y_*)
+ * ISP1: S3A1 and SDVS1 are used.
+ * ISP2: S3A2 and SDVS2 are used.
+ */
+struct ia_css_3a_config {
+ ia_css_u0_16 ae_y_coef_r; /**< Weight of R for Y.
+ u0.16, [0,65535],
+ default/ineffective 25559 */
+ ia_css_u0_16 ae_y_coef_g; /**< Weight of G for Y.
+ u0.16, [0,65535],
+ default/ineffective 32768 */
+ ia_css_u0_16 ae_y_coef_b; /**< Weight of B for Y.
+ u0.16, [0,65535],
+ default/ineffective 7209 */
+ ia_css_u0_16 awb_lg_high_raw; /**< AWB level gate high for raw.
+ u0.16, [0,65535],
+ default 65472(=1023*64),
+ ineffective 65535 */
+ ia_css_u0_16 awb_lg_low; /**< AWB level gate low.
+ u0.16, [0,65535],
+ default 64(=1*64),
+ ineffective 0 */
+ ia_css_u0_16 awb_lg_high; /**< AWB level gate high.
+ u0.16, [0,65535],
+ default 65535,
+ ineffective 65535 */
+ ia_css_s0_15 af_fir1_coef[7]; /**< AF FIR coefficients of fir1.
+ s0.15, [-32768,32767],
+ default/ineffective
+ -6689,-12207,-32768,32767,12207,6689,0 */
+ ia_css_s0_15 af_fir2_coef[7]; /**< AF FIR coefficients of fir2.
+ s0.15, [-32768,32767],
+ default/ineffective
+ 2053,0,-18437,32767,-18437,2053,0 */
+};
+
+/** 3A statistics. This structure describes the data stored
+ * in each 3A grid point.
+ *
+ * ISP block: S3A1 (3A Support for 3A ver.1) (Histogram is not used for AE)
+ * S3A2 (3A Support for 3A ver.2) (Histogram is used for AE)
+ * - ae_y is used only for S3A1.
+ * - awb_* and af_* are used both for S3A1 and S3A2.
+ * ISP1: S3A1 is used.
+ * ISP2: S3A2 is used.
+ */
+struct ia_css_3a_output {
+ int32_t ae_y; /**< Sum of Y in a statistics window, for AE.
+ (u19.13) */
+ int32_t awb_cnt; /**< Number of effective pixels
+ in a statistics window.
+ Pixels passed by the AWB level gate check are
+ judged as "effective". (u32) */
+ int32_t awb_gr; /**< Sum of Gr in a statistics window, for AWB.
+ All Gr pixels (not only for effective pixels)
+ are summed. (u19.13) */
+ int32_t awb_r; /**< Sum of R in a statistics window, for AWB.
+ All R pixels (not only for effective pixels)
+ are summed. (u19.13) */
+ int32_t awb_b; /**< Sum of B in a statistics window, for AWB.
+ All B pixels (not only for effective pixels)
+ are summed. (u19.13) */
+ int32_t awb_gb; /**< Sum of Gb in a statistics window, for AWB.
+ All Gb pixels (not only for effective pixels)
+ are summed. (u19.13) */
+ int32_t af_hpf1; /**< Sum of |Y| following high pass filter af_fir1
+ within a statistics window, for AF. (u19.13) */
+ int32_t af_hpf2; /**< Sum of |Y| following high pass filter af_fir2
+ within a statistics window, for AF. (u19.13) */
+};
+
+
+/** 3A Statistics. This structure describes the statistics that are generated
+ * using the provided configuration (ia_css_3a_config).
+ */
+struct ia_css_3a_statistics {
+ struct ia_css_3a_grid_info grid; /**< grid info contains the dimensions of the 3A grid */
+ struct ia_css_3a_output *data; /**< the pointer to 3a_output[grid.width * grid.height]
+ containing the 3A statistics */
+ struct ia_css_3a_rgby_output *rgby_data;/**< the pointer to 3a_rgby_output[256]
+ containing the histogram */
+};
+
+/** Histogram (Statistics for AE).
+ *
+ * 4 histograms (r,g,b,y),
+ * 256 bins for each histogram, unsigned 24bit value for each bin.
+ * struct ia_css_3a_rgby_output data[256];
+ *
+ * ISP block: HIST2
+ * (ISP1: HIST2 is not used.)
+ * ISP2: HIST2 is used.
+ */
+struct ia_css_3a_rgby_output {
+ uint32_t r; /**< Number of R of one bin of the histogram R. (u24) */
+ uint32_t g; /**< Number of G of one bin of the histogram G. (u24) */
+ uint32_t b; /**< Number of B of one bin of the histogram B. (u24) */
+ uint32_t y; /**< Number of Y of one bin of the histogram Y. (u24) */
+};
+
+#endif /* __IA_CSS_S3A_TYPES_H */
+
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/s3a_stat_ls/ia_css_s3a_stat_ls_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/s3a_stat_ls/ia_css_s3a_stat_ls_param.h
new file mode 100644
index 000000000000..8b2b56b0310b
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/s3a_stat_ls/ia_css_s3a_stat_ls_param.h
@@ -0,0 +1,45 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_S3A_STAT_LS_PARAM_H
+#define __IA_CSS_S3A_STAT_LS_PARAM_H
+
+#include "type_support.h"
+#ifdef ISP2401
+#include "../../io_ls/common/ia_css_common_io_types.h"
+#endif
+
+#define NUM_S3A_LS 1
+
+/** s3a statistics store */
+#ifdef ISP2401
+struct ia_css_s3a_stat_ls_configuration {
+ uint32_t s3a_grid_size_log2;
+};
+
+#endif
+struct sh_css_isp_s3a_stat_ls_isp_config {
+#ifndef ISP2401
+ uint32_t base_address[NUM_S3A_LS];
+ uint32_t width[NUM_S3A_LS];
+ uint32_t height[NUM_S3A_LS];
+ uint32_t stride[NUM_S3A_LS];
+#endif
+ uint32_t s3a_grid_size_log2[NUM_S3A_LS];
+};
+
+#ifndef ISP2401
+
+#endif
+#endif /* __IA_CSS_S3A_STAT_LS_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/s3a_stat_ls/ia_css_s3a_stat_store_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/s3a_stat_ls/ia_css_s3a_stat_store_param.h
new file mode 100644
index 000000000000..676b42d364e8
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/s3a_stat_ls/ia_css_s3a_stat_store_param.h
@@ -0,0 +1,21 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_S3A_STAT_STORE_PARAM_H
+#define __IA_CSS_S3A_STAT_STORE_PARAM_H
+
+#include "ia_css_s3a_stat_ls_param.h"
+
+
+#endif /* __IA_CSS_S3A_STAT_STORE_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sc/sc_1.0/ia_css_sc.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sc/sc_1.0/ia_css_sc.host.c
new file mode 100644
index 000000000000..565ae45b7541
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sc/sc_1.0/ia_css_sc.host.c
@@ -0,0 +1,130 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "ia_css_types.h"
+#include "sh_css_defs.h"
+#include "ia_css_debug.h"
+#include "assert_support.h"
+#ifdef ISP2401
+#include "math_support.h" /* min() */
+
+#define IA_CSS_INCLUDE_CONFIGURATIONS
+#include "ia_css_isp_configs.h"
+#endif
+
+#include "ia_css_sc.host.h"
+
+void
+ia_css_sc_encode(
+ struct sh_css_isp_sc_params *to,
+ struct ia_css_shading_table **from,
+ unsigned size)
+{
+ (void)size;
+ to->gain_shift = (*from)->fraction_bits;
+}
+
+void
+ia_css_sc_dump(
+ const struct sh_css_isp_sc_params *sc,
+ unsigned level)
+{
+ if (!sc) return;
+ ia_css_debug_dtrace(level, "Shading Correction:\n");
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "sc_gain_shift", sc->gain_shift);
+}
+
+#ifdef ISP2401
+void
+ia_css_sc_config(
+ struct sh_css_isp_sc_isp_config *to,
+ const struct ia_css_sc_configuration *from,
+ unsigned size)
+{
+ uint32_t internal_org_x_bqs = from->internal_frame_origin_x_bqs_on_sctbl;
+ uint32_t internal_org_y_bqs = from->internal_frame_origin_y_bqs_on_sctbl;
+ uint32_t slice, rest, i;
+
+ (void)size;
+
+ /* The internal_frame_origin_x_bqs_on_sctbl is split across 8 slices of slice_vec. */
+ rest = internal_org_x_bqs;
+ for (i = 0; i < SH_CSS_SC_INTERPED_GAIN_HOR_SLICE_TIMES; i++) {
+ slice = min(rest, ((uint32_t)ISP_SLICE_NELEMS));
+ rest = rest - slice;
+ to->interped_gain_hor_slice_bqs[i] = slice;
+ }
+
+ to->internal_frame_origin_y_bqs_on_sctbl = internal_org_y_bqs;
+}
+
+void
+ia_css_sc_configure(
+ const struct ia_css_binary *binary,
+ uint32_t internal_frame_origin_x_bqs_on_sctbl,
+ uint32_t internal_frame_origin_y_bqs_on_sctbl)
+{
+ const struct ia_css_sc_configuration config = {
+ internal_frame_origin_x_bqs_on_sctbl,
+ internal_frame_origin_y_bqs_on_sctbl };
+
+ ia_css_configure_sc(binary, &config);
+}
+
+#endif
+/* ------ deprecated(bz675) : from ------ */
+/* It looks like @parameter{} (in *.pipe) is used to generate the process/get/set functions
+ for parameters which should be used in the isp kernels.
+ However, the ia_css_shading_settings structure has a parameter which is used only in the css,
+ and does not have a parameter which is used in the isp kernels.
+ Therefore, I did not use @parameter{} to generate the get/set functions
+ for the ia_css_shading_settings structure. (michie) */
+void
+sh_css_get_shading_settings(const struct ia_css_isp_parameters *params,
+ struct ia_css_shading_settings *settings)
+{
+ if (settings == NULL)
+ return;
+ assert(params != NULL);
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_get_shading_settings() enter: settings=%p\n", settings);
+
+ *settings = params->shading_settings;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_get_shading_settings() leave: settings.enable_shading_table_conversion=%d\n",
+ settings->enable_shading_table_conversion);
+}
+
+void
+sh_css_set_shading_settings(struct ia_css_isp_parameters *params,
+ const struct ia_css_shading_settings *settings)
+{
+ if (settings == NULL)
+ return;
+ assert(params != NULL);
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_set_shading_settings() enter: settings.enable_shading_table_conversion=%d\n",
+ settings->enable_shading_table_conversion);
+
+ params->shading_settings = *settings;
+ params->shading_settings_changed = true;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_set_shading_settings() leave: return_void\n");
+}
+/* ------ deprecated(bz675) : to ------ */
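
A worked illustration of the slicing loop in ia_css_sc_config() above, assuming ISP_SLICE_NELEMS is 4 (consistent with the CEIL_DIV(31, 4) note in ia_css_sc_param.h); the DEMO_* names and the helper are stand-ins, not part of the driver.

#include <stdint.h>

#define DEMO_SLICE_NELEMS 4  /* stand-in for ISP_SLICE_NELEMS */
#define DEMO_SLICE_TIMES  8  /* SH_CSS_SC_INTERPED_GAIN_HOR_SLICE_TIMES */

/* An x origin of 13 bqs is split into the slices 4, 4, 4, 1, 0, 0, 0, 0. */
static void demo_slice_origin_x(uint32_t origin_x_bqs,
                                uint32_t out[DEMO_SLICE_TIMES])
{
        uint32_t rest = origin_x_bqs, slice, i;

        for (i = 0; i < DEMO_SLICE_TIMES; i++) {
                slice = rest < DEMO_SLICE_NELEMS ? rest : DEMO_SLICE_NELEMS;
                rest -= slice;
                out[i] = slice;
        }
}
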
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sc/sc_1.0/ia_css_sc.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sc/sc_1.0/ia_css_sc.host.h
new file mode 100644
index 000000000000..44e3c43a5d4a
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sc/sc_1.0/ia_css_sc.host.h
@@ -0,0 +1,77 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_SC_HOST_H
+#define __IA_CSS_SC_HOST_H
+
+#include "sh_css_params.h"
+
+#include "ia_css_sc_types.h"
+#include "ia_css_sc_param.h"
+
+void
+ia_css_sc_encode(
+ struct sh_css_isp_sc_params *to,
+ struct ia_css_shading_table **from,
+ unsigned size);
+
+void
+ia_css_sc_dump(
+ const struct sh_css_isp_sc_params *sc,
+ unsigned level);
+
+#ifdef ISP2401
+/** @brief Configure the shading correction.
+ * @param[out] to Parameters used in the shading correction kernel in the isp.
+ * @param[in] from Parameters passed from the host.
+ * @param[in] size Size of the sh_css_isp_sc_isp_config structure.
+ *
+ * This function passes the parameters for the shading correction from the host to the isp.
+ */
+void
+ia_css_sc_config(
+ struct sh_css_isp_sc_isp_config *to,
+ const struct ia_css_sc_configuration *from,
+ unsigned size);
+
+/** @brief Configure the shading correction.
+ * @param[in] binary The binary, which has the shading correction.
+ * @param[in] internal_frame_origin_x_bqs_on_sctbl
+ * X coordinate (in bqs) of the origin of the internal frame on the shading table.
+ * @param[in] internal_frame_origin_y_bqs_on_sctbl
+ * Y coordinate (in bqs) of the origin of the internal frame on the shading table.
+ *
+ * This function calls the ia_css_configure_sc() function.
+ * (The ia_css_configure_sc() function is automatically generated in ia_css_isp.configs.c.)
+ * The ia_css_configure_sc() function calls the ia_css_sc_config() function
+ * to pass the parameters for the shading correction from the host to the isp.
+ */
+void
+ia_css_sc_configure(
+ const struct ia_css_binary *binary,
+ uint32_t internal_frame_origin_x_bqs_on_sctbl,
+ uint32_t internal_frame_origin_y_bqs_on_sctbl);
+
+#endif
+/* ------ deprecated(bz675) : from ------ */
+void
+sh_css_get_shading_settings(const struct ia_css_isp_parameters *params,
+ struct ia_css_shading_settings *settings);
+
+void
+sh_css_set_shading_settings(struct ia_css_isp_parameters *params,
+ const struct ia_css_shading_settings *settings);
+/* ------ deprecated(bz675) : to ------ */
+
+#endif /* __IA_CSS_SC_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sc/sc_1.0/ia_css_sc_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sc/sc_1.0/ia_css_sc_param.h
new file mode 100644
index 000000000000..d997d5137634
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sc/sc_1.0/ia_css_sc_param.h
@@ -0,0 +1,71 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_SC_PARAM_H
+#define __IA_CSS_SC_PARAM_H
+
+#include "type_support.h"
+
+#ifdef ISP2401
+/* To position the shading center grid point on the center of output image,
+ * one more grid cell is needed as margin. */
+#define SH_CSS_SCTBL_CENTERING_MARGIN 1
+
+/* The shading table width and height are the number of grids, not cells. The last grid should be counted. */
+#define SH_CSS_SCTBL_LAST_GRID_COUNT 1
+
+/* Number of horizontal grids per color in the shading table. */
+#define _ISP_SCTBL_WIDTH_PER_COLOR(input_width, deci_factor_log2) \
+ (ISP_BQ_GRID_WIDTH(input_width, deci_factor_log2) + \
+ SH_CSS_SCTBL_CENTERING_MARGIN + SH_CSS_SCTBL_LAST_GRID_COUNT)
+
+/* Number of vertical grids per color in the shading table. */
+#define _ISP_SCTBL_HEIGHT(input_height, deci_factor_log2) \
+ (ISP_BQ_GRID_HEIGHT(input_height, deci_factor_log2) + \
+ SH_CSS_SCTBL_CENTERING_MARGIN + SH_CSS_SCTBL_LAST_GRID_COUNT)
+
+/* Legacy API: Number of horizontal grids per color in the shading table. */
+#define _ISP_SCTBL_LEGACY_WIDTH_PER_COLOR(input_width, deci_factor_log2) \
+ (ISP_BQ_GRID_WIDTH(input_width, deci_factor_log2) + SH_CSS_SCTBL_LAST_GRID_COUNT)
+
+/* Legacy API: Number of vertical grids per color in the shading table. */
+#define _ISP_SCTBL_LEGACY_HEIGHT(input_height, deci_factor_log2) \
+ (ISP_BQ_GRID_HEIGHT(input_height, deci_factor_log2) + SH_CSS_SCTBL_LAST_GRID_COUNT)
+
+#endif
+/* SC (Shading Correction) */
+struct sh_css_isp_sc_params {
+ int32_t gain_shift;
+};
+
+#ifdef ISP2401
+/* Number of horizontal slice times for interpolated gain:
+ *
+ * The start position of the internal frame does not match the start position of the shading table.
+ * To get a vector of shading gains (interpolated horizontally and vertically)
+ * which matches a vector on the internal frame,
+ * vec_slice is used for 2 adjacent vectors of shading gains.
+ * The number of shift times by vec_slice is 8.
+ * Max grid cell bqs to support the shading table centering: N = 32
+ * CEIL_DIV(N-1, ISP_SLICE_NELEMS) = CEIL_DIV(31, 4) = 8
+ */
+#define SH_CSS_SC_INTERPED_GAIN_HOR_SLICE_TIMES 8
+
+struct sh_css_isp_sc_isp_config {
+ uint32_t interped_gain_hor_slice_bqs[SH_CSS_SC_INTERPED_GAIN_HOR_SLICE_TIMES];
+ uint32_t internal_frame_origin_y_bqs_on_sctbl;
+};
+
+#endif
+#endif /* __IA_CSS_SC_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sc/sc_1.0/ia_css_sc_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sc/sc_1.0/ia_css_sc_types.h
new file mode 100644
index 000000000000..5a833bc48af1
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sc/sc_1.0/ia_css_sc_types.h
@@ -0,0 +1,136 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_SC_TYPES_H
+#define __IA_CSS_SC_TYPES_H
+
+/** @file
+* CSS-API header file for Lens Shading Correction (SC) parameters.
+*/
+
+
+/** Number of color planes in the shading table. */
+#define IA_CSS_SC_NUM_COLORS 4
+
+/** The 4 colors that a shading table consists of.
+ * For each color we store a grid of values.
+ */
+enum ia_css_sc_color {
+ IA_CSS_SC_COLOR_GR, /**< Green on a green-red line */
+ IA_CSS_SC_COLOR_R, /**< Red */
+ IA_CSS_SC_COLOR_B, /**< Blue */
+ IA_CSS_SC_COLOR_GB /**< Green on a green-blue line */
+};
+
+/** Lens Shading Correction table.
+ *
+ * This describes the color shading artefacts
+ * introduced by lens imperfections. To correct these artefacts,
+ * Bayer values should be multiplied by the gains in this table.
+ *
+ *------------ deprecated(bz675) : from ---------------------------
+ * When shading_settings.enable_shading_table_conversion is set as 0,
+ * this shading table is directly sent to the isp. This table should contain
+ * the data based on the ia_css_shading_info information filled in the css.
+ * So, the driver needs to get the ia_css_shading_info information
+ * from the css, prior to generating the shading table.
+ *
+ * When shading_settings.enable_shading_table_conversion is set as 1,
+ * this shading table is converted in the legacy way in the css
+ * before it is sent to the isp.
+ * The driver does not need to get the ia_css_shading_info information.
+ *
+ * NOTE:
+ * The shading table conversion will be removed from the css in the near future,
+ * because it does not support the Bayer scaling by the sensor.
+ * Also, the shading table should be generated in only one place (AIC).
+ * At the moment, to support the old driver which assumes the conversion is done in the css,
+ * shading_settings.enable_shading_table_conversion is set as 1 by default.
+ *------------ deprecated(bz675) : to ---------------------------
+ *
+ * ISP block: SC1
+ * ISP1: SC1 is used.
+ * ISP2: SC1 is used.
+ */
+struct ia_css_shading_table {
+ uint32_t enable; /**< Set to false for no shading correction.
+ The data field can be NULL when enable == true */
+/* ------ deprecated(bz675) : from ------ */
+ uint32_t sensor_width; /**< Native sensor width in pixels. */
+ uint32_t sensor_height; /**< Native sensor height in lines.
+ When shading_settings.enable_shading_table_conversion is set
+ as 0, sensor_width and sensor_height are NOT used.
+ These are used only in the legacy shading table conversion
+ in the css, when shading_settings.
+ enable_shading_table_conversion is set as 1. */
+/* ------ deprecated(bz675) : to ------ */
+ uint32_t width; /**< Number of data points per line per color.
+ u8.0, [0,81] */
+ uint32_t height; /**< Number of lines of data points per color.
+ u8.0, [0,61] */
+ uint32_t fraction_bits; /**< Bits of fractional part in the data
+ points.
+ u8.0, [0,13] */
+ uint16_t *data[IA_CSS_SC_NUM_COLORS];
+ /**< Table data, one array for each color.
+ Use ia_css_sc_color to index this array.
+ u[13-fraction_bits].[fraction_bits], [0,8191] */
+};
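+
+/* A minimal sketch of how a gain from this table is meant to be applied to a
+ * Bayer value. The helper name and its use are hypothetical; only the
+ * fixed-point format implied by fraction_bits is taken from the definition
+ * above.
+ *
+ *   static inline uint16_t sc_apply_gain(uint16_t bayer_val,
+ *                                        const struct ia_css_shading_table *t,
+ *                                        enum ia_css_sc_color color,
+ *                                        uint32_t grid_idx)
+ *   {
+ *       uint32_t gain = t->data[color][grid_idx]; // u[13-fraction_bits].[fraction_bits]
+ *       return (uint16_t)(((uint32_t)bayer_val * gain) >> t->fraction_bits);
+ *   }
+ */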
+
+/* ------ deprecated(bz675) : from ------ */
+/** Shading Correction settings.
+ *
+ * NOTE:
+ * This structure should be removed when the shading table conversion is
+ * removed from the css.
+ */
+struct ia_css_shading_settings {
+ uint32_t enable_shading_table_conversion; /**< Set to 0
+ if the conversion of the shading table should be disabled
+ in the css (default 1).
+ 0: The shading table is directly sent to the isp.
+ The shading table should contain the data based on the
+ ia_css_shading_info information filled in the css.
+ 1: The shading table is converted in the css, to be fitted
+ to the shading table definition required in the isp.
+ NOTE:
+ Previously, the shading table was always converted in the css
+ before it was sent to the isp, and this config was not defined.
+ Currently, the driver is supposed to pass the shading table
+ which should be directly sent to the isp.
+ However, some drivers may still pass the shading table which
+ needs the conversion without setting this config as 1.
+ To support such an unexpected case for the time being,
+ enable_shading_table_conversion is set as 1 by default
+ in the css. */
+};
+/* ------ deprecated(bz675) : to ------ */
+
+#ifdef ISP2401
+
+/** Shading Correction configuration.
+ *
+ * NOTE: The shading table size is larger than or equal to the internal frame size.
+ */
+struct ia_css_sc_configuration {
+ uint32_t internal_frame_origin_x_bqs_on_sctbl; /**< Origin X (in bqs) of internal frame on shading table. */
+ uint32_t internal_frame_origin_y_bqs_on_sctbl; /**< Origin Y (in bqs) of internal frame on shading table. */
+ /**< NOTE: bqs = size in BQ(Bayer Quad) unit.
+ 1BQ means {Gr,R,B,Gb}(2x2 pixels).
+ Horizontal 1 bqs corresponds to horizontal 2 pixels.
+ Vertical 1 bqs corresponds to vertical 2 pixels. */
+};
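+
+/* A minimal usage sketch (origin_x_pixels/origin_y_pixels are hypothetical
+ * inputs): since 1 bq covers 2x2 pixels, a pixel origin is converted to bqs
+ * by dividing each coordinate by 2.
+ *
+ *   struct ia_css_sc_configuration cfg;
+ *   cfg.internal_frame_origin_x_bqs_on_sctbl = origin_x_pixels / 2;
+ *   cfg.internal_frame_origin_y_bqs_on_sctbl = origin_y_pixels / 2;
+ */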
+#endif
+
+#endif /* __IA_CSS_SC_TYPES_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/scale/scale_1.0/ia_css_scale_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/scale/scale_1.0/ia_css_scale_param.h
new file mode 100644
index 000000000000..fd19f008ff91
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/scale/scale_1.0/ia_css_scale_param.h
@@ -0,0 +1,20 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _IA_CSS_SCALE_PARAM_H
+#define _IA_CSS_SCALE_PARAM_H
+
+#include "uds/uds_1.0/ia_css_uds_param.h"
+
+#endif /* _IA_CSS_SCALE_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/common/ia_css_sdis_common.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/common/ia_css_sdis_common.host.h
new file mode 100644
index 000000000000..4eb4910798fa
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/common/ia_css_sdis_common.host.h
@@ -0,0 +1,99 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _IA_CSS_SDIS_COMMON_HOST_H
+#define _IA_CSS_SDIS_COMMON_HOST_H
+
+#define ISP_MAX_SDIS_HOR_PROJ_NUM_ISP \
+ __ISP_SDIS_HOR_PROJ_NUM_ISP(ISP_MAX_INTERNAL_WIDTH, ISP_MAX_INTERNAL_HEIGHT, \
+ SH_CSS_DIS_DECI_FACTOR_LOG2, ISP_PIPE_VERSION)
+#define ISP_MAX_SDIS_VER_PROJ_NUM_ISP \
+ __ISP_SDIS_VER_PROJ_NUM_ISP(ISP_MAX_INTERNAL_WIDTH, \
+ SH_CSS_DIS_DECI_FACTOR_LOG2)
+
+#define _ISP_SDIS_HOR_COEF_NUM_VECS \
+ __ISP_SDIS_HOR_COEF_NUM_VECS(ISP_INTERNAL_WIDTH)
+#define ISP_MAX_SDIS_HOR_COEF_NUM_VECS \
+ __ISP_SDIS_HOR_COEF_NUM_VECS(ISP_MAX_INTERNAL_WIDTH)
+#define ISP_MAX_SDIS_VER_COEF_NUM_VECS \
+ __ISP_SDIS_VER_COEF_NUM_VECS(ISP_MAX_INTERNAL_HEIGHT)
+
+/* SDIS Coefficients: */
+/* The ISP uses vectors to store the coefficients, so we round
+ the number of coefficients up to vectors. */
+#define __ISP_SDIS_HOR_COEF_NUM_VECS(in_width) _ISP_VECS(_ISP_BQS(in_width))
+#define __ISP_SDIS_VER_COEF_NUM_VECS(in_height) _ISP_VECS(_ISP_BQS(in_height))
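+
+/* Worked example of the rounding above, assuming _ISP_BQS() halves the pixel
+ * count and _ISP_VECS() rounds up to whole vectors of ISP_VEC_NELEMS elements
+ * (ISP_VEC_NELEMS is taken as 64 here purely for illustration):
+ * an input width of 1920 pixels is 960 bqs, so
+ * __ISP_SDIS_HOR_COEF_NUM_VECS(1920) = CEIL_DIV(960, 64) = 15 vectors.
+ */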
+
+/* SDIS Projections:
+ * SDIS1: Horizontal projections are calculated for each line.
+ * Vertical projections are calculated for each column.
+ * SDIS2: Projections are calculated for each grid cell.
+ * Grid cells that do not fall completely within the image are not
+ * valid. The host needs to use the bigger one for the stride but
+ * should only return the valid ones to the 3A. */
+#define __ISP_SDIS_HOR_PROJ_NUM_ISP(in_width, in_height, deci_factor_log2, \
+ isp_pipe_version) \
+ ((isp_pipe_version == 1) ? \
+ CEIL_SHIFT(_ISP_BQS(in_height), deci_factor_log2) : \
+ CEIL_SHIFT(_ISP_BQS(in_width), deci_factor_log2))
+
+#define __ISP_SDIS_VER_PROJ_NUM_ISP(in_width, deci_factor_log2) \
+ CEIL_SHIFT(_ISP_BQS(in_width), deci_factor_log2)
+
+#define SH_CSS_DIS_VER_NUM_COEF_TYPES(b) \
+ (((b)->info->sp.pipeline.isp_pipe_version == 2) ? \
+ IA_CSS_DVS2_NUM_COEF_TYPES : \
+ IA_CSS_DVS_NUM_COEF_TYPES)
+
+#ifndef PIPE_GENERATION
+#if defined(__ISP) || defined (MK_FIRMWARE)
+
+/* The array cannot be 2-dimensional, since the driver DDR allocation does not know the stride */
+struct sh_css_isp_sdis_hori_proj_tbl {
+ int32_t tbl[ISP_DVS_NUM_COEF_TYPES * ISP_MAX_SDIS_HOR_PROJ_NUM_ISP];
+#if DVS2_PROJ_MARGIN > 0
+ int32_t margin[DVS2_PROJ_MARGIN];
+#endif
+};
+
+struct sh_css_isp_sdis_vert_proj_tbl {
+ int32_t tbl[ISP_DVS_NUM_COEF_TYPES * ISP_MAX_SDIS_VER_PROJ_NUM_ISP];
+#if DVS2_PROJ_MARGIN > 0
+ int32_t margin[DVS2_PROJ_MARGIN];
+#endif
+};
+
+struct sh_css_isp_sdis_hori_coef_tbl {
+ VMEM_ARRAY(tbl[ISP_DVS_NUM_COEF_TYPES], ISP_MAX_SDIS_HOR_COEF_NUM_VECS*ISP_NWAY);
+};
+
+struct sh_css_isp_sdis_vert_coef_tbl {
+ VMEM_ARRAY(tbl[ISP_DVS_NUM_COEF_TYPES], ISP_MAX_SDIS_VER_COEF_NUM_VECS*ISP_NWAY);
+};
+
+#endif /* defined(__ISP) || defined (MK_FIRMWARE) */
+#endif /* PIPE_GENERATION */
+
+#ifndef PIPE_GENERATION
+struct s_sdis_config {
+ unsigned horicoef_vectors;
+ unsigned vertcoef_vectors;
+ unsigned horiproj_num;
+ unsigned vertproj_num;
+};
+
+extern struct s_sdis_config sdis_config;
+#endif
+
+#endif /* _IA_CSS_SDIS_COMMON_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/common/ia_css_sdis_common_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/common/ia_css_sdis_common_types.h
new file mode 100644
index 000000000000..295dc60b778c
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/common/ia_css_sdis_common_types.h
@@ -0,0 +1,232 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_SDIS_COMMON_TYPES_H
+#define __IA_CSS_SDIS_COMMON_TYPES_H
+
+/** @file
+* CSS-API header file for DVS statistics parameters.
+*/
+
+#include <type_support.h>
+
+/** DVS statistics grid dimensions in number of cells.
+ */
+
+struct ia_css_dvs_grid_dim {
+ uint32_t width; /**< Width of DVS grid table in cells */
+ uint32_t height; /**< Height of DVS grid table in cells */
+};
+
+/** DVS statistics dimensions in number of cells for
+ * grid, coefficient and projection.
+ */
+
+struct ia_css_sdis_info {
+ struct {
+ struct ia_css_dvs_grid_dim dim; /* Dimensions */
+ struct ia_css_dvs_grid_dim pad; /* Padded dimensions */
+ } grid, coef, proj;
+ uint32_t deci_factor_log2;
+};
+
+#define IA_CSS_DEFAULT_SDIS_INFO \
+ { \
+ { { 0, 0 }, /* dim */ \
+ { 0, 0 }, /* pad */ \
+ }, /* grid */ \
+ { { 0, 0 }, /* dim */ \
+ { 0, 0 }, /* pad */ \
+ }, /* coef */ \
+ { { 0, 0 }, /* dim */ \
+ { 0, 0 }, /* pad */ \
+ }, /* proj */ \
+ 0, /* deci_factor_log2 */ \
+ }
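+
+/* Minimal usage sketch: when DIS is not enabled, the info can simply be
+ * initialized with the default above (this is what ia_css_sdis_init_info()
+ * does for the disabled case):
+ *
+ *   struct ia_css_sdis_info dis = IA_CSS_DEFAULT_SDIS_INFO;
+ */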
+
+/** DVS statistics grid
+ *
+ * ISP block: SDVS1 (DIS/DVS Support for DIS/DVS ver.1 (2-axes))
+ * SDVS2 (DVS Support for DVS ver.2 (6-axes))
+ * ISP1: SDVS1 is used.
+ * ISP2: SDVS2 is used.
+ */
+struct ia_css_dvs_grid_res {
+ uint32_t width; /**< Width of DVS grid table.
+ (= Horizontal number of grid cells
+ in table, which cells have effective
+ statistics.)
+ For DVS1, this is equal to
+ the number of vertical statistics. */
+ uint32_t aligned_width; /**< Stride of each grid line.
+ (= Horizontal number of grid cells
+ in table, which means
+ the allocated width.) */
+ uint32_t height; /**< Height of DVS grid table.
+ (= Vertical number of grid cells
+ in table, which cells have effective
+ statistics.)
+ For DVS1, this is equal to
+ the number of horizontal statistics. */
+ uint32_t aligned_height;/**< Stride of each grid column.
+ (= Vertical number of grid cells
+ in table, which means
+ the allocated height.) */
+};
+
+/* TODO: use ia_css_dvs_grid_res in here.
+ * However, that implies driver I/F changes
+ */
+struct ia_css_dvs_grid_info {
+ uint32_t enable; /**< DVS statistics enabled.
+ 0:disabled, 1:enabled */
+ uint32_t width; /**< Width of DVS grid table.
+ (= Horizontal number of grid cells
+ in table, which cells have effective
+ statistics.)
+ For DVS1, this is equal to
+ the number of vertical statistics. */
+ uint32_t aligned_width; /**< Stride of each grid line.
+ (= Horizontal number of grid cells
+ in table, which means
+ the allocated width.) */
+ uint32_t height; /**< Height of DVS grid table.
+ (= Vertical number of grid cells
+ in table, which cells have effective
+ statistics.)
+ For DVS1, this is equal to
+ the number of horizontal statistics. */
+ uint32_t aligned_height;/**< Stride of each grid column.
+ (= Vertical number of grid cells
+ in table, which means
+ the allocated height.) */
+ uint32_t bqs_per_grid_cell; /**< Grid cell size in BQ(Bayer Quad) unit.
+ (1BQ means {Gr,R,B,Gb}(2x2 pixels).)
+ For DVS1, valid value is 64.
+ For DVS2, valid value is only 64,
+ currently. */
+ uint32_t num_hor_coefs; /**< Number of horizontal coefficients. */
+ uint32_t num_ver_coefs; /**< Number of vertical coefficients. */
+};
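+
+/* A minimal sketch of walking a statistics table described by this grid:
+ * aligned_width/aligned_height give the allocated stride, while width/height
+ * bound the cells that hold valid statistics (table is a hypothetical
+ * pointer to the allocated data).
+ *
+ *   for (y = 0; y < grid->height; y++)
+ *       for (x = 0; x < grid->width; x++)
+ *           use(table[y * grid->aligned_width + x]);
+ */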
+
+/** Number of DVS statistics levels
+ */
+#define IA_CSS_DVS_STAT_NUM_OF_LEVELS 3
+
+/** DVS statistics generated by accelerator global configuration
+ */
+struct dvs_stat_public_dvs_global_cfg {
+ unsigned char kappa;
+ /**< DVS statistics global configuration - kappa */
+ unsigned char match_shift;
+ /**< DVS statistics global configuration - match_shift */
+ unsigned char ybin_mode;
+ /**< DVS statistics global configuration - y binning mode */
+};
+
+/** DVS statistics generated by accelerator level grid
+ * configuration
+ */
+struct dvs_stat_public_dvs_level_grid_cfg {
+ unsigned char grid_width;
+ /**< DVS statistics grid width */
+ unsigned char grid_height;
+ /**< DVS statistics grid height */
+ unsigned char block_width;
+ /**< DVS statistics block width */
+ unsigned char block_height;
+ /**< DVS statistics block height */
+};
+
+/** DVS statistics generated by accelerator level grid start
+ * configuration
+ */
+struct dvs_stat_public_dvs_level_grid_start {
+ unsigned short x_start;
+ /**< DVS statistics level x start */
+ unsigned short y_start;
+ /**< DVS statistics level y start */
+ unsigned char enable;
+ /**< DVS statistics level enable */
+};
+
+/** DVS statistics generated by accelerator level grid end
+ * configuration
+ */
+struct dvs_stat_public_dvs_level_grid_end {
+ unsigned short x_end;
+ /**< DVS statistics level x end */
+ unsigned short y_end;
+ /**< DVS statistics level y end */
+};
+
+/** DVS statistics generated by accelerator Feature Extraction
+ * Region Of Interest (FE-ROI) configuration
+ */
+struct dvs_stat_public_dvs_level_fe_roi_cfg {
+ unsigned char x_start;
+ /**< DVS statistics fe-roi level x start */
+ unsigned char y_start;
+ /**< DVS statistics fe-roi level y start */
+ unsigned char x_end;
+ /**< DVS statistics fe-roi level x end */
+ unsigned char y_end;
+ /**< DVS statistics fe-roi level y end */
+};
+
+/** DVS statistics generated by accelerator public configuration
+ */
+struct dvs_stat_public_dvs_grd_cfg {
+ struct dvs_stat_public_dvs_level_grid_cfg grd_cfg;
+ /**< DVS statistics level grid configuration */
+ struct dvs_stat_public_dvs_level_grid_start grd_start;
+ /**< DVS statistics level grid start configuration */
+ struct dvs_stat_public_dvs_level_grid_end grd_end;
+ /**< DVS statistics level grid end configuration */
+};
+
+/** DVS statistics grid generated by accelerator
+ */
+struct ia_css_dvs_stat_grid_info {
+ struct dvs_stat_public_dvs_global_cfg dvs_gbl_cfg;
+ /**< DVS statistics global configuration (kappa, match, binning) */
+ struct dvs_stat_public_dvs_grd_cfg grd_cfg[IA_CSS_DVS_STAT_NUM_OF_LEVELS];
+ /**< DVS statistics grid configuration (blocks and grids) */
+ struct dvs_stat_public_dvs_level_fe_roi_cfg fe_roi_cfg[IA_CSS_DVS_STAT_NUM_OF_LEVELS];
+ /**< DVS statistics FE ROI (region of interest) configuration */
+};
+
+/** DVS statistics generated by accelerator default grid info
+ */
+#define DEFAULT_DVS_GRID_INFO { \
+{ \
+ { 0, 0, 0}, /* GBL CFG reg: kappa, match_shift, binning mode */ \
+ {{{0, 0, 0, 0}, {0, 0, 0}, {0, 0} }, \
+ {{0, 0, 0, 0}, {0, 0, 0}, {0, 0} }, \
+ {{0, 0, 0, 0}, {0, 0, 0}, {0, 0} } }, \
+ {{0, 0, 0, 0}, {4, 0, 0, 0}, {0, 0, 0, 0} } } \
+}
+
+
+/** Union that holds all types of DVS statistics grid info in
+ * CSS format.
+ */
+union ia_css_dvs_grid_u {
+ struct ia_css_dvs_stat_grid_info dvs_stat_grid_info;
+ /**< DVS statistics produced by accelerator grid info */
+ struct ia_css_dvs_grid_info dvs_grid_info;
+ /**< DVS (DVS1/DVS2) grid info */
+};
+
+#endif /* __IA_CSS_SDIS_COMMON_TYPES_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/common/ia_css_sdis_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/common/ia_css_sdis_param.h
new file mode 100644
index 000000000000..586cc4315c1f
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/common/ia_css_sdis_param.h
@@ -0,0 +1,22 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_SDIS_PARAM_COMMON_H
+#define __IA_CSS_SDIS_PARAM_COMMON_H
+
+
+#include "sdis/common/ia_css_sdis_common.host.h"
+
+#endif /* __IA_CSS_SDIS_PARAM_COMMON_H */
+
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/sdis_1.0/ia_css_sdis.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/sdis_1.0/ia_css_sdis.host.c
new file mode 100644
index 000000000000..0daab1176865
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/sdis_1.0/ia_css_sdis.host.c
@@ -0,0 +1,424 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "memory_access.h"
+#include "assert_support.h"
+#include "ia_css_debug.h"
+#include "ia_css_sdis_types.h"
+#include "sdis/common/ia_css_sdis_common.host.h"
+#include "ia_css_sdis.host.h"
+
+const struct ia_css_dvs_coefficients default_sdis_config = {
+ .grid = { 0, 0, 0, 0, 0, 0, 0, 0 },
+ .hor_coefs = NULL,
+ .ver_coefs = NULL
+};
+
+static void
+fill_row(short *private, const short *public, unsigned width, unsigned padding)
+{
+ assert((int)width >= 0);
+ assert((int)padding >= 0);
+ memcpy (private, public, width*sizeof(short));
+ memset (&private[width], 0, padding*sizeof(short));
+}
+
+void ia_css_sdis_horicoef_vmem_encode (
+ struct sh_css_isp_sdis_hori_coef_tbl *to,
+ const struct ia_css_dvs_coefficients *from,
+ unsigned size)
+{
+ unsigned aligned_width = from->grid.aligned_width * from->grid.bqs_per_grid_cell;
+ unsigned width = from->grid.num_hor_coefs;
+ int padding = aligned_width-width;
+ unsigned stride = size/IA_CSS_DVS_NUM_COEF_TYPES/sizeof(short);
+ unsigned total_bytes = aligned_width*IA_CSS_DVS_NUM_COEF_TYPES*sizeof(short);
+ short *public = from->hor_coefs;
+ short *private = (short*)to;
+ unsigned type;
+
+ /* Copy the table, add padding */
+ assert(padding >= 0);
+ assert(total_bytes <= size);
+ assert(size % (IA_CSS_DVS_NUM_COEF_TYPES*ISP_VEC_NELEMS*sizeof(short)) == 0);
+
+ for (type = 0; type < IA_CSS_DVS_NUM_COEF_TYPES; type++) {
+ fill_row(&private[type*stride], &public[type*width], width, padding);
+ }
+}
+
+void ia_css_sdis_vertcoef_vmem_encode (
+ struct sh_css_isp_sdis_vert_coef_tbl *to,
+ const struct ia_css_dvs_coefficients *from,
+ unsigned size)
+{
+ unsigned aligned_height = from->grid.aligned_height * from->grid.bqs_per_grid_cell;
+ unsigned height = from->grid.num_ver_coefs;
+ int padding = aligned_height-height;
+ unsigned stride = size/IA_CSS_DVS_NUM_COEF_TYPES/sizeof(short);
+ unsigned total_bytes = aligned_height*IA_CSS_DVS_NUM_COEF_TYPES*sizeof(short);
+ short *public = from->ver_coefs;
+ short *private = (short*)to;
+ unsigned type;
+
+ /* Copy the table, add padding */
+ assert(padding >= 0);
+ assert(total_bytes <= size);
+ assert(size % (IA_CSS_DVS_NUM_COEF_TYPES*ISP_VEC_NELEMS*sizeof(short)) == 0);
+
+ for (type = 0; type < IA_CSS_DVS_NUM_COEF_TYPES; type++) {
+ fill_row(&private[type*stride], &public[type*height], height, padding);
+ }
+}
+
+void ia_css_sdis_horiproj_encode (
+ struct sh_css_isp_sdis_hori_proj_tbl *to,
+ const struct ia_css_dvs_coefficients *from,
+ unsigned size)
+{
+ (void)to;
+ (void)from;
+ (void)size;
+}
+
+void ia_css_sdis_vertproj_encode (
+ struct sh_css_isp_sdis_vert_proj_tbl *to,
+ const struct ia_css_dvs_coefficients *from,
+ unsigned size)
+{
+ (void)to;
+ (void)from;
+ (void)size;
+}
+
+void ia_css_get_isp_dis_coefficients(
+ struct ia_css_stream *stream,
+ short *horizontal_coefficients,
+ short *vertical_coefficients)
+{
+ struct ia_css_isp_parameters *params;
+ unsigned int hor_num_isp, ver_num_isp;
+ unsigned int hor_num_3a, ver_num_3a;
+ int i;
+ struct ia_css_binary *dvs_binary;
+
+ IA_CSS_ENTER("void");
+
+ assert(horizontal_coefficients != NULL);
+ assert(vertical_coefficients != NULL);
+
+ params = stream->isp_params_configs;
+
+ /* Only video pipe supports DVS */
+ dvs_binary = ia_css_stream_get_dvs_binary(stream);
+ if (!dvs_binary)
+ return;
+
+ hor_num_isp = dvs_binary->dis.coef.pad.width;
+ ver_num_isp = dvs_binary->dis.coef.pad.height;
+ hor_num_3a = dvs_binary->dis.coef.dim.width;
+ ver_num_3a = dvs_binary->dis.coef.dim.height;
+
+ for (i = 0; i < IA_CSS_DVS_NUM_COEF_TYPES; i++) {
+ fill_row(&horizontal_coefficients[i*hor_num_isp],
+ &params->dvs_coefs.hor_coefs[i*hor_num_3a], hor_num_3a, hor_num_isp-hor_num_3a);
+ }
+ for (i = 0; i < SH_CSS_DIS_VER_NUM_COEF_TYPES(dvs_binary); i++) {
+ fill_row(&vertical_coefficients[i*ver_num_isp],
+ &params->dvs_coefs.ver_coefs[i*ver_num_3a], ver_num_3a, ver_num_isp-ver_num_3a);
+ }
+
+ IA_CSS_LEAVE("void");
+}
+
+size_t
+ia_css_sdis_hor_coef_tbl_bytes(
+ const struct ia_css_binary *binary)
+{
+ if (binary->info->sp.pipeline.isp_pipe_version == 1)
+ return sizeof(short) * IA_CSS_DVS_NUM_COEF_TYPES * binary->dis.coef.pad.width;
+ else
+ return sizeof(short) * IA_CSS_DVS2_NUM_COEF_TYPES * binary->dis.coef.pad.width;
+}
+
+size_t
+ia_css_sdis_ver_coef_tbl_bytes(
+ const struct ia_css_binary *binary)
+{
+ return sizeof(short) * SH_CSS_DIS_VER_NUM_COEF_TYPES(binary) * binary->dis.coef.pad.height;
+}
+
+void
+ia_css_sdis_init_info(
+ struct ia_css_sdis_info *dis,
+ unsigned sc_3a_dis_width,
+ unsigned sc_3a_dis_padded_width,
+ unsigned sc_3a_dis_height,
+ unsigned isp_pipe_version,
+ unsigned enabled)
+{
+ if (!enabled) {
+ struct ia_css_sdis_info default_dis = IA_CSS_DEFAULT_SDIS_INFO;
+ *dis = default_dis;
+ return;
+ }
+
+ dis->deci_factor_log2 = SH_CSS_DIS_DECI_FACTOR_LOG2;
+
+ dis->grid.dim.width =
+ _ISP_BQS(sc_3a_dis_width) >> SH_CSS_DIS_DECI_FACTOR_LOG2;
+ dis->grid.dim.height =
+ _ISP_BQS(sc_3a_dis_height) >> SH_CSS_DIS_DECI_FACTOR_LOG2;
+ dis->grid.pad.width =
+ CEIL_SHIFT(_ISP_BQS(sc_3a_dis_padded_width), SH_CSS_DIS_DECI_FACTOR_LOG2);
+ dis->grid.pad.height =
+ CEIL_SHIFT(_ISP_BQS(sc_3a_dis_height), SH_CSS_DIS_DECI_FACTOR_LOG2);
+
+ dis->coef.dim.width =
+ (_ISP_BQS(sc_3a_dis_width) >> SH_CSS_DIS_DECI_FACTOR_LOG2) << SH_CSS_DIS_DECI_FACTOR_LOG2;
+ dis->coef.dim.height =
+ (_ISP_BQS(sc_3a_dis_height) >> SH_CSS_DIS_DECI_FACTOR_LOG2) << SH_CSS_DIS_DECI_FACTOR_LOG2;
+ dis->coef.pad.width =
+ __ISP_SDIS_HOR_COEF_NUM_VECS(sc_3a_dis_padded_width) * ISP_VEC_NELEMS;
+ dis->coef.pad.height =
+ __ISP_SDIS_VER_COEF_NUM_VECS(sc_3a_dis_height) * ISP_VEC_NELEMS;
+ if (isp_pipe_version == 1) {
+ dis->proj.dim.width =
+ _ISP_BQS(sc_3a_dis_height) >> SH_CSS_DIS_DECI_FACTOR_LOG2;
+ dis->proj.dim.height =
+ _ISP_BQS(sc_3a_dis_width) >> SH_CSS_DIS_DECI_FACTOR_LOG2;
+ } else {
+ dis->proj.dim.width =
+ (_ISP_BQS(sc_3a_dis_width) >> SH_CSS_DIS_DECI_FACTOR_LOG2) *
+ (_ISP_BQS(sc_3a_dis_height) >> SH_CSS_DIS_DECI_FACTOR_LOG2);
+ dis->proj.dim.height =
+ (_ISP_BQS(sc_3a_dis_width) >> SH_CSS_DIS_DECI_FACTOR_LOG2) *
+ (_ISP_BQS(sc_3a_dis_height) >> SH_CSS_DIS_DECI_FACTOR_LOG2);
+ }
+ dis->proj.pad.width =
+ __ISP_SDIS_HOR_PROJ_NUM_ISP(sc_3a_dis_padded_width,
+ sc_3a_dis_height,
+ SH_CSS_DIS_DECI_FACTOR_LOG2,
+ isp_pipe_version);
+ dis->proj.pad.height =
+ __ISP_SDIS_VER_PROJ_NUM_ISP(sc_3a_dis_padded_width,
+ SH_CSS_DIS_DECI_FACTOR_LOG2);
+}
+
+void ia_css_sdis_clear_coefficients(
+ struct ia_css_dvs_coefficients *dvs_coefs)
+{
+ dvs_coefs->hor_coefs = NULL;
+ dvs_coefs->ver_coefs = NULL;
+}
+
+enum ia_css_err
+ia_css_get_dvs_statistics(
+ struct ia_css_dvs_statistics *host_stats,
+ const struct ia_css_isp_dvs_statistics *isp_stats)
+{
+ struct ia_css_isp_dvs_statistics_map *map;
+ enum ia_css_err ret = IA_CSS_SUCCESS;
+
+ IA_CSS_ENTER("host_stats=%p, isp_stats=%p", host_stats, isp_stats);
+
+ assert(host_stats != NULL);
+ assert(isp_stats != NULL);
+
+ map = ia_css_isp_dvs_statistics_map_allocate(isp_stats, NULL);
+ if (map) {
+ mmgr_load(isp_stats->data_ptr, map->data_ptr, isp_stats->size);
+ ia_css_translate_dvs_statistics(host_stats, map);
+ ia_css_isp_dvs_statistics_map_free(map);
+ } else {
+ IA_CSS_ERROR("out of memory");
+ ret = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+ }
+
+ IA_CSS_LEAVE_ERR(ret);
+ return ret;
+}
+
+void
+ia_css_translate_dvs_statistics(
+ struct ia_css_dvs_statistics *host_stats,
+ const struct ia_css_isp_dvs_statistics_map *isp_stats)
+{
+ unsigned int hor_num_isp, ver_num_isp, hor_num_dvs, ver_num_dvs, i;
+ int32_t *hor_ptr_dvs, *ver_ptr_dvs, *hor_ptr_isp, *ver_ptr_isp;
+
+ assert(host_stats != NULL);
+ assert(host_stats->hor_proj != NULL);
+ assert(host_stats->ver_proj != NULL);
+ assert(isp_stats != NULL);
+ assert(isp_stats->hor_proj != NULL);
+ assert(isp_stats->ver_proj != NULL);
+
+ IA_CSS_ENTER("hproj=%p, vproj=%p, haddr=%x, vaddr=%x",
+ host_stats->hor_proj, host_stats->ver_proj,
+ isp_stats->hor_proj, isp_stats->ver_proj);
+
+ hor_num_isp = host_stats->grid.aligned_height;
+ ver_num_isp = host_stats->grid.aligned_width;
+ hor_ptr_isp = isp_stats->hor_proj;
+ ver_ptr_isp = isp_stats->ver_proj;
+ hor_num_dvs = host_stats->grid.height;
+ ver_num_dvs = host_stats->grid.width;
+ hor_ptr_dvs = host_stats->hor_proj;
+ ver_ptr_dvs = host_stats->ver_proj;
+
+ for (i = 0; i < IA_CSS_DVS_NUM_COEF_TYPES; i++) {
+ memcpy(hor_ptr_dvs, hor_ptr_isp, hor_num_dvs * sizeof(int32_t));
+ hor_ptr_isp += hor_num_isp;
+ hor_ptr_dvs += hor_num_dvs;
+
+ memcpy(ver_ptr_dvs, ver_ptr_isp, ver_num_dvs * sizeof(int32_t));
+ ver_ptr_isp += ver_num_isp;
+ ver_ptr_dvs += ver_num_dvs;
+ }
+
+ IA_CSS_LEAVE("void");
+}
+
+struct ia_css_isp_dvs_statistics *
+ia_css_isp_dvs_statistics_allocate(
+ const struct ia_css_dvs_grid_info *grid)
+{
+ struct ia_css_isp_dvs_statistics *me;
+ int hor_size, ver_size;
+
+ assert(grid != NULL);
+
+ IA_CSS_ENTER("grid=%p", grid);
+
+ if (!grid->enable)
+ return NULL;
+
+ me = sh_css_calloc(1,sizeof(*me));
+ if (!me)
+ goto err;
+
+ hor_size = CEIL_MUL(sizeof(int) * IA_CSS_DVS_NUM_COEF_TYPES * grid->aligned_height,
+ HIVE_ISP_DDR_WORD_BYTES);
+ ver_size = CEIL_MUL(sizeof(int) * IA_CSS_DVS_NUM_COEF_TYPES * grid->aligned_width,
+ HIVE_ISP_DDR_WORD_BYTES);
+
+
+ me->size = hor_size + ver_size;
+ me->data_ptr = mmgr_malloc(me->size);
+ if (me->data_ptr == mmgr_NULL)
+ goto err;
+ me->hor_size = hor_size;
+ me->hor_proj = me->data_ptr;
+ me->ver_size = ver_size;
+ me->ver_proj = me->data_ptr + hor_size;
+
+ IA_CSS_LEAVE("return=%p", me);
+
+ return me;
+err:
+ ia_css_isp_dvs_statistics_free(me);
+
+ IA_CSS_LEAVE("return=%p", NULL);
+
+ return NULL;
+}
+
+struct ia_css_isp_dvs_statistics_map *
+ia_css_isp_dvs_statistics_map_allocate(
+ const struct ia_css_isp_dvs_statistics *isp_stats,
+ void *data_ptr)
+{
+ struct ia_css_isp_dvs_statistics_map *me;
+ /* Windows compiler does not like adding sizes to a void *
+ * so we use a local char * instead. */
+ char *base_ptr;
+
+ me = sh_css_malloc(sizeof(*me));
+ if (!me) {
+ IA_CSS_LOG("cannot allocate memory");
+ goto err;
+ }
+
+ me->data_ptr = data_ptr;
+ me->data_allocated = data_ptr == NULL;
+
+ if (!me->data_ptr) {
+ me->data_ptr = sh_css_malloc(isp_stats->size);
+ if (!me->data_ptr) {
+ IA_CSS_LOG("cannot allocate memory");
+ goto err;
+ }
+ }
+ base_ptr = me->data_ptr;
+
+ me->size = isp_stats->size;
+ /* GCC complains when we assign a char * to a void *, so these
+ * casts are necessary unfortunately. */
+ me->hor_proj = (void*)base_ptr;
+ me->ver_proj = (void*)(base_ptr + isp_stats->hor_size);
+
+ return me;
+err:
+ if (me)
+ sh_css_free(me);
+ return NULL;
+}
+
+void
+ia_css_isp_dvs_statistics_map_free(struct ia_css_isp_dvs_statistics_map *me)
+{
+ if (me) {
+ if (me->data_allocated)
+ sh_css_free(me->data_ptr);
+ sh_css_free(me);
+ }
+}
+
+void
+ia_css_isp_dvs_statistics_free(struct ia_css_isp_dvs_statistics *me)
+{
+ if (me != NULL) {
+ hmm_free(me->data_ptr);
+ sh_css_free(me);
+ }
+}
+
+void ia_css_sdis_horicoef_debug_dtrace(
+ const struct ia_css_dvs_coefficients *config, unsigned level)
+{
+ (void)config;
+ (void)level;
+}
+
+void ia_css_sdis_vertcoef_debug_dtrace(
+ const struct ia_css_dvs_coefficients *config, unsigned level)
+{
+ (void)config;
+ (void)level;
+}
+
+void ia_css_sdis_horiproj_debug_dtrace(
+ const struct ia_css_dvs_coefficients *config, unsigned level)
+{
+ (void)config;
+ (void)level;
+}
+
+void ia_css_sdis_vertproj_debug_dtrace(
+ const struct ia_css_dvs_coefficients *config, unsigned level)
+{
+ (void)config;
+ (void)level;
+}
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/sdis_1.0/ia_css_sdis.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/sdis_1.0/ia_css_sdis.host.h
new file mode 100644
index 000000000000..95e2c61bbcba
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/sdis_1.0/ia_css_sdis.host.h
@@ -0,0 +1,101 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_SDIS_HOST_H
+#define __IA_CSS_SDIS_HOST_H
+
+#include "ia_css_sdis_types.h"
+#include "ia_css_binary.h"
+#include "ia_css_stream.h"
+#include "sh_css_params.h"
+
+extern const struct ia_css_dvs_coefficients default_sdis_config;
+
+/* Opaque here, since size is binary dependent. */
+struct sh_css_isp_sdis_hori_coef_tbl;
+struct sh_css_isp_sdis_vert_coef_tbl;
+struct sh_css_isp_sdis_hori_proj_tbl;
+struct sh_css_isp_sdis_vert_proj_tbl;
+
+void ia_css_sdis_horicoef_vmem_encode (
+ struct sh_css_isp_sdis_hori_coef_tbl *to,
+ const struct ia_css_dvs_coefficients *from,
+ unsigned size);
+
+void ia_css_sdis_vertcoef_vmem_encode (
+ struct sh_css_isp_sdis_vert_coef_tbl *to,
+ const struct ia_css_dvs_coefficients *from,
+ unsigned size);
+
+void ia_css_sdis_horiproj_encode (
+ struct sh_css_isp_sdis_hori_proj_tbl *to,
+ const struct ia_css_dvs_coefficients *from,
+ unsigned size);
+
+void ia_css_sdis_vertproj_encode (
+ struct sh_css_isp_sdis_vert_proj_tbl *to,
+ const struct ia_css_dvs_coefficients *from,
+ unsigned size);
+
+void ia_css_get_isp_dis_coefficients(
+ struct ia_css_stream *stream,
+ short *horizontal_coefficients,
+ short *vertical_coefficients);
+
+enum ia_css_err
+ia_css_get_dvs_statistics(
+ struct ia_css_dvs_statistics *host_stats,
+ const struct ia_css_isp_dvs_statistics *isp_stats);
+
+void
+ia_css_translate_dvs_statistics(
+ struct ia_css_dvs_statistics *host_stats,
+ const struct ia_css_isp_dvs_statistics_map *isp_stats);
+
+struct ia_css_isp_dvs_statistics *
+ia_css_isp_dvs_statistics_allocate(
+ const struct ia_css_dvs_grid_info *grid);
+
+void
+ia_css_isp_dvs_statistics_free(
+ struct ia_css_isp_dvs_statistics *me);
+
+size_t ia_css_sdis_hor_coef_tbl_bytes(const struct ia_css_binary *binary);
+size_t ia_css_sdis_ver_coef_tbl_bytes(const struct ia_css_binary *binary);
+
+void
+ia_css_sdis_init_info(
+ struct ia_css_sdis_info *dis,
+ unsigned sc_3a_dis_width,
+ unsigned sc_3a_dis_padded_width,
+ unsigned sc_3a_dis_height,
+ unsigned isp_pipe_version,
+ unsigned enabled);
+
+void ia_css_sdis_clear_coefficients(
+ struct ia_css_dvs_coefficients *dvs_coefs);
+
+void ia_css_sdis_horicoef_debug_dtrace(
+ const struct ia_css_dvs_coefficients *config, unsigned level);
+
+void ia_css_sdis_vertcoef_debug_dtrace(
+ const struct ia_css_dvs_coefficients *config, unsigned level);
+
+void ia_css_sdis_horiproj_debug_dtrace(
+ const struct ia_css_dvs_coefficients *config, unsigned level);
+
+void ia_css_sdis_vertproj_debug_dtrace(
+ const struct ia_css_dvs_coefficients *config, unsigned level);
+
+#endif /* __IA_CSS_SDIS_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/sdis_1.0/ia_css_sdis_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/sdis_1.0/ia_css_sdis_param.h
new file mode 100644
index 000000000000..2dd8696802d0
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/sdis_1.0/ia_css_sdis_param.h
@@ -0,0 +1,21 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_SDIS_PARAM_H
+#define __IA_CSS_SDIS_PARAM_H
+
+#include "sdis.isp.h"
+
+#endif /* __IA_CSS_SDIS_PARAM_H */
+
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/sdis_1.0/ia_css_sdis_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/sdis_1.0/ia_css_sdis_types.h
new file mode 100644
index 000000000000..d408b58a027d
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/sdis_1.0/ia_css_sdis_types.h
@@ -0,0 +1,53 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_SDIS_TYPES_H
+#define __IA_CSS_SDIS_TYPES_H
+
+/** @file
+* CSS-API header file for DVS statistics parameters.
+*/
+
+/** Number of DVS coefficient types */
+#define IA_CSS_DVS_NUM_COEF_TYPES 6
+
+#ifndef PIPE_GENERATION
+#include "isp/kernels/sdis/common/ia_css_sdis_common_types.h"
+#endif
+
+/** DVS 1.0 Coefficients.
+ * This structure describes the coefficients that are needed for the dvs statistics.
+ */
+
+struct ia_css_dvs_coefficients {
+ struct ia_css_dvs_grid_info grid;/**< grid info contains the dimensions of the dvs grid */
+ int16_t *hor_coefs; /**< the pointer to int16_t[grid.num_hor_coefs * IA_CSS_DVS_NUM_COEF_TYPES]
+ containing the horizontal coefficients */
+ int16_t *ver_coefs; /**< the pointer to int16_t[grid.num_ver_coefs * IA_CSS_DVS_NUM_COEF_TYPES]
+ containing the vertical coefficients */
+};
+
+/** DVS 1.0 Statistics.
+ * This structure describes the statistics that are generated using the provided coefficients.
+ */
+
+struct ia_css_dvs_statistics {
+ struct ia_css_dvs_grid_info grid;/**< grid info contains the dimensions of the dvs grid */
+ int32_t *hor_proj; /**< the pointer to int32_t[grid.height * IA_CSS_DVS_NUM_COEF_TYPES]
+ containing the horizontal projections */
+ int32_t *ver_proj; /**< the pointer to int32_t[grid.width * IA_CSS_DVS_NUM_COEF_TYPES]
+ containing the vertical projections */
+};
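+
+/* A minimal sketch of indexing the projections: the planes for the
+ * IA_CSS_DVS_NUM_COEF_TYPES coefficient types are stored back to back,
+ * matching the layout filled by ia_css_translate_dvs_statistics(), so
+ * coefficient type t, row i of the horizontal projections is:
+ *
+ *   int32_t v = stats->hor_proj[t * stats->grid.height + i];
+ */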
+
+#endif /* __IA_CSS_SDIS_TYPES_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/sdis_2/ia_css_sdis2.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/sdis_2/ia_css_sdis2.host.c
new file mode 100644
index 000000000000..5a0c103e9eb7
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/sdis_2/ia_css_sdis2.host.c
@@ -0,0 +1,338 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <assert_support.h>
+#include "memory_access.h"
+#include "ia_css_debug.h"
+#include "ia_css_sdis2.host.h"
+
+const struct ia_css_dvs2_coefficients default_sdis2_config = {
+ .grid = { 0, 0, 0, 0, 0, 0, 0, 0 },
+ .hor_coefs = { NULL, NULL, NULL, NULL },
+ .ver_coefs = { NULL, NULL, NULL, NULL },
+};
+
+static void
+fill_row(short *private, const short *public, unsigned width, unsigned padding)
+{
+ memcpy (private, public, width*sizeof(short));
+ memset (&private[width], 0, padding*sizeof(short));
+}
+
+void ia_css_sdis2_horicoef_vmem_encode (
+ struct sh_css_isp_sdis_hori_coef_tbl *to,
+ const struct ia_css_dvs2_coefficients *from,
+ unsigned size)
+{
+ unsigned aligned_width = from->grid.aligned_width * from->grid.bqs_per_grid_cell;
+ unsigned width = from->grid.num_hor_coefs;
+ int padding = aligned_width-width;
+ unsigned stride = size/IA_CSS_DVS2_NUM_COEF_TYPES/sizeof(short);
+ unsigned total_bytes = aligned_width*IA_CSS_DVS2_NUM_COEF_TYPES*sizeof(short);
+ short *private = (short*)to;
+
+
+ /* Copy the table, add padding */
+ assert(padding >= 0);
+ assert(total_bytes <= size);
+ assert(size % (IA_CSS_DVS2_NUM_COEF_TYPES*ISP_VEC_NELEMS*sizeof(short)) == 0);
+ fill_row(&private[0*stride], from->hor_coefs.odd_real, width, padding);
+ fill_row(&private[1*stride], from->hor_coefs.odd_imag, width, padding);
+ fill_row(&private[2*stride], from->hor_coefs.even_real, width, padding);
+ fill_row(&private[3*stride], from->hor_coefs.even_imag, width, padding);
+}
+
+void ia_css_sdis2_vertcoef_vmem_encode (
+ struct sh_css_isp_sdis_vert_coef_tbl *to,
+ const struct ia_css_dvs2_coefficients *from,
+ unsigned size)
+{
+ unsigned aligned_height = from->grid.aligned_height * from->grid.bqs_per_grid_cell;
+ unsigned height = from->grid.num_ver_coefs;
+ int padding = aligned_height-height;
+ unsigned stride = size/IA_CSS_DVS2_NUM_COEF_TYPES/sizeof(short);
+ unsigned total_bytes = aligned_height*IA_CSS_DVS2_NUM_COEF_TYPES*sizeof(short);
+ short *private = (short*)to;
+
+ /* Copy the table, add padding */
+ assert(padding >= 0);
+ assert(total_bytes <= size);
+ assert(size % (IA_CSS_DVS2_NUM_COEF_TYPES*ISP_VEC_NELEMS*sizeof(short)) == 0);
+ fill_row(&private[0*stride], from->ver_coefs.odd_real, height, padding);
+ fill_row(&private[1*stride], from->ver_coefs.odd_imag, height, padding);
+ fill_row(&private[2*stride], from->ver_coefs.even_real, height, padding);
+ fill_row(&private[3*stride], from->ver_coefs.even_imag, height, padding);
+}
+
+void ia_css_sdis2_horiproj_encode (
+ struct sh_css_isp_sdis_hori_proj_tbl *to,
+ const struct ia_css_dvs2_coefficients *from,
+ unsigned size)
+{
+ (void)to;
+ (void)from;
+ (void)size;
+}
+
+void ia_css_sdis2_vertproj_encode (
+ struct sh_css_isp_sdis_vert_proj_tbl *to,
+ const struct ia_css_dvs2_coefficients *from,
+ unsigned size)
+{
+ (void)to;
+ (void)from;
+ (void)size;
+}
+
+void ia_css_get_isp_dvs2_coefficients(
+ struct ia_css_stream *stream,
+ short *hor_coefs_odd_real,
+ short *hor_coefs_odd_imag,
+ short *hor_coefs_even_real,
+ short *hor_coefs_even_imag,
+ short *ver_coefs_odd_real,
+ short *ver_coefs_odd_imag,
+ short *ver_coefs_even_real,
+ short *ver_coefs_even_imag)
+{
+ struct ia_css_isp_parameters *params;
+ unsigned int hor_num_3a, ver_num_3a;
+ unsigned int hor_num_isp, ver_num_isp;
+ struct ia_css_binary *dvs_binary;
+
+ IA_CSS_ENTER("void");
+
+ assert(stream != NULL);
+ assert(hor_coefs_odd_real != NULL);
+ assert(hor_coefs_odd_imag != NULL);
+ assert(hor_coefs_even_real != NULL);
+ assert(hor_coefs_even_imag != NULL);
+ assert(ver_coefs_odd_real != NULL);
+ assert(ver_coefs_odd_imag != NULL);
+ assert(ver_coefs_even_real != NULL);
+ assert(ver_coefs_even_imag != NULL);
+
+ params = stream->isp_params_configs;
+
+ /* Only video pipe supports DVS */
+ dvs_binary = ia_css_stream_get_dvs_binary(stream);
+ if (!dvs_binary)
+ return;
+
+ hor_num_3a = dvs_binary->dis.coef.dim.width;
+ ver_num_3a = dvs_binary->dis.coef.dim.height;
+ hor_num_isp = dvs_binary->dis.coef.pad.width;
+ ver_num_isp = dvs_binary->dis.coef.pad.height;
+
+ memcpy (hor_coefs_odd_real, params->dvs2_coefs.hor_coefs.odd_real, hor_num_3a * sizeof(short));
+ memcpy (hor_coefs_odd_imag, params->dvs2_coefs.hor_coefs.odd_imag, hor_num_3a * sizeof(short));
+ memcpy (hor_coefs_even_real, params->dvs2_coefs.hor_coefs.even_real, hor_num_3a * sizeof(short));
+ memcpy (hor_coefs_even_imag, params->dvs2_coefs.hor_coefs.even_imag, hor_num_3a * sizeof(short));
+ memcpy (ver_coefs_odd_real, params->dvs2_coefs.ver_coefs.odd_real, ver_num_3a * sizeof(short));
+ memcpy (ver_coefs_odd_imag, params->dvs2_coefs.ver_coefs.odd_imag, ver_num_3a * sizeof(short));
+ memcpy (ver_coefs_even_real, params->dvs2_coefs.ver_coefs.even_real, ver_num_3a * sizeof(short));
+ memcpy (ver_coefs_even_imag, params->dvs2_coefs.ver_coefs.even_imag, ver_num_3a * sizeof(short));
+
+ IA_CSS_LEAVE("void");
+}
+
+void ia_css_sdis2_clear_coefficients(
+ struct ia_css_dvs2_coefficients *dvs2_coefs)
+{
+ dvs2_coefs->hor_coefs.odd_real = NULL;
+ dvs2_coefs->hor_coefs.odd_imag = NULL;
+ dvs2_coefs->hor_coefs.even_real = NULL;
+ dvs2_coefs->hor_coefs.even_imag = NULL;
+ dvs2_coefs->ver_coefs.odd_real = NULL;
+ dvs2_coefs->ver_coefs.odd_imag = NULL;
+ dvs2_coefs->ver_coefs.even_real = NULL;
+ dvs2_coefs->ver_coefs.even_imag = NULL;
+}
+
+enum ia_css_err
+ia_css_get_dvs2_statistics(
+ struct ia_css_dvs2_statistics *host_stats,
+ const struct ia_css_isp_dvs_statistics *isp_stats)
+{
+ struct ia_css_isp_dvs_statistics_map *map;
+ enum ia_css_err ret = IA_CSS_SUCCESS;
+
+ IA_CSS_ENTER("host_stats=%p, isp_stats=%p", host_stats, isp_stats);
+
+ assert(host_stats != NULL);
+ assert(isp_stats != NULL);
+
+ map = ia_css_isp_dvs_statistics_map_allocate(isp_stats, NULL);
+ if (map) {
+ mmgr_load(isp_stats->data_ptr, map->data_ptr, isp_stats->size);
+ ia_css_translate_dvs2_statistics(host_stats, map);
+ ia_css_isp_dvs_statistics_map_free(map);
+ } else {
+ IA_CSS_ERROR("out of memory");
+ ret = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+ }
+
+ IA_CSS_LEAVE_ERR(ret);
+ return ret;
+}
+
+void
+ia_css_translate_dvs2_statistics(
+ struct ia_css_dvs2_statistics *host_stats,
+ const struct ia_css_isp_dvs_statistics_map *isp_stats)
+{
+ unsigned int size_bytes, table_width, table_size, height;
+ unsigned int src_offset = 0, dst_offset = 0;
+ int32_t *htemp_ptr, *vtemp_ptr;
+
+ assert(host_stats != NULL);
+ assert(host_stats->hor_prod.odd_real != NULL);
+ assert(host_stats->hor_prod.odd_imag != NULL);
+ assert(host_stats->hor_prod.even_real != NULL);
+ assert(host_stats->hor_prod.even_imag != NULL);
+ assert(host_stats->ver_prod.odd_real != NULL);
+ assert(host_stats->ver_prod.odd_imag != NULL);
+ assert(host_stats->ver_prod.even_real != NULL);
+ assert(host_stats->ver_prod.even_imag != NULL);
+ assert(isp_stats != NULL);
+ assert(isp_stats->hor_proj != NULL);
+ assert(isp_stats->ver_proj != NULL);
+
+ IA_CSS_ENTER("hor_coefs.odd_real=%p, hor_coefs.odd_imag=%p, "
+ "hor_coefs.even_real=%p, hor_coefs.even_imag=%p, "
+ "ver_coefs.odd_real=%p, ver_coefs.odd_imag=%p, "
+ "ver_coefs.even_real=%p, ver_coefs.even_imag=%p, "
+ "haddr=%x, vaddr=%x",
+ host_stats->hor_prod.odd_real, host_stats->hor_prod.odd_imag,
+ host_stats->hor_prod.even_real, host_stats->hor_prod.even_imag,
+ host_stats->ver_prod.odd_real, host_stats->ver_prod.odd_imag,
+ host_stats->ver_prod.even_real, host_stats->ver_prod.even_imag,
+ isp_stats->hor_proj, isp_stats->ver_proj);
+
+ /* Host side: reflecting the true width in bytes */
+ size_bytes = host_stats->grid.aligned_width * sizeof(*htemp_ptr);
+
+ /* DDR side: need to be aligned to the system bus width */
+ /* statistics table width in terms of 32-bit words */
+ table_width = CEIL_MUL(size_bytes, HIVE_ISP_DDR_WORD_BYTES) / sizeof(*htemp_ptr);
+ table_size = table_width * host_stats->grid.aligned_height;
+
+ htemp_ptr = isp_stats->hor_proj; /* horizontal stats */
+ vtemp_ptr = isp_stats->ver_proj; /* vertical stats */
+ for (height = 0; height < host_stats->grid.aligned_height; height++) {
+ /* hor stats */
+ memcpy(host_stats->hor_prod.odd_real + dst_offset,
+ &htemp_ptr[0*table_size+src_offset], size_bytes);
+ memcpy(host_stats->hor_prod.odd_imag + dst_offset,
+ &htemp_ptr[1*table_size+src_offset], size_bytes);
+ memcpy(host_stats->hor_prod.even_real + dst_offset,
+ &htemp_ptr[2*table_size+src_offset], size_bytes);
+ memcpy(host_stats->hor_prod.even_imag + dst_offset,
+ &htemp_ptr[3*table_size+src_offset], size_bytes);
+
+ /* ver stats */
+ memcpy(host_stats->ver_prod.odd_real + dst_offset,
+ &vtemp_ptr[0*table_size+src_offset], size_bytes);
+ memcpy(host_stats->ver_prod.odd_imag + dst_offset,
+ &vtemp_ptr[1*table_size+src_offset], size_bytes);
+ memcpy(host_stats->ver_prod.even_real + dst_offset,
+ &vtemp_ptr[2*table_size+src_offset], size_bytes);
+ memcpy(host_stats->ver_prod.even_imag + dst_offset,
+ &vtemp_ptr[3*table_size+src_offset], size_bytes);
+
+ src_offset += table_width; /* aligned table width */
+ dst_offset += host_stats->grid.aligned_width;
+ }
+
+ IA_CSS_LEAVE("void");
+}
+
+struct ia_css_isp_dvs_statistics *
+ia_css_isp_dvs2_statistics_allocate(
+ const struct ia_css_dvs_grid_info *grid)
+{
+ struct ia_css_isp_dvs_statistics *me;
+ int size;
+
+ assert(grid != NULL);
+
+ IA_CSS_ENTER("grid=%p", grid);
+
+ if (!grid->enable)
+ return NULL;
+
+ me = sh_css_calloc(1,sizeof(*me));
+ if (!me)
+ goto err;
+
+ /* In the ISP 2 SDIS DMA model, every row of the projection table must be
+ aligned to HIVE_ISP_DDR_WORD_BYTES
+ */
+ size = CEIL_MUL(sizeof(int) * grid->aligned_width, HIVE_ISP_DDR_WORD_BYTES)
+ * grid->aligned_height * IA_CSS_DVS2_NUM_COEF_TYPES;
+
+ me->size = 2*size;
+ me->data_ptr = mmgr_malloc(me->size);
+ if (me->data_ptr == mmgr_NULL)
+ goto err;
+ me->hor_proj = me->data_ptr;
+ me->hor_size = size;
+ me->ver_proj = me->data_ptr + size;
+ me->ver_size = size;
+
+ IA_CSS_LEAVE("return=%p", me);
+ return me;
+err:
+ ia_css_isp_dvs2_statistics_free(me);
+ IA_CSS_LEAVE("return=%p", NULL);
+
+ return NULL;
+}
+
+void
+ia_css_isp_dvs2_statistics_free(struct ia_css_isp_dvs_statistics *me)
+{
+ if (me != NULL) {
+ hmm_free(me->data_ptr);
+ sh_css_free(me);
+ }
+}
+
+void ia_css_sdis2_horicoef_debug_dtrace(
+ const struct ia_css_dvs2_coefficients *config, unsigned level)
+{
+ (void)config;
+ (void)level;
+}
+
+void ia_css_sdis2_vertcoef_debug_dtrace(
+ const struct ia_css_dvs2_coefficients *config, unsigned level)
+{
+ (void)config;
+ (void)level;
+}
+
+void ia_css_sdis2_horiproj_debug_dtrace(
+ const struct ia_css_dvs2_coefficients *config, unsigned level)
+{
+ (void)config;
+ (void)level;
+}
+
+void ia_css_sdis2_vertproj_debug_dtrace(
+ const struct ia_css_dvs2_coefficients *config, unsigned level)
+{
+ (void)config;
+ (void)level;
+}
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/sdis_2/ia_css_sdis2.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/sdis_2/ia_css_sdis2.host.h
new file mode 100644
index 000000000000..60198d4279b4
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/sdis_2/ia_css_sdis2.host.h
@@ -0,0 +1,95 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_SDIS2_HOST_H
+#define __IA_CSS_SDIS2_HOST_H
+
+#include "ia_css_sdis2_types.h"
+#include "ia_css_binary.h"
+#include "ia_css_stream.h"
+#include "sh_css_params.h"
+
+extern const struct ia_css_dvs2_coefficients default_sdis2_config;
+
+/* Opaque here, since size is binary dependent. */
+struct sh_css_isp_sdis_hori_coef_tbl;
+struct sh_css_isp_sdis_vert_coef_tbl;
+struct sh_css_isp_sdis_hori_proj_tbl;
+struct sh_css_isp_sdis_vert_proj_tbl;
+
+void ia_css_sdis2_horicoef_vmem_encode (
+ struct sh_css_isp_sdis_hori_coef_tbl *to,
+ const struct ia_css_dvs2_coefficients *from,
+ unsigned size);
+
+void ia_css_sdis2_vertcoef_vmem_encode (
+ struct sh_css_isp_sdis_vert_coef_tbl *to,
+ const struct ia_css_dvs2_coefficients *from,
+ unsigned size);
+
+void ia_css_sdis2_horiproj_encode (
+ struct sh_css_isp_sdis_hori_proj_tbl *to,
+ const struct ia_css_dvs2_coefficients *from,
+ unsigned size);
+
+void ia_css_sdis2_vertproj_encode (
+ struct sh_css_isp_sdis_vert_proj_tbl *to,
+ const struct ia_css_dvs2_coefficients *from,
+ unsigned size);
+
+void ia_css_get_isp_dvs2_coefficients(
+ struct ia_css_stream *stream,
+ short *hor_coefs_odd_real,
+ short *hor_coefs_odd_imag,
+ short *hor_coefs_even_real,
+ short *hor_coefs_even_imag,
+ short *ver_coefs_odd_real,
+ short *ver_coefs_odd_imag,
+ short *ver_coefs_even_real,
+ short *ver_coefs_even_imag);
+
+void ia_css_sdis2_clear_coefficients(
+ struct ia_css_dvs2_coefficients *dvs2_coefs);
+
+enum ia_css_err
+ia_css_get_dvs2_statistics(
+ struct ia_css_dvs2_statistics *host_stats,
+ const struct ia_css_isp_dvs_statistics *isp_stats);
+
+void
+ia_css_translate_dvs2_statistics(
+ struct ia_css_dvs2_statistics *host_stats,
+ const struct ia_css_isp_dvs_statistics_map *isp_stats);
+
+struct ia_css_isp_dvs_statistics *
+ia_css_isp_dvs2_statistics_allocate(
+ const struct ia_css_dvs_grid_info *grid);
+
+void
+ia_css_isp_dvs2_statistics_free(
+ struct ia_css_isp_dvs_statistics *me);
+
+void ia_css_sdis2_horicoef_debug_dtrace(
+ const struct ia_css_dvs2_coefficients *config, unsigned level);
+
+void ia_css_sdis2_vertcoef_debug_dtrace(
+ const struct ia_css_dvs2_coefficients *config, unsigned level);
+
+void ia_css_sdis2_horiproj_debug_dtrace(
+ const struct ia_css_dvs2_coefficients *config, unsigned level);
+
+void ia_css_sdis2_vertproj_debug_dtrace(
+ const struct ia_css_dvs2_coefficients *config, unsigned level);
+
+#endif /* __IA_CSS_SDIS2_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/sdis_2/ia_css_sdis2_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/sdis_2/ia_css_sdis2_types.h
new file mode 100644
index 000000000000..7db7dd10fe00
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/sdis_2/ia_css_sdis2_types.h
@@ -0,0 +1,69 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_SDIS2_TYPES_H
+#define __IA_CSS_SDIS2_TYPES_H
+
+/** @file
+* CSS-API header file for DVS statistics parameters.
+*/
+
+/** Number of DVS coefficient types */
+#define IA_CSS_DVS2_NUM_COEF_TYPES 4
+
+#ifndef PIPE_GENERATION
+#include "isp/kernels/sdis/common/ia_css_sdis_common_types.h"
+#endif
+
+/** DVS 2.0 Coefficient types. This structure contains 4 pointers to
+ * arrays that contain the coefficients for each type.
+ */
+struct ia_css_dvs2_coef_types {
+ int16_t *odd_real; /**< real part of the odd coefficients*/
+ int16_t *odd_imag; /**< imaginary part of the odd coefficients*/
+ int16_t *even_real;/**< real part of the even coefficients*/
+ int16_t *even_imag;/**< imaginary part of the even coefficients*/
+};
+
+/** DVS 2.0 Coefficients. This structure describes the coefficients that are needed for the dvs statistics.
+ * e.g. hor_coefs.odd_real is the pointer to int16_t[grid.num_hor_coefs] containing the horizontal odd real
+ * coefficients.
+ */
+struct ia_css_dvs2_coefficients {
+ struct ia_css_dvs_grid_info grid; /**< grid info contains the dimensions of the dvs grid */
+ struct ia_css_dvs2_coef_types hor_coefs; /**< struct with pointers that contain the horizontal coefficients */
+ struct ia_css_dvs2_coef_types ver_coefs; /**< struct with pointers that contain the vertical coefficients */
+};
+
+/** DVS 2.0 Statistic types. This structure contains 4 pointers to
+ * arrays that contain the statistics for each type.
+ */
+struct ia_css_dvs2_stat_types {
+ int32_t *odd_real; /**< real part of the odd statistics*/
+ int32_t *odd_imag; /**< imaginary part of the odd statistics*/
+ int32_t *even_real;/**< real part of the even statistics*/
+ int32_t *even_imag;/**< imaginary part of the even statistics*/
+};
+
+/** DVS 2.0 Statistics. This structure describes the statistics that are generated using the provided coefficients.
+ * e.g. hor_prod.odd_real is the pointer to int32_t[grid.aligned_height][grid.aligned_width] containing
+ * the horizontal odd real statistics. Valid statistics data area is int32_t[0..grid.height-1][0..grid.width-1]
+ */
+struct ia_css_dvs2_statistics {
+ struct ia_css_dvs_grid_info grid; /**< grid info contains the dimensions of the dvs grid */
+ struct ia_css_dvs2_stat_types hor_prod; /**< struct with pointers that contain the horizontal statistics */
+ struct ia_css_dvs2_stat_types ver_prod; /**< struct with pointers that contain the vertical statistics */
+};
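+
+/* A minimal sketch of reading one statistics value: rows are laid out with
+ * aligned_width as the stride, and only [0..grid.height-1][0..grid.width-1]
+ * holds valid data (s is a hypothetical pointer to the statistics).
+ *
+ *   int32_t v = s->hor_prod.odd_real[row * s->grid.aligned_width + col];
+ */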
+
+#endif /* __IA_CSS_SDIS2_TYPES_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/sdis_2/ia_css_sdis_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/sdis_2/ia_css_sdis_param.h
new file mode 100644
index 000000000000..cea352e45713
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/sdis_2/ia_css_sdis_param.h
@@ -0,0 +1,21 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_SDIS2_PARAM_H
+#define __IA_CSS_SDIS2_PARAM_H
+
+#include "sdis.isp.h"
+
+#endif /* __IA_CSS_SDIS2_PARAM_H */
+
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tdf/tdf_1.0/ia_css_tdf.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tdf/tdf_1.0/ia_css_tdf.host.c
new file mode 100644
index 000000000000..e775af51c0c0
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tdf/tdf_1.0/ia_css_tdf.host.c
@@ -0,0 +1,76 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "ia_css_debug.h"
+#include "ia_css_tdf.host.h"
+
+static const int16_t g_pyramid[8][8] = {
+{128, 384, 640, 896, 896, 640, 384, 128},
+{384, 1152, 1920, 2688, 2688, 1920, 1152, 384},
+{640, 1920, 3200, 4480, 4480, 3200, 1920, 640},
+{896, 2688, 4480, 6272, 6272, 4480, 2688, 896},
+{896, 2688, 4480, 6272, 6272, 4480, 2688, 896},
+{640, 1920, 3200, 4480, 4480, 3200, 1920, 640},
+{384, 1152, 1920, 2688, 2688, 1920, 1152, 384},
+{128, 384, 640, 896, 896, 640, 384, 128}
+};
+
+void
+ia_css_tdf_vmem_encode(
+ struct ia_css_isp_tdf_vmem_params *to,
+ const struct ia_css_tdf_config *from,
+ size_t size)
+{
+ unsigned i;
+ (void)size;
+
+ for (i = 0; i < ISP_VEC_NELEMS; i++) {
+ to->pyramid[0][i] = g_pyramid[i/8][i%8];
+ to->threshold_flat[0][i] = from->thres_flat_table[i];
+ to->threshold_detail[0][i] = from->thres_detail_table[i];
+ }
+
+}
+
+void
+ia_css_tdf_encode(
+ struct ia_css_isp_tdf_dmem_params *to,
+ const struct ia_css_tdf_config *from,
+ size_t size)
+{
+ (void)size;
+ to->Epsilon_0 = from->epsilon_0;
+ to->Epsilon_1 = from->epsilon_1;
+ to->EpsScaleText = from->eps_scale_text;
+ to->EpsScaleEdge = from->eps_scale_edge;
+ to->Sepa_flat = from->sepa_flat;
+ to->Sepa_Edge = from->sepa_edge;
+ to->Blend_Flat = from->blend_flat;
+ to->Blend_Text = from->blend_text;
+ to->Blend_Edge = from->blend_edge;
+ to->Shading_Gain = from->shading_gain;
+ to->Shading_baseGain = from->shading_base_gain;
+ to->LocalY_Gain = from->local_y_gain;
+ to->LocalY_baseGain = from->local_y_base_gain;
+}
+
+void
+ia_css_tdf_debug_dtrace(
+ const struct ia_css_tdf_config *config,
+ unsigned level)
+{
+ (void)config;
+ (void)level;
+}
+
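
For reference, a standalone sketch (not part of the patch) of the row-major flattening performed by ia_css_tdf_vmem_encode() above; a vector length of 64 is assumed here purely for illustration, since ISP_VEC_NELEMS is defined elsewhere:

  #include <stdint.h>
  #include <stdio.h>

  #define VEC_NELEMS 64 /* assumption standing in for ISP_VEC_NELEMS */

  int main(void)
  {
      int16_t pyramid[8][8];
      int16_t vec[VEC_NELEMS];
      unsigned int i, r, c;

      for (r = 0; r < 8; r++)
          for (c = 0; c < 8; c++)
              pyramid[r][c] = (int16_t)(r * 8 + c);

      for (i = 0; i < VEC_NELEMS; i++)
          vec[i] = pyramid[i / 8][i % 8]; /* same i/8, i%8 mapping as above */

      printf("vec[9] = %d (row 1, col 1)\n", vec[9]);
      return 0;
  }
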
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tdf/tdf_1.0/ia_css_tdf.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tdf/tdf_1.0/ia_css_tdf.host.h
new file mode 100644
index 000000000000..1b3e759e41a3
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tdf/tdf_1.0/ia_css_tdf.host.h
@@ -0,0 +1,39 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_TDF_HOST_H
+#define __IA_CSS_TDF_HOST_H
+
+#include "ia_css_tdf_types.h"
+#include "ia_css_tdf_param.h"
+#include "ia_css_tdf_default.host.h"
+
+void
+ia_css_tdf_vmem_encode(
+ struct ia_css_isp_tdf_vmem_params *to,
+ const struct ia_css_tdf_config *from,
+ size_t size);
+
+void
+ia_css_tdf_encode(
+ struct ia_css_isp_tdf_dmem_params *to,
+ const struct ia_css_tdf_config *from,
+ size_t size);
+
+void
+ia_css_tdf_debug_dtrace(
+ const struct ia_css_tdf_config *config,
+ unsigned level);
+
+#endif /* __IA_CSS_TDF_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tdf/tdf_1.0/ia_css_tdf_default.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tdf/tdf_1.0/ia_css_tdf_default.host.c
new file mode 100644
index 000000000000..9bb42daf070d
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tdf/tdf_1.0/ia_css_tdf_default.host.c
@@ -0,0 +1,36 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "ia_css_tdf_types.h"
+
+const struct ia_css_tdf_config default_tdf_config = {
+ .thres_flat_table = {0},
+ .thres_detail_table = {0},
+ .epsilon_0 = 4095,
+ .epsilon_1 = 5733,
+ .eps_scale_text = 409,
+ .eps_scale_edge = 3686,
+ .sepa_flat = 1294,
+ .sepa_edge = 4095,
+ .blend_flat = 819,
+ .blend_text = 819,
+ .blend_edge = 8191,
+ .shading_gain = 1024,
+ .shading_base_gain = 8191,
+ .local_y_gain = 0,
+ .local_y_base_gain = 2047,
+ .rad_x_origin = 0,
+ .rad_y_origin = 0
+};
+
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tdf/tdf_1.0/ia_css_tdf_default.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tdf/tdf_1.0/ia_css_tdf_default.host.h
new file mode 100644
index 000000000000..cd8fb70e5a87
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tdf/tdf_1.0/ia_css_tdf_default.host.h
@@ -0,0 +1,23 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_TDF_DEFAULT_HOST_H
+#define __IA_CSS_TDF_DEFAULT_HOST_H
+
+#include "ia_css_tdf_types.h"
+
+extern const struct ia_css_tdf_config default_tdf_config;
+
+#endif /* __IA_CSS_TDF_DEFAULT_HOST_H */
+
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tdf/tdf_1.0/ia_css_tdf_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tdf/tdf_1.0/ia_css_tdf_param.h
new file mode 100644
index 000000000000..9334f2e0698b
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tdf/tdf_1.0/ia_css_tdf_param.h
@@ -0,0 +1,43 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_TDF_PARAM_H
+#define __IA_CSS_TDF_PARAM_H
+
+#include "type_support.h"
+#include "vmem.h" /* needed for VMEM_ARRAY */
+
+struct ia_css_isp_tdf_vmem_params {
+ VMEM_ARRAY(pyramid, ISP_VEC_NELEMS);
+ VMEM_ARRAY(threshold_flat, ISP_VEC_NELEMS);
+ VMEM_ARRAY(threshold_detail, ISP_VEC_NELEMS);
+};
+
+struct ia_css_isp_tdf_dmem_params {
+ int32_t Epsilon_0;
+ int32_t Epsilon_1;
+ int32_t EpsScaleText;
+ int32_t EpsScaleEdge;
+ int32_t Sepa_flat;
+ int32_t Sepa_Edge;
+ int32_t Blend_Flat;
+ int32_t Blend_Text;
+ int32_t Blend_Edge;
+ int32_t Shading_Gain;
+ int32_t Shading_baseGain;
+ int32_t LocalY_Gain;
+ int32_t LocalY_baseGain;
+};
+
+#endif /* __IA_CSS_TDF_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tdf/tdf_1.0/ia_css_tdf_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tdf/tdf_1.0/ia_css_tdf_types.h
new file mode 100644
index 000000000000..cc47a50e5ad5
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tdf/tdf_1.0/ia_css_tdf_types.h
@@ -0,0 +1,53 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_TDF_TYPES_H
+#define __IA_CSS_TDF_TYPES_H
+
+/** @file
+* CSS-API header file for Transform Domain Filter parameters.
+*/
+
+#include "type_support.h"
+
+/** Transform Domain Filter configuration
+ *
+ * \brief TDF public parameters.
+ * \details Struct with all parameters for the TDF kernel that can be set
+ * from the CSS API.
+ *
+ * ISP2.6.1: TDF is used.
+ */
+struct ia_css_tdf_config {
+ int32_t thres_flat_table[64]; /**< Final optimized strength table of NR for flat region. */
+ int32_t thres_detail_table[64]; /**< Final optimized strength table of NR for detail region. */
+ int32_t epsilon_0; /**< Coefficient to control variance for dark area (for flat region). */
+ int32_t epsilon_1; /**< Coefficient to control variance for bright area (for flat region). */
+ int32_t eps_scale_text; /**< Epsilon scaling coefficient for texture region. */
+ int32_t eps_scale_edge; /**< Epsilon scaling coefficient for edge region. */
+ int32_t sepa_flat; /**< Threshold to judge flat (edge < m_Flat_thre). */
+ int32_t sepa_edge; /**< Threshold to judge edge (edge > m_Edge_thre). */
+ int32_t blend_flat; /**< Blending ratio at flat region. */
+ int32_t blend_text; /**< Blending ratio at texture region. */
+ int32_t blend_edge; /**< Blending ratio at edge region. */
+ int32_t shading_gain; /**< Gain of Shading control. */
+ int32_t shading_base_gain; /**< Base Gain of Shading control. */
+ int32_t local_y_gain; /**< Gain of local luminance control. */
+ int32_t local_y_base_gain; /**< Base gain of local luminance control. */
+ int32_t rad_x_origin; /**< Initial x coord. for radius computation. */
+ int32_t rad_y_origin; /**< Initial y coord. for radius computation. */
+};
+
+#endif /* __IA_CSS_TDF_TYPES_H */
+
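
A conceptual sketch (not part of the patch) of the flat/texture/edge split suggested by sepa_flat and sepa_edge above, using the default blend ratios from ia_css_tdf_default.host.c earlier in this patch; the comparison semantics are an assumption based on the comments, and the edge metric computed inside the ISP is not shown:

  #include <stdint.h>
  #include <stdio.h>

  enum region { REGION_FLAT, REGION_TEXTURE, REGION_EDGE };

  static enum region classify(int32_t edge, int32_t sepa_flat, int32_t sepa_edge)
  {
      if (edge < sepa_flat)
          return REGION_FLAT;   /* "edge < m_Flat_thre" */
      if (edge > sepa_edge)
          return REGION_EDGE;   /* "edge > m_Edge_thre" */
      return REGION_TEXTURE;
  }

  int main(void)
  {
      /* default blend ratios for flat, texture and edge regions */
      static const int32_t blend[] = { 819, 819, 8191 };
      enum region r = classify(2000, 1294, 4095);

      printf("blend ratio = %d\n", blend[r]); /* texture -> 819 */
      return 0;
  }
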
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tnr/tnr3/ia_css_tnr3_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tnr/tnr3/ia_css_tnr3_types.h
new file mode 100644
index 000000000000..135563f52174
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tnr/tnr3/ia_css_tnr3_types.h
@@ -0,0 +1,61 @@
+#ifdef ISP2401
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010 - 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _IA_CSS_TNR3_TYPES_H
+#define _IA_CSS_TNR3_TYPES_H
+
+/** @file
+* CSS-API header file for Temporal Noise Reduction v3 (TNR3) kernel
+*/
+
+/**
+ * \brief Number of piecewise linear segments.
+ * \details The parameters to TNR3 are specified as a piecewise linear segment.
+ * The number of such segments is fixed at 3.
+ */
+#define TNR3_NUM_SEGMENTS 3
+
+/** Temporal Noise Reduction v3 (TNR3) configuration.
+ * The parameters to this kernel are fourfold:
+ * 1. Three piecewise linear graphs (one for each plane) with three segments
+ * each. Each graph has Luma values on the x axis and sigma values for the
+ * plane on the y axis. The three segments may have different slopes, and the
+ * Luma value at which the slope may change is called a "knee" point. As
+ * there are three segments, four points must be specified on the Luma axis
+ * and four on the per-plane sigma axis. On the Luma axis two points are
+ * fixed (namely 0 and the maximum luma value, which depends on the ISP bit
+ * depth); the other two are the knee points where the slope may change.
+ * The four points on the per-plane sigma axis are all specified at the
+ * interface.
+ * 2. One rounding adjustment parameter for each plane.
+ * 3. One maximum feedback threshold value for each plane.
+ * 4. Selection of the reference frame buffer to be used for noise reduction.
+ */
+struct ia_css_tnr3_kernel_config {
+ unsigned int maxfb_y; /**< Maximum Feedback Gain for Y */
+ unsigned int maxfb_u; /**< Maximum Feedback Gain for U */
+ unsigned int maxfb_v; /**< Maximum Feedback Gain for V */
+ unsigned int round_adj_y; /**< Rounding Adjust for Y */
+ unsigned int round_adj_u; /**< Rounding Adjust for U */
+ unsigned int round_adj_v; /**< Rounding Adjust for V */
+ unsigned int knee_y[TNR3_NUM_SEGMENTS - 1]; /**< Knee points */
+ unsigned int sigma_y[TNR3_NUM_SEGMENTS + 1]; /**< Standard deviation for Y at points Y0, Y1, Y2, Y3 */
+ unsigned int sigma_u[TNR3_NUM_SEGMENTS + 1]; /**< Standard deviation for U at points U0, U1, U2, U3 */
+ unsigned int sigma_v[TNR3_NUM_SEGMENTS + 1]; /**< Standard deviation for V at points V0, V1, V2, V3 */
+ unsigned int ref_buf_select; /**< Selection of the reference buffer */
+};
+
+#endif /* _IA_CSS_TNR3_TYPES_H */
+#endif /* ISP2401 */
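
A standalone interpolation sketch (not part of the patch) of the piecewise-linear description above: Luma endpoints 0 and 1023 (a 10-bit pipe is assumed here for illustration), two knee points in between, and four sigma values at the resulting segment endpoints:

  #include <stdio.h>

  #define NUM_SEGMENTS 3

  static int interp_sigma(unsigned int luma, unsigned int max_luma,
                          const unsigned int knee[NUM_SEGMENTS - 1],
                          const int sigma[NUM_SEGMENTS + 1])
  {
      unsigned int x[NUM_SEGMENTS + 1] = { 0, knee[0], knee[1], max_luma };
      unsigned int s;

      for (s = 0; s < NUM_SEGMENTS; s++) {
          if (luma <= x[s + 1]) {
              unsigned int dx = x[s + 1] - x[s];
              int dy = sigma[s + 1] - sigma[s];

              /* linear interpolation inside segment s */
              return sigma[s] + (int)(luma - x[s]) * dy / (int)(dx ? dx : 1);
          }
      }
      return sigma[NUM_SEGMENTS]; /* luma beyond max_luma: clamp */
  }

  int main(void)
  {
      const unsigned int knee[NUM_SEGMENTS - 1] = { 256, 640 };
      const int sigma_y[NUM_SEGMENTS + 1] = { 10, 20, 40, 80 };

      /* Y = 448 falls in the middle segment [256, 640] */
      printf("sigma(448) = %d\n", interp_sigma(448, 1023, knee, sigma_y));
      return 0;
  }
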
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tnr/tnr_1.0/ia_css_tnr.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tnr/tnr_1.0/ia_css_tnr.host.c
new file mode 100644
index 000000000000..804c19ab4485
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tnr/tnr_1.0/ia_css_tnr.host.c
@@ -0,0 +1,130 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "ia_css_types.h"
+#include "ia_css_frame.h"
+#include "sh_css_defs.h"
+#include "ia_css_debug.h"
+#include "sh_css_frac.h"
+#include "assert_support.h"
+#define IA_CSS_INCLUDE_CONFIGURATIONS
+#include "ia_css_isp_configs.h"
+#include "isp.h"
+
+#include "ia_css_tnr.host.h"
+const struct ia_css_tnr_config default_tnr_config = {
+ 32768,
+ 32,
+ 32,
+};
+
+void
+ia_css_tnr_encode(
+ struct sh_css_isp_tnr_params *to,
+ const struct ia_css_tnr_config *from,
+ unsigned size)
+{
+ (void)size;
+ to->coef =
+ uDIGIT_FITTING(from->gain, 16, SH_CSS_TNR_COEF_SHIFT);
+ to->threshold_Y =
+ uDIGIT_FITTING(from->threshold_y, 16, SH_CSS_ISP_YUV_BITS);
+ to->threshold_C =
+ uDIGIT_FITTING(from->threshold_uv, 16, SH_CSS_ISP_YUV_BITS);
+}
+
+void
+ia_css_tnr_dump(
+ const struct sh_css_isp_tnr_params *tnr,
+ unsigned level)
+{
+ if (!tnr)
+ return;
+ ia_css_debug_dtrace(level, "Temporal Noise Reduction:\n");
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "tnr_coef", tnr->coef);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "tnr_threshold_Y", tnr->threshold_Y);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "tnr_threshold_C", tnr->threshold_C);
+}
+
+void
+ia_css_tnr_debug_dtrace(
+ const struct ia_css_tnr_config *config,
+ unsigned level)
+{
+ ia_css_debug_dtrace(level,
+ "config.gain=%d, "
+ "config.threshold_y=%d, config.threshold_uv=%d\n",
+ config->gain,
+ config->threshold_y, config->threshold_uv);
+}
+
+void
+ia_css_tnr_config(
+ struct sh_css_isp_tnr_isp_config *to,
+ const struct ia_css_tnr_configuration *from,
+ unsigned size)
+{
+ unsigned elems_a = ISP_VEC_NELEMS;
+ unsigned i;
+
+ (void)size;
+ ia_css_dma_configure_from_info(&to->port_b, &from->tnr_frames[0]->info);
+ to->width_a_over_b = elems_a / to->port_b.elems;
+ to->frame_height = from->tnr_frames[0]->info.res.height;
+#ifndef ISP2401
+ for (i = 0; i < NUM_VIDEO_TNR_FRAMES; i++) {
+#else
+ for (i = 0; i < NUM_TNR_FRAMES; i++) {
+#endif
+ to->tnr_frame_addr[i] = from->tnr_frames[i]->data + from->tnr_frames[i]->planes.yuyv.offset;
+ }
+
+ /* Assume divisibility here; may need to generalize to fixed point. */
+ assert(elems_a % to->port_b.elems == 0);
+}
+
+void
+ia_css_tnr_configure(
+ const struct ia_css_binary *binary,
+ const struct ia_css_frame **frames)
+{
+ struct ia_css_tnr_configuration config;
+ unsigned i;
+
+#ifndef ISP2401
+ for (i = 0; i < NUM_VIDEO_TNR_FRAMES; i++)
+#else
+ for (i = 0; i < NUM_TNR_FRAMES; i++)
+#endif
+ config.tnr_frames[i] = frames[i];
+
+ ia_css_configure_tnr(binary, &config);
+}
+
+void
+ia_css_init_tnr_state(
+ struct sh_css_isp_tnr_dmem_state *state,
+ size_t size)
+{
+ (void)size;
+
+#ifndef ISP2401
+ assert(NUM_VIDEO_TNR_FRAMES >= 2);
+#endif
+ assert(sizeof(*state) == size);
+ state->tnr_in_buf_idx = 0;
+ state->tnr_out_buf_idx = 1;
+}
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tnr/tnr_1.0/ia_css_tnr.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tnr/tnr_1.0/ia_css_tnr.host.h
new file mode 100644
index 000000000000..9290dfad574e
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tnr/tnr_1.0/ia_css_tnr.host.h
@@ -0,0 +1,56 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_TNR_HOST_H
+#define __IA_CSS_TNR_HOST_H
+
+#include "ia_css_binary.h"
+#include "ia_css_tnr_state.h"
+#include "ia_css_tnr_types.h"
+#include "ia_css_tnr_param.h"
+
+extern const struct ia_css_tnr_config default_tnr_config;
+
+void
+ia_css_tnr_encode(
+ struct sh_css_isp_tnr_params *to,
+ const struct ia_css_tnr_config *from,
+ unsigned size);
+
+void
+ia_css_tnr_dump(
+ const struct sh_css_isp_tnr_params *tnr,
+ unsigned level);
+
+void
+ia_css_tnr_debug_dtrace(
+ const struct ia_css_tnr_config *config,
+ unsigned level);
+
+void
+ia_css_tnr_config(
+ struct sh_css_isp_tnr_isp_config *to,
+ const struct ia_css_tnr_configuration *from,
+ unsigned size);
+
+void
+ia_css_tnr_configure(
+ const struct ia_css_binary *binary,
+ const struct ia_css_frame **frames);
+
+void
+ia_css_init_tnr_state(
+ struct sh_css_isp_tnr_dmem_state *state,
+ size_t size);
+#endif /* __IA_CSS_TNR_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tnr/tnr_1.0/ia_css_tnr_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tnr/tnr_1.0/ia_css_tnr_param.h
new file mode 100644
index 000000000000..db4a7cced264
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tnr/tnr_1.0/ia_css_tnr_param.h
@@ -0,0 +1,48 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_TNR_PARAM_H
+#define __IA_CSS_TNR_PARAM_H
+
+#include "type_support.h"
+#include "sh_css_defs.h"
+#include "dma.h"
+
+/* TNR (Temporal Noise Reduction) */
+struct sh_css_isp_tnr_params {
+ int32_t coef;
+ int32_t threshold_Y;
+ int32_t threshold_C;
+};
+
+struct ia_css_tnr_configuration {
+#ifndef ISP2401
+ const struct ia_css_frame *tnr_frames[NUM_VIDEO_TNR_FRAMES];
+#else
+ const struct ia_css_frame *tnr_frames[NUM_TNR_FRAMES];
+#endif
+};
+
+struct sh_css_isp_tnr_isp_config {
+ uint32_t width_a_over_b;
+ uint32_t frame_height;
+ struct dma_port_config port_b;
+#ifndef ISP2401
+ hrt_vaddress tnr_frame_addr[NUM_VIDEO_TNR_FRAMES];
+#else
+ hrt_vaddress tnr_frame_addr[NUM_TNR_FRAMES];
+#endif
+};
+
+#endif /* __IA_CSS_TNR_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tnr/tnr_1.0/ia_css_tnr_state.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tnr/tnr_1.0/ia_css_tnr_state.h
new file mode 100644
index 000000000000..8b1218f7235d
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tnr/tnr_1.0/ia_css_tnr_state.h
@@ -0,0 +1,26 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_TNR_STATE_H
+#define __IA_CSS_TNR_STATE_H
+
+#include "type_support.h"
+
+/* TNR (temporal noise reduction) */
+struct sh_css_isp_tnr_dmem_state {
+ uint32_t tnr_in_buf_idx;
+ uint32_t tnr_out_buf_idx;
+};
+
+#endif /* __IA_CSS_TNR_STATE_H */
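
A minimal sketch (not part of the patch) of how the two indices above can ping-pong between TNR reference buffers from one frame to the next; the 0/1 starting values match ia_css_init_tnr_state() earlier in this patch, while the swap itself is only an illustration, not the driver's scheduling code:

  #include <stdint.h>
  #include <stdio.h>

  struct tnr_dmem_state {
      uint32_t tnr_in_buf_idx;
      uint32_t tnr_out_buf_idx;
  };

  static void tnr_state_swap(struct tnr_dmem_state *s)
  {
      uint32_t tmp = s->tnr_in_buf_idx;

      s->tnr_in_buf_idx = s->tnr_out_buf_idx; /* last output becomes next input */
      s->tnr_out_buf_idx = tmp;
  }

  int main(void)
  {
      struct tnr_dmem_state s = { .tnr_in_buf_idx = 0, .tnr_out_buf_idx = 1 };

      tnr_state_swap(&s);
      printf("in=%u out=%u\n", s.tnr_in_buf_idx, s.tnr_out_buf_idx); /* in=1 out=0 */
      return 0;
  }
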
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tnr/tnr_1.0/ia_css_tnr_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tnr/tnr_1.0/ia_css_tnr_types.h
new file mode 100644
index 000000000000..4fd35e6ccd70
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tnr/tnr_1.0/ia_css_tnr_types.h
@@ -0,0 +1,60 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_TNR_TYPES_H
+#define __IA_CSS_TNR_TYPES_H
+
+/** @file
+* CSS-API header file for Temporal Noise Reduction (TNR) parameters.
+*/
+
+/** Temporal Noise Reduction (TNR) configuration.
+ *
+ * When the difference between the current frame and the previous frame is
+ * less than or equal to the threshold, TNR is applied and the current frame
+ * is mixed with the previous frame.
+ * When the difference is greater than the threshold, motion is assumed to be
+ * detected; TNR is then not applied and the current frame is output as-is.
+ * Therefore, setting threshold_y and threshold_uv to 0 disables TNR.
+ *
+ * ISP block: TNR1
+ * ISP1: TNR1 is used.
+ * ISP2: TNR1 is used.
+ */
+
+
+struct ia_css_tnr_config {
+ ia_css_u0_16 gain; /**< Interpolation ratio of current frame
+ and previous frame.
+ gain=0.0 -> previous frame is output.
+ gain=1.0 -> current frame is output.
+ u0.16, [0,65535],
+ default 32768(0.5), ineffective 65535(almost 1.0) */
+ ia_css_u0_16 threshold_y; /**< Threshold to enable interpolation of Y.
+ If difference between current frame and
+ previous frame is greater than threshold_y,
+ TNR for Y is disabled.
+ u0.16, [0,65535], default/ineffective 0 */
+ ia_css_u0_16 threshold_uv; /**< Threshold to enable interpolation of
+ U/V.
+ If difference between current frame and
+ previous frame is greater than threshold_uv,
+ TNR for UV is disabled.
+ u0.16, [0,65535], default/ineffective 0 */
+};
+
+
+#endif /* __IA_CSS_TNR_TYPES_H */
+
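
A conceptual model (not part of the patch, and not the ISP firmware) of the behaviour documented above: blend the current and previous pixels with the u0.16 gain when the frame difference is within the threshold, otherwise pass the current pixel through unchanged:

  #include <stdint.h>
  #include <stdio.h>
  #include <stdlib.h>

  static uint16_t tnr_pixel(uint16_t cur, uint16_t prev,
                            uint16_t gain_u016, uint16_t threshold)
  {
      int diff = (int)cur - (int)prev;

      if (abs(diff) > (int)threshold)
          return cur; /* motion detected: output current pixel as-is */

      /* gain = 0 -> previous frame, gain = 0xffff -> (almost) current frame */
      return (uint16_t)(prev + (int)(((int64_t)diff * gain_u016) >> 16));
  }

  int main(void)
  {
      printf("%u\n", tnr_pixel(120, 100, 32768, 32)); /* blended: 110 */
      printf("%u\n", tnr_pixel(200, 100, 32768, 32)); /* motion:  200 */
      return 0;
  }
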
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/uds/uds_1.0/ia_css_uds_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/uds/uds_1.0/ia_css_uds_param.h
new file mode 100644
index 000000000000..26b7b5bc9391
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/uds/uds_1.0/ia_css_uds_param.h
@@ -0,0 +1,31 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_UDS_PARAM_H
+#define __IA_CSS_UDS_PARAM_H
+
+#include "sh_css_uds.h"
+
+/* uds (Up and Down scaling) */
+struct ia_css_uds_config {
+ struct sh_css_crop_pos crop_pos;
+ struct sh_css_uds_info uds;
+};
+
+struct sh_css_sp_uds_params {
+ struct sh_css_crop_pos crop_pos;
+ struct sh_css_uds_info uds;
+};
+
+#endif /* __IA_CSS_UDS_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/vf/vf_1.0/ia_css_vf.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/vf/vf_1.0/ia_css_vf.host.c
new file mode 100644
index 000000000000..5610833ed595
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/vf/vf_1.0/ia_css_vf.host.c
@@ -0,0 +1,140 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "ia_css_vf.host.h"
+#include <assert_support.h>
+#include <ia_css_err.h>
+#include <ia_css_frame.h>
+#include <ia_css_frame_public.h>
+#include <ia_css_pipeline.h>
+#define IA_CSS_INCLUDE_CONFIGURATIONS
+#include "ia_css_isp_configs.h"
+
+#include "isp.h"
+
+void
+ia_css_vf_config(
+ struct sh_css_isp_vf_isp_config *to,
+ const struct ia_css_vf_configuration *from,
+ unsigned size)
+{
+ unsigned elems_a = ISP_VEC_NELEMS;
+
+ (void)size;
+ to->vf_downscale_bits = from->vf_downscale_bits;
+ to->enable = from->info != NULL;
+
+ if (from->info) {
+ ia_css_frame_info_to_frame_sp_info(&to->info, from->info);
+ ia_css_dma_configure_from_info(&to->dma.port_b, from->info);
+ to->dma.width_a_over_b = elems_a / to->dma.port_b.elems;
+
+ /* Assume divisibility here; may need to generalize to fixed point. */
+ assert(elems_a % to->dma.port_b.elems == 0);
+ }
+}
+
+/* compute the log2 of the downscale factor needed to get closest
+ * to the requested viewfinder resolution on the upper side. The output cannot
+ * be smaller than the requested viewfinder resolution.
+ */
+enum ia_css_err
+sh_css_vf_downscale_log2(
+ const struct ia_css_frame_info *out_info,
+ const struct ia_css_frame_info *vf_info,
+ unsigned int *downscale_log2)
+{
+ unsigned int ds_log2 = 0;
+ unsigned int out_width;
+
+ if ((out_info == NULL) || (vf_info == NULL))
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+
+ out_width = out_info->res.width;
+
+ if (out_width == 0)
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+
+ /* downscale until width smaller than the viewfinder width. We don't
+ * test for the height since the vmem buffers only put restrictions on
+ * the width of a line, not on the number of lines in a frame.
+ */
+ while (out_width >= vf_info->res.width) {
+ ds_log2++;
+ out_width /= 2;
+ }
+ /* now width is smaller, so we go up one step */
+ if ((ds_log2 > 0) && (out_width < ia_css_binary_max_vf_width()))
+ ds_log2--;
+ /* TODO: use actual max input resolution of vf_pp binary */
+ if ((out_info->res.width >> ds_log2) >= 2 * ia_css_binary_max_vf_width())
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ *downscale_log2 = ds_log2;
+ return IA_CSS_SUCCESS;
+}
+
+static enum ia_css_err
+configure_kernel(
+ const struct ia_css_binary_info *info,
+ const struct ia_css_frame_info *out_info,
+ const struct ia_css_frame_info *vf_info,
+ unsigned int *downscale_log2,
+ struct ia_css_vf_configuration *config)
+{
+ enum ia_css_err err;
+ unsigned vf_log_ds = 0;
+
+ /* First compute value */
+ if (vf_info) {
+ err = sh_css_vf_downscale_log2(out_info, vf_info, &vf_log_ds);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+ }
+ vf_log_ds = min(vf_log_ds, info->vf_dec.max_log_downscale);
+ *downscale_log2 = vf_log_ds;
+
+ /* Then store it in isp config section */
+ config->vf_downscale_bits = vf_log_ds;
+ return IA_CSS_SUCCESS;
+}
+
+static void
+configure_dma(
+ struct ia_css_vf_configuration *config,
+ const struct ia_css_frame_info *vf_info)
+{
+ config->info = vf_info;
+}
+
+enum ia_css_err
+ia_css_vf_configure(
+ const struct ia_css_binary *binary,
+ const struct ia_css_frame_info *out_info,
+ struct ia_css_frame_info *vf_info,
+ unsigned int *downscale_log2)
+{
+ enum ia_css_err err;
+ struct ia_css_vf_configuration config;
+ const struct ia_css_binary_info *info = &binary->info->sp;
+
+ err = configure_kernel(info, out_info, vf_info, downscale_log2, &config);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+ configure_dma(&config, vf_info);
+ if (vf_info)
+ vf_info->raw_bit_depth = info->dma.vfdec_bits_per_pixel;
+ ia_css_configure_vf(binary, &config);
+ return IA_CSS_SUCCESS;
+}
+
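
A standalone sketch (not part of the patch) of the downscale search implemented by sh_css_vf_downscale_log2() above; MAX_VF_WIDTH stands in for ia_css_binary_max_vf_width() and its value is only an assumption for illustration:

  #include <stdio.h>

  #define MAX_VF_WIDTH 1280 /* assumption replacing ia_css_binary_max_vf_width() */

  static int vf_downscale_log2(unsigned int out_width, unsigned int vf_width,
                               unsigned int *ds_log2)
  {
      unsigned int log2 = 0, w = out_width;

      if (out_width == 0 || vf_width == 0)
          return -1;

      while (w >= vf_width) { /* halve until below the viewfinder width */
          log2++;
          w /= 2;
      }
      if (log2 > 0 && w < MAX_VF_WIDTH)
          log2--;             /* step back up to the closest width >= vf_width */
      if ((out_width >> log2) >= 2 * MAX_VF_WIDTH)
          return -1;
      *ds_log2 = log2;
      return 0;
  }

  int main(void)
  {
      unsigned int ds;

      if (vf_downscale_log2(1920, 640, &ds) == 0)
          printf("downscale_log2 = %u -> %u px wide\n", ds, 1920 >> ds);
      return 0;
  }
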
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/vf/vf_1.0/ia_css_vf.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/vf/vf_1.0/ia_css_vf.host.h
new file mode 100644
index 000000000000..c7c3625a9a96
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/vf/vf_1.0/ia_css_vf.host.h
@@ -0,0 +1,47 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_VF_HOST_H
+#define __IA_CSS_VF_HOST_H
+
+#include "ia_css_frame_public.h"
+#include "ia_css_binary.h"
+
+#include "ia_css_vf_types.h"
+#include "ia_css_vf_param.h"
+
+/* compute the log2 of the downscale factor needed to get closest
+ * to the requested viewfinder resolution on the upper side. The output cannot
+ * be smaller than the requested viewfinder resolution.
+ */
+enum ia_css_err
+sh_css_vf_downscale_log2(
+ const struct ia_css_frame_info *out_info,
+ const struct ia_css_frame_info *vf_info,
+ unsigned int *downscale_log2);
+
+void
+ia_css_vf_config(
+ struct sh_css_isp_vf_isp_config *to,
+ const struct ia_css_vf_configuration *from,
+ unsigned size);
+
+enum ia_css_err
+ia_css_vf_configure(
+ const struct ia_css_binary *binary,
+ const struct ia_css_frame_info *out_info,
+ struct ia_css_frame_info *vf_info,
+ unsigned int *downscale_log2);
+
+#endif /* __IA_CSS_VF_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/vf/vf_1.0/ia_css_vf_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/vf/vf_1.0/ia_css_vf_param.h
new file mode 100644
index 000000000000..df5d37c8c946
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/vf/vf_1.0/ia_css_vf_param.h
@@ -0,0 +1,37 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_VF_PARAM_H
+#define __IA_CSS_VF_PARAM_H
+
+#include "type_support.h"
+#include "dma.h"
+#include "gc/gc_1.0/ia_css_gc_param.h" /* GAMMA_OUTPUT_BITS */
+#include "ia_css_frame_comm.h" /* ia_css_frame_sp_info */
+#include "ia_css_vf_types.h"
+
+#define VFDEC_BITS_PER_PIXEL GAMMA_OUTPUT_BITS
+
+/** Viewfinder decimation */
+struct sh_css_isp_vf_isp_config {
+ uint32_t vf_downscale_bits; /**< Log VF downscale value */
+ uint32_t enable;
+ struct ia_css_frame_sp_info info;
+ struct {
+ uint32_t width_a_over_b;
+ struct dma_port_config port_b;
+ } dma;
+};
+
+#endif /* __IA_CSS_VF_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/vf/vf_1.0/ia_css_vf_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/vf/vf_1.0/ia_css_vf_types.h
new file mode 100644
index 000000000000..d8cfdfbc8c0b
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/vf/vf_1.0/ia_css_vf_types.h
@@ -0,0 +1,32 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_VF_TYPES_H
+#define __IA_CSS_VF_TYPES_H
+
+/** Viewfinder decimation
+ *
+ * ISP block: vfeven_horizontal_downscale
+ */
+
+#include <ia_css_frame_public.h>
+#include <type_support.h>
+
+struct ia_css_vf_configuration {
+ uint32_t vf_downscale_bits; /**< Log VF downscale value */
+ const struct ia_css_frame_info *info;
+};
+
+#endif /* __IA_CSS_VF_TYPES_H */
+
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/wb/wb_1.0/ia_css_wb.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/wb/wb_1.0/ia_css_wb.host.c
new file mode 100644
index 000000000000..b43cb88c6ae4
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/wb/wb_1.0/ia_css_wb.host.c
@@ -0,0 +1,89 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "ia_css_types.h"
+#include "sh_css_defs.h"
+#ifndef IA_CSS_NO_DEBUG
+#include "ia_css_debug.h"
+#endif
+#include "sh_css_frac.h"
+
+#include "ia_css_wb.host.h"
+
+const struct ia_css_wb_config default_wb_config = {
+ 1,
+ 32768,
+ 32768,
+ 32768,
+ 32768
+};
+
+void
+ia_css_wb_encode(
+ struct sh_css_isp_wb_params *to,
+ const struct ia_css_wb_config *from,
+ unsigned size)
+{
+ (void)size;
+ to->gain_shift =
+ uISP_REG_BIT - from->integer_bits;
+ to->gain_gr =
+ uDIGIT_FITTING(from->gr, 16 - from->integer_bits,
+ to->gain_shift);
+ to->gain_r =
+ uDIGIT_FITTING(from->r, 16 - from->integer_bits,
+ to->gain_shift);
+ to->gain_b =
+ uDIGIT_FITTING(from->b, 16 - from->integer_bits,
+ to->gain_shift);
+ to->gain_gb =
+ uDIGIT_FITTING(from->gb, 16 - from->integer_bits,
+ to->gain_shift);
+}
+
+#ifndef IA_CSS_NO_DEBUG
+void
+ia_css_wb_dump(
+ const struct sh_css_isp_wb_params *wb,
+ unsigned level)
+{
+ if (!wb)
+ return;
+ ia_css_debug_dtrace(level, "White Balance:\n");
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "wb_gain_shift", wb->gain_shift);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "wb_gain_gr", wb->gain_gr);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "wb_gain_r", wb->gain_r);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "wb_gain_b", wb->gain_b);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "wb_gain_gb", wb->gain_gb);
+}
+
+void
+ia_css_wb_debug_dtrace(
+ const struct ia_css_wb_config *config,
+ unsigned level)
+{
+ ia_css_debug_dtrace(level,
+ "config.integer_bits=%d, "
+ "config.gr=%d, config.r=%d, "
+ "config.b=%d, config.gb=%d\n",
+ config->integer_bits,
+ config->gr, config->r,
+ config->b, config->gb);
+}
+#endif
+
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/wb/wb_1.0/ia_css_wb.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/wb/wb_1.0/ia_css_wb.host.h
new file mode 100644
index 000000000000..18666baf9f76
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/wb/wb_1.0/ia_css_wb.host.h
@@ -0,0 +1,39 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_WB_HOST_H
+#define __IA_CSS_WB_HOST_H
+
+#include "ia_css_wb_types.h"
+#include "ia_css_wb_param.h"
+
+extern const struct ia_css_wb_config default_wb_config;
+
+void
+ia_css_wb_encode(
+ struct sh_css_isp_wb_params *to,
+ const struct ia_css_wb_config *from,
+ unsigned size);
+
+void
+ia_css_wb_dump(
+ const struct sh_css_isp_wb_params *wb,
+ unsigned level);
+
+void
+ia_css_wb_debug_dtrace(
+ const struct ia_css_wb_config *wb,
+ unsigned level);
+
+#endif /* __IA_CSS_WB_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/wb/wb_1.0/ia_css_wb_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/wb/wb_1.0/ia_css_wb_param.h
new file mode 100644
index 000000000000..c95c53a24067
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/wb/wb_1.0/ia_css_wb_param.h
@@ -0,0 +1,29 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_WB_PARAM_H
+#define __IA_CSS_WB_PARAM_H
+
+#include "type_support.h"
+
+/* WB (White Balance) */
+struct sh_css_isp_wb_params {
+ int32_t gain_shift;
+ int32_t gain_gr;
+ int32_t gain_r;
+ int32_t gain_b;
+ int32_t gain_gb;
+};
+
+#endif /* __IA_CSS_WB_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/wb/wb_1.0/ia_css_wb_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/wb/wb_1.0/ia_css_wb_types.h
new file mode 100644
index 000000000000..6bcfa274be88
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/wb/wb_1.0/ia_css_wb_types.h
@@ -0,0 +1,47 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_WB_TYPES_H
+#define __IA_CSS_WB_TYPES_H
+
+/** @file
+* CSS-API header file for White Balance parameters.
+*/
+
+
+/** White Balance configuration (Gain Adjust).
+ *
+ * ISP block: WB1
+ * ISP1: WB1 is used.
+ * ISP2: WB1 is used.
+ */
+struct ia_css_wb_config {
+ uint32_t integer_bits; /**< Common exponent of gains.
+ u8.0, [0,3],
+ default 1, ineffective 1 */
+ uint32_t gr; /**< Significand of Gr gain.
+ u[integer_bits].[16-integer_bits], [0,65535],
+ default/ineffective 32768(u1.15, 1.0) */
+ uint32_t r; /**< Significand of R gain.
+ u[integer_bits].[16-integer_bits], [0,65535],
+ default/ineffective 32768(u1.15, 1.0) */
+ uint32_t b; /**< Significand of B gain.
+ u[integer_bits].[16-integer_bits], [0,65535],
+ default/ineffective 32768(u1.15, 1.0) */
+ uint32_t gb; /**< Significand of Gb gain.
+ u[integer_bits].[16-integer_bits], [0,65535],
+ default/ineffective 32768(u1.15, 1.0) */
+};
+
+#endif /* __IA_CSS_WB_TYPES_H */
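
For illustration, a small sketch (not part of the patch) that decodes the gain encoding documented above: each channel gain is significand / 2^(16 - integer_bits), so 32768 with integer_bits = 1 (u1.15) decodes to 1.0:

  #include <stdint.h>
  #include <stdio.h>

  static double wb_gain(uint32_t integer_bits, uint32_t significand)
  {
      return (double)significand / (double)(1u << (16 - integer_bits));
  }

  int main(void)
  {
      printf("gr gain = %.3f\n", wb_gain(1, 32768)); /* 1.000 (default) */
      printf("r  gain = %.3f\n", wb_gain(1, 49152)); /* 1.500 */
      return 0;
  }
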
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_1.0/ia_css_xnr.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_1.0/ia_css_xnr.host.c
new file mode 100644
index 000000000000..3018100f6f76
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_1.0/ia_css_xnr.host.c
@@ -0,0 +1,66 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+
+#include "ia_css_types.h"
+#include "sh_css_defs.h"
+#include "ia_css_debug.h"
+#include "sh_css_frac.h"
+
+#include "ia_css_xnr.host.h"
+
+const struct ia_css_xnr_config default_xnr_config = {
+ /** default threshold 6400 translates to 25 on ISP. */
+ 6400
+};
+
+void
+ia_css_xnr_table_vamem_encode(
+ struct sh_css_isp_xnr_vamem_params *to,
+ const struct ia_css_xnr_table *from,
+ unsigned size)
+{
+ (void)size;
+ memcpy(&to->xnr, &from->data, sizeof(to->xnr));
+}
+
+void
+ia_css_xnr_encode(
+ struct sh_css_isp_xnr_params *to,
+ const struct ia_css_xnr_config *from,
+ unsigned size)
+{
+ (void)size;
+
+ to->threshold =
+ (uint16_t)uDIGIT_FITTING(from->threshold, 16, SH_CSS_ISP_YUV_BITS);
+}
+
+void
+ia_css_xnr_table_debug_dtrace(
+ const struct ia_css_xnr_table *config,
+ unsigned level)
+{
+ (void)config;
+ (void)level;
+}
+
+void
+ia_css_xnr_debug_dtrace(
+ const struct ia_css_xnr_config *config,
+ unsigned level)
+{
+ ia_css_debug_dtrace(level,
+ "config.threshold=%d\n", config->threshold);
+}
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_1.0/ia_css_xnr.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_1.0/ia_css_xnr.host.h
new file mode 100644
index 000000000000..eb3425eafbbe
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_1.0/ia_css_xnr.host.h
@@ -0,0 +1,47 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_XNR_HOST_H
+#define __IA_CSS_XNR_HOST_H
+
+#include "sh_css_params.h"
+
+#include "ia_css_xnr_param.h"
+#include "ia_css_xnr_table.host.h"
+
+extern const struct ia_css_xnr_config default_xnr_config;
+
+void
+ia_css_xnr_table_vamem_encode(
+ struct sh_css_isp_xnr_vamem_params *to,
+ const struct ia_css_xnr_table *from,
+ unsigned size);
+
+void
+ia_css_xnr_encode(
+ struct sh_css_isp_xnr_params *to,
+ const struct ia_css_xnr_config *from,
+ unsigned size);
+
+void
+ia_css_xnr_table_debug_dtrace(
+ const struct ia_css_xnr_table *s3a,
+ unsigned level);
+
+void
+ia_css_xnr_debug_dtrace(
+ const struct ia_css_xnr_config *config,
+ unsigned level);
+
+#endif /* __IA_CSS_XNR_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_1.0/ia_css_xnr_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_1.0/ia_css_xnr_param.h
new file mode 100644
index 000000000000..806c9f8f0e2e
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_1.0/ia_css_xnr_param.h
@@ -0,0 +1,51 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_XNR_PARAM_H
+#define __IA_CSS_XNR_PARAM_H
+
+#include "type_support.h"
+#include <system_global.h>
+
+#ifndef PIPE_GENERATION
+#if defined(HAS_VAMEM_VERSION_2)
+#define SH_CSS_ISP_XNR_TABLE_SIZE_LOG2 IA_CSS_VAMEM_2_XNR_TABLE_SIZE_LOG2
+#define SH_CSS_ISP_XNR_TABLE_SIZE IA_CSS_VAMEM_2_XNR_TABLE_SIZE
+#elif defined(HAS_VAMEM_VERSION_1)
+#define SH_CSS_ISP_XNR_TABLE_SIZE_LOG2 IA_CSS_VAMEM_1_XNR_TABLE_SIZE_LOG2
+#define SH_CSS_ISP_XNR_TABLE_SIZE IA_CSS_VAMEM_1_XNR_TABLE_SIZE
+#else
+#error "Unknown vamem type"
+#endif
+
+
+#else
+/* For pipe generation, the size is not relevant */
+#define SH_CSS_ISP_XNR_TABLE_SIZE 0
+#endif
+
+/* This should be vamem_data_t, but that breaks the pipe generator */
+struct sh_css_isp_xnr_vamem_params {
+ uint16_t xnr[SH_CSS_ISP_XNR_TABLE_SIZE];
+};
+
+struct sh_css_isp_xnr_params {
+ /** XNR threshold.
+ * type: u0.16, but the actual valid range is [0,255];
+ * the valid range depends on SH_CSS_ISP_YUV_BITS (currently 8 bits).
+ * default: 25 */
+ uint16_t threshold;
+};
+
+#endif /* __IA_CSS_XNR_PARAM_H */
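
A small sketch (not part of the patch) of the scaling noted above: the u0.16 host-side threshold is shifted down to the ISP's YUV precision, so with the 8-bit depth mentioned in the comment the default 6400 becomes 6400 >> 8 = 25:

  #include <stdint.h>
  #include <stdio.h>

  #define HOST_BITS    16
  #define ISP_YUV_BITS 8 /* assumption, per the "currently 8bits" note above */

  int main(void)
  {
      uint16_t host_threshold = 6400;
      uint16_t isp_threshold = host_threshold >> (HOST_BITS - ISP_YUV_BITS);

      printf("%u -> %u\n", host_threshold, isp_threshold); /* 6400 -> 25 */
      return 0;
  }
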
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_1.0/ia_css_xnr_table.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_1.0/ia_css_xnr_table.host.c
new file mode 100644
index 000000000000..cd5fb72fce3f
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_1.0/ia_css_xnr_table.host.c
@@ -0,0 +1,81 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <type_support.h>
+#include <string_support.h> /* memcpy */
+#include "system_global.h"
+#include "vamem.h"
+#include "ia_css_types.h"
+#include "ia_css_xnr_table.host.h"
+
+struct ia_css_xnr_table default_xnr_table;
+
+#if defined(HAS_VAMEM_VERSION_2)
+
+static const uint16_t
+default_xnr_table_data[IA_CSS_VAMEM_2_XNR_TABLE_SIZE] = {
+ /* 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 */
+ 8191>>1, 4096>>1, 2730>>1, 2048>>1, 1638>>1, 1365>>1, 1170>>1, 1024>>1, 910>>1, 819>>1, 744>>1, 682>>1, 630>>1, 585>>1,
+ 546>>1, 512>>1,
+
+ /* 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 */
+ 481>>1, 455>>1, 431>>1, 409>>1, 390>>1, 372>>1, 356>>1, 341>>1, 327>>1, 315>>1, 303>>1, 292>>1, 282>>1, 273>>1, 264>>1,
+ 256>>1,
+
+ /* 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 */
+ 248>>1, 240>>1, 234>>1, 227>>1, 221>>1, 215>>1, 210>>1, 204>>1, 199>>1, 195>>1, 190>>1, 186>>1, 182>>1, 178>>1, 174>>1,
+ 170>>1,
+
+ /* 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 */
+ 167>>1, 163>>1, 160>>1, 157>>1, 154>>1, 151>>1, 148>>1, 146>>1, 143>>1, 141>>1, 138>>1, 136>>1, 134>>1, 132>>1, 130>>1, 128>>1
+};
+
+#elif defined(HAS_VAMEM_VERSION_1)
+
+static const uint16_t
+default_xnr_table_data[IA_CSS_VAMEM_1_XNR_TABLE_SIZE] = {
+ /* 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 */
+ 8191>>1, 4096>>1, 2730>>1, 2048>>1, 1638>>1, 1365>>1, 1170>>1, 1024>>1, 910>>1, 819>>1, 744>>1, 682>>1, 630>>1, 585>>1,
+ 546>>1, 512>>1,
+
+ /* 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 */
+ 481>>1, 455>>1, 431>>1, 409>>1, 390>>1, 372>>1, 356>>1, 341>>1, 327>>1, 315>>1, 303>>1, 292>>1, 282>>1, 273>>1, 264>>1,
+ 256>>1,
+
+ /* 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 */
+ 248>>1, 240>>1, 234>>1, 227>>1, 221>>1, 215>>1, 210>>1, 204>>1, 199>>1, 195>>1, 190>>1, 186>>1, 182>>1, 178>>1, 174>>1,
+ 170>>1,
+
+ /* 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 */
+ 167>>1, 163>>1, 160>>1, 157>>1, 154>>1, 151>>1, 148>>1, 146>>1, 143>>1, 141>>1, 138>>1, 136>>1, 134>>1, 132>>1, 130>>1, 128>>1
+};
+
+#else
+#error "sh_css_params.c: VAMEM version must \
+ be one of {VAMEM_VERSION_1, VAMEM_VERSION_2}"
+#endif
+
+void
+ia_css_config_xnr_table(void)
+{
+#if defined(HAS_VAMEM_VERSION_2)
+ memcpy(default_xnr_table.data.vamem_2, default_xnr_table_data,
+ sizeof(default_xnr_table_data));
+ default_xnr_table.vamem_type = IA_CSS_VAMEM_TYPE_2;
+#else
+ memcpy(default_xnr_table.data.vamem_1, default_xnr_table_data,
+ sizeof(default_xnr_table_data));
+ default_xnr_table.vamem_type = IA_CSS_VAMEM_TYPE_1;
+#endif
+}
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_1.0/ia_css_xnr_table.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_1.0/ia_css_xnr_table.host.h
new file mode 100644
index 000000000000..130086713a7f
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_1.0/ia_css_xnr_table.host.h
@@ -0,0 +1,22 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_XNR_TABLE_HOST_H
+#define __IA_CSS_XNR_TABLE_HOST_H
+
+extern struct ia_css_xnr_table default_xnr_table;
+
+void ia_css_config_xnr_table(void);
+
+#endif /* __IA_CSS_XNR_TABLE_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_1.0/ia_css_xnr_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_1.0/ia_css_xnr_types.h
new file mode 100644
index 000000000000..89e8b0f17e8c
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_1.0/ia_css_xnr_types.h
@@ -0,0 +1,71 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_XNR_TYPES_H
+#define __IA_CSS_XNR_TYPES_H
+
+/** @file
+* CSS-API header file for Extra Noise Reduction (XNR) parameters.
+*/
+
+/** XNR table.
+ *
+ * NOTE: The driver does not need to set this table,
+ * because the default values are set inside the css.
+ *
+ * This table contains coefficients used for division in XNR.
+ *
+ * u0.12, [0,4095],
+ * {4095, 2048, 1365, .........., 65, 64}
+ * ({1/1, 1/2, 1/3, ............., 1/63, 1/64})
+ *
+ * ISP block: XNR1
+ * ISP1: XNR1 is used.
+ * ISP2: XNR1 is used.
+ *
+ */
+
+/** Number of elements in the xnr table. */
+#define IA_CSS_VAMEM_1_XNR_TABLE_SIZE_LOG2 6
+/** Number of elements in the xnr table. */
+#define IA_CSS_VAMEM_1_XNR_TABLE_SIZE (1U<<IA_CSS_VAMEM_1_XNR_TABLE_SIZE_LOG2)
+
+/** Number of elements in the xnr table. */
+#define IA_CSS_VAMEM_2_XNR_TABLE_SIZE_LOG2 6
+/** Number of elements in the xnr table. */
+#define IA_CSS_VAMEM_2_XNR_TABLE_SIZE (1U<<IA_CSS_VAMEM_2_XNR_TABLE_SIZE_LOG2)
+
+/** IA_CSS_VAMEM_TYPE_1(ISP2300) or
+ IA_CSS_VAMEM_TYPE_2(ISP2400) */
+union ia_css_xnr_data {
+ uint16_t vamem_1[IA_CSS_VAMEM_1_XNR_TABLE_SIZE];
+ /**< Coefficients table on vamem type1. u0.12, [0,4095] */
+ uint16_t vamem_2[IA_CSS_VAMEM_2_XNR_TABLE_SIZE];
+ /**< Coefficients table on vamem type2. u0.12, [0,4095] */
+};
+
+struct ia_css_xnr_table {
+ enum ia_css_vamem_type vamem_type;
+ union ia_css_xnr_data data;
+};
+
+struct ia_css_xnr_config {
+ /** XNR threshold.
+ * type:u0.16 valid range:[0,65535]
+ * default: 6400 */
+ uint16_t threshold;
+};
+
+#endif /* __IA_CSS_XNR_TYPES_H */
+
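
One way (an illustration only; the driver ships the table precomputed in ia_css_xnr_table.host.c earlier in this patch) to generate u0.12 reciprocal values matching the documented {4095, 2048, 1365, ..., 64} sequence:

  #include <stdint.h>
  #include <stdio.h>

  #define XNR_TABLE_SIZE 64

  int main(void)
  {
      uint16_t table[XNR_TABLE_SIZE];
      unsigned int i;

      for (i = 0; i < XNR_TABLE_SIZE; i++) {
          unsigned int v = 4096 / (i + 1); /* ~1/(i+1) in u0.12 */

          table[i] = (uint16_t)(v > 4095 ? 4095 : v);
      }
      printf("%u %u %u ... %u\n", table[0], table[1], table[2],
             table[XNR_TABLE_SIZE - 1]); /* 4095 2048 1365 ... 64 */
      return 0;
  }
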
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_3.0/ia_css_xnr3.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_3.0/ia_css_xnr3.host.c
new file mode 100644
index 000000000000..955b6c8a8738
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_3.0/ia_css_xnr3.host.c
@@ -0,0 +1,265 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "type_support.h"
+#include "math_support.h"
+#include "sh_css_defs.h"
+#include "ia_css_types.h"
+#ifdef ISP2401
+#include "assert_support.h"
+#endif
+#include "ia_css_xnr3.host.h"
+
+/* Maximum value for alpha on ISP interface */
+#define XNR_MAX_ALPHA ((1 << (ISP_VEC_ELEMBITS - 1)) - 1)
+
+/* Minimum value for sigma on host interface. Lower values translate to
+ * max_alpha.
+ */
+#define XNR_MIN_SIGMA (IA_CSS_XNR3_SIGMA_SCALE / 100)
+
+#ifdef ISP2401
+/*
+ * division look-up table
+ * Refers to XNR3.0.5
+ */
+#define XNR3_LOOK_UP_TABLE_POINTS 16
+
+static const int16_t x[XNR3_LOOK_UP_TABLE_POINTS] = {
+1024, 1164, 1320, 1492, 1680, 1884, 2108, 2352,
+2616, 2900, 3208, 3540, 3896, 4276, 4684, 5120};
+
+static const int16_t a[XNR3_LOOK_UP_TABLE_POINTS] = {
+-7213, -5580, -4371, -3421, -2722, -2159, -6950, -5585,
+-4529, -3697, -3010, -2485, -2070, -1727, -1428, 0};
+
+static const int16_t b[XNR3_LOOK_UP_TABLE_POINTS] = {
+4096, 3603, 3178, 2811, 2497, 2226, 1990, 1783,
+1603, 1446, 1307, 1185, 1077, 981, 895, 819};
+
+static const int16_t c[XNR3_LOOK_UP_TABLE_POINTS] = {
+1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+
+#endif
+
+/*
+ * Default kernel parameters. In general, default is bypass mode or as close
+ * to the ineffective values as possible. Due to the chroma down+upsampling,
+ * perfect bypass mode is not possible for xnr3 filter itself. Instead, the
+ * 'blending' parameter is used to create a bypass.
+ */
+const struct ia_css_xnr3_config default_xnr3_config = {
+ /* sigma */
+ { 0, 0, 0, 0, 0, 0 },
+ /* coring */
+ { 0, 0, 0, 0 },
+ /* blending */
+ { 0 }
+};
+
+/*
+ * Compute an alpha value for the ISP kernel from sigma value on the host
+ * parameter interface as: alpha_scale * 1/(sigma/sigma_scale)
+ */
+static int32_t
+compute_alpha(int sigma)
+{
+ int32_t alpha;
+#if defined(XNR_ATE_ROUNDING_BUG)
+ int32_t alpha_unscaled;
+#else
+ int offset = sigma / 2;
+#endif
+ if (sigma < XNR_MIN_SIGMA) {
+ alpha = XNR_MAX_ALPHA;
+ } else {
+#if defined(XNR_ATE_ROUNDING_BUG)
+ /* The scale factor for alpha must be the same as on the ISP.
+ * For sigma, it must match the public interface. The code
+ * below mimics the rounding and unintended loss of precision
+ * of the ATE reference code. It computes an unscaled alpha,
+ * rounds down, and then scales it to get the required fixed
+ * point representation. It would have been more precise to
+ * round after scaling. */
+ alpha_unscaled = IA_CSS_XNR3_SIGMA_SCALE / sigma;
+ alpha = alpha_unscaled * XNR_ALPHA_SCALE_FACTOR;
+#else
+ alpha = ((IA_CSS_XNR3_SIGMA_SCALE * XNR_ALPHA_SCALE_FACTOR) + offset) / sigma;
+#endif
+
+ if (alpha > XNR_MAX_ALPHA)
+ alpha = XNR_MAX_ALPHA;
+ }
+
+ return alpha;
+}
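
For readers checking the fixed-point arithmetic, here is a stand-alone sketch of the non-ATE branch above with the constants spelled out. It assumes IA_CSS_XNR3_SIGMA_SCALE = 1 << 10 and XNR_ALPHA_SCALE_FACTOR = 1 << 5, the values defined in the xnr3 headers added later in this patch, and it leaves out the XNR_MAX_ALPHA clip.

#include <stdio.h>

/* Editorial illustration of compute_alpha(): alpha ~= alpha_scale * sigma_scale / sigma,
 * rounded to nearest via the sigma/2 offset. Not part of the driver. */
int main(void)
{
	const int sigma_scale = 1 << 10;	/* IA_CSS_XNR3_SIGMA_SCALE */
	const int alpha_scale = 1 << 5;		/* XNR_ALPHA_SCALE_FACTOR  */
	int sigma = 2 * sigma_scale;		/* a host-side sigma of 2.0 */
	int alpha = (sigma_scale * alpha_scale + sigma / 2) / sigma;

	printf("alpha = %d\n", alpha);		/* 16, i.e. 1/2.0 in the 5-bit alpha scale */
	return 0;
}
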
+
+/*
+ * Compute the scaled coring value for the ISP kernel from the value on the
+ * host parameter interface.
+ */
+static int32_t
+compute_coring(int coring)
+{
+ int32_t isp_coring;
+ int32_t isp_scale = XNR_CORING_SCALE_FACTOR;
+ int32_t host_scale = IA_CSS_XNR3_CORING_SCALE;
+ int32_t offset = host_scale / 2; /* fixed-point 0.5 */
+
+ /* Convert from public host-side scale factor to isp-side scale
+ * factor. Clip to [0, isp_scale-1].
+ */
+ isp_coring = ((coring * isp_scale) + offset) / host_scale;
+ return min(max(isp_coring, 0), isp_scale - 1);
+}
+
+/*
+ * Compute the scaled blending strength for the ISP kernel from the value on
+ * the host parameter interface.
+ */
+static int32_t
+compute_blending(int strength)
+{
+ int32_t isp_strength;
+ int32_t isp_scale = XNR_BLENDING_SCALE_FACTOR;
+ int32_t host_scale = IA_CSS_XNR3_BLENDING_SCALE;
+ int32_t offset = host_scale / 2; /* fixed-point 0.5 */
+
+ /* Convert from public host-side scale factor to isp-side scale
+ * factor. The blending factor is positive on the host side, but
+ * negative on the ISP side because +1.0 cannot be represented
+ * exactly as s0.11 fixed point, but -1.0 can.
+ */
+ isp_strength = -(((strength * isp_scale) + offset) / host_scale);
+ return max(min(isp_strength, 0), -XNR_BLENDING_SCALE_FACTOR);
+}
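
The sign flip above is worth a worked check: a full-strength blending value (1.0 on the host side) must land exactly on -isp_scale. The sketch below demonstrates that, using placeholder scale values of 1 << 11 for both sides; the real driver takes IA_CSS_XNR3_BLENDING_SCALE and XNR_BLENDING_SCALE_FACTOR from its headers, and the ISP-side factor depends on ISP_VEC_ELEMBITS.

#include <stdio.h>

/* Editorial illustration of compute_blending(): mirrors the rounding division
 * and the clamp to [-isp_scale, 0]. */
static int blend(int strength, int host_scale, int isp_scale)
{
	int v = -(((strength * isp_scale) + host_scale / 2) / host_scale);

	if (v > 0)
		return 0;
	return (v < -isp_scale) ? -isp_scale : v;
}

int main(void)
{
	const int host_scale = 1 << 11;	/* stand-in for IA_CSS_XNR3_BLENDING_SCALE */
	const int isp_scale = 1 << 11;	/* stand-in for XNR_BLENDING_SCALE_FACTOR  */

	/* 1.0 on the host interface maps to -2048, i.e. -1.0 on the ISP. */
	printf("%d\n", blend(host_scale, host_scale, isp_scale));
	return 0;
}
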
+
+void
+ia_css_xnr3_encode(
+ struct sh_css_isp_xnr3_params *to,
+ const struct ia_css_xnr3_config *from,
+ unsigned size)
+{
+ int kernel_size = XNR_FILTER_SIZE;
+ /* The adjust factor is the next power of 2
+ * w.r.t. the kernel size. */
+ int adjust_factor = ceil_pow2(kernel_size);
+ int32_t max_diff = (1 << (ISP_VEC_ELEMBITS - 1)) - 1;
+ int32_t min_diff = -(1 << (ISP_VEC_ELEMBITS - 1));
+
+ int32_t alpha_y0 = compute_alpha(from->sigma.y0);
+ int32_t alpha_y1 = compute_alpha(from->sigma.y1);
+ int32_t alpha_u0 = compute_alpha(from->sigma.u0);
+ int32_t alpha_u1 = compute_alpha(from->sigma.u1);
+ int32_t alpha_v0 = compute_alpha(from->sigma.v0);
+ int32_t alpha_v1 = compute_alpha(from->sigma.v1);
+ int32_t alpha_ydiff = (alpha_y1 - alpha_y0) * adjust_factor / kernel_size;
+ int32_t alpha_udiff = (alpha_u1 - alpha_u0) * adjust_factor / kernel_size;
+ int32_t alpha_vdiff = (alpha_v1 - alpha_v0) * adjust_factor / kernel_size;
+
+ int32_t coring_u0 = compute_coring(from->coring.u0);
+ int32_t coring_u1 = compute_coring(from->coring.u1);
+ int32_t coring_v0 = compute_coring(from->coring.v0);
+ int32_t coring_v1 = compute_coring(from->coring.v1);
+ int32_t coring_udiff = (coring_u1 - coring_u0) * adjust_factor / kernel_size;
+ int32_t coring_vdiff = (coring_v1 - coring_v0) * adjust_factor / kernel_size;
+
+ int32_t blending = compute_blending(from->blending.strength);
+
+ (void)size;
+
+ /* alphas are represented in qN.5 format */
+ to->alpha.y0 = alpha_y0;
+ to->alpha.u0 = alpha_u0;
+ to->alpha.v0 = alpha_v0;
+ to->alpha.ydiff = min(max(alpha_ydiff, min_diff), max_diff);
+ to->alpha.udiff = min(max(alpha_udiff, min_diff), max_diff);
+ to->alpha.vdiff = min(max(alpha_vdiff, min_diff), max_diff);
+
+ /* coring parameters are expressed in q1.NN format */
+ to->coring.u0 = coring_u0;
+ to->coring.v0 = coring_v0;
+ to->coring.udiff = min(max(coring_udiff, min_diff), max_diff);
+ to->coring.vdiff = min(max(coring_vdiff, min_diff), max_diff);
+
+ /* blending strength is expressed in q1.NN format */
+ to->blending.strength = blending;
+}
+
+#ifdef ISP2401
+/* (void) = ia_css_xnr3_vmem_encode(*to, *from)
+ * -----------------------------------------------
+ * VMEM Encode Function to translate UV parameters from userspace into ISP space
+ */
+void
+ia_css_xnr3_vmem_encode(
+ struct sh_css_isp_xnr3_vmem_params *to,
+ const struct ia_css_xnr3_config *from,
+ unsigned size)
+{
+ unsigned i, j, base;
+ const unsigned total_blocks = 4;
+ const unsigned shuffle_block = 16;
+
+ (void)from;
+ (void)size;
+
+ /* Init */
+ for (i = 0; i < ISP_VEC_NELEMS; i++) {
+ to->x[0][i] = 0;
+ to->a[0][i] = 0;
+ to->b[0][i] = 0;
+ to->c[0][i] = 0;
+ }
+
+ /* Constraints on "x":
+ * - values should be greater or equal to 0.
+ * - values should be ascending.
+ */
+ assert(x[0] >= 0);
+
+ for (j = 1; j < XNR3_LOOK_UP_TABLE_POINTS; j++) {
+ assert(x[j] >= 0);
+ assert(x[j] > x[j - 1]);
+ }
+
+ /* The implementation of calculating 1/x is based on the availability
+ * of the OP_vec_shuffle16 operation.
+ * A 64-element vector is split up into 4 blocks of 16 elements. Each array is copied to
+ * the vector 4 times (starting at offsets 0, 16, 32 and 48). All array elements are copied or
+ * initialised as described in the KFS. The remaining elements of the vector are set to 0.
+ */
+ /* TODO: guard this code with above assumptions */
+ for (i = 0; i < total_blocks; i++) {
+ base = shuffle_block * i;
+
+ for (j = 0; j < XNR3_LOOK_UP_TABLE_POINTS; j++) {
+ to->x[0][base + j] = x[j];
+ to->a[0][base + j] = a[j];
+ to->b[0][base + j] = b[j];
+ to->c[0][base + j] = c[j];
+ }
+ }
+}
+
+#endif
+/* Dummy function added because the tool expects it. */
+void
+ia_css_xnr3_debug_dtrace(
+ const struct ia_css_xnr3_config *config,
+ unsigned level)
+{
+ (void)config;
+ (void)level;
+}
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_3.0/ia_css_xnr3.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_3.0/ia_css_xnr3.host.h
new file mode 100644
index 000000000000..6a86924a71fe
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_3.0/ia_css_xnr3.host.h
@@ -0,0 +1,42 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_XNR3_HOST_H
+#define __IA_CSS_XNR3_HOST_H
+
+#include "ia_css_xnr3_param.h"
+#include "ia_css_xnr3_types.h"
+
+extern const struct ia_css_xnr3_config default_xnr3_config;
+
+void
+ia_css_xnr3_encode(
+ struct sh_css_isp_xnr3_params *to,
+ const struct ia_css_xnr3_config *from,
+ unsigned size);
+
+#ifdef ISP2401
+void
+ia_css_xnr3_vmem_encode(
+ struct sh_css_isp_xnr3_vmem_params *to,
+ const struct ia_css_xnr3_config *from,
+ unsigned size);
+
+#endif
+void
+ia_css_xnr3_debug_dtrace(
+ const struct ia_css_xnr3_config *config,
+ unsigned level);
+
+#endif /* __IA_CSS_XNR3_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_3.0/ia_css_xnr3_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_3.0/ia_css_xnr3_param.h
new file mode 100644
index 000000000000..06c24e848234
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_3.0/ia_css_xnr3_param.h
@@ -0,0 +1,96 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_XNR3_PARAM_H
+#define __IA_CSS_XNR3_PARAM_H
+
+#include "type_support.h"
+#ifdef ISP2401
+#include "vmem.h" /* needed for VMEM_ARRAY */
+
+#endif
+
+/* Scaling factor of the alpha values: which fixed-point value represents 1.0?
+ * It must be chosen such that 1/min_sigma still fits in an ISP vector
+ * element. */
+#define XNR_ALPHA_SCALE_LOG2 5
+#define XNR_ALPHA_SCALE_FACTOR (1 << XNR_ALPHA_SCALE_LOG2)
+
+/* Scaling factor of the coring values on the ISP. */
+#define XNR_CORING_SCALE_LOG2 (ISP_VEC_ELEMBITS-1)
+#define XNR_CORING_SCALE_FACTOR (1 << XNR_CORING_SCALE_LOG2)
+
+/* Scaling factor of the blending strength on the ISP. */
+#define XNR_BLENDING_SCALE_LOG2 (ISP_VEC_ELEMBITS-1)
+#define XNR_BLENDING_SCALE_FACTOR (1 << XNR_BLENDING_SCALE_LOG2)
+
+/* XNR3 filter size. Must be 11x11, 9x9 or 5x5. */
+#ifdef FLT_KERNEL_9x9
+#define XNR_FILTER_SIZE 9
+#else
+#ifdef FLT_KERNEL_11x11
+#define XNR_FILTER_SIZE 11
+#else
+#define XNR_FILTER_SIZE 5
+#endif
+#endif
+
+/* XNR3 alpha (1/sigma) parameters on the ISP, expressed as a base (0) value
+ * for dark areas, and a scaled diff towards the value for bright areas. */
+struct sh_css_xnr3_alpha_params {
+ int32_t y0;
+ int32_t u0;
+ int32_t v0;
+ int32_t ydiff;
+ int32_t udiff;
+ int32_t vdiff;
+};
+
+/* XNR3 coring parameters on the ISP, expressed as a base (0) value
+ * for dark areas, and a scaled diff towards the value for bright areas. */
+struct sh_css_xnr3_coring_params {
+ int32_t u0;
+ int32_t v0;
+ int32_t udiff;
+ int32_t vdiff;
+};
+
+/* XNR3 blending strength on the ISP. */
+struct sh_css_xnr3_blending_params {
+ int32_t strength;
+};
+
+/* XNR3 ISP parameters */
+struct sh_css_isp_xnr3_params {
+ struct sh_css_xnr3_alpha_params alpha;
+ struct sh_css_xnr3_coring_params coring;
+ struct sh_css_xnr3_blending_params blending;
+};
+
+#ifdef ISP2401
+/*
+ * STRUCT sh_css_isp_xnr3_vmem_params
+ * -----------------------------------------------
+ * ISP VMEM parameters
+ */
+struct sh_css_isp_xnr3_vmem_params {
+ VMEM_ARRAY(x, ISP_VEC_NELEMS);
+ VMEM_ARRAY(a, ISP_VEC_NELEMS);
+ VMEM_ARRAY(b, ISP_VEC_NELEMS);
+ VMEM_ARRAY(c, ISP_VEC_NELEMS);
+};
+
+
+#endif
+#endif /* __IA_CSS_XNR3_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_3.0/ia_css_xnr3_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_3.0/ia_css_xnr3_types.h
new file mode 100644
index 000000000000..8f14d1080651
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_3.0/ia_css_xnr3_types.h
@@ -0,0 +1,98 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_XNR3_TYPES_H
+#define __IA_CSS_XNR3_TYPES_H
+
+/** @file
+* CSS-API header file for Extra Noise Reduction (XNR) parameters.
+*/
+
+/**
+ * \brief Scale of the XNR sigma parameters.
+ * \details The define specifies which fixed-point value represents 1.0.
+ */
+#define IA_CSS_XNR3_SIGMA_SCALE (1 << 10)
+
+/**
+ * \brief Scale of the XNR coring parameters.
+ * \details The define specifies which fixed-point value represents 1.0.
+ */
+#define IA_CSS_XNR3_CORING_SCALE (1 << 15)
+
+/**
+ * \brief Scale of the XNR blending parameter.
+ * \details The define specifies which fixed-point value represents 1.0.
+ */
+#define IA_CSS_XNR3_BLENDING_SCALE (1 << 11)
+
+
+/**
+ * \brief XNR3 Sigma Parameters.
+ * \details Sigma parameters define the strength of the XNR filter.
+ * A higher number means stronger filtering. There are two values for each of
+ * the three YUV planes: one for dark areas and one for bright areas. All
+ * sigma parameters are fixed-point values between 0.0 and 1.0, scaled with
+ * IA_CSS_XNR3_SIGMA_SCALE.
+ */
+struct ia_css_xnr3_sigma_params {
+ int y0; /**< Sigma for Y range similarity in dark area */
+ int y1; /**< Sigma for Y range similarity in bright area */
+ int u0; /**< Sigma for U range similarity in dark area */
+ int u1; /**< Sigma for U range similarity in bright area */
+ int v0; /**< Sigma for V range similarity in dark area */
+ int v1; /**< Sigma for V range similarity in bright area */
+};
+
+/**
+ * \brief XNR3 Coring Parameters
+ * \details Coring parameters define the "coring" strength, which is a soft
+ * thresholding technique to avoid false coloring. There are two values for
+ * each of the two chroma planes: one for dark areas and one for bright areas.
+ * All coring parameters are fixed-point values between 0.0 and 1.0, scaled
+ * with IA_CSS_XNR3_CORING_SCALE. The ineffective value is 0.
+ */
+struct ia_css_xnr3_coring_params {
+ int u0; /**< Coring threshold of U channel in dark area */
+ int u1; /**< Coring threshold of U channel in bright area */
+ int v0; /**< Coring threshold of V channel in dark area */
+ int v1; /**< Coring threshold of V channel in bright area */
+};
+
+/**
+ * \brief XNR3 Blending Parameters
+ * \details Blending parameters define the blending strength of filtered
+ * output pixels with the original chroma pixels from before xnr3. The
+ * blending strength is a fixed-point value between 0.0 and 1.0 (inclusive),
+ * scaled with IA_CSS_XNR3_BLENDING_SCALE.
+ * A higher number applies xnr filtering more strongly. A value of 1.0
+ * disables the blending and returns the xnr3 filtered output, while a
+ * value of 0.0 bypasses the entire xnr3 filter.
+ */
+struct ia_css_xnr3_blending_params {
+ int strength; /**< Blending strength */
+};
+
+/**
+ * \brief XNR3 public parameters.
+ * \details Struct with all parameters for the XNR3 kernel that can be set
+ * from the CSS API.
+ */
+struct ia_css_xnr3_config {
+ struct ia_css_xnr3_sigma_params sigma; /**< XNR3 sigma parameters */
+ struct ia_css_xnr3_coring_params coring; /**< XNR3 coring parameters */
+ struct ia_css_xnr3_blending_params blending; /**< XNR3 blending parameters */
+};
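
As a usage sketch (editorial and hedged: it relies only on the struct fields and scale defines in this header, and the conversion is an arbitrary example rather than a recommended tuning), a caller could derive the fixed-point fields from floating-point strengths like this:

#include "ia_css_xnr3_types.h"

/* Build an XNR3 configuration from floating-point strengths in [0.0, 1.0].
 * Coring is left at its ineffective value of 0. */
static struct ia_css_xnr3_config make_xnr3_config(double sigma, double blend)
{
	struct ia_css_xnr3_config cfg = { .coring = { 0 } };

	cfg.sigma.y0 = (int)(sigma * IA_CSS_XNR3_SIGMA_SCALE);
	cfg.sigma.y1 = cfg.sigma.y0;	/* same strength in bright areas */
	cfg.sigma.u0 = cfg.sigma.u1 = cfg.sigma.y0;
	cfg.sigma.v0 = cfg.sigma.v1 = cfg.sigma.y0;
	cfg.blending.strength = (int)(blend * IA_CSS_XNR3_BLENDING_SCALE);
	return cfg;
}
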
+
+#endif /* __IA_CSS_XNR3_TYPES_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_3.0/ia_css_xnr3_wrapper_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_3.0/ia_css_xnr3_wrapper_param.h
new file mode 100644
index 000000000000..1a98555fd5d9
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/xnr/xnr_3.0/ia_css_xnr3_wrapper_param.h
@@ -0,0 +1,20 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_XNR3_WRAPPER_PARAM_H
+#define __IA_CSS_XNR3_WRAPPER_PARAM_H
+
+#include "ia_css_xnr3_param.h"
+
+#endif
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ynr/ynr_1.0/ia_css_ynr.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ynr/ynr_1.0/ia_css_ynr.host.c
new file mode 100644
index 000000000000..d8dccce772a9
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ynr/ynr_1.0/ia_css_ynr.host.c
@@ -0,0 +1,219 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "ia_css_types.h"
+#include "sh_css_defs.h"
+#include "ia_css_debug.h"
+#include "sh_css_frac.h"
+
+#include "bnr/bnr_1.0/ia_css_bnr.host.h"
+#include "ia_css_ynr.host.h"
+
+const struct ia_css_nr_config default_nr_config = {
+ 16384,
+ 8192,
+ 1280,
+ 0,
+ 0
+};
+
+const struct ia_css_ee_config default_ee_config = {
+ 8192,
+ 128,
+ 2048
+};
+
+void
+ia_css_nr_encode(
+ struct sh_css_isp_ynr_params *to,
+ const struct ia_css_nr_config *from,
+ unsigned size)
+{
+ (void)size;
+ /* YNR (Y Noise Reduction) */
+ to->threshold =
+ uDIGIT_FITTING((unsigned)8192, 16, SH_CSS_BAYER_BITS);
+ to->gain_all =
+ uDIGIT_FITTING(from->ynr_gain, 16, SH_CSS_YNR_GAIN_SHIFT);
+ to->gain_dir =
+ uDIGIT_FITTING(from->ynr_gain, 16, SH_CSS_YNR_GAIN_SHIFT);
+ to->threshold_cb =
+ uDIGIT_FITTING(from->threshold_cb, 16, SH_CSS_BAYER_BITS);
+ to->threshold_cr =
+ uDIGIT_FITTING(from->threshold_cr, 16, SH_CSS_BAYER_BITS);
+}
+
+void
+ia_css_yee_encode(
+ struct sh_css_isp_yee_params *to,
+ const struct ia_css_yee_config *from,
+ unsigned size)
+{
+ int asiWk1 = (int) from->ee.gain;
+ int asiWk2 = asiWk1 / 8;
+ int asiWk3 = asiWk1 / 4;
+
+ (void)size;
+ /* YEE (Y Edge Enhancement) */
+ to->dirthreshold_s =
+ min((uDIGIT_FITTING(from->nr.direction, 16, SH_CSS_BAYER_BITS)
+ << 1),
+ SH_CSS_BAYER_MAXVAL);
+ to->dirthreshold_g =
+ min((uDIGIT_FITTING(from->nr.direction, 16, SH_CSS_BAYER_BITS)
+ << 4),
+ SH_CSS_BAYER_MAXVAL);
+ to->dirthreshold_width_log2 =
+ uFRACTION_BITS_FITTING(8);
+ to->dirthreshold_width =
+ 1 << to->dirthreshold_width_log2;
+ to->detailgain =
+ uDIGIT_FITTING(from->ee.detail_gain, 11,
+ SH_CSS_YEE_DETAIL_GAIN_SHIFT);
+ to->coring_s =
+ (uDIGIT_FITTING((unsigned)56, 16, SH_CSS_BAYER_BITS) *
+ from->ee.threshold) >> 8;
+ to->coring_g =
+ (uDIGIT_FITTING((unsigned)224, 16, SH_CSS_BAYER_BITS) *
+ from->ee.threshold) >> 8;
+ /* 8; // *1.125 ->[s4.8] */
+ to->scale_plus_s =
+ (asiWk1 + asiWk2) >> (11 - SH_CSS_YEE_SCALE_SHIFT);
+ /* 8; // ( * -.25)->[s4.8] */
+ to->scale_plus_g =
+ (0 - asiWk3) >> (11 - SH_CSS_YEE_SCALE_SHIFT);
+ /* 8; // *0.875 ->[s4.8] */
+ to->scale_minus_s =
+ (asiWk1 - asiWk2) >> (11 - SH_CSS_YEE_SCALE_SHIFT);
+ /* 8; // ( *.25 ) ->[s4.8] */
+ to->scale_minus_g =
+ (asiWk3) >> (11 - SH_CSS_YEE_SCALE_SHIFT);
+ to->clip_plus_s =
+ uDIGIT_FITTING((unsigned)32760, 16, SH_CSS_BAYER_BITS);
+ to->clip_plus_g = 0;
+ to->clip_minus_s =
+ uDIGIT_FITTING((unsigned)504, 16, SH_CSS_BAYER_BITS);
+ to->clip_minus_g =
+ uDIGIT_FITTING((unsigned)32256, 16, SH_CSS_BAYER_BITS);
+ to->Yclip = SH_CSS_BAYER_MAXVAL;
+}
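
The "[s4.8]" comments above invite a quick numeric check. The sketch below evaluates the four scale terms for the default gain of 8192 (4.0 in u5.11); it assumes SH_CSS_YEE_SCALE_SHIFT == 8, which is an inference from those comments rather than a value read from sh_css_defs.h.

#include <stdio.h>

/* Editorial illustration of the YEE scale terms for gain == 8192 (4.0 in u5.11). */
int main(void)
{
	int wk1 = 8192;		/* from->ee.gain */
	int wk2 = wk1 / 8;	/* 1024          */
	int wk3 = wk1 / 4;	/* 2048          */
	int shift = 11 - 8;	/* 11 - SH_CSS_YEE_SCALE_SHIFT (assumed 8) */

	printf("scale_plus_s  = %d\n", (wk1 + wk2) >> shift);	/* 1152 =  4.5 in s4.8 */
	printf("scale_plus_g  = %d\n", (0 - wk3) >> shift);	/* -256 = -1.0 in s4.8 */
	printf("scale_minus_s = %d\n", (wk1 - wk2) >> shift);	/*  896 =  3.5 in s4.8 */
	printf("scale_minus_g = %d\n", wk3 >> shift);		/*  256 =  1.0 in s4.8 */
	return 0;
}
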
+
+void
+ia_css_nr_dump(
+ const struct sh_css_isp_ynr_params *ynr,
+ unsigned level)
+{
+ if (!ynr) return;
+ ia_css_debug_dtrace(level,
+ "Y Noise Reduction:\n");
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "ynr_threshold", ynr->threshold);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "ynr_gain_all", ynr->gain_all);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "ynr_gain_dir", ynr->gain_dir);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "ynr_threshold_cb", ynr->threshold_cb);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "ynr_threshold_cr", ynr->threshold_cr);
+}
+
+void
+ia_css_yee_dump(
+ const struct sh_css_isp_yee_params *yee,
+ unsigned level)
+{
+ ia_css_debug_dtrace(level,
+ "Y Edge Enhancement:\n");
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "ynryee_dirthreshold_s",
+ yee->dirthreshold_s);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "ynryee_dirthreshold_g",
+ yee->dirthreshold_g);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "ynryee_dirthreshold_width_log2",
+ yee->dirthreshold_width_log2);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "ynryee_dirthreshold_width",
+ yee->dirthreshold_width);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "yee_detailgain",
+ yee->detailgain);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "yee_coring_s",
+ yee->coring_s);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "yee_coring_g",
+ yee->coring_g);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "yee_scale_plus_s",
+ yee->scale_plus_s);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "yee_scale_plus_g",
+ yee->scale_plus_g);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "yee_scale_minus_s",
+ yee->scale_minus_s);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "yee_scale_minus_g",
+ yee->scale_minus_g);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "yee_clip_plus_s",
+ yee->clip_plus_s);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "yee_clip_plus_g",
+ yee->clip_plus_g);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "yee_clip_minus_s",
+ yee->clip_minus_s);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "yee_clip_minus_g",
+ yee->clip_minus_g);
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
+ "ynryee_Yclip",
+ yee->Yclip);
+}
+
+void
+ia_css_nr_debug_dtrace(
+ const struct ia_css_nr_config *config,
+ unsigned level)
+{
+ ia_css_debug_dtrace(level,
+ "config.direction=%d, "
+ "config.bnr_gain=%d, config.ynr_gain=%d, "
+ "config.threshold_cb=%d, config.threshold_cr=%d\n",
+ config->direction,
+ config->bnr_gain, config->ynr_gain,
+ config->threshold_cb, config->threshold_cr);
+}
+
+void
+ia_css_ee_debug_dtrace(
+ const struct ia_css_ee_config *config,
+ unsigned level)
+{
+ ia_css_debug_dtrace(level,
+ "config.threshold=%d, config.gain=%d, config.detail_gain=%d\n",
+ config->threshold, config->gain, config->detail_gain);
+}
+
+void
+ia_css_init_ynr_state(
+ void/*struct sh_css_isp_ynr_vmem_state*/ *state,
+ size_t size)
+{
+ memset(state, 0, size);
+}
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ynr/ynr_1.0/ia_css_ynr.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ynr/ynr_1.0/ia_css_ynr.host.h
new file mode 100644
index 000000000000..b5730df313ef
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ynr/ynr_1.0/ia_css_ynr.host.h
@@ -0,0 +1,60 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_YNR_HOST_H
+#define __IA_CSS_YNR_HOST_H
+
+#include "ia_css_ynr_types.h"
+#include "ia_css_ynr_param.h"
+
+extern const struct ia_css_nr_config default_nr_config;
+extern const struct ia_css_ee_config default_ee_config;
+
+void
+ia_css_nr_encode(
+ struct sh_css_isp_ynr_params *to,
+ const struct ia_css_nr_config *from,
+ unsigned size);
+
+void
+ia_css_yee_encode(
+ struct sh_css_isp_yee_params *to,
+ const struct ia_css_yee_config *from,
+ unsigned size);
+
+void
+ia_css_nr_dump(
+ const struct sh_css_isp_ynr_params *ynr,
+ unsigned level);
+
+void
+ia_css_yee_dump(
+ const struct sh_css_isp_yee_params *yee,
+ unsigned level);
+
+void
+ia_css_nr_debug_dtrace(
+ const struct ia_css_nr_config *config,
+ unsigned level);
+
+void
+ia_css_ee_debug_dtrace(
+ const struct ia_css_ee_config *config,
+ unsigned level);
+
+void
+ia_css_init_ynr_state(
+ void/*struct sh_css_isp_ynr_vmem_state*/ *state,
+ size_t size);
+#endif /* __IA_CSS_YNR_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ynr/ynr_1.0/ia_css_ynr_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ynr/ynr_1.0/ia_css_ynr_param.h
new file mode 100644
index 000000000000..ad61ec1211e8
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ynr/ynr_1.0/ia_css_ynr_param.h
@@ -0,0 +1,49 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_YNR_PARAM_H
+#define __IA_CSS_YNR_PARAM_H
+
+#include "type_support.h"
+
+/* YNR (Y Noise Reduction) */
+struct sh_css_isp_ynr_params {
+ int32_t threshold;
+ int32_t gain_all;
+ int32_t gain_dir;
+ int32_t threshold_cb;
+ int32_t threshold_cr;
+};
+
+/* YEE (Y Edge Enhancement) */
+struct sh_css_isp_yee_params {
+ int32_t dirthreshold_s;
+ int32_t dirthreshold_g;
+ int32_t dirthreshold_width_log2;
+ int32_t dirthreshold_width;
+ int32_t detailgain;
+ int32_t coring_s;
+ int32_t coring_g;
+ int32_t scale_plus_s;
+ int32_t scale_plus_g;
+ int32_t scale_minus_s;
+ int32_t scale_minus_g;
+ int32_t clip_plus_s;
+ int32_t clip_plus_g;
+ int32_t clip_minus_s;
+ int32_t clip_minus_g;
+ int32_t Yclip;
+};
+
+#endif /* __IA_CSS_YNR_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ynr/ynr_1.0/ia_css_ynr_state.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ynr/ynr_1.0/ia_css_ynr_state.h
new file mode 100644
index 000000000000..b2348b19c3cd
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ynr/ynr_1.0/ia_css_ynr_state.h
@@ -0,0 +1,26 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_YNR_STATE_H
+#define __IA_CSS_YNR_STATE_H
+
+#include "type_support.h"
+#include "vmem.h"
+
+/* YNR (luminance noise reduction) */
+struct sh_css_isp_ynr_vmem_state {
+ VMEM_ARRAY(ynr_buf[4], MAX_VECTORS_PER_BUF_LINE*ISP_NWAY);
+};
+
+#endif /* __IA_CSS_YNR_STATE_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ynr/ynr_1.0/ia_css_ynr_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ynr/ynr_1.0/ia_css_ynr_types.h
new file mode 100644
index 000000000000..3f46655bee57
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ynr/ynr_1.0/ia_css_ynr_types.h
@@ -0,0 +1,81 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_YNR_TYPES_H
+#define __IA_CSS_YNR_TYPES_H
+
+/** @file
+* CSS-API header file for Bayer Noise Reduction (BNR) and YCC Noise Reduction (YNR, CNR).
+*/
+
+/** Configuration used by Bayer Noise Reduction (BNR) and
+ * YCC Noise Reduction (YNR,CNR).
+ *
+ * ISP block: BNR1, YNR1, CNR1
+ * ISP1: BNR1,YNR1,CNR1 are used.
+ * ISP2: BNR1,YNR1,CNR1 are used for Preview/Video.
+ * BNR1,YNR2,CNR2 are used for Still.
+ */
+struct ia_css_nr_config {
+ ia_css_u0_16 bnr_gain; /**< Strength of noise reduction (BNR).
+ u0.16, [0,65535],
+ default 14336(0.21875), ineffective 0 */
+ ia_css_u0_16 ynr_gain; /**< Strength of noise reduction (YNR).
+ u0.16, [0,65535],
+ default 14336(0.21875), ineffective 0 */
+ ia_css_u0_16 direction; /**< Sensitivity of edge (BNR).
+ u0.16, [0,65535],
+ default 512(0.0078125), ineffective 0 */
+ ia_css_u0_16 threshold_cb; /**< Coring threshold for Cb (CNR).
+ This is the same as
+ de_config.c1_coring_threshold.
+ u0.16, [0,65535],
+ default 0(0), ineffective 0 */
+ ia_css_u0_16 threshold_cr; /**< Coring threshold for Cr (CNR).
+ This is the same as
+ de_config.c2_coring_threshold.
+ u0.16, [0,65535],
+ default 0(0), ineffective 0 */
+};
+
+/** Edge Enhancement (sharpen) configuration.
+ *
+ * ISP block: YEE1
+ * ISP1: YEE1 is used.
+ * ISP2: YEE1 is used for Preview/Video.
+ * (YEE2 is used for Still.)
+ */
+struct ia_css_ee_config {
+ ia_css_u5_11 gain; /**< The strength of sharpness.
+ u5.11, [0,65535],
+ default 8192(4.0), ineffective 0 */
+ ia_css_u8_8 threshold; /**< The threshold that divides noises from
+ edge.
+ u8.8, [0,65535],
+ default 256(1.0), ineffective 65535 */
+ ia_css_u5_11 detail_gain; /**< The strength of sharpness in pell-mell
+ area.
+ u5.11, [0,65535],
+ default 2048(1.0), ineffective 0 */
+};
+
+/** YNR and YEE (sharpen) configuration.
+ */
+struct ia_css_yee_config {
+ struct ia_css_nr_config nr; /**< The NR configuration. */
+ struct ia_css_ee_config ee; /**< The EE configuration. */
+};
+
+#endif /* __IA_CSS_YNR_TYPES_H */
+
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ynr/ynr_2/ia_css_ynr2.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ynr/ynr_2/ia_css_ynr2.host.c
new file mode 100644
index 000000000000..44b005004238
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ynr/ynr_2/ia_css_ynr2.host.c
@@ -0,0 +1,125 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "ia_css_types.h"
+#include "sh_css_defs.h"
+#include "ia_css_debug.h"
+#include "assert_support.h"
+
+#include "ia_css_ynr2.host.h"
+
+const struct ia_css_ynr_config default_ynr_config = {
+ 0,
+ 0,
+ 0,
+ 0,
+};
+
+const struct ia_css_fc_config default_fc_config = {
+ 1,
+ 0, /* 0 -> ineffective */
+ 0, /* 0 -> ineffective */
+ 0, /* 0 -> ineffective */
+ 0, /* 0 -> ineffective */
+ (1 << (ISP_VEC_ELEMBITS - 2)), /* 0.5 */
+ (1 << (ISP_VEC_ELEMBITS - 2)), /* 0.5 */
+ (1 << (ISP_VEC_ELEMBITS - 2)), /* 0.5 */
+ (1 << (ISP_VEC_ELEMBITS - 2)), /* 0.5 */
+ (1 << (ISP_VEC_ELEMBITS - 1)) - 1, /* 1 */
+ (1 << (ISP_VEC_ELEMBITS - 1)) - 1, /* 1 */
+ (int16_t)-(1 << (ISP_VEC_ELEMBITS - 1)), /* -1 */
+ (int16_t)-(1 << (ISP_VEC_ELEMBITS - 1)), /* -1 */
+};
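
The fraction comments in the initializer above follow the usual signed fixed-point convention where 1.0 corresponds to 1 << (ISP_VEC_ELEMBITS - 1). A small illustration, assuming ISP_VEC_ELEMBITS == 14 purely to get concrete numbers (the real value comes from the ISP headers and may differ):

#include <stdio.h>

/* Editorial illustration of the default_fc_config fractions. */
int main(void)
{
	const int elembits = 14;		/* assumed for illustration only */
	const int one = 1 << (elembits - 1);	/* full scale, nominally 1.0     */

	printf("0.5 -> %d\n", one / 2);		/* 1 << (ISP_VEC_ELEMBITS - 2)          */
	printf("+1  -> %d\n", one - 1);		/* saturated; +1.0 is not representable */
	printf("-1  -> %d\n", -one);		/* exactly representable                */
	return 0;
}
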
+
+void
+ia_css_ynr_encode(
+ struct sh_css_isp_yee2_params *to,
+ const struct ia_css_ynr_config *from,
+ unsigned size)
+{
+ (void)size;
+ to->edge_sense_gain_0 = from->edge_sense_gain_0;
+ to->edge_sense_gain_1 = from->edge_sense_gain_1;
+ to->corner_sense_gain_0 = from->corner_sense_gain_0;
+ to->corner_sense_gain_1 = from->corner_sense_gain_1;
+}
+
+void
+ia_css_fc_encode(
+ struct sh_css_isp_fc_params *to,
+ const struct ia_css_fc_config *from,
+ unsigned size)
+{
+ (void)size;
+ to->gain_exp = from->gain_exp;
+
+ to->coring_pos_0 = from->coring_pos_0;
+ to->coring_pos_1 = from->coring_pos_1;
+ to->coring_neg_0 = from->coring_neg_0;
+ to->coring_neg_1 = from->coring_neg_1;
+
+ to->gain_pos_0 = from->gain_pos_0;
+ to->gain_pos_1 = from->gain_pos_1;
+ to->gain_neg_0 = from->gain_neg_0;
+ to->gain_neg_1 = from->gain_neg_1;
+
+ to->crop_pos_0 = from->crop_pos_0;
+ to->crop_pos_1 = from->crop_pos_1;
+ to->crop_neg_0 = from->crop_neg_0;
+ to->crop_neg_1 = from->crop_neg_1;
+}
+
+void
+ia_css_ynr_dump(
+ const struct sh_css_isp_yee2_params *yee2,
+ unsigned level);
+
+void
+ia_css_fc_dump(
+ const struct sh_css_isp_fc_params *fc,
+ unsigned level);
+
+void
+ia_css_fc_debug_dtrace(
+ const struct ia_css_fc_config *config,
+ unsigned level)
+{
+ ia_css_debug_dtrace(level,
+ "config.gain_exp=%d, "
+ "config.coring_pos_0=%d, config.coring_pos_1=%d, "
+ "config.coring_neg_0=%d, config.coring_neg_1=%d, "
+ "config.gain_pos_0=%d, config.gain_pos_1=%d, "
+ "config.gain_neg_0=%d, config.gain_neg_1=%d, "
+ "config.crop_pos_0=%d, config.crop_pos_1=%d, "
+ "config.crop_neg_0=%d, config.crop_neg_1=%d\n",
+ config->gain_exp,
+ config->coring_pos_0, config->coring_pos_1,
+ config->coring_neg_0, config->coring_neg_1,
+ config->gain_pos_0, config->gain_pos_1,
+ config->gain_neg_0, config->gain_neg_1,
+ config->crop_pos_0, config->crop_pos_1,
+ config->crop_neg_0, config->crop_neg_1);
+}
+
+void
+ia_css_ynr_debug_dtrace(
+ const struct ia_css_ynr_config *config,
+ unsigned level)
+{
+ ia_css_debug_dtrace(level,
+ "config.edge_sense_gain_0=%d, config.edge_sense_gain_1=%d, "
+ "config.corner_sense_gain_0=%d, config.corner_sense_gain_1=%d\n",
+ config->edge_sense_gain_0, config->edge_sense_gain_1,
+ config->corner_sense_gain_0, config->corner_sense_gain_1);
+}
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ynr/ynr_2/ia_css_ynr2.host.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ynr/ynr_2/ia_css_ynr2.host.h
new file mode 100644
index 000000000000..71e89c469e4c
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ynr/ynr_2/ia_css_ynr2.host.h
@@ -0,0 +1,56 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_YNR2_HOST_H
+#define __IA_CSS_YNR2_HOST_H
+
+#include "ia_css_ynr2_types.h"
+#include "ia_css_ynr2_param.h"
+
+extern const struct ia_css_ynr_config default_ynr_config;
+extern const struct ia_css_fc_config default_fc_config;
+
+void
+ia_css_ynr_encode(
+ struct sh_css_isp_yee2_params *to,
+ const struct ia_css_ynr_config *from,
+ unsigned size);
+
+void
+ia_css_fc_encode(
+ struct sh_css_isp_fc_params *to,
+ const struct ia_css_fc_config *from,
+ unsigned size);
+
+void
+ia_css_ynr_dump(
+ const struct sh_css_isp_yee2_params *yee2,
+ unsigned level);
+
+void
+ia_css_fc_dump(
+ const struct sh_css_isp_fc_params *fc,
+ unsigned level);
+
+void
+ia_css_fc_debug_dtrace(
+ const struct ia_css_fc_config *config,
+ unsigned level);
+
+void
+ia_css_ynr_debug_dtrace(
+ const struct ia_css_ynr_config *config,
+ unsigned level);
+
+#endif /* __IA_CSS_YNR2_HOST_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ynr/ynr_2/ia_css_ynr2_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ynr/ynr_2/ia_css_ynr2_param.h
new file mode 100644
index 000000000000..e56b695bef27
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ynr/ynr_2/ia_css_ynr2_param.h
@@ -0,0 +1,45 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_YNR2_PARAM_H
+#define __IA_CSS_YNR2_PARAM_H
+
+#include "type_support.h"
+
+/* YNR (Y Noise Reduction), YEE (Y Edge Enhancement) */
+struct sh_css_isp_yee2_params {
+ int32_t edge_sense_gain_0;
+ int32_t edge_sense_gain_1;
+ int32_t corner_sense_gain_0;
+ int32_t corner_sense_gain_1;
+};
+
+/* Fringe Control */
+struct sh_css_isp_fc_params {
+ int32_t gain_exp;
+ uint16_t coring_pos_0;
+ uint16_t coring_pos_1;
+ uint16_t coring_neg_0;
+ uint16_t coring_neg_1;
+ int32_t gain_pos_0;
+ int32_t gain_pos_1;
+ int32_t gain_neg_0;
+ int32_t gain_neg_1;
+ int32_t crop_pos_0;
+ int32_t crop_pos_1;
+ int32_t crop_neg_0;
+ int32_t crop_neg_1;
+};
+
+#endif /* __IA_CSS_YNR2_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ynr/ynr_2/ia_css_ynr2_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ynr/ynr_2/ia_css_ynr2_types.h
new file mode 100644
index 000000000000..e0a0b10ac5fa
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ynr/ynr_2/ia_css_ynr2_types.h
@@ -0,0 +1,94 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_YNR2_TYPES_H
+#define __IA_CSS_YNR2_TYPES_H
+
+/** @file
+* CSS-API header file for Y(Luma) Noise Reduction.
+*/
+
+/** Y(Luma) Noise Reduction configuration.
+ *
+ * ISP block: YNR2 & YEE2
+ * (ISP1: YNR1 and YEE1 are used.)
+ * (ISP2: YNR1 and YEE1 are used for Preview/Video.)
+ * ISP2: YNR2 and YEE2 are used for Still.
+ */
+struct ia_css_ynr_config {
+ uint16_t edge_sense_gain_0; /**< Sensitivity of edge in dark area.
+ u13.0, [0,8191],
+ default 1000, ineffective 0 */
+ uint16_t edge_sense_gain_1; /**< Sensitivity of edge in bright area.
+ u13.0, [0,8191],
+ default 1000, ineffective 0 */
+ uint16_t corner_sense_gain_0; /**< Sensitivity of corner in dark area.
+ u13.0, [0,8191],
+ default 1000, ineffective 0 */
+ uint16_t corner_sense_gain_1; /**< Sensitivity of corner in bright area.
+ u13.0, [0,8191],
+ default 1000, ineffective 0 */
+};
+
+/** Fringe Control configuration.
+ *
+ * ISP block: FC2 (FC2 is used with YNR2/YEE2.)
+ * (ISP1: FC2 is not used.)
+ * (ISP2: FC2 is not used for Preview/Video.)
+ * ISP2: FC2 is used for Still.
+ */
+struct ia_css_fc_config {
+ uint8_t gain_exp; /**< Common exponent of gains.
+ u8.0, [0,13],
+ default 1, ineffective 0 */
+ uint16_t coring_pos_0; /**< Coring threshold for positive edge in dark area.
+ u0.13, [0,8191],
+ default 0(0), ineffective 0 */
+ uint16_t coring_pos_1; /**< Coring threshold for positive edge in bright area.
+ u0.13, [0,8191],
+ default 0(0), ineffective 0 */
+ uint16_t coring_neg_0; /**< Coring threshold for negative edge in dark area.
+ u0.13, [0,8191],
+ default 0(0), ineffective 0 */
+ uint16_t coring_neg_1; /**< Coring threshold for negative edge in bright area.
+ u0.13, [0,8191],
+ default 0(0), ineffective 0 */
+ uint16_t gain_pos_0; /**< Gain for positive edge in dark area.
+ u0.13, [0,8191],
+ default 4096(0.5), ineffective 0 */
+ uint16_t gain_pos_1; /**< Gain for positive edge in bright area.
+ u0.13, [0,8191],
+ default 4096(0.5), ineffective 0 */
+ uint16_t gain_neg_0; /**< Gain for negative edge in dark area.
+ u0.13, [0,8191],
+ default 4096(0.5), ineffective 0 */
+ uint16_t gain_neg_1; /**< Gain for negative edge in bright area.
+ u0.13, [0,8191],
+ default 4096(0.5), ineffective 0 */
+ uint16_t crop_pos_0; /**< Limit for positive edge in dark area.
+ u0.13, [0,8191],
+ default/ineffective 8191(almost 1.0) */
+ uint16_t crop_pos_1; /**< Limit for positive edge in bright area.
+ u0.13, [0,8191],
+ default/ineffective 8191(almost 1.0) */
+ int16_t crop_neg_0; /**< Limit for negative edge in dark area.
+ s0.13, [-8192,0],
+ default/ineffective -8192(-1.0) */
+ int16_t crop_neg_1; /**< Limit for negative edge in bright area.
+ s0.13, [-8192,0],
+ default/ineffective -8192(-1.0) */
+};
+
+#endif /* __IA_CSS_YNR2_TYPES_H */
+
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ynr/ynr_2/ia_css_ynr_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ynr/ynr_2/ia_css_ynr_param.h
new file mode 100644
index 000000000000..48fb7d22d7c1
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ynr/ynr_2/ia_css_ynr_param.h
@@ -0,0 +1,20 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_YNRX_PARAM_H
+#define __IA_CSS_YNRX_PARAM_H
+
+#include "ia_css_ynr2_param.h"
+
+#endif /* __IA_CSS_YNRX_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ynr/ynr_2/ia_css_ynr_state.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ynr/ynr_2/ia_css_ynr_state.h
new file mode 100644
index 000000000000..2516dd3dc12b
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/ynr/ynr_2/ia_css_ynr_state.h
@@ -0,0 +1,21 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_YNR2_STATE_H
+#define __IA_CSS_YNR2_STATE_H
+
+/* Reuse YNR1 states */
+#include "../ynr_1.0/ia_css_ynr_state.h"
+
+#endif /* __IA_CSS_YNR2_STATE_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/yuv_ls/yuv_ls_1.0/ia_css_yuv_load_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/yuv_ls/yuv_ls_1.0/ia_css_yuv_load_param.h
new file mode 100644
index 000000000000..400c6790cbf5
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/yuv_ls/yuv_ls_1.0/ia_css_yuv_load_param.h
@@ -0,0 +1,20 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_YUV_LOAD_PARAM_H
+#define __IA_CSS_YUV_LOAD_PARAM_H
+
+#include "ia_css_yuv_ls_param.h"
+
+#endif /* __IA_CSS_YUV_LOAD_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/yuv_ls/yuv_ls_1.0/ia_css_yuv_ls_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/yuv_ls/yuv_ls_1.0/ia_css_yuv_ls_param.h
new file mode 100644
index 000000000000..63a8703c9c44
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/yuv_ls/yuv_ls_1.0/ia_css_yuv_ls_param.h
@@ -0,0 +1,39 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_YUV_LS_PARAM_H
+#define __IA_CSS_YUV_LS_PARAM_H
+
+#include "type_support.h"
+#ifndef ISP2401
+
+/* The number of load/store kernels in a pipeline can be greater than one.
+ * A kernel can consume more than one input or can produce more
+ * than one output.
+ */
+#define NUM_YUV_LS 2
+
+/** YUV load/store */
+struct sh_css_isp_yuv_ls_isp_config {
+ unsigned base_address[NUM_YUV_LS];
+ unsigned width[NUM_YUV_LS];
+ unsigned height[NUM_YUV_LS];
+ unsigned stride[NUM_YUV_LS];
+};
+
+#else
+#include "../../io_ls/common/ia_css_common_io_types.h"
+#endif
+
+#endif /* __IA_CSS_YUV_LS_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/yuv_ls/yuv_ls_1.0/ia_css_yuv_store_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/yuv_ls/yuv_ls_1.0/ia_css_yuv_store_param.h
new file mode 100644
index 000000000000..69c474ea1ffd
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/yuv_ls/yuv_ls_1.0/ia_css_yuv_store_param.h
@@ -0,0 +1,21 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IA_CSS_YUV_STORE_PARAM_H
+#define __IA_CSS_YUV_STORE_PARAM_H
+
+#include "ia_css_yuv_ls_param.h"
+
+
+#endif /* __IA_CSS_YUV_STORE_PARAM_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/modes/interface/input_buf.isp.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/modes/interface/input_buf.isp.h
new file mode 100644
index 000000000000..32714d5870cf
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/modes/interface/input_buf.isp.h
@@ -0,0 +1,73 @@
+#ifndef ISP2401
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#else
+/**
+Support for Intel Camera Imaging ISP subsystem.
+Copyright (c) 2010 - 2015, Intel Corporation.
+
+This program is free software; you can redistribute it and/or modify it
+under the terms and conditions of the GNU General Public License,
+version 2, as published by the Free Software Foundation.
+
+This program is distributed in the hope it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+more details.
+*/
+#endif
+
+#ifndef _INPUT_BUF_ISP_H_
+#define _INPUT_BUF_ISP_H_
+
+/* Temporary include, since IA_CSS_BINARY_MODE_COPY is still needed */
+#include "sh_css_defs.h"
+#include "isp_const.h" /* MAX_VECTORS_PER_INPUT_LINE */
+
+#define INPUT_BUF_HEIGHT 2 /* double buffer */
+#define INPUT_BUF_LINES 2
+
+#ifndef ENABLE_CONTINUOUS
+#define ENABLE_CONTINUOUS 0
+#endif
+
+/* In continuous mode, the input buffer must have a fixed size for all binaries
+ * and sit at a fixed address, since it will be used by the SP. */
+#define EXTRA_INPUT_VECTORS 2 /* For left padding */
+#define MAX_VECTORS_PER_INPUT_LINE_CONT (CEIL_DIV(SH_CSS_MAX_SENSOR_WIDTH, ISP_NWAY) + EXTRA_INPUT_VECTORS)
+
+/* The input buffer should be on a fixed address in vmem, for continuous capture */
+#define INPUT_BUF_ADDR 0x0
+#if (defined(__ISP) && (!defined(MODE) || MODE != IA_CSS_BINARY_MODE_COPY))
+
+#if ENABLE_CONTINUOUS
+typedef struct {
+ tmemvectoru raw[INPUT_BUF_HEIGHT][INPUT_BUF_LINES][MAX_VECTORS_PER_INPUT_LINE_CONT]; /* 2 bayer lines */
+ /* Two more lines for SP raw copy efficiency */
+#ifndef ENABLE_REDUCED_INPUT_BUFFER
+ /* "Workaround" solution in the case that space needed vmem exceeds the size of the vmem. */
+ /* Since in theory this buffer is not needed for IPU 2.2/2.3, */
+ /* the workaround solution will not be needed (and the whole buffer) after the code refactoring. */
+ tmemvectoru _raw[INPUT_BUF_HEIGHT][INPUT_BUF_LINES][MAX_VECTORS_PER_INPUT_LINE_CONT]; /* 2 bayer lines */
+#endif
+} input_line_type;
+#else /* ENABLE_CONTINUOUS == 0 */
+typedef struct {
+ tmemvectoru raw[INPUT_BUF_HEIGHT][INPUT_BUF_LINES][MAX_VECTORS_PER_INPUT_LINE]; /* 2 bayer lines */
+} input_line_type;
+#endif /* ENABLE_CONTINUOUS */
+
+#endif /*MODE*/
+
+#endif /* _INPUT_BUF_ISP_H_ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/modes/interface/isp_const.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/modes/interface/isp_const.h
new file mode 100644
index 000000000000..005eaaa9eb6c
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/modes/interface/isp_const.h
@@ -0,0 +1,498 @@
+#ifndef ISP2401
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#else
+/**
+Support for Intel Camera Imaging ISP subsystem.
+Copyright (c) 2010 - 2015, Intel Corporation.
+
+This program is free software; you can redistribute it and/or modify it
+under the terms and conditions of the GNU General Public License,
+version 2, as published by the Free Software Foundation.
+
+This program is distributed in the hope it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+more details.
+*/
+#endif
+
+#ifndef _COMMON_ISP_CONST_H_
+#define _COMMON_ISP_CONST_H_
+
+/*#include "isp.h"*/ /* ISP_VEC_NELEMS */
+
+/* Binary independent constants */
+
+#ifndef NO_HOIST
+# define NO_HOIST HIVE_ATTRIBUTE (( no_hoist ))
+#endif
+
+#define NO_HOIST_CSE HIVE_ATTRIBUTE ((no_hoist, no_cse))
+
+#define UNION struct /* Union constructors not allowed in C++ */
+
+/* ISP binary identifiers.
+ These determine the order in which the binaries are looked up; do not change
+ this!
+ Also, the SP firmware uses this same order (isp_loader.hive.c).
+ Also, gen_firmware.c uses this order in its firmware_header.
+*/
+/* The binary id is used in pre-processor expressions so we cannot
+ * use an enum here. */
+ /* 24xx pipelines*/
+#define SH_CSS_BINARY_ID_COPY 0
+#define SH_CSS_BINARY_ID_BAYER_DS 1
+#define SH_CSS_BINARY_ID_VF_PP_FULL 2
+#define SH_CSS_BINARY_ID_VF_PP_OPT 3
+#define SH_CSS_BINARY_ID_YUV_SCALE 4
+#define SH_CSS_BINARY_ID_CAPTURE_PP 5
+#define SH_CSS_BINARY_ID_PRE_ISP 6
+#define SH_CSS_BINARY_ID_PRE_ISP_ISP2 7
+#define SH_CSS_BINARY_ID_GDC 8
+#define SH_CSS_BINARY_ID_POST_ISP 9
+#define SH_CSS_BINARY_ID_POST_ISP_ISP2 10
+#define SH_CSS_BINARY_ID_ANR 11
+#define SH_CSS_BINARY_ID_ANR_ISP2 12
+#define SH_CSS_BINARY_ID_PREVIEW_CONT_DS 13
+#define SH_CSS_BINARY_ID_PREVIEW_DS 14
+#define SH_CSS_BINARY_ID_PREVIEW_DEC 15
+#define SH_CSS_BINARY_ID_PREVIEW_CONT_BDS125_ISP2 16
+#define SH_CSS_BINARY_ID_PREVIEW_CONT_DPC_BDS150_ISP2 17
+#define SH_CSS_BINARY_ID_PREVIEW_CONT_BDS150_ISP2 18
+#define SH_CSS_BINARY_ID_PREVIEW_CONT_DPC_BDS200_ISP2 19
+#define SH_CSS_BINARY_ID_PREVIEW_CONT_BDS200_ISP2 20
+#define SH_CSS_BINARY_ID_PREVIEW_DZ 21
+#define SH_CSS_BINARY_ID_PREVIEW_DZ_ISP2 22
+#define SH_CSS_BINARY_ID_PRIMARY_DS 23
+#define SH_CSS_BINARY_ID_PRIMARY_VAR 24
+#define SH_CSS_BINARY_ID_PRIMARY_VAR_ISP2 25
+#define SH_CSS_BINARY_ID_PRIMARY_SMALL 26
+#define SH_CSS_BINARY_ID_PRIMARY_STRIPED 27
+#define SH_CSS_BINARY_ID_PRIMARY_STRIPED_ISP2 28
+#define SH_CSS_BINARY_ID_PRIMARY_8MP 29
+#define SH_CSS_BINARY_ID_PRIMARY_14MP 30
+#define SH_CSS_BINARY_ID_PRIMARY_16MP 31
+#define SH_CSS_BINARY_ID_PRIMARY_REF 32
+#define SH_CSS_BINARY_ID_PRIMARY_ISP261_STAGE0 33
+#define SH_CSS_BINARY_ID_PRIMARY_ISP261_STAGE1 34
+#define SH_CSS_BINARY_ID_PRIMARY_ISP261_STAGE2 35
+#define SH_CSS_BINARY_ID_PRIMARY_ISP261_STAGE3 36
+#define SH_CSS_BINARY_ID_PRIMARY_ISP261_STAGE4 37
+#define SH_CSS_BINARY_ID_PRIMARY_ISP261_STAGE5 38
+#define SH_CSS_BINARY_ID_VIDEO_OFFLINE 39
+#define SH_CSS_BINARY_ID_VIDEO_DS 40
+#define SH_CSS_BINARY_ID_VIDEO_YUV_DS 41
+#define SH_CSS_BINARY_ID_VIDEO_DZ 42
+#define SH_CSS_BINARY_ID_VIDEO_DZ_2400_ONLY 43
+#define SH_CSS_BINARY_ID_VIDEO_HIGH 44
+#define SH_CSS_BINARY_ID_VIDEO_NODZ 45
+#define SH_CSS_BINARY_ID_VIDEO_CONT_MULTIBDS_ISP2_MIN 46
+#define SH_CSS_BINARY_ID_VIDEO_CONT_BDS_300_600_ISP2_MIN 47
+#define SH_CSS_BINARY_ID_VIDEO_CONT_DPC_BDS150_ISP2_MIN 48
+#define SH_CSS_BINARY_ID_VIDEO_CONT_BDS150_ISP2_MIN 49
+#define SH_CSS_BINARY_ID_VIDEO_CONT_DPC_BDS200_ISP2_MIN 50
+#define SH_CSS_BINARY_ID_VIDEO_CONT_BDS200_ISP2_MIN 51
+#define SH_CSS_BINARY_ID_VIDEO_CONT_NOBDS_ISP2_MIN 52
+#define SH_CSS_BINARY_ID_VIDEO_DZ_ISP2_MIN 53
+#define SH_CSS_BINARY_ID_VIDEO_DZ_ISP2 54
+#define SH_CSS_BINARY_ID_VIDEO_LP_ISP2 55
+#define SH_CSS_BINARY_ID_RESERVED1 56
+#define SH_CSS_BINARY_ID_ACCELERATION 57
+#define SH_CSS_BINARY_ID_PRE_DE_ISP2 58
+#define SH_CSS_BINARY_ID_KERNEL_TEST_LOAD_STORE 59
+#define SH_CSS_BINARY_ID_CAPTURE_PP_BLI 60
+#define SH_CSS_BINARY_ID_CAPTURE_PP_LDC 61
+#ifdef ISP2401
+#define SH_CSS_BINARY_ID_PRIMARY_STRIPED_ISP2_XNR 62
+#endif
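+
+/* For illustration only: because the ids above are plain #defines, a
+ * binary-specific translation unit can select code with the pre-processor,
+ * e.g. (hypothetical sketch):
+ *
+ *   #if BINARY_ID == SH_CSS_BINARY_ID_POST_ISP
+ *   ...post-isp specific code...
+ *   #endif
+ *
+ * which would not be possible with enum constants.
+ */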
+
+/* skycam kerneltest pipelines */
+#ifndef ISP2401
+#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_NORM 120
+#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_NORM_STRIPED 121
+#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_LIN 122
+#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_LIN_STRIPED 123
+#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_ACC_SHD 124
+#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_ACC_SHD_STRIPED 125
+#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_ACC_AWB 126
+#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_3A 127
+#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_3A_STRIPED 128
+#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_ACC_AF 129
+#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_OBGRID 130
+#define SH_CSS_BINARY_ID_VIDEO_TEST_ACC_BAYER_DENOISE 131
+#define SH_CSS_BINARY_ID_VIDEO_TEST_ACC_BAYER_DENOISE_STRIPED 132
+#define SH_CSS_BINARY_ID_VIDEO_TEST_ACC_DEMOSAIC 133
+#define SH_CSS_BINARY_ID_VIDEO_TEST_ACC_YUVP1_C0 134
+#define SH_CSS_BINARY_ID_VIDEO_TEST_ACC_YUVP2 135
+#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_REF 136
+#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_REF_STRIPED 137
+#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_XNR_REF 138
+#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_DVS 139
+#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_XNR 140
+#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_XNR_STRIPED 141
+#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_XNR_BLENDING 142
+#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_TNR_BLOCK 143
+#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_ACC_AE 144
+#define SH_CSS_BINARY_ID_VIDEO_RAW 145
+#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_ACC_AWB_FR 146
+#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_DM_RGBPP 147
+#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_DM_RGBPP_STRIPED 148
+#define SH_CSS_BINARY_ID_VIDEO_TEST_ACC_ANR 149
+#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_IF 150
+#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_IF_STRIPED 151
+#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_OUTPUT_SYSTEM 152
+#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_TNR_STRIPED 153
+#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_DVS_STRIPED 154
+#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_OBGRID_STRIPED 155
+#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_COPY_YUV 156
+#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_COPY_YUV_BLOCK 157
+#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_COPY_YUV16_BLOCK 158
+#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_COPY_YUV16_STRIPED 159
+#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_COPY_BLOCK_STRIPED 160
+#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_INPUT_YUV 161
+#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_OUTPUT_YUV 162
+#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_OUTPUT_YUV_16 163
+#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_OUTPUT_SPLIT 164
+#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_OUTPUT_SYSTEM_STRIPED 165
+
+#else
+#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_NORM 121
+#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_NORM_STRIPED 122
+#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_OBGRID 123
+#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_OBGRID_STRIPED 124
+#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_LIN 125
+#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_LIN_STRIPED 126
+#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_ACC_SHD 127
+#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_ACC_SHD_STRIPED 128
+#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_ACC_AE 129
+#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_ACC_AWB 130
+#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_ACC_AF 131
+#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_ACC_AWB_FR 132
+#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_3A 133
+#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_3A_STRIPED 134
+#define SH_CSS_BINARY_ID_VIDEO_TEST_ACC_BAYER_DENOISE 135
+#define SH_CSS_BINARY_ID_VIDEO_TEST_ACC_BAYER_DENOISE_STRIPED 136
+#define SH_CSS_BINARY_ID_VIDEO_TEST_ACC_ANR 137
+#define SH_CSS_BINARY_ID_VIDEO_TEST_ACC_ANR_STRIPED 138
+#define SH_CSS_BINARY_ID_VIDEO_TEST_ACC_DEMOSAIC 139
+#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_DM_RGBPP 140
+#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_DM_RGBPP_STRIPED 141
+#define SH_CSS_BINARY_ID_VIDEO_TEST_ACC_YUVP1_C0 142
+#define SH_CSS_BINARY_ID_VIDEO_TEST_ACC_YUVP2 143
+#define SH_CSS_BINARY_ID_VIDEO_TEST_ACC_YUVP2_STRIPED 144
+#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_XNR_REF 145
+#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_XNR 146
+#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_XNR_STRIPED 147
+#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_XNR_BLENDING 148
+#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_REF 149
+#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_REF_STRIPED 150
+#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_DVS 151
+#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_DVS_STRIPED 152
+#define SH_CSS_BINARY_ID_VIDEO_TEST_ACC_DVS_STAT_C0 153
+#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_TNR_BLOCK 154
+#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_TNR_STRIPED 155
+#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_OUTPUT_SYSTEM 156
+#define SH_CSS_BINARY_ID_VIDEO_RAW 157
+#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_COPY_YUV 158
+#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_COPY_YUV_BLOCK 159
+#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_COPY_YUV16_BLOCK 160
+#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_COPY_YUV16_STRIPED 161
+#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_COPY_BLOCK_STRIPED 162
+#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_INPUT_YUV 163
+#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_OUTPUT_YUV 164
+#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_OUTPUT_YUV_16 165
+#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_OUTPUT_SPLIT 166
+#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_OUTPUT_SYSTEM_STRIPED 167
+#define SH_CSS_BINARY_ID_COPY_KERNELTEST_OUTPUT_SYSTEM 168
+#endif
+
+/* skycam partial test pipelines*/
+#ifndef ISP2401
+#define SH_CSS_BINARY_ID_IF_TO_DPC 201
+#define SH_CSS_BINARY_ID_IF_TO_BDS 202
+#else
+#define SH_CSS_BINARY_ID_IF_TO_BDS 201
+#define SH_CSS_BINARY_ID_IF_TO_BDS_STRIPED 202
+#endif
+#define SH_CSS_BINARY_ID_IF_TO_NORM 203
+#ifndef ISP2401
+#define SH_CSS_BINARY_ID_IF_TO_OB 204
+#define SH_CSS_BINARY_ID_IF_TO_LIN 205
+#define SH_CSS_BINARY_ID_IF_TO_SHD 206
+#define SH_CSS_BINARY_ID_IF_TO_BNR 207
+#define SH_CSS_BINARY_ID_IF_TO_RGBPP_NV12_16 208
+#define SH_CSS_BINARY_ID_IF_TO_RGBPP 210
+#define SH_CSS_BINARY_ID_IF_TO_YUVP1 211
+#define SH_CSS_BINARY_ID_IF_TO_DM 214
+#define SH_CSS_BINARY_ID_IF_TO_YUVP2_C0 216
+#define SH_CSS_BINARY_ID_IF_TO_YUVP2_ANR_VIA_ISP 217
+#define SH_CSS_BINARY_ID_VIDEO_IF_TO_DVS 218
+#define SH_CSS_BINARY_ID_VIDEO_IF_TO_TNR 219
+#define SH_CSS_BINARY_ID_IF_TO_BDS_STRIPED 224
+#define SH_CSS_BINARY_ID_VIDEO_TEST_ACC_ANR_STRIPED 225
+#define SH_CSS_BINARY_ID_VIDEO_TEST_ACC_YUVP2_STRIPED 227
+#define SH_CSS_BINARY_ID_IF_TO_BDS_RGBP_DVS_STAT_C0 228
+#define SH_CSS_BINARY_ID_IF_TO_BDS_RGBP_DVS_STAT_C0_STRIPED 229
+#define SH_CSS_BINARY_ID_IF_TO_REF 236
+#define SH_CSS_BINARY_ID_IF_TO_DVS_STRIPED 237
+#define SH_CSS_BINARY_ID_IF_TO_YUVP2_STRIPED 238
+#define SH_CSS_BINARY_ID_IF_TO_YUVP1_STRIPED 239
+#define SH_CSS_BINARY_ID_IF_TO_RGBPP_STRIPED 240
+#define SH_CSS_BINARY_ID_IF_TO_ANR_STRIPED 241
+#define SH_CSS_BINARY_ID_IF_TO_BNR_STRIPED 242
+#define SH_CSS_BINARY_ID_IF_TO_SHD_STRIPED 243
+#define SH_CSS_BINARY_ID_IF_TO_LIN_STRIPED 244
+#define SH_CSS_BINARY_ID_IF_TO_OB_STRIPED 245
+#define SH_CSS_BINARY_ID_IF_TO_NORM_STRIPED 248
+#define SH_CSS_BINARY_ID_COPY_KERNELTEST_OUTPUT_SYSTEM 253
+#define SH_CSS_BINARY_ID_IF_TO_XNR 256
+#define SH_CSS_BINARY_ID_IF_TO_XNR_STRIPED 257
+#define SH_CSS_BINARY_ID_IF_TO_REF_STRIPED 258
+#define SH_CSS_BINARY_ID_VIDEO_IF_TO_OSYS 259
+#define SH_CSS_BINARY_ID_IF_TO_YUVP1_C0 262
+#define SH_CSS_BINARY_ID_IF_TO_XNR_PRIMARY 263
+#define SH_CSS_BINARY_ID_IF_TO_XNR_PRIMARY_STRIPED 264
+#define SH_CSS_BINARY_ID_IF_TO_ANR 265
+#define SH_CSS_BINARY_ID_VIDEO_TEST_ACC_DVS_STAT_C0 266
+#define SH_CSS_BINARY_ID_VIDEO_IF_TO_OSYS_STRIPED 270
+#define SH_CSS_BINARY_ID_IF_TO_OSYS_PRIMARY 276
+#define SH_CSS_BINARY_ID_IF_TO_OSYS_PRIMARY_STRIPED 277
+#define SH_CSS_BINARY_ID_IF_TO_YUVP1_C0_STRIPED 278
+#else
+#define SH_CSS_BINARY_ID_IF_TO_NORM_STRIPED 204
+#define SH_CSS_BINARY_ID_IF_TO_OB 205
+#define SH_CSS_BINARY_ID_IF_TO_OB_STRIPED 206
+#define SH_CSS_BINARY_ID_IF_TO_LIN 207
+#define SH_CSS_BINARY_ID_IF_TO_LIN_STRIPED 208
+#define SH_CSS_BINARY_ID_IF_TO_SHD 209
+#define SH_CSS_BINARY_ID_IF_TO_SHD_STRIPED 210
+#define SH_CSS_BINARY_ID_IF_TO_BNR 211
+#define SH_CSS_BINARY_ID_IF_TO_BNR_STRIPED 212
+#define SH_CSS_BINARY_ID_IF_TO_ANR 213
+#define SH_CSS_BINARY_ID_IF_TO_ANR_STRIPED 214
+#define SH_CSS_BINARY_ID_IF_TO_DM 215
+#define SH_CSS_BINARY_ID_IF_TO_BDS_RGBP_DVS_STAT_C0 216
+#define SH_CSS_BINARY_ID_IF_TO_BDS_RGBP_DVS_STAT_C0_STRIPED 217
+#define SH_CSS_BINARY_ID_IF_TO_RGBPP 218
+#define SH_CSS_BINARY_ID_IF_TO_RGBPP_NV12_16 219
+#define SH_CSS_BINARY_ID_IF_TO_RGBPP_STRIPED 220
+#define SH_CSS_BINARY_ID_IF_TO_YUVP1 221
+#define SH_CSS_BINARY_ID_IF_TO_YUVP1_STRIPED 222
+#define SH_CSS_BINARY_ID_IF_TO_YUVP1_C0 223
+#define SH_CSS_BINARY_ID_IF_TO_YUVP2_C0 224
+#define SH_CSS_BINARY_ID_IF_TO_YUVP2_STRIPED 225
+#define SH_CSS_BINARY_ID_IF_TO_XNR 226
+#define SH_CSS_BINARY_ID_IF_TO_XNR_STRIPED 227
+#define SH_CSS_BINARY_ID_IF_TO_XNR_PRIMARY 228
+#define SH_CSS_BINARY_ID_IF_TO_XNR_PRIMARY_STRIPED 229
+#define SH_CSS_BINARY_ID_IF_TO_REF 230
+#define SH_CSS_BINARY_ID_IF_TO_REF_STRIPED 231
+#define SH_CSS_BINARY_ID_VIDEO_IF_TO_DVS 232
+#define SH_CSS_BINARY_ID_IF_TO_DVS_STRIPED 233
+#define SH_CSS_BINARY_ID_VIDEO_IF_TO_TNR 234
+#define SH_CSS_BINARY_ID_VIDEO_IF_TO_OSYS 235
+#define SH_CSS_BINARY_ID_VIDEO_IF_TO_OSYS_STRIPED 236
+#define SH_CSS_BINARY_ID_IF_TO_OSYS_PRIMARY 237
+#define SH_CSS_BINARY_ID_IF_TO_OSYS_PRIMARY_STRIPED 238
+#define SH_CSS_BINARY_ID_IF_TO_YUVP1_C0_STRIPED 239
+#define SH_CSS_BINARY_ID_VIDEO_YUVP1_TO_OSYS 240
+#define SH_CSS_BINARY_ID_IF_TO_OSYS_PREVIEW 241
+#define SH_CSS_BINARY_ID_IF_TO_OSYS_PREVIEW_STRIPED 242
+#endif
+
+/* Skycam IR camera binaries */
+#ifndef ISP2401
+#define SH_CSS_BINARY_ID_IR_IF_TO_OSYS_NO_XNR 300
+#define SH_CSS_BINARY_ID_VIDEO_IR_IF_TO_OSYS_NO_DVS_NO_TNR_NO_XNR 301
+#define SH_CSS_BINARY_ID_IR_IF_TO_OSYS_NO_XNR_NO_DVS_PRIMARY 302
+#else
+#define SH_CSS_BINARY_ID_IR_IF_TO_OSYS 300
+#define SH_CSS_BINARY_ID_IR_IF_TO_OSYS_NO_TNR3 301
+#define SH_CSS_BINARY_ID_IR_IF_TO_OSYS_PRIMARY 302
+
+/* Binaries under development */
+#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_TNR3 401
+#define SH_CSS_BINARY_ID_VIDEO_KERNELTEST_TNR3_STRIPED 402
+
+#endif
+
+#define XMEM_WIDTH_BITS HIVE_ISP_DDR_WORD_BITS
+#define XMEM_SHORTS_PER_WORD (HIVE_ISP_DDR_WORD_BITS/16)
+#define XMEM_INTS_PER_WORD (HIVE_ISP_DDR_WORD_BITS/32)
+#define XMEM_POW2_BYTES_PER_WORD HIVE_ISP_DDR_WORD_BYTES
+
+#define BITS8_ELEMENTS_PER_XMEM_ADDR CEIL_DIV(XMEM_WIDTH_BITS, 8)
+#define BITS16_ELEMENTS_PER_XMEM_ADDR CEIL_DIV(XMEM_WIDTH_BITS, 16)
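+
+/* Worked example (illustrative values; CEIL_DIV is assumed to round up, as in
+ * math_support.h, and a 256-bit DDR word is assumed):
+ *   XMEM_WIDTH_BITS               = 256
+ *   XMEM_SHORTS_PER_WORD          = 256/16 = 16
+ *   XMEM_INTS_PER_WORD            = 256/32 = 8
+ *   BITS8_ELEMENTS_PER_XMEM_ADDR  = CEIL_DIV(256, 8)  = 32
+ *   BITS16_ELEMENTS_PER_XMEM_ADDR = CEIL_DIV(256, 16) = 16
+ */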
+
+#if ISP_VEC_NELEMS == 64
+#define ISP_NWAY_LOG2 6
+#elif ISP_VEC_NELEMS == 32
+#define ISP_NWAY_LOG2 5
+#elif ISP_VEC_NELEMS == 16
+#define ISP_NWAY_LOG2 4
+#elif ISP_VEC_NELEMS == 8
+#define ISP_NWAY_LOG2 3
+#else
+#error "isp_const.h ISP_VEC_NELEMS must be one of {8, 16, 32, 64}"
+#endif
+
+/* *****************************
+ * ISP input/output buffer sizes
+ * ****************************/
+/* input image */
+#define INPUT_BUF_DMA_HEIGHT 2
+#define INPUT_BUF_HEIGHT 2 /* double buffer */
+#define OUTPUT_BUF_DMA_HEIGHT 2
+#define OUTPUT_BUF_HEIGHT 2 /* double buffer */
+#define OUTPUT_NUM_TRANSFERS 4
+
+/* GDC accelerator: Up/Down Scaling */
+/* These should be moved to the gdc_defs.h in the device */
+#define UDS_SCALING_N HRT_GDC_N
+/* AB: This should cover the zooming up to 16MP */
+#define UDS_MAX_OXDIM 5000
+/* We support at most 2 planes with different parameters
+ - luma and chroma (YUV420) */
+#define UDS_MAX_PLANES 2
+#define UDS_BLI_BLOCK_HEIGHT 2
+#define UDS_BCI_BLOCK_HEIGHT 4
+#define UDS_BLI_INTERP_ENVELOPE 1
+#define UDS_BCI_INTERP_ENVELOPE 3
+#define UDS_MAX_ZOOM_FAC 64
+/* Make it always one FPGA vector.
+ Four FPGA vectors are required and
+ four of them fit in one ASIC vector.*/
+#define UDS_MAX_CHUNKS 16
+
+#define ISP_LEFT_PADDING _ISP_LEFT_CROP_EXTRA(ISP_LEFT_CROPPING)
+#define ISP_LEFT_PADDING_VECS CEIL_DIV(ISP_LEFT_PADDING, ISP_VEC_NELEMS)
+/* in the continuous case, the cropping of the current binary doesn't matter for the buffer calculation, but the cropping of the sp copy should be used */
+#define ISP_LEFT_PADDING_CONT _ISP_LEFT_CROP_EXTRA(SH_CSS_MAX_LEFT_CROPPING)
+#define ISP_LEFT_PADDING_VECS_CONT CEIL_DIV(ISP_LEFT_PADDING_CONT, ISP_VEC_NELEMS)
+
+#define CEIL_ROUND_DIV_STRIPE(width, stripe, padding) \
+ CEIL_MUL(padding + CEIL_DIV(width - padding, stripe), ((ENABLE_RAW_BINNING || ENABLE_FIXED_BAYER_DS)?4:2))
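+
+/* Worked example (illustrative numbers; CEIL_DIV rounds up and CEIL_MUL rounds
+ * up to the nearest multiple, as assumed from math_support.h):
+ *   width = 72 vectors, stripe = 2, padding = 2, no raw binning / fixed bayer DS:
+ *   CEIL_ROUND_DIV_STRIPE(72, 2, 2) = CEIL_MUL(2 + CEIL_DIV(70, 2), 2)
+ *                                   = CEIL_MUL(37, 2) = 38
+ */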
+
+/* output (Y,U,V) image, 4:2:0 */
+#define MAX_VECTORS_PER_LINE \
+ CEIL_ROUND_DIV_STRIPE(CEIL_DIV(ISP_MAX_INTERNAL_WIDTH, ISP_VEC_NELEMS), \
+ ISP_NUM_STRIPES, \
+ ISP_LEFT_PADDING_VECS)
+
+/*
+ * 'ITERATOR_VECTOR_INCREMENT' explanation:
+ * when striping an even number of iterations, one of the stripes is
+ * one iteration wider than the other to account for overlap
+ * so the calc for the output buffer vmem size is:
+ * ((width[vectors]/num_of_stripes) + 2[vectors])
+ */
+#if defined(HAS_RES_MGR)
+#define MAX_VECTORS_PER_OUTPUT_LINE \
+ (CEIL_DIV(CEIL_DIV(ISP_MAX_OUTPUT_WIDTH, ISP_NUM_STRIPES) + ISP_LEFT_PADDING, ISP_VEC_NELEMS) + \
+ ITERATOR_VECTOR_INCREMENT)
+
+#define MAX_VECTORS_PER_INPUT_LINE CEIL_DIV(ISP_MAX_INPUT_WIDTH, ISP_VEC_NELEMS)
+#define MAX_VECTORS_PER_INPUT_STRIPE (CEIL_ROUND_DIV_STRIPE(CEIL_DIV(ISP_MAX_INPUT_WIDTH, ISP_VEC_NELEMS) , \
+ ISP_NUM_STRIPES, \
+ ISP_LEFT_PADDING_VECS) + \
+ ITERATOR_VECTOR_INCREMENT)
+#else /* !defined(HAS_RES_MGR)*/
+#define MAX_VECTORS_PER_OUTPUT_LINE \
+ CEIL_DIV(CEIL_DIV(ISP_MAX_OUTPUT_WIDTH, ISP_NUM_STRIPES) + ISP_LEFT_PADDING, ISP_VEC_NELEMS)
+
+/* Must be even due to interlaced bayer input */
+#define MAX_VECTORS_PER_INPUT_LINE CEIL_MUL((CEIL_DIV(ISP_MAX_INPUT_WIDTH, ISP_VEC_NELEMS) + ISP_LEFT_PADDING_VECS), 2)
+#define MAX_VECTORS_PER_INPUT_STRIPE CEIL_ROUND_DIV_STRIPE(MAX_VECTORS_PER_INPUT_LINE, \
+ ISP_NUM_STRIPES, \
+ ISP_LEFT_PADDING_VECS)
+#endif /* HAS_RES_MGR */
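+
+/* Worked example for the !HAS_RES_MGR variant above (hypothetical values):
+ * ISP_MAX_INPUT_WIDTH = 4160, ISP_VEC_NELEMS = 64, ISP_LEFT_PADDING_VECS = 2:
+ *   MAX_VECTORS_PER_INPUT_LINE = CEIL_MUL(CEIL_DIV(4160, 64) + 2, 2)
+ *                              = CEIL_MUL(65 + 2, 2) = 68
+ * (rounded up to an even count because of the interlaced bayer input)
+ */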
+
+
+/* Add 2 for left cropping */
+#define MAX_SP_RAW_COPY_VECTORS_PER_INPUT_LINE (CEIL_DIV(ISP_MAX_INPUT_WIDTH, ISP_VEC_NELEMS) + 2)
+
+#define MAX_VECTORS_PER_BUF_LINE \
+ (MAX_VECTORS_PER_LINE + DUMMY_BUF_VECTORS)
+#define MAX_VECTORS_PER_BUF_INPUT_LINE \
+ (MAX_VECTORS_PER_INPUT_STRIPE + DUMMY_BUF_VECTORS)
+#define MAX_OUTPUT_Y_FRAME_WIDTH \
+ (MAX_VECTORS_PER_LINE * ISP_VEC_NELEMS)
+#define MAX_OUTPUT_Y_FRAME_SIMDWIDTH \
+ MAX_VECTORS_PER_LINE
+#define MAX_OUTPUT_C_FRAME_WIDTH \
+ (MAX_OUTPUT_Y_FRAME_WIDTH / 2)
+#define MAX_OUTPUT_C_FRAME_SIMDWIDTH \
+ CEIL_DIV(MAX_OUTPUT_C_FRAME_WIDTH, ISP_VEC_NELEMS)
+
+/* should be even */
+#define NO_CHUNKING (OUTPUT_NUM_CHUNKS == 1)
+
+#define MAX_VECTORS_PER_CHUNK \
+ (NO_CHUNKING ? MAX_VECTORS_PER_LINE \
+ : 2*CEIL_DIV(MAX_VECTORS_PER_LINE, \
+ 2*OUTPUT_NUM_CHUNKS))
+
+#define MAX_C_VECTORS_PER_CHUNK \
+ (MAX_VECTORS_PER_CHUNK/2)
+
+/* should be even */
+#define MAX_VECTORS_PER_OUTPUT_CHUNK \
+ (NO_CHUNKING ? MAX_VECTORS_PER_OUTPUT_LINE \
+ : 2*CEIL_DIV(MAX_VECTORS_PER_OUTPUT_LINE, \
+ 2*OUTPUT_NUM_CHUNKS))
+
+#define MAX_C_VECTORS_PER_OUTPUT_CHUNK \
+ (MAX_VECTORS_PER_OUTPUT_CHUNK/2)
+
+
+
+/* should be even */
+#define MAX_VECTORS_PER_INPUT_CHUNK \
+ (INPUT_NUM_CHUNKS == 1 ? MAX_VECTORS_PER_INPUT_STRIPE \
+ : 2*CEIL_DIV(MAX_VECTORS_PER_INPUT_STRIPE, \
+ 2*OUTPUT_NUM_CHUNKS))
+
+#define DEFAULT_C_SUBSAMPLING 2
+
+/****** DMA buffer properties */
+
+#define RAW_BUF_LINES ((ENABLE_RAW_BINNING || ENABLE_FIXED_BAYER_DS) ? 4 : 2)
+
+#if defined(HAS_RES_MGR)
+#define RAW_BUF_STRIDE (MAX_VECTORS_PER_INPUT_STRIPE)
+#else /* !defined(HAS_RES_MGR) */
+#define RAW_BUF_STRIDE \
+ (BINARY_ID == SH_CSS_BINARY_ID_POST_ISP ? MAX_VECTORS_PER_INPUT_CHUNK : \
+ ISP_NUM_STRIPES > 1 ? MAX_VECTORS_PER_INPUT_STRIPE+_ISP_EXTRA_PADDING_VECS : \
+ !ENABLE_CONTINUOUS ? MAX_VECTORS_PER_INPUT_LINE : \
+ MAX_VECTORS_PER_INPUT_CHUNK)
+#endif /* HAS_RES_MGR */
+
+/* [isp vmem] table size[vectors] per line per color (GR,R,B,GB),
+ multiples of NWAY */
+#define SCTBL_VECTORS_PER_LINE_PER_COLOR \
+ CEIL_DIV(SH_CSS_MAX_SCTBL_WIDTH_PER_COLOR, ISP_VEC_NELEMS)
+/* [isp vmem] table size[vectors] per line for 4colors (GR,R,B,GB),
+ multiples of NWAY */
+#define SCTBL_VECTORS_PER_LINE \
+ (SCTBL_VECTORS_PER_LINE_PER_COLOR * IA_CSS_SC_NUM_COLORS)
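+
+/* Worked example (hypothetical values): with SH_CSS_MAX_SCTBL_WIDTH_PER_COLOR
+ * = 129 and ISP_VEC_NELEMS = 64:
+ *   SCTBL_VECTORS_PER_LINE_PER_COLOR = CEIL_DIV(129, 64) = 3
+ *   SCTBL_VECTORS_PER_LINE           = 3 * IA_CSS_SC_NUM_COLORS = 12  (4 colors)
+ */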
+
+/*************/
+
+/* Format for fixed primaries */
+
+#define ISP_FIXED_PRIMARY_FORMAT IA_CSS_FRAME_FORMAT_NV12
+
+#endif /* _COMMON_ISP_CONST_H_ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/modes/interface/isp_exprs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/modes/interface/isp_exprs.h
new file mode 100644
index 000000000000..8b59a8caec52
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/modes/interface/isp_exprs.h
@@ -0,0 +1,309 @@
+#ifndef ISP2401
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#else
+/**
+Support for Intel Camera Imaging ISP subsystem.
+Copyright (c) 2010 - 2015, Intel Corporation.
+
+This program is free software; you can redistribute it and/or modify it
+under the terms and conditions of the GNU General Public License,
+version 2, as published by the Free Software Foundation.
+
+This program is distributed in the hope it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+more details.
+*/
+#endif
+
+#ifndef _COMMON_ISP_EXPRS_H_
+#define _COMMON_ISP_EXPRS_H_
+
+/* Binary independent pre-processor expressions */
+
+#include "sh_css_defs.h"
+#include "isp_const.h"
+
+#ifdef __HOST
+#error "isp_exprs.h: Do not include on HOST, contains ISP specific defines"
+#endif
+
+#ifndef __ISP
+#if defined(MODE)
+#define MODE aap
+#error "isp_exprs.h: is mode independent, but MODE is set"
+#endif
+#if defined(VARIABLE_RESOLUTION)
+#define VARIABLE_RESOLUTION noot
+#error "isp_exprs.h: is mode independent, but VARIABLE_RESOLUTION is set"
+#endif
+#if defined(DECI_FACTOR_LOG2)
+#define DECI_FACTOR_LOG2 mies
+#error "isp_exprs.h: is mode independent, but DECI_FACTOR_LOG2 is set"
+#endif
+#endif
+
+#define LOG_VECTOR_STEP _ISP_LOG_VECTOR_STEP(MODE)
+/* should be even and a multiple of the vf downscaling factor */
+#define ISP_OUTPUT_CHUNK_LOG_FACTOR (MAX_VF_LOG_DOWNSCALE<=1 ? LOG_VECTOR_STEP : \
+ umax(VF_LOG_DOWNSCALE, LOG_VECTOR_STEP))
+
+#define CEIL_DIV_CHUNKS(n,c) ((c) == 1 ? (n) \
+ : CEIL_SHIFT(CEIL_DIV((n), (c)), ISP_OUTPUT_CHUNK_LOG_FACTOR)<<ISP_OUTPUT_CHUNK_LOG_FACTOR)
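+
+/* Worked example (illustrative; CEIL_SHIFT(a, b) is assumed to divide by 2^b
+ * rounding up, as in math_support.h):
+ *   n = 37 vectors, c = 2 chunks, ISP_OUTPUT_CHUNK_LOG_FACTOR = 1:
+ *   CEIL_DIV_CHUNKS(37, 2) = CEIL_SHIFT(CEIL_DIV(37, 2), 1) << 1
+ *                          = CEIL_SHIFT(19, 1) << 1 = 10 << 1 = 20
+ *   i.e. each chunk is rounded up to an even number of vectors.
+ */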
+
+
+#define ISP_VARIABLE_INPUT (ISP_INPUT == IA_CSS_BINARY_INPUT_VARIABLE)
+
+/* Binary independent versions, see isp_defs.h for binary dependent ones */
+#ifndef __ISP
+#define IMAGEFORMAT_IS_RAW(fmt) ((fmt) == IA_CSS_FRAME_FORMAT_RAW)
+
+#define IMAGEFORMAT_IS_RAW_INTERLEAVED(fmt) ((fmt) == IA_CSS_FRAME_FORMAT_RAW)
+
+#define IMAGEFORMAT_IS_RGB(fmt) ((fmt) == IA_CSS_FRAME_FORMAT_RGBA888 || (fmt) == IA_CSS_FRAME_FORMAT_PLANAR_RGB888 || \
+ (fmt) == IA_CSS_FRAME_FORMAT_RGB565)
+
+#define IMAGEFORMAT_IS_RGB_INTERLEAVED(fmt) ((fmt) == IA_CSS_FRAME_FORMAT_RGBA888 || (fmt) == IA_CSS_FRAME_FORMAT_RGB565)
+
+#define IMAGEFORMAT_UV_INTERLEAVED(fmt) ((fmt) == IA_CSS_FRAME_FORMAT_NV11 || \
+ (fmt) == IA_CSS_FRAME_FORMAT_NV12 || (fmt) == IA_CSS_FRAME_FORMAT_NV21 || \
+ (fmt) == IA_CSS_FRAME_FORMAT_NV16 || (fmt) == IA_CSS_FRAME_FORMAT_NV61 || \
+ (fmt) == IA_CSS_FRAME_FORMAT_UYVY || (fmt) == IA_CSS_FRAME_FORMAT_YUYV || \
+ (fmt) == IA_CSS_FRAME_FORMAT_NV12_16 || (fmt) == IA_CSS_FRAME_FORMAT_NV12_TILEY)
+
+#define IMAGEFORMAT_YUV_INTERLEAVED(fmt) ((fmt) == IA_CSS_FRAME_FORMAT_UYVY || (fmt) == IA_CSS_FRAME_FORMAT_YUYV)
+
+#define IMAGEFORMAT_INTERLEAVED(fmt) (IMAGEFORMAT_UV_INTERLEAVED(fmt) || IMAGEFORMAT_IS_RGB_INTERLEAVED(fmt))
+
+#define IMAGEFORMAT_SUB_SAMPL_420(fmt) ((fmt) == IA_CSS_FRAME_FORMAT_YUV420 || (fmt) == IA_CSS_FRAME_FORMAT_YV12 || \
+ (fmt) == IA_CSS_FRAME_FORMAT_NV12 || (fmt) == IA_CSS_FRAME_FORMAT_NV21 || \
+	(fmt) == IA_CSS_FRAME_FORMAT_NV12_16 || (fmt) == IA_CSS_FRAME_FORMAT_NV12_TILEY)
+
+#define IMAGEFORMAT_SUB_SAMPL_422(fmt) ((fmt) == IA_CSS_FRAME_FORMAT_YUV422 || (fmt) == IA_CSS_FRAME_FORMAT_YV16 || \
+ (fmt) == IA_CSS_FRAME_FORMAT_NV16 || (fmt) == IA_CSS_FRAME_FORMAT_NV61)
+
+#define IMAGEFORMAT_SUB_SAMPL_444(fmt) ((fmt) == IA_CSS_FRAME_FORMAT_YUV444)
+
+#define IMAGEFORMAT_UV_SWAPPED(fmt) ((fmt) == IA_CSS_FRAME_FORMAT_NV21 || (fmt) == IA_CSS_FRAME_FORMAT_NV61)
+
+#define IMAGEFORMAT_IS_RGBA(fmt) ((fmt) == IA_CSS_FRAME_FORMAT_RGBA888)
+
+#define IMAGEFORMAT_IS_NV11(fmt) ((fmt) == IA_CSS_FRAME_FORMAT_NV11)
+
+#define IMAGEFORMAT_IS_16BIT(fmt) ((fmt) == IA_CSS_FRAME_FORMAT_YUV420_16 || (fmt) == IA_CSS_FRAME_FORMAT_NV12_16 || (fmt) == IA_CSS_FRAME_FORMAT_YUV422_16)
+
+#endif
+
+
+/******** GDCAC settings *******/
+#define GDCAC_BPP ISP_VEC_ELEMBITS /* We use 14 bits per pixel component for the GDCAC mode */
+#define GDC_INPUT_BLOCK_WIDTH 2 /* Two vectors are needed */
+#define GDC_OUTPUT_BLOCK_WIDTH 1 /* One vector is produced */
+
+#if ISP_VEC_NELEMS == 16
+/* For 16*16 output block, the distortion fits in 13.312 lines __ALWAYS__ */
+#define GDC_INPUT_BLOCK_HEIGHT 14
+#elif ISP_VEC_NELEMS == 64
+/* For 64*64 output block, the distortion fits in 47. lines __ALWAYS__ */
+#define GDC_INPUT_BLOCK_HEIGHT 48
+#endif
+/*******************************/
+
+
+#define ENABLE_HUP ((isp_input_width - isp_envelope_width) < isp_output_width)
+#define ENABLE_VUP ((isp_input_height - isp_envelope_height) < isp_output_height)
+
+#define ISP_INPUT_WIDTH (ENABLE_DS | ENABLE_HUP ? isp_input_width : ISP_INTERNAL_WIDTH)
+#define ISP_INPUT_HEIGHT (ENABLE_DS | ENABLE_VUP ? isp_input_height : isp_internal_height)
+
+#define DECI_FACTOR_LOG2 (ISP_FIXED_S3A_DECI_LOG ? ISP_FIXED_S3A_DECI_LOG : isp_deci_log_factor)
+
+#define ISP_S3ATBL_WIDTH \
+ _ISP_S3ATBL_ISP_WIDTH(_ISP_S3A_ELEMS_ISP_WIDTH((ENABLE_HUP ? ISP_INTERNAL_WIDTH : ISP_INPUT_WIDTH), ISP_LEFT_CROPPING), \
+ DECI_FACTOR_LOG2)
+#define S3ATBL_WIDTH_BYTES (sizeof(struct ia_css_3a_output) * ISP_S3ATBL_WIDTH)
+#define S3ATBL_WIDTH_SHORTS (S3ATBL_WIDTH_BYTES / sizeof(short))
+
+/* should be even?? */
+#define ISP_UV_OUTPUT_CHUNK_VECS CEIL_DIV(ISP_OUTPUT_CHUNK_VECS, 2)
+
+
+#if defined(__ISP) || defined(INIT_VARS)
+
+#define ISP_USE_IF (ISP_INPUT == IA_CSS_BINARY_INPUT_MEMORY ? 0 : \
+ ISP_INPUT == IA_CSS_BINARY_INPUT_SENSOR ? 1 : \
+ isp_online)
+
+#define ISP_DVS_ENVELOPE_WIDTH 0
+#define ISP_DVS_ENVELOPE_HEIGHT 0
+
+#define _ISP_INPUT_WIDTH_VECS _ISP_VECS(ISP_INPUT_WIDTH)
+
+#if !defined(__ISP) || (VARIABLE_RESOLUTION && !__HOST)
+#define ISP_INPUT_WIDTH_VECS isp_vectors_per_input_line
+#else
+#define ISP_INPUT_WIDTH_VECS _ISP_INPUT_WIDTH_VECS
+#endif
+
+#if !defined(__ISP) || VARIABLE_RESOLUTION
+#define ISP_INTERNAL_WIDTH_VECS isp_vectors_per_line
+#else
+#define ISP_INTERNAL_WIDTH_VECS _ISP_INTERNAL_WIDTH_VECS
+#endif
+
+#define _ISP_INTERNAL_HEIGHT __ISP_INTERNAL_HEIGHT(isp_output_height, ISP_TOP_CROPPING, ISP_DVS_ENVELOPE_HEIGHT)
+
+#define ISP_INTERNAL_HEIGHT isp_internal_height
+
+#define _ISP_INTERNAL_WIDTH __ISP_INTERNAL_WIDTH(ISP_OUTPUT_WIDTH, ISP_DVS_ENVELOPE_WIDTH, \
+ ISP_LEFT_CROPPING, MODE, ISP_C_SUBSAMPLING, \
+ OUTPUT_NUM_CHUNKS, ISP_PIPELINING)
+
+#define ISP_UV_INTERNAL_WIDTH (ISP_INTERNAL_WIDTH / 2)
+#define ISP_UV_INTERNAL_HEIGHT (ISP_INTERNAL_HEIGHT / 2)
+
+#define _ISP_INTERNAL_WIDTH_VECS (_ISP_INTERNAL_WIDTH / ISP_VEC_NELEMS)
+#define _ISP_UV_INTERNAL_WIDTH_VECS CEIL_DIV(ISP_UV_INTERNAL_WIDTH, ISP_VEC_NELEMS)
+
+#define ISP_VF_OUTPUT_WIDTH _ISP_VF_OUTPUT_WIDTH(ISP_VF_OUTPUT_WIDTH_VECS)
+#define ISP_VF_OUTPUT_HEIGHT _ISP_VF_OUTPUT_HEIGHT(isp_output_height, VF_LOG_DOWNSCALE)
+
+#if defined (__ISP) && !VARIABLE_RESOLUTION
+#define ISP_INTERNAL_WIDTH _ISP_INTERNAL_WIDTH
+#define ISP_VF_OUTPUT_WIDTH_VECS _ISP_VF_OUTPUT_WIDTH_VECS
+#else
+#define ISP_INTERNAL_WIDTH (VARIABLE_RESOLUTION ? isp_internal_width : _ISP_INTERNAL_WIDTH)
+#define ISP_VF_OUTPUT_WIDTH_VECS (VARIABLE_RESOLUTION ? isp_vf_output_width_vecs : _ISP_VF_OUTPUT_WIDTH_VECS)
+#endif
+
+#if defined(__ISP) && !VARIABLE_RESOLUTION
+#define ISP_OUTPUT_WIDTH ISP_MAX_OUTPUT_WIDTH
+#define VF_LOG_DOWNSCALE MAX_VF_LOG_DOWNSCALE
+#else
+#define ISP_OUTPUT_WIDTH isp_output_width
+#define VF_LOG_DOWNSCALE isp_vf_downscale_bits
+#endif
+
+#if !defined(__ISP) || VARIABLE_RESOLUTION
+#define _ISP_MAX_VF_OUTPUT_WIDTH __ISP_MAX_VF_OUTPUT_WIDTH(2*SH_CSS_MAX_VF_WIDTH, ISP_LEFT_CROPPING)
+#elif defined(MODE) && MODE == IA_CSS_BINARY_MODE_PRIMARY && ISP_OUTPUT_WIDTH > 3328
+/* Because of vmem issues, should be fixed later */
+#define _ISP_MAX_VF_OUTPUT_WIDTH (SH_CSS_MAX_VF_WIDTH - 2*ISP_VEC_NELEMS + (ISP_LEFT_CROPPING ? 2 * ISP_VEC_NELEMS : 0))
+#else
+#define _ISP_MAX_VF_OUTPUT_WIDTH (ISP_VF_OUTPUT_WIDTH + (ISP_LEFT_CROPPING ? (2 >> VF_LOG_DOWNSCALE) * ISP_VEC_NELEMS : 0))
+#endif
+
+#define ISP_MAX_VF_OUTPUT_VECS CEIL_DIV(_ISP_MAX_VF_OUTPUT_WIDTH, ISP_VEC_NELEMS)
+
+
+
+#define ISP_MIN_STRIPE_WIDTH (ISP_PIPELINING * (1<<_ISP_LOG_VECTOR_STEP(MODE)))
+
+/******* STRIPING-RELATED MACROS *******/
+#define NO_STRIPING (ISP_NUM_STRIPES == 1)
+
+#if defined(HAS_RES_MGR)
+
+#define ISP_OUTPUT_CHUNK_VECS ISP_INTERNAL_WIDTH_VECS
+
+#if defined(__ISP)
+#define VECTORS_PER_LINE ISP_INTERNAL_WIDTH_VECS
+#else
+#define VECTORS_PER_LINE \
+ (NO_STRIPING ? ISP_INTERNAL_WIDTH_VECS \
+ : ISP_IO_STRIPE_WIDTH_VECS(ISP_INTERNAL_WIDTH_VECS, ISP_LEFT_PADDING_VECS, ISP_NUM_STRIPES, ISP_MIN_STRIPE_WIDTH) )
+#endif
+
+#define VECTORS_PER_INPUT_LINE \
+ (NO_STRIPING ? ISP_INPUT_WIDTH_VECS \
+ : ISP_IO_STRIPE_WIDTH_VECS(ISP_INPUT_WIDTH_VECS, ISP_LEFT_PADDING_VECS, ISP_NUM_STRIPES, ISP_MIN_STRIPE_WIDTH) )
+
+#else
+
+#define ISP_OUTPUT_CHUNK_VECS \
+ (NO_STRIPING ? CEIL_DIV_CHUNKS(ISP_OUTPUT_VECS_EXTRA_CROP, OUTPUT_NUM_CHUNKS) \
+ : ISP_IO_STRIPE_WIDTH_VECS(ISP_OUTPUT_VECS_EXTRA_CROP, ISP_LEFT_PADDING_VECS, ISP_NUM_STRIPES, ISP_MIN_STRIPE_WIDTH) )
+
+#define VECTORS_PER_LINE \
+ (NO_STRIPING ? ISP_INTERNAL_WIDTH_VECS \
+ : ISP_IO_STRIPE_WIDTH_VECS(ISP_INTERNAL_WIDTH_VECS, ISP_LEFT_PADDING_VECS, ISP_NUM_STRIPES, ISP_MIN_STRIPE_WIDTH) )
+
+#define VECTORS_PER_INPUT_LINE \
+ (NO_STRIPING ? ISP_INPUT_WIDTH_VECS \
+ : ISP_IO_STRIPE_WIDTH_VECS(ISP_INPUT_WIDTH_VECS, ISP_LEFT_PADDING_VECS, ISP_NUM_STRIPES, ISP_MIN_STRIPE_WIDTH)+_ISP_EXTRA_PADDING_VECS)
+
+#endif
+
+#define ISP_MAX_VF_OUTPUT_STRIPE_VECS \
+ (NO_STRIPING ? ISP_MAX_VF_OUTPUT_VECS \
+ : CEIL_MUL(CEIL_DIV(ISP_MAX_VF_OUTPUT_VECS, ISP_NUM_STRIPES), 2))
+#define _ISP_VF_OUTPUT_WIDTH_VECS \
+ (NO_STRIPING ? __ISP_VF_OUTPUT_WIDTH_VECS(ISP_OUTPUT_WIDTH, VF_LOG_DOWNSCALE) \
+ : __ISP_VF_OUTPUT_WIDTH_VECS(CEIL_DIV(ISP_OUTPUT_WIDTH, ISP_NUM_STRIPES), VF_LOG_DOWNSCALE))
+
+#define ISP_IO_STRIPE_WIDTH_VECS(width, padding, num_stripes, min_stripe) \
+ MAX(CEIL_MUL(padding + CEIL_DIV(width-padding, num_stripes) \
+ , 2) \
+ , min_stripe)
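+
+/* Worked example (hypothetical values): width = 68 vectors, padding = 2,
+ * num_stripes = 2:
+ *   ISP_IO_STRIPE_WIDTH_VECS(68, 2, 2, min) = MAX(CEIL_MUL(2 + CEIL_DIV(66, 2), 2), min)
+ *                                           = MAX(CEIL_MUL(35, 2), min) = MAX(36, min)
+ * so each stripe carries the shared left padding plus an even share of the rest,
+ * but never less than ISP_MIN_STRIPE_WIDTH.
+ */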
+////////// INPUT & INTERNAL
+/* should be even */
+#define INPUT_NUM_CHUNKS OUTPUT_NUM_CHUNKS
+
+#define INPUT_VECTORS_PER_CHUNK CEIL_DIV_CHUNKS(VECTORS_PER_INPUT_LINE, INPUT_NUM_CHUNKS)
+
+/* only for ISP code, will be removed: */
+#define VECTORS_PER_FULL_LINE ISP_INTERNAL_WIDTH_VECS
+#define VECTORS_PER_INPUT_FULL_LINE ISP_INPUT_WIDTH_VECS
+
+////////// OUTPUT
+/* should be at least even and also a multiple of the vf scaling factor */
+#define ISP_OUTPUT_VECS_EXTRA_CROP CEIL_DIV(ISP_OUTPUT_WIDTH_EXTRA_CROP, ISP_VEC_NELEMS)
+
+/* Output is decoupled from input */
+#define ISP_OUTPUT_WIDTH_EXTRA_CROP CEIL_MUL(CEIL_MUL((ENABLE_DVS_ENVELOPE ? ISP_OUTPUT_WIDTH : ISP_INTERNAL_WIDTH), 2*ISP_VEC_NELEMS), \
+ ISP_C_SUBSAMPLING * OUTPUT_NUM_CHUNKS * HIVE_ISP_DDR_WORD_BYTES)
+
+#define ISP_MAX_VF_OUTPUT_CHUNK_VECS \
+ (NO_CHUNKING ? ISP_MAX_VF_OUTPUT_STRIPE_VECS \
+ : 2*CEIL_DIV(ISP_MAX_VF_OUTPUT_STRIPE_VECS, 2*OUTPUT_NUM_CHUNKS))
+
+#define OUTPUT_VECTORS_PER_CHUNK CEIL_DIV_CHUNKS(VECTORS_PER_LINE,OUTPUT_NUM_CHUNKS)
+
+/* should be even?? */
+#if !defined(HAS_RES_MGR)
+#define OUTPUT_C_VECTORS_PER_CHUNK CEIL_DIV(OUTPUT_VECTORS_PER_CHUNK, 2)
+#else
+#define OUTPUT_C_VECTORS_PER_CHUNK CEIL_DIV(MAX_VECTORS_PER_CHUNK, 2)
+#endif
+
+#ifndef ISP2401
+/**** SCTBL defs *******/
+#define ISP_SCTBL_HEIGHT \
+ _ISP_SCTBL_HEIGHT(ISP_INPUT_HEIGHT, DECI_FACTOR_LOG2)
+
+#endif
+/**** UDS defs *********/
+#define UDS_DMACH_STRIDE_B_IN_Y (( ISP_INTERNAL_WIDTH /BITS8_ELEMENTS_PER_XMEM_ADDR)*HIVE_ISP_DDR_WORD_BYTES)
+#define UDS_DMACH_STRIDE_B_IN_C (((ISP_INTERNAL_WIDTH/2)/BITS8_ELEMENTS_PER_XMEM_ADDR)*HIVE_ISP_DDR_WORD_BYTES)
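+
+/* Worked example (hypothetical values, assuming a 256-bit DDR word, i.e.
+ * BITS8_ELEMENTS_PER_XMEM_ADDR = 32 and HIVE_ISP_DDR_WORD_BYTES = 32):
+ *   ISP_INTERNAL_WIDTH = 2560:
+ *     UDS_DMACH_STRIDE_B_IN_Y = (2560/32)*32 = 2560 bytes per luma line
+ *     UDS_DMACH_STRIDE_B_IN_C = (1280/32)*32 = 1280 bytes per chroma line
+ */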
+
+#else /* defined(__ISP) || defined(INIT_VARS) */
+
+#define ISP_INTERNAL_WIDTH isp_internal_width
+#define ISP_INTERNAL_HEIGHT isp_internal_height
+
+#endif /* defined(__ISP) || defined(INIT_VARS) */
+
+#endif /* _COMMON_ISP_EXPRS_H_ */
+
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/modes/interface/isp_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/modes/interface/isp_types.h
new file mode 100644
index 000000000000..37a7d28f6d9f
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/modes/interface/isp_types.h
@@ -0,0 +1,128 @@
+#ifndef ISP2401
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#else
+/**
+Support for Intel Camera Imaging ISP subsystem.
+Copyright (c) 2010 - 2015, Intel Corporation.
+
+This program is free software; you can redistribute it and/or modify it
+under the terms and conditions of the GNU General Public License,
+version 2, as published by the Free Software Foundation.
+
+This program is distributed in the hope it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+more details.
+*/
+#endif
+
+#ifndef _ISP_TYPES_H_
+#define _ISP_TYPES_H_
+
+/* Workaround: hivecc complains about "tag "sh_css_3a_output" already declared"
+ without this extra decl. */
+struct ia_css_3a_output;
+
+#if defined(__ISP)
+struct isp_uds_config {
+ int hive_dx;
+ int hive_dy;
+ unsigned hive_woix;
+ unsigned hive_bpp; /* gdc_bits_per_pixel */
+ unsigned hive_bci;
+};
+
+struct s_isp_gdcac_config {
+ unsigned nbx;
+ unsigned nby;
+};
+
+/* output.hive.c request information */
+typedef enum {
+ output_y_channel,
+ output_c_channel,
+ OUTPUT_NUM_CHANNELS
+} output_channel_type;
+
+typedef struct s_output_dma_info {
+ unsigned cond; /* Condition for transfer */
+ output_channel_type channel_type;
+ dma_channel channel;
+ unsigned width_a;
+ unsigned width_b;
+ unsigned stride;
+ unsigned v_delta; /* Offset for v address to do cropping */
+ char *x_base; /* X base address */
+} output_dma_info_type;
+#endif
+
+/* Input stream formats, these correspond to the MIPI formats and the way
+ * the CSS receiver sends these to the input formatter.
+ * The bit depth of each pixel element is stored in the global variable
+ * isp_bits_per_pixel.
+ * NOTE: for rgb565, we set isp_bits_per_pixel to 565, for all other rgb
+ * formats it is the actual depth (4 for 444, 8 for 888, etc.).
+ */
+enum sh_stream_format {
+ sh_stream_format_yuv420_legacy,
+ sh_stream_format_yuv420,
+ sh_stream_format_yuv422,
+ sh_stream_format_rgb,
+ sh_stream_format_raw,
+ sh_stream_format_binary, /* bytestream such as jpeg */
+};
+
+struct s_isp_frames {
+ /* global variables that are written to by either the SP or the host,
+ every ISP binary needs these. */
+ /* output frame */
+ char *xmem_base_addr_y;
+ char *xmem_base_addr_uv;
+ char *xmem_base_addr_u;
+ char *xmem_base_addr_v;
+ /* 2nd output frame */
+ char *xmem_base_addr_second_out_y;
+ char *xmem_base_addr_second_out_u;
+ char *xmem_base_addr_second_out_v;
+ /* input yuv frame */
+ char *xmem_base_addr_y_in;
+ char *xmem_base_addr_u_in;
+ char *xmem_base_addr_v_in;
+ /* input raw frame */
+ char *xmem_base_addr_raw;
+ /* output raw frame */
+ char *xmem_base_addr_raw_out;
+ /* viewfinder output (vf_veceven) */
+ char *xmem_base_addr_vfout_y;
+ char *xmem_base_addr_vfout_u;
+ char *xmem_base_addr_vfout_v;
+ /* overlay frame (for vf_pp) */
+ char *xmem_base_addr_overlay_y;
+ char *xmem_base_addr_overlay_u;
+ char *xmem_base_addr_overlay_v;
+ /* pre-gdc output frame (gdc input) */
+ char *xmem_base_addr_qplane_r;
+ char *xmem_base_addr_qplane_ratb;
+ char *xmem_base_addr_qplane_gr;
+ char *xmem_base_addr_qplane_gb;
+ char *xmem_base_addr_qplane_b;
+ char *xmem_base_addr_qplane_batr;
+ /* YUV as input, used by postisp binary */
+ char *xmem_base_addr_yuv_16_y;
+ char *xmem_base_addr_yuv_16_u;
+ char *xmem_base_addr_yuv_16_v;
+};
+
+#endif /* _ISP_TYPES_H_ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/memory_realloc.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/memory_realloc.c
new file mode 100644
index 000000000000..e814f1bf19f7
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/memory_realloc.c
@@ -0,0 +1,81 @@
+/**
+Support for Intel Camera Imaging ISP subsystem.
+Copyright (c) 2010 - 2015, Intel Corporation.
+
+This program is free software; you can redistribute it and/or modify it
+under the terms and conditions of the GNU General Public License,
+version 2, as published by the Free Software Foundation.
+
+This program is distributed in the hope it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+more details.
+*/
+#include "memory_realloc.h"
+#include "ia_css_debug.h"
+#include "ia_css_refcount.h"
+#include "memory_access.h"
+
+static bool realloc_isp_css_mm_buf(
+ hrt_vaddress *curr_buf,
+ size_t *curr_size,
+ size_t needed_size,
+ bool force,
+ enum ia_css_err *err,
+ uint16_t mmgr_attribute);
+
+
+bool reallocate_buffer(
+ hrt_vaddress *curr_buf,
+ size_t *curr_size,
+ size_t needed_size,
+ bool force,
+ enum ia_css_err *err)
+{
+ bool ret;
+ uint16_t mmgr_attribute = MMGR_ATTRIBUTE_DEFAULT;
+
+ IA_CSS_ENTER_PRIVATE("void");
+
+ ret = realloc_isp_css_mm_buf(curr_buf,
+ curr_size, needed_size, force, err, mmgr_attribute);
+
+ IA_CSS_LEAVE_PRIVATE("ret=%d", ret);
+ return ret;
+}
+
+static bool realloc_isp_css_mm_buf(
+ hrt_vaddress *curr_buf,
+ size_t *curr_size,
+ size_t needed_size,
+ bool force,
+ enum ia_css_err *err,
+ uint16_t mmgr_attribute)
+{
+ int32_t id;
+
+ *err = IA_CSS_SUCCESS;
+ /* Possible optimization: add a function sh_css_isp_css_mm_realloc()
+ * and implement on top of hmm. */
+
+ IA_CSS_ENTER_PRIVATE("void");
+
+ if (ia_css_refcount_is_single(*curr_buf) && !force && *curr_size >= needed_size) {
+ IA_CSS_LEAVE_PRIVATE("false");
+ return false;
+ }
+
+ id = IA_CSS_REFCOUNT_PARAM_BUFFER;
+ ia_css_refcount_decrement(id, *curr_buf);
+ *curr_buf = ia_css_refcount_increment(id, mmgr_alloc_attr(needed_size,
+ mmgr_attribute));
+
+ if (!*curr_buf) {
+ *err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+ *curr_size = 0;
+ } else {
+ *curr_size = needed_size;
+ }
+ IA_CSS_LEAVE_PRIVATE("true");
+ return true;
+}
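+
+/* Usage sketch (illustrative only; buf, size and needed are hypothetical
+ * caller-side variables): grow a refcounted parameter buffer only when the
+ * current allocation is too small or a reallocation is forced.
+ *
+ *   enum ia_css_err err;
+ *   if (reallocate_buffer(&buf, &size, needed, false, &err) &&
+ *       err != IA_CSS_SUCCESS)
+ *           return err;
+ *
+ * A true return value means the buffer was replaced; on allocation failure
+ * err is set and size is reset to 0.
+ */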
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/binary/interface/ia_css_binary.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/binary/interface/ia_css_binary.h
new file mode 100644
index 000000000000..c65194619a34
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/binary/interface/ia_css_binary.h
@@ -0,0 +1,333 @@
+#ifndef ISP2401
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#else
+/**
+Support for Intel Camera Imaging ISP subsystem.
+Copyright (c) 2010 - 2015, Intel Corporation.
+
+This program is free software; you can redistribute it and/or modify it
+under the terms and conditions of the GNU General Public License,
+version 2, as published by the Free Software Foundation.
+
+This program is distributed in the hope it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+more details.
+*/
+#endif
+
+#ifndef _IA_CSS_BINARY_H_
+#define _IA_CSS_BINARY_H_
+
+#include <type_support.h>
+#include "ia_css_types.h"
+#include "ia_css_err.h"
+#include "ia_css_stream_format.h"
+#include "ia_css_stream_public.h"
+#include "ia_css_frame_public.h"
+#include "sh_css_metrics.h"
+#include "isp/kernels/fixedbds/fixedbds_1.0/ia_css_fixedbds_types.h"
+
+/* The binary mode is used in pre-processor expressions so we cannot
+ * use an enum here. */
+#define IA_CSS_BINARY_MODE_COPY 0
+#define IA_CSS_BINARY_MODE_PREVIEW 1
+#define IA_CSS_BINARY_MODE_PRIMARY 2
+#define IA_CSS_BINARY_MODE_VIDEO 3
+#define IA_CSS_BINARY_MODE_PRE_ISP 4
+#define IA_CSS_BINARY_MODE_GDC 5
+#define IA_CSS_BINARY_MODE_POST_ISP 6
+#define IA_CSS_BINARY_MODE_ANR 7
+#define IA_CSS_BINARY_MODE_CAPTURE_PP 8
+#define IA_CSS_BINARY_MODE_VF_PP 9
+#define IA_CSS_BINARY_MODE_PRE_DE 10
+#define IA_CSS_BINARY_MODE_PRIMARY_HQ_STAGE0 11
+#define IA_CSS_BINARY_MODE_PRIMARY_HQ_STAGE1 12
+#define IA_CSS_BINARY_MODE_PRIMARY_HQ_STAGE2 13
+#define IA_CSS_BINARY_MODE_PRIMARY_HQ_STAGE3 14
+#define IA_CSS_BINARY_MODE_PRIMARY_HQ_STAGE4 15
+#define IA_CSS_BINARY_MODE_PRIMARY_HQ_STAGE5 16
+#define IA_CSS_BINARY_NUM_MODES 17
+
+#define MAX_NUM_PRIMARY_STAGES 6
+#define NUM_PRIMARY_HQ_STAGES 6 /* number of primary stages for ISP2.6.1 high quality pipe */
+#define NUM_PRIMARY_STAGES 1 /* number of primary stages for ISP1/ISP2.2 pipe */
+
+/* Indicate where binaries can read input from */
+#define IA_CSS_BINARY_INPUT_SENSOR 0
+#define IA_CSS_BINARY_INPUT_MEMORY 1
+#define IA_CSS_BINARY_INPUT_VARIABLE 2
+
+/* Should be included without the path.
+ However, that requires adding the path to numerous makefiles
+ that have nothing to do with isp parameters.
+ */
+#include "runtime/isp_param/interface/ia_css_isp_param_types.h"
+
+/* currently these ports include only output ports, not vf output ports */
+enum {
+ IA_CSS_BINARY_OUTPUT_PORT_0 = 0,
+ IA_CSS_BINARY_OUTPUT_PORT_1 = 1,
+ IA_CSS_BINARY_MAX_OUTPUT_PORTS = 2
+};
+
+struct ia_css_cas_binary_descr {
+ unsigned int num_stage;
+ unsigned int num_output_stage;
+ struct ia_css_frame_info *in_info;
+ struct ia_css_frame_info *internal_out_info;
+ struct ia_css_frame_info *out_info;
+ struct ia_css_frame_info *vf_info;
+ bool *is_output_stage;
+};
+
+#define IA_CSS_DEFAULT_CAS_BINARY_DESCR \
+{ \
+ 0, \
+ 0, \
+ NULL, \
+ NULL, \
+ NULL, \
+ NULL, \
+ NULL, \
+}
+
+struct ia_css_binary_descr {
+ int mode;
+ bool online;
+ bool continuous;
+ bool striped;
+ bool two_ppc;
+ bool enable_yuv_ds;
+ bool enable_high_speed;
+ bool enable_dvs_6axis;
+ bool enable_reduced_pipe;
+ bool enable_dz;
+ bool enable_xnr;
+ bool enable_fractional_ds;
+ bool enable_dpc;
+#ifdef ISP2401
+ bool enable_luma_only;
+ bool enable_tnr;
+#endif
+ bool enable_capture_pp_bli;
+ struct ia_css_resolution dvs_env;
+ enum ia_css_stream_format stream_format;
+	struct ia_css_frame_info *in_info;	/* info of the input frame with the
+						   resolution required by the ISP. */
+ struct ia_css_frame_info *bds_out_info;
+ struct ia_css_frame_info *out_info[IA_CSS_BINARY_MAX_OUTPUT_PORTS];
+ struct ia_css_frame_info *vf_info;
+ unsigned int isp_pipe_version;
+ unsigned int required_bds_factor;
+ int stream_config_left_padding;
+};
+
+struct ia_css_binary {
+ const struct ia_css_binary_xinfo *info;
+ enum ia_css_stream_format input_format;
+ struct ia_css_frame_info in_frame_info;
+ struct ia_css_frame_info internal_frame_info;
+ struct ia_css_frame_info out_frame_info[IA_CSS_BINARY_MAX_OUTPUT_PORTS];
+ struct ia_css_resolution effective_in_frame_res;
+ struct ia_css_frame_info vf_frame_info;
+ int input_buf_vectors;
+ int deci_factor_log2;
+ int vf_downscale_log2;
+ int s3atbl_width;
+ int s3atbl_height;
+ int s3atbl_isp_width;
+ int s3atbl_isp_height;
+ unsigned int morph_tbl_width;
+ unsigned int morph_tbl_aligned_width;
+ unsigned int morph_tbl_height;
+ int sctbl_width_per_color;
+ int sctbl_aligned_width_per_color;
+ int sctbl_height;
+#ifdef ISP2401
+ int sctbl_legacy_width_per_color;
+ int sctbl_legacy_height;
+#endif
+ struct ia_css_sdis_info dis;
+ struct ia_css_resolution dvs_envelope;
+ bool online;
+ unsigned int uds_xc;
+ unsigned int uds_yc;
+ unsigned int left_padding;
+ struct sh_css_binary_metrics metrics;
+ struct ia_css_isp_param_host_segments mem_params;
+ struct ia_css_isp_param_css_segments css_params;
+};
+
+#ifdef ISP2401
+
+#define IA_CSS_BINARY_DEFAULT_SETTINGS \
+{ \
+ NULL, \
+ IA_CSS_STREAM_FORMAT_YUV420_8_LEGACY, \
+ IA_CSS_BINARY_DEFAULT_FRAME_INFO, \
+ IA_CSS_BINARY_DEFAULT_FRAME_INFO, \
+ {IA_CSS_BINARY_DEFAULT_FRAME_INFO}, \
+ { 0, 0},/* effective_in_frame_res */ \
+ IA_CSS_BINARY_DEFAULT_FRAME_INFO, \
+ 0, /* input_buf_vectors */ \
+ 0, /* deci_factor_log2 */ \
+ 0, /* vf_downscale_log2 */ \
+ 0, /* s3atbl_width */ \
+ 0, /* s3atbl_height */ \
+ 0, /* s3atbl_isp_width */ \
+ 0, /* s3atbl_isp_height */ \
+ 0, /* morph_tbl_width */ \
+ 0, /* morph_tbl_aligned_width */ \
+ 0, /* morph_tbl_height */ \
+ 0, /* sctbl_width_per_color */ \
+ 0, /* sctbl_aligned_width_per_color */ \
+ 0, /* sctbl_height */ \
+ 0, /* sctbl_legacy_width_per_color */ \
+ 0, /* sctbl_legacy_height */ \
+ IA_CSS_DEFAULT_SDIS_INFO, /* dis */ \
+ { 0, 0},/* dvs_envelope_info */ \
+ false, /* online */ \
+ 0, /* uds_xc */ \
+ 0, /* uds_yc */ \
+ 0, /* left_padding */ \
+ DEFAULT_BINARY_METRICS, /* metrics */ \
+ IA_CSS_DEFAULT_ISP_MEM_PARAMS, /* mem_params */ \
+ IA_CSS_DEFAULT_ISP_CSS_PARAMS, /* css_params */ \
+}
+
+#else
+
+#define IA_CSS_BINARY_DEFAULT_SETTINGS \
+{ \
+ NULL, \
+ IA_CSS_STREAM_FORMAT_YUV420_8_LEGACY, \
+ IA_CSS_BINARY_DEFAULT_FRAME_INFO, \
+ IA_CSS_BINARY_DEFAULT_FRAME_INFO, \
+ {IA_CSS_BINARY_DEFAULT_FRAME_INFO}, \
+ { 0, 0},/* effective_in_frame_res */ \
+ IA_CSS_BINARY_DEFAULT_FRAME_INFO, \
+ 0, /* input_buf_vectors */ \
+ 0, /* deci_factor_log2 */ \
+ 0, /* vf_downscale_log2 */ \
+ 0, /* s3atbl_width */ \
+ 0, /* s3atbl_height */ \
+ 0, /* s3atbl_isp_width */ \
+ 0, /* s3atbl_isp_height */ \
+ 0, /* morph_tbl_width */ \
+ 0, /* morph_tbl_aligned_width */ \
+ 0, /* morph_tbl_height */ \
+ 0, /* sctbl_width_per_color */ \
+ 0, /* sctbl_aligned_width_per_color */ \
+ 0, /* sctbl_height */ \
+ IA_CSS_DEFAULT_SDIS_INFO, /* dis */ \
+ { 0, 0},/* dvs_envelope_info */ \
+ false, /* online */ \
+ 0, /* uds_xc */ \
+ 0, /* uds_yc */ \
+ 0, /* left_padding */ \
+ DEFAULT_BINARY_METRICS, /* metrics */ \
+ IA_CSS_DEFAULT_ISP_MEM_PARAMS, /* mem_params */ \
+ IA_CSS_DEFAULT_ISP_CSS_PARAMS, /* css_params */ \
+}
+
+#endif
+
+enum ia_css_err
+ia_css_binary_init_infos(void);
+
+enum ia_css_err
+ia_css_binary_uninit(void);
+
+enum ia_css_err
+ia_css_binary_fill_info(const struct ia_css_binary_xinfo *xinfo,
+ bool online,
+ bool two_ppc,
+ enum ia_css_stream_format stream_format,
+ const struct ia_css_frame_info *in_info,
+ const struct ia_css_frame_info *bds_out_info,
+ const struct ia_css_frame_info *out_info[],
+ const struct ia_css_frame_info *vf_info,
+ struct ia_css_binary *binary,
+ struct ia_css_resolution *dvs_env,
+ int stream_config_left_padding,
+ bool accelerator);
+
+enum ia_css_err
+ia_css_binary_find(struct ia_css_binary_descr *descr,
+ struct ia_css_binary *binary);
+
+/** @brief Get the shading information of the specified shading correction type.
+ *
+ * @param[in] binary: The isp binary which has the shading correction.
+ * @param[in] type: The shading correction type.
+ * @param[in] required_bds_factor: The bayer downscaling factor required in the pipe.
+ * @param[in] stream_config: The stream configuration.
+#ifndef ISP2401
+ * @param[out] info: The shading information.
+#else
+ * @param[out] shading_info: The shading information.
+ * The shading information required by the API is stored in shading_info.
+#endif
+ * The driver needs this information to generate
+#ifndef ISP2401
+ * the shading table directly required by the isp.
+#else
+ * the shading table directly required by the ISP.
+ * @param[out] pipe_config: The pipe configuration.
+ * The shading information related to the ISP (but not required by the API) is stored in pipe_config.
+#endif
+ * @return IA_CSS_SUCCESS or error code upon error.
+ *
+ */
+enum ia_css_err
+ia_css_binary_get_shading_info(const struct ia_css_binary *binary,
+ enum ia_css_shading_correction_type type,
+ unsigned int required_bds_factor,
+ const struct ia_css_stream_config *stream_config,
+#ifndef ISP2401
+ struct ia_css_shading_info *info);
+#else
+ struct ia_css_shading_info *shading_info,
+ struct ia_css_pipe_config *pipe_config);
+#endif
+
+enum ia_css_err
+ia_css_binary_3a_grid_info(const struct ia_css_binary *binary,
+ struct ia_css_grid_info *info,
+ struct ia_css_pipe *pipe);
+
+void
+ia_css_binary_dvs_grid_info(const struct ia_css_binary *binary,
+ struct ia_css_grid_info *info,
+ struct ia_css_pipe *pipe);
+
+void
+ia_css_binary_dvs_stat_grid_info(
+ const struct ia_css_binary *binary,
+ struct ia_css_grid_info *info,
+ struct ia_css_pipe *pipe);
+
+unsigned
+ia_css_binary_max_vf_width(void);
+
+void
+ia_css_binary_destroy_isp_parameters(struct ia_css_binary *binary);
+
+void
+ia_css_binary_get_isp_binaries(struct ia_css_binary_xinfo **binaries,
+ uint32_t *num_isp_binaries);
+
+#endif /* _IA_CSS_BINARY_H_ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/binary/src/binary.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/binary/src/binary.c
new file mode 100644
index 000000000000..a8b93a756e41
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/binary/src/binary.c
@@ -0,0 +1,1873 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <math_support.h>
+#include <gdc_device.h> /* HR_GDC_N */
+#include "isp.h" /* ISP_VEC_NELEMS */
+
+#include "ia_css_binary.h"
+#include "ia_css_debug.h"
+#include "ia_css_util.h"
+#include "ia_css_isp_param.h"
+#include "sh_css_internal.h"
+#include "sh_css_sp.h"
+#include "sh_css_firmware.h"
+#include "sh_css_defs.h"
+#include "sh_css_legacy.h"
+
+#include "vf/vf_1.0/ia_css_vf.host.h"
+#ifdef ISP2401
+#include "sc/sc_1.0/ia_css_sc.host.h"
+#endif
+#include "sdis/sdis_1.0/ia_css_sdis.host.h"
+#ifdef ISP2401
+#include "fixedbds/fixedbds_1.0/ia_css_fixedbds_param.h" /* FRAC_ACC */
+#endif
+
+#include "camera/pipe/interface/ia_css_pipe_binarydesc.h"
+#if defined(HAS_RES_MGR)
+#include <components/resolutions_mgr/src/host/resolutions_mgr.host.h>
+#include <components/acc_cluster/acc_dvs_stat/host/dvs_stat.host.h>
+#endif
+
+#include "memory_access.h"
+
+#include "assert_support.h"
+
+#define IMPLIES(a, b) (!(a) || (b)) /* A => B */
+
+static struct ia_css_binary_xinfo *all_binaries; /* ISP binaries only (no SP) */
+static struct ia_css_binary_xinfo
+ *binary_infos[IA_CSS_BINARY_NUM_MODES] = { NULL, };
+
+static void
+ia_css_binary_dvs_env(const struct ia_css_binary_info *info,
+ const struct ia_css_resolution *dvs_env,
+ struct ia_css_resolution *binary_dvs_env)
+{
+ if (info->enable.dvs_envelope) {
+ assert(dvs_env != NULL);
+ binary_dvs_env->width = max(dvs_env->width, SH_CSS_MIN_DVS_ENVELOPE);
+ binary_dvs_env->height = max(dvs_env->height, SH_CSS_MIN_DVS_ENVELOPE);
+ }
+}
+
+static void
+ia_css_binary_internal_res(const struct ia_css_frame_info *in_info,
+ const struct ia_css_frame_info *bds_out_info,
+ const struct ia_css_frame_info *out_info,
+ const struct ia_css_resolution *dvs_env,
+ const struct ia_css_binary_info *info,
+ struct ia_css_resolution *internal_res)
+{
+ unsigned int isp_tmp_internal_width = 0,
+ isp_tmp_internal_height = 0;
+ bool binary_supports_yuv_ds = info->enable.ds & 2;
+ struct ia_css_resolution binary_dvs_env;
+
+ binary_dvs_env.width = 0;
+ binary_dvs_env.height = 0;
+ ia_css_binary_dvs_env(info, dvs_env, &binary_dvs_env);
+
+ if (binary_supports_yuv_ds) {
+ if (in_info != NULL) {
+ isp_tmp_internal_width = in_info->res.width
+ + info->pipeline.left_cropping + binary_dvs_env.width;
+ isp_tmp_internal_height = in_info->res.height
+ + info->pipeline.top_cropping + binary_dvs_env.height;
+ }
+ } else if ((bds_out_info != NULL) && (out_info != NULL) &&
+		   /* TODO: hack to make the video_us case work. This should be
+		      reverted once a proper solution exists in the ISP */
+ (bds_out_info->res.width >= out_info->res.width)) {
+ isp_tmp_internal_width = bds_out_info->padded_width;
+ isp_tmp_internal_height = bds_out_info->res.height;
+ } else {
+ if (out_info != NULL) {
+ isp_tmp_internal_width = out_info->padded_width;
+ isp_tmp_internal_height = out_info->res.height;
+ }
+ }
+
+ /* We first calculate the resolutions used by the ISP. After that,
+ * we use those resolutions to compute sizes for tables etc. */
+ internal_res->width = __ISP_INTERNAL_WIDTH(isp_tmp_internal_width,
+ (int)binary_dvs_env.width,
+ info->pipeline.left_cropping, info->pipeline.mode,
+ info->pipeline.c_subsampling,
+ info->output.num_chunks, info->pipeline.pipelining);
+ internal_res->height = __ISP_INTERNAL_HEIGHT(isp_tmp_internal_height,
+ info->pipeline.top_cropping,
+ binary_dvs_env.height);
+#if defined(HAS_RES_MGR)
+ internal_res->height = (bds_out_info == NULL) ? internal_res->height : bds_out_info->res.height;
+ internal_res->width = (bds_out_info == NULL) ? internal_res->width: bds_out_info->res.width;
+#endif
+}
+
+#ifndef ISP2401
+/* Computation results of the origin coordinate of bayer on the shading table. */
+struct sh_css_shading_table_bayer_origin_compute_results {
+ uint32_t bayer_scale_hor_ratio_in; /* Horizontal ratio (in) of bayer scaling. */
+ uint32_t bayer_scale_hor_ratio_out; /* Horizontal ratio (out) of bayer scaling. */
+ uint32_t bayer_scale_ver_ratio_in; /* Vertical ratio (in) of bayer scaling. */
+ uint32_t bayer_scale_ver_ratio_out; /* Vertical ratio (out) of bayer scaling. */
+ uint32_t sc_bayer_origin_x_bqs_on_shading_table; /* X coordinate (in bqs) of bayer origin on shading table. */
+ uint32_t sc_bayer_origin_y_bqs_on_shading_table; /* Y coordinate (in bqs) of bayer origin on shading table. */
+#else
+/* Requirements for the shading correction. */
+struct sh_css_binary_sc_requirements {
+ /* Bayer scaling factor, for the scaling which is applied before shading correction. */
+ uint32_t bayer_scale_hor_ratio_in; /* Horizontal ratio (in) of scaling applied BEFORE shading correction. */
+ uint32_t bayer_scale_hor_ratio_out; /* Horizontal ratio (out) of scaling applied BEFORE shading correction. */
+ uint32_t bayer_scale_ver_ratio_in; /* Vertical ratio (in) of scaling applied BEFORE shading correction. */
+ uint32_t bayer_scale_ver_ratio_out; /* Vertical ratio (out) of scaling applied BEFORE shading correction. */
+
+ /* ISP internal frame is composed of the real sensor data and the padding data. */
+ uint32_t sensor_data_origin_x_bqs_on_internal; /* X origin (in bqs) of sensor data on internal frame
+ at shading correction. */
+ uint32_t sensor_data_origin_y_bqs_on_internal; /* Y origin (in bqs) of sensor data on internal frame
+ at shading correction. */
+#endif
+};
+
+/* Get the requirements for the shading correction. */
+static enum ia_css_err
+#ifndef ISP2401
+ia_css_binary_compute_shading_table_bayer_origin(
+ const struct ia_css_binary *binary, /* [in] */
+ unsigned int required_bds_factor, /* [in] */
+ const struct ia_css_stream_config *stream_config, /* [in] */
+ struct sh_css_shading_table_bayer_origin_compute_results *res) /* [out] */
+#else
+sh_css_binary_get_sc_requirements(
+ const struct ia_css_binary *binary, /* [in] */
+ unsigned int required_bds_factor, /* [in] */
+ const struct ia_css_stream_config *stream_config, /* [in] */
+ struct sh_css_binary_sc_requirements *scr) /* [out] */
+#endif
+{
+ enum ia_css_err err;
+
+#ifndef ISP2401
+ /* Numerator and denominator of the fixed bayer downscaling factor.
+ (numerator >= denominator) */
+#else
+ /* Numerator and denominator of the fixed bayer downscaling factor. (numerator >= denominator) */
+#endif
+ unsigned int bds_num, bds_den;
+
+#ifndef ISP2401
+ /* Horizontal/Vertical ratio of bayer scaling
+ between input area and output area. */
+ unsigned int bs_hor_ratio_in;
+ unsigned int bs_hor_ratio_out;
+ unsigned int bs_ver_ratio_in;
+ unsigned int bs_ver_ratio_out;
+#else
+ /* Horizontal/Vertical ratio of bayer scaling between input area and output area. */
+ unsigned int bs_hor_ratio_in, bs_hor_ratio_out, bs_ver_ratio_in, bs_ver_ratio_out;
+#endif
+
+ /* Left padding set by InputFormatter. */
+#ifndef ISP2401
+ unsigned int left_padding_bqs; /* in bqs */
+#else
+ unsigned int left_padding_bqs;
+#endif
+
+#ifndef ISP2401
+ /* Flag for the NEED_BDS_FACTOR_2_00 macro defined in isp kernels. */
+ unsigned int need_bds_factor_2_00;
+
+ /* Left padding adjusted inside the isp. */
+ unsigned int left_padding_adjusted_bqs; /* in bqs */
+
+	/* Bad pixels caused by filters.
+	   An NxN filter (before/after bayer scaling) shifts the image position
+	   to the right/bottom by a few pixels.
+	   This causes bad pixels at the left/top sides,
+	   and the effective bayer size decreases. */
+ unsigned int bad_bqs_on_left_before_bs; /* in bqs */
+ unsigned int bad_bqs_on_left_after_bs; /* in bqs */
+ unsigned int bad_bqs_on_top_before_bs; /* in bqs */
+ unsigned int bad_bqs_on_top_after_bs; /* in bqs */
+
+ /* Get the numerator and denominator of bayer downscaling factor. */
+ err = sh_css_bds_factor_get_numerator_denominator
+ (required_bds_factor, &bds_num, &bds_den);
+ if (err != IA_CSS_SUCCESS)
+#else
+ /* Flags corresponding to NEED_BDS_FACTOR_2_00/NEED_BDS_FACTOR_1_50/NEED_BDS_FACTOR_1_25 macros
+ * defined in isp kernels. */
+ unsigned int need_bds_factor_2_00, need_bds_factor_1_50, need_bds_factor_1_25;
+
+ /* Left padding adjusted inside the isp kernels. */
+ unsigned int left_padding_adjusted_bqs;
+
+	/* Top padding added inside the isp kernel for bayer downscaling binaries. */
+ unsigned int top_padding_bqs;
+
+ /* Bayer downscaling factor 1.0 by fixed-point. */
+ int bds_frac_acc = FRAC_ACC; /* FRAC_ACC is defined in ia_css_fixedbds_param.h. */
+
+	/* Right/Down shift amount caused by filters applied BEFORE shading correction. */
+ unsigned int right_shift_bqs_before_bs; /* right shift before bayer scaling */
+ unsigned int right_shift_bqs_after_bs; /* right shift after bayer scaling */
+ unsigned int down_shift_bqs_before_bs; /* down shift before bayer scaling */
+ unsigned int down_shift_bqs_after_bs; /* down shift after bayer scaling */
+
+ /* Origin of the real sensor data area on the internal frame at shading correction. */
+ unsigned int sensor_data_origin_x_bqs_on_internal;
+ unsigned int sensor_data_origin_y_bqs_on_internal;
+
+ IA_CSS_ENTER_PRIVATE("binary=%p, required_bds_factor=%d, stream_config=%p",
+ binary, required_bds_factor, stream_config);
+
+ /* Get the numerator and denominator of the required bayer downscaling factor. */
+ err = sh_css_bds_factor_get_numerator_denominator(required_bds_factor, &bds_num, &bds_den);
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+#endif
+ return err;
+#ifdef ISP2401
+ }
+#endif
+
+#ifndef ISP2401
+ /* Set the horizontal/vertical ratio of bayer scaling
+ between input area and output area. */
+#else
+ IA_CSS_LOG("bds_num=%d, bds_den=%d", bds_num, bds_den);
+
+ /* Set the horizontal/vertical ratio of bayer scaling between input area and output area. */
+#endif
+ bs_hor_ratio_in = bds_num;
+ bs_hor_ratio_out = bds_den;
+ bs_ver_ratio_in = bds_num;
+ bs_ver_ratio_out = bds_den;
+
+#ifndef ISP2401
+ /* Set the left padding set by InputFormatter. (ifmtr.c) */
+#else
+ /* Set the left padding set by InputFormatter. (ia_css_ifmtr_configure() in ifmtr.c) */
+#endif
+ if (stream_config->left_padding == -1)
+ left_padding_bqs = _ISP_BQS(binary->left_padding);
+ else
+#ifndef ISP2401
+ left_padding_bqs = (unsigned int)((int)ISP_VEC_NELEMS
+ - _ISP_BQS(stream_config->left_padding));
+#else
+ left_padding_bqs = (unsigned int)((int)ISP_VEC_NELEMS - _ISP_BQS(stream_config->left_padding));
+#endif
+
+#ifndef ISP2401
+ /* Set the left padding adjusted inside the isp.
+ When bds_factor 2.00 is needed, some padding is added to left_padding
+ inside the isp, before bayer downscaling. (raw.isp.c)
+ (Hopefully, left_crop/left_padding/top_crop should be defined in css
+ appropriately, depending on bds_factor.)
+ */
+#else
+ IA_CSS_LOG("stream.left_padding=%d, binary.left_padding=%d, left_padding_bqs=%d",
+ stream_config->left_padding, binary->left_padding, left_padding_bqs);
+
+ /* Set the left padding adjusted inside the isp kernels.
+ * When the bds_factor isn't 1.00, the left padding size is adjusted inside the isp,
+ * before bayer downscaling. (scaled_hor_plane_index(), raw_compute_hphase() in raw.isp.c)
+ */
+#endif
+ need_bds_factor_2_00 = ((binary->info->sp.bds.supported_bds_factors &
+ (PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_2_00) |
+ PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_2_50) |
+ PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_3_00) |
+ PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_4_00) |
+ PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_4_50) |
+ PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_5_00) |
+ PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_6_00) |
+ PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_8_00))) != 0);
+
+#ifndef ISP2401
+ if (need_bds_factor_2_00 && binary->info->sp.pipeline.left_cropping > 0)
+ left_padding_adjusted_bqs = left_padding_bqs + ISP_VEC_NELEMS;
+ else
+#else
+ need_bds_factor_1_50 = ((binary->info->sp.bds.supported_bds_factors &
+ (PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_1_50) |
+ PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_2_25) |
+ PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_3_00) |
+ PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_4_50) |
+ PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_6_00))) != 0);
+
+ need_bds_factor_1_25 = ((binary->info->sp.bds.supported_bds_factors &
+ (PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_1_25) |
+ PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_2_50) |
+ PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_5_00))) != 0);
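+ /* Editor's note (assumption, not stated in the source): each composite factor
+  * above appears to decompose into the fixed 2.00/1.50/1.25 stages
+  * (e.g. 2.50 = 2.00 x 1.25, 3.00 = 2.00 x 1.50), which would explain why
+  * a single factor can set more than one of these flags. */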
+
+ if (binary->info->sp.pipeline.left_cropping > 0 &&
+ (need_bds_factor_2_00 || need_bds_factor_1_50 || need_bds_factor_1_25)) {
+ /*
+ * downscale 2.0 -> first_vec_adjusted_bqs = 128
+ * downscale 1.5 -> first_vec_adjusted_bqs = 96
+ * downscale 1.25 -> first_vec_adjusted_bqs = 80
+ */
+ unsigned int first_vec_adjusted_bqs
+ = ISP_VEC_NELEMS * bs_hor_ratio_in / bs_hor_ratio_out;
+ left_padding_adjusted_bqs = first_vec_adjusted_bqs
+ - _ISP_BQS(binary->info->sp.pipeline.left_cropping);
+ } else
+#endif
+ left_padding_adjusted_bqs = left_padding_bqs;
+
+#ifndef ISP2401
+ /* Currently, the bad pixels caused by filters before bayer scaling
+ are NOT considered, because their effect is subtle.
+ When some larger filter is used in the future,
+ the bad pixels will need to be considered.
+
+ Currently, when bds_factor isn't 1.00, a 3x3 anti-alias filter is applied
+ to each color plane (Gr/R/B/Gb) before bayer downscaling.
+ This filter moves each color plane to the right/bottom
+ by at most 1 pixel, depending on the downscaling factor.
+ */
+ bad_bqs_on_left_before_bs = 0;
+ bad_bqs_on_top_before_bs = 0;
+#else
+ IA_CSS_LOG("supported_bds_factors=%d, need_bds_factor:2_00=%d, 1_50=%d, 1_25=%d",
+ binary->info->sp.bds.supported_bds_factors,
+ need_bds_factor_2_00, need_bds_factor_1_50, need_bds_factor_1_25);
+ IA_CSS_LOG("left_cropping=%d, left_padding_adjusted_bqs=%d",
+ binary->info->sp.pipeline.left_cropping, left_padding_adjusted_bqs);
+
+ /* Set the top padding padded inside the isp kernel for bayer downscaling binaries.
+ * When the bds_factor isn't 1.00, the top padding is padded inside the isp
+ * before bayer downscaling, because the top cropping size (input margin) is not enough.
+ * (calculate_input_line(), raw_compute_vphase(), dma_read_raw() in raw.isp.c)
+ * NOTE: In dma_read_raw(), the factor passed to raw_compute_vphase() is obtained from get_bds_factor_for_dma_read().
+ * This factor is BDS_FPVAL_100/BDS_FPVAL_125/BDS_FPVAL_150/BDS_FPVAL_200.
+ */
+ top_padding_bqs = 0;
+ if (binary->info->sp.pipeline.top_cropping > 0 &&
+ (required_bds_factor == SH_CSS_BDS_FACTOR_1_25 ||
+ required_bds_factor == SH_CSS_BDS_FACTOR_1_50 ||
+ required_bds_factor == SH_CSS_BDS_FACTOR_2_00)) {
+ /* Calculation from calculate_input_line() and raw_compute_vphase() in raw.isp.c. */
+ int top_cropping_bqs = _ISP_BQS(binary->info->sp.pipeline.top_cropping);
+ /* top cropping (in bqs) */
+ int factor = bds_num * bds_frac_acc / bds_den; /* downscaling factor by fixed-point */
+ int top_padding_bqsxfrac_acc = (top_cropping_bqs * factor - top_cropping_bqs * bds_frac_acc)
+ + (2 * bds_frac_acc - factor); /* top padding by fixed-point (in bqs) */
+
+ top_padding_bqs = (unsigned int)((top_padding_bqsxfrac_acc + bds_frac_acc/2 - 1) / bds_frac_acc);
+ }
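+ /* Editor's note: an illustrative sketch of the formula above, using assumed
+  * values (not taken from the source): FRAC_ACC == 8, top_cropping_bqs == 6
+  * and bds factor 1.50 (bds_num/bds_den == 3/2):
+  *   factor = 3 * 8 / 2 = 12
+  *   top_padding_bqsxfrac_acc = (6*12 - 6*8) + (2*8 - 12) = 24 + 4 = 28
+  *   top_padding_bqs = (28 + 8/2 - 1) / 8 = 31 / 8 = 3
+  */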
+
+ IA_CSS_LOG("top_cropping=%d, top_padding_bqs=%d", binary->info->sp.pipeline.top_cropping, top_padding_bqs);
+
+ /* Set the right/down shift amount caused by filters applied BEFORE bayer scaling
+ * (bayer scaling itself is applied BEFORE shading correction).
+ *
+ * When the bds_factor isn't 1.00, a 3x3 anti-alias filter is applied to each color plane (Gr/R/B/Gb)
+ * before bayer downscaling.
+ * This filter shifts each color plane (Gr/R/B/Gb) to the right/down by 1 pixel.
+ */
+ right_shift_bqs_before_bs = 0;
+ down_shift_bqs_before_bs = 0;
+#endif
+
+#ifndef ISP2401
+ /* Currently, the bad pixels caused by filters after bayer scaling
+ are NOT considered, because their effect is subtle.
+ When some larger filter is used in the future,
+ the bad pixels will need to be considered.
+
+ Currently, when DPC&BNR is processed between bayer scaling and
+ shading correction, DPC&BNR moves each color plane to the
+ right/bottom by 1 pixel.
+ */
+ bad_bqs_on_left_after_bs = 0;
+ bad_bqs_on_top_after_bs = 0;
+#else
+ if (need_bds_factor_2_00 || need_bds_factor_1_50 || need_bds_factor_1_25) {
+ right_shift_bqs_before_bs = 1;
+ down_shift_bqs_before_bs = 1;
+ }
+
+ IA_CSS_LOG("right_shift_bqs_before_bs=%d, down_shift_bqs_before_bs=%d",
+ right_shift_bqs_before_bs, down_shift_bqs_before_bs);
+
+ /* Set the right/down shift amount caused by filters applied AFTER bayer scaling
+ * (bayer scaling itself is applied BEFORE shading correction).
+ *
+ * When DPC&BNR is processed between bayer scaling and shading correction,
+ * DPC&BNR moves each color plane (Gr/R/B/Gb) to the right/down by 1 pixel.
+ */
+ right_shift_bqs_after_bs = 0;
+ down_shift_bqs_after_bs = 0;
+#endif
+
+#ifndef ISP2401
+ /* Calculate the origin of bayer (real sensor data area)
+ located on the shading table during the shading correction. */
+ res->sc_bayer_origin_x_bqs_on_shading_table
+ = ((left_padding_adjusted_bqs + bad_bqs_on_left_before_bs)
+ * bs_hor_ratio_out + bs_hor_ratio_in/2) / bs_hor_ratio_in
+ + bad_bqs_on_left_after_bs;
+ /* "+ bs_hor_ratio_in/2": rounding for division by bs_hor_ratio_in */
+ res->sc_bayer_origin_y_bqs_on_shading_table
+ = (bad_bqs_on_top_before_bs
+ * bs_ver_ratio_out + bs_ver_ratio_in/2) / bs_ver_ratio_in
+ + bad_bqs_on_top_after_bs;
+ /* "+ bs_ver_ratio_in/2": rounding for division by bs_ver_ratio_in */
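+ /* Editor's note: a numeric sketch of the rounding above, using assumed values
+  * (not from the source): bds factor 2.00 (ratio_out/ratio_in = 1/2),
+  * left_padding_adjusted_bqs = 128 and no bad pixels:
+  *   x = (128 * 1 + 2/2) / 2 + 0 = 129 / 2 = 64 (integer division)
+  */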
+
+ res->bayer_scale_hor_ratio_in = (uint32_t)bs_hor_ratio_in;
+ res->bayer_scale_hor_ratio_out = (uint32_t)bs_hor_ratio_out;
+ res->bayer_scale_ver_ratio_in = (uint32_t)bs_ver_ratio_in;
+ res->bayer_scale_ver_ratio_out = (uint32_t)bs_ver_ratio_out;
+#else
+ if (binary->info->mem_offsets.offsets.param->dmem.dp.size != 0) { /* if DPC&BNR is enabled in the binary */
+ right_shift_bqs_after_bs = 1;
+ down_shift_bqs_after_bs = 1;
+ }
+
+ IA_CSS_LOG("right_shift_bqs_after_bs=%d, down_shift_bqs_after_bs=%d",
+ right_shift_bqs_after_bs, down_shift_bqs_after_bs);
+
+ /* Set the origin of the sensor data area on the internal frame at shading correction. */
+ {
+ unsigned int bs_frac = bds_frac_acc; /* scaling factor 1.0 in fixed point */
+ unsigned int bs_out, bs_in; /* scaling ratio in fixed point */
+
+ bs_out = bs_hor_ratio_out * bs_frac;
+ bs_in = bs_hor_ratio_in * bs_frac;
+ sensor_data_origin_x_bqs_on_internal
+ = ((left_padding_adjusted_bqs + right_shift_bqs_before_bs) * bs_out + bs_in/2) / bs_in
+ + right_shift_bqs_after_bs; /* "+ bs_in/2": rounding */
+
+ bs_out = bs_ver_ratio_out * bs_frac;
+ bs_in = bs_ver_ratio_in * bs_frac;
+ sensor_data_origin_y_bqs_on_internal
+ = ((top_padding_bqs + down_shift_bqs_before_bs) * bs_out + bs_in/2) / bs_in
+ + down_shift_bqs_after_bs; /* "+ bs_in/2": rounding */
+ }
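+ /* Editor's note: with assumed values (not from the source) of
+  * left_padding_adjusted_bqs = 126, shift before/after bayer scaling = 1 each,
+  * and bds factor 2.00 (bs_out/bs_in = 8/16):
+  *   x = ((126 + 1) * 8 + 16/2) / 16 + 1 = 1024 / 16 + 1 = 65 bqs
+  */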
+
+ scr->bayer_scale_hor_ratio_in = (uint32_t)bs_hor_ratio_in;
+ scr->bayer_scale_hor_ratio_out = (uint32_t)bs_hor_ratio_out;
+ scr->bayer_scale_ver_ratio_in = (uint32_t)bs_ver_ratio_in;
+ scr->bayer_scale_ver_ratio_out = (uint32_t)bs_ver_ratio_out;
+ scr->sensor_data_origin_x_bqs_on_internal = (uint32_t)sensor_data_origin_x_bqs_on_internal;
+ scr->sensor_data_origin_y_bqs_on_internal = (uint32_t)sensor_data_origin_y_bqs_on_internal;
+
+ IA_CSS_LOG("sc_requirements: %d, %d, %d, %d, %d, %d",
+ scr->bayer_scale_hor_ratio_in, scr->bayer_scale_hor_ratio_out,
+ scr->bayer_scale_ver_ratio_in, scr->bayer_scale_ver_ratio_out,
+ scr->sensor_data_origin_x_bqs_on_internal, scr->sensor_data_origin_y_bqs_on_internal);
+#endif
+
+#ifdef ISP2401
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+#endif
+ return err;
+}
+
+/* Get the shading information of Shading Correction Type 1. */
+static enum ia_css_err
+ia_css_binary_get_shading_info_type_1(const struct ia_css_binary *binary, /* [in] */
+ unsigned int required_bds_factor, /* [in] */
+ const struct ia_css_stream_config *stream_config, /* [in] */
+#ifndef ISP2401
+ struct ia_css_shading_info *info) /* [out] */
+#else
+ struct ia_css_shading_info *shading_info, /* [out] */
+ struct ia_css_pipe_config *pipe_config) /* [out] */
+#endif
+{
+ enum ia_css_err err;
+#ifndef ISP2401
+ struct sh_css_shading_table_bayer_origin_compute_results res;
+#else
+ struct sh_css_binary_sc_requirements scr;
+ struct ia_css_shading_info default_shading_info_type_1 = DEFAULT_SHADING_INFO_TYPE_1;
+#endif
+
+#ifndef ISP2401
+ assert(binary != NULL);
+ assert(info != NULL);
+#else
+ uint32_t in_width_bqs, in_height_bqs, internal_width_bqs, internal_height_bqs;
+ uint32_t num_hor_grids, num_ver_grids, bqs_per_grid_cell, tbl_width_bqs, tbl_height_bqs;
+ uint32_t sensor_org_x_bqs_on_internal, sensor_org_y_bqs_on_internal, sensor_width_bqs, sensor_height_bqs;
+ uint32_t sensor_center_x_bqs_on_internal, sensor_center_y_bqs_on_internal;
+ uint32_t left, right, upper, lower;
+ uint32_t adjust_left, adjust_right, adjust_upper, adjust_lower, adjust_width_bqs, adjust_height_bqs;
+ uint32_t internal_org_x_bqs_on_tbl, internal_org_y_bqs_on_tbl;
+ uint32_t sensor_org_x_bqs_on_tbl, sensor_org_y_bqs_on_tbl;
+#endif
+
+#ifndef ISP2401
+ info->type = IA_CSS_SHADING_CORRECTION_TYPE_1;
+#else
+ assert(binary != NULL);
+ assert(stream_config != NULL);
+ assert(shading_info != NULL);
+ assert(pipe_config != NULL);
+#endif
+
+#ifndef ISP2401
+ info->info.type_1.enable = binary->info->sp.enable.sc;
+ info->info.type_1.num_hor_grids = binary->sctbl_width_per_color;
+ info->info.type_1.num_ver_grids = binary->sctbl_height;
+ info->info.type_1.bqs_per_grid_cell = (1 << binary->deci_factor_log2);
+#else
+ IA_CSS_ENTER_PRIVATE("binary=%p, required_bds_factor=%d, stream_config=%p",
+ binary, required_bds_factor, stream_config);
+#endif
+
+ /* Initialize with default values. */
+#ifndef ISP2401
+ info->info.type_1.bayer_scale_hor_ratio_in = 1;
+ info->info.type_1.bayer_scale_hor_ratio_out = 1;
+ info->info.type_1.bayer_scale_ver_ratio_in = 1;
+ info->info.type_1.bayer_scale_ver_ratio_out = 1;
+ info->info.type_1.sc_bayer_origin_x_bqs_on_shading_table = 0;
+ info->info.type_1.sc_bayer_origin_y_bqs_on_shading_table = 0;
+
+ err = ia_css_binary_compute_shading_table_bayer_origin(
+ binary,
+ required_bds_factor,
+ stream_config,
+ &res);
+ if (err != IA_CSS_SUCCESS)
+#else
+ *shading_info = default_shading_info_type_1;
+
+ err = sh_css_binary_get_sc_requirements(binary, required_bds_factor, stream_config, &scr);
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+#endif
+ return err;
+#ifdef ISP2401
+ }
+
+ IA_CSS_LOG("binary: id=%d, sctbl=%dx%d, deci=%d",
+ binary->info->sp.id, binary->sctbl_width_per_color, binary->sctbl_height, binary->deci_factor_log2);
+ IA_CSS_LOG("binary: in=%dx%d, in_padded_w=%d, int=%dx%d, int_padded_w=%d, out=%dx%d, out_padded_w=%d",
+ binary->in_frame_info.res.width, binary->in_frame_info.res.height, binary->in_frame_info.padded_width,
+ binary->internal_frame_info.res.width, binary->internal_frame_info.res.height,
+ binary->internal_frame_info.padded_width,
+ binary->out_frame_info[0].res.width, binary->out_frame_info[0].res.height,
+ binary->out_frame_info[0].padded_width);
+
+ /* Set the input size from sensor, which includes left/top crop size. */
+ in_width_bqs = _ISP_BQS(binary->in_frame_info.res.width);
+ in_height_bqs = _ISP_BQS(binary->in_frame_info.res.height);
+
+ /* Frame size internally used in ISP, including sensor data and padding.
+ * This is the frame size to which the shading correction is applied.
+ */
+ internal_width_bqs = _ISP_BQS(binary->internal_frame_info.res.width);
+ internal_height_bqs = _ISP_BQS(binary->internal_frame_info.res.height);
+
+ /* Shading table. */
+ num_hor_grids = binary->sctbl_width_per_color;
+ num_ver_grids = binary->sctbl_height;
+ bqs_per_grid_cell = (1 << binary->deci_factor_log2);
+ tbl_width_bqs = (num_hor_grids - 1) * bqs_per_grid_cell;
+ tbl_height_bqs = (num_ver_grids - 1) * bqs_per_grid_cell;
+#endif
+
+#ifndef ISP2401
+ info->info.type_1.bayer_scale_hor_ratio_in = res.bayer_scale_hor_ratio_in;
+ info->info.type_1.bayer_scale_hor_ratio_out = res.bayer_scale_hor_ratio_out;
+ info->info.type_1.bayer_scale_ver_ratio_in = res.bayer_scale_ver_ratio_in;
+ info->info.type_1.bayer_scale_ver_ratio_out = res.bayer_scale_ver_ratio_out;
+ info->info.type_1.sc_bayer_origin_x_bqs_on_shading_table = res.sc_bayer_origin_x_bqs_on_shading_table;
+ info->info.type_1.sc_bayer_origin_y_bqs_on_shading_table = res.sc_bayer_origin_y_bqs_on_shading_table;
+#else
+ IA_CSS_LOG("tbl_width_bqs=%d, tbl_height_bqs=%d", tbl_width_bqs, tbl_height_bqs);
+#endif
+
+#ifdef ISP2401
+ /* Real sensor data area on the internal frame at shading correction.
+ * Filters and scaling are applied to the internal frame before shading correction, depending on the binary.
+ */
+ sensor_org_x_bqs_on_internal = scr.sensor_data_origin_x_bqs_on_internal;
+ sensor_org_y_bqs_on_internal = scr.sensor_data_origin_y_bqs_on_internal;
+ {
+ unsigned int bs_frac = 8; /* scaling factor 1.0 in fixed point (8 == FRAC_ACC macro in ISP) */
+ unsigned int bs_out, bs_in; /* scaling ratio in fixed point */
+
+ bs_out = scr.bayer_scale_hor_ratio_out * bs_frac;
+ bs_in = scr.bayer_scale_hor_ratio_in * bs_frac;
+ sensor_width_bqs = (in_width_bqs * bs_out + bs_in/2) / bs_in; /* "+ bs_in/2": rounding */
+
+ bs_out = scr.bayer_scale_ver_ratio_out * bs_frac;
+ bs_in = scr.bayer_scale_ver_ratio_in * bs_frac;
+ sensor_height_bqs = (in_height_bqs * bs_out + bs_in/2) / bs_in; /* "+ bs_in/2": rounding */
+ }
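+ /* Editor's note: e.g. with assumed values in_width_bqs = 1920 and bds factor
+  * 2.00 (bs_out/bs_in = 8/16): sensor_width_bqs = (1920 * 8 + 16/2) / 16 = 960.
+  */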
+
+ /* Center of the sensor data on the internal frame at shading correction. */
+ sensor_center_x_bqs_on_internal = sensor_org_x_bqs_on_internal + sensor_width_bqs / 2;
+ sensor_center_y_bqs_on_internal = sensor_org_y_bqs_on_internal + sensor_height_bqs / 2;
+
+ /* Size of left/right/upper/lower sides of the sensor center on the internal frame. */
+ left = sensor_center_x_bqs_on_internal;
+ right = internal_width_bqs - sensor_center_x_bqs_on_internal;
+ upper = sensor_center_y_bqs_on_internal;
+ lower = internal_height_bqs - sensor_center_y_bqs_on_internal;
+
+ /* Align the size of left/right/upper/lower sides to a multiple of the grid cell size. */
+ adjust_left = CEIL_MUL(left, bqs_per_grid_cell);
+ adjust_right = CEIL_MUL(right, bqs_per_grid_cell);
+ adjust_upper = CEIL_MUL(upper, bqs_per_grid_cell);
+ adjust_lower = CEIL_MUL(lower, bqs_per_grid_cell);
+
+ /* Shading table should cover the adjusted frame size. */
+ adjust_width_bqs = adjust_left + adjust_right;
+ adjust_height_bqs = adjust_upper + adjust_lower;
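+ /* Editor's note: e.g. with an assumed grid cell of 16 bqs (deci_factor_log2 = 4),
+  * left = 980 and right = 940:
+  *   adjust_left  = CEIL_MUL(980, 16) = 992
+  *   adjust_right = CEIL_MUL(940, 16) = 944
+  *   adjust_width_bqs = 992 + 944 = 1936
+  */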
+
+ IA_CSS_LOG("adjust_width_bqs=%d, adjust_height_bqs=%d", adjust_width_bqs, adjust_height_bqs);
+
+ if (adjust_width_bqs > tbl_width_bqs || adjust_height_bqs > tbl_height_bqs) {
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_INTERNAL_ERROR);
+ return IA_CSS_ERR_INTERNAL_ERROR;
+ }
+
+ /* Origin of the internal frame on the shading table. */
+ internal_org_x_bqs_on_tbl = adjust_left - left;
+ internal_org_y_bqs_on_tbl = adjust_upper - upper;
+
+ /* Origin of the real sensor data area on the shading table. */
+ sensor_org_x_bqs_on_tbl = internal_org_x_bqs_on_tbl + sensor_org_x_bqs_on_internal;
+ sensor_org_y_bqs_on_tbl = internal_org_y_bqs_on_tbl + sensor_org_y_bqs_on_internal;
+
+ /* The shading information necessary as API is stored in the shading_info. */
+ shading_info->info.type_1.num_hor_grids = num_hor_grids;
+ shading_info->info.type_1.num_ver_grids = num_ver_grids;
+ shading_info->info.type_1.bqs_per_grid_cell = bqs_per_grid_cell;
+
+ shading_info->info.type_1.bayer_scale_hor_ratio_in = scr.bayer_scale_hor_ratio_in;
+ shading_info->info.type_1.bayer_scale_hor_ratio_out = scr.bayer_scale_hor_ratio_out;
+ shading_info->info.type_1.bayer_scale_ver_ratio_in = scr.bayer_scale_ver_ratio_in;
+ shading_info->info.type_1.bayer_scale_ver_ratio_out = scr.bayer_scale_ver_ratio_out;
+
+ shading_info->info.type_1.isp_input_sensor_data_res_bqs.width = in_width_bqs;
+ shading_info->info.type_1.isp_input_sensor_data_res_bqs.height = in_height_bqs;
+
+ shading_info->info.type_1.sensor_data_res_bqs.width = sensor_width_bqs;
+ shading_info->info.type_1.sensor_data_res_bqs.height = sensor_height_bqs;
+
+ shading_info->info.type_1.sensor_data_origin_bqs_on_sctbl.x = (int32_t)sensor_org_x_bqs_on_tbl;
+ shading_info->info.type_1.sensor_data_origin_bqs_on_sctbl.y = (int32_t)sensor_org_y_bqs_on_tbl;
+
+ /* The shading information related to ISP (but, not necessary as API) is stored in the pipe_config. */
+ pipe_config->internal_frame_origin_bqs_on_sctbl.x = (int32_t)internal_org_x_bqs_on_tbl;
+ pipe_config->internal_frame_origin_bqs_on_sctbl.y = (int32_t)internal_org_y_bqs_on_tbl;
+
+ IA_CSS_LOG("shading_info: grids=%dx%d, cell=%d, scale=%d,%d,%d,%d, input=%dx%d, data=%dx%d, origin=(%d,%d)",
+ shading_info->info.type_1.num_hor_grids,
+ shading_info->info.type_1.num_ver_grids,
+ shading_info->info.type_1.bqs_per_grid_cell,
+ shading_info->info.type_1.bayer_scale_hor_ratio_in,
+ shading_info->info.type_1.bayer_scale_hor_ratio_out,
+ shading_info->info.type_1.bayer_scale_ver_ratio_in,
+ shading_info->info.type_1.bayer_scale_ver_ratio_out,
+ shading_info->info.type_1.isp_input_sensor_data_res_bqs.width,
+ shading_info->info.type_1.isp_input_sensor_data_res_bqs.height,
+ shading_info->info.type_1.sensor_data_res_bqs.width,
+ shading_info->info.type_1.sensor_data_res_bqs.height,
+ shading_info->info.type_1.sensor_data_origin_bqs_on_sctbl.x,
+ shading_info->info.type_1.sensor_data_origin_bqs_on_sctbl.y);
+
+ IA_CSS_LOG("pipe_config: origin=(%d,%d)",
+ pipe_config->internal_frame_origin_bqs_on_sctbl.x,
+ pipe_config->internal_frame_origin_bqs_on_sctbl.y);
+
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+#endif
+ return err;
+}
+
+enum ia_css_err
+ia_css_binary_get_shading_info(const struct ia_css_binary *binary, /* [in] */
+ enum ia_css_shading_correction_type type, /* [in] */
+ unsigned int required_bds_factor, /* [in] */
+ const struct ia_css_stream_config *stream_config, /* [in] */
+#ifndef ISP2401
+ struct ia_css_shading_info *info) /* [out] */
+#else
+ struct ia_css_shading_info *shading_info, /* [out] */
+ struct ia_css_pipe_config *pipe_config) /* [out] */
+#endif
+{
+ enum ia_css_err err;
+
+ assert(binary != NULL);
+#ifndef ISP2401
+ assert(info != NULL);
+#else
+ assert(shading_info != NULL);
+
+ IA_CSS_ENTER_PRIVATE("binary=%p, type=%d, required_bds_factor=%d, stream_config=%p",
+ binary, type, required_bds_factor, stream_config);
+#endif
+
+ if (type == IA_CSS_SHADING_CORRECTION_TYPE_1)
+#ifndef ISP2401
+ err = ia_css_binary_get_shading_info_type_1(binary, required_bds_factor, stream_config, info);
+#else
+ err = ia_css_binary_get_shading_info_type_1(binary, required_bds_factor, stream_config,
+ shading_info, pipe_config);
+#endif
+
+ /* Other function calls can be added here when other shading correction types are added in the future. */
+
+ else
+ err = IA_CSS_ERR_NOT_SUPPORTED;
+
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+}
+
+static void sh_css_binary_common_grid_info(const struct ia_css_binary *binary,
+ struct ia_css_grid_info *info)
+{
+ assert(binary != NULL);
+ assert(info != NULL);
+
+ info->isp_in_width = binary->internal_frame_info.res.width;
+ info->isp_in_height = binary->internal_frame_info.res.height;
+
+ info->vamem_type = IA_CSS_VAMEM_TYPE_2;
+}
+
+void
+ia_css_binary_dvs_grid_info(const struct ia_css_binary *binary,
+ struct ia_css_grid_info *info,
+ struct ia_css_pipe *pipe)
+{
+ struct ia_css_dvs_grid_info *dvs_info;
+
+ (void)pipe;
+ assert(binary != NULL);
+ assert(info != NULL);
+
+ dvs_info = &info->dvs_grid.dvs_grid_info;
+
+ /* for DIS, we use a division instead of a ceil_div. If this is smaller
+ * than the 3a grid size, it indicates that the outer values are not
+ * valid for DIS.
+ */
+ dvs_info->enable = binary->info->sp.enable.dis;
+ dvs_info->width = binary->dis.grid.dim.width;
+ dvs_info->height = binary->dis.grid.dim.height;
+ dvs_info->aligned_width = binary->dis.grid.pad.width;
+ dvs_info->aligned_height = binary->dis.grid.pad.height;
+ dvs_info->bqs_per_grid_cell = 1 << binary->dis.deci_factor_log2;
+ dvs_info->num_hor_coefs = binary->dis.coef.dim.width;
+ dvs_info->num_ver_coefs = binary->dis.coef.dim.height;
+
+ sh_css_binary_common_grid_info(binary, info);
+}
+
+void
+ia_css_binary_dvs_stat_grid_info(
+ const struct ia_css_binary *binary,
+ struct ia_css_grid_info *info,
+ struct ia_css_pipe *pipe)
+{
+#if defined(HAS_RES_MGR)
+ struct ia_css_dvs_stat_grid_info *dvs_stat_info;
+ unsigned int i;
+
+ assert(binary != NULL);
+ assert(info != NULL);
+ dvs_stat_info = &info->dvs_grid.dvs_stat_grid_info;
+
+ if (binary->info->sp.enable.dvs_stats) {
+ for (i = 0; i < IA_CSS_SKC_DVS_STAT_NUM_OF_LEVELS; i++) {
+ dvs_stat_info->grd_cfg[i].grd_start.enable = 1;
+ }
+ ia_css_dvs_stat_grid_calculate(pipe, dvs_stat_info);
+ }
+ else {
+ memset(dvs_stat_info, 0, sizeof(struct ia_css_dvs_stat_grid_info));
+ }
+
+#endif
+ (void)pipe;
+ sh_css_binary_common_grid_info(binary, info);
+ return;
+}
+
+enum ia_css_err
+ia_css_binary_3a_grid_info(const struct ia_css_binary *binary,
+ struct ia_css_grid_info *info,
+ struct ia_css_pipe *pipe)
+{
+ struct ia_css_3a_grid_info *s3a_info;
+ enum ia_css_err err = IA_CSS_SUCCESS;
+
+ IA_CSS_ENTER_PRIVATE("binary=%p, info=%p, pipe=%p",
+ binary, info, pipe);
+
+ assert(binary != NULL);
+ assert(info != NULL);
+ s3a_info = &info->s3a_grid;
+
+
+ /* 3A statistics grid */
+ s3a_info->enable = binary->info->sp.enable.s3a;
+ s3a_info->width = binary->s3atbl_width;
+ s3a_info->height = binary->s3atbl_height;
+ s3a_info->aligned_width = binary->s3atbl_isp_width;
+ s3a_info->aligned_height = binary->s3atbl_isp_height;
+ s3a_info->bqs_per_grid_cell = (1 << binary->deci_factor_log2);
+ s3a_info->deci_factor_log2 = binary->deci_factor_log2;
+ s3a_info->elem_bit_depth = SH_CSS_BAYER_BITS;
+ s3a_info->use_dmem = binary->info->sp.s3a.s3atbl_use_dmem;
+#if defined(HAS_NO_HMEM)
+ s3a_info->has_histogram = 1;
+#else
+ s3a_info->has_histogram = 0;
+#endif
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+}
+
+static void
+binary_init_pc_histogram(struct sh_css_pc_histogram *histo)
+{
+ assert(histo != NULL);
+
+ histo->length = 0;
+ histo->run = NULL;
+ histo->stall = NULL;
+}
+
+static void
+binary_init_metrics(struct sh_css_binary_metrics *metrics,
+ const struct ia_css_binary_info *info)
+{
+ assert(metrics != NULL);
+ assert(info != NULL);
+
+ metrics->mode = info->pipeline.mode;
+ metrics->id = info->id;
+ metrics->next = NULL;
+ binary_init_pc_histogram(&metrics->isp_histogram);
+ binary_init_pc_histogram(&metrics->sp_histogram);
+}
+
+/* move to host part of output module */
+static bool
+binary_supports_output_format(const struct ia_css_binary_xinfo *info,
+ enum ia_css_frame_format format)
+{
+ int i;
+
+ assert(info != NULL);
+
+ for (i = 0; i < info->num_output_formats; i++) {
+ if (info->output_formats[i] == format)
+ return true;
+ }
+ return false;
+}
+
+#ifdef ISP2401
+static bool
+binary_supports_input_format(const struct ia_css_binary_xinfo *info,
+ enum ia_css_stream_format format)
+{
+
+ assert(info != NULL);
+ (void)format;
+
+ return true;
+}
+#endif
+
+static bool
+binary_supports_vf_format(const struct ia_css_binary_xinfo *info,
+ enum ia_css_frame_format format)
+{
+ int i;
+
+ assert(info != NULL);
+
+ for (i = 0; i < info->num_vf_formats; i++) {
+ if (info->vf_formats[i] == format)
+ return true;
+ }
+ return false;
+}
+
+/* move to host part of bds module */
+static bool
+supports_bds_factor(uint32_t supported_factors,
+ uint32_t bds_factor)
+{
+ return ((supported_factors & PACK_BDS_FACTOR(bds_factor)) != 0);
+}
+
+static enum ia_css_err
+binary_init_info(struct ia_css_binary_xinfo *info, unsigned int i,
+ bool *binary_found)
+{
+ const unsigned char *blob = sh_css_blob_info[i].blob;
+ unsigned size = sh_css_blob_info[i].header.blob.size;
+
+ if ((info == NULL) || (binary_found == NULL))
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+
+ *info = sh_css_blob_info[i].header.info.isp;
+ *binary_found = blob != NULL;
+ info->blob_index = i;
+ /* we don't have this binary, skip it */
+ if (!size)
+ return IA_CSS_SUCCESS;
+
+ info->xmem_addr = sh_css_load_blob(blob, size);
+ if (!info->xmem_addr)
+ return IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+ return IA_CSS_SUCCESS;
+}
+
+/* When binaries are put at the beginning, they will only
+ * be selected if no other primary matches.
+ */
+enum ia_css_err
+ia_css_binary_init_infos(void)
+{
+ unsigned int i;
+ unsigned int num_of_isp_binaries = sh_css_num_binaries - NUM_OF_SPS - NUM_OF_BLS;
+
+ if (num_of_isp_binaries == 0)
+ return IA_CSS_SUCCESS;
+
+ all_binaries = sh_css_malloc(num_of_isp_binaries *
+ sizeof(*all_binaries));
+ if (all_binaries == NULL)
+ return IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+
+ for (i = 0; i < num_of_isp_binaries; i++) {
+ enum ia_css_err ret;
+ struct ia_css_binary_xinfo *binary = &all_binaries[i];
+ bool binary_found;
+
+ ret = binary_init_info(binary, i, &binary_found);
+ if (ret != IA_CSS_SUCCESS)
+ return ret;
+ if (!binary_found)
+ continue;
+ /* Prepend new binary information */
+ binary->next = binary_infos[binary->sp.pipeline.mode];
+ binary_infos[binary->sp.pipeline.mode] = binary;
+ binary->blob = &sh_css_blob_info[i];
+ binary->mem_offsets = sh_css_blob_info[i].mem_offsets;
+ }
+ return IA_CSS_SUCCESS;
+}
+
+enum ia_css_err
+ia_css_binary_uninit(void)
+{
+ unsigned int i;
+ struct ia_css_binary_xinfo *b;
+
+ for (i = 0; i < IA_CSS_BINARY_NUM_MODES; i++) {
+ for (b = binary_infos[i]; b; b = b->next) {
+ if (b->xmem_addr)
+ hmm_free(b->xmem_addr);
+ b->xmem_addr = mmgr_NULL;
+ }
+ binary_infos[i] = NULL;
+ }
+ sh_css_free(all_binaries);
+ return IA_CSS_SUCCESS;
+}
+
+/** @brief Compute decimation factor for 3A statistics and shading correction.
+ *
+ * @param[in] width Frame width in pixels.
+ * @param[in] height Frame height in pixels.
+ * @return Log2 of decimation factor (= grid cell size) in bayer quads.
+ */
+static int
+binary_grid_deci_factor_log2(int width, int height)
+{
+/* 3A/Shading decimation factor specification (as of August 2008)
+ * ------------------------------------------------------------------
+ * [Image Width (BQ)] [Decimation Factor (BQ)] [Resulting grid cells]
+#ifndef ISP2401
+ * 1280 or more            32                       40 or more
+ * 640 to 1279             16                       40 to 80
+ * 639 or less              8                       80 or less
+#else
+ * from 1280 32 from 40
+ * from 640 to 1279 16 from 40 to 80
+ * to 639 8 to 80
+#endif
+ * ------------------------------------------------------------------
+ */
+/* Maximum and minimum decimation factor by the specification */
+#define MAX_SPEC_DECI_FACT_LOG2 5
+#define MIN_SPEC_DECI_FACT_LOG2 3
+/* the smallest frame width in bayer quads when decimation factor (log2) is 5 or 4, by the specification */
+#define DECI_FACT_LOG2_5_SMALLEST_FRAME_WIDTH_BQ 1280
+#define DECI_FACT_LOG2_4_SMALLEST_FRAME_WIDTH_BQ 640
+
+ int smallest_factor; /* the smallest factor (log2) where the number of cells does not exceed the limitation */
+ int spec_factor; /* the factor (log2) which satisfies the specification */
+
+ /* Currently supported maximum width and height are 5120(=80*64) and 3840(=60*64). */
+ assert(ISP_BQ_GRID_WIDTH(width, MAX_SPEC_DECI_FACT_LOG2) <= SH_CSS_MAX_BQ_GRID_WIDTH);
+ assert(ISP_BQ_GRID_HEIGHT(height, MAX_SPEC_DECI_FACT_LOG2) <= SH_CSS_MAX_BQ_GRID_HEIGHT);
+
+ /* Compute the smallest factor. */
+ smallest_factor = MAX_SPEC_DECI_FACT_LOG2;
+ while (ISP_BQ_GRID_WIDTH(width, smallest_factor - 1) <= SH_CSS_MAX_BQ_GRID_WIDTH &&
+ ISP_BQ_GRID_HEIGHT(height, smallest_factor - 1) <= SH_CSS_MAX_BQ_GRID_HEIGHT
+ && smallest_factor > MIN_SPEC_DECI_FACT_LOG2)
+ smallest_factor--;
+
+ /* Get the factor by the specification. */
+ if (_ISP_BQS(width) >= DECI_FACT_LOG2_5_SMALLEST_FRAME_WIDTH_BQ)
+ spec_factor = 5;
+ else if (_ISP_BQS(width) >= DECI_FACT_LOG2_4_SMALLEST_FRAME_WIDTH_BQ)
+ spec_factor = 4;
+ else
+ spec_factor = 3;
+
+ /* If smallest_factor is smaller than or equal to spec_factor, choose spec_factor to follow the specification.
+ If smallest_factor is larger than spec_factor, choose smallest_factor.
+
+ ex. width=2560, height=1920
+ smallest_factor=4, spec_factor=5
+ smallest_factor < spec_factor -> return spec_factor
+
+ ex. width=300, height=3000
+ smallest_factor=5, spec_factor=3
+ smallest_factor > spec_factor -> return smallest_factor
+ */
+ return max(smallest_factor, spec_factor);
+
+#undef MAX_SPEC_DECI_FACT_LOG2
+#undef MIN_SPEC_DECI_FACT_LOG2
+#undef DECI_FACT_LOG2_5_SMALLEST_FRAME_WIDTH_BQ
+#undef DECI_FACT_LOG2_4_SMALLEST_FRAME_WIDTH_BQ
+}
+
+static int
+binary_in_frame_padded_width(int in_frame_width,
+ int isp_internal_width,
+ int dvs_env_width,
+ int stream_config_left_padding,
+ int left_cropping,
+ bool need_scaling)
+{
+ int rval;
+ int nr_of_left_paddings; /* number of padding pixels on the left of an image line */
+
+#if defined(USE_INPUT_SYSTEM_VERSION_2401)
+ /* the output image line of Input System 2401 does not have the left paddings */
+ nr_of_left_paddings = 0;
+#else
+ /* in other cases, the left padding pixels are always 128 */
+ nr_of_left_paddings = 2*ISP_VEC_NELEMS;
+#endif
+#if defined(HAS_RES_MGR)
+ (void)dvs_env_width;
+#endif
+ if (need_scaling) {
+ /* In SDV use-case, we need to match left-padding of
+ * primary and the video binary. */
+ if (stream_config_left_padding != -1) {
+ /* Different than before, we do left&right padding. */
+ rval =
+ CEIL_MUL(in_frame_width + nr_of_left_paddings,
+ 2*ISP_VEC_NELEMS);
+ } else {
+ /* Different than before, we do left&right padding. */
+#if !defined(HAS_RES_MGR) /* dvs env is included already */
+ in_frame_width += dvs_env_width;
+#endif
+ rval =
+ CEIL_MUL(in_frame_width +
+ (left_cropping ? nr_of_left_paddings : 0),
+ 2*ISP_VEC_NELEMS);
+ }
+ } else {
+ rval = isp_internal_width;
+ }
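+ /* Editor's note: an assumed example (not from the source), taking ISP_VEC_NELEMS = 64
+  * so that nr_of_left_paddings = 128: for in_frame_width = 1920 with scaling needed and
+  * stream_config_left_padding != -1, rval = CEIL_MUL(1920 + 128, 128) = 2048.
+  */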
+
+ return rval;
+}
+
+
+enum ia_css_err
+ia_css_binary_fill_info(const struct ia_css_binary_xinfo *xinfo,
+ bool online,
+ bool two_ppc,
+ enum ia_css_stream_format stream_format,
+ const struct ia_css_frame_info *in_info, /* can be NULL */
+ const struct ia_css_frame_info *bds_out_info, /* can be NULL */
+ const struct ia_css_frame_info *out_info[], /* can be NULL */
+ const struct ia_css_frame_info *vf_info, /* can be NULL */
+ struct ia_css_binary *binary,
+ struct ia_css_resolution *dvs_env,
+ int stream_config_left_padding,
+ bool accelerator)
+{
+ const struct ia_css_binary_info *info = &xinfo->sp;
+ unsigned int dvs_env_width = 0,
+ dvs_env_height = 0,
+ vf_log_ds = 0,
+ s3a_log_deci = 0,
+ bits_per_pixel = 0,
+ /* Resolution at SC/3A/DIS kernel. */
+ sc_3a_dis_width = 0,
+ /* Resolution at SC/3A/DIS kernel. */
+ sc_3a_dis_padded_width = 0,
+ /* Resolution at SC/3A/DIS kernel. */
+ sc_3a_dis_height = 0,
+ isp_internal_width = 0,
+ isp_internal_height = 0,
+ s3a_isp_width = 0;
+
+ bool need_scaling = false;
+ struct ia_css_resolution binary_dvs_env, internal_res;
+ enum ia_css_err err;
+ unsigned int i;
+ const struct ia_css_frame_info *bin_out_info = NULL;
+
+ assert(info != NULL);
+ assert(binary != NULL);
+
+ binary->info = xinfo;
+ if (!accelerator) {
+ /* binary->css_params has been filled by accelerator itself. */
+ err = ia_css_isp_param_allocate_isp_parameters(
+ &binary->mem_params, &binary->css_params,
+ &info->mem_initializers);
+ if (err != IA_CSS_SUCCESS) {
+ return err;
+ }
+ }
+ for (i = 0; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++) {
+ if (out_info[i] && (out_info[i]->res.width != 0)) {
+ bin_out_info = out_info[i];
+ break;
+ }
+ }
+ if (in_info != NULL && bin_out_info != NULL) {
+ need_scaling = (in_info->res.width != bin_out_info->res.width) ||
+ (in_info->res.height != bin_out_info->res.height);
+ }
+
+
+ /* binary_dvs_env has to be equal or larger than SH_CSS_MIN_DVS_ENVELOPE */
+ binary_dvs_env.width = 0;
+ binary_dvs_env.height = 0;
+ ia_css_binary_dvs_env(info, dvs_env, &binary_dvs_env);
+ dvs_env_width = binary_dvs_env.width;
+ dvs_env_height = binary_dvs_env.height;
+ binary->dvs_envelope.width = dvs_env_width;
+ binary->dvs_envelope.height = dvs_env_height;
+
+ /* internal resolution calculation */
+ internal_res.width = 0;
+ internal_res.height = 0;
+ ia_css_binary_internal_res(in_info, bds_out_info, bin_out_info, dvs_env,
+ info, &internal_res);
+ isp_internal_width = internal_res.width;
+ isp_internal_height = internal_res.height;
+
+ /* internal frame info */
+ if (bin_out_info != NULL) /* { */
+ binary->internal_frame_info.format = bin_out_info->format;
+ /* } */
+ binary->internal_frame_info.res.width = isp_internal_width;
+ binary->internal_frame_info.padded_width = CEIL_MUL(isp_internal_width, 2*ISP_VEC_NELEMS);
+ binary->internal_frame_info.res.height = isp_internal_height;
+ binary->internal_frame_info.raw_bit_depth = bits_per_pixel;
+
+ if (in_info != NULL) {
+ binary->effective_in_frame_res.width = in_info->res.width;
+ binary->effective_in_frame_res.height = in_info->res.height;
+
+ bits_per_pixel = in_info->raw_bit_depth;
+
+ /* input info */
+ binary->in_frame_info.res.width = in_info->res.width + info->pipeline.left_cropping;
+ binary->in_frame_info.res.height = in_info->res.height + info->pipeline.top_cropping;
+
+#if !defined(HAS_RES_MGR) /* dvs env is included already */
+ binary->in_frame_info.res.width += dvs_env_width;
+ binary->in_frame_info.res.height += dvs_env_height;
+#endif
+
+ binary->in_frame_info.padded_width =
+ binary_in_frame_padded_width(in_info->res.width,
+ isp_internal_width,
+ dvs_env_width,
+ stream_config_left_padding,
+ info->pipeline.left_cropping,
+ need_scaling);
+
+ binary->in_frame_info.format = in_info->format;
+ binary->in_frame_info.raw_bayer_order = in_info->raw_bayer_order;
+ binary->in_frame_info.crop_info = in_info->crop_info;
+ }
+
+ if (online) {
+ bits_per_pixel = ia_css_util_input_format_bpp(
+ stream_format, two_ppc);
+ }
+ binary->in_frame_info.raw_bit_depth = bits_per_pixel;
+
+ for (i = 0; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++) {
+ if (out_info[i] != NULL) {
+ binary->out_frame_info[i].res.width = out_info[i]->res.width;
+ binary->out_frame_info[i].res.height = out_info[i]->res.height;
+ binary->out_frame_info[i].padded_width = out_info[i]->padded_width;
+ if (info->pipeline.mode == IA_CSS_BINARY_MODE_COPY) {
+ binary->out_frame_info[i].raw_bit_depth = bits_per_pixel;
+ } else {
+ /* Only relevant for RAW format.
+ * At the moment, all outputs are raw, 16 bit per pixel, except for copy.
+ * To do this cleanly, the binary should specify in its info
+ * the bit depth per output channel.
+ */
+ binary->out_frame_info[i].raw_bit_depth = 16;
+ }
+ binary->out_frame_info[i].format = out_info[i]->format;
+ }
+ }
+
+ if (vf_info && (vf_info->res.width != 0)) {
+ err = ia_css_vf_configure(binary, bin_out_info, (struct ia_css_frame_info *)vf_info, &vf_log_ds);
+ if (err != IA_CSS_SUCCESS) {
+ if (!accelerator) {
+ ia_css_isp_param_destroy_isp_parameters(
+ &binary->mem_params,
+ &binary->css_params);
+ }
+ return err;
+ }
+ }
+ binary->vf_downscale_log2 = vf_log_ds;
+
+ binary->online = online;
+ binary->input_format = stream_format;
+
+ /* viewfinder output info */
+ if ((vf_info != NULL) && (vf_info->res.width != 0)) {
+ unsigned int vf_out_vecs, vf_out_width, vf_out_height;
+ binary->vf_frame_info.format = vf_info->format;
+ if (bin_out_info == NULL)
+ return IA_CSS_ERR_INTERNAL_ERROR;
+ vf_out_vecs = __ISP_VF_OUTPUT_WIDTH_VECS(bin_out_info->padded_width,
+ vf_log_ds);
+ vf_out_width = _ISP_VF_OUTPUT_WIDTH(vf_out_vecs);
+ vf_out_height = _ISP_VF_OUTPUT_HEIGHT(bin_out_info->res.height,
+ vf_log_ds);
+
+ /* For preview mode, output pin is used instead of vf. */
+ if (info->pipeline.mode == IA_CSS_BINARY_MODE_PREVIEW) {
+ binary->out_frame_info[0].res.width =
+ (bin_out_info->res.width >> vf_log_ds);
+ binary->out_frame_info[0].padded_width = vf_out_width;
+ binary->out_frame_info[0].res.height = vf_out_height;
+
+ binary->vf_frame_info.res.width = 0;
+ binary->vf_frame_info.padded_width = 0;
+ binary->vf_frame_info.res.height = 0;
+ } else {
+ /* we also store the raw downscaled width. This is
+ * used for digital zoom in preview to zoom only on
+ * the width that we actually want to keep, not on
+ * the aligned width. */
+ binary->vf_frame_info.res.width =
+ (bin_out_info->res.width >> vf_log_ds);
+ binary->vf_frame_info.padded_width = vf_out_width;
+ binary->vf_frame_info.res.height = vf_out_height;
+ }
+ } else {
+ binary->vf_frame_info.res.width = 0;
+ binary->vf_frame_info.padded_width = 0;
+ binary->vf_frame_info.res.height = 0;
+ }
+
+ if (info->enable.ca_gdc) {
+ binary->morph_tbl_width =
+ _ISP_MORPH_TABLE_WIDTH(isp_internal_width);
+ binary->morph_tbl_aligned_width =
+ _ISP_MORPH_TABLE_ALIGNED_WIDTH(isp_internal_width);
+ binary->morph_tbl_height =
+ _ISP_MORPH_TABLE_HEIGHT(isp_internal_height);
+ } else {
+ binary->morph_tbl_width = 0;
+ binary->morph_tbl_aligned_width = 0;
+ binary->morph_tbl_height = 0;
+ }
+
+ sc_3a_dis_width = binary->in_frame_info.res.width;
+ sc_3a_dis_padded_width = binary->in_frame_info.padded_width;
+ sc_3a_dis_height = binary->in_frame_info.res.height;
+ if (bds_out_info != NULL && in_info != NULL &&
+ bds_out_info->res.width != in_info->res.width) {
+ /* TODO: Next, "internal_frame_info" should be derived from
+ * bds_out. So this part will change once it is in place! */
+ sc_3a_dis_width = bds_out_info->res.width + info->pipeline.left_cropping;
+ sc_3a_dis_padded_width = isp_internal_width;
+ sc_3a_dis_height = isp_internal_height;
+ }
+
+
+ s3a_isp_width = _ISP_S3A_ELEMS_ISP_WIDTH(sc_3a_dis_padded_width,
+ info->pipeline.left_cropping);
+ if (info->s3a.fixed_s3a_deci_log) {
+ s3a_log_deci = info->s3a.fixed_s3a_deci_log;
+ } else {
+ s3a_log_deci = binary_grid_deci_factor_log2(s3a_isp_width,
+ sc_3a_dis_height);
+ }
+ binary->deci_factor_log2 = s3a_log_deci;
+
+ if (info->enable.s3a) {
+ binary->s3atbl_width =
+ _ISP_S3ATBL_WIDTH(sc_3a_dis_width,
+ s3a_log_deci);
+ binary->s3atbl_height =
+ _ISP_S3ATBL_HEIGHT(sc_3a_dis_height,
+ s3a_log_deci);
+ binary->s3atbl_isp_width =
+ _ISP_S3ATBL_ISP_WIDTH(s3a_isp_width,
+ s3a_log_deci);
+ binary->s3atbl_isp_height =
+ _ISP_S3ATBL_ISP_HEIGHT(sc_3a_dis_height,
+ s3a_log_deci);
+ } else {
+ binary->s3atbl_width = 0;
+ binary->s3atbl_height = 0;
+ binary->s3atbl_isp_width = 0;
+ binary->s3atbl_isp_height = 0;
+ }
+
+ if (info->enable.sc) {
+ binary->sctbl_width_per_color =
+#ifndef ISP2401
+ _ISP_SCTBL_WIDTH_PER_COLOR(sc_3a_dis_padded_width,
+ s3a_log_deci);
+#else
+ _ISP_SCTBL_WIDTH_PER_COLOR(isp_internal_width, s3a_log_deci);
+#endif
+ binary->sctbl_aligned_width_per_color =
+ SH_CSS_MAX_SCTBL_ALIGNED_WIDTH_PER_COLOR;
+ binary->sctbl_height =
+#ifndef ISP2401
+ _ISP_SCTBL_HEIGHT(sc_3a_dis_height, s3a_log_deci);
+#else
+ _ISP_SCTBL_HEIGHT(isp_internal_height, s3a_log_deci);
+ binary->sctbl_legacy_width_per_color =
+ _ISP_SCTBL_LEGACY_WIDTH_PER_COLOR(sc_3a_dis_padded_width, s3a_log_deci);
+ binary->sctbl_legacy_height =
+ _ISP_SCTBL_LEGACY_HEIGHT(sc_3a_dis_height, s3a_log_deci);
+#endif
+ } else {
+ binary->sctbl_width_per_color = 0;
+ binary->sctbl_aligned_width_per_color = 0;
+ binary->sctbl_height = 0;
+#ifdef ISP2401
+ binary->sctbl_legacy_width_per_color = 0;
+ binary->sctbl_legacy_height = 0;
+#endif
+ }
+ ia_css_sdis_init_info(&binary->dis,
+ sc_3a_dis_width,
+ sc_3a_dis_padded_width,
+ sc_3a_dis_height,
+ info->pipeline.isp_pipe_version,
+ info->enable.dis);
+ if (info->pipeline.left_cropping)
+ binary->left_padding = 2 * ISP_VEC_NELEMS - info->pipeline.left_cropping;
+ else
+ binary->left_padding = 0;
+
+ return IA_CSS_SUCCESS;
+}
+
+enum ia_css_err
+ia_css_binary_find(struct ia_css_binary_descr *descr,
+ struct ia_css_binary *binary)
+{
+ int mode;
+ bool online;
+ bool two_ppc;
+ enum ia_css_stream_format stream_format;
+ const struct ia_css_frame_info *req_in_info,
+ *req_bds_out_info,
+ *req_out_info[IA_CSS_BINARY_MAX_OUTPUT_PORTS],
+ *req_bin_out_info = NULL,
+ *req_vf_info;
+
+ struct ia_css_binary_xinfo *xcandidate;
+#ifndef ISP2401
+ bool need_ds, need_dz, need_dvs, need_xnr, need_dpc;
+#else
+ bool need_ds, need_dz, need_dvs, need_xnr, need_dpc, need_tnr;
+#endif
+ bool striped;
+ bool enable_yuv_ds;
+ bool enable_high_speed;
+ bool enable_dvs_6axis;
+ bool enable_reduced_pipe;
+ bool enable_capture_pp_bli;
+#ifdef ISP2401
+ bool enable_luma_only;
+#endif
+ enum ia_css_err err = IA_CSS_ERR_INTERNAL_ERROR;
+ bool continuous;
+ unsigned int isp_pipe_version;
+ struct ia_css_resolution dvs_env, internal_res;
+ unsigned int i;
+
+ assert(descr != NULL);
+ /* MW: used after an error check, may accept NULL, but doubtful */
+ assert(binary != NULL);
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_binary_find() enter: descr=%p, (mode=%d), binary=%p\n",
+ descr, descr->mode,
+ binary);
+
+ mode = descr->mode;
+ online = descr->online;
+ two_ppc = descr->two_ppc;
+ stream_format = descr->stream_format;
+ req_in_info = descr->in_info;
+ req_bds_out_info = descr->bds_out_info;
+ for (i = 0; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++) {
+ req_out_info[i] = descr->out_info[i];
+ if (req_out_info[i] && (req_out_info[i]->res.width != 0))
+ req_bin_out_info = req_out_info[i];
+ }
+ if (req_bin_out_info == NULL)
+ return IA_CSS_ERR_INTERNAL_ERROR;
+#ifndef ISP2401
+ req_vf_info = descr->vf_info;
+#else
+
+ if ((descr->vf_info != NULL) && (descr->vf_info->res.width == 0))
+ /* width==0 means that there is no vf pin (e.g. in SkyCam preview case) */
+ req_vf_info = NULL;
+ else
+ req_vf_info = descr->vf_info;
+#endif
+
+ need_xnr = descr->enable_xnr;
+ need_ds = descr->enable_fractional_ds;
+ need_dz = false;
+ need_dvs = false;
+ need_dpc = descr->enable_dpc;
+#ifdef ISP2401
+ need_tnr = descr->enable_tnr;
+#endif
+ enable_yuv_ds = descr->enable_yuv_ds;
+ enable_high_speed = descr->enable_high_speed;
+ enable_dvs_6axis = descr->enable_dvs_6axis;
+ enable_reduced_pipe = descr->enable_reduced_pipe;
+ enable_capture_pp_bli = descr->enable_capture_pp_bli;
+#ifdef ISP2401
+ enable_luma_only = descr->enable_luma_only;
+#endif
+ continuous = descr->continuous;
+ striped = descr->striped;
+ isp_pipe_version = descr->isp_pipe_version;
+
+ dvs_env.width = 0;
+ dvs_env.height = 0;
+ internal_res.width = 0;
+ internal_res.height = 0;
+
+
+ if (mode == IA_CSS_BINARY_MODE_VIDEO) {
+ dvs_env = descr->dvs_env;
+ need_dz = descr->enable_dz;
+ /* Video is the only mode that has a nodz variant. */
+ need_dvs = dvs_env.width || dvs_env.height;
+ }
+
+ /* print a map of the binary file */
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "BINARY INFO:\n");
+ for (i = 0; i < IA_CSS_BINARY_NUM_MODES; i++) {
+ xcandidate = binary_infos[i];
+ if (xcandidate) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "%d:\n", i);
+ while (xcandidate) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, " Name:%s Type:%d Cont:%d\n",
+ xcandidate->blob->name, xcandidate->type,
+ xcandidate->sp.enable.continuous);
+ xcandidate = xcandidate->next;
+ }
+ }
+ }
+
+ /* printf("sh_css_binary_find: pipe version %d\n", isp_pipe_version); */
+ for (xcandidate = binary_infos[mode]; xcandidate;
+ xcandidate = xcandidate->next) {
+ struct ia_css_binary_info *candidate = &xcandidate->sp;
+ /* printf("sh_css_binary_find: evaluating candidate:
+ * %d\n",candidate->id); */
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_binary_find() candidate = %p, mode = %d ID = %d\n",
+ candidate, candidate->pipeline.mode, candidate->id);
+
+ /*
+ * MW: Only a limited set of jointly configured binaries can
+ * be used in a continuous preview/video mode unless it is
+ * the copy mode and runs on SP.
+ */
+ if (!candidate->enable.continuous &&
+ continuous && (mode != IA_CSS_BINARY_MODE_COPY)) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_binary_find() [%d] continue: !%d && %d && (%d != %d)\n",
+ __LINE__, candidate->enable.continuous,
+ continuous, mode,
+ IA_CSS_BINARY_MODE_COPY);
+ continue;
+ }
+ if (striped && candidate->iterator.num_stripes == 1) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_binary_find() [%d] continue: binary is not striped\n",
+ __LINE__);
+ continue;
+ }
+
+ if (candidate->pipeline.isp_pipe_version != isp_pipe_version &&
+ (mode != IA_CSS_BINARY_MODE_COPY) &&
+ (mode != IA_CSS_BINARY_MODE_CAPTURE_PP) &&
+ (mode != IA_CSS_BINARY_MODE_VF_PP)) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_binary_find() [%d] continue: (%d != %d)\n",
+ __LINE__,
+ candidate->pipeline.isp_pipe_version, isp_pipe_version);
+ continue;
+ }
+ if (!candidate->enable.reduced_pipe && enable_reduced_pipe) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_binary_find() [%d] continue: !%d && %d\n",
+ __LINE__,
+ candidate->enable.reduced_pipe,
+ enable_reduced_pipe);
+ continue;
+ }
+ if (!candidate->enable.dvs_6axis && enable_dvs_6axis) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_binary_find() [%d] continue: !%d && %d\n",
+ __LINE__,
+ candidate->enable.dvs_6axis,
+ enable_dvs_6axis);
+ continue;
+ }
+ if (candidate->enable.high_speed && !enable_high_speed) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_binary_find() [%d] continue: %d && !%d\n",
+ __LINE__,
+ candidate->enable.high_speed,
+ enable_high_speed);
+ continue;
+ }
+ if (!candidate->enable.xnr && need_xnr) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_binary_find() [%d] continue: %d && !%d\n",
+ __LINE__,
+ candidate->enable.xnr,
+ need_xnr);
+ continue;
+ }
+ if (!(candidate->enable.ds & 2) && enable_yuv_ds) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_binary_find() [%d] continue: !%d && %d\n",
+ __LINE__,
+ ((candidate->enable.ds & 2) != 0),
+ enable_yuv_ds);
+ continue;
+ }
+ if ((candidate->enable.ds & 2) && !enable_yuv_ds) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_binary_find() [%d] continue: %d && !%d\n",
+ __LINE__,
+ ((candidate->enable.ds & 2) != 0),
+ enable_yuv_ds);
+ continue;
+ }
+
+ if (mode == IA_CSS_BINARY_MODE_VIDEO &&
+ candidate->enable.ds && need_ds)
+ need_dz = false;
+
+ /* when we require vf output, we need to have vf_veceven */
+ if ((req_vf_info != NULL) && !(candidate->enable.vf_veceven ||
+ /* or variable vf vec even */
+ candidate->vf_dec.is_variable ||
+ /* or more than one output pin. */
+ xcandidate->num_output_pins > 1)) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_binary_find() [%d] continue: (%p != NULL) && !(%d || %d || (%d >%d))\n",
+ __LINE__, req_vf_info,
+ candidate->enable.vf_veceven,
+ candidate->vf_dec.is_variable,
+ xcandidate->num_output_pins, 1);
+ continue;
+ }
+ if (!candidate->enable.dvs_envelope && need_dvs) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_binary_find() [%d] continue: !%d && %d\n",
+ __LINE__,
+ candidate->enable.dvs_envelope, (int)need_dvs);
+ continue;
+ }
+ /* internal_res check considers input, output, and dvs envelope sizes */
+ ia_css_binary_internal_res(req_in_info, req_bds_out_info,
+ req_bin_out_info, &dvs_env, candidate, &internal_res);
+ if (internal_res.width > candidate->internal.max_width) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_binary_find() [%d] continue: (%d > %d)\n",
+ __LINE__, internal_res.width,
+ candidate->internal.max_width);
+ continue;
+ }
+ if (internal_res.height > candidate->internal.max_height) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_binary_find() [%d] continue: (%d > %d)\n",
+ __LINE__, internal_res.height,
+ candidate->internal.max_height);
+ continue;
+ }
+ if (!candidate->enable.ds && need_ds && !(xcandidate->num_output_pins > 1)) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_binary_find() [%d] continue: !%d && %d\n",
+ __LINE__, candidate->enable.ds, (int)need_ds);
+ continue;
+ }
+ if (!candidate->enable.uds && !candidate->enable.dvs_6axis && need_dz) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_binary_find() [%d] continue: !%d && !%d && %d\n",
+ __LINE__, candidate->enable.uds,
+ candidate->enable.dvs_6axis, (int)need_dz);
+ continue;
+ }
+ if (online && candidate->input.source == IA_CSS_BINARY_INPUT_MEMORY) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_binary_find() [%d] continue: %d && (%d == %d)\n",
+ __LINE__, online, candidate->input.source,
+ IA_CSS_BINARY_INPUT_MEMORY);
+ continue;
+ }
+ if (!online && candidate->input.source == IA_CSS_BINARY_INPUT_SENSOR) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_binary_find() [%d] continue: !%d && (%d == %d)\n",
+ __LINE__, online, candidate->input.source,
+ IA_CSS_BINARY_INPUT_SENSOR);
+ continue;
+ }
+ if (req_bin_out_info->res.width < candidate->output.min_width ||
+ req_bin_out_info->res.width > candidate->output.max_width) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_binary_find() [%d] continue: (%d > %d) || (%d < %d)\n",
+ __LINE__,
+ req_bin_out_info->padded_width,
+ candidate->output.min_width,
+ req_bin_out_info->padded_width,
+ candidate->output.max_width);
+ continue;
+ }
+ if (xcandidate->num_output_pins > 1 && /* in case we have a second output pin, */
+ req_vf_info) { /* and we need vf output. */
+ if (req_vf_info->res.width > candidate->output.max_width) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_binary_find() [%d] continue: (%d < %d)\n",
+ __LINE__,
+ req_vf_info->res.width,
+ candidate->output.max_width);
+ continue;
+ }
+ }
+ if (req_in_info->padded_width > candidate->input.max_width) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_binary_find() [%d] continue: (%d > %d)\n",
+ __LINE__, req_in_info->padded_width,
+ candidate->input.max_width);
+ continue;
+ }
+ if (!binary_supports_output_format(xcandidate, req_bin_out_info->format)) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_binary_find() [%d] continue: !%d\n",
+ __LINE__,
+ binary_supports_output_format(xcandidate, req_bin_out_info->format));
+ continue;
+ }
+#ifdef ISP2401
+ if (!binary_supports_input_format(xcandidate, descr->stream_format)) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_binary_find() [%d] continue: !%d\n",
+ __LINE__,
+ binary_supports_input_format(xcandidate, req_in_info->format));
+ continue;
+ }
+#endif
+ if (xcandidate->num_output_pins > 1 && /* in case we have a second output pin, */
+ req_vf_info && /* and we need vf output. */
+ /* check if the required vf format
+ is supported. */
+ !binary_supports_output_format(xcandidate, req_vf_info->format)) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_binary_find() [%d] continue: (%d > %d) && (%p != NULL) && !%d\n",
+ __LINE__, xcandidate->num_output_pins, 1,
+ req_vf_info,
+ binary_supports_output_format(xcandidate, req_vf_info->format));
+ continue;
+ }
+
+ /* Check if vf_veceven supports the requested vf format */
+ if (xcandidate->num_output_pins == 1 &&
+ req_vf_info && candidate->enable.vf_veceven &&
+ !binary_supports_vf_format(xcandidate, req_vf_info->format)) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_binary_find() [%d] continue: (%d == %d) && (%p != NULL) && %d && !%d\n",
+ __LINE__, xcandidate->num_output_pins, 1,
+ req_vf_info, candidate->enable.vf_veceven,
+ binary_supports_vf_format(xcandidate, req_vf_info->format));
+ continue;
+ }
+
+ /* Check if vf_veceven supports the requested vf width */
+ if (xcandidate->num_output_pins == 1 &&
+ req_vf_info && candidate->enable.vf_veceven) { /* and we need vf output. */
+ if (req_vf_info->res.width > candidate->output.max_width) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_binary_find() [%d] continue: (%d < %d)\n",
+ __LINE__,
+ req_vf_info->res.width,
+ candidate->output.max_width);
+ continue;
+ }
+ }
+
+ if (!supports_bds_factor(candidate->bds.supported_bds_factors,
+ descr->required_bds_factor)) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_binary_find() [%d] continue: 0x%x & 0x%x)\n",
+ __LINE__, candidate->bds.supported_bds_factors,
+ descr->required_bds_factor);
+ continue;
+ }
+
+ if (!candidate->enable.dpc && need_dpc) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_binary_find() [%d] continue: 0x%x & 0x%x)\n",
+ __LINE__, candidate->enable.dpc,
+ descr->enable_dpc);
+ continue;
+ }
+
+ if (candidate->uds.use_bci && enable_capture_pp_bli) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_binary_find() [%d] continue: 0x%x & 0x%x)\n",
+ __LINE__, candidate->uds.use_bci,
+ descr->enable_capture_pp_bli);
+ continue;
+ }
+
+#ifdef ISP2401
+ if (candidate->enable.luma_only != enable_luma_only) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_binary_find() [%d] continue: %d != %d\n",
+ __LINE__, candidate->enable.luma_only,
+ descr->enable_luma_only);
+ continue;
+ }
+
+ if(!candidate->enable.tnr && need_tnr) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_binary_find() [%d] continue: !%d && %d\n",
+ __LINE__, candidate->enable.tnr,
+ descr->enable_tnr);
+ continue;
+ }
+
+#endif
+ /* reconfigure any variable properties of the binary */
+ err = ia_css_binary_fill_info(xcandidate, online, two_ppc,
+ stream_format, req_in_info,
+ req_bds_out_info,
+ req_out_info, req_vf_info,
+ binary, &dvs_env,
+ descr->stream_config_left_padding,
+ false);
+
+ if (err != IA_CSS_SUCCESS)
+ break;
+ binary_init_metrics(&binary->metrics, &binary->info->sp);
+ break;
+ }
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_binary_find() selected = %p, mode = %d ID = %d\n",
+ xcandidate, xcandidate ? xcandidate->sp.pipeline.mode : 0, xcandidate ? xcandidate->sp.id : 0);
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_binary_find() leave: return_err=%d\n", err);
+
+ return err;
+}
+
+unsigned
+ia_css_binary_max_vf_width(void)
+{
+ /* This is (should be) true for IPU1 and IPU2 */
+ /* For IPU3 (SkyCam) this pointer is guaranteed to be NULL simply because such a binary does not exist */
+ if (binary_infos[IA_CSS_BINARY_MODE_VF_PP])
+ return binary_infos[IA_CSS_BINARY_MODE_VF_PP]->sp.output.max_width;
+ return 0;
+}
+
+void
+ia_css_binary_destroy_isp_parameters(struct ia_css_binary *binary)
+{
+ if (binary) {
+ ia_css_isp_param_destroy_isp_parameters(&binary->mem_params,
+ &binary->css_params);
+ }
+}
+
+void
+ia_css_binary_get_isp_binaries(struct ia_css_binary_xinfo **binaries,
+ uint32_t *num_isp_binaries)
+{
+ assert(binaries != NULL);
+
+ if (num_isp_binaries)
+ *num_isp_binaries = 0;
+
+ *binaries = all_binaries;
+ if (all_binaries && num_isp_binaries) {
+ /* -1 to account for sp binary which is not stored in all_binaries */
+ if (sh_css_num_binaries > 0)
+ *num_isp_binaries = sh_css_num_binaries - 1;
+ }
+}
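
A minimal usage sketch, illustrative only and not part of the patch: querying the loaded ISP binary list and the maximum viewfinder width from host code. The helper name example_dump_binary_summary is hypothetical; it assumes the ia_css_binary and ia_css_debug headers added in this patch are included.

static void example_dump_binary_summary(void)
{
	struct ia_css_binary_xinfo *binaries = NULL;
	uint32_t num_isp_binaries = 0;

	ia_css_binary_get_isp_binaries(&binaries, &num_isp_binaries);

	/* binaries points at the start of the xinfo list; only the count
	 * and the maximum viewfinder width are reported here. */
	ia_css_debug_dtrace(IA_CSS_DEBUG_INFO,
			    "isp binaries at %p, count=%u, max vf width=%u\n",
			    binaries, num_isp_binaries,
			    ia_css_binary_max_vf_width());
}
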
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/bufq/interface/ia_css_bufq.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/bufq/interface/ia_css_bufq.h
new file mode 100644
index 000000000000..034ec15ec4a1
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/bufq/interface/ia_css_bufq.h
@@ -0,0 +1,197 @@
+#ifndef ISP2401
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#else
+/**
+Support for Intel Camera Imaging ISP subsystem.
+Copyright (c) 2010 - 2015, Intel Corporation.
+
+This program is free software; you can redistribute it and/or modify it
+under the terms and conditions of the GNU General Public License,
+version 2, as published by the Free Software Foundation.
+
+This program is distributed in the hope it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+more details.
+*/
+#endif
+
+#ifndef _IA_CSS_BUFQ_H
+#define _IA_CSS_BUFQ_H
+
+#include <type_support.h>
+#include "ia_css_bufq_comm.h"
+#include "ia_css_buffer.h"
+#include "ia_css_err.h"
+#define BUFQ_EVENT_SIZE 4
+
+
+/**
+ * @brief Query the internal queue ID for a buffer type.
+ *
+ * @param[in] buf_type The buffer type to look up.
+ * @param[in] thread_id The thread for which the mapping is queried.
+ * @param[out] val The internal queue ID.
+ *
+ * @return
+ * true, if the query succeeds;
+ * false, if the query fails.
+ */
+bool ia_css_query_internal_queue_id(
+ enum ia_css_buffer_type buf_type,
+ unsigned int thread_id,
+ enum sh_css_queue_id *val
+ );
+
+
+/**
+ * @brief Map or unmap a buffer type to an internal queue ID.
+ *
+ * @param[in] thread_id Thread in which the buffer type has to be mapped or unmapped.
+ * @param[in] buf_type Buffer type.
+ * @param[in] map Boolean flag: true to map, false to unmap.
+ * @return none
+ */
+void ia_css_queue_map(
+ unsigned int thread_id,
+ enum ia_css_buffer_type buf_type,
+ bool map
+ );
+
+
+/**
+ * @brief Initialize the buffer-type to queue-ID mapping.
+ * @return none
+ */
+void ia_css_queue_map_init(void);
+
+
+/**
+ * @brief Initialize the bufq module.
+ * It creates instances of:
+ * - the host-to-SP buffer queues: MxN queues, where M is the number of
+ *   threads and N is the number of queues per thread,
+ * - the SP-to-host buffer queues: a list of N queues,
+ * - the host-to-SP event communication queue,
+ * - the SP-to-host event communication queue,
+ * - the queue for tagger commands.
+ * @return none
+ */
+void ia_css_bufq_init(void);
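
A minimal host-side bring-up sketch, illustrative only and not part of the patch, showing the expected ordering of the mapping-table reset, the queue initialization, and a per-thread mapping. Thread ID 0 and the function name are assumptions; the buffer type is one of the reserved ones used later in this patch.

static void example_bufq_bringup(void)
{
	/* reset the buffer-type to queue-ID mapping table */
	ia_css_queue_map_init();

	/* initialize the remote (host <-> SP) queue handles */
	ia_css_bufq_init();

	/* map the parameter-set buffers of thread 0 to their reserved queue */
	ia_css_queue_map(0, IA_CSS_BUFFER_TYPE_PARAMETER_SET, true);
}
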
+
+
+/**
+ * @brief Enqueue an item into a host-to-SP buffer queue.
+ *
+ * @param thread_index[in] Thread in which the item is to be enqueued.
+ * @param queue_id[in] Index of the queue in the specified thread.
+ * @param item[in] Object to enqueue.
+ * @return IA_CSS_SUCCESS or error code upon error.
+ *
+ */
+enum ia_css_err ia_css_bufq_enqueue_buffer(
+ int thread_index,
+ int queue_id,
+ uint32_t item);
+
+/**
+ * @brief Dequeue an item from an SP-to-host buffer queue.
+ *
+ * @param queue_id[in] Index of the queue in the list from which
+ * the item has to be read.
+ * @param item[out] Object to be dequeued into this item.
+ * @return IA_CSS_SUCCESS or error code upon error.
+ *
+ */
+enum ia_css_err ia_css_bufq_dequeue_buffer(
+ int queue_id,
+ uint32_t *item);
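
A hedged sketch, not part of the patch, of how a host-side caller might combine ia_css_query_internal_queue_id() with the enqueue API above; the helper name and the idea of passing a raw buffer handle as the queue item are assumptions.

static enum ia_css_err example_send_buffer(unsigned int thread_id,
					   enum ia_css_buffer_type buf_type,
					   uint32_t buf_handle)
{
	enum sh_css_queue_id qid;

	/* the buffer type must already be mapped for this thread,
	 * e.g. by ia_css_queue_map(thread_id, buf_type, true) */
	if (!ia_css_query_internal_queue_id(buf_type, thread_id, &qid))
		return IA_CSS_ERR_INVALID_ARGUMENTS;

	return ia_css_bufq_enqueue_buffer((int)thread_id, (int)qid, buf_handle);
}
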
+
+/**
+* @brief Enqueue an event item into host to SP communication event queue.
+ *
+ * @param[in] evt_id The event ID.
+ * @param[in] evt_payload_0 The event payload.
+ * @param[in] evt_payload_1 The event payload.
+ * @param[in] evt_payload_2 The event payload.
+ * @return IA_CSS_SUCCESS or error code upon error.
+ *
+*/
+enum ia_css_err ia_css_bufq_enqueue_psys_event(
+ uint8_t evt_id,
+ uint8_t evt_payload_0,
+ uint8_t evt_payload_1,
+ uint8_t evt_payload_2
+ );
+
+/**
+ * @brief Dequeue an item from SP to host communication event queue.
+ *
+ * @param item Object to be dequeued into this item.
+ * @return IA_CSS_SUCCESS or error code upon error.
+ *
+*/
+enum ia_css_err ia_css_bufq_dequeue_psys_event(
+ uint8_t item[BUFQ_EVENT_SIZE]
+ );
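
An illustrative polling loop, not part of the patch, that drains the SP-to-host PSYS event queue using the API above. The helper name is hypothetical and the payload is printed raw rather than decoded.

static void example_drain_psys_events(void)
{
	uint8_t payload[BUFQ_EVENT_SIZE];

	/* drain the SP-to-host PSYS event queue; the payload layout is
	 * firmware defined and not decoded here */
	while (ia_css_bufq_dequeue_psys_event(payload) == IA_CSS_SUCCESS)
		ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
				    "psys event 0x%02x 0x%02x 0x%02x 0x%02x\n",
				    payload[0], payload[1], payload[2],
				    payload[3]);
}
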
+
+/**
+ * @brief Enqueue an event item into host to SP EOF event queue.
+ *
+ * @param[in] evt_id The event ID.
+ * @return IA_CSS_SUCCESS or error code upon error.
+ *
+ */
+enum ia_css_err ia_css_bufq_enqueue_isys_event(
+ uint8_t evt_id);
+
+/**
+ * @brief Dequeue an item from the SP-to-host EOF event queue.
+ *
+ * @param item Object to be dequeued into this item.
+ * @return IA_CSS_SUCCESS or error code upon error.
+ *
+ */
+enum ia_css_err ia_css_bufq_dequeue_isys_event(
+ uint8_t item[BUFQ_EVENT_SIZE]);
+
+/**
+ * @brief Enqueue a tagger command item into the tagger command queue.
+ *
+ * @param item Object to be enqueued.
+ * @return IA_CSS_SUCCESS or error code upon error.
+ *
+*/
+enum ia_css_err ia_css_bufq_enqueue_tag_cmd(
+ uint32_t item);
+
+/**
+* @brief Uninitializes bufq module.
+ *
+ * @return IA_CSS_SUCCESS or error code upon error.
+ *
+*/
+enum ia_css_err ia_css_bufq_deinit(void);
+
+/**
+* @brief Dump queue states
+ *
+ * @return None
+ *
+*/
+void ia_css_bufq_dump_queue_info(void);
+
+#endif /* _IA_CSS_BUFQ_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/bufq/interface/ia_css_bufq_comm.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/bufq/interface/ia_css_bufq_comm.h
new file mode 100644
index 000000000000..bb77080591b9
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/bufq/interface/ia_css_bufq_comm.h
@@ -0,0 +1,66 @@
+#ifndef ISP2401
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#else
+/**
+Support for Intel Camera Imaging ISP subsystem.
+Copyright (c) 2010 - 2015, Intel Corporation.
+
+This program is free software; you can redistribute it and/or modify it
+under the terms and conditions of the GNU General Public License,
+version 2, as published by the Free Software Foundation.
+
+This program is distributed in the hope it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+more details.
+*/
+#endif
+
+#ifndef _IA_CSS_BUFQ_COMM_H
+#define _IA_CSS_BUFQ_COMM_H
+
+#include "system_global.h"
+
+enum sh_css_queue_id {
+ SH_CSS_INVALID_QUEUE_ID = -1,
+ SH_CSS_QUEUE_A_ID = 0,
+ SH_CSS_QUEUE_B_ID,
+ SH_CSS_QUEUE_C_ID,
+ SH_CSS_QUEUE_D_ID,
+ SH_CSS_QUEUE_E_ID,
+ SH_CSS_QUEUE_F_ID,
+ SH_CSS_QUEUE_G_ID,
+#if defined(HAS_NO_INPUT_SYSTEM)
+ /* input frame queue for skycam */
+ SH_CSS_QUEUE_H_ID,
+#endif
+#if defined(USE_INPUT_SYSTEM_VERSION_2) || defined(USE_INPUT_SYSTEM_VERSION_2401)
+ SH_CSS_QUEUE_H_ID, /* for metadata */
+#endif
+
+#if defined(HAS_NO_INPUT_SYSTEM) || defined(USE_INPUT_SYSTEM_VERSION_2) || defined(USE_INPUT_SYSTEM_VERSION_2401)
+#define SH_CSS_MAX_NUM_QUEUES (SH_CSS_QUEUE_H_ID+1)
+#else
+#define SH_CSS_MAX_NUM_QUEUES (SH_CSS_QUEUE_G_ID+1)
+#endif
+
+};
+
+#define SH_CSS_MAX_DYNAMIC_BUFFERS_PER_THREAD SH_CSS_MAX_NUM_QUEUES
+/* for now we statically assign queues 0 and 1 to parameter sets */
+#define IA_CSS_PARAMETER_SET_QUEUE_ID SH_CSS_QUEUE_A_ID
+#define IA_CSS_PER_FRAME_PARAMETER_SET_QUEUE_ID SH_CSS_QUEUE_B_ID
+
+#endif
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/bufq/src/bufq.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/bufq/src/bufq.c
new file mode 100644
index 000000000000..ed33d4c4c84a
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/bufq/src/bufq.c
@@ -0,0 +1,590 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "assert_support.h" /* assert */
+#include "ia_css_buffer.h"
+#include "sp.h"
+#include "ia_css_bufq.h" /* Bufq API's */
+#include "ia_css_queue.h" /* ia_css_queue_t */
+#include "sw_event_global.h" /* Event IDs.*/
+#include "ia_css_eventq.h" /* ia_css_eventq_recv()*/
+#include "ia_css_debug.h" /* ia_css_debug_dtrace*/
+#include "sh_css_internal.h" /* sh_css_queue_type */
+#include "sp_local.h" /* sp_address_of */
+#include "ia_css_util.h" /* ia_css_convert_errno()*/
+#include "sh_css_firmware.h" /* sh_css_sp_fw*/
+
+#define BUFQ_DUMP_FILE_NAME_PREFIX_SIZE 256
+
+static char prefix[BUFQ_DUMP_FILE_NAME_PREFIX_SIZE] = {0};
+
+/*********************************************************/
+/* Global Queue objects used by CSS */
+/*********************************************************/
+
+#ifndef ISP2401
+
+struct sh_css_queues {
+ /* Host2SP buffer queue */
+ ia_css_queue_t host2sp_buffer_queue_handles
+ [SH_CSS_MAX_SP_THREADS][SH_CSS_MAX_NUM_QUEUES];
+ /* SP2Host buffer queue */
+ ia_css_queue_t sp2host_buffer_queue_handles
+ [SH_CSS_MAX_NUM_QUEUES];
+
+ /* Host2SP event queue */
+ ia_css_queue_t host2sp_psys_event_queue_handle;
+
+ /* SP2Host event queue */
+ ia_css_queue_t sp2host_psys_event_queue_handle;
+
+#if !defined(HAS_NO_INPUT_SYSTEM)
+ /* Host2SP ISYS event queue */
+ ia_css_queue_t host2sp_isys_event_queue_handle;
+
+ /* SP2Host ISYS event queue */
+ ia_css_queue_t sp2host_isys_event_queue_handle;
+#endif
+ /* Tagger command queue */
+ ia_css_queue_t host2sp_tag_cmd_queue_handle;
+};
+
+#else
+
+struct sh_css_queues {
+ /* Host2SP buffer queue */
+ ia_css_queue_t host2sp_buffer_queue_handles
+ [SH_CSS_MAX_SP_THREADS][SH_CSS_MAX_NUM_QUEUES];
+ /* SP2Host buffer queue */
+ ia_css_queue_t sp2host_buffer_queue_handles
+ [SH_CSS_MAX_NUM_QUEUES];
+
+ /* Host2SP event queue */
+ ia_css_queue_t host2sp_psys_event_queue_handle;
+
+ /* SP2Host event queue */
+ ia_css_queue_t sp2host_psys_event_queue_handle;
+
+#if !defined(HAS_NO_INPUT_SYSTEM)
+ /* Host2SP ISYS event queue */
+ ia_css_queue_t host2sp_isys_event_queue_handle;
+
+ /* SP2Host ISYS event queue */
+ ia_css_queue_t sp2host_isys_event_queue_handle;
+
+ /* Tagger command queue */
+ ia_css_queue_t host2sp_tag_cmd_queue_handle;
+#endif
+};
+
+#endif
+
+struct sh_css_queues css_queues;
+
+
+/*******************************************************
+*** Static variables
+********************************************************/
+static int buffer_type_to_queue_id_map[SH_CSS_MAX_SP_THREADS][IA_CSS_NUM_DYNAMIC_BUFFER_TYPE];
+static bool queue_availability[SH_CSS_MAX_SP_THREADS][SH_CSS_MAX_NUM_QUEUES];
+
+/*******************************************************
+*** Static functions
+********************************************************/
+static void map_buffer_type_to_queue_id(
+ unsigned int thread_id,
+ enum ia_css_buffer_type buf_type
+ );
+static void unmap_buffer_type_to_queue_id(
+ unsigned int thread_id,
+ enum ia_css_buffer_type buf_type
+ );
+
+static ia_css_queue_t *bufq_get_qhandle(
+ enum sh_css_queue_type type,
+ enum sh_css_queue_id id,
+ int thread
+ );
+
+/*******************************************************
+*** Public functions
+********************************************************/
+void ia_css_queue_map_init(void)
+{
+ unsigned int i, j;
+
+ for (i = 0; i < SH_CSS_MAX_SP_THREADS; i++) {
+ for (j = 0; j < SH_CSS_MAX_NUM_QUEUES; j++)
+ queue_availability[i][j] = true;
+ }
+
+ for (i = 0; i < SH_CSS_MAX_SP_THREADS; i++) {
+ for (j = 0; j < IA_CSS_NUM_DYNAMIC_BUFFER_TYPE; j++)
+ buffer_type_to_queue_id_map[i][j] = SH_CSS_INVALID_QUEUE_ID;
+ }
+}
+
+void ia_css_queue_map(
+ unsigned int thread_id,
+ enum ia_css_buffer_type buf_type,
+ bool map)
+{
+ assert(buf_type < IA_CSS_NUM_DYNAMIC_BUFFER_TYPE);
+ assert(thread_id < SH_CSS_MAX_SP_THREADS);
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_queue_map() enter: buf_type=%d, thread_id=%d\n", buf_type, thread_id);
+
+ if (map)
+ map_buffer_type_to_queue_id(thread_id, buf_type);
+ else
+ unmap_buffer_type_to_queue_id(thread_id, buf_type);
+}
+
+/**
+ * @brief Query the internal queue ID.
+ */
+bool ia_css_query_internal_queue_id(
+ enum ia_css_buffer_type buf_type,
+ unsigned int thread_id,
+ enum sh_css_queue_id *val)
+{
+ IA_CSS_ENTER("buf_type=%d, thread_id=%d, val = %p", buf_type, thread_id, val);
+
+ if ((val == NULL) || (thread_id >= SH_CSS_MAX_SP_THREADS) || (buf_type >= IA_CSS_NUM_DYNAMIC_BUFFER_TYPE)) {
+ IA_CSS_LEAVE("return_val = false");
+ return false;
+ }
+
+ *val = buffer_type_to_queue_id_map[thread_id][buf_type];
+ if ((*val == SH_CSS_INVALID_QUEUE_ID) || (*val >= SH_CSS_MAX_NUM_QUEUES)) {
+ IA_CSS_LOG("INVALID queue ID MAP = %d\n", *val);
+ IA_CSS_LEAVE("return_val = false");
+ return false;
+ }
+ IA_CSS_LEAVE("return_val = true");
+ return true;
+}
+
+/*******************************************************
+*** Static functions
+********************************************************/
+static void map_buffer_type_to_queue_id(
+ unsigned int thread_id,
+ enum ia_css_buffer_type buf_type)
+{
+ unsigned int i;
+
+ assert(thread_id < SH_CSS_MAX_SP_THREADS);
+ assert(buf_type < IA_CSS_NUM_DYNAMIC_BUFFER_TYPE);
+ assert(buffer_type_to_queue_id_map[thread_id][buf_type] == SH_CSS_INVALID_QUEUE_ID);
+
+ /* queue 0 is reserved for parameters because it doesn't depend on events */
+ if (buf_type == IA_CSS_BUFFER_TYPE_PARAMETER_SET) {
+ assert(queue_availability[thread_id][IA_CSS_PARAMETER_SET_QUEUE_ID]);
+ queue_availability[thread_id][IA_CSS_PARAMETER_SET_QUEUE_ID] = false;
+ buffer_type_to_queue_id_map[thread_id][buf_type] = IA_CSS_PARAMETER_SET_QUEUE_ID;
+ return;
+ }
+
+ /* queue 1 is reserved for per frame parameters because it doesn't depend on events */
+ if (buf_type == IA_CSS_BUFFER_TYPE_PER_FRAME_PARAMETER_SET) {
+ assert(queue_availability[thread_id][IA_CSS_PER_FRAME_PARAMETER_SET_QUEUE_ID]);
+ queue_availability[thread_id][IA_CSS_PER_FRAME_PARAMETER_SET_QUEUE_ID] = false;
+ buffer_type_to_queue_id_map[thread_id][buf_type] = IA_CSS_PER_FRAME_PARAMETER_SET_QUEUE_ID;
+ return;
+ }
+
+ for (i = SH_CSS_QUEUE_C_ID; i < SH_CSS_MAX_NUM_QUEUES; i++) {
+ if (queue_availability[thread_id][i] == true) {
+ queue_availability[thread_id][i] = false;
+ buffer_type_to_queue_id_map[thread_id][buf_type] = i;
+ break;
+ }
+ }
+
+ assert(i != SH_CSS_MAX_NUM_QUEUES);
+ return;
+}
+
+static void unmap_buffer_type_to_queue_id(
+ unsigned int thread_id,
+ enum ia_css_buffer_type buf_type)
+{
+ int queue_id;
+
+ assert(thread_id < SH_CSS_MAX_SP_THREADS);
+ assert(buf_type < IA_CSS_NUM_DYNAMIC_BUFFER_TYPE);
+ assert(buffer_type_to_queue_id_map[thread_id][buf_type] != SH_CSS_INVALID_QUEUE_ID);
+
+ queue_id = buffer_type_to_queue_id_map[thread_id][buf_type];
+ buffer_type_to_queue_id_map[thread_id][buf_type] = SH_CSS_INVALID_QUEUE_ID;
+ queue_availability[thread_id][queue_id] = true;
+}
+
+
+static ia_css_queue_t *bufq_get_qhandle(
+ enum sh_css_queue_type type,
+ enum sh_css_queue_id id,
+ int thread)
+{
+ ia_css_queue_t *q = 0;
+
+ switch (type) {
+ case sh_css_host2sp_buffer_queue:
+ if ((thread >= SH_CSS_MAX_SP_THREADS) || (thread < 0) ||
+ (id == SH_CSS_INVALID_QUEUE_ID))
+ break;
+ q = &css_queues.host2sp_buffer_queue_handles[thread][id];
+ break;
+ case sh_css_sp2host_buffer_queue:
+ if (id == SH_CSS_INVALID_QUEUE_ID)
+ break;
+ q = &css_queues.sp2host_buffer_queue_handles[id];
+ break;
+ case sh_css_host2sp_psys_event_queue:
+ q = &css_queues.host2sp_psys_event_queue_handle;
+ break;
+ case sh_css_sp2host_psys_event_queue:
+ q = &css_queues.sp2host_psys_event_queue_handle;
+ break;
+#if !defined(HAS_NO_INPUT_SYSTEM)
+ case sh_css_host2sp_isys_event_queue:
+ q = &css_queues.host2sp_isys_event_queue_handle;
+ break;
+ case sh_css_sp2host_isys_event_queue:
+ q = &css_queues.sp2host_isys_event_queue_handle;
+ break;
+#endif
+ case sh_css_host2sp_tag_cmd_queue:
+ q = &css_queues.host2sp_tag_cmd_queue_handle;
+ break;
+ default:
+ break;
+ }
+
+ return q;
+}
+
+/* Local function to initialize a buffer queue. This reduces
+ * the chances of copy-paste errors or typos.
+ */
+STORAGE_CLASS_INLINE void
+init_bufq(unsigned int desc_offset,
+ unsigned int elems_offset,
+ ia_css_queue_t *handle)
+{
+ const struct ia_css_fw_info *fw;
+ unsigned int q_base_addr;
+ ia_css_queue_remote_t remoteq;
+
+ fw = &sh_css_sp_fw;
+ q_base_addr = fw->info.sp.host_sp_queue;
+
+ /* Setup queue location as SP and proc id as SP0_ID */
+ remoteq.location = IA_CSS_QUEUE_LOC_SP;
+ remoteq.proc_id = SP0_ID;
+ remoteq.cb_desc_addr = q_base_addr + desc_offset;
+ remoteq.cb_elems_addr = q_base_addr + elems_offset;
+ /* Initialize the queue instance and obtain handle */
+ ia_css_queue_remote_init(handle, &remoteq);
+}
+
+void ia_css_bufq_init(void)
+{
+ int i, j;
+
+ IA_CSS_ENTER_PRIVATE("");
+
+ /* Setup all the local queue descriptors for Host2SP Buffer Queues */
+ for (i = 0; i < SH_CSS_MAX_SP_THREADS; i++)
+ for (j = 0; j < SH_CSS_MAX_NUM_QUEUES; j++) {
+ init_bufq((uint32_t)offsetof(struct host_sp_queues, host2sp_buffer_queues_desc[i][j]),
+ (uint32_t)offsetof(struct host_sp_queues, host2sp_buffer_queues_elems[i][j]),
+ &css_queues.host2sp_buffer_queue_handles[i][j]);
+ }
+
+ /* Setup all the local queue descriptors for SP2Host Buffer Queues */
+ for (i = 0; i < SH_CSS_MAX_NUM_QUEUES; i++) {
+ init_bufq(offsetof(struct host_sp_queues, sp2host_buffer_queues_desc[i]),
+ offsetof(struct host_sp_queues, sp2host_buffer_queues_elems[i]),
+ &css_queues.sp2host_buffer_queue_handles[i]);
+ }
+
+ /* Host2SP event queue*/
+ init_bufq((uint32_t)offsetof(struct host_sp_queues, host2sp_psys_event_queue_desc),
+ (uint32_t)offsetof(struct host_sp_queues, host2sp_psys_event_queue_elems),
+ &css_queues.host2sp_psys_event_queue_handle);
+
+ /* SP2Host event queue */
+ init_bufq((uint32_t)offsetof(struct host_sp_queues, sp2host_psys_event_queue_desc),
+ (uint32_t)offsetof(struct host_sp_queues, sp2host_psys_event_queue_elems),
+ &css_queues.sp2host_psys_event_queue_handle);
+
+#if !defined(HAS_NO_INPUT_SYSTEM)
+ /* Host2SP ISYS event queue */
+ init_bufq((uint32_t)offsetof(struct host_sp_queues, host2sp_isys_event_queue_desc),
+ (uint32_t)offsetof(struct host_sp_queues, host2sp_isys_event_queue_elems),
+ &css_queues.host2sp_isys_event_queue_handle);
+
+ /* SP2Host ISYS event queue*/
+ init_bufq((uint32_t)offsetof(struct host_sp_queues, sp2host_isys_event_queue_desc),
+ (uint32_t)offsetof(struct host_sp_queues, sp2host_isys_event_queue_elems),
+ &css_queues.sp2host_isys_event_queue_handle);
+
+ /* Host2SP tagger command queue */
+ init_bufq((uint32_t)offsetof(struct host_sp_queues, host2sp_tag_cmd_queue_desc),
+ (uint32_t)offsetof(struct host_sp_queues, host2sp_tag_cmd_queue_elems),
+ &css_queues.host2sp_tag_cmd_queue_handle);
+#endif
+
+ IA_CSS_LEAVE_PRIVATE("");
+}
+
+enum ia_css_err ia_css_bufq_enqueue_buffer(
+ int thread_index,
+ int queue_id,
+ uint32_t item)
+{
+ enum ia_css_err return_err = IA_CSS_SUCCESS;
+ ia_css_queue_t *q;
+ int error;
+
+ IA_CSS_ENTER_PRIVATE("queue_id=%d", queue_id);
+ if ((thread_index >= SH_CSS_MAX_SP_THREADS) || (thread_index < 0) ||
+ (queue_id == SH_CSS_INVALID_QUEUE_ID))
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+
+ /* Get the queue for communication */
+ q = bufq_get_qhandle(sh_css_host2sp_buffer_queue,
+ queue_id,
+ thread_index);
+ if (q != NULL) {
+ error = ia_css_queue_enqueue(q, item);
+ return_err = ia_css_convert_errno(error);
+ } else {
+ IA_CSS_ERROR("queue is not initialized");
+ return_err = IA_CSS_ERR_RESOURCE_NOT_AVAILABLE;
+ }
+
+ IA_CSS_LEAVE_ERR_PRIVATE(return_err);
+ return return_err;
+}
+
+enum ia_css_err ia_css_bufq_dequeue_buffer(
+ int queue_id,
+ uint32_t *item)
+{
+ enum ia_css_err return_err;
+ int error = 0;
+ ia_css_queue_t *q;
+
+ IA_CSS_ENTER_PRIVATE("queue_id=%d", queue_id);
+ if ((item == NULL) ||
+ (queue_id <= SH_CSS_INVALID_QUEUE_ID) ||
+ (queue_id >= SH_CSS_MAX_NUM_QUEUES)
+ )
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+
+ q = bufq_get_qhandle(sh_css_sp2host_buffer_queue,
+ queue_id,
+ -1);
+ if (q != NULL) {
+ error = ia_css_queue_dequeue(q, item);
+ return_err = ia_css_convert_errno(error);
+ } else {
+ IA_CSS_ERROR("queue is not initialized");
+ return_err = IA_CSS_ERR_RESOURCE_NOT_AVAILABLE;
+ }
+
+ IA_CSS_LEAVE_ERR_PRIVATE(return_err);
+ return return_err;
+}
+
+enum ia_css_err ia_css_bufq_enqueue_psys_event(
+ uint8_t evt_id,
+ uint8_t evt_payload_0,
+ uint8_t evt_payload_1,
+ uint8_t evt_payload_2)
+{
+ enum ia_css_err return_err;
+ int error = 0;
+ ia_css_queue_t *q;
+
+ IA_CSS_ENTER_PRIVATE("evt_id=%d", evt_id);
+ q = bufq_get_qhandle(sh_css_host2sp_psys_event_queue, -1, -1);
+ if (NULL == q) {
+ IA_CSS_ERROR("queue is not initialized");
+ return IA_CSS_ERR_RESOURCE_NOT_AVAILABLE;
+ }
+
+ error = ia_css_eventq_send(q,
+ evt_id, evt_payload_0, evt_payload_1, evt_payload_2);
+
+ return_err = ia_css_convert_errno(error);
+ IA_CSS_LEAVE_ERR_PRIVATE(return_err);
+ return return_err;
+}
+
+enum ia_css_err ia_css_bufq_dequeue_psys_event(
+ uint8_t item[BUFQ_EVENT_SIZE])
+{
+ int error = 0;
+ ia_css_queue_t *q;
+
+ /* No ENTER/LEAVE in this function since this is polled
+ * by some test apps. Enabling logging here floods the log
+ * files which may cause timeouts. */
+ if (item == NULL)
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+
+ q = bufq_get_qhandle(sh_css_sp2host_psys_event_queue, -1, -1);
+ if (NULL == q) {
+ IA_CSS_ERROR("queue is not initialized");
+ return IA_CSS_ERR_RESOURCE_NOT_AVAILABLE;
+ }
+ error = ia_css_eventq_recv(q, item);
+
+ return ia_css_convert_errno(error);
+
+}
+
+enum ia_css_err ia_css_bufq_dequeue_isys_event(
+ uint8_t item[BUFQ_EVENT_SIZE])
+{
+#if !defined(HAS_NO_INPUT_SYSTEM)
+ int error = 0;
+ ia_css_queue_t *q;
+
+ /* No ENTER/LEAVE in this function since this is polled
+ * by some test apps. Enabling logging here floods the log
+ * files which may cause timeouts. */
+ if (item == NULL)
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+
+ q = bufq_get_qhandle(sh_css_sp2host_isys_event_queue, -1, -1);
+ if (q == NULL) {
+ IA_CSS_ERROR("queue is not initialized");
+ return IA_CSS_ERR_RESOURCE_NOT_AVAILABLE;
+ }
+ error = ia_css_eventq_recv(q, item);
+ return ia_css_convert_errno(error);
+#else
+ (void)item;
+ return IA_CSS_ERR_RESOURCE_NOT_AVAILABLE;
+#endif
+}
+
+enum ia_css_err ia_css_bufq_enqueue_isys_event(uint8_t evt_id)
+{
+#if !defined(HAS_NO_INPUT_SYSTEM)
+ enum ia_css_err return_err;
+ int error = 0;
+ ia_css_queue_t *q;
+
+ IA_CSS_ENTER_PRIVATE("event_id=%d", evt_id);
+ q = bufq_get_qhandle(sh_css_host2sp_isys_event_queue, -1, -1);
+ if (q == NULL) {
+ IA_CSS_ERROR("queue is not initialized");
+ return IA_CSS_ERR_RESOURCE_NOT_AVAILABLE;
+ }
+
+ error = ia_css_eventq_send(q, evt_id, 0, 0, 0);
+ return_err = ia_css_convert_errno(error);
+ IA_CSS_LEAVE_ERR_PRIVATE(return_err);
+ return return_err;
+#else
+ (void)evt_id;
+ return IA_CSS_ERR_RESOURCE_NOT_AVAILABLE;
+#endif
+}
+
+enum ia_css_err ia_css_bufq_enqueue_tag_cmd(
+ uint32_t item)
+{
+#if !defined(HAS_NO_INPUT_SYSTEM)
+ enum ia_css_err return_err;
+ int error = 0;
+ ia_css_queue_t *q;
+
+ IA_CSS_ENTER_PRIVATE("item=%d", item);
+ q = bufq_get_qhandle(sh_css_host2sp_tag_cmd_queue, -1, -1);
+ if (NULL == q) {
+ IA_CSS_ERROR("queue is not initialized");
+ return IA_CSS_ERR_RESOURCE_NOT_AVAILABLE;
+ }
+ error = ia_css_queue_enqueue(q, item);
+
+ return_err = ia_css_convert_errno(error);
+ IA_CSS_LEAVE_ERR_PRIVATE(return_err);
+ return return_err;
+#else
+ (void)item;
+ return IA_CSS_ERR_RESOURCE_NOT_AVAILABLE;
+#endif
+}
+
+enum ia_css_err ia_css_bufq_deinit(void)
+{
+ return IA_CSS_SUCCESS;
+}
+
+static void bufq_dump_queue_info(const char *prefix, ia_css_queue_t *qhandle)
+{
+ uint32_t free = 0, used = 0;
+ assert(prefix != NULL && qhandle != NULL);
+ ia_css_queue_get_used_space(qhandle, &used);
+ ia_css_queue_get_free_space(qhandle, &free);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "%s: used=%u free=%u\n",
+ prefix, used, free);
+
+}
+
+void ia_css_bufq_dump_queue_info(void)
+{
+ int i, j;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "Queue Information:\n");
+
+ for (i = 0; i < SH_CSS_MAX_SP_THREADS; i++) {
+ for (j = 0; j < SH_CSS_MAX_NUM_QUEUES; j++) {
+ snprintf(prefix, BUFQ_DUMP_FILE_NAME_PREFIX_SIZE,
+ "host2sp_buffer_queue[%u][%u]", i, j);
+ bufq_dump_queue_info(prefix,
+ &css_queues.host2sp_buffer_queue_handles[i][j]);
+ }
+ }
+
+ for (i = 0; i < SH_CSS_MAX_NUM_QUEUES; i++) {
+ snprintf(prefix, BUFQ_DUMP_FILE_NAME_PREFIX_SIZE,
+ "sp2host_buffer_queue[%u]", i);
+ bufq_dump_queue_info(prefix,
+ &css_queues.sp2host_buffer_queue_handles[i]);
+ }
+ bufq_dump_queue_info("host2sp_psys_event",
+ &css_queues.host2sp_psys_event_queue_handle);
+ bufq_dump_queue_info("sp2host_psys_event",
+ &css_queues.sp2host_psys_event_queue_handle);
+
+#if !defined(HAS_NO_INPUT_SYSTEM)
+ bufq_dump_queue_info("host2sp_isys_event",
+ &css_queues.host2sp_isys_event_queue_handle);
+ bufq_dump_queue_info("sp2host_isys_event",
+ &css_queues.sp2host_isys_event_queue_handle);
+ bufq_dump_queue_info("host2sp_tag_cmd",
+ &css_queues.host2sp_tag_cmd_queue_handle);
+#endif
+}
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/debug/interface/ia_css_debug.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/debug/interface/ia_css_debug.h
new file mode 100644
index 000000000000..be7df3a30c21
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/debug/interface/ia_css_debug.h
@@ -0,0 +1,508 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _IA_CSS_DEBUG_H_
+#define _IA_CSS_DEBUG_H_
+
+/*! \file */
+
+#include <type_support.h>
+#include <stdarg.h>
+#include "ia_css_types.h"
+#include "ia_css_binary.h"
+#include "ia_css_frame_public.h"
+#include "ia_css_pipe_public.h"
+#include "ia_css_stream_public.h"
+#include "ia_css_metadata.h"
+#include "sh_css_internal.h"
+#ifdef ISP2401
+#if defined(IS_ISP_2500_SYSTEM)
+#include "ia_css_pipe.h"
+#endif
+#endif
+
+/* available levels */
+/*! Level for tracing errors */
+#define IA_CSS_DEBUG_ERROR 1
+/*! Level for tracing warnings */
+#define IA_CSS_DEBUG_WARNING 3
+/*! Level for tracing debug messages */
+#define IA_CSS_DEBUG_VERBOSE 5
+/*! Level for tracing trace messages, e.g. ia_css public function calls */
+#define IA_CSS_DEBUG_TRACE 6
+/*! Level for tracing trace messages, e.g. ia_css private function calls */
+#define IA_CSS_DEBUG_TRACE_PRIVATE 7
+/*! Level for tracing parameter messages e.g. in and out params of functions */
+#define IA_CSS_DEBUG_PARAM 8
+/*! Level for tracing info messages */
+#define IA_CSS_DEBUG_INFO 9
+/* Global variable which controls the verbosity levels of the debug tracing */
+extern unsigned int ia_css_debug_trace_level;
+
+/*! @brief Enum defining the different isp parameters to dump.
+ * Values can be combined to dump a combination of sets.
+ */
+enum ia_css_debug_enable_param_dump {
+ IA_CSS_DEBUG_DUMP_FPN = 1 << 0, /**< FPN table */
+ IA_CSS_DEBUG_DUMP_OB = 1 << 1, /**< OB table */
+ IA_CSS_DEBUG_DUMP_SC = 1 << 2, /**< Shading table */
+ IA_CSS_DEBUG_DUMP_WB = 1 << 3, /**< White balance */
+ IA_CSS_DEBUG_DUMP_DP = 1 << 4, /**< Defect Pixel */
+ IA_CSS_DEBUG_DUMP_BNR = 1 << 5, /**< Bayer Noise Reductions */
+ IA_CSS_DEBUG_DUMP_S3A = 1 << 6, /**< 3A Statistics */
+ IA_CSS_DEBUG_DUMP_DE = 1 << 7, /**< De Mosaicing */
+ IA_CSS_DEBUG_DUMP_YNR = 1 << 8, /**< Luma Noise Reduction */
+ IA_CSS_DEBUG_DUMP_CSC = 1 << 9, /**< Color Space Conversion */
+ IA_CSS_DEBUG_DUMP_GC = 1 << 10, /**< Gamma Correction */
+ IA_CSS_DEBUG_DUMP_TNR = 1 << 11, /**< Temporal Noise Reduction */
+ IA_CSS_DEBUG_DUMP_ANR = 1 << 12, /**< Advanced Noise Reduction */
+ IA_CSS_DEBUG_DUMP_CE = 1 << 13, /**< Chroma Enhancement */
+ IA_CSS_DEBUG_DUMP_ALL = 1 << 14 /**< Dump all device parameters */
+};
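
An illustrative sketch, not part of the patch, of combining these flags. It uses ia_css_debug_set_dtrace_level() and ia_css_debug_dump_isp_params(), both declared further down in this header, and assumes stream points at an already-created stream; the helper name is hypothetical.

static void example_dump_3a_and_wb(struct ia_css_stream *stream)
{
	/* make sure the dump output is not filtered out */
	ia_css_debug_set_dtrace_level(IA_CSS_DEBUG_VERBOSE);

	/* OR the flags to dump only the 3A statistics and white balance */
	ia_css_debug_dump_isp_params(stream,
				     IA_CSS_DEBUG_DUMP_S3A |
				     IA_CSS_DEBUG_DUMP_WB);
}
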
+
+#define IA_CSS_ERROR(fmt, ...) \
+ ia_css_debug_dtrace(IA_CSS_DEBUG_ERROR, \
+ "%s() %d: error: " fmt "\n", __func__, __LINE__, ##__VA_ARGS__)
+
+#define IA_CSS_WARNING(fmt, ...) \
+ ia_css_debug_dtrace(IA_CSS_DEBUG_WARNING, \
+ "%s() %d: warning: " fmt "\n", __func__, __LINE__, ##__VA_ARGS__)
+
+/* Logging macros for public functions (API functions) */
+#define IA_CSS_ENTER(fmt, ...) \
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, \
+ "%s(): enter: " fmt "\n", __func__, ##__VA_ARGS__)
+
+/* Use this macro for small functions that do not call other functions. */
+#define IA_CSS_ENTER_LEAVE(fmt, ...) \
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, \
+ "%s(): enter: leave: " fmt "\n", __func__, ##__VA_ARGS__)
+
+#define IA_CSS_LEAVE(fmt, ...) \
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, \
+ "%s(): leave: " fmt "\n", __func__, ##__VA_ARGS__)
+
+/* Shorthand for returning an enum ia_css_err return value */
+#define IA_CSS_LEAVE_ERR(__err) \
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, \
+ "%s() %d: leave: return_err=%d\n", __func__, __LINE__, __err)
+
+/* Use this macro for logging other than enter/leave.
+ * Note that this macro always uses the PRIVATE logging level.
+ */
+#define IA_CSS_LOG(fmt, ...) \
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, \
+ "%s(): " fmt "\n", __func__, ##__VA_ARGS__)
+
+/* Logging macros for non-API functions. These have a lower trace level */
+#define IA_CSS_ENTER_PRIVATE(fmt, ...) \
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, \
+ "%s(): enter: " fmt "\n", __func__, ##__VA_ARGS__)
+
+#define IA_CSS_LEAVE_PRIVATE(fmt, ...) \
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, \
+ "%s(): leave: " fmt "\n", __func__, ##__VA_ARGS__)
+
+/* Shorthand for returning an enum ia_css_err return value */
+#define IA_CSS_LEAVE_ERR_PRIVATE(__err) \
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, \
+ "%s() %d: leave: return_err=%d\n", __func__, __LINE__, __err)
+
+/* Use this macro for small functions that do not call other functions. */
+#define IA_CSS_ENTER_LEAVE_PRIVATE(fmt, ...) \
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, \
+ "%s(): enter: leave: " fmt "\n", __func__, ##__VA_ARGS__)
+
+/*! @brief Function for tracing to the provided printf function in the
+ * environment.
+ * @param[in] level Level of the message.
+ * @param[in] fmt printf like format string
+ * @param[in] args arguments for the format string
+ */
+STORAGE_CLASS_INLINE void
+ia_css_debug_vdtrace(unsigned int level, const char *fmt, va_list args)
+{
+ if (ia_css_debug_trace_level >= level)
+ sh_css_vprint(fmt, args);
+}
+
+extern void ia_css_debug_dtrace(unsigned int level, const char *fmt, ...);
+
+/*! @brief Dump sp thread's stack contents
+ * SP thread's stack contents are set to 0xcafecafe. This function dumps the
+ * stack to inspect if the stack's boundaries are compromised.
+ * @return None
+ */
+void ia_css_debug_dump_sp_stack_info(void);
+
+/*! @brief Function to set the global dtrace verbosity level.
+ * @param[in] trace_level Maximum level of the messages to be traced.
+ * @return None
+ */
+void ia_css_debug_set_dtrace_level(
+ const unsigned int trace_level);
+
+/*! @brief Function to get the global dtrace verbosity level.
+ * @return global dtrace verbosity level
+ */
+unsigned int ia_css_debug_get_dtrace_level(void);
+
+/*! @brief Dump input formatter state.
+ * Dumps the input formatter state to tracing output.
+ * @return None
+ */
+void ia_css_debug_dump_if_state(void);
+
+/*! @brief Dump isp hardware state.
+ * Dumps the isp hardware state to tracing output.
+ * @return None
+ */
+void ia_css_debug_dump_isp_state(void);
+
+/*! @brief Dump sp hardware state.
+ * Dumps the sp hardware state to tracing output.
+ * @return None
+ */
+void ia_css_debug_dump_sp_state(void);
+
+#ifdef ISP2401
+/*! @brief Dump GAC hardware state.
+ * Dumps the GAC ACB hardware registers; may be useful for
+ * detecting a GAC that has hung.
+ * @return None
+ */
+void ia_css_debug_dump_gac_state(void);
+
+#endif
+/*! @brief Dump dma controller state.
+ * Dumps the dma controller state to tracing output.
+ * @return None
+ */
+void ia_css_debug_dump_dma_state(void);
+
+/*! @brief Dump internal sp software state.
+ * Dumps the sp software state to tracing output.
+ * @return None
+ */
+void ia_css_debug_dump_sp_sw_debug_info(void);
+
+/*! @brief Dump all related hardware state to the trace output
+ * @param[in] context String to identify context in output.
+ * @return None
+ */
+void ia_css_debug_dump_debug_info(
+ const char *context);
+
+#if SP_DEBUG != SP_DEBUG_NONE
+void ia_css_debug_print_sp_debug_state(
+ const struct sh_css_sp_debug_state *state);
+#endif
+
+/*! @brief Dump all related binary info data
+ * @param[in] bi Binary info struct.
+ * @return None
+ */
+void ia_css_debug_binary_print(
+ const struct ia_css_binary *bi);
+
+void ia_css_debug_sp_dump_mipi_fifo_high_water(void);
+
+/*! @brief Dump isp gdc fifo state to the trace output
+ * Dumps the isp gdc fifo state to tracing output.
+ * @return None
+ */
+void ia_css_debug_dump_isp_gdc_fifo_state(void);
+
+/*! @brief Dump dma isp fifo state
+ * Dumps the dma isp fifo state to tracing output.
+ * @return None
+ */
+void ia_css_debug_dump_dma_isp_fifo_state(void);
+
+/*! @brief Dump dma sp fifo state
+ * Dumps the dma sp fifo state to tracing output.
+ * @return None
+ */
+void ia_css_debug_dump_dma_sp_fifo_state(void);
+
+/*! \brief Dump pif A isp fifo state
+ * Dumps the primary input formatter state to tracing output.
+ * @return None
+ */
+void ia_css_debug_dump_pif_a_isp_fifo_state(void);
+
+/*! \brief Dump pif B isp fifo state
+ * Dumps the primary input formatter state to tracing output.
+ * \return None
+ */
+void ia_css_debug_dump_pif_b_isp_fifo_state(void);
+
+/*! @brief Dump stream-to-memory sp fifo state
+ * Dumps the stream-to-memory block state to tracing output.
+ * @return None
+ */
+void ia_css_debug_dump_str2mem_sp_fifo_state(void);
+
+/*! @brief Dump isp sp fifo state
+ * Dumps the isp sp fifo state to tracing output.
+ * @return None
+ */
+void ia_css_debug_dump_isp_sp_fifo_state(void);
+
+/*! @brief Dump all fifo state info to the output
+ * Dumps all fifo state to tracing output.
+ * @return None
+ */
+void ia_css_debug_dump_all_fifo_state(void);
+
+/*! @brief Dump the rx state to the output
+ * Dumps the rx state to tracing output.
+ * @return None
+ */
+void ia_css_debug_dump_rx_state(void);
+
+/*! @brief Dump the input system state to the output
+ * Dumps the input system state to tracing output.
+ * @return None
+ */
+void ia_css_debug_dump_isys_state(void);
+
+/*! @brief Dump the frame info to the trace output
+ * Dumps the frame info to tracing output.
+ * @param[in] frame pointer to struct ia_css_frame
+ * @param[in] descr description output along with the frame info
+ * @return None
+ */
+void ia_css_debug_frame_print(
+ const struct ia_css_frame *frame,
+ const char *descr);
+
+/*! @brief Function to enable sp sleep mode.
+ * Function that enables sp sleep mode
+ * @param[in] mode indicates when to put sp to sleep
+ * @return None
+ */
+void ia_css_debug_enable_sp_sleep_mode(enum ia_css_sp_sleep_mode mode);
+
+/*! @brief Function to wake up sp when in sleep mode.
+ * After sp has been put to sleep, use this function to let it continue
+ * to run again.
+ * @return None
+ */
+void ia_css_debug_wake_up_sp(void);
+
+/*! @brief Function to dump isp parameters.
+ * Dump isp parameters to tracing output
+ * @param[in] stream pointer to ia_css_stream struct
+ * @param[in] enable flag indicating which parameters to dump.
+ * @return None
+ */
+void ia_css_debug_dump_isp_params(struct ia_css_stream *stream, unsigned int enable);
+
+/*! @brief Function to dump some sp performance counters.
+ * Dump sp performance counters, currently input system errors.
+ * @return None
+ */
+void ia_css_debug_dump_perf_counters(void);
+
+#ifdef HAS_WATCHDOG_SP_THREAD_DEBUG
+void sh_css_dump_thread_wait_info(void);
+void sh_css_dump_pipe_stage_info(void);
+void sh_css_dump_pipe_stripe_info(void);
+#endif
+
+void ia_css_debug_dump_isp_binary(void);
+
+void sh_css_dump_sp_raw_copy_linecount(bool reduced);
+
+/*! @brief Dump the resolution info to the trace output
+ * Dumps the resolution info to the trace output.
+ * @param[in] res pointer to struct ia_css_resolution
+ * @param[in] label description of resolution output
+ * @return None
+ */
+void ia_css_debug_dump_resolution(
+ const struct ia_css_resolution *res,
+ const char *label);
+
+/*! @brief Dump the frame info to the trace output
+ * Dumps the frame info to the trace output.
+ * @param[in] info pointer to struct ia_css_frame_info
+ * @param[in] label description of frame_info output
+ * @return None
+ */
+void ia_css_debug_dump_frame_info(
+ const struct ia_css_frame_info *info,
+ const char *label);
+
+/*! @brief Dump the capture config info to the trace output
+ * Dumps the capture config info to the trace output.
+ * @param[in] config pointer to struct ia_css_capture_config
+ * @return None
+ */
+void ia_css_debug_dump_capture_config(
+ const struct ia_css_capture_config *config);
+
+/*! @brief Dump the pipe extra config info to the trace output
+ * Dumps the pipe extra config info to the trace output.
+ * @param[in] extra_config pointer to struct ia_css_pipe_extra_config
+ * @return None
+ */
+void ia_css_debug_dump_pipe_extra_config(
+ const struct ia_css_pipe_extra_config *extra_config);
+
+/*! @brief Dump the pipe config info to the trace output
+ * Dumps the pipe config info to the trace output.
+ * @param[in] config pointer to struct ia_css_pipe_config
+ * @return None
+ */
+void ia_css_debug_dump_pipe_config(
+ const struct ia_css_pipe_config *config);
+
+
+/*! @brief Dump the stream config source info to the trace output
+ * Dumps the stream config source info to the trace output.
+ * @param[in] config pointer to struct ia_css_stream_config
+ * @return None
+ */
+void ia_css_debug_dump_stream_config_source(
+ const struct ia_css_stream_config *config);
+
+/*! @brief Dump the mipi buffer config info to the trace output
+ * Dumps the mipi buffer config info to the trace output.
+ * @param[in] config pointer to struct ia_css_mipi_buffer_config
+ * @return None
+ */
+void ia_css_debug_dump_mipi_buffer_config(
+ const struct ia_css_mipi_buffer_config *config);
+
+/*! @brief Dump the metadata config info to the trace output
+ * Dumps the metadata config info to the trace output.
+ * @param[in] config pointer to struct ia_css_metadata_config
+ * @return None
+ */
+void ia_css_debug_dump_metadata_config(
+ const struct ia_css_metadata_config *config);
+
+/*! @brief Dump the stream config info to the trace output
+ * Dumps the stream config info to the trace output.
+ * @param[in] config pointer to struct ia_css_stream_config
+ * @param[in] num_pipes number of pipes for the stream
+ * @return None
+ */
+void ia_css_debug_dump_stream_config(
+ const struct ia_css_stream_config *config,
+ int num_pipes);
+
+/*! @brief Dump the state of the SP tagger
+ * Dumps the internal state of the SP tagger
+ * @return None
+ */
+void ia_css_debug_tagger_state(void);
+
+/**
+ * @brief Initialize the debug mode.
+ *
+ * WARNING:
+ * This API should be called ONLY once in the debug mode.
+ *
+ * @return
+ * - true, if it is successful.
+ * - false, otherwise.
+ */
+bool ia_css_debug_mode_init(void);
+
+/**
+ * @brief Disable the DMA channel.
+ *
+ * @param[in] dma_ID The ID of the target DMA.
+ * @param[in] channel_id The ID of the target DMA channel.
+ * @param[in] request_type The type of the DMA request.
+ * For example:
+ * - "0" indicates the writing request.
+ * - "1" indicates the reading request.
+ *
+ * This is part of the DMA API -> dma.h
+ *
+ * @return
+ * - true, if it is successful.
+ * - false, otherwise.
+ */
+bool ia_css_debug_mode_disable_dma_channel(
+ int dma_ID,
+ int channel_id,
+ int request_type);
+/**
+ * @brief Enable the DMA channel.
+ *
+ * @param[in] dma_ID The ID of the target DMA.
+ * @param[in] channel_id The ID of the target DMA channel.
+ * @param[in] request_type The type of the DMA request.
+ * For example:
+ * - "0" indicates the writing request.
+ * - "1" indicates the reading request.
+ *
+ * @return
+ * - true, if it is successful.
+ * - false, otherwise.
+ */
+bool ia_css_debug_mode_enable_dma_channel(
+ int dma_ID,
+ int channel_id,
+ int request_type);
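
An illustrative sketch, not part of the patch, that pauses and resumes the write requests of one DMA channel with the two calls above; it assumes ia_css_debug_mode_init() has already been called once, as required by the warning earlier in this header, and the helper name is hypothetical.

static bool example_pause_dma_writes(int dma_id, int channel_id)
{
	/* request type 0 is documented above as the writing request */
	if (!ia_css_debug_mode_disable_dma_channel(dma_id, channel_id, 0))
		return false;

	/* ... inspect or dump hardware state here ... */

	return ia_css_debug_mode_enable_dma_channel(dma_id, channel_id, 0);
}
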
+
+/**
+ * @brief Dump tracer data.
+ * [Currently supported only for SKC]
+ *
+ * @return
+ * - none.
+ */
+void ia_css_debug_dump_trace(void);
+
+#ifdef ISP2401
+/**
+ * @brief Program counter dumping (in loop)
+ *
+ * @param[in] id The ID of the SP
+ * @param[in] num_of_dumps The number of dumps
+ *
+ * @return
+ * - none
+ */
+void ia_css_debug_pc_dump(sp_ID_t id, unsigned int num_of_dumps);
+
+#if defined(IS_ISP_2500_SYSTEM)
+/*! @brief Dump all states for ISP hang case.
+ * Dumps the ISP previous and current configurations
+ * GACs status, SP0/1 statuses.
+ *
+ * @param[in] pipe The current pipe
+ *
+ * @return None
+ */
+void ia_css_debug_dump_hang_status(
+ struct ia_css_pipe *pipe);
+
+/*! @brief External command handler
+ * External command handler
+ *
+ * @return None
+ */
+void ia_css_debug_ext_command_handler(void);
+
+#endif
+#endif
+
+#endif /* _IA_CSS_DEBUG_H_ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/debug/interface/ia_css_debug_internal.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/debug/interface/ia_css_debug_internal.h
new file mode 100644
index 000000000000..88d025807201
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/debug/interface/ia_css_debug_internal.h
@@ -0,0 +1,31 @@
+#ifndef ISP2401
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#else
+/**
+Support for Intel Camera Imaging ISP subsystem.
+Copyright (c) 2010 - 2015, Intel Corporation.
+
+This program is free software; you can redistribute it and/or modify it
+under the terms and conditions of the GNU General Public License,
+version 2, as published by the Free Software Foundation.
+
+This program is distributed in the hope it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+more details.
+*/
+#endif
+
+/* TODO: Move debug-related code from ia_css_internal.h in here. */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/debug/interface/ia_css_debug_pipe.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/debug/interface/ia_css_debug_pipe.h
new file mode 100644
index 000000000000..72ac0e32ebf7
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/debug/interface/ia_css_debug_pipe.h
@@ -0,0 +1,84 @@
+#ifndef ISP2401
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#else
+/**
+Support for Intel Camera Imaging ISP subsystem.
+Copyright (c) 2010 - 2015, Intel Corporation.
+
+This program is free software; you can redistribute it and/or modify it
+under the terms and conditions of the GNU General Public License,
+version 2, as published by the Free Software Foundation.
+
+This program is distributed in the hope it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+more details.
+*/
+#endif
+
+#ifndef _IA_CSS_DEBUG_PIPE_H_
+#define _IA_CSS_DEBUG_PIPE_H_
+
+/*! \file */
+
+#include <ia_css_frame_public.h>
+#include <ia_css_stream_public.h>
+#include "ia_css_pipeline.h"
+
+/**
+ * @brief Internal debug support for constructing a pipe graph.
+ *
+ * @return None
+ */
+extern void ia_css_debug_pipe_graph_dump_prologue(void);
+
+/**
+ * @brief Internal debug support for constructing a pipe graph.
+ *
+ * @return None
+ */
+extern void ia_css_debug_pipe_graph_dump_epilogue(void);
+
+/**
+ * @brief Internal debug support for constructing a pipe graph.
+ * @param[in] stage Pipeline stage.
+ * @param[in] id Pipe id.
+ *
+ * @return None
+ */
+extern void ia_css_debug_pipe_graph_dump_stage(
+ struct ia_css_pipeline_stage *stage,
+ enum ia_css_pipe_id id);
+
+/**
+ * @brief Internal debug support for constructing a pipe graph.
+ * @param[in] out_frame Output frame of SP raw copy.
+ *
+ * @return None
+ */
+extern void ia_css_debug_pipe_graph_dump_sp_raw_copy(
+ struct ia_css_frame *out_frame);
+
+
+/**
+ * @brief Internal debug support for constructing a pipe graph.
+ * @param[in] stream_config info about sensor and input formatter.
+ *
+ * @return None
+ */
+extern void ia_css_debug_pipe_graph_dump_stream_config(
+ const struct ia_css_stream_config *stream_config);
+
+#endif /* _IA_CSS_DEBUG_PIPE_H_ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/debug/src/ia_css_debug.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/debug/src/ia_css_debug.c
new file mode 100644
index 000000000000..030810bd0878
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/debug/src/ia_css_debug.c
@@ -0,0 +1,3611 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "debug.h"
+#include "memory_access.h"
+
+#ifndef __INLINE_INPUT_SYSTEM__
+#define __INLINE_INPUT_SYSTEM__
+#endif
+#ifndef __INLINE_IBUF_CTRL__
+#define __INLINE_IBUF_CTRL__
+#endif
+#ifndef __INLINE_CSI_RX__
+#define __INLINE_CSI_RX__
+#endif
+#ifndef __INLINE_PIXELGEN__
+#define __INLINE_PIXELGEN__
+#endif
+#ifndef __INLINE_STREAM2MMIO__
+#define __INLINE_STREAM2MMIO__
+#endif
+
+#include "ia_css_debug.h"
+#include "ia_css_debug_pipe.h"
+#include "ia_css_irq.h"
+#include "ia_css_stream.h"
+#include "ia_css_pipeline.h"
+#include "ia_css_isp_param.h"
+#include "sh_css_params.h"
+#include "ia_css_bufq.h"
+#ifdef ISP2401
+#include "ia_css_queue.h"
+#endif
+
+#include "ia_css_isp_params.h"
+
+#include "system_local.h"
+#include "assert_support.h"
+#include "print_support.h"
+#include "string_support.h"
+#ifdef ISP2401
+#include "ia_css_system_ctrl.h"
+#endif
+
+#include "fifo_monitor.h"
+
+#if !defined(HAS_NO_INPUT_FORMATTER)
+#include "input_formatter.h"
+#endif
+#include "dma.h"
+#include "irq.h"
+#include "gp_device.h"
+#include "sp.h"
+#include "isp.h"
+#include "type_support.h"
+#include "math_support.h" /* CEIL_DIV */
+#if defined(HAS_INPUT_FORMATTER_VERSION_2) || defined(USE_INPUT_SYSTEM_VERSION_2401)
+#include "input_system.h" /* input_formatter_reg_load */
+#endif
+#if defined(USE_INPUT_SYSTEM_VERSION_2) || defined(USE_INPUT_SYSTEM_VERSION_2401)
+#include "ia_css_tagger_common.h"
+#endif
+
+#include "sh_css_internal.h"
+#if !defined(HAS_NO_INPUT_SYSTEM)
+#include "ia_css_isys.h"
+#endif
+#include "sh_css_sp.h" /* sh_css_sp_get_debug_state() */
+
+#include "css_trace.h" /* tracer */
+
+#include "device_access.h" /* for ia_css_device_load_uint32 */
+
+/* Include all kernel host interfaces for ISP1 */
+#include "anr/anr_1.0/ia_css_anr.host.h"
+#include "cnr/cnr_1.0/ia_css_cnr.host.h"
+#include "csc/csc_1.0/ia_css_csc.host.h"
+#include "de/de_1.0/ia_css_de.host.h"
+#include "dp/dp_1.0/ia_css_dp.host.h"
+#include "bnr/bnr_1.0/ia_css_bnr.host.h"
+#include "fpn/fpn_1.0/ia_css_fpn.host.h"
+#include "gc/gc_1.0/ia_css_gc.host.h"
+#include "ob/ob_1.0/ia_css_ob.host.h"
+#include "s3a/s3a_1.0/ia_css_s3a.host.h"
+#include "sc/sc_1.0/ia_css_sc.host.h"
+#include "tnr/tnr_1.0/ia_css_tnr.host.h"
+#include "uds/uds_1.0/ia_css_uds_param.h"
+#include "wb/wb_1.0/ia_css_wb.host.h"
+#include "ynr/ynr_1.0/ia_css_ynr.host.h"
+
+/* Include additional kernel host interfaces for ISP2 */
+#include "aa/aa_2/ia_css_aa2.host.h"
+#include "anr/anr_2/ia_css_anr2.host.h"
+#include "cnr/cnr_2/ia_css_cnr2.host.h"
+#include "de/de_2/ia_css_de2.host.h"
+#include "gc/gc_2/ia_css_gc2.host.h"
+#include "ynr/ynr_2/ia_css_ynr2.host.h"
+
+/* Global variable to store the dtrace verbosity level */
+unsigned int ia_css_debug_trace_level = IA_CSS_DEBUG_WARNING;
+
+/* Assumes that IA_CSS_STREAM_FORMAT_BINARY_8 is last */
+#define N_IA_CSS_STREAM_FORMAT (IA_CSS_STREAM_FORMAT_BINARY_8+1)
+
+#define DPG_START "ia_css_debug_pipe_graph_dump_start "
+#define DPG_END " ia_css_debug_pipe_graph_dump_end\n"
+
+#define ENABLE_LINE_MAX_LENGTH (25)
+
+#ifdef ISP2401
+#define DBG_EXT_CMD_TRACE_PNTS_DUMP (1 << 8)
+#define DBG_EXT_CMD_PUB_CFG_DUMP (1 << 9)
+#define DBG_EXT_CMD_GAC_REG_DUMP (1 << 10)
+#define DBG_EXT_CMD_GAC_ACB_REG_DUMP (1 << 11)
+#define DBG_EXT_CMD_FIFO_DUMP (1 << 12)
+#define DBG_EXT_CMD_QUEUE_DUMP (1 << 13)
+#define DBG_EXT_CMD_DMA_DUMP (1 << 14)
+#define DBG_EXT_CMD_MASK 0xAB0000CD
+
+#endif
+/*
+ * TODO: SH_CSS_MAX_SP_THREADS is not the maximum number of SP threads;
+ * future rework should fix this and remove the define MAX_THREAD_NUM.
+ */
+#define MAX_THREAD_NUM (SH_CSS_MAX_SP_THREADS + SH_CSS_MAX_SP_INTERNAL_THREADS)
+
+static struct pipe_graph_class {
+ bool do_init;
+ int height;
+ int width;
+ int eff_height;
+ int eff_width;
+ enum ia_css_stream_format stream_format;
+} pg_inst = {true, 0, 0, 0, 0, N_IA_CSS_STREAM_FORMAT};
+
+static const char * const queue_id_to_str[] = {
+ /* [SH_CSS_QUEUE_A_ID] =*/ "queue_A",
+ /* [SH_CSS_QUEUE_B_ID] =*/ "queue_B",
+ /* [SH_CSS_QUEUE_C_ID] =*/ "queue_C",
+ /* [SH_CSS_QUEUE_D_ID] =*/ "queue_D",
+ /* [SH_CSS_QUEUE_E_ID] =*/ "queue_E",
+ /* [SH_CSS_QUEUE_F_ID] =*/ "queue_F",
+ /* [SH_CSS_QUEUE_G_ID] =*/ "queue_G",
+ /* [SH_CSS_QUEUE_H_ID] =*/ "queue_H"
+};
+
+static const char * const pipe_id_to_str[] = {
+ /* [IA_CSS_PIPE_ID_PREVIEW] =*/ "preview",
+ /* [IA_CSS_PIPE_ID_COPY] =*/ "copy",
+ /* [IA_CSS_PIPE_ID_VIDEO] =*/ "video",
+ /* [IA_CSS_PIPE_ID_CAPTURE] =*/ "capture",
+ /* [IA_CSS_PIPE_ID_YUVPP] =*/ "yuvpp",
+ /* [IA_CSS_PIPE_ID_ACC] =*/ "accelerator"
+};
+
+static char dot_id_input_bin[SH_CSS_MAX_BINARY_NAME+10];
+static char ring_buffer[200];
+
+void ia_css_debug_dtrace(unsigned int level, const char *fmt, ...)
+{
+ va_list ap;
+
+ va_start(ap, fmt);
+ ia_css_debug_vdtrace(level, fmt, ap);
+ va_end(ap);
+}
+
+#if !defined(HRT_UNSCHED)
+static void debug_dump_long_array_formatted(
+ const sp_ID_t sp_id,
+ hrt_address stack_sp_addr,
+ unsigned stack_size)
+{
+ unsigned int i;
+ uint32_t val;
+ uint32_t addr = (uint32_t) stack_sp_addr;
+ uint32_t stack_size_words = CEIL_DIV(stack_size, sizeof(uint32_t));
+
+ /* When the size is not a multiple of four, the last word is only
+ * relevant for the remaining bytes */
+ for (i = 0; i < stack_size_words; i++) {
+ val = sp_dmem_load_uint32(sp_id, (hrt_address)addr);
+ if ((i%8) == 0)
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, "\n");
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, "0x%08x ", val);
+ addr += sizeof(uint32_t);
+ }
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, "\n");
+}
+
+static void debug_dump_sp_stack_info(
+ const sp_ID_t sp_id)
+{
+ const struct ia_css_fw_info *fw;
+ unsigned int HIVE_ADDR_sp_threads_stack;
+ unsigned int HIVE_ADDR_sp_threads_stack_size;
+ uint32_t stack_sizes[MAX_THREAD_NUM];
+ uint32_t stack_sp_addr[MAX_THREAD_NUM];
+ unsigned int i;
+
+ fw = &sh_css_sp_fw;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, "sp_id(%u) stack info\n", sp_id);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "from objects stack_addr_offset:0x%x stack_size_offset:0x%x\n",
+ fw->info.sp.threads_stack,
+ fw->info.sp.threads_stack_size);
+
+ HIVE_ADDR_sp_threads_stack = fw->info.sp.threads_stack;
+ HIVE_ADDR_sp_threads_stack_size = fw->info.sp.threads_stack_size;
+
+ if (fw->info.sp.threads_stack == 0 ||
+ fw->info.sp.threads_stack_size == 0)
+ return;
+
+ (void) HIVE_ADDR_sp_threads_stack;
+ (void) HIVE_ADDR_sp_threads_stack_size;
+
+ sp_dmem_load(sp_id,
+ (unsigned int)sp_address_of(sp_threads_stack),
+ &stack_sp_addr, sizeof(stack_sp_addr));
+ sp_dmem_load(sp_id,
+ (unsigned int)sp_address_of(sp_threads_stack_size),
+ &stack_sizes, sizeof(stack_sizes));
+
+ for (i = 0 ; i < MAX_THREAD_NUM; i++) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "thread: %u stack_addr: 0x%08x stack_size: %u\n",
+ i, stack_sp_addr[i], stack_sizes[i]);
+ debug_dump_long_array_formatted(sp_id, (hrt_address)stack_sp_addr[i],
+ stack_sizes[i]);
+ }
+}
+
+void ia_css_debug_dump_sp_stack_info(void)
+{
+ debug_dump_sp_stack_info(SP0_ID);
+}
+#else
+/* Empty def for crun */
+void ia_css_debug_dump_sp_stack_info(void)
+{
+}
+#endif /* #if !HRT_UNSCHED */
+
+
+void ia_css_debug_set_dtrace_level(const unsigned int trace_level)
+{
+ ia_css_debug_trace_level = trace_level;
+ return;
+}
+
+unsigned int ia_css_debug_get_dtrace_level(void)
+{
+ return ia_css_debug_trace_level;
+}
+
+static const char *debug_stream_format2str(const enum ia_css_stream_format stream_format)
+{
+ switch (stream_format) {
+ case IA_CSS_STREAM_FORMAT_YUV420_8_LEGACY:
+ return "yuv420-8-legacy";
+ case IA_CSS_STREAM_FORMAT_YUV420_8:
+ return "yuv420-8";
+ case IA_CSS_STREAM_FORMAT_YUV420_10:
+ return "yuv420-10";
+ case IA_CSS_STREAM_FORMAT_YUV420_16:
+ return "yuv420-16";
+ case IA_CSS_STREAM_FORMAT_YUV422_8:
+ return "yuv422-8";
+ case IA_CSS_STREAM_FORMAT_YUV422_10:
+ return "yuv422-10";
+ case IA_CSS_STREAM_FORMAT_YUV422_16:
+ return "yuv422-16";
+ case IA_CSS_STREAM_FORMAT_RGB_444:
+ return "rgb444";
+ case IA_CSS_STREAM_FORMAT_RGB_555:
+ return "rgb555";
+ case IA_CSS_STREAM_FORMAT_RGB_565:
+ return "rgb565";
+ case IA_CSS_STREAM_FORMAT_RGB_666:
+ return "rgb666";
+ case IA_CSS_STREAM_FORMAT_RGB_888:
+ return "rgb888";
+ case IA_CSS_STREAM_FORMAT_RAW_6:
+ return "raw6";
+ case IA_CSS_STREAM_FORMAT_RAW_7:
+ return "raw7";
+ case IA_CSS_STREAM_FORMAT_RAW_8:
+ return "raw8";
+ case IA_CSS_STREAM_FORMAT_RAW_10:
+ return "raw10";
+ case IA_CSS_STREAM_FORMAT_RAW_12:
+ return "raw12";
+ case IA_CSS_STREAM_FORMAT_RAW_14:
+ return "raw14";
+ case IA_CSS_STREAM_FORMAT_RAW_16:
+ return "raw16";
+ case IA_CSS_STREAM_FORMAT_BINARY_8:
+ return "binary8";
+ case IA_CSS_STREAM_FORMAT_GENERIC_SHORT1:
+ return "generic-short1";
+ case IA_CSS_STREAM_FORMAT_GENERIC_SHORT2:
+ return "generic-short2";
+ case IA_CSS_STREAM_FORMAT_GENERIC_SHORT3:
+ return "generic-short3";
+ case IA_CSS_STREAM_FORMAT_GENERIC_SHORT4:
+ return "generic-short4";
+ case IA_CSS_STREAM_FORMAT_GENERIC_SHORT5:
+ return "generic-short5";
+ case IA_CSS_STREAM_FORMAT_GENERIC_SHORT6:
+ return "generic-short6";
+ case IA_CSS_STREAM_FORMAT_GENERIC_SHORT7:
+ return "generic-short7";
+ case IA_CSS_STREAM_FORMAT_GENERIC_SHORT8:
+ return "generic-short8";
+ case IA_CSS_STREAM_FORMAT_YUV420_8_SHIFT:
+ return "yuv420-8-shift";
+ case IA_CSS_STREAM_FORMAT_YUV420_10_SHIFT:
+ return "yuv420-10-shift";
+ case IA_CSS_STREAM_FORMAT_EMBEDDED:
+ return "embedded-8";
+ case IA_CSS_STREAM_FORMAT_USER_DEF1:
+ return "user-def-8-type-1";
+ case IA_CSS_STREAM_FORMAT_USER_DEF2:
+ return "user-def-8-type-2";
+ case IA_CSS_STREAM_FORMAT_USER_DEF3:
+ return "user-def-8-type-3";
+ case IA_CSS_STREAM_FORMAT_USER_DEF4:
+ return "user-def-8-type-4";
+ case IA_CSS_STREAM_FORMAT_USER_DEF5:
+ return "user-def-8-type-5";
+ case IA_CSS_STREAM_FORMAT_USER_DEF6:
+ return "user-def-8-type-6";
+ case IA_CSS_STREAM_FORMAT_USER_DEF7:
+ return "user-def-8-type-7";
+ case IA_CSS_STREAM_FORMAT_USER_DEF8:
+ return "user-def-8-type-8";
+
+ default:
+ assert(!"Unknown stream format");
+ return "unknown-stream-format";
+ }
+}
+
+static const char *debug_frame_format2str(const enum ia_css_frame_format frame_format)
+{
+ switch (frame_format) {
+
+ case IA_CSS_FRAME_FORMAT_NV11:
+ return "NV11";
+ case IA_CSS_FRAME_FORMAT_NV12:
+ return "NV12";
+ case IA_CSS_FRAME_FORMAT_NV12_16:
+ return "NV12_16";
+ case IA_CSS_FRAME_FORMAT_NV12_TILEY:
+ return "NV12_TILEY";
+ case IA_CSS_FRAME_FORMAT_NV16:
+ return "NV16";
+ case IA_CSS_FRAME_FORMAT_NV21:
+ return "NV21";
+ case IA_CSS_FRAME_FORMAT_NV61:
+ return "NV61";
+ case IA_CSS_FRAME_FORMAT_YV12:
+ return "YV12";
+ case IA_CSS_FRAME_FORMAT_YV16:
+ return "YV16";
+ case IA_CSS_FRAME_FORMAT_YUV420:
+ return "YUV420";
+ case IA_CSS_FRAME_FORMAT_YUV420_16:
+ return "YUV420_16";
+ case IA_CSS_FRAME_FORMAT_YUV422:
+ return "YUV422";
+ case IA_CSS_FRAME_FORMAT_YUV422_16:
+ return "YUV422_16";
+ case IA_CSS_FRAME_FORMAT_UYVY:
+ return "UYVY";
+ case IA_CSS_FRAME_FORMAT_YUYV:
+ return "YUYV";
+ case IA_CSS_FRAME_FORMAT_YUV444:
+ return "YUV444";
+ case IA_CSS_FRAME_FORMAT_YUV_LINE:
+ return "YUV_LINE";
+ case IA_CSS_FRAME_FORMAT_RAW:
+ return "RAW";
+ case IA_CSS_FRAME_FORMAT_RGB565:
+ return "RGB565";
+ case IA_CSS_FRAME_FORMAT_PLANAR_RGB888:
+ return "PLANAR_RGB888";
+ case IA_CSS_FRAME_FORMAT_RGBA888:
+ return "RGBA888";
+ case IA_CSS_FRAME_FORMAT_QPLANE6:
+ return "QPLANE6";
+ case IA_CSS_FRAME_FORMAT_BINARY_8:
+ return "BINARY_8";
+ case IA_CSS_FRAME_FORMAT_MIPI:
+ return "MIPI";
+ case IA_CSS_FRAME_FORMAT_RAW_PACKED:
+ return "RAW_PACKED";
+ case IA_CSS_FRAME_FORMAT_CSI_MIPI_YUV420_8:
+ return "CSI_MIPI_YUV420_8";
+ case IA_CSS_FRAME_FORMAT_CSI_MIPI_LEGACY_YUV420_8:
+ return "CSI_MIPI_LEGACY_YUV420_8";
+ case IA_CSS_FRAME_FORMAT_CSI_MIPI_YUV420_10:
+ return "CSI_MIPI_YUV420_10";
+
+ default:
+ assert(!"Unknown frame format");
+ return "unknown-frame-format";
+ }
+}
+
+static void debug_print_sp_state(const sp_state_t *state, const char *cell)
+{
+ assert(cell != NULL);
+ assert(state != NULL);
+
+ ia_css_debug_dtrace(2, "%s state:\n", cell);
+ ia_css_debug_dtrace(2, "\t%-32s: 0x%X\n", "PC", state->pc);
+ ia_css_debug_dtrace(2, "\t%-32s: 0x%X\n", "Status register",
+ state->status_register);
+ ia_css_debug_dtrace(2, "\t%-32s: %d\n", "Is broken", state->is_broken);
+ ia_css_debug_dtrace(2, "\t%-32s: %d\n", "Is idle", state->is_idle);
+ ia_css_debug_dtrace(2, "\t%-32s: %d\n", "Is sleeping",
+ state->is_sleeping);
+ ia_css_debug_dtrace(2, "\t%-32s: %d\n", "Is stalling",
+ state->is_stalling);
+ return;
+}
+
+static void debug_print_isp_state(const isp_state_t *state, const char *cell)
+{
+ assert(state != NULL);
+ assert(cell != NULL);
+
+ ia_css_debug_dtrace(2, "%s state:\n", cell);
+ ia_css_debug_dtrace(2, "\t%-32s: 0x%X\n", "PC", state->pc);
+ ia_css_debug_dtrace(2, "\t%-32s: 0x%X\n", "Status register",
+ state->status_register);
+ ia_css_debug_dtrace(2, "\t%-32s: %d\n", "Is broken", state->is_broken);
+ ia_css_debug_dtrace(2, "\t%-32s: %d\n", "Is idle", state->is_idle);
+ ia_css_debug_dtrace(2, "\t%-32s: %d\n", "Is sleeping",
+ state->is_sleeping);
+ ia_css_debug_dtrace(2, "\t%-32s: %d\n", "Is stalling",
+ state->is_stalling);
+ return;
+}
+
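+/*
+ * Dump the ISP cell state; when the ISP is stalling, also print which
+ * FIFOs and memories are stalled.
+ */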
+void ia_css_debug_dump_isp_state(void)
+{
+ isp_state_t state;
+ isp_stall_t stall;
+
+ isp_get_state(ISP0_ID, &state, &stall);
+
+ debug_print_isp_state(&state, "ISP");
+
+ if (state.is_stalling) {
+#if !defined(HAS_NO_INPUT_FORMATTER)
+ ia_css_debug_dtrace(2, "\t%-32s: %d\n",
+ "[0] if_prim_a_FIFO stalled", stall.fifo0);
+ ia_css_debug_dtrace(2, "\t%-32s: %d\n",
+ "[1] if_prim_b_FIFO stalled", stall.fifo1);
+#endif
+ ia_css_debug_dtrace(2, "\t%-32s: %d\n", "[2] dma_FIFO stalled",
+ stall.fifo2);
+#if defined(HAS_ISP_2400_MAMOIADA) || defined(HAS_ISP_2401_MAMOIADA) || defined(IS_ISP_2500_SYSTEM)
+
+ ia_css_debug_dtrace(2, "\t%-32s: %d\n", "[3] gdc0_FIFO stalled",
+ stall.fifo3);
+#if !defined(IS_ISP_2500_SYSTEM)
+ ia_css_debug_dtrace(2, "\t%-32s: %d\n", "[4] gdc1_FIFO stalled",
+ stall.fifo4);
+ ia_css_debug_dtrace(2, "\t%-32s: %d\n", "[5] gpio_FIFO stalled",
+ stall.fifo5);
+#endif
+ ia_css_debug_dtrace(2, "\t%-32s: %d\n", "[6] sp_FIFO stalled",
+ stall.fifo6);
+#else
+#error "ia_css_debug: ISP cell must be one of {2400_MAMOIADA,, 2401_MAMOIADA, 2500_SKYCAM}"
+#endif
+ ia_css_debug_dtrace(2, "\t%-32s: %d\n",
+ "status & control stalled",
+ stall.stat_ctrl);
+ ia_css_debug_dtrace(2, "\t%-32s: %d\n", "dmem stalled",
+ stall.dmem);
+ ia_css_debug_dtrace(2, "\t%-32s: %d\n", "vmem stalled",
+ stall.vmem);
+ ia_css_debug_dtrace(2, "\t%-32s: %d\n", "vamem1 stalled",
+ stall.vamem1);
+ ia_css_debug_dtrace(2, "\t%-32s: %d\n", "vamem2 stalled",
+ stall.vamem2);
+#if defined(HAS_ISP_2400_MAMOIADA) || defined(HAS_ISP_2401_MAMOIADA)
+ ia_css_debug_dtrace(2, "\t%-32s: %d\n", "vamem3 stalled",
+ stall.vamem3);
+ ia_css_debug_dtrace(2, "\t%-32s: %d\n", "hmem stalled",
+ stall.hmem);
+ ia_css_debug_dtrace(2, "\t%-32s: %d\n", "pmem stalled",
+ stall.pmem);
+#endif
+ }
+ return;
+}
+
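+/*
+ * Dump the SP cell state; when the SP is stalling, also print which FIFOs
+ * and bus masters are stalled. The SP trace buffer is dumped at the end.
+ */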
+void ia_css_debug_dump_sp_state(void)
+{
+ sp_state_t state;
+ sp_stall_t stall;
+ sp_get_state(SP0_ID, &state, &stall);
+ debug_print_sp_state(&state, "SP");
+ if (state.is_stalling) {
+#if defined(HAS_SP_2400) || defined(IS_ISP_2500_SYSTEM)
+#if !defined(HAS_NO_INPUT_SYSTEM)
+ ia_css_debug_dtrace(2, "\t%-32s: %d\n", "isys_FIFO stalled",
+ stall.fifo0);
+ ia_css_debug_dtrace(2, "\t%-32s: %d\n", "if_sec_FIFO stalled",
+ stall.fifo1);
+#endif
+ ia_css_debug_dtrace(2, "\t%-32s: %d\n",
+ "str_to_mem_FIFO stalled", stall.fifo2);
+ ia_css_debug_dtrace(2, "\t%-32s: %d\n", "dma_FIFO stalled",
+ stall.fifo3);
+#if !defined(HAS_NO_INPUT_FORMATTER)
+ ia_css_debug_dtrace(2, "\t%-32s: %d\n",
+ "if_prim_a_FIFO stalled", stall.fifo4);
+#endif
+ ia_css_debug_dtrace(2, "\t%-32s: %d\n", "isp_FIFO stalled",
+ stall.fifo5);
+ ia_css_debug_dtrace(2, "\t%-32s: %d\n", "gp_FIFO stalled",
+ stall.fifo6);
+#if !defined(HAS_NO_INPUT_FORMATTER)
+ ia_css_debug_dtrace(2, "\t%-32s: %d\n",
+ "if_prim_b_FIFO stalled", stall.fifo7);
+#endif
+ ia_css_debug_dtrace(2, "\t%-32s: %d\n", "gdc0_FIFO stalled",
+ stall.fifo8);
+#if !defined(IS_ISP_2500_SYSTEM)
+ ia_css_debug_dtrace(2, "\t%-32s: %d\n", "gdc1_FIFO stalled",
+ stall.fifo9);
+#endif
+ ia_css_debug_dtrace(2, "\t%-32s: %d\n", "irq FIFO stalled",
+ stall.fifoa);
+#else
+#error "ia_css_debug: SP cell must be one of {SP2400, SP2500}"
+#endif
+ ia_css_debug_dtrace(2, "\t%-32s: %d\n", "dmem stalled",
+ stall.dmem);
+ ia_css_debug_dtrace(2, "\t%-32s: %d\n",
+ "control master stalled",
+ stall.control_master);
+ ia_css_debug_dtrace(2, "\t%-32s: %d\n",
+ "i-cache master stalled",
+ stall.icache_master);
+ }
+ ia_css_debug_dump_trace();
+ return;
+}
+
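+/*
+ * Print the handshake signals (source valid, fifo accept, fifo valid and
+ * sink accept) of a single FIFO channel.
+ */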
+static void debug_print_fifo_channel_state(const fifo_channel_state_t *state,
+ const char *descr)
+{
+ assert(state != NULL);
+ assert(descr != NULL);
+
+ ia_css_debug_dtrace(2, "FIFO channel: %s\n", descr);
+ ia_css_debug_dtrace(2, "\t%-32s: %d\n", "source valid",
+ state->src_valid);
+ ia_css_debug_dtrace(2, "\t%-32s: %d\n", "fifo accept",
+ state->fifo_accept);
+ ia_css_debug_dtrace(2, "\t%-32s: %d\n", "fifo valid",
+ state->fifo_valid);
+ ia_css_debug_dtrace(2, "\t%-32s: %d\n", "sink accept",
+ state->sink_accept);
+ return;
+}
+
+#if !defined(HAS_NO_INPUT_FORMATTER) && defined(USE_INPUT_SYSTEM_VERSION_2)
+void ia_css_debug_dump_pif_a_isp_fifo_state(void)
+{
+ fifo_channel_state_t pif_to_isp, isp_to_pif;
+ fifo_channel_get_state(FIFO_MONITOR0_ID,
+ FIFO_CHANNEL_IF0_TO_ISP0, &pif_to_isp);
+ fifo_channel_get_state(FIFO_MONITOR0_ID,
+ FIFO_CHANNEL_ISP0_TO_IF0, &isp_to_pif);
+ debug_print_fifo_channel_state(&pif_to_isp, "Primary IF A to ISP");
+ debug_print_fifo_channel_state(&isp_to_pif, "ISP to Primary IF A");
+}
+
+void ia_css_debug_dump_pif_b_isp_fifo_state(void)
+{
+ fifo_channel_state_t pif_to_isp, isp_to_pif;
+ fifo_channel_get_state(FIFO_MONITOR0_ID,
+ FIFO_CHANNEL_IF1_TO_ISP0, &pif_to_isp);
+ fifo_channel_get_state(FIFO_MONITOR0_ID,
+ FIFO_CHANNEL_ISP0_TO_IF1, &isp_to_pif);
+ debug_print_fifo_channel_state(&pif_to_isp, "Primary IF B to ISP");
+ debug_print_fifo_channel_state(&isp_to_pif, "ISP to Primary IF B");
+}
+
+void ia_css_debug_dump_str2mem_sp_fifo_state(void)
+{
+ fifo_channel_state_t s2m_to_sp, sp_to_s2m;
+ fifo_channel_get_state(FIFO_MONITOR0_ID,
+ FIFO_CHANNEL_STREAM2MEM0_TO_SP0, &s2m_to_sp);
+ fifo_channel_get_state(FIFO_MONITOR0_ID,
+ FIFO_CHANNEL_SP0_TO_STREAM2MEM0, &sp_to_s2m);
+ debug_print_fifo_channel_state(&s2m_to_sp, "Stream-to-memory to SP");
+ debug_print_fifo_channel_state(&sp_to_s2m, "SP to stream-to-memory");
+}
+
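+/*
+ * Dump the configuration and the FSM status (synchronization, crop and
+ * padding state machines) of one input formatter instance.
+ */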
+static void debug_print_if_state(input_formatter_state_t *state, const char *id)
+{
+ unsigned int val;
+
+#if defined(HAS_INPUT_FORMATTER_VERSION_1)
+ const char *st_reset = (state->reset ? "Active" : "Not active");
+#endif
+ const char *st_vsync_active_low =
+ (state->vsync_active_low ? "low" : "high");
+ const char *st_hsync_active_low =
+ (state->hsync_active_low ? "low" : "high");
+
+ const char *fsm_sync_status_str = "unknown";
+ const char *fsm_crop_status_str = "unknown";
+ const char *fsm_padding_status_str = "unknown";
+
+ int st_stline = state->start_line;
+ int st_stcol = state->start_column;
+ int st_crpht = state->cropped_height;
+ int st_crpwd = state->cropped_width;
+ int st_verdcm = state->ver_decimation;
+ int st_hordcm = state->hor_decimation;
+ int st_ver_deinterleaving = state->ver_deinterleaving;
+ int st_hor_deinterleaving = state->hor_deinterleaving;
+ int st_leftpd = state->left_padding;
+ int st_eoloff = state->eol_offset;
+ int st_vmstartaddr = state->vmem_start_address;
+ int st_vmendaddr = state->vmem_end_address;
+ int st_vmincr = state->vmem_increment;
+ int st_yuv420 = state->is_yuv420;
+ int st_allow_fifo_overflow = state->allow_fifo_overflow;
+ int st_block_fifo_when_no_req = state->block_fifo_when_no_req;
+
+ assert(state != NULL);
+ ia_css_debug_dtrace(2, "InputFormatter State (%s):\n", id);
+
+ ia_css_debug_dtrace(2, "\tConfiguration:\n");
+
+#if defined(HAS_INPUT_FORMATTER_VERSION_1)
+ ia_css_debug_dtrace(2, "\t\t%-32s: %s\n", "Software reset", st_reset);
+#endif
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "Start line", st_stline);
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "Start column", st_stcol);
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "Cropped height", st_crpht);
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "Cropped width", st_crpwd);
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "Ver decimation", st_verdcm);
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "Hor decimation", st_hordcm);
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "Ver deinterleaving", st_ver_deinterleaving);
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "Hor deinterleaving", st_hor_deinterleaving);
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "Left padding", st_leftpd);
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "EOL offset (bytes)", st_eoloff);
+ ia_css_debug_dtrace(2, "\t\t%-32s: 0x%06X\n",
+ "VMEM start address", st_vmstartaddr);
+ ia_css_debug_dtrace(2, "\t\t%-32s: 0x%06X\n",
+ "VMEM end address", st_vmendaddr);
+ ia_css_debug_dtrace(2, "\t\t%-32s: 0x%06X\n",
+ "VMEM increment", st_vmincr);
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "YUV 420 format", st_yuv420);
+ ia_css_debug_dtrace(2, "\t\t%-32s: Active %s\n",
+ "Vsync", st_vsync_active_low);
+ ia_css_debug_dtrace(2, "\t\t%-32s: Active %s\n",
+ "Hsync", st_hsync_active_low);
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "Allow FIFO overflow", st_allow_fifo_overflow);
+/*
+ * Flag that tells whether the IF gives backpressure on frames.
+ * Note: this only applies to the frame request (indicate); once the IF
+ * has synchronized on a frame it will always give backpressure.
+ */
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "Block when no request", st_block_fifo_when_no_req);
+
+#if defined(HAS_INPUT_FORMATTER_VERSION_2)
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "IF_BLOCKED_FIFO_NO_REQ_ADDRESS",
+ input_formatter_reg_load(INPUT_FORMATTER0_ID,
+ HIVE_IF_BLOCK_FIFO_NO_REQ_ADDRESS)
+ );
+
+ ia_css_debug_dtrace(2, "\t%-32s:\n", "InputSwitch State");
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "_REG_GP_IFMT_input_switch_lut_reg0",
+ gp_device_reg_load(GP_DEVICE0_ID,
+ _REG_GP_IFMT_input_switch_lut_reg0));
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "_REG_GP_IFMT_input_switch_lut_reg1",
+ gp_device_reg_load(GP_DEVICE0_ID,
+ _REG_GP_IFMT_input_switch_lut_reg1));
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "_REG_GP_IFMT_input_switch_lut_reg2",
+ gp_device_reg_load(GP_DEVICE0_ID,
+ _REG_GP_IFMT_input_switch_lut_reg2));
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "_REG_GP_IFMT_input_switch_lut_reg3",
+ gp_device_reg_load(GP_DEVICE0_ID,
+ _REG_GP_IFMT_input_switch_lut_reg3));
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "_REG_GP_IFMT_input_switch_lut_reg4",
+ gp_device_reg_load(GP_DEVICE0_ID,
+ _REG_GP_IFMT_input_switch_lut_reg4));
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "_REG_GP_IFMT_input_switch_lut_reg5",
+ gp_device_reg_load(GP_DEVICE0_ID,
+ _REG_GP_IFMT_input_switch_lut_reg5));
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "_REG_GP_IFMT_input_switch_lut_reg6",
+ gp_device_reg_load(GP_DEVICE0_ID,
+ _REG_GP_IFMT_input_switch_lut_reg6));
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "_REG_GP_IFMT_input_switch_lut_reg7",
+ gp_device_reg_load(GP_DEVICE0_ID,
+ _REG_GP_IFMT_input_switch_lut_reg7));
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "_REG_GP_IFMT_input_switch_fsync_lut",
+ gp_device_reg_load(GP_DEVICE0_ID,
+ _REG_GP_IFMT_input_switch_fsync_lut));
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "_REG_GP_IFMT_srst",
+ gp_device_reg_load(GP_DEVICE0_ID,
+ _REG_GP_IFMT_srst));
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "_REG_GP_IFMT_slv_reg_srst",
+ gp_device_reg_load(GP_DEVICE0_ID,
+ _REG_GP_IFMT_slv_reg_srst));
+#endif
+
+ ia_css_debug_dtrace(2, "\tFSM Status:\n");
+
+ val = state->fsm_sync_status;
+
+ if (val > 7)
+ fsm_sync_status_str = "ERROR";
+
+ switch (val & 0x7) {
+ case 0:
+ fsm_sync_status_str = "idle";
+ break;
+ case 1:
+ fsm_sync_status_str = "request frame";
+ break;
+ case 2:
+ fsm_sync_status_str = "request lines";
+ break;
+ case 3:
+ fsm_sync_status_str = "request vectors";
+ break;
+ case 4:
+ fsm_sync_status_str = "send acknowledge";
+ break;
+ default:
+ fsm_sync_status_str = "unknown";
+ break;
+ }
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: (0x%X: %s)\n",
+ "FSM Synchronization Status", val,
+ fsm_sync_status_str);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "FSM Synchronization Counter",
+ state->fsm_sync_counter);
+
+ val = state->fsm_crop_status;
+
+ if (val > 7)
+ fsm_crop_status_str = "ERROR";
+
+ switch (val & 0x7) {
+ case 0:
+ fsm_crop_status_str = "idle";
+ break;
+ case 1:
+ fsm_crop_status_str = "wait line";
+ break;
+ case 2:
+ fsm_crop_status_str = "crop line";
+ break;
+ case 3:
+ fsm_crop_status_str = "crop pixel";
+ break;
+ case 4:
+ fsm_crop_status_str = "pass pixel";
+ break;
+ case 5:
+ fsm_crop_status_str = "pass line";
+ break;
+ case 6:
+ fsm_crop_status_str = "lost line";
+ break;
+ default:
+ fsm_crop_status_str = "unknown";
+ break;
+ }
+ ia_css_debug_dtrace(2, "\t\t%-32s: (0x%X: %s)\n",
+ "FSM Crop Status", val, fsm_crop_status_str);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "FSM Crop Line Counter",
+ state->fsm_crop_line_counter);
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "FSM Crop Pixel Counter",
+ state->fsm_crop_pixel_counter);
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "FSM Deinterleaving idx buffer",
+ state->fsm_deinterleaving_index);
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "FSM H decimation counter",
+ state->fsm_dec_h_counter);
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "FSM V decimation counter",
+ state->fsm_dec_v_counter);
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "FSM block V decimation counter",
+ state->fsm_dec_block_v_counter);
+
+ val = state->fsm_padding_status;
+
+ if (val > 7)
+ fsm_padding_status_str = "ERROR";
+
+ switch (val & 0x7) {
+ case 0:
+ fsm_padding_status_str = "idle";
+ break;
+ case 1:
+ fsm_padding_status_str = "left pad";
+ break;
+ case 2:
+ fsm_padding_status_str = "write";
+ break;
+ case 3:
+ fsm_padding_status_str = "right pad";
+ break;
+ case 4:
+ fsm_padding_status_str = "send end of line";
+ break;
+ default:
+ fsm_padding_status_str = "unknown";
+ break;
+ }
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: (0x%X: %s)\n", "FSM Padding Status",
+ val, fsm_padding_status_str);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "FSM Padding element idx counter",
+ state->fsm_padding_elem_counter);
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Vector support error",
+ state->fsm_vector_support_error);
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Vector support buf full",
+ state->fsm_vector_buffer_full);
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Vector support",
+ state->vector_support);
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "Fifo sensor data lost",
+ state->sensor_data_lost);
+ return;
+}
+
+static void debug_print_if_bin_state(input_formatter_bin_state_t *state)
+{
+ ia_css_debug_dtrace(2, "Stream-to-memory state:\n");
+ ia_css_debug_dtrace(2, "\t%-32s: %d\n", "reset", state->reset);
+ ia_css_debug_dtrace(2, "\t%-32s: %d\n", "input endianness",
+ state->input_endianness);
+ ia_css_debug_dtrace(2, "\t%-32s: %d\n", "output endianness",
+ state->output_endianness);
+ ia_css_debug_dtrace(2, "\t%-32s: %d\n", "bitswap", state->bitswap);
+ ia_css_debug_dtrace(2, "\t%-32s: %d\n", "block_synch",
+ state->block_synch);
+ ia_css_debug_dtrace(2, "\t%-32s: %d\n", "packet_synch",
+ state->packet_synch);
+ ia_css_debug_dtrace(2, "\t%-32s: %d\n", "readpostwrite_sync",
+ state->readpostwrite_synch);
+ ia_css_debug_dtrace(2, "\t%-32s: %d\n", "is_2ppc", state->is_2ppc);
+ ia_css_debug_dtrace(2, "\t%-32s: %d\n", "en_status_update",
+ state->en_status_update);
+}
+
+void ia_css_debug_dump_if_state(void)
+{
+ input_formatter_state_t if_state;
+ input_formatter_bin_state_t if_bin_state;
+
+ input_formatter_get_state(INPUT_FORMATTER0_ID, &if_state);
+ debug_print_if_state(&if_state, "Primary IF A");
+ ia_css_debug_dump_pif_a_isp_fifo_state();
+
+ input_formatter_get_state(INPUT_FORMATTER1_ID, &if_state);
+ debug_print_if_state(&if_state, "Primary IF B");
+ ia_css_debug_dump_pif_b_isp_fifo_state();
+
+ input_formatter_bin_get_state(INPUT_FORMATTER3_ID, &if_bin_state);
+ debug_print_if_bin_state(&if_bin_state);
+ ia_css_debug_dump_str2mem_sp_fifo_state();
+}
+#endif
+
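+/*
+ * Dump the DMA0 state: command FSM, last command received, control/pack/
+ * read/write FSMs, and the per-port and per-channel register state.
+ */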
+void ia_css_debug_dump_dma_state(void)
+{
+ /*
+  * Note: the variable below is made static as it is quite large; if it
+  * were not static it would end up on the stack, which could cause
+  * issues for drivers.
+  */
+ static dma_state_t state;
+ int i, ch_id;
+
+ const char *fsm_cmd_st_lbl = "FSM Command flag state";
+ const char *fsm_ctl_st_lbl = "FSM Control flag state";
+ const char *fsm_ctl_state = NULL;
+ const char *fsm_ctl_flag = NULL;
+ const char *fsm_pack_st = NULL;
+ const char *fsm_read_st = NULL;
+ const char *fsm_write_st = NULL;
+ char last_cmd_str[64];
+
+ dma_get_state(DMA0_ID, &state);
+ /* Print header for DMA dump status */
+ ia_css_debug_dtrace(2, "DMA dump status:\n");
+
+ /* Print FSM command flag state */
+ if (state.fsm_command_idle)
+ ia_css_debug_dtrace(2, "\t%-32s: %s\n", fsm_cmd_st_lbl, "IDLE");
+ if (state.fsm_command_run)
+ ia_css_debug_dtrace(2, "\t%-32s: %s\n", fsm_cmd_st_lbl, "RUN");
+ if (state.fsm_command_stalling)
+ ia_css_debug_dtrace(2, "\t%-32s: %s\n", fsm_cmd_st_lbl,
+ "STALL");
+ if (state.fsm_command_error)
+ ia_css_debug_dtrace(2, "\t%-32s: %s\n", fsm_cmd_st_lbl,
+ "ERROR");
+
+ /* Print last command along with the channel */
+ ch_id = state.last_command_channel;
+
+ switch (state.last_command) {
+ case DMA_COMMAND_READ:
+ snprintf(last_cmd_str, 64,
+ "Read 2D Block [Channel: %d]", ch_id);
+ break;
+ case DMA_COMMAND_WRITE:
+ snprintf(last_cmd_str, 64,
+ "Write 2D Block [Channel: %d]", ch_id);
+ break;
+ case DMA_COMMAND_SET_CHANNEL:
+ snprintf(last_cmd_str, 64, "Set Channel [Channel: %d]", ch_id);
+ break;
+ case DMA_COMMAND_SET_PARAM:
+ snprintf(last_cmd_str, 64,
+ "Set Param: %d [Channel: %d]",
+ state.last_command_param, ch_id);
+ break;
+ case DMA_COMMAND_READ_SPECIFIC:
+ snprintf(last_cmd_str, 64,
+ "Read Specific 2D Block [Channel: %d]", ch_id);
+ break;
+ case DMA_COMMAND_WRITE_SPECIFIC:
+ snprintf(last_cmd_str, 64,
+ "Write Specific 2D Block [Channel: %d]", ch_id);
+ break;
+ case DMA_COMMAND_INIT:
+ snprintf(last_cmd_str, 64,
+ "Init 2D Block on Device A [Channel: %d]", ch_id);
+ break;
+ case DMA_COMMAND_INIT_SPECIFIC:
+ snprintf(last_cmd_str, 64,
+ "Init Specific 2D Block [Channel: %d]", ch_id);
+ break;
+ case DMA_COMMAND_RST:
+ snprintf(last_cmd_str, 64, "DMA SW Reset");
+ break;
+ case N_DMA_COMMANDS:
+ snprintf(last_cmd_str, 64, "UNKNOWN");
+ break;
+ default:
+ snprintf(last_cmd_str, 64,
+ "unknown [Channel: %d]", ch_id);
+ break;
+ }
+ ia_css_debug_dtrace(2, "\t%-32s: (0x%X : %s)\n",
+ "last command received", state.last_command,
+ last_cmd_str);
+
+ /* Print DMA registers */
+ ia_css_debug_dtrace(2, "\t%-32s\n",
+ "DMA registers, connection group 0");
+ ia_css_debug_dtrace(2, "\t\t%-32s: 0x%X\n", "Cmd Fifo Command",
+ state.current_command);
+ ia_css_debug_dtrace(2, "\t\t%-32s: 0x%X\n", "Cmd Fifo Address A",
+ state.current_addr_a);
+ ia_css_debug_dtrace(2, "\t\t%-32s: 0x%X\n", "Cmd Fifo Address B",
+ state.current_addr_b);
+
+ if (state.fsm_ctrl_idle)
+ fsm_ctl_flag = "IDLE";
+ else if (state.fsm_ctrl_run)
+ fsm_ctl_flag = "RUN";
+ else if (state.fsm_ctrl_stalling)
+ fsm_ctl_flag = "STAL";
+ else if (state.fsm_ctrl_error)
+ fsm_ctl_flag = "ERROR";
+ else
+ fsm_ctl_flag = "UNKNOWN";
+
+ switch (state.fsm_ctrl_state) {
+ case DMA_CTRL_STATE_IDLE:
+ fsm_ctl_state = "Idle state";
+ break;
+ case DMA_CTRL_STATE_REQ_RCV:
+ fsm_ctl_state = "Req Rcv state";
+ break;
+ case DMA_CTRL_STATE_RCV:
+ fsm_ctl_state = "Rcv state";
+ break;
+ case DMA_CTRL_STATE_RCV_REQ:
+ fsm_ctl_state = "Rcv Req state";
+ break;
+ case DMA_CTRL_STATE_INIT:
+ fsm_ctl_state = "Init state";
+ break;
+ case N_DMA_CTRL_STATES:
+ fsm_ctl_state = "Unknown";
+ break;
+ }
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %s -> %s\n", fsm_ctl_st_lbl,
+ fsm_ctl_flag, fsm_ctl_state);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Ctrl source dev",
+ state.fsm_ctrl_source_dev);
+ ia_css_debug_dtrace(2, "\t\t%-32s: 0x%X\n", "FSM Ctrl source addr",
+ state.fsm_ctrl_source_addr);
+ ia_css_debug_dtrace(2, "\t\t%-32s: 0x%X\n", "FSM Ctrl source stride",
+ state.fsm_ctrl_source_stride);
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Ctrl source width",
+ state.fsm_ctrl_source_width);
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Ctrl source height",
+ state.fsm_ctrl_source_height);
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Ctrl pack source dev",
+ state.fsm_ctrl_pack_source_dev);
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Ctrl pack dest dev",
+ state.fsm_ctrl_pack_dest_dev);
+ ia_css_debug_dtrace(2, "\t\t%-32s: 0x%X\n", "FSM Ctrl dest addr",
+ state.fsm_ctrl_dest_addr);
+ ia_css_debug_dtrace(2, "\t\t%-32s: 0x%X\n", "FSM Ctrl dest stride",
+ state.fsm_ctrl_dest_stride);
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Ctrl pack source width",
+ state.fsm_ctrl_pack_source_width);
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Ctrl pack dest height",
+ state.fsm_ctrl_pack_dest_height);
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Ctrl pack dest width",
+ state.fsm_ctrl_pack_dest_width);
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Ctrl pack source elems",
+ state.fsm_ctrl_pack_source_elems);
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Ctrl pack dest elems",
+ state.fsm_ctrl_pack_dest_elems);
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Ctrl pack extension",
+ state.fsm_ctrl_pack_extension);
+
+ if (state.pack_idle)
+ fsm_pack_st = "IDLE";
+ if (state.pack_run)
+ fsm_pack_st = "RUN";
+ if (state.pack_stalling)
+ fsm_pack_st = "STALL";
+ if (state.pack_error)
+ fsm_pack_st = "ERROR";
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %s\n", "FSM Pack flag state",
+ fsm_pack_st);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Pack cnt height",
+ state.pack_cnt_height);
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Pack src cnt width",
+ state.pack_src_cnt_width);
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Pack dest cnt width",
+ state.pack_dest_cnt_width);
+
+ if (state.read_state == DMA_RW_STATE_IDLE)
+ fsm_read_st = "Idle state";
+ if (state.read_state == DMA_RW_STATE_REQ)
+ fsm_read_st = "Req state";
+ if (state.read_state == DMA_RW_STATE_NEXT_LINE)
+ fsm_read_st = "Next line";
+ if (state.read_state == DMA_RW_STATE_UNLOCK_CHANNEL)
+ fsm_read_st = "Unlock channel";
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %s\n", "FSM Read state",
+ fsm_read_st);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Read cnt height",
+ state.read_cnt_height);
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Read cnt width",
+ state.read_cnt_width);
+
+ if (state.write_state == DMA_RW_STATE_IDLE)
+ fsm_write_st = "Idle state";
+ if (state.write_state == DMA_RW_STATE_REQ)
+ fsm_write_st = "Req state";
+ if (state.write_state == DMA_RW_STATE_NEXT_LINE)
+ fsm_write_st = "Next line";
+ if (state.write_state == DMA_RW_STATE_UNLOCK_CHANNEL)
+ fsm_write_st = "Unlock channel";
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %s\n", "FSM Write state",
+ fsm_write_st);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Write height",
+ state.write_height);
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Write width",
+ state.write_width);
+
+ for (i = 0; i < HIVE_ISP_NUM_DMA_CONNS; i++) {
+ dma_port_state_t *port = &(state.port_states[i]);
+ ia_css_debug_dtrace(2, "\tDMA device interface %d\n", i);
+ ia_css_debug_dtrace(2, "\t\tDMA internal side state\n");
+ ia_css_debug_dtrace(2,
+ "\t\t\tCS:%d - We_n:%d - Run:%d - Ack:%d\n",
+ port->req_cs, port->req_we_n, port->req_run,
+ port->req_ack);
+ ia_css_debug_dtrace(2, "\t\tMaster Output side state\n");
+ ia_css_debug_dtrace(2,
+ "\t\t\tCS:%d - We_n:%d - Run:%d - Ack:%d\n",
+ port->send_cs, port->send_we_n,
+ port->send_run, port->send_ack);
+ ia_css_debug_dtrace(2, "\t\tFifo state\n");
+ if (port->fifo_state == DMA_FIFO_STATE_WILL_BE_FULL)
+ ia_css_debug_dtrace(2, "\t\t\tFiFo will be full\n");
+ else if (port->fifo_state == DMA_FIFO_STATE_FULL)
+ ia_css_debug_dtrace(2, "\t\t\tFifo Full\n");
+ else if (port->fifo_state == DMA_FIFO_STATE_EMPTY)
+ ia_css_debug_dtrace(2, "\t\t\tFifo Empty\n");
+ else
+ ia_css_debug_dtrace(2, "\t\t\tFifo state unknown\n");
+
+ ia_css_debug_dtrace(2, "\t\tFifo counter %d\n\n",
+ port->fifo_counter);
+ }
+
+ for (i = 0; i < HIVE_DMA_NUM_CHANNELS; i++) {
+ dma_channel_state_t *ch = &(state.channel_states[i]);
+ ia_css_debug_dtrace(2, "\t%-32s: %d\n", "DMA channel register",
+ i);
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "Connection",
+ ch->connection);
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "Sign extend",
+ ch->sign_extend);
+ ia_css_debug_dtrace(2, "\t\t%-32s: 0x%X\n", "Stride Dev A",
+ ch->stride_a);
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "Elems Dev A",
+ ch->elems_a);
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "Cropping Dev A",
+ ch->cropping_a);
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "Width Dev A",
+ ch->width_a);
+ ia_css_debug_dtrace(2, "\t\t%-32s: 0x%X\n", "Stride Dev B",
+ ch->stride_b);
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "Elems Dev B",
+ ch->elems_b);
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "Cropping Dev B",
+ ch->cropping_b);
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "Width Dev B",
+ ch->width_b);
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "Height", ch->height);
+ }
+ ia_css_debug_dtrace(2, "\n");
+ return;
+}
+
+void ia_css_debug_dump_dma_sp_fifo_state(void)
+{
+ fifo_channel_state_t dma_to_sp, sp_to_dma;
+ fifo_channel_get_state(FIFO_MONITOR0_ID,
+ FIFO_CHANNEL_DMA0_TO_SP0, &dma_to_sp);
+ fifo_channel_get_state(FIFO_MONITOR0_ID,
+ FIFO_CHANNEL_SP0_TO_DMA0, &sp_to_dma);
+ debug_print_fifo_channel_state(&dma_to_sp, "DMA to SP");
+ debug_print_fifo_channel_state(&sp_to_dma, "SP to DMA");
+ return;
+}
+
+void ia_css_debug_dump_dma_isp_fifo_state(void)
+{
+ fifo_channel_state_t dma_to_isp, isp_to_dma;
+ fifo_channel_get_state(FIFO_MONITOR0_ID,
+ FIFO_CHANNEL_DMA0_TO_ISP0, &dma_to_isp);
+ fifo_channel_get_state(FIFO_MONITOR0_ID,
+ FIFO_CHANNEL_ISP0_TO_DMA0, &isp_to_dma);
+ debug_print_fifo_channel_state(&dma_to_isp, "DMA to ISP");
+ debug_print_fifo_channel_state(&isp_to_dma, "ISP to DMA");
+ return;
+}
+
+void ia_css_debug_dump_isp_sp_fifo_state(void)
+{
+ fifo_channel_state_t sp_to_isp, isp_to_sp;
+ fifo_channel_get_state(FIFO_MONITOR0_ID,
+ FIFO_CHANNEL_SP0_TO_ISP0, &sp_to_isp);
+ fifo_channel_get_state(FIFO_MONITOR0_ID,
+ FIFO_CHANNEL_ISP0_TO_SP0, &isp_to_sp);
+ debug_print_fifo_channel_state(&sp_to_isp, "SP to ISP");
+ debug_print_fifo_channel_state(&isp_to_sp, "ISP to SP");
+ return;
+}
+
+void ia_css_debug_dump_isp_gdc_fifo_state(void)
+{
+ fifo_channel_state_t gdc_to_isp, isp_to_gdc;
+
+ fifo_channel_get_state(FIFO_MONITOR0_ID,
+ FIFO_CHANNEL_GDC0_TO_ISP0, &gdc_to_isp);
+ fifo_channel_get_state(FIFO_MONITOR0_ID,
+ FIFO_CHANNEL_ISP0_TO_GDC0, &isp_to_gdc);
+ debug_print_fifo_channel_state(&gdc_to_isp, "GDC to ISP");
+ debug_print_fifo_channel_state(&isp_to_gdc, "ISP to GDC");
+ return;
+}
+
+void ia_css_debug_dump_all_fifo_state(void)
+{
+ int i;
+ fifo_monitor_state_t state;
+ fifo_monitor_get_state(FIFO_MONITOR0_ID, &state);
+
+ for (i = 0; i < N_FIFO_CHANNEL; i++)
+ debug_print_fifo_channel_state(&(state.fifo_channels[i]),
+ "squepfstqkt");
+ return;
+}
+
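+/*
+ * Print the static binary info from the firmware: id, mode, supported
+ * input/output widths, cropping and the enable flags.
+ */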
+static void debug_binary_info_print(const struct ia_css_binary_xinfo *info)
+{
+ assert(info != NULL);
+ ia_css_debug_dtrace(2, "id = %d\n", info->sp.id);
+ ia_css_debug_dtrace(2, "mode = %d\n", info->sp.pipeline.mode);
+ ia_css_debug_dtrace(2, "max_input_width = %d\n", info->sp.input.max_width);
+ ia_css_debug_dtrace(2, "min_output_width = %d\n",
+ info->sp.output.min_width);
+ ia_css_debug_dtrace(2, "max_output_width = %d\n",
+ info->sp.output.max_width);
+ ia_css_debug_dtrace(2, "top_cropping = %d\n", info->sp.pipeline.top_cropping);
+ ia_css_debug_dtrace(2, "left_cropping = %d\n", info->sp.pipeline.left_cropping);
+ ia_css_debug_dtrace(2, "xmem_addr = %d\n", info->xmem_addr);
+ ia_css_debug_dtrace(2, "enable_vf_veceven = %d\n",
+ info->sp.enable.vf_veceven);
+ ia_css_debug_dtrace(2, "enable_dis = %d\n", info->sp.enable.dis);
+ ia_css_debug_dtrace(2, "enable_uds = %d\n", info->sp.enable.uds);
+ ia_css_debug_dtrace(2, "enable ds = %d\n", info->sp.enable.ds);
+ ia_css_debug_dtrace(2, "s3atbl_use_dmem = %d\n", info->sp.s3a.s3atbl_use_dmem);
+ return;
+}
+
+void ia_css_debug_binary_print(const struct ia_css_binary *bi)
+{
+ unsigned int i;
+ debug_binary_info_print(bi->info);
+ ia_css_debug_dtrace(2,
+ "input: %dx%d, format = %d, padded width = %d\n",
+ bi->in_frame_info.res.width,
+ bi->in_frame_info.res.height,
+ bi->in_frame_info.format,
+ bi->in_frame_info.padded_width);
+ ia_css_debug_dtrace(2,
+ "internal :%dx%d, format = %d, padded width = %d\n",
+ bi->internal_frame_info.res.width,
+ bi->internal_frame_info.res.height,
+ bi->internal_frame_info.format,
+ bi->internal_frame_info.padded_width);
+ for (i = 0; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++) {
+ if (bi->out_frame_info[i].res.width != 0) {
+ ia_css_debug_dtrace(2,
+ "out%d: %dx%d, format = %d, padded width = %d\n",
+ i,
+ bi->out_frame_info[i].res.width,
+ bi->out_frame_info[i].res.height,
+ bi->out_frame_info[i].format,
+ bi->out_frame_info[i].padded_width);
+ }
+ }
+ ia_css_debug_dtrace(2,
+ "vf out: %dx%d, format = %d, padded width = %d\n",
+ bi->vf_frame_info.res.width,
+ bi->vf_frame_info.res.height,
+ bi->vf_frame_info.format,
+ bi->vf_frame_info.padded_width);
+ ia_css_debug_dtrace(2, "online = %d\n", bi->online);
+ ia_css_debug_dtrace(2, "input_buf_vectors = %d\n",
+ bi->input_buf_vectors);
+ ia_css_debug_dtrace(2, "deci_factor_log2 = %d\n", bi->deci_factor_log2);
+ ia_css_debug_dtrace(2, "vf_downscale_log2 = %d\n",
+ bi->vf_downscale_log2);
+ ia_css_debug_dtrace(2, "dis_deci_factor_log2 = %d\n",
+ bi->dis.deci_factor_log2);
+ ia_css_debug_dtrace(2, "dis hor coef num = %d\n",
+ bi->dis.coef.pad.width);
+ ia_css_debug_dtrace(2, "dis ver coef num = %d\n",
+ bi->dis.coef.pad.height);
+ ia_css_debug_dtrace(2, "dis hor proj num = %d\n",
+ bi->dis.proj.pad.height);
+ ia_css_debug_dtrace(2, "sctbl_width_per_color = %d\n",
+ bi->sctbl_width_per_color);
+ ia_css_debug_dtrace(2, "s3atbl_width = %d\n", bi->s3atbl_width);
+ ia_css_debug_dtrace(2, "s3atbl_height = %d\n", bi->s3atbl_height);
+ return;
+}
+
+void ia_css_debug_frame_print(const struct ia_css_frame *frame,
+ const char *descr)
+{
+ char *data = NULL;
+
+ assert(frame != NULL);
+ assert(descr != NULL);
+
+ data = (char *)HOST_ADDRESS(frame->data);
+ ia_css_debug_dtrace(2, "frame %s (%p):\n", descr, frame);
+ ia_css_debug_dtrace(2, " resolution = %dx%d\n",
+ frame->info.res.width, frame->info.res.height);
+ ia_css_debug_dtrace(2, " padded width = %d\n",
+ frame->info.padded_width);
+ ia_css_debug_dtrace(2, " format = %d\n", frame->info.format);
+ ia_css_debug_dtrace(2, " is contiguous = %s\n",
+ frame->contiguous ? "yes" : "no");
+ switch (frame->info.format) {
+ case IA_CSS_FRAME_FORMAT_NV12:
+ case IA_CSS_FRAME_FORMAT_NV16:
+ case IA_CSS_FRAME_FORMAT_NV21:
+ case IA_CSS_FRAME_FORMAT_NV61:
+ ia_css_debug_dtrace(2, " Y = %p\n",
+ data + frame->planes.nv.y.offset);
+ ia_css_debug_dtrace(2, " UV = %p\n",
+ data + frame->planes.nv.uv.offset);
+ break;
+ case IA_CSS_FRAME_FORMAT_YUYV:
+ case IA_CSS_FRAME_FORMAT_UYVY:
+ case IA_CSS_FRAME_FORMAT_CSI_MIPI_YUV420_8:
+ case IA_CSS_FRAME_FORMAT_CSI_MIPI_LEGACY_YUV420_8:
+ case IA_CSS_FRAME_FORMAT_YUV_LINE:
+ ia_css_debug_dtrace(2, " YUYV = %p\n",
+ data + frame->planes.yuyv.offset);
+ break;
+ case IA_CSS_FRAME_FORMAT_YUV420:
+ case IA_CSS_FRAME_FORMAT_YUV422:
+ case IA_CSS_FRAME_FORMAT_YUV444:
+ case IA_CSS_FRAME_FORMAT_YV12:
+ case IA_CSS_FRAME_FORMAT_YV16:
+ case IA_CSS_FRAME_FORMAT_YUV420_16:
+ case IA_CSS_FRAME_FORMAT_YUV422_16:
+ ia_css_debug_dtrace(2, " Y = %p\n",
+ data + frame->planes.yuv.y.offset);
+ ia_css_debug_dtrace(2, " U = %p\n",
+ data + frame->planes.yuv.u.offset);
+ ia_css_debug_dtrace(2, " V = %p\n",
+ data + frame->planes.yuv.v.offset);
+ break;
+ case IA_CSS_FRAME_FORMAT_RAW_PACKED:
+ ia_css_debug_dtrace(2, " RAW PACKED = %p\n",
+ data + frame->planes.raw.offset);
+ break;
+ case IA_CSS_FRAME_FORMAT_RAW:
+ ia_css_debug_dtrace(2, " RAW = %p\n",
+ data + frame->planes.raw.offset);
+ break;
+ case IA_CSS_FRAME_FORMAT_RGBA888:
+ case IA_CSS_FRAME_FORMAT_RGB565:
+ ia_css_debug_dtrace(2, " RGB = %p\n",
+ data + frame->planes.rgb.offset);
+ break;
+ case IA_CSS_FRAME_FORMAT_QPLANE6:
+ ia_css_debug_dtrace(2, " R = %p\n",
+ data + frame->planes.plane6.r.offset);
+ ia_css_debug_dtrace(2, " RatB = %p\n",
+ data + frame->planes.plane6.r_at_b.offset);
+ ia_css_debug_dtrace(2, " Gr = %p\n",
+ data + frame->planes.plane6.gr.offset);
+ ia_css_debug_dtrace(2, " Gb = %p\n",
+ data + frame->planes.plane6.gb.offset);
+ ia_css_debug_dtrace(2, " B = %p\n",
+ data + frame->planes.plane6.b.offset);
+ ia_css_debug_dtrace(2, " BatR = %p\n",
+ data + frame->planes.plane6.b_at_r.offset);
+ break;
+ case IA_CSS_FRAME_FORMAT_BINARY_8:
+ ia_css_debug_dtrace(2, " Binary data = %p\n",
+ data + frame->planes.binary.data.offset);
+ break;
+ default:
+ ia_css_debug_dtrace(2, " unknown frame type\n");
+ break;
+ }
+ return;
+}
+
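+/*
+ * Print the SP software debug state; the output format depends on the
+ * selected SP_DEBUG mode (DUMP, COPY, TRACE or MINIMAL).
+ */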
+#if SP_DEBUG != SP_DEBUG_NONE
+
+void ia_css_debug_print_sp_debug_state(const struct sh_css_sp_debug_state
+ *state)
+{
+
+#endif
+
+#if SP_DEBUG == SP_DEBUG_DUMP
+
+ assert(state != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "current SP software counter: %d\n",
+ state->debug[0]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "empty output buffer queue head: 0x%x\n",
+ state->debug[1]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "empty output buffer queue tail: 0x%x\n",
+ state->debug[2]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "empty s3a buffer queue head: 0x%x\n",
+ state->debug[3]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "empty s3a buffer queue tail: 0x%x\n",
+ state->debug[4]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "full output buffer queue head: 0x%x\n",
+ state->debug[5]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "full output buffer queue tail: 0x%x\n",
+ state->debug[6]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "full s3a buffer queue head: 0x%x\n",
+ state->debug[7]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "full s3a buffer queue tail: 0x%x\n",
+ state->debug[8]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, "event queue head: 0x%x\n",
+ state->debug[9]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, "event queue tail: 0x%x\n",
+ state->debug[10]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "num of stages of current pipeline: 0x%x\n",
+ state->debug[11]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, "DDR address of stage 1: 0x%x\n",
+ state->debug[12]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, "DDR address of stage 2: 0x%x\n",
+ state->debug[13]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "current stage out_vf buffer idx: 0x%x\n",
+ state->debug[14]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "current stage output buffer idx: 0x%x\n",
+ state->debug[15]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "current stage s3a buffer idx: 0x%x\n",
+ state->debug[16]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "first char of current stage name: 0x%x\n",
+ state->debug[17]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, "current SP thread id: 0x%x\n",
+ state->debug[18]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "empty output buffer address 1: 0x%x\n",
+ state->debug[19]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "empty output buffer address 2: 0x%x\n",
+ state->debug[20]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "empty out_vf buffer address 1: 0x%x\n",
+ state->debug[21]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "empty out_vf buffer address 2: 0x%x\n",
+ state->debug[22]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "empty s3a_hi buffer address 1: 0x%x\n",
+ state->debug[23]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "empty s3a_hi buffer address 2: 0x%x\n",
+ state->debug[24]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "empty s3a_lo buffer address 1: 0x%x\n",
+ state->debug[25]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "empty s3a_lo buffer address 2: 0x%x\n",
+ state->debug[26]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "empty dis_hor buffer address 1: 0x%x\n",
+ state->debug[27]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "empty dis_hor buffer address 2: 0x%x\n",
+ state->debug[28]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "empty dis_ver buffer address 1: 0x%x\n",
+ state->debug[29]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "empty dis_ver buffer address 2: 0x%x\n",
+ state->debug[30]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "empty param buffer address: 0x%x\n",
+ state->debug[31]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "first incorrect frame address: 0x%x\n",
+ state->debug[32]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "first incorrect frame container address: 0x%x\n",
+ state->debug[33]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "first incorrect frame container payload: 0x%x\n",
+ state->debug[34]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "first incorrect s3a_hi address: 0x%x\n",
+ state->debug[35]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "first incorrect s3a_hi container address: 0x%x\n",
+ state->debug[36]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "first incorrect s3a_hi container payload: 0x%x\n",
+ state->debug[37]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "first incorrect s3a_lo address: 0x%x\n",
+ state->debug[38]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "first incorrect s3a_lo container address: 0x%x\n",
+ state->debug[39]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "first incorrect s3a_lo container payload: 0x%x\n",
+ state->debug[40]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "number of calling flash start function: 0x%x\n",
+ state->debug[41]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "number of calling flash close function: 0x%x\n",
+ state->debug[42]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, "number of flashed frame: 0x%x\n",
+ state->debug[43]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, "flash in use flag: 0x%x\n",
+ state->debug[44]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "number of update frame flashed flag: 0x%x\n",
+ state->debug[46]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "number of active threads: 0x%x\n",
+ state->debug[45]);
+
+#elif SP_DEBUG == SP_DEBUG_COPY
+
+ /* Remember last_index because we only want to print new entries */
+ static int last_index;
+ int sp_index = state->index;
+ int n;
+
+ assert(state != NULL);
+ if (sp_index < last_index) {
+ /* SP has been reset */
+ last_index = 0;
+ }
+
+ if (last_index == 0) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "copy-trace init: sp_dbg_if_start_line=%d, "
+ "sp_dbg_if_start_column=%d, "
+ "sp_dbg_if_cropped_height=%d, "
+ "sp_debg_if_cropped_width=%d\n",
+ state->if_start_line,
+ state->if_start_column,
+ state->if_cropped_height,
+ state->if_cropped_width);
+ }
+
+ if ((last_index + SH_CSS_SP_DBG_TRACE_DEPTH) < sp_index) {
+ /* last index can be multiple rounds behind */
+ /* while trace size is only SH_CSS_SP_DBG_TRACE_DEPTH */
+ last_index = sp_index - SH_CSS_SP_DBG_TRACE_DEPTH;
+ }
+
+ for (n = last_index; n < sp_index; n++) {
+ int i = n % SH_CSS_SP_DBG_TRACE_DEPTH;
+ if (state->trace[i].frame != 0) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "copy-trace: frame=%d, line=%d, "
+ "pixel_distance=%d, "
+ "mipi_used_dword=%d, "
+ "sp_index=%d\n",
+ state->trace[i].frame,
+ state->trace[i].line,
+ state->trace[i].pixel_distance,
+ state->trace[i].mipi_used_dword,
+ state->trace[i].sp_index);
+ }
+ }
+
+ last_index = sp_index;
+
+#elif SP_DEBUG == SP_DEBUG_TRACE
+
+/*
+ * This is just an example of how TRACE_FILE_ID (see ia_css_debug.sp.h)
+ * will be mapped onto the file name string.
+ *
+ * Adjust this to your trace case!
+ */
+ static char const * const id2filename[8] = {
+ "param_buffer.sp.c | tagger.sp.c | pipe_data.sp.c",
+ "isp_init.sp.c",
+ "sp_raw_copy.hive.c",
+ "dma_configure.sp.c",
+ "sp.hive.c",
+ "event_proxy_sp.hive.c",
+ "circular_buffer.sp.c",
+ "frame_buffer.sp.c"
+ };
+
+#if 1
+ /* Example SH_CSS_SP_DBG_NR_OF_TRACES==1 */
+ /* Adjust this to your trace case */
+ static char const *trace_name[SH_CSS_SP_DBG_NR_OF_TRACES] = {
+ "default"
+ };
+#else
+ /* Example SH_CSS_SP_DBG_NR_OF_TRACES==4 */
+ /* Adjust this to your trace case */
+ static char const *trace_name[SH_CSS_SP_DBG_NR_OF_TRACES] = {
+ "copy", "preview/video", "capture", "acceleration"
+ };
+#endif
+
+ /* Remember host_index_last because we only want to print new entries */
+ static int host_index_last[SH_CSS_SP_DBG_NR_OF_TRACES] = { 0 };
+ int t, n;
+
+ assert(state != NULL);
+
+ for (t = 0; t < SH_CSS_SP_DBG_NR_OF_TRACES; t++) {
+ int sp_index_last = state->index_last[t];
+
+ if (sp_index_last < host_index_last[t]) {
+ /* SP has been reset */
+ host_index_last[t] = 0;
+ }
+
+ if ((host_index_last[t] + SH_CSS_SP_DBG_TRACE_DEPTH) <
+ sp_index_last) {
+ /* last index can be multiple rounds behind */
+ /* while trace size is only SH_CSS_SP_DBG_TRACE_DEPTH */
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "Warning: trace %s has gap of %d "
+ "traces\n",
+ trace_name[t],
+ (sp_index_last -
+ (host_index_last[t] +
+ SH_CSS_SP_DBG_TRACE_DEPTH)));
+
+ host_index_last[t] =
+ sp_index_last - SH_CSS_SP_DBG_TRACE_DEPTH;
+ }
+
+ for (n = host_index_last[t]; n < sp_index_last; n++) {
+ int i = n % SH_CSS_SP_DBG_TRACE_DEPTH;
+ int l = state->trace[t][i].location &
+ ((1 << SH_CSS_SP_DBG_TRACE_FILE_ID_BIT_POS) - 1);
+ int fid = state->trace[t][i].location >>
+ SH_CSS_SP_DBG_TRACE_FILE_ID_BIT_POS;
+ int ts = state->trace[t][i].time_stamp;
+
+ if (ts) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "%05d trace=%s, file=%s:%d, "
+ "data=0x%08x\n",
+ ts,
+ trace_name[t],
+ id2filename[fid], l,
+ state->trace[t][i].data);
+ }
+ }
+ host_index_last[t] = sp_index_last;
+ }
+
+#elif SP_DEBUG == SP_DEBUG_MINIMAL
+ int i;
+ int base = 0;
+ int limit = SH_CSS_NUM_SP_DEBUG;
+ int step = 1;
+
+ assert(state != NULL);
+
+ for (i = base; i < limit; i += step) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "sp_dbg_trace[%d] = %d\n",
+ i, state->debug[i]);
+ }
+#endif
+
+#if SP_DEBUG != SP_DEBUG_NONE
+
+ return;
+}
+#endif
+
+#if defined(HAS_INPUT_FORMATTER_VERSION_2) && !defined(HAS_NO_INPUT_FORMATTER)
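+/*
+ * Decode the IRQ status of one MIPI receiver port into readable error
+ * messages and print its counters and per-lane sync/rx counts.
+ */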
+static void debug_print_rx_mipi_port_state(mipi_port_state_t *state)
+{
+ int i;
+ unsigned int bits, infos;
+
+ assert(state != NULL);
+
+ bits = state->irq_status;
+ infos = ia_css_isys_rx_translate_irq_infos(bits);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: (irq reg = 0x%X)\n",
+ "receiver errors", bits);
+
+ if (infos & IA_CSS_RX_IRQ_INFO_BUFFER_OVERRUN)
+ ia_css_debug_dtrace(2, "\t\t\tbuffer overrun\n");
+ if (infos & IA_CSS_RX_IRQ_INFO_ERR_SOT)
+ ia_css_debug_dtrace(2, "\t\t\tstart-of-transmission error\n");
+ if (infos & IA_CSS_RX_IRQ_INFO_ERR_SOT_SYNC)
+ ia_css_debug_dtrace(2, "\t\t\tstart-of-transmission sync error\n");
+ if (infos & IA_CSS_RX_IRQ_INFO_ERR_CONTROL)
+ ia_css_debug_dtrace(2, "\t\t\tcontrol error\n");
+ if (infos & IA_CSS_RX_IRQ_INFO_ERR_ECC_DOUBLE)
+ ia_css_debug_dtrace(2, "\t\t\t2 or more ECC errors\n");
+ if (infos & IA_CSS_RX_IRQ_INFO_ERR_CRC)
+ ia_css_debug_dtrace(2, "\t\t\tCRC mismatch\n");
+ if (infos & IA_CSS_RX_IRQ_INFO_ERR_UNKNOWN_ID)
+ ia_css_debug_dtrace(2, "\t\t\tunknown error\n");
+ if (infos & IA_CSS_RX_IRQ_INFO_ERR_FRAME_SYNC)
+ ia_css_debug_dtrace(2, "\t\t\tframe sync error\n");
+ if (infos & IA_CSS_RX_IRQ_INFO_ERR_FRAME_DATA)
+ ia_css_debug_dtrace(2, "\t\t\tframe data error\n");
+ if (infos & IA_CSS_RX_IRQ_INFO_ERR_DATA_TIMEOUT)
+ ia_css_debug_dtrace(2, "\t\t\tdata timeout\n");
+ if (infos & IA_CSS_RX_IRQ_INFO_ERR_UNKNOWN_ESC)
+ ia_css_debug_dtrace(2, "\t\t\tunknown escape command entry\n");
+ if (infos & IA_CSS_RX_IRQ_INFO_ERR_LINE_SYNC)
+ ia_css_debug_dtrace(2, "\t\t\tline sync error\n");
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "device_ready", state->device_ready);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "irq_status", state->irq_status);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "irq_enable", state->irq_enable);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "timeout_count", state->timeout_count);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "init_count", state->init_count);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "raw16_18", state->raw16_18);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "sync_count", state->sync_count);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "rx_count", state->rx_count);
+
+ for (i = 0; i < MIPI_4LANE_CFG; i++) {
+ ia_css_debug_dtrace(2, "\t\t%-32s%d%-32s: %d\n",
+ "lane_sync_count[", i, "]",
+ state->lane_sync_count[i]);
+ }
+
+ for (i = 0; i < MIPI_4LANE_CFG; i++) {
+ ia_css_debug_dtrace(2, "\t\t%-32s%d%-32s: %d\n",
+ "lane_rx_count[", i, "]",
+ state->lane_rx_count[i]);
+ }
+
+ return;
+}
+
+static void debug_print_rx_channel_state(rx_channel_state_t *state)
+{
+ int i;
+
+ assert(state != NULL);
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "compression_scheme0", state->comp_scheme0);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "compression_scheme1", state->comp_scheme1);
+
+ for (i = 0; i < N_MIPI_FORMAT_CUSTOM; i++) {
+ ia_css_debug_dtrace(2, "\t\t%-32s%d: %d\n",
+ "MIPI Predictor ", i, state->pred[i]);
+ }
+
+ for (i = 0; i < N_MIPI_FORMAT_CUSTOM; i++) {
+ ia_css_debug_dtrace(2, "\t\t%-32s%d: %d\n",
+ "MIPI Compressor ", i, state->comp[i]);
+ }
+
+ return;
+}
+
+static void debug_print_rx_state(receiver_state_t *state)
+{
+ int i;
+
+ assert(state != NULL);
+ ia_css_debug_dtrace(2, "CSI Receiver State:\n");
+
+ ia_css_debug_dtrace(2, "\tConfiguration:\n");
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "fs_to_ls_delay", state->fs_to_ls_delay);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "ls_to_data_delay", state->ls_to_data_delay);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "data_to_le_delay", state->data_to_le_delay);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "le_to_fe_delay", state->le_to_fe_delay);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "fe_to_fs_delay", state->fe_to_fs_delay);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "le_to_fs_delay", state->le_to_fs_delay);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "is_two_ppc", state->is_two_ppc);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "backend_rst", state->backend_rst);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "raw18", state->raw18);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "force_raw8", state->force_raw8);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "raw16", state->raw16);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "be_gsp_acc_ovl", state->be_gsp_acc_ovl);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "be_srst", state->be_srst);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "be_is_two_ppc", state->be_is_two_ppc);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "be_comp_format0", state->be_comp_format0);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "be_comp_format1", state->be_comp_format1);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "be_comp_format2", state->be_comp_format2);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "be_comp_format3", state->be_comp_format3);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "be_sel", state->be_sel);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "be_raw16_config", state->be_raw16_config);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "be_raw18_config", state->be_raw18_config);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "be_force_raw8", state->be_force_raw8);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "be_irq_status", state->be_irq_status);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "be_irq_clear", state->be_irq_clear);
+
+ /* mipi port state */
+ for (i = 0; i < N_MIPI_PORT_ID; i++) {
+ ia_css_debug_dtrace(2, "\tMIPI Port %d State:\n", i);
+
+ debug_print_rx_mipi_port_state(&state->mipi_port_state[i]);
+ }
+ /* end of mipi port state */
+
+ /* rx channel state */
+ for (i = 0; i < N_RX_CHANNEL_ID; i++) {
+ ia_css_debug_dtrace(2, "\tRX Channel %d State:\n", i);
+
+ debug_print_rx_channel_state(&state->rx_channel_state[i]);
+ }
+ /* end of rx channel state */
+
+ return;
+}
+#endif
+
+#if !defined(HAS_NO_INPUT_SYSTEM) && defined(USE_INPUT_SYSTEM_VERSION_2)
+void ia_css_debug_dump_rx_state(void)
+{
+#if defined(HAS_INPUT_FORMATTER_VERSION_2) && !defined(HAS_NO_INPUT_FORMATTER)
+ receiver_state_t state;
+
+ receiver_get_state(RX0_ID, &state);
+ debug_print_rx_state(&state);
+#endif
+}
+#endif
+
+void ia_css_debug_dump_sp_sw_debug_info(void)
+{
+#if SP_DEBUG != SP_DEBUG_NONE
+ struct sh_css_sp_debug_state state;
+
+ sh_css_sp_get_debug_state(&state);
+ ia_css_debug_print_sp_debug_state(&state);
+#endif
+ ia_css_bufq_dump_queue_info();
+ ia_css_pipeline_dump_thread_map_info();
+ return;
+}
+
+#if defined(USE_INPUT_SYSTEM_VERSION_2)
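+/* Print the registers of a single input system capture unit. */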
+static void debug_print_isys_capture_unit_state(capture_unit_state_t *state)
+{
+ assert(state != NULL);
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "Packet_Length", state->Packet_Length);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "Received_Length", state->Received_Length);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "Received_Short_Packets",
+ state->Received_Short_Packets);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "Received_Long_Packets",
+ state->Received_Long_Packets);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "Last_Command", state->Last_Command);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "Next_Command", state->Next_Command);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "Last_Acknowledge", state->Last_Acknowledge);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "Next_Acknowledge", state->Next_Acknowledge);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "FSM_State_Info", state->FSM_State_Info);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "StartMode", state->StartMode);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "Start_Addr", state->Start_Addr);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "Mem_Region_Size", state->Mem_Region_Size);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "Num_Mem_Regions", state->Num_Mem_Regions);
+ return;
+}
+
+static void debug_print_isys_acquisition_unit_state(
+ acquisition_unit_state_t *state)
+{
+ assert(state != NULL);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "Received_Short_Packets",
+ state->Received_Short_Packets);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "Received_Long_Packets",
+ state->Received_Long_Packets);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "Last_Command", state->Last_Command);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "Next_Command", state->Next_Command);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "Last_Acknowledge", state->Last_Acknowledge);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "Next_Acknowledge", state->Next_Acknowledge);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "FSM_State_Info", state->FSM_State_Info);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "Int_Cntr_Info", state->Int_Cntr_Info);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "Start_Addr", state->Start_Addr);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "Mem_Region_Size", state->Mem_Region_Size);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "Num_Mem_Regions", state->Num_Mem_Regions);
+}
+
+static void debug_print_isys_ctrl_unit_state(ctrl_unit_state_t *state)
+{
+ assert(state != NULL);
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "last_cmd", state->last_cmd);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "next_cmd", state->next_cmd);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "last_ack", state->last_ack);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "next_ack", state->next_ack);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "top_fsm_state", state->top_fsm_state);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "captA_fsm_state", state->captA_fsm_state);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "captB_fsm_state", state->captB_fsm_state);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "captC_fsm_state", state->captC_fsm_state);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "acq_fsm_state", state->acq_fsm_state);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "captA_start_addr", state->captA_start_addr);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "captB_start_addr", state->captB_start_addr);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "captC_start_addr", state->captC_start_addr);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "captA_mem_region_size",
+ state->captA_mem_region_size);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "captB_mem_region_size",
+ state->captB_mem_region_size);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "captC_mem_region_size",
+ state->captC_mem_region_size);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "captA_num_mem_regions",
+ state->captA_num_mem_regions);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "captB_num_mem_regions",
+ state->captB_num_mem_regions);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "captC_num_mem_regions",
+ state->captC_num_mem_regions);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "acq_start_addr", state->acq_start_addr);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "acq_mem_region_size", state->acq_mem_region_size);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "acq_num_mem_regions", state->acq_num_mem_regions);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "capt_reserve_one_mem_region",
+ state->capt_reserve_one_mem_region);
+
+ return;
+}
+
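+/*
+ * Dump the complete input system state: the global configuration followed
+ * by all capture, acquisition and control units.
+ */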
+static void debug_print_isys_state(input_system_state_t *state)
+{
+ int i;
+
+ assert(state != NULL);
+ ia_css_debug_dtrace(2, "InputSystem State:\n");
+
+ /* configuration */
+ ia_css_debug_dtrace(2, "\tConfiguration:\n");
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "str_multiCastA_sel", state->str_multicastA_sel);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "str_multicastB_sel", state->str_multicastB_sel);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "str_multicastC_sel", state->str_multicastC_sel);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "str_mux_sel", state->str_mux_sel);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "str_mon_status", state->str_mon_status);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "str_mon_irq_cond", state->str_mon_irq_cond);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "str_mon_irq_en", state->str_mon_irq_en);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "isys_srst", state->isys_srst);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "isys_slv_reg_srst", state->isys_slv_reg_srst);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "str_deint_portA_cnt", state->str_deint_portA_cnt);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "str_deint_portB_cnd", state->str_deint_portB_cnt);
+ /* end of configuration */
+
+ /* capture unit state */
+ for (i = 0; i < N_CAPTURE_UNIT_ID; i++) {
+ capture_unit_state_t *capture_unit_state;
+
+ ia_css_debug_dtrace(2, "\tCaptureUnit %d State:\n", i);
+
+ capture_unit_state = &state->capture_unit[i];
+ debug_print_isys_capture_unit_state(capture_unit_state);
+ }
+ /* end of capture unit state */
+
+ /* acquisition unit state */
+ for (i = 0; i < N_ACQUISITION_UNIT_ID; i++) {
+ acquisition_unit_state_t *acquisition_unit_state;
+
+ ia_css_debug_dtrace(2, "\tAcquisitionUnit %d State:\n", i);
+
+ acquisition_unit_state = &state->acquisition_unit[i];
+ debug_print_isys_acquisition_unit_state(acquisition_unit_state);
+ }
+ /* end of acquisition unit state */
+
+ /* control unit state */
+ for (i = 0; i < N_CTRL_UNIT_ID; i++) {
+ ia_css_debug_dtrace(2, "\tControlUnit %d State:\n", i);
+
+ debug_print_isys_ctrl_unit_state(&state->ctrl_unit_state[i]);
+ }
+ /* end of control unit state */
+}
+
+void ia_css_debug_dump_isys_state(void)
+{
+ input_system_state_t state;
+
+ input_system_get_state(INPUT_SYSTEM0_ID, &state);
+ debug_print_isys_state(&state);
+}
+#endif
+#if !defined(HAS_NO_INPUT_SYSTEM) && defined(USE_INPUT_SYSTEM_VERSION_2401)
+void ia_css_debug_dump_isys_state(void)
+{
+ /* This cannot be a local variable: on Android the stack size is
+ limited to 2k and this structure is around 3.5K. A malloc could be
+ used instead of the static allocation, but if this call is made too
+ often that would fragment memory, so a fixed allocation is used. */
+ static input_system_state_t state;
+
+ input_system_get_state(INPUT_SYSTEM0_ID, &state);
+ input_system_dump_state(INPUT_SYSTEM0_ID, &state);
+}
+#endif
+
+void ia_css_debug_dump_debug_info(const char *context)
+{
+ if (context == NULL)
+ context = "No Context provided";
+
+ ia_css_debug_dtrace(2, "CSS Debug Info dump [Context = %s]\n", context);
+#if !defined(HAS_NO_INPUT_SYSTEM) && defined(USE_INPUT_SYSTEM_VERSION_2)
+ ia_css_debug_dump_rx_state();
+#endif
+#if !defined(HAS_NO_INPUT_FORMATTER) && defined(USE_INPUT_SYSTEM_VERSION_2)
+ ia_css_debug_dump_if_state();
+#endif
+ ia_css_debug_dump_isp_state();
+ ia_css_debug_dump_isp_sp_fifo_state();
+ ia_css_debug_dump_isp_gdc_fifo_state();
+ ia_css_debug_dump_sp_state();
+ ia_css_debug_dump_perf_counters();
+
+#ifdef HAS_WATCHDOG_SP_THREAD_DEBUG
+ sh_css_dump_thread_wait_info();
+ sh_css_dump_pipe_stage_info();
+ sh_css_dump_pipe_stripe_info();
+#endif
+ ia_css_debug_dump_dma_isp_fifo_state();
+ ia_css_debug_dump_dma_sp_fifo_state();
+ ia_css_debug_dump_dma_state();
+#if defined(USE_INPUT_SYSTEM_VERSION_2)
+ ia_css_debug_dump_isys_state();
+
+ {
+ irq_controller_state_t state;
+ irq_controller_get_state(IRQ2_ID, &state);
+
+ ia_css_debug_dtrace(2, "\t%-32s:\n",
+ "Input System IRQ Controller State");
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "irq_edge", state.irq_edge);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "irq_mask", state.irq_mask);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "irq_status", state.irq_status);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "irq_enable", state.irq_enable);
+
+ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
+ "irq_level_not_pulse",
+ state.irq_level_not_pulse);
+ }
+#endif
+#if !defined(HAS_NO_INPUT_SYSTEM) && defined(USE_INPUT_SYSTEM_VERSION_2401)
+ ia_css_debug_dump_isys_state();
+#endif
+#if defined(USE_INPUT_SYSTEM_VERSION_2) || defined(USE_INPUT_SYSTEM_VERSION_2401)
+ ia_css_debug_tagger_state();
+#endif
+}
+
+/** This function is for debug use; it can make the SP go to a sleep
+ state after each frame, so that the user can dump the stable SP dmem.
+ This function can be called after ia_css_start_sp()
+ and before sh_css_init_buffer_queues().
+*/
+void ia_css_debug_enable_sp_sleep_mode(enum ia_css_sp_sleep_mode mode)
+{
+ const struct ia_css_fw_info *fw;
+ unsigned int HIVE_ADDR_sp_sleep_mode;
+
+ fw = &sh_css_sp_fw;
+ HIVE_ADDR_sp_sleep_mode = fw->info.sp.sleep_mode;
+
+ (void)HIVE_ADDR_sp_sleep_mode; /* Suppress warnings in CRUN */
+
+ sp_dmem_store_uint32(SP0_ID,
+ (unsigned int)sp_address_of(sp_sleep_mode),
+ (uint32_t) mode);
+}
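+
+/*
+ * Illustrative usage sketch (the call order follows the comment above;
+ * "mode" is a placeholder for one of the ia_css_sp_sleep_mode enumerators):
+ *
+ *	ia_css_start_sp();
+ *	ia_css_debug_enable_sp_sleep_mode(mode);
+ *	sh_css_init_buffer_queues();
+ *
+ * After each frame the SP then goes to sleep and the stable SP dmem can be
+ * dumped; ia_css_debug_wake_up_sp() below can be used to wake the SP again.
+ */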
+
+void ia_css_debug_wake_up_sp(void)
+{
+ /*hrt_ctl_start(SP); */
+ sp_ctrl_setbit(SP0_ID, SP_SC_REG, SP_START_BIT);
+}
+
+#if !defined(IS_ISP_2500_SYSTEM)
+#define FIND_DMEM_PARAMS_TYPE(stream, kernel, type) \
+ (struct HRTCAT(HRTCAT(sh_css_isp_, type), _params) *) \
+ findf_dmem_params(stream, offsetof(struct ia_css_memory_offsets, dmem.kernel))
+
+#define FIND_DMEM_PARAMS(stream, kernel) FIND_DMEM_PARAMS_TYPE(stream, kernel, kernel)
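+
+/* For example, FIND_DMEM_PARAMS(stream, fpn) expands to a call to
+ * findf_dmem_params(stream, offsetof(struct ia_css_memory_offsets, dmem.fpn))
+ * with the result cast to (struct sh_css_isp_fpn_params *).
+ */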
+
+/* Find a stage that supports the kernel and return the parameters for that kernel */
+static char *
+findf_dmem_params(struct ia_css_stream *stream, short idx)
+{
+ int i;
+ for (i = 0; i < stream->num_pipes; i++) {
+ struct ia_css_pipe *pipe = stream->pipes[i];
+ struct ia_css_pipeline *pipeline = ia_css_pipe_get_pipeline(pipe);
+ struct ia_css_pipeline_stage *stage;
+ for (stage = pipeline->stages; stage; stage = stage->next) {
+ struct ia_css_binary *binary = stage->binary;
+ short *offsets = (short *)&binary->info->mem_offsets.offsets.param->dmem;
+ short dmem_offset = offsets[idx];
+ const struct ia_css_host_data *isp_data =
+ ia_css_isp_param_get_mem_init(&binary->mem_params,
+ IA_CSS_PARAM_CLASS_PARAM, IA_CSS_ISP_DMEM0);
+ if (dmem_offset < 0)
+ continue;
+ return &isp_data->address[dmem_offset];
+ }
+ }
+ return NULL;
+}
+#endif
+
+void ia_css_debug_dump_isp_params(struct ia_css_stream *stream,
+ unsigned int enable)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, "ISP PARAMETERS:\n");
+#if defined(IS_ISP_2500_SYSTEM)
+ (void)enable;
+ (void)stream;
+#else
+
+ assert(stream != NULL);
+ if ((enable & IA_CSS_DEBUG_DUMP_FPN)
+ || (enable & IA_CSS_DEBUG_DUMP_ALL)) {
+ ia_css_fpn_dump(FIND_DMEM_PARAMS(stream, fpn), IA_CSS_DEBUG_VERBOSE);
+ }
+ if ((enable & IA_CSS_DEBUG_DUMP_OB)
+ || (enable & IA_CSS_DEBUG_DUMP_ALL)) {
+ ia_css_ob_dump(FIND_DMEM_PARAMS(stream, ob), IA_CSS_DEBUG_VERBOSE);
+ }
+ if ((enable & IA_CSS_DEBUG_DUMP_SC)
+ || (enable & IA_CSS_DEBUG_DUMP_ALL)) {
+ ia_css_sc_dump(FIND_DMEM_PARAMS(stream, sc), IA_CSS_DEBUG_VERBOSE);
+ }
+ if ((enable & IA_CSS_DEBUG_DUMP_WB)
+ || (enable & IA_CSS_DEBUG_DUMP_ALL)) {
+ ia_css_wb_dump(FIND_DMEM_PARAMS(stream, wb), IA_CSS_DEBUG_VERBOSE);
+ }
+ if ((enable & IA_CSS_DEBUG_DUMP_DP)
+ || (enable & IA_CSS_DEBUG_DUMP_ALL)) {
+ ia_css_dp_dump(FIND_DMEM_PARAMS(stream, dp), IA_CSS_DEBUG_VERBOSE);
+ }
+ if ((enable & IA_CSS_DEBUG_DUMP_BNR)
+ || (enable & IA_CSS_DEBUG_DUMP_ALL)) {
+ ia_css_bnr_dump(FIND_DMEM_PARAMS(stream, bnr), IA_CSS_DEBUG_VERBOSE);
+ }
+ if ((enable & IA_CSS_DEBUG_DUMP_S3A)
+ || (enable & IA_CSS_DEBUG_DUMP_ALL)) {
+ ia_css_s3a_dump(FIND_DMEM_PARAMS(stream, s3a), IA_CSS_DEBUG_VERBOSE);
+ }
+ if ((enable & IA_CSS_DEBUG_DUMP_DE)
+ || (enable & IA_CSS_DEBUG_DUMP_ALL)) {
+ ia_css_de_dump(FIND_DMEM_PARAMS(stream, de), IA_CSS_DEBUG_VERBOSE);
+ }
+ if ((enable & IA_CSS_DEBUG_DUMP_YNR)
+ || (enable & IA_CSS_DEBUG_DUMP_ALL)) {
+ ia_css_nr_dump(FIND_DMEM_PARAMS_TYPE(stream, nr, ynr), IA_CSS_DEBUG_VERBOSE);
+ ia_css_yee_dump(FIND_DMEM_PARAMS(stream, yee), IA_CSS_DEBUG_VERBOSE);
+ }
+ if ((enable & IA_CSS_DEBUG_DUMP_CSC)
+ || (enable & IA_CSS_DEBUG_DUMP_ALL)) {
+ ia_css_csc_dump(FIND_DMEM_PARAMS(stream, csc), IA_CSS_DEBUG_VERBOSE);
+ ia_css_yuv2rgb_dump(FIND_DMEM_PARAMS_TYPE(stream, yuv2rgb, csc), IA_CSS_DEBUG_VERBOSE);
+ ia_css_rgb2yuv_dump(FIND_DMEM_PARAMS_TYPE(stream, rgb2yuv, csc), IA_CSS_DEBUG_VERBOSE);
+ }
+ if ((enable & IA_CSS_DEBUG_DUMP_GC)
+ || (enable & IA_CSS_DEBUG_DUMP_ALL)) {
+ ia_css_gc_dump(FIND_DMEM_PARAMS(stream, gc), IA_CSS_DEBUG_VERBOSE);
+ }
+ if ((enable & IA_CSS_DEBUG_DUMP_TNR)
+ || (enable & IA_CSS_DEBUG_DUMP_ALL)) {
+ ia_css_tnr_dump(FIND_DMEM_PARAMS(stream, tnr), IA_CSS_DEBUG_VERBOSE);
+ }
+ if ((enable & IA_CSS_DEBUG_DUMP_ANR)
+ || (enable & IA_CSS_DEBUG_DUMP_ALL)) {
+ ia_css_anr_dump(FIND_DMEM_PARAMS(stream, anr), IA_CSS_DEBUG_VERBOSE);
+ }
+ if ((enable & IA_CSS_DEBUG_DUMP_CE)
+ || (enable & IA_CSS_DEBUG_DUMP_ALL)) {
+ ia_css_ce_dump(FIND_DMEM_PARAMS(stream, ce), IA_CSS_DEBUG_VERBOSE);
+ }
+#endif
+}
+
+void sh_css_dump_sp_raw_copy_linecount(bool reduced)
+{
+ const struct ia_css_fw_info *fw;
+ unsigned int HIVE_ADDR_raw_copy_line_count;
+ int32_t raw_copy_line_count;
+ static int32_t prev_raw_copy_line_count = -1;
+
+ fw = &sh_css_sp_fw;
+ HIVE_ADDR_raw_copy_line_count =
+ fw->info.sp.raw_copy_line_count;
+
+ (void)HIVE_ADDR_raw_copy_line_count;
+
+ sp_dmem_load(SP0_ID,
+ (unsigned int)sp_address_of(raw_copy_line_count),
+ &raw_copy_line_count,
+ sizeof(raw_copy_line_count));
+
+ /* only indicate if copy loop is active */
+ if (reduced)
+ raw_copy_line_count = (raw_copy_line_count < 0) ? raw_copy_line_count : 1;
+ /* do the handling */
+ if (prev_raw_copy_line_count != raw_copy_line_count) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "sh_css_dump_sp_raw_copy_linecount() "
+ "line_count=%d\n",
+ raw_copy_line_count);
+ prev_raw_copy_line_count = raw_copy_line_count;
+ }
+}
+
+void ia_css_debug_dump_isp_binary(void)
+{
+ const struct ia_css_fw_info *fw;
+ unsigned int HIVE_ADDR_pipeline_sp_curr_binary_id;
+ uint32_t curr_binary_id;
+ static uint32_t prev_binary_id = 0xFFFFFFFF;
+ static uint32_t sample_count;
+
+ fw = &sh_css_sp_fw;
+ HIVE_ADDR_pipeline_sp_curr_binary_id = fw->info.sp.curr_binary_id;
+
+ (void)HIVE_ADDR_pipeline_sp_curr_binary_id;
+
+ sp_dmem_load(SP0_ID,
+ (unsigned int)sp_address_of(pipeline_sp_curr_binary_id),
+ &curr_binary_id,
+ sizeof(curr_binary_id));
+
+ /* do the handling */
+ sample_count++;
+ if (prev_binary_id != curr_binary_id) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "sh_css_dump_isp_binary() "
+ "pipe_id=%d, binary_id=%d, sample_count=%d\n",
+ (curr_binary_id >> 16),
+ (curr_binary_id & 0x0ffff),
+ sample_count);
+ sample_count = 0;
+ prev_binary_id = curr_binary_id;
+ }
+}
+
+void ia_css_debug_dump_perf_counters(void)
+{
+#if !defined(HAS_NO_INPUT_SYSTEM) && defined(USE_INPUT_SYSTEM_VERSION_2)
+ const struct ia_css_fw_info *fw;
+ int i;
+ unsigned int HIVE_ADDR_ia_css_isys_sp_error_cnt;
+ int32_t ia_css_sp_input_system_error_cnt[N_MIPI_PORT_ID + 1]; /* 3 Capture Units and 1 Acquire Unit. */
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, "Input System Error Counters:\n");
+
+ fw = &sh_css_sp_fw;
+ HIVE_ADDR_ia_css_isys_sp_error_cnt = fw->info.sp.perf_counter_input_system_error;
+
+ (void)HIVE_ADDR_ia_css_isys_sp_error_cnt;
+
+ sp_dmem_load(SP0_ID,
+ (unsigned int)sp_address_of(ia_css_isys_sp_error_cnt),
+ &ia_css_sp_input_system_error_cnt,
+ sizeof(ia_css_sp_input_system_error_cnt));
+
+ for (i = 0; i < N_MIPI_PORT_ID + 1; i++) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, "\tport[%d] = %d\n",
+ i, ia_css_sp_input_system_error_cnt[i]);
+ }
+#endif
+}
+
+/*
+
+void sh_css_init_ddr_debug_queue(void)
+{
+ hrt_vaddress ddr_debug_queue_addr =
+ mmgr_malloc(sizeof(debug_data_ddr_t));
+ const struct ia_css_fw_info *fw;
+ unsigned int HIVE_ADDR_debug_buffer_ddr_address;
+
+ fw = &sh_css_sp_fw;
+ HIVE_ADDR_debug_buffer_ddr_address =
+ fw->info.sp.debug_buffer_ddr_address;
+
+ (void)HIVE_ADDR_debug_buffer_ddr_address;
+
+ debug_buffer_ddr_init(ddr_debug_queue_addr);
+
+ sp_dmem_store_uint32(SP0_ID,
+ (unsigned int)sp_address_of(debug_buffer_ddr_address),
+ (uint32_t)(ddr_debug_queue_addr));
+}
+
+void sh_css_load_ddr_debug_queue(void)
+{
+ debug_synch_queue_ddr();
+}
+
+void ia_css_debug_dump_ddr_debug_queue(void)
+{
+ int i;
+ sh_css_load_ddr_debug_queue();
+ for (i = 0; i < DEBUG_BUF_SIZE; i++) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "ddr_debug_queue[%d] = 0x%x\n",
+ i, debug_data_ptr->buf[i]);
+ }
+}
+*/
+
+/**
+ * @brief Initialize the debug mode.
+ * Refer to "ia_css_debug.h" for more details.
+ */
+bool ia_css_debug_mode_init(void)
+{
+ bool rc;
+ rc = sh_css_sp_init_dma_sw_reg(0);
+ return rc;
+}
+
+/**
+ * @brief Disable the DMA channel.
+ * Refer to "ia_css_debug.h" for more details.
+ */
+bool
+ia_css_debug_mode_disable_dma_channel(int dma_id,
+ int channel_id, int request_type)
+{
+ bool rc;
+
+ rc = sh_css_sp_set_dma_sw_reg(dma_id, channel_id, request_type, false);
+
+ return rc;
+}
+
+/**
+ * @brief Enable the DMA channel.
+ * Refer to "ia_css_debug.h" for more details.
+ */
+bool
+ia_css_debug_mode_enable_dma_channel(int dma_id,
+ int channel_id, int request_type)
+{
+ bool rc;
+
+ rc = sh_css_sp_set_dma_sw_reg(dma_id, channel_id, request_type, true);
+
+ return rc;
+}
+
+void dtrace_dot(const char *fmt, ...)
+{
+ va_list ap;
+
+ assert(fmt != NULL);
+ va_start(ap, fmt);
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_INFO, "%s", DPG_START);
+ ia_css_debug_vdtrace(IA_CSS_DEBUG_INFO, fmt, ap);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_INFO, "%s", DPG_END);
+ va_end(ap);
+}
+#ifdef HAS_WATCHDOG_SP_THREAD_DEBUG
+void sh_css_dump_thread_wait_info(void)
+{
+ const struct ia_css_fw_info *fw;
+ int i;
+ unsigned int HIVE_ADDR_sp_thread_wait;
+ int32_t sp_thread_wait[MAX_THREAD_NUM];
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, "SEM WAITS:\n");
+
+ fw = &sh_css_sp_fw;
+ HIVE_ADDR_sp_thread_wait =
+ fw->info.sp.debug_wait;
+
+ (void)HIVE_ADDR_sp_thread_wait;
+
+ sp_dmem_load(SP0_ID,
+ (unsigned int)sp_address_of(sp_thread_wait),
+ &sp_thread_wait,
+ sizeof(sp_thread_wait));
+ for (i = 0; i < MAX_THREAD_NUM; i++) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "\twait[%d] = 0x%X\n",
+ i, sp_thread_wait[i]);
+ }
+
+}
+
+void sh_css_dump_pipe_stage_info(void)
+{
+ const struct ia_css_fw_info *fw;
+ int i;
+ unsigned int HIVE_ADDR_sp_pipe_stage;
+ int32_t sp_pipe_stage[MAX_THREAD_NUM];
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, "PIPE STAGE:\n");
+
+ fw = &sh_css_sp_fw;
+ HIVE_ADDR_sp_pipe_stage =
+ fw->info.sp.debug_stage;
+
+ (void)HIVE_ADDR_sp_pipe_stage;
+
+ sp_dmem_load(SP0_ID,
+ (unsigned int)sp_address_of(sp_pipe_stage),
+ &sp_pipe_stage,
+ sizeof(sp_pipe_stage));
+ for (i = 0; i < MAX_THREAD_NUM; i++) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "\tstage[%d] = %d\n",
+ i, sp_pipe_stage[i]);
+ }
+
+}
+
+void sh_css_dump_pipe_stripe_info(void)
+{
+ const struct ia_css_fw_info *fw;
+ int i;
+ unsigned int HIVE_ADDR_sp_pipe_stripe;
+ int32_t sp_pipe_stripe[MAX_THREAD_NUM];
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, "PIPE STRIPE:\n");
+
+ fw = &sh_css_sp_fw;
+ HIVE_ADDR_sp_pipe_stripe =
+ fw->info.sp.debug_stripe;
+
+ (void)HIVE_ADDR_sp_pipe_stripe;
+
+ sp_dmem_load(SP0_ID,
+ (unsigned int)sp_address_of(sp_pipe_stripe),
+ &sp_pipe_stripe,
+ sizeof(sp_pipe_stripe));
+ for (i = 0; i < MAX_THREAD_NUM; i++) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
+ "\tstripe[%d] = %d\n",
+ i, sp_pipe_stripe[i]);
+ }
+
+}
+#endif
+
+static void
+ia_css_debug_pipe_graph_dump_frame(
+ struct ia_css_frame *frame,
+ enum ia_css_pipe_id id,
+ char const *blob_name,
+ char const *frame_name,
+ bool in_frame)
+{
+ char bufinfo[100];
+
+ if (frame->dynamic_queue_id == SH_CSS_INVALID_QUEUE_ID) {
+ snprintf(bufinfo, sizeof(bufinfo), "Internal");
+ } else {
+ snprintf(bufinfo, sizeof(bufinfo), "Queue: %s %s",
+ pipe_id_to_str[id],
+ queue_id_to_str[frame->dynamic_queue_id]);
+ }
+ dtrace_dot(
+ "node [shape = box, "
+ "fixedsize=true, width=2, height=0.7]; \"0x%08lx\" "
+ "[label = \"%s\\n%d(%d) x %d, %dbpp\\n%s\"];",
+ HOST_ADDRESS(frame),
+ debug_frame_format2str(frame->info.format),
+ frame->info.res.width,
+ frame->info.padded_width,
+ frame->info.res.height,
+ frame->info.raw_bit_depth,
+ bufinfo);
+
+ if (in_frame) {
+ dtrace_dot(
+ "\"0x%08lx\"->\"%s(pipe%d)\" "
+ "[label = %s_frame];",
+ HOST_ADDRESS(frame),
+ blob_name, id, frame_name);
+ } else {
+ dtrace_dot(
+ "\"%s(pipe%d)\"->\"0x%08lx\" "
+ "[label = %s_frame];",
+ blob_name, id,
+ HOST_ADDRESS(frame),
+ frame_name);
+ }
+}
+
+void
+ia_css_debug_pipe_graph_dump_prologue(void)
+{
+ dtrace_dot("digraph sh_css_pipe_graph {");
+ dtrace_dot("rankdir=LR;");
+
+ dtrace_dot("fontsize=9;");
+ dtrace_dot("label = \"\\nEnable options: rp=reduced pipe, vfve=vf_veceven, "
+ "dvse=dvs_envelope, dvs6=dvs_6axis, bo=block_out, "
+ "fbds=fixed_bayer_ds, bf6=bayer_fir_6db, "
+ "rawb=raw_binning, cont=continuous, disc=dis_crop\\n"
+ "dp2a=dp_2adjacent, outp=output, outt=out_table, "
+ "reff=ref_frame, par=params, gam=gamma, "
+ "cagdc=ca_gdc, ispa=isp_addresses, inf=in_frame, "
+ "outf=out_frame, hs=high_speed, inpc=input_chunking\"");
+}
+
+void ia_css_debug_pipe_graph_dump_epilogue(void)
+{
+
+ if (strlen(ring_buffer) > 0) {
+ dtrace_dot(ring_buffer);
+ }
+
+
+ if (pg_inst.stream_format != N_IA_CSS_STREAM_FORMAT) {
+ /* An input stream format has been set so assume we have
+ * an input system and sensor
+ */
+
+
+ dtrace_dot(
+ "node [shape = doublecircle, "
+ "fixedsize=true, width=2.5]; \"input_system\" "
+ "[label = \"Input system\"];");
+
+ dtrace_dot(
+ "\"input_system\"->\"%s\" "
+ "[label = \"%s\"];",
+ dot_id_input_bin, debug_stream_format2str(pg_inst.stream_format));
+
+ dtrace_dot(
+ "node [shape = doublecircle, "
+ "fixedsize=true, width=2.5]; \"sensor\" "
+ "[label = \"Sensor\"];");
+
+ dtrace_dot(
+ "\"sensor\"->\"input_system\" "
+ "[label = \"%s\\n%d x %d\\n(%d x %d)\"];",
+ debug_stream_format2str(pg_inst.stream_format),
+ pg_inst.width, pg_inst.height,
+ pg_inst.eff_width, pg_inst.eff_height);
+ }
+
+ dtrace_dot("}");
+
+ /* Reset temp strings */
+ memset(dot_id_input_bin, 0, sizeof(dot_id_input_bin));
+ memset(ring_buffer, 0, sizeof(ring_buffer));
+
+ pg_inst.do_init = true;
+ pg_inst.width = 0;
+ pg_inst.height = 0;
+ pg_inst.eff_width = 0;
+ pg_inst.eff_height = 0;
+ pg_inst.stream_format = N_IA_CSS_STREAM_FORMAT;
+}
+
+void
+ia_css_debug_pipe_graph_dump_stage(
+ struct ia_css_pipeline_stage *stage,
+ enum ia_css_pipe_id id)
+{
+ char blob_name[SH_CSS_MAX_BINARY_NAME+10] = "<unknown type>";
+ char const *bin_type = "<unknown type>";
+ int i;
+
+ assert(stage != NULL);
+ if (stage->sp_func != IA_CSS_PIPELINE_NO_FUNC)
+ return;
+
+ if (pg_inst.do_init) {
+ ia_css_debug_pipe_graph_dump_prologue();
+ pg_inst.do_init = false;
+ }
+
+ if (stage->binary) {
+ bin_type = "binary";
+ if (stage->binary->info->blob)
+ snprintf(blob_name, sizeof(blob_name), "%s_stage%d",
+ stage->binary->info->blob->name, stage->stage_num);
+ } else if (stage->firmware) {
+ bin_type = "firmware";
+ strncpy_s(blob_name, sizeof(blob_name), IA_CSS_EXT_ISP_PROG_NAME(stage->firmware), sizeof(blob_name));
+ }
+
+ /* Guard in case of binaries that don't have any binary_info */
+ if (stage->binary_info != NULL) {
+ char enable_info1[100];
+ char enable_info2[100];
+ char enable_info3[100];
+ char enable_info[200];
+ struct ia_css_binary_info *bi = stage->binary_info;
+
+ /* Split it into 2 function calls to keep the number of
+ * parameters per call reasonable
+ */
+ snprintf(enable_info1, sizeof(enable_info1),
+ "%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
+ bi->enable.reduced_pipe ? "rp," : "",
+ bi->enable.vf_veceven ? "vfve," : "",
+ bi->enable.dis ? "dis," : "",
+ bi->enable.dvs_envelope ? "dvse," : "",
+ bi->enable.uds ? "uds," : "",
+ bi->enable.dvs_6axis ? "dvs6," : "",
+ bi->enable.block_output ? "bo," : "",
+ bi->enable.ds ? "ds," : "",
+ bi->enable.bayer_fir_6db ? "bf6," : "",
+ bi->enable.raw_binning ? "rawb," : "",
+ bi->enable.continuous ? "cont," : "",
+ bi->enable.s3a ? "s3a," : "",
+ bi->enable.fpnr ? "fpnr," : "",
+ bi->enable.sc ? "sc," : ""
+ );
+
+ snprintf(enable_info2, sizeof(enable_info2),
+ "%s%s%s%s%s%s%s%s%s%s%s",
+ bi->enable.macc ? "macc," : "",
+ bi->enable.output ? "outp," : "",
+ bi->enable.ref_frame ? "reff," : "",
+ bi->enable.tnr ? "tnr," : "",
+ bi->enable.xnr ? "xnr," : "",
+ bi->enable.params ? "par," : "",
+ bi->enable.ca_gdc ? "cagdc," : "",
+ bi->enable.isp_addresses ? "ispa," : "",
+ bi->enable.in_frame ? "inf," : "",
+ bi->enable.out_frame ? "outf," : "",
+ bi->enable.high_speed ? "hs," : ""
+ );
+
+ /* And merge them into one string */
+ snprintf(enable_info, sizeof(enable_info), "%s%s",
+ enable_info1, enable_info2);
+ {
+ int l, p;
+ char *ei = enable_info;
+
+ l = strlen(ei);
+
+ /* Replace last ',' with \0 if present */
+ if (l && enable_info[l-1] == ',')
+ enable_info[--l] = '\0';
+
+ if (l <= ENABLE_LINE_MAX_LENGTH) {
+ /* It fits on one line; copy the string and init
+ * the other helper strings with the empty string */
+ strcpy_s(enable_info,
+ sizeof(enable_info),
+ ei);
+ } else {
+ /* Too big for one line, find last comma */
+ p = ENABLE_LINE_MAX_LENGTH;
+ while (ei[p] != ',')
+ p--;
+ /* Last comma found, copy till that comma */
+ strncpy_s(enable_info1,
+ sizeof(enable_info1),
+ ei, p);
+ enable_info1[p] = '\0';
+
+ ei += p+1;
+ l = strlen(ei);
+
+ if (l <= ENABLE_LINE_MAX_LENGTH) {
+ /* The 2nd line fits */
+ /* we cannot use ei as argument because
+ * it is not guaranteed to be dword aligned
+ */
+ strncpy_s(enable_info2,
+ sizeof(enable_info2),
+ ei, l);
+ enable_info2[l] = '\0';
+ snprintf(enable_info, sizeof(enable_info), "%s\\n%s",
+ enable_info1, enable_info2);
+
+ } else {
+ /* 2nd line is still too long */
+ p = ENABLE_LINE_MAX_LENGTH;
+ while (ei[p] != ',')
+ p--;
+ strncpy_s(enable_info2,
+ sizeof(enable_info2),
+ ei, p);
+ enable_info2[p] = '\0';
+ ei += p+1;
+ l = strlen(ei);
+
+ if (l <= ENABLE_LINE_MAX_LENGTH) {
+ /* The 3rd line fits */
+ /* we cannot use ei as argument because
+ * it is not guaranteed to be dword aligned
+ */
+ strcpy_s(enable_info3,
+ sizeof(enable_info3), ei);
+ enable_info3[l] = '\0';
+ snprintf(enable_info, sizeof(enable_info),
+ "%s\\n%s\\n%s",
+ enable_info1, enable_info2,
+ enable_info3);
+ } else {
+ /* 3rd line is still too long */
+ p = ENABLE_LINE_MAX_LENGTH;
+ while (ei[p] != ',')
+ p--;
+ strncpy_s(enable_info3,
+ sizeof(enable_info3),
+ ei, p);
+ enable_info3[p] = '\0';
+ ei += p+1;
+ strcpy_s(enable_info3,
+ sizeof(enable_info3), ei);
+ snprintf(enable_info, sizeof(enable_info),
+ "%s\\n%s\\n%s",
+ enable_info1, enable_info2,
+ enable_info3);
+ }
+ }
+ }
+ }
+
+ dtrace_dot("node [shape = circle, fixedsize=true, width=2.5, "
+ "label=\"%s\\n%s\\n\\n%s\"]; \"%s(pipe%d)\"",
+ bin_type, blob_name, enable_info, blob_name, id);
+
+ } else {
+ dtrace_dot("node [shape = circle, fixedsize=true, width=2.5, "
+ "label=\"%s\\n%s\\n\"]; \"%s(pipe%d)\"",
+ bin_type, blob_name, blob_name, id);
+ }
+
+ if (stage->stage_num == 0) {
+ /*
+ * There are some implicit assumptions about which binary is the
+ * input binary, e.g. which one is connected to the input system.
+ * Priority:
+ * 1) The sp_raw_copy binary has the highest priority
+ * 2) First stage==0 binary of preview, video or capture
+ */
+ if (strlen(dot_id_input_bin) == 0) {
+ snprintf(dot_id_input_bin, sizeof(dot_id_input_bin),
+ "%s(pipe%d)", blob_name, id);
+ }
+ }
+
+ if (stage->args.in_frame) {
+ ia_css_debug_pipe_graph_dump_frame(
+ stage->args.in_frame, id, blob_name,
+ "in", true);
+ }
+
+#ifndef ISP2401
+ for (i = 0; i < NUM_VIDEO_TNR_FRAMES; i++) {
+#else
+ for (i = 0; i < NUM_TNR_FRAMES; i++) {
+#endif
+ if (stage->args.tnr_frames[i]) {
+ ia_css_debug_pipe_graph_dump_frame(
+ stage->args.tnr_frames[i], id,
+ blob_name, "tnr_frame", true);
+ }
+ }
+
+ for (i = 0; i < MAX_NUM_VIDEO_DELAY_FRAMES; i++) {
+ if (stage->args.delay_frames[i]) {
+ ia_css_debug_pipe_graph_dump_frame(
+ stage->args.delay_frames[i], id,
+ blob_name, "delay_frame", true);
+ }
+ }
+
+ for (i = 0; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++) {
+ if (stage->args.out_frame[i]) {
+ ia_css_debug_pipe_graph_dump_frame(
+ stage->args.out_frame[i], id, blob_name,
+ "out", false);
+ }
+ }
+
+ if (stage->args.out_vf_frame) {
+ ia_css_debug_pipe_graph_dump_frame(
+ stage->args.out_vf_frame, id, blob_name,
+ "out_vf", false);
+ }
+}
+
+void
+ia_css_debug_pipe_graph_dump_sp_raw_copy(
+ struct ia_css_frame *out_frame)
+{
+ assert(out_frame != NULL);
+ if (pg_inst.do_init) {
+ ia_css_debug_pipe_graph_dump_prologue();
+ pg_inst.do_init = false;
+ }
+
+ dtrace_dot("node [shape = circle, fixedsize=true, width=2.5, "
+ "label=\"%s\\n%s\"]; \"%s(pipe%d)\"",
+ "sp-binary", "sp_raw_copy", "sp_raw_copy", 1);
+
+ snprintf(ring_buffer, sizeof(ring_buffer),
+ "node [shape = box, "
+ "fixedsize=true, width=2, height=0.7]; \"0x%08lx\" "
+ "[label = \"%s\\n%d(%d) x %d\\nRingbuffer\"];",
+ HOST_ADDRESS(out_frame),
+ debug_frame_format2str(out_frame->info.format),
+ out_frame->info.res.width,
+ out_frame->info.padded_width,
+ out_frame->info.res.height);
+
+ dtrace_dot(ring_buffer);
+
+ dtrace_dot(
+ "\"%s(pipe%d)\"->\"0x%08lx\" "
+ "[label = out_frame];",
+ "sp_raw_copy", 1, HOST_ADDRESS(out_frame));
+
+ snprintf(dot_id_input_bin, sizeof(dot_id_input_bin), "%s(pipe%d)", "sp_raw_copy", 1);
+}
+
+void
+ia_css_debug_pipe_graph_dump_stream_config(
+ const struct ia_css_stream_config *stream_config)
+{
+ pg_inst.width = stream_config->input_config.input_res.width;
+ pg_inst.height = stream_config->input_config.input_res.height;
+ pg_inst.eff_width = stream_config->input_config.effective_res.width;
+ pg_inst.eff_height = stream_config->input_config.effective_res.height;
+ pg_inst.stream_format = stream_config->input_config.format;
+}
+
+void
+ia_css_debug_dump_resolution(
+ const struct ia_css_resolution *res,
+ const char *label)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "%s: =%d x =%d\n",
+ label, res->width, res->height);
+}
+
+void
+ia_css_debug_dump_frame_info(
+ const struct ia_css_frame_info *info,
+ const char *label)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "%s\n", label);
+ ia_css_debug_dump_resolution(&info->res, "res");
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "padded_width: %d\n",
+ info->padded_width);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "format: %d\n", info->format);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "raw_bit_depth: %d\n",
+ info->raw_bit_depth);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "raw_bayer_order: %d\n",
+ info->raw_bayer_order);
+}
+
+void
+ia_css_debug_dump_capture_config(
+ const struct ia_css_capture_config *config)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "%s\n", __func__);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "mode: %d\n", config->mode);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "enable_xnr: %d\n",
+ config->enable_xnr);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "enable_raw_output: %d\n",
+ config->enable_raw_output);
+}
+
+void
+ia_css_debug_dump_pipe_extra_config(
+ const struct ia_css_pipe_extra_config *extra_config)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "%s\n", __func__);
+ if (extra_config) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "enable_raw_binning: %d\n",
+ extra_config->enable_raw_binning);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "enable_yuv_ds: %d\n",
+ extra_config->enable_yuv_ds);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "enable_high_speed: %d\n",
+ extra_config->enable_high_speed);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "enable_dvs_6axis: %d\n",
+ extra_config->enable_dvs_6axis);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "enable_reduced_pipe: %d\n",
+ extra_config->enable_reduced_pipe);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "enable_fractional_ds: %d\n",
+ extra_config->enable_fractional_ds);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "disable_vf_pp: %d\n",
+ extra_config->disable_vf_pp);
+ }
+}
+
+void
+ia_css_debug_dump_pipe_config(
+ const struct ia_css_pipe_config *config)
+{
+ unsigned int i;
+
+ IA_CSS_ENTER_PRIVATE("config = %p", config);
+ if (!config) {
+ IA_CSS_ERROR("NULL input parameter");
+ IA_CSS_LEAVE_PRIVATE("");
+ return;
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "mode: %d\n", config->mode);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "isp_pipe_version: %d\n",
+ config->isp_pipe_version);
+ ia_css_debug_dump_resolution(&config->bayer_ds_out_res,
+ "bayer_ds_out_res");
+ ia_css_debug_dump_resolution(&config->capt_pp_in_res,
+ "capt_pp_in_res");
+ ia_css_debug_dump_resolution(&config->vf_pp_in_res, "vf_pp_in_res");
+#ifdef ISP2401
+ ia_css_debug_dump_resolution(&config->output_system_in_res,
+ "output_system_in_res");
+#endif
+ ia_css_debug_dump_resolution(&config->dvs_crop_out_res,
+ "dvs_crop_out_res");
+ for (i = 0; i < IA_CSS_PIPE_MAX_OUTPUT_STAGE; i++) {
+ ia_css_debug_dump_frame_info(&config->output_info[i], "output_info");
+ ia_css_debug_dump_frame_info(&config->vf_output_info[i],
+ "vf_output_info");
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "acc_extension: 0x%x\n",
+ config->acc_extension);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "num_acc_stages: %d\n",
+ config->num_acc_stages);
+ ia_css_debug_dump_capture_config(&config->default_capture_config);
+ ia_css_debug_dump_resolution(&config->dvs_envelope, "dvs_envelope");
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "dvs_frame_delay: %d\n",
+ config->dvs_frame_delay);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "acc_num_execs: %d\n",
+ config->acc_num_execs);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "enable_dz: %d\n",
+ config->enable_dz);
+ IA_CSS_LEAVE_PRIVATE("");
+}
+
+void
+ia_css_debug_dump_stream_config_source(
+ const struct ia_css_stream_config *config)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "%s()\n", __func__);
+ switch (config->mode) {
+ case IA_CSS_INPUT_MODE_SENSOR:
+ case IA_CSS_INPUT_MODE_BUFFERED_SENSOR:
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "source.port\n");
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "port: %d\n",
+ config->source.port.port);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "num_lanes: %d\n",
+ config->source.port.num_lanes);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "timeout: %d\n",
+ config->source.port.timeout);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "compression: %d\n",
+ config->source.port.compression);
+ break;
+ case IA_CSS_INPUT_MODE_TPG:
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "source.tpg\n");
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "id: %d\n",
+ config->source.tpg.id);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "mode: %d\n",
+ config->source.tpg.mode);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "x_mask: 0x%x\n",
+ config->source.tpg.x_mask);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "x_delta: %d\n",
+ config->source.tpg.x_delta);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "y_mask: 0x%x\n",
+ config->source.tpg.y_mask);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "y_delta: %d\n",
+ config->source.tpg.y_delta);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "xy_mask: 0x%x\n",
+ config->source.tpg.xy_mask);
+ break;
+ case IA_CSS_INPUT_MODE_PRBS:
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "source.prbs\n");
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "id: %d\n",
+ config->source.prbs.id);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "h_blank: %d\n",
+ config->source.prbs.h_blank);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "v_blank: %d\n",
+ config->source.prbs.v_blank);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "seed: 0x%x\n",
+ config->source.prbs.seed);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "seed1: 0x%x\n",
+ config->source.prbs.seed1);
+ break;
+ default:
+ case IA_CSS_INPUT_MODE_FIFO:
+ case IA_CSS_INPUT_MODE_MEMORY:
+ break;
+ }
+}
+
+void
+ia_css_debug_dump_mipi_buffer_config(
+ const struct ia_css_mipi_buffer_config *config)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "%s()\n", __func__);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "size_mem_words: %d\n",
+ config->size_mem_words);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "nof_mipi_buffers: %d\n",
+ config->nof_mipi_buffers);
+}
+
+void
+ia_css_debug_dump_metadata_config(
+ const struct ia_css_metadata_config *config)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "%s()\n", __func__);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "data_type: %d\n",
+ config->data_type);
+ ia_css_debug_dump_resolution(&config->resolution, "resolution");
+}
+
+void
+ia_css_debug_dump_stream_config(
+ const struct ia_css_stream_config *config,
+ int num_pipes)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "%s()\n", __func__);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "num_pipes: %d\n", num_pipes);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "mode: %d\n", config->mode);
+ ia_css_debug_dump_stream_config_source(config);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "channel_id: %d\n",
+ config->channel_id);
+ ia_css_debug_dump_resolution(&config->input_config.input_res, "input_res");
+ ia_css_debug_dump_resolution(&config->input_config.effective_res, "effective_res");
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "format: %d\n",
+ config->input_config.format);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "bayer_order: %d\n",
+ config->input_config.bayer_order);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "sensor_binning_factor: %d\n",
+ config->sensor_binning_factor);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "pixels_per_clock: %d\n",
+ config->pixels_per_clock);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "online: %d\n",
+ config->online);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "init_num_cont_raw_buf: %d\n",
+ config->init_num_cont_raw_buf);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "target_num_cont_raw_buf: %d\n",
+ config->target_num_cont_raw_buf);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "pack_raw_pixels: %d\n",
+ config->pack_raw_pixels);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "continuous: %d\n",
+ config->continuous);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "flash_gpio_pin: %d\n",
+ config->flash_gpio_pin);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "left_padding: %d\n",
+ config->left_padding);
+ ia_css_debug_dump_mipi_buffer_config(&config->mipi_buffer_config);
+ ia_css_debug_dump_metadata_config(&config->metadata_config);
+}
+
+/*
+ Trace support.
+
+ This tracer uses a buffer to trace the flow of the FW and to dump misc values (see below for details).
+ Currently, only SKC is supported.
+ To enable support for other platforms:
+ - Allocate a buffer for tracing in DMEM. The longer the better.
+ - Use the DBG_init routine in sp.hive.c to initialize the tracer with the selected address and size.
+ - Add trace points in the SP code wherever needed.
+ - Enable the dump below with the required address and required adjustments.
+ Dump is called at the end of ia_css_debug_dump_sp_state().
+*/
+
+/*
+ dump_trace(): dump the trace points from DMEM2.
+ For every trace point, the following are printed: index, major:minor and the 16-bit attached value.
+ The routine looks for the first 0, and then prints from it cyclically.
+ Data format in DMEM2:
+ first 4 DWORDS: header
+ DWORD 0: data description
+ byte 0: version
+ byte 1: number of threads (for future use)
+ bytes 2+3: number of TPs
+ DWORD 1: command byte + data (for future use)
+ byte 0: command
+ byte 1-3: command signature
+ DWORD 2-3: additional data (for future use)
+ Following data is 4-byte oriented:
+ byte 0: major
+ byte 1: minor
+ byte 2-3: data
+*/
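+/*
+ Illustrative sketch only: assuming byte 0 above is the least significant
+ byte of the 32-bit trace word read from DMEM2, a trace point decomposes as
+	major = word & 0xff
+	minor = (word >> 8) & 0xff
+	data  = (word >> 16) & 0xffff
+ The actual unpacking is done via the FIELD_*_UNPACK() macros used below.
+*/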
+#if TRACE_ENABLE_SP0 || TRACE_ENABLE_SP1 || TRACE_ENABLE_ISP
+#ifndef ISP2401
+static void debug_dump_one_trace(TRACE_CORE_ID proc_id)
+#else
+static void debug_dump_one_trace(enum TRACE_CORE_ID proc_id)
+#endif
+{
+#if defined(HAS_TRACER_V2)
+ uint32_t start_addr;
+ uint32_t start_addr_data;
+ uint32_t item_size;
+#ifndef ISP2401
+ uint32_t tmp;
+#else
+ uint8_t tid_val;
+ enum TRACE_DUMP_FORMAT dump_format;
+#endif
+ int i, j, max_trace_points, point_num, limit = -1;
+ /* using a static buffer here as the driver has issues allocating memory */
+ static uint32_t trace_read_buf[TRACE_BUFF_SIZE] = {0};
+#ifdef ISP2401
+ static struct trace_header_t header;
+ uint8_t *header_arr;
+#endif
+
+ /* read the header and parse it */
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "~~~ Tracer ");
+ switch (proc_id)
+ {
+ case TRACE_SP0_ID:
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "SP0");
+ start_addr = TRACE_SP0_ADDR;
+ start_addr_data = TRACE_SP0_DATA_ADDR;
+ item_size = TRACE_SP0_ITEM_SIZE;
+ max_trace_points = TRACE_SP0_MAX_POINTS;
+ break;
+ case TRACE_SP1_ID:
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "SP1");
+ start_addr = TRACE_SP1_ADDR;
+ start_addr_data = TRACE_SP1_DATA_ADDR;
+ item_size = TRACE_SP1_ITEM_SIZE;
+ max_trace_points = TRACE_SP1_MAX_POINTS;
+ break;
+ case TRACE_ISP_ID:
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ISP");
+ start_addr = TRACE_ISP_ADDR;
+ start_addr_data = TRACE_ISP_DATA_ADDR;
+ item_size = TRACE_ISP_ITEM_SIZE;
+ max_trace_points = TRACE_ISP_MAX_POINTS;
+ break;
+ default:
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "\t\ttraces are not supported for this processor ID - exiting\n");
+ return;
+ }
+#ifndef ISP2401
+ tmp = ia_css_device_load_uint32(start_addr);
+ point_num = (tmp >> 16) & 0xFFFF;
+#endif
+
+#ifndef ISP2401
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, " ver %d %d points\n", tmp & 0xFF, point_num);
+ if ((tmp & 0xFF) != TRACER_VER) {
+#else
+ /* Loading byte-by-byte as using the master routine had issues */
+ header_arr = (uint8_t *)&header;
+ for (i = 0; i < (int)sizeof(struct trace_header_t); i++)
+ header_arr[i] = ia_css_device_load_uint8(start_addr + (i));
+
+ point_num = header.max_tracer_points;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, " ver %d %d points\n", header.version, point_num);
+ if ((header.version & 0xFF) != TRACER_VER) {
+#endif
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "\t\tUnknown version - exiting\n");
+ return;
+ }
+ if (point_num > max_trace_points) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "\t\tToo many points - exiting\n");
+ return;
+ }
+ /* copy the TPs and find the first 0 */
+ for (i = 0; i < point_num; i++) {
+ trace_read_buf[i] = ia_css_device_load_uint32(start_addr_data + (i * item_size));
+ if ((limit == (-1)) && (trace_read_buf[i] == 0))
+ limit = i;
+ }
+#ifdef ISP2401
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "Status:\n");
+ for (i = 0; i < SH_CSS_MAX_SP_THREADS; i++)
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "\tT%d: %3d (%02x) %6d (%04x) %10d (%08x)\n", i,
+ header.thr_status_byte[i], header.thr_status_byte[i],
+ header.thr_status_word[i], header.thr_status_word[i],
+ header.thr_status_dword[i], header.thr_status_dword[i]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "Scratch:\n");
+ for (i = 0; i < MAX_SCRATCH_DATA; i++)
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "%10d (%08x) ",
+ header.scratch_debug[i], header.scratch_debug[i]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "\n");
+
+#endif
+ /* two 0s in the beginning: empty buffer */
+ if ((trace_read_buf[0] == 0) && (trace_read_buf[1] == 0)) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "\t\tEmpty tracer - exiting\n");
+ return;
+ }
+ /* no overrun: start from 0 */
+ if ((limit == point_num-1) || /* first 0 is at the end - border case */
+ (trace_read_buf[limit+1] == 0)) /* did not make a full cycle after the memset */
+ limit = 0;
+ /* overrun: limit is the first non-zero after the first zero */
+ else
+ limit++;
+
+ /* print the TPs */
+ for (i = 0; i < point_num; i++) {
+ j = (limit + i) % point_num;
+ if (trace_read_buf[j])
+ {
+#ifndef ISP2401
+ TRACE_DUMP_FORMAT dump_format = FIELD_FORMAT_UNPACK(trace_read_buf[j]);
+#else
+
+ tid_val = FIELD_TID_UNPACK(trace_read_buf[j]);
+ dump_format = TRACE_DUMP_FORMAT_POINT;
+
+ /*
+ * When the tid value is 111b, the data is interpreted differently:
+ * the tid value is ignored and the major field contains 2 bits (msb)
+ * for the format type
+ */
+ if (tid_val == FIELD_TID_SEL_FORMAT_PAT) {
+ dump_format = FIELD_FORMAT_UNPACK(trace_read_buf[j]);
+ }
+#endif
+ switch (dump_format)
+ {
+ case TRACE_DUMP_FORMAT_POINT:
+ ia_css_debug_dtrace(
+#ifndef ISP2401
+ IA_CSS_DEBUG_TRACE, "\t\t%d %d:%d value - %d\n",
+ j, FIELD_MAJOR_UNPACK(trace_read_buf[j]),
+#else
+ IA_CSS_DEBUG_TRACE, "\t\t%d T%d %d:%d value - %x (%d)\n",
+ j,
+ tid_val,
+ FIELD_MAJOR_UNPACK(trace_read_buf[j]),
+#endif
+ FIELD_MINOR_UNPACK(trace_read_buf[j]),
+#ifdef ISP2401
+ FIELD_VALUE_UNPACK(trace_read_buf[j]),
+#endif
+ FIELD_VALUE_UNPACK(trace_read_buf[j]));
+ break;
+#ifndef ISP2401
+ case TRACE_DUMP_FORMAT_VALUE24_HEX:
+#else
+ case TRACE_DUMP_FORMAT_POINT_NO_TID:
+#endif
+ ia_css_debug_dtrace(
+#ifndef ISP2401
+ IA_CSS_DEBUG_TRACE, "\t\t%d, %d, 24bit value %x H\n",
+#else
+ IA_CSS_DEBUG_TRACE, "\t\t%d %d:%d value - %x (%d)\n",
+#endif
+ j,
+#ifndef ISP2401
+ FIELD_MAJOR_UNPACK(trace_read_buf[j]),
+ FIELD_VALUE_24_UNPACK(trace_read_buf[j]));
+#else
+ FIELD_MAJOR_W_FMT_UNPACK(trace_read_buf[j]),
+ FIELD_MINOR_UNPACK(trace_read_buf[j]),
+ FIELD_VALUE_UNPACK(trace_read_buf[j]),
+ FIELD_VALUE_UNPACK(trace_read_buf[j]));
+#endif
+ break;
+#ifndef ISP2401
+ case TRACE_DUMP_FORMAT_VALUE24_DEC:
+#else
+ case TRACE_DUMP_FORMAT_VALUE24:
+#endif
+ ia_css_debug_dtrace(
+#ifndef ISP2401
+ IA_CSS_DEBUG_TRACE, "\t\t%d, %d, 24bit value %d D\n",
+#else
+ IA_CSS_DEBUG_TRACE, "\t\t%d, %d, 24bit value %x (%d)\n",
+#endif
+ j,
+ FIELD_MAJOR_UNPACK(trace_read_buf[j]),
+#ifdef ISP2401
+ FIELD_MAJOR_W_FMT_UNPACK(trace_read_buf[j]),
+ FIELD_VALUE_24_UNPACK(trace_read_buf[j]),
+#endif
+ FIELD_VALUE_24_UNPACK(trace_read_buf[j]));
+ break;
+#ifdef ISP2401
+
+#endif
+ case TRACE_DUMP_FORMAT_VALUE24_TIMING:
+ ia_css_debug_dtrace(
+ IA_CSS_DEBUG_TRACE, "\t\t%d, %d, timing %x\n",
+ j,
+#ifndef ISP2401
+ FIELD_MAJOR_UNPACK(trace_read_buf[j]),
+#else
+ FIELD_MAJOR_W_FMT_UNPACK(trace_read_buf[j]),
+#endif
+ FIELD_VALUE_24_UNPACK(trace_read_buf[j]));
+ break;
+ case TRACE_DUMP_FORMAT_VALUE24_TIMING_DELTA:
+ ia_css_debug_dtrace(
+ IA_CSS_DEBUG_TRACE, "\t\t%d, %d, timing delta %x\n",
+ j,
+#ifndef ISP2401
+ FIELD_MAJOR_UNPACK(trace_read_buf[j]),
+#else
+ FIELD_MAJOR_W_FMT_UNPACK(trace_read_buf[j]),
+#endif
+ FIELD_VALUE_24_UNPACK(trace_read_buf[j]));
+ break;
+ default:
+ ia_css_debug_dtrace(
+ IA_CSS_DEBUG_TRACE,
+ "no such trace dump format %d",
+#ifndef ISP2401
+ FIELD_FORMAT_UNPACK(trace_read_buf[j]));
+#else
+ dump_format);
+#endif
+ break;
+ }
+ }
+ }
+#else
+ (void)proc_id;
+#endif /* HAS_TRACER_V2 */
+}
+#endif /* TRACE_ENABLE_SP0 || TRACE_ENABLE_SP1 || TRACE_ENABLE_ISP */
+
+void ia_css_debug_dump_trace(void)
+{
+#if TRACE_ENABLE_SP0
+ debug_dump_one_trace(TRACE_SP0_ID);
+#endif
+#if TRACE_ENABLE_SP1
+ debug_dump_one_trace(TRACE_SP1_ID);
+#endif
+#if TRACE_ENABLE_ISP
+ debug_dump_one_trace(TRACE_ISP_ID);
+#endif
+}
+
+#if defined(USE_INPUT_SYSTEM_VERSION_2) || defined(USE_INPUT_SYSTEM_VERSION_2401)
+/* Tagger state dump function. The tagger is only available when the CSS
+ * contains an input system (2400 or 2401). */
+void ia_css_debug_tagger_state(void)
+{
+ unsigned int i;
+ unsigned int HIVE_ADDR_tagger_frames;
+ ia_css_tagger_buf_sp_elem_t tbuf_frames[MAX_CB_ELEMS_FOR_TAGGER];
+
+ HIVE_ADDR_tagger_frames = sh_css_sp_fw.info.sp.tagger_frames_addr;
+
+ /* This variable is not used in crun */
+ (void)HIVE_ADDR_tagger_frames;
+
+ /* 2400 and 2401 only have 1 SP, so the tagger lives on SP0 */
+ sp_dmem_load(SP0_ID,
+ (unsigned int)sp_address_of(tagger_frames),
+ tbuf_frames,
+ sizeof(tbuf_frames));
+
+ ia_css_debug_dtrace(2, "Tagger Info:\n");
+ for (i = 0; i < MAX_CB_ELEMS_FOR_TAGGER; i++) {
+ ia_css_debug_dtrace(2, "\t tagger frame[%d]: exp_id=%d, marked=%d, locked=%d\n",
+ i, tbuf_frames[i].exp_id, tbuf_frames[i].mark, tbuf_frames[i].lock);
+ }
+
+}
+#endif /* defined(USE_INPUT_SYSTEM_VERSION_2) || defined(USE_INPUT_SYSTEM_VERSION_2401) */
+
+#ifdef ISP2401
+void ia_css_debug_pc_dump(sp_ID_t id, unsigned int num_of_dumps)
+{
+ unsigned int pc;
+ unsigned int i;
+ hrt_data sc = sp_ctrl_load(id, SP_SC_REG);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "SP%-1d Status reg: 0x%X\n", id, sc);
+ sc = sp_ctrl_load(id, SP_CTRL_SINK_REG);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "SP%-1d Stall reg: 0x%X\n", id, sc);
+ for (i = 0; i < num_of_dumps; i++) {
+ pc = sp_ctrl_load(id, SP_PC_REG);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "SP%-1d PC: 0x%X\n", id, pc);
+ }
+}
+#endif
+
+#if defined(HRT_SCHED) || defined(SH_CSS_DEBUG_SPMEM_DUMP_SUPPORT)
+#include "spmem_dump.c"
+#endif
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/event/interface/ia_css_event.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/event/interface/ia_css_event.h
new file mode 100644
index 000000000000..ab1d9bed9fd8
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/event/interface/ia_css_event.h
@@ -0,0 +1,46 @@
+#ifndef ISP2401
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#else
+/**
+Support for Intel Camera Imaging ISP subsystem.
+Copyright (c) 2010 - 2015, Intel Corporation.
+
+This program is free software; you can redistribute it and/or modify it
+under the terms and conditions of the GNU General Public License,
+version 2, as published by the Free Software Foundation.
+
+This program is distributed in the hope it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+more details.
+*/
+#endif
+
+#ifndef _IA_CSS_EVENT_H
+#define _IA_CSS_EVENT_H
+
+#include <type_support.h>
+#include "sw_event_global.h" /*event macros.TODO : Change File Name..???*/
+
+bool ia_css_event_encode(
+ uint8_t *in,
+ uint8_t nr,
+ uint32_t *out);
+
+void ia_css_event_decode(
+ uint32_t event,
+ uint8_t *payload);
+
+#endif /*_IA_CSS_EVENT_H*/
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/event/src/event.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/event/src/event.c
new file mode 100644
index 000000000000..2698c3e1adb0
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/event/src/event.c
@@ -0,0 +1,126 @@
+#ifndef ISP2401
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#else
+/**
+Support for Intel Camera Imaging ISP subsystem.
+Copyright (c) 2010 - 2015, Intel Corporation.
+
+This program is free software; you can redistribute it and/or modify it
+under the terms and conditions of the GNU General Public License,
+version 2, as published by the Free Software Foundation.
+
+This program is distributed in the hope it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+more details.
+*/
+#endif
+
+#include "sh_css_sp.h"
+
+#include "dma.h" /* N_DMA_CHANNEL_ID */
+
+#include <type_support.h>
+#include "ia_css_binary.h"
+#include "sh_css_hrt.h"
+#include "sh_css_defs.h"
+#include "sh_css_internal.h"
+#include "ia_css_debug.h"
+#include "ia_css_debug_internal.h"
+#include "sh_css_legacy.h"
+
+#include "gdc_device.h" /* HRT_GDC_N */
+
+/*#include "sp.h"*/ /* host2sp_enqueue_frame_data() */
+
+#include "memory_access.h"
+
+#include "assert_support.h"
+#include "platform_support.h" /* hrt_sleep() */
+
+#include "ia_css_queue.h" /* host_sp_enqueue_XXX */
+#include "ia_css_event.h" /* ia_css_event_encode */
+/**
+ * @brief Encode the information into the software-event.
+ * Refer to "sw_event_public.h" for details.
+ */
+bool ia_css_event_encode(
+ uint8_t *in,
+ uint8_t nr,
+ uint32_t *out)
+{
+ bool ret;
+ uint32_t nr_of_bits;
+ uint32_t i;
+ assert(in != NULL);
+ assert(out != NULL);
+ OP___assert(nr > 0 && nr <= MAX_NR_OF_PAYLOADS_PER_SW_EVENT);
+
+ /* initialize the output */
+ *out = 0;
+
+ /* get the number of bits available per payload element */
+ nr_of_bits = sizeof(uint32_t) * 8 / nr;
+
+ /* pack all the inputs into a single output */
+ for (i = 0; i < nr; i++) {
+ *out <<= nr_of_bits;
+ *out |= in[i];
+ }
+
+ /* get the return value */
+ ret = (nr > 0 && nr <= MAX_NR_OF_PAYLOADS_PER_SW_EVENT);
+
+ return ret;
+}
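+
+/*
+ * Worked example (follows directly from the loop above): with nr == 4 each
+ * input gets 32/4 = 8 bits, so after the loop
+ *	*out == (in[0] << 24) | (in[1] << 16) | (in[2] << 8) | in[3],
+ * i.e. in[0] ends up in the most significant byte of the output word.
+ */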
+
+void ia_css_event_decode(
+ uint32_t event,
+ uint8_t *payload)
+{
+ assert(payload[1] == 0);
+ assert(payload[2] == 0);
+ assert(payload[3] == 0);
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_event_decode() enter:\n");
+
+ /* First decode according to the common case.
+ * In case of a PORT_EOF event we overwrite with
+ * the specific values.
+ * This is somewhat ugly but probably somewhat efficient
+ * (and it avoids some code duplication).
+ */
+ payload[0] = event & 0xff; /*event_code */
+ payload[1] = (event >> 8) & 0xff;
+ payload[2] = (event >> 16) & 0xff;
+ payload[3] = 0;
+
+ switch (payload[0]) {
+ case SH_CSS_SP_EVENT_PORT_EOF:
+ payload[2] = 0;
+ payload[3] = (event >> 24) & 0xff;
+ break;
+
+ case SH_CSS_SP_EVENT_ACC_STAGE_COMPLETE:
+ case SH_CSS_SP_EVENT_TIMER:
+ case SH_CSS_SP_EVENT_FRAME_TAGGED:
+ case SH_CSS_SP_EVENT_FW_WARNING:
+ case SH_CSS_SP_EVENT_FW_ASSERT:
+ payload[3] = (event >> 24) & 0xff;
+ break;
+ default:
+ break;
+ }
+}
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/eventq/interface/ia_css_eventq.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/eventq/interface/ia_css_eventq.h
new file mode 100644
index 000000000000..67eb8fdb33c5
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/eventq/interface/ia_css_eventq.h
@@ -0,0 +1,69 @@
+#ifndef ISP2401
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#else
+/**
+Support for Intel Camera Imaging ISP subsystem.
+Copyright (c) 2010 - 2015, Intel Corporation.
+
+This program is free software; you can redistribute it and/or modify it
+under the terms and conditions of the GNU General Public License,
+version 2, as published by the Free Software Foundation.
+
+This program is distributed in the hope it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+more details.
+*/
+#endif
+
+#ifndef _IA_CSS_EVENTQ_H
+#define _IA_CSS_EVENTQ_H
+
+#include "ia_css_queue.h" /* queue APIs */
+
+/**
+ * @brief HOST receives event from SP.
+ *
+ * @param[in] eventq_handle The event queue handle.
+ * @param[out] payload The decoded event payload.
+ * @return 0 - Successfully dequeued.
+ * @return EINVAL - Invalid argument.
+ * @return ENODATA - Queue is empty.
+ */
+int ia_css_eventq_recv(
+ ia_css_queue_t *eventq_handle,
+ uint8_t *payload);
+
+/**
+ * @brief The Host sends the event to SP.
+ * The caller of this API will be blocked until the event
+ * is sent.
+ *
+ * @param[in] eventq_handle The event queue handle.
+ * @param[in] evt_id The event ID.
+ * @param[in] evt_payload_0 The event payload.
+ * @param[in] evt_payload_1 The event payload.
+ * @param[in] evt_payload_2 The event payload.
+ * @return 0 - Successfully enqueued.
+ * @return EINVAL - Invalid argument.
+ * @return ENOBUFS - Queue is full.
+ */
+int ia_css_eventq_send(
+ ia_css_queue_t *eventq_handle,
+ uint8_t evt_id,
+ uint8_t evt_payload_0,
+ uint8_t evt_payload_1,
+ uint8_t evt_payload_2);
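+
+/*
+ * Illustrative usage sketch (the queue handles and the handler function are
+ * hypothetical; only the ia_css_eventq_*() calls belong to this interface):
+ *
+ *	uint8_t payload[4];
+ *
+ *	if (ia_css_eventq_recv(sp2host_eventq, payload) == 0)
+ *		handle_sp_event(payload);
+ *
+ *	ia_css_eventq_send(host2sp_eventq, evt_id, 0, 0, 0);
+ */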
+#endif /* _IA_CSS_EVENTQ_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/eventq/src/eventq.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/eventq/src/eventq.c
new file mode 100644
index 000000000000..56d6858890ec
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/eventq/src/eventq.c
@@ -0,0 +1,77 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "ia_css_types.h"
+#include "assert_support.h"
+#include "ia_css_queue.h" /* sp2host_dequeue_irq_event() */
+#include "ia_css_eventq.h"
+#include "ia_css_event.h" /* ia_css_event_encode()
+ ia_css_event_decode()
+ */
+#include "platform_support.h" /* hrt_sleep() */
+
+int ia_css_eventq_recv(
+ ia_css_queue_t *eventq_handle,
+ uint8_t *payload)
+{
+ uint32_t sp_event;
+ int error;
+
+ /* dequeue the IRQ event */
+ error = ia_css_queue_dequeue(eventq_handle, &sp_event);
+
+ /* check whether the IRQ event is available or not */
+ if (!error)
+ ia_css_event_decode(sp_event, payload);
+ return error;
+}
+
+/**
+ * @brief The Host sends the event to the SP.
+ * Refer to "sh_css_sp.h" for details.
+ */
+int ia_css_eventq_send(
+ ia_css_queue_t *eventq_handle,
+ uint8_t evt_id,
+ uint8_t evt_payload_0,
+ uint8_t evt_payload_1,
+ uint8_t evt_payload_2)
+{
+ uint8_t tmp[4];
+ uint32_t sw_event;
+ int error = ENOSYS;
+
+ /*
+ * Encode the event ID and the payload values into the software event.
+ */
+ tmp[0] = evt_id;
+ tmp[1] = evt_payload_0;
+ tmp[2] = evt_payload_1;
+ tmp[3] = evt_payload_2;
+ ia_css_event_encode(tmp, 4, &sw_event);
+
+ /* queue the software event (busy-waiting) */
+ for ( ; ; ) {
+ error = ia_css_queue_enqueue(eventq_handle, sw_event);
+ if (error != ENOBUFS) {
+ /* We either sent the event successfully
+ or hit a real failure; return the status */
+ break;
+ }
+ /* Wait until the queue is no longer full and try again */
+ hrt_sleep();
+ }
+ return error;
+}
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/frame/interface/ia_css_frame.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/frame/interface/ia_css_frame.h
new file mode 100644
index 000000000000..c7e07b79f4e5
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/frame/interface/ia_css_frame.h
@@ -0,0 +1,180 @@
+#ifndef ISP2401
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#else
+/**
+Support for Intel Camera Imaging ISP subsystem.
+Copyright (c) 2010 - 2015, Intel Corporation.
+
+This program is free software; you can redistribute it and/or modify it
+under the terms and conditions of the GNU General Public License,
+version 2, as published by the Free Software Foundation.
+
+This program is distributed in the hope it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+more details.
+*/
+#endif
+
+#ifndef __IA_CSS_FRAME_H__
+#define __IA_CSS_FRAME_H__
+
+#ifdef ISP2401
+#include <ia_css_types.h>
+#endif
+#include <ia_css_frame_format.h>
+#include <ia_css_frame_public.h>
+#include "dma.h"
+
+/*********************************************************************
+**** Frame INFO APIs
+**********************************************************************/
+/** @brief Sets the given width and minimum padded width in the frame info
+ *
+ * @param
+ * @param[in] info The frame info to be updated
+ * @param[in] width The width to be set
+ * @param[in] min_padded_width The minimum padded width to be set
+ * @return
+ */
+void ia_css_frame_info_set_width(struct ia_css_frame_info *info,
+ unsigned int width,
+ unsigned int min_padded_width);
+
+/** @brief Set the format of a frame info
+ *
+ * @param[in,out] info The frame info to update
+ * @param[in] format The format to be set in info
+ * @return None
+ */
+void ia_css_frame_info_set_format(struct ia_css_frame_info *info,
+ enum ia_css_frame_format format);
+
+/** @brief Initialize a frame info with the given parameters
+ *
+ * @param[in,out] info The frame info to initialize
+ * @param[in] width The width (in pixels) to be set in info
+ * @param[in] height The height (in lines) to be set in info
+ * @param[in] format The frame format to be set in info
+ * @param[in] aligned The minimum padded width (in pixels) to be set in info
+ * @return None
+ */
+void ia_css_frame_info_init(struct ia_css_frame_info *info,
+ unsigned int width,
+ unsigned int height,
+ enum ia_css_frame_format format,
+ unsigned int aligned);
+
+/** @brief Check whether two frame infos have the same resolution
+ *
+ * @param[in] info_a The first frame info to be compared
+ * @param[in] info_b The second frame info to be compared
+ * @return true if the resolutions are equal
+ */
+bool ia_css_frame_info_is_same_resolution(
+ const struct ia_css_frame_info *info_a,
+ const struct ia_css_frame_info *info_b);
+
+/** @brief Check whether a frame info is valid
+ *
+ * @param[in] info The frame info to be checked
+ * @return The error code.
+ */
+enum ia_css_err ia_css_frame_check_info(const struct ia_css_frame_info *info);
+
+/*********************************************************************
+**** Frame APIs
+**********************************************************************/
+
+/** @brief Initialize the planes of a frame depending on its format
+ *
+ * @param[in,out] frame The frame whose planes are to be initialized
+ * @return The error code.
+ */
+enum ia_css_err ia_css_frame_init_planes(struct ia_css_frame *frame);
+
+/** @brief Free an array of frames
+ *
+ * @param[in] num_frames The number of frames in the array
+ * @param[in,out] frames_array The array of frames to be freed
+ * @return None
+ */
+void ia_css_frame_free_multiple(unsigned int num_frames,
+ struct ia_css_frame **frames_array);
+
+/** @brief Allocate a CSS frame structure of a given size in bytes.
+ *
+ * @param[out] frame The allocated frame.
+ * @param[in] size_bytes The frame size in bytes.
+ * @param[in] contiguous Whether to allocate physically contiguous memory.
+ * @return The error code.
+ *
+ * Allocate a frame using the given size in bytes.
+ * The frame structure is only partially initialized.
+ */
+enum ia_css_err ia_css_frame_allocate_with_buffer_size(
+ struct ia_css_frame **frame,
+ const unsigned int size_bytes,
+ const bool contiguous);
+
+/** @brief Check whether two frames are of the same type
+ *
+ * @param[in] frame_a The first frame to be compared
+ * @param[in] frame_b The second frame to be compared
+ * @return true if the frames are of the same type
+ */
+bool ia_css_frame_is_same_type(
+ const struct ia_css_frame *frame_a,
+ const struct ia_css_frame *frame_b);
+
+/** @brief Configure a DMA port from a frame info
+ *
+ * @param[out] config The DMA port configuration
+ * @param[in] info The frame info
+ * @return None
+ */
+void ia_css_dma_configure_from_info(
+ struct dma_port_config *config,
+ const struct ia_css_frame_info *info);
+
+#ifdef ISP2401
+/** @brief Find the cropping resolution
+ * This function finds the maximum cropping resolution in an input image while
+ * keeping the aspect ratio of the given output resolution. The crop is taken
+ * from the center of the input image, and the dimensions of the cropping
+ * resolution are returned.
+ *
+ * @param[in] in_res Resolution of the input image
+ * @param[in] out_res Resolution of the output image
+ * @param[out] crop_res Crop resolution of the input image
+ * @return IA_CSS_SUCCESS, or IA_CSS_ERR_INVALID_ARGUMENTS on error
+ */
+enum ia_css_err
+ia_css_frame_find_crop_resolution(const struct ia_css_resolution *in_res,
+ const struct ia_css_resolution *out_res,
+ struct ia_css_resolution *crop_res);
+
+#endif
+#endif /* __IA_CSS_FRAME_H__ */
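+
+/* Quick orientation for the frame-info helpers declared above (illustrative
+ * sketch only; the 1920x1080 NV12 values are arbitrary and
+ * ia_css_frame_allocate_from_info() comes from the public CSS frame API
+ * rather than this header):
+ *
+ *	struct ia_css_frame_info info = { 0 };
+ *	struct ia_css_frame *frame = NULL;
+ *
+ *	ia_css_frame_info_init(&info, 1920, 1080, IA_CSS_FRAME_FORMAT_NV12, 0);
+ *	if (ia_css_frame_check_info(&info) == IA_CSS_SUCCESS)
+ *		ia_css_frame_allocate_from_info(&frame, &info);
+ */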
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/frame/interface/ia_css_frame_comm.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/frame/interface/ia_css_frame_comm.h
new file mode 100644
index 000000000000..a469e0afb2b5
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/frame/interface/ia_css_frame_comm.h
@@ -0,0 +1,132 @@
+#ifndef ISP2401
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#else
+/**
+Support for Intel Camera Imaging ISP subsystem.
+Copyright (c) 2010 - 2015, Intel Corporation.
+
+This program is free software; you can redistribute it and/or modify it
+under the terms and conditions of the GNU General Public License,
+version 2, as published by the Free Software Foundation.
+
+This program is distributed in the hope it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+more details.
+*/
+#endif
+
+#ifndef __IA_CSS_FRAME_COMM_H__
+#define __IA_CSS_FRAME_COMM_H__
+
+#include "type_support.h"
+#include "platform_support.h"
+#include "runtime/bufq/interface/ia_css_bufq_comm.h"
+#include <system_types.h> /* hrt_vaddress */
+
+/*
+ * These structs are derived from structs defined in ia_css_types.h
+ * (just take out the "_sp" from the struct name to get the "original")
+ * All the fields that are not needed by the SP are removed.
+ */
+struct ia_css_frame_sp_plane {
+ unsigned int offset; /* offset in bytes to start of frame data */
+ /* offset is wrt data in sh_css_sp_sp_frame */
+};
+
+struct ia_css_frame_sp_binary_plane {
+ unsigned int size;
+ struct ia_css_frame_sp_plane data;
+};
+
+struct ia_css_frame_sp_yuv_planes {
+ struct ia_css_frame_sp_plane y;
+ struct ia_css_frame_sp_plane u;
+ struct ia_css_frame_sp_plane v;
+};
+
+struct ia_css_frame_sp_nv_planes {
+ struct ia_css_frame_sp_plane y;
+ struct ia_css_frame_sp_plane uv;
+};
+
+struct ia_css_frame_sp_rgb_planes {
+ struct ia_css_frame_sp_plane r;
+ struct ia_css_frame_sp_plane g;
+ struct ia_css_frame_sp_plane b;
+};
+
+struct ia_css_frame_sp_plane6 {
+ struct ia_css_frame_sp_plane r;
+ struct ia_css_frame_sp_plane r_at_b;
+ struct ia_css_frame_sp_plane gr;
+ struct ia_css_frame_sp_plane gb;
+ struct ia_css_frame_sp_plane b;
+ struct ia_css_frame_sp_plane b_at_r;
+};
+
+struct ia_css_sp_resolution {
+ uint16_t width; /* Width of valid data in pixels */
+ uint16_t height; /* Height of valid data in lines */
+};
+
+/*
+ * Frame info struct. This describes the contents of an image frame buffer.
+ */
+struct ia_css_frame_sp_info {
+ struct ia_css_sp_resolution res;
+ uint16_t padded_width; /* stride of line in memory
+ (in pixels) */
+ unsigned char format; /* format of the frame data */
+ unsigned char raw_bit_depth; /* number of valid bits per pixel,
+ only valid for RAW bayer frames */
+ unsigned char raw_bayer_order; /* bayer order, only valid
+ for RAW bayer frames */
+ unsigned char padding[3]; /* Extend to 32 bit multiple */
+};
+
+struct ia_css_buffer_sp {
+ union {
+ hrt_vaddress xmem_addr;
+ enum sh_css_queue_id queue_id;
+ } buf_src;
+ enum ia_css_buffer_type buf_type;
+};
+
+struct ia_css_frame_sp {
+ struct ia_css_frame_sp_info info;
+ struct ia_css_buffer_sp buf_attr;
+ union {
+ struct ia_css_frame_sp_plane raw;
+ struct ia_css_frame_sp_plane rgb;
+ struct ia_css_frame_sp_rgb_planes planar_rgb;
+ struct ia_css_frame_sp_plane yuyv;
+ struct ia_css_frame_sp_yuv_planes yuv;
+ struct ia_css_frame_sp_nv_planes nv;
+ struct ia_css_frame_sp_plane6 plane6;
+ struct ia_css_frame_sp_binary_plane binary;
+ } planes;
+};
+
+void ia_css_frame_info_to_frame_sp_info(
+ struct ia_css_frame_sp_info *sp_info,
+ const struct ia_css_frame_info *info);
+
+void ia_css_resolution_to_sp_resolution(
+ struct ia_css_sp_resolution *sp_info,
+ const struct ia_css_resolution *info);
+
+#endif /*__IA_CSS_FRAME_COMM_H__*/
+
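+/* The *_sp structures above are SP-side mirrors of the host frame
+ * descriptors, with fields narrowed to 8/16-bit types. A hypothetical
+ * host-side use of the declared converters ("frame" is assumed to be a
+ * struct ia_css_frame the host already owns):
+ *
+ *	struct ia_css_frame_sp_info sp_info;
+ *
+ *	ia_css_frame_info_to_frame_sp_info(&sp_info, &frame->info);
+ */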
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/frame/src/frame.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/frame/src/frame.c
new file mode 100644
index 000000000000..f1a943cf04c0
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/frame/src/frame.c
@@ -0,0 +1,1026 @@
+#ifndef ISP2401
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#else
+/**
+Support for Intel Camera Imaging ISP subsystem.
+Copyright (c) 2010 - 2015, Intel Corporation.
+
+This program is free software; you can redistribute it and/or modify it
+under the terms and conditions of the GNU General Public License,
+version 2, as published by the Free Software Foundation.
+
+This program is distributed in the hope it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+more details.
+*/
+#endif
+
+#include "ia_css_frame.h"
+#include <math_support.h>
+#include "assert_support.h"
+#include "ia_css_debug.h"
+#include "isp.h"
+#include "sh_css_internal.h"
+#include "memory_access.h"
+
+
+#define NV12_TILEY_TILE_WIDTH 128
+#define NV12_TILEY_TILE_HEIGHT 32
+
+/**************************************************************************
+** Static functions declarations
+**************************************************************************/
+static void frame_init_plane(struct ia_css_frame_plane *plane,
+ unsigned int width,
+ unsigned int stride,
+ unsigned int height,
+ unsigned int offset);
+
+static void frame_init_single_plane(struct ia_css_frame *frame,
+ struct ia_css_frame_plane *plane,
+ unsigned int height,
+ unsigned int subpixels_per_line,
+ unsigned int bytes_per_pixel);
+
+static void frame_init_raw_single_plane(
+ struct ia_css_frame *frame,
+ struct ia_css_frame_plane *plane,
+ unsigned int height,
+ unsigned int subpixels_per_line,
+ unsigned int bits_per_pixel);
+
+static void frame_init_mipi_plane(struct ia_css_frame *frame,
+ struct ia_css_frame_plane *plane,
+ unsigned int height,
+ unsigned int subpixels_per_line,
+ unsigned int bytes_per_pixel);
+
+static void frame_init_nv_planes(struct ia_css_frame *frame,
+ unsigned int horizontal_decimation,
+ unsigned int vertical_decimation,
+ unsigned int bytes_per_element);
+
+static void frame_init_yuv_planes(struct ia_css_frame *frame,
+ unsigned int horizontal_decimation,
+ unsigned int vertical_decimation,
+ bool swap_uv,
+ unsigned int bytes_per_element);
+
+static void frame_init_rgb_planes(struct ia_css_frame *frame,
+ unsigned int bytes_per_element);
+
+static void frame_init_qplane6_planes(struct ia_css_frame *frame);
+
+static enum ia_css_err frame_allocate_buffer_data(struct ia_css_frame *frame);
+
+static enum ia_css_err frame_allocate_with_data(struct ia_css_frame **frame,
+ unsigned int width,
+ unsigned int height,
+ enum ia_css_frame_format format,
+ unsigned int padded_width,
+ unsigned int raw_bit_depth,
+ bool contiguous);
+
+static struct ia_css_frame *frame_create(unsigned int width,
+ unsigned int height,
+ enum ia_css_frame_format format,
+ unsigned int padded_width,
+ unsigned int raw_bit_depth,
+ bool contiguous,
+ bool valid);
+
+static unsigned
+ia_css_elems_bytes_from_info(
+ const struct ia_css_frame_info *info);
+
+/**************************************************************************
+** CSS API functions, exposed by ia_css.h
+**************************************************************************/
+
+void ia_css_frame_zero(struct ia_css_frame *frame)
+{
+ assert(frame != NULL);
+ mmgr_clear(frame->data, frame->data_bytes);
+}
+
+enum ia_css_err ia_css_frame_allocate_from_info(struct ia_css_frame **frame,
+ const struct ia_css_frame_info *info)
+{
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ if (frame == NULL || info == NULL)
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_frame_allocate_from_info() enter:\n");
+ err =
+ ia_css_frame_allocate(frame, info->res.width, info->res.height,
+ info->format, info->padded_width,
+ info->raw_bit_depth);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_frame_allocate_from_info() leave:\n");
+ return err;
+}
+
+enum ia_css_err ia_css_frame_allocate(struct ia_css_frame **frame,
+ unsigned int width,
+ unsigned int height,
+ enum ia_css_frame_format format,
+ unsigned int padded_width,
+ unsigned int raw_bit_depth)
+{
+ enum ia_css_err err = IA_CSS_SUCCESS;
+
+ if (frame == NULL || width == 0 || height == 0)
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+#ifndef ISP2401
+ "ia_css_frame_allocate() enter: width=%d, height=%d, format=%d\n",
+ width, height, format);
+#else
+ "ia_css_frame_allocate() enter: width=%d, height=%d, format=%d, padded_width=%d, raw_bit_depth=%d\n",
+ width, height, format, padded_width, raw_bit_depth);
+#endif
+
+ err = frame_allocate_with_data(frame, width, height, format,
+ padded_width, raw_bit_depth, false);
+
+#ifndef ISP2401
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_frame_allocate() leave: frame=%p\n", *frame);
+#else
+ if ((*frame != NULL) && err == IA_CSS_SUCCESS)
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_frame_allocate() leave: frame=%p, data(DDR address)=0x%x\n", *frame, (*frame)->data);
+ else
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_frame_allocate() leave: frame=%p, data(DDR address)=0x%x\n",
+ (void *)-1, (unsigned int)-1);
+#endif
+
+ return err;
+}
+
+enum ia_css_err ia_css_frame_map(struct ia_css_frame **frame,
+ const struct ia_css_frame_info *info,
+ const void *data,
+ uint16_t attribute,
+ void *context)
+{
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ struct ia_css_frame *me;
+ assert(frame != NULL);
+
+ /* Create the frame structure */
+ err = ia_css_frame_create_from_info(&me, info);
+
+ if (err != IA_CSS_SUCCESS)
+ return err;
+
+ if (err == IA_CSS_SUCCESS) {
+ /* use mmgr_mmap to map */
+ me->data = (ia_css_ptr) mmgr_mmap(data,
+ me->data_bytes,
+ attribute, context);
+ if (me->data == mmgr_NULL)
+ err = IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+
+ if (err != IA_CSS_SUCCESS) {
+ sh_css_free(me);
+#ifndef ISP2401
+ return err;
+#else
+ me = NULL;
+#endif
+ }
+
+ *frame = me;
+
+ return err;
+}
+
+enum ia_css_err ia_css_frame_create_from_info(struct ia_css_frame **frame,
+ const struct ia_css_frame_info *info)
+{
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ struct ia_css_frame *me;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_frame_create_from_info() enter:\n");
+ if (frame == NULL || info == NULL) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_frame_create_from_info() leave:"
+ " invalid arguments\n");
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+
+ me = frame_create(info->res.width,
+ info->res.height,
+ info->format,
+ info->padded_width,
+ info->raw_bit_depth,
+ false,
+ false);
+ if (me == NULL) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_frame_create_from_info() leave:"
+ " frame create failed\n");
+ return IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+ }
+
+ err = ia_css_frame_init_planes(me);
+
+#ifndef ISP2401
+ if (err == IA_CSS_SUCCESS)
+ *frame = me;
+ else
+#else
+ if (err != IA_CSS_SUCCESS) {
+#endif
+ sh_css_free(me);
+#ifdef ISP2401
+ me = NULL;
+ }
+
+ *frame = me;
+#endif
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_frame_create_from_info() leave:\n");
+
+ return err;
+}
+
+enum ia_css_err ia_css_frame_set_data(struct ia_css_frame *frame,
+ const ia_css_ptr mapped_data,
+ size_t data_bytes)
+{
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_frame_set_data() enter:\n");
+ if (frame == NULL) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_frame_set_data() leave: NULL frame\n");
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+
+ /* If valid data is being set, make sure that there is
+ * enough room for the expected frame format.
+ */
+ if ((mapped_data != mmgr_NULL) && (frame->data_bytes > data_bytes)) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_frame_set_data() leave: invalid arguments\n");
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+
+ frame->data = mapped_data;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_frame_set_data() leave:\n");
+
+ return err;
+}
+
+enum ia_css_err ia_css_frame_allocate_contiguous(struct ia_css_frame **frame,
+ unsigned int width,
+ unsigned int height,
+ enum ia_css_frame_format format,
+ unsigned int padded_width,
+ unsigned int raw_bit_depth)
+{
+ enum ia_css_err err = IA_CSS_SUCCESS;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_frame_allocate_contiguous() "
+#ifndef ISP2401
+ "enter: width=%d, height=%d, format=%d\n",
+ width, height, format);
+#else
+ "enter: width=%d, height=%d, format=%d, padded_width=%d, raw_bit_depth=%d\n",
+ width, height, format, padded_width, raw_bit_depth);
+#endif
+
+ err = frame_allocate_with_data(frame, width, height, format,
+ padded_width, raw_bit_depth, true);
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_frame_allocate_contiguous() leave: frame=%p\n",
+ frame ? *frame : (void *)-1);
+
+ return err;
+}
+
+enum ia_css_err ia_css_frame_allocate_contiguous_from_info(
+ struct ia_css_frame **frame,
+ const struct ia_css_frame_info *info)
+{
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ assert(frame != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_frame_allocate_contiguous_from_info() enter:\n");
+ err = ia_css_frame_allocate_contiguous(frame,
+ info->res.width,
+ info->res.height,
+ info->format,
+ info->padded_width,
+ info->raw_bit_depth);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_frame_allocate_contiguous_from_info() leave:\n");
+ return err;
+}
+
+void ia_css_frame_free(struct ia_css_frame *frame)
+{
+ IA_CSS_ENTER_PRIVATE("frame = %p", frame);
+
+ if (frame != NULL) {
+ hmm_free(frame->data);
+ sh_css_free(frame);
+ }
+
+ IA_CSS_LEAVE_PRIVATE("void");
+}
+
+/**************************************************************************
+** Module public functions
+**************************************************************************/
+
+enum ia_css_err ia_css_frame_check_info(const struct ia_css_frame_info *info)
+{
+ assert(info != NULL);
+ if (info->res.width == 0 || info->res.height == 0)
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ return IA_CSS_SUCCESS;
+}
+
+enum ia_css_err ia_css_frame_init_planes(struct ia_css_frame *frame)
+{
+ assert(frame != NULL);
+
+ switch (frame->info.format) {
+ case IA_CSS_FRAME_FORMAT_MIPI:
+ frame_init_mipi_plane(frame, &frame->planes.raw,
+ frame->info.res.height,
+ frame->info.padded_width,
+ frame->info.raw_bit_depth <= 8 ? 1 : 2);
+ break;
+ case IA_CSS_FRAME_FORMAT_RAW_PACKED:
+ frame_init_raw_single_plane(frame, &frame->planes.raw,
+ frame->info.res.height,
+ frame->info.padded_width,
+ frame->info.raw_bit_depth);
+ break;
+ case IA_CSS_FRAME_FORMAT_RAW:
+ frame_init_single_plane(frame, &frame->planes.raw,
+ frame->info.res.height,
+ frame->info.padded_width,
+ frame->info.raw_bit_depth <= 8 ? 1 : 2);
+ break;
+ case IA_CSS_FRAME_FORMAT_RGB565:
+ frame_init_single_plane(frame, &frame->planes.rgb,
+ frame->info.res.height,
+ frame->info.padded_width, 2);
+ break;
+ case IA_CSS_FRAME_FORMAT_RGBA888:
+ frame_init_single_plane(frame, &frame->planes.rgb,
+ frame->info.res.height,
+ frame->info.padded_width * 4, 1);
+ break;
+ case IA_CSS_FRAME_FORMAT_PLANAR_RGB888:
+ frame_init_rgb_planes(frame, 1);
+ break;
+ /* yuyv and uyvy have the same frame layout, only the data
+ * positioning differs.
+ */
+ case IA_CSS_FRAME_FORMAT_YUYV:
+ case IA_CSS_FRAME_FORMAT_UYVY:
+ case IA_CSS_FRAME_FORMAT_CSI_MIPI_YUV420_8:
+ case IA_CSS_FRAME_FORMAT_CSI_MIPI_LEGACY_YUV420_8:
+ frame_init_single_plane(frame, &frame->planes.yuyv,
+ frame->info.res.height,
+ frame->info.padded_width * 2, 1);
+ break;
+ case IA_CSS_FRAME_FORMAT_YUV_LINE:
+ /* Needs 3 extra lines to allow vf_pp prefetching */
+ frame_init_single_plane(frame, &frame->planes.yuyv,
+ frame->info.res.height * 3 / 2 + 3,
+ frame->info.padded_width, 1);
+ break;
+ case IA_CSS_FRAME_FORMAT_NV11:
+ frame_init_nv_planes(frame, 4, 1, 1);
+ break;
+ /* nv12 and nv21 have the same frame layout, only the data
+ * positioning differs.
+ */
+ case IA_CSS_FRAME_FORMAT_NV12:
+ case IA_CSS_FRAME_FORMAT_NV21:
+ case IA_CSS_FRAME_FORMAT_NV12_TILEY:
+ frame_init_nv_planes(frame, 2, 2, 1);
+ break;
+ case IA_CSS_FRAME_FORMAT_NV12_16:
+ frame_init_nv_planes(frame, 2, 2, 2);
+ break;
+ /* nv16 and nv61 have the same frame layout, only the data
+ * positioning differs.
+ */
+ case IA_CSS_FRAME_FORMAT_NV16:
+ case IA_CSS_FRAME_FORMAT_NV61:
+ frame_init_nv_planes(frame, 2, 1, 1);
+ break;
+ case IA_CSS_FRAME_FORMAT_YUV420:
+ frame_init_yuv_planes(frame, 2, 2, false, 1);
+ break;
+ case IA_CSS_FRAME_FORMAT_YUV422:
+ frame_init_yuv_planes(frame, 2, 1, false, 1);
+ break;
+ case IA_CSS_FRAME_FORMAT_YUV444:
+ frame_init_yuv_planes(frame, 1, 1, false, 1);
+ break;
+ case IA_CSS_FRAME_FORMAT_YUV420_16:
+ frame_init_yuv_planes(frame, 2, 2, false, 2);
+ break;
+ case IA_CSS_FRAME_FORMAT_YUV422_16:
+ frame_init_yuv_planes(frame, 2, 1, false, 2);
+ break;
+ case IA_CSS_FRAME_FORMAT_YV12:
+ frame_init_yuv_planes(frame, 2, 2, true, 1);
+ break;
+ case IA_CSS_FRAME_FORMAT_YV16:
+ frame_init_yuv_planes(frame, 2, 1, true, 1);
+ break;
+ case IA_CSS_FRAME_FORMAT_QPLANE6:
+ frame_init_qplane6_planes(frame);
+ break;
+ case IA_CSS_FRAME_FORMAT_BINARY_8:
+ frame_init_single_plane(frame, &frame->planes.binary.data,
+ frame->info.res.height,
+ frame->info.padded_width, 1);
+ frame->planes.binary.size = 0;
+ break;
+ default:
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+ return IA_CSS_SUCCESS;
+}
+
+void ia_css_frame_info_set_width(struct ia_css_frame_info *info,
+ unsigned int width,
+ unsigned int min_padded_width)
+{
+ unsigned int align;
+
+ IA_CSS_ENTER_PRIVATE("info = %p,width = %d, minimum padded width = %d",
+ info, width, min_padded_width);
+ if (info == NULL) {
+ IA_CSS_ERROR("NULL input parameter");
+ IA_CSS_LEAVE_PRIVATE("");
+ return;
+ }
+ if (min_padded_width > width)
+ align = min_padded_width;
+ else
+ align = width;
+
+ info->res.width = width;
+ /* Frames with U and V planes of 8 bits per pixel need to have
+ all planes aligned; this means double the alignment for the
+ Y plane when the horizontal decimation is 2. */
+ if (info->format == IA_CSS_FRAME_FORMAT_YUV420 ||
+ info->format == IA_CSS_FRAME_FORMAT_YV12 ||
+ info->format == IA_CSS_FRAME_FORMAT_NV12 ||
+ info->format == IA_CSS_FRAME_FORMAT_NV21 ||
+ info->format == IA_CSS_FRAME_FORMAT_BINARY_8 ||
+ info->format == IA_CSS_FRAME_FORMAT_YUV_LINE)
+ info->padded_width =
+ CEIL_MUL(align, 2 * HIVE_ISP_DDR_WORD_BYTES);
+ else if (info->format == IA_CSS_FRAME_FORMAT_NV12_TILEY)
+ info->padded_width = CEIL_MUL(align, NV12_TILEY_TILE_WIDTH);
+ else if (info->format == IA_CSS_FRAME_FORMAT_RAW ||
+ info->format == IA_CSS_FRAME_FORMAT_RAW_PACKED)
+ info->padded_width = CEIL_MUL(align, 2 * ISP_VEC_NELEMS);
+ else {
+ info->padded_width = CEIL_MUL(align, HIVE_ISP_DDR_WORD_BYTES);
+ }
+ IA_CSS_LEAVE_PRIVATE("");
+}
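+
+/* Worked example for the padding rule above (illustrative only; it assumes
+ * HIVE_ISP_DDR_WORD_BYTES == 32 and ISP_VEC_NELEMS == 64, which may differ
+ * per system). For width = 1300 and min_padded_width = 0:
+ *   NV12/YUV420 etc.: CEIL_MUL(1300, 2 * 32)  -> padded_width = 1344
+ *   NV12_TILEY:       CEIL_MUL(1300, 128)     -> padded_width = 1408
+ *   RAW/RAW_PACKED:   CEIL_MUL(1300, 2 * 64)  -> padded_width = 1408
+ *   default:          CEIL_MUL(1300, 32)      -> padded_width = 1312
+ */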
+
+void ia_css_frame_info_set_format(struct ia_css_frame_info *info,
+ enum ia_css_frame_format format)
+{
+ assert(info != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_frame_info_set_format() enter:\n");
+ info->format = format;
+}
+
+void ia_css_frame_info_init(struct ia_css_frame_info *info,
+ unsigned int width,
+ unsigned int height,
+ enum ia_css_frame_format format,
+ unsigned int aligned)
+{
+ IA_CSS_ENTER_PRIVATE("info = %p, width = %d, height = %d, format = %d, aligned = %d",
+ info, width, height, format, aligned);
+ if (info == NULL) {
+ IA_CSS_ERROR("NULL input parameter");
+ IA_CSS_LEAVE_PRIVATE("");
+ return;
+ }
+ info->res.height = height;
+ info->format = format;
+ ia_css_frame_info_set_width(info, width, aligned);
+ IA_CSS_LEAVE_PRIVATE("");
+}
+
+void ia_css_frame_free_multiple(unsigned int num_frames,
+ struct ia_css_frame **frames_array)
+{
+ unsigned int i;
+ for (i = 0; i < num_frames; i++) {
+ if (frames_array[i]) {
+ ia_css_frame_free(frames_array[i]);
+ frames_array[i] = NULL;
+ }
+ }
+}
+
+enum ia_css_err ia_css_frame_allocate_with_buffer_size(
+ struct ia_css_frame **frame,
+ const unsigned int buffer_size_bytes,
+ const bool contiguous)
+{
+ /* AM: Body copied from frame_allocate_with_data(). */
+ enum ia_css_err err;
+ struct ia_css_frame *me = frame_create(0, 0,
+ IA_CSS_FRAME_FORMAT_NUM, /* Not valid format yet */
+ 0, 0, contiguous, false);
+
+ if (me == NULL)
+ return IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+
+ /* Get the data size */
+ me->data_bytes = buffer_size_bytes;
+
+ err = frame_allocate_buffer_data(me);
+
+ if (err != IA_CSS_SUCCESS) {
+ sh_css_free(me);
+#ifndef ISP2401
+ return err;
+#else
+ me = NULL;
+#endif
+ }
+
+ *frame = me;
+
+ return err;
+}
+
+bool ia_css_frame_info_is_same_resolution(
+ const struct ia_css_frame_info *info_a,
+ const struct ia_css_frame_info *info_b)
+{
+ if (!info_a || !info_b)
+ return false;
+ return (info_a->res.width == info_b->res.width) &&
+ (info_a->res.height == info_b->res.height);
+}
+
+bool ia_css_frame_is_same_type(const struct ia_css_frame *frame_a,
+ const struct ia_css_frame *frame_b)
+{
+ bool is_equal = false;
+ const struct ia_css_frame_info *info_a = &frame_a->info,
+ *info_b = &frame_b->info;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_frame_is_same_type() enter:\n");
+
+ if (!info_a || !info_b)
+ return false;
+ if (info_a->format != info_b->format)
+ return false;
+ if (info_a->padded_width != info_b->padded_width)
+ return false;
+ is_equal = ia_css_frame_info_is_same_resolution(info_a, info_b);
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_frame_is_same_type() leave:\n");
+
+ return is_equal;
+}
+
+void
+ia_css_dma_configure_from_info(
+ struct dma_port_config *config,
+ const struct ia_css_frame_info *info)
+{
+ unsigned is_raw_packed = info->format == IA_CSS_FRAME_FORMAT_RAW_PACKED;
+ unsigned bits_per_pixel = is_raw_packed ? info->raw_bit_depth : ia_css_elems_bytes_from_info(info) * 8;
+ unsigned pix_per_ddrword = HIVE_ISP_DDR_WORD_BITS / bits_per_pixel;
+ unsigned words_per_line = CEIL_DIV(info->padded_width, pix_per_ddrword);
+ unsigned elems_b = pix_per_ddrword;
+
+ config->stride = HIVE_ISP_DDR_WORD_BYTES * words_per_line;
+ config->elems = (uint8_t)elems_b;
+ config->width = (uint16_t)info->res.width;
+ config->crop = 0;
+ assert(config->width <= info->padded_width);
+}
+
+/**************************************************************************
+** Static functions
+**************************************************************************/
+
+static void frame_init_plane(struct ia_css_frame_plane *plane,
+ unsigned int width,
+ unsigned int stride,
+ unsigned int height,
+ unsigned int offset)
+{
+ plane->height = height;
+ plane->width = width;
+ plane->stride = stride;
+ plane->offset = offset;
+}
+
+static void frame_init_single_plane(struct ia_css_frame *frame,
+ struct ia_css_frame_plane *plane,
+ unsigned int height,
+ unsigned int subpixels_per_line,
+ unsigned int bytes_per_pixel)
+{
+ unsigned int stride;
+
+ stride = subpixels_per_line * bytes_per_pixel;
+ /* The frame height needs to be an even number, as required by the
+ ISYS2401 hardware. If it is odd, round up to the next even number.
+ Image content is not affected by this round-up; it is only needed
+ for jpeg/embedded data. As long as buffer allocation and release
+ use data_bytes, this does not leak memory. */
+ frame->data_bytes = stride * CEIL_MUL2(height, 2);
+ frame_init_plane(plane, subpixels_per_line, stride, height, 0);
+ return;
+}
+
+static void frame_init_raw_single_plane(
+ struct ia_css_frame *frame,
+ struct ia_css_frame_plane *plane,
+ unsigned int height,
+ unsigned int subpixels_per_line,
+ unsigned int bits_per_pixel)
+{
+ unsigned int stride;
+ assert(frame != NULL);
+
+ stride = HIVE_ISP_DDR_WORD_BYTES *
+ CEIL_DIV(subpixels_per_line,
+ HIVE_ISP_DDR_WORD_BITS / bits_per_pixel);
+ frame->data_bytes = stride * height;
+ frame_init_plane(plane, subpixels_per_line, stride, height, 0);
+ return;
+}
+
+static void frame_init_mipi_plane(struct ia_css_frame *frame,
+ struct ia_css_frame_plane *plane,
+ unsigned int height,
+ unsigned int subpixels_per_line,
+ unsigned int bytes_per_pixel)
+{
+ unsigned int stride;
+
+ stride = subpixels_per_line * bytes_per_pixel;
+ frame->data_bytes = 8388608; /* 8*1024*1024 */
+ frame->valid = false;
+ frame->contiguous = true;
+ frame_init_plane(plane, subpixels_per_line, stride, height, 0);
+ return;
+}
+
+static void frame_init_nv_planes(struct ia_css_frame *frame,
+ unsigned int horizontal_decimation,
+ unsigned int vertical_decimation,
+ unsigned int bytes_per_element)
+{
+ unsigned int y_width = frame->info.padded_width;
+ unsigned int y_height = frame->info.res.height;
+ unsigned int uv_width;
+ unsigned int uv_height;
+ unsigned int y_bytes;
+ unsigned int uv_bytes;
+ unsigned int y_stride;
+ unsigned int uv_stride;
+
+ assert(horizontal_decimation != 0 && vertical_decimation != 0);
+
+ uv_width = 2 * (y_width / horizontal_decimation);
+ uv_height = y_height / vertical_decimation;
+
+ if (IA_CSS_FRAME_FORMAT_NV12_TILEY == frame->info.format) {
+ y_width = CEIL_MUL(y_width, NV12_TILEY_TILE_WIDTH);
+ uv_width = CEIL_MUL(uv_width, NV12_TILEY_TILE_WIDTH);
+ y_height = CEIL_MUL(y_height, NV12_TILEY_TILE_HEIGHT);
+ uv_height = CEIL_MUL(uv_height, NV12_TILEY_TILE_HEIGHT);
+ }
+
+ y_stride = y_width * bytes_per_element;
+ uv_stride = uv_width * bytes_per_element;
+ y_bytes = y_stride * y_height;
+ uv_bytes = uv_stride * uv_height;
+
+ frame->data_bytes = y_bytes + uv_bytes;
+ frame_init_plane(&frame->planes.nv.y, y_width, y_stride, y_height, 0);
+ frame_init_plane(&frame->planes.nv.uv, uv_width,
+ uv_stride, uv_height, y_bytes);
+ return;
+}
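+
+/* Worked example for frame_init_nv_planes() above with NV12 parameters
+ * (horizontal_decimation = 2, vertical_decimation = 2, bytes_per_element = 1),
+ * padded_width = 1920 and height = 1080:
+ *   uv_width   = 2 * (1920 / 2) = 1920  (interleaved U and V bytes per line)
+ *   uv_height  = 1080 / 2       = 540
+ *   data_bytes = 1920 * 1080 + 1920 * 540 = 3110400  (1.5 bytes per pixel)
+ * The UV plane starts at offset y_bytes, directly after the Y plane.
+ */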
+
+static void frame_init_yuv_planes(struct ia_css_frame *frame,
+ unsigned int horizontal_decimation,
+ unsigned int vertical_decimation,
+ bool swap_uv,
+ unsigned int bytes_per_element)
+{
+ unsigned int y_width = frame->info.padded_width,
+ y_height = frame->info.res.height,
+ uv_width = y_width / horizontal_decimation,
+ uv_height = y_height / vertical_decimation,
+ y_stride, y_bytes, uv_bytes, uv_stride;
+
+ y_stride = y_width * bytes_per_element;
+ uv_stride = uv_width * bytes_per_element;
+ y_bytes = y_stride * y_height;
+ uv_bytes = uv_stride * uv_height;
+
+ frame->data_bytes = y_bytes + 2 * uv_bytes;
+ frame_init_plane(&frame->planes.yuv.y, y_width, y_stride, y_height, 0);
+ if (swap_uv) {
+ frame_init_plane(&frame->planes.yuv.v, uv_width, uv_stride,
+ uv_height, y_bytes);
+ frame_init_plane(&frame->planes.yuv.u, uv_width, uv_stride,
+ uv_height, y_bytes + uv_bytes);
+ } else {
+ frame_init_plane(&frame->planes.yuv.u, uv_width, uv_stride,
+ uv_height, y_bytes);
+ frame_init_plane(&frame->planes.yuv.v, uv_width, uv_stride,
+ uv_height, y_bytes + uv_bytes);
+ }
+ return;
+}
+
+static void frame_init_rgb_planes(struct ia_css_frame *frame,
+ unsigned int bytes_per_element)
+{
+ unsigned int width = frame->info.res.width,
+ height = frame->info.res.height, stride, bytes;
+
+ stride = width * bytes_per_element;
+ bytes = stride * height;
+ frame->data_bytes = 3 * bytes;
+ frame_init_plane(&frame->planes.planar_rgb.r, width, stride, height, 0);
+ frame_init_plane(&frame->planes.planar_rgb.g,
+ width, stride, height, 1 * bytes);
+ frame_init_plane(&frame->planes.planar_rgb.b,
+ width, stride, height, 2 * bytes);
+ return;
+}
+
+static void frame_init_qplane6_planes(struct ia_css_frame *frame)
+{
+ unsigned int width = frame->info.padded_width / 2,
+ height = frame->info.res.height / 2, bytes, stride;
+
+ stride = width * 2;
+ bytes = stride * height;
+
+ frame->data_bytes = 6 * bytes;
+ frame_init_plane(&frame->planes.plane6.r,
+ width, stride, height, 0 * bytes);
+ frame_init_plane(&frame->planes.plane6.r_at_b,
+ width, stride, height, 1 * bytes);
+ frame_init_plane(&frame->planes.plane6.gr,
+ width, stride, height, 2 * bytes);
+ frame_init_plane(&frame->planes.plane6.gb,
+ width, stride, height, 3 * bytes);
+ frame_init_plane(&frame->planes.plane6.b,
+ width, stride, height, 4 * bytes);
+ frame_init_plane(&frame->planes.plane6.b_at_r,
+ width, stride, height, 5 * bytes);
+ return;
+}
+
+static enum ia_css_err frame_allocate_buffer_data(struct ia_css_frame *frame)
+{
+#ifdef ISP2401
+ IA_CSS_ENTER_LEAVE_PRIVATE("frame->data_bytes=%d\n", frame->data_bytes);
+#endif
+ frame->data = mmgr_alloc_attr(frame->data_bytes,
+ frame->contiguous ?
+ MMGR_ATTRIBUTE_CONTIGUOUS :
+ MMGR_ATTRIBUTE_DEFAULT);
+
+ if (frame->data == mmgr_NULL)
+ return IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+ return IA_CSS_SUCCESS;
+}
+
+static enum ia_css_err frame_allocate_with_data(struct ia_css_frame **frame,
+ unsigned int width,
+ unsigned int height,
+ enum ia_css_frame_format format,
+ unsigned int padded_width,
+ unsigned int raw_bit_depth,
+ bool contiguous)
+{
+ enum ia_css_err err;
+ struct ia_css_frame *me = frame_create(width,
+ height,
+ format,
+ padded_width,
+ raw_bit_depth,
+ contiguous,
+ true);
+
+ if (me == NULL)
+ return IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+
+ err = ia_css_frame_init_planes(me);
+
+ if (err == IA_CSS_SUCCESS)
+ err = frame_allocate_buffer_data(me);
+
+ if (err != IA_CSS_SUCCESS) {
+ sh_css_free(me);
+#ifndef ISP2401
+ return err;
+#else
+ me = NULL;
+#endif
+ }
+
+ *frame = me;
+
+ return err;
+}
+
+static struct ia_css_frame *frame_create(unsigned int width,
+ unsigned int height,
+ enum ia_css_frame_format format,
+ unsigned int padded_width,
+ unsigned int raw_bit_depth,
+ bool contiguous,
+ bool valid)
+{
+ struct ia_css_frame *me = sh_css_malloc(sizeof(*me));
+
+ if (me == NULL)
+ return NULL;
+
+ memset(me, 0, sizeof(*me));
+ me->info.res.width = width;
+ me->info.res.height = height;
+ me->info.format = format;
+ me->info.padded_width = padded_width;
+ me->info.raw_bit_depth = raw_bit_depth;
+ me->contiguous = contiguous;
+ me->valid = valid;
+ me->data_bytes = 0;
+ me->data = mmgr_NULL;
+ /* To indicate it is not valid frame. */
+ me->dynamic_queue_id = (int)SH_CSS_INVALID_QUEUE_ID;
+ me->buf_type = IA_CSS_BUFFER_TYPE_INVALID;
+
+ return me;
+}
+
+static unsigned
+ia_css_elems_bytes_from_info(const struct ia_css_frame_info *info)
+{
+ if (info->format == IA_CSS_FRAME_FORMAT_RGB565)
+ return 2; /* bytes per pixel */
+ if (info->format == IA_CSS_FRAME_FORMAT_YUV420_16)
+ return 2; /* bytes per pixel */
+ if (info->format == IA_CSS_FRAME_FORMAT_YUV422_16)
+ return 2; /* bytes per pixel */
+ /* Note: NV12_16 is essentially a 2-bytes-per-pixel format. This return
+ * value is used to configure the DMA for the output buffer.
+ * At least on SKC this data is overwritten by isp_output_init.sp.c, except
+ * for the number of elements (elems), which is configured from this return
+ * value. NV12_16 is implemented by a double buffer of 8-bit elements,
+ * hence elems should be configured as 8. */
+ if (info->format == IA_CSS_FRAME_FORMAT_NV12_16)
+ return 1; /* bytes per pixel */
+
+ if (info->format == IA_CSS_FRAME_FORMAT_RAW
+ || (info->format == IA_CSS_FRAME_FORMAT_RAW_PACKED)) {
+ if (info->raw_bit_depth)
+ return CEIL_DIV(info->raw_bit_depth, 8);
+ else
+ return 2; /* bytes per pixel */
+ }
+ if (info->format == IA_CSS_FRAME_FORMAT_PLANAR_RGB888)
+ return 3; /* bytes per pixel */
+ if (info->format == IA_CSS_FRAME_FORMAT_RGBA888)
+ return 4; /* bytes per pixel */
+ if (info->format == IA_CSS_FRAME_FORMAT_QPLANE6)
+ return 2; /* bytes per pixel */
+ return 1; /* Default is 1 byte per pixel */
+}
+
+void ia_css_frame_info_to_frame_sp_info(
+ struct ia_css_frame_sp_info *to,
+ const struct ia_css_frame_info *from)
+{
+ ia_css_resolution_to_sp_resolution(&to->res, &from->res);
+ to->padded_width = (uint16_t)from->padded_width;
+ to->format = (uint8_t)from->format;
+ to->raw_bit_depth = (uint8_t)from->raw_bit_depth;
+ to->raw_bayer_order = from->raw_bayer_order;
+}
+
+void ia_css_resolution_to_sp_resolution(
+ struct ia_css_sp_resolution *to,
+ const struct ia_css_resolution *from)
+{
+ to->width = (uint16_t)from->width;
+ to->height = (uint16_t)from->height;
+}
+#ifdef ISP2401
+
+enum ia_css_err
+ia_css_frame_find_crop_resolution(const struct ia_css_resolution *in_res,
+ const struct ia_css_resolution *out_res,
+ struct ia_css_resolution *crop_res)
+{
+ uint32_t wd_even_ceil, ht_even_ceil;
+ uint32_t in_ratio, out_ratio;
+
+ if ((in_res == NULL) || (out_res == NULL) || (crop_res == NULL))
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+
+ IA_CSS_ENTER_PRIVATE("in(%ux%u) -> out(%ux%u)", in_res->width,
+ in_res->height, out_res->width, out_res->height);
+
+ if ((in_res->width == 0)
+ || (in_res->height == 0)
+ || (out_res->width == 0)
+ || (out_res->height == 0))
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+
+ if ((out_res->width > in_res->width) ||
+ (out_res->height > in_res->height))
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+
+ /* If aspect ratio (width/height) of out_res is higher than the aspect
+ * ratio of the in_res, then we crop vertically, otherwise we crop
+ * horizontally.
+ */
+ in_ratio = in_res->width * out_res->height;
+ out_ratio = out_res->width * in_res->height;
+
+ if (in_ratio == out_ratio) {
+ crop_res->width = in_res->width;
+ crop_res->height = in_res->height;
+ } else if (out_ratio > in_ratio) {
+ crop_res->width = in_res->width;
+ crop_res->height = ROUND_DIV(out_res->height * crop_res->width,
+ out_res->width);
+ } else {
+ crop_res->height = in_res->height;
+ crop_res->width = ROUND_DIV(out_res->width * crop_res->height,
+ out_res->height);
+ }
+
+ /* Round the new (cropped) width and height to even numbers.
+ * binarydesc_calculate_bds_factor is such that we should consider as
+ * much of the input as possible. This matters only when we end up
+ * with an odd number in the last step, in which case we take the next
+ * even number if it still falls within the input, and otherwise the
+ * previous even number.
+ */
+ wd_even_ceil = EVEN_CEIL(crop_res->width);
+ ht_even_ceil = EVEN_CEIL(crop_res->height);
+ if ((wd_even_ceil > in_res->width) || (ht_even_ceil > in_res->height)) {
+ crop_res->width = EVEN_FLOOR(crop_res->width);
+ crop_res->height = EVEN_FLOOR(crop_res->height);
+ } else {
+ crop_res->width = wd_even_ceil;
+ crop_res->height = ht_even_ceil;
+ }
+
+ IA_CSS_LEAVE_PRIVATE("in(%ux%u) -> out(%ux%u)", crop_res->width,
+ crop_res->height, out_res->width, out_res->height);
+ return IA_CSS_SUCCESS;
+}
+#endif
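+
+/* Worked example for ia_css_frame_find_crop_resolution() above (ISP2401 only;
+ * the numbers are illustrative): for in_res = 4160x3120 and out_res =
+ * 1920x1080, in_ratio = 4160 * 1080 = 4492800 and out_ratio = 1920 * 3120 =
+ * 5990400. Since out_ratio > in_ratio the image is cropped vertically:
+ * crop_res->width = 4160 and crop_res->height = ROUND_DIV(1080 * 4160, 1920)
+ * = 2340. Both values are already even, so the EVEN_CEIL pair (4160x2340)
+ * falls within the input and is returned.
+ */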
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/ifmtr/interface/ia_css_ifmtr.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/ifmtr/interface/ia_css_ifmtr.h
new file mode 100644
index 000000000000..d02bff1bbf46
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/ifmtr/interface/ia_css_ifmtr.h
@@ -0,0 +1,49 @@
+#ifndef ISP2401
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#else
+/**
+Support for Intel Camera Imaging ISP subsystem.
+Copyright (c) 2010 - 2015, Intel Corporation.
+
+This program is free software; you can redistribute it and/or modify it
+under the terms and conditions of the GNU General Public License,
+version 2, as published by the Free Software Foundation.
+
+This program is distributed in the hope it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+more details.
+*/
+#endif
+
+#ifndef __IA_CSS_IFMTR_H__
+#define __IA_CSS_IFMTR_H__
+
+#include <type_support.h>
+#include <ia_css_stream_public.h>
+#include <ia_css_binary.h>
+
+extern bool ifmtr_set_if_blocking_mode_reset;
+
+unsigned int ia_css_ifmtr_lines_needed_for_bayer_order(
+ const struct ia_css_stream_config *config);
+
+unsigned int ia_css_ifmtr_columns_needed_for_bayer_order(
+ const struct ia_css_stream_config *config);
+
+enum ia_css_err ia_css_ifmtr_configure(struct ia_css_stream_config *config,
+ struct ia_css_binary *binary);
+
+#endif /* __IA_CSS_IFMTR_H__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/ifmtr/src/ifmtr.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/ifmtr/src/ifmtr.c
new file mode 100644
index 000000000000..a7c6bba7e094
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/ifmtr/src/ifmtr.c
@@ -0,0 +1,568 @@
+#ifndef ISP2401
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#else
+/**
+Support for Intel Camera Imaging ISP subsystem.
+Copyright (c) 2010 - 2015, Intel Corporation.
+
+This program is free software; you can redistribute it and/or modify it
+under the terms and conditions of the GNU General Public License,
+version 2, as published by the Free Software Foundation.
+
+This program is distributed in the hope it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+more details.
+*/
+#endif
+
+#include "system_global.h"
+
+#ifdef USE_INPUT_SYSTEM_VERSION_2
+
+#include "ia_css_ifmtr.h"
+#include <math_support.h>
+#include "sh_css_internal.h"
+#include "input_formatter.h"
+#include "assert_support.h"
+#include "sh_css_sp.h"
+#include "isp/modes/interface/input_buf.isp.h"
+
+/************************************************************
+ * Static functions declarations
+ ************************************************************/
+static enum ia_css_err ifmtr_start_column(
+ const struct ia_css_stream_config *config,
+ unsigned int bin_in,
+ unsigned int *start_column);
+
+static enum ia_css_err ifmtr_input_start_line(
+ const struct ia_css_stream_config *config,
+ unsigned int bin_in,
+ unsigned int *start_line);
+
+static void ifmtr_set_if_blocking_mode(
+ const input_formatter_cfg_t * const config_a,
+ const input_formatter_cfg_t * const config_b);
+
+/************************************************************
+ * Public functions
+ ************************************************************/
+
+/* The ISP expects GRBG bayer order; we skip one line and/or one column
+ * to correct for it in case the input bayer order is different.
+ */
+unsigned int ia_css_ifmtr_lines_needed_for_bayer_order(
+ const struct ia_css_stream_config *config)
+{
+ assert(config != NULL);
+ if ((IA_CSS_BAYER_ORDER_BGGR == config->input_config.bayer_order)
+ || (IA_CSS_BAYER_ORDER_GBRG == config->input_config.bayer_order))
+ return 1;
+
+ return 0;
+}
+
+unsigned int ia_css_ifmtr_columns_needed_for_bayer_order(
+ const struct ia_css_stream_config *config)
+{
+ assert(config != NULL);
+ if ((IA_CSS_BAYER_ORDER_RGGB == config->input_config.bayer_order)
+ || (IA_CSS_BAYER_ORDER_GBRG == config->input_config.bayer_order))
+ return 1;
+
+ return 0;
+}
+
+enum ia_css_err ia_css_ifmtr_configure(struct ia_css_stream_config *config,
+ struct ia_css_binary *binary)
+{
+ unsigned int start_line, start_column = 0,
+ cropped_height,
+ cropped_width,
+ num_vectors,
+ buffer_height = 2,
+ buffer_width,
+ two_ppc,
+ vmem_increment = 0,
+ deinterleaving = 0,
+ deinterleaving_b = 0,
+ width_a = 0,
+ width_b = 0,
+ bits_per_pixel,
+ vectors_per_buffer,
+ vectors_per_line = 0,
+ buffers_per_line = 0,
+ buf_offset_a = 0,
+ buf_offset_b = 0,
+ line_width = 0,
+ width_b_factor = 1, start_column_b,
+ left_padding = 0;
+ input_formatter_cfg_t if_a_config, if_b_config;
+ enum ia_css_stream_format input_format;
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ uint8_t if_config_index;
+
+ /* Determine which input formatter config set is targeted. */
+ /* Index is equal to the CSI-2 port used. */
+ enum ia_css_csi2_port port;
+
+ if (binary) {
+ cropped_height = binary->in_frame_info.res.height;
+ cropped_width = binary->in_frame_info.res.width;
+ /* This should correspond to the input buffer definition for
+ ISP binaries in input_buf.isp.h */
+ if (binary->info->sp.enable.continuous && binary->info->sp.pipeline.mode != IA_CSS_BINARY_MODE_COPY)
+ buffer_width = MAX_VECTORS_PER_INPUT_LINE_CONT * ISP_VEC_NELEMS;
+ else
+ buffer_width = binary->info->sp.input.max_width;
+ input_format = binary->input_format;
+ } else {
+ /* sp raw copy pipe (IA_CSS_PIPE_MODE_COPY): binary is NULL */
+ cropped_height = config->input_config.input_res.height;
+ cropped_width = config->input_config.input_res.width;
+ buffer_width = MAX_VECTORS_PER_INPUT_LINE_CONT * ISP_VEC_NELEMS;
+ input_format = config->input_config.format;
+ }
+ two_ppc = config->pixels_per_clock == 2;
+ if (config->mode == IA_CSS_INPUT_MODE_SENSOR
+ || config->mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR) {
+ port = config->source.port.port;
+ if_config_index = (uint8_t) (port - IA_CSS_CSI2_PORT0);
+ } else if (config->mode == IA_CSS_INPUT_MODE_MEMORY) {
+ if_config_index = SH_CSS_IF_CONFIG_NOT_NEEDED;
+ } else {
+ if_config_index = 0;
+ }
+
+ assert(if_config_index <= SH_CSS_MAX_IF_CONFIGS
+ || if_config_index == SH_CSS_IF_CONFIG_NOT_NEEDED);
+
+ /* TODO: check whether the input is RAW and whether the current mode
+ * interprets RAW data in any particular bayer order. A copy binary with
+ * an output format other than raw should not result in dropped lines
+ * and/or columns.
+ */
+ err = ifmtr_input_start_line(config, cropped_height, &start_line);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+ err = ifmtr_start_column(config, cropped_width, &start_column);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+
+ if (config->left_padding == -1) {
+ if (!binary)
+ /* sp raw copy pipe: set left_padding value */
+ left_padding = 0;
+ else
+ left_padding = binary->left_padding;
+ } else {
+ left_padding = 2 * ISP_VEC_NELEMS - config->left_padding;
+ }
+
+ if (left_padding) {
+ num_vectors = CEIL_DIV(cropped_width + left_padding,
+ ISP_VEC_NELEMS);
+ } else {
+ num_vectors = CEIL_DIV(cropped_width, ISP_VEC_NELEMS);
+ num_vectors *= buffer_height;
+ /* todo: in case of left padding,
+ num_vectors is vectors per line,
+ otherwise vectors per line * buffer_height. */
+ }
+
+ start_column_b = start_column;
+
+ bits_per_pixel = input_formatter_get_alignment(INPUT_FORMATTER0_ID)
+ * 8 / ISP_VEC_NELEMS;
+ switch (input_format) {
+ case IA_CSS_STREAM_FORMAT_YUV420_8_LEGACY:
+ if (two_ppc) {
+ vmem_increment = 1;
+ deinterleaving = 1;
+ deinterleaving_b = 1;
+ /* half lines */
+ width_a = cropped_width * deinterleaving / 2;
+ width_b_factor = 2;
+ /* full lines */
+ width_b = width_a * width_b_factor;
+ buffer_width *= deinterleaving * 2;
+ /* Patch from bayer to yuv */
+ num_vectors *= deinterleaving;
+ buf_offset_b = buffer_width / 2 / ISP_VEC_NELEMS;
+ vectors_per_line = num_vectors / buffer_height;
+ /* Even lines are half size */
+ line_width = vectors_per_line *
+ input_formatter_get_alignment(INPUT_FORMATTER0_ID) /
+ 2;
+ start_column /= 2;
+ } else {
+ vmem_increment = 1;
+ deinterleaving = 3;
+ width_a = cropped_width * deinterleaving / 2;
+ buffer_width = buffer_width * deinterleaving / 2;
+ /* Patch from bayer to yuv */
+ num_vectors = num_vectors / 2 * deinterleaving;
+ start_column = start_column * deinterleaving / 2;
+ }
+ break;
+ case IA_CSS_STREAM_FORMAT_YUV420_8:
+ case IA_CSS_STREAM_FORMAT_YUV420_10:
+ case IA_CSS_STREAM_FORMAT_YUV420_16:
+ if (two_ppc) {
+ vmem_increment = 1;
+ deinterleaving = 1;
+ width_a = width_b = cropped_width * deinterleaving / 2;
+ buffer_width *= deinterleaving * 2;
+ num_vectors *= deinterleaving;
+ buf_offset_b = buffer_width / 2 / ISP_VEC_NELEMS;
+ vectors_per_line = num_vectors / buffer_height;
+ /* Even lines are half size */
+ line_width = vectors_per_line *
+ input_formatter_get_alignment(INPUT_FORMATTER0_ID) /
+ 2;
+ start_column *= deinterleaving;
+ start_column /= 2;
+ start_column_b = start_column;
+ } else {
+ vmem_increment = 1;
+ deinterleaving = 1;
+ width_a = cropped_width * deinterleaving;
+ buffer_width *= deinterleaving * 2;
+ num_vectors *= deinterleaving;
+ start_column *= deinterleaving;
+ }
+ break;
+ case IA_CSS_STREAM_FORMAT_YUV422_8:
+ case IA_CSS_STREAM_FORMAT_YUV422_10:
+ case IA_CSS_STREAM_FORMAT_YUV422_16:
+ if (two_ppc) {
+ vmem_increment = 1;
+ deinterleaving = 1;
+ width_a = width_b = cropped_width * deinterleaving;
+ buffer_width *= deinterleaving * 2;
+ num_vectors *= deinterleaving;
+ start_column *= deinterleaving;
+ buf_offset_b = buffer_width / 2 / ISP_VEC_NELEMS;
+ start_column_b = start_column;
+ } else {
+ vmem_increment = 1;
+ deinterleaving = 2;
+ width_a = cropped_width * deinterleaving;
+ buffer_width *= deinterleaving;
+ num_vectors *= deinterleaving;
+ start_column *= deinterleaving;
+ }
+ break;
+ case IA_CSS_STREAM_FORMAT_RGB_444:
+ case IA_CSS_STREAM_FORMAT_RGB_555:
+ case IA_CSS_STREAM_FORMAT_RGB_565:
+ case IA_CSS_STREAM_FORMAT_RGB_666:
+ case IA_CSS_STREAM_FORMAT_RGB_888:
+ num_vectors *= 2;
+ if (two_ppc) {
+ deinterleaving = 2; /* BR in if_a, G in if_b */
+ deinterleaving_b = 1; /* BR in if_a, G in if_b */
+ buffers_per_line = 4;
+ start_column_b = start_column;
+ start_column *= deinterleaving;
+ start_column_b *= deinterleaving_b;
+ } else {
+ deinterleaving = 3; /* BGR */
+ buffers_per_line = 3;
+ start_column *= deinterleaving;
+ }
+ vmem_increment = 1;
+ width_a = cropped_width * deinterleaving;
+ width_b = cropped_width * deinterleaving_b;
+ buffer_width *= buffers_per_line;
+ /* Patch from bayer to rgb */
+ num_vectors = num_vectors / 2 * deinterleaving;
+ buf_offset_b = buffer_width / 2 / ISP_VEC_NELEMS;
+ break;
+ case IA_CSS_STREAM_FORMAT_RAW_6:
+ case IA_CSS_STREAM_FORMAT_RAW_7:
+ case IA_CSS_STREAM_FORMAT_RAW_8:
+ case IA_CSS_STREAM_FORMAT_RAW_10:
+ case IA_CSS_STREAM_FORMAT_RAW_12:
+ if (two_ppc) {
+ int crop_col = (start_column % 2) == 1;
+ vmem_increment = 2;
+ deinterleaving = 1;
+ width_a = width_b = cropped_width / 2;
+
+ /* When two_ppc is enabled AND we need to crop one extra
+ * column, if_a crops by one extra and we swap the
+ * output offsets to interleave the bayer pattern in
+ * the correct order.
+ */
+ buf_offset_a = crop_col ? 1 : 0;
+ buf_offset_b = crop_col ? 0 : 1;
+ start_column_b = start_column / 2;
+ start_column = start_column / 2 + crop_col;
+ } else {
+ vmem_increment = 1;
+ deinterleaving = 2;
+ if ((!binary) || (config->continuous && binary
+ && binary->info->sp.pipeline.mode == IA_CSS_BINARY_MODE_COPY)) {
+ /* !binary -> sp raw copy pipe, no deinterleaving */
+ deinterleaving = 1;
+ }
+ width_a = cropped_width;
+ /* Must be multiple of deinterleaving */
+ num_vectors = CEIL_MUL(num_vectors, deinterleaving);
+ }
+ buffer_height *= 2;
+ if ((!binary) || config->continuous)
+ /* !binary -> sp raw copy pipe */
+ buffer_height *= 2;
+ vectors_per_line = CEIL_DIV(cropped_width, ISP_VEC_NELEMS);
+ vectors_per_line = CEIL_MUL(vectors_per_line, deinterleaving);
+ break;
+ case IA_CSS_STREAM_FORMAT_RAW_14:
+ case IA_CSS_STREAM_FORMAT_RAW_16:
+ if (two_ppc) {
+ num_vectors *= 2;
+ vmem_increment = 1;
+ deinterleaving = 2;
+ width_a = width_b = cropped_width;
+ /* B buffer is one line further */
+ buf_offset_b = buffer_width / ISP_VEC_NELEMS;
+ bits_per_pixel *= 2;
+ } else {
+ vmem_increment = 1;
+ deinterleaving = 2;
+ width_a = cropped_width;
+ start_column /= deinterleaving;
+ }
+ buffer_height *= 2;
+ break;
+ case IA_CSS_STREAM_FORMAT_BINARY_8:
+ case IA_CSS_STREAM_FORMAT_GENERIC_SHORT1:
+ case IA_CSS_STREAM_FORMAT_GENERIC_SHORT2:
+ case IA_CSS_STREAM_FORMAT_GENERIC_SHORT3:
+ case IA_CSS_STREAM_FORMAT_GENERIC_SHORT4:
+ case IA_CSS_STREAM_FORMAT_GENERIC_SHORT5:
+ case IA_CSS_STREAM_FORMAT_GENERIC_SHORT6:
+ case IA_CSS_STREAM_FORMAT_GENERIC_SHORT7:
+ case IA_CSS_STREAM_FORMAT_GENERIC_SHORT8:
+ case IA_CSS_STREAM_FORMAT_YUV420_8_SHIFT:
+ case IA_CSS_STREAM_FORMAT_YUV420_10_SHIFT:
+ case IA_CSS_STREAM_FORMAT_EMBEDDED:
+ case IA_CSS_STREAM_FORMAT_USER_DEF1:
+ case IA_CSS_STREAM_FORMAT_USER_DEF2:
+ case IA_CSS_STREAM_FORMAT_USER_DEF3:
+ case IA_CSS_STREAM_FORMAT_USER_DEF4:
+ case IA_CSS_STREAM_FORMAT_USER_DEF5:
+ case IA_CSS_STREAM_FORMAT_USER_DEF6:
+ case IA_CSS_STREAM_FORMAT_USER_DEF7:
+ case IA_CSS_STREAM_FORMAT_USER_DEF8:
+ break;
+ }
+ if (width_a == 0)
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+
+ if (two_ppc)
+ left_padding /= 2;
+
+ /* Default values */
+ if (left_padding)
+ vectors_per_line = num_vectors;
+ if (!vectors_per_line) {
+ vectors_per_line = CEIL_MUL(num_vectors / buffer_height,
+ deinterleaving);
+ line_width = 0;
+ }
+ if (!line_width)
+ line_width = vectors_per_line *
+ input_formatter_get_alignment(INPUT_FORMATTER0_ID);
+ if (!buffers_per_line)
+ buffers_per_line = deinterleaving;
+ line_width = CEIL_MUL(line_width,
+ input_formatter_get_alignment(INPUT_FORMATTER0_ID)
+ * vmem_increment);
+
+ vectors_per_buffer = buffer_height * buffer_width / ISP_VEC_NELEMS;
+
+ if (config->mode == IA_CSS_INPUT_MODE_TPG &&
+ ((binary && binary->info->sp.pipeline.mode == IA_CSS_BINARY_MODE_VIDEO) ||
+ (!binary))) {
+ /* !binary -> sp raw copy pipe */
+ /* workaround for TPG in video mode */
+ start_line = 0;
+ start_column = 0;
+ cropped_height -= start_line;
+ width_a -= start_column;
+ }
+
+ if_a_config.start_line = start_line;
+ if_a_config.start_column = start_column;
+ if_a_config.left_padding = left_padding / deinterleaving;
+ if_a_config.cropped_height = cropped_height;
+ if_a_config.cropped_width = width_a;
+ if_a_config.deinterleaving = deinterleaving;
+ if_a_config.buf_vecs = vectors_per_buffer;
+ if_a_config.buf_start_index = buf_offset_a;
+ if_a_config.buf_increment = vmem_increment;
+ if_a_config.buf_eol_offset =
+ buffer_width * bits_per_pixel / 8 - line_width;
+ if_a_config.is_yuv420_format =
+ (input_format == IA_CSS_STREAM_FORMAT_YUV420_8)
+ || (input_format == IA_CSS_STREAM_FORMAT_YUV420_10)
+ || (input_format == IA_CSS_STREAM_FORMAT_YUV420_16);
+ if_a_config.block_no_reqs = (config->mode != IA_CSS_INPUT_MODE_SENSOR);
+
+ if (two_ppc) {
+ if (deinterleaving_b) {
+ deinterleaving = deinterleaving_b;
+ width_b = cropped_width * deinterleaving;
+ buffer_width *= deinterleaving;
+ /* Patch from bayer to rgb */
+ num_vectors = num_vectors / 2 *
+ deinterleaving * width_b_factor;
+ vectors_per_line = num_vectors / buffer_height;
+ line_width = vectors_per_line *
+ input_formatter_get_alignment(INPUT_FORMATTER0_ID);
+ }
+ if_b_config.start_line = start_line;
+ if_b_config.start_column = start_column_b;
+ if_b_config.left_padding = left_padding / deinterleaving;
+ if_b_config.cropped_height = cropped_height;
+ if_b_config.cropped_width = width_b;
+ if_b_config.deinterleaving = deinterleaving;
+ if_b_config.buf_vecs = vectors_per_buffer;
+ if_b_config.buf_start_index = buf_offset_b;
+ if_b_config.buf_increment = vmem_increment;
+ if_b_config.buf_eol_offset =
+ buffer_width * bits_per_pixel / 8 - line_width;
+ if_b_config.is_yuv420_format =
+ input_format == IA_CSS_STREAM_FORMAT_YUV420_8
+ || input_format == IA_CSS_STREAM_FORMAT_YUV420_10
+ || input_format == IA_CSS_STREAM_FORMAT_YUV420_16;
+ if_b_config.block_no_reqs =
+ (config->mode != IA_CSS_INPUT_MODE_SENSOR);
+
+ if (SH_CSS_IF_CONFIG_NOT_NEEDED != if_config_index) {
+ assert(if_config_index <= SH_CSS_MAX_IF_CONFIGS);
+
+ ifmtr_set_if_blocking_mode(&if_a_config, &if_b_config);
+ /* Set the ifconfigs to SP group */
+ sh_css_sp_set_if_configs(&if_a_config, &if_b_config,
+ if_config_index);
+ }
+ } else {
+ if (SH_CSS_IF_CONFIG_NOT_NEEDED != if_config_index) {
+ assert(if_config_index <= SH_CSS_MAX_IF_CONFIGS);
+
+ ifmtr_set_if_blocking_mode(&if_a_config, NULL);
+ /* Set the ifconfigs to SP group */
+ sh_css_sp_set_if_configs(&if_a_config, NULL,
+ if_config_index);
+ }
+ }
+
+ return IA_CSS_SUCCESS;
+}
+
+bool ifmtr_set_if_blocking_mode_reset = true;
+
+/************************************************************
+ * Static functions
+ ************************************************************/
+static void ifmtr_set_if_blocking_mode(
+ const input_formatter_cfg_t * const config_a,
+ const input_formatter_cfg_t * const config_b)
+{
+ int i;
+ bool block[] = { false, false, false, false };
+ assert(N_INPUT_FORMATTER_ID <= (sizeof(block) / sizeof(block[0])));
+
+#if !defined(IS_ISP_2400_SYSTEM)
+#error "ifmtr_set_if_blocking_mode: ISP_SYSTEM must be one of {IS_ISP_2400_SYSTEM}"
+#endif
+
+ block[INPUT_FORMATTER0_ID] = (bool)config_a->block_no_reqs;
+ if (NULL != config_b)
+ block[INPUT_FORMATTER1_ID] = (bool)config_b->block_no_reqs;
+
+	/* TODO: next could cause issues when streams are started after
+	 * each other. */
+ /*IF should not be reconfigured/reset from host */
+ if (ifmtr_set_if_blocking_mode_reset) {
+ ifmtr_set_if_blocking_mode_reset = false;
+ for (i = 0; i < N_INPUT_FORMATTER_ID; i++) {
+ input_formatter_ID_t id = (input_formatter_ID_t) i;
+ input_formatter_rst(id);
+ input_formatter_set_fifo_blocking_mode(id, block[id]);
+ }
+ }
+
+ return;
+}
+
+static enum ia_css_err ifmtr_start_column(
+ const struct ia_css_stream_config *config,
+ unsigned int bin_in,
+ unsigned int *start_column)
+{
+ unsigned int in = config->input_config.input_res.width, start,
+ for_bayer = ia_css_ifmtr_columns_needed_for_bayer_order(config);
+
+ if (bin_in + 2 * for_bayer > in)
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+
+	/* Use the middle of the input on the hardware: split the extra
+	 * columns evenly between the left and right sides. */
+ start = (in - bin_in) / 2;
+	/* round the start column down to an even value */
+ start &= ~0x1;
+
+	/* now we add the one column (if needed) to correct for the bayer
+	 * order.
+	 */
+ start += for_bayer;
+ *start_column = start;
+ return IA_CSS_SUCCESS;
+}
+
+static enum ia_css_err ifmtr_input_start_line(
+ const struct ia_css_stream_config *config,
+ unsigned int bin_in,
+ unsigned int *start_line)
+{
+ unsigned int in = config->input_config.input_res.height, start,
+ for_bayer = ia_css_ifmtr_lines_needed_for_bayer_order(config);
+
+ if (bin_in + 2 * for_bayer > in)
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+
+	/* Use the middle of the input on the hardware: split the extra lines
+	 * evenly between the top and bottom. On the simulator, we cannot
+	 * handle extra lines at the end of the frame.
+	 */
+ start = (in - bin_in) / 2;
+	/* round the start line down to an even value. */
+ start &= ~0x1;
+
+ /* now we add the one line (if needed) to correct for the bayer order */
+ start += for_bayer;
+ *start_line = start;
+ return IA_CSS_SUCCESS;
+}
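+
+/*
+ * Worked example for the two helpers above (the numbers are hypothetical
+ * and only illustrate the arithmetic): with an input height of 1098 lines,
+ * a binary input height of 1080 and one extra line required for the bayer
+ * order, ifmtr_input_start_line() computes
+ *	start = (1098 - 1080) / 2 = 9, rounded down to 8, plus 1 -> 9,
+ * so the crop stays centred and the bayer phase is preserved. The column
+ * calculation in ifmtr_start_column() follows the same pattern.
+ */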
+
+#endif
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/inputfifo/interface/ia_css_inputfifo.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/inputfifo/interface/ia_css_inputfifo.h
new file mode 100644
index 000000000000..47d0f7e53f47
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/inputfifo/interface/ia_css_inputfifo.h
@@ -0,0 +1,69 @@
+#ifndef ISP2401
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#else
+/**
+Support for Intel Camera Imaging ISP subsystem.
+Copyright (c) 2010 - 2015, Intel Corporation.
+
+This program is free software; you can redistribute it and/or modify it
+under the terms and conditions of the GNU General Public License,
+version 2, as published by the Free Software Foundation.
+
+This program is distributed in the hope it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+more details.
+*/
+#endif
+
+#ifndef _IA_CSS_INPUTFIFO_H
+#define _IA_CSS_INPUTFIFO_H
+
+#include <sp.h>
+#include <isp.h>
+
+#include "ia_css_stream_format.h"
+
+/* SP access */
+void ia_css_inputfifo_send_input_frame(
+ const unsigned short *data,
+ unsigned int width,
+ unsigned int height,
+ unsigned int ch_id,
+ enum ia_css_stream_format input_format,
+ bool two_ppc);
+
+void ia_css_inputfifo_start_frame(
+ unsigned int ch_id,
+ enum ia_css_stream_format input_format,
+ bool two_ppc);
+
+void ia_css_inputfifo_send_line(
+ unsigned int ch_id,
+ const unsigned short *data,
+ unsigned int width,
+ const unsigned short *data2,
+ unsigned int width2);
+
+void ia_css_inputfifo_send_embedded_line(
+ unsigned int ch_id,
+ enum ia_css_stream_format data_type,
+ const unsigned short *data,
+ unsigned int width);
+
+void ia_css_inputfifo_end_frame(
+ unsigned int ch_id);
+
+#endif /* _IA_CSS_INPUTFIFO_H */
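+
+/*
+ * Minimal usage sketch for the whole-frame entry point above. The frame
+ * buffer, the 640x480 resolution, channel id 0 and the RAW10 stream format
+ * are placeholders chosen for illustration, not values defined by this
+ * header:
+ *
+ *	ia_css_inputfifo_send_input_frame(frame, 640, 480, 0,
+ *					  IA_CSS_STREAM_FORMAT_RAW_10,
+ *					  false);
+ */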
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/inputfifo/src/inputfifo.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/inputfifo/src/inputfifo.c
new file mode 100644
index 000000000000..cf02970d4f59
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/inputfifo/src/inputfifo.c
@@ -0,0 +1,613 @@
+#ifndef ISP2401
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#else
+/**
+Support for Intel Camera Imaging ISP subsystem.
+Copyright (c) 2010 - 2015, Intel Corporation.
+
+This program is free software; you can redistribute it and/or modify it
+under the terms and conditions of the GNU General Public License,
+version 2, as published by the Free Software Foundation.
+
+This program is distributed in the hope it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+more details.
+*/
+#endif
+
+#include "platform_support.h"
+
+#include "ia_css_inputfifo.h"
+
+#include "device_access.h"
+
+#define __INLINE_SP__
+#include "sp.h"
+#define __INLINE_ISP__
+#include "isp.h"
+#define __INLINE_IRQ__
+#include "irq.h"
+#define __INLINE_FIFO_MONITOR__
+#include "fifo_monitor.h"
+
+#define __INLINE_EVENT__
+#include "event_fifo.h"
+#define __INLINE_SP__
+
+#if !defined(HAS_NO_INPUT_SYSTEM)
+#include "input_system.h" /* MIPI_PREDICTOR_NONE,... */
+#endif
+
+#include "assert_support.h"
+
+/* System independent */
+#include "sh_css_internal.h"
+#if !defined(HAS_NO_INPUT_SYSTEM)
+#include "ia_css_isys.h"
+#endif
+
+#define HBLANK_CYCLES (187)
+#define MARKER_CYCLES (6)
+
+#if !defined(HAS_NO_INPUT_SYSTEM)
+#include <hive_isp_css_streaming_to_mipi_types_hrt.h>
+#endif
+
+/* The data type is used to send special cases:
+ * yuv420: odd lines (1, 3 etc) are twice as wide as even
+ * lines (0, 2, 4 etc).
+ * rgb: for two pixels per clock, the R and B values are sent
+ * to output_0 while only G is sent to output_1. This means
+ * that output_1 only gets half the number of values of output_0.
+ * WARNING: This type should also be used for Legacy YUV420.
+ * regular: used for all other data types (RAW, YUV422, etc)
+ */
+enum inputfifo_mipi_data_type {
+ inputfifo_mipi_data_type_regular,
+ inputfifo_mipi_data_type_yuv420,
+ inputfifo_mipi_data_type_yuv420_legacy,
+ inputfifo_mipi_data_type_rgb,
+};
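+
+/*
+ * Illustrative example of the yuv420 case described above (the width is
+ * hypothetical): for a frame sent with width = 640 Y samples per line,
+ * even lines (0, 2, ...) carry 640 values while odd lines (1, 3, ...)
+ * carry 2 * 640 = 1280 values, which is exactly how inputfifo_send_frame()
+ * below doubles the width for odd lines.
+ */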
+#if !defined(HAS_NO_INPUT_SYSTEM)
+static unsigned int inputfifo_curr_ch_id, inputfifo_curr_fmt_type;
+#endif
+struct inputfifo_instance {
+ unsigned int ch_id;
+ enum ia_css_stream_format input_format;
+ bool two_ppc;
+ bool streaming;
+ unsigned int hblank_cycles;
+ unsigned int marker_cycles;
+ unsigned int fmt_type;
+ enum inputfifo_mipi_data_type type;
+};
+#if !defined(HAS_NO_INPUT_SYSTEM)
+/*
+ * Maintain basic streaming-to-MIPI administration, indexed by ch_id.
+ * ch_id maps to the "MIPI virtual channel ID" and can have value 0..3.
+ */
+#define INPUTFIFO_NR_OF_S2M_CHANNELS (4)
+static struct inputfifo_instance
+ inputfifo_inst_admin[INPUTFIFO_NR_OF_S2M_CHANNELS];
+
+/* Streaming to MIPI */
+static unsigned inputfifo_wrap_marker(
+/* STORAGE_CLASS_INLINE unsigned inputfifo_wrap_marker( */
+ unsigned marker)
+{
+ return marker |
+ (inputfifo_curr_ch_id << HIVE_STR_TO_MIPI_CH_ID_LSB) |
+ (inputfifo_curr_fmt_type << _HIVE_STR_TO_MIPI_FMT_TYPE_LSB);
+}
+
+STORAGE_CLASS_INLINE void
+_sh_css_fifo_snd(unsigned token)
+{
+ while (!can_event_send_token(STR2MIPI_EVENT_ID))
+ hrt_sleep();
+ event_send_token(STR2MIPI_EVENT_ID, token);
+ return;
+}
+
+static void inputfifo_send_data_a(
+/* STORAGE_CLASS_INLINE void inputfifo_send_data_a( */
+unsigned int data)
+{
+ unsigned int token = (1 << HIVE_STR_TO_MIPI_VALID_A_BIT) |
+ (data << HIVE_STR_TO_MIPI_DATA_A_LSB);
+ _sh_css_fifo_snd(token);
+ return;
+}
+
+
+
+static void inputfifo_send_data_b(
+/* STORAGE_CLASS_INLINE void inputfifo_send_data_b( */
+ unsigned int data)
+{
+ unsigned int token = (1 << HIVE_STR_TO_MIPI_VALID_B_BIT) |
+ (data << _HIVE_STR_TO_MIPI_DATA_B_LSB);
+ _sh_css_fifo_snd(token);
+ return;
+}
+
+
+
+static void inputfifo_send_data(
+/* STORAGE_CLASS_INLINE void inputfifo_send_data( */
+ unsigned int a,
+ unsigned int b)
+{
+ unsigned int token = ((1 << HIVE_STR_TO_MIPI_VALID_A_BIT) |
+ (1 << HIVE_STR_TO_MIPI_VALID_B_BIT) |
+ (a << HIVE_STR_TO_MIPI_DATA_A_LSB) |
+ (b << _HIVE_STR_TO_MIPI_DATA_B_LSB));
+ _sh_css_fifo_snd(token);
+ return;
+}
+
+
+
+static void inputfifo_send_sol(void)
+/* STORAGE_CLASS_INLINE void inputfifo_send_sol(void) */
+{
+ hrt_data token = inputfifo_wrap_marker(
+ 1 << HIVE_STR_TO_MIPI_SOL_BIT);
+
+ _sh_css_fifo_snd(token);
+ return;
+}
+
+
+
+static void inputfifo_send_eol(void)
+/* STORAGE_CLASS_INLINE void inputfifo_send_eol(void) */
+{
+ hrt_data token = inputfifo_wrap_marker(
+ 1 << HIVE_STR_TO_MIPI_EOL_BIT);
+ _sh_css_fifo_snd(token);
+ return;
+}
+
+
+
+static void inputfifo_send_sof(void)
+/* STORAGE_CLASS_INLINE void inputfifo_send_sof(void) */
+{
+ hrt_data token = inputfifo_wrap_marker(
+ 1 << HIVE_STR_TO_MIPI_SOF_BIT);
+
+ _sh_css_fifo_snd(token);
+ return;
+}
+
+
+
+static void inputfifo_send_eof(void)
+/* STORAGE_CLASS_INLINE void inputfifo_send_eof(void) */
+{
+ hrt_data token = inputfifo_wrap_marker(
+ 1 << HIVE_STR_TO_MIPI_EOF_BIT);
+ _sh_css_fifo_snd(token);
+ return;
+}
+
+
+
+#ifdef __ON__
+static void inputfifo_send_ch_id(
+/* STORAGE_CLASS_INLINE void inputfifo_send_ch_id( */
+ unsigned int ch_id)
+{
+ hrt_data token;
+ inputfifo_curr_ch_id = ch_id & _HIVE_ISP_CH_ID_MASK;
+	/* we send a zero marker, this will wrap the ch_id and
+ * fmt_type automatically.
+ */
+ token = inputfifo_wrap_marker(0);
+ _sh_css_fifo_snd(token);
+ return;
+}
+
+static void inputfifo_send_fmt_type(
+/* STORAGE_CLASS_INLINE void inputfifo_send_fmt_type( */
+ unsigned int fmt_type)
+{
+ hrt_data token;
+ inputfifo_curr_fmt_type = fmt_type & _HIVE_ISP_FMT_TYPE_MASK;
+	/* we send a zero marker, this will wrap the ch_id and
+ * fmt_type automatically.
+ */
+ token = inputfifo_wrap_marker(0);
+ _sh_css_fifo_snd(token);
+ return;
+}
+#endif /* __ON__ */
+
+
+
+static void inputfifo_send_ch_id_and_fmt_type(
+/* STORAGE_CLASS_INLINE
+void inputfifo_send_ch_id_and_fmt_type( */
+ unsigned int ch_id,
+ unsigned int fmt_type)
+{
+ hrt_data token;
+ inputfifo_curr_ch_id = ch_id & _HIVE_ISP_CH_ID_MASK;
+ inputfifo_curr_fmt_type = fmt_type & _HIVE_ISP_FMT_TYPE_MASK;
+	/* we send a zero marker, this will wrap the ch_id and
+ * fmt_type automatically.
+ */
+ token = inputfifo_wrap_marker(0);
+ _sh_css_fifo_snd(token);
+ return;
+}
+
+
+
+static void inputfifo_send_empty_token(void)
+/* STORAGE_CLASS_INLINE void inputfifo_send_empty_token(void) */
+{
+ hrt_data token = inputfifo_wrap_marker(0);
+ _sh_css_fifo_snd(token);
+ return;
+}
+
+
+
+static void inputfifo_start_frame(
+/* STORAGE_CLASS_INLINE void inputfifo_start_frame( */
+ unsigned int ch_id,
+ unsigned int fmt_type)
+{
+ inputfifo_send_ch_id_and_fmt_type(ch_id, fmt_type);
+ inputfifo_send_sof();
+ return;
+}
+
+
+
+static void inputfifo_end_frame(
+ unsigned int marker_cycles)
+{
+ unsigned int i;
+ for (i = 0; i < marker_cycles; i++)
+ inputfifo_send_empty_token();
+ inputfifo_send_eof();
+ return;
+}
+
+
+
+static void inputfifo_send_line2(
+ const unsigned short *data,
+ unsigned int width,
+ const unsigned short *data2,
+ unsigned int width2,
+ unsigned int hblank_cycles,
+ unsigned int marker_cycles,
+ unsigned int two_ppc,
+ enum inputfifo_mipi_data_type type)
+{
+ unsigned int i, is_rgb = 0, is_legacy = 0;
+
+ assert(data != NULL);
+ assert((data2 != NULL) || (width2 == 0));
+ if (type == inputfifo_mipi_data_type_rgb)
+ is_rgb = 1;
+
+ if (type == inputfifo_mipi_data_type_yuv420_legacy)
+ is_legacy = 1;
+
+ for (i = 0; i < hblank_cycles; i++)
+ inputfifo_send_empty_token();
+ inputfifo_send_sol();
+ for (i = 0; i < marker_cycles; i++)
+ inputfifo_send_empty_token();
+ for (i = 0; i < width; i++, data++) {
+ /* for RGB in two_ppc, we only actually send 2 pixels per
+ * clock in the even pixels (0, 2 etc). In the other cycles,
+ * we only send 1 pixel, to data[0].
+ */
+ unsigned int send_two_pixels = two_ppc;
+ if ((is_rgb || is_legacy) && (i % 3 == 2))
+ send_two_pixels = 0;
+ if (send_two_pixels) {
+ if (i + 1 == width) {
+ /* for jpg (binary) copy, this can occur
+ * if the file contains an odd number of bytes.
+ */
+ inputfifo_send_data(
+ data[0], 0);
+ } else {
+ inputfifo_send_data(
+ data[0], data[1]);
+ }
+ /* Additional increment because we send 2 pixels */
+ data++;
+ i++;
+ } else if (two_ppc && is_legacy) {
+ inputfifo_send_data_b(data[0]);
+ } else {
+ inputfifo_send_data_a(data[0]);
+ }
+ }
+
+ for (i = 0; i < width2; i++, data2++) {
+ /* for RGB in two_ppc, we only actually send 2 pixels per
+ * clock in the even pixels (0, 2 etc). In the other cycles,
+ * we only send 1 pixel, to data2[0].
+ */
+ unsigned int send_two_pixels = two_ppc;
+ if ((is_rgb || is_legacy) && (i % 3 == 2))
+ send_two_pixels = 0;
+ if (send_two_pixels) {
+ if (i + 1 == width2) {
+ /* for jpg (binary) copy, this can occur
+ * if the file contains an odd number of bytes.
+ */
+ inputfifo_send_data(
+ data2[0], 0);
+ } else {
+ inputfifo_send_data(
+ data2[0], data2[1]);
+ }
+ /* Additional increment because we send 2 pixels */
+ data2++;
+ i++;
+ } else if (two_ppc && is_legacy) {
+ inputfifo_send_data_b(data2[0]);
+ } else {
+ inputfifo_send_data_a(data2[0]);
+ }
+ }
+ for (i = 0; i < hblank_cycles; i++)
+ inputfifo_send_empty_token();
+ inputfifo_send_eol();
+ return;
+}
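+
+/*
+ * Example of the two_ppc pattern implemented above for an RGB line
+ * (subpixel indices are illustrative): subpixels 0 and 1 go out together
+ * via inputfifo_send_data(), subpixel 2 (i % 3 == 2) goes out alone via
+ * inputfifo_send_data_a(), and the pattern repeats every three subpixels,
+ * so output_1 receives half as many values as output_0, as described in
+ * the data-type comment at the top of this file. For legacy YUV420 the
+ * lone subpixel is sent with inputfifo_send_data_b() instead.
+ */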
+
+
+
+static void
+inputfifo_send_line(const unsigned short *data,
+ unsigned int width,
+ unsigned int hblank_cycles,
+ unsigned int marker_cycles,
+ unsigned int two_ppc,
+ enum inputfifo_mipi_data_type type)
+{
+ assert(data != NULL);
+ inputfifo_send_line2(data, width, NULL, 0,
+ hblank_cycles,
+ marker_cycles,
+ two_ppc,
+ type);
+}
+
+
+/* Send a frame of data into the input network via the GP FIFO.
+ * Parameters:
+ * - data: array of 16 bit values that contains all data for the frame.
+ * - width: width of a line in number of subpixels, for yuv420 it is the
+ * number of Y components per line.
+ * - height: height of the frame in number of lines.
+ * - ch_id: channel ID.
+ * - fmt_type: format type.
+ * - hblank_cycles: length of horizontal blanking in cycles.
+ * - marker_cycles: number of empty cycles after start-of-line and before
+ * end-of-frame.
+ * - two_ppc: boolean, describes whether to send one or two pixels per clock
+ *            cycle. In this mode, we send pixels N and N+1 in the same cycle,
+ * to IF_PRIM_A and IF_PRIM_B respectively. The caller must make
+ * sure the input data has been formatted correctly for this.
+ * For example, for RGB formats this means that unused values
+ * must be inserted.
+ * - yuv420: boolean, describes whether (non-legacy) yuv420 data is used. In
+ * this mode, the odd lines (1,3,5 etc) are half as long as the
+ * even lines (2,4,6 etc).
+ * Note that the first line is odd (1) and the second line is even
+ * (2).
+ *
+ * This function does not do any reordering of pixels; the caller must make
+ * sure the data is in the right format. Please refer to the CSS receiver
+ * documentation for details on the data formats.
+ */
+
+static void inputfifo_send_frame(
+ const unsigned short *data,
+ unsigned int width,
+ unsigned int height,
+ unsigned int ch_id,
+ unsigned int fmt_type,
+ unsigned int hblank_cycles,
+ unsigned int marker_cycles,
+ unsigned int two_ppc,
+ enum inputfifo_mipi_data_type type)
+{
+ unsigned int i;
+
+ assert(data != NULL);
+ inputfifo_start_frame(ch_id, fmt_type);
+
+ for (i = 0; i < height; i++) {
+ if ((type == inputfifo_mipi_data_type_yuv420) &&
+ (i & 1) == 1) {
+ inputfifo_send_line(data, 2 * width,
+ hblank_cycles,
+ marker_cycles,
+ two_ppc, type);
+ data += 2 * width;
+ } else {
+ inputfifo_send_line(data, width,
+ hblank_cycles,
+ marker_cycles,
+ two_ppc, type);
+ data += width;
+ }
+ }
+ inputfifo_end_frame(marker_cycles);
+ return;
+}
+
+
+
+static enum inputfifo_mipi_data_type inputfifo_determine_type(
+ enum ia_css_stream_format input_format)
+{
+ enum inputfifo_mipi_data_type type;
+
+ type = inputfifo_mipi_data_type_regular;
+ if (input_format == IA_CSS_STREAM_FORMAT_YUV420_8_LEGACY) {
+ type =
+ inputfifo_mipi_data_type_yuv420_legacy;
+ } else if (input_format == IA_CSS_STREAM_FORMAT_YUV420_8 ||
+ input_format == IA_CSS_STREAM_FORMAT_YUV420_10 ||
+ input_format == IA_CSS_STREAM_FORMAT_YUV420_16) {
+ type =
+ inputfifo_mipi_data_type_yuv420;
+ } else if (input_format >= IA_CSS_STREAM_FORMAT_RGB_444 &&
+ input_format <= IA_CSS_STREAM_FORMAT_RGB_888) {
+ type =
+ inputfifo_mipi_data_type_rgb;
+ }
+ return type;
+}
+
+
+
+static struct inputfifo_instance *inputfifo_get_inst(
+ unsigned int ch_id)
+{
+ return &inputfifo_inst_admin[ch_id];
+}
+
+void ia_css_inputfifo_send_input_frame(
+ const unsigned short *data,
+ unsigned int width,
+ unsigned int height,
+ unsigned int ch_id,
+ enum ia_css_stream_format input_format,
+ bool two_ppc)
+{
+ unsigned int fmt_type, hblank_cycles, marker_cycles;
+ enum inputfifo_mipi_data_type type;
+
+ assert(data != NULL);
+ hblank_cycles = HBLANK_CYCLES;
+ marker_cycles = MARKER_CYCLES;
+ ia_css_isys_convert_stream_format_to_mipi_format(input_format,
+ MIPI_PREDICTOR_NONE,
+ &fmt_type);
+
+ type = inputfifo_determine_type(input_format);
+
+ inputfifo_send_frame(data, width, height,
+ ch_id, fmt_type, hblank_cycles, marker_cycles,
+ two_ppc, type);
+}
+
+
+
+void ia_css_inputfifo_start_frame(
+ unsigned int ch_id,
+ enum ia_css_stream_format input_format,
+ bool two_ppc)
+{
+ struct inputfifo_instance *s2mi;
+ s2mi = inputfifo_get_inst(ch_id);
+
+ s2mi->ch_id = ch_id;
+ ia_css_isys_convert_stream_format_to_mipi_format(input_format,
+ MIPI_PREDICTOR_NONE,
+ &s2mi->fmt_type);
+ s2mi->two_ppc = two_ppc;
+ s2mi->type = inputfifo_determine_type(input_format);
+ s2mi->hblank_cycles = HBLANK_CYCLES;
+ s2mi->marker_cycles = MARKER_CYCLES;
+ s2mi->streaming = true;
+
+ inputfifo_start_frame(ch_id, s2mi->fmt_type);
+ return;
+}
+
+
+
+void ia_css_inputfifo_send_line(
+ unsigned int ch_id,
+ const unsigned short *data,
+ unsigned int width,
+ const unsigned short *data2,
+ unsigned int width2)
+{
+ struct inputfifo_instance *s2mi;
+
+ assert(data != NULL);
+ assert((data2 != NULL) || (width2 == 0));
+ s2mi = inputfifo_get_inst(ch_id);
+
+
+ /* Set global variables that indicate channel_id and format_type */
+ inputfifo_curr_ch_id = (s2mi->ch_id) & _HIVE_ISP_CH_ID_MASK;
+ inputfifo_curr_fmt_type = (s2mi->fmt_type) & _HIVE_ISP_FMT_TYPE_MASK;
+
+ inputfifo_send_line2(data, width, data2, width2,
+ s2mi->hblank_cycles,
+ s2mi->marker_cycles,
+ s2mi->two_ppc,
+ s2mi->type);
+}
+
+
+void ia_css_inputfifo_send_embedded_line(
+ unsigned int ch_id,
+ enum ia_css_stream_format data_type,
+ const unsigned short *data,
+ unsigned int width)
+{
+ struct inputfifo_instance *s2mi;
+ unsigned int fmt_type;
+
+ assert(data != NULL);
+ s2mi = inputfifo_get_inst(ch_id);
+ ia_css_isys_convert_stream_format_to_mipi_format(data_type,
+ MIPI_PREDICTOR_NONE, &fmt_type);
+
+ /* Set format_type for metadata line. */
+ inputfifo_curr_fmt_type = fmt_type & _HIVE_ISP_FMT_TYPE_MASK;
+
+ inputfifo_send_line(data, width, s2mi->hblank_cycles, s2mi->marker_cycles,
+ s2mi->two_ppc, inputfifo_mipi_data_type_regular);
+}
+
+
+void ia_css_inputfifo_end_frame(
+ unsigned int ch_id)
+{
+ struct inputfifo_instance *s2mi;
+ s2mi = inputfifo_get_inst(ch_id);
+
+ /* Set global variables that indicate channel_id and format_type */
+ inputfifo_curr_ch_id = (s2mi->ch_id) & _HIVE_ISP_CH_ID_MASK;
+ inputfifo_curr_fmt_type = (s2mi->fmt_type) & _HIVE_ISP_FMT_TYPE_MASK;
+
+ /* Call existing HRT function */
+ inputfifo_end_frame(s2mi->marker_cycles);
+
+ s2mi->streaming = false;
+ return;
+}
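+
+/*
+ * Typical host-side sequence for streaming one frame line by line with the
+ * functions above (channel id 0, the RAW10 format and the line buffers are
+ * placeholders for illustration):
+ *
+ *	ia_css_inputfifo_start_frame(0, IA_CSS_STREAM_FORMAT_RAW_10, false);
+ *	for (line = 0; line < height; line++)
+ *		ia_css_inputfifo_send_line(0, line_buf[line], width, NULL, 0);
+ *	ia_css_inputfifo_end_frame(0);
+ */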
+#endif /* #if !defined(HAS_NO_INPUT_SYSTEM) */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isp_param/interface/ia_css_isp_param.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isp_param/interface/ia_css_isp_param.h
new file mode 100644
index 000000000000..285749885105
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isp_param/interface/ia_css_isp_param.h
@@ -0,0 +1,118 @@
+#ifndef ISP2401
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#else
+/**
+Support for Intel Camera Imaging ISP subsystem.
+Copyright (c) 2010 - 2015, Intel Corporation.
+
+This program is free software; you can redistribute it and/or modify it
+under the terms and conditions of the GNU General Public License,
+version 2, as published by the Free Software Foundation.
+
+This program is distributed in the hope it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+more details.
+*/
+#endif
+
+#ifndef _IA_CSS_ISP_PARAM_H_
+#define _IA_CSS_ISP_PARAM_H_
+
+#include <ia_css_err.h>
+#include "ia_css_isp_param_types.h"
+
+/* Set functions for parameter memory descriptors */
+void
+ia_css_isp_param_set_mem_init(
+ struct ia_css_isp_param_host_segments *mem_init,
+ enum ia_css_param_class pclass,
+ enum ia_css_isp_memories mem,
+ char *address, size_t size);
+
+void
+ia_css_isp_param_set_css_mem_init(
+ struct ia_css_isp_param_css_segments *mem_init,
+ enum ia_css_param_class pclass,
+ enum ia_css_isp_memories mem,
+ hrt_vaddress address, size_t size);
+
+void
+ia_css_isp_param_set_isp_mem_init(
+ struct ia_css_isp_param_isp_segments *mem_init,
+ enum ia_css_param_class pclass,
+ enum ia_css_isp_memories mem,
+ uint32_t address, size_t size);
+
+/* Get functions for parameter memory descriptors */
+const struct ia_css_host_data*
+ia_css_isp_param_get_mem_init(
+ const struct ia_css_isp_param_host_segments *mem_init,
+ enum ia_css_param_class pclass,
+ enum ia_css_isp_memories mem);
+
+const struct ia_css_data*
+ia_css_isp_param_get_css_mem_init(
+ const struct ia_css_isp_param_css_segments *mem_init,
+ enum ia_css_param_class pclass,
+ enum ia_css_isp_memories mem);
+
+const struct ia_css_isp_data*
+ia_css_isp_param_get_isp_mem_init(
+ const struct ia_css_isp_param_isp_segments *mem_init,
+ enum ia_css_param_class pclass,
+ enum ia_css_isp_memories mem);
+
+/* Initialize the memory interface sizes and addresses */
+void
+ia_css_init_memory_interface(
+ struct ia_css_isp_param_css_segments *isp_mem_if,
+ const struct ia_css_isp_param_host_segments *mem_params,
+ const struct ia_css_isp_param_css_segments *css_params);
+
+/* Allocate memory parameters */
+enum ia_css_err
+ia_css_isp_param_allocate_isp_parameters(
+ struct ia_css_isp_param_host_segments *mem_params,
+ struct ia_css_isp_param_css_segments *css_params,
+ const struct ia_css_isp_param_isp_segments *mem_initializers);
+
+/* Destroy memory parameters */
+void
+ia_css_isp_param_destroy_isp_parameters(
+ struct ia_css_isp_param_host_segments *mem_params,
+ struct ia_css_isp_param_css_segments *css_params);
+
+/* Load fw parameters */
+void
+ia_css_isp_param_load_fw_params(
+ const char *fw,
+ union ia_css_all_memory_offsets *mem_offsets,
+ const struct ia_css_isp_param_memory_offsets *memory_offsets,
+ bool init);
+
+/* Copy host parameter images to ddr */
+enum ia_css_err
+ia_css_isp_param_copy_isp_mem_if_to_ddr(
+ struct ia_css_isp_param_css_segments *ddr,
+ const struct ia_css_isp_param_host_segments *host,
+ enum ia_css_param_class pclass);
+
+/* Enable a pipeline by setting the control field in the isp dmem parameters */
+void
+ia_css_isp_param_enable_pipeline(
+ const struct ia_css_isp_param_host_segments *mem_params);
+
+#endif /* _IA_CSS_ISP_PARAM_H_ */
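+
+/*
+ * Illustrative lifecycle sketch for the API above; mem_initializers would
+ * come from the binary info and is only a placeholder here. The host-side
+ * CONFIG buffers would be filled before the copy to DDR:
+ *
+ *	struct ia_css_isp_param_host_segments host;
+ *	struct ia_css_isp_param_css_segments css;
+ *	enum ia_css_err err;
+ *
+ *	err = ia_css_isp_param_allocate_isp_parameters(&host, &css,
+ *						       mem_initializers);
+ *	if (err == IA_CSS_SUCCESS) {
+ *		err = ia_css_isp_param_copy_isp_mem_if_to_ddr(&css, &host,
+ *					IA_CSS_PARAM_CLASS_CONFIG);
+ *		ia_css_isp_param_destroy_isp_parameters(&host, &css);
+ *	}
+ */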
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isp_param/interface/ia_css_isp_param_types.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isp_param/interface/ia_css_isp_param_types.h
new file mode 100644
index 000000000000..8e651b80345a
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isp_param/interface/ia_css_isp_param_types.h
@@ -0,0 +1,107 @@
+#ifndef ISP2401
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#else
+/**
+Support for Intel Camera Imaging ISP subsystem.
+Copyright (c) 2010 - 2015, Intel Corporation.
+
+This program is free software; you can redistribute it and/or modify it
+under the terms and conditions of the GNU General Public License,
+version 2, as published by the Free Software Foundation.
+
+This program is distributed in the hope it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+more details.
+*/
+#endif
+
+#ifndef _IA_CSS_ISP_PARAM_TYPES_H_
+#define _IA_CSS_ISP_PARAM_TYPES_H_
+
+#include "ia_css_types.h"
+#include <platform_support.h>
+#include <system_global.h>
+
+/* Short hands */
+#define IA_CSS_ISP_DMEM IA_CSS_ISP_DMEM0
+#define IA_CSS_ISP_VMEM IA_CSS_ISP_VMEM0
+
+/* The driver depends on this, to be removed later. */
+#define IA_CSS_NUM_ISP_MEMORIES IA_CSS_NUM_MEMORIES
+
+/* Explicit member numbering to avoid fish type checker bug */
+enum ia_css_param_class {
+ IA_CSS_PARAM_CLASS_PARAM = 0, /* Late binding parameters, like 3A */
+ IA_CSS_PARAM_CLASS_CONFIG = 1, /* Pipe config time parameters, like resolution */
+ IA_CSS_PARAM_CLASS_STATE = 2, /* State parameters, like tnr buffer index */
+#if 0 /* Not yet implemented */
+ IA_CSS_PARAM_CLASS_FRAME = 3, /* Frame time parameters, like output buffer */
+#endif
+};
+#define IA_CSS_NUM_PARAM_CLASSES (IA_CSS_PARAM_CLASS_STATE + 1)
+
+/** ISP parameter descriptor */
+struct ia_css_isp_parameter {
+	uint32_t offset; /* Offset in isp_<mem>_parameters, etc. */
+ uint32_t size; /* Disabled if 0 */
+};
+
+
+/* Address/size of each parameter class in each isp memory, host memory pointers */
+struct ia_css_isp_param_host_segments {
+ struct ia_css_host_data params[IA_CSS_NUM_PARAM_CLASSES][IA_CSS_NUM_MEMORIES];
+};
+
+/* Address/size of each parameter class in each isp memory, css memory pointers */
+struct ia_css_isp_param_css_segments {
+ struct ia_css_data params[IA_CSS_NUM_PARAM_CLASSES][IA_CSS_NUM_MEMORIES];
+};
+
+/* Address/size of each parameter class in each isp memory, isp memory pointers */
+struct ia_css_isp_param_isp_segments {
+ struct ia_css_isp_data params[IA_CSS_NUM_PARAM_CLASSES][IA_CSS_NUM_MEMORIES];
+};
+
+/* Memory offsets in binary info */
+struct ia_css_isp_param_memory_offsets {
+ uint32_t offsets[IA_CSS_NUM_PARAM_CLASSES]; /**< offset wrt hdr in bytes */
+};
+
+/** Offsets for ISP kernel parameters per isp memory.
+ * Only relevant for standard ISP binaries, not ACC or SP.
+ */
+union ia_css_all_memory_offsets {
+ struct {
+ CSS_ALIGN(struct ia_css_memory_offsets *param, 8);
+ CSS_ALIGN(struct ia_css_config_memory_offsets *config, 8);
+ CSS_ALIGN(struct ia_css_state_memory_offsets *state, 8);
+ } offsets;
+ struct {
+ CSS_ALIGN(void *ptr, 8);
+ } array[IA_CSS_NUM_PARAM_CLASSES];
+};
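+
+/*
+ * The two views of this union alias each other: offsets.param,
+ * offsets.config and offsets.state occupy the same slots as
+ * array[IA_CSS_PARAM_CLASS_PARAM].ptr, array[IA_CSS_PARAM_CLASS_CONFIG].ptr
+ * and array[IA_CSS_PARAM_CLASS_STATE].ptr, so generic code can iterate, as
+ * in this sketch (mirroring what ia_css_isp_param_load_fw_params() does):
+ *
+ *	union ia_css_all_memory_offsets offs;
+ *	unsigned pclass;
+ *
+ *	for (pclass = 0; pclass < IA_CSS_NUM_PARAM_CLASSES; pclass++)
+ *		offs.array[pclass].ptr = NULL;
+ */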
+
+#define IA_CSS_DEFAULT_ISP_MEM_PARAMS \
+ { { { { 0, 0 } } } }
+
+#define IA_CSS_DEFAULT_ISP_CSS_PARAMS \
+ { { { { 0, 0 } } } }
+
+#define IA_CSS_DEFAULT_ISP_ISP_PARAMS \
+ { { { { 0, 0 } } } }
+
+#endif /* _IA_CSS_ISP_PARAM_TYPES_H_ */
+
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isp_param/src/isp_param.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isp_param/src/isp_param.c
new file mode 100644
index 000000000000..832d9e16edeb
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isp_param/src/isp_param.c
@@ -0,0 +1,227 @@
+#ifndef ISP2401
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#else
+/**
+Support for Intel Camera Imaging ISP subsystem.
+Copyright (c) 2010 - 2015, Intel Corporation.
+
+This program is free software; you can redistribute it and/or modify it
+under the terms and conditions of the GNU General Public License,
+version 2, as published by the Free Software Foundation.
+
+This program is distributed in the hope it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+more details.
+*/
+#endif
+
+#include "memory_access.h"
+#include "ia_css_pipeline.h"
+#include "ia_css_isp_param.h"
+
+/* Set functions for parameter memory descriptors */
+
+void
+ia_css_isp_param_set_mem_init(
+ struct ia_css_isp_param_host_segments *mem_init,
+ enum ia_css_param_class pclass,
+ enum ia_css_isp_memories mem,
+ char *address, size_t size)
+{
+ mem_init->params[pclass][mem].address = address;
+ mem_init->params[pclass][mem].size = (uint32_t)size;
+}
+
+void
+ia_css_isp_param_set_css_mem_init(
+ struct ia_css_isp_param_css_segments *mem_init,
+ enum ia_css_param_class pclass,
+ enum ia_css_isp_memories mem,
+ hrt_vaddress address, size_t size)
+{
+ mem_init->params[pclass][mem].address = address;
+ mem_init->params[pclass][mem].size = (uint32_t)size;
+}
+
+void
+ia_css_isp_param_set_isp_mem_init(
+ struct ia_css_isp_param_isp_segments *mem_init,
+ enum ia_css_param_class pclass,
+ enum ia_css_isp_memories mem,
+ uint32_t address, size_t size)
+{
+ mem_init->params[pclass][mem].address = address;
+ mem_init->params[pclass][mem].size = (uint32_t)size;
+}
+
+/* Get functions for parameter memory descriptors */
+const struct ia_css_host_data*
+ia_css_isp_param_get_mem_init(
+ const struct ia_css_isp_param_host_segments *mem_init,
+ enum ia_css_param_class pclass,
+ enum ia_css_isp_memories mem)
+{
+ return &mem_init->params[pclass][mem];
+}
+
+const struct ia_css_data*
+ia_css_isp_param_get_css_mem_init(
+ const struct ia_css_isp_param_css_segments *mem_init,
+ enum ia_css_param_class pclass,
+ enum ia_css_isp_memories mem)
+{
+ return &mem_init->params[pclass][mem];
+}
+
+const struct ia_css_isp_data*
+ia_css_isp_param_get_isp_mem_init(
+ const struct ia_css_isp_param_isp_segments *mem_init,
+ enum ia_css_param_class pclass,
+ enum ia_css_isp_memories mem)
+{
+ return &mem_init->params[pclass][mem];
+}
+
+void
+ia_css_init_memory_interface(
+ struct ia_css_isp_param_css_segments *isp_mem_if,
+ const struct ia_css_isp_param_host_segments *mem_params,
+ const struct ia_css_isp_param_css_segments *css_params)
+{
+ unsigned pclass, mem;
+ for (pclass = 0; pclass < IA_CSS_NUM_PARAM_CLASSES; pclass++) {
+ memset(isp_mem_if->params[pclass], 0, sizeof(isp_mem_if->params[pclass]));
+ for (mem = 0; mem < IA_CSS_NUM_MEMORIES; mem++) {
+ if (!mem_params->params[pclass][mem].address)
+ continue;
+ isp_mem_if->params[pclass][mem].size = mem_params->params[pclass][mem].size;
+ if (pclass != IA_CSS_PARAM_CLASS_PARAM)
+ isp_mem_if->params[pclass][mem].address = css_params->params[pclass][mem].address;
+ }
+ }
+}
+
+enum ia_css_err
+ia_css_isp_param_allocate_isp_parameters(
+ struct ia_css_isp_param_host_segments *mem_params,
+ struct ia_css_isp_param_css_segments *css_params,
+ const struct ia_css_isp_param_isp_segments *mem_initializers)
+{
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ unsigned mem, pclass;
+
+ pclass = IA_CSS_PARAM_CLASS_PARAM;
+ for (mem = 0; mem < IA_CSS_NUM_MEMORIES; mem++) {
+ for (pclass = 0; pclass < IA_CSS_NUM_PARAM_CLASSES; pclass++) {
+ uint32_t size = 0;
+ if (mem_initializers)
+ size = mem_initializers->params[pclass][mem].size;
+ mem_params->params[pclass][mem].size = size;
+ mem_params->params[pclass][mem].address = NULL;
+ css_params->params[pclass][mem].size = size;
+ css_params->params[pclass][mem].address = 0x0;
+ if (size) {
+ mem_params->params[pclass][mem].address = sh_css_calloc(1, size);
+ if (!mem_params->params[pclass][mem].address) {
+ err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+ goto cleanup;
+ }
+ if (pclass != IA_CSS_PARAM_CLASS_PARAM) {
+ css_params->params[pclass][mem].address = mmgr_malloc(size);
+ if (!css_params->params[pclass][mem].address) {
+ err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+ goto cleanup;
+ }
+ }
+ }
+ }
+ }
+ return err;
+cleanup:
+ ia_css_isp_param_destroy_isp_parameters(mem_params, css_params);
+ return err;
+}
+
+void
+ia_css_isp_param_destroy_isp_parameters(
+ struct ia_css_isp_param_host_segments *mem_params,
+ struct ia_css_isp_param_css_segments *css_params)
+{
+ unsigned mem, pclass;
+
+ for (mem = 0; mem < IA_CSS_NUM_MEMORIES; mem++) {
+ for (pclass = 0; pclass < IA_CSS_NUM_PARAM_CLASSES; pclass++) {
+ if (mem_params->params[pclass][mem].address)
+ sh_css_free(mem_params->params[pclass][mem].address);
+ if (css_params->params[pclass][mem].address)
+ hmm_free(css_params->params[pclass][mem].address);
+ mem_params->params[pclass][mem].address = NULL;
+ css_params->params[pclass][mem].address = 0x0;
+ }
+ }
+}
+
+void
+ia_css_isp_param_load_fw_params(
+ const char *fw,
+ union ia_css_all_memory_offsets *mem_offsets,
+ const struct ia_css_isp_param_memory_offsets *memory_offsets,
+ bool init)
+{
+ unsigned pclass;
+ for (pclass = 0; pclass < IA_CSS_NUM_PARAM_CLASSES; pclass++) {
+ mem_offsets->array[pclass].ptr = NULL;
+ if (init)
+ mem_offsets->array[pclass].ptr = (void *)(fw + memory_offsets->offsets[pclass]);
+ }
+}
+
+enum ia_css_err
+ia_css_isp_param_copy_isp_mem_if_to_ddr(
+ struct ia_css_isp_param_css_segments *ddr,
+ const struct ia_css_isp_param_host_segments *host,
+ enum ia_css_param_class pclass)
+{
+ unsigned mem;
+
+ for (mem = 0; mem < N_IA_CSS_ISP_MEMORIES; mem++) {
+ size_t size = host->params[pclass][mem].size;
+ hrt_vaddress ddr_mem_ptr = ddr->params[pclass][mem].address;
+ char *host_mem_ptr = host->params[pclass][mem].address;
+ if (size != ddr->params[pclass][mem].size)
+ return IA_CSS_ERR_INTERNAL_ERROR;
+ if (!size)
+ continue;
+ mmgr_store(ddr_mem_ptr, host_mem_ptr, size);
+ }
+ return IA_CSS_SUCCESS;
+}
+
+void
+ia_css_isp_param_enable_pipeline(
+ const struct ia_css_isp_param_host_segments *mem_params)
+{
+	/* By protocol, bit 0 of the mandatory uint32_t first field of the
+	   input parameters is a disable bit. */
+ short dmem_offset = 0;
+
+ if (mem_params->params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM0].size == 0)
+ return;
+
+ *(uint32_t *)&mem_params->params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM0].address[dmem_offset] = 0x0;
+}
+
+
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/interface/ia_css_isys.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/interface/ia_css_isys.h
new file mode 100644
index 000000000000..02bf908d94e6
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/interface/ia_css_isys.h
@@ -0,0 +1,201 @@
+#ifndef ISP2401
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#else
+/**
+Support for Intel Camera Imaging ISP subsystem.
+Copyright (c) 2010 - 2015, Intel Corporation.
+
+This program is free software; you can redistribute it and/or modify it
+under the terms and conditions of the GNU General Public License,
+version 2, as published by the Free Software Foundation.
+
+This program is distributed in the hope it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+more details.
+*/
+#endif
+
+#ifndef __IA_CSS_ISYS_H__
+#define __IA_CSS_ISYS_H__
+
+#include <type_support.h>
+#include <input_system.h>
+#include <ia_css_input_port.h>
+#include <ia_css_stream_format.h>
+#include <ia_css_stream_public.h>
+#include <system_global.h>
+#include "ia_css_isys_comm.h"
+
+#ifdef USE_INPUT_SYSTEM_VERSION_2401
+/**
+ * Virtual Input System. (Input System 2401)
+ */
+typedef input_system_cfg_t ia_css_isys_descr_t;
+/** end of Virtual Input System */
+#endif
+
+#if defined(USE_INPUT_SYSTEM_VERSION_2) || defined(USE_INPUT_SYSTEM_VERSION_2401)
+input_system_error_t ia_css_isys_init(void);
+void ia_css_isys_uninit(void);
+mipi_port_ID_t ia_css_isys_port_to_mipi_port(
+ enum ia_css_csi2_port api_port);
+#endif
+
+#if defined(USE_INPUT_SYSTEM_VERSION_2401)
+
+/**
+ * @brief Register one (virtual) stream. This is used to track when all
+ * virtual streams are configured inside the input system. The CSI RX is
+ * only started when all registered streams are configured.
+ *
+ * @param[in] port CSI port
+ * @param[in] isys_stream_id Stream handle generated with ia_css_isys_generate_stream_id()
+ * Must be lower than SH_CSS_MAX_ISYS_CHANNEL_NODES
+ * @return IA_CSS_SUCCESS if successful, IA_CSS_ERR_INTERNAL_ERROR if
+ * there is already a stream registered with the same handle
+ */
+enum ia_css_err ia_css_isys_csi_rx_register_stream(
+ enum ia_css_csi2_port port,
+ uint32_t isys_stream_id);
+
+/**
+ * @brief Unregister one (virtual) stream. This is used to track when all
+ * virtual streams are configured inside the input system. The CSI RX is
+ * only started when all registered streams are configured.
+ *
+ * @param[in] port CSI port
+ * @param[in] isys_stream_id Stream handle generated with ia_css_isys_generate_stream_id()
+ * Must be lower than SH_CSS_MAX_ISYS_CHANNEL_NODES
+ * @return IA_CSS_SUCCESS if successful, IA_CSS_ERR_INTERNAL_ERROR if
+ * there is no stream registered with that handle
+ */
+enum ia_css_err ia_css_isys_csi_rx_unregister_stream(
+ enum ia_css_csi2_port port,
+ uint32_t isys_stream_id);
+
+enum ia_css_err ia_css_isys_convert_compressed_format(
+ struct ia_css_csi2_compression *comp,
+ struct input_system_cfg_s *cfg);
+unsigned int ia_css_csi2_calculate_input_system_alignment(
+ enum ia_css_stream_format fmt_type);
+#endif
+
+#if !defined(USE_INPUT_SYSTEM_VERSION_2401)
+/* CSS Receiver */
+void ia_css_isys_rx_configure(
+ const rx_cfg_t *config,
+ const enum ia_css_input_mode input_mode);
+
+void ia_css_isys_rx_disable(void);
+
+void ia_css_isys_rx_enable_all_interrupts(mipi_port_ID_t port);
+
+unsigned int ia_css_isys_rx_get_interrupt_reg(mipi_port_ID_t port);
+void ia_css_isys_rx_get_irq_info(mipi_port_ID_t port,
+ unsigned int *irq_infos);
+void ia_css_isys_rx_clear_irq_info(mipi_port_ID_t port,
+ unsigned int irq_infos);
+unsigned int ia_css_isys_rx_translate_irq_infos(unsigned int bits);
+
+#endif /* #if !defined(USE_INPUT_SYSTEM_VERSION_2401) */
+
+/** @brief Translate format and compression to format type.
+ *
+ * @param[in] input_format The input format.
+ * @param[in] compression The compression scheme.
+ * @param[out] fmt_type Pointer to the resulting format type.
+ * @return Error code.
+ *
+ * Translate an input format and mipi compression pair to the fmt_type.
+ * This is normally done by the sensor, but when using the input fifo, this
+ * format type must be submitted correctly by the application.
+ */
+enum ia_css_err ia_css_isys_convert_stream_format_to_mipi_format(
+ enum ia_css_stream_format input_format,
+ mipi_predictor_t compression,
+ unsigned int *fmt_type);
+
+#ifdef USE_INPUT_SYSTEM_VERSION_2401
+/**
+ * Virtual Input System. (Input System 2401)
+ */
+extern ia_css_isys_error_t ia_css_isys_stream_create(
+ ia_css_isys_descr_t *isys_stream_descr,
+ ia_css_isys_stream_h isys_stream,
+ uint32_t isys_stream_id);
+
+extern void ia_css_isys_stream_destroy(
+ ia_css_isys_stream_h isys_stream);
+
+extern ia_css_isys_error_t ia_css_isys_stream_calculate_cfg(
+ ia_css_isys_stream_h isys_stream,
+ ia_css_isys_descr_t *isys_stream_descr,
+ ia_css_isys_stream_cfg_t *isys_stream_cfg);
+
+extern void ia_css_isys_csi_rx_lut_rmgr_init(void);
+
+extern void ia_css_isys_csi_rx_lut_rmgr_uninit(void);
+
+extern bool ia_css_isys_csi_rx_lut_rmgr_acquire(
+ csi_rx_backend_ID_t backend,
+ csi_mipi_packet_type_t packet_type,
+ csi_rx_backend_lut_entry_t *entry);
+
+extern void ia_css_isys_csi_rx_lut_rmgr_release(
+ csi_rx_backend_ID_t backend,
+ csi_mipi_packet_type_t packet_type,
+ csi_rx_backend_lut_entry_t *entry);
+
+
+extern void ia_css_isys_ibuf_rmgr_init(void);
+
+extern void ia_css_isys_ibuf_rmgr_uninit(void);
+
+extern bool ia_css_isys_ibuf_rmgr_acquire(
+ uint32_t size,
+ uint32_t *start_addr);
+
+extern void ia_css_isys_ibuf_rmgr_release(
+ uint32_t *start_addr);
+
+extern void ia_css_isys_dma_channel_rmgr_init(void);
+
+extern void ia_css_isys_dma_channel_rmgr_uninit(void);
+
+extern bool ia_css_isys_dma_channel_rmgr_acquire(
+ isys2401_dma_ID_t dma_id,
+ isys2401_dma_channel *channel);
+
+extern void ia_css_isys_dma_channel_rmgr_release(
+ isys2401_dma_ID_t dma_id,
+ isys2401_dma_channel *channel);
+
+extern void ia_css_isys_stream2mmio_sid_rmgr_init(void);
+
+extern void ia_css_isys_stream2mmio_sid_rmgr_uninit(void);
+
+extern bool ia_css_isys_stream2mmio_sid_rmgr_acquire(
+ stream2mmio_ID_t stream2mmio,
+ stream2mmio_sid_ID_t *sid);
+
+extern void ia_css_isys_stream2mmio_sid_rmgr_release(
+ stream2mmio_ID_t stream2mmio,
+ stream2mmio_sid_ID_t *sid);
+
+/** end of Virtual Input System */
+#endif
+
+#endif /* __IA_CSS_ISYS_H__ */
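+
+/*
+ * Illustrative interrupt-polling sketch for the 2400-style receiver API
+ * above (the port value is a placeholder, not a value defined here):
+ *
+ *	mipi_port_ID_t port = (mipi_port_ID_t)0;
+ *	unsigned int infos;
+ *
+ *	ia_css_isys_rx_get_irq_info(port, &infos);
+ *	if (infos)
+ *		ia_css_isys_rx_clear_irq_info(port, infos);
+ */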
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/interface/ia_css_isys_comm.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/interface/ia_css_isys_comm.h
new file mode 100644
index 000000000000..0c3434ad0613
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/interface/ia_css_isys_comm.h
@@ -0,0 +1,69 @@
+#ifndef ISP2401
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#else
+/**
+Support for Intel Camera Imaging ISP subsystem.
+Copyright (c) 2010 - 2015, Intel Corporation.
+
+This program is free software; you can redistribute it and/or modify it
+under the terms and conditions of the GNU General Public License,
+version 2, as published by the Free Software Foundation.
+
+This program is distributed in the hope it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+more details.
+*/
+#endif
+
+#ifndef __IA_CSS_ISYS_COMM_H
+#define __IA_CSS_ISYS_COMM_H
+
+#include <type_support.h>
+#include <input_system.h>
+
+#ifdef USE_INPUT_SYSTEM_VERSION_2401
+#include <platform_support.h> /* inline */
+#include <input_system_global.h>
+#include <ia_css_stream_public.h> /* IA_CSS_STREAM_MAX_ISYS_STREAM_PER_CH */
+
+#define SH_CSS_NODES_PER_THREAD 2
+#define SH_CSS_MAX_ISYS_CHANNEL_NODES (SH_CSS_MAX_SP_THREADS * SH_CSS_NODES_PER_THREAD)
+
+/*
+ * a) ia_css_isys_stream_h & ia_css_isys_stream_cfg_t come from host.
+ *
+ * b) Here it is better to use actual structures for the stream handle
+ * instead of opaque handles. Otherwise, we would need another
+ * communication channel to interpret the opaque handle (a handle that is
+ * maintained by the host and must be passed to the SP for every stream
+ * open).
+ */
+typedef virtual_input_system_stream_t *ia_css_isys_stream_h;
+typedef virtual_input_system_stream_cfg_t ia_css_isys_stream_cfg_t;
+
+/*
+ * Boolean error/success status returned by the ISYS APIs.
+ */
+typedef bool ia_css_isys_error_t;
+
+static inline uint32_t ia_css_isys_generate_stream_id(
+ uint32_t sp_thread_id,
+ uint32_t stream_id)
+{
+ return sp_thread_id * IA_CSS_STREAM_MAX_ISYS_STREAM_PER_CH + stream_id;
+}
+
+#endif /* USE_INPUT_SYSTEM_VERSION_2401*/
+#endif /*_IA_CSS_ISYS_COMM_H */
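+
+/*
+ * Example of how a virtual stream handle id is derived and registered; the
+ * SP thread index 2, stream index 1 and the CSI port value are hypothetical,
+ * and ia_css_isys_csi_rx_register_stream() is declared in ia_css_isys.h:
+ *
+ *	enum ia_css_csi2_port port = (enum ia_css_csi2_port)0;
+ *	uint32_t id = ia_css_isys_generate_stream_id(2, 1);
+ *		(= 2 * IA_CSS_STREAM_MAX_ISYS_STREAM_PER_CH + 1)
+ *
+ *	if (ia_css_isys_csi_rx_register_stream(port, id) == IA_CSS_SUCCESS)
+ *		... configure the stream, later release it with
+ *		    ia_css_isys_csi_rx_unregister_stream(port, id) ...
+ */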
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/csi_rx_rmgr.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/csi_rx_rmgr.c
new file mode 100644
index 000000000000..d1d4f79c00f1
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/csi_rx_rmgr.c
@@ -0,0 +1,179 @@
+#ifndef ISP2401
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#else
+/**
+Support for Intel Camera Imaging ISP subsystem.
+Copyright (c) 2010 - 2015, Intel Corporation.
+
+This program is free software; you can redistribute it and/or modify it
+under the terms and conditions of the GNU General Public License,
+version 2, as published by the Free Software Foundation.
+
+This program is distributed in the hope it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+more details.
+*/
+#endif
+
+#include "system_global.h"
+
+#ifdef USE_INPUT_SYSTEM_VERSION_2401
+
+#include "assert_support.h"
+#include "platform_support.h"
+#include "ia_css_isys.h"
+#include "bitop_support.h"
+#include "ia_css_pipeline.h" /* ia_css_pipeline_get_pipe_io_status() */
+#include "sh_css_internal.h" /* sh_css_sp_pipeline_io_status
+ * SH_CSS_MAX_SP_THREADS
+ */
+#include "csi_rx_rmgr.h"
+
+static isys_csi_rx_rsrc_t isys_csi_rx_rsrc[N_CSI_RX_BACKEND_ID];
+
+void ia_css_isys_csi_rx_lut_rmgr_init(void)
+{
+ memset(isys_csi_rx_rsrc, 0, sizeof(isys_csi_rx_rsrc));
+}
+
+void ia_css_isys_csi_rx_lut_rmgr_uninit(void)
+{
+ memset(isys_csi_rx_rsrc, 0, sizeof(isys_csi_rx_rsrc));
+}
+
+bool ia_css_isys_csi_rx_lut_rmgr_acquire(
+ csi_rx_backend_ID_t backend,
+ csi_mipi_packet_type_t packet_type,
+ csi_rx_backend_lut_entry_t *entry)
+{
+ bool retval = false;
+ uint32_t max_num_packets_of_type;
+ uint32_t num_active_of_type;
+ isys_csi_rx_rsrc_t *cur_rsrc = NULL;
+ uint16_t i;
+
+ assert(backend < N_CSI_RX_BACKEND_ID);
+ assert((packet_type == CSI_MIPI_PACKET_TYPE_LONG) || (packet_type == CSI_MIPI_PACKET_TYPE_SHORT));
+ assert(entry != NULL);
+
+ if ((backend < N_CSI_RX_BACKEND_ID) && (entry != NULL)) {
+ cur_rsrc = &isys_csi_rx_rsrc[backend];
+ if (packet_type == CSI_MIPI_PACKET_TYPE_LONG) {
+ max_num_packets_of_type = N_LONG_PACKET_LUT_ENTRIES[backend];
+ num_active_of_type = cur_rsrc->num_long_packets;
+ } else {
+ max_num_packets_of_type = N_SHORT_PACKET_LUT_ENTRIES[backend];
+ num_active_of_type = cur_rsrc->num_short_packets;
+ }
+
+ if (num_active_of_type < max_num_packets_of_type) {
+ for (i = 0; i < max_num_packets_of_type; i++) {
+ if (bitop_getbit(cur_rsrc->active_table, i) == 0) {
+ bitop_setbit(cur_rsrc->active_table, i);
+
+ if (packet_type == CSI_MIPI_PACKET_TYPE_LONG) {
+ entry->long_packet_entry = i;
+ entry->short_packet_entry = 0;
+ cur_rsrc->num_long_packets++;
+ } else {
+ entry->long_packet_entry = 0;
+ entry->short_packet_entry = i;
+ cur_rsrc->num_short_packets++;
+ }
+ cur_rsrc->num_active++;
+ retval = true;
+ break;
+ }
+ }
+ }
+ }
+ return retval;
+}
+
+void ia_css_isys_csi_rx_lut_rmgr_release(
+ csi_rx_backend_ID_t backend,
+ csi_mipi_packet_type_t packet_type,
+ csi_rx_backend_lut_entry_t *entry)
+{
+ uint32_t max_num_packets;
+ isys_csi_rx_rsrc_t *cur_rsrc = NULL;
+ uint32_t packet_entry = 0;
+
+ assert(backend < N_CSI_RX_BACKEND_ID);
+ assert(entry != NULL);
+	assert((packet_type == CSI_MIPI_PACKET_TYPE_LONG) || (packet_type == CSI_MIPI_PACKET_TYPE_SHORT));
+
+ if ((backend < N_CSI_RX_BACKEND_ID) && (entry != NULL)) {
+ if (packet_type == CSI_MIPI_PACKET_TYPE_LONG) {
+ max_num_packets = N_LONG_PACKET_LUT_ENTRIES[backend];
+ packet_entry = entry->long_packet_entry;
+ } else {
+ max_num_packets = N_SHORT_PACKET_LUT_ENTRIES[backend];
+ packet_entry = entry->short_packet_entry;
+ }
+
+ cur_rsrc = &isys_csi_rx_rsrc[backend];
+ if ((packet_entry < max_num_packets) && (cur_rsrc->num_active > 0)) {
+ if (bitop_getbit(cur_rsrc->active_table, packet_entry) == 1) {
+ bitop_clearbit(cur_rsrc->active_table, packet_entry);
+
+ if (packet_type == CSI_MIPI_PACKET_TYPE_LONG)
+ cur_rsrc->num_long_packets--;
+ else
+ cur_rsrc->num_short_packets--;
+ cur_rsrc->num_active--;
+ }
+ }
+ }
+}
+
+enum ia_css_err ia_css_isys_csi_rx_register_stream(
+ enum ia_css_csi2_port port,
+ uint32_t isys_stream_id)
+{
+ enum ia_css_err retval = IA_CSS_ERR_INTERNAL_ERROR;
+
+ if ((port < N_INPUT_SYSTEM_CSI_PORT) &&
+ (isys_stream_id < SH_CSS_MAX_ISYS_CHANNEL_NODES)) {
+ struct sh_css_sp_pipeline_io_status *pipe_io_status;
+ pipe_io_status = ia_css_pipeline_get_pipe_io_status();
+ if (bitop_getbit(pipe_io_status->active[port], isys_stream_id) == 0) {
+ bitop_setbit(pipe_io_status->active[port], isys_stream_id);
+ pipe_io_status->running[port] = 0;
+ retval = IA_CSS_SUCCESS;
+ }
+ }
+ return retval;
+}
+
+enum ia_css_err ia_css_isys_csi_rx_unregister_stream(
+ enum ia_css_csi2_port port,
+ uint32_t isys_stream_id)
+{
+ enum ia_css_err retval = IA_CSS_ERR_INTERNAL_ERROR;
+
+ if ((port < N_INPUT_SYSTEM_CSI_PORT) &&
+ (isys_stream_id < SH_CSS_MAX_ISYS_CHANNEL_NODES)) {
+ struct sh_css_sp_pipeline_io_status *pipe_io_status;
+ pipe_io_status = ia_css_pipeline_get_pipe_io_status();
+ if (bitop_getbit(pipe_io_status->active[port], isys_stream_id) == 1) {
+ bitop_clearbit(pipe_io_status->active[port], isys_stream_id);
+ retval = IA_CSS_SUCCESS;
+ }
+ }
+ return retval;
+}
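+
+/*
+ * Usage sketch for the LUT resource manager above (the backend index is a
+ * placeholder):
+ *
+ *	csi_rx_backend_lut_entry_t entry;
+ *	csi_rx_backend_ID_t backend = (csi_rx_backend_ID_t)0;
+ *
+ *	if (ia_css_isys_csi_rx_lut_rmgr_acquire(backend,
+ *			CSI_MIPI_PACKET_TYPE_LONG, &entry)) {
+ *		... program the backend LUT with entry.long_packet_entry ...
+ *		ia_css_isys_csi_rx_lut_rmgr_release(backend,
+ *			CSI_MIPI_PACKET_TYPE_LONG, &entry);
+ *	}
+ */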
+#endif
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/csi_rx_rmgr.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/csi_rx_rmgr.h
new file mode 100644
index 000000000000..c27b0ab83c93
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/csi_rx_rmgr.h
@@ -0,0 +1,43 @@
+#ifndef ISP2401
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#else
+/**
+Support for Intel Camera Imaging ISP subsystem.
+Copyright (c) 2010 - 2015, Intel Corporation.
+
+This program is free software; you can redistribute it and/or modify it
+under the terms and conditions of the GNU General Public License,
+version 2, as published by the Free Software Foundation.
+
+This program is distributed in the hope it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+more details.
+*/
+#endif
+
+#ifndef __CSI_RX_RMGR_H_INCLUDED__
+#define __CSI_RX_RMGR_H_INCLUDED__
+
+typedef struct isys_csi_rx_rsrc_s isys_csi_rx_rsrc_t;
+struct isys_csi_rx_rsrc_s {
+ uint32_t active_table;
+ uint32_t num_active;
+ uint16_t num_long_packets;
+ uint16_t num_short_packets;
+};
+
+#endif /* __CSI_RX_RMGR_H_INCLUDED__ */
+
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/ibuf_ctrl_rmgr.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/ibuf_ctrl_rmgr.c
new file mode 100644
index 000000000000..76d9142fd37e
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/ibuf_ctrl_rmgr.c
@@ -0,0 +1,141 @@
+#ifndef ISP2401
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#else
+/**
+Support for Intel Camera Imaging ISP subsystem.
+Copyright (c) 2010 - 2015, Intel Corporation.
+
+This program is free software; you can redistribute it and/or modify it
+under the terms and conditions of the GNU General Public License,
+version 2, as published by the Free Software Foundation.
+
+This program is distributed in the hope it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+more details.
+*/
+#endif
+
+#include "system_global.h"
+
+#ifdef USE_INPUT_SYSTEM_VERSION_2401
+
+#include "assert_support.h"
+#include "platform_support.h"
+#include "ia_css_isys.h"
+#include "ibuf_ctrl_rmgr.h"
+
+static ibuf_rsrc_t ibuf_rsrc;
+
+static ibuf_handle_t *getHandle(uint16_t index)
+{
+ ibuf_handle_t *handle = NULL;
+
+ if (index < MAX_IBUF_HANDLES)
+ handle = &ibuf_rsrc.handles[index];
+ return handle;
+}
+
+void ia_css_isys_ibuf_rmgr_init(void)
+{
+ memset(&ibuf_rsrc, 0, sizeof(ibuf_rsrc));
+ ibuf_rsrc.free_size = MAX_INPUT_BUFFER_SIZE;
+}
+
+void ia_css_isys_ibuf_rmgr_uninit(void)
+{
+ memset(&ibuf_rsrc, 0, sizeof(ibuf_rsrc));
+ ibuf_rsrc.free_size = MAX_INPUT_BUFFER_SIZE;
+}
+
+bool ia_css_isys_ibuf_rmgr_acquire(
+ uint32_t size,
+ uint32_t *start_addr)
+{
+ bool retval = false;
+ bool input_buffer_found = false;
+ uint32_t aligned_size;
+ ibuf_handle_t *handle = NULL;
+ uint16_t i;
+
+ assert(start_addr != NULL);
+ assert(size > 0);
+
+ aligned_size = (size + (IBUF_ALIGN - 1)) & ~(IBUF_ALIGN - 1);
+
+ /* Check if there is an available unused handle with a size
+ * that will fulfill the request.
+ */
+ if (ibuf_rsrc.num_active < ibuf_rsrc.num_allocated) {
+ for (i = 0; i < ibuf_rsrc.num_allocated; i++) {
+ handle = getHandle(i);
+ if (!handle->active) {
+ if (handle->size >= aligned_size) {
+ handle->active = true;
+ input_buffer_found = true;
+ ibuf_rsrc.num_active++;
+ break;
+ }
+ }
+ }
+ }
+
+ if (!input_buffer_found) {
+ /* There were no available handles that fulfilled the
+ * request. Allocate a new handle with the requested size.
+ */
+ if ((ibuf_rsrc.num_allocated < MAX_IBUF_HANDLES) &&
+ (ibuf_rsrc.free_size >= aligned_size)) {
+ handle = getHandle(ibuf_rsrc.num_allocated);
+ handle->start_addr = ibuf_rsrc.free_start_addr;
+ handle->size = aligned_size;
+ handle->active = true;
+
+ ibuf_rsrc.free_start_addr += aligned_size;
+ ibuf_rsrc.free_size -= aligned_size;
+ ibuf_rsrc.num_active++;
+ ibuf_rsrc.num_allocated++;
+
+ input_buffer_found = true;
+ }
+ }
+
+ if (input_buffer_found && handle) {
+ *start_addr = handle->start_addr;
+ retval = true;
+ }
+
+ return retval;
+}
+
+void ia_css_isys_ibuf_rmgr_release(
+ uint32_t *start_addr)
+{
+ uint16_t i;
+ ibuf_handle_t *handle = NULL;
+
+ assert(start_addr != NULL);
+
+ for (i = 0; i < ibuf_rsrc.num_allocated; i++) {
+ handle = getHandle(i);
+ if ((handle->start_addr == *start_addr) &&
+ handle->active) {
+ handle->active = false;
+ ibuf_rsrc.num_active--;
+ break;
+ }
+ }
+}
+#endif
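The allocator above never hands memory back to the pool piecemeal: ia_css_isys_ibuf_rmgr_acquire() first tries to reuse an inactive handle whose size already fits (first fit) and otherwise carves a new handle off the free tail, while ia_css_isys_ibuf_rmgr_release() only marks the matching handle inactive so its range can be reused later. A minimal usage sketch, assuming nothing beyond the functions defined above (the 1024-byte size is an arbitrary example):

	uint32_t addr;

	ia_css_isys_ibuf_rmgr_init();
	if (ia_css_isys_ibuf_rmgr_acquire(1024, &addr)) {
		/* addr is the IBUF_ALIGN-aligned start of a region of at
		 * least 1024 bytes inside the shared input buffer */
		ia_css_isys_ibuf_rmgr_release(&addr);
	}
	ia_css_isys_ibuf_rmgr_uninit();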
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/ibuf_ctrl_rmgr.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/ibuf_ctrl_rmgr.h
new file mode 100644
index 000000000000..424cfe9f3b2a
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/ibuf_ctrl_rmgr.h
@@ -0,0 +1,55 @@
+#ifndef ISP2401
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#else
+/**
+Support for Intel Camera Imaging ISP subsystem.
+Copyright (c) 2010 - 2015, Intel Corporation.
+
+This program is free software; you can redistribute it and/or modify it
+under the terms and conditions of the GNU General Public License,
+version 2, as published by the Free Software Foundation.
+
+This program is distributed in the hope it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+more details.
+*/
+#endif
+
+#ifndef __IBUF_CTRL_RMGR_H_INCLUDED__
+#define __IBUF_CTRL_RMGR_H_INCLUDED__
+
+#define MAX_IBUF_HANDLES 24
+#define MAX_INPUT_BUFFER_SIZE (64 * 1024)
+#define IBUF_ALIGN 8
+
+typedef struct ibuf_handle_s ibuf_handle_t;
+struct ibuf_handle_s {
+ uint32_t start_addr;
+ uint32_t size;
+ bool active;
+};
+
+typedef struct ibuf_rsrc_s ibuf_rsrc_t;
+struct ibuf_rsrc_s {
+ uint32_t free_start_addr;
+ uint32_t free_size;
+ uint16_t num_active;
+ uint16_t num_allocated;
+ ibuf_handle_t handles[MAX_IBUF_HANDLES];
+};
+
+#endif /* __IBUF_CTRL_RMGR_H_INCLUDED__ */
+
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/isys_dma_rmgr.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/isys_dma_rmgr.c
new file mode 100644
index 000000000000..5032627342d9
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/isys_dma_rmgr.c
@@ -0,0 +1,103 @@
+#ifndef ISP2401
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#else
+/**
+Support for Intel Camera Imaging ISP subsystem.
+Copyright (c) 2010 - 2015, Intel Corporation.
+
+This program is free software; you can redistribute it and/or modify it
+under the terms and conditions of the GNU General Public License,
+version 2, as published by the Free Software Foundation.
+
+This program is distributed in the hope it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+more details.
+*/
+#endif
+
+#include "system_global.h"
+
+#ifdef USE_INPUT_SYSTEM_VERSION_2401
+
+#include "assert_support.h"
+#include "platform_support.h"
+#include "ia_css_isys.h"
+#include "bitop_support.h"
+#include "isys_dma_rmgr.h"
+
+static isys_dma_rsrc_t isys_dma_rsrc[N_ISYS2401_DMA_ID];
+
+void ia_css_isys_dma_channel_rmgr_init(void)
+{
+ memset(&isys_dma_rsrc, 0, sizeof(isys_dma_rsrc));
+}
+
+void ia_css_isys_dma_channel_rmgr_uninit(void)
+{
+ memset(&isys_dma_rsrc, 0, sizeof(isys_dma_rsrc));
+}
+
+bool ia_css_isys_dma_channel_rmgr_acquire(
+ isys2401_dma_ID_t dma_id,
+ isys2401_dma_channel *channel)
+{
+ bool retval = false;
+ isys2401_dma_channel i;
+ isys2401_dma_channel max_dma_channel;
+ isys_dma_rsrc_t *cur_rsrc = NULL;
+
+ assert(dma_id < N_ISYS2401_DMA_ID);
+ assert(channel != NULL);
+
+ max_dma_channel = N_ISYS2401_DMA_CHANNEL_PROCS[dma_id];
+ cur_rsrc = &isys_dma_rsrc[dma_id];
+
+ if (cur_rsrc->num_active < max_dma_channel) {
+ for (i = ISYS2401_DMA_CHANNEL_0; i < N_ISYS2401_DMA_CHANNEL; i++) {
+ if (bitop_getbit(cur_rsrc->active_table, i) == 0) {
+ bitop_setbit(cur_rsrc->active_table, i);
+ *channel = i;
+ cur_rsrc->num_active++;
+ retval = true;
+ break;
+ }
+ }
+ }
+
+ return retval;
+}
+
+void ia_css_isys_dma_channel_rmgr_release(
+ isys2401_dma_ID_t dma_id,
+ isys2401_dma_channel *channel)
+{
+ isys2401_dma_channel max_dma_channel;
+ isys_dma_rsrc_t *cur_rsrc = NULL;
+
+ assert(dma_id < N_ISYS2401_DMA_ID);
+ assert(channel != NULL);
+
+ max_dma_channel = N_ISYS2401_DMA_CHANNEL_PROCS[dma_id];
+ cur_rsrc = &isys_dma_rsrc[dma_id];
+
+ if ((*channel < max_dma_channel) && (cur_rsrc->num_active > 0)) {
+ if (bitop_getbit(cur_rsrc->active_table, *channel) == 1) {
+ bitop_clearbit(cur_rsrc->active_table, *channel);
+ cur_rsrc->num_active--;
+ }
+ }
+}
+#endif
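This manager tracks channel ownership with one bit per channel in active_table: acquire scans for the first clear bit, sets it and returns that channel index; release clears the bit again. A short acquire/release pairing sketch, using only the functions above plus the ISYS2401_DMA0_ID instance referenced later in this patch:

	isys2401_dma_channel channel;

	if (ia_css_isys_dma_channel_rmgr_acquire(ISYS2401_DMA0_ID, &channel)) {
		/* program the ISYS2401 DMA through "channel" here */
		ia_css_isys_dma_channel_rmgr_release(ISYS2401_DMA0_ID, &channel);
	}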
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/isys_dma_rmgr.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/isys_dma_rmgr.h
new file mode 100644
index 000000000000..b2c286537774
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/isys_dma_rmgr.h
@@ -0,0 +1,41 @@
+#ifndef ISP2401
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#else
+/**
+Support for Intel Camera Imaging ISP subsystem.
+Copyright (c) 2010 - 2015, Intel Corporation.
+
+This program is free software; you can redistribute it and/or modify it
+under the terms and conditions of the GNU General Public License,
+version 2, as published by the Free Software Foundation.
+
+This program is distributed in the hope it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+more details.
+*/
+#endif
+
+#ifndef __ISYS_DMA_RMGR_H_INCLUDED__
+#define __ISYS_DMA_RMGR_H_INCLUDED__
+
+typedef struct isys_dma_rsrc_s isys_dma_rsrc_t;
+struct isys_dma_rsrc_s {
+ uint32_t active_table;
+ uint16_t num_active;
+};
+
+#endif /* __ISYS_DMA_RMGR_H_INCLUDED__ */
+
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/isys_init.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/isys_init.c
new file mode 100644
index 000000000000..239ef310bdeb
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/isys_init.c
@@ -0,0 +1,141 @@
+#ifndef ISP2401
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#else
+/**
+Support for Intel Camera Imaging ISP subsystem.
+Copyright (c) 2010 - 2015, Intel Corporation.
+
+This program is free software; you can redistribute it and/or modify it
+under the terms and conditions of the GNU General Public License,
+version 2, as published by the Free Software Foundation.
+
+This program is distributed in the hope it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+more details.
+*/
+#endif
+
+#include "input_system.h"
+
+#ifdef HAS_INPUT_SYSTEM_VERSION_2
+#include "ia_css_isys.h"
+#include "platform_support.h"
+
+#ifdef USE_INPUT_SYSTEM_VERSION_2401
+#include "isys_dma.h" /* isys2401_dma_set_max_burst_size() */
+#include "isys_irq.h"
+#endif
+
+#if defined(USE_INPUT_SYSTEM_VERSION_2)
+input_system_error_t ia_css_isys_init(void)
+{
+ backend_channel_cfg_t backend_ch0;
+ backend_channel_cfg_t backend_ch1;
+ target_cfg2400_t targetB;
+ target_cfg2400_t targetC;
+ uint32_t acq_mem_region_size = 24;
+ uint32_t acq_nof_mem_regions = 2;
+ input_system_error_t error = INPUT_SYSTEM_ERR_NO_ERROR;
+
+ memset(&backend_ch0, 0, sizeof(backend_channel_cfg_t));
+ memset(&backend_ch1, 0, sizeof(backend_channel_cfg_t));
+ memset(&targetB, 0, sizeof(targetB));
+ memset(&targetC, 0, sizeof(targetC));
+
+ error = input_system_configuration_reset();
+ if (error != INPUT_SYSTEM_ERR_NO_ERROR)
+ return error;
+
+ error = input_system_csi_xmem_channel_cfg(
+ 0, /*ch_id */
+ INPUT_SYSTEM_PORT_A, /*port */
+ backend_ch0, /*backend_ch */
+ 32, /*mem_region_size */
+ 6, /*nof_mem_regions */
+ acq_mem_region_size, /*acq_mem_region_size */
+ acq_nof_mem_regions, /*acq_nof_mem_regions */
+ targetB, /*target */
+ 3); /*nof_xmem_buffers */
+ if (error != INPUT_SYSTEM_ERR_NO_ERROR)
+ return error;
+
+ error = input_system_csi_xmem_channel_cfg(
+ 1, /*ch_id */
+ INPUT_SYSTEM_PORT_B, /*port */
+ backend_ch0, /*backend_ch */
+ 16, /*mem_region_size */
+ 3, /*nof_mem_regions */
+ acq_mem_region_size, /*acq_mem_region_size */
+ acq_nof_mem_regions, /*acq_nof_mem_regions */
+ targetB, /*target */
+ 3); /*nof_xmem_buffers */
+ if (error != INPUT_SYSTEM_ERR_NO_ERROR)
+ return error;
+
+ error = input_system_csi_xmem_channel_cfg(
+ 2, /*ch_id */
+ INPUT_SYSTEM_PORT_C, /*port */
+ backend_ch1, /*backend_ch */
+ 32, /*mem_region_size */
+ 3, /*nof_mem_regions */
+ acq_mem_region_size, /*acq_mem_region_size */
+ acq_nof_mem_regions, /*acq_nof_mem_regions */
+ targetC, /*target */
+ 2); /*nof_xmem_buffers */
+ if (error != INPUT_SYSTEM_ERR_NO_ERROR)
+ return error;
+
+ error = input_system_configuration_commit();
+
+ return error;
+}
+#elif defined(USE_INPUT_SYSTEM_VERSION_2401)
+input_system_error_t ia_css_isys_init(void)
+{
+ input_system_error_t error = INPUT_SYSTEM_ERR_NO_ERROR;
+
+ ia_css_isys_csi_rx_lut_rmgr_init();
+ ia_css_isys_ibuf_rmgr_init();
+ ia_css_isys_dma_channel_rmgr_init();
+ ia_css_isys_stream2mmio_sid_rmgr_init();
+
+ isys2401_dma_set_max_burst_size(ISYS2401_DMA0_ID,
+ 1 /* Non Burst DMA transactions */);
+
+ /* Enable 2401 input system IRQ status for driver to retrieve */
+ isys_irqc_status_enable(ISYS_IRQ0_ID);
+ isys_irqc_status_enable(ISYS_IRQ1_ID);
+ isys_irqc_status_enable(ISYS_IRQ2_ID);
+
+ return error;
+}
+#endif
+
+#if defined(USE_INPUT_SYSTEM_VERSION_2)
+void ia_css_isys_uninit(void)
+{
+}
+#elif defined(USE_INPUT_SYSTEM_VERSION_2401)
+void ia_css_isys_uninit(void)
+{
+ ia_css_isys_csi_rx_lut_rmgr_uninit();
+ ia_css_isys_ibuf_rmgr_uninit();
+ ia_css_isys_dma_channel_rmgr_uninit();
+ ia_css_isys_stream2mmio_sid_rmgr_uninit();
+}
+#endif
+
+#endif
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/isys_stream2mmio_rmgr.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/isys_stream2mmio_rmgr.c
new file mode 100644
index 000000000000..a93c7f44ff12
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/isys_stream2mmio_rmgr.c
@@ -0,0 +1,105 @@
+#ifndef ISP2401
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#else
+/**
+Support for Intel Camera Imaging ISP subsystem.
+Copyright (c) 2010 - 2015, Intel Corporation.
+
+This program is free software; you can redistribute it and/or modify it
+under the terms and conditions of the GNU General Public License,
+version 2, as published by the Free Software Foundation.
+
+This program is distributed in the hope it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+more details.
+*/
+#endif
+
+#include "system_global.h"
+
+#ifdef USE_INPUT_SYSTEM_VERSION_2401
+
+#include "assert_support.h"
+#include "platform_support.h"
+#include "ia_css_isys.h"
+#include "bitop_support.h"
+#include "isys_stream2mmio_rmgr.h"
+
+static isys_stream2mmio_rsrc_t isys_stream2mmio_rsrc[N_STREAM2MMIO_ID];
+
+void ia_css_isys_stream2mmio_sid_rmgr_init(void)
+{
+ memset(isys_stream2mmio_rsrc, 0, sizeof(isys_stream2mmio_rsrc));
+}
+
+void ia_css_isys_stream2mmio_sid_rmgr_uninit(void)
+{
+ memset(isys_stream2mmio_rsrc, 0, sizeof(isys_stream2mmio_rsrc));
+}
+
+bool ia_css_isys_stream2mmio_sid_rmgr_acquire(
+ stream2mmio_ID_t stream2mmio,
+ stream2mmio_sid_ID_t *sid)
+{
+ bool retval = false;
+ stream2mmio_sid_ID_t max_sid;
+ isys_stream2mmio_rsrc_t *cur_rsrc = NULL;
+ stream2mmio_sid_ID_t i;
+
+ assert(stream2mmio < N_STREAM2MMIO_ID);
+ assert(sid != NULL);
+
+ if ((stream2mmio < N_STREAM2MMIO_ID) && (sid != NULL)) {
+ max_sid = N_STREAM2MMIO_SID_PROCS[stream2mmio];
+ cur_rsrc = &isys_stream2mmio_rsrc[stream2mmio];
+
+ if (cur_rsrc->num_active < max_sid) {
+ for (i = STREAM2MMIO_SID0_ID; i < max_sid; i++) {
+ if (bitop_getbit(cur_rsrc->active_table, i) == 0) {
+ bitop_setbit(cur_rsrc->active_table, i);
+ *sid = i;
+ cur_rsrc->num_active++;
+ retval = true;
+ break;
+ }
+ }
+ }
+ }
+ return retval;
+}
+
+void ia_css_isys_stream2mmio_sid_rmgr_release(
+ stream2mmio_ID_t stream2mmio,
+ stream2mmio_sid_ID_t *sid)
+{
+ stream2mmio_sid_ID_t max_sid;
+ isys_stream2mmio_rsrc_t *cur_rsrc = NULL;
+
+ assert(stream2mmio < N_STREAM2MMIO_ID);
+ assert(sid != NULL);
+
+ if ((stream2mmio < N_STREAM2MMIO_ID) && (sid != NULL)) {
+ max_sid = N_STREAM2MMIO_SID_PROCS[stream2mmio];
+ cur_rsrc = &isys_stream2mmio_rsrc[stream2mmio];
+ if ((*sid < max_sid) && (cur_rsrc->num_active > 0)) {
+ if (bitop_getbit(cur_rsrc->active_table, *sid) == 1) {
+ bitop_clearbit(cur_rsrc->active_table, *sid);
+ cur_rsrc->num_active--;
+ }
+ }
+ }
+}
+#endif
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/isys_stream2mmio_rmgr.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/isys_stream2mmio_rmgr.h
new file mode 100644
index 000000000000..4f63005b1071
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/isys_stream2mmio_rmgr.h
@@ -0,0 +1,41 @@
+#ifndef ISP2401
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#else
+/**
+Support for Intel Camera Imaging ISP subsystem.
+Copyright (c) 2010 - 2015, Intel Corporation.
+
+This program is free software; you can redistribute it and/or modify it
+under the terms and conditions of the GNU General Public License,
+version 2, as published by the Free Software Foundation.
+
+This program is distributed in the hope it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+more details.
+*/
+#endif
+
+#ifndef __ISYS_STREAM2MMIO_RMGR_H_INCLUDED__
+#define __ISYS_STREAM2MMIO_RMGR_H_INCLUDED__
+
+typedef struct isys_stream2mmio_rsrc_s isys_stream2mmio_rsrc_t;
+struct isys_stream2mmio_rsrc_s {
+ uint32_t active_table;
+ uint16_t num_active;
+};
+
+#endif /* __ISYS_STREAM2MMIO_RMGR_H_INCLUDED__ */
+
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/rx.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/rx.c
new file mode 100644
index 000000000000..46a157f64343
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/rx.c
@@ -0,0 +1,607 @@
+#ifndef ISP2401
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#else
+/**
+Support for Intel Camera Imaging ISP subsystem.
+Copyright (c) 2010 - 2015, Intel Corporation.
+
+This program is free software; you can redistribute it and/or modify it
+under the terms and conditions of the GNU General Public License,
+version 2, as published by the Free Software Foundation.
+
+This program is distributed in the hope it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+more details.
+*/
+#endif
+
+#define __INLINE_INPUT_SYSTEM__
+#include "input_system.h"
+#include "assert_support.h"
+#include "ia_css_isys.h"
+#include "ia_css_irq.h"
+#include "sh_css_internal.h"
+
+#if !defined(USE_INPUT_SYSTEM_VERSION_2401)
+void ia_css_isys_rx_enable_all_interrupts(mipi_port_ID_t port)
+{
+ hrt_data bits = receiver_port_reg_load(RX0_ID,
+ port,
+ _HRT_CSS_RECEIVER_IRQ_ENABLE_REG_IDX);
+
+ bits |= (1U << _HRT_CSS_RECEIVER_IRQ_OVERRUN_BIT) |
+#if defined(HAS_RX_VERSION_2)
+ (1U << _HRT_CSS_RECEIVER_IRQ_INIT_TIMEOUT_BIT) |
+#endif
+ (1U << _HRT_CSS_RECEIVER_IRQ_SLEEP_MODE_ENTRY_BIT) |
+ (1U << _HRT_CSS_RECEIVER_IRQ_SLEEP_MODE_EXIT_BIT) |
+ (1U << _HRT_CSS_RECEIVER_IRQ_ERR_SOT_HS_BIT) |
+ (1U << _HRT_CSS_RECEIVER_IRQ_ERR_SOT_SYNC_HS_BIT) |
+ (1U << _HRT_CSS_RECEIVER_IRQ_ERR_CONTROL_BIT) |
+ (1U << _HRT_CSS_RECEIVER_IRQ_ERR_ECC_DOUBLE_BIT) |
+ (1U << _HRT_CSS_RECEIVER_IRQ_ERR_ECC_CORRECTED_BIT) |
+ /*(1U << _HRT_CSS_RECEIVER_IRQ_ERR_ECC_NO_CORRECTION_BIT) | */
+ (1U << _HRT_CSS_RECEIVER_IRQ_ERR_CRC_BIT) |
+ (1U << _HRT_CSS_RECEIVER_IRQ_ERR_ID_BIT) |
+ (1U << _HRT_CSS_RECEIVER_IRQ_ERR_FRAME_SYNC_BIT) |
+ (1U << _HRT_CSS_RECEIVER_IRQ_ERR_FRAME_DATA_BIT) |
+ (1U << _HRT_CSS_RECEIVER_IRQ_DATA_TIMEOUT_BIT) |
+ (1U << _HRT_CSS_RECEIVER_IRQ_ERR_ESCAPE_BIT);
+ /*(1U << _HRT_CSS_RECEIVER_IRQ_ERR_LINE_SYNC_BIT); */
+
+ receiver_port_reg_store(RX0_ID,
+ port,
+ _HRT_CSS_RECEIVER_IRQ_ENABLE_REG_IDX, bits);
+
+ /*
+ * The CSI is nested into the Iunit IRQ's
+ */
+ ia_css_irq_enable(IA_CSS_IRQ_INFO_CSS_RECEIVER_ERROR, true);
+
+ return;
+}
+
+/* This function converts between the enum used on the CSS API and the
+ * internal DLI enum type.
+ * We do not use an array for this since we cannot use named array
+ * initializers in Windows. Without that there is no easy way to guarantee
+ * that the array values would be in the correct order.
+ */
+mipi_port_ID_t ia_css_isys_port_to_mipi_port(enum ia_css_csi2_port api_port)
+{
+ /* In this module the validity of the input variable should
+ * have been checked already, so we do not check for erroneous
+ * values. */
+ mipi_port_ID_t port = MIPI_PORT0_ID;
+
+ if (api_port == IA_CSS_CSI2_PORT1)
+ port = MIPI_PORT1_ID;
+ else if (api_port == IA_CSS_CSI2_PORT2)
+ port = MIPI_PORT2_ID;
+
+ return port;
+}
+
+unsigned int ia_css_isys_rx_get_interrupt_reg(mipi_port_ID_t port)
+{
+ return receiver_port_reg_load(RX0_ID,
+ port,
+ _HRT_CSS_RECEIVER_IRQ_STATUS_REG_IDX);
+}
+
+void ia_css_rx_get_irq_info(unsigned int *irq_infos)
+{
+ ia_css_rx_port_get_irq_info(IA_CSS_CSI2_PORT1, irq_infos);
+}
+
+void ia_css_rx_port_get_irq_info(enum ia_css_csi2_port api_port,
+ unsigned int *irq_infos)
+{
+ mipi_port_ID_t port = ia_css_isys_port_to_mipi_port(api_port);
+ ia_css_isys_rx_get_irq_info(port, irq_infos);
+}
+
+void ia_css_isys_rx_get_irq_info(mipi_port_ID_t port,
+ unsigned int *irq_infos)
+{
+ unsigned int bits;
+
+ assert(irq_infos != NULL);
+ bits = ia_css_isys_rx_get_interrupt_reg(port);
+ *irq_infos = ia_css_isys_rx_translate_irq_infos(bits);
+}
+
+/* Translate register bits to CSS API enum mask */
+unsigned int ia_css_isys_rx_translate_irq_infos(unsigned int bits)
+{
+ unsigned int infos = 0;
+
+ if (bits & (1U << _HRT_CSS_RECEIVER_IRQ_OVERRUN_BIT))
+ infos |= IA_CSS_RX_IRQ_INFO_BUFFER_OVERRUN;
+#if defined(HAS_RX_VERSION_2)
+ if (bits & (1U << _HRT_CSS_RECEIVER_IRQ_INIT_TIMEOUT_BIT))
+ infos |= IA_CSS_RX_IRQ_INFO_INIT_TIMEOUT;
+#endif
+ if (bits & (1U << _HRT_CSS_RECEIVER_IRQ_SLEEP_MODE_ENTRY_BIT))
+ infos |= IA_CSS_RX_IRQ_INFO_ENTER_SLEEP_MODE;
+ if (bits & (1U << _HRT_CSS_RECEIVER_IRQ_SLEEP_MODE_EXIT_BIT))
+ infos |= IA_CSS_RX_IRQ_INFO_EXIT_SLEEP_MODE;
+ if (bits & (1U << _HRT_CSS_RECEIVER_IRQ_ERR_ECC_CORRECTED_BIT))
+ infos |= IA_CSS_RX_IRQ_INFO_ECC_CORRECTED;
+ if (bits & (1U << _HRT_CSS_RECEIVER_IRQ_ERR_SOT_HS_BIT))
+ infos |= IA_CSS_RX_IRQ_INFO_ERR_SOT;
+ if (bits & (1U << _HRT_CSS_RECEIVER_IRQ_ERR_SOT_SYNC_HS_BIT))
+ infos |= IA_CSS_RX_IRQ_INFO_ERR_SOT_SYNC;
+ if (bits & (1U << _HRT_CSS_RECEIVER_IRQ_ERR_CONTROL_BIT))
+ infos |= IA_CSS_RX_IRQ_INFO_ERR_CONTROL;
+ if (bits & (1U << _HRT_CSS_RECEIVER_IRQ_ERR_ECC_DOUBLE_BIT))
+ infos |= IA_CSS_RX_IRQ_INFO_ERR_ECC_DOUBLE;
+ if (bits & (1U << _HRT_CSS_RECEIVER_IRQ_ERR_CRC_BIT))
+ infos |= IA_CSS_RX_IRQ_INFO_ERR_CRC;
+ if (bits & (1U << _HRT_CSS_RECEIVER_IRQ_ERR_ID_BIT))
+ infos |= IA_CSS_RX_IRQ_INFO_ERR_UNKNOWN_ID;
+ if (bits & (1U << _HRT_CSS_RECEIVER_IRQ_ERR_FRAME_SYNC_BIT))
+ infos |= IA_CSS_RX_IRQ_INFO_ERR_FRAME_SYNC;
+ if (bits & (1U << _HRT_CSS_RECEIVER_IRQ_ERR_FRAME_DATA_BIT))
+ infos |= IA_CSS_RX_IRQ_INFO_ERR_FRAME_DATA;
+ if (bits & (1U << _HRT_CSS_RECEIVER_IRQ_DATA_TIMEOUT_BIT))
+ infos |= IA_CSS_RX_IRQ_INFO_ERR_DATA_TIMEOUT;
+ if (bits & (1U << _HRT_CSS_RECEIVER_IRQ_ERR_ESCAPE_BIT))
+ infos |= IA_CSS_RX_IRQ_INFO_ERR_UNKNOWN_ESC;
+ if (bits & (1U << _HRT_CSS_RECEIVER_IRQ_ERR_LINE_SYNC_BIT))
+ infos |= IA_CSS_RX_IRQ_INFO_ERR_LINE_SYNC;
+
+ return infos;
+}
+
+void ia_css_rx_clear_irq_info(unsigned int irq_infos)
+{
+ ia_css_rx_port_clear_irq_info(IA_CSS_CSI2_PORT1, irq_infos);
+}
+
+void ia_css_rx_port_clear_irq_info(enum ia_css_csi2_port api_port, unsigned int irq_infos)
+{
+ mipi_port_ID_t port = ia_css_isys_port_to_mipi_port(api_port);
+ ia_css_isys_rx_clear_irq_info(port, irq_infos);
+}
+
+void ia_css_isys_rx_clear_irq_info(mipi_port_ID_t port, unsigned int irq_infos)
+{
+ hrt_data bits = receiver_port_reg_load(RX0_ID,
+ port,
+ _HRT_CSS_RECEIVER_IRQ_ENABLE_REG_IDX);
+
+ /* MW: Why do we remap the receiver bitmap? */
+ if (irq_infos & IA_CSS_RX_IRQ_INFO_BUFFER_OVERRUN)
+ bits |= 1U << _HRT_CSS_RECEIVER_IRQ_OVERRUN_BIT;
+#if defined(HAS_RX_VERSION_2)
+ if (irq_infos & IA_CSS_RX_IRQ_INFO_INIT_TIMEOUT)
+ bits |= 1U << _HRT_CSS_RECEIVER_IRQ_INIT_TIMEOUT_BIT;
+#endif
+ if (irq_infos & IA_CSS_RX_IRQ_INFO_ENTER_SLEEP_MODE)
+ bits |= 1U << _HRT_CSS_RECEIVER_IRQ_SLEEP_MODE_ENTRY_BIT;
+ if (irq_infos & IA_CSS_RX_IRQ_INFO_EXIT_SLEEP_MODE)
+ bits |= 1U << _HRT_CSS_RECEIVER_IRQ_SLEEP_MODE_EXIT_BIT;
+ if (irq_infos & IA_CSS_RX_IRQ_INFO_ECC_CORRECTED)
+ bits |= 1U << _HRT_CSS_RECEIVER_IRQ_ERR_ECC_CORRECTED_BIT;
+ if (irq_infos & IA_CSS_RX_IRQ_INFO_ERR_SOT)
+ bits |= 1U << _HRT_CSS_RECEIVER_IRQ_ERR_SOT_HS_BIT;
+ if (irq_infos & IA_CSS_RX_IRQ_INFO_ERR_SOT_SYNC)
+ bits |= 1U << _HRT_CSS_RECEIVER_IRQ_ERR_SOT_SYNC_HS_BIT;
+ if (irq_infos & IA_CSS_RX_IRQ_INFO_ERR_CONTROL)
+ bits |= 1U << _HRT_CSS_RECEIVER_IRQ_ERR_CONTROL_BIT;
+ if (irq_infos & IA_CSS_RX_IRQ_INFO_ERR_ECC_DOUBLE)
+ bits |= 1U << _HRT_CSS_RECEIVER_IRQ_ERR_ECC_DOUBLE_BIT;
+ if (irq_infos & IA_CSS_RX_IRQ_INFO_ERR_CRC)
+ bits |= 1U << _HRT_CSS_RECEIVER_IRQ_ERR_CRC_BIT;
+ if (irq_infos & IA_CSS_RX_IRQ_INFO_ERR_UNKNOWN_ID)
+ bits |= 1U << _HRT_CSS_RECEIVER_IRQ_ERR_ID_BIT;
+ if (irq_infos & IA_CSS_RX_IRQ_INFO_ERR_FRAME_SYNC)
+ bits |= 1U << _HRT_CSS_RECEIVER_IRQ_ERR_FRAME_SYNC_BIT;
+ if (irq_infos & IA_CSS_RX_IRQ_INFO_ERR_FRAME_DATA)
+ bits |= 1U << _HRT_CSS_RECEIVER_IRQ_ERR_FRAME_DATA_BIT;
+ if (irq_infos & IA_CSS_RX_IRQ_INFO_ERR_DATA_TIMEOUT)
+ bits |= 1U << _HRT_CSS_RECEIVER_IRQ_DATA_TIMEOUT_BIT;
+ if (irq_infos & IA_CSS_RX_IRQ_INFO_ERR_UNKNOWN_ESC)
+ bits |= 1U << _HRT_CSS_RECEIVER_IRQ_ERR_ESCAPE_BIT;
+ if (irq_infos & IA_CSS_RX_IRQ_INFO_ERR_LINE_SYNC)
+ bits |= 1U << _HRT_CSS_RECEIVER_IRQ_ERR_LINE_SYNC_BIT;
+
+ receiver_port_reg_store(RX0_ID,
+ port,
+ _HRT_CSS_RECEIVER_IRQ_ENABLE_REG_IDX, bits);
+
+ return;
+}
+#endif /* #if !defined(USE_INPUT_SYSTEM_VERSION_2401) */
+
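A minimal interrupt-handling sketch built only on the RX helpers above: read the translated IRQ mask for a port, act on the bits of interest, then clear what was handled. The port and the error bits tested here are illustrative, not a recommendation:

	unsigned int infos;

	ia_css_rx_port_get_irq_info(IA_CSS_CSI2_PORT1, &infos);
	if (infos & (IA_CSS_RX_IRQ_INFO_ERR_CRC | IA_CSS_RX_IRQ_INFO_ERR_SOT)) {
		/* report or recover from the receiver error */
	}
	ia_css_rx_port_clear_irq_info(IA_CSS_CSI2_PORT1, infos);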
+enum ia_css_err ia_css_isys_convert_stream_format_to_mipi_format(
+ enum ia_css_stream_format input_format,
+ mipi_predictor_t compression,
+ unsigned int *fmt_type)
+{
+ assert(fmt_type != NULL);
+ /*
+ * Custom (user defined) modes. Used for compressed
+ * MIPI transfers
+ *
+ * Checkpatch thinks the indent before "if" is suspect
+ * I think the only suspect part is the missing "else"
+ * because of the return.
+ */
+ if (compression != MIPI_PREDICTOR_NONE) {
+ switch (input_format) {
+ case IA_CSS_STREAM_FORMAT_RAW_6:
+ *fmt_type = 6;
+ break;
+ case IA_CSS_STREAM_FORMAT_RAW_7:
+ *fmt_type = 7;
+ break;
+ case IA_CSS_STREAM_FORMAT_RAW_8:
+ *fmt_type = 8;
+ break;
+ case IA_CSS_STREAM_FORMAT_RAW_10:
+ *fmt_type = 10;
+ break;
+ case IA_CSS_STREAM_FORMAT_RAW_12:
+ *fmt_type = 12;
+ break;
+ case IA_CSS_STREAM_FORMAT_RAW_14:
+ *fmt_type = 14;
+ break;
+ case IA_CSS_STREAM_FORMAT_RAW_16:
+ *fmt_type = 16;
+ break;
+ default:
+ return IA_CSS_ERR_INTERNAL_ERROR;
+ }
+ return IA_CSS_SUCCESS;
+ }
+ /*
+ * This mapping comes from the Arasan CSS function spec
+ * (CSS_func_spec1.08_ahb_sep29_08.pdf).
+ *
+ * MW: For some reason the mapping is not 1-to-1
+ */
+ switch (input_format) {
+ case IA_CSS_STREAM_FORMAT_RGB_888:
+ *fmt_type = MIPI_FORMAT_RGB888;
+ break;
+ case IA_CSS_STREAM_FORMAT_RGB_555:
+ *fmt_type = MIPI_FORMAT_RGB555;
+ break;
+ case IA_CSS_STREAM_FORMAT_RGB_444:
+ *fmt_type = MIPI_FORMAT_RGB444;
+ break;
+ case IA_CSS_STREAM_FORMAT_RGB_565:
+ *fmt_type = MIPI_FORMAT_RGB565;
+ break;
+ case IA_CSS_STREAM_FORMAT_RGB_666:
+ *fmt_type = MIPI_FORMAT_RGB666;
+ break;
+ case IA_CSS_STREAM_FORMAT_RAW_8:
+ *fmt_type = MIPI_FORMAT_RAW8;
+ break;
+ case IA_CSS_STREAM_FORMAT_RAW_10:
+ *fmt_type = MIPI_FORMAT_RAW10;
+ break;
+ case IA_CSS_STREAM_FORMAT_RAW_6:
+ *fmt_type = MIPI_FORMAT_RAW6;
+ break;
+ case IA_CSS_STREAM_FORMAT_RAW_7:
+ *fmt_type = MIPI_FORMAT_RAW7;
+ break;
+ case IA_CSS_STREAM_FORMAT_RAW_12:
+ *fmt_type = MIPI_FORMAT_RAW12;
+ break;
+ case IA_CSS_STREAM_FORMAT_RAW_14:
+ *fmt_type = MIPI_FORMAT_RAW14;
+ break;
+ case IA_CSS_STREAM_FORMAT_YUV420_8:
+ *fmt_type = MIPI_FORMAT_YUV420_8;
+ break;
+ case IA_CSS_STREAM_FORMAT_YUV420_10:
+ *fmt_type = MIPI_FORMAT_YUV420_10;
+ break;
+ case IA_CSS_STREAM_FORMAT_YUV422_8:
+ *fmt_type = MIPI_FORMAT_YUV422_8;
+ break;
+ case IA_CSS_STREAM_FORMAT_YUV422_10:
+ *fmt_type = MIPI_FORMAT_YUV422_10;
+ break;
+ case IA_CSS_STREAM_FORMAT_YUV420_8_LEGACY:
+ *fmt_type = MIPI_FORMAT_YUV420_8_LEGACY;
+ break;
+ case IA_CSS_STREAM_FORMAT_EMBEDDED:
+ *fmt_type = MIPI_FORMAT_EMBEDDED;
+ break;
+#ifndef USE_INPUT_SYSTEM_VERSION_2401
+ case IA_CSS_STREAM_FORMAT_RAW_16:
+ /* This is not specified by Arasan, so we use
+ * 17 for now.
+ */
+ *fmt_type = MIPI_FORMAT_RAW16;
+ break;
+ case IA_CSS_STREAM_FORMAT_BINARY_8:
+ *fmt_type = MIPI_FORMAT_BINARY_8;
+ break;
+#else
+ case IA_CSS_STREAM_FORMAT_USER_DEF1:
+ *fmt_type = MIPI_FORMAT_CUSTOM0;
+ break;
+ case IA_CSS_STREAM_FORMAT_USER_DEF2:
+ *fmt_type = MIPI_FORMAT_CUSTOM1;
+ break;
+ case IA_CSS_STREAM_FORMAT_USER_DEF3:
+ *fmt_type = MIPI_FORMAT_CUSTOM2;
+ break;
+ case IA_CSS_STREAM_FORMAT_USER_DEF4:
+ *fmt_type = MIPI_FORMAT_CUSTOM3;
+ break;
+ case IA_CSS_STREAM_FORMAT_USER_DEF5:
+ *fmt_type = MIPI_FORMAT_CUSTOM4;
+ break;
+ case IA_CSS_STREAM_FORMAT_USER_DEF6:
+ *fmt_type = MIPI_FORMAT_CUSTOM5;
+ break;
+ case IA_CSS_STREAM_FORMAT_USER_DEF7:
+ *fmt_type = MIPI_FORMAT_CUSTOM6;
+ break;
+ case IA_CSS_STREAM_FORMAT_USER_DEF8:
+ *fmt_type = MIPI_FORMAT_CUSTOM7;
+ break;
+#endif
+
+ case IA_CSS_STREAM_FORMAT_YUV420_16:
+ case IA_CSS_STREAM_FORMAT_YUV422_16:
+ default:
+ return IA_CSS_ERR_INTERNAL_ERROR;
+ }
+ return IA_CSS_SUCCESS;
+}
+#if defined(USE_INPUT_SYSTEM_VERSION_2401)
+static mipi_predictor_t sh_css_csi2_compression_type_2_mipi_predictor(enum ia_css_csi2_compression_type type)
+{
+ mipi_predictor_t predictor = MIPI_PREDICTOR_NONE;
+
+ switch (type) {
+ case IA_CSS_CSI2_COMPRESSION_TYPE_1:
+ predictor = MIPI_PREDICTOR_TYPE1 - 1;
+ break;
+ case IA_CSS_CSI2_COMPRESSION_TYPE_2:
+ predictor = MIPI_PREDICTOR_TYPE2 - 1;
+ break;
+ default:
+ break;
+ }
+ return predictor;
+}
+enum ia_css_err ia_css_isys_convert_compressed_format(
+ struct ia_css_csi2_compression *comp,
+ struct input_system_cfg_s *cfg)
+{
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ assert(comp != NULL);
+ assert(cfg != NULL);
+
+ if (comp->type != IA_CSS_CSI2_COMPRESSION_TYPE_NONE) {
+ /* compression register bit slicing
+ 4 bits for each user defined data type
+ 3 bits indicate the compression scheme:
+ 000 No compression
+ 001 10-6-10
+ 010 10-7-10
+ 011 10-8-10
+ 100 12-6-12
+ 101 12-7-12
+ 110 12-8-12
+ 1 bit indicates the predictor
+ */
+ if (comp->uncompressed_bits_per_pixel == UNCOMPRESSED_BITS_PER_PIXEL_10) {
+ switch (comp->compressed_bits_per_pixel) {
+ case COMPRESSED_BITS_PER_PIXEL_6:
+ cfg->csi_port_attr.comp_scheme = MIPI_COMPRESSOR_10_6_10;
+ break;
+ case COMPRESSED_BITS_PER_PIXEL_7:
+ cfg->csi_port_attr.comp_scheme = MIPI_COMPRESSOR_10_7_10;
+ break;
+ case COMPRESSED_BITS_PER_PIXEL_8:
+ cfg->csi_port_attr.comp_scheme = MIPI_COMPRESSOR_10_8_10;
+ break;
+ default:
+ err = IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+ } else if (comp->uncompressed_bits_per_pixel == UNCOMPRESSED_BITS_PER_PIXEL_12) {
+ switch (comp->compressed_bits_per_pixel) {
+ case COMPRESSED_BITS_PER_PIXEL_6:
+ cfg->csi_port_attr.comp_scheme = MIPI_COMPRESSOR_12_6_12;
+ break;
+ case COMPRESSED_BITS_PER_PIXEL_7:
+ cfg->csi_port_attr.comp_scheme = MIPI_COMPRESSOR_12_7_12;
+ break;
+ case COMPRESSED_BITS_PER_PIXEL_8:
+ cfg->csi_port_attr.comp_scheme = MIPI_COMPRESSOR_12_8_12;
+ break;
+ default:
+ err = IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+ } else
+ err = IA_CSS_ERR_INVALID_ARGUMENTS;
+ cfg->csi_port_attr.comp_predictor = sh_css_csi2_compression_type_2_mipi_predictor(comp->type);
+ cfg->csi_port_attr.comp_enable = true;
+ } else /* No compression */
+ cfg->csi_port_attr.comp_enable = false;
+ return err;
+}
+
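Combined with the bit-slicing comment above, a 10-bit stream compressed to 6 bits with compression type 1 should come out as the 10-6-10 scheme with predictor type 1. A sketch of that case, using only the types and constants this function already references (the chosen field values are the illustrative part):

	struct ia_css_csi2_compression comp = {
		.type = IA_CSS_CSI2_COMPRESSION_TYPE_1,
		.uncompressed_bits_per_pixel = UNCOMPRESSED_BITS_PER_PIXEL_10,
		.compressed_bits_per_pixel = COMPRESSED_BITS_PER_PIXEL_6,
	};
	struct input_system_cfg_s cfg;

	if (ia_css_isys_convert_compressed_format(&comp, &cfg) == IA_CSS_SUCCESS) {
		/* cfg.csi_port_attr.comp_scheme    == MIPI_COMPRESSOR_10_6_10
		 * cfg.csi_port_attr.comp_predictor == MIPI_PREDICTOR_TYPE1 - 1
		 * cfg.csi_port_attr.comp_enable    == true
		 */
	}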
+unsigned int ia_css_csi2_calculate_input_system_alignment(
+ enum ia_css_stream_format fmt_type)
+{
+ unsigned int memory_alignment_in_bytes = HIVE_ISP_DDR_WORD_BYTES;
+
+ switch (fmt_type) {
+ case IA_CSS_STREAM_FORMAT_RAW_6:
+ case IA_CSS_STREAM_FORMAT_RAW_7:
+ case IA_CSS_STREAM_FORMAT_RAW_8:
+ case IA_CSS_STREAM_FORMAT_RAW_10:
+ case IA_CSS_STREAM_FORMAT_RAW_12:
+ case IA_CSS_STREAM_FORMAT_RAW_14:
+ memory_alignment_in_bytes = 2 * ISP_VEC_NELEMS;
+ break;
+ case IA_CSS_STREAM_FORMAT_YUV420_8:
+ case IA_CSS_STREAM_FORMAT_YUV422_8:
+ case IA_CSS_STREAM_FORMAT_USER_DEF1:
+ case IA_CSS_STREAM_FORMAT_USER_DEF2:
+ case IA_CSS_STREAM_FORMAT_USER_DEF3:
+ case IA_CSS_STREAM_FORMAT_USER_DEF4:
+ case IA_CSS_STREAM_FORMAT_USER_DEF5:
+ case IA_CSS_STREAM_FORMAT_USER_DEF6:
+ case IA_CSS_STREAM_FORMAT_USER_DEF7:
+ case IA_CSS_STREAM_FORMAT_USER_DEF8:
+ /* Planar YUV formats need to have all planes aligned; this means
+ * double the alignment for the Y plane if the horizontal decimation is 2. */
+ memory_alignment_in_bytes = 2 * HIVE_ISP_DDR_WORD_BYTES;
+ break;
+ case IA_CSS_STREAM_FORMAT_EMBEDDED:
+ default:
+ memory_alignment_in_bytes = HIVE_ISP_DDR_WORD_BYTES;
+ break;
+ }
+ return memory_alignment_in_bytes;
+}
+
+#endif
+
+#if !defined(USE_INPUT_SYSTEM_VERSION_2401)
+void ia_css_isys_rx_configure(const rx_cfg_t *config,
+ const enum ia_css_input_mode input_mode)
+{
+#if defined(HAS_RX_VERSION_2)
+ bool port_enabled[N_MIPI_PORT_ID];
+ bool any_port_enabled = false;
+ mipi_port_ID_t port;
+
+ if ((config == NULL)
+ || (config->mode >= N_RX_MODE)
+ || (config->port >= N_MIPI_PORT_ID)) {
+ assert(0);
+ return;
+ }
+ for (port = (mipi_port_ID_t) 0; port < N_MIPI_PORT_ID; port++) {
+ if (is_receiver_port_enabled(RX0_ID, port))
+ any_port_enabled = true;
+ }
+ /* AM: Check whether this is a problem with multiple
+ * streams. MS: This is the case. */
+
+ port = config->port;
+ receiver_port_enable(RX0_ID, port, false);
+
+ port = config->port;
+
+ /* AM: Check whether this is a problem with multiple streams. */
+ if (MIPI_PORT_LANES[config->mode][port] != MIPI_0LANE_CFG) {
+ receiver_port_reg_store(RX0_ID, port,
+ _HRT_CSS_RECEIVER_FUNC_PROG_REG_IDX,
+ config->timeout);
+ receiver_port_reg_store(RX0_ID, port,
+ _HRT_CSS_RECEIVER_2400_INIT_COUNT_REG_IDX,
+ config->initcount);
+ receiver_port_reg_store(RX0_ID, port,
+ _HRT_CSS_RECEIVER_2400_SYNC_COUNT_REG_IDX,
+ config->synccount);
+ receiver_port_reg_store(RX0_ID, port,
+ _HRT_CSS_RECEIVER_2400_RX_COUNT_REG_IDX,
+ config->rxcount);
+
+ port_enabled[port] = true;
+
+ if (input_mode != IA_CSS_INPUT_MODE_BUFFERED_SENSOR) {
+
+ /* MW: A bit of a hack, straight wiring of the capture
+ * units, assuming they are linearly enumerated. */
+ input_system_sub_system_reg_store(INPUT_SYSTEM0_ID,
+ GPREGS_UNIT0_ID,
+ HIVE_ISYS_GPREG_MULTICAST_A_IDX
+ + (unsigned int)port,
+ INPUT_SYSTEM_CSI_BACKEND);
+ /* MW: Like the integration test example, we overwrite
+ * the GPREG_MUX register */
+ input_system_sub_system_reg_store(INPUT_SYSTEM0_ID,
+ GPREGS_UNIT0_ID,
+ HIVE_ISYS_GPREG_MUX_IDX,
+ (input_system_multiplex_t) port);
+ } else {
+ /*
+ * AM: A bit of a hack, wiring the input system.
+ */
+ input_system_sub_system_reg_store(INPUT_SYSTEM0_ID,
+ GPREGS_UNIT0_ID,
+ HIVE_ISYS_GPREG_MULTICAST_A_IDX
+ + (unsigned int)port,
+ INPUT_SYSTEM_INPUT_BUFFER);
+ input_system_sub_system_reg_store(INPUT_SYSTEM0_ID,
+ GPREGS_UNIT0_ID,
+ HIVE_ISYS_GPREG_MUX_IDX,
+ INPUT_SYSTEM_ACQUISITION_UNIT);
+ }
+ }
+ /*
+ * The 2ppc is shared for all ports, so we cannot
+ * disable->configure->enable individual ports
+ */
+ /* AM: Check whether this is a problem with multiple streams. */
+ /* MS: 2ppc should be a property per binary and should be
+ * enabled/disabled per binary.
+ * Currently it is implemented as a system wide setting due
+ * to effort and risks. */
+ if (!any_port_enabled) {
+ receiver_reg_store(RX0_ID,
+ _HRT_CSS_RECEIVER_TWO_PIXEL_EN_REG_IDX,
+ config->is_two_ppc);
+ receiver_reg_store(RX0_ID, _HRT_CSS_RECEIVER_BE_TWO_PPC_REG_IDX,
+ config->is_two_ppc);
+ }
+ receiver_port_enable(RX0_ID, port, true);
+ /* TODO: JB: need to add the define used below to mizuchi */
+ /* sh_css_sw_hive_isp_css_2400_system_20121224_0125\css
+ * \hrt\input_system_defs.h
+ * #define INPUT_SYSTEM_CSI_RECEIVER_SELECT_BACKENG 0X207
+ */
+ /* TODO: need better name for define
+ * input_system_reg_store(INPUT_SYSTEM0_ID,
+ * INPUT_SYSTEM_CSI_RECEIVER_SELECT_BACKENG, 1);
+ */
+ input_system_reg_store(INPUT_SYSTEM0_ID, 0x207, 1);
+#else
+#error "rx.c: RX version must be one of {RX_VERSION_2}"
+#endif
+
+ return;
+}
+
+void ia_css_isys_rx_disable(void)
+{
+ mipi_port_ID_t port;
+ for (port = (mipi_port_ID_t) 0; port < N_MIPI_PORT_ID; port++) {
+ receiver_port_reg_store(RX0_ID, port,
+ _HRT_CSS_RECEIVER_DEVICE_READY_REG_IDX,
+ false);
+ }
+ return;
+}
+#endif /* if !defined(USE_INPUT_SYSTEM_VERSION_2401) */
+
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/virtual_isys.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/virtual_isys.c
new file mode 100644
index 000000000000..0f1e8a2f6b10
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/virtual_isys.c
@@ -0,0 +1,898 @@
+#ifndef ISP2401
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#else
+/**
+Support for Intel Camera Imaging ISP subsystem.
+Copyright (c) 2010 - 2015, Intel Corporation.
+
+This program is free software; you can redistribute it and/or modify it
+under the terms and conditions of the GNU General Public License,
+version 2, as published by the Free Software Foundation.
+
+This program is distributed in the hope it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+more details.
+*/
+#endif
+
+#include "system_global.h"
+
+#ifdef USE_INPUT_SYSTEM_VERSION_2401
+
+#include "ia_css_isys.h"
+#include "ia_css_debug.h"
+#include "math_support.h"
+#include "string_support.h"
+#include "virtual_isys.h"
+#include "isp.h"
+#include "sh_css_defs.h"
+
+/*************************************************
+ *
+ * Forwarded Declaration
+ *
+ *************************************************/
+#ifndef ISP2401
+
+#endif
+static bool create_input_system_channel(
+ input_system_cfg_t *cfg,
+ bool metadata,
+ input_system_channel_t *channel);
+
+static void destroy_input_system_channel(
+ input_system_channel_t *channel);
+
+static bool create_input_system_input_port(
+ input_system_cfg_t *cfg,
+ input_system_input_port_t *input_port);
+
+static void destroy_input_system_input_port(
+ input_system_input_port_t *input_port);
+
+static bool calculate_input_system_channel_cfg(
+ input_system_channel_t *channel,
+ input_system_input_port_t *input_port,
+ input_system_cfg_t *isys_cfg,
+ input_system_channel_cfg_t *channel_cfg,
+ bool metadata);
+
+static bool calculate_input_system_input_port_cfg(
+ input_system_channel_t *channel,
+ input_system_input_port_t *input_port,
+ input_system_cfg_t *isys_cfg,
+ input_system_input_port_cfg_t *input_port_cfg);
+
+static bool acquire_sid(
+ stream2mmio_ID_t stream2mmio,
+ stream2mmio_sid_ID_t *sid);
+
+static void release_sid(
+ stream2mmio_ID_t stream2mmio,
+ stream2mmio_sid_ID_t *sid);
+
+static bool acquire_ib_buffer(
+ int32_t bits_per_pixel,
+ int32_t pixels_per_line,
+ int32_t lines_per_frame,
+ int32_t align_in_bytes,
+ bool online,
+ ib_buffer_t *buf);
+
+static void release_ib_buffer(
+ ib_buffer_t *buf);
+
+static bool acquire_dma_channel(
+ isys2401_dma_ID_t dma_id,
+ isys2401_dma_channel *channel);
+
+static void release_dma_channel(
+ isys2401_dma_ID_t dma_id,
+ isys2401_dma_channel *channel);
+
+static bool acquire_be_lut_entry(
+ csi_rx_backend_ID_t backend,
+ csi_mipi_packet_type_t packet_type,
+ csi_rx_backend_lut_entry_t *entry);
+
+static void release_be_lut_entry(
+ csi_rx_backend_ID_t backend,
+ csi_mipi_packet_type_t packet_type,
+ csi_rx_backend_lut_entry_t *entry);
+
+static bool calculate_tpg_cfg(
+ input_system_channel_t *channel,
+ input_system_input_port_t *input_port,
+ input_system_cfg_t *isys_cfg,
+ pixelgen_tpg_cfg_t *cfg);
+
+static bool calculate_prbs_cfg(
+ input_system_channel_t *channel,
+ input_system_input_port_t *input_port,
+ input_system_cfg_t *isys_cfg,
+ pixelgen_prbs_cfg_t *cfg);
+
+static bool calculate_fe_cfg(
+ const input_system_cfg_t *isys_cfg,
+ csi_rx_frontend_cfg_t *cfg);
+
+static bool calculate_be_cfg(
+ const input_system_input_port_t *input_port,
+ const input_system_cfg_t *isys_cfg,
+ bool metadata,
+ csi_rx_backend_cfg_t *cfg);
+
+static bool calculate_stream2mmio_cfg(
+ const input_system_cfg_t *isys_cfg,
+ bool metadata,
+ stream2mmio_cfg_t *cfg);
+
+static bool calculate_ibuf_ctrl_cfg(
+ const input_system_channel_t *channel,
+ const input_system_input_port_t *input_port,
+ const input_system_cfg_t *isys_cfg,
+ ibuf_ctrl_cfg_t *cfg);
+
+static bool calculate_isys2401_dma_cfg(
+ const input_system_channel_t *channel,
+ const input_system_cfg_t *isys_cfg,
+ isys2401_dma_cfg_t *cfg);
+
+static bool calculate_isys2401_dma_port_cfg(
+ const input_system_cfg_t *isys_cfg,
+ bool raw_packed,
+ bool metadata,
+ isys2401_dma_port_cfg_t *cfg);
+
+static csi_mipi_packet_type_t get_csi_mipi_packet_type(
+ int32_t data_type);
+
+static int32_t calculate_stride(
+ int32_t bits_per_pixel,
+ int32_t pixels_per_line,
+ bool raw_packed,
+ int32_t align_in_bytes);
+
+/** end of Forward Declarations */
+
+/**************************************************
+ *
+ * Public Methods
+ *
+ **************************************************/
+ia_css_isys_error_t ia_css_isys_stream_create(
+ ia_css_isys_descr_t *isys_stream_descr,
+ ia_css_isys_stream_h isys_stream,
+ uint32_t isys_stream_id)
+{
+ ia_css_isys_error_t rc;
+
+ if (isys_stream_descr == NULL || isys_stream == NULL ||
+ isys_stream_id >= SH_CSS_MAX_ISYS_CHANNEL_NODES)
+ return false;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_isys_stream_create() enter:\n");
+
+ /* Reset isys_stream to 0 */
+ memset(isys_stream, 0, sizeof(*isys_stream));
+ isys_stream->enable_metadata = isys_stream_descr->metadata.enable;
+ isys_stream->id = isys_stream_id;
+
+ isys_stream->linked_isys_stream_id = isys_stream_descr->linked_isys_stream_id;
+ rc = create_input_system_input_port(isys_stream_descr, &(isys_stream->input_port));
+ if (rc == false)
+ return false;
+
+ rc = create_input_system_channel(isys_stream_descr, false, &(isys_stream->channel));
+ if (rc == false) {
+ destroy_input_system_input_port(&isys_stream->input_port);
+ return false;
+ }
+
+#ifdef ISP2401
+ /*
+ * Early polling is required for timestamp accuracy in certain cases.
+ * The ISYS HW polling is started on
+ * ia_css_isys_stream_capture_indication() instead of
+ * ia_css_pipeline_sp_wait_for_isys_stream_N(), as ISP processing of
+ * the capture takes longer than getting an ISYS frame.
+ */
+ isys_stream->polling_mode = isys_stream_descr->polling_mode;
+
+#endif
+ /* create metadata channel */
+ if (isys_stream_descr->metadata.enable) {
+ rc = create_input_system_channel(isys_stream_descr, true, &isys_stream->md_channel);
+ if (rc == false) {
+ destroy_input_system_input_port(&isys_stream->input_port);
+ destroy_input_system_channel(&isys_stream->channel);
+ return false;
+ }
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_isys_stream_create() leave:\n");
+
+ return true;
+}
+
+void ia_css_isys_stream_destroy(
+ ia_css_isys_stream_h isys_stream)
+{
+ destroy_input_system_input_port(&isys_stream->input_port);
+ destroy_input_system_channel(&(isys_stream->channel));
+ if (isys_stream->enable_metadata) {
+ /* Destroy metadata channel only if it is allocated */
+ destroy_input_system_channel(&isys_stream->md_channel);
+ }
+}
+
+ia_css_isys_error_t ia_css_isys_stream_calculate_cfg(
+ ia_css_isys_stream_h isys_stream,
+ ia_css_isys_descr_t *isys_stream_descr,
+ ia_css_isys_stream_cfg_t *isys_stream_cfg)
+{
+ ia_css_isys_error_t rc;
+
+ if (isys_stream_cfg == NULL ||
+ isys_stream_descr == NULL ||
+ isys_stream == NULL)
+ return false;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_isys_stream_calculate_cfg() enter:\n");
+
+ rc = calculate_input_system_channel_cfg(
+ &(isys_stream->channel),
+ &(isys_stream->input_port),
+ isys_stream_descr,
+ &(isys_stream_cfg->channel_cfg),
+ false);
+ if (rc == false)
+ return false;
+
+ /* configure metadata channel */
+ if (isys_stream_descr->metadata.enable) {
+ isys_stream_cfg->enable_metadata = true;
+ rc = calculate_input_system_channel_cfg(
+ &isys_stream->md_channel,
+ &isys_stream->input_port,
+ isys_stream_descr,
+ &isys_stream_cfg->md_channel_cfg,
+ true);
+ if (rc == false)
+ return false;
+ }
+
+ rc = calculate_input_system_input_port_cfg(
+ &(isys_stream->channel),
+ &(isys_stream->input_port),
+ isys_stream_descr,
+ &(isys_stream_cfg->input_port_cfg));
+ if (rc == false)
+ return false;
+
+ isys_stream->valid = 1;
+ isys_stream_cfg->valid = 1;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_isys_stream_calculate_cfg() leave:\n");
+ return rc;
+}
+
+/** end of Public Methods */
+
+/**************************************************
+ *
+ * Private Methods
+ *
+ **************************************************/
+static bool create_input_system_channel(
+ input_system_cfg_t *cfg,
+ bool metadata,
+ input_system_channel_t *me)
+{
+ bool rc = true;
+
+ me->dma_id = ISYS2401_DMA0_ID;
+
+ switch (cfg->input_port_id) {
+ case INPUT_SYSTEM_CSI_PORT0_ID:
+ case INPUT_SYSTEM_PIXELGEN_PORT0_ID:
+ me->stream2mmio_id = STREAM2MMIO0_ID;
+ me->ibuf_ctrl_id = IBUF_CTRL0_ID;
+ break;
+
+ case INPUT_SYSTEM_CSI_PORT1_ID:
+ case INPUT_SYSTEM_PIXELGEN_PORT1_ID:
+ me->stream2mmio_id = STREAM2MMIO1_ID;
+ me->ibuf_ctrl_id = IBUF_CTRL1_ID;
+ break;
+
+ case INPUT_SYSTEM_CSI_PORT2_ID:
+ case INPUT_SYSTEM_PIXELGEN_PORT2_ID:
+ me->stream2mmio_id = STREAM2MMIO2_ID;
+ me->ibuf_ctrl_id = IBUF_CTRL2_ID;
+ break;
+ default:
+ rc = false;
+ break;
+ }
+
+ if (rc == false)
+ return false;
+
+ if (!acquire_sid(me->stream2mmio_id, &(me->stream2mmio_sid_id))) {
+ return false;
+ }
+
+ if (!acquire_ib_buffer(
+ metadata ? cfg->metadata.bits_per_pixel : cfg->input_port_resolution.bits_per_pixel,
+ metadata ? cfg->metadata.pixels_per_line : cfg->input_port_resolution.pixels_per_line,
+ metadata ? cfg->metadata.lines_per_frame : cfg->input_port_resolution.lines_per_frame,
+ metadata ? cfg->metadata.align_req_in_bytes : cfg->input_port_resolution.align_req_in_bytes,
+ cfg->online,
+ &(me->ib_buffer))) {
+ release_sid(me->stream2mmio_id, &(me->stream2mmio_sid_id));
+ return false;
+ }
+
+ if (!acquire_dma_channel(me->dma_id, &(me->dma_channel))) {
+ release_sid(me->stream2mmio_id, &(me->stream2mmio_sid_id));
+ release_ib_buffer(&(me->ib_buffer));
+ return false;
+ }
+
+ return true;
+}
+
+static void destroy_input_system_channel(
+ input_system_channel_t *me)
+{
+ release_sid(me->stream2mmio_id,
+ &(me->stream2mmio_sid_id));
+
+ release_ib_buffer(&(me->ib_buffer));
+
+ release_dma_channel(me->dma_id, &(me->dma_channel));
+}
+
+static bool create_input_system_input_port(
+ input_system_cfg_t *cfg,
+ input_system_input_port_t *me)
+{
+ csi_mipi_packet_type_t packet_type;
+ bool rc = true;
+
+ switch (cfg->input_port_id) {
+ case INPUT_SYSTEM_CSI_PORT0_ID:
+ me->csi_rx.frontend_id = CSI_RX_FRONTEND0_ID;
+ me->csi_rx.backend_id = CSI_RX_BACKEND0_ID;
+
+ packet_type = get_csi_mipi_packet_type(cfg->csi_port_attr.fmt_type);
+ me->csi_rx.packet_type = packet_type;
+
+ rc = acquire_be_lut_entry(
+ me->csi_rx.backend_id,
+ packet_type,
+ &(me->csi_rx.backend_lut_entry));
+ break;
+ case INPUT_SYSTEM_PIXELGEN_PORT0_ID:
+ me->pixelgen.pixelgen_id = PIXELGEN0_ID;
+ break;
+ case INPUT_SYSTEM_CSI_PORT1_ID:
+ me->csi_rx.frontend_id = CSI_RX_FRONTEND1_ID;
+ me->csi_rx.backend_id = CSI_RX_BACKEND1_ID;
+
+ packet_type = get_csi_mipi_packet_type(cfg->csi_port_attr.fmt_type);
+ me->csi_rx.packet_type = packet_type;
+
+ rc = acquire_be_lut_entry(
+ me->csi_rx.backend_id,
+ packet_type,
+ &(me->csi_rx.backend_lut_entry));
+ break;
+ case INPUT_SYSTEM_PIXELGEN_PORT1_ID:
+ me->pixelgen.pixelgen_id = PIXELGEN1_ID;
+
+ break;
+ case INPUT_SYSTEM_CSI_PORT2_ID:
+ me->csi_rx.frontend_id = CSI_RX_FRONTEND2_ID;
+ me->csi_rx.backend_id = CSI_RX_BACKEND2_ID;
+
+ packet_type = get_csi_mipi_packet_type(cfg->csi_port_attr.fmt_type);
+ me->csi_rx.packet_type = packet_type;
+
+ rc = acquire_be_lut_entry(
+ me->csi_rx.backend_id,
+ packet_type,
+ &(me->csi_rx.backend_lut_entry));
+ break;
+ case INPUT_SYSTEM_PIXELGEN_PORT2_ID:
+ me->pixelgen.pixelgen_id = PIXELGEN2_ID;
+ break;
+ default:
+ rc = false;
+ break;
+ }
+
+ me->source_type = cfg->mode;
+
+ /* for metadata */
+ me->metadata.packet_type = CSI_MIPI_PACKET_TYPE_UNDEFINED;
+ if (rc && cfg->metadata.enable) {
+ me->metadata.packet_type = get_csi_mipi_packet_type(
+ cfg->metadata.fmt_type);
+ rc = acquire_be_lut_entry(
+ me->csi_rx.backend_id,
+ me->metadata.packet_type,
+ &me->metadata.backend_lut_entry);
+ }
+
+ return rc;
+}
+
+static void destroy_input_system_input_port(
+ input_system_input_port_t *me)
+{
+ if (me->source_type == INPUT_SYSTEM_SOURCE_TYPE_SENSOR) {
+ release_be_lut_entry(
+ me->csi_rx.backend_id,
+ me->csi_rx.packet_type,
+ &me->csi_rx.backend_lut_entry);
+ }
+
+ if (me->metadata.packet_type != CSI_MIPI_PACKET_TYPE_UNDEFINED) {
+ /*Free the backend lut allocated for metadata*/
+ release_be_lut_entry(
+ me->csi_rx.backend_id,
+ me->metadata.packet_type,
+ &me->metadata.backend_lut_entry);
+ }
+}
+
+static bool calculate_input_system_channel_cfg(
+ input_system_channel_t *channel,
+ input_system_input_port_t *input_port,
+ input_system_cfg_t *isys_cfg,
+ input_system_channel_cfg_t *channel_cfg,
+ bool metadata)
+{
+ bool rc;
+
+ rc = calculate_stream2mmio_cfg(isys_cfg, metadata,
+ &(channel_cfg->stream2mmio_cfg));
+ if (rc == false)
+ return false;
+
+ rc = calculate_ibuf_ctrl_cfg(
+ channel,
+ input_port,
+ isys_cfg,
+ &(channel_cfg->ibuf_ctrl_cfg));
+ if (rc == false)
+ return false;
+ if (metadata)
+ channel_cfg->ibuf_ctrl_cfg.stores_per_frame = isys_cfg->metadata.lines_per_frame;
+
+ rc = calculate_isys2401_dma_cfg(
+ channel,
+ isys_cfg,
+ &(channel_cfg->dma_cfg));
+ if (rc == false)
+ return false;
+
+ rc = calculate_isys2401_dma_port_cfg(
+ isys_cfg,
+ false,
+ metadata,
+ &(channel_cfg->dma_src_port_cfg));
+ if (rc == false)
+ return false;
+
+ rc = calculate_isys2401_dma_port_cfg(
+ isys_cfg,
+ isys_cfg->raw_packed,
+ metadata,
+ &(channel_cfg->dma_dest_port_cfg));
+ if (rc == false)
+ return false;
+
+ return true;
+}
+
+static bool calculate_input_system_input_port_cfg(
+ input_system_channel_t *channel,
+ input_system_input_port_t *input_port,
+ input_system_cfg_t *isys_cfg,
+ input_system_input_port_cfg_t *input_port_cfg)
+{
+ bool rc;
+
+ switch (input_port->source_type) {
+ case INPUT_SYSTEM_SOURCE_TYPE_SENSOR:
+ rc = calculate_fe_cfg(
+ isys_cfg,
+ &(input_port_cfg->csi_rx_cfg.frontend_cfg));
+
+ rc &= calculate_be_cfg(
+ input_port,
+ isys_cfg,
+ false,
+ &(input_port_cfg->csi_rx_cfg.backend_cfg));
+
+ if (rc && isys_cfg->metadata.enable)
+ rc &= calculate_be_cfg(input_port, isys_cfg, true,
+ &input_port_cfg->csi_rx_cfg.md_backend_cfg);
+ break;
+ case INPUT_SYSTEM_SOURCE_TYPE_TPG:
+ rc = calculate_tpg_cfg(
+ channel,
+ input_port,
+ isys_cfg,
+ &(input_port_cfg->pixelgen_cfg.tpg_cfg));
+ break;
+ case INPUT_SYSTEM_SOURCE_TYPE_PRBS:
+ rc = calculate_prbs_cfg(
+ channel,
+ input_port,
+ isys_cfg,
+ &(input_port_cfg->pixelgen_cfg.prbs_cfg));
+ break;
+ default:
+ rc = false;
+ break;
+ }
+
+ return rc;
+}
+
+static bool acquire_sid(
+ stream2mmio_ID_t stream2mmio,
+ stream2mmio_sid_ID_t *sid)
+{
+ return ia_css_isys_stream2mmio_sid_rmgr_acquire(stream2mmio, sid);
+}
+
+static void release_sid(
+ stream2mmio_ID_t stream2mmio,
+ stream2mmio_sid_ID_t *sid)
+{
+ ia_css_isys_stream2mmio_sid_rmgr_release(stream2mmio, sid);
+}
+
+/* See also: ia_css_dma_configure_from_info() */
+static int32_t calculate_stride(
+ int32_t bits_per_pixel,
+ int32_t pixels_per_line,
+ bool raw_packed,
+ int32_t align_in_bytes)
+{
+ int32_t bytes_per_line;
+ int32_t pixels_per_word;
+ int32_t words_per_line;
+ int32_t pixels_per_line_padded;
+
+ pixels_per_line_padded = CEIL_MUL(pixels_per_line, align_in_bytes);
+
+ if (!raw_packed)
+ bits_per_pixel = CEIL_MUL(bits_per_pixel, 8);
+
+ pixels_per_word = HIVE_ISP_DDR_WORD_BITS / bits_per_pixel;
+ words_per_line = ceil_div(pixels_per_line_padded, pixels_per_word);
+ bytes_per_line = HIVE_ISP_DDR_WORD_BYTES * words_per_line;
+
+ return bytes_per_line;
+}
+
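A worked example of the stride computation may help; the 256-bit (32-byte) DDR word width is an assumption about this platform's HIVE_ISP_DDR_WORD_* constants rather than something stated here:

	/* raw_packed = false, bits_per_pixel = 10  -> rounded up to 16
	 * pixels_per_line = 1920, align_in_bytes = 32
	 *
	 * pixels_per_line_padded = CEIL_MUL(1920, 32)  = 1920
	 * pixels_per_word        = 256 / 16            = 16
	 * words_per_line         = ceil_div(1920, 16)  = 120
	 * stride                 = 32 * 120            = 3840 bytes per line
	 */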
+static bool acquire_ib_buffer(
+ int32_t bits_per_pixel,
+ int32_t pixels_per_line,
+ int32_t lines_per_frame,
+ int32_t align_in_bytes,
+ bool online,
+ ib_buffer_t *buf)
+{
+ buf->stride = calculate_stride(bits_per_pixel, pixels_per_line, false, align_in_bytes);
+ if (online)
+ buf->lines = 4; /* use double buffering for online use cases */
+ else
+ buf->lines = 2;
+
+ (void)(lines_per_frame);
+ return ia_css_isys_ibuf_rmgr_acquire(buf->stride * buf->lines, &buf->start_addr);
+}
+
+static void release_ib_buffer(
+ ib_buffer_t *buf)
+{
+ ia_css_isys_ibuf_rmgr_release(&buf->start_addr);
+}
+
+static bool acquire_dma_channel(
+ isys2401_dma_ID_t dma_id,
+ isys2401_dma_channel *channel)
+{
+ return ia_css_isys_dma_channel_rmgr_acquire(dma_id, channel);
+}
+
+static void release_dma_channel(
+ isys2401_dma_ID_t dma_id,
+ isys2401_dma_channel *channel)
+{
+ ia_css_isys_dma_channel_rmgr_release(dma_id, channel);
+}
+
+static bool acquire_be_lut_entry(
+ csi_rx_backend_ID_t backend,
+ csi_mipi_packet_type_t packet_type,
+ csi_rx_backend_lut_entry_t *entry)
+{
+ return ia_css_isys_csi_rx_lut_rmgr_acquire(backend, packet_type, entry);
+}
+
+static void release_be_lut_entry(
+ csi_rx_backend_ID_t backend,
+ csi_mipi_packet_type_t packet_type,
+ csi_rx_backend_lut_entry_t *entry)
+{
+ ia_css_isys_csi_rx_lut_rmgr_release(backend, packet_type, entry);
+}
+
+static bool calculate_tpg_cfg(
+ input_system_channel_t *channel,
+ input_system_input_port_t *input_port,
+ input_system_cfg_t *isys_cfg,
+ pixelgen_tpg_cfg_t *cfg)
+{
+ (void)channel;
+ (void)input_port;
+
+ memcpy_s(
+ (void *)cfg,
+ sizeof(pixelgen_tpg_cfg_t),
+ (void *)(&(isys_cfg->tpg_port_attr)),
+ sizeof(pixelgen_tpg_cfg_t));
+ return true;
+}
+
+static bool calculate_prbs_cfg(
+ input_system_channel_t *channel,
+ input_system_input_port_t *input_port,
+ input_system_cfg_t *isys_cfg,
+ pixelgen_prbs_cfg_t *cfg)
+{
+ (void)channel;
+ (void)input_port;
+
+ memcpy_s(
+ (void *)cfg,
+ sizeof(pixelgen_prbs_cfg_t),
+ (void *)(&(isys_cfg->prbs_port_attr)),
+ sizeof(pixelgen_prbs_cfg_t));
+ return true;
+}
+
+static bool calculate_fe_cfg(
+ const input_system_cfg_t *isys_cfg,
+ csi_rx_frontend_cfg_t *cfg)
+{
+ cfg->active_lanes = isys_cfg->csi_port_attr.active_lanes;
+ return true;
+}
+
+static bool calculate_be_cfg(
+ const input_system_input_port_t *input_port,
+ const input_system_cfg_t *isys_cfg,
+ bool metadata,
+ csi_rx_backend_cfg_t *cfg)
+{
+ memcpy_s(
+ (void *)(&cfg->lut_entry),
+ sizeof(csi_rx_backend_lut_entry_t),
+ metadata ? (void *)(&input_port->metadata.backend_lut_entry) :
+ (void *)(&input_port->csi_rx.backend_lut_entry),
+ sizeof(csi_rx_backend_lut_entry_t));
+
+ cfg->csi_mipi_cfg.virtual_channel = isys_cfg->csi_port_attr.ch_id;
+ if (metadata) {
+ cfg->csi_mipi_packet_type = get_csi_mipi_packet_type(isys_cfg->metadata.fmt_type);
+ cfg->csi_mipi_cfg.comp_enable = false;
+ cfg->csi_mipi_cfg.data_type = isys_cfg->metadata.fmt_type;
+	} else {
+ cfg->csi_mipi_packet_type = get_csi_mipi_packet_type(isys_cfg->csi_port_attr.fmt_type);
+ cfg->csi_mipi_cfg.data_type = isys_cfg->csi_port_attr.fmt_type;
+ cfg->csi_mipi_cfg.comp_enable = isys_cfg->csi_port_attr.comp_enable;
+ cfg->csi_mipi_cfg.comp_scheme = isys_cfg->csi_port_attr.comp_scheme;
+ cfg->csi_mipi_cfg.comp_predictor = isys_cfg->csi_port_attr.comp_predictor;
+ cfg->csi_mipi_cfg.comp_bit_idx = cfg->csi_mipi_cfg.data_type - MIPI_FORMAT_CUSTOM0;
+ }
+
+ return true;
+}
+
+static bool calculate_stream2mmio_cfg(
+ const input_system_cfg_t *isys_cfg,
+ bool metadata,
+ stream2mmio_cfg_t *cfg
+)
+{
+ cfg->bits_per_pixel = metadata ? isys_cfg->metadata.bits_per_pixel :
+ isys_cfg->input_port_resolution.bits_per_pixel;
+
+ cfg->enable_blocking =
+ ((isys_cfg->mode == INPUT_SYSTEM_SOURCE_TYPE_TPG) ||
+ (isys_cfg->mode == INPUT_SYSTEM_SOURCE_TYPE_PRBS));
+
+ return true;
+}
+
+static bool calculate_ibuf_ctrl_cfg(
+ const input_system_channel_t *channel,
+ const input_system_input_port_t *input_port,
+ const input_system_cfg_t *isys_cfg,
+ ibuf_ctrl_cfg_t *cfg)
+{
+ const int32_t bits_per_byte = 8;
+ int32_t bits_per_pixel;
+ int32_t bytes_per_pixel;
+ int32_t left_padding;
+
+ (void)input_port;
+
+ bits_per_pixel = isys_cfg->input_port_resolution.bits_per_pixel;
+ bytes_per_pixel = ceil_div(bits_per_pixel, bits_per_byte);
+
+ left_padding = CEIL_MUL(isys_cfg->output_port_attr.left_padding, ISP_VEC_NELEMS)
+ * bytes_per_pixel;
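+	/* For illustration (the numbers are assumptions, not taken from this
+	 * patch): with ISP_VEC_NELEMS == 64 and bytes_per_pixel == 2, a
+	 * left_padding of 12 pixels rounds up to 64 pixels, i.e. 128 bytes. */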
+
+ cfg->online = isys_cfg->online;
+
+ cfg->dma_cfg.channel = channel->dma_channel;
+ cfg->dma_cfg.cmd = _DMA_V2_MOVE_A2B_NO_SYNC_CHK_COMMAND;
+
+ cfg->dma_cfg.shift_returned_items = 0;
+ cfg->dma_cfg.elems_per_word_in_ibuf = 0;
+ cfg->dma_cfg.elems_per_word_in_dest = 0;
+
+ cfg->ib_buffer.start_addr = channel->ib_buffer.start_addr;
+ cfg->ib_buffer.stride = channel->ib_buffer.stride;
+ cfg->ib_buffer.lines = channel->ib_buffer.lines;
+
+	/*
+	 * zhengjie.lu@intel.com:
+	 * "dest_buf_cfg" should be part of the input system output
+	 * port configuration.
+	 *
+	 * TODO: move "dest_buf_cfg" to the input system output
+	 * port configuration.
+	 */
+
+	/* The input_buf address is only available in sched mode;
+	   this buffer is allocated in the ISP. In crun mode the address
+	   can be passed in after ISP allocation. */
+ if (cfg->online) {
+ cfg->dest_buf_cfg.start_addr = ISP_INPUT_BUF_START_ADDR + left_padding;
+ cfg->dest_buf_cfg.stride = bytes_per_pixel
+ * isys_cfg->output_port_attr.max_isp_input_width;
+ cfg->dest_buf_cfg.lines = LINES_OF_ISP_INPUT_BUF;
+ } else if (isys_cfg->raw_packed) {
+ cfg->dest_buf_cfg.stride = calculate_stride(bits_per_pixel,
+ isys_cfg->input_port_resolution.pixels_per_line,
+ isys_cfg->raw_packed,
+ isys_cfg->input_port_resolution.align_req_in_bytes);
+ } else {
+ cfg->dest_buf_cfg.stride = channel->ib_buffer.stride;
+ }
+
+	/*
+	 * zhengjie.lu@intel.com:
+	 * "items_per_store" is hard coded as "1", which is ONLY valid
+	 * when the CSI-MIPI long packet is transferred.
+	 *
+	 * TODO: After the 1st stage of MERR+, make the proper solution to
+	 * configure "items_per_store" so that it can also handle the CSI-MIPI
+	 * short packet.
+	 */
+ cfg->items_per_store = 1;
+
+ cfg->stores_per_frame = isys_cfg->input_port_resolution.lines_per_frame;
+
+
+ cfg->stream2mmio_cfg.sync_cmd = _STREAM2MMIO_CMD_TOKEN_SYNC_FRAME;
+
+ /* TODO: Define conditions as when to use store words vs store packets */
+ cfg->stream2mmio_cfg.store_cmd = _STREAM2MMIO_CMD_TOKEN_STORE_PACKETS;
+
+ return true;
+}
+
+static bool calculate_isys2401_dma_cfg(
+ const input_system_channel_t *channel,
+ const input_system_cfg_t *isys_cfg,
+ isys2401_dma_cfg_t *cfg)
+{
+ cfg->channel = channel->dma_channel;
+
+	/* only the online/sensor mode goes to VMEM;
+	   offline/buffered_sensor, TPG and PRBS go to DDR */
+ if (isys_cfg->online)
+ cfg->connection = isys2401_dma_ibuf_to_vmem_connection;
+ else
+ cfg->connection = isys2401_dma_ibuf_to_ddr_connection;
+
+ cfg->extension = isys2401_dma_zero_extension;
+ cfg->height = 1;
+
+ return true;
+}
+
+/* See also: ia_css_dma_configure_from_info() */
+static bool calculate_isys2401_dma_port_cfg(
+ const input_system_cfg_t *isys_cfg,
+ bool raw_packed,
+ bool metadata,
+ isys2401_dma_port_cfg_t *cfg)
+{
+ int32_t bits_per_pixel;
+ int32_t pixels_per_line;
+ int32_t align_req_in_bytes;
+
+ /* TODO: Move metadata away from isys_cfg to application layer */
+ if (metadata) {
+ bits_per_pixel = isys_cfg->metadata.bits_per_pixel;
+ pixels_per_line = isys_cfg->metadata.pixels_per_line;
+ align_req_in_bytes = isys_cfg->metadata.align_req_in_bytes;
+ } else {
+ bits_per_pixel = isys_cfg->input_port_resolution.bits_per_pixel;
+ pixels_per_line = isys_cfg->input_port_resolution.pixels_per_line;
+ align_req_in_bytes = isys_cfg->input_port_resolution.align_req_in_bytes;
+ }
+
+ cfg->stride = calculate_stride(bits_per_pixel, pixels_per_line, raw_packed, align_req_in_bytes);
+
+ if (!raw_packed)
+ bits_per_pixel = CEIL_MUL(bits_per_pixel, 8);
+
+ cfg->elements = HIVE_ISP_DDR_WORD_BITS / bits_per_pixel;
+ cfg->cropping = 0;
+ cfg->width = CEIL_DIV(cfg->stride, HIVE_ISP_DDR_WORD_BYTES);
+
+ return true;
+}
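+
+/* Illustrative example for calculate_isys2401_dma_port_cfg(), using the same
+ * assumed DDR word size as above (HIVE_ISP_DDR_WORD_BITS == 256, 32 bytes):
+ * a non-raw-packed 10-bit, 1920-pixel line with a 32-byte alignment request
+ * yields stride = 3840, elements = 256 / 16 = 16 and
+ * width = CEIL_DIV(3840, 32) = 120 DDR words.
+ */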
+
+static csi_mipi_packet_type_t get_csi_mipi_packet_type(
+ int32_t data_type)
+{
+ csi_mipi_packet_type_t packet_type;
+
+ packet_type = CSI_MIPI_PACKET_TYPE_RESERVED;
+
+ if (data_type >= 0 && data_type <= MIPI_FORMAT_SHORT8)
+ packet_type = CSI_MIPI_PACKET_TYPE_SHORT;
+
+ if (data_type > MIPI_FORMAT_SHORT8 && data_type <= N_MIPI_FORMAT)
+ packet_type = CSI_MIPI_PACKET_TYPE_LONG;
+
+ return packet_type;
+}
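+
+/* In other words: data types at or below MIPI_FORMAT_SHORT8 classify as
+ * SHORT packets, data types above it up to N_MIPI_FORMAT classify as LONG
+ * packets (presumably the pixel formats such as RAW10; the exact enum layout
+ * is defined elsewhere), and anything else is RESERVED.
+ */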
+/** end of Private Methods */
+#endif
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/virtual_isys.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/virtual_isys.h
new file mode 100644
index 000000000000..66c7293c0a93
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/virtual_isys.h
@@ -0,0 +1,41 @@
+#ifndef ISP2401
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#else
+/**
+Support for Intel Camera Imaging ISP subsystem.
+Copyright (c) 2010 - 2015, Intel Corporation.
+
+This program is free software; you can redistribute it and/or modify it
+under the terms and conditions of the GNU General Public License,
+version 2, as published by the Free Software Foundation.
+
+This program is distributed in the hope it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+more details.
+*/
+#endif
+
+#ifndef __VIRTUAL_ISYS_H_INCLUDED__
+#define __VIRTUAL_ISYS_H_INCLUDED__
+
+/* cmd for storing a number of packets indicated by reg _STREAM2MMIO_NUM_ITEMS */
+#define _STREAM2MMIO_CMD_TOKEN_STORE_PACKETS 1
+
+/* command for waiting for a frame start */
+#define _STREAM2MMIO_CMD_TOKEN_SYNC_FRAME 2
+
+#endif /* __VIRTUAL_ISYS_H_INCLUDED__ */
+
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/pipeline/interface/ia_css_pipeline.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/pipeline/interface/ia_css_pipeline.h
new file mode 100644
index 000000000000..90646f5f8885
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/pipeline/interface/ia_css_pipeline.h
@@ -0,0 +1,308 @@
+#ifndef ISP2401
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#else
+/**
+Support for Intel Camera Imaging ISP subsystem.
+Copyright (c) 2010 - 2015, Intel Corporation.
+
+This program is free software; you can redistribute it and/or modify it
+under the terms and conditions of the GNU General Public License,
+version 2, as published by the Free Software Foundation.
+
+This program is distributed in the hope it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+more details.
+*/
+#endif
+
+#ifndef __IA_CSS_PIPELINE_H__
+#define __IA_CSS_PIPELINE_H__
+
+#include "sh_css_internal.h"
+#include "ia_css_pipe_public.h"
+#include "ia_css_pipeline_common.h"
+
+#define IA_CSS_PIPELINE_NUM_MAX (20)
+
+
+/* Pipeline stage to be executed on SP/ISP */
+struct ia_css_pipeline_stage {
+ unsigned int stage_num;
+ struct ia_css_binary *binary; /* built-in binary */
+ struct ia_css_binary_info *binary_info;
+ const struct ia_css_fw_info *firmware; /* acceleration binary */
+ /* SP function for SP stage */
+ enum ia_css_pipeline_stage_sp_func sp_func;
+ unsigned max_input_width; /* For SP raw copy */
+ struct sh_css_binary_args args;
+ int mode;
+ bool out_frame_allocated[IA_CSS_BINARY_MAX_OUTPUT_PORTS];
+ bool vf_frame_allocated;
+ struct ia_css_pipeline_stage *next;
+ bool enable_zoom;
+};
+
+/* Pipeline of n stages to be executed on SP/ISP per stage */
+struct ia_css_pipeline {
+ enum ia_css_pipe_id pipe_id;
+ uint8_t pipe_num;
+ bool stop_requested;
+ struct ia_css_pipeline_stage *stages;
+ struct ia_css_pipeline_stage *current_stage;
+ unsigned num_stages;
+ struct ia_css_frame in_frame;
+ struct ia_css_frame out_frame[IA_CSS_PIPE_MAX_OUTPUT_STAGE];
+ struct ia_css_frame vf_frame[IA_CSS_PIPE_MAX_OUTPUT_STAGE];
+ unsigned int dvs_frame_delay;
+ unsigned inout_port_config;
+ int num_execs;
+ bool acquire_isp_each_stage;
+ uint32_t pipe_qos_config;
+};
+
+#define DEFAULT_PIPELINE \
+{ \
+ IA_CSS_PIPE_ID_PREVIEW, /* pipe_id */ \
+ 0, /* pipe_num */ \
+ false, /* stop_requested */ \
+ NULL, /* stages */ \
+ NULL, /* current_stage */ \
+ 0, /* num_stages */ \
+ DEFAULT_FRAME, /* in_frame */ \
+ {DEFAULT_FRAME}, /* out_frame */ \
+ {DEFAULT_FRAME}, /* vf_frame */ \
+	IA_CSS_FRAME_DELAY_1,		/* dvs_frame_delay */ \
+ 0, /* inout_port_config */ \
+ -1, /* num_execs */ \
+ true, /* acquire_isp_each_stage */\
+ QOS_INVALID /* pipe_qos_config */\
+}
+
+/* Stage descriptor used to create a new stage in the pipeline */
+struct ia_css_pipeline_stage_desc {
+ struct ia_css_binary *binary;
+ const struct ia_css_fw_info *firmware;
+ enum ia_css_pipeline_stage_sp_func sp_func;
+ unsigned max_input_width;
+ unsigned int mode;
+ struct ia_css_frame *in_frame;
+ struct ia_css_frame *out_frame[IA_CSS_BINARY_MAX_OUTPUT_PORTS];
+ struct ia_css_frame *vf_frame;
+};
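+
+/* Illustrative call sequence (a sketch only; filling in stage_desc with a
+ * binary or firmware, frame allocation and error handling are assumed to
+ * happen elsewhere, and the pipe id/delay values below are just examples):
+ *
+ *    struct ia_css_pipeline pipeline;
+ *    struct ia_css_pipeline_stage_desc stage_desc;
+ *    struct ia_css_pipeline_stage *stage;
+ *
+ *    ia_css_pipeline_init();
+ *    ia_css_pipeline_create(&pipeline, IA_CSS_PIPE_ID_PREVIEW, 0, 1);
+ *    ia_css_pipeline_create_and_add_stage(&pipeline, &stage_desc, &stage);
+ *    ia_css_pipeline_finalize_stages(&pipeline, false);
+ *    ia_css_pipeline_start(IA_CSS_PIPE_ID_PREVIEW, &pipeline);
+ *    ...
+ *    ia_css_pipeline_request_stop(&pipeline);
+ *    ia_css_pipeline_destroy(&pipeline);
+ */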
+
+/** @brief initialize the pipeline module
+ *
+ * @return None
+ *
+ * Initializes the pipeline module. This API has to be called
+ * before any operation on the pipeline module is done
+ */
+void ia_css_pipeline_init(void);
+
+/** @brief initialize the pipeline structure with default values
+ *
+ * @param[out] pipeline structure to be initialized with defaults
+ * @param[in] pipe_id
+ * @param[in] pipe_num Number that uniquely identifies a pipeline.
+ * @return IA_CSS_SUCCESS or error code upon error.
+ *
+ * Initializes the pipeline structure with a set of default values.
+ * This API is expected to be used when a pipeline structure is allocated
+ * externally and needs sane defaults
+ */
+enum ia_css_err ia_css_pipeline_create(
+ struct ia_css_pipeline *pipeline,
+ enum ia_css_pipe_id pipe_id,
+ unsigned int pipe_num,
+ unsigned int dvs_frame_delay);
+
+/** @brief destroy a pipeline
+ *
+ * @param[in] pipeline
+ * @return None
+ *
+ */
+void ia_css_pipeline_destroy(struct ia_css_pipeline *pipeline);
+
+
+/** @brief Starts a pipeline
+ *
+ * @param[in] pipe_id
+ * @param[in] pipeline
+ * @return None
+ *
+ */
+void ia_css_pipeline_start(enum ia_css_pipe_id pipe_id,
+ struct ia_css_pipeline *pipeline);
+
+/** @brief Request to stop a pipeline
+ *
+ * @param[in] pipeline
+ * @return IA_CSS_SUCCESS or error code upon error.
+ *
+ */
+enum ia_css_err ia_css_pipeline_request_stop(struct ia_css_pipeline *pipeline);
+
+/** @brief Check whether pipeline has stopped
+ *
+ * @param[in] pipeline
+ * @return true if the pipeline has stopped
+ *
+ */
+bool ia_css_pipeline_has_stopped(struct ia_css_pipeline *pipe);
+
+/** @brief Clean all the stages of the pipeline and reset it to a newly created state
+ *
+ * @param[in] pipeline
+ * @return None
+ *
+ */
+void ia_css_pipeline_clean(struct ia_css_pipeline *pipeline);
+
+/** @brief Add a stage to pipeline.
+ *
+ * @param pipeline Pointer to the pipeline to be added to.
+ * @param[in] stage_desc The description of the stage
+ * @param[out] stage The newly created stage.
+ * @return IA_CSS_SUCCESS or error code upon error.
+ *
+ * Add a new stage to a non-NULL pipeline.
+ * The stage consists of an ISP binary or firmware and input and output
+ * arguments.
+*/
+enum ia_css_err ia_css_pipeline_create_and_add_stage(
+ struct ia_css_pipeline *pipeline,
+ struct ia_css_pipeline_stage_desc *stage_desc,
+ struct ia_css_pipeline_stage **stage);
+
+/** @brief Finalize the stages in a pipeline
+ *
+ * @param pipeline Pointer to the pipeline to be added to.
+ * @return None
+ *
+ * This API is expected to be called after adding all stages
+*/
+void ia_css_pipeline_finalize_stages(struct ia_css_pipeline *pipeline,
+ bool continuous);
+
+/** @brief gets a stage from the pipeline
+ *
+ * @param[in] pipeline
+ * @return IA_CSS_SUCCESS or error code upon error.
+ *
+ */
+enum ia_css_err ia_css_pipeline_get_stage(struct ia_css_pipeline *pipeline,
+ int mode,
+ struct ia_css_pipeline_stage **stage);
+
+/** @brief Gets the pipeline stage corresponding to a firmware handle from the pipeline
+ *
+ * @param[in] pipeline
+ * @param[in] fw_handle
+ * @param[out] stage Pointer to Stage
+ *
+ * @return IA_CSS_SUCCESS or error code upon error.
+ *
+ */
+enum ia_css_err ia_css_pipeline_get_stage_from_fw(struct ia_css_pipeline *pipeline,
+ uint32_t fw_handle,
+ struct ia_css_pipeline_stage **stage);
+
+/** @brief Gets the firmware handle corresponding to the stage num from the pipeline
+ *
+ * @param[in] pipeline
+ * @param[in] stage_num
+ * @param[out] fw_handle
+ *
+ * @return IA_CSS_SUCCESS or error code upon error.
+ *
+ */
+enum ia_css_err ia_css_pipeline_get_fw_from_stage(struct ia_css_pipeline *pipeline,
+ uint32_t stage_num,
+ uint32_t *fw_handle);
+
+/** @brief gets the output stage from the pipeline
+ *
+ * @param[in] pipeline
+ * @return IA_CSS_SUCCESS or error code upon error.
+ *
+ */
+enum ia_css_err ia_css_pipeline_get_output_stage(
+ struct ia_css_pipeline *pipeline,
+ int mode,
+ struct ia_css_pipeline_stage **stage);
+
+/** @brief Checks whether the pipeline uses params
+ *
+ * @param[in] pipeline
+ * @return true if the pipeline uses params
+ *
+ */
+bool ia_css_pipeline_uses_params(struct ia_css_pipeline *pipeline);
+
+/**
+ * @brief get the SP thread ID.
+ *
+ * @param[in] key The query key, typical use is pipe_num.
+ * @param[out] val The query value.
+ *
+ * @return
+ * true, if the query succeeds;
+ * false, if the query fails.
+ */
+bool ia_css_pipeline_get_sp_thread_id(unsigned int key, unsigned int *val);
+
+#if defined(USE_INPUT_SYSTEM_VERSION_2401)
+/**
+ * @brief Get the pipeline io status
+ *
+ * @param[in] None
+ * @return
+ * Pointer to pipe_io_status
+ */
+struct sh_css_sp_pipeline_io_status *ia_css_pipeline_get_pipe_io_status(void);
+#endif
+
+/**
+ * @brief Map an SP thread to this pipeline
+ *
+ * @param[in] pipe_num
+ * @param[in] map true for mapping and false for unmapping sp threads.
+ *
+ */
+void ia_css_pipeline_map(unsigned int pipe_num, bool map);
+
+/**
+ * @brief Checks whether the pipeline is mapped to SP threads
+ *
+ * @param[in] Query key, typical use is pipe_num
+ *
+ * @return
+ * true, if the pipeline is mapped to SP threads
+ * false, if the pipeline is not mapped to SP threads
+ */
+bool ia_css_pipeline_is_mapped(unsigned int key);
+
+/**
+ * @brief Print pipeline thread mapping
+ *
+ * @param[in] none
+ *
+ * @return None
+ */
+void ia_css_pipeline_dump_thread_map_info(void);
+
+#endif /*__IA_CSS_PIPELINE_H__*/
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/pipeline/interface/ia_css_pipeline_common.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/pipeline/interface/ia_css_pipeline_common.h
new file mode 100644
index 000000000000..a7e6edf41cdb
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/pipeline/interface/ia_css_pipeline_common.h
@@ -0,0 +1,42 @@
+#ifndef ISP2401
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#else
+/**
+Support for Intel Camera Imaging ISP subsystem.
+Copyright (c) 2010 - 2015, Intel Corporation.
+
+This program is free software; you can redistribute it and/or modify it
+under the terms and conditions of the GNU General Public License,
+version 2, as published by the Free Software Foundation.
+
+This program is distributed in the hope it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+more details.
+*/
+#endif
+
+#ifndef __IA_CSS_PIPELINE_COMMON_H__
+#define __IA_CSS_PIPELINE_COMMON_H__
+
+enum ia_css_pipeline_stage_sp_func {
+ IA_CSS_PIPELINE_RAW_COPY = 0,
+ IA_CSS_PIPELINE_BIN_COPY = 1,
+ IA_CSS_PIPELINE_ISYS_COPY = 2,
+ IA_CSS_PIPELINE_NO_FUNC = 3,
+};
+#define IA_CSS_PIPELINE_NUM_STAGE_FUNCS 3
+
+#endif /*__IA_CSS_PIPELINE_COMMON_H__*/
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/pipeline/src/pipeline.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/pipeline/src/pipeline.c
new file mode 100644
index 000000000000..95542fc82217
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/pipeline/src/pipeline.c
@@ -0,0 +1,806 @@
+#ifndef ISP2401
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#else
+/**
+Support for Intel Camera Imaging ISP subsystem.
+Copyright (c) 2010 - 2015, Intel Corporation.
+
+This program is free software; you can redistribute it and/or modify it
+under the terms and conditions of the GNU General Public License,
+version 2, as published by the Free Software Foundation.
+
+This program is distributed in the hope it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+more details.
+*/
+#endif
+
+#include "ia_css_debug.h"
+#include "sw_event_global.h" /* encode_sw_event */
+#include "sp.h" /* cnd_sp_irq_enable() */
+#include "assert_support.h"
+#include "memory_access.h"
+#include "sh_css_sp.h"
+#include "ia_css_pipeline.h"
+#include "ia_css_isp_param.h"
+#include "ia_css_bufq.h"
+
+#define PIPELINE_NUM_UNMAPPED (~0U)
+#define PIPELINE_SP_THREAD_EMPTY_TOKEN (0x0)
+#define PIPELINE_SP_THREAD_RESERVED_TOKEN (0x1)
+
+
+/*******************************************************
+*** Static variables
+********************************************************/
+static unsigned int pipeline_num_to_sp_thread_map[IA_CSS_PIPELINE_NUM_MAX];
+static unsigned int pipeline_sp_thread_list[SH_CSS_MAX_SP_THREADS];
+
+/*******************************************************
+*** Static functions
+********************************************************/
+static void pipeline_init_sp_thread_map(void);
+static void pipeline_map_num_to_sp_thread(unsigned int pipe_num);
+static void pipeline_unmap_num_to_sp_thread(unsigned int pipe_num);
+static void pipeline_init_defaults(
+ struct ia_css_pipeline *pipeline,
+ enum ia_css_pipe_id pipe_id,
+ unsigned int pipe_num,
+ unsigned int dvs_frame_delay);
+
+static void pipeline_stage_destroy(struct ia_css_pipeline_stage *stage);
+static enum ia_css_err pipeline_stage_create(
+ struct ia_css_pipeline_stage_desc *stage_desc,
+ struct ia_css_pipeline_stage **new_stage);
+static void ia_css_pipeline_set_zoom_stage(struct ia_css_pipeline *pipeline);
+static void ia_css_pipeline_configure_inout_port(struct ia_css_pipeline *me,
+ bool continuous);
+
+/*******************************************************
+*** Public functions
+********************************************************/
+void ia_css_pipeline_init(void)
+{
+ pipeline_init_sp_thread_map();
+}
+
+enum ia_css_err ia_css_pipeline_create(
+ struct ia_css_pipeline *pipeline,
+ enum ia_css_pipe_id pipe_id,
+ unsigned int pipe_num,
+ unsigned int dvs_frame_delay)
+{
+ assert(pipeline != NULL);
+ IA_CSS_ENTER_PRIVATE("pipeline = %p, pipe_id = %d, pipe_num = %d, dvs_frame_delay = %d",
+ pipeline, pipe_id, pipe_num, dvs_frame_delay);
+ if (pipeline == NULL) {
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_INVALID_ARGUMENTS);
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+
+ pipeline_init_defaults(pipeline, pipe_id, pipe_num, dvs_frame_delay);
+
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_SUCCESS);
+ return IA_CSS_SUCCESS;
+}
+
+void ia_css_pipeline_map(unsigned int pipe_num, bool map)
+{
+ assert(pipe_num < IA_CSS_PIPELINE_NUM_MAX);
+ IA_CSS_ENTER_PRIVATE("pipe_num = %d, map = %d", pipe_num, map);
+
+ if (pipe_num >= IA_CSS_PIPELINE_NUM_MAX) {
+ IA_CSS_ERROR("Invalid pipe number");
+ IA_CSS_LEAVE_PRIVATE("void");
+ return;
+ }
+ if (map)
+ pipeline_map_num_to_sp_thread(pipe_num);
+ else
+ pipeline_unmap_num_to_sp_thread(pipe_num);
+ IA_CSS_LEAVE_PRIVATE("void");
+}
+
+/** @brief destroy a pipeline
+ *
+ * @param[in] pipeline
+ * @return None
+ *
+ */
+void ia_css_pipeline_destroy(struct ia_css_pipeline *pipeline)
+{
+ assert(pipeline != NULL);
+ IA_CSS_ENTER_PRIVATE("pipeline = %p", pipeline);
+
+ if (pipeline == NULL) {
+ IA_CSS_ERROR("NULL input parameter");
+ IA_CSS_LEAVE_PRIVATE("void");
+ return;
+ }
+
+ IA_CSS_LOG("pipe_num = %d", pipeline->pipe_num);
+
+ /* Free the pipeline number */
+ ia_css_pipeline_clean(pipeline);
+
+ IA_CSS_LEAVE_PRIVATE("void");
+}
+
+/* Run a pipeline and wait till it completes. */
+void ia_css_pipeline_start(enum ia_css_pipe_id pipe_id,
+ struct ia_css_pipeline *pipeline)
+{
+ uint8_t pipe_num = 0;
+ unsigned int thread_id;
+
+ assert(pipeline != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_pipeline_start() enter: pipe_id=%d, pipeline=%p\n",
+ pipe_id, pipeline);
+ pipeline->pipe_id = pipe_id;
+ sh_css_sp_init_pipeline(pipeline, pipe_id, pipe_num,
+ false, false, false, true, SH_CSS_BDS_FACTOR_1_00,
+ SH_CSS_PIPE_CONFIG_OVRD_NO_OVRD,
+#ifndef ISP2401
+ IA_CSS_INPUT_MODE_MEMORY, NULL, NULL
+#else
+ IA_CSS_INPUT_MODE_MEMORY, NULL, NULL,
+#endif
+#if !defined(HAS_NO_INPUT_SYSTEM)
+#ifndef ISP2401
+ , (mipi_port_ID_t) 0
+#else
+ (mipi_port_ID_t) 0,
+#endif
+#endif
+#ifndef ISP2401
+ );
+#else
+ NULL, NULL);
+#endif
+ ia_css_pipeline_get_sp_thread_id(pipe_num, &thread_id);
+ if (!sh_css_sp_is_running()) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_pipeline_start() error,leaving\n");
+ /* queues are invalid*/
+ return;
+ }
+ ia_css_bufq_enqueue_psys_event(IA_CSS_PSYS_SW_EVENT_START_STREAM,
+ (uint8_t)thread_id,
+ 0,
+ 0);
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_pipeline_start() leave: return_void\n");
+}
+
+/**
+ * @brief Query the SP thread ID.
+ * Refer to "sh_css_internal.h" for details.
+ */
+bool ia_css_pipeline_get_sp_thread_id(unsigned int key, unsigned int *val)
+{
+ IA_CSS_ENTER("key=%d, val=%p", key, val);
+
+ if ((val == NULL) || (key >= IA_CSS_PIPELINE_NUM_MAX) || (key >= IA_CSS_PIPE_ID_NUM)) {
+ IA_CSS_LEAVE("return value = false");
+ return false;
+ }
+
+ *val = pipeline_num_to_sp_thread_map[key];
+
+ if (*val == (unsigned)PIPELINE_NUM_UNMAPPED) {
+ IA_CSS_LOG("unmapped pipeline number");
+ IA_CSS_LEAVE("return value = false");
+ return false;
+ }
+ IA_CSS_LEAVE("return value = true");
+ return true;
+}
+
+void ia_css_pipeline_dump_thread_map_info(void)
+{
+ unsigned int i;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "pipeline_num_to_sp_thread_map:\n");
+ for (i = 0; i < IA_CSS_PIPELINE_NUM_MAX; i++) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "pipe_num: %u, tid: 0x%x\n", i, pipeline_num_to_sp_thread_map[i]);
+ }
+}
+
+enum ia_css_err ia_css_pipeline_request_stop(struct ia_css_pipeline *pipeline)
+{
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ unsigned int thread_id;
+
+ assert(pipeline != NULL);
+
+ if (pipeline == NULL)
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_pipeline_request_stop() enter: pipeline=%p\n",
+ pipeline);
+ pipeline->stop_requested = true;
+
+ /* Send stop event to the sp*/
+ /* This needs improvement, stop on all the pipes available
+ * in the stream*/
+ ia_css_pipeline_get_sp_thread_id(pipeline->pipe_num, &thread_id);
+ if (!sh_css_sp_is_running())
+ {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_pipeline_request_stop() leaving\n");
+ /* queues are invalid */
+ return IA_CSS_ERR_RESOURCE_NOT_AVAILABLE;
+ }
+ ia_css_bufq_enqueue_psys_event(IA_CSS_PSYS_SW_EVENT_STOP_STREAM,
+ (uint8_t)thread_id,
+ 0,
+ 0);
+ sh_css_sp_uninit_pipeline(pipeline->pipe_num);
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_pipeline_request_stop() leave: return_err=%d\n",
+ err);
+ return err;
+}
+
+void ia_css_pipeline_clean(struct ia_css_pipeline *pipeline)
+{
+ struct ia_css_pipeline_stage *s;
+
+ assert(pipeline != NULL);
+ IA_CSS_ENTER_PRIVATE("pipeline = %p", pipeline);
+
+ if (pipeline == NULL) {
+ IA_CSS_ERROR("NULL input parameter");
+ IA_CSS_LEAVE_PRIVATE("void");
+ return;
+ }
+ s = pipeline->stages;
+
+ while (s) {
+ struct ia_css_pipeline_stage *next = s->next;
+ pipeline_stage_destroy(s);
+ s = next;
+ }
+ pipeline_init_defaults(pipeline, pipeline->pipe_id, pipeline->pipe_num, pipeline->dvs_frame_delay);
+
+ IA_CSS_LEAVE_PRIVATE("void");
+}
+
+/** @brief Add a stage to pipeline.
+ *
+ * @param pipeline Pointer to the pipeline to be added to.
+ * @param[in] stage_desc The description of the stage
+ * @param[out] stage The newly created stage.
+ * @return IA_CSS_SUCCESS or error code upon error.
+ *
+ * Add a new stage to a non-NULL pipeline.
+ * The stage consists of an ISP binary or firmware and input and
+ * output arguments.
+*/
+enum ia_css_err ia_css_pipeline_create_and_add_stage(
+ struct ia_css_pipeline *pipeline,
+ struct ia_css_pipeline_stage_desc *stage_desc,
+ struct ia_css_pipeline_stage **stage)
+{
+ struct ia_css_pipeline_stage *last, *new_stage = NULL;
+ enum ia_css_err err;
+
+ /* other arguments can be NULL */
+ assert(pipeline != NULL);
+ assert(stage_desc != NULL);
+ last = pipeline->stages;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_pipeline_create_and_add_stage() enter:\n");
+ if (!stage_desc->binary && !stage_desc->firmware
+ && (stage_desc->sp_func == IA_CSS_PIPELINE_NO_FUNC)) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_pipeline_create_and_add_stage() done:"
+ " Invalid args\n");
+
+ return IA_CSS_ERR_INTERNAL_ERROR;
+ }
+
+ /* Find the last stage */
+ while (last && last->next)
+ last = last->next;
+
+	/* if in_frame is not set, we use the out_frame from the previous
+	 * stage; if there is no previous stage, it's an error.
+	 */
+ if ((stage_desc->sp_func == IA_CSS_PIPELINE_NO_FUNC)
+ && (!stage_desc->in_frame)
+ && (!stage_desc->firmware)
+ && (!stage_desc->binary->online)) {
+
+ /* Do this only for ISP stages*/
+ if (last && last->args.out_frame[0])
+ stage_desc->in_frame = last->args.out_frame[0];
+
+ if (!stage_desc->in_frame)
+ return IA_CSS_ERR_INTERNAL_ERROR;
+ }
+
+ /* Create the new stage */
+ err = pipeline_stage_create(stage_desc, &new_stage);
+ if (err != IA_CSS_SUCCESS) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_pipeline_create_and_add_stage() done:"
+ " stage_create_failed\n");
+ return err;
+ }
+
+ if (last)
+ last->next = new_stage;
+ else
+ pipeline->stages = new_stage;
+
+ /* Output the new stage */
+ if (stage)
+ *stage = new_stage;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_pipeline_create_and_add_stage() done:\n");
+ return IA_CSS_SUCCESS;
+}
+
+void ia_css_pipeline_finalize_stages(struct ia_css_pipeline *pipeline,
+ bool continuous)
+{
+ unsigned i = 0;
+ struct ia_css_pipeline_stage *stage;
+
+ assert(pipeline != NULL);
+ for (stage = pipeline->stages; stage; stage = stage->next) {
+ stage->stage_num = i;
+ i++;
+ }
+ pipeline->num_stages = i;
+
+ ia_css_pipeline_set_zoom_stage(pipeline);
+ ia_css_pipeline_configure_inout_port(pipeline, continuous);
+}
+
+enum ia_css_err ia_css_pipeline_get_stage(struct ia_css_pipeline *pipeline,
+ int mode,
+ struct ia_css_pipeline_stage **stage)
+{
+ struct ia_css_pipeline_stage *s;
+ assert(pipeline != NULL);
+ assert(stage != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_pipeline_get_stage() enter:\n");
+ for (s = pipeline->stages; s; s = s->next) {
+ if (s->mode == mode) {
+ *stage = s;
+ return IA_CSS_SUCCESS;
+ }
+ }
+ return IA_CSS_ERR_INTERNAL_ERROR;
+}
+
+enum ia_css_err ia_css_pipeline_get_stage_from_fw(struct ia_css_pipeline *pipeline,
+ uint32_t fw_handle,
+ struct ia_css_pipeline_stage **stage)
+{
+ struct ia_css_pipeline_stage *s;
+ assert(pipeline != NULL);
+ assert(stage != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,"%s() \n",__func__);
+ for (s = pipeline->stages; s; s = s->next) {
+ if ((s->firmware) && (s->firmware->handle == fw_handle)) {
+ *stage = s;
+ return IA_CSS_SUCCESS;
+ }
+ }
+ return IA_CSS_ERR_INTERNAL_ERROR;
+}
+
+enum ia_css_err ia_css_pipeline_get_fw_from_stage(struct ia_css_pipeline *pipeline,
+ uint32_t stage_num,
+ uint32_t *fw_handle)
+{
+ struct ia_css_pipeline_stage *s;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,"%s() \n",__func__);
+ if ((pipeline == NULL) || (fw_handle == NULL))
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+
+ for (s = pipeline->stages; s; s = s->next) {
+		if ((s->stage_num == stage_num) && (s->firmware)) {
+ *fw_handle = s->firmware->handle;
+ return IA_CSS_SUCCESS;
+ }
+ }
+ return IA_CSS_ERR_INTERNAL_ERROR;
+}
+
+enum ia_css_err ia_css_pipeline_get_output_stage(
+ struct ia_css_pipeline *pipeline,
+ int mode,
+ struct ia_css_pipeline_stage **stage)
+{
+ struct ia_css_pipeline_stage *s;
+ assert(pipeline != NULL);
+ assert(stage != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_pipeline_get_output_stage() enter:\n");
+
+ *stage = NULL;
+ /* First find acceleration firmware at end of pipe */
+ for (s = pipeline->stages; s; s = s->next) {
+ if (s->firmware && s->mode == mode &&
+ s->firmware->info.isp.sp.enable.output)
+ *stage = s;
+ }
+ if (*stage)
+ return IA_CSS_SUCCESS;
+ /* If no firmware, find binary in pipe */
+ return ia_css_pipeline_get_stage(pipeline, mode, stage);
+}
+
+bool ia_css_pipeline_has_stopped(struct ia_css_pipeline *pipeline)
+{
+	/* Android compilation fails if this is made a local variable: the
+	   stack size on Android is limited to 2k and this structure is
+	   around 2.5K. Instead of static, malloc could be used, but if this
+	   call is made too often it would fragment memory, versus a fixed
+	   allocation. */
+ static struct sh_css_sp_group sp_group;
+ unsigned int thread_id;
+ const struct ia_css_fw_info *fw;
+ unsigned int HIVE_ADDR_sp_group;
+
+ fw = &sh_css_sp_fw;
+ HIVE_ADDR_sp_group = fw->info.sp.group;
+
+ ia_css_pipeline_get_sp_thread_id(pipeline->pipe_num, &thread_id);
+ sp_dmem_load(SP0_ID,
+ (unsigned int)sp_address_of(sp_group),
+ &sp_group, sizeof(struct sh_css_sp_group));
+ return sp_group.pipe[thread_id].num_stages == 0;
+}
+
+#if defined(USE_INPUT_SYSTEM_VERSION_2401)
+struct sh_css_sp_pipeline_io_status *ia_css_pipeline_get_pipe_io_status(void)
+{
+ return(&sh_css_sp_group.pipe_io_status);
+}
+#endif
+
+bool ia_css_pipeline_is_mapped(unsigned int key)
+{
+ bool ret = false;
+
+ IA_CSS_ENTER_PRIVATE("key = %d", key);
+
+ if ((key >= IA_CSS_PIPELINE_NUM_MAX) || (key >= IA_CSS_PIPE_ID_NUM)) {
+ IA_CSS_ERROR("Invalid key!!");
+ IA_CSS_LEAVE_PRIVATE("return = %d", false);
+ return false;
+ }
+
+ ret = (bool)(pipeline_num_to_sp_thread_map[key] != (unsigned)PIPELINE_NUM_UNMAPPED);
+
+ IA_CSS_LEAVE_PRIVATE("return = %d", ret);
+ return ret;
+}
+
+/*******************************************************
+*** Static functions
+********************************************************/
+
+/* Pipeline:
+ * To organize the several different binaries for each type of mode,
+ * we use a pipeline. A pipeline contains a number of stages, each with
+ * their own binary and frame pointers.
+ * When stages are added to a pipeline, output frames that are not passed
+ * from outside are automatically allocated.
+ * When input frames are not passed from outside, each stage will use the
+ * output frame of the previous stage as input (the full resolution output,
+ * not the viewfinder output).
+ * Pipelines must be cleaned and re-created when settings of the binaries
+ * change.
+ */
+static void pipeline_stage_destroy(struct ia_css_pipeline_stage *stage)
+{
+ unsigned int i;
+ for (i = 0; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++) {
+ if (stage->out_frame_allocated[i]) {
+ ia_css_frame_free(stage->args.out_frame[i]);
+ stage->args.out_frame[i] = NULL;
+ }
+ }
+ if (stage->vf_frame_allocated) {
+ ia_css_frame_free(stage->args.out_vf_frame);
+ stage->args.out_vf_frame = NULL;
+ }
+ sh_css_free(stage);
+}
+
+static void pipeline_init_sp_thread_map(void)
+{
+ unsigned int i;
+
+ for (i = 1; i < SH_CSS_MAX_SP_THREADS; i++)
+ pipeline_sp_thread_list[i] = PIPELINE_SP_THREAD_EMPTY_TOKEN;
+
+ for (i = 0; i < IA_CSS_PIPELINE_NUM_MAX; i++)
+ pipeline_num_to_sp_thread_map[i] = PIPELINE_NUM_UNMAPPED;
+}
+
+static void pipeline_map_num_to_sp_thread(unsigned int pipe_num)
+{
+ unsigned int i;
+ bool found_sp_thread = false;
+
+ /* pipe is not mapped to any thread */
+ assert(pipeline_num_to_sp_thread_map[pipe_num]
+ == (unsigned)PIPELINE_NUM_UNMAPPED);
+
+ for (i = 0; i < SH_CSS_MAX_SP_THREADS; i++) {
+ if (pipeline_sp_thread_list[i] ==
+ PIPELINE_SP_THREAD_EMPTY_TOKEN) {
+ pipeline_sp_thread_list[i] =
+ PIPELINE_SP_THREAD_RESERVED_TOKEN;
+ pipeline_num_to_sp_thread_map[pipe_num] = i;
+ found_sp_thread = true;
+ break;
+ }
+ }
+
+ /* Make sure a mapping is found */
+ /* I could do:
+ assert(i < SH_CSS_MAX_SP_THREADS);
+
+ But the below is more descriptive.
+ */
+ assert(found_sp_thread != false);
+}
+
+static void pipeline_unmap_num_to_sp_thread(unsigned int pipe_num)
+{
+ unsigned int thread_id;
+ assert(pipeline_num_to_sp_thread_map[pipe_num]
+ != (unsigned)PIPELINE_NUM_UNMAPPED);
+
+ thread_id = pipeline_num_to_sp_thread_map[pipe_num];
+ pipeline_num_to_sp_thread_map[pipe_num] = PIPELINE_NUM_UNMAPPED;
+ pipeline_sp_thread_list[thread_id] = PIPELINE_SP_THREAD_EMPTY_TOKEN;
+}
+
+static enum ia_css_err pipeline_stage_create(
+ struct ia_css_pipeline_stage_desc *stage_desc,
+ struct ia_css_pipeline_stage **new_stage)
+{
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ struct ia_css_pipeline_stage *stage = NULL;
+ struct ia_css_binary *binary;
+ struct ia_css_frame *vf_frame;
+ struct ia_css_frame *out_frame[IA_CSS_BINARY_MAX_OUTPUT_PORTS];
+ const struct ia_css_fw_info *firmware;
+ unsigned int i;
+
+ /* Verify input parameters*/
+ if (!(stage_desc->in_frame) && !(stage_desc->firmware)
+ && (stage_desc->binary) && !(stage_desc->binary->online)) {
+ err = IA_CSS_ERR_INTERNAL_ERROR;
+ goto ERR;
+ }
+
+ binary = stage_desc->binary;
+ firmware = stage_desc->firmware;
+ vf_frame = stage_desc->vf_frame;
+ for (i = 0; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++) {
+ out_frame[i] = stage_desc->out_frame[i];
+ }
+
+ stage = sh_css_malloc(sizeof(*stage));
+ if (stage == NULL) {
+ err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+ goto ERR;
+ }
+ memset(stage, 0, sizeof(*stage));
+
+ if (firmware) {
+ stage->binary = NULL;
+ stage->binary_info =
+ (struct ia_css_binary_info *)&firmware->info.isp;
+ } else {
+ stage->binary = binary;
+ if (binary)
+ stage->binary_info =
+ (struct ia_css_binary_info *)binary->info;
+ else
+ stage->binary_info = NULL;
+ }
+
+ stage->firmware = firmware;
+ stage->sp_func = stage_desc->sp_func;
+ stage->max_input_width = stage_desc->max_input_width;
+ stage->mode = stage_desc->mode;
+ for (i = 0; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++)
+ stage->out_frame_allocated[i] = false;
+ stage->vf_frame_allocated = false;
+ stage->next = NULL;
+ sh_css_binary_args_reset(&stage->args);
+
+ for (i = 0; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++) {
+ if (!(out_frame[i]) && (binary)
+ && (binary->out_frame_info[i].res.width)) {
+ err = ia_css_frame_allocate_from_info(&out_frame[i],
+ &binary->out_frame_info[i]);
+ if (err != IA_CSS_SUCCESS)
+ goto ERR;
+ stage->out_frame_allocated[i] = true;
+ }
+ }
+	/* The VF frame is not needed in case of need_pp;
+	   however, the capture binary needs a VF frame to write to.
+	 */
+ if (!vf_frame) {
+ if ((binary && binary->vf_frame_info.res.width) ||
+ (firmware && firmware->info.isp.sp.enable.vf_veceven)
+ ) {
+ err = ia_css_frame_allocate_from_info(&vf_frame,
+ &binary->vf_frame_info);
+ if (err != IA_CSS_SUCCESS)
+ goto ERR;
+ stage->vf_frame_allocated = true;
+ }
+ } else if (vf_frame && binary && binary->vf_frame_info.res.width
+ && !firmware) {
+ /* only mark as allocated if buffer pointer available */
+ if (vf_frame->data != mmgr_NULL)
+ stage->vf_frame_allocated = true;
+ }
+
+ stage->args.in_frame = stage_desc->in_frame;
+ for (i = 0; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++)
+ stage->args.out_frame[i] = out_frame[i];
+ stage->args.out_vf_frame = vf_frame;
+ *new_stage = stage;
+ return err;
+ERR:
+ if (stage != NULL)
+ pipeline_stage_destroy(stage);
+ return err;
+}
+
+static void pipeline_init_defaults(
+ struct ia_css_pipeline *pipeline,
+ enum ia_css_pipe_id pipe_id,
+ unsigned int pipe_num,
+ unsigned int dvs_frame_delay)
+{
+ struct ia_css_frame init_frame = DEFAULT_FRAME;
+ unsigned int i;
+
+ pipeline->pipe_id = pipe_id;
+ pipeline->stages = NULL;
+ pipeline->stop_requested = false;
+ pipeline->current_stage = NULL;
+ pipeline->in_frame = init_frame;
+ for (i = 0; i < IA_CSS_PIPE_MAX_OUTPUT_STAGE; i++) {
+ pipeline->out_frame[i] = init_frame;
+ pipeline->vf_frame[i] = init_frame;
+ }
+ pipeline->num_execs = -1;
+ pipeline->acquire_isp_each_stage = true;
+ pipeline->pipe_num = (uint8_t)pipe_num;
+ pipeline->dvs_frame_delay = dvs_frame_delay;
+}
+
+static void ia_css_pipeline_set_zoom_stage(struct ia_css_pipeline *pipeline)
+{
+ struct ia_css_pipeline_stage *stage = NULL;
+ enum ia_css_err err = IA_CSS_SUCCESS;
+
+ assert(pipeline != NULL);
+ if (pipeline->pipe_id == IA_CSS_PIPE_ID_PREVIEW) {
+ /* in preview pipeline, vf_pp stage should do zoom */
+ err = ia_css_pipeline_get_stage(pipeline, IA_CSS_BINARY_MODE_VF_PP, &stage);
+ if (err == IA_CSS_SUCCESS)
+ stage->enable_zoom = true;
+ } else if (pipeline->pipe_id == IA_CSS_PIPE_ID_CAPTURE) {
+ /* in capture pipeline, capture_pp stage should do zoom */
+ err = ia_css_pipeline_get_stage(pipeline, IA_CSS_BINARY_MODE_CAPTURE_PP, &stage);
+ if (err == IA_CSS_SUCCESS)
+ stage->enable_zoom = true;
+ } else if (pipeline->pipe_id == IA_CSS_PIPE_ID_VIDEO) {
+ /* in video pipeline, video stage should do zoom */
+ err = ia_css_pipeline_get_stage(pipeline, IA_CSS_BINARY_MODE_VIDEO, &stage);
+ if (err == IA_CSS_SUCCESS)
+ stage->enable_zoom = true;
+ } else if (pipeline->pipe_id == IA_CSS_PIPE_ID_YUVPP) {
+ /* in yuvpp pipeline, first yuv_scaler stage should do zoom */
+ err = ia_css_pipeline_get_stage(pipeline, IA_CSS_BINARY_MODE_CAPTURE_PP, &stage);
+ if (err == IA_CSS_SUCCESS)
+ stage->enable_zoom = true;
+ }
+}
+
+static void
+ia_css_pipeline_configure_inout_port(struct ia_css_pipeline *me, bool continuous)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_pipeline_configure_inout_port() enter: pipe_id(%d) continuous(%d)\n",
+ me->pipe_id, continuous);
+ switch (me->pipe_id) {
+ case IA_CSS_PIPE_ID_PREVIEW:
+ case IA_CSS_PIPE_ID_VIDEO:
+ SH_CSS_PIPE_PORT_CONFIG_SET(me->inout_port_config,
+ (uint8_t)SH_CSS_PORT_INPUT,
+ (uint8_t)(continuous ? SH_CSS_COPYSINK_TYPE : SH_CSS_HOST_TYPE), 1);
+ SH_CSS_PIPE_PORT_CONFIG_SET(me->inout_port_config,
+ (uint8_t)SH_CSS_PORT_OUTPUT,
+ (uint8_t)SH_CSS_HOST_TYPE, 1);
+ break;
+ case IA_CSS_PIPE_ID_COPY: /*Copy pipe ports configured to "offline" mode*/
+ SH_CSS_PIPE_PORT_CONFIG_SET(me->inout_port_config,
+ (uint8_t)SH_CSS_PORT_INPUT,
+ (uint8_t)SH_CSS_HOST_TYPE, 1);
+ if (continuous) {
+ SH_CSS_PIPE_PORT_CONFIG_SET(me->inout_port_config,
+ (uint8_t)SH_CSS_PORT_OUTPUT,
+ (uint8_t)SH_CSS_COPYSINK_TYPE, 1);
+ SH_CSS_PIPE_PORT_CONFIG_SET(me->inout_port_config,
+ (uint8_t)SH_CSS_PORT_OUTPUT,
+ (uint8_t)SH_CSS_TAGGERSINK_TYPE, 1);
+ } else {
+ SH_CSS_PIPE_PORT_CONFIG_SET(me->inout_port_config,
+ (uint8_t)SH_CSS_PORT_OUTPUT,
+ (uint8_t)SH_CSS_HOST_TYPE, 1);
+ }
+ break;
+ case IA_CSS_PIPE_ID_CAPTURE:
+ SH_CSS_PIPE_PORT_CONFIG_SET(me->inout_port_config,
+ (uint8_t)SH_CSS_PORT_INPUT,
+ (uint8_t)(continuous ? SH_CSS_TAGGERSINK_TYPE : SH_CSS_HOST_TYPE),
+ 1);
+ SH_CSS_PIPE_PORT_CONFIG_SET(me->inout_port_config,
+ (uint8_t)SH_CSS_PORT_OUTPUT,
+ (uint8_t)SH_CSS_HOST_TYPE, 1);
+ break;
+ case IA_CSS_PIPE_ID_YUVPP:
+ SH_CSS_PIPE_PORT_CONFIG_SET(me->inout_port_config,
+ (uint8_t)SH_CSS_PORT_INPUT,
+ (uint8_t)(SH_CSS_HOST_TYPE), 1);
+ SH_CSS_PIPE_PORT_CONFIG_SET(me->inout_port_config,
+ (uint8_t)SH_CSS_PORT_OUTPUT,
+ (uint8_t)SH_CSS_HOST_TYPE, 1);
+ break;
+ case IA_CSS_PIPE_ID_ACC:
+ SH_CSS_PIPE_PORT_CONFIG_SET(me->inout_port_config,
+ (uint8_t)SH_CSS_PORT_INPUT,
+ (uint8_t)SH_CSS_HOST_TYPE, 1);
+ SH_CSS_PIPE_PORT_CONFIG_SET(me->inout_port_config,
+ (uint8_t)SH_CSS_PORT_OUTPUT,
+ (uint8_t)SH_CSS_HOST_TYPE, 1);
+ break;
+ default:
+ break;
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "ia_css_pipeline_configure_inout_port() leave: inout_port_config(%x)\n",
+ me->inout_port_config);
+}
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/queue/interface/ia_css_queue.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/queue/interface/ia_css_queue.h
new file mode 100644
index 000000000000..e50a0f813753
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/queue/interface/ia_css_queue.h
@@ -0,0 +1,192 @@
+#ifndef ISP2401
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#else
+/**
+Support for Intel Camera Imaging ISP subsystem.
+Copyright (c) 2010 - 2015, Intel Corporation.
+
+This program is free software; you can redistribute it and/or modify it
+under the terms and conditions of the GNU General Public License,
+version 2, as published by the Free Software Foundation.
+
+This program is distributed in the hope it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+more details.
+*/
+#endif
+
+#ifndef __IA_CSS_QUEUE_H
+#define __IA_CSS_QUEUE_H
+
+#include <platform_support.h>
+#include <type_support.h>
+
+#include "ia_css_queue_comm.h"
+#include "../src/queue_access.h"
+
+/* Local Queue object descriptor */
+struct ia_css_queue_local {
+ ia_css_circbuf_desc_t *cb_desc; /*Circbuf desc for local queues*/
+ ia_css_circbuf_elem_t *cb_elems; /*Circbuf elements*/
+};
+typedef struct ia_css_queue_local ia_css_queue_local_t;
+
+/* Handle for queue object*/
+typedef struct ia_css_queue ia_css_queue_t;
+
+
+/*****************************************************************************
+ * Queue Public APIs
+ *****************************************************************************/
+/** @brief Initialize a local queue instance.
+ *
+ * @param[out] qhandle. Handle to queue instance for use with API
+ * @param[in] desc. Descriptor with queue properties filled-in
+ * @return 0 - Successful init of local queue instance.
+ * @return EINVAL - Invalid argument.
+ *
+ */
+extern int ia_css_queue_local_init(
+ ia_css_queue_t *qhandle,
+ ia_css_queue_local_t *desc);
+
+/** @brief Initialize a remote queue instance
+ *
+ * @param[out] qhandle. Handle to queue instance for use with API
+ * @param[in] desc. Descriptor with queue properties filled-in
+ * @return 0 - Successful init of remote queue instance.
+ * @return EINVAL - Invalid argument.
+ */
+extern int ia_css_queue_remote_init(
+ ia_css_queue_t *qhandle,
+ ia_css_queue_remote_t *desc);
+
+/** @brief Uninitialize a queue instance
+ *
+ * @param[in] qhandle. Handle to queue instance
+ * @return 0 - Successful uninit.
+ *
+ */
+extern int ia_css_queue_uninit(
+ ia_css_queue_t *qhandle);
+
+/** @brief Enqueue an item in the queue instance
+ *
+ * @param[in] qhandle. Handle to queue instance
+ * @param[in] item. Object to be enqueued.
+ * @return 0 - Successful enqueue.
+ * @return EINVAL - Invalid argument.
+ * @return ENOBUFS - Queue is full.
+ *
+ */
+extern int ia_css_queue_enqueue(
+ ia_css_queue_t *qhandle,
+ uint32_t item);
+
+/** @brief Dequeue an item from the queue instance
+ *
+ * @param[in] qhandle. Handle to queue instance
+ * @param[out] item. Object to be dequeued into this item.
+
+ * @return 0 - Successful dequeue.
+ * @return EINVAL - Invalid argument.
+ * @return ENODATA - Queue is empty.
+ *
+ */
+extern int ia_css_queue_dequeue(
+ ia_css_queue_t *qhandle,
+ uint32_t *item);
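+
+/* Illustrative local-queue usage (a sketch; my_cb_desc and my_cb_elems stand
+ * for caller-provided circbuf descriptor/element storage, and every call
+ * below returns an error code that should be checked):
+ *
+ *    ia_css_queue_t q;
+ *    ia_css_queue_local_t desc = { &my_cb_desc, my_cb_elems };
+ *    uint32_t val;
+ *
+ *    ia_css_queue_local_init(&q, &desc);
+ *    ia_css_queue_enqueue(&q, 0x1234);
+ *    ia_css_queue_dequeue(&q, &val);
+ *    ia_css_queue_uninit(&q);
+ */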
+
+/** @brief Check if the queue is empty
+ *
+ * @param[in] qhandle. Handle to queue instance
+ * @param[out] is_empty True if empty, false if not.
+ * @return 0 - Successful access state.
+ * @return EINVAL - Invalid argument.
+ * @return ENOSYS - Function not implemented.
+ *
+ */
+extern int ia_css_queue_is_empty(
+ ia_css_queue_t *qhandle,
+ bool *is_empty);
+
+/** @brief Check if the queue is full
+ *
+ * @param[in] qhandle. Handle to queue instance
+ * @param[out] is_full True if full, false if not.
+ * @return 0 - Successfully access state.
+ * @return EINVAL - Invalid argument.
+ * @return ENOSYS - Function not implemented.
+ *
+ */
+extern int ia_css_queue_is_full(
+ ia_css_queue_t *qhandle,
+ bool *is_full);
+
+/** @brief Get used space in the queue
+ *
+ * @param[in] qhandle. Handle to queue instance
+ * @param[out] size Number of elements currently in the queue
+ * @return 0 - Successfully access state.
+ * @return EINVAL - Invalid argument.
+ *
+ */
+extern int ia_css_queue_get_used_space(
+ ia_css_queue_t *qhandle,
+ uint32_t *size);
+
+/** @brief Get free space in the queue
+ *
+ * @param[in] qhandle. Handle to queue instance
+ * @param[out] size Number of free elements in the queue
+ * @return 0 - Successfully access state.
+ * @return EINVAL - Invalid argument.
+ *
+ */
+extern int ia_css_queue_get_free_space(
+ ia_css_queue_t *qhandle,
+ uint32_t *size);
+
+/** @brief Peek at an element in the queue
+ *
+ * @param[in] qhandle. Handle to queue instance
+ * @param[in] offset Offset of element to peek,
+ * starting from head of queue
+ * @param[out] element Value of the element returned
+ * @return 0 - Successfully access state.
+ * @return EINVAL - Invalid argument.
+ *
+ */
+extern int ia_css_queue_peek(
+ ia_css_queue_t *qhandle,
+ uint32_t offset,
+ uint32_t *element);
+
+/** @brief Get the usable size for the queue
+ *
+ * @param[in] qhandle. Handle to queue instance
+ * @param[out] size Size value to be returned here.
+ * @return 0 - Successful get size.
+ * @return EINVAL - Invalid argument.
+ * @return ENOSYS - Function not implemented.
+ *
+ */
+extern int ia_css_queue_get_size(
+ ia_css_queue_t *qhandle,
+ uint32_t *size);
+
+#endif /* __IA_CSS_QUEUE_H */
+
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/queue/interface/ia_css_queue_comm.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/queue/interface/ia_css_queue_comm.h
new file mode 100644
index 000000000000..4ebaeb0c1847
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/queue/interface/ia_css_queue_comm.h
@@ -0,0 +1,69 @@
+#ifndef ISP2401
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#else
+/**
+Support for Intel Camera Imaging ISP subsystem.
+Copyright (c) 2010 - 2015, Intel Corporation.
+
+This program is free software; you can redistribute it and/or modify it
+under the terms and conditions of the GNU General Public License,
+version 2, as published by the Free Software Foundation.
+
+This program is distributed in the hope it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+more details.
+*/
+#endif
+
+#ifndef __IA_CSS_QUEUE_COMM_H
+#define __IA_CSS_QUEUE_COMM_H
+
+#include "type_support.h"
+#include "ia_css_circbuf.h"
+/*****************************************************************************
+ * Queue Public Data Structures
+ *****************************************************************************/
+
+/* Queue location specifier */
+/* Avoiding enums to save space */
+#define IA_CSS_QUEUE_LOC_HOST 0
+#define IA_CSS_QUEUE_LOC_SP 1
+#define IA_CSS_QUEUE_LOC_ISP 2
+
+/* Queue type specifier */
+/* Avoiding enums to save space */
+#define IA_CSS_QUEUE_TYPE_LOCAL 0
+#define IA_CSS_QUEUE_TYPE_REMOTE 1
+
+/* For DDR-allocated queues, allocate at least this many elements.
+A DDR to SP DMEM DMA transfer needs a 32-byte aligned address.
+Since each element is 4 bytes, 8 elements need to be
+DMAed to access a single element. */
+#define IA_CSS_MIN_ELEM_COUNT 8
+#define IA_CSS_DMA_XFER_MASK (IA_CSS_MIN_ELEM_COUNT - 1)
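+
+/* Illustrative: a requested element count n can be padded to a multiple of
+ * IA_CSS_MIN_ELEM_COUNT as (n + IA_CSS_DMA_XFER_MASK) & ~IA_CSS_DMA_XFER_MASK,
+ * e.g. n = 5 becomes 8 (a sketch of the intent; how the mask is applied is
+ * up to its users).
+ */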
+
+/* Remote Queue object descriptor */
+struct ia_css_queue_remote {
+ uint32_t cb_desc_addr; /*Circbuf desc address for remote queues*/
+ uint32_t cb_elems_addr; /*Circbuf elements addr for remote queue*/
+ uint8_t location; /* Cell location for queue */
+ uint8_t proc_id; /* Processor id for queue access */
+};
+typedef struct ia_css_queue_remote ia_css_queue_remote_t;
+
+
+#endif /* __IA_CSS_QUEUE_COMM_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/queue/src/queue.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/queue/src/queue.c
new file mode 100644
index 000000000000..606376fdf0ba
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/queue/src/queue.c
@@ -0,0 +1,412 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "ia_css_queue.h"
+#include <math_support.h>
+#include <ia_css_circbuf.h>
+#include <ia_css_circbuf_desc.h>
+#include "queue_access.h"
+
+/*****************************************************************************
+ * Queue Public APIs
+ *****************************************************************************/
+int ia_css_queue_local_init(
+ ia_css_queue_t *qhandle,
+ ia_css_queue_local_t *desc)
+{
+ if (NULL == qhandle || NULL == desc
+ || NULL == desc->cb_elems || NULL == desc->cb_desc) {
+ /* Invalid parameters, return error*/
+ return EINVAL;
+ }
+
+ /* Mark the queue as Local */
+ qhandle->type = IA_CSS_QUEUE_TYPE_LOCAL;
+
+ /* Create a local circular buffer queue*/
+ ia_css_circbuf_create(&qhandle->desc.cb_local,
+ desc->cb_elems,
+ desc->cb_desc);
+
+ return 0;
+}
+
+int ia_css_queue_remote_init(
+ ia_css_queue_t *qhandle,
+ ia_css_queue_remote_t *desc)
+{
+ if (NULL == qhandle || NULL == desc) {
+ /* Invalid parameters, return error*/
+ return EINVAL;
+ }
+
+ /* Mark the queue as remote*/
+ qhandle->type = IA_CSS_QUEUE_TYPE_REMOTE;
+
+ /* Copy over the local queue descriptor*/
+ qhandle->location = desc->location;
+ qhandle->proc_id = desc->proc_id;
+ qhandle->desc.remote.cb_desc_addr = desc->cb_desc_addr;
+ qhandle->desc.remote.cb_elems_addr = desc->cb_elems_addr;
+
+ /* If queue is remote, we let the local processor
+ * do its init, before using it. This is just to get us
+ * started, we can remove this restriction as we go ahead
+ */
+
+ return 0;
+}
+
+int ia_css_queue_uninit(
+ ia_css_queue_t *qhandle)
+{
+ if (!qhandle)
+ return EINVAL;
+
+ /* Load the required queue object */
+ if (qhandle->type == IA_CSS_QUEUE_TYPE_LOCAL) {
+ /* Local queues are created. Destroy it*/
+ ia_css_circbuf_destroy(&qhandle->desc.cb_local);
+ }
+
+ return 0;
+}
+
+int ia_css_queue_enqueue(
+ ia_css_queue_t *qhandle,
+ uint32_t item)
+{
+ int error = 0;
+ if (NULL == qhandle)
+ return EINVAL;
+
+ /* 1. Load the required queue object */
+ if (qhandle->type == IA_CSS_QUEUE_TYPE_LOCAL) {
+ /* Directly de-ref the object and
+ * operate on the queue
+ */
+ if (ia_css_circbuf_is_full(&qhandle->desc.cb_local)) {
+ /* Cannot push the element. Return*/
+ return ENOBUFS;
+ }
+
+ /* Push the element*/
+ ia_css_circbuf_push(&qhandle->desc.cb_local, item);
+ } else if (qhandle->type == IA_CSS_QUEUE_TYPE_REMOTE) {
+ ia_css_circbuf_desc_t cb_desc;
+ ia_css_circbuf_elem_t cb_elem;
+ uint32_t ignore_desc_flags = QUEUE_IGNORE_STEP_FLAG;
+
+ /* a. Load the queue cb_desc from remote */
+ QUEUE_CB_DESC_INIT(&cb_desc);
+ error = ia_css_queue_load(qhandle, &cb_desc, ignore_desc_flags);
+ if (error != 0)
+ return error;
+
+ /* b. Operate on the queue */
+ if (ia_css_circbuf_desc_is_full(&cb_desc))
+ return ENOBUFS;
+
+ cb_elem.val = item;
+
+ error = ia_css_queue_item_store(qhandle, cb_desc.end, &cb_elem);
+ if (error != 0)
+ return error;
+
+ cb_desc.end = (cb_desc.end + 1) % cb_desc.size;
+
+ /* c. Store the queue object */
+		/* Set only the fields that require an update with
+		 * a valid value. This avoids unnecessary calls
+		 * to the load/store functions.
+ */
+ ignore_desc_flags = QUEUE_IGNORE_SIZE_START_STEP_FLAGS;
+
+ error = ia_css_queue_store(qhandle, &cb_desc, ignore_desc_flags);
+ if (error != 0)
+ return error;
+ }
+
+ return 0;
+}
+
+int ia_css_queue_dequeue(
+ ia_css_queue_t *qhandle,
+ uint32_t *item)
+{
+ int error = 0;
+ if (qhandle == NULL || NULL == item)
+ return EINVAL;
+
+ /* 1. Load the required queue object */
+ if (qhandle->type == IA_CSS_QUEUE_TYPE_LOCAL) {
+ /* Directly de-ref the object and
+ * operate on the queue
+ */
+ if (ia_css_circbuf_is_empty(&qhandle->desc.cb_local)) {
+ /* Nothing to pop. Return empty queue*/
+ return ENODATA;
+ }
+
+ *item = ia_css_circbuf_pop(&qhandle->desc.cb_local);
+ } else if (qhandle->type == IA_CSS_QUEUE_TYPE_REMOTE) {
+ /* a. Load the queue from remote */
+ ia_css_circbuf_desc_t cb_desc;
+ ia_css_circbuf_elem_t cb_elem;
+ uint32_t ignore_desc_flags = QUEUE_IGNORE_STEP_FLAG;
+
+ QUEUE_CB_DESC_INIT(&cb_desc);
+
+ error = ia_css_queue_load(qhandle, &cb_desc, ignore_desc_flags);
+ if (error != 0)
+ return error;
+
+ /* b. Operate on the queue */
+ if (ia_css_circbuf_desc_is_empty(&cb_desc))
+ return ENODATA;
+
+ error = ia_css_queue_item_load(qhandle, cb_desc.start, &cb_elem);
+ if (error != 0)
+ return error;
+
+ *item = cb_elem.val;
+
+ cb_desc.start = OP_std_modadd(cb_desc.start, 1, cb_desc.size);
+
+ /* c. Store the queue object */
+		/* Set only the fields that require an update with
+		 * a valid value. This avoids unnecessary calls
+		 * to the load/store functions.
+ */
+ ignore_desc_flags = QUEUE_IGNORE_SIZE_END_STEP_FLAGS;
+ error = ia_css_queue_store(qhandle, &cb_desc, ignore_desc_flags);
+ if (error != 0)
+ return error;
+ }
+ return 0;
+}
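+
+/* Example usage of the queue API (illustrative sketch only; "my_desc",
+ * "my_elems" and "token" are placeholder objects supplied by the caller):
+ *
+ *	ia_css_queue_t q;
+ *	ia_css_queue_local_t cfg = { .cb_desc = &my_desc, .cb_elems = my_elems };
+ *	uint32_t item;
+ *
+ *	if (ia_css_queue_local_init(&q, &cfg) != 0)
+ *		return;
+ *	if (ia_css_queue_enqueue(&q, token) == ENOBUFS)
+ *		;	// queue full, try again later
+ *	while (ia_css_queue_dequeue(&q, &item) == 0)
+ *		;	// consume item; ENODATA means the queue is empty
+ */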
+
+int ia_css_queue_is_full(
+ ia_css_queue_t *qhandle,
+ bool *is_full)
+{
+ int error = 0;
+ if ((qhandle == NULL) || (is_full == NULL))
+ return EINVAL;
+
+ /* 1. Load the required queue object */
+ if (qhandle->type == IA_CSS_QUEUE_TYPE_LOCAL) {
+ /* Directly de-ref the object and
+ * operate on the queue
+ */
+ *is_full = ia_css_circbuf_is_full(&qhandle->desc.cb_local);
+ return 0;
+ } else if (qhandle->type == IA_CSS_QUEUE_TYPE_REMOTE) {
+ /* a. Load the queue from remote */
+ ia_css_circbuf_desc_t cb_desc;
+ uint32_t ignore_desc_flags = QUEUE_IGNORE_STEP_FLAG;
+ QUEUE_CB_DESC_INIT(&cb_desc);
+ error = ia_css_queue_load(qhandle, &cb_desc, ignore_desc_flags);
+ if (error != 0)
+ return error;
+
+ /* b. Operate on the queue */
+ *is_full = ia_css_circbuf_desc_is_full(&cb_desc);
+ return 0;
+ }
+
+ return EINVAL;
+}
+
+int ia_css_queue_get_free_space(
+ ia_css_queue_t *qhandle,
+ uint32_t *size)
+{
+ int error = 0;
+ if ((qhandle == NULL) || (size == NULL))
+ return EINVAL;
+
+ /* 1. Load the required queue object */
+ if (qhandle->type == IA_CSS_QUEUE_TYPE_LOCAL) {
+ /* Directly de-ref the object and
+ * operate on the queue
+ */
+ *size = ia_css_circbuf_get_free_elems(&qhandle->desc.cb_local);
+ return 0;
+ } else if (qhandle->type == IA_CSS_QUEUE_TYPE_REMOTE) {
+ /* a. Load the queue from remote */
+ ia_css_circbuf_desc_t cb_desc;
+ uint32_t ignore_desc_flags = QUEUE_IGNORE_STEP_FLAG;
+ QUEUE_CB_DESC_INIT(&cb_desc);
+ error = ia_css_queue_load(qhandle, &cb_desc, ignore_desc_flags);
+ if (error != 0)
+ return error;
+
+ /* b. Operate on the queue */
+ *size = ia_css_circbuf_desc_get_free_elems(&cb_desc);
+ return 0;
+ }
+
+ return EINVAL;
+}
+
+int ia_css_queue_get_used_space(
+ ia_css_queue_t *qhandle,
+ uint32_t *size)
+{
+ int error = 0;
+ if ((qhandle == NULL) || (size == NULL))
+ return EINVAL;
+
+ /* 1. Load the required queue object */
+ if (qhandle->type == IA_CSS_QUEUE_TYPE_LOCAL) {
+ /* Directly de-ref the object and
+ * operate on the queue
+ */
+ *size = ia_css_circbuf_get_num_elems(&qhandle->desc.cb_local);
+ return 0;
+ } else if (qhandle->type == IA_CSS_QUEUE_TYPE_REMOTE) {
+ /* a. Load the queue from remote */
+ ia_css_circbuf_desc_t cb_desc;
+ uint32_t ignore_desc_flags = QUEUE_IGNORE_STEP_FLAG;
+ QUEUE_CB_DESC_INIT(&cb_desc);
+ error = ia_css_queue_load(qhandle, &cb_desc, ignore_desc_flags);
+ if (error != 0)
+ return error;
+
+ /* b. Operate on the queue */
+ *size = ia_css_circbuf_desc_get_num_elems(&cb_desc);
+ return 0;
+ }
+
+ return EINVAL;
+}
+
+int ia_css_queue_peek(
+ ia_css_queue_t *qhandle,
+ uint32_t offset,
+ uint32_t *element)
+{
+ uint32_t num_elems = 0;
+ int error = 0;
+
+ if ((qhandle == NULL) || (element == NULL))
+ return EINVAL;
+
+ /* 1. Load the required queue object */
+ if (qhandle->type == IA_CSS_QUEUE_TYPE_LOCAL) {
+ /* Directly de-ref the object and
+ * operate on the queue
+ */
+ /* Check if offset is valid */
+ num_elems = ia_css_circbuf_get_num_elems(&qhandle->desc.cb_local);
+ if (offset > num_elems)
+ return EINVAL;
+
+ *element = ia_css_circbuf_peek_from_start(&qhandle->desc.cb_local, (int) offset);
+ return 0;
+ } else if (qhandle->type == IA_CSS_QUEUE_TYPE_REMOTE) {
+ /* a. Load the queue from remote */
+ ia_css_circbuf_desc_t cb_desc;
+ ia_css_circbuf_elem_t cb_elem;
+ uint32_t ignore_desc_flags = QUEUE_IGNORE_STEP_FLAG;
+
+ QUEUE_CB_DESC_INIT(&cb_desc);
+
+ error = ia_css_queue_load(qhandle, &cb_desc, ignore_desc_flags);
+ if (error != 0)
+ return error;
+
+ /* Check if offset is valid */
+ num_elems = ia_css_circbuf_desc_get_num_elems(&cb_desc);
+ if (offset > num_elems)
+ return EINVAL;
+
+ offset = OP_std_modadd(cb_desc.start, offset, cb_desc.size);
+ error = ia_css_queue_item_load(qhandle, (uint8_t)offset, &cb_elem);
+ if (error != 0)
+ return error;
+
+ *element = cb_elem.val;
+ return 0;
+ }
+
+ return EINVAL;
+}
+
+int ia_css_queue_is_empty(
+ ia_css_queue_t *qhandle,
+ bool *is_empty)
+{
+ int error = 0;
+ if ((qhandle == NULL) || (is_empty == NULL))
+ return EINVAL;
+
+ /* 1. Load the required queue object */
+ if (qhandle->type == IA_CSS_QUEUE_TYPE_LOCAL) {
+ /* Directly de-ref the object and
+ * operate on the queue
+ */
+ *is_empty = ia_css_circbuf_is_empty(&qhandle->desc.cb_local);
+ return 0;
+ } else if (qhandle->type == IA_CSS_QUEUE_TYPE_REMOTE) {
+ /* a. Load the queue from remote */
+ ia_css_circbuf_desc_t cb_desc;
+ uint32_t ignore_desc_flags = QUEUE_IGNORE_STEP_FLAG;
+
+ QUEUE_CB_DESC_INIT(&cb_desc);
+ error = ia_css_queue_load(qhandle, &cb_desc, ignore_desc_flags);
+ if (error != 0)
+ return error;
+
+ /* b. Operate on the queue */
+ *is_empty = ia_css_circbuf_desc_is_empty(&cb_desc);
+ return 0;
+ }
+
+ return EINVAL;
+}
+
+int ia_css_queue_get_size(
+ ia_css_queue_t *qhandle,
+ uint32_t *size)
+{
+ int error = 0;
+ if ((qhandle == NULL) || (size == NULL))
+ return EINVAL;
+
+ /* 1. Load the required queue object */
+ if (qhandle->type == IA_CSS_QUEUE_TYPE_LOCAL) {
+ /* Directly de-ref the object and
+ * operate on the queue
+ */
+ /* Return maximum usable capacity */
+ *size = ia_css_circbuf_get_size(&qhandle->desc.cb_local);
+ } else if (qhandle->type == IA_CSS_QUEUE_TYPE_REMOTE) {
+ /* a. Load the queue from remote */
+ ia_css_circbuf_desc_t cb_desc;
+ uint32_t ignore_desc_flags = QUEUE_IGNORE_START_END_STEP_FLAGS;
+
+ QUEUE_CB_DESC_INIT(&cb_desc);
+
+ error = ia_css_queue_load(qhandle, &cb_desc, ignore_desc_flags);
+ if (error != 0)
+ return error;
+
+ /* Return maximum usable capacity */
+ *size = cb_desc.size;
+ }
+
+ return 0;
+}
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/queue/src/queue_access.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/queue/src/queue_access.c
new file mode 100644
index 000000000000..946d4f2d2108
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/queue/src/queue_access.c
@@ -0,0 +1,192 @@
+#ifndef ISP2401
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#else
+/**
+Support for Intel Camera Imaging ISP subsystem.
+Copyright (c) 2010 - 2015, Intel Corporation.
+
+This program is free software; you can redistribute it and/or modify it
+under the terms and conditions of the GNU General Public License,
+version 2, as published by the Free Software Foundation.
+
+This program is distributed in the hope it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+more details.
+*/
+#endif
+
+#include "type_support.h"
+#include "queue_access.h"
+#include "ia_css_circbuf.h"
+#include "sp.h"
+#include "memory_access.h"
+#include "assert_support.h"
+
+int ia_css_queue_load(
+ struct ia_css_queue *rdesc,
+ ia_css_circbuf_desc_t *cb_desc,
+ uint32_t ignore_desc_flags)
+{
+ if (rdesc == NULL || cb_desc == NULL)
+ return EINVAL;
+
+ if (rdesc->location == IA_CSS_QUEUE_LOC_SP) {
+ assert(ignore_desc_flags <= QUEUE_IGNORE_DESC_FLAGS_MAX);
+
+ if (0 == (ignore_desc_flags & QUEUE_IGNORE_SIZE_FLAG)) {
+ cb_desc->size = sp_dmem_load_uint8(rdesc->proc_id,
+ rdesc->desc.remote.cb_desc_addr
+ + offsetof(ia_css_circbuf_desc_t, size));
+
+ if (0 == cb_desc->size) {
+			/* Adding back the workaround which was removed
+			   while refactoring queues. When reading the size
+			   through sp_dmem_load_*, we sometimes get back
+			   a value of zero. This causes a division-by-zero
+			   exception, as the size is used in a modulo
+			   operation. */
+ return EDOM;
+ }
+ }
+
+ if (0 == (ignore_desc_flags & QUEUE_IGNORE_START_FLAG))
+ cb_desc->start = sp_dmem_load_uint8(rdesc->proc_id,
+ rdesc->desc.remote.cb_desc_addr
+ + offsetof(ia_css_circbuf_desc_t, start));
+
+ if (0 == (ignore_desc_flags & QUEUE_IGNORE_END_FLAG))
+ cb_desc->end = sp_dmem_load_uint8(rdesc->proc_id,
+ rdesc->desc.remote.cb_desc_addr
+ + offsetof(ia_css_circbuf_desc_t, end));
+
+ if (0 == (ignore_desc_flags & QUEUE_IGNORE_STEP_FLAG))
+ cb_desc->step = sp_dmem_load_uint8(rdesc->proc_id,
+ rdesc->desc.remote.cb_desc_addr
+ + offsetof(ia_css_circbuf_desc_t, step));
+
+ } else if (rdesc->location == IA_CSS_QUEUE_LOC_HOST) {
+ /* doing DMA transfer of entire structure */
+ mmgr_load(rdesc->desc.remote.cb_desc_addr,
+ (void *)cb_desc,
+ sizeof(ia_css_circbuf_desc_t));
+ } else if (rdesc->location == IA_CSS_QUEUE_LOC_ISP) {
+ /* Not supported yet */
+ return ENOTSUP;
+ }
+
+ return 0;
+}
+
+int ia_css_queue_store(
+ struct ia_css_queue *rdesc,
+ ia_css_circbuf_desc_t *cb_desc,
+ uint32_t ignore_desc_flags)
+{
+ if (rdesc == NULL || cb_desc == NULL)
+ return EINVAL;
+
+ if (rdesc->location == IA_CSS_QUEUE_LOC_SP) {
+ assert(ignore_desc_flags <= QUEUE_IGNORE_DESC_FLAGS_MAX);
+
+ if (0 == (ignore_desc_flags & QUEUE_IGNORE_SIZE_FLAG))
+ sp_dmem_store_uint8(rdesc->proc_id,
+ rdesc->desc.remote.cb_desc_addr
+ + offsetof(ia_css_circbuf_desc_t, size),
+ cb_desc->size);
+
+ if (0 == (ignore_desc_flags & QUEUE_IGNORE_START_FLAG))
+ sp_dmem_store_uint8(rdesc->proc_id,
+ rdesc->desc.remote.cb_desc_addr
+ + offsetof(ia_css_circbuf_desc_t, start),
+ cb_desc->start);
+
+ if (0 == (ignore_desc_flags & QUEUE_IGNORE_END_FLAG))
+ sp_dmem_store_uint8(rdesc->proc_id,
+ rdesc->desc.remote.cb_desc_addr
+ + offsetof(ia_css_circbuf_desc_t, end),
+ cb_desc->end);
+
+ if (0 == (ignore_desc_flags & QUEUE_IGNORE_STEP_FLAG))
+ sp_dmem_store_uint8(rdesc->proc_id,
+ rdesc->desc.remote.cb_desc_addr
+ + offsetof(ia_css_circbuf_desc_t, step),
+ cb_desc->step);
+ } else if (rdesc->location == IA_CSS_QUEUE_LOC_HOST) {
+ /* doing DMA transfer of entire structure */
+ mmgr_store(rdesc->desc.remote.cb_desc_addr,
+ (void *)cb_desc,
+ sizeof(ia_css_circbuf_desc_t));
+ } else if (rdesc->location == IA_CSS_QUEUE_LOC_ISP) {
+ /* Not supported yet */
+ return ENOTSUP;
+ }
+
+ return 0;
+}
+
+int ia_css_queue_item_load(
+ struct ia_css_queue *rdesc,
+ uint8_t position,
+ ia_css_circbuf_elem_t *item)
+{
+ if (rdesc == NULL || item == NULL)
+ return EINVAL;
+
+ if (rdesc->location == IA_CSS_QUEUE_LOC_SP) {
+ sp_dmem_load(rdesc->proc_id,
+ rdesc->desc.remote.cb_elems_addr
+ + position * sizeof(ia_css_circbuf_elem_t),
+ item,
+ sizeof(ia_css_circbuf_elem_t));
+ } else if (rdesc->location == IA_CSS_QUEUE_LOC_HOST) {
+ mmgr_load(rdesc->desc.remote.cb_elems_addr
+ + position * sizeof(ia_css_circbuf_elem_t),
+ (void *)item,
+ sizeof(ia_css_circbuf_elem_t));
+ } else if (rdesc->location == IA_CSS_QUEUE_LOC_ISP) {
+ /* Not supported yet */
+ return ENOTSUP;
+ }
+
+ return 0;
+}
+
+int ia_css_queue_item_store(
+ struct ia_css_queue *rdesc,
+ uint8_t position,
+ ia_css_circbuf_elem_t *item)
+{
+ if (rdesc == NULL || item == NULL)
+ return EINVAL;
+
+ if (rdesc->location == IA_CSS_QUEUE_LOC_SP) {
+ sp_dmem_store(rdesc->proc_id,
+ rdesc->desc.remote.cb_elems_addr
+ + position * sizeof(ia_css_circbuf_elem_t),
+ item,
+ sizeof(ia_css_circbuf_elem_t));
+ } else if (rdesc->location == IA_CSS_QUEUE_LOC_HOST) {
+ mmgr_store(rdesc->desc.remote.cb_elems_addr
+ + position * sizeof(ia_css_circbuf_elem_t),
+ (void *)item,
+ sizeof(ia_css_circbuf_elem_t));
+ } else if (rdesc->location == IA_CSS_QUEUE_LOC_ISP) {
+ /* Not supported yet */
+ return ENOTSUP;
+ }
+
+ return 0;
+}
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/queue/src/queue_access.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/queue/src/queue_access.h
new file mode 100644
index 000000000000..4775513f54cf
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/queue/src/queue_access.h
@@ -0,0 +1,101 @@
+#ifndef ISP2401
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#else
+/**
+Support for Intel Camera Imaging ISP subsystem.
+Copyright (c) 2010 - 2015, Intel Corporation.
+
+This program is free software; you can redistribute it and/or modify it
+under the terms and conditions of the GNU General Public License,
+version 2, as published by the Free Software Foundation.
+
+This program is distributed in the hope it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+more details.
+*/
+#endif
+
+#ifndef __QUEUE_ACCESS_H
+#define __QUEUE_ACCESS_H
+
+#include <type_support.h>
+#include <ia_css_queue_comm.h>
+#include <ia_css_circbuf.h>
+#include <error_support.h>
+
+#define QUEUE_IGNORE_START_FLAG 0x0001
+#define QUEUE_IGNORE_END_FLAG 0x0002
+#define QUEUE_IGNORE_SIZE_FLAG 0x0004
+#define QUEUE_IGNORE_STEP_FLAG 0x0008
+#define QUEUE_IGNORE_DESC_FLAGS_MAX 0x000f
+
+#define QUEUE_IGNORE_SIZE_START_STEP_FLAGS \
+ (QUEUE_IGNORE_SIZE_FLAG | \
+ QUEUE_IGNORE_START_FLAG | \
+ QUEUE_IGNORE_STEP_FLAG)
+
+#define QUEUE_IGNORE_SIZE_END_STEP_FLAGS \
+ (QUEUE_IGNORE_SIZE_FLAG | \
+ QUEUE_IGNORE_END_FLAG | \
+ QUEUE_IGNORE_STEP_FLAG)
+
+#define QUEUE_IGNORE_START_END_STEP_FLAGS \
+ (QUEUE_IGNORE_START_FLAG | \
+ QUEUE_IGNORE_END_FLAG | \
+ QUEUE_IGNORE_STEP_FLAG)
+
+#define QUEUE_CB_DESC_INIT(cb_desc) \
+ do { \
+ (cb_desc)->size = 0; \
+ (cb_desc)->step = 0; \
+ (cb_desc)->start = 0; \
+ (cb_desc)->end = 0; \
+ } while(0)
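+
+/* Typical usage of the ignore flags (sketch): load only the descriptor fields
+ * a caller needs, then store back only the fields it changed, e.g. for an
+ * enqueue on a remote queue ("q" is a hypothetical struct ia_css_queue *):
+ *
+ *	ia_css_circbuf_desc_t cb_desc;
+ *
+ *	QUEUE_CB_DESC_INIT(&cb_desc);
+ *	ia_css_queue_load(q, &cb_desc, QUEUE_IGNORE_STEP_FLAG);
+ *	// ... update cb_desc.end ...
+ *	ia_css_queue_store(q, &cb_desc, QUEUE_IGNORE_SIZE_START_STEP_FLAGS);
+ */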
+
+struct ia_css_queue {
+ uint8_t type; /* Specify remote/local type of access */
+ uint8_t location; /* Cell location for queue */
+ uint8_t proc_id; /* Processor id for queue access */
+ union {
+ ia_css_circbuf_t cb_local;
+ struct {
+ uint32_t cb_desc_addr; /*Circbuf desc address for remote queues*/
+ uint32_t cb_elems_addr; /*Circbuf elements addr for remote queue*/
+ } remote;
+ } desc;
+};
+
+extern int ia_css_queue_load(
+ struct ia_css_queue *rdesc,
+ ia_css_circbuf_desc_t *cb_desc,
+ uint32_t ignore_desc_flags);
+
+extern int ia_css_queue_store(
+ struct ia_css_queue *rdesc,
+ ia_css_circbuf_desc_t *cb_desc,
+ uint32_t ignore_desc_flags);
+
+extern int ia_css_queue_item_load(
+ struct ia_css_queue *rdesc,
+ uint8_t position,
+ ia_css_circbuf_elem_t *item);
+
+extern int ia_css_queue_item_store(
+ struct ia_css_queue *rdesc,
+ uint8_t position,
+ ia_css_circbuf_elem_t *item);
+
+#endif /* __QUEUE_ACCESS_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/rmgr/interface/ia_css_rmgr.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/rmgr/interface/ia_css_rmgr.h
new file mode 100644
index 000000000000..a0bb9f663ce6
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/rmgr/interface/ia_css_rmgr.h
@@ -0,0 +1,89 @@
+#ifndef ISP2401
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#else
+/**
+Support for Intel Camera Imaging ISP subsystem.
+Copyright (c) 2010 - 2015, Intel Corporation.
+
+This program is free software; you can redistribute it and/or modify it
+under the terms and conditions of the GNU General Public License,
+version 2, as published by the Free Software Foundation.
+
+This program is distributed in the hope it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+more details.
+*/
+#endif
+
+#ifndef _IA_CSS_RMGR_H
+#define _IA_CSS_RMGR_H
+
+#include "storage_class.h"
+#include <ia_css_err.h>
+
+#ifndef __INLINE_RMGR__
+#define STORAGE_CLASS_RMGR_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_RMGR_C
+#else /* __INLINE_RMGR__ */
+#define STORAGE_CLASS_RMGR_H STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_RMGR_C STORAGE_CLASS_INLINE
+#endif /* __INLINE_RMGR__ */
+
+/**
+ * @brief Initialize resource manager (host/common)
+ */
+enum ia_css_err ia_css_rmgr_init(void);
+
+/**
+ * @brief Uninitialize resource manager (host/common)
+ */
+void ia_css_rmgr_uninit(void);
+
+/*****************************************************************
+ * Interface definition - resource type (host/common)
+ *****************************************************************
+ *
+ * struct ia_css_rmgr_<type>_pool;
+ * struct ia_css_rmgr_<type>_handle;
+ *
+ * STORAGE_CLASS_RMGR_H void ia_css_rmgr_init_<type>(
+ * struct ia_css_rmgr_<type>_pool *pool);
+ *
+ * STORAGE_CLASS_RMGR_H void ia_css_rmgr_uninit_<type>(
+ * struct ia_css_rmgr_<type>_pool *pool);
+ *
+ * STORAGE_CLASS_RMGR_H void ia_css_rmgr_acq_<type>(
+ * struct ia_css_rmgr_<type>_pool *pool,
+ * struct ia_css_rmgr_<type>_handle **handle);
+ *
+ * STORAGE_CLASS_RMGR_H void ia_css_rmgr_rel_<type>(
+ * struct ia_css_rmgr_<type>_pool *pool,
+ * struct ia_css_rmgr_<type>_handle **handle);
+ *
+ *****************************************************************
+ * Interface definition - refcounting (host/common)
+ *****************************************************************
+ *
+ * void ia_css_rmgr_refcount_retain_<type>(
+ * struct ia_css_rmgr_<type>_handle **handle);
+ *
+ * void ia_css_rmgr_refcount_release_<type>(
+ * struct ia_css_rmgr_<type>_handle **handle);
+ */
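+
+/* For example, the "vbuf" resource type (see ia_css_rmgr_vbuf.h, included
+ * below) instantiates this pattern as (illustrative call order only; pool and
+ * handle setup is omitted):
+ *
+ *	struct ia_css_rmgr_vbuf_pool;
+ *	struct ia_css_rmgr_vbuf_handle;
+ *
+ *	ia_css_rmgr_init_vbuf(pool);
+ *	ia_css_rmgr_acq_vbuf(pool, &handle);
+ *	ia_css_rmgr_rel_vbuf(pool, &handle);
+ *	ia_css_rmgr_uninit_vbuf(pool);
+ */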
+
+#include "ia_css_rmgr_vbuf.h"
+
+#endif /* _IA_CSS_RMGR_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/rmgr/interface/ia_css_rmgr_vbuf.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/rmgr/interface/ia_css_rmgr_vbuf.h
new file mode 100644
index 000000000000..90ac27cf02cf
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/rmgr/interface/ia_css_rmgr_vbuf.h
@@ -0,0 +1,115 @@
+#ifndef ISP2401
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#else
+/**
+Support for Intel Camera Imaging ISP subsystem.
+Copyright (c) 2010 - 2015, Intel Corporation.
+
+This program is free software; you can redistribute it and/or modify it
+under the terms and conditions of the GNU General Public License,
+version 2, as published by the Free Software Foundation.
+
+This program is distributed in the hope it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+more details.
+*/
+#endif
+
+#ifndef _IA_CSS_RMGR_VBUF_H
+#define _IA_CSS_RMGR_VBUF_H
+
+#include "ia_css_rmgr.h"
+#include <type_support.h>
+#include <system_types.h>
+
+/**
+ * @brief Data structure for the resource handle (host, vbuf)
+ */
+struct ia_css_rmgr_vbuf_handle {
+ hrt_vaddress vptr;
+ uint8_t count;
+ uint32_t size;
+};
+
+/**
+ * @brief Data structure for the resource pool (host, vbuf)
+ */
+struct ia_css_rmgr_vbuf_pool {
+ uint8_t copy_on_write;
+ uint8_t recycle;
+ uint32_t size;
+ uint32_t index;
+ struct ia_css_rmgr_vbuf_handle **handles;
+};
+
+/**
+ * @brief VBUF resource pools
+ */
+extern struct ia_css_rmgr_vbuf_pool *vbuf_ref;
+extern struct ia_css_rmgr_vbuf_pool *vbuf_write;
+extern struct ia_css_rmgr_vbuf_pool *hmm_buffer_pool;
+
+/**
+ * @brief Initialize the resource pool (host, vbuf)
+ *
+ * @param pool The pointer to the pool
+ */
+STORAGE_CLASS_RMGR_H enum ia_css_err ia_css_rmgr_init_vbuf(
+ struct ia_css_rmgr_vbuf_pool *pool);
+
+/**
+ * @brief Uninitialize the resource pool (host, vbuf)
+ *
+ * @param pool The pointer to the pool
+ */
+STORAGE_CLASS_RMGR_H void ia_css_rmgr_uninit_vbuf(
+ struct ia_css_rmgr_vbuf_pool *pool);
+
+/**
+ * @brief Acquire a handle from the pool (host, vbuf)
+ *
+ * @param pool The pointer to the pool
+ * @param handle The pointer to the handle
+ */
+STORAGE_CLASS_RMGR_H void ia_css_rmgr_acq_vbuf(
+ struct ia_css_rmgr_vbuf_pool *pool,
+ struct ia_css_rmgr_vbuf_handle **handle);
+
+/**
+ * @brief Release a handle to the pool (host, vbuf)
+ *
+ * @param pool The pointer to the pool
+ * @param handle The pointer to the handle
+ */
+STORAGE_CLASS_RMGR_H void ia_css_rmgr_rel_vbuf(
+ struct ia_css_rmgr_vbuf_pool *pool,
+ struct ia_css_rmgr_vbuf_handle **handle);
+
+/**
+ * @brief Retain the reference count for a handle (host, vbuf)
+ *
+ * @param handle The pointer to the handle
+ */
+void ia_css_rmgr_refcount_retain_vbuf(struct ia_css_rmgr_vbuf_handle **handle);
+
+/**
+ * @brief Release the reference count for a handle (host, vbuf)
+ *
+ * @param handle The pointer to the handle
+ */
+void ia_css_rmgr_refcount_release_vbuf(struct ia_css_rmgr_vbuf_handle **handle);
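+
+/* A plausible call sequence (sketch; "my_size" is a placeholder and error
+ * handling is omitted):
+ *
+ *	struct ia_css_rmgr_vbuf_handle h = { 0 };
+ *	struct ia_css_rmgr_vbuf_handle *handle = &h;
+ *
+ *	h.size = my_size;
+ *	ia_css_rmgr_acq_vbuf(hmm_buffer_pool, &handle);
+ *	// handle now points at a pool-managed handle with a valid vptr
+ *	ia_css_rmgr_rel_vbuf(hmm_buffer_pool, &handle);
+ */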
+
+#endif /* _IA_CSS_RMGR_VBUF_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/rmgr/src/rmgr.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/rmgr/src/rmgr.c
new file mode 100644
index 000000000000..efa9c140484f
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/rmgr/src/rmgr.c
@@ -0,0 +1,55 @@
+#ifndef ISP2401
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#else
+/**
+Support for Intel Camera Imaging ISP subsystem.
+Copyright (c) 2010 - 2015, Intel Corporation.
+
+This program is free software; you can redistribute it and/or modify it
+under the terms and conditions of the GNU General Public License,
+version 2, as published by the Free Software Foundation.
+
+This program is distributed in the hope it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+more details.
+*/
+#endif
+
+#include "ia_css_rmgr.h"
+
+enum ia_css_err ia_css_rmgr_init(void)
+{
+ enum ia_css_err err = IA_CSS_SUCCESS;
+
+ err = ia_css_rmgr_init_vbuf(vbuf_ref);
+ if (err == IA_CSS_SUCCESS)
+ err = ia_css_rmgr_init_vbuf(vbuf_write);
+ if (err == IA_CSS_SUCCESS)
+ err = ia_css_rmgr_init_vbuf(hmm_buffer_pool);
+ if (err != IA_CSS_SUCCESS)
+ ia_css_rmgr_uninit();
+ return err;
+}
+
+/**
+ * @brief Uninitialize resource pool (host)
+ */
+void ia_css_rmgr_uninit(void)
+{
+ ia_css_rmgr_uninit_vbuf(hmm_buffer_pool);
+ ia_css_rmgr_uninit_vbuf(vbuf_write);
+ ia_css_rmgr_uninit_vbuf(vbuf_ref);
+}
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/rmgr/src/rmgr_vbuf.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/rmgr/src/rmgr_vbuf.c
new file mode 100644
index 000000000000..fa92d8da8f1c
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/rmgr/src/rmgr_vbuf.c
@@ -0,0 +1,330 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2010-2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "ia_css_rmgr.h"
+
+#include <type_support.h>
+#include <assert_support.h>
+#include <platform_support.h> /* memset */
+#include <memory_access.h> /* mmmgr_malloc, mhmm_free */
+#include <ia_css_debug.h>
+
+/**
+ * @brief VBUF resource handles
+ */
+#define NUM_HANDLES 1000
+struct ia_css_rmgr_vbuf_handle handle_table[NUM_HANDLES];
+
+/**
+ * @brief VBUF resource pool - refpool
+ */
+struct ia_css_rmgr_vbuf_pool refpool = {
+ false, /* copy_on_write */
+ false, /* recycle */
+ 0, /* size */
+ 0, /* index */
+ NULL, /* handles */
+};
+
+/**
+ * @brief VBUF resource pool - writepool
+ */
+struct ia_css_rmgr_vbuf_pool writepool = {
+ true, /* copy_on_write */
+ false, /* recycle */
+ 0, /* size */
+ 0, /* index */
+ NULL, /* handles */
+};
+
+/**
+ * @brief VBUF resource pool - hmmbufferpool
+ */
+struct ia_css_rmgr_vbuf_pool hmmbufferpool = {
+ true, /* copy_on_write */
+ true, /* recycle */
+ 32, /* size */
+ 0, /* index */
+ NULL, /* handles */
+};
+
+struct ia_css_rmgr_vbuf_pool *vbuf_ref = &refpool;
+struct ia_css_rmgr_vbuf_pool *vbuf_write = &writepool;
+struct ia_css_rmgr_vbuf_pool *hmm_buffer_pool = &hmmbufferpool;
+
+/**
+ * @brief Initialize the reference count (host, vbuf)
+ */
+static void rmgr_refcount_init_vbuf(void)
+{
+ /* initialize the refcount table */
+ memset(&handle_table, 0, sizeof(handle_table));
+}
+
+/**
+ * @brief Retain the reference count for a handle (host, vbuf)
+ *
+ * @param handle The pointer to the handle
+ */
+void ia_css_rmgr_refcount_retain_vbuf(struct ia_css_rmgr_vbuf_handle **handle)
+{
+ int i;
+ struct ia_css_rmgr_vbuf_handle *h;
+ if ((handle == NULL) || (*handle == NULL)) {
+ IA_CSS_LOG("Invalid inputs");
+ return;
+ }
+ /* new vbuf to count on */
+ if ((*handle)->count == 0) {
+ h = *handle;
+ *handle = NULL;
+ for (i = 0; i < NUM_HANDLES; i++) {
+ if (handle_table[i].count == 0) {
+ *handle = &handle_table[i];
+ break;
+ }
+ }
+		/* if the loop does not break and *handle == NULL,
+		   this is an error; report it.
+		 */
+ if (*handle == NULL) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_ERROR,
+ "ia_css_i_host_refcount_retain_vbuf() failed to find empty slot!\n");
+ return;
+ }
+ (*handle)->vptr = h->vptr;
+ (*handle)->size = h->size;
+ }
+ (*handle)->count++;
+}
+
+/**
+ * @brief Release the reference count for a handle (host, vbuf)
+ *
+ * @param handle The pointer to the handle
+ */
+void ia_css_rmgr_refcount_release_vbuf(struct ia_css_rmgr_vbuf_handle **handle)
+{
+ if ((handle == NULL) || ((*handle) == NULL) || (((*handle)->count) == 0)) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_ERROR,
+ "ia_css_rmgr_refcount_release_vbuf() invalid arguments!\n");
+ return;
+ }
+ /* decrease reference count */
+ (*handle)->count--;
+ /* remove from admin */
+ if ((*handle)->count == 0) {
+ (*handle)->vptr = 0x0;
+ (*handle)->size = 0;
+ *handle = NULL;
+ }
+}
+
+/**
+ * @brief Initialize the resource pool (host, vbuf)
+ *
+ * @param pool The pointer to the pool
+ */
+enum ia_css_err ia_css_rmgr_init_vbuf(struct ia_css_rmgr_vbuf_pool *pool)
+{
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ size_t bytes_needed;
+ rmgr_refcount_init_vbuf();
+ assert(pool != NULL);
+ if (pool == NULL)
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ /* initialize the recycle pool if used */
+ if (pool->recycle && pool->size) {
+ /* allocate memory for storing the handles */
+ bytes_needed =
+ sizeof(void *) *
+ pool->size;
+ pool->handles = sh_css_malloc(bytes_needed);
+ if (pool->handles != NULL)
+ memset(pool->handles, 0, bytes_needed);
+ else
+ err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+ } else {
+ /* just in case, set the size to 0 */
+ pool->size = 0;
+ pool->handles = NULL;
+ }
+ return err;
+}
+
+/**
+ * @brief Uninitialize the resource pool (host, vbuf)
+ *
+ * @param pool The pointer to the pool
+ */
+void ia_css_rmgr_uninit_vbuf(struct ia_css_rmgr_vbuf_pool *pool)
+{
+ uint32_t i;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_rmgr_uninit_vbuf()\n");
+ if (pool == NULL) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_ERROR, "ia_css_rmgr_uninit_vbuf(): NULL argument\n");
+ return;
+ }
+ if (pool->handles != NULL) {
+ /* free the hmm buffers */
+ for (i = 0; i < pool->size; i++) {
+ if (pool->handles[i] != NULL) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ " freeing/releasing %x (count=%d)\n",
+ pool->handles[i]->vptr,
+ pool->handles[i]->count);
+ /* free memory */
+ hmm_free(pool->handles[i]->vptr);
+ /* remove from refcount admin */
+ ia_css_rmgr_refcount_release_vbuf(
+ &pool->handles[i]);
+ }
+ }
+ /* now free the pool handles list */
+ sh_css_free(pool->handles);
+ pool->handles = NULL;
+ }
+}
+
+/**
+ * @brief Push a handle to the pool
+ *
+ * @param pool The pointer to the pool
+ * @param handle The pointer to the handle
+ */
+static
+void rmgr_push_handle(struct ia_css_rmgr_vbuf_pool *pool,
+ struct ia_css_rmgr_vbuf_handle **handle)
+{
+ uint32_t i;
+	bool success = false;
+ assert(pool != NULL);
+ assert(pool->recycle);
+ assert(pool->handles != NULL);
+ assert(handle != NULL);
+ for (i = 0; i < pool->size; i++) {
+ if (pool->handles[i] == NULL) {
+ ia_css_rmgr_refcount_retain_vbuf(handle);
+ pool->handles[i] = *handle;
+			success = true;
+ break;
+ }
+ }
+	assert(success);
+}
+
+/**
+ * @brief Pop a handle from the pool
+ *
+ * @param pool The pointer to the pool
+ * @param handle The pointer to the handle
+ */
+static
+void rmgr_pop_handle(struct ia_css_rmgr_vbuf_pool *pool,
+ struct ia_css_rmgr_vbuf_handle **handle)
+{
+ uint32_t i;
+	bool success = false;
+ assert(pool != NULL);
+ assert(pool->recycle);
+ assert(pool->handles != NULL);
+ assert(handle != NULL);
+ assert(*handle != NULL);
+ for (i = 0; i < pool->size; i++) {
+ if ((pool->handles[i] != NULL) &&
+ (pool->handles[i]->size == (*handle)->size)) {
+ *handle = pool->handles[i];
+ pool->handles[i] = NULL;
+			/* don't release, we are returning it...
+			   ia_css_rmgr_refcount_release_vbuf(handle); */
+			success = true;
+ break;
+ }
+ }
+}
+
+/**
+ * @brief Acquire a handle from the pool (host, vbuf)
+ *
+ * @param pool The pointer to the pool
+ * @param handle The pointer to the handle
+ */
+void ia_css_rmgr_acq_vbuf(struct ia_css_rmgr_vbuf_pool *pool,
+ struct ia_css_rmgr_vbuf_handle **handle)
+{
+ struct ia_css_rmgr_vbuf_handle h;
+
+ if ((pool == NULL) || (handle == NULL) || (*handle == NULL)) {
+ IA_CSS_LOG("Invalid inputs");
+ return;
+ }
+
+ if (pool->copy_on_write) {
+ /* only one reference, reuse (no new retain) */
+ if ((*handle)->count == 1)
+ return;
+ /* more than one reference, release current buffer */
+ if ((*handle)->count > 1) {
+ /* store current values */
+ h.vptr = 0x0;
+ h.size = (*handle)->size;
+ /* release ref to current buffer */
+ ia_css_rmgr_refcount_release_vbuf(handle);
+ *handle = &h;
+ }
+ /* get new buffer for needed size */
+ if ((*handle)->vptr == 0x0) {
+ if (pool->recycle) {
+ /* try and pop from pool */
+ rmgr_pop_handle(pool, handle);
+ }
+ if ((*handle)->vptr == 0x0) {
+ /* we need to allocate */
+ (*handle)->vptr = mmgr_malloc((*handle)->size);
+ } else {
+ /* we popped a buffer */
+ return;
+ }
+ }
+ }
+ /* Note that handle will change to an internally maintained one */
+ ia_css_rmgr_refcount_retain_vbuf(handle);
+}
+
+/**
+ * @brief Release a handle to the pool (host, vbuf)
+ *
+ * @param pool The pointer to the pool
+ * @param handle The pointer to the handle
+ */
+void ia_css_rmgr_rel_vbuf(struct ia_css_rmgr_vbuf_pool *pool,
+ struct ia_css_rmgr_vbuf_handle **handle)
+{
+ if ((pool == NULL) || (handle == NULL) || (*handle == NULL)) {
+ IA_CSS_LOG("Invalid inputs");
+ return;
+ }
+ /* release the handle */
+ if ((*handle)->count == 1) {
+ if (!pool->recycle) {
+ /* non recycling pool, free mem */
+ hmm_free((*handle)->vptr);
+ } else {
+ /* recycle to pool */
+ rmgr_push_handle(pool, handle);
+ }
+ }
+ ia_css_rmgr_refcount_release_vbuf(handle);
+ *handle = NULL;
+}
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/spctrl/interface/ia_css_spctrl.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/spctrl/interface/ia_css_spctrl.h
new file mode 100644
index 000000000000..27e9eb1e2102
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/spctrl/interface/ia_css_spctrl.h
@@ -0,0 +1,87 @@
+#ifndef ISP2401
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#else
+/**
+Support for Intel Camera Imaging ISP subsystem.
+Copyright (c) 2010 - 2015, Intel Corporation.
+
+This program is free software; you can redistribute it and/or modify it
+under the terms and conditions of the GNU General Public License,
+version 2, as published by the Free Software Foundation.
+
+This program is distributed in the hope it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+more details.
+*/
+#endif
+
+#ifndef __IA_CSS_SPCTRL_H__
+#define __IA_CSS_SPCTRL_H__
+
+#include <system_global.h>
+#include <ia_css_err.h>
+#include "ia_css_spctrl_comm.h"
+
+
+typedef struct {
+	uint32_t ddr_data_offset; /**< position of data in DDR */
+ uint32_t dmem_data_addr; /**< data segment address in dmem */
+ uint32_t dmem_bss_addr; /**< bss segment address in dmem */
+ uint32_t data_size; /**< data segment size */
+ uint32_t bss_size; /**< bss segment size */
+	uint32_t spctrl_config_dmem_addr; /**< location of dmem_cfg in SP dmem */
+	uint32_t spctrl_state_dmem_addr; /**< location of state in SP dmem */
+	unsigned int sp_entry; /**< entry function ptr on SP */
+ const void *code; /**< location of firmware */
+ uint32_t code_size;
+ char *program_name; /**< not used on hardware, only for simulation */
+} ia_css_spctrl_cfg;
+
+/* Get the code addr in DDR of SP */
+hrt_vaddress get_sp_code_addr(sp_ID_t sp_id);
+
+/*! Load firmware onto the specified SP
+*/
+enum ia_css_err ia_css_spctrl_load_fw(sp_ID_t sp_id,
+ ia_css_spctrl_cfg *spctrl_cfg);
+
+#ifdef ISP2401
+/*! Setup registers for reloading FW */
+void sh_css_spctrl_reload_fw(sp_ID_t sp_id);
+
+#endif
+/*! Unload/release any memory allocated to hold the firmware
+*/
+enum ia_css_err ia_css_spctrl_unload_fw(sp_ID_t sp_id);
+
+
+/*! Initialize dmem_cfg in SP dmem and start SP program
+*/
+enum ia_css_err ia_css_spctrl_start(sp_ID_t sp_id);
+
+/*! stop spctrl
+*/
+enum ia_css_err ia_css_spctrl_stop(sp_ID_t sp_id);
+
+/*! Query the state of SP
+*/
+ia_css_spctrl_sp_sw_state ia_css_spctrl_get_state(sp_ID_t sp_id);
+
+/*! Check if SP is idle/ready
+*/
+int ia_css_spctrl_is_idle(sp_ID_t sp_id);
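+
+/* A typical bring-up sequence (sketch; the firmware fields of "cfg" are
+ * placeholders and error handling is omitted):
+ *
+ *	ia_css_spctrl_cfg cfg = { ... };
+ *
+ *	if (ia_css_spctrl_load_fw(SP0_ID, &cfg) != IA_CSS_SUCCESS)
+ *		return;
+ *	ia_css_spctrl_start(SP0_ID);
+ *	while (ia_css_spctrl_get_state(SP0_ID) != IA_CSS_SP_SW_RUNNING)
+ *		;	// wait for the SP program to come up
+ *	// ... use the SP ...
+ *	ia_css_spctrl_stop(SP0_ID);
+ *	ia_css_spctrl_unload_fw(SP0_ID);
+ */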
+
+#endif /* __IA_CSS_SPCTRL_H__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/spctrl/interface/ia_css_spctrl_comm.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/spctrl/interface/ia_css_spctrl_comm.h
new file mode 100644
index 000000000000..3af2891efca7
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/spctrl/interface/ia_css_spctrl_comm.h
@@ -0,0 +1,61 @@
+#ifndef ISP2401
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#else
+/**
+Support for Intel Camera Imaging ISP subsystem.
+Copyright (c) 2010 - 2015, Intel Corporation.
+
+This program is free software; you can redistribute it and/or modify it
+under the terms and conditions of the GNU General Public License,
+version 2, as published by the Free Software Foundation.
+
+This program is distributed in the hope it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+more details.
+*/
+#endif
+
+#ifndef __IA_CSS_SPCTRL_COMM_H__
+#define __IA_CSS_SPCTRL_COMM_H__
+
+#include <type_support.h>
+
+/* state of SP */
+typedef enum {
+ IA_CSS_SP_SW_TERMINATED = 0,
+ IA_CSS_SP_SW_INITIALIZED,
+ IA_CSS_SP_SW_CONNECTED,
+ IA_CSS_SP_SW_RUNNING
+} ia_css_spctrl_sp_sw_state;
+
+/** Structure to encapsulate required arguments for
+ * initialization of SP DMEM using the SP itself
+ */
+struct ia_css_sp_init_dmem_cfg {
+ ia_css_ptr ddr_data_addr; /**< data segment address in ddr */
+ uint32_t dmem_data_addr; /**< data segment address in dmem */
+ uint32_t dmem_bss_addr; /**< bss segment address in dmem */
+ uint32_t data_size; /**< data segment size */
+ uint32_t bss_size; /**< bss segment size */
+	sp_ID_t sp_id; /**< SP id */
+};
+
+#define SIZE_OF_IA_CSS_SP_INIT_DMEM_CFG_STRUCT \
+ (1 * SIZE_OF_IA_CSS_PTR) + \
+ (4 * sizeof(uint32_t)) + \
+ (1 * sizeof(sp_ID_t))
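+/* i.e. one ia_css_ptr (ddr_data_addr), four uint32_t fields (dmem_data_addr,
+ * dmem_bss_addr, data_size, bss_size) and one sp_ID_t (sp_id), matching
+ * struct ia_css_sp_init_dmem_cfg above.
+ */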
+
+#endif /* __IA_CSS_SPCTRL_COMM_H__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/spctrl/src/spctrl.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/spctrl/src/spctrl.c
new file mode 100644
index 000000000000..b36d7b00ebe8
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/spctrl/src/spctrl.c
@@ -0,0 +1,199 @@
+#ifndef ISP2401
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#else
+/**
+Support for Intel Camera Imaging ISP subsystem.
+Copyright (c) 2010 - 2015, Intel Corporation.
+
+This program is free software; you can redistribute it and/or modify it
+under the terms and conditions of the GNU General Public License,
+version 2, as published by the Free Software Foundation.
+
+This program is distributed in the hope it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+more details.
+*/
+#endif
+
+#include "ia_css_types.h"
+#define __INLINE_SP__
+#include "sp.h"
+
+#include "memory_access.h"
+#include "assert_support.h"
+#include "ia_css_spctrl.h"
+#include "ia_css_debug.h"
+
+typedef struct {
+ struct ia_css_sp_init_dmem_cfg dmem_config;
+	uint32_t spctrl_config_dmem_addr; /* location of dmem_cfg in SP dmem */
+ uint32_t spctrl_state_dmem_addr;
+ unsigned int sp_entry; /* entry function ptr on SP */
+ hrt_vaddress code_addr; /* sp firmware location in host mem-DDR*/
+ uint32_t code_size;
+ char *program_name; /* used in case of PLATFORM_SIM */
+} spctrl_context_info;
+
+static spctrl_context_info spctrl_cofig_info[N_SP_ID];
+static bool spctrl_loaded[N_SP_ID] = {0};
+
+/* Load firmware */
+enum ia_css_err ia_css_spctrl_load_fw(sp_ID_t sp_id,
+ ia_css_spctrl_cfg *spctrl_cfg)
+{
+ hrt_vaddress code_addr = mmgr_NULL;
+ struct ia_css_sp_init_dmem_cfg *init_dmem_cfg;
+
+ if ((sp_id >= N_SP_ID) || (spctrl_cfg == 0))
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+
+ spctrl_cofig_info[sp_id].code_addr = mmgr_NULL;
+
+#if defined(HRT_UNSCHED)
+ (void)init_dmem_cfg;
+ code_addr = mmgr_malloc(1);
+ if (code_addr == mmgr_NULL)
+ return IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+#else
+ init_dmem_cfg = &spctrl_cofig_info[sp_id].dmem_config;
+ init_dmem_cfg->dmem_data_addr = spctrl_cfg->dmem_data_addr;
+ init_dmem_cfg->dmem_bss_addr = spctrl_cfg->dmem_bss_addr;
+ init_dmem_cfg->data_size = spctrl_cfg->data_size;
+ init_dmem_cfg->bss_size = spctrl_cfg->bss_size;
+ init_dmem_cfg->sp_id = sp_id;
+
+ spctrl_cofig_info[sp_id].spctrl_config_dmem_addr = spctrl_cfg->spctrl_config_dmem_addr;
+ spctrl_cofig_info[sp_id].spctrl_state_dmem_addr = spctrl_cfg->spctrl_state_dmem_addr;
+
+ /* store code (text + icache) and data to DDR
+ *
+ * Data used to be stored separately, because of access alignment constraints,
+	 * Data used to be stored separately because of access alignment
+	 * constraints; fix the FW generation instead.
+ code_addr = mmgr_malloc(spctrl_cfg->code_size);
+ if (code_addr == mmgr_NULL)
+ return IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+ mmgr_store(code_addr, spctrl_cfg->code, spctrl_cfg->code_size);
+
+ if (sizeof(hrt_vaddress) > sizeof(hrt_data)) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_ERROR,
+ "size of hrt_vaddress can not be greater than hrt_data\n");
+ hmm_free(code_addr);
+ code_addr = mmgr_NULL;
+ return IA_CSS_ERR_INTERNAL_ERROR;
+ }
+
+ init_dmem_cfg->ddr_data_addr = code_addr + spctrl_cfg->ddr_data_offset;
+ if ((init_dmem_cfg->ddr_data_addr % HIVE_ISP_DDR_WORD_BYTES) != 0) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_ERROR,
+ "DDR address pointer is not properly aligned for DMA transfer\n");
+ hmm_free(code_addr);
+ code_addr = mmgr_NULL;
+ return IA_CSS_ERR_INTERNAL_ERROR;
+ }
+#endif
+ spctrl_cofig_info[sp_id].sp_entry = spctrl_cfg->sp_entry;
+ spctrl_cofig_info[sp_id].code_addr = code_addr;
+ spctrl_cofig_info[sp_id].program_name = spctrl_cfg->program_name;
+
+ /* now we program the base address into the icache and
+ * invalidate the cache.
+ */
+ sp_ctrl_store(sp_id, SP_ICACHE_ADDR_REG, (hrt_data)spctrl_cofig_info[sp_id].code_addr);
+ sp_ctrl_setbit(sp_id, SP_ICACHE_INV_REG, SP_ICACHE_INV_BIT);
+ spctrl_loaded[sp_id] = true;
+ return IA_CSS_SUCCESS;
+}
+
+#ifdef ISP2401
+/* reload pre-loaded FW */
+void sh_css_spctrl_reload_fw(sp_ID_t sp_id)
+{
+ /* now we program the base address into the icache and
+ * invalidate the cache.
+ */
+ sp_ctrl_store(sp_id, SP_ICACHE_ADDR_REG, (hrt_data)spctrl_cofig_info[sp_id].code_addr);
+ sp_ctrl_setbit(sp_id, SP_ICACHE_INV_REG, SP_ICACHE_INV_BIT);
+ spctrl_loaded[sp_id] = true;
+}
+#endif
+
+hrt_vaddress get_sp_code_addr(sp_ID_t sp_id)
+{
+ return spctrl_cofig_info[sp_id].code_addr;
+}
+
+enum ia_css_err ia_css_spctrl_unload_fw(sp_ID_t sp_id)
+{
+ if ((sp_id >= N_SP_ID) || ((sp_id < N_SP_ID) && (!spctrl_loaded[sp_id])))
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+
+	/* free up the resource */
+ if (spctrl_cofig_info[sp_id].code_addr)
+ hmm_free(spctrl_cofig_info[sp_id].code_addr);
+ spctrl_loaded[sp_id] = false;
+ return IA_CSS_SUCCESS;
+}
+
+/* Initialize dmem_cfg in SP dmem and start SP program*/
+enum ia_css_err ia_css_spctrl_start(sp_ID_t sp_id)
+{
+ if ((sp_id >= N_SP_ID) || ((sp_id < N_SP_ID) && (!spctrl_loaded[sp_id])))
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+
+ /* Set descr in the SP to initialize the SP DMEM */
+ /*
+	 * The FW stores user-space pointers to the FW; the ISP pointer
+	 * is only available here.
+ */
+ assert(sizeof(unsigned int) <= sizeof(hrt_data));
+
+ sp_dmem_store(sp_id,
+ spctrl_cofig_info[sp_id].spctrl_config_dmem_addr,
+ &spctrl_cofig_info[sp_id].dmem_config,
+ sizeof(spctrl_cofig_info[sp_id].dmem_config));
+ /* set the start address */
+ sp_ctrl_store(sp_id, SP_START_ADDR_REG, (hrt_data)spctrl_cofig_info[sp_id].sp_entry);
+ sp_ctrl_setbit(sp_id, SP_SC_REG, SP_RUN_BIT);
+ sp_ctrl_setbit(sp_id, SP_SC_REG, SP_START_BIT);
+ return IA_CSS_SUCCESS;
+}
+
+/* Query the state of SP */
+ia_css_spctrl_sp_sw_state ia_css_spctrl_get_state(sp_ID_t sp_id)
+{
+ ia_css_spctrl_sp_sw_state state = 0;
+ unsigned int HIVE_ADDR_sp_sw_state;
+ if (sp_id >= N_SP_ID)
+ return IA_CSS_SP_SW_TERMINATED;
+
+ HIVE_ADDR_sp_sw_state = spctrl_cofig_info[sp_id].spctrl_state_dmem_addr;
+	(void)HIVE_ADDR_sp_sw_state; /* Suppress warnings in CRUN */
+ if (sp_id == SP0_ID)
+ state = sp_dmem_load_uint32(sp_id, (unsigned)sp_address_of(sp_sw_state));
+ return state;
+}
+
+int ia_css_spctrl_is_idle(sp_ID_t sp_id)
+{
+ int state = 0;
+	assert(sp_id < N_SP_ID);
+
+ state = sp_ctrl_getbit(sp_id, SP_SC_REG, SP_IDLE_BIT);
+ return state;
+}
+
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/tagger/interface/ia_css_tagger_common.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/tagger/interface/ia_css_tagger_common.h
new file mode 100644
index 000000000000..d0d74957358b
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/tagger/interface/ia_css_tagger_common.h
@@ -0,0 +1,59 @@
+#ifndef ISP2401
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#else
+/**
+Support for Intel Camera Imaging ISP subsystem.
+Copyright (c) 2010 - 2015, Intel Corporation.
+
+This program is free software; you can redistribute it and/or modify it
+under the terms and conditions of the GNU General Public License,
+version 2, as published by the Free Software Foundation.
+
+This program is distributed in the hope it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+more details.
+*/
+#endif
+
+#ifndef __IA_CSS_TAGGER_COMMON_H__
+#define __IA_CSS_TAGGER_COMMON_H__
+
+#include <system_local.h>
+#include <type_support.h>
+
+/**
+ * @brief The tagger's circular buffer.
+ *
+ * Should be one less than NUM_CONTINUOUS_FRAMES in sh_css_internal.h
+ */
+#if defined(HAS_SP_2400)
+#define MAX_CB_ELEMS_FOR_TAGGER 14
+#else
+#define MAX_CB_ELEMS_FOR_TAGGER 9
+#endif
+
+/**
+ * @brief Data structure for the tagger buffer element.
+ */
+typedef struct {
+ uint32_t frame; /* the frame value stored in the element */
+ uint32_t param; /* the param value stored in the element */
+ uint8_t mark; /* the mark on the element */
+ uint8_t lock; /* the lock on the element */
+ uint8_t exp_id; /* exp_id of frame, for debugging only */
+} ia_css_tagger_buf_sp_elem_t;
+
+#endif /* __IA_CSS_TAGGER_COMMON_H__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/timer/src/timer.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/timer/src/timer.c
new file mode 100644
index 000000000000..49c69e60ca5c
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/timer/src/timer.c
@@ -0,0 +1,48 @@
+#ifndef ISP2401
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#else
+/**
+Support for Intel Camera Imaging ISP subsystem.
+Copyright (c) 2010 - 2015, Intel Corporation.
+
+This program is free software; you can redistribute it and/or modify it
+under the terms and conditions of the GNU General Public License,
+version 2, as published by the Free Software Foundation.
+
+This program is distributed in the hope it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+more details.
+*/
+#endif
+
+#include <type_support.h> /* for uint32_t */
+#include "ia_css_timer.h" /*struct ia_css_clock_tick */
+#include "sh_css_legacy.h" /* IA_CSS_PIPE_ID_NUM*/
+#include "gp_timer.h" /*gp_timer_read()*/
+#include "assert_support.h"
+
+enum ia_css_err
+ia_css_timer_get_current_tick(
+ struct ia_css_clock_tick *curr_ts) {
+
+ assert(curr_ts != NULL);
+ if (curr_ts == NULL) {
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+ curr_ts->ticks = (clock_value_t)gp_timer_read(GP_TIMER_SEL);
+ return IA_CSS_SUCCESS;
+}
+
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css.c
new file mode 100644
index 000000000000..73c76583610a
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css.c
@@ -0,0 +1,11364 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+/*! \file */
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+#include "ia_css.h"
+#include "sh_css_hrt.h" /* only for file 2 MIPI */
+#include "ia_css_buffer.h"
+#include "ia_css_binary.h"
+#include "sh_css_internal.h"
+#include "sh_css_mipi.h"
+#include "sh_css_sp.h" /* sh_css_sp_group */
+#if !defined(HAS_NO_INPUT_SYSTEM)
+#include "ia_css_isys.h"
+#endif
+#include "ia_css_frame.h"
+#include "sh_css_defs.h"
+#include "sh_css_firmware.h"
+#include "sh_css_params.h"
+#include "sh_css_params_internal.h"
+#include "sh_css_param_shading.h"
+#include "ia_css_refcount.h"
+#include "ia_css_rmgr.h"
+#include "ia_css_debug.h"
+#include "ia_css_debug_pipe.h"
+#include "ia_css_device_access.h"
+#include "device_access.h"
+#include "sh_css_legacy.h"
+#include "ia_css_pipeline.h"
+#include "ia_css_stream.h"
+#include "sh_css_stream_format.h"
+#include "ia_css_pipe.h"
+#include "ia_css_util.h"
+#include "ia_css_pipe_util.h"
+#include "ia_css_pipe_binarydesc.h"
+#include "ia_css_pipe_stagedesc.h"
+#ifdef USE_INPUT_SYSTEM_VERSION_2
+#include "ia_css_isys.h"
+#endif
+
+#include "memory_access.h"
+#include "tag.h"
+#include "assert_support.h"
+#include "math_support.h"
+#include "sw_event_global.h" /* Event IDs.*/
+#if !defined(HAS_NO_INPUT_FORMATTER)
+#include "ia_css_ifmtr.h"
+#endif
+#if !defined(HAS_NO_INPUT_SYSTEM)
+#include "input_system.h"
+#endif
+#include "mmu_device.h" /* mmu_set_page_table_base_index(), ... */
+//#include "ia_css_mmu_private.h" /* sh_css_mmu_set_page_table_base_index() */
+#include "gdc_device.h" /* HRT_GDC_N */
+#include "dma.h" /* dma_set_max_burst_size() */
+#include "irq.h" /* virq */
+#include "sp.h" /* cnd_sp_irq_enable() */
+#include "isp.h" /* cnd_isp_irq_enable, ISP_VEC_NELEMS */
+#include "gp_device.h" /* gp_device_reg_store() */
+#define __INLINE_GPIO__
+#include "gpio.h"
+#include "timed_ctrl.h"
+#include "platform_support.h" /* hrt_sleep(), inline */
+#include "ia_css_inputfifo.h"
+#define WITH_PC_MONITORING 0
+
+#define SH_CSS_VIDEO_BUFFER_ALIGNMENT 0
+
+#if WITH_PC_MONITORING
+#define MULTIPLE_SAMPLES 1
+#define NOF_SAMPLES 60
+#include "linux/kthread.h"
+#include "linux/sched.h"
+#include "linux/delay.h"
+#include "sh_css_metrics.h"
+static int thread_alive;
+#endif /* WITH_PC_MONITORING */
+
+#include "ia_css_spctrl.h"
+#include "ia_css_version_data.h"
+#include "sh_css_struct.h"
+#include "ia_css_bufq.h"
+#include "ia_css_timer.h" /* clock_value_t */
+
+#include "isp/modes/interface/input_buf.isp.h"
+
+#if defined(HAS_BL)
+#include "support/bootloader/interface/ia_css_blctrl.h"
+#endif
+#if defined(HAS_RES_MGR)
+#include "components/acc_cluster/gen/host/acc_cluster.host.h"
+#endif
+
+/* Name of the sp program: should not be built-in */
+#define SP_PROG_NAME "sp"
+#if defined(HAS_BL)
+#define BL_PROG_NAME "bootloader"
+#endif
+/* Size of Refcount List */
+#define REFCOUNT_SIZE 1000
+
+/* For JPEG, we don't know the length of the image upfront,
+ * but since we support sensors up to 16 MP, we take this as
+ * the upper limit.
+ */
+#define JPEG_BYTES (16 * 1024 * 1024)
+
+#define STATS_ENABLED(stage) (stage && stage->binary && stage->binary->info && \
+ (stage->binary->info->sp.enable.s3a || stage->binary->info->sp.enable.dis))
+
+#define DEFAULT_PLANES { {0, 0, 0, 0} }
+
+struct sh_css my_css;
+
+int (*sh_css_printf) (const char *fmt, va_list args) = NULL;
+
+/* Modes of work: stream_create and stream_destroy will update the save/restore
+   data only when in working mode, not in suspend/resume mode.
+*/
+enum ia_sh_css_modes {
+ sh_css_mode_none = 0,
+ sh_css_mode_working,
+ sh_css_mode_suspend,
+ sh_css_mode_resume
+};
+
+/* A stream seed, to save and restore the stream data.
+   The stream seed contains all the data required to "grow" the stream again
+   after it was closed.
+*/
+struct sh_css_stream_seed {
+ struct ia_css_stream **orig_stream; /* pointer to restore the original handle */
+ struct ia_css_stream *stream; /* handle, used as ID too.*/
+ struct ia_css_stream_config stream_config; /* stream config struct */
+ int num_pipes;
+ struct ia_css_pipe *pipes[IA_CSS_PIPE_ID_NUM]; /* pipe handles */
+ struct ia_css_pipe **orig_pipes[IA_CSS_PIPE_ID_NUM]; /* pointer to restore original handle */
+ struct ia_css_pipe_config pipe_config[IA_CSS_PIPE_ID_NUM]; /* pipe config structs */
+};
+
+#define MAX_ACTIVE_STREAMS 5
+/* A global struct for save/restore to hold all the data that should sustain power-down:
+ MMU base, IRQ type, env for routines, binary loaded FW and the stream seeds.
+*/
+struct sh_css_save {
+ enum ia_sh_css_modes mode;
+ uint32_t mmu_base; /* the last mmu_base */
+ enum ia_css_irq_type irq_type;
+ struct sh_css_stream_seed stream_seeds[MAX_ACTIVE_STREAMS];
+ struct ia_css_fw *loaded_fw; /* fw struct previously loaded */
+ struct ia_css_env driver_env; /* driver-supplied env copy */
+};
+
+static bool my_css_save_initialized; /* if my_css_save was initialized */
+static struct sh_css_save my_css_save;
+
+/* pqiao NOTICE: this is for css internal buffer recycling when stopping pipeline,
+ this array is temporary and will be replaced by resource manager*/
+/* Taking the biggest Size for number of Elements */
+#define MAX_HMM_BUFFER_NUM \
+ (SH_CSS_MAX_NUM_QUEUES * (IA_CSS_NUM_ELEMS_SP2HOST_BUFFER_QUEUE + 2))
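+/* i.e. one buffer record per slot in every SP-to-host buffer queue,
+ * plus a margin of two extra records per queue */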
+
+struct sh_css_hmm_buffer_record {
+ bool in_use;
+ enum ia_css_buffer_type type;
+ struct ia_css_rmgr_vbuf_handle *h_vbuf;
+ hrt_address kernel_ptr;
+};
+
+static struct sh_css_hmm_buffer_record hmm_buffer_record[MAX_HMM_BUFFER_NUM];
+
+#define GPIO_FLASH_PIN_MASK (1 << HIVE_GPIO_STROBE_TRIGGER_PIN)
+
+static bool fw_explicitly_loaded = false;
+
+/**
+ * Local prototypes
+ */
+
+static enum ia_css_err
+allocate_delay_frames(struct ia_css_pipe *pipe);
+
+static enum ia_css_err
+sh_css_pipe_start(struct ia_css_stream *stream);
+
+#ifdef ISP2401
+/**
+ * @brief Stop all "ia_css_pipe" instances in the target
+ * "ia_css_stream" instance.
+ *
+ * @param[in] stream Pointer to the target "ia_css_stream" instance.
+ *
+ * @return
+ * - IA_CSS_SUCCESS, if the "stop" requests have been successfully sent out.
+ * - CSS error code, otherwise.
+ *
+ *
+ * NOTE
+ * This API sends the "stop" requests to the "ia_css_pipe"
+ * instances in the same "ia_css_stream" instance. It returns
+ * without waiting for all "ia_css_pipe" instances to stop.
+ */
+static enum ia_css_err
+sh_css_pipes_stop(struct ia_css_stream *stream);
+
+/**
+ * @brief Check if all "ia_css_pipe" instances in the target
+ * "ia_css_stream" instance have stopped.
+ *
+ * @param[in] stream Pointer to the target "ia_css_stream" instance.
+ *
+ * @return
+ * - true, if all "ia_css_pipe" instances in the target "ia_css_stream"
+ * instance have been stopped.
+ * - false, otherwise.
+ */
+static bool
+sh_css_pipes_have_stopped(struct ia_css_stream *stream);
+
+static enum ia_css_err
+ia_css_pipe_check_format(struct ia_css_pipe *pipe, enum ia_css_frame_format format);
+
+static enum ia_css_err
+check_pipe_resolutions(const struct ia_css_pipe *pipe);
+
+#endif
+
+static enum ia_css_err
+ia_css_pipe_load_extension(struct ia_css_pipe *pipe,
+ struct ia_css_fw_info *firmware);
+
+static void
+ia_css_pipe_unload_extension(struct ia_css_pipe *pipe,
+ struct ia_css_fw_info *firmware);
+static void
+ia_css_reset_defaults(struct sh_css* css);
+
+static void
+sh_css_init_host_sp_control_vars(void);
+
+#ifndef ISP2401
+static void
+sh_css_mmu_set_page_table_base_index(hrt_data base_index);
+
+#endif
+static enum ia_css_err set_num_primary_stages(unsigned int *num, enum ia_css_pipe_version version);
+
+static bool
+need_capture_pp(const struct ia_css_pipe *pipe);
+
+static bool
+need_yuv_scaler_stage(const struct ia_css_pipe *pipe);
+
+static enum ia_css_err ia_css_pipe_create_cas_scaler_desc_single_output(
+ struct ia_css_frame_info *cas_scaler_in_info,
+ struct ia_css_frame_info *cas_scaler_out_info,
+ struct ia_css_frame_info *cas_scaler_vf_info,
+ struct ia_css_cas_binary_descr *descr);
+
+static void ia_css_pipe_destroy_cas_scaler_desc(struct ia_css_cas_binary_descr *descr);
+
+static bool
+need_downscaling(const struct ia_css_resolution in_res,
+ const struct ia_css_resolution out_res);
+
+static bool need_capt_ldc(const struct ia_css_pipe *pipe);
+
+static enum ia_css_err
+sh_css_pipe_load_binaries(struct ia_css_pipe *pipe);
+
+static
+enum ia_css_err sh_css_pipe_get_viewfinder_frame_info(
+ struct ia_css_pipe *pipe,
+ struct ia_css_frame_info *info,
+ unsigned int idx);
+
+static enum ia_css_err
+sh_css_pipe_get_output_frame_info(struct ia_css_pipe *pipe,
+ struct ia_css_frame_info *info,
+ unsigned int idx);
+
+static enum ia_css_err
+capture_start(struct ia_css_pipe *pipe);
+
+static enum ia_css_err
+video_start(struct ia_css_pipe *pipe);
+
+static enum ia_css_err
+preview_start(struct ia_css_pipe *pipe);
+
+static enum ia_css_err
+yuvpp_start(struct ia_css_pipe *pipe);
+
+static bool copy_on_sp(struct ia_css_pipe *pipe);
+
+static enum ia_css_err
+init_vf_frameinfo_defaults(struct ia_css_pipe *pipe,
+ struct ia_css_frame *vf_frame, unsigned int idx);
+
+static enum ia_css_err
+init_in_frameinfo_memory_defaults(struct ia_css_pipe *pipe,
+ struct ia_css_frame *frame, enum ia_css_frame_format format);
+
+static enum ia_css_err
+init_out_frameinfo_defaults(struct ia_css_pipe *pipe,
+ struct ia_css_frame *out_frame, unsigned int idx);
+
+static enum ia_css_err
+sh_css_pipeline_add_acc_stage(struct ia_css_pipeline *pipeline,
+ const void *acc_fw);
+
+static enum ia_css_err
+alloc_continuous_frames(
+ struct ia_css_pipe *pipe, bool init_time);
+
+static void
+pipe_global_init(void);
+
+static enum ia_css_err
+pipe_generate_pipe_num(const struct ia_css_pipe *pipe, unsigned int *pipe_number);
+
+static void
+pipe_release_pipe_num(unsigned int pipe_num);
+
+static enum ia_css_err
+create_host_pipeline_structure(struct ia_css_stream *stream);
+
+static enum ia_css_err
+create_host_pipeline(struct ia_css_stream *stream);
+
+static enum ia_css_err
+create_host_preview_pipeline(struct ia_css_pipe *pipe);
+
+static enum ia_css_err
+create_host_video_pipeline(struct ia_css_pipe *pipe);
+
+static enum ia_css_err
+create_host_copy_pipeline(struct ia_css_pipe *pipe,
+ unsigned max_input_width,
+ struct ia_css_frame *out_frame);
+
+static enum ia_css_err
+create_host_isyscopy_capture_pipeline(struct ia_css_pipe *pipe);
+
+static enum ia_css_err
+create_host_capture_pipeline(struct ia_css_pipe *pipe);
+
+static enum ia_css_err
+create_host_yuvpp_pipeline(struct ia_css_pipe *pipe);
+
+static enum ia_css_err
+create_host_acc_pipeline(struct ia_css_pipe *pipe);
+
+static unsigned int
+sh_css_get_sw_interrupt_value(unsigned int irq);
+
+static struct ia_css_binary *ia_css_pipe_get_shading_correction_binary(const struct ia_css_pipe *pipe);
+
+static struct ia_css_binary *
+ia_css_pipe_get_s3a_binary(const struct ia_css_pipe *pipe);
+
+static struct ia_css_binary *
+ia_css_pipe_get_sdis_binary(const struct ia_css_pipe *pipe);
+
+static void
+sh_css_hmm_buffer_record_init(void);
+
+static void
+sh_css_hmm_buffer_record_uninit(void);
+
+static void
+sh_css_hmm_buffer_record_reset(struct sh_css_hmm_buffer_record *buffer_record);
+
+#ifndef ISP2401
+static bool
+sh_css_hmm_buffer_record_acquire(struct ia_css_rmgr_vbuf_handle *h_vbuf,
+#else
+static struct sh_css_hmm_buffer_record
+*sh_css_hmm_buffer_record_acquire(struct ia_css_rmgr_vbuf_handle *h_vbuf,
+#endif
+ enum ia_css_buffer_type type,
+ hrt_address kernel_ptr);
+
+static struct sh_css_hmm_buffer_record
+*sh_css_hmm_buffer_record_validate(hrt_vaddress ddr_buffer_addr,
+ enum ia_css_buffer_type type);
+
+void
+ia_css_get_acc_configs(
+ struct ia_css_pipe *pipe,
+ struct ia_css_isp_config *config);
+
+
+#if CONFIG_ON_FRAME_ENQUEUE()
+static enum ia_css_err set_config_on_frame_enqueue(struct ia_css_frame_info *info, struct frame_data_wrapper *frame);
+#endif
+
+#ifdef USE_INPUT_SYSTEM_VERSION_2401
+static unsigned int get_crop_lines_for_bayer_order(const struct ia_css_stream_config *config);
+static unsigned int get_crop_columns_for_bayer_order(const struct ia_css_stream_config *config);
+static void get_pipe_extra_pixel(struct ia_css_pipe *pipe,
+ unsigned int *extra_row, unsigned int *extra_column);
+#endif
+
+#ifdef ISP2401
+#ifdef USE_INPUT_SYSTEM_VERSION_2401
+static enum ia_css_err
+aspect_ratio_crop_init(struct ia_css_stream *curr_stream,
+ struct ia_css_pipe *pipes[],
+ bool *do_crop_status);
+
+static bool
+aspect_ratio_crop_check(bool enabled, struct ia_css_pipe *curr_pipe);
+
+static enum ia_css_err
+aspect_ratio_crop(struct ia_css_pipe *curr_pipe,
+ struct ia_css_resolution *effective_res);
+#endif
+
+#endif
+static void
+sh_css_pipe_free_shading_table(struct ia_css_pipe *pipe)
+{
+ assert(pipe != NULL);
+ if (pipe == NULL) {
+ IA_CSS_ERROR("NULL input parameter");
+ return;
+ }
+
+ if (pipe->shading_table)
+ ia_css_shading_table_free(pipe->shading_table);
+ pipe->shading_table = NULL;
+}
+
+static enum ia_css_frame_format yuv420_copy_formats[] = {
+ IA_CSS_FRAME_FORMAT_NV12,
+ IA_CSS_FRAME_FORMAT_NV21,
+ IA_CSS_FRAME_FORMAT_YV12,
+ IA_CSS_FRAME_FORMAT_YUV420,
+ IA_CSS_FRAME_FORMAT_YUV420_16,
+ IA_CSS_FRAME_FORMAT_CSI_MIPI_YUV420_8,
+ IA_CSS_FRAME_FORMAT_CSI_MIPI_LEGACY_YUV420_8
+};
+
+static enum ia_css_frame_format yuv422_copy_formats[] = {
+ IA_CSS_FRAME_FORMAT_NV12,
+ IA_CSS_FRAME_FORMAT_NV16,
+ IA_CSS_FRAME_FORMAT_NV21,
+ IA_CSS_FRAME_FORMAT_NV61,
+ IA_CSS_FRAME_FORMAT_YV12,
+ IA_CSS_FRAME_FORMAT_YV16,
+ IA_CSS_FRAME_FORMAT_YUV420,
+ IA_CSS_FRAME_FORMAT_YUV420_16,
+ IA_CSS_FRAME_FORMAT_YUV422,
+ IA_CSS_FRAME_FORMAT_YUV422_16,
+ IA_CSS_FRAME_FORMAT_UYVY,
+ IA_CSS_FRAME_FORMAT_YUYV
+};
+
+#define array_length(array) (sizeof(array)/sizeof(array[0]))
+
+/* Verify whether the selected output format can be produced
+ * by the copy binary given the stream format.
+ */
+static enum ia_css_err
+verify_copy_out_frame_format(struct ia_css_pipe *pipe)
+{
+ enum ia_css_frame_format out_fmt = pipe->output_info[0].format;
+ unsigned int i, found = 0;
+
+ assert(pipe != NULL);
+ assert(pipe->stream != NULL);
+
+ switch (pipe->stream->config.input_config.format) {
+ case IA_CSS_STREAM_FORMAT_YUV420_8_LEGACY:
+ case IA_CSS_STREAM_FORMAT_YUV420_8:
+ for (i=0; i<array_length(yuv420_copy_formats) && !found; i++)
+ found = (out_fmt == yuv420_copy_formats[i]);
+ break;
+ case IA_CSS_STREAM_FORMAT_YUV420_10:
+ case IA_CSS_STREAM_FORMAT_YUV420_16:
+ found = (out_fmt == IA_CSS_FRAME_FORMAT_YUV420_16);
+ break;
+ case IA_CSS_STREAM_FORMAT_YUV422_8:
+ for (i=0; i<array_length(yuv422_copy_formats) && !found; i++)
+ found = (out_fmt == yuv422_copy_formats[i]);
+ break;
+ case IA_CSS_STREAM_FORMAT_YUV422_10:
+ case IA_CSS_STREAM_FORMAT_YUV422_16:
+ found = (out_fmt == IA_CSS_FRAME_FORMAT_YUV422_16 ||
+ out_fmt == IA_CSS_FRAME_FORMAT_YUV420_16);
+ break;
+ case IA_CSS_STREAM_FORMAT_RGB_444:
+ case IA_CSS_STREAM_FORMAT_RGB_555:
+ case IA_CSS_STREAM_FORMAT_RGB_565:
+ found = (out_fmt == IA_CSS_FRAME_FORMAT_RGBA888 ||
+ out_fmt == IA_CSS_FRAME_FORMAT_RGB565);
+ break;
+ case IA_CSS_STREAM_FORMAT_RGB_666:
+ case IA_CSS_STREAM_FORMAT_RGB_888:
+ found = (out_fmt == IA_CSS_FRAME_FORMAT_RGBA888 ||
+ out_fmt == IA_CSS_FRAME_FORMAT_YUV420);
+ break;
+ case IA_CSS_STREAM_FORMAT_RAW_6:
+ case IA_CSS_STREAM_FORMAT_RAW_7:
+ case IA_CSS_STREAM_FORMAT_RAW_8:
+ case IA_CSS_STREAM_FORMAT_RAW_10:
+ case IA_CSS_STREAM_FORMAT_RAW_12:
+ case IA_CSS_STREAM_FORMAT_RAW_14:
+ case IA_CSS_STREAM_FORMAT_RAW_16:
+ found = (out_fmt == IA_CSS_FRAME_FORMAT_RAW) ||
+ (out_fmt == IA_CSS_FRAME_FORMAT_RAW_PACKED);
+ break;
+ case IA_CSS_STREAM_FORMAT_BINARY_8:
+ found = (out_fmt == IA_CSS_FRAME_FORMAT_BINARY_8);
+ break;
+ default:
+ break;
+ }
+ if (!found)
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ return IA_CSS_SUCCESS;
+}
+
+unsigned int
+ia_css_stream_input_format_bits_per_pixel(struct ia_css_stream *stream)
+{
+ int bpp = 0;
+
+ if (stream != NULL)
+ bpp = ia_css_util_input_format_bpp(stream->config.input_config.format,
+ stream->config.pixels_per_clock == 2);
+
+ return bpp;
+}
+
+#if !defined(HAS_NO_INPUT_SYSTEM) && defined(USE_INPUT_SYSTEM_VERSION_2)
+static enum ia_css_err
+sh_css_config_input_network(struct ia_css_stream *stream)
+{
+ unsigned int fmt_type;
+ struct ia_css_pipe *pipe = stream->last_pipe;
+ struct ia_css_binary *binary = NULL;
+ enum ia_css_err err = IA_CSS_SUCCESS;
+
+ assert(stream != NULL);
+ assert(pipe != NULL);
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "sh_css_config_input_network() enter:\n");
+
+ if (pipe->pipeline.stages)
+ binary = pipe->pipeline.stages->binary;
+
+ err = ia_css_isys_convert_stream_format_to_mipi_format(
+ stream->config.input_config.format,
+ stream->csi_rx_config.comp,
+ &fmt_type);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+ sh_css_sp_program_input_circuit(fmt_type,
+ stream->config.channel_id,
+ stream->config.mode);
+
+ if ((binary && (binary->online || stream->config.continuous)) ||
+ pipe->config.mode == IA_CSS_PIPE_MODE_COPY) {
+ err = ia_css_ifmtr_configure(&stream->config,
+ binary);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+ }
+
+ if (stream->config.mode == IA_CSS_INPUT_MODE_TPG ||
+ stream->config.mode == IA_CSS_INPUT_MODE_PRBS) {
+ unsigned int hblank_cycles = 100,
+ vblank_lines = 6,
+ width,
+ height,
+ vblank_cycles;
+ width = (stream->config.input_config.input_res.width) / (1 + (stream->config.pixels_per_clock == 2));
+ height = stream->config.input_config.input_res.height;
+ vblank_cycles = vblank_lines * (width + hblank_cycles);
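+ /* e.g. a 1920x1080 TPG stream at 1 pixel per clock gives
+ * width = 1920 and vblank_cycles = 6 * (1920 + 100) = 12120 */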
+ sh_css_sp_configure_sync_gen(width, height, hblank_cycles,
+ vblank_cycles);
+#if defined(IS_ISP_2400_SYSTEM)
+ if (pipe->stream->config.mode == IA_CSS_INPUT_MODE_TPG) {
+ /* TODO: move define to proper file in tools */
+ #define GP_ISEL_TPG_MODE 0x90058
+ ia_css_device_store_uint32(GP_ISEL_TPG_MODE, 0);
+ }
+#endif
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "sh_css_config_input_network() leave:\n");
+ return IA_CSS_SUCCESS;
+}
+#elif !defined(HAS_NO_INPUT_SYSTEM) && defined(USE_INPUT_SYSTEM_VERSION_2401)
+static unsigned int csi2_protocol_calculate_max_subpixels_per_line(
+ enum ia_css_stream_format format,
+ unsigned int pixels_per_line)
+{
+ unsigned int rval;
+
+ switch (format) {
+ case IA_CSS_STREAM_FORMAT_YUV420_8_LEGACY:
+ /*
+ * The frame format layout is shown below.
+ *
+ * Line 0: UYY0 UYY0 ... UYY0
+ * Line 1: VYY0 VYY0 ... VYY0
+ * Line 2: UYY0 UYY0 ... UYY0
+ * Line 3: VYY0 VYY0 ... VYY0
+ * ...
+ * Line (n-2): UYY0 UYY0 ... UYY0
+ * Line (n-1): VYY0 VYY0 ... VYY0
+ *
+ * In this frame format, the even-line is
+ * as wide as the odd-line.
+ * The 0 is introduced by the input system
+ * (mipi backend).
+ */
+ rval = pixels_per_line * 2;
+ break;
+ case IA_CSS_STREAM_FORMAT_YUV420_8:
+ case IA_CSS_STREAM_FORMAT_YUV420_10:
+ case IA_CSS_STREAM_FORMAT_YUV420_16:
+ /*
+ * The frame format layout is shown below.
+ *
+ * Line 0: YYYY YYYY ... YYYY
+ * Line 1: UYVY UYVY ... UYVY UYVY
+ * Line 2: YYYY YYYY ... YYYY
+ * Line 3: UYVY UYVY ... UYVY UYVY
+ * ...
+ * Line (n-2): YYYY YYYY ... YYYY
+ * Line (n-1): UYVY UYVY ... UYVY UYVY
+ *
+ * In this frame format, the odd-line is twice
+ * as wide as the even-line.
+ */
+ rval = pixels_per_line * 2;
+ break;
+ case IA_CSS_STREAM_FORMAT_YUV422_8:
+ case IA_CSS_STREAM_FORMAT_YUV422_10:
+ case IA_CSS_STREAM_FORMAT_YUV422_16:
+ /*
+ * The frame format layout is shown below.
+ *
+ * Line 0: UYVY UYVY ... UYVY
+ * Line 1: UYVY UYVY ... UYVY
+ * Line 2: UYVY UYVY ... UYVY
+ * Line 3: UYVY UYVY ... UYVY
+ * ...
+ * Line (n-2): UYVY UYVY ... UYVY
+ * Line (n-1): UYVY UYVY ... UYVY
+ *
+ * In this frame format, the even-line is
+ * as wide as the odd-line.
+ */
+ rval = pixels_per_line * 2;
+ break;
+ case IA_CSS_STREAM_FORMAT_RGB_444:
+ case IA_CSS_STREAM_FORMAT_RGB_555:
+ case IA_CSS_STREAM_FORMAT_RGB_565:
+ case IA_CSS_STREAM_FORMAT_RGB_666:
+ case IA_CSS_STREAM_FORMAT_RGB_888:
+ /*
+ * The frame format layout is shown below.
+ *
+ * Line 0: ABGR ABGR ... ABGR
+ * Line 1: ABGR ABGR ... ABGR
+ * Line 2: ABGR ABGR ... ABGR
+ * Line 3: ABGR ABGR ... ABGR
+ * ...
+ * Line (n-2): ABGR ABGR ... ABGR
+ * Line (n-1): ABGR ABGR ... ABGR
+ *
+ * In this frame format, the even-line is
+ * as wide as the odd-line.
+ */
+ rval = pixels_per_line * 4;
+ break;
+ case IA_CSS_STREAM_FORMAT_RAW_6:
+ case IA_CSS_STREAM_FORMAT_RAW_7:
+ case IA_CSS_STREAM_FORMAT_RAW_8:
+ case IA_CSS_STREAM_FORMAT_RAW_10:
+ case IA_CSS_STREAM_FORMAT_RAW_12:
+ case IA_CSS_STREAM_FORMAT_RAW_14:
+ case IA_CSS_STREAM_FORMAT_RAW_16:
+ case IA_CSS_STREAM_FORMAT_BINARY_8:
+ case IA_CSS_STREAM_FORMAT_USER_DEF1:
+ case IA_CSS_STREAM_FORMAT_USER_DEF2:
+ case IA_CSS_STREAM_FORMAT_USER_DEF3:
+ case IA_CSS_STREAM_FORMAT_USER_DEF4:
+ case IA_CSS_STREAM_FORMAT_USER_DEF5:
+ case IA_CSS_STREAM_FORMAT_USER_DEF6:
+ case IA_CSS_STREAM_FORMAT_USER_DEF7:
+ case IA_CSS_STREAM_FORMAT_USER_DEF8:
+ /*
+ * The frame format layout is shown below.
+ *
+ * Line 0: Pixel Pixel ... Pixel
+ * Line 1: Pixel Pixel ... Pixel
+ * Line 2: Pixel Pixel ... Pixel
+ * Line 3: Pixel Pixel ... Pixel
+ * ...
+ * Line (n-2): Pixel Pixel ... Pixel
+ * Line (n-1): Pixel Pixel ... Pixel
+ *
+ * In this frame format, the even-line is
+ * as wide as the odd-line.
+ */
+ rval = pixels_per_line;
+ break;
+ default:
+ rval = 0;
+ break;
+ }
+
+ return rval;
+}
+
+static bool sh_css_translate_stream_cfg_to_input_system_input_port_id(
+ struct ia_css_stream_config *stream_cfg,
+ ia_css_isys_descr_t *isys_stream_descr)
+{
+ bool rc;
+
+ rc = true;
+ switch (stream_cfg->mode) {
+ case IA_CSS_INPUT_MODE_TPG:
+
+ if (stream_cfg->source.tpg.id == IA_CSS_TPG_ID0) {
+ isys_stream_descr->input_port_id = INPUT_SYSTEM_PIXELGEN_PORT0_ID;
+ } else if (stream_cfg->source.tpg.id == IA_CSS_TPG_ID1) {
+ isys_stream_descr->input_port_id = INPUT_SYSTEM_PIXELGEN_PORT1_ID;
+ } else if (stream_cfg->source.tpg.id == IA_CSS_TPG_ID2) {
+ isys_stream_descr->input_port_id = INPUT_SYSTEM_PIXELGEN_PORT2_ID;
+ }
+
+ break;
+ case IA_CSS_INPUT_MODE_PRBS:
+
+ if (stream_cfg->source.prbs.id == IA_CSS_PRBS_ID0) {
+ isys_stream_descr->input_port_id = INPUT_SYSTEM_PIXELGEN_PORT0_ID;
+ } else if (stream_cfg->source.prbs.id == IA_CSS_PRBS_ID1) {
+ isys_stream_descr->input_port_id = INPUT_SYSTEM_PIXELGEN_PORT1_ID;
+ } else if (stream_cfg->source.prbs.id == IA_CSS_PRBS_ID2) {
+ isys_stream_descr->input_port_id = INPUT_SYSTEM_PIXELGEN_PORT2_ID;
+ }
+
+ break;
+ case IA_CSS_INPUT_MODE_BUFFERED_SENSOR:
+
+ if (stream_cfg->source.port.port == IA_CSS_CSI2_PORT0) {
+ isys_stream_descr->input_port_id = INPUT_SYSTEM_CSI_PORT0_ID;
+ } else if (stream_cfg->source.port.port == IA_CSS_CSI2_PORT1) {
+ isys_stream_descr->input_port_id = INPUT_SYSTEM_CSI_PORT1_ID;
+ } else if (stream_cfg->source.port.port == IA_CSS_CSI2_PORT2) {
+ isys_stream_descr->input_port_id = INPUT_SYSTEM_CSI_PORT2_ID;
+ }
+
+ break;
+ default:
+ rc = false;
+ break;
+ }
+
+ return rc;
+}
+
+static bool sh_css_translate_stream_cfg_to_input_system_input_port_type(
+ struct ia_css_stream_config *stream_cfg,
+ ia_css_isys_descr_t *isys_stream_descr)
+{
+ bool rc;
+
+ rc = true;
+ switch (stream_cfg->mode) {
+ case IA_CSS_INPUT_MODE_TPG:
+
+ isys_stream_descr->mode = INPUT_SYSTEM_SOURCE_TYPE_TPG;
+
+ break;
+ case IA_CSS_INPUT_MODE_PRBS:
+
+ isys_stream_descr->mode = INPUT_SYSTEM_SOURCE_TYPE_PRBS;
+
+ break;
+ case IA_CSS_INPUT_MODE_SENSOR:
+ case IA_CSS_INPUT_MODE_BUFFERED_SENSOR:
+
+ isys_stream_descr->mode = INPUT_SYSTEM_SOURCE_TYPE_SENSOR;
+ break;
+
+ default:
+ rc = false;
+ break;
+ }
+
+ return rc;
+}
+
+static bool sh_css_translate_stream_cfg_to_input_system_input_port_attr(
+ struct ia_css_stream_config *stream_cfg,
+ ia_css_isys_descr_t *isys_stream_descr,
+ int isys_stream_idx)
+{
+ bool rc;
+
+ rc = true;
+ switch (stream_cfg->mode) {
+ case IA_CSS_INPUT_MODE_TPG:
+ if (stream_cfg->source.tpg.mode == IA_CSS_TPG_MODE_RAMP) {
+ isys_stream_descr->tpg_port_attr.mode = PIXELGEN_TPG_MODE_RAMP;
+ } else if (stream_cfg->source.tpg.mode == IA_CSS_TPG_MODE_CHECKERBOARD) {
+ isys_stream_descr->tpg_port_attr.mode = PIXELGEN_TPG_MODE_CHBO;
+ } else if (stream_cfg->source.tpg.mode == IA_CSS_TPG_MODE_MONO) {
+ isys_stream_descr->tpg_port_attr.mode = PIXELGEN_TPG_MODE_MONO;
+ } else {
+ rc = false;
+ }
+
+ /*
+ * TODO
+ * - Make "color_cfg" as part of "ia_css_tpg_config".
+ */
+ isys_stream_descr->tpg_port_attr.color_cfg.R1 = 51;
+ isys_stream_descr->tpg_port_attr.color_cfg.G1 = 102;
+ isys_stream_descr->tpg_port_attr.color_cfg.B1 = 255;
+ isys_stream_descr->tpg_port_attr.color_cfg.R2 = 0;
+ isys_stream_descr->tpg_port_attr.color_cfg.G2 = 100;
+ isys_stream_descr->tpg_port_attr.color_cfg.B2 = 160;
+
+ isys_stream_descr->tpg_port_attr.mask_cfg.h_mask = stream_cfg->source.tpg.x_mask;
+ isys_stream_descr->tpg_port_attr.mask_cfg.v_mask = stream_cfg->source.tpg.y_mask;
+ isys_stream_descr->tpg_port_attr.mask_cfg.hv_mask = stream_cfg->source.tpg.xy_mask;
+
+ isys_stream_descr->tpg_port_attr.delta_cfg.h_delta = stream_cfg->source.tpg.x_delta;
+ isys_stream_descr->tpg_port_attr.delta_cfg.v_delta = stream_cfg->source.tpg.y_delta;
+
+ /*
+ * TODO
+ * - Make "sync_gen_cfg" as part of "ia_css_tpg_config".
+ */
+ isys_stream_descr->tpg_port_attr.sync_gen_cfg.hblank_cycles = 100;
+ isys_stream_descr->tpg_port_attr.sync_gen_cfg.vblank_cycles = 100;
+ isys_stream_descr->tpg_port_attr.sync_gen_cfg.pixels_per_clock = stream_cfg->pixels_per_clock;
+ isys_stream_descr->tpg_port_attr.sync_gen_cfg.nr_of_frames = (uint32_t) ~(0x0);
+ isys_stream_descr->tpg_port_attr.sync_gen_cfg.pixels_per_line = stream_cfg->isys_config[IA_CSS_STREAM_DEFAULT_ISYS_STREAM_IDX].input_res.width;
+ isys_stream_descr->tpg_port_attr.sync_gen_cfg.lines_per_frame = stream_cfg->isys_config[IA_CSS_STREAM_DEFAULT_ISYS_STREAM_IDX].input_res.height;
+
+ break;
+ case IA_CSS_INPUT_MODE_PRBS:
+
+ isys_stream_descr->prbs_port_attr.seed0 = stream_cfg->source.prbs.seed;
+ isys_stream_descr->prbs_port_attr.seed1 = stream_cfg->source.prbs.seed1;
+
+ /*
+ * TODO
+ * - Make "sync_gen_cfg" as part of "ia_css_prbs_config".
+ */
+ isys_stream_descr->prbs_port_attr.sync_gen_cfg.hblank_cycles = 100;
+ isys_stream_descr->prbs_port_attr.sync_gen_cfg.vblank_cycles = 100;
+ isys_stream_descr->prbs_port_attr.sync_gen_cfg.pixels_per_clock = stream_cfg->pixels_per_clock;
+ isys_stream_descr->prbs_port_attr.sync_gen_cfg.nr_of_frames = (uint32_t) ~(0x0);
+ isys_stream_descr->prbs_port_attr.sync_gen_cfg.pixels_per_line = stream_cfg->isys_config[IA_CSS_STREAM_DEFAULT_ISYS_STREAM_IDX].input_res.width;
+ isys_stream_descr->prbs_port_attr.sync_gen_cfg.lines_per_frame = stream_cfg->isys_config[IA_CSS_STREAM_DEFAULT_ISYS_STREAM_IDX].input_res.height;
+
+ break;
+ case IA_CSS_INPUT_MODE_BUFFERED_SENSOR:
+ {
+ enum ia_css_err err;
+ unsigned int fmt_type;
+
+ err = ia_css_isys_convert_stream_format_to_mipi_format(
+ stream_cfg->isys_config[isys_stream_idx].format,
+ MIPI_PREDICTOR_NONE,
+ &fmt_type);
+ if (err != IA_CSS_SUCCESS)
+ rc = false;
+
+ isys_stream_descr->csi_port_attr.active_lanes = stream_cfg->source.port.num_lanes;
+ isys_stream_descr->csi_port_attr.fmt_type = fmt_type;
+ isys_stream_descr->csi_port_attr.ch_id = stream_cfg->channel_id;
+#ifdef USE_INPUT_SYSTEM_VERSION_2401
+ isys_stream_descr->online = stream_cfg->online;
+#endif
+ err |= ia_css_isys_convert_compressed_format(
+ &stream_cfg->source.port.compression,
+ isys_stream_descr);
+ if (err != IA_CSS_SUCCESS)
+ rc = false;
+
+ /* metadata */
+ isys_stream_descr->metadata.enable = false;
+ if (stream_cfg->metadata_config.resolution.height > 0) {
+ err = ia_css_isys_convert_stream_format_to_mipi_format(
+ stream_cfg->metadata_config.data_type,
+ MIPI_PREDICTOR_NONE,
+ &fmt_type);
+ if (err != IA_CSS_SUCCESS)
+ rc = false;
+ isys_stream_descr->metadata.fmt_type = fmt_type;
+ isys_stream_descr->metadata.bits_per_pixel =
+ ia_css_util_input_format_bpp(stream_cfg->metadata_config.data_type, true);
+ isys_stream_descr->metadata.pixels_per_line = stream_cfg->metadata_config.resolution.width;
+ isys_stream_descr->metadata.lines_per_frame = stream_cfg->metadata_config.resolution.height;
+#ifdef USE_INPUT_SYSTEM_VERSION_2401
+ /* For the new input system, the number of str2mmio requests must be
+ * even, so we round the number of metadata lines up to be even. */
+ if (isys_stream_descr->metadata.lines_per_frame > 0)
+ isys_stream_descr->metadata.lines_per_frame +=
+ (isys_stream_descr->metadata.lines_per_frame & 1);
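+ /* e.g. 3 metadata lines become 4; an even line count is unchanged */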
+#endif
+ isys_stream_descr->metadata.align_req_in_bytes =
+ ia_css_csi2_calculate_input_system_alignment(stream_cfg->metadata_config.data_type);
+ isys_stream_descr->metadata.enable = true;
+ }
+
+ break;
+ }
+ default:
+ rc = false;
+ break;
+ }
+
+ return rc;
+}
+
+static bool sh_css_translate_stream_cfg_to_input_system_input_port_resolution(
+ struct ia_css_stream_config *stream_cfg,
+ ia_css_isys_descr_t *isys_stream_descr,
+ int isys_stream_idx)
+{
+ unsigned int bits_per_subpixel;
+ unsigned int max_subpixels_per_line;
+ unsigned int lines_per_frame;
+ unsigned int align_req_in_bytes;
+ enum ia_css_stream_format fmt_type;
+
+ fmt_type = stream_cfg->isys_config[isys_stream_idx].format;
+ if ((stream_cfg->mode == IA_CSS_INPUT_MODE_SENSOR ||
+ stream_cfg->mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR) &&
+ stream_cfg->source.port.compression.type != IA_CSS_CSI2_COMPRESSION_TYPE_NONE) {
+
+ if (stream_cfg->source.port.compression.uncompressed_bits_per_pixel ==
+ UNCOMPRESSED_BITS_PER_PIXEL_10) {
+ fmt_type = IA_CSS_STREAM_FORMAT_RAW_10;
+ }
+ else if (stream_cfg->source.port.compression.uncompressed_bits_per_pixel ==
+ UNCOMPRESSED_BITS_PER_PIXEL_12) {
+ fmt_type = IA_CSS_STREAM_FORMAT_RAW_12;
+ }
+ else
+ return false;
+ }
+
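+ /* for compressed CSI-2 input, the port is sized using the
+ * uncompressed RAW format selected above */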
+ bits_per_subpixel =
+ sh_css_stream_format_2_bits_per_subpixel(fmt_type);
+ if (bits_per_subpixel == 0)
+ return false;
+
+ max_subpixels_per_line =
+ csi2_protocol_calculate_max_subpixels_per_line(fmt_type,
+ stream_cfg->isys_config[isys_stream_idx].input_res.width);
+ if (max_subpixels_per_line == 0)
+ return false;
+
+ lines_per_frame = stream_cfg->isys_config[isys_stream_idx].input_res.height;
+ if (lines_per_frame == 0)
+ return false;
+
+ align_req_in_bytes = ia_css_csi2_calculate_input_system_alignment(fmt_type);
+
+ /* HW needs subpixel info for its settings */
+ isys_stream_descr->input_port_resolution.bits_per_pixel = bits_per_subpixel;
+ isys_stream_descr->input_port_resolution.pixels_per_line = max_subpixels_per_line;
+ isys_stream_descr->input_port_resolution.lines_per_frame = lines_per_frame;
+ isys_stream_descr->input_port_resolution.align_req_in_bytes = align_req_in_bytes;
+
+ return true;
+}
+
+static bool sh_css_translate_stream_cfg_to_isys_stream_descr(
+ struct ia_css_stream_config *stream_cfg,
+ bool early_polling,
+ ia_css_isys_descr_t *isys_stream_descr,
+ int isys_stream_idx)
+{
+ bool rc;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "sh_css_translate_stream_cfg_to_isys_stream_descr() enter:\n");
+ rc = sh_css_translate_stream_cfg_to_input_system_input_port_id(stream_cfg, isys_stream_descr);
+ rc &= sh_css_translate_stream_cfg_to_input_system_input_port_type(stream_cfg, isys_stream_descr);
+ rc &= sh_css_translate_stream_cfg_to_input_system_input_port_attr(stream_cfg, isys_stream_descr, isys_stream_idx);
+ rc &= sh_css_translate_stream_cfg_to_input_system_input_port_resolution(stream_cfg, isys_stream_descr, isys_stream_idx);
+
+ isys_stream_descr->raw_packed = stream_cfg->pack_raw_pixels;
+ isys_stream_descr->linked_isys_stream_id = (int8_t) stream_cfg->isys_config[isys_stream_idx].linked_isys_stream_id;
+ /*
+ * Early polling is required for timestamp accuracy in certain cases.
+ * The ISYS HW polling is started on
+ * ia_css_isys_stream_capture_indication() instead of
+ * ia_css_pipeline_sp_wait_for_isys_stream_N() as isp processing of
+ * capture takes longer than getting an ISYS frame
+ *
+ * Only 2401 relevant ??
+ */
+ isys_stream_descr->polling_mode
+ = early_polling ? INPUT_SYSTEM_POLL_ON_CAPTURE_REQUEST
+ : INPUT_SYSTEM_POLL_ON_WAIT_FOR_FRAME;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "sh_css_translate_stream_cfg_to_isys_stream_descr() leave:\n");
+
+ return rc;
+}
+
+static bool sh_css_translate_binary_info_to_input_system_output_port_attr(
+ struct ia_css_binary *binary,
+ ia_css_isys_descr_t *isys_stream_descr)
+{
+ if (!binary)
+ return false;
+
+ isys_stream_descr->output_port_attr.left_padding = binary->left_padding;
+ isys_stream_descr->output_port_attr.max_isp_input_width = binary->info->sp.input.max_width;
+
+ return true;
+}
+
+static enum ia_css_err
+sh_css_config_input_network(struct ia_css_stream *stream)
+{
+ bool rc;
+ ia_css_isys_descr_t isys_stream_descr;
+ unsigned int sp_thread_id;
+ struct sh_css_sp_pipeline_terminal *sp_pipeline_input_terminal;
+ struct ia_css_pipe *pipe = NULL;
+ struct ia_css_binary *binary = NULL;
+ int i;
+ uint32_t isys_stream_id;
+ bool early_polling = false;
+
+ assert(stream != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "sh_css_config_input_network() enter 0x%p:\n", stream);
+
+ if (stream->config.continuous == true) {
+ if (stream->last_pipe->config.mode == IA_CSS_PIPE_MODE_CAPTURE) {
+ pipe = stream->last_pipe;
+ } else if (stream->last_pipe->config.mode == IA_CSS_PIPE_MODE_YUVPP) {
+ pipe = stream->last_pipe;
+ } else if (stream->last_pipe->config.mode == IA_CSS_PIPE_MODE_PREVIEW) {
+ pipe = stream->last_pipe->pipe_settings.preview.copy_pipe;
+ } else if (stream->last_pipe->config.mode == IA_CSS_PIPE_MODE_VIDEO) {
+ pipe = stream->last_pipe->pipe_settings.video.copy_pipe;
+ }
+ } else {
+ pipe = stream->last_pipe;
+ if (stream->last_pipe->config.mode == IA_CSS_PIPE_MODE_CAPTURE) {
+ /*
+ * We need to poll the ISYS HW in capture_indication itself
+ * for "non-continous" capture usecase for getting accurate
+ * isys frame capture timestamps.
+ * This is because the capturepipe propcessing takes longer
+ * to execute than the input system frame capture.
+ * 2401 specific
+ */
+ early_polling = true;
+ }
+ }
+
+ assert(pipe != NULL);
+ if (pipe == NULL)
+ return IA_CSS_ERR_INTERNAL_ERROR;
+
+ if (pipe->pipeline.stages != NULL)
+ if (pipe->pipeline.stages->binary != NULL)
+ binary = pipe->pipeline.stages->binary;
+
+
+
+ if (binary) {
+ /* this was being done in ifmtr in 2400.
+ * online and continuous modes bypass init_in_frameinfo_memory_defaults,
+ * so it needs to be done here
+ */
+ ia_css_get_crop_offsets(pipe, &binary->in_frame_info);
+ }
+
+ /* get the SP thread id */
+ rc = ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(pipe), &sp_thread_id);
+ if (rc != true)
+ return IA_CSS_ERR_INTERNAL_ERROR;
+ /* get the target input terminal */
+ sp_pipeline_input_terminal = &(sh_css_sp_group.pipe_io[sp_thread_id].input);
+
+ for (i = 0; i < IA_CSS_STREAM_MAX_ISYS_STREAM_PER_CH; i++) {
+ /* initialization */
+ memset((void*)(&isys_stream_descr), 0, sizeof(ia_css_isys_descr_t));
+ sp_pipeline_input_terminal->context.virtual_input_system_stream[i].valid = 0;
+ sp_pipeline_input_terminal->ctrl.virtual_input_system_stream_cfg[i].valid = 0;
+
+ if (!stream->config.isys_config[i].valid)
+ continue;
+
+ /* translate the stream configuration to the Input System (2401) configuration */
+ rc = sh_css_translate_stream_cfg_to_isys_stream_descr(
+ &(stream->config),
+ early_polling,
+ &(isys_stream_descr), i);
+
+ if (stream->config.online) {
+ rc &= sh_css_translate_binary_info_to_input_system_output_port_attr(
+ binary,
+ &(isys_stream_descr));
+ }
+
+ if (rc != true)
+ return IA_CSS_ERR_INTERNAL_ERROR;
+
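+ /* the isys stream id is derived from the SP thread id and the
+ * isys sub-stream index */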
+ isys_stream_id = ia_css_isys_generate_stream_id(sp_thread_id, i);
+
+ /* create the virtual Input System (2401) */
+ rc = ia_css_isys_stream_create(
+ &(isys_stream_descr),
+ &(sp_pipeline_input_terminal->context.virtual_input_system_stream[i]),
+ isys_stream_id);
+ if (rc != true)
+ return IA_CSS_ERR_INTERNAL_ERROR;
+
+ /* calculate the configuration of the virtual Input System (2401) */
+ rc = ia_css_isys_stream_calculate_cfg(
+ &(sp_pipeline_input_terminal->context.virtual_input_system_stream[i]),
+ &(isys_stream_descr),
+ &(sp_pipeline_input_terminal->ctrl.virtual_input_system_stream_cfg[i]));
+ if (rc != true) {
+ ia_css_isys_stream_destroy(&(sp_pipeline_input_terminal->context.virtual_input_system_stream[i]));
+ return IA_CSS_ERR_INTERNAL_ERROR;
+ }
+ }
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "sh_css_config_input_network() leave:\n");
+
+ return IA_CSS_SUCCESS;
+}
+
+static inline struct ia_css_pipe *stream_get_last_pipe(
+ struct ia_css_stream *stream)
+{
+ struct ia_css_pipe *last_pipe = NULL;
+ if (stream != NULL)
+ last_pipe = stream->last_pipe;
+
+ return last_pipe;
+}
+
+static inline struct ia_css_pipe *stream_get_copy_pipe(
+ struct ia_css_stream *stream)
+{
+ struct ia_css_pipe *copy_pipe = NULL;
+ struct ia_css_pipe *last_pipe = NULL;
+ enum ia_css_pipe_id pipe_id;
+
+ last_pipe = stream_get_last_pipe(stream);
+
+ if ((stream != NULL) &&
+ (last_pipe != NULL) &&
+ (stream->config.continuous)) {
+
+ pipe_id = last_pipe->mode;
+ switch (pipe_id) {
+ case IA_CSS_PIPE_ID_PREVIEW:
+ copy_pipe = last_pipe->pipe_settings.preview.copy_pipe;
+ break;
+ case IA_CSS_PIPE_ID_VIDEO:
+ copy_pipe = last_pipe->pipe_settings.video.copy_pipe;
+ break;
+ default:
+ copy_pipe = NULL;
+ break;
+ }
+ }
+
+ return copy_pipe;
+}
+
+static inline struct ia_css_pipe *stream_get_target_pipe(
+ struct ia_css_stream *stream)
+{
+ struct ia_css_pipe *target_pipe;
+
+ /* get the pipe that consumes the stream */
+ if (stream->config.continuous) {
+ target_pipe = stream_get_copy_pipe(stream);
+ } else {
+ target_pipe = stream_get_last_pipe(stream);
+ }
+
+ return target_pipe;
+}
+
+static enum ia_css_err stream_csi_rx_helper(
+ struct ia_css_stream *stream,
+ enum ia_css_err (*func)(enum ia_css_csi2_port, uint32_t))
+{
+ enum ia_css_err retval = IA_CSS_ERR_INTERNAL_ERROR;
+ uint32_t sp_thread_id, stream_id;
+ bool rc;
+ struct ia_css_pipe *target_pipe = NULL;
+
+ if ((stream == NULL) || (stream->config.mode != IA_CSS_INPUT_MODE_BUFFERED_SENSOR))
+ goto exit;
+
+ target_pipe = stream_get_target_pipe(stream);
+
+ if (target_pipe == NULL)
+ goto exit;
+
+ rc = ia_css_pipeline_get_sp_thread_id(
+ ia_css_pipe_get_pipe_num(target_pipe),
+ &sp_thread_id);
+
+ if (!rc)
+ goto exit;
+
+ /* (un)register all valid "virtual isys streams" within the ia_css_stream */
+ stream_id = 0;
+ do {
+ if (stream->config.isys_config[stream_id].valid) {
+ uint32_t isys_stream_id = ia_css_isys_generate_stream_id(sp_thread_id, stream_id);
+ retval = func(stream->config.source.port.port, isys_stream_id);
+ }
+ stream_id++;
+ } while ((retval == IA_CSS_SUCCESS) &&
+ (stream_id < IA_CSS_STREAM_MAX_ISYS_STREAM_PER_CH));
+
+exit:
+ return retval;
+}
+
+static inline enum ia_css_err stream_register_with_csi_rx(
+ struct ia_css_stream *stream)
+{
+ return stream_csi_rx_helper(stream, ia_css_isys_csi_rx_register_stream);
+}
+
+static inline enum ia_css_err stream_unregister_with_csi_rx(
+ struct ia_css_stream *stream)
+{
+ return stream_csi_rx_helper(stream, ia_css_isys_csi_rx_unregister_stream);
+}
+#endif
+
+#if WITH_PC_MONITORING
+static struct task_struct *my_kthread; /* Handle for the monitoring thread */
+static int sh_binary_running; /* Enable sampling in the thread */
+
+static void print_pc_histo(char *core_name, struct sh_css_pc_histogram *hist)
+{
+ unsigned i;
+ unsigned cnt_run = 0;
+ unsigned cnt_stall = 0;
+
+ if (hist == NULL)
+ return;
+
+ sh_css_print("%s histogram length = %d\n", core_name, hist->length);
+ sh_css_print("%s PC\trun\tstall\n", core_name);
+
+ for (i = 0; i < hist->length; i++) {
+ if ((hist->run[i] == 0) && (hist->run[i] == hist->stall[i]))
+ continue;
+ sh_css_print("%s %d\t%d\t%d\n",
+ core_name, i, hist->run[i], hist->stall[i]);
+ cnt_run += hist->run[i];
+ cnt_stall += hist->stall[i];
+ }
+
+ sh_css_print(" Statistics for %s, cnt_run = %d, cnt_stall = %d, "
+ "hist->length = %d\n",
+ core_name, cnt_run, cnt_stall, hist->length);
+}
+
+static void print_pc_histogram(void)
+{
+ struct ia_css_binary_metrics *metrics;
+
+ for (metrics = sh_css_metrics.binary_metrics;
+ metrics;
+ metrics = metrics->next) {
+ if (metrics->mode == IA_CSS_BINARY_MODE_PREVIEW ||
+ metrics->mode == IA_CSS_BINARY_MODE_VF_PP) {
+ sh_css_print("pc_histogram for binary %d is SKIPPED\n",
+ metrics->id);
+ continue;
+ }
+
+ sh_css_print(" pc_histogram for binary %d\n", metrics->id);
+ print_pc_histo(" ISP", &metrics->isp_histogram);
+ print_pc_histo(" SP", &metrics->sp_histogram);
+ sh_css_print("print_pc_histogram() done for binay->id = %d, "
+ "done.\n", metrics->id);
+ }
+
+ sh_css_print("PC_MONITORING:print_pc_histogram() -- DONE\n");
+}
+
+static int pc_monitoring(void *data)
+{
+ int i = 0;
+
+ (void)data;
+ while (true) {
+ if (sh_binary_running) {
+ sh_css_metrics_sample_pcs();
+#if MULTIPLE_SAMPLES
+ for (i = 0; i < NOF_SAMPLES; i++)
+ sh_css_metrics_sample_pcs();
+#endif
+ }
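+ /* sleep briefly between samples so the monitoring thread does
+ * not monopolize a CPU */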
+ usleep_range(10, 50);
+ }
+ return 0;
+}
+
+static void spying_thread_create(void)
+{
+ my_kthread = kthread_run(pc_monitoring, NULL, "sh_pc_monitor");
+ sh_css_metrics_enable_pc_histogram(1);
+}
+
+static void input_frame_info(struct ia_css_frame_info frame_info)
+{
+ sh_css_print("SH_CSS:input_frame_info() -- frame->info.res.width = %d, "
+ "frame->info.res.height = %d, format = %d\n",
+ frame_info.res.width, frame_info.res.height, frame_info.format);
+}
+#endif /* WITH_PC_MONITORING */
+
+static void
+start_binary(struct ia_css_pipe *pipe,
+ struct ia_css_binary *binary)
+{
+ struct ia_css_stream *stream;
+
+ assert(pipe != NULL);
+ /* Acceleration uses firmware, the binary thus can be NULL */
+ /* assert(binary != NULL); */
+
+ (void)binary;
+
+#if !defined(HAS_NO_INPUT_SYSTEM)
+ stream = pipe->stream;
+#else
+ (void)pipe;
+ (void)stream;
+#endif
+
+ if (binary)
+ sh_css_metrics_start_binary(&binary->metrics);
+
+#if WITH_PC_MONITORING
+ sh_css_print("PC_MONITORING: %s() -- binary id = %d , "
+ "enable_dvs_envelope = %d\n",
+ __func__, binary->info->sp.id,
+ binary->info->sp.enable.dvs_envelope);
+ input_frame_info(binary->in_frame_info);
+
+ if (binary && binary->info->sp.pipeline.mode == IA_CSS_BINARY_MODE_VIDEO)
+ sh_binary_running = true;
+#endif
+
+#if !defined(HAS_NO_INPUT_SYSTEM) && !defined(USE_INPUT_SYSTEM_VERSION_2401)
+ if (stream->reconfigure_css_rx) {
+ ia_css_isys_rx_configure(&pipe->stream->csi_rx_config,
+ pipe->stream->config.mode);
+ stream->reconfigure_css_rx = false;
+ }
+#endif
+}
+
+/* start the copy function on the SP */
+static enum ia_css_err
+start_copy_on_sp(struct ia_css_pipe *pipe,
+ struct ia_css_frame *out_frame)
+{
+
+ (void)out_frame;
+ assert(pipe != NULL);
+ assert(pipe->stream != NULL);
+
+ if ((pipe == NULL) || (pipe->stream == NULL))
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+
+#if !defined(HAS_NO_INPUT_SYSTEM) && !defined(USE_INPUT_SYSTEM_VERSION_2401)
+ if (pipe->stream->reconfigure_css_rx)
+ ia_css_isys_rx_disable();
+#endif
+
+ if (pipe->stream->config.input_config.format != IA_CSS_STREAM_FORMAT_BINARY_8)
+ return IA_CSS_ERR_INTERNAL_ERROR;
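+ /* hand the copy off to the SP; the last argument tells the SP
+ * whether two pixels arrive per clock */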
+ sh_css_sp_start_binary_copy(ia_css_pipe_get_pipe_num(pipe), out_frame, pipe->stream->config.pixels_per_clock == 2);
+
+#if !defined(HAS_NO_INPUT_SYSTEM) && !defined(USE_INPUT_SYSTEM_VERSION_2401)
+ if (pipe->stream->reconfigure_css_rx) {
+ ia_css_isys_rx_configure(&pipe->stream->csi_rx_config, pipe->stream->config.mode);
+ pipe->stream->reconfigure_css_rx = false;
+ }
+#endif
+
+ return IA_CSS_SUCCESS;
+}
+
+void sh_css_binary_args_reset(struct sh_css_binary_args *args)
+{
+ unsigned int i;
+
+#ifndef ISP2401
+ for (i = 0; i < NUM_VIDEO_TNR_FRAMES; i++)
+#else
+ for (i = 0; i < NUM_TNR_FRAMES; i++)
+#endif
+ args->tnr_frames[i] = NULL;
+ for (i = 0; i < MAX_NUM_VIDEO_DELAY_FRAMES; i++)
+ args->delay_frames[i] = NULL;
+ args->in_frame = NULL;
+ for (i = 0; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++)
+ args->out_frame[i] = NULL;
+ args->out_vf_frame = NULL;
+ args->copy_vf = false;
+ args->copy_output = true;
+ args->vf_downscale_log2 = 0;
+}
+
+static void start_pipe(
+ struct ia_css_pipe *me,
+ enum sh_css_pipe_config_override copy_ovrd,
+ enum ia_css_input_mode input_mode)
+{
+#if defined(HAS_NO_INPUT_SYSTEM)
+ (void)input_mode;
+#endif
+
+ IA_CSS_ENTER_PRIVATE("me = %p, copy_ovrd = %d, input_mode = %d",
+ me, copy_ovrd, input_mode);
+
+ assert(me != NULL); /* all callers are in this file and call with non null argument */
+
+ sh_css_sp_init_pipeline(&me->pipeline,
+ me->mode,
+ (uint8_t)ia_css_pipe_get_pipe_num(me),
+ me->config.default_capture_config.enable_xnr != 0,
+ me->stream->config.pixels_per_clock == 2,
+ me->stream->config.continuous,
+ false,
+ me->required_bds_factor,
+ copy_ovrd,
+ input_mode,
+ &me->stream->config.metadata_config,
+#ifndef ISP2401
+ &me->stream->info.metadata_info
+#else
+ &me->stream->info.metadata_info,
+#endif
+#if !defined(HAS_NO_INPUT_SYSTEM)
+#ifndef ISP2401
+ , (input_mode==IA_CSS_INPUT_MODE_MEMORY)?
+#else
+ (input_mode == IA_CSS_INPUT_MODE_MEMORY) ?
+#endif
+ (mipi_port_ID_t)0 :
+#ifndef ISP2401
+ me->stream->config.source.port.port
+#else
+ me->stream->config.source.port.port,
+#endif
+#endif
+#ifndef ISP2401
+ );
+#else
+ &me->config.internal_frame_origin_bqs_on_sctbl,
+ me->stream->isp_params_configs);
+#endif
+
+ if (me->config.mode != IA_CSS_PIPE_MODE_COPY) {
+ struct ia_css_pipeline_stage *stage;
+ stage = me->pipeline.stages;
+ if (stage) {
+ me->pipeline.current_stage = stage;
+ start_binary(me, stage->binary);
+ }
+ }
+ IA_CSS_LEAVE_PRIVATE("void");
+}
+
+void
+sh_css_invalidate_shading_tables(struct ia_css_stream *stream)
+{
+ int i;
+ assert(stream != NULL);
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "sh_css_invalidate_shading_tables() enter:\n");
+
+ for (i=0; i<stream->num_pipes; i++) {
+ assert(stream->pipes[i] != NULL);
+ sh_css_pipe_free_shading_table(stream->pipes[i]);
+ }
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "sh_css_invalidate_shading_tables() leave: return_void\n");
+}
+
+#ifndef ISP2401
+static void
+enable_interrupts(enum ia_css_irq_type irq_type)
+{
+#ifdef USE_INPUT_SYSTEM_VERSION_2
+ mipi_port_ID_t port;
+#endif
+ bool enable_pulse = irq_type != IA_CSS_IRQ_TYPE_EDGE;
+ IA_CSS_ENTER_PRIVATE("");
+ /* Enable IRQ on the SP which signals that SP goes to idle
+ * (aka ready state) */
+ cnd_sp_irq_enable(SP0_ID, true);
+ /* Set the IRQ device 0 to either level or pulse */
+ irq_enable_pulse(IRQ0_ID, enable_pulse);
+
+ cnd_virq_enable_channel(virq_sp, true);
+
+ /* Enable SW interrupt 0, this is used to signal ISYS events */
+ cnd_virq_enable_channel(
+ (virq_id_t)(IRQ_SW_CHANNEL0_ID + IRQ_SW_CHANNEL_OFFSET),
+ true);
+ /* Enable SW interrupt 1, this is used to signal PSYS events */
+ cnd_virq_enable_channel(
+ (virq_id_t)(IRQ_SW_CHANNEL1_ID + IRQ_SW_CHANNEL_OFFSET),
+ true);
+#if !defined(HAS_IRQ_MAP_VERSION_2)
+ /* IRQ_SW_CHANNEL2_ID does not exist on 240x systems */
+ cnd_virq_enable_channel(
+ (virq_id_t)(IRQ_SW_CHANNEL2_ID + IRQ_SW_CHANNEL_OFFSET),
+ true);
+ virq_clear_all();
+#endif
+
+#ifdef USE_INPUT_SYSTEM_VERSION_2
+ for (port = 0; port < N_MIPI_PORT_ID; port++)
+ ia_css_isys_rx_enable_all_interrupts(port);
+#endif
+
+ IA_CSS_LEAVE_PRIVATE("");
+}
+
+#endif
+#if defined(HAS_BL)
+static bool sh_css_setup_blctrl_config(const struct ia_css_fw_info *fw,
+ const char *program,
+ ia_css_blctrl_cfg *blctrl_cfg)
+{
+ if((fw == NULL)||(blctrl_cfg == NULL))
+ return false;
+ blctrl_cfg->bl_entry = 0;
+ blctrl_cfg->program_name = (char *)(program);
+
+#if !defined(HRT_UNSCHED)
+ blctrl_cfg->ddr_data_offset = fw->blob.data_source;
+ blctrl_cfg->dmem_data_addr = fw->blob.data_target;
+ blctrl_cfg->dmem_bss_addr = fw->blob.bss_target;
+ blctrl_cfg->data_size = fw->blob.data_size ;
+ blctrl_cfg->bss_size = fw->blob.bss_size;
+
+ blctrl_cfg->blctrl_state_dmem_addr = fw->info.bl.sw_state;
+ blctrl_cfg->blctrl_dma_cmd_list = fw->info.bl.dma_cmd_list;
+ blctrl_cfg->blctrl_nr_of_dma_cmds = fw->info.bl.num_dma_cmds;
+
+ blctrl_cfg->code_size = fw->blob.size;
+ blctrl_cfg->code = fw->blob.code;
+ blctrl_cfg->bl_entry = fw->info.bl.bl_entry; /* entry function ptr on Bootloader */
+#endif
+ return true;
+}
+#endif
+static bool sh_css_setup_spctrl_config(const struct ia_css_fw_info *fw,
+ const char * program,
+ ia_css_spctrl_cfg *spctrl_cfg)
+{
+ if((fw == NULL)||(spctrl_cfg == NULL))
+ return false;
+ spctrl_cfg->sp_entry = 0;
+ spctrl_cfg->program_name = (char *)(program);
+
+#if !defined(HRT_UNSCHED)
+ spctrl_cfg->ddr_data_offset = fw->blob.data_source;
+ spctrl_cfg->dmem_data_addr = fw->blob.data_target;
+ spctrl_cfg->dmem_bss_addr = fw->blob.bss_target;
+ spctrl_cfg->data_size = fw->blob.data_size ;
+ spctrl_cfg->bss_size = fw->blob.bss_size;
+
+ spctrl_cfg->spctrl_config_dmem_addr = fw->info.sp.init_dmem_data;
+ spctrl_cfg->spctrl_state_dmem_addr = fw->info.sp.sw_state;
+
+ spctrl_cfg->code_size = fw->blob.size;
+ spctrl_cfg->code = fw->blob.code;
+ spctrl_cfg->sp_entry = fw->info.sp.sp_entry; /* entry function ptr on SP */
+#endif
+ return true;
+}
+void
+ia_css_unload_firmware(void)
+{
+ if (sh_css_num_binaries)
+ {
+ /* we have already loaded before so get rid of the old stuff */
+ ia_css_binary_uninit();
+ sh_css_unload_firmware();
+ }
+ fw_explicitly_loaded = false;
+}
+
+static void
+ia_css_reset_defaults(struct sh_css* css)
+{
+ struct sh_css default_css;
+
+ /* Reset everything to zero */
+ memset(&default_css, 0, sizeof(default_css));
+
+ /* Initialize the non zero values*/
+ default_css.check_system_idle = true;
+ default_css.num_cont_raw_frames = NUM_CONTINUOUS_FRAMES;
+
+ /* All should be 0: but memset does it already.
+ * default_css.num_mipi_frames[N_CSI_PORTS] = 0;
+ */
+
+ default_css.irq_type = IA_CSS_IRQ_TYPE_EDGE;
+
+ /* Set the defaults to the output */
+ *css = default_css;
+}
+
+bool
+ia_css_check_firmware_version(const struct ia_css_fw *fw)
+{
+ bool retval = false;
+
+ if (fw != NULL) {
+ retval = sh_css_check_firmware_version(fw->data);
+ }
+ return retval;
+}
+
+enum ia_css_err
+ia_css_load_firmware(const struct ia_css_env *env,
+ const struct ia_css_fw *fw)
+{
+ enum ia_css_err err;
+
+ if (env == NULL)
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ if (fw == NULL)
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_load_firmware() enter\n");
+
+ /* make sure we initialize my_css */
+ if (my_css.flush != env->cpu_mem_env.flush) {
+ ia_css_reset_defaults(&my_css);
+ my_css.flush = env->cpu_mem_env.flush;
+ }
+
+ ia_css_unload_firmware(); /* in case we are called twice */
+ err = sh_css_load_firmware(fw->data, fw->bytes);
+ if (err == IA_CSS_SUCCESS) {
+ err = ia_css_binary_init_infos();
+ if (err == IA_CSS_SUCCESS)
+ fw_explicitly_loaded = true;
+ }
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_load_firmware() leave \n");
+ return err;
+}
+
+enum ia_css_err
+ia_css_init(const struct ia_css_env *env,
+ const struct ia_css_fw *fw,
+ uint32_t mmu_l1_base,
+ enum ia_css_irq_type irq_type)
+{
+ enum ia_css_err err;
+ ia_css_spctrl_cfg spctrl_cfg;
+#if defined(HAS_BL)
+ ia_css_blctrl_cfg blctrl_cfg;
+#endif
+
+ void (*flush_func)(struct ia_css_acc_fw *fw);
+ hrt_data select, enable;
+
+ /**
+ * The C99 standard does not specify the exact object representation of structs;
+ * the representation is compiler dependent.
+ *
+ * The structs that are communicated between host and SP/ISP should have the
+ * exact same object representation. The compiler that is used to compile the
+ * firmware is hivecc.
+ *
+ * To check if a different compiler, used to compile a host application, uses
+ * another object representation, macros are defined specifying the size of
+ * the structs as expected by the firmware.
+ *
+ * A host application shall verify that a sizeof( ) of the struct is equal to
+ * the SIZE_OF_XXX macro of the corresponding struct. If they are not
+ * equal, functionality will break.
+ */
+ /* Check struct sh_css_ddr_address_map */
+ COMPILATION_ERROR_IF( sizeof(struct sh_css_ddr_address_map) != SIZE_OF_SH_CSS_DDR_ADDRESS_MAP_STRUCT );
+ /* Check struct host_sp_queues */
+ COMPILATION_ERROR_IF( sizeof(struct host_sp_queues) != SIZE_OF_HOST_SP_QUEUES_STRUCT );
+ COMPILATION_ERROR_IF( sizeof(struct ia_css_circbuf_desc_s) != SIZE_OF_IA_CSS_CIRCBUF_DESC_S_STRUCT );
+ COMPILATION_ERROR_IF( sizeof(struct ia_css_circbuf_elem_s) != SIZE_OF_IA_CSS_CIRCBUF_ELEM_S_STRUCT );
+
+ /* Check struct host_sp_communication */
+ COMPILATION_ERROR_IF( sizeof(struct host_sp_communication) != SIZE_OF_HOST_SP_COMMUNICATION_STRUCT );
+ COMPILATION_ERROR_IF( sizeof(struct sh_css_event_irq_mask) != SIZE_OF_SH_CSS_EVENT_IRQ_MASK_STRUCT );
+
+ /* Check struct sh_css_hmm_buffer */
+ COMPILATION_ERROR_IF( sizeof(struct sh_css_hmm_buffer) != SIZE_OF_SH_CSS_HMM_BUFFER_STRUCT );
+ COMPILATION_ERROR_IF( sizeof(struct ia_css_isp_3a_statistics) != SIZE_OF_IA_CSS_ISP_3A_STATISTICS_STRUCT );
+ COMPILATION_ERROR_IF( sizeof(struct ia_css_isp_dvs_statistics) != SIZE_OF_IA_CSS_ISP_DVS_STATISTICS_STRUCT );
+ COMPILATION_ERROR_IF( sizeof(struct ia_css_metadata) != SIZE_OF_IA_CSS_METADATA_STRUCT );
+
+ /* Check struct ia_css_init_dmem_cfg */
+ COMPILATION_ERROR_IF( sizeof(struct ia_css_sp_init_dmem_cfg) != SIZE_OF_IA_CSS_SP_INIT_DMEM_CFG_STRUCT );
+
+ if (fw == NULL && !fw_explicitly_loaded)
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ if (env == NULL)
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+
+ sh_css_printf = env->print_env.debug_print;
+
+ IA_CSS_ENTER("void");
+
+ flush_func = env->cpu_mem_env.flush;
+
+ pipe_global_init();
+ ia_css_pipeline_init();
+ ia_css_queue_map_init();
+
+ ia_css_device_access_init(&env->hw_access_env);
+
+ select = gpio_reg_load(GPIO0_ID, _gpio_block_reg_do_select)
+ & (~GPIO_FLASH_PIN_MASK);
+ enable = gpio_reg_load(GPIO0_ID, _gpio_block_reg_do_e)
+ | GPIO_FLASH_PIN_MASK;
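+ /* clear the flash strobe pin in the "select" value and set it in the
+ * output-enable value; both are written back below when the GPIO block
+ * is configured for output mode */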
+ sh_css_mmu_set_page_table_base_index(mmu_l1_base);
+#ifndef ISP2401
+ my_css_save.mmu_base = mmu_l1_base;
+#else
+ ia_css_save_mmu_base_addr(mmu_l1_base);
+#endif
+
+ ia_css_reset_defaults(&my_css);
+
+ my_css_save.driver_env = *env;
+ my_css.flush = flush_func;
+
+ err = ia_css_rmgr_init();
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR(err);
+ return err;
+ }
+
+#ifndef ISP2401
+ IA_CSS_LOG("init: %d", my_css_save_initialized);
+#else
+ ia_css_save_restore_data_init();
+#endif
+
+#ifndef ISP2401
+ if (!my_css_save_initialized)
+ {
+ my_css_save_initialized = true;
+ my_css_save.mode = sh_css_mode_working;
+ memset(my_css_save.stream_seeds, 0, sizeof(struct sh_css_stream_seed) * MAX_ACTIVE_STREAMS);
+ IA_CSS_LOG("init: %d mode=%d", my_css_save_initialized, my_css_save.mode);
+ }
+#endif
+ mipi_init();
+
+#ifndef ISP2401
+ /* In case this has been programmed already, update internal
+ data structure ... DEPRECATED */
+ my_css.page_table_base_index = mmu_get_page_table_base_index(MMU0_ID);
+
+#endif
+ my_css.irq_type = irq_type;
+#ifndef ISP2401
+ my_css_save.irq_type = irq_type;
+#else
+ ia_css_save_irq_type(irq_type);
+#endif
+ enable_interrupts(my_css.irq_type);
+
+ /* configure GPIO to output mode */
+ gpio_reg_store(GPIO0_ID, _gpio_block_reg_do_select, select);
+ gpio_reg_store(GPIO0_ID, _gpio_block_reg_do_e, enable);
+ gpio_reg_store(GPIO0_ID, _gpio_block_reg_do_0, 0);
+
+ err = ia_css_refcount_init(REFCOUNT_SIZE);
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR(err);
+ return err;
+ }
+ err = sh_css_params_init();
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR(err);
+ return err;
+ }
+ if (fw)
+ {
+ ia_css_unload_firmware(); /* in case we already had firmware loaded */
+ err = sh_css_load_firmware(fw->data, fw->bytes);
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR(err);
+ return err;
+ }
+ err = ia_css_binary_init_infos();
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR(err);
+ return err;
+ }
+ fw_explicitly_loaded = false;
+#ifndef ISP2401
+ my_css_save.loaded_fw = (struct ia_css_fw *)fw;
+#endif
+ }
+ if(!sh_css_setup_spctrl_config(&sh_css_sp_fw,SP_PROG_NAME,&spctrl_cfg))
+ return IA_CSS_ERR_INTERNAL_ERROR;
+
+ err = ia_css_spctrl_load_fw(SP0_ID, &spctrl_cfg);
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR(err);
+ return err;
+ }
+
+#if defined(HAS_BL)
+ if (!sh_css_setup_blctrl_config(&sh_css_bl_fw, BL_PROG_NAME, &blctrl_cfg))
+ return IA_CSS_ERR_INTERNAL_ERROR;
+ err = ia_css_blctrl_load_fw(&blctrl_cfg);
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR(err);
+ return err;
+ }
+
+#ifdef ISP2401
+ err = ia_css_blctrl_add_target_fw_info(&sh_css_sp_fw, IA_CSS_SP0,
+ get_sp_code_addr(SP0_ID));
+
+#endif
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR(err);
+ return err;
+ }
+#endif /* HAS_BL */
+
+#if WITH_PC_MONITORING
+ if (!thread_alive) {
+ thread_alive++;
+ sh_css_print("PC_MONITORING: %s() -- create thread DISABLED\n",
+ __func__);
+ spying_thread_create();
+ }
+#endif
+ if (!sh_css_hrt_system_is_idle()) {
+ IA_CSS_LEAVE_ERR(IA_CSS_ERR_SYSTEM_NOT_IDLE);
+ return IA_CSS_ERR_SYSTEM_NOT_IDLE;
+ }
+ /* can be called here, queuing works, but:
+ - when sp is started later, it will wipe queued items
+ so for now we leave it for later and make sure
+ updates are not called too frequently.
+ sh_css_init_buffer_queues();
+ */
+
+#if defined(HAS_INPUT_SYSTEM_VERSION_2) && defined(HAS_INPUT_SYSTEM_VERSION_2401)
+#if defined(USE_INPUT_SYSTEM_VERSION_2)
+ gp_device_reg_store(GP_DEVICE0_ID, _REG_GP_SWITCH_ISYS2401_ADDR, 0);
+#elif defined (USE_INPUT_SYSTEM_VERSION_2401)
+ gp_device_reg_store(GP_DEVICE0_ID, _REG_GP_SWITCH_ISYS2401_ADDR, 1);
+#endif
+#endif
+
+#if !defined(HAS_NO_INPUT_SYSTEM)
+ dma_set_max_burst_size(DMA0_ID, HIVE_DMA_BUS_DDR_CONN,
+ ISP_DMA_MAX_BURST_LENGTH);
+
+ if(ia_css_isys_init() != INPUT_SYSTEM_ERR_NO_ERROR)
+ err = IA_CSS_ERR_INVALID_ARGUMENTS;
+#endif
+
+ sh_css_params_map_and_store_default_gdc_lut();
+
+ IA_CSS_LEAVE_ERR(err);
+ return err;
+}
+
+enum ia_css_err ia_css_suspend(void)
+{
+ int i;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_suspend() enter\n");
+ my_css_save.mode = sh_css_mode_suspend;
+ for(i=0;i<MAX_ACTIVE_STREAMS;i++)
+ if (my_css_save.stream_seeds[i].stream != NULL)
+ {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "==*> unloading seed %d (%p)\n", i, my_css_save.stream_seeds[i].stream);
+ ia_css_stream_unload(my_css_save.stream_seeds[i].stream);
+ }
+ my_css_save.mode = sh_css_mode_working;
+ ia_css_stop_sp();
+ ia_css_uninit();
+ for(i=0;i<MAX_ACTIVE_STREAMS;i++)
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "==*> after 1: seed %d (%p)\n", i, my_css_save.stream_seeds[i].stream);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_suspend() leave\n");
+ return IA_CSS_SUCCESS;
+}
+
+enum ia_css_err
+ia_css_resume(void)
+{
+ int i, j;
+ enum ia_css_err err;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_resume() enter: void\n");
+
+ err = ia_css_init(&(my_css_save.driver_env), my_css_save.loaded_fw, my_css_save.mmu_base, my_css_save.irq_type);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+ err = ia_css_start_sp();
+ if (err != IA_CSS_SUCCESS)
+ return err;
+ my_css_save.mode = sh_css_mode_resume;
+ for(i=0;i<MAX_ACTIVE_STREAMS;i++)
+ {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "==*> seed stream %p\n", my_css_save.stream_seeds[i].stream);
+ if (my_css_save.stream_seeds[i].stream != NULL)
+ {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "==*> loading seed %d\n", i);
+ err = ia_css_stream_load(my_css_save.stream_seeds[i].stream);
+ if (err != IA_CSS_SUCCESS)
+ {
+ if (i)
+ for(j=0;j<i;j++)
+ ia_css_stream_unload(my_css_save.stream_seeds[j].stream);
+ return err;
+ }
+ err = ia_css_stream_start(my_css_save.stream_seeds[i].stream);
+ if (err != IA_CSS_SUCCESS)
+ {
+ for(j=0;j<=i;j++)
+ {
+ ia_css_stream_stop(my_css_save.stream_seeds[j].stream);
+ ia_css_stream_unload(my_css_save.stream_seeds[j].stream);
+ }
+ return err;
+ }
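+ /* publish the re-created stream and pipe handles back through the
+ * caller's original pointers */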
+ *my_css_save.stream_seeds[i].orig_stream = my_css_save.stream_seeds[i].stream;
+ for(j=0;j<my_css_save.stream_seeds[i].num_pipes;j++)
+ *(my_css_save.stream_seeds[i].orig_pipes[j]) = my_css_save.stream_seeds[i].pipes[j];
+ }
+ }
+ my_css_save.mode = sh_css_mode_working;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_resume() leave: return_void\n");
+ return IA_CSS_SUCCESS;
+}
+
+enum ia_css_err
+ia_css_enable_isys_event_queue(bool enable)
+{
+ if (sh_css_sp_is_running())
+ return IA_CSS_ERR_RESOURCE_NOT_AVAILABLE;
+ sh_css_sp_enable_isys_event_queue(enable);
+ return IA_CSS_SUCCESS;
+}
+
+void *sh_css_malloc(size_t size)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "sh_css_malloc() enter: size=%d\n",size);
+ /* FIXME: This first test can probably go away */
+ if (size == 0)
+ return NULL;
+ if (size > PAGE_SIZE)
+ return vmalloc(size);
+ return kmalloc(size, GFP_KERNEL);
+}
+
+void *sh_css_calloc(size_t N, size_t size)
+{
+ void *p;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "sh_css_calloc() enter: N=%d, size=%d\n",N,size);
+
+ /* FIXME: this test can probably go away */
+ if (size > 0) {
+ p = sh_css_malloc(N*size);
+ if (p)
+ memset(p, 0, N*size);
+ return p;
+ }
+ return NULL;
+}
+
+void sh_css_free(void *ptr)
+{
+ if (is_vmalloc_addr(ptr))
+ vfree(ptr);
+ else
+ kfree(ptr);
+}
+
+/* For Acceleration API: Flush FW (shared buffer pointer) arguments */
+void
+sh_css_flush(struct ia_css_acc_fw *fw)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "sh_css_flush() enter:\n");
+ if ((fw != NULL) && (my_css.flush != NULL))
+ my_css.flush(fw);
+}
+
+/* Mapping sp threads. Currently, this is done when a stream is created and
+ * pipelines are ready to be converted to sp pipelines. Be careful if you are
+ * doing it from stream_create since we could run out of sp threads due to
+ * allocation on inactive pipelines. */
+static enum ia_css_err
+map_sp_threads(struct ia_css_stream *stream, bool map)
+{
+ struct ia_css_pipe *main_pipe = NULL;
+ struct ia_css_pipe *copy_pipe = NULL;
+ struct ia_css_pipe *capture_pipe = NULL;
+ struct ia_css_pipe *acc_pipe = NULL;
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ enum ia_css_pipe_id pipe_id;
+
+ assert(stream != NULL);
+ IA_CSS_ENTER_PRIVATE("stream = %p, map = %p", stream, map);
+
+ if (stream == NULL) {
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_INVALID_ARGUMENTS);
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+
+ main_pipe = stream->last_pipe;
+ pipe_id = main_pipe->mode;
+
+ ia_css_pipeline_map(main_pipe->pipe_num, map);
+
+ switch (pipe_id) {
+ case IA_CSS_PIPE_ID_PREVIEW:
+ copy_pipe = main_pipe->pipe_settings.preview.copy_pipe;
+ capture_pipe = main_pipe->pipe_settings.preview.capture_pipe;
+ acc_pipe = main_pipe->pipe_settings.preview.acc_pipe;
+ break;
+
+ case IA_CSS_PIPE_ID_VIDEO:
+ copy_pipe = main_pipe->pipe_settings.video.copy_pipe;
+ capture_pipe = main_pipe->pipe_settings.video.capture_pipe;
+ break;
+
+ case IA_CSS_PIPE_ID_CAPTURE:
+ case IA_CSS_PIPE_ID_ACC:
+ default:
+ break;
+ }
+
+ if (acc_pipe) {
+ ia_css_pipeline_map(acc_pipe->pipe_num, map);
+ }
+
+ if (capture_pipe) {
+ ia_css_pipeline_map(capture_pipe->pipe_num, map);
+ }
+
+ /* Firmware expects the copy pipe to be the last pipe mapped (if needed). */
+ if (copy_pipe) {
+ ia_css_pipeline_map(copy_pipe->pipe_num, map);
+ }
+ /* DH regular multi pipe - not continuous mode: map the next pipes too */
+ if (!stream->config.continuous) {
+ int i;
+ for (i = 1; i < stream->num_pipes; i++)
+ ia_css_pipeline_map(stream->pipes[i]->pipe_num, map);
+ }
+
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+}
+
+/* creates a host pipeline skeleton for all pipes in a stream. Called during
+ * stream_create. */
+static enum ia_css_err
+create_host_pipeline_structure(struct ia_css_stream *stream)
+{
+ struct ia_css_pipe *copy_pipe = NULL, *capture_pipe = NULL;
+ struct ia_css_pipe *acc_pipe = NULL;
+ enum ia_css_pipe_id pipe_id;
+ struct ia_css_pipe *main_pipe = NULL;
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ unsigned int copy_pipe_delay = 0,
+ capture_pipe_delay = 0;
+
+ assert(stream != NULL);
+ IA_CSS_ENTER_PRIVATE("stream = %p", stream);
+
+ if (stream == NULL) {
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_INVALID_ARGUMENTS);
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+
+ main_pipe = stream->last_pipe;
+ assert(main_pipe != NULL);
+ if (main_pipe == NULL) {
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_INVALID_ARGUMENTS);
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+
+ pipe_id = main_pipe->mode;
+
+ switch (pipe_id) {
+ case IA_CSS_PIPE_ID_PREVIEW:
+ copy_pipe = main_pipe->pipe_settings.preview.copy_pipe;
+ copy_pipe_delay = main_pipe->dvs_frame_delay;
+ capture_pipe = main_pipe->pipe_settings.preview.capture_pipe;
+ capture_pipe_delay = IA_CSS_FRAME_DELAY_0;
+ acc_pipe = main_pipe->pipe_settings.preview.acc_pipe;
+ err = ia_css_pipeline_create(&main_pipe->pipeline, main_pipe->mode, main_pipe->pipe_num, main_pipe->dvs_frame_delay);
+ break;
+
+ case IA_CSS_PIPE_ID_VIDEO:
+ copy_pipe = main_pipe->pipe_settings.video.copy_pipe;
+ copy_pipe_delay = main_pipe->dvs_frame_delay;
+ capture_pipe = main_pipe->pipe_settings.video.capture_pipe;
+ capture_pipe_delay = IA_CSS_FRAME_DELAY_0;
+ err = ia_css_pipeline_create(&main_pipe->pipeline, main_pipe->mode, main_pipe->pipe_num, main_pipe->dvs_frame_delay);
+ break;
+
+ case IA_CSS_PIPE_ID_CAPTURE:
+ capture_pipe = main_pipe;
+ capture_pipe_delay = main_pipe->dvs_frame_delay;
+ break;
+
+ case IA_CSS_PIPE_ID_YUVPP:
+ err = ia_css_pipeline_create(&main_pipe->pipeline, main_pipe->mode,
+ main_pipe->pipe_num, main_pipe->dvs_frame_delay);
+ break;
+
+ case IA_CSS_PIPE_ID_ACC:
+ err = ia_css_pipeline_create(&main_pipe->pipeline, main_pipe->mode, main_pipe->pipe_num, main_pipe->dvs_frame_delay);
+ break;
+
+ default:
+ err = IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+
+ if ((IA_CSS_SUCCESS == err) && copy_pipe) {
+ err = ia_css_pipeline_create(&copy_pipe->pipeline,
+ copy_pipe->mode,
+ copy_pipe->pipe_num,
+ copy_pipe_delay);
+ }
+
+ if ((IA_CSS_SUCCESS == err) && capture_pipe) {
+ err = ia_css_pipeline_create(&capture_pipe->pipeline,
+ capture_pipe->mode,
+ capture_pipe->pipe_num,
+ capture_pipe_delay);
+ }
+
+ if ((IA_CSS_SUCCESS == err) && acc_pipe) {
+ err = ia_css_pipeline_create(&acc_pipe->pipeline, acc_pipe->mode, acc_pipe->pipe_num, main_pipe->dvs_frame_delay);
+ }
+
+ /* DH regular multi pipe - not continuous mode: create the next pipelines too */
+ if (!stream->config.continuous) {
+ int i;
+ for (i = 1; i < stream->num_pipes && IA_CSS_SUCCESS == err; i++) {
+ main_pipe = stream->pipes[i];
+ err = ia_css_pipeline_create(&main_pipe->pipeline,
+ main_pipe->mode,
+ main_pipe->pipe_num,
+ main_pipe->dvs_frame_delay);
+ }
+ }
+
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+}
+
+/* creates a host pipeline for all pipes in a stream. Called during
+ * stream_start. */
+static enum ia_css_err
+create_host_pipeline(struct ia_css_stream *stream)
+{
+ struct ia_css_pipe *copy_pipe = NULL, *capture_pipe = NULL;
+ struct ia_css_pipe *acc_pipe = NULL;
+ enum ia_css_pipe_id pipe_id;
+ struct ia_css_pipe *main_pipe = NULL;
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ unsigned max_input_width = 0;
+
+ IA_CSS_ENTER_PRIVATE("stream = %p", stream);
+ if (stream == NULL) {
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_INVALID_ARGUMENTS);
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+
+ main_pipe = stream->last_pipe;
+ pipe_id = main_pipe->mode;
+
+ /* No continuous frame allocation for capture pipe. It uses the
+ * "main" pipe's frames. */
+ if ((pipe_id == IA_CSS_PIPE_ID_PREVIEW) ||
+ (pipe_id == IA_CSS_PIPE_ID_VIDEO)) {
+ /* About pipe_id == IA_CSS_PIPE_ID_PREVIEW && stream->config.mode != IA_CSS_INPUT_MODE_MEMORY:
+ * The original condition pipe_id == IA_CSS_PIPE_ID_PREVIEW is too strong. E.g. in SkyCam (with memory
+ * based input frames) there is no continuous mode and thus no need for allocated continuous frames.
+ * This is not only for SkyCam but for all preview cases that use DDR based input frames. For this
+ * reason the check stream->config.mode != IA_CSS_INPUT_MODE_MEMORY has been added.
+ */
+ if (stream->config.continuous ||
+ (pipe_id == IA_CSS_PIPE_ID_PREVIEW && stream->config.mode != IA_CSS_INPUT_MODE_MEMORY)) {
+ err = alloc_continuous_frames(main_pipe, true);
+ if (err != IA_CSS_SUCCESS)
+ goto ERR;
+ }
+
+ }
+
+#if defined(USE_INPUT_SYSTEM_VERSION_2)
+ /* old isys: need to allocate_mipi_frames() even in IA_CSS_PIPE_MODE_COPY */
+ if (pipe_id != IA_CSS_PIPE_ID_ACC) {
+ err = allocate_mipi_frames(main_pipe, &stream->info);
+ if (err != IA_CSS_SUCCESS)
+ goto ERR;
+ }
+#elif defined(USE_INPUT_SYSTEM_VERSION_2401)
+ if ((pipe_id != IA_CSS_PIPE_ID_ACC) &&
+ (main_pipe->config.mode != IA_CSS_PIPE_MODE_COPY)) {
+ err = allocate_mipi_frames(main_pipe, &stream->info);
+ if (err != IA_CSS_SUCCESS)
+ goto ERR;
+ }
+#endif
+
+ switch (pipe_id) {
+ case IA_CSS_PIPE_ID_PREVIEW:
+ copy_pipe = main_pipe->pipe_settings.preview.copy_pipe;
+ capture_pipe = main_pipe->pipe_settings.preview.capture_pipe;
+ acc_pipe = main_pipe->pipe_settings.preview.acc_pipe;
+ max_input_width =
+ main_pipe->pipe_settings.preview.preview_binary.info->sp.input.max_width;
+
+ err = create_host_preview_pipeline(main_pipe);
+ if (err != IA_CSS_SUCCESS)
+ goto ERR;
+
+ break;
+
+ case IA_CSS_PIPE_ID_VIDEO:
+ copy_pipe = main_pipe->pipe_settings.video.copy_pipe;
+ capture_pipe = main_pipe->pipe_settings.video.capture_pipe;
+ max_input_width =
+ main_pipe->pipe_settings.video.video_binary.info->sp.input.max_width;
+
+ err = create_host_video_pipeline(main_pipe);
+ if (err != IA_CSS_SUCCESS)
+ goto ERR;
+
+ break;
+
+ case IA_CSS_PIPE_ID_CAPTURE:
+ capture_pipe = main_pipe;
+
+ break;
+
+ case IA_CSS_PIPE_ID_YUVPP:
+ err = create_host_yuvpp_pipeline(main_pipe);
+ if (err != IA_CSS_SUCCESS)
+ goto ERR;
+
+ break;
+
+ case IA_CSS_PIPE_ID_ACC:
+ err = create_host_acc_pipeline(main_pipe);
+ if (err != IA_CSS_SUCCESS)
+ goto ERR;
+
+ break;
+ default:
+ err = IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+ if (err != IA_CSS_SUCCESS)
+ goto ERR;
+
+ if (copy_pipe) {
+ err = create_host_copy_pipeline(copy_pipe, max_input_width,
+ main_pipe->continuous_frames[0]);
+ if (err != IA_CSS_SUCCESS)
+ goto ERR;
+ }
+
+ if (capture_pipe) {
+ err = create_host_capture_pipeline(capture_pipe);
+ if (err != IA_CSS_SUCCESS)
+ goto ERR;
+ }
+
+ if (acc_pipe) {
+ err = create_host_acc_pipeline(acc_pipe);
+ if (err != IA_CSS_SUCCESS)
+ goto ERR;
+ }
+
+ /* DH regular multi pipe - not continuous mode: create the next pipelines too */
+ if (!stream->config.continuous) {
+ int i;
+ for (i = 1; i < stream->num_pipes && IA_CSS_SUCCESS == err; i++) {
+ switch (stream->pipes[i]->mode) {
+ case IA_CSS_PIPE_ID_PREVIEW:
+ err = create_host_preview_pipeline(stream->pipes[i]);
+ break;
+ case IA_CSS_PIPE_ID_VIDEO:
+ err = create_host_video_pipeline(stream->pipes[i]);
+ break;
+ case IA_CSS_PIPE_ID_CAPTURE:
+ err = create_host_capture_pipeline(stream->pipes[i]);
+ break;
+ case IA_CSS_PIPE_ID_YUVPP:
+ err = create_host_yuvpp_pipeline(stream->pipes[i]);
+ break;
+ case IA_CSS_PIPE_ID_ACC:
+ err = create_host_acc_pipeline(stream->pipes[i]);
+ break;
+ default:
+ err = IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+ if (err != IA_CSS_SUCCESS)
+ goto ERR;
+ }
+ }
+
+ERR:
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+}
+
+static enum ia_css_err
+init_pipe_defaults(enum ia_css_pipe_mode mode,
+ struct ia_css_pipe *pipe,
+ bool copy_pipe)
+{
+ static struct ia_css_pipe default_pipe = IA_CSS_DEFAULT_PIPE;
+ static struct ia_css_preview_settings prev = IA_CSS_DEFAULT_PREVIEW_SETTINGS;
+ static struct ia_css_capture_settings capt = IA_CSS_DEFAULT_CAPTURE_SETTINGS;
+ static struct ia_css_video_settings video = IA_CSS_DEFAULT_VIDEO_SETTINGS;
+ static struct ia_css_yuvpp_settings yuvpp = IA_CSS_DEFAULT_YUVPP_SETTINGS;
+
+ if (pipe == NULL) {
+ IA_CSS_ERROR("NULL pipe parameter");
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+
+ /* Initialize pipe to pre-defined defaults */
+ *pipe = default_pipe;
+
+ /* TODO: JB should not be needed, but temporary backward reference */
+ switch (mode) {
+ case IA_CSS_PIPE_MODE_PREVIEW:
+ pipe->mode = IA_CSS_PIPE_ID_PREVIEW;
+ pipe->pipe_settings.preview = prev;
+ break;
+ case IA_CSS_PIPE_MODE_CAPTURE:
+ if (copy_pipe) {
+ pipe->mode = IA_CSS_PIPE_ID_COPY;
+ } else {
+ pipe->mode = IA_CSS_PIPE_ID_CAPTURE;
+ }
+ pipe->pipe_settings.capture = capt;
+ break;
+ case IA_CSS_PIPE_MODE_VIDEO:
+ pipe->mode = IA_CSS_PIPE_ID_VIDEO;
+ pipe->pipe_settings.video = video;
+ break;
+ case IA_CSS_PIPE_MODE_ACC:
+ pipe->mode = IA_CSS_PIPE_ID_ACC;
+ break;
+ case IA_CSS_PIPE_MODE_COPY:
+ pipe->mode = IA_CSS_PIPE_ID_CAPTURE;
+ break;
+ case IA_CSS_PIPE_MODE_YUVPP:
+ pipe->mode = IA_CSS_PIPE_ID_YUVPP;
+ pipe->pipe_settings.yuvpp = yuvpp;
+ break;
+ default:
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+
+ return IA_CSS_SUCCESS;
+}
+
+static void
+pipe_global_init(void)
+{
+ uint8_t i;
+
+ my_css.pipe_counter = 0;
+ for (i = 0; i < IA_CSS_PIPELINE_NUM_MAX; i++) {
+ my_css.all_pipes[i] = NULL;
+ }
+}
+
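+/* Reserve a pipe number by taking the first free slot in my_css.all_pipes;
+ * the slot index becomes the pipe number. pipe_release_pipe_num() frees the
+ * slot again when the pipe is destroyed.
+ */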
+static enum ia_css_err
+pipe_generate_pipe_num(const struct ia_css_pipe *pipe, unsigned int *pipe_number)
+{
+ const uint8_t INVALID_PIPE_NUM = (uint8_t)~(0);
+ uint8_t pipe_num = INVALID_PIPE_NUM;
+ uint8_t i;
+
+ if (pipe == NULL) {
+ IA_CSS_ERROR("NULL pipe parameter");
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+
+ /* Assign a new pipe_num: search for an empty slot */
+ for (i = 0; i < IA_CSS_PIPELINE_NUM_MAX; i++) {
+ if (my_css.all_pipes[i] == NULL) {
+ /* position is reserved */
+ my_css.all_pipes[i] = (struct ia_css_pipe *)pipe;
+ pipe_num = i;
+ break;
+ }
+ }
+ if (pipe_num == INVALID_PIPE_NUM) {
+ /* Max number of pipes already allocated */
+ IA_CSS_ERROR("Max number of pipes already created");
+ return IA_CSS_ERR_RESOURCE_EXHAUSTED;
+ }
+
+ my_css.pipe_counter++;
+
+ IA_CSS_LOG("pipe_num (%d)", pipe_num);
+
+ *pipe_number = pipe_num;
+ return IA_CSS_SUCCESS;
+}
+
+static void
+pipe_release_pipe_num(unsigned int pipe_num)
+{
+ my_css.all_pipes[pipe_num] = NULL;
+ my_css.pipe_counter--;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "pipe_release_pipe_num (%d)\n", pipe_num);
+}
+
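+/* Allocate a pipe, initialize it to the mode-specific defaults and register
+ * it in the global pipe table. On any failure the partially created pipe is
+ * freed again, so the caller only ever sees a fully initialized pipe.
+ */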
+static enum ia_css_err
+create_pipe(enum ia_css_pipe_mode mode,
+ struct ia_css_pipe **pipe,
+ bool copy_pipe)
+{
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ struct ia_css_pipe *me;
+
+ if (pipe == NULL) {
+ IA_CSS_ERROR("NULL pipe parameter");
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+
+ me = kmalloc(sizeof(*me), GFP_KERNEL);
+ if (!me)
+ return IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+
+ err = init_pipe_defaults(mode, me, copy_pipe);
+ if (err != IA_CSS_SUCCESS) {
+ kfree(me);
+ return err;
+ }
+
+ err = pipe_generate_pipe_num(me, &(me->pipe_num));
+ if (err != IA_CSS_SUCCESS) {
+ kfree(me);
+ return err;
+ }
+
+ *pipe = me;
+ return IA_CSS_SUCCESS;
+}
+
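+/* Linear search over the global pipe table for a pipe with the given pipe
+ * number; returns NULL when no registered pipe matches.
+ */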
+struct ia_css_pipe *
+find_pipe_by_num(uint32_t pipe_num)
+{
+ unsigned int i;
+ for (i = 0; i < IA_CSS_PIPELINE_NUM_MAX; i++) {
+ if (my_css.all_pipes[i] &&
+ ia_css_pipe_get_pipe_num(my_css.all_pipes[i]) == pipe_num) {
+ return my_css.all_pipes[i];
+ }
+ }
+ return NULL;
+}
+
+static void sh_css_pipe_free_acc_binaries (
+ struct ia_css_pipe *pipe)
+{
+ struct ia_css_pipeline *pipeline;
+ struct ia_css_pipeline_stage *stage;
+
+ assert(pipe != NULL);
+ if (pipe == NULL) {
+ IA_CSS_ERROR("NULL input pointer");
+ return;
+ }
+ pipeline = &pipe->pipeline;
+
+ /* loop through the stages and unload them */
+ for (stage = pipeline->stages; stage; stage = stage->next) {
+ struct ia_css_fw_info *firmware = (struct ia_css_fw_info *)
+ stage->firmware;
+ if (firmware)
+ ia_css_pipe_unload_extension(pipe, firmware);
+ }
+}
+
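+/* Tear down a pipe: free the mode-specific frame buffers (continuous frames,
+ * metadata, TNR/delay frames), destroy an internal copy pipe if one was
+ * created, release the per-pipe scaler LUT and shading table, destroy the
+ * pipeline and finally release the pipe number and the pipe structure itself.
+ * The pipe must already have been detached from its stream.
+ */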
+enum ia_css_err
+ia_css_pipe_destroy(struct ia_css_pipe *pipe)
+{
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ IA_CSS_ENTER("pipe = %p", pipe);
+
+ if (pipe == NULL) {
+ IA_CSS_LEAVE_ERR(IA_CSS_ERR_INVALID_ARGUMENTS);
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+
+ if (pipe->stream != NULL) {
+ IA_CSS_LOG("ia_css_stream_destroy not called!");
+ IA_CSS_LEAVE_ERR(IA_CSS_ERR_INVALID_ARGUMENTS);
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+
+ switch (pipe->config.mode) {
+ case IA_CSS_PIPE_MODE_PREVIEW:
+ /* need to take into account that this function is also called
+ on the internal copy pipe */
+ if (pipe->mode == IA_CSS_PIPE_ID_PREVIEW) {
+ ia_css_frame_free_multiple(NUM_CONTINUOUS_FRAMES,
+ pipe->continuous_frames);
+ ia_css_metadata_free_multiple(NUM_CONTINUOUS_FRAMES,
+ pipe->cont_md_buffers);
+ if (pipe->pipe_settings.preview.copy_pipe) {
+ err = ia_css_pipe_destroy(pipe->pipe_settings.preview.copy_pipe);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_pipe_destroy(): "
+ "destroyed internal copy pipe err=%d\n", err);
+ }
+ }
+ break;
+ case IA_CSS_PIPE_MODE_VIDEO:
+ if (pipe->mode == IA_CSS_PIPE_ID_VIDEO) {
+ ia_css_frame_free_multiple(NUM_CONTINUOUS_FRAMES,
+ pipe->continuous_frames);
+ ia_css_metadata_free_multiple(NUM_CONTINUOUS_FRAMES,
+ pipe->cont_md_buffers);
+ if (pipe->pipe_settings.video.copy_pipe) {
+ err = ia_css_pipe_destroy(pipe->pipe_settings.video.copy_pipe);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_pipe_destroy(): "
+ "destroyed internal copy pipe err=%d\n", err);
+ }
+ }
+#ifndef ISP2401
+ ia_css_frame_free_multiple(NUM_VIDEO_TNR_FRAMES, pipe->pipe_settings.video.tnr_frames);
+#else
+ ia_css_frame_free_multiple(NUM_TNR_FRAMES, pipe->pipe_settings.video.tnr_frames);
+#endif
+ ia_css_frame_free_multiple(MAX_NUM_VIDEO_DELAY_FRAMES, pipe->pipe_settings.video.delay_frames);
+ break;
+ case IA_CSS_PIPE_MODE_CAPTURE:
+ ia_css_frame_free_multiple(MAX_NUM_VIDEO_DELAY_FRAMES, pipe->pipe_settings.capture.delay_frames);
+ break;
+ case IA_CSS_PIPE_MODE_ACC:
+ sh_css_pipe_free_acc_binaries(pipe);
+ break;
+ case IA_CSS_PIPE_MODE_COPY:
+ break;
+ case IA_CSS_PIPE_MODE_YUVPP:
+ break;
+ }
+
+#ifndef ISP2401
+ if (pipe->scaler_pp_lut != mmgr_NULL) {
+ hmm_free(pipe->scaler_pp_lut);
+ pipe->scaler_pp_lut = mmgr_NULL;
+ }
+#else
+ sh_css_params_free_gdc_lut(pipe->scaler_pp_lut);
+ pipe->scaler_pp_lut = mmgr_NULL;
+#endif
+
+ my_css.active_pipes[ia_css_pipe_get_pipe_num(pipe)] = NULL;
+ sh_css_pipe_free_shading_table(pipe);
+
+ ia_css_pipeline_destroy(&pipe->pipeline);
+ pipe_release_pipe_num(ia_css_pipe_get_pipe_num(pipe));
+
+ /* Temporarily, not every sh_css_pipe has an acc_extension. */
+ if (pipe->config.acc_extension) {
+ ia_css_pipe_unload_extension(pipe, pipe->config.acc_extension);
+ }
+ kfree(pipe);
+ IA_CSS_LEAVE("err = %d", err);
+ return err;
+}
+
+void
+ia_css_uninit(void)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_uninit() enter: void\n");
+#if WITH_PC_MONITORING
+ sh_css_print("PC_MONITORING: %s() -- started\n", __func__);
+ print_pc_histogram();
+#endif
+
+ sh_css_params_free_default_gdc_lut();
+
+
+ /* TODO: JB: implement decent check and handling of freeing mipi frames */
+ //assert(ref_count_mipi_allocation == 0); //mipi frames are not freed
+ /* cleanup generic data */
+ sh_css_params_uninit();
+ ia_css_refcount_uninit();
+
+ ia_css_rmgr_uninit();
+
+#if !defined(HAS_NO_INPUT_FORMATTER)
+ /* needed for reprogramming the inputformatter after power cycle of css */
+ ifmtr_set_if_blocking_mode_reset = true;
+#endif
+
+ if (fw_explicitly_loaded == false) {
+ ia_css_unload_firmware();
+ }
+ ia_css_spctrl_unload_fw(SP0_ID);
+ sh_css_sp_set_sp_running(false);
+#if defined(HAS_BL)
+ ia_css_blctrl_unload_fw();
+#endif
+#if defined(USE_INPUT_SYSTEM_VERSION_2) || defined(USE_INPUT_SYSTEM_VERSION_2401)
+ /* check and free any remaining mipi frames */
+ free_mipi_frames(NULL);
+#endif
+
+ sh_css_sp_reset_global_vars();
+
+#if !defined(HAS_NO_INPUT_SYSTEM)
+ ia_css_isys_uninit();
+#endif
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_uninit() leave: return_void\n");
+}
+
+#ifndef ISP2401
+/* Deprecated, this is an HRT backend function (memory_access.h) */
+static void
+sh_css_mmu_set_page_table_base_index(hrt_data base_index)
+{
+ int i;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "sh_css_mmu_set_page_table_base_index() enter: base_index=0x%08x\n",base_index);
+ my_css.page_table_base_index = base_index;
+ for (i = 0; i < (int)N_MMU_ID; i++) {
+ mmu_ID_t mmu_id = (mmu_ID_t)i;
+ mmu_set_page_table_base_index(mmu_id, base_index);
+ mmu_invalidate_cache(mmu_id);
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "sh_css_mmu_set_page_table_base_index() leave: return_void\n");
+}
+
+#endif
+#if defined(HAS_IRQ_MAP_VERSION_2)
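+/* Drain the virtual IRQ channel until the hardware reports no more pending
+ * interrupts, translating each channel id into IA_CSS_IRQ_INFO_* bits. The
+ * accumulated bit mask is returned through irq_infos (which may be NULL).
+ */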
+enum ia_css_err ia_css_irq_translate(
+ unsigned int *irq_infos)
+{
+ virq_id_t irq;
+ enum hrt_isp_css_irq_status status = hrt_isp_css_irq_status_more_irqs;
+ unsigned int infos = 0;
+
+/* irq_infos can be NULL, but that would make the function useless */
+/* assert(irq_infos != NULL); */
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_irq_translate() enter: irq_infos=%p\n",irq_infos);
+
+ while (status == hrt_isp_css_irq_status_more_irqs) {
+ status = virq_get_channel_id(&irq);
+ if (status == hrt_isp_css_irq_status_error)
+ return IA_CSS_ERR_INTERNAL_ERROR;
+
+#if WITH_PC_MONITORING
+ sh_css_print("PC_MONITORING: %s() irq = %d, "
+ "sh_binary_running set to 0\n", __func__, irq);
+ sh_binary_running = 0;
+#endif
+
+ switch (irq) {
+ case virq_sp:
+ /* When SP goes to idle, info is available in the
+ * event queue. */
+ infos |= IA_CSS_IRQ_INFO_EVENTS_READY;
+ break;
+ case virq_isp:
+ break;
+#if !defined(HAS_NO_INPUT_SYSTEM)
+ case virq_isys_sof:
+ infos |= IA_CSS_IRQ_INFO_CSS_RECEIVER_SOF;
+ break;
+ case virq_isys_eof:
+ infos |= IA_CSS_IRQ_INFO_CSS_RECEIVER_EOF;
+ break;
+ case virq_isys_csi:
+ infos |= IA_CSS_IRQ_INFO_INPUT_SYSTEM_ERROR;
+ break;
+#endif
+#if !defined(HAS_NO_INPUT_FORMATTER)
+ case virq_ifmt0_id:
+ infos |= IA_CSS_IRQ_INFO_IF_ERROR;
+ break;
+#endif
+ case virq_dma:
+ infos |= IA_CSS_IRQ_INFO_DMA_ERROR;
+ break;
+ case virq_sw_pin_0:
+ infos |= sh_css_get_sw_interrupt_value(0);
+ break;
+ case virq_sw_pin_1:
+ infos |= sh_css_get_sw_interrupt_value(1);
+ /* pqiao TODO: also assumption here */
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (irq_infos)
+ *irq_infos = infos;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_irq_translate() "
+ "leave: irq_infos=%p\n", infos);
+
+ return IA_CSS_SUCCESS;
+}
+
+enum ia_css_err ia_css_irq_enable(
+ enum ia_css_irq_info info,
+ bool enable)
+{
+ virq_id_t irq = N_virq_id;
+ IA_CSS_ENTER("info=%d, enable=%d", info, enable);
+
+ switch (info) {
+#if !defined(HAS_NO_INPUT_FORMATTER)
+ case IA_CSS_IRQ_INFO_CSS_RECEIVER_SOF:
+ irq = virq_isys_sof;
+ break;
+ case IA_CSS_IRQ_INFO_CSS_RECEIVER_EOF:
+ irq = virq_isys_eof;
+ break;
+ case IA_CSS_IRQ_INFO_INPUT_SYSTEM_ERROR:
+ irq = virq_isys_csi;
+ break;
+#endif
+#if !defined(HAS_NO_INPUT_FORMATTER)
+ case IA_CSS_IRQ_INFO_IF_ERROR:
+ irq = virq_ifmt0_id;
+ break;
+#endif
+ case IA_CSS_IRQ_INFO_DMA_ERROR:
+ irq = virq_dma;
+ break;
+ case IA_CSS_IRQ_INFO_SW_0:
+ irq = virq_sw_pin_0;
+ break;
+ case IA_CSS_IRQ_INFO_SW_1:
+ irq = virq_sw_pin_1;
+ break;
+ default:
+ IA_CSS_LEAVE_ERR(IA_CSS_ERR_INVALID_ARGUMENTS);
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+
+ cnd_virq_enable_channel(irq, enable);
+
+ IA_CSS_LEAVE_ERR(IA_CSS_SUCCESS);
+ return IA_CSS_SUCCESS;
+}
+
+#else
+#error "sh_css.c: IRQ MAP must be one of \
+ {IRQ_MAP_VERSION_2}"
+#endif
+
+static unsigned int
+sh_css_get_sw_interrupt_value(unsigned int irq)
+{
+ unsigned int irq_value;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "sh_css_get_sw_interrupt_value() enter: irq=%d\n",irq);
+ irq_value = sh_css_sp_get_sw_interrupt_value(irq);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "sh_css_get_sw_interrupt_value() leave: irq_value=%d\n",irq_value);
+ return irq_value;
+}
+
+/* Configure and load the copy binary; the next binary is used to
+ determine whether the copy binary needs to do left padding. */
+static enum ia_css_err load_copy_binary(
+ struct ia_css_pipe *pipe,
+ struct ia_css_binary *copy_binary,
+ struct ia_css_binary *next_binary)
+{
+ struct ia_css_frame_info copy_out_info, copy_in_info, copy_vf_info;
+ unsigned int left_padding;
+ enum ia_css_err err;
+ struct ia_css_binary_descr copy_descr;
+
+ /* next_binary can be NULL */
+ assert(pipe != NULL);
+ assert(copy_binary != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "load_copy_binary() enter:\n");
+
+ if (next_binary != NULL) {
+ copy_out_info = next_binary->in_frame_info;
+ left_padding = next_binary->left_padding;
+ } else {
+ copy_out_info = pipe->output_info[0];
+ copy_vf_info = pipe->vf_output_info[0];
+ ia_css_frame_info_set_format(&copy_vf_info, IA_CSS_FRAME_FORMAT_YUV_LINE);
+ left_padding = 0;
+ }
+
+ ia_css_pipe_get_copy_binarydesc(pipe, &copy_descr,
+ &copy_in_info, &copy_out_info, NULL /* TODO: &copy_vf_info */);
+ err = ia_css_binary_find(&copy_descr, copy_binary);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+ copy_binary->left_padding = left_padding;
+ return IA_CSS_SUCCESS;
+}
+
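+/* (Re)allocate the ring of continuous raw frames and metadata buffers for a
+ * preview or video pipe. The number of frames depends on whether the stream
+ * is continuous and whether this is the initial allocation; the frame
+ * geometry is taken from the binary's input frame info (the full sensor
+ * input on the 2401 input system) and the raw format honours
+ * pack_raw_pixels.
+ */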
+static enum ia_css_err
+alloc_continuous_frames(
+ struct ia_css_pipe *pipe, bool init_time)
+{
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ struct ia_css_frame_info ref_info;
+ enum ia_css_pipe_id pipe_id;
+ bool continuous;
+ unsigned int i, idx;
+ unsigned int num_frames;
+ struct ia_css_pipe *capture_pipe = NULL;
+
+ IA_CSS_ENTER_PRIVATE("pipe = %p, init_time = %d", pipe, init_time);
+
+ if ((pipe == NULL) || (pipe->stream == NULL)) {
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_INVALID_ARGUMENTS);
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+
+ pipe_id = pipe->mode;
+ continuous = pipe->stream->config.continuous;
+
+ if (continuous) {
+ if (init_time) {
+ num_frames = pipe->stream->config.init_num_cont_raw_buf;
+ pipe->stream->continuous_pipe = pipe;
+ } else {
+ num_frames = pipe->stream->config.target_num_cont_raw_buf;
+ }
+ } else {
+ num_frames = NUM_ONLINE_INIT_CONTINUOUS_FRAMES;
+ }
+
+ if (pipe_id == IA_CSS_PIPE_ID_PREVIEW) {
+ ref_info = pipe->pipe_settings.preview.preview_binary.in_frame_info;
+ } else if (pipe_id == IA_CSS_PIPE_ID_VIDEO) {
+ ref_info = pipe->pipe_settings.video.video_binary.in_frame_info;
+ } else {
+ /* should not happen */
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_INTERNAL_ERROR);
+ return IA_CSS_ERR_INTERNAL_ERROR;
+ }
+
+#if defined(USE_INPUT_SYSTEM_VERSION_2401)
+ /* For CSI2+, the continuous frame will hold the full input frame */
+ ref_info.res.width = pipe->stream->config.input_config.input_res.width;
+ ref_info.res.height = pipe->stream->config.input_config.input_res.height;
+
+ /* Ensure padded width is aligned for 2401 */
+ ref_info.padded_width = CEIL_MUL(ref_info.res.width, 2 * ISP_VEC_NELEMS);
+#endif
+
+#if !defined(HAS_NO_PACKED_RAW_PIXELS)
+ if (pipe->stream->config.pack_raw_pixels) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "alloc_continuous_frames() IA_CSS_FRAME_FORMAT_RAW_PACKED\n");
+ ref_info.format = IA_CSS_FRAME_FORMAT_RAW_PACKED;
+ } else
+#endif
+ {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "alloc_continuous_frames() IA_CSS_FRAME_FORMAT_RAW\n");
+ ref_info.format = IA_CSS_FRAME_FORMAT_RAW;
+ }
+
+ /* Write format back to binary */
+ if (pipe_id == IA_CSS_PIPE_ID_PREVIEW) {
+ pipe->pipe_settings.preview.preview_binary.in_frame_info.format = ref_info.format;
+ capture_pipe = pipe->pipe_settings.preview.capture_pipe;
+ } else if (pipe_id == IA_CSS_PIPE_ID_VIDEO) {
+ pipe->pipe_settings.video.video_binary.in_frame_info.format = ref_info.format;
+ capture_pipe = pipe->pipe_settings.video.capture_pipe;
+ } else {
+ /* should not happen */
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_INTERNAL_ERROR);
+ return IA_CSS_ERR_INTERNAL_ERROR;
+ }
+
+ if (init_time)
+ idx = 0;
+ else
+ idx = pipe->stream->config.init_num_cont_raw_buf;
+
+ for (i = idx; i < NUM_CONTINUOUS_FRAMES; i++) {
+ /* free previous frame */
+ if (pipe->continuous_frames[i]) {
+ ia_css_frame_free(pipe->continuous_frames[i]);
+ pipe->continuous_frames[i] = NULL;
+ }
+ /* free previous metadata buffer */
+ ia_css_metadata_free(pipe->cont_md_buffers[i]);
+ pipe->cont_md_buffers[i] = NULL;
+
+ /* check if new frame needed */
+ if (i < num_frames) {
+ /* allocate new frame */
+ err = ia_css_frame_allocate_from_info(
+ &pipe->continuous_frames[i],
+ &ref_info);
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ /* allocate metadata buffer */
+ pipe->cont_md_buffers[i] = ia_css_metadata_allocate(
+ &pipe->stream->info.metadata_info);
+ }
+ }
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_SUCCESS);
+ return IA_CSS_SUCCESS;
+}
+
+enum ia_css_err
+ia_css_alloc_continuous_frame_remain(struct ia_css_stream *stream)
+{
+ if (stream == NULL)
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ return alloc_continuous_frames(stream->continuous_pipe, false);
+}
+
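+/* Select the binaries for a preview pipe: the preview binary itself (possibly
+ * re-selected with a yuv_line output when vf_pp turns out to be needed), an
+ * optional vf_pp binary for digital zoom / YUV downscaling / output format
+ * conversion, and an optional ISP copy binary for offline input.
+ */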
+static enum ia_css_err
+load_preview_binaries(struct ia_css_pipe *pipe)
+{
+ struct ia_css_frame_info prev_in_info,
+ prev_bds_out_info,
+ prev_out_info,
+ prev_vf_info;
+ struct ia_css_binary_descr preview_descr;
+ bool online;
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ bool continuous, need_vf_pp = false;
+ bool need_isp_copy_binary = false;
+#ifdef USE_INPUT_SYSTEM_VERSION_2401
+ bool sensor = false;
+#endif
+ /* preview only have 1 output pin now */
+ struct ia_css_frame_info *pipe_out_info = &pipe->output_info[0];
+#ifdef ISP2401
+ struct ia_css_preview_settings *mycs = &pipe->pipe_settings.preview;
+
+#endif
+
+ IA_CSS_ENTER_PRIVATE("");
+ assert(pipe != NULL);
+ assert(pipe->stream != NULL);
+ assert(pipe->mode == IA_CSS_PIPE_ID_PREVIEW);
+
+ online = pipe->stream->config.online;
+ continuous = pipe->stream->config.continuous;
+#ifdef USE_INPUT_SYSTEM_VERSION_2401
+ sensor = pipe->stream->config.mode == IA_CSS_INPUT_MODE_SENSOR;
+#endif
+
+#ifndef ISP2401
+ if (pipe->pipe_settings.preview.preview_binary.info)
+#else
+ if (mycs->preview_binary.info)
+#endif
+ return IA_CSS_SUCCESS;
+
+ err = ia_css_util_check_input(&pipe->stream->config, false, false);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+ err = ia_css_frame_check_info(pipe_out_info);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+
+ /* Note: the current selection of vf_pp binary and
+ * parameterization of the preview binary contains a few pieces
+ * of hardcoded knowledge. This needs to be cleaned up such that
+ * the binary selection becomes more generic.
+ * The vf_pp binary is needed if one or more of the following features
+ * are required:
+ * 1. YUV downscaling.
+ * 2. Digital zoom.
+ * 3. An output format that is not supported by the preview binary.
+ * In practice this means something other than yuv_line or nv12.
+ * The decision whether the vf_pp binary is needed for YUV downscaling is
+ * made after the preview binary selection, since some preview binaries
+ * can perform the requested YUV downscaling.
+ * */
+ need_vf_pp = pipe->config.enable_dz;
+ need_vf_pp |= pipe_out_info->format != IA_CSS_FRAME_FORMAT_YUV_LINE &&
+ !(pipe_out_info->format == IA_CSS_FRAME_FORMAT_NV12 ||
+ pipe_out_info->format == IA_CSS_FRAME_FORMAT_NV12_16 ||
+ pipe_out_info->format == IA_CSS_FRAME_FORMAT_NV12_TILEY);
+
+ /* Preview step 1 */
+ if (pipe->vf_yuv_ds_input_info.res.width)
+ prev_vf_info = pipe->vf_yuv_ds_input_info;
+ else
+ prev_vf_info = *pipe_out_info;
+ /* If vf_pp is needed, then preview must output yuv_line.
+ * The exception is when vf_pp is manually disabled, which is only
+ * done in combination with a pipeline extension that requires
+ * yuv_line as input.
+ */
+ if (need_vf_pp)
+ ia_css_frame_info_set_format(&prev_vf_info,
+ IA_CSS_FRAME_FORMAT_YUV_LINE);
+
+ err = ia_css_pipe_get_preview_binarydesc(
+ pipe,
+ &preview_descr,
+ &prev_in_info,
+ &prev_bds_out_info,
+ &prev_out_info,
+ &prev_vf_info);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+ err = ia_css_binary_find(&preview_descr,
+#ifndef ISP2401
+ &pipe->pipe_settings.preview.preview_binary);
+#else
+ &mycs->preview_binary);
+#endif
+ if (err != IA_CSS_SUCCESS)
+ return err;
+
+#ifdef ISP2401
+ /* The delay latency determines the number of invalid frames after
+ * a stream is started. */
+ pipe->num_invalid_frames = pipe->dvs_frame_delay;
+ pipe->info.num_invalid_frames = pipe->num_invalid_frames;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "load_preview_binaries() num_invalid_frames=%d dvs_frame_delay=%d\n",
+ pipe->num_invalid_frames, pipe->dvs_frame_delay);
+
+#endif
+ /* The vf_pp binary is needed when (further) YUV downscaling is required */
+#ifndef ISP2401
+ need_vf_pp |= pipe->pipe_settings.preview.preview_binary.out_frame_info[0].res.width != pipe_out_info->res.width;
+ need_vf_pp |= pipe->pipe_settings.preview.preview_binary.out_frame_info[0].res.height != pipe_out_info->res.height;
+#else
+ need_vf_pp |= mycs->preview_binary.out_frame_info[0].res.width != pipe_out_info->res.width;
+ need_vf_pp |= mycs->preview_binary.out_frame_info[0].res.height != pipe_out_info->res.height;
+#endif
+
+ /* When vf_pp is needed, then the output format of the selected
+ * preview binary must be yuv_line. If this is not the case,
+ * then the preview binary selection is done again.
+ */
+ if (need_vf_pp &&
+#ifndef ISP2401
+ (pipe->pipe_settings.preview.preview_binary.out_frame_info[0].format != IA_CSS_FRAME_FORMAT_YUV_LINE)) {
+#else
+ (mycs->preview_binary.out_frame_info[0].format != IA_CSS_FRAME_FORMAT_YUV_LINE)) {
+#endif
+
+ /* Preview step 2 */
+ if (pipe->vf_yuv_ds_input_info.res.width)
+ prev_vf_info = pipe->vf_yuv_ds_input_info;
+ else
+ prev_vf_info = *pipe_out_info;
+
+ ia_css_frame_info_set_format(&prev_vf_info,
+ IA_CSS_FRAME_FORMAT_YUV_LINE);
+
+ err = ia_css_pipe_get_preview_binarydesc(
+ pipe,
+ &preview_descr,
+ &prev_in_info,
+ &prev_bds_out_info,
+ &prev_out_info,
+ &prev_vf_info);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+ err = ia_css_binary_find(&preview_descr,
+#ifndef ISP2401
+ &pipe->pipe_settings.preview.preview_binary);
+#else
+ &mycs->preview_binary);
+#endif
+ if (err != IA_CSS_SUCCESS)
+ return err;
+ }
+
+ if (need_vf_pp) {
+ struct ia_css_binary_descr vf_pp_descr;
+
+ /* Viewfinder post-processing */
+ ia_css_pipe_get_vfpp_binarydesc(pipe, &vf_pp_descr,
+#ifndef ISP2401
+ &pipe->pipe_settings.preview.preview_binary.out_frame_info[0],
+#else
+ &mycs->preview_binary.out_frame_info[0],
+#endif
+ pipe_out_info);
+ err = ia_css_binary_find(&vf_pp_descr,
+#ifndef ISP2401
+ &pipe->pipe_settings.preview.vf_pp_binary);
+#else
+ &mycs->vf_pp_binary);
+#endif
+ if (err != IA_CSS_SUCCESS)
+ return err;
+ }
+
+#ifdef USE_INPUT_SYSTEM_VERSION_2401
+ /* When the input system is 2401, only the Direct Sensor Mode
+ * Offline Preview uses the ISP copy binary.
+ */
+ need_isp_copy_binary = !online && sensor;
+#else
+#ifndef ISP2401
+ need_isp_copy_binary = !online && !continuous;
+#else
+ /* About pipe->stream->config.mode == IA_CSS_INPUT_MODE_MEMORY:
+ * This is typically the case with SkyCam (which has no input system) but it also applies to all cases
+ * where the driver chooses memory based input frames. In these cases, a copy binary (which typically
+ * copies sensor data to DDR) does not have much use.
+ */
+ need_isp_copy_binary = !online && !continuous && !(pipe->stream->config.mode == IA_CSS_INPUT_MODE_MEMORY);
+#endif
+#endif
+
+ /* Copy */
+ if (need_isp_copy_binary) {
+ err = load_copy_binary(pipe,
+#ifndef ISP2401
+ &pipe->pipe_settings.preview.copy_binary,
+ &pipe->pipe_settings.preview.preview_binary);
+#else
+ &mycs->copy_binary,
+ &mycs->preview_binary);
+#endif
+ if (err != IA_CSS_SUCCESS)
+ return err;
+ }
+
+ if (pipe->shading_table) {
+ ia_css_shading_table_free(pipe->shading_table);
+ pipe->shading_table = NULL;
+ }
+
+ return IA_CSS_SUCCESS;
+}
+
+static void
+ia_css_binary_unload(struct ia_css_binary *binary)
+{
+ ia_css_binary_destroy_isp_parameters(binary);
+}
+
+static enum ia_css_err
+unload_preview_binaries(struct ia_css_pipe *pipe)
+{
+ IA_CSS_ENTER_PRIVATE("pipe = %p", pipe);
+
+ if ((pipe == NULL) || (pipe->mode != IA_CSS_PIPE_ID_PREVIEW)) {
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_INVALID_ARGUMENTS);
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+ ia_css_binary_unload(&pipe->pipe_settings.preview.copy_binary);
+ ia_css_binary_unload(&pipe->pipe_settings.preview.preview_binary);
+ ia_css_binary_unload(&pipe->pipe_settings.preview.vf_pp_binary);
+
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_SUCCESS);
+ return IA_CSS_SUCCESS;
+}
+
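+/* Walk a firmware extension list and return the last entry that has output
+ * enabled, or NULL when none of them does.
+ */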
+static const struct ia_css_fw_info *last_output_firmware(
+ const struct ia_css_fw_info *fw)
+{
+ const struct ia_css_fw_info *last_fw = NULL;
+/* fw can be NULL */
+ IA_CSS_ENTER_LEAVE_PRIVATE("");
+
+ for (; fw; fw = fw->next) {
+ const struct ia_css_fw_info *info = fw;
+ if (info->info.isp.sp.enable.output)
+ last_fw = fw;
+ }
+ return last_fw;
+}
+
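+/* Append one pipeline stage per firmware extension. The output frame of each
+ * extension that produces output becomes the input frame of the next one,
+ * the externally visible output frame is only attached to the last
+ * output-producing extension, and the first created stage (and the first
+ * stage with vf_veceven enabled) are reported back through my_stage and
+ * vf_stage.
+ */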
+static enum ia_css_err add_firmwares(
+ struct ia_css_pipeline *me,
+ struct ia_css_binary *binary,
+ const struct ia_css_fw_info *fw,
+ const struct ia_css_fw_info *last_fw,
+ unsigned int binary_mode,
+ struct ia_css_frame *in_frame,
+ struct ia_css_frame *out_frame,
+ struct ia_css_frame *vf_frame,
+ struct ia_css_pipeline_stage **my_stage,
+ struct ia_css_pipeline_stage **vf_stage)
+{
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ struct ia_css_pipeline_stage *extra_stage = NULL;
+ struct ia_css_pipeline_stage_desc stage_desc;
+
+/* all args can be NULL ??? */
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "add_firmwares() enter:\n");
+
+ for (; fw; fw = fw->next) {
+ struct ia_css_frame *out[IA_CSS_BINARY_MAX_OUTPUT_PORTS] = {NULL};
+ struct ia_css_frame *in = NULL;
+ struct ia_css_frame *vf = NULL;
+ if ((fw == last_fw) && (fw->info.isp.sp.enable.out_frame != 0)) {
+ out[0] = out_frame;
+ }
+ if (fw->info.isp.sp.enable.in_frame != 0) {
+ in = in_frame;
+ }
+ if (fw->info.isp.sp.enable.out_frame != 0) {
+ vf = vf_frame;
+ }
+ ia_css_pipe_get_firmwares_stage_desc(&stage_desc, binary,
+ out, in, vf, fw, binary_mode);
+ err = ia_css_pipeline_create_and_add_stage(me,
+ &stage_desc,
+ &extra_stage);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+ if (fw->info.isp.sp.enable.output != 0)
+ in_frame = extra_stage->args.out_frame[0];
+ if (my_stage && !*my_stage && extra_stage)
+ *my_stage = extra_stage;
+ if (vf_stage && !*vf_stage && extra_stage &&
+ fw->info.isp.sp.enable.vf_veceven)
+ *vf_stage = extra_stage;
+ }
+ return err;
+}
+
+static enum ia_css_err add_vf_pp_stage(
+ struct ia_css_pipe *pipe,
+ struct ia_css_frame *in_frame,
+ struct ia_css_frame *out_frame,
+ struct ia_css_binary *vf_pp_binary,
+ struct ia_css_pipeline_stage **vf_pp_stage)
+{
+
+ struct ia_css_pipeline *me = NULL;
+ const struct ia_css_fw_info *last_fw = NULL;
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ struct ia_css_frame *out_frames[IA_CSS_BINARY_MAX_OUTPUT_PORTS];
+ struct ia_css_pipeline_stage_desc stage_desc;
+
+/* out_frame can be NULL ??? */
+
+ if (pipe == NULL)
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ if (in_frame == NULL)
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ if (vf_pp_binary == NULL)
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ if (vf_pp_stage == NULL)
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+
+ ia_css_pipe_util_create_output_frames(out_frames);
+ me = &pipe->pipeline;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "add_vf_pp_stage() enter:\n");
+
+ *vf_pp_stage = NULL;
+
+ last_fw = last_output_firmware(pipe->vf_stage);
+ if (!pipe->extra_config.disable_vf_pp) {
+ if (last_fw) {
+ ia_css_pipe_util_set_output_frames(out_frames, 0, NULL);
+ ia_css_pipe_get_generic_stage_desc(&stage_desc, vf_pp_binary,
+ out_frames, in_frame, NULL);
+ } else {
+ ia_css_pipe_util_set_output_frames(out_frames, 0, out_frame);
+ ia_css_pipe_get_generic_stage_desc(&stage_desc, vf_pp_binary,
+ out_frames, in_frame, NULL);
+ }
+ err = ia_css_pipeline_create_and_add_stage(me, &stage_desc, vf_pp_stage);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+ in_frame = (*vf_pp_stage)->args.out_frame[0];
+ }
+ err = add_firmwares(me, vf_pp_binary, pipe->vf_stage, last_fw,
+ IA_CSS_BINARY_MODE_VF_PP,
+ in_frame, out_frame, NULL,
+ vf_pp_stage, NULL);
+ return err;
+}
+
+static enum ia_css_err add_yuv_scaler_stage(
+ struct ia_css_pipe *pipe,
+ struct ia_css_pipeline *me,
+ struct ia_css_frame *in_frame,
+ struct ia_css_frame *out_frame,
+ struct ia_css_frame *internal_out_frame,
+ struct ia_css_binary *yuv_scaler_binary,
+ struct ia_css_pipeline_stage **pre_vf_pp_stage)
+{
+ const struct ia_css_fw_info *last_fw;
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ struct ia_css_frame *vf_frame = NULL;
+ struct ia_css_frame *out_frames[IA_CSS_BINARY_MAX_OUTPUT_PORTS];
+ struct ia_css_pipeline_stage_desc stage_desc;
+
+ /* out_frame can be NULL ??? */
+ assert(in_frame != NULL);
+ assert(pipe != NULL);
+ assert(me != NULL);
+ assert(yuv_scaler_binary != NULL);
+ assert(pre_vf_pp_stage != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "add_yuv_scaler_stage() enter:\n");
+
+ *pre_vf_pp_stage = NULL;
+ ia_css_pipe_util_create_output_frames(out_frames);
+
+ last_fw = last_output_firmware(pipe->output_stage);
+
+ if (last_fw) {
+ ia_css_pipe_util_set_output_frames(out_frames, 0, NULL);
+ ia_css_pipe_get_generic_stage_desc(&stage_desc,
+ yuv_scaler_binary, out_frames, in_frame, vf_frame);
+ } else {
+ ia_css_pipe_util_set_output_frames(out_frames, 0, out_frame);
+ ia_css_pipe_util_set_output_frames(out_frames, 1, internal_out_frame);
+ ia_css_pipe_get_generic_stage_desc(&stage_desc,
+ yuv_scaler_binary, out_frames, in_frame, vf_frame);
+ }
+ err = ia_css_pipeline_create_and_add_stage(me,
+ &stage_desc,
+ pre_vf_pp_stage);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+ in_frame = (*pre_vf_pp_stage)->args.out_frame[0];
+
+ err = add_firmwares(me, yuv_scaler_binary, pipe->output_stage, last_fw,
+ IA_CSS_BINARY_MODE_CAPTURE_PP,
+ in_frame, out_frame, vf_frame,
+ NULL, pre_vf_pp_stage);
+ /* If a firmware produces vf_pp output, we set that as vf_pp input */
+ (*pre_vf_pp_stage)->args.vf_downscale_log2 = yuv_scaler_binary->vf_downscale_log2;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "add_yuv_scaler_stage() leave:\n");
+ return err;
+}
+
+static enum ia_css_err add_capture_pp_stage(
+ struct ia_css_pipe *pipe,
+ struct ia_css_pipeline *me,
+ struct ia_css_frame *in_frame,
+ struct ia_css_frame *out_frame,
+ struct ia_css_binary *capture_pp_binary,
+ struct ia_css_pipeline_stage **capture_pp_stage)
+{
+ const struct ia_css_fw_info *last_fw = NULL;
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ struct ia_css_frame *vf_frame = NULL;
+ struct ia_css_frame *out_frames[IA_CSS_BINARY_MAX_OUTPUT_PORTS];
+ struct ia_css_pipeline_stage_desc stage_desc;
+
+ /* out_frame can be NULL ??? */
+ assert(in_frame != NULL);
+ assert(pipe != NULL);
+ assert(me != NULL);
+ assert(capture_pp_binary != NULL);
+ assert(capture_pp_stage != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "add_capture_pp_stage() enter:\n");
+
+ *capture_pp_stage = NULL;
+ ia_css_pipe_util_create_output_frames(out_frames);
+
+ last_fw = last_output_firmware(pipe->output_stage);
+ err = ia_css_frame_allocate_from_info(&vf_frame,
+ &capture_pp_binary->vf_frame_info);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+ if (last_fw) {
+ ia_css_pipe_util_set_output_frames(out_frames, 0, NULL);
+ ia_css_pipe_get_generic_stage_desc(&stage_desc,
+ capture_pp_binary, out_frames, NULL, vf_frame);
+ } else {
+ ia_css_pipe_util_set_output_frames(out_frames, 0, out_frame);
+ ia_css_pipe_get_generic_stage_desc(&stage_desc,
+ capture_pp_binary, out_frames, NULL, vf_frame);
+ }
+ err = ia_css_pipeline_create_and_add_stage(me,
+ &stage_desc,
+ capture_pp_stage);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+ err = add_firmwares(me, capture_pp_binary, pipe->output_stage, last_fw,
+ IA_CSS_BINARY_MODE_CAPTURE_PP,
+ in_frame, out_frame, vf_frame,
+ NULL, capture_pp_stage);
+ /* If a firmware produces vf_pp output, we set that as vf_pp input */
+ if (*capture_pp_stage) {
+ (*capture_pp_stage)->args.vf_downscale_log2 =
+ capture_pp_binary->vf_downscale_log2;
+ }
+ return err;
+}
+
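+/* Initialize the host side of the host<->SP buffer queues: reset the buffer
+ * record, program the IRQ mask for events, initialize the queues themselves
+ * and finally raise the host_sp_queues_initialized flag in SP DMEM so the SP
+ * can see that the queues are ready.
+ */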
+static void sh_css_setup_queues(void)
+{
+ const struct ia_css_fw_info *fw;
+ unsigned int HIVE_ADDR_host_sp_queues_initialized;
+
+ sh_css_hmm_buffer_record_init();
+
+ sh_css_event_init_irq_mask();
+
+ fw = &sh_css_sp_fw;
+ HIVE_ADDR_host_sp_queues_initialized =
+ fw->info.sp.host_sp_queues_initialized;
+
+ ia_css_bufq_init();
+
+ /* set "host_sp_queues_initialized" to "true" */
+ sp_dmem_store_uint32(SP0_ID,
+ (unsigned int)sp_address_of(host_sp_queues_initialized),
+ (uint32_t)(1));
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "sh_css_setup_queues() leave:\n");
+}
+
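+/* The init_*_frameinfo_* helpers below fill in the static frame structures
+ * used by the pipelines: they copy the frame info from the pipe, initialize
+ * the plane layout and bind the frame to the SP thread and queue id that
+ * match its buffer type.
+ */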
+static enum ia_css_err
+init_vf_frameinfo_defaults(struct ia_css_pipe *pipe,
+ struct ia_css_frame *vf_frame, unsigned int idx)
+{
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ unsigned int thread_id;
+ enum sh_css_queue_id queue_id;
+
+ assert(vf_frame != NULL);
+
+ sh_css_pipe_get_viewfinder_frame_info(pipe, &vf_frame->info, idx);
+ vf_frame->contiguous = false;
+ vf_frame->flash_state = IA_CSS_FRAME_FLASH_STATE_NONE;
+ ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(pipe), &thread_id);
+ ia_css_query_internal_queue_id(IA_CSS_BUFFER_TYPE_VF_OUTPUT_FRAME + idx, thread_id, &queue_id);
+ vf_frame->dynamic_queue_id = queue_id;
+ vf_frame->buf_type = IA_CSS_BUFFER_TYPE_VF_OUTPUT_FRAME + idx;
+
+ err = ia_css_frame_init_planes(vf_frame);
+ return err;
+}
+
+#ifdef USE_INPUT_SYSTEM_VERSION_2401
+static unsigned int
+get_crop_lines_for_bayer_order (
+ const struct ia_css_stream_config *config)
+{
+ assert(config != NULL);
+ if ((IA_CSS_BAYER_ORDER_BGGR == config->input_config.bayer_order)
+ || (IA_CSS_BAYER_ORDER_GBRG == config->input_config.bayer_order))
+ return 1;
+
+ return 0;
+}
+
+static unsigned int
+get_crop_columns_for_bayer_order (
+ const struct ia_css_stream_config *config)
+{
+ assert(config != NULL);
+ if ((IA_CSS_BAYER_ORDER_RGGB == config->input_config.bayer_order)
+ || (IA_CSS_BAYER_ORDER_GBRG == config->input_config.bayer_order))
+ return 1;
+
+ return 0;
+}
+
+/* Get the sum of all extra pixels in addition to the effective input;
+ * this includes the DVS envelope and filter run-in */
+static void get_pipe_extra_pixel(struct ia_css_pipe *pipe,
+ unsigned int *extra_row, unsigned int *extra_column)
+{
+ enum ia_css_pipe_id pipe_id = pipe->mode;
+ unsigned int left_cropping = 0, top_cropping = 0;
+ unsigned int i;
+ struct ia_css_resolution dvs_env = pipe->config.dvs_envelope;
+
+ /* The dvs envelope info may not be correctly sent down via pipe config
+ * The check is made and the correct value is populated in the binary info
+ * Use this value when computing crop, else excess lines may get trimmed
+ */
+ switch (pipe_id) {
+ case IA_CSS_PIPE_ID_PREVIEW:
+ if (pipe->pipe_settings.preview.preview_binary.info) {
+ left_cropping = pipe->pipe_settings.preview.preview_binary.info->sp.pipeline.left_cropping;
+ top_cropping = pipe->pipe_settings.preview.preview_binary.info->sp.pipeline.top_cropping;
+ }
+ dvs_env = pipe->pipe_settings.preview.preview_binary.dvs_envelope;
+ break;
+ case IA_CSS_PIPE_ID_VIDEO:
+ if (pipe->pipe_settings.video.video_binary.info) {
+ left_cropping = pipe->pipe_settings.video.video_binary.info->sp.pipeline.left_cropping;
+ top_cropping = pipe->pipe_settings.video.video_binary.info->sp.pipeline.top_cropping;
+ }
+ dvs_env = pipe->pipe_settings.video.video_binary.dvs_envelope;
+ break;
+ case IA_CSS_PIPE_ID_CAPTURE:
+ for (i = 0; i < pipe->pipe_settings.capture.num_primary_stage; i++) {
+ if (pipe->pipe_settings.capture.primary_binary[i].info) {
+ left_cropping += pipe->pipe_settings.capture.primary_binary[i].info->sp.pipeline.left_cropping;
+ top_cropping += pipe->pipe_settings.capture.primary_binary[i].info->sp.pipeline.top_cropping;
+ }
+ dvs_env.width += pipe->pipe_settings.capture.primary_binary[i].dvs_envelope.width;
+ dvs_env.height += pipe->pipe_settings.capture.primary_binary[i].dvs_envelope.height;
+ }
+ break;
+ default:
+ break;
+ }
+
+ *extra_row = top_cropping + dvs_env.height;
+ *extra_column = left_cropping + dvs_env.width;
+}
+
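+/* Compute the crop offsets that centre the effective resolution (plus the
+ * extra cropping and DVS envelope pixels the binaries need) inside the full
+ * input resolution, then shift by one line/column where needed so the ISP
+ * always sees a GRBG bayer order.
+ */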
+void
+ia_css_get_crop_offsets (
+ struct ia_css_pipe *pipe,
+ struct ia_css_frame_info *in_frame)
+{
+ unsigned int row = 0;
+ unsigned int column = 0;
+ struct ia_css_resolution *input_res;
+ struct ia_css_resolution *effective_res;
+ unsigned int extra_row = 0, extra_col = 0;
+ unsigned int min_reqd_height, min_reqd_width;
+
+ assert(pipe != NULL);
+ assert(pipe->stream != NULL);
+ assert(in_frame != NULL);
+
+ IA_CSS_ENTER_PRIVATE("pipe = %p effective_wd = %u effective_ht = %u",
+ pipe, pipe->config.input_effective_res.width,
+ pipe->config.input_effective_res.height);
+
+ input_res = &pipe->stream->config.input_config.input_res;
+#ifndef ISP2401
+ effective_res = &pipe->stream->config.input_config.effective_res;
+#else
+ effective_res = &pipe->config.input_effective_res;
+#endif
+
+ get_pipe_extra_pixel(pipe, &extra_row, &extra_col);
+
+ in_frame->raw_bayer_order = pipe->stream->config.input_config.bayer_order;
+
+ min_reqd_height = effective_res->height + extra_row;
+ min_reqd_width = effective_res->width + extra_col;
+
+ if (input_res->height > min_reqd_height) {
+ row = (input_res->height - min_reqd_height) / 2;
+ row &= ~0x1;
+ }
+ if (input_res->width > min_reqd_width) {
+ column = (input_res->width - min_reqd_width) / 2;
+ column &= ~0x1;
+ }
+
+ /*
+ * TODO:
+ * 1. Require the special support for RAW10 packed mode.
+ * 2. Require the special support for the online use cases.
+ */
+
+ /* The ISP expects GRBG bayer order; we skip one line and/or one column
+ * to compensate in case the input bayer order is different.
+ */
+ column += get_crop_columns_for_bayer_order(&pipe->stream->config);
+ row += get_crop_lines_for_bayer_order(&pipe->stream->config);
+
+ in_frame->crop_info.start_column = column;
+ in_frame->crop_info.start_line = row;
+
+ IA_CSS_LEAVE_PRIVATE("void start_col: %u start_row: %u", column, row);
+
+ return;
+}
+#endif
+
+static enum ia_css_err
+init_in_frameinfo_memory_defaults(struct ia_css_pipe *pipe,
+ struct ia_css_frame *frame, enum ia_css_frame_format format)
+{
+ struct ia_css_frame *in_frame;
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ unsigned int thread_id;
+ enum sh_css_queue_id queue_id;
+
+ assert(frame != NULL);
+ in_frame = frame;
+
+ in_frame->info.format = format;
+
+#ifdef USE_INPUT_SYSTEM_VERSION_2401
+ if (format == IA_CSS_FRAME_FORMAT_RAW)
+ in_frame->info.format = (pipe->stream->config.pack_raw_pixels) ?
+ IA_CSS_FRAME_FORMAT_RAW_PACKED : IA_CSS_FRAME_FORMAT_RAW;
+#endif
+
+
+ in_frame->info.res.width = pipe->stream->config.input_config.input_res.width;
+ in_frame->info.res.height = pipe->stream->config.input_config.input_res.height;
+ in_frame->info.raw_bit_depth =
+ ia_css_pipe_util_pipe_input_format_bpp(pipe);
+ ia_css_frame_info_set_width(&in_frame->info, pipe->stream->config.input_config.input_res.width, 0);
+ in_frame->contiguous = false;
+ in_frame->flash_state = IA_CSS_FRAME_FLASH_STATE_NONE;
+ ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(pipe), &thread_id);
+ ia_css_query_internal_queue_id(IA_CSS_BUFFER_TYPE_INPUT_FRAME, thread_id, &queue_id);
+ in_frame->dynamic_queue_id = queue_id;
+ in_frame->buf_type = IA_CSS_BUFFER_TYPE_INPUT_FRAME;
+#ifdef USE_INPUT_SYSTEM_VERSION_2401
+ ia_css_get_crop_offsets(pipe, &in_frame->info);
+#endif
+ err = ia_css_frame_init_planes(in_frame);
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "init_in_frameinfo_memory_defaults() bayer_order = %d:\n", in_frame->info.raw_bayer_order);
+
+ return err;
+}
+
+static enum ia_css_err
+init_out_frameinfo_defaults(struct ia_css_pipe *pipe,
+ struct ia_css_frame *out_frame, unsigned int idx)
+{
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ unsigned int thread_id;
+ enum sh_css_queue_id queue_id;
+
+ assert(out_frame != NULL);
+
+ sh_css_pipe_get_output_frame_info(pipe, &out_frame->info, idx);
+ out_frame->contiguous = false;
+ out_frame->flash_state = IA_CSS_FRAME_FLASH_STATE_NONE;
+ ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(pipe), &thread_id);
+ ia_css_query_internal_queue_id(IA_CSS_BUFFER_TYPE_OUTPUT_FRAME + idx, thread_id, &queue_id);
+ out_frame->dynamic_queue_id = queue_id;
+ out_frame->buf_type = IA_CSS_BUFFER_TYPE_OUTPUT_FRAME + idx;
+ err = ia_css_frame_init_planes(out_frame);
+
+ return err;
+}
+
+/* Create stages for video pipe */
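+/* Stage order (a summary of the code below): an optional copy stage for
+ * offline input, the video binary (which produces the viewfinder directly
+ * when it has a second output pin), an optional vf_pp stage otherwise,
+ * optional yuv_scaler stages and any output extension firmware.
+ */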
+static enum ia_css_err create_host_video_pipeline(struct ia_css_pipe *pipe)
+{
+ struct ia_css_pipeline_stage_desc stage_desc;
+ struct ia_css_binary *copy_binary, *video_binary,
+ *yuv_scaler_binary, *vf_pp_binary;
+ struct ia_css_pipeline_stage *copy_stage = NULL;
+ struct ia_css_pipeline_stage *video_stage = NULL;
+ struct ia_css_pipeline_stage *yuv_scaler_stage = NULL;
+ struct ia_css_pipeline_stage *vf_pp_stage = NULL;
+ struct ia_css_pipeline *me;
+ struct ia_css_frame *in_frame = NULL;
+ struct ia_css_frame *out_frame;
+ struct ia_css_frame *out_frames[IA_CSS_BINARY_MAX_OUTPUT_PORTS];
+ struct ia_css_frame *vf_frame = NULL;
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ bool need_copy = false;
+ bool need_vf_pp = false;
+ bool need_yuv_pp = false;
+ unsigned num_output_pins;
+ bool need_in_frameinfo_memory = false;
+
+ unsigned int i, num_yuv_scaler;
+ bool *is_output_stage = NULL;
+
+ IA_CSS_ENTER_PRIVATE("pipe = %p", pipe);
+ if ((pipe == NULL) || (pipe->stream == NULL) || (pipe->mode != IA_CSS_PIPE_ID_VIDEO)) {
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_INVALID_ARGUMENTS);
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+ ia_css_pipe_util_create_output_frames(out_frames);
+ out_frame = &pipe->out_frame_struct;
+
+ /* pipeline already created as part of create_host_pipeline_structure */
+ me = &pipe->pipeline;
+ ia_css_pipeline_clean(me);
+
+ me->dvs_frame_delay = pipe->dvs_frame_delay;
+
+#ifdef USE_INPUT_SYSTEM_VERSION_2401
+ /* When the input system is 2401, always enable 'in_frameinfo_memory'
+ * except for the following: online or continuous
+ */
+ need_in_frameinfo_memory = !(pipe->stream->config.online || pipe->stream->config.continuous);
+#else
+ /* Construct in_frame info (only in case we have dynamic input) */
+ need_in_frameinfo_memory = pipe->stream->config.mode == IA_CSS_INPUT_MODE_MEMORY;
+#endif
+
+ /* Construct in_frame info (only in case we have dynamic input) */
+ if (need_in_frameinfo_memory) {
+ in_frame = &pipe->in_frame_struct;
+ err = init_in_frameinfo_memory_defaults(pipe, in_frame, IA_CSS_FRAME_FORMAT_RAW);
+ if (err != IA_CSS_SUCCESS)
+ goto ERR;
+ }
+
+ out_frame->data = 0;
+ err = init_out_frameinfo_defaults(pipe, out_frame, 0);
+ if (err != IA_CSS_SUCCESS)
+ goto ERR;
+
+ if (pipe->enable_viewfinder[IA_CSS_PIPE_OUTPUT_STAGE_0]) {
+ vf_frame = &pipe->vf_frame_struct;
+ vf_frame->data = 0;
+ err = init_vf_frameinfo_defaults(pipe, vf_frame, 0);
+ if (err != IA_CSS_SUCCESS)
+ goto ERR;
+ }
+
+ copy_binary = &pipe->pipe_settings.video.copy_binary;
+ video_binary = &pipe->pipe_settings.video.video_binary;
+ vf_pp_binary = &pipe->pipe_settings.video.vf_pp_binary;
+ num_output_pins = video_binary->info->num_output_pins;
+
+ yuv_scaler_binary = pipe->pipe_settings.video.yuv_scaler_binary;
+ num_yuv_scaler = pipe->pipe_settings.video.num_yuv_scaler;
+ is_output_stage = pipe->pipe_settings.video.is_output_stage;
+
+ need_copy = (copy_binary != NULL && copy_binary->info != NULL);
+ need_vf_pp = (vf_pp_binary != NULL && vf_pp_binary->info != NULL);
+ need_yuv_pp = (yuv_scaler_binary != NULL && yuv_scaler_binary->info != NULL);
+
+ if (need_copy) {
+ ia_css_pipe_util_set_output_frames(out_frames, 0, NULL);
+ ia_css_pipe_get_generic_stage_desc(&stage_desc, copy_binary,
+ out_frames, NULL, NULL);
+ err = ia_css_pipeline_create_and_add_stage(me,
+ &stage_desc,
+ &copy_stage);
+ if (err != IA_CSS_SUCCESS)
+ goto ERR;
+ in_frame = me->stages->args.out_frame[0];
+ } else if (pipe->stream->config.continuous) {
+#ifdef USE_INPUT_SYSTEM_VERSION_2401
+ /* When continuous is enabled, configure in_frame with the
+ * last pipe, which is the copy pipe.
+ */
+ in_frame = pipe->stream->last_pipe->continuous_frames[0];
+#else
+ in_frame = pipe->continuous_frames[0];
+#endif
+ }
+
+ ia_css_pipe_util_set_output_frames(out_frames, 0, need_yuv_pp ? NULL : out_frame);
+
+ /* when the video binary supports a second output pin,
+ it can directly produce the vf_frame. */
+ if (need_vf_pp) {
+ ia_css_pipe_get_generic_stage_desc(&stage_desc, video_binary,
+ out_frames, in_frame, NULL);
+ } else {
+ ia_css_pipe_get_generic_stage_desc(&stage_desc, video_binary,
+ out_frames, in_frame, vf_frame);
+ }
+ err = ia_css_pipeline_create_and_add_stage(me,
+ &stage_desc,
+ &video_stage);
+ if (err != IA_CSS_SUCCESS)
+ goto ERR;
+
+ /* If we use the copy binary instead of the video binary, the input must be YUV instead of raw */
+ if (video_stage) {
+ video_stage->args.copy_vf =
+ video_binary->info->sp.pipeline.mode == IA_CSS_BINARY_MODE_COPY;
+ video_stage->args.copy_output = video_stage->args.copy_vf;
+ }
+
+ /* when the video binary supports only 1 output pin, vf_pp is needed to
+ produce the vf_frame.*/
+ if (need_vf_pp && video_stage) {
+ in_frame = video_stage->args.out_vf_frame;
+ err = add_vf_pp_stage(pipe, in_frame, vf_frame, vf_pp_binary,
+ &vf_pp_stage);
+ if (err != IA_CSS_SUCCESS)
+ goto ERR;
+ }
+ if (video_stage) {
+ int frm;
+#ifndef ISP2401
+ for (frm = 0; frm < NUM_VIDEO_TNR_FRAMES; frm++) {
+#else
+ for (frm = 0; frm < NUM_TNR_FRAMES; frm++) {
+#endif
+ video_stage->args.tnr_frames[frm] =
+ pipe->pipe_settings.video.tnr_frames[frm];
+ }
+ for (frm = 0; frm < MAX_NUM_VIDEO_DELAY_FRAMES; frm++) {
+ video_stage->args.delay_frames[frm] =
+ pipe->pipe_settings.video.delay_frames[frm];
+ }
+ }
+
+ /* Append Extension on Video out, if enabled */
+ if (!need_vf_pp && video_stage && pipe->config.acc_extension &&
+ (pipe->config.acc_extension->info.isp.type == IA_CSS_ACC_OUTPUT))
+ {
+ struct ia_css_frame *out = NULL;
+ struct ia_css_frame *in = NULL;
+
+ if ((pipe->config.acc_extension->info.isp.sp.enable.output) &&
+ (pipe->config.acc_extension->info.isp.sp.enable.in_frame) &&
+ (pipe->config.acc_extension->info.isp.sp.enable.out_frame)) {
+
+ /* In/Out Frame mapping to support output frame extension.*/
+ out = video_stage->args.out_frame[0];
+ err = ia_css_frame_allocate_from_info(&in, &(pipe->output_info[0]));
+ if (err != IA_CSS_SUCCESS)
+ goto ERR;
+ video_stage->args.out_frame[0] = in;
+ }
+
+ err = add_firmwares( me, video_binary, pipe->output_stage,
+ last_output_firmware(pipe->output_stage),
+ IA_CSS_BINARY_MODE_VIDEO,
+ in, out, NULL, &video_stage, NULL);
+ if (err != IA_CSS_SUCCESS)
+ goto ERR;
+ }
+
+ if (need_yuv_pp && video_stage) {
+ struct ia_css_frame *tmp_in_frame = video_stage->args.out_frame[0];
+ struct ia_css_frame *tmp_out_frame = NULL;
+
+ for (i = 0; i < num_yuv_scaler; i++) {
+ if (is_output_stage[i]) {
+ tmp_out_frame = out_frame;
+ } else {
+ tmp_out_frame = NULL;
+ }
+ err = add_yuv_scaler_stage(pipe, me, tmp_in_frame, tmp_out_frame,
+ NULL,
+ &yuv_scaler_binary[i],
+ &yuv_scaler_stage);
+
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ /* we use output port 1 as internal output port */
+ if (yuv_scaler_stage)
+ tmp_in_frame = yuv_scaler_stage->args.out_frame[1];
+ }
+ }
+
+ pipe->pipeline.acquire_isp_each_stage = false;
+ ia_css_pipeline_finalize_stages(&pipe->pipeline, pipe->stream->config.continuous);
+
+ERR:
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+}
+
+static enum ia_css_err
+create_host_acc_pipeline(struct ia_css_pipe *pipe)
+{
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ unsigned int i;
+
+ IA_CSS_ENTER_PRIVATE("pipe = %p", pipe);
+ if ((pipe == NULL) || (pipe->stream == NULL)) {
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_INVALID_ARGUMENTS);
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+
+ pipe->pipeline.num_execs = pipe->config.acc_num_execs;
+ /* Reset pipe_qos_config to default disable all QOS extension stages */
+ if (pipe->config.acc_extension)
+ pipe->pipeline.pipe_qos_config = 0;
+
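+ /* Add every firmware in the vf_stage list as an acceleration stage. */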
+{
+ const struct ia_css_fw_info *fw = pipe->vf_stage;
+ for (i = 0; fw; fw = fw->next) {
+ err = sh_css_pipeline_add_acc_stage(&pipe->pipeline, fw);
+ if (err != IA_CSS_SUCCESS)
+ goto ERR;
+ }
+}
+
+ for (i = 0; i < pipe->config.num_acc_stages; i++) {
+ struct ia_css_fw_info *fw = pipe->config.acc_stages[i];
+ err = sh_css_pipeline_add_acc_stage(&pipe->pipeline, fw);
+ if (err != IA_CSS_SUCCESS)
+ goto ERR;
+ }
+
+ ia_css_pipeline_finalize_stages(&pipe->pipeline, pipe->stream->config.continuous);
+
+ERR:
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+}
+
+/* Create stages for preview */
+static enum ia_css_err
+create_host_preview_pipeline(struct ia_css_pipe *pipe)
+{
+ struct ia_css_pipeline_stage *copy_stage = NULL;
+ struct ia_css_pipeline_stage *preview_stage = NULL;
+ struct ia_css_pipeline_stage *vf_pp_stage = NULL;
+ struct ia_css_pipeline_stage_desc stage_desc;
+ struct ia_css_pipeline *me = NULL;
+ struct ia_css_binary *copy_binary, *preview_binary, *vf_pp_binary = NULL;
+ struct ia_css_frame *in_frame = NULL;
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ struct ia_css_frame *out_frame;
+ struct ia_css_frame *out_frames[IA_CSS_BINARY_MAX_OUTPUT_PORTS];
+ bool need_in_frameinfo_memory = false;
+#ifdef USE_INPUT_SYSTEM_VERSION_2401
+ bool sensor = false;
+ bool buffered_sensor = false;
+ bool online = false;
+ bool continuous = false;
+#endif
+
+ IA_CSS_ENTER_PRIVATE("pipe = %p", pipe);
+ if ((pipe == NULL) || (pipe->stream == NULL) || (pipe->mode != IA_CSS_PIPE_ID_PREVIEW)) {
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_INVALID_ARGUMENTS);
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+
+
+ ia_css_pipe_util_create_output_frames(out_frames);
+ /* pipeline already created as part of create_host_pipeline_structure */
+ me = &pipe->pipeline;
+ ia_css_pipeline_clean(me);
+
+#ifdef USE_INPUT_SYSTEM_VERSION_2401
+ /* When the input system is 2401, always enable 'in_frameinfo_memory'
+ * except for the following:
+ * - Direct Sensor Mode Online Preview
+ * - Buffered Sensor Mode Online Preview
+ * - Direct Sensor Mode Continuous Preview
+ * - Buffered Sensor Mode Continuous Preview
+ */
+ sensor = (pipe->stream->config.mode == IA_CSS_INPUT_MODE_SENSOR);
+ buffered_sensor = (pipe->stream->config.mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR);
+ online = pipe->stream->config.online;
+ continuous = pipe->stream->config.continuous;
+ need_in_frameinfo_memory =
+ !((sensor && (online || continuous)) || (buffered_sensor && (online || continuous)));
+#else
+ /* Construct in_frame info (only in case we have dynamic input) */
+ need_in_frameinfo_memory = pipe->stream->config.mode == IA_CSS_INPUT_MODE_MEMORY;
+#endif
+ if (need_in_frameinfo_memory) {
+ err = init_in_frameinfo_memory_defaults(pipe, &me->in_frame, IA_CSS_FRAME_FORMAT_RAW);
+ if (err != IA_CSS_SUCCESS)
+ goto ERR;
+
+ in_frame = &me->in_frame;
+ } else {
+ in_frame = NULL;
+ }
+
+ err = init_out_frameinfo_defaults(pipe, &me->out_frame[0], 0);
+ if (err != IA_CSS_SUCCESS)
+ goto ERR;
+ out_frame = &me->out_frame[0];
+
+ copy_binary = &pipe->pipe_settings.preview.copy_binary;
+ preview_binary = &pipe->pipe_settings.preview.preview_binary;
+ if (pipe->pipe_settings.preview.vf_pp_binary.info)
+ vf_pp_binary = &pipe->pipe_settings.preview.vf_pp_binary;
+
+ if (pipe->pipe_settings.preview.copy_binary.info) {
+ ia_css_pipe_util_set_output_frames(out_frames, 0, NULL);
+ ia_css_pipe_get_generic_stage_desc(&stage_desc, copy_binary,
+ out_frames, NULL, NULL);
+ err = ia_css_pipeline_create_and_add_stage(me,
+ &stage_desc,
+ &copy_stage);
+ if (err != IA_CSS_SUCCESS)
+ goto ERR;
+ in_frame = me->stages->args.out_frame[0];
+#ifndef ISP2401
+ } else {
+#else
+ } else if (pipe->stream->config.continuous) {
+#endif
+#ifdef USE_INPUT_SYSTEM_VERSION_2401
+ /* When continuous is enabled, configure in_frame with the
+ * last pipe, which is the copy pipe.
+ */
+ if (continuous || !online) {
+ in_frame = pipe->stream->last_pipe->continuous_frames[0];
+ }
+#else
+ in_frame = pipe->continuous_frames[0];
+#endif
+ }
+
+ if (vf_pp_binary) {
+ ia_css_pipe_util_set_output_frames(out_frames, 0, NULL);
+ ia_css_pipe_get_generic_stage_desc(&stage_desc, preview_binary,
+ out_frames, in_frame, NULL);
+ } else {
+ ia_css_pipe_util_set_output_frames(out_frames, 0, out_frame);
+ ia_css_pipe_get_generic_stage_desc(&stage_desc, preview_binary,
+ out_frames, in_frame, NULL);
+ }
+ err = ia_css_pipeline_create_and_add_stage(me,
+ &stage_desc,
+ &preview_stage);
+ if (err != IA_CSS_SUCCESS)
+ goto ERR;
+ /* If we use copy iso preview, the input must be yuv iso raw */
+ preview_stage->args.copy_vf =
+ preview_binary->info->sp.pipeline.mode == IA_CSS_BINARY_MODE_COPY;
+ preview_stage->args.copy_output = !preview_stage->args.copy_vf;
+ if (preview_stage->args.copy_vf && !preview_stage->args.out_vf_frame) {
+ /* in case of copy, use the vf frame as output frame */
+ preview_stage->args.out_vf_frame =
+ preview_stage->args.out_frame[0];
+ }
+ if (vf_pp_binary) {
+ if (preview_binary->info->sp.pipeline.mode == IA_CSS_BINARY_MODE_COPY)
+ in_frame = preview_stage->args.out_vf_frame;
+ else
+ in_frame = preview_stage->args.out_frame[0];
+ err = add_vf_pp_stage(pipe, in_frame, out_frame, vf_pp_binary,
+ &vf_pp_stage);
+ if (err != IA_CSS_SUCCESS)
+ goto ERR;
+ }
+
+ pipe->pipeline.acquire_isp_each_stage = false;
+ ia_css_pipeline_finalize_stages(&pipe->pipeline, pipe->stream->config.continuous);
+
+ERR:
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+}
+
+static void send_raw_frames(struct ia_css_pipe *pipe)
+{
+ if (pipe->stream->config.continuous) {
+ unsigned int i;
+
+ sh_css_update_host2sp_cont_num_raw_frames
+ (pipe->stream->config.init_num_cont_raw_buf, true);
+ sh_css_update_host2sp_cont_num_raw_frames
+ (pipe->stream->config.target_num_cont_raw_buf, false);
+
+ /* Hand-over all the SP-internal buffers */
+ for (i = 0; i < pipe->stream->config.init_num_cont_raw_buf; i++) {
+ sh_css_update_host2sp_offline_frame(i,
+ pipe->continuous_frames[i], pipe->cont_md_buffers[i]);
+ }
+ }
+
+ return;
+}
+
+static enum ia_css_err
+preview_start(struct ia_css_pipe *pipe)
+{
+ struct ia_css_pipeline *me;
+ struct ia_css_binary *copy_binary, *preview_binary, *vf_pp_binary = NULL;
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ struct ia_css_pipe *copy_pipe, *capture_pipe;
+ struct ia_css_pipe *acc_pipe;
+ enum sh_css_pipe_config_override copy_ovrd;
+ enum ia_css_input_mode preview_pipe_input_mode;
+
+ IA_CSS_ENTER_PRIVATE("pipe = %p", pipe);
+ if ((pipe == NULL) || (pipe->stream == NULL) || (pipe->mode != IA_CSS_PIPE_ID_PREVIEW)) {
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_INVALID_ARGUMENTS);
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+
+ me = &pipe->pipeline;
+
+ preview_pipe_input_mode = pipe->stream->config.mode;
+
+ copy_pipe = pipe->pipe_settings.preview.copy_pipe;
+ capture_pipe = pipe->pipe_settings.preview.capture_pipe;
+ acc_pipe = pipe->pipe_settings.preview.acc_pipe;
+
+ copy_binary = &pipe->pipe_settings.preview.copy_binary;
+ preview_binary = &pipe->pipe_settings.preview.preview_binary;
+ if (pipe->pipe_settings.preview.vf_pp_binary.info)
+ vf_pp_binary = &pipe->pipe_settings.preview.vf_pp_binary;
+
+ sh_css_metrics_start_frame();
+
+#if defined(USE_INPUT_SYSTEM_VERSION_2) || defined(USE_INPUT_SYSTEM_VERSION_2401)
+ /* multi stream video needs mipi buffers */
+ err = send_mipi_frames(pipe);
+ if (err != IA_CSS_SUCCESS)
+ goto ERR;
+#endif
+ send_raw_frames(pipe);
+
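+ /* copy_ovrd is a bitmask of SP thread ids: the preview pipe's own
+ * thread and, when continuous capture is active, the capture pipe's
+ * thread as well. */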
+ {
+ unsigned int thread_id;
+
+ ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(pipe), &thread_id);
+ copy_ovrd = 1 << thread_id;
+
+ if (pipe->stream->cont_capt) {
+ ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(capture_pipe), &thread_id);
+ copy_ovrd |= 1 << thread_id;
+ }
+ }
+
+ /* Construct and load the copy pipe */
+ if (pipe->stream->config.continuous) {
+ sh_css_sp_init_pipeline(&copy_pipe->pipeline,
+ IA_CSS_PIPE_ID_COPY,
+ (uint8_t)ia_css_pipe_get_pipe_num(copy_pipe),
+ false,
+ pipe->stream->config.pixels_per_clock == 2, false,
+ false, pipe->required_bds_factor,
+ copy_ovrd,
+ pipe->stream->config.mode,
+ &pipe->stream->config.metadata_config,
+#ifndef ISP2401
+ &pipe->stream->info.metadata_info
+#else
+ &pipe->stream->info.metadata_info,
+#endif
+#if !defined(HAS_NO_INPUT_SYSTEM)
+#ifndef ISP2401
+ , pipe->stream->config.source.port.port
+#else
+ pipe->stream->config.source.port.port,
+#endif
+#endif
+#ifndef ISP2401
+ );
+#else
+ &pipe->config.internal_frame_origin_bqs_on_sctbl,
+ pipe->stream->isp_params_configs);
+#endif
+
+ /* make the preview pipe start with mem mode input, copy handles
+ the actual mode */
+ preview_pipe_input_mode = IA_CSS_INPUT_MODE_MEMORY;
+ }
+
+ /* Construct and load the capture pipe */
+ if (pipe->stream->cont_capt) {
+ sh_css_sp_init_pipeline(&capture_pipe->pipeline,
+ IA_CSS_PIPE_ID_CAPTURE,
+ (uint8_t)ia_css_pipe_get_pipe_num(capture_pipe),
+ capture_pipe->config.default_capture_config.enable_xnr != 0,
+ capture_pipe->stream->config.pixels_per_clock == 2,
+ true, /* continuous */
+ false, /* offline */
+ capture_pipe->required_bds_factor,
+ 0,
+ IA_CSS_INPUT_MODE_MEMORY,
+ &pipe->stream->config.metadata_config,
+#ifndef ISP2401
+ &pipe->stream->info.metadata_info
+#else
+ &pipe->stream->info.metadata_info,
+#endif
+#if !defined(HAS_NO_INPUT_SYSTEM)
+#ifndef ISP2401
+ , (mipi_port_ID_t)0
+#else
+ (mipi_port_ID_t)0,
+#endif
+#endif
+#ifndef ISP2401
+ );
+#else
+ &capture_pipe->config.internal_frame_origin_bqs_on_sctbl,
+ capture_pipe->stream->isp_params_configs);
+#endif
+ }
+
+ if (acc_pipe) {
+ sh_css_sp_init_pipeline(&acc_pipe->pipeline,
+ IA_CSS_PIPE_ID_ACC,
+ (uint8_t) ia_css_pipe_get_pipe_num(acc_pipe),
+ false,
+ pipe->stream->config.pixels_per_clock == 2,
+ false, /* continuous */
+ false, /* offline */
+ pipe->required_bds_factor,
+ 0,
+ IA_CSS_INPUT_MODE_MEMORY,
+ NULL,
+#ifndef ISP2401
+ NULL
+#else
+ NULL,
+#endif
+#if !defined(HAS_NO_INPUT_SYSTEM)
+#ifndef ISP2401
+ , (mipi_port_ID_t) 0
+#else
+ (mipi_port_ID_t) 0,
+#endif
+#endif
+#ifndef ISP2401
+ );
+#else
+ &pipe->config.internal_frame_origin_bqs_on_sctbl,
+ pipe->stream->isp_params_configs);
+#endif
+ }
+
+ start_pipe(pipe, copy_ovrd, preview_pipe_input_mode);
+
+#if defined(USE_INPUT_SYSTEM_VERSION_2) || defined(USE_INPUT_SYSTEM_VERSION_2401)
+ERR:
+#endif
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+}
+
+enum ia_css_err
+ia_css_pipe_enqueue_buffer(struct ia_css_pipe *pipe,
+ const struct ia_css_buffer *buffer)
+{
+ enum ia_css_err return_err = IA_CSS_SUCCESS;
+ unsigned int thread_id;
+ enum sh_css_queue_id queue_id;
+ struct ia_css_pipeline *pipeline;
+ struct ia_css_pipeline_stage *stage;
+ struct ia_css_rmgr_vbuf_handle p_vbuf;
+ struct ia_css_rmgr_vbuf_handle *h_vbuf;
+ struct sh_css_hmm_buffer ddr_buffer;
+ enum ia_css_buffer_type buf_type;
+ enum ia_css_pipe_id pipe_id;
+ bool ret_err;
+
+ IA_CSS_ENTER("pipe=%p, buffer=%p", pipe, buffer);
+
+ if ((pipe == NULL) || (buffer == NULL)) {
+ IA_CSS_LEAVE_ERR(IA_CSS_ERR_INVALID_ARGUMENTS);
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+
+ buf_type = buffer->type;
+ /* following code will be enabled when IA_CSS_BUFFER_TYPE_SEC_OUTPUT_FRAME
+ is removed */
+#if 0
+ if (buf_type == IA_CSS_BUFFER_TYPE_OUTPUT_FRAME) {
+ bool found_pipe = false;
+ for (i = 0; i < IA_CSS_PIPE_MAX_OUTPUT_STAGE; i++) {
+ if ((buffer->data.frame->info.res.width == pipe->output_info[i].res.width) &&
+ (buffer->data.frame->info.res.height == pipe->output_info[i].res.height)) {
+ buf_type += i;
+ found_pipe = true;
+ break;
+ }
+ }
+ if (!found_pipe)
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+ if (buf_type == IA_CSS_BUFFER_TYPE_VF_OUTPUT_FRAME) {
+ bool found_pipe = false;
+ for (i = 0; i < IA_CSS_PIPE_MAX_OUTPUT_STAGE; i++) {
+ if ((buffer->data.frame->info.res.width == pipe->vf_output_info[i].res.width) &&
+ (buffer->data.frame->info.res.height == pipe->vf_output_info[i].res.height)) {
+ buf_type += i;
+ found_pipe = true;
+ break;
+ }
+ }
+ if (!found_pipe)
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+#endif
+ pipe_id = pipe->mode;
+
+ IA_CSS_LOG("pipe_id=%d, buf_type=%d", pipe_id, buf_type);
+
+
+ assert(pipe_id < IA_CSS_PIPE_ID_NUM);
+ assert(buf_type < IA_CSS_NUM_DYNAMIC_BUFFER_TYPE);
+ if ((buf_type == IA_CSS_BUFFER_TYPE_INVALID) ||
+ (buf_type >= IA_CSS_NUM_DYNAMIC_BUFFER_TYPE) ||
+ (pipe_id >= IA_CSS_PIPE_ID_NUM)) {
+ IA_CSS_LEAVE_ERR(IA_CSS_ERR_INTERNAL_ERROR);
+ return IA_CSS_ERR_INTERNAL_ERROR;
+ }
+
+ ret_err = ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(pipe), &thread_id);
+ if (!ret_err) {
+ IA_CSS_LEAVE_ERR(IA_CSS_ERR_INVALID_ARGUMENTS);
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+
+ ret_err = ia_css_query_internal_queue_id(buf_type, thread_id, &queue_id);
+ if (!ret_err) {
+ IA_CSS_LEAVE_ERR(IA_CSS_ERR_INVALID_ARGUMENTS);
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+
+ if ((queue_id <= SH_CSS_INVALID_QUEUE_ID) || (queue_id >= SH_CSS_MAX_NUM_QUEUES)) {
+ IA_CSS_LEAVE_ERR(IA_CSS_ERR_INVALID_ARGUMENTS);
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+
+ if (!sh_css_sp_is_running()) {
+ IA_CSS_LOG("SP is not running!");
+ IA_CSS_LEAVE_ERR(IA_CSS_ERR_RESOURCE_NOT_AVAILABLE);
+ /* SP is not running. The queues are not valid */
+ return IA_CSS_ERR_RESOURCE_NOT_AVAILABLE;
+ }
+
+
+ pipeline = &pipe->pipeline;
+
+ assert(pipeline != NULL ||
+ pipe_id == IA_CSS_PIPE_ID_COPY ||
+ pipe_id == IA_CSS_PIPE_ID_ACC);
+
+ assert(sizeof(NULL) <= sizeof(ddr_buffer.kernel_ptr));
+ ddr_buffer.kernel_ptr = HOST_ADDRESS(NULL);
+ ddr_buffer.cookie_ptr = buffer->driver_cookie;
+ ddr_buffer.timing_data = buffer->timing_data;
+
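+ /* Fill the DDR buffer descriptor payload according to the buffer type. */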
+ if (buf_type == IA_CSS_BUFFER_TYPE_3A_STATISTICS) {
+ if (buffer->data.stats_3a == NULL) {
+ IA_CSS_LEAVE_ERR(IA_CSS_ERR_INVALID_ARGUMENTS);
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+ ddr_buffer.kernel_ptr = HOST_ADDRESS(buffer->data.stats_3a);
+ ddr_buffer.payload.s3a = *buffer->data.stats_3a;
+ } else if (buf_type == IA_CSS_BUFFER_TYPE_DIS_STATISTICS) {
+ if (buffer->data.stats_dvs == NULL) {
+ IA_CSS_LEAVE_ERR(IA_CSS_ERR_INVALID_ARGUMENTS);
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+ ddr_buffer.kernel_ptr = HOST_ADDRESS(buffer->data.stats_dvs);
+ ddr_buffer.payload.dis = *buffer->data.stats_dvs;
+ } else if (buf_type == IA_CSS_BUFFER_TYPE_METADATA) {
+ if (buffer->data.metadata == NULL) {
+ IA_CSS_LEAVE_ERR(IA_CSS_ERR_INVALID_ARGUMENTS);
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+ ddr_buffer.kernel_ptr = HOST_ADDRESS(buffer->data.metadata);
+ ddr_buffer.payload.metadata = *buffer->data.metadata;
+ } else if ((buf_type == IA_CSS_BUFFER_TYPE_INPUT_FRAME)
+ || (buf_type == IA_CSS_BUFFER_TYPE_OUTPUT_FRAME)
+ || (buf_type == IA_CSS_BUFFER_TYPE_VF_OUTPUT_FRAME)
+ || (buf_type == IA_CSS_BUFFER_TYPE_SEC_OUTPUT_FRAME)
+ || (buf_type == IA_CSS_BUFFER_TYPE_SEC_VF_OUTPUT_FRAME)) {
+ if (buffer->data.frame == NULL) {
+ IA_CSS_LEAVE_ERR(IA_CSS_ERR_INVALID_ARGUMENTS);
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+ ddr_buffer.kernel_ptr = HOST_ADDRESS(buffer->data.frame);
+ ddr_buffer.payload.frame.frame_data = buffer->data.frame->data;
+ ddr_buffer.payload.frame.flashed = 0;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_pipe_enqueue_buffer() buf_type=%d, data(DDR address)=0x%x\n",
+ buf_type, buffer->data.frame->data);
+
+
+#if CONFIG_ON_FRAME_ENQUEUE()
+ return_err = set_config_on_frame_enqueue(
+ &buffer->data.frame->info,
+ &ddr_buffer.payload.frame);
+ if (IA_CSS_SUCCESS != return_err) {
+ IA_CSS_LEAVE_ERR(return_err);
+ return return_err;
+ }
+#endif
+ }
+
+ /* start of test for using rmgr for acq/rel memory */
+ p_vbuf.vptr = 0;
+ p_vbuf.count = 0;
+ p_vbuf.size = sizeof(struct sh_css_hmm_buffer);
+ h_vbuf = &p_vbuf;
+ /* TODO: change next to correct pool for optimization */
+ ia_css_rmgr_acq_vbuf(hmm_buffer_pool, &h_vbuf);
+
+ assert(h_vbuf != NULL);
+ assert(h_vbuf->vptr != 0x0);
+
+ if ((h_vbuf == NULL) || (h_vbuf->vptr == 0x0)) {
+ IA_CSS_LEAVE_ERR(IA_CSS_ERR_INTERNAL_ERROR);
+ return IA_CSS_ERR_INTERNAL_ERROR;
+ }
+
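+ /* Store the host-side buffer descriptor into the acquired vbuf;
+ * its DDR address is what gets enqueued to the SP queues below. */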
+ mmgr_store(h_vbuf->vptr,
+ (void *)(&ddr_buffer),
+ sizeof(struct sh_css_hmm_buffer));
+ if ((buf_type == IA_CSS_BUFFER_TYPE_3A_STATISTICS)
+ || (buf_type == IA_CSS_BUFFER_TYPE_DIS_STATISTICS)
+ || (buf_type == IA_CSS_BUFFER_TYPE_LACE_STATISTICS)) {
+ if (pipeline == NULL) {
+ ia_css_rmgr_rel_vbuf(hmm_buffer_pool, &h_vbuf);
+ IA_CSS_LOG("pipeline is empty!");
+ IA_CSS_LEAVE_ERR(IA_CSS_ERR_INTERNAL_ERROR);
+ return IA_CSS_ERR_INTERNAL_ERROR;
+ }
+
+ for (stage = pipeline->stages; stage; stage = stage->next) {
+ /* The SP will read the params
+ after it got empty 3a and dis */
+ if (STATS_ENABLED(stage)) {
+ /* there is a stage that needs it */
+ return_err = ia_css_bufq_enqueue_buffer(thread_id,
+ queue_id,
+ (uint32_t)h_vbuf->vptr);
+ }
+ }
+ } else if ((buf_type == IA_CSS_BUFFER_TYPE_INPUT_FRAME)
+ || (buf_type == IA_CSS_BUFFER_TYPE_OUTPUT_FRAME)
+ || (buf_type == IA_CSS_BUFFER_TYPE_VF_OUTPUT_FRAME)
+ || (buf_type == IA_CSS_BUFFER_TYPE_SEC_OUTPUT_FRAME)
+ || (buf_type == IA_CSS_BUFFER_TYPE_SEC_VF_OUTPUT_FRAME)
+ || (buf_type == IA_CSS_BUFFER_TYPE_METADATA)) {
+ return_err = ia_css_bufq_enqueue_buffer(thread_id,
+ queue_id,
+ (uint32_t)h_vbuf->vptr);
+#if defined(SH_CSS_ENABLE_PER_FRAME_PARAMS)
+ if ((return_err == IA_CSS_SUCCESS) && (IA_CSS_BUFFER_TYPE_OUTPUT_FRAME == buf_type)) {
+ IA_CSS_LOG("pfp: enqueued OF %d to q %d thread %d",
+ ddr_buffer.payload.frame.frame_data,
+ queue_id, thread_id);
+ }
+#endif
+
+ }
+
+ if (return_err == IA_CSS_SUCCESS) {
+#ifndef ISP2401
+ bool found_record = false;
+ found_record = sh_css_hmm_buffer_record_acquire(
+#else
+ struct sh_css_hmm_buffer_record *hmm_buffer_record = NULL;
+
+ hmm_buffer_record = sh_css_hmm_buffer_record_acquire(
+#endif
+ h_vbuf, buf_type,
+ HOST_ADDRESS(ddr_buffer.kernel_ptr));
+#ifndef ISP2401
+ if (found_record == true) {
+#else
+ if (hmm_buffer_record) {
+#endif
+ IA_CSS_LOG("send vbuf=0x%x", h_vbuf);
+ } else {
+ return_err = IA_CSS_ERR_INTERNAL_ERROR;
+ IA_CSS_ERROR("hmm_buffer_record[]: no available slots\n");
+ }
+ }
+
+ /*
+ * Tell the SP which queues are not empty,
+ * by sending the software event.
+ */
+ if (return_err == IA_CSS_SUCCESS) {
+ if (!sh_css_sp_is_running()) {
+ /* SP is not running. The queues are not valid */
+ IA_CSS_LOG("SP is not running!");
+ IA_CSS_LEAVE_ERR(IA_CSS_ERR_RESOURCE_NOT_AVAILABLE);
+ return IA_CSS_ERR_RESOURCE_NOT_AVAILABLE;
+ }
+ return_err = ia_css_bufq_enqueue_psys_event(
+ IA_CSS_PSYS_SW_EVENT_BUFFER_ENQUEUED,
+ (uint8_t)thread_id,
+ queue_id,
+ 0);
+ } else {
+ ia_css_rmgr_rel_vbuf(hmm_buffer_pool, &h_vbuf);
+ IA_CSS_ERROR("buffer not enqueued");
+ }
+
+ IA_CSS_LEAVE("return value = %d", return_err);
+
+ return return_err;
+}
+
+/*
+ * TODO: Free up the hmm memory space.
+ */
+enum ia_css_err
+ia_css_pipe_dequeue_buffer(struct ia_css_pipe *pipe,
+ struct ia_css_buffer *buffer)
+{
+ enum ia_css_err return_err;
+ enum sh_css_queue_id queue_id;
+ hrt_vaddress ddr_buffer_addr = (hrt_vaddress)0;
+ struct sh_css_hmm_buffer ddr_buffer;
+ enum ia_css_buffer_type buf_type;
+ enum ia_css_pipe_id pipe_id;
+ unsigned int thread_id;
+ hrt_address kernel_ptr = 0;
+ bool ret_err;
+
+ IA_CSS_ENTER("pipe=%p, buffer=%p", pipe, buffer);
+
+ if ((pipe == NULL) || (buffer == NULL)) {
+ IA_CSS_LEAVE_ERR(IA_CSS_ERR_INVALID_ARGUMENTS);
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+
+ pipe_id = pipe->mode;
+
+ buf_type = buffer->type;
+
+ IA_CSS_LOG("pipe_id=%d, buf_type=%d", pipe_id, buf_type);
+
+ ddr_buffer.kernel_ptr = 0;
+
+ ret_err = ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(pipe), &thread_id);
+ if (!ret_err) {
+ IA_CSS_LEAVE_ERR(IA_CSS_ERR_INVALID_ARGUMENTS);
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+
+ ret_err = ia_css_query_internal_queue_id(buf_type, thread_id, &queue_id);
+ if (!ret_err) {
+ IA_CSS_LEAVE_ERR(IA_CSS_ERR_INVALID_ARGUMENTS);
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+
+ if ((queue_id <= SH_CSS_INVALID_QUEUE_ID) || (queue_id >= SH_CSS_MAX_NUM_QUEUES)) {
+ IA_CSS_LEAVE_ERR(IA_CSS_ERR_INVALID_ARGUMENTS);
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+
+ if (!sh_css_sp_is_running()) {
+ IA_CSS_LOG("SP is not running!");
+ IA_CSS_LEAVE_ERR(IA_CSS_ERR_RESOURCE_NOT_AVAILABLE);
+ /* SP is not running. The queues are not valid */
+ return IA_CSS_ERR_RESOURCE_NOT_AVAILABLE;
+ }
+
+ return_err = ia_css_bufq_dequeue_buffer(queue_id,
+ (uint32_t *)&ddr_buffer_addr);
+
+ if (return_err == IA_CSS_SUCCESS) {
+ struct ia_css_frame *frame;
+ struct sh_css_hmm_buffer_record *hmm_buffer_record = NULL;
+
+ IA_CSS_LOG("receive vbuf=%x", (int)ddr_buffer_addr);
+
+ /* Validate the ddr_buffer_addr and buf_type */
+ hmm_buffer_record = sh_css_hmm_buffer_record_validate(
+ ddr_buffer_addr, buf_type);
+ if (hmm_buffer_record != NULL) {
+ /* valid hmm_buffer_record found. Save the kernel_ptr
+ * for validation after performing mmgr_load. The
+ * vbuf handle and buffer_record can be released.
+ */
+ kernel_ptr = hmm_buffer_record->kernel_ptr;
+ ia_css_rmgr_rel_vbuf(hmm_buffer_pool, &hmm_buffer_record->h_vbuf);
+ sh_css_hmm_buffer_record_reset(hmm_buffer_record);
+ } else {
+ IA_CSS_ERROR("hmm_buffer_record not found (0x%p) buf_type(%d)",
+ ddr_buffer_addr, buf_type);
+ IA_CSS_LEAVE_ERR(IA_CSS_ERR_INTERNAL_ERROR);
+ return IA_CSS_ERR_INTERNAL_ERROR;
+ }
+
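+ /* Read the buffer descriptor back from DDR at the dequeued address. */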
+ mmgr_load(ddr_buffer_addr,
+ &ddr_buffer,
+ sizeof(struct sh_css_hmm_buffer));
+
+ /* If the kernel_ptr is 0 or invalid, return an error.
+ * Do not access the buffer via the kernel_ptr.
+ */
+ if ((ddr_buffer.kernel_ptr == 0) ||
+ (kernel_ptr != HOST_ADDRESS(ddr_buffer.kernel_ptr))) {
+ IA_CSS_ERROR("kernel_ptr invalid");
+ IA_CSS_ERROR("expected: (0x%p)", kernel_ptr);
+ IA_CSS_ERROR("actual: (0x%p)", HOST_ADDRESS(ddr_buffer.kernel_ptr));
+ IA_CSS_ERROR("buf_type: %d\n", buf_type);
+ IA_CSS_LEAVE_ERR(IA_CSS_ERR_INTERNAL_ERROR);
+ return IA_CSS_ERR_INTERNAL_ERROR;
+ }
+
+ if (ddr_buffer.kernel_ptr != 0) {
+ /* buffer->exp_id : all instances to be removed later once the driver change
+ * is completed. See patch #5758 for reference */
+ buffer->exp_id = 0;
+ buffer->driver_cookie = ddr_buffer.cookie_ptr;
+ buffer->timing_data = ddr_buffer.timing_data;
+
+ if ((buf_type == IA_CSS_BUFFER_TYPE_OUTPUT_FRAME) ||
+ (buf_type == IA_CSS_BUFFER_TYPE_VF_OUTPUT_FRAME)) {
+ buffer->isys_eof_clock_tick.ticks = ddr_buffer.isys_eof_clock_tick;
+ }
+
+ switch (buf_type) {
+ case IA_CSS_BUFFER_TYPE_INPUT_FRAME:
+ case IA_CSS_BUFFER_TYPE_OUTPUT_FRAME:
+ case IA_CSS_BUFFER_TYPE_SEC_OUTPUT_FRAME:
+ if ((pipe) && (pipe->stop_requested == true))
+ {
+
+#if defined(USE_INPUT_SYSTEM_VERSION_2)
+ /* free mipi frames only for old input system
+ * for 2401 it is done in ia_css_stream_destroy call
+ */
+ return_err = free_mipi_frames(pipe);
+ if (return_err != IA_CSS_SUCCESS) {
+ IA_CSS_LOG("free_mipi_frames() failed");
+ IA_CSS_LEAVE_ERR(return_err);
+ return return_err;
+ }
+#endif
+ pipe->stop_requested = false;
+ }
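+ /* fall through: the frame handling below is shared with the
+ * (viewfinder) output frame cases */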
+ case IA_CSS_BUFFER_TYPE_VF_OUTPUT_FRAME:
+ case IA_CSS_BUFFER_TYPE_SEC_VF_OUTPUT_FRAME:
+ frame = (struct ia_css_frame*)HOST_ADDRESS(ddr_buffer.kernel_ptr);
+ buffer->data.frame = frame;
+ buffer->exp_id = ddr_buffer.payload.frame.exp_id;
+ frame->exp_id = ddr_buffer.payload.frame.exp_id;
+ frame->isp_config_id = ddr_buffer.payload.frame.isp_parameters_id;
+ if (ddr_buffer.payload.frame.flashed == 1)
+ frame->flash_state =
+ IA_CSS_FRAME_FLASH_STATE_PARTIAL;
+ if (ddr_buffer.payload.frame.flashed == 2)
+ frame->flash_state =
+ IA_CSS_FRAME_FLASH_STATE_FULL;
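+ /* Frames are reported invalid while the pipe still counts
+ * outstanding invalid frames; decrement the counter as each
+ * one is returned. */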
+ frame->valid = pipe->num_invalid_frames == 0;
+ if (!frame->valid)
+ pipe->num_invalid_frames--;
+
+ if (frame->info.format == IA_CSS_FRAME_FORMAT_BINARY_8) {
+#ifdef USE_INPUT_SYSTEM_VERSION_2401
+ frame->planes.binary.size = frame->data_bytes;
+#else
+ frame->planes.binary.size =
+ sh_css_sp_get_binary_copy_size();
+#endif
+ }
+#if defined(SH_CSS_ENABLE_PER_FRAME_PARAMS)
+ if (IA_CSS_BUFFER_TYPE_OUTPUT_FRAME == buf_type) {
+ IA_CSS_LOG("pfp: dequeued OF %d with config id %d thread %d",
+ frame->data, frame->isp_config_id, thread_id);
+ }
+#endif
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_pipe_dequeue_buffer() buf_type=%d, data(DDR address)=0x%x\n",
+ buf_type, buffer->data.frame->data);
+
+ break;
+ case IA_CSS_BUFFER_TYPE_3A_STATISTICS:
+ buffer->data.stats_3a =
+ (struct ia_css_isp_3a_statistics*)HOST_ADDRESS(ddr_buffer.kernel_ptr);
+ buffer->exp_id = ddr_buffer.payload.s3a.exp_id;
+ buffer->data.stats_3a->exp_id = ddr_buffer.payload.s3a.exp_id;
+ buffer->data.stats_3a->isp_config_id = ddr_buffer.payload.s3a.isp_config_id;
+ break;
+ case IA_CSS_BUFFER_TYPE_DIS_STATISTICS:
+ buffer->data.stats_dvs =
+ (struct ia_css_isp_dvs_statistics*)
+ HOST_ADDRESS(ddr_buffer.kernel_ptr);
+ buffer->exp_id = ddr_buffer.payload.dis.exp_id;
+ buffer->data.stats_dvs->exp_id = ddr_buffer.payload.dis.exp_id;
+ break;
+ case IA_CSS_BUFFER_TYPE_LACE_STATISTICS:
+ break;
+ case IA_CSS_BUFFER_TYPE_METADATA:
+ buffer->data.metadata =
+ (struct ia_css_metadata*)HOST_ADDRESS(ddr_buffer.kernel_ptr);
+ buffer->exp_id = ddr_buffer.payload.metadata.exp_id;
+ buffer->data.metadata->exp_id = ddr_buffer.payload.metadata.exp_id;
+ break;
+ default:
+ return_err = IA_CSS_ERR_INTERNAL_ERROR;
+ break;
+ }
+ }
+ }
+
+ /*
+ * Tell the SP which queues are not full,
+ * by sending the software event.
+ */
+ if (return_err == IA_CSS_SUCCESS){
+ if (!sh_css_sp_is_running()) {
+ IA_CSS_LOG("SP is not running!");
+ IA_CSS_LEAVE_ERR(IA_CSS_ERR_RESOURCE_NOT_AVAILABLE);
+ /* SP is not running. The queues are not valid */
+ return IA_CSS_ERR_RESOURCE_NOT_AVAILABLE;
+ }
+ ia_css_bufq_enqueue_psys_event(
+ IA_CSS_PSYS_SW_EVENT_BUFFER_DEQUEUED,
+ 0,
+ queue_id,
+ 0);
+ }
+ IA_CSS_LEAVE("buffer=%p", buffer);
+
+ return return_err;
+}
+
+/*
+ * Cannot move this to the event module as it is of type ia_css_event_type, which is declared in ia_css.h.
+ * TODO: modify and move it if possible.
+ *
+ * !!!IMPORTANT!!! KEEP THE FOLLOWING IN SYNC:
+ * 1) "enum ia_css_event_type" (ia_css_event_public.h)
+ * 2) "enum sh_css_sp_event_type" (sh_css_internal.h)
+ * 3) "enum ia_css_event_type event_id_2_event_mask" (event_handler.sp.c)
+ * 4) "enum ia_css_event_type convert_event_sp_to_host_domain" (sh_css.c)
+ */
+static enum ia_css_event_type convert_event_sp_to_host_domain[] = {
+ IA_CSS_EVENT_TYPE_OUTPUT_FRAME_DONE, /**< Output frame ready. */
+ IA_CSS_EVENT_TYPE_SECOND_OUTPUT_FRAME_DONE, /**< Second output frame ready. */
+ IA_CSS_EVENT_TYPE_VF_OUTPUT_FRAME_DONE, /**< Viewfinder Output frame ready. */
+ IA_CSS_EVENT_TYPE_SECOND_VF_OUTPUT_FRAME_DONE, /**< Second viewfinder Output frame ready. */
+ IA_CSS_EVENT_TYPE_3A_STATISTICS_DONE, /**< Indication that 3A statistics are available. */
+ IA_CSS_EVENT_TYPE_DIS_STATISTICS_DONE, /**< Indication that DIS statistics are available. */
+ IA_CSS_EVENT_TYPE_PIPELINE_DONE, /**< Pipeline Done event, sent after last pipeline stage. */
+ IA_CSS_EVENT_TYPE_FRAME_TAGGED, /**< Frame tagged. */
+ IA_CSS_EVENT_TYPE_INPUT_FRAME_DONE, /**< Input frame ready. */
+ IA_CSS_EVENT_TYPE_METADATA_DONE, /**< Metadata ready. */
+ IA_CSS_EVENT_TYPE_LACE_STATISTICS_DONE, /**< Indication that LACE statistics are available. */
+ IA_CSS_EVENT_TYPE_ACC_STAGE_COMPLETE, /**< Extension stage executed. */
+ IA_CSS_EVENT_TYPE_TIMER, /**< Timing measurement data. */
+ IA_CSS_EVENT_TYPE_PORT_EOF, /**< End Of Frame event, sent when in buffered sensor mode. */
+ IA_CSS_EVENT_TYPE_FW_WARNING, /**< Performance warning encountered by FW */
+ IA_CSS_EVENT_TYPE_FW_ASSERT, /**< Assertion hit by FW */
+ 0, /** error if sp passes SH_CSS_SP_EVENT_NR_OF_TYPES as a valid event. */
+};
+
+enum ia_css_err
+ia_css_dequeue_event(struct ia_css_event *event)
+{
+ return ia_css_dequeue_psys_event(event);
+}
+
+enum ia_css_err
+ia_css_dequeue_psys_event(struct ia_css_event *event)
+{
+ enum ia_css_pipe_id pipe_id = 0;
+ uint8_t payload[4] = {0,0,0,0};
+ enum ia_css_err ret_err;
+
+ /* TODO:
+ * a) use a generic decoding function, same as the one used by the SP.
+ * b) group decode and dequeue into an eventQueue module
+ *
+ * We skip the IA_CSS_ENTER logging call
+ * to avoid flooding the logs when the host application
+ * uses polling. */
+ if (event == NULL)
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+
+ if (!sh_css_sp_is_running()) {
+ /* SP is not running. The queues are not valid */
+ return IA_CSS_ERR_RESOURCE_NOT_AVAILABLE;
+ }
+
+ /* dequeue the event (if any) from the psys event queue */
+ ret_err = ia_css_bufq_dequeue_psys_event(payload);
+ if (ret_err != IA_CSS_SUCCESS)
+ return ret_err;
+
+ IA_CSS_LOG("event dequeued from psys event queue");
+
+ /* Tell the SP that we dequeued an event from the event queue. */
+ ia_css_bufq_enqueue_psys_event(
+ IA_CSS_PSYS_SW_EVENT_EVENT_DEQUEUED, 0, 0, 0);
+
+ /* Events are decoded into 4 bytes of payload, the first byte
+ * contains the sp event type. This is converted to a host enum.
+ * TODO: can this enum conversion be eliminated */
+ event->type = convert_event_sp_to_host_domain[payload[0]];
+ /* Some sane default values since not all events use all fields. */
+ event->pipe = NULL;
+ event->port = IA_CSS_CSI2_PORT0;
+ event->exp_id = 0;
+ event->fw_warning = IA_CSS_FW_WARNING_NONE;
+ event->fw_handle = 0;
+ event->timer_data = 0;
+ event->timer_code = 0;
+ event->timer_subcode = 0;
+
+ if (event->type == IA_CSS_EVENT_TYPE_TIMER) {
+ /* Timer event: get the 2nd event and decode the data into the event struct */
+ uint32_t tmp_data;
+ /* 1st event: LSB 16-bit timer data and code */
+ event->timer_data = ((payload[1] & 0xFF) | ((payload[3] & 0xFF) << 8));
+ event->timer_code = payload[2];
+ payload[0] = payload[1] = payload[2] = payload[3] = 0;
+ ret_err = ia_css_bufq_dequeue_psys_event(payload);
+ if (ret_err != IA_CSS_SUCCESS) {
+ /* No 2nd event: this is an error */
+ /* Putting IA_CSS_ERROR is resulting in failures in
+ * Merrifield smoke testing */
+ IA_CSS_WARNING("Timer: Error de-queuing the 2nd TIMER event!!!\n");
+ return ret_err;
+ }
+ ia_css_bufq_enqueue_psys_event(
+ IA_CSS_PSYS_SW_EVENT_EVENT_DEQUEUED, 0, 0, 0);
+ event->type = convert_event_sp_to_host_domain[payload[0]];
+ /* It's a timer */
+ if (event->type == IA_CSS_EVENT_TYPE_TIMER) {
+ /* 2nd event data: MSB 16-bit timer and subcode */
+ tmp_data = ((payload[1] & 0xFF) | ((payload[3] & 0xFF) << 8));
+ event->timer_data |= (tmp_data << 16);
+ event->timer_subcode = payload[2];
+ }
+ /* It's a non-timer event. So clear the first half of the timer event data.
+ * If the second part of the TIMER event is not received, we discard
+ * the first half of the timer data and process the non-timer event without
+ * affecting the flow. So the non-timer event falls through
+ * the code. */
+ else {
+ event->timer_data = 0;
+ event->timer_code = 0;
+ event->timer_subcode = 0;
+ IA_CSS_ERROR("Missing 2nd timer event. Timer event discarded");
+ }
+ }
+ if (event->type == IA_CSS_EVENT_TYPE_PORT_EOF) {
+ event->port = (enum ia_css_csi2_port)payload[1];
+ event->exp_id = payload[3];
+ } else if (event->type == IA_CSS_EVENT_TYPE_FW_WARNING) {
+ event->fw_warning = (enum ia_css_fw_warning)payload[1];
+ /* exp_id is only available in these warning types */
+ if (event->fw_warning == IA_CSS_FW_WARNING_EXP_ID_LOCKED ||
+ event->fw_warning == IA_CSS_FW_WARNING_TAG_EXP_ID_FAILED)
+ event->exp_id = payload[3];
+ } else if (event->type == IA_CSS_EVENT_TYPE_FW_ASSERT) {
+ event->fw_assert_module_id = payload[1]; /* module */
+ event->fw_assert_line_no = (payload[2] << 8) + payload[3];
+ /* payload[2] is line_no>>8, payload[3] is line_no&0xff */
+ } else if (event->type != IA_CSS_EVENT_TYPE_TIMER) {
+ /* pipe related events.
+ * payload[1] contains the pipe_num,
+ * payload[2] contains the pipe_id. These are different. */
+ event->pipe = find_pipe_by_num(payload[1]);
+ pipe_id = (enum ia_css_pipe_id)payload[2];
+ /* Check to see if pipe still exists */
+ if (!event->pipe)
+ return IA_CSS_ERR_RESOURCE_NOT_AVAILABLE;
+
+ if (event->type == IA_CSS_EVENT_TYPE_FRAME_TAGGED) {
+ /* find the capture pipe that goes with this */
+ int i, n;
+ n = event->pipe->stream->num_pipes;
+ for (i = 0; i < n; i++) {
+ struct ia_css_pipe *p =
+ event->pipe->stream->pipes[i];
+ if (p->config.mode == IA_CSS_PIPE_MODE_CAPTURE) {
+ event->pipe = p;
+ break;
+ }
+ }
+ event->exp_id = payload[3];
+ }
+ if (event->type == IA_CSS_EVENT_TYPE_ACC_STAGE_COMPLETE) {
+ /* payload[3] contains the acc fw handle. */
+ uint32_t stage_num = (uint32_t)payload[3];
+ ret_err = ia_css_pipeline_get_fw_from_stage(
+ &(event->pipe->pipeline),
+ stage_num,
+ &(event->fw_handle));
+ if (ret_err != IA_CSS_SUCCESS) {
+ IA_CSS_ERROR("Invalid stage num received for ACC event. stage_num:%u",
+ stage_num);
+ return ret_err;
+ }
+ }
+ }
+
+ if (event->pipe)
+ IA_CSS_LEAVE("event_id=%d, pipe_id=%d", event->type, pipe_id);
+ else
+ IA_CSS_LEAVE("event_id=%d", event->type);
+
+ return IA_CSS_SUCCESS;
+}
+
+enum ia_css_err
+ia_css_dequeue_isys_event(struct ia_css_event *event)
+{
+ uint8_t payload[4] = {0, 0, 0, 0};
+ enum ia_css_err err = IA_CSS_SUCCESS;
+
+ /* We skip the IA_CSS_ENTER logging call
+ * to avoid flooding the logs when the host application
+ * uses polling. */
+ if (event == NULL)
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+
+ if (!sh_css_sp_is_running()) {
+ /* SP is not running. The queues are not valid */
+ return IA_CSS_ERR_RESOURCE_NOT_AVAILABLE;
+ }
+
+ err = ia_css_bufq_dequeue_isys_event(payload);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+
+ IA_CSS_LOG("event dequeued from isys event queue");
+
+ /* Update SP state to indicate that element was dequeued. */
+ ia_css_bufq_enqueue_isys_event(IA_CSS_ISYS_SW_EVENT_EVENT_DEQUEUED);
+
+ /* Fill return struct with appropriate info */
+ event->type = IA_CSS_EVENT_TYPE_PORT_EOF;
+ /* EOF events are associated with a CSI port, not with a pipe */
+ event->pipe = NULL;
+ event->port = payload[1];
+ event->exp_id = payload[3];
+
+ IA_CSS_LEAVE_ERR(err);
+ return err;
+}
+
+static void
+acc_start(struct ia_css_pipe *pipe)
+{
+ assert(pipe != NULL);
+ assert(pipe->stream != NULL);
+
+ start_pipe(pipe, SH_CSS_PIPE_CONFIG_OVRD_NO_OVRD,
+ pipe->stream->config.mode);
+}
+
+static enum ia_css_err
+sh_css_pipe_start(struct ia_css_stream *stream)
+{
+ enum ia_css_err err = IA_CSS_SUCCESS;
+
+ struct ia_css_pipe *pipe;
+ enum ia_css_pipe_id pipe_id;
+ unsigned int thread_id;
+
+ IA_CSS_ENTER_PRIVATE("stream = %p", stream);
+
+ if (stream == NULL) {
+ IA_CSS_LEAVE_ERR(IA_CSS_ERR_INVALID_ARGUMENTS);
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+ pipe = stream->last_pipe;
+ if (pipe == NULL) {
+ IA_CSS_LEAVE_ERR(IA_CSS_ERR_INVALID_ARGUMENTS);
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+
+ pipe_id = pipe->mode;
+
+ if(stream->started == true) {
+ IA_CSS_WARNING("Cannot start stream that is already started");
+ IA_CSS_LEAVE_ERR(err);
+ return err;
+ }
+
+ pipe->stop_requested = false;
+
+ switch (pipe_id) {
+ case IA_CSS_PIPE_ID_PREVIEW:
+ err = preview_start(pipe);
+ break;
+ case IA_CSS_PIPE_ID_VIDEO:
+ err = video_start(pipe);
+ break;
+ case IA_CSS_PIPE_ID_CAPTURE:
+ err = capture_start(pipe);
+ break;
+ case IA_CSS_PIPE_ID_YUVPP:
+ err = yuvpp_start(pipe);
+ break;
+ case IA_CSS_PIPE_ID_ACC:
+ acc_start(pipe);
+ break;
+ default:
+ err = IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+ /* DH regular multi pipe - not continuous mode: start the next pipes too */
+ if (!stream->config.continuous) {
+ int i;
+ for (i = 1; i < stream->num_pipes && IA_CSS_SUCCESS == err ; i++) {
+ switch (stream->pipes[i]->mode) {
+ case IA_CSS_PIPE_ID_PREVIEW:
+ stream->pipes[i]->stop_requested = false;
+ err = preview_start(stream->pipes[i]);
+ break;
+ case IA_CSS_PIPE_ID_VIDEO:
+ stream->pipes[i]->stop_requested = false;
+ err = video_start(stream->pipes[i]);
+ break;
+ case IA_CSS_PIPE_ID_CAPTURE:
+ stream->pipes[i]->stop_requested = false;
+ err = capture_start(stream->pipes[i]);
+ break;
+ case IA_CSS_PIPE_ID_YUVPP:
+ stream->pipes[i]->stop_requested = false;
+ err = yuvpp_start(stream->pipes[i]);
+ break;
+ case IA_CSS_PIPE_ID_ACC:
+ stream->pipes[i]->stop_requested = false;
+ acc_start(stream->pipes[i]);
+ break;
+ default:
+ err = IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+ }
+ }
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+
+ /* Force ISP parameter calculation after a mode change
+ * Acceleration API examples pass NULL for stream but they
+ * don't use ISP parameters anyway. So this should be okay.
+ * The SP binary (jpeg) copy does not use any parameters.
+ */
+ if (!copy_on_sp(pipe)) {
+ sh_css_invalidate_params(stream);
+ err = sh_css_param_update_isp_params(pipe,
+ stream->isp_params_configs, true, NULL);
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ }
+
+ ia_css_debug_pipe_graph_dump_epilogue();
+
+ ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(pipe), &thread_id);
+
+ if (!sh_css_sp_is_running()) {
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_RESOURCE_NOT_AVAILABLE);
+ /* SP is not running. The queues are not valid */
+ return IA_CSS_ERR_RESOURCE_NOT_AVAILABLE;
+ }
+ ia_css_bufq_enqueue_psys_event(IA_CSS_PSYS_SW_EVENT_START_STREAM,
+ (uint8_t)thread_id, 0, 0);
+
+ /* DH regular multi pipe - not continuous mode: enqueue event to the next pipes too */
+ if (!stream->config.continuous) {
+ int i;
+ for (i = 1; i < stream->num_pipes; i++) {
+ ia_css_pipeline_get_sp_thread_id(
+ ia_css_pipe_get_pipe_num(stream->pipes[i]),
+ &thread_id);
+ ia_css_bufq_enqueue_psys_event(
+ IA_CSS_PSYS_SW_EVENT_START_STREAM,
+ (uint8_t)thread_id, 0, 0);
+ }
+ }
+
+ /* in case of continuous capture mode, we also start capture thread and copy thread*/
+ if (pipe->stream->config.continuous) {
+ struct ia_css_pipe *copy_pipe = NULL;
+
+ if (pipe_id == IA_CSS_PIPE_ID_PREVIEW)
+ copy_pipe = pipe->pipe_settings.preview.copy_pipe;
+ else if (pipe_id == IA_CSS_PIPE_ID_VIDEO)
+ copy_pipe = pipe->pipe_settings.video.copy_pipe;
+
+ if (copy_pipe == NULL) {
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_INTERNAL_ERROR);
+ return IA_CSS_ERR_INTERNAL_ERROR;
+ }
+ ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(copy_pipe), &thread_id);
+ /* by the time we reach here q is initialized and handle is available.*/
+ ia_css_bufq_enqueue_psys_event(
+ IA_CSS_PSYS_SW_EVENT_START_STREAM,
+ (uint8_t)thread_id, 0, 0);
+ }
+ if (pipe->stream->cont_capt) {
+ struct ia_css_pipe *capture_pipe = NULL;
+ if (pipe_id == IA_CSS_PIPE_ID_PREVIEW)
+ capture_pipe = pipe->pipe_settings.preview.capture_pipe;
+ else if (pipe_id == IA_CSS_PIPE_ID_VIDEO)
+ capture_pipe = pipe->pipe_settings.video.capture_pipe;
+
+ if (capture_pipe == NULL) {
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_INTERNAL_ERROR);
+ return IA_CSS_ERR_INTERNAL_ERROR;
+ }
+ ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(capture_pipe), &thread_id);
+ /* by the time we reach here q is initialized and handle is available.*/
+ ia_css_bufq_enqueue_psys_event(
+ IA_CSS_PSYS_SW_EVENT_START_STREAM,
+ (uint8_t)thread_id, 0, 0);
+ }
+
+ /* in case of PREVIEW mode, check whether QOS acc_pipe is available, then start the qos pipe */
+ if (pipe_id == IA_CSS_PIPE_ID_PREVIEW) {
+ struct ia_css_pipe *acc_pipe = NULL;
+ acc_pipe = pipe->pipe_settings.preview.acc_pipe;
+
+ if (acc_pipe){
+ ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(acc_pipe), &thread_id);
+ /* by the time we reach here q is initialized and handle is available.*/
+ ia_css_bufq_enqueue_psys_event(
+ IA_CSS_PSYS_SW_EVENT_START_STREAM,
+ (uint8_t) thread_id, 0, 0);
+ }
+ }
+
+ stream->started = true;
+
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+}
+
+#ifndef ISP2401
+void
+sh_css_enable_cont_capt(bool enable, bool stop_copy_preview)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "sh_css_enable_cont_capt() enter: enable=%d\n", enable);
+//my_css.cont_capt = enable;
+ my_css.stop_copy_preview = stop_copy_preview;
+}
+
+bool
+sh_css_continuous_is_enabled(uint8_t pipe_num)
+#else
+/**
+ * @brief Stop all "ia_css_pipe" instances in the target
+ * "ia_css_stream" instance.
+ *
+ * Refer to "Local prototypes" for more info.
+ */
+static enum ia_css_err
+sh_css_pipes_stop(struct ia_css_stream *stream)
+#endif
+{
+#ifndef ISP2401
+ struct ia_css_pipe *pipe;
+ bool continuous;
+#else
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ struct ia_css_pipe *main_pipe;
+ enum ia_css_pipe_id main_pipe_id;
+ int i;
+#endif
+
+#ifndef ISP2401
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "sh_css_continuous_is_enabled() enter: pipe_num=%d\n", pipe_num);
+#else
+ assert(stream != NULL);
+ if (stream == NULL) {
+ IA_CSS_LOG("stream does NOT exist!");
+ err = IA_CSS_ERR_INTERNAL_ERROR;
+ goto ERR;
+ }
+#endif
+
+#ifndef ISP2401
+ pipe = find_pipe_by_num(pipe_num);
+ continuous = pipe && pipe->stream->config.continuous;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "sh_css_continuous_is_enabled() leave: enable=%d\n",
+ continuous);
+ return continuous;
+}
+#else
+ main_pipe = stream->last_pipe;
+ assert(main_pipe != NULL);
+ if (main_pipe == NULL) {
+ IA_CSS_LOG("main_pipe does NOT exist!");
+ err = IA_CSS_ERR_INTERNAL_ERROR;
+ goto ERR;
+ }
+#endif
+
+#ifndef ISP2401
+enum ia_css_err
+ia_css_stream_get_max_buffer_depth(struct ia_css_stream *stream, int *buffer_depth)
+{
+ if (buffer_depth == NULL)
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_stream_get_max_buffer_depth() enter: void\n");
+ (void)stream;
+ *buffer_depth = NUM_CONTINUOUS_FRAMES;
+ return IA_CSS_SUCCESS;
+}
+#else
+ main_pipe_id = main_pipe->mode;
+ IA_CSS_ENTER_PRIVATE("main_pipe_id=%d", main_pipe_id);
+#endif
+
+#ifndef ISP2401
+enum ia_css_err
+ia_css_stream_set_buffer_depth(struct ia_css_stream *stream, int buffer_depth)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_stream_set_buffer_depth() enter: num_frames=%d\n",buffer_depth);
+ (void)stream;
+ if (buffer_depth > NUM_CONTINUOUS_FRAMES || buffer_depth < 1)
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ /* ok, value allowed */
+ stream->config.target_num_cont_raw_buf = buffer_depth;
+ /* TODO: check what to do regarding initialization */
+ return IA_CSS_SUCCESS;
+}
+#else
+ /**
+ * Stop all "ia_css_pipe" instances in this target
+ * "ia_css_stream" instance.
+ */
+ for (i = 0; i < stream->num_pipes; i++) {
+ /* send the "stop" request to the "ia_css_pipe" instance */
+ IA_CSS_LOG("Send the stop-request to the pipe: pipe_id=%d",
+ stream->pipes[i]->pipeline.pipe_id);
+ err = ia_css_pipeline_request_stop(&stream->pipes[i]->pipeline);
+#endif
+
+#ifndef ISP2401
+enum ia_css_err
+ia_css_stream_get_buffer_depth(struct ia_css_stream *stream, int *buffer_depth)
+{
+ if (buffer_depth == NULL)
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_stream_get_buffer_depth() enter: void\n");
+#else
+ /*
+ * Exit this loop if "ia_css_pipeline_request_stop()"
+ * returns the error code.
+ *
+ * The error code would be generated in the following
+ * two cases:
+ * (1) The Scalar Processor has already been stopped.
+ * (2) The "Host->SP" event queue is full.
+ *
+ * Following the convention of CSS API 2.0/2.1, such a CSS
+ * error code is propagated from the CSS-internal
+ * API return value to the CSS API return value. The
+ * CSS driver should then capture these error codes and
+ * handle them in its exception handling mechanism.
+ */
+ if (err != IA_CSS_SUCCESS) {
+ goto ERR;
+ }
+ }
+
+ /**
+ * In the CSS firmware use scenario "Continuous Preview"
+ * as well as "Continuous Video", the "ia_css_pipe" instance
+ * "Copy Pipe" is activated. This "Copy Pipe" is private to
+ * the CSS firmware so that it is not listed in the target
+ * "ia_css_stream" instance.
+ *
+ * We need to stop this "Copy Pipe", as well.
+ */
+ if (main_pipe->stream->config.continuous) {
+ struct ia_css_pipe *copy_pipe = NULL;
+
+ /* get the reference to "Copy Pipe" */
+ if (main_pipe_id == IA_CSS_PIPE_ID_PREVIEW)
+ copy_pipe = main_pipe->pipe_settings.preview.copy_pipe;
+ else if (main_pipe_id == IA_CSS_PIPE_ID_VIDEO)
+ copy_pipe = main_pipe->pipe_settings.video.copy_pipe;
+
+ /* return the error code if "Copy Pipe" does NOT exist */
+ assert(copy_pipe != NULL);
+ if (copy_pipe == NULL) {
+ IA_CSS_LOG("Copy Pipe does NOT exist!");
+ err = IA_CSS_ERR_INTERNAL_ERROR;
+ goto ERR;
+ }
+
+ /* send the "stop" request to "Copy Pipe" */
+ IA_CSS_LOG("Send the stop-request to the pipe: pipe_id=%d",
+ copy_pipe->pipeline.pipe_id);
+ err = ia_css_pipeline_request_stop(&copy_pipe->pipeline);
+ }
+
+ERR:
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+}
+
+/**
+ * @brief Check if all "ia_css_pipe" instances in the target
+ * "ia_css_stream" instance have stopped.
+ *
+ * Refer to "Local prototypes" for more info.
+ */
+static bool
+sh_css_pipes_have_stopped(struct ia_css_stream *stream)
+{
+ bool rval = true;
+
+ struct ia_css_pipe *main_pipe;
+ enum ia_css_pipe_id main_pipe_id;
+
+ int i;
+
+ assert(stream != NULL);
+ if (stream == NULL) {
+ IA_CSS_LOG("stream does NOT exist!");
+ rval = false;
+ goto RET;
+ }
+
+ main_pipe = stream->last_pipe;
+ assert(main_pipe != NULL);
+
+ if (main_pipe == NULL) {
+ IA_CSS_LOG("main_pipe does NOT exist!");
+ rval = false;
+ goto RET;
+ }
+
+ main_pipe_id = main_pipe->mode;
+ IA_CSS_ENTER_PRIVATE("main_pipe_id=%d", main_pipe_id);
+
+ /**
+ * Check if every "ia_css_pipe" instance in this target
+ * "ia_css_stream" instance has stopped.
+ */
+ for (i = 0; i < stream->num_pipes; i++) {
+ rval = rval && ia_css_pipeline_has_stopped(&stream->pipes[i]->pipeline);
+ IA_CSS_LOG("Pipe has stopped: pipe_id=%d, stopped=%d",
+ stream->pipes[i]->pipeline.pipe_id,
+ rval);
+ }
+
+ /**
+ * In the CSS firmware use scenario "Continuous Preview"
+ * as well as "Continuous Video", the "ia_css_pipe" instance
+ * "Copy Pipe" is activated. This "Copy Pipe" is private to
+ * the CSS firmware so that it is not listed in the target
+ * "ia_css_stream" instance.
+ *
+ * We need to check if this "Copy Pipe" has stopped, as well.
+ */
+ if (main_pipe->stream->config.continuous) {
+ struct ia_css_pipe *copy_pipe = NULL;
+
+ /* get the reference to "Copy Pipe" */
+ if (main_pipe_id == IA_CSS_PIPE_ID_PREVIEW)
+ copy_pipe = main_pipe->pipe_settings.preview.copy_pipe;
+ else if (main_pipe_id == IA_CSS_PIPE_ID_VIDEO)
+ copy_pipe = main_pipe->pipe_settings.video.copy_pipe;
+
+ /* return if "Copy Pipe" does NOT exist */
+ assert(copy_pipe != NULL);
+ if (copy_pipe == NULL) {
+ IA_CSS_LOG("Copy Pipe does NOT exist!");
+
+ rval = false;
+ goto RET;
+ }
+
+ /* check if "Copy Pipe" has stopped or not */
+ rval = rval && ia_css_pipeline_has_stopped(&copy_pipe->pipeline);
+ IA_CSS_LOG("Pipe has stopped: pipe_id=%d, stopped=%d",
+ copy_pipe->pipeline.pipe_id,
+ rval);
+ }
+
+RET:
+ IA_CSS_LEAVE_PRIVATE("rval=%d", rval);
+ return rval;
+}
+
+bool
+sh_css_continuous_is_enabled(uint8_t pipe_num)
+{
+ struct ia_css_pipe *pipe;
+ bool continuous;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "sh_css_continuous_is_enabled() enter: pipe_num=%d\n", pipe_num);
+
+ pipe = find_pipe_by_num(pipe_num);
+ continuous = pipe && pipe->stream->config.continuous;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "sh_css_continuous_is_enabled() leave: enable=%d\n",
+ continuous);
+ return continuous;
+}
+
+enum ia_css_err
+ia_css_stream_get_max_buffer_depth(struct ia_css_stream *stream, int *buffer_depth)
+{
+ if (buffer_depth == NULL)
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_stream_get_max_buffer_depth() enter: void\n");
+ (void)stream;
+ *buffer_depth = NUM_CONTINUOUS_FRAMES;
+ return IA_CSS_SUCCESS;
+}
+
+enum ia_css_err
+ia_css_stream_set_buffer_depth(struct ia_css_stream *stream, int buffer_depth)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_stream_set_buffer_depth() enter: num_frames=%d\n",buffer_depth);
+ (void)stream;
+ if (buffer_depth > NUM_CONTINUOUS_FRAMES || buffer_depth < 1)
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ /* ok, value allowed */
+ stream->config.target_num_cont_raw_buf = buffer_depth;
+ /* TODO: check what to do regarding initialization */
+ return IA_CSS_SUCCESS;
+}
+
+enum ia_css_err
+ia_css_stream_get_buffer_depth(struct ia_css_stream *stream, int *buffer_depth)
+{
+ if (buffer_depth == NULL)
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_stream_get_buffer_depth() enter: void\n");
+#endif
+ (void)stream;
+ *buffer_depth = stream->config.target_num_cont_raw_buf;
+ return IA_CSS_SUCCESS;
+}
+
+#if !defined(HAS_NO_INPUT_SYSTEM) && defined(USE_INPUT_SYSTEM_VERSION_2)
+unsigned int
+sh_css_get_mipi_sizes_for_check(const unsigned int port, const unsigned int idx)
+{
+ OP___assert(port < N_CSI_PORTS);
+ OP___assert(idx < IA_CSS_MIPI_SIZE_CHECK_MAX_NOF_ENTRIES_PER_PORT);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "sh_css_get_mipi_sizes_for_check(port %d, idx %d): %d\n",
+ port, idx, my_css.mipi_sizes_for_check[port][idx]);
+ return my_css.mipi_sizes_for_check[port][idx];
+}
+#endif
+
+static enum ia_css_err sh_css_pipe_configure_output(
+ struct ia_css_pipe *pipe,
+ unsigned int width,
+ unsigned int height,
+ unsigned int padded_width,
+ enum ia_css_frame_format format,
+ unsigned int idx)
+{
+ enum ia_css_err err = IA_CSS_SUCCESS;
+
+ IA_CSS_ENTER_PRIVATE("pipe = %p, width = %d, height = %d, paddaed width = %d, format = %d, idx = %d",
+ pipe, width, height, padded_width, format, idx);
+ if (pipe == NULL) {
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_INVALID_ARGUMENTS);
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+
+ err = ia_css_util_check_res(width, height);
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
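+ /* Only (re)initialize the output frame info when the requested
+ * resolution or format differs from the current configuration. */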
+ if (pipe->output_info[idx].res.width != width ||
+ pipe->output_info[idx].res.height != height ||
+ pipe->output_info[idx].format != format)
+ {
+ ia_css_frame_info_init(
+ &pipe->output_info[idx],
+ width,
+ height,
+ format,
+ padded_width);
+ }
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_SUCCESS);
+ return IA_CSS_SUCCESS;
+}
+
+static enum ia_css_err
+sh_css_pipe_get_shading_info(struct ia_css_pipe *pipe,
+#ifndef ISP2401
+ struct ia_css_shading_info *info)
+#else
+ struct ia_css_shading_info *shading_info,
+ struct ia_css_pipe_config *pipe_config)
+#endif
+{
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ struct ia_css_binary *binary = NULL;
+
+ assert(pipe != NULL);
+#ifndef ISP2401
+ assert(info != NULL);
+#else
+ assert(shading_info != NULL);
+ assert(pipe_config != NULL);
+#endif
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "sh_css_pipe_get_shading_info() enter:\n");
+
+ binary = ia_css_pipe_get_shading_correction_binary(pipe);
+
+ if (binary) {
+ err = ia_css_binary_get_shading_info(binary,
+ IA_CSS_SHADING_CORRECTION_TYPE_1,
+ pipe->required_bds_factor,
+ (const struct ia_css_stream_config *)&pipe->stream->config,
+#ifndef ISP2401
+ info);
+#else
+ shading_info, pipe_config);
+#endif
+ /* Other function calls can be added here when other shading correction types will be added
+ * in the future.
+ */
+ } else {
+ /* When the pipe does not have a binary which has the shading
+ * correction, this function does not need to fill the shading
+ * information. It is not an error case, so this function
+ * should return IA_CSS_SUCCESS.
+ */
+#ifndef ISP2401
+ memset(info, 0, sizeof(*info));
+#else
+ memset(shading_info, 0, sizeof(*shading_info));
+#endif
+ }
+ return err;
+}
+
+static enum ia_css_err
+sh_css_pipe_get_grid_info(struct ia_css_pipe *pipe,
+ struct ia_css_grid_info *info)
+{
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ struct ia_css_binary *binary = NULL;
+
+ assert(pipe != NULL);
+ assert(info != NULL);
+
+ IA_CSS_ENTER_PRIVATE("");
+
+ binary = ia_css_pipe_get_s3a_binary(pipe);
+
+ if (binary) {
+ err = ia_css_binary_3a_grid_info(binary, info, pipe);
+ if (err != IA_CSS_SUCCESS)
+ goto ERR;
+ } else
+ memset(&info->s3a_grid, 0, sizeof(info->s3a_grid));
+
+ binary = ia_css_pipe_get_sdis_binary(pipe);
+
+ if (binary) {
+ ia_css_binary_dvs_grid_info(binary, info, pipe);
+ ia_css_binary_dvs_stat_grid_info(binary, info, pipe);
+ } else {
+ memset(&info->dvs_grid.dvs_grid_info, 0,
+ sizeof(info->dvs_grid.dvs_grid_info));
+ memset(&info->dvs_grid.dvs_stat_grid_info, 0,
+ sizeof(info->dvs_grid.dvs_stat_grid_info));
+ }
+
+ if (binary != NULL) {
+ /* copy pipe does not have ISP binary*/
+ info->isp_in_width = binary->internal_frame_info.res.width;
+ info->isp_in_height = binary->internal_frame_info.res.height;
+ }
+
+#if defined(HAS_VAMEM_VERSION_2)
+ info->vamem_type = IA_CSS_VAMEM_TYPE_2;
+#elif defined(HAS_VAMEM_VERSION_1)
+ info->vamem_type = IA_CSS_VAMEM_TYPE_1;
+#else
+#error "Unknown VAMEM version"
+#endif
+
+ERR:
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+}
+
+#ifdef ISP2401
+/**
+ * @brief Check if a format is supported by the pipe.
+ *
+ */
+static enum ia_css_err
+ia_css_pipe_check_format(struct ia_css_pipe *pipe, enum ia_css_frame_format format)
+{
+ const enum ia_css_frame_format *supported_formats;
+ int number_of_formats;
+ int found = 0;
+ int i;
+
+ IA_CSS_ENTER_PRIVATE("");
+
+ if (NULL == pipe || NULL == pipe->pipe_settings.video.video_binary.info) {
+ IA_CSS_ERROR("Pipe or binary info is not set");
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_INVALID_ARGUMENTS);
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+
+ supported_formats = pipe->pipe_settings.video.video_binary.info->output_formats;
+ number_of_formats = sizeof(pipe->pipe_settings.video.video_binary.info->output_formats)/sizeof(enum ia_css_frame_format);
+
+ for (i = 0; i < number_of_formats && !found; i++) {
+ if (supported_formats[i] == format) {
+ found = 1;
+ break;
+ }
+ }
+ if (!found) {
+ IA_CSS_ERROR("Requested format is not supported by binary");
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_INVALID_ARGUMENTS);
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ } else {
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_SUCCESS);
+ return IA_CSS_SUCCESS;
+ }
+}
+#endif
+
+static enum ia_css_err load_video_binaries(struct ia_css_pipe *pipe)
+{
+ struct ia_css_frame_info video_in_info, tnr_info,
+ *video_vf_info, video_bds_out_info, *pipe_out_info, *pipe_vf_out_info;
+ bool online;
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ bool continuous = pipe->stream->config.continuous;
+ unsigned int i;
+ unsigned num_output_pins;
+ struct ia_css_frame_info video_bin_out_info;
+ bool need_scaler = false;
+ bool vf_res_different_than_output = false;
+ bool need_vf_pp = false;
+ int vf_ds_log2;
+ struct ia_css_video_settings *mycs = &pipe->pipe_settings.video;
+
+ IA_CSS_ENTER_PRIVATE("");
+ assert(pipe != NULL);
+ assert(pipe->mode == IA_CSS_PIPE_ID_VIDEO);
+ /* We only test the video_binary because offline video doesn't need a
+ * vf_pp binary and online video does not always use the copy_binary.
+ * All are always reset at the same time anyway.
+ */
+ if (mycs->video_binary.info)
+ return IA_CSS_SUCCESS;
+
+ online = pipe->stream->config.online;
+ pipe_out_info = &pipe->output_info[0];
+ pipe_vf_out_info = &pipe->vf_output_info[0];
+
+ assert(pipe_out_info != NULL);
+
+ /*
+ * There is no explicit input format requirement for raw or yuv
+ * What matters is that there is a binary that supports the stream format.
+ * This is checked in the binary_find(), so no need to check it here
+ */
+ err = ia_css_util_check_input(&pipe->stream->config, false, false);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+ /* cannot have online video and input_mode memory */
+ if (online && pipe->stream->config.mode == IA_CSS_INPUT_MODE_MEMORY)
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ if (pipe->enable_viewfinder[IA_CSS_PIPE_OUTPUT_STAGE_0]) {
+ err = ia_css_util_check_vf_out_info(pipe_out_info,
+ pipe_vf_out_info);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+ } else {
+ err = ia_css_frame_check_info(pipe_out_info);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+ }
+
+ if (pipe->out_yuv_ds_input_info.res.width)
+ video_bin_out_info = pipe->out_yuv_ds_input_info;
+ else
+ video_bin_out_info = *pipe_out_info;
+
+ /* Video */
+ if (pipe->enable_viewfinder[IA_CSS_PIPE_OUTPUT_STAGE_0]){
+ video_vf_info = pipe_vf_out_info;
+ vf_res_different_than_output = (video_vf_info->res.width != video_bin_out_info.res.width) ||
+ (video_vf_info->res.height != video_bin_out_info.res.height);
+ }
+ else {
+ video_vf_info = NULL;
+ }
+
+ need_scaler = need_downscaling(video_bin_out_info.res, pipe_out_info->res);
+
+ /* we build up the pipeline starting at the end */
+ /* YUV post-processing if needed */
+ if (need_scaler) {
+ struct ia_css_cas_binary_descr cas_scaler_descr
+ = IA_CSS_DEFAULT_CAS_BINARY_DESCR;
+
+ /* NV12 is the common format that is supported by both */
+ /* yuv_scaler and the video_xx_isp2_min binaries. */
+ video_bin_out_info.format = IA_CSS_FRAME_FORMAT_NV12;
+
+ err = ia_css_pipe_create_cas_scaler_desc_single_output(
+ &video_bin_out_info,
+ pipe_out_info,
+ NULL,
+ &cas_scaler_descr);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+ mycs->num_yuv_scaler = cas_scaler_descr.num_stage;
+ mycs->yuv_scaler_binary = kzalloc(cas_scaler_descr.num_stage *
+ sizeof(struct ia_css_binary), GFP_KERNEL);
+ if (mycs->yuv_scaler_binary == NULL) {
+ err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+ return err;
+ }
+ mycs->is_output_stage = kzalloc(cas_scaler_descr.num_stage
+ * sizeof(bool), GFP_KERNEL);
+ if (mycs->is_output_stage == NULL) {
+ err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+ return err;
+ }
+ for (i = 0; i < cas_scaler_descr.num_stage; i++) {
+ struct ia_css_binary_descr yuv_scaler_descr;
+ mycs->is_output_stage[i] = cas_scaler_descr.is_output_stage[i];
+ ia_css_pipe_get_yuvscaler_binarydesc(pipe,
+ &yuv_scaler_descr, &cas_scaler_descr.in_info[i],
+ &cas_scaler_descr.out_info[i],
+ &cas_scaler_descr.internal_out_info[i],
+ &cas_scaler_descr.vf_info[i]);
+ err = ia_css_binary_find(&yuv_scaler_descr,
+ &mycs->yuv_scaler_binary[i]);
+ if (err != IA_CSS_SUCCESS) {
+ kfree(mycs->is_output_stage);
+ mycs->is_output_stage = NULL;
+ return err;
+ }
+ }
+ ia_css_pipe_destroy_cas_scaler_desc(&cas_scaler_descr);
+ }
+
+
+ {
+ struct ia_css_binary_descr video_descr;
+ enum ia_css_frame_format vf_info_format;
+
+ err = ia_css_pipe_get_video_binarydesc(pipe,
+ &video_descr, &video_in_info, &video_bds_out_info, &video_bin_out_info, video_vf_info,
+ pipe->stream->config.left_padding);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+
+ /* When video_vf_info is not NULL, this allows us to find a
+ * video binary with the desired vf format.
+ * If that succeeds, no vf_pp binary is needed.
+ * If it fails, we look up a video binary with the YUV_LINE vf format.
+ */
+ err = ia_css_binary_find(&video_descr,
+ &mycs->video_binary);
+
+ if (err != IA_CSS_SUCCESS) {
+ if (video_vf_info) {
+ /* This will do another video binary lookup later for YUV_LINE format*/
+ need_vf_pp = true;
+ } else
+ return err;
+ } else if (video_vf_info) {
+ /* The first video binary lookup is successful, but we may
+ * still need a vf_pp binary based on additional checks */
+ num_output_pins = mycs->video_binary.info->num_output_pins;
+ vf_ds_log2 = mycs->video_binary.vf_downscale_log2;
+
+ /* If the binary has dual output pins, we need vf_pp if the resolution
+ * is different. */
+ need_vf_pp |= ((num_output_pins == 2) && vf_res_different_than_output);
+
+ /* If the binary has single output pin, we need vf_pp if additional
+ * scaling is needed for vf */
+ need_vf_pp |= ((num_output_pins == 1) &&
+ ((video_vf_info->res.width << vf_ds_log2 != pipe_out_info->res.width) ||
+ (video_vf_info->res.height << vf_ds_log2 != pipe_out_info->res.height)));
+ }
+
+ if (need_vf_pp) {
+ /* save the current vf_info format for restoration later */
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "load_video_binaries() need_vf_pp; find video binary with YUV_LINE again\n");
+
+ vf_info_format = video_vf_info->format;
+
+ if (!pipe->config.enable_vfpp_bci)
+ ia_css_frame_info_set_format(video_vf_info,
+ IA_CSS_FRAME_FORMAT_YUV_LINE);
+
+ ia_css_binary_destroy_isp_parameters(&mycs->video_binary);
+
+ err = ia_css_binary_find(&video_descr,
+ &mycs->video_binary);
+
+ /* restore original vf_info format */
+ ia_css_frame_info_set_format(video_vf_info,
+ vf_info_format);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+ }
+ }
+
+ /* If a video binary does not use a ref_frame, we set the frame delay
+ * to 0. This is the case for the 1-stage low-power video binary. */
+ if (!mycs->video_binary.info->sp.enable.ref_frame)
+ pipe->dvs_frame_delay = 0;
+
+ /* The delay latency determines the number of invalid frames after
+ * a stream is started. */
+ pipe->num_invalid_frames = pipe->dvs_frame_delay;
+ pipe->info.num_invalid_frames = pipe->num_invalid_frames;
+
+ /* Viewfinder frames also decrement num_invalid_frames. If the pipe
+ * outputs a viewfinder output, then we need double the number of
+ * invalid frames */
+ if (video_vf_info)
+ pipe->num_invalid_frames *= 2;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "load_video_binaries() num_invalid_frames=%d dvs_frame_delay=%d\n",
+ pipe->num_invalid_frames, pipe->dvs_frame_delay);
+
+/* pqiao TODO: temp hack for PO, should be removed after offline YUVPP is enabled */
+#if !defined(USE_INPUT_SYSTEM_VERSION_2401)
+ /* Copy */
+ if (!online && !continuous) {
+ /* TODO: what exactly needs doing here? Prepend the copy binary
+ * to the video pipeline, and base this only on !online?
+ */
+ err = load_copy_binary(pipe,
+ &mycs->copy_binary,
+ &mycs->video_binary);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+ }
+#else
+ (void)continuous;
+#endif
+
+#if !defined(HAS_OUTPUT_SYSTEM)
+ if (pipe->enable_viewfinder[IA_CSS_PIPE_OUTPUT_STAGE_0] && need_vf_pp) {
+ struct ia_css_binary_descr vf_pp_descr;
+
+ if (mycs->video_binary.vf_frame_info.format
+ == IA_CSS_FRAME_FORMAT_YUV_LINE) {
+ ia_css_pipe_get_vfpp_binarydesc(pipe, &vf_pp_descr,
+ &mycs->video_binary.vf_frame_info,
+ pipe_vf_out_info);
+ } else {
+ /* output from main binary is not yuv line. currently this is
+ * possible only when bci is enabled on vfpp output */
+ assert(pipe->config.enable_vfpp_bci == true);
+ ia_css_pipe_get_yuvscaler_binarydesc(pipe, &vf_pp_descr,
+ &mycs->video_binary.vf_frame_info,
+ pipe_vf_out_info, NULL, NULL);
+ }
+
+ err = ia_css_binary_find(&vf_pp_descr,
+ &mycs->vf_pp_binary);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+ }
+#endif
+
+ err = allocate_delay_frames(pipe);
+
+ if (err != IA_CSS_SUCCESS)
+ return err;
+
+ if (mycs->video_binary.info->sp.enable.block_output) {
+#ifdef ISP2401
+ unsigned int tnr_width;
+ unsigned int tnr_height;
+#endif
+ tnr_info = mycs->video_binary.out_frame_info[0];
+#ifdef ISP2401
+
+ /* Select the resolution for TNR. If
+ * output_system_in_resolution (GDC_out_resolution) is
+ * being used, select that, as it will also be the input resolution
+ * for TNR. At present this only makes sense for Skycam. */
+ if (pipe->config.output_system_in_res.width && pipe->config.output_system_in_res.height) {
+ tnr_width = pipe->config.output_system_in_res.width;
+ tnr_height = pipe->config.output_system_in_res.height;
+ } else {
+ tnr_width = tnr_info.res.width;
+ tnr_height = tnr_info.res.height;
+ }
+
+ /* Align the width of the TNR reference buffers to the output block width (in pixels) */
+ tnr_info.res.width =
+ CEIL_MUL(tnr_width,
+ (mycs->video_binary.info->sp.block.block_width * ISP_NWAY));
+ tnr_info.padded_width = tnr_info.res.width;
+
+#endif
+ /* Align the height of the TNR reference buffers to the output block height */
+ tnr_info.res.height =
+#ifndef ISP2401
+ CEIL_MUL(tnr_info.res.height,
+#else
+ CEIL_MUL(tnr_height,
+#endif
+ mycs->video_binary.info->sp.block.output_block_height);
+ } else {
+ tnr_info = mycs->video_binary.internal_frame_info;
+ }
+ tnr_info.format = IA_CSS_FRAME_FORMAT_YUV_LINE;
+ tnr_info.raw_bit_depth = SH_CSS_TNR_BIT_DEPTH;
+
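+ /* Free any previously allocated TNR reference frames and re-allocate them to match the tnr_info computed above */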
+#ifndef ISP2401
+ for (i = 0; i < NUM_VIDEO_TNR_FRAMES; i++) {
+#else
+ for (i = 0; i < NUM_TNR_FRAMES; i++) {
+#endif
+ if (mycs->tnr_frames[i]) {
+ ia_css_frame_free(mycs->tnr_frames[i]);
+ mycs->tnr_frames[i] = NULL;
+ }
+ err = ia_css_frame_allocate_from_info(
+ &mycs->tnr_frames[i],
+ &tnr_info);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+ }
+ IA_CSS_LEAVE_PRIVATE("");
+ return IA_CSS_SUCCESS;
+}
+
+static enum ia_css_err
+unload_video_binaries(struct ia_css_pipe *pipe)
+{
+ unsigned int i;
+ IA_CSS_ENTER_PRIVATE("pipe = %p", pipe);
+
+ if ((pipe == NULL) || (pipe->mode != IA_CSS_PIPE_ID_VIDEO)) {
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_INVALID_ARGUMENTS);
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+ ia_css_binary_unload(&pipe->pipe_settings.video.copy_binary);
+ ia_css_binary_unload(&pipe->pipe_settings.video.video_binary);
+ ia_css_binary_unload(&pipe->pipe_settings.video.vf_pp_binary);
+
+ for (i = 0; i < pipe->pipe_settings.video.num_yuv_scaler; i++)
+ ia_css_binary_unload(&pipe->pipe_settings.video.yuv_scaler_binary[i]);
+
+ kfree(pipe->pipe_settings.video.is_output_stage);
+ pipe->pipe_settings.video.is_output_stage = NULL;
+ kfree(pipe->pipe_settings.video.yuv_scaler_binary);
+ pipe->pipe_settings.video.yuv_scaler_binary = NULL;
+
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_SUCCESS);
+ return IA_CSS_SUCCESS;
+}
+
+static enum ia_css_err video_start(struct ia_css_pipe *pipe)
+{
+ struct ia_css_binary *copy_binary;
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ struct ia_css_pipe *copy_pipe, *capture_pipe;
+ enum sh_css_pipe_config_override copy_ovrd;
+ enum ia_css_input_mode video_pipe_input_mode;
+
+
+ IA_CSS_ENTER_PRIVATE("pipe = %p", pipe);
+ if ((pipe == NULL) || (pipe->mode != IA_CSS_PIPE_ID_VIDEO)) {
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_INVALID_ARGUMENTS);
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+
+ video_pipe_input_mode = pipe->stream->config.mode;
+
+ copy_pipe = pipe->pipe_settings.video.copy_pipe;
+ capture_pipe = pipe->pipe_settings.video.capture_pipe;
+
+ copy_binary = &pipe->pipe_settings.video.copy_binary;
+
+ sh_css_metrics_start_frame();
+
+ /* multi stream video needs mipi buffers */
+
+#if defined(USE_INPUT_SYSTEM_VERSION_2) || defined(USE_INPUT_SYSTEM_VERSION_2401)
+ err = send_mipi_frames(pipe);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+#endif
+
+ send_raw_frames(pipe);
+ {
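+ /* Build a bitmask of SP thread ids: this video pipe and, when continuous
+ * capture is enabled, the capture pipe. The mask is passed below as
+ * copy_ovrd to sh_css_sp_init_pipeline() and start_pipe(). */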
+ unsigned int thread_id;
+
+ ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(pipe), &thread_id);
+ copy_ovrd = 1 << thread_id;
+
+ if (pipe->stream->cont_capt) {
+ ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(capture_pipe), &thread_id);
+ copy_ovrd |= 1 << thread_id;
+ }
+ }
+
+ /* Construct and load the copy pipe */
+ if (pipe->stream->config.continuous) {
+ sh_css_sp_init_pipeline(&copy_pipe->pipeline,
+ IA_CSS_PIPE_ID_COPY,
+ (uint8_t)ia_css_pipe_get_pipe_num(copy_pipe),
+ false,
+ pipe->stream->config.pixels_per_clock == 2, false,
+ false, pipe->required_bds_factor,
+ copy_ovrd,
+ pipe->stream->config.mode,
+ &pipe->stream->config.metadata_config,
+#ifndef ISP2401
+ &pipe->stream->info.metadata_info
+#else
+ &pipe->stream->info.metadata_info,
+#endif
+#if !defined(HAS_NO_INPUT_SYSTEM)
+#ifndef ISP2401
+ , pipe->stream->config.source.port.port
+#else
+ pipe->stream->config.source.port.port,
+#endif
+#endif
+#ifndef ISP2401
+ );
+#else
+ &copy_pipe->config.internal_frame_origin_bqs_on_sctbl,
+ copy_pipe->stream->isp_params_configs);
+#endif
+
+ /* make the video pipe start with mem mode input, copy handles
+ the actual mode */
+ video_pipe_input_mode = IA_CSS_INPUT_MODE_MEMORY;
+ }
+
+ /* Construct and load the capture pipe */
+ if (pipe->stream->cont_capt) {
+ sh_css_sp_init_pipeline(&capture_pipe->pipeline,
+ IA_CSS_PIPE_ID_CAPTURE,
+ (uint8_t)ia_css_pipe_get_pipe_num(capture_pipe),
+ capture_pipe->config.default_capture_config.enable_xnr != 0,
+ capture_pipe->stream->config.pixels_per_clock == 2,
+ true, /* continuous */
+ false, /* offline */
+ capture_pipe->required_bds_factor,
+ 0,
+ IA_CSS_INPUT_MODE_MEMORY,
+ &pipe->stream->config.metadata_config,
+#ifndef ISP2401
+ &pipe->stream->info.metadata_info
+#else
+ &pipe->stream->info.metadata_info,
+#endif
+#if !defined(HAS_NO_INPUT_SYSTEM)
+#ifndef ISP2401
+ , (mipi_port_ID_t)0
+#else
+ (mipi_port_ID_t)0,
+#endif
+#endif
+#ifndef ISP2401
+ );
+#else
+ &capture_pipe->config.internal_frame_origin_bqs_on_sctbl,
+ capture_pipe->stream->isp_params_configs);
+#endif
+ }
+
+ start_pipe(pipe, copy_ovrd, video_pipe_input_mode);
+
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+}
+
+static
+enum ia_css_err sh_css_pipe_get_viewfinder_frame_info(
+ struct ia_css_pipe *pipe,
+ struct ia_css_frame_info *info,
+ unsigned int idx)
+{
+ assert(pipe != NULL);
+ assert(info != NULL);
+
+/* We could print the pointer as input arg, and the values as output */
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "sh_css_pipe_get_viewfinder_frame_info() enter: void\n");
+
+ if (pipe->mode == IA_CSS_PIPE_ID_CAPTURE &&
+ (pipe->config.default_capture_config.mode == IA_CSS_CAPTURE_MODE_RAW ||
+ pipe->config.default_capture_config.mode == IA_CSS_CAPTURE_MODE_BAYER))
+ return IA_CSS_ERR_MODE_HAS_NO_VIEWFINDER;
+ /* offline video does not generate viewfinder output */
+ *info = pipe->vf_output_info[idx];
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "sh_css_pipe_get_viewfinder_frame_info() leave: "
+ "info.res.width=%d, info.res.height=%d, "
+ "info.padded_width=%d, info.format=%d, "
+ "info.raw_bit_depth=%d, info.raw_bayer_order=%d\n",
+ info->res.width, info->res.height,
+ info->padded_width, info->format,
+ info->raw_bit_depth, info->raw_bayer_order);
+
+ return IA_CSS_SUCCESS;
+}
+
+static enum ia_css_err
+sh_css_pipe_configure_viewfinder(struct ia_css_pipe *pipe, unsigned int width,
+ unsigned int height, unsigned int min_width,
+ enum ia_css_frame_format format,
+ unsigned int idx)
+{
+ enum ia_css_err err = IA_CSS_SUCCESS;
+
+ IA_CSS_ENTER_PRIVATE("pipe = %p, width = %d, height = %d, min_width = %d, format = %d, idx = %d\n",
+ pipe, width, height, min_width, format, idx);
+
+ if (pipe == NULL) {
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_INVALID_ARGUMENTS);
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+
+
+ err = ia_css_util_check_res(width, height);
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ if (pipe->vf_output_info[idx].res.width != width ||
+ pipe->vf_output_info[idx].res.height != height ||
+ pipe->vf_output_info[idx].format != format) {
+ ia_css_frame_info_init(&pipe->vf_output_info[idx], width, height,
+ format, min_width);
+ }
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_SUCCESS);
+ return IA_CSS_SUCCESS;
+}
+
+static enum ia_css_err load_copy_binaries(struct ia_css_pipe *pipe)
+{
+ enum ia_css_err err = IA_CSS_SUCCESS;
+
+ assert(pipe != NULL);
+ IA_CSS_ENTER_PRIVATE("");
+
+ assert(pipe->mode == IA_CSS_PIPE_ID_CAPTURE || pipe->mode == IA_CSS_PIPE_ID_COPY);
+ if (pipe->pipe_settings.capture.copy_binary.info)
+ return IA_CSS_SUCCESS;
+
+ err = ia_css_frame_check_info(&pipe->output_info[0]);
+ if (err != IA_CSS_SUCCESS)
+ goto ERR;
+
+ err = verify_copy_out_frame_format(pipe);
+ if (err != IA_CSS_SUCCESS)
+ goto ERR;
+
+ err = load_copy_binary(pipe,
+ &pipe->pipe_settings.capture.copy_binary,
+ NULL);
+
+ERR:
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+}
+
+static bool need_capture_pp(
+ const struct ia_css_pipe *pipe)
+{
+ const struct ia_css_frame_info *out_info = &pipe->output_info[0];
+ IA_CSS_ENTER_LEAVE_PRIVATE("");
+ assert(pipe != NULL);
+ assert(pipe->mode == IA_CSS_PIPE_ID_CAPTURE);
+#ifdef ISP2401
+
+ /* ldc and capture_pp are not supported in the same pipeline */
+ if (need_capt_ldc(pipe) == true)
+ return false;
+#endif
+ /* determine whether we need to use the capture_pp binary.
+ * This is needed for:
+ * 1. XNR or
+ * 2. Digital Zoom or
+ * 3. YUV downscaling
+ */
+ if (pipe->out_yuv_ds_input_info.res.width &&
+ ((pipe->out_yuv_ds_input_info.res.width != out_info->res.width) ||
+ (pipe->out_yuv_ds_input_info.res.height != out_info->res.height)))
+ return true;
+
+ if (pipe->config.default_capture_config.enable_xnr != 0)
+ return true;
+
+ if ((pipe->stream->isp_params_configs->dz_config.dx < HRT_GDC_N) ||
+ (pipe->stream->isp_params_configs->dz_config.dy < HRT_GDC_N) ||
+ pipe->config.enable_dz)
+ return true;
+
+ return false;
+}
+
+static bool need_capt_ldc(
+ const struct ia_css_pipe *pipe)
+{
+ IA_CSS_ENTER_LEAVE_PRIVATE("");
+ assert(pipe != NULL);
+ assert(pipe->mode == IA_CSS_PIPE_ID_CAPTURE);
+ return pipe->extra_config.enable_dvs_6axis ? true : false;
+}
+
+static enum ia_css_err set_num_primary_stages(unsigned int *num, enum ia_css_pipe_version version)
+{
+ enum ia_css_err err = IA_CSS_SUCCESS;
+
+ if (num == NULL)
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+
+ switch (version) {
+ case IA_CSS_PIPE_VERSION_2_6_1:
+ *num = NUM_PRIMARY_HQ_STAGES;
+ break;
+ case IA_CSS_PIPE_VERSION_2_2:
+ case IA_CSS_PIPE_VERSION_1:
+ *num = NUM_PRIMARY_STAGES;
+ break;
+ default:
+ err = IA_CSS_ERR_INVALID_ARGUMENTS;
+ break;
+ }
+
+ return err;
+}
+
+static enum ia_css_err load_primary_binaries(
+ struct ia_css_pipe *pipe)
+{
+ bool online = false;
+ bool memory = false;
+ bool continuous = false;
+ bool need_pp = false;
+ bool need_isp_copy_binary = false;
+ bool need_ldc = false;
+#ifdef USE_INPUT_SYSTEM_VERSION_2401
+ bool sensor = false;
+#endif
+ struct ia_css_frame_info prim_in_info,
+ prim_out_info,
+ capt_pp_out_info, vf_info,
+ *vf_pp_in_info, *pipe_out_info,
+#ifndef ISP2401
+ *pipe_vf_out_info, *capt_pp_in_info,
+ capt_ldc_out_info;
+#else
+ *pipe_vf_out_info;
+#endif
+#if defined(HAS_RES_MGR)
+ struct ia_css_frame_info bds_out_info;
+#endif
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ struct ia_css_capture_settings *mycs;
+ unsigned int i;
+ bool need_extra_yuv_scaler = false;
+
+ IA_CSS_ENTER_PRIVATE("");
+ assert(pipe != NULL);
+ assert(pipe->stream != NULL);
+ assert(pipe->mode == IA_CSS_PIPE_ID_CAPTURE || pipe->mode == IA_CSS_PIPE_ID_COPY);
+
+ online = pipe->stream->config.online;
+ memory = pipe->stream->config.mode == IA_CSS_INPUT_MODE_MEMORY;
+ continuous = pipe->stream->config.continuous;
+#ifdef USE_INPUT_SYSTEM_VERSION_2401
+ sensor = (pipe->stream->config.mode == IA_CSS_INPUT_MODE_SENSOR);
+#endif
+
+ mycs = &pipe->pipe_settings.capture;
+ pipe_out_info = &pipe->output_info[0];
+ pipe_vf_out_info = &pipe->vf_output_info[0];
+
+ if (mycs->primary_binary[0].info)
+ return IA_CSS_SUCCESS;
+
+ err = set_num_primary_stages(&mycs->num_primary_stage, pipe->config.isp_pipe_version);
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+
+ if (pipe->enable_viewfinder[IA_CSS_PIPE_OUTPUT_STAGE_0]) {
+ err = ia_css_util_check_vf_out_info(pipe_out_info, pipe_vf_out_info);
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ } else {
+ err = ia_css_frame_check_info(pipe_out_info);
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ }
+ need_pp = need_capture_pp(pipe);
+
+ /* we use the vf output info to get the primary/capture_pp binary
+ configured for vf_veceven. It will select the closest downscaling
+ factor. */
+ vf_info = *pipe_vf_out_info;
+
+/*
+ * WARNING: The #ifdef flag has been added below as a
+ * temporary solution to solve the problem of enabling the
+ * view finder in a single binary in a capture flow. The
+ * vf-pp stage has been removed for Skycam in the solution
+ * provided. The vf-pp stage should be re-introduced when
+ * required. This should not be considered as a clean solution.
+ * Proper investigation should be done to come up with the clean
+ * solution.
+ * */
+ ia_css_frame_info_set_format(&vf_info, IA_CSS_FRAME_FORMAT_YUV_LINE);
+
+ /* TODO: All this yuv_scaler and capturepp calculation logic
+ * can be shared later. Capture_pp is also a yuv_scale binary
+ * with extra XNR functionality. Therefore, it can be made the
+ * first step of the cascade. */
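+ /* capture_pp output is set one MAX_PREFERRED_YUV_DS_PER_STEP step below its
+ * YUV-DS input; any remaining downscaling to the pipe output resolution is
+ * handled by the extra yuv_scaler cascade checked below. */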
+ capt_pp_out_info = pipe->out_yuv_ds_input_info;
+ capt_pp_out_info.format = IA_CSS_FRAME_FORMAT_YUV420;
+ capt_pp_out_info.res.width /= MAX_PREFERRED_YUV_DS_PER_STEP;
+ capt_pp_out_info.res.height /= MAX_PREFERRED_YUV_DS_PER_STEP;
+ ia_css_frame_info_set_width(&capt_pp_out_info, capt_pp_out_info.res.width, 0);
+
+/*
+ * WARNING: The #ifdef flag has been added below as a
+ * temporary solution to solve the problem of enabling the
+ * view finder in a single binary in a capture flow. The
+ * vf-pp stage has been removed for Skycam in the solution
+ * provided. The vf-pp stage should be re-introduced when
+ * required. This should not be considered as a clean solution.
+ * Proper investigation should be done to come up with the clean
+ * solution.
+ * */
+ need_extra_yuv_scaler = need_downscaling(capt_pp_out_info.res,
+ pipe_out_info->res);
+
+ if (need_extra_yuv_scaler) {
+ struct ia_css_cas_binary_descr cas_scaler_descr
+ = IA_CSS_DEFAULT_CAS_BINARY_DESCR;
+ err = ia_css_pipe_create_cas_scaler_desc_single_output(
+ &capt_pp_out_info,
+ pipe_out_info,
+ NULL,
+ &cas_scaler_descr);
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ mycs->num_yuv_scaler = cas_scaler_descr.num_stage;
+ mycs->yuv_scaler_binary = kzalloc(cas_scaler_descr.num_stage *
+ sizeof(struct ia_css_binary), GFP_KERNEL);
+ if (mycs->yuv_scaler_binary == NULL) {
+ err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ mycs->is_output_stage = kzalloc(cas_scaler_descr.num_stage *
+ sizeof(bool), GFP_KERNEL);
+ if (mycs->is_output_stage == NULL) {
+ err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ for (i = 0; i < cas_scaler_descr.num_stage; i++) {
+ struct ia_css_binary_descr yuv_scaler_descr;
+ mycs->is_output_stage[i] = cas_scaler_descr.is_output_stage[i];
+ ia_css_pipe_get_yuvscaler_binarydesc(pipe,
+ &yuv_scaler_descr, &cas_scaler_descr.in_info[i],
+ &cas_scaler_descr.out_info[i],
+ &cas_scaler_descr.internal_out_info[i],
+ &cas_scaler_descr.vf_info[i]);
+#if defined(HAS_RES_MGR)
+ bds_out_info.res = pipe->config.bayer_ds_out_res;
+ yuv_scaler_descr.bds_out_info = &bds_out_info;
+#endif
+ err = ia_css_binary_find(&yuv_scaler_descr,
+ &mycs->yuv_scaler_binary[i]);
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ }
+ ia_css_pipe_destroy_cas_scaler_desc(&cas_scaler_descr);
+
+ } else {
+ capt_pp_out_info = pipe->output_info[0];
+ }
+
+ /* TODO Do we disable ldc for skycam */
+ need_ldc = need_capt_ldc(pipe);
+#ifdef ISP2401
+ /* ldc and capt_pp are not supported in the same pipeline */
+ if (need_ldc) {
+ struct ia_css_binary_descr capt_ldc_descr;
+ ia_css_pipe_get_ldc_binarydesc(pipe,
+ &capt_ldc_descr, &prim_out_info,
+ &capt_pp_out_info);
+#endif
+
+#ifdef ISP2401
+ err = ia_css_binary_find(&capt_ldc_descr,
+ &mycs->capture_ldc_binary);
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ } else if (need_pp) {
+#endif
+ /* we build up the pipeline starting at the end */
+ /* Capture post-processing */
+#ifndef ISP2401
+ if (need_pp) {
+#endif
+ struct ia_css_binary_descr capture_pp_descr;
+#ifndef ISP2401
+ capt_pp_in_info = need_ldc ? &capt_ldc_out_info : &prim_out_info;
+#endif
+
+ ia_css_pipe_get_capturepp_binarydesc(pipe,
+#ifndef ISP2401
+ &capture_pp_descr, capt_pp_in_info,
+#else
+ &capture_pp_descr, &prim_out_info,
+#endif
+ &capt_pp_out_info, &vf_info);
+#if defined(HAS_RES_MGR)
+ bds_out_info.res = pipe->config.bayer_ds_out_res;
+ capture_pp_descr.bds_out_info = &bds_out_info;
+#endif
+ err = ia_css_binary_find(&capture_pp_descr,
+ &mycs->capture_pp_binary);
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+#ifndef ISP2401
+
+ if (need_ldc) {
+ struct ia_css_binary_descr capt_ldc_descr;
+ ia_css_pipe_get_ldc_binarydesc(pipe,
+ &capt_ldc_descr, &prim_out_info,
+ &capt_ldc_out_info);
+
+ err = ia_css_binary_find(&capt_ldc_descr,
+ &mycs->capture_ldc_binary);
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ }
+#endif
+ } else {
+ prim_out_info = *pipe_out_info;
+ }
+
+ /* Primary */
+ {
+ struct ia_css_binary_descr prim_descr[MAX_NUM_PRIMARY_STAGES];
+
+ for (i = 0; i < mycs->num_primary_stage; i++) {
+ struct ia_css_frame_info *local_vf_info = NULL;
+ if (pipe->enable_viewfinder[IA_CSS_PIPE_OUTPUT_STAGE_0] && (i == mycs->num_primary_stage - 1))
+ local_vf_info = &vf_info;
+ ia_css_pipe_get_primary_binarydesc(pipe, &prim_descr[i], &prim_in_info, &prim_out_info, local_vf_info, i);
+#if defined(HAS_RES_MGR)
+ bds_out_info.res = pipe->config.bayer_ds_out_res;
+ prim_descr[i].bds_out_info = &bds_out_info;
+#endif
+ err = ia_css_binary_find(&prim_descr[i], &mycs->primary_binary[i]);
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ }
+ }
+
+ /* Viewfinder post-processing */
+ if (need_pp) {
+ vf_pp_in_info =
+ &mycs->capture_pp_binary.vf_frame_info;
+ } else {
+ vf_pp_in_info =
+ &mycs->primary_binary[mycs->num_primary_stage - 1].vf_frame_info;
+ }
+
+/*
+ * WARNING: The #ifdef flag has been added below as a
+ * temporary solution to solve the problem of enabling the
+ * view finder in a single binary in a capture flow. The
+ * vf-pp stage has been removed for Skycam in the solution
+ * provided. The vf-pp stage should be re-introduced when
+ * required. This should not be considered as a clean solution.
+ * Proper investigation should be done to come up with the clean
+ * solution.
+ * */
+ if (pipe->enable_viewfinder[IA_CSS_PIPE_OUTPUT_STAGE_0])
+ {
+ struct ia_css_binary_descr vf_pp_descr;
+
+ ia_css_pipe_get_vfpp_binarydesc(pipe,
+ &vf_pp_descr, vf_pp_in_info, pipe_vf_out_info);
+#if defined(HAS_RES_MGR)
+ bds_out_info.res = pipe->config.bayer_ds_out_res;
+ vf_pp_descr.bds_out_info = &bds_out_info;
+#endif
+ err = ia_css_binary_find(&vf_pp_descr, &mycs->vf_pp_binary);
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ }
+ err = allocate_delay_frames(pipe);
+
+ if (err != IA_CSS_SUCCESS)
+ return err;
+
+#ifdef USE_INPUT_SYSTEM_VERSION_2401
+ /* When the input system is 2401, only the Direct Sensor Mode
+ * Offline Capture uses the ISP copy binary.
+ */
+ need_isp_copy_binary = !online && sensor;
+#else
+ need_isp_copy_binary = !online && !continuous && !memory;
+#endif
+
+ /* ISP Copy */
+ if (need_isp_copy_binary) {
+ err = load_copy_binary(pipe,
+ &mycs->copy_binary,
+ &mycs->primary_binary[0]);
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ }
+
+ return IA_CSS_SUCCESS;
+}
+
+static enum ia_css_err
+allocate_delay_frames(struct ia_css_pipe *pipe)
+{
+ unsigned int num_delay_frames = 0, i = 0;
+ unsigned int dvs_frame_delay = 0;
+ struct ia_css_frame_info ref_info;
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ enum ia_css_pipe_id mode = IA_CSS_PIPE_ID_VIDEO;
+ struct ia_css_frame **delay_frames = NULL;
+
+ IA_CSS_ENTER_PRIVATE("");
+
+ if (pipe == NULL) {
+ IA_CSS_ERROR("Invalid args - pipe %p", pipe);
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+
+ mode = pipe->mode;
+ dvs_frame_delay = pipe->dvs_frame_delay;
+
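+ /* A DVS delay of N frames needs N + 1 delay buffers: one for the frame
+ * currently being produced plus one for each delayed reference frame */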
+ if (dvs_frame_delay > 0)
+ num_delay_frames = dvs_frame_delay + 1;
+
+ switch (mode) {
+ case IA_CSS_PIPE_ID_CAPTURE:
+ {
+ struct ia_css_capture_settings *mycs_capture = &pipe->pipe_settings.capture;
+ (void)mycs_capture;
+ return err;
+ }
+ break;
+ case IA_CSS_PIPE_ID_VIDEO:
+ {
+ struct ia_css_video_settings *mycs_video = &pipe->pipe_settings.video;
+ ref_info = mycs_video->video_binary.internal_frame_info;
+ /* The ref frame expects:
+ * 1. a Y plane
+ * 2. a UV plane with line interleaving, like below:
+ * UUUUUU (width/2 times) VVVVVV (width/2 times)
+ *
+ * This format is not YUV420 (which has separate Y, U and V planes).
+ * It is closer to NV12, except that NV12's UV plane is pixel
+ * interleaved (UVUVUVUV...), whereas here each chroma line carries
+ * all U samples followed by all V samples.
+ *
+ * TODO: make this ref_frame format a separate frame format
+ */
+ ref_info.format = IA_CSS_FRAME_FORMAT_NV12;
+ delay_frames = mycs_video->delay_frames;
+ }
+ break;
+ case IA_CSS_PIPE_ID_PREVIEW:
+ {
+ struct ia_css_preview_settings *mycs_preview = &pipe->pipe_settings.preview;
+ ref_info = mycs_preview->preview_binary.internal_frame_info;
+ /* The ref frame expects:
+ * 1. a Y plane
+ * 2. a UV plane with line interleaving, like below:
+ * UUUUUU (width/2 times) VVVVVV (width/2 times)
+ *
+ * This format is not YUV420 (which has separate Y, U and V planes).
+ * It is closer to NV12, except that NV12's UV plane is pixel
+ * interleaved (UVUVUVUV...), whereas here each chroma line carries
+ * all U samples followed by all V samples.
+ *
+ * TODO: make this ref_frame format a separate frame format
+ */
+ ref_info.format = IA_CSS_FRAME_FORMAT_NV12;
+ delay_frames = mycs_preview->delay_frames;
+ }
+ break;
+ default:
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+
+ }
+
+ ref_info.raw_bit_depth = SH_CSS_REF_BIT_DEPTH;
+
+ assert(num_delay_frames <= MAX_NUM_VIDEO_DELAY_FRAMES);
+ for (i = 0; i < num_delay_frames; i++) {
+ err = ia_css_frame_allocate_from_info(&delay_frames[i], &ref_info);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+ }
+ IA_CSS_LEAVE_PRIVATE("");
+ return IA_CSS_SUCCESS;
+}
+
+static enum ia_css_err load_advanced_binaries(
+ struct ia_css_pipe *pipe)
+{
+ struct ia_css_frame_info pre_in_info, gdc_in_info,
+ post_in_info, post_out_info,
+ vf_info, *vf_pp_in_info, *pipe_out_info,
+ *pipe_vf_out_info;
+ bool need_pp;
+ bool need_isp_copy = true;
+ enum ia_css_err err = IA_CSS_SUCCESS;
+
+ IA_CSS_ENTER_PRIVATE("");
+
+ assert(pipe != NULL);
+ assert(pipe->mode == IA_CSS_PIPE_ID_CAPTURE || pipe->mode == IA_CSS_PIPE_ID_COPY);
+ if (pipe->pipe_settings.capture.pre_isp_binary.info)
+ return IA_CSS_SUCCESS;
+ pipe_out_info = &pipe->output_info[0];
+ pipe_vf_out_info = &pipe->vf_output_info[0];
+
+ vf_info = *pipe_vf_out_info;
+ err = ia_css_util_check_vf_out_info(pipe_out_info, &vf_info);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+ need_pp = need_capture_pp(pipe);
+
+ ia_css_frame_info_set_format(&vf_info,
+ IA_CSS_FRAME_FORMAT_YUV_LINE);
+
+ /* we build up the pipeline starting at the end */
+ /* Capture post-processing */
+ if (need_pp) {
+ struct ia_css_binary_descr capture_pp_descr;
+
+ ia_css_pipe_get_capturepp_binarydesc(pipe,
+ &capture_pp_descr, &post_out_info, pipe_out_info, &vf_info);
+ err = ia_css_binary_find(&capture_pp_descr,
+ &pipe->pipe_settings.capture.capture_pp_binary);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+ } else {
+ post_out_info = *pipe_out_info;
+ }
+
+ /* Post-gdc */
+ {
+ struct ia_css_binary_descr post_gdc_descr;
+
+ ia_css_pipe_get_post_gdc_binarydesc(pipe,
+ &post_gdc_descr, &post_in_info, &post_out_info, &vf_info);
+ err = ia_css_binary_find(&post_gdc_descr,
+ &pipe->pipe_settings.capture.post_isp_binary);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+ }
+
+ /* Gdc */
+ {
+ struct ia_css_binary_descr gdc_descr;
+
+ ia_css_pipe_get_gdc_binarydesc(pipe, &gdc_descr, &gdc_in_info,
+ &pipe->pipe_settings.capture.post_isp_binary.in_frame_info);
+ err = ia_css_binary_find(&gdc_descr,
+ &pipe->pipe_settings.capture.anr_gdc_binary);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+ }
+ pipe->pipe_settings.capture.anr_gdc_binary.left_padding =
+ pipe->pipe_settings.capture.post_isp_binary.left_padding;
+
+ /* Pre-gdc */
+ {
+ struct ia_css_binary_descr pre_gdc_descr;
+
+ ia_css_pipe_get_pre_gdc_binarydesc(pipe, &pre_gdc_descr, &pre_in_info,
+ &pipe->pipe_settings.capture.anr_gdc_binary.in_frame_info);
+ err = ia_css_binary_find(&pre_gdc_descr,
+ &pipe->pipe_settings.capture.pre_isp_binary);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+ }
+ pipe->pipe_settings.capture.pre_isp_binary.left_padding =
+ pipe->pipe_settings.capture.anr_gdc_binary.left_padding;
+
+ /* Viewfinder post-processing */
+ if (need_pp) {
+ vf_pp_in_info =
+ &pipe->pipe_settings.capture.capture_pp_binary.vf_frame_info;
+ } else {
+ vf_pp_in_info =
+ &pipe->pipe_settings.capture.post_isp_binary.vf_frame_info;
+ }
+
+ {
+ struct ia_css_binary_descr vf_pp_descr;
+
+ ia_css_pipe_get_vfpp_binarydesc(pipe,
+ &vf_pp_descr, vf_pp_in_info, pipe_vf_out_info);
+ err = ia_css_binary_find(&vf_pp_descr,
+ &pipe->pipe_settings.capture.vf_pp_binary);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+ }
+
+ /* Copy */
+#ifdef USE_INPUT_SYSTEM_VERSION_2401
+ /* For CSI2+, only the direct sensor mode/online requires ISP copy */
+ need_isp_copy = pipe->stream->config.mode == IA_CSS_INPUT_MODE_SENSOR;
+#endif
+ if (need_isp_copy)
+ load_copy_binary(pipe,
+ &pipe->pipe_settings.capture.copy_binary,
+ &pipe->pipe_settings.capture.pre_isp_binary);
+
+ return err;
+}
+
+static enum ia_css_err load_bayer_isp_binaries(
+ struct ia_css_pipe *pipe)
+{
+ struct ia_css_frame_info pre_isp_in_info, *pipe_out_info;
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ struct ia_css_binary_descr pre_de_descr;
+
+ IA_CSS_ENTER_PRIVATE("");
+ assert(pipe != NULL);
+ assert(pipe->mode == IA_CSS_PIPE_ID_CAPTURE || pipe->mode == IA_CSS_PIPE_ID_COPY);
+ pipe_out_info = &pipe->output_info[0];
+
+ if (pipe->pipe_settings.capture.pre_isp_binary.info)
+ return IA_CSS_SUCCESS;
+
+ err = ia_css_frame_check_info(pipe_out_info);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+
+ ia_css_pipe_get_pre_de_binarydesc(pipe, &pre_de_descr,
+ &pre_isp_in_info,
+ pipe_out_info);
+
+ err = ia_css_binary_find(&pre_de_descr,
+ &pipe->pipe_settings.capture.pre_isp_binary);
+
+ return err;
+}
+
+static enum ia_css_err load_low_light_binaries(
+ struct ia_css_pipe *pipe)
+{
+ struct ia_css_frame_info pre_in_info, anr_in_info,
+ post_in_info, post_out_info,
+ vf_info, *pipe_vf_out_info, *pipe_out_info,
+ *vf_pp_in_info;
+ bool need_pp;
+ bool need_isp_copy = true;
+ enum ia_css_err err = IA_CSS_SUCCESS;
+
+ IA_CSS_ENTER_PRIVATE("");
+ assert(pipe != NULL);
+ assert(pipe->mode == IA_CSS_PIPE_ID_CAPTURE || pipe->mode == IA_CSS_PIPE_ID_COPY);
+
+ if (pipe->pipe_settings.capture.pre_isp_binary.info)
+ return IA_CSS_SUCCESS;
+ pipe_vf_out_info = &pipe->vf_output_info[0];
+ pipe_out_info = &pipe->output_info[0];
+
+ vf_info = *pipe_vf_out_info;
+ err = ia_css_util_check_vf_out_info(pipe_out_info,
+ &vf_info);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+ need_pp = need_capture_pp(pipe);
+
+ ia_css_frame_info_set_format(&vf_info,
+ IA_CSS_FRAME_FORMAT_YUV_LINE);
+
+ /* we build up the pipeline starting at the end */
+ /* Capture post-processing */
+ if (need_pp) {
+ struct ia_css_binary_descr capture_pp_descr;
+
+ ia_css_pipe_get_capturepp_binarydesc(pipe,
+ &capture_pp_descr, &post_out_info, pipe_out_info, &vf_info);
+ err = ia_css_binary_find(&capture_pp_descr,
+ &pipe->pipe_settings.capture.capture_pp_binary);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+ } else {
+ post_out_info = *pipe_out_info;
+ }
+
+ /* Post-anr */
+ {
+ struct ia_css_binary_descr post_anr_descr;
+
+ ia_css_pipe_get_post_anr_binarydesc(pipe,
+ &post_anr_descr, &post_in_info, &post_out_info, &vf_info);
+ err = ia_css_binary_find(&post_anr_descr,
+ &pipe->pipe_settings.capture.post_isp_binary);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+ }
+
+ /* Anr */
+ {
+ struct ia_css_binary_descr anr_descr;
+
+ ia_css_pipe_get_anr_binarydesc(pipe, &anr_descr, &anr_in_info,
+ &pipe->pipe_settings.capture.post_isp_binary.in_frame_info);
+ err = ia_css_binary_find(&anr_descr,
+ &pipe->pipe_settings.capture.anr_gdc_binary);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+ }
+ pipe->pipe_settings.capture.anr_gdc_binary.left_padding =
+ pipe->pipe_settings.capture.post_isp_binary.left_padding;
+
+ /* Pre-anr */
+ {
+ struct ia_css_binary_descr pre_anr_descr;
+
+ ia_css_pipe_get_pre_anr_binarydesc(pipe, &pre_anr_descr, &pre_in_info,
+ &pipe->pipe_settings.capture.anr_gdc_binary.in_frame_info);
+ err = ia_css_binary_find(&pre_anr_descr,
+ &pipe->pipe_settings.capture.pre_isp_binary);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+ }
+ pipe->pipe_settings.capture.pre_isp_binary.left_padding =
+ pipe->pipe_settings.capture.anr_gdc_binary.left_padding;
+
+ /* Viewfinder post-processing */
+ if (need_pp) {
+ vf_pp_in_info =
+ &pipe->pipe_settings.capture.capture_pp_binary.vf_frame_info;
+ } else {
+ vf_pp_in_info =
+ &pipe->pipe_settings.capture.post_isp_binary.vf_frame_info;
+ }
+
+ {
+ struct ia_css_binary_descr vf_pp_descr;
+
+ ia_css_pipe_get_vfpp_binarydesc(pipe,
+ &vf_pp_descr, vf_pp_in_info, pipe_vf_out_info);
+ err = ia_css_binary_find(&vf_pp_descr,
+ &pipe->pipe_settings.capture.vf_pp_binary);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+ }
+
+ /* Copy */
+#ifdef USE_INPUT_SYSTEM_VERSION_2401
+ /* For CSI2+, only the direct sensor mode/online requires ISP copy */
+ need_isp_copy = pipe->stream->config.mode == IA_CSS_INPUT_MODE_SENSOR;
+#endif
+ if (need_isp_copy)
+ err = load_copy_binary(pipe,
+ &pipe->pipe_settings.capture.copy_binary,
+ &pipe->pipe_settings.capture.pre_isp_binary);
+
+ return err;
+}
+
+static bool copy_on_sp(struct ia_css_pipe *pipe)
+{
+ bool rval;
+
+ assert(pipe != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "copy_on_sp() enter:\n");
+
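+ /* Copy is done on the SP only for capture pipes in RAW capture mode whose
+ * input stream format is BINARY_8 or whose pipe mode is IA_CSS_PIPE_MODE_COPY */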
+ rval = true;
+
+ rval &= (pipe->mode == IA_CSS_PIPE_ID_CAPTURE);
+
+ rval &= (pipe->config.default_capture_config.mode == IA_CSS_CAPTURE_MODE_RAW);
+
+ rval &= ((pipe->stream->config.input_config.format == IA_CSS_STREAM_FORMAT_BINARY_8) ||
+ (pipe->config.mode == IA_CSS_PIPE_MODE_COPY));
+
+ return rval;
+}
+
+static enum ia_css_err load_capture_binaries(
+ struct ia_css_pipe *pipe)
+{
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ bool must_be_raw;
+
+ IA_CSS_ENTER_PRIVATE("");
+ assert(pipe != NULL);
+ assert(pipe->mode == IA_CSS_PIPE_ID_CAPTURE || pipe->mode == IA_CSS_PIPE_ID_COPY);
+
+ if (pipe->pipe_settings.capture.primary_binary[0].info) {
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_SUCCESS);
+ return IA_CSS_SUCCESS;
+ }
+
+ /* In primary, advanced, low light or bayer mode,
+ * the input format must be raw */
+ must_be_raw =
+ pipe->config.default_capture_config.mode == IA_CSS_CAPTURE_MODE_ADVANCED ||
+ pipe->config.default_capture_config.mode == IA_CSS_CAPTURE_MODE_BAYER ||
+ pipe->config.default_capture_config.mode == IA_CSS_CAPTURE_MODE_LOW_LIGHT;
+ err = ia_css_util_check_input(&pipe->stream->config, must_be_raw, false);
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
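+ /* For SP copy of a BINARY_8 (e.g. JPEG) input stream, no ISP binaries are
+ * needed; the output is described as a single line of JPEG_BYTES bytes. */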
+ if (copy_on_sp(pipe) &&
+ pipe->stream->config.input_config.format == IA_CSS_STREAM_FORMAT_BINARY_8) {
+ ia_css_frame_info_init(
+ &pipe->output_info[0],
+ JPEG_BYTES,
+ 1,
+ IA_CSS_FRAME_FORMAT_BINARY_8,
+ 0);
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_SUCCESS);
+ return IA_CSS_SUCCESS;
+ }
+
+ switch (pipe->config.default_capture_config.mode) {
+ case IA_CSS_CAPTURE_MODE_RAW:
+ err = load_copy_binaries(pipe);
+#if !defined(HAS_NO_INPUT_SYSTEM) && defined(USE_INPUT_SYSTEM_VERSION_2401)
+ if (err == IA_CSS_SUCCESS)
+ pipe->pipe_settings.capture.copy_binary.online = pipe->stream->config.online;
+#endif
+ break;
+ case IA_CSS_CAPTURE_MODE_BAYER:
+ err = load_bayer_isp_binaries(pipe);
+ break;
+ case IA_CSS_CAPTURE_MODE_PRIMARY:
+ err = load_primary_binaries(pipe);
+ break;
+ case IA_CSS_CAPTURE_MODE_ADVANCED:
+ err = load_advanced_binaries(pipe);
+ break;
+ case IA_CSS_CAPTURE_MODE_LOW_LIGHT:
+ err = load_low_light_binaries(pipe);
+ break;
+ }
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+}
+
+static enum ia_css_err
+unload_capture_binaries(struct ia_css_pipe *pipe)
+{
+ unsigned int i;
+ IA_CSS_ENTER_PRIVATE("pipe = %p", pipe);
+
+ if ((pipe == NULL) || ((pipe->mode != IA_CSS_PIPE_ID_CAPTURE) && (pipe->mode != IA_CSS_PIPE_ID_COPY))) {
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_INVALID_ARGUMENTS);
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+ ia_css_binary_unload(&pipe->pipe_settings.capture.copy_binary);
+ for (i = 0; i < MAX_NUM_PRIMARY_STAGES; i++)
+ ia_css_binary_unload(&pipe->pipe_settings.capture.primary_binary[i]);
+ ia_css_binary_unload(&pipe->pipe_settings.capture.pre_isp_binary);
+ ia_css_binary_unload(&pipe->pipe_settings.capture.anr_gdc_binary);
+ ia_css_binary_unload(&pipe->pipe_settings.capture.post_isp_binary);
+ ia_css_binary_unload(&pipe->pipe_settings.capture.capture_pp_binary);
+ ia_css_binary_unload(&pipe->pipe_settings.capture.capture_ldc_binary);
+ ia_css_binary_unload(&pipe->pipe_settings.capture.vf_pp_binary);
+
+ for (i = 0; i < pipe->pipe_settings.capture.num_yuv_scaler; i++)
+ ia_css_binary_unload(&pipe->pipe_settings.capture.yuv_scaler_binary[i]);
+
+ kfree(pipe->pipe_settings.capture.is_output_stage);
+ pipe->pipe_settings.capture.is_output_stage = NULL;
+ kfree(pipe->pipe_settings.capture.yuv_scaler_binary);
+ pipe->pipe_settings.capture.yuv_scaler_binary = NULL;
+
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_SUCCESS);
+ return IA_CSS_SUCCESS;
+}
+
+static bool
+need_downscaling(const struct ia_css_resolution in_res,
+ const struct ia_css_resolution out_res)
+{
+
+ if (in_res.width > out_res.width || in_res.height > out_res.height)
+ return true;
+
+ return false;
+}
+
+static bool
+need_yuv_scaler_stage(const struct ia_css_pipe *pipe)
+{
+ unsigned int i;
+ struct ia_css_resolution in_res, out_res;
+
+ bool need_format_conversion = false;
+
+ IA_CSS_ENTER_PRIVATE("");
+ assert(pipe != NULL);
+ assert(pipe->mode == IA_CSS_PIPE_ID_YUVPP);
+
+ /* TODO: make generic function */
+ need_format_conversion =
+ ((pipe->stream->config.input_config.format == IA_CSS_STREAM_FORMAT_YUV420_8_LEGACY) &&
+ (pipe->output_info[0].format != IA_CSS_FRAME_FORMAT_CSI_MIPI_LEGACY_YUV420_8));
+
+ in_res = pipe->config.input_effective_res;
+
+ if (pipe->config.enable_dz)
+ return true;
+
+ if ((pipe->output_info[0].res.width != 0) && need_format_conversion)
+ return true;
+
+ for (i = 0; i < IA_CSS_PIPE_MAX_OUTPUT_STAGE; i++) {
+ out_res = pipe->output_info[i].res;
+
+ /* A non-zero width means it is a valid output port */
+ if ((out_res.width != 0) && need_downscaling(in_res, out_res))
+ return true;
+ }
+
+ return false;
+}
+
+/* TODO: this is temporarily derived from ia_css_pipe_create_cas_scaler_desc,
+ * which has some hard-coded knowledge that prevents reuse of that function.
+ * Later, merge this with ia_css_pipe_create_cas_scaler_desc. */
+static enum ia_css_err ia_css_pipe_create_cas_scaler_desc_single_output(
+ struct ia_css_frame_info *cas_scaler_in_info,
+ struct ia_css_frame_info *cas_scaler_out_info,
+ struct ia_css_frame_info *cas_scaler_vf_info,
+ struct ia_css_cas_binary_descr *descr)
+{
+ unsigned int i;
+ unsigned int hor_ds_factor = 0, ver_ds_factor = 0;
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ struct ia_css_frame_info tmp_in_info = IA_CSS_BINARY_DEFAULT_FRAME_INFO;
+
+ unsigned max_scale_factor_per_stage = MAX_PREFERRED_YUV_DS_PER_STEP;
+
+ assert(cas_scaler_in_info != NULL);
+ assert(cas_scaler_out_info != NULL);
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_pipe_create_cas_scaler_desc() enter:\n");
+
+ /* We assume that this function is used only for single output port case. */
+ descr->num_output_stage = 1;
+
+ hor_ds_factor = CEIL_DIV(cas_scaler_in_info->res.width, cas_scaler_out_info->res.width);
+ ver_ds_factor = CEIL_DIV(cas_scaler_in_info->res.height, cas_scaler_out_info->res.height);
+ /* use the same horizontal and vertical downscaling factor for simplicity */
+ assert(hor_ds_factor == ver_ds_factor);
+
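+ /* Each cascaded stage downscales by at most max_scale_factor_per_stage, so
+ * keep adding stages until the accumulated factor covers the requested
+ * downscaling factor */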
+ i = 1;
+ while (i < hor_ds_factor) {
+ descr->num_stage++;
+ i *= max_scale_factor_per_stage;
+ }
+
+ descr->in_info = kmalloc(descr->num_stage * sizeof(struct ia_css_frame_info), GFP_KERNEL);
+ if (descr->in_info == NULL) {
+ err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+ goto ERR;
+ }
+ descr->internal_out_info = kmalloc(descr->num_stage * sizeof(struct ia_css_frame_info), GFP_KERNEL);
+ if (descr->internal_out_info == NULL) {
+ err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+ goto ERR;
+ }
+ descr->out_info = kmalloc(descr->num_stage * sizeof(struct ia_css_frame_info), GFP_KERNEL);
+ if (descr->out_info == NULL) {
+ err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+ goto ERR;
+ }
+ descr->vf_info = kmalloc(descr->num_stage * sizeof(struct ia_css_frame_info), GFP_KERNEL);
+ if (descr->vf_info == NULL) {
+ err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+ goto ERR;
+ }
+ descr->is_output_stage = kmalloc(descr->num_stage * sizeof(bool), GFP_KERNEL);
+ if (descr->is_output_stage == NULL) {
+ err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+ goto ERR;
+ }
+
+ tmp_in_info = *cas_scaler_in_info;
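+ /* Walk the cascade from input to output: a stage becomes an output stage
+ * once one more maximum downscale step would bring the width to or below the
+ * requested output width; earlier stages downscale by the maximum step into
+ * internal YUV420 frames */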
+ for (i = 0; i < descr->num_stage; i++) {
+
+ descr->in_info[i] = tmp_in_info;
+ if ((tmp_in_info.res.width / max_scale_factor_per_stage) <= cas_scaler_out_info->res.width) {
+ descr->is_output_stage[i] = true;
+ if ((descr->num_output_stage > 1) && (i != (descr->num_stage - 1))) {
+ descr->internal_out_info[i].res.width = cas_scaler_out_info->res.width;
+ descr->internal_out_info[i].res.height = cas_scaler_out_info->res.height;
+ descr->internal_out_info[i].padded_width = cas_scaler_out_info->padded_width;
+ descr->internal_out_info[i].format = IA_CSS_FRAME_FORMAT_YUV420;
+ } else {
+ assert(i == (descr->num_stage - 1));
+ descr->internal_out_info[i].res.width = 0;
+ descr->internal_out_info[i].res.height = 0;
+ }
+ descr->out_info[i].res.width = cas_scaler_out_info->res.width;
+ descr->out_info[i].res.height = cas_scaler_out_info->res.height;
+ descr->out_info[i].padded_width = cas_scaler_out_info->padded_width;
+ descr->out_info[i].format = cas_scaler_out_info->format;
+ if (cas_scaler_vf_info != NULL) {
+ descr->vf_info[i].res.width = cas_scaler_vf_info->res.width;
+ descr->vf_info[i].res.height = cas_scaler_vf_info->res.height;
+ descr->vf_info[i].padded_width = cas_scaler_vf_info->padded_width;
+ ia_css_frame_info_set_format(&descr->vf_info[i], IA_CSS_FRAME_FORMAT_YUV_LINE);
+ } else {
+ descr->vf_info[i].res.width = 0;
+ descr->vf_info[i].res.height = 0;
+ descr->vf_info[i].padded_width = 0;
+ }
+ } else {
+ descr->is_output_stage[i] = false;
+ descr->internal_out_info[i].res.width = tmp_in_info.res.width / max_scale_factor_per_stage;
+ descr->internal_out_info[i].res.height = tmp_in_info.res.height / max_scale_factor_per_stage;
+ descr->internal_out_info[i].format = IA_CSS_FRAME_FORMAT_YUV420;
+ ia_css_frame_info_init(&descr->internal_out_info[i],
+ tmp_in_info.res.width / max_scale_factor_per_stage,
+ tmp_in_info.res.height / max_scale_factor_per_stage,
+ IA_CSS_FRAME_FORMAT_YUV420, 0);
+ descr->out_info[i].res.width = 0;
+ descr->out_info[i].res.height = 0;
+ descr->vf_info[i].res.width = 0;
+ descr->vf_info[i].res.height = 0;
+ }
+ tmp_in_info = descr->internal_out_info[i];
+ }
+ERR:
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_pipe_create_cas_scaler_desc() leave, err=%d\n",
+ err);
+ return err;
+}
+
+/* FIXME: merge most of this and single output version */
+static enum ia_css_err ia_css_pipe_create_cas_scaler_desc(struct ia_css_pipe *pipe,
+ struct ia_css_cas_binary_descr *descr)
+{
+ struct ia_css_frame_info in_info = IA_CSS_BINARY_DEFAULT_FRAME_INFO;
+ struct ia_css_frame_info *out_info[IA_CSS_PIPE_MAX_OUTPUT_STAGE];
+ struct ia_css_frame_info *vf_out_info[IA_CSS_PIPE_MAX_OUTPUT_STAGE];
+ struct ia_css_frame_info tmp_in_info = IA_CSS_BINARY_DEFAULT_FRAME_INFO;
+ unsigned int i, j;
+ unsigned int hor_scale_factor[IA_CSS_PIPE_MAX_OUTPUT_STAGE],
+ ver_scale_factor[IA_CSS_PIPE_MAX_OUTPUT_STAGE],
+ scale_factor = 0;
+ unsigned int num_stages = 0;
+ enum ia_css_err err = IA_CSS_SUCCESS;
+
+ unsigned max_scale_factor_per_stage = MAX_PREFERRED_YUV_DS_PER_STEP;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_pipe_create_cas_scaler_desc() enter:\n");
+
+ for (i = 0; i < IA_CSS_PIPE_MAX_OUTPUT_STAGE; i++) {
+ out_info[i] = NULL;
+ vf_out_info[i] = NULL;
+ hor_scale_factor[i] = 0;
+ ver_scale_factor[i] = 0;
+ }
+
+ in_info.res = pipe->config.input_effective_res;
+ in_info.padded_width = in_info.res.width;
+ descr->num_output_stage = 0;
+ /* Find out how much scaling we need for each output */
+ for (i = 0; i < IA_CSS_PIPE_MAX_OUTPUT_STAGE; i++) {
+ if (pipe->output_info[i].res.width != 0) {
+ out_info[i] = &pipe->output_info[i];
+ if (pipe->vf_output_info[i].res.width != 0)
+ vf_out_info[i] = &pipe->vf_output_info[i];
+ descr->num_output_stage += 1;
+ }
+
+ if (out_info[i] != NULL) {
+ hor_scale_factor[i] = CEIL_DIV(in_info.res.width, out_info[i]->res.width);
+ ver_scale_factor[i] = CEIL_DIV(in_info.res.height, out_info[i]->res.height);
+ /* use the same horizontal and vertical scaling factor for simplicity */
+ assert(hor_scale_factor[i] == ver_scale_factor[i]);
+ scale_factor = 1;
+ do {
+ num_stages++;
+ scale_factor *= max_scale_factor_per_stage;
+ } while (scale_factor < hor_scale_factor[i]);
+
+ in_info.res = out_info[i]->res;
+ }
+ }
+
+ if (need_yuv_scaler_stage(pipe) && (num_stages == 0))
+ num_stages = 1;
+
+ descr->num_stage = num_stages;
+
+ descr->in_info = kmalloc(descr->num_stage * sizeof(struct ia_css_frame_info), GFP_KERNEL);
+ if (descr->in_info == NULL) {
+ err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+ goto ERR;
+ }
+ descr->internal_out_info = kmalloc(descr->num_stage * sizeof(struct ia_css_frame_info), GFP_KERNEL);
+ if (descr->internal_out_info == NULL) {
+ err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+ goto ERR;
+ }
+ descr->out_info = kmalloc(descr->num_stage * sizeof(struct ia_css_frame_info), GFP_KERNEL);
+ if (descr->out_info == NULL) {
+ err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+ goto ERR;
+ }
+ descr->vf_info = kmalloc(descr->num_stage * sizeof(struct ia_css_frame_info), GFP_KERNEL);
+ if (descr->vf_info == NULL) {
+ err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+ goto ERR;
+ }
+ descr->is_output_stage = kmalloc(descr->num_stage * sizeof(bool), GFP_KERNEL);
+ if (descr->is_output_stage == NULL) {
+ err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+ goto ERR;
+ }
+
+ for (i = 0; i < IA_CSS_PIPE_MAX_OUTPUT_STAGE; i++) {
+ if (out_info[i]) {
+ if (i > 0) {
+ assert((out_info[i-1]->res.width >= out_info[i]->res.width) &&
+ (out_info[i-1]->res.height >= out_info[i]->res.height));
+ }
+ }
+ }
+
+ tmp_in_info.res = pipe->config.input_effective_res;
+ tmp_in_info.format = IA_CSS_FRAME_FORMAT_YUV420;
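+ /* j indexes the output ports; it advances each time a stage is marked as an
+ * output stage, so each output stage serves the next output port */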
+ for (i = 0, j = 0; i < descr->num_stage; i++) {
+ assert(j < 2);
+ assert(out_info[j] != NULL);
+
+ descr->in_info[i] = tmp_in_info;
+ if ((tmp_in_info.res.width / max_scale_factor_per_stage) <= out_info[j]->res.width) {
+ descr->is_output_stage[i] = true;
+ if ((descr->num_output_stage > 1) && (i != (descr->num_stage - 1))) {
+ descr->internal_out_info[i].res.width = out_info[j]->res.width;
+ descr->internal_out_info[i].res.height = out_info[j]->res.height;
+ descr->internal_out_info[i].padded_width = out_info[j]->padded_width;
+ descr->internal_out_info[i].format = IA_CSS_FRAME_FORMAT_YUV420;
+ } else {
+ assert(i == (descr->num_stage - 1));
+ descr->internal_out_info[i].res.width = 0;
+ descr->internal_out_info[i].res.height = 0;
+ }
+ descr->out_info[i].res.width = out_info[j]->res.width;
+ descr->out_info[i].res.height = out_info[j]->res.height;
+ descr->out_info[i].padded_width = out_info[j]->padded_width;
+ descr->out_info[i].format = out_info[j]->format;
+ if (vf_out_info[j] != NULL) {
+ descr->vf_info[i].res.width = vf_out_info[j]->res.width;
+ descr->vf_info[i].res.height = vf_out_info[j]->res.height;
+ descr->vf_info[i].padded_width = vf_out_info[j]->padded_width;
+ ia_css_frame_info_set_format(&descr->vf_info[i], IA_CSS_FRAME_FORMAT_YUV_LINE);
+ } else {
+ descr->vf_info[i].res.width = 0;
+ descr->vf_info[i].res.height = 0;
+ descr->vf_info[i].padded_width = 0;
+ }
+ j++;
+ } else {
+ descr->is_output_stage[i] = false;
+ descr->internal_out_info[i].res.width = tmp_in_info.res.width / max_scale_factor_per_stage;
+ descr->internal_out_info[i].res.height = tmp_in_info.res.height / max_scale_factor_per_stage;
+ descr->internal_out_info[i].format = IA_CSS_FRAME_FORMAT_YUV420;
+ ia_css_frame_info_init(&descr->internal_out_info[i],
+ tmp_in_info.res.width / max_scale_factor_per_stage,
+ tmp_in_info.res.height / max_scale_factor_per_stage,
+ IA_CSS_FRAME_FORMAT_YUV420, 0);
+ descr->out_info[i].res.width = 0;
+ descr->out_info[i].res.height = 0;
+ descr->vf_info[i].res.width = 0;
+ descr->vf_info[i].res.height = 0;
+ }
+ tmp_in_info = descr->internal_out_info[i];
+ }
+ERR:
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_pipe_create_cas_scaler_desc() leave, err=%d\n",
+ err);
+ return err;
+}
+
+static void ia_css_pipe_destroy_cas_scaler_desc(struct ia_css_cas_binary_descr *descr)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_pipe_destroy_cas_scaler_desc() enter:\n");
+ kfree(descr->in_info);
+ descr->in_info = NULL;
+ kfree(descr->internal_out_info);
+ descr->internal_out_info = NULL;
+ kfree(descr->out_info);
+ descr->out_info = NULL;
+ kfree(descr->vf_info);
+ descr->vf_info = NULL;
+ kfree(descr->is_output_stage);
+ descr->is_output_stage = NULL;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_pipe_destroy_cas_scaler_desc() leave\n");
+}
+
+static enum ia_css_err
+load_yuvpp_binaries(struct ia_css_pipe *pipe)
+{
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ bool need_scaler = false;
+ struct ia_css_frame_info *vf_pp_in_info[IA_CSS_PIPE_MAX_OUTPUT_STAGE];
+ struct ia_css_yuvpp_settings *mycs;
+ struct ia_css_binary *next_binary;
+ struct ia_css_cas_binary_descr cas_scaler_descr = IA_CSS_DEFAULT_CAS_BINARY_DESCR;
+ unsigned int i, j;
+ bool need_isp_copy_binary = false;
+
+ IA_CSS_ENTER_PRIVATE("");
+ assert(pipe != NULL);
+ assert(pipe->stream != NULL);
+ assert(pipe->mode == IA_CSS_PIPE_ID_YUVPP);
+
+ if (pipe->pipe_settings.yuvpp.copy_binary.info)
+ goto ERR;
+
+ /* Set both must_be_raw and must_be_yuv to false so that yuvpp can take RGB inputs */
+ err = ia_css_util_check_input(&pipe->stream->config, false, false);
+ if (err != IA_CSS_SUCCESS)
+ goto ERR;
+
+ mycs = &pipe->pipe_settings.yuvpp;
+
+ for (i = 0; i < IA_CSS_PIPE_MAX_OUTPUT_STAGE; i++) {
+ if (pipe->vf_output_info[i].res.width != 0) {
+ err = ia_css_util_check_vf_out_info(&pipe->output_info[i],
+ &pipe->vf_output_info[i]);
+ if (err != IA_CSS_SUCCESS)
+ goto ERR;
+ }
+ vf_pp_in_info[i] = NULL;
+ }
+
+ need_scaler = need_yuv_scaler_stage(pipe);
+
+ /* we build up the pipeline starting at the end */
+ /* Capture post-processing */
+ if (need_scaler) {
+ struct ia_css_binary_descr yuv_scaler_descr;
+
+ err = ia_css_pipe_create_cas_scaler_desc(pipe,
+ &cas_scaler_descr);
+ if (err != IA_CSS_SUCCESS)
+ goto ERR;
+ mycs->num_output = cas_scaler_descr.num_output_stage;
+ mycs->num_yuv_scaler = cas_scaler_descr.num_stage;
+ mycs->yuv_scaler_binary = kzalloc(cas_scaler_descr.num_stage *
+ sizeof(struct ia_css_binary), GFP_KERNEL);
+ if (mycs->yuv_scaler_binary == NULL) {
+ err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+ goto ERR;
+ }
+ mycs->is_output_stage = kzalloc(cas_scaler_descr.num_stage *
+ sizeof(bool), GFP_KERNEL);
+ if (mycs->is_output_stage == NULL) {
+ err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+ goto ERR;
+ }
+ for (i = 0; i < cas_scaler_descr.num_stage; i++) {
+ mycs->is_output_stage[i] = cas_scaler_descr.is_output_stage[i];
+ ia_css_pipe_get_yuvscaler_binarydesc(pipe,
+ &yuv_scaler_descr, &cas_scaler_descr.in_info[i],
+ &cas_scaler_descr.out_info[i],
+ &cas_scaler_descr.internal_out_info[i],
+ &cas_scaler_descr.vf_info[i]);
+ err = ia_css_binary_find(&yuv_scaler_descr,
+ &mycs->yuv_scaler_binary[i]);
+ if (err != IA_CSS_SUCCESS)
+ goto ERR;
+ }
+ ia_css_pipe_destroy_cas_scaler_desc(&cas_scaler_descr);
+ } else {
+ mycs->num_output = 1;
+ }
+
+ if (need_scaler) {
+ next_binary = &mycs->yuv_scaler_binary[0];
+ } else {
+ next_binary = NULL;
+ }
+
+#if defined(USE_INPUT_SYSTEM_VERSION_2401)
+ /*
+ * NOTES
+ * - Why does the "yuvpp" pipe need "isp_copy_binary" (i.e. ISP Copy) when
+ * its input is "IA_CSS_STREAM_FORMAT_YUV422_8"?
+ *
+ * In most use cases, the first stage in the "yuvpp" pipe is the "yuv_scale_
+ * binary". However, the "yuv_scale_binary" does NOT support the input-frame
+ * format as "IA_CSS_STREAM_FORMAT_YUV422_8".
+ *
+ * Hence, the "isp_copy_binary" is required to be present in front of the "yuv
+ * _scale_binary". It would translate the input-frame to the frame formats that
+ * are supported by the "yuv_scale_binary".
+ *
+ * Please refer to "FrameWork/css/isp/pipes/capture_pp/capture_pp_1.0/capture_
+ * pp_defs.h" for the list of input-frame formats that are supported by the
+ * "yuv_scale_binary".
+ */
+ need_isp_copy_binary =
+ (pipe->stream->config.input_config.format == IA_CSS_STREAM_FORMAT_YUV422_8);
+#else /* !USE_INPUT_SYSTEM_VERSION_2401 */
+ need_isp_copy_binary = true;
+#endif /* USE_INPUT_SYSTEM_VERSION_2401 */
+
+ if (need_isp_copy_binary) {
+ err = load_copy_binary(pipe,
+ &mycs->copy_binary,
+ next_binary);
+
+ if (err != IA_CSS_SUCCESS)
+ goto ERR;
+
+ /*
+ * NOTES
+ * - Why is "pipe->pipe_settings.capture.copy_binary.online" specified?
+ *
+ * In some use cases, the first stage in the "yuvpp" pipe is the
+ * "isp_copy_binary". The "isp_copy_binary" is designed to process
+ * the input from either the system DDR or from the IPU internal VMEM.
+ * So it provides the flag "online" to specify where its input is from,
+ * i.e.:
+ *
+ * (1) "online <= true", the input is from the IPU internal VMEM.
+ * (2) "online <= false", the input is from the system DDR.
+ *
+ * In other use cases, the first stage in the "yuvpp" pipe is the
+ * "yuv_scale_binary". The "yuv_scale_binary" is designed to process the
+ * input ONLY from the system DDR. So it does not provide the flag "online"
+ * to specify where its input is from.
+ */
+ pipe->pipe_settings.capture.copy_binary.online = pipe->stream->config.online;
+ }
+
+ /* Viewfinder post-processing */
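+ /* One vf_pp input is taken per output stage of the scaler cascade; without
+ * a scaler the single vf_pp is fed from the copy binary's viewfinder output */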
+ if (need_scaler) {
+ for (i = 0, j = 0; i < mycs->num_yuv_scaler; i++) {
+ if (mycs->is_output_stage[i]) {
+ assert(j < 2);
+ vf_pp_in_info[j] =
+ &mycs->yuv_scaler_binary[i].vf_frame_info;
+ j++;
+ }
+ }
+ mycs->num_vf_pp = j;
+ } else {
+ vf_pp_in_info[0] =
+ &mycs->copy_binary.vf_frame_info;
+ for (i = 1; i < IA_CSS_PIPE_MAX_OUTPUT_STAGE; i++) {
+ vf_pp_in_info[i] = NULL;
+ }
+ mycs->num_vf_pp = 1;
+ }
+ mycs->vf_pp_binary = kzalloc(mycs->num_vf_pp * sizeof(struct ia_css_binary),
+ GFP_KERNEL);
+ if (mycs->vf_pp_binary == NULL) {
+ err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+ goto ERR;
+ }
+
+ {
+ struct ia_css_binary_descr vf_pp_descr;
+
+ for (i = 0; i < mycs->num_vf_pp; i++) {
+ if (pipe->vf_output_info[i].res.width != 0) {
+ ia_css_pipe_get_vfpp_binarydesc(pipe,
+ &vf_pp_descr, vf_pp_in_info[i], &pipe->vf_output_info[i]);
+ err = ia_css_binary_find(&vf_pp_descr, &mycs->vf_pp_binary[i]);
+ if (err != IA_CSS_SUCCESS)
+ goto ERR;
+ }
+ }
+ }
+
+ if (err != IA_CSS_SUCCESS)
+ goto ERR;
+
+ERR:
+ if (need_scaler) {
+ ia_css_pipe_destroy_cas_scaler_desc(&cas_scaler_descr);
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "load_yuvpp_binaries() leave, err=%d\n",
+ err);
+ return err;
+}
+
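+/* Unload the copy, YUV scaler and vf_pp binaries of a YUVPP pipe and free
+ * the per-pipe arrays that were allocated when the binaries were loaded. */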
+static enum ia_css_err
+unload_yuvpp_binaries(struct ia_css_pipe *pipe)
+{
+ unsigned int i;
+ IA_CSS_ENTER_PRIVATE("pipe = %p", pipe);
+
+ if ((pipe == NULL) || (pipe->mode != IA_CSS_PIPE_ID_YUVPP)) {
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_INVALID_ARGUMENTS);
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+ ia_css_binary_unload(&pipe->pipe_settings.yuvpp.copy_binary);
+ for (i = 0; i < pipe->pipe_settings.yuvpp.num_yuv_scaler; i++) {
+ ia_css_binary_unload(&pipe->pipe_settings.yuvpp.yuv_scaler_binary[i]);
+ }
+ for (i = 0; i < pipe->pipe_settings.yuvpp.num_vf_pp; i++) {
+ ia_css_binary_unload(&pipe->pipe_settings.yuvpp.vf_pp_binary[i]);
+ }
+ kfree(pipe->pipe_settings.yuvpp.is_output_stage);
+ pipe->pipe_settings.yuvpp.is_output_stage = NULL;
+ kfree(pipe->pipe_settings.yuvpp.yuv_scaler_binary);
+ pipe->pipe_settings.yuvpp.yuv_scaler_binary = NULL;
+ kfree(pipe->pipe_settings.yuvpp.vf_pp_binary);
+ pipe->pipe_settings.yuvpp.vf_pp_binary = NULL;
+
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_SUCCESS);
+ return IA_CSS_SUCCESS;
+}
+
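+/* Start a YUVPP pipe: send MIPI frames where the input system requires it,
+ * derive the copy override mask from the pipe's SP thread id and start the
+ * pipeline with the stream's configured input mode. */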
+static enum ia_css_err yuvpp_start(struct ia_css_pipe *pipe)
+{
+ struct ia_css_binary *copy_binary;
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ enum sh_css_pipe_config_override copy_ovrd;
+ enum ia_css_input_mode yuvpp_pipe_input_mode;
+
+ IA_CSS_ENTER_PRIVATE("pipe = %p", pipe);
+ if ((pipe == NULL) || (pipe->mode != IA_CSS_PIPE_ID_YUVPP)) {
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_INVALID_ARGUMENTS);
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+
+ yuvpp_pipe_input_mode = pipe->stream->config.mode;
+
+ copy_binary = &pipe->pipe_settings.yuvpp.copy_binary;
+
+ sh_css_metrics_start_frame();
+
+ /* multi stream video needs mipi buffers */
+
+#if !defined(HAS_NO_INPUT_SYSTEM) && (defined(USE_INPUT_SYSTEM_VERSION_2) || defined(USE_INPUT_SYSTEM_VERSION_2401))
+ err = send_mipi_frames(pipe);
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+#endif
+
+ {
+ unsigned int thread_id;
+
+ ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(pipe), &thread_id);
+ copy_ovrd = 1 << thread_id;
+ }
+
+ start_pipe(pipe, copy_ovrd, yuvpp_pipe_input_mode);
+
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+}
+
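+/* Unload the binaries of a pipe according to its mode; COPY pipes have no
+ * binaries and are treated as an immediate success. */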
+static enum ia_css_err
+sh_css_pipe_unload_binaries(struct ia_css_pipe *pipe)
+{
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ IA_CSS_ENTER_PRIVATE("pipe = %p", pipe);
+
+ if (pipe == NULL) {
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_INVALID_ARGUMENTS);
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+ /* PIPE_MODE_COPY has no binaries, but has output frames to outside */
+ if (pipe->config.mode == IA_CSS_PIPE_MODE_COPY) {
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_SUCCESS);
+ return IA_CSS_SUCCESS;
+ }
+
+ switch (pipe->mode) {
+ case IA_CSS_PIPE_ID_PREVIEW:
+ err = unload_preview_binaries(pipe);
+ break;
+ case IA_CSS_PIPE_ID_VIDEO:
+ err = unload_video_binaries(pipe);
+ break;
+ case IA_CSS_PIPE_ID_CAPTURE:
+ err = unload_capture_binaries(pipe);
+ break;
+ case IA_CSS_PIPE_ID_YUVPP:
+ err = unload_yuvpp_binaries(pipe);
+ break;
+ default:
+ break;
+ }
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+}
+
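+/* Load the binaries of a pipe according to its mode. On failure, any
+ * binaries that were already loaded are unloaded again before returning. */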
+static enum ia_css_err
+sh_css_pipe_load_binaries(struct ia_css_pipe *pipe)
+{
+ enum ia_css_err err = IA_CSS_SUCCESS;
+
+ assert(pipe != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "sh_css_pipe_load_binaries() enter:\n");
+
+ /* PIPE_MODE_COPY has no binaries, but has output frames to outside */
+ if (pipe->config.mode == IA_CSS_PIPE_MODE_COPY)
+ return err;
+
+ switch (pipe->mode) {
+ case IA_CSS_PIPE_ID_PREVIEW:
+ err = load_preview_binaries(pipe);
+ break;
+ case IA_CSS_PIPE_ID_VIDEO:
+ err = load_video_binaries(pipe);
+ break;
+ case IA_CSS_PIPE_ID_CAPTURE:
+ err = load_capture_binaries(pipe);
+ break;
+ case IA_CSS_PIPE_ID_YUVPP:
+ err = load_yuvpp_binaries(pipe);
+ break;
+ case IA_CSS_PIPE_ID_ACC:
+ break;
+ default:
+ err = IA_CSS_ERR_INTERNAL_ERROR;
+ break;
+ }
+ if (err != IA_CSS_SUCCESS) {
+ if (sh_css_pipe_unload_binaries(pipe) != IA_CSS_SUCCESS) {
+ /* currently CSS does not support multiple error returns in a single
+ * function, so IA_CSS_ERR_INTERNAL_ERROR is used in this case */
+ err = IA_CSS_ERR_INTERNAL_ERROR;
+ }
+ }
+ return err;
+}
+
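+/* Build the host-side pipeline for a YUVPP pipe: an optional ISP copy stage,
+ * the cascaded YUV scaler stages and a vf_pp stage for each enabled
+ * viewfinder output. */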
+static enum ia_css_err
+create_host_yuvpp_pipeline(struct ia_css_pipe *pipe)
+{
+ struct ia_css_pipeline *me;
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ struct ia_css_pipeline_stage *vf_pp_stage = NULL,
+ *copy_stage = NULL,
+ *yuv_scaler_stage = NULL;
+ struct ia_css_binary *copy_binary,
+ *vf_pp_binary,
+ *yuv_scaler_binary;
+ bool need_scaler = false;
+ unsigned int num_stage, num_vf_pp_stage, num_output_stage;
+ unsigned int i, j;
+
+ struct ia_css_frame *in_frame = NULL;
+ struct ia_css_frame *out_frame[IA_CSS_PIPE_MAX_OUTPUT_STAGE];
+ struct ia_css_frame *bin_out_frame[IA_CSS_BINARY_MAX_OUTPUT_PORTS];
+ struct ia_css_frame *vf_frame[IA_CSS_PIPE_MAX_OUTPUT_STAGE];
+ struct ia_css_pipeline_stage_desc stage_desc;
+ bool need_in_frameinfo_memory = false;
+#ifdef USE_INPUT_SYSTEM_VERSION_2401
+ bool sensor = false;
+ bool buffered_sensor = false;
+ bool online = false;
+ bool continuous = false;
+#endif
+
+ IA_CSS_ENTER_PRIVATE("pipe = %p", pipe);
+ if ((pipe == NULL) || (pipe->stream == NULL) || (pipe->mode != IA_CSS_PIPE_ID_YUVPP)) {
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_INVALID_ARGUMENTS);
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+ me = &pipe->pipeline;
+ ia_css_pipeline_clean(me);
+ for (i = 0; i < IA_CSS_PIPE_MAX_OUTPUT_STAGE; i++) {
+ out_frame[i] = NULL;
+ vf_frame[i] = NULL;
+ }
+ ia_css_pipe_util_create_output_frames(bin_out_frame);
+ num_stage = pipe->pipe_settings.yuvpp.num_yuv_scaler;
+ num_vf_pp_stage = pipe->pipe_settings.yuvpp.num_vf_pp;
+ num_output_stage = pipe->pipe_settings.yuvpp.num_output;
+
+#ifdef USE_INPUT_SYSTEM_VERSION_2401
+ /* When the input system is 2401, always enable 'in_frameinfo_memory'
+ * except for the following:
+ * - Direct Sensor Mode Online Capture
+ * - Direct Sensor Mode Continuous Capture
+ * - Buffered Sensor Mode Continuous Capture
+ */
+ sensor = pipe->stream->config.mode == IA_CSS_INPUT_MODE_SENSOR;
+ buffered_sensor = pipe->stream->config.mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR;
+ online = pipe->stream->config.online;
+ continuous = pipe->stream->config.continuous;
+ need_in_frameinfo_memory =
+ !((sensor && (online || continuous)) || (buffered_sensor && continuous));
+#else
+ /* Construct in_frame info (only in case we have dynamic input) */
+ need_in_frameinfo_memory = pipe->stream->config.mode == IA_CSS_INPUT_MODE_MEMORY;
+#endif
+ /* the input frame can come from:
+ * a) memory: connect yuvscaler to me->in_frame
+ * b) sensor, via copy binary: connect yuvscaler to copy binary later on */
+ if (need_in_frameinfo_memory) {
+ /* TODO: improve for different input formats. */
+
+ /*
+ * "pipe->stream->config.input_config.format" represents the sensor output
+ * frame format, e.g. YUV422 8-bit.
+ *
+ * "in_frame_format" represents the imaging pipe's input frame format, e.g.
+ * Bayer-Quad RAW.
+ */
+ int in_frame_format;
+ if (pipe->stream->config.input_config.format == IA_CSS_STREAM_FORMAT_YUV420_8_LEGACY) {
+ in_frame_format = IA_CSS_FRAME_FORMAT_CSI_MIPI_LEGACY_YUV420_8;
+ } else if (pipe->stream->config.input_config.format == IA_CSS_STREAM_FORMAT_YUV422_8) {
+ /*
+ * When the sensor output frame format is "IA_CSS_STREAM_FORMAT_YUV422_8",
+ * the "isp_copy_var" binary is selected as the first stage in the yuvpp
+ * pipe.
+ *
+ * For the "isp_copy_var" binary, it reads the YUV422-8 pixels from
+ * the frame buffer (at DDR) to the frame-line buffer (at VMEM).
+ *
+ * At present, the "isp_copy_var" binary does NOT provide a separate
+ * frame-line buffer to store the YUV422-8 pixels. Instead, it stores
+ * the YUV422-8 pixels in the frame-line buffer which is designed to
+ * store the Bayer-Quad RAW pixels.
+ *
+ * To direct the "isp_copy_var" binary to read from the RAW frame-line
+ * buffer, its input frame format must be specified as
+ * "IA_CSS_FRAME_FORMAT_RAW".
+ */
+ in_frame_format = IA_CSS_FRAME_FORMAT_RAW;
+ } else {
+ in_frame_format = IA_CSS_FRAME_FORMAT_NV12;
+ }
+
+ err = init_in_frameinfo_memory_defaults(pipe,
+ &me->in_frame,
+ in_frame_format);
+
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+
+ in_frame = &me->in_frame;
+ } else {
+ in_frame = NULL;
+ }
+
+ for (i = 0; i < num_output_stage; i++) {
+ assert(i < IA_CSS_PIPE_MAX_OUTPUT_STAGE);
+ if (pipe->output_info[i].res.width != 0) {
+ err = init_out_frameinfo_defaults(pipe, &me->out_frame[i], i);
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ out_frame[i] = &me->out_frame[i];
+ }
+
+ /* Construct vf_frame info (only in case we have VF) */
+ if (pipe->vf_output_info[i].res.width != 0) {
+ err = init_vf_frameinfo_defaults(pipe, &me->vf_frame[i], i);
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ vf_frame[i] = &me->vf_frame[i];
+ }
+ }
+
+ copy_binary = &pipe->pipe_settings.yuvpp.copy_binary;
+ vf_pp_binary = pipe->pipe_settings.yuvpp.vf_pp_binary;
+ yuv_scaler_binary = pipe->pipe_settings.yuvpp.yuv_scaler_binary;
+ need_scaler = need_yuv_scaler_stage(pipe);
+
+ if (pipe->pipe_settings.yuvpp.copy_binary.info) {
+
+ struct ia_css_frame *in_frame_local = NULL;
+
+#ifdef USE_INPUT_SYSTEM_VERSION_2401
+ /* After isp copy is enabled in_frame needs to be passed. */
+ if (!online)
+ in_frame_local = in_frame;
+#endif
+
+ if (need_scaler) {
+ ia_css_pipe_util_set_output_frames(bin_out_frame, 0, NULL);
+ ia_css_pipe_get_generic_stage_desc(&stage_desc, copy_binary,
+ bin_out_frame, in_frame_local, NULL);
+ } else {
+ ia_css_pipe_util_set_output_frames(bin_out_frame, 0, out_frame[0]);
+ ia_css_pipe_get_generic_stage_desc(&stage_desc, copy_binary,
+ bin_out_frame, in_frame_local, NULL);
+ }
+
+ err = ia_css_pipeline_create_and_add_stage(me,
+ &stage_desc,
+ &copy_stage);
+
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+
+ if (copy_stage) {
+ /* if we use yuv scaler binary, vf output should be from there */
+ copy_stage->args.copy_vf = !need_scaler;
+ /* for yuvpp pipe, it should always be enabled */
+ copy_stage->args.copy_output = true;
+ /* connect output of copy binary to input of yuv scaler */
+ in_frame = copy_stage->args.out_frame[0];
+ }
+ }
+
+ if (need_scaler) {
+ struct ia_css_frame *tmp_out_frame = NULL;
+ struct ia_css_frame *tmp_vf_frame = NULL;
+ struct ia_css_frame *tmp_in_frame = in_frame;
+
+ for (i = 0, j = 0; i < num_stage; i++) {
+ assert(j < num_output_stage);
+ if (pipe->pipe_settings.yuvpp.is_output_stage[i] == true) {
+ tmp_out_frame = out_frame[j];
+ tmp_vf_frame = vf_frame[j];
+ } else {
+ tmp_out_frame = NULL;
+ tmp_vf_frame = NULL;
+ }
+
+ err = add_yuv_scaler_stage(pipe, me, tmp_in_frame, tmp_out_frame,
+ NULL,
+ &yuv_scaler_binary[i],
+ &yuv_scaler_stage);
+
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ /* we use output port 1 as internal output port */
+ tmp_in_frame = yuv_scaler_stage->args.out_frame[1];
+ if (pipe->pipe_settings.yuvpp.is_output_stage[i] == true) {
+ if (tmp_vf_frame && (tmp_vf_frame->info.res.width != 0)) {
+ in_frame = yuv_scaler_stage->args.out_vf_frame;
+ err = add_vf_pp_stage(pipe, in_frame, tmp_vf_frame, &vf_pp_binary[j],
+ &vf_pp_stage);
+
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ }
+ j++;
+ }
+ }
+ } else if (copy_stage != NULL) {
+ if (vf_frame[0] != NULL && vf_frame[0]->info.res.width != 0) {
+ in_frame = copy_stage->args.out_vf_frame;
+ err = add_vf_pp_stage(pipe, in_frame, vf_frame[0], &vf_pp_binary[0],
+ &vf_pp_stage);
+ }
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ }
+
+ ia_css_pipeline_finalize_stages(&pipe->pipeline, pipe->stream->config.continuous);
+
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_SUCCESS);
+
+ return IA_CSS_SUCCESS;
+}
+
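+/* Build a single-stage host pipeline that performs a raw copy on the SP
+ * (IA_CSS_PIPELINE_RAW_COPY) into the given output frame. */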
+static enum ia_css_err
+create_host_copy_pipeline(struct ia_css_pipe *pipe,
+ unsigned max_input_width,
+ struct ia_css_frame *out_frame)
+{
+ struct ia_css_pipeline *me;
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ struct ia_css_pipeline_stage_desc stage_desc;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "create_host_copy_pipeline() enter:\n");
+
+ /* pipeline already created as part of create_host_pipeline_structure */
+ me = &pipe->pipeline;
+ ia_css_pipeline_clean(me);
+
+ /* Construct out_frame info */
+ out_frame->contiguous = false;
+ out_frame->flash_state = IA_CSS_FRAME_FLASH_STATE_NONE;
+
+ if (copy_on_sp(pipe) &&
+ pipe->stream->config.input_config.format == IA_CSS_STREAM_FORMAT_BINARY_8) {
+ ia_css_frame_info_init(
+ &out_frame->info,
+ JPEG_BYTES,
+ 1,
+ IA_CSS_FRAME_FORMAT_BINARY_8,
+ 0);
+ } else if (out_frame->info.format == IA_CSS_FRAME_FORMAT_RAW) {
+ out_frame->info.raw_bit_depth =
+ ia_css_pipe_util_pipe_input_format_bpp(pipe);
+ }
+
+ me->num_stages = 1;
+ me->pipe_id = IA_CSS_PIPE_ID_COPY;
+ pipe->mode = IA_CSS_PIPE_ID_COPY;
+
+ ia_css_pipe_get_sp_func_stage_desc(&stage_desc, out_frame,
+ IA_CSS_PIPELINE_RAW_COPY, max_input_width);
+ err = ia_css_pipeline_create_and_add_stage(me,
+ &stage_desc,
+ NULL);
+
+ ia_css_pipeline_finalize_stages(&pipe->pipeline, pipe->stream->config.continuous);
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "create_host_copy_pipeline() leave:\n");
+
+ return err;
+}
+
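+/* Build a single-stage host pipeline that copies frames from the input
+ * system (IA_CSS_PIPELINE_ISYS_COPY) into the capture output frame. */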
+static enum ia_css_err
+create_host_isyscopy_capture_pipeline(struct ia_css_pipe *pipe)
+{
+ struct ia_css_pipeline *me = &pipe->pipeline;
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ struct ia_css_pipeline_stage_desc stage_desc;
+ struct ia_css_frame *out_frame = &me->out_frame[0];
+ struct ia_css_pipeline_stage *out_stage = NULL;
+ unsigned int thread_id;
+ enum sh_css_queue_id queue_id;
+ unsigned int max_input_width = MAX_VECTORS_PER_INPUT_LINE_CONT * ISP_VEC_NELEMS;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "create_host_isyscopy_capture_pipeline() enter:\n");
+ ia_css_pipeline_clean(me);
+
+ /* Construct out_frame info */
+ err = sh_css_pipe_get_output_frame_info(pipe, &out_frame->info, 0);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+ out_frame->contiguous = false;
+ out_frame->flash_state = IA_CSS_FRAME_FLASH_STATE_NONE;
+ ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(pipe), &thread_id);
+ ia_css_query_internal_queue_id(IA_CSS_BUFFER_TYPE_OUTPUT_FRAME, thread_id, &queue_id);
+ out_frame->dynamic_queue_id = queue_id;
+ out_frame->buf_type = IA_CSS_BUFFER_TYPE_OUTPUT_FRAME;
+
+ me->num_stages = 1;
+ me->pipe_id = IA_CSS_PIPE_ID_CAPTURE;
+ pipe->mode = IA_CSS_PIPE_ID_CAPTURE;
+ ia_css_pipe_get_sp_func_stage_desc(&stage_desc, out_frame,
+ IA_CSS_PIPELINE_ISYS_COPY, max_input_width);
+ err = ia_css_pipeline_create_and_add_stage(me,
+ &stage_desc, &out_stage);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+
+ ia_css_pipeline_finalize_stages(me, pipe->stream->config.continuous);
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "create_host_isyscopy_capture_pipeline() leave:\n");
+
+ return err;
+}
+
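+/* Build the host-side pipeline for a regular capture pipe. Depending on the
+ * capture mode this chains copy, primary or pre-ISP/ANR/post-ISP stages,
+ * optional LDC and capture_pp stages, YUV scaler stages and a vf_pp stage. */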
+static enum ia_css_err
+create_host_regular_capture_pipeline(struct ia_css_pipe *pipe)
+{
+ struct ia_css_pipeline *me;
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ enum ia_css_capture_mode mode;
+ struct ia_css_pipeline_stage *current_stage = NULL;
+ struct ia_css_pipeline_stage *yuv_scaler_stage = NULL;
+ struct ia_css_binary *copy_binary,
+ *primary_binary[MAX_NUM_PRIMARY_STAGES],
+ *vf_pp_binary,
+ *pre_isp_binary,
+ *anr_gdc_binary,
+ *post_isp_binary,
+ *yuv_scaler_binary,
+ *capture_pp_binary,
+ *capture_ldc_binary;
+ bool need_pp = false;
+ bool raw;
+
+ struct ia_css_frame *in_frame;
+ struct ia_css_frame *out_frame;
+ struct ia_css_frame *out_frames[IA_CSS_BINARY_MAX_OUTPUT_PORTS];
+ struct ia_css_frame *vf_frame;
+ struct ia_css_pipeline_stage_desc stage_desc;
+ bool need_in_frameinfo_memory = false;
+#ifdef USE_INPUT_SYSTEM_VERSION_2401
+ bool sensor = false;
+ bool buffered_sensor = false;
+ bool online = false;
+ bool continuous = false;
+#endif
+ unsigned int i, num_yuv_scaler, num_primary_stage;
+ bool need_yuv_pp = false;
+ bool *is_output_stage = NULL;
+ bool need_ldc = false;
+
+ IA_CSS_ENTER_PRIVATE("");
+ assert(pipe != NULL);
+ assert(pipe->stream != NULL);
+ assert(pipe->mode == IA_CSS_PIPE_ID_CAPTURE || pipe->mode == IA_CSS_PIPE_ID_COPY);
+
+ me = &pipe->pipeline;
+ mode = pipe->config.default_capture_config.mode;
+ raw = (mode == IA_CSS_CAPTURE_MODE_RAW);
+ ia_css_pipeline_clean(me);
+ ia_css_pipe_util_create_output_frames(out_frames);
+
+#ifdef USE_INPUT_SYSTEM_VERSION_2401
+ /* When the input system is 2401, always enable 'in_frameinfo_memory'
+ * except for the following:
+ * - Direct Sensor Mode Online Capture
+ * - Direct Sensor Mode Continuous Capture
+ * - Buffered Sensor Mode Online Capture
+ * - Buffered Sensor Mode Continuous Capture
+ */
+ sensor = (pipe->stream->config.mode == IA_CSS_INPUT_MODE_SENSOR);
+ buffered_sensor = (pipe->stream->config.mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR);
+ online = pipe->stream->config.online;
+ continuous = pipe->stream->config.continuous;
+ need_in_frameinfo_memory =
+ !((sensor && (online || continuous)) || (buffered_sensor && (online || continuous)));
+#else
+ /* Construct in_frame info (only in case we have dynamic input) */
+ need_in_frameinfo_memory = pipe->stream->config.mode == IA_CSS_INPUT_MODE_MEMORY;
+#endif
+ if (need_in_frameinfo_memory) {
+ err = init_in_frameinfo_memory_defaults(pipe, &me->in_frame, IA_CSS_FRAME_FORMAT_RAW);
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+
+ in_frame = &me->in_frame;
+ } else {
+ in_frame = NULL;
+ }
+
+ err = init_out_frameinfo_defaults(pipe, &me->out_frame[0], 0);
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ out_frame = &me->out_frame[0];
+
+ /* Construct vf_frame info (only in case we have VF) */
+ if (pipe->enable_viewfinder[IA_CSS_PIPE_OUTPUT_STAGE_0]) {
+ if (mode == IA_CSS_CAPTURE_MODE_RAW || mode == IA_CSS_CAPTURE_MODE_BAYER) {
+ /* These modes don't support viewfinder output */
+ vf_frame = NULL;
+ } else {
+ init_vf_frameinfo_defaults(pipe, &me->vf_frame[0], 0);
+ vf_frame = &me->vf_frame[0];
+ }
+ } else {
+ vf_frame = NULL;
+ }
+
+ copy_binary = &pipe->pipe_settings.capture.copy_binary;
+ num_primary_stage = pipe->pipe_settings.capture.num_primary_stage;
+ if ((num_primary_stage == 0) && (mode == IA_CSS_CAPTURE_MODE_PRIMARY)) {
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_INTERNAL_ERROR);
+ return IA_CSS_ERR_INTERNAL_ERROR;
+ }
+ for (i = 0; i < num_primary_stage; i++) {
+ primary_binary[i] = &pipe->pipe_settings.capture.primary_binary[i];
+ }
+ vf_pp_binary = &pipe->pipe_settings.capture.vf_pp_binary;
+ pre_isp_binary = &pipe->pipe_settings.capture.pre_isp_binary;
+ anr_gdc_binary = &pipe->pipe_settings.capture.anr_gdc_binary;
+ post_isp_binary = &pipe->pipe_settings.capture.post_isp_binary;
+ capture_pp_binary = &pipe->pipe_settings.capture.capture_pp_binary;
+ yuv_scaler_binary = pipe->pipe_settings.capture.yuv_scaler_binary;
+ num_yuv_scaler = pipe->pipe_settings.capture.num_yuv_scaler;
+ is_output_stage = pipe->pipe_settings.capture.is_output_stage;
+ capture_ldc_binary = &pipe->pipe_settings.capture.capture_ldc_binary;
+
+ need_pp = (need_capture_pp(pipe) || pipe->output_stage) &&
+ mode != IA_CSS_CAPTURE_MODE_RAW &&
+ mode != IA_CSS_CAPTURE_MODE_BAYER;
+ need_yuv_pp = (yuv_scaler_binary != NULL && yuv_scaler_binary->info != NULL);
+ need_ldc = (capture_ldc_binary != NULL && capture_ldc_binary->info != NULL);
+
+ if (pipe->pipe_settings.capture.copy_binary.info) {
+ if (raw) {
+ ia_css_pipe_util_set_output_frames(out_frames, 0, out_frame);
+#if !defined(HAS_NO_INPUT_SYSTEM) && defined(USE_INPUT_SYSTEM_VERSION_2401)
+ if (!continuous) {
+ ia_css_pipe_get_generic_stage_desc(&stage_desc, copy_binary,
+ out_frames, in_frame, NULL);
+ } else {
+ in_frame = pipe->stream->last_pipe->continuous_frames[0];
+ ia_css_pipe_get_generic_stage_desc(&stage_desc, copy_binary,
+ out_frames, in_frame, NULL);
+ }
+#else
+ ia_css_pipe_get_generic_stage_desc(&stage_desc, copy_binary,
+ out_frames, NULL, NULL);
+#endif
+ } else {
+ ia_css_pipe_util_set_output_frames(out_frames, 0, in_frame);
+ ia_css_pipe_get_generic_stage_desc(&stage_desc, copy_binary,
+ out_frames, NULL, NULL);
+ }
+
+ err = ia_css_pipeline_create_and_add_stage(me,
+ &stage_desc,
+ &current_stage);
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ } else if (pipe->stream->config.continuous) {
+ in_frame = pipe->stream->last_pipe->continuous_frames[0];
+ }
+
+ if (mode == IA_CSS_CAPTURE_MODE_PRIMARY) {
+ unsigned int frm;
+ struct ia_css_frame *local_in_frame = NULL;
+ struct ia_css_frame *local_out_frame = NULL;
+
+ for (i = 0; i < num_primary_stage; i++) {
+ if (i == 0)
+ local_in_frame = in_frame;
+ else
+ local_in_frame = NULL;
+#ifndef ISP2401
+ if (!need_pp && (i == num_primary_stage - 1))
+#else
+ if (!need_pp && (i == num_primary_stage - 1) && !need_ldc)
+#endif
+ local_out_frame = out_frame;
+ else
+ local_out_frame = NULL;
+ ia_css_pipe_util_set_output_frames(out_frames, 0, local_out_frame);
+/*
+ * WARNING: The #ifdef flag has been added below as a
+ * temporary solution to solve the problem of enabling the
+ * viewfinder in a single binary in a capture flow. The
+ * vf-pp stage has been removed from Skycam in the solution
+ * provided. The vf-pp stage should be re-introduced when
+ * required. This should not be considered a clean solution.
+ * Proper investigation should be done to come up with the clean
+ * solution.
+ */
+ ia_css_pipe_get_generic_stage_desc(&stage_desc, primary_binary[i],
+ out_frames, local_in_frame, NULL);
+ err = ia_css_pipeline_create_and_add_stage(me,
+ &stage_desc,
+ &current_stage);
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ }
+ (void)frm;
+ /* If we use copy instead of primary,
+ the input must be YUV instead of RAW */
+ current_stage->args.copy_vf =
+ primary_binary[0]->info->sp.pipeline.mode ==
+ IA_CSS_BINARY_MODE_COPY;
+ current_stage->args.copy_output = current_stage->args.copy_vf;
+ } else if (mode == IA_CSS_CAPTURE_MODE_ADVANCED ||
+ mode == IA_CSS_CAPTURE_MODE_LOW_LIGHT) {
+ ia_css_pipe_util_set_output_frames(out_frames, 0, NULL);
+ ia_css_pipe_get_generic_stage_desc(&stage_desc, pre_isp_binary,
+ out_frames, in_frame, NULL);
+ err = ia_css_pipeline_create_and_add_stage(me,
+ &stage_desc, NULL);
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ ia_css_pipe_util_set_output_frames(out_frames, 0, NULL);
+ ia_css_pipe_get_generic_stage_desc(&stage_desc, anr_gdc_binary,
+ out_frames, NULL, NULL);
+ err = ia_css_pipeline_create_and_add_stage(me,
+ &stage_desc, NULL);
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+
+ if (need_pp) {
+ ia_css_pipe_util_set_output_frames(out_frames, 0, NULL);
+ ia_css_pipe_get_generic_stage_desc(&stage_desc, post_isp_binary,
+ out_frames, NULL, NULL);
+ } else {
+ ia_css_pipe_util_set_output_frames(out_frames, 0, out_frame);
+ ia_css_pipe_get_generic_stage_desc(&stage_desc, post_isp_binary,
+ out_frames, NULL, NULL);
+ }
+
+ err = ia_css_pipeline_create_and_add_stage(me,
+ &stage_desc, &current_stage);
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ } else if (mode == IA_CSS_CAPTURE_MODE_BAYER) {
+ ia_css_pipe_util_set_output_frames(out_frames, 0, out_frame);
+ ia_css_pipe_get_generic_stage_desc(&stage_desc, pre_isp_binary,
+ out_frames, in_frame, NULL);
+ err = ia_css_pipeline_create_and_add_stage(me,
+ &stage_desc,
+ NULL);
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ }
+
+#ifndef ISP2401
+ if (need_pp && current_stage) {
+ struct ia_css_frame *local_in_frame = NULL;
+ local_in_frame = current_stage->args.out_frame[0];
+
+ if (need_ldc) {
+ ia_css_pipe_util_set_output_frames(out_frames, 0, NULL);
+ ia_css_pipe_get_generic_stage_desc(&stage_desc, capture_ldc_binary,
+ out_frames, local_in_frame, NULL);
+ err = ia_css_pipeline_create_and_add_stage(me,
+ &stage_desc,
+ &current_stage);
+ local_in_frame = current_stage->args.out_frame[0];
+ }
+ err = add_capture_pp_stage(pipe, me, local_in_frame, need_yuv_pp ? NULL : out_frame,
+#else
+ /* ldc and capture_pp not supported in same pipeline */
+ if (need_ldc && current_stage) {
+ in_frame = current_stage->args.out_frame[0];
+ ia_css_pipe_util_set_output_frames(out_frames, 0, out_frame);
+ ia_css_pipe_get_generic_stage_desc(&stage_desc, capture_ldc_binary,
+ out_frames, in_frame, NULL);
+ err = ia_css_pipeline_create_and_add_stage(me,
+ &stage_desc,
+ NULL);
+ } else if (need_pp && current_stage) {
+ in_frame = current_stage->args.out_frame[0];
+ err = add_capture_pp_stage(pipe, me, in_frame, need_yuv_pp ? NULL : out_frame,
+#endif
+ capture_pp_binary,
+ &current_stage);
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ }
+
+ if (need_yuv_pp && current_stage) {
+ struct ia_css_frame *tmp_in_frame = current_stage->args.out_frame[0];
+ struct ia_css_frame *tmp_out_frame = NULL;
+
+ for (i = 0; i < num_yuv_scaler; i++) {
+ if (is_output_stage[i] == true)
+ tmp_out_frame = out_frame;
+ else
+ tmp_out_frame = NULL;
+
+ err = add_yuv_scaler_stage(pipe, me, tmp_in_frame, tmp_out_frame,
+ NULL,
+ &yuv_scaler_binary[i],
+ &yuv_scaler_stage);
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ /* we use output port 1 as internal output port */
+ tmp_in_frame = yuv_scaler_stage->args.out_frame[1];
+ }
+ }
+
+/*
+ * WARNING: The #ifdef flag has been added below as a
+ * temporary solution to solve the problem of enabling the
+ * viewfinder in a single binary in a capture flow. The vf-pp
+ * stage has been removed from Skycam in the solution provided.
+ * The vf-pp stage should be re-introduced when required. This
+ * should not be considered a clean solution. Proper
+ * investigation should be done to come up with the clean solution.
+ */
+ if (mode != IA_CSS_CAPTURE_MODE_RAW && mode != IA_CSS_CAPTURE_MODE_BAYER && current_stage && vf_frame) {
+ in_frame = current_stage->args.out_vf_frame;
+ err = add_vf_pp_stage(pipe, in_frame, vf_frame, vf_pp_binary,
+ &current_stage);
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ }
+ ia_css_pipeline_finalize_stages(&pipe->pipeline, pipe->stream->config.continuous);
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "create_host_regular_capture_pipeline() leave:\n");
+
+ return IA_CSS_SUCCESS;
+}
+
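+/* Create either the ISYS-copy capture pipeline (COPY mode) or the regular
+ * capture pipeline. */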
+static enum ia_css_err
+create_host_capture_pipeline(struct ia_css_pipe *pipe)
+{
+ enum ia_css_err err = IA_CSS_SUCCESS;
+
+ IA_CSS_ENTER_PRIVATE("pipe = %p", pipe);
+
+ if (pipe->config.mode == IA_CSS_PIPE_MODE_COPY)
+ err = create_host_isyscopy_capture_pipeline(pipe);
+ else
+ err = create_host_regular_capture_pipeline(pipe);
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+
+ return err;
+}
+
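+/* Start a capture pipe. RAW/Bayer captures that can be copied on the SP are
+ * started via start_copy_on_sp(); otherwise MIPI frames are sent where
+ * needed and the pipeline is started with the stream's input mode. */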
+static enum ia_css_err capture_start(
+ struct ia_css_pipe *pipe)
+{
+ struct ia_css_pipeline *me;
+
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ enum sh_css_pipe_config_override copy_ovrd;
+
+ IA_CSS_ENTER_PRIVATE("pipe = %p", pipe);
+ if (pipe == NULL) {
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_INVALID_ARGUMENTS);
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+
+ me = &pipe->pipeline;
+
+ if ((pipe->config.default_capture_config.mode == IA_CSS_CAPTURE_MODE_RAW ||
+ pipe->config.default_capture_config.mode == IA_CSS_CAPTURE_MODE_BAYER) &&
+ (pipe->config.mode != IA_CSS_PIPE_MODE_COPY)) {
+ if (copy_on_sp(pipe)) {
+ err = start_copy_on_sp(pipe, &me->out_frame[0]);
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ }
+
+#if defined(USE_INPUT_SYSTEM_VERSION_2)
+ /* old isys: need to send_mipi_frames() in all pipe modes */
+ err = send_mipi_frames(pipe);
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+#elif defined(USE_INPUT_SYSTEM_VERSION_2401)
+ if (pipe->config.mode != IA_CSS_PIPE_MODE_COPY) {
+ err = send_mipi_frames(pipe);
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ }
+
+#endif
+
+ {
+ unsigned int thread_id;
+
+ ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(pipe), &thread_id);
+ copy_ovrd = 1 << thread_id;
+
+ }
+ start_pipe(pipe, copy_ovrd, pipe->stream->config.mode);
+
+#if !defined(HAS_NO_INPUT_SYSTEM) && !defined(USE_INPUT_SYSTEM_VERSION_2401)
+ /*
+ * old isys: for IA_CSS_PIPE_MODE_COPY pipe, isys rx has to be configured,
+ * which is currently done in start_binary(); but COPY pipe contains no binary,
+ * and does not call start_binary(); so we need to configure the rx here.
+ */
+ if (pipe->config.mode == IA_CSS_PIPE_MODE_COPY && pipe->stream->reconfigure_css_rx) {
+ ia_css_isys_rx_configure(&pipe->stream->csi_rx_config, pipe->stream->config.mode);
+ pipe->stream->reconfigure_css_rx = false;
+ }
+#endif
+
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+
+}
+
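+/* Return the output frame info of the given output pin, adjusted for JPEG
+ * (binary) copy output and for the raw bit depth of RAW formats. */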
+static enum ia_css_err
+sh_css_pipe_get_output_frame_info(struct ia_css_pipe *pipe,
+ struct ia_css_frame_info *info,
+ unsigned int idx)
+{
+ enum ia_css_err err = IA_CSS_SUCCESS;
+
+ assert(pipe != NULL);
+ assert(info != NULL);
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "sh_css_pipe_get_output_frame_info() enter:\n");
+
+ *info = pipe->output_info[idx];
+ if (copy_on_sp(pipe) &&
+ pipe->stream->config.input_config.format == IA_CSS_STREAM_FORMAT_BINARY_8) {
+ ia_css_frame_info_init(
+ info,
+ JPEG_BYTES,
+ 1,
+ IA_CSS_FRAME_FORMAT_BINARY_8,
+ 0);
+ } else if (info->format == IA_CSS_FRAME_FORMAT_RAW ||
+ info->format == IA_CSS_FRAME_FORMAT_RAW_PACKED) {
+ info->raw_bit_depth =
+ ia_css_pipe_util_pipe_input_format_bpp(pipe);
+
+ }
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "sh_css_pipe_get_output_frame_info() leave:\n");
+ return err;
+}
+
+#if !defined(HAS_NO_INPUT_SYSTEM)
+void
+ia_css_stream_send_input_frame(const struct ia_css_stream *stream,
+ const unsigned short *data,
+ unsigned int width,
+ unsigned int height)
+{
+ assert(stream != NULL);
+
+ ia_css_inputfifo_send_input_frame(
+ data, width, height,
+ stream->config.channel_id,
+ stream->config.input_config.format,
+ stream->config.pixels_per_clock == 2);
+}
+
+void
+ia_css_stream_start_input_frame(const struct ia_css_stream *stream)
+{
+ assert(stream != NULL);
+
+ ia_css_inputfifo_start_frame(
+ stream->config.channel_id,
+ stream->config.input_config.format,
+ stream->config.pixels_per_clock == 2);
+}
+
+void
+ia_css_stream_send_input_line(const struct ia_css_stream *stream,
+ const unsigned short *data,
+ unsigned int width,
+ const unsigned short *data2,
+ unsigned int width2)
+{
+ assert(stream != NULL);
+
+ ia_css_inputfifo_send_line(stream->config.channel_id,
+ data, width, data2, width2);
+}
+
+void
+ia_css_stream_send_input_embedded_line(const struct ia_css_stream *stream,
+ enum ia_css_stream_format format,
+ const unsigned short *data,
+ unsigned int width)
+{
+ assert(stream != NULL);
+ if (data == NULL || width == 0)
+ return;
+ ia_css_inputfifo_send_embedded_line(stream->config.channel_id,
+ format, data, width);
+}
+
+void
+ia_css_stream_end_input_frame(const struct ia_css_stream *stream)
+{
+ assert(stream != NULL);
+
+ ia_css_inputfifo_end_frame(stream->config.channel_id);
+}
+#endif
+
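+/* Append a firmware blob to the end of the given firmware list. */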
+static void
+append_firmware(struct ia_css_fw_info **l, struct ia_css_fw_info *firmware)
+{
+ IA_CSS_ENTER_PRIVATE("l = %p, firmware = %p", l , firmware);
+ if (l == NULL) {
+ IA_CSS_ERROR("NULL fw_info");
+ IA_CSS_LEAVE_PRIVATE("");
+ return;
+ }
+ while (*l)
+ l = &(*l)->next;
+ *l = firmware;
+ /*firmware->next = NULL;*/ /* when multiple acc extensions are loaded, 'next' may be non-NULL */
+ IA_CSS_LEAVE_PRIVATE("");
+}
+
+static void
+remove_firmware(struct ia_css_fw_info **l, struct ia_css_fw_info *firmware)
+{
+ assert(*l);
+ assert(firmware);
+ (void)l;
+ (void)firmware;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "remove_firmware() enter:\n");
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "remove_firmware() leave:\n");
+ return; /* removing single and multiple firmware is handled in acc_unload_extension() */
+}
+
+#if !defined(HRT_UNSCHED)
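+/* Upload the ISP blob of an extension firmware to xmem, unless it has
+ * already been uploaded. */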
+static enum ia_css_err
+upload_isp_code(struct ia_css_fw_info *firmware)
+{
+ hrt_vaddress binary;
+
+ if (firmware == NULL) {
+ IA_CSS_ERROR("NULL input parameter");
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+ binary = firmware->info.isp.xmem_addr;
+
+ if (!binary) {
+ unsigned size = firmware->blob.size;
+ const unsigned char *blob;
+ const unsigned char *binary_name;
+ binary_name =
+ (const unsigned char *)(IA_CSS_EXT_ISP_PROG_NAME(
+ firmware));
+ blob = binary_name +
+ strlen((const char *)binary_name) +
+ 1;
+ binary = sh_css_load_blob(blob, size);
+ firmware->info.isp.xmem_addr = binary;
+ }
+
+ if (!binary)
+ return IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+ return IA_CSS_SUCCESS;
+}
+#endif
+
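+/* Upload the ISP code of every firmware in the extension chain and mark the
+ * extension as loaded. */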
+static enum ia_css_err
+acc_load_extension(struct ia_css_fw_info *firmware)
+{
+#if !defined(HRT_UNSCHED)
+ enum ia_css_err err;
+ struct ia_css_fw_info *hd = firmware;
+ while (hd) {
+ err = upload_isp_code(hd);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+ hd = hd->next;
+ }
+#endif
+
+ if (firmware == NULL)
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ firmware->loaded = true;
+ return IA_CSS_SUCCESS;
+}
+
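+/* Free the uploaded ISP code of every firmware in the extension chain,
+ * unlink the chain and mark the extension as unloaded. */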
+static void
+acc_unload_extension(struct ia_css_fw_info *firmware)
+{
+ struct ia_css_fw_info *hd = firmware;
+ struct ia_css_fw_info *hdn = NULL;
+
+ if (firmware == NULL) /* should not happen */
+ return;
+ /* unload and remove multiple firmwares */
+ while (hd) {
+ hdn = (hd->next) ? &(*hd->next) : NULL;
+ if (hd->info.isp.xmem_addr) {
+ hmm_free(hd->info.isp.xmem_addr);
+ hd->info.isp.xmem_addr = mmgr_NULL;
+ }
+ hd->isp_code = NULL;
+ hd->next = NULL;
+ hd = hdn;
+ }
+
+ firmware->loaded = false;
+}
+/* Load firmware for extension */
+static enum ia_css_err
+ia_css_pipe_load_extension(struct ia_css_pipe *pipe,
+ struct ia_css_fw_info *firmware)
+{
+ enum ia_css_err err = IA_CSS_SUCCESS;
+
+ IA_CSS_ENTER_PRIVATE("fw = %p pipe = %p", firmware, pipe);
+
+ if ((firmware == NULL) || (pipe == NULL)) {
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_INVALID_ARGUMENTS);
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+
+ if (firmware->info.isp.type == IA_CSS_ACC_OUTPUT) {
+ if (&pipe->output_stage != NULL)
+ append_firmware(&pipe->output_stage, firmware);
+ else {
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_INTERNAL_ERROR);
+ return IA_CSS_ERR_INTERNAL_ERROR;
+ }
+ }
+ else if (firmware->info.isp.type == IA_CSS_ACC_VIEWFINDER) {
+ if (&pipe->vf_stage != NULL)
+ append_firmware(&pipe->vf_stage, firmware);
+ else {
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_INTERNAL_ERROR);
+ return IA_CSS_ERR_INTERNAL_ERROR;
+ }
+ }
+ err = acc_load_extension(firmware);
+
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+}
+
+/* Unload firmware for extension */
+static void
+ia_css_pipe_unload_extension(struct ia_css_pipe *pipe,
+ struct ia_css_fw_info *firmware)
+{
+ IA_CSS_ENTER_PRIVATE("fw = %p pipe = %p", firmware, pipe);
+
+ if ((firmware == NULL) || (pipe == NULL)) {
+ IA_CSS_ERROR("NULL input parameters");
+ IA_CSS_LEAVE_PRIVATE("");
+ return;
+ }
+
+ if (firmware->info.isp.type == IA_CSS_ACC_OUTPUT)
+ remove_firmware(&pipe->output_stage, firmware);
+ else if (firmware->info.isp.type == IA_CSS_ACC_VIEWFINDER)
+ remove_firmware(&pipe->vf_stage, firmware);
+ acc_unload_extension(firmware);
+
+ IA_CSS_LEAVE_PRIVATE("");
+}
+
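+/* Return true when any stage in the pipeline uses a binary that has
+ * parameters enabled. */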
+bool
+ia_css_pipeline_uses_params(struct ia_css_pipeline *me)
+{
+ struct ia_css_pipeline_stage *stage;
+
+ assert(me != NULL);
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_pipeline_uses_params() enter: me=%p\n", me);
+
+ for (stage = me->stages; stage; stage = stage->next)
+ if (stage->binary_info && stage->binary_info->enable.params) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_pipeline_uses_params() leave: "
+ "return_bool=true\n");
+ return true;
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_pipeline_uses_params() leave: return_bool=false\n");
+ return false;
+}
+
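+/* Add an acceleration stage for the given firmware to a pipeline, loading
+ * the extension first when it was not loaded yet (non-QoS case). */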
+static enum ia_css_err
+sh_css_pipeline_add_acc_stage(struct ia_css_pipeline *pipeline,
+ const void *acc_fw)
+{
+ struct ia_css_fw_info *fw = (struct ia_css_fw_info *)acc_fw;
+ /* In QoS case, load_extension already called, so skipping */
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ if (fw->loaded == false)
+ err = acc_load_extension(fw);
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "sh_css_pipeline_add_acc_stage() enter: pipeline=%p,"
+ " acc_fw=%p\n", pipeline, acc_fw);
+
+ if (err == IA_CSS_SUCCESS) {
+ struct ia_css_pipeline_stage_desc stage_desc;
+ ia_css_pipe_get_acc_stage_desc(&stage_desc, NULL, fw);
+ err = ia_css_pipeline_create_and_add_stage(pipeline,
+ &stage_desc,
+ NULL);
+ }
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "sh_css_pipeline_add_acc_stage() leave: return_err=%d\n",err);
+ return err;
+}
+
+/**
+ * @brief Tag a specific frame in continuous capture.
+ * Refer to "sh_css_internal.h" for details.
+ */
+enum ia_css_err ia_css_stream_capture_frame(struct ia_css_stream *stream,
+ unsigned int exp_id)
+{
+ struct sh_css_tag_descr tag_descr;
+ uint32_t encoded_tag_descr;
+ enum ia_css_err err;
+
+ assert(stream != NULL);
+ IA_CSS_ENTER("exp_id=%d", exp_id);
+
+ /* Only continuous streams have a tagger */
+ if (exp_id == 0 || !stream->config.continuous) {
+ IA_CSS_LEAVE_ERR(IA_CSS_ERR_INVALID_ARGUMENTS);
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+
+ if (!sh_css_sp_is_running()) {
+ /* SP is not running. The queues are not valid */
+ IA_CSS_LEAVE_ERR(IA_CSS_ERR_RESOURCE_NOT_AVAILABLE);
+ return IA_CSS_ERR_RESOURCE_NOT_AVAILABLE;
+ }
+
+ /* Create the tag descriptor from the parameters */
+ sh_css_create_tag_descr(0, 0, 0, exp_id, &tag_descr);
+ /* Encode the tag descriptor into a 32-bit value */
+ encoded_tag_descr = sh_css_encode_tag_descr(&tag_descr);
+ /* Enqueue the encoded tag to the host2sp queue.
+ * Note: The pipe and stage IDs for tag_cmd queue are hard-coded to 0
+ * on both host and the SP side.
+ * This is because a single tag_cmd queue is sufficient. */
+ err = ia_css_bufq_enqueue_tag_cmd(encoded_tag_descr);
+
+ IA_CSS_LEAVE_ERR(err);
+ return err;
+}
+
+/**
+ * @brief Configure the continuous capture.
+ * Refer to "sh_css_internal.h" for details.
+ */
+enum ia_css_err ia_css_stream_capture(
+ struct ia_css_stream *stream,
+ int num_captures,
+ unsigned int skip,
+ int offset)
+{
+ struct sh_css_tag_descr tag_descr;
+ unsigned int encoded_tag_descr;
+ enum ia_css_err return_err;
+
+ if (stream == NULL)
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_stream_capture() enter: num_captures=%d,"
+ " skip=%d, offset=%d\n", num_captures, skip,offset);
+
+ /* Check if the tag descriptor is valid */
+ if (num_captures < SH_CSS_MINIMUM_TAG_ID) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_stream_capture() leave: return_err=%d\n",
+ IA_CSS_ERR_INVALID_ARGUMENTS);
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+
+ /* Create the tag descriptor from the parameters */
+ sh_css_create_tag_descr(num_captures, skip, offset, 0, &tag_descr);
+
+
+ /* Encode the tag descriptor into a 32-bit value */
+ encoded_tag_descr = sh_css_encode_tag_descr(&tag_descr);
+
+ if (!sh_css_sp_is_running()) {
+ /* SP is not running. The queues are not valid */
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_stream_capture() leaving:"
+ "queues unavailable\n");
+ return IA_CSS_ERR_RESOURCE_NOT_AVAILABLE;
+ }
+
+ /* Enqueue the encoded tag to the host2sp queue.
+ * Note: The pipe and stage IDs for tag_cmd queue are hard-coded to 0
+ * on both host and the SP side.
+ * This is because a single tag_cmd queue is sufficient. */
+ return_err = ia_css_bufq_enqueue_tag_cmd((uint32_t)encoded_tag_descr);
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_stream_capture() leave: return_err=%d\n",
+ return_err);
+
+ return return_err;
+}
+
+void ia_css_stream_request_flash(struct ia_css_stream *stream)
+{
+ (void)stream;
+
+ assert(stream != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_stream_request_flash() enter: void\n");
+
+#ifndef ISP2401
+ sh_css_write_host2sp_command(host2sp_cmd_start_flash);
+#else
+ if (sh_css_sp_is_running()) {
+ if (!sh_css_write_host2sp_command(host2sp_cmd_start_flash)) {
+ IA_CSS_ERROR("Call to 'sh-css_write_host2sp_command()' failed");
+ ia_css_debug_dump_sp_sw_debug_info();
+ ia_css_debug_dump_debug_info(NULL);
+ }
+ } else
+ IA_CSS_LOG("SP is not running!");
+
+#endif
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_stream_request_flash() leave: return_void\n");
+}
+
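+/* Reset the host/SP control variables in SP DMEM to their initial values
+ * before the SP starts running. */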
+static void
+sh_css_init_host_sp_control_vars(void)
+{
+ const struct ia_css_fw_info *fw;
+ unsigned int HIVE_ADDR_ia_css_ispctrl_sp_isp_started;
+
+ unsigned int HIVE_ADDR_host_sp_queues_initialized;
+ unsigned int HIVE_ADDR_sp_sleep_mode;
+ unsigned int HIVE_ADDR_ia_css_dmaproxy_sp_invalidate_tlb;
+#ifndef ISP2401
+ unsigned int HIVE_ADDR_sp_stop_copy_preview;
+#endif
+ unsigned int HIVE_ADDR_host_sp_com;
+ unsigned int o = offsetof(struct host_sp_communication, host2sp_command)
+ / sizeof(int);
+
+#if defined(USE_INPUT_SYSTEM_VERSION_2) || defined(USE_INPUT_SYSTEM_VERSION_2401)
+ unsigned int i;
+#endif
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "sh_css_init_host_sp_control_vars() enter: void\n");
+
+ fw = &sh_css_sp_fw;
+ HIVE_ADDR_ia_css_ispctrl_sp_isp_started = fw->info.sp.isp_started;
+
+ HIVE_ADDR_host_sp_queues_initialized =
+ fw->info.sp.host_sp_queues_initialized;
+ HIVE_ADDR_sp_sleep_mode = fw->info.sp.sleep_mode;
+ HIVE_ADDR_ia_css_dmaproxy_sp_invalidate_tlb = fw->info.sp.invalidate_tlb;
+#ifndef ISP2401
+ HIVE_ADDR_sp_stop_copy_preview = fw->info.sp.stop_copy_preview;
+#endif
+ HIVE_ADDR_host_sp_com = fw->info.sp.host_sp_com;
+
+ (void)HIVE_ADDR_ia_css_ispctrl_sp_isp_started; /* Suppress warnings in CRUN */
+
+ (void)HIVE_ADDR_sp_sleep_mode;
+ (void)HIVE_ADDR_ia_css_dmaproxy_sp_invalidate_tlb;
+#ifndef ISP2401
+ (void)HIVE_ADDR_sp_stop_copy_preview;
+#endif
+ (void)HIVE_ADDR_host_sp_com;
+
+ sp_dmem_store_uint32(SP0_ID,
+ (unsigned int)sp_address_of(ia_css_ispctrl_sp_isp_started),
+ (uint32_t)(0));
+
+ sp_dmem_store_uint32(SP0_ID,
+ (unsigned int)sp_address_of(host_sp_queues_initialized),
+ (uint32_t)(0));
+ sp_dmem_store_uint32(SP0_ID,
+ (unsigned int)sp_address_of(sp_sleep_mode),
+ (uint32_t)(0));
+ sp_dmem_store_uint32(SP0_ID,
+ (unsigned int)sp_address_of(ia_css_dmaproxy_sp_invalidate_tlb),
+ (uint32_t)(false));
+#ifndef ISP2401
+ sp_dmem_store_uint32(SP0_ID,
+ (unsigned int)sp_address_of(sp_stop_copy_preview),
+ my_css.stop_copy_preview ? (uint32_t)(1) : (uint32_t)(0));
+#endif
+ store_sp_array_uint(host_sp_com, o, host2sp_cmd_ready);
+
+#if !defined(HAS_NO_INPUT_SYSTEM)
+ for (i = 0; i < N_CSI_PORTS; i++) {
+ sh_css_update_host2sp_num_mipi_frames
+ (my_css.num_mipi_frames[i]);
+ }
+#endif
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "sh_css_init_host_sp_control_vars() leave: return_void\n");
+}
+
+/**
+ * create the internal structures and fill in the configuration data
+ */
+void ia_css_pipe_config_defaults(struct ia_css_pipe_config *pipe_config)
+{
+ struct ia_css_pipe_config def_config = DEFAULT_PIPE_CONFIG;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_pipe_config_defaults()\n");
+ *pipe_config = def_config;
+}
+
+void
+ia_css_pipe_extra_config_defaults(struct ia_css_pipe_extra_config *extra_config)
+{
+ if (extra_config == NULL) {
+ IA_CSS_ERROR("NULL input parameter");
+ return;
+ }
+
+ extra_config->enable_raw_binning = false;
+ extra_config->enable_yuv_ds = false;
+ extra_config->enable_high_speed = false;
+ extra_config->enable_dvs_6axis = false;
+ extra_config->enable_reduced_pipe = false;
+ extra_config->disable_vf_pp = false;
+ extra_config->enable_fractional_ds = false;
+}
+
+void ia_css_stream_config_defaults(struct ia_css_stream_config *stream_config)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_stream_config_defaults()\n");
+ assert(stream_config != NULL);
+ memset(stream_config, 0, sizeof(*stream_config));
+ stream_config->online = true;
+ stream_config->left_padding = -1;
+ stream_config->pixels_per_clock = 1;
+ /* temporary default value for backwards compatibility.
+ * This field used to be hardcoded within CSS but this has now
+ * been moved to the stream_config struct. */
+ stream_config->source.port.rxcount = 0x04040404;
+}
+
+static enum ia_css_err
+ia_css_acc_pipe_create(struct ia_css_pipe *pipe)
+{
+ enum ia_css_err err = IA_CSS_SUCCESS;
+
+ if (pipe == NULL) {
+ IA_CSS_ERROR("NULL input parameter");
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+
+ /* num_execs = 0 has no meaning semantically; run at least once. */
+ if (pipe->config.acc_num_execs == 0)
+ pipe->config.acc_num_execs = 1;
+
+ if (pipe->config.acc_extension) {
+ err = ia_css_pipe_load_extension(pipe, pipe->config.acc_extension);
+ }
+
+ return err;
+}
+
+enum ia_css_err
+ia_css_pipe_create(const struct ia_css_pipe_config *config,
+ struct ia_css_pipe **pipe)
+{
+#ifndef ISP2401
+ if (config == NULL)
+#else
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ IA_CSS_ENTER_PRIVATE("config = %p, pipe = %p", config, pipe);
+
+ if (config == NULL) {
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_INVALID_ARGUMENTS);
+#endif
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+#ifndef ISP2401
+ if (pipe == NULL)
+#else
+ }
+ if (pipe == NULL) {
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_INVALID_ARGUMENTS);
+#endif
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+#ifndef ISP2401
+ return ia_css_pipe_create_extra(config, NULL, pipe);
+#else
+ }
+
+ err = ia_css_pipe_create_extra(config, NULL, pipe);
+
+ if (err == IA_CSS_SUCCESS) {
+ IA_CSS_LOG("pipe created successfully = %p", *pipe);
+ }
+
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+
+ return err;
+#endif
+}
+
+enum ia_css_err
+ia_css_pipe_create_extra(const struct ia_css_pipe_config *config,
+ const struct ia_css_pipe_extra_config *extra_config,
+ struct ia_css_pipe **pipe)
+{
+ enum ia_css_err err = IA_CSS_ERR_INTERNAL_ERROR;
+ struct ia_css_pipe *internal_pipe = NULL;
+ unsigned int i;
+
+ IA_CSS_ENTER_PRIVATE("config = %p, extra_config = %p and pipe = %p", config, extra_config, pipe);
+
+ /* do not allow creating more pipes than the maximum limit */
+ if (my_css.pipe_counter >= IA_CSS_PIPELINE_NUM_MAX) {
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_RESOURCE_EXHAUSTED);
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+
+ if ((pipe == NULL) || (config == NULL)) {
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_INVALID_ARGUMENTS);
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+
+ ia_css_debug_dump_pipe_config(config);
+ ia_css_debug_dump_pipe_extra_config(extra_config);
+
+ err = create_pipe(config->mode, &internal_pipe, false);
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+
+ /* now we have a pipe structure to fill */
+ internal_pipe->config = *config;
+ if (extra_config)
+ internal_pipe->extra_config = *extra_config;
+ else
+ ia_css_pipe_extra_config_defaults(&internal_pipe->extra_config);
+
+ if (config->mode == IA_CSS_PIPE_MODE_ACC) {
+ /* Temporary hack to migrate acceleration to CSS 2.0.
+ * In the future the code for all pipe types should be
+ * unified. */
+ *pipe = internal_pipe;
+ if (!internal_pipe->config.acc_extension &&
+ internal_pipe->config.num_acc_stages == 0){ /* if no acc binary and no standalone stage */
+ *pipe = NULL;
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_SUCCESS);
+ return IA_CSS_SUCCESS;
+ }
+ return ia_css_acc_pipe_create(internal_pipe);
+ }
+
+ /* Use config value when dvs_frame_delay setting equal to 2, otherwise always 1 by default */
+ if (internal_pipe->config.dvs_frame_delay == IA_CSS_FRAME_DELAY_2)
+ internal_pipe->dvs_frame_delay = 2;
+ else
+ internal_pipe->dvs_frame_delay = 1;
+
+
+ /* We still keep enable_raw_binning for backward compatibility. For any new
+ fractional bayer downscaling, bayer_ds_out_res should be used. If both are
+ specified, bayer_ds_out_res takes precedence. If neither is specified, we
+ set bayer_ds_out_res equal to the IF output resolution (the IF may do cropping
+ on the sensor output) or use the default decimation factor of 1. */
+ if (internal_pipe->extra_config.enable_raw_binning &&
+ internal_pipe->config.bayer_ds_out_res.width) {
+ /* fill some code here, if no code is needed, please remove it during integration */
+ }
+
+ /* YUV downscaling */
+ if ((internal_pipe->config.vf_pp_in_res.width ||
+ internal_pipe->config.capt_pp_in_res.width)) {
+ enum ia_css_frame_format format;
+ if (internal_pipe->config.vf_pp_in_res.width) {
+ format = IA_CSS_FRAME_FORMAT_YUV_LINE;
+ ia_css_frame_info_init(
+ &internal_pipe->vf_yuv_ds_input_info,
+ internal_pipe->config.vf_pp_in_res.width,
+ internal_pipe->config.vf_pp_in_res.height,
+ format, 0);
+ }
+ if (internal_pipe->config.capt_pp_in_res.width) {
+ format = IA_CSS_FRAME_FORMAT_YUV420;
+ ia_css_frame_info_init(
+ &internal_pipe->out_yuv_ds_input_info,
+ internal_pipe->config.capt_pp_in_res.width,
+ internal_pipe->config.capt_pp_in_res.height,
+ format, 0);
+ }
+ }
+ if (internal_pipe->config.vf_pp_in_res.width &&
+ internal_pipe->config.mode == IA_CSS_PIPE_MODE_PREVIEW) {
+ ia_css_frame_info_init(
+ &internal_pipe->vf_yuv_ds_input_info,
+ internal_pipe->config.vf_pp_in_res.width,
+ internal_pipe->config.vf_pp_in_res.height,
+ IA_CSS_FRAME_FORMAT_YUV_LINE, 0);
+ }
+ /* handle bayer downscaling output info */
+ if (internal_pipe->config.bayer_ds_out_res.width) {
+ ia_css_frame_info_init(
+ &internal_pipe->bds_output_info,
+ internal_pipe->config.bayer_ds_out_res.width,
+ internal_pipe->config.bayer_ds_out_res.height,
+ IA_CSS_FRAME_FORMAT_RAW, 0);
+ }
+
+ /* handle output info, assume always needed */
+ for (i = 0; i < IA_CSS_PIPE_MAX_OUTPUT_STAGE; i++) {
+ if (internal_pipe->config.output_info[i].res.width) {
+ err = sh_css_pipe_configure_output(
+ internal_pipe,
+ internal_pipe->config.output_info[i].res.width,
+ internal_pipe->config.output_info[i].res.height,
+ internal_pipe->config.output_info[i].padded_width,
+ internal_pipe->config.output_info[i].format,
+ i);
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ sh_css_free(internal_pipe);
+ internal_pipe = NULL;
+ return err;
+ }
+ }
+
+ /* handle vf output info, when configured */
+ internal_pipe->enable_viewfinder[i] = (internal_pipe->config.vf_output_info[i].res.width != 0);
+ if (internal_pipe->config.vf_output_info[i].res.width) {
+ err = sh_css_pipe_configure_viewfinder(
+ internal_pipe,
+ internal_pipe->config.vf_output_info[i].res.width,
+ internal_pipe->config.vf_output_info[i].res.height,
+ internal_pipe->config.vf_output_info[i].padded_width,
+ internal_pipe->config.vf_output_info[i].format,
+ i);
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ sh_css_free(internal_pipe);
+ internal_pipe = NULL;
+ return err;
+ }
+ }
+ }
+ if (internal_pipe->config.acc_extension) {
+ err = ia_css_pipe_load_extension(internal_pipe,
+ internal_pipe->config.acc_extension);
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ sh_css_free(internal_pipe);
+ return err;
+ }
+ }
+ /* set all info to zeroes first */
+ memset(&internal_pipe->info, 0, sizeof(internal_pipe->info));
+
+ /* all went well, return the pipe */
+ *pipe = internal_pipe;
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_SUCCESS);
+ return IA_CSS_SUCCESS;
+}
+
+
+enum ia_css_err
+ia_css_pipe_get_info(const struct ia_css_pipe *pipe,
+ struct ia_css_pipe_info *pipe_info)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_pipe_get_info()\n");
+ assert(pipe_info != NULL);
+ if (pipe_info == NULL) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_ERROR,
+ "ia_css_pipe_get_info: pipe_info cannot be NULL\n");
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+ if (pipe == NULL || pipe->stream == NULL) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_ERROR,
+ "ia_css_pipe_get_info: ia_css_stream_create needs to"
+ " be called before ia_css_[stream/pipe]_get_info\n");
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+ /* we succeeded return the info */
+ *pipe_info = pipe->info;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_pipe_get_info() leave\n");
+ return IA_CSS_SUCCESS;
+}
+
+bool ia_css_pipe_has_dvs_stats(struct ia_css_pipe_info *pipe_info)
+{
+ unsigned int i;
+
+ if (pipe_info != NULL) {
+ for (i = 0; i < IA_CSS_DVS_STAT_NUM_OF_LEVELS; i++) {
+ if (pipe_info->grid_info.dvs_grid.dvs_stat_grid_info.grd_cfg[i].grd_start.enable)
+ return true;
+ }
+ }
+
+ return false;
+}
+
+#ifdef ISP2401
+enum ia_css_err
+ia_css_pipe_override_frame_format(struct ia_css_pipe *pipe,
+ int pin_index,
+ enum ia_css_frame_format new_format)
+{
+ enum ia_css_err err = IA_CSS_SUCCESS;
+
+ IA_CSS_ENTER_PRIVATE("pipe = %p, pin_index = %d, new_formats = %d", pipe, pin_index, new_format);
+
+ if (NULL == pipe) {
+ IA_CSS_ERROR("pipe is not set");
+ err = IA_CSS_ERR_INVALID_ARGUMENTS;
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ if (0 != pin_index && 1 != pin_index) {
+ IA_CSS_ERROR("pin index is not valid");
+ err = IA_CSS_ERR_INVALID_ARGUMENTS;
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ if (IA_CSS_FRAME_FORMAT_NV12_TILEY != new_format) {
+ IA_CSS_ERROR("new format is not valid");
+ err = IA_CSS_ERR_INVALID_ARGUMENTS;
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ } else {
+ err = ia_css_pipe_check_format(pipe, new_format);
+ if (IA_CSS_SUCCESS == err) {
+ if (pin_index == 0) {
+ pipe->output_info[0].format = new_format;
+ } else {
+ pipe->vf_output_info[0].format = new_format;
+ }
+ }
+ }
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+}
+
+#endif
+#if defined(USE_INPUT_SYSTEM_VERSION_2)
+/* Configuration of INPUT_SYSTEM_VERSION_2401 is done on SP */
+static enum ia_css_err
+ia_css_stream_configure_rx(struct ia_css_stream *stream)
+{
+ struct ia_css_input_port *config;
+ assert(stream != NULL);
+
+ config = &stream->config.source.port;
+/* AM: this code is not reliable, especially for 2400 */
+ if (config->num_lanes == 1)
+ stream->csi_rx_config.mode = MONO_1L_1L_0L;
+ else if (config->num_lanes == 2)
+ stream->csi_rx_config.mode = MONO_2L_1L_0L;
+ else if (config->num_lanes == 3)
+ stream->csi_rx_config.mode = MONO_3L_1L_0L;
+ else if (config->num_lanes == 4)
+ stream->csi_rx_config.mode = MONO_4L_1L_0L;
+ else if (config->num_lanes != 0)
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+
+ if (config->port > IA_CSS_CSI2_PORT2)
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ stream->csi_rx_config.port =
+ ia_css_isys_port_to_mipi_port(config->port);
+ stream->csi_rx_config.timeout = config->timeout;
+ stream->csi_rx_config.initcount = 0;
+ stream->csi_rx_config.synccount = 0x28282828;
+ stream->csi_rx_config.rxcount = config->rxcount;
+ if (config->compression.type == IA_CSS_CSI2_COMPRESSION_TYPE_NONE)
+ stream->csi_rx_config.comp = MIPI_PREDICTOR_NONE;
+ else {
+ /* not implemented yet, requires extension of the rx_cfg_t
+ * struct */
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+ stream->csi_rx_config.is_two_ppc = (stream->config.pixels_per_clock == 2);
+ stream->reconfigure_css_rx = true;
+ return IA_CSS_SUCCESS;
+}
+#endif
+
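+/* Return the first pipe in the array that matches the requested pipe mode
+ * (and the COPY pipe id when copy_pipe is set), or NULL if none matches. */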
+static struct ia_css_pipe *
+find_pipe(struct ia_css_pipe *pipes[],
+ unsigned int num_pipes,
+ enum ia_css_pipe_mode mode,
+ bool copy_pipe)
+{
+ unsigned i;
+ assert(pipes != NULL);
+ for (i = 0; i < num_pipes; i++) {
+ assert(pipes[i] != NULL);
+ if (pipes[i]->config.mode != mode)
+ continue;
+ if (copy_pipe && pipes[i]->mode != IA_CSS_PIPE_ID_COPY)
+ continue;
+ return pipes[i];
+ }
+ return NULL;
+}
+
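+/* Create the host-side structures for an acceleration stream: bind the pipes
+ * to the stream, map the SP threads and queues and build the pipeline
+ * structure. */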
+static enum ia_css_err
+ia_css_acc_stream_create(struct ia_css_stream *stream)
+{
+ int i;
+ enum ia_css_err err = IA_CSS_SUCCESS;
+
+ assert(stream != NULL);
+ IA_CSS_ENTER_PRIVATE("stream = %p", stream);
+
+ if (stream == NULL) {
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_INVALID_ARGUMENTS);
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+
+ for (i = 0; i < stream->num_pipes; i++) {
+ struct ia_css_pipe *pipe = stream->pipes[i];
+ assert(pipe != NULL);
+ if (pipe == NULL) {
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_INVALID_ARGUMENTS);
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+
+ pipe->stream = stream;
+ }
+
+ /* Map SP threads before doing anything. */
+ err = map_sp_threads(stream, true);
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+
+ for (i = 0; i < stream->num_pipes; i++) {
+ struct ia_css_pipe *pipe = stream->pipes[i];
+ assert(pipe != NULL);
+ ia_css_pipe_map_queue(pipe, true);
+ }
+
+ err = create_host_pipeline_structure(stream);
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+
+ stream->started = false;
+
+
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_SUCCESS);
+
+ return IA_CSS_SUCCESS;
+}
+
+static enum ia_css_err
+metadata_info_init(const struct ia_css_metadata_config *mdc,
+ struct ia_css_metadata_info *md)
+{
+ /* Either both width and height should be set or neither */
+ if ((mdc->resolution.height > 0) ^ (mdc->resolution.width > 0))
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+
+ md->resolution = mdc->resolution;
+ /* We round up the stride to a multiple of the width
+ * of the port going to DDR, this is a HW requirement (DMA). */
+ md->stride = CEIL_MUL(mdc->resolution.width, HIVE_ISP_DDR_WORD_BYTES);
+ md->size = mdc->resolution.height * md->stride;
+ return IA_CSS_SUCCESS;
+}
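+
+/* Worked example, with an assumed DDR word width of 32 bytes (the actual
+ * value is given by HIVE_ISP_DDR_WORD_BYTES): a 1280x720 metadata resolution
+ * gives stride = CEIL_MUL(1280, 32) = 1280 and size = 720 * 1280 = 921600
+ * bytes, while a 1300-pixel-wide resolution would be padded up to a
+ * 1312-byte stride.
+ */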
+
+#ifdef ISP2401
+static enum ia_css_err check_pipe_resolutions(const struct ia_css_pipe *pipe)
+{
+ enum ia_css_err err = IA_CSS_SUCCESS;
+
+ IA_CSS_ENTER_PRIVATE("");
+
+ if (!pipe || !pipe->stream) {
+ IA_CSS_ERROR("null arguments");
+ err = IA_CSS_ERR_INTERNAL_ERROR;
+ goto EXIT;
+ }
+
+ if (ia_css_util_check_res(pipe->config.input_effective_res.width,
+ pipe->config.input_effective_res.height) != IA_CSS_SUCCESS) {
+ IA_CSS_ERROR("effective resolution not supported");
+ err = IA_CSS_ERR_INVALID_ARGUMENTS;
+ goto EXIT;
+ }
+ if (!ia_css_util_resolution_is_zero(pipe->stream->config.input_config.input_res)) {
+ if (!ia_css_util_res_leq(pipe->config.input_effective_res,
+ pipe->stream->config.input_config.input_res)) {
+ IA_CSS_ERROR("effective resolution is larger than input resolution");
+ err = IA_CSS_ERR_INVALID_ARGUMENTS;
+ goto EXIT;
+ }
+ }
+ if (!ia_css_util_resolution_is_even(pipe->config.output_info[0].res)) {
+ IA_CSS_ERROR("output resolution must be even");
+ err = IA_CSS_ERR_INVALID_ARGUMENTS;
+ goto EXIT;
+ }
+ if (!ia_css_util_resolution_is_even(pipe->config.vf_output_info[0].res)) {
+ IA_CSS_ERROR("VF resolution must be even");
+ err = IA_CSS_ERR_INVALID_ARGUMENTS;
+ goto EXIT;
+ }
+EXIT:
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+}
+
+#endif
+
+enum ia_css_err
+ia_css_stream_create(const struct ia_css_stream_config *stream_config,
+ int num_pipes,
+ struct ia_css_pipe *pipes[],
+ struct ia_css_stream **stream)
+{
+ struct ia_css_pipe *curr_pipe;
+ struct ia_css_stream *curr_stream = NULL;
+ bool spcopyonly;
+ bool sensor_binning_changed;
+ int i, j;
+ enum ia_css_err err = IA_CSS_ERR_INTERNAL_ERROR;
+ struct ia_css_metadata_info md_info;
+#ifndef ISP2401
+ struct ia_css_resolution effective_res;
+#else
+#ifdef USE_INPUT_SYSTEM_VERSION_2401
+ bool aspect_ratio_crop_enabled = false;
+#endif
+#endif
+
+ IA_CSS_ENTER("num_pipes=%d", num_pipes);
+ ia_css_debug_dump_stream_config(stream_config, num_pipes);
+
+ /* some checks */
+ if (num_pipes == 0 ||
+ stream == NULL ||
+ pipes == NULL) {
+ err = IA_CSS_ERR_INVALID_ARGUMENTS;
+ IA_CSS_LEAVE_ERR(err);
+ return err;
+ }
+
+#if defined(USE_INPUT_SYSTEM_VERSION_2)
+ /* We don't support metadata for JPEG streams, since both use str2mem */
+ if (stream_config->input_config.format == IA_CSS_STREAM_FORMAT_BINARY_8 &&
+ stream_config->metadata_config.resolution.height > 0) {
+ err = IA_CSS_ERR_INVALID_ARGUMENTS;
+ IA_CSS_LEAVE_ERR(err);
+ return err;
+ }
+#endif
+
+#ifdef USE_INPUT_SYSTEM_VERSION_2401
+ if (stream_config->online && stream_config->pack_raw_pixels) {
+ IA_CSS_LOG("online and pack raw is invalid on input system 2401");
+ err = IA_CSS_ERR_INVALID_ARGUMENTS;
+ IA_CSS_LEAVE_ERR(err);
+ return err;
+ }
+#endif
+
+#if !defined(HAS_NO_INPUT_SYSTEM)
+ ia_css_debug_pipe_graph_dump_stream_config(stream_config);
+
+ /* check if mipi size specified */
+ if (stream_config->mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR)
+#ifdef USE_INPUT_SYSTEM_VERSION_2401
+ if (!stream_config->online)
+#endif
+ {
+ unsigned int port = (unsigned int) stream_config->source.port.port;
+ if (port >= N_MIPI_PORT_ID) {
+ err = IA_CSS_ERR_INVALID_ARGUMENTS;
+ IA_CSS_LEAVE_ERR(err);
+ return err;
+ }
+
+ if (my_css.size_mem_words != 0) {
+ my_css.mipi_frame_size[port] = my_css.size_mem_words;
+ } else if (stream_config->mipi_buffer_config.size_mem_words != 0) {
+ my_css.mipi_frame_size[port] = stream_config->mipi_buffer_config.size_mem_words;
+ } else {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_stream_create() exit: error, need to set mipi frame size.\n");
+ assert(stream_config->mipi_buffer_config.size_mem_words != 0);
+ err = IA_CSS_ERR_INTERNAL_ERROR;
+ IA_CSS_LEAVE_ERR(err);
+ return err;
+ }
+
+ if (my_css.size_mem_words != 0) {
+ my_css.num_mipi_frames[port] = 2; /* Temp change: Default for backwards compatibility. */
+ } else if (stream_config->mipi_buffer_config.nof_mipi_buffers != 0) {
+ my_css.num_mipi_frames[port] = stream_config->mipi_buffer_config.nof_mipi_buffers;
+ } else {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_stream_create() exit: error, need to set number of mipi frames.\n");
+ assert(stream_config->mipi_buffer_config.nof_mipi_buffers != 0);
+ err = IA_CSS_ERR_INTERNAL_ERROR;
+ IA_CSS_LEAVE_ERR(err);
+ return err;
+ }
+
+ }
+#endif
+
+ /* Currently we only support metadata up to a certain size. */
+ err = metadata_info_init(&stream_config->metadata_config, &md_info);
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR(err);
+ return err;
+ }
+
+ /* allocate the stream instance */
+ curr_stream = kmalloc(sizeof(struct ia_css_stream), GFP_KERNEL);
+ if (curr_stream == NULL) {
+ err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+ IA_CSS_LEAVE_ERR(err);
+ return err;
+ }
+ /* default all to 0 */
+ memset(curr_stream, 0, sizeof(struct ia_css_stream));
+ curr_stream->info.metadata_info = md_info;
+
+ /* allocate pipes */
+ curr_stream->num_pipes = num_pipes;
+ curr_stream->pipes = kzalloc(num_pipes * sizeof(struct ia_css_pipe *), GFP_KERNEL);
+ if (curr_stream->pipes == NULL) {
+ curr_stream->num_pipes = 0;
+ kfree(curr_stream);
+ curr_stream = NULL;
+ err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+ IA_CSS_LEAVE_ERR(err);
+ return err;
+ }
+ /* store pipes */
+ spcopyonly = (num_pipes == 1) && (pipes[0]->config.mode == IA_CSS_PIPE_MODE_COPY);
+ for (i = 0; i < num_pipes; i++)
+ curr_stream->pipes[i] = pipes[i];
+ curr_stream->last_pipe = curr_stream->pipes[0];
+ /* take over stream config */
+ curr_stream->config = *stream_config;
+
+#if defined(USE_INPUT_SYSTEM_VERSION_2401) && defined(CSI2P_DISABLE_ISYS2401_ONLINE_MODE)
+ if (stream_config->mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR &&
+ stream_config->online)
+ curr_stream->config.online = false;
+#endif
+
+#ifdef USE_INPUT_SYSTEM_VERSION_2401
+ if (curr_stream->config.online) {
+ curr_stream->config.source.port.num_lanes = stream_config->source.port.num_lanes;
+ curr_stream->config.mode = IA_CSS_INPUT_MODE_BUFFERED_SENSOR;
+ }
+#endif
+ /* in case the driver doesn't configure the initial number of raw buffers, configure it here */
+ if (curr_stream->config.target_num_cont_raw_buf == 0)
+ curr_stream->config.target_num_cont_raw_buf = NUM_CONTINUOUS_FRAMES;
+ if (curr_stream->config.init_num_cont_raw_buf == 0)
+ curr_stream->config.init_num_cont_raw_buf = curr_stream->config.target_num_cont_raw_buf;
+
+ /* Enable locking & unlocking of buffers in RAW buffer pool */
+ if (curr_stream->config.ia_css_enable_raw_buffer_locking)
+ sh_css_sp_configure_enable_raw_pool_locking(
+ curr_stream->config.lock_all);
+
+ /* copy mode specific stuff */
+ switch (curr_stream->config.mode) {
+ case IA_CSS_INPUT_MODE_SENSOR:
+ case IA_CSS_INPUT_MODE_BUFFERED_SENSOR:
+#if defined(USE_INPUT_SYSTEM_VERSION_2)
+ ia_css_stream_configure_rx(curr_stream);
+#endif
+ break;
+ case IA_CSS_INPUT_MODE_TPG:
+#if !defined(HAS_NO_INPUT_SYSTEM) && defined(USE_INPUT_SYSTEM_VERSION_2)
+ IA_CSS_LOG("tpg_configuration: x_mask=%d, y_mask=%d, x_delta=%d, y_delta=%d, xy_mask=%d",
+ curr_stream->config.source.tpg.x_mask,
+ curr_stream->config.source.tpg.y_mask,
+ curr_stream->config.source.tpg.x_delta,
+ curr_stream->config.source.tpg.y_delta,
+ curr_stream->config.source.tpg.xy_mask);
+
+ sh_css_sp_configure_tpg(
+ curr_stream->config.source.tpg.x_mask,
+ curr_stream->config.source.tpg.y_mask,
+ curr_stream->config.source.tpg.x_delta,
+ curr_stream->config.source.tpg.y_delta,
+ curr_stream->config.source.tpg.xy_mask);
+#endif
+ break;
+ case IA_CSS_INPUT_MODE_PRBS:
+#if !defined(HAS_NO_INPUT_SYSTEM) && defined(USE_INPUT_SYSTEM_VERSION_2)
+ IA_CSS_LOG("mode prbs");
+ sh_css_sp_configure_prbs(curr_stream->config.source.prbs.seed);
+#endif
+ break;
+ case IA_CSS_INPUT_MODE_MEMORY:
+ IA_CSS_LOG("mode memory");
+ curr_stream->reconfigure_css_rx = false;
+ break;
+ default:
+ IA_CSS_LOG("mode sensor/default");
+ }
+
+#ifdef ISP2401
+#ifdef USE_INPUT_SYSTEM_VERSION_2401
+ err = aspect_ratio_crop_init(curr_stream,
+ pipes,
+ &aspect_ratio_crop_enabled);
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR(err);
+ return err;
+ }
+#endif
+
+#endif
+ for (i = 0; i < num_pipes; i++) {
+#ifdef ISP2401
+ struct ia_css_resolution effective_res;
+#endif
+ curr_pipe = pipes[i];
+ /* set current stream */
+ curr_pipe->stream = curr_stream;
+ /* take over effective info */
+
+ effective_res = curr_pipe->config.input_effective_res;
+ if (effective_res.height == 0 || effective_res.width == 0) {
+ effective_res = curr_pipe->stream->config.input_config.effective_res;
+#ifdef ISP2401
+
+#if defined(USE_INPUT_SYSTEM_VERSION_2401)
+ /* The aspect ratio cropping is currently only
+ * supported on the new input system. */
+ if (aspect_ratio_crop_check(aspect_ratio_crop_enabled, curr_pipe)) {
+
+ struct ia_css_resolution crop_res;
+
+ err = aspect_ratio_crop(curr_pipe, &crop_res);
+ if (err == IA_CSS_SUCCESS) {
+ effective_res = crop_res;
+ } else {
+ /* in case of error, fall back to the default
+ * effective resolution from the driver. */
+ IA_CSS_LOG("aspect_ratio_crop() failed with err(%d)", err);
+ }
+ }
+#endif
+#endif
+ curr_pipe->config.input_effective_res = effective_res;
+ }
+ IA_CSS_LOG("effective_res=%dx%d",
+ effective_res.width,
+ effective_res.height);
+ }
+
+#ifdef ISP2401
+ for (i = 0; i < num_pipes; i++) {
+ if (pipes[i]->config.mode != IA_CSS_PIPE_MODE_ACC &&
+ pipes[i]->config.mode != IA_CSS_PIPE_MODE_COPY) {
+ err = check_pipe_resolutions(pipes[i]);
+ if (err != IA_CSS_SUCCESS) {
+ goto ERR;
+ }
+ }
+ }
+
+#endif
+ err = ia_css_stream_isp_parameters_init(curr_stream);
+ if (err != IA_CSS_SUCCESS)
+ goto ERR;
+ IA_CSS_LOG("isp_params_configs: %p", curr_stream->isp_params_configs);
+
+ if (num_pipes == 1 && pipes[0]->config.mode == IA_CSS_PIPE_MODE_ACC) {
+ *stream = curr_stream;
+ err = ia_css_acc_stream_create(curr_stream);
+ goto ERR;
+ }
+ /* sensor binning */
+ if (!spcopyonly) {
+ sensor_binning_changed =
+ sh_css_params_set_binning_factor(curr_stream, curr_stream->config.sensor_binning_factor);
+ } else {
+ sensor_binning_changed = false;
+ }
+
+ IA_CSS_LOG("sensor_binning=%d, changed=%d",
+ curr_stream->config.sensor_binning_factor, sensor_binning_changed);
+ /* loop over pipes */
+ IA_CSS_LOG("num_pipes=%d", num_pipes);
+ curr_stream->cont_capt = false;
+ /* Temporary hack: we give the preview pipe a reference to the capture
+ * pipe in continuous capture mode. */
+ if (curr_stream->config.continuous) {
+ /* Search for the preview pipe and create the copy pipe */
+ struct ia_css_pipe *preview_pipe;
+ struct ia_css_pipe *video_pipe;
+ struct ia_css_pipe *acc_pipe;
+ struct ia_css_pipe *capture_pipe = NULL;
+ struct ia_css_pipe *copy_pipe = NULL;
+
+ if (num_pipes >= 2) {
+ curr_stream->cont_capt = true;
+ curr_stream->disable_cont_vf = curr_stream->config.disable_cont_viewfinder;
+#ifndef ISP2401
+ curr_stream->stop_copy_preview = my_css.stop_copy_preview;
+#endif
+ }
+
+ /* Create copy pipe here, since it may not be exposed to the driver */
+ preview_pipe = find_pipe(pipes, num_pipes,
+ IA_CSS_PIPE_MODE_PREVIEW, false);
+ video_pipe = find_pipe(pipes, num_pipes,
+ IA_CSS_PIPE_MODE_VIDEO, false);
+ acc_pipe = find_pipe(pipes, num_pipes,
+ IA_CSS_PIPE_MODE_ACC, false);
+ if (acc_pipe && num_pipes == 2 && curr_stream->cont_capt == true)
+ curr_stream->cont_capt = false; /* preview + QoS case will not need cont_capt switch */
+ if (curr_stream->cont_capt == true) {
+ capture_pipe = find_pipe(pipes, num_pipes,
+ IA_CSS_PIPE_MODE_CAPTURE, false);
+ if (capture_pipe == NULL) {
+ err = IA_CSS_ERR_INTERNAL_ERROR;
+ goto ERR;
+ }
+ }
+ /* We do not support preview and video pipes at the same time */
+ if (preview_pipe && video_pipe) {
+ err = IA_CSS_ERR_INVALID_ARGUMENTS;
+ goto ERR;
+ }
+
+ if (preview_pipe && !preview_pipe->pipe_settings.preview.copy_pipe) {
+ err = create_pipe(IA_CSS_PIPE_MODE_CAPTURE, &copy_pipe, true);
+ if (err != IA_CSS_SUCCESS)
+ goto ERR;
+ ia_css_pipe_config_defaults(&copy_pipe->config);
+ preview_pipe->pipe_settings.preview.copy_pipe = copy_pipe;
+ copy_pipe->stream = curr_stream;
+ }
+ if (preview_pipe && (curr_stream->cont_capt == true)) {
+ preview_pipe->pipe_settings.preview.capture_pipe = capture_pipe;
+ }
+ if (video_pipe && !video_pipe->pipe_settings.video.copy_pipe) {
+ err = create_pipe(IA_CSS_PIPE_MODE_CAPTURE, &copy_pipe, true);
+ if (err != IA_CSS_SUCCESS)
+ goto ERR;
+ ia_css_pipe_config_defaults(&copy_pipe->config);
+ video_pipe->pipe_settings.video.copy_pipe = copy_pipe;
+ copy_pipe->stream = curr_stream;
+ }
+ if (video_pipe && (curr_stream->cont_capt == true)) {
+ video_pipe->pipe_settings.video.capture_pipe = capture_pipe;
+ }
+ if (preview_pipe && acc_pipe) {
+ preview_pipe->pipe_settings.preview.acc_pipe = acc_pipe;
+ }
+ }
+ for (i = 0; i < num_pipes; i++) {
+ curr_pipe = pipes[i];
+ /* set current stream */
+ curr_pipe->stream = curr_stream;
+#ifndef ISP2401
+ /* take over effective info */
+
+ effective_res = curr_pipe->config.input_effective_res;
+#endif
+
+#ifndef ISP2401
+ err = ia_css_util_check_res(
+ effective_res.width,
+ effective_res.height);
+ if (err != IA_CSS_SUCCESS)
+ goto ERR;
+#endif
+ /* sensor binning per pipe */
+ if (sensor_binning_changed)
+ sh_css_pipe_free_shading_table(curr_pipe);
+ }
+
+ /* now pipes have been configured, info should be available */
+ for (i = 0; i < num_pipes; i++) {
+ struct ia_css_pipe_info *pipe_info = NULL;
+ curr_pipe = pipes[i];
+
+ err = sh_css_pipe_load_binaries(curr_pipe);
+ if (err != IA_CSS_SUCCESS)
+ goto ERR;
+
+#if defined(HAS_RES_MGR)
+ /* update acc configuration - striping info is ready */
+ err = ia_css_update_cfg_stripe_info(curr_pipe);
+ if (err != IA_CSS_SUCCESS)
+ goto ERR;
+#endif
+
+ /* handle each pipe */
+ pipe_info = &curr_pipe->info;
+ for (j = 0; j < IA_CSS_PIPE_MAX_OUTPUT_STAGE; j++) {
+ err = sh_css_pipe_get_output_frame_info(curr_pipe,
+ &pipe_info->output_info[j], j);
+ if (err != IA_CSS_SUCCESS)
+ goto ERR;
+ }
+#ifdef ISP2401
+ pipe_info->output_system_in_res_info = curr_pipe->config.output_system_in_res;
+#endif
+ if (!spcopyonly) {
+ err = sh_css_pipe_get_shading_info(curr_pipe,
+#ifndef ISP2401
+ &pipe_info->shading_info);
+#else
+ &pipe_info->shading_info, &curr_pipe->config);
+#endif
+ if (err != IA_CSS_SUCCESS)
+ goto ERR;
+ err = sh_css_pipe_get_grid_info(curr_pipe,
+ &pipe_info->grid_info);
+ if (err != IA_CSS_SUCCESS)
+ goto ERR;
+ for (j = 0; j < IA_CSS_PIPE_MAX_OUTPUT_STAGE; j++) {
+ err = sh_css_pipe_get_viewfinder_frame_info(curr_pipe,
+ &pipe_info->vf_output_info[j], j);
+ if (err != IA_CSS_SUCCESS)
+ goto ERR;
+ }
+ }
+
+ my_css.active_pipes[ia_css_pipe_get_pipe_num(curr_pipe)] = curr_pipe;
+ }
+
+ curr_stream->started = false;
+
+ /* Map SP threads before doing anything. */
+ err = map_sp_threads(curr_stream, true);
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LOG("map_sp_threads: return_err=%d", err);
+ goto ERR;
+ }
+
+ for (i = 0; i < num_pipes; i++) {
+ curr_pipe = pipes[i];
+ ia_css_pipe_map_queue(curr_pipe, true);
+ }
+
+ /* Create host side pipeline objects without stages */
+ err = create_host_pipeline_structure(curr_stream);
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LOG("create_host_pipeline_structure: return_err=%d", err);
+ goto ERR;
+ }
+
+ /* assign curr_stream */
+ *stream = curr_stream;
+
+ERR:
+#ifndef ISP2401
+ if (err == IA_CSS_SUCCESS)
+ {
+ /* working mode: enter into the seed list */
+ if (my_css_save.mode == sh_css_mode_working)
+ for(i = 0; i < MAX_ACTIVE_STREAMS; i++)
+ if (my_css_save.stream_seeds[i].stream == NULL)
+ {
+ IA_CSS_LOG("entered stream into loc=%d", i);
+ my_css_save.stream_seeds[i].orig_stream = stream;
+ my_css_save.stream_seeds[i].stream = curr_stream;
+ my_css_save.stream_seeds[i].num_pipes = num_pipes;
+ my_css_save.stream_seeds[i].stream_config = *stream_config;
+ for(j = 0; j < num_pipes; j++)
+ {
+ my_css_save.stream_seeds[i].pipe_config[j] = pipes[j]->config;
+ my_css_save.stream_seeds[i].pipes[j] = pipes[j];
+ my_css_save.stream_seeds[i].orig_pipes[j] = &pipes[j];
+ }
+ break;
+ }
+#else
+ if (err == IA_CSS_SUCCESS) {
+ err = ia_css_save_stream(curr_stream);
+#endif
+ } else {
+ ia_css_stream_destroy(curr_stream);
+ }
+#ifndef ISP2401
+ IA_CSS_LEAVE("return_err=%d mode=%d", err, my_css_save.mode);
+#else
+ IA_CSS_LEAVE("return_err=%d", err);
+#endif
+ return err;
+}
+
+enum ia_css_err
+ia_css_stream_destroy(struct ia_css_stream *stream)
+{
+ int i;
+ enum ia_css_err err = IA_CSS_SUCCESS;
+#ifdef ISP2401
+ enum ia_css_err err1 = IA_CSS_SUCCESS;
+ enum ia_css_err err2 = IA_CSS_SUCCESS;
+#endif
+
+ IA_CSS_ENTER_PRIVATE("stream = %p", stream);
+ if (stream == NULL) {
+ err = IA_CSS_ERR_INVALID_ARGUMENTS;
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+
+ ia_css_stream_isp_parameters_uninit(stream);
+
+ if ((stream->last_pipe != NULL) &&
+ ia_css_pipeline_is_mapped(stream->last_pipe->pipe_num)) {
+#if defined(USE_INPUT_SYSTEM_VERSION_2401)
+ for (i = 0; i < stream->num_pipes; i++) {
+ struct ia_css_pipe *entry = stream->pipes[i];
+ unsigned int sp_thread_id;
+ struct sh_css_sp_pipeline_terminal *sp_pipeline_input_terminal;
+
+ assert(entry != NULL);
+ if (entry != NULL) {
+ /* get the SP thread id */
+ if (ia_css_pipeline_get_sp_thread_id(
+ ia_css_pipe_get_pipe_num(entry), &sp_thread_id) != true)
+ return IA_CSS_ERR_INTERNAL_ERROR;
+ /* get the target input terminal */
+ sp_pipeline_input_terminal =
+ &(sh_css_sp_group.pipe_io[sp_thread_id].input);
+
+ for (i = 0; i < IA_CSS_STREAM_MAX_ISYS_STREAM_PER_CH; i++) {
+ ia_css_isys_stream_h isys_stream =
+ &(sp_pipeline_input_terminal->context.virtual_input_system_stream[i]);
+ if (stream->config.isys_config[i].valid && isys_stream->valid)
+ ia_css_isys_stream_destroy(isys_stream);
+ }
+ }
+ }
+#ifndef ISP2401
+ if (stream->config.mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR) {
+#else
+ if (stream->config.mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR ||
+ stream->config.mode == IA_CSS_INPUT_MODE_TPG ||
+ stream->config.mode == IA_CSS_INPUT_MODE_PRBS) {
+#endif
+ for (i = 0; i < stream->num_pipes; i++) {
+ struct ia_css_pipe *entry = stream->pipes[i];
+ /* free any remaining mipi frames:
+ * some test stream create-destroy cycles do not generate output frames,
+ * so the mipi buffer is not freed in the dequeue function
+ */
+ if (entry != NULL)
+ free_mipi_frames(entry);
+ }
+ }
+ stream_unregister_with_csi_rx(stream);
+#endif
+
+ for (i = 0; i < stream->num_pipes; i++) {
+ struct ia_css_pipe *curr_pipe = stream->pipes[i];
+ assert(curr_pipe != NULL);
+ ia_css_pipe_map_queue(curr_pipe, false);
+ }
+
+ err = map_sp_threads(stream, false);
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ }
+
+ /* remove references from pipes to stream */
+ for (i = 0; i < stream->num_pipes; i++) {
+ struct ia_css_pipe *entry = stream->pipes[i];
+ assert(entry != NULL);
+ if (entry != NULL) {
+ /* clear reference to stream */
+ entry->stream = NULL;
+ /* check internal copy pipe */
+ if (entry->mode == IA_CSS_PIPE_ID_PREVIEW &&
+ entry->pipe_settings.preview.copy_pipe) {
+ IA_CSS_LOG("clearing stream on internal preview copy pipe");
+ entry->pipe_settings.preview.copy_pipe->stream = NULL;
+ }
+ if (entry->mode == IA_CSS_PIPE_ID_VIDEO &&
+ entry->pipe_settings.video.copy_pipe) {
+ IA_CSS_LOG("clearing stream on internal video copy pipe");
+ entry->pipe_settings.video.copy_pipe->stream = NULL;
+ }
+ err = sh_css_pipe_unload_binaries(entry);
+ }
+ }
+ /* free associated memory of stream struct */
+ kfree(stream->pipes);
+ stream->pipes = NULL;
+ stream->num_pipes = 0;
+#ifndef ISP2401
+ /* working mode: take out of the seed list */
+ if (my_css_save.mode == sh_css_mode_working)
+ for (i = 0; i < MAX_ACTIVE_STREAMS; i++)
+ if (my_css_save.stream_seeds[i].stream == stream)
+ {
+ IA_CSS_LOG("took out stream %d", i);
+ my_css_save.stream_seeds[i].stream = NULL;
+ break;
+ }
+#else
+ err2 = ia_css_save_restore_remove_stream(stream);
+
+ err1 = (err != IA_CSS_SUCCESS) ? err : err2;
+#endif
+ kfree(stream);
+#ifndef ISP2401
+ IA_CSS_LEAVE_ERR(err);
+#else
+ IA_CSS_LEAVE_ERR(err1);
+#endif
+
+#ifndef ISP2401
+ return err;
+#else
+ return err1;
+#endif
+}
+
+enum ia_css_err
+ia_css_stream_get_info(const struct ia_css_stream *stream,
+ struct ia_css_stream_info *stream_info)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_stream_get_info: enter/exit\n");
+ assert(stream != NULL);
+ assert(stream_info != NULL);
+
+ *stream_info = stream->info;
+ return IA_CSS_SUCCESS;
+}
+
+/*
+ * Rebuild a stream, including allocating structs, setting configuration and
+ * building the required pipes.
+ * The data is taken from the css_save struct updated upon stream creation.
+ * The stream handle is used to identify the correct entry in the css_save struct
+ */
+enum ia_css_err
+ia_css_stream_load(struct ia_css_stream *stream)
+{
+#ifndef ISP2401
+ int i;
+ enum ia_css_err err;
+ assert(stream != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_stream_load() enter, \n");
+ for (i = 0; i < MAX_ACTIVE_STREAMS; i++)
+ if (my_css_save.stream_seeds[i].stream == stream)
+ {
+ int j;
+ for (j = 0; j < my_css_save.stream_seeds[i].num_pipes; j++)
+ if ((err = ia_css_pipe_create(&(my_css_save.stream_seeds[i].pipe_config[j]), &my_css_save.stream_seeds[i].pipes[j])) != IA_CSS_SUCCESS)
+ {
+ if (j)
+ {
+ int k;
+ for (k = 0; k < j; k++)
+ ia_css_pipe_destroy(my_css_save.stream_seeds[i].pipes[k]);
+ }
+ return err;
+ }
+ err = ia_css_stream_create(&(my_css_save.stream_seeds[i].stream_config), my_css_save.stream_seeds[i].num_pipes,
+ my_css_save.stream_seeds[i].pipes, &(my_css_save.stream_seeds[i].stream));
+ if (err != IA_CSS_SUCCESS)
+ {
+ ia_css_stream_destroy(stream);
+ for (j = 0; j < my_css_save.stream_seeds[i].num_pipes; j++)
+ ia_css_pipe_destroy(my_css_save.stream_seeds[i].pipes[j]);
+ return err;
+ }
+ break;
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_stream_load() exit, \n");
+ return IA_CSS_SUCCESS;
+#else
+ /* TODO remove function - DEPRECATED */
+ (void)stream;
+ return IA_CSS_ERR_NOT_SUPPORTED;
+#endif
+}
+
+enum ia_css_err
+ia_css_stream_start(struct ia_css_stream *stream)
+{
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ IA_CSS_ENTER("stream = %p", stream);
+ if ((stream == NULL) || (stream->last_pipe == NULL)) {
+ IA_CSS_LEAVE_ERR(IA_CSS_ERR_INVALID_ARGUMENTS);
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+ IA_CSS_LOG("starting %d", stream->last_pipe->mode);
+
+ sh_css_sp_set_disable_continuous_viewfinder(stream->disable_cont_vf);
+
+ /* Create host side pipeline. */
+ err = create_host_pipeline(stream);
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR(err);
+ return err;
+ }
+
+#if !defined(HAS_NO_INPUT_SYSTEM)
+#if defined(USE_INPUT_SYSTEM_VERSION_2401)
+ if ((stream->config.mode == IA_CSS_INPUT_MODE_SENSOR) ||
+ (stream->config.mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR))
+ stream_register_with_csi_rx(stream);
+#endif
+#endif
+
+#if !defined(HAS_NO_INPUT_SYSTEM) && defined(USE_INPUT_SYSTEM_VERSION_2)
+ /* Initialize mipi size checks */
+ if (stream->config.mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR)
+ {
+ unsigned int idx;
+ unsigned int port = (unsigned int)stream->config.source.port.port;
+
+ for (idx = 0; idx < IA_CSS_MIPI_SIZE_CHECK_MAX_NOF_ENTRIES_PER_PORT; idx++) {
+ sh_css_sp_group.config.mipi_sizes_for_check[port][idx] = sh_css_get_mipi_sizes_for_check(port, idx);
+ }
+ }
+#endif
+
+#if !defined(HAS_NO_INPUT_SYSTEM)
+ if (stream->config.mode != IA_CSS_INPUT_MODE_MEMORY) {
+ err = sh_css_config_input_network(stream);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+ }
+#endif /* !HAS_NO_INPUT_SYSTEM */
+
+ err = sh_css_pipe_start(stream);
+ IA_CSS_LEAVE_ERR(err);
+ return err;
+}
+
+enum ia_css_err
+ia_css_stream_stop(struct ia_css_stream *stream)
+{
+ enum ia_css_err err = IA_CSS_SUCCESS;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_stream_stop() enter/exit\n");
+ assert(stream != NULL);
+ assert(stream->last_pipe != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_stream_stop: stopping %d\n",
+ stream->last_pipe->mode);
+
+#if !defined(HAS_NO_INPUT_SYSTEM) && defined(USE_INPUT_SYSTEM_VERSION_2)
+ /* De-initialize mipi size checks */
+ if (stream->config.mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR)
+ {
+ unsigned int idx;
+ unsigned int port = (unsigned int)stream->config.source.port.port;
+
+ for (idx = 0; idx < IA_CSS_MIPI_SIZE_CHECK_MAX_NOF_ENTRIES_PER_PORT; idx++) {
+ sh_css_sp_group.config.mipi_sizes_for_check[port][idx] = 0;
+ }
+ }
+#endif
+#ifndef ISP2401
+ err = ia_css_pipeline_request_stop(&stream->last_pipe->pipeline);
+#else
+
+ err = sh_css_pipes_stop(stream);
+#endif
+ if (err != IA_CSS_SUCCESS)
+ return err;
+
+ /* Ideally, unmapping should happen after pipeline_stop, but current
+ * semantics do not allow that. */
+ /* err = map_sp_threads(stream, false); */
+
+ return err;
+}
+
+bool
+ia_css_stream_has_stopped(struct ia_css_stream *stream)
+{
+ bool stopped;
+ assert(stream != NULL);
+
+#ifndef ISP2401
+ stopped = ia_css_pipeline_has_stopped(&stream->last_pipe->pipeline);
+#else
+ stopped = sh_css_pipes_have_stopped(stream);
+#endif
+
+ return stopped;
+}
+
+#ifndef ISP2401
+/*
+ * Destroy the stream and all the pipes related to it.
+ * The stream handle is used to identify the correct entry in the css_save struct
+ */
+enum ia_css_err
+ia_css_stream_unload(struct ia_css_stream *stream)
+{
+ int i;
+ assert(stream != NULL);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_stream_unload() enter, \n");
+ for (i = 0; i < MAX_ACTIVE_STREAMS; i++)
+ if (my_css_save.stream_seeds[i].stream == stream)
+ {
+ int j;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_stream_unload(): unloading %d (%p)\n", i, my_css_save.stream_seeds[i].stream);
+ ia_css_stream_destroy(stream);
+ for (j = 0; j < my_css_save.stream_seeds[i].num_pipes; j++)
+ ia_css_pipe_destroy(my_css_save.stream_seeds[i].pipes[j]);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_stream_unload(): after unloading %d (%p)\n", i, my_css_save.stream_seeds[i].stream);
+ break;
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_stream_unload() exit, \n");
+ return IA_CSS_SUCCESS;
+}
+
+#endif
+enum ia_css_err
+ia_css_temp_pipe_to_pipe_id(const struct ia_css_pipe *pipe, enum ia_css_pipe_id *pipe_id)
+{
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_temp_pipe_to_pipe_id() enter/exit\n");
+ if (pipe != NULL)
+ *pipe_id = pipe->mode;
+ else
+ *pipe_id = IA_CSS_PIPE_ID_COPY;
+
+ return IA_CSS_SUCCESS;
+}
+
+enum ia_css_stream_format
+ia_css_stream_get_format(const struct ia_css_stream *stream)
+{
+ return stream->config.input_config.format;
+}
+
+bool
+ia_css_stream_get_two_pixels_per_clock(const struct ia_css_stream *stream)
+{
+ return (stream->config.pixels_per_clock == 2);
+}
+
+struct ia_css_binary *
+ia_css_stream_get_shading_correction_binary(const struct ia_css_stream *stream)
+{
+ struct ia_css_pipe *pipe;
+
+ assert(stream != NULL);
+
+ pipe = stream->pipes[0];
+
+ if (stream->num_pipes == 2) {
+ assert(stream->pipes[1] != NULL);
+ if (stream->pipes[1]->config.mode == IA_CSS_PIPE_MODE_VIDEO ||
+ stream->pipes[1]->config.mode == IA_CSS_PIPE_MODE_PREVIEW)
+ pipe = stream->pipes[1];
+ }
+
+ return ia_css_pipe_get_shading_correction_binary(pipe);
+}
+
+struct ia_css_binary *
+ia_css_stream_get_dvs_binary(const struct ia_css_stream *stream)
+{
+ int i;
+ struct ia_css_pipe *video_pipe = NULL;
+
+ /* First we find the video pipe */
+ for (i = 0; i < stream->num_pipes; i++) {
+ struct ia_css_pipe *pipe = stream->pipes[i];
+ if (pipe->config.mode == IA_CSS_PIPE_MODE_VIDEO) {
+ video_pipe = pipe;
+ break;
+ }
+ }
+ if (video_pipe)
+ return &video_pipe->pipe_settings.video.video_binary;
+ return NULL;
+}
+
+struct ia_css_binary *
+ia_css_stream_get_3a_binary(const struct ia_css_stream *stream)
+{
+ struct ia_css_pipe *pipe;
+ struct ia_css_binary *s3a_binary = NULL;
+
+ assert(stream != NULL);
+
+ pipe = stream->pipes[0];
+
+ if (stream->num_pipes == 2) {
+ assert(stream->pipes[1] != NULL);
+ if (stream->pipes[1]->config.mode == IA_CSS_PIPE_MODE_VIDEO ||
+ stream->pipes[1]->config.mode == IA_CSS_PIPE_MODE_PREVIEW)
+ pipe = stream->pipes[1];
+ }
+
+ s3a_binary = ia_css_pipe_get_s3a_binary(pipe);
+
+ return s3a_binary;
+}
+
+
+enum ia_css_err
+ia_css_stream_set_output_padded_width(struct ia_css_stream *stream, unsigned int output_padded_width)
+{
+ enum ia_css_err err = IA_CSS_SUCCESS;
+
+ struct ia_css_pipe *pipe;
+
+ assert(stream != NULL);
+
+ pipe = stream->last_pipe;
+
+ assert(pipe != NULL);
+
+ /* set the config also just in case (redundant info? why do we save config in pipe?) */
+ pipe->config.output_info[IA_CSS_PIPE_OUTPUT_STAGE_0].padded_width = output_padded_width;
+ pipe->output_info[IA_CSS_PIPE_OUTPUT_STAGE_0].padded_width = output_padded_width;
+
+ return err;
+}
+
+static struct ia_css_binary *
+ia_css_pipe_get_shading_correction_binary(const struct ia_css_pipe *pipe)
+{
+ struct ia_css_binary *binary = NULL;
+
+ assert(pipe != NULL);
+
+ switch (pipe->config.mode) {
+ case IA_CSS_PIPE_MODE_PREVIEW:
+ binary = (struct ia_css_binary *)&pipe->pipe_settings.preview.preview_binary;
+ break;
+ case IA_CSS_PIPE_MODE_VIDEO:
+ binary = (struct ia_css_binary *)&pipe->pipe_settings.video.video_binary;
+ break;
+ case IA_CSS_PIPE_MODE_CAPTURE:
+ if (pipe->config.default_capture_config.mode == IA_CSS_CAPTURE_MODE_PRIMARY) {
+ unsigned int i;
+
+ for (i = 0; i < pipe->pipe_settings.capture.num_primary_stage; i++) {
+ if (pipe->pipe_settings.capture.primary_binary[i].info->sp.enable.sc) {
+ binary = (struct ia_css_binary *)&pipe->pipe_settings.capture.primary_binary[i];
+ break;
+ }
+ }
+ }
+ else if (pipe->config.default_capture_config.mode == IA_CSS_CAPTURE_MODE_BAYER)
+ binary = (struct ia_css_binary *)&pipe->pipe_settings.capture.pre_isp_binary;
+ else if (pipe->config.default_capture_config.mode == IA_CSS_CAPTURE_MODE_ADVANCED ||
+ pipe->config.default_capture_config.mode == IA_CSS_CAPTURE_MODE_LOW_LIGHT) {
+ if (pipe->config.isp_pipe_version == IA_CSS_PIPE_VERSION_1)
+ binary = (struct ia_css_binary *)&pipe->pipe_settings.capture.pre_isp_binary;
+ else if (pipe->config.isp_pipe_version == IA_CSS_PIPE_VERSION_2_2)
+ binary = (struct ia_css_binary *)&pipe->pipe_settings.capture.post_isp_binary;
+ }
+ break;
+ default:
+ break;
+ }
+
+ if (binary && binary->info->sp.enable.sc)
+ return binary;
+
+ return NULL;
+}
+
+static struct ia_css_binary *
+ia_css_pipe_get_s3a_binary(const struct ia_css_pipe *pipe)
+{
+ struct ia_css_binary *binary = NULL;
+
+ assert(pipe != NULL);
+
+ switch (pipe->config.mode) {
+ case IA_CSS_PIPE_MODE_PREVIEW:
+ binary = (struct ia_css_binary*)&pipe->pipe_settings.preview.preview_binary;
+ break;
+ case IA_CSS_PIPE_MODE_VIDEO:
+ binary = (struct ia_css_binary*)&pipe->pipe_settings.video.video_binary;
+ break;
+ case IA_CSS_PIPE_MODE_CAPTURE:
+ if (pipe->config.default_capture_config.mode == IA_CSS_CAPTURE_MODE_PRIMARY) {
+ unsigned int i;
+ for (i = 0; i < pipe->pipe_settings.capture.num_primary_stage; i++) {
+ if (pipe->pipe_settings.capture.primary_binary[i].info->sp.enable.s3a) {
+ binary = (struct ia_css_binary *)&pipe->pipe_settings.capture.primary_binary[i];
+ break;
+ }
+ }
+ }
+ else if (pipe->config.default_capture_config.mode == IA_CSS_CAPTURE_MODE_BAYER)
+ binary = (struct ia_css_binary *)&pipe->pipe_settings.capture.pre_isp_binary;
+ else if (pipe->config.default_capture_config.mode == IA_CSS_CAPTURE_MODE_ADVANCED ||
+ pipe->config.default_capture_config.mode == IA_CSS_CAPTURE_MODE_LOW_LIGHT) {
+ if (pipe->config.isp_pipe_version == IA_CSS_PIPE_VERSION_1)
+ binary = (struct ia_css_binary *)&pipe->pipe_settings.capture.pre_isp_binary;
+ else if (pipe->config.isp_pipe_version == IA_CSS_PIPE_VERSION_2_2)
+ binary = (struct ia_css_binary *)&pipe->pipe_settings.capture.post_isp_binary;
+ else
+ assert(0);
+ }
+ break;
+ default:
+ break;
+ }
+
+ if (binary && !binary->info->sp.enable.s3a)
+ binary = NULL;
+
+ return binary;
+}
+
+static struct ia_css_binary *
+ia_css_pipe_get_sdis_binary(const struct ia_css_pipe *pipe)
+{
+ struct ia_css_binary *binary = NULL;
+
+ assert(pipe != NULL);
+
+ switch (pipe->config.mode) {
+ case IA_CSS_PIPE_MODE_VIDEO:
+ binary = (struct ia_css_binary*)&pipe->pipe_settings.video.video_binary;
+ break;
+ default:
+ break;
+ }
+
+ if (binary && !binary->info->sp.enable.dis)
+ binary = NULL;
+
+ return binary;
+}
+
+struct ia_css_pipeline *
+ia_css_pipe_get_pipeline(const struct ia_css_pipe *pipe)
+{
+ assert(pipe != NULL);
+
+ return (struct ia_css_pipeline*)&pipe->pipeline;
+}
+
+unsigned int
+ia_css_pipe_get_pipe_num(const struct ia_css_pipe *pipe)
+{
+ assert(pipe != NULL);
+
+ /* Static analysis (KW) could not prove that this function never returns
+ a value that is out of range, so we assert here and, for builds where
+ asserts are disabled, clip to the largest valid value; pipe_num is
+ unsigned so the value cannot be too small
+ */
+ assert(pipe->pipe_num < IA_CSS_PIPELINE_NUM_MAX);
+
+ if (pipe->pipe_num >= IA_CSS_PIPELINE_NUM_MAX)
+ return (IA_CSS_PIPELINE_NUM_MAX - 1);
+
+ return pipe->pipe_num;
+}
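+
+/* Example of the clipping above, assuming IA_CSS_PIPELINE_NUM_MAX were 8
+ * (the real value is configuration dependent): a corrupted pipe_num of 25
+ * trips the assert in debug builds and is clipped to 7 when asserts are
+ * disabled.
+ */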
+
+
+unsigned int
+ia_css_pipe_get_isp_pipe_version(const struct ia_css_pipe *pipe)
+{
+ assert(pipe != NULL);
+
+ return (unsigned int)pipe->config.isp_pipe_version;
+}
+
+#if defined(HAS_BL)
+#define BL_START_TIMEOUT_US 30000000
+static enum ia_css_err
+ia_css_start_bl(void)
+{
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ unsigned long timeout;
+
+ IA_CSS_ENTER("");
+ sh_css_start_bl();
+ /* waiting for the Bootloader to complete execution */
+ timeout = BL_START_TIMEOUT_US;
+ while ((ia_css_blctrl_get_state() == BOOTLOADER_BUSY) && timeout) {
+ timeout--;
+ hrt_sleep();
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "Bootloader state %d\n", ia_css_blctrl_get_state());
+ if (timeout == 0) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_ERROR,
+ "Bootloader Execution Timeout\n");
+ err = IA_CSS_ERR_INTERNAL_ERROR;
+ }
+ if (ia_css_blctrl_get_state() != BOOTLOADER_OK) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_ERROR,
+ "Bootloader Execution Failed\n");
+ err = IA_CSS_ERR_INTERNAL_ERROR;
+ }
+ IA_CSS_LEAVE_ERR(err);
+ return err;
+}
+#endif
+
+#define SP_START_TIMEOUT_US 30000000
+
+enum ia_css_err
+ia_css_start_sp(void)
+{
+ unsigned long timeout;
+ enum ia_css_err err = IA_CSS_SUCCESS;
+
+ IA_CSS_ENTER("");
+#if defined(HAS_BL)
+ /* Start the bootloader before SP0 and SP1;
+ * this is not exposed through the CSS API */
+ err = ia_css_start_bl();
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE("Bootloader fails");
+ return err;
+ }
+#endif
+ sh_css_sp_start_isp();
+
+ /* wait until the SP has completely started */
+ timeout = SP_START_TIMEOUT_US;
+ while ((ia_css_spctrl_get_state(SP0_ID) != IA_CSS_SP_SW_INITIALIZED) && timeout) {
+ timeout--;
+ hrt_sleep();
+ }
+ if (timeout == 0) {
+ IA_CSS_ERROR("timeout during SP initialization");
+ return IA_CSS_ERR_INTERNAL_ERROR;
+ }
+
+ /* Workaround, in order to run two streams in parallel. See TASK 4271*/
+ /* TODO: Fix this. */
+
+ sh_css_init_host_sp_control_vars();
+
+ /* buffers should be initialized only when sp is started */
+ /* AM: At the moment it will be done only when there is no stream active. */
+
+ sh_css_setup_queues();
+ ia_css_bufq_dump_queue_info();
+
+#ifdef ISP2401
+ if (ia_css_is_system_mode_suspend_or_resume() == false) { /* skip in suspend/resume flow */
+ ia_css_set_system_mode(IA_CSS_SYS_MODE_WORKING);
+ }
+#endif
+ IA_CSS_LEAVE_ERR(err);
+ return err;
+}
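+
+/* Sketch of a typical call sequence around ia_css_start_sp()/ia_css_stop_sp()
+ * (simplified; the exact ordering and error handling are driver specific):
+ *
+ *   err = ia_css_start_sp();
+ *   err = ia_css_stream_create(&stream_config, num_pipes, pipes, &stream);
+ *   err = ia_css_stream_start(stream);
+ *   ...enqueue buffers, handle events, dequeue buffers...
+ *   err = ia_css_stream_stop(stream);
+ *   err = ia_css_stream_destroy(stream);
+ *   err = ia_css_stop_sp();
+ */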
+
+/**
+ * Time to wait for the SP to terminate. The only condition under which this
+ * can time out is a fatal HW failure, but we must be able to detect this and
+ * emit a proper error trace.
+ */
+#define SP_SHUTDOWN_TIMEOUT_US 200000
+
+enum ia_css_err
+ia_css_stop_sp(void)
+{
+ unsigned long timeout;
+ enum ia_css_err err = IA_CSS_SUCCESS;
+
+ IA_CSS_ENTER("void");
+
+ if (!sh_css_sp_is_running()) {
+ err = IA_CSS_ERR_INVALID_ARGUMENTS;
+ IA_CSS_LEAVE("SP already stopped : return_err=%d", err);
+
+ /* Return an error - stop SP should not have been called by driver */
+ return err;
+ }
+
+ /* For now, stop whole SP */
+#ifndef ISP2401
+ sh_css_write_host2sp_command(host2sp_cmd_terminate);
+#else
+ if (!sh_css_write_host2sp_command(host2sp_cmd_terminate)) {
+ IA_CSS_ERROR("Call to 'sh-css_write_host2sp_command()' failed");
+ ia_css_debug_dump_sp_sw_debug_info();
+ ia_css_debug_dump_debug_info(NULL);
+ }
+#endif
+ sh_css_sp_set_sp_running(false);
+
+ timeout = SP_SHUTDOWN_TIMEOUT_US;
+ while (!ia_css_spctrl_is_idle(SP0_ID) && timeout) {
+ timeout--;
+ hrt_sleep();
+ }
+ if ((ia_css_spctrl_get_state(SP0_ID) != IA_CSS_SP_SW_TERMINATED))
+ IA_CSS_WARNING("SP has not terminated (SW)");
+
+ if (timeout == 0) {
+ IA_CSS_WARNING("SP is not idle");
+ ia_css_debug_dump_sp_sw_debug_info();
+ }
+ timeout = SP_SHUTDOWN_TIMEOUT_US;
+ while (!isp_ctrl_getbit(ISP0_ID, ISP_SC_REG, ISP_IDLE_BIT) && timeout) {
+ timeout--;
+ hrt_sleep();
+ }
+ if (timeout == 0) {
+ IA_CSS_WARNING("ISP is not idle");
+ ia_css_debug_dump_sp_sw_debug_info();
+ }
+
+ sh_css_hmm_buffer_record_uninit();
+
+#ifndef ISP2401
+ /* clear pending param sets from refcount */
+ sh_css_param_clear_param_sets();
+#else
+ if (ia_css_is_system_mode_suspend_or_resume() == false) { /* skip in suspend/resume flow */
+ /* clear pending param sets from refcount */
+ sh_css_param_clear_param_sets();
+ ia_css_set_system_mode(IA_CSS_SYS_MODE_INIT); /* System is initialized but not 'running' */
+ }
+#endif
+
+ IA_CSS_LEAVE_ERR(err);
+ return err;
+}
+
+enum ia_css_err
+ia_css_update_continuous_frames(struct ia_css_stream *stream)
+{
+ struct ia_css_pipe *pipe;
+ unsigned int i;
+
+ ia_css_debug_dtrace(
+ IA_CSS_DEBUG_TRACE,
+ "sh_css_update_continuous_frames() enter:\n");
+
+ if (stream == NULL) {
+ ia_css_debug_dtrace(
+ IA_CSS_DEBUG_TRACE,
+ "sh_css_update_continuous_frames() leave: invalid stream, return_void\n");
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+
+ pipe = stream->continuous_pipe;
+
+ for (i = stream->config.init_num_cont_raw_buf;
+ i < stream->config.target_num_cont_raw_buf; i++) {
+ sh_css_update_host2sp_offline_frame(i,
+ pipe->continuous_frames[i], pipe->cont_md_buffers[i]);
+ }
+ sh_css_update_host2sp_cont_num_raw_frames
+ (stream->config.target_num_cont_raw_buf, true);
+ ia_css_debug_dtrace(
+ IA_CSS_DEBUG_TRACE,
+ "sh_css_update_continuous_frames() leave: return_void\n");
+
+ return IA_CSS_SUCCESS;
+}
+
+void ia_css_pipe_map_queue(struct ia_css_pipe *pipe, bool map)
+{
+ unsigned int thread_id;
+ enum ia_css_pipe_id pipe_id;
+ unsigned int pipe_num;
+ bool need_input_queue;
+
+ IA_CSS_ENTER("");
+ assert(pipe != NULL);
+
+ pipe_id = pipe->mode;
+ pipe_num = pipe->pipe_num;
+
+ ia_css_pipeline_get_sp_thread_id(pipe_num, &thread_id);
+
+#if defined(HAS_NO_INPUT_SYSTEM) || defined(USE_INPUT_SYSTEM_VERSION_2401)
+ need_input_queue = true;
+#else
+ need_input_queue = pipe->stream->config.mode == IA_CSS_INPUT_MODE_MEMORY;
+#endif
+
+ /* map required buffer queues to resources */
+ /* TODO: to be improved */
+ if (pipe->mode == IA_CSS_PIPE_ID_PREVIEW) {
+ if (need_input_queue)
+ ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_INPUT_FRAME, map);
+ ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_OUTPUT_FRAME, map);
+ ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_PARAMETER_SET, map);
+ ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_PER_FRAME_PARAMETER_SET, map);
+#if defined SH_CSS_ENABLE_METADATA
+ ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_METADATA, map);
+#endif
+ if (pipe->pipe_settings.preview.preview_binary.info &&
+ pipe->pipe_settings.preview.preview_binary.info->sp.enable.s3a)
+ ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_3A_STATISTICS, map);
+ } else if (pipe->mode == IA_CSS_PIPE_ID_CAPTURE) {
+ unsigned int i;
+
+ if (need_input_queue)
+ ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_INPUT_FRAME, map);
+ ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_OUTPUT_FRAME, map);
+ ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_VF_OUTPUT_FRAME, map);
+ ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_PARAMETER_SET, map);
+ ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_PER_FRAME_PARAMETER_SET, map);
+#if defined SH_CSS_ENABLE_METADATA
+ ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_METADATA, map);
+#endif
+ if (pipe->config.default_capture_config.mode == IA_CSS_CAPTURE_MODE_PRIMARY) {
+ for (i = 0; i < pipe->pipe_settings.capture.num_primary_stage; i++) {
+ if (pipe->pipe_settings.capture.primary_binary[i].info &&
+ pipe->pipe_settings.capture.primary_binary[i].info->sp.enable.s3a) {
+ ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_3A_STATISTICS, map);
+ break;
+ }
+ }
+ } else if (pipe->config.default_capture_config.mode == IA_CSS_CAPTURE_MODE_ADVANCED ||
+ pipe->config.default_capture_config.mode == IA_CSS_CAPTURE_MODE_LOW_LIGHT ||
+ pipe->config.default_capture_config.mode == IA_CSS_CAPTURE_MODE_BAYER) {
+ if (pipe->pipe_settings.capture.pre_isp_binary.info &&
+ pipe->pipe_settings.capture.pre_isp_binary.info->sp.enable.s3a)
+ ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_3A_STATISTICS, map);
+ }
+ } else if (pipe->mode == IA_CSS_PIPE_ID_VIDEO) {
+ if (need_input_queue)
+ ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_INPUT_FRAME, map);
+ ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_OUTPUT_FRAME, map);
+ if (pipe->enable_viewfinder[IA_CSS_PIPE_OUTPUT_STAGE_0])
+ ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_VF_OUTPUT_FRAME, map);
+ ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_PARAMETER_SET, map);
+ ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_PER_FRAME_PARAMETER_SET, map);
+#if defined SH_CSS_ENABLE_METADATA
+ ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_METADATA, map);
+#endif
+ if (pipe->pipe_settings.video.video_binary.info &&
+ pipe->pipe_settings.video.video_binary.info->sp.enable.s3a)
+ ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_3A_STATISTICS, map);
+ if (pipe->pipe_settings.video.video_binary.info &&
+ (pipe->pipe_settings.video.video_binary.info->sp.enable.dis
+ ))
+ ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_DIS_STATISTICS, map);
+ } else if (pipe->mode == IA_CSS_PIPE_ID_COPY) {
+ if (need_input_queue)
+ ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_INPUT_FRAME, map);
+ if (!pipe->stream->config.continuous)
+ ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_OUTPUT_FRAME, map);
+#if defined SH_CSS_ENABLE_METADATA
+ ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_METADATA, map);
+#endif
+ } else if (pipe->mode == IA_CSS_PIPE_ID_ACC) {
+ if (need_input_queue)
+ ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_INPUT_FRAME, map);
+ ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_OUTPUT_FRAME, map);
+ ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_PARAMETER_SET, map);
+ ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_PER_FRAME_PARAMETER_SET, map);
+#if defined SH_CSS_ENABLE_METADATA
+ ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_METADATA, map);
+#endif
+ } else if (pipe->mode == IA_CSS_PIPE_ID_YUVPP) {
+ unsigned int idx;
+ for (idx = 0; idx < IA_CSS_PIPE_MAX_OUTPUT_STAGE; idx++) {
+ ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_OUTPUT_FRAME + idx, map);
+ if (pipe->enable_viewfinder[idx])
+ ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_VF_OUTPUT_FRAME + idx, map);
+ }
+ if (need_input_queue)
+ ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_INPUT_FRAME, map);
+ ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_PARAMETER_SET, map);
+#if defined SH_CSS_ENABLE_METADATA
+ ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_METADATA, map);
+#endif
+ }
+ IA_CSS_LEAVE("");
+}
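+
+/* Example of the mapping above: a preview pipe whose stream reads from memory
+ * gets its input-frame, output-frame, parameter-set and per-frame
+ * parameter-set queues mapped for its SP thread, plus the metadata and 3A
+ * statistics queues when metadata support and the preview binary's 3A are
+ * enabled.
+ */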
+
+#if CONFIG_ON_FRAME_ENQUEUE()
+static enum ia_css_err set_config_on_frame_enqueue(struct ia_css_frame_info *info, struct frame_data_wrapper *frame)
+{
+ frame->config_on_frame_enqueue.padded_width = 0;
+
+ /* currently we support configuration on frame enqueue only for YUV formats */
+ /* for other formats the padded_width is zeroed so no configuration override takes place */
+ switch (info->format) {
+ case IA_CSS_FRAME_FORMAT_YUV420:
+ case IA_CSS_FRAME_FORMAT_NV12:
+ if (info->padded_width > info->res.width)
+ {
+ frame->config_on_frame_enqueue.padded_width = info->padded_width;
+ }
+ else if ((info->padded_width < info->res.width) && (info->padded_width > 0))
+ {
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+ /* nothing to do if width == padded width or padded width is zeroed (the same) */
+ break;
+ default:
+ break;
+ }
+
+ return IA_CSS_SUCCESS;
+}
+#endif
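+
+/* Worked example of the check above: for an NV12 frame with res.width 1280,
+ * a padded_width of 1344 is passed on as an enqueue-time configuration
+ * override, a padded_width of 0 or 1280 means "no override", and a non-zero
+ * padded_width smaller than 1280 is rejected as invalid.
+ */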
+
+enum ia_css_err
+ia_css_unlock_raw_frame(struct ia_css_stream *stream, uint32_t exp_id)
+{
+ enum ia_css_err ret;
+
+ IA_CSS_ENTER("");
+
+ /* Only continuous streams have a tagger to which we can send the
+ * unlock message. */
+ if (stream == NULL || !stream->config.continuous) {
+ IA_CSS_ERROR("invalid stream pointer");
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+
+ if (exp_id > IA_CSS_ISYS_MAX_EXPOSURE_ID ||
+ exp_id < IA_CSS_ISYS_MIN_EXPOSURE_ID) {
+ IA_CSS_ERROR("invalid expsure ID: %d\n", exp_id);
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+
+ /* Send the event. Since we verified that the exp_id is valid,
+ * we can safely assign it to an 8-bit argument here. */
+ ret = ia_css_bufq_enqueue_psys_event(
+ IA_CSS_PSYS_SW_EVENT_UNLOCK_RAW_BUFFER, exp_id, 0, 0);
+
+ IA_CSS_LEAVE_ERR(ret);
+ return ret;
+}
+
+/** @brief Set the state (Enable or Disable) of the Extension stage in the
+ * given pipe.
+ */
+enum ia_css_err
+ia_css_pipe_set_qos_ext_state(struct ia_css_pipe *pipe, uint32_t fw_handle, bool enable)
+{
+ unsigned int thread_id;
+ struct ia_css_pipeline_stage *stage;
+ enum ia_css_err err = IA_CSS_SUCCESS;
+
+ IA_CSS_ENTER("");
+
+ /* Parameter Check */
+ if (pipe == NULL || pipe->stream == NULL) {
+ IA_CSS_ERROR("Invalid Pipe.");
+ err = IA_CSS_ERR_INVALID_ARGUMENTS;
+ } else if (!(pipe->config.acc_extension)) {
+ IA_CSS_ERROR("Invalid Pipe(No Extension Firmware)");
+ err = IA_CSS_ERR_INVALID_ARGUMENTS;
+ } else if (!sh_css_sp_is_running()) {
+ IA_CSS_ERROR("Leaving: queue unavailable.");
+ err = IA_CSS_ERR_RESOURCE_NOT_AVAILABLE;
+ } else {
+ /* Query the threadid and stage_num for the Extension firmware*/
+ ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(pipe), &thread_id);
+ err = ia_css_pipeline_get_stage_from_fw(&(pipe->pipeline), fw_handle, &stage);
+ if (err == IA_CSS_SUCCESS) {
+ /* Set the Extension State. TODO: Add check for stage firmware.type (QOS) */
+ err = ia_css_bufq_enqueue_psys_event(
+ (uint8_t) IA_CSS_PSYS_SW_EVENT_STAGE_ENABLE_DISABLE,
+ (uint8_t) thread_id,
+ (uint8_t) stage->stage_num,
+ (enable == true) ? 1 : 0);
+ if (err == IA_CSS_SUCCESS) {
+ if (enable)
+ SH_CSS_QOS_STAGE_ENABLE(&(sh_css_sp_group.pipe[thread_id]), stage->stage_num);
+ else
+ SH_CSS_QOS_STAGE_DISABLE(&(sh_css_sp_group.pipe[thread_id]), stage->stage_num);
+ }
+ }
+ }
+ IA_CSS_LEAVE("err:%d handle:%u enable:%d", err, fw_handle, enable);
+ return err;
+}
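+
+/* Minimal usage sketch for the QoS extension state API (assumes the extension
+ * firmware was loaded into the pipe and fw_handle identifies it):
+ *
+ *   bool enabled;
+ *
+ *   err = ia_css_pipe_set_qos_ext_state(pipe, fw_handle, true);
+ *   err = ia_css_pipe_get_qos_ext_state(pipe, fw_handle, &enabled);
+ */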
+
+/** @brief Get the state (Enable or Disable) of the Extension stage in the
+ * given pipe.
+ */
+enum ia_css_err
+ia_css_pipe_get_qos_ext_state(struct ia_css_pipe *pipe, uint32_t fw_handle, bool *enable)
+{
+ struct ia_css_pipeline_stage *stage;
+ unsigned int thread_id;
+ enum ia_css_err err = IA_CSS_SUCCESS;
+
+ IA_CSS_ENTER("");
+
+ /* Parameter Check */
+ if (pipe == NULL || pipe->stream == NULL) {
+ IA_CSS_ERROR("Invalid Pipe.");
+ err = IA_CSS_ERR_INVALID_ARGUMENTS;
+ } else if (!(pipe->config.acc_extension)) {
+ IA_CSS_ERROR("Invalid Pipe (No Extension Firmware).");
+ err = IA_CSS_ERR_INVALID_ARGUMENTS;
+ } else if (!sh_css_sp_is_running()) {
+ IA_CSS_ERROR("Leaving: queue unavailable.");
+ err = IA_CSS_ERR_RESOURCE_NOT_AVAILABLE;
+ } else {
+ /* Query the threadid and stage_num corresponding to the Extension firmware*/
+ ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(pipe), &thread_id);
+ err = ia_css_pipeline_get_stage_from_fw(&pipe->pipeline, fw_handle, &stage);
+
+ if (err == IA_CSS_SUCCESS) {
+ /* Get the Extension State */
+ *enable = (SH_CSS_QOS_STAGE_IS_ENABLED(&(sh_css_sp_group.pipe[thread_id]),stage->stage_num)) ? true : false;
+ }
+ }
+ IA_CSS_LEAVE("err:%d handle:%u enable:%d", err, fw_handle, *enable);
+ return err;
+}
+
+#ifdef ISP2401
+enum ia_css_err
+ia_css_pipe_update_qos_ext_mapped_arg(struct ia_css_pipe *pipe, uint32_t fw_handle,
+ struct ia_css_isp_param_css_segments *css_seg, struct ia_css_isp_param_isp_segments *isp_seg)
+{
+ unsigned int HIVE_ADDR_sp_group;
+ static struct sh_css_sp_group sp_group;
+ static struct sh_css_sp_stage sp_stage;
+ static struct sh_css_isp_stage isp_stage;
+ const struct ia_css_fw_info *fw;
+ unsigned int thread_id;
+ struct ia_css_pipeline_stage *stage;
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ int stage_num = 0;
+ enum ia_css_isp_memories mem;
+ bool enabled;
+
+ IA_CSS_ENTER("");
+
+ fw = &sh_css_sp_fw;
+
+ /* Parameter Check */
+ if (pipe == NULL || pipe->stream == NULL) {
+ IA_CSS_ERROR("Invalid Pipe.");
+ err = IA_CSS_ERR_INVALID_ARGUMENTS;
+ } else if (!(pipe->config.acc_extension)) {
+ IA_CSS_ERROR("Invalid Pipe (No Extension Firmware).");
+ err = IA_CSS_ERR_INVALID_ARGUMENTS;
+ } else if (!sh_css_sp_is_running()) {
+ IA_CSS_ERROR("Leaving: queue unavailable.");
+ err = IA_CSS_ERR_RESOURCE_NOT_AVAILABLE;
+ } else {
+ /* Query the thread_id and stage_num corresponding to the Extension firmware */
+ ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(pipe), &thread_id);
+ err = ia_css_pipeline_get_stage_from_fw(&(pipe->pipeline), fw_handle, &stage);
+ if (err == IA_CSS_SUCCESS) {
+ /* Get the Extension State */
+ enabled = (SH_CSS_QOS_STAGE_IS_ENABLED(&(sh_css_sp_group.pipe[thread_id]), stage->stage_num)) ? true : false;
+ /* Update mapped arg only when extension stage is not enabled */
+ if (enabled) {
+ IA_CSS_ERROR("Leaving: cannot update when stage is enabled.");
+ err = IA_CSS_ERR_RESOURCE_NOT_AVAILABLE;
+ } else {
+ stage_num = stage->stage_num;
+
+ HIVE_ADDR_sp_group = fw->info.sp.group;
+ sp_dmem_load(SP0_ID,
+ (unsigned int)sp_address_of(sp_group),
+ &sp_group, sizeof(struct sh_css_sp_group));
+ mmgr_load(sp_group.pipe[thread_id].sp_stage_addr[stage_num],
+ &sp_stage, sizeof(struct sh_css_sp_stage));
+
+ mmgr_load(sp_stage.isp_stage_addr,
+ &isp_stage, sizeof(struct sh_css_isp_stage));
+
+ for (mem = 0; mem < N_IA_CSS_ISP_MEMORIES; mem++) {
+ isp_stage.mem_initializers.params[IA_CSS_PARAM_CLASS_PARAM][mem].address =
+ css_seg->params[IA_CSS_PARAM_CLASS_PARAM][mem].address;
+ isp_stage.mem_initializers.params[IA_CSS_PARAM_CLASS_PARAM][mem].size =
+ css_seg->params[IA_CSS_PARAM_CLASS_PARAM][mem].size;
+ isp_stage.binary_info.mem_initializers.params[IA_CSS_PARAM_CLASS_PARAM][mem].address =
+ isp_seg->params[IA_CSS_PARAM_CLASS_PARAM][mem].address;
+ isp_stage.binary_info.mem_initializers.params[IA_CSS_PARAM_CLASS_PARAM][mem].size =
+ isp_seg->params[IA_CSS_PARAM_CLASS_PARAM][mem].size;
+ }
+
+ mmgr_store(sp_stage.isp_stage_addr,
+ &isp_stage, sizeof(struct sh_css_isp_stage));
+ }
+ }
+ }
+ IA_CSS_LEAVE("err:%d handle:%u", err, fw_handle);
+ return err;
+}
+
+#ifdef USE_INPUT_SYSTEM_VERSION_2401
+static enum ia_css_err
+aspect_ratio_crop_init(struct ia_css_stream *curr_stream,
+ struct ia_css_pipe *pipes[],
+ bool *do_crop_status)
+{
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ int i;
+ struct ia_css_pipe *curr_pipe;
+ uint32_t pipe_mask = 0;
+
+ if ((curr_stream == NULL) ||
+ (curr_stream->num_pipes == 0) ||
+ (pipes == NULL) ||
+ (do_crop_status == NULL)) {
+ err = IA_CSS_ERR_INVALID_ARGUMENTS;
+ IA_CSS_LEAVE_ERR(err);
+ return err;
+ }
+
+ for (i = 0; i < curr_stream->num_pipes; i++) {
+ curr_pipe = pipes[i];
+ pipe_mask |= (1 << curr_pipe->config.mode);
+ }
+
+ *do_crop_status =
+ (((pipe_mask & (1 << IA_CSS_PIPE_MODE_PREVIEW)) ||
+ (pipe_mask & (1 << IA_CSS_PIPE_MODE_VIDEO))) &&
+ (pipe_mask & (1 << IA_CSS_PIPE_MODE_CAPTURE)) &&
+ curr_stream->config.continuous);
+ return IA_CSS_SUCCESS;
+}
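+
+/* Example of the decision above: a continuous stream created with a preview
+ * pipe and a capture pipe sets both mode bits in pipe_mask, so
+ * *do_crop_status becomes true; a single video pipe, or any non-continuous
+ * stream, leaves it false.
+ */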
+
+static bool
+aspect_ratio_crop_check(bool enabled, struct ia_css_pipe *curr_pipe)
+{
+ bool status = false;
+
+ if ((curr_pipe != NULL) && enabled) {
+ if ((curr_pipe->config.mode == IA_CSS_PIPE_MODE_PREVIEW) ||
+ (curr_pipe->config.mode == IA_CSS_PIPE_MODE_VIDEO) ||
+ (curr_pipe->config.mode == IA_CSS_PIPE_MODE_CAPTURE))
+ status = true;
+ }
+
+ return status;
+}
+
+static enum ia_css_err
+aspect_ratio_crop(struct ia_css_pipe *curr_pipe,
+ struct ia_css_resolution *effective_res)
+{
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ struct ia_css_resolution crop_res;
+ struct ia_css_resolution *in_res = NULL;
+ struct ia_css_resolution *out_res = NULL;
+ bool use_bds_output_info = false;
+ bool use_vf_pp_in_res = false;
+ bool use_capt_pp_in_res = false;
+
+ if ((curr_pipe == NULL) ||
+ (effective_res == NULL)) {
+ err = IA_CSS_ERR_INVALID_ARGUMENTS;
+ IA_CSS_LEAVE_ERR(err);
+ return err;
+ }
+
+ if ((curr_pipe->config.mode != IA_CSS_PIPE_MODE_PREVIEW) &&
+ (curr_pipe->config.mode != IA_CSS_PIPE_MODE_VIDEO) &&
+ (curr_pipe->config.mode != IA_CSS_PIPE_MODE_CAPTURE)) {
+ err = IA_CSS_ERR_INVALID_ARGUMENTS;
+ IA_CSS_LEAVE_ERR(err);
+ return err;
+ }
+
+ use_bds_output_info =
+ ((curr_pipe->bds_output_info.res.width != 0) &&
+ (curr_pipe->bds_output_info.res.height != 0));
+
+ use_vf_pp_in_res =
+ ((curr_pipe->config.vf_pp_in_res.width != 0) &&
+ (curr_pipe->config.vf_pp_in_res.height != 0));
+
+ use_capt_pp_in_res =
+ ((curr_pipe->config.capt_pp_in_res.width != 0) &&
+ (curr_pipe->config.capt_pp_in_res.height != 0));
+
+ in_res = &curr_pipe->stream->config.input_config.effective_res;
+ out_res = &curr_pipe->output_info[0].res;
+
+ switch (curr_pipe->config.mode) {
+ case IA_CSS_PIPE_MODE_PREVIEW:
+ if (use_bds_output_info)
+ out_res = &curr_pipe->bds_output_info.res;
+ else if (use_vf_pp_in_res)
+ out_res = &curr_pipe->config.vf_pp_in_res;
+ break;
+ case IA_CSS_PIPE_MODE_VIDEO:
+ if (use_bds_output_info)
+ out_res = &curr_pipe->bds_output_info.res;
+ break;
+ case IA_CSS_PIPE_MODE_CAPTURE:
+ if (use_capt_pp_in_res)
+ out_res = &curr_pipe->config.capt_pp_in_res;
+ break;
+ case IA_CSS_PIPE_MODE_ACC:
+ case IA_CSS_PIPE_MODE_COPY:
+ case IA_CSS_PIPE_MODE_YUVPP:
+ default:
+ IA_CSS_ERROR("aspect ratio cropping invalid args: mode[%d]\n",
+ curr_pipe->config.mode);
+ assert(0);
+ break;
+ }
+
+ err = ia_css_frame_find_crop_resolution(in_res, out_res, &crop_res);
+ if (err == IA_CSS_SUCCESS) {
+ *effective_res = crop_res;
+ } else {
+ /* in case of error fallback to default
+ * effective resolution from driver. */
+ IA_CSS_LOG("ia_css_frame_find_crop_resolution() failed with err(%d)", err);
+ }
+ return err;
+}
+#endif
+
+#endif
+static void
+sh_css_hmm_buffer_record_init(void)
+{
+ int i;
+
+#ifndef ISP2401
+ for (i = 0; i < MAX_HMM_BUFFER_NUM; i++) {
+ sh_css_hmm_buffer_record_reset(&hmm_buffer_record[i]);
+#else
+ if (ia_css_is_system_mode_suspend_or_resume() == false) { /* skip in suspend/resume flow */
+ for (i = 0; i < MAX_HMM_BUFFER_NUM; i++) {
+ sh_css_hmm_buffer_record_reset(&hmm_buffer_record[i]);
+ }
+#endif
+ }
+}
+
+static void
+sh_css_hmm_buffer_record_uninit(void)
+{
+ int i;
+ struct sh_css_hmm_buffer_record *buffer_record = NULL;
+
+#ifndef ISP2401
+ buffer_record = &hmm_buffer_record[0];
+ for (i = 0; i < MAX_HMM_BUFFER_NUM; i++) {
+ if (buffer_record->in_use) {
+ if (buffer_record->h_vbuf != NULL)
+ ia_css_rmgr_rel_vbuf(hmm_buffer_pool, &buffer_record->h_vbuf);
+ sh_css_hmm_buffer_record_reset(buffer_record);
+#else
+ if (ia_css_is_system_mode_suspend_or_resume() == false) { /* skip in suspend/resume flow */
+ buffer_record = &hmm_buffer_record[0];
+ for (i = 0; i < MAX_HMM_BUFFER_NUM; i++) {
+ if (buffer_record->in_use) {
+ if (buffer_record->h_vbuf != NULL)
+ ia_css_rmgr_rel_vbuf(hmm_buffer_pool, &buffer_record->h_vbuf);
+ sh_css_hmm_buffer_record_reset(buffer_record);
+ }
+ buffer_record++;
+#endif
+ }
+#ifndef ISP2401
+ buffer_record++;
+#endif
+ }
+}
+
+static void
+sh_css_hmm_buffer_record_reset(struct sh_css_hmm_buffer_record *buffer_record)
+{
+ assert(buffer_record != NULL);
+ buffer_record->in_use = false;
+ buffer_record->type = IA_CSS_BUFFER_TYPE_INVALID;
+ buffer_record->h_vbuf = NULL;
+ buffer_record->kernel_ptr = 0;
+}
+
+#ifndef ISP2401
+static bool
+sh_css_hmm_buffer_record_acquire(struct ia_css_rmgr_vbuf_handle *h_vbuf,
+#else
+static struct sh_css_hmm_buffer_record
+*sh_css_hmm_buffer_record_acquire(struct ia_css_rmgr_vbuf_handle *h_vbuf,
+#endif
+ enum ia_css_buffer_type type,
+ hrt_address kernel_ptr)
+{
+ int i;
+ struct sh_css_hmm_buffer_record *buffer_record = NULL;
+#ifndef ISP2401
+ bool found_record = false;
+#else
+ struct sh_css_hmm_buffer_record *out_buffer_record = NULL;
+#endif
+
+ assert(h_vbuf != NULL);
+ assert((type > IA_CSS_BUFFER_TYPE_INVALID) && (type < IA_CSS_NUM_DYNAMIC_BUFFER_TYPE));
+ assert(kernel_ptr != 0);
+
+ buffer_record = &hmm_buffer_record[0];
+ for (i = 0; i < MAX_HMM_BUFFER_NUM; i++) {
+ if (buffer_record->in_use == false) {
+ buffer_record->in_use = true;
+ buffer_record->type = type;
+ buffer_record->h_vbuf = h_vbuf;
+ buffer_record->kernel_ptr = kernel_ptr;
+#ifndef ISP2401
+ found_record = true;
+#else
+ out_buffer_record = buffer_record;
+#endif
+ break;
+ }
+ buffer_record++;
+ }
+
+#ifndef ISP2401
+ return found_record;
+#else
+ return out_buffer_record;
+#endif
+}
+
+static struct sh_css_hmm_buffer_record
+*sh_css_hmm_buffer_record_validate(hrt_vaddress ddr_buffer_addr,
+ enum ia_css_buffer_type type)
+{
+ int i;
+ struct sh_css_hmm_buffer_record *buffer_record = NULL;
+ bool found_record = false;
+
+ buffer_record = &hmm_buffer_record[0];
+ for (i = 0; i < MAX_HMM_BUFFER_NUM; i++) {
+ if ((buffer_record->in_use == true) &&
+ (buffer_record->type == type) &&
+ (buffer_record->h_vbuf != NULL) &&
+ (buffer_record->h_vbuf->vptr == ddr_buffer_addr)) {
+ found_record = true;
+ break;
+ }
+ buffer_record++;
+ }
+
+ if (found_record == true)
+ return buffer_record;
+ else
+ return NULL;
+}
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_defs.h
new file mode 100644
index 000000000000..4072c564f911
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_defs.h
@@ -0,0 +1,410 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _SH_CSS_DEFS_H_
+#define _SH_CSS_DEFS_H_
+
+#include "isp.h"
+
+/*#include "vamem.h"*/ /* Cannot include vamem.h for the VAMEM properties; this file is visible on the ISP -> pipeline generator */
+
+#include "math_support.h"	/* max(), min(), etc. */
+
+/* ID's for refcount */
+#define IA_CSS_REFCOUNT_PARAM_SET_POOL 0xCAFE0001
+#define IA_CSS_REFCOUNT_PARAM_BUFFER 0xCAFE0002
+
+/* Digital Image Stabilization */
+#define SH_CSS_DIS_DECI_FACTOR_LOG2 6
+
+/* UV offset: 1:uv=-128...127, 0:uv=0...255 */
+#define SH_CSS_UV_OFFSET_IS_0 0
+
+/* Bayer bit depth is adjusted to 13 bits in the ISP */
+#define SH_CSS_BAYER_BITS 13
+
+/* Max value of bayer data (unsigned 13bit in ISP) */
+#define SH_CSS_BAYER_MAXVAL ((1U << SH_CSS_BAYER_BITS) - 1)
+
+/* Bits of yuv in ISP */
+#define SH_CSS_ISP_YUV_BITS 8
+
+#define SH_CSS_DP_GAIN_SHIFT 5
+#define SH_CSS_BNR_GAIN_SHIFT 13
+#define SH_CSS_YNR_GAIN_SHIFT 13
+#define SH_CSS_AE_YCOEF_SHIFT 13
+#define SH_CSS_AF_FIR_SHIFT 13
+#define SH_CSS_YEE_DETAIL_GAIN_SHIFT 8 /* [u5.8] */
+#define SH_CSS_YEE_SCALE_SHIFT 8
+#define SH_CSS_TNR_COEF_SHIFT 13
+#define SH_CSS_MACC_COEF_SHIFT 11 /* [s2.11] for ISP1 */
+#define SH_CSS_MACC2_COEF_SHIFT 13 /* [s[exp].[13-exp]] for ISP2 */
+#define SH_CSS_DIS_COEF_SHIFT 13
+
+/* enumeration of the bayer downscale factors. When a binary supports multiple
+ * factors, the OR of these defines is used to build the mask of supported
+ * factors. The BDS factor is used in pre-processor expressions so we cannot
+ * use an enum here. */
+#define SH_CSS_BDS_FACTOR_1_00 (0)
+#define SH_CSS_BDS_FACTOR_1_25 (1)
+#define SH_CSS_BDS_FACTOR_1_50 (2)
+#define SH_CSS_BDS_FACTOR_2_00 (3)
+#define SH_CSS_BDS_FACTOR_2_25 (4)
+#define SH_CSS_BDS_FACTOR_2_50 (5)
+#define SH_CSS_BDS_FACTOR_3_00 (6)
+#define SH_CSS_BDS_FACTOR_4_00 (7)
+#define SH_CSS_BDS_FACTOR_4_50 (8)
+#define SH_CSS_BDS_FACTOR_5_00 (9)
+#define SH_CSS_BDS_FACTOR_6_00 (10)
+#define SH_CSS_BDS_FACTOR_8_00 (11)
+#define NUM_BDS_FACTORS (12)
+
+#define PACK_BDS_FACTOR(factor) (1<<(factor))
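+
+/* For illustration: a mask built with PACK_BDS_FACTOR() for a binary that
+ * supports only the 1.00 and 2.00 factors would be
+ *   PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_1_00) |
+ *   PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_2_00)
+ * which evaluates to (1<<0) | (1<<3) = 0x9. */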
+
+/* The following macros must match the enum type ia_css_pipe_version in
+ * ia_css_pipe_public.h. The reason to add these macros is that an enum type
+ * would be evaluated to 0 at preprocessing time. */
+#define SH_CSS_ISP_PIPE_VERSION_1 1
+#define SH_CSS_ISP_PIPE_VERSION_2_2 2
+#define SH_CSS_ISP_PIPE_VERSION_2_6_1 3
+#define SH_CSS_ISP_PIPE_VERSION_2_7 4
+
+/*--------------- sRGB Gamma -----------------
+CCM : YCgCo[0,8191] -> RGB[0,4095]
+sRGB Gamma : RGB [0,4095] -> RGB[0,8191]
+CSC : RGB [0,8191] -> YUV[0,8191]
+
+CCM:
+Y[0,8191],CgCo[-4096,4095],coef[-8192,8191] -> RGB[0,4095]
+
+sRGB Gamma:
+RGB[0,4095] -(interpolation step16)-> RGB[0,255] -(LUT 12bit)-> RGB[0,4095] -> RGB[0,8191]
+
+CSC:
+RGB[0,8191],coef[-8192,8191] -> YUV[0,8191]
+--------------------------------------------*/
+/* Bits of input/output of sRGB Gamma */
+#define SH_CSS_RGB_GAMMA_INPUT_BITS 12 /* [0,4095] */
+#define SH_CSS_RGB_GAMMA_OUTPUT_BITS 13 /* [0,8191] */
+
+/* Bits of fractional part of interpolation in vamem, [0,4095]->[0,255] */
+#define SH_CSS_RGB_GAMMA_FRAC_BITS \
+ (SH_CSS_RGB_GAMMA_INPUT_BITS - SH_CSS_ISP_RGB_GAMMA_TABLE_SIZE_LOG2)
+#define SH_CSS_RGB_GAMMA_ONE (1 << SH_CSS_RGB_GAMMA_FRAC_BITS)
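+
+/* Worked example, assuming SH_CSS_ISP_RGB_GAMMA_TABLE_SIZE_LOG2 == 8
+ * (a 256-entry LUT, matching the [0,4095]->[0,255] mapping above):
+ * SH_CSS_RGB_GAMMA_FRAC_BITS = 12 - 8 = 4 and SH_CSS_RGB_GAMMA_ONE = 16,
+ * which is the "interpolation step16" mentioned in the overview. */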
+
+/* Bits of input of CCM, = 13, Y[0,8191],CgCo[-4096,4095] */
+#define SH_CSS_YUV2RGB_CCM_INPUT_BITS SH_CSS_BAYER_BITS
+
+/* Bits of output of CCM, = 12, RGB[0,4095] */
+#define SH_CSS_YUV2RGB_CCM_OUTPUT_BITS SH_CSS_RGB_GAMMA_INPUT_BITS
+
+/* Maximum value of output of CCM */
+#define SH_CSS_YUV2RGB_CCM_MAX_OUTPUT \
+ ((1 << SH_CSS_YUV2RGB_CCM_OUTPUT_BITS) - 1)
+
+#define SH_CSS_NUM_INPUT_BUF_LINES 4
+
+/* Left cropping only applicable for sufficiently large nway */
+#if ISP_VEC_NELEMS == 16
+#define SH_CSS_MAX_LEFT_CROPPING 0
+#define SH_CSS_MAX_TOP_CROPPING 0
+#else
+#define SH_CSS_MAX_LEFT_CROPPING 12
+#define SH_CSS_MAX_TOP_CROPPING 12
+#endif
+
+#define SH_CSS_SP_MAX_WIDTH 1280
+
+/* This is the maximum grid we can handle in the ISP binaries.
+ * The host code makes sure no bigger grid is ever selected. */
+#define SH_CSS_MAX_BQ_GRID_WIDTH 80
+#define SH_CSS_MAX_BQ_GRID_HEIGHT 60
+
+/* The minimum dvs envelope is 12x12(for IPU2) to make sure the
+ * invalid rows/columns that result from filter initialization are skipped. */
+#define SH_CSS_MIN_DVS_ENVELOPE 12U
+
+/* The FPGA system (vec_nelems == 16) only supports up to 5MP */
+#if ISP_VEC_NELEMS == 16
+#define SH_CSS_MAX_SENSOR_WIDTH 2560
+#define SH_CSS_MAX_SENSOR_HEIGHT 1920
+#else
+#define SH_CSS_MAX_SENSOR_WIDTH 4608
+#define SH_CSS_MAX_SENSOR_HEIGHT 3450
+#endif
+
+/* Limited to reduce vmem pressure */
+#if ISP_VMEM_DEPTH >= 3072
+#define SH_CSS_MAX_CONTINUOUS_SENSOR_WIDTH SH_CSS_MAX_SENSOR_WIDTH
+#define SH_CSS_MAX_CONTINUOUS_SENSOR_HEIGHT SH_CSS_MAX_SENSOR_HEIGHT
+#else
+#define SH_CSS_MAX_CONTINUOUS_SENSOR_WIDTH 3264
+#define SH_CSS_MAX_CONTINUOUS_SENSOR_HEIGHT 2448
+#endif
+/* When using bayer decimation */
+/*
+#define SH_CSS_MAX_CONTINUOUS_SENSOR_WIDTH_DEC 4224
+#define SH_CSS_MAX_CONTINUOUS_SENSOR_HEIGHT_DEC 3168
+*/
+#define SH_CSS_MAX_CONTINUOUS_SENSOR_WIDTH_DEC SH_CSS_MAX_SENSOR_WIDTH
+#define SH_CSS_MAX_CONTINUOUS_SENSOR_HEIGHT_DEC SH_CSS_MAX_SENSOR_HEIGHT
+
+#define SH_CSS_MIN_SENSOR_WIDTH 2
+#define SH_CSS_MIN_SENSOR_HEIGHT 2
+
+#if defined(IS_ISP_2400_SYSTEM)
+/* MAX width and height set to the same to allow for rotated
+ * resolutions. */
+#define SH_CSS_MAX_VF_WIDTH 1920
+#define SH_CSS_MAX_VF_HEIGHT 1920
+#else
+#define SH_CSS_MAX_VF_WIDTH 1280
+#define SH_CSS_MAX_VF_HEIGHT 960
+#endif
+/*
+#define SH_CSS_MAX_VF_WIDTH_DEC 1920
+#define SH_CSS_MAX_VF_HEIGHT_DEC 1080
+*/
+#define SH_CSS_MAX_VF_WIDTH_DEC SH_CSS_MAX_VF_WIDTH
+#define SH_CSS_MAX_VF_HEIGHT_DEC SH_CSS_MAX_VF_HEIGHT
+
+/* We use 16 bits per coordinate component, including integer
+ and fractional bits */
+#define SH_CSS_MORPH_TABLE_GRID ISP_VEC_NELEMS
+#define SH_CSS_MORPH_TABLE_ELEM_BYTES 2
+#define SH_CSS_MORPH_TABLE_ELEMS_PER_DDR_WORD \
+ (HIVE_ISP_DDR_WORD_BYTES/SH_CSS_MORPH_TABLE_ELEM_BYTES)
+
+#ifndef ISP2401
+#define SH_CSS_MAX_SCTBL_WIDTH_PER_COLOR (SH_CSS_MAX_BQ_GRID_WIDTH + 1)
+#define SH_CSS_MAX_SCTBL_HEIGHT_PER_COLOR (SH_CSS_MAX_BQ_GRID_HEIGHT + 1)
+#else
+/* TODO: I will move macros of "*_SCTBL_*" to SC kernel.
+ "+ 2" should be "+ SH_CSS_SCTBL_CENTERING_MARGIN + SH_CSS_SCTBL_LAST_GRID_COUNT". (michie, Sep/23/2014) */
+#define SH_CSS_MAX_SCTBL_WIDTH_PER_COLOR (SH_CSS_MAX_BQ_GRID_WIDTH + 2)
+#define SH_CSS_MAX_SCTBL_HEIGHT_PER_COLOR (SH_CSS_MAX_BQ_GRID_HEIGHT + 2)
+#endif
+#define SH_CSS_MAX_SCTBL_ALIGNED_WIDTH_PER_COLOR \
+ CEIL_MUL(SH_CSS_MAX_SCTBL_WIDTH_PER_COLOR, ISP_VEC_NELEMS)
+
+/* Each line of this table is aligned to the maximum line width. */
+#define SH_CSS_MAX_S3ATBL_WIDTH SH_CSS_MAX_BQ_GRID_WIDTH
+
+#ifndef ISP2401
+/* The video binary supports a delay of 1 or 2 */
+#define MAX_DVS_FRAME_DELAY 2
+/* We always need one additional frame because the video binary
+ * reads the previous and writes the current frame concurrently */
+#define MAX_NUM_VIDEO_DELAY_FRAMES (MAX_DVS_FRAME_DELAY + 1)
+#define NUM_VIDEO_TNR_FRAMES 2
+
+#define NUM_TNR_FRAMES 2 /* FIXME */
+
+
+#define MAX_NUM_DELAY_FRAMES MAX_NUM_VIDEO_DELAY_FRAMES
+
+#else
+/* Video mode specific DVS define */
+/* The video binary supports a delay of 1 or 2 frames */
+#define VIDEO_FRAME_DELAY 2
+/* +1 because DVS reads the previous and writes the current frame concurrently */
+#define MAX_NUM_VIDEO_DELAY_FRAMES (VIDEO_FRAME_DELAY + 1)
+
+/* Preview mode specific DVS define. */
+/* In preview we only need GDC functionality (and not the DVS functionality) */
+/* The minimum number of DVS frames needed is 2: one where GDC reads from and another where GDC writes to */
+#define NUM_PREVIEW_DVS_FRAMES (2)
+
+/* TNR is no longer exclusive to video; SkyCam preview has TNR too (same kernel as video).
+ * All modes use the generic define NUM_TNR_FRAMES. The define NUM_VIDEO_TNR_FRAMES has been deprecated.
+ *
+ * Notes
+ * 1) The value depends on the TNR kernel that is used; it does not depend on the mode
+ *    and is not something that can be chosen freely.
+ * 2) For the luma-only pipeline a version that supports two different sets of TNR reference frames
+ *    is being used.
+ */
+#define NUM_VALID_TNR_REF_FRAMES (1) /* At least one valid TNR reference frame is required */
+#define NUM_TNR_FRAMES_PER_REF_BUF_SET (2)
+
+/* In luma-only mode alternate illuminated frames are supported, that requires two double buffers */
+#ifdef ENABLE_LUMA_ONLY
+#define NUM_TNR_REF_BUF_SETS (2)
+#else
+#define NUM_TNR_REF_BUF_SETS (1)
+#endif
+
+#define NUM_TNR_FRAMES (NUM_TNR_FRAMES_PER_REF_BUF_SET * NUM_TNR_REF_BUF_SETS)
+
+#define MAX_NUM_DELAY_FRAMES MAX(MAX_NUM_VIDEO_DELAY_FRAMES, NUM_PREVIEW_DVS_FRAMES)
+
+#endif
+
+/* Note that this is the define used to configure all data structures that are common to all modes */
+/* It should be equal to or bigger than the maximum number of DVS frames over all possible modes */
+/* Rules: these implement logic shared between the host code and the ISP firmware.
+ The ISP firmware needs these rules to be applied at pre-processor time,
+ which is why these are macros, not functions. */
+#define _ISP_BQS(num) ((num)/2)
+#define _ISP_VECS(width) CEIL_DIV(width, ISP_VEC_NELEMS)
+
+#define ISP_BQ_GRID_WIDTH(elements_per_line, deci_factor_log2) \
+ CEIL_SHIFT(elements_per_line/2, deci_factor_log2)
+#define ISP_BQ_GRID_HEIGHT(lines_per_frame, deci_factor_log2) \
+ CEIL_SHIFT(lines_per_frame/2, deci_factor_log2)
+#define ISP_C_VECTORS_PER_LINE(elements_per_line) \
+ _ISP_VECS(elements_per_line/2)
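+
+/* Worked example, assuming ISP_VEC_NELEMS == 64: for a 1920-element line with
+ * deci_factor_log2 == 4, ISP_BQ_GRID_WIDTH(1920, 4) = CEIL_SHIFT(960, 4) = 60,
+ * which stays within SH_CSS_MAX_BQ_GRID_WIDTH (80). Likewise
+ * _ISP_VECS(1920) = CEIL_DIV(1920, 64) = 30 vectors per line. */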
+
+/* The morphing table is similar to the shading table in the sense that we
+ have 1 more value than we have cells in the grid. */
+#define _ISP_MORPH_TABLE_WIDTH(int_width) \
+ (CEIL_DIV(int_width, SH_CSS_MORPH_TABLE_GRID) + 1)
+#define _ISP_MORPH_TABLE_HEIGHT(int_height) \
+ (CEIL_DIV(int_height, SH_CSS_MORPH_TABLE_GRID) + 1)
+#define _ISP_MORPH_TABLE_ALIGNED_WIDTH(width) \
+ CEIL_MUL(_ISP_MORPH_TABLE_WIDTH(width), \
+ SH_CSS_MORPH_TABLE_ELEMS_PER_DDR_WORD)
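+
+/* Worked example, assuming SH_CSS_MORPH_TABLE_GRID == 64 (ISP_VEC_NELEMS) and
+ * HIVE_ISP_DDR_WORD_BYTES == 32: for an internal width of 1280,
+ * _ISP_MORPH_TABLE_WIDTH(1280) = CEIL_DIV(1280, 64) + 1 = 21, and with
+ * SH_CSS_MORPH_TABLE_ELEMS_PER_DDR_WORD = 32 / 2 = 16 the aligned width is
+ * CEIL_MUL(21, 16) = 32 elements. */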
+
+#ifndef ISP2401
+#define _ISP_SCTBL_WIDTH_PER_COLOR(input_width, deci_factor_log2) \
+ (ISP_BQ_GRID_WIDTH(input_width, deci_factor_log2) + 1)
+#define _ISP_SCTBL_HEIGHT(input_height, deci_factor_log2) \
+ (ISP_BQ_GRID_HEIGHT(input_height, deci_factor_log2) + 1)
+#define _ISP_SCTBL_ALIGNED_WIDTH_PER_COLOR(input_width, deci_factor_log2) \
+ CEIL_MUL(_ISP_SCTBL_WIDTH_PER_COLOR(input_width, deci_factor_log2), \
+ ISP_VEC_NELEMS)
+
+#endif
+/* *****************************************************************
+ * Statistics for 3A (Auto Focus, Auto White Balance, Auto Exposure)
+ * *****************************************************************/
+/* if left cropping is used, 3A statistics are also cropped by 2 vectors. */
+#define _ISP_S3ATBL_WIDTH(in_width, deci_factor_log2) \
+ (_ISP_BQS(in_width) >> deci_factor_log2)
+#define _ISP_S3ATBL_HEIGHT(in_height, deci_factor_log2) \
+ (_ISP_BQS(in_height) >> deci_factor_log2)
+#define _ISP_S3A_ELEMS_ISP_WIDTH(width, left_crop) \
+ (width - ((left_crop) ? 2 * ISP_VEC_NELEMS : 0))
+
+#define _ISP_S3ATBL_ISP_WIDTH(in_width, deci_factor_log2) \
+ CEIL_SHIFT(_ISP_BQS(in_width), deci_factor_log2)
+#define _ISP_S3ATBL_ISP_HEIGHT(in_height, deci_factor_log2) \
+ CEIL_SHIFT(_ISP_BQS(in_height), deci_factor_log2)
+#define ISP_S3ATBL_VECTORS \
+ _ISP_VECS(SH_CSS_MAX_S3ATBL_WIDTH * \
+ (sizeof(struct ia_css_3a_output)/sizeof(int32_t)))
+#define ISP_S3ATBL_HI_LO_STRIDE \
+ (ISP_S3ATBL_VECTORS * ISP_VEC_NELEMS)
+#define ISP_S3ATBL_HI_LO_STRIDE_BYTES \
+ (sizeof(unsigned short) * ISP_S3ATBL_HI_LO_STRIDE)
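+
+/* Worked example: for in_width = 1920 and deci_factor_log2 = 4,
+ * _ISP_S3ATBL_WIDTH(1920, 4) = (1920/2) >> 4 = 60 grid cells, and
+ * _ISP_S3ATBL_ISP_WIDTH(1920, 4) = CEIL_SHIFT(960, 4) = 60 as well;
+ * both stay below SH_CSS_MAX_S3ATBL_WIDTH (80). */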
+
+/* Viewfinder support */
+#define __ISP_MAX_VF_OUTPUT_WIDTH(width, left_crop) \
+ (width - 2*ISP_VEC_NELEMS + ((left_crop) ? 2 * ISP_VEC_NELEMS : 0))
+
+#define __ISP_VF_OUTPUT_WIDTH_VECS(out_width, vf_log_downscale) \
+ (_ISP_VECS((out_width) >> (vf_log_downscale)))
+
+#define _ISP_VF_OUTPUT_WIDTH(vf_out_vecs) ((vf_out_vecs) * ISP_VEC_NELEMS)
+#define _ISP_VF_OUTPUT_HEIGHT(out_height, vf_log_ds) \
+ ((out_height) >> (vf_log_ds))
+
+#define _ISP_LOG_VECTOR_STEP(mode) \
+ ((mode) == IA_CSS_BINARY_MODE_CAPTURE_PP ? 2 : 1)
+
+/* It is preferred to have not more than 2x scaling at one step
+ * in GDC (assumption is for capture_pp and yuv_scale stages) */
+#define MAX_PREFERRED_YUV_DS_PER_STEP 2
+
+/* Rules for computing the internal width. This is extremely complicated
+ * and definitely needs to be commented and explained. */
+#define _ISP_LEFT_CROP_EXTRA(left_crop) ((left_crop) > 0 ? 2*ISP_VEC_NELEMS : 0)
+
+#define __ISP_MIN_INTERNAL_WIDTH(num_chunks, pipelining, mode) \
+ ((num_chunks) * (pipelining) * (1<<_ISP_LOG_VECTOR_STEP(mode)) * \
+ ISP_VEC_NELEMS)
+
+#define __ISP_PADDED_OUTPUT_WIDTH(out_width, dvs_env_width, left_crop) \
+ ((out_width) + MAX(dvs_env_width, _ISP_LEFT_CROP_EXTRA(left_crop)))
+
+#define __ISP_CHUNK_STRIDE_ISP(mode) \
+ ((1<<_ISP_LOG_VECTOR_STEP(mode)) * ISP_VEC_NELEMS)
+
+#define __ISP_CHUNK_STRIDE_DDR(c_subsampling, num_chunks) \
+ ((c_subsampling) * (num_chunks) * HIVE_ISP_DDR_WORD_BYTES)
+#define __ISP_INTERNAL_WIDTH(out_width, \
+ dvs_env_width, \
+ left_crop, \
+ mode, \
+ c_subsampling, \
+ num_chunks, \
+ pipelining) \
+ CEIL_MUL2(CEIL_MUL2(MAX(__ISP_PADDED_OUTPUT_WIDTH(out_width, \
+ dvs_env_width, \
+ left_crop), \
+ __ISP_MIN_INTERNAL_WIDTH(num_chunks, \
+ pipelining, \
+ mode) \
+ ), \
+ __ISP_CHUNK_STRIDE_ISP(mode) \
+ ), \
+ __ISP_CHUNK_STRIDE_DDR(c_subsampling, num_chunks) \
+ )
+
+#define __ISP_INTERNAL_HEIGHT(out_height, dvs_env_height, top_crop) \
+ ((out_height) + (dvs_env_height) + top_crop)
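+
+/* Worked example of the width rule, assuming ISP_VEC_NELEMS == 64 and
+ * HIVE_ISP_DDR_WORD_BYTES == 32: out_width = 1920, dvs_env_width = 128,
+ * left_crop = 0, a non-capture_pp mode (vector step 1), c_subsampling = 1,
+ * num_chunks = 1, pipelining = 2. Then the padded output width is
+ * 1920 + MAX(128, 0) = 2048, the minimum internal width is
+ * 1 * 2 * 2 * 64 = 256, and rounding 2048 up to the ISP chunk stride (128)
+ * and the DDR chunk stride (32) leaves 2048. The height rule simply adds the
+ * DVS envelope and top crop, e.g. 1080 + 96 + 0 = 1176. */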
+
+/* @GC: Input can be up to sensor resolution when either bayer downscaling
+ * or raw binning is enabled.
+ * Also, during continuous mode, we need to align to 4*NWAY since input
+ * should support binning */
+#define _ISP_MAX_INPUT_WIDTH(max_internal_width, enable_ds, enable_fixed_bayer_ds, enable_raw_bin, \
+ enable_continuous) \
+ ((enable_ds) ? \
+ SH_CSS_MAX_SENSOR_WIDTH :\
+ (enable_fixed_bayer_ds) ? \
+ CEIL_MUL(SH_CSS_MAX_CONTINUOUS_SENSOR_WIDTH_DEC, 4*ISP_VEC_NELEMS) : \
+ (enable_raw_bin) ? \
+ CEIL_MUL(SH_CSS_MAX_CONTINUOUS_SENSOR_WIDTH, 4*ISP_VEC_NELEMS) : \
+ (enable_continuous) ? \
+ SH_CSS_MAX_CONTINUOUS_SENSOR_WIDTH \
+ : max_internal_width)
+
+#define _ISP_INPUT_WIDTH(internal_width, ds_input_width, enable_ds) \
+ ((enable_ds) ? (ds_input_width) : (internal_width))
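+
+/* Worked example, assuming ISP_VEC_NELEMS == 64 on a non-FPGA system:
+ * with enable_ds == 0 and enable_fixed_bayer_ds == 1, the maximum input
+ * width is CEIL_MUL(SH_CSS_MAX_CONTINUOUS_SENSOR_WIDTH_DEC, 4 * 64) =
+ * CEIL_MUL(4608, 256) = 4608, since 4608 is already a multiple of 256. */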
+
+#define _ISP_MAX_INPUT_HEIGHT(max_internal_height, enable_ds, enable_fixed_bayer_ds, enable_raw_bin, \
+ enable_continuous) \
+ ((enable_ds) ? \
+ SH_CSS_MAX_SENSOR_HEIGHT :\
+ (enable_fixed_bayer_ds) ? \
+ SH_CSS_MAX_CONTINUOUS_SENSOR_HEIGHT_DEC : \
+ (enable_raw_bin || enable_continuous) ? \
+ SH_CSS_MAX_CONTINUOUS_SENSOR_HEIGHT \
+ : max_internal_height)
+
+#define _ISP_INPUT_HEIGHT(internal_height, ds_input_height, enable_ds) \
+ ((enable_ds) ? (ds_input_height) : (internal_height))
+
+#define SH_CSS_MAX_STAGES 8 /* primary_stage[1-6], capture_pp, vf_pp */
+
+/* The CSI2+ input system requires extra padding from vmem */
+#ifdef CONFIG_CSI2_PLUS
+#define _ISP_EXTRA_PADDING_VECS 2
+#else
+#define _ISP_EXTRA_PADDING_VECS 0
+#endif /* CONFIG_CSI2_PLUS */
+
+#endif /* _SH_CSS_DEFS_H_ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_dvs_info.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_dvs_info.h
new file mode 100644
index 000000000000..23044aad654f
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_dvs_info.h
@@ -0,0 +1,36 @@
+/**
+Support for Intel Camera Imaging ISP subsystem.
+Copyright (c) 2010 - 2015, Intel Corporation.
+
+This program is free software; you can redistribute it and/or modify it
+under the terms and conditions of the GNU General Public License,
+version 2, as published by the Free Software Foundation.
+
+This program is distributed in the hope it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+more details.
+*/
+
+#ifndef __SH_CSS_DVS_INFO_H__
+#define __SH_CSS_DVS_INFO_H__
+
+#include <math_support.h>
+
+/* horizontal 64x64 blocks round up to DVS_BLOCKDIM_X, make even */
+#define DVS_NUM_BLOCKS_X(X) (CEIL_MUL(CEIL_DIV((X), DVS_BLOCKDIM_X), 2))
+
+/* vertical 64x64 blocks round up to DVS_BLOCKDIM_Y */
+#define DVS_NUM_BLOCKS_Y(X) (CEIL_DIV((X), DVS_BLOCKDIM_Y_LUMA))
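+
+/* Worked example, assuming DVS_BLOCKDIM_X == 64 and DVS_BLOCKDIM_Y_LUMA == 64
+ * (the 64x64 blocks mentioned above): DVS_NUM_BLOCKS_X(1920) =
+ * CEIL_MUL(CEIL_DIV(1920, 64), 2) = CEIL_MUL(30, 2) = 30 and
+ * DVS_NUM_BLOCKS_Y(1080) = CEIL_DIV(1080, 64) = 17. */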
+
+/* Bilinear interpolation (HRT_GDC_BLI_MODE) is the supported method currently.
+ * Bicubic interpolation (HRT_GDC_BCI_MODE) is not supported yet */
+#define DVS_GDC_INTERP_METHOD HRT_GDC_BLI_MODE
+
+#define DVS_INPUT_BYTES_PER_PIXEL (1)
+
+#define DVS_NUM_BLOCKS_X_CHROMA(X) (CEIL_DIV((X), DVS_BLOCKDIM_X))
+
+#define DVS_NUM_BLOCKS_Y_CHROMA(X) (CEIL_DIV((X), DVS_BLOCKDIM_Y_CHROMA))
+
+#endif /* __SH_CSS_DVS_INFO_H__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_firmware.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_firmware.c
new file mode 100644
index 000000000000..34cc56f0b471
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_firmware.c
@@ -0,0 +1,342 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+#include <math_support.h>
+#include "platform_support.h"
+#include "sh_css_firmware.h"
+
+#include "sh_css_defs.h"
+#include "ia_css_debug.h"
+#include "sh_css_internal.h"
+#include "ia_css_isp_param.h"
+
+#include "memory_access.h"
+#include "assert_support.h"
+#include "string_support.h"
+
+#include "isp.h" /* PMEM_WIDTH_LOG2 */
+
+#include "ia_css_isp_params.h"
+#include "ia_css_isp_configs.h"
+#include "ia_css_isp_states.h"
+
+#define _STR(x) #x
+#define STR(x) _STR(x)
+
+struct firmware_header {
+ struct sh_css_fw_bi_file_h file_header;
+ struct ia_css_fw_info binary_header;
+};
+
+struct fw_param {
+ const char *name;
+ const void *buffer;
+};
+
+/* Warning: same order as SH_CSS_BINARY_ID_* */
+static struct firmware_header *firmware_header;
+
+/* The string STR is a placeholder
+ * which will be replaced with the actual RELEASE_VERSION
+ * during package generation. Please do not modify */
+#ifndef ISP2401
+static const char *release_version = STR(irci_stable_candrpv_0415_20150521_0458);
+#else
+static const char *release_version = STR(irci_ecr-master_20150911_0724);
+#endif
+
+#define MAX_FW_REL_VER_NAME 300
+static char FW_rel_ver_name[MAX_FW_REL_VER_NAME] = "---";
+
+struct ia_css_fw_info sh_css_sp_fw;
+#if defined(HAS_BL)
+struct ia_css_fw_info sh_css_bl_fw;
+#endif /* HAS_BL */
+struct ia_css_blob_descr *sh_css_blob_info; /* Only ISP blob info (no SP) */
+unsigned sh_css_num_binaries; /* This includes 1 SP binary */
+
+static struct fw_param *fw_minibuffer;
+
+
+char *sh_css_get_fw_version(void)
+{
+ return FW_rel_ver_name;
+}
+
+
+/*
+ * Split the loaded firmware into blobs
+ */
+
+/* Setup sp/sp1 binary */
+static enum ia_css_err
+setup_binary(struct ia_css_fw_info *fw, const char *fw_data, struct ia_css_fw_info *sh_css_fw, unsigned binary_id)
+{
+ const char *blob_data;
+
+ if ((fw == NULL) || (fw_data == NULL))
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+
+ blob_data = fw_data + fw->blob.offset;
+
+ *sh_css_fw = *fw;
+
+#if defined(HRT_UNSCHED)
+ sh_css_fw->blob.code = vmalloc(1);
+#else
+ sh_css_fw->blob.code = vmalloc(fw->blob.size);
+#endif
+
+ if (sh_css_fw->blob.code == NULL)
+ return IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+
+ memcpy((void *)sh_css_fw->blob.code, blob_data, fw->blob.size);
+ sh_css_fw->blob.data = (char *)sh_css_fw->blob.code + fw->blob.data_source;
+ fw_minibuffer[binary_id].buffer = sh_css_fw->blob.code;
+
+ return IA_CSS_SUCCESS;
+}
+enum ia_css_err
+sh_css_load_blob_info(const char *fw, const struct ia_css_fw_info *bi, struct ia_css_blob_descr *bd, unsigned index)
+{
+ const char *name;
+ const unsigned char *blob;
+
+ if ((fw == NULL) || (bd == NULL))
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+
+	/* Special case: only one binary in fw */
+	if (bi == NULL)
+		bi = (const struct ia_css_fw_info *)fw;
+
+ name = fw + bi->blob.prog_name_offset;
+ blob = (const unsigned char *)fw + bi->blob.offset;
+
+ /* sanity check */
+ if (bi->blob.size != bi->blob.text_size + bi->blob.icache_size + bi->blob.data_size + bi->blob.padding_size) {
+ /* sanity check, note the padding bytes added for section to DDR alignment */
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+
+ if ((bi->blob.offset % (1UL<<(ISP_PMEM_WIDTH_LOG2-3))) != 0)
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+
+ bd->blob = blob;
+ bd->header = *bi;
+
+ if ((bi->type == ia_css_isp_firmware) || (bi->type == ia_css_sp_firmware)
+#if defined(HAS_BL)
+ || (bi->type == ia_css_bootloader_firmware)
+#endif /* HAS_BL */
+ )
+ {
+ char *namebuffer;
+ int namelength = (int)strlen(name);
+
+ namebuffer = (char *) kmalloc(namelength + 1, GFP_KERNEL);
+ if (namebuffer == NULL)
+ return IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+
+ memcpy(namebuffer, name, namelength + 1);
+
+ bd->name = fw_minibuffer[index].name = namebuffer;
+ } else {
+ bd->name = name;
+ }
+
+ if (bi->type == ia_css_isp_firmware) {
+ size_t paramstruct_size = sizeof(struct ia_css_memory_offsets);
+ size_t configstruct_size = sizeof(struct ia_css_config_memory_offsets);
+ size_t statestruct_size = sizeof(struct ia_css_state_memory_offsets);
+
+ char *parambuf = (char *)kmalloc(paramstruct_size + configstruct_size + statestruct_size,
+ GFP_KERNEL);
+ if (parambuf == NULL)
+ return IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+
+ bd->mem_offsets.array[IA_CSS_PARAM_CLASS_PARAM].ptr = NULL;
+ bd->mem_offsets.array[IA_CSS_PARAM_CLASS_CONFIG].ptr = NULL;
+ bd->mem_offsets.array[IA_CSS_PARAM_CLASS_STATE].ptr = NULL;
+
+ fw_minibuffer[index].buffer = parambuf;
+
+ /* copy ia_css_memory_offsets */
+ memcpy(parambuf, (void *)(fw + bi->blob.memory_offsets.offsets[IA_CSS_PARAM_CLASS_PARAM]),
+ paramstruct_size);
+ bd->mem_offsets.array[IA_CSS_PARAM_CLASS_PARAM].ptr = parambuf;
+
+ /* copy ia_css_config_memory_offsets */
+ memcpy(parambuf + paramstruct_size,
+ (void *)(fw + bi->blob.memory_offsets.offsets[IA_CSS_PARAM_CLASS_CONFIG]),
+ configstruct_size);
+ bd->mem_offsets.array[IA_CSS_PARAM_CLASS_CONFIG].ptr = parambuf + paramstruct_size;
+
+ /* copy ia_css_state_memory_offsets */
+ memcpy(parambuf + paramstruct_size + configstruct_size,
+ (void *)(fw + bi->blob.memory_offsets.offsets[IA_CSS_PARAM_CLASS_STATE]),
+ statestruct_size);
+ bd->mem_offsets.array[IA_CSS_PARAM_CLASS_STATE].ptr = parambuf + paramstruct_size + configstruct_size;
+ }
+ return IA_CSS_SUCCESS;
+}
+
+bool
+sh_css_check_firmware_version(const char *fw_data)
+{
+ struct sh_css_fw_bi_file_h *file_header;
+
+ firmware_header = (struct firmware_header *)fw_data;
+ file_header = &firmware_header->file_header;
+
+ if (strcmp(file_header->version, release_version) != 0) {
+ return false;
+ } else {
+ /* firmware version matches */
+ return true;
+ }
+}
+
+enum ia_css_err
+sh_css_load_firmware(const char *fw_data,
+ unsigned int fw_size)
+{
+ unsigned i;
+ struct ia_css_fw_info *binaries;
+ struct sh_css_fw_bi_file_h *file_header;
+ bool valid_firmware = false;
+
+ firmware_header = (struct firmware_header *)fw_data;
+ file_header = &firmware_header->file_header;
+ binaries = &firmware_header->binary_header;
+ strncpy(FW_rel_ver_name, file_header->version, min(sizeof(FW_rel_ver_name), sizeof(file_header->version)) - 1);
+ valid_firmware = sh_css_check_firmware_version(fw_data);
+ if (!valid_firmware) {
+#if !defined(HRT_RTL)
+ IA_CSS_ERROR("CSS code version (%s) and firmware version (%s) mismatch!",
+ file_header->version, release_version);
+ return IA_CSS_ERR_VERSION_MISMATCH;
+#endif
+ } else {
+		IA_CSS_LOG("successfully loaded firmware version %s", release_version);
+ }
+
+ /* some sanity checks */
+ if (!fw_data || fw_size < sizeof(struct sh_css_fw_bi_file_h))
+ return IA_CSS_ERR_INTERNAL_ERROR;
+
+ if (file_header->h_size != sizeof(struct sh_css_fw_bi_file_h))
+ return IA_CSS_ERR_INTERNAL_ERROR;
+
+ sh_css_num_binaries = file_header->binary_nr;
+ /* Only allocate memory for ISP blob info */
+ if (sh_css_num_binaries > (NUM_OF_SPS + NUM_OF_BLS)) {
+ sh_css_blob_info = kmalloc(
+ (sh_css_num_binaries - (NUM_OF_SPS + NUM_OF_BLS)) *
+ sizeof(*sh_css_blob_info), GFP_KERNEL);
+ if (sh_css_blob_info == NULL)
+ return IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+ } else {
+ sh_css_blob_info = NULL;
+ }
+
+ fw_minibuffer = kzalloc(sh_css_num_binaries * sizeof(struct fw_param), GFP_KERNEL);
+ if (fw_minibuffer == NULL)
+ return IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+
+ for (i = 0; i < sh_css_num_binaries; i++) {
+ struct ia_css_fw_info *bi = &binaries[i];
+ /* note: the var below is made static as it is quite large;
+ if it is not static it ends up on the stack which could
+ cause issues for drivers
+ */
+ static struct ia_css_blob_descr bd;
+ enum ia_css_err err;
+
+ err = sh_css_load_blob_info(fw_data, bi, &bd, i);
+
+ if (err != IA_CSS_SUCCESS)
+ return IA_CSS_ERR_INTERNAL_ERROR;
+
+ if (bi->blob.offset + bi->blob.size > fw_size)
+ return IA_CSS_ERR_INTERNAL_ERROR;
+
+ if (bi->type == ia_css_sp_firmware) {
+ if (i != SP_FIRMWARE)
+ return IA_CSS_ERR_INTERNAL_ERROR;
+ err = setup_binary(bi, fw_data, &sh_css_sp_fw, i);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+#if defined(HAS_BL)
+ } else if (bi->type == ia_css_bootloader_firmware) {
+ if (i != BOOTLOADER_FIRMWARE)
+ return IA_CSS_ERR_INTERNAL_ERROR;
+ err = setup_binary(bi, fw_data, &sh_css_bl_fw, i);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+ IA_CSS_LOG("Bootloader binary recognized\n");
+#endif
+ } else {
+ /* All subsequent binaries (including bootloaders) (i>NUM_OF_SPS+NUM_OF_BLS) are ISP firmware */
+ if (i < (NUM_OF_SPS + NUM_OF_BLS))
+ return IA_CSS_ERR_INTERNAL_ERROR;
+
+ if (bi->type != ia_css_isp_firmware)
+ return IA_CSS_ERR_INTERNAL_ERROR;
+ if (sh_css_blob_info == NULL) /* cannot happen but KW does not see this */
+ return IA_CSS_ERR_INTERNAL_ERROR;
+ sh_css_blob_info[i-(NUM_OF_SPS + NUM_OF_BLS)] = bd;
+ }
+ }
+
+ return IA_CSS_SUCCESS;
+}
+
+void sh_css_unload_firmware(void)
+{
+
+ /* release firmware minibuffer */
+ if (fw_minibuffer) {
+ unsigned int i = 0;
+ for (i = 0; i < sh_css_num_binaries; i++) {
+ if (fw_minibuffer[i].name)
+ kfree((void *)fw_minibuffer[i].name);
+ if (fw_minibuffer[i].buffer)
+ vfree((void *)fw_minibuffer[i].buffer);
+ }
+ kfree(fw_minibuffer);
+ fw_minibuffer = NULL;
+ }
+
+ memset(&sh_css_sp_fw, 0, sizeof(sh_css_sp_fw));
+ if (sh_css_blob_info) {
+ kfree(sh_css_blob_info);
+ sh_css_blob_info = NULL;
+ }
+ sh_css_num_binaries = 0;
+}
+
+hrt_vaddress
+sh_css_load_blob(const unsigned char *blob, unsigned size)
+{
+ hrt_vaddress target_addr = mmgr_malloc(size);
+ /* this will allocate memory aligned to a DDR word boundary which
+	/* This will allocate memory aligned to a DDR word boundary, which
+	 * is required for the CSS DMA to read the instructions. */
+ assert(blob != NULL);
+ if (target_addr)
+ mmgr_store(target_addr, blob, size);
+ return target_addr;
+}
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_firmware.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_firmware.h
new file mode 100644
index 000000000000..588aabde8a86
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_firmware.h
@@ -0,0 +1,54 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _SH_CSS_FIRMWARE_H_
+#define _SH_CSS_FIRMWARE_H_
+
+#include <system_types.h>
+
+#include <ia_css_err.h>
+#include <ia_css_acc_types.h>
+
+/* This is for the firmware loaded from user space */
+struct sh_css_fw_bi_file_h {
+ char version[64]; /* branch tag + week day + time */
+ int binary_nr; /* Number of binaries */
+ unsigned int h_size; /* sizeof(struct sh_css_fw_bi_file_h) */
+};
+
+extern struct ia_css_fw_info sh_css_sp_fw;
+#if defined(HAS_BL)
+extern struct ia_css_fw_info sh_css_bl_fw;
+#endif /* HAS_BL */
+extern struct ia_css_blob_descr *sh_css_blob_info;
+extern unsigned sh_css_num_binaries;
+
+char
+*sh_css_get_fw_version(void);
+
+bool
+sh_css_check_firmware_version(const char *fw_data);
+
+enum ia_css_err
+sh_css_load_firmware(const char *fw_data,
+ unsigned int fw_size);
+
+void sh_css_unload_firmware(void);
+
+hrt_vaddress sh_css_load_blob(const unsigned char *blob, unsigned size);
+
+enum ia_css_err
+sh_css_load_blob_info(const char *fw, const struct ia_css_fw_info *bi, struct ia_css_blob_descr *bd, unsigned int i);
+
+#endif /* _SH_CSS_FIRMWARE_H_ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_frac.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_frac.h
new file mode 100644
index 000000000000..1d1771d71f3c
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_frac.h
@@ -0,0 +1,40 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __SH_CSS_FRAC_H
+#define __SH_CSS_FRAC_H
+
+#include <math_support.h>
+
+#define sISP_REG_BIT ISP_VEC_ELEMBITS
+#define uISP_REG_BIT ((unsigned)(sISP_REG_BIT-1))
+#define sSHIFT (16-sISP_REG_BIT)
+#define uSHIFT ((unsigned)(16-uISP_REG_BIT))
+#define sFRACTION_BITS_FITTING(a) (a-sSHIFT)
+#define uFRACTION_BITS_FITTING(a) ((unsigned)(a-uSHIFT))
+#define sISP_VAL_MIN (-(1<<uISP_REG_BIT))
+#define sISP_VAL_MAX ((1<<uISP_REG_BIT)-1)
+#define uISP_VAL_MIN ((unsigned)0)
+#define uISP_VAL_MAX ((unsigned)((1<<uISP_REG_BIT)-1))
+
+/* a:fraction bits for 16bit precision, b:fraction bits for ISP precision */
+#define sDIGIT_FITTING(v, a, b) \
+ min(max((((v)>>sSHIFT) >> max(sFRACTION_BITS_FITTING(a)-(b), 0)), \
+ sISP_VAL_MIN), sISP_VAL_MAX)
+#define uDIGIT_FITTING(v, a, b) \
+ min((unsigned)max((unsigned)(((v)>>uSHIFT) \
+ >> max((int)(uFRACTION_BITS_FITTING(a)-(b)), 0)), \
+ uISP_VAL_MIN), uISP_VAL_MAX)
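+
+/* Worked example, assuming ISP_VEC_ELEMBITS == 14: uISP_REG_BIT == 13,
+ * uSHIFT == 3 and uISP_VAL_MAX == 8191. uDIGIT_FITTING(v, 16, 13) then
+ * shifts a 16-bit fraction v right by 3 (with no further shift, since
+ * uFRACTION_BITS_FITTING(16) - 13 == 0) and clamps to [0, 8191], so
+ * v == 65535 maps to 8191. */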
+
+#endif /* __SH_CSS_FRAC_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_host_data.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_host_data.c
new file mode 100644
index 000000000000..348183a221a8
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_host_data.c
@@ -0,0 +1,42 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/slab.h>
+#include <ia_css_host_data.h>
+#include <sh_css_internal.h>
+
+struct ia_css_host_data *ia_css_host_data_allocate(size_t size)
+{
+ struct ia_css_host_data *me;
+
+ me = kmalloc(sizeof(struct ia_css_host_data), GFP_KERNEL);
+ if (!me)
+ return NULL;
+ me->size = (uint32_t)size;
+ me->address = sh_css_malloc(size);
+ if (!me->address) {
+ kfree(me);
+ return NULL;
+ }
+ return me;
+}
+
+void ia_css_host_data_free(struct ia_css_host_data *me)
+{
+ if (me) {
+ sh_css_free(me->address);
+ me->address = NULL;
+ kfree(me);
+ }
+}
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_hrt.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_hrt.c
new file mode 100644
index 000000000000..0bfebced63af
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_hrt.c
@@ -0,0 +1,84 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "platform_support.h"
+
+#include "sh_css_hrt.h"
+#include "ia_css_debug.h"
+
+#include "device_access.h"
+
+#define __INLINE_EVENT__
+#include "event_fifo.h"
+#define __INLINE_SP__
+#include "sp.h"
+#define __INLINE_ISP__
+#include "isp.h"
+#define __INLINE_IRQ__
+#include "irq.h"
+#define __INLINE_FIFO_MONITOR__
+#include "fifo_monitor.h"
+
+/* System independent */
+#include "sh_css_internal.h"
+
+bool sh_css_hrt_system_is_idle(void)
+{
+ bool not_idle = false, idle;
+ fifo_channel_t ch;
+
+ idle = sp_ctrl_getbit(SP0_ID, SP_SC_REG, SP_IDLE_BIT);
+ not_idle |= !idle;
+ if (!idle)
+ IA_CSS_WARNING("SP not idle");
+
+ idle = isp_ctrl_getbit(ISP0_ID, ISP_SC_REG, ISP_IDLE_BIT);
+ not_idle |= !idle;
+ if (!idle)
+ IA_CSS_WARNING("ISP not idle");
+
+ for (ch=0; ch<N_FIFO_CHANNEL; ch++) {
+ fifo_channel_state_t state;
+ fifo_channel_get_state(FIFO_MONITOR0_ID, ch, &state);
+ if (state.fifo_valid) {
+ IA_CSS_WARNING("FIFO channel %d is not empty", ch);
+ not_idle = true;
+ }
+ }
+
+ return !not_idle;
+}
+
+enum ia_css_err sh_css_hrt_sp_wait(void)
+{
+#if defined(HAS_IRQ_MAP_VERSION_2)
+ irq_sw_channel_id_t irq_id = IRQ_SW_CHANNEL0_ID;
+#else
+ irq_sw_channel_id_t irq_id = IRQ_SW_CHANNEL2_ID;
+#endif
+ /*
+	 * Wait until the SP is idle or until there is a SW2 interrupt.
+	 * The SW2 interrupt is used when the frame loop runs on the SP
+	 * and signals an event with a similar meaning as SP idle
+	 * (e.g. frame_done).
+ */
+ while (!sp_ctrl_getbit(SP0_ID, SP_SC_REG, SP_IDLE_BIT) &&
+ ((irq_reg_load(IRQ0_ID,
+ _HRT_IRQ_CONTROLLER_STATUS_REG_IDX) &
+ (1U<<(irq_id + IRQ_SW_CHANNEL_OFFSET))) == 0)) {
+ hrt_sleep();
+ }
+
+	return IA_CSS_SUCCESS;
+}
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_hrt.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_hrt.h
new file mode 100644
index 000000000000..fd23ed1848ec
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_hrt.h
@@ -0,0 +1,34 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _SH_CSS_HRT_H_
+#define _SH_CSS_HRT_H_
+
+#include <sp.h>
+#include <isp.h>
+
+#include <ia_css_err.h>
+
+/* SP access */
+void sh_css_hrt_sp_start_si(void);
+
+void sh_css_hrt_sp_start_copy_frame(void);
+
+void sh_css_hrt_sp_start_isp(void);
+
+enum ia_css_err sh_css_hrt_sp_wait(void);
+
+bool sh_css_hrt_system_is_idle(void);
+
+#endif /* _SH_CSS_HRT_H_ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_internal.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_internal.h
new file mode 100644
index 000000000000..e2b6f06ed099
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_internal.h
@@ -0,0 +1,1096 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _SH_CSS_INTERNAL_H_
+#define _SH_CSS_INTERNAL_H_
+
+#include <system_global.h>
+#include <math_support.h>
+#include <type_support.h>
+#include <platform_support.h>
+#include <stdarg.h>
+
+#if !defined(HAS_NO_INPUT_FORMATTER)
+#include "input_formatter.h"
+#endif
+#if !defined(HAS_NO_INPUT_SYSTEM)
+#include "input_system.h"
+#endif
+
+#include "ia_css_types.h"
+#include "ia_css_acc_types.h"
+#include "ia_css_buffer.h"
+
+#include "ia_css_binary.h"
+#if !defined(__ISP)
+#include "sh_css_firmware.h" /* not needed/desired on SP/ISP */
+#endif
+#include "sh_css_legacy.h"
+#include "sh_css_defs.h"
+#include "sh_css_uds.h"
+#include "dma.h" /* N_DMA_CHANNEL_ID */
+#include "ia_css_circbuf_comm.h" /* Circular buffer */
+#include "ia_css_frame_comm.h"
+#include "ia_css_3a.h"
+#include "ia_css_dvs.h"
+#include "ia_css_metadata.h"
+#include "runtime/bufq/interface/ia_css_bufq.h"
+#include "ia_css_timer.h"
+
+/* TODO: Move to a more suitable place when sp pipeline design is done. */
+#define IA_CSS_NUM_CB_SEM_READ_RESOURCE 2
+#define IA_CSS_NUM_CB_SEM_WRITE_RESOURCE 1
+#define IA_CSS_NUM_CBS 2
+#define IA_CSS_CB_MAX_ELEMS 2
+
+/* Use-case specific. The index is limited to IA_CSS_NUM_CB_SEM_READ_RESOURCE or
+ * IA_CSS_NUM_CB_SEM_WRITE_RESOURCE for read and write respectively.
+ * TODO: Enforce the limitation above.
+ */
+#define IA_CSS_COPYSINK_SEM_INDEX 0
+#define IA_CSS_TAGGER_SEM_INDEX 1
+
+/* Force generation of output event. Used by acceleration pipe. */
+#define IA_CSS_POST_OUT_EVENT_FORCE 2
+
+#define SH_CSS_MAX_BINARY_NAME 64
+
+#define SP_DEBUG_NONE (0)
+#define SP_DEBUG_DUMP (1)
+#define SP_DEBUG_COPY (2)
+#define SP_DEBUG_TRACE (3)
+#define SP_DEBUG_MINIMAL (4)
+
+#define SP_DEBUG SP_DEBUG_NONE
+#define SP_DEBUG_MINIMAL_OVERWRITE 1
+
+#define SH_CSS_TNR_BIT_DEPTH 8
+#define SH_CSS_REF_BIT_DEPTH 8
+
+/* keep next up to date with the definition for MAX_CB_ELEMS_FOR_TAGGER in tagger.sp.c */
+#if defined(HAS_SP_2400)
+#define NUM_CONTINUOUS_FRAMES 15
+#else
+#define NUM_CONTINUOUS_FRAMES 10
+#endif
+#define NUM_MIPI_FRAMES_PER_STREAM 2
+
+#define NUM_ONLINE_INIT_CONTINUOUS_FRAMES 2
+
+#define NR_OF_PIPELINES IA_CSS_PIPE_ID_NUM /* Must match with IA_CSS_PIPE_ID_NUM */
+
+#define SH_CSS_MAX_IF_CONFIGS 3 /* Must match with IA_CSS_NR_OF_CONFIGS (not defined yet).*/
+#define SH_CSS_IF_CONFIG_NOT_NEEDED 0xFF
+
+#if defined(USE_INPUT_SYSTEM_VERSION_2) || defined(USE_INPUT_SYSTEM_VERSION_2401)
+#define SH_CSS_ENABLE_METADATA
+#endif
+
+#if defined(SH_CSS_ENABLE_METADATA) && !defined(USE_INPUT_SYSTEM_VERSION_2401)
+#define SH_CSS_ENABLE_METADATA_THREAD
+#endif
+
+
+ /*
+ * SH_CSS_MAX_SP_THREADS:
+ * SP threads visible to the host, with connected communication queues;
+ * these threads are capable of running an image pipe.
+ * SH_CSS_MAX_SP_INTERNAL_THREADS:
+ * internal SP service threads, with no communication queues to the host;
+ * these threads cannot be used as an image pipe.
+ */
+
+#if defined(SH_CSS_ENABLE_METADATA_THREAD)
+#define SH_CSS_SP_INTERNAL_METADATA_THREAD 1
+#else
+#define SH_CSS_SP_INTERNAL_METADATA_THREAD 0
+#endif
+
+#define SH_CSS_SP_INTERNAL_SERVICE_THREAD 1
+
+#ifdef __DISABLE_UNUSED_THREAD__
+ #define SH_CSS_MAX_SP_THREADS 0
+#else
+ #define SH_CSS_MAX_SP_THREADS 5
+#endif
+
+#define SH_CSS_MAX_SP_INTERNAL_THREADS (\
+ SH_CSS_SP_INTERNAL_SERVICE_THREAD +\
+ SH_CSS_SP_INTERNAL_METADATA_THREAD)
+
+#define SH_CSS_MAX_PIPELINES SH_CSS_MAX_SP_THREADS
+
+/**
+ * The C99 standard does not specify the exact object representation of structs;
+ * the representation is compiler dependent.
+ *
+ * The structs that are communicated between host and SP/ISP should have the
+ * exact same object representation. The compiler that is used to compile the
+ * firmware is hivecc.
+ *
+ * To check if a different compiler, used to compile a host application, uses
+ * another object representation, macros are defined specifying the size of
+ * the structs as expected by the firmware.
+ *
+ * A host application shall verify that sizeof() of the struct is equal to
+ * the SIZE_OF_XXX macro of the corresponding struct. If they are not
+ * equal, functionality will break.
+ */
+#define CALC_ALIGNMENT_MEMBER(x, y) (CEIL_MUL(x, y) - x)
+#define SIZE_OF_HRT_VADDRESS sizeof(hive_uint32)
+#define SIZE_OF_IA_CSS_PTR sizeof(uint32_t)
+
+/* Number of SP's */
+#define NUM_OF_SPS 1
+
+#if defined(HAS_BL)
+#define NUM_OF_BLS 1
+#else
+#define NUM_OF_BLS 0
+#endif
+
+/* Enum for order of Binaries */
+enum sh_css_order_binaries {
+ SP_FIRMWARE = 0,
+#if defined(HAS_BL)
+ BOOTLOADER_FIRMWARE,
+#endif
+ ISP_FIRMWARE
+};
+
+ /*
+ * JB: keep next enum in sync with thread id's
+ * and pipe id's
+ */
+enum sh_css_pipe_config_override {
+ SH_CSS_PIPE_CONFIG_OVRD_NONE = 0,
+ SH_CSS_PIPE_CONFIG_OVRD_NO_OVRD = 0xffff
+};
+
+enum host2sp_commands {
+ host2sp_cmd_error = 0,
+ /*
+	 * The host2sp_cmd_ready command is the only command written by the SP.
+	 * It acknowledges that its previous command has been received
+	 * (this does not mean that the command has been executed).
+	 * It also indicates that a new command can be sent (it is a queue
+	 * with depth 1).
+ */
+ host2sp_cmd_ready = 1,
+ /* Command written by the Host */
+ host2sp_cmd_dummy, /* No action, can be used as watchdog */
+ host2sp_cmd_start_flash, /* Request SP to start the flash */
+ host2sp_cmd_terminate, /* SP should terminate itself */
+ N_host2sp_cmd
+};
+
+/** Enumeration used to indicate the events that are produced by
+ * the SP and consumed by the Host.
+ *
+ * !!!IMPORTANT!!! KEEP THE FOLLOWING IN SYNC:
+ * 1) "enum ia_css_event_type" (ia_css_event_public.h)
+ * 2) "enum sh_css_sp_event_type" (sh_css_internal.h)
+ * 3) "enum ia_css_event_type event_id_2_event_mask" (event_handler.sp.c)
+ * 4) "enum ia_css_event_type convert_event_sp_to_host_domain" (sh_css.c)
+ */
+enum sh_css_sp_event_type {
+ SH_CSS_SP_EVENT_OUTPUT_FRAME_DONE,
+ SH_CSS_SP_EVENT_SECOND_OUTPUT_FRAME_DONE,
+ SH_CSS_SP_EVENT_VF_OUTPUT_FRAME_DONE,
+ SH_CSS_SP_EVENT_SECOND_VF_OUTPUT_FRAME_DONE,
+ SH_CSS_SP_EVENT_3A_STATISTICS_DONE,
+ SH_CSS_SP_EVENT_DIS_STATISTICS_DONE,
+ SH_CSS_SP_EVENT_PIPELINE_DONE,
+ SH_CSS_SP_EVENT_FRAME_TAGGED,
+ SH_CSS_SP_EVENT_INPUT_FRAME_DONE,
+ SH_CSS_SP_EVENT_METADATA_DONE,
+ SH_CSS_SP_EVENT_LACE_STATISTICS_DONE,
+ SH_CSS_SP_EVENT_ACC_STAGE_COMPLETE,
+ SH_CSS_SP_EVENT_TIMER,
+ SH_CSS_SP_EVENT_PORT_EOF,
+ SH_CSS_SP_EVENT_FW_WARNING,
+ SH_CSS_SP_EVENT_FW_ASSERT,
+ SH_CSS_SP_EVENT_NR_OF_TYPES /* must be last */
+};
+
+/* xmem address map allocation per pipeline, css pointers */
+/* Note that the struct below should only consist of hrt_vaddress-es
+ Otherwise this will cause a fail in the function ref_sh_css_ddr_address_map
+ */
+struct sh_css_ddr_address_map {
+ hrt_vaddress isp_param;
+ hrt_vaddress isp_mem_param[SH_CSS_MAX_STAGES][IA_CSS_NUM_MEMORIES];
+ hrt_vaddress macc_tbl;
+ hrt_vaddress fpn_tbl;
+ hrt_vaddress sc_tbl;
+ hrt_vaddress tetra_r_x;
+ hrt_vaddress tetra_r_y;
+ hrt_vaddress tetra_gr_x;
+ hrt_vaddress tetra_gr_y;
+ hrt_vaddress tetra_gb_x;
+ hrt_vaddress tetra_gb_y;
+ hrt_vaddress tetra_b_x;
+ hrt_vaddress tetra_b_y;
+ hrt_vaddress tetra_ratb_x;
+ hrt_vaddress tetra_ratb_y;
+ hrt_vaddress tetra_batr_x;
+ hrt_vaddress tetra_batr_y;
+ hrt_vaddress dvs_6axis_params_y;
+};
+#define SIZE_OF_SH_CSS_DDR_ADDRESS_MAP_STRUCT \
+ (SIZE_OF_HRT_VADDRESS + \
+ (SH_CSS_MAX_STAGES * IA_CSS_NUM_MEMORIES * SIZE_OF_HRT_VADDRESS) + \
+ (16 * SIZE_OF_HRT_VADDRESS))
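+
+/* The 16 in the expression above counts the scalar hrt_vaddress members that
+ * follow isp_mem_param (macc_tbl through dvs_6axis_params_y); together with
+ * isp_param and the isp_mem_param array this must equal
+ * sizeof(struct sh_css_ddr_address_map) on the host compiler, per the object
+ * representation note earlier in this file. */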
+
+/* xmem address map allocation per pipeline */
+struct sh_css_ddr_address_map_size {
+ size_t isp_param;
+ size_t isp_mem_param[SH_CSS_MAX_STAGES][IA_CSS_NUM_MEMORIES];
+ size_t macc_tbl;
+ size_t fpn_tbl;
+ size_t sc_tbl;
+ size_t tetra_r_x;
+ size_t tetra_r_y;
+ size_t tetra_gr_x;
+ size_t tetra_gr_y;
+ size_t tetra_gb_x;
+ size_t tetra_gb_y;
+ size_t tetra_b_x;
+ size_t tetra_b_y;
+ size_t tetra_ratb_x;
+ size_t tetra_ratb_y;
+ size_t tetra_batr_x;
+ size_t tetra_batr_y;
+ size_t dvs_6axis_params_y;
+};
+
+struct sh_css_ddr_address_map_compound {
+ struct sh_css_ddr_address_map map;
+ struct sh_css_ddr_address_map_size size;
+};
+
+struct ia_css_isp_parameter_set_info {
+ struct sh_css_ddr_address_map mem_map;/**< pointers to Parameters in ISP format IMPT:
+ This should be first member of this struct */
+ uint32_t isp_parameters_id;/**< Unique ID to track which config was actually applied to a particular frame */
+ ia_css_ptr output_frame_ptr;/**< Output frame to which this config has to be applied (optional) */
+};
+
+/* this struct contains all arguments that can be passed to
+ a binary. It depends on the binary which ones are used. */
+struct sh_css_binary_args {
+ struct ia_css_frame *in_frame; /* input frame */
+ struct ia_css_frame *delay_frames[MAX_NUM_VIDEO_DELAY_FRAMES]; /* reference input frame */
+#ifndef ISP2401
+ struct ia_css_frame *tnr_frames[NUM_VIDEO_TNR_FRAMES]; /* tnr frames */
+#else
+ struct ia_css_frame *tnr_frames[NUM_TNR_FRAMES]; /* tnr frames */
+#endif
+ struct ia_css_frame *out_frame[IA_CSS_BINARY_MAX_OUTPUT_PORTS]; /* output frame */
+ struct ia_css_frame *out_vf_frame; /* viewfinder output frame */
+ bool copy_vf;
+ bool copy_output;
+ unsigned vf_downscale_log2;
+};
+
+#if SP_DEBUG == SP_DEBUG_DUMP
+
+#define SH_CSS_NUM_SP_DEBUG 48
+
+struct sh_css_sp_debug_state {
+ unsigned int error;
+ unsigned int debug[SH_CSS_NUM_SP_DEBUG];
+};
+
+#elif SP_DEBUG == SP_DEBUG_COPY
+
+#define SH_CSS_SP_DBG_TRACE_DEPTH (40)
+
+struct sh_css_sp_debug_trace {
+ uint16_t frame;
+ uint16_t line;
+ uint16_t pixel_distance;
+ uint16_t mipi_used_dword;
+ uint16_t sp_index;
+};
+
+struct sh_css_sp_debug_state {
+ uint16_t if_start_line;
+ uint16_t if_start_column;
+ uint16_t if_cropped_height;
+ uint16_t if_cropped_width;
+ unsigned int index;
+ struct sh_css_sp_debug_trace
+ trace[SH_CSS_SP_DBG_TRACE_DEPTH];
+};
+
+#elif SP_DEBUG == SP_DEBUG_TRACE
+
+#if 1
+/* Example of just one global trace */
+#define SH_CSS_SP_DBG_NR_OF_TRACES (1)
+#define SH_CSS_SP_DBG_TRACE_DEPTH (40)
+#else
+/* E.g. if you would like separate traces for 4 threads */
+#define SH_CSS_SP_DBG_NR_OF_TRACES (4)
+#define SH_CSS_SP_DBG_TRACE_DEPTH (10)
+#endif
+
+#define SH_CSS_SP_DBG_TRACE_FILE_ID_BIT_POS (13)
+
+struct sh_css_sp_debug_trace {
+ uint16_t time_stamp;
+ uint16_t location; /* bit 15..13 = file_id, 12..0 = line nr. */
+ uint32_t data;
+};
+
+struct sh_css_sp_debug_state {
+ struct sh_css_sp_debug_trace
+ trace[SH_CSS_SP_DBG_NR_OF_TRACES][SH_CSS_SP_DBG_TRACE_DEPTH];
+ uint16_t index_last[SH_CSS_SP_DBG_NR_OF_TRACES];
+ uint8_t index[SH_CSS_SP_DBG_NR_OF_TRACES];
+};
+
+#elif SP_DEBUG == SP_DEBUG_MINIMAL
+
+#define SH_CSS_NUM_SP_DEBUG 128
+
+struct sh_css_sp_debug_state {
+ unsigned int error;
+ unsigned int debug[SH_CSS_NUM_SP_DEBUG];
+};
+
+#endif
+
+
+struct sh_css_sp_debug_command {
+ /*
+ * The DMA software-mask,
+ * Bit 31...24: unused.
+ * Bit 23...16: unused.
+ * Bit 15...08: reading-request enabling bits for DMA channel 7..0
+	 * Bit 07...00: writing-request enabling bits for DMA channel 7..0
+ *
+ * For example, "0...0 0...0 11111011 11111101" indicates that the
+ * writing request through DMA Channel 1 and the reading request
+ * through DMA channel 2 are both disabled. The others are enabled.
+ */
+ uint32_t dma_sw_reg;
+};
+
+#if !defined(HAS_NO_INPUT_FORMATTER)
+/* SP input formatter configuration.*/
+struct sh_css_sp_input_formatter_set {
+ uint32_t stream_format;
+ input_formatter_cfg_t config_a;
+ input_formatter_cfg_t config_b;
+};
+#endif
+
+#if !defined(HAS_NO_INPUT_SYSTEM)
+#define IA_CSS_MIPI_SIZE_CHECK_MAX_NOF_ENTRIES_PER_PORT (3)
+#endif
+
+/* SP configuration information */
+struct sh_css_sp_config {
+ uint8_t no_isp_sync; /* Signal host immediately after start */
+ uint8_t enable_raw_pool_locking; /**< Enable Raw Buffer Locking for HALv3 Support */
+ uint8_t lock_all;
+ /**< If raw buffer locking is enabled, this flag indicates whether raw
+ frames are locked when their EOF event is successfully sent to the
+ host (true) or when they are passed to the preview/video pipe
+ (false). */
+#if !defined(HAS_NO_INPUT_FORMATTER)
+ struct {
+ uint8_t a_changed;
+ uint8_t b_changed;
+ uint8_t isp_2ppc;
+ struct sh_css_sp_input_formatter_set set[SH_CSS_MAX_IF_CONFIGS]; /* CSI-2 port is used as index. */
+ } input_formatter;
+#endif
+#if !defined(HAS_NO_INPUT_SYSTEM) && defined(USE_INPUT_SYSTEM_VERSION_2)
+ sync_generator_cfg_t sync_gen;
+ tpg_cfg_t tpg;
+ prbs_cfg_t prbs;
+ input_system_cfg_t input_circuit;
+ uint8_t input_circuit_cfg_changed;
+ uint32_t mipi_sizes_for_check[N_CSI_PORTS][IA_CSS_MIPI_SIZE_CHECK_MAX_NOF_ENTRIES_PER_PORT];
+#endif
+#if !defined(HAS_NO_INPUT_SYSTEM)
+ uint8_t enable_isys_event_queue;
+#endif
+ uint8_t disable_cont_vf;
+};
+
+enum sh_css_stage_type {
+ SH_CSS_SP_STAGE_TYPE = 0,
+ SH_CSS_ISP_STAGE_TYPE = 1
+};
+#define SH_CSS_NUM_STAGE_TYPES 2
+
+#define SH_CSS_PIPE_CONFIG_SAMPLE_PARAMS (1 << 0)
+#define SH_CSS_PIPE_CONFIG_SAMPLE_PARAMS_MASK \
+ ((SH_CSS_PIPE_CONFIG_SAMPLE_PARAMS << SH_CSS_MAX_SP_THREADS)-1)
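+
+/* Worked example: with SH_CSS_MAX_SP_THREADS == 5 (the default above when
+ * __DISABLE_UNUSED_THREAD__ is not defined), the mask evaluates to
+ * (1 << 5) - 1 == 0x1F, i.e. one sample-params bit per SP thread. */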
+
+#if !defined(HAS_NO_INPUT_SYSTEM) && defined(USE_INPUT_SYSTEM_VERSION_2401)
+struct sh_css_sp_pipeline_terminal {
+ union {
+ /* Input System 2401 */
+ virtual_input_system_stream_t virtual_input_system_stream[IA_CSS_STREAM_MAX_ISYS_STREAM_PER_CH];
+ } context;
+ /*
+ * TODO
+ * - Remove "virtual_input_system_cfg" when the ISYS2401 DLI is ready.
+ */
+ union {
+ /* Input System 2401 */
+ virtual_input_system_stream_cfg_t virtual_input_system_stream_cfg[IA_CSS_STREAM_MAX_ISYS_STREAM_PER_CH];
+ } ctrl;
+};
+
+struct sh_css_sp_pipeline_io {
+ struct sh_css_sp_pipeline_terminal input;
+ /* pqiao: comment out temporarily to save dmem */
+ /*struct sh_css_sp_pipeline_terminal output;*/
+};
+
+/** This struct tracks how many streams are registered per CSI port.
+ * This is used to track which streams have already been configured.
+ * Only when all streams are configured, the CSI RX is started for that port.
+ */
+struct sh_css_sp_pipeline_io_status {
+ uint32_t active[N_INPUT_SYSTEM_CSI_PORT]; /**< registered streams */
+ uint32_t running[N_INPUT_SYSTEM_CSI_PORT]; /**< configured streams */
+};
+
+#endif
+enum sh_css_port_dir {
+ SH_CSS_PORT_INPUT = 0,
+ SH_CSS_PORT_OUTPUT = 1
+};
+
+enum sh_css_port_type {
+ SH_CSS_HOST_TYPE = 0,
+ SH_CSS_COPYSINK_TYPE = 1,
+ SH_CSS_TAGGERSINK_TYPE = 2
+};
+
+/* Pipe inout settings: output port on 7-4bits, input port on 3-0bits */
+#define SH_CSS_PORT_FLD_WIDTH_IN_BITS (4)
+#define SH_CSS_PORT_TYPE_BIT_FLD(pt) (0x1 << (pt))
+#define SH_CSS_PORT_FLD(pd) ((pd) ? SH_CSS_PORT_FLD_WIDTH_IN_BITS : 0)
+#define SH_CSS_PIPE_PORT_CONFIG_ON(p, pd, pt) ((p) |= (SH_CSS_PORT_TYPE_BIT_FLD(pt) << SH_CSS_PORT_FLD(pd)))
+#define SH_CSS_PIPE_PORT_CONFIG_OFF(p, pd, pt) ((p) &= ~(SH_CSS_PORT_TYPE_BIT_FLD(pt) << SH_CSS_PORT_FLD(pd)))
+#define SH_CSS_PIPE_PORT_CONFIG_SET(p, pd, pt, val) ((val) ? \
+ SH_CSS_PIPE_PORT_CONFIG_ON(p, pd, pt) : SH_CSS_PIPE_PORT_CONFIG_OFF(p, pd, pt))
+#define SH_CSS_PIPE_PORT_CONFIG_GET(p, pd, pt) ((p) & (SH_CSS_PORT_TYPE_BIT_FLD(pt) << SH_CSS_PORT_FLD(pd)))
+#define SH_CSS_PIPE_PORT_CONFIG_IS_CONTINUOUS(p) \
+ (!(SH_CSS_PIPE_PORT_CONFIG_GET(p, SH_CSS_PORT_INPUT, SH_CSS_HOST_TYPE) && \
+ SH_CSS_PIPE_PORT_CONFIG_GET(p, SH_CSS_PORT_OUTPUT, SH_CSS_HOST_TYPE)))
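+
+/* Worked example: starting from p == 0,
+ * SH_CSS_PIPE_PORT_CONFIG_ON(p, SH_CSS_PORT_INPUT, SH_CSS_COPYSINK_TYPE)
+ * sets bit 1 (p == 0x02) and
+ * SH_CSS_PIPE_PORT_CONFIG_ON(p, SH_CSS_PORT_OUTPUT, SH_CSS_HOST_TYPE)
+ * sets bit 4 (p == 0x12). Since the host bit of the input nibble is not set,
+ * SH_CSS_PIPE_PORT_CONFIG_IS_CONTINUOUS(p) evaluates to true. */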
+
+#define IA_CSS_ACQUIRE_ISP_POS 31
+
+/* Flags for metadata processing */
+#define SH_CSS_METADATA_ENABLED 0x01
+#define SH_CSS_METADATA_PROCESSED 0x02
+#define SH_CSS_METADATA_OFFLINE_MODE 0x04
+#define SH_CSS_METADATA_WAIT_INPUT 0x08
+
+/** @brief Free an array of metadata buffers.
+ *
+ * @param[in] num_bufs Number of metadata buffers to be freed.
+ * @param[in] bufs Pointer of array of metadata buffers.
+ *
+ * This function frees an array of metadata buffers.
+ */
+void
+ia_css_metadata_free_multiple(unsigned int num_bufs, struct ia_css_metadata **bufs);
+
+/* Macro for handling pipe_qos_config */
+#define QOS_INVALID (~0U)
+#define QOS_ALL_STAGES_DISABLED (0U)
+#define QOS_STAGE_MASK(num) (0x00000001 << num)
+#define SH_CSS_IS_QOS_PIPE(pipe) ((pipe)->pipe_qos_config != QOS_INVALID)
+#define SH_CSS_QOS_STAGE_ENABLE(pipe, num) ((pipe)->pipe_qos_config |= QOS_STAGE_MASK(num))
+#define SH_CSS_QOS_STAGE_DISABLE(pipe, num) ((pipe)->pipe_qos_config &= ~QOS_STAGE_MASK(num))
+#define SH_CSS_QOS_STAGE_IS_ENABLED(pipe, num) ((pipe)->pipe_qos_config & QOS_STAGE_MASK(num))
+#define SH_CSS_QOS_STAGE_IS_ALL_DISABLED(pipe) ((pipe)->pipe_qos_config == QOS_ALL_STAGES_DISABLED)
+#define SH_CSS_QOS_MODE_PIPE_ADD(mode, pipe) ((mode) |= (0x1 << (pipe)->pipe_id))
+#define SH_CSS_QOS_MODE_PIPE_REMOVE(mode, pipe) ((mode) &= ~(0x1 << (pipe)->pipe_id))
+#define SH_CSS_IS_QOS_ONLY_MODE(mode) ((mode) == (0x1 << IA_CSS_PIPE_ID_ACC))
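+
+/* Worked example: a QOS pipe with extension stages 0 and 2 enabled has
+ * pipe_qos_config == QOS_STAGE_MASK(0) | QOS_STAGE_MASK(2) == 0x5;
+ * SH_CSS_QOS_STAGE_DISABLE(pipe, 2) then leaves 0x1, and
+ * SH_CSS_QOS_STAGE_IS_ALL_DISABLED(pipe) only becomes true once the
+ * remaining stage is disabled as well. */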
+
+/* Information for a pipeline */
+struct sh_css_sp_pipeline {
+ uint32_t pipe_id; /* the pipe ID */
+ uint32_t pipe_num; /* the dynamic pipe number */
+ uint32_t thread_id; /* the sp thread ID */
+ uint32_t pipe_config; /* the pipe config */
+ uint32_t pipe_qos_config; /* Bitmap of multiple QOS extension fw state.
+ (0xFFFFFFFF) indicates non QOS pipe.*/
+ uint32_t inout_port_config;
+ uint32_t required_bds_factor;
+ uint32_t dvs_frame_delay;
+#if !defined(HAS_NO_INPUT_SYSTEM)
+ uint32_t input_system_mode; /* enum ia_css_input_mode */
+ uint32_t port_id; /* port_id for input system */
+#endif
+ uint32_t num_stages; /* the pipe config */
+ uint32_t running; /* needed for pipe termination */
+ hrt_vaddress sp_stage_addr[SH_CSS_MAX_STAGES];
+ hrt_vaddress scaler_pp_lut; /* Early bound LUT */
+ uint32_t dummy; /* stage ptr is only used on sp but lives in
+ this struct; needs cleanup */
+ int32_t num_execs; /* number of times to run if this is
+ an acceleration pipe. */
+#if defined(SH_CSS_ENABLE_METADATA)
+ struct {
+ uint32_t format; /* Metadata format in hrt format */
+ uint32_t width; /* Width of a line */
+ uint32_t height; /* Number of lines */
+ uint32_t stride; /* Stride (in bytes) per line */
+ uint32_t size; /* Total size (in bytes) */
+ hrt_vaddress cont_buf; /* Address of continuous buffer */
+ } metadata;
+#endif
+#if defined(SH_CSS_ENABLE_PER_FRAME_PARAMS)
+ uint32_t output_frame_queue_id;
+#endif
+ union {
+ struct {
+ uint32_t bytes_available;
+ } bin;
+ struct {
+ uint32_t height;
+ uint32_t width;
+ uint32_t padded_width;
+ uint32_t max_input_width;
+ uint32_t raw_bit_depth;
+ } raw;
+ } copy;
+#ifdef ISP2401
+
+ /* Parameters passed to Shading Correction kernel. */
+ struct {
+ uint32_t internal_frame_origin_x_bqs_on_sctbl; /* Origin X (bqs) of internal frame on shading table */
+ uint32_t internal_frame_origin_y_bqs_on_sctbl; /* Origin Y (bqs) of internal frame on shading table */
+ } shading;
+#endif
+};
+
+/*
+ * The first frames (with comment Dynamic) can be dynamic or static
+ * The other frames (ref_in and below) can only be static
+ * Static means that the data address will not change during the lifetime
+ * of the associated pipe. Dynamic means that the data address can
+ * change with every (frame) iteration of the associated pipe
+ *
+ * s3a and dis are now also dynamic but (still) handled separately.
+ */
+#define SH_CSS_NUM_DYNAMIC_FRAME_IDS (3)
+
+struct ia_css_frames_sp {
+ struct ia_css_frame_sp in;
+ struct ia_css_frame_sp out[IA_CSS_BINARY_MAX_OUTPUT_PORTS];
+ struct ia_css_resolution effective_in_res;
+ struct ia_css_frame_sp out_vf;
+ struct ia_css_frame_sp_info internal_frame_info;
+ struct ia_css_buffer_sp s3a_buf;
+ struct ia_css_buffer_sp dvs_buf;
+#if defined SH_CSS_ENABLE_METADATA
+ struct ia_css_buffer_sp metadata_buf;
+#endif
+};
+
+/* Information for a single pipeline stage for an ISP */
+struct sh_css_isp_stage {
+ /*
+ * For compatibility and portability, only types
+ * from "stdint.h" are allowed
+ *
+ * Use of "enum" and "bool" is prohibited
+ * Multiple boolean flags can be stored in an
+ * integer
+ */
+ struct ia_css_blob_info blob_info;
+ struct ia_css_binary_info binary_info;
+ char binary_name[SH_CSS_MAX_BINARY_NAME];
+ struct ia_css_isp_param_css_segments mem_initializers;
+};
+
+/* Information for a single pipeline stage */
+struct sh_css_sp_stage {
+ /*
+ * For compatibility and portability, only types
+ * from "stdint.h" are allowed
+ *
+ * Use of "enum" and "bool" is prohibited
+ * Multiple boolean flags can be stored in an
+ * integer
+ */
+ uint8_t num; /* Stage number */
+ uint8_t isp_online;
+ uint8_t isp_copy_vf;
+ uint8_t isp_copy_output;
+ uint8_t sp_enable_xnr;
+ uint8_t isp_deci_log_factor;
+ uint8_t isp_vf_downscale_bits;
+ uint8_t deinterleaved;
+/*
+ * NOTE: Programming the input circuit can only be done at the
+ * start of a session. It is illegal to program it during execution
+ * The input circuit defines the connectivity
+ */
+ uint8_t program_input_circuit;
+/* enum ia_css_pipeline_stage_sp_func func; */
+ uint8_t func;
+ /* The type of the pipe-stage */
+ /* enum sh_css_stage_type stage_type; */
+ uint8_t stage_type;
+ uint8_t num_stripes;
+ uint8_t isp_pipe_version;
+ struct {
+ uint8_t vf_output;
+ uint8_t s3a;
+ uint8_t sdis;
+ uint8_t dvs_stats;
+ uint8_t lace_stats;
+ } enable;
+ /* Add padding to come to a word boundary */
+ /* unsigned char padding[0]; */
+
+ struct sh_css_crop_pos sp_out_crop_pos;
+ struct ia_css_frames_sp frames;
+ struct ia_css_resolution dvs_envelope;
+ struct sh_css_uds_info uds;
+ hrt_vaddress isp_stage_addr;
+ hrt_vaddress xmem_bin_addr;
+ hrt_vaddress xmem_map_addr;
+
+ uint16_t top_cropping;
+ uint16_t row_stripes_height;
+ uint16_t row_stripes_overlap_lines;
+ uint8_t if_config_index; /* Which if_config should be applied by this stage. */
+};
+
+/*
+ * Time: 2012-07-19, 17:40.
+ * Note: Add a new data member "debug" in "sh_css_sp_group". This
+ * data member is used to pass the debugging command from the
+ * Host to the SP.
+ *
+ * Time: Before 2012-07-19.
+ * Note:
+ * Group all host initialized SP variables into this struct.
+ * This is initialized every stage through dma.
+ * The stage part itself is transferred through sh_css_sp_stage.
+*/
+struct sh_css_sp_group {
+ struct sh_css_sp_config config;
+ struct sh_css_sp_pipeline pipe[SH_CSS_MAX_SP_THREADS];
+#if !defined(HAS_NO_INPUT_SYSTEM) && defined(USE_INPUT_SYSTEM_VERSION_2401)
+ struct sh_css_sp_pipeline_io pipe_io[SH_CSS_MAX_SP_THREADS];
+ struct sh_css_sp_pipeline_io_status pipe_io_status;
+#endif
+ struct sh_css_sp_debug_command debug;
+};
+
+/* Data in SP dmem that is set from the host every stage. */
+struct sh_css_sp_per_frame_data {
+ /* ddr address of sp_group and sp_stage */
+ hrt_vaddress sp_group_addr;
+};
+
+#define SH_CSS_NUM_SDW_IRQS 3
+
+/* Output data from SP to css */
+struct sh_css_sp_output {
+ unsigned int bin_copy_bytes_copied;
+#if SP_DEBUG != SP_DEBUG_NONE
+ struct sh_css_sp_debug_state debug;
+#endif
+ unsigned int sw_interrupt_value[SH_CSS_NUM_SDW_IRQS];
+};
+
+#define CONFIG_ON_FRAME_ENQUEUE() 0
+
+/**
+ * @brief Data structure for the circular buffer.
+ * The circular buffer is empty if "start == end". The
+ * circular buffer is full if "(end + 1) % size == start".
+ */
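+/*
+ * Minimal sketch of the invariants above (function and parameter names are
+ * illustrative only, not the actual circbuf API):
+ *
+ *   static inline bool cb_is_empty(uint32_t start, uint32_t end)
+ *   {
+ *       return start == end;
+ *   }
+ *
+ *   static inline bool cb_is_full(uint32_t start, uint32_t end, uint32_t size)
+ *   {
+ *       return ((end + 1) % size) == start;
+ *   }
+ */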
+/* Variable Sized Buffer Queue Elements */
+
+#define IA_CSS_NUM_ELEMS_HOST2SP_BUFFER_QUEUE 6
+#define IA_CSS_NUM_ELEMS_HOST2SP_PARAM_QUEUE 3
+#define IA_CSS_NUM_ELEMS_HOST2SP_TAG_CMD_QUEUE 6
+
+#if !defined(HAS_NO_INPUT_SYSTEM)
+/* sp-to-host queue is expected to be emptied in ISR since
+ * it is used instead of HW interrupts (due to HW design issue).
+ * We need one queue element per CSI port. */
+#define IA_CSS_NUM_ELEMS_SP2HOST_ISYS_EVENT_QUEUE (2 * N_CSI_PORTS)
+/* The host-to-sp queue needs to allow for some delay
+ * in the emptying of this queue in the SP since there is no
+ * separate SP thread for this. */
+#define IA_CSS_NUM_ELEMS_HOST2SP_ISYS_EVENT_QUEUE (2 * N_CSI_PORTS)
+#else
+#define IA_CSS_NUM_ELEMS_SP2HOST_ISYS_EVENT_QUEUE 0
+#define IA_CSS_NUM_ELEMS_HOST2SP_ISYS_EVENT_QUEUE 0
+#define IA_CSS_NUM_ELEMS_HOST2SP_TAG_CMD_QUEUE 0
+#endif
+
+#if defined(HAS_SP_2400)
+#define IA_CSS_NUM_ELEMS_HOST2SP_PSYS_EVENT_QUEUE 13
+#define IA_CSS_NUM_ELEMS_SP2HOST_BUFFER_QUEUE 19
+#define IA_CSS_NUM_ELEMS_SP2HOST_PSYS_EVENT_QUEUE 26 /* holds events for all types of buffers, hence deeper */
+#else
+#define IA_CSS_NUM_ELEMS_HOST2SP_PSYS_EVENT_QUEUE 6
+#define IA_CSS_NUM_ELEMS_SP2HOST_BUFFER_QUEUE 6
+#define IA_CSS_NUM_ELEMS_SP2HOST_PSYS_EVENT_QUEUE 6
+#endif
+
+struct sh_css_hmm_buffer {
+ union {
+ struct ia_css_isp_3a_statistics s3a;
+ struct ia_css_isp_dvs_statistics dis;
+ hrt_vaddress skc_dvs_statistics;
+ hrt_vaddress lace_stat;
+ struct ia_css_metadata metadata;
+ struct frame_data_wrapper {
+ hrt_vaddress frame_data;
+ uint32_t flashed;
+ uint32_t exp_id;
+ uint32_t isp_parameters_id; /**< Unique ID to track which config was
+ actually applied to a particular frame */
+#if CONFIG_ON_FRAME_ENQUEUE()
+ struct sh_css_config_on_frame_enqueue config_on_frame_enqueue;
+#endif
+ } frame;
+ hrt_vaddress ddr_ptrs;
+ } payload;
+ /*
+ * kernel_ptr is present for host administration purposes only.
+ * type is uint64_t in order to be 64-bit host compatible.
+ * uint64_t does not exist on SP/ISP.
+ * Size of the struct is checked by sp.hive.c.
+ */
+#if !defined(__ISP)
+ CSS_ALIGN(uint64_t cookie_ptr, 8); /* TODO: check if this alignment is needed */
+ uint64_t kernel_ptr;
+#else
+ CSS_ALIGN(struct { uint32_t a[2]; } cookie_ptr, 8); /* TODO: check if this alignment is needed */
+ struct { uint32_t a[2]; } kernel_ptr;
+#endif
+ struct ia_css_time_meas timing_data;
+ clock_value_t isys_eof_clock_tick;
+};
+#if CONFIG_ON_FRAME_ENQUEUE()
+#define SIZE_OF_FRAME_STRUCT \
+ (SIZE_OF_HRT_VADDRESS + \
+ (3 * sizeof(uint32_t)) + \
+ sizeof(uint32_t))
+#else
+#define SIZE_OF_FRAME_STRUCT \
+ (SIZE_OF_HRT_VADDRESS + \
+ (3 * sizeof(uint32_t)))
+#endif
+
+#define SIZE_OF_PAYLOAD_UNION \
+ (MAX(MAX(MAX(MAX( \
+ SIZE_OF_IA_CSS_ISP_3A_STATISTICS_STRUCT, \
+ SIZE_OF_IA_CSS_ISP_DVS_STATISTICS_STRUCT), \
+ SIZE_OF_IA_CSS_METADATA_STRUCT), \
+ SIZE_OF_FRAME_STRUCT), \
+ SIZE_OF_HRT_VADDRESS))
+
+/* Do not use sizeof(uint64_t) since that does not exist on the SP */
+#define SIZE_OF_SH_CSS_HMM_BUFFER_STRUCT \
+ (SIZE_OF_PAYLOAD_UNION + \
+ CALC_ALIGNMENT_MEMBER(SIZE_OF_PAYLOAD_UNION, 8) + \
+ 8 + \
+ 8 + \
+ SIZE_OF_IA_CSS_TIME_MEAS_STRUCT + \
+ SIZE_OF_IA_CSS_CLOCK_TICK_STRUCT + \
+ CALC_ALIGNMENT_MEMBER(SIZE_OF_IA_CSS_CLOCK_TICK_STRUCT, 8))
+
+enum sh_css_queue_type {
+ sh_css_invalid_queue_type = -1,
+ sh_css_host2sp_buffer_queue,
+ sh_css_sp2host_buffer_queue,
+ sh_css_host2sp_psys_event_queue,
+ sh_css_sp2host_psys_event_queue,
+#if !defined(HAS_NO_INPUT_SYSTEM)
+ sh_css_sp2host_isys_event_queue,
+ sh_css_host2sp_isys_event_queue,
+ sh_css_host2sp_tag_cmd_queue,
+#endif
+};
+
+struct sh_css_event_irq_mask {
+ uint16_t or_mask;
+ uint16_t and_mask;
+};
+#define SIZE_OF_SH_CSS_EVENT_IRQ_MASK_STRUCT \
+ (2 * sizeof(uint16_t))
+
+struct host_sp_communication {
+ /*
+ * Don't use enum host2sp_commands, because the size of an enum is
+ * compiler dependent and thus non-portable
+ */
+ uint32_t host2sp_command;
+
+ /*
+ * The frame buffers that are reused by the
+ * copy pipe in the offline preview mode.
+ *
+ * host2sp_offline_frames[0]: the input frame of the preview pipe.
+ * host2sp_offline_frames[1]: the output frame of the copy pipe.
+ *
+ * TODO:
+ * Remove it when the Host and the SP are decoupled.
+ */
+ hrt_vaddress host2sp_offline_frames[NUM_CONTINUOUS_FRAMES];
+ hrt_vaddress host2sp_offline_metadata[NUM_CONTINUOUS_FRAMES];
+
+#if defined(USE_INPUT_SYSTEM_VERSION_2) || defined(USE_INPUT_SYSTEM_VERSION_2401)
+ hrt_vaddress host2sp_mipi_frames[N_CSI_PORTS][NUM_MIPI_FRAMES_PER_STREAM];
+ hrt_vaddress host2sp_mipi_metadata[N_CSI_PORTS][NUM_MIPI_FRAMES_PER_STREAM];
+ uint32_t host2sp_num_mipi_frames[N_CSI_PORTS];
+#endif
+ uint32_t host2sp_cont_avail_num_raw_frames;
+ uint32_t host2sp_cont_extra_num_raw_frames;
+ uint32_t host2sp_cont_target_num_raw_frames;
+ struct sh_css_event_irq_mask host2sp_event_irq_mask[NR_OF_PIPELINES];
+
+};
+
+#if defined(USE_INPUT_SYSTEM_VERSION_2) || defined(USE_INPUT_SYSTEM_VERSION_2401)
+#define SIZE_OF_HOST_SP_COMMUNICATION_STRUCT \
+ (sizeof(uint32_t) + \
+ (NUM_CONTINUOUS_FRAMES * SIZE_OF_HRT_VADDRESS * 2) + \
+ (N_CSI_PORTS * NUM_MIPI_FRAMES_PER_STREAM * SIZE_OF_HRT_VADDRESS * 2) + \
+ ((3 + N_CSI_PORTS) * sizeof(uint32_t)) + \
+ (NR_OF_PIPELINES * SIZE_OF_SH_CSS_EVENT_IRQ_MASK_STRUCT))
+#else
+#define SIZE_OF_HOST_SP_COMMUNICATION_STRUCT \
+ (sizeof(uint32_t) + \
+ (NUM_CONTINUOUS_FRAMES * SIZE_OF_HRT_VADDRESS * 2) + \
+ (3 * sizeof(uint32_t)) + \
+ (NR_OF_PIPELINES * SIZE_OF_SH_CSS_EVENT_IRQ_MASK_STRUCT))
+#endif
+
+struct host_sp_queues {
+ /*
+ * Queues for the dynamic frame information,
+ * i.e. the "in_frame" buffer, the "out_frame"
+ * buffer and the "vf_out_frame" buffer.
+ */
+ ia_css_circbuf_desc_t host2sp_buffer_queues_desc
+ [SH_CSS_MAX_SP_THREADS][SH_CSS_MAX_NUM_QUEUES];
+ ia_css_circbuf_elem_t host2sp_buffer_queues_elems
+ [SH_CSS_MAX_SP_THREADS][SH_CSS_MAX_NUM_QUEUES]
+ [IA_CSS_NUM_ELEMS_HOST2SP_BUFFER_QUEUE];
+ ia_css_circbuf_desc_t sp2host_buffer_queues_desc
+ [SH_CSS_MAX_NUM_QUEUES];
+ ia_css_circbuf_elem_t sp2host_buffer_queues_elems
+ [SH_CSS_MAX_NUM_QUEUES][IA_CSS_NUM_ELEMS_SP2HOST_BUFFER_QUEUE];
+
+ /*
+ * The queues for the events.
+ */
+ ia_css_circbuf_desc_t host2sp_psys_event_queue_desc;
+ ia_css_circbuf_elem_t host2sp_psys_event_queue_elems
+ [IA_CSS_NUM_ELEMS_HOST2SP_PSYS_EVENT_QUEUE];
+ ia_css_circbuf_desc_t sp2host_psys_event_queue_desc;
+ ia_css_circbuf_elem_t sp2host_psys_event_queue_elems
+ [IA_CSS_NUM_ELEMS_SP2HOST_PSYS_EVENT_QUEUE];
+
+#if !defined(HAS_NO_INPUT_SYSTEM)
+ /*
+ * The queues for the ISYS events.
+ */
+ ia_css_circbuf_desc_t host2sp_isys_event_queue_desc;
+ ia_css_circbuf_elem_t host2sp_isys_event_queue_elems
+ [IA_CSS_NUM_ELEMS_HOST2SP_ISYS_EVENT_QUEUE];
+ ia_css_circbuf_desc_t sp2host_isys_event_queue_desc;
+ ia_css_circbuf_elem_t sp2host_isys_event_queue_elems
+ [IA_CSS_NUM_ELEMS_SP2HOST_ISYS_EVENT_QUEUE];
+ /*
+ * The queue for the tagger commands.
+ * CHECK: are these last two present on the 2401 ?
+ */
+ ia_css_circbuf_desc_t host2sp_tag_cmd_queue_desc;
+ ia_css_circbuf_elem_t host2sp_tag_cmd_queue_elems
+ [IA_CSS_NUM_ELEMS_HOST2SP_TAG_CMD_QUEUE];
+#endif
+};
+
+#define SIZE_OF_QUEUES_ELEMS \
+ (SIZE_OF_IA_CSS_CIRCBUF_ELEM_S_STRUCT * \
+ ((SH_CSS_MAX_SP_THREADS * SH_CSS_MAX_NUM_QUEUES * IA_CSS_NUM_ELEMS_HOST2SP_BUFFER_QUEUE) + \
+ (SH_CSS_MAX_NUM_QUEUES * IA_CSS_NUM_ELEMS_SP2HOST_BUFFER_QUEUE) + \
+ (IA_CSS_NUM_ELEMS_HOST2SP_PSYS_EVENT_QUEUE) + \
+ (IA_CSS_NUM_ELEMS_SP2HOST_PSYS_EVENT_QUEUE) + \
+ (IA_CSS_NUM_ELEMS_HOST2SP_ISYS_EVENT_QUEUE) + \
+ (IA_CSS_NUM_ELEMS_SP2HOST_ISYS_EVENT_QUEUE) + \
+ (IA_CSS_NUM_ELEMS_HOST2SP_TAG_CMD_QUEUE)))
+
+#if !defined(HAS_NO_INPUT_SYSTEM)
+#define IA_CSS_NUM_CIRCBUF_DESCS 5
+#else
+#ifndef ISP2401
+#define IA_CSS_NUM_CIRCBUF_DESCS 3
+#else
+#define IA_CSS_NUM_CIRCBUF_DESCS 2
+#endif
+#endif
+
+#define SIZE_OF_QUEUES_DESC \
+ ((SH_CSS_MAX_SP_THREADS * SH_CSS_MAX_NUM_QUEUES * \
+ SIZE_OF_IA_CSS_CIRCBUF_DESC_S_STRUCT) + \
+ (SH_CSS_MAX_NUM_QUEUES * SIZE_OF_IA_CSS_CIRCBUF_DESC_S_STRUCT) + \
+ (IA_CSS_NUM_CIRCBUF_DESCS * SIZE_OF_IA_CSS_CIRCBUF_DESC_S_STRUCT))
+
+#define SIZE_OF_HOST_SP_QUEUES_STRUCT \
+ (SIZE_OF_QUEUES_ELEMS + SIZE_OF_QUEUES_DESC)
+
+extern int (*sh_css_printf)(const char *fmt, va_list args);
+
+STORAGE_CLASS_INLINE void
+sh_css_print(const char *fmt, ...)
+{
+ va_list ap;
+
+ if (sh_css_printf) {
+ va_start(ap, fmt);
+ sh_css_printf(fmt, ap);
+ va_end(ap);
+ }
+}
+
+STORAGE_CLASS_INLINE void
+sh_css_vprint(const char *fmt, va_list args)
+{
+ if (sh_css_printf)
+ sh_css_printf(fmt, args);
+}
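+/*
+ * Usage sketch: a host build can route CSS debug output to the C library,
+ * assuming <stdio.h> is available ("num_binaries" is a hypothetical value):
+ *
+ *   sh_css_printf = vprintf;
+ *   sh_css_print("loaded %d binaries\n", num_binaries);
+ */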
+
+/* The following #if is there because this header file is also included
+ by SP and ISP code, which do not need this data, and because HIVECC has
+ alignment issues with the firmware structs/unions.
+ A more permanent solution would be to refactor this include.
+*/
+#if !defined(__ISP)
+hrt_vaddress
+sh_css_params_ddr_address_map(void);
+
+enum ia_css_err
+sh_css_params_init(void);
+
+void
+sh_css_params_uninit(void);
+
+void *sh_css_malloc(size_t size);
+
+void *sh_css_calloc(size_t N, size_t size);
+
+void sh_css_free(void *ptr);
+
+/* For Acceleration API: Flush FW (shared buffer pointer) arguments */
+void sh_css_flush(struct ia_css_acc_fw *fw);
+
+
+void
+sh_css_binary_args_reset(struct sh_css_binary_args *args);
+
+/* Check two frames for equality (format, resolution, bits per element) */
+bool
+sh_css_frame_equal_types(const struct ia_css_frame *frame_a,
+ const struct ia_css_frame *frame_b);
+
+bool
+sh_css_frame_info_equal_resolution(const struct ia_css_frame_info *info_a,
+ const struct ia_css_frame_info *info_b);
+
+void
+sh_css_capture_enable_bayer_downscaling(bool enable);
+
+void
+sh_css_binary_print(const struct ia_css_binary *binary);
+
+/* The "aligned" argument of sh_css_frame_info_set_width() can be used to
+ impose an extra alignment requirement. When 0, no extra alignment is done. */
+void
+sh_css_frame_info_set_width(struct ia_css_frame_info *info,
+ unsigned int width,
+ unsigned int aligned);
+
+#if !defined(HAS_NO_INPUT_SYSTEM) && defined(USE_INPUT_SYSTEM_VERSION_2)
+
+unsigned int
+sh_css_get_mipi_sizes_for_check(const unsigned int port, const unsigned int idx);
+
+#endif
+
+hrt_vaddress
+sh_css_store_sp_group_to_ddr(void);
+
+hrt_vaddress
+sh_css_store_sp_stage_to_ddr(unsigned pipe, unsigned stage);
+
+hrt_vaddress
+sh_css_store_isp_stage_to_ddr(unsigned pipe, unsigned stage);
+
+
+void
+sh_css_update_uds_and_crop_info(
+ const struct ia_css_binary_info *info,
+ const struct ia_css_frame_info *in_frame_info,
+ const struct ia_css_frame_info *out_frame_info,
+ const struct ia_css_resolution *dvs_env,
+ const struct ia_css_dz_config *zoom,
+ const struct ia_css_vector *motion_vector,
+ struct sh_css_uds_info *uds, /* out */
+ struct sh_css_crop_pos *sp_out_crop_pos, /* out */
+ bool enable_zoom
+ );
+
+void
+sh_css_invalidate_shading_tables(struct ia_css_stream *stream);
+
+struct ia_css_pipeline *
+ia_css_pipe_get_pipeline(const struct ia_css_pipe *pipe);
+
+unsigned int
+ia_css_pipe_get_pipe_num(const struct ia_css_pipe *pipe);
+
+unsigned int
+ia_css_pipe_get_isp_pipe_version(const struct ia_css_pipe *pipe);
+
+bool
+sh_css_continuous_is_enabled(uint8_t pipe_num);
+
+struct ia_css_pipe *
+find_pipe_by_num(uint32_t pipe_num);
+
+#ifdef USE_INPUT_SYSTEM_VERSION_2401
+void
+ia_css_get_crop_offsets(
+ struct ia_css_pipe *pipe,
+ struct ia_css_frame_info *in_frame);
+#endif
+#endif /* !defined(__ISP) */
+
+#endif /* _SH_CSS_INTERNAL_H_ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_irq.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_irq.c
new file mode 100644
index 000000000000..37e954aea36f
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_irq.c
@@ -0,0 +1,16 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+/* This file will contain the code to implement the functions declared in ia_css_irq.h
+ and associated helper functions */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_legacy.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_legacy.h
new file mode 100644
index 000000000000..e12789236bb9
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_legacy.h
@@ -0,0 +1,88 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _SH_CSS_LEGACY_H_
+#define _SH_CSS_LEGACY_H_
+
+#include <type_support.h>
+#include <ia_css_err.h>
+#include <ia_css_types.h>
+#include <ia_css_frame_public.h>
+#include <ia_css_pipe_public.h>
+#include <ia_css_stream_public.h>
+
+/** The pipe id type; it distinguishes the kinds of pipes that
+ * can be run in parallel.
+ */
+enum ia_css_pipe_id {
+ IA_CSS_PIPE_ID_PREVIEW,
+ IA_CSS_PIPE_ID_COPY,
+ IA_CSS_PIPE_ID_VIDEO,
+ IA_CSS_PIPE_ID_CAPTURE,
+ IA_CSS_PIPE_ID_YUVPP,
+#ifndef ISP2401
+ IA_CSS_PIPE_ID_ACC,
+ IA_CSS_PIPE_ID_NUM
+#else
+ IA_CSS_PIPE_ID_ACC
+#endif
+};
+#ifdef ISP2401
+#define IA_CSS_PIPE_ID_NUM (IA_CSS_PIPE_ID_ACC+1)
+#endif
+
+struct ia_css_pipe_extra_config {
+ bool enable_raw_binning;
+ bool enable_yuv_ds;
+ bool enable_high_speed;
+ bool enable_dvs_6axis;
+ bool enable_reduced_pipe;
+ bool enable_fractional_ds;
+ bool disable_vf_pp;
+};
+
+#define DEFAULT_PIPE_EXTRA_CONFIG \
+{ \
+ false, /* enable_raw_binning */ \
+ false, /* enable_yuv_ds */ \
+ false, /* enable_high_speed */ \
+ false, /* enable_dvs_6axis */ \
+ false, /* enable_reduced_pipe */ \
+ false, /* enable_fractional_ds */ \
+ false, /* disable_vf_pp */ \
+}
+
+enum ia_css_err
+ia_css_pipe_create_extra(const struct ia_css_pipe_config *config,
+ const struct ia_css_pipe_extra_config *extra_config,
+ struct ia_css_pipe **pipe);
+
+void
+ia_css_pipe_extra_config_defaults(struct ia_css_pipe_extra_config *extra_config);
+
+enum ia_css_err
+ia_css_temp_pipe_to_pipe_id(const struct ia_css_pipe *pipe,
+ enum ia_css_pipe_id *pipe_id);
+
+/* DEPRECATED. FPN is not supported. */
+enum ia_css_err
+sh_css_set_black_frame(struct ia_css_stream *stream,
+ const struct ia_css_frame *raw_black_frame);
+
+#ifndef ISP2401
+void
+sh_css_enable_cont_capt(bool enable, bool stop_copy_preview);
+
+#endif
+#endif /* _SH_CSS_LEGACY_H_ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_metadata.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_metadata.c
new file mode 100644
index 000000000000..ebdf84d4a138
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_metadata.c
@@ -0,0 +1,16 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+/* This file will contain the code to implement the functions declared in ia_css_metadata.h
+ and associated helper functions */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_metrics.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_metrics.c
new file mode 100644
index 000000000000..48e5542b3a43
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_metrics.c
@@ -0,0 +1,176 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "assert_support.h"
+#include "sh_css_metrics.h"
+
+#include "sp.h"
+#include "isp.h"
+
+#include "sh_css_internal.h"
+
+#define MULTIPLE_PCS 0
+#define SUSPEND 0
+#define NOF_PCS 1
+#define RESUME_MASK 0x8
+#define STOP_MASK 0x0
+
+static bool pc_histogram_enabled;
+static struct sh_css_pc_histogram *isp_histogram;
+static struct sh_css_pc_histogram *sp_histogram;
+
+struct sh_css_metrics sh_css_metrics;
+
+void
+sh_css_metrics_start_frame(void)
+{
+ sh_css_metrics.frame_metrics.num_frames++;
+}
+
+static void
+clear_histogram(struct sh_css_pc_histogram *histogram)
+{
+ unsigned i;
+
+ assert(histogram != NULL);
+
+ for (i = 0; i < histogram->length; i++) {
+ histogram->run[i] = 0;
+ histogram->stall[i] = 0;
+ histogram->msink[i] = 0xFFFF;
+ }
+}
+
+void
+sh_css_metrics_enable_pc_histogram(bool enable)
+{
+ pc_histogram_enabled = enable;
+}
+
+static void
+make_histogram(struct sh_css_pc_histogram *histogram, unsigned length)
+{
+ assert(histogram != NULL);
+
+ if (histogram->length)
+ return;
+ if (histogram->run)
+ return;
+ histogram->run = sh_css_malloc(length * sizeof(*histogram->run));
+ if (!histogram->run)
+ return;
+ histogram->stall = sh_css_malloc(length * sizeof(*histogram->stall));
+ if (!histogram->stall)
+ return;
+ histogram->msink = sh_css_malloc(length * sizeof(*histogram->msink));
+ if (!histogram->msink)
+ return;
+
+ histogram->length = length;
+ clear_histogram(histogram);
+}
+
+static void
+insert_binary_metrics(struct sh_css_binary_metrics **l,
+ struct sh_css_binary_metrics *metrics)
+{
+ assert(l != NULL);
+ assert(*l != NULL);
+ assert(metrics != NULL);
+
+ for (; *l; l = &(*l)->next)
+ if (*l == metrics)
+ return;
+
+ *l = metrics;
+ metrics->next = NULL;
+}
+
+void
+sh_css_metrics_start_binary(struct sh_css_binary_metrics *metrics)
+{
+ assert(metrics != NULL);
+
+ if (!pc_histogram_enabled)
+ return;
+
+ isp_histogram = &metrics->isp_histogram;
+ sp_histogram = &metrics->sp_histogram;
+ make_histogram(isp_histogram, ISP_PMEM_DEPTH);
+ make_histogram(sp_histogram, SP_PMEM_DEPTH);
+ insert_binary_metrics(&sh_css_metrics.binary_metrics, metrics);
+}
+
+void
+sh_css_metrics_sample_pcs(void)
+{
+ bool stall;
+ unsigned int pc;
+ unsigned int msink;
+
+#if SUSPEND
+ unsigned int sc = 0;
+ unsigned int stopped_sc = 0;
+ unsigned int resume_sc = 0;
+#endif
+
+
+#if MULTIPLE_PCS
+ int i;
+ unsigned int pc_tab[NOF_PCS];
+
+ for (i = 0; i < NOF_PCS; i++)
+ pc_tab[i] = 0;
+#endif
+
+ if (!pc_histogram_enabled)
+ return;
+
+ if (isp_histogram) {
+#if SUSPEND
+ /* STOP the ISP */
+ isp_ctrl_store(ISP0_ID, ISP_SC_REG, STOP_MASK);
+#endif
+ msink = isp_ctrl_load(ISP0_ID, ISP_CTRL_SINK_REG);
+#if MULTIPLE_PCS
+ for (i = 0; i < NOF_PCS; i++)
+ pc_tab[i] = isp_ctrl_load(ISP0_ID, ISP_PC_REG);
+#else
+ pc = isp_ctrl_load(ISP0_ID, ISP_PC_REG);
+#endif
+
+#if SUSPEND
+ /* RESUME the ISP */
+ isp_ctrl_store(ISP0_ID, ISP_SC_REG, RESUME_MASK);
+#endif
+ isp_histogram->msink[pc] &= msink;
+ stall = (msink != 0x7FF);
+
+ if (stall)
+ isp_histogram->stall[pc]++;
+ else
+ isp_histogram->run[pc]++;
+ }
+
+ if (sp_histogram && 0) {
+ msink = sp_ctrl_load(SP0_ID, SP_CTRL_SINK_REG);
+ pc = sp_ctrl_load(SP0_ID, SP_PC_REG);
+ sp_histogram->msink[pc] &= msink;
+ stall = (msink != 0x7FF);
+ if (stall)
+ sp_histogram->stall[pc]++;
+ else
+ sp_histogram->run[pc]++;
+ }
+}
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_metrics.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_metrics.h
new file mode 100644
index 000000000000..40840ea318ab
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_metrics.h
@@ -0,0 +1,76 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _SH_CSS_METRICS_H_
+#define _SH_CSS_METRICS_H_
+
+#include <type_support.h>
+
+struct sh_css_pc_histogram {
+ unsigned length;
+ unsigned *run;
+ unsigned *stall;
+ unsigned *msink;
+};
+
+#if !defined(__USE_DESIGNATED_INITIALISERS__)
+#define DEFAULT_PC_HISTOGRAM \
+{ \
+ 0, \
+ NULL, \
+ NULL, \
+ NULL \
+}
+#endif
+
+struct sh_css_binary_metrics {
+ unsigned mode;
+ unsigned id;
+ struct sh_css_pc_histogram isp_histogram;
+ struct sh_css_pc_histogram sp_histogram;
+ struct sh_css_binary_metrics *next;
+};
+
+#if !defined(__USE_DESIGNATED_INITIALISERS__)
+#define DEFAULT_BINARY_METRICS \
+{ \
+ 0, \
+ 0, \
+ DEFAULT_PC_HISTOGRAM, \
+ DEFAULT_PC_HISTOGRAM, \
+ NULL \
+}
+#endif
+
+struct ia_css_frame_metrics {
+ unsigned num_frames;
+};
+
+struct sh_css_metrics {
+ struct sh_css_binary_metrics *binary_metrics;
+ struct ia_css_frame_metrics frame_metrics;
+};
+
+extern struct sh_css_metrics sh_css_metrics;
+
+/* includes ia_css_binary.h, which depends on sh_css_metrics.h */
+#include "ia_css_types.h"
+
+/* Sample ISP and SP pc and add to histogram */
+void sh_css_metrics_enable_pc_histogram(bool enable);
+void sh_css_metrics_start_frame(void);
+void sh_css_metrics_start_binary(struct sh_css_binary_metrics *metrics);
+void sh_css_metrics_sample_pcs(void);
+
+#endif /* _SH_CSS_METRICS_H_ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_mipi.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_mipi.c
new file mode 100644
index 000000000000..7e3893c6c08a
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_mipi.c
@@ -0,0 +1,749 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "ia_css_mipi.h"
+#include "sh_css_mipi.h"
+#include <type_support.h>
+#include "system_global.h"
+#include "ia_css_err.h"
+#include "ia_css_pipe.h"
+#include "ia_css_stream_format.h"
+#include "sh_css_stream_format.h"
+#include "ia_css_stream_public.h"
+#include "ia_css_frame_public.h"
+#include "ia_css_input_port.h"
+#include "ia_css_debug.h"
+#include "sh_css_struct.h"
+#include "sh_css_defs.h"
+#include "sh_css_sp.h" /* sh_css_update_host2sp_mipi_frame sh_css_update_host2sp_num_mipi_frames ... */
+#include "sw_event_global.h" /* IA_CSS_PSYS_SW_EVENT_MIPI_BUFFERS_READY */
+
+#if defined(USE_INPUT_SYSTEM_VERSION_2) || defined(USE_INPUT_SYSTEM_VERSION_2401)
+static uint32_t ref_count_mipi_allocation[N_CSI_PORTS]; /* Initialized in mipi_init */
+#endif
+
+enum ia_css_err
+ia_css_mipi_frame_specify(const unsigned int size_mem_words,
+ const bool contiguous)
+{
+ enum ia_css_err err = IA_CSS_SUCCESS;
+
+ my_css.size_mem_words = size_mem_words;
+ (void)contiguous;
+
+ return err;
+}
+
+#ifdef ISP2401
+#if defined(USE_INPUT_SYSTEM_VERSION_2) || defined(USE_INPUT_SYSTEM_VERSION_2401)
+/*
+ * Check if a source port or TPG/PRBS ID is valid
+ */
+static bool ia_css_mipi_is_source_port_valid(struct ia_css_pipe *pipe,
+ unsigned int *pport)
+{
+ bool ret = true;
+ unsigned int port = 0;
+ unsigned int max_ports = 0;
+
+ switch (pipe->stream->config.mode) {
+ case IA_CSS_INPUT_MODE_BUFFERED_SENSOR:
+ port = (unsigned int) pipe->stream->config.source.port.port;
+ max_ports = N_CSI_PORTS;
+ break;
+ case IA_CSS_INPUT_MODE_TPG:
+ port = (unsigned int) pipe->stream->config.source.tpg.id;
+ max_ports = N_CSS_TPG_IDS;
+ break;
+ case IA_CSS_INPUT_MODE_PRBS:
+ port = (unsigned int) pipe->stream->config.source.prbs.id;
+ max_ports = N_CSS_PRBS_IDS;
+ break;
+ default:
+ assert(false);
+ ret = false;
+ break;
+ }
+
+ if (ret) {
+ assert(port < max_ports);
+
+ if (port >= max_ports)
+ ret = false;
+ }
+
+ *pport = port;
+
+ return ret;
+}
+#endif
+
+#endif
+/* Assumptions:
+ * - A line is a multiple of 4 bytes (= 1 word).
+ * - Each frame has SOF and EOF (each 1 word).
+ * - Each line has a format header and optionally SOL and EOL (each 1 word).
+ * - Odd and even lines of the YUV420 format differ in bits per pixel.
+ * - Custom size of embedded data.
+ * -- Interleaved frames are not taken into account.
+ * -- Lines are multiples of 8 B, not necessarily of a custom size (3 B, 7 B,
+ *    etc.).
+ * The result is given in DDR memory words (32 B / 256 bits).
+ */
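+/*
+ * Worked example of the calculation below (hypothetical input; the extra
+ * 2 * ISP_VEC_NELEMS width padding applied for the 2401 input system is
+ * ignored here): RAW_8, 640x480, no SOL/EOL, no embedded data.
+ *   odd_line_bytes       = ceil(640 * 8 / 8)       = 640
+ *   words_per_line       = ceil(640 / 4)           = 160
+ *   words_for_first_line = 160 + 2 (SOF + header)  = 162
+ *   words_per_other_line = 160 + 1 (header)        = 161
+ *   mem words, first     = ceil(162 / 8)           = 21
+ *   mem words, per line  = ceil(161 / 8)           = 21
+ *   mem_words = 21 + 239 * 21 + 240 * 21 + 1 (EOF) = 10081
+ * i.e. 10081 DDR words of 32 B, about 315 KiB.
+ */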
+enum ia_css_err
+ia_css_mipi_frame_calculate_size(const unsigned int width,
+ const unsigned int height,
+ const enum ia_css_stream_format format,
+ const bool hasSOLandEOL,
+ const unsigned int embedded_data_size_words,
+ unsigned int *size_mem_words)
+{
+ enum ia_css_err err = IA_CSS_SUCCESS;
+
+ unsigned int bits_per_pixel = 0;
+ unsigned int even_line_bytes = 0;
+ unsigned int odd_line_bytes = 0;
+ unsigned int words_per_odd_line = 0;
+ unsigned int words_for_first_line = 0;
+ unsigned int words_per_even_line = 0;
+ unsigned int mem_words_per_even_line = 0;
+ unsigned int mem_words_per_odd_line = 0;
+ unsigned int mem_words_for_first_line = 0;
+ unsigned int mem_words_for_EOF = 0;
+ unsigned int mem_words = 0;
+ unsigned int width_padded = width;
+
+#if defined(USE_INPUT_SYSTEM_VERSION_2401)
+ /* The changes will be reverted as soon as RAW
+ * Buffers are deployed by the 2401 Input System
+ * in the non-continuous use scenario.
+ */
+ width_padded += (2 * ISP_VEC_NELEMS);
+#endif
+
+ IA_CSS_ENTER("padded_width=%d, height=%d, format=%d, hasSOLandEOL=%d, embedded_data_size_words=%d\n",
+ width_padded, height, format, hasSOLandEOL, embedded_data_size_words);
+
+ switch (format) {
+ case IA_CSS_STREAM_FORMAT_RAW_6: /* 4p, 3B, 24bits */
+ bits_per_pixel = 6; break;
+ case IA_CSS_STREAM_FORMAT_RAW_7: /* 8p, 7B, 56bits */
+ bits_per_pixel = 7; break;
+ case IA_CSS_STREAM_FORMAT_RAW_8: /* 1p, 1B, 8bits */
+ case IA_CSS_STREAM_FORMAT_BINARY_8: /* 8bits, TODO: check. */
+ case IA_CSS_STREAM_FORMAT_YUV420_8: /* odd 2p, 2B, 16bits, even 2p, 4B, 32bits */
+ bits_per_pixel = 8; break;
+ case IA_CSS_STREAM_FORMAT_YUV420_10: /* odd 4p, 5B, 40bits, even 4p, 10B, 80bits */
+ case IA_CSS_STREAM_FORMAT_RAW_10: /* 4p, 5B, 40bits */
+#if !defined(HAS_NO_PACKED_RAW_PIXELS)
+ /* The changes will be reverted as soon as RAW
+ * Buffers are deployed by the 2401 Input System
+ * in the non-continuous use scenario.
+ */
+ bits_per_pixel = 10;
+#else
+ bits_per_pixel = 16;
+#endif
+ break;
+ case IA_CSS_STREAM_FORMAT_YUV420_8_LEGACY: /* 2p, 3B, 24bits */
+ case IA_CSS_STREAM_FORMAT_RAW_12: /* 2p, 3B, 24bits */
+ bits_per_pixel = 12; break;
+ case IA_CSS_STREAM_FORMAT_RAW_14: /* 4p, 7B, 56bits */
+ bits_per_pixel = 14; break;
+ case IA_CSS_STREAM_FORMAT_RGB_444: /* 1p, 2B, 16bits */
+ case IA_CSS_STREAM_FORMAT_RGB_555: /* 1p, 2B, 16bits */
+ case IA_CSS_STREAM_FORMAT_RGB_565: /* 1p, 2B, 16bits */
+ case IA_CSS_STREAM_FORMAT_YUV422_8: /* 2p, 4B, 32bits */
+ bits_per_pixel = 16; break;
+ case IA_CSS_STREAM_FORMAT_RGB_666: /* 4p, 9B, 72bits */
+ bits_per_pixel = 18; break;
+ case IA_CSS_STREAM_FORMAT_YUV422_10: /* 2p, 5B, 40bits */
+ bits_per_pixel = 20; break;
+ case IA_CSS_STREAM_FORMAT_RGB_888: /* 1p, 3B, 24bits */
+ bits_per_pixel = 24; break;
+
+ case IA_CSS_STREAM_FORMAT_YUV420_16: /* Not supported */
+ case IA_CSS_STREAM_FORMAT_YUV422_16: /* Not supported */
+ case IA_CSS_STREAM_FORMAT_RAW_16: /* TODO: not specified in MIPI SPEC, check */
+ default:
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+
+ odd_line_bytes = (width_padded * bits_per_pixel + 7) >> 3; /* ceil ( bits per line / 8) */
+
+ /* Even lines for YUV420 formats are double in bits_per_pixel. */
+ if (format == IA_CSS_STREAM_FORMAT_YUV420_8
+ || format == IA_CSS_STREAM_FORMAT_YUV420_10
+ || format == IA_CSS_STREAM_FORMAT_YUV420_16) {
+ even_line_bytes = (width_padded * 2 * bits_per_pixel + 7) >> 3; /* ceil ( bits per line / 8) */
+ } else {
+ even_line_bytes = odd_line_bytes;
+ }
+
+ /* a frame represented in memory: ()- optional; data - payload words.
+ * addr 0 1 2 3 4 5 6 7:
+ * first SOF (SOL) PACK_H data data data data data
+ * data data data data data data data data
+ * ...
+ * data data 0 0 0 0 0 0
+ * second (EOL) (SOL) PACK_H data data data data data
+ * data data data data data data data data
+ * ...
+ * data data 0 0 0 0 0 0
+ * ...
+ * last (EOL) EOF 0 0 0 0 0 0
+ *
+ * Embedded lines are regular lines stored before the first and after
+ * payload lines.
+ */
+
+ words_per_odd_line = (odd_line_bytes + 3) >> 2;
+ /* ceil(odd_line_bytes/4); word = 4 bytes */
+ words_per_even_line = (even_line_bytes + 3) >> 2;
+ words_for_first_line = words_per_odd_line + 2 + (hasSOLandEOL ? 1 : 0);
+ /* + SOF +packet header + optionally (SOL), but (EOL) is not in the first line */
+ words_per_odd_line += (1 + (hasSOLandEOL ? 2 : 0));
+ /* each non-first line has format header, and optionally (SOL) and (EOL). */
+ words_per_even_line += (1 + (hasSOLandEOL ? 2 : 0));
+
+ mem_words_per_odd_line = (words_per_odd_line + 7) >> 3;
+ /* ceil(words_per_odd_line/8); mem_word = 32 bytes, 8 words */
+ mem_words_for_first_line = (words_for_first_line + 7) >> 3;
+ mem_words_per_even_line = (words_per_even_line + 7) >> 3;
+ mem_words_for_EOF = 1; /* last line consists of the optional (EOL) and EOF */
+
+ mem_words = ((embedded_data_size_words + 7) >> 3) +
+ mem_words_for_first_line +
+ (((height + 1) >> 1) - 1) * mem_words_per_odd_line +
+ /* ceil(height/2) - 1 (first line is calculated separately) */
+ (height >> 1) * mem_words_per_even_line + /* floor(height/2) */
+ mem_words_for_EOF;
+
+ *size_mem_words = mem_words; /* ceil(words/8); mem word is 32B = 8words. */
+ /* Check if the above is still needed. */
+
+ IA_CSS_LEAVE_ERR(err);
+ return err;
+}
+
+#if !defined(HAS_NO_INPUT_SYSTEM) && defined(USE_INPUT_SYSTEM_VERSION_2)
+enum ia_css_err
+ia_css_mipi_frame_enable_check_on_size(const enum ia_css_csi2_port port,
+ const unsigned int size_mem_words)
+{
+ uint32_t idx;
+
+ enum ia_css_err err = IA_CSS_ERR_RESOURCE_NOT_AVAILABLE;
+
+ OP___assert(port < N_CSI_PORTS);
+ OP___assert(size_mem_words != 0);
+
+ for (idx = 0; idx < IA_CSS_MIPI_SIZE_CHECK_MAX_NOF_ENTRIES_PER_PORT &&
+ my_css.mipi_sizes_for_check[port][idx] != 0;
+ idx++) { /* do nothing */
+ }
+ if (idx < IA_CSS_MIPI_SIZE_CHECK_MAX_NOF_ENTRIES_PER_PORT) {
+ my_css.mipi_sizes_for_check[port][idx] = size_mem_words;
+ err = IA_CSS_SUCCESS;
+ }
+
+ return err;
+}
+#endif
+
+void
+mipi_init(void)
+{
+#if defined(USE_INPUT_SYSTEM_VERSION_2) || defined(USE_INPUT_SYSTEM_VERSION_2401)
+ unsigned int i;
+
+ for (i = 0; i < N_CSI_PORTS; i++)
+ ref_count_mipi_allocation[i] = 0;
+#endif
+}
+
+enum ia_css_err
+calculate_mipi_buff_size(
+ struct ia_css_stream_config *stream_cfg,
+ unsigned int *size_mem_words)
+{
+#if !defined(USE_INPUT_SYSTEM_VERSION_2401)
+ enum ia_css_err err = IA_CSS_ERR_INTERNAL_ERROR;
+ (void)stream_cfg;
+ (void)size_mem_words;
+#else
+ unsigned int width;
+ unsigned int height;
+ enum ia_css_stream_format format;
+ bool pack_raw_pixels;
+
+ unsigned int width_padded;
+ unsigned int bits_per_pixel = 0;
+
+ unsigned int even_line_bytes = 0;
+ unsigned int odd_line_bytes = 0;
+
+ unsigned int words_per_odd_line = 0;
+ unsigned int words_per_even_line = 0;
+
+ unsigned int mem_words_per_even_line = 0;
+ unsigned int mem_words_per_odd_line = 0;
+
+ unsigned int mem_words_per_buff_line = 0;
+ unsigned int mem_words_per_buff = 0;
+ enum ia_css_err err = IA_CSS_SUCCESS;
+
+ /**
+#ifndef ISP2401
+ * zhengjie.lu@intel.com
+ *
+#endif
+ * NOTE
+ * - In the struct "ia_css_stream_config", there
+ * are two members: "input_config" and "isys_config".
+ * Both of them provide the same information, e.g.
+ * input_res and format.
+ *
+ * The question here is: which one should be used?
+ */
+ width = stream_cfg->input_config.input_res.width;
+ height = stream_cfg->input_config.input_res.height;
+ format = stream_cfg->input_config.format;
+ pack_raw_pixels = stream_cfg->pack_raw_pixels;
+ /** end of NOTE */
+
+ /**
+#ifndef ISP2401
+ * zhengjie.lu@intel.com
+ *
+#endif
+ * NOTE
+ * - The following code is derived from the
+ * existing code "ia_css_mipi_frame_calculate_size()".
+ *
+ * The question here is: why add "2 * ISP_VEC_NELEMS"
+ * to "width_padded" instead of aligning "width_padded"
+ * to "2 * ISP_VEC_NELEMS"?
+ */
+ /* The changes will be reverted as soon as RAW
+ * Buffers are deployed by the 2401 Input System
+ * in the non-continuous use scenario.
+ */
+ width_padded = width + (2 * ISP_VEC_NELEMS);
+ /** end of NOTE */
+
+ IA_CSS_ENTER("padded_width=%d, height=%d, format=%d\n",
+ width_padded, height, format);
+
+ bits_per_pixel = sh_css_stream_format_2_bits_per_subpixel(format);
+ bits_per_pixel =
+ (format == IA_CSS_STREAM_FORMAT_RAW_10 && pack_raw_pixels) ? bits_per_pixel : 16;
+ if (bits_per_pixel == 0)
+ return IA_CSS_ERR_INTERNAL_ERROR;
+
+ odd_line_bytes = (width_padded * bits_per_pixel + 7) >> 3; /* ceil ( bits per line / 8) */
+
+ /* Even lines for YUV420 formats are double in bits_per_pixel. */
+ if (format == IA_CSS_STREAM_FORMAT_YUV420_8
+ || format == IA_CSS_STREAM_FORMAT_YUV420_10) {
+ even_line_bytes = (width_padded * 2 * bits_per_pixel + 7) >> 3; /* ceil ( bits per line / 8) */
+ } else {
+ even_line_bytes = odd_line_bytes;
+ }
+
+ words_per_odd_line = (odd_line_bytes + 3) >> 2;
+ /* ceil(odd_line_bytes/4); word = 4 bytes */
+ words_per_even_line = (even_line_bytes + 3) >> 2;
+
+ mem_words_per_odd_line = (words_per_odd_line + 7) >> 3;
+ /* ceil(words_per_odd_line/8); mem_word = 32 bytes, 8 words */
+ mem_words_per_even_line = (words_per_even_line + 7) >> 3;
+
+ mem_words_per_buff_line =
+ (mem_words_per_odd_line > mem_words_per_even_line) ? mem_words_per_odd_line : mem_words_per_even_line;
+ mem_words_per_buff = mem_words_per_buff_line * height;
+
+ *size_mem_words = mem_words_per_buff;
+
+ IA_CSS_LEAVE_ERR(err);
+#endif
+ return err;
+}
+
+enum ia_css_err
+allocate_mipi_frames(struct ia_css_pipe *pipe, struct ia_css_stream_info *info)
+{
+#if defined(USE_INPUT_SYSTEM_VERSION_2) || defined(USE_INPUT_SYSTEM_VERSION_2401)
+ enum ia_css_err err = IA_CSS_ERR_INTERNAL_ERROR;
+#ifndef ISP2401
+ unsigned int port;
+#else
+ unsigned int port = 0;
+#endif
+ struct ia_css_frame_info mipi_intermediate_info;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "allocate_mipi_frames(%p) enter:\n", pipe);
+
+ assert(pipe != NULL);
+ assert(pipe->stream != NULL);
+ if ((pipe == NULL) || (pipe->stream == NULL)) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "allocate_mipi_frames(%p) exit: pipe or stream is null.\n",
+ pipe);
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+
+#ifdef USE_INPUT_SYSTEM_VERSION_2401
+ if (pipe->stream->config.online) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "allocate_mipi_frames(%p) exit: no buffers needed for 2401 pipe mode.\n",
+ pipe);
+ return IA_CSS_SUCCESS;
+ }
+
+#endif
+#ifndef ISP2401
+ if (pipe->stream->config.mode != IA_CSS_INPUT_MODE_BUFFERED_SENSOR) {
+#else
+ if (!(pipe->stream->config.mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR ||
+ pipe->stream->config.mode == IA_CSS_INPUT_MODE_TPG ||
+ pipe->stream->config.mode == IA_CSS_INPUT_MODE_PRBS)) {
+#endif
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "allocate_mipi_frames(%p) exit: no buffers needed for pipe mode.\n",
+ pipe);
+ return IA_CSS_SUCCESS; /* AM TODO: Check */
+ }
+
+#ifndef ISP2401
+ port = (unsigned int) pipe->stream->config.source.port.port;
+ assert(port < N_CSI_PORTS);
+ if (port >= N_CSI_PORTS) {
+#else
+ if (!ia_css_mipi_is_source_port_valid(pipe, &port)) {
+#endif
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "allocate_mipi_frames(%p) exit: error: port is not correct (port=%d).\n",
+ pipe, port);
+ return IA_CSS_ERR_INTERNAL_ERROR;
+ }
+
+#ifdef USE_INPUT_SYSTEM_VERSION_2401
+ err = calculate_mipi_buff_size(
+ &(pipe->stream->config),
+ &(my_css.mipi_frame_size[port]));
+#endif
+
+#if defined(USE_INPUT_SYSTEM_VERSION_2)
+ if (ref_count_mipi_allocation[port] != 0) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "allocate_mipi_frames(%p) exit: already allocated for this port (port=%d).\n",
+ pipe, port);
+ return IA_CSS_SUCCESS;
+ }
+#else
+ /* 2401 system allows multiple streams to use same physical port. This is not
+ * true for 2400 system. Currently 2401 uses MIPI buffers as a temporary solution.
+ * TODO AM: Once that is changed (removed) this code should be removed as well.
+ * In that case only 2400 related code should remain.
+ */
+ if (ref_count_mipi_allocation[port] != 0) {
+ ref_count_mipi_allocation[port]++;
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "allocate_mipi_frames(%p) leave: nothing to do, already allocated for this port (port=%d).\n",
+ pipe, port);
+ return IA_CSS_SUCCESS;
+ }
+#endif
+
+ ref_count_mipi_allocation[port]++;
+
+ /* TODO: Cleaning needed. */
+ /* This code needs to be modified to allocate the MIPI frames in the
+ correct, normal way, i.e. with an allocate from info (by justin). */
+ mipi_intermediate_info = pipe->pipe_settings.video.video_binary.internal_frame_info;
+ mipi_intermediate_info.res.width = 0;
+ mipi_intermediate_info.res.height = 0;
+ /* To indicate it is not (yet) valid format. */
+ mipi_intermediate_info.format = IA_CSS_FRAME_FORMAT_NUM;
+ mipi_intermediate_info.padded_width = 0;
+ mipi_intermediate_info.raw_bit_depth = 0;
+
+ /* AM TODO: mipi frames number should come from stream struct. */
+ my_css.num_mipi_frames[port] = NUM_MIPI_FRAMES_PER_STREAM;
+
+ /* Incremental allocation (per stream), not for all streams at once. */
+ { /* limit the scope of i,j */
+ unsigned i, j;
+ for (i = 0; i < my_css.num_mipi_frames[port]; i++) {
+ /* free previous frame */
+ if (my_css.mipi_frames[port][i]) {
+ ia_css_frame_free(my_css.mipi_frames[port][i]);
+ my_css.mipi_frames[port][i] = NULL;
+ }
+ /* check if new frame is needed */
+ if (i < my_css.num_mipi_frames[port]) {
+ /* allocate new frame */
+ err = ia_css_frame_allocate_with_buffer_size(
+ &my_css.mipi_frames[port][i],
+ my_css.mipi_frame_size[port] * HIVE_ISP_DDR_WORD_BYTES,
+ false);
+ if (err != IA_CSS_SUCCESS) {
+ for (j = 0; j < i; j++) {
+ if (my_css.mipi_frames[port][j]) {
+ ia_css_frame_free(my_css.mipi_frames[port][j]);
+ my_css.mipi_frames[port][j] = NULL;
+ }
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "allocate_mipi_frames(%p, %d) exit: error: allocation failed.\n",
+ pipe, port);
+ return err;
+ }
+ }
+ if (info->metadata_info.size > 0) {
+ /* free previous metadata buffer */
+ if (my_css.mipi_metadata[port][i] != NULL) {
+ ia_css_metadata_free(my_css.mipi_metadata[port][i]);
+ my_css.mipi_metadata[port][i] = NULL;
+ }
+ /* check if need to allocate a new metadata buffer */
+ if (i < my_css.num_mipi_frames[port]) {
+ /* allocate new metadata buffer */
+ my_css.mipi_metadata[port][i] = ia_css_metadata_allocate(&info->metadata_info);
+ if (my_css.mipi_metadata[port][i] == NULL) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "allocate_mipi_metadata(%p, %d) failed.\n",
+ pipe, port);
+ return err;
+ }
+ }
+ }
+ }
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "allocate_mipi_frames(%p) exit:\n", pipe);
+
+ return err;
+#else
+ (void)pipe;
+ (void)info;
+ return IA_CSS_SUCCESS;
+#endif
+}
+
+enum ia_css_err
+free_mipi_frames(struct ia_css_pipe *pipe)
+{
+#if defined(USE_INPUT_SYSTEM_VERSION_2) || defined(USE_INPUT_SYSTEM_VERSION_2401)
+ enum ia_css_err err = IA_CSS_ERR_INTERNAL_ERROR;
+#ifndef ISP2401
+ unsigned int port;
+#else
+ unsigned int port = 0;
+#endif
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "free_mipi_frames(%p) enter:\n", pipe);
+
+ /* assert(pipe != NULL); TEMP: TODO: Should be assert only. */
+ if (pipe != NULL) {
+ assert(pipe->stream != NULL);
+ if ((pipe == NULL) || (pipe->stream == NULL)) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "free_mipi_frames(%p) exit: error: pipe or stream is null.\n",
+ pipe);
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+
+#ifndef ISP2401
+ if (pipe->stream->config.mode != IA_CSS_INPUT_MODE_BUFFERED_SENSOR) {
+#else
+ if (!(pipe->stream->config.mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR ||
+ pipe->stream->config.mode == IA_CSS_INPUT_MODE_TPG ||
+ pipe->stream->config.mode == IA_CSS_INPUT_MODE_PRBS)) {
+#endif
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "free_mipi_frames(%p) exit: error: wrong mode.\n",
+ pipe);
+ return err;
+ }
+
+#ifndef ISP2401
+ port = (unsigned int) pipe->stream->config.source.port.port;
+ assert(port < N_CSI_PORTS);
+ if (port >= N_CSI_PORTS) {
+#else
+ if (!ia_css_mipi_is_source_port_valid(pipe, &port)) {
+#endif
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+#ifndef ISP2401
+ "free_mipi_frames(%p, %d) exit: error: pipe port is not correct.\n",
+#else
+ "free_mipi_frames(%p) exit: error: pipe port is not correct (port=%d).\n",
+#endif
+ pipe, port);
+ return err;
+ }
+#ifdef ISP2401
+
+#endif
+ if (ref_count_mipi_allocation[port] > 0) {
+#if defined(USE_INPUT_SYSTEM_VERSION_2)
+ assert(ref_count_mipi_allocation[port] == 1);
+ if (ref_count_mipi_allocation[port] != 1) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "free_mipi_frames(%p) exit: error: wrong ref_count (ref_count=%d).\n",
+ pipe, ref_count_mipi_allocation[port]);
+ return err;
+ }
+#endif
+
+ ref_count_mipi_allocation[port]--;
+
+ if (ref_count_mipi_allocation[port] == 0) {
+ /* no streams are using this buffer, so free it */
+ unsigned int i;
+ for (i = 0; i < my_css.num_mipi_frames[port]; i++) {
+ if (my_css.mipi_frames[port][i] != NULL) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "free_mipi_frames(port=%d, num=%d).\n", port, i);
+ ia_css_frame_free(my_css.mipi_frames[port][i]);
+ my_css.mipi_frames[port][i] = NULL;
+ }
+ if (my_css.mipi_metadata[port][i] != NULL) {
+ ia_css_metadata_free(my_css.mipi_metadata[port][i]);
+ my_css.mipi_metadata[port][i] = NULL;
+ }
+ }
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "free_mipi_frames(%p) exit (deallocated).\n", pipe);
+ }
+#if defined(USE_INPUT_SYSTEM_VERSION_2401)
+ else {
+ /* 2401 system allows multiple streams to use same physical port. This is not
+ * true for 2400 system. Currently 2401 uses MIPI buffers as a temporary solution.
+ * TODO AM: Once that is changed (removed) this code should be removed as well.
+ * In that case only 2400 related code should remain.
+ */
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "free_mipi_frames(%p) leave: nothing to do, other streams still use this port (port=%d).\n",
+ pipe, port);
+ }
+#endif
+ }
+ } else { /* pipe == NULL */
+ /* AM TEMP: free-ing all mipi buffers just like a legacy code. */
+ for (port = CSI_PORT0_ID; port < N_CSI_PORTS; port++) {
+ unsigned int i;
+ for (i = 0; i < my_css.num_mipi_frames[port]; i++) {
+ if (my_css.mipi_frames[port][i] != NULL) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "free_mipi_frames(port=%d, num=%d).\n", port, i);
+ ia_css_frame_free(my_css.mipi_frames[port][i]);
+ my_css.mipi_frames[port][i] = NULL;
+ }
+ if (my_css.mipi_metadata[port][i] != NULL) {
+ ia_css_metadata_free(my_css.mipi_metadata[port][i]);
+ my_css.mipi_metadata[port][i] = NULL;
+ }
+ }
+ ref_count_mipi_allocation[port] = 0;
+ }
+ }
+#else
+ (void)pipe;
+#endif
+ return IA_CSS_SUCCESS;
+}
+
+enum ia_css_err
+send_mipi_frames(struct ia_css_pipe *pipe)
+{
+#if defined(USE_INPUT_SYSTEM_VERSION_2) || defined(USE_INPUT_SYSTEM_VERSION_2401)
+ enum ia_css_err err = IA_CSS_ERR_INTERNAL_ERROR;
+ unsigned int i;
+#ifndef ISP2401
+ unsigned int port;
+#else
+ unsigned int port = 0;
+#endif
+
+ IA_CSS_ENTER_PRIVATE("pipe=%d", pipe);
+
+ assert(pipe != NULL);
+ assert(pipe->stream != NULL);
+ if (pipe == NULL || pipe->stream == NULL) {
+ IA_CSS_ERROR("pipe or stream is null");
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+
+ /* multi stream video needs mipi buffers */
+ /* nothing to be done in other cases. */
+#ifndef ISP2401
+ if (pipe->stream->config.mode != IA_CSS_INPUT_MODE_BUFFERED_SENSOR) {
+#else
+ if (!(pipe->stream->config.mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR ||
+ pipe->stream->config.mode == IA_CSS_INPUT_MODE_TPG ||
+ pipe->stream->config.mode == IA_CSS_INPUT_MODE_PRBS)) {
+#endif
+ IA_CSS_LOG("nothing to be done for this mode");
+ return IA_CSS_SUCCESS;
+ /* TODO: AM: maybe this should be returning an error. */
+ }
+
+#ifndef ISP2401
+ port = (unsigned int) pipe->stream->config.source.port.port;
+ assert(port < N_CSI_PORTS);
+ if (port >= N_CSI_PORTS) {
+ IA_CSS_ERROR("invalid port specified (%d)", port);
+#else
+ if (!ia_css_mipi_is_source_port_valid(pipe, &port)) {
+ IA_CSS_ERROR("send_mipi_frames(%p) exit: invalid port specified (port=%d).\n", pipe, port);
+#endif
+ return err;
+ }
+
+ /* Hand-over the SP-internal mipi buffers */
+ for (i = 0; i < my_css.num_mipi_frames[port]; i++) {
+ /* Need to include the offset for the port. */
+ sh_css_update_host2sp_mipi_frame(port * NUM_MIPI_FRAMES_PER_STREAM + i,
+ my_css.mipi_frames[port][i]);
+ sh_css_update_host2sp_mipi_metadata(port * NUM_MIPI_FRAMES_PER_STREAM + i,
+ my_css.mipi_metadata[port][i]);
+ }
+ sh_css_update_host2sp_num_mipi_frames(my_css.num_mipi_frames[port]);
+
+ /**********************************
+ * Send an event to inform the SP
+ * that all MIPI frames are passed.
+ **********************************/
+ if (!sh_css_sp_is_running()) {
+ /* SP is not running. The queues are not valid */
+ IA_CSS_ERROR("sp is not running");
+ return err;
+ }
+
+ ia_css_bufq_enqueue_psys_event(
+ IA_CSS_PSYS_SW_EVENT_MIPI_BUFFERS_READY,
+ (uint8_t)port,
+ (uint8_t)my_css.num_mipi_frames[port],
+ 0 /* not used */);
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_SUCCESS);
+#else
+ (void)pipe;
+#endif
+ return IA_CSS_SUCCESS;
+}
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_mipi.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_mipi.h
new file mode 100644
index 000000000000..990f678422fd
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_mipi.h
@@ -0,0 +1,49 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __SH_CSS_MIPI_H
+#define __SH_CSS_MIPI_H
+
+#include <ia_css_err.h> /* ia_css_err */
+#include <ia_css_types.h> /* ia_css_pipe */
+#include <ia_css_stream_public.h> /* ia_css_stream_config */
+
+void
+mipi_init(void);
+
+enum ia_css_err
+allocate_mipi_frames(struct ia_css_pipe *pipe, struct ia_css_stream_info *info);
+
+enum ia_css_err
+free_mipi_frames(struct ia_css_pipe *pipe);
+
+enum ia_css_err
+send_mipi_frames(struct ia_css_pipe *pipe);
+
+/**
+ * @brief Calculate the required MIPI buffer sizes.
+ * Based on the stream configuration, calculate the
+ * required MIPI buffer sizes (in DDR words).
+ *
+ * @param[in] stream_cfg Pointer to the target stream configuration.
+ * @param[out] size_mem_words MIPI buffer size in DDR words.
+ *
+ * @return IA_CSS_SUCCESS on success; an error code otherwise.
+ */
+enum ia_css_err
+calculate_mipi_buff_size(
+ struct ia_css_stream_config *stream_cfg,
+ unsigned int *size_mem_words);
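+/*
+ * Usage sketch (illustrative values; the fields are the ones consumed by the
+ * implementation in sh_css_mipi.c):
+ *
+ *   struct ia_css_stream_config cfg = { 0 };
+ *   unsigned int words = 0;
+ *
+ *   cfg.input_config.input_res.width  = 1920;
+ *   cfg.input_config.input_res.height = 1080;
+ *   cfg.input_config.format           = IA_CSS_STREAM_FORMAT_RAW_10;
+ *   cfg.pack_raw_pixels               = true;
+ *   if (calculate_mipi_buff_size(&cfg, &words) == IA_CSS_SUCCESS)
+ *       ;  // "words" now holds the buffer size in 32-byte DDR words
+ */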
+
+#endif /* __SH_CSS_MIPI_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_mmu.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_mmu.c
new file mode 100644
index 000000000000..6de8472f1b07
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_mmu.c
@@ -0,0 +1,62 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "ia_css_mmu.h"
+#ifdef ISP2401
+#include "ia_css_mmu_private.h"
+#endif
+#include <ia_css_debug.h>
+#include "sh_css_sp.h"
+#include "sh_css_firmware.h"
+#include "sp.h"
+#ifdef ISP2401
+#include "mmu_device.h"
+#endif
+
+void
+ia_css_mmu_invalidate_cache(void)
+{
+ const struct ia_css_fw_info *fw = &sh_css_sp_fw;
+ unsigned int HIVE_ADDR_ia_css_dmaproxy_sp_invalidate_tlb;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_mmu_invalidate_cache() enter\n");
+
+ /* if the SP is not running we should not access its dmem */
+ if (sh_css_sp_is_running()) {
+ HIVE_ADDR_ia_css_dmaproxy_sp_invalidate_tlb = fw->info.sp.invalidate_tlb;
+
+ (void)HIVE_ADDR_ia_css_dmaproxy_sp_invalidate_tlb; /* Suppress warnings in CRUN */
+
+ sp_dmem_store_uint32(SP0_ID,
+ (unsigned int)sp_address_of(ia_css_dmaproxy_sp_invalidate_tlb),
+ true);
+ }
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_mmu_invalidate_cache() leave\n");
+}
+#ifdef ISP2401
+
+/* Deprecated, this is an HRT backend function (memory_access.h) */
+void
+sh_css_mmu_set_page_table_base_index(hrt_data base_index)
+{
+ int i;
+ IA_CSS_ENTER_PRIVATE("base_index=0x%08x\n", base_index);
+ for (i = 0; i < N_MMU_ID; i++) {
+ mmu_ID_t mmu_id = i;
+ mmu_set_page_table_base_index(mmu_id, base_index);
+ mmu_invalidate_cache(mmu_id);
+ }
+ IA_CSS_LEAVE_PRIVATE("");
+}
+#endif
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_morph.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_morph.c
new file mode 100644
index 000000000000..1f4fa25b1e79
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_morph.c
@@ -0,0 +1,16 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+/* This file will contain the code to implement the functions declared in ia_css_morph.h
+ and associated helper functions */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_param_dvs.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_param_dvs.c
new file mode 100644
index 000000000000..57dd5e7988c9
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_param_dvs.c
@@ -0,0 +1,267 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "sh_css_param_dvs.h"
+#include <assert_support.h>
+#include <type_support.h>
+#include <ia_css_err.h>
+#include <ia_css_types.h>
+#include "ia_css_debug.h"
+#include "memory_access.h"
+
+static struct ia_css_dvs_6axis_config *
+alloc_dvs_6axis_table(const struct ia_css_resolution *frame_res, struct ia_css_dvs_6axis_config *dvs_config_src)
+{
+ unsigned int width_y = 0;
+ unsigned int height_y = 0;
+ unsigned int width_uv = 0;
+ unsigned int height_uv = 0;
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ struct ia_css_dvs_6axis_config *dvs_config = NULL;
+
+ dvs_config = (struct ia_css_dvs_6axis_config *)sh_css_malloc(sizeof(struct ia_css_dvs_6axis_config));
+ if (dvs_config == NULL) {
+ IA_CSS_ERROR("out of memory");
+ err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+ }
+ else
+ { /*Initialize new struct with latest config settings*/
+ if (NULL != dvs_config_src) {
+ dvs_config->width_y = width_y = dvs_config_src->width_y;
+ dvs_config->height_y = height_y = dvs_config_src->height_y;
+ dvs_config->width_uv = width_uv = dvs_config_src->width_uv;
+ dvs_config->height_uv = height_uv = dvs_config_src->height_uv;
+ IA_CSS_LOG("alloc_dvs_6axis_table Y: W %d H %d", width_y, height_y);
+ }
+ else if (NULL != frame_res) {
+ dvs_config->width_y = width_y = DVS_TABLE_IN_BLOCKDIM_X_LUMA(frame_res->width);
+ dvs_config->height_y = height_y = DVS_TABLE_IN_BLOCKDIM_Y_LUMA(frame_res->height);
+			dvs_config->width_uv = width_uv = DVS_TABLE_IN_BLOCKDIM_X_CHROMA(frame_res->width / 2); /* UV = Y/2, depends on colour format YUV 4:2:0 */
+			dvs_config->height_uv = height_uv = DVS_TABLE_IN_BLOCKDIM_Y_CHROMA(frame_res->height / 2); /* UV = Y/2, depends on colour format YUV 4:2:0 */
+ IA_CSS_LOG("alloc_dvs_6axis_table Y: W %d H %d", width_y, height_y);
+ }
+
+ /* Generate Y buffers */
+ dvs_config->xcoords_y = (uint32_t *)sh_css_malloc(width_y * height_y * sizeof(uint32_t));
+ if (dvs_config->xcoords_y == NULL) {
+ IA_CSS_ERROR("out of memory");
+ err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+ goto exit;
+ }
+
+ dvs_config->ycoords_y = (uint32_t *)sh_css_malloc(width_y * height_y * sizeof(uint32_t));
+ if (dvs_config->ycoords_y == NULL) {
+ IA_CSS_ERROR("out of memory");
+ err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+ goto exit;
+ }
+
+ /* Generate UV buffers */
+ IA_CSS_LOG("UV W %d H %d", width_uv, height_uv);
+
+ dvs_config->xcoords_uv = (uint32_t *)sh_css_malloc(width_uv * height_uv * sizeof(uint32_t));
+ if (dvs_config->xcoords_uv == NULL) {
+ IA_CSS_ERROR("out of memory");
+ err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+ goto exit;
+ }
+
+ dvs_config->ycoords_uv = (uint32_t *)sh_css_malloc(width_uv * height_uv * sizeof(uint32_t));
+ if (dvs_config->ycoords_uv == NULL) {
+ IA_CSS_ERROR("out of memory");
+ err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+ }
+exit:
+ if (err != IA_CSS_SUCCESS) {
+		free_dvs_6axis_table(&dvs_config); /* we might have allocated some memory, release it */
+ dvs_config = NULL;
+ }
+ }
+
+ IA_CSS_LEAVE("dvs_config=%p", dvs_config);
+ return dvs_config;
+}
+
+static void
+init_dvs_6axis_table_from_default(struct ia_css_dvs_6axis_config *dvs_config, const struct ia_css_resolution *dvs_offset)
+{
+ unsigned int x, y;
+ unsigned int width_y = dvs_config->width_y;
+ unsigned int height_y = dvs_config->height_y;
+ unsigned int width_uv = dvs_config->width_uv;
+ unsigned int height_uv = dvs_config->height_uv;
+
+ IA_CSS_LOG("Env_X=%d, Env_Y=%d, width_y=%d, height_y=%d",
+ dvs_offset->width, dvs_offset->height, width_y, height_y);
+ for (y = 0; y < height_y; y++) {
+ for (x = 0; x < width_y; x++) {
+ dvs_config->xcoords_y[y*width_y + x] = (dvs_offset->width + x*DVS_BLOCKDIM_X) << DVS_COORD_FRAC_BITS;
+ }
+ }
+
+ for (y = 0; y < height_y; y++) {
+ for (x = 0; x < width_y; x++) {
+ dvs_config->ycoords_y[y*width_y + x] = (dvs_offset->height + y*DVS_BLOCKDIM_Y_LUMA) << DVS_COORD_FRAC_BITS;
+ }
+ }
+
+ for (y = 0; y < height_uv; y++) {
+		for (x = 0; x < width_uv; x++) { /* Envelope dimensions are set in Y pixels, hence offset UV = offset Y / 2 */
+ dvs_config->xcoords_uv[y*width_uv + x] = ((dvs_offset->width / 2) + x*DVS_BLOCKDIM_X) << DVS_COORD_FRAC_BITS;
+ }
+ }
+
+ for (y = 0; y < height_uv; y++) {
+		for (x = 0; x < width_uv; x++) { /* Envelope dimensions are set in Y pixels, hence offset UV = offset Y / 2 */
+ dvs_config->ycoords_uv[y*width_uv + x] = ((dvs_offset->height / 2) + y*DVS_BLOCKDIM_Y_CHROMA) << DVS_COORD_FRAC_BITS;
+ }
+ }
+
+}
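+
+/* Worked example for the default-table encoding above (numbers assumed for
+ * illustration only): with dvs_offset->width = 64 and DVS_BLOCKDIM_X = 64,
+ * the first two luma X coordinates are
+ *   (64 + 0 * 64) << DVS_COORD_FRAC_BITS =  64 << 10 =  65536
+ *   (64 + 1 * 64) << DVS_COORD_FRAC_BITS = 128 << 10 = 131072
+ * i.e. pixel positions stored in fixed point with DVS_COORD_FRAC_BITS (10)
+ * fractional bits; the Y and UV coordinates follow the same pattern with
+ * their own block dimensions. */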
+
+static void
+init_dvs_6axis_table_from_config(struct ia_css_dvs_6axis_config *dvs_config, struct ia_css_dvs_6axis_config *dvs_config_src)
+{
+ unsigned int width_y = dvs_config->width_y;
+ unsigned int height_y = dvs_config->height_y;
+ unsigned int width_uv = dvs_config->width_uv;
+ unsigned int height_uv = dvs_config->height_uv;
+
+ memcpy(dvs_config->xcoords_y, dvs_config_src->xcoords_y, (width_y * height_y * sizeof(uint32_t)));
+ memcpy(dvs_config->ycoords_y, dvs_config_src->ycoords_y, (width_y * height_y * sizeof(uint32_t)));
+ memcpy(dvs_config->xcoords_uv, dvs_config_src->xcoords_uv, (width_uv * height_uv * sizeof(uint32_t)));
+ memcpy(dvs_config->ycoords_uv, dvs_config_src->ycoords_uv, (width_uv * height_uv * sizeof(uint32_t)));
+}
+
+struct ia_css_dvs_6axis_config *
+generate_dvs_6axis_table(const struct ia_css_resolution *frame_res, const struct ia_css_resolution *dvs_offset)
+{
+ struct ia_css_dvs_6axis_config *dvs_6axis_table;
+
+ assert(frame_res != NULL);
+ assert(dvs_offset != NULL);
+
+ dvs_6axis_table = alloc_dvs_6axis_table(frame_res, NULL);
+ if (dvs_6axis_table) {
+ init_dvs_6axis_table_from_default(dvs_6axis_table, dvs_offset);
+ return dvs_6axis_table;
+ }
+ return NULL;
+}
+
+struct ia_css_dvs_6axis_config *
+generate_dvs_6axis_table_from_config(struct ia_css_dvs_6axis_config *dvs_config_src)
+{
+ struct ia_css_dvs_6axis_config *dvs_6axis_table;
+
+ assert(NULL != dvs_config_src);
+
+ dvs_6axis_table = alloc_dvs_6axis_table(NULL, dvs_config_src);
+ if (dvs_6axis_table) {
+ init_dvs_6axis_table_from_config(dvs_6axis_table, dvs_config_src);
+ return dvs_6axis_table;
+ }
+ return NULL;
+}
+
+void
+free_dvs_6axis_table(struct ia_css_dvs_6axis_config **dvs_6axis_config)
+{
+ assert(dvs_6axis_config != NULL);
+ assert(*dvs_6axis_config != NULL);
+
+ if ((dvs_6axis_config != NULL) && (*dvs_6axis_config != NULL))
+ {
+ IA_CSS_ENTER_PRIVATE("dvs_6axis_config %p", (*dvs_6axis_config));
+ if ((*dvs_6axis_config)->xcoords_y != NULL)
+ {
+ sh_css_free((*dvs_6axis_config)->xcoords_y);
+ (*dvs_6axis_config)->xcoords_y = NULL;
+ }
+
+ if ((*dvs_6axis_config)->ycoords_y != NULL)
+ {
+ sh_css_free((*dvs_6axis_config)->ycoords_y);
+ (*dvs_6axis_config)->ycoords_y = NULL;
+ }
+
+ /* Free up UV buffers */
+ if ((*dvs_6axis_config)->xcoords_uv != NULL)
+ {
+ sh_css_free((*dvs_6axis_config)->xcoords_uv);
+ (*dvs_6axis_config)->xcoords_uv = NULL;
+ }
+
+ if ((*dvs_6axis_config)->ycoords_uv != NULL)
+ {
+ sh_css_free((*dvs_6axis_config)->ycoords_uv);
+ (*dvs_6axis_config)->ycoords_uv = NULL;
+ }
+
+ IA_CSS_LEAVE_PRIVATE("dvs_6axis_config %p", (*dvs_6axis_config));
+ sh_css_free(*dvs_6axis_config);
+ *dvs_6axis_config = NULL;
+ }
+}
+
+void copy_dvs_6axis_table(struct ia_css_dvs_6axis_config *dvs_config_dst,
+ const struct ia_css_dvs_6axis_config *dvs_config_src)
+{
+ unsigned int width_y;
+ unsigned int height_y;
+ unsigned int width_uv;
+ unsigned int height_uv;
+
+ assert(dvs_config_src != NULL);
+ assert(dvs_config_dst != NULL);
+ assert(dvs_config_src->xcoords_y != NULL);
+ assert(dvs_config_src->xcoords_uv != NULL);
+ assert(dvs_config_src->ycoords_y != NULL);
+ assert(dvs_config_src->ycoords_uv != NULL);
+ assert(dvs_config_src->width_y == dvs_config_dst->width_y);
+ assert(dvs_config_src->width_uv == dvs_config_dst->width_uv);
+ assert(dvs_config_src->height_y == dvs_config_dst->height_y);
+ assert(dvs_config_src->height_uv == dvs_config_dst->height_uv);
+
+ width_y = dvs_config_src->width_y;
+ height_y = dvs_config_src->height_y;
+	width_uv = dvs_config_src->width_uv; /* = Y/2, depends on colour format YUV 4:2:0 */
+ height_uv = dvs_config_src->height_uv;
+
+ memcpy(dvs_config_dst->xcoords_y, dvs_config_src->xcoords_y, (width_y * height_y * sizeof(uint32_t)));
+ memcpy(dvs_config_dst->ycoords_y, dvs_config_src->ycoords_y, (width_y * height_y * sizeof(uint32_t)));
+
+ memcpy(dvs_config_dst->xcoords_uv, dvs_config_src->xcoords_uv, (width_uv * height_uv * sizeof(uint32_t)));
+ memcpy(dvs_config_dst->ycoords_uv, dvs_config_src->ycoords_uv, (width_uv * height_uv * sizeof(uint32_t)));
+
+}
+
+void
+ia_css_dvs_statistics_get(enum dvs_statistics_type type,
+ union ia_css_dvs_statistics_host *host_stats,
+ const union ia_css_dvs_statistics_isp *isp_stats)
+{
+
+ if (DVS_STATISTICS == type)
+ {
+ ia_css_get_dvs_statistics(host_stats->p_dvs_statistics_host,
+ isp_stats->p_dvs_statistics_isp);
+ } else if (DVS2_STATISTICS == type)
+ {
+ ia_css_get_dvs2_statistics(host_stats->p_dvs2_statistics_host,
+ isp_stats->p_dvs_statistics_isp);
+ }
+ return;
+}
+
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_param_dvs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_param_dvs.h
new file mode 100644
index 000000000000..79b563dc78ee
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_param_dvs.h
@@ -0,0 +1,86 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _SH_CSS_PARAMS_DVS_H_
+#define _SH_CSS_PARAMS_DVS_H_
+
+#include <math_support.h>
+#include <ia_css_types.h>
+#ifdef ISP2401
+#include <sh_css_dvs_info.h>
+#endif
+#include "gdc_global.h" /* gdc_warp_param_mem_t */
+
+#define DVS_ENV_MIN_X (12)
+#define DVS_ENV_MIN_Y (12)
+
+#define DVS_BLOCKDIM_X (64) /* X block width */
+#define DVS_BLOCKDIM_Y_LUMA (64) /* Y block height */
+#define DVS_BLOCKDIM_Y_CHROMA (32) /* UV block height is half the Y block height */
+
+#ifndef ISP2401
+/* horizontal 64x64 blocks round up to DVS_BLOCKDIM_X, make even */
+#define DVS_NUM_BLOCKS_X(X) (CEIL_MUL(CEIL_DIV((X), DVS_BLOCKDIM_X), 2))
+
+/* vertical 64x64 blocks round up to DVS_BLOCKDIM_Y */
+#define DVS_NUM_BLOCKS_Y(X) (CEIL_DIV((X), DVS_BLOCKDIM_Y_LUMA))
+#define DVS_NUM_BLOCKS_X_CHROMA(X) (CEIL_DIV((X), DVS_BLOCKDIM_X))
+#define DVS_NUM_BLOCKS_Y_CHROMA(X) (CEIL_DIV((X), DVS_BLOCKDIM_Y_CHROMA))
+
+
+#endif
+#define DVS_TABLE_IN_BLOCKDIM_X_LUMA(X) (DVS_NUM_BLOCKS_X(X) + 1) /* N blocks have N + 1 sets of coords */
+#define DVS_TABLE_IN_BLOCKDIM_X_CHROMA(X) (DVS_NUM_BLOCKS_X_CHROMA(X) + 1)
+#define DVS_TABLE_IN_BLOCKDIM_Y_LUMA(X) (DVS_NUM_BLOCKS_Y(X) + 1)
+#define DVS_TABLE_IN_BLOCKDIM_Y_CHROMA(X) (DVS_NUM_BLOCKS_Y_CHROMA(X) + 1)
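+
+/* Worked example (frame size assumed for illustration only): for a 1920x1080
+ * luma plane, DVS_NUM_BLOCKS_X(1920) = CEIL_MUL(CEIL_DIV(1920, 64), 2) = 30
+ * and DVS_NUM_BLOCKS_Y(1080) = CEIL_DIV(1080, 64) = 17, so the luma table
+ * holds 31 x 18 coordinate sets. The chroma plane is half size (960x540),
+ * giving DVS_TABLE_IN_BLOCKDIM_X_CHROMA(960) = 16 and
+ * DVS_TABLE_IN_BLOCKDIM_Y_CHROMA(540) = 18. */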
+
+#define DVS_ENVELOPE_X(X) (((X) == 0) ? (DVS_ENV_MIN_X) : (X))
+#define DVS_ENVELOPE_Y(X) (((X) == 0) ? (DVS_ENV_MIN_Y) : (X))
+
+#define DVS_COORD_FRAC_BITS (10)
+#ifndef ISP2401
+#define DVS_INPUT_BYTES_PER_PIXEL (1)
+#endif
+#define XMEM_ALIGN_LOG2 (5)
+
+#define DVS_6AXIS_COORDS_ELEMS CEIL_MUL(sizeof(gdc_warp_param_mem_t) \
+ , HIVE_ISP_DDR_WORD_BYTES)
+
+/* Currently we only support two outputs with the same resolution; output 0 is the default one. */
+#define DVS_6AXIS_BYTES(binary) \
+ (DVS_6AXIS_COORDS_ELEMS \
+ * DVS_NUM_BLOCKS_X((binary)->out_frame_info[0].res.width) \
+ * DVS_NUM_BLOCKS_Y((binary)->out_frame_info[0].res.height))
+
+#ifndef ISP2401
+/* Bilinear interpolation (HRT_GDC_BLI_MODE) is the supported method currently.
+ * Bicubic interpolation (HRT_GDC_BCI_MODE) is not supported yet */
+#define DVS_GDC_INTERP_METHOD HRT_GDC_BLI_MODE
+
+#endif
+struct ia_css_dvs_6axis_config *
+generate_dvs_6axis_table(const struct ia_css_resolution *frame_res, const struct ia_css_resolution *dvs_offset);
+
+struct ia_css_dvs_6axis_config *
+generate_dvs_6axis_table_from_config(struct ia_css_dvs_6axis_config *dvs_config_src);
+
+void
+free_dvs_6axis_table(struct ia_css_dvs_6axis_config **dvs_6axis_config);
+
+void
+copy_dvs_6axis_table(struct ia_css_dvs_6axis_config *dvs_config_dst,
+ const struct ia_css_dvs_6axis_config *dvs_config_src);
+
+
+#endif
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_param_shading.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_param_shading.c
new file mode 100644
index 000000000000..eaf60e7b2dac
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_param_shading.c
@@ -0,0 +1,419 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/slab.h>
+
+#include <math_support.h>
+#include "sh_css_param_shading.h"
+#include "ia_css_shading.h"
+#include "assert_support.h"
+#include "sh_css_defs.h"
+#include "sh_css_internal.h"
+#include "ia_css_debug.h"
+#include "ia_css_pipe_binarydesc.h"
+
+#include "sh_css_hrt.h"
+
+#include "platform_support.h"
+
+/* Bilinear interpolation on shading tables:
+ * For each target point T, we calculate the 4 surrounding source points:
+ * ul (upper left), ur (upper right), ll (lower left) and lr (lower right).
+ * We then calculate the distances from the T to the source points: x0, x1,
+ * y0 and y1.
+ * We then calculate the value of T:
+ * dx0*dy0*Slr + dx0*dy1*Sur + dx1*dy0*Sll + dx1*dy1*Sul.
+ * We choose a grid size of 1x1 which means:
+ * dx1 = 1-dx0
+ * dy1 = 1-dy0
+ *
+ * Sul dx0 dx1 Sur
+ * .<----->|<------------->.
+ * ^
+ * dy0|
+ * v T
+ * - .
+ * ^
+ * |
+ * dy1|
+ * v
+ * . .
+ * Sll Slr
+ *
+ * Padding:
+ * The area that the ISP operates on can include padding both on the left
+ * and the right. We need to pad the shading table such that the shading
+ * values end up on the correct pixels. This means we must pad the
+ * shading table to match the ISP padding.
+ * We can have 5 cases:
+ * 1. All 4 points fall in the left padding.
+ * 2. The left 2 points fall in the left padding.
+ * 3. All 4 points fall in the cropped (target) region.
+ * 4. The right 2 points fall in the right padding.
+ * 5. All 4 points fall in the right padding.
+ * Cases 1 and 5 are easy to handle: we simply use the
+ * value 1 in the shading table.
+ * Cases 2 and 4 require interpolation that takes into
+ * account how far into the padding area the pixels
+ * fall. We extrapolate the shading table into the
+ * padded area and then interpolate.
+ */
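+/*
+ * Worked example of the weighting above, with assumed (illustrative)
+ * coordinates: if sx0 = 0, sx1 = 4, tx = 1 and sy0 = 0, sy1 = 4, ty = 2,
+ * then dx0 = 1, dx1 = 3, divx = 4, dy0 = 2, dy1 = 2, divy = 4, so
+ *   T = (1*2*Slr + 1*2*Sur + 3*2*Sll + 3*2*Sul) / (4*4)
+ * The four weights (2, 2, 6, 6) sum to divx*divy = 16, so T remains a
+ * normalised blend of the four surrounding source points.
+ */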
+static void
+crop_and_interpolate(unsigned int cropped_width,
+ unsigned int cropped_height,
+ unsigned int left_padding,
+ int right_padding,
+ int top_padding,
+ const struct ia_css_shading_table *in_table,
+ struct ia_css_shading_table *out_table,
+ enum ia_css_sc_color color)
+{
+ unsigned int i, j,
+ sensor_width,
+ sensor_height,
+ table_width,
+ table_height,
+ table_cell_h,
+ out_cell_size,
+ in_cell_size,
+ out_start_row,
+ padded_width;
+ int out_start_col, /* can be negative to indicate padded space */
+ table_cell_w;
+ unsigned short *in_ptr,
+ *out_ptr;
+
+ assert(in_table != NULL);
+ assert(out_table != NULL);
+
+ sensor_width = in_table->sensor_width;
+ sensor_height = in_table->sensor_height;
+ table_width = in_table->width;
+ table_height = in_table->height;
+ in_ptr = in_table->data[color];
+ out_ptr = out_table->data[color];
+
+ padded_width = cropped_width + left_padding + right_padding;
+ out_cell_size = CEIL_DIV(padded_width, out_table->width - 1);
+ in_cell_size = CEIL_DIV(sensor_width, table_width - 1);
+
+ out_start_col = ((int)sensor_width - (int)cropped_width)/2 - left_padding;
+ out_start_row = ((int)sensor_height - (int)cropped_height)/2 - top_padding;
+ table_cell_w = (int)((table_width-1) * in_cell_size);
+ table_cell_h = (table_height-1) * in_cell_size;
+
+ for (i = 0; i < out_table->height; i++) {
+ int ty, src_y0, src_y1;
+ unsigned int sy0, sy1, dy0, dy1, divy;
+
+ /* calculate target point and make sure it falls within
+ the table */
+ ty = out_start_row + i * out_cell_size;
+
+ /* calculate closest source points in shading table and
+ make sure they fall within the table */
+ src_y0 = ty / (int)in_cell_size;
+ if (in_cell_size < out_cell_size)
+ src_y1 = (ty + out_cell_size) / in_cell_size;
+ else
+ src_y1 = src_y0 + 1;
+ src_y0 = clamp(src_y0, 0, (int)table_height-1);
+ src_y1 = clamp(src_y1, 0, (int)table_height-1);
+ ty = min(clamp(ty, 0, (int)sensor_height-1),
+ (int)table_cell_h);
+
+ /* calculate closest source points for distance computation */
+ sy0 = min(src_y0 * in_cell_size, sensor_height-1);
+ sy1 = min(src_y1 * in_cell_size, sensor_height-1);
+ /* calculate distance between source and target pixels */
+ dy0 = ty - sy0;
+ dy1 = sy1 - ty;
+ divy = sy1 - sy0;
+ if (divy == 0) {
+ dy0 = 1;
+ divy = 1;
+ }
+
+ for (j = 0; j < out_table->width; j++, out_ptr++) {
+ int tx, src_x0, src_x1;
+ unsigned int sx0, sx1, dx0, dx1, divx;
+ unsigned short s_ul, s_ur, s_ll, s_lr;
+
+ /* calculate target point */
+ tx = out_start_col + j * out_cell_size;
+ /* calculate closest source points. */
+ src_x0 = tx / (int)in_cell_size;
+ if (in_cell_size < out_cell_size) {
+ src_x1 = (tx + out_cell_size) /
+ (int)in_cell_size;
+ } else {
+ src_x1 = src_x0 + 1;
+ }
+ /* if src points fall in padding, select closest ones.*/
+ src_x0 = clamp(src_x0, 0, (int)table_width-1);
+ src_x1 = clamp(src_x1, 0, (int)table_width-1);
+ tx = min(clamp(tx, 0, (int)sensor_width-1),
+ (int)table_cell_w);
+ /* calculate closest source points for distance
+ computation */
+ sx0 = min(src_x0 * in_cell_size, sensor_width-1);
+ sx1 = min(src_x1 * in_cell_size, sensor_width-1);
+ /* calculate distances between source and target
+ pixels */
+ dx0 = tx - sx0;
+ dx1 = sx1 - tx;
+ divx = sx1 - sx0;
+			/* if we're at the edge, we just use the closest
+			   point still in the grid. We make up for the
+			   divider by setting both the distance and the
+			   divider to 1, since they are actually 0. */
+ if (divx == 0) {
+ dx0 = 1;
+ divx = 1;
+ }
+
+ /* get source pixel values */
+ s_ul = in_ptr[(table_width*src_y0)+src_x0];
+ s_ur = in_ptr[(table_width*src_y0)+src_x1];
+ s_ll = in_ptr[(table_width*src_y1)+src_x0];
+ s_lr = in_ptr[(table_width*src_y1)+src_x1];
+
+ *out_ptr = (unsigned short) ((dx0*dy0*s_lr + dx0*dy1*s_ur + dx1*dy0*s_ll + dx1*dy1*s_ul) /
+ (divx*divy));
+ }
+ }
+}
+
+void
+sh_css_params_shading_id_table_generate(
+ struct ia_css_shading_table **target_table,
+#ifndef ISP2401
+ const struct ia_css_binary *binary)
+#else
+ unsigned int table_width,
+ unsigned int table_height)
+#endif
+{
+ /* initialize table with ones, shift becomes zero */
+#ifndef ISP2401
+ unsigned int i, j, table_width, table_height;
+#else
+ unsigned int i, j;
+#endif
+ struct ia_css_shading_table *result;
+
+ assert(target_table != NULL);
+#ifndef ISP2401
+ assert(binary != NULL);
+#endif
+
+#ifndef ISP2401
+ table_width = binary->sctbl_width_per_color;
+ table_height = binary->sctbl_height;
+#endif
+ result = ia_css_shading_table_alloc(table_width, table_height);
+ if (result == NULL) {
+ *target_table = NULL;
+ return;
+ }
+
+ for (i = 0; i < IA_CSS_SC_NUM_COLORS; i++) {
+ for (j = 0; j < table_height * table_width; j++)
+ result->data[i][j] = 1;
+ }
+ result->fraction_bits = 0;
+ *target_table = result;
+}
+
+void
+prepare_shading_table(const struct ia_css_shading_table *in_table,
+ unsigned int sensor_binning,
+ struct ia_css_shading_table **target_table,
+ const struct ia_css_binary *binary,
+ unsigned int bds_factor)
+{
+ unsigned int input_width,
+ input_height,
+ table_width,
+ table_height,
+ left_padding,
+ top_padding,
+ padded_width,
+ left_cropping,
+ i;
+ unsigned int bds_numerator, bds_denominator;
+ int right_padding;
+
+ struct ia_css_shading_table *result;
+
+ assert(target_table != NULL);
+ assert(binary != NULL);
+
+ if (!in_table) {
+#ifndef ISP2401
+ sh_css_params_shading_id_table_generate(target_table, binary);
+#else
+ sh_css_params_shading_id_table_generate(target_table,
+ binary->sctbl_legacy_width_per_color, binary->sctbl_legacy_height);
+#endif
+ return;
+ }
+
+ padded_width = binary->in_frame_info.padded_width;
+ /* We use the ISP input resolution for the shading table because
+ shading correction is performed in the bayer domain (before bayer
+ down scaling). */
+#if defined(USE_INPUT_SYSTEM_VERSION_2401)
+ padded_width = CEIL_MUL(binary->effective_in_frame_res.width + 2*ISP_VEC_NELEMS,
+ 2*ISP_VEC_NELEMS);
+#endif
+ input_height = binary->in_frame_info.res.height;
+ input_width = binary->in_frame_info.res.width;
+ left_padding = binary->left_padding;
+ left_cropping = (binary->info->sp.pipeline.left_cropping == 0) ?
+ binary->dvs_envelope.width : 2*ISP_VEC_NELEMS;
+
+ sh_css_bds_factor_get_numerator_denominator
+ (bds_factor, &bds_numerator, &bds_denominator);
+
+ left_padding = (left_padding + binary->info->sp.pipeline.left_cropping) * bds_numerator / bds_denominator - binary->info->sp.pipeline.left_cropping;
+ right_padding = (binary->internal_frame_info.res.width - binary->effective_in_frame_res.width * bds_denominator / bds_numerator - left_cropping) * bds_numerator / bds_denominator;
+ top_padding = binary->info->sp.pipeline.top_cropping * bds_numerator / bds_denominator - binary->info->sp.pipeline.top_cropping;
+
+#if !defined(USE_WINDOWS_BINNING_FACTOR)
+ /* @deprecated{This part of the code will be replaced by the code
+	 * in the #else section below to make the calculation the same across
+ * all platforms.
+ * Android and Windows platforms interpret the binning_factor parameter
+ * differently. In Android, the binning factor is expressed in the form
+	 * 2^N * 2^N, whereas on the Windows platform, the binning factor is N*N}
+ */
+
+ /* We take into account the binning done by the sensor. We do this
+ by cropping the non-binned part of the shading table and then
+ increasing the size of a grid cell with this same binning factor. */
+ input_width <<= sensor_binning;
+ input_height <<= sensor_binning;
+ /* We also scale the padding by the same binning factor. This will
+ make it much easier later on to calculate the padding of the
+ shading table. */
+ left_padding <<= sensor_binning;
+ right_padding <<= sensor_binning;
+ top_padding <<= sensor_binning;
+#else
+ input_width *= sensor_binning;
+ input_height *= sensor_binning;
+ left_padding *= sensor_binning;
+ right_padding *= sensor_binning;
+ top_padding *= sensor_binning;
+#endif /*USE_WINDOWS_BINNING_FACTOR*/
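+
+	/* Illustrative (assumed) numbers for the two conventions above: for a
+	 * sensor doing 2x2 binning, the Android-style path expects
+	 * sensor_binning = 1 and shifts a 1280x960 binned input back up to
+	 * 2560x1920, whereas the Windows-style path would carry
+	 * sensor_binning = 2 and reach the same size by multiplication. */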
+
+ /* during simulation, the used resolution can exceed the sensor
+ resolution, so we clip it. */
+ input_width = min(input_width, in_table->sensor_width);
+ input_height = min(input_height, in_table->sensor_height);
+
+#ifndef ISP2401
+ table_width = binary->sctbl_width_per_color;
+ table_height = binary->sctbl_height;
+#else
+	/* This prepare_shading_table() function is called only from the legacy API (not from the new API),
+	   so the legacy shading table width and height should be used. */
+ table_width = binary->sctbl_legacy_width_per_color;
+ table_height = binary->sctbl_legacy_height;
+#endif
+
+ result = ia_css_shading_table_alloc(table_width, table_height);
+ if (result == NULL) {
+ *target_table = NULL;
+ return;
+ }
+ result->sensor_width = in_table->sensor_width;
+ result->sensor_height = in_table->sensor_height;
+ result->fraction_bits = in_table->fraction_bits;
+
+ /* now we crop the original shading table and then interpolate to the
+ requested resolution and decimation factor. */
+ for (i = 0; i < IA_CSS_SC_NUM_COLORS; i++) {
+ crop_and_interpolate(input_width, input_height,
+ left_padding, right_padding, top_padding,
+ in_table,
+ result, i);
+ }
+ *target_table = result;
+}
+
+struct ia_css_shading_table *
+ia_css_shading_table_alloc(
+ unsigned int width,
+ unsigned int height)
+{
+ unsigned int i;
+ struct ia_css_shading_table *me;
+
+ IA_CSS_ENTER("");
+
+ me = kmalloc(sizeof(*me), GFP_KERNEL);
+ if (me == NULL) {
+ IA_CSS_ERROR("out of memory");
+ return me;
+ }
+
+ me->width = width;
+ me->height = height;
+ me->sensor_width = 0;
+ me->sensor_height = 0;
+ me->fraction_bits = 0;
+ for (i = 0; i < IA_CSS_SC_NUM_COLORS; i++) {
+ me->data[i] =
+ sh_css_malloc(width * height * sizeof(*me->data[0]));
+ if (me->data[i] == NULL) {
+ unsigned int j;
+ for (j = 0; j < i; j++) {
+ sh_css_free(me->data[j]);
+ me->data[j] = NULL;
+ }
+ kfree(me);
+ return NULL;
+ }
+ }
+
+ IA_CSS_LEAVE("");
+ return me;
+}
+
+void
+ia_css_shading_table_free(struct ia_css_shading_table *table)
+{
+ unsigned int i;
+
+ if (table == NULL)
+ return;
+
+	/* We only output logging when the table is not NULL; otherwise
+	 * the logs would give the impression that a table was freed.
+	 */
+ IA_CSS_ENTER("");
+
+ for (i = 0; i < IA_CSS_SC_NUM_COLORS; i++) {
+ if (table->data[i]) {
+ sh_css_free(table->data[i]);
+ table->data[i] = NULL;
+ }
+ }
+ kfree(table);
+
+ IA_CSS_LEAVE("");
+}
+
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_param_shading.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_param_shading.h
new file mode 100644
index 000000000000..e87863b7c8cc
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_param_shading.h
@@ -0,0 +1,39 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __SH_CSS_PARAMS_SHADING_H
+#define __SH_CSS_PARAMS_SHADING_H
+
+#include <ia_css_types.h>
+#include <ia_css_binary.h>
+
+void
+sh_css_params_shading_id_table_generate(
+ struct ia_css_shading_table **target_table,
+#ifndef ISP2401
+ const struct ia_css_binary *binary);
+#else
+ unsigned int table_width,
+ unsigned int table_height);
+#endif
+
+void
+prepare_shading_table(const struct ia_css_shading_table *in_table,
+ unsigned int sensor_binning,
+ struct ia_css_shading_table **target_table,
+ const struct ia_css_binary *binary,
+ unsigned int bds_factor);
+
+#endif /* __SH_CSS_PARAMS_SHADING_H */
+
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_params.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_params.c
new file mode 100644
index 000000000000..561f4a7236f7
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_params.c
@@ -0,0 +1,5268 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "gdc_device.h" /* gdc_lut_store(), ... */
+#include "isp.h" /* ISP_VEC_ELEMBITS */
+#include "vamem.h"
+#if !defined(HAS_NO_HMEM)
+#ifndef __INLINE_HMEM__
+#define __INLINE_HMEM__
+#endif
+#include "hmem.h"
+#endif /* !defined(HAS_NO_HMEM) */
+#define IA_CSS_INCLUDE_PARAMETERS
+#define IA_CSS_INCLUDE_ACC_PARAMETERS
+
+#include "sh_css_params.h"
+#include "ia_css_queue.h"
+#include "sw_event_global.h" /* Event IDs */
+
+#include "platform_support.h"
+#include "assert_support.h"
+#include "misc_support.h" /* NOT_USED */
+#include "math_support.h" /* max(), min() EVEN_FLOOR()*/
+
+#include "ia_css_stream.h"
+#include "sh_css_params_internal.h"
+#include "sh_css_param_shading.h"
+#include "sh_css_param_dvs.h"
+#include "ia_css_refcount.h"
+#include "sh_css_internal.h"
+#include "ia_css_control.h"
+#include "ia_css_shading.h"
+#include "sh_css_defs.h"
+#include "sh_css_sp.h"
+#include "ia_css_pipeline.h"
+#include "ia_css_debug.h"
+#include "memory_access.h"
+#if 0 /* FIXME */
+#include "memory_realloc.h"
+#endif
+#include "ia_css_isp_param.h"
+#include "ia_css_isp_params.h"
+#include "ia_css_mipi.h"
+#include "ia_css_morph.h"
+#include "ia_css_host_data.h"
+#include "ia_css_pipe.h"
+#include "ia_css_pipe_binarydesc.h"
+#if 0
+#include "ia_css_system_ctrl.h"
+#endif
+
+/* Include all kernel host interfaces for ISP1 */
+
+#include "anr/anr_1.0/ia_css_anr.host.h"
+#include "cnr/cnr_1.0/ia_css_cnr.host.h"
+#include "csc/csc_1.0/ia_css_csc.host.h"
+#include "de/de_1.0/ia_css_de.host.h"
+#include "dp/dp_1.0/ia_css_dp.host.h"
+#include "bnr/bnr_1.0/ia_css_bnr.host.h"
+#include "dvs/dvs_1.0/ia_css_dvs.host.h"
+#include "fpn/fpn_1.0/ia_css_fpn.host.h"
+#include "gc/gc_1.0/ia_css_gc.host.h"
+#include "macc/macc_1.0/ia_css_macc.host.h"
+#include "ctc/ctc_1.0/ia_css_ctc.host.h"
+#include "ob/ob_1.0/ia_css_ob.host.h"
+#include "raw/raw_1.0/ia_css_raw.host.h"
+#include "fixedbds/fixedbds_1.0/ia_css_fixedbds_param.h"
+#include "s3a/s3a_1.0/ia_css_s3a.host.h"
+#include "sc/sc_1.0/ia_css_sc.host.h"
+#include "sdis/sdis_1.0/ia_css_sdis.host.h"
+#include "tnr/tnr_1.0/ia_css_tnr.host.h"
+#include "uds/uds_1.0/ia_css_uds_param.h"
+#include "wb/wb_1.0/ia_css_wb.host.h"
+#include "ynr/ynr_1.0/ia_css_ynr.host.h"
+#include "xnr/xnr_1.0/ia_css_xnr.host.h"
+
+/* Include additional kernel host interfaces for ISP2 */
+
+#include "aa/aa_2/ia_css_aa2.host.h"
+#include "anr/anr_2/ia_css_anr2.host.h"
+#include "bh/bh_2/ia_css_bh.host.h"
+#include "cnr/cnr_2/ia_css_cnr2.host.h"
+#include "ctc/ctc1_5/ia_css_ctc1_5.host.h"
+#include "de/de_2/ia_css_de2.host.h"
+#include "gc/gc_2/ia_css_gc2.host.h"
+#include "sdis/sdis_2/ia_css_sdis2.host.h"
+#include "ynr/ynr_2/ia_css_ynr2.host.h"
+#include "fc/fc_1.0/ia_css_formats.host.h"
+
+#include "xnr/xnr_3.0/ia_css_xnr3.host.h"
+
+#if defined(HAS_OUTPUT_SYSTEM)
+#include <components/output_system/sc_output_system_1.0/host/output_system.host.h>
+#endif
+
+#include "sh_css_frac.h"
+#include "ia_css_bufq.h"
+
+#define FPNTBL_BYTES(binary) \
+ (sizeof(char) * (binary)->in_frame_info.res.height * \
+ (binary)->in_frame_info.padded_width)
+
+#ifndef ISP2401
+
+#define SCTBL_BYTES(binary) \
+ (sizeof(unsigned short) * (binary)->sctbl_height * \
+ (binary)->sctbl_aligned_width_per_color * IA_CSS_SC_NUM_COLORS)
+
+#else
+
+#define SCTBL_BYTES(binary) \
+ (sizeof(unsigned short) * max((binary)->sctbl_height, (binary)->sctbl_legacy_height) * \
+ /* height should be the larger height between new api and legacy api */ \
+ (binary)->sctbl_aligned_width_per_color * IA_CSS_SC_NUM_COLORS)
+
+#endif
+
+#define MORPH_PLANE_BYTES(binary) \
+ (SH_CSS_MORPH_TABLE_ELEM_BYTES * (binary)->morph_tbl_aligned_width * \
+ (binary)->morph_tbl_height)
+
+/* We keep a second copy of the ptr struct for the SP to access.
+ Again, this would not be necessary on the chip. */
+static hrt_vaddress sp_ddr_ptrs;
+
+/* sp group address on DDR */
+static hrt_vaddress xmem_sp_group_ptrs;
+
+static hrt_vaddress xmem_sp_stage_ptrs[IA_CSS_PIPE_ID_NUM]
+ [SH_CSS_MAX_STAGES];
+static hrt_vaddress xmem_isp_stage_ptrs[IA_CSS_PIPE_ID_NUM]
+ [SH_CSS_MAX_STAGES];
+
+static hrt_vaddress default_gdc_lut;
+static int interleaved_lut_temp[4][HRT_GDC_N];
+
+/* END DO NOT MOVE INTO VIMALS_WORLD */
+
+/* Digital Zoom lookup table. See documentation for more details about the
+ * contents of this table.
+ */
+#if defined(HAS_GDC_VERSION_2)
+#if defined(CONFIG_CSI2_PLUS)
+/*
+ * Coefficients from
+ * Css_Mizuchi/regressions/20140424_0930/all/applications/common/gdc_v2_common/lut.h
+ */
+
+static const int zoom_table[4][HRT_GDC_N] = {
+ { 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -2, -2, -2, -2, -2, -2, -2,
+ -3, -3, -3, -3, -3, -3, -3, -4,
+ -4, -4, -4, -4, -5, -5, -5, -5,
+ -5, -5, -6, -6, -6, -6, -7, -7,
+ -7, -7, -7, -8, -8, -8, -8, -9,
+ -9, -9, -9, -10, -10, -10, -10, -11,
+ -11, -11, -12, -12, -12, -12, -13, -13,
+ -13, -14, -14, -14, -15, -15, -15, -15,
+ -16, -16, -16, -17, -17, -17, -18, -18,
+ -18, -19, -19, -20, -20, -20, -21, -21,
+ -21, -22, -22, -22, -23, -23, -24, -24,
+ -24, -25, -25, -25, -26, -26, -27, -27,
+ -28, -28, -28, -29, -29, -30, -30, -30,
+ -31, -31, -32, -32, -33, -33, -33, -34,
+ -34, -35, -35, -36, -36, -37, -37, -37,
+ -38, -38, -39, -39, -40, -40, -41, -41,
+ -42, -42, -43, -43, -44, -44, -45, -45,
+ -46, -46, -47, -47, -48, -48, -49, -49,
+ -50, -50, -51, -51, -52, -52, -53, -53,
+ -54, -54, -55, -55, -56, -56, -57, -57,
+ -58, -59, -59, -60, -60, -61, -61, -62,
+ -62, -63, -63, -64, -65, -65, -66, -66,
+ -67, -67, -68, -69, -69, -70, -70, -71,
+ -71, -72, -73, -73, -74, -74, -75, -75,
+ -76, -77, -77, -78, -78, -79, -80, -80,
+ -81, -81, -82, -83, -83, -84, -84, -85,
+ -86, -86, -87, -87, -88, -89, -89, -90,
+ -91, -91, -92, -92, -93, -94, -94, -95,
+ -96, -96, -97, -97, -98, -99, -99, -100,
+ -101, -101, -102, -102, -103, -104, -104, -105,
+ -106, -106, -107, -108, -108, -109, -109, -110,
+ -111, -111, -112, -113, -113, -114, -115, -115,
+ -116, -117, -117, -118, -119, -119, -120, -121,
+ -121, -122, -122, -123, -124, -124, -125, -126,
+ -126, -127, -128, -128, -129, -130, -130, -131,
+ -132, -132, -133, -134, -134, -135, -136, -136,
+ -137, -138, -138, -139, -140, -140, -141, -142,
+ -142, -143, -144, -144, -145, -146, -146, -147,
+ -148, -148, -149, -150, -150, -151, -152, -152,
+ -153, -154, -154, -155, -156, -156, -157, -158,
+ -158, -159, -160, -160, -161, -162, -162, -163,
+ -164, -164, -165, -166, -166, -167, -168, -168,
+ -169, -170, -170, -171, -172, -172, -173, -174,
+ -174, -175, -176, -176, -177, -178, -178, -179,
+ -180, -180, -181, -181, -182, -183, -183, -184,
+ -185, -185, -186, -187, -187, -188, -189, -189,
+ -190, -191, -191, -192, -193, -193, -194, -194,
+ -195, -196, -196, -197, -198, -198, -199, -200,
+ -200, -201, -201, -202, -203, -203, -204, -205,
+ -205, -206, -206, -207, -208, -208, -209, -210,
+ -210, -211, -211, -212, -213, -213, -214, -215,
+ -215, -216, -216, -217, -218, -218, -219, -219,
+ -220, -221, -221, -222, -222, -223, -224, -224,
+ -225, -225, -226, -227, -227, -228, -228, -229,
+ -229, -230, -231, -231, -232, -232, -233, -233,
+ -234, -235, -235, -236, -236, -237, -237, -238,
+ -239, -239, -240, -240, -241, -241, -242, -242,
+ -243, -244, -244, -245, -245, -246, -246, -247,
+ -247, -248, -248, -249, -249, -250, -250, -251,
+ -251, -252, -252, -253, -253, -254, -254, -255,
+ -256, -256, -256, -257, -257, -258, -258, -259,
+ -259, -260, -260, -261, -261, -262, -262, -263,
+ -263, -264, -264, -265, -265, -266, -266, -266,
+ -267, -267, -268, -268, -269, -269, -270, -270,
+ -270, -271, -271, -272, -272, -273, -273, -273,
+ -274, -274, -275, -275, -275, -276, -276, -277,
+ -277, -277, -278, -278, -279, -279, -279, -280,
+ -280, -280, -281, -281, -282, -282, -282, -283,
+ -283, -283, -284, -284, -284, -285, -285, -285,
+ -286, -286, -286, -287, -287, -287, -288, -288,
+ -288, -289, -289, -289, -289, -290, -290, -290,
+ -291, -291, -291, -291, -292, -292, -292, -293,
+ -293, -293, -293, -294, -294, -294, -294, -295,
+ -295, -295, -295, -295, -296, -296, -296, -296,
+ -297, -297, -297, -297, -297, -298, -298, -298,
+ -298, -298, -299, -299, -299, -299, -299, -299,
+ -300, -300, -300, -300, -300, -300, -300, -301,
+ -301, -301, -301, -301, -301, -301, -301, -301,
+ -302, -302, -302, -302, -302, -302, -302, -302,
+ -302, -302, -302, -302, -302, -303, -303, -303,
+ -303, -303, -303, -303, -303, -303, -303, -303,
+ -303, -303, -303, -303, -303, -303, -303, -303,
+ -303, -303, -303, -303, -303, -303, -303, -303,
+ -303, -303, -302, -302, -302, -302, -302, -302,
+ -302, -302, -302, -302, -302, -302, -301, -301,
+ -301, -301, -301, -301, -301, -301, -300, -300,
+ -300, -300, -300, -300, -299, -299, -299, -299,
+ -299, -299, -298, -298, -298, -298, -298, -297,
+ -297, -297, -297, -296, -296, -296, -296, -295,
+ -295, -295, -295, -294, -294, -294, -293, -293,
+ -293, -293, -292, -292, -292, -291, -291, -291,
+ -290, -290, -290, -289, -289, -289, -288, -288,
+ -288, -287, -287, -286, -286, -286, -285, -285,
+ -284, -284, -284, -283, -283, -282, -282, -281,
+ -281, -280, -280, -279, -279, -279, -278, -278,
+ -277, -277, -276, -276, -275, -275, -274, -273,
+ -273, -272, -272, -271, -271, -270, -270, -269,
+ -268, -268, -267, -267, -266, -266, -265, -264,
+ -264, -263, -262, -262, -261, -260, -260, -259,
+ -259, -258, -257, -256, -256, -255, -254, -254,
+ -253, -252, -252, -251, -250, -249, -249, -248,
+ -247, -246, -246, -245, -244, -243, -242, -242,
+ -241, -240, -239, -238, -238, -237, -236, -235,
+ -234, -233, -233, -232, -231, -230, -229, -228,
+ -227, -226, -226, -225, -224, -223, -222, -221,
+ -220, -219, -218, -217, -216, -215, -214, -213,
+ -212, -211, -210, -209, -208, -207, -206, -205,
+ -204, -203, -202, -201, -200, -199, -198, -197,
+ -196, -194, -193, -192, -191, -190, -189, -188,
+ -187, -185, -184, -183, -182, -181, -180, -178,
+ -177, -176, -175, -174, -172, -171, -170, -169,
+ -167, -166, -165, -164, -162, -161, -160, -158,
+ -157, -156, -155, -153, -152, -151, -149, -148,
+ -147, -145, -144, -142, -141, -140, -138, -137,
+ -135, -134, -133, -131, -130, -128, -127, -125,
+ -124, -122, -121, -120, -118, -117, -115, -114,
+ -112, -110, -109, -107, -106, -104, -103, -101,
+ -100, -98, -96, -95, -93, -92, -90, -88,
+ -87, -85, -83, -82, -80, -78, -77, -75,
+ -73, -72, -70, -68, -67, -65, -63, -61,
+ -60, -58, -56, -54, -52, -51, -49, -47,
+ -45, -43, -42, -40, -38, -36, -34, -32,
+ -31, -29, -27, -25, -23, -21, -19, -17,
+ -15, -13, -11, -9, -7, -5, -3, -1
+ },
+ { 0, 2, 4, 6, 8, 10, 12, 14,
+ 16, 18, 20, 22, 25, 27, 29, 31,
+ 33, 36, 38, 40, 43, 45, 47, 50,
+ 52, 54, 57, 59, 61, 64, 66, 69,
+ 71, 74, 76, 79, 81, 84, 86, 89,
+ 92, 94, 97, 99, 102, 105, 107, 110,
+ 113, 116, 118, 121, 124, 127, 129, 132,
+ 135, 138, 141, 144, 146, 149, 152, 155,
+ 158, 161, 164, 167, 170, 173, 176, 179,
+ 182, 185, 188, 191, 194, 197, 200, 203,
+ 207, 210, 213, 216, 219, 222, 226, 229,
+ 232, 235, 239, 242, 245, 248, 252, 255,
+ 258, 262, 265, 269, 272, 275, 279, 282,
+ 286, 289, 292, 296, 299, 303, 306, 310,
+ 313, 317, 321, 324, 328, 331, 335, 338,
+ 342, 346, 349, 353, 357, 360, 364, 368,
+ 372, 375, 379, 383, 386, 390, 394, 398,
+ 402, 405, 409, 413, 417, 421, 425, 429,
+ 432, 436, 440, 444, 448, 452, 456, 460,
+ 464, 468, 472, 476, 480, 484, 488, 492,
+ 496, 500, 504, 508, 512, 516, 521, 525,
+ 529, 533, 537, 541, 546, 550, 554, 558,
+ 562, 567, 571, 575, 579, 584, 588, 592,
+ 596, 601, 605, 609, 614, 618, 622, 627,
+ 631, 635, 640, 644, 649, 653, 657, 662,
+ 666, 671, 675, 680, 684, 689, 693, 698,
+ 702, 707, 711, 716, 720, 725, 729, 734,
+ 738, 743, 747, 752, 757, 761, 766, 771,
+ 775, 780, 784, 789, 794, 798, 803, 808,
+ 813, 817, 822, 827, 831, 836, 841, 846,
+ 850, 855, 860, 865, 870, 874, 879, 884,
+ 889, 894, 898, 903, 908, 913, 918, 923,
+ 928, 932, 937, 942, 947, 952, 957, 962,
+ 967, 972, 977, 982, 986, 991, 996, 1001,
+ 1006, 1011, 1016, 1021, 1026, 1031, 1036, 1041,
+ 1046, 1051, 1056, 1062, 1067, 1072, 1077, 1082,
+ 1087, 1092, 1097, 1102, 1107, 1112, 1117, 1122,
+ 1128, 1133, 1138, 1143, 1148, 1153, 1158, 1164,
+ 1169, 1174, 1179, 1184, 1189, 1195, 1200, 1205,
+ 1210, 1215, 1221, 1226, 1231, 1236, 1242, 1247,
+ 1252, 1257, 1262, 1268, 1273, 1278, 1284, 1289,
+ 1294, 1299, 1305, 1310, 1315, 1321, 1326, 1331,
+ 1336, 1342, 1347, 1352, 1358, 1363, 1368, 1374,
+ 1379, 1384, 1390, 1395, 1400, 1406, 1411, 1417,
+ 1422, 1427, 1433, 1438, 1443, 1449, 1454, 1460,
+ 1465, 1470, 1476, 1481, 1487, 1492, 1497, 1503,
+ 1508, 1514, 1519, 1525, 1530, 1535, 1541, 1546,
+ 1552, 1557, 1563, 1568, 1574, 1579, 1585, 1590,
+ 1596, 1601, 1606, 1612, 1617, 1623, 1628, 1634,
+ 1639, 1645, 1650, 1656, 1661, 1667, 1672, 1678,
+ 1683, 1689, 1694, 1700, 1705, 1711, 1716, 1722,
+ 1727, 1733, 1738, 1744, 1749, 1755, 1761, 1766,
+ 1772, 1777, 1783, 1788, 1794, 1799, 1805, 1810,
+ 1816, 1821, 1827, 1832, 1838, 1844, 1849, 1855,
+ 1860, 1866, 1871, 1877, 1882, 1888, 1893, 1899,
+ 1905, 1910, 1916, 1921, 1927, 1932, 1938, 1943,
+ 1949, 1955, 1960, 1966, 1971, 1977, 1982, 1988,
+ 1993, 1999, 2005, 2010, 2016, 2021, 2027, 2032,
+ 2038, 2043, 2049, 2055, 2060, 2066, 2071, 2077,
+ 2082, 2088, 2093, 2099, 2105, 2110, 2116, 2121,
+ 2127, 2132, 2138, 2143, 2149, 2154, 2160, 2165,
+ 2171, 2177, 2182, 2188, 2193, 2199, 2204, 2210,
+ 2215, 2221, 2226, 2232, 2237, 2243, 2248, 2254,
+ 2259, 2265, 2270, 2276, 2281, 2287, 2292, 2298,
+ 2304, 2309, 2314, 2320, 2325, 2331, 2336, 2342,
+ 2347, 2353, 2358, 2364, 2369, 2375, 2380, 2386,
+ 2391, 2397, 2402, 2408, 2413, 2419, 2424, 2429,
+ 2435, 2440, 2446, 2451, 2457, 2462, 2467, 2473,
+ 2478, 2484, 2489, 2495, 2500, 2505, 2511, 2516,
+ 2522, 2527, 2532, 2538, 2543, 2549, 2554, 2559,
+ 2565, 2570, 2575, 2581, 2586, 2591, 2597, 2602,
+ 2607, 2613, 2618, 2623, 2629, 2634, 2639, 2645,
+ 2650, 2655, 2661, 2666, 2671, 2676, 2682, 2687,
+ 2692, 2698, 2703, 2708, 2713, 2719, 2724, 2729,
+ 2734, 2740, 2745, 2750, 2755, 2760, 2766, 2771,
+ 2776, 2781, 2786, 2792, 2797, 2802, 2807, 2812,
+ 2817, 2823, 2828, 2833, 2838, 2843, 2848, 2853,
+ 2859, 2864, 2869, 2874, 2879, 2884, 2889, 2894,
+ 2899, 2904, 2909, 2914, 2919, 2924, 2930, 2935,
+ 2940, 2945, 2950, 2955, 2960, 2965, 2970, 2975,
+ 2980, 2984, 2989, 2994, 2999, 3004, 3009, 3014,
+ 3019, 3024, 3029, 3034, 3039, 3044, 3048, 3053,
+ 3058, 3063, 3068, 3073, 3078, 3082, 3087, 3092,
+ 3097, 3102, 3106, 3111, 3116, 3121, 3126, 3130,
+ 3135, 3140, 3145, 3149, 3154, 3159, 3163, 3168,
+ 3173, 3177, 3182, 3187, 3191, 3196, 3201, 3205,
+ 3210, 3215, 3219, 3224, 3228, 3233, 3238, 3242,
+ 3247, 3251, 3256, 3260, 3265, 3269, 3274, 3279,
+ 3283, 3287, 3292, 3296, 3301, 3305, 3310, 3314,
+ 3319, 3323, 3327, 3332, 3336, 3341, 3345, 3349,
+ 3354, 3358, 3362, 3367, 3371, 3375, 3380, 3384,
+ 3388, 3393, 3397, 3401, 3405, 3410, 3414, 3418,
+ 3422, 3426, 3431, 3435, 3439, 3443, 3447, 3451,
+ 3455, 3460, 3464, 3468, 3472, 3476, 3480, 3484,
+ 3488, 3492, 3496, 3500, 3504, 3508, 3512, 3516,
+ 3520, 3524, 3528, 3532, 3536, 3540, 3544, 3548,
+ 3552, 3555, 3559, 3563, 3567, 3571, 3575, 3578,
+ 3582, 3586, 3590, 3593, 3597, 3601, 3605, 3608,
+ 3612, 3616, 3619, 3623, 3627, 3630, 3634, 3638,
+ 3641, 3645, 3649, 3652, 3656, 3659, 3663, 3666,
+ 3670, 3673, 3677, 3680, 3684, 3687, 3691, 3694,
+ 3698, 3701, 3704, 3708, 3711, 3714, 3718, 3721,
+ 3724, 3728, 3731, 3734, 3738, 3741, 3744, 3747,
+ 3751, 3754, 3757, 3760, 3763, 3767, 3770, 3773,
+ 3776, 3779, 3782, 3785, 3788, 3791, 3794, 3798,
+ 3801, 3804, 3807, 3809, 3812, 3815, 3818, 3821,
+ 3824, 3827, 3830, 3833, 3836, 3839, 3841, 3844,
+ 3847, 3850, 3853, 3855, 3858, 3861, 3864, 3866,
+ 3869, 3872, 3874, 3877, 3880, 3882, 3885, 3887,
+ 3890, 3893, 3895, 3898, 3900, 3903, 3905, 3908,
+ 3910, 3913, 3915, 3917, 3920, 3922, 3925, 3927,
+ 3929, 3932, 3934, 3936, 3939, 3941, 3943, 3945,
+ 3948, 3950, 3952, 3954, 3956, 3958, 3961, 3963,
+ 3965, 3967, 3969, 3971, 3973, 3975, 3977, 3979,
+ 3981, 3983, 3985, 3987, 3989, 3991, 3993, 3994,
+ 3996, 3998, 4000, 4002, 4004, 4005, 4007, 4009,
+ 4011, 4012, 4014, 4016, 4017, 4019, 4021, 4022,
+ 4024, 4025, 4027, 4028, 4030, 4031, 4033, 4034,
+ 4036, 4037, 4039, 4040, 4042, 4043, 4044, 4046,
+ 4047, 4048, 4050, 4051, 4052, 4053, 4055, 4056,
+ 4057, 4058, 4059, 4060, 4062, 4063, 4064, 4065,
+ 4066, 4067, 4068, 4069, 4070, 4071, 4072, 4073,
+ 4074, 4075, 4075, 4076, 4077, 4078, 4079, 4079,
+ 4080, 4081, 4082, 4082, 4083, 4084, 4084, 4085,
+ 4086, 4086, 4087, 4087, 4088, 4088, 4089, 4089,
+ 4090, 4090, 4091, 4091, 4092, 4092, 4092, 4093,
+ 4093, 4093, 4094, 4094, 4094, 4094, 4095, 4095,
+ 4095, 4095, 4095, 4095, 4095, 4095, 4095, 4095
+ },
+ { 4096, 4095, 4095, 4095, 4095, 4095, 4095, 4095,
+ 4095, 4095, 4095, 4094, 4094, 4094, 4094, 4093,
+ 4093, 4093, 4092, 4092, 4092, 4091, 4091, 4090,
+ 4090, 4089, 4089, 4088, 4088, 4087, 4087, 4086,
+ 4086, 4085, 4084, 4084, 4083, 4082, 4082, 4081,
+ 4080, 4079, 4079, 4078, 4077, 4076, 4075, 4075,
+ 4074, 4073, 4072, 4071, 4070, 4069, 4068, 4067,
+ 4066, 4065, 4064, 4063, 4062, 4060, 4059, 4058,
+ 4057, 4056, 4055, 4053, 4052, 4051, 4050, 4048,
+ 4047, 4046, 4044, 4043, 4042, 4040, 4039, 4037,
+ 4036, 4034, 4033, 4031, 4030, 4028, 4027, 4025,
+ 4024, 4022, 4021, 4019, 4017, 4016, 4014, 4012,
+ 4011, 4009, 4007, 4005, 4004, 4002, 4000, 3998,
+ 3996, 3994, 3993, 3991, 3989, 3987, 3985, 3983,
+ 3981, 3979, 3977, 3975, 3973, 3971, 3969, 3967,
+ 3965, 3963, 3961, 3958, 3956, 3954, 3952, 3950,
+ 3948, 3945, 3943, 3941, 3939, 3936, 3934, 3932,
+ 3929, 3927, 3925, 3922, 3920, 3917, 3915, 3913,
+ 3910, 3908, 3905, 3903, 3900, 3898, 3895, 3893,
+ 3890, 3887, 3885, 3882, 3880, 3877, 3874, 3872,
+ 3869, 3866, 3864, 3861, 3858, 3855, 3853, 3850,
+ 3847, 3844, 3841, 3839, 3836, 3833, 3830, 3827,
+ 3824, 3821, 3818, 3815, 3812, 3809, 3807, 3804,
+ 3801, 3798, 3794, 3791, 3788, 3785, 3782, 3779,
+ 3776, 3773, 3770, 3767, 3763, 3760, 3757, 3754,
+ 3751, 3747, 3744, 3741, 3738, 3734, 3731, 3728,
+ 3724, 3721, 3718, 3714, 3711, 3708, 3704, 3701,
+ 3698, 3694, 3691, 3687, 3684, 3680, 3677, 3673,
+ 3670, 3666, 3663, 3659, 3656, 3652, 3649, 3645,
+ 3641, 3638, 3634, 3630, 3627, 3623, 3619, 3616,
+ 3612, 3608, 3605, 3601, 3597, 3593, 3590, 3586,
+ 3582, 3578, 3575, 3571, 3567, 3563, 3559, 3555,
+ 3552, 3548, 3544, 3540, 3536, 3532, 3528, 3524,
+ 3520, 3516, 3512, 3508, 3504, 3500, 3496, 3492,
+ 3488, 3484, 3480, 3476, 3472, 3468, 3464, 3460,
+ 3455, 3451, 3447, 3443, 3439, 3435, 3431, 3426,
+ 3422, 3418, 3414, 3410, 3405, 3401, 3397, 3393,
+ 3388, 3384, 3380, 3375, 3371, 3367, 3362, 3358,
+ 3354, 3349, 3345, 3341, 3336, 3332, 3327, 3323,
+ 3319, 3314, 3310, 3305, 3301, 3296, 3292, 3287,
+ 3283, 3279, 3274, 3269, 3265, 3260, 3256, 3251,
+ 3247, 3242, 3238, 3233, 3228, 3224, 3219, 3215,
+ 3210, 3205, 3201, 3196, 3191, 3187, 3182, 3177,
+ 3173, 3168, 3163, 3159, 3154, 3149, 3145, 3140,
+ 3135, 3130, 3126, 3121, 3116, 3111, 3106, 3102,
+ 3097, 3092, 3087, 3082, 3078, 3073, 3068, 3063,
+ 3058, 3053, 3048, 3044, 3039, 3034, 3029, 3024,
+ 3019, 3014, 3009, 3004, 2999, 2994, 2989, 2984,
+ 2980, 2975, 2970, 2965, 2960, 2955, 2950, 2945,
+ 2940, 2935, 2930, 2924, 2919, 2914, 2909, 2904,
+ 2899, 2894, 2889, 2884, 2879, 2874, 2869, 2864,
+ 2859, 2853, 2848, 2843, 2838, 2833, 2828, 2823,
+ 2817, 2812, 2807, 2802, 2797, 2792, 2786, 2781,
+ 2776, 2771, 2766, 2760, 2755, 2750, 2745, 2740,
+ 2734, 2729, 2724, 2719, 2713, 2708, 2703, 2698,
+ 2692, 2687, 2682, 2676, 2671, 2666, 2661, 2655,
+ 2650, 2645, 2639, 2634, 2629, 2623, 2618, 2613,
+ 2607, 2602, 2597, 2591, 2586, 2581, 2575, 2570,
+ 2565, 2559, 2554, 2549, 2543, 2538, 2532, 2527,
+ 2522, 2516, 2511, 2505, 2500, 2495, 2489, 2484,
+ 2478, 2473, 2467, 2462, 2457, 2451, 2446, 2440,
+ 2435, 2429, 2424, 2419, 2413, 2408, 2402, 2397,
+ 2391, 2386, 2380, 2375, 2369, 2364, 2358, 2353,
+ 2347, 2342, 2336, 2331, 2325, 2320, 2314, 2309,
+ 2304, 2298, 2292, 2287, 2281, 2276, 2270, 2265,
+ 2259, 2254, 2248, 2243, 2237, 2232, 2226, 2221,
+ 2215, 2210, 2204, 2199, 2193, 2188, 2182, 2177,
+ 2171, 2165, 2160, 2154, 2149, 2143, 2138, 2132,
+ 2127, 2121, 2116, 2110, 2105, 2099, 2093, 2088,
+ 2082, 2077, 2071, 2066, 2060, 2055, 2049, 2043,
+ 2038, 2032, 2027, 2021, 2016, 2010, 2005, 1999,
+ 1993, 1988, 1982, 1977, 1971, 1966, 1960, 1955,
+ 1949, 1943, 1938, 1932, 1927, 1921, 1916, 1910,
+ 1905, 1899, 1893, 1888, 1882, 1877, 1871, 1866,
+ 1860, 1855, 1849, 1844, 1838, 1832, 1827, 1821,
+ 1816, 1810, 1805, 1799, 1794, 1788, 1783, 1777,
+ 1772, 1766, 1761, 1755, 1749, 1744, 1738, 1733,
+ 1727, 1722, 1716, 1711, 1705, 1700, 1694, 1689,
+ 1683, 1678, 1672, 1667, 1661, 1656, 1650, 1645,
+ 1639, 1634, 1628, 1623, 1617, 1612, 1606, 1601,
+ 1596, 1590, 1585, 1579, 1574, 1568, 1563, 1557,
+ 1552, 1546, 1541, 1535, 1530, 1525, 1519, 1514,
+ 1508, 1503, 1497, 1492, 1487, 1481, 1476, 1470,
+ 1465, 1460, 1454, 1449, 1443, 1438, 1433, 1427,
+ 1422, 1417, 1411, 1406, 1400, 1395, 1390, 1384,
+ 1379, 1374, 1368, 1363, 1358, 1352, 1347, 1342,
+ 1336, 1331, 1326, 1321, 1315, 1310, 1305, 1299,
+ 1294, 1289, 1284, 1278, 1273, 1268, 1262, 1257,
+ 1252, 1247, 1242, 1236, 1231, 1226, 1221, 1215,
+ 1210, 1205, 1200, 1195, 1189, 1184, 1179, 1174,
+ 1169, 1164, 1158, 1153, 1148, 1143, 1138, 1133,
+ 1128, 1122, 1117, 1112, 1107, 1102, 1097, 1092,
+ 1087, 1082, 1077, 1072, 1067, 1062, 1056, 1051,
+ 1046, 1041, 1036, 1031, 1026, 1021, 1016, 1011,
+ 1006, 1001, 996, 991, 986, 982, 977, 972,
+ 967, 962, 957, 952, 947, 942, 937, 932,
+ 928, 923, 918, 913, 908, 903, 898, 894,
+ 889, 884, 879, 874, 870, 865, 860, 855,
+ 850, 846, 841, 836, 831, 827, 822, 817,
+ 813, 808, 803, 798, 794, 789, 784, 780,
+ 775, 771, 766, 761, 757, 752, 747, 743,
+ 738, 734, 729, 725, 720, 716, 711, 707,
+ 702, 698, 693, 689, 684, 680, 675, 671,
+ 666, 662, 657, 653, 649, 644, 640, 635,
+ 631, 627, 622, 618, 614, 609, 605, 601,
+ 596, 592, 588, 584, 579, 575, 571, 567,
+ 562, 558, 554, 550, 546, 541, 537, 533,
+ 529, 525, 521, 516, 512, 508, 504, 500,
+ 496, 492, 488, 484, 480, 476, 472, 468,
+ 464, 460, 456, 452, 448, 444, 440, 436,
+ 432, 429, 425, 421, 417, 413, 409, 405,
+ 402, 398, 394, 390, 386, 383, 379, 375,
+ 372, 368, 364, 360, 357, 353, 349, 346,
+ 342, 338, 335, 331, 328, 324, 321, 317,
+ 313, 310, 306, 303, 299, 296, 292, 289,
+ 286, 282, 279, 275, 272, 269, 265, 262,
+ 258, 255, 252, 248, 245, 242, 239, 235,
+ 232, 229, 226, 222, 219, 216, 213, 210,
+ 207, 203, 200, 197, 194, 191, 188, 185,
+ 182, 179, 176, 173, 170, 167, 164, 161,
+ 158, 155, 152, 149, 146, 144, 141, 138,
+ 135, 132, 129, 127, 124, 121, 118, 116,
+ 113, 110, 107, 105, 102, 99, 97, 94,
+ 92, 89, 86, 84, 81, 79, 76, 74,
+ 71, 69, 66, 64, 61, 59, 57, 54,
+ 52, 50, 47, 45, 43, 40, 38, 36,
+ 33, 31, 29, 27, 25, 22, 20, 18,
+ 16, 14, 12, 10, 8, 6, 4, 2
+ },
+ { 0, -1, -3, -5, -7, -9, -11, -13,
+ -15, -17, -19, -20, -23, -25, -27, -28,
+ -30, -33, -34, -36, -39, -40, -42, -43,
+ -45, -46, -49, -50, -52, -54, -56, -58,
+ -60, -61, -62, -65, -66, -68, -70, -72,
+ -73, -74, -77, -78, -80, -82, -83, -85,
+ -87, -89, -90, -92, -93, -95, -96, -98,
+ -100, -102, -103, -105, -106, -107, -108, -110,
+ -112, -114, -116, -116, -118, -120, -122, -122,
+ -124, -126, -127, -128, -130, -131, -133, -133,
+ -136, -137, -138, -139, -141, -142, -144, -145,
+ -147, -147, -150, -151, -151, -153, -155, -156,
+ -157, -159, -160, -161, -163, -164, -165, -166,
+ -168, -168, -170, -171, -172, -174, -174, -176,
+ -177, -178, -180, -181, -182, -183, -184, -185,
+ -187, -188, -189, -190, -191, -192, -193, -195,
+ -196, -196, -198, -199, -200, -200, -202, -204,
+ -204, -205, -206, -207, -208, -209, -211, -212,
+ -212, -213, -214, -215, -216, -217, -218, -220,
+ -220, -221, -222, -223, -224, -225, -225, -227,
+ -227, -228, -229, -230, -230, -231, -233, -234,
+ -234, -235, -235, -237, -238, -239, -239, -240,
+ -240, -242, -242, -243, -243, -245, -246, -247,
+ -247, -249, -248, -249, -250, -251, -251, -253,
+ -253, -253, -255, -255, -256, -256, -257, -258,
+ -259, -259, -260, -261, -261, -262, -262, -264,
+ -263, -265, -265, -265, -266, -267, -267, -268,
+ -269, -269, -269, -270, -271, -271, -272, -273,
+ -273, -273, -274, -274, -276, -275, -276, -277,
+ -277, -278, -278, -278, -279, -279, -280, -281,
+ -280, -281, -282, -283, -283, -282, -284, -284,
+ -284, -285, -285, -286, -286, -286, -287, -287,
+ -288, -288, -288, -289, -289, -289, -290, -290,
+ -290, -291, -291, -292, -291, -291, -292, -292,
+ -292, -293, -293, -293, -294, -294, -295, -295,
+ -294, -295, -295, -296, -297, -297, -297, -297,
+ -297, -297, -298, -298, -297, -298, -298, -298,
+ -299, -299, -300, -299, -299, -300, -299, -300,
+ -301, -300, -300, -301, -300, -301, -301, -301,
+ -301, -301, -302, -301, -302, -301, -302, -302,
+ -302, -302, -302, -302, -302, -302, -303, -302,
+ -303, -302, -303, -303, -302, -303, -303, -303,
+ -302, -303, -303, -302, -303, -303, -302, -303,
+ -303, -302, -303, -303, -302, -303, -303, -303,
+ -303, -302, -303, -303, -302, -302, -302, -303,
+ -302, -302, -302, -301, -303, -302, -301, -302,
+ -301, -301, -301, -302, -301, -301, -301, -300,
+ -301, -300, -300, -300, -300, -299, -300, -299,
+ -300, -300, -299, -300, -299, -299, -299, -299,
+ -298, -299, -298, -297, -297, -297, -296, -297,
+ -296, -296, -296, -296, -295, -296, -295, -296,
+ -295, -294, -294, -294, -293, -294, -294, -293,
+ -293, -292, -293, -292, -292, -292, -291, -290,
+ -291, -290, -291, -289, -289, -290, -289, -289,
+ -288, -288, -288, -288, -286, -287, -286, -286,
+ -286, -285, -286, -284, -284, -284, -284, -283,
+ -283, -283, -282, -282, -282, -281, -280, -281,
+ -279, -280, -280, -278, -279, -278, -278, -277,
+ -278, -276, -276, -277, -275, -276, -274, -275,
+ -274, -273, -273, -272, -273, -272, -272, -271,
+ -270, -270, -269, -269, -269, -268, -268, -267,
+ -267, -266, -266, -266, -265, -265, -264, -264,
+ -263, -263, -262, -262, -261, -261, -260, -260,
+ -259, -259, -258, -258, -257, -257, -256, -256,
+ -256, -255, -254, -254, -253, -253, -252, -252,
+ -251, -251, -250, -250, -249, -249, -248, -248,
+ -247, -247, -246, -246, -245, -245, -244, -244,
+ -243, -242, -242, -241, -241, -240, -239, -239,
+ -239, -238, -238, -237, -237, -235, -235, -235,
+ -234, -234, -232, -233, -232, -232, -231, -229,
+ -230, -229, -228, -228, -227, -226, -227, -225,
+ -224, -225, -223, -223, -222, -222, -221, -221,
+ -220, -219, -219, -218, -218, -216, -217, -216,
+ -215, -215, -214, -213, -212, -213, -211, -211,
+ -210, -210, -209, -209, -208, -206, -207, -206,
+ -205, -204, -204, -204, -203, -202, -202, -200,
+ -200, -200, -200, -198, -197, -197, -196, -195,
+ -195, -195, -194, -194, -192, -192, -191, -191,
+ -189, -189, -188, -188, -187, -186, -186, -186,
+ -185, -185, -183, -183, -182, -182, -181, -181,
+ -180, -178, -178, -177, -177, -176, -176, -174,
+ -174, -173, -173, -172, -172, -172, -170, -170,
+ -168, -168, -167, -167, -167, -165, -165, -164,
+ -164, -164, -162, -162, -161, -160, -160, -158,
+ -158, -158, -157, -156, -155, -155, -154, -153,
+ -153, -152, -151, -151, -150, -149, -149, -148,
+ -147, -147, -146, -146, -144, -144, -144, -142,
+ -142, -141, -142, -140, -140, -139, -138, -138,
+ -137, -136, -136, -134, -134, -133, -134, -132,
+ -132, -131, -130, -130, -128, -128, -128, -127,
+ -127, -126, -124, -124, -124, -123, -123, -122,
+ -121, -120, -120, -119, -118, -118, -117, -117,
+ -116, -115, -115, -115, -114, -113, -111, -111,
+ -110, -110, -109, -109, -108, -107, -107, -106,
+ -105, -104, -104, -103, -102, -103, -102, -101,
+ -101, -100, -99, -99, -98, -97, -97, -96,
+ -96, -95, -94, -94, -93, -92, -92, -91,
+ -91, -90, -89, -88, -88, -88, -87, -86,
+ -85, -86, -84, -84, -83, -82, -82, -81,
+ -81, -80, -80, -78, -79, -77, -77, -77,
+ -76, -76, -75, -74, -74, -73, -72, -72,
+ -72, -71, -70, -70, -69, -68, -68, -68,
+ -66, -67, -66, -65, -65, -65, -63, -63,
+ -62, -62, -61, -61, -60, -60, -60, -58,
+ -58, -58, -56, -56, -56, -55, -54, -55,
+ -54, -54, -53, -52, -51, -51, -51, -50,
+ -49, -49, -49, -49, -48, -47, -46, -46,
+ -46, -46, -45, -43, -43, -43, -43, -42,
+ -42, -42, -40, -40, -40, -39, -39, -38,
+ -38, -38, -37, -37, -36, -36, -35, -35,
+ -34, -35, -34, -33, -33, -32, -32, -31,
+ -31, -31, -30, -29, -29, -29, -28, -27,
+ -28, -28, -27, -26, -26, -25, -25, -25,
+ -24, -24, -24, -23, -23, -22, -22, -22,
+ -21, -21, -20, -20, -20, -20, -19, -18,
+ -19, -18, -18, -17, -18, -17, -16, -17,
+ -16, -15, -15, -15, -14, -14, -15, -13,
+ -13, -13, -13, -12, -12, -11, -12, -11,
+ -12, -10, -10, -10, -10, -10, -9, -10,
+ -9, -9, -9, -8, -8, -7, -8, -7,
+ -7, -7, -6, -6, -6, -7, -6, -6,
+ -5, -5, -5, -5, -5, -4, -4, -5,
+ -4, -4, -3, -3, -3, -3, -3, -2,
+ -3, -2, -2, -2, -1, -2, -1, -2,
+ -1, -1, -1, -1, -1, 0, -1, 0,
+ -1, -1, 0, 0, -1, 0, 0, -1,
+ 1, 1, 0, 0, 0, 1, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0
+ }
+};
+#else /* defined(CONFIG_CSI2_PLUS) */
+static const int zoom_table[4][HRT_GDC_N] = {
+ { 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4,
+ 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4,
+ 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4,
+ 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4,
+ 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4,
+ 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4,
+ 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4,
+ 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4,
+ 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4,
+ 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4,
+ 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4,
+ 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4,
+ -1<<4, -1<<4, -1<<4, -1<<4, -1<<4, -1<<4, -1<<4, -1<<4,
+ -1<<4, -1<<4, -1<<4, -1<<4, -1<<4, -1<<4, -1<<4, -1<<4,
+ -1<<4, -1<<4, -1<<4, -1<<4, -1<<4, -1<<4, -1<<4, -1<<4,
+ -1<<4, -1<<4, -1<<4, -1<<4, -1<<4, -1<<4, -1<<4, -1<<4,
+ -1<<4, -1<<4, -1<<4, -1<<4, -1<<4, -1<<4, -1<<4, -1<<4,
+ -1<<4, -1<<4, -1<<4, -1<<4, -1<<4, -1<<4, -1<<4, -1<<4,
+ -2<<4, -2<<4, -2<<4, -2<<4, -2<<4, -2<<4, -2<<4, -2<<4,
+ -2<<4, -2<<4, -2<<4, -2<<4, -2<<4, -2<<4, -2<<4, -2<<4,
+ -2<<4, -2<<4, -2<<4, -2<<4, -2<<4, -2<<4, -2<<4, -2<<4,
+ -2<<4, -2<<4, -2<<4, -2<<4, -2<<4, -2<<4, -2<<4, -2<<4,
+ -3<<4, -3<<4, -3<<4, -3<<4, -3<<4, -3<<4, -3<<4, -3<<4,
+ -3<<4, -3<<4, -3<<4, -3<<4, -3<<4, -3<<4, -3<<4, -3<<4,
+ -3<<4, -3<<4, -3<<4, -3<<4, -3<<4, -3<<4, -3<<4, -3<<4,
+ -3<<4, -3<<4, -3<<4, -3<<4, -3<<4, -3<<4, -3<<4, -3<<4,
+ -4<<4, -4<<4, -4<<4, -4<<4, -4<<4, -4<<4, -4<<4, -4<<4,
+ -4<<4, -4<<4, -4<<4, -4<<4, -4<<4, -4<<4, -4<<4, -4<<4,
+ -4<<4, -4<<4, -4<<4, -4<<4, -4<<4, -4<<4, -4<<4, -4<<4,
+ -4<<4, -4<<4, -4<<4, -4<<4, -4<<4, -4<<4, -4<<4, -4<<4,
+ -5<<4, -5<<4, -5<<4, -5<<4, -5<<4, -5<<4, -5<<4, -5<<4,
+ -5<<4, -5<<4, -5<<4, -5<<4, -5<<4, -5<<4, -5<<4, -5<<4,
+ -6<<4, -6<<4, -6<<4, -6<<4, -6<<4, -6<<4, -6<<4, -6<<4,
+ -6<<4, -6<<4, -6<<4, -6<<4, -6<<4, -6<<4, -6<<4, -6<<4,
+ -6<<4, -6<<4, -6<<4, -6<<4, -6<<4, -6<<4, -6<<4, -6<<4,
+ -6<<4, -6<<4, -6<<4, -6<<4, -6<<4, -6<<4, -6<<4, -6<<4,
+ -7<<4, -7<<4, -7<<4, -7<<4, -7<<4, -7<<4, -7<<4, -7<<4,
+ -7<<4, -7<<4, -7<<4, -7<<4, -7<<4, -7<<4, -7<<4, -7<<4,
+ -7<<4, -7<<4, -7<<4, -7<<4, -7<<4, -7<<4, -7<<4, -7<<4,
+ -7<<4, -7<<4, -7<<4, -7<<4, -7<<4, -7<<4, -7<<4, -7<<4,
+ -8<<4, -8<<4, -8<<4, -8<<4, -8<<4, -8<<4, -8<<4, -8<<4,
+ -8<<4, -8<<4, -8<<4, -8<<4, -8<<4, -8<<4, -8<<4, -8<<4,
+ -9<<4, -9<<4, -9<<4, -9<<4, -9<<4, -9<<4, -9<<4, -9<<4,
+ -9<<4, -9<<4, -9<<4, -9<<4, -9<<4, -9<<4, -9<<4, -9<<4,
+ -9<<4, -9<<4, -9<<4, -9<<4, -9<<4, -9<<4, -9<<4, -9<<4,
+ -9<<4, -9<<4, -9<<4, -9<<4, -9<<4, -9<<4, -9<<4, -9<<4,
+ -10<<4, -10<<4, -10<<4, -10<<4, -10<<4, -10<<4, -10<<4, -10<<4,
+ -10<<4, -10<<4, -10<<4, -10<<4, -10<<4, -10<<4, -10<<4, -10<<4,
+ -11<<4, -11<<4, -11<<4, -11<<4, -11<<4, -11<<4, -11<<4, -11<<4,
+ -11<<4, -11<<4, -11<<4, -11<<4, -11<<4, -11<<4, -11<<4, -11<<4,
+ -11<<4, -11<<4, -11<<4, -11<<4, -11<<4, -11<<4, -11<<4, -11<<4,
+ -11<<4, -11<<4, -11<<4, -11<<4, -11<<4, -11<<4, -11<<4, -11<<4,
+ -12<<4, -12<<4, -12<<4, -12<<4, -12<<4, -12<<4, -12<<4, -12<<4,
+ -12<<4, -12<<4, -12<<4, -12<<4, -12<<4, -12<<4, -12<<4, -12<<4,
+ -13<<4, -13<<4, -13<<4, -13<<4, -13<<4, -13<<4, -13<<4, -13<<4,
+ -13<<4, -13<<4, -13<<4, -13<<4, -13<<4, -13<<4, -13<<4, -13<<4,
+ -13<<4, -13<<4, -13<<4, -13<<4, -13<<4, -13<<4, -13<<4, -13<<4,
+ -13<<4, -13<<4, -13<<4, -13<<4, -13<<4, -13<<4, -13<<4, -13<<4,
+ -14<<4, -14<<4, -14<<4, -14<<4, -14<<4, -14<<4, -14<<4, -14<<4,
+ -14<<4, -14<<4, -14<<4, -14<<4, -14<<4, -14<<4, -14<<4, -14<<4,
+ -14<<4, -14<<4, -14<<4, -14<<4, -14<<4, -14<<4, -14<<4, -14<<4,
+ -14<<4, -14<<4, -14<<4, -14<<4, -14<<4, -14<<4, -14<<4, -14<<4,
+ -15<<4, -15<<4, -15<<4, -15<<4, -15<<4, -15<<4, -15<<4, -15<<4,
+ -15<<4, -15<<4, -15<<4, -15<<4, -15<<4, -15<<4, -15<<4, -15<<4,
+ -16<<4, -16<<4, -16<<4, -16<<4, -16<<4, -16<<4, -16<<4, -16<<4,
+ -16<<4, -16<<4, -16<<4, -16<<4, -16<<4, -16<<4, -16<<4, -16<<4,
+ -16<<4, -16<<4, -16<<4, -16<<4, -16<<4, -16<<4, -16<<4, -16<<4,
+ -16<<4, -16<<4, -16<<4, -16<<4, -16<<4, -16<<4, -16<<4, -16<<4,
+ -16<<4, -16<<4, -16<<4, -16<<4, -16<<4, -16<<4, -16<<4, -16<<4,
+ -16<<4, -16<<4, -16<<4, -16<<4, -16<<4, -16<<4, -16<<4, -16<<4,
+ -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, -17<<4,
+ -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, -17<<4,
+ -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, -17<<4,
+ -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, -17<<4,
+ -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4,
+ -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4,
+ -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4,
+ -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4,
+ -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4,
+ -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4,
+ -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4,
+ -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4,
+ -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4,
+ -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4,
+ -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4,
+ -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4,
+ -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4,
+ -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4,
+ -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4,
+ -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4,
+ -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4,
+ -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4,
+ -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4,
+ -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4,
+ -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4,
+ -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4,
+ -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4,
+ -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4,
+ -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, -17<<4,
+ -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, -17<<4,
+ -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, -17<<4,
+ -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, -17<<4,
+ -16<<4, -16<<4, -16<<4, -16<<4, -16<<4, -16<<4, -16<<4, -16<<4,
+ -16<<4, -16<<4, -16<<4, -16<<4, -16<<4, -16<<4, -16<<4, -16<<4,
+ -15<<4, -15<<4, -15<<4, -15<<4, -15<<4, -15<<4, -15<<4, -15<<4,
+ -15<<4, -15<<4, -15<<4, -15<<4, -15<<4, -15<<4, -15<<4, -15<<4,
+ -15<<4, -15<<4, -15<<4, -15<<4, -15<<4, -15<<4, -15<<4, -15<<4,
+ -15<<4, -15<<4, -15<<4, -15<<4, -15<<4, -15<<4, -15<<4, -15<<4,
+ -14<<4, -14<<4, -14<<4, -14<<4, -14<<4, -14<<4, -14<<4, -14<<4,
+ -14<<4, -14<<4, -14<<4, -14<<4, -14<<4, -14<<4, -14<<4, -14<<4,
+ -13<<4, -13<<4, -13<<4, -13<<4, -13<<4, -13<<4, -13<<4, -13<<4,
+ -13<<4, -13<<4, -13<<4, -13<<4, -13<<4, -13<<4, -13<<4, -13<<4,
+ -12<<4, -12<<4, -12<<4, -12<<4, -12<<4, -12<<4, -12<<4, -12<<4,
+ -12<<4, -12<<4, -12<<4, -12<<4, -12<<4, -12<<4, -12<<4, -12<<4,
+ -11<<4, -11<<4, -11<<4, -11<<4, -11<<4, -11<<4, -11<<4, -11<<4,
+ -11<<4, -11<<4, -11<<4, -11<<4, -11<<4, -11<<4, -11<<4, -11<<4,
+ -9<<4, -9<<4, -9<<4, -9<<4, -9<<4, -9<<4, -9<<4, -9<<4,
+ -9<<4, -9<<4, -9<<4, -9<<4, -9<<4, -9<<4, -9<<4, -9<<4,
+ -8<<4, -8<<4, -8<<4, -8<<4, -8<<4, -8<<4, -8<<4, -8<<4,
+ -8<<4, -8<<4, -8<<4, -8<<4, -8<<4, -8<<4, -8<<4, -8<<4,
+ -7<<4, -7<<4, -7<<4, -7<<4, -7<<4, -7<<4, -7<<4, -7<<4,
+ -7<<4, -7<<4, -7<<4, -7<<4, -7<<4, -7<<4, -7<<4, -7<<4,
+ -5<<4, -5<<4, -5<<4, -5<<4, -5<<4, -5<<4, -5<<4, -5<<4,
+ -5<<4, -5<<4, -5<<4, -5<<4, -5<<4, -5<<4, -5<<4, -5<<4,
+ -3<<4, -3<<4, -3<<4, -3<<4, -3<<4, -3<<4, -3<<4, -3<<4,
+ -3<<4, -3<<4, -3<<4, -3<<4, -3<<4, -3<<4, -3<<4, -3<<4,
+ -1<<4, -1<<4, -1<<4, -1<<4, -1<<4, -1<<4, -1<<4, -1<<4,
+ -1<<4, -1<<4, -1<<4, -1<<4, -1<<4, -1<<4, -1<<4, -1<<4
+ },
+ { 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4,
+ 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4,
+ 2<<4, 2<<4, 2<<4, 2<<4, 2<<4, 2<<4, 2<<4, 2<<4,
+ 2<<4, 2<<4, 2<<4, 2<<4, 2<<4, 2<<4, 2<<4, 2<<4,
+ 4<<4, 4<<4, 4<<4, 4<<4, 4<<4, 4<<4, 4<<4, 4<<4,
+ 4<<4, 4<<4, 4<<4, 4<<4, 4<<4, 4<<4, 4<<4, 4<<4,
+ 7<<4, 7<<4, 7<<4, 7<<4, 7<<4, 7<<4, 7<<4, 7<<4,
+ 7<<4, 7<<4, 7<<4, 7<<4, 7<<4, 7<<4, 7<<4, 7<<4,
+ 9<<4, 9<<4, 9<<4, 9<<4, 9<<4, 9<<4, 9<<4, 9<<4,
+ 9<<4, 9<<4, 9<<4, 9<<4, 9<<4, 9<<4, 9<<4, 9<<4,
+ 12<<4, 12<<4, 12<<4, 12<<4, 12<<4, 12<<4, 12<<4, 12<<4,
+ 12<<4, 12<<4, 12<<4, 12<<4, 12<<4, 12<<4, 12<<4, 12<<4,
+ 16<<4, 16<<4, 16<<4, 16<<4, 16<<4, 16<<4, 16<<4, 16<<4,
+ 16<<4, 16<<4, 16<<4, 16<<4, 16<<4, 16<<4, 16<<4, 16<<4,
+ 19<<4, 19<<4, 19<<4, 19<<4, 19<<4, 19<<4, 19<<4, 19<<4,
+ 19<<4, 19<<4, 19<<4, 19<<4, 19<<4, 19<<4, 19<<4, 19<<4,
+ 23<<4, 23<<4, 23<<4, 23<<4, 23<<4, 23<<4, 23<<4, 23<<4,
+ 23<<4, 23<<4, 23<<4, 23<<4, 23<<4, 23<<4, 23<<4, 23<<4,
+ 27<<4, 27<<4, 27<<4, 27<<4, 27<<4, 27<<4, 27<<4, 27<<4,
+ 27<<4, 27<<4, 27<<4, 27<<4, 27<<4, 27<<4, 27<<4, 27<<4,
+ 31<<4, 31<<4, 31<<4, 31<<4, 31<<4, 31<<4, 31<<4, 31<<4,
+ 31<<4, 31<<4, 31<<4, 31<<4, 31<<4, 31<<4, 31<<4, 31<<4,
+ 35<<4, 35<<4, 35<<4, 35<<4, 35<<4, 35<<4, 35<<4, 35<<4,
+ 35<<4, 35<<4, 35<<4, 35<<4, 35<<4, 35<<4, 35<<4, 35<<4,
+ 39<<4, 39<<4, 39<<4, 39<<4, 39<<4, 39<<4, 39<<4, 39<<4,
+ 39<<4, 39<<4, 39<<4, 39<<4, 39<<4, 39<<4, 39<<4, 39<<4,
+ 43<<4, 43<<4, 43<<4, 43<<4, 43<<4, 43<<4, 43<<4, 43<<4,
+ 43<<4, 43<<4, 43<<4, 43<<4, 43<<4, 43<<4, 43<<4, 43<<4,
+ 48<<4, 48<<4, 48<<4, 48<<4, 48<<4, 48<<4, 48<<4, 48<<4,
+ 48<<4, 48<<4, 48<<4, 48<<4, 48<<4, 48<<4, 48<<4, 48<<4,
+ 53<<4, 53<<4, 53<<4, 53<<4, 53<<4, 53<<4, 53<<4, 53<<4,
+ 53<<4, 53<<4, 53<<4, 53<<4, 53<<4, 53<<4, 53<<4, 53<<4,
+ 58<<4, 58<<4, 58<<4, 58<<4, 58<<4, 58<<4, 58<<4, 58<<4,
+ 58<<4, 58<<4, 58<<4, 58<<4, 58<<4, 58<<4, 58<<4, 58<<4,
+ 62<<4, 62<<4, 62<<4, 62<<4, 62<<4, 62<<4, 62<<4, 62<<4,
+ 62<<4, 62<<4, 62<<4, 62<<4, 62<<4, 62<<4, 62<<4, 62<<4,
+ 67<<4, 67<<4, 67<<4, 67<<4, 67<<4, 67<<4, 67<<4, 67<<4,
+ 67<<4, 67<<4, 67<<4, 67<<4, 67<<4, 67<<4, 67<<4, 67<<4,
+ 73<<4, 73<<4, 73<<4, 73<<4, 73<<4, 73<<4, 73<<4, 73<<4,
+ 73<<4, 73<<4, 73<<4, 73<<4, 73<<4, 73<<4, 73<<4, 73<<4,
+ 78<<4, 78<<4, 78<<4, 78<<4, 78<<4, 78<<4, 78<<4, 78<<4,
+ 78<<4, 78<<4, 78<<4, 78<<4, 78<<4, 78<<4, 78<<4, 78<<4,
+ 83<<4, 83<<4, 83<<4, 83<<4, 83<<4, 83<<4, 83<<4, 83<<4,
+ 83<<4, 83<<4, 83<<4, 83<<4, 83<<4, 83<<4, 83<<4, 83<<4,
+ 88<<4, 88<<4, 88<<4, 88<<4, 88<<4, 88<<4, 88<<4, 88<<4,
+ 88<<4, 88<<4, 88<<4, 88<<4, 88<<4, 88<<4, 88<<4, 88<<4,
+ 94<<4, 94<<4, 94<<4, 94<<4, 94<<4, 94<<4, 94<<4, 94<<4,
+ 94<<4, 94<<4, 94<<4, 94<<4, 94<<4, 94<<4, 94<<4, 94<<4,
+ 99<<4, 99<<4, 99<<4, 99<<4, 99<<4, 99<<4, 99<<4, 99<<4,
+ 99<<4, 99<<4, 99<<4, 99<<4, 99<<4, 99<<4, 99<<4, 99<<4,
+ 105<<4, 105<<4, 105<<4, 105<<4, 105<<4, 105<<4, 105<<4, 105<<4,
+ 105<<4, 105<<4, 105<<4, 105<<4, 105<<4, 105<<4, 105<<4, 105<<4,
+ 110<<4, 110<<4, 110<<4, 110<<4, 110<<4, 110<<4, 110<<4, 110<<4,
+ 110<<4, 110<<4, 110<<4, 110<<4, 110<<4, 110<<4, 110<<4, 110<<4,
+ 116<<4, 116<<4, 116<<4, 116<<4, 116<<4, 116<<4, 116<<4, 116<<4,
+ 116<<4, 116<<4, 116<<4, 116<<4, 116<<4, 116<<4, 116<<4, 116<<4,
+ 121<<4, 121<<4, 121<<4, 121<<4, 121<<4, 121<<4, 121<<4, 121<<4,
+ 121<<4, 121<<4, 121<<4, 121<<4, 121<<4, 121<<4, 121<<4, 121<<4,
+ 127<<4, 127<<4, 127<<4, 127<<4, 127<<4, 127<<4, 127<<4, 127<<4,
+ 127<<4, 127<<4, 127<<4, 127<<4, 127<<4, 127<<4, 127<<4, 127<<4,
+ 132<<4, 132<<4, 132<<4, 132<<4, 132<<4, 132<<4, 132<<4, 132<<4,
+ 132<<4, 132<<4, 132<<4, 132<<4, 132<<4, 132<<4, 132<<4, 132<<4,
+ 138<<4, 138<<4, 138<<4, 138<<4, 138<<4, 138<<4, 138<<4, 138<<4,
+ 138<<4, 138<<4, 138<<4, 138<<4, 138<<4, 138<<4, 138<<4, 138<<4,
+ 144<<4, 144<<4, 144<<4, 144<<4, 144<<4, 144<<4, 144<<4, 144<<4,
+ 144<<4, 144<<4, 144<<4, 144<<4, 144<<4, 144<<4, 144<<4, 144<<4,
+ 149<<4, 149<<4, 149<<4, 149<<4, 149<<4, 149<<4, 149<<4, 149<<4,
+ 149<<4, 149<<4, 149<<4, 149<<4, 149<<4, 149<<4, 149<<4, 149<<4,
+ 154<<4, 154<<4, 154<<4, 154<<4, 154<<4, 154<<4, 154<<4, 154<<4,
+ 154<<4, 154<<4, 154<<4, 154<<4, 154<<4, 154<<4, 154<<4, 154<<4,
+ 160<<4, 160<<4, 160<<4, 160<<4, 160<<4, 160<<4, 160<<4, 160<<4,
+ 160<<4, 160<<4, 160<<4, 160<<4, 160<<4, 160<<4, 160<<4, 160<<4,
+ 165<<4, 165<<4, 165<<4, 165<<4, 165<<4, 165<<4, 165<<4, 165<<4,
+ 165<<4, 165<<4, 165<<4, 165<<4, 165<<4, 165<<4, 165<<4, 165<<4,
+ 170<<4, 170<<4, 170<<4, 170<<4, 170<<4, 170<<4, 170<<4, 170<<4,
+ 170<<4, 170<<4, 170<<4, 170<<4, 170<<4, 170<<4, 170<<4, 170<<4,
+ 176<<4, 176<<4, 176<<4, 176<<4, 176<<4, 176<<4, 176<<4, 176<<4,
+ 176<<4, 176<<4, 176<<4, 176<<4, 176<<4, 176<<4, 176<<4, 176<<4,
+ 181<<4, 181<<4, 181<<4, 181<<4, 181<<4, 181<<4, 181<<4, 181<<4,
+ 181<<4, 181<<4, 181<<4, 181<<4, 181<<4, 181<<4, 181<<4, 181<<4,
+ 186<<4, 186<<4, 186<<4, 186<<4, 186<<4, 186<<4, 186<<4, 186<<4,
+ 186<<4, 186<<4, 186<<4, 186<<4, 186<<4, 186<<4, 186<<4, 186<<4,
+ 191<<4, 191<<4, 191<<4, 191<<4, 191<<4, 191<<4, 191<<4, 191<<4,
+ 191<<4, 191<<4, 191<<4, 191<<4, 191<<4, 191<<4, 191<<4, 191<<4,
+ 195<<4, 195<<4, 195<<4, 195<<4, 195<<4, 195<<4, 195<<4, 195<<4,
+ 195<<4, 195<<4, 195<<4, 195<<4, 195<<4, 195<<4, 195<<4, 195<<4,
+ 200<<4, 200<<4, 200<<4, 200<<4, 200<<4, 200<<4, 200<<4, 200<<4,
+ 200<<4, 200<<4, 200<<4, 200<<4, 200<<4, 200<<4, 200<<4, 200<<4,
+ 205<<4, 205<<4, 205<<4, 205<<4, 205<<4, 205<<4, 205<<4, 205<<4,
+ 205<<4, 205<<4, 205<<4, 205<<4, 205<<4, 205<<4, 205<<4, 205<<4,
+ 209<<4, 209<<4, 209<<4, 209<<4, 209<<4, 209<<4, 209<<4, 209<<4,
+ 209<<4, 209<<4, 209<<4, 209<<4, 209<<4, 209<<4, 209<<4, 209<<4,
+ 213<<4, 213<<4, 213<<4, 213<<4, 213<<4, 213<<4, 213<<4, 213<<4,
+ 213<<4, 213<<4, 213<<4, 213<<4, 213<<4, 213<<4, 213<<4, 213<<4,
+ 218<<4, 218<<4, 218<<4, 218<<4, 218<<4, 218<<4, 218<<4, 218<<4,
+ 218<<4, 218<<4, 218<<4, 218<<4, 218<<4, 218<<4, 218<<4, 218<<4,
+ 222<<4, 222<<4, 222<<4, 222<<4, 222<<4, 222<<4, 222<<4, 222<<4,
+ 222<<4, 222<<4, 222<<4, 222<<4, 222<<4, 222<<4, 222<<4, 222<<4,
+ 225<<4, 225<<4, 225<<4, 225<<4, 225<<4, 225<<4, 225<<4, 225<<4,
+ 225<<4, 225<<4, 225<<4, 225<<4, 225<<4, 225<<4, 225<<4, 225<<4,
+ 229<<4, 229<<4, 229<<4, 229<<4, 229<<4, 229<<4, 229<<4, 229<<4,
+ 229<<4, 229<<4, 229<<4, 229<<4, 229<<4, 229<<4, 229<<4, 229<<4,
+ 232<<4, 232<<4, 232<<4, 232<<4, 232<<4, 232<<4, 232<<4, 232<<4,
+ 232<<4, 232<<4, 232<<4, 232<<4, 232<<4, 232<<4, 232<<4, 232<<4,
+ 236<<4, 236<<4, 236<<4, 236<<4, 236<<4, 236<<4, 236<<4, 236<<4,
+ 236<<4, 236<<4, 236<<4, 236<<4, 236<<4, 236<<4, 236<<4, 236<<4,
+ 239<<4, 239<<4, 239<<4, 239<<4, 239<<4, 239<<4, 239<<4, 239<<4,
+ 239<<4, 239<<4, 239<<4, 239<<4, 239<<4, 239<<4, 239<<4, 239<<4,
+ 241<<4, 241<<4, 241<<4, 241<<4, 241<<4, 241<<4, 241<<4, 241<<4,
+ 241<<4, 241<<4, 241<<4, 241<<4, 241<<4, 241<<4, 241<<4, 241<<4,
+ 244<<4, 244<<4, 244<<4, 244<<4, 244<<4, 244<<4, 244<<4, 244<<4,
+ 244<<4, 244<<4, 244<<4, 244<<4, 244<<4, 244<<4, 244<<4, 244<<4,
+ 246<<4, 246<<4, 246<<4, 246<<4, 246<<4, 246<<4, 246<<4, 246<<4,
+ 246<<4, 246<<4, 246<<4, 246<<4, 246<<4, 246<<4, 246<<4, 246<<4,
+ 248<<4, 248<<4, 248<<4, 248<<4, 248<<4, 248<<4, 248<<4, 248<<4,
+ 248<<4, 248<<4, 248<<4, 248<<4, 248<<4, 248<<4, 248<<4, 248<<4,
+ 250<<4, 250<<4, 250<<4, 250<<4, 250<<4, 250<<4, 250<<4, 250<<4,
+ 250<<4, 250<<4, 250<<4, 250<<4, 250<<4, 250<<4, 250<<4, 250<<4,
+ 252<<4, 252<<4, 252<<4, 252<<4, 252<<4, 252<<4, 252<<4, 252<<4,
+ 252<<4, 252<<4, 252<<4, 252<<4, 252<<4, 252<<4, 252<<4, 252<<4,
+ 253<<4, 253<<4, 253<<4, 253<<4, 253<<4, 253<<4, 253<<4, 253<<4,
+ 253<<4, 253<<4, 253<<4, 253<<4, 253<<4, 253<<4, 253<<4, 253<<4,
+ 254<<4, 254<<4, 254<<4, 254<<4, 254<<4, 254<<4, 254<<4, 254<<4,
+ 254<<4, 254<<4, 254<<4, 254<<4, 254<<4, 254<<4, 254<<4, 254<<4,
+ 255<<4, 255<<4, 255<<4, 255<<4, 255<<4, 255<<4, 255<<4, 255<<4,
+ 255<<4, 255<<4, 255<<4, 255<<4, 255<<4, 255<<4, 255<<4, 255<<4,
+ 255<<4, 255<<4, 255<<4, 255<<4, 255<<4, 255<<4, 255<<4, 255<<4,
+ 255<<4, 255<<4, 255<<4, 255<<4, 255<<4, 255<<4, 255<<4, 255<<4
+ },
+ { 256<<4, 256<<4, 256<<4, 256<<4, 256<<4, 256<<4, 256<<4, 256<<4,
+ 256<<4, 256<<4, 256<<4, 256<<4, 256<<4, 256<<4, 256<<4, 256<<4,
+ 255<<4, 255<<4, 255<<4, 255<<4, 255<<4, 255<<4, 255<<4, 255<<4,
+ 255<<4, 255<<4, 255<<4, 255<<4, 255<<4, 255<<4, 255<<4, 255<<4,
+ 255<<4, 255<<4, 255<<4, 255<<4, 255<<4, 255<<4, 255<<4, 255<<4,
+ 255<<4, 255<<4, 255<<4, 255<<4, 255<<4, 255<<4, 255<<4, 255<<4,
+ 254<<4, 254<<4, 254<<4, 254<<4, 254<<4, 254<<4, 254<<4, 254<<4,
+ 254<<4, 254<<4, 254<<4, 254<<4, 254<<4, 254<<4, 254<<4, 254<<4,
+ 253<<4, 253<<4, 253<<4, 253<<4, 253<<4, 253<<4, 253<<4, 253<<4,
+ 253<<4, 253<<4, 253<<4, 253<<4, 253<<4, 253<<4, 253<<4, 253<<4,
+ 252<<4, 252<<4, 252<<4, 252<<4, 252<<4, 252<<4, 252<<4, 252<<4,
+ 252<<4, 252<<4, 252<<4, 252<<4, 252<<4, 252<<4, 252<<4, 252<<4,
+ 250<<4, 250<<4, 250<<4, 250<<4, 250<<4, 250<<4, 250<<4, 250<<4,
+ 250<<4, 250<<4, 250<<4, 250<<4, 250<<4, 250<<4, 250<<4, 250<<4,
+ 248<<4, 248<<4, 248<<4, 248<<4, 248<<4, 248<<4, 248<<4, 248<<4,
+ 248<<4, 248<<4, 248<<4, 248<<4, 248<<4, 248<<4, 248<<4, 248<<4,
+ 246<<4, 246<<4, 246<<4, 246<<4, 246<<4, 246<<4, 246<<4, 246<<4,
+ 246<<4, 246<<4, 246<<4, 246<<4, 246<<4, 246<<4, 246<<4, 246<<4,
+ 244<<4, 244<<4, 244<<4, 244<<4, 244<<4, 244<<4, 244<<4, 244<<4,
+ 244<<4, 244<<4, 244<<4, 244<<4, 244<<4, 244<<4, 244<<4, 244<<4,
+ 241<<4, 241<<4, 241<<4, 241<<4, 241<<4, 241<<4, 241<<4, 241<<4,
+ 241<<4, 241<<4, 241<<4, 241<<4, 241<<4, 241<<4, 241<<4, 241<<4,
+ 239<<4, 239<<4, 239<<4, 239<<4, 239<<4, 239<<4, 239<<4, 239<<4,
+ 239<<4, 239<<4, 239<<4, 239<<4, 239<<4, 239<<4, 239<<4, 239<<4,
+ 236<<4, 236<<4, 236<<4, 236<<4, 236<<4, 236<<4, 236<<4, 236<<4,
+ 236<<4, 236<<4, 236<<4, 236<<4, 236<<4, 236<<4, 236<<4, 236<<4,
+ 232<<4, 232<<4, 232<<4, 232<<4, 232<<4, 232<<4, 232<<4, 232<<4,
+ 232<<4, 232<<4, 232<<4, 232<<4, 232<<4, 232<<4, 232<<4, 232<<4,
+ 229<<4, 229<<4, 229<<4, 229<<4, 229<<4, 229<<4, 229<<4, 229<<4,
+ 229<<4, 229<<4, 229<<4, 229<<4, 229<<4, 229<<4, 229<<4, 229<<4,
+ 225<<4, 225<<4, 225<<4, 225<<4, 225<<4, 225<<4, 225<<4, 225<<4,
+ 225<<4, 225<<4, 225<<4, 225<<4, 225<<4, 225<<4, 225<<4, 225<<4,
+ 222<<4, 222<<4, 222<<4, 222<<4, 222<<4, 222<<4, 222<<4, 222<<4,
+ 222<<4, 222<<4, 222<<4, 222<<4, 222<<4, 222<<4, 222<<4, 222<<4,
+ 218<<4, 218<<4, 218<<4, 218<<4, 218<<4, 218<<4, 218<<4, 218<<4,
+ 218<<4, 218<<4, 218<<4, 218<<4, 218<<4, 218<<4, 218<<4, 218<<4,
+ 213<<4, 213<<4, 213<<4, 213<<4, 213<<4, 213<<4, 213<<4, 213<<4,
+ 213<<4, 213<<4, 213<<4, 213<<4, 213<<4, 213<<4, 213<<4, 213<<4,
+ 209<<4, 209<<4, 209<<4, 209<<4, 209<<4, 209<<4, 209<<4, 209<<4,
+ 209<<4, 209<<4, 209<<4, 209<<4, 209<<4, 209<<4, 209<<4, 209<<4,
+ 205<<4, 205<<4, 205<<4, 205<<4, 205<<4, 205<<4, 205<<4, 205<<4,
+ 205<<4, 205<<4, 205<<4, 205<<4, 205<<4, 205<<4, 205<<4, 205<<4,
+ 200<<4, 200<<4, 200<<4, 200<<4, 200<<4, 200<<4, 200<<4, 200<<4,
+ 200<<4, 200<<4, 200<<4, 200<<4, 200<<4, 200<<4, 200<<4, 200<<4,
+ 195<<4, 195<<4, 195<<4, 195<<4, 195<<4, 195<<4, 195<<4, 195<<4,
+ 195<<4, 195<<4, 195<<4, 195<<4, 195<<4, 195<<4, 195<<4, 195<<4,
+ 191<<4, 191<<4, 191<<4, 191<<4, 191<<4, 191<<4, 191<<4, 191<<4,
+ 191<<4, 191<<4, 191<<4, 191<<4, 191<<4, 191<<4, 191<<4, 191<<4,
+ 186<<4, 186<<4, 186<<4, 186<<4, 186<<4, 186<<4, 186<<4, 186<<4,
+ 186<<4, 186<<4, 186<<4, 186<<4, 186<<4, 186<<4, 186<<4, 186<<4,
+ 181<<4, 181<<4, 181<<4, 181<<4, 181<<4, 181<<4, 181<<4, 181<<4,
+ 181<<4, 181<<4, 181<<4, 181<<4, 181<<4, 181<<4, 181<<4, 181<<4,
+ 176<<4, 176<<4, 176<<4, 176<<4, 176<<4, 176<<4, 176<<4, 176<<4,
+ 176<<4, 176<<4, 176<<4, 176<<4, 176<<4, 176<<4, 176<<4, 176<<4,
+ 170<<4, 170<<4, 170<<4, 170<<4, 170<<4, 170<<4, 170<<4, 170<<4,
+ 170<<4, 170<<4, 170<<4, 170<<4, 170<<4, 170<<4, 170<<4, 170<<4,
+ 165<<4, 165<<4, 165<<4, 165<<4, 165<<4, 165<<4, 165<<4, 165<<4,
+ 165<<4, 165<<4, 165<<4, 165<<4, 165<<4, 165<<4, 165<<4, 165<<4,
+ 160<<4, 160<<4, 160<<4, 160<<4, 160<<4, 160<<4, 160<<4, 160<<4,
+ 160<<4, 160<<4, 160<<4, 160<<4, 160<<4, 160<<4, 160<<4, 160<<4,
+ 154<<4, 154<<4, 154<<4, 154<<4, 154<<4, 154<<4, 154<<4, 154<<4,
+ 154<<4, 154<<4, 154<<4, 154<<4, 154<<4, 154<<4, 154<<4, 154<<4,
+ 149<<4, 149<<4, 149<<4, 149<<4, 149<<4, 149<<4, 149<<4, 149<<4,
+ 149<<4, 149<<4, 149<<4, 149<<4, 149<<4, 149<<4, 149<<4, 149<<4,
+ 144<<4, 144<<4, 144<<4, 144<<4, 144<<4, 144<<4, 144<<4, 144<<4,
+ 144<<4, 144<<4, 144<<4, 144<<4, 144<<4, 144<<4, 144<<4, 144<<4,
+ 138<<4, 138<<4, 138<<4, 138<<4, 138<<4, 138<<4, 138<<4, 138<<4,
+ 138<<4, 138<<4, 138<<4, 138<<4, 138<<4, 138<<4, 138<<4, 138<<4,
+ 132<<4, 132<<4, 132<<4, 132<<4, 132<<4, 132<<4, 132<<4, 132<<4,
+ 132<<4, 132<<4, 132<<4, 132<<4, 132<<4, 132<<4, 132<<4, 132<<4,
+ 127<<4, 127<<4, 127<<4, 127<<4, 127<<4, 127<<4, 127<<4, 127<<4,
+ 127<<4, 127<<4, 127<<4, 127<<4, 127<<4, 127<<4, 127<<4, 127<<4,
+ 121<<4, 121<<4, 121<<4, 121<<4, 121<<4, 121<<4, 121<<4, 121<<4,
+ 121<<4, 121<<4, 121<<4, 121<<4, 121<<4, 121<<4, 121<<4, 121<<4,
+ 116<<4, 116<<4, 116<<4, 116<<4, 116<<4, 116<<4, 116<<4, 116<<4,
+ 116<<4, 116<<4, 116<<4, 116<<4, 116<<4, 116<<4, 116<<4, 116<<4,
+ 110<<4, 110<<4, 110<<4, 110<<4, 110<<4, 110<<4, 110<<4, 110<<4,
+ 110<<4, 110<<4, 110<<4, 110<<4, 110<<4, 110<<4, 110<<4, 110<<4,
+ 105<<4, 105<<4, 105<<4, 105<<4, 105<<4, 105<<4, 105<<4, 105<<4,
+ 105<<4, 105<<4, 105<<4, 105<<4, 105<<4, 105<<4, 105<<4, 105<<4,
+ 99<<4, 99<<4, 99<<4, 99<<4, 99<<4, 99<<4, 99<<4, 99<<4,
+ 99<<4, 99<<4, 99<<4, 99<<4, 99<<4, 99<<4, 99<<4, 99<<4,
+ 94<<4, 94<<4, 94<<4, 94<<4, 94<<4, 94<<4, 94<<4, 94<<4,
+ 94<<4, 94<<4, 94<<4, 94<<4, 94<<4, 94<<4, 94<<4, 94<<4,
+ 88<<4, 88<<4, 88<<4, 88<<4, 88<<4, 88<<4, 88<<4, 88<<4,
+ 88<<4, 88<<4, 88<<4, 88<<4, 88<<4, 88<<4, 88<<4, 88<<4,
+ 83<<4, 83<<4, 83<<4, 83<<4, 83<<4, 83<<4, 83<<4, 83<<4,
+ 83<<4, 83<<4, 83<<4, 83<<4, 83<<4, 83<<4, 83<<4, 83<<4,
+ 78<<4, 78<<4, 78<<4, 78<<4, 78<<4, 78<<4, 78<<4, 78<<4,
+ 78<<4, 78<<4, 78<<4, 78<<4, 78<<4, 78<<4, 78<<4, 78<<4,
+ 73<<4, 73<<4, 73<<4, 73<<4, 73<<4, 73<<4, 73<<4, 73<<4,
+ 73<<4, 73<<4, 73<<4, 73<<4, 73<<4, 73<<4, 73<<4, 73<<4,
+ 67<<4, 67<<4, 67<<4, 67<<4, 67<<4, 67<<4, 67<<4, 67<<4,
+ 67<<4, 67<<4, 67<<4, 67<<4, 67<<4, 67<<4, 67<<4, 67<<4,
+ 62<<4, 62<<4, 62<<4, 62<<4, 62<<4, 62<<4, 62<<4, 62<<4,
+ 62<<4, 62<<4, 62<<4, 62<<4, 62<<4, 62<<4, 62<<4, 62<<4,
+ 58<<4, 58<<4, 58<<4, 58<<4, 58<<4, 58<<4, 58<<4, 58<<4,
+ 58<<4, 58<<4, 58<<4, 58<<4, 58<<4, 58<<4, 58<<4, 58<<4,
+ 53<<4, 53<<4, 53<<4, 53<<4, 53<<4, 53<<4, 53<<4, 53<<4,
+ 53<<4, 53<<4, 53<<4, 53<<4, 53<<4, 53<<4, 53<<4, 53<<4,
+ 48<<4, 48<<4, 48<<4, 48<<4, 48<<4, 48<<4, 48<<4, 48<<4,
+ 48<<4, 48<<4, 48<<4, 48<<4, 48<<4, 48<<4, 48<<4, 48<<4,
+ 43<<4, 43<<4, 43<<4, 43<<4, 43<<4, 43<<4, 43<<4, 43<<4,
+ 43<<4, 43<<4, 43<<4, 43<<4, 43<<4, 43<<4, 43<<4, 43<<4,
+ 39<<4, 39<<4, 39<<4, 39<<4, 39<<4, 39<<4, 39<<4, 39<<4,
+ 39<<4, 39<<4, 39<<4, 39<<4, 39<<4, 39<<4, 39<<4, 39<<4,
+ 35<<4, 35<<4, 35<<4, 35<<4, 35<<4, 35<<4, 35<<4, 35<<4,
+ 35<<4, 35<<4, 35<<4, 35<<4, 35<<4, 35<<4, 35<<4, 35<<4,
+ 31<<4, 31<<4, 31<<4, 31<<4, 31<<4, 31<<4, 31<<4, 31<<4,
+ 31<<4, 31<<4, 31<<4, 31<<4, 31<<4, 31<<4, 31<<4, 31<<4,
+ 27<<4, 27<<4, 27<<4, 27<<4, 27<<4, 27<<4, 27<<4, 27<<4,
+ 27<<4, 27<<4, 27<<4, 27<<4, 27<<4, 27<<4, 27<<4, 27<<4,
+ 23<<4, 23<<4, 23<<4, 23<<4, 23<<4, 23<<4, 23<<4, 23<<4,
+ 23<<4, 23<<4, 23<<4, 23<<4, 23<<4, 23<<4, 23<<4, 23<<4,
+ 19<<4, 19<<4, 19<<4, 19<<4, 19<<4, 19<<4, 19<<4, 19<<4,
+ 19<<4, 19<<4, 19<<4, 19<<4, 19<<4, 19<<4, 19<<4, 19<<4,
+ 16<<4, 16<<4, 16<<4, 16<<4, 16<<4, 16<<4, 16<<4, 16<<4,
+ 16<<4, 16<<4, 16<<4, 16<<4, 16<<4, 16<<4, 16<<4, 16<<4,
+ 12<<4, 12<<4, 12<<4, 12<<4, 12<<4, 12<<4, 12<<4, 12<<4,
+ 12<<4, 12<<4, 12<<4, 12<<4, 12<<4, 12<<4, 12<<4, 12<<4,
+ 9<<4, 9<<4, 9<<4, 9<<4, 9<<4, 9<<4, 9<<4, 9<<4,
+ 9<<4, 9<<4, 9<<4, 9<<4, 9<<4, 9<<4, 9<<4, 9<<4,
+ 7<<4, 7<<4, 7<<4, 7<<4, 7<<4, 7<<4, 7<<4, 7<<4,
+ 7<<4, 7<<4, 7<<4, 7<<4, 7<<4, 7<<4, 7<<4, 7<<4,
+ 4<<4, 4<<4, 4<<4, 4<<4, 4<<4, 4<<4, 4<<4, 4<<4,
+ 4<<4, 4<<4, 4<<4, 4<<4, 4<<4, 4<<4, 4<<4, 4<<4,
+ 2<<4, 2<<4, 2<<4, 2<<4, 2<<4, 2<<4, 2<<4, 2<<4,
+ 2<<4, 2<<4, 2<<4, 2<<4, 2<<4, 2<<4, 2<<4, 2<<4
+ },
+ { 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4,
+ 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4,
+ -1<<4, -1<<4, -1<<4, -1<<4, -1<<4, -1<<4, -1<<4, -1<<4,
+ -1<<4, -1<<4, -1<<4, -1<<4, -1<<4, -1<<4, -1<<4, -1<<4,
+ -3<<4, -3<<4, -3<<4, -3<<4, -3<<4, -3<<4, -3<<4, -3<<4,
+ -3<<4, -3<<4, -3<<4, -3<<4, -3<<4, -3<<4, -3<<4, -3<<4,
+ -5<<4, -5<<4, -5<<4, -5<<4, -5<<4, -5<<4, -5<<4, -5<<4,
+ -5<<4, -5<<4, -5<<4, -5<<4, -5<<4, -5<<4, -5<<4, -5<<4,
+ -6<<4, -6<<4, -6<<4, -6<<4, -6<<4, -6<<4, -6<<4, -6<<4,
+ -6<<4, -6<<4, -6<<4, -6<<4, -6<<4, -6<<4, -6<<4, -6<<4,
+ -8<<4, -8<<4, -8<<4, -8<<4, -8<<4, -8<<4, -8<<4, -8<<4,
+ -8<<4, -8<<4, -8<<4, -8<<4, -8<<4, -8<<4, -8<<4, -8<<4,
+ -9<<4, -9<<4, -9<<4, -9<<4, -9<<4, -9<<4, -9<<4, -9<<4,
+ -9<<4, -9<<4, -9<<4, -9<<4, -9<<4, -9<<4, -9<<4, -9<<4,
+ -10<<4, -10<<4, -10<<4, -10<<4, -10<<4, -10<<4, -10<<4, -10<<4,
+ -10<<4, -10<<4, -10<<4, -10<<4, -10<<4, -10<<4, -10<<4, -10<<4,
+ -12<<4, -12<<4, -12<<4, -12<<4, -12<<4, -12<<4, -12<<4, -12<<4,
+ -12<<4, -12<<4, -12<<4, -12<<4, -12<<4, -12<<4, -12<<4, -12<<4,
+ -13<<4, -13<<4, -13<<4, -13<<4, -13<<4, -13<<4, -13<<4, -13<<4,
+ -13<<4, -13<<4, -13<<4, -13<<4, -13<<4, -13<<4, -13<<4, -13<<4,
+ -14<<4, -14<<4, -14<<4, -14<<4, -14<<4, -14<<4, -14<<4, -14<<4,
+ -14<<4, -14<<4, -14<<4, -14<<4, -14<<4, -14<<4, -14<<4, -14<<4,
+ -15<<4, -15<<4, -15<<4, -15<<4, -15<<4, -15<<4, -15<<4, -15<<4,
+ -15<<4, -15<<4, -15<<4, -15<<4, -15<<4, -15<<4, -15<<4, -15<<4,
+ -16<<4, -16<<4, -16<<4, -16<<4, -16<<4, -16<<4, -16<<4, -16<<4,
+ -16<<4, -16<<4, -16<<4, -16<<4, -16<<4, -16<<4, -16<<4, -16<<4,
+ -15<<4, -15<<4, -15<<4, -15<<4, -15<<4, -15<<4, -15<<4, -15<<4,
+ -15<<4, -15<<4, -15<<4, -15<<4, -15<<4, -15<<4, -15<<4, -15<<4,
+ -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, -17<<4,
+ -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, -17<<4,
+ -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, -17<<4,
+ -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, -17<<4,
+ -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4,
+ -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4,
+ -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4,
+ -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4,
+ -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, -17<<4,
+ -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, -17<<4,
+ -19<<4, -19<<4, -19<<4, -19<<4, -19<<4, -19<<4, -19<<4, -19<<4,
+ -19<<4, -19<<4, -19<<4, -19<<4, -19<<4, -19<<4, -19<<4, -19<<4,
+ -19<<4, -19<<4, -19<<4, -19<<4, -19<<4, -19<<4, -19<<4, -19<<4,
+ -19<<4, -19<<4, -19<<4, -19<<4, -19<<4, -19<<4, -19<<4, -19<<4,
+ -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4,
+ -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4,
+ -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4,
+ -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4,
+ -19<<4, -19<<4, -19<<4, -19<<4, -19<<4, -19<<4, -19<<4, -19<<4,
+ -19<<4, -19<<4, -19<<4, -19<<4, -19<<4, -19<<4, -19<<4, -19<<4,
+ -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4,
+ -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4,
+ -19<<4, -19<<4, -19<<4, -19<<4, -19<<4, -19<<4, -19<<4, -19<<4,
+ -19<<4, -19<<4, -19<<4, -19<<4, -19<<4, -19<<4, -19<<4, -19<<4,
+ -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4,
+ -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4, -18<<4,
+ -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, -17<<4,
+ -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, -17<<4,
+ -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, -17<<4,
+ -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, -17<<4,
+ -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, -17<<4,
+ -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, -17<<4, -17<<4,
+ -16<<4, -16<<4, -16<<4, -16<<4, -16<<4, -16<<4, -16<<4, -16<<4,
+ -16<<4, -16<<4, -16<<4, -16<<4, -16<<4, -16<<4, -16<<4, -16<<4,
+ -16<<4, -16<<4, -16<<4, -16<<4, -16<<4, -16<<4, -16<<4, -16<<4,
+ -16<<4, -16<<4, -16<<4, -16<<4, -16<<4, -16<<4, -16<<4, -16<<4,
+ -16<<4, -16<<4, -16<<4, -16<<4, -16<<4, -16<<4, -16<<4, -16<<4,
+ -16<<4, -16<<4, -16<<4, -16<<4, -16<<4, -16<<4, -16<<4, -16<<4,
+ -15<<4, -15<<4, -15<<4, -15<<4, -15<<4, -15<<4, -15<<4, -15<<4,
+ -15<<4, -15<<4, -15<<4, -15<<4, -15<<4, -15<<4, -15<<4, -15<<4,
+ -14<<4, -14<<4, -14<<4, -14<<4, -14<<4, -14<<4, -14<<4, -14<<4,
+ -14<<4, -14<<4, -14<<4, -14<<4, -14<<4, -14<<4, -14<<4, -14<<4,
+ -14<<4, -14<<4, -14<<4, -14<<4, -14<<4, -14<<4, -14<<4, -14<<4,
+ -14<<4, -14<<4, -14<<4, -14<<4, -14<<4, -14<<4, -14<<4, -14<<4,
+ -13<<4, -13<<4, -13<<4, -13<<4, -13<<4, -13<<4, -13<<4, -13<<4,
+ -13<<4, -13<<4, -13<<4, -13<<4, -13<<4, -13<<4, -13<<4, -13<<4,
+ -12<<4, -12<<4, -12<<4, -12<<4, -12<<4, -12<<4, -12<<4, -12<<4,
+ -12<<4, -12<<4, -12<<4, -12<<4, -12<<4, -12<<4, -12<<4, -12<<4,
+ -12<<4, -12<<4, -12<<4, -12<<4, -12<<4, -12<<4, -12<<4, -12<<4,
+ -12<<4, -12<<4, -12<<4, -12<<4, -12<<4, -12<<4, -12<<4, -12<<4,
+ -12<<4, -12<<4, -12<<4, -12<<4, -12<<4, -12<<4, -12<<4, -12<<4,
+ -12<<4, -12<<4, -12<<4, -12<<4, -12<<4, -12<<4, -12<<4, -12<<4,
+ -11<<4, -11<<4, -11<<4, -11<<4, -11<<4, -11<<4, -11<<4, -11<<4,
+ -11<<4, -11<<4, -11<<4, -11<<4, -11<<4, -11<<4, -11<<4, -11<<4,
+ -11<<4, -11<<4, -11<<4, -11<<4, -11<<4, -11<<4, -11<<4, -11<<4,
+ -11<<4, -11<<4, -11<<4, -11<<4, -11<<4, -11<<4, -11<<4, -11<<4,
+ -9<<4, -9<<4, -9<<4, -9<<4, -9<<4, -9<<4, -9<<4, -9<<4,
+ -9<<4, -9<<4, -9<<4, -9<<4, -9<<4, -9<<4, -9<<4, -9<<4,
+ -9<<4, -9<<4, -9<<4, -9<<4, -9<<4, -9<<4, -9<<4, -9<<4,
+ -9<<4, -9<<4, -9<<4, -9<<4, -9<<4, -9<<4, -9<<4, -9<<4,
+ -9<<4, -9<<4, -9<<4, -9<<4, -9<<4, -9<<4, -9<<4, -9<<4,
+ -9<<4, -9<<4, -9<<4, -9<<4, -9<<4, -9<<4, -9<<4, -9<<4,
+ -8<<4, -8<<4, -8<<4, -8<<4, -8<<4, -8<<4, -8<<4, -8<<4,
+ -8<<4, -8<<4, -8<<4, -8<<4, -8<<4, -8<<4, -8<<4, -8<<4,
+ -6<<4, -6<<4, -6<<4, -6<<4, -6<<4, -6<<4, -6<<4, -6<<4,
+ -6<<4, -6<<4, -6<<4, -6<<4, -6<<4, -6<<4, -6<<4, -6<<4,
+ -6<<4, -6<<4, -6<<4, -6<<4, -6<<4, -6<<4, -6<<4, -6<<4,
+ -6<<4, -6<<4, -6<<4, -6<<4, -6<<4, -6<<4, -6<<4, -6<<4,
+ -6<<4, -6<<4, -6<<4, -6<<4, -6<<4, -6<<4, -6<<4, -6<<4,
+ -6<<4, -6<<4, -6<<4, -6<<4, -6<<4, -6<<4, -6<<4, -6<<4,
+ -5<<4, -5<<4, -5<<4, -5<<4, -5<<4, -5<<4, -5<<4, -5<<4,
+ -5<<4, -5<<4, -5<<4, -5<<4, -5<<4, -5<<4, -5<<4, -5<<4,
+ -4<<4, -4<<4, -4<<4, -4<<4, -4<<4, -4<<4, -4<<4, -4<<4,
+ -4<<4, -4<<4, -4<<4, -4<<4, -4<<4, -4<<4, -4<<4, -4<<4,
+ -3<<4, -3<<4, -3<<4, -3<<4, -3<<4, -3<<4, -3<<4, -3<<4,
+ -3<<4, -3<<4, -3<<4, -3<<4, -3<<4, -3<<4, -3<<4, -3<<4,
+ -4<<4, -4<<4, -4<<4, -4<<4, -4<<4, -4<<4, -4<<4, -4<<4,
+ -4<<4, -4<<4, -4<<4, -4<<4, -4<<4, -4<<4, -4<<4, -4<<4,
+ -3<<4, -3<<4, -3<<4, -3<<4, -3<<4, -3<<4, -3<<4, -3<<4,
+ -3<<4, -3<<4, -3<<4, -3<<4, -3<<4, -3<<4, -3<<4, -3<<4,
+ -2<<4, -2<<4, -2<<4, -2<<4, -2<<4, -2<<4, -2<<4, -2<<4,
+ -2<<4, -2<<4, -2<<4, -2<<4, -2<<4, -2<<4, -2<<4, -2<<4,
+ -2<<4, -2<<4, -2<<4, -2<<4, -2<<4, -2<<4, -2<<4, -2<<4,
+ -2<<4, -2<<4, -2<<4, -2<<4, -2<<4, -2<<4, -2<<4, -2<<4,
+ -1<<4, -1<<4, -1<<4, -1<<4, -1<<4, -1<<4, -1<<4, -1<<4,
+ -1<<4, -1<<4, -1<<4, -1<<4, -1<<4, -1<<4, -1<<4, -1<<4,
+ 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4,
+ 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4,
+ -1<<4, -1<<4, -1<<4, -1<<4, -1<<4, -1<<4, -1<<4, -1<<4,
+ -1<<4, -1<<4, -1<<4, -1<<4, -1<<4, -1<<4, -1<<4, -1<<4,
+ 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4,
+ 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4,
+ 1<<4, 1<<4, 1<<4, 1<<4, 1<<4, 1<<4, 1<<4, 1<<4,
+ 1<<4, 1<<4, 1<<4, 1<<4, 1<<4, 1<<4, 1<<4, 1<<4,
+ 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4,
+ 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4,
+ 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4,
+ 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4,
+ 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4,
+ 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4, 0<<4
+ }
+};
+#endif
+#else
+#error "sh_css_params.c: GDC version must be \
+ one of {GDC_VERSION_2}"
+#endif
+
+static const struct ia_css_dz_config default_dz_config = {
+ HRT_GDC_N,
+ HRT_GDC_N,
+ {
+ {0, 0},
+ {0, 0},
+ }
+};
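+
+/* The dx and dy members above express digital zoom relative to HRT_GDC_N,
+ * so initializing both to HRT_GDC_N selects a 1.0x (no zoom) default, and
+ * the zoom_region rectangle is left at zero origin/resolution, which makes
+ * ia_css_process_zoom_and_motion() below take the non-region zoom path.
+ */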
+
+static const struct ia_css_vector default_motion_config = {
+ 0,
+ 0
+};
+
+/* ------ deprecated(bz675) : from ------ */
+static const struct ia_css_shading_settings default_shading_settings = {
+ 1 /* enable shading table conversion in the css
+ (This matches the legacy way.) */
+};
+/* ------ deprecated(bz675) : to ------ */
+
+struct ia_css_isp_skc_dvs_statistics {
+ ia_css_ptr p_data;
+};
+
+static enum ia_css_err
+ref_sh_css_ddr_address_map(
+ struct sh_css_ddr_address_map *map,
+ struct sh_css_ddr_address_map *out);
+
+static enum ia_css_err
+write_ia_css_isp_parameter_set_info_to_ddr(
+ struct ia_css_isp_parameter_set_info *me,
+ hrt_vaddress *out);
+
+static enum ia_css_err
+free_ia_css_isp_parameter_set_info(hrt_vaddress ptr);
+
+static enum ia_css_err
+sh_css_params_write_to_ddr_internal(
+ struct ia_css_pipe *pipe,
+ unsigned pipe_id,
+ struct ia_css_isp_parameters *params,
+ const struct ia_css_pipeline_stage *stage,
+ struct sh_css_ddr_address_map *ddr_map,
+ struct sh_css_ddr_address_map_size *ddr_map_size);
+
+static enum ia_css_err
+sh_css_create_isp_params(struct ia_css_stream *stream,
+ struct ia_css_isp_parameters **isp_params_out);
+
+static bool
+sh_css_init_isp_params_from_global(struct ia_css_stream *stream,
+ struct ia_css_isp_parameters *params,
+ bool use_default_config,
+ struct ia_css_pipe *pipe_in);
+
+static enum ia_css_err
+sh_css_init_isp_params_from_config(struct ia_css_pipe *pipe,
+ struct ia_css_isp_parameters *params,
+ const struct ia_css_isp_config *config,
+ struct ia_css_pipe *pipe_in);
+
+static enum ia_css_err
+sh_css_set_global_isp_config_on_pipe(
+ struct ia_css_pipe *curr_pipe,
+ const struct ia_css_isp_config *config,
+ struct ia_css_pipe *pipe);
+
+#if defined(SH_CSS_ENABLE_PER_FRAME_PARAMS)
+static enum ia_css_err
+sh_css_set_per_frame_isp_config_on_pipe(
+ struct ia_css_stream *stream,
+ const struct ia_css_isp_config *config,
+ struct ia_css_pipe *pipe);
+#endif
+
+static enum ia_css_err
+sh_css_update_uds_and_crop_info_based_on_zoom_region(
+ const struct ia_css_binary_info *info,
+ const struct ia_css_frame_info *in_frame_info,
+ const struct ia_css_frame_info *out_frame_info,
+ const struct ia_css_resolution *dvs_env,
+ const struct ia_css_dz_config *zoom,
+ const struct ia_css_vector *motion_vector,
+ struct sh_css_uds_info *uds, /* out */
+ struct sh_css_crop_pos *sp_out_crop_pos, /* out */
+ struct ia_css_resolution pipe_in_res,
+ bool enable_zoom);
+
+hrt_vaddress
+sh_css_params_ddr_address_map(void)
+{
+ return sp_ddr_ptrs;
+}
+
+/* ****************************************************
+ * Each coefficient is stored as 7 bits to fit 2 of them into one
+ * ISP vector element, so we will store 4 coefficients on every
+ * memory word (32 bits)
+ *
+ * 0: Coefficient 0 used bits
+ * 1: Coefficient 1 used bits
+ * 2: Coefficient 2 used bits
+ * 3: Coefficient 3 used bits
+ * x: not used
+ *
+ * xx33333332222222 | xx11111110000000
+ *
+ * ***************************************************
+ */
+static struct ia_css_host_data *
+convert_allocate_fpntbl(struct ia_css_isp_parameters *params)
+{
+ unsigned int i, j;
+ short *data_ptr;
+ struct ia_css_host_data *me;
+ unsigned int isp_format_data_size;
+ uint32_t *isp_format_data_ptr;
+
+ assert(params != NULL);
+
+ data_ptr = params->fpn_config.data;
+ isp_format_data_size = params->fpn_config.height * params->fpn_config.width * sizeof(uint32_t);
+
+ me = ia_css_host_data_allocate(isp_format_data_size);
+
+ if (!me)
+ return NULL;
+
+ isp_format_data_ptr = (uint32_t *)me->address;
+
+ for (i = 0; i < params->fpn_config.height; i++) {
+ for (j = 0;
+ j < params->fpn_config.width;
+ j += 4, data_ptr += 4, isp_format_data_ptr++) {
+ int data = data_ptr[0] << 0 |
+ data_ptr[1] << 7 |
+ data_ptr[2] << 16 |
+ data_ptr[3] << 23;
+ *isp_format_data_ptr = data;
+ }
+ }
+ return me;
+}
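+
+/* Illustrative sketch (not part of this driver): packing four 7-bit FPN
+ * coefficients into one 32-bit word with the layout documented above
+ * ("xx33333332222222 | xx11111110000000"); bits 14-15 and 30-31 stay
+ * unused. The helper name pack_fpn_word() is hypothetical.
+ *
+ *	static inline uint32_t pack_fpn_word(const short c[4])
+ *	{
+ *		return ((uint32_t)(c[0] & 0x7f) << 0) |
+ *		       ((uint32_t)(c[1] & 0x7f) << 7) |
+ *		       ((uint32_t)(c[2] & 0x7f) << 16) |
+ *		       ((uint32_t)(c[3] & 0x7f) << 23);
+ *	}
+ */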
+
+static enum ia_css_err
+store_fpntbl(struct ia_css_isp_parameters *params, hrt_vaddress ptr)
+{
+ struct ia_css_host_data *isp_data;
+
+ assert(params != NULL);
+ assert(ptr != mmgr_NULL);
+
+ isp_data = convert_allocate_fpntbl(params);
+ if (!isp_data) {
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY);
+ return IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+ }
+ ia_css_params_store_ia_css_host_data(ptr, isp_data);
+
+ ia_css_host_data_free(isp_data);
+ return IA_CSS_SUCCESS;
+}
+
+static void
+convert_raw_to_fpn(struct ia_css_isp_parameters *params)
+{
+ int maxval = 0;
+ unsigned int i;
+
+ assert(params != NULL);
+
+ /* Find the maximum value in the table */
+ for (i = 0; i < params->fpn_config.height * params->fpn_config.width; i++) {
+ int val = params->fpn_config.data[i];
+ /* Make sure the FPN value can be represented as a 13-bit unsigned
+ * number (ISP precision - 1); note that the actual input range
+ * depends on the precision of the input frame data.
+ */
+ if (val < 0) {
+/* Checkpatch patch */
+ val = 0;
+ } else if (val >= (1 << 13)) {
+/* Checkpatch patch */
+/* MW: BUG, is "13" a system or application property */
+ val = (1 << 13) - 1;
+ }
+ maxval = max(maxval, val);
+ }
+ /* Find the lowest shift value such that maxval >> shift fits in
+ * the range 0..63, i.e. the smallest shift with maxval <= 63 * 2^shift.
+ */
+ params->fpn_config.shift = 0;
+ while (maxval > 63) {
+/* MW: BUG, is "63" a system or application property */
+ maxval >>= 1;
+ params->fpn_config.shift++;
+ }
+ /* Adjust the values in the table for the shift value */
+ for (i = 0; i < params->fpn_config.height * params->fpn_config.width; i++)
+ ((unsigned short *) params->fpn_config.data)[i] >>= params->fpn_config.shift;
+}
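+
+/* Worked example of the shift computed above (illustration only): with
+ * maxval = 1000 the loop yields fpn_config.shift = 4, since
+ * 1000 >> 4 = 62 <= 63 while 1000 >> 3 = 125 > 63; every stored value v
+ * then approximates its original sample as v << 4.
+ */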
+
+static void
+ia_css_process_kernel(struct ia_css_stream *stream,
+ struct ia_css_isp_parameters *params,
+ void (*process)(unsigned pipe_id,
+ const struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params))
+{
+ int i;
+ for (i = 0; i < stream->num_pipes; i++) {
+ struct ia_css_pipe *pipe = stream->pipes[i];
+ struct ia_css_pipeline *pipeline = ia_css_pipe_get_pipeline(pipe);
+ struct ia_css_pipeline_stage *stage;
+
+ /* update the other buffers to the pipe specific copies */
+ for (stage = pipeline->stages; stage; stage = stage->next) {
+ if (!stage || !stage->binary) continue;
+ process(pipeline->pipe_id, stage, params);
+ }
+ }
+}
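+
+/* Usage sketch: callers pass one of the per-kernel process hooks, e.g.
+ *
+ *	ia_css_process_kernel(stream, params,
+ *			      ia_css_kernel_process_param[IA_CSS_FPN_ID]);
+ *
+ * (this exact call is made further down in sh_css_set_black_frame()).
+ */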
+
+static enum ia_css_err
+sh_css_select_dp_10bpp_config(const struct ia_css_pipe *pipe, bool *is_dp_10bpp) {
+
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ /* Currently we check if the 10bpp DPC configuration is required based
+ * on the use case, i.e. if both BDS and DPC are enabled. The cleaner
+ * design choice would be to expose the type of DPC (either 10bpp or 13bpp)
+ * using the binary info, but the current control flow does not allow this
+ * implementation. (This is because the configuration is set before a
+ * binary is selected, and the binary info is not yet available.)
+ */
+ if ((pipe == NULL) || (is_dp_10bpp == NULL)) {
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_INTERNAL_ERROR);
+ err = IA_CSS_ERR_INTERNAL_ERROR;
+ } else {
+ *is_dp_10bpp = false;
+
+ /* check if DPC is enabled from the host */
+ if (pipe->config.enable_dpc) {
+ /*check if BDS is enabled*/
+ unsigned int required_bds_factor = SH_CSS_BDS_FACTOR_1_00;
+ if ((pipe->config.bayer_ds_out_res.width != 0) &&
+ (pipe->config.bayer_ds_out_res.height != 0)) {
+ if (IA_CSS_SUCCESS == binarydesc_calculate_bds_factor(
+ pipe->config.input_effective_res,
+ pipe->config.bayer_ds_out_res,
+ &required_bds_factor)) {
+ if (SH_CSS_BDS_FACTOR_1_00 != required_bds_factor) {
+ /*we use 10bpp BDS configuration*/
+ *is_dp_10bpp = true;
+ }
+ }
+ }
+ }
+ }
+
+ return err;
+}
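+
+/* Minimal usage sketch (hypothetical caller, not taken from this file):
+ *
+ *	bool is_dp_10bpp;
+ *
+ *	if (sh_css_select_dp_10bpp_config(pipe, &is_dp_10bpp) == IA_CSS_SUCCESS)
+ *		... choose the 10bpp or 13bpp DPC parameter encoding
+ *		    based on is_dp_10bpp ...
+ */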
+
+enum ia_css_err
+sh_css_set_black_frame(struct ia_css_stream *stream,
+ const struct ia_css_frame *raw_black_frame)
+{
+ struct ia_css_isp_parameters *params;
+ /* this function desperately needs to be moved to the ISP or SP such
+ * that it can use the DMA.
+ */
+ unsigned int height, width, y, x, k, data;
+ hrt_vaddress ptr;
+
+ assert(stream != NULL);
+ assert(raw_black_frame != NULL);
+
+ params = stream->isp_params_configs;
+ height = raw_black_frame->info.res.height;
+ width = raw_black_frame->info.padded_width;
+
+ ptr = raw_black_frame->data
+ + raw_black_frame->planes.raw.offset;
+
+ IA_CSS_ENTER_PRIVATE("black_frame=%p", raw_black_frame);
+
+ if (params->fpn_config.data &&
+ (params->fpn_config.width != width || params->fpn_config.height != height)) {
+ sh_css_free(params->fpn_config.data);
+ params->fpn_config.data = NULL;
+ }
+ if (params->fpn_config.data == NULL) {
+ params->fpn_config.data = sh_css_malloc(height * width * sizeof(short));
+ if (!params->fpn_config.data) {
+ IA_CSS_ERROR("out of memory");
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY);
+ return IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+ }
+ params->fpn_config.width = width;
+ params->fpn_config.height = height;
+ params->fpn_config.shift = 0;
+ }
+
+ /* store raw to fpntbl */
+ for (y = 0; y < height; y++) {
+ for (x = 0; x < width; x += (ISP_VEC_NELEMS * 2)) {
+ int ofs = y * width + x;
+ for (k = 0; k < ISP_VEC_NELEMS; k += 2) {
+ mmgr_load(ptr, (void *)(&data), sizeof(int));
+ params->fpn_config.data[ofs + 2 * k] =
+ (short) (data & 0xFFFF);
+ params->fpn_config.data[ofs + 2 * k + 2] =
+ (short) ((data >> 16) & 0xFFFF);
+ ptr += sizeof(int); /* byte system address */
+ }
+ for (k = 0; k < ISP_VEC_NELEMS; k += 2) {
+ mmgr_load(ptr, (void *)(&data), sizeof(int));
+ params->fpn_config.data[ofs + 2 * k + 1] =
+ (short) (data & 0xFFFF);
+ params->fpn_config.data[ofs + 2 * k + 3] =
+ (short) ((data >> 16) & 0xFFFF);
+ ptr += sizeof(int); /* byte system address */
+ }
+ }
+ }
+
+ /* raw -> fpn */
+ convert_raw_to_fpn(params);
+
+ /* overwrite isp parameter */
+ ia_css_process_kernel(stream, params, ia_css_kernel_process_param[IA_CSS_FPN_ID]);
+
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_SUCCESS);
+
+ return IA_CSS_SUCCESS;
+}
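+
+/* Note on the copy loops above (a reading of the code, not an authoritative
+ * format description): for every group of 2 * ISP_VEC_NELEMS pixels the raw
+ * black frame stores one vector of even-indexed pixels followed by one
+ * vector of odd-indexed pixels; the two inner loops re-interleave them so
+ * that fpn_config.data ends up in plain raster order.
+ */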
+
+bool
+sh_css_params_set_binning_factor(struct ia_css_stream *stream, unsigned int binning_fact)
+{
+ struct ia_css_isp_parameters *params;
+
+ IA_CSS_ENTER_PRIVATE("void");
+ assert(stream != NULL);
+
+ params = stream->isp_params_configs;
+
+ if (params->sensor_binning != binning_fact) {
+ params->sensor_binning = binning_fact;
+ params->sc_table_changed = true;
+ }
+
+ IA_CSS_LEAVE_PRIVATE("void");
+
+ return params->sc_table_changed;
+}
+
+static void
+sh_css_update_shading_table_status(struct ia_css_pipe *pipe,
+ struct ia_css_isp_parameters *params)
+{
+ if (params && pipe && (pipe->pipe_num != params->sc_table_last_pipe_num)) {
+ params->sc_table_dirty = true;
+ params->sc_table_last_pipe_num = pipe->pipe_num;
+ }
+}
+
+static void
+sh_css_set_shading_table(struct ia_css_stream *stream,
+ struct ia_css_isp_parameters *params,
+ const struct ia_css_shading_table *table)
+{
+ IA_CSS_ENTER_PRIVATE("");
+ if (table == NULL)
+ return;
+ assert(stream != NULL);
+
+ if (!table->enable)
+ table = NULL;
+
+ if ((table != params->sc_table) || params->sc_table_dirty) {
+ params->sc_table = table;
+ params->sc_table_changed = true;
+ params->sc_table_dirty = false;
+ /* Not very clean: this goes to sh_css.c to invalidate the
+ * shading table for all pipes. It should be replaced by a loop
+ * and a pipe-specific call.
+ */
+ if (!params->output_frame)
+ sh_css_invalidate_shading_tables(stream);
+ }
+
+ IA_CSS_LEAVE_PRIVATE("void");
+}
+
+void
+ia_css_params_store_ia_css_host_data(
+ hrt_vaddress ddr_addr,
+ struct ia_css_host_data *data)
+{
+ assert(data != NULL);
+ assert(data->address != NULL);
+ assert(ddr_addr != mmgr_NULL);
+
+ IA_CSS_ENTER_PRIVATE("");
+
+ mmgr_store(ddr_addr,
+ (void *)(data->address),
+ (size_t)data->size);
+
+ IA_CSS_LEAVE_PRIVATE("void");
+}
+
+struct ia_css_host_data *
+ia_css_params_alloc_convert_sctbl(
+ const struct ia_css_pipeline_stage *stage,
+ const struct ia_css_shading_table *shading_table)
+{
+ const struct ia_css_binary *binary = stage->binary;
+ struct ia_css_host_data *sctbl;
+ unsigned int i, j, aligned_width, row_padding;
+ unsigned int sctbl_size;
+ short int *ptr;
+
+ assert(binary != NULL);
+ assert(shading_table != NULL);
+
+ IA_CSS_ENTER_PRIVATE("");
+
+ if (shading_table == NULL) {
+ IA_CSS_LEAVE_PRIVATE("void");
+ return NULL;
+ }
+
+ aligned_width = binary->sctbl_aligned_width_per_color;
+ row_padding = aligned_width - shading_table->width;
+ sctbl_size = shading_table->height * IA_CSS_SC_NUM_COLORS * aligned_width * sizeof(short);
+
+ sctbl = ia_css_host_data_allocate((size_t)sctbl_size);
+
+ if (!sctbl)
+ return NULL;
+ ptr = (short int *)sctbl->address;
+ memset(ptr,
+ 0,
+ sctbl_size);
+
+ for (i = 0; i < shading_table->height; i++) {
+ for (j = 0; j < IA_CSS_SC_NUM_COLORS; j++) {
+ memcpy(ptr,
+ &shading_table->data[j]
+ [i*shading_table->width],
+ shading_table->width * sizeof(short));
+ ptr += aligned_width;
+ }
+ }
+
+ IA_CSS_LEAVE_PRIVATE("void");
+ return sctbl;
+}
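+
+/* Layout sketch (numbers assumed for illustration): for a shading table of
+ * width 17 and a binary with sctbl_aligned_width_per_color == 32, each of
+ * the IA_CSS_SC_NUM_COLORS rows per table line holds 17 copied entries
+ * followed by 15 zero-padded entries, giving
+ * height * IA_CSS_SC_NUM_COLORS * 32 shorts in total.
+ */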
+
+enum ia_css_err ia_css_params_store_sctbl(
+ const struct ia_css_pipeline_stage *stage,
+ hrt_vaddress sc_tbl,
+ const struct ia_css_shading_table *sc_config)
+{
+ struct ia_css_host_data *isp_sc_tbl;
+
+ IA_CSS_ENTER_PRIVATE("");
+
+ if (sc_config == NULL) {
+ IA_CSS_LEAVE_PRIVATE("void");
+ return IA_CSS_SUCCESS;
+ }
+
+ isp_sc_tbl = ia_css_params_alloc_convert_sctbl(stage, sc_config);
+ if (!isp_sc_tbl) {
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY);
+ return IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+ }
+ /* store the shading table to ddr */
+ ia_css_params_store_ia_css_host_data(sc_tbl, isp_sc_tbl);
+ ia_css_host_data_free(isp_sc_tbl);
+
+ IA_CSS_LEAVE_PRIVATE("void");
+
+ return IA_CSS_SUCCESS;
+}
+
+static void
+sh_css_enable_pipeline(const struct ia_css_binary *binary)
+{
+ if (!binary)
+ return;
+
+ IA_CSS_ENTER_PRIVATE("");
+
+ ia_css_isp_param_enable_pipeline(&binary->mem_params);
+
+ IA_CSS_LEAVE_PRIVATE("void");
+}
+
+static enum ia_css_err
+ia_css_process_zoom_and_motion(
+ struct ia_css_isp_parameters *params,
+ const struct ia_css_pipeline_stage *first_stage)
+{
+ /* first_stage can be NULL */
+ const struct ia_css_pipeline_stage *stage;
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ struct ia_css_resolution pipe_in_res;
+ pipe_in_res.width = 0;
+ pipe_in_res.height = 0;
+
+ assert(params != NULL);
+
+ IA_CSS_ENTER_PRIVATE("");
+
+ /* Go through all stages to update uds and cropping */
+ for (stage = first_stage; stage; stage = stage->next) {
+
+ struct ia_css_binary *binary;
+ /* note: the var below is made static as it is quite large;
+ if it is not static it ends up on the stack which could
+ cause issues for drivers
+ */
+ static struct ia_css_binary tmp_binary;
+
+ const struct ia_css_binary_xinfo *info = NULL;
+
+ binary = stage->binary;
+ if (binary) {
+ info = binary->info;
+ } else {
+ const struct sh_css_binary_args *args = &stage->args;
+ const struct ia_css_frame_info *out_infos[IA_CSS_BINARY_MAX_OUTPUT_PORTS] = {NULL};
+ if (args->out_frame[0])
+ out_infos[0] = &args->out_frame[0]->info;
+ info = &stage->firmware->info.isp;
+ ia_css_binary_fill_info(info, false, false,
+ IA_CSS_STREAM_FORMAT_RAW_10,
+ args->in_frame ? &args->in_frame->info : NULL,
+ NULL,
+ out_infos,
+ args->out_vf_frame ? &args->out_vf_frame->info
+ : NULL,
+ &tmp_binary,
+ NULL,
+ -1, true);
+ binary = &tmp_binary;
+ binary->info = info;
+ }
+
+ if (stage == first_stage) {
+ /* we will use pipe_in_res to scale the zoom crop region if needed */
+ pipe_in_res = binary->effective_in_frame_res;
+ }
+
+ assert(stage->stage_num < SH_CSS_MAX_STAGES);
+ if (params->dz_config.zoom_region.resolution.width == 0 &&
+ params->dz_config.zoom_region.resolution.height == 0) {
+ sh_css_update_uds_and_crop_info(
+ &info->sp,
+ &binary->in_frame_info,
+ &binary->out_frame_info[0],
+ &binary->dvs_envelope,
+ &params->dz_config,
+ &params->motion_config,
+ &params->uds[stage->stage_num].uds,
+ &params->uds[stage->stage_num].crop_pos,
+ stage->enable_zoom);
+ } else {
+ err = sh_css_update_uds_and_crop_info_based_on_zoom_region(
+ &info->sp,
+ &binary->in_frame_info,
+ &binary->out_frame_info[0],
+ &binary->dvs_envelope,
+ &params->dz_config,
+ &params->motion_config,
+ &params->uds[stage->stage_num].uds,
+ &params->uds[stage->stage_num].crop_pos,
+ pipe_in_res,
+ stage->enable_zoom);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+ }
+ }
+ params->isp_params_changed = true;
+
+ IA_CSS_LEAVE_PRIVATE("void");
+ return err;
+}
+
+static void
+sh_css_set_gamma_table(struct ia_css_isp_parameters *params,
+ const struct ia_css_gamma_table *table)
+{
+ if (table == NULL)
+ return;
+ IA_CSS_ENTER_PRIVATE("table=%p", table);
+
+ assert(params != NULL);
+ params->gc_table = *table;
+ params->config_changed[IA_CSS_GC_ID] = true;
+
+ IA_CSS_LEAVE_PRIVATE("void");
+}
+
+static void
+sh_css_get_gamma_table(const struct ia_css_isp_parameters *params,
+ struct ia_css_gamma_table *table)
+{
+ if (table == NULL)
+ return;
+ IA_CSS_ENTER_PRIVATE("table=%p", table);
+
+ assert(params != NULL);
+ *table = params->gc_table;
+
+ IA_CSS_LEAVE_PRIVATE("void");
+}
+
+static void
+sh_css_set_ctc_table(struct ia_css_isp_parameters *params,
+ const struct ia_css_ctc_table *table)
+{
+ if (table == NULL)
+ return;
+
+ IA_CSS_ENTER_PRIVATE("table=%p", table);
+
+ assert(params != NULL);
+ params->ctc_table = *table;
+ params->config_changed[IA_CSS_CTC_ID] = true;
+
+ IA_CSS_LEAVE_PRIVATE("void");
+}
+
+static void
+sh_css_get_ctc_table(const struct ia_css_isp_parameters *params,
+ struct ia_css_ctc_table *table)
+{
+ if (table == NULL)
+ return;
+
+ IA_CSS_ENTER_PRIVATE("table=%p", table);
+
+ assert(params != NULL);
+ *table = params->ctc_table;
+
+ IA_CSS_LEAVE_PRIVATE("void");
+}
+
+static void
+sh_css_set_macc_table(struct ia_css_isp_parameters *params,
+ const struct ia_css_macc_table *table)
+{
+ if (table == NULL)
+ return;
+
+ IA_CSS_ENTER_PRIVATE("table=%p", table);
+
+ assert(params != NULL);
+ params->macc_table = *table;
+ params->config_changed[IA_CSS_MACC_ID] = true;
+
+ IA_CSS_LEAVE_PRIVATE("void");
+}
+
+static void
+sh_css_get_macc_table(const struct ia_css_isp_parameters *params,
+ struct ia_css_macc_table *table)
+{
+ if (table == NULL)
+ return;
+
+ IA_CSS_ENTER_PRIVATE("table=%p", table);
+
+ assert(params != NULL);
+ *table = params->macc_table;
+
+ IA_CSS_LEAVE_PRIVATE("void");
+}
+
+void ia_css_morph_table_free(
+ struct ia_css_morph_table *me)
+{
+ unsigned int i;
+
+ if (me == NULL)
+ return;
+
+ IA_CSS_ENTER("");
+
+ for (i = 0; i < IA_CSS_MORPH_TABLE_NUM_PLANES; i++) {
+ if (me->coordinates_x[i]) {
+ sh_css_free(me->coordinates_x[i]);
+ me->coordinates_x[i] = NULL;
+ }
+ if (me->coordinates_y[i]) {
+ sh_css_free(me->coordinates_y[i]);
+ me->coordinates_y[i] = NULL;
+ }
+ }
+
+ sh_css_free(me);
+ IA_CSS_LEAVE("void");
+}
+
+struct ia_css_morph_table *ia_css_morph_table_allocate(
+ unsigned int width,
+ unsigned int height)
+{
+ unsigned int i;
+ struct ia_css_morph_table *me;
+
+ IA_CSS_ENTER("");
+
+ me = sh_css_malloc(sizeof(*me));
+ if (me == NULL) {
+ IA_CSS_ERROR("out of memory");
+ return me;
+ }
+
+ for (i = 0; i < IA_CSS_MORPH_TABLE_NUM_PLANES; i++) {
+ me->coordinates_x[i] = NULL;
+ me->coordinates_y[i] = NULL;
+ }
+
+ for (i = 0; i < IA_CSS_MORPH_TABLE_NUM_PLANES; i++) {
+ me->coordinates_x[i] =
+ sh_css_malloc(height * width *
+ sizeof(*me->coordinates_x[i]));
+ me->coordinates_y[i] =
+ sh_css_malloc(height * width *
+ sizeof(*me->coordinates_y[i]));
+
+ if ((me->coordinates_x[i] == NULL) ||
+ (me->coordinates_y[i] == NULL)) {
+ ia_css_morph_table_free(me);
+ me = NULL;
+ return me;
+ }
+ }
+ me->width = width;
+ me->height = height;
+ IA_CSS_LEAVE("");
+ return me;
+}
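+
+/* Usage sketch (hypothetical caller): a table obtained from
+ * ia_css_morph_table_allocate() must be released with
+ * ia_css_morph_table_free(), e.g.
+ *
+ *	struct ia_css_morph_table *t = ia_css_morph_table_allocate(w, h);
+ *	if (t) {
+ *		... fill t->coordinates_x[] / t->coordinates_y[] ...
+ *		ia_css_morph_table_free(t);
+ *	}
+ */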
+
+static enum ia_css_err sh_css_params_default_morph_table(
+ struct ia_css_morph_table **table,
+ const struct ia_css_binary *binary)
+{
+ /* MW 2400 advanced requires different scaling */
+ unsigned int i, j, k, step, width, height;
+ short start_x[IA_CSS_MORPH_TABLE_NUM_PLANES] = { -8, 0, -8, 0, 0, -8 },
+ start_y[IA_CSS_MORPH_TABLE_NUM_PLANES] = { 0, 0, -8, -8, -8, 0 };
+ struct ia_css_morph_table *tab;
+
+ assert(table != NULL);
+ assert(binary != NULL);
+
+ IA_CSS_ENTER_PRIVATE("");
+
+ step = (ISP_VEC_NELEMS / 16) * 128;
+ width = binary->morph_tbl_width;
+ height = binary->morph_tbl_height;
+
+ tab = ia_css_morph_table_allocate(width, height);
+ if (tab == NULL)
+ return IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+
+ for (i = 0; i < IA_CSS_MORPH_TABLE_NUM_PLANES; i++) {
+ short val_y = start_y[i];
+ for (j = 0; j < height; j++) {
+ short val_x = start_x[i];
+ unsigned short *x_ptr, *y_ptr;
+
+ x_ptr = &tab->coordinates_x[i][j * width];
+ y_ptr = &tab->coordinates_y[i][j * width];
+ for (k = 0; k < width;
+ k++, x_ptr++, y_ptr++, val_x += (short)step) {
+ if (k == 0)
+ *x_ptr = 0;
+ else if (k == width - 1)
+ *x_ptr = val_x + 2 * start_x[i];
+ else
+ *x_ptr = val_x;
+ if (j == 0)
+ *y_ptr = 0;
+ else
+ *y_ptr = val_y;
+ }
+ val_y += (short)step;
+ }
+ }
+ *table = tab;
+
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_SUCCESS);
+
+ return IA_CSS_SUCCESS;
+}
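+
+/* Worked example for the step value above (assuming ISP_VEC_NELEMS == 64,
+ * which is typical for this ISP family): step = (64 / 16) * 128 = 512, so
+ * consecutive grid points of the default morph table are 512 units apart,
+ * offset per plane by the start_x/start_y values.
+ */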
+
+static void
+sh_css_set_morph_table(struct ia_css_isp_parameters *params,
+ const struct ia_css_morph_table *table)
+{
+ if (table == NULL)
+ return;
+
+ IA_CSS_ENTER_PRIVATE("table=%p", table);
+
+ assert(params != NULL);
+ if (table->enable == false)
+ table = NULL;
+ params->morph_table = table;
+ params->morph_table_changed = true;
+ IA_CSS_LEAVE_PRIVATE("void");
+}
+
+void
+ia_css_translate_3a_statistics(
+ struct ia_css_3a_statistics *host_stats,
+ const struct ia_css_isp_3a_statistics_map *isp_stats)
+{
+ IA_CSS_ENTER("");
+ if (host_stats->grid.use_dmem) {
+ IA_CSS_LOG("3A: DMEM");
+ ia_css_s3a_dmem_decode(host_stats, isp_stats->dmem_stats);
+ } else {
+ IA_CSS_LOG("3A: VMEM");
+ ia_css_s3a_vmem_decode(host_stats, isp_stats->vmem_stats_hi,
+ isp_stats->vmem_stats_lo);
+ }
+#if !defined(HAS_NO_HMEM)
+ IA_CSS_LOG("3A: HMEM");
+ ia_css_s3a_hmem_decode(host_stats, isp_stats->hmem_stats);
+#endif
+
+ IA_CSS_LEAVE("void");
+}
+
+void
+ia_css_isp_3a_statistics_map_free(struct ia_css_isp_3a_statistics_map *me)
+{
+ if (me) {
+ if (me->data_allocated) {
+ sh_css_free(me->data_ptr);
+ me->data_ptr = NULL;
+ me->data_allocated = false;
+ }
+ sh_css_free(me);
+ }
+}
+
+struct ia_css_isp_3a_statistics_map *
+ia_css_isp_3a_statistics_map_allocate(
+ const struct ia_css_isp_3a_statistics *isp_stats,
+ void *data_ptr)
+{
+ struct ia_css_isp_3a_statistics_map *me;
+ /* Windows compiler does not like adding sizes to a void *
+ * so we use a local char * instead. */
+ char *base_ptr;
+
+ me = sh_css_malloc(sizeof(*me));
+ if (!me) {
+ IA_CSS_LEAVE("cannot allocate memory");
+ goto err;
+ }
+
+ me->data_ptr = data_ptr;
+ me->data_allocated = data_ptr == NULL;
+ if (!data_ptr) {
+ me->data_ptr = sh_css_malloc(isp_stats->size);
+ if (!me->data_ptr) {
+ IA_CSS_LEAVE("cannot allocate memory");
+ goto err;
+ }
+ }
+ base_ptr = me->data_ptr;
+
+ me->size = isp_stats->size;
+ /* GCC complains when we assign a char * to a void *, so these
+ * casts are necessary unfortunately. */
+ me->dmem_stats = (void *)base_ptr;
+ me->vmem_stats_hi = (void *)(base_ptr + isp_stats->dmem_size);
+ me->vmem_stats_lo = (void *)(base_ptr + isp_stats->dmem_size +
+ isp_stats->vmem_size);
+ me->hmem_stats = (void *)(base_ptr + isp_stats->dmem_size +
+ 2 * isp_stats->vmem_size);
+
+ IA_CSS_LEAVE("map=%p", me);
+ return me;
+
+err:
+ if (me)
+ sh_css_free(me);
+ return NULL;
+}
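+
+/* Resulting map layout (offsets relative to data_ptr, as set up above):
+ *
+ *	[0, dmem_size)                            dmem_stats
+ *	[dmem_size, dmem_size + vmem_size)        vmem_stats_hi
+ *	[dmem_size + vmem_size,
+ *	 dmem_size + 2 * vmem_size)               vmem_stats_lo
+ *	[dmem_size + 2 * vmem_size, size)         hmem_stats
+ */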
+
+enum ia_css_err
+ia_css_get_3a_statistics(struct ia_css_3a_statistics *host_stats,
+ const struct ia_css_isp_3a_statistics *isp_stats)
+{
+ struct ia_css_isp_3a_statistics_map *map;
+ enum ia_css_err ret = IA_CSS_SUCCESS;
+
+ IA_CSS_ENTER("host_stats=%p, isp_stats=%p", host_stats, isp_stats);
+
+ assert(host_stats != NULL);
+ assert(isp_stats != NULL);
+
+ map = ia_css_isp_3a_statistics_map_allocate(isp_stats, NULL);
+ if (map) {
+ mmgr_load(isp_stats->data_ptr, map->data_ptr, isp_stats->size);
+ ia_css_translate_3a_statistics(host_stats, map);
+ ia_css_isp_3a_statistics_map_free(map);
+ } else {
+ IA_CSS_ERROR("out of memory");
+ ret = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+ }
+
+ IA_CSS_LEAVE_ERR(ret);
+ return ret;
+}
+
+/* Parameter encoding is not yet orthogonal.
+ * This function handles some of the exceptions.
+ */
+static void
+ia_css_set_param_exceptions(const struct ia_css_pipe *pipe,
+ struct ia_css_isp_parameters *params)
+{
+ assert(params != NULL);
+
+ /* Copy also to DP. Should be done by the driver. */
+ params->dp_config.gr = params->wb_config.gr;
+ params->dp_config.r = params->wb_config.r;
+ params->dp_config.b = params->wb_config.b;
+ params->dp_config.gb = params->wb_config.gb;
+#ifdef ISP2401
+ assert(pipe != NULL);
+ assert(pipe->mode < IA_CSS_PIPE_ID_NUM);
+
+ if (pipe->mode < IA_CSS_PIPE_ID_NUM) {
+ params->pipe_dp_config[pipe->mode].gr = params->wb_config.gr;
+ params->pipe_dp_config[pipe->mode].r = params->wb_config.r;
+ params->pipe_dp_config[pipe->mode].b = params->wb_config.b;
+ params->pipe_dp_config[pipe->mode].gb = params->wb_config.gb;
+ }
+#endif
+}
+
+#ifdef ISP2401
+static void
+sh_css_set_dp_config(const struct ia_css_pipe *pipe,
+ struct ia_css_isp_parameters *params,
+ const struct ia_css_dp_config *config)
+{
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ assert(pipe != NULL);
+ assert(pipe->mode < IA_CSS_PIPE_ID_NUM);
+
+ IA_CSS_ENTER_PRIVATE("config=%p", config);
+ ia_css_dp_debug_dtrace(config, IA_CSS_DEBUG_TRACE_PRIVATE);
+ if (pipe->mode < IA_CSS_PIPE_ID_NUM) {
+ params->pipe_dp_config[pipe->mode] = *config;
+ params->pipe_dpc_config_changed[pipe->mode] = true;
+ }
+ IA_CSS_LEAVE_PRIVATE("void");
+}
+#endif
+
+static void
+sh_css_get_dp_config(const struct ia_css_pipe *pipe,
+ const struct ia_css_isp_parameters *params,
+ struct ia_css_dp_config *config)
+{
+ if (config == NULL)
+ return;
+
+ assert(params != NULL);
+ assert(pipe != NULL);
+ IA_CSS_ENTER_PRIVATE("config=%p", config);
+
+ *config = params->pipe_dp_config[pipe->mode];
+
+ IA_CSS_LEAVE_PRIVATE("void");
+}
+
+static void
+sh_css_set_nr_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_nr_config *config)
+{
+ if (config == NULL)
+ return;
+ assert(params != NULL);
+
+ IA_CSS_ENTER_PRIVATE("config=%p", config);
+
+ ia_css_nr_debug_dtrace(config, IA_CSS_DEBUG_TRACE_PRIVATE);
+ params->nr_config = *config;
+ params->yee_config.nr = *config;
+ params->config_changed[IA_CSS_NR_ID] = true;
+ params->config_changed[IA_CSS_YEE_ID] = true;
+ params->config_changed[IA_CSS_BNR_ID] = true;
+
+ IA_CSS_LEAVE_PRIVATE("void");
+}
+
+static void
+sh_css_set_ee_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_ee_config *config)
+{
+ if (config == NULL)
+ return;
+ assert(params != NULL);
+
+ IA_CSS_ENTER_PRIVATE("config=%p", config);
+ ia_css_ee_debug_dtrace(config, IA_CSS_DEBUG_TRACE_PRIVATE);
+
+ params->ee_config = *config;
+ params->yee_config.ee = *config;
+ params->config_changed[IA_CSS_YEE_ID] = true;
+
+ IA_CSS_LEAVE_PRIVATE("void");
+}
+
+static void
+sh_css_get_ee_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_ee_config *config)
+{
+ if (config == NULL)
+ return;
+
+ IA_CSS_ENTER_PRIVATE("config=%p", config);
+
+ assert(params != NULL);
+ *config = params->ee_config;
+
+ ia_css_ee_debug_dtrace(config, IA_CSS_DEBUG_TRACE_PRIVATE);
+ IA_CSS_LEAVE_PRIVATE("void");
+}
+
+static void
+sh_css_set_pipe_dvs_6axis_config(const struct ia_css_pipe *pipe,
+ struct ia_css_isp_parameters *params,
+ const struct ia_css_dvs_6axis_config *dvs_config)
+{
+ if (dvs_config == NULL)
+ return;
+ assert(params != NULL);
+ assert(pipe != NULL);
+ assert(dvs_config->height_y == dvs_config->height_uv);
+ assert((dvs_config->width_y - 1) == 2 * (dvs_config->width_uv - 1));
+ assert(pipe->mode < IA_CSS_PIPE_ID_NUM);
+
+ IA_CSS_ENTER_PRIVATE("dvs_config=%p", dvs_config);
+
+ copy_dvs_6axis_table(params->pipe_dvs_6axis_config[pipe->mode], dvs_config);
+
+#if !defined(HAS_NO_DVS_6AXIS_CONFIG_UPDATE)
+ params->pipe_dvs_6axis_config_changed[pipe->mode] = true;
+#endif
+
+ IA_CSS_LEAVE_PRIVATE("void");
+}
+
+static void
+sh_css_get_pipe_dvs_6axis_config(const struct ia_css_pipe *pipe,
+ const struct ia_css_isp_parameters *params,
+ struct ia_css_dvs_6axis_config *dvs_config)
+{
+ if (dvs_config == NULL)
+ return;
+ assert(params != NULL);
+ assert(pipe != NULL);
+ assert(dvs_config->height_y == dvs_config->height_uv);
+ assert((dvs_config->width_y - 1) == 2 * (dvs_config->width_uv - 1));
+
+ IA_CSS_ENTER_PRIVATE("dvs_config=%p", dvs_config);
+
+ if ((pipe->mode < IA_CSS_PIPE_ID_NUM) &&
+ (dvs_config->width_y == params->pipe_dvs_6axis_config[pipe->mode]->width_y) &&
+ (dvs_config->height_y == params->pipe_dvs_6axis_config[pipe->mode]->height_y) &&
+ (dvs_config->width_uv == params->pipe_dvs_6axis_config[pipe->mode]->width_uv) &&
+ (dvs_config->height_uv == params->pipe_dvs_6axis_config[pipe->mode]->height_uv) &&
+ dvs_config->xcoords_y &&
+ dvs_config->ycoords_y &&
+ dvs_config->xcoords_uv &&
+ dvs_config->ycoords_uv)
+ {
+ copy_dvs_6axis_table(dvs_config, params->pipe_dvs_6axis_config[pipe->mode]);
+ }
+
+ IA_CSS_LEAVE_PRIVATE("void");
+}
+
+static void
+sh_css_set_baa_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_aa_config *config)
+{
+ if (config == NULL)
+ return;
+ assert(params != NULL);
+
+ IA_CSS_ENTER_PRIVATE("config=%p", config);
+
+ params->bds_config = *config;
+ params->config_changed[IA_CSS_BDS_ID] = true;
+
+ IA_CSS_LEAVE_PRIVATE("void");
+}
+
+static void
+sh_css_get_baa_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_aa_config *config)
+{
+ if (config == NULL)
+ return;
+ assert(params != NULL);
+
+ IA_CSS_ENTER_PRIVATE("config=%p", config);
+
+ *config = params->bds_config;
+
+ IA_CSS_LEAVE_PRIVATE("void");
+}
+
+static void
+sh_css_set_dz_config(struct ia_css_isp_parameters *params,
+ const struct ia_css_dz_config *config)
+{
+ if (config == NULL)
+ return;
+ assert(params != NULL);
+
+ IA_CSS_ENTER_PRIVATE("dx=%d, dy=%d", config->dx, config->dy);
+
+ assert(config->dx <= HRT_GDC_N);
+ assert(config->dy <= HRT_GDC_N);
+
+ params->dz_config = *config;
+ params->dz_config_changed = true;
+ /* JK: Why isp params changed?? */
+ params->isp_params_changed = true;
+
+ IA_CSS_LEAVE_PRIVATE("void");
+}
+
+static void
+sh_css_get_dz_config(const struct ia_css_isp_parameters *params,
+ struct ia_css_dz_config *config)
+{
+ if (config == NULL)
+ return;
+ assert(params != NULL);
+
+ IA_CSS_ENTER_PRIVATE("config=%p", config);
+
+ *config = params->dz_config;
+
+ IA_CSS_LEAVE_PRIVATE("dx=%d, dy=%d", config->dx, config->dy);
+}
+
+static void
+sh_css_set_motion_vector(struct ia_css_isp_parameters *params,
+ const struct ia_css_vector *motion)
+{
+ if (motion == NULL)
+ return;
+ assert(params != NULL);
+
+ IA_CSS_ENTER_PRIVATE("x=%d, y=%d", motion->x, motion->y);
+
+ params->motion_config = *motion;
+ /* JK: Why do isp params change? */
+ params->motion_config_changed = true;
+ params->isp_params_changed = true;
+
+ IA_CSS_LEAVE_PRIVATE("void");
+}
+
+static void
+sh_css_get_motion_vector(const struct ia_css_isp_parameters *params,
+ struct ia_css_vector *motion)
+{
+ if (motion == NULL)
+ return;
+ assert(params != NULL);
+
+ IA_CSS_ENTER_PRIVATE("motion=%p", motion);
+
+ *motion = params->motion_config;
+
+ IA_CSS_LEAVE_PRIVATE("x=%d, y=%d", motion->x, motion->y);
+}
+
+struct ia_css_isp_config *
+sh_css_pipe_isp_config_get(struct ia_css_pipe *pipe)
+{
+ if (pipe == NULL)
+ {
+ IA_CSS_ERROR("pipe=%p", NULL);
+ return NULL;
+ }
+ return pipe->config.p_isp_config;
+}
+
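+/* Convenience wrapper: applying a configuration to a stream without naming a
+ * pipe is the same as calling the _on_pipe variant with pipe == NULL, which
+ * updates all pipes of the stream. */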
+enum ia_css_err
+ia_css_stream_set_isp_config(
+ struct ia_css_stream *stream,
+ const struct ia_css_isp_config *config)
+{
+ return ia_css_stream_set_isp_config_on_pipe(stream, config, NULL);
+}
+
+enum ia_css_err
+ia_css_stream_set_isp_config_on_pipe(
+ struct ia_css_stream *stream,
+ const struct ia_css_isp_config *config,
+ struct ia_css_pipe *pipe)
+{
+ enum ia_css_err err = IA_CSS_SUCCESS;
+
+ if ((stream == NULL) || (config == NULL))
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+
+ IA_CSS_ENTER("stream=%p, config=%p, pipe=%p", stream, config, pipe);
+
+#if defined(SH_CSS_ENABLE_PER_FRAME_PARAMS)
+ if (config->output_frame)
+ err = sh_css_set_per_frame_isp_config_on_pipe(stream, config, pipe);
+ else
+#endif
+ err = sh_css_set_global_isp_config_on_pipe(stream->pipes[0], config, pipe);
+
+ IA_CSS_LEAVE_ERR(err);
+ return err;
+}
+
+enum ia_css_err
+ia_css_pipe_set_isp_config(struct ia_css_pipe *pipe,
+ struct ia_css_isp_config *config)
+{
+ struct ia_css_pipe *pipe_in = pipe;
+ enum ia_css_err err = IA_CSS_SUCCESS;
+
+ IA_CSS_ENTER("pipe=%p", pipe);
+
+ if ((pipe == NULL) || (pipe->stream == NULL))
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "config=%p\n", config);
+
+#if defined(SH_CSS_ENABLE_PER_FRAME_PARAMS)
+ if (config->output_frame)
+ err = sh_css_set_per_frame_isp_config_on_pipe(pipe->stream, config, pipe);
+ else
+#endif
+ err = sh_css_set_global_isp_config_on_pipe(pipe, config, pipe_in);
+ IA_CSS_LEAVE_ERR(err);
+ return err;
+}
+
+static enum ia_css_err
+sh_css_set_global_isp_config_on_pipe(
+ struct ia_css_pipe *curr_pipe,
+ const struct ia_css_isp_config *config,
+ struct ia_css_pipe *pipe)
+{
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ enum ia_css_err err1 = IA_CSS_SUCCESS;
+ enum ia_css_err err2 = IA_CSS_SUCCESS;
+
+ IA_CSS_ENTER_PRIVATE("stream=%p, config=%p, pipe=%p", curr_pipe, config, pipe);
+
+ err1 = sh_css_init_isp_params_from_config(curr_pipe, curr_pipe->stream->isp_params_configs, config, pipe);
+
+ /* Now commit all changes to the SP */
+ err2 = sh_css_param_update_isp_params(curr_pipe, curr_pipe->stream->isp_params_configs, sh_css_sp_is_running(), pipe);
+
+ /* The following code is intentional. The sh_css_init_isp_params_from_config interface
+ * returns an error when both DPC and BDS are enabled. The CSS API must pass this error
+ * information to the caller, i.e. the host. We do not return this error immediately,
+ * but instead continue with updating the ISP params to enable testing of features
+ * which are currently in TR phase. */
+
+ err = (err1 != IA_CSS_SUCCESS) ? err1 : ((err2 != IA_CSS_SUCCESS) ? err2 : err);
+
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+}
+
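+/* Per-frame parameters: a second ia_css_isp_parameters object is created
+ * lazily, seeded from the stream's global parameters, overlaid with the new
+ * config and committed to DDR, so that it applies to the frame referenced by
+ * config->output_frame. */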
+#if defined(SH_CSS_ENABLE_PER_FRAME_PARAMS)
+static enum ia_css_err
+sh_css_set_per_frame_isp_config_on_pipe(
+ struct ia_css_stream *stream,
+ const struct ia_css_isp_config *config,
+ struct ia_css_pipe *pipe)
+{
+ unsigned i;
+ bool per_frame_config_created = false;
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ enum ia_css_err err1 = IA_CSS_SUCCESS;
+ enum ia_css_err err2 = IA_CSS_SUCCESS;
+ enum ia_css_err err3 = IA_CSS_SUCCESS;
+
+ struct sh_css_ddr_address_map *ddr_ptrs;
+ struct sh_css_ddr_address_map_size *ddr_ptrs_size;
+ struct ia_css_isp_parameters *params;
+
+ IA_CSS_ENTER_PRIVATE("stream=%p, config=%p, pipe=%p", stream, config, pipe);
+
+ if (!pipe) {
+ err = IA_CSS_ERR_INVALID_ARGUMENTS;
+ goto exit;
+ }
+
+ /* create per-frame ISP params object with default values
+ * from stream->isp_params_configs if one doesn't already exist
+ */
+ if (!stream->per_frame_isp_params_configs)
+ {
+ err = sh_css_create_isp_params(stream,
+ &stream->per_frame_isp_params_configs);
+ if (err != IA_CSS_SUCCESS)
+ goto exit;
+ per_frame_config_created = true;
+ }
+
+ params = stream->per_frame_isp_params_configs;
+
+ /* update new ISP params object with the new config */
+ if (!sh_css_init_isp_params_from_global(stream, params, false, pipe)) {
+ err1 = IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+
+ err2 = sh_css_init_isp_params_from_config(stream->pipes[0], params, config, pipe);
+
+ if (per_frame_config_created)
+ {
+ ddr_ptrs = &params->ddr_ptrs;
+ ddr_ptrs_size = &params->ddr_ptrs_size;
+ /* create per pipe reference to general ddr_ptrs */
+ for (i = 0; i < IA_CSS_PIPE_ID_NUM; i++) {
+ ref_sh_css_ddr_address_map(ddr_ptrs, &params->pipe_ddr_ptrs[i]);
+ params->pipe_ddr_ptrs_size[i] = *ddr_ptrs_size;
+ }
+ }
+
+ /* now commit to ddr */
+ err3 = sh_css_param_update_isp_params(stream->pipes[0], params, sh_css_sp_is_running(), pipe);
+
+ /* The following code is intentional. The sh_css_init_sp_params_from_config and
+ * sh_css_init_isp_params_from_config functions return an error when both DPC and BDS are
+ * enabled. The CSS API must pass this error information to the caller, i.e. the host.
+ * We do not return this error immediately, but instead continue with updating the ISP params
+ * to enable testing of features which are currently in TR phase. */
+ err = (err1 != IA_CSS_SUCCESS) ? err1 :
+ (err2 != IA_CSS_SUCCESS) ? err2 :
+ (err3 != IA_CSS_SUCCESS) ? err3 : err;
+exit:
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+}
+#endif
+
+static enum ia_css_err
+sh_css_init_isp_params_from_config(struct ia_css_pipe *pipe,
+ struct ia_css_isp_parameters *params,
+ const struct ia_css_isp_config *config,
+ struct ia_css_pipe *pipe_in)
+{
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ bool is_dp_10bpp = true;
+ assert(pipe != NULL);
+
+ IA_CSS_ENTER_PRIVATE("pipe=%p, config=%p, params=%p", pipe, config, params);
+
+ ia_css_set_configs(params, config);
+
+
+ sh_css_set_nr_config(params, config->nr_config);
+ sh_css_set_ee_config(params, config->ee_config);
+ sh_css_set_baa_config(params, config->baa_config);
+ if ((pipe->mode < IA_CSS_PIPE_ID_NUM) &&
+ (params->pipe_dvs_6axis_config[pipe->mode]))
+ sh_css_set_pipe_dvs_6axis_config(pipe, params, config->dvs_6axis_config);
+ sh_css_set_dz_config(params, config->dz_config);
+ sh_css_set_motion_vector(params, config->motion_vector);
+ sh_css_update_shading_table_status(pipe_in, params);
+ sh_css_set_shading_table(pipe->stream, params, config->shading_table);
+ sh_css_set_morph_table(params, config->morph_table);
+ sh_css_set_macc_table(params, config->macc_table);
+ sh_css_set_gamma_table(params, config->gamma_table);
+ sh_css_set_ctc_table(params, config->ctc_table);
+/* ------ deprecated(bz675) : from ------ */
+ sh_css_set_shading_settings(params, config->shading_settings);
+/* ------ deprecated(bz675) : to ------ */
+
+ params->dis_coef_table_changed = (config->dvs_coefs != NULL);
+ params->dvs2_coef_table_changed = (config->dvs2_coefs != NULL);
+
+ params->output_frame = config->output_frame;
+ params->isp_parameters_id = config->isp_config_id;
+#ifdef ISP2401
+ /* Currently we do not offer a CSS interface to set different
+ * configurations for DPC, i.e. depending on DPC being enabled
+ * before (NORM+OBC) or after. The following code to set the
+ * DPC configuration should be updated when this interface is made
+ * available. */
+ sh_css_set_dp_config(pipe, params, config->dp_config);
+ ia_css_set_param_exceptions(pipe, params);
+#endif
+
+ if (IA_CSS_SUCCESS ==
+ sh_css_select_dp_10bpp_config(pipe, &is_dp_10bpp)) {
+ /* return an error when both DPC and BDS are enabled by the
+ * user. */
+ /* we do not exit from this point immediately to allow internal
+ * firmware feature testing. */
+ if (is_dp_10bpp) {
+ err = IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+ } else {
+ err = IA_CSS_ERR_INTERNAL_ERROR;
+ goto exit;
+ }
+
+#ifndef ISP2401
+ ia_css_set_param_exceptions(pipe, params);
+#endif
+exit:
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+}
+
+void
+ia_css_stream_get_isp_config(
+ const struct ia_css_stream *stream,
+ struct ia_css_isp_config *config)
+{
+ IA_CSS_ENTER("void");
+ ia_css_pipe_get_isp_config(stream->pipes[0], config);
+ IA_CSS_LEAVE("void");
+}
+
+void
+ia_css_pipe_get_isp_config(struct ia_css_pipe *pipe,
+ struct ia_css_isp_config *config)
+{
+ struct ia_css_isp_parameters *params = NULL;
+
+ assert(config != NULL);
+
+ IA_CSS_ENTER("config=%p", config);
+
+ params = pipe->stream->isp_params_configs;
+ assert(params != NULL);
+
+ ia_css_get_configs(params, config);
+
+ sh_css_get_ee_config(params, config->ee_config);
+ sh_css_get_baa_config(params, config->baa_config);
+ sh_css_get_pipe_dvs_6axis_config(pipe, params, config->dvs_6axis_config);
+ sh_css_get_dp_config(pipe, params, config->dp_config);
+ sh_css_get_macc_table(params, config->macc_table);
+ sh_css_get_gamma_table(params, config->gamma_table);
+ sh_css_get_ctc_table(params, config->ctc_table);
+ sh_css_get_dz_config(params, config->dz_config);
+ sh_css_get_motion_vector(params, config->motion_vector);
+/* ------ deprecated(bz675) : from ------ */
+ sh_css_get_shading_settings(params, config->shading_settings);
+/* ------ deprecated(bz675) : to ------ */
+
+ config->output_frame = params->output_frame;
+ config->isp_config_id = params->isp_parameters_id;
+
+ IA_CSS_LEAVE("void");
+}
+
+#ifndef ISP2401
+/*
+ * coding style says that returning "mmgr_NULL" is the error signal
+ *
+ * Deprecated: Implement mmgr_realloc()
+ */
+static bool realloc_isp_css_mm_buf(
+ hrt_vaddress *curr_buf,
+ size_t *curr_size,
+ size_t needed_size,
+ bool force,
+ enum ia_css_err *err,
+ uint16_t mmgr_attribute)
+{
+ int32_t id;
+
+ *err = IA_CSS_SUCCESS;
+ /* Possible optimization: add a function sh_css_isp_css_mm_realloc()
+ * and implement on top of hmm. */
+
+ IA_CSS_ENTER_PRIVATE("void");
+
+ if (!force && *curr_size >= needed_size) {
+ IA_CSS_LEAVE_PRIVATE("false");
+ return false;
+ }
+ /* don't reallocate if single ref to buffer and same size */
+ if (*curr_size == needed_size && ia_css_refcount_is_single(*curr_buf)) {
+ IA_CSS_LEAVE_PRIVATE("false");
+ return false;
+ }
+
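+ /* Release our reference to the old buffer and take a refcounted
+ * reference to a newly allocated one of the required size. */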
+ id = IA_CSS_REFCOUNT_PARAM_BUFFER;
+ ia_css_refcount_decrement(id, *curr_buf);
+ *curr_buf = ia_css_refcount_increment(id, mmgr_alloc_attr(needed_size,
+ mmgr_attribute));
+
+ if (!*curr_buf) {
+ *err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+ *curr_size = 0;
+ } else {
+ *curr_size = needed_size;
+ }
+ IA_CSS_LEAVE_PRIVATE("true");
+ return true;
+}
+
+static bool reallocate_buffer(
+ hrt_vaddress *curr_buf,
+ size_t *curr_size,
+ size_t needed_size,
+ bool force,
+ enum ia_css_err *err)
+{
+ bool ret;
+ uint16_t mmgr_attribute = MMGR_ATTRIBUTE_DEFAULT;
+
+ IA_CSS_ENTER_PRIVATE("void");
+
+ ret = realloc_isp_css_mm_buf(curr_buf,
+ curr_size, needed_size, force, err, mmgr_attribute);
+
+ IA_CSS_LEAVE_PRIVATE("ret=%d", ret);
+ return ret;
+}
+
+#endif
+
+struct ia_css_isp_3a_statistics *
+ia_css_isp_3a_statistics_allocate(const struct ia_css_3a_grid_info *grid)
+{
+ struct ia_css_isp_3a_statistics *me;
+
+ IA_CSS_ENTER("grid=%p", grid);
+
+ assert(grid != NULL);
+
+ /* MW: Does "grid->enable" also control the histogram output ?? */
+ if (!grid->enable)
+ return NULL;
+
+ me = sh_css_calloc(1, sizeof(*me));
+ if (!me)
+ goto err;
+
+ if (grid->use_dmem) {
+ me->dmem_size = sizeof(struct ia_css_3a_output) *
+ grid->aligned_width *
+ grid->aligned_height;
+ } else {
+ me->vmem_size = ISP_S3ATBL_HI_LO_STRIDE_BYTES *
+ grid->aligned_height;
+ }
+#if !defined(HAS_NO_HMEM)
+ me->hmem_size = sizeof_hmem(HMEM0_ID);
+#endif
+
+ /* All subsections need to be aligned to the system bus width */
+ me->dmem_size = CEIL_MUL(me->dmem_size, HIVE_ISP_DDR_WORD_BYTES);
+ me->vmem_size = CEIL_MUL(me->vmem_size, HIVE_ISP_DDR_WORD_BYTES);
+ me->hmem_size = CEIL_MUL(me->hmem_size, HIVE_ISP_DDR_WORD_BYTES);
+
+ me->size = me->dmem_size + me->vmem_size * 2 + me->hmem_size;
+ me->data_ptr = mmgr_malloc(me->size);
+ if (me->data_ptr == mmgr_NULL) {
+ sh_css_free(me);
+ me = NULL;
+ goto err;
+ }
+ if (me->dmem_size)
+ me->data.dmem.s3a_tbl = me->data_ptr;
+ if (me->vmem_size) {
+ me->data.vmem.s3a_tbl_hi = me->data_ptr + me->dmem_size;
+ me->data.vmem.s3a_tbl_lo = me->data_ptr + me->dmem_size + me->vmem_size;
+ }
+ if (me->hmem_size)
+ me->data_hmem.rgby_tbl = me->data_ptr + me->dmem_size + 2 * me->vmem_size;
+
+
+err:
+ IA_CSS_LEAVE("return=%p", me);
+ return me;
+}
+
+void
+ia_css_isp_3a_statistics_free(struct ia_css_isp_3a_statistics *me)
+{
+ if (me != NULL) {
+ hmm_free(me->data_ptr);
+ sh_css_free(me);
+ }
+}
+
+struct ia_css_isp_skc_dvs_statistics *ia_css_skc_dvs_statistics_allocate(void)
+{
+ return NULL;
+}
+
+struct ia_css_metadata *
+ia_css_metadata_allocate(const struct ia_css_metadata_info *metadata_info)
+{
+ struct ia_css_metadata *md = NULL;
+
+ IA_CSS_ENTER("");
+
+ if (metadata_info->size == 0)
+ return NULL;
+
+ md = sh_css_malloc(sizeof(*md));
+ if (md == NULL)
+ goto error;
+
+ md->info = *metadata_info;
+ md->exp_id = 0;
+ md->address = mmgr_malloc(metadata_info->size);
+ if (md->address == mmgr_NULL)
+ goto error;
+
+ IA_CSS_LEAVE("return=%p", md);
+ return md;
+
+error:
+ ia_css_metadata_free(md);
+ IA_CSS_LEAVE("return=%p", NULL);
+ return NULL;
+}
+
+void
+ia_css_metadata_free(struct ia_css_metadata *me)
+{
+ if (me != NULL) {
+ /* The enter and leave macros are placed inside
+ * the condition to avoid false logging of metadata
+ * free events when metadata is disabled.
+ * We found this to be confusing during development
+ * and debugging. */
+ IA_CSS_ENTER("me=%p", me);
+ hmm_free(me->address);
+ sh_css_free(me);
+ IA_CSS_LEAVE("void");
+ }
+}
+
+void
+ia_css_metadata_free_multiple(unsigned int num_bufs, struct ia_css_metadata **bufs)
+{
+ unsigned int i;
+
+ if (bufs != NULL) {
+ for (i = 0; i < num_bufs; i++)
+ ia_css_metadata_free(bufs[i]);
+ }
+}
+
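+/* TMP: counters used to cross-check the number of parameter sets enqueued to
+ * and dequeued from the SP (see sh_css_param_update_isp_params()). */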
+unsigned g_param_buffer_dequeue_count = 0;
+unsigned g_param_buffer_enqueue_count = 0;
+
+enum ia_css_err
+ia_css_stream_isp_parameters_init(struct ia_css_stream *stream)
+{
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ unsigned i;
+ struct sh_css_ddr_address_map *ddr_ptrs;
+ struct sh_css_ddr_address_map_size *ddr_ptrs_size;
+ struct ia_css_isp_parameters *params;
+
+ assert(stream != NULL);
+ IA_CSS_ENTER_PRIVATE("void");
+
+ if (stream == NULL) {
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_INVALID_ARGUMENTS);
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+ /* TMP: tracking of paramsets */
+ g_param_buffer_dequeue_count = 0;
+ g_param_buffer_enqueue_count = 0;
+
+ stream->per_frame_isp_params_configs = NULL;
+ err = sh_css_create_isp_params(stream,
+ &stream->isp_params_configs);
+ if (err != IA_CSS_SUCCESS)
+ goto ERR;
+
+ params = stream->isp_params_configs;
+ if (!sh_css_init_isp_params_from_global(stream, params, true, NULL)) {
+ /* we do not return the error immediately to enable internal
+ * firmware feature testing */
+ err = IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+
+ ddr_ptrs = &params->ddr_ptrs;
+ ddr_ptrs_size = &params->ddr_ptrs_size;
+
+ /* create per pipe reference to general ddr_ptrs */
+ for (i = 0; i < IA_CSS_PIPE_ID_NUM; i++) {
+ ref_sh_css_ddr_address_map(ddr_ptrs, &params->pipe_ddr_ptrs[i]);
+ params->pipe_ddr_ptrs_size[i] = *ddr_ptrs_size;
+ }
+
+ERR:
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+}
+
+static void
+ia_css_set_sdis_config(
+ struct ia_css_isp_parameters *params,
+ const struct ia_css_dvs_coefficients *dvs_coefs)
+{
+ ia_css_set_sdis_horicoef_config(params, dvs_coefs);
+ ia_css_set_sdis_vertcoef_config(params, dvs_coefs);
+ ia_css_set_sdis_horiproj_config(params, dvs_coefs);
+ ia_css_set_sdis_vertproj_config(params, dvs_coefs);
+}
+
+static void
+ia_css_set_sdis2_config(
+ struct ia_css_isp_parameters *params,
+ const struct ia_css_dvs2_coefficients *dvs2_coefs)
+{
+ ia_css_set_sdis2_horicoef_config(params, dvs2_coefs);
+ ia_css_set_sdis2_vertcoef_config(params, dvs2_coefs);
+ ia_css_set_sdis2_horiproj_config(params, dvs2_coefs);
+ ia_css_set_sdis2_vertproj_config(params, dvs2_coefs);
+}
+
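+/* Allocate and zero a new ia_css_isp_parameters object together with the
+ * refcounted DDR buffers (isp_param and macc_tbl) that every parameter set
+ * starts out with. */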
+static enum ia_css_err
+sh_css_create_isp_params(struct ia_css_stream *stream,
+ struct ia_css_isp_parameters **isp_params_out)
+{
+ bool succ = true;
+ unsigned i;
+ struct sh_css_ddr_address_map *ddr_ptrs;
+ struct sh_css_ddr_address_map_size *ddr_ptrs_size;
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ size_t params_size;
+ struct ia_css_isp_parameters *params =
+ sh_css_malloc(sizeof(struct ia_css_isp_parameters));
+
+ if (!params)
+ {
+ *isp_params_out = NULL;
+ err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+ IA_CSS_ERROR("%s:%d error: cannot allocate memory", __FILE__, __LINE__);
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ } else {
+ memset(params, 0, sizeof(struct ia_css_isp_parameters));
+ }
+
+ ddr_ptrs = &params->ddr_ptrs;
+ ddr_ptrs_size = &params->ddr_ptrs_size;
+
+ for (i = 0; i < IA_CSS_PIPE_ID_NUM; i++) {
+ memset(&params->pipe_ddr_ptrs[i], 0,
+ sizeof(params->pipe_ddr_ptrs[i]));
+ memset(&params->pipe_ddr_ptrs_size[i], 0,
+ sizeof(params->pipe_ddr_ptrs_size[i]));
+ }
+
+ memset(ddr_ptrs, 0, sizeof(*ddr_ptrs));
+ memset(ddr_ptrs_size, 0, sizeof(*ddr_ptrs_size));
+
+ params_size = sizeof(params->uds);
+ ddr_ptrs_size->isp_param = params_size;
+ ddr_ptrs->isp_param =
+ ia_css_refcount_increment(IA_CSS_REFCOUNT_PARAM_BUFFER,
+ mmgr_malloc(params_size));
+ succ &= (ddr_ptrs->isp_param != mmgr_NULL);
+
+ ddr_ptrs_size->macc_tbl = sizeof(struct ia_css_macc_table);
+ ddr_ptrs->macc_tbl =
+ ia_css_refcount_increment(IA_CSS_REFCOUNT_PARAM_BUFFER,
+ mmgr_malloc(sizeof(struct ia_css_macc_table)));
+ succ &= (ddr_ptrs->macc_tbl != mmgr_NULL);
+
+ *isp_params_out = params;
+ return err;
+}
+
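+/* Fill a parameter set either with the built-in defaults
+ * (use_default_config) or with a copy of the stream's current global
+ * parameters, including the per-pipe DPC and DVS 6-axis tables. */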
+static bool
+sh_css_init_isp_params_from_global(struct ia_css_stream *stream,
+ struct ia_css_isp_parameters *params,
+ bool use_default_config,
+ struct ia_css_pipe *pipe_in)
+{
+ bool retval = true;
+ int i = 0;
+ bool is_dp_10bpp = true;
+ unsigned isp_pipe_version = ia_css_pipe_get_isp_pipe_version(stream->pipes[0]);
+ struct ia_css_isp_parameters *stream_params = stream->isp_params_configs;
+
+ if (!use_default_config && !stream_params) {
+ retval = false;
+ goto exit;
+ }
+
+ params->output_frame = NULL;
+ params->isp_parameters_id = 0;
+
+ if (use_default_config)
+ {
+ ia_css_set_xnr3_config(params, &default_xnr3_config);
+
+ sh_css_set_nr_config(params, &default_nr_config);
+ sh_css_set_ee_config(params, &default_ee_config);
+ if (isp_pipe_version == SH_CSS_ISP_PIPE_VERSION_1)
+ sh_css_set_macc_table(params, &default_macc_table);
+ else if (isp_pipe_version == SH_CSS_ISP_PIPE_VERSION_2_2)
+ sh_css_set_macc_table(params, &default_macc2_table);
+ sh_css_set_gamma_table(params, &default_gamma_table);
+ sh_css_set_ctc_table(params, &default_ctc_table);
+ sh_css_set_baa_config(params, &default_baa_config);
+ sh_css_set_dz_config(params, &default_dz_config);
+/* ------ deprecated(bz675) : from ------ */
+ sh_css_set_shading_settings(params, &default_shading_settings);
+/* ------ deprecated(bz675) : to ------ */
+
+ ia_css_set_s3a_config(params, &default_3a_config);
+ ia_css_set_wb_config(params, &default_wb_config);
+ ia_css_set_csc_config(params, &default_cc_config);
+ ia_css_set_tnr_config(params, &default_tnr_config);
+ ia_css_set_ob_config(params, &default_ob_config);
+ ia_css_set_dp_config(params, &default_dp_config);
+#ifndef ISP2401
+ ia_css_set_param_exceptions(pipe_in, params);
+#else
+
+ for (i = 0; i < stream->num_pipes; i++) {
+ if (IA_CSS_SUCCESS == sh_css_select_dp_10bpp_config(stream->pipes[i], &is_dp_10bpp)) {
+ /* set the return value to false if both DPC and
+ * BDS are enabled by the user. But we do not return
+ * the value immediately, to enable internal firmware
+ * feature testing. */
+ if (is_dp_10bpp) {
+ sh_css_set_dp_config(stream->pipes[i], params, &default_dp_10bpp_config);
+ } else {
+ sh_css_set_dp_config(stream->pipes[i], params, &default_dp_config);
+ }
+ } else {
+ retval = false;
+ goto exit;
+ }
+
+ ia_css_set_param_exceptions(stream->pipes[i], params);
+ }
+
+#endif
+ ia_css_set_de_config(params, &default_de_config);
+ ia_css_set_gc_config(params, &default_gc_config);
+ ia_css_set_anr_config(params, &default_anr_config);
+ ia_css_set_anr2_config(params, &default_anr_thres);
+ ia_css_set_ce_config(params, &default_ce_config);
+ ia_css_set_xnr_table_config(params, &default_xnr_table);
+ ia_css_set_ecd_config(params, &default_ecd_config);
+ ia_css_set_ynr_config(params, &default_ynr_config);
+ ia_css_set_fc_config(params, &default_fc_config);
+ ia_css_set_cnr_config(params, &default_cnr_config);
+ ia_css_set_macc_config(params, &default_macc_config);
+ ia_css_set_ctc_config(params, &default_ctc_config);
+ ia_css_set_aa_config(params, &default_aa_config);
+ ia_css_set_r_gamma_config(params, &default_r_gamma_table);
+ ia_css_set_g_gamma_config(params, &default_g_gamma_table);
+ ia_css_set_b_gamma_config(params, &default_b_gamma_table);
+ ia_css_set_yuv2rgb_config(params, &default_yuv2rgb_cc_config);
+ ia_css_set_rgb2yuv_config(params, &default_rgb2yuv_cc_config);
+ ia_css_set_xnr_config(params, &default_xnr_config);
+ ia_css_set_sdis_config(params, &default_sdis_config);
+ ia_css_set_sdis2_config(params, &default_sdis2_config);
+ ia_css_set_formats_config(params, &default_formats_config);
+
+ params->fpn_config.data = NULL;
+ params->config_changed[IA_CSS_FPN_ID] = true;
+ params->fpn_config.enabled = 0;
+
+ params->motion_config = default_motion_config;
+ params->motion_config_changed = true;
+
+ params->morph_table = NULL;
+ params->morph_table_changed = true;
+
+ params->sc_table = NULL;
+ params->sc_table_changed = true;
+ params->sc_table_dirty = false;
+ params->sc_table_last_pipe_num = 0;
+
+ ia_css_sdis2_clear_coefficients(&params->dvs2_coefs);
+ params->dvs2_coef_table_changed = true;
+
+ ia_css_sdis_clear_coefficients(&params->dvs_coefs);
+ params->dis_coef_table_changed = true;
+#ifdef ISP2401
+ ia_css_tnr3_set_default_config(&params->tnr3_config);
+#endif
+ }
+ else
+ {
+ ia_css_set_xnr3_config(params, &stream_params->xnr3_config);
+
+ sh_css_set_nr_config(params, &stream_params->nr_config);
+ sh_css_set_ee_config(params, &stream_params->ee_config);
+ if (isp_pipe_version == SH_CSS_ISP_PIPE_VERSION_1)
+ sh_css_set_macc_table(params, &stream_params->macc_table);
+ else if (isp_pipe_version == SH_CSS_ISP_PIPE_VERSION_2_2)
+ sh_css_set_macc_table(params, &stream_params->macc_table);
+ sh_css_set_gamma_table(params, &stream_params->gc_table);
+ sh_css_set_ctc_table(params, &stream_params->ctc_table);
+ sh_css_set_baa_config(params, &stream_params->bds_config);
+ sh_css_set_dz_config(params, &stream_params->dz_config);
+/* ------ deprecated(bz675) : from ------ */
+ sh_css_set_shading_settings(params, &stream_params->shading_settings);
+/* ------ deprecated(bz675) : to ------ */
+
+ ia_css_set_s3a_config(params, &stream_params->s3a_config);
+ ia_css_set_wb_config(params, &stream_params->wb_config);
+ ia_css_set_csc_config(params, &stream_params->cc_config);
+ ia_css_set_tnr_config(params, &stream_params->tnr_config);
+ ia_css_set_ob_config(params, &stream_params->ob_config);
+ ia_css_set_dp_config(params, &stream_params->dp_config);
+ ia_css_set_de_config(params, &stream_params->de_config);
+ ia_css_set_gc_config(params, &stream_params->gc_config);
+ ia_css_set_anr_config(params, &stream_params->anr_config);
+ ia_css_set_anr2_config(params, &stream_params->anr_thres);
+ ia_css_set_ce_config(params, &stream_params->ce_config);
+ ia_css_set_xnr_table_config(params, &stream_params->xnr_table);
+ ia_css_set_ecd_config(params, &stream_params->ecd_config);
+ ia_css_set_ynr_config(params, &stream_params->ynr_config);
+ ia_css_set_fc_config(params, &stream_params->fc_config);
+ ia_css_set_cnr_config(params, &stream_params->cnr_config);
+ ia_css_set_macc_config(params, &stream_params->macc_config);
+ ia_css_set_ctc_config(params, &stream_params->ctc_config);
+ ia_css_set_aa_config(params, &stream_params->aa_config);
+ ia_css_set_r_gamma_config(params, &stream_params->r_gamma_table);
+ ia_css_set_g_gamma_config(params, &stream_params->g_gamma_table);
+ ia_css_set_b_gamma_config(params, &stream_params->b_gamma_table);
+ ia_css_set_yuv2rgb_config(params, &stream_params->yuv2rgb_cc_config);
+ ia_css_set_rgb2yuv_config(params, &stream_params->rgb2yuv_cc_config);
+ ia_css_set_xnr_config(params, &stream_params->xnr_config);
+ ia_css_set_formats_config(params, &stream_params->formats_config);
+
+ for (i = 0; i < stream->num_pipes; i++) {
+ if (IA_CSS_SUCCESS ==
+ sh_css_select_dp_10bpp_config(stream->pipes[i], &is_dp_10bpp)) {
+ /* set the return value to false if both DPC and
+ * BDS are enabled by the user. But we do not return
+ * the value immediately, to enable internal firmware
+ * feature testing. */
+#ifndef ISP2401
+ retval = !is_dp_10bpp;
+#else
+ if (is_dp_10bpp) {
+ retval = false;
+ }
+ } else {
+ retval = false;
+ goto exit;
+ }
+ if (stream->pipes[i]->mode < IA_CSS_PIPE_ID_NUM) {
+ sh_css_set_dp_config(stream->pipes[i], params,
+ &stream_params->pipe_dp_config[stream->pipes[i]->mode]);
+ ia_css_set_param_exceptions(stream->pipes[i], params);
+#endif
+ } else {
+ retval = false;
+ goto exit;
+ }
+ }
+
+#ifndef ISP2401
+ ia_css_set_param_exceptions(pipe_in, params);
+
+#endif
+ params->fpn_config.data = stream_params->fpn_config.data;
+ params->config_changed[IA_CSS_FPN_ID] = stream_params->config_changed[IA_CSS_FPN_ID];
+ params->fpn_config.enabled = stream_params->fpn_config.enabled;
+
+ sh_css_set_motion_vector(params, &stream_params->motion_config);
+ sh_css_set_morph_table(params, stream_params->morph_table);
+
+ if (stream_params->sc_table) {
+ sh_css_update_shading_table_status(pipe_in, params);
+ sh_css_set_shading_table(stream, params, stream_params->sc_table);
+ }
+ else {
+ params->sc_table = NULL;
+ params->sc_table_changed = true;
+ params->sc_table_dirty = false;
+ params->sc_table_last_pipe_num = 0;
+ }
+
+ /* Only IA_CSS_PIPE_ID_VIDEO & IA_CSS_PIPE_ID_CAPTURE will support dvs_6axis_config*/
+ for (i = 0; i < IA_CSS_PIPE_ID_NUM; i++) {
+ if (stream_params->pipe_dvs_6axis_config[i]) {
+ if (params->pipe_dvs_6axis_config[i]) {
+ copy_dvs_6axis_table(params->pipe_dvs_6axis_config[i],
+ stream_params->pipe_dvs_6axis_config[i]);
+ } else {
+ params->pipe_dvs_6axis_config[i] =
+ generate_dvs_6axis_table_from_config(stream_params->pipe_dvs_6axis_config[i]);
+ }
+ }
+ }
+ ia_css_set_sdis_config(params, &stream_params->dvs_coefs);
+ params->dis_coef_table_changed = stream_params->dis_coef_table_changed;
+
+ ia_css_set_sdis2_config(params, &stream_params->dvs2_coefs);
+ params->dvs2_coef_table_changed = stream_params->dvs2_coef_table_changed;
+ params->sensor_binning = stream_params->sensor_binning;
+ }
+
+exit:
+ return retval;
+}
+
+enum ia_css_err
+sh_css_params_init(void)
+{
+ int i, p;
+
+ IA_CSS_ENTER_PRIVATE("void");
+
+ /* TMP: tracking of paramsets */
+ g_param_buffer_dequeue_count = 0;
+ g_param_buffer_enqueue_count = 0;
+
+ for (p = 0; p < IA_CSS_PIPE_ID_NUM; p++) {
+ for (i = 0; i < SH_CSS_MAX_STAGES; i++) {
+ xmem_sp_stage_ptrs[p][i] =
+ ia_css_refcount_increment(-1,
+ mmgr_calloc(1,
+ sizeof(struct sh_css_sp_stage)));
+ xmem_isp_stage_ptrs[p][i] =
+ ia_css_refcount_increment(-1,
+ mmgr_calloc(1,
+ sizeof(struct sh_css_isp_stage)));
+
+ if ((xmem_sp_stage_ptrs[p][i] == mmgr_NULL) ||
+ (xmem_isp_stage_ptrs[p][i] == mmgr_NULL)) {
+ sh_css_params_uninit();
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY);
+ return IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+ }
+ }
+ }
+
+ ia_css_config_gamma_table();
+ ia_css_config_ctc_table();
+ ia_css_config_rgb_gamma_tables();
+ ia_css_config_xnr_table();
+
+ sp_ddr_ptrs = ia_css_refcount_increment(-1, mmgr_calloc(1,
+ CEIL_MUL(sizeof(struct sh_css_ddr_address_map),
+ HIVE_ISP_DDR_WORD_BYTES)));
+ xmem_sp_group_ptrs = ia_css_refcount_increment(-1, mmgr_calloc(1,
+ sizeof(struct sh_css_sp_group)));
+
+ if ((sp_ddr_ptrs == mmgr_NULL) ||
+ (xmem_sp_group_ptrs == mmgr_NULL)) {
+ ia_css_uninit();
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY);
+ return IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+ }
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_SUCCESS);
+ return IA_CSS_SUCCESS;
+}
+
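+/* Store the given GDC lookup table into every GDC device instance. */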
+static void host_lut_store(const void *lut)
+{
+ unsigned i;
+
+ for (i = 0; i < N_GDC_ID; i++)
+ gdc_lut_store((gdc_ID_t)i, (const int (*)[HRT_GDC_N]) lut);
+}
+
+/* Note that allocation is in ipu address space. */
+inline hrt_vaddress sh_css_params_alloc_gdc_lut(void)
+{
+ return mmgr_malloc(sizeof(zoom_table));
+}
+
+inline void sh_css_params_free_gdc_lut(hrt_vaddress addr)
+{
+ if (addr != mmgr_NULL)
+ hmm_free(addr);
+}
+
+enum ia_css_err ia_css_pipe_set_bci_scaler_lut(struct ia_css_pipe *pipe,
+ const void *lut)
+{
+ enum ia_css_err err = IA_CSS_SUCCESS;
+#ifndef ISP2401
+ bool store = true;
+#else
+ bool stream_started = false;
+#endif
+ IA_CSS_ENTER("pipe=%p lut=%p", pipe, lut);
+
+ if (lut == NULL || pipe == NULL) {
+ err = IA_CSS_ERR_INVALID_ARGUMENTS;
+ IA_CSS_LEAVE("err=%d", err);
+ return err;
+ }
+
+ /* If the pipe belongs to a stream and the stream has started, it is not
+ * safe to store the lut in the GDC HW. If pipe->stream is NULL, no stream has
+ * been created with this pipe, so it is safe to do this operation as long as
+ * ia_css_init() has been called. */
+ if (pipe->stream && pipe->stream->started) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_ERROR,
+ "unable to set scaler lut since stream has started\n");
+#ifndef ISP2401
+ store = false;
+#else
+ stream_started = true;
+#endif
+ err = IA_CSS_ERR_NOT_SUPPORTED;
+ }
+
+ /* Free any existing tables. */
+#ifndef ISP2401
+ if (pipe->scaler_pp_lut != mmgr_NULL) {
+ hmm_free(pipe->scaler_pp_lut);
+ pipe->scaler_pp_lut = mmgr_NULL;
+ }
+#else
+ sh_css_params_free_gdc_lut(pipe->scaler_pp_lut);
+ pipe->scaler_pp_lut = mmgr_NULL;
+#endif
+
+#ifndef ISP2401
+ if (store) {
+ pipe->scaler_pp_lut = mmgr_malloc(sizeof(zoom_table));
+#else
+ if (!stream_started) {
+ pipe->scaler_pp_lut = sh_css_params_alloc_gdc_lut();
+#endif
+ if (pipe->scaler_pp_lut == mmgr_NULL) {
+#ifndef ISP2401
+ IA_CSS_LEAVE("lut(%p) err=%d", pipe->scaler_pp_lut, err);
+ return IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+#else
+ ia_css_debug_dtrace(IA_CSS_DEBUG_ERROR,
+ "unable to allocate scaler_pp_lut\n");
+ err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+ } else {
+ gdc_lut_convert_to_isp_format((const int(*)[HRT_GDC_N])lut,
+ interleaved_lut_temp);
+ mmgr_store(pipe->scaler_pp_lut,
+ (int *)interleaved_lut_temp,
+ sizeof(zoom_table));
+#endif
+ }
+#ifndef ISP2401
+
+ gdc_lut_convert_to_isp_format((const int(*)[HRT_GDC_N])lut, interleaved_lut_temp);
+ mmgr_store(pipe->scaler_pp_lut, (int *)interleaved_lut_temp,
+ sizeof(zoom_table));
+#endif
+ }
+
+ IA_CSS_LEAVE("lut(%p) err=%d", pipe->scaler_pp_lut, err);
+ return err;
+}
+
+/* if pipe is NULL, returns default lut addr. */
+hrt_vaddress sh_css_pipe_get_pp_gdc_lut(const struct ia_css_pipe *pipe)
+{
+ assert(pipe != NULL);
+
+ if (pipe->scaler_pp_lut != mmgr_NULL)
+ return pipe->scaler_pp_lut;
+ else
+ return sh_css_params_get_default_gdc_lut();
+}
+
+enum ia_css_err sh_css_params_map_and_store_default_gdc_lut(void)
+{
+ enum ia_css_err err = IA_CSS_SUCCESS;
+
+ IA_CSS_ENTER_PRIVATE("void");
+
+ /* Is table already mapped? Nothing to do if it is mapped. */
+ if (default_gdc_lut != mmgr_NULL)
+ return err;
+
+ host_lut_store((void *)zoom_table);
+
+#ifndef ISP2401
+ default_gdc_lut = mmgr_malloc(sizeof(zoom_table));
+#else
+ default_gdc_lut = sh_css_params_alloc_gdc_lut();
+#endif
+ if (default_gdc_lut == mmgr_NULL)
+ return IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+
+ gdc_lut_convert_to_isp_format((const int(*)[HRT_GDC_N])zoom_table,
+ interleaved_lut_temp);
+ mmgr_store(default_gdc_lut, (int *)interleaved_lut_temp,
+ sizeof(zoom_table));
+
+ IA_CSS_LEAVE_PRIVATE("lut(%p) err=%d", default_gdc_lut, err);
+ return err;
+}
+
+void sh_css_params_free_default_gdc_lut(void)
+{
+ IA_CSS_ENTER_PRIVATE("void");
+
+#ifndef ISP2401
+ if (default_gdc_lut != mmgr_NULL) {
+ hmm_free(default_gdc_lut);
+ default_gdc_lut = mmgr_NULL;
+ }
+#else
+ sh_css_params_free_gdc_lut(default_gdc_lut);
+ default_gdc_lut = mmgr_NULL;
+#endif
+
+ IA_CSS_LEAVE_PRIVATE("void");
+
+}
+
+hrt_vaddress sh_css_params_get_default_gdc_lut(void)
+{
+ return default_gdc_lut;
+}
+
+static void free_param_set_callback(
+ hrt_vaddress ptr)
+{
+ IA_CSS_ENTER_PRIVATE("void");
+
+ free_ia_css_isp_parameter_set_info(ptr);
+
+ IA_CSS_LEAVE_PRIVATE("void");
+}
+
+static void free_buffer_callback(
+ hrt_vaddress ptr)
+{
+ IA_CSS_ENTER_PRIVATE("void");
+
+ hmm_free(ptr);
+
+ IA_CSS_LEAVE_PRIVATE("void");
+}
+
+void
+sh_css_param_clear_param_sets(void)
+{
+ IA_CSS_ENTER_PRIVATE("void");
+
+ ia_css_refcount_clear(IA_CSS_REFCOUNT_PARAM_SET_POOL, &free_param_set_callback);
+
+ IA_CSS_LEAVE_PRIVATE("void");
+}
+
+/*
+ * MW: we can define hmm_free() to return a NULL
+ * then you can write ptr = hmm_free(ptr);
+ */
+#define safe_free(id, x) \
+ do { \
+ ia_css_refcount_decrement(id, x); \
+ (x) = mmgr_NULL; \
+ } while (0)
+
+static void free_map(struct sh_css_ddr_address_map *map)
+{
+ unsigned int i;
+
+ hrt_vaddress *addrs = (hrt_vaddress *)map;
+
+ IA_CSS_ENTER_PRIVATE("void");
+
+ /* free buffers */
+ for (i = 0; i < (sizeof(struct sh_css_ddr_address_map_size)/
+ sizeof(size_t)); i++) {
+ if (addrs[i] == mmgr_NULL)
+ continue;
+ safe_free(IA_CSS_REFCOUNT_PARAM_BUFFER, addrs[i]);
+ }
+
+ IA_CSS_LEAVE_PRIVATE("void");
+}
+
+void
+ia_css_stream_isp_parameters_uninit(struct ia_css_stream *stream)
+{
+ int i;
+ struct ia_css_isp_parameters *params = stream->isp_params_configs;
+ struct ia_css_isp_parameters *per_frame_params =
+ stream->per_frame_isp_params_configs;
+
+ IA_CSS_ENTER_PRIVATE("void");
+ if (params == NULL) {
+ IA_CSS_LEAVE_PRIVATE("isp_param_configs is NULL");
+ return;
+ }
+
+ /* free existing ddr_ptr maps */
+ for (i = 0; i < IA_CSS_PIPE_ID_NUM; i++)
+ {
+ free_map(&params->pipe_ddr_ptrs[i]);
+ if (per_frame_params)
+ free_map(&per_frame_params->pipe_ddr_ptrs[i]);
+ /* Free up the DVS table memory blocks before recomputing a new table */
+ if (params->pipe_dvs_6axis_config[i])
+ free_dvs_6axis_table(&(params->pipe_dvs_6axis_config[i]));
+ if (per_frame_params && per_frame_params->pipe_dvs_6axis_config[i])
+ free_dvs_6axis_table(&(per_frame_params->pipe_dvs_6axis_config[i]));
+ }
+ free_map(&params->ddr_ptrs);
+ if (per_frame_params)
+ free_map(&per_frame_params->ddr_ptrs);
+
+ if (params->fpn_config.data) {
+ sh_css_free(params->fpn_config.data);
+ params->fpn_config.data = NULL;
+ }
+
+ /* Free up sc_config (temporal shading table) if it is allocated. */
+ if (params->sc_config) {
+ ia_css_shading_table_free(params->sc_config);
+ params->sc_config = NULL;
+ }
+ if (per_frame_params) {
+ if (per_frame_params->sc_config) {
+ ia_css_shading_table_free(per_frame_params->sc_config);
+ per_frame_params->sc_config = NULL;
+ }
+ }
+
+ sh_css_free(params);
+ if (per_frame_params)
+ sh_css_free(per_frame_params);
+ stream->isp_params_configs = NULL;
+ stream->per_frame_isp_params_configs = NULL;
+
+ IA_CSS_LEAVE_PRIVATE("void");
+}
+
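+/* Release the global resources set up by sh_css_params_init(): the per-stage
+ * SP/ISP stage blocks, the DDR pointer map, the SP group block and any
+ * remaining refcounted parameter buffers. */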
+void
+sh_css_params_uninit(void)
+{
+ unsigned p, i;
+
+ IA_CSS_ENTER_PRIVATE("void");
+
+ ia_css_refcount_decrement(-1, sp_ddr_ptrs);
+ sp_ddr_ptrs = mmgr_NULL;
+ ia_css_refcount_decrement(-1, xmem_sp_group_ptrs);
+ xmem_sp_group_ptrs = mmgr_NULL;
+
+ for (p = 0; p < IA_CSS_PIPE_ID_NUM; p++)
+ for (i = 0; i < SH_CSS_MAX_STAGES; i++) {
+ ia_css_refcount_decrement(-1, xmem_sp_stage_ptrs[p][i]);
+ xmem_sp_stage_ptrs[p][i] = mmgr_NULL;
+ ia_css_refcount_decrement(-1, xmem_isp_stage_ptrs[p][i]);
+ xmem_isp_stage_ptrs[p][i] = mmgr_NULL;
+ }
+
+ /* go through the pools to clear references */
+ ia_css_refcount_clear(IA_CSS_REFCOUNT_PARAM_SET_POOL, &free_param_set_callback);
+ ia_css_refcount_clear(IA_CSS_REFCOUNT_PARAM_BUFFER, &free_buffer_callback);
+ ia_css_refcount_clear(-1, &free_buffer_callback);
+
+ IA_CSS_LEAVE_PRIVATE("void");
+}
+
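+/* Convert a morph plane of width x height 16-bit values into a host data
+ * buffer whose rows are padded (or clipped) to aligned_width entries. */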
+static struct ia_css_host_data *
+convert_allocate_morph_plane(
+ unsigned short *data,
+ unsigned int width,
+ unsigned int height,
+ unsigned int aligned_width)
+{
+ unsigned int i, j, padding, w;
+ struct ia_css_host_data *me;
+ unsigned int isp_data_size;
+ uint16_t *isp_data_ptr;
+
+ IA_CSS_ENTER_PRIVATE("void");
+
+ /* we don't have morph table interpolation yet,
+ * so we allow a wider table to be used. This will be removed
+ * in the future. */
+ if (width > aligned_width) {
+ padding = 0;
+ w = aligned_width;
+ } else {
+ padding = aligned_width - width;
+ w = width;
+ }
+ isp_data_size = height * (w + padding) * sizeof(uint16_t);
+
+ me = ia_css_host_data_allocate((size_t) isp_data_size);
+
+ if (!me) {
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY);
+ return NULL;
+ }
+
+ isp_data_ptr = (uint16_t *)me->address;
+
+ memset(isp_data_ptr, 0, (size_t)isp_data_size);
+
+ for (i = 0; i < height; i++) {
+ for (j = 0; j < w; j++)
+ *isp_data_ptr++ = (uint16_t)data[j];
+ isp_data_ptr += padding;
+ data += width;
+ }
+
+ IA_CSS_LEAVE_PRIVATE("void");
+ return me;
+}
+
+static enum ia_css_err
+store_morph_plane(
+ unsigned short *data,
+ unsigned int width,
+ unsigned int height,
+ hrt_vaddress dest,
+ unsigned int aligned_width)
+{
+ struct ia_css_host_data *isp_data;
+
+ assert(dest != mmgr_NULL);
+
+ isp_data = convert_allocate_morph_plane(data, width, height, aligned_width);
+ if (!isp_data) {
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY);
+ return IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+ }
+ ia_css_params_store_ia_css_host_data(dest, isp_data);
+
+ ia_css_host_data_free(isp_data);
+ return IA_CSS_SUCCESS;
+}
+
+static void sh_css_update_isp_params_to_ddr(
+ struct ia_css_isp_parameters *params,
+ hrt_vaddress ddr_ptr)
+{
+ size_t size = sizeof(params->uds);
+
+ IA_CSS_ENTER_PRIVATE("void");
+
+ assert(params != NULL);
+
+ mmgr_store(ddr_ptr, &(params->uds), size);
+ IA_CSS_LEAVE_PRIVATE("void");
+}
+
+static void sh_css_update_isp_mem_params_to_ddr(
+ const struct ia_css_binary *binary,
+ hrt_vaddress ddr_mem_ptr,
+ size_t size,
+ enum ia_css_isp_memories mem)
+{
+ const struct ia_css_host_data *params;
+
+ IA_CSS_ENTER_PRIVATE("void");
+
+ params = ia_css_isp_param_get_mem_init(&binary->mem_params, IA_CSS_PARAM_CLASS_PARAM, mem);
+ mmgr_store(ddr_mem_ptr, params->address, size);
+
+ IA_CSS_LEAVE_PRIVATE("void");
+}
+
+void ia_css_dequeue_param_buffers(/*unsigned int pipe_num*/ void)
+{
+ unsigned int i;
+ hrt_vaddress cpy;
+ enum sh_css_queue_id param_queue_ids[3] = { IA_CSS_PARAMETER_SET_QUEUE_ID,
+ IA_CSS_PER_FRAME_PARAMETER_SET_QUEUE_ID,
+ SH_CSS_INVALID_QUEUE_ID};
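+ /* The list above is terminated by SH_CSS_INVALID_QUEUE_ID so that the
+ * loop below drains both the regular and the per-frame parameter set
+ * queues. */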
+
+ IA_CSS_ENTER_PRIVATE("void");
+
+ if (!sh_css_sp_is_running()) {
+ IA_CSS_LEAVE_PRIVATE("sp is not running");
+ /* SP is not running. The queues are not valid */
+ return;
+ }
+
+ for (i = 0; SH_CSS_INVALID_QUEUE_ID != param_queue_ids[i]; i++) {
+ cpy = (hrt_vaddress)0;
+ /* clean-up old copy */
+ while (IA_CSS_SUCCESS == ia_css_bufq_dequeue_buffer(param_queue_ids[i], (uint32_t *)&cpy)) {
+ /* TMP: keep track of dequeued param set count
+ */
+ g_param_buffer_dequeue_count++;
+ ia_css_bufq_enqueue_psys_event(
+ IA_CSS_PSYS_SW_EVENT_BUFFER_DEQUEUED,
+ 0,
+ param_queue_ids[i],
+ 0);
+
+ IA_CSS_LOG("dequeued param set %x from %d, release ref", cpy, 0);
+ free_ia_css_isp_parameter_set_info(cpy);
+ cpy = (hrt_vaddress)0;
+ }
+ }
+
+ IA_CSS_LEAVE_PRIVATE("void");
+}
+
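+/* Run the per-kernel parameter encoding for one pipeline stage: OB and S3A
+ * are configured from the stream settings first, the per-stage UDS/crop
+ * values are copied in, and then every kernel whose configuration changed is
+ * processed (SC is handled separately on a temporary table). */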
+static void
+process_kernel_parameters(unsigned int pipe_id,
+ struct ia_css_pipeline_stage *stage,
+ struct ia_css_isp_parameters *params,
+ unsigned int isp_pipe_version,
+ unsigned int raw_bit_depth)
+{
+ unsigned param_id;
+
+ (void)isp_pipe_version;
+ (void)raw_bit_depth;
+
+ sh_css_enable_pipeline(stage->binary);
+
+ if (params->config_changed[IA_CSS_OB_ID]) {
+ ia_css_ob_configure(&params->stream_configs.ob,
+ isp_pipe_version, raw_bit_depth);
+ }
+ if (params->config_changed[IA_CSS_S3A_ID]) {
+ ia_css_s3a_configure(raw_bit_depth);
+ }
+ /* Copy stage uds parameters to config, since they can differ per stage.
+ */
+ params->crop_config.crop_pos = params->uds[stage->stage_num].crop_pos;
+ params->uds_config.crop_pos = params->uds[stage->stage_num].crop_pos;
+ params->uds_config.uds = params->uds[stage->stage_num].uds;
+ /* Call parameter process functions for all kernels */
+ /* Skip SC, since that is called on a temp sc table */
+ for (param_id = 0; param_id < IA_CSS_NUM_PARAMETER_IDS; param_id++) {
+ if (param_id == IA_CSS_SC_ID)
+ continue;
+ if (params->config_changed[param_id])
+ ia_css_kernel_process_param[param_id](pipe_id, stage, params);
+ }
+}
+
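+/* Commit the host-side parameters to the SP: for every pipe of the stream,
+ * encode the changed kernels per stage, refresh the pipe-specific DDR copies,
+ * wrap them in a parameter set info record and enqueue that record on the
+ * pipe's parameter queue. */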
+enum ia_css_err
+sh_css_param_update_isp_params(struct ia_css_pipe *curr_pipe,
+ struct ia_css_isp_parameters *params,
+ bool commit,
+ struct ia_css_pipe *pipe_in)
+{
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ hrt_vaddress cpy;
+ int i;
+ unsigned int raw_bit_depth = 10;
+ unsigned int isp_pipe_version = SH_CSS_ISP_PIPE_VERSION_1;
+ bool acc_cluster_params_changed = false;
+ unsigned int thread_id, pipe_num;
+
+ (void)acc_cluster_params_changed;
+
+ assert(curr_pipe != NULL);
+
+ IA_CSS_ENTER_PRIVATE("pipe=%p, isp_parameters_id=%d", pipe_in, params->isp_parameters_id);
+ raw_bit_depth = ia_css_stream_input_format_bits_per_pixel(curr_pipe->stream);
+
+ /* now make the map available to the sp */
+ if (!commit) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ /* enqueue a copy of the mem_map to
+ each of the designated pipelines */
+ for (i = 0; i < curr_pipe->stream->num_pipes; i++) {
+ struct ia_css_pipe *pipe;
+ struct sh_css_ddr_address_map *cur_map;
+ struct sh_css_ddr_address_map_size *cur_map_size;
+ struct ia_css_isp_parameter_set_info isp_params_info;
+ struct ia_css_pipeline *pipeline;
+ struct ia_css_pipeline_stage *stage;
+
+ enum sh_css_queue_id queue_id;
+
+ (void)stage;
+ pipe = curr_pipe->stream->pipes[i];
+ pipeline = ia_css_pipe_get_pipeline(pipe);
+ pipe_num = ia_css_pipe_get_pipe_num(pipe);
+ isp_pipe_version = ia_css_pipe_get_isp_pipe_version(pipe);
+ ia_css_pipeline_get_sp_thread_id(pipe_num, &thread_id);
+
+#if defined(SH_CSS_ENABLE_PER_FRAME_PARAMS)
+ ia_css_query_internal_queue_id(params->output_frame
+ ? IA_CSS_BUFFER_TYPE_PER_FRAME_PARAMETER_SET
+ : IA_CSS_BUFFER_TYPE_PARAMETER_SET,
+ thread_id, &queue_id);
+#else
+ ia_css_query_internal_queue_id(IA_CSS_BUFFER_TYPE_PARAMETER_SET, thread_id, &queue_id);
+#endif
+ if (!sh_css_sp_is_running()) {
+ /* SP is not running. The queues are not valid */
+ err = IA_CSS_ERR_RESOURCE_NOT_AVAILABLE;
+ break;
+ }
+ cur_map = &params->pipe_ddr_ptrs[pipeline->pipe_id];
+ cur_map_size = &params->pipe_ddr_ptrs_size[pipeline->pipe_id];
+
+ /* TODO: Normally, zoom and motion parameters shouldn't
+ * be part of "isp_params" as they are resolution/pipe dependent.
+ * Therefore, move the zoom config elsewhere (e.g. the shading
+ * table can be taken as an example). @GC
+ */
+ {
+ /* we have to do this per pipeline because
+ * the processing is, among other things, resolution dependent */
+ err = ia_css_process_zoom_and_motion(params,
+ pipeline->stages);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+ }
+ /* check if to actually update the parameters for this pipe */
+ /* Once the API change that makes a clean distinction between
+ * stream config and pipe config is implemented, this skipping code can be moved out of the #ifdef */
+ if (pipe_in && (pipe != pipe_in)) {
+ IA_CSS_LOG("skipping pipe %x", pipe);
+ continue;
+ }
+
+ /* BZ 125915, should be moved till after "update other buff" */
+ /* update the other buffers to the pipe specific copies */
+ for (stage = pipeline->stages; stage; stage = stage->next) {
+ unsigned mem;
+
+ if (!stage || !stage->binary)
+ continue;
+
+ process_kernel_parameters(pipeline->pipe_id,
+ stage, params,
+ isp_pipe_version, raw_bit_depth);
+
+ err = sh_css_params_write_to_ddr_internal(
+ pipe,
+ pipeline->pipe_id,
+ params,
+ stage,
+ cur_map,
+ cur_map_size);
+
+ if (err != IA_CSS_SUCCESS)
+ break;
+ for (mem = 0; mem < IA_CSS_NUM_MEMORIES; mem++) {
+ params->isp_mem_params_changed
+ [pipeline->pipe_id][stage->stage_num][mem] = false;
+ }
+ } /* for */
+ if (err != IA_CSS_SUCCESS)
+ break;
+ /* update isp_params to pipe specific copies */
+ if (params->isp_params_changed) {
+ reallocate_buffer(&cur_map->isp_param,
+ &cur_map_size->isp_param,
+ cur_map_size->isp_param,
+ true,
+ &err);
+ if (err != IA_CSS_SUCCESS)
+ break;
+ sh_css_update_isp_params_to_ddr(params, cur_map->isp_param);
+ }
+
+ /* last make referenced copy */
+ err = ref_sh_css_ddr_address_map(
+ cur_map,
+ &isp_params_info.mem_map);
+ if (err != IA_CSS_SUCCESS)
+ break;
+
+ /* Update Parameters ID */
+ isp_params_info.isp_parameters_id = params->isp_parameters_id;
+
+ /* Update output frame pointer */
+ isp_params_info.output_frame_ptr =
+ (params->output_frame) ? params->output_frame->data : mmgr_NULL;
+
+ /* now write the copy to ddr */
+ err = write_ia_css_isp_parameter_set_info_to_ddr(&isp_params_info, &cpy);
+ if (err != IA_CSS_SUCCESS)
+ break;
+
+ /* enqueue the set to sp */
+ IA_CSS_LOG("queue param set %x to %d", cpy, thread_id);
+
+ err = ia_css_bufq_enqueue_buffer(thread_id, queue_id, (uint32_t)cpy);
+ if (IA_CSS_SUCCESS != err) {
+ free_ia_css_isp_parameter_set_info(cpy);
+#if defined(SH_CSS_ENABLE_PER_FRAME_PARAMS)
+ IA_CSS_LOG("pfp: FAILED to add config id %d for OF %d to q %d on thread %d",
+ isp_params_info.isp_parameters_id,
+ isp_params_info.output_frame_ptr,
+ queue_id, thread_id);
+#endif
+ break;
+ }
+ else {
+ /* TMP: check discrepancy between nr of enqueued
+ * parameter sets and dequeued sets
+ */
+ g_param_buffer_enqueue_count++;
+ assert(g_param_buffer_enqueue_count < g_param_buffer_dequeue_count+50);
+#ifdef ISP2401
+ ia_css_save_latest_paramset_ptr(pipe, cpy);
+#endif
+ /*
+ * Tell the SP which queues are not empty,
+ * by sending the software event.
+ */
+ if (!sh_css_sp_is_running()) {
+ /* SP is not running. The queues are not valid */
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_RESOURCE_NOT_AVAILABLE);
+ return IA_CSS_ERR_RESOURCE_NOT_AVAILABLE;
+ }
+ ia_css_bufq_enqueue_psys_event(
+ IA_CSS_PSYS_SW_EVENT_BUFFER_ENQUEUED,
+ (uint8_t)thread_id,
+ (uint8_t)queue_id,
+ 0);
+#if defined(SH_CSS_ENABLE_PER_FRAME_PARAMS)
+ IA_CSS_LOG("pfp: added config id %d for OF %d to q %d on thread %d",
+ isp_params_info.isp_parameters_id,
+ isp_params_info.output_frame_ptr,
+ queue_id, thread_id);
+#endif
+ }
+ /* clean-up old copy */
+ ia_css_dequeue_param_buffers(/*pipe_num*/);
+ params->pipe_dvs_6axis_config_changed[pipeline->pipe_id] = false;
+ } /* end for each 'active' pipeline */
+ /* clear the changed flags after all params
+ for all pipelines have been updated */
+ params->isp_params_changed = false;
+ params->sc_table_changed = false;
+ params->dis_coef_table_changed = false;
+ params->dvs2_coef_table_changed = false;
+ params->morph_table_changed = false;
+ params->dz_config_changed = false;
+ params->motion_config_changed = false;
+/* ------ deprecated(bz675) : from ------ */
+ params->shading_settings_changed = false;
+/* ------ deprecated(bz675) : to ------ */
+
+ memset(&params->config_changed[0], 0, sizeof(params->config_changed));
+
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+}
+
+static enum ia_css_err
+sh_css_params_write_to_ddr_internal(
+ struct ia_css_pipe *pipe,
+ unsigned pipe_id,
+ struct ia_css_isp_parameters *params,
+ const struct ia_css_pipeline_stage *stage,
+ struct sh_css_ddr_address_map *ddr_map,
+ struct sh_css_ddr_address_map_size *ddr_map_size)
+{
+ enum ia_css_err err;
+ const struct ia_css_binary *binary;
+
+ unsigned stage_num;
+ unsigned mem;
+ bool buff_realloced;
+
+ /* struct is > 128 bytes so it should not be on stack (see checkpatch) */
+ static struct ia_css_macc_table converted_macc_table;
+
+ IA_CSS_ENTER_PRIVATE("void");
+ assert(params != NULL);
+ assert(ddr_map != NULL);
+ assert(ddr_map_size != NULL);
+ assert(stage != NULL);
+
+ binary = stage->binary;
+ assert(binary != NULL);
+
+
+ stage_num = stage->stage_num;
+
+ if (binary->info->sp.enable.fpnr) {
+ buff_realloced = reallocate_buffer(&ddr_map->fpn_tbl,
+ &ddr_map_size->fpn_tbl,
+ (size_t)(FPNTBL_BYTES(binary)),
+ params->config_changed[IA_CSS_FPN_ID],
+ &err);
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ if (params->config_changed[IA_CSS_FPN_ID] || buff_realloced) {
+ if (params->fpn_config.enabled) {
+ err = store_fpntbl(params, ddr_map->fpn_tbl);
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ }
+ }
+ }
+
+ if (binary->info->sp.enable.sc) {
+ uint32_t enable_conv = params->
+ shading_settings.enable_shading_table_conversion;
+
+ buff_realloced = reallocate_buffer(&ddr_map->sc_tbl,
+ &ddr_map_size->sc_tbl,
+ (size_t)(SCTBL_BYTES(binary)),
+ params->sc_table_changed,
+ &err);
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+
+ if (params->shading_settings_changed ||
+ params->sc_table_changed || buff_realloced) {
+ if (enable_conv == 0) {
+ if (params->sc_table) {
+ /* store the shading table to ddr */
+ err = ia_css_params_store_sctbl(stage, ddr_map->sc_tbl, params->sc_table);
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ /* set sc_config to isp */
+ params->sc_config = (struct ia_css_shading_table *)params->sc_table;
+ ia_css_kernel_process_param[IA_CSS_SC_ID](pipe_id, stage, params);
+ params->sc_config = NULL;
+ } else {
+ /* generate the identical shading table */
+ if (params->sc_config) {
+ ia_css_shading_table_free(params->sc_config);
+ params->sc_config = NULL;
+ }
+#ifndef ISP2401
+ sh_css_params_shading_id_table_generate(&params->sc_config, binary);
+#else
+ sh_css_params_shading_id_table_generate(&params->sc_config,
+ binary->sctbl_width_per_color, binary->sctbl_height);
+#endif
+ if (params->sc_config == NULL) {
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY);
+ return IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+ }
+
+ /* store the shading table to ddr */
+ err = ia_css_params_store_sctbl(stage, ddr_map->sc_tbl, params->sc_config);
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+
+ /* set sc_config to isp */
+ ia_css_kernel_process_param[IA_CSS_SC_ID](pipe_id, stage, params);
+
+ /* free the shading table */
+ ia_css_shading_table_free(params->sc_config);
+ params->sc_config = NULL;
+ }
+ } else { /* legacy */
+/* ------ deprecated(bz675) : from ------ */
+ /* shading table is full resolution, reduce */
+ if (params->sc_config) {
+ ia_css_shading_table_free(params->sc_config);
+ params->sc_config = NULL;
+ }
+ prepare_shading_table(
+ (const struct ia_css_shading_table *)params->sc_table,
+ params->sensor_binning,
+ &params->sc_config,
+ binary, pipe->required_bds_factor);
+ if (params->sc_config == NULL) {
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY);
+ return IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+ }
+
+ /* store the shading table to ddr */
+ err = ia_css_params_store_sctbl(stage, ddr_map->sc_tbl, params->sc_config);
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+
+ /* set sc_config to isp */
+ ia_css_kernel_process_param[IA_CSS_SC_ID](pipe_id, stage, params);
+
+ /* free the shading table */
+ ia_css_shading_table_free(params->sc_config);
+ params->sc_config = NULL;
+/* ------ deprecated(bz675) : to ------ */
+ }
+ }
+ }
+#ifdef ISP2401
+ /* DPC configuration is made pipe specific to allow flexibility in positioning of the
+ * DPC kernel. The code below sets the pipe specific configuration to
+ * individual binaries. */
+ if (params->pipe_dpc_config_changed[pipe_id] && binary->info->sp.enable.dpc) {
+ unsigned size = stage->binary->info->mem_offsets.offsets.param->dmem.dp.size;
+
+ unsigned offset = stage->binary->info->mem_offsets.offsets.param->dmem.dp.offset;
+ if (size) {
+ ia_css_dp_encode((struct sh_css_isp_dp_params *)
+ &binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
+ &params->pipe_dp_config[pipe_id], size);
+
+ params->isp_params_changed = true;
+ params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] = true;
+ }
+ }
+#endif
+ if (params->config_changed[IA_CSS_MACC_ID] && binary->info->sp.enable.macc) {
+ unsigned int i, j, idx;
+ unsigned int idx_map[] = {
+ 0, 1, 3, 2, 6, 7, 5, 4, 12, 13, 15, 14, 10, 11, 9, 8};
+
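+ /* Each of the IA_CSS_MACC_NUM_AXES axes has four coefficients; the
+ * loop below copies source axis i into destination slot idx_map[i],
+ * converting the coefficients for ISP pipe version 1. */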
+ for (i = 0; i < IA_CSS_MACC_NUM_AXES; i++) {
+ idx = 4*idx_map[i];
+ j = 4*i;
+
+ if (binary->info->sp.pipeline.isp_pipe_version == SH_CSS_ISP_PIPE_VERSION_1) {
+ converted_macc_table.data[idx] =
+ (int16_t)sDIGIT_FITTING(params->macc_table.data[j],
+ 13, SH_CSS_MACC_COEF_SHIFT);
+ converted_macc_table.data[idx+1] =
+ (int16_t)sDIGIT_FITTING(params->macc_table.data[j+1],
+ 13, SH_CSS_MACC_COEF_SHIFT);
+ converted_macc_table.data[idx+2] =
+ (int16_t)sDIGIT_FITTING(params->macc_table.data[j+2],
+ 13, SH_CSS_MACC_COEF_SHIFT);
+ converted_macc_table.data[idx+3] =
+ (int16_t)sDIGIT_FITTING(params->macc_table.data[j+3],
+ 13, SH_CSS_MACC_COEF_SHIFT);
+ } else if (binary->info->sp.pipeline.isp_pipe_version == SH_CSS_ISP_PIPE_VERSION_2_2) {
+ converted_macc_table.data[idx] =
+ params->macc_table.data[j];
+ converted_macc_table.data[idx+1] =
+ params->macc_table.data[j+1];
+ converted_macc_table.data[idx+2] =
+ params->macc_table.data[j+2];
+ converted_macc_table.data[idx+3] =
+ params->macc_table.data[j+3];
+ }
+ }
+ reallocate_buffer(&ddr_map->macc_tbl,
+ &ddr_map_size->macc_tbl,
+ ddr_map_size->macc_tbl,
+ true,
+ &err);
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ mmgr_store(ddr_map->macc_tbl,
+ converted_macc_table.data,
+ sizeof(converted_macc_table.data));
+ }
+
+ if (binary->info->sp.enable.dvs_6axis) {
+ /* Because UV is packed into the Y plane, DVS_6AXIS_BYTES / 2
+ * gives the size of the UV-only part; the total YYU size is
+ * three times that.
+ */
+ buff_realloced = reallocate_buffer(
+ &ddr_map->dvs_6axis_params_y,
+ &ddr_map_size->dvs_6axis_params_y,
+ (size_t)((DVS_6AXIS_BYTES(binary) / 2) * 3),
+ params->pipe_dvs_6axis_config_changed[pipe_id],
+ &err);
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+
+ if (params->pipe_dvs_6axis_config_changed[pipe_id] || buff_realloced) {
+ const struct ia_css_frame_info *dvs_in_frame_info;
+
+ if (stage->args.delay_frames[0]) {
+ /* When delay frames are present (as in the video case),
+ they are used for DVS. Configure DVS using their frame info. */
+ dvs_in_frame_info = &stage->args.delay_frames[0]->info;
+ } else {
+ /* Otherwise, use the input frame to configure DVS. */
+ dvs_in_frame_info = &stage->args.in_frame->info;
+ }
+
+ /* Generate the default DVS unity table on start-up */
+ if (params->pipe_dvs_6axis_config[pipe_id] == NULL) {
+
+#ifndef ISP2401
+ struct ia_css_resolution dvs_offset;
+ dvs_offset.width =
+#else
+ struct ia_css_resolution dvs_offset = {0, 0};
+ if (binary->dvs_envelope.width || binary->dvs_envelope.height) {
+ dvs_offset.width =
+#endif
+ (PIX_SHIFT_FILTER_RUN_IN_X + binary->dvs_envelope.width) / 2;
+ dvs_offset.height =
+ (PIX_SHIFT_FILTER_RUN_IN_Y + binary->dvs_envelope.height) / 2;
+#ifdef ISP2401
+ }
+#endif
+
+ params->pipe_dvs_6axis_config[pipe_id] =
+ generate_dvs_6axis_table(&binary->out_frame_info[0].res, &dvs_offset);
+ if (params->pipe_dvs_6axis_config[pipe_id] == NULL) {
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY);
+ return IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+ }
+ params->pipe_dvs_6axis_config_changed[pipe_id] = true;
+ }
+
+ store_dvs_6axis_config(params->pipe_dvs_6axis_config[pipe_id],
+ binary,
+ dvs_in_frame_info,
+ ddr_map->dvs_6axis_params_y);
+ params->isp_params_changed = true;
+ }
+ }
+
+ if (binary->info->sp.enable.ca_gdc) {
+ unsigned int i;
+ hrt_vaddress *virt_addr_tetra_x[
+ IA_CSS_MORPH_TABLE_NUM_PLANES];
+ size_t *virt_size_tetra_x[
+ IA_CSS_MORPH_TABLE_NUM_PLANES];
+ hrt_vaddress *virt_addr_tetra_y[
+ IA_CSS_MORPH_TABLE_NUM_PLANES];
+ size_t *virt_size_tetra_y[
+ IA_CSS_MORPH_TABLE_NUM_PLANES];
+
+ virt_addr_tetra_x[0] = &ddr_map->tetra_r_x;
+ virt_addr_tetra_x[1] = &ddr_map->tetra_gr_x;
+ virt_addr_tetra_x[2] = &ddr_map->tetra_gb_x;
+ virt_addr_tetra_x[3] = &ddr_map->tetra_b_x;
+ virt_addr_tetra_x[4] = &ddr_map->tetra_ratb_x;
+ virt_addr_tetra_x[5] = &ddr_map->tetra_batr_x;
+
+ virt_size_tetra_x[0] = &ddr_map_size->tetra_r_x;
+ virt_size_tetra_x[1] = &ddr_map_size->tetra_gr_x;
+ virt_size_tetra_x[2] = &ddr_map_size->tetra_gb_x;
+ virt_size_tetra_x[3] = &ddr_map_size->tetra_b_x;
+ virt_size_tetra_x[4] = &ddr_map_size->tetra_ratb_x;
+ virt_size_tetra_x[5] = &ddr_map_size->tetra_batr_x;
+
+ virt_addr_tetra_y[0] = &ddr_map->tetra_r_y;
+ virt_addr_tetra_y[1] = &ddr_map->tetra_gr_y;
+ virt_addr_tetra_y[2] = &ddr_map->tetra_gb_y;
+ virt_addr_tetra_y[3] = &ddr_map->tetra_b_y;
+ virt_addr_tetra_y[4] = &ddr_map->tetra_ratb_y;
+ virt_addr_tetra_y[5] = &ddr_map->tetra_batr_y;
+
+ virt_size_tetra_y[0] = &ddr_map_size->tetra_r_y;
+ virt_size_tetra_y[1] = &ddr_map_size->tetra_gr_y;
+ virt_size_tetra_y[2] = &ddr_map_size->tetra_gb_y;
+ virt_size_tetra_y[3] = &ddr_map_size->tetra_b_y;
+ virt_size_tetra_y[4] = &ddr_map_size->tetra_ratb_y;
+ virt_size_tetra_y[5] = &ddr_map_size->tetra_batr_y;
+
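+ /* (Re)allocate the x and y coordinate planes of the morph table;
+ * reallocation of any plane forces the table to be rewritten below. */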
+ buff_realloced = false;
+ for (i = 0; i < IA_CSS_MORPH_TABLE_NUM_PLANES; i++) {
+ buff_realloced |=
+ reallocate_buffer(virt_addr_tetra_x[i],
+ virt_size_tetra_x[i],
+ (size_t)
+ (MORPH_PLANE_BYTES(binary)),
+ params->morph_table_changed,
+ &err);
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ buff_realloced |=
+ reallocate_buffer(virt_addr_tetra_y[i],
+ virt_size_tetra_y[i],
+ (size_t)
+ (MORPH_PLANE_BYTES(binary)),
+ params->morph_table_changed,
+ &err);
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ }
+ if (params->morph_table_changed || buff_realloced) {
+ const struct ia_css_morph_table *table = params->morph_table;
+ struct ia_css_morph_table *id_table = NULL;
+
+ if ((table != NULL) &&
+ (table->width < binary->morph_tbl_width ||
+ table->height < binary->morph_tbl_height)) {
+ table = NULL;
+ }
+ if (table == NULL) {
+ err = sh_css_params_default_morph_table(&id_table,
+ binary);
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ table = id_table;
+ }
+
+ for (i = 0; i < IA_CSS_MORPH_TABLE_NUM_PLANES; i++) {
+ store_morph_plane(table->coordinates_x[i],
+ table->width,
+ table->height,
+ *virt_addr_tetra_x[i],
+ binary->morph_tbl_aligned_width);
+ store_morph_plane(table->coordinates_y[i],
+ table->width,
+ table->height,
+ *virt_addr_tetra_y[i],
+ binary->morph_tbl_aligned_width);
+ }
+ if (id_table != NULL)
+ ia_css_morph_table_free(id_table);
+ }
+ }
+
+ /* Handle the generic per-memory ISP parameters after special cases
+ like SC and FPN, since those may still change parameters */
+ for (mem = 0; mem < N_IA_CSS_MEMORIES; mem++) {
+ const struct ia_css_isp_data *isp_data =
+ ia_css_isp_param_get_isp_mem_init(&binary->info->sp.mem_initializers, IA_CSS_PARAM_CLASS_PARAM, mem);
+ size_t size = isp_data->size;
+ if (!size)
+ continue;
+ buff_realloced = reallocate_buffer(&ddr_map->isp_mem_param[stage_num][mem],
+ &ddr_map_size->isp_mem_param[stage_num][mem],
+ size,
+ params->isp_mem_params_changed[pipe_id][stage_num][mem],
+ &err);
+ if (err != IA_CSS_SUCCESS) {
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+ if (params->isp_mem_params_changed[pipe_id][stage_num][mem] || buff_realloced) {
+ sh_css_update_isp_mem_params_to_ddr(binary,
+ ddr_map->isp_mem_param[stage_num][mem],
+ ddr_map_size->isp_mem_param[stage_num][mem], mem);
+ }
+ }
+
+ IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_SUCCESS);
+ return IA_CSS_SUCCESS;
+}
+
+const struct ia_css_fpn_table *ia_css_get_fpn_table(struct ia_css_stream *stream)
+{
+ struct ia_css_isp_parameters *params;
+
+ IA_CSS_ENTER_LEAVE("void");
+ assert(stream != NULL);
+
+ params = stream->isp_params_configs;
+
+ return &(params->fpn_config);
+}
+
+struct ia_css_shading_table *ia_css_get_shading_table(struct ia_css_stream *stream)
+{
+ struct ia_css_shading_table *table = NULL;
+ struct ia_css_isp_parameters *params;
+
+ IA_CSS_ENTER("void");
+
+ assert(stream != NULL);
+
+ params = stream->isp_params_configs;
+ if (!params)
+ return NULL;
+
+ if (params->shading_settings.enable_shading_table_conversion == 0) {
+ if (params->sc_table) {
+ table = (struct ia_css_shading_table *)params->sc_table;
+ } else {
+ const struct ia_css_binary *binary
+ = ia_css_stream_get_shading_correction_binary(stream);
+ if (binary) {
+ /* generate the identity shading table */
+ if (params->sc_config) {
+ ia_css_shading_table_free(params->sc_config);
+ params->sc_config = NULL;
+ }
+#ifndef ISP2401
+ sh_css_params_shading_id_table_generate(&params->sc_config, binary);
+
+#else
+ sh_css_params_shading_id_table_generate(&params->sc_config,
+ binary->sctbl_width_per_color, binary->sctbl_height);
+#endif
+ table = params->sc_config;
+ /* The sc_config will be freed in the
+ * ia_css_stream_isp_parameters_uninit function. */
+ }
+ }
+ } else {
+/* ------ deprecated(bz675) : from ------ */
+ const struct ia_css_binary *binary
+ = ia_css_stream_get_shading_correction_binary(stream);
+ struct ia_css_pipe *pipe;
+
+ /**********************************************************************/
+ /* The following code is copied from ia_css_stream_get_shading_correction_binary()
+ * so that the pipe selection matches the binary. */
+ pipe = stream->pipes[0];
+
+ if (stream->num_pipes == 2) {
+ assert(stream->pipes[1] != NULL);
+ if (stream->pipes[1]->config.mode == IA_CSS_PIPE_MODE_VIDEO ||
+ stream->pipes[1]->config.mode == IA_CSS_PIPE_MODE_PREVIEW)
+ pipe = stream->pipes[1];
+ }
+ /**********************************************************************/
+ if (binary) {
+ if (params->sc_config) {
+ ia_css_shading_table_free(params->sc_config);
+ params->sc_config = NULL;
+ }
+ prepare_shading_table(
+ (const struct ia_css_shading_table *)params->sc_table,
+ params->sensor_binning,
+ &params->sc_config,
+ binary, pipe->required_bds_factor);
+
+ table = params->sc_config;
+ /* The sc_config will be freed in the
+ * ia_css_stream_isp_parameters_uninit function. */
+ }
+/* ------ deprecated(bz675) : to ------ */
+ }
+
+ IA_CSS_LEAVE("table=%p", table);
+
+ return table;
+}
+
+
+hrt_vaddress sh_css_store_sp_group_to_ddr(void)
+{
+ IA_CSS_ENTER_LEAVE_PRIVATE("void");
+ mmgr_store(xmem_sp_group_ptrs,
+ &sh_css_sp_group,
+ sizeof(struct sh_css_sp_group));
+ return xmem_sp_group_ptrs;
+}
+
+hrt_vaddress sh_css_store_sp_stage_to_ddr(
+ unsigned pipe,
+ unsigned stage)
+{
+ IA_CSS_ENTER_LEAVE_PRIVATE("void");
+ mmgr_store(xmem_sp_stage_ptrs[pipe][stage],
+ &sh_css_sp_stage,
+ sizeof(struct sh_css_sp_stage));
+ return xmem_sp_stage_ptrs[pipe][stage];
+}
+
+hrt_vaddress sh_css_store_isp_stage_to_ddr(
+ unsigned pipe,
+ unsigned stage)
+{
+ IA_CSS_ENTER_LEAVE_PRIVATE("void");
+ mmgr_store(xmem_isp_stage_ptrs[pipe][stage],
+ &sh_css_isp_stage,
+ sizeof(struct sh_css_isp_stage));
+ return xmem_isp_stage_ptrs[pipe][stage];
+}
+
+static enum ia_css_err ref_sh_css_ddr_address_map(
+ struct sh_css_ddr_address_map *map,
+ struct sh_css_ddr_address_map *out)
+{
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ unsigned int i;
+
+ /* We use a union to copy things: overlay an array of addresses
+ on the struct. That way, adding fields to the struct keeps
+ this code working, and we do not get type errors.
+ */
+ union {
+ struct sh_css_ddr_address_map *map;
+ hrt_vaddress *addrs;
+ } in_addrs, to_addrs;
+
+ IA_CSS_ENTER_PRIVATE("void");
+ assert(map != NULL);
+ assert(out != NULL);
+
+ in_addrs.map = map;
+ to_addrs.map = out;
+
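+ /* The address map and the size map must have the same number of
+ entries for the per-entry copy below to be valid. */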
+ assert(sizeof(struct sh_css_ddr_address_map_size)/sizeof(size_t) ==
+ sizeof(struct sh_css_ddr_address_map)/sizeof(hrt_vaddress));
+
+ /* copy the map entry by entry, taking a reference on each valid buffer */
+ for (i = 0; i < (sizeof(struct sh_css_ddr_address_map_size)/
+ sizeof(size_t)); i++) {
+ if (in_addrs.addrs[i] == mmgr_NULL)
+ to_addrs.addrs[i] = mmgr_NULL;
+ else
+ to_addrs.addrs[i] = ia_css_refcount_increment(IA_CSS_REFCOUNT_PARAM_BUFFER, in_addrs.addrs[i]);
+ }
+
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+}
+
+static enum ia_css_err write_ia_css_isp_parameter_set_info_to_ddr(
+ struct ia_css_isp_parameter_set_info *me,
+ hrt_vaddress *out)
+{
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ bool succ;
+
+ IA_CSS_ENTER_PRIVATE("void");
+
+ assert(me != NULL);
+ assert(out != NULL);
+
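+ /* Allocate a DDR copy of the parameter set info, take a reference
+ on it and store the host-side copy into it. */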
+ *out = ia_css_refcount_increment(IA_CSS_REFCOUNT_PARAM_SET_POOL, mmgr_malloc(
+ sizeof(struct ia_css_isp_parameter_set_info)));
+ succ = (*out != mmgr_NULL);
+ if (succ)
+ mmgr_store(*out,
+ me, sizeof(struct ia_css_isp_parameter_set_info));
+ else
+ err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
+
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+}
+
+static enum ia_css_err
+free_ia_css_isp_parameter_set_info(
+ hrt_vaddress ptr)
+{
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ struct ia_css_isp_parameter_set_info isp_params_info;
+ unsigned int i;
+ hrt_vaddress *addrs = (hrt_vaddress *)&isp_params_info.mem_map;
+
+ IA_CSS_ENTER_PRIVATE("ptr = %p", ptr);
+
+ /* sanity check - ptr must be valid */
+ if (!ia_css_refcount_is_valid(ptr)) {
+ IA_CSS_ERROR("%s: IA_CSS_REFCOUNT_PARAM_SET_POOL(0x%x) invalid arg", __func__, ptr);
+ err = IA_CSS_ERR_INVALID_ARGUMENTS;
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+ }
+
+ mmgr_load(ptr, &isp_params_info.mem_map, sizeof(struct sh_css_ddr_address_map));
+ /* walk the map entry by entry and release each referenced buffer */
+ for (i = 0; i < (sizeof(struct sh_css_ddr_address_map_size)/
+ sizeof(size_t)); i++) {
+ if (addrs[i] == mmgr_NULL)
+ continue;
+
+ /* sanity check - ptr must be valid */
+#ifndef ISP2401
+ if (!ia_css_refcount_is_valid(addrs[i])) {
+#else
+ if (ia_css_refcount_is_valid(addrs[i])) {
+ ia_css_refcount_decrement(IA_CSS_REFCOUNT_PARAM_BUFFER, addrs[i]);
+ } else {
+#endif
+ IA_CSS_ERROR("%s: IA_CSS_REFCOUNT_PARAM_BUFFER(0x%x) invalid arg", __func__, ptr);
+ err = IA_CSS_ERR_INVALID_ARGUMENTS;
+ continue;
+ }
+#ifndef ISP2401
+
+ ia_css_refcount_decrement(IA_CSS_REFCOUNT_PARAM_BUFFER, addrs[i]);
+#endif
+ }
+ ia_css_refcount_decrement(IA_CSS_REFCOUNT_PARAM_SET_POOL, ptr);
+
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
+ return err;
+}
+
+/* Mark all parameters as changed to force recomputing the derived ISP parameters */
+void
+sh_css_invalidate_params(struct ia_css_stream *stream)
+{
+ struct ia_css_isp_parameters *params;
+ unsigned i, j, mem;
+
+ IA_CSS_ENTER_PRIVATE("void");
+ assert(stream != NULL);
+
+ params = stream->isp_params_configs;
+ params->isp_params_changed = true;
+ for (i = 0; i < IA_CSS_PIPE_ID_NUM; i++) {
+ for (j = 0; j < SH_CSS_MAX_STAGES; j++) {
+ for (mem = 0; mem < N_IA_CSS_MEMORIES; mem++) {
+ params->isp_mem_params_changed[i][j][mem] = true;
+ }
+ }
+ }
+
+ memset(&params->config_changed[0], 1, sizeof(params->config_changed));
+ params->dis_coef_table_changed = true;
+ params->dvs2_coef_table_changed = true;
+ params->morph_table_changed = true;
+ params->sc_table_changed = true;
+ params->dz_config_changed = true;
+ params->motion_config_changed = true;
+
+ /* Free up the DVS table memory blocks before recomputing a new table */
+ for (i = 0; i < IA_CSS_PIPE_ID_NUM; i++) {
+ if (params->pipe_dvs_6axis_config[i]) {
+ free_dvs_6axis_table(&(params->pipe_dvs_6axis_config[i]));
+ params->pipe_dvs_6axis_config_changed[i] = true;
+ }
+ }
+
+ IA_CSS_LEAVE_PRIVATE("void");
+}
+
+void
+sh_css_update_uds_and_crop_info(
+ const struct ia_css_binary_info *info,
+ const struct ia_css_frame_info *in_frame_info,
+ const struct ia_css_frame_info *out_frame_info,
+ const struct ia_css_resolution *dvs_env,
+ const struct ia_css_dz_config *zoom,
+ const struct ia_css_vector *motion_vector,
+ struct sh_css_uds_info *uds, /* out */
+ struct sh_css_crop_pos *sp_out_crop_pos, /* out */
+ bool enable_zoom)
+{
+ IA_CSS_ENTER_PRIVATE("void");
+
+ assert(info != NULL);
+ assert(in_frame_info != NULL);
+ assert(out_frame_info != NULL);
+ assert(dvs_env != NULL);
+ assert(zoom != NULL);
+ assert(motion_vector != NULL);
+ assert(uds != NULL);
+ assert(sp_out_crop_pos != NULL);
+
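+ /* HRT_GDC_N corresponds to unity scaling, i.e. no digital zoom. */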
+ uds->curr_dx = enable_zoom ? (uint16_t)zoom->dx : HRT_GDC_N;
+ uds->curr_dy = enable_zoom ? (uint16_t)zoom->dy : HRT_GDC_N;
+
+ if (info->enable.dvs_envelope) {
+ unsigned int crop_x = 0,
+ crop_y = 0,
+ uds_xc = 0,
+ uds_yc = 0,
+ env_width, env_height;
+ int half_env_x, half_env_y;
+ int motion_x = motion_vector->x;
+ int motion_y = motion_vector->y;
+ bool upscale_x = in_frame_info->res.width < out_frame_info->res.width;
+ bool upscale_y = in_frame_info->res.height < out_frame_info->res.height;
+
+ if (info->enable.uds && !info->enable.ds) {
+ /**
+ * we calculate with the envelope that we can actually
+ * use, the min dvs envelope is for the filter
+ * initialization.
+ */
+ env_width = dvs_env->width -
+ SH_CSS_MIN_DVS_ENVELOPE;
+ env_height = dvs_env->height -
+ SH_CSS_MIN_DVS_ENVELOPE;
+ half_env_x = env_width / 2;
+ half_env_y = env_height / 2;
+ /**
+ * for digital zoom, we use the dvs envelope and make
+ * sure that we don't include the 8 leftmost pixels or
+ * 8 topmost rows.
+ */
+ if (upscale_x) {
+ uds_xc = (in_frame_info->res.width
+ + env_width
+ + SH_CSS_MIN_DVS_ENVELOPE) / 2;
+ } else {
+ uds_xc = (out_frame_info->res.width
+ + env_width) / 2
+ + SH_CSS_MIN_DVS_ENVELOPE;
+ }
+ if (upscale_y) {
+ uds_yc = (in_frame_info->res.height
+ + env_height
+ + SH_CSS_MIN_DVS_ENVELOPE) / 2;
+ } else {
+ uds_yc = (out_frame_info->res.height
+ + env_height) / 2
+ + SH_CSS_MIN_DVS_ENVELOPE;
+ }
+ /* clip the motion vector to +/- half the envelope */
+ motion_x = clamp(motion_x, -half_env_x, half_env_x);
+ motion_y = clamp(motion_y, -half_env_y, half_env_y);
+ uds_xc += motion_x;
+ uds_yc += motion_y;
+ /* uds can be pipelined, remove top lines */
+ crop_y = 2;
+ } else if (info->enable.ds) {
+ env_width = dvs_env->width;
+ env_height = dvs_env->height;
+ half_env_x = env_width / 2;
+ half_env_y = env_height / 2;
+ /* clip the motion vector to +/- half the envelope */
+ motion_x = clamp(motion_x, -half_env_x, half_env_x);
+ motion_y = clamp(motion_y, -half_env_y, half_env_y);
+ /* for video with downscaling, the envelope is included
+ in the input resolution. */
+ uds_xc = in_frame_info->res.width/2 + motion_x;
+ uds_yc = in_frame_info->res.height/2 + motion_y;
+ crop_x = info->pipeline.left_cropping;
+ /* ds == 2 (yuv_ds) can be pipelined, remove top
+ lines */
+ if (info->enable.ds & 1)
+ crop_y = info->pipeline.top_cropping;
+ else
+ crop_y = 2;
+ } else {
+ /* video nodz: here we can only crop. We make sure we
+ crop at least the first 8x8 pixels away. */
+ env_width = dvs_env->width -
+ SH_CSS_MIN_DVS_ENVELOPE;
+ env_height = dvs_env->height -
+ SH_CSS_MIN_DVS_ENVELOPE;
+ half_env_x = env_width / 2;
+ half_env_y = env_height / 2;
+ motion_x = clamp(motion_x, -half_env_x, half_env_x);
+ motion_y = clamp(motion_y, -half_env_y, half_env_y);
+ crop_x = SH_CSS_MIN_DVS_ENVELOPE
+ + half_env_x + motion_x;
+ crop_y = SH_CSS_MIN_DVS_ENVELOPE
+ + half_env_y + motion_y;
+ }
+
+ /* Must enforce that the crop position is even */
+ crop_x = EVEN_FLOOR(crop_x);
+ crop_y = EVEN_FLOOR(crop_y);
+ uds_xc = EVEN_FLOOR(uds_xc);
+ uds_yc = EVEN_FLOOR(uds_yc);
+
+ uds->xc = (uint16_t)uds_xc;
+ uds->yc = (uint16_t)uds_yc;
+ sp_out_crop_pos->x = (uint16_t)crop_x;
+ sp_out_crop_pos->y = (uint16_t)crop_y;
+ }
+ else {
+ /* for down scaling, we always use the center of the image */
+ uds->xc = (uint16_t)in_frame_info->res.width / 2;
+ uds->yc = (uint16_t)in_frame_info->res.height / 2;
+ sp_out_crop_pos->x = (uint16_t)info->pipeline.left_cropping;
+ sp_out_crop_pos->y = (uint16_t)info->pipeline.top_cropping;
+ }
+ IA_CSS_LEAVE_PRIVATE("void");
+}
+
+static enum ia_css_err
+sh_css_update_uds_and_crop_info_based_on_zoom_region(
+ const struct ia_css_binary_info *info,
+ const struct ia_css_frame_info *in_frame_info,
+ const struct ia_css_frame_info *out_frame_info,
+ const struct ia_css_resolution *dvs_env,
+ const struct ia_css_dz_config *zoom,
+ const struct ia_css_vector *motion_vector,
+ struct sh_css_uds_info *uds, /* out */
+ struct sh_css_crop_pos *sp_out_crop_pos, /* out */
+ struct ia_css_resolution pipe_in_res,
+ bool enable_zoom)
+{
+ unsigned int x0 = 0, y0 = 0, x1 = 0, y1 = 0;
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ /* Note:
+ * Filter_Envelope = 0 for NND/LUT
+ * Filter_Envelope = 1 for BCI
+ * Filter_Envelope = 3 for BLI
+ * The filter envelope is currently not taken into account here because
+ * uds.sp.c recalculates dx/dy based on the filter envelope and other
+ * information (ia_css_uds_sp_scale_params). Ideally that should be done
+ * on the host side, not on the SP side.
+ */
+ unsigned int filter_envelope = 0;
+ IA_CSS_ENTER_PRIVATE("void");
+
+ assert(info != NULL);
+ assert(in_frame_info != NULL);
+ assert(out_frame_info != NULL);
+ assert(dvs_env != NULL);
+ assert(zoom != NULL);
+ assert(motion_vector != NULL);
+ assert(uds != NULL);
+ assert(sp_out_crop_pos != NULL);
+ x0 = zoom->zoom_region.origin.x;
+ y0 = zoom->zoom_region.origin.y;
+ x1 = zoom->zoom_region.resolution.width + x0;
+ y1 = zoom->zoom_region.resolution.height + y0;
+
+ if ((x0 > x1) || (y0 > y1) || (x1 > pipe_in_res.width) || (y1 > pipe_in_res.height))
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+
+ if (!enable_zoom) {
+ uds->curr_dx = HRT_GDC_N;
+ uds->curr_dy = HRT_GDC_N;
+ }
+
+ if (info->enable.dvs_envelope) {
+ /* Zoom region is only supported by the UDS module on ISP
+ * 2 and higher. It is not supported in video mode on ISP 1 */
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ } else {
+ if (enable_zoom) {
+ /* A. Calculate dx/dy based on the crop region, using in_frame_info.
+ * Scale the crop region if the in_frame_info of this stage is not the
+ * same as the actual effective input of the pipeline.
+ */
+ if (in_frame_info->res.width != pipe_in_res.width ||
+ in_frame_info->res.height != pipe_in_res.height) {
+ x0 = (x0 * in_frame_info->res.width) / (pipe_in_res.width);
+ y0 = (y0 * in_frame_info->res.height) / (pipe_in_res.height);
+ x1 = (x1 * in_frame_info->res.width) / (pipe_in_res.width);
+ y1 = (y1 * in_frame_info->res.height) / (pipe_in_res.height);
+ }
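+ /* dx/dy are scale factors in HRT_GDC_N fixed point: the ratio of
+ * the (rescaled) crop region size to the input frame size. */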
+ uds->curr_dx =
+ ((x1 - x0 - filter_envelope) * HRT_GDC_N) / in_frame_info->res.width;
+ uds->curr_dy =
+ ((y1 - y0 - filter_envelope) * HRT_GDC_N) / in_frame_info->res.height;
+
+ /* B. Calculate xc/yc based on crop region */
+ uds->xc = (uint16_t) x0 + (((x1)-(x0)) / 2);
+ uds->yc = (uint16_t) y0 + (((y1)-(y0)) / 2);
+ } else {
+ uds->xc = (uint16_t)in_frame_info->res.width / 2;
+ uds->yc = (uint16_t)in_frame_info->res.height / 2;
+ }
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "uds->curr_dx=%d, uds->xc=%d, uds->yc=%d\n",
+ uds->curr_dx, uds->xc, uds->yc);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "x0=%d, y0=%d, x1=%d, y1=%d\n",
+ x0, y0, x1, y1);
+ sp_out_crop_pos->x = (uint16_t)info->pipeline.left_cropping;
+ sp_out_crop_pos->y = (uint16_t)info->pipeline.top_cropping;
+ }
+ IA_CSS_LEAVE_PRIVATE("void");
+ return err;
+}
+
+struct ia_css_3a_statistics *
+ia_css_3a_statistics_allocate(const struct ia_css_3a_grid_info *grid)
+{
+ struct ia_css_3a_statistics *me;
+ int grid_size;
+
+ IA_CSS_ENTER("grid=%p", grid);
+
+ assert(grid != NULL);
+
+ me = sh_css_calloc(1, sizeof(*me));
+ if (!me)
+ goto err;
+
+ me->grid = *grid;
+ grid_size = grid->width * grid->height;
+ me->data = sh_css_malloc(grid_size * sizeof(*me->data));
+ if (!me->data)
+ goto err;
+#if !defined(HAS_NO_HMEM)
+ /* There is no weighted histogram structure; treat the histogram data as a raw byte dump into a byte array */
+ me->rgby_data = (struct ia_css_3a_rgby_output *)sh_css_malloc(sizeof_hmem(HMEM0_ID));
+#else
+ me->rgby_data = NULL;
+#endif
+
+ IA_CSS_LEAVE("return=%p", me);
+ return me;
+err:
+ ia_css_3a_statistics_free(me);
+
+ IA_CSS_LEAVE("return=%p", NULL);
+ return NULL;
+}
+
+void
+ia_css_3a_statistics_free(struct ia_css_3a_statistics *me)
+{
+ if (me) {
+ sh_css_free(me->rgby_data);
+ sh_css_free(me->data);
+ memset(me, 0, sizeof(struct ia_css_3a_statistics));
+ sh_css_free(me);
+ }
+}
+
+struct ia_css_dvs_statistics *
+ia_css_dvs_statistics_allocate(const struct ia_css_dvs_grid_info *grid)
+{
+ struct ia_css_dvs_statistics *me;
+
+ assert(grid != NULL);
+
+ me = sh_css_calloc(1, sizeof(*me));
+ if (!me)
+ goto err;
+
+ me->grid = *grid;
+ me->hor_proj = sh_css_malloc(grid->height * IA_CSS_DVS_NUM_COEF_TYPES *
+ sizeof(*me->hor_proj));
+ if (!me->hor_proj)
+ goto err;
+
+ me->ver_proj = sh_css_malloc(grid->width * IA_CSS_DVS_NUM_COEF_TYPES *
+ sizeof(*me->ver_proj));
+ if (!me->ver_proj)
+ goto err;
+
+ return me;
+err:
+ ia_css_dvs_statistics_free(me);
+ return NULL;
+
+}
+
+void
+ia_css_dvs_statistics_free(struct ia_css_dvs_statistics *me)
+{
+ if (me) {
+ sh_css_free(me->hor_proj);
+ sh_css_free(me->ver_proj);
+ memset(me, 0, sizeof(struct ia_css_dvs_statistics));
+ sh_css_free(me);
+ }
+}
+
+struct ia_css_dvs_coefficients *
+ia_css_dvs_coefficients_allocate(const struct ia_css_dvs_grid_info *grid)
+{
+ struct ia_css_dvs_coefficients *me;
+
+ assert(grid != NULL);
+
+ me = sh_css_calloc(1, sizeof(*me));
+ if (!me)
+ goto err;
+
+ me->grid = *grid;
+
+ me->hor_coefs = sh_css_malloc(grid->num_hor_coefs *
+ IA_CSS_DVS_NUM_COEF_TYPES *
+ sizeof(*me->hor_coefs));
+ if (!me->hor_coefs)
+ goto err;
+
+ me->ver_coefs = sh_css_malloc(grid->num_ver_coefs *
+ IA_CSS_DVS_NUM_COEF_TYPES *
+ sizeof(*me->ver_coefs));
+ if (!me->ver_coefs)
+ goto err;
+
+ return me;
+err:
+ ia_css_dvs_coefficients_free(me);
+ return NULL;
+}
+
+void
+ia_css_dvs_coefficients_free(struct ia_css_dvs_coefficients *me)
+{
+ if (me) {
+ sh_css_free(me->hor_coefs);
+ sh_css_free(me->ver_coefs);
+ memset(me, 0, sizeof(struct ia_css_dvs_coefficients));
+ sh_css_free(me);
+ }
+}
+
+struct ia_css_dvs2_statistics *
+ia_css_dvs2_statistics_allocate(const struct ia_css_dvs_grid_info *grid)
+{
+ struct ia_css_dvs2_statistics *me;
+
+ assert(grid != NULL);
+
+ me = sh_css_calloc(1, sizeof(*me));
+ if (!me)
+ goto err;
+
+ me->grid = *grid;
+
+ me->hor_prod.odd_real = sh_css_malloc(grid->aligned_width *
+ grid->aligned_height * sizeof(*me->hor_prod.odd_real));
+ if (!me->hor_prod.odd_real)
+ goto err;
+
+ me->hor_prod.odd_imag = sh_css_malloc(grid->aligned_width *
+ grid->aligned_height * sizeof(*me->hor_prod.odd_imag));
+ if (!me->hor_prod.odd_imag)
+ goto err;
+
+ me->hor_prod.even_real = sh_css_malloc(grid->aligned_width *
+ grid->aligned_height * sizeof(*me->hor_prod.even_real));
+ if (!me->hor_prod.even_real)
+ goto err;
+
+ me->hor_prod.even_imag = sh_css_malloc(grid->aligned_width *
+ grid->aligned_height * sizeof(*me->hor_prod.even_imag));
+ if (!me->hor_prod.even_imag)
+ goto err;
+
+ me->ver_prod.odd_real = sh_css_malloc(grid->aligned_width *
+ grid->aligned_height * sizeof(*me->ver_prod.odd_real));
+ if (!me->ver_prod.odd_real)
+ goto err;
+
+ me->ver_prod.odd_imag = sh_css_malloc(grid->aligned_width *
+ grid->aligned_height * sizeof(*me->ver_prod.odd_imag));
+ if (!me->ver_prod.odd_imag)
+ goto err;
+
+ me->ver_prod.even_real = sh_css_malloc(grid->aligned_width *
+ grid->aligned_height * sizeof(*me->ver_prod.even_real));
+ if (!me->ver_prod.even_real)
+ goto err;
+
+ me->ver_prod.even_imag = sh_css_malloc(grid->aligned_width *
+ grid->aligned_height * sizeof(*me->ver_prod.even_imag));
+ if (!me->ver_prod.even_imag)
+ goto err;
+
+ return me;
+err:
+ ia_css_dvs2_statistics_free(me);
+ return NULL;
+
+}
+
+void
+ia_css_dvs2_statistics_free(struct ia_css_dvs2_statistics *me)
+{
+ if (me) {
+ sh_css_free(me->hor_prod.odd_real);
+ sh_css_free(me->hor_prod.odd_imag);
+ sh_css_free(me->hor_prod.even_real);
+ sh_css_free(me->hor_prod.even_imag);
+ sh_css_free(me->ver_prod.odd_real);
+ sh_css_free(me->ver_prod.odd_imag);
+ sh_css_free(me->ver_prod.even_real);
+ sh_css_free(me->ver_prod.even_imag);
+ memset(me, 0, sizeof(struct ia_css_dvs2_statistics));
+ sh_css_free(me);
+ }
+}
+
+
+struct ia_css_dvs2_coefficients *
+ia_css_dvs2_coefficients_allocate(const struct ia_css_dvs_grid_info *grid)
+{
+ struct ia_css_dvs2_coefficients *me;
+
+ assert(grid != NULL);
+
+ me = sh_css_calloc(1, sizeof(*me));
+ if (!me)
+ goto err;
+
+ me->grid = *grid;
+
+ me->hor_coefs.odd_real = sh_css_malloc(grid->num_hor_coefs *
+ sizeof(*me->hor_coefs.odd_real));
+ if (!me->hor_coefs.odd_real)
+ goto err;
+
+ me->hor_coefs.odd_imag = sh_css_malloc(grid->num_hor_coefs *
+ sizeof(*me->hor_coefs.odd_imag));
+ if (!me->hor_coefs.odd_imag)
+ goto err;
+
+ me->hor_coefs.even_real = sh_css_malloc(grid->num_hor_coefs *
+ sizeof(*me->hor_coefs.even_real));
+ if (!me->hor_coefs.even_real)
+ goto err;
+
+ me->hor_coefs.even_imag = sh_css_malloc(grid->num_hor_coefs *
+ sizeof(*me->hor_coefs.even_imag));
+ if (!me->hor_coefs.even_imag)
+ goto err;
+
+ me->ver_coefs.odd_real = sh_css_malloc(grid->num_ver_coefs *
+ sizeof(*me->ver_coefs.odd_real));
+ if (!me->ver_coefs.odd_real)
+ goto err;
+
+ me->ver_coefs.odd_imag = sh_css_malloc(grid->num_ver_coefs *
+ sizeof(*me->ver_coefs.odd_imag));
+ if (!me->ver_coefs.odd_imag)
+ goto err;
+
+ me->ver_coefs.even_real = sh_css_malloc(grid->num_ver_coefs *
+ sizeof(*me->ver_coefs.even_real));
+ if (!me->ver_coefs.even_real)
+ goto err;
+
+ me->ver_coefs.even_imag = sh_css_malloc(grid->num_ver_coefs *
+ sizeof(*me->ver_coefs.even_imag));
+ if (!me->ver_coefs.even_imag)
+ goto err;
+
+ return me;
+err:
+ ia_css_dvs2_coefficients_free(me);
+ return NULL;
+}
+
+void
+ia_css_dvs2_coefficients_free(struct ia_css_dvs2_coefficients *me)
+{
+ if (me) {
+ sh_css_free(me->hor_coefs.odd_real);
+ sh_css_free(me->hor_coefs.odd_imag);
+ sh_css_free(me->hor_coefs.even_real);
+ sh_css_free(me->hor_coefs.even_imag);
+ sh_css_free(me->ver_coefs.odd_real);
+ sh_css_free(me->ver_coefs.odd_imag);
+ sh_css_free(me->ver_coefs.even_real);
+ sh_css_free(me->ver_coefs.even_imag);
+ memset(me, 0, sizeof(struct ia_css_dvs2_coefficients));
+ sh_css_free(me);
+ }
+}
+
+struct ia_css_dvs_6axis_config *
+ia_css_dvs2_6axis_config_allocate(const struct ia_css_stream *stream)
+{
+ struct ia_css_dvs_6axis_config *dvs_config = NULL;
+ struct ia_css_isp_parameters *params = NULL;
+ unsigned int width_y;
+ unsigned int height_y;
+ unsigned int width_uv;
+ unsigned int height_uv;
+
+ assert(stream != NULL);
+ params = stream->isp_params_configs;
+
+ /* For backward compatibility, consider the pipe as video by default. */
+ if (!params || !params->pipe_dvs_6axis_config[IA_CSS_PIPE_ID_VIDEO]) {
+ goto err;
+ }
+
+ dvs_config = (struct ia_css_dvs_6axis_config *)sh_css_calloc(1, sizeof(struct ia_css_dvs_6axis_config));
+ if (!dvs_config)
+ goto err;
+
+ dvs_config->width_y = width_y = params->pipe_dvs_6axis_config[IA_CSS_PIPE_ID_VIDEO]->width_y;
+ dvs_config->height_y = height_y = params->pipe_dvs_6axis_config[IA_CSS_PIPE_ID_VIDEO]->height_y;
+ dvs_config->width_uv = width_uv = params->pipe_dvs_6axis_config[IA_CSS_PIPE_ID_VIDEO]->width_uv;
+ dvs_config->height_uv = height_uv = params->pipe_dvs_6axis_config[IA_CSS_PIPE_ID_VIDEO]->height_uv;
+ IA_CSS_LOG("table Y: W %d H %d", width_y, height_y);
+ IA_CSS_LOG("table UV: W %d H %d", width_uv, height_uv);
+ dvs_config->xcoords_y = (uint32_t *)sh_css_malloc(width_y * height_y * sizeof(uint32_t));
+ if (!dvs_config->xcoords_y)
+ goto err;
+
+ dvs_config->ycoords_y = (uint32_t *)sh_css_malloc(width_y * height_y * sizeof(uint32_t));
+ if (!dvs_config->ycoords_y)
+ goto err;
+
+ dvs_config->xcoords_uv = (uint32_t *)sh_css_malloc(width_uv * height_uv * sizeof(uint32_t));
+ if (!dvs_config->xcoords_uv)
+ goto err;
+
+ dvs_config->ycoords_uv = (uint32_t *)sh_css_malloc(width_uv * height_uv * sizeof(uint32_t));
+ if (!dvs_config->ycoords_uv)
+ goto err;
+
+ return dvs_config;
+err:
+ ia_css_dvs2_6axis_config_free(dvs_config);
+ return NULL;
+}
+
+void
+ia_css_dvs2_6axis_config_free(struct ia_css_dvs_6axis_config *dvs_6axis_config)
+{
+ if (dvs_6axis_config) {
+ sh_css_free(dvs_6axis_config->xcoords_y);
+ sh_css_free(dvs_6axis_config->ycoords_y);
+ sh_css_free(dvs_6axis_config->xcoords_uv);
+ sh_css_free(dvs_6axis_config->ycoords_uv);
+ memset(dvs_6axis_config, 0, sizeof(struct ia_css_dvs_6axis_config));
+ sh_css_free(dvs_6axis_config);
+ }
+}
+
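+/* Enable or disable digital zoom on the capture post-processing stage
+ * of the capture pipe(s) in this stream. */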
+void
+ia_css_en_dz_capt_pipe(struct ia_css_stream *stream, bool enable)
+{
+ struct ia_css_pipe *pipe;
+ struct ia_css_pipeline *pipeline;
+ struct ia_css_pipeline_stage *stage;
+ enum ia_css_pipe_id pipe_id;
+ enum ia_css_err err;
+ int i;
+
+ if (stream == NULL)
+ return;
+
+ for (i = 0; i < stream->num_pipes; i++) {
+ pipe = stream->pipes[i];
+ pipeline = ia_css_pipe_get_pipeline(pipe);
+ pipe_id = pipeline->pipe_id;
+
+ if (pipe_id == IA_CSS_PIPE_ID_CAPTURE) {
+ err = ia_css_pipeline_get_stage(pipeline, IA_CSS_BINARY_MODE_CAPTURE_PP, &stage);
+ if (err == IA_CSS_SUCCESS)
+ stage->enable_zoom = enable;
+ break;
+ }
+ }
+}
+
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_params.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_params.h
new file mode 100644
index 000000000000..a7ffe6d8331b
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_params.h
@@ -0,0 +1,188 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _SH_CSS_PARAMS_H_
+#define _SH_CSS_PARAMS_H_
+
+/*! \file */
+
+/* Forward declaration to break mutual dependency */
+struct ia_css_isp_parameters;
+
+#include <type_support.h>
+#include "ia_css_types.h"
+#include "ia_css_binary.h"
+#include "sh_css_legacy.h"
+
+#include "sh_css_defs.h" /* SH_CSS_MAX_STAGES */
+#include "ia_css_pipeline.h"
+#include "ia_css_isp_params.h"
+#include "uds/uds_1.0/ia_css_uds_param.h"
+#include "crop/crop_1.0/ia_css_crop_types.h"
+
+
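+/* Run-in of the pixel-shift filter; used when computing the default
+ * DVS 6-axis offset in sh_css_params_write_to_ddr_internal(). */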
+#define PIX_SHIFT_FILTER_RUN_IN_X 12
+#define PIX_SHIFT_FILTER_RUN_IN_Y 12
+
+#include "ob/ob_1.0/ia_css_ob_param.h"
+/* Isp configurations per stream */
+struct sh_css_isp_param_configs {
+ /* OB (Optical Black) */
+ struct sh_css_isp_ob_stream_config ob;
+};
+
+
+/* Isp parameters per stream */
+struct ia_css_isp_parameters {
+ /* UDS */
+ struct sh_css_sp_uds_params uds[SH_CSS_MAX_STAGES];
+ struct sh_css_isp_param_configs stream_configs;
+ struct ia_css_fpn_table fpn_config;
+ struct ia_css_vector motion_config;
+ const struct ia_css_morph_table *morph_table;
+ const struct ia_css_shading_table *sc_table;
+ struct ia_css_shading_table *sc_config;
+ struct ia_css_macc_table macc_table;
+ struct ia_css_gamma_table gc_table;
+ struct ia_css_ctc_table ctc_table;
+ struct ia_css_xnr_table xnr_table;
+
+ struct ia_css_dz_config dz_config;
+ struct ia_css_3a_config s3a_config;
+ struct ia_css_wb_config wb_config;
+ struct ia_css_cc_config cc_config;
+ struct ia_css_cc_config yuv2rgb_cc_config;
+ struct ia_css_cc_config rgb2yuv_cc_config;
+ struct ia_css_tnr_config tnr_config;
+ struct ia_css_ob_config ob_config;
+ /*----- DPC configuration -----*/
+ /* The default DPC configuration is retained and currently set
+ * using the stream configuration. The code generated from genparams
+ * uses this configuration to set the DPC parameters per stage but this
+ * will be overwritten by the per pipe configuration */
+ struct ia_css_dp_config dp_config;
+ /* ------ pipe specific DPC configuration ------ */
+ /* Please note that this implementation is a temporary solution and
+ * should be replaced by CSS per pipe configuration when the support
+ * is ready (HSD 1303967698)*/
+ struct ia_css_dp_config pipe_dp_config[IA_CSS_PIPE_ID_NUM];
+ struct ia_css_nr_config nr_config;
+ struct ia_css_ee_config ee_config;
+ struct ia_css_de_config de_config;
+ struct ia_css_gc_config gc_config;
+ struct ia_css_anr_config anr_config;
+ struct ia_css_ce_config ce_config;
+ struct ia_css_formats_config formats_config;
+/* ---- deprecated: replaced with pipe_dvs_6axis_config---- */
+ struct ia_css_dvs_6axis_config *dvs_6axis_config;
+ struct ia_css_ecd_config ecd_config;
+ struct ia_css_ynr_config ynr_config;
+ struct ia_css_yee_config yee_config;
+ struct ia_css_fc_config fc_config;
+ struct ia_css_cnr_config cnr_config;
+ struct ia_css_macc_config macc_config;
+ struct ia_css_ctc_config ctc_config;
+ struct ia_css_aa_config aa_config;
+ struct ia_css_aa_config bds_config;
+ struct ia_css_aa_config raa_config;
+ struct ia_css_rgb_gamma_table r_gamma_table;
+ struct ia_css_rgb_gamma_table g_gamma_table;
+ struct ia_css_rgb_gamma_table b_gamma_table;
+ struct ia_css_anr_thres anr_thres;
+ struct ia_css_xnr_config xnr_config;
+ struct ia_css_xnr3_config xnr3_config;
+ struct ia_css_uds_config uds_config;
+ struct ia_css_crop_config crop_config;
+ struct ia_css_output_config output_config;
+ struct ia_css_dvs_6axis_config *pipe_dvs_6axis_config[IA_CSS_PIPE_ID_NUM];
+/* ------ deprecated(bz675) : from ------ */
+ struct ia_css_shading_settings shading_settings;
+/* ------ deprecated(bz675) : to ------ */
+ struct ia_css_dvs_coefficients dvs_coefs;
+ struct ia_css_dvs2_coefficients dvs2_coefs;
+
+ bool isp_params_changed;
+ bool isp_mem_params_changed
+ [IA_CSS_PIPE_ID_NUM][SH_CSS_MAX_STAGES][IA_CSS_NUM_MEMORIES];
+ bool dz_config_changed;
+ bool motion_config_changed;
+ bool dis_coef_table_changed;
+ bool dvs2_coef_table_changed;
+ bool morph_table_changed;
+ bool sc_table_changed;
+ bool sc_table_dirty;
+ unsigned int sc_table_last_pipe_num;
+ bool anr_thres_changed;
+/* ---- deprecated: replaced with pipe_dvs_6axis_config_changed ---- */
+ bool dvs_6axis_config_changed;
+ /* ------ pipe specific DPC configuration ------ */
+ /* Please note that this implementation is a temporary solution and
+ * should be replaced by CSS per pipe configuration when the support
+ * is ready (HSD 1303967698) */
+ bool pipe_dpc_config_changed[IA_CSS_PIPE_ID_NUM];
+/* ------ deprecated(bz675) : from ------ */
+ bool shading_settings_changed;
+/* ------ deprecated(bz675) : to ------ */
+ bool pipe_dvs_6axis_config_changed[IA_CSS_PIPE_ID_NUM];
+
+ bool config_changed[IA_CSS_NUM_PARAMETER_IDS];
+
+ unsigned int sensor_binning;
+ /* local buffers, used to re-order the 3a statistics in vmem-format */
+ struct sh_css_ddr_address_map pipe_ddr_ptrs[IA_CSS_PIPE_ID_NUM];
+ struct sh_css_ddr_address_map_size pipe_ddr_ptrs_size[IA_CSS_PIPE_ID_NUM];
+ struct sh_css_ddr_address_map ddr_ptrs;
+ struct sh_css_ddr_address_map_size ddr_ptrs_size;
+ struct ia_css_frame *output_frame; /**< Output frame the config is to be applied to (optional) */
+ uint32_t isp_parameters_id; /**< Unique ID to track which config was actually applied to a particular frame */
+};
+
+void
+ia_css_params_store_ia_css_host_data(
+ hrt_vaddress ddr_addr,
+ struct ia_css_host_data *data);
+
+enum ia_css_err
+ia_css_params_store_sctbl(
+ const struct ia_css_pipeline_stage *stage,
+ hrt_vaddress ddr_addr,
+ const struct ia_css_shading_table *shading_table);
+
+struct ia_css_host_data *
+ia_css_params_alloc_convert_sctbl(
+ const struct ia_css_pipeline_stage *stage,
+ const struct ia_css_shading_table *shading_table);
+
+struct ia_css_isp_config *
+sh_css_pipe_isp_config_get(struct ia_css_pipe *pipe);
+
+/* ipu address allocation/free for gdc lut */
+hrt_vaddress
+sh_css_params_alloc_gdc_lut(void);
+void
+sh_css_params_free_gdc_lut(hrt_vaddress addr);
+
+enum ia_css_err
+sh_css_params_map_and_store_default_gdc_lut(void);
+
+void
+sh_css_params_free_default_gdc_lut(void);
+
+hrt_vaddress
+sh_css_params_get_default_gdc_lut(void);
+
+hrt_vaddress
+sh_css_pipe_get_pp_gdc_lut(const struct ia_css_pipe *pipe);
+
+#endif /* _SH_CSS_PARAMS_H_ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_params_internal.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_params_internal.h
new file mode 100644
index 000000000000..baca24532f9f
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_params_internal.h
@@ -0,0 +1,21 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _SH_CSS_PARAMS_INTERNAL_H_
+#define _SH_CSS_PARAMS_INTERNAL_H_
+
+void
+sh_css_param_clear_param_sets(void);
+
+#endif /* _SH_CSS_PARAMS_INTERNAL_H_ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_pipe.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_pipe.c
new file mode 100644
index 000000000000..1f57ffad8921
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_pipe.c
@@ -0,0 +1,16 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+/* This file will contain the code to implement the functions declared in ia_css_pipe.h and ia_css_pipe_public.h
+ and associated helper functions */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_properties.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_properties.c
new file mode 100644
index 000000000000..ad46996cfbd3
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_properties.c
@@ -0,0 +1,43 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "ia_css_properties.h"
+#include <assert_support.h>
+#include "ia_css_types.h"
+#include "gdc_device.h"
+
+void
+ia_css_get_properties(struct ia_css_properties *properties)
+{
+ assert(properties != NULL);
+#if defined(HAS_GDC_VERSION_2) || defined(HAS_GDC_VERSION_3)
+/*
+ * MW: We don't want to store the full-range coordinates in memory: truncate.
+ */
+ properties->gdc_coord_one = gdc_get_unity(GDC0_ID)/HRT_GDC_COORD_SCALE;
+#else
+#error "Unknown GDC version"
+#endif
+
+ properties->l1_base_is_index = true;
+
+#if defined(HAS_VAMEM_VERSION_1)
+ properties->vamem_type = IA_CSS_VAMEM_TYPE_1;
+#elif defined(HAS_VAMEM_VERSION_2)
+ properties->vamem_type = IA_CSS_VAMEM_TYPE_2;
+#else
+#error "Unknown VAMEM version"
+#endif
+}
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_shading.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_shading.c
new file mode 100644
index 000000000000..2a2d0f4db44b
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_shading.c
@@ -0,0 +1,16 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+/* This file will contain the code to implement the functions declared in ia_css_shading.h
+ and associated helper functions */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_sp.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_sp.c
new file mode 100644
index 000000000000..e6a345979ff1
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_sp.c
@@ -0,0 +1,1814 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "sh_css_sp.h"
+
+#if !defined(HAS_NO_INPUT_FORMATTER)
+#include "input_formatter.h"
+#endif
+
+#include "dma.h" /* N_DMA_CHANNEL_ID */
+
+#include "ia_css_buffer.h"
+#include "ia_css_binary.h"
+#include "sh_css_hrt.h"
+#include "sh_css_defs.h"
+#include "sh_css_internal.h"
+#include "ia_css_control.h"
+#include "ia_css_debug.h"
+#include "ia_css_debug_pipe.h"
+#include "ia_css_event_public.h"
+#include "ia_css_mmu.h"
+#include "ia_css_stream.h"
+#include "ia_css_isp_param.h"
+#include "sh_css_params.h"
+#include "sh_css_legacy.h"
+#include "ia_css_frame_comm.h"
+#if !defined(HAS_NO_INPUT_SYSTEM)
+#include "ia_css_isys.h"
+#endif
+
+#include "gdc_device.h" /* HRT_GDC_N */
+
+/*#include "sp.h"*/ /* host2sp_enqueue_frame_data() */
+
+#include "memory_access.h"
+
+#include "assert_support.h"
+#include "platform_support.h" /* hrt_sleep() */
+
+#include "sw_event_global.h" /* Event IDs.*/
+#include "ia_css_event.h"
+#include "mmu_device.h"
+#include "ia_css_spctrl.h"
+
+#ifndef offsetof
+#define offsetof(T, x) ((unsigned)&(((T *)0)->x))
+#endif
+
+#define IA_CSS_INCLUDE_CONFIGURATIONS
+#include "ia_css_isp_configs.h"
+#define IA_CSS_INCLUDE_STATES
+#include "ia_css_isp_states.h"
+
+#ifndef ISP2401
+#include "isp/kernels/io_ls/bayer_io_ls/ia_css_bayer_io.host.h"
+#else
+#include "isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io.host.h"
+#endif
+
+struct sh_css_sp_group sh_css_sp_group;
+struct sh_css_sp_stage sh_css_sp_stage;
+struct sh_css_isp_stage sh_css_isp_stage;
+struct sh_css_sp_output sh_css_sp_output;
+static struct sh_css_sp_per_frame_data per_frame_data;
+
+/* true if SP supports frame loop and host2sp_commands */
+/* For the moment there is only code that sets this bool to true */
+/* TODO: add code that sets this bool to false */
+static bool sp_running;
+
+static enum ia_css_err
+set_output_frame_buffer(const struct ia_css_frame *frame,
+ unsigned idx);
+
+static void
+sh_css_copy_buffer_attr_to_spbuffer(struct ia_css_buffer_sp *dest_buf,
+ const enum sh_css_queue_id queue_id,
+ const hrt_vaddress xmem_addr,
+ const enum ia_css_buffer_type buf_type);
+
+static void
+initialize_frame_buffer_attribute(struct ia_css_buffer_sp *buf_attr);
+
+static void
+initialize_stage_frames(struct ia_css_frames_sp *frames);
+
+/* This data is stored every frame */
+void
+store_sp_group_data(void)
+{
+ per_frame_data.sp_group_addr = sh_css_store_sp_group_to_ddr();
+}
+
+static void
+copy_isp_stage_to_sp_stage(void)
+{
+ /* [WW07.5] type casting may cause potential issues */
+ sh_css_sp_stage.num_stripes = (uint8_t) sh_css_isp_stage.binary_info.iterator.num_stripes;
+ sh_css_sp_stage.row_stripes_height = (uint16_t) sh_css_isp_stage.binary_info.iterator.row_stripes_height;
+ sh_css_sp_stage.row_stripes_overlap_lines = (uint16_t) sh_css_isp_stage.binary_info.iterator.row_stripes_overlap_lines;
+ sh_css_sp_stage.top_cropping = (uint16_t) sh_css_isp_stage.binary_info.pipeline.top_cropping;
+ /* moved to sh_css_sp_init_stage
+ sh_css_sp_stage.enable.vf_output =
+ sh_css_isp_stage.binary_info.enable.vf_veceven ||
+ sh_css_isp_stage.binary_info.num_output_pins > 1;
+ */
+ sh_css_sp_stage.enable.sdis = sh_css_isp_stage.binary_info.enable.dis;
+ sh_css_sp_stage.enable.s3a = sh_css_isp_stage.binary_info.enable.s3a;
+#ifdef ISP2401
+ sh_css_sp_stage.enable.lace_stats = sh_css_isp_stage.binary_info.enable.lace_stats;
+#endif
+}
+
+void
+store_sp_stage_data(enum ia_css_pipe_id id, unsigned int pipe_num, unsigned stage)
+{
+ unsigned int thread_id;
+ ia_css_pipeline_get_sp_thread_id(pipe_num, &thread_id);
+ copy_isp_stage_to_sp_stage();
+ if (id != IA_CSS_PIPE_ID_COPY)
+ sh_css_sp_stage.isp_stage_addr =
+ sh_css_store_isp_stage_to_ddr(pipe_num, stage);
+ sh_css_sp_group.pipe[thread_id].sp_stage_addr[stage] =
+ sh_css_store_sp_stage_to_ddr(pipe_num, stage);
+
+ /* Clear for next frame */
+ sh_css_sp_stage.program_input_circuit = false;
+}
+
+static void
+store_sp_per_frame_data(const struct ia_css_fw_info *fw)
+{
+ unsigned int HIVE_ADDR_sp_per_frame_data = 0;
+
+ assert(fw != NULL);
+
+ switch (fw->type) {
+ case ia_css_sp_firmware:
+ HIVE_ADDR_sp_per_frame_data = fw->info.sp.per_frame_data;
+ break;
+ case ia_css_acc_firmware:
+ HIVE_ADDR_sp_per_frame_data = fw->info.acc.per_frame_data;
+ break;
+ case ia_css_isp_firmware:
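+ /* ISP firmware has no SP per-frame data to store. */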
+ return;
+ }
+
+ sp_dmem_store(SP0_ID,
+ (unsigned int)sp_address_of(sp_per_frame_data),
+ &per_frame_data,
+ sizeof(per_frame_data));
+}
+
+static void
+sh_css_store_sp_per_frame_data(enum ia_css_pipe_id pipe_id,
+ unsigned int pipe_num,
+ const struct ia_css_fw_info *sp_fw)
+{
+ if (!sp_fw)
+ sp_fw = &sh_css_sp_fw;
+
+ store_sp_stage_data(pipe_id, pipe_num, 0);
+ store_sp_group_data();
+ store_sp_per_frame_data(sp_fw);
+}
+
+#if SP_DEBUG != SP_DEBUG_NONE
+
+void
+sh_css_sp_get_debug_state(struct sh_css_sp_debug_state *state)
+{
+ const struct ia_css_fw_info *fw = &sh_css_sp_fw;
+ unsigned int HIVE_ADDR_sp_output = fw->info.sp.output;
+ unsigned i;
+ unsigned offset = (unsigned int)offsetof(struct sh_css_sp_output, debug)/sizeof(int);
+
+ assert(state != NULL);
+
+ (void)HIVE_ADDR_sp_output; /* To get rid of warning in CRUN */
+ for (i = 0; i < sizeof(*state)/sizeof(int); i++)
+ ((unsigned *)state)[i] = load_sp_array_uint(sp_output, i+offset);
+}
+
+#endif
+
+void
+sh_css_sp_start_binary_copy(unsigned int pipe_num, struct ia_css_frame *out_frame,
+ unsigned two_ppc)
+{
+ enum ia_css_pipe_id pipe_id;
+ unsigned int thread_id;
+ struct sh_css_sp_pipeline *pipe;
+ uint8_t stage_num = 0;
+
+ assert(out_frame != NULL);
+ pipe_id = IA_CSS_PIPE_ID_CAPTURE;
+ ia_css_pipeline_get_sp_thread_id(pipe_num, &thread_id);
+ pipe = &sh_css_sp_group.pipe[thread_id];
+
+ pipe->copy.bin.bytes_available = out_frame->data_bytes;
+ pipe->num_stages = 1;
+ pipe->pipe_id = pipe_id;
+ pipe->pipe_num = pipe_num;
+ pipe->thread_id = thread_id;
+ pipe->pipe_config = 0x0; /* No parameters */
+ pipe->pipe_qos_config = QOS_INVALID;
+
+ if (pipe->inout_port_config == 0) {
+ SH_CSS_PIPE_PORT_CONFIG_SET(pipe->inout_port_config,
+ (uint8_t)SH_CSS_PORT_INPUT,
+ (uint8_t)SH_CSS_HOST_TYPE, 1);
+ SH_CSS_PIPE_PORT_CONFIG_SET(pipe->inout_port_config,
+ (uint8_t)SH_CSS_PORT_OUTPUT,
+ (uint8_t)SH_CSS_HOST_TYPE, 1);
+ }
+ IA_CSS_LOG("pipe_id %d port_config %08x",
+ pipe->pipe_id, pipe->inout_port_config);
+
+#if !defined(HAS_NO_INPUT_FORMATTER)
+ sh_css_sp_group.config.input_formatter.isp_2ppc = (uint8_t)two_ppc;
+#else
+ (void)two_ppc;
+#endif
+
+ sh_css_sp_stage.num = stage_num;
+ sh_css_sp_stage.stage_type = SH_CSS_SP_STAGE_TYPE;
+ sh_css_sp_stage.func =
+ (unsigned int)IA_CSS_PIPELINE_BIN_COPY;
+
+ set_output_frame_buffer(out_frame, 0);
+
+ /* sp_bin_copy_init on the SP does not deal with dynamic/static yet */
+ /* For now always update the dynamic data from out frames. */
+ sh_css_store_sp_per_frame_data(pipe_id, pipe_num, &sh_css_sp_fw);
+}
+
+static void
+sh_css_sp_start_raw_copy(struct ia_css_frame *out_frame,
+ unsigned pipe_num,
+ unsigned two_ppc,
+ unsigned max_input_width,
+ enum sh_css_pipe_config_override pipe_conf_override,
+ unsigned int if_config_index)
+{
+ enum ia_css_pipe_id pipe_id;
+ unsigned int thread_id;
+ uint8_t stage_num = 0;
+ struct sh_css_sp_pipeline *pipe;
+
+ assert(out_frame != NULL);
+
+ {
+ /**
+ * Clear sh_css_sp_stage for easy debugging.
+ * program_input_circuit must be saved as it is set outside
+ * this function.
+ */
+ uint8_t program_input_circuit;
+ program_input_circuit = sh_css_sp_stage.program_input_circuit;
+ memset(&sh_css_sp_stage, 0, sizeof(sh_css_sp_stage));
+ sh_css_sp_stage.program_input_circuit = program_input_circuit;
+ }
+
+ pipe_id = IA_CSS_PIPE_ID_COPY;
+ ia_css_pipeline_get_sp_thread_id(pipe_num, &thread_id);
+ pipe = &sh_css_sp_group.pipe[thread_id];
+
+ pipe->copy.raw.height = out_frame->info.res.height;
+ pipe->copy.raw.width = out_frame->info.res.width;
+ pipe->copy.raw.padded_width = out_frame->info.padded_width;
+ pipe->copy.raw.raw_bit_depth = out_frame->info.raw_bit_depth;
+ pipe->copy.raw.max_input_width = max_input_width;
+ pipe->num_stages = 1;
+ pipe->pipe_id = pipe_id;
+ /* TODO: next indicates from which queues parameters need to be
+ sampled, needs checking/improvement */
+ if (pipe_conf_override == SH_CSS_PIPE_CONFIG_OVRD_NO_OVRD)
+ pipe->pipe_config =
+ (SH_CSS_PIPE_CONFIG_SAMPLE_PARAMS << thread_id);
+ else
+ pipe->pipe_config = pipe_conf_override;
+
+ pipe->pipe_qos_config = QOS_INVALID;
+
+ if (pipe->inout_port_config == 0) {
+ SH_CSS_PIPE_PORT_CONFIG_SET(pipe->inout_port_config,
+ (uint8_t)SH_CSS_PORT_INPUT,
+ (uint8_t)SH_CSS_HOST_TYPE, 1);
+ SH_CSS_PIPE_PORT_CONFIG_SET(pipe->inout_port_config,
+ (uint8_t)SH_CSS_PORT_OUTPUT,
+ (uint8_t)SH_CSS_HOST_TYPE, 1);
+ }
+ IA_CSS_LOG("pipe_id %d port_config %08x",
+ pipe->pipe_id, pipe->inout_port_config);
+
+#if !defined(HAS_NO_INPUT_FORMATTER)
+ sh_css_sp_group.config.input_formatter.isp_2ppc = (uint8_t)two_ppc;
+#else
+ (void)two_ppc;
+#endif
+
+ sh_css_sp_stage.num = stage_num;
+ sh_css_sp_stage.xmem_bin_addr = 0x0;
+ sh_css_sp_stage.stage_type = SH_CSS_SP_STAGE_TYPE;
+ sh_css_sp_stage.func = (unsigned int)IA_CSS_PIPELINE_RAW_COPY;
+ sh_css_sp_stage.if_config_index = (uint8_t) if_config_index;
+ set_output_frame_buffer(out_frame, 0);
+
+ ia_css_debug_pipe_graph_dump_sp_raw_copy(out_frame);
+}
+
+static void
+sh_css_sp_start_isys_copy(struct ia_css_frame *out_frame,
+ unsigned pipe_num, unsigned max_input_width, unsigned int if_config_index)
+{
+ enum ia_css_pipe_id pipe_id;
+ unsigned int thread_id;
+ uint8_t stage_num = 0;
+ struct sh_css_sp_pipeline *pipe;
+#if defined SH_CSS_ENABLE_METADATA
+ enum sh_css_queue_id queue_id;
+#endif
+
+ assert(out_frame != NULL);
+
+ {
+ /**
+ * Clear sh_css_sp_stage for easy debugging.
+ * program_input_circuit must be saved as it is set outside
+ * this function.
+ */
+ uint8_t program_input_circuit;
+ program_input_circuit = sh_css_sp_stage.program_input_circuit;
+ memset(&sh_css_sp_stage, 0, sizeof(sh_css_sp_stage));
+ sh_css_sp_stage.program_input_circuit = program_input_circuit;
+ }
+
+ pipe_id = IA_CSS_PIPE_ID_COPY;
+ ia_css_pipeline_get_sp_thread_id(pipe_num, &thread_id);
+ pipe = &sh_css_sp_group.pipe[thread_id];
+
+ pipe->copy.raw.height = out_frame->info.res.height;
+ pipe->copy.raw.width = out_frame->info.res.width;
+ pipe->copy.raw.padded_width = out_frame->info.padded_width;
+ pipe->copy.raw.raw_bit_depth = out_frame->info.raw_bit_depth;
+ pipe->copy.raw.max_input_width = max_input_width;
+ pipe->num_stages = 1;
+ pipe->pipe_id = pipe_id;
+ pipe->pipe_config = 0x0; /* No parameters */
+ pipe->pipe_qos_config = QOS_INVALID;
+
+ initialize_stage_frames(&sh_css_sp_stage.frames);
+ sh_css_sp_stage.num = stage_num;
+ sh_css_sp_stage.xmem_bin_addr = 0x0;
+ sh_css_sp_stage.stage_type = SH_CSS_SP_STAGE_TYPE;
+ sh_css_sp_stage.func = (unsigned int)IA_CSS_PIPELINE_ISYS_COPY;
+ sh_css_sp_stage.if_config_index = (uint8_t) if_config_index;
+
+ set_output_frame_buffer(out_frame, 0);
+
+#if defined SH_CSS_ENABLE_METADATA
+ if (pipe->metadata.height > 0) {
+ ia_css_query_internal_queue_id(IA_CSS_BUFFER_TYPE_METADATA, thread_id, &queue_id);
+ sh_css_copy_buffer_attr_to_spbuffer(&sh_css_sp_stage.frames.metadata_buf, queue_id, mmgr_EXCEPTION, IA_CSS_BUFFER_TYPE_METADATA);
+ }
+#endif
+
+ ia_css_debug_pipe_graph_dump_sp_raw_copy(out_frame);
+}
+
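+/* Read back the number of bytes copied by the SP binary copy stage from
+ * the bin_copy_bytes_copied field of the SP output area in DMEM.
+ */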
+unsigned int
+sh_css_sp_get_binary_copy_size(void)
+{
+ const struct ia_css_fw_info *fw = &sh_css_sp_fw;
+ unsigned int HIVE_ADDR_sp_output = fw->info.sp.output;
+ unsigned int offset = (unsigned int)offsetof(struct sh_css_sp_output,
+ bin_copy_bytes_copied) / sizeof(int);
+ (void)HIVE_ADDR_sp_output; /* To get rid of warning in CRUN */
+ return load_sp_array_uint(sp_output, offset);
+}
+
+unsigned int
+sh_css_sp_get_sw_interrupt_value(unsigned int irq)
+{
+ const struct ia_css_fw_info *fw = &sh_css_sp_fw;
+ unsigned int HIVE_ADDR_sp_output = fw->info.sp.output;
+ unsigned int offset = (unsigned int)offsetof(struct sh_css_sp_output, sw_interrupt_value)
+ / sizeof(int);
+ (void)HIVE_ADDR_sp_output; /* To get rid of warning in CRUN */
+ return load_sp_array_uint(sp_output, offset+irq);
+}
+
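+/* Fill an SP buffer descriptor either with the queue the buffer will be
+ * dequeued from (dynamic buffers) or with a fixed xmem address (static
+ * buffers).
+ */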
+static void
+sh_css_copy_buffer_attr_to_spbuffer(struct ia_css_buffer_sp *dest_buf,
+ const enum sh_css_queue_id queue_id,
+ const hrt_vaddress xmem_addr,
+ const enum ia_css_buffer_type buf_type)
+{
+ assert(buf_type < IA_CSS_NUM_BUFFER_TYPE);
+ if (queue_id > SH_CSS_INVALID_QUEUE_ID) {
+ /*
+ * value >=0 indicates that function init_frame_pointers()
+ * should use the dynamic data address
+ */
+ assert(queue_id < SH_CSS_MAX_NUM_QUEUES);
+
+ /* Klocwork assumes the assert above can be disabled.
+ Since this function can be reached with any type, and
+ Klocwork does not know that frame_in->dynamic_data_index
+ can only be set for one of the types in the assert, it
+ has to assume we can get here for any type. That could
+ lead to an out-of-bounds reference when indexing with
+ buf_type further below. To satisfy Klocwork an additional
+ if has been added; it will always yield true.
+ */
+ if ((queue_id < SH_CSS_MAX_NUM_QUEUES))
+ {
+ dest_buf->buf_src.queue_id = queue_id;
+ }
+ } else {
+ assert(xmem_addr != mmgr_EXCEPTION);
+ dest_buf->buf_src.xmem_addr = xmem_addr;
+ }
+ dest_buf->buf_type = buf_type;
+}
+
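+/* Translate a host-side ia_css_frame into the SP frame representation:
+ * buffer source (queue id or xmem address), frame info and the per-plane
+ * offsets for the frame's format.
+ */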
+static void
+sh_css_copy_frame_to_spframe(struct ia_css_frame_sp *sp_frame_out,
+ const struct ia_css_frame *frame_in)
+{
+ assert(frame_in != NULL);
+
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
+ "sh_css_copy_frame_to_spframe():\n");
+
+
+ sh_css_copy_buffer_attr_to_spbuffer(&sp_frame_out->buf_attr,
+ frame_in->dynamic_queue_id,
+ frame_in->data,
+ frame_in->buf_type);
+
+ ia_css_frame_info_to_frame_sp_info(&sp_frame_out->info, &frame_in->info);
+
+ switch (frame_in->info.format) {
+ case IA_CSS_FRAME_FORMAT_RAW_PACKED:
+ case IA_CSS_FRAME_FORMAT_RAW:
+ sp_frame_out->planes.raw.offset = frame_in->planes.raw.offset;
+ break;
+ case IA_CSS_FRAME_FORMAT_RGB565:
+ case IA_CSS_FRAME_FORMAT_RGBA888:
+ sp_frame_out->planes.rgb.offset = frame_in->planes.rgb.offset;
+ break;
+ case IA_CSS_FRAME_FORMAT_PLANAR_RGB888:
+ sp_frame_out->planes.planar_rgb.r.offset =
+ frame_in->planes.planar_rgb.r.offset;
+ sp_frame_out->planes.planar_rgb.g.offset =
+ frame_in->planes.planar_rgb.g.offset;
+ sp_frame_out->planes.planar_rgb.b.offset =
+ frame_in->planes.planar_rgb.b.offset;
+ break;
+ case IA_CSS_FRAME_FORMAT_YUYV:
+ case IA_CSS_FRAME_FORMAT_UYVY:
+ case IA_CSS_FRAME_FORMAT_CSI_MIPI_YUV420_8:
+ case IA_CSS_FRAME_FORMAT_CSI_MIPI_LEGACY_YUV420_8:
+ case IA_CSS_FRAME_FORMAT_YUV_LINE:
+ sp_frame_out->planes.yuyv.offset = frame_in->planes.yuyv.offset;
+ break;
+ case IA_CSS_FRAME_FORMAT_NV11:
+ case IA_CSS_FRAME_FORMAT_NV12:
+ case IA_CSS_FRAME_FORMAT_NV12_16:
+ case IA_CSS_FRAME_FORMAT_NV12_TILEY:
+ case IA_CSS_FRAME_FORMAT_NV21:
+ case IA_CSS_FRAME_FORMAT_NV16:
+ case IA_CSS_FRAME_FORMAT_NV61:
+ sp_frame_out->planes.nv.y.offset =
+ frame_in->planes.nv.y.offset;
+ sp_frame_out->planes.nv.uv.offset =
+ frame_in->planes.nv.uv.offset;
+ break;
+ case IA_CSS_FRAME_FORMAT_YUV420:
+ case IA_CSS_FRAME_FORMAT_YUV422:
+ case IA_CSS_FRAME_FORMAT_YUV444:
+ case IA_CSS_FRAME_FORMAT_YUV420_16:
+ case IA_CSS_FRAME_FORMAT_YUV422_16:
+ case IA_CSS_FRAME_FORMAT_YV12:
+ case IA_CSS_FRAME_FORMAT_YV16:
+ sp_frame_out->planes.yuv.y.offset =
+ frame_in->planes.yuv.y.offset;
+ sp_frame_out->planes.yuv.u.offset =
+ frame_in->planes.yuv.u.offset;
+ sp_frame_out->planes.yuv.v.offset =
+ frame_in->planes.yuv.v.offset;
+ break;
+ case IA_CSS_FRAME_FORMAT_QPLANE6:
+ sp_frame_out->planes.plane6.r.offset =
+ frame_in->planes.plane6.r.offset;
+ sp_frame_out->planes.plane6.r_at_b.offset =
+ frame_in->planes.plane6.r_at_b.offset;
+ sp_frame_out->planes.plane6.gr.offset =
+ frame_in->planes.plane6.gr.offset;
+ sp_frame_out->planes.plane6.gb.offset =
+ frame_in->planes.plane6.gb.offset;
+ sp_frame_out->planes.plane6.b.offset =
+ frame_in->planes.plane6.b.offset;
+ sp_frame_out->planes.plane6.b_at_r.offset =
+ frame_in->planes.plane6.b_at_r.offset;
+ break;
+ case IA_CSS_FRAME_FORMAT_BINARY_8:
+ sp_frame_out->planes.binary.data.offset =
+ frame_in->planes.binary.data.offset;
+ break;
+ default:
+ /* This should not happen, but in case it does,
+ * nullify the planes
+ */
+ memset(&sp_frame_out->planes, 0, sizeof(sp_frame_out->planes));
+ break;
+ }
+
+}
+
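+/* Attach a frame as the stage input after checking that its format is one
+ * the SP/ISP input path supports; unsupported formats are rejected with
+ * IA_CSS_ERR_INVALID_ARGUMENTS.
+ */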
+static enum ia_css_err
+set_input_frame_buffer(const struct ia_css_frame *frame)
+{
+ if (frame == NULL)
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+
+ switch (frame->info.format) {
+ case IA_CSS_FRAME_FORMAT_QPLANE6:
+ case IA_CSS_FRAME_FORMAT_YUV420_16:
+ case IA_CSS_FRAME_FORMAT_RAW_PACKED:
+ case IA_CSS_FRAME_FORMAT_RAW:
+ case IA_CSS_FRAME_FORMAT_YUV420:
+ case IA_CSS_FRAME_FORMAT_YUYV:
+ case IA_CSS_FRAME_FORMAT_YUV_LINE:
+ case IA_CSS_FRAME_FORMAT_NV12:
+ case IA_CSS_FRAME_FORMAT_NV12_16:
+ case IA_CSS_FRAME_FORMAT_NV12_TILEY:
+ case IA_CSS_FRAME_FORMAT_NV21:
+ case IA_CSS_FRAME_FORMAT_CSI_MIPI_YUV420_8:
+ case IA_CSS_FRAME_FORMAT_CSI_MIPI_LEGACY_YUV420_8:
+ case IA_CSS_FRAME_FORMAT_CSI_MIPI_YUV420_10:
+ break;
+ default:
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+ sh_css_copy_frame_to_spframe(&sh_css_sp_stage.frames.in, frame);
+
+ return IA_CSS_SUCCESS;
+}
+
+static enum ia_css_err
+set_output_frame_buffer(const struct ia_css_frame *frame,
+ unsigned idx)
+{
+ if (frame == NULL)
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+
+ switch (frame->info.format) {
+ case IA_CSS_FRAME_FORMAT_YUV420:
+ case IA_CSS_FRAME_FORMAT_YUV422:
+ case IA_CSS_FRAME_FORMAT_YUV444:
+ case IA_CSS_FRAME_FORMAT_YV12:
+ case IA_CSS_FRAME_FORMAT_YV16:
+ case IA_CSS_FRAME_FORMAT_YUV420_16:
+ case IA_CSS_FRAME_FORMAT_YUV422_16:
+ case IA_CSS_FRAME_FORMAT_NV11:
+ case IA_CSS_FRAME_FORMAT_NV12:
+ case IA_CSS_FRAME_FORMAT_NV12_16:
+ case IA_CSS_FRAME_FORMAT_NV12_TILEY:
+ case IA_CSS_FRAME_FORMAT_NV16:
+ case IA_CSS_FRAME_FORMAT_NV21:
+ case IA_CSS_FRAME_FORMAT_NV61:
+ case IA_CSS_FRAME_FORMAT_YUYV:
+ case IA_CSS_FRAME_FORMAT_UYVY:
+ case IA_CSS_FRAME_FORMAT_CSI_MIPI_YUV420_8:
+ case IA_CSS_FRAME_FORMAT_CSI_MIPI_LEGACY_YUV420_8:
+ case IA_CSS_FRAME_FORMAT_YUV_LINE:
+ case IA_CSS_FRAME_FORMAT_RGB565:
+ case IA_CSS_FRAME_FORMAT_RGBA888:
+ case IA_CSS_FRAME_FORMAT_PLANAR_RGB888:
+ case IA_CSS_FRAME_FORMAT_RAW:
+ case IA_CSS_FRAME_FORMAT_RAW_PACKED:
+ case IA_CSS_FRAME_FORMAT_QPLANE6:
+ case IA_CSS_FRAME_FORMAT_BINARY_8:
+ break;
+ default:
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+ sh_css_copy_frame_to_spframe(&sh_css_sp_stage.frames.out[idx], frame);
+ return IA_CSS_SUCCESS;
+}
+
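+/* Attach a frame as the viewfinder output; only the formats usable on the
+ * viewfinder (second) output pin are accepted.
+ */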
+static enum ia_css_err
+set_view_finder_buffer(const struct ia_css_frame *frame)
+{
+ if (frame == NULL)
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+
+ switch (frame->info.format) {
+ /* the dual output pin */
+ case IA_CSS_FRAME_FORMAT_NV12:
+ case IA_CSS_FRAME_FORMAT_NV12_16:
+ case IA_CSS_FRAME_FORMAT_NV21:
+ case IA_CSS_FRAME_FORMAT_YUYV:
+ case IA_CSS_FRAME_FORMAT_UYVY:
+ case IA_CSS_FRAME_FORMAT_CSI_MIPI_YUV420_8:
+ case IA_CSS_FRAME_FORMAT_CSI_MIPI_LEGACY_YUV420_8:
+ case IA_CSS_FRAME_FORMAT_YUV420:
+ case IA_CSS_FRAME_FORMAT_YV12:
+ case IA_CSS_FRAME_FORMAT_NV12_TILEY:
+
+ /* for vf_veceven */
+ case IA_CSS_FRAME_FORMAT_YUV_LINE:
+ break;
+ default:
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ }
+
+ sh_css_copy_frame_to_spframe(&sh_css_sp_stage.frames.out_vf, frame);
+ return IA_CSS_SUCCESS;
+}
+
+#if !defined(HAS_NO_INPUT_FORMATTER)
+void sh_css_sp_set_if_configs(
+ const input_formatter_cfg_t *config_a,
+ const input_formatter_cfg_t *config_b,
+ const uint8_t if_config_index
+ )
+{
+ assert(if_config_index < SH_CSS_MAX_IF_CONFIGS);
+ assert(config_a != NULL);
+
+ sh_css_sp_group.config.input_formatter.set[if_config_index].config_a = *config_a;
+ sh_css_sp_group.config.input_formatter.a_changed = true;
+
+ if (config_b != NULL) {
+ sh_css_sp_group.config.input_formatter.set[if_config_index].config_b = *config_b;
+ sh_css_sp_group.config.input_formatter.b_changed = true;
+ }
+
+ return;
+}
+#endif
+
+#if !defined(HAS_NO_INPUT_SYSTEM) && defined(USE_INPUT_SYSTEM_VERSION_2)
+void
+sh_css_sp_program_input_circuit(int fmt_type,
+ int ch_id,
+ enum ia_css_input_mode input_mode)
+{
+ sh_css_sp_group.config.input_circuit.no_side_band = false;
+ sh_css_sp_group.config.input_circuit.fmt_type = fmt_type;
+ sh_css_sp_group.config.input_circuit.ch_id = ch_id;
+ sh_css_sp_group.config.input_circuit.input_mode = input_mode;
+/*
+ * The SP group is only loaded at SP boot time and is read once;
+ * change flags such as "input_circuit_cfg_changed" must be reset on the SP.
+ */
+ sh_css_sp_group.config.input_circuit_cfg_changed = true;
+ sh_css_sp_stage.program_input_circuit = true;
+}
+#endif
+
+#if !defined(HAS_NO_INPUT_SYSTEM) && defined(USE_INPUT_SYSTEM_VERSION_2)
+void
+sh_css_sp_configure_sync_gen(int width, int height,
+ int hblank_cycles,
+ int vblank_cycles)
+{
+ sh_css_sp_group.config.sync_gen.width = width;
+ sh_css_sp_group.config.sync_gen.height = height;
+ sh_css_sp_group.config.sync_gen.hblank_cycles = hblank_cycles;
+ sh_css_sp_group.config.sync_gen.vblank_cycles = vblank_cycles;
+}
+
+void
+sh_css_sp_configure_tpg(int x_mask,
+ int y_mask,
+ int x_delta,
+ int y_delta,
+ int xy_mask)
+{
+ sh_css_sp_group.config.tpg.x_mask = x_mask;
+ sh_css_sp_group.config.tpg.y_mask = y_mask;
+ sh_css_sp_group.config.tpg.x_delta = x_delta;
+ sh_css_sp_group.config.tpg.y_delta = y_delta;
+ sh_css_sp_group.config.tpg.xy_mask = xy_mask;
+}
+
+void
+sh_css_sp_configure_prbs(int seed)
+{
+ sh_css_sp_group.config.prbs.seed = seed;
+}
+#endif
+
+void
+sh_css_sp_configure_enable_raw_pool_locking(bool lock_all)
+{
+ sh_css_sp_group.config.enable_raw_pool_locking = true;
+ sh_css_sp_group.config.lock_all = lock_all;
+}
+
+void
+sh_css_sp_enable_isys_event_queue(bool enable)
+{
+#if !defined(HAS_NO_INPUT_SYSTEM)
+ sh_css_sp_group.config.enable_isys_event_queue = enable;
+#else
+ (void)enable;
+#endif
+}
+
+void
+sh_css_sp_set_disable_continuous_viewfinder(bool flag)
+{
+ sh_css_sp_group.config.disable_cont_vf = flag;
+}
+
+static enum ia_css_err
+sh_css_sp_write_frame_pointers(const struct sh_css_binary_args *args)
+{
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ int i;
+
+ assert(args != NULL);
+
+ if (args->in_frame)
+ err = set_input_frame_buffer(args->in_frame);
+ if (err == IA_CSS_SUCCESS && args->out_vf_frame)
+ err = set_view_finder_buffer(args->out_vf_frame);
+ for (i = 0; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++) {
+ if (err == IA_CSS_SUCCESS && args->out_frame[i])
+ err = set_output_frame_buffer(args->out_frame[i], i);
+ }
+
+ /* We don't pass this error back to the upper layer, so we add an assert here
+ because we actually hit the error here but it still works by accident... */
+ if (err != IA_CSS_SUCCESS) assert(false);
+ return err;
+}
+
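+/* Fill the global SP group configuration that is shared by all stages of
+ * a pipeline: 2 pixels-per-clock mode, ISP sync behaviour and, when an
+ * input formatter configuration is needed, its stream format.
+ */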
+static void
+sh_css_sp_init_group(bool two_ppc,
+ enum ia_css_stream_format input_format,
+ bool no_isp_sync,
+ uint8_t if_config_index)
+{
+#if !defined(HAS_NO_INPUT_FORMATTER)
+ sh_css_sp_group.config.input_formatter.isp_2ppc = two_ppc;
+#else
+ (void)two_ppc;
+#endif
+
+ sh_css_sp_group.config.no_isp_sync = (uint8_t)no_isp_sync;
+ /* decide whether the frame is processed online or offline */
+ if (if_config_index == SH_CSS_IF_CONFIG_NOT_NEEDED) return;
+#if !defined(HAS_NO_INPUT_FORMATTER)
+ assert(if_config_index < SH_CSS_MAX_IF_CONFIGS);
+ sh_css_sp_group.config.input_formatter.set[if_config_index].stream_format = input_format;
+#else
+ (void)input_format;
+#endif
+}
+
+void
+sh_css_stage_write_binary_info(struct ia_css_binary_info *info)
+{
+ assert(info != NULL);
+ sh_css_isp_stage.binary_info = *info;
+}
+
+static enum ia_css_err
+copy_isp_mem_if_to_ddr(struct ia_css_binary *binary)
+{
+ enum ia_css_err err;
+
+ err = ia_css_isp_param_copy_isp_mem_if_to_ddr(
+ &binary->css_params,
+ &binary->mem_params,
+ IA_CSS_PARAM_CLASS_CONFIG);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+ err = ia_css_isp_param_copy_isp_mem_if_to_ddr(
+ &binary->css_params,
+ &binary->mem_params,
+ IA_CSS_PARAM_CLASS_STATE);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+ return IA_CSS_SUCCESS;
+}
+
+static bool
+is_sp_stage(struct ia_css_pipeline_stage *stage)
+{
+ assert(stage != NULL);
+ return stage->sp_func != IA_CSS_PIPELINE_NO_FUNC;
+}
+
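+/* Propagate the binary arguments (input, output, delay and TNR frames) to
+ * the individual ISP kernel configuration functions for this stage.
+ */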
+static enum ia_css_err
+configure_isp_from_args(
+ const struct sh_css_sp_pipeline *pipeline,
+ const struct ia_css_binary *binary,
+ const struct sh_css_binary_args *args,
+ bool two_ppc,
+ bool deinterleaved)
+{
+ enum ia_css_err err = IA_CSS_SUCCESS;
+#ifdef ISP2401
+ struct ia_css_pipe *pipe = find_pipe_by_num(pipeline->pipe_num);
+ const struct ia_css_resolution *res;
+
+#endif
+ ia_css_fpn_configure(binary, &binary->in_frame_info);
+ ia_css_crop_configure(binary, &args->delay_frames[0]->info);
+ ia_css_qplane_configure(pipeline, binary, &binary->in_frame_info);
+ ia_css_output0_configure(binary, &args->out_frame[0]->info);
+ ia_css_output1_configure(binary, &args->out_vf_frame->info);
+ ia_css_copy_output_configure(binary, args->copy_output);
+ ia_css_output0_configure(binary, &args->out_frame[0]->info);
+#ifdef ISP2401
+ ia_css_sc_configure(binary, pipeline->shading.internal_frame_origin_x_bqs_on_sctbl,
+ pipeline->shading.internal_frame_origin_y_bqs_on_sctbl);
+#endif
+ ia_css_iterator_configure(binary, &args->in_frame->info);
+ ia_css_dvs_configure(binary, &args->out_frame[0]->info);
+ ia_css_output_configure(binary, &args->out_frame[0]->info);
+ ia_css_raw_configure(pipeline, binary, &args->in_frame->info, &binary->in_frame_info, two_ppc, deinterleaved);
+ ia_css_ref_configure(binary, (const struct ia_css_frame **)args->delay_frames, pipeline->dvs_frame_delay);
+ ia_css_tnr_configure(binary, (const struct ia_css_frame **)args->tnr_frames);
+ ia_css_bayer_io_config(binary, args);
+ return err;
+}
+
+static void
+initialize_isp_states(const struct ia_css_binary *binary)
+{
+ unsigned int i;
+
+ if (!binary->info->mem_offsets.offsets.state)
+ return;
+ for (i = 0; i < IA_CSS_NUM_STATE_IDS; i++) {
+ ia_css_kernel_init_state[i](binary);
+ }
+}
+
+static void
+initialize_frame_buffer_attribute(struct ia_css_buffer_sp *buf_attr)
+{
+ buf_attr->buf_src.queue_id = SH_CSS_INVALID_QUEUE_ID;
+ buf_attr->buf_type = IA_CSS_BUFFER_TYPE_INVALID;
+}
+
+static void
+initialize_stage_frames(struct ia_css_frames_sp *frames)
+{
+ unsigned int i;
+
+ initialize_frame_buffer_attribute(&frames->in.buf_attr);
+ for (i = 0; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++) {
+ initialize_frame_buffer_attribute(&frames->out[i].buf_attr);
+ }
+ initialize_frame_buffer_attribute(&frames->out_vf.buf_attr);
+ initialize_frame_buffer_attribute(&frames->s3a_buf);
+ initialize_frame_buffer_attribute(&frames->dvs_buf);
+#if defined SH_CSS_ENABLE_METADATA
+ initialize_frame_buffer_attribute(&frames->metadata_buf);
+#endif
+}
+
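+/* Fill sh_css_sp_stage and sh_css_isp_stage for one ISP binary stage:
+ * frame infos, binary/blob info, frame pointers, statistics and metadata
+ * queues, and the per-stage ISP configuration derived from the arguments.
+ */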
+static enum ia_css_err
+sh_css_sp_init_stage(struct ia_css_binary *binary,
+ const char *binary_name,
+ const struct ia_css_blob_info *blob_info,
+ const struct sh_css_binary_args *args,
+ unsigned int pipe_num,
+ unsigned stage,
+ bool xnr,
+ const struct ia_css_isp_param_css_segments *isp_mem_if,
+ unsigned int if_config_index,
+ bool two_ppc)
+{
+ const struct ia_css_binary_xinfo *xinfo;
+ const struct ia_css_binary_info *info;
+ enum ia_css_err err = IA_CSS_SUCCESS;
+ int i;
+ struct ia_css_pipe *pipe = NULL;
+ unsigned int thread_id;
+ enum sh_css_queue_id queue_id;
+ bool continuous = sh_css_continuous_is_enabled((uint8_t)pipe_num);
+
+ assert(binary != NULL);
+ assert(blob_info != NULL);
+ assert(args != NULL);
+ assert(isp_mem_if != NULL);
+
+ xinfo = binary->info;
+ info = &xinfo->sp;
+ {
+ /**
+ * Clear sh_css_sp_stage for easy debugging.
+ * program_input_circuit must be saved as it is set outside
+ * this function.
+ */
+ uint8_t program_input_circuit;
+ program_input_circuit = sh_css_sp_stage.program_input_circuit;
+ memset(&sh_css_sp_stage, 0, sizeof(sh_css_sp_stage));
+ sh_css_sp_stage.program_input_circuit = (uint8_t)program_input_circuit;
+ }
+
+ ia_css_pipeline_get_sp_thread_id(pipe_num, &thread_id);
+
+ if (info == NULL) {
+ sh_css_sp_group.pipe[thread_id].sp_stage_addr[stage] = mmgr_NULL;
+ return IA_CSS_SUCCESS;
+ }
+
+#if defined(USE_INPUT_SYSTEM_VERSION_2401)
+ (void)continuous;
+ sh_css_sp_stage.deinterleaved = 0;
+#else
+ sh_css_sp_stage.deinterleaved = ((stage == 0) && continuous);
+#endif
+
+ initialize_stage_frames(&sh_css_sp_stage.frames);
+ /*
+ * TODO: Make the Host dynamically determine
+ * the stage type.
+ */
+ sh_css_sp_stage.stage_type = SH_CSS_ISP_STAGE_TYPE;
+ sh_css_sp_stage.num = (uint8_t)stage;
+ sh_css_sp_stage.isp_online = (uint8_t)binary->online;
+ sh_css_sp_stage.isp_copy_vf = (uint8_t)args->copy_vf;
+ sh_css_sp_stage.isp_copy_output = (uint8_t)args->copy_output;
+ sh_css_sp_stage.enable.vf_output = (args->out_vf_frame != NULL);
+
+ /* Copy the frame infos first, to be overwritten by the frames,
+ if these are present.
+ */
+ sh_css_sp_stage.frames.effective_in_res.width = binary->effective_in_frame_res.width;
+ sh_css_sp_stage.frames.effective_in_res.height = binary->effective_in_frame_res.height;
+
+ ia_css_frame_info_to_frame_sp_info(&sh_css_sp_stage.frames.in.info,
+ &binary->in_frame_info);
+ for (i = 0; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++) {
+ ia_css_frame_info_to_frame_sp_info(&sh_css_sp_stage.frames.out[i].info,
+ &binary->out_frame_info[i]);
+ }
+ ia_css_frame_info_to_frame_sp_info(&sh_css_sp_stage.frames.internal_frame_info,
+ &binary->internal_frame_info);
+ sh_css_sp_stage.dvs_envelope.width = binary->dvs_envelope.width;
+ sh_css_sp_stage.dvs_envelope.height = binary->dvs_envelope.height;
+ sh_css_sp_stage.isp_pipe_version = (uint8_t)info->pipeline.isp_pipe_version;
+ sh_css_sp_stage.isp_deci_log_factor = (uint8_t)binary->deci_factor_log2;
+ sh_css_sp_stage.isp_vf_downscale_bits = (uint8_t)binary->vf_downscale_log2;
+
+ sh_css_sp_stage.if_config_index = (uint8_t) if_config_index;
+
+ sh_css_sp_stage.sp_enable_xnr = (uint8_t)xnr;
+ sh_css_sp_stage.xmem_bin_addr = xinfo->xmem_addr;
+ sh_css_sp_stage.xmem_map_addr = sh_css_params_ddr_address_map();
+ sh_css_isp_stage.blob_info = *blob_info;
+ sh_css_stage_write_binary_info((struct ia_css_binary_info *)info);
+
+ /* Make sure binary name is smaller than allowed string size */
+ assert(strlen(binary_name) < SH_CSS_MAX_BINARY_NAME-1);
+ strncpy(sh_css_isp_stage.binary_name, binary_name, SH_CSS_MAX_BINARY_NAME-1);
+ sh_css_isp_stage.binary_name[SH_CSS_MAX_BINARY_NAME - 1] = 0;
+ sh_css_isp_stage.mem_initializers = *isp_mem_if;
+
+ /**
+ * Even when a stage does not need uds and does not need params,
+ * ia_css_uds_sp_scale_params() seems to be called (needs
+ * further investigation). This function cannot deal with
+ * dx, dy = {0, 0}.
+ */
+
+ err = sh_css_sp_write_frame_pointers(args);
+ /* TODO: move it to a better place */
+ if (binary->info->sp.enable.s3a) {
+ ia_css_query_internal_queue_id(IA_CSS_BUFFER_TYPE_3A_STATISTICS, thread_id, &queue_id);
+ sh_css_copy_buffer_attr_to_spbuffer(&sh_css_sp_stage.frames.s3a_buf, queue_id, mmgr_EXCEPTION, IA_CSS_BUFFER_TYPE_3A_STATISTICS);
+ }
+ if (binary->info->sp.enable.dis) {
+ ia_css_query_internal_queue_id(IA_CSS_BUFFER_TYPE_DIS_STATISTICS, thread_id, &queue_id);
+ sh_css_copy_buffer_attr_to_spbuffer(&sh_css_sp_stage.frames.dvs_buf, queue_id, mmgr_EXCEPTION, IA_CSS_BUFFER_TYPE_DIS_STATISTICS);
+ }
+#if defined SH_CSS_ENABLE_METADATA
+ ia_css_query_internal_queue_id(IA_CSS_BUFFER_TYPE_METADATA, thread_id, &queue_id);
+ sh_css_copy_buffer_attr_to_spbuffer(&sh_css_sp_stage.frames.metadata_buf, queue_id, mmgr_EXCEPTION, IA_CSS_BUFFER_TYPE_METADATA);
+#endif
+ if (err != IA_CSS_SUCCESS)
+ return err;
+
+#ifdef USE_INPUT_SYSTEM_VERSION_2401
+#ifndef ISP2401
+ if (args->in_frame) {
+ pipe = find_pipe_by_num(sh_css_sp_group.pipe[thread_id].pipe_num);
+ if (pipe == NULL)
+ return IA_CSS_ERR_INTERNAL_ERROR;
+ ia_css_get_crop_offsets(pipe, &args->in_frame->info);
+ } else if (&binary->in_frame_info) {
+ pipe = find_pipe_by_num(sh_css_sp_group.pipe[thread_id].pipe_num);
+ if (pipe == NULL)
+ return IA_CSS_ERR_INTERNAL_ERROR;
+ ia_css_get_crop_offsets(pipe, &binary->in_frame_info);
+#else
+ if (stage == 0) {
+ if (args->in_frame) {
+ pipe = find_pipe_by_num(sh_css_sp_group.pipe[thread_id].pipe_num);
+ if (pipe == NULL)
+ return IA_CSS_ERR_INTERNAL_ERROR;
+ ia_css_get_crop_offsets(pipe, &args->in_frame->info);
+ } else if (&binary->in_frame_info) {
+ pipe = find_pipe_by_num(sh_css_sp_group.pipe[thread_id].pipe_num);
+ if (pipe == NULL)
+ return IA_CSS_ERR_INTERNAL_ERROR;
+ ia_css_get_crop_offsets(pipe, &binary->in_frame_info);
+ }
+#endif
+ }
+#else
+ (void)pipe; /* avoid build warning */
+#endif
+
+ err = configure_isp_from_args(&sh_css_sp_group.pipe[thread_id],
+ binary, args, two_ppc, sh_css_sp_stage.deinterleaved);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+
+ initialize_isp_states(binary);
+
+ /* We do this only for the preview pipe because in fill_binary_info()
+ * we assign the vf_out resolution to the out resolution, while for ISP
+ * internal processing we need the original out resolution. The video pipe
+ * has two output pins, out and vf_out, so it already keeps both resolutions. */
+ if (binary->info->sp.pipeline.mode == IA_CSS_BINARY_MODE_PREVIEW &&
+ (binary->vf_downscale_log2 > 0)) {
+ /* TODO: Remove this after preview output decimation is fixed
+ * by configuring out & vf info fields properly */
+ sh_css_sp_stage.frames.out[0].info.padded_width
+ <<= binary->vf_downscale_log2;
+ sh_css_sp_stage.frames.out[0].info.res.width
+ <<= binary->vf_downscale_log2;
+ sh_css_sp_stage.frames.out[0].info.res.height
+ <<= binary->vf_downscale_log2;
+ }
+ err = copy_isp_mem_if_to_ddr(binary);
+ if (err != IA_CSS_SUCCESS)
+ return err;
+
+ return IA_CSS_SUCCESS;
+}
+
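+/* Initialize one pipeline stage that runs on the ISP. The stage can be
+ * backed by a regular binary or by extension firmware; in the latter case
+ * a temporary binary descriptor is filled from the firmware info. Stages
+ * that run purely on the SP are handled by sp_init_sp_stage() and are
+ * rejected here.
+ */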
+static enum ia_css_err
+sp_init_stage(struct ia_css_pipeline_stage *stage,
+ unsigned int pipe_num,
+ bool xnr,
+ unsigned int if_config_index,
+ bool two_ppc)
+{
+ struct ia_css_binary *binary;
+ const struct ia_css_fw_info *firmware;
+ const struct sh_css_binary_args *args;
+ unsigned stage_num;
+/*
+ * Initialiser required because of the "else" path below.
+ * Is this a valid path?
+ */
+ const char *binary_name = "";
+ const struct ia_css_binary_xinfo *info = NULL;
+ /* note: the var below is made static as it is quite large;
+ if it is not static it ends up on the stack which could
+ cause issues for drivers
+ */
+ static struct ia_css_binary tmp_binary;
+ const struct ia_css_blob_info *blob_info = NULL;
+ struct ia_css_isp_param_css_segments isp_mem_if;
+ /* LA: should be ia_css_data, should not contain host pointer.
+ However, CSS/DDR pointer is not available yet.
+ The hack is to store it in params->ddr_ptrs and then copy it later in the SP, just before vmem init.
+ TODO: Call this after CSS/DDR allocation and store that pointer.
+ Best would be to allocate it at stage creation time together with the host pointer.
+ Remove vmem from params.
+ */
+ struct ia_css_isp_param_css_segments *mem_if = &isp_mem_if;
+
+ enum ia_css_err err = IA_CSS_SUCCESS;
+
+ assert(stage != NULL);
+
+ binary = stage->binary;
+ firmware = stage->firmware;
+ args = &stage->args;
+ stage_num = stage->stage_num;
+
+
+ if (binary) {
+ info = binary->info;
+ binary_name = (const char *)(info->blob->name);
+ blob_info = &info->blob->header.blob;
+ ia_css_init_memory_interface(mem_if, &binary->mem_params, &binary->css_params);
+ } else if (firmware) {
+ const struct ia_css_frame_info *out_infos[IA_CSS_BINARY_MAX_OUTPUT_PORTS] = {NULL};
+ if (args->out_frame[0])
+ out_infos[0] = &args->out_frame[0]->info;
+ info = &firmware->info.isp;
+ ia_css_binary_fill_info(info, false, false,
+ IA_CSS_STREAM_FORMAT_RAW_10,
+ args->in_frame ? &args->in_frame->info : NULL,
+ NULL,
+ out_infos,
+ args->out_vf_frame ? &args->out_vf_frame->info
+ : NULL,
+ &tmp_binary,
+ NULL,
+ -1, true);
+ binary = &tmp_binary;
+ binary->info = info;
+ binary_name = IA_CSS_EXT_ISP_PROG_NAME(firmware);
+ blob_info = &firmware->blob;
+ mem_if = (struct ia_css_isp_param_css_segments *)&firmware->mem_initializers;
+ } else {
+ /* SP stage */
+ assert(stage->sp_func != IA_CSS_PIPELINE_NO_FUNC);
+ /* binary and blob_info are now NULL.
+ These will be passed to sh_css_sp_init_stage
+ and dereferenced there, so passing a NULL
+ pointer is no good. Return an error. */
+ return IA_CSS_ERR_INTERNAL_ERROR;
+ }
+
+ err = sh_css_sp_init_stage(binary,
+ (const char *)binary_name,
+ blob_info,
+ args,
+ pipe_num,
+ stage_num,
+ xnr,
+ mem_if,
+ if_config_index,
+ two_ppc);
+ return err;
+}
+
+static void
+sp_init_sp_stage(struct ia_css_pipeline_stage *stage,
+ unsigned pipe_num,
+ bool two_ppc,
+ enum sh_css_pipe_config_override copy_ovrd,
+ unsigned int if_config_index)
+{
+ const struct sh_css_binary_args *args = &stage->args;
+
+ assert(stage != NULL);
+ switch (stage->sp_func) {
+ case IA_CSS_PIPELINE_RAW_COPY:
+ sh_css_sp_start_raw_copy(args->out_frame[0],
+ pipe_num, two_ppc,
+ stage->max_input_width,
+ copy_ovrd, if_config_index);
+ break;
+ case IA_CSS_PIPELINE_BIN_COPY:
+ assert(false); /* TBI */
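+ /* No break: execution falls through to the ISYS copy case below. */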
+ case IA_CSS_PIPELINE_ISYS_COPY:
+ sh_css_sp_start_isys_copy(args->out_frame[0],
+ pipe_num, stage->max_input_width, if_config_index);
+ break;
+ case IA_CSS_PIPELINE_NO_FUNC:
+ assert(false);
+ }
+}
+
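+/* Fill the SP descriptor for a complete pipeline: the per-thread pipe
+ * entry in sh_css_sp_group (stage count, parameter sampling, metadata and
+ * shading info) and the per-stage data for every stage in the pipeline.
+ */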
+void
+sh_css_sp_init_pipeline(struct ia_css_pipeline *me,
+ enum ia_css_pipe_id id,
+ uint8_t pipe_num,
+ bool xnr,
+ bool two_ppc,
+ bool continuous,
+ bool offline,
+ unsigned int required_bds_factor,
+ enum sh_css_pipe_config_override copy_ovrd,
+ enum ia_css_input_mode input_mode,
+ const struct ia_css_metadata_config *md_config,
+ const struct ia_css_metadata_info *md_info,
+#if !defined(HAS_NO_INPUT_SYSTEM)
+ const mipi_port_ID_t port_id
+#endif
+#ifdef ISP2401
+ ,
+ const struct ia_css_coordinate *internal_frame_origin_bqs_on_sctbl, /* Origin of internal frame
+ positioned on shading table at shading correction in ISP. */
+ const struct ia_css_isp_parameters *params
+#endif
+ )
+{
+ /* Get first stage */
+ struct ia_css_pipeline_stage *stage = NULL;
+ struct ia_css_binary *first_binary = NULL;
+ struct ia_css_pipe *pipe = NULL;
+ unsigned num;
+
+ enum ia_css_pipe_id pipe_id = id;
+ unsigned int thread_id;
+ uint8_t if_config_index, tmp_if_config_index;
+
+ assert(me != NULL);
+
+#if !defined(HAS_NO_INPUT_SYSTEM)
+ assert(me->stages != NULL);
+
+ first_binary = me->stages->binary;
+
+ if (input_mode == IA_CSS_INPUT_MODE_SENSOR ||
+ input_mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR) {
+ assert(port_id < N_MIPI_PORT_ID);
+ if (port_id >= N_MIPI_PORT_ID) /* should not happen but KW does not know */
+ return; /* we should be able to return an error */
+ if_config_index = (uint8_t) (port_id - MIPI_PORT0_ID);
+ } else if (input_mode == IA_CSS_INPUT_MODE_MEMORY) {
+ if_config_index = SH_CSS_IF_CONFIG_NOT_NEEDED;
+ } else {
+ if_config_index = 0x0;
+ }
+#else
+ (void)input_mode;
+ if_config_index = SH_CSS_IF_CONFIG_NOT_NEEDED;
+#endif
+
+ ia_css_pipeline_get_sp_thread_id(pipe_num, &thread_id);
+ memset(&sh_css_sp_group.pipe[thread_id], 0, sizeof(struct sh_css_sp_pipeline));
+
+ /* Count stages */
+ for (stage = me->stages, num = 0; stage; stage = stage->next, num++) {
+ stage->stage_num = num;
+ ia_css_debug_pipe_graph_dump_stage(stage, id);
+ }
+ me->num_stages = num;
+
+ if (first_binary != NULL) {
+ /* Init pipeline data */
+ sh_css_sp_init_group(two_ppc, first_binary->input_format,
+ offline, if_config_index);
+ } /* if (first_binary != NULL) */
+
+#if defined(USE_INPUT_SYSTEM_VERSION_2401) || defined(USE_INPUT_SYSTEM_VERSION_2)
+ /* Signal the host immediately after start for SP_ISYS_COPY only */
+ if ((me->num_stages == 1) && me->stages &&
+ (me->stages->sp_func == IA_CSS_PIPELINE_ISYS_COPY))
+ sh_css_sp_group.config.no_isp_sync = true;
+#endif
+
+ /* Init stage data */
+ sh_css_init_host2sp_frame_data();
+
+ sh_css_sp_group.pipe[thread_id].num_stages = 0;
+ sh_css_sp_group.pipe[thread_id].pipe_id = pipe_id;
+ sh_css_sp_group.pipe[thread_id].thread_id = thread_id;
+ sh_css_sp_group.pipe[thread_id].pipe_num = pipe_num;
+ sh_css_sp_group.pipe[thread_id].num_execs = me->num_execs;
+ sh_css_sp_group.pipe[thread_id].pipe_qos_config = me->pipe_qos_config;
+ sh_css_sp_group.pipe[thread_id].required_bds_factor = required_bds_factor;
+#if !defined(HAS_NO_INPUT_SYSTEM)
+ sh_css_sp_group.pipe[thread_id].input_system_mode
+ = (uint32_t)input_mode;
+ sh_css_sp_group.pipe[thread_id].port_id = port_id;
+#endif
+ sh_css_sp_group.pipe[thread_id].dvs_frame_delay = (uint32_t)me->dvs_frame_delay;
+
+ /* TODO: next indicates from which queues parameters need to be
+ sampled, needs checking/improvement */
+ if (ia_css_pipeline_uses_params(me)) {
+ sh_css_sp_group.pipe[thread_id].pipe_config =
+ SH_CSS_PIPE_CONFIG_SAMPLE_PARAMS << thread_id;
+ }
+
+ /* For continuous use-cases, SP copy is responsible for sampling the
+ * parameters */
+ if (continuous)
+ sh_css_sp_group.pipe[thread_id].pipe_config = 0;
+
+ sh_css_sp_group.pipe[thread_id].inout_port_config = me->inout_port_config;
+
+ pipe = find_pipe_by_num(pipe_num);
+ assert(pipe != NULL);
+ if (pipe == NULL) {
+ return;
+ }
+ sh_css_sp_group.pipe[thread_id].scaler_pp_lut = sh_css_pipe_get_pp_gdc_lut(pipe);
+
+#if defined(SH_CSS_ENABLE_METADATA)
+ if (md_info != NULL && md_info->size > 0) {
+ sh_css_sp_group.pipe[thread_id].metadata.width = md_info->resolution.width;
+ sh_css_sp_group.pipe[thread_id].metadata.height = md_info->resolution.height;
+ sh_css_sp_group.pipe[thread_id].metadata.stride = md_info->stride;
+ sh_css_sp_group.pipe[thread_id].metadata.size = md_info->size;
+ ia_css_isys_convert_stream_format_to_mipi_format(
+ md_config->data_type, MIPI_PREDICTOR_NONE,
+ &sh_css_sp_group.pipe[thread_id].metadata.format);
+ }
+#else
+ (void)md_config;
+ (void)md_info;
+#endif
+
+#if defined(SH_CSS_ENABLE_PER_FRAME_PARAMS)
+ sh_css_sp_group.pipe[thread_id].output_frame_queue_id = (uint32_t)SH_CSS_INVALID_QUEUE_ID;
+ if (IA_CSS_PIPE_ID_COPY != pipe_id) {
+ ia_css_query_internal_queue_id(IA_CSS_BUFFER_TYPE_OUTPUT_FRAME, thread_id, (enum sh_css_queue_id *)(&sh_css_sp_group.pipe[thread_id].output_frame_queue_id));
+ }
+#endif
+
+#ifdef ISP2401
+ /* For the shading correction type 1 (the legacy shading table conversion in css is not used),
+ * the parameters are passed to the isp for the shading table centering.
+ */
+ if (internal_frame_origin_bqs_on_sctbl != NULL &&
+ params != NULL && params->shading_settings.enable_shading_table_conversion == 0) {
+ sh_css_sp_group.pipe[thread_id].shading.internal_frame_origin_x_bqs_on_sctbl
+ = (uint32_t)internal_frame_origin_bqs_on_sctbl->x;
+ sh_css_sp_group.pipe[thread_id].shading.internal_frame_origin_y_bqs_on_sctbl
+ = (uint32_t)internal_frame_origin_bqs_on_sctbl->y;
+ } else {
+ sh_css_sp_group.pipe[thread_id].shading.internal_frame_origin_x_bqs_on_sctbl = 0;
+ sh_css_sp_group.pipe[thread_id].shading.internal_frame_origin_y_bqs_on_sctbl = 0;
+ }
+
+#endif
+ IA_CSS_LOG("pipe_id %d port_config %08x",
+ pipe_id, sh_css_sp_group.pipe[thread_id].inout_port_config);
+
+ for (stage = me->stages, num = 0; stage; stage = stage->next, num++) {
+ sh_css_sp_group.pipe[thread_id].num_stages++;
+ if (is_sp_stage(stage)) {
+ sp_init_sp_stage(stage, pipe_num, two_ppc,
+ copy_ovrd, if_config_index);
+ } else {
+ if ((stage->stage_num != 0) || SH_CSS_PIPE_PORT_CONFIG_IS_CONTINUOUS(me->inout_port_config))
+ tmp_if_config_index = SH_CSS_IF_CONFIG_NOT_NEEDED;
+ else
+ tmp_if_config_index = if_config_index;
+ sp_init_stage(stage, pipe_num,
+ xnr, tmp_if_config_index, two_ppc);
+ }
+
+ store_sp_stage_data(pipe_id, pipe_num, num);
+ }
+ sh_css_sp_group.pipe[thread_id].pipe_config |= (uint32_t)
+ (me->acquire_isp_each_stage << IA_CSS_ACQUIRE_ISP_POS);
+ store_sp_group_data();
+
+}
+
+void
+sh_css_sp_uninit_pipeline(unsigned int pipe_num)
+{
+ unsigned int thread_id;
+ ia_css_pipeline_get_sp_thread_id(pipe_num, &thread_id);
+ /*memset(&sh_css_sp_group.pipe[thread_id], 0, sizeof(struct sh_css_sp_pipeline));*/
+ sh_css_sp_group.pipe[thread_id].num_stages = 0;
+}
+
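+/* Post a command in the host2sp mailbox. By design the previous command
+ * should already have been handled by the SP (host2sp_cmd_ready); an error
+ * is logged otherwise. Returns whether the previous command was ready.
+ */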
+bool sh_css_write_host2sp_command(enum host2sp_commands host2sp_command)
+{
+ unsigned int HIVE_ADDR_host_sp_com = sh_css_sp_fw.info.sp.host_sp_com;
+ unsigned int offset = (unsigned int)offsetof(struct host_sp_communication, host2sp_command)
+ / sizeof(int);
+ enum host2sp_commands last_cmd = host2sp_cmd_error;
+ (void)HIVE_ADDR_host_sp_com; /* Suppress warnings in CRUN */
+
+ /* Previous command must be handled by SP (by design) */
+ last_cmd = load_sp_array_uint(host_sp_com, offset);
+ if (last_cmd != host2sp_cmd_ready)
+ IA_CSS_ERROR("last host command not handled by SP(%d)", last_cmd);
+
+ store_sp_array_uint(host_sp_com, offset, host2sp_command);
+
+ return (last_cmd == host2sp_cmd_ready);
+}
+
+enum host2sp_commands
+sh_css_read_host2sp_command(void)
+{
+ unsigned int HIVE_ADDR_host_sp_com = sh_css_sp_fw.info.sp.host_sp_com;
+ unsigned int offset = (unsigned int)offsetof(struct host_sp_communication, host2sp_command)
+ / sizeof(int);
+ (void)HIVE_ADDR_host_sp_com; /* Suppress warnings in CRUN */
+ return (enum host2sp_commands)load_sp_array_uint(host_sp_com, offset);
+}
+
+
+/*
+ * Frame data is no longer part of the sp_stage structure but part of a
+ * separate structure. The aim is to make the sp_data struct static
+ * (it defines a pipeline) while the dynamic (per frame) data is stored
+ * separately.
+ *
+ * This function must be called first everywhere a new pipeline is
+ * constructed by defining one or more stages with use of the variable
+ * sh_css_sp_stage. This includes the special cases like accelerator and
+ * copy_frame, which have a pipeline of just one stage.
+ */
+void
+sh_css_init_host2sp_frame_data(void)
+{
+ /* Clean table */
+ unsigned int HIVE_ADDR_host_sp_com = sh_css_sp_fw.info.sp.host_sp_com;
+
+ (void)HIVE_ADDR_host_sp_com; /* Suppress warnings in CRUN */
+ /*
+ * rvanimme: don't clean it, to preserve static frame info like ref_in,
+ * ref_out and tnr_frames. Once this static data is in a
+ * separate data struct, this may be enabled (but still, there is
+ * no need for it)
+ */
+}
+
+
+/**
+ * @brief Update the offline frame information in host_sp_communication.
+ * Refer to "sh_css_sp.h" for more details.
+ */
+void
+sh_css_update_host2sp_offline_frame(
+ unsigned frame_num,
+ struct ia_css_frame *frame,
+ struct ia_css_metadata *metadata)
+{
+ unsigned int HIVE_ADDR_host_sp_com;
+ unsigned int offset;
+
+ (void)HIVE_ADDR_host_sp_com; /* Suppress warnings in CRUN */
+
+ assert(frame_num < NUM_CONTINUOUS_FRAMES);
+
+ /* Write new frame data into SP DMEM */
+ HIVE_ADDR_host_sp_com = sh_css_sp_fw.info.sp.host_sp_com;
+ offset = (unsigned int)offsetof(struct host_sp_communication, host2sp_offline_frames)
+ / sizeof(int);
+ offset += frame_num;
+ store_sp_array_uint(host_sp_com, offset, frame ? frame->data : 0);
+
+ /* Write metadata buffer into SP DMEM */
+ offset = (unsigned int)offsetof(struct host_sp_communication, host2sp_offline_metadata)
+ / sizeof(int);
+ offset += frame_num;
+ store_sp_array_uint(host_sp_com, offset, metadata ? metadata->address : 0);
+}
+
+#if defined(USE_INPUT_SYSTEM_VERSION_2) || defined(USE_INPUT_SYSTEM_VERSION_2401)
+/**
+ * @brief Update the mipi frame information in host_sp_communication.
+ * Refer to "sh_css_sp.h" for more details.
+ */
+void
+sh_css_update_host2sp_mipi_frame(
+ unsigned frame_num,
+ struct ia_css_frame *frame)
+{
+ unsigned int HIVE_ADDR_host_sp_com;
+ unsigned int offset;
+
+ (void)HIVE_ADDR_host_sp_com; /* Suppress warnings in CRUN */
+
+ /* MIPI buffers are dedicated to a port, so now there are more of them. */
+ assert(frame_num < (N_CSI_PORTS * NUM_MIPI_FRAMES_PER_STREAM));
+
+ /* Write new frame data into SP DMEM */
+ HIVE_ADDR_host_sp_com = sh_css_sp_fw.info.sp.host_sp_com;
+ offset = (unsigned int)offsetof(struct host_sp_communication, host2sp_mipi_frames)
+ / sizeof(int);
+ offset += frame_num;
+
+ store_sp_array_uint(host_sp_com, offset,
+ frame ? frame->data : 0);
+}
+
+/**
+ * @brief Update the mipi metadata information in host_sp_communication.
+ * Refer to "sh_css_sp.h" for more details.
+ */
+void
+sh_css_update_host2sp_mipi_metadata(
+ unsigned frame_num,
+ struct ia_css_metadata *metadata)
+{
+ unsigned int HIVE_ADDR_host_sp_com;
+ unsigned int o;
+
+ (void)HIVE_ADDR_host_sp_com; /* Suppress warnings in CRUN */
+
+ /* MIPI buffers are dedicated to a port, so now there are more of them. */
+ assert(frame_num < (N_CSI_PORTS * NUM_MIPI_FRAMES_PER_STREAM));
+
+ /* Write new frame data into SP DMEM */
+ HIVE_ADDR_host_sp_com = sh_css_sp_fw.info.sp.host_sp_com;
+ o = offsetof(struct host_sp_communication, host2sp_mipi_metadata)
+ / sizeof(int);
+ o += frame_num;
+ store_sp_array_uint(host_sp_com, o,
+ metadata ? metadata->address : 0);
+}
+
+void
+sh_css_update_host2sp_num_mipi_frames(unsigned num_frames)
+{
+ unsigned int HIVE_ADDR_host_sp_com;
+ unsigned int offset;
+
+ (void)HIVE_ADDR_host_sp_com; /* Suppress warnings in CRUN */
+
+ /* Write new frame data into SP DMEM */
+ HIVE_ADDR_host_sp_com = sh_css_sp_fw.info.sp.host_sp_com;
+ offset = (unsigned int)offsetof(struct host_sp_communication, host2sp_num_mipi_frames)
+ / sizeof(int);
+
+ store_sp_array_uint(host_sp_com, offset, num_frames);
+}
+#endif
+
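+/* Update the continuous raw frame counts in host_sp_communication: with
+ * set_avail the available count is written and the extra count (requested
+ * minus previously available) is updated; otherwise the target count is set.
+ */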
+void
+sh_css_update_host2sp_cont_num_raw_frames(unsigned num_frames, bool set_avail)
+{
+ const struct ia_css_fw_info *fw;
+ unsigned int HIVE_ADDR_host_sp_com;
+ unsigned int extra_num_frames, avail_num_frames;
+ unsigned int offset, offset_extra;
+
+ (void)HIVE_ADDR_host_sp_com; /* Suppress warnings in CRUN */
+
+ /* Write new frame data into SP DMEM */
+ fw = &sh_css_sp_fw;
+ HIVE_ADDR_host_sp_com = fw->info.sp.host_sp_com;
+ if (set_avail) {
+ offset = (unsigned int)offsetof(struct host_sp_communication, host2sp_cont_avail_num_raw_frames)
+ / sizeof(int);
+ avail_num_frames = load_sp_array_uint(host_sp_com, offset);
+ extra_num_frames = num_frames - avail_num_frames;
+ offset_extra = (unsigned int)offsetof(struct host_sp_communication, host2sp_cont_extra_num_raw_frames)
+ / sizeof(int);
+ store_sp_array_uint(host_sp_com, offset_extra, extra_num_frames);
+ } else
+ offset = (unsigned int)offsetof(struct host_sp_communication, host2sp_cont_target_num_raw_frames)
+ / sizeof(int);
+
+ store_sp_array_uint(host_sp_com, offset, num_frames);
+}
+
+void
+sh_css_event_init_irq_mask(void)
+{
+ int i;
+ unsigned int HIVE_ADDR_host_sp_com = sh_css_sp_fw.info.sp.host_sp_com;
+ unsigned int offset;
+ struct sh_css_event_irq_mask event_irq_mask_init;
+
+ event_irq_mask_init.or_mask = IA_CSS_EVENT_TYPE_ALL;
+ event_irq_mask_init.and_mask = IA_CSS_EVENT_TYPE_NONE;
+ (void)HIVE_ADDR_host_sp_com; /* Suppress warnings in CRUN */
+
+ assert(sizeof(event_irq_mask_init) % HRT_BUS_BYTES == 0);
+ for (i = 0; i < IA_CSS_PIPE_ID_NUM; i++) {
+ offset = (unsigned int)offsetof(struct host_sp_communication,
+ host2sp_event_irq_mask[i]);
+ assert(offset % HRT_BUS_BYTES == 0);
+ sp_dmem_store(SP0_ID,
+ (unsigned int)sp_address_of(host_sp_com) + offset,
+ &event_irq_mask_init, sizeof(event_irq_mask_init));
+ }
+
+}
+
+enum ia_css_err
+ia_css_pipe_set_irq_mask(struct ia_css_pipe *pipe,
+ unsigned int or_mask,
+ unsigned int and_mask)
+{
+ unsigned int HIVE_ADDR_host_sp_com = sh_css_sp_fw.info.sp.host_sp_com;
+ unsigned int offset;
+ struct sh_css_event_irq_mask event_irq_mask;
+ unsigned int pipe_num;
+
+ assert(pipe != NULL);
+
+ assert(IA_CSS_PIPE_ID_NUM == NR_OF_PIPELINES);
+ /* Linux kernel does not have UINT16_MAX
+ * Therefore these 2 asserts are disabled for Linux builds
+ * Alternatives that were not chosen:
+ * - add a conditional #define for UINT16_MAX
+ * - compare with (uint16_t)~0 or 0xffff
+ * - different assert for Linux and Windows
+ */
+#ifndef __KERNEL__
+ assert(or_mask <= UINT16_MAX);
+ assert(and_mask <= UINT16_MAX);
+#endif
+
+ (void)HIVE_ADDR_host_sp_com; /* Suppress warnings in CRUN */
+
+ IA_CSS_LOG("or_mask=%x, and_mask=%x", or_mask, and_mask);
+ event_irq_mask.or_mask = (uint16_t)or_mask;
+ event_irq_mask.and_mask = (uint16_t)and_mask;
+
+ pipe_num = ia_css_pipe_get_pipe_num(pipe);
+ if (pipe_num >= IA_CSS_PIPE_ID_NUM)
+ return IA_CSS_ERR_INTERNAL_ERROR;
+ offset = (unsigned int)offsetof(struct host_sp_communication,
+ host2sp_event_irq_mask[pipe_num]);
+ assert(offset % HRT_BUS_BYTES == 0);
+ sp_dmem_store(SP0_ID,
+ (unsigned int)sp_address_of(host_sp_com) + offset,
+ &event_irq_mask, sizeof(event_irq_mask));
+
+ return IA_CSS_SUCCESS;
+}
+
+enum ia_css_err
+ia_css_event_get_irq_mask(const struct ia_css_pipe *pipe,
+ unsigned int *or_mask,
+ unsigned int *and_mask)
+{
+ unsigned int HIVE_ADDR_host_sp_com = sh_css_sp_fw.info.sp.host_sp_com;
+ unsigned int offset;
+ struct sh_css_event_irq_mask event_irq_mask;
+ unsigned int pipe_num;
+
+ (void)HIVE_ADDR_host_sp_com; /* Suppress warnings in CRUN */
+
+ IA_CSS_ENTER_LEAVE("");
+
+ assert(pipe != NULL);
+ assert(IA_CSS_PIPE_ID_NUM == NR_OF_PIPELINES);
+
+ pipe_num = ia_css_pipe_get_pipe_num(pipe);
+ if (pipe_num >= IA_CSS_PIPE_ID_NUM)
+ return IA_CSS_ERR_INTERNAL_ERROR;
+ offset = (unsigned int)offsetof(struct host_sp_communication,
+ host2sp_event_irq_mask[pipe_num]);
+ assert(offset % HRT_BUS_BYTES == 0);
+ sp_dmem_load(SP0_ID,
+ (unsigned int)sp_address_of(host_sp_com) + offset,
+ &event_irq_mask, sizeof(event_irq_mask));
+
+ if (or_mask)
+ *or_mask = event_irq_mask.or_mask;
+
+ if (and_mask)
+ *and_mask = event_irq_mask.and_mask;
+
+ return IA_CSS_SUCCESS;
+}
+
+void
+sh_css_sp_set_sp_running(bool flag)
+{
+ sp_running = flag;
+}
+
+bool
+sh_css_sp_is_running(void)
+{
+ return sp_running;
+}
+
+void
+sh_css_sp_start_isp(void)
+{
+ const struct ia_css_fw_info *fw;
+ unsigned int HIVE_ADDR_sp_sw_state;
+
+ fw = &sh_css_sp_fw;
+ HIVE_ADDR_sp_sw_state = fw->info.sp.sw_state;
+
+
+ if (sp_running)
+ return;
+
+ (void)HIVE_ADDR_sp_sw_state; /* Suppress warnings in CRUN */
+
+ /* no longer here, sp started immediately */
+ /*ia_css_debug_pipe_graph_dump_epilogue();*/
+
+ store_sp_group_data();
+ store_sp_per_frame_data(fw);
+
+ sp_dmem_store_uint32(SP0_ID,
+ (unsigned int)sp_address_of(sp_sw_state),
+ (uint32_t)(IA_CSS_SP_SW_TERMINATED));
+
+
+ /* Note 1: The sp_start_isp function contains a wait till
+ * the input network is configured by the SP.
+ * Note 2: Not all SP binaries supports host2sp_commands.
+ * In case a binary does support it, the host2sp_command
+ * will have status cmd_ready after return of the function
+ * sh_css_hrt_sp_start_isp. There is no race-condition here
+ * because only after the process_frame command has been
+ * received, the SP starts configuring the input network.
+ */
+
+ /* we need to set sp_running before we call ia_css_mmu_invalidate_cache
+ * as ia_css_mmu_invalidate_cache checks on sp_running to
+ * avoid that it accesses dmem while the SP is not powered
+ */
+ sp_running = true;
+ ia_css_mmu_invalidate_cache();
+ /* Invalidate all MMU caches */
+ mmu_invalidate_cache_all();
+
+ ia_css_spctrl_start(SP0_ID);
+
+}
+
+bool
+ia_css_isp_has_started(void)
+{
+ const struct ia_css_fw_info *fw = &sh_css_sp_fw;
+ unsigned int HIVE_ADDR_ia_css_ispctrl_sp_isp_started = fw->info.sp.isp_started;
+ (void)HIVE_ADDR_ia_css_ispctrl_sp_isp_started; /* Suppress warnings in CRUN */
+
+ return (bool)load_sp_uint(ia_css_ispctrl_sp_isp_started);
+}
+
+
+/**
+ * @brief Initialize the DMA software-mask in the debug mode.
+ * Refer to "sh_css_sp.h" for more details.
+ */
+bool
+sh_css_sp_init_dma_sw_reg(int dma_id)
+{
+ int i;
+
+ /* enable all the DMA channels */
+ for (i = 0; i < N_DMA_CHANNEL_ID; i++) {
+ /* enable the writing request */
+ sh_css_sp_set_dma_sw_reg(dma_id,
+ i,
+ 0,
+ true);
+ /* enable the reading request */
+ sh_css_sp_set_dma_sw_reg(dma_id,
+ i,
+ 1,
+ true);
+ }
+
+ return true;
+}
+
+/**
+ * @brief Set the DMA software-mask in the debug mode.
+ * Refer to "sh_css_sp.h" for more details.
+ */
+bool
+sh_css_sp_set_dma_sw_reg(int dma_id,
+ int channel_id,
+ int request_type,
+ bool enable)
+{
+ uint32_t sw_reg;
+ uint32_t bit_val;
+ uint32_t bit_offset;
+ uint32_t bit_mask;
+
+ (void)dma_id;
+
+ assert(channel_id >= 0 && channel_id < N_DMA_CHANNEL_ID);
+ assert(request_type >= 0);
+
+ /* get the software-mask */
+ sw_reg =
+ sh_css_sp_group.debug.dma_sw_reg;
+
+ /* get the offset of the target bit */
+ bit_offset = (8 * request_type) + channel_id;
+
+ /* clear the value of the target bit */
+ bit_mask = ~(1 << bit_offset);
+ sw_reg &= bit_mask;
+
+ /* set the value of the bit for the DMA channel */
+ bit_val = enable ? 1 : 0;
+ bit_val <<= bit_offset;
+ sw_reg |= bit_val;
+
+ /* update the software status of DMA channels */
+ sh_css_sp_group.debug.dma_sw_reg = sw_reg;
+
+ return true;
+}
+
+void
+sh_css_sp_reset_global_vars(void)
+{
+ memset(&sh_css_sp_group, 0, sizeof(struct sh_css_sp_group));
+ memset(&sh_css_sp_stage, 0, sizeof(struct sh_css_sp_stage));
+ memset(&sh_css_isp_stage, 0, sizeof(struct sh_css_isp_stage));
+ memset(&sh_css_sp_output, 0, sizeof(struct sh_css_sp_output));
+ memset(&per_frame_data, 0, sizeof(struct sh_css_sp_per_frame_data));
+}
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_sp.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_sp.h
new file mode 100644
index 000000000000..98444a3cc3e4
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_sp.h
@@ -0,0 +1,248 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _SH_CSS_SP_H_
+#define _SH_CSS_SP_H_
+
+#include <system_global.h>
+#include <type_support.h>
+#if !defined(HAS_NO_INPUT_FORMATTER)
+#include "input_formatter.h"
+#endif
+
+#include "ia_css_binary.h"
+#include "ia_css_types.h"
+#include "ia_css_pipeline.h"
+
+/* Function to initialize the data and bss section descr of the binary */
+void
+sh_css_sp_store_init_dmem(const struct ia_css_fw_info *fw);
+
+void
+store_sp_stage_data(enum ia_css_pipe_id id, unsigned int pipe_num, unsigned stage);
+
+void
+sh_css_stage_write_binary_info(struct ia_css_binary_info *info);
+
+void
+store_sp_group_data(void);
+
+/* Start binary (jpeg) copy on the SP */
+void
+sh_css_sp_start_binary_copy(unsigned int pipe_num, struct ia_css_frame *out_frame,
+ unsigned two_ppc);
+
+unsigned int
+sh_css_sp_get_binary_copy_size(void);
+
+/* Return the value of a SW interrupt */
+unsigned int
+sh_css_sp_get_sw_interrupt_value(unsigned int irq);
+
+void
+sh_css_sp_init_pipeline(struct ia_css_pipeline *me,
+ enum ia_css_pipe_id id,
+ uint8_t pipe_num,
+ bool xnr,
+ bool two_ppc,
+ bool continuous,
+ bool offline,
+ unsigned int required_bds_factor,
+ enum sh_css_pipe_config_override copy_ovrd,
+ enum ia_css_input_mode input_mode,
+ const struct ia_css_metadata_config *md_config,
+ const struct ia_css_metadata_info *md_info,
+#if !defined(HAS_NO_INPUT_SYSTEM)
+ const mipi_port_ID_t port_id
+#endif
+#ifdef ISP2401
+ ,
+ const struct ia_css_coordinate *internal_frame_origin_bqs_on_sctbl, /* Origin of internal frame
+ positioned on shading table at shading correction in ISP. */
+ const struct ia_css_isp_parameters *params
+#endif
+ );
+
+void
+sh_css_sp_uninit_pipeline(unsigned int pipe_num);
+
+bool sh_css_write_host2sp_command(enum host2sp_commands host2sp_command);
+
+enum host2sp_commands
+sh_css_read_host2sp_command(void);
+
+void
+sh_css_init_host2sp_frame_data(void);
+
+/**
+ * @brief Update the offline frame information in host_sp_communication.
+ *
+ * @param[in] frame_num The offline frame number.
+ * @param[in] frame The pointer to the offline frame.
+ */
+void
+sh_css_update_host2sp_offline_frame(
+ unsigned frame_num,
+ struct ia_css_frame *frame,
+ struct ia_css_metadata *metadata);
+
+#if defined(USE_INPUT_SYSTEM_VERSION_2) || defined(USE_INPUT_SYSTEM_VERSION_2401)
+/**
+ * @brief Update the mipi frame information in host_sp_communication.
+ *
+ * @param[in] frame_num The mipi frame number.
+ * @param[in] frame The pointer to the mipi frame.
+ */
+void
+sh_css_update_host2sp_mipi_frame(
+ unsigned frame_num,
+ struct ia_css_frame *frame);
+
+/**
+ * @brief Update the mipi metadata information in host_sp_communication.
+ *
+ * @param[in] frame_num The mipi frame number.
+ * @param[in] metadata The pointer to the mipi metadata.
+ */
+void
+sh_css_update_host2sp_mipi_metadata(
+ unsigned frame_num,
+ struct ia_css_metadata *metadata);
+
+/**
+ * @brief Update the nr of mipi frames to use in host_sp_communication.
+ *
+ * @param[in] num_frames The number of mipi frames to use.
+ */
+void
+sh_css_update_host2sp_num_mipi_frames(unsigned num_frames);
+#endif
+
+/**
+ * @brief Update the nr of offline frames to use in host_sp_communication.
+ *
+ * @param[in] num_frames The number of raw frames to use.
+ */
+void
+sh_css_update_host2sp_cont_num_raw_frames(unsigned num_frames, bool set_avail);
+
+void
+sh_css_event_init_irq_mask(void);
+
+void
+sh_css_sp_start_isp(void);
+
+void
+sh_css_sp_set_sp_running(bool flag);
+
+bool
+sh_css_sp_is_running(void);
+
+#if SP_DEBUG != SP_DEBUG_NONE
+
+void
+sh_css_sp_get_debug_state(struct sh_css_sp_debug_state *state);
+
+#endif
+
+#if !defined(HAS_NO_INPUT_FORMATTER)
+void
+sh_css_sp_set_if_configs(
+ const input_formatter_cfg_t *config_a,
+ const input_formatter_cfg_t *config_b,
+ const uint8_t if_config_index);
+#endif
+
+void
+sh_css_sp_program_input_circuit(int fmt_type,
+ int ch_id,
+ enum ia_css_input_mode input_mode);
+
+void
+sh_css_sp_configure_sync_gen(int width,
+ int height,
+ int hblank_cycles,
+ int vblank_cycles);
+
+void
+sh_css_sp_configure_tpg(int x_mask,
+ int y_mask,
+ int x_delta,
+ int y_delta,
+ int xy_mask);
+
+void
+sh_css_sp_configure_prbs(int seed);
+
+void
+sh_css_sp_configure_enable_raw_pool_locking(bool lock_all);
+
+void
+sh_css_sp_enable_isys_event_queue(bool enable);
+
+void
+sh_css_sp_set_disable_continuous_viewfinder(bool flag);
+
+void
+sh_css_sp_reset_global_vars(void);
+
+/**
+ * @brief Initialize the DMA software-mask in the debug mode.
+ * This API should ONLY be called in the debugging mode, and it
+ * must always be called before the first call of
+ * "sh_css_sp_set_dma_sw_reg(...)".
+ *
+ * @param[in] dma_id The ID of the target DMA.
+ *
+ * @return
+ * - true, if it is successful.
+ * - false, otherwise.
+ */
+bool
+sh_css_sp_init_dma_sw_reg(int dma_id);
+
+/**
+ * @brief Set the DMA software-mask in the debug mode.
+ * This API should ONLY be called in the debugging mode.
+ * "sh_css_sp_init_dma_sw_reg(...)" must be called before this
+ * API is called for the first time.
+ *
+ * @param[in] dma_id The ID of the target DMA.
+ * @param[in] channel_id The ID of the target DMA channel.
+ * @param[in] request_type The type of the DMA request.
+ * For example:
+ * - "0" indicates the writing request.
+ * - "1" indicates the reading request.
+ *
+ * @param[in] enable If it is "true", the target DMA
+ * channel is enabled in the software.
+ * Otherwise, the target DMA channel
+ * is disabled in the software.
+ *
+ * @return
+ * - true, if it is successful.
+ * - false, otherwise.
+ */
+bool
+sh_css_sp_set_dma_sw_reg(int dma_id,
+ int channel_id,
+ int request_type,
+ bool enable);
+
+
+extern struct sh_css_sp_group sh_css_sp_group;
+extern struct sh_css_sp_stage sh_css_sp_stage;
+extern struct sh_css_isp_stage sh_css_isp_stage;
+
+#endif /* _SH_CSS_SP_H_ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_stream.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_stream.c
new file mode 100644
index 000000000000..60bddbb3d4c6
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_stream.c
@@ -0,0 +1,16 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+/* This file will contain the code to implement the functions declared in ia_css_stream.h
+ and associated helper functions */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_stream_format.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_stream_format.c
new file mode 100644
index 000000000000..52d0a6471597
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_stream_format.c
@@ -0,0 +1,76 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "sh_css_stream_format.h"
+#include <ia_css_stream_format.h>
+
+unsigned int sh_css_stream_format_2_bits_per_subpixel(
+ enum ia_css_stream_format format)
+{
+ unsigned int rval;
+
+ switch (format) {
+ case IA_CSS_STREAM_FORMAT_RGB_444:
+ rval = 4;
+ break;
+ case IA_CSS_STREAM_FORMAT_RGB_555:
+ rval = 5;
+ break;
+ case IA_CSS_STREAM_FORMAT_RGB_565:
+ case IA_CSS_STREAM_FORMAT_RGB_666:
+ case IA_CSS_STREAM_FORMAT_RAW_6:
+ rval = 6;
+ break;
+ case IA_CSS_STREAM_FORMAT_RAW_7:
+ rval = 7;
+ break;
+ case IA_CSS_STREAM_FORMAT_YUV420_8_LEGACY:
+ case IA_CSS_STREAM_FORMAT_YUV420_8:
+ case IA_CSS_STREAM_FORMAT_YUV422_8:
+ case IA_CSS_STREAM_FORMAT_RGB_888:
+ case IA_CSS_STREAM_FORMAT_RAW_8:
+ case IA_CSS_STREAM_FORMAT_BINARY_8:
+ case IA_CSS_STREAM_FORMAT_USER_DEF1:
+ case IA_CSS_STREAM_FORMAT_USER_DEF2:
+ case IA_CSS_STREAM_FORMAT_USER_DEF3:
+ case IA_CSS_STREAM_FORMAT_USER_DEF4:
+ case IA_CSS_STREAM_FORMAT_USER_DEF5:
+ case IA_CSS_STREAM_FORMAT_USER_DEF6:
+ case IA_CSS_STREAM_FORMAT_USER_DEF7:
+ case IA_CSS_STREAM_FORMAT_USER_DEF8:
+ rval = 8;
+ break;
+ case IA_CSS_STREAM_FORMAT_YUV420_10:
+ case IA_CSS_STREAM_FORMAT_YUV422_10:
+ case IA_CSS_STREAM_FORMAT_RAW_10:
+ rval = 10;
+ break;
+ case IA_CSS_STREAM_FORMAT_RAW_12:
+ rval = 12;
+ break;
+ case IA_CSS_STREAM_FORMAT_RAW_14:
+ rval = 14;
+ break;
+ case IA_CSS_STREAM_FORMAT_RAW_16:
+ case IA_CSS_STREAM_FORMAT_YUV420_16:
+ case IA_CSS_STREAM_FORMAT_YUV422_16:
+ rval = 16;
+ break;
+ default:
+ rval = 0;
+ break;
+ }
+
+ return rval;
+}
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_stream_format.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_stream_format.h
new file mode 100644
index 000000000000..aab2b6207051
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_stream_format.h
@@ -0,0 +1,23 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __SH_CSS_STREAM_FORMAT_H
+#define __SH_CSS_STREAM_FORMAT_H
+
+#include <ia_css_stream_format.h>
+
+unsigned int sh_css_stream_format_2_bits_per_subpixel(
+ enum ia_css_stream_format format);
+
+#endif /* __SH_CSS_STREAM_FORMAT_H */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_struct.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_struct.h
new file mode 100644
index 000000000000..e49e478ab354
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_struct.h
@@ -0,0 +1,80 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __SH_CSS_STRUCT_H
+#define __SH_CSS_STRUCT_H
+
+/* This header file contains the definition of the
+   sh_css struct and friends; logically the file would
+   probably be called sh_css.h after the pattern
+   <type>.h, but sh_css.h is the predecessor of ia_css.h,
+   so that name could cause confusion; hence the _struct
+   in the filename.
+*/
+
+#include <type_support.h>
+#include <system_types.h>
+#include "ia_css_pipeline.h"
+#include "ia_css_pipe_public.h"
+#include "ia_css_frame_public.h"
+#include "ia_css_queue.h"
+#include "ia_css_irq.h"
+
+struct sh_css {
+ struct ia_css_pipe *active_pipes[IA_CSS_PIPELINE_NUM_MAX];
+ /* All of the pipes created at any point of time. At this moment there can
+ * be no more than MAX_SP_THREADS of them because pipe_num is reused as SP
+ * thread_id to which a pipe's pipeline is associated. At a later point, if
+ * we support more pipe objects, we should add test code to test that
+ * possibility. Also, active_pipes[] should be able to hold only
+ * SH_CSS_MAX_SP_THREADS objects. Anything else is misleading. */
+ struct ia_css_pipe *all_pipes[IA_CSS_PIPELINE_NUM_MAX];
+ void * (*malloc)(size_t bytes, bool zero_mem);
+ void (*free)(void *ptr);
+#ifdef ISP2401
+ void * (*malloc_ex)(size_t bytes, bool zero_mem, const char *caller_func, int caller_line);
+ void (*free_ex)(void *ptr, const char *caller_func, int caller_line);
+#endif
+ void (*flush)(struct ia_css_acc_fw *fw);
+ bool check_system_idle;
+#ifndef ISP2401
+ bool stop_copy_preview;
+#endif
+ unsigned int num_cont_raw_frames;
+#if defined(USE_INPUT_SYSTEM_VERSION_2) || defined(USE_INPUT_SYSTEM_VERSION_2401)
+ unsigned int num_mipi_frames[N_CSI_PORTS];
+ struct ia_css_frame *mipi_frames[N_CSI_PORTS][NUM_MIPI_FRAMES_PER_STREAM];
+ struct ia_css_metadata *mipi_metadata[N_CSI_PORTS][NUM_MIPI_FRAMES_PER_STREAM];
+ unsigned int mipi_sizes_for_check[N_CSI_PORTS][IA_CSS_MIPI_SIZE_CHECK_MAX_NOF_ENTRIES_PER_PORT];
+ unsigned int mipi_frame_size[N_CSI_PORTS];
+#endif
+ hrt_vaddress sp_bin_addr;
+ hrt_data page_table_base_index;
+ unsigned int size_mem_words; /** \deprecated{Use ia_css_mipi_buffer_config instead.}*/
+ enum ia_css_irq_type irq_type;
+ unsigned int pipe_counter;
+
+ unsigned int type; /* 2400 or 2401 for now */
+};
+
+#define IPU_2400 1
+#define IPU_2401 2
+
+#define IS_2400() (my_css.type == IPU_2400)
+#define IS_2401() (my_css.type == IPU_2401)
+
+extern struct sh_css my_css;
+
+#endif /* __SH_CSS_STRUCT_H */
+
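A minimal sketch, assuming the host layer has installed the malloc/free hooks above, of how CSS-internal code might allocate through struct sh_css instead of calling an allocator directly; css_zalloc() is a hypothetical helper, not an existing API:

/* Hypothetical helper: zero-filled allocation through the installed hook. */
static inline void *css_zalloc(size_t bytes)
{
	if (!my_css.malloc)
		return NULL;	/* host has not installed the allocator yet */

	return my_css.malloc(bytes, true);	/* true => zero_mem */
}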
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_uds.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_uds.h
new file mode 100644
index 000000000000..5ded3a1437bf
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_uds.h
@@ -0,0 +1,37 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _SH_CSS_UDS_H_
+#define _SH_CSS_UDS_H_
+
+#include <type_support.h>
+
+#define SIZE_OF_SH_CSS_UDS_INFO_IN_BITS (4 * 16)
+#define SIZE_OF_SH_CSS_CROP_POS_IN_BITS (2 * 16)
+
+/* Uds types, used in pipeline_global.h and sh_css_internal.h */
+
+struct sh_css_uds_info {
+ uint16_t curr_dx;
+ uint16_t curr_dy;
+ uint16_t xc;
+ uint16_t yc;
+};
+
+struct sh_css_crop_pos {
+ uint16_t x;
+ uint16_t y;
+};
+
+#endif /* _SH_CSS_UDS_H_ */
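A minimal sketch of how the bit-size macros above could be checked against the struct layouts at compile time; placing the BUILD_BUG_ON() checks in a helper like this is an assumption, not something the driver does:

/* Hypothetical compile-time check that the size macros match the structs. */
static inline void sh_css_uds_check_sizes(void)
{
	BUILD_BUG_ON(sizeof(struct sh_css_uds_info) * 8 !=
		     SIZE_OF_SH_CSS_UDS_INFO_IN_BITS);
	BUILD_BUG_ON(sizeof(struct sh_css_crop_pos) * 8 !=
		     SIZE_OF_SH_CSS_CROP_POS_IN_BITS);
}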
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_version.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_version.c
new file mode 100644
index 000000000000..6e0c5e7f8620
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_version.c
@@ -0,0 +1,30 @@
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "ia_css_version.h"
+#include "ia_css_version_data.h"
+#include "ia_css_err.h"
+#include "sh_css_firmware.h"
+
+enum ia_css_err
+ia_css_get_version(char *version, int max_size)
+{
+ if (max_size <= (int)strlen(CSS_VERSION_STRING) + (int)strlen(sh_css_get_fw_version()) + 5)
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
+ strcpy(version, CSS_VERSION_STRING);
+ strcat(version, "FW:");
+ strcat(version, sh_css_get_fw_version());
+ strcat(version, "; ");
+ return IA_CSS_SUCCESS;
+}
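A minimal caller sketch for ia_css_get_version(); the 256-byte buffer is an assumed size chosen to satisfy the length check above, not a documented limit:

/* Hypothetical caller: log the combined CSS + firmware version string. */
static void print_css_version(void)
{
	char version[256];	/* assumed large enough for the check above */

	if (ia_css_get_version(version, sizeof(version)) == IA_CSS_SUCCESS)
		pr_info("atomisp CSS version: %s\n", version);
}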
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm.c b/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm.c
new file mode 100644
index 000000000000..57295397da3e
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm.c
@@ -0,0 +1,728 @@
+/*
+ * Support for Medfield PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2010-2017 Intel Corporation. All Rights Reserved.
+ *
+ * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+/*
+ * This file contains entry functions for memory management of ISP driver
+ */
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/highmem.h> /* for kmap */
+#include <linux/io.h> /* for page_to_phys */
+#include <linux/sysfs.h>
+
+#include "hmm/hmm.h"
+#include "hmm/hmm_pool.h"
+#include "hmm/hmm_bo.h"
+
+#include "atomisp_internal.h"
+#include "asm/cacheflush.h"
+#include "mmu/isp_mmu.h"
+#include "mmu/sh_mmu_mrfld.h"
+
+struct hmm_bo_device bo_device;
+struct hmm_pool dynamic_pool;
+struct hmm_pool reserved_pool;
+static ia_css_ptr dummy_ptr;
+struct _hmm_mem_stat hmm_mem_stat;
+
+/* p: private
+ s: shared
+ u: user
+ i: ion */
+static const char hmm_bo_type_string[] = "psui";
+
+static ssize_t bo_show(struct device *dev, struct device_attribute *attr,
+ char *buf, struct list_head *bo_list, bool active)
+{
+ ssize_t ret = 0;
+ struct hmm_buffer_object *bo;
+ unsigned long flags;
+ int i;
+ long total[HMM_BO_LAST] = { 0 };
+ long count[HMM_BO_LAST] = { 0 };
+ int index1 = 0;
+ int index2 = 0;
+
+ ret = scnprintf(buf, PAGE_SIZE, "type pgnr\n");
+ if (ret <= 0)
+ return 0;
+
+ index1 += ret;
+
+ spin_lock_irqsave(&bo_device.list_lock, flags);
+ list_for_each_entry(bo, bo_list, list) {
+ if ((active && (bo->status & HMM_BO_ALLOCED)) ||
+ (!active && !(bo->status & HMM_BO_ALLOCED))) {
+ ret = scnprintf(buf + index1, PAGE_SIZE - index1,
+ "%c %d\n",
+ hmm_bo_type_string[bo->type], bo->pgnr);
+
+ total[bo->type] += bo->pgnr;
+ count[bo->type]++;
+ if (ret > 0)
+ index1 += ret;
+ }
+ }
+ spin_unlock_irqrestore(&bo_device.list_lock, flags);
+
+ for (i = 0; i < HMM_BO_LAST; i++) {
+ if (count[i]) {
+ ret = scnprintf(buf + index1 + index2,
+ PAGE_SIZE - index1 - index2,
+ "%ld %c buffer objects: %ld KB\n",
+ count[i], hmm_bo_type_string[i], total[i] * 4);
+ if (ret > 0)
+ index2 += ret;
+ }
+ }
+
+ /* Add trailing zero, not included by scnprintf */
+ return index1 + index2 + 1;
+}
+
+static ssize_t active_bo_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return bo_show(dev, attr, buf, &bo_device.entire_bo_list, true);
+}
+
+static ssize_t free_bo_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return bo_show(dev, attr, buf, &bo_device.entire_bo_list, false);
+}
+
+static ssize_t reserved_pool_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ ssize_t ret = 0;
+
+ struct hmm_reserved_pool_info *pinfo = reserved_pool.pool_info;
+ unsigned long flags;
+
+ if (!pinfo || !pinfo->initialized)
+ return 0;
+
+ spin_lock_irqsave(&pinfo->list_lock, flags);
+ ret = scnprintf(buf, PAGE_SIZE, "%d out of %d pages available\n",
+ pinfo->index, pinfo->pgnr);
+ spin_unlock_irqrestore(&pinfo->list_lock, flags);
+
+ if (ret > 0)
+ ret++; /* Add trailing zero, not included by scnprintf */
+
+ return ret;
+};
+
+static ssize_t dynamic_pool_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ ssize_t ret = 0;
+
+ struct hmm_dynamic_pool_info *pinfo = dynamic_pool.pool_info;
+ unsigned long flags;
+
+ if (!pinfo || !pinfo->initialized)
+ return 0;
+
+ spin_lock_irqsave(&pinfo->list_lock, flags);
+ ret = scnprintf(buf, PAGE_SIZE, "%d (max %d) pages available\n",
+ pinfo->pgnr, pinfo->pool_size);
+ spin_unlock_irqrestore(&pinfo->list_lock, flags);
+
+ if (ret > 0)
+ ret++; /* Add trailing zero, not included by scnprintf */
+
+ return ret;
+};
+
+static DEVICE_ATTR(active_bo, 0444, active_bo_show, NULL);
+static DEVICE_ATTR(free_bo, 0444, free_bo_show, NULL);
+static DEVICE_ATTR(reserved_pool, 0444, reserved_pool_show, NULL);
+static DEVICE_ATTR(dynamic_pool, 0444, dynamic_pool_show, NULL);
+
+static struct attribute *sysfs_attrs_ctrl[] = {
+ &dev_attr_active_bo.attr,
+ &dev_attr_free_bo.attr,
+ &dev_attr_reserved_pool.attr,
+ &dev_attr_dynamic_pool.attr,
+ NULL
+};
+
+static struct attribute_group atomisp_attribute_group[] = {
+ {.attrs = sysfs_attrs_ctrl },
+};
+
+int hmm_init(void)
+{
+ int ret;
+
+ ret = hmm_bo_device_init(&bo_device, &sh_mmu_mrfld,
+ ISP_VM_START, ISP_VM_SIZE);
+ if (ret)
+ dev_err(atomisp_dev, "hmm_bo_device_init failed.\n");
+
+	/*
+	 * As hmm uses 0 (NULL) to indicate an invalid ISP virtual address,
+	 * and ISP_VM_START is defined as 0 too, we allocate one piece
+	 * of dummy memory up front, which is assigned address 0,
+	 * so that later hmm_alloc() calls never return 0 for a
+	 * valid allocation.
+	 */
+ dummy_ptr = hmm_alloc(1, HMM_BO_PRIVATE, 0, 0, HMM_UNCACHED);
+
+ if (!ret) {
+ ret = sysfs_create_group(&atomisp_dev->kobj,
+ atomisp_attribute_group);
+ if (ret)
+ dev_err(atomisp_dev,
+ "%s Failed to create sysfs\n", __func__);
+ }
+
+ return ret;
+}
+
+void hmm_cleanup(void)
+{
+ sysfs_remove_group(&atomisp_dev->kobj, atomisp_attribute_group);
+
+ /*
+ * free dummy memory first
+ */
+ hmm_free(dummy_ptr);
+ dummy_ptr = 0;
+
+ hmm_bo_device_exit(&bo_device);
+}
+
+ia_css_ptr hmm_alloc(size_t bytes, enum hmm_bo_type type,
+ int from_highmem, void *userptr, bool cached)
+{
+ unsigned int pgnr;
+ struct hmm_buffer_object *bo;
+ int ret;
+
+ /* Check if we are initialized. In the ideal world we wouldn't need
+ this but we can tackle it once the driver is a lot cleaner */
+
+ if (!dummy_ptr)
+ hmm_init();
+ /*Get page number from size*/
+ pgnr = size_to_pgnr_ceil(bytes);
+
+ /*Buffer object structure init*/
+ bo = hmm_bo_alloc(&bo_device, pgnr);
+ if (!bo) {
+ dev_err(atomisp_dev, "hmm_bo_create failed.\n");
+ goto create_bo_err;
+ }
+
+ /*Allocate pages for memory*/
+ ret = hmm_bo_alloc_pages(bo, type, from_highmem, userptr, cached);
+ if (ret) {
+ dev_err(atomisp_dev,
+ "hmm_bo_alloc_pages failed.\n");
+ goto alloc_page_err;
+ }
+
+	/* Combine the virtual address and the pages together */
+ ret = hmm_bo_bind(bo);
+ if (ret) {
+ dev_err(atomisp_dev, "hmm_bo_bind failed.\n");
+ goto bind_err;
+ }
+
+ hmm_mem_stat.tol_cnt += pgnr;
+
+ return bo->start;
+
+bind_err:
+ hmm_bo_free_pages(bo);
+alloc_page_err:
+ hmm_bo_unref(bo);
+create_bo_err:
+ return 0;
+}
+
+void hmm_free(ia_css_ptr virt)
+{
+ struct hmm_buffer_object *bo;
+
+ WARN_ON(!virt);
+
+ bo = hmm_bo_device_search_start(&bo_device, (unsigned int)virt);
+
+ if (!bo) {
+		dev_err(atomisp_dev,
+			"cannot find buffer object starting at address 0x%x\n",
+			(unsigned int)virt);
+ return;
+ }
+
+ hmm_mem_stat.tol_cnt -= bo->pgnr;
+
+ hmm_bo_unbind(bo);
+ hmm_bo_free_pages(bo);
+ hmm_bo_unref(bo);
+}
+
+static inline int hmm_check_bo(struct hmm_buffer_object *bo, unsigned int ptr)
+{
+ if (!bo) {
+		dev_err(atomisp_dev,
+			"cannot find buffer object containing address 0x%x\n",
+			ptr);
+ return -EINVAL;
+ }
+
+ if (!hmm_bo_page_allocated(bo)) {
+ dev_err(atomisp_dev,
+ "buffer object has no page allocated.\n");
+ return -EINVAL;
+ }
+
+ if (!hmm_bo_allocated(bo)) {
+		dev_err(atomisp_dev,
+			"buffer object has no virtual address space allocated\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*Read function in ISP memory management*/
+static int load_and_flush_by_kmap(ia_css_ptr virt, void *data, unsigned int bytes)
+{
+ struct hmm_buffer_object *bo;
+ unsigned int idx, offset, len;
+ char *src, *des;
+ int ret;
+
+ bo = hmm_bo_device_search_in_range(&bo_device, virt);
+ ret = hmm_check_bo(bo, virt);
+ if (ret)
+ return ret;
+
+ des = (char *)data;
+ while (bytes) {
+ idx = (virt - bo->start) >> PAGE_SHIFT;
+ offset = (virt - bo->start) - (idx << PAGE_SHIFT);
+
+ src = (char *)kmap(bo->page_obj[idx].page) + offset;
+
+ if ((bytes + offset) >= PAGE_SIZE) {
+ len = PAGE_SIZE - offset;
+ bytes -= len;
+ } else {
+ len = bytes;
+ bytes = 0;
+ }
+
+ virt += len; /* update virt for next loop */
+
+ if (des) {
+ memcpy(des, src, len);
+ des += len;
+ }
+
+ clflush_cache_range(src, len);
+
+ kunmap(bo->page_obj[idx].page);
+ }
+
+ return 0;
+}
+
+/*Read function in ISP memory management*/
+static int load_and_flush(ia_css_ptr virt, void *data, unsigned int bytes)
+{
+ struct hmm_buffer_object *bo;
+ int ret;
+
+ bo = hmm_bo_device_search_in_range(&bo_device, virt);
+ ret = hmm_check_bo(bo, virt);
+ if (ret)
+ return ret;
+
+ if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) {
+ void *src = bo->vmap_addr;
+
+ src += (virt - bo->start);
+ memcpy(data, src, bytes);
+ if (bo->status & HMM_BO_VMAPED_CACHED)
+ clflush_cache_range(src, bytes);
+ } else {
+ void *vptr;
+
+ vptr = hmm_bo_vmap(bo, true);
+ if (!vptr)
+ return load_and_flush_by_kmap(virt, data, bytes);
+ else
+ vptr = vptr + (virt - bo->start);
+
+ memcpy(data, vptr, bytes);
+ clflush_cache_range(vptr, bytes);
+ hmm_bo_vunmap(bo);
+ }
+
+ return 0;
+}
+
+/*Read function in ISP memory management*/
+int hmm_load(ia_css_ptr virt, void *data, unsigned int bytes)
+{
+ if (!data) {
+ dev_err(atomisp_dev,
+ "hmm_load NULL argument\n");
+ return -EINVAL;
+ }
+ return load_and_flush(virt, data, bytes);
+}
+
+/*Flush hmm data from the data cache*/
+int hmm_flush(ia_css_ptr virt, unsigned int bytes)
+{
+ return load_and_flush(virt, NULL, bytes);
+}
+
+/*Write function in ISP memory management*/
+int hmm_store(ia_css_ptr virt, const void *data, unsigned int bytes)
+{
+ struct hmm_buffer_object *bo;
+ unsigned int idx, offset, len;
+ char *src, *des;
+ int ret;
+
+ bo = hmm_bo_device_search_in_range(&bo_device, virt);
+ ret = hmm_check_bo(bo, virt);
+ if (ret)
+ return ret;
+
+ if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) {
+ void *dst = bo->vmap_addr;
+
+ dst += (virt - bo->start);
+ memcpy(dst, data, bytes);
+ if (bo->status & HMM_BO_VMAPED_CACHED)
+ clflush_cache_range(dst, bytes);
+ } else {
+ void *vptr;
+
+ vptr = hmm_bo_vmap(bo, true);
+ if (vptr) {
+ vptr = vptr + (virt - bo->start);
+
+ memcpy(vptr, data, bytes);
+ clflush_cache_range(vptr, bytes);
+ hmm_bo_vunmap(bo);
+ return 0;
+ }
+ }
+
+ src = (char *)data;
+ while (bytes) {
+ idx = (virt - bo->start) >> PAGE_SHIFT;
+ offset = (virt - bo->start) - (idx << PAGE_SHIFT);
+
+ if (in_atomic())
+ des = (char *)kmap_atomic(bo->page_obj[idx].page);
+ else
+ des = (char *)kmap(bo->page_obj[idx].page);
+
+ if (!des) {
+			dev_err(atomisp_dev,
+				"kmap buffer object page failed: pg_idx = %d\n",
+				idx);
+ return -EINVAL;
+ }
+
+ des += offset;
+
+ if ((bytes + offset) >= PAGE_SIZE) {
+ len = PAGE_SIZE - offset;
+ bytes -= len;
+ } else {
+ len = bytes;
+ bytes = 0;
+ }
+
+ virt += len;
+
+ memcpy(des, src, len);
+
+ src += len;
+
+ clflush_cache_range(des, len);
+
+ if (in_atomic())
+ /*
+ * Note: kunmap_atomic requires return addr from
+ * kmap_atomic, not the page. See linux/highmem.h
+ */
+ kunmap_atomic(des - offset);
+ else
+ kunmap(bo->page_obj[idx].page);
+ }
+
+ return 0;
+}
+
+/*memset function in ISP memory management*/
+int hmm_set(ia_css_ptr virt, int c, unsigned int bytes)
+{
+ struct hmm_buffer_object *bo;
+ unsigned int idx, offset, len;
+ char *des;
+ int ret;
+
+ bo = hmm_bo_device_search_in_range(&bo_device, virt);
+ ret = hmm_check_bo(bo, virt);
+ if (ret)
+ return ret;
+
+ if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) {
+ void *dst = bo->vmap_addr;
+
+ dst += (virt - bo->start);
+ memset(dst, c, bytes);
+
+ if (bo->status & HMM_BO_VMAPED_CACHED)
+ clflush_cache_range(dst, bytes);
+ } else {
+ void *vptr;
+
+ vptr = hmm_bo_vmap(bo, true);
+ if (vptr) {
+ vptr = vptr + (virt - bo->start);
+ memset(vptr, c, bytes);
+ clflush_cache_range(vptr, bytes);
+ hmm_bo_vunmap(bo);
+ return 0;
+ }
+ }
+
+ while (bytes) {
+ idx = (virt - bo->start) >> PAGE_SHIFT;
+ offset = (virt - bo->start) - (idx << PAGE_SHIFT);
+
+ des = (char *)kmap(bo->page_obj[idx].page) + offset;
+
+ if ((bytes + offset) >= PAGE_SIZE) {
+ len = PAGE_SIZE - offset;
+ bytes -= len;
+ } else {
+ len = bytes;
+ bytes = 0;
+ }
+
+ virt += len;
+
+ memset(des, c, len);
+
+ clflush_cache_range(des, len);
+
+ kunmap(bo->page_obj[idx].page);
+ }
+
+ return 0;
+}
+
+/*Virtual address to physical address convert*/
+phys_addr_t hmm_virt_to_phys(ia_css_ptr virt)
+{
+ unsigned int idx, offset;
+ struct hmm_buffer_object *bo;
+
+ bo = hmm_bo_device_search_in_range(&bo_device, virt);
+ if (!bo) {
+ dev_err(atomisp_dev,
+			"cannot find buffer object containing address 0x%x\n",
+ virt);
+ return -1;
+ }
+
+ idx = (virt - bo->start) >> PAGE_SHIFT;
+ offset = (virt - bo->start) - (idx << PAGE_SHIFT);
+
+ return page_to_phys(bo->page_obj[idx].page) + offset;
+}
+
+int hmm_mmap(struct vm_area_struct *vma, ia_css_ptr virt)
+{
+ struct hmm_buffer_object *bo;
+
+ bo = hmm_bo_device_search_start(&bo_device, virt);
+ if (!bo) {
+ dev_err(atomisp_dev,
+			"cannot find buffer object starting at address 0x%x\n",
+ virt);
+ return -EINVAL;
+ }
+
+ return hmm_bo_mmap(vma, bo);
+}
+
+/*Map ISP virtual address into IA virtual address*/
+void *hmm_vmap(ia_css_ptr virt, bool cached)
+{
+ struct hmm_buffer_object *bo;
+ void *ptr;
+
+ bo = hmm_bo_device_search_in_range(&bo_device, virt);
+ if (!bo) {
+ dev_err(atomisp_dev,
+			"cannot find buffer object containing address 0x%x\n",
+ virt);
+ return NULL;
+ }
+
+ ptr = hmm_bo_vmap(bo, cached);
+ if (ptr)
+ return ptr + (virt - bo->start);
+ else
+ return NULL;
+}
+
+/* Flush the memory which is mapped as cached memory through hmm_vmap */
+void hmm_flush_vmap(ia_css_ptr virt)
+{
+ struct hmm_buffer_object *bo;
+
+ bo = hmm_bo_device_search_in_range(&bo_device, virt);
+ if (!bo) {
+ dev_warn(atomisp_dev,
+			 "cannot find buffer object containing address 0x%x\n",
+ virt);
+ return;
+ }
+
+ hmm_bo_flush_vmap(bo);
+}
+
+void hmm_vunmap(ia_css_ptr virt)
+{
+ struct hmm_buffer_object *bo;
+
+ bo = hmm_bo_device_search_in_range(&bo_device, virt);
+ if (!bo) {
+ dev_warn(atomisp_dev,
+			 "cannot find buffer object containing address 0x%x\n",
+ virt);
+ return;
+ }
+
+ return hmm_bo_vunmap(bo);
+}
+
+int hmm_pool_register(unsigned int pool_size,
+ enum hmm_pool_type pool_type)
+{
+ switch (pool_type) {
+ case HMM_POOL_TYPE_RESERVED:
+ reserved_pool.pops = &reserved_pops;
+ return reserved_pool.pops->pool_init(&reserved_pool.pool_info,
+ pool_size);
+ case HMM_POOL_TYPE_DYNAMIC:
+ dynamic_pool.pops = &dynamic_pops;
+ return dynamic_pool.pops->pool_init(&dynamic_pool.pool_info,
+ pool_size);
+ default:
+ dev_err(atomisp_dev, "invalid pool type.\n");
+ return -EINVAL;
+ }
+}
+
+void hmm_pool_unregister(enum hmm_pool_type pool_type)
+{
+ switch (pool_type) {
+ case HMM_POOL_TYPE_RESERVED:
+ if (reserved_pool.pops && reserved_pool.pops->pool_exit)
+ reserved_pool.pops->pool_exit(&reserved_pool.pool_info);
+ break;
+ case HMM_POOL_TYPE_DYNAMIC:
+ if (dynamic_pool.pops && dynamic_pool.pops->pool_exit)
+ dynamic_pool.pops->pool_exit(&dynamic_pool.pool_info);
+ break;
+ default:
+ dev_err(atomisp_dev, "invalid pool type.\n");
+ break;
+ }
+
+ return;
+}
+
+void *hmm_isp_vaddr_to_host_vaddr(ia_css_ptr ptr, bool cached)
+{
+ return hmm_vmap(ptr, cached);
+ /* vmunmap will be done in hmm_bo_release() */
+}
+
+ia_css_ptr hmm_host_vaddr_to_hrt_vaddr(const void *ptr)
+{
+ struct hmm_buffer_object *bo;
+
+ bo = hmm_bo_device_search_vmap_start(&bo_device, ptr);
+ if (bo)
+ return bo->start;
+
+ dev_err(atomisp_dev,
+		"cannot find buffer object whose kernel virtual address is %p\n",
+ ptr);
+ return 0;
+}
+
+void hmm_show_mem_stat(const char *func, const int line)
+{
+ trace_printk("tol_cnt=%d usr_size=%d res_size=%d res_cnt=%d sys_size=%d dyc_thr=%d dyc_size=%d.\n",
+ hmm_mem_stat.tol_cnt,
+ hmm_mem_stat.usr_size, hmm_mem_stat.res_size,
+ hmm_mem_stat.res_cnt, hmm_mem_stat.sys_size,
+ hmm_mem_stat.dyc_thr, hmm_mem_stat.dyc_size);
+}
+
+void hmm_init_mem_stat(int res_pgnr, int dyc_en, int dyc_pgnr)
+{
+ hmm_mem_stat.res_size = res_pgnr;
+	/* If the reserved mem pool is not enabled, set its "mem stat" values to -1. */
+	if (hmm_mem_stat.res_size == 0) {
+ hmm_mem_stat.res_size = -1;
+ hmm_mem_stat.res_cnt = -1;
+ }
+
+ /* If dynamic memory pool is not enabled, set its "mem stat" values as -1. */
+ if (!dyc_en) {
+ hmm_mem_stat.dyc_size = -1;
+ hmm_mem_stat.dyc_thr = -1;
+ } else {
+ hmm_mem_stat.dyc_size = 0;
+ hmm_mem_stat.dyc_thr = dyc_pgnr;
+ }
+ hmm_mem_stat.usr_size = 0;
+ hmm_mem_stat.sys_size = 0;
+ hmm_mem_stat.tol_cnt = 0;
+}
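A minimal usage sketch of the hmm entry points above, assuming hmm_init() has already run; the helper name and the round-trip itself are illustrative, not an existing driver path:

/* Hypothetical example: allocate ISP memory, copy a host buffer into it,
 * read it back and release it again. */
static int hmm_roundtrip_example(const void *host_buf, void *out,
				 unsigned int bytes)
{
	ia_css_ptr isp_addr;
	int ret;

	isp_addr = hmm_alloc(bytes, HMM_BO_PRIVATE, 0, NULL, HMM_UNCACHED);
	if (!isp_addr)
		return -ENOMEM;

	ret = hmm_store(isp_addr, host_buf, bytes);
	if (!ret)
		ret = hmm_load(isp_addr, out, bytes);

	hmm_free(isp_addr);
	return ret;
}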
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_bo.c b/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_bo.c
new file mode 100644
index 000000000000..11162f595fc7
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_bo.c
@@ -0,0 +1,1544 @@
+/*
+ * Support for Medfield PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
+ *
+ * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+/*
+ * This file contains functions for buffer object structure management
+ */
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/gfp.h> /* for GFP_ATOMIC */
+#include <linux/mm.h>
+#include <linux/mm_types.h>
+#include <linux/hugetlb.h>
+#include <linux/highmem.h>
+#include <linux/slab.h> /* for kmalloc */
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/string.h>
+#include <linux/list.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <asm/current.h>
+#include <linux/sched/signal.h>
+#include <linux/file.h>
+
+#include <asm/set_memory.h>
+
+#include "atomisp_internal.h"
+#include "hmm/hmm_common.h"
+#include "hmm/hmm_pool.h"
+#include "hmm/hmm_bo.h"
+
+static unsigned int order_to_nr(unsigned int order)
+{
+ return 1U << order;
+}
+
+static unsigned int nr_to_order_bottom(unsigned int nr)
+{
+ return fls(nr) - 1;
+}
+
+struct hmm_buffer_object *__bo_alloc(struct kmem_cache *bo_cache)
+{
+ struct hmm_buffer_object *bo;
+
+ bo = kmem_cache_alloc(bo_cache, GFP_KERNEL);
+ if (!bo)
+ dev_err(atomisp_dev, "%s: failed!\n", __func__);
+
+ return bo;
+}
+
+static int __bo_init(struct hmm_bo_device *bdev, struct hmm_buffer_object *bo,
+ unsigned int pgnr)
+{
+ check_bodev_null_return(bdev, -EINVAL);
+ var_equal_return(hmm_bo_device_inited(bdev), 0, -EINVAL,
+ "hmm_bo_device not inited yet.\n");
+ /* prevent zero size buffer object */
+ if (pgnr == 0) {
+ dev_err(atomisp_dev, "0 size buffer is not allowed.\n");
+ return -EINVAL;
+ }
+
+ memset(bo, 0, sizeof(*bo));
+ mutex_init(&bo->mutex);
+
+ /* init the bo->list HEAD as an element of entire_bo_list */
+ INIT_LIST_HEAD(&bo->list);
+
+ bo->bdev = bdev;
+ bo->vmap_addr = NULL;
+ bo->status = HMM_BO_FREE;
+ bo->start = bdev->start;
+ bo->pgnr = pgnr;
+ bo->end = bo->start + pgnr_to_size(pgnr);
+ bo->prev = NULL;
+ bo->next = NULL;
+
+ return 0;
+}
+
+struct hmm_buffer_object *__bo_search_and_remove_from_free_rbtree(
+ struct rb_node *node, unsigned int pgnr)
+{
+ struct hmm_buffer_object *this, *ret_bo, *temp_bo;
+
+ this = rb_entry(node, struct hmm_buffer_object, node);
+ if (this->pgnr == pgnr ||
+ (this->pgnr > pgnr && this->node.rb_left == NULL)) {
+ goto remove_bo_and_return;
+ } else {
+ if (this->pgnr < pgnr) {
+ if (!this->node.rb_right)
+ return NULL;
+ ret_bo = __bo_search_and_remove_from_free_rbtree(
+ this->node.rb_right, pgnr);
+ } else {
+ ret_bo = __bo_search_and_remove_from_free_rbtree(
+ this->node.rb_left, pgnr);
+ }
+ if (!ret_bo) {
+ if (this->pgnr > pgnr)
+ goto remove_bo_and_return;
+ else
+ return NULL;
+ }
+ return ret_bo;
+ }
+
+remove_bo_and_return:
+ /* NOTE: All nodes on free rbtree have a 'prev' that points to NULL.
+ * 1. check if 'this->next' is NULL:
+ * yes: erase 'this' node and rebalance rbtree, return 'this'.
+ */
+ if (this->next == NULL) {
+ rb_erase(&this->node, &this->bdev->free_rbtree);
+ return this;
+ }
+ /* NOTE: if 'this->next' is not NULL, always return 'this->next' bo.
+ * 2. check if 'this->next->next' is NULL:
+ * yes: change the related 'next/prev' pointer,
+ * return 'this->next' but the rbtree stays unchanged.
+ */
+ temp_bo = this->next;
+ this->next = temp_bo->next;
+ if (temp_bo->next)
+ temp_bo->next->prev = this;
+ temp_bo->next = NULL;
+ temp_bo->prev = NULL;
+ return temp_bo;
+}
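+/*
+ * Illustration of the free rbtree layout handled above: nodes are keyed by
+ * pgnr, and buffer objects of the same size hang off the rbtree node in a
+ * prev/next list whose head has prev == NULL, e.g.:
+ *
+ *                [pgnr = 8]
+ *               /          \
+ *        [pgnr = 4]      [pgnr = 16] -> bo -> bo   (same-size list,
+ *                                                   not in the rbtree)
+ *
+ * __bo_search_and_remove_from_free_rbtree() prefers to hand out a list
+ * element, so the rbtree only needs rebalancing when a size class empties.
+ */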
+
+struct hmm_buffer_object *__bo_search_by_addr(struct rb_root *root,
+ ia_css_ptr start)
+{
+ struct rb_node *n = root->rb_node;
+ struct hmm_buffer_object *bo;
+
+ do {
+ bo = rb_entry(n, struct hmm_buffer_object, node);
+
+ if (bo->start > start) {
+ if (n->rb_left == NULL)
+ return NULL;
+ n = n->rb_left;
+ } else if (bo->start < start) {
+ if (n->rb_right == NULL)
+ return NULL;
+ n = n->rb_right;
+ } else {
+ return bo;
+ }
+ } while (n);
+
+ return NULL;
+}
+
+struct hmm_buffer_object *__bo_search_by_addr_in_range(struct rb_root *root,
+ unsigned int start)
+{
+ struct rb_node *n = root->rb_node;
+ struct hmm_buffer_object *bo;
+
+ do {
+ bo = rb_entry(n, struct hmm_buffer_object, node);
+
+ if (bo->start > start) {
+ if (n->rb_left == NULL)
+ return NULL;
+ n = n->rb_left;
+ } else {
+ if (bo->end > start)
+ return bo;
+ if (n->rb_right == NULL)
+ return NULL;
+ n = n->rb_right;
+ }
+ } while (n);
+
+ return NULL;
+}
+
+static void __bo_insert_to_free_rbtree(struct rb_root *root,
+ struct hmm_buffer_object *bo)
+{
+ struct rb_node **new = &(root->rb_node);
+ struct rb_node *parent = NULL;
+ struct hmm_buffer_object *this;
+ unsigned int pgnr = bo->pgnr;
+
+ while (*new) {
+ parent = *new;
+ this = container_of(*new, struct hmm_buffer_object, node);
+
+ if (pgnr < this->pgnr) {
+ new = &((*new)->rb_left);
+ } else if (pgnr > this->pgnr) {
+ new = &((*new)->rb_right);
+ } else {
+ bo->prev = this;
+ bo->next = this->next;
+ if (this->next)
+ this->next->prev = bo;
+ this->next = bo;
+ bo->status = (bo->status & ~HMM_BO_MASK) | HMM_BO_FREE;
+ return;
+ }
+ }
+
+ bo->status = (bo->status & ~HMM_BO_MASK) | HMM_BO_FREE;
+
+ rb_link_node(&bo->node, parent, new);
+ rb_insert_color(&bo->node, root);
+}
+
+static void __bo_insert_to_alloc_rbtree(struct rb_root *root,
+ struct hmm_buffer_object *bo)
+{
+ struct rb_node **new = &(root->rb_node);
+ struct rb_node *parent = NULL;
+ struct hmm_buffer_object *this;
+ unsigned int start = bo->start;
+
+ while (*new) {
+ parent = *new;
+ this = container_of(*new, struct hmm_buffer_object, node);
+
+ if (start < this->start)
+ new = &((*new)->rb_left);
+ else
+ new = &((*new)->rb_right);
+ }
+
+ kref_init(&bo->kref);
+ bo->status = (bo->status & ~HMM_BO_MASK) | HMM_BO_ALLOCED;
+
+ rb_link_node(&bo->node, parent, new);
+ rb_insert_color(&bo->node, root);
+}
+
+struct hmm_buffer_object *__bo_break_up(struct hmm_bo_device *bdev,
+ struct hmm_buffer_object *bo,
+ unsigned int pgnr)
+{
+ struct hmm_buffer_object *new_bo;
+ unsigned long flags;
+ int ret;
+
+ new_bo = __bo_alloc(bdev->bo_cache);
+ if (!new_bo) {
+ dev_err(atomisp_dev, "%s: __bo_alloc failed!\n", __func__);
+ return NULL;
+ }
+ ret = __bo_init(bdev, new_bo, pgnr);
+ if (ret) {
+ dev_err(atomisp_dev, "%s: __bo_init failed!\n", __func__);
+ kmem_cache_free(bdev->bo_cache, new_bo);
+ return NULL;
+ }
+
+ new_bo->start = bo->start;
+ new_bo->end = new_bo->start + pgnr_to_size(pgnr);
+ bo->start = new_bo->end;
+ bo->pgnr = bo->pgnr - pgnr;
+
+ spin_lock_irqsave(&bdev->list_lock, flags);
+ list_add_tail(&new_bo->list, &bo->list);
+ spin_unlock_irqrestore(&bdev->list_lock, flags);
+
+ return new_bo;
+}
+
+static void __bo_take_off_handling(struct hmm_buffer_object *bo)
+{
+ struct hmm_bo_device *bdev = bo->bdev;
+	/* There are 4 situations when we take a known bo off the free rbtree:
+	 * 1. if bo->prev == NULL && bo->next == NULL, bo is an rbtree node
+	 *	and does not have a linked list after it; to take off this bo,
+	 *	we just need to erase bo directly and rebalance the free rbtree
+	 */
+ if (bo->prev == NULL && bo->next == NULL) {
+ rb_erase(&bo->node, &bdev->free_rbtree);
+		/* 2. when bo->next != NULL && bo->prev == NULL, bo is an rbtree node
+		 * and has a linked list; to take off this bo we need to erase bo
+		 * first, then insert bo->next into the free rbtree and rebalance
+		 * the free rbtree
+		 */
+ } else if (bo->prev == NULL && bo->next != NULL) {
+ bo->next->prev = NULL;
+ rb_erase(&bo->node, &bdev->free_rbtree);
+ __bo_insert_to_free_rbtree(&bdev->free_rbtree, bo->next);
+ bo->next = NULL;
+		/* 3. when bo->prev != NULL && bo->next == NULL, bo is not an rbtree
+		 * node; bo is the last element of the linked list after the rbtree
+		 * node. To take off this bo, we just need to set the "prev/next"
+		 * pointers to NULL; the free rbtree stays unchanged
+		 */
+ } else if (bo->prev != NULL && bo->next == NULL) {
+ bo->prev->next = NULL;
+ bo->prev = NULL;
+	/* 4. when bo->prev != NULL && bo->next != NULL, bo is not an rbtree
+	 * node; bo is in the middle of the linked list after the rbtree node.
+	 * To take off this bo, we just set the "prev/next" pointers
+	 * to NULL; the free rbtree stays unchanged
+	 */
+ } else {
+ bo->next->prev = bo->prev;
+ bo->prev->next = bo->next;
+ bo->next = NULL;
+ bo->prev = NULL;
+ }
+}
+
+struct hmm_buffer_object *__bo_merge(struct hmm_buffer_object *bo,
+ struct hmm_buffer_object *next_bo)
+{
+ struct hmm_bo_device *bdev;
+ unsigned long flags;
+
+ bdev = bo->bdev;
+ next_bo->start = bo->start;
+ next_bo->pgnr = next_bo->pgnr + bo->pgnr;
+
+ spin_lock_irqsave(&bdev->list_lock, flags);
+ list_del(&bo->list);
+ spin_unlock_irqrestore(&bdev->list_lock, flags);
+
+ kmem_cache_free(bo->bdev->bo_cache, bo);
+
+ return next_bo;
+}
+
+/*
+ * hmm_bo_device functions.
+ */
+int hmm_bo_device_init(struct hmm_bo_device *bdev,
+ struct isp_mmu_client *mmu_driver,
+ unsigned int vaddr_start,
+ unsigned int size)
+{
+ struct hmm_buffer_object *bo;
+ unsigned long flags;
+ int ret;
+
+ check_bodev_null_return(bdev, -EINVAL);
+
+ ret = isp_mmu_init(&bdev->mmu, mmu_driver);
+ if (ret) {
+ dev_err(atomisp_dev, "isp_mmu_init failed.\n");
+ return ret;
+ }
+
+ bdev->start = vaddr_start;
+ bdev->pgnr = size_to_pgnr_ceil(size);
+ bdev->size = pgnr_to_size(bdev->pgnr);
+
+ spin_lock_init(&bdev->list_lock);
+ mutex_init(&bdev->rbtree_mutex);
+
+ bdev->flag = HMM_BO_DEVICE_INITED;
+
+ INIT_LIST_HEAD(&bdev->entire_bo_list);
+ bdev->allocated_rbtree = RB_ROOT;
+ bdev->free_rbtree = RB_ROOT;
+
+ bdev->bo_cache = kmem_cache_create("bo_cache",
+ sizeof(struct hmm_buffer_object), 0, 0, NULL);
+ if (!bdev->bo_cache) {
+ dev_err(atomisp_dev, "%s: create cache failed!\n", __func__);
+ isp_mmu_exit(&bdev->mmu);
+ return -ENOMEM;
+ }
+
+ bo = __bo_alloc(bdev->bo_cache);
+ if (!bo) {
+ dev_err(atomisp_dev, "%s: __bo_alloc failed!\n", __func__);
+ isp_mmu_exit(&bdev->mmu);
+ return -ENOMEM;
+ }
+
+ ret = __bo_init(bdev, bo, bdev->pgnr);
+ if (ret) {
+ dev_err(atomisp_dev, "%s: __bo_init failed!\n", __func__);
+ kmem_cache_free(bdev->bo_cache, bo);
+ isp_mmu_exit(&bdev->mmu);
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&bdev->list_lock, flags);
+ list_add_tail(&bo->list, &bdev->entire_bo_list);
+ spin_unlock_irqrestore(&bdev->list_lock, flags);
+
+ __bo_insert_to_free_rbtree(&bdev->free_rbtree, bo);
+
+ return 0;
+}
+
+struct hmm_buffer_object *hmm_bo_alloc(struct hmm_bo_device *bdev,
+ unsigned int pgnr)
+{
+ struct hmm_buffer_object *bo, *new_bo;
+ struct rb_root *root = &bdev->free_rbtree;
+
+ check_bodev_null_return(bdev, NULL);
+ var_equal_return(hmm_bo_device_inited(bdev), 0, NULL,
+ "hmm_bo_device not inited yet.\n");
+
+ if (pgnr == 0) {
+ dev_err(atomisp_dev, "0 size buffer is not allowed.\n");
+ return NULL;
+ }
+
+ mutex_lock(&bdev->rbtree_mutex);
+ bo = __bo_search_and_remove_from_free_rbtree(root->rb_node, pgnr);
+ if (!bo) {
+ mutex_unlock(&bdev->rbtree_mutex);
+ dev_err(atomisp_dev, "%s: Out of Memory! hmm_bo_alloc failed",
+ __func__);
+ return NULL;
+ }
+
+ if (bo->pgnr > pgnr) {
+ new_bo = __bo_break_up(bdev, bo, pgnr);
+ if (!new_bo) {
+ mutex_unlock(&bdev->rbtree_mutex);
+ dev_err(atomisp_dev, "%s: __bo_break_up failed!\n",
+ __func__);
+ return NULL;
+ }
+
+ __bo_insert_to_alloc_rbtree(&bdev->allocated_rbtree, new_bo);
+ __bo_insert_to_free_rbtree(&bdev->free_rbtree, bo);
+
+ mutex_unlock(&bdev->rbtree_mutex);
+ return new_bo;
+ }
+
+ __bo_insert_to_alloc_rbtree(&bdev->allocated_rbtree, bo);
+
+ mutex_unlock(&bdev->rbtree_mutex);
+ return bo;
+}
+
+void hmm_bo_release(struct hmm_buffer_object *bo)
+{
+ struct hmm_bo_device *bdev = bo->bdev;
+ struct hmm_buffer_object *next_bo, *prev_bo;
+
+ mutex_lock(&bdev->rbtree_mutex);
+
+ /*
+ * FIX ME:
+ *
+	 * how do we destroy the bo when it is still MMAPED?
+	 *
+	 * ideally, this will not happen as hmm_bo_release
+	 * will only be called when kref reaches 0, and in the mmap
+	 * operation hmm_bo_ref will eventually be called.
+	 * so, if this happens, something has gone wrong.
+ */
+ if (bo->status & HMM_BO_MMAPED) {
+ mutex_unlock(&bdev->rbtree_mutex);
+ dev_dbg(atomisp_dev, "destroy bo which is MMAPED, do nothing\n");
+ return;
+ }
+
+ if (bo->status & HMM_BO_BINDED) {
+		dev_warn(atomisp_dev, "the bo is still bound, unbind it first...\n");
+ hmm_bo_unbind(bo);
+ }
+
+ if (bo->status & HMM_BO_PAGE_ALLOCED) {
+		dev_warn(atomisp_dev, "the pages are not freed, free pages first\n");
+ hmm_bo_free_pages(bo);
+ }
+ if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) {
+ dev_warn(atomisp_dev, "the vunmap is not done, do it...\n");
+ hmm_bo_vunmap(bo);
+ }
+
+ rb_erase(&bo->node, &bdev->allocated_rbtree);
+
+ prev_bo = list_entry(bo->list.prev, struct hmm_buffer_object, list);
+ next_bo = list_entry(bo->list.next, struct hmm_buffer_object, list);
+
+ if (bo->list.prev != &bdev->entire_bo_list &&
+ prev_bo->end == bo->start &&
+ (prev_bo->status & HMM_BO_MASK) == HMM_BO_FREE) {
+ __bo_take_off_handling(prev_bo);
+ bo = __bo_merge(prev_bo, bo);
+ }
+
+ if (bo->list.next != &bdev->entire_bo_list &&
+ next_bo->start == bo->end &&
+ (next_bo->status & HMM_BO_MASK) == HMM_BO_FREE) {
+ __bo_take_off_handling(next_bo);
+ bo = __bo_merge(bo, next_bo);
+ }
+
+ __bo_insert_to_free_rbtree(&bdev->free_rbtree, bo);
+
+ mutex_unlock(&bdev->rbtree_mutex);
+ return;
+}
+
+void hmm_bo_device_exit(struct hmm_bo_device *bdev)
+{
+ struct hmm_buffer_object *bo;
+ unsigned long flags;
+
+ dev_dbg(atomisp_dev, "%s: entering!\n", __func__);
+
+ check_bodev_null_return_void(bdev);
+
+	/*
+	 * release all allocated bos even if they are in use;
+	 * all bos will be merged back into one big bo
+	 */
+ while (!RB_EMPTY_ROOT(&bdev->allocated_rbtree))
+ hmm_bo_release(
+ rbtree_node_to_hmm_bo(bdev->allocated_rbtree.rb_node));
+
+ dev_dbg(atomisp_dev, "%s: finished releasing all allocated bos!\n",
+ __func__);
+
+ /* free all bos to release all ISP virtual memory */
+ while (!list_empty(&bdev->entire_bo_list)) {
+ bo = list_to_hmm_bo(bdev->entire_bo_list.next);
+
+ spin_lock_irqsave(&bdev->list_lock, flags);
+ list_del(&bo->list);
+ spin_unlock_irqrestore(&bdev->list_lock, flags);
+
+ kmem_cache_free(bdev->bo_cache, bo);
+ }
+
+	dev_dbg(atomisp_dev, "%s: finished freeing all bos!\n", __func__);
+
+ kmem_cache_destroy(bdev->bo_cache);
+
+ isp_mmu_exit(&bdev->mmu);
+}
+
+int hmm_bo_device_inited(struct hmm_bo_device *bdev)
+{
+ check_bodev_null_return(bdev, -EINVAL);
+
+ return bdev->flag == HMM_BO_DEVICE_INITED;
+}
+
+int hmm_bo_allocated(struct hmm_buffer_object *bo)
+{
+ check_bo_null_return(bo, 0);
+
+ return bo->status & HMM_BO_ALLOCED;
+}
+
+struct hmm_buffer_object *hmm_bo_device_search_start(
+ struct hmm_bo_device *bdev, ia_css_ptr vaddr)
+{
+ struct hmm_buffer_object *bo;
+
+ check_bodev_null_return(bdev, NULL);
+
+ mutex_lock(&bdev->rbtree_mutex);
+ bo = __bo_search_by_addr(&bdev->allocated_rbtree, vaddr);
+ if (!bo) {
+ mutex_unlock(&bdev->rbtree_mutex);
+		dev_err(atomisp_dev, "%s: cannot find bo with addr: 0x%x\n",
+ __func__, vaddr);
+ return NULL;
+ }
+ mutex_unlock(&bdev->rbtree_mutex);
+
+ return bo;
+}
+
+struct hmm_buffer_object *hmm_bo_device_search_in_range(
+ struct hmm_bo_device *bdev, unsigned int vaddr)
+{
+ struct hmm_buffer_object *bo;
+
+ check_bodev_null_return(bdev, NULL);
+
+ mutex_lock(&bdev->rbtree_mutex);
+ bo = __bo_search_by_addr_in_range(&bdev->allocated_rbtree, vaddr);
+ if (!bo) {
+ mutex_unlock(&bdev->rbtree_mutex);
+		dev_err(atomisp_dev, "%s: cannot find bo containing addr: 0x%x\n",
+ __func__, vaddr);
+ return NULL;
+ }
+ mutex_unlock(&bdev->rbtree_mutex);
+
+ return bo;
+}
+
+struct hmm_buffer_object *hmm_bo_device_search_vmap_start(
+ struct hmm_bo_device *bdev, const void *vaddr)
+{
+ struct list_head *pos;
+ struct hmm_buffer_object *bo;
+ unsigned long flags;
+
+ check_bodev_null_return(bdev, NULL);
+
+ spin_lock_irqsave(&bdev->list_lock, flags);
+ list_for_each(pos, &bdev->entire_bo_list) {
+ bo = list_to_hmm_bo(pos);
+		/* skip bo which has no vm_node allocated */
+ if ((bo->status & HMM_BO_MASK) == HMM_BO_FREE)
+ continue;
+ if (bo->vmap_addr == vaddr)
+ goto found;
+ }
+ spin_unlock_irqrestore(&bdev->list_lock, flags);
+ return NULL;
+found:
+ spin_unlock_irqrestore(&bdev->list_lock, flags);
+ return bo;
+
+}
+
+
+static void free_private_bo_pages(struct hmm_buffer_object *bo,
+ struct hmm_pool *dypool,
+ struct hmm_pool *repool,
+ int free_pgnr)
+{
+ int i, ret;
+
+ for (i = 0; i < free_pgnr; i++) {
+ switch (bo->page_obj[i].type) {
+ case HMM_PAGE_TYPE_RESERVED:
+ if (repool->pops
+ && repool->pops->pool_free_pages) {
+ repool->pops->pool_free_pages(repool->pool_info,
+ &bo->page_obj[i]);
+ hmm_mem_stat.res_cnt--;
+ }
+ break;
+		/*
+		 * HMM_PAGE_TYPE_GENERAL indicates that the pages come from
+		 * system memory, so when freeing them they should be returned
+		 * to the dynamic pool.
+		 */
+ case HMM_PAGE_TYPE_DYNAMIC:
+ case HMM_PAGE_TYPE_GENERAL:
+ if (dypool->pops
+ && dypool->pops->pool_inited
+ && dypool->pops->pool_inited(dypool->pool_info)) {
+ if (dypool->pops->pool_free_pages)
+ dypool->pops->pool_free_pages(
+ dypool->pool_info,
+ &bo->page_obj[i]);
+ break;
+ }
+
+			/*
+			 * if the dynamic memory pool doesn't exist, we need to
+			 * free the pages back to the system directly.
+			 */
+ default:
+ ret = set_pages_wb(bo->page_obj[i].page, 1);
+ if (ret)
+ dev_err(atomisp_dev,
+ "set page to WB err ...ret = %d\n",
+ ret);
+			/*
+			 * W/A: set_pages_wb occasionally returns -EFAULT,
+			 * indicating that the page address is not in the valid
+			 * range (0xffff880000000000~0xffffc7ffffffffff);
+			 * __free_pages would then panic. It is not known why
+			 * the page address becomes invalid; it may be memory
+			 * corruption caused by low memory.
+			 */
+ if (!ret) {
+ __free_pages(bo->page_obj[i].page, 0);
+ hmm_mem_stat.sys_size--;
+ }
+ break;
+ }
+ }
+
+ return;
+}
+
+/*Allocate pages which will be used only by ISP*/
+static int alloc_private_pages(struct hmm_buffer_object *bo,
+ int from_highmem,
+ bool cached,
+ struct hmm_pool *dypool,
+ struct hmm_pool *repool)
+{
+ int ret;
+ unsigned int pgnr, order, blk_pgnr, alloc_pgnr;
+ struct page *pages;
+ gfp_t gfp = GFP_NOWAIT | __GFP_NOWARN; /* REVISIT: need __GFP_FS too? */
+ int i, j;
+ int failure_number = 0;
+ bool reduce_order = false;
+ bool lack_mem = true;
+
+ if (from_highmem)
+ gfp |= __GFP_HIGHMEM;
+
+ pgnr = bo->pgnr;
+
+ bo->page_obj = kmalloc(sizeof(struct hmm_page_object) * pgnr,
+ GFP_KERNEL);
+ if (unlikely(!bo->page_obj)) {
+ dev_err(atomisp_dev, "out of memory for bo->page_obj\n");
+ return -ENOMEM;
+ }
+
+ i = 0;
+ alloc_pgnr = 0;
+
+ /*
+ * get physical pages from dynamic pages pool.
+ */
+ if (dypool->pops && dypool->pops->pool_alloc_pages) {
+ alloc_pgnr = dypool->pops->pool_alloc_pages(dypool->pool_info,
+ bo->page_obj, pgnr,
+ cached);
+ hmm_mem_stat.dyc_size -= alloc_pgnr;
+
+ if (alloc_pgnr == pgnr)
+ return 0;
+ }
+
+ pgnr -= alloc_pgnr;
+ i += alloc_pgnr;
+
+ /*
+ * get physical pages from reserved pages pool for atomisp.
+ */
+ if (repool->pops && repool->pops->pool_alloc_pages) {
+ alloc_pgnr = repool->pops->pool_alloc_pages(repool->pool_info,
+ &bo->page_obj[i], pgnr,
+ cached);
+ hmm_mem_stat.res_cnt += alloc_pgnr;
+ if (alloc_pgnr == pgnr)
+ return 0;
+ }
+
+ pgnr -= alloc_pgnr;
+ i += alloc_pgnr;
+
+ while (pgnr) {
+ order = nr_to_order_bottom(pgnr);
+		/*
+		 * if we are short of memory, we set order to 0
+		 * every time.
+		 */
+ if (lack_mem)
+ order = HMM_MIN_ORDER;
+ else if (order > HMM_MAX_ORDER)
+ order = HMM_MAX_ORDER;
+retry:
+		/*
+		 * When order > HMM_MIN_ORDER, for performance reasons we don't
+		 * want alloc_pages() to sleep. In case it fails and falls back
+		 * to HMM_MIN_ORDER, or in case the requested order is already
+		 * the minimum value, we can allow alloc_pages() to sleep for
+		 * robustness purposes.
+		 *
+		 * REVISIT: why is __GFP_FS necessary?
+		 */
+ if (order == HMM_MIN_ORDER) {
+ gfp &= ~GFP_NOWAIT;
+ gfp |= __GFP_RECLAIM | __GFP_FS;
+ }
+
+ pages = alloc_pages(gfp, order);
+ if (unlikely(!pages)) {
+			/*
+			 * in the low memory case, if page allocation fails,
+			 * we fall back to trying an order=0 allocation.
+			 * if order=0 fails too, that means there is
+			 * no memory left.
+			 */
+ if (order == HMM_MIN_ORDER) {
+ dev_err(atomisp_dev,
+ "%s: cannot allocate pages\n",
+ __func__);
+ goto cleanup;
+ }
+ order = HMM_MIN_ORDER;
+ failure_number++;
+ reduce_order = true;
+			/*
+			 * if allocation fails twice in a row, we assume we
+			 * are short of memory now.
+			 */
+ if (failure_number == 2) {
+ lack_mem = true;
+ failure_number = 0;
+ }
+ goto retry;
+ } else {
+ blk_pgnr = order_to_nr(order);
+
+ if (!cached) {
+ /*
+ * set memory to uncacheable -- UC_MINUS
+ */
+ ret = set_pages_uc(pages, blk_pgnr);
+ if (ret) {
+					dev_err(atomisp_dev,
+						"set page uncacheable failed\n");
+
+ __free_pages(pages, order);
+
+ goto cleanup;
+ }
+ }
+
+ for (j = 0; j < blk_pgnr; j++) {
+ bo->page_obj[i].page = pages + j;
+ bo->page_obj[i++].type = HMM_PAGE_TYPE_GENERAL;
+ }
+
+ pgnr -= blk_pgnr;
+ hmm_mem_stat.sys_size += blk_pgnr;
+
+ /*
+ * if order is not reduced this time, clear
+ * failure_number.
+ */
+ if (reduce_order)
+ reduce_order = false;
+ else
+ failure_number = 0;
+ }
+ }
+
+ return 0;
+cleanup:
+ alloc_pgnr = i;
+ free_private_bo_pages(bo, dypool, repool, alloc_pgnr);
+
+ kfree(bo->page_obj);
+
+ return -ENOMEM;
+}
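+/*
+ * Worked example of the order handling above: with lack_mem initialised to
+ * true, every iteration requests an HMM_MIN_ORDER block.  If lack_mem started
+ * out false (and assuming HMM_MAX_ORDER >= 2), a 7-page request would try
+ * order 2 (4 pages), then order 1 (2 pages), then order 0 (1 page); a failed
+ * higher-order attempt drops back to HMM_MIN_ORDER, and two consecutive
+ * failures set lack_mem so later iterations stop trying large blocks.
+ */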
+
+static void free_private_pages(struct hmm_buffer_object *bo,
+ struct hmm_pool *dypool,
+ struct hmm_pool *repool)
+{
+ free_private_bo_pages(bo, dypool, repool, bo->pgnr);
+
+ kfree(bo->page_obj);
+}
+
+/*
+ * Hacked from kernel function __get_user_pages in mm/memory.c
+ *
+ * Handles buffers allocated by another kernel-space driver and mmapped into
+ * user space; the function ignores the VM_PFNMAP and VM_IO flags in the VMA
+ * structure.
+ *
+ * Gets physical pages from a user-space virtual address and fills them into
+ * the page list.
+ */
+static int __get_pfnmap_pages(struct task_struct *tsk, struct mm_struct *mm,
+ unsigned long start, int nr_pages,
+ unsigned int gup_flags, struct page **pages,
+ struct vm_area_struct **vmas)
+{
+ int i, ret;
+ unsigned long vm_flags;
+
+ if (nr_pages <= 0)
+ return 0;
+
+ VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET));
+
+ /*
+ * Require read or write permissions.
+ * If FOLL_FORCE is set, we only require the "MAY" flags.
+ */
+ vm_flags = (gup_flags & FOLL_WRITE) ?
+ (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
+ vm_flags &= (gup_flags & FOLL_FORCE) ?
+ (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
+ i = 0;
+
+ do {
+ struct vm_area_struct *vma;
+
+ vma = find_vma(mm, start);
+ if (!vma) {
+ dev_err(atomisp_dev, "find_vma failed\n");
+ return i ? : -EFAULT;
+ }
+
+ if (is_vm_hugetlb_page(vma)) {
+ /*
+ i = follow_hugetlb_page(mm, vma, pages, vmas,
+ &start, &nr_pages, i, gup_flags);
+ */
+ continue;
+ }
+
+ do {
+ struct page *page;
+ unsigned long pfn;
+
+ /*
+ * If we have a pending SIGKILL, don't keep faulting
+ * pages and potentially allocating memory.
+ */
+ if (unlikely(fatal_signal_pending(current))) {
+ dev_err(atomisp_dev,
+ "fatal_signal_pending in %s\n",
+ __func__);
+ return i ? i : -ERESTARTSYS;
+ }
+
+ ret = follow_pfn(vma, start, &pfn);
+ if (ret) {
+ dev_err(atomisp_dev, "follow_pfn() failed\n");
+ return i ? : -EFAULT;
+ }
+
+ page = pfn_to_page(pfn);
+ if (IS_ERR(page))
+ return i ? i : PTR_ERR(page);
+ if (pages) {
+ pages[i] = page;
+ get_page(page);
+ flush_anon_page(vma, page, start);
+ flush_dcache_page(page);
+ }
+ if (vmas)
+ vmas[i] = vma;
+ i++;
+ start += PAGE_SIZE;
+ nr_pages--;
+ } while (nr_pages && start < vma->vm_end);
+ } while (nr_pages);
+
+ return i;
+}
+
+static int get_pfnmap_pages(struct task_struct *tsk, struct mm_struct *mm,
+ unsigned long start, int nr_pages, int write, int force,
+ struct page **pages, struct vm_area_struct **vmas)
+{
+ int flags = FOLL_TOUCH;
+
+ if (pages)
+ flags |= FOLL_GET;
+ if (write)
+ flags |= FOLL_WRITE;
+ if (force)
+ flags |= FOLL_FORCE;
+
+ return __get_pfnmap_pages(tsk, mm, start, nr_pages, flags, pages, vmas);
+}
+
+/*
+ * Convert user space virtual address into pages list
+ */
+static int alloc_user_pages(struct hmm_buffer_object *bo,
+ void *userptr, bool cached)
+{
+ int page_nr;
+ int i;
+ struct vm_area_struct *vma;
+ struct page **pages;
+
+ pages = kmalloc(sizeof(struct page *) * bo->pgnr, GFP_KERNEL);
+ if (unlikely(!pages)) {
+ dev_err(atomisp_dev, "out of memory for pages...\n");
+ return -ENOMEM;
+ }
+
+ bo->page_obj = kmalloc(sizeof(struct hmm_page_object) * bo->pgnr,
+ GFP_KERNEL);
+ if (unlikely(!bo->page_obj)) {
+ dev_err(atomisp_dev, "out of memory for bo->page_obj...\n");
+ kfree(pages);
+ return -ENOMEM;
+ }
+
+ mutex_unlock(&bo->mutex);
+ down_read(&current->mm->mmap_sem);
+ vma = find_vma(current->mm, (unsigned long)userptr);
+ up_read(&current->mm->mmap_sem);
+ if (vma == NULL) {
+ dev_err(atomisp_dev, "find_vma failed\n");
+ kfree(bo->page_obj);
+ kfree(pages);
+ mutex_lock(&bo->mutex);
+ return -EFAULT;
+ }
+ mutex_lock(&bo->mutex);
+	/*
+	 * Handle frame buffers allocated by another kernel-space driver
+	 * and mapped to user space
+	 */
+ if (vma->vm_flags & (VM_IO | VM_PFNMAP)) {
+ page_nr = get_pfnmap_pages(current, current->mm,
+ (unsigned long)userptr,
+ (int)(bo->pgnr), 1, 0,
+ pages, NULL);
+ bo->mem_type = HMM_BO_MEM_TYPE_PFN;
+ } else {
+ /*Handle frame buffer allocated in user space*/
+ mutex_unlock(&bo->mutex);
+ down_read(&current->mm->mmap_sem);
+ page_nr = get_user_pages((unsigned long)userptr,
+ (int)(bo->pgnr), 1, pages, NULL);
+ up_read(&current->mm->mmap_sem);
+ mutex_lock(&bo->mutex);
+ bo->mem_type = HMM_BO_MEM_TYPE_USER;
+ }
+
+ /* can be written by caller, not forced */
+ if (page_nr != bo->pgnr) {
+		dev_err(atomisp_dev,
+			"get_user_pages err: bo->pgnr = %d, pgnr actually pinned = %d\n",
+			bo->pgnr, page_nr);
+ goto out_of_mem;
+ }
+
+ for (i = 0; i < bo->pgnr; i++) {
+ bo->page_obj[i].page = pages[i];
+ bo->page_obj[i].type = HMM_PAGE_TYPE_GENERAL;
+ }
+ hmm_mem_stat.usr_size += bo->pgnr;
+ kfree(pages);
+
+ return 0;
+
+out_of_mem:
+ for (i = 0; i < page_nr; i++)
+ put_page(pages[i]);
+ kfree(pages);
+ kfree(bo->page_obj);
+
+ return -ENOMEM;
+}
+
+static void free_user_pages(struct hmm_buffer_object *bo)
+{
+ int i;
+
+ for (i = 0; i < bo->pgnr; i++)
+ put_page(bo->page_obj[i].page);
+ hmm_mem_stat.usr_size -= bo->pgnr;
+
+ kfree(bo->page_obj);
+}
+
+/*
+ * allocate/free physical pages for the bo.
+ *
+ * type indicates where the pages come from. currently we have 3 types
+ * of memory: HMM_BO_PRIVATE, HMM_BO_USER, HMM_BO_SHARE.
+ *
+ * from_highmem is only valid when type is HMM_BO_PRIVATE; if it is set,
+ * we try to allocate memory from highmem.
+ *
+ * userptr is only valid when type is HMM_BO_USER; it indicates
+ * the start address in the user-space task.
+ *
+ * from_highmem and userptr are both ignored when type is
+ * HMM_BO_SHARE.
+ */
+int hmm_bo_alloc_pages(struct hmm_buffer_object *bo,
+ enum hmm_bo_type type, int from_highmem,
+ void *userptr, bool cached)
+{
+ int ret = -EINVAL;
+
+ check_bo_null_return(bo, -EINVAL);
+
+ mutex_lock(&bo->mutex);
+ check_bo_status_no_goto(bo, HMM_BO_PAGE_ALLOCED, status_err);
+
+ /*
+ * TO DO:
+ * add HMM_BO_USER type
+ */
+ if (type == HMM_BO_PRIVATE) {
+ ret = alloc_private_pages(bo, from_highmem,
+ cached, &dynamic_pool, &reserved_pool);
+ } else if (type == HMM_BO_USER) {
+ ret = alloc_user_pages(bo, userptr, cached);
+ } else {
+ dev_err(atomisp_dev, "invalid buffer type.\n");
+ ret = -EINVAL;
+ }
+ if (ret)
+ goto alloc_err;
+
+ bo->type = type;
+
+ bo->status |= HMM_BO_PAGE_ALLOCED;
+
+ mutex_unlock(&bo->mutex);
+
+ return 0;
+
+alloc_err:
+ mutex_unlock(&bo->mutex);
+ dev_err(atomisp_dev, "alloc pages err...\n");
+ return ret;
+status_err:
+ mutex_unlock(&bo->mutex);
+ dev_err(atomisp_dev,
+		"buffer object already has pages allocated\n");
+ return -EINVAL;
+}
+
+/*
+ * free physical pages of the bo.
+ */
+void hmm_bo_free_pages(struct hmm_buffer_object *bo)
+{
+ check_bo_null_return_void(bo);
+
+ mutex_lock(&bo->mutex);
+
+ check_bo_status_yes_goto(bo, HMM_BO_PAGE_ALLOCED, status_err2);
+
+ /* clear the flag anyway. */
+ bo->status &= (~HMM_BO_PAGE_ALLOCED);
+
+ if (bo->type == HMM_BO_PRIVATE)
+ free_private_pages(bo, &dynamic_pool, &reserved_pool);
+ else if (bo->type == HMM_BO_USER)
+ free_user_pages(bo);
+ else
+ dev_err(atomisp_dev, "invalid buffer type.\n");
+ mutex_unlock(&bo->mutex);
+
+ return;
+
+status_err2:
+ mutex_unlock(&bo->mutex);
+ dev_err(atomisp_dev,
+		"buffer object has no pages allocated yet\n");
+}
+
+int hmm_bo_page_allocated(struct hmm_buffer_object *bo)
+{
+ int ret;
+
+ check_bo_null_return(bo, 0);
+
+ ret = bo->status & HMM_BO_PAGE_ALLOCED;
+
+ return ret;
+}
+
+/*
+ * get physical page info of the bo.
+ */
+int hmm_bo_get_page_info(struct hmm_buffer_object *bo,
+ struct hmm_page_object **page_obj, int *pgnr)
+{
+ check_bo_null_return(bo, -EINVAL);
+
+ mutex_lock(&bo->mutex);
+
+ check_bo_status_yes_goto(bo, HMM_BO_PAGE_ALLOCED, status_err);
+
+ *page_obj = bo->page_obj;
+ *pgnr = bo->pgnr;
+
+ mutex_unlock(&bo->mutex);
+
+ return 0;
+
+status_err:
+ dev_err(atomisp_dev,
+		"buffer object has no pages allocated yet\n");
+ mutex_unlock(&bo->mutex);
+ return -EINVAL;
+}
+
+/*
+ * bind the physical pages to a virtual address space.
+ */
+int hmm_bo_bind(struct hmm_buffer_object *bo)
+{
+ int ret;
+ unsigned int virt;
+ struct hmm_bo_device *bdev;
+ unsigned int i;
+
+ check_bo_null_return(bo, -EINVAL);
+
+ mutex_lock(&bo->mutex);
+
+ check_bo_status_yes_goto(bo,
+ HMM_BO_PAGE_ALLOCED | HMM_BO_ALLOCED,
+ status_err1);
+
+ check_bo_status_no_goto(bo, HMM_BO_BINDED, status_err2);
+
+ bdev = bo->bdev;
+
+ virt = bo->start;
+
+ for (i = 0; i < bo->pgnr; i++) {
+ ret =
+ isp_mmu_map(&bdev->mmu, virt,
+ page_to_phys(bo->page_obj[i].page), 1);
+ if (ret)
+ goto map_err;
+ virt += (1 << PAGE_SHIFT);
+ }
+
+	/*
+	 * flush the TLB here.
+	 *
+	 * theoretically, we do not need to flush the TLB as we did not change
+	 * any existing address mappings, but Silicon Hive's MMU really has a
+	 * bug here. Apparently, when fetching PTEs (page table entries) into
+	 * the TLB, the MMU fetches additional INVALID PTEs automatically
+	 * for performance reasons. E.g. if we only set up 1 page address
+	 * mapping, i.e. update 1 PTE, the MMU fetches 4 PTEs at a time,
+	 * so the additional 3 PTEs are invalid.
+	 */
+ if (bo->start != 0x0)
+ isp_mmu_flush_tlb_range(&bdev->mmu, bo->start,
+ (bo->pgnr << PAGE_SHIFT));
+
+ bo->status |= HMM_BO_BINDED;
+
+ mutex_unlock(&bo->mutex);
+
+ return 0;
+
+map_err:
+ /* unbind the physical pages with related virtual address space */
+ virt = bo->start;
+ for ( ; i > 0; i--) {
+ isp_mmu_unmap(&bdev->mmu, virt, 1);
+ virt += pgnr_to_size(1);
+ }
+
+ mutex_unlock(&bo->mutex);
+ dev_err(atomisp_dev,
+ "setup MMU address mapping failed.\n");
+ return ret;
+
+status_err2:
+ mutex_unlock(&bo->mutex);
+	dev_err(atomisp_dev, "buffer object already bound\n");
+ return -EINVAL;
+status_err1:
+ mutex_unlock(&bo->mutex);
+ dev_err(atomisp_dev,
+ "buffer object vm_node or page not allocated.\n");
+ return -EINVAL;
+}
+
+/*
+ * unbind the physical pages with related virtual address space.
+ */
+void hmm_bo_unbind(struct hmm_buffer_object *bo)
+{
+ unsigned int virt;
+ struct hmm_bo_device *bdev;
+ unsigned int i;
+
+ check_bo_null_return_void(bo);
+
+ mutex_lock(&bo->mutex);
+
+ check_bo_status_yes_goto(bo,
+ HMM_BO_PAGE_ALLOCED |
+ HMM_BO_ALLOCED |
+ HMM_BO_BINDED, status_err);
+
+ bdev = bo->bdev;
+
+ virt = bo->start;
+
+ for (i = 0; i < bo->pgnr; i++) {
+ isp_mmu_unmap(&bdev->mmu, virt, 1);
+ virt += pgnr_to_size(1);
+ }
+
+ /*
+ * flush TLB as the address mapping has been removed and
+ * related TLBs should be invalidated.
+ */
+ isp_mmu_flush_tlb_range(&bdev->mmu, bo->start,
+ (bo->pgnr << PAGE_SHIFT));
+
+ bo->status &= (~HMM_BO_BINDED);
+
+ mutex_unlock(&bo->mutex);
+
+ return;
+
+status_err:
+ mutex_unlock(&bo->mutex);
+ dev_err(atomisp_dev,
+		"buffer vm or page not allocated or not bound yet\n");
+}
+
+int hmm_bo_binded(struct hmm_buffer_object *bo)
+{
+ int ret;
+
+ check_bo_null_return(bo, 0);
+
+ mutex_lock(&bo->mutex);
+
+ ret = bo->status & HMM_BO_BINDED;
+
+ mutex_unlock(&bo->mutex);
+
+ return ret;
+}
+
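+/*
+ * map the bo's pages into a contiguous kernel virtual address with vmap().
+ * if the bo is already vmapped with the requested cache attribute, the
+ * existing mapping is returned; otherwise any old mapping is torn down
+ * and a new one is created.
+ */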
+void *hmm_bo_vmap(struct hmm_buffer_object *bo, bool cached)
+{
+ struct page **pages;
+ int i;
+
+ check_bo_null_return(bo, NULL);
+
+ mutex_lock(&bo->mutex);
+ if (((bo->status & HMM_BO_VMAPED) && !cached) ||
+ ((bo->status & HMM_BO_VMAPED_CACHED) && cached)) {
+ mutex_unlock(&bo->mutex);
+ return bo->vmap_addr;
+ }
+
+ /* the cached attribute needs to change, so vunmap first */
+ if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) {
+ vunmap(bo->vmap_addr);
+ bo->vmap_addr = NULL;
+ bo->status &= ~(HMM_BO_VMAPED | HMM_BO_VMAPED_CACHED);
+ }
+
+ pages = kmalloc(sizeof(*pages) * bo->pgnr, GFP_KERNEL);
+ if (unlikely(!pages)) {
+ mutex_unlock(&bo->mutex);
+ dev_err(atomisp_dev, "out of memory for pages...\n");
+ return NULL;
+ }
+
+ for (i = 0; i < bo->pgnr; i++)
+ pages[i] = bo->page_obj[i].page;
+
+ bo->vmap_addr = vmap(pages, bo->pgnr, VM_MAP,
+ cached ? PAGE_KERNEL : PAGE_KERNEL_NOCACHE);
+ if (unlikely(!bo->vmap_addr)) {
+ kfree(pages);
+ mutex_unlock(&bo->mutex);
+ dev_err(atomisp_dev, "vmap failed...\n");
+ return NULL;
+ }
+ bo->status |= (cached ? HMM_BO_VMAPED_CACHED : HMM_BO_VMAPED);
+
+ kfree(pages);
+
+ mutex_unlock(&bo->mutex);
+ return bo->vmap_addr;
+}
+
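+/*
+ * flush the CPU cache for a cached vmap of the bo; does nothing if the
+ * bo has no cached vmap.
+ */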
+void hmm_bo_flush_vmap(struct hmm_buffer_object *bo)
+{
+ check_bo_null_return_void(bo);
+
+ mutex_lock(&bo->mutex);
+ if (!(bo->status & HMM_BO_VMAPED_CACHED) || !bo->vmap_addr) {
+ mutex_unlock(&bo->mutex);
+ return;
+ }
+
+ clflush_cache_range(bo->vmap_addr, bo->pgnr * PAGE_SIZE);
+ mutex_unlock(&bo->mutex);
+}
+
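+/*
+ * tear down the kernel vmap of the bo, if one exists.
+ */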
+void hmm_bo_vunmap(struct hmm_buffer_object *bo)
+{
+ check_bo_null_return_void(bo);
+
+ mutex_lock(&bo->mutex);
+ if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) {
+ vunmap(bo->vmap_addr);
+ bo->vmap_addr = NULL;
+ bo->status &= ~(HMM_BO_VMAPED | HMM_BO_VMAPED_CACHED);
+ }
+
+ mutex_unlock(&bo->mutex);
+ return;
+}
+
+void hmm_bo_ref(struct hmm_buffer_object *bo)
+{
+ check_bo_null_return_void(bo);
+
+ kref_get(&bo->kref);
+}
+
+static void kref_hmm_bo_release(struct kref *kref)
+{
+ if (!kref)
+ return;
+
+ hmm_bo_release(kref_to_hmm_bo(kref));
+}
+
+void hmm_bo_unref(struct hmm_buffer_object *bo)
+{
+ check_bo_null_return_void(bo);
+
+ kref_put(&bo->kref, kref_hmm_bo_release);
+}
+
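+/*
+ * VMA open/close callbacks: take/drop a reference on the underlying bo and
+ * track the mmap count, so the bo stays alive for as long as any user
+ * mapping of it exists.
+ */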
+static void hmm_bo_vm_open(struct vm_area_struct *vma)
+{
+ struct hmm_buffer_object *bo =
+ (struct hmm_buffer_object *)vma->vm_private_data;
+
+ check_bo_null_return_void(bo);
+
+ hmm_bo_ref(bo);
+
+ mutex_lock(&bo->mutex);
+
+ bo->status |= HMM_BO_MMAPED;
+
+ bo->mmap_count++;
+
+ mutex_unlock(&bo->mutex);
+}
+
+static void hmm_bo_vm_close(struct vm_area_struct *vma)
+{
+ struct hmm_buffer_object *bo =
+ (struct hmm_buffer_object *)vma->vm_private_data;
+
+ check_bo_null_return_void(bo);
+
+ hmm_bo_unref(bo);
+
+ mutex_lock(&bo->mutex);
+
+ bo->mmap_count--;
+
+ if (!bo->mmap_count) {
+ bo->status &= (~HMM_BO_MMAPED);
+ vma->vm_private_data = NULL;
+ }
+
+ mutex_unlock(&bo->mutex);
+}
+
+static const struct vm_operations_struct hmm_bo_vm_ops = {
+ .open = hmm_bo_vm_open,
+ .close = hmm_bo_vm_close,
+};
+
+/*
+ * mmap the bo to user space.
+ */
+int hmm_bo_mmap(struct vm_area_struct *vma, struct hmm_buffer_object *bo)
+{
+ unsigned int start, end;
+ unsigned int virt;
+ unsigned int pgnr, i;
+ unsigned int pfn;
+
+ check_bo_null_return(bo, -EINVAL);
+
+ check_bo_status_yes_goto(bo, HMM_BO_PAGE_ALLOCED, status_err);
+
+ pgnr = bo->pgnr;
+ start = vma->vm_start;
+ end = vma->vm_end;
+
+ /*
+ * the vma's virtual address space size and the buffer object's
+ * size must be the same.
+ */
+ if ((start + pgnr_to_size(pgnr)) != end) {
+ dev_warn(atomisp_dev,
+ "vma's address space size does not equal the buffer object's size\n");
+ return -EINVAL;
+ }
+
+ virt = vma->vm_start;
+ for (i = 0; i < pgnr; i++) {
+ pfn = page_to_pfn(bo->page_obj[i].page);
+ if (remap_pfn_range(vma, virt, pfn, PAGE_SIZE, PAGE_SHARED)) {
+ dev_warn(atomisp_dev,
+ "remap_pfn_range failed: virt = 0x%x, pfn = 0x%x, mapped_pgnr = %d\n",
+ virt, pfn, 1);
+ return -EINVAL;
+ }
+ virt += PAGE_SIZE;
+ }
+
+ vma->vm_private_data = bo;
+
+ vma->vm_ops = &hmm_bo_vm_ops;
+ vma->vm_flags |= VM_IO|VM_DONTEXPAND|VM_DONTDUMP;
+
+ /* call hmm_bo_vm_open explicitly */
+ hmm_bo_vm_open(vma);
+
+ return 0;
+
+status_err:
+ dev_err(atomisp_dev, "buffer page not allocated yet.\n");
+ return -EINVAL;
+}
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_dynamic_pool.c b/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_dynamic_pool.c
new file mode 100644
index 000000000000..19e0e9ee37de
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_dynamic_pool.c
@@ -0,0 +1,241 @@
+/*
+ * Support for Medifield PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
+ *
+ * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+/*
+ * This file contains functions for dynamic memory pool management
+ */
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+
+#include <asm/set_memory.h>
+
+#include "atomisp_internal.h"
+
+#include "hmm/hmm_pool.h"
+
+/*
+ * dynamic memory pool ops.
+ */
+static unsigned int get_pages_from_dynamic_pool(void *pool,
+ struct hmm_page_object *page_obj,
+ unsigned int size, bool cached)
+{
+ struct hmm_page *hmm_page;
+ unsigned long flags;
+ unsigned int i = 0;
+ struct hmm_dynamic_pool_info *dypool_info = pool;
+
+ if (!dypool_info)
+ return 0;
+
+ spin_lock_irqsave(&dypool_info->list_lock, flags);
+ if (dypool_info->initialized) {
+ while (!list_empty(&dypool_info->pages_list)) {
+ hmm_page = list_entry(dypool_info->pages_list.next,
+ struct hmm_page, list);
+
+ list_del(&hmm_page->list);
+ dypool_info->pgnr--;
+ spin_unlock_irqrestore(&dypool_info->list_lock, flags);
+
+ page_obj[i].page = hmm_page->page;
+ page_obj[i++].type = HMM_PAGE_TYPE_DYNAMIC;
+ kmem_cache_free(dypool_info->pgptr_cache, hmm_page);
+
+ if (i == size)
+ return i;
+
+ spin_lock_irqsave(&dypool_info->list_lock, flags);
+ }
+ }
+ spin_unlock_irqrestore(&dypool_info->list_lock, flags);
+
+ return i;
+}
+
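+/*
+ * return a page to the dynamic pool (reserved pages are ignored here).
+ * if the pool is already at its high watermark, or tracking memory cannot
+ * be allocated, the page is instead set back to write-back and freed to
+ * the system.
+ */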
+static void free_pages_to_dynamic_pool(void *pool,
+ struct hmm_page_object *page_obj)
+{
+ struct hmm_page *hmm_page;
+ unsigned long flags;
+ int ret;
+ struct hmm_dynamic_pool_info *dypool_info = pool;
+
+ if (!dypool_info)
+ return;
+
+ spin_lock_irqsave(&dypool_info->list_lock, flags);
+ if (!dypool_info->initialized) {
+ spin_unlock_irqrestore(&dypool_info->list_lock, flags);
+ return;
+ }
+ spin_unlock_irqrestore(&dypool_info->list_lock, flags);
+
+ if (page_obj->type == HMM_PAGE_TYPE_RESERVED)
+ return;
+
+ if (dypool_info->pgnr >= dypool_info->pool_size) {
+ /* free page directly back to system */
+ ret = set_pages_wb(page_obj->page, 1);
+ if (ret)
+ dev_err(atomisp_dev,
+ "set page to WB err ...ret=%d\n", ret);
+ /*
+ * W/A: set_pages_wb occasionally returns -EFAULT, indicating
+ * that the page address is not in the valid range
+ * (0xffff880000000000~0xffffc7ffffffffff); __free_pages would
+ * then panic. It is not clear why the address becomes invalid;
+ * it may be memory corruption caused by low memory.
+ */
+ if (!ret) {
+ __free_pages(page_obj->page, 0);
+ hmm_mem_stat.sys_size--;
+ }
+ return;
+ }
+ hmm_page = kmem_cache_zalloc(dypool_info->pgptr_cache,
+ GFP_KERNEL);
+ if (!hmm_page) {
+ dev_err(atomisp_dev, "out of memory for hmm_page.\n");
+
+ /* free page directly */
+ ret = set_pages_wb(page_obj->page, 1);
+ if (ret)
+ dev_err(atomisp_dev,
+ "set page to WB err ...ret=%d\n", ret);
+ if (!ret) {
+ __free_pages(page_obj->page, 0);
+ hmm_mem_stat.sys_size--;
+ }
+ return;
+ }
+
+ hmm_page->page = page_obj->page;
+
+ /*
+ * add to pages_list of pages_pool
+ */
+ spin_lock_irqsave(&dypool_info->list_lock, flags);
+ list_add_tail(&hmm_page->list, &dypool_info->pages_list);
+ dypool_info->pgnr++;
+ spin_unlock_irqrestore(&dypool_info->list_lock, flags);
+ hmm_mem_stat.dyc_size++;
+}
+
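+/*
+ * create the dynamic pool bookkeeping: an (initially empty) page list plus
+ * a kmem_cache for the struct hmm_page entries that track pooled pages.
+ */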
+static int hmm_dynamic_pool_init(void **pool, unsigned int pool_size)
+{
+ struct hmm_dynamic_pool_info *dypool_info;
+
+ if (pool_size == 0)
+ return 0;
+
+ dypool_info = kmalloc(sizeof(struct hmm_dynamic_pool_info),
+ GFP_KERNEL);
+ if (unlikely(!dypool_info)) {
+ dev_err(atomisp_dev, "out of memory for repool_info.\n");
+ return -ENOMEM;
+ }
+
+ dypool_info->pgptr_cache = kmem_cache_create("pgptr_cache",
+ sizeof(struct hmm_page), 0,
+ SLAB_HWCACHE_ALIGN, NULL);
+ if (!dypool_info->pgptr_cache) {
+ kfree(dypool_info);
+ return -ENOMEM;
+ }
+
+ INIT_LIST_HEAD(&dypool_info->pages_list);
+ spin_lock_init(&dypool_info->list_lock);
+ dypool_info->initialized = true;
+ dypool_info->pool_size = pool_size;
+ dypool_info->pgnr = 0;
+
+ *pool = dypool_info;
+
+ return 0;
+}
+
+static void hmm_dynamic_pool_exit(void **pool)
+{
+ struct hmm_dynamic_pool_info *dypool_info = *pool;
+ struct hmm_page *hmm_page;
+ unsigned long flags;
+ int ret;
+
+ if (!dypool_info)
+ return;
+
+ spin_lock_irqsave(&dypool_info->list_lock, flags);
+ if (!dypool_info->initialized) {
+ spin_unlock_irqrestore(&dypool_info->list_lock, flags);
+ return;
+ }
+ dypool_info->initialized = false;
+
+ while (!list_empty(&dypool_info->pages_list)) {
+ hmm_page = list_entry(dypool_info->pages_list.next,
+ struct hmm_page, list);
+
+ list_del(&hmm_page->list);
+ spin_unlock_irqrestore(&dypool_info->list_lock, flags);
+
+ /* may sleep, so it cannot be called with the spinlock held */
+ ret = set_pages_wb(hmm_page->page, 1);
+ if (ret)
+ dev_err(atomisp_dev,
+ "set page to WB err...ret=%d\n", ret);
+ if (!ret) {
+ __free_pages(hmm_page->page, 0);
+ hmm_mem_stat.dyc_size--;
+ hmm_mem_stat.sys_size--;
+ }
+ kmem_cache_free(dypool_info->pgptr_cache, hmm_page);
+ spin_lock_irqsave(&dypool_info->list_lock, flags);
+ }
+
+ spin_unlock_irqrestore(&dypool_info->list_lock, flags);
+
+ kmem_cache_destroy(dypool_info->pgptr_cache);
+
+ kfree(dypool_info);
+
+ *pool = NULL;
+}
+
+static int hmm_dynamic_pool_inited(void *pool)
+{
+ struct hmm_dynamic_pool_info *dypool_info = pool;
+
+ if (!dypool_info)
+ return 0;
+
+ return dypool_info->initialized;
+}
+
+struct hmm_pool_ops dynamic_pops = {
+ .pool_init = hmm_dynamic_pool_init,
+ .pool_exit = hmm_dynamic_pool_exit,
+ .pool_alloc_pages = get_pages_from_dynamic_pool,
+ .pool_free_pages = free_pages_to_dynamic_pool,
+ .pool_inited = hmm_dynamic_pool_inited,
+};
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_reserved_pool.c b/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_reserved_pool.c
new file mode 100644
index 000000000000..bf6586805f7f
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_reserved_pool.c
@@ -0,0 +1,259 @@
+/*
+ * Support for Medifield PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
+ *
+ * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+/*
+ * This file contains functions for reserved memory pool management
+ */
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+
+#include <asm/set_memory.h>
+
+#include "atomisp_internal.h"
+#include "hmm/hmm_pool.h"
+
+/*
+ * reserved memory pool ops.
+ */
+static unsigned int get_pages_from_reserved_pool(void *pool,
+ struct hmm_page_object *page_obj,
+ unsigned int size, bool cached)
+{
+ unsigned long flags;
+ unsigned int i = 0;
+ unsigned int repool_pgnr;
+ int j;
+ struct hmm_reserved_pool_info *repool_info = pool;
+
+ if (!repool_info)
+ return 0;
+
+ spin_lock_irqsave(&repool_info->list_lock, flags);
+ if (repool_info->initialized) {
+ repool_pgnr = repool_info->index;
+
+ for (j = repool_pgnr-1; j >= 0; j--) {
+ page_obj[i].page = repool_info->pages[j];
+ page_obj[i].type = HMM_PAGE_TYPE_RESERVED;
+ i++;
+ repool_info->index--;
+ if (i == size)
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&repool_info->list_lock, flags);
+ return i;
+}
+
+static void free_pages_to_reserved_pool(void *pool,
+ struct hmm_page_object *page_obj)
+{
+ unsigned long flags;
+ struct hmm_reserved_pool_info *repool_info = pool;
+
+ if (!repool_info)
+ return;
+
+ spin_lock_irqsave(&repool_info->list_lock, flags);
+
+ if (repool_info->initialized &&
+ repool_info->index < repool_info->pgnr &&
+ page_obj->type == HMM_PAGE_TYPE_RESERVED) {
+ repool_info->pages[repool_info->index++] = page_obj->page;
+ }
+
+ spin_unlock_irqrestore(&repool_info->list_lock, flags);
+}
+
+static int hmm_reserved_pool_setup(struct hmm_reserved_pool_info **repool_info,
+ unsigned int pool_size)
+{
+ struct hmm_reserved_pool_info *pool_info;
+
+ pool_info = kmalloc(sizeof(struct hmm_reserved_pool_info),
+ GFP_KERNEL);
+ if (unlikely(!pool_info)) {
+ dev_err(atomisp_dev, "out of memory for repool_info.\n");
+ return -ENOMEM;
+ }
+
+ pool_info->pages = kmalloc(sizeof(struct page *) * pool_size,
+ GFP_KERNEL);
+ if (unlikely(!pool_info->pages)) {
+ dev_err(atomisp_dev, "out of memory for repool_info->pages.\n");
+ kfree(pool_info);
+ return -ENOMEM;
+ }
+
+ pool_info->index = 0;
+ pool_info->pgnr = 0;
+ spin_lock_init(&pool_info->list_lock);
+ pool_info->initialized = true;
+
+ *repool_info = pool_info;
+
+ return 0;
+}
+
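+/*
+ * pre-allocate pool_size pages for the reserved pool, trying the largest
+ * page orders first and falling back to smaller orders (down to single
+ * pages) when higher-order allocations fail; it gives up after repeated
+ * single-page failures. the pages are set uncached before being added
+ * to the pool.
+ */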
+static int hmm_reserved_pool_init(void **pool, unsigned int pool_size)
+{
+ int ret;
+ unsigned int blk_pgnr;
+ unsigned int pgnr = pool_size;
+ unsigned int order = 0;
+ unsigned int i = 0;
+ int fail_number = 0;
+ struct page *pages;
+ int j;
+ struct hmm_reserved_pool_info *repool_info;
+ if (pool_size == 0)
+ return 0;
+
+ ret = hmm_reserved_pool_setup(&repool_info, pool_size);
+ if (ret) {
+ dev_err(atomisp_dev, "hmm_reserved_pool_setup failed.\n");
+ return ret;
+ }
+
+ pgnr = pool_size;
+
+ i = 0;
+ order = MAX_ORDER;
+
+ while (pgnr) {
+ blk_pgnr = 1U << order;
+ while (blk_pgnr > pgnr) {
+ order--;
+ blk_pgnr >>= 1U;
+ }
+ BUG_ON(order > MAX_ORDER);
+
+ pages = alloc_pages(GFP_KERNEL | __GFP_NOWARN, order);
+ if (unlikely(!pages)) {
+ if (order == 0) {
+ fail_number++;
+ dev_err(atomisp_dev, "%s: alloc_pages failed: %d\n",
+ __func__, fail_number);
+ /*
+ * if allocation fails ALLOC_PAGE_FAIL_NUM times in
+ * a row, give up and jump to end
+ */
+
+ /* FIXME: is this mechanism OK? */
+ if (fail_number == ALLOC_PAGE_FAIL_NUM)
+ goto end;
+ } else {
+ order--;
+ }
+ } else {
+ blk_pgnr = 1U << order;
+
+ ret = set_pages_uc(pages, blk_pgnr);
+ if (ret) {
+ dev_err(atomisp_dev,
+ "set pages uncached failed\n");
+ __free_pages(pages, order);
+ goto end;
+ }
+
+ for (j = 0; j < blk_pgnr; j++)
+ repool_info->pages[i++] = pages + j;
+
+ repool_info->index += blk_pgnr;
+ repool_info->pgnr += blk_pgnr;
+
+ pgnr -= blk_pgnr;
+
+ fail_number = 0;
+ }
+ }
+
+end:
+ repool_info->initialized = true;
+
+ *pool = repool_info;
+
+ dev_info(atomisp_dev,
+ "hmm_reserved_pool initialized successfully with %d pages.\n",
+ repool_info->pgnr);
+ return 0;
+}
+
+static void hmm_reserved_pool_exit(void **pool)
+{
+ unsigned long flags;
+ int i, ret;
+ unsigned int pgnr;
+ struct hmm_reserved_pool_info *repool_info = *pool;
+
+ if (!repool_info)
+ return;
+
+ spin_lock_irqsave(&repool_info->list_lock, flags);
+ if (!repool_info->initialized) {
+ spin_unlock_irqrestore(&repool_info->list_lock, flags);
+ return;
+ }
+ pgnr = repool_info->pgnr;
+ repool_info->index = 0;
+ repool_info->pgnr = 0;
+ repool_info->initialized = false;
+ spin_unlock_irqrestore(&repool_info->list_lock, flags);
+
+ for (i = 0; i < pgnr; i++) {
+ ret = set_pages_wb(repool_info->pages[i], 1);
+ if (ret)
+ dev_err(atomisp_dev,
+ "set page to WB err...ret=%d\n", ret);
+ /*
+ * W/A: set_pages_wb occasionally returns -EFAULT, indicating
+ * that the page address is not in the valid range
+ * (0xffff880000000000~0xffffc7ffffffffff); __free_pages would
+ * then panic. It is not clear why the address becomes invalid;
+ * it may be memory corruption caused by low memory.
+ */
+ if (!ret)
+ __free_pages(repool_info->pages[i], 0);
+ }
+
+ kfree(repool_info->pages);
+ kfree(repool_info);
+
+ *pool = NULL;
+}
+
+static int hmm_reserved_pool_inited(void *pool)
+{
+ struct hmm_reserved_pool_info *repool_info = pool;
+
+ if (!repool_info)
+ return 0;
+
+ return repool_info->initialized;
+}
+
+struct hmm_pool_ops reserved_pops = {
+ .pool_init = hmm_reserved_pool_init,
+ .pool_exit = hmm_reserved_pool_exit,
+ .pool_alloc_pages = get_pages_from_reserved_pool,
+ .pool_free_pages = free_pages_to_reserved_pool,
+ .pool_inited = hmm_reserved_pool_inited,
+};
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_vm.c b/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_vm.c
new file mode 100644
index 000000000000..0722a68a49e7
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_vm.c
@@ -0,0 +1,218 @@
+/*
+ * Support for Medifield PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
+ *
+ * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+/*
+ * This file contains functions for ISP virtual address management in the ISP driver
+ */
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <asm/page.h>
+
+#include "atomisp_internal.h"
+#include "mmu/isp_mmu.h"
+#include "hmm/hmm_vm.h"
+#include "hmm/hmm_common.h"
+
+static unsigned int vm_node_end(unsigned int start, unsigned int pgnr)
+{
+ return start + pgnr_to_size(pgnr);
+}
+
+static int addr_in_vm_node(unsigned int addr,
+ struct hmm_vm_node *node)
+{
+ return (addr >= node->start) && (addr < (node->start + node->size));
+}
+
+int hmm_vm_init(struct hmm_vm *vm, unsigned int start,
+ unsigned int size)
+{
+ if (!vm)
+ return -1;
+
+ vm->start = start;
+ vm->pgnr = size_to_pgnr_ceil(size);
+ vm->size = pgnr_to_size(vm->pgnr);
+
+ INIT_LIST_HEAD(&vm->vm_node_list);
+ spin_lock_init(&vm->lock);
+ vm->cache = kmem_cache_create("atomisp_vm", sizeof(struct hmm_vm_node),
+ 0, 0, NULL);
+
+ return vm->cache != NULL ? 0 : -ENOMEM;
+}
+
+void hmm_vm_clean(struct hmm_vm *vm)
+{
+ struct hmm_vm_node *node, *tmp;
+ struct list_head new_head;
+
+ if (!vm)
+ return;
+
+ spin_lock(&vm->lock);
+ list_replace_init(&vm->vm_node_list, &new_head);
+ spin_unlock(&vm->lock);
+
+ list_for_each_entry_safe(node, tmp, &new_head, list) {
+ list_del(&node->list);
+ kmem_cache_free(vm->cache, node);
+ }
+
+ kmem_cache_destroy(vm->cache);
+}
+
+static struct hmm_vm_node *alloc_hmm_vm_node(unsigned int pgnr,
+ struct hmm_vm *vm)
+{
+ struct hmm_vm_node *node;
+
+ node = kmem_cache_alloc(vm->cache, GFP_KERNEL);
+ if (!node) {
+ dev_err(atomisp_dev, "out of memory.\n");
+ return NULL;
+ }
+
+ INIT_LIST_HEAD(&node->list);
+ node->pgnr = pgnr;
+ node->size = pgnr_to_size(pgnr);
+ node->vm = vm;
+
+ return node;
+}
+
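+/*
+ * first-fit allocation: walk the sorted node list and place the new node
+ * in the first hole that is large enough, keeping a one-page gap after
+ * each existing node; an empty list places the node at the start of the
+ * vm space.
+ */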
+struct hmm_vm_node *hmm_vm_alloc_node(struct hmm_vm *vm, unsigned int pgnr)
+{
+ struct list_head *head;
+ struct hmm_vm_node *node, *cur, *next;
+ unsigned int vm_start, vm_end;
+ unsigned int addr;
+ unsigned int size;
+
+ if (!vm)
+ return NULL;
+
+ vm_start = vm->start;
+ vm_end = vm_node_end(vm->start, vm->pgnr);
+ size = pgnr_to_size(pgnr);
+
+ addr = vm_start;
+ head = &vm->vm_node_list;
+
+ node = alloc_hmm_vm_node(pgnr, vm);
+ if (!node) {
+ dev_err(atomisp_dev, "no memory to allocate hmm vm node.\n");
+ return NULL;
+ }
+
+ spin_lock(&vm->lock);
+ /*
+ * if the list is empty, the loop body will not be executed.
+ */
+ list_for_each_entry(cur, head, list) {
+ /* add a one-page gap between vm areas to help catch overflows */
+ addr = PAGE_ALIGN(vm_node_end(cur->start, cur->pgnr) + 1);
+
+ if (list_is_last(&cur->list, head)) {
+ if (addr + size > vm_end) {
+ /* vm area does not have space anymore */
+ spin_unlock(&vm->lock);
+ kmem_cache_free(vm->cache, node);
+ dev_err(atomisp_dev,
+ "no enough virtual address space.\n");
+ return NULL;
+ }
+
+ /* We still have vm space to add new node to tail */
+ break;
+ }
+
+ next = list_entry(cur->list.next, struct hmm_vm_node, list);
+ if ((next->start - addr) > size)
+ break;
+ }
+ node->start = addr;
+ node->vm = vm;
+ list_add(&node->list, &cur->list);
+ spin_unlock(&vm->lock);
+
+ return node;
+}
+
+void hmm_vm_free_node(struct hmm_vm_node *node)
+{
+ struct hmm_vm *vm;
+
+ if (!node)
+ return;
+
+ vm = node->vm;
+
+ spin_lock(&vm->lock);
+ list_del(&node->list);
+ spin_unlock(&vm->lock);
+
+ kmem_cache_free(vm->cache, node);
+}
+
+struct hmm_vm_node *hmm_vm_find_node_start(struct hmm_vm *vm, unsigned int addr)
+{
+ struct hmm_vm_node *node;
+
+ if (!vm)
+ return NULL;
+
+ spin_lock(&vm->lock);
+
+ list_for_each_entry(node, &vm->vm_node_list, list) {
+ if (node->start == addr) {
+ spin_unlock(&vm->lock);
+ return node;
+ }
+ }
+
+ spin_unlock(&vm->lock);
+ return NULL;
+}
+
+struct hmm_vm_node *hmm_vm_find_node_in_range(struct hmm_vm *vm,
+ unsigned int addr)
+{
+ struct hmm_vm_node *node;
+
+ if (!vm)
+ return NULL;
+
+ spin_lock(&vm->lock);
+
+ list_for_each_entry(node, &vm->vm_node_list, list) {
+ if (addr_in_vm_node(addr, node)) {
+ spin_unlock(&vm->lock);
+ return node;
+ }
+ }
+
+ spin_unlock(&vm->lock);
+ return NULL;
+}
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/hrt/hive_isp_css_custom_host_hrt.h b/drivers/staging/media/atomisp/pci/atomisp2/hrt/hive_isp_css_custom_host_hrt.h
new file mode 100644
index 000000000000..46a5d29e2d3a
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/hrt/hive_isp_css_custom_host_hrt.h
@@ -0,0 +1,107 @@
+/*
+ * Support for Medifield PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
+ *
+ * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+#ifndef _hive_isp_css_custom_host_hrt_h_
+#define _hive_isp_css_custom_host_hrt_h_
+
+#include <linux/delay.h>
+#include "atomisp_helper.h"
+
+/*
+ * The _hrt_master_port_store/load/uload macros use a __force-attributed
+ * cast to intentionally dereference the __iomem-attributed (noderef)
+ * pointer returned by atomisp_get_io_virt_addr.
+ */
+#define _hrt_master_port_store_8(a, d) \
+ (*((s8 __force *)atomisp_get_io_virt_addr(a)) = (d))
+
+#define _hrt_master_port_store_16(a, d) \
+ (*((s16 __force *)atomisp_get_io_virt_addr(a)) = (d))
+
+#define _hrt_master_port_store_32(a, d) \
+ (*((s32 __force *)atomisp_get_io_virt_addr(a)) = (d))
+
+#define _hrt_master_port_load_8(a) \
+ (*(s8 __force *)atomisp_get_io_virt_addr(a))
+
+#define _hrt_master_port_load_16(a) \
+ (*(s16 __force *)atomisp_get_io_virt_addr(a))
+
+#define _hrt_master_port_load_32(a) \
+ (*(s32 __force *)atomisp_get_io_virt_addr(a))
+
+#define _hrt_master_port_uload_8(a) \
+ (*(u8 __force *)atomisp_get_io_virt_addr(a))
+
+#define _hrt_master_port_uload_16(a) \
+ (*(u16 __force *)atomisp_get_io_virt_addr(a))
+
+#define _hrt_master_port_uload_32(a) \
+ (*(u32 __force *)atomisp_get_io_virt_addr(a))
+
+#define _hrt_master_port_store_8_volatile(a, d) _hrt_master_port_store_8(a, d)
+#define _hrt_master_port_store_16_volatile(a, d) _hrt_master_port_store_16(a, d)
+#define _hrt_master_port_store_32_volatile(a, d) _hrt_master_port_store_32(a, d)
+
+#define _hrt_master_port_load_8_volatile(a) _hrt_master_port_load_8(a)
+#define _hrt_master_port_load_16_volatile(a) _hrt_master_port_load_16(a)
+#define _hrt_master_port_load_32_volatile(a) _hrt_master_port_load_32(a)
+
+#define _hrt_master_port_uload_8_volatile(a) _hrt_master_port_uload_8(a)
+#define _hrt_master_port_uload_16_volatile(a) _hrt_master_port_uload_16(a)
+#define _hrt_master_port_uload_32_volatile(a) _hrt_master_port_uload_32(a)
+
+static inline void hrt_sleep(void)
+{
+ udelay(1);
+}
+
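+/*
+ * byte-wise memcpy/memset style helpers over the HRT master port: they
+ * copy to/from an ISP address one byte at a time via
+ * _hrt_master_port_store_8 / _hrt_master_port_load_8.
+ */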
+static inline uint32_t _hrt_mem_store(uint32_t to, const void *from, size_t n)
+{
+ unsigned i;
+ uint32_t _to = to;
+ const char *_from = (const char *)from;
+ for (i = 0; i < n; i++, _to++, _from++)
+ _hrt_master_port_store_8(_to, *_from);
+ return _to;
+}
+
+static inline void *_hrt_mem_load(uint32_t from, void *to, size_t n)
+{
+ unsigned i;
+ char *_to = (char *)to;
+ uint32_t _from = from;
+ for (i = 0; i < n; i++, _to++, _from++)
+ *_to = _hrt_master_port_load_8(_from);
+ return _to;
+}
+
+static inline uint32_t _hrt_mem_set(uint32_t to, int c, size_t n)
+{
+ unsigned i;
+ uint32_t _to = to;
+ for (i = 0; i < n; i++, _to++)
+ _hrt_master_port_store_8(_to, c);
+ return _to;
+}
+
+#endif /* _hive_isp_css_custom_host_hrt_h_ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/hrt/hive_isp_css_mm_hrt.c b/drivers/staging/media/atomisp/pci/atomisp2/hrt/hive_isp_css_mm_hrt.c
new file mode 100644
index 000000000000..7dff22f59e29
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/hrt/hive_isp_css_mm_hrt.c
@@ -0,0 +1,129 @@
+/*
+ * Support for Medifield PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
+ *
+ * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+
+#include "atomisp_internal.h"
+
+#include "hive_isp_css_mm_hrt.h"
+#include "hmm/hmm.h"
+
+#define __page_align(size) (((size) + (PAGE_SIZE-1)) & (~(PAGE_SIZE-1)))
+
+static void *my_userptr;
+static unsigned my_num_pages;
+static enum hrt_userptr_type my_usr_type;
+
+void hrt_isp_css_mm_set_user_ptr(void *userptr,
+ unsigned int num_pages,
+ enum hrt_userptr_type type)
+{
+ my_userptr = userptr;
+ my_num_pages = num_pages;
+ my_usr_type = type;
+}
+
+static ia_css_ptr __hrt_isp_css_mm_alloc(size_t bytes, void *userptr,
+ unsigned int num_pages,
+ enum hrt_userptr_type type,
+ bool cached)
+{
+#ifdef CONFIG_ION
+ if (type == HRT_USR_ION)
+ return hmm_alloc(bytes, HMM_BO_ION, 0,
+ userptr, cached);
+
+#endif
+ if (type == HRT_USR_PTR) {
+ if (userptr == NULL)
+ return hmm_alloc(bytes, HMM_BO_PRIVATE, 0,
+ 0, cached);
+ else {
+ if (num_pages < ((__page_align(bytes)) >> PAGE_SHIFT))
+ dev_err(atomisp_dev,
+ "user space memory size is less than the expected size.\n");
+ else if (num_pages > ((__page_align(bytes))
+ >> PAGE_SHIFT))
+ dev_err(atomisp_dev,
+ "user space memory size is larger than the expected size.\n");
+
+ return hmm_alloc(bytes, HMM_BO_USER, 0,
+ userptr, cached);
+ }
+ } else {
+ dev_err(atomisp_dev, "user ptr type is incorrect.\n");
+ return 0;
+ }
+}
+
+ia_css_ptr hrt_isp_css_mm_alloc(size_t bytes)
+{
+ return __hrt_isp_css_mm_alloc(bytes, my_userptr,
+ my_num_pages, my_usr_type, false);
+}
+
+ia_css_ptr hrt_isp_css_mm_alloc_user_ptr(size_t bytes, void *userptr,
+ unsigned int num_pages,
+ enum hrt_userptr_type type,
+ bool cached)
+{
+ return __hrt_isp_css_mm_alloc(bytes, userptr, num_pages,
+ type, cached);
+}
+
+ia_css_ptr hrt_isp_css_mm_alloc_cached(size_t bytes)
+{
+ if (my_userptr == NULL)
+ return hmm_alloc(bytes, HMM_BO_PRIVATE, 0, 0,
+ HMM_CACHED);
+ else {
+ if (my_num_pages < ((__page_align(bytes)) >> PAGE_SHIFT))
+ dev_err(atomisp_dev,
+ "user space memory size is less than the expected size.\n");
+ else if (my_num_pages > ((__page_align(bytes)) >> PAGE_SHIFT))
+ dev_err(atomisp_dev,
+ "user space memory size is larger than the expected size.\n");
+
+ return hmm_alloc(bytes, HMM_BO_USER, 0,
+ my_userptr, HMM_CACHED);
+ }
+}
+
+ia_css_ptr hrt_isp_css_mm_calloc(size_t bytes)
+{
+ ia_css_ptr ptr = hrt_isp_css_mm_alloc(bytes);
+ if (ptr)
+ hmm_set(ptr, 0, bytes);
+ return ptr;
+}
+
+ia_css_ptr hrt_isp_css_mm_calloc_cached(size_t bytes)
+{
+ ia_css_ptr ptr = hrt_isp_css_mm_alloc_cached(bytes);
+ if (ptr)
+ hmm_set(ptr, 0, bytes);
+ return ptr;
+}
+
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/hrt/hive_isp_css_mm_hrt.h b/drivers/staging/media/atomisp/pci/atomisp2/hrt/hive_isp_css_mm_hrt.h
new file mode 100644
index 000000000000..1328944a7afd
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/hrt/hive_isp_css_mm_hrt.h
@@ -0,0 +1,60 @@
+/*
+ * Support for Medfield PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
+ *
+ * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+
+#ifndef _hive_isp_css_mm_hrt_h_
+#define _hive_isp_css_mm_hrt_h_
+
+#include <hmm/hmm.h>
+#include <hrt/hive_isp_css_custom_host_hrt.h>
+
+#define HRT_BUF_FLAG_CACHED (1 << 0)
+
+enum hrt_userptr_type {
+ HRT_USR_PTR = 0,
+#ifdef CONFIG_ION
+ HRT_USR_ION,
+#endif
+};
+
+struct hrt_userbuffer_attr {
+ enum hrt_userptr_type type;
+ unsigned int pgnr;
+};
+
+void hrt_isp_css_mm_set_user_ptr(void *userptr,
+ unsigned int num_pages, enum hrt_userptr_type);
+
+/* Allocate memory, returns a virtual address */
+ia_css_ptr hrt_isp_css_mm_alloc(size_t bytes);
+ia_css_ptr hrt_isp_css_mm_alloc_user_ptr(size_t bytes, void *userptr,
+ unsigned int num_pages,
+ enum hrt_userptr_type,
+ bool cached);
+ia_css_ptr hrt_isp_css_mm_alloc_cached(size_t bytes);
+
+/* allocate memory and initialize with zeros,
+ returns a virtual address */
+ia_css_ptr hrt_isp_css_mm_calloc(size_t bytes);
+ia_css_ptr hrt_isp_css_mm_calloc_cached(size_t bytes);
+
+#endif /* _hive_isp_css_mm_hrt_h_ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm.h b/drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm.h
new file mode 100644
index 000000000000..6b9fb1b2caaf
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm.h
@@ -0,0 +1,106 @@
+/*
+ * Support for Medifield PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
+ *
+ * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+
+#ifndef __HMM_H__
+#define __HMM_H__
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+
+#include "hmm/hmm_pool.h"
+#include "ia_css_types.h"
+
+#define HMM_CACHED true
+#define HMM_UNCACHED false
+
+int hmm_pool_register(unsigned int pool_size, enum hmm_pool_type pool_type);
+void hmm_pool_unregister(enum hmm_pool_type pool_type);
+
+int hmm_init(void);
+void hmm_cleanup(void);
+
+ia_css_ptr hmm_alloc(size_t bytes, enum hmm_bo_type type,
+ int from_highmem, void *userptr, bool cached);
+void hmm_free(ia_css_ptr ptr);
+int hmm_load(ia_css_ptr virt, void *data, unsigned int bytes);
+int hmm_store(ia_css_ptr virt, const void *data, unsigned int bytes);
+int hmm_set(ia_css_ptr virt, int c, unsigned int bytes);
+int hmm_flush(ia_css_ptr virt, unsigned int bytes);
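+
+/*
+ * Typical use of the basic allocation API (illustrative sketch only; error
+ * handling is trimmed and the buffer contents are arbitrary):
+ *
+ *	char data[64] = "hello";
+ *	ia_css_ptr isp_addr = hmm_alloc(sizeof(data), HMM_BO_PRIVATE, 0,
+ *					NULL, HMM_UNCACHED);
+ *	if (isp_addr) {
+ *		hmm_store(isp_addr, data, sizeof(data));
+ *		...
+ *		hmm_free(isp_addr);
+ *	}
+ */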
+
+/*
+ * get kernel memory physical address from ISP virtual address.
+ */
+phys_addr_t hmm_virt_to_phys(ia_css_ptr virt);
+
+/*
+ * map the ISP memory starting at virt to a kernel virtual address
+ * using vmap. returns NULL on failure.
+ *
+ * virt must be the start address of the ISP memory (as returned by
+ * hmm_alloc); do not pass any other address.
+ */
+void *hmm_vmap(ia_css_ptr virt, bool cached);
+void hmm_vunmap(ia_css_ptr virt);
+
+/*
+ * flush the cache for the vmapped buffer.
+ * if the buffer has not been vmapped, return directly.
+ */
+void hmm_flush_vmap(ia_css_ptr virt);
+
+/*
+ * Address translation from an ISP shared memory address to a kernel
+ * virtual address. If the memory is not vmapped yet, it is vmapped first.
+ */
+void *hmm_isp_vaddr_to_host_vaddr(ia_css_ptr ptr, bool cached);
+
+/*
+ * Address translation from kernel virtual address to ISP shared memory address
+ */
+ia_css_ptr hmm_host_vaddr_to_hrt_vaddr(const void *ptr);
+
+/*
+ * map the ISP memory starting at virt into a specific vma.
+ *
+ * used for the mmap operation.
+ *
+ * virt must be the start address of the ISP memory (as returned by
+ * hmm_alloc); do not pass any other address.
+ */
+int hmm_mmap(struct vm_area_struct *vma, ia_css_ptr virt);
+
+/* show memory statistics */
+void hmm_show_mem_stat(const char *func, const int line);
+
+/* init memory statistics */
+void hmm_init_mem_stat(int res_pgnr, int dyc_en, int dyc_pgnr);
+
+extern bool dypool_enable;
+extern unsigned int dypool_pgnr;
+extern struct hmm_bo_device bo_device;
+
+#endif
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm_bo.h b/drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm_bo.h
new file mode 100644
index 000000000000..dffd6e9cf693
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm_bo.h
@@ -0,0 +1,323 @@
+/*
+ * Support for Medifield PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
+ *
+ * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+
+#ifndef __HMM_BO_H__
+#define __HMM_BO_H__
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include "mmu/isp_mmu.h"
+#include "hmm/hmm_common.h"
+#include "ia_css_types.h"
+
+#define check_bodev_null_return(bdev, exp) \
+ check_null_return(bdev, exp, \
+ "NULL hmm_bo_device.\n")
+
+#define check_bodev_null_return_void(bdev) \
+ check_null_return_void(bdev, \
+ "NULL hmm_bo_device.\n")
+
+#define check_bo_status_yes_goto(bo, _status, label) \
+ var_not_equal_goto((bo->status & (_status)), (_status), \
+ label, \
+ "HMM buffer status not contain %s.\n", \
+ #_status)
+
+#define check_bo_status_no_goto(bo, _status, label) \
+ var_equal_goto((bo->status & (_status)), (_status), \
+ label, \
+ "HMM buffer status contains %s.\n", \
+ #_status)
+
+#define rbtree_node_to_hmm_bo(root_node) \
+ container_of((root_node), struct hmm_buffer_object, node)
+
+#define list_to_hmm_bo(list_ptr) \
+ list_entry((list_ptr), struct hmm_buffer_object, list)
+
+#define kref_to_hmm_bo(kref_ptr) \
+ list_entry((kref_ptr), struct hmm_buffer_object, kref)
+
+#define check_bo_null_return(bo, exp) \
+ check_null_return(bo, exp, "NULL hmm buffer object.\n")
+
+#define check_bo_null_return_void(bo) \
+ check_null_return_void(bo, "NULL hmm buffer object.\n")
+
+#define HMM_MAX_ORDER 3
+#define HMM_MIN_ORDER 0
+
+#define ISP_VM_START 0x0
+#define ISP_VM_SIZE (0x7FFFFFFF) /* 2G address space */
+#define ISP_PTR_NULL NULL
+
+#define HMM_BO_DEVICE_INITED 0x1
+
+enum hmm_bo_type {
+ HMM_BO_PRIVATE,
+ HMM_BO_SHARE,
+ HMM_BO_USER,
+#ifdef CONFIG_ION
+ HMM_BO_ION,
+#endif
+ HMM_BO_LAST,
+};
+
+enum hmm_page_type {
+ HMM_PAGE_TYPE_RESERVED,
+ HMM_PAGE_TYPE_DYNAMIC,
+ HMM_PAGE_TYPE_GENERAL,
+};
+
+#define HMM_BO_MASK 0x1
+#define HMM_BO_FREE 0x0
+#define HMM_BO_ALLOCED 0x1
+#define HMM_BO_PAGE_ALLOCED 0x2
+#define HMM_BO_BINDED 0x4
+#define HMM_BO_MMAPED 0x8
+#define HMM_BO_VMAPED 0x10
+#define HMM_BO_VMAPED_CACHED 0x20
+#define HMM_BO_ACTIVE 0x1000
+#define HMM_BO_MEM_TYPE_USER 0x1
+#define HMM_BO_MEM_TYPE_PFN 0x2
+
+struct hmm_bo_device {
+ struct isp_mmu mmu;
+
+ /* start/pgnr/size record the ISP virtual memory range managed by this device */
+ unsigned int start;
+ unsigned int pgnr;
+ unsigned int size;
+
+ /* list lock is used to protect the entire_bo_list */
+ spinlock_t list_lock;
+#ifdef CONFIG_ION
+ struct ion_client *iclient;
+#endif
+ int flag;
+
+ /* linked list for entire buffer object */
+ struct list_head entire_bo_list;
+ /* rbtree for maintain entire allocated vm */
+ struct rb_root allocated_rbtree;
+ /* rbtree for maintain entire free vm */
+ struct rb_root free_rbtree;
+ struct mutex rbtree_mutex;
+ struct kmem_cache *bo_cache;
+};
+
+struct hmm_page_object {
+ struct page *page;
+ enum hmm_page_type type;
+};
+
+struct hmm_buffer_object {
+ struct hmm_bo_device *bdev;
+ struct list_head list;
+ struct kref kref;
+
+ /* mutex protecting this BO */
+ struct mutex mutex;
+ enum hmm_bo_type type;
+ struct hmm_page_object *page_obj; /* physical pages */
+ int from_highmem;
+ int mmap_count;
+#ifdef CONFIG_ION
+ struct ion_handle *ihandle;
+#endif
+ int status;
+ int mem_type;
+ void *vmap_addr; /* kernel virtual address by vmap */
+
+ struct rb_node node;
+ unsigned int start;
+ unsigned int end;
+ unsigned int pgnr;
+ /*
+ * When inserting a bo that has the same pgnr as an existing
+ * bo node in the free_rbtree, the "prev" and "next" pointers
+ * are used to maintain a linked list of such bos instead of
+ * inserting the bo into the free_rbtree directly; this keeps
+ * every node in the free_rbtree at a distinct pgnr.
+ * "prev" and "next" default to NULL.
+ */
+ struct hmm_buffer_object *prev;
+ struct hmm_buffer_object *next;
+};
+
+struct hmm_buffer_object *hmm_bo_alloc(struct hmm_bo_device *bdev,
+ unsigned int pgnr);
+
+void hmm_bo_release(struct hmm_buffer_object *bo);
+
+int hmm_bo_device_init(struct hmm_bo_device *bdev,
+ struct isp_mmu_client *mmu_driver,
+ unsigned int vaddr_start, unsigned int size);
+
+/*
+ * clean up all hmm_bo_device related things.
+ */
+void hmm_bo_device_exit(struct hmm_bo_device *bdev);
+
+/*
+ * whether the bo device is inited or not.
+ */
+int hmm_bo_device_inited(struct hmm_bo_device *bdev);
+
+/*
+ * increase the buffer object reference count.
+ */
+void hmm_bo_ref(struct hmm_buffer_object *bo);
+
+/*
+ * decrease the buffer object reference count. when the reference
+ * count reaches 0, the release function of the buffer object will
+ * be called.
+ *
+ * this call is also used to release the hmm_buffer_object, or the
+ * upper-level object it is embedded in. call this function when the
+ * buffer object is no longer used.
+ *
+ * Note:
+ *
+ * the user does not need to care about releasing the internal
+ * resources of the buffer object in the release callback; that is
+ * handled internally.
+ *
+ * this call only releases the internal resources of the buffer
+ * object, but does not free the buffer object itself, as the buffer
+ * object can be either pre-allocated statically or allocated
+ * dynamically. the user therefore has to deal with releasing the
+ * buffer object itself. the example below shows the normal use of
+ * a buffer object.
+ *
+ * struct hmm_buffer_object *bo = hmm_bo_create(bdev, pgnr);
+ * ......
+ * hmm_bo_unref(bo);
+ *
+ * or:
+ *
+ * struct hmm_buffer_object bo;
+ *
+ * hmm_bo_init(bdev, &bo, pgnr, NULL);
+ * ...
+ * hmm_bo_unref(&bo);
+ */
+void hmm_bo_unref(struct hmm_buffer_object *bo);
+
+
+/*
+ * whether the bo is in the HMM_BO_ALLOCED state or not.
+ */
+
+int hmm_bo_allocated(struct hmm_buffer_object *bo);
+
+
+/*
+ * allocate/free physical pages for the bo. memory is allocated from
+ * highmem if from_highmem is set, and type indicates whether the
+ * pages are allocated by the video driver (for a shared buffer)
+ * or by the ISP driver itself.
+ */
+int hmm_bo_alloc_pages(struct hmm_buffer_object *bo,
+ enum hmm_bo_type type, int from_highmem,
+ void *userptr, bool cached);
+void hmm_bo_free_pages(struct hmm_buffer_object *bo);
+int hmm_bo_page_allocated(struct hmm_buffer_object *bo);
+
+/*
+ * get physical page info of the bo.
+ */
+int hmm_bo_get_page_info(struct hmm_buffer_object *bo,
+ struct hmm_page_object **page_obj, int *pgnr);
+
+/*
+ * bind/unbind the physical pages to a virtual address space.
+ */
+int hmm_bo_bind(struct hmm_buffer_object *bo);
+void hmm_bo_unbind(struct hmm_buffer_object *bo);
+int hmm_bo_binded(struct hmm_buffer_object *bo);
+
+/*
+ * vmap buffer object's pages to contiguous kernel virtual address.
+ * if the buffer has already been vmapped, return the virtual address directly.
+ */
+void *hmm_bo_vmap(struct hmm_buffer_object *bo, bool cached);
+
+/*
+ * flush the cache for the vmapped buffer object's pages,
+ * if the buffer has not been vmapped, return directly.
+ */
+void hmm_bo_flush_vmap(struct hmm_buffer_object *bo);
+
+/*
+ * vunmap buffer object's kernel virtual address.
+ */
+void hmm_bo_vunmap(struct hmm_buffer_object *bo);
+
+/*
+ * mmap the bo's physical pages to specific vma.
+ *
+ * vma's address space size must be the same as bo's size,
+ * otherwise it will return -EINVAL.
+ *
+ * vma->vm_flags will have VM_IO | VM_DONTEXPAND | VM_DONTDUMP set.
+ */
+int hmm_bo_mmap(struct vm_area_struct *vma,
+ struct hmm_buffer_object *bo);
+
+extern struct hmm_pool dynamic_pool;
+extern struct hmm_pool reserved_pool;
+
+/*
+ * find the buffer object by its virtual address vaddr.
+ * return NULL if no such buffer object found.
+ */
+struct hmm_buffer_object *hmm_bo_device_search_start(
+ struct hmm_bo_device *bdev, ia_css_ptr vaddr);
+
+/*
+ * find the buffer object by its virtual address.
+ * it does not need to be the start address of one bo,
+ * it can be an address within the range of one bo.
+ * return NULL if no such buffer object found.
+ */
+struct hmm_buffer_object *hmm_bo_device_search_in_range(
+ struct hmm_bo_device *bdev, ia_css_ptr vaddr);
+
+/*
+ * find the buffer object with kernel virtual address vaddr.
+ * return NULL if no such buffer object found.
+ */
+struct hmm_buffer_object *hmm_bo_device_search_vmap_start(
+ struct hmm_bo_device *bdev, const void *vaddr);
+
+
+#endif
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm_bo_dev.h b/drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm_bo_dev.h
new file mode 100644
index 000000000000..a9446adb4c70
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm_bo_dev.h
@@ -0,0 +1,130 @@
+/*
+ * Support for Medifield PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
+ *
+ * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+
+#ifndef __HMM_BO_DEV_H__
+#define __HMM_BO_DEV_H__
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include "mmu/isp_mmu.h"
+#include "hmm/hmm_common.h"
+#include "hmm/hmm_vm.h"
+#include "ia_css_types.h"
+
+#define check_bodev_null_return(bdev, exp) \
+ check_null_return(bdev, exp, \
+ "NULL hmm_bo_device.\n")
+
+#define check_bodev_null_return_void(bdev) \
+ check_null_return_void(bdev, \
+ "NULL hmm_bo_device.\n")
+
+#define HMM_BO_DEVICE_INITED 0x1
+
+#define HMM_BO_CACHE_SIZE 2
+
+
+struct hmm_buffer_object;
+
+struct hmm_bo_device {
+ /* isp_mmu provides lock itself */
+ struct isp_mmu mmu;
+
+ /* hmm_vm provides lock itself */
+ struct hmm_vm vaddr_space;
+
+ struct list_head free_bo_list;
+ struct list_head active_bo_list;
+
+ /* list lock is used to protect both of the buffer object lists */
+ spinlock_t list_lock;
+#ifdef CONFIG_ION
+ struct ion_client *iclient;
+#endif
+ int flag;
+};
+
+int hmm_bo_device_init(struct hmm_bo_device *bdev,
+ struct isp_mmu_client *mmu_driver,
+ unsigned int vaddr_start, unsigned int size);
+
+/*
+ * clean up all hmm_bo_device related things.
+ */
+void hmm_bo_device_exit(struct hmm_bo_device *bdev);
+
+/*
+ * whether the bo device is inited or not.
+ */
+int hmm_bo_device_inited(struct hmm_bo_device *bdev);
+
+/*
+ * find the buffer object with virtual address vaddr.
+ * return NULL if no such buffer object found.
+ */
+struct hmm_buffer_object *hmm_bo_device_search_start(
+ struct hmm_bo_device *bdev, ia_css_ptr vaddr);
+
+/*
+ * find the buffer object whose virtual address range contains vaddr.
+ * return NULL if no such buffer object found.
+ */
+struct hmm_buffer_object *hmm_bo_device_search_in_range(
+ struct hmm_bo_device *bdev, ia_css_ptr vaddr);
+
+/*
+ * find the buffer object with kernel virtual address vaddr.
+ * return NULL if no such buffer object found.
+ */
+struct hmm_buffer_object *hmm_bo_device_search_vmap_start(
+ struct hmm_bo_device *bdev, const void *vaddr);
+
+/*
+ * find a buffer object with pgnr pages from free_bo_list and
+ * activate it (remove from free_bo_list and add to
+ * active_bo_list)
+ *
+ * return NULL if no such buffer object found.
+ */
+struct hmm_buffer_object *hmm_bo_device_get_bo(
+ struct hmm_bo_device *bdev, unsigned int pgnr);
+
+/*
+ * destroy all buffer objects in the free_bo_list.
+ */
+void hmm_bo_device_destroy_free_bo_list(struct hmm_bo_device *bdev);
+/*
+ * destroy buffer object with start virtual address vaddr.
+ */
+void hmm_bo_device_destroy_free_bo_addr(struct hmm_bo_device *bdev,
+ ia_css_ptr vaddr);
+/*
+ * destroy all buffer objects with pgnr pages.
+ */
+void hmm_bo_device_destroy_free_bo_size(struct hmm_bo_device *bdev,
+ unsigned int pgnr);
+
+#endif
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm_common.h b/drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm_common.h
new file mode 100644
index 000000000000..f1593aa38ce1
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm_common.h
@@ -0,0 +1,100 @@
+/*
+ * Support for Medifield PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
+ *
+ * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+
+#ifndef __HMM_BO_COMMON_H__
+#define __HMM_BO_COMMON_H__
+
+#define HMM_BO_NAME "HMM"
+
+/*
+ * some commonly used macros
+ */
+#define var_equal_return(var1, var2, exp, fmt, arg ...) \
+ do { \
+ if ((var1) == (var2)) { \
+ dev_err(atomisp_dev, \
+ fmt, ## arg); \
+ return exp;\
+ } \
+ } while (0)
+
+#define var_equal_return_void(var1, var2, fmt, arg ...) \
+ do { \
+ if ((var1) == (var2)) { \
+ dev_err(atomisp_dev, \
+ fmt, ## arg); \
+ return;\
+ } \
+ } while (0)
+
+#define var_equal_goto(var1, var2, label, fmt, arg ...) \
+ do { \
+ if ((var1) == (var2)) { \
+ dev_err(atomisp_dev, \
+ fmt, ## arg); \
+ goto label;\
+ } \
+ } while (0)
+
+#define var_not_equal_goto(var1, var2, label, fmt, arg ...) \
+ do { \
+ if ((var1) != (var2)) { \
+ dev_err(atomisp_dev, \
+ fmt, ## arg); \
+ goto label;\
+ } \
+ } while (0)
+
+#define check_null_return(ptr, exp, fmt, arg ...) \
+ var_equal_return(ptr, NULL, exp, fmt, ## arg)
+
+#define check_null_return_void(ptr, fmt, arg ...) \
+ var_equal_return_void(ptr, NULL, fmt, ## arg)
+
+/* hmm_mem_stat is used to trace the hmm mem used by ISP pipe. The unit is page
+ * number.
+ *
+ * res_size: reserved mem pool size, being allocated from system at system boot time.
+ * res_size >= res_cnt.
+ * sys_size: system mem pool size, being allocated from system at camera running time.
+ * dyc_size: dynamic mem pool size.
+ * dyc_thr: dynamic mem pool high watermark.
+ * dyc_size <= dyc_thr.
+ * usr_size: user ptr mem size.
+ *
+ * res_cnt: track the mem allocated from reserved pool at camera running time.
+ * tol_cnt: track the total mem used by ISP pipe at camera running time.
+ */
+struct _hmm_mem_stat {
+ int res_size;
+ int sys_size;
+ int dyc_size;
+ int dyc_thr;
+ int usr_size;
+ int res_cnt;
+ int tol_cnt;
+};
+
+extern struct _hmm_mem_stat hmm_mem_stat;
+
+#endif
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm_pool.h b/drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm_pool.h
new file mode 100644
index 000000000000..1ba360433d88
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm_pool.h
@@ -0,0 +1,119 @@
+/*
+ * Support for Medifield PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
+ *
+ * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+#ifndef __HMM_POOL_H__
+#define __HMM_POOL_H__
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/kref.h>
+#include "hmm_common.h"
+#include "hmm/hmm_bo.h"
+
+#define ALLOC_PAGE_FAIL_NUM 5
+
+enum hmm_pool_type {
+ HMM_POOL_TYPE_RESERVED,
+ HMM_POOL_TYPE_DYNAMIC,
+};
+
+/**
+ * struct hmm_pool_ops - memory pool callbacks.
+ *
+ * @pool_init: initialize the memory pool.
+ * @pool_exit: uninitialize the memory pool.
+ * @pool_alloc_pages: allocate pages from memory pool.
+ * @pool_free_pages: free pages to memory pool.
+ * @pool_inited: check whether memory pool is initialized.
+ */
+struct hmm_pool_ops {
+ int (*pool_init)(void **pool, unsigned int pool_size);
+ void (*pool_exit)(void **pool);
+ unsigned int (*pool_alloc_pages)(void *pool,
+ struct hmm_page_object *page_obj,
+ unsigned int size, bool cached);
+ void (*pool_free_pages)(void *pool,
+ struct hmm_page_object *page_obj);
+ int (*pool_inited)(void *pool);
+};
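+
+/*
+ * reserved_pops and dynamic_pops (declared at the end of this header)
+ * provide the reserved-pool and dynamic-pool implementations of these ops.
+ */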
+
+struct hmm_pool {
+ struct hmm_pool_ops *pops;
+
+ void *pool_info;
+};
+
+/**
+ * struct hmm_reserved_pool_info - reserved pool private data.
+ * @pages: an array that stores physical pages; the array
+ * serves as the reserved memory pool.
+ * @index: index of the first free slot in the reserved
+ * memory pool (the pages array).
+ * @pgnr: number of valid pages in the reserved memory
+ * pool.
+ * @list_lock: protects operations on the reserved memory
+ * pool.
+ * @initialized: reserved memory pool state flag.
+ */
+struct hmm_reserved_pool_info {
+ struct page **pages;
+
+ unsigned int index;
+ unsigned int pgnr;
+ spinlock_t list_lock;
+ bool initialized;
+};
+
+/**
+ * struct hmm_dynamic_pool_info - dynamic pool private data.
+ * @pages_list: a list that stores physical pages; the list
+ * serves as the dynamic memory pool.
+ * @list_lock: protects operations on the dynamic memory
+ * pool.
+ * @pgptr_cache: kmem_cache used to allocate the struct hmm_page
+ * entries that track pooled pages.
+ * @initialized: dynamic memory pool state flag.
+ * @pool_size: high watermark of the dynamic memory pool.
+ * @pgnr: number of pages currently in the pool.
+ */
+struct hmm_dynamic_pool_info {
+ struct list_head pages_list;
+
+ /* list lock is used to protect the free pages block lists */
+ spinlock_t list_lock;
+
+ struct kmem_cache *pgptr_cache;
+ bool initialized;
+
+ unsigned int pool_size;
+ unsigned int pgnr;
+};
+
+struct hmm_page {
+ struct page *page;
+ struct list_head list;
+};
+
+extern struct hmm_pool_ops reserved_pops;
+extern struct hmm_pool_ops dynamic_pops;
+
+#endif
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm_vm.h b/drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm_vm.h
new file mode 100644
index 000000000000..07d40662de32
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm_vm.h
@@ -0,0 +1,68 @@
+/*
+ * Support for Medifield PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
+ *
+ * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+
+#ifndef __HMM_VM_H__
+#define __HMM_VM_H__
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/list.h>
+
+struct hmm_vm {
+ unsigned int start;
+ unsigned int pgnr;
+ unsigned int size;
+ struct list_head vm_node_list;
+ spinlock_t lock;
+ struct kmem_cache *cache;
+};
+
+struct hmm_vm_node {
+ struct list_head list;
+ unsigned int start;
+ unsigned int pgnr;
+ unsigned int size;
+ struct hmm_vm *vm;
+};
+#define ISP_VM_START 0x0
+#define ISP_VM_SIZE (0x7FFFFFFF) /* 2G address space */
+#define ISP_PTR_NULL NULL
+
+int hmm_vm_init(struct hmm_vm *vm, unsigned int start,
+ unsigned int size);
+
+void hmm_vm_clean(struct hmm_vm *vm);
+
+struct hmm_vm_node *hmm_vm_alloc_node(struct hmm_vm *vm,
+ unsigned int pgnr);
+
+void hmm_vm_free_node(struct hmm_vm_node *node);
+
+struct hmm_vm_node *hmm_vm_find_node_start(struct hmm_vm *vm,
+ unsigned int addr);
+
+struct hmm_vm_node *hmm_vm_find_node_in_range(struct hmm_vm *vm,
+ unsigned int addr);
+
+#endif
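As a usage illustration only (this wrapper is hypothetical, not part of the patch), the ISP virtual-space allocator declared above is typically driven like this:

/* Illustrative: reserve pgnr pages of ISP virtual address space. */
static int example_vm_usage(struct hmm_vm *vm, unsigned int pgnr)
{
        struct hmm_vm_node *node;
        int ret;

        ret = hmm_vm_init(vm, ISP_VM_START, ISP_VM_SIZE);
        if (ret)
                return ret;

        node = hmm_vm_alloc_node(vm, pgnr);
        if (!node) {
                hmm_vm_clean(vm);
                return -ENOMEM;
        }

        /* ... map physical pages at node->start and use them ... */

        hmm_vm_free_node(node);
        hmm_vm_clean(vm);
        return 0;
}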
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/include/mmu/isp_mmu.h b/drivers/staging/media/atomisp/pci/atomisp2/include/mmu/isp_mmu.h
new file mode 100644
index 000000000000..6b4eefc929e2
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/include/mmu/isp_mmu.h
@@ -0,0 +1,175 @@
+/*
+ * Support for Medfield PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
+ *
+ * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+/*
+ * ISP MMU driver for classic two-level page tables
+ */
+#ifndef __ISP_MMU_H__
+#define __ISP_MMU_H__
+
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+
+/*
+ * do not change these values, the page size for ISP must be the
+ * same as kernel's page size.
+ */
+#define ISP_PAGE_OFFSET 12
+#define ISP_PAGE_SIZE (1U << ISP_PAGE_OFFSET)
+#define ISP_PAGE_MASK (~(phys_addr_t)(ISP_PAGE_SIZE - 1))
+
+#define ISP_L1PT_OFFSET 22
+#define ISP_L1PT_MASK (~((1U << ISP_L1PT_OFFSET) - 1))
+
+#define ISP_L2PT_OFFSET 12
+#define ISP_L2PT_MASK (~(ISP_L1PT_MASK|(~(ISP_PAGE_MASK))))
+
+#define ISP_L1PT_PTES 1024
+#define ISP_L2PT_PTES 1024
+
+#define ISP_PTR_TO_L1_IDX(x) ((((x) & ISP_L1PT_MASK)) \
+ >> ISP_L1PT_OFFSET)
+
+#define ISP_PTR_TO_L2_IDX(x) ((((x) & ISP_L2PT_MASK)) \
+ >> ISP_L2PT_OFFSET)
+
+#define ISP_PAGE_ALIGN(x) (((x) + (ISP_PAGE_SIZE-1)) \
+ & ISP_PAGE_MASK)
+
+#define ISP_PT_TO_VIRT(l1_idx, l2_idx, offset) \
+ (((l1_idx) << ISP_L1PT_OFFSET) | \
+ ((l2_idx) << ISP_L2PT_OFFSET) | \
+ (offset))
+
+#define pgnr_to_size(pgnr) ((pgnr) << ISP_PAGE_OFFSET)
+#define size_to_pgnr_ceil(size) (((size) + (1 << ISP_PAGE_OFFSET) - 1)\
+ >> ISP_PAGE_OFFSET)
+#define size_to_pgnr_bottom(size) ((size) >> ISP_PAGE_OFFSET)
+
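A worked example of the address split defined by the macros above (the address value is chosen purely for illustration):

/*
 * isp_virt = 0x00c03123
 *   ISP_PTR_TO_L1_IDX(isp_virt)  == 3      (bits 31..22, 4 MB per L1 entry)
 *   ISP_PTR_TO_L2_IDX(isp_virt)  == 3      (bits 21..12, 4 KB per L2 entry)
 *   isp_virt & ~ISP_PAGE_MASK    == 0x123  (offset within the page)
 * and ISP_PT_TO_VIRT(3, 3, 0x123) reassembles 0x00c03123.
 */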
+struct isp_mmu;
+
+struct isp_mmu_client {
+ /*
+ * const values
+ *
+ * @name:
+ * driver name
+ * @pte_valid_mask:
+ * mask that marks a PTE as valid; must have exactly
+ * one bit set (i.e. be a power of 2).
+ */
+ char *name;
+ unsigned int pte_valid_mask;
+ unsigned int null_pte;
+
+ /*
+ * set/get page directory base address (physical address).
+ *
+ * must be provided.
+ */
+ int (*set_pd_base) (struct isp_mmu *mmu,
+ phys_addr_t pd_base);
+ unsigned int (*get_pd_base) (struct isp_mmu *mmu, phys_addr_t pd_base);
+ /*
+ * callback to flush tlb.
+ *
+ * tlb_flush_range will at least flush TLBs containing
+ * address mapping from addr to addr + size.
+ *
+ * tlb_flush_all will flush all TLBs.
+ *
+ * tlb_flush_all must be provided. If tlb_flush_range is
+ * not provided, it will be set to tlb_flush_all by default.
+ */
+ void (*tlb_flush_range) (struct isp_mmu *mmu,
+ unsigned int addr, unsigned int size);
+ void (*tlb_flush_all) (struct isp_mmu *mmu);
+ unsigned int (*phys_to_pte) (struct isp_mmu *mmu,
+ phys_addr_t phys);
+ phys_addr_t (*pte_to_phys) (struct isp_mmu *mmu,
+ unsigned int pte);
+
+};
+
+struct isp_mmu {
+ struct isp_mmu_client *driver;
+ unsigned int l1_pte;
+ int l2_pgt_refcount[ISP_L1PT_PTES];
+ phys_addr_t base_address;
+
+ struct mutex pt_mutex;
+ struct kmem_cache *tbl_cache;
+};
+
+/* flags for PDE and PTE */
+#define ISP_PTE_VALID_MASK(mmu) \
+ ((mmu)->driver->pte_valid_mask)
+
+#define ISP_PTE_VALID(mmu, pte) \
+ ((pte) & ISP_PTE_VALID_MASK(mmu))
+
+#define NULL_PAGE ((phys_addr_t)(-1) & ISP_PAGE_MASK)
+#define PAGE_VALID(page) ((page) != NULL_PAGE)
+
+/*
+ * init mmu with specific mmu driver.
+ */
+int isp_mmu_init(struct isp_mmu *mmu, struct isp_mmu_client *driver);
+/*
+ * cleanup all mmu related things.
+ */
+void isp_mmu_exit(struct isp_mmu *mmu);
+
+/*
+ * setup/remove address mapping for pgnr contiguous physical pages
+ * and isp_virt.
+ *
+ * map/unmap is mutex lock protected, and caller does not have
+ * to do lock/unlock operation.
+ *
+ * map/unmap will not flush tlb, and caller needs to deal with
+ * this itself.
+ */
+int isp_mmu_map(struct isp_mmu *mmu, unsigned int isp_virt,
+ phys_addr_t phys, unsigned int pgnr);
+
+void isp_mmu_unmap(struct isp_mmu *mmu, unsigned int isp_virt,
+ unsigned int pgnr);
+
+static inline void isp_mmu_flush_tlb_all(struct isp_mmu *mmu)
+{
+ if (mmu->driver && mmu->driver->tlb_flush_all)
+ mmu->driver->tlb_flush_all(mmu);
+}
+
+#define isp_mmu_flush_tlb isp_mmu_flush_tlb_all
+
+static inline void isp_mmu_flush_tlb_range(struct isp_mmu *mmu,
+ unsigned int start, unsigned int size)
+{
+ if (mmu->driver && mmu->driver->tlb_flush_range)
+ mmu->driver->tlb_flush_range(mmu, start, size);
+}
+
+#endif /* __ISP_MMU_H__ */
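To tie the pieces above together, a minimal usage sketch (the wrapper function is hypothetical; sh_mmu_mrfld later in this patch provides a real isp_mmu_client):

/* Illustrative: map a contiguous physical range into the ISP and flush. */
static int example_isp_map(struct isp_mmu *mmu, struct isp_mmu_client *client,
                           unsigned int isp_virt, phys_addr_t phys,
                           unsigned int pgnr)
{
        int ret;

        ret = isp_mmu_init(mmu, client);        /* checks mandatory callbacks */
        if (ret)
                return ret;

        ret = isp_mmu_map(mmu, isp_virt, phys, pgnr);
        if (ret) {
                isp_mmu_exit(mmu);
                return ret;
        }

        /* map/unmap never flush the TLB; the caller has to do it */
        isp_mmu_flush_tlb_range(mmu, isp_virt, pgnr_to_size(pgnr));

        /* ... ISP accesses isp_virt ... */

        isp_mmu_unmap(mmu, isp_virt, pgnr);
        isp_mmu_flush_tlb_all(mmu);
        isp_mmu_exit(mmu);
        return 0;
}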
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/include/mmu/sh_mmu.h b/drivers/staging/media/atomisp/pci/atomisp2/include/mmu/sh_mmu.h
new file mode 100644
index 000000000000..06041e94cbb2
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/include/mmu/sh_mmu.h
@@ -0,0 +1,76 @@
+/*
+ * Support for Medfield PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
+ *
+ * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+#ifndef SH_MMU_H_
+#define SH_MMU_H_
+
+
+#include <sh_css.h>
+
+#include "mmu/isp_mmu.h"
+
+
+/*
+ * include SH header file here
+ */
+
+/*
+ * set page directory base address (physical address).
+ *
+ * must be provided.
+ */
+static int sh_set_pd_base(struct isp_mmu *mmu,
+ unsigned int phys)
+{
+ sh_css_mmu_set_page_table_base_address((void *)phys);
+ return 0;
+}
+
+/*
+ * callback to flush tlb.
+ *
+ * tlb_flush_range will at least flush TLBs containing
+ * address mapping from addr to addr + size.
+ *
+ * tlb_flush_all will flush all TLBs.
+ *
+ * tlb_flush_all must be provided. If tlb_flush_range is
+ * not provided, it will be set to tlb_flush_all by default.
+ */
+static void sh_tlb_flush(struct isp_mmu *mmu)
+{
+ sh_css_mmu_invalidate_cache();
+}
+
+static struct isp_mmu_driver sh_mmu_driver = {
+ .name = "Silicon Hive ISP3000 MMU",
+ .pte_valid_mask = 0x1,
+ .set_pd_base = sh_set_pd_base,
+ .tlb_flush_all = sh_tlb_flush,
+};
+
+#define ISP_VM_START 0x0
+#define ISP_VM_SIZE (1 << 30) /* 1G address space */
+#define ISP_PTR_NULL NULL
+
+#endif /* SH_MMU_H_ */
+
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/include/mmu/sh_mmu_mrfld.h b/drivers/staging/media/atomisp/pci/atomisp2/include/mmu/sh_mmu_mrfld.h
new file mode 100644
index 000000000000..b9bad9f06235
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/include/mmu/sh_mmu_mrfld.h
@@ -0,0 +1,28 @@
+/*
+ * Support for Merrifield PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
+ *
+ * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+
+#ifndef __SH_MMU_MRFLD_H__
+#define __SH_MMU_MRFLD_H__
+
+extern struct isp_mmu_client sh_mmu_mrfld;
+#endif
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/mmu/isp_mmu.c b/drivers/staging/media/atomisp/pci/atomisp2/mmu/isp_mmu.c
new file mode 100644
index 000000000000..706bd43e8b1b
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/mmu/isp_mmu.c
@@ -0,0 +1,597 @@
+/*
+ * Support for Medfield PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
+ *
+ * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+/*
+ * ISP MMU management wrap code
+ */
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/gfp.h>
+#include <linux/mm.h> /* for GFP_ATOMIC */
+#include <linux/slab.h> /* for kmalloc */
+#include <linux/list.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/sizes.h>
+
+#ifdef CONFIG_X86
+#include <asm/set_memory.h>
+#endif
+
+#include "atomisp_internal.h"
+#include "mmu/isp_mmu.h"
+
+/*
+ * 64-bit x86 processor physical address layout:
+ * 0 - 0x7fffffff DDR RAM (2GB)
+ * 0x80000000 - 0xffffffff MMIO (2GB)
+ * 0x100000000 - 0x3fffffffffff DDR RAM (64TB)
+ * So if the system has more than 2GB DDR memory, the lower 2GB occupies the
+ * physical address 0 - 0x7fffffff and the rest will start from 0x100000000.
+ * We have to make sure memory is allocated from the lower 2GB for devices
+ * that are only 32-bit capable (e.g. the ISP MMU).
+ *
+ * For any confusion, contact bin.gao@intel.com.
+ */
+#define NR_PAGES_2GB (SZ_2G / PAGE_SIZE)
+
+static void free_mmu_map(struct isp_mmu *mmu, unsigned int start_isp_virt,
+ unsigned int end_isp_virt);
+
+static unsigned int atomisp_get_pte(phys_addr_t pt, unsigned int idx)
+{
+ unsigned int *pt_virt = phys_to_virt(pt);
+ return *(pt_virt + idx);
+}
+
+static void atomisp_set_pte(phys_addr_t pt,
+ unsigned int idx, unsigned int pte)
+{
+ unsigned int *pt_virt = phys_to_virt(pt);
+ *(pt_virt + idx) = pte;
+}
+
+static void *isp_pt_phys_to_virt(phys_addr_t phys)
+{
+ return phys_to_virt(phys);
+}
+
+static phys_addr_t isp_pte_to_pgaddr(struct isp_mmu *mmu,
+ unsigned int pte)
+{
+ return mmu->driver->pte_to_phys(mmu, pte);
+}
+
+static unsigned int isp_pgaddr_to_pte_valid(struct isp_mmu *mmu,
+ phys_addr_t phys)
+{
+ unsigned int pte = mmu->driver->phys_to_pte(mmu, phys);
+ return (unsigned int) (pte | ISP_PTE_VALID_MASK(mmu));
+}
+
+/*
+ * allocate an uncacheable page table.
+ * return physical address.
+ */
+static phys_addr_t alloc_page_table(struct isp_mmu *mmu)
+{
+ int i;
+ phys_addr_t page;
+ void *virt;
+
+ /* page table lock may be needed here */
+ /*
+ * The slab allocator (kmem_cache and the kmalloc family) doesn't
+ * handle the GFP_DMA32 flag, so we have to use the buddy allocator.
+ */
+ if (totalram_pages > (unsigned long)NR_PAGES_2GB)
+ virt = (void *)__get_free_page(GFP_KERNEL | GFP_DMA32);
+ else
+ virt = kmem_cache_zalloc(mmu->tbl_cache, GFP_KERNEL);
+ if (!virt)
+ return (phys_addr_t)NULL_PAGE;
+
+ /*
+ * we need an uncacheable page table.
+ */
+#ifdef CONFIG_X86
+ set_memory_uc((unsigned long)virt, 1);
+#endif
+
+ page = virt_to_phys(virt);
+
+ for (i = 0; i < 1024; i++) {
+ /* NEED CHECK */
+ atomisp_set_pte(page, i, mmu->driver->null_pte);
+ }
+
+ return page;
+}
+
+static void free_page_table(struct isp_mmu *mmu, phys_addr_t page)
+{
+ void *virt;
+ page &= ISP_PAGE_MASK;
+ /*
+ * reset the page to write-back before freeing it
+ */
+ virt = phys_to_virt(page);
+
+#ifdef CONFIG_X86
+ set_memory_wb((unsigned long)virt, 1);
+#endif
+
+ kmem_cache_free(mmu->tbl_cache, virt);
+}
+
+static void mmu_remap_error(struct isp_mmu *mmu,
+ phys_addr_t l1_pt, unsigned int l1_idx,
+ phys_addr_t l2_pt, unsigned int l2_idx,
+ unsigned int isp_virt, phys_addr_t old_phys,
+ phys_addr_t new_phys)
+{
+ dev_err(atomisp_dev, "address remap:\n\n"
+ "\tL1 PT: virt = %p, phys = 0x%llx, "
+ "idx = %d\n"
+ "\tL2 PT: virt = %p, phys = 0x%llx, "
+ "idx = %d\n"
+ "\told: isp_virt = 0x%x, phys = 0x%llx\n"
+ "\tnew: isp_virt = 0x%x, phys = 0x%llx\n",
+ isp_pt_phys_to_virt(l1_pt),
+ (u64)l1_pt, l1_idx,
+ isp_pt_phys_to_virt(l2_pt),
+ (u64)l2_pt, l2_idx, isp_virt,
+ (u64)old_phys, isp_virt,
+ (u64)new_phys);
+}
+
+static void mmu_unmap_l2_pte_error(struct isp_mmu *mmu,
+ phys_addr_t l1_pt, unsigned int l1_idx,
+ phys_addr_t l2_pt, unsigned int l2_idx,
+ unsigned int isp_virt, unsigned int pte)
+{
+ dev_err(atomisp_dev, "unmap unvalid L2 pte:\n\n"
+ "\tL1 PT: virt = %p, phys = 0x%llx, "
+ "idx = %d\n"
+ "\tL2 PT: virt = %p, phys = 0x%llx, "
+ "idx = %d\n"
+ "\tisp_virt = 0x%x, pte(page phys) = 0x%x\n",
+ isp_pt_phys_to_virt(l1_pt),
+ (u64)l1_pt, l1_idx,
+ isp_pt_phys_to_virt(l2_pt),
+ (u64)l2_pt, l2_idx, isp_virt,
+ pte);
+}
+
+static void mmu_unmap_l1_pte_error(struct isp_mmu *mmu,
+ phys_addr_t l1_pt, unsigned int l1_idx,
+ unsigned int isp_virt, unsigned int pte)
+{
+ dev_err(atomisp_dev, "unmap unvalid L1 pte (L2 PT):\n\n"
+ "\tL1 PT: virt = %p, phys = 0x%llx, "
+ "idx = %d\n"
+ "\tisp_virt = 0x%x, l1_pte(L2 PT) = 0x%x\n",
+ isp_pt_phys_to_virt(l1_pt),
+ (u64)l1_pt, l1_idx, (unsigned int)isp_virt,
+ pte);
+}
+
+static void mmu_unmap_l1_pt_error(struct isp_mmu *mmu, unsigned int pte)
+{
+ dev_err(atomisp_dev, "unmap unvalid L1PT:\n\n"
+ "L1PT = 0x%x\n", (unsigned int)pte);
+}
+
+/*
+ * Update L2 page table according to isp virtual address and page physical
+ * address
+ */
+static int mmu_l2_map(struct isp_mmu *mmu, phys_addr_t l1_pt,
+ unsigned int l1_idx, phys_addr_t l2_pt,
+ unsigned int start, unsigned int end, phys_addr_t phys)
+{
+ unsigned int ptr;
+ unsigned int idx;
+ unsigned int pte;
+
+ l2_pt &= ISP_PAGE_MASK;
+
+ start = start & ISP_PAGE_MASK;
+ end = ISP_PAGE_ALIGN(end);
+ phys &= ISP_PAGE_MASK;
+
+ ptr = start;
+ do {
+ idx = ISP_PTR_TO_L2_IDX(ptr);
+
+ pte = atomisp_get_pte(l2_pt, idx);
+
+ if (ISP_PTE_VALID(mmu, pte)) {
+ mmu_remap_error(mmu, l1_pt, l1_idx,
+ l2_pt, idx, ptr, pte, phys);
+
+ /* free all mapped pages */
+ free_mmu_map(mmu, start, ptr);
+
+ return -EINVAL;
+ }
+
+ pte = isp_pgaddr_to_pte_valid(mmu, phys);
+
+ atomisp_set_pte(l2_pt, idx, pte);
+ mmu->l2_pgt_refcount[l1_idx]++;
+ ptr += (1U << ISP_L2PT_OFFSET);
+ phys += (1U << ISP_L2PT_OFFSET);
+ } while (ptr < end && idx < ISP_L2PT_PTES - 1);
+
+ return 0;
+}
+
+/*
+ * Update L1 page table according to isp virtual address and page physical
+ * address
+ */
+static int mmu_l1_map(struct isp_mmu *mmu, phys_addr_t l1_pt,
+ unsigned int start, unsigned int end,
+ phys_addr_t phys)
+{
+ phys_addr_t l2_pt;
+ unsigned int ptr, l1_aligned;
+ unsigned int idx;
+ unsigned int l2_pte;
+ int ret;
+
+ l1_pt &= ISP_PAGE_MASK;
+
+ start = start & ISP_PAGE_MASK;
+ end = ISP_PAGE_ALIGN(end);
+ phys &= ISP_PAGE_MASK;
+
+ ptr = start;
+ do {
+ idx = ISP_PTR_TO_L1_IDX(ptr);
+
+ l2_pte = atomisp_get_pte(l1_pt, idx);
+
+ if (!ISP_PTE_VALID(mmu, l2_pte)) {
+ l2_pt = alloc_page_table(mmu);
+ if (l2_pt == NULL_PAGE) {
+ dev_err(atomisp_dev,
+ "alloc page table fail.\n");
+
+ /* free all mapped pages */
+ free_mmu_map(mmu, start, ptr);
+
+ return -ENOMEM;
+ }
+
+ l2_pte = isp_pgaddr_to_pte_valid(mmu, l2_pt);
+
+ atomisp_set_pte(l1_pt, idx, l2_pte);
+ mmu->l2_pgt_refcount[idx] = 0;
+ }
+
+ l2_pt = isp_pte_to_pgaddr(mmu, l2_pte);
+
+ l1_aligned = (ptr & ISP_PAGE_MASK) + (1U << ISP_L1PT_OFFSET);
+
+ if (l1_aligned < end) {
+ ret = mmu_l2_map(mmu, l1_pt, idx,
+ l2_pt, ptr, l1_aligned, phys);
+ phys += (l1_aligned - ptr);
+ ptr = l1_aligned;
+ } else {
+ ret = mmu_l2_map(mmu, l1_pt, idx,
+ l2_pt, ptr, end, phys);
+ phys += (end - ptr);
+ ptr = end;
+ }
+
+ if (ret) {
+ dev_err(atomisp_dev, "setup mapping in L2PT fail.\n");
+
+ /* free all mapped pages */
+ free_mmu_map(mmu, start, ptr);
+
+ return -EINVAL;
+ }
+ } while (ptr < end && idx < ISP_L1PT_PTES);
+
+ return 0;
+}
+
+/*
+ * Update page table according to isp virtual address and page physical
+ * address
+ */
+static int mmu_map(struct isp_mmu *mmu, unsigned int isp_virt,
+ phys_addr_t phys, unsigned int pgnr)
+{
+ unsigned int start, end;
+ phys_addr_t l1_pt;
+ int ret;
+
+ mutex_lock(&mmu->pt_mutex);
+ if (!ISP_PTE_VALID(mmu, mmu->l1_pte)) {
+ /*
+ * allocate 1 new page for L1 page table
+ */
+ l1_pt = alloc_page_table(mmu);
+ if (l1_pt == NULL_PAGE) {
+ dev_err(atomisp_dev, "alloc page table fail.\n");
+ mutex_unlock(&mmu->pt_mutex);
+ return -ENOMEM;
+ }
+
+ /*
+ * setup L1 page table physical addr to MMU
+ */
+ ret = mmu->driver->set_pd_base(mmu, l1_pt);
+ if (ret) {
+ dev_err(atomisp_dev,
+ "set page directory base address fail.\n");
+ mutex_unlock(&mmu->pt_mutex);
+ return ret;
+ }
+ mmu->base_address = l1_pt;
+ mmu->l1_pte = isp_pgaddr_to_pte_valid(mmu, l1_pt);
+ memset(mmu->l2_pgt_refcount, 0, sizeof(int) * ISP_L1PT_PTES);
+ }
+
+ l1_pt = isp_pte_to_pgaddr(mmu, mmu->l1_pte);
+
+ start = (isp_virt) & ISP_PAGE_MASK;
+ end = start + (pgnr << ISP_PAGE_OFFSET);
+ phys &= ISP_PAGE_MASK;
+
+ ret = mmu_l1_map(mmu, l1_pt, start, end, phys);
+
+ if (ret)
+ dev_err(atomisp_dev, "setup mapping in L1PT fail.\n");
+
+ mutex_unlock(&mmu->pt_mutex);
+ return ret;
+}
+
+/*
+ * Free L2 page table according to isp virtual address and page physical
+ * address
+ */
+static void mmu_l2_unmap(struct isp_mmu *mmu, phys_addr_t l1_pt,
+ unsigned int l1_idx, phys_addr_t l2_pt,
+ unsigned int start, unsigned int end)
+{
+
+ unsigned int ptr;
+ unsigned int idx;
+ unsigned int pte;
+
+ l2_pt &= ISP_PAGE_MASK;
+
+ start = start & ISP_PAGE_MASK;
+ end = ISP_PAGE_ALIGN(end);
+
+ ptr = start;
+ do {
+ idx = ISP_PTR_TO_L2_IDX(ptr);
+
+ pte = atomisp_get_pte(l2_pt, idx);
+
+ if (!ISP_PTE_VALID(mmu, pte))
+ mmu_unmap_l2_pte_error(mmu, l1_pt, l1_idx,
+ l2_pt, idx, ptr, pte);
+
+ atomisp_set_pte(l2_pt, idx, mmu->driver->null_pte);
+ mmu->l2_pgt_refcount[l1_idx]--;
+ ptr += (1U << ISP_L2PT_OFFSET);
+ } while (ptr < end && idx < ISP_L2PT_PTES - 1);
+
+ if (mmu->l2_pgt_refcount[l1_idx] == 0) {
+ free_page_table(mmu, l2_pt);
+ atomisp_set_pte(l1_pt, l1_idx, mmu->driver->null_pte);
+ }
+}
+
+/*
+ * Free L1 page table according to isp virtual address and page physical
+ * address
+ */
+static void mmu_l1_unmap(struct isp_mmu *mmu, phys_addr_t l1_pt,
+ unsigned int start, unsigned int end)
+{
+ phys_addr_t l2_pt;
+ unsigned int ptr, l1_aligned;
+ unsigned int idx;
+ unsigned int l2_pte;
+
+ l1_pt &= ISP_PAGE_MASK;
+
+ start = start & ISP_PAGE_MASK;
+ end = ISP_PAGE_ALIGN(end);
+
+ ptr = start;
+ do {
+ idx = ISP_PTR_TO_L1_IDX(ptr);
+
+ l2_pte = atomisp_get_pte(l1_pt, idx);
+
+ if (!ISP_PTE_VALID(mmu, l2_pte)) {
+ mmu_unmap_l1_pte_error(mmu, l1_pt, idx, ptr, l2_pte);
+ /* skip to the next L1 entry, otherwise we would loop forever */
+ ptr = (ptr & ISP_L1PT_MASK) + (1U << ISP_L1PT_OFFSET);
+ continue;
+ }
+
+ l2_pt = isp_pte_to_pgaddr(mmu, l2_pte);
+
+ l1_aligned = (ptr & ISP_PAGE_MASK) + (1U << ISP_L1PT_OFFSET);
+
+ if (l1_aligned < end) {
+ mmu_l2_unmap(mmu, l1_pt, idx, l2_pt, ptr, l1_aligned);
+ ptr = l1_aligned;
+ } else {
+ mmu_l2_unmap(mmu, l1_pt, idx, l2_pt, ptr, end);
+ ptr = end;
+ }
+ /*
+ * use the same L2 page next time, so we don't
+ * need to invalidate and free this PT.
+ */
+ /* atomisp_set_pte(l1_pt, idx, NULL_PTE); */
+ } while (ptr < end && idx < ISP_L1PT_PTES);
+}
+
+/*
+ * Free page table according to isp virtual address and page physical
+ * address
+ */
+static void mmu_unmap(struct isp_mmu *mmu, unsigned int isp_virt,
+ unsigned int pgnr)
+{
+ unsigned int start, end;
+ phys_addr_t l1_pt;
+
+ mutex_lock(&mmu->pt_mutex);
+ if (!ISP_PTE_VALID(mmu, mmu->l1_pte)) {
+ mmu_unmap_l1_pt_error(mmu, mmu->l1_pte);
+ mutex_unlock(&mmu->pt_mutex);
+ return;
+ }
+
+ l1_pt = isp_pte_to_pgaddr(mmu, mmu->l1_pte);
+
+ start = (isp_virt) & ISP_PAGE_MASK;
+ end = start + (pgnr << ISP_PAGE_OFFSET);
+
+ mmu_l1_unmap(mmu, l1_pt, start, end);
+ mutex_unlock(&mmu->pt_mutex);
+}
+
+/*
+ * Free page tables according to isp start virtual address and end virtual
+ * address.
+ */
+static void free_mmu_map(struct isp_mmu *mmu, unsigned int start_isp_virt,
+ unsigned int end_isp_virt)
+{
+ unsigned int pgnr;
+ unsigned int start, end;
+
+ start = (start_isp_virt) & ISP_PAGE_MASK;
+ end = (end_isp_virt) & ISP_PAGE_MASK;
+ pgnr = (end - start) >> ISP_PAGE_OFFSET;
+ mmu_unmap(mmu, start, pgnr);
+}
+
+int isp_mmu_map(struct isp_mmu *mmu, unsigned int isp_virt,
+ phys_addr_t phys, unsigned int pgnr)
+{
+ return mmu_map(mmu, isp_virt, phys, pgnr);
+}
+
+void isp_mmu_unmap(struct isp_mmu *mmu, unsigned int isp_virt,
+ unsigned int pgnr)
+{
+ mmu_unmap(mmu, isp_virt, pgnr);
+}
+
+static void isp_mmu_flush_tlb_range_default(struct isp_mmu *mmu,
+ unsigned int start,
+ unsigned int size)
+{
+ isp_mmu_flush_tlb(mmu);
+}
+
+/* MMU init for internal structure */
+int isp_mmu_init(struct isp_mmu *mmu, struct isp_mmu_client *driver)
+{
+ if (!mmu) /* error */
+ return -EINVAL;
+ if (!driver) /* error */
+ return -EINVAL;
+
+ if (!driver->name)
+ dev_warn(atomisp_dev, "NULL name for MMU driver...\n");
+
+ mmu->driver = driver;
+
+ if (!driver->set_pd_base || !driver->tlb_flush_all) {
+ dev_err(atomisp_dev,
+ "set_pd_base or tlb_flush_all operation "
+ "not provided.\n");
+ return -EINVAL;
+ }
+
+ if (!driver->tlb_flush_range)
+ driver->tlb_flush_range = isp_mmu_flush_tlb_range_default;
+
+ if (!driver->pte_valid_mask) {
+ dev_err(atomisp_dev, "PTE_MASK is missing from mmu driver\n");
+ return -EINVAL;
+ }
+
+ mmu->l1_pte = driver->null_pte;
+
+ mutex_init(&mmu->pt_mutex);
+
+ mmu->tbl_cache = kmem_cache_create("iopte_cache", ISP_PAGE_SIZE,
+ ISP_PAGE_SIZE, SLAB_HWCACHE_ALIGN,
+ NULL);
+ if (!mmu->tbl_cache)
+ return -ENOMEM;
+
+ return 0;
+}
+
+/* Free L1 and L2 page tables */
+void isp_mmu_exit(struct isp_mmu *mmu)
+{
+ unsigned int idx;
+ unsigned int pte;
+ phys_addr_t l1_pt, l2_pt;
+
+ if (!mmu)
+ return;
+
+ if (!ISP_PTE_VALID(mmu, mmu->l1_pte)) {
+ dev_warn(atomisp_dev, "invalid L1PT: pte = 0x%x\n",
+ (unsigned int)mmu->l1_pte);
+ return;
+ }
+
+ l1_pt = isp_pte_to_pgaddr(mmu, mmu->l1_pte);
+
+ for (idx = 0; idx < ISP_L1PT_PTES; idx++) {
+ pte = atomisp_get_pte(l1_pt, idx);
+
+ if (ISP_PTE_VALID(mmu, pte)) {
+ l2_pt = isp_pte_to_pgaddr(mmu, pte);
+
+ free_page_table(mmu, l2_pt);
+ }
+ }
+
+ free_page_table(mmu, l1_pt);
+
+ kmem_cache_destroy(mmu->tbl_cache);
+}
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/mmu/sh_mmu_mrfld.c b/drivers/staging/media/atomisp/pci/atomisp2/mmu/sh_mmu_mrfld.c
new file mode 100644
index 000000000000..97546bd124cd
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/atomisp2/mmu/sh_mmu_mrfld.c
@@ -0,0 +1,93 @@
+/*
+ * Support for Merrifield PNW Camera Imaging ISP subsystem.
+ *
+ * Copyright (c) 2012 Intel Corporation. All Rights Reserved.
+ *
+ * Copyright (c) 2012 Silicon Hive www.siliconhive.com.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+#include "type_support.h"
+#include "mmu/isp_mmu.h"
+#include "memory_access/memory_access.h"
+#include "atomisp_compat.h"
+
+#define MERR_VALID_PTE_MASK 0x80000000
+
+/*
+ * include SH header file here
+ */
+
+static unsigned int sh_phys_to_pte(struct isp_mmu *mmu,
+ phys_addr_t phys)
+{
+ return phys >> ISP_PAGE_OFFSET;
+}
+
+static phys_addr_t sh_pte_to_phys(struct isp_mmu *mmu,
+ unsigned int pte)
+{
+ unsigned int mask = mmu->driver->pte_valid_mask;
+ return (phys_addr_t)((pte & ~mask) << ISP_PAGE_OFFSET);
+}
+
+/*
+ * set page directory base address (physical address).
+ *
+ * must be provided.
+ */
+static int sh_set_pd_base(struct isp_mmu *mmu,
+ phys_addr_t phys)
+{
+ unsigned int pte = sh_phys_to_pte(mmu, phys);
+ /*mmgr_set_base_address(HOST_ADDRESS(pte));*/
+ atomisp_css_mmu_set_page_table_base_index(HOST_ADDRESS(pte));
+ return 0;
+}
+
+static unsigned int sh_get_pd_base(struct isp_mmu *mmu,
+ phys_addr_t phys)
+{
+ unsigned int pte = sh_phys_to_pte(mmu, phys);
+ return HOST_ADDRESS(pte);
+}
+
+/*
+ * callback to flush tlb.
+ *
+ * tlb_flush_range will at least flush TLBs containing
+ * address mapping from addr to addr + size.
+ *
+ * tlb_flush_all will flush all TLBs.
+ *
+ * tlb_flush_all must be provided. If tlb_flush_range is
+ * not provided, it will be set to tlb_flush_all by default.
+ */
+static void sh_tlb_flush(struct isp_mmu *mmu)
+{
+ atomisp_css_mmu_invalidate_cache();
+}
+
+struct isp_mmu_client sh_mmu_mrfld = {
+ .name = "Silicon Hive ISP3000 MMU",
+ .pte_valid_mask = MERR_VALID_PTE_MASK,
+ .null_pte = ~MERR_VALID_PTE_MASK,
+ .set_pd_base = sh_set_pd_base,
+ .get_pd_base = sh_get_pd_base,
+ .tlb_flush_all = sh_tlb_flush,
+ .phys_to_pte = sh_phys_to_pte,
+ .pte_to_phys = sh_pte_to_phys,
+};
diff --git a/drivers/staging/media/atomisp/platform/Makefile b/drivers/staging/media/atomisp/platform/Makefile
new file mode 100644
index 000000000000..df157630bda9
--- /dev/null
+++ b/drivers/staging/media/atomisp/platform/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for camera drivers.
+#
+
+obj-$(CONFIG_INTEL_ATOMISP) += clock/
+obj-$(CONFIG_INTEL_ATOMISP) += intel-mid/
diff --git a/drivers/staging/media/atomisp/platform/clock/Makefile b/drivers/staging/media/atomisp/platform/clock/Makefile
new file mode 100644
index 000000000000..82fbe8b6968a
--- /dev/null
+++ b/drivers/staging/media/atomisp/platform/clock/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for clock devices.
+#
+
+obj-$(CONFIG_INTEL_ATOMISP) += vlv2_plat_clock.o
+obj-$(CONFIG_INTEL_ATOMISP) += platform_vlv2_plat_clk.o
diff --git a/drivers/staging/media/atomisp/platform/clock/platform_vlv2_plat_clk.c b/drivers/staging/media/atomisp/platform/clock/platform_vlv2_plat_clk.c
new file mode 100644
index 000000000000..0aae9b0283bb
--- /dev/null
+++ b/drivers/staging/media/atomisp/platform/clock/platform_vlv2_plat_clk.c
@@ -0,0 +1,40 @@
+/*
+ * platform_vlv2_plat_clk.c - VLV2 platform clock driver
+ * Copyright (C) 2013 Intel Corporation
+ *
+ * Author: Asutosh Pathak <asutosh.pathak@intel.com>
+ * Author: Chandra Sekhar Anagani <chandra.sekhar.anagani@intel.com>
+ * Author: Sergio Aguirre <sergio.a.aguirre.rodriguez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/printk.h>
+
+static int __init vlv2_plat_clk_init(void)
+{
+ struct platform_device *pdev;
+
+ pdev = platform_device_register_simple("vlv2_plat_clk", -1, NULL, 0);
+ if (IS_ERR(pdev)) {
+ pr_err("platform_vlv2_plat_clk:register failed: %ld\n",
+ PTR_ERR(pdev));
+ return PTR_ERR(pdev);
+ }
+
+ return 0;
+}
+
+device_initcall(vlv2_plat_clk_init);
diff --git a/drivers/staging/media/atomisp/platform/clock/platform_vlv2_plat_clk.h b/drivers/staging/media/atomisp/platform/clock/platform_vlv2_plat_clk.h
new file mode 100644
index 000000000000..b730ab0e8223
--- /dev/null
+++ b/drivers/staging/media/atomisp/platform/clock/platform_vlv2_plat_clk.h
@@ -0,0 +1,27 @@
+/*
+ * platform_vlv2_plat_clk.h: platform clock driver library header file
+ * Copyright (C) 2013 Intel Corporation
+ *
+ * Author: Asutosh Pathak <asutosh.pathak@intel.com>
+ * Author: Chandra Sekhar Anagani <chandra.sekhar.anagani@intel.com>
+ * Author: Sergio Aguirre <sergio.a.aguirre.rodriguez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ */
+#ifndef _PLATFORM_VLV2_PLAT_CLK_H_
+#define _PLATFORM_VLV2_PLAT_CLK_H_
+
+#include <linux/sfi.h>
+#include <asm/intel-mid.h>
+
+extern void __init *vlv2_plat_clk_device_platform_data(
+ void *info) __attribute__((weak));
+#endif
diff --git a/drivers/staging/media/atomisp/platform/clock/vlv2_plat_clock.c b/drivers/staging/media/atomisp/platform/clock/vlv2_plat_clock.c
new file mode 100644
index 000000000000..f96789a31819
--- /dev/null
+++ b/drivers/staging/media/atomisp/platform/clock/vlv2_plat_clock.c
@@ -0,0 +1,247 @@
+/*
+ * vlv2_plat_clock.c - VLV2 platform clock driver
+ * Copyright (C) 2013 Intel Corporation
+ *
+ * Author: Asutosh Pathak <asutosh.pathak@intel.com>
+ * Author: Chandra Sekhar Anagani <chandra.sekhar.anagani@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ */
+
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include "../../include/linux/vlv2_plat_clock.h"
+
+/* NOTE: Most of the constants below could come from platform data.
+ * To be fixed when appropriate ACPI support arrives.
+ */
+#define VLV2_PMC_CLK_BASE_ADDRESS 0xfed03060
+#define PLT_CLK_CTL_OFFSET(x) (0x04 * (x))
+
+#define CLK_CONFG_BIT_POS 0
+#define CLK_CONFG_BIT_LEN 2
+#define CLK_CONFG_D3_GATED 0
+#define CLK_CONFG_FORCE_ON 1
+#define CLK_CONFG_FORCE_OFF 2
+
+#define CLK_FREQ_TYPE_BIT_POS 2
+#define CLK_FREQ_TYPE_BIT_LEN 1
+#define CLK_FREQ_TYPE_XTAL 0 /* 25 MHz */
+#define CLK_FREQ_TYPE_PLL 1 /* 19.2 MHz */
+
+#define MAX_CLK_COUNT 5
+
+/* Helper macros to manipulate bitfields */
+#define REG_MASK(n) (((1 << (n##_BIT_LEN)) - 1) << (n##_BIT_POS))
+#define REG_SET_FIELD(r, n, v) (((r) & ~REG_MASK(n)) | \
+ (((v) << (n##_BIT_POS)) & REG_MASK(n)))
+#define REG_GET_FIELD(r, n) (((r) & REG_MASK(n)) >> n##_BIT_POS)
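Worked out for illustration, this is what the helpers above expand to for the CLK_FREQ_TYPE field (BIT_POS 2, BIT_LEN 1):

/*
 * REG_MASK(CLK_FREQ_TYPE)            == ((1 << 1) - 1) << 2 == 0x4
 * REG_SET_FIELD(r, CLK_FREQ_TYPE, 1) == (r & ~0x4) | ((1 << 2) & 0x4)
 *                                    == (r & ~0x4) | 0x4   (sets bit 2 only)
 * REG_GET_FIELD(r, CLK_FREQ_TYPE)    == (r & 0x4) >> 2
 */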
+/*
+ * The vlv2 platform has 6 platform clocks, each controlled by a 4-byte
+ * register; the total size required for the mapping is 6 * 4 = 24 bytes.
+ */
+#define PMC_MAP_SIZE 24
+
+static DEFINE_MUTEX(clk_mutex);
+static void __iomem *pmc_base;
+
+/*
+ * vlv2_plat_set_clock_freq - Set clock frequency to a specified platform clock
+ * @clk_num: Platform clock number (0 to MAX_CLK_COUNT - 1)
+ * @freq_type: Clock frequency type (0 - 25 MHz (XTAL), 1 - 19.2 MHz (PLL))
+ */
+int vlv2_plat_set_clock_freq(int clk_num, int freq_type)
+{
+ void __iomem *addr;
+
+ if (clk_num < 0 || clk_num >= MAX_CLK_COUNT) {
+ pr_err("Clock number out of range (%d)\n", clk_num);
+ return -EINVAL;
+ }
+
+ if (freq_type != CLK_FREQ_TYPE_XTAL &&
+ freq_type != CLK_FREQ_TYPE_PLL) {
+ pr_err("wrong clock type\n");
+ return -EINVAL;
+ }
+
+ if (!pmc_base) {
+ pr_err("memio map is not set\n");
+ return -EINVAL;
+ }
+
+ addr = pmc_base + PLT_CLK_CTL_OFFSET(clk_num);
+
+ mutex_lock(&clk_mutex);
+ writel(REG_SET_FIELD(readl(addr), CLK_FREQ_TYPE, freq_type), addr);
+ mutex_unlock(&clk_mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vlv2_plat_set_clock_freq);
+
+/*
+ * vlv2_plat_get_clock_freq - Get the clock frequency type of a platform clock
+ * @clk_num: Platform clock number (0 to MAX_CLK_COUNT - 1)
+ *
+ * Returns 0 for 25 MHz (XTAL) and 1 for 19.2 MHz (PLL)
+ */
+int vlv2_plat_get_clock_freq(int clk_num)
+{
+ u32 ret;
+
+ if (clk_num < 0 || clk_num >= MAX_CLK_COUNT) {
+ pr_err("Clock number out of range (%d)\n", clk_num);
+ return -EINVAL;
+ }
+
+ if (!pmc_base) {
+ pr_err("memio map is not set\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&clk_mutex);
+ ret = REG_GET_FIELD(readl(pmc_base + PLT_CLK_CTL_OFFSET(clk_num)),
+ CLK_FREQ_TYPE);
+ mutex_unlock(&clk_mutex);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(vlv2_plat_get_clock_freq);
+
+/*
+ * vlv2_plat_configure_clock - Configure the specified platform clock
+ * @clk_num: Platform clock number (0 to MAX_CLK_COUNT - 1)
+ * @conf: Clock gating:
+ * 0 - Clock gated in D3 state
+ * 1 - Force on
+ * 2 - Force off
+ */
+int vlv2_plat_configure_clock(int clk_num, u32 conf)
+{
+ void __iomem *addr;
+
+ if (clk_num < 0 || clk_num >= MAX_CLK_COUNT) {
+ pr_err("Clock number out of range (%d)\n", clk_num);
+ return -EINVAL;
+ }
+
+ if (conf != CLK_CONFG_D3_GATED &&
+ conf != CLK_CONFG_FORCE_ON &&
+ conf != CLK_CONFG_FORCE_OFF) {
+ pr_err("Invalid clock configuration requested\n");
+ return -EINVAL;
+ }
+
+ if (!pmc_base) {
+ pr_err("memio map is not set\n");
+ return -EINVAL;
+ }
+
+ addr = pmc_base + PLT_CLK_CTL_OFFSET(clk_num);
+
+ mutex_lock(&clk_mutex);
+ writel(REG_SET_FIELD(readl(addr), CLK_CONFG, conf), addr);
+ mutex_unlock(&clk_mutex);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vlv2_plat_configure_clock);
+
+/*
+ * vlv2_plat_get_clock_status - Get the gating status of a platform clock
+ * @clk_num: Platform clock number (0 to MAX_CLK_COUNT - 1)
+ *
+ * Returns the CLK_CONFG field: 0 - gated in D3, 1 - forced on, 2 - forced off
+ */
+int vlv2_plat_get_clock_status(int clk_num)
+{
+ int ret;
+
+ if (clk_num < 0 || clk_num >= MAX_CLK_COUNT) {
+ pr_err("Clock number out of range (%d)\n", clk_num);
+ return -EINVAL;
+ }
+
+ if (!pmc_base) {
+ pr_err("memio map is not set\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&clk_mutex);
+ ret = (int)REG_GET_FIELD(readl(pmc_base + PLT_CLK_CTL_OFFSET(clk_num)),
+ CLK_CONFG);
+ mutex_unlock(&clk_mutex);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(vlv2_plat_get_clock_status);
+
+static int vlv2_plat_clk_probe(struct platform_device *pdev)
+{
+ int i = 0;
+
+ pmc_base = ioremap_nocache(VLV2_PMC_CLK_BASE_ADDRESS, PMC_MAP_SIZE);
+ if (!pmc_base) {
+ dev_err(&pdev->dev, "I/O memory remapping failed\n");
+ return -ENOMEM;
+ }
+
+ /* Initialize all clocks as disabled */
+ for (i = 0; i < MAX_CLK_COUNT; i++)
+ vlv2_plat_configure_clock(i, CLK_CONFG_FORCE_OFF);
+
+ dev_info(&pdev->dev, "vlv2_plat_clk initialized\n");
+ return 0;
+}
+
+static const struct platform_device_id vlv2_plat_clk_id[] = {
+ {"vlv2_plat_clk", 0},
+ {}
+};
+
+static int vlv2_resume(struct device *device)
+{
+ int i;
+
+ /* Initialize all clocks as disabled */
+ for (i = 0; i < MAX_CLK_COUNT; i++)
+ vlv2_plat_configure_clock(i, CLK_CONFG_FORCE_OFF);
+
+ return 0;
+}
+
+static int vlv2_suspend(struct device *device)
+{
+ return 0;
+}
+
+static const struct dev_pm_ops vlv2_pm_ops = {
+ .suspend = vlv2_suspend,
+ .resume = vlv2_resume,
+};
+
+static struct platform_driver vlv2_plat_clk_driver = {
+ .probe = vlv2_plat_clk_probe,
+ .id_table = vlv2_plat_clk_id,
+ .driver = {
+ .name = "vlv2_plat_clk",
+ .pm = &vlv2_pm_ops,
+ },
+};
+
+static int __init vlv2_plat_clk_init(void)
+{
+ return platform_driver_register(&vlv2_plat_clk_driver);
+}
+arch_initcall(vlv2_plat_clk_init);
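A short usage sketch of the API exported above; clock number 0 and the wrapper are illustrative, and the CLK_* constants are the file-local defines above, which external callers such as atomisp_gmin_platform.c mirror with their own definitions:

/* Illustrative: run platform clock 0 from the 19.2 MHz PLL, then gate it. */
static int example_sensor_clk(void)
{
        int ret;

        ret = vlv2_plat_set_clock_freq(0, CLK_FREQ_TYPE_PLL);
        if (ret)
                return ret;

        ret = vlv2_plat_configure_clock(0, CLK_CONFG_FORCE_ON);
        if (ret)
                return ret;

        /* ... sensor streaming ... */

        return vlv2_plat_configure_clock(0, CLK_CONFG_FORCE_OFF);
}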
diff --git a/drivers/staging/media/atomisp/platform/intel-mid/Makefile b/drivers/staging/media/atomisp/platform/intel-mid/Makefile
new file mode 100644
index 000000000000..4621261c35db
--- /dev/null
+++ b/drivers/staging/media/atomisp/platform/intel-mid/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for intel-mid devices.
+#
+obj-$(CONFIG_INTEL_ATOMISP) += intel_mid_pcihelpers.o
+obj-$(CONFIG_INTEL_ATOMISP) += atomisp_gmin_platform.o
diff --git a/drivers/staging/media/atomisp/platform/intel-mid/atomisp_gmin_platform.c b/drivers/staging/media/atomisp/platform/intel-mid/atomisp_gmin_platform.c
new file mode 100644
index 000000000000..5b4506a71126
--- /dev/null
+++ b/drivers/staging/media/atomisp/platform/intel-mid/atomisp_gmin_platform.c
@@ -0,0 +1,758 @@
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/dmi.h>
+#include <linux/efi.h>
+#include <linux/pci.h>
+#include <linux/acpi.h>
+#include <linux/delay.h>
+#include <media/v4l2-subdev.h>
+#include <linux/mfd/intel_soc_pmic.h>
+#include "../../include/linux/vlv2_plat_clock.h"
+#include <linux/regulator/consumer.h>
+#include <linux/gpio/consumer.h>
+#include <linux/gpio.h>
+#include <linux/platform_device.h>
+#include "../../include/linux/atomisp_platform.h"
+#include "../../include/linux/atomisp_gmin_platform.h"
+
+#define MAX_SUBDEVS 8
+
+/* Should be defined in vlv2_plat_clock API, isn't: */
+#define VLV2_CLK_PLL_19P2MHZ 1
+#define VLV2_CLK_XTAL_19P2MHZ 0
+#define VLV2_CLK_ON 1
+#define VLV2_CLK_OFF 2
+#define ELDO1_SEL_REG 0x19
+#define ELDO1_1P8V 0x16
+#define ELDO1_CTRL_SHIFT 0x00
+#define ELDO2_SEL_REG 0x1a
+#define ELDO2_1P8V 0x16
+#define ELDO2_CTRL_SHIFT 0x01
+
+struct gmin_subdev {
+ struct v4l2_subdev *subdev;
+ int clock_num;
+ int clock_src;
+ struct gpio_desc *gpio0;
+ struct gpio_desc *gpio1;
+ struct regulator *v1p8_reg;
+ struct regulator *v2p8_reg;
+ struct regulator *v1p2_reg;
+ struct regulator *v2p8_vcm_reg;
+ enum atomisp_camera_port csi_port;
+ unsigned int csi_lanes;
+ enum atomisp_input_format csi_fmt;
+ enum atomisp_bayer_order csi_bayer;
+ bool v1p8_on;
+ bool v2p8_on;
+ bool v1p2_on;
+ bool v2p8_vcm_on;
+};
+
+static struct gmin_subdev gmin_subdevs[MAX_SUBDEVS];
+
+static enum { PMIC_UNSET = 0, PMIC_REGULATOR, PMIC_AXP, PMIC_TI,
+ PMIC_CRYSTALCOVE } pmic_id;
+
+/* The atomisp uses type==0 for the end-of-list marker, so leave space. */
+static struct intel_v4l2_subdev_table pdata_subdevs[MAX_SUBDEVS + 1];
+
+static const struct atomisp_platform_data pdata = {
+ .subdevs = pdata_subdevs,
+};
+
+/*
+ * Something of a hack. The ECS E7 board drives camera 2.8v from an
+ * external regulator instead of the PMIC. There's a gmin_CamV2P8
+ * config variable that specifies the GPIO to handle this particular
+ * case, but this needs a broader architecture for handling camera
+ * power.
+ */
+enum { V2P8_GPIO_UNSET = -2, V2P8_GPIO_NONE = -1 };
+static int v2p8_gpio = V2P8_GPIO_UNSET;
+
+/*
+ * Something of a hack. The CHT RVP board drives camera 1.8v from an
+ * external regulator instead of the PMIC just like ECS E7 board, see the
+ * comments above.
+ */
+enum { V1P8_GPIO_UNSET = -2, V1P8_GPIO_NONE = -1 };
+static int v1p8_gpio = V1P8_GPIO_UNSET;
+
+static LIST_HEAD(vcm_devices);
+static DEFINE_MUTEX(vcm_lock);
+
+static struct gmin_subdev *find_gmin_subdev(struct v4l2_subdev *subdev);
+
+/*
+ * Legacy/stub behavior copied from upstream platform_camera.c. The
+ * atomisp driver relies on these values being non-NULL in a few
+ * places, even though they are hard-coded in all current
+ * implementations.
+ */
+const struct atomisp_camera_caps *atomisp_get_default_camera_caps(void)
+{
+ static const struct atomisp_camera_caps caps = {
+ .sensor_num = 1,
+ .sensor = {
+ { .stream_num = 1, },
+ },
+ };
+ return &caps;
+}
+EXPORT_SYMBOL_GPL(atomisp_get_default_camera_caps);
+
+const struct atomisp_platform_data *atomisp_get_platform_data(void)
+{
+ return &pdata;
+}
+EXPORT_SYMBOL_GPL(atomisp_get_platform_data);
+
+static int af_power_ctrl(struct v4l2_subdev *subdev, int flag)
+{
+ struct gmin_subdev *gs = find_gmin_subdev(subdev);
+
+ if (gs && gs->v2p8_vcm_on == flag)
+ return 0;
+ gs->v2p8_vcm_on = flag;
+
+ /*
+ * The power here is used for the dw9817;
+ * the regulator comes from the rear sensor.
+ */
+ if (gs->v2p8_vcm_reg) {
+ if (flag)
+ return regulator_enable(gs->v2p8_vcm_reg);
+ else
+ return regulator_disable(gs->v2p8_vcm_reg);
+ }
+ return 0;
+}
+
+/*
+ * Used in a handful of modules. Focus motor control, I think. Note
+ * that there is no configurability in the API, so this needs to be
+ * fixed where it is used.
+ *
+ * struct camera_af_platform_data {
+ * int (*power_ctrl)(struct v4l2_subdev *subdev, int flag);
+ * };
+ *
+ * Note that the implementation in MCG platform_camera.c is stubbed
+ * out anyway (i.e. returns zero from the callback) on BYT, so it is
+ * neither needed on gmin platforms nor supported upstream.
+ */
+const struct camera_af_platform_data *camera_get_af_platform_data(void)
+{
+ static struct camera_af_platform_data afpd = {
+ .power_ctrl = af_power_ctrl,
+ };
+ return &afpd;
+}
+EXPORT_SYMBOL_GPL(camera_get_af_platform_data);
+
+int atomisp_register_i2c_module(struct v4l2_subdev *subdev,
+ struct camera_sensor_platform_data *plat_data,
+ enum intel_v4l2_subdev_type type)
+{
+ int i;
+ struct i2c_board_info *bi;
+ struct gmin_subdev *gs;
+ struct i2c_client *client = v4l2_get_subdevdata(subdev);
+ struct acpi_device *adev;
+
+ dev_info(&client->dev, "register atomisp i2c module type %d\n", type);
+
+ /* The Windows driver model (and thus most BIOSes by default)
+ * uses ACPI runtime power management for camera devices, but
+ * we don't. Disable it, or else the rails will be needlessly
+ * tickled during suspend/resume. This has caused power and
+ * performance issues on multiple devices. */
+ adev = ACPI_COMPANION(&client->dev);
+ if (adev)
+ adev->power.flags.power_resources = 0;
+
+ for (i = 0; i < MAX_SUBDEVS; i++)
+ if (!pdata.subdevs[i].type)
+ break;
+
+ if (pdata.subdevs[i].type)
+ return -ENOMEM;
+
+ /* Note subtlety of initialization order: at the point where
+ * this registration API gets called, the platform data
+ * callbacks have probably already been invoked, so the
+ * gmin_subdev struct is already initialized for us. */
+ gs = find_gmin_subdev(subdev);
+
+ pdata.subdevs[i].type = type;
+ pdata.subdevs[i].port = gs->csi_port;
+ pdata.subdevs[i].subdev = subdev;
+ pdata.subdevs[i].v4l2_subdev.i2c_adapter_id = client->adapter->nr;
+
+ /* Convert i2c_client to i2c_board_info */
+ bi = &pdata.subdevs[i].v4l2_subdev.board_info;
+ memcpy(bi->type, client->name, I2C_NAME_SIZE);
+ bi->flags = client->flags;
+ bi->addr = client->addr;
+ bi->irq = client->irq;
+ bi->platform_data = plat_data;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(atomisp_register_i2c_module);
+
+struct v4l2_subdev *atomisp_gmin_find_subdev(struct i2c_adapter *adapter,
+ struct i2c_board_info *board_info)
+{
+ int i;
+ for (i = 0; i < MAX_SUBDEVS && pdata.subdevs[i].type; i++) {
+ struct intel_v4l2_subdev_table *sd = &pdata.subdevs[i];
+ if (sd->v4l2_subdev.i2c_adapter_id == adapter->nr &&
+ sd->v4l2_subdev.board_info.addr == board_info->addr)
+ return sd->subdev;
+ }
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(atomisp_gmin_find_subdev);
+
+int atomisp_gmin_remove_subdev(struct v4l2_subdev *sd)
+{
+ int i, j;
+
+ if (!sd)
+ return 0;
+
+ for (i = 0; i < MAX_SUBDEVS; i++) {
+ if (pdata.subdevs[i].subdev == sd) {
+ for (j = i + 1; j <= MAX_SUBDEVS; j++)
+ pdata.subdevs[j - 1] = pdata.subdevs[j];
+ }
+ if (gmin_subdevs[i].subdev == sd) {
+ if (gmin_subdevs[i].gpio0)
+ gpiod_put(gmin_subdevs[i].gpio0);
+ gmin_subdevs[i].gpio0 = NULL;
+ if (gmin_subdevs[i].gpio1)
+ gpiod_put(gmin_subdevs[i].gpio1);
+ gmin_subdevs[i].gpio1 = NULL;
+ if (pmic_id == PMIC_REGULATOR) {
+ regulator_put(gmin_subdevs[i].v1p8_reg);
+ regulator_put(gmin_subdevs[i].v2p8_reg);
+ regulator_put(gmin_subdevs[i].v1p2_reg);
+ regulator_put(gmin_subdevs[i].v2p8_vcm_reg);
+ }
+ gmin_subdevs[i].subdev = NULL;
+ }
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(atomisp_gmin_remove_subdev);
+
+struct gmin_cfg_var {
+ const char *name, *val;
+};
+
+static const struct gmin_cfg_var ffrd8_vars[] = {
+ { "INTCF1B:00_ImxId", "0x134" },
+ { "INTCF1B:00_CsiPort", "1" },
+ { "INTCF1B:00_CsiLanes", "4" },
+ { "INTCF1B:00_CamClk", "0" },
+ {},
+};
+
+/* Cribbed from MCG defaults in the mt9m114 driver, not actually verified
+ * vs. T100 hardware */
+static const struct gmin_cfg_var t100_vars[] = {
+ { "INT33F0:00_CsiPort", "0" },
+ { "INT33F0:00_CsiLanes", "1" },
+ { "INT33F0:00_CamClk", "1" },
+ {},
+};
+
+static const struct gmin_cfg_var mrd7_vars[] = {
+ {"INT33F8:00_CamType", "1"},
+ {"INT33F8:00_CsiPort", "1"},
+ {"INT33F8:00_CsiLanes","2"},
+ {"INT33F8:00_CsiFmt","13"},
+ {"INT33F8:00_CsiBayer", "0"},
+ {"INT33F8:00_CamClk", "0"},
+ {"INT33F9:00_CamType", "1"},
+ {"INT33F9:00_CsiPort", "0"},
+ {"INT33F9:00_CsiLanes","1"},
+ {"INT33F9:00_CsiFmt","13"},
+ {"INT33F9:00_CsiBayer", "0"},
+ {"INT33F9:00_CamClk", "1"},
+ {},
+};
+
+static const struct gmin_cfg_var ecs7_vars[] = {
+ {"INT33BE:00_CsiPort", "1"},
+ {"INT33BE:00_CsiLanes","2"},
+ {"INT33BE:00_CsiFmt","13"},
+ {"INT33BE:00_CsiBayer", "2"},
+ {"INT33BE:00_CamClk", "0"},
+ {"INT33F0:00_CsiPort", "0"},
+ {"INT33F0:00_CsiLanes","1"},
+ {"INT33F0:00_CsiFmt","13"},
+ {"INT33F0:00_CsiBayer", "0"},
+ {"INT33F0:00_CamClk", "1"},
+ {"gmin_V2P8GPIO","402"},
+ {},
+};
+
+static const struct gmin_cfg_var i8880_vars[] = {
+ {"XXOV2680:00_CsiPort", "1"},
+ {"XXOV2680:00_CsiLanes","1"},
+ {"XXOV2680:00_CamClk","0"},
+ {"XXGC0310:00_CsiPort", "0"},
+ {"XXGC0310:00_CsiLanes", "1"},
+ {"XXGC0310:00_CamClk", "1"},
+ {},
+};
+
+static const struct {
+ const char *dmi_board_name;
+ const struct gmin_cfg_var *vars;
+} hard_vars[] = {
+ { "BYT-T FFD8", ffrd8_vars },
+ { "T100TA", t100_vars },
+ { "MRD7", mrd7_vars },
+ { "ST70408", ecs7_vars },
+ { "VTA0803", i8880_vars },
+};
+
+#define GMIN_CFG_VAR_EFI_GUID EFI_GUID(0xecb54cd9, 0xe5ae, 0x4fdc, \
+ 0xa9, 0x71, 0xe8, 0x77, \
+ 0x75, 0x60, 0x68, 0xf7)
+
+#define CFG_VAR_NAME_MAX 64
+
+static int gmin_platform_init(struct i2c_client *client)
+{
+ return 0;
+}
+
+static int gmin_platform_deinit(void)
+{
+ return 0;
+}
+
+static struct gmin_subdev *gmin_subdev_add(struct v4l2_subdev *subdev)
+{
+ int i, ret;
+ struct device *dev;
+ struct i2c_client *client = v4l2_get_subdevdata(subdev);
+
+ if (!pmic_id)
+ pmic_id = PMIC_REGULATOR;
+
+ if (!client)
+ return NULL;
+
+ dev = &client->dev;
+
+ for (i = 0; i < MAX_SUBDEVS && gmin_subdevs[i].subdev; i++)
+ ;
+ if (i >= MAX_SUBDEVS)
+ return NULL;
+
+ dev_info(dev,
+ "gmin: initializing atomisp module subdev data.PMIC ID %d\n",
+ pmic_id);
+
+ gmin_subdevs[i].subdev = subdev;
+ gmin_subdevs[i].clock_num = gmin_get_var_int(dev, "CamClk", 0);
+ /* WA: CHT requires the XTAL clock as the PLL is not stable. */
+ gmin_subdevs[i].clock_src = gmin_get_var_int(dev, "ClkSrc",
+ VLV2_CLK_PLL_19P2MHZ);
+ gmin_subdevs[i].csi_port = gmin_get_var_int(dev, "CsiPort", 0);
+ gmin_subdevs[i].csi_lanes = gmin_get_var_int(dev, "CsiLanes", 1);
+ gmin_subdevs[i].gpio0 = gpiod_get_index(dev, NULL, 0, GPIOD_OUT_LOW);
+ gmin_subdevs[i].gpio1 = gpiod_get_index(dev, NULL, 1, GPIOD_OUT_LOW);
+
+ if (!IS_ERR(gmin_subdevs[i].gpio0)) {
+ ret = gpiod_direction_output(gmin_subdevs[i].gpio0, 0);
+ if (ret)
+ dev_err(dev, "gpio0 set output failed: %d\n", ret);
+ } else {
+ gmin_subdevs[i].gpio0 = NULL;
+ }
+
+ if (!IS_ERR(gmin_subdevs[i].gpio1)) {
+ ret = gpiod_direction_output(gmin_subdevs[i].gpio1, 0);
+ if (ret)
+ dev_err(dev, "gpio1 set output failed: %d\n", ret);
+ } else {
+ gmin_subdevs[i].gpio1 = NULL;
+ }
+
+ if (pmic_id == PMIC_REGULATOR) {
+ gmin_subdevs[i].v1p8_reg = regulator_get(dev, "V1P8SX");
+ gmin_subdevs[i].v2p8_reg = regulator_get(dev, "V2P8SX");
+ gmin_subdevs[i].v1p2_reg = regulator_get(dev, "V1P2A");
+ gmin_subdevs[i].v2p8_vcm_reg = regulator_get(dev, "VPROG4B");
+
+ /* Note: ideally we would initialize v[12]p8_on to the
+ * output of regulator_is_enabled(), but sadly that
+ * API is broken with the current drivers, returning
+ * "1" for a regulator that will then emit a
+ * "unbalanced disable" WARNing if we try to disable
+ * it. */
+ }
+
+ return &gmin_subdevs[i];
+}
+
+static struct gmin_subdev *find_gmin_subdev(struct v4l2_subdev *subdev)
+{
+ int i;
+ for (i = 0; i < MAX_SUBDEVS; i++)
+ if (gmin_subdevs[i].subdev == subdev)
+ return &gmin_subdevs[i];
+ return gmin_subdev_add(subdev);
+}
+
+static int gmin_gpio0_ctrl(struct v4l2_subdev *subdev, int on)
+{
+ struct gmin_subdev *gs = find_gmin_subdev(subdev);
+ if (gs && gs->gpio0) {
+ gpiod_set_value(gs->gpio0, on);
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static int gmin_gpio1_ctrl(struct v4l2_subdev *subdev, int on)
+{
+ struct gmin_subdev *gs = find_gmin_subdev(subdev);
+ if (gs && gs->gpio1) {
+ gpiod_set_value(gs->gpio1, on);
+ return 0;
+ }
+ return -EINVAL;
+}
+
+int gmin_v1p2_ctrl(struct v4l2_subdev *subdev, int on)
+{
+ struct gmin_subdev *gs = find_gmin_subdev(subdev);
+
+ if (gs && gs->v1p2_on == on)
+ return 0;
+ gs->v1p2_on = on;
+
+ if (gs->v1p2_reg) {
+ if (on)
+ return regulator_enable(gs->v1p2_reg);
+ else
+ return regulator_disable(gs->v1p2_reg);
+ }
+
+ /* TODO: v1p2 support needs to be extended to other PMICs */
+
+ return -EINVAL;
+}
+
+int gmin_v1p8_ctrl(struct v4l2_subdev *subdev, int on)
+{
+ struct gmin_subdev *gs = find_gmin_subdev(subdev);
+ int ret;
+
+ if (v1p8_gpio == V1P8_GPIO_UNSET) {
+ v1p8_gpio = gmin_get_var_int(NULL, "V1P8GPIO", V1P8_GPIO_NONE);
+ if (v1p8_gpio != V1P8_GPIO_NONE) {
+ pr_info("atomisp_gmin_platform: 1.8v power on GPIO %d\n",
+ v1p8_gpio);
+ ret = gpio_request(v1p8_gpio, "camera_v1p8_en");
+ if (!ret)
+ ret = gpio_direction_output(v1p8_gpio, 0);
+ if (ret)
+ pr_err("V1P8 GPIO initialization failed\n");
+ }
+ }
+
+ if (gs && gs->v1p8_on == on)
+ return 0;
+ gs->v1p8_on = on;
+
+ if (v1p8_gpio >= 0)
+ gpio_set_value(v1p8_gpio, on);
+
+ if (gs->v1p8_reg) {
+ regulator_set_voltage(gs->v1p8_reg, 1800000, 1800000);
+ if (on)
+ return regulator_enable(gs->v1p8_reg);
+ else
+ return regulator_disable(gs->v1p8_reg);
+ }
+
+ return -EINVAL;
+}
+
+int gmin_v2p8_ctrl(struct v4l2_subdev *subdev, int on)
+{
+ struct gmin_subdev *gs = find_gmin_subdev(subdev);
+ int ret;
+
+ if (v2p8_gpio == V2P8_GPIO_UNSET) {
+ v2p8_gpio = gmin_get_var_int(NULL, "V2P8GPIO", V2P8_GPIO_NONE);
+ if (v2p8_gpio != V2P8_GPIO_NONE) {
+ pr_info("atomisp_gmin_platform: 2.8v power on GPIO %d\n",
+ v2p8_gpio);
+ ret = gpio_request(v2p8_gpio, "camera_v2p8");
+ if (!ret)
+ ret = gpio_direction_output(v2p8_gpio, 0);
+ if (ret)
+ pr_err("V2P8 GPIO initialization failed\n");
+ }
+ }
+
+ if (gs && gs->v2p8_on == on)
+ return 0;
+ gs->v2p8_on = on;
+
+ if (v2p8_gpio >= 0)
+ gpio_set_value(v2p8_gpio, on);
+
+ if (gs->v2p8_reg) {
+ regulator_set_voltage(gs->v2p8_reg, 2900000, 2900000);
+ if (on)
+ return regulator_enable(gs->v2p8_reg);
+ else
+ return regulator_disable(gs->v2p8_reg);
+ }
+
+ return -EINVAL;
+}
+
+int gmin_flisclk_ctrl(struct v4l2_subdev *subdev, int on)
+{
+ int ret = 0;
+ struct gmin_subdev *gs = find_gmin_subdev(subdev);
+ if (on)
+ ret = vlv2_plat_set_clock_freq(gs->clock_num, gs->clock_src);
+ if (ret)
+ return ret;
+ return vlv2_plat_configure_clock(gs->clock_num,
+ on ? VLV2_CLK_ON : VLV2_CLK_OFF);
+}
+
+static int gmin_csi_cfg(struct v4l2_subdev *sd, int flag)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct gmin_subdev *gs = find_gmin_subdev(sd);
+
+ if (!client || !gs)
+ return -ENODEV;
+
+ return camera_sensor_csi(sd, gs->csi_port, gs->csi_lanes,
+ gs->csi_fmt, gs->csi_bayer, flag);
+}
+
+static struct camera_vcm_control *gmin_get_vcm_ctrl(struct v4l2_subdev *subdev,
+ char *camera_module)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(subdev);
+ struct gmin_subdev *gs = find_gmin_subdev(subdev);
+ struct camera_vcm_control *vcm;
+
+ if (client == NULL || gs == NULL)
+ return NULL;
+
+ if (!camera_module)
+ return NULL;
+
+ mutex_lock(&vcm_lock);
+ list_for_each_entry(vcm, &vcm_devices, list) {
+ if (!strcmp(camera_module, vcm->camera_module)) {
+ mutex_unlock(&vcm_lock);
+ return vcm;
+ }
+ }
+
+ mutex_unlock(&vcm_lock);
+ return NULL;
+}
+
+static struct camera_sensor_platform_data gmin_plat = {
+ .gpio0_ctrl = gmin_gpio0_ctrl,
+ .gpio1_ctrl = gmin_gpio1_ctrl,
+ .v1p8_ctrl = gmin_v1p8_ctrl,
+ .v2p8_ctrl = gmin_v2p8_ctrl,
+ .v1p2_ctrl = gmin_v1p2_ctrl,
+ .flisclk_ctrl = gmin_flisclk_ctrl,
+ .platform_init = gmin_platform_init,
+ .platform_deinit = gmin_platform_deinit,
+ .csi_cfg = gmin_csi_cfg,
+ .get_vcm_ctrl = gmin_get_vcm_ctrl,
+};
+
+struct camera_sensor_platform_data *gmin_camera_platform_data(
+ struct v4l2_subdev *subdev,
+ enum atomisp_input_format csi_format,
+ enum atomisp_bayer_order csi_bayer)
+{
+ struct gmin_subdev *gs = find_gmin_subdev(subdev);
+ gs->csi_fmt = csi_format;
+ gs->csi_bayer = csi_bayer;
+
+ return &gmin_plat;
+}
+EXPORT_SYMBOL_GPL(gmin_camera_platform_data);
+
+int atomisp_gmin_register_vcm_control(struct camera_vcm_control *vcmCtrl)
+{
+ if (!vcmCtrl)
+ return -EINVAL;
+
+ mutex_lock(&vcm_lock);
+ list_add_tail(&vcmCtrl->list, &vcm_devices);
+ mutex_unlock(&vcm_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(atomisp_gmin_register_vcm_control);
+
+/* Retrieves a device-specific configuration variable. The dev
+ * argument should be a device with an ACPI companion, as all
+ * configuration is based on firmware ID. */
+int gmin_get_config_var(struct device *dev, const char *var, char *out, size_t *out_len)
+{
+ char var8[CFG_VAR_NAME_MAX];
+ efi_char16_t var16[CFG_VAR_NAME_MAX];
+ struct efivar_entry *ev;
+ u32 efiattr_dummy;
+ int i, j, ret;
+ unsigned long efilen;
+
+ if (dev && ACPI_COMPANION(dev))
+ dev = &ACPI_COMPANION(dev)->dev;
+
+ if (dev)
+ ret = snprintf(var8, sizeof(var8), "%s_%s", dev_name(dev), var);
+ else
+ ret = snprintf(var8, sizeof(var8), "gmin_%s", var);
+
+ if (ret < 0 || ret >= sizeof(var8) - 1)
+ return -EINVAL;
+
+ /* First check a hard-coded list of board-specific variables.
+ * Some device firmwares lack the ability to set EFI variables at
+ * runtime. */
+ for (i = 0; i < ARRAY_SIZE(hard_vars); i++) {
+ if (dmi_match(DMI_BOARD_NAME, hard_vars[i].dmi_board_name)) {
+ for (j = 0; hard_vars[i].vars[j].name; j++) {
+ size_t vl;
+ const struct gmin_cfg_var *gv;
+
+ gv = &hard_vars[i].vars[j];
+ vl = strlen(gv->val);
+
+ if (strcmp(var8, gv->name))
+ continue;
+ if (vl > *out_len - 1)
+ return -ENOSPC;
+
+ memcpy(out, gv->val, min(*out_len, vl+1));
+ out[*out_len-1] = 0;
+ *out_len = vl;
+
+ return 0;
+ }
+ }
+ }
+
+ /* Our variable names are ASCII by construction, but EFI names
+ * are wide chars. Convert and zero-pad. */
+ memset(var16, 0, sizeof(var16));
+ for (i = 0; i < sizeof(var8) && var8[i]; i++)
+ var16[i] = var8[i];
+
+ /* To avoid overflows when calling the efivar API */
+ if (*out_len > ULONG_MAX)
+ return -EINVAL;
+
+ /* Not sure this API usage is kosher; efivar_entry_get()'s
+ * implementation simply uses VariableName and VendorGuid from
+ * the struct and ignores the rest, but it seems like there
+ * ought to be an "official" efivar_entry registered
+ * somewhere? */
+ ev = kzalloc(sizeof(*ev), GFP_KERNEL);
+ if (!ev)
+ return -ENOMEM;
+ memcpy(&ev->var.VariableName, var16, sizeof(var16));
+ ev->var.VendorGuid = GMIN_CFG_VAR_EFI_GUID;
+
+ efilen = *out_len;
+ ret = efivar_entry_get(ev, &efiattr_dummy, &efilen, out);
+
+ kfree(ev);
+ *out_len = efilen;
+
+ if (ret)
+ dev_warn(dev, "Failed to find gmin variable %s\n", var8);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(gmin_get_config_var);
+
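+/* Integer wrapper around gmin_get_config_var(): parse the value with
+ * kstrtol() and fall back to "def" on any lookup or parse error. */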
+int gmin_get_var_int(struct device *dev, const char *var, int def)
+{
+ char val[CFG_VAR_NAME_MAX];
+ size_t len = sizeof(val);
+ long result;
+ int ret;
+
+ ret = gmin_get_config_var(dev, var, val, &len);
+ if (!ret) {
+ val[len] = 0;
+ ret = kstrtol(val, 0, &result);
+ }
+
+ return ret ? def : result;
+}
+EXPORT_SYMBOL_GPL(gmin_get_var_int);
+
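+/* Attach (flag != 0) or free (flag == 0) the camera_mipi_info that
+ * describes this sensor's CSI link; the info is stored as v4l2
+ * subdev host data. */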
+int camera_sensor_csi(struct v4l2_subdev *sd, u32 port,
+ u32 lanes, u32 format, u32 bayer_order, int flag)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct camera_mipi_info *csi = NULL;
+
+ if (flag) {
+ csi = kzalloc(sizeof(*csi), GFP_KERNEL);
+ if (!csi) {
+ dev_err(&client->dev, "out of memory\n");
+ return -ENOMEM;
+ }
+ csi->port = port;
+ csi->num_lanes = lanes;
+ csi->input_format = format;
+ csi->raw_bayer_order = bayer_order;
+ v4l2_set_subdev_hostdata(sd, (void *)csi);
+ csi->metadata_format = ATOMISP_INPUT_FORMAT_EMBEDDED;
+ csi->metadata_effective_width = NULL;
+ dev_info(&client->dev,
+ "camera pdata: port: %d lanes: %d order: %8.8x\n",
+ port, lanes, bayer_order);
+ } else {
+ csi = v4l2_get_subdev_hostdata(sd);
+ kfree(csi);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(camera_sensor_csi);
+
+/* PCI quirk: the BYT ISP advertises PCI runtime PM but it doesn't
+ * work. Disable it so the kernel framework doesn't hang the device
+ * trying to use it. The driver itself makes direct calls to the
+ * PUNIT to manage ISP power. */
+static void isp_pm_cap_fixup(struct pci_dev *dev)
+{
+ dev_info(&dev->dev, "Disabling PCI power management on camera ISP\n");
+ dev->pm_cap = 0;
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0f38, isp_pm_cap_fixup);
diff --git a/drivers/staging/media/atomisp/platform/intel-mid/intel_mid_pcihelpers.c b/drivers/staging/media/atomisp/platform/intel-mid/intel_mid_pcihelpers.c
new file mode 100644
index 000000000000..a6c0f5f8c3f8
--- /dev/null
+++ b/drivers/staging/media/atomisp/platform/intel-mid/intel_mid_pcihelpers.c
@@ -0,0 +1,297 @@
+#include <linux/export.h>
+#include <linux/pci.h>
+#include <linux/pm_qos.h>
+#include <linux/delay.h>
+
+/* G-Min addition: "platform_is()" lives in intel_mid_pm.h in the MCG
+ * tree, but it's just platform ID info and we don't want to pull in
+ * the whole SFI-based PM architecture. */
+#define INTEL_ATOM_MRST 0x26
+#define INTEL_ATOM_MFLD 0x27
+#define INTEL_ATOM_CLV 0x35
+#define INTEL_ATOM_MRFLD 0x4a
+#define INTEL_ATOM_BYT 0x37
+#define INTEL_ATOM_MOORFLD 0x5a
+#define INTEL_ATOM_CHT 0x4c
+/* synchronization for sharing the I2C controller */
+#define PUNIT_PORT 0x04
+#define PUNIT_DOORBELL_OPCODE (0xE0)
+#define PUNIT_DOORBELL_REG (0x0)
+#ifndef CSTATE_EXIT_LATENCY
+#define CSTATE_EXIT_LATENCY_C1 1
+#endif
+static inline int platform_is(u8 model)
+{
+ return (boot_cpu_data.x86_model == model);
+}
+
+#include "../../include/asm/intel_mid_pcihelpers.h"
+
+/* Unified message bus read/write operation */
+static DEFINE_SPINLOCK(msgbus_lock);
+
+static struct pci_dev *pci_root;
+static struct pm_qos_request pm_qos;
+int qos;
+
+#define DW_I2C_NEED_QOS (platform_is(INTEL_ATOM_BYT))
+
+static int intel_mid_msgbus_init(void)
+{
+ pci_root = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
+ if (!pci_root) {
+ pr_err("%s: Error: msgbus PCI handle NULL\n", __func__);
+ return -ENODEV;
+ }
+
+ if (DW_I2C_NEED_QOS) {
+ pm_qos_add_request(&pm_qos,
+ PM_QOS_CPU_DMA_LATENCY,
+ PM_QOS_DEFAULT_VALUE);
+ }
+ return 0;
+}
+fs_initcall(intel_mid_msgbus_init);
+
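+/* Raw message-bus read: write the prebuilt command to the root
+ * complex control register, then read the result from the data
+ * register, all under the shared msgbus spinlock. */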
+u32 intel_mid_msgbus_read32_raw(u32 cmd)
+{
+ unsigned long irq_flags;
+ u32 data;
+
+ spin_lock_irqsave(&msgbus_lock, irq_flags);
+ pci_write_config_dword(pci_root, PCI_ROOT_MSGBUS_CTRL_REG, cmd);
+ pci_read_config_dword(pci_root, PCI_ROOT_MSGBUS_DATA_REG, &data);
+ spin_unlock_irqrestore(&msgbus_lock, irq_flags);
+
+ return data;
+}
+EXPORT_SYMBOL(intel_mid_msgbus_read32_raw);
+
+/*
+ * GU: this function is only used by the VISA and 'VXD' drivers.
+ */
+u32 intel_mid_msgbus_read32_raw_ext(u32 cmd, u32 cmd_ext)
+{
+ unsigned long irq_flags;
+ u32 data;
+
+ spin_lock_irqsave(&msgbus_lock, irq_flags);
+ pci_write_config_dword(pci_root, PCI_ROOT_MSGBUS_CTRL_EXT_REG, cmd_ext);
+ pci_write_config_dword(pci_root, PCI_ROOT_MSGBUS_CTRL_REG, cmd);
+ pci_read_config_dword(pci_root, PCI_ROOT_MSGBUS_DATA_REG, &data);
+ spin_unlock_irqrestore(&msgbus_lock, irq_flags);
+
+ return data;
+}
+EXPORT_SYMBOL(intel_mid_msgbus_read32_raw_ext);
+
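+/* Raw message-bus write: load the data register first, then issue
+ * the command through the control register. */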
+void intel_mid_msgbus_write32_raw(u32 cmd, u32 data)
+{
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&msgbus_lock, irq_flags);
+ pci_write_config_dword(pci_root, PCI_ROOT_MSGBUS_DATA_REG, data);
+ pci_write_config_dword(pci_root, PCI_ROOT_MSGBUS_CTRL_REG, cmd);
+ spin_unlock_irqrestore(&msgbus_lock, irq_flags);
+}
+EXPORT_SYMBOL(intel_mid_msgbus_write32_raw);
+
+/*
+ * GU: this function is only used by the VISA and 'VXD' drivers.
+ */
+void intel_mid_msgbus_write32_raw_ext(u32 cmd, u32 cmd_ext, u32 data)
+{
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&msgbus_lock, irq_flags);
+ pci_write_config_dword(pci_root, PCI_ROOT_MSGBUS_DATA_REG, data);
+ pci_write_config_dword(pci_root, PCI_ROOT_MSGBUS_CTRL_EXT_REG, cmd_ext);
+ pci_write_config_dword(pci_root, PCI_ROOT_MSGBUS_CTRL_REG, cmd);
+ spin_unlock_irqrestore(&msgbus_lock, irq_flags);
+}
+EXPORT_SYMBOL(intel_mid_msgbus_write32_raw_ext);
+
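+/* Build a read command: opcode in bits 31:24, port in 23:16, the low
+ * address byte in 15:8 and the dword byte enables in 7:0; address
+ * bits above the low byte go into the extended control register when
+ * non-zero. */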
+u32 intel_mid_msgbus_read32(u8 port, u32 addr)
+{
+ unsigned long irq_flags;
+ u32 data;
+ u32 cmd;
+ u32 cmdext;
+
+ cmd = (PCI_ROOT_MSGBUS_READ << 24) | (port << 16) |
+ ((addr & 0xff) << 8) | PCI_ROOT_MSGBUS_DWORD_ENABLE;
+ cmdext = addr & 0xffffff00;
+
+ spin_lock_irqsave(&msgbus_lock, irq_flags);
+
+ if (cmdext) {
+ /* This resets to 0 automatically, no need to write 0 */
+ pci_write_config_dword(pci_root, PCI_ROOT_MSGBUS_CTRL_EXT_REG,
+ cmdext);
+ }
+
+ pci_write_config_dword(pci_root, PCI_ROOT_MSGBUS_CTRL_REG, cmd);
+ pci_read_config_dword(pci_root, PCI_ROOT_MSGBUS_DATA_REG, &data);
+ spin_unlock_irqrestore(&msgbus_lock, irq_flags);
+
+ return data;
+}
+EXPORT_SYMBOL(intel_mid_msgbus_read32);
+
+void intel_mid_msgbus_write32(u8 port, u32 addr, u32 data)
+{
+ unsigned long irq_flags;
+ u32 cmd;
+ u32 cmdext;
+
+ cmd = (PCI_ROOT_MSGBUS_WRITE << 24) | (port << 16) |
+ ((addr & 0xFF) << 8) | PCI_ROOT_MSGBUS_DWORD_ENABLE;
+ cmdext = addr & 0xffffff00;
+
+ spin_lock_irqsave(&msgbus_lock, irq_flags);
+ pci_write_config_dword(pci_root, PCI_ROOT_MSGBUS_DATA_REG, data);
+
+ if (cmdext) {
+ /* This resets to 0 automatically, no need to write 0 */
+ pci_write_config_dword(pci_root, PCI_ROOT_MSGBUS_CTRL_EXT_REG,
+ cmdext);
+ }
+
+ pci_write_config_dword(pci_root, PCI_ROOT_MSGBUS_CTRL_REG, cmd);
+ spin_unlock_irqrestore(&msgbus_lock, irq_flags);
+}
+EXPORT_SYMBOL(intel_mid_msgbus_write32);
+
+/* called only from code that runs later than fs_initcall */
+u32 intel_mid_soc_stepping(void)
+{
+ return pci_root->revision;
+}
+EXPORT_SYMBOL(intel_mid_soc_stepping);
+
+static bool is_south_complex_device(struct pci_dev *dev)
+{
+ unsigned base_class = dev->class >> 16;
+ unsigned sub_class = (dev->class & SUB_CLASS_MASK) >> 8;
+
+ /* Other than the camera, PCI bridges and the display,
+ * everything else is a south complex device.
+ */
+ if (((base_class == PCI_BASE_CLASS_MULTIMEDIA) &&
+ (sub_class == ISP_SUB_CLASS)) ||
+ (base_class == PCI_BASE_CLASS_BRIDGE) ||
+ ((base_class == PCI_BASE_CLASS_DISPLAY) && !sub_class))
+ return false;
+ else
+ return true;
+}
+
+/* On the BYT platform, internal south complex devices are not subject
+ * to the 10 ms D3-to-D0 delay required by the PCI spec, so shorten
+ * their d3_delay.
+ */
+static void pci_d3_delay_fixup(struct pci_dev *dev)
+{
+ if (platform_is(INTEL_ATOM_BYT) ||
+ platform_is(INTEL_ATOM_CHT)) {
+ /* All internal devices are in bus 0. */
+ if (dev->bus->number == 0 && is_south_complex_device(dev)) {
+ dev->d3_delay = INTERNAL_PCI_PM_D3_WAIT;
+ dev->d3cold_delay = INTERNAL_PCI_PM_D3_WAIT;
+ }
+ }
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_d3_delay_fixup);
+
+#define PUNIT_SEMAPHORE (platform_is(INTEL_ATOM_BYT) ? 0x7 : 0x10E)
+#define GET_SEM() (intel_mid_msgbus_read32(PUNIT_PORT, PUNIT_SEMAPHORE) & 0x1)
+
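+/* Clear the low two bits of the PUNIT semaphore register, releasing
+ * ownership of the shared I2C controller. */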
+static void reset_semaphore(void)
+{
+ u32 data;
+
+ data = intel_mid_msgbus_read32(PUNIT_PORT, PUNIT_SEMAPHORE);
+ smp_mb();
+ data = data & 0xfffffffc;
+ intel_mid_msgbus_write32(PUNIT_PORT, PUNIT_SEMAPHORE, data);
+ smp_mb();
+}
+
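+/* Acquire the shared I2C controller from the PUNIT: optionally raise
+ * a CPU latency QoS request, ring the PUNIT doorbell and busy-wait
+ * (100 us steps, up to ~100 ms) for bit 0 of the semaphore register
+ * to be set. Local IRQs stay disabled until the matching release
+ * call. */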
+int intel_mid_dw_i2c_acquire_ownership(void)
+{
+ int ret = 0;
+ u32 data = 0; /* data sent to PUNIT */
+ u32 cmd;
+ u32 cmdext;
+ int timeout = 1000;
+
+ if (DW_I2C_NEED_QOS)
+ pm_qos_update_request(&pm_qos, CSTATE_EXIT_LATENCY_C1 - 1);
+
+ /*
+ * We need to disable IRQs. Otherwise the main thread
+ * might be preempted while another thread has IRQs
+ * disabled for a long time. Some IRQ handlers might
+ * also trigger a power voltage change.
+ */
+ BUG_ON(irqs_disabled());
+ local_irq_disable();
+
+ /* host driver writes 0x2 to side band register 0x7 */
+ intel_mid_msgbus_write32(PUNIT_PORT, PUNIT_SEMAPHORE, 0x2);
+ smp_mb();
+
+ /* host driver sends the 0xE0 doorbell opcode to the PUNIT (register 0) */
+ cmd = (PUNIT_DOORBELL_OPCODE << 24) | (PUNIT_PORT << 16) |
+ ((PUNIT_DOORBELL_REG & 0xFF) << 8) | PCI_ROOT_MSGBUS_DWORD_ENABLE;
+ cmdext = PUNIT_DOORBELL_REG & 0xffffff00;
+
+ if (cmdext)
+ intel_mid_msgbus_write32_raw_ext(cmd, cmdext, data);
+ else
+ intel_mid_msgbus_write32_raw(cmd, data);
+
+ /* host driver waits for bit 0 to be set in side band 0x7 */
+ while (GET_SEM() != 0x1) {
+ udelay(100);
+ timeout--;
+ if (timeout <= 0) {
+ pr_err("Timeout: semaphore timed out, reset sem\n");
+ ret = -ETIMEDOUT;
+ reset_semaphore();
+ /* Delay 1 ms in case of a race with the PUNIT */
+ udelay(1000);
+ if (GET_SEM() != 0) {
+ /* Reset again as the kernel might race with the PUNIT */
+ reset_semaphore();
+ }
+ pr_err("PUNIT SEM: %d\n",
+ intel_mid_msgbus_read32(PUNIT_PORT,
+ PUNIT_SEMAPHORE));
+ local_irq_enable();
+
+ if (DW_I2C_NEED_QOS) {
+ pm_qos_update_request(&pm_qos,
+ PM_QOS_DEFAULT_VALUE);
+ }
+
+ return ret;
+ }
+ }
+ smp_mb();
+
+ return ret;
+}
+EXPORT_SYMBOL(intel_mid_dw_i2c_acquire_ownership);
+
+int intel_mid_dw_i2c_release_ownership(void)
+{
+ reset_semaphore();
+ local_irq_enable();
+
+ if (DW_I2C_NEED_QOS)
+ pm_qos_update_request(&pm_qos, PM_QOS_DEFAULT_VALUE);
+
+ return 0;
+}
+EXPORT_SYMBOL(intel_mid_dw_i2c_release_ownership);
diff --git a/drivers/staging/media/bcm2048/radio-bcm2048.c b/drivers/staging/media/bcm2048/radio-bcm2048.c
index d605c41d0424..38f72d069e27 100644
--- a/drivers/staging/media/bcm2048/radio-bcm2048.c
+++ b/drivers/staging/media/bcm2048/radio-bcm2048.c
@@ -177,12 +177,12 @@
#define BCM2048_FREQDEV_UNIT 10000
#define BCM2048_FREQV4L2_MULTI 625
-#define dev_to_v4l2(f) ((f * BCM2048_FREQDEV_UNIT) / BCM2048_FREQV4L2_MULTI)
-#define v4l2_to_dev(f) ((f * BCM2048_FREQV4L2_MULTI) / BCM2048_FREQDEV_UNIT)
+#define dev_to_v4l2(f) (((f) * BCM2048_FREQDEV_UNIT) / BCM2048_FREQV4L2_MULTI)
+#define v4l2_to_dev(f) (((f) * BCM2048_FREQV4L2_MULTI) / BCM2048_FREQDEV_UNIT)
-#define msb(x) ((u8)((u16)x >> 8))
-#define lsb(x) ((u8)((u16)x & 0x00FF))
-#define compose_u16(msb, lsb) (((u16)msb << 8) | lsb)
+#define msb(x) ((u8)((u16)(x) >> 8))
+#define lsb(x) ((u8)((u16)(x) & 0x00FF))
+#define compose_u16(msb, lsb) (((u16)(msb) << 8) | (lsb))
#define BCM2048_DEFAULT_POWERING_DELAY 20
#define BCM2048_DEFAULT_REGION 0x02
@@ -1534,7 +1534,11 @@ static int bcm2048_parse_rt_match_c(struct bcm2048_device *bdev, int i,
if (crc == BCM2048_RDS_CRC_UNRECOVARABLE)
return 0;
- BUG_ON((index+2) >= BCM2048_MAX_RDS_RT);
+ if ((index + 2) >= BCM2048_MAX_RDS_RT) {
+ dev_err(&bdev->client->dev,
+ "Incorrect index = %d\n", index);
+ return 0;
+ }
if ((bdev->rds_info.radio_text[i] & BCM2048_RDS_BLOCK_MASK) ==
BCM2048_RDS_BLOCK_C) {
@@ -1557,7 +1561,11 @@ static void bcm2048_parse_rt_match_d(struct bcm2048_device *bdev, int i,
if (crc == BCM2048_RDS_CRC_UNRECOVARABLE)
return;
- BUG_ON((index+4) >= BCM2048_MAX_RDS_RT);
+ if ((index + 4) >= BCM2048_MAX_RDS_RT) {
+ dev_err(&bdev->client->dev,
+ "Incorrect index = %d\n", index);
+ return;
+ }
if ((bdev->rds_info.radio_text[i] & BCM2048_RDS_BLOCK_MASK) ==
BCM2048_RDS_BLOCK_D)
@@ -2008,7 +2016,7 @@ static ssize_t bcm2048_##prop##_read(struct device *dev, \
if (!bdev) \
return -ENODEV; \
\
- out = kzalloc(size + 1, GFP_KERNEL); \
+ out = kzalloc((size) + 1, GFP_KERNEL); \
if (!out) \
return -ENOMEM; \
\
@@ -2634,7 +2642,7 @@ exit:
return err;
}
-static int __exit bcm2048_i2c_driver_remove(struct i2c_client *client)
+static int bcm2048_i2c_driver_remove(struct i2c_client *client)
{
struct bcm2048_device *bdev = i2c_get_clientdata(client);
@@ -2673,7 +2681,7 @@ static struct i2c_driver bcm2048_i2c_driver = {
.name = BCM2048_DRIVER_NAME,
},
.probe = bcm2048_i2c_driver_probe,
- .remove = __exit_p(bcm2048_i2c_driver_remove),
+ .remove = bcm2048_i2c_driver_remove,
.id_table = bcm2048_id,
};
diff --git a/drivers/staging/media/cxd2099/cxd2099.c b/drivers/staging/media/cxd2099/cxd2099.c
index c72c3f09f175..18186d0fa1a6 100644
--- a/drivers/staging/media/cxd2099/cxd2099.c
+++ b/drivers/staging/media/cxd2099/cxd2099.c
@@ -131,7 +131,6 @@ static int read_reg(struct cxd *ci, u8 reg, u8 *val)
return read_block(ci, reg, val, 1);
}
-
static int read_pccard(struct cxd *ci, u16 address, u8 *data, u8 n)
{
int status;
@@ -152,8 +151,8 @@ static int write_pccard(struct cxd *ci, u16 address, u8 *data, u8 n)
if (!status) {
u8 buf[256] = {3};
- memcpy(buf+1, data, n);
- status = i2c_write(ci->i2c, ci->cfg.adr, buf, n+1);
+ memcpy(buf + 1, data, n);
+ status = i2c_write(ci->i2c, ci->cfg.adr, buf, n + 1);
}
return status;
}
@@ -181,34 +180,6 @@ static int write_io(struct cxd *ci, u16 address, u8 val)
return status;
}
-#if 0
-static int read_io_data(struct cxd *ci, u8 *data, u8 n)
-{
- int status;
- u8 addr[3] = { 2, 0, 0 };
-
- status = i2c_write(ci->i2c, ci->cfg.adr, addr, 3);
- if (!status)
- status = i2c_read(ci->i2c, ci->cfg.adr, 3, data, n);
- return 0;
-}
-
-static int write_io_data(struct cxd *ci, u8 *data, u8 n)
-{
- int status;
- u8 addr[3] = {2, 0, 0};
-
- status = i2c_write(ci->i2c, ci->cfg.adr, addr, 3);
- if (!status) {
- u8 buf[256] = {3};
-
- memcpy(buf+1, data, n);
- status = i2c_write(ci->i2c, ci->cfg.adr, buf, n + 1);
- }
- return 0;
-}
-#endif
-
static int write_regm(struct cxd *ci, u8 reg, u8 val, u8 mask)
{
int status;
@@ -292,8 +263,6 @@ static void cam_mode(struct cxd *ci, int mode)
ci->cammode = mode;
}
-
-
static int init(struct cxd *ci)
{
int status;
@@ -329,12 +298,6 @@ static int init(struct cxd *ci)
if (status < 0)
break;
-#if 0
- /* Input Mode C, BYPass Serial, TIVAL = low, MSB */
- status = write_reg(ci, 0x09, 0x4D);
- if (status < 0)
- break;
-#endif
/* TOSTRT = 8, Mode B (gated clock), falling Edge,
* Serial, POL=HIGH, MSB
*/
@@ -432,23 +395,6 @@ static int read_attribute_mem(struct dvb_ca_en50221 *ca,
int slot, int address)
{
struct cxd *ci = ca->data;
-#if 0
- if (ci->amem_read) {
- if (address <= 0 || address > 1024)
- return -EIO;
- return ci->amem[address];
- }
-
- mutex_lock(&ci->lock);
- write_regm(ci, 0x06, 0x00, 0x05);
- read_pccard(ci, 0, &ci->amem[0], 128);
- read_pccard(ci, 128, &ci->amem[0], 128);
- read_pccard(ci, 256, &ci->amem[0], 128);
- read_pccard(ci, 384, &ci->amem[0], 128);
- write_regm(ci, 0x06, 0x05, 0x05);
- mutex_unlock(&ci->lock);
- return ci->amem[address];
-#else
u8 val;
mutex_lock(&ci->lock);
@@ -457,7 +403,6 @@ static int read_attribute_mem(struct dvb_ca_en50221 *ca,
mutex_unlock(&ci->lock);
/* printk(KERN_INFO "%02x:%02x\n", address,val); */
return val;
-#endif
}
static int write_attribute_mem(struct dvb_ca_en50221 *ca, int slot,
@@ -502,15 +447,6 @@ static int slot_reset(struct dvb_ca_en50221 *ca, int slot)
struct cxd *ci = ca->data;
mutex_lock(&ci->lock);
-#if 0
- write_reg(ci, 0x00, 0x21);
- write_reg(ci, 0x06, 0x1F);
- write_reg(ci, 0x00, 0x31);
-#else
-#if 0
- write_reg(ci, 0x06, 0x1F);
- write_reg(ci, 0x06, 0x2F);
-#else
cam_mode(ci, 0);
write_reg(ci, 0x00, 0x21);
write_reg(ci, 0x06, 0x1F);
@@ -518,25 +454,14 @@ static int slot_reset(struct dvb_ca_en50221 *ca, int slot)
write_regm(ci, 0x20, 0x80, 0x80);
write_reg(ci, 0x03, 0x02);
ci->ready = 0;
-#endif
-#endif
ci->mode = -1;
{
int i;
-#if 0
- u8 val;
-#endif
+
for (i = 0; i < 100; i++) {
usleep_range(10000, 11000);
-#if 0
- read_reg(ci, 0x06, &val);
- dev_info(&ci->i2c->dev, "%d:%02x\n", i, val);
- if (!(val&0x10))
- break;
-#else
if (ci->ready)
break;
-#endif
}
}
mutex_unlock(&ci->lock);
@@ -572,7 +497,6 @@ static int slot_ts_enable(struct dvb_ca_en50221 *ca, int slot)
return 0;
}
-
static int campoll(struct cxd *ci)
{
u8 istat;
@@ -582,18 +506,18 @@ static int campoll(struct cxd *ci)
return 0;
write_reg(ci, 0x05, istat);
- if (istat&0x40) {
+ if (istat & 0x40) {
ci->dr = 1;
dev_info(&ci->i2c->dev, "DR\n");
}
- if (istat&0x20)
+ if (istat & 0x20)
dev_info(&ci->i2c->dev, "WC\n");
- if (istat&2) {
+ if (istat & 2) {
u8 slotstat;
read_reg(ci, 0x01, &slotstat);
- if (!(2&slotstat)) {
+ if (!(2 & slotstat)) {
if (!ci->slot_stat) {
ci->slot_stat = DVB_CA_EN50221_POLL_CAM_PRESENT;
write_regm(ci, 0x03, 0x08, 0x08);
@@ -607,7 +531,7 @@ static int campoll(struct cxd *ci)
ci->ready = 0;
}
}
- if (istat&8 &&
+ if (istat & 8 &&
ci->slot_stat == DVB_CA_EN50221_POLL_CAM_PRESENT) {
ci->ready = 1;
ci->slot_stat |= DVB_CA_EN50221_POLL_CAM_READY;
@@ -616,7 +540,6 @@ static int campoll(struct cxd *ci)
return 0;
}
-
static int poll_slot_status(struct dvb_ca_en50221 *ca, int slot, int open)
{
struct cxd *ci = ca->data;
@@ -648,7 +571,7 @@ static int read_data(struct dvb_ca_en50221 *ca, int slot, u8 *ebuf, int ecount)
mutex_lock(&ci->lock);
read_reg(ci, 0x0f, &msb);
read_reg(ci, 0x10, &lsb);
- len = (msb<<8)|lsb;
+ len = (msb << 8) | lsb;
read_block(ci, 0x12, ebuf, len);
ci->dr = 0;
mutex_unlock(&ci->lock);
@@ -662,8 +585,8 @@ static int write_data(struct dvb_ca_en50221 *ca, int slot, u8 *ebuf, int ecount)
mutex_lock(&ci->lock);
dev_info(&ci->i2c->dev, "write_data %d\n", ecount);
- write_reg(ci, 0x0d, ecount>>8);
- write_reg(ci, 0x0e, ecount&0xff);
+ write_reg(ci, 0x0d, ecount >> 8);
+ write_reg(ci, 0x0e, ecount & 0xff);
write_block(ci, 0x11, ebuf, ecount);
mutex_unlock(&ci->lock);
return ecount;
@@ -698,7 +621,7 @@ struct dvb_ca_en50221 *cxd2099_attach(struct cxd2099_cfg *cfg,
return NULL;
}
- ci = kzalloc(sizeof(struct cxd), GFP_KERNEL);
+ ci = kzalloc(sizeof(*ci), GFP_KERNEL);
if (!ci)
return NULL;
diff --git a/drivers/staging/media/davinci_vpfe/davinci_vpfe_user.h b/drivers/staging/media/davinci_vpfe/davinci_vpfe_user.h
index d3f34f9bf712..7cc115c9ebe6 100644
--- a/drivers/staging/media/davinci_vpfe/davinci_vpfe_user.h
+++ b/drivers/staging/media/davinci_vpfe/davinci_vpfe_user.h
@@ -155,8 +155,8 @@ struct vpfe_isif_dfc {
};
/************************************************************************
-* Digital/Black clamp or DC Subtract parameters
-************************************************************************/
+ * Digital/Black clamp or DC Subtract parameters
+ ************************************************************************/
/**
* Horizontal Black Clamp modes
*/
@@ -309,8 +309,8 @@ struct vpfe_isif_black_clamp {
};
/*************************************************************************
-** Color Space Conversion (CSC)
-*************************************************************************/
+ ** Color Space Conversion (CSC)
+ *************************************************************************/
/**
* Number of Coefficient values used for CSC
*/
@@ -331,8 +331,8 @@ struct float_16_bit {
};
/*************************************************************************
-** Color Space Conversion parameters
-*************************************************************************/
+ ** Color Space Conversion parameters
+ *************************************************************************/
/**
* Structure used for CSC config params
*/
@@ -365,8 +365,8 @@ enum vpfe_isif_datasft {
#define VPFE_ISIF_LINEAR_TAB_SIZE 192
/*************************************************************************
-** Linearization parameters
-*************************************************************************/
+ ** Linearization parameters
+ *************************************************************************/
/**
* Structure for Sensor data linearization
*/
@@ -382,8 +382,8 @@ struct vpfe_isif_linearize {
};
/*************************************************************************
-** ISIF Raw configuration parameters
-*************************************************************************/
+ ** ISIF Raw configuration parameters
+ *************************************************************************/
enum vpfe_isif_fmt_mode {
VPFE_ISIF_SPLIT,
VPFE_ISIF_COMBINE
@@ -1189,8 +1189,8 @@ struct vpfe_ipipe_config {
};
/*******************************************************************
-** Resizer API structures
-*******************************************************************/
+ ** Resizer API structures
+ *******************************************************************/
/* Interpolation types used for horizontal rescale */
enum vpfe_rsz_intp_t {
VPFE_RSZ_INTP_CUBIC,
diff --git a/drivers/staging/media/davinci_vpfe/dm365_ipipe.c b/drivers/staging/media/davinci_vpfe/dm365_ipipe.c
index ff47a8f369fc..6a3434cebd79 100644
--- a/drivers/staging/media/davinci_vpfe/dm365_ipipe.c
+++ b/drivers/staging/media/davinci_vpfe/dm365_ipipe.c
@@ -1803,14 +1803,14 @@ vpfe_ipipe_init(struct vpfe_ipipe_device *ipipe, struct platform_device *pdev)
return -EBUSY;
ipipe->base_addr = ioremap_nocache(res->start, res_len);
if (!ipipe->base_addr)
- return -EBUSY;
+ goto error_release;
res = platform_get_resource(pdev, IORESOURCE_MEM, 6);
if (!res)
- return -ENOENT;
+ goto error_unmap;
ipipe->isp5_base_addr = ioremap_nocache(res->start, res_len);
if (!ipipe->isp5_base_addr)
- return -EBUSY;
+ goto error_unmap;
v4l2_subdev_init(sd, &ipipe_v4l2_ops);
sd->internal_ops = &ipipe_v4l2_internal_ops;
@@ -1839,6 +1839,12 @@ vpfe_ipipe_init(struct vpfe_ipipe_device *ipipe, struct platform_device *pdev)
sd->ctrl_handler = &ipipe->ctrls;
return media_entity_pads_init(me, IPIPE_PADS_NUM, pads);
+
+error_unmap:
+ iounmap(ipipe->base_addr);
+error_release:
+ release_mem_region(res->start, res_len);
+ return -ENOMEM;
}
/*
diff --git a/drivers/staging/media/davinci_vpfe/dm365_ipipe_hw.c b/drivers/staging/media/davinci_vpfe/dm365_ipipe_hw.c
index 958ef71ee4d5..a893072d0f04 100644
--- a/drivers/staging/media/davinci_vpfe/dm365_ipipe_hw.c
+++ b/drivers/staging/media/davinci_vpfe/dm365_ipipe_hw.c
@@ -1003,8 +1003,8 @@ void ipipe_set_car_regs(void __iomem *base_addr, struct vpfe_ipipe_car *car)
ipipe_set_mf(base_addr);
ipipe_set_gain_ctrl(base_addr, car);
/* Set the threshold for switching between
- * the two Here we overwrite the MF SW0 value
- */
+ * the two. Here we overwrite the MF SW0 value
+ */
regw_ip(base_addr, VPFE_IPIPE_CAR_DYN_SWITCH, CAR_TYP);
val = car->sw1;
val <<= CAR_SW1_SHIFT;
diff --git a/drivers/staging/media/davinci_vpfe/dm365_isif_regs.h b/drivers/staging/media/davinci_vpfe/dm365_isif_regs.h
index 8aceabb43f8e..64fbb459baa2 100644
--- a/drivers/staging/media/davinci_vpfe/dm365_isif_regs.h
+++ b/drivers/staging/media/davinci_vpfe/dm365_isif_regs.h
@@ -59,8 +59,8 @@
#define REC656IF 0x84
#define CCDCFG 0x88
/*****************************************************
-* Defect Correction registers
-*****************************************************/
+ * Defect Correction registers
+ *****************************************************/
#define DFCCTL 0x8c
#define VDFSATLV 0x90
#define DFCMEMCTL 0x94
@@ -70,8 +70,8 @@
#define DFCMEM3 0xa4
#define DFCMEM4 0xa8
/****************************************************
-* Black Clamp registers
-****************************************************/
+ * Black Clamp registers
+ ****************************************************/
#define CLAMPCFG 0xac
#define CLDCOFST 0xb0
#define CLSV 0xb4
@@ -84,8 +84,8 @@
#define CLVWIN2 0xd0
#define CLVWIN3 0xd4
/****************************************************
-* Lense Shading Correction
-****************************************************/
+ * Lens Shading Correction
+ ****************************************************/
#define DATAHOFST 0xd8
#define DATAVOFST 0xdc
#define LSCHVAL 0xe0
@@ -102,8 +102,8 @@
#define TWODLSCIRQEN 0x10c
#define TWODLSCIRQST 0x110
/****************************************************
-* Data formatter
-****************************************************/
+ * Data formatter
+ ****************************************************/
#define FMTCFG 0x114
#define FMTPLEN 0x118
#define FMTSPH 0x11c
@@ -128,8 +128,8 @@
#define FMTPGMAPS6 0x19c
#define FMTPGMAPS7 0x1a0
/************************************************
-* Color Space Converter
-************************************************/
+ * Color Space Converter
+ ************************************************/
#define CSCCTL 0x1a4
#define CSCM0 0x1a8
#define CSCM1 0x1ac
diff --git a/drivers/staging/media/davinci_vpfe/dm365_resizer.c b/drivers/staging/media/davinci_vpfe/dm365_resizer.c
index 5fbc2d447ff2..857b0e847c5e 100644
--- a/drivers/staging/media/davinci_vpfe/dm365_resizer.c
+++ b/drivers/staging/media/davinci_vpfe/dm365_resizer.c
@@ -1133,9 +1133,9 @@ void vpfe_resizer_buffer_isr(struct vpfe_resizer_device *resizer)
}
} else if (fid == 0) {
/*
- * out of sync. Recover from any hardware out-of-sync.
- * May loose one frame
- */
+ * out of sync. Recover from any hardware out-of-sync.
+ * May lose one frame
+ */
video_out->field_id = fid;
}
}
diff --git a/drivers/staging/media/davinci_vpfe/vpfe_mc_capture.c b/drivers/staging/media/davinci_vpfe/vpfe_mc_capture.c
index 32109cdd73a6..bffe2153b910 100644
--- a/drivers/staging/media/davinci_vpfe/vpfe_mc_capture.c
+++ b/drivers/staging/media/davinci_vpfe/vpfe_mc_capture.c
@@ -228,7 +228,7 @@ static int vpfe_enable_clock(struct vpfe_device *vpfe_dev)
vpfe_dev->clks = kcalloc(vpfe_cfg->num_clocks,
sizeof(*vpfe_dev->clks), GFP_KERNEL);
- if (vpfe_dev->clks == NULL)
+ if (!vpfe_dev->clks)
return -ENOMEM;
for (i = 0; i < vpfe_cfg->num_clocks; i++) {
@@ -348,7 +348,7 @@ static int register_i2c_devices(struct vpfe_device *vpfe_dev)
vpfe_dev->sd =
kcalloc(num_subdevs, sizeof(struct v4l2_subdev *),
GFP_KERNEL);
- if (vpfe_dev->sd == NULL)
+ if (!vpfe_dev->sd)
return -ENOMEM;
for (i = 0, k = 0; i < num_subdevs; i++) {
diff --git a/drivers/staging/media/lirc/Kconfig b/drivers/staging/media/lirc/Kconfig
index bc67da254262..3e350a9922de 100644
--- a/drivers/staging/media/lirc/Kconfig
+++ b/drivers/staging/media/lirc/Kconfig
@@ -12,18 +12,6 @@ menuconfig LIRC_STAGING
if LIRC_STAGING
-config LIRC_SASEM
- tristate "Sasem USB IR Remote"
- depends on LIRC && USB
- help
- Driver for the Sasem OnAir Remocon-V or Dign HV5 HTPC IR/VFD Module
-
-config LIRC_SIR
- tristate "Built-in SIR IrDA port"
- depends on RC_CORE
- help
- Driver for the SIR IrDA port
-
config LIRC_ZILOG
tristate "Zilog/Hauppauge IR Transmitter"
depends on LIRC && I2C
diff --git a/drivers/staging/media/lirc/Makefile b/drivers/staging/media/lirc/Makefile
index 28740c94349c..665562436e30 100644
--- a/drivers/staging/media/lirc/Makefile
+++ b/drivers/staging/media/lirc/Makefile
@@ -3,6 +3,4 @@
# Each configuration option enables a list of files.
-obj-$(CONFIG_LIRC_SASEM) += lirc_sasem.o
-obj-$(CONFIG_LIRC_SIR) += lirc_sir.o
obj-$(CONFIG_LIRC_ZILOG) += lirc_zilog.o
diff --git a/drivers/staging/media/lirc/lirc_sasem.c b/drivers/staging/media/lirc/lirc_sasem.c
deleted file mode 100644
index b0c176e14b6b..000000000000
--- a/drivers/staging/media/lirc/lirc_sasem.c
+++ /dev/null
@@ -1,899 +0,0 @@
-/*
- * lirc_sasem.c - USB remote support for LIRC
- * Version 0.5
- *
- * Copyright (C) 2004-2005 Oliver Stabel <oliver.stabel@gmx.de>
- * Tim Davies <tim@opensystems.net.au>
- *
- * This driver was derived from:
- * Venky Raju <dev@venky.ws>
- * "lirc_imon - "LIRC/VFD driver for Ahanix/Soundgraph IMON IR/VFD"
- * Paul Miller <pmiller9@users.sourceforge.net>'s 2003-2004
- * "lirc_atiusb - USB remote support for LIRC"
- * Culver Consulting Services <henry@culcon.com>'s 2003
- * "Sasem OnAir VFD/IR USB driver"
- *
- *
- * NOTE - The LCDproc iMon driver should work with this module. More info at
- * http://www.frogstorm.info/sasem
- */
-
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/uaccess.h>
-#include <linux/usb.h>
-#include <linux/ktime.h>
-
-#include <media/lirc.h>
-#include <media/lirc_dev.h>
-
-#define MOD_AUTHOR "Oliver Stabel <oliver.stabel@gmx.de>, " \
- "Tim Davies <tim@opensystems.net.au>"
-#define MOD_DESC "USB Driver for Sasem Remote Controller V1.1"
-#define MOD_NAME "lirc_sasem"
-#define MOD_VERSION "0.5"
-
-#define VFD_MINOR_BASE 144 /* Same as LCD */
-#define DEVICE_NAME "lcd%d"
-
-#define BUF_CHUNK_SIZE 8
-#define BUF_SIZE 128
-
-#define IOCTL_LCD_CONTRAST 1
-
-/*** P R O T O T Y P E S ***/
-
-/* USB Callback prototypes */
-static int sasem_probe(struct usb_interface *interface,
- const struct usb_device_id *id);
-static void sasem_disconnect(struct usb_interface *interface);
-static void usb_rx_callback(struct urb *urb);
-static void usb_tx_callback(struct urb *urb);
-
-/* VFD file_operations function prototypes */
-static int vfd_open(struct inode *inode, struct file *file);
-static long vfd_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
-static int vfd_close(struct inode *inode, struct file *file);
-static ssize_t vfd_write(struct file *file, const char __user *buf,
- size_t n_bytes, loff_t *pos);
-
-/* LIRC driver function prototypes */
-static int ir_open(void *data);
-static void ir_close(void *data);
-
-/*** G L O B A L S ***/
-#define SASEM_DATA_BUF_SZ 32
-
-struct sasem_context {
- struct usb_device *dev;
- int vfd_isopen; /* VFD port has been opened */
- unsigned int vfd_contrast; /* VFD contrast */
- int ir_isopen; /* IR port has been opened */
- int dev_present; /* USB device presence */
- struct mutex ctx_lock; /* to lock this object */
- wait_queue_head_t remove_ok; /* For unexpected USB disconnects */
-
- struct lirc_driver *driver;
- struct usb_endpoint_descriptor *rx_endpoint;
- struct usb_endpoint_descriptor *tx_endpoint;
- struct urb *rx_urb;
- struct urb *tx_urb;
- unsigned char usb_rx_buf[8];
- unsigned char usb_tx_buf[8];
-
- struct tx_t {
- unsigned char data_buf[SASEM_DATA_BUF_SZ]; /* user data
- * buffer
- */
- struct completion finished; /* wait for write to finish */
- atomic_t busy; /* write in progress */
- int status; /* status of tx completion */
- } tx;
-
- /* for dealing with repeat codes (wish there was a toggle bit!) */
- ktime_t presstime;
- char lastcode[8];
- int codesaved;
-};
-
-/* VFD file operations */
-static const struct file_operations vfd_fops = {
- .owner = THIS_MODULE,
- .open = &vfd_open,
- .write = vfd_write,
- .unlocked_ioctl = &vfd_ioctl,
- .release = &vfd_close,
- .llseek = noop_llseek,
-};
-
-/* USB Device ID for Sasem USB Control Board */
-static struct usb_device_id sasem_usb_id_table[] = {
- /* Sasem USB Control Board */
- { USB_DEVICE(0x11ba, 0x0101) },
- /* Terminating entry */
- {}
-};
-
-/* USB Device data */
-static struct usb_driver sasem_driver = {
- .name = MOD_NAME,
- .probe = sasem_probe,
- .disconnect = sasem_disconnect,
- .id_table = sasem_usb_id_table,
-};
-
-static struct usb_class_driver sasem_class = {
- .name = DEVICE_NAME,
- .fops = &vfd_fops,
- .minor_base = VFD_MINOR_BASE,
-};
-
-/* to prevent races between open() and disconnect() */
-static DEFINE_MUTEX(disconnect_lock);
-
-static int debug;
-
-
-/*** M O D U L E C O D E ***/
-MODULE_AUTHOR(MOD_AUTHOR);
-MODULE_DESCRIPTION(MOD_DESC);
-MODULE_LICENSE("GPL");
-module_param(debug, int, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(debug, "Debug messages: 0=no, 1=yes (default: no)");
-
-static void delete_context(struct sasem_context *context)
-{
- usb_free_urb(context->tx_urb); /* VFD */
- usb_free_urb(context->rx_urb); /* IR */
- lirc_buffer_free(context->driver->rbuf);
- kfree(context->driver->rbuf);
- kfree(context->driver);
- kfree(context);
-}
-
-static void deregister_from_lirc(struct sasem_context *context)
-{
- int retval;
- int minor = context->driver->minor;
-
- retval = lirc_unregister_driver(minor);
- if (retval)
- dev_err(&context->dev->dev,
- "%s: unable to deregister from lirc (%d)\n",
- __func__, retval);
- else
- dev_info(&context->dev->dev,
- "Deregistered Sasem driver (minor:%d)\n", minor);
-}
-
-/**
- * Called when the VFD device (e.g. /dev/usb/lcd)
- * is opened by the application.
- */
-static int vfd_open(struct inode *inode, struct file *file)
-{
- struct usb_interface *interface;
- struct sasem_context *context = NULL;
- int subminor;
- int retval = 0;
-
- /* prevent races with disconnect */
- mutex_lock(&disconnect_lock);
-
- subminor = iminor(inode);
- interface = usb_find_interface(&sasem_driver, subminor);
- if (!interface) {
- pr_err("%s: could not find interface for minor %d\n",
- __func__, subminor);
- retval = -ENODEV;
- goto exit;
- }
- context = usb_get_intfdata(interface);
-
- if (!context) {
- dev_err(&interface->dev, "no context found for minor %d\n",
- subminor);
- retval = -ENODEV;
- goto exit;
- }
-
- mutex_lock(&context->ctx_lock);
-
- if (context->vfd_isopen) {
- dev_err(&interface->dev,
- "%s: VFD port is already open", __func__);
- retval = -EBUSY;
- } else {
- context->vfd_isopen = 1;
- file->private_data = context;
- dev_info(&interface->dev, "VFD port opened\n");
- }
-
- mutex_unlock(&context->ctx_lock);
-
-exit:
- mutex_unlock(&disconnect_lock);
- return retval;
-}
-
-/**
- * Called when the VFD device (e.g. /dev/usb/lcd)
- * is closed by the application.
- */
-static long vfd_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
- struct sasem_context *context;
-
- context = (struct sasem_context *) file->private_data;
-
- if (!context) {
- pr_err("%s: no context for device\n", __func__);
- return -ENODEV;
- }
-
- mutex_lock(&context->ctx_lock);
-
- switch (cmd) {
- case IOCTL_LCD_CONTRAST:
- if (arg > 1000)
- arg = 1000;
- context->vfd_contrast = (unsigned int)arg;
- break;
- default:
- pr_info("Unknown IOCTL command\n");
- mutex_unlock(&context->ctx_lock);
- return -ENOIOCTLCMD; /* not supported */
- }
-
- mutex_unlock(&context->ctx_lock);
- return 0;
-}
-
-/**
- * Called when the VFD device (e.g. /dev/usb/lcd)
- * is closed by the application.
- */
-static int vfd_close(struct inode *inode, struct file *file)
-{
- struct sasem_context *context = NULL;
- int retval = 0;
-
- context = (struct sasem_context *) file->private_data;
-
- if (!context) {
- pr_err("%s: no context for device\n", __func__);
- return -ENODEV;
- }
-
- mutex_lock(&context->ctx_lock);
-
- if (!context->vfd_isopen) {
- dev_err(&context->dev->dev, "%s: VFD is not open\n", __func__);
- retval = -EIO;
- } else {
- context->vfd_isopen = 0;
- dev_info(&context->dev->dev, "VFD port closed\n");
- if (!context->dev_present && !context->ir_isopen) {
- /* Device disconnected before close and IR port is
- * not open. If IR port is open, context will be
- * deleted by ir_close.
- */
- mutex_unlock(&context->ctx_lock);
- delete_context(context);
- return retval;
- }
- }
-
- mutex_unlock(&context->ctx_lock);
- return retval;
-}
-
-/**
- * Sends a packet to the VFD.
- */
-static int send_packet(struct sasem_context *context)
-{
- unsigned int pipe;
- int interval = 0;
- int retval = 0;
-
- pipe = usb_sndintpipe(context->dev,
- context->tx_endpoint->bEndpointAddress);
- interval = context->tx_endpoint->bInterval;
-
- usb_fill_int_urb(context->tx_urb, context->dev, pipe,
- context->usb_tx_buf, sizeof(context->usb_tx_buf),
- usb_tx_callback, context, interval);
-
- context->tx_urb->actual_length = 0;
-
- init_completion(&context->tx.finished);
- atomic_set(&context->tx.busy, 1);
-
- retval = usb_submit_urb(context->tx_urb, GFP_KERNEL);
- if (retval) {
- atomic_set(&context->tx.busy, 0);
- dev_err(&context->dev->dev, "error submitting urb (%d)\n",
- retval);
- } else {
- /* Wait for transmission to complete (or abort) */
- mutex_unlock(&context->ctx_lock);
- wait_for_completion(&context->tx.finished);
- mutex_lock(&context->ctx_lock);
-
- retval = context->tx.status;
- if (retval)
- dev_err(&context->dev->dev,
- "packet tx failed (%d)\n", retval);
- }
-
- return retval;
-}
-
-/**
- * Writes data to the VFD. The Sasem VFD is 2x16 characters
- * and requires data in 9 consecutive USB interrupt packets,
- * each packet carrying 8 bytes.
- */
-static ssize_t vfd_write(struct file *file, const char __user *buf,
- size_t n_bytes, loff_t *pos)
-{
- int i;
- int retval = 0;
- struct sasem_context *context;
- int *data_buf = NULL;
-
- context = (struct sasem_context *) file->private_data;
- if (!context) {
- pr_err("%s: no context for device\n", __func__);
- return -ENODEV;
- }
-
- mutex_lock(&context->ctx_lock);
-
- if (!context->dev_present) {
- pr_err("%s: no Sasem device present\n", __func__);
- retval = -ENODEV;
- goto exit;
- }
-
- if (n_bytes <= 0 || n_bytes > SASEM_DATA_BUF_SZ) {
- dev_err(&context->dev->dev, "%s: invalid payload size\n",
- __func__);
- retval = -EINVAL;
- goto exit;
- }
-
- data_buf = memdup_user(buf, n_bytes);
- if (IS_ERR(data_buf)) {
- mutex_unlock(&context->ctx_lock);
- return PTR_ERR(data_buf);
- }
-
- memcpy(context->tx.data_buf, data_buf, n_bytes);
-
- /* Pad with spaces */
- for (i = n_bytes; i < SASEM_DATA_BUF_SZ; ++i)
- context->tx.data_buf[i] = ' ';
-
- /* Nine 8 byte packets to be sent */
- /* NOTE: "\x07\x01\0\0\0\0\0\0" or "\x0c\0\0\0\0\0\0\0"
- * will clear the VFD
- */
- for (i = 0; i < 9; i++) {
- switch (i) {
- case 0:
- memcpy(context->usb_tx_buf, "\x07\0\0\0\0\0\0\0", 8);
- context->usb_tx_buf[1] = (context->vfd_contrast) ?
- (0x2B - (context->vfd_contrast - 1) / 250)
- : 0x2B;
- break;
- case 1:
- memcpy(context->usb_tx_buf, "\x09\x01\0\0\0\0\0\0", 8);
- break;
- case 2:
- memcpy(context->usb_tx_buf, "\x0b\x01\0\0\0\0\0\0", 8);
- break;
- case 3:
- memcpy(context->usb_tx_buf, context->tx.data_buf, 8);
- break;
- case 4:
- memcpy(context->usb_tx_buf,
- context->tx.data_buf + 8, 8);
- break;
- case 5:
- memcpy(context->usb_tx_buf, "\x09\x01\0\0\0\0\0\0", 8);
- break;
- case 6:
- memcpy(context->usb_tx_buf, "\x0b\x02\0\0\0\0\0\0", 8);
- break;
- case 7:
- memcpy(context->usb_tx_buf,
- context->tx.data_buf + 16, 8);
- break;
- case 8:
- memcpy(context->usb_tx_buf,
- context->tx.data_buf + 24, 8);
- break;
- }
- retval = send_packet(context);
- if (retval) {
- dev_err(&context->dev->dev,
- "send packet failed for packet #%d\n", i);
- goto exit;
- }
- }
-exit:
-
- mutex_unlock(&context->ctx_lock);
- kfree(data_buf);
-
- return (!retval) ? n_bytes : retval;
-}
-
-/**
- * Callback function for USB core API: transmit data
- */
-static void usb_tx_callback(struct urb *urb)
-{
- struct sasem_context *context;
-
- if (!urb)
- return;
- context = (struct sasem_context *) urb->context;
- if (!context)
- return;
-
- context->tx.status = urb->status;
-
- /* notify waiters that write has finished */
- atomic_set(&context->tx.busy, 0);
- complete(&context->tx.finished);
-}
-
-/**
- * Called by lirc_dev when the application opens /dev/lirc
- */
-static int ir_open(void *data)
-{
- int retval = 0;
- struct sasem_context *context;
-
- /* prevent races with disconnect */
- mutex_lock(&disconnect_lock);
-
- context = data;
-
- mutex_lock(&context->ctx_lock);
-
- if (context->ir_isopen) {
- dev_err(&context->dev->dev, "%s: IR port is already open\n",
- __func__);
- retval = -EBUSY;
- goto exit;
- }
-
- usb_fill_int_urb(context->rx_urb, context->dev,
- usb_rcvintpipe(context->dev,
- context->rx_endpoint->bEndpointAddress),
- context->usb_rx_buf, sizeof(context->usb_rx_buf),
- usb_rx_callback, context, context->rx_endpoint->bInterval);
-
- retval = usb_submit_urb(context->rx_urb, GFP_KERNEL);
-
- if (retval)
- dev_err(&context->dev->dev,
- "usb_submit_urb failed for ir_open (%d)\n", retval);
- else {
- context->ir_isopen = 1;
- dev_info(&context->dev->dev, "IR port opened\n");
- }
-
-exit:
- mutex_unlock(&context->ctx_lock);
-
- mutex_unlock(&disconnect_lock);
- return retval;
-}
-
-/**
- * Called by lirc_dev when the application closes /dev/lirc
- */
-static void ir_close(void *data)
-{
- struct sasem_context *context;
-
- context = data;
- if (!context) {
- pr_err("%s: no context for device\n", __func__);
- return;
- }
-
- mutex_lock(&context->ctx_lock);
-
- usb_kill_urb(context->rx_urb);
- context->ir_isopen = 0;
- pr_info("IR port closed\n");
-
- if (!context->dev_present) {
-
- /*
- * Device disconnected while IR port was
- * still open. Driver was not deregistered
- * at disconnect time, so do it now.
- */
- deregister_from_lirc(context);
- if (!context->vfd_isopen) {
- mutex_unlock(&context->ctx_lock);
- delete_context(context);
- return;
- }
- /* If VFD port is open, context will be deleted by vfd_close */
- }
-
- mutex_unlock(&context->ctx_lock);
-}
-
-/**
- * Process the incoming packet
- */
-static void incoming_packet(struct sasem_context *context,
- struct urb *urb)
-{
- int len = urb->actual_length;
- unsigned char *buf = urb->transfer_buffer;
- u64 ns;
- ktime_t kt;
-
- if (len != 8) {
- dev_warn(&context->dev->dev,
- "%s: invalid incoming packet size (%d)\n",
- __func__, len);
- return;
- }
-
- if (debug)
- dev_info(&context->dev->dev, "Incoming data: %*ph\n", len, buf);
- /*
- * Lirc could deal with the repeat code, but we really need to block it
- * if it arrives too late. Otherwise we could repeat the wrong code.
- */
-
- /* get the time since the last button press */
- kt = ktime_get();
- ns = ktime_to_ns(ktime_sub(kt, context->presstime));
-
- if (memcmp(buf, "\x08\0\0\0\0\0\0\0", 8) == 0) {
- /*
- * the repeat code is being sent, so we copy
- * the old code to LIRC
- */
-
- /*
- * NOTE: Only if the last code was less than 250ms ago
- * - no one should be able to push another (undetected) button
- * in that time and then get a false repeat of the previous
- * press but it is long enough for a genuine repeat
- */
- if ((ns < 250 * NSEC_PER_MSEC) && (context->codesaved != 0)) {
- memcpy(buf, &context->lastcode, 8);
- context->presstime = kt;
- }
- } else {
- /* save the current valid code for repeats */
- memcpy(&context->lastcode, buf, 8);
- /*
- * set flag to signal a valid code was save;
- * just for safety reasons
- */
- context->codesaved = 1;
- context->presstime = kt;
- }
-
- lirc_buffer_write(context->driver->rbuf, buf);
- wake_up(&context->driver->rbuf->wait_poll);
-}
-
-/**
- * Callback function for USB core API: receive data
- */
-static void usb_rx_callback(struct urb *urb)
-{
- struct sasem_context *context;
-
- if (!urb)
- return;
- context = (struct sasem_context *) urb->context;
- if (!context)
- return;
-
- switch (urb->status) {
- case -ENOENT: /* usbcore unlink successful! */
- return;
-
- case 0:
- if (context->ir_isopen)
- incoming_packet(context, urb);
- break;
-
- default:
- dev_warn(&urb->dev->dev, "%s: status (%d): ignored",
- __func__, urb->status);
- break;
- }
-
- usb_submit_urb(context->rx_urb, GFP_ATOMIC);
-}
-
-/**
- * Callback function for USB core API: Probe
- */
-static int sasem_probe(struct usb_interface *interface,
- const struct usb_device_id *id)
-{
- struct usb_device *dev = NULL;
- struct usb_host_interface *iface_desc = NULL;
- struct usb_endpoint_descriptor *rx_endpoint = NULL;
- struct usb_endpoint_descriptor *tx_endpoint = NULL;
- struct urb *rx_urb = NULL;
- struct urb *tx_urb = NULL;
- struct lirc_driver *driver = NULL;
- struct lirc_buffer *rbuf = NULL;
- int lirc_minor = 0;
- int num_endpoints;
- int retval = 0;
- int vfd_ep_found;
- int ir_ep_found;
- int alloc_status;
- struct sasem_context *context = NULL;
- int i;
-
- dev_info(&interface->dev, "%s: found Sasem device\n", __func__);
-
-
- dev = usb_get_dev(interface_to_usbdev(interface));
- iface_desc = interface->cur_altsetting;
- num_endpoints = iface_desc->desc.bNumEndpoints;
-
- /*
- * Scan the endpoint list and set:
- * first input endpoint = IR endpoint
- * first output endpoint = VFD endpoint
- */
-
- ir_ep_found = 0;
- vfd_ep_found = 0;
-
- for (i = 0; i < num_endpoints && !(ir_ep_found && vfd_ep_found); ++i) {
-
- struct usb_endpoint_descriptor *ep;
-
- ep = &iface_desc->endpoint [i].desc;
-
- if (!ir_ep_found &&
- usb_endpoint_is_int_in(ep)) {
-
- rx_endpoint = ep;
- ir_ep_found = 1;
- if (debug)
- dev_info(&interface->dev,
- "%s: found IR endpoint\n", __func__);
-
- } else if (!vfd_ep_found &&
- usb_endpoint_is_int_out(ep)) {
- tx_endpoint = ep;
- vfd_ep_found = 1;
- if (debug)
- dev_info(&interface->dev,
- "%s: found VFD endpoint\n", __func__);
- }
- }
-
- /* Input endpoint is mandatory */
- if (!ir_ep_found) {
- dev_err(&interface->dev,
- "%s: no valid input (IR) endpoint found.\n", __func__);
- retval = -ENODEV;
- goto exit;
- }
-
- if (!vfd_ep_found)
- dev_info(&interface->dev,
- "%s: no valid output (VFD) endpoint found.\n",
- __func__);
-
-
- /* Allocate memory */
- alloc_status = 0;
-
- context = kzalloc(sizeof(*context), GFP_KERNEL);
- if (!context) {
- alloc_status = 1;
- goto alloc_status_switch;
- }
- driver = kzalloc(sizeof(*driver), GFP_KERNEL);
- if (!driver) {
- alloc_status = 2;
- goto alloc_status_switch;
- }
- rbuf = kmalloc(sizeof(*rbuf), GFP_KERNEL);
- if (!rbuf) {
- alloc_status = 3;
- goto alloc_status_switch;
- }
- if (lirc_buffer_init(rbuf, BUF_CHUNK_SIZE, BUF_SIZE)) {
- dev_err(&interface->dev,
- "%s: lirc_buffer_init failed\n", __func__);
- alloc_status = 4;
- goto alloc_status_switch;
- }
- rx_urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!rx_urb) {
- alloc_status = 5;
- goto alloc_status_switch;
- }
- if (vfd_ep_found) {
- tx_urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!tx_urb) {
- alloc_status = 6;
- goto alloc_status_switch;
- }
- }
-
- mutex_init(&context->ctx_lock);
-
- strcpy(driver->name, MOD_NAME);
- driver->minor = -1;
- driver->code_length = 64;
- driver->sample_rate = 0;
- driver->features = LIRC_CAN_REC_LIRCCODE;
- driver->data = context;
- driver->rbuf = rbuf;
- driver->set_use_inc = ir_open;
- driver->set_use_dec = ir_close;
- driver->dev = &interface->dev;
- driver->owner = THIS_MODULE;
-
- mutex_lock(&context->ctx_lock);
-
- lirc_minor = lirc_register_driver(driver);
- if (lirc_minor < 0) {
- dev_err(&interface->dev,
- "%s: lirc_register_driver failed\n", __func__);
- alloc_status = 7;
- retval = lirc_minor;
- goto unlock;
- } else
- dev_info(&interface->dev,
- "%s: Registered Sasem driver (minor:%d)\n",
- __func__, lirc_minor);
-
- /* Needed while unregistering! */
- driver->minor = lirc_minor;
-
- context->dev = dev;
- context->dev_present = 1;
- context->rx_endpoint = rx_endpoint;
- context->rx_urb = rx_urb;
- if (vfd_ep_found) {
- context->tx_endpoint = tx_endpoint;
- context->tx_urb = tx_urb;
- context->vfd_contrast = 1000; /* range 0 - 1000 */
- }
- context->driver = driver;
-
- usb_set_intfdata(interface, context);
-
- if (vfd_ep_found) {
-
- if (debug)
- dev_info(&interface->dev,
- "Registering VFD with sysfs\n");
- if (usb_register_dev(interface, &sasem_class))
- /* Not a fatal error, so ignore */
- dev_info(&interface->dev,
- "%s: could not get a minor number for VFD\n",
- __func__);
- }
-
- dev_info(&interface->dev,
- "%s: Sasem device on usb<%d:%d> initialized\n",
- __func__, dev->bus->busnum, dev->devnum);
-unlock:
- mutex_unlock(&context->ctx_lock);
-
-alloc_status_switch:
- switch (alloc_status) {
-
- case 7:
- if (vfd_ep_found)
- usb_free_urb(tx_urb);
- case 6:
- usb_free_urb(rx_urb);
- /* fall-through */
- case 5:
- lirc_buffer_free(rbuf);
- /* fall-through */
- case 4:
- kfree(rbuf);
- /* fall-through */
- case 3:
- kfree(driver);
- /* fall-through */
- case 2:
- kfree(context);
- context = NULL;
- /* fall-through */
- case 1:
- if (retval == 0)
- retval = -ENOMEM;
- }
-
-exit:
- return retval;
-}
-
-/**
- * Callback function for USB core API: disconnect
- */
-static void sasem_disconnect(struct usb_interface *interface)
-{
- struct sasem_context *context;
-
- /* prevent races with ir_open()/vfd_open() */
- mutex_lock(&disconnect_lock);
-
- context = usb_get_intfdata(interface);
- mutex_lock(&context->ctx_lock);
-
- dev_info(&interface->dev, "%s: Sasem device disconnected\n",
- __func__);
-
- usb_set_intfdata(interface, NULL);
- context->dev_present = 0;
-
- /* Stop reception */
- usb_kill_urb(context->rx_urb);
-
- /* Abort ongoing write */
- if (atomic_read(&context->tx.busy)) {
-
- usb_kill_urb(context->tx_urb);
- wait_for_completion(&context->tx.finished);
- }
-
- /* De-register from lirc_dev if IR port is not open */
- if (!context->ir_isopen)
- deregister_from_lirc(context);
-
- usb_deregister_dev(interface, &sasem_class);
-
- mutex_unlock(&context->ctx_lock);
-
- if (!context->ir_isopen && !context->vfd_isopen)
- delete_context(context);
-
- mutex_unlock(&disconnect_lock);
-}
-
-module_usb_driver(sasem_driver);
diff --git a/drivers/staging/media/lirc/lirc_sir.c b/drivers/staging/media/lirc/lirc_sir.c
deleted file mode 100644
index c6c3de94adaa..000000000000
--- a/drivers/staging/media/lirc/lirc_sir.c
+++ /dev/null
@@ -1,839 +0,0 @@
-/*
- * LIRC SIR driver, (C) 2000 Milan Pikula <www@fornax.sk>
- *
- * sir_ir - Device driver for use with SIR (serial infra red)
- * mode of IrDA on many notebooks.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- *
- * 2000/09/16 Frank Przybylski <mail@frankprzybylski.de> :
- * added timeout and relaxed pulse detection, removed gap bug
- *
- * 2000/12/15 Christoph Bartelmus <lirc@bartelmus.de> :
- * added support for Tekram Irmate 210 (sending does not work yet,
- * kind of disappointing that nobody was able to implement that
- * before),
- * major clean-up
- *
- * 2001/02/27 Christoph Bartelmus <lirc@bartelmus.de> :
- * added support for StrongARM SA1100 embedded microprocessor
- * parts cut'n'pasted from sa1100_ir.c (C) 2000 Russell King
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/module.h>
-#include <linux/sched/signal.h>
-#include <linux/errno.h>
-#include <linux/signal.h>
-#include <linux/fs.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/kernel.h>
-#include <linux/serial_reg.h>
-#include <linux/ktime.h>
-#include <linux/string.h>
-#include <linux/types.h>
-#include <linux/wait.h>
-#include <linux/mm.h>
-#include <linux/delay.h>
-#include <linux/poll.h>
-#include <linux/io.h>
-#include <asm/irq.h>
-#include <linux/fcntl.h>
-#include <linux/platform_device.h>
-
-#include <linux/timer.h>
-
-#include <media/rc-core.h>
-
-/* SECTION: Definitions */
-
-/*** Tekram dongle ***/
-#ifdef LIRC_SIR_TEKRAM
-/* stolen from kernel source */
-/* definitions for Tekram dongle */
-#define TEKRAM_115200 0x00
-#define TEKRAM_57600 0x01
-#define TEKRAM_38400 0x02
-#define TEKRAM_19200 0x03
-#define TEKRAM_9600 0x04
-#define TEKRAM_2400 0x08
-
-#define TEKRAM_PW 0x10 /* Pulse select bit */
-
-/* 10bit * 1s/115200bit in milliseconds = 87ms*/
-#define TIME_CONST (10000000ul/115200ul)
-
-#endif
-
-#ifdef LIRC_SIR_ACTISYS_ACT200L
-static void init_act200(void);
-#elif defined(LIRC_SIR_ACTISYS_ACT220L)
-static void init_act220(void);
-#endif
-
-#define PULSE '['
-
-#ifndef LIRC_SIR_TEKRAM
-/* 9bit * 1s/115200bit in milli seconds = 78.125ms*/
-#define TIME_CONST (9000000ul/115200ul)
-#endif
-
-
-/* timeout for sequences in jiffies (=5/100s), must be longer than TIME_CONST */
-#define SIR_TIMEOUT (HZ*5/100)
-
-#ifndef LIRC_ON_SA1100
-#ifndef LIRC_IRQ
-#define LIRC_IRQ 4
-#endif
-#ifndef LIRC_PORT
-/* for external dongles, default to com1 */
-#if defined(LIRC_SIR_ACTISYS_ACT200L) || \
- defined(LIRC_SIR_ACTISYS_ACT220L) || \
- defined(LIRC_SIR_TEKRAM)
-#define LIRC_PORT 0x3f8
-#else
-/* onboard sir ports are typically com3 */
-#define LIRC_PORT 0x3e8
-#endif
-#endif
-
-static int io = LIRC_PORT;
-static int irq = LIRC_IRQ;
-static int threshold = 3;
-#endif
-
-static DEFINE_SPINLOCK(timer_lock);
-static struct timer_list timerlist;
-/* time of last signal change detected */
-static ktime_t last;
-/* time of last UART data ready interrupt */
-static ktime_t last_intr_time;
-static int last_value;
-static struct rc_dev *rcdev;
-
-static struct platform_device *sir_ir_dev;
-
-static DEFINE_SPINLOCK(hardware_lock);
-
-static bool debug;
-
-/* SECTION: Prototypes */
-
-/* Communication with user-space */
-static void add_read_queue(int flag, unsigned long val);
-static int init_chrdev(void);
-/* Hardware */
-static irqreturn_t sir_interrupt(int irq, void *dev_id);
-static void send_space(unsigned long len);
-static void send_pulse(unsigned long len);
-static int init_hardware(void);
-static void drop_hardware(void);
-/* Initialisation */
-static int init_port(void);
-static void drop_port(void);
-
-static inline unsigned int sinp(int offset)
-{
- return inb(io + offset);
-}
-
-static inline void soutp(int offset, int value)
-{
- outb(value, io + offset);
-}
-
-#ifndef MAX_UDELAY_MS
-#define MAX_UDELAY_US 5000
-#else
-#define MAX_UDELAY_US (MAX_UDELAY_MS*1000)
-#endif
-
-static void safe_udelay(unsigned long usecs)
-{
- while (usecs > MAX_UDELAY_US) {
- udelay(MAX_UDELAY_US);
- usecs -= MAX_UDELAY_US;
- }
- udelay(usecs);
-}
-
-/* SECTION: Communication with user-space */
-static int sir_tx_ir(struct rc_dev *dev, unsigned int *tx_buf,
- unsigned int count)
-{
- unsigned long flags;
- int i;
-
- local_irq_save(flags);
- for (i = 0; i < count;) {
- if (tx_buf[i])
- send_pulse(tx_buf[i]);
- i++;
- if (i >= count)
- break;
- if (tx_buf[i])
- send_space(tx_buf[i]);
- i++;
- }
- local_irq_restore(flags);
-
- return count;
-}
-
-static void add_read_queue(int flag, unsigned long val)
-{
- DEFINE_IR_RAW_EVENT(ev);
-
- pr_debug("add flag %d with val %lu\n", flag, val);
-
- /*
- * statistically, pulses are ~TIME_CONST/2 too long. we could
- * maybe make this more exact, but this is good enough
- */
- if (flag) {
- /* pulse */
- if (val > TIME_CONST / 2)
- val -= TIME_CONST / 2;
- else /* should not ever happen */
- val = 1;
- ev.pulse = true;
- } else {
- val += TIME_CONST / 2;
- }
- ev.duration = US_TO_NS(val);
-
- ir_raw_event_store_with_filter(rcdev, &ev);
-}
-
-static int init_chrdev(void)
-{
- rcdev = devm_rc_allocate_device(&sir_ir_dev->dev, RC_DRIVER_IR_RAW);
- if (!rcdev)
- return -ENOMEM;
-
- rcdev->input_phys = KBUILD_MODNAME "/input0";
- rcdev->input_id.bustype = BUS_HOST;
- rcdev->input_id.vendor = 0x0001;
- rcdev->input_id.product = 0x0001;
- rcdev->input_id.version = 0x0100;
- rcdev->tx_ir = sir_tx_ir;
- rcdev->allowed_protocols = RC_BIT_ALL_IR_DECODER;
- rcdev->map_name = RC_MAP_RC6_MCE;
- rcdev->timeout = IR_DEFAULT_TIMEOUT;
- rcdev->dev.parent = &sir_ir_dev->dev;
-
- return devm_rc_register_device(&sir_ir_dev->dev, rcdev);
-}
-
-/* SECTION: Hardware */
-static void sir_timeout(unsigned long data)
-{
- /*
- * if last received signal was a pulse, but receiving stopped
- * within the 9 bit frame, we need to finish this pulse and
- * simulate a signal change from pulse to space. Otherwise
- * upper layers will receive two sequences next time.
- */
-
- unsigned long flags;
- unsigned long pulse_end;
-
- /* avoid interference with interrupt */
- spin_lock_irqsave(&timer_lock, flags);
- if (last_value) {
- /* clear unread bits in UART and restart */
- outb(UART_FCR_CLEAR_RCVR, io + UART_FCR);
- /* determine 'virtual' pulse end: */
- pulse_end = min_t(unsigned long,
- ktime_us_delta(last, last_intr_time),
- IR_MAX_DURATION);
- dev_dbg(&sir_ir_dev->dev, "timeout add %d for %lu usec\n",
- last_value, pulse_end);
- add_read_queue(last_value, pulse_end);
- last_value = 0;
- last = last_intr_time;
- }
- spin_unlock_irqrestore(&timer_lock, flags);
- ir_raw_event_handle(rcdev);
-}
-
-static irqreturn_t sir_interrupt(int irq, void *dev_id)
-{
- unsigned char data;
- ktime_t curr_time;
- static unsigned long delt;
- unsigned long deltintr;
- unsigned long flags;
- int iir, lsr;
-
- while ((iir = inb(io + UART_IIR) & UART_IIR_ID)) {
- switch (iir & UART_IIR_ID) { /* FIXME: this switch needs to be thinned out */
- case UART_IIR_MSI:
- (void) inb(io + UART_MSR);
- break;
- case UART_IIR_RLSI:
- (void) inb(io + UART_LSR);
- break;
- case UART_IIR_THRI:
-#if 0
- if (lsr & UART_LSR_THRE) /* FIFO is empty */
- outb(data, io + UART_TX)
-#endif
- break;
- case UART_IIR_RDI:
- /* avoid interference with timer */
- spin_lock_irqsave(&timer_lock, flags);
- do {
- del_timer(&timerlist);
- data = inb(io + UART_RX);
- curr_time = ktime_get();
- delt = min_t(unsigned long,
- ktime_us_delta(last, curr_time),
- IR_MAX_DURATION);
- deltintr = min_t(unsigned long,
- ktime_us_delta(last_intr_time,
- curr_time),
- IR_MAX_DURATION);
- dev_dbg(&sir_ir_dev->dev, "t %lu, d %d\n",
- deltintr, (int)data);
- /*
- * if nothing came in for longer than the threshold,
- * it was a gap
- */
- if (deltintr > TIME_CONST * threshold) {
- if (last_value) {
- dev_dbg(&sir_ir_dev->dev, "GAP\n");
- /* simulate signal change */
- add_read_queue(last_value,
- delt -
- deltintr);
- last_value = 0;
- last = last_intr_time;
- delt = deltintr;
- }
- }
- data = 1;
- if (data ^ last_value) {
- /*
- * deltintr > 2*TIME_CONST, remember?
- * the other case is timeout
- */
- add_read_queue(last_value,
- delt-TIME_CONST);
- last_value = data;
- last = curr_time;
- last = ktime_sub_us(last,
- TIME_CONST);
- }
- last_intr_time = curr_time;
- if (data) {
- /*
- * start timer for end of
- * sequence detection
- */
- timerlist.expires = jiffies +
- SIR_TIMEOUT;
- add_timer(&timerlist);
- }
-
- lsr = inb(io + UART_LSR);
- } while (lsr & UART_LSR_DR); /* data ready */
- spin_unlock_irqrestore(&timer_lock, flags);
- break;
- default:
- break;
- }
- }
- ir_raw_event_handle(rcdev);
- return IRQ_RETVAL(IRQ_HANDLED);
-}
-
-static void send_space(unsigned long len)
-{
- safe_udelay(len);
-}
-
-static void send_pulse(unsigned long len)
-{
- long bytes_out = len / TIME_CONST;
-
- if (bytes_out == 0)
- bytes_out++;
-
- while (bytes_out--) {
- outb(PULSE, io + UART_TX);
- /* FIXME: we need the proper transmit-wait logic from char/serial.c */
- while (!(inb(io + UART_LSR) & UART_LSR_THRE))
- ;
- }
-}
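send_pulse() above modulates an IR pulse by stuffing PULSE ('[', 0x5B) bytes into the UART transmitter: each byte occupies one TIME_CONST (≈78 us) slot on the wire, so the requested duration is truncated to a byte count with a floor of one byte. A hedged user-space sketch of that conversion; the 600 us and 50 us inputs are made-up examples:

#include <stdio.h>

#define TIME_CONST (9000000UL / 115200UL)   /* ~78 us per UART frame */

static long pulse_bytes(unsigned long len_us)
{
	long bytes_out = len_us / TIME_CONST;

	if (bytes_out == 0)          /* always emit at least one byte */
		bytes_out = 1;
	return bytes_out;
}

int main(void)
{
	printf("600 us pulse -> %ld bytes\n", pulse_bytes(600));   /* 7 */
	printf(" 50 us pulse -> %ld bytes\n", pulse_bytes(50));    /* 1 */
	return 0;
}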
-
-static int init_hardware(void)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&hardware_lock, flags);
- /* reset UART */
-#if defined(LIRC_SIR_TEKRAM)
- /* disable FIFO */
- soutp(UART_FCR,
- UART_FCR_CLEAR_RCVR|
- UART_FCR_CLEAR_XMIT|
- UART_FCR_TRIGGER_1);
-
- /* Set DLAB 0. */
- soutp(UART_LCR, sinp(UART_LCR) & (~UART_LCR_DLAB));
-
- /* First of all, disable all interrupts */
- soutp(UART_IER, sinp(UART_IER) &
- (~(UART_IER_MSI|UART_IER_RLSI|UART_IER_THRI|UART_IER_RDI)));
-
- /* Set DLAB 1. */
- soutp(UART_LCR, sinp(UART_LCR) | UART_LCR_DLAB);
-
- /* Set divisor to 12 => 9600 Baud */
- soutp(UART_DLM, 0);
- soutp(UART_DLL, 12);
-
- /* Set DLAB 0. */
- soutp(UART_LCR, sinp(UART_LCR) & (~UART_LCR_DLAB));
-
- /* power supply */
- soutp(UART_MCR, UART_MCR_RTS|UART_MCR_DTR|UART_MCR_OUT2);
- safe_udelay(50*1000);
-
- /* -DTR low -> reset PIC */
- soutp(UART_MCR, UART_MCR_RTS|UART_MCR_OUT2);
- udelay(1*1000);
-
- soutp(UART_MCR, UART_MCR_RTS|UART_MCR_DTR|UART_MCR_OUT2);
- udelay(100);
-
-
- /* -RTS low -> send control byte */
- soutp(UART_MCR, UART_MCR_DTR|UART_MCR_OUT2);
- udelay(7);
- soutp(UART_TX, TEKRAM_115200|TEKRAM_PW);
-
- /* one byte takes ~1042 usec to transmit at 9600,8N1 */
- udelay(1500);
-
- /* back to normal operation */
- soutp(UART_MCR, UART_MCR_RTS|UART_MCR_DTR|UART_MCR_OUT2);
- udelay(50);
-
- udelay(1500);
-
- /* read previous control byte */
- pr_info("0x%02x\n", sinp(UART_RX));
-
- /* Set DLAB 1. */
- soutp(UART_LCR, sinp(UART_LCR) | UART_LCR_DLAB);
-
- /* Set divisor to 1 => 115200 Baud */
- soutp(UART_DLM, 0);
- soutp(UART_DLL, 1);
-
- /* Set DLAB 0, 8 Bit */
- soutp(UART_LCR, UART_LCR_WLEN8);
- /* enable interrupts */
- soutp(UART_IER, sinp(UART_IER)|UART_IER_RDI);
-#else
- outb(0, io + UART_MCR);
- outb(0, io + UART_IER);
- /* init UART */
- /* set DLAB, speed = 115200 */
- outb(UART_LCR_DLAB | UART_LCR_WLEN7, io + UART_LCR);
- outb(1, io + UART_DLL); outb(0, io + UART_DLM);
- /* 7N1+start = 9 bits at 115200 ~ 3 bits at 44000 */
- outb(UART_LCR_WLEN7, io + UART_LCR);
- /* FIFO operation */
- outb(UART_FCR_ENABLE_FIFO, io + UART_FCR);
- /* interrupts */
- /* outb(UART_IER_RLSI|UART_IER_RDI|UART_IER_THRI, io + UART_IER); */
- outb(UART_IER_RDI, io + UART_IER);
- /* turn on UART */
- outb(UART_MCR_DTR|UART_MCR_RTS|UART_MCR_OUT2, io + UART_MCR);
-#ifdef LIRC_SIR_ACTISYS_ACT200L
- init_act200();
-#elif defined(LIRC_SIR_ACTISYS_ACT220L)
- init_act220();
-#endif
-#endif
- spin_unlock_irqrestore(&hardware_lock, flags);
- return 0;
-}
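The UART_DLL/UART_DLM writes above follow the standard 16550 relation divisor = 115200 / baud (assuming the usual 1.8432 MHz / 16 base clock), which is why the code programs 12 for 9600 baud and 1 for 115200 baud. A small illustrative sketch:

#include <stdio.h>

static unsigned int uart_divisor(unsigned int baud)
{
	return 115200 / baud;    /* 1.8432 MHz crystal / 16 = 115200 */
}

int main(void)
{
	printf("9600   baud -> divisor %u\n", uart_divisor(9600));   /* 12 */
	printf("115200 baud -> divisor %u\n", uart_divisor(115200)); /*  1 */
	return 0;
}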
-
-static void drop_hardware(void)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&hardware_lock, flags);
-
- /* turn off interrupts */
- outb(0, io + UART_IER);
-
- spin_unlock_irqrestore(&hardware_lock, flags);
-}
-
-/* SECTION: Initialisation */
-
-static int init_port(void)
-{
- int retval;
-
- /* get I/O port access and IRQ line */
- if (!request_region(io, 8, KBUILD_MODNAME)) {
- pr_err("i/o port 0x%.4x already in use.\n", io);
- return -EBUSY;
- }
- retval = request_irq(irq, sir_interrupt, 0,
- KBUILD_MODNAME, NULL);
- if (retval < 0) {
- release_region(io, 8);
- pr_err("IRQ %d already in use.\n", irq);
- return retval;
- }
- pr_info("I/O port 0x%.4x, IRQ %d.\n", io, irq);
-
- setup_timer(&timerlist, sir_timeout, 0);
-
- return 0;
-}
-
-static void drop_port(void)
-{
- free_irq(irq, NULL);
- del_timer_sync(&timerlist);
- release_region(io, 8);
-}
-
-#ifdef LIRC_SIR_ACTISYS_ACT200L
-/* Crystal/Cirrus CS8130 IR transceiver, used in Actisys Act200L dongle */
-/* some code borrowed from Linux IRDA driver */
-
-/* Register 0: Control register #1 */
-#define ACT200L_REG0 0x00
-#define ACT200L_TXEN 0x01 /* Enable transmitter */
-#define ACT200L_RXEN 0x02 /* Enable receiver */
-#define ACT200L_ECHO 0x08 /* Echo control chars */
-
-/* Register 1: Control register #2 */
-#define ACT200L_REG1 0x10
-#define ACT200L_LODB 0x01 /* Load new baud rate count value */
-#define ACT200L_WIDE 0x04 /* Expand the maximum allowable pulse */
-
-/* Register 3: Transmit mode register #2 */
-#define ACT200L_REG3 0x30
-#define ACT200L_B0 0x01 /* DataBits, 0=6, 1=7, 2=8, 3=9(8P) */
-#define ACT200L_B1 0x02 /* DataBits, 0=6, 1=7, 2=8, 3=9(8P) */
-#define ACT200L_CHSY 0x04 /* StartBit Synced 0=bittime, 1=startbit */
-
-/* Register 4: Output Power register */
-#define ACT200L_REG4 0x40
-#define ACT200L_OP0 0x01 /* Enable LED1C output */
-#define ACT200L_OP1 0x02 /* Enable LED2C output */
-#define ACT200L_BLKR 0x04
-
-/* Register 5: Receive Mode register */
-#define ACT200L_REG5 0x50
-#define ACT200L_RWIDL 0x01 /* fixed 1.6us pulse mode */
- /* ... other IrDA bit modes and TV remote modes ... */
-
-/* Register 6: Receive Sensitivity register #1 */
-#define ACT200L_REG6 0x60
-#define ACT200L_RS0 0x01 /* receive threshold bit 0 */
-#define ACT200L_RS1 0x02 /* receive threshold bit 1 */
-
-/* Register 7: Receive Sensitivity register #2 */
-#define ACT200L_REG7 0x70
-#define ACT200L_ENPOS 0x04 /* Ignore the falling edge */
-
-/* Register 8,9: Baud Rate Divider register #1,#2 */
-#define ACT200L_REG8 0x80
-#define ACT200L_REG9 0x90
-
-#define ACT200L_2400 0x5f
-#define ACT200L_9600 0x17
-#define ACT200L_19200 0x0b
-#define ACT200L_38400 0x05
-#define ACT200L_57600 0x03
-#define ACT200L_115200 0x01
-
-/* Register 13: Control register #3 */
-#define ACT200L_REG13 0xd0
-#define ACT200L_SHDW 0x01 /* Enable access to shadow registers */
-
-/* Register 15: Status register */
-#define ACT200L_REG15 0xf0
-
-/* Register 21: Control register #4 */
-#define ACT200L_REG21 0x50
-#define ACT200L_EXCK 0x02 /* Disable clock output driver */
-#define ACT200L_OSCL 0x04 /* oscillator in low power, medium accuracy mode */
-
-static void init_act200(void)
-{
- int i;
- __u8 control[] = {
- ACT200L_REG15,
- ACT200L_REG13 | ACT200L_SHDW,
- ACT200L_REG21 | ACT200L_EXCK | ACT200L_OSCL,
- ACT200L_REG13,
- ACT200L_REG7 | ACT200L_ENPOS,
- ACT200L_REG6 | ACT200L_RS0 | ACT200L_RS1,
- ACT200L_REG5 | ACT200L_RWIDL,
- ACT200L_REG4 | ACT200L_OP0 | ACT200L_OP1 | ACT200L_BLKR,
- ACT200L_REG3 | ACT200L_B0,
- ACT200L_REG0 | ACT200L_TXEN | ACT200L_RXEN,
- ACT200L_REG8 | (ACT200L_115200 & 0x0f),
- ACT200L_REG9 | ((ACT200L_115200 >> 4) & 0x0f),
- ACT200L_REG1 | ACT200L_LODB | ACT200L_WIDE
- };
-
- /* Set DLAB 1. */
- soutp(UART_LCR, UART_LCR_DLAB | UART_LCR_WLEN8);
-
- /* Set divisor to 12 => 9600 Baud */
- soutp(UART_DLM, 0);
- soutp(UART_DLL, 12);
-
- /* Set DLAB 0. */
- soutp(UART_LCR, UART_LCR_WLEN8);
- /* Set divisor to 12 => 9600 Baud */
-
- /* power supply */
- soutp(UART_MCR, UART_MCR_RTS|UART_MCR_DTR|UART_MCR_OUT2);
- for (i = 0; i < 50; i++)
- safe_udelay(1000);
-
- /* Reset the dongle : set RTS low for 25 ms */
- soutp(UART_MCR, UART_MCR_DTR|UART_MCR_OUT2);
- for (i = 0; i < 25; i++)
- udelay(1000);
-
- soutp(UART_MCR, UART_MCR_RTS|UART_MCR_DTR|UART_MCR_OUT2);
- udelay(100);
-
- /* Clear DTR and set RTS to enter command mode */
- soutp(UART_MCR, UART_MCR_RTS|UART_MCR_OUT2);
- udelay(7);
-
- /* send out the control register settings for 115K 7N1 SIR operation */
- for (i = 0; i < sizeof(control); i++) {
- soutp(UART_TX, control[i]);
- /* one byte takes ~1042 usec to transmit at 9600,8N1 */
- udelay(1500);
- }
-
- /* back to normal operation */
- soutp(UART_MCR, UART_MCR_RTS|UART_MCR_DTR|UART_MCR_OUT2);
- udelay(50);
-
- udelay(1500);
- soutp(UART_LCR, sinp(UART_LCR) | UART_LCR_DLAB);
-
- /* Set DLAB 1. */
- soutp(UART_LCR, UART_LCR_DLAB | UART_LCR_WLEN7);
-
- /* Set divisor to 1 => 115200 Baud */
- soutp(UART_DLM, 0);
- soutp(UART_DLL, 1);
-
- /* Set DLAB 0. */
- soutp(UART_LCR, sinp(UART_LCR) & (~UART_LCR_DLAB));
-
- /* Set DLAB 0, 7 Bit */
- soutp(UART_LCR, UART_LCR_WLEN7);
-
- /* enable interrupts */
- soutp(UART_IER, sinp(UART_IER)|UART_IER_RDI);
-}
-#endif
-
-#ifdef LIRC_SIR_ACTISYS_ACT220L
-/*
- * Derived from linux IrDA driver (net/irda/actisys.c)
- * Drop me a mail for any kind of comment: maxx@spaceboyz.net
- */
-
-void init_act220(void)
-{
- int i;
-
- /* DLAB 1 */
- soutp(UART_LCR, UART_LCR_DLAB|UART_LCR_WLEN7);
-
- /* 9600 baud */
- soutp(UART_DLM, 0);
- soutp(UART_DLL, 12);
-
- /* DLAB 0 */
- soutp(UART_LCR, UART_LCR_WLEN7);
-
- /* reset the dongle, set DTR low for 10us */
- soutp(UART_MCR, UART_MCR_RTS|UART_MCR_OUT2);
- udelay(10);
-
- /* back to normal (still 9600) */
- soutp(UART_MCR, UART_MCR_DTR|UART_MCR_RTS|UART_MCR_OUT2);
-
- /*
- * send RTS pulses until we reach 115200
- * I hope this is really the same for the act220l/act220l+
- */
- for (i = 0; i < 3; i++) {
- udelay(10);
- /* set RTS low for 10 us */
- soutp(UART_MCR, UART_MCR_DTR|UART_MCR_OUT2);
- udelay(10);
- /* set RTS high for 10 us */
- soutp(UART_MCR, UART_MCR_RTS|UART_MCR_DTR|UART_MCR_OUT2);
- }
-
- /* back to normal operation */
- udelay(1500); /* better safe than sorry ;) */
-
- /* Set DLAB 1. */
- soutp(UART_LCR, UART_LCR_DLAB | UART_LCR_WLEN7);
-
- /* Set divisor to 1 => 115200 Baud */
- soutp(UART_DLM, 0);
- soutp(UART_DLL, 1);
-
- /* Set DLAB 0, 7 Bit */
- /* The dongle doesn't seem to have any problems with operation at 7N1 */
- soutp(UART_LCR, UART_LCR_WLEN7);
-
- /* enable interrupts */
- soutp(UART_IER, UART_IER_RDI);
-}
-#endif
-
-static int init_sir_ir(void)
-{
- int retval;
-
- retval = init_port();
- if (retval < 0)
- return retval;
- init_hardware();
- pr_info("Installed.\n");
- return 0;
-}
-
-static int sir_ir_probe(struct platform_device *dev)
-{
- return 0;
-}
-
-static int sir_ir_remove(struct platform_device *dev)
-{
- return 0;
-}
-
-static struct platform_driver sir_ir_driver = {
- .probe = sir_ir_probe,
- .remove = sir_ir_remove,
- .driver = {
- .name = "sir_ir",
- },
-};
-
-static int __init sir_ir_init(void)
-{
- int retval;
-
- retval = platform_driver_register(&sir_ir_driver);
- if (retval) {
- pr_err("Platform driver register failed!\n");
- return -ENODEV;
- }
-
- sir_ir_dev = platform_device_alloc("sir_ir", 0);
- if (!sir_ir_dev) {
- pr_err("Platform device alloc failed!\n");
- retval = -ENOMEM;
- goto pdev_alloc_fail;
- }
-
- retval = platform_device_add(sir_ir_dev);
- if (retval) {
- pr_err("Platform device add failed!\n");
- retval = -ENODEV;
- goto pdev_add_fail;
- }
-
- retval = init_chrdev();
- if (retval < 0)
- goto fail;
-
- retval = init_sir_ir();
- if (retval)
- goto fail;
-
- return 0;
-
-fail:
- platform_device_del(sir_ir_dev);
-pdev_add_fail:
- platform_device_put(sir_ir_dev);
-pdev_alloc_fail:
- platform_driver_unregister(&sir_ir_driver);
- return retval;
-}
-
-static void __exit sir_ir_exit(void)
-{
- drop_hardware();
- drop_port();
- platform_device_unregister(sir_ir_dev);
- platform_driver_unregister(&sir_ir_driver);
- pr_info("Uninstalled.\n");
-}
-
-module_init(sir_ir_init);
-module_exit(sir_ir_exit);
-
-#ifdef LIRC_SIR_TEKRAM
-MODULE_DESCRIPTION("Infrared receiver driver for Tekram Irmate 210");
-MODULE_AUTHOR("Christoph Bartelmus");
-#elif defined(LIRC_SIR_ACTISYS_ACT200L)
-MODULE_DESCRIPTION("LIRC driver for Actisys Act200L");
-MODULE_AUTHOR("Karl Bongers");
-#elif defined(LIRC_SIR_ACTISYS_ACT220L)
-MODULE_DESCRIPTION("LIRC driver for Actisys Act220L(+)");
-MODULE_AUTHOR("Jan Roemisch");
-#else
-MODULE_DESCRIPTION("Infrared receiver driver for SIR type serial ports");
-MODULE_AUTHOR("Milan Pikula");
-#endif
-MODULE_LICENSE("GPL");
-
-module_param(io, int, S_IRUGO);
-MODULE_PARM_DESC(io, "I/O address base (0x3f8 or 0x2f8)");
-
-module_param(irq, int, S_IRUGO);
-MODULE_PARM_DESC(irq, "Interrupt (4 or 3)");
-
-module_param(threshold, int, S_IRUGO);
-MODULE_PARM_DESC(threshold, "space detection threshold (3)");
-
-module_param(debug, bool, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(debug, "Enable debugging messages");
diff --git a/drivers/staging/media/lirc/lirc_zilog.c b/drivers/staging/media/lirc/lirc_zilog.c
index e4a533b6beb3..8ce1db04414a 100644
--- a/drivers/staging/media/lirc/lirc_zilog.c
+++ b/drivers/staging/media/lirc/lirc_zilog.c
@@ -1475,7 +1475,7 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
ir = get_ir_device_by_adapter(adap);
if (ir == NULL) {
ir = kzalloc(sizeof(struct IR), GFP_KERNEL);
- if (ir == NULL) {
+ if (!ir) {
ret = -ENOMEM;
goto out_no_ir;
}
@@ -1515,7 +1515,7 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
/* Set up a struct IR_tx instance */
tx = kzalloc(sizeof(struct IR_tx), GFP_KERNEL);
- if (tx == NULL) {
+ if (!tx) {
ret = -ENOMEM;
goto out_put_xx;
}
@@ -1559,7 +1559,7 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
/* Set up a struct IR_rx instance */
rx = kzalloc(sizeof(struct IR_rx), GFP_KERNEL);
- if (rx == NULL) {
+ if (!rx) {
ret = -ENOMEM;
goto out_put_xx;
}
@@ -1597,7 +1597,7 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
i2c_set_clientdata(client, NULL);
put_ir_rx(rx, true);
ir->l.features &= ~LIRC_CAN_REC_LIRCCODE;
- goto out_put_xx;
+ goto out_put_tx;
}
/* Proceed only if the Tx client is also ready */
@@ -1637,6 +1637,7 @@ out_ok:
out_put_xx:
if (rx != NULL)
put_ir_rx(rx, true);
+out_put_tx:
if (tx != NULL)
put_ir_tx(tx, true);
out_put_ir:
diff --git a/drivers/staging/media/omap4iss/iss_csi2.c b/drivers/staging/media/omap4iss/iss_csi2.c
index f71d5f2f179f..f6acc541e8a2 100644
--- a/drivers/staging/media/omap4iss/iss_csi2.c
+++ b/drivers/staging/media/omap4iss/iss_csi2.c
@@ -1268,7 +1268,7 @@ static int csi2_init_entities(struct iss_csi2_device *csi2, const char *subname)
snprintf(name, sizeof(name), "CSI2%s", subname);
snprintf(sd->name, sizeof(sd->name), "OMAP4 ISS %s", name);
- sd->grp_id = 1 << 16; /* group ID for iss subdevs */
+ sd->grp_id = BIT(16); /* group ID for iss subdevs */
v4l2_set_subdevdata(sd, csi2);
sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
diff --git a/drivers/staging/media/omap4iss/iss_ipipe.c b/drivers/staging/media/omap4iss/iss_ipipe.c
index d38782e8e84c..d86ef8a031f2 100644
--- a/drivers/staging/media/omap4iss/iss_ipipe.c
+++ b/drivers/staging/media/omap4iss/iss_ipipe.c
@@ -508,7 +508,7 @@ static int ipipe_init_entities(struct iss_ipipe_device *ipipe)
v4l2_subdev_init(sd, &ipipe_v4l2_ops);
sd->internal_ops = &ipipe_v4l2_internal_ops;
strlcpy(sd->name, "OMAP4 ISS ISP IPIPE", sizeof(sd->name));
- sd->grp_id = 1 << 16; /* group ID for iss subdevs */
+ sd->grp_id = BIT(16); /* group ID for iss subdevs */
v4l2_set_subdevdata(sd, ipipe);
sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
diff --git a/drivers/staging/media/omap4iss/iss_ipipeif.c b/drivers/staging/media/omap4iss/iss_ipipeif.c
index 23de8330731d..cb88b2bd0d82 100644
--- a/drivers/staging/media/omap4iss/iss_ipipeif.c
+++ b/drivers/staging/media/omap4iss/iss_ipipeif.c
@@ -739,7 +739,7 @@ static int ipipeif_init_entities(struct iss_ipipeif_device *ipipeif)
v4l2_subdev_init(sd, &ipipeif_v4l2_ops);
sd->internal_ops = &ipipeif_v4l2_internal_ops;
strlcpy(sd->name, "OMAP4 ISS ISP IPIPEIF", sizeof(sd->name));
- sd->grp_id = 1 << 16; /* group ID for iss subdevs */
+ sd->grp_id = BIT(16); /* group ID for iss subdevs */
v4l2_set_subdevdata(sd, ipipeif);
sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
diff --git a/drivers/staging/media/omap4iss/iss_resizer.c b/drivers/staging/media/omap4iss/iss_resizer.c
index f1d352c711d5..4bbfa20b3c38 100644
--- a/drivers/staging/media/omap4iss/iss_resizer.c
+++ b/drivers/staging/media/omap4iss/iss_resizer.c
@@ -782,7 +782,7 @@ static int resizer_init_entities(struct iss_resizer_device *resizer)
v4l2_subdev_init(sd, &resizer_v4l2_ops);
sd->internal_ops = &resizer_v4l2_internal_ops;
strlcpy(sd->name, "OMAP4 ISS ISP resizer", sizeof(sd->name));
- sd->grp_id = 1 << 16; /* group ID for iss subdevs */
+ sd->grp_id = BIT(16); /* group ID for iss subdevs */
v4l2_set_subdevdata(sd, resizer);
sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
diff --git a/drivers/staging/media/omap4iss/iss_video.c b/drivers/staging/media/omap4iss/iss_video.c
index bb0e3b4a4558..0bac58241a22 100644
--- a/drivers/staging/media/omap4iss/iss_video.c
+++ b/drivers/staging/media/omap4iss/iss_video.c
@@ -128,7 +128,8 @@ static unsigned int iss_video_mbus_to_pix(const struct iss_video *video,
pix->width = mbus->width;
pix->height = mbus->height;
- /* Skip the last format in the loop so that it will be selected if no
+ /*
+ * Skip the last format in the loop so that it will be selected if no
* match is found.
*/
for (i = 0; i < ARRAY_SIZE(formats) - 1; ++i) {
@@ -138,7 +139,8 @@ static unsigned int iss_video_mbus_to_pix(const struct iss_video *video,
min_bpl = pix->width * ALIGN(formats[i].bpp, 8) / 8;
- /* Clamp the requested bytes per line value. If the maximum bytes per
+ /*
+ * Clamp the requested bytes per line value. If the maximum bytes per
* line value is zero, the module doesn't support user configurable line
* sizes. Override the requested value with the minimum in that case.
*/
@@ -172,7 +174,8 @@ static void iss_video_pix_to_mbus(const struct v4l2_pix_format *pix,
mbus->width = pix->width;
mbus->height = pix->height;
- /* Skip the last format in the loop so that it will be selected if no
+ /*
+ * Skip the last format in the loop so that it will be selected if no
* match is found.
*/
for (i = 0; i < ARRAY_SIZE(formats) - 1; ++i) {
@@ -298,7 +301,8 @@ iss_video_check_format(struct iss_video *video, struct iss_video_fh *vfh)
static int iss_video_queue_setup(struct vb2_queue *vq,
unsigned int *count, unsigned int *num_planes,
- unsigned int sizes[], struct device *alloc_devs[])
+ unsigned int sizes[],
+ struct device *alloc_devs[])
{
struct iss_video_fh *vfh = vb2_get_drv_priv(vq);
struct iss_video *video = vfh->video;
@@ -360,7 +364,8 @@ static void iss_video_buf_queue(struct vb2_buffer *vb)
spin_lock_irqsave(&video->qlock, flags);
- /* Mark the buffer is faulty and give it back to the queue immediately
+ /*
+ * Mark the buffer is faulty and give it back to the queue immediately
* if the video node has registered an error. vb2 will perform the same
* check when preparing the buffer, but that is inherently racy, so we
* need to handle the race condition with an authoritative check here.
@@ -443,7 +448,8 @@ struct iss_buffer *omap4iss_video_buffer_next(struct iss_video *video)
buf->vb.vb2_buf.timestamp = ktime_get_ns();
- /* Do frame number propagation only if this is the output video node.
+ /*
+ * Do frame number propagation only if this is the output video node.
* Frame number either comes from the CSI receivers or it gets
* incremented here if H3A is not active.
* Note: There is no guarantee that the output buffer will finish
@@ -605,7 +611,8 @@ iss_video_set_format(struct file *file, void *fh, struct v4l2_format *format)
mutex_lock(&video->mutex);
- /* Fill the bytesperline and sizeimage fields by converting to media bus
+ /*
+ * Fill the bytesperline and sizeimage fields by converting to media bus
* format and back to pixel format.
*/
iss_video_pix_to_mbus(&format->fmt.pix, &fmt);
@@ -678,8 +685,9 @@ iss_video_get_selection(struct file *file, void *fh, struct v4l2_selection *sel)
if (subdev == NULL)
return -EINVAL;
- /* Try the get selection operation first and fallback to get format if not
- * implemented.
+ /*
+ * Try the get selection operation first and fallback to get format if
+ * not implemented.
*/
sdsel.pad = pad;
ret = v4l2_subdev_call(subdev, pad, get_selection, NULL, &sdsel);
@@ -867,7 +875,8 @@ iss_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
mutex_lock(&video->stream_lock);
- /* Start streaming on the pipeline. No link touching an entity in the
+ /*
+ * Start streaming on the pipeline. No link touching an entity in the
* pipeline can be activated or deactivated once streaming is started.
*/
pipe = entity->pipe
@@ -895,7 +904,8 @@ iss_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
while ((entity = media_graph_walk_next(&graph)))
media_entity_enum_set(&pipe->ent_enum, entity);
- /* Verify that the currently configured format matches the output of
+ /*
+ * Verify that the currently configured format matches the output of
* the connected subdev.
*/
ret = iss_video_check_format(video, vfh);
@@ -905,7 +915,8 @@ iss_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
video->bpl_padding = ret;
video->bpl_value = vfh->format.fmt.pix.bytesperline;
- /* Find the ISS video node connected at the far end of the pipeline and
+ /*
+ * Find the ISS video node connected at the far end of the pipeline and
* update the pipeline.
*/
far_end = iss_video_far_end(video);
@@ -930,7 +941,8 @@ iss_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
pipe->state |= state;
spin_unlock_irqrestore(&pipe->lock, flags);
- /* Set the maximum time per frame as the value requested by userspace.
+ /*
+ * Set the maximum time per frame as the value requested by userspace.
* This is a soft limit that can be overridden if the hardware doesn't
* support the request limit.
*/
@@ -946,7 +958,8 @@ iss_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
if (ret < 0)
goto err_iss_video_check_format;
- /* In sensor-to-memory mode, the stream can be started synchronously
+ /*
+ * In sensor-to-memory mode, the stream can be started synchronously
* to the stream on command. In memory-to-memory mode, it will be
* started when buffers are queued on both the input and output.
*/
diff --git a/drivers/staging/media/platform/bcm2835/Kconfig b/drivers/staging/media/platform/bcm2835/Kconfig
deleted file mode 100644
index 7c5245dc3225..000000000000
--- a/drivers/staging/media/platform/bcm2835/Kconfig
+++ /dev/null
@@ -1,10 +0,0 @@
-config VIDEO_BCM2835
- tristate "Broadcom BCM2835 camera driver"
- depends on VIDEO_V4L2 && (ARCH_BCM2835 || COMPILE_TEST)
- depends on BCM2835_VCHIQ
- depends on ARM
- select VIDEOBUF2_VMALLOC
- help
- Say Y here to enable camera host interface devices for
- Broadcom BCM2835 SoC. This operates over the VCHIQ interface
- to a service running on VideoCore.
diff --git a/drivers/staging/media/platform/bcm2835/TODO b/drivers/staging/media/platform/bcm2835/TODO
deleted file mode 100644
index 61a509992b9a..000000000000
--- a/drivers/staging/media/platform/bcm2835/TODO
+++ /dev/null
@@ -1,39 +0,0 @@
-1) Support dma-buf memory management.
-
-In order to zero-copy import camera images into the 3D or display
-pipelines, we need to export our buffers through dma-buf so that the
-vc4 driver can import them. This may involve bringing in the VCSM
-driver (which allows long-term management of regions of memory in the
-space that the VPU reserved and Linux otherwise doesn't have access
-to), or building some new protocol that allows VCSM-style management
-of Linux's CMA memory.
-
-2) Avoid extra copies for padding of images.
-
-We expose V4L2_PIX_FMT_* formats that have a specified stride/height
-padding in the V4L2 spec, but that padding doesn't match what the
-hardware can do. If we exposed the native padding requirements
-through the V4L2 "multiplanar" formats, the firmware would have one
-fewer copy to do.
-
-3) Port to ARM64
-
-The bulk_receive() path does some manual cache flushing that is 32-bit ARM
-only, which we should convert to proper cross-platform APIs.
-
-4) Convert to be a platform driver.
-
-Right now, when the module probes, it tries to initialize VCHI and
-errors out if it isn't ready yet. If bcm2835-v4l2 is built in, then
-VCHI generally isn't ready, because it depends on both the firmware and
-mailbox drivers having already loaded.
-
-We should have VCHI create a platform device once it's initialized,
-and have this driver bind to it, so that we automatically load the
-v4l2 module after VCHI loads (see the sketch after this list).
-
-5) Drop the gstreamer workaround.
-
-This was a temporary workaround for a bug that was fixed mid-2014, and
-we should remove it before stabilizing the driver.
-
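TODO item 4 above asks for the camera code to bind to a platform device that VCHI would create once it has finished initialising. A minimal, hypothetical sketch of what such a binding could look like; the "bcm2835-camera" device name and the probe/remove bodies are assumptions for illustration, not the driver's real interface:

#include <linux/module.h>
#include <linux/platform_device.h>

static int bcm2835_camera_probe(struct platform_device *pdev)
{
	/* VCHI would be guaranteed to be up before this probe runs. */
	dev_info(&pdev->dev, "camera host interface available\n");
	return 0;
}

static int bcm2835_camera_remove(struct platform_device *pdev)
{
	return 0;
}

static struct platform_driver bcm2835_camera_driver = {
	.probe	= bcm2835_camera_probe,
	.remove	= bcm2835_camera_remove,
	.driver	= {
		.name = "bcm2835-camera",	/* assumed name of the VCHI-created device */
	},
};
module_platform_driver(bcm2835_camera_driver);

MODULE_DESCRIPTION("Sketch: bind bcm2835 camera to a VCHI-created platform device");
MODULE_LICENSE("GPL");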
diff --git a/drivers/staging/media/platform/bcm2835/bcm2835-camera.c b/drivers/staging/media/platform/bcm2835/bcm2835-camera.c
deleted file mode 100644
index ca15a698e018..000000000000
--- a/drivers/staging/media/platform/bcm2835/bcm2835-camera.c
+++ /dev/null
@@ -1,2024 +0,0 @@
-/*
- * Broadcom BM2835 V4L2 driver
- *
- * Copyright © 2013 Raspberry Pi (Trading) Ltd.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file COPYING in the main directory of this archive
- * for more details.
- *
- * Authors: Vincent Sanders <vincent.sanders@collabora.co.uk>
- * Dave Stevenson <dsteve@broadcom.com>
- * Simon Mellor <simellor@broadcom.com>
- * Luke Diamand <luked@broadcom.com>
- */
-
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <media/videobuf2-vmalloc.h>
-#include <media/videobuf2-dma-contig.h>
-#include <media/v4l2-device.h>
-#include <media/v4l2-ioctl.h>
-#include <media/v4l2-ctrls.h>
-#include <media/v4l2-fh.h>
-#include <media/v4l2-event.h>
-#include <media/v4l2-common.h>
-#include <linux/delay.h>
-
-#include "mmal-common.h"
-#include "mmal-encodings.h"
-#include "mmal-vchiq.h"
-#include "mmal-msg.h"
-#include "mmal-parameters.h"
-#include "bcm2835-camera.h"
-
-#define BM2835_MMAL_VERSION "0.0.2"
-#define BM2835_MMAL_MODULE_NAME "bcm2835-v4l2"
-#define MIN_WIDTH 32
-#define MIN_HEIGHT 32
-#define MIN_BUFFER_SIZE (80 * 1024)
-
-#define MAX_VIDEO_MODE_WIDTH 1280
-#define MAX_VIDEO_MODE_HEIGHT 720
-
-#define MAX_BCM2835_CAMERAS 2
-
-MODULE_DESCRIPTION("Broadcom 2835 MMAL video capture");
-MODULE_AUTHOR("Vincent Sanders");
-MODULE_LICENSE("GPL");
-MODULE_VERSION(BM2835_MMAL_VERSION);
-
-int bcm2835_v4l2_debug;
-module_param_named(debug, bcm2835_v4l2_debug, int, 0644);
-MODULE_PARM_DESC(bcm2835_v4l2_debug, "Debug level 0-2");
-
-#define UNSET (-1)
-static int video_nr[] = {[0 ... (MAX_BCM2835_CAMERAS - 1)] = UNSET };
-module_param_array(video_nr, int, NULL, 0644);
-MODULE_PARM_DESC(video_nr, "videoX start numbers, -1 is autodetect");
-
-static int max_video_width = MAX_VIDEO_MODE_WIDTH;
-static int max_video_height = MAX_VIDEO_MODE_HEIGHT;
-module_param(max_video_width, int, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
-MODULE_PARM_DESC(max_video_width, "Threshold for video mode");
-module_param(max_video_height, int, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
-MODULE_PARM_DESC(max_video_height, "Threshold for video mode");
-
-/* Gstreamer bug https://bugzilla.gnome.org/show_bug.cgi?id=726521
- * v4l2src does bad (and actually wrong) things when the vidioc_enum_framesizes
- * function says type V4L2_FRMSIZE_TYPE_STEPWISE, which we do by default.
- * It's happier if we just don't say anything at all; it then
- * sets up a load of defaults that it thinks might work.
- * If gst_v4l2src_is_broken is non-zero, then we remove the function from
- * our function table list (actually switch to an alternate set, but same
- * result).
- */
-static int gst_v4l2src_is_broken;
-module_param(gst_v4l2src_is_broken, int, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
-MODULE_PARM_DESC(gst_v4l2src_is_broken, "If non-zero, enable workaround for Gstreamer");
-
-/* global device data array */
-static struct bm2835_mmal_dev *gdev[MAX_BCM2835_CAMERAS];
-
-#define FPS_MIN 1
-#define FPS_MAX 90
-
-/* timeperframe: min/max and default */
-static const struct v4l2_fract
- tpf_min = {.numerator = 1, .denominator = FPS_MAX},
- tpf_max = {.numerator = 1, .denominator = FPS_MIN},
- tpf_default = {.numerator = 1000, .denominator = 30000};
-
-/* video formats */
-static struct mmal_fmt formats[] = {
- {
- .name = "4:2:0, planar, YUV",
- .fourcc = V4L2_PIX_FMT_YUV420,
- .flags = 0,
- .mmal = MMAL_ENCODING_I420,
- .depth = 12,
- .mmal_component = MMAL_COMPONENT_CAMERA,
- .ybbp = 1,
- },
- {
- .name = "4:2:2, packed, YUYV",
- .fourcc = V4L2_PIX_FMT_YUYV,
- .flags = 0,
- .mmal = MMAL_ENCODING_YUYV,
- .depth = 16,
- .mmal_component = MMAL_COMPONENT_CAMERA,
- .ybbp = 2,
- },
- {
- .name = "RGB24 (LE)",
- .fourcc = V4L2_PIX_FMT_RGB24,
- .flags = 0,
- .mmal = MMAL_ENCODING_RGB24,
- .depth = 24,
- .mmal_component = MMAL_COMPONENT_CAMERA,
- .ybbp = 3,
- },
- {
- .name = "JPEG",
- .fourcc = V4L2_PIX_FMT_JPEG,
- .flags = V4L2_FMT_FLAG_COMPRESSED,
- .mmal = MMAL_ENCODING_JPEG,
- .depth = 8,
- .mmal_component = MMAL_COMPONENT_IMAGE_ENCODE,
- .ybbp = 0,
- },
- {
- .name = "H264",
- .fourcc = V4L2_PIX_FMT_H264,
- .flags = V4L2_FMT_FLAG_COMPRESSED,
- .mmal = MMAL_ENCODING_H264,
- .depth = 8,
- .mmal_component = MMAL_COMPONENT_VIDEO_ENCODE,
- .ybbp = 0,
- },
- {
- .name = "MJPEG",
- .fourcc = V4L2_PIX_FMT_MJPEG,
- .flags = V4L2_FMT_FLAG_COMPRESSED,
- .mmal = MMAL_ENCODING_MJPEG,
- .depth = 8,
- .mmal_component = MMAL_COMPONENT_VIDEO_ENCODE,
- .ybbp = 0,
- },
- {
- .name = "4:2:2, packed, YVYU",
- .fourcc = V4L2_PIX_FMT_YVYU,
- .flags = 0,
- .mmal = MMAL_ENCODING_YVYU,
- .depth = 16,
- .mmal_component = MMAL_COMPONENT_CAMERA,
- .ybbp = 2,
- },
- {
- .name = "4:2:2, packed, VYUY",
- .fourcc = V4L2_PIX_FMT_VYUY,
- .flags = 0,
- .mmal = MMAL_ENCODING_VYUY,
- .depth = 16,
- .mmal_component = MMAL_COMPONENT_CAMERA,
- .ybbp = 2,
- },
- {
- .name = "4:2:2, packed, UYVY",
- .fourcc = V4L2_PIX_FMT_UYVY,
- .flags = 0,
- .mmal = MMAL_ENCODING_UYVY,
- .depth = 16,
- .mmal_component = MMAL_COMPONENT_CAMERA,
- .ybbp = 2,
- },
- {
- .name = "4:2:0, planar, NV12",
- .fourcc = V4L2_PIX_FMT_NV12,
- .flags = 0,
- .mmal = MMAL_ENCODING_NV12,
- .depth = 12,
- .mmal_component = MMAL_COMPONENT_CAMERA,
- .ybbp = 1,
- },
- {
- .name = "RGB24 (BE)",
- .fourcc = V4L2_PIX_FMT_BGR24,
- .flags = 0,
- .mmal = MMAL_ENCODING_BGR24,
- .depth = 24,
- .mmal_component = MMAL_COMPONENT_CAMERA,
- .ybbp = 3,
- },
- {
- .name = "4:2:0, planar, YVU",
- .fourcc = V4L2_PIX_FMT_YVU420,
- .flags = 0,
- .mmal = MMAL_ENCODING_YV12,
- .depth = 12,
- .mmal_component = MMAL_COMPONENT_CAMERA,
- .ybbp = 1,
- },
- {
- .name = "4:2:0, planar, NV21",
- .fourcc = V4L2_PIX_FMT_NV21,
- .flags = 0,
- .mmal = MMAL_ENCODING_NV21,
- .depth = 12,
- .mmal_component = MMAL_COMPONENT_CAMERA,
- .ybbp = 1,
- },
- {
- .name = "RGB32 (BE)",
- .fourcc = V4L2_PIX_FMT_BGR32,
- .flags = 0,
- .mmal = MMAL_ENCODING_BGRA,
- .depth = 32,
- .mmal_component = MMAL_COMPONENT_CAMERA,
- .ybbp = 4,
- },
-};
-
-static struct mmal_fmt *get_format(struct v4l2_format *f)
-{
- struct mmal_fmt *fmt;
- unsigned int k;
-
- for (k = 0; k < ARRAY_SIZE(formats); k++) {
- fmt = &formats[k];
- if (fmt->fourcc == f->fmt.pix.pixelformat)
- break;
- }
-
- if (k == ARRAY_SIZE(formats))
- return NULL;
-
- return &formats[k];
-}
-
-/* ------------------------------------------------------------------
- Videobuf queue operations
- ------------------------------------------------------------------*/
-
-static int queue_setup(struct vb2_queue *vq,
- unsigned int *nbuffers, unsigned int *nplanes,
- unsigned int sizes[], struct device *alloc_ctxs[])
-{
- struct bm2835_mmal_dev *dev = vb2_get_drv_priv(vq);
- unsigned long size;
-
- /* refuse queue setup if port is not configured */
- if (dev->capture.port == NULL) {
- v4l2_err(&dev->v4l2_dev,
- "%s: capture port not configured\n", __func__);
- return -EINVAL;
- }
-
- size = dev->capture.port->current_buffer.size;
- if (size == 0) {
- v4l2_err(&dev->v4l2_dev,
- "%s: capture port buffer size is zero\n", __func__);
- return -EINVAL;
- }
-
- if (*nbuffers < (dev->capture.port->current_buffer.num + 2))
- *nbuffers = (dev->capture.port->current_buffer.num + 2);
-
- *nplanes = 1;
-
- sizes[0] = size;
-
- /*
- * videobuf2-vmalloc allocator is context-less so no need to set
- * alloc_ctxs array.
- */
-
- v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev, "%s: dev:%p\n",
- __func__, dev);
-
- return 0;
-}
-
-static int buffer_prepare(struct vb2_buffer *vb)
-{
- struct bm2835_mmal_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
- unsigned long size;
-
- v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev, "%s: dev:%p\n",
- __func__, dev);
-
- BUG_ON(dev->capture.port == NULL);
- BUG_ON(dev->capture.fmt == NULL);
-
- size = dev->capture.stride * dev->capture.height;
- if (vb2_plane_size(vb, 0) < size) {
- v4l2_err(&dev->v4l2_dev,
- "%s data will not fit into plane (%lu < %lu)\n",
- __func__, vb2_plane_size(vb, 0), size);
- return -EINVAL;
- }
-
- return 0;
-}
-
-static inline bool is_capturing(struct bm2835_mmal_dev *dev)
-{
- return dev->capture.camera_port ==
- &dev->
- component[MMAL_COMPONENT_CAMERA]->output[MMAL_CAMERA_PORT_CAPTURE];
-}
-
-static void buffer_cb(struct vchiq_mmal_instance *instance,
- struct vchiq_mmal_port *port,
- int status,
- struct mmal_buffer *buf,
- unsigned long length, u32 mmal_flags, s64 dts, s64 pts)
-{
- struct bm2835_mmal_dev *dev = port->cb_ctx;
-
- v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
- "%s: status:%d, buf:%p, length:%lu, flags %u, pts %lld\n",
- __func__, status, buf, length, mmal_flags, pts);
-
- if (status != 0) {
- /* error in transfer */
- if (buf != NULL) {
- /* there was a buffer with the error so return it */
- vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
- }
- return;
- } else if (length == 0) {
- /* stream ended */
- if (buf != NULL) {
- /* this should only ever happen if the port is
- * disabled and there are buffers still queued
- */
- vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
- pr_debug("Empty buffer");
- } else if (dev->capture.frame_count) {
- /* grab another frame */
- if (is_capturing(dev)) {
- pr_debug("Grab another frame");
- vchiq_mmal_port_parameter_set(
- instance,
- dev->capture.
- camera_port,
- MMAL_PARAMETER_CAPTURE,
- &dev->capture.
- frame_count,
- sizeof(dev->capture.frame_count));
- }
- } else {
- /* signal frame completion */
- complete(&dev->capture.frame_cmplt);
- }
- } else {
- if (dev->capture.frame_count) {
- if (dev->capture.vc_start_timestamp != -1 &&
- pts != 0) {
- struct timeval timestamp;
- s64 runtime_us = pts -
- dev->capture.vc_start_timestamp;
- u32 div = 0;
- u32 rem = 0;
-
- div =
- div_u64_rem(runtime_us, USEC_PER_SEC, &rem);
- timestamp.tv_sec =
- dev->capture.kernel_start_ts.tv_sec + div;
- timestamp.tv_usec =
- dev->capture.kernel_start_ts.tv_usec + rem;
-
- if (timestamp.tv_usec >=
- USEC_PER_SEC) {
- timestamp.tv_sec++;
- timestamp.tv_usec -=
- USEC_PER_SEC;
- }
- v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
- "Convert start time %d.%06d and %llu "
- "with offset %llu to %d.%06d\n",
- (int)dev->capture.kernel_start_ts.
- tv_sec,
- (int)dev->capture.kernel_start_ts.
- tv_usec,
- dev->capture.vc_start_timestamp, pts,
- (int)timestamp.tv_sec,
- (int)timestamp.tv_usec);
- buf->vb.vb2_buf.timestamp = timestamp.tv_sec * 1000000000ULL +
- timestamp.tv_usec * 1000ULL;
- } else {
- buf->vb.vb2_buf.timestamp = ktime_get_ns();
- }
-
- vb2_set_plane_payload(&buf->vb.vb2_buf, 0, length);
- vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
-
- if (mmal_flags & MMAL_BUFFER_HEADER_FLAG_EOS &&
- is_capturing(dev)) {
- v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
- "Grab another frame as buffer has EOS");
- vchiq_mmal_port_parameter_set(
- instance,
- dev->capture.
- camera_port,
- MMAL_PARAMETER_CAPTURE,
- &dev->capture.
- frame_count,
- sizeof(dev->capture.frame_count));
- }
- } else {
- /* signal frame completion */
- vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
- complete(&dev->capture.frame_cmplt);
- }
- }
-}
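buffer_cb() above turns the VideoCore PTS, which counts microseconds from the value sampled at stream start, into a wall-clock buffer timestamp by splitting the offset into whole seconds and leftover microseconds, adding it to the kernel start time, and carrying when the microsecond part overflows. A user-space sketch of that arithmetic with made-up example values:

#include <stdio.h>
#include <stdint.h>

#define USEC_PER_SEC 1000000ULL

int main(void)
{
	/* All values below are made-up examples, not driver data. */
	uint64_t vc_start_us = 1000000;   /* VC time sampled at stream start */
	uint64_t pts_us      = 1033333;   /* VC PTS carried by this buffer   */
	uint64_t start_sec   = 500;       /* kernel start timestamp, seconds */
	uint64_t start_usec  = 990000;    /* kernel start timestamp, usec    */

	uint64_t runtime_us = pts_us - vc_start_us;
	uint64_t sec  = start_sec  + runtime_us / USEC_PER_SEC;
	uint64_t usec = start_usec + runtime_us % USEC_PER_SEC;

	if (usec >= USEC_PER_SEC) {       /* carry, as in buffer_cb() */
		sec++;
		usec -= USEC_PER_SEC;
	}
	printf("buffer timestamp = %llu.%06llu s\n",
	       (unsigned long long)sec, (unsigned long long)usec);
	return 0;
}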
-
-static int enable_camera(struct bm2835_mmal_dev *dev)
-{
- int ret;
-
- if (!dev->camera_use_count) {
- ret = vchiq_mmal_port_parameter_set(
- dev->instance,
- &dev->component[MMAL_COMPONENT_CAMERA]->control,
- MMAL_PARAMETER_CAMERA_NUM, &dev->camera_num,
- sizeof(dev->camera_num));
- if (ret < 0) {
- v4l2_err(&dev->v4l2_dev,
- "Failed setting camera num, ret %d\n", ret);
- return -EINVAL;
- }
-
- ret = vchiq_mmal_component_enable(
- dev->instance,
- dev->component[MMAL_COMPONENT_CAMERA]);
- if (ret < 0) {
- v4l2_err(&dev->v4l2_dev,
- "Failed enabling camera, ret %d\n", ret);
- return -EINVAL;
- }
- }
- dev->camera_use_count++;
- v4l2_dbg(1, bcm2835_v4l2_debug,
- &dev->v4l2_dev, "enabled camera (refcount %d)\n",
- dev->camera_use_count);
- return 0;
-}
-
-static int disable_camera(struct bm2835_mmal_dev *dev)
-{
- int ret;
-
- if (!dev->camera_use_count) {
- v4l2_err(&dev->v4l2_dev,
- "Disabled the camera when already disabled\n");
- return -EINVAL;
- }
- dev->camera_use_count--;
- if (!dev->camera_use_count) {
- unsigned int i = 0xFFFFFFFF;
-
- v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
- "Disabling camera\n");
- ret =
- vchiq_mmal_component_disable(
- dev->instance,
- dev->component[MMAL_COMPONENT_CAMERA]);
- if (ret < 0) {
- v4l2_err(&dev->v4l2_dev,
- "Failed disabling camera, ret %d\n", ret);
- return -EINVAL;
- }
- vchiq_mmal_port_parameter_set(
- dev->instance,
- &dev->component[MMAL_COMPONENT_CAMERA]->control,
- MMAL_PARAMETER_CAMERA_NUM, &i,
- sizeof(i));
- }
- v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
- "Camera refcount now %d\n", dev->camera_use_count);
- return 0;
-}
-
-static void buffer_queue(struct vb2_buffer *vb)
-{
- struct bm2835_mmal_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
- struct vb2_v4l2_buffer *vb2 = to_vb2_v4l2_buffer(vb);
- struct mmal_buffer *buf = container_of(vb2, struct mmal_buffer, vb);
- int ret;
-
- v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
- "%s: dev:%p buf:%p\n", __func__, dev, buf);
-
- buf->buffer = vb2_plane_vaddr(&buf->vb.vb2_buf, 0);
- buf->buffer_size = vb2_plane_size(&buf->vb.vb2_buf, 0);
-
- ret = vchiq_mmal_submit_buffer(dev->instance, dev->capture.port, buf);
- if (ret < 0)
- v4l2_err(&dev->v4l2_dev, "%s: error submitting buffer\n",
- __func__);
-}
-
-static int start_streaming(struct vb2_queue *vq, unsigned int count)
-{
- struct bm2835_mmal_dev *dev = vb2_get_drv_priv(vq);
- int ret;
- int parameter_size;
-
- v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev, "%s: dev:%p\n",
- __func__, dev);
-
- /* ensure a format has actually been set */
- if (dev->capture.port == NULL)
- return -EINVAL;
-
- if (enable_camera(dev) < 0) {
- v4l2_err(&dev->v4l2_dev, "Failed to enable camera\n");
- return -EINVAL;
- }
-
- /*init_completion(&dev->capture.frame_cmplt); */
-
- /* enable frame capture */
- dev->capture.frame_count = 1;
-
- /* if the preview is not already running, wait for a few frames for AGC
- * to settle down.
- */
- if (!dev->component[MMAL_COMPONENT_PREVIEW]->enabled)
- msleep(300);
-
- /* enable the connection from camera to encoder (if applicable) */
- if (dev->capture.camera_port != dev->capture.port
- && dev->capture.camera_port) {
- ret = vchiq_mmal_port_enable(dev->instance,
- dev->capture.camera_port, NULL);
- if (ret) {
- v4l2_err(&dev->v4l2_dev,
- "Failed to enable encode tunnel - error %d\n",
- ret);
- return -1;
- }
- }
-
- /* Get VC timestamp at this point in time */
- parameter_size = sizeof(dev->capture.vc_start_timestamp);
- if (vchiq_mmal_port_parameter_get(dev->instance,
- dev->capture.camera_port,
- MMAL_PARAMETER_SYSTEM_TIME,
- &dev->capture.vc_start_timestamp,
- &parameter_size)) {
- v4l2_err(&dev->v4l2_dev,
- "Failed to get VC start time - update your VC f/w\n");
-
- /* Flag to indicate just to rely on kernel timestamps */
- dev->capture.vc_start_timestamp = -1;
- } else
- v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
- "Start time %lld size %d\n",
- dev->capture.vc_start_timestamp, parameter_size);
-
- v4l2_get_timestamp(&dev->capture.kernel_start_ts);
-
- /* enable the camera port */
- dev->capture.port->cb_ctx = dev;
- ret =
- vchiq_mmal_port_enable(dev->instance, dev->capture.port, buffer_cb);
- if (ret) {
- v4l2_err(&dev->v4l2_dev,
- "Failed to enable capture port - error %d. "
- "Disabling camera port again\n", ret);
-
- vchiq_mmal_port_disable(dev->instance,
- dev->capture.camera_port);
- if (disable_camera(dev) < 0) {
- v4l2_err(&dev->v4l2_dev, "Failed to disable camera\n");
- return -EINVAL;
- }
- return -1;
- }
-
- /* capture the first frame */
- vchiq_mmal_port_parameter_set(dev->instance,
- dev->capture.camera_port,
- MMAL_PARAMETER_CAPTURE,
- &dev->capture.frame_count,
- sizeof(dev->capture.frame_count));
- return 0;
-}
-
-/* abort streaming and wait for last buffer */
-static void stop_streaming(struct vb2_queue *vq)
-{
- int ret;
- struct bm2835_mmal_dev *dev = vb2_get_drv_priv(vq);
-
- v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev, "%s: dev:%p\n",
- __func__, dev);
-
- init_completion(&dev->capture.frame_cmplt);
- dev->capture.frame_count = 0;
-
- /* ensure a format has actually been set */
- if (dev->capture.port == NULL) {
- v4l2_err(&dev->v4l2_dev,
- "no capture port - stream not started?\n");
- return;
- }
-
- v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev, "stopping capturing\n");
-
- /* stop capturing frames */
- vchiq_mmal_port_parameter_set(dev->instance,
- dev->capture.camera_port,
- MMAL_PARAMETER_CAPTURE,
- &dev->capture.frame_count,
- sizeof(dev->capture.frame_count));
-
- /* wait for last frame to complete */
- ret = wait_for_completion_timeout(&dev->capture.frame_cmplt, HZ);
- if (ret <= 0)
- v4l2_err(&dev->v4l2_dev,
- "error %d waiting for frame completion\n", ret);
-
- v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
- "disabling connection\n");
-
- /* disable the connection from camera to encoder */
- ret = vchiq_mmal_port_disable(dev->instance, dev->capture.camera_port);
- if (!ret && dev->capture.camera_port != dev->capture.port) {
- v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
- "disabling port\n");
- ret = vchiq_mmal_port_disable(dev->instance, dev->capture.port);
- } else if (dev->capture.camera_port != dev->capture.port) {
- v4l2_err(&dev->v4l2_dev, "port_disable failed, error %d\n",
- ret);
- }
-
- if (disable_camera(dev) < 0)
- v4l2_err(&dev->v4l2_dev, "Failed to disable camera\n");
-}
-
-static void bm2835_mmal_lock(struct vb2_queue *vq)
-{
- struct bm2835_mmal_dev *dev = vb2_get_drv_priv(vq);
-
- mutex_lock(&dev->mutex);
-}
-
-static void bm2835_mmal_unlock(struct vb2_queue *vq)
-{
- struct bm2835_mmal_dev *dev = vb2_get_drv_priv(vq);
-
- mutex_unlock(&dev->mutex);
-}
-
-static struct vb2_ops bm2835_mmal_video_qops = {
- .queue_setup = queue_setup,
- .buf_prepare = buffer_prepare,
- .buf_queue = buffer_queue,
- .start_streaming = start_streaming,
- .stop_streaming = stop_streaming,
- .wait_prepare = bm2835_mmal_unlock,
- .wait_finish = bm2835_mmal_lock,
-};
-
-/* ------------------------------------------------------------------
- IOCTL operations
- ------------------------------------------------------------------*/
-
-static int set_overlay_params(struct bm2835_mmal_dev *dev,
- struct vchiq_mmal_port *port)
-{
- int ret;
- struct mmal_parameter_displayregion prev_config = {
- .set = MMAL_DISPLAY_SET_LAYER | MMAL_DISPLAY_SET_ALPHA |
- MMAL_DISPLAY_SET_DEST_RECT | MMAL_DISPLAY_SET_FULLSCREEN,
- .layer = PREVIEW_LAYER,
- .alpha = dev->overlay.global_alpha,
- .fullscreen = 0,
- .dest_rect = {
- .x = dev->overlay.w.left,
- .y = dev->overlay.w.top,
- .width = dev->overlay.w.width,
- .height = dev->overlay.w.height,
- },
- };
- ret = vchiq_mmal_port_parameter_set(dev->instance, port,
- MMAL_PARAMETER_DISPLAYREGION,
- &prev_config, sizeof(prev_config));
-
- return ret;
-}
-
-/* overlay ioctl */
-static int vidioc_enum_fmt_vid_overlay(struct file *file, void *priv,
- struct v4l2_fmtdesc *f)
-{
- struct mmal_fmt *fmt;
-
- if (f->index >= ARRAY_SIZE(formats))
- return -EINVAL;
-
- fmt = &formats[f->index];
-
- strlcpy(f->description, fmt->name, sizeof(f->description));
- f->pixelformat = fmt->fourcc;
- f->flags = fmt->flags;
-
- return 0;
-}
-
-static int vidioc_g_fmt_vid_overlay(struct file *file, void *priv,
- struct v4l2_format *f)
-{
- struct bm2835_mmal_dev *dev = video_drvdata(file);
-
- f->fmt.win = dev->overlay;
-
- return 0;
-}
-
-static int vidioc_try_fmt_vid_overlay(struct file *file, void *priv,
- struct v4l2_format *f)
-{
- struct bm2835_mmal_dev *dev = video_drvdata(file);
-
- f->fmt.win.field = V4L2_FIELD_NONE;
- f->fmt.win.chromakey = 0;
- f->fmt.win.clips = NULL;
- f->fmt.win.clipcount = 0;
- f->fmt.win.bitmap = NULL;
-
- v4l_bound_align_image(&f->fmt.win.w.width, MIN_WIDTH, dev->max_width, 1,
- &f->fmt.win.w.height, MIN_HEIGHT, dev->max_height,
- 1, 0);
- v4l_bound_align_image(&f->fmt.win.w.left, MIN_WIDTH, dev->max_width, 1,
- &f->fmt.win.w.top, MIN_HEIGHT, dev->max_height,
- 1, 0);
-
- v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
- "Overlay: Now w/h %dx%d l/t %dx%d\n",
- f->fmt.win.w.width, f->fmt.win.w.height,
- f->fmt.win.w.left, f->fmt.win.w.top);
-
- v4l2_dump_win_format(1,
- bcm2835_v4l2_debug,
- &dev->v4l2_dev,
- &f->fmt.win,
- __func__);
- return 0;
-}
-
-static int vidioc_s_fmt_vid_overlay(struct file *file, void *priv,
- struct v4l2_format *f)
-{
- struct bm2835_mmal_dev *dev = video_drvdata(file);
-
- vidioc_try_fmt_vid_overlay(file, priv, f);
-
- dev->overlay = f->fmt.win;
- if (dev->component[MMAL_COMPONENT_PREVIEW]->enabled) {
- set_overlay_params(dev,
- &dev->component[MMAL_COMPONENT_PREVIEW]->input[0]);
- }
-
- return 0;
-}
-
-static int vidioc_overlay(struct file *file, void *f, unsigned int on)
-{
- int ret;
- struct bm2835_mmal_dev *dev = video_drvdata(file);
- struct vchiq_mmal_port *src;
- struct vchiq_mmal_port *dst;
-
- if ((on && dev->component[MMAL_COMPONENT_PREVIEW]->enabled) ||
- (!on && !dev->component[MMAL_COMPONENT_PREVIEW]->enabled))
- return 0; /* already in requested state */
-
- src =
- &dev->component[MMAL_COMPONENT_CAMERA]->
- output[MMAL_CAMERA_PORT_PREVIEW];
-
- if (!on) {
- /* disconnect preview ports and disable component */
- ret = vchiq_mmal_port_disable(dev->instance, src);
- if (!ret)
- ret =
- vchiq_mmal_port_connect_tunnel(dev->instance, src,
- NULL);
- if (ret >= 0)
- ret = vchiq_mmal_component_disable(
- dev->instance,
- dev->component[MMAL_COMPONENT_PREVIEW]);
-
- disable_camera(dev);
- return ret;
- }
-
- /* set preview port format and connect it to output */
- dst = &dev->component[MMAL_COMPONENT_PREVIEW]->input[0];
-
- ret = vchiq_mmal_port_set_format(dev->instance, src);
- if (ret < 0)
- goto error;
-
- ret = set_overlay_params(dev, dst);
- if (ret < 0)
- goto error;
-
- if (enable_camera(dev) < 0)
- goto error;
-
- ret = vchiq_mmal_component_enable(
- dev->instance,
- dev->component[MMAL_COMPONENT_PREVIEW]);
- if (ret < 0)
- goto error;
-
- v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev, "connecting %p to %p\n",
- src, dst);
- ret = vchiq_mmal_port_connect_tunnel(dev->instance, src, dst);
- if (!ret)
- ret = vchiq_mmal_port_enable(dev->instance, src, NULL);
-error:
- return ret;
-}
-
-static int vidioc_g_fbuf(struct file *file, void *fh,
- struct v4l2_framebuffer *a)
-{
- /* The video overlay must stay within the framebuffer and can't be
- positioned independently. */
- struct bm2835_mmal_dev *dev = video_drvdata(file);
- struct vchiq_mmal_port *preview_port =
- &dev->component[MMAL_COMPONENT_CAMERA]->
- output[MMAL_CAMERA_PORT_PREVIEW];
-
- a->capability = V4L2_FBUF_CAP_EXTERNOVERLAY |
- V4L2_FBUF_CAP_GLOBAL_ALPHA;
- a->flags = V4L2_FBUF_FLAG_OVERLAY;
- a->fmt.width = preview_port->es.video.width;
- a->fmt.height = preview_port->es.video.height;
- a->fmt.pixelformat = V4L2_PIX_FMT_YUV420;
- a->fmt.bytesperline = preview_port->es.video.width;
- a->fmt.sizeimage = (preview_port->es.video.width *
- preview_port->es.video.height * 3) >> 1;
- a->fmt.colorspace = V4L2_COLORSPACE_SMPTE170M;
-
- return 0;
-}
-
-/* input ioctls */
-static int vidioc_enum_input(struct file *file, void *priv,
- struct v4l2_input *inp)
-{
- /* only a single camera input */
- if (inp->index != 0)
- return -EINVAL;
-
- inp->type = V4L2_INPUT_TYPE_CAMERA;
- sprintf(inp->name, "Camera %u", inp->index);
- return 0;
-}
-
-static int vidioc_g_input(struct file *file, void *priv, unsigned int *i)
-{
- *i = 0;
- return 0;
-}
-
-static int vidioc_s_input(struct file *file, void *priv, unsigned int i)
-{
- if (i != 0)
- return -EINVAL;
-
- return 0;
-}
-
-/* capture ioctls */
-static int vidioc_querycap(struct file *file, void *priv,
- struct v4l2_capability *cap)
-{
- struct bm2835_mmal_dev *dev = video_drvdata(file);
- u32 major;
- u32 minor;
-
- vchiq_mmal_version(dev->instance, &major, &minor);
-
- strcpy(cap->driver, "bm2835 mmal");
- snprintf(cap->card, sizeof(cap->card), "mmal service %d.%d",
- major, minor);
-
- snprintf(cap->bus_info, sizeof(cap->bus_info),
- "platform:%s", dev->v4l2_dev.name);
- cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OVERLAY |
- V4L2_CAP_STREAMING | V4L2_CAP_READWRITE;
- cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
-
- return 0;
-}
-
-static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
- struct v4l2_fmtdesc *f)
-{
- struct mmal_fmt *fmt;
-
- if (f->index >= ARRAY_SIZE(formats))
- return -EINVAL;
-
- fmt = &formats[f->index];
-
- strlcpy(f->description, fmt->name, sizeof(f->description));
- f->pixelformat = fmt->fourcc;
- f->flags = fmt->flags;
-
- return 0;
-}
-
-static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
- struct v4l2_format *f)
-{
- struct bm2835_mmal_dev *dev = video_drvdata(file);
-
- f->fmt.pix.width = dev->capture.width;
- f->fmt.pix.height = dev->capture.height;
- f->fmt.pix.field = V4L2_FIELD_NONE;
- f->fmt.pix.pixelformat = dev->capture.fmt->fourcc;
- f->fmt.pix.bytesperline = dev->capture.stride;
- f->fmt.pix.sizeimage = dev->capture.buffersize;
-
- if (dev->capture.fmt->fourcc == V4L2_PIX_FMT_RGB24)
- f->fmt.pix.colorspace = V4L2_COLORSPACE_SRGB;
- else if (dev->capture.fmt->fourcc == V4L2_PIX_FMT_JPEG)
- f->fmt.pix.colorspace = V4L2_COLORSPACE_JPEG;
- else
- f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
- f->fmt.pix.priv = 0;
-
- v4l2_dump_pix_format(1, bcm2835_v4l2_debug, &dev->v4l2_dev, &f->fmt.pix,
- __func__);
- return 0;
-}
-
-static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
- struct v4l2_format *f)
-{
- struct bm2835_mmal_dev *dev = video_drvdata(file);
- struct mmal_fmt *mfmt;
-
- mfmt = get_format(f);
- if (!mfmt) {
- v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
- "Fourcc format (0x%08x) unknown.\n",
- f->fmt.pix.pixelformat);
- f->fmt.pix.pixelformat = formats[0].fourcc;
- mfmt = get_format(f);
- }
-
- f->fmt.pix.field = V4L2_FIELD_NONE;
-
- v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
- "Clipping/aligning %dx%d format %08X\n",
- f->fmt.pix.width, f->fmt.pix.height, f->fmt.pix.pixelformat);
-
- v4l_bound_align_image(&f->fmt.pix.width, MIN_WIDTH, dev->max_width, 1,
- &f->fmt.pix.height, MIN_HEIGHT, dev->max_height,
- 1, 0);
- f->fmt.pix.bytesperline = f->fmt.pix.width * mfmt->ybbp;
-
- /* Image buffer has to be padded to allow for alignment, even though
- * we then remove that padding before delivering the buffer.
- */
- f->fmt.pix.sizeimage = ((f->fmt.pix.height + 15) & ~15) *
- (((f->fmt.pix.width + 31) & ~31) * mfmt->depth) >> 3;
-
- if ((mfmt->flags & V4L2_FMT_FLAG_COMPRESSED) &&
- f->fmt.pix.sizeimage < MIN_BUFFER_SIZE)
- f->fmt.pix.sizeimage = MIN_BUFFER_SIZE;
-
- if (f->fmt.pix.pixelformat == V4L2_PIX_FMT_RGB24)
- f->fmt.pix.colorspace = V4L2_COLORSPACE_SRGB;
- else if (f->fmt.pix.pixelformat == V4L2_PIX_FMT_JPEG)
- f->fmt.pix.colorspace = V4L2_COLORSPACE_JPEG;
- else
- f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
- f->fmt.pix.priv = 0;
-
- v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
- "Now %dx%d format %08X\n",
- f->fmt.pix.width, f->fmt.pix.height, f->fmt.pix.pixelformat);
-
- v4l2_dump_pix_format(1, bcm2835_v4l2_debug, &dev->v4l2_dev, &f->fmt.pix,
- __func__);
- return 0;
-}
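The sizeimage computation in vidioc_try_fmt_vid_cap() pads the height to a multiple of 16 and the width to a multiple of 32 before applying the per-pixel depth, since the firmware wants aligned buffers even though the padding is stripped again before delivery. A worked example, using 1920x1080 at 12 bits per pixel (YUV420) purely for illustration:

#include <stdio.h>

int main(void)
{
	unsigned int width = 1920, height = 1080, depth = 12;  /* example values */

	unsigned int padded_h = (height + 15) & ~15;           /* 1088                   */
	unsigned int padded_w = (width + 31) & ~31;            /* 1920, already aligned  */
	unsigned int sizeimage = (padded_h * (padded_w * depth)) >> 3;

	printf("sizeimage = %u bytes\n", sizeimage);           /* 3133440 */
	return 0;
}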
-
-static int mmal_setup_components(struct bm2835_mmal_dev *dev,
- struct v4l2_format *f)
-{
- int ret;
- struct vchiq_mmal_port *port = NULL, *camera_port = NULL;
- struct vchiq_mmal_component *encode_component = NULL;
- struct mmal_fmt *mfmt = get_format(f);
-
- BUG_ON(!mfmt);
-
- if (dev->capture.encode_component) {
- v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
- "vid_cap - disconnect previous tunnel\n");
-
- /* Disconnect any previous connection */
- vchiq_mmal_port_connect_tunnel(dev->instance,
- dev->capture.camera_port, NULL);
- dev->capture.camera_port = NULL;
- ret = vchiq_mmal_component_disable(dev->instance,
- dev->capture.
- encode_component);
- if (ret)
- v4l2_err(&dev->v4l2_dev,
- "Failed to disable encode component %d\n",
- ret);
-
- dev->capture.encode_component = NULL;
- }
- /* format dependent port setup */
- switch (mfmt->mmal_component) {
- case MMAL_COMPONENT_CAMERA:
- /* Make a further decision on port based on resolution */
- if (f->fmt.pix.width <= max_video_width
- && f->fmt.pix.height <= max_video_height)
- camera_port = port =
- &dev->component[MMAL_COMPONENT_CAMERA]->
- output[MMAL_CAMERA_PORT_VIDEO];
- else
- camera_port = port =
- &dev->component[MMAL_COMPONENT_CAMERA]->
- output[MMAL_CAMERA_PORT_CAPTURE];
- break;
- case MMAL_COMPONENT_IMAGE_ENCODE:
- encode_component = dev->component[MMAL_COMPONENT_IMAGE_ENCODE];
- port = &dev->component[MMAL_COMPONENT_IMAGE_ENCODE]->output[0];
- camera_port =
- &dev->component[MMAL_COMPONENT_CAMERA]->
- output[MMAL_CAMERA_PORT_CAPTURE];
- break;
- case MMAL_COMPONENT_VIDEO_ENCODE:
- encode_component = dev->component[MMAL_COMPONENT_VIDEO_ENCODE];
- port = &dev->component[MMAL_COMPONENT_VIDEO_ENCODE]->output[0];
- camera_port =
- &dev->component[MMAL_COMPONENT_CAMERA]->
- output[MMAL_CAMERA_PORT_VIDEO];
- break;
- default:
- break;
- }
-
- if (!port)
- return -EINVAL;
-
- if (encode_component)
- camera_port->format.encoding = MMAL_ENCODING_OPAQUE;
- else
- camera_port->format.encoding = mfmt->mmal;
-
- if (dev->rgb_bgr_swapped) {
- if (camera_port->format.encoding == MMAL_ENCODING_RGB24)
- camera_port->format.encoding = MMAL_ENCODING_BGR24;
- else if (camera_port->format.encoding == MMAL_ENCODING_BGR24)
- camera_port->format.encoding = MMAL_ENCODING_RGB24;
- }
-
- camera_port->format.encoding_variant = 0;
- camera_port->es.video.width = f->fmt.pix.width;
- camera_port->es.video.height = f->fmt.pix.height;
- camera_port->es.video.crop.x = 0;
- camera_port->es.video.crop.y = 0;
- camera_port->es.video.crop.width = f->fmt.pix.width;
- camera_port->es.video.crop.height = f->fmt.pix.height;
- camera_port->es.video.frame_rate.num = 0;
- camera_port->es.video.frame_rate.den = 1;
- camera_port->es.video.color_space = MMAL_COLOR_SPACE_JPEG_JFIF;
-
- ret = vchiq_mmal_port_set_format(dev->instance, camera_port);
-
- if (!ret
- && camera_port ==
- &dev->component[MMAL_COMPONENT_CAMERA]->
- output[MMAL_CAMERA_PORT_VIDEO]) {
- bool overlay_enabled =
- !!dev->component[MMAL_COMPONENT_PREVIEW]->enabled;
- struct vchiq_mmal_port *preview_port =
- &dev->component[MMAL_COMPONENT_CAMERA]->
- output[MMAL_CAMERA_PORT_PREVIEW];
- /* Preview and encode ports need to match on resolution */
- if (overlay_enabled) {
- /* Need to disable the overlay before we can update
- * the resolution
- */
- ret =
- vchiq_mmal_port_disable(dev->instance,
- preview_port);
- if (!ret)
- ret =
- vchiq_mmal_port_connect_tunnel(
- dev->instance,
- preview_port,
- NULL);
- }
- preview_port->es.video.width = f->fmt.pix.width;
- preview_port->es.video.height = f->fmt.pix.height;
- preview_port->es.video.crop.x = 0;
- preview_port->es.video.crop.y = 0;
- preview_port->es.video.crop.width = f->fmt.pix.width;
- preview_port->es.video.crop.height = f->fmt.pix.height;
- preview_port->es.video.frame_rate.num =
- dev->capture.timeperframe.denominator;
- preview_port->es.video.frame_rate.den =
- dev->capture.timeperframe.numerator;
- ret = vchiq_mmal_port_set_format(dev->instance, preview_port);
- if (overlay_enabled) {
- ret = vchiq_mmal_port_connect_tunnel(
- dev->instance,
- preview_port,
- &dev->component[MMAL_COMPONENT_PREVIEW]->input[0]);
- if (!ret)
- ret = vchiq_mmal_port_enable(dev->instance,
- preview_port,
- NULL);
- }
- }
-
- if (ret) {
- v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
- "%s failed to set format %dx%d %08X\n", __func__,
- f->fmt.pix.width, f->fmt.pix.height,
- f->fmt.pix.pixelformat);
- /* ensure capture is not going to be tried */
- dev->capture.port = NULL;
- } else {
- if (encode_component) {
- v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
- "vid_cap - set up encode comp\n");
-
- /* configure buffering */
- camera_port->current_buffer.size =
- camera_port->recommended_buffer.size;
- camera_port->current_buffer.num =
- camera_port->recommended_buffer.num;
-
- ret =
- vchiq_mmal_port_connect_tunnel(
- dev->instance,
- camera_port,
- &encode_component->input[0]);
- if (ret) {
- v4l2_dbg(1, bcm2835_v4l2_debug,
- &dev->v4l2_dev,
- "%s failed to create connection\n",
- __func__);
- /* ensure capture is not going to be tried */
- dev->capture.port = NULL;
- } else {
- port->es.video.width = f->fmt.pix.width;
- port->es.video.height = f->fmt.pix.height;
- port->es.video.crop.x = 0;
- port->es.video.crop.y = 0;
- port->es.video.crop.width = f->fmt.pix.width;
- port->es.video.crop.height = f->fmt.pix.height;
- port->es.video.frame_rate.num =
- dev->capture.timeperframe.denominator;
- port->es.video.frame_rate.den =
- dev->capture.timeperframe.numerator;
-
- port->format.encoding = mfmt->mmal;
- port->format.encoding_variant = 0;
- /* Set any encoding specific parameters */
- switch (mfmt->mmal_component) {
- case MMAL_COMPONENT_VIDEO_ENCODE:
- port->format.bitrate =
- dev->capture.encode_bitrate;
- break;
- case MMAL_COMPONENT_IMAGE_ENCODE:
- /* Could set EXIF parameters here */
- break;
- default:
- break;
- }
- ret = vchiq_mmal_port_set_format(dev->instance,
- port);
- if (ret)
- v4l2_dbg(1, bcm2835_v4l2_debug,
- &dev->v4l2_dev,
- "%s failed to set format %dx%d fmt %08X\n",
- __func__,
- f->fmt.pix.width,
- f->fmt.pix.height,
- f->fmt.pix.pixelformat
- );
- }
-
- if (!ret) {
- ret = vchiq_mmal_component_enable(
- dev->instance,
- encode_component);
- if (ret) {
- v4l2_dbg(1, bcm2835_v4l2_debug,
- &dev->v4l2_dev,
- "%s Failed to enable encode components\n",
- __func__);
- }
- }
- if (!ret) {
- /* configure buffering */
- port->current_buffer.num = 1;
- port->current_buffer.size =
- f->fmt.pix.sizeimage;
- if (port->format.encoding ==
- MMAL_ENCODING_JPEG) {
- v4l2_dbg(1, bcm2835_v4l2_debug,
- &dev->v4l2_dev,
- "JPG - buf size now %d was %d\n",
- f->fmt.pix.sizeimage,
- port->current_buffer.size);
- port->current_buffer.size =
- (f->fmt.pix.sizeimage <
- (100 << 10))
- ? (100 << 10) : f->fmt.pix.
- sizeimage;
- }
- v4l2_dbg(1, bcm2835_v4l2_debug,
- &dev->v4l2_dev,
- "vid_cap - cur_buf.size set to %d\n",
- f->fmt.pix.sizeimage);
- port->current_buffer.alignment = 0;
- }
- } else {
- /* configure buffering */
- camera_port->current_buffer.num = 1;
- camera_port->current_buffer.size = f->fmt.pix.sizeimage;
- camera_port->current_buffer.alignment = 0;
- }
-
- if (!ret) {
- dev->capture.fmt = mfmt;
- dev->capture.stride = f->fmt.pix.bytesperline;
- dev->capture.width = camera_port->es.video.crop.width;
- dev->capture.height = camera_port->es.video.crop.height;
- dev->capture.buffersize = port->current_buffer.size;
-
- /* select port for capture */
- dev->capture.port = port;
- dev->capture.camera_port = camera_port;
- dev->capture.encode_component = encode_component;
- v4l2_dbg(1, bcm2835_v4l2_debug,
- &dev->v4l2_dev,
- "Set dev->capture.fmt %08X, %dx%d, stride %d, size %d",
- port->format.encoding,
- dev->capture.width, dev->capture.height,
- dev->capture.stride, dev->capture.buffersize);
- }
- }
-
- /* todo: Need to convert the vchiq/mmal error into a v4l2 error. */
- return ret;
-}
-
-static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
- struct v4l2_format *f)
-{
- int ret;
- struct bm2835_mmal_dev *dev = video_drvdata(file);
- struct mmal_fmt *mfmt;
-
- /* try the format to set valid parameters */
- ret = vidioc_try_fmt_vid_cap(file, priv, f);
- if (ret) {
- v4l2_err(&dev->v4l2_dev,
- "vid_cap - vidioc_try_fmt_vid_cap failed\n");
- return ret;
- }
-
- /* if a capture is running refuse to set format */
- if (vb2_is_busy(&dev->capture.vb_vidq)) {
- v4l2_info(&dev->v4l2_dev, "%s device busy\n", __func__);
- return -EBUSY;
- }
-
-	/* If the format is unsupported, v4l2 says we should switch to
- * a supported one and not return an error. */
- mfmt = get_format(f);
- if (!mfmt) {
- v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
- "Fourcc format (0x%08x) unknown.\n",
- f->fmt.pix.pixelformat);
- f->fmt.pix.pixelformat = formats[0].fourcc;
- mfmt = get_format(f);
- }
-
- ret = mmal_setup_components(dev, f);
- if (ret != 0) {
- v4l2_err(&dev->v4l2_dev,
- "%s: failed to setup mmal components: %d\n",
- __func__, ret);
- ret = -EINVAL;
- }
-
- return ret;
-}
-
-static int vidioc_enum_framesizes(struct file *file, void *fh,
- struct v4l2_frmsizeenum *fsize)
-{
- struct bm2835_mmal_dev *dev = video_drvdata(file);
- static const struct v4l2_frmsize_stepwise sizes = {
- MIN_WIDTH, 0, 2,
- MIN_HEIGHT, 0, 2
- };
- int i;
-
- if (fsize->index)
- return -EINVAL;
- for (i = 0; i < ARRAY_SIZE(formats); i++)
- if (formats[i].fourcc == fsize->pixel_format)
- break;
- if (i == ARRAY_SIZE(formats))
- return -EINVAL;
- fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE;
- fsize->stepwise = sizes;
- fsize->stepwise.max_width = dev->max_width;
- fsize->stepwise.max_height = dev->max_height;
- return 0;
-}
-
-/* timeperframe is arbitrary and continuous */
-static int vidioc_enum_frameintervals(struct file *file, void *priv,
- struct v4l2_frmivalenum *fival)
-{
- struct bm2835_mmal_dev *dev = video_drvdata(file);
- int i;
-
- if (fival->index)
- return -EINVAL;
-
- for (i = 0; i < ARRAY_SIZE(formats); i++)
- if (formats[i].fourcc == fival->pixel_format)
- break;
- if (i == ARRAY_SIZE(formats))
- return -EINVAL;
-
- /* regarding width & height - we support any within range */
- if (fival->width < MIN_WIDTH || fival->width > dev->max_width ||
- fival->height < MIN_HEIGHT || fival->height > dev->max_height)
- return -EINVAL;
-
- fival->type = V4L2_FRMIVAL_TYPE_CONTINUOUS;
-
- /* fill in stepwise (step=1.0 is required by V4L2 spec) */
- fival->stepwise.min = tpf_min;
- fival->stepwise.max = tpf_max;
- fival->stepwise.step = (struct v4l2_fract) {1, 1};
-
- return 0;
-}
-
-static int vidioc_g_parm(struct file *file, void *priv,
- struct v4l2_streamparm *parm)
-{
- struct bm2835_mmal_dev *dev = video_drvdata(file);
-
- if (parm->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
- return -EINVAL;
-
- parm->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
- parm->parm.capture.timeperframe = dev->capture.timeperframe;
- parm->parm.capture.readbuffers = 1;
- return 0;
-}
-
-#define FRACT_CMP(a, OP, b) \
- ((u64)(a).numerator * (b).denominator OP \
- (u64)(b).numerator * (a).denominator)
-
-static int vidioc_s_parm(struct file *file, void *priv,
- struct v4l2_streamparm *parm)
-{
- struct bm2835_mmal_dev *dev = video_drvdata(file);
- struct v4l2_fract tpf;
- struct mmal_parameter_rational fps_param;
-
- if (parm->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
- return -EINVAL;
-
- tpf = parm->parm.capture.timeperframe;
-
- /* tpf: {*, 0} resets timing; clip to [min, max]*/
- tpf = tpf.denominator ? tpf : tpf_default;
- tpf = FRACT_CMP(tpf, <, tpf_min) ? tpf_min : tpf;
- tpf = FRACT_CMP(tpf, >, tpf_max) ? tpf_max : tpf;
-
- dev->capture.timeperframe = tpf;
- parm->parm.capture.timeperframe = tpf;
- parm->parm.capture.readbuffers = 1;
- parm->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
-
- fps_param.num = 0; /* Select variable fps, and then use
- * FPS_RANGE to select the actual limits.
- */
- fps_param.den = 1;
- set_framerate_params(dev);
-
- return 0;
-}
-
-static const struct v4l2_ioctl_ops camera0_ioctl_ops = {
- /* overlay */
- .vidioc_enum_fmt_vid_overlay = vidioc_enum_fmt_vid_overlay,
- .vidioc_g_fmt_vid_overlay = vidioc_g_fmt_vid_overlay,
- .vidioc_try_fmt_vid_overlay = vidioc_try_fmt_vid_overlay,
- .vidioc_s_fmt_vid_overlay = vidioc_s_fmt_vid_overlay,
- .vidioc_overlay = vidioc_overlay,
- .vidioc_g_fbuf = vidioc_g_fbuf,
-
- /* inputs */
- .vidioc_enum_input = vidioc_enum_input,
- .vidioc_g_input = vidioc_g_input,
- .vidioc_s_input = vidioc_s_input,
-
- /* capture */
- .vidioc_querycap = vidioc_querycap,
- .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
- .vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap,
- .vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap,
- .vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap,
-
- /* buffer management */
- .vidioc_reqbufs = vb2_ioctl_reqbufs,
- .vidioc_create_bufs = vb2_ioctl_create_bufs,
- .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
- .vidioc_querybuf = vb2_ioctl_querybuf,
- .vidioc_qbuf = vb2_ioctl_qbuf,
- .vidioc_dqbuf = vb2_ioctl_dqbuf,
- .vidioc_enum_framesizes = vidioc_enum_framesizes,
- .vidioc_enum_frameintervals = vidioc_enum_frameintervals,
- .vidioc_g_parm = vidioc_g_parm,
- .vidioc_s_parm = vidioc_s_parm,
- .vidioc_streamon = vb2_ioctl_streamon,
- .vidioc_streamoff = vb2_ioctl_streamoff,
-
- .vidioc_log_status = v4l2_ctrl_log_status,
- .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
- .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
-};
-
-static const struct v4l2_ioctl_ops camera0_ioctl_ops_gstreamer = {
- /* overlay */
- .vidioc_enum_fmt_vid_overlay = vidioc_enum_fmt_vid_overlay,
- .vidioc_g_fmt_vid_overlay = vidioc_g_fmt_vid_overlay,
- .vidioc_try_fmt_vid_overlay = vidioc_try_fmt_vid_overlay,
- .vidioc_s_fmt_vid_overlay = vidioc_s_fmt_vid_overlay,
- .vidioc_overlay = vidioc_overlay,
- .vidioc_g_fbuf = vidioc_g_fbuf,
-
- /* inputs */
- .vidioc_enum_input = vidioc_enum_input,
- .vidioc_g_input = vidioc_g_input,
- .vidioc_s_input = vidioc_s_input,
-
- /* capture */
- .vidioc_querycap = vidioc_querycap,
- .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
- .vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap,
- .vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap,
- .vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap,
-
- /* buffer management */
- .vidioc_reqbufs = vb2_ioctl_reqbufs,
- .vidioc_create_bufs = vb2_ioctl_create_bufs,
- .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
- .vidioc_querybuf = vb2_ioctl_querybuf,
- .vidioc_qbuf = vb2_ioctl_qbuf,
- .vidioc_dqbuf = vb2_ioctl_dqbuf,
- /* Remove this function ptr to fix gstreamer bug
- .vidioc_enum_framesizes = vidioc_enum_framesizes, */
- .vidioc_enum_frameintervals = vidioc_enum_frameintervals,
- .vidioc_g_parm = vidioc_g_parm,
- .vidioc_s_parm = vidioc_s_parm,
- .vidioc_streamon = vb2_ioctl_streamon,
- .vidioc_streamoff = vb2_ioctl_streamoff,
-
- .vidioc_log_status = v4l2_ctrl_log_status,
- .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
- .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
-};
-
-/* ------------------------------------------------------------------
- Driver init/finalise
- ------------------------------------------------------------------*/
-
-static const struct v4l2_file_operations camera0_fops = {
- .owner = THIS_MODULE,
- .open = v4l2_fh_open,
- .release = vb2_fop_release,
- .read = vb2_fop_read,
- .poll = vb2_fop_poll,
- .unlocked_ioctl = video_ioctl2, /* V4L2 ioctl handler */
- .mmap = vb2_fop_mmap,
-};
-
-static struct video_device vdev_template = {
- .name = "camera0",
- .fops = &camera0_fops,
- .ioctl_ops = &camera0_ioctl_ops,
- .release = video_device_release_empty,
-};
-
-/* Returns the number of cameras, and also the max resolution supported
- * by those cameras.
- */
-static int get_num_cameras(struct vchiq_mmal_instance *instance,
- unsigned int resolutions[][2], int num_resolutions)
-{
- int ret;
- struct vchiq_mmal_component *cam_info_component;
- struct mmal_parameter_camera_info_t cam_info = {0};
- int param_size = sizeof(cam_info);
- int i;
-
- /* create a camera_info component */
- ret = vchiq_mmal_component_init(instance, "camera_info",
- &cam_info_component);
- if (ret < 0)
- /* Unusual failure - let's guess one camera. */
- return 1;
-
- if (vchiq_mmal_port_parameter_get(instance,
- &cam_info_component->control,
- MMAL_PARAMETER_CAMERA_INFO,
- &cam_info,
- &param_size)) {
- pr_info("Failed to get camera info\n");
- }
- for (i = 0;
- i < (cam_info.num_cameras > num_resolutions ?
- num_resolutions :
- cam_info.num_cameras);
- i++) {
- resolutions[i][0] = cam_info.cameras[i].max_width;
- resolutions[i][1] = cam_info.cameras[i].max_height;
- }
-
- vchiq_mmal_component_finalise(instance,
- cam_info_component);
-
- return cam_info.num_cameras;
-}
-
-static int set_camera_parameters(struct vchiq_mmal_instance *instance,
- struct vchiq_mmal_component *camera,
- struct bm2835_mmal_dev *dev)
-{
- int ret;
- struct mmal_parameter_camera_config cam_config = {
- .max_stills_w = dev->max_width,
- .max_stills_h = dev->max_height,
- .stills_yuv422 = 1,
- .one_shot_stills = 1,
- .max_preview_video_w = (max_video_width > 1920) ?
- max_video_width : 1920,
- .max_preview_video_h = (max_video_height > 1088) ?
- max_video_height : 1088,
- .num_preview_video_frames = 3,
- .stills_capture_circular_buffer_height = 0,
- .fast_preview_resume = 0,
- .use_stc_timestamp = MMAL_PARAM_TIMESTAMP_MODE_RAW_STC
- };
-
- ret = vchiq_mmal_port_parameter_set(instance, &camera->control,
- MMAL_PARAMETER_CAMERA_CONFIG,
- &cam_config, sizeof(cam_config));
- return ret;
-}
-
-#define MAX_SUPPORTED_ENCODINGS 20
-
-/* MMAL instance and component init */
-static int __init mmal_init(struct bm2835_mmal_dev *dev)
-{
- int ret;
- struct mmal_es_format *format;
- u32 bool_true = 1;
- u32 supported_encodings[MAX_SUPPORTED_ENCODINGS];
- int param_size;
- struct vchiq_mmal_component *camera;
-
- ret = vchiq_mmal_init(&dev->instance);
- if (ret < 0)
- return ret;
-
- /* get the camera component ready */
- ret = vchiq_mmal_component_init(dev->instance, "ril.camera",
- &dev->component[MMAL_COMPONENT_CAMERA]);
- if (ret < 0)
- goto unreg_mmal;
-
- camera = dev->component[MMAL_COMPONENT_CAMERA];
- if (camera->outputs < MMAL_CAMERA_PORT_COUNT) {
- ret = -EINVAL;
- goto unreg_camera;
- }
-
- ret = set_camera_parameters(dev->instance,
- camera,
- dev);
- if (ret < 0)
- goto unreg_camera;
-
- /* There was an error in the firmware that meant the camera component
- * produced BGR instead of RGB.
-	 * This is now fixed, but in order to support old firmware versions,
-	 * we have to check.
- */
- dev->rgb_bgr_swapped = true;
- param_size = sizeof(supported_encodings);
- ret = vchiq_mmal_port_parameter_get(dev->instance,
- &camera->output[MMAL_CAMERA_PORT_CAPTURE],
- MMAL_PARAMETER_SUPPORTED_ENCODINGS,
- &supported_encodings,
- &param_size);
- if (ret == 0) {
- int i;
-
- for (i = 0; i < param_size / sizeof(u32); i++) {
- if (supported_encodings[i] == MMAL_ENCODING_BGR24) {
- /* Found BGR24 first - old firmware. */
- break;
- }
- if (supported_encodings[i] == MMAL_ENCODING_RGB24) {
- /* Found RGB24 first
- * new firmware, so use RGB24.
- */
- dev->rgb_bgr_swapped = false;
- break;
- }
- }
- }
- format = &camera->output[MMAL_CAMERA_PORT_PREVIEW].format;
-
- format->encoding = MMAL_ENCODING_OPAQUE;
- format->encoding_variant = MMAL_ENCODING_I420;
-
- format->es->video.width = 1024;
- format->es->video.height = 768;
- format->es->video.crop.x = 0;
- format->es->video.crop.y = 0;
- format->es->video.crop.width = 1024;
- format->es->video.crop.height = 768;
- format->es->video.frame_rate.num = 0; /* Rely on fps_range */
- format->es->video.frame_rate.den = 1;
-
- format = &camera->output[MMAL_CAMERA_PORT_VIDEO].format;
-
- format->encoding = MMAL_ENCODING_OPAQUE;
- format->encoding_variant = MMAL_ENCODING_I420;
-
- format->es->video.width = 1024;
- format->es->video.height = 768;
- format->es->video.crop.x = 0;
- format->es->video.crop.y = 0;
- format->es->video.crop.width = 1024;
- format->es->video.crop.height = 768;
- format->es->video.frame_rate.num = 0; /* Rely on fps_range */
- format->es->video.frame_rate.den = 1;
-
- vchiq_mmal_port_parameter_set(dev->instance,
- &camera->output[MMAL_CAMERA_PORT_VIDEO],
- MMAL_PARAMETER_NO_IMAGE_PADDING,
- &bool_true, sizeof(bool_true));
-
- format = &camera->output[MMAL_CAMERA_PORT_CAPTURE].format;
-
- format->encoding = MMAL_ENCODING_OPAQUE;
-
- format->es->video.width = 2592;
- format->es->video.height = 1944;
- format->es->video.crop.x = 0;
- format->es->video.crop.y = 0;
- format->es->video.crop.width = 2592;
- format->es->video.crop.height = 1944;
- format->es->video.frame_rate.num = 0; /* Rely on fps_range */
- format->es->video.frame_rate.den = 1;
-
- dev->capture.width = format->es->video.width;
- dev->capture.height = format->es->video.height;
- dev->capture.fmt = &formats[0];
- dev->capture.encode_component = NULL;
- dev->capture.timeperframe = tpf_default;
- dev->capture.enc_profile = V4L2_MPEG_VIDEO_H264_PROFILE_HIGH;
- dev->capture.enc_level = V4L2_MPEG_VIDEO_H264_LEVEL_4_0;
-
- vchiq_mmal_port_parameter_set(dev->instance,
- &camera->output[MMAL_CAMERA_PORT_CAPTURE],
- MMAL_PARAMETER_NO_IMAGE_PADDING,
- &bool_true, sizeof(bool_true));
-
- /* get the preview component ready */
- ret = vchiq_mmal_component_init(
- dev->instance, "ril.video_render",
- &dev->component[MMAL_COMPONENT_PREVIEW]);
- if (ret < 0)
- goto unreg_camera;
-
- if (dev->component[MMAL_COMPONENT_PREVIEW]->inputs < 1) {
- ret = -EINVAL;
- pr_debug("too few input ports %d needed %d\n",
- dev->component[MMAL_COMPONENT_PREVIEW]->inputs, 1);
- goto unreg_preview;
- }
-
- /* get the image encoder component ready */
- ret = vchiq_mmal_component_init(
- dev->instance, "ril.image_encode",
- &dev->component[MMAL_COMPONENT_IMAGE_ENCODE]);
- if (ret < 0)
- goto unreg_preview;
-
- if (dev->component[MMAL_COMPONENT_IMAGE_ENCODE]->inputs < 1) {
- ret = -EINVAL;
- v4l2_err(&dev->v4l2_dev, "too few input ports %d needed %d\n",
- dev->component[MMAL_COMPONENT_IMAGE_ENCODE]->inputs,
- 1);
- goto unreg_image_encoder;
- }
-
- /* get the video encoder component ready */
- ret = vchiq_mmal_component_init(dev->instance, "ril.video_encode",
- &dev->
- component[MMAL_COMPONENT_VIDEO_ENCODE]);
- if (ret < 0)
- goto unreg_image_encoder;
-
- if (dev->component[MMAL_COMPONENT_VIDEO_ENCODE]->inputs < 1) {
- ret = -EINVAL;
- v4l2_err(&dev->v4l2_dev, "too few input ports %d needed %d\n",
- dev->component[MMAL_COMPONENT_VIDEO_ENCODE]->inputs,
- 1);
- goto unreg_vid_encoder;
- }
-
- {
- struct vchiq_mmal_port *encoder_port =
- &dev->component[MMAL_COMPONENT_VIDEO_ENCODE]->output[0];
- encoder_port->format.encoding = MMAL_ENCODING_H264;
- ret = vchiq_mmal_port_set_format(dev->instance,
- encoder_port);
- }
-
- {
- unsigned int enable = 1;
-
- vchiq_mmal_port_parameter_set(
- dev->instance,
- &dev->component[MMAL_COMPONENT_VIDEO_ENCODE]->control,
- MMAL_PARAMETER_VIDEO_IMMUTABLE_INPUT,
- &enable, sizeof(enable));
-
- vchiq_mmal_port_parameter_set(dev->instance,
- &dev->component[MMAL_COMPONENT_VIDEO_ENCODE]->control,
- MMAL_PARAMETER_MINIMISE_FRAGMENTATION,
- &enable,
- sizeof(enable));
- }
- ret = bm2835_mmal_set_all_camera_controls(dev);
- if (ret < 0)
- goto unreg_vid_encoder;
-
- return 0;
-
-unreg_vid_encoder:
- pr_err("Cleanup: Destroy video encoder\n");
- vchiq_mmal_component_finalise(
- dev->instance,
- dev->component[MMAL_COMPONENT_VIDEO_ENCODE]);
-
-unreg_image_encoder:
- pr_err("Cleanup: Destroy image encoder\n");
- vchiq_mmal_component_finalise(
- dev->instance,
- dev->component[MMAL_COMPONENT_IMAGE_ENCODE]);
-
-unreg_preview:
- pr_err("Cleanup: Destroy video render\n");
- vchiq_mmal_component_finalise(dev->instance,
- dev->component[MMAL_COMPONENT_PREVIEW]);
-
-unreg_camera:
- pr_err("Cleanup: Destroy camera\n");
- vchiq_mmal_component_finalise(dev->instance,
- dev->component[MMAL_COMPONENT_CAMERA]);
-
-unreg_mmal:
- vchiq_mmal_finalise(dev->instance);
- return ret;
-}
-
-static int __init bm2835_mmal_init_device(struct bm2835_mmal_dev *dev,
- struct video_device *vfd)
-{
- int ret;
-
- *vfd = vdev_template;
- if (gst_v4l2src_is_broken) {
- v4l2_info(&dev->v4l2_dev,
- "Work-around for gstreamer issue is active.\n");
- vfd->ioctl_ops = &camera0_ioctl_ops_gstreamer;
- }
-
- vfd->v4l2_dev = &dev->v4l2_dev;
-
- vfd->lock = &dev->mutex;
-
- vfd->queue = &dev->capture.vb_vidq;
-
- /* video device needs to be able to access instance data */
- video_set_drvdata(vfd, dev);
-
- ret = video_register_device(vfd,
- VFL_TYPE_GRABBER,
- video_nr[dev->camera_num]);
- if (ret < 0)
- return ret;
-
- v4l2_info(vfd->v4l2_dev,
- "V4L2 device registered as %s - stills mode > %dx%d\n",
- video_device_node_name(vfd),
- max_video_width, max_video_height);
-
- return 0;
-}
-
-static void bcm2835_cleanup_instance(struct bm2835_mmal_dev *dev)
-{
- if (!dev)
- return;
-
- v4l2_info(&dev->v4l2_dev, "unregistering %s\n",
- video_device_node_name(&dev->vdev));
-
- video_unregister_device(&dev->vdev);
-
- if (dev->capture.encode_component) {
- v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
- "mmal_exit - disconnect tunnel\n");
- vchiq_mmal_port_connect_tunnel(dev->instance,
- dev->capture.camera_port, NULL);
- vchiq_mmal_component_disable(dev->instance,
- dev->capture.encode_component);
- }
- vchiq_mmal_component_disable(dev->instance,
- dev->component[MMAL_COMPONENT_CAMERA]);
-
- vchiq_mmal_component_finalise(dev->instance,
- dev->
- component[MMAL_COMPONENT_VIDEO_ENCODE]);
-
- vchiq_mmal_component_finalise(dev->instance,
- dev->
- component[MMAL_COMPONENT_IMAGE_ENCODE]);
-
- vchiq_mmal_component_finalise(dev->instance,
- dev->component[MMAL_COMPONENT_PREVIEW]);
-
- vchiq_mmal_component_finalise(dev->instance,
- dev->component[MMAL_COMPONENT_CAMERA]);
-
- v4l2_ctrl_handler_free(&dev->ctrl_handler);
-
- v4l2_device_unregister(&dev->v4l2_dev);
-
- kfree(dev);
-}
-
-static struct v4l2_format default_v4l2_format = {
- .fmt.pix.pixelformat = V4L2_PIX_FMT_JPEG,
- .fmt.pix.width = 1024,
- .fmt.pix.bytesperline = 0,
- .fmt.pix.height = 768,
- .fmt.pix.sizeimage = 1024 * 768,
-};
-
-static int __init bm2835_mmal_init(void)
-{
- int ret;
- struct bm2835_mmal_dev *dev;
- struct vb2_queue *q;
- int camera;
- unsigned int num_cameras;
- struct vchiq_mmal_instance *instance;
- unsigned int resolutions[MAX_BCM2835_CAMERAS][2];
-
- ret = vchiq_mmal_init(&instance);
- if (ret < 0)
- return ret;
-
- num_cameras = get_num_cameras(instance,
- resolutions,
- MAX_BCM2835_CAMERAS);
- if (num_cameras > MAX_BCM2835_CAMERAS)
- num_cameras = MAX_BCM2835_CAMERAS;
-
- for (camera = 0; camera < num_cameras; camera++) {
- dev = kzalloc(sizeof(struct bm2835_mmal_dev), GFP_KERNEL);
- if (!dev)
- return -ENOMEM;
-
- dev->camera_num = camera;
- dev->max_width = resolutions[camera][0];
- dev->max_height = resolutions[camera][1];
-
- /* setup device defaults */
- dev->overlay.w.left = 150;
- dev->overlay.w.top = 50;
- dev->overlay.w.width = 1024;
- dev->overlay.w.height = 768;
- dev->overlay.clipcount = 0;
- dev->overlay.field = V4L2_FIELD_NONE;
- dev->overlay.global_alpha = 255;
-
- dev->capture.fmt = &formats[3]; /* JPEG */
-
- /* v4l device registration */
- snprintf(dev->v4l2_dev.name, sizeof(dev->v4l2_dev.name),
- "%s", BM2835_MMAL_MODULE_NAME);
- ret = v4l2_device_register(NULL, &dev->v4l2_dev);
- if (ret)
- goto free_dev;
-
- /* setup v4l controls */
- ret = bm2835_mmal_init_controls(dev, &dev->ctrl_handler);
- if (ret < 0)
- goto unreg_dev;
- dev->v4l2_dev.ctrl_handler = &dev->ctrl_handler;
-
- /* mmal init */
- dev->instance = instance;
- ret = mmal_init(dev);
- if (ret < 0)
- goto unreg_dev;
-
- /* initialize queue */
- q = &dev->capture.vb_vidq;
- memset(q, 0, sizeof(*q));
- q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_READ;
- q->drv_priv = dev;
- q->buf_struct_size = sizeof(struct mmal_buffer);
- q->ops = &bm2835_mmal_video_qops;
- q->mem_ops = &vb2_vmalloc_memops;
- q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
- ret = vb2_queue_init(q);
- if (ret < 0)
- goto unreg_dev;
-
- /* v4l2 core mutex used to protect all fops and v4l2 ioctls. */
- mutex_init(&dev->mutex);
-
- /* initialise video devices */
- ret = bm2835_mmal_init_device(dev, &dev->vdev);
- if (ret < 0)
- goto unreg_dev;
-
- /* Really want to call vidioc_s_fmt_vid_cap with the default
- * format, but currently the APIs don't join up.
- */
- ret = mmal_setup_components(dev, &default_v4l2_format);
- if (ret < 0) {
- v4l2_err(&dev->v4l2_dev,
- "%s: could not setup components\n", __func__);
- goto unreg_dev;
- }
-
- v4l2_info(&dev->v4l2_dev,
- "Broadcom 2835 MMAL video capture ver %s loaded.\n",
- BM2835_MMAL_VERSION);
-
- gdev[camera] = dev;
- }
- return 0;
-
-unreg_dev:
- v4l2_ctrl_handler_free(&dev->ctrl_handler);
- v4l2_device_unregister(&dev->v4l2_dev);
-
-free_dev:
- kfree(dev);
-
- for ( ; camera > 0; camera--) {
- bcm2835_cleanup_instance(gdev[camera]);
- gdev[camera] = NULL;
- }
- pr_info("%s: error %d while loading driver\n",
- BM2835_MMAL_MODULE_NAME, ret);
-
- return ret;
-}
-
-static void __exit bm2835_mmal_exit(void)
-{
- int camera;
- struct vchiq_mmal_instance *instance = gdev[0]->instance;
-
- for (camera = 0; camera < MAX_BCM2835_CAMERAS; camera++) {
- bcm2835_cleanup_instance(gdev[camera]);
- gdev[camera] = NULL;
- }
- vchiq_mmal_finalise(instance);
-}
-
-module_init(bm2835_mmal_init);
-module_exit(bm2835_mmal_exit);
diff --git a/drivers/staging/media/platform/bcm2835/bcm2835-camera.h b/drivers/staging/media/platform/bcm2835/bcm2835-camera.h
deleted file mode 100644
index e6aeb7e7e381..000000000000
--- a/drivers/staging/media/platform/bcm2835/bcm2835-camera.h
+++ /dev/null
@@ -1,145 +0,0 @@
-/*
- * Broadcom BM2835 V4L2 driver
- *
- * Copyright © 2013 Raspberry Pi (Trading) Ltd.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file COPYING in the main directory of this archive
- * for more details.
- *
- * Authors: Vincent Sanders <vincent.sanders@collabora.co.uk>
- * Dave Stevenson <dsteve@broadcom.com>
- * Simon Mellor <simellor@broadcom.com>
- * Luke Diamand <luked@broadcom.com>
- *
- * core driver device
- */
-
-#define V4L2_CTRL_COUNT 29 /* number of v4l controls */
-
-enum {
- MMAL_COMPONENT_CAMERA = 0,
- MMAL_COMPONENT_PREVIEW,
- MMAL_COMPONENT_IMAGE_ENCODE,
- MMAL_COMPONENT_VIDEO_ENCODE,
- MMAL_COMPONENT_COUNT
-};
-
-enum {
- MMAL_CAMERA_PORT_PREVIEW = 0,
- MMAL_CAMERA_PORT_VIDEO,
- MMAL_CAMERA_PORT_CAPTURE,
- MMAL_CAMERA_PORT_COUNT
-};
-
-#define PREVIEW_LAYER 2
-
-extern int bcm2835_v4l2_debug;
-
-struct bm2835_mmal_dev {
- /* v4l2 devices */
- struct v4l2_device v4l2_dev;
- struct video_device vdev;
- struct mutex mutex;
-
- /* controls */
- struct v4l2_ctrl_handler ctrl_handler;
- struct v4l2_ctrl *ctrls[V4L2_CTRL_COUNT];
- enum v4l2_scene_mode scene_mode;
- struct mmal_colourfx colourfx;
- int hflip;
- int vflip;
- int red_gain;
- int blue_gain;
- enum mmal_parameter_exposuremode exposure_mode_user;
- enum v4l2_exposure_auto_type exposure_mode_v4l2_user;
- /* active exposure mode may differ if selected via a scene mode */
- enum mmal_parameter_exposuremode exposure_mode_active;
- enum mmal_parameter_exposuremeteringmode metering_mode;
- unsigned int manual_shutter_speed;
- bool exp_auto_priority;
- bool manual_iso_enabled;
- uint32_t iso;
-
- /* allocated mmal instance and components */
- struct vchiq_mmal_instance *instance;
- struct vchiq_mmal_component *component[MMAL_COMPONENT_COUNT];
- int camera_use_count;
-
- struct v4l2_window overlay;
-
- struct {
- unsigned int width; /* width */
- unsigned int height; /* height */
- unsigned int stride; /* stride */
- unsigned int buffersize; /* buffer size with padding */
- struct mmal_fmt *fmt;
- struct v4l2_fract timeperframe;
-
- /* H264 encode bitrate */
- int encode_bitrate;
- /* H264 bitrate mode. CBR/VBR */
- int encode_bitrate_mode;
- /* H264 profile */
- enum v4l2_mpeg_video_h264_profile enc_profile;
- /* H264 level */
- enum v4l2_mpeg_video_h264_level enc_level;
- /* JPEG Q-factor */
- int q_factor;
-
- struct vb2_queue vb_vidq;
-
- /* VC start timestamp for streaming */
- s64 vc_start_timestamp;
- /* Kernel start timestamp for streaming */
- struct timeval kernel_start_ts;
-
- struct vchiq_mmal_port *port; /* port being used for capture */
- /* camera port being used for capture */
- struct vchiq_mmal_port *camera_port;
- /* component being used for encode */
- struct vchiq_mmal_component *encode_component;
- /* number of frames remaining which driver should capture */
- unsigned int frame_count;
- /* last frame completion */
- struct completion frame_cmplt;
-
- } capture;
-
- unsigned int camera_num;
- unsigned int max_width;
- unsigned int max_height;
- unsigned int rgb_bgr_swapped;
-};
-
-int bm2835_mmal_init_controls(
- struct bm2835_mmal_dev *dev,
- struct v4l2_ctrl_handler *hdl);
-
-int bm2835_mmal_set_all_camera_controls(struct bm2835_mmal_dev *dev);
-int set_framerate_params(struct bm2835_mmal_dev *dev);
-
-/* Debug helpers */
-
-#define v4l2_dump_pix_format(level, debug, dev, pix_fmt, desc) \
-{ \
- v4l2_dbg(level, debug, dev, \
-"%s: w %u h %u field %u pfmt 0x%x bpl %u sz_img %u colorspace 0x%x priv %u\n", \
- desc == NULL ? "" : desc, \
- (pix_fmt)->width, (pix_fmt)->height, (pix_fmt)->field, \
- (pix_fmt)->pixelformat, (pix_fmt)->bytesperline, \
- (pix_fmt)->sizeimage, (pix_fmt)->colorspace, (pix_fmt)->priv); \
-}
-#define v4l2_dump_win_format(level, debug, dev, win_fmt, desc) \
-{ \
- v4l2_dbg(level, debug, dev, \
-"%s: w %u h %u l %u t %u field %u chromakey %06X clip %p " \
-"clipcount %u bitmap %p\n", \
- desc == NULL ? "" : desc, \
- (win_fmt)->w.width, (win_fmt)->w.height, \
- (win_fmt)->w.left, (win_fmt)->w.top, \
- (win_fmt)->field, \
- (win_fmt)->chromakey, \
- (win_fmt)->clips, (win_fmt)->clipcount, \
- (win_fmt)->bitmap); \
-}
diff --git a/drivers/staging/media/platform/bcm2835/controls.c b/drivers/staging/media/platform/bcm2835/controls.c
deleted file mode 100644
index a40987b2e75d..000000000000
--- a/drivers/staging/media/platform/bcm2835/controls.c
+++ /dev/null
@@ -1,1335 +0,0 @@
-/*
- * Broadcom BM2835 V4L2 driver
- *
- * Copyright © 2013 Raspberry Pi (Trading) Ltd.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file COPYING in the main directory of this archive
- * for more details.
- *
- * Authors: Vincent Sanders <vincent.sanders@collabora.co.uk>
- * Dave Stevenson <dsteve@broadcom.com>
- * Simon Mellor <simellor@broadcom.com>
- * Luke Diamand <luked@broadcom.com>
- */
-
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <media/videobuf2-vmalloc.h>
-#include <media/v4l2-device.h>
-#include <media/v4l2-ioctl.h>
-#include <media/v4l2-ctrls.h>
-#include <media/v4l2-fh.h>
-#include <media/v4l2-event.h>
-#include <media/v4l2-common.h>
-
-#include "mmal-common.h"
-#include "mmal-vchiq.h"
-#include "mmal-parameters.h"
-#include "bcm2835-camera.h"
-
-/* The supported V4L2_CID_AUTO_EXPOSURE_BIAS values are from -4.0 to +4.0.
- * MMAL values are in 1/6th increments so the MMAL range is -24 to +24.
- * V4L2 docs say value "is expressed in terms of EV, drivers should interpret
- * the values as 0.001 EV units, where the value 1000 stands for +1 EV."
- * V4L2 is limited to a max of 32 values in a menu, so count in 1/3rds from
- * -4 to +4
- */
-static const s64 ev_bias_qmenu[] = {
- -4000, -3667, -3333,
- -3000, -2667, -2333,
- -2000, -1667, -1333,
- -1000, -667, -333,
- 0, 333, 667,
- 1000, 1333, 1667,
- 2000, 2333, 2667,
- 3000, 3333, 3667,
- 4000
-};
-
-/* Supported ISO values (*1000)
- * ISO 0 = auto ISO
- */
-static const s64 iso_qmenu[] = {
- 0, 100000, 200000, 400000, 800000,
-};
-static const uint32_t iso_values[] = {
- 0, 100, 200, 400, 800,
-};
-
-static const s64 mains_freq_qmenu[] = {
- V4L2_CID_POWER_LINE_FREQUENCY_DISABLED,
- V4L2_CID_POWER_LINE_FREQUENCY_50HZ,
- V4L2_CID_POWER_LINE_FREQUENCY_60HZ,
- V4L2_CID_POWER_LINE_FREQUENCY_AUTO
-};
-
-/* Supported video encode bitrate modes */
-static const s64 bitrate_mode_qmenu[] = {
- (s64)V4L2_MPEG_VIDEO_BITRATE_MODE_VBR,
- (s64)V4L2_MPEG_VIDEO_BITRATE_MODE_CBR,
-};
-
-enum bm2835_mmal_ctrl_type {
- MMAL_CONTROL_TYPE_STD,
- MMAL_CONTROL_TYPE_STD_MENU,
- MMAL_CONTROL_TYPE_INT_MENU,
- MMAL_CONTROL_TYPE_CLUSTER, /* special cluster entry */
-};
-
-struct bm2835_mmal_v4l2_ctrl;
-
-typedef int(bm2835_mmal_v4l2_ctrl_cb)(
- struct bm2835_mmal_dev *dev,
- struct v4l2_ctrl *ctrl,
- const struct bm2835_mmal_v4l2_ctrl *mmal_ctrl);
-
-struct bm2835_mmal_v4l2_ctrl {
- u32 id; /* v4l2 control identifier */
- enum bm2835_mmal_ctrl_type type;
- /* control minimum value or
- * mask for MMAL_CONTROL_TYPE_STD_MENU */
- s32 min;
- s32 max; /* maximum value of control */
- s32 def; /* default value of control */
- s32 step; /* step size of the control */
- const s64 *imenu; /* integer menu array */
- u32 mmal_id; /* mmal parameter id */
- bm2835_mmal_v4l2_ctrl_cb *setter;
- bool ignore_errors;
-};
-
-struct v4l2_to_mmal_effects_setting {
- u32 v4l2_effect;
- u32 mmal_effect;
- s32 col_fx_enable;
- s32 col_fx_fixed_cbcr;
- u32 u;
- u32 v;
- u32 num_effect_params;
- u32 effect_params[MMAL_MAX_IMAGEFX_PARAMETERS];
-};
-
-static const struct v4l2_to_mmal_effects_setting
- v4l2_to_mmal_effects_values[] = {
- { V4L2_COLORFX_NONE, MMAL_PARAM_IMAGEFX_NONE,
- 0, 0, 0, 0, 0, {0, 0, 0, 0, 0} },
- { V4L2_COLORFX_BW, MMAL_PARAM_IMAGEFX_NONE,
- 1, 0, 128, 128, 0, {0, 0, 0, 0, 0} },
- { V4L2_COLORFX_SEPIA, MMAL_PARAM_IMAGEFX_NONE,
- 1, 0, 87, 151, 0, {0, 0, 0, 0, 0} },
- { V4L2_COLORFX_NEGATIVE, MMAL_PARAM_IMAGEFX_NEGATIVE,
- 0, 0, 0, 0, 0, {0, 0, 0, 0, 0} },
- { V4L2_COLORFX_EMBOSS, MMAL_PARAM_IMAGEFX_EMBOSS,
- 0, 0, 0, 0, 0, {0, 0, 0, 0, 0} },
- { V4L2_COLORFX_SKETCH, MMAL_PARAM_IMAGEFX_SKETCH,
- 0, 0, 0, 0, 0, {0, 0, 0, 0, 0} },
- { V4L2_COLORFX_SKY_BLUE, MMAL_PARAM_IMAGEFX_PASTEL,
- 0, 0, 0, 0, 0, {0, 0, 0, 0, 0} },
- { V4L2_COLORFX_GRASS_GREEN, MMAL_PARAM_IMAGEFX_WATERCOLOUR,
- 0, 0, 0, 0, 0, {0, 0, 0, 0, 0} },
- { V4L2_COLORFX_SKIN_WHITEN, MMAL_PARAM_IMAGEFX_WASHEDOUT,
- 0, 0, 0, 0, 0, {0, 0, 0, 0, 0} },
- { V4L2_COLORFX_VIVID, MMAL_PARAM_IMAGEFX_SATURATION,
- 0, 0, 0, 0, 0, {0, 0, 0, 0, 0} },
- { V4L2_COLORFX_AQUA, MMAL_PARAM_IMAGEFX_NONE,
- 1, 0, 171, 121, 0, {0, 0, 0, 0, 0} },
- { V4L2_COLORFX_ART_FREEZE, MMAL_PARAM_IMAGEFX_HATCH,
- 0, 0, 0, 0, 0, {0, 0, 0, 0, 0} },
- { V4L2_COLORFX_SILHOUETTE, MMAL_PARAM_IMAGEFX_FILM,
- 0, 0, 0, 0, 0, {0, 0, 0, 0, 0} },
- { V4L2_COLORFX_SOLARIZATION, MMAL_PARAM_IMAGEFX_SOLARIZE,
- 0, 0, 0, 0, 5, {1, 128, 160, 160, 48} },
- { V4L2_COLORFX_ANTIQUE, MMAL_PARAM_IMAGEFX_COLOURBALANCE,
- 0, 0, 0, 0, 3, {108, 274, 238, 0, 0} },
- { V4L2_COLORFX_SET_CBCR, MMAL_PARAM_IMAGEFX_NONE,
- 1, 1, 0, 0, 0, {0, 0, 0, 0, 0} }
-};
-
-struct v4l2_mmal_scene_config {
- enum v4l2_scene_mode v4l2_scene;
- enum mmal_parameter_exposuremode exposure_mode;
- enum mmal_parameter_exposuremeteringmode metering_mode;
-};
-
-static const struct v4l2_mmal_scene_config scene_configs[] = {
- /* V4L2_SCENE_MODE_NONE automatically added */
- {
- V4L2_SCENE_MODE_NIGHT,
- MMAL_PARAM_EXPOSUREMODE_NIGHT,
- MMAL_PARAM_EXPOSUREMETERINGMODE_AVERAGE
- },
- {
- V4L2_SCENE_MODE_SPORTS,
- MMAL_PARAM_EXPOSUREMODE_SPORTS,
- MMAL_PARAM_EXPOSUREMETERINGMODE_AVERAGE
- },
-};
-
-/* control handlers*/
-
-static int ctrl_set_rational(struct bm2835_mmal_dev *dev,
- struct v4l2_ctrl *ctrl,
- const struct bm2835_mmal_v4l2_ctrl *mmal_ctrl)
-{
- struct mmal_parameter_rational rational_value;
- struct vchiq_mmal_port *control;
-
- control = &dev->component[MMAL_COMPONENT_CAMERA]->control;
-
- rational_value.num = ctrl->val;
- rational_value.den = 100;
-
- return vchiq_mmal_port_parameter_set(dev->instance, control,
- mmal_ctrl->mmal_id,
- &rational_value,
- sizeof(rational_value));
-}
-
-static int ctrl_set_value(struct bm2835_mmal_dev *dev,
- struct v4l2_ctrl *ctrl,
- const struct bm2835_mmal_v4l2_ctrl *mmal_ctrl)
-{
- u32 u32_value;
- struct vchiq_mmal_port *control;
-
- control = &dev->component[MMAL_COMPONENT_CAMERA]->control;
-
- u32_value = ctrl->val;
-
- return vchiq_mmal_port_parameter_set(dev->instance, control,
- mmal_ctrl->mmal_id,
- &u32_value, sizeof(u32_value));
-}
-
-static int ctrl_set_iso(struct bm2835_mmal_dev *dev,
- struct v4l2_ctrl *ctrl,
- const struct bm2835_mmal_v4l2_ctrl *mmal_ctrl)
-{
- u32 u32_value;
- struct vchiq_mmal_port *control;
-
- if (ctrl->val > mmal_ctrl->max || ctrl->val < mmal_ctrl->min)
- return 1;
-
- if (ctrl->id == V4L2_CID_ISO_SENSITIVITY)
- dev->iso = iso_values[ctrl->val];
- else if (ctrl->id == V4L2_CID_ISO_SENSITIVITY_AUTO)
- dev->manual_iso_enabled =
- (ctrl->val == V4L2_ISO_SENSITIVITY_MANUAL ?
- true :
- false);
-
- control = &dev->component[MMAL_COMPONENT_CAMERA]->control;
-
- if (dev->manual_iso_enabled)
- u32_value = dev->iso;
- else
- u32_value = 0;
-
- return vchiq_mmal_port_parameter_set(dev->instance, control,
- MMAL_PARAMETER_ISO,
- &u32_value, sizeof(u32_value));
-}
-
-static int ctrl_set_value_ev(struct bm2835_mmal_dev *dev,
- struct v4l2_ctrl *ctrl,
- const struct bm2835_mmal_v4l2_ctrl *mmal_ctrl)
-{
- s32 s32_value;
- struct vchiq_mmal_port *control;
-
- control = &dev->component[MMAL_COMPONENT_CAMERA]->control;
-
- s32_value = (ctrl->val - 12) * 2; /* Convert from index to 1/6ths */
-
- return vchiq_mmal_port_parameter_set(dev->instance, control,
- mmal_ctrl->mmal_id,
- &s32_value, sizeof(s32_value));
-}
-
-static int ctrl_set_rotate(struct bm2835_mmal_dev *dev,
- struct v4l2_ctrl *ctrl,
- const struct bm2835_mmal_v4l2_ctrl *mmal_ctrl)
-{
- int ret;
- u32 u32_value;
- struct vchiq_mmal_component *camera;
-
- camera = dev->component[MMAL_COMPONENT_CAMERA];
-
- u32_value = ((ctrl->val % 360) / 90) * 90;
-
- ret = vchiq_mmal_port_parameter_set(dev->instance, &camera->output[0],
- mmal_ctrl->mmal_id,
- &u32_value, sizeof(u32_value));
- if (ret < 0)
- return ret;
-
- ret = vchiq_mmal_port_parameter_set(dev->instance, &camera->output[1],
- mmal_ctrl->mmal_id,
- &u32_value, sizeof(u32_value));
- if (ret < 0)
- return ret;
-
- ret = vchiq_mmal_port_parameter_set(dev->instance, &camera->output[2],
- mmal_ctrl->mmal_id,
- &u32_value, sizeof(u32_value));
-
- return ret;
-}
-
-static int ctrl_set_flip(struct bm2835_mmal_dev *dev,
- struct v4l2_ctrl *ctrl,
- const struct bm2835_mmal_v4l2_ctrl *mmal_ctrl)
-{
- int ret;
- u32 u32_value;
- struct vchiq_mmal_component *camera;
-
- if (ctrl->id == V4L2_CID_HFLIP)
- dev->hflip = ctrl->val;
- else
- dev->vflip = ctrl->val;
-
- camera = dev->component[MMAL_COMPONENT_CAMERA];
-
- if (dev->hflip && dev->vflip)
- u32_value = MMAL_PARAM_MIRROR_BOTH;
- else if (dev->hflip)
- u32_value = MMAL_PARAM_MIRROR_HORIZONTAL;
- else if (dev->vflip)
- u32_value = MMAL_PARAM_MIRROR_VERTICAL;
- else
- u32_value = MMAL_PARAM_MIRROR_NONE;
-
- ret = vchiq_mmal_port_parameter_set(dev->instance, &camera->output[0],
- mmal_ctrl->mmal_id,
- &u32_value, sizeof(u32_value));
- if (ret < 0)
- return ret;
-
- ret = vchiq_mmal_port_parameter_set(dev->instance, &camera->output[1],
- mmal_ctrl->mmal_id,
- &u32_value, sizeof(u32_value));
- if (ret < 0)
- return ret;
-
- ret = vchiq_mmal_port_parameter_set(dev->instance, &camera->output[2],
- mmal_ctrl->mmal_id,
- &u32_value, sizeof(u32_value));
-
- return ret;
-}
-
-static int ctrl_set_exposure(struct bm2835_mmal_dev *dev,
- struct v4l2_ctrl *ctrl,
- const struct bm2835_mmal_v4l2_ctrl *mmal_ctrl)
-{
- enum mmal_parameter_exposuremode exp_mode = dev->exposure_mode_user;
- u32 shutter_speed = 0;
- struct vchiq_mmal_port *control;
- int ret = 0;
-
- control = &dev->component[MMAL_COMPONENT_CAMERA]->control;
-
- if (mmal_ctrl->mmal_id == MMAL_PARAMETER_SHUTTER_SPEED) {
- /* V4L2 is in 100usec increments.
- * MMAL is 1usec.
- */
- dev->manual_shutter_speed = ctrl->val * 100;
- } else if (mmal_ctrl->mmal_id == MMAL_PARAMETER_EXPOSURE_MODE) {
- switch (ctrl->val) {
- case V4L2_EXPOSURE_AUTO:
- exp_mode = MMAL_PARAM_EXPOSUREMODE_AUTO;
- break;
-
- case V4L2_EXPOSURE_MANUAL:
- exp_mode = MMAL_PARAM_EXPOSUREMODE_OFF;
- break;
- }
- dev->exposure_mode_user = exp_mode;
- dev->exposure_mode_v4l2_user = ctrl->val;
- } else if (mmal_ctrl->id == V4L2_CID_EXPOSURE_AUTO_PRIORITY) {
- dev->exp_auto_priority = ctrl->val;
- }
-
- if (dev->scene_mode == V4L2_SCENE_MODE_NONE) {
- if (exp_mode == MMAL_PARAM_EXPOSUREMODE_OFF)
- shutter_speed = dev->manual_shutter_speed;
-
- ret = vchiq_mmal_port_parameter_set(dev->instance,
- control,
- MMAL_PARAMETER_SHUTTER_SPEED,
- &shutter_speed,
- sizeof(shutter_speed));
- ret += vchiq_mmal_port_parameter_set(dev->instance,
- control,
- MMAL_PARAMETER_EXPOSURE_MODE,
- &exp_mode,
- sizeof(u32));
- dev->exposure_mode_active = exp_mode;
- }
- /* exposure_dynamic_framerate (V4L2_CID_EXPOSURE_AUTO_PRIORITY) should
- * always apply irrespective of scene mode.
- */
- ret += set_framerate_params(dev);
-
- return ret;
-}
-
-static int ctrl_set_metering_mode(struct bm2835_mmal_dev *dev,
- struct v4l2_ctrl *ctrl,
- const struct bm2835_mmal_v4l2_ctrl *mmal_ctrl)
-{
- switch (ctrl->val) {
- case V4L2_EXPOSURE_METERING_AVERAGE:
- dev->metering_mode = MMAL_PARAM_EXPOSUREMETERINGMODE_AVERAGE;
- break;
-
- case V4L2_EXPOSURE_METERING_CENTER_WEIGHTED:
- dev->metering_mode = MMAL_PARAM_EXPOSUREMETERINGMODE_BACKLIT;
- break;
-
- case V4L2_EXPOSURE_METERING_SPOT:
- dev->metering_mode = MMAL_PARAM_EXPOSUREMETERINGMODE_SPOT;
- break;
-
- /* todo matrix weighting not added to Linux API till 3.9
- case V4L2_EXPOSURE_METERING_MATRIX:
- dev->metering_mode = MMAL_PARAM_EXPOSUREMETERINGMODE_MATRIX;
- break;
- */
- }
-
- if (dev->scene_mode == V4L2_SCENE_MODE_NONE) {
- struct vchiq_mmal_port *control;
- u32 u32_value = dev->metering_mode;
-
- control = &dev->component[MMAL_COMPONENT_CAMERA]->control;
-
- return vchiq_mmal_port_parameter_set(dev->instance, control,
- mmal_ctrl->mmal_id,
- &u32_value, sizeof(u32_value));
- } else
- return 0;
-}
-
-static int ctrl_set_flicker_avoidance(struct bm2835_mmal_dev *dev,
- struct v4l2_ctrl *ctrl,
- const struct bm2835_mmal_v4l2_ctrl *mmal_ctrl)
-{
- u32 u32_value;
- struct vchiq_mmal_port *control;
-
- control = &dev->component[MMAL_COMPONENT_CAMERA]->control;
-
- switch (ctrl->val) {
- case V4L2_CID_POWER_LINE_FREQUENCY_DISABLED:
- u32_value = MMAL_PARAM_FLICKERAVOID_OFF;
- break;
- case V4L2_CID_POWER_LINE_FREQUENCY_50HZ:
- u32_value = MMAL_PARAM_FLICKERAVOID_50HZ;
- break;
- case V4L2_CID_POWER_LINE_FREQUENCY_60HZ:
- u32_value = MMAL_PARAM_FLICKERAVOID_60HZ;
- break;
- case V4L2_CID_POWER_LINE_FREQUENCY_AUTO:
- u32_value = MMAL_PARAM_FLICKERAVOID_AUTO;
- break;
- }
-
- return vchiq_mmal_port_parameter_set(dev->instance, control,
- mmal_ctrl->mmal_id,
- &u32_value, sizeof(u32_value));
-}
-
-static int ctrl_set_awb_mode(struct bm2835_mmal_dev *dev,
- struct v4l2_ctrl *ctrl,
- const struct bm2835_mmal_v4l2_ctrl *mmal_ctrl)
-{
- u32 u32_value;
- struct vchiq_mmal_port *control;
-
- control = &dev->component[MMAL_COMPONENT_CAMERA]->control;
-
- switch (ctrl->val) {
- case V4L2_WHITE_BALANCE_MANUAL:
- u32_value = MMAL_PARAM_AWBMODE_OFF;
- break;
-
- case V4L2_WHITE_BALANCE_AUTO:
- u32_value = MMAL_PARAM_AWBMODE_AUTO;
- break;
-
- case V4L2_WHITE_BALANCE_INCANDESCENT:
- u32_value = MMAL_PARAM_AWBMODE_INCANDESCENT;
- break;
-
- case V4L2_WHITE_BALANCE_FLUORESCENT:
- u32_value = MMAL_PARAM_AWBMODE_FLUORESCENT;
- break;
-
- case V4L2_WHITE_BALANCE_FLUORESCENT_H:
- u32_value = MMAL_PARAM_AWBMODE_TUNGSTEN;
- break;
-
- case V4L2_WHITE_BALANCE_HORIZON:
- u32_value = MMAL_PARAM_AWBMODE_HORIZON;
- break;
-
- case V4L2_WHITE_BALANCE_DAYLIGHT:
- u32_value = MMAL_PARAM_AWBMODE_SUNLIGHT;
- break;
-
- case V4L2_WHITE_BALANCE_FLASH:
- u32_value = MMAL_PARAM_AWBMODE_FLASH;
- break;
-
- case V4L2_WHITE_BALANCE_CLOUDY:
- u32_value = MMAL_PARAM_AWBMODE_CLOUDY;
- break;
-
- case V4L2_WHITE_BALANCE_SHADE:
- u32_value = MMAL_PARAM_AWBMODE_SHADE;
- break;
- }
-
- return vchiq_mmal_port_parameter_set(dev->instance, control,
- mmal_ctrl->mmal_id,
- &u32_value, sizeof(u32_value));
-}
-
-static int ctrl_set_awb_gains(struct bm2835_mmal_dev *dev,
- struct v4l2_ctrl *ctrl,
- const struct bm2835_mmal_v4l2_ctrl *mmal_ctrl)
-{
- struct vchiq_mmal_port *control;
- struct mmal_parameter_awbgains gains;
-
- control = &dev->component[MMAL_COMPONENT_CAMERA]->control;
-
- if (ctrl->id == V4L2_CID_RED_BALANCE)
- dev->red_gain = ctrl->val;
- else if (ctrl->id == V4L2_CID_BLUE_BALANCE)
- dev->blue_gain = ctrl->val;
-
- gains.r_gain.num = dev->red_gain;
- gains.b_gain.num = dev->blue_gain;
- gains.r_gain.den = gains.b_gain.den = 1000;
-
- return vchiq_mmal_port_parameter_set(dev->instance, control,
- mmal_ctrl->mmal_id,
- &gains, sizeof(gains));
-}
-
-static int ctrl_set_image_effect(struct bm2835_mmal_dev *dev,
- struct v4l2_ctrl *ctrl,
- const struct bm2835_mmal_v4l2_ctrl *mmal_ctrl)
-{
- int ret = -EINVAL;
- int i, j;
- struct vchiq_mmal_port *control;
- struct mmal_parameter_imagefx_parameters imagefx;
-
- for (i = 0; i < ARRAY_SIZE(v4l2_to_mmal_effects_values); i++) {
- if (ctrl->val == v4l2_to_mmal_effects_values[i].v4l2_effect) {
- imagefx.effect =
- v4l2_to_mmal_effects_values[i].mmal_effect;
- imagefx.num_effect_params =
- v4l2_to_mmal_effects_values[i].num_effect_params;
-
- if (imagefx.num_effect_params > MMAL_MAX_IMAGEFX_PARAMETERS)
- imagefx.num_effect_params = MMAL_MAX_IMAGEFX_PARAMETERS;
-
- for (j = 0; j < imagefx.num_effect_params; j++)
- imagefx.effect_parameter[j] =
- v4l2_to_mmal_effects_values[i].effect_params[j];
-
- dev->colourfx.enable =
- v4l2_to_mmal_effects_values[i].col_fx_enable;
- if (!v4l2_to_mmal_effects_values[i].col_fx_fixed_cbcr) {
- dev->colourfx.u =
- v4l2_to_mmal_effects_values[i].u;
- dev->colourfx.v =
- v4l2_to_mmal_effects_values[i].v;
- }
-
- control = &dev->component[MMAL_COMPONENT_CAMERA]->control;
-
- ret = vchiq_mmal_port_parameter_set(
- dev->instance, control,
- MMAL_PARAMETER_IMAGE_EFFECT_PARAMETERS,
- &imagefx, sizeof(imagefx));
- if (ret)
- goto exit;
-
- ret = vchiq_mmal_port_parameter_set(
- dev->instance, control,
- MMAL_PARAMETER_COLOUR_EFFECT,
- &dev->colourfx, sizeof(dev->colourfx));
- }
- }
-
-exit:
- v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
- "mmal_ctrl:%p ctrl id:0x%x ctrl val:%d imagefx:0x%x color_effect:%s u:%d v:%d ret %d(%d)\n",
- mmal_ctrl, ctrl->id, ctrl->val, imagefx.effect,
- dev->colourfx.enable ? "true" : "false",
- dev->colourfx.u, dev->colourfx.v,
- ret, (ret == 0 ? 0 : -EINVAL));
- return (ret == 0 ? 0 : EINVAL);
-}
-
-static int ctrl_set_colfx(struct bm2835_mmal_dev *dev,
- struct v4l2_ctrl *ctrl,
- const struct bm2835_mmal_v4l2_ctrl *mmal_ctrl)
-{
- int ret = -EINVAL;
- struct vchiq_mmal_port *control;
-
- control = &dev->component[MMAL_COMPONENT_CAMERA]->control;
-
- dev->colourfx.enable = (ctrl->val & 0xff00) >> 8;
- dev->colourfx.enable = ctrl->val & 0xff;
-
- ret = vchiq_mmal_port_parameter_set(dev->instance, control,
- MMAL_PARAMETER_COLOUR_EFFECT,
- &dev->colourfx,
- sizeof(dev->colourfx));
-
- v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
- "%s: After: mmal_ctrl:%p ctrl id:0x%x ctrl val:%d ret %d(%d)\n",
- __func__, mmal_ctrl, ctrl->id, ctrl->val, ret,
- (ret == 0 ? 0 : -EINVAL));
- return (ret == 0 ? 0 : EINVAL);
-}
-
-static int ctrl_set_bitrate(struct bm2835_mmal_dev *dev,
- struct v4l2_ctrl *ctrl,
- const struct bm2835_mmal_v4l2_ctrl *mmal_ctrl)
-{
- int ret;
- struct vchiq_mmal_port *encoder_out;
-
- dev->capture.encode_bitrate = ctrl->val;
-
- encoder_out = &dev->component[MMAL_COMPONENT_VIDEO_ENCODE]->output[0];
-
- ret = vchiq_mmal_port_parameter_set(dev->instance, encoder_out,
- mmal_ctrl->mmal_id,
- &ctrl->val, sizeof(ctrl->val));
- ret = 0;
- return ret;
-}
-
-static int ctrl_set_bitrate_mode(struct bm2835_mmal_dev *dev,
- struct v4l2_ctrl *ctrl,
- const struct bm2835_mmal_v4l2_ctrl *mmal_ctrl)
-{
- u32 bitrate_mode;
- struct vchiq_mmal_port *encoder_out;
-
- encoder_out = &dev->component[MMAL_COMPONENT_VIDEO_ENCODE]->output[0];
-
- dev->capture.encode_bitrate_mode = ctrl->val;
- switch (ctrl->val) {
- default:
- case V4L2_MPEG_VIDEO_BITRATE_MODE_VBR:
- bitrate_mode = MMAL_VIDEO_RATECONTROL_VARIABLE;
- break;
- case V4L2_MPEG_VIDEO_BITRATE_MODE_CBR:
- bitrate_mode = MMAL_VIDEO_RATECONTROL_CONSTANT;
- break;
- }
-
- vchiq_mmal_port_parameter_set(dev->instance, encoder_out,
- mmal_ctrl->mmal_id,
- &bitrate_mode,
- sizeof(bitrate_mode));
- return 0;
-}
-
-static int ctrl_set_image_encode_output(struct bm2835_mmal_dev *dev,
- struct v4l2_ctrl *ctrl,
- const struct bm2835_mmal_v4l2_ctrl *mmal_ctrl)
-{
- u32 u32_value;
- struct vchiq_mmal_port *jpeg_out;
-
- jpeg_out = &dev->component[MMAL_COMPONENT_IMAGE_ENCODE]->output[0];
-
- u32_value = ctrl->val;
-
- return vchiq_mmal_port_parameter_set(dev->instance, jpeg_out,
- mmal_ctrl->mmal_id,
- &u32_value, sizeof(u32_value));
-}
-
-static int ctrl_set_video_encode_param_output(struct bm2835_mmal_dev *dev,
- struct v4l2_ctrl *ctrl,
- const struct bm2835_mmal_v4l2_ctrl *mmal_ctrl)
-{
- u32 u32_value;
- struct vchiq_mmal_port *vid_enc_ctl;
-
- vid_enc_ctl = &dev->component[MMAL_COMPONENT_VIDEO_ENCODE]->output[0];
-
- u32_value = ctrl->val;
-
- return vchiq_mmal_port_parameter_set(dev->instance, vid_enc_ctl,
- mmal_ctrl->mmal_id,
- &u32_value, sizeof(u32_value));
-}
-
-static int ctrl_set_video_encode_profile_level(struct bm2835_mmal_dev *dev,
- struct v4l2_ctrl *ctrl,
- const struct bm2835_mmal_v4l2_ctrl *mmal_ctrl)
-{
- struct mmal_parameter_video_profile param;
- int ret = 0;
-
- if (ctrl->id == V4L2_CID_MPEG_VIDEO_H264_PROFILE) {
- switch (ctrl->val) {
- case V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE:
- case V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_BASELINE:
- case V4L2_MPEG_VIDEO_H264_PROFILE_MAIN:
- case V4L2_MPEG_VIDEO_H264_PROFILE_HIGH:
- dev->capture.enc_profile = ctrl->val;
- break;
- default:
- ret = -EINVAL;
- break;
- }
- } else if (ctrl->id == V4L2_CID_MPEG_VIDEO_H264_LEVEL) {
- switch (ctrl->val) {
- case V4L2_MPEG_VIDEO_H264_LEVEL_1_0:
- case V4L2_MPEG_VIDEO_H264_LEVEL_1B:
- case V4L2_MPEG_VIDEO_H264_LEVEL_1_1:
- case V4L2_MPEG_VIDEO_H264_LEVEL_1_2:
- case V4L2_MPEG_VIDEO_H264_LEVEL_1_3:
- case V4L2_MPEG_VIDEO_H264_LEVEL_2_0:
- case V4L2_MPEG_VIDEO_H264_LEVEL_2_1:
- case V4L2_MPEG_VIDEO_H264_LEVEL_2_2:
- case V4L2_MPEG_VIDEO_H264_LEVEL_3_0:
- case V4L2_MPEG_VIDEO_H264_LEVEL_3_1:
- case V4L2_MPEG_VIDEO_H264_LEVEL_3_2:
- case V4L2_MPEG_VIDEO_H264_LEVEL_4_0:
- dev->capture.enc_level = ctrl->val;
- break;
- default:
- ret = -EINVAL;
- break;
- }
- }
-
- if (!ret) {
- switch (dev->capture.enc_profile) {
- case V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE:
- param.profile = MMAL_VIDEO_PROFILE_H264_BASELINE;
- break;
- case V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_BASELINE:
- param.profile =
- MMAL_VIDEO_PROFILE_H264_CONSTRAINED_BASELINE;
- break;
- case V4L2_MPEG_VIDEO_H264_PROFILE_MAIN:
- param.profile = MMAL_VIDEO_PROFILE_H264_MAIN;
- break;
- case V4L2_MPEG_VIDEO_H264_PROFILE_HIGH:
- param.profile = MMAL_VIDEO_PROFILE_H264_HIGH;
- break;
- default:
- /* Should never get here */
- break;
- }
-
- switch (dev->capture.enc_level) {
- case V4L2_MPEG_VIDEO_H264_LEVEL_1_0:
- param.level = MMAL_VIDEO_LEVEL_H264_1;
- break;
- case V4L2_MPEG_VIDEO_H264_LEVEL_1B:
- param.level = MMAL_VIDEO_LEVEL_H264_1b;
- break;
- case V4L2_MPEG_VIDEO_H264_LEVEL_1_1:
- param.level = MMAL_VIDEO_LEVEL_H264_11;
- break;
- case V4L2_MPEG_VIDEO_H264_LEVEL_1_2:
- param.level = MMAL_VIDEO_LEVEL_H264_12;
- break;
- case V4L2_MPEG_VIDEO_H264_LEVEL_1_3:
- param.level = MMAL_VIDEO_LEVEL_H264_13;
- break;
- case V4L2_MPEG_VIDEO_H264_LEVEL_2_0:
- param.level = MMAL_VIDEO_LEVEL_H264_2;
- break;
- case V4L2_MPEG_VIDEO_H264_LEVEL_2_1:
- param.level = MMAL_VIDEO_LEVEL_H264_21;
- break;
- case V4L2_MPEG_VIDEO_H264_LEVEL_2_2:
- param.level = MMAL_VIDEO_LEVEL_H264_22;
- break;
- case V4L2_MPEG_VIDEO_H264_LEVEL_3_0:
- param.level = MMAL_VIDEO_LEVEL_H264_3;
- break;
- case V4L2_MPEG_VIDEO_H264_LEVEL_3_1:
- param.level = MMAL_VIDEO_LEVEL_H264_31;
- break;
- case V4L2_MPEG_VIDEO_H264_LEVEL_3_2:
- param.level = MMAL_VIDEO_LEVEL_H264_32;
- break;
- case V4L2_MPEG_VIDEO_H264_LEVEL_4_0:
- param.level = MMAL_VIDEO_LEVEL_H264_4;
- break;
- default:
- /* Should never get here */
- break;
- }
-
- ret = vchiq_mmal_port_parameter_set(dev->instance,
- &dev->component[MMAL_COMPONENT_VIDEO_ENCODE]->output[0],
- mmal_ctrl->mmal_id,
- &param, sizeof(param));
- }
- return ret;
-}
-
-static int ctrl_set_scene_mode(struct bm2835_mmal_dev *dev,
- struct v4l2_ctrl *ctrl,
- const struct bm2835_mmal_v4l2_ctrl *mmal_ctrl)
-{
- int ret = 0;
- int shutter_speed;
- struct vchiq_mmal_port *control;
-
- v4l2_dbg(0, bcm2835_v4l2_debug, &dev->v4l2_dev,
- "scene mode selected %d, was %d\n", ctrl->val,
- dev->scene_mode);
- control = &dev->component[MMAL_COMPONENT_CAMERA]->control;
-
- if (ctrl->val == dev->scene_mode)
- return 0;
-
- if (ctrl->val == V4L2_SCENE_MODE_NONE) {
- /* Restore all user selections */
- dev->scene_mode = V4L2_SCENE_MODE_NONE;
-
- if (dev->exposure_mode_user == MMAL_PARAM_EXPOSUREMODE_OFF)
- shutter_speed = dev->manual_shutter_speed;
- else
- shutter_speed = 0;
-
- v4l2_dbg(0, bcm2835_v4l2_debug, &dev->v4l2_dev,
- "%s: scene mode none: shut_speed %d, exp_mode %d, metering %d\n",
- __func__, shutter_speed, dev->exposure_mode_user,
- dev->metering_mode);
- ret = vchiq_mmal_port_parameter_set(dev->instance,
- control,
- MMAL_PARAMETER_SHUTTER_SPEED,
- &shutter_speed,
- sizeof(shutter_speed));
- ret += vchiq_mmal_port_parameter_set(dev->instance,
- control,
- MMAL_PARAMETER_EXPOSURE_MODE,
- &dev->exposure_mode_user,
- sizeof(u32));
- dev->exposure_mode_active = dev->exposure_mode_user;
- ret += vchiq_mmal_port_parameter_set(dev->instance,
- control,
- MMAL_PARAMETER_EXP_METERING_MODE,
- &dev->metering_mode,
- sizeof(u32));
- ret += set_framerate_params(dev);
- } else {
- /* Set up scene mode */
- int i;
- const struct v4l2_mmal_scene_config *scene = NULL;
- int shutter_speed;
- enum mmal_parameter_exposuremode exposure_mode;
- enum mmal_parameter_exposuremeteringmode metering_mode;
-
- for (i = 0; i < ARRAY_SIZE(scene_configs); i++) {
- if (scene_configs[i].v4l2_scene ==
- ctrl->val) {
- scene = &scene_configs[i];
- break;
- }
- }
- if (!scene)
- return -EINVAL;
- if (i >= ARRAY_SIZE(scene_configs))
- return -EINVAL;
-
- /* Set all the values */
- dev->scene_mode = ctrl->val;
-
- if (scene->exposure_mode == MMAL_PARAM_EXPOSUREMODE_OFF)
- shutter_speed = dev->manual_shutter_speed;
- else
- shutter_speed = 0;
- exposure_mode = scene->exposure_mode;
- metering_mode = scene->metering_mode;
-
- v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
- "%s: scene mode none: shut_speed %d, exp_mode %d, metering %d\n",
- __func__, shutter_speed, exposure_mode, metering_mode);
-
- ret = vchiq_mmal_port_parameter_set(dev->instance, control,
- MMAL_PARAMETER_SHUTTER_SPEED,
- &shutter_speed,
- sizeof(shutter_speed));
- ret += vchiq_mmal_port_parameter_set(dev->instance, control,
- MMAL_PARAMETER_EXPOSURE_MODE,
- &exposure_mode,
- sizeof(u32));
- dev->exposure_mode_active = exposure_mode;
- ret += vchiq_mmal_port_parameter_set(dev->instance, control,
- MMAL_PARAMETER_EXPOSURE_MODE,
- &exposure_mode,
- sizeof(u32));
- ret += vchiq_mmal_port_parameter_set(dev->instance, control,
- MMAL_PARAMETER_EXP_METERING_MODE,
- &metering_mode,
- sizeof(u32));
- ret += set_framerate_params(dev);
- }
- if (ret) {
- v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
- "%s: Setting scene to %d, ret=%d\n",
- __func__, ctrl->val, ret);
- ret = -EINVAL;
- }
- return 0;
-}
-
-static int bm2835_mmal_s_ctrl(struct v4l2_ctrl *ctrl)
-{
- struct bm2835_mmal_dev *dev =
- container_of(ctrl->handler, struct bm2835_mmal_dev,
- ctrl_handler);
- const struct bm2835_mmal_v4l2_ctrl *mmal_ctrl = ctrl->priv;
- int ret;
-
- if ((mmal_ctrl == NULL) ||
- (mmal_ctrl->id != ctrl->id) ||
- (mmal_ctrl->setter == NULL)) {
- pr_warn("mmal_ctrl:%p ctrl id:%d\n", mmal_ctrl, ctrl->id);
- return -EINVAL;
- }
-
- ret = mmal_ctrl->setter(dev, ctrl, mmal_ctrl);
- if (ret)
- pr_warn("ctrl id:%d/MMAL param %08X- returned ret %d\n",
- ctrl->id, mmal_ctrl->mmal_id, ret);
- if (mmal_ctrl->ignore_errors)
- ret = 0;
- return ret;
-}
-
-static const struct v4l2_ctrl_ops bm2835_mmal_ctrl_ops = {
- .s_ctrl = bm2835_mmal_s_ctrl,
-};
-
-static const struct bm2835_mmal_v4l2_ctrl v4l2_ctrls[V4L2_CTRL_COUNT] = {
- {
- V4L2_CID_SATURATION, MMAL_CONTROL_TYPE_STD,
- -100, 100, 0, 1, NULL,
- MMAL_PARAMETER_SATURATION,
- &ctrl_set_rational,
- false
- },
- {
- V4L2_CID_SHARPNESS, MMAL_CONTROL_TYPE_STD,
- -100, 100, 0, 1, NULL,
- MMAL_PARAMETER_SHARPNESS,
- &ctrl_set_rational,
- false
- },
- {
- V4L2_CID_CONTRAST, MMAL_CONTROL_TYPE_STD,
- -100, 100, 0, 1, NULL,
- MMAL_PARAMETER_CONTRAST,
- &ctrl_set_rational,
- false
- },
- {
- V4L2_CID_BRIGHTNESS, MMAL_CONTROL_TYPE_STD,
- 0, 100, 50, 1, NULL,
- MMAL_PARAMETER_BRIGHTNESS,
- &ctrl_set_rational,
- false
- },
- {
- V4L2_CID_ISO_SENSITIVITY, MMAL_CONTROL_TYPE_INT_MENU,
- 0, ARRAY_SIZE(iso_qmenu) - 1, 0, 1, iso_qmenu,
- MMAL_PARAMETER_ISO,
- &ctrl_set_iso,
- false
- },
- {
- V4L2_CID_ISO_SENSITIVITY_AUTO, MMAL_CONTROL_TYPE_STD_MENU,
- 0, 1, V4L2_ISO_SENSITIVITY_AUTO, 1, NULL,
- MMAL_PARAMETER_ISO,
- &ctrl_set_iso,
- false
- },
- {
- V4L2_CID_IMAGE_STABILIZATION, MMAL_CONTROL_TYPE_STD,
- 0, 1, 0, 1, NULL,
- MMAL_PARAMETER_VIDEO_STABILISATION,
- &ctrl_set_value,
- false
- },
-/* {
- 0, MMAL_CONTROL_TYPE_CLUSTER, 3, 1, 0, NULL, 0, NULL
- }, */
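-/* For the STD_MENU entries below, the "min" field doubles as the menu
- * skip mask handed to v4l2_ctrl_new_std_menu() in
- * bm2835_mmal_init_controls(): the bits inside the ~() are the items
- * that remain selectable.
- */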
- {
- V4L2_CID_EXPOSURE_AUTO, MMAL_CONTROL_TYPE_STD_MENU,
- ~0x03, 3, V4L2_EXPOSURE_AUTO, 0, NULL,
- MMAL_PARAMETER_EXPOSURE_MODE,
- &ctrl_set_exposure,
- false
- },
-/* todo this needs mixing in with set exposure
- {
- V4L2_CID_SCENE_MODE, MMAL_CONTROL_TYPE_STD_MENU,
- },
- */
- {
- V4L2_CID_EXPOSURE_ABSOLUTE, MMAL_CONTROL_TYPE_STD,
- /* Units of 100usecs */
- 1, 1 * 1000 * 10, 100 * 10, 1, NULL,
- MMAL_PARAMETER_SHUTTER_SPEED,
- &ctrl_set_exposure,
- false
- },
- {
- V4L2_CID_AUTO_EXPOSURE_BIAS, MMAL_CONTROL_TYPE_INT_MENU,
- 0, ARRAY_SIZE(ev_bias_qmenu) - 1,
- (ARRAY_SIZE(ev_bias_qmenu) + 1) / 2 - 1, 0, ev_bias_qmenu,
- MMAL_PARAMETER_EXPOSURE_COMP,
- &ctrl_set_value_ev,
- false
- },
- {
- V4L2_CID_EXPOSURE_AUTO_PRIORITY, MMAL_CONTROL_TYPE_STD,
- 0, 1,
- 0, 1, NULL,
-		0,	/* Dummy MMAL ID as it gets mapped into FPS range */
- &ctrl_set_exposure,
- false
- },
- {
- V4L2_CID_EXPOSURE_METERING,
- MMAL_CONTROL_TYPE_STD_MENU,
- ~0x7, 2, V4L2_EXPOSURE_METERING_AVERAGE, 0, NULL,
- MMAL_PARAMETER_EXP_METERING_MODE,
- &ctrl_set_metering_mode,
- false
- },
- {
- V4L2_CID_AUTO_N_PRESET_WHITE_BALANCE,
- MMAL_CONTROL_TYPE_STD_MENU,
- ~0x3ff, 9, V4L2_WHITE_BALANCE_AUTO, 0, NULL,
- MMAL_PARAMETER_AWB_MODE,
- &ctrl_set_awb_mode,
- false
- },
- {
- V4L2_CID_RED_BALANCE, MMAL_CONTROL_TYPE_STD,
- 1, 7999, 1000, 1, NULL,
- MMAL_PARAMETER_CUSTOM_AWB_GAINS,
- &ctrl_set_awb_gains,
- false
- },
- {
- V4L2_CID_BLUE_BALANCE, MMAL_CONTROL_TYPE_STD,
- 1, 7999, 1000, 1, NULL,
- MMAL_PARAMETER_CUSTOM_AWB_GAINS,
- &ctrl_set_awb_gains,
- false
- },
- {
- V4L2_CID_COLORFX, MMAL_CONTROL_TYPE_STD_MENU,
- 0, 15, V4L2_COLORFX_NONE, 0, NULL,
- MMAL_PARAMETER_IMAGE_EFFECT,
- &ctrl_set_image_effect,
- false
- },
- {
- V4L2_CID_COLORFX_CBCR, MMAL_CONTROL_TYPE_STD,
- 0, 0xffff, 0x8080, 1, NULL,
- MMAL_PARAMETER_COLOUR_EFFECT,
- &ctrl_set_colfx,
- false
- },
- {
- V4L2_CID_ROTATE, MMAL_CONTROL_TYPE_STD,
- 0, 360, 0, 90, NULL,
- MMAL_PARAMETER_ROTATION,
- &ctrl_set_rotate,
- false
- },
- {
- V4L2_CID_HFLIP, MMAL_CONTROL_TYPE_STD,
- 0, 1, 0, 1, NULL,
- MMAL_PARAMETER_MIRROR,
- &ctrl_set_flip,
- false
- },
- {
- V4L2_CID_VFLIP, MMAL_CONTROL_TYPE_STD,
- 0, 1, 0, 1, NULL,
- MMAL_PARAMETER_MIRROR,
- &ctrl_set_flip,
- false
- },
- {
- V4L2_CID_MPEG_VIDEO_BITRATE_MODE, MMAL_CONTROL_TYPE_STD_MENU,
- 0, ARRAY_SIZE(bitrate_mode_qmenu) - 1,
- 0, 0, bitrate_mode_qmenu,
- MMAL_PARAMETER_RATECONTROL,
- &ctrl_set_bitrate_mode,
- false
- },
- {
- V4L2_CID_MPEG_VIDEO_BITRATE, MMAL_CONTROL_TYPE_STD,
- 25 * 1000, 25 * 1000 * 1000, 10 * 1000 * 1000, 25 * 1000, NULL,
- MMAL_PARAMETER_VIDEO_BIT_RATE,
- &ctrl_set_bitrate,
- false
- },
- {
- V4L2_CID_JPEG_COMPRESSION_QUALITY, MMAL_CONTROL_TYPE_STD,
- 1, 100,
- 30, 1, NULL,
- MMAL_PARAMETER_JPEG_Q_FACTOR,
- &ctrl_set_image_encode_output,
- false
- },
- {
- V4L2_CID_POWER_LINE_FREQUENCY, MMAL_CONTROL_TYPE_STD_MENU,
- 0, ARRAY_SIZE(mains_freq_qmenu) - 1,
- 1, 1, NULL,
- MMAL_PARAMETER_FLICKER_AVOID,
- &ctrl_set_flicker_avoidance,
- false
- },
- {
- V4L2_CID_MPEG_VIDEO_REPEAT_SEQ_HEADER, MMAL_CONTROL_TYPE_STD,
- 0, 1,
- 0, 1, NULL,
- MMAL_PARAMETER_VIDEO_ENCODE_INLINE_HEADER,
- &ctrl_set_video_encode_param_output,
- true /* Errors ignored as requires latest firmware to work */
- },
- {
- V4L2_CID_MPEG_VIDEO_H264_PROFILE,
- MMAL_CONTROL_TYPE_STD_MENU,
- ~((1<<V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE) |
- (1<<V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_BASELINE) |
- (1<<V4L2_MPEG_VIDEO_H264_PROFILE_MAIN) |
- (1<<V4L2_MPEG_VIDEO_H264_PROFILE_HIGH)),
- V4L2_MPEG_VIDEO_H264_PROFILE_HIGH,
- V4L2_MPEG_VIDEO_H264_PROFILE_HIGH, 1, NULL,
- MMAL_PARAMETER_PROFILE,
- &ctrl_set_video_encode_profile_level,
- false
- },
- {
- V4L2_CID_MPEG_VIDEO_H264_LEVEL, MMAL_CONTROL_TYPE_STD_MENU,
- ~((1<<V4L2_MPEG_VIDEO_H264_LEVEL_1_0) |
- (1<<V4L2_MPEG_VIDEO_H264_LEVEL_1B) |
- (1<<V4L2_MPEG_VIDEO_H264_LEVEL_1_1) |
- (1<<V4L2_MPEG_VIDEO_H264_LEVEL_1_2) |
- (1<<V4L2_MPEG_VIDEO_H264_LEVEL_1_3) |
- (1<<V4L2_MPEG_VIDEO_H264_LEVEL_2_0) |
- (1<<V4L2_MPEG_VIDEO_H264_LEVEL_2_1) |
- (1<<V4L2_MPEG_VIDEO_H264_LEVEL_2_2) |
- (1<<V4L2_MPEG_VIDEO_H264_LEVEL_3_0) |
- (1<<V4L2_MPEG_VIDEO_H264_LEVEL_3_1) |
- (1<<V4L2_MPEG_VIDEO_H264_LEVEL_3_2) |
- (1<<V4L2_MPEG_VIDEO_H264_LEVEL_4_0)),
- V4L2_MPEG_VIDEO_H264_LEVEL_4_0,
- V4L2_MPEG_VIDEO_H264_LEVEL_4_0, 1, NULL,
- MMAL_PARAMETER_PROFILE,
- &ctrl_set_video_encode_profile_level,
- false
- },
- {
- V4L2_CID_SCENE_MODE, MMAL_CONTROL_TYPE_STD_MENU,
- -1, /* Min is computed at runtime */
- V4L2_SCENE_MODE_TEXT,
- V4L2_SCENE_MODE_NONE, 1, NULL,
- MMAL_PARAMETER_PROFILE,
- &ctrl_set_scene_mode,
- false
- },
- {
- V4L2_CID_MPEG_VIDEO_H264_I_PERIOD, MMAL_CONTROL_TYPE_STD,
- 0, 0x7FFFFFFF, 60, 1, NULL,
- MMAL_PARAMETER_INTRAPERIOD,
- &ctrl_set_video_encode_param_output,
- false
- },
-};
-
-int bm2835_mmal_set_all_camera_controls(struct bm2835_mmal_dev *dev)
-{
- int c;
- int ret = 0;
-
- for (c = 0; c < V4L2_CTRL_COUNT; c++) {
- if ((dev->ctrls[c]) && (v4l2_ctrls[c].setter)) {
- ret = v4l2_ctrls[c].setter(dev, dev->ctrls[c],
- &v4l2_ctrls[c]);
- if (!v4l2_ctrls[c].ignore_errors && ret) {
- v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
- "Failed when setting default values for ctrl %d\n",
- c);
- break;
- }
- }
- }
- return ret;
-}
-
-int set_framerate_params(struct bm2835_mmal_dev *dev)
-{
- struct mmal_parameter_fps_range fps_range;
- int ret;
-
- if ((dev->exposure_mode_active != MMAL_PARAM_EXPOSUREMODE_OFF) &&
- (dev->exp_auto_priority)) {
- /* Variable FPS. Define min FPS as 1fps.
- * Max as max defined FPS.
- */
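-		/* timeperframe is seconds-per-frame, so the FPS range is its
-		 * reciprocal: numerator and denominator swap over.
-		 */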
- fps_range.fps_low.num = 1;
- fps_range.fps_low.den = 1;
- fps_range.fps_high.num = dev->capture.timeperframe.denominator;
- fps_range.fps_high.den = dev->capture.timeperframe.numerator;
- } else {
- /* Fixed FPS - set min and max to be the same */
- fps_range.fps_low.num = fps_range.fps_high.num =
- dev->capture.timeperframe.denominator;
- fps_range.fps_low.den = fps_range.fps_high.den =
- dev->capture.timeperframe.numerator;
- }
-
- v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
- "Set fps range to %d/%d to %d/%d\n",
- fps_range.fps_low.num,
- fps_range.fps_low.den,
- fps_range.fps_high.num,
- fps_range.fps_high.den);
-
- ret = vchiq_mmal_port_parameter_set(dev->instance,
- &dev->component[MMAL_COMPONENT_CAMERA]->
- output[MMAL_CAMERA_PORT_PREVIEW],
- MMAL_PARAMETER_FPS_RANGE,
- &fps_range, sizeof(fps_range));
- ret += vchiq_mmal_port_parameter_set(dev->instance,
- &dev->component[MMAL_COMPONENT_CAMERA]->
- output[MMAL_CAMERA_PORT_VIDEO],
- MMAL_PARAMETER_FPS_RANGE,
- &fps_range, sizeof(fps_range));
- ret += vchiq_mmal_port_parameter_set(dev->instance,
- &dev->component[MMAL_COMPONENT_CAMERA]->
- output[MMAL_CAMERA_PORT_CAPTURE],
- MMAL_PARAMETER_FPS_RANGE,
- &fps_range, sizeof(fps_range));
- if (ret)
- v4l2_dbg(0, bcm2835_v4l2_debug, &dev->v4l2_dev,
- "Failed to set fps ret %d\n", ret);
-
- return ret;
-}
-
-int bm2835_mmal_init_controls(struct bm2835_mmal_dev *dev,
- struct v4l2_ctrl_handler *hdl)
-{
- int c;
- const struct bm2835_mmal_v4l2_ctrl *ctrl;
-
- v4l2_ctrl_handler_init(hdl, V4L2_CTRL_COUNT);
-
- for (c = 0; c < V4L2_CTRL_COUNT; c++) {
- ctrl = &v4l2_ctrls[c];
-
- switch (ctrl->type) {
- case MMAL_CONTROL_TYPE_STD:
- dev->ctrls[c] = v4l2_ctrl_new_std(hdl,
- &bm2835_mmal_ctrl_ops, ctrl->id,
- ctrl->min, ctrl->max, ctrl->step, ctrl->def);
- break;
-
- case MMAL_CONTROL_TYPE_STD_MENU:
- {
- int mask = ctrl->min;
-
- if (ctrl->id == V4L2_CID_SCENE_MODE) {
- /* Special handling to work out the mask
- * value based on the scene_configs array
- * at runtime. Reduces the chance of
- * mismatches.
- */
- int i;
- mask = 1<<V4L2_SCENE_MODE_NONE;
- for (i = 0;
- i < ARRAY_SIZE(scene_configs);
- i++) {
- mask |= 1<<scene_configs[i].v4l2_scene;
- }
- mask = ~mask;
- }
-
- dev->ctrls[c] = v4l2_ctrl_new_std_menu(hdl,
- &bm2835_mmal_ctrl_ops, ctrl->id,
- ctrl->max, mask, ctrl->def);
- break;
- }
-
- case MMAL_CONTROL_TYPE_INT_MENU:
- dev->ctrls[c] = v4l2_ctrl_new_int_menu(hdl,
- &bm2835_mmal_ctrl_ops, ctrl->id,
- ctrl->max, ctrl->def, ctrl->imenu);
- break;
-
- case MMAL_CONTROL_TYPE_CLUSTER:
- /* skip this entry when constructing controls */
- continue;
- }
-
- if (hdl->error)
- break;
-
- dev->ctrls[c]->priv = (void *)ctrl;
- }
-
- if (hdl->error) {
- pr_err("error adding control %d/%d id 0x%x\n", c,
- V4L2_CTRL_COUNT, ctrl->id);
- return hdl->error;
- }
-
- for (c = 0; c < V4L2_CTRL_COUNT; c++) {
- ctrl = &v4l2_ctrls[c];
-
- switch (ctrl->type) {
- case MMAL_CONTROL_TYPE_CLUSTER:
- v4l2_ctrl_auto_cluster(ctrl->min,
- &dev->ctrls[c + 1],
- ctrl->max,
- ctrl->def);
- break;
-
- case MMAL_CONTROL_TYPE_STD:
- case MMAL_CONTROL_TYPE_STD_MENU:
- case MMAL_CONTROL_TYPE_INT_MENU:
- break;
- }
- }
-
- return 0;
-}
diff --git a/drivers/staging/media/platform/bcm2835/mmal-encodings.h b/drivers/staging/media/platform/bcm2835/mmal-encodings.h
deleted file mode 100644
index 024d620dc1df..000000000000
--- a/drivers/staging/media/platform/bcm2835/mmal-encodings.h
+++ /dev/null
@@ -1,127 +0,0 @@
-/*
- * Broadcom BM2835 V4L2 driver
- *
- * Copyright © 2013 Raspberry Pi (Trading) Ltd.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file COPYING in the main directory of this archive
- * for more details.
- *
- * Authors: Vincent Sanders <vincent.sanders@collabora.co.uk>
- * Dave Stevenson <dsteve@broadcom.com>
- * Simon Mellor <simellor@broadcom.com>
- * Luke Diamand <luked@broadcom.com>
- */
-#ifndef MMAL_ENCODINGS_H
-#define MMAL_ENCODINGS_H
-
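-/* Encodings are FourCC codes built with the MMAL_FOURCC() helper
- * (defined elsewhere in the driver), which is assumed to pack the four
- * characters into a single u32, e.g. 'H','2','6','4' below.
- */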
-#define MMAL_ENCODING_H264 MMAL_FOURCC('H', '2', '6', '4')
-#define MMAL_ENCODING_H263 MMAL_FOURCC('H', '2', '6', '3')
-#define MMAL_ENCODING_MP4V MMAL_FOURCC('M', 'P', '4', 'V')
-#define MMAL_ENCODING_MP2V MMAL_FOURCC('M', 'P', '2', 'V')
-#define MMAL_ENCODING_MP1V MMAL_FOURCC('M', 'P', '1', 'V')
-#define MMAL_ENCODING_WMV3 MMAL_FOURCC('W', 'M', 'V', '3')
-#define MMAL_ENCODING_WMV2 MMAL_FOURCC('W', 'M', 'V', '2')
-#define MMAL_ENCODING_WMV1 MMAL_FOURCC('W', 'M', 'V', '1')
-#define MMAL_ENCODING_WVC1 MMAL_FOURCC('W', 'V', 'C', '1')
-#define MMAL_ENCODING_VP8 MMAL_FOURCC('V', 'P', '8', ' ')
-#define MMAL_ENCODING_VP7 MMAL_FOURCC('V', 'P', '7', ' ')
-#define MMAL_ENCODING_VP6 MMAL_FOURCC('V', 'P', '6', ' ')
-#define MMAL_ENCODING_THEORA MMAL_FOURCC('T', 'H', 'E', 'O')
-#define MMAL_ENCODING_SPARK MMAL_FOURCC('S', 'P', 'R', 'K')
-#define MMAL_ENCODING_MJPEG MMAL_FOURCC('M', 'J', 'P', 'G')
-
-#define MMAL_ENCODING_JPEG MMAL_FOURCC('J', 'P', 'E', 'G')
-#define MMAL_ENCODING_GIF MMAL_FOURCC('G', 'I', 'F', ' ')
-#define MMAL_ENCODING_PNG MMAL_FOURCC('P', 'N', 'G', ' ')
-#define MMAL_ENCODING_PPM MMAL_FOURCC('P', 'P', 'M', ' ')
-#define MMAL_ENCODING_TGA MMAL_FOURCC('T', 'G', 'A', ' ')
-#define MMAL_ENCODING_BMP MMAL_FOURCC('B', 'M', 'P', ' ')
-
-#define MMAL_ENCODING_I420 MMAL_FOURCC('I', '4', '2', '0')
-#define MMAL_ENCODING_I420_SLICE MMAL_FOURCC('S', '4', '2', '0')
-#define MMAL_ENCODING_YV12 MMAL_FOURCC('Y', 'V', '1', '2')
-#define MMAL_ENCODING_I422 MMAL_FOURCC('I', '4', '2', '2')
-#define MMAL_ENCODING_I422_SLICE MMAL_FOURCC('S', '4', '2', '2')
-#define MMAL_ENCODING_YUYV MMAL_FOURCC('Y', 'U', 'Y', 'V')
-#define MMAL_ENCODING_YVYU MMAL_FOURCC('Y', 'V', 'Y', 'U')
-#define MMAL_ENCODING_UYVY MMAL_FOURCC('U', 'Y', 'V', 'Y')
-#define MMAL_ENCODING_VYUY MMAL_FOURCC('V', 'Y', 'U', 'Y')
-#define MMAL_ENCODING_NV12 MMAL_FOURCC('N', 'V', '1', '2')
-#define MMAL_ENCODING_NV21 MMAL_FOURCC('N', 'V', '2', '1')
-#define MMAL_ENCODING_ARGB MMAL_FOURCC('A', 'R', 'G', 'B')
-#define MMAL_ENCODING_RGBA MMAL_FOURCC('R', 'G', 'B', 'A')
-#define MMAL_ENCODING_ABGR MMAL_FOURCC('A', 'B', 'G', 'R')
-#define MMAL_ENCODING_BGRA MMAL_FOURCC('B', 'G', 'R', 'A')
-#define MMAL_ENCODING_RGB16 MMAL_FOURCC('R', 'G', 'B', '2')
-#define MMAL_ENCODING_RGB24 MMAL_FOURCC('R', 'G', 'B', '3')
-#define MMAL_ENCODING_RGB32 MMAL_FOURCC('R', 'G', 'B', '4')
-#define MMAL_ENCODING_BGR16 MMAL_FOURCC('B', 'G', 'R', '2')
-#define MMAL_ENCODING_BGR24 MMAL_FOURCC('B', 'G', 'R', '3')
-#define MMAL_ENCODING_BGR32 MMAL_FOURCC('B', 'G', 'R', '4')
-
-/** SAND Video (YUVUV128) format, native format understood by VideoCore.
- * This format is *not* opaque - if requested you will receive full frames
- * of YUV_UV video.
- */
-#define MMAL_ENCODING_YUVUV128 MMAL_FOURCC('S', 'A', 'N', 'D')
-
-/** VideoCore opaque image format, image handles are returned to
- * the host but not the actual image data.
- */
-#define MMAL_ENCODING_OPAQUE MMAL_FOURCC('O', 'P', 'Q', 'V')
-
-/** An EGL image handle
- */
-#define MMAL_ENCODING_EGL_IMAGE MMAL_FOURCC('E', 'G', 'L', 'I')
-
-/* @} */
-
-/** \name Pre-defined audio encodings */
-/* @{ */
-#define MMAL_ENCODING_PCM_UNSIGNED_BE MMAL_FOURCC('P', 'C', 'M', 'U')
-#define MMAL_ENCODING_PCM_UNSIGNED_LE MMAL_FOURCC('p', 'c', 'm', 'u')
-#define MMAL_ENCODING_PCM_SIGNED_BE MMAL_FOURCC('P', 'C', 'M', 'S')
-#define MMAL_ENCODING_PCM_SIGNED_LE MMAL_FOURCC('p', 'c', 'm', 's')
-#define MMAL_ENCODING_PCM_FLOAT_BE MMAL_FOURCC('P', 'C', 'M', 'F')
-#define MMAL_ENCODING_PCM_FLOAT_LE MMAL_FOURCC('p', 'c', 'm', 'f')
-
-/* Pre-defined H264 encoding variants */
-
-/** ISO 14496-10 Annex B byte stream format */
-#define MMAL_ENCODING_VARIANT_H264_DEFAULT 0
-/** ISO 14496-15 AVC stream format */
-#define MMAL_ENCODING_VARIANT_H264_AVC1 MMAL_FOURCC('A', 'V', 'C', '1')
-/** Implicitly delineated NAL units without emulation prevention */
-#define MMAL_ENCODING_VARIANT_H264_RAW MMAL_FOURCC('R', 'A', 'W', ' ')
-
-
-/** \defgroup MmalColorSpace List of pre-defined video color spaces
- * This defines a list of common color spaces. This list isn't exhaustive and
- * is only provided as a convenience to avoid clients having to use FourCC
- * codes directly. However components are allowed to define and use their own
- * FourCC codes.
- */
-/* @{ */
-
-/** Unknown color space */
-#define MMAL_COLOR_SPACE_UNKNOWN 0
-/** ITU-R BT.601-5 [SDTV] */
-#define MMAL_COLOR_SPACE_ITUR_BT601 MMAL_FOURCC('Y', '6', '0', '1')
-/** ITU-R BT.709-3 [HDTV] */
-#define MMAL_COLOR_SPACE_ITUR_BT709 MMAL_FOURCC('Y', '7', '0', '9')
-/** JPEG JFIF */
-#define MMAL_COLOR_SPACE_JPEG_JFIF MMAL_FOURCC('Y', 'J', 'F', 'I')
-/** Title 47 Code of Federal Regulations (2003) 73.682 (a) (20) */
-#define MMAL_COLOR_SPACE_FCC MMAL_FOURCC('Y', 'F', 'C', 'C')
-/** Society of Motion Picture and Television Engineers 240M (1999) */
-#define MMAL_COLOR_SPACE_SMPTE240M MMAL_FOURCC('Y', '2', '4', '0')
-/** ITU-R BT.470-2 System M */
-#define MMAL_COLOR_SPACE_BT470_2_M MMAL_FOURCC('Y', '_', '_', 'M')
-/** ITU-R BT.470-2 System BG */
-#define MMAL_COLOR_SPACE_BT470_2_BG MMAL_FOURCC('Y', '_', 'B', 'G')
-/** JPEG JFIF, but with 16..255 luma */
-#define MMAL_COLOR_SPACE_JFIF_Y16_255 MMAL_FOURCC('Y', 'Y', '1', '6')
-/* @} MmalColorSpace List */
-
-#endif /* MMAL_ENCODINGS_H */
diff --git a/drivers/staging/media/platform/bcm2835/mmal-msg-format.h b/drivers/staging/media/platform/bcm2835/mmal-msg-format.h
deleted file mode 100644
index 123d86ef582b..000000000000
--- a/drivers/staging/media/platform/bcm2835/mmal-msg-format.h
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * Broadcom BM2835 V4L2 driver
- *
- * Copyright © 2013 Raspberry Pi (Trading) Ltd.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file COPYING in the main directory of this archive
- * for more details.
- *
- * Authors: Vincent Sanders <vincent.sanders@collabora.co.uk>
- * Dave Stevenson <dsteve@broadcom.com>
- * Simon Mellor <simellor@broadcom.com>
- * Luke Diamand <luked@broadcom.com>
- */
-
-#ifndef MMAL_MSG_FORMAT_H
-#define MMAL_MSG_FORMAT_H
-
-#include "mmal-msg-common.h"
-
-/* MMAL_ES_FORMAT_T */
-
-
-struct mmal_audio_format {
- u32 channels; /**< Number of audio channels */
- u32 sample_rate; /**< Sample rate */
-
- u32 bits_per_sample; /**< Bits per sample */
- u32 block_align; /**< Size of a block of data */
-};
-
-struct mmal_video_format {
- u32 width; /**< Width of frame in pixels */
- u32 height; /**< Height of frame in rows of pixels */
- struct mmal_rect crop; /**< Visible region of the frame */
- struct mmal_rational frame_rate; /**< Frame rate */
- struct mmal_rational par; /**< Pixel aspect ratio */
-
- /* FourCC specifying the color space of the video stream. See the
- * \ref MmalColorSpace "pre-defined color spaces" for some examples.
- */
- u32 color_space;
-};
-
-struct mmal_subpicture_format {
- u32 x_offset;
- u32 y_offset;
-};
-
-union mmal_es_specific_format {
- struct mmal_audio_format audio;
- struct mmal_video_format video;
- struct mmal_subpicture_format subpicture;
-};
-
-/** Definition of an elementary stream format (MMAL_ES_FORMAT_T) */
-struct mmal_es_format {
- u32 type; /* enum mmal_es_type */
-
- u32 encoding; /* FourCC specifying encoding of the elementary stream.*/
- u32 encoding_variant; /* FourCC specifying the specific
- * encoding variant of the elementary
- * stream.
- */
-
- union mmal_es_specific_format *es; /* TODO: pointers in
- * message serialisation?!?
- */
- /* Type specific
- * information for the
- * elementary stream
- */
-
- u32 bitrate; /**< Bitrate in bits per second */
- u32 flags; /**< Flags describing properties of the elementary stream. */
-
- u32 extradata_size; /**< Size of the codec specific data */
- u8 *extradata; /**< Codec specific data */
-};
-
-#endif /* MMAL_MSG_FORMAT_H */
diff --git a/drivers/staging/media/platform/bcm2835/mmal-msg-port.h b/drivers/staging/media/platform/bcm2835/mmal-msg-port.h
deleted file mode 100644
index a55c1ea2eceb..000000000000
--- a/drivers/staging/media/platform/bcm2835/mmal-msg-port.h
+++ /dev/null
@@ -1,107 +0,0 @@
-/*
- * Broadcom BM2835 V4L2 driver
- *
- * Copyright © 2013 Raspberry Pi (Trading) Ltd.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file COPYING in the main directory of this archive
- * for more details.
- *
- * Authors: Vincent Sanders <vincent.sanders@collabora.co.uk>
- * Dave Stevenson <dsteve@broadcom.com>
- * Simon Mellor <simellor@broadcom.com>
- * Luke Diamand <luked@broadcom.com>
- */
-
-/* MMAL_PORT_TYPE_T */
-enum mmal_port_type {
- MMAL_PORT_TYPE_UNKNOWN = 0, /**< Unknown port type */
- MMAL_PORT_TYPE_CONTROL, /**< Control port */
- MMAL_PORT_TYPE_INPUT, /**< Input port */
- MMAL_PORT_TYPE_OUTPUT, /**< Output port */
- MMAL_PORT_TYPE_CLOCK, /**< Clock port */
-};
-
-/** The port is pass-through and doesn't need buffer headers allocated */
-#define MMAL_PORT_CAPABILITY_PASSTHROUGH 0x01
-/** The port wants to allocate the buffer payloads.
- * This signals a preference that payload allocation should be done
- * on this port for efficiency reasons. */
-#define MMAL_PORT_CAPABILITY_ALLOCATION 0x02
-/** The port supports format change events.
- * This applies to input ports and is used to let the client know
- * whether the port supports being reconfigured via a format
- * change event (i.e. without having to disable the port). */
-#define MMAL_PORT_CAPABILITY_SUPPORTS_EVENT_FORMAT_CHANGE 0x04
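-/* The capabilities combine as a bitmask, e.g. a port advertising both
- * ALLOCATION and SUPPORTS_EVENT_FORMAT_CHANGE reports 0x02 | 0x04 = 0x06.
- */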
-
-/* mmal port structure (MMAL_PORT_T)
- *
- * Most elements are informational only; the pointer values for
- * interrogation messages are generally provided as additional
- * structures within the message. When used to set values, only the
- * buffer_num, buffer_size and userdata parameters are writable.
- */
-struct mmal_port {
- void *priv; /* Private member used by the framework */
- const char *name; /* Port name. Used for debugging purposes (RO) */
-
- u32 type; /* Type of the port (RO) enum mmal_port_type */
- u16 index; /* Index of the port in its type list (RO) */
- u16 index_all; /* Index of the port in the list of all ports (RO) */
-
- u32 is_enabled; /* Indicates whether the port is enabled or not (RO) */
- struct mmal_es_format *format; /* Format of the elementary stream */
-
- u32 buffer_num_min; /* Minimum number of buffers the port
- * requires (RO). This is set by the
- * component.
- */
-
- u32 buffer_size_min; /* Minimum size of buffers the port
- * requires (RO). This is set by the
- * component.
- */
-
- u32 buffer_alignment_min; /* Minimum alignment requirement for
- * the buffers (RO). A value of
- * zero means no special alignment
- * requirements. This is set by the
- * component.
- */
-
- u32 buffer_num_recommended; /* Number of buffers the port
- * recommends for optimal
- * performance (RO). A value of
- * zero means no special
- * recommendation. This is set
- * by the component.
- */
-
- u32 buffer_size_recommended; /* Size of buffers the port
- * recommends for optimal
- * performance (RO). A value of
- * zero means no special
- * recommendation. This is set
- * by the component.
- */
-
- u32 buffer_num; /* Actual number of buffers the port will use.
- * This is set by the client.
- */
-
- u32 buffer_size; /* Actual maximum size of the buffers that
- * will be sent to the port. This is set by
- * the client.
- */
-
- void *component; /* Component this port belongs to (Read Only) */
-
- void *userdata; /* Field reserved for use by the client */
-
- u32 capabilities; /* Flags describing the capabilities of a
- * port (RO). Bitwise combination of \ref
- * portcapabilities "Port capabilities"
- * values.
- */
-
-};
diff --git a/drivers/staging/media/platform/bcm2835/mmal-msg.h b/drivers/staging/media/platform/bcm2835/mmal-msg.h
deleted file mode 100644
index 67b1076015a5..000000000000
--- a/drivers/staging/media/platform/bcm2835/mmal-msg.h
+++ /dev/null
@@ -1,404 +0,0 @@
-/*
- * Broadcom BM2835 V4L2 driver
- *
- * Copyright © 2013 Raspberry Pi (Trading) Ltd.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file COPYING in the main directory of this archive
- * for more details.
- *
- * Authors: Vincent Sanders <vincent.sanders@collabora.co.uk>
- * Dave Stevenson <dsteve@broadcom.com>
- * Simon Mellor <simellor@broadcom.com>
- * Luke Diamand <luked@broadcom.com>
- */
-
-/* All the data structures which serialise the MMAL protocol. Note
- * these are directly mapped onto the received message data.
- *
- * BEWARE: They seem to *assume* pointers are u32 and that there is no
- * structure padding!
- *
- * NOTE: this implementation uses kernel types to ensure sizes. Rather
- * than assigning values to enums to force their size, the
- * implementation uses fixed size types and not the enums (though the
- * comments give the actual enum type).
- */
-
-#define VC_MMAL_VER 15
-#define VC_MMAL_MIN_VER 10
-#define VC_MMAL_SERVER_NAME MAKE_FOURCC("mmal")
-
-/* max total message size is 512 bytes */
-#define MMAL_MSG_MAX_SIZE 512
-/* with six 32bit header elements max payload is therefore 488 bytes */
-#define MMAL_MSG_MAX_PAYLOAD 488
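-/* Sanity-check sketch, assuming struct mmal_msg_header below stays at
- * six u32-sized fields (24 bytes): 512 - 24 = 488, i.e.
- * BUILD_BUG_ON(MMAL_MSG_MAX_SIZE - MMAL_MSG_MAX_PAYLOAD != 24);
- */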
-
-#include "mmal-msg-common.h"
-#include "mmal-msg-format.h"
-#include "mmal-msg-port.h"
-
-enum mmal_msg_type {
- MMAL_MSG_TYPE_QUIT = 1,
- MMAL_MSG_TYPE_SERVICE_CLOSED,
- MMAL_MSG_TYPE_GET_VERSION,
- MMAL_MSG_TYPE_COMPONENT_CREATE,
- MMAL_MSG_TYPE_COMPONENT_DESTROY, /* 5 */
- MMAL_MSG_TYPE_COMPONENT_ENABLE,
- MMAL_MSG_TYPE_COMPONENT_DISABLE,
- MMAL_MSG_TYPE_PORT_INFO_GET,
- MMAL_MSG_TYPE_PORT_INFO_SET,
- MMAL_MSG_TYPE_PORT_ACTION, /* 10 */
- MMAL_MSG_TYPE_BUFFER_FROM_HOST,
- MMAL_MSG_TYPE_BUFFER_TO_HOST,
- MMAL_MSG_TYPE_GET_STATS,
- MMAL_MSG_TYPE_PORT_PARAMETER_SET,
- MMAL_MSG_TYPE_PORT_PARAMETER_GET, /* 15 */
- MMAL_MSG_TYPE_EVENT_TO_HOST,
- MMAL_MSG_TYPE_GET_CORE_STATS_FOR_PORT,
- MMAL_MSG_TYPE_OPAQUE_ALLOCATOR,
- MMAL_MSG_TYPE_CONSUME_MEM,
- MMAL_MSG_TYPE_LMK, /* 20 */
- MMAL_MSG_TYPE_OPAQUE_ALLOCATOR_DESC,
- MMAL_MSG_TYPE_DRM_GET_LHS32,
- MMAL_MSG_TYPE_DRM_GET_TIME,
- MMAL_MSG_TYPE_BUFFER_FROM_HOST_ZEROLEN,
- MMAL_MSG_TYPE_PORT_FLUSH, /* 25 */
- MMAL_MSG_TYPE_HOST_LOG,
- MMAL_MSG_TYPE_MSG_LAST
-};
-
-/* port action request messages differ depending on the action type */
-enum mmal_msg_port_action_type {
-	MMAL_MSG_PORT_ACTION_TYPE_UNKNOWN = 0,  /* Unknown action */
- MMAL_MSG_PORT_ACTION_TYPE_ENABLE, /* Enable a port */
- MMAL_MSG_PORT_ACTION_TYPE_DISABLE, /* Disable a port */
- MMAL_MSG_PORT_ACTION_TYPE_FLUSH, /* Flush a port */
- MMAL_MSG_PORT_ACTION_TYPE_CONNECT, /* Connect ports */
- MMAL_MSG_PORT_ACTION_TYPE_DISCONNECT, /* Disconnect ports */
- MMAL_MSG_PORT_ACTION_TYPE_SET_REQUIREMENTS, /* Set buffer requirements*/
-};
-
-struct mmal_msg_header {
- u32 magic;
- u32 type; /** enum mmal_msg_type */
-
- /* Opaque handle to the control service */
- struct mmal_control_service *control_service;
-
- struct mmal_msg_context *context; /** a u32 per message context */
- u32 status; /** The status of the vchiq operation */
- u32 padding;
-};
-
-/* Send from VC to host to report version */
-struct mmal_msg_version {
- u32 flags;
- u32 major;
- u32 minor;
- u32 minimum;
-};
-
-/* request to VC to create component */
-struct mmal_msg_component_create {
- void *client_component; /* component context */
- char name[128];
- u32 pid; /* For debug */
-};
-
-/* reply from VC to component creation request */
-struct mmal_msg_component_create_reply {
-	u32 status;	/** enum mmal_msg_status - how does this differ from
- * the one in the header?
- */
- u32 component_handle; /* VideoCore handle for component */
- u32 input_num; /* Number of input ports */
- u32 output_num; /* Number of output ports */
- u32 clock_num; /* Number of clock ports */
-};
-
-/* request to VC to destroy a component */
-struct mmal_msg_component_destroy {
- u32 component_handle;
-};
-
-struct mmal_msg_component_destroy_reply {
- u32 status; /** The component destruction status */
-};
-
-
-/* request and reply to VC to enable a component */
-struct mmal_msg_component_enable {
- u32 component_handle;
-};
-
-struct mmal_msg_component_enable_reply {
- u32 status; /** The component enable status */
-};
-
-
-/* request and reply to VC to disable a component */
-struct mmal_msg_component_disable {
- u32 component_handle;
-};
-
-struct mmal_msg_component_disable_reply {
- u32 status; /** The component disable status */
-};
-
-/* request to VC to get port information */
-struct mmal_msg_port_info_get {
- u32 component_handle; /* component handle port is associated with */
- u32 port_type; /* enum mmal_msg_port_type */
- u32 index; /* port index to query */
-};
-
-/* reply from VC to get port info request */
-struct mmal_msg_port_info_get_reply {
- u32 status; /** enum mmal_msg_status */
- u32 component_handle; /* component handle port is associated with */
- u32 port_type; /* enum mmal_msg_port_type */
- u32 port_index; /* port indexed in query */
- s32 found; /* unused */
- u32 port_handle; /**< Handle to use for this port */
- struct mmal_port port;
-	struct mmal_es_format format; /* elementary stream format */
- union mmal_es_specific_format es; /* es type specific data */
- u8 extradata[MMAL_FORMAT_EXTRADATA_MAX_SIZE]; /* es extra data */
-};
-
-/* request to VC to set port information */
-struct mmal_msg_port_info_set {
- u32 component_handle;
- u32 port_type; /* enum mmal_msg_port_type */
- u32 port_index; /* port indexed in query */
- struct mmal_port port;
- struct mmal_es_format format;
- union mmal_es_specific_format es;
- u8 extradata[MMAL_FORMAT_EXTRADATA_MAX_SIZE];
-};
-
-/* reply from VC to port info set request */
-struct mmal_msg_port_info_set_reply {
- u32 status;
- u32 component_handle; /* component handle port is associated with */
- u32 port_type; /* enum mmal_msg_port_type */
- u32 index; /* port indexed in query */
- s32 found; /* unused */
- u32 port_handle; /**< Handle to use for this port */
- struct mmal_port port;
- struct mmal_es_format format;
- union mmal_es_specific_format es;
- u8 extradata[MMAL_FORMAT_EXTRADATA_MAX_SIZE];
-};
-
-
-/* port action requests that take a mmal_port as a parameter */
-struct mmal_msg_port_action_port {
- u32 component_handle;
- u32 port_handle;
- u32 action; /* enum mmal_msg_port_action_type */
- struct mmal_port port;
-};
-
-/* port action requests that take handles as a parameter */
-struct mmal_msg_port_action_handle {
- u32 component_handle;
- u32 port_handle;
- u32 action; /* enum mmal_msg_port_action_type */
- u32 connect_component_handle;
- u32 connect_port_handle;
-};
-
-struct mmal_msg_port_action_reply {
- u32 status; /** The port action operation status */
-};
-
-
-
-
-/* MMAL buffer transfer */
-
-/** Size of space reserved in a buffer message for short messages. */
-#define MMAL_VC_SHORT_DATA 128
-
-/** Signals that the current payload is the end of the stream of data */
-#define MMAL_BUFFER_HEADER_FLAG_EOS (1<<0)
-/** Signals that the start of the current payload starts a frame */
-#define MMAL_BUFFER_HEADER_FLAG_FRAME_START (1<<1)
-/** Signals that the end of the current payload ends a frame */
-#define MMAL_BUFFER_HEADER_FLAG_FRAME_END (1<<2)
-/** Signals that the current payload contains only complete frames (>1) */
-#define MMAL_BUFFER_HEADER_FLAG_FRAME \
- (MMAL_BUFFER_HEADER_FLAG_FRAME_START|MMAL_BUFFER_HEADER_FLAG_FRAME_END)
-/** Signals that the current payload is a keyframe (i.e. self decodable) */
-#define MMAL_BUFFER_HEADER_FLAG_KEYFRAME (1<<3)
-/** Signals a discontinuity in the stream of data (e.g. after a seek).
- * Can be used for instance by a decoder to reset its state */
-#define MMAL_BUFFER_HEADER_FLAG_DISCONTINUITY (1<<4)
-/** Signals a buffer containing some kind of config data for the component
- * (e.g. codec config data) */
-#define MMAL_BUFFER_HEADER_FLAG_CONFIG (1<<5)
-/** Signals an encrypted payload */
-#define MMAL_BUFFER_HEADER_FLAG_ENCRYPTED (1<<6)
-/** Signals a buffer containing side information */
-#define MMAL_BUFFER_HEADER_FLAG_CODECSIDEINFO (1<<7)
-/** Signals a buffer which is the snapshot/postview image from a stills
- * capture
- */
-#define MMAL_BUFFER_HEADER_FLAGS_SNAPSHOT (1<<8)
-/** Signals a buffer which contains data known to be corrupted */
-#define MMAL_BUFFER_HEADER_FLAG_CORRUPTED (1<<9)
-/** Signals that a buffer failed to be transmitted */
-#define MMAL_BUFFER_HEADER_FLAG_TRANSMISSION_FAILED (1<<10)
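-/* The flags combine, e.g. a self-contained keyframe delivered in a
- * single payload sets MMAL_BUFFER_HEADER_FLAG_FRAME |
- * MMAL_BUFFER_HEADER_FLAG_KEYFRAME (0x06 | 0x08 = 0x0e).
- */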
-
-struct mmal_driver_buffer {
- u32 magic;
- u32 component_handle;
- u32 port_handle;
- void *client_context;
-};
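-/* Under the 32-bit pointer assumption noted at the top of this file,
- * struct mmal_driver_buffer is 16 bytes, so the two copies at the start
- * of struct mmal_msg_buffer_from_host form the 32-byte context area
- * described below.
- */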
-
-/* buffer header */
-struct mmal_buffer_header {
- struct mmal_buffer_header *next; /* next header */
- void *priv; /* framework private data */
- u32 cmd;
- void *data;
- u32 alloc_size;
- u32 length;
- u32 offset;
- u32 flags;
- s64 pts;
- s64 dts;
- void *type;
- void *user_data;
-};
-
-struct mmal_buffer_header_type_specific {
- union {
- struct {
- u32 planes;
- u32 offset[4];
- u32 pitch[4];
- u32 flags;
- } video;
- } u;
-};
-
-struct mmal_msg_buffer_from_host {
- /* The front 32 bytes of the buffer header are copied
- * back to us in the reply to allow for context. This
- * area is used to store two mmal_driver_buffer structures to
- * allow for multiple concurrent service users.
- */
- /* control data */
- struct mmal_driver_buffer drvbuf;
-
- /* referenced control data for passthrough buffer management */
- struct mmal_driver_buffer drvbuf_ref;
- struct mmal_buffer_header buffer_header; /* buffer header itself */
- struct mmal_buffer_header_type_specific buffer_header_type_specific;
- s32 is_zero_copy;
- s32 has_reference;
-
-	/** allows short data to be transferred in the control message */
- u32 payload_in_message;
- u8 short_data[MMAL_VC_SHORT_DATA];
-};
-
-
-/* port parameter setting */
-
-#define MMAL_WORKER_PORT_PARAMETER_SPACE 96
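-/* 96 u32 slots, i.e. up to 384 bytes of parameter payload carried
- * inline in the set/get messages below.
- */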
-
-struct mmal_msg_port_parameter_set {
- u32 component_handle; /* component */
- u32 port_handle; /* port */
- u32 id; /* Parameter ID */
- u32 size; /* Parameter size */
- uint32_t value[MMAL_WORKER_PORT_PARAMETER_SPACE];
-};
-
-struct mmal_msg_port_parameter_set_reply {
- u32 status; /** enum mmal_msg_status todo: how does this
-			 * differ from the one in the header?
- */
-};
-
-/* port parameter getting */
-
-struct mmal_msg_port_parameter_get {
- u32 component_handle; /* component */
- u32 port_handle; /* port */
- u32 id; /* Parameter ID */
- u32 size; /* Parameter size */
-};
-
-struct mmal_msg_port_parameter_get_reply {
- u32 status; /* Status of mmal_port_parameter_get call */
- u32 id; /* Parameter ID */
- u32 size; /* Parameter size */
- uint32_t value[MMAL_WORKER_PORT_PARAMETER_SPACE];
-};
-
-/* event messages */
-#define MMAL_WORKER_EVENT_SPACE 256
-
-struct mmal_msg_event_to_host {
- void *client_component; /* component context */
-
- u32 port_type;
- u32 port_num;
-
- u32 cmd;
- u32 length;
- u8 data[MMAL_WORKER_EVENT_SPACE];
- struct mmal_buffer_header *delayed_buffer;
-};
-
-/* all mmal messages are serialised through this structure */
-struct mmal_msg {
- /* header */
- struct mmal_msg_header h;
- /* payload */
- union {
- struct mmal_msg_version version;
-
- struct mmal_msg_component_create component_create;
- struct mmal_msg_component_create_reply component_create_reply;
-
- struct mmal_msg_component_destroy component_destroy;
- struct mmal_msg_component_destroy_reply component_destroy_reply;
-
- struct mmal_msg_component_enable component_enable;
- struct mmal_msg_component_enable_reply component_enable_reply;
-
- struct mmal_msg_component_disable component_disable;
- struct mmal_msg_component_disable_reply component_disable_reply;
-
- struct mmal_msg_port_info_get port_info_get;
- struct mmal_msg_port_info_get_reply port_info_get_reply;
-
- struct mmal_msg_port_info_set port_info_set;
- struct mmal_msg_port_info_set_reply port_info_set_reply;
-
- struct mmal_msg_port_action_port port_action_port;
- struct mmal_msg_port_action_handle port_action_handle;
- struct mmal_msg_port_action_reply port_action_reply;
-
- struct mmal_msg_buffer_from_host buffer_from_host;
-
- struct mmal_msg_port_parameter_set port_parameter_set;
- struct mmal_msg_port_parameter_set_reply
- port_parameter_set_reply;
- struct mmal_msg_port_parameter_get
- port_parameter_get;
- struct mmal_msg_port_parameter_get_reply
- port_parameter_get_reply;
-
- struct mmal_msg_event_to_host event_to_host;
-
- u8 payload[MMAL_MSG_MAX_PAYLOAD];
- } u;
-};
diff --git a/drivers/staging/media/platform/bcm2835/mmal-parameters.h b/drivers/staging/media/platform/bcm2835/mmal-parameters.h
deleted file mode 100644
index f6abb5cfa49d..000000000000
--- a/drivers/staging/media/platform/bcm2835/mmal-parameters.h
+++ /dev/null
@@ -1,689 +0,0 @@
-/*
- * Broadcom BM2835 V4L2 driver
- *
- * Copyright © 2013 Raspberry Pi (Trading) Ltd.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file COPYING in the main directory of this archive
- * for more details.
- *
- * Authors: Vincent Sanders <vincent.sanders@collabora.co.uk>
- * Dave Stevenson <dsteve@broadcom.com>
- * Simon Mellor <simellor@broadcom.com>
- * Luke Diamand <luked@broadcom.com>
- */
-
-/* common parameters */
-
-/** @name Parameter groups
- * Parameters are divided into groups, and then allocated sequentially within
- * a group using an enum.
- * @{
- */
-
-/** Common parameter ID group, used with many types of component. */
-#define MMAL_PARAMETER_GROUP_COMMON (0<<16)
-/** Camera-specific parameter ID group. */
-#define MMAL_PARAMETER_GROUP_CAMERA (1<<16)
-/** Video-specific parameter ID group. */
-#define MMAL_PARAMETER_GROUP_VIDEO (2<<16)
-/** Audio-specific parameter ID group. */
-#define MMAL_PARAMETER_GROUP_AUDIO (3<<16)
-/** Clock-specific parameter ID group. */
-#define MMAL_PARAMETER_GROUP_CLOCK (4<<16)
-/** Miracast-specific parameter ID group. */
-#define MMAL_PARAMETER_GROUP_MIRACAST (5<<16)
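-/* A parameter ID is its group value plus the parameter's position in
- * that group's enum, e.g. MMAL_PARAMETER_ROTATION below works out as
- * MMAL_PARAMETER_GROUP_CAMERA + 2 = 0x10002.
- */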
-
-/* Common parameters */
-enum mmal_parameter_common_type {
- MMAL_PARAMETER_UNUSED /**< Never a valid parameter ID */
- = MMAL_PARAMETER_GROUP_COMMON,
- MMAL_PARAMETER_SUPPORTED_ENCODINGS, /**< MMAL_PARAMETER_ENCODING_T */
- MMAL_PARAMETER_URI, /**< MMAL_PARAMETER_URI_T */
-
- /** MMAL_PARAMETER_CHANGE_EVENT_REQUEST_T */
- MMAL_PARAMETER_CHANGE_EVENT_REQUEST,
-
- /** MMAL_PARAMETER_BOOLEAN_T */
- MMAL_PARAMETER_ZERO_COPY,
-
-	/** MMAL_PARAMETER_BUFFER_REQUIREMENTS_T */
- MMAL_PARAMETER_BUFFER_REQUIREMENTS,
-
- MMAL_PARAMETER_STATISTICS, /**< MMAL_PARAMETER_STATISTICS_T */
- MMAL_PARAMETER_CORE_STATISTICS, /**< MMAL_PARAMETER_CORE_STATISTICS_T */
- MMAL_PARAMETER_MEM_USAGE, /**< MMAL_PARAMETER_MEM_USAGE_T */
- MMAL_PARAMETER_BUFFER_FLAG_FILTER, /**< MMAL_PARAMETER_UINT32_T */
- MMAL_PARAMETER_SEEK, /**< MMAL_PARAMETER_SEEK_T */
- MMAL_PARAMETER_POWERMON_ENABLE, /**< MMAL_PARAMETER_BOOLEAN_T */
- MMAL_PARAMETER_LOGGING, /**< MMAL_PARAMETER_LOGGING_T */
- MMAL_PARAMETER_SYSTEM_TIME, /**< MMAL_PARAMETER_UINT64_T */
- MMAL_PARAMETER_NO_IMAGE_PADDING /**< MMAL_PARAMETER_BOOLEAN_T */
-};
-
-/* camera parameters */
-
-enum mmal_parameter_camera_type {
- /* 0 */
- /** @ref MMAL_PARAMETER_THUMBNAIL_CONFIG_T */
- MMAL_PARAMETER_THUMBNAIL_CONFIGURATION
- = MMAL_PARAMETER_GROUP_CAMERA,
- MMAL_PARAMETER_CAPTURE_QUALITY, /**< Unused? */
- MMAL_PARAMETER_ROTATION, /**< @ref MMAL_PARAMETER_INT32_T */
- MMAL_PARAMETER_EXIF_DISABLE, /**< @ref MMAL_PARAMETER_BOOLEAN_T */
- MMAL_PARAMETER_EXIF, /**< @ref MMAL_PARAMETER_EXIF_T */
- MMAL_PARAMETER_AWB_MODE, /**< @ref MMAL_PARAM_AWBMODE_T */
- MMAL_PARAMETER_IMAGE_EFFECT, /**< @ref MMAL_PARAMETER_IMAGEFX_T */
- MMAL_PARAMETER_COLOUR_EFFECT, /**< @ref MMAL_PARAMETER_COLOURFX_T */
- MMAL_PARAMETER_FLICKER_AVOID, /**< @ref MMAL_PARAMETER_FLICKERAVOID_T */
- MMAL_PARAMETER_FLASH, /**< @ref MMAL_PARAMETER_FLASH_T */
- MMAL_PARAMETER_REDEYE, /**< @ref MMAL_PARAMETER_REDEYE_T */
- MMAL_PARAMETER_FOCUS, /**< @ref MMAL_PARAMETER_FOCUS_T */
- MMAL_PARAMETER_FOCAL_LENGTHS, /**< Unused? */
- MMAL_PARAMETER_EXPOSURE_COMP, /**< @ref MMAL_PARAMETER_INT32_T */
- MMAL_PARAMETER_ZOOM, /**< @ref MMAL_PARAMETER_SCALEFACTOR_T */
- MMAL_PARAMETER_MIRROR, /**< @ref MMAL_PARAMETER_MIRROR_T */
-
- /* 0x10 */
- MMAL_PARAMETER_CAMERA_NUM, /**< @ref MMAL_PARAMETER_UINT32_T */
- MMAL_PARAMETER_CAPTURE, /**< @ref MMAL_PARAMETER_BOOLEAN_T */
- MMAL_PARAMETER_EXPOSURE_MODE, /**< @ref MMAL_PARAMETER_EXPOSUREMODE_T */
- MMAL_PARAMETER_EXP_METERING_MODE, /**< @ref MMAL_PARAMETER_EXPOSUREMETERINGMODE_T */
- MMAL_PARAMETER_FOCUS_STATUS, /**< @ref MMAL_PARAMETER_FOCUS_STATUS_T */
- MMAL_PARAMETER_CAMERA_CONFIG, /**< @ref MMAL_PARAMETER_CAMERA_CONFIG_T */
- MMAL_PARAMETER_CAPTURE_STATUS, /**< @ref MMAL_PARAMETER_CAPTURE_STATUS_T */
- MMAL_PARAMETER_FACE_TRACK, /**< @ref MMAL_PARAMETER_FACE_TRACK_T */
- MMAL_PARAMETER_DRAW_BOX_FACES_AND_FOCUS, /**< @ref MMAL_PARAMETER_BOOLEAN_T */
- MMAL_PARAMETER_JPEG_Q_FACTOR, /**< @ref MMAL_PARAMETER_UINT32_T */
- MMAL_PARAMETER_FRAME_RATE, /**< @ref MMAL_PARAMETER_FRAME_RATE_T */
- MMAL_PARAMETER_USE_STC, /**< @ref MMAL_PARAMETER_CAMERA_STC_MODE_T */
- MMAL_PARAMETER_CAMERA_INFO, /**< @ref MMAL_PARAMETER_CAMERA_INFO_T */
- MMAL_PARAMETER_VIDEO_STABILISATION, /**< @ref MMAL_PARAMETER_BOOLEAN_T */
- MMAL_PARAMETER_FACE_TRACK_RESULTS, /**< @ref MMAL_PARAMETER_FACE_TRACK_RESULTS_T */
- MMAL_PARAMETER_ENABLE_RAW_CAPTURE, /**< @ref MMAL_PARAMETER_BOOLEAN_T */
-
- /* 0x20 */
- MMAL_PARAMETER_DPF_FILE, /**< @ref MMAL_PARAMETER_URI_T */
- MMAL_PARAMETER_ENABLE_DPF_FILE, /**< @ref MMAL_PARAMETER_BOOLEAN_T */
- MMAL_PARAMETER_DPF_FAIL_IS_FATAL, /**< @ref MMAL_PARAMETER_BOOLEAN_T */
- MMAL_PARAMETER_CAPTURE_MODE, /**< @ref MMAL_PARAMETER_CAPTUREMODE_T */
- MMAL_PARAMETER_FOCUS_REGIONS, /**< @ref MMAL_PARAMETER_FOCUS_REGIONS_T */
- MMAL_PARAMETER_INPUT_CROP, /**< @ref MMAL_PARAMETER_INPUT_CROP_T */
- MMAL_PARAMETER_SENSOR_INFORMATION, /**< @ref MMAL_PARAMETER_SENSOR_INFORMATION_T */
- MMAL_PARAMETER_FLASH_SELECT, /**< @ref MMAL_PARAMETER_FLASH_SELECT_T */
- MMAL_PARAMETER_FIELD_OF_VIEW, /**< @ref MMAL_PARAMETER_FIELD_OF_VIEW_T */
- MMAL_PARAMETER_HIGH_DYNAMIC_RANGE, /**< @ref MMAL_PARAMETER_BOOLEAN_T */
- MMAL_PARAMETER_DYNAMIC_RANGE_COMPRESSION, /**< @ref MMAL_PARAMETER_DRC_T */
- MMAL_PARAMETER_ALGORITHM_CONTROL, /**< @ref MMAL_PARAMETER_ALGORITHM_CONTROL_T */
- MMAL_PARAMETER_SHARPNESS, /**< @ref MMAL_PARAMETER_RATIONAL_T */
- MMAL_PARAMETER_CONTRAST, /**< @ref MMAL_PARAMETER_RATIONAL_T */
- MMAL_PARAMETER_BRIGHTNESS, /**< @ref MMAL_PARAMETER_RATIONAL_T */
- MMAL_PARAMETER_SATURATION, /**< @ref MMAL_PARAMETER_RATIONAL_T */
-
- /* 0x30 */
- MMAL_PARAMETER_ISO, /**< @ref MMAL_PARAMETER_UINT32_T */
- MMAL_PARAMETER_ANTISHAKE, /**< @ref MMAL_PARAMETER_BOOLEAN_T */
-
- /** @ref MMAL_PARAMETER_IMAGEFX_PARAMETERS_T */
- MMAL_PARAMETER_IMAGE_EFFECT_PARAMETERS,
-
- /** @ref MMAL_PARAMETER_BOOLEAN_T */
- MMAL_PARAMETER_CAMERA_BURST_CAPTURE,
-
- /** @ref MMAL_PARAMETER_UINT32_T */
- MMAL_PARAMETER_CAMERA_MIN_ISO,
-
- /** @ref MMAL_PARAMETER_CAMERA_USE_CASE_T */
- MMAL_PARAMETER_CAMERA_USE_CASE,
-
-	/** @ref MMAL_PARAMETER_BOOLEAN_T */
- MMAL_PARAMETER_CAPTURE_STATS_PASS,
-
- /** @ref MMAL_PARAMETER_UINT32_T */
- MMAL_PARAMETER_CAMERA_CUSTOM_SENSOR_CONFIG,
-
- /** @ref MMAL_PARAMETER_BOOLEAN_T */
- MMAL_PARAMETER_ENABLE_REGISTER_FILE,
-
- /** @ref MMAL_PARAMETER_BOOLEAN_T */
- MMAL_PARAMETER_REGISTER_FAIL_IS_FATAL,
-
- /** @ref MMAL_PARAMETER_CONFIGFILE_T */
- MMAL_PARAMETER_CONFIGFILE_REGISTERS,
-
- /** @ref MMAL_PARAMETER_CONFIGFILE_CHUNK_T */
- MMAL_PARAMETER_CONFIGFILE_CHUNK_REGISTERS,
- MMAL_PARAMETER_JPEG_ATTACH_LOG, /**< @ref MMAL_PARAMETER_BOOLEAN_T */
- MMAL_PARAMETER_ZERO_SHUTTER_LAG, /**< @ref MMAL_PARAMETER_ZEROSHUTTERLAG_T */
- MMAL_PARAMETER_FPS_RANGE, /**< @ref MMAL_PARAMETER_FPS_RANGE_T */
- MMAL_PARAMETER_CAPTURE_EXPOSURE_COMP, /**< @ref MMAL_PARAMETER_INT32_T */
-
- /* 0x40 */
- MMAL_PARAMETER_SW_SHARPEN_DISABLE, /**< @ref MMAL_PARAMETER_BOOLEAN_T */
- MMAL_PARAMETER_FLASH_REQUIRED, /**< @ref MMAL_PARAMETER_BOOLEAN_T */
- MMAL_PARAMETER_SW_SATURATION_DISABLE, /**< @ref MMAL_PARAMETER_BOOLEAN_T */
- MMAL_PARAMETER_SHUTTER_SPEED, /**< Takes a @ref MMAL_PARAMETER_UINT32_T */
- MMAL_PARAMETER_CUSTOM_AWB_GAINS, /**< Takes a @ref MMAL_PARAMETER_AWB_GAINS_T */
-};
-
-struct mmal_parameter_rational {
- s32 num; /**< Numerator */
- s32 den; /**< Denominator */
-};
-
-enum mmal_parameter_camera_config_timestamp_mode {
- MMAL_PARAM_TIMESTAMP_MODE_ZERO = 0, /* Always timestamp frames as 0 */
- MMAL_PARAM_TIMESTAMP_MODE_RAW_STC, /* Use the raw STC value
- * for the frame timestamp
- */
- MMAL_PARAM_TIMESTAMP_MODE_RESET_STC, /* Use the STC timestamp
- * but subtract the
- * timestamp of the first
- * frame sent to give a
- * zero based timestamp.
- */
-};
-
-struct mmal_parameter_fps_range {
-	/** Low end of the permitted framerate range */
-	struct mmal_parameter_rational fps_low;
-	/** High end of the permitted framerate range */
-	struct mmal_parameter_rational fps_high;
-};
-
-
-/* camera configuration parameter */
-struct mmal_parameter_camera_config {
- /* Parameters for setting up the image pools */
- u32 max_stills_w; /* Max size of stills capture */
- u32 max_stills_h;
- u32 stills_yuv422; /* Allow YUV422 stills capture */
- u32 one_shot_stills; /* Continuous or one shot stills captures. */
-
- u32 max_preview_video_w; /* Max size of the preview or video
- * capture frames
- */
- u32 max_preview_video_h;
- u32 num_preview_video_frames;
-
- /** Sets the height of the circular buffer for stills capture. */
- u32 stills_capture_circular_buffer_height;
-
- /** Allows preview/encode to resume as fast as possible after the stills
- * input frame has been received, and then processes the still frame in
- * the background whilst preview/encode has resumed.
- * Actual mode is controlled by MMAL_PARAMETER_CAPTURE_MODE.
- */
- u32 fast_preview_resume;
-
- /** Selects algorithm for timestamping frames if
- * there is no clock component connected.
- * enum mmal_parameter_camera_config_timestamp_mode
- */
- s32 use_stc_timestamp;
-};
-
-
-enum mmal_parameter_exposuremode {
- MMAL_PARAM_EXPOSUREMODE_OFF,
- MMAL_PARAM_EXPOSUREMODE_AUTO,
- MMAL_PARAM_EXPOSUREMODE_NIGHT,
- MMAL_PARAM_EXPOSUREMODE_NIGHTPREVIEW,
- MMAL_PARAM_EXPOSUREMODE_BACKLIGHT,
- MMAL_PARAM_EXPOSUREMODE_SPOTLIGHT,
- MMAL_PARAM_EXPOSUREMODE_SPORTS,
- MMAL_PARAM_EXPOSUREMODE_SNOW,
- MMAL_PARAM_EXPOSUREMODE_BEACH,
- MMAL_PARAM_EXPOSUREMODE_VERYLONG,
- MMAL_PARAM_EXPOSUREMODE_FIXEDFPS,
- MMAL_PARAM_EXPOSUREMODE_ANTISHAKE,
- MMAL_PARAM_EXPOSUREMODE_FIREWORKS,
-};
-
-enum mmal_parameter_exposuremeteringmode {
- MMAL_PARAM_EXPOSUREMETERINGMODE_AVERAGE,
- MMAL_PARAM_EXPOSUREMETERINGMODE_SPOT,
- MMAL_PARAM_EXPOSUREMETERINGMODE_BACKLIT,
- MMAL_PARAM_EXPOSUREMETERINGMODE_MATRIX,
-};
-
-enum mmal_parameter_awbmode {
- MMAL_PARAM_AWBMODE_OFF,
- MMAL_PARAM_AWBMODE_AUTO,
- MMAL_PARAM_AWBMODE_SUNLIGHT,
- MMAL_PARAM_AWBMODE_CLOUDY,
- MMAL_PARAM_AWBMODE_SHADE,
- MMAL_PARAM_AWBMODE_TUNGSTEN,
- MMAL_PARAM_AWBMODE_FLUORESCENT,
- MMAL_PARAM_AWBMODE_INCANDESCENT,
- MMAL_PARAM_AWBMODE_FLASH,
- MMAL_PARAM_AWBMODE_HORIZON,
-};
-
-enum mmal_parameter_imagefx {
- MMAL_PARAM_IMAGEFX_NONE,
- MMAL_PARAM_IMAGEFX_NEGATIVE,
- MMAL_PARAM_IMAGEFX_SOLARIZE,
- MMAL_PARAM_IMAGEFX_POSTERIZE,
- MMAL_PARAM_IMAGEFX_WHITEBOARD,
- MMAL_PARAM_IMAGEFX_BLACKBOARD,
- MMAL_PARAM_IMAGEFX_SKETCH,
- MMAL_PARAM_IMAGEFX_DENOISE,
- MMAL_PARAM_IMAGEFX_EMBOSS,
- MMAL_PARAM_IMAGEFX_OILPAINT,
- MMAL_PARAM_IMAGEFX_HATCH,
- MMAL_PARAM_IMAGEFX_GPEN,
- MMAL_PARAM_IMAGEFX_PASTEL,
- MMAL_PARAM_IMAGEFX_WATERCOLOUR,
- MMAL_PARAM_IMAGEFX_FILM,
- MMAL_PARAM_IMAGEFX_BLUR,
- MMAL_PARAM_IMAGEFX_SATURATION,
- MMAL_PARAM_IMAGEFX_COLOURSWAP,
- MMAL_PARAM_IMAGEFX_WASHEDOUT,
- MMAL_PARAM_IMAGEFX_POSTERISE,
- MMAL_PARAM_IMAGEFX_COLOURPOINT,
- MMAL_PARAM_IMAGEFX_COLOURBALANCE,
- MMAL_PARAM_IMAGEFX_CARTOON,
-};
-
-enum MMAL_PARAM_FLICKERAVOID_T {
- MMAL_PARAM_FLICKERAVOID_OFF,
- MMAL_PARAM_FLICKERAVOID_AUTO,
- MMAL_PARAM_FLICKERAVOID_50HZ,
- MMAL_PARAM_FLICKERAVOID_60HZ,
- MMAL_PARAM_FLICKERAVOID_MAX = 0x7FFFFFFF
-};
-
-struct mmal_parameter_awbgains {
- struct mmal_parameter_rational r_gain; /**< Red gain */
- struct mmal_parameter_rational b_gain; /**< Blue gain */
-};
-
-/** Manner of video rate control */
-enum mmal_parameter_rate_control_mode {
- MMAL_VIDEO_RATECONTROL_DEFAULT,
- MMAL_VIDEO_RATECONTROL_VARIABLE,
- MMAL_VIDEO_RATECONTROL_CONSTANT,
- MMAL_VIDEO_RATECONTROL_VARIABLE_SKIP_FRAMES,
- MMAL_VIDEO_RATECONTROL_CONSTANT_SKIP_FRAMES
-};
-
-enum mmal_video_profile {
- MMAL_VIDEO_PROFILE_H263_BASELINE,
- MMAL_VIDEO_PROFILE_H263_H320CODING,
- MMAL_VIDEO_PROFILE_H263_BACKWARDCOMPATIBLE,
- MMAL_VIDEO_PROFILE_H263_ISWV2,
- MMAL_VIDEO_PROFILE_H263_ISWV3,
- MMAL_VIDEO_PROFILE_H263_HIGHCOMPRESSION,
- MMAL_VIDEO_PROFILE_H263_INTERNET,
- MMAL_VIDEO_PROFILE_H263_INTERLACE,
- MMAL_VIDEO_PROFILE_H263_HIGHLATENCY,
- MMAL_VIDEO_PROFILE_MP4V_SIMPLE,
- MMAL_VIDEO_PROFILE_MP4V_SIMPLESCALABLE,
- MMAL_VIDEO_PROFILE_MP4V_CORE,
- MMAL_VIDEO_PROFILE_MP4V_MAIN,
- MMAL_VIDEO_PROFILE_MP4V_NBIT,
- MMAL_VIDEO_PROFILE_MP4V_SCALABLETEXTURE,
- MMAL_VIDEO_PROFILE_MP4V_SIMPLEFACE,
- MMAL_VIDEO_PROFILE_MP4V_SIMPLEFBA,
- MMAL_VIDEO_PROFILE_MP4V_BASICANIMATED,
- MMAL_VIDEO_PROFILE_MP4V_HYBRID,
- MMAL_VIDEO_PROFILE_MP4V_ADVANCEDREALTIME,
- MMAL_VIDEO_PROFILE_MP4V_CORESCALABLE,
- MMAL_VIDEO_PROFILE_MP4V_ADVANCEDCODING,
- MMAL_VIDEO_PROFILE_MP4V_ADVANCEDCORE,
- MMAL_VIDEO_PROFILE_MP4V_ADVANCEDSCALABLE,
- MMAL_VIDEO_PROFILE_MP4V_ADVANCEDSIMPLE,
- MMAL_VIDEO_PROFILE_H264_BASELINE,
- MMAL_VIDEO_PROFILE_H264_MAIN,
- MMAL_VIDEO_PROFILE_H264_EXTENDED,
- MMAL_VIDEO_PROFILE_H264_HIGH,
- MMAL_VIDEO_PROFILE_H264_HIGH10,
- MMAL_VIDEO_PROFILE_H264_HIGH422,
- MMAL_VIDEO_PROFILE_H264_HIGH444,
- MMAL_VIDEO_PROFILE_H264_CONSTRAINED_BASELINE,
- MMAL_VIDEO_PROFILE_DUMMY = 0x7FFFFFFF
-};
-
-enum mmal_video_level {
- MMAL_VIDEO_LEVEL_H263_10,
- MMAL_VIDEO_LEVEL_H263_20,
- MMAL_VIDEO_LEVEL_H263_30,
- MMAL_VIDEO_LEVEL_H263_40,
- MMAL_VIDEO_LEVEL_H263_45,
- MMAL_VIDEO_LEVEL_H263_50,
- MMAL_VIDEO_LEVEL_H263_60,
- MMAL_VIDEO_LEVEL_H263_70,
- MMAL_VIDEO_LEVEL_MP4V_0,
- MMAL_VIDEO_LEVEL_MP4V_0b,
- MMAL_VIDEO_LEVEL_MP4V_1,
- MMAL_VIDEO_LEVEL_MP4V_2,
- MMAL_VIDEO_LEVEL_MP4V_3,
- MMAL_VIDEO_LEVEL_MP4V_4,
- MMAL_VIDEO_LEVEL_MP4V_4a,
- MMAL_VIDEO_LEVEL_MP4V_5,
- MMAL_VIDEO_LEVEL_MP4V_6,
- MMAL_VIDEO_LEVEL_H264_1,
- MMAL_VIDEO_LEVEL_H264_1b,
- MMAL_VIDEO_LEVEL_H264_11,
- MMAL_VIDEO_LEVEL_H264_12,
- MMAL_VIDEO_LEVEL_H264_13,
- MMAL_VIDEO_LEVEL_H264_2,
- MMAL_VIDEO_LEVEL_H264_21,
- MMAL_VIDEO_LEVEL_H264_22,
- MMAL_VIDEO_LEVEL_H264_3,
- MMAL_VIDEO_LEVEL_H264_31,
- MMAL_VIDEO_LEVEL_H264_32,
- MMAL_VIDEO_LEVEL_H264_4,
- MMAL_VIDEO_LEVEL_H264_41,
- MMAL_VIDEO_LEVEL_H264_42,
- MMAL_VIDEO_LEVEL_H264_5,
- MMAL_VIDEO_LEVEL_H264_51,
- MMAL_VIDEO_LEVEL_DUMMY = 0x7FFFFFFF
-};
-
-struct mmal_parameter_video_profile {
- enum mmal_video_profile profile;
- enum mmal_video_level level;
-};
-
-/* video parameters */
-
-enum mmal_parameter_video_type {
- /** @ref MMAL_DISPLAYREGION_T */
- MMAL_PARAMETER_DISPLAYREGION = MMAL_PARAMETER_GROUP_VIDEO,
-
- /** @ref MMAL_PARAMETER_VIDEO_PROFILE_T */
- MMAL_PARAMETER_SUPPORTED_PROFILES,
-
- /** @ref MMAL_PARAMETER_VIDEO_PROFILE_T */
- MMAL_PARAMETER_PROFILE,
-
- /** @ref MMAL_PARAMETER_UINT32_T */
- MMAL_PARAMETER_INTRAPERIOD,
-
- /** @ref MMAL_PARAMETER_VIDEO_RATECONTROL_T */
- MMAL_PARAMETER_RATECONTROL,
-
- /** @ref MMAL_PARAMETER_VIDEO_NALUNITFORMAT_T */
- MMAL_PARAMETER_NALUNITFORMAT,
-
- /** @ref MMAL_PARAMETER_BOOLEAN_T */
- MMAL_PARAMETER_MINIMISE_FRAGMENTATION,
-
- /** @ref MMAL_PARAMETER_UINT32_T.
- * Setting the value to zero resets to the default (one slice per frame).
- */
- MMAL_PARAMETER_MB_ROWS_PER_SLICE,
-
- /** @ref MMAL_PARAMETER_VIDEO_LEVEL_EXTENSION_T */
- MMAL_PARAMETER_VIDEO_LEVEL_EXTENSION,
-
- /** @ref MMAL_PARAMETER_VIDEO_EEDE_ENABLE_T */
- MMAL_PARAMETER_VIDEO_EEDE_ENABLE,
-
- /** @ref MMAL_PARAMETER_VIDEO_EEDE_LOSSRATE_T */
- MMAL_PARAMETER_VIDEO_EEDE_LOSSRATE,
-
- /** @ref MMAL_PARAMETER_BOOLEAN_T. Request an I-frame. */
- MMAL_PARAMETER_VIDEO_REQUEST_I_FRAME,
- /** @ref MMAL_PARAMETER_VIDEO_INTRA_REFRESH_T */
- MMAL_PARAMETER_VIDEO_INTRA_REFRESH,
-
- /** @ref MMAL_PARAMETER_BOOLEAN_T. */
- MMAL_PARAMETER_VIDEO_IMMUTABLE_INPUT,
-
- /** @ref MMAL_PARAMETER_UINT32_T. Run-time bit rate control */
- MMAL_PARAMETER_VIDEO_BIT_RATE,
-
- /** @ref MMAL_PARAMETER_FRAME_RATE_T */
- MMAL_PARAMETER_VIDEO_FRAME_RATE,
-
- /** @ref MMAL_PARAMETER_UINT32_T. */
- MMAL_PARAMETER_VIDEO_ENCODE_MIN_QUANT,
-
- /** @ref MMAL_PARAMETER_UINT32_T. */
- MMAL_PARAMETER_VIDEO_ENCODE_MAX_QUANT,
-
- /** @ref MMAL_PARAMETER_VIDEO_ENCODE_RC_MODEL_T. */
- MMAL_PARAMETER_VIDEO_ENCODE_RC_MODEL,
-
- MMAL_PARAMETER_EXTRA_BUFFERS, /**< @ref MMAL_PARAMETER_UINT32_T. */
- /** @ref MMAL_PARAMETER_UINT32_T.
- * Changing this parameter from the default can reduce frame rate
- * because image buffers need to be re-pitched.
- */
- MMAL_PARAMETER_VIDEO_ALIGN_HORIZ,
-
- /** @ref MMAL_PARAMETER_UINT32_T.
- * Changing this parameter from the default can reduce frame rate
- * because image buffers need to be re-pitched.
- */
- MMAL_PARAMETER_VIDEO_ALIGN_VERT,
-
- /** @ref MMAL_PARAMETER_BOOLEAN_T. */
- MMAL_PARAMETER_VIDEO_DROPPABLE_PFRAMES,
-
- /** @ref MMAL_PARAMETER_UINT32_T. */
- MMAL_PARAMETER_VIDEO_ENCODE_INITIAL_QUANT,
-
-	/** @ref MMAL_PARAMETER_UINT32_T. */
-	MMAL_PARAMETER_VIDEO_ENCODE_QP_P,
-
-	/** @ref MMAL_PARAMETER_UINT32_T. */
-	MMAL_PARAMETER_VIDEO_ENCODE_RC_SLICE_DQUANT,
-
- /** @ref MMAL_PARAMETER_UINT32_T */
- MMAL_PARAMETER_VIDEO_ENCODE_FRAME_LIMIT_BITS,
-
- /** @ref MMAL_PARAMETER_UINT32_T. */
- MMAL_PARAMETER_VIDEO_ENCODE_PEAK_RATE,
-
- /* H264 specific parameters */
-
- /** @ref MMAL_PARAMETER_BOOLEAN_T. */
- MMAL_PARAMETER_VIDEO_ENCODE_H264_DISABLE_CABAC,
-
- /** @ref MMAL_PARAMETER_BOOLEAN_T. */
- MMAL_PARAMETER_VIDEO_ENCODE_H264_LOW_LATENCY,
-
- /** @ref MMAL_PARAMETER_BOOLEAN_T. */
- MMAL_PARAMETER_VIDEO_ENCODE_H264_AU_DELIMITERS,
-
- /** @ref MMAL_PARAMETER_UINT32_T. */
- MMAL_PARAMETER_VIDEO_ENCODE_H264_DEBLOCK_IDC,
-
- /** @ref MMAL_PARAMETER_VIDEO_ENCODER_H264_MB_INTRA_MODES_T. */
- MMAL_PARAMETER_VIDEO_ENCODE_H264_MB_INTRA_MODE,
-
- /** @ref MMAL_PARAMETER_BOOLEAN_T */
- MMAL_PARAMETER_VIDEO_ENCODE_HEADER_ON_OPEN,
-
- /** @ref MMAL_PARAMETER_BOOLEAN_T */
- MMAL_PARAMETER_VIDEO_ENCODE_PRECODE_FOR_QP,
-
- /** @ref MMAL_PARAMETER_VIDEO_DRM_INIT_INFO_T. */
- MMAL_PARAMETER_VIDEO_DRM_INIT_INFO,
-
- /** @ref MMAL_PARAMETER_BOOLEAN_T */
- MMAL_PARAMETER_VIDEO_TIMESTAMP_FIFO,
-
- /** @ref MMAL_PARAMETER_BOOLEAN_T */
- MMAL_PARAMETER_VIDEO_DECODE_ERROR_CONCEALMENT,
-
- /** @ref MMAL_PARAMETER_VIDEO_DRM_PROTECT_BUFFER_T. */
- MMAL_PARAMETER_VIDEO_DRM_PROTECT_BUFFER,
-
- /** @ref MMAL_PARAMETER_BYTES_T */
- MMAL_PARAMETER_VIDEO_DECODE_CONFIG_VD3,
-
-	/** @ref MMAL_PARAMETER_BOOLEAN_T */
-	MMAL_PARAMETER_VIDEO_ENCODE_H264_VCL_HRD_PARAMETERS,
-
-	/** @ref MMAL_PARAMETER_BOOLEAN_T */
-	MMAL_PARAMETER_VIDEO_ENCODE_H264_LOW_DELAY_HRD_FLAG,
-
-	/** @ref MMAL_PARAMETER_BOOLEAN_T */
-	MMAL_PARAMETER_VIDEO_ENCODE_INLINE_HEADER
-};
-
-/** Valid mirror modes */
-enum mmal_parameter_mirror {
- MMAL_PARAM_MIRROR_NONE,
- MMAL_PARAM_MIRROR_VERTICAL,
- MMAL_PARAM_MIRROR_HORIZONTAL,
- MMAL_PARAM_MIRROR_BOTH,
-};
-
-enum mmal_parameter_displaytransform {
- MMAL_DISPLAY_ROT0 = 0,
- MMAL_DISPLAY_MIRROR_ROT0 = 1,
- MMAL_DISPLAY_MIRROR_ROT180 = 2,
- MMAL_DISPLAY_ROT180 = 3,
- MMAL_DISPLAY_MIRROR_ROT90 = 4,
- MMAL_DISPLAY_ROT270 = 5,
- MMAL_DISPLAY_ROT90 = 6,
- MMAL_DISPLAY_MIRROR_ROT270 = 7,
-};
-
-enum mmal_parameter_displaymode {
- MMAL_DISPLAY_MODE_FILL = 0,
- MMAL_DISPLAY_MODE_LETTERBOX = 1,
-};
-
-enum mmal_parameter_displayset {
- MMAL_DISPLAY_SET_NONE = 0,
- MMAL_DISPLAY_SET_NUM = 1,
- MMAL_DISPLAY_SET_FULLSCREEN = 2,
- MMAL_DISPLAY_SET_TRANSFORM = 4,
- MMAL_DISPLAY_SET_DEST_RECT = 8,
- MMAL_DISPLAY_SET_SRC_RECT = 0x10,
- MMAL_DISPLAY_SET_MODE = 0x20,
- MMAL_DISPLAY_SET_PIXEL = 0x40,
- MMAL_DISPLAY_SET_NOASPECT = 0x80,
- MMAL_DISPLAY_SET_LAYER = 0x100,
- MMAL_DISPLAY_SET_COPYPROTECT = 0x200,
- MMAL_DISPLAY_SET_ALPHA = 0x400,
-};
-
-struct mmal_parameter_displayregion {
- /** Bitfield that indicates which fields are set and should be
- * used. All other fields will maintain their current value.
- * \ref MMAL_DISPLAYSET_T defines the bits that can be
- * combined.
- */
- u32 set;
-
- /** Describes the display output device, with 0 typically
- * being a directly connected LCD display. The actual values
- * will depend on the hardware. Code using hard-wired numbers
- * (e.g. 2) is certain to fail.
- */
-
- u32 display_num;
- /** Indicates that we are using the full device screen area,
- * rather than a window of the display. If zero, then
- * dest_rect is used to specify a region of the display to
- * use.
- */
-
- s32 fullscreen;
- /** Indicates any rotation or flipping used to map frames onto
- * the natural display orientation.
- */
- u32 transform; /* enum mmal_parameter_displaytransform */
-
- /** Where to display the frame within the screen, if
- * fullscreen is zero.
- */
- struct vchiq_mmal_rect dest_rect;
-
- /** Indicates which area of the frame to display. If all
- * values are zero, the whole frame will be used.
- */
- struct vchiq_mmal_rect src_rect;
-
- /** If set to non-zero, indicates that any display scaling
- * should disregard the aspect ratio of the frame region being
- * displayed.
- */
- s32 noaspect;
-
- /** Indicates how the image should be scaled to fit the
- * display. \code MMAL_DISPLAY_MODE_FILL \endcode indicates
- * that the image should fill the screen by potentially
- * cropping the frames. Setting \code mode \endcode to \code
- * MMAL_DISPLAY_MODE_LETTERBOX \endcode indicates that all the
- * source region should be displayed and black bars added if
- * necessary.
- */
- u32 mode; /* enum mmal_parameter_displaymode */
-
- /** If non-zero, defines the width of a source pixel relative
- * to \code pixel_y \endcode. If zero, then pixels default to
- * being square.
- */
- u32 pixel_x;
-
- /** If non-zero, defines the height of a source pixel relative
- * to \code pixel_x \endcode. If zero, then pixels default to
- * being square.
- */
- u32 pixel_y;
-
- /** Sets the relative depth of the images, with greater values
- * being in front of smaller values.
- */
- u32 layer;
-
- /** Set to non-zero to ensure copy protection is used on
- * output.
- */
- s32 copyprotect_required;
-
- /** Level of opacity of the layer, where zero is fully
- * transparent and 255 is fully opaque.
- */
- u32 alpha;
-};
-
-#define MMAL_MAX_IMAGEFX_PARAMETERS 5
-
-struct mmal_parameter_imagefx_parameters {
- enum mmal_parameter_imagefx effect;
- u32 num_effect_params;
- u32 effect_parameter[MMAL_MAX_IMAGEFX_PARAMETERS];
-};
-
-#define MMAL_PARAMETER_CAMERA_INFO_MAX_CAMERAS 4
-#define MMAL_PARAMETER_CAMERA_INFO_MAX_FLASHES 2
-#define MMAL_PARAMETER_CAMERA_INFO_MAX_STR_LEN 16
-
-struct mmal_parameter_camera_info_camera_t {
- u32 port_id;
- u32 max_width;
- u32 max_height;
- u32 lens_present;
- u8 camera_name[MMAL_PARAMETER_CAMERA_INFO_MAX_STR_LEN];
-};
-
-enum mmal_parameter_camera_info_flash_type_t {
- /* Make values explicit to ensure they match values in config ini */
- MMAL_PARAMETER_CAMERA_INFO_FLASH_TYPE_XENON = 0,
- MMAL_PARAMETER_CAMERA_INFO_FLASH_TYPE_LED = 1,
- MMAL_PARAMETER_CAMERA_INFO_FLASH_TYPE_OTHER = 2,
- MMAL_PARAMETER_CAMERA_INFO_FLASH_TYPE_MAX = 0x7FFFFFFF
-};
-
-struct mmal_parameter_camera_info_flash_t {
- enum mmal_parameter_camera_info_flash_type_t flash_type;
-};
-
-struct mmal_parameter_camera_info_t {
- u32 num_cameras;
- u32 num_flashes;
- struct mmal_parameter_camera_info_camera_t
- cameras[MMAL_PARAMETER_CAMERA_INFO_MAX_CAMERAS];
- struct mmal_parameter_camera_info_flash_t
- flashes[MMAL_PARAMETER_CAMERA_INFO_MAX_FLASHES];
-};
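
As an aside, a minimal sketch of how the display-region parameter removed above is typically populated (not part of this patch): only the fields whose MMAL_DISPLAY_SET_* bit appears in ->set are applied by the firmware. The parameter id MMAL_PARAMETER_DISPLAYREGION, the helper name and the preview_port argument are assumptions for illustration; the port-parameter helper itself is declared in mmal-vchiq.h below.

/* Illustrative only - not part of the removed driver. Assumes the
 * MMAL_PARAMETER_DISPLAYREGION id defined elsewhere in mmal-parameters.h.
 */
static int example_set_preview_window(struct vchiq_mmal_instance *instance,
				      struct vchiq_mmal_port *preview_port)
{
	struct mmal_parameter_displayregion region = {
		.set = MMAL_DISPLAY_SET_DEST_RECT | MMAL_DISPLAY_SET_LAYER |
		       MMAL_DISPLAY_SET_ALPHA,
		.dest_rect = { .x = 0, .y = 0, .width = 640, .height = 480 },
		.layer = 2,	/* stack above lower display layers */
		.alpha = 255,	/* fully opaque */
	};

	/* only the fields flagged in .set are honoured; all others keep
	 * their current values on the VideoCore side
	 */
	return vchiq_mmal_port_parameter_set(instance, preview_port,
					     MMAL_PARAMETER_DISPLAYREGION,
					     &region, sizeof(region));
}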
diff --git a/drivers/staging/media/platform/bcm2835/mmal-vchiq.c b/drivers/staging/media/platform/bcm2835/mmal-vchiq.c
deleted file mode 100644
index fdfb6a620a43..000000000000
--- a/drivers/staging/media/platform/bcm2835/mmal-vchiq.c
+++ /dev/null
@@ -1,1916 +0,0 @@
-/*
- * Broadcom BCM2835 V4L2 driver
- *
- * Copyright © 2013 Raspberry Pi (Trading) Ltd.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file COPYING in the main directory of this archive
- * for more details.
- *
- * Authors: Vincent Sanders <vincent.sanders@collabora.co.uk>
- * Dave Stevenson <dsteve@broadcom.com>
- * Simon Mellor <simellor@broadcom.com>
- * Luke Diamand <luked@broadcom.com>
- *
- * V4L2 driver MMAL vchiq interface code
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/mutex.h>
-#include <linux/mm.h>
-#include <linux/slab.h>
-#include <linux/completion.h>
-#include <linux/vmalloc.h>
-#include <asm/cacheflush.h>
-#include <media/videobuf2-vmalloc.h>
-
-#include "mmal-common.h"
-#include "mmal-vchiq.h"
-#include "mmal-msg.h"
-
-#define USE_VCHIQ_ARM
-#include "interface/vchi/vchi.h"
-
-/* maximum number of components supported */
-#define VCHIQ_MMAL_MAX_COMPONENTS 4
-
-/*#define FULL_MSG_DUMP 1*/
-
-#ifdef DEBUG
-static const char *const msg_type_names[] = {
- "UNKNOWN",
- "QUIT",
- "SERVICE_CLOSED",
- "GET_VERSION",
- "COMPONENT_CREATE",
- "COMPONENT_DESTROY",
- "COMPONENT_ENABLE",
- "COMPONENT_DISABLE",
- "PORT_INFO_GET",
- "PORT_INFO_SET",
- "PORT_ACTION",
- "BUFFER_FROM_HOST",
- "BUFFER_TO_HOST",
- "GET_STATS",
- "PORT_PARAMETER_SET",
- "PORT_PARAMETER_GET",
- "EVENT_TO_HOST",
- "GET_CORE_STATS_FOR_PORT",
- "OPAQUE_ALLOCATOR",
- "CONSUME_MEM",
- "LMK",
- "OPAQUE_ALLOCATOR_DESC",
- "DRM_GET_LHS32",
- "DRM_GET_TIME",
- "BUFFER_FROM_HOST_ZEROLEN",
- "PORT_FLUSH",
- "HOST_LOG",
-};
-#endif
-
-static const char *const port_action_type_names[] = {
- "UNKNOWN",
- "ENABLE",
- "DISABLE",
- "FLUSH",
- "CONNECT",
- "DISCONNECT",
- "SET_REQUIREMENTS",
-};
-
-#if defined(DEBUG)
-#if defined(FULL_MSG_DUMP)
-#define DBG_DUMP_MSG(MSG, MSG_LEN, TITLE) \
- do { \
- pr_debug(TITLE" type:%s(%d) length:%d\n", \
- msg_type_names[(MSG)->h.type], \
- (MSG)->h.type, (MSG_LEN)); \
- print_hex_dump(KERN_DEBUG, "<<h: ", DUMP_PREFIX_OFFSET, \
- 16, 4, (MSG), \
- sizeof(struct mmal_msg_header), 1); \
- print_hex_dump(KERN_DEBUG, "<<p: ", DUMP_PREFIX_OFFSET, \
- 16, 4, \
- ((u8 *)(MSG)) + sizeof(struct mmal_msg_header),\
- (MSG_LEN) - sizeof(struct mmal_msg_header), 1); \
- } while (0)
-#else
-#define DBG_DUMP_MSG(MSG, MSG_LEN, TITLE) \
- { \
- pr_debug(TITLE" type:%s(%d) length:%d\n", \
- msg_type_names[(MSG)->h.type], \
- (MSG)->h.type, (MSG_LEN)); \
- }
-#endif
-#else
-#define DBG_DUMP_MSG(MSG, MSG_LEN, TITLE)
-#endif
-
-/* normal message context */
-struct mmal_msg_context {
- union {
- struct {
- /* work struct for deferred callback - must come first */
- struct work_struct work;
- /* mmal instance */
- struct vchiq_mmal_instance *instance;
- /* mmal port */
- struct vchiq_mmal_port *port;
- /* actual buffer used to store bulk reply */
- struct mmal_buffer *buffer;
- /* amount of buffer used */
- unsigned long buffer_used;
- /* MMAL buffer flags */
- u32 mmal_flags;
- /* Presentation and Decode timestamps */
- s64 pts;
- s64 dts;
-
- int status; /* context status */
-
- } bulk; /* bulk data */
-
- struct {
- /* message handle to release */
- VCHI_HELD_MSG_T msg_handle;
- /* pointer to received message */
- struct mmal_msg *msg;
- /* received message length */
- u32 msg_len;
- /* completion upon reply */
- struct completion cmplt;
- } sync; /* synchronous response */
- } u;
-
-};
-
-struct vchiq_mmal_instance {
- VCHI_SERVICE_HANDLE_T handle;
-
- /* ensure serialised access to service */
- struct mutex vchiq_mutex;
-
- /* ensure serialised access to bulk operations */
- struct mutex bulk_mutex;
-
- /* vmalloc page to receive scratch bulk xfers into */
- void *bulk_scratch;
-
- /* component to use next */
- int component_idx;
- struct vchiq_mmal_component component[VCHIQ_MMAL_MAX_COMPONENTS];
-};
-
-static struct mmal_msg_context *get_msg_context(struct vchiq_mmal_instance
- *instance)
-{
- struct mmal_msg_context *msg_context;
-
- /* todo: should this be allocated from a pool to avoid kmalloc */
- msg_context = kmalloc(sizeof(*msg_context), GFP_KERNEL);
- memset(msg_context, 0, sizeof(*msg_context));
-
- return msg_context;
-}
-
-static void release_msg_context(struct mmal_msg_context *msg_context)
-{
- kfree(msg_context);
-}
-
-/* deals with receipt of event to host message */
-static void event_to_host_cb(struct vchiq_mmal_instance *instance,
- struct mmal_msg *msg, u32 msg_len)
-{
- pr_debug("unhandled event\n");
- pr_debug("component:%p port type:%d num:%d cmd:0x%x length:%d\n",
- msg->u.event_to_host.client_component,
- msg->u.event_to_host.port_type,
- msg->u.event_to_host.port_num,
- msg->u.event_to_host.cmd, msg->u.event_to_host.length);
-}
-
-/* workqueue scheduled callback
- *
- * we do this because it is important we do not call any other vchiq
- * sync calls from within the message delivery thread
- */
-static void buffer_work_cb(struct work_struct *work)
-{
- struct mmal_msg_context *msg_context = (struct mmal_msg_context *)work;
-
- msg_context->u.bulk.port->buffer_cb(msg_context->u.bulk.instance,
- msg_context->u.bulk.port,
- msg_context->u.bulk.status,
- msg_context->u.bulk.buffer,
- msg_context->u.bulk.buffer_used,
- msg_context->u.bulk.mmal_flags,
- msg_context->u.bulk.dts,
- msg_context->u.bulk.pts);
-
- /* release message context */
- release_msg_context(msg_context);
-}
-
-/* enqueue a bulk receive for a given message context */
-static int bulk_receive(struct vchiq_mmal_instance *instance,
- struct mmal_msg *msg,
- struct mmal_msg_context *msg_context)
-{
- unsigned long rd_len;
- unsigned long flags = 0;
- int ret;
-
- /* bulk mutex stops other bulk operations while we have a
- * receive in progress - released in callback
- */
- ret = mutex_lock_interruptible(&instance->bulk_mutex);
- if (ret != 0)
- return ret;
-
- rd_len = msg->u.buffer_from_host.buffer_header.length;
-
- /* take buffer from queue */
- spin_lock_irqsave(&msg_context->u.bulk.port->slock, flags);
- if (list_empty(&msg_context->u.bulk.port->buffers)) {
- spin_unlock_irqrestore(&msg_context->u.bulk.port->slock, flags);
- pr_err("buffer list empty trying to submit bulk receive\n");
-
- /* todo: this is a serious error, we should never have
- * committed a buffer_to_host operation to the mmal
- * port without the buffer to back it up (underflow
- * handling) and there is no obvious way to deal with
- * this - how is the mmal service going to react when
- * we fail to do the xfer and reschedule a buffer when
- * it arrives? perhaps a starved flag to indicate a
- * waiting bulk receive?
- */
-
- mutex_unlock(&instance->bulk_mutex);
-
- return -EINVAL;
- }
-
- msg_context->u.bulk.buffer =
- list_entry(msg_context->u.bulk.port->buffers.next,
- struct mmal_buffer, list);
- list_del(&msg_context->u.bulk.buffer->list);
-
- spin_unlock_irqrestore(&msg_context->u.bulk.port->slock, flags);
-
- /* ensure we do not overrun the available buffer */
- if (rd_len > msg_context->u.bulk.buffer->buffer_size) {
- rd_len = msg_context->u.bulk.buffer->buffer_size;
- pr_warn("short read as not enough receive buffer space\n");
- /* todo: is this the correct response, what happens to
- * the rest of the message data?
- */
- }
-
- /* store length */
- msg_context->u.bulk.buffer_used = rd_len;
- msg_context->u.bulk.mmal_flags =
- msg->u.buffer_from_host.buffer_header.flags;
- msg_context->u.bulk.dts = msg->u.buffer_from_host.buffer_header.dts;
- msg_context->u.bulk.pts = msg->u.buffer_from_host.buffer_header.pts;
-
- // only need to flush L1 cache here, as VCHIQ takes care of the L2
- // cache.
- __cpuc_flush_dcache_area(msg_context->u.bulk.buffer->buffer, rd_len);
-
- /* queue the bulk submission */
- vchi_service_use(instance->handle);
- ret = vchi_bulk_queue_receive(instance->handle,
- msg_context->u.bulk.buffer->buffer,
- /* Actual receive needs to be a multiple
- * of 4 bytes
- */
- (rd_len + 3) & ~3,
- VCHI_FLAGS_CALLBACK_WHEN_OP_COMPLETE |
- VCHI_FLAGS_BLOCK_UNTIL_QUEUED,
- msg_context);
-
- vchi_service_release(instance->handle);
-
- if (ret != 0) {
- /* callback will not be clearing the mutex */
- mutex_unlock(&instance->bulk_mutex);
- }
-
- return ret;
-}
-
-/* enqueue a dummy bulk receive for a given message context */
-static int dummy_bulk_receive(struct vchiq_mmal_instance *instance,
- struct mmal_msg_context *msg_context)
-{
- int ret;
-
- /* bulk mutex stops other bulk operations while we have a
- * receive in progress - released in callback
- */
- ret = mutex_lock_interruptible(&instance->bulk_mutex);
- if (ret != 0)
- return ret;
-
- /* zero length indicates this was a dummy transfer */
- msg_context->u.bulk.buffer_used = 0;
-
- /* queue the bulk submission */
- vchi_service_use(instance->handle);
-
- ret = vchi_bulk_queue_receive(instance->handle,
- instance->bulk_scratch,
- 8,
- VCHI_FLAGS_CALLBACK_WHEN_OP_COMPLETE |
- VCHI_FLAGS_BLOCK_UNTIL_QUEUED,
- msg_context);
-
- vchi_service_release(instance->handle);
-
- if (ret != 0) {
- /* callback will not be clearing the mutex */
- mutex_unlock(&instance->bulk_mutex);
- }
-
- return ret;
-}
-
-/* data in message, memcpy from packet into output buffer */
-static int inline_receive(struct vchiq_mmal_instance *instance,
- struct mmal_msg *msg,
- struct mmal_msg_context *msg_context)
-{
- unsigned long flags = 0;
-
- /* take buffer from queue */
- spin_lock_irqsave(&msg_context->u.bulk.port->slock, flags);
- if (list_empty(&msg_context->u.bulk.port->buffers)) {
- spin_unlock_irqrestore(&msg_context->u.bulk.port->slock, flags);
- pr_err("buffer list empty trying to receive inline\n");
-
- /* todo: this is a serious error, we should never have
- * committed a buffer_to_host operation to the mmal
- * port without the buffer to back it up (with
- * underflow handling) and there is no obvious way to
- * deal with this. Less bad than the bulk case as we
- * can just drop this on the floor but...unhelpful
- */
- return -EINVAL;
- }
-
- msg_context->u.bulk.buffer =
- list_entry(msg_context->u.bulk.port->buffers.next,
- struct mmal_buffer, list);
- list_del(&msg_context->u.bulk.buffer->list);
-
- spin_unlock_irqrestore(&msg_context->u.bulk.port->slock, flags);
-
- memcpy(msg_context->u.bulk.buffer->buffer,
- msg->u.buffer_from_host.short_data,
- msg->u.buffer_from_host.payload_in_message);
-
- msg_context->u.bulk.buffer_used =
- msg->u.buffer_from_host.payload_in_message;
-
- return 0;
-}
-
-/* queue the buffer availability with MMAL_MSG_TYPE_BUFFER_FROM_HOST */
-static int
-buffer_from_host(struct vchiq_mmal_instance *instance,
- struct vchiq_mmal_port *port, struct mmal_buffer *buf)
-{
- struct mmal_msg_context *msg_context;
- struct mmal_msg m;
- int ret;
-
- pr_debug("instance:%p buffer:%p\n", instance->handle, buf);
-
- /* bulk mutex stops other bulk operations while we
- * have a receive in progress
- */
- if (mutex_lock_interruptible(&instance->bulk_mutex))
- return -EINTR;
-
- /* get context */
- msg_context = get_msg_context(instance);
- if (!msg_context) {
- ret = -ENOMEM;
- goto unlock;
- }
-
- /* store bulk message context for when data arrives */
- msg_context->u.bulk.instance = instance;
- msg_context->u.bulk.port = port;
- msg_context->u.bulk.buffer = NULL; /* not valid until bulk xfer */
- msg_context->u.bulk.buffer_used = 0;
-
- /* initialise work structure ready to schedule callback */
- INIT_WORK(&msg_context->u.bulk.work, buffer_work_cb);
-
- /* prep the buffer from host message */
- memset(&m, 0xbc, sizeof(m)); /* just to make debug clearer */
-
- m.h.type = MMAL_MSG_TYPE_BUFFER_FROM_HOST;
- m.h.magic = MMAL_MAGIC;
- m.h.context = msg_context;
- m.h.status = 0;
-
- /* drvbuf is our private data passed back */
- m.u.buffer_from_host.drvbuf.magic = MMAL_MAGIC;
- m.u.buffer_from_host.drvbuf.component_handle = port->component->handle;
- m.u.buffer_from_host.drvbuf.port_handle = port->handle;
- m.u.buffer_from_host.drvbuf.client_context = msg_context;
-
- /* buffer header */
- m.u.buffer_from_host.buffer_header.cmd = 0;
- m.u.buffer_from_host.buffer_header.data = buf->buffer;
- m.u.buffer_from_host.buffer_header.alloc_size = buf->buffer_size;
- m.u.buffer_from_host.buffer_header.length = 0; /* nothing used yet */
- m.u.buffer_from_host.buffer_header.offset = 0; /* no offset */
- m.u.buffer_from_host.buffer_header.flags = 0; /* no flags */
- m.u.buffer_from_host.buffer_header.pts = MMAL_TIME_UNKNOWN;
- m.u.buffer_from_host.buffer_header.dts = MMAL_TIME_UNKNOWN;
-
- /* clear buffer type specific data */
- memset(&m.u.buffer_from_host.buffer_header_type_specific, 0,
- sizeof(m.u.buffer_from_host.buffer_header_type_specific));
-
- /* no payload in message */
- m.u.buffer_from_host.payload_in_message = 0;
-
- vchi_service_use(instance->handle);
-
- ret = vchi_queue_kernel_message(instance->handle,
- &m,
- sizeof(struct mmal_msg_header) +
- sizeof(m.u.buffer_from_host));
-
- if (ret != 0) {
- release_msg_context(msg_context);
- /* todo: is this correct error value? */
- }
-
- vchi_service_release(instance->handle);
-
-unlock:
- mutex_unlock(&instance->bulk_mutex);
-
- return ret;
-}
-
-/* submit a buffer to the mmal service
- *
- * the buffer_from_host uses size data from the port's next available
- * mmal_buffer and deals with there being no buffer available by
- * incrementing the underflow for later
- */
-static int port_buffer_from_host(struct vchiq_mmal_instance *instance,
- struct vchiq_mmal_port *port)
-{
- int ret;
- struct mmal_buffer *buf;
- unsigned long flags = 0;
-
- if (!port->enabled)
- return -EINVAL;
-
- /* peek buffer from queue */
- spin_lock_irqsave(&port->slock, flags);
- if (list_empty(&port->buffers)) {
- port->buffer_underflow++;
- spin_unlock_irqrestore(&port->slock, flags);
- return -ENOSPC;
- }
-
- buf = list_entry(port->buffers.next, struct mmal_buffer, list);
-
- spin_unlock_irqrestore(&port->slock, flags);
-
- /* issue buffer to mmal service */
- ret = buffer_from_host(instance, port, buf);
- if (ret) {
- pr_err("adding buffer header failed\n");
- /* todo: how should this be dealt with */
- }
-
- return ret;
-}
-
-/* deals with receipt of buffer to host message */
-static void buffer_to_host_cb(struct vchiq_mmal_instance *instance,
- struct mmal_msg *msg, u32 msg_len)
-{
- struct mmal_msg_context *msg_context;
-
- pr_debug("buffer_to_host_cb: instance:%p msg:%p msg_len:%d\n",
- instance, msg, msg_len);
-
- if (msg->u.buffer_from_host.drvbuf.magic == MMAL_MAGIC) {
- msg_context = msg->u.buffer_from_host.drvbuf.client_context;
- } else {
- pr_err("MMAL_MSG_TYPE_BUFFER_TO_HOST with bad magic\n");
- return;
- }
-
- if (msg->h.status != MMAL_MSG_STATUS_SUCCESS) {
- /* message reception had an error */
- pr_warn("error %d in reply\n", msg->h.status);
-
- msg_context->u.bulk.status = msg->h.status;
-
- } else if (msg->u.buffer_from_host.buffer_header.length == 0) {
- /* empty buffer */
- if (msg->u.buffer_from_host.buffer_header.flags &
- MMAL_BUFFER_HEADER_FLAG_EOS) {
- msg_context->u.bulk.status =
- dummy_bulk_receive(instance, msg_context);
- if (msg_context->u.bulk.status == 0)
- return; /* successful bulk submission, bulk
- * completion will trigger callback
- */
- } else {
- /* do callback with empty buffer - not EOS though */
- msg_context->u.bulk.status = 0;
- msg_context->u.bulk.buffer_used = 0;
- }
- } else if (msg->u.buffer_from_host.payload_in_message == 0) {
- /* data is not in message, queue a bulk receive */
- msg_context->u.bulk.status =
- bulk_receive(instance, msg, msg_context);
- if (msg_context->u.bulk.status == 0)
- return; /* successful bulk submission, bulk
- * completion will trigger callback
- */
-
- /* failed to submit buffer, this will end badly */
- pr_err("error %d on bulk submission\n",
- msg_context->u.bulk.status);
-
- } else if (msg->u.buffer_from_host.payload_in_message <=
- MMAL_VC_SHORT_DATA) {
- /* data payload within message */
- msg_context->u.bulk.status = inline_receive(instance, msg,
- msg_context);
- } else {
- pr_err("message with invalid short payload\n");
-
- /* signal error */
- msg_context->u.bulk.status = -EINVAL;
- msg_context->u.bulk.buffer_used =
- msg->u.buffer_from_host.payload_in_message;
- }
-
- /* replace the buffer header */
- port_buffer_from_host(instance, msg_context->u.bulk.port);
-
- /* schedule the port callback */
- schedule_work(&msg_context->u.bulk.work);
-}
-
-static void bulk_receive_cb(struct vchiq_mmal_instance *instance,
- struct mmal_msg_context *msg_context)
-{
- /* bulk receive operation complete */
- mutex_unlock(&msg_context->u.bulk.instance->bulk_mutex);
-
- /* replace the buffer header */
- port_buffer_from_host(msg_context->u.bulk.instance,
- msg_context->u.bulk.port);
-
- msg_context->u.bulk.status = 0;
-
- /* schedule the port callback */
- schedule_work(&msg_context->u.bulk.work);
-}
-
-static void bulk_abort_cb(struct vchiq_mmal_instance *instance,
- struct mmal_msg_context *msg_context)
-{
- pr_err("%s: bulk ABORTED msg_context:%p\n", __func__, msg_context);
-
- /* bulk receive operation complete */
- mutex_unlock(&msg_context->u.bulk.instance->bulk_mutex);
-
- /* replace the buffer header */
- port_buffer_from_host(msg_context->u.bulk.instance,
- msg_context->u.bulk.port);
-
- msg_context->u.bulk.status = -EINTR;
-
- schedule_work(&msg_context->u.bulk.work);
-}
-
-/* incoming event service callback */
-static void service_callback(void *param,
- const VCHI_CALLBACK_REASON_T reason,
- void *bulk_ctx)
-{
- struct vchiq_mmal_instance *instance = param;
- int status;
- u32 msg_len;
- struct mmal_msg *msg;
- VCHI_HELD_MSG_T msg_handle;
-
- if (!instance) {
- pr_err("Message callback passed NULL instance\n");
- return;
- }
-
- switch (reason) {
- case VCHI_CALLBACK_MSG_AVAILABLE:
- status = vchi_msg_hold(instance->handle, (void **)&msg,
- &msg_len, VCHI_FLAGS_NONE, &msg_handle);
- if (status) {
- pr_err("Unable to dequeue a message (%d)\n", status);
- break;
- }
-
- DBG_DUMP_MSG(msg, msg_len, "<<< reply message");
-
- /* handling is different for buffer messages */
- switch (msg->h.type) {
- case MMAL_MSG_TYPE_BUFFER_FROM_HOST:
- vchi_held_msg_release(&msg_handle);
- break;
-
- case MMAL_MSG_TYPE_EVENT_TO_HOST:
- event_to_host_cb(instance, msg, msg_len);
- vchi_held_msg_release(&msg_handle);
-
- break;
-
- case MMAL_MSG_TYPE_BUFFER_TO_HOST:
- buffer_to_host_cb(instance, msg, msg_len);
- vchi_held_msg_release(&msg_handle);
- break;
-
- default:
- /* messages dependent on header context to complete */
-
- /* todo: the msg.context really ought to be sanity
- * checked before we just use it, afaict it comes back
- * and is used raw from the videocore. Perhaps it
- * should be verified the address lies in the kernel
- * address space.
- */
- if (msg->h.context == NULL) {
- pr_err("received message context was null!\n");
- vchi_held_msg_release(&msg_handle);
- break;
- }
-
- /* fill in context values */
- msg->h.context->u.sync.msg_handle = msg_handle;
- msg->h.context->u.sync.msg = msg;
- msg->h.context->u.sync.msg_len = msg_len;
-
- /* todo: should this check (completion_done()
- * == 1) for no one waiting? or do we need a
- * flag to tell us the completion has been
- * interrupted so we can free the message and
- * its context. This probably also solves the
- * message arriving after interruption todo
- * below
- */
-
- /* complete message so caller knows it happened */
- complete(&msg->h.context->u.sync.cmplt);
- break;
- }
-
- break;
-
- case VCHI_CALLBACK_BULK_RECEIVED:
- bulk_receive_cb(instance, bulk_ctx);
- break;
-
- case VCHI_CALLBACK_BULK_RECEIVE_ABORTED:
- bulk_abort_cb(instance, bulk_ctx);
- break;
-
- case VCHI_CALLBACK_SERVICE_CLOSED:
- /* TODO: consider if this requires action if received when
- * driver is not explicitly closing the service
- */
- break;
-
- default:
- pr_err("Received unhandled message reason %d\n", reason);
- break;
- }
-}
-
-static int send_synchronous_mmal_msg(struct vchiq_mmal_instance *instance,
- struct mmal_msg *msg,
- unsigned int payload_len,
- struct mmal_msg **msg_out,
- VCHI_HELD_MSG_T *msg_handle_out)
-{
- struct mmal_msg_context msg_context;
- int ret;
-
- /* payload size must not cause message to exceed max size */
- if (payload_len >
- (MMAL_MSG_MAX_SIZE - sizeof(struct mmal_msg_header))) {
- pr_err("payload length %d exceeds max:%d\n", payload_len,
- (MMAL_MSG_MAX_SIZE - sizeof(struct mmal_msg_header)));
- return -EINVAL;
- }
-
- init_completion(&msg_context.u.sync.cmplt);
-
- msg->h.magic = MMAL_MAGIC;
- msg->h.context = &msg_context;
- msg->h.status = 0;
-
- DBG_DUMP_MSG(msg, (sizeof(struct mmal_msg_header) + payload_len),
- ">>> sync message");
-
- vchi_service_use(instance->handle);
-
- ret = vchi_queue_kernel_message(instance->handle,
- msg,
- sizeof(struct mmal_msg_header) +
- payload_len);
-
- vchi_service_release(instance->handle);
-
- if (ret) {
- pr_err("error %d queuing message\n", ret);
- return ret;
- }
-
- ret = wait_for_completion_timeout(&msg_context.u.sync.cmplt, 3 * HZ);
- if (ret <= 0) {
- pr_err("error %d waiting for sync completion\n", ret);
- if (ret == 0)
- ret = -ETIME;
- /* todo: what happens if the message arrives after aborting */
- return ret;
- }
-
- *msg_out = msg_context.u.sync.msg;
- *msg_handle_out = msg_context.u.sync.msg_handle;
-
- return 0;
-}
-
-static void dump_port_info(struct vchiq_mmal_port *port)
-{
- pr_debug("port handle:0x%x enabled:%d\n", port->handle, port->enabled);
-
- pr_debug("buffer minimum num:%d size:%d align:%d\n",
- port->minimum_buffer.num,
- port->minimum_buffer.size, port->minimum_buffer.alignment);
-
- pr_debug("buffer recommended num:%d size:%d align:%d\n",
- port->recommended_buffer.num,
- port->recommended_buffer.size,
- port->recommended_buffer.alignment);
-
- pr_debug("buffer current values num:%d size:%d align:%d\n",
- port->current_buffer.num,
- port->current_buffer.size, port->current_buffer.alignment);
-
- pr_debug("elementry stream: type:%d encoding:0x%x variant:0x%x\n",
- port->format.type,
- port->format.encoding, port->format.encoding_variant);
-
- pr_debug(" bitrate:%d flags:0x%x\n",
- port->format.bitrate, port->format.flags);
-
- if (port->format.type == MMAL_ES_TYPE_VIDEO) {
- pr_debug
- ("es video format: width:%d height:%d colourspace:0x%x\n",
- port->es.video.width, port->es.video.height,
- port->es.video.color_space);
-
- pr_debug(" : crop xywh %d,%d,%d,%d\n",
- port->es.video.crop.x,
- port->es.video.crop.y,
- port->es.video.crop.width, port->es.video.crop.height);
- pr_debug(" : framerate %d/%d aspect %d/%d\n",
- port->es.video.frame_rate.num,
- port->es.video.frame_rate.den,
- port->es.video.par.num, port->es.video.par.den);
- }
-}
-
-static void port_to_mmal_msg(struct vchiq_mmal_port *port, struct mmal_port *p)
-{
- /* todo do readonly fields need setting at all? */
- p->type = port->type;
- p->index = port->index;
- p->index_all = 0;
- p->is_enabled = port->enabled;
- p->buffer_num_min = port->minimum_buffer.num;
- p->buffer_size_min = port->minimum_buffer.size;
- p->buffer_alignment_min = port->minimum_buffer.alignment;
- p->buffer_num_recommended = port->recommended_buffer.num;
- p->buffer_size_recommended = port->recommended_buffer.size;
-
- /* only three writable fields in a port */
- p->buffer_num = port->current_buffer.num;
- p->buffer_size = port->current_buffer.size;
- p->userdata = port;
-}
-
-static int port_info_set(struct vchiq_mmal_instance *instance,
- struct vchiq_mmal_port *port)
-{
- int ret;
- struct mmal_msg m;
- struct mmal_msg *rmsg;
- VCHI_HELD_MSG_T rmsg_handle;
-
- pr_debug("setting port info port %p\n", port);
- if (!port)
- return -1;
- dump_port_info(port);
-
- m.h.type = MMAL_MSG_TYPE_PORT_INFO_SET;
-
- m.u.port_info_set.component_handle = port->component->handle;
- m.u.port_info_set.port_type = port->type;
- m.u.port_info_set.port_index = port->index;
-
- port_to_mmal_msg(port, &m.u.port_info_set.port);
-
- /* elementary stream format setup */
- m.u.port_info_set.format.type = port->format.type;
- m.u.port_info_set.format.encoding = port->format.encoding;
- m.u.port_info_set.format.encoding_variant =
- port->format.encoding_variant;
- m.u.port_info_set.format.bitrate = port->format.bitrate;
- m.u.port_info_set.format.flags = port->format.flags;
-
- memcpy(&m.u.port_info_set.es, &port->es,
- sizeof(union mmal_es_specific_format));
-
- m.u.port_info_set.format.extradata_size = port->format.extradata_size;
- memcpy(&m.u.port_info_set.extradata, port->format.extradata,
- port->format.extradata_size);
-
- ret = send_synchronous_mmal_msg(instance, &m,
- sizeof(m.u.port_info_set),
- &rmsg, &rmsg_handle);
- if (ret)
- return ret;
-
- if (rmsg->h.type != MMAL_MSG_TYPE_PORT_INFO_SET) {
- /* got an unexpected message type in reply */
- ret = -EINVAL;
- goto release_msg;
- }
-
- /* return operation status */
- ret = -rmsg->u.port_info_get_reply.status;
-
- pr_debug("%s:result:%d component:0x%x port:%d\n", __func__, ret,
- port->component->handle, port->handle);
-
-release_msg:
- vchi_held_msg_release(&rmsg_handle);
-
- return ret;
-}
-
-/* use port info get message to retrieve port information */
-static int port_info_get(struct vchiq_mmal_instance *instance,
- struct vchiq_mmal_port *port)
-{
- int ret;
- struct mmal_msg m;
- struct mmal_msg *rmsg;
- VCHI_HELD_MSG_T rmsg_handle;
-
- /* port info time */
- m.h.type = MMAL_MSG_TYPE_PORT_INFO_GET;
- m.u.port_info_get.component_handle = port->component->handle;
- m.u.port_info_get.port_type = port->type;
- m.u.port_info_get.index = port->index;
-
- ret = send_synchronous_mmal_msg(instance, &m,
- sizeof(m.u.port_info_get),
- &rmsg, &rmsg_handle);
- if (ret)
- return ret;
-
- if (rmsg->h.type != MMAL_MSG_TYPE_PORT_INFO_GET) {
- /* got an unexpected message type in reply */
- ret = -EINVAL;
- goto release_msg;
- }
-
- /* return operation status */
- ret = -rmsg->u.port_info_get_reply.status;
- if (ret != MMAL_MSG_STATUS_SUCCESS)
- goto release_msg;
-
- if (rmsg->u.port_info_get_reply.port.is_enabled == 0)
- port->enabled = false;
- else
- port->enabled = true;
-
- /* copy the values out of the message */
- port->handle = rmsg->u.port_info_get_reply.port_handle;
-
- /* port type and index cached to use on port info set because
- * it does not use a port handle
- */
- port->type = rmsg->u.port_info_get_reply.port_type;
- port->index = rmsg->u.port_info_get_reply.port_index;
-
- port->minimum_buffer.num =
- rmsg->u.port_info_get_reply.port.buffer_num_min;
- port->minimum_buffer.size =
- rmsg->u.port_info_get_reply.port.buffer_size_min;
- port->minimum_buffer.alignment =
- rmsg->u.port_info_get_reply.port.buffer_alignment_min;
-
- port->recommended_buffer.alignment =
- rmsg->u.port_info_get_reply.port.buffer_alignment_min;
- port->recommended_buffer.num =
- rmsg->u.port_info_get_reply.port.buffer_num_recommended;
-
- port->current_buffer.num = rmsg->u.port_info_get_reply.port.buffer_num;
- port->current_buffer.size =
- rmsg->u.port_info_get_reply.port.buffer_size;
-
- /* stream format */
- port->format.type = rmsg->u.port_info_get_reply.format.type;
- port->format.encoding = rmsg->u.port_info_get_reply.format.encoding;
- port->format.encoding_variant =
- rmsg->u.port_info_get_reply.format.encoding_variant;
- port->format.bitrate = rmsg->u.port_info_get_reply.format.bitrate;
- port->format.flags = rmsg->u.port_info_get_reply.format.flags;
-
- /* elementary stream format */
- memcpy(&port->es,
- &rmsg->u.port_info_get_reply.es,
- sizeof(union mmal_es_specific_format));
- port->format.es = &port->es;
-
- port->format.extradata_size =
- rmsg->u.port_info_get_reply.format.extradata_size;
- memcpy(port->format.extradata,
- rmsg->u.port_info_get_reply.extradata,
- port->format.extradata_size);
-
- pr_debug("received port info\n");
- dump_port_info(port);
-
-release_msg:
-
- pr_debug("%s:result:%d component:0x%x port:%d\n",
- __func__, ret, port->component->handle, port->handle);
-
- vchi_held_msg_release(&rmsg_handle);
-
- return ret;
-}
-
-/* create component on vc */
-static int create_component(struct vchiq_mmal_instance *instance,
- struct vchiq_mmal_component *component,
- const char *name)
-{
- int ret;
- struct mmal_msg m;
- struct mmal_msg *rmsg;
- VCHI_HELD_MSG_T rmsg_handle;
-
- /* build component create message */
- m.h.type = MMAL_MSG_TYPE_COMPONENT_CREATE;
- m.u.component_create.client_component = component;
- strncpy(m.u.component_create.name, name,
- sizeof(m.u.component_create.name));
-
- ret = send_synchronous_mmal_msg(instance, &m,
- sizeof(m.u.component_create),
- &rmsg, &rmsg_handle);
- if (ret)
- return ret;
-
- if (rmsg->h.type != m.h.type) {
- /* got an unexpected message type in reply */
- ret = -EINVAL;
- goto release_msg;
- }
-
- ret = -rmsg->u.component_create_reply.status;
- if (ret != MMAL_MSG_STATUS_SUCCESS)
- goto release_msg;
-
- /* a valid component response received */
- component->handle = rmsg->u.component_create_reply.component_handle;
- component->inputs = rmsg->u.component_create_reply.input_num;
- component->outputs = rmsg->u.component_create_reply.output_num;
- component->clocks = rmsg->u.component_create_reply.clock_num;
-
- pr_debug("Component handle:0x%x in:%d out:%d clock:%d\n",
- component->handle,
- component->inputs, component->outputs, component->clocks);
-
-release_msg:
- vchi_held_msg_release(&rmsg_handle);
-
- return ret;
-}
-
-/* destroys a component on vc */
-static int destroy_component(struct vchiq_mmal_instance *instance,
- struct vchiq_mmal_component *component)
-{
- int ret;
- struct mmal_msg m;
- struct mmal_msg *rmsg;
- VCHI_HELD_MSG_T rmsg_handle;
-
- m.h.type = MMAL_MSG_TYPE_COMPONENT_DESTROY;
- m.u.component_destroy.component_handle = component->handle;
-
- ret = send_synchronous_mmal_msg(instance, &m,
- sizeof(m.u.component_destroy),
- &rmsg, &rmsg_handle);
- if (ret)
- return ret;
-
- if (rmsg->h.type != m.h.type) {
- /* got an unexpected message type in reply */
- ret = -EINVAL;
- goto release_msg;
- }
-
- ret = -rmsg->u.component_destroy_reply.status;
-
-release_msg:
-
- vchi_held_msg_release(&rmsg_handle);
-
- return ret;
-}
-
-/* enable a component on vc */
-static int enable_component(struct vchiq_mmal_instance *instance,
- struct vchiq_mmal_component *component)
-{
- int ret;
- struct mmal_msg m;
- struct mmal_msg *rmsg;
- VCHI_HELD_MSG_T rmsg_handle;
-
- m.h.type = MMAL_MSG_TYPE_COMPONENT_ENABLE;
- m.u.component_enable.component_handle = component->handle;
-
- ret = send_synchronous_mmal_msg(instance, &m,
- sizeof(m.u.component_enable),
- &rmsg, &rmsg_handle);
- if (ret)
- return ret;
-
- if (rmsg->h.type != m.h.type) {
- /* got an unexpected message type in reply */
- ret = -EINVAL;
- goto release_msg;
- }
-
- ret = -rmsg->u.component_enable_reply.status;
-
-release_msg:
- vchi_held_msg_release(&rmsg_handle);
-
- return ret;
-}
-
-/* disable a component on vc */
-static int disable_component(struct vchiq_mmal_instance *instance,
- struct vchiq_mmal_component *component)
-{
- int ret;
- struct mmal_msg m;
- struct mmal_msg *rmsg;
- VCHI_HELD_MSG_T rmsg_handle;
-
- m.h.type = MMAL_MSG_TYPE_COMPONENT_DISABLE;
- m.u.component_disable.component_handle = component->handle;
-
- ret = send_synchronous_mmal_msg(instance, &m,
- sizeof(m.u.component_disable),
- &rmsg, &rmsg_handle);
- if (ret)
- return ret;
-
- if (rmsg->h.type != m.h.type) {
- /* got an unexpected message type in reply */
- ret = -EINVAL;
- goto release_msg;
- }
-
- ret = -rmsg->u.component_disable_reply.status;
-
-release_msg:
-
- vchi_held_msg_release(&rmsg_handle);
-
- return ret;
-}
-
-/* get version of mmal implementation */
-static int get_version(struct vchiq_mmal_instance *instance,
- u32 *major_out, u32 *minor_out)
-{
- int ret;
- struct mmal_msg m;
- struct mmal_msg *rmsg;
- VCHI_HELD_MSG_T rmsg_handle;
-
- m.h.type = MMAL_MSG_TYPE_GET_VERSION;
-
- ret = send_synchronous_mmal_msg(instance, &m,
- sizeof(m.u.version),
- &rmsg, &rmsg_handle);
- if (ret)
- return ret;
-
- if (rmsg->h.type != m.h.type) {
- /* got an unexpected message type in reply */
- ret = -EINVAL;
- goto release_msg;
- }
-
- *major_out = rmsg->u.version.major;
- *minor_out = rmsg->u.version.minor;
-
-release_msg:
- vchi_held_msg_release(&rmsg_handle);
-
- return ret;
-}
-
-/* do a port action with a port as a parameter */
-static int port_action_port(struct vchiq_mmal_instance *instance,
- struct vchiq_mmal_port *port,
- enum mmal_msg_port_action_type action_type)
-{
- int ret;
- struct mmal_msg m;
- struct mmal_msg *rmsg;
- VCHI_HELD_MSG_T rmsg_handle;
-
- m.h.type = MMAL_MSG_TYPE_PORT_ACTION;
- m.u.port_action_port.component_handle = port->component->handle;
- m.u.port_action_port.port_handle = port->handle;
- m.u.port_action_port.action = action_type;
-
- port_to_mmal_msg(port, &m.u.port_action_port.port);
-
- ret = send_synchronous_mmal_msg(instance, &m,
- sizeof(m.u.port_action_port),
- &rmsg, &rmsg_handle);
- if (ret)
- return ret;
-
- if (rmsg->h.type != MMAL_MSG_TYPE_PORT_ACTION) {
- /* got an unexpected message type in reply */
- ret = -EINVAL;
- goto release_msg;
- }
-
- ret = -rmsg->u.port_action_reply.status;
-
- pr_debug("%s:result:%d component:0x%x port:%d action:%s(%d)\n",
- __func__,
- ret, port->component->handle, port->handle,
- port_action_type_names[action_type], action_type);
-
-release_msg:
- vchi_held_msg_release(&rmsg_handle);
-
- return ret;
-}
-
-/* do a port action with handles as parameters */
-static int port_action_handle(struct vchiq_mmal_instance *instance,
- struct vchiq_mmal_port *port,
- enum mmal_msg_port_action_type action_type,
- u32 connect_component_handle,
- u32 connect_port_handle)
-{
- int ret;
- struct mmal_msg m;
- struct mmal_msg *rmsg;
- VCHI_HELD_MSG_T rmsg_handle;
-
- m.h.type = MMAL_MSG_TYPE_PORT_ACTION;
-
- m.u.port_action_handle.component_handle = port->component->handle;
- m.u.port_action_handle.port_handle = port->handle;
- m.u.port_action_handle.action = action_type;
-
- m.u.port_action_handle.connect_component_handle =
- connect_component_handle;
- m.u.port_action_handle.connect_port_handle = connect_port_handle;
-
- ret = send_synchronous_mmal_msg(instance, &m,
- sizeof(m.u.port_action_handle),
- &rmsg, &rmsg_handle);
- if (ret)
- return ret;
-
- if (rmsg->h.type != MMAL_MSG_TYPE_PORT_ACTION) {
- /* got an unexpected message type in reply */
- ret = -EINVAL;
- goto release_msg;
- }
-
- ret = -rmsg->u.port_action_reply.status;
-
- pr_debug("%s:result:%d component:0x%x port:%d action:%s(%d)" \
- " connect component:0x%x connect port:%d\n",
- __func__,
- ret, port->component->handle, port->handle,
- port_action_type_names[action_type],
- action_type, connect_component_handle, connect_port_handle);
-
-release_msg:
- vchi_held_msg_release(&rmsg_handle);
-
- return ret;
-}
-
-static int port_parameter_set(struct vchiq_mmal_instance *instance,
- struct vchiq_mmal_port *port,
- u32 parameter_id, void *value, u32 value_size)
-{
- int ret;
- struct mmal_msg m;
- struct mmal_msg *rmsg;
- VCHI_HELD_MSG_T rmsg_handle;
-
- m.h.type = MMAL_MSG_TYPE_PORT_PARAMETER_SET;
-
- m.u.port_parameter_set.component_handle = port->component->handle;
- m.u.port_parameter_set.port_handle = port->handle;
- m.u.port_parameter_set.id = parameter_id;
- m.u.port_parameter_set.size = (2 * sizeof(u32)) + value_size;
- memcpy(&m.u.port_parameter_set.value, value, value_size);
-
- ret = send_synchronous_mmal_msg(instance, &m,
- (4 * sizeof(u32)) + value_size,
- &rmsg, &rmsg_handle);
- if (ret)
- return ret;
-
- if (rmsg->h.type != MMAL_MSG_TYPE_PORT_PARAMETER_SET) {
- /* got an unexpected message type in reply */
- ret = -EINVAL;
- goto release_msg;
- }
-
- ret = -rmsg->u.port_parameter_set_reply.status;
-
- pr_debug("%s:result:%d component:0x%x port:%d parameter:%d\n",
- __func__,
- ret, port->component->handle, port->handle, parameter_id);
-
-release_msg:
- vchi_held_msg_release(&rmsg_handle);
-
- return ret;
-}
-
-static int port_parameter_get(struct vchiq_mmal_instance *instance,
- struct vchiq_mmal_port *port,
- u32 parameter_id, void *value, u32 *value_size)
-{
- int ret;
- struct mmal_msg m;
- struct mmal_msg *rmsg;
- VCHI_HELD_MSG_T rmsg_handle;
-
- m.h.type = MMAL_MSG_TYPE_PORT_PARAMETER_GET;
-
- m.u.port_parameter_get.component_handle = port->component->handle;
- m.u.port_parameter_get.port_handle = port->handle;
- m.u.port_parameter_get.id = parameter_id;
- m.u.port_parameter_get.size = (2 * sizeof(u32)) + *value_size;
-
- ret = send_synchronous_mmal_msg(instance, &m,
- sizeof(struct
- mmal_msg_port_parameter_get),
- &rmsg, &rmsg_handle);
- if (ret)
- return ret;
-
- if (rmsg->h.type != MMAL_MSG_TYPE_PORT_PARAMETER_GET) {
- /* got an unexpected message type in reply */
- pr_err("Incorrect reply type %d\n", rmsg->h.type);
- ret = -EINVAL;
- goto release_msg;
- }
-
- ret = -rmsg->u.port_parameter_get_reply.status;
- if (ret) {
- /* Copy only as much as we have space for
- * but report true size of parameter
- */
- memcpy(value, &rmsg->u.port_parameter_get_reply.value,
- *value_size);
- *value_size = rmsg->u.port_parameter_get_reply.size;
- } else
- memcpy(value, &rmsg->u.port_parameter_get_reply.value,
- rmsg->u.port_parameter_get_reply.size);
-
- pr_debug("%s:result:%d component:0x%x port:%d parameter:%d\n", __func__,
- ret, port->component->handle, port->handle, parameter_id);
-
-release_msg:
- vchi_held_msg_release(&rmsg_handle);
-
- return ret;
-}
-
-/* disables a port and drains buffers from it */
-static int port_disable(struct vchiq_mmal_instance *instance,
- struct vchiq_mmal_port *port)
-{
- int ret;
- struct list_head *q, *buf_head;
- unsigned long flags = 0;
-
- if (!port->enabled)
- return 0;
-
- port->enabled = false;
-
- ret = port_action_port(instance, port,
- MMAL_MSG_PORT_ACTION_TYPE_DISABLE);
- if (ret == 0) {
- /* drain all queued buffers on port */
- spin_lock_irqsave(&port->slock, flags);
-
- list_for_each_safe(buf_head, q, &port->buffers) {
- struct mmal_buffer *mmalbuf;
-
- mmalbuf = list_entry(buf_head, struct mmal_buffer,
- list);
- list_del(buf_head);
- if (port->buffer_cb)
- port->buffer_cb(instance,
- port, 0, mmalbuf, 0, 0,
- MMAL_TIME_UNKNOWN,
- MMAL_TIME_UNKNOWN);
- }
-
- spin_unlock_irqrestore(&port->slock, flags);
-
- ret = port_info_get(instance, port);
- }
-
- return ret;
-}
-
-/* enable a port */
-static int port_enable(struct vchiq_mmal_instance *instance,
- struct vchiq_mmal_port *port)
-{
- unsigned int hdr_count;
- struct list_head *buf_head;
- int ret;
-
- if (port->enabled)
- return 0;
-
- /* ensure there are enough buffers queued to cover the buffer headers */
- if (port->buffer_cb != NULL) {
- hdr_count = 0;
- list_for_each(buf_head, &port->buffers) {
- hdr_count++;
- }
- if (hdr_count < port->current_buffer.num)
- return -ENOSPC;
- }
-
- ret = port_action_port(instance, port,
- MMAL_MSG_PORT_ACTION_TYPE_ENABLE);
- if (ret)
- goto done;
-
- port->enabled = true;
-
- if (port->buffer_cb) {
- /* send buffer headers to videocore */
- hdr_count = 1;
- list_for_each(buf_head, &port->buffers) {
- struct mmal_buffer *mmalbuf;
-
- mmalbuf = list_entry(buf_head, struct mmal_buffer,
- list);
- ret = buffer_from_host(instance, port, mmalbuf);
- if (ret)
- goto done;
-
- hdr_count++;
- if (hdr_count > port->current_buffer.num)
- break;
- }
- }
-
- ret = port_info_get(instance, port);
-
-done:
- return ret;
-}
-
-/* ------------------------------------------------------------------
- * Exported API
- *------------------------------------------------------------------*/
-
-int vchiq_mmal_port_set_format(struct vchiq_mmal_instance *instance,
- struct vchiq_mmal_port *port)
-{
- int ret;
-
- if (mutex_lock_interruptible(&instance->vchiq_mutex))
- return -EINTR;
-
- ret = port_info_set(instance, port);
- if (ret)
- goto release_unlock;
-
- /* read what has actually been set */
- ret = port_info_get(instance, port);
-
-release_unlock:
- mutex_unlock(&instance->vchiq_mutex);
-
- return ret;
-}
-
-int vchiq_mmal_port_parameter_set(struct vchiq_mmal_instance *instance,
- struct vchiq_mmal_port *port,
- u32 parameter, void *value, u32 value_size)
-{
- int ret;
-
- if (mutex_lock_interruptible(&instance->vchiq_mutex))
- return -EINTR;
-
- ret = port_parameter_set(instance, port, parameter, value, value_size);
-
- mutex_unlock(&instance->vchiq_mutex);
-
- return ret;
-}
-
-int vchiq_mmal_port_parameter_get(struct vchiq_mmal_instance *instance,
- struct vchiq_mmal_port *port,
- u32 parameter, void *value, u32 *value_size)
-{
- int ret;
-
- if (mutex_lock_interruptible(&instance->vchiq_mutex))
- return -EINTR;
-
- ret = port_parameter_get(instance, port, parameter, value, value_size);
-
- mutex_unlock(&instance->vchiq_mutex);
-
- return ret;
-}
-
-/* enable a port
- *
- * enables a port and queues buffers for satisfying callbacks if we
- * provide a callback handler
- */
-int vchiq_mmal_port_enable(struct vchiq_mmal_instance *instance,
- struct vchiq_mmal_port *port,
- vchiq_mmal_buffer_cb buffer_cb)
-{
- int ret;
-
- if (mutex_lock_interruptible(&instance->vchiq_mutex))
- return -EINTR;
-
- /* already enabled - noop */
- if (port->enabled) {
- ret = 0;
- goto unlock;
- }
-
- port->buffer_cb = buffer_cb;
-
- ret = port_enable(instance, port);
-
-unlock:
- mutex_unlock(&instance->vchiq_mutex);
-
- return ret;
-}
-
-int vchiq_mmal_port_disable(struct vchiq_mmal_instance *instance,
- struct vchiq_mmal_port *port)
-{
- int ret;
-
- if (mutex_lock_interruptible(&instance->vchiq_mutex))
- return -EINTR;
-
- if (!port->enabled) {
- mutex_unlock(&instance->vchiq_mutex);
- return 0;
- }
-
- ret = port_disable(instance, port);
-
- mutex_unlock(&instance->vchiq_mutex);
-
- return ret;
-}
-
-/* ports will be connected in a tunneled manner so data buffers
- * are not handled by the client.
- */
-int vchiq_mmal_port_connect_tunnel(struct vchiq_mmal_instance *instance,
- struct vchiq_mmal_port *src,
- struct vchiq_mmal_port *dst)
-{
- int ret;
-
- if (mutex_lock_interruptible(&instance->vchiq_mutex))
- return -EINTR;
-
- /* disconnect ports if connected */
- if (src->connected != NULL) {
- ret = port_disable(instance, src);
- if (ret) {
- pr_err("failed disabling src port(%d)\n", ret);
- goto release_unlock;
- }
-
- /* do not need to disable the destination port as they
- * are connected and it is done automatically
- */
-
- ret = port_action_handle(instance, src,
- MMAL_MSG_PORT_ACTION_TYPE_DISCONNECT,
- src->connected->component->handle,
- src->connected->handle);
- if (ret < 0) {
- pr_err("failed disconnecting src port\n");
- goto release_unlock;
- }
- src->connected->enabled = false;
- src->connected = NULL;
- }
-
- if (dst == NULL) {
- /* do not make new connection */
- ret = 0;
- pr_debug("not making new connection\n");
- goto release_unlock;
- }
-
- /* copy src port format to dst */
- dst->format.encoding = src->format.encoding;
- dst->es.video.width = src->es.video.width;
- dst->es.video.height = src->es.video.height;
- dst->es.video.crop.x = src->es.video.crop.x;
- dst->es.video.crop.y = src->es.video.crop.y;
- dst->es.video.crop.width = src->es.video.crop.width;
- dst->es.video.crop.height = src->es.video.crop.height;
- dst->es.video.frame_rate.num = src->es.video.frame_rate.num;
- dst->es.video.frame_rate.den = src->es.video.frame_rate.den;
-
- /* set new format */
- ret = port_info_set(instance, dst);
- if (ret) {
- pr_debug("setting port info failed\n");
- goto release_unlock;
- }
-
- /* read what has actually been set */
- ret = port_info_get(instance, dst);
- if (ret) {
- pr_debug("read back port info failed\n");
- goto release_unlock;
- }
-
- /* connect two ports together */
- ret = port_action_handle(instance, src,
- MMAL_MSG_PORT_ACTION_TYPE_CONNECT,
- dst->component->handle, dst->handle);
- if (ret < 0) {
- pr_debug("connecting port %d:%d to %d:%d failed\n",
- src->component->handle, src->handle,
- dst->component->handle, dst->handle);
- goto release_unlock;
- }
- src->connected = dst;
-
-release_unlock:
-
- mutex_unlock(&instance->vchiq_mutex);
-
- return ret;
-}
-
-int vchiq_mmal_submit_buffer(struct vchiq_mmal_instance *instance,
- struct vchiq_mmal_port *port,
- struct mmal_buffer *buffer)
-{
- unsigned long flags = 0;
-
- spin_lock_irqsave(&port->slock, flags);
- list_add_tail(&buffer->list, &port->buffers);
- spin_unlock_irqrestore(&port->slock, flags);
-
- /* the port previously underflowed because it was missing a
- * mmal_buffer; now that one has just been added, submit it to
- * the mmal service.
- */
- if (port->buffer_underflow) {
- port_buffer_from_host(instance, port);
- port->buffer_underflow--;
- }
-
- return 0;
-}
-
-/* Initialise a mmal component and its ports
- *
- */
-int vchiq_mmal_component_init(struct vchiq_mmal_instance *instance,
- const char *name,
- struct vchiq_mmal_component **component_out)
-{
- int ret;
- int idx; /* port index */
- struct vchiq_mmal_component *component;
-
- if (mutex_lock_interruptible(&instance->vchiq_mutex))
- return -EINTR;
-
- if (instance->component_idx == VCHIQ_MMAL_MAX_COMPONENTS) {
- ret = -EINVAL; /* todo is this correct error? */
- goto unlock;
- }
-
- component = &instance->component[instance->component_idx];
-
- ret = create_component(instance, component, name);
- if (ret < 0)
- goto unlock;
-
- /* ports info needs gathering */
- component->control.type = MMAL_PORT_TYPE_CONTROL;
- component->control.index = 0;
- component->control.component = component;
- spin_lock_init(&component->control.slock);
- INIT_LIST_HEAD(&component->control.buffers);
- ret = port_info_get(instance, &component->control);
- if (ret < 0)
- goto release_component;
-
- for (idx = 0; idx < component->inputs; idx++) {
- component->input[idx].type = MMAL_PORT_TYPE_INPUT;
- component->input[idx].index = idx;
- component->input[idx].component = component;
- spin_lock_init(&component->input[idx].slock);
- INIT_LIST_HEAD(&component->input[idx].buffers);
- ret = port_info_get(instance, &component->input[idx]);
- if (ret < 0)
- goto release_component;
- }
-
- for (idx = 0; idx < component->outputs; idx++) {
- component->output[idx].type = MMAL_PORT_TYPE_OUTPUT;
- component->output[idx].index = idx;
- component->output[idx].component = component;
- spin_lock_init(&component->output[idx].slock);
- INIT_LIST_HEAD(&component->output[idx].buffers);
- ret = port_info_get(instance, &component->output[idx]);
- if (ret < 0)
- goto release_component;
- }
-
- for (idx = 0; idx < component->clocks; idx++) {
- component->clock[idx].type = MMAL_PORT_TYPE_CLOCK;
- component->clock[idx].index = idx;
- component->clock[idx].component = component;
- spin_lock_init(&component->clock[idx].slock);
- INIT_LIST_HEAD(&component->clock[idx].buffers);
- ret = port_info_get(instance, &component->clock[idx]);
- if (ret < 0)
- goto release_component;
- }
-
- instance->component_idx++;
-
- *component_out = component;
-
- mutex_unlock(&instance->vchiq_mutex);
-
- return 0;
-
-release_component:
- destroy_component(instance, component);
-unlock:
- mutex_unlock(&instance->vchiq_mutex);
-
- return ret;
-}
-
-/*
- * cause a mmal component to be destroyed
- */
-int vchiq_mmal_component_finalise(struct vchiq_mmal_instance *instance,
- struct vchiq_mmal_component *component)
-{
- int ret;
-
- if (mutex_lock_interruptible(&instance->vchiq_mutex))
- return -EINTR;
-
- if (component->enabled)
- ret = disable_component(instance, component);
-
- ret = destroy_component(instance, component);
-
- mutex_unlock(&instance->vchiq_mutex);
-
- return ret;
-}
-
-/*
- * cause a mmal component to be enabled
- */
-int vchiq_mmal_component_enable(struct vchiq_mmal_instance *instance,
- struct vchiq_mmal_component *component)
-{
- int ret;
-
- if (mutex_lock_interruptible(&instance->vchiq_mutex))
- return -EINTR;
-
- if (component->enabled) {
- mutex_unlock(&instance->vchiq_mutex);
- return 0;
- }
-
- ret = enable_component(instance, component);
- if (ret == 0)
- component->enabled = true;
-
- mutex_unlock(&instance->vchiq_mutex);
-
- return ret;
-}
-
-/*
- * cause a mmal component to be disabled
- */
-int vchiq_mmal_component_disable(struct vchiq_mmal_instance *instance,
- struct vchiq_mmal_component *component)
-{
- int ret;
-
- if (mutex_lock_interruptible(&instance->vchiq_mutex))
- return -EINTR;
-
- if (!component->enabled) {
- mutex_unlock(&instance->vchiq_mutex);
- return 0;
- }
-
- ret = disable_component(instance, component);
- if (ret == 0)
- component->enabled = false;
-
- mutex_unlock(&instance->vchiq_mutex);
-
- return ret;
-}
-
-int vchiq_mmal_version(struct vchiq_mmal_instance *instance,
- u32 *major_out, u32 *minor_out)
-{
- int ret;
-
- if (mutex_lock_interruptible(&instance->vchiq_mutex))
- return -EINTR;
-
- ret = get_version(instance, major_out, minor_out);
-
- mutex_unlock(&instance->vchiq_mutex);
-
- return ret;
-}
-
-int vchiq_mmal_finalise(struct vchiq_mmal_instance *instance)
-{
- int status = 0;
-
- if (instance == NULL)
- return -EINVAL;
-
- if (mutex_lock_interruptible(&instance->vchiq_mutex))
- return -EINTR;
-
- vchi_service_use(instance->handle);
-
- status = vchi_service_close(instance->handle);
- if (status != 0)
- pr_err("mmal-vchiq: VCHIQ close failed");
-
- mutex_unlock(&instance->vchiq_mutex);
-
- vfree(instance->bulk_scratch);
-
- kfree(instance);
-
- return status;
-}
-
-int vchiq_mmal_init(struct vchiq_mmal_instance **out_instance)
-{
- int status;
- struct vchiq_mmal_instance *instance;
- static VCHI_CONNECTION_T *vchi_connection;
- static VCHI_INSTANCE_T vchi_instance;
- SERVICE_CREATION_T params = {
- VCHI_VERSION_EX(VC_MMAL_VER, VC_MMAL_MIN_VER),
- VC_MMAL_SERVER_NAME,
- vchi_connection,
- 0, /* rx fifo size (unused) */
- 0, /* tx fifo size (unused) */
- service_callback,
- NULL, /* service callback parameter */
- 1, /* unaligned bulk receives */
- 1, /* unaligned bulk transmits */
- 0 /* want crc check on bulk transfers */
- };
-
- /* compile time checks to ensure structure size as they are
- * directly (de)serialised from memory.
- */
-
- /* ensure the header structure has packed to the correct size */
- BUILD_BUG_ON(sizeof(struct mmal_msg_header) != 24);
-
- /* ensure message structure does not exceed maximum length */
- BUILD_BUG_ON(sizeof(struct mmal_msg) > MMAL_MSG_MAX_SIZE);
-
- /* mmal port struct is correct size */
- BUILD_BUG_ON(sizeof(struct mmal_port) != 64);
-
- /* create a vchi instance */
- status = vchi_initialise(&vchi_instance);
- if (status) {
- pr_err("Failed to initialise VCHI instance (status=%d)\n",
- status);
- return -EIO;
- }
-
- status = vchi_connect(NULL, 0, vchi_instance);
- if (status) {
- pr_err("Failed to connect VCHI instance (status=%d)\n", status);
- return -EIO;
- }
-
- instance = kmalloc(sizeof(*instance), GFP_KERNEL);
- memset(instance, 0, sizeof(*instance));
-
- mutex_init(&instance->vchiq_mutex);
- mutex_init(&instance->bulk_mutex);
-
- instance->bulk_scratch = vmalloc(PAGE_SIZE);
-
- params.callback_param = instance;
-
- status = vchi_service_open(vchi_instance, &params, &instance->handle);
- if (status) {
- pr_err("Failed to open VCHI service connection (status=%d)\n",
- status);
- goto err_close_services;
- }
-
- vchi_service_release(instance->handle);
-
- *out_instance = instance;
-
- return 0;
-
-err_close_services:
-
- vchi_service_close(instance->handle);
- vfree(instance->bulk_scratch);
- kfree(instance);
- return -ENODEV;
-}
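
For orientation, a hedged sketch of how a client of the exported API removed above is typically wired up. The component name "ril.camera", the function names and the abbreviated error unwinding are assumptions for illustration; in a real driver, buffers are queued with vchiq_mmal_submit_buffer() and the port format is negotiated with vchiq_mmal_port_set_format() before the output port is enabled.

/* Illustrative callback matching the vchiq_mmal_buffer_cb typedef */
static void example_buffer_cb(struct vchiq_mmal_instance *instance,
			      struct vchiq_mmal_port *port, int status,
			      struct mmal_buffer *buffer, unsigned long length,
			      u32 mmal_flags, s64 dts, s64 pts)
{
	/* hand the completed buffer to the capture queue, then requeue it
	 * with vchiq_mmal_submit_buffer() so the port never underflows
	 */
}

static int example_start_streaming(struct vchiq_mmal_instance **out)
{
	struct vchiq_mmal_instance *instance;
	struct vchiq_mmal_component *camera;
	int ret;

	ret = vchiq_mmal_init(&instance);
	if (ret)
		return ret;

	/* "ril.camera" is assumed here; component names come from firmware */
	ret = vchiq_mmal_component_init(instance, "ril.camera", &camera);
	if (ret)
		goto err_finalise;

	ret = vchiq_mmal_component_enable(instance, camera);
	if (ret)
		goto err_finalise;

	/* buffers must already be queued on the port, otherwise
	 * port_enable() returns -ENOSPC when a callback is supplied
	 */
	ret = vchiq_mmal_port_enable(instance, &camera->output[0],
				     example_buffer_cb);
	if (ret)
		goto err_finalise;

	*out = instance;
	return 0;

err_finalise:
	vchiq_mmal_finalise(instance);	/* component teardown abbreviated */
	return ret;
}

This mirrors the order in which the exported functions appear above: instance, component, component enable, then port enable with a buffer callback.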
diff --git a/drivers/staging/media/platform/bcm2835/mmal-vchiq.h b/drivers/staging/media/platform/bcm2835/mmal-vchiq.h
deleted file mode 100644
index 9d1d11e4a53e..000000000000
--- a/drivers/staging/media/platform/bcm2835/mmal-vchiq.h
+++ /dev/null
@@ -1,178 +0,0 @@
-/*
- * Broadcom BCM2835 V4L2 driver
- *
- * Copyright © 2013 Raspberry Pi (Trading) Ltd.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file COPYING in the main directory of this archive
- * for more details.
- *
- * Authors: Vincent Sanders <vincent.sanders@collabora.co.uk>
- * Dave Stevenson <dsteve@broadcom.com>
- * Simon Mellor <simellor@broadcom.com>
- * Luke Diamand <luked@broadcom.com>
- *
- * MMAL interface to VCHIQ message passing
- */
-
-#ifndef MMAL_VCHIQ_H
-#define MMAL_VCHIQ_H
-
-#include "mmal-msg-format.h"
-
-#define MAX_PORT_COUNT 4
-
-/* Maximum size of the format extradata. */
-#define MMAL_FORMAT_EXTRADATA_MAX_SIZE 128
-
-struct vchiq_mmal_instance;
-
-enum vchiq_mmal_es_type {
- MMAL_ES_TYPE_UNKNOWN, /**< Unknown elementary stream type */
- MMAL_ES_TYPE_CONTROL, /**< Elementary stream of control commands */
- MMAL_ES_TYPE_AUDIO, /**< Audio elementary stream */
- MMAL_ES_TYPE_VIDEO, /**< Video elementary stream */
- MMAL_ES_TYPE_SUBPICTURE /**< Sub-picture elementary stream */
-};
-
-/* rectangle, used lots so it gets its own struct */
-struct vchiq_mmal_rect {
- s32 x;
- s32 y;
- s32 width;
- s32 height;
-};
-
-struct vchiq_mmal_port_buffer {
- unsigned int num; /* number of buffers */
- u32 size; /* size of buffers */
- u32 alignment; /* alignment of buffers */
-};
-
-struct vchiq_mmal_port;
-
-typedef void (*vchiq_mmal_buffer_cb)(
- struct vchiq_mmal_instance *instance,
- struct vchiq_mmal_port *port,
- int status, struct mmal_buffer *buffer,
- unsigned long length, u32 mmal_flags, s64 dts, s64 pts);
-
-struct vchiq_mmal_port {
- bool enabled;
- u32 handle;
- u32 type; /* port type, cached to use on port info set */
- u32 index; /* port index, cached to use on port info set */
-
- /* component port belongs to, allows simple deref */
- struct vchiq_mmal_component *component;
-
- struct vchiq_mmal_port *connected; /* port connected to */
-
- /* buffer info */
- struct vchiq_mmal_port_buffer minimum_buffer;
- struct vchiq_mmal_port_buffer recommended_buffer;
- struct vchiq_mmal_port_buffer current_buffer;
-
- /* stream format */
- struct mmal_es_format format;
- /* elementary stream format */
- union mmal_es_specific_format es;
-
- /* data buffers to fill */
- struct list_head buffers;
- /* lock to serialise adding and removing buffers from list */
- spinlock_t slock;
- /* count of how many buffer header refills have failed because
- * there was no buffer to satisfy them
- */
- int buffer_underflow;
- /* callback on buffer completion */
- vchiq_mmal_buffer_cb buffer_cb;
- /* callback context */
- void *cb_ctx;
-};
-
-struct vchiq_mmal_component {
- bool enabled;
- u32 handle; /* VideoCore handle for component */
- u32 inputs; /* Number of input ports */
- u32 outputs; /* Number of output ports */
- u32 clocks; /* Number of clock ports */
- struct vchiq_mmal_port control; /* control port */
- struct vchiq_mmal_port input[MAX_PORT_COUNT]; /* input ports */
- struct vchiq_mmal_port output[MAX_PORT_COUNT]; /* output ports */
- struct vchiq_mmal_port clock[MAX_PORT_COUNT]; /* clock ports */
-};
-
-
-int vchiq_mmal_init(struct vchiq_mmal_instance **out_instance);
-int vchiq_mmal_finalise(struct vchiq_mmal_instance *instance);
-
-/* Initialise a mmal component and its ports
- *
- */
-int vchiq_mmal_component_init(
- struct vchiq_mmal_instance *instance,
- const char *name,
- struct vchiq_mmal_component **component_out);
-
-int vchiq_mmal_component_finalise(
- struct vchiq_mmal_instance *instance,
- struct vchiq_mmal_component *component);
-
-int vchiq_mmal_component_enable(
- struct vchiq_mmal_instance *instance,
- struct vchiq_mmal_component *component);
-
-int vchiq_mmal_component_disable(
- struct vchiq_mmal_instance *instance,
- struct vchiq_mmal_component *component);
-
-
-
-/* enable a mmal port
- *
- * enables a port and, if a buffer callback is provided, enqueues
- * buffer headers as appropriate for the port.
- */
-int vchiq_mmal_port_enable(
- struct vchiq_mmal_instance *instance,
- struct vchiq_mmal_port *port,
- vchiq_mmal_buffer_cb buffer_cb);
-
-/* disable a port
- *
- * Disabling a port will dequeue any pending buffers.
- */
-int vchiq_mmal_port_disable(struct vchiq_mmal_instance *instance,
- struct vchiq_mmal_port *port);
-
-
-int vchiq_mmal_port_parameter_set(struct vchiq_mmal_instance *instance,
- struct vchiq_mmal_port *port,
- u32 parameter,
- void *value,
- u32 value_size);
-
-int vchiq_mmal_port_parameter_get(struct vchiq_mmal_instance *instance,
- struct vchiq_mmal_port *port,
- u32 parameter,
- void *value,
- u32 *value_size);
-
-int vchiq_mmal_port_set_format(struct vchiq_mmal_instance *instance,
- struct vchiq_mmal_port *port);
-
-int vchiq_mmal_port_connect_tunnel(struct vchiq_mmal_instance *instance,
- struct vchiq_mmal_port *src,
- struct vchiq_mmal_port *dst);
-
-int vchiq_mmal_version(struct vchiq_mmal_instance *instance,
- u32 *major_out,
- u32 *minor_out);
-
-int vchiq_mmal_submit_buffer(struct vchiq_mmal_instance *instance,
- struct vchiq_mmal_port *port,
- struct mmal_buffer *buf);
-
-#endif /* MMAL_VCHIQ_H */
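/*
 * Editor's note: the sketch below is not part of the patch.  It is a
 * minimal illustration of how the interface declared in mmal-vchiq.h
 * above is typically driven, based only on the prototypes and comments
 * in that header.  The component name "ril.camera", the callback body
 * and the error-handling order are assumptions, not taken from this diff.
 */
#include "mmal-vchiq.h"

static void example_buffer_cb(struct vchiq_mmal_instance *instance,
			      struct vchiq_mmal_port *port,
			      int status, struct mmal_buffer *buffer,
			      unsigned long length, u32 mmal_flags,
			      s64 dts, s64 pts)
{
	/* hand the completed buffer back to the consumer here */
}

static int example_start_component(void)
{
	struct vchiq_mmal_instance *instance;
	struct vchiq_mmal_component *camera;
	int ret;

	ret = vchiq_mmal_init(&instance);
	if (ret)
		return ret;

	/* "ril.camera" is an assumed VideoCore component name */
	ret = vchiq_mmal_component_init(instance, "ril.camera", &camera);
	if (ret)
		goto err_finalise;

	/* describe the stream in camera->output[0].format/es, then push it */
	ret = vchiq_mmal_port_set_format(instance, &camera->output[0]);
	if (ret)
		goto err_component;

	ret = vchiq_mmal_component_enable(instance, camera);
	if (ret)
		goto err_component;

	/* enabling the port with a callback starts buffer header exchange */
	ret = vchiq_mmal_port_enable(instance, &camera->output[0],
				     example_buffer_cb);
	if (ret)
		goto err_disable;

	return 0;

err_disable:
	vchiq_mmal_component_disable(instance, camera);
err_component:
	vchiq_mmal_component_finalise(instance, camera);
err_finalise:
	vchiq_mmal_finalise(instance);
	return ret;
}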
diff --git a/drivers/staging/media/s5p-cec/Kconfig b/drivers/staging/media/s5p-cec/Kconfig
deleted file mode 100644
index 7a3489df3e70..000000000000
--- a/drivers/staging/media/s5p-cec/Kconfig
+++ /dev/null
@@ -1,9 +0,0 @@
-config VIDEO_SAMSUNG_S5P_CEC
- tristate "Samsung S5P CEC driver"
- depends on VIDEO_DEV && MEDIA_CEC_SUPPORT && (ARCH_EXYNOS || COMPILE_TEST)
- ---help---
- This is a driver for the Samsung S5P HDMI CEC interface. It uses the
- generic CEC framework interface.
- The CEC bus is present in the HDMI connector and enables communication
- between compatible devices.
-
diff --git a/drivers/staging/media/s5p-cec/TODO b/drivers/staging/media/s5p-cec/TODO
deleted file mode 100644
index 64f21bab38f5..000000000000
--- a/drivers/staging/media/s5p-cec/TODO
+++ /dev/null
@@ -1,7 +0,0 @@
-This driver requires that userspace sets the physical address.
-However, this should be passed on from the corresponding
-Samsung HDMI driver.
-
-We have to wait until the HDMI notifier framework has been merged
-in order to handle this gracefully; until that time, this driver
-has to remain in staging.
diff --git a/drivers/staging/media/s5p-cec/s5p_cec.c b/drivers/staging/media/s5p-cec/s5p_cec.c
deleted file mode 100644
index 2a07968b5ac6..000000000000
--- a/drivers/staging/media/s5p-cec/s5p_cec.c
+++ /dev/null
@@ -1,280 +0,0 @@
-/* drivers/media/platform/s5p-cec/s5p_cec.c
- *
- * Samsung S5P CEC driver
- *
- * Copyright (c) 2014 Samsung Electronics Co., Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This driver is based on the "cec interface driver for exynos soc" by
- * SangPil Moon.
- */
-
-#include <linux/clk.h>
-#include <linux/interrupt.h>
-#include <linux/kernel.h>
-#include <linux/mfd/syscon.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
-#include <linux/timer.h>
-#include <linux/workqueue.h>
-#include <media/cec.h>
-
-#include "exynos_hdmi_cec.h"
-#include "regs-cec.h"
-#include "s5p_cec.h"
-
-#define CEC_NAME "s5p-cec"
-
-static int debug;
-module_param(debug, int, 0644);
-MODULE_PARM_DESC(debug, "debug level (0-2)");
-
-static int s5p_cec_adap_enable(struct cec_adapter *adap, bool enable)
-{
- struct s5p_cec_dev *cec = adap->priv;
-
- if (enable) {
- pm_runtime_get_sync(cec->dev);
-
- s5p_cec_reset(cec);
-
- s5p_cec_set_divider(cec);
- s5p_cec_threshold(cec);
-
- s5p_cec_unmask_tx_interrupts(cec);
- s5p_cec_unmask_rx_interrupts(cec);
- s5p_cec_enable_rx(cec);
- } else {
- s5p_cec_mask_tx_interrupts(cec);
- s5p_cec_mask_rx_interrupts(cec);
- pm_runtime_disable(cec->dev);
- }
-
- return 0;
-}
-
-static int s5p_cec_adap_log_addr(struct cec_adapter *adap, u8 addr)
-{
- struct s5p_cec_dev *cec = adap->priv;
-
- s5p_cec_set_addr(cec, addr);
- return 0;
-}
-
-static int s5p_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
- u32 signal_free_time, struct cec_msg *msg)
-{
- struct s5p_cec_dev *cec = adap->priv;
-
- /*
- * Unclear if 0 retries are allowed by the hardware, so have 1 as
- * the minimum.
- */
- s5p_cec_copy_packet(cec, msg->msg, msg->len, max(1, attempts - 1));
- return 0;
-}
-
-static irqreturn_t s5p_cec_irq_handler(int irq, void *priv)
-{
- struct s5p_cec_dev *cec = priv;
- u32 status = 0;
-
- status = s5p_cec_get_status(cec);
-
- dev_dbg(cec->dev, "irq received\n");
-
- if (status & CEC_STATUS_TX_DONE) {
- if (status & CEC_STATUS_TX_ERROR) {
- dev_dbg(cec->dev, "CEC_STATUS_TX_ERROR set\n");
- cec->tx = STATE_ERROR;
- } else {
- dev_dbg(cec->dev, "CEC_STATUS_TX_DONE\n");
- cec->tx = STATE_DONE;
- }
- s5p_clr_pending_tx(cec);
- }
-
- if (status & CEC_STATUS_RX_DONE) {
- if (status & CEC_STATUS_RX_ERROR) {
- dev_dbg(cec->dev, "CEC_STATUS_RX_ERROR set\n");
- s5p_cec_rx_reset(cec);
- s5p_cec_enable_rx(cec);
- } else {
- dev_dbg(cec->dev, "CEC_STATUS_RX_DONE set\n");
- if (cec->rx != STATE_IDLE)
- dev_dbg(cec->dev, "Buffer overrun (worker did not process previous message)\n");
- cec->rx = STATE_BUSY;
- cec->msg.len = status >> 24;
- cec->msg.rx_status = CEC_RX_STATUS_OK;
- s5p_cec_get_rx_buf(cec, cec->msg.len,
- cec->msg.msg);
- cec->rx = STATE_DONE;
- s5p_cec_enable_rx(cec);
- }
- /* Clear interrupt pending bit */
- s5p_clr_pending_rx(cec);
- }
- return IRQ_WAKE_THREAD;
-}
-
-static irqreturn_t s5p_cec_irq_handler_thread(int irq, void *priv)
-{
- struct s5p_cec_dev *cec = priv;
-
- dev_dbg(cec->dev, "irq processing thread\n");
- switch (cec->tx) {
- case STATE_DONE:
- cec_transmit_done(cec->adap, CEC_TX_STATUS_OK, 0, 0, 0, 0);
- cec->tx = STATE_IDLE;
- break;
- case STATE_ERROR:
- cec_transmit_done(cec->adap,
- CEC_TX_STATUS_MAX_RETRIES | CEC_TX_STATUS_ERROR,
- 0, 0, 0, 1);
- cec->tx = STATE_IDLE;
- break;
- case STATE_BUSY:
- dev_err(cec->dev, "state set to busy, this should not occur here\n");
- break;
- default:
- break;
- }
-
- switch (cec->rx) {
- case STATE_DONE:
- cec_received_msg(cec->adap, &cec->msg);
- cec->rx = STATE_IDLE;
- break;
- default:
- break;
- }
-
- return IRQ_HANDLED;
-}
-
-static const struct cec_adap_ops s5p_cec_adap_ops = {
- .adap_enable = s5p_cec_adap_enable,
- .adap_log_addr = s5p_cec_adap_log_addr,
- .adap_transmit = s5p_cec_adap_transmit,
-};
-
-static int s5p_cec_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct resource *res;
- struct s5p_cec_dev *cec;
- int ret;
-
- cec = devm_kzalloc(&pdev->dev, sizeof(*cec), GFP_KERNEL);
- if (!cec)
- return -ENOMEM;
-
- cec->dev = dev;
-
- cec->irq = platform_get_irq(pdev, 0);
- if (cec->irq < 0)
- return cec->irq;
-
- ret = devm_request_threaded_irq(dev, cec->irq, s5p_cec_irq_handler,
- s5p_cec_irq_handler_thread, 0, pdev->name, cec);
- if (ret)
- return ret;
-
- cec->clk = devm_clk_get(dev, "hdmicec");
- if (IS_ERR(cec->clk))
- return PTR_ERR(cec->clk);
-
- cec->pmu = syscon_regmap_lookup_by_phandle(dev->of_node,
- "samsung,syscon-phandle");
- if (IS_ERR(cec->pmu))
- return -EPROBE_DEFER;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- cec->reg = devm_ioremap_resource(dev, res);
- if (IS_ERR(cec->reg))
- return PTR_ERR(cec->reg);
-
- cec->adap = cec_allocate_adapter(&s5p_cec_adap_ops, cec,
- CEC_NAME,
- CEC_CAP_PHYS_ADDR | CEC_CAP_LOG_ADDRS | CEC_CAP_TRANSMIT |
- CEC_CAP_PASSTHROUGH | CEC_CAP_RC, 1);
- ret = PTR_ERR_OR_ZERO(cec->adap);
- if (ret)
- return ret;
- ret = cec_register_adapter(cec->adap, &pdev->dev);
- if (ret) {
- cec_delete_adapter(cec->adap);
- return ret;
- }
-
- platform_set_drvdata(pdev, cec);
- pm_runtime_enable(dev);
-
- dev_dbg(dev, "successfully probed\n");
- return 0;
-}
-
-static int s5p_cec_remove(struct platform_device *pdev)
-{
- struct s5p_cec_dev *cec = platform_get_drvdata(pdev);
-
- cec_unregister_adapter(cec->adap);
- pm_runtime_disable(&pdev->dev);
- return 0;
-}
-
-static int __maybe_unused s5p_cec_runtime_suspend(struct device *dev)
-{
- struct s5p_cec_dev *cec = dev_get_drvdata(dev);
-
- clk_disable_unprepare(cec->clk);
- return 0;
-}
-
-static int __maybe_unused s5p_cec_runtime_resume(struct device *dev)
-{
- struct s5p_cec_dev *cec = dev_get_drvdata(dev);
- int ret;
-
- ret = clk_prepare_enable(cec->clk);
- if (ret < 0)
- return ret;
- return 0;
-}
-
-static const struct dev_pm_ops s5p_cec_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
- pm_runtime_force_resume)
- SET_RUNTIME_PM_OPS(s5p_cec_runtime_suspend, s5p_cec_runtime_resume,
- NULL)
-};
-
-static const struct of_device_id s5p_cec_match[] = {
- {
- .compatible = "samsung,s5p-cec",
- },
- {},
-};
-MODULE_DEVICE_TABLE(of, s5p_cec_match);
-
-static struct platform_driver s5p_cec_pdrv = {
- .probe = s5p_cec_probe,
- .remove = s5p_cec_remove,
- .driver = {
- .name = CEC_NAME,
- .of_match_table = s5p_cec_match,
- .pm = &s5p_cec_pm_ops,
- },
-};
-
-module_platform_driver(s5p_cec_pdrv);
-
-MODULE_AUTHOR("Kamil Debski <kamil@wypas.org>");
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Samsung S5P CEC driver");
diff --git a/drivers/staging/media/s5p-cec/s5p_cec.h b/drivers/staging/media/s5p-cec/s5p_cec.h
deleted file mode 100644
index 03732c13d19f..000000000000
--- a/drivers/staging/media/s5p-cec/s5p_cec.h
+++ /dev/null
@@ -1,76 +0,0 @@
-/* drivers/media/platform/s5p-cec/s5p_cec.h
- *
- * Samsung S5P HDMI CEC driver
- *
- * Copyright (c) 2014 Samsung Electronics Co., Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#ifndef _S5P_CEC_H_
-#define _S5P_CEC_H_ __FILE__
-
-#include <linux/clk.h>
-#include <linux/interrupt.h>
-#include <linux/kernel.h>
-#include <linux/mfd/syscon.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
-#include <linux/timer.h>
-#include <linux/version.h>
-#include <linux/workqueue.h>
-#include <media/cec.h>
-
-#include "exynos_hdmi_cec.h"
-#include "regs-cec.h"
-#include "s5p_cec.h"
-
-#define CEC_NAME "s5p-cec"
-
-#define CEC_STATUS_TX_RUNNING (1 << 0)
-#define CEC_STATUS_TX_TRANSFERRING (1 << 1)
-#define CEC_STATUS_TX_DONE (1 << 2)
-#define CEC_STATUS_TX_ERROR (1 << 3)
-#define CEC_STATUS_TX_BYTES (0xFF << 8)
-#define CEC_STATUS_RX_RUNNING (1 << 16)
-#define CEC_STATUS_RX_RECEIVING (1 << 17)
-#define CEC_STATUS_RX_DONE (1 << 18)
-#define CEC_STATUS_RX_ERROR (1 << 19)
-#define CEC_STATUS_RX_BCAST (1 << 20)
-#define CEC_STATUS_RX_BYTES (0xFF << 24)
-
-#define CEC_WORKER_TX_DONE (1 << 0)
-#define CEC_WORKER_RX_MSG (1 << 1)
-
-/* CEC Rx buffer size */
-#define CEC_RX_BUFF_SIZE 16
-/* CEC Tx buffer size */
-#define CEC_TX_BUFF_SIZE 16
-
-enum cec_state {
- STATE_IDLE,
- STATE_BUSY,
- STATE_DONE,
- STATE_ERROR
-};
-
-struct s5p_cec_dev {
- struct cec_adapter *adap;
- struct clk *clk;
- struct device *dev;
- struct mutex lock;
- struct regmap *pmu;
- int irq;
- void __iomem *reg;
-
- enum cec_state rx;
- enum cec_state tx;
- struct cec_msg msg;
-};
-
-#endif /* _S5P_CEC_H_ */
diff --git a/drivers/staging/media/st-cec/Kconfig b/drivers/staging/media/st-cec/Kconfig
deleted file mode 100644
index c04283db58d6..000000000000
--- a/drivers/staging/media/st-cec/Kconfig
+++ /dev/null
@@ -1,8 +0,0 @@
-config VIDEO_STI_HDMI_CEC
- tristate "STMicroelectronics STiH4xx HDMI CEC driver"
- depends on VIDEO_DEV && MEDIA_CEC_SUPPORT && (ARCH_STI || COMPILE_TEST)
- ---help---
- This is a driver for the STIH4xx HDMI CEC interface. It uses the
- generic CEC framework interface.
- The CEC bus is present in the HDMI connector and enables communication
- between compatible devices.
diff --git a/drivers/staging/media/st-cec/TODO b/drivers/staging/media/st-cec/TODO
deleted file mode 100644
index c61289742c5c..000000000000
--- a/drivers/staging/media/st-cec/TODO
+++ /dev/null
@@ -1,7 +0,0 @@
-This driver requires that userspace sets the physical address.
-However, this should be passed on from the corresponding
-ST HDMI driver.
-
-We have to wait until the HDMI notifier framework has been merged
-in order to handle this gracefully; until that time, this driver
-has to remain in staging.
diff --git a/drivers/staging/media/st-cec/stih-cec.c b/drivers/staging/media/st-cec/stih-cec.c
deleted file mode 100644
index 3c25638a9610..000000000000
--- a/drivers/staging/media/st-cec/stih-cec.c
+++ /dev/null
@@ -1,379 +0,0 @@
-/*
- * drivers/staging/media/st-cec/stih-cec.c
- *
- * STIH4xx CEC driver
- * Copyright (C) STMicroelectronic SA 2016
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-#include <linux/clk.h>
-#include <linux/interrupt.h>
-#include <linux/kernel.h>
-#include <linux/mfd/syscon.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/platform_device.h>
-
-#include <media/cec.h>
-
-#define CEC_NAME "stih-cec"
-
-/* CEC registers */
-#define CEC_CLK_DIV 0x0
-#define CEC_CTRL 0x4
-#define CEC_IRQ_CTRL 0x8
-#define CEC_STATUS 0xC
-#define CEC_EXT_STATUS 0x10
-#define CEC_TX_CTRL 0x14
-#define CEC_FREE_TIME_THRESH 0x18
-#define CEC_BIT_TOUT_THRESH 0x1C
-#define CEC_BIT_PULSE_THRESH 0x20
-#define CEC_DATA 0x24
-#define CEC_TX_ARRAY_CTRL 0x28
-#define CEC_CTRL2 0x2C
-#define CEC_TX_ERROR_STS 0x30
-#define CEC_ADDR_TABLE 0x34
-#define CEC_DATA_ARRAY_CTRL 0x38
-#define CEC_DATA_ARRAY_STATUS 0x3C
-#define CEC_TX_DATA_BASE 0x40
-#define CEC_TX_DATA_TOP 0x50
-#define CEC_TX_DATA_SIZE 0x1
-#define CEC_RX_DATA_BASE 0x54
-#define CEC_RX_DATA_TOP 0x64
-#define CEC_RX_DATA_SIZE 0x1
-
-/* CEC_CTRL2 */
-#define CEC_LINE_INACTIVE_EN BIT(0)
-#define CEC_AUTO_BUS_ERR_EN BIT(1)
-#define CEC_STOP_ON_ARB_ERR_EN BIT(2)
-#define CEC_TX_REQ_WAIT_EN BIT(3)
-
-/* CEC_DATA_ARRAY_CTRL */
-#define CEC_TX_ARRAY_EN BIT(0)
-#define CEC_RX_ARRAY_EN BIT(1)
-#define CEC_TX_ARRAY_RESET BIT(2)
-#define CEC_RX_ARRAY_RESET BIT(3)
-#define CEC_TX_N_OF_BYTES_IRQ_EN BIT(4)
-#define CEC_TX_STOP_ON_NACK BIT(7)
-
-/* CEC_TX_ARRAY_CTRL */
-#define CEC_TX_N_OF_BYTES 0x1F
-#define CEC_TX_START BIT(5)
-#define CEC_TX_AUTO_SOM_EN BIT(6)
-#define CEC_TX_AUTO_EOM_EN BIT(7)
-
-/* CEC_IRQ_CTRL */
-#define CEC_TX_DONE_IRQ_EN BIT(0)
-#define CEC_ERROR_IRQ_EN BIT(2)
-#define CEC_RX_DONE_IRQ_EN BIT(3)
-#define CEC_RX_SOM_IRQ_EN BIT(4)
-#define CEC_RX_EOM_IRQ_EN BIT(5)
-#define CEC_FREE_TIME_IRQ_EN BIT(6)
-#define CEC_PIN_STS_IRQ_EN BIT(7)
-
-/* CEC_CTRL */
-#define CEC_IN_FILTER_EN BIT(0)
-#define CEC_PWR_SAVE_EN BIT(1)
-#define CEC_EN BIT(4)
-#define CEC_ACK_CTRL BIT(5)
-#define CEC_RX_RESET_EN BIT(6)
-#define CEC_IGNORE_RX_ERROR BIT(7)
-
-/* CEC_STATUS */
-#define CEC_TX_DONE_STS BIT(0)
-#define CEC_TX_ACK_GET_STS BIT(1)
-#define CEC_ERROR_STS BIT(2)
-#define CEC_RX_DONE_STS BIT(3)
-#define CEC_RX_SOM_STS BIT(4)
-#define CEC_RX_EOM_STS BIT(5)
-#define CEC_FREE_TIME_IRQ_STS BIT(6)
-#define CEC_PIN_STS BIT(7)
-#define CEC_SBIT_TOUT_STS BIT(8)
-#define CEC_DBIT_TOUT_STS BIT(9)
-#define CEC_LPULSE_ERROR_STS BIT(10)
-#define CEC_HPULSE_ERROR_STS BIT(11)
-#define CEC_TX_ERROR BIT(12)
-#define CEC_TX_ARB_ERROR BIT(13)
-#define CEC_RX_ERROR_MIN BIT(14)
-#define CEC_RX_ERROR_MAX BIT(15)
-
-/* Signal free time in bit periods (2.4ms) */
-#define CEC_PRESENT_INIT_SFT 7
-#define CEC_NEW_INIT_SFT 5
-#define CEC_RETRANSMIT_SFT 3
-
-/* Constants for CEC_BIT_TOUT_THRESH register */
-#define CEC_SBIT_TOUT_47MS BIT(1)
-#define CEC_SBIT_TOUT_48MS (BIT(0) | BIT(1))
-#define CEC_SBIT_TOUT_50MS BIT(2)
-#define CEC_DBIT_TOUT_27MS BIT(0)
-#define CEC_DBIT_TOUT_28MS BIT(1)
-#define CEC_DBIT_TOUT_29MS (BIT(0) | BIT(1))
-
-/* Constants for CEC_BIT_PULSE_THRESH register */
-#define CEC_BIT_LPULSE_03MS BIT(1)
-#define CEC_BIT_HPULSE_03MS BIT(3)
-
-/* Constants for CEC_DATA_ARRAY_STATUS register */
-#define CEC_RX_N_OF_BYTES 0x1F
-#define CEC_TX_N_OF_BYTES_SENT BIT(5)
-#define CEC_RX_OVERRUN BIT(6)
-
-struct stih_cec {
- struct cec_adapter *adap;
- struct device *dev;
- struct clk *clk;
- void __iomem *regs;
- int irq;
- u32 irq_status;
-};
-
-static int stih_cec_adap_enable(struct cec_adapter *adap, bool enable)
-{
- struct stih_cec *cec = adap->priv;
-
- if (enable) {
- /* The doc says (input TCLK_PERIOD * CEC_CLK_DIV) = 0.1ms */
- unsigned long clk_freq = clk_get_rate(cec->clk);
- u32 cec_clk_div = clk_freq / 10000;
-
- writel(cec_clk_div, cec->regs + CEC_CLK_DIV);
-
- /* Configuration of the durations activating a timeout */
- writel(CEC_SBIT_TOUT_47MS | (CEC_DBIT_TOUT_28MS << 4),
- cec->regs + CEC_BIT_TOUT_THRESH);
-
- /* Configuration of the smallest allowed duration for pulses */
- writel(CEC_BIT_LPULSE_03MS | CEC_BIT_HPULSE_03MS,
- cec->regs + CEC_BIT_PULSE_THRESH);
-
- /* Minimum received bit period threshold */
- writel(BIT(5) | BIT(7), cec->regs + CEC_TX_CTRL);
-
- /* Configuration of transceiver data arrays */
- writel(CEC_TX_ARRAY_EN | CEC_RX_ARRAY_EN | CEC_TX_STOP_ON_NACK,
- cec->regs + CEC_DATA_ARRAY_CTRL);
-
- /* Configuration of the control bits for CEC Transceiver */
- writel(CEC_IN_FILTER_EN | CEC_EN | CEC_RX_RESET_EN,
- cec->regs + CEC_CTRL);
-
- /* Clear logical addresses */
- writel(0, cec->regs + CEC_ADDR_TABLE);
-
- /* Clear the status register */
- writel(0x0, cec->regs + CEC_STATUS);
-
- /* Enable the interrupts */
- writel(CEC_TX_DONE_IRQ_EN | CEC_RX_DONE_IRQ_EN |
- CEC_RX_SOM_IRQ_EN | CEC_RX_EOM_IRQ_EN |
- CEC_ERROR_IRQ_EN,
- cec->regs + CEC_IRQ_CTRL);
-
- } else {
- /* Clear logical addresses */
- writel(0, cec->regs + CEC_ADDR_TABLE);
-
- /* Clear the status register */
- writel(0x0, cec->regs + CEC_STATUS);
-
- /* Disable the interrupts */
- writel(0, cec->regs + CEC_IRQ_CTRL);
- }
-
- return 0;
-}
-
-static int stih_cec_adap_log_addr(struct cec_adapter *adap, u8 logical_addr)
-{
- struct stih_cec *cec = adap->priv;
- u32 reg = readl(cec->regs + CEC_ADDR_TABLE);
-
- reg |= 1 << logical_addr;
-
- if (logical_addr == CEC_LOG_ADDR_INVALID)
- reg = 0;
-
- writel(reg, cec->regs + CEC_ADDR_TABLE);
-
- return 0;
-}
-
-static int stih_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
- u32 signal_free_time, struct cec_msg *msg)
-{
- struct stih_cec *cec = adap->priv;
- int i;
-
- /* Copy message into registers */
- for (i = 0; i < msg->len; i++)
- writeb(msg->msg[i], cec->regs + CEC_TX_DATA_BASE + i);
-
- /* Start transmission, configure hardware to add start and stop bits
- * Signal free time is handled by the hardware
- */
- writel(CEC_TX_AUTO_SOM_EN | CEC_TX_AUTO_EOM_EN | CEC_TX_START |
- msg->len, cec->regs + CEC_TX_ARRAY_CTRL);
-
- return 0;
-}
-
-static void stih_tx_done(struct stih_cec *cec, u32 status)
-{
- if (status & CEC_TX_ERROR) {
- cec_transmit_done(cec->adap, CEC_TX_STATUS_ERROR, 0, 0, 0, 1);
- return;
- }
-
- if (status & CEC_TX_ARB_ERROR) {
- cec_transmit_done(cec->adap,
- CEC_TX_STATUS_ARB_LOST, 1, 0, 0, 0);
- return;
- }
-
- if (!(status & CEC_TX_ACK_GET_STS)) {
- cec_transmit_done(cec->adap, CEC_TX_STATUS_NACK, 0, 1, 0, 0);
- return;
- }
-
- cec_transmit_done(cec->adap, CEC_TX_STATUS_OK, 0, 0, 0, 0);
-}
-
-static void stih_rx_done(struct stih_cec *cec, u32 status)
-{
- struct cec_msg msg = {};
- u8 i;
-
- if (status & CEC_RX_ERROR_MIN)
- return;
-
- if (status & CEC_RX_ERROR_MAX)
- return;
-
- msg.len = readl(cec->regs + CEC_DATA_ARRAY_STATUS) & 0x1f;
-
- if (!msg.len)
- return;
-
- if (msg.len > 16)
- msg.len = 16;
-
- for (i = 0; i < msg.len; i++)
- msg.msg[i] = readl(cec->regs + CEC_RX_DATA_BASE + i);
-
- cec_received_msg(cec->adap, &msg);
-}
-
-static irqreturn_t stih_cec_irq_handler_thread(int irq, void *priv)
-{
- struct stih_cec *cec = priv;
-
- if (cec->irq_status & CEC_TX_DONE_STS)
- stih_tx_done(cec, cec->irq_status);
-
- if (cec->irq_status & CEC_RX_DONE_STS)
- stih_rx_done(cec, cec->irq_status);
-
- cec->irq_status = 0;
-
- return IRQ_HANDLED;
-}
-
-static irqreturn_t stih_cec_irq_handler(int irq, void *priv)
-{
- struct stih_cec *cec = priv;
-
- cec->irq_status = readl(cec->regs + CEC_STATUS);
- writel(cec->irq_status, cec->regs + CEC_STATUS);
-
- return IRQ_WAKE_THREAD;
-}
-
-static const struct cec_adap_ops sti_cec_adap_ops = {
- .adap_enable = stih_cec_adap_enable,
- .adap_log_addr = stih_cec_adap_log_addr,
- .adap_transmit = stih_cec_adap_transmit,
-};
-
-static int stih_cec_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct resource *res;
- struct stih_cec *cec;
- int ret;
-
- cec = devm_kzalloc(dev, sizeof(*cec), GFP_KERNEL);
- if (!cec)
- return -ENOMEM;
-
- cec->dev = dev;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- cec->regs = devm_ioremap_resource(dev, res);
- if (IS_ERR(cec->regs))
- return PTR_ERR(cec->regs);
-
- cec->irq = platform_get_irq(pdev, 0);
- if (cec->irq < 0)
- return cec->irq;
-
- ret = devm_request_threaded_irq(dev, cec->irq, stih_cec_irq_handler,
- stih_cec_irq_handler_thread, 0,
- pdev->name, cec);
- if (ret)
- return ret;
-
- cec->clk = devm_clk_get(dev, "cec-clk");
- if (IS_ERR(cec->clk)) {
- dev_err(dev, "Cannot get cec clock\n");
- return PTR_ERR(cec->clk);
- }
-
- cec->adap = cec_allocate_adapter(&sti_cec_adap_ops, cec,
- CEC_NAME,
- CEC_CAP_LOG_ADDRS | CEC_CAP_PASSTHROUGH |
- CEC_CAP_PHYS_ADDR | CEC_CAP_TRANSMIT, 1);
- ret = PTR_ERR_OR_ZERO(cec->adap);
- if (ret)
- return ret;
-
- ret = cec_register_adapter(cec->adap, &pdev->dev);
- if (ret) {
- cec_delete_adapter(cec->adap);
- return ret;
- }
-
- platform_set_drvdata(pdev, cec);
- return 0;
-}
-
-static int stih_cec_remove(struct platform_device *pdev)
-{
- return 0;
-}
-
-static const struct of_device_id stih_cec_match[] = {
- {
- .compatible = "st,stih-cec",
- },
- {},
-};
-MODULE_DEVICE_TABLE(of, stih_cec_match);
-
-static struct platform_driver stih_cec_pdrv = {
- .probe = stih_cec_probe,
- .remove = stih_cec_remove,
- .driver = {
- .name = CEC_NAME,
- .of_match_table = stih_cec_match,
- },
-};
-
-module_platform_driver(stih_cec_pdrv);
-
-MODULE_AUTHOR("Benjamin Gaignard <benjamin.gaignard@linaro.org>");
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("STIH4xx CEC driver");
diff --git a/drivers/staging/most/aim-cdev/cdev.c b/drivers/staging/most/aim-cdev/cdev.c
index 7f51024dc5eb..1e5cbc893496 100644
--- a/drivers/staging/most/aim-cdev/cdev.c
+++ b/drivers/staging/most/aim-cdev/cdev.c
@@ -99,11 +99,16 @@ static void destroy_cdev(struct aim_channel *c)
device_destroy(aim_class, c->devno);
cdev_del(&c->cdev);
- kfifo_free(&c->fifo);
spin_lock_irqsave(&ch_list_lock, flags);
list_del(&c->list);
spin_unlock_irqrestore(&ch_list_lock, flags);
+}
+
+static void destroy_channel(struct aim_channel *c)
+{
ida_simple_remove(&minor_id, MINOR(c->devno));
+ kfifo_free(&c->fifo);
+ kfree(c);
}
/**
@@ -170,9 +175,8 @@ static int aim_close(struct inode *inode, struct file *filp)
stop_channel(c);
mutex_unlock(&c->io_mutex);
} else {
- destroy_cdev(c);
mutex_unlock(&c->io_mutex);
- kfree(c);
+ destroy_channel(c);
}
return 0;
}
@@ -337,14 +341,14 @@ static int aim_disconnect_channel(struct most_interface *iface, int channel_id)
spin_lock(&c->unlink);
c->dev = NULL;
spin_unlock(&c->unlink);
+ destroy_cdev(c);
if (c->access_ref) {
stop_channel(c);
wake_up_interruptible(&c->wq);
mutex_unlock(&c->io_mutex);
} else {
- destroy_cdev(c);
mutex_unlock(&c->io_mutex);
- kfree(c);
+ destroy_channel(c);
}
return 0;
}
@@ -546,7 +550,7 @@ static void __exit mod_exit(void)
list_for_each_entry_safe(c, tmp, &channel_list, list) {
destroy_cdev(c);
- kfree(c);
+ destroy_channel(c);
}
class_destroy(aim_class);
unregister_chrdev_region(aim_devno, 1);
diff --git a/drivers/staging/most/aim-sound/sound.c b/drivers/staging/most/aim-sound/sound.c
index e4198e5e064b..ea1366a44008 100644
--- a/drivers/staging/most/aim-sound/sound.c
+++ b/drivers/staging/most/aim-sound/sound.c
@@ -429,7 +429,7 @@ static int pcm_trigger(struct snd_pcm_substream *substream, int cmd)
return 0;
default:
- pr_info("pcm_trigger(), invalid\n");
+ pr_info("%s(), invalid\n", __func__);
return -EINVAL;
}
return 0;
diff --git a/drivers/staging/most/hdm-dim2/dim2_hal.c b/drivers/staging/most/hdm-dim2/dim2_hal.c
index 0b9816ce1761..d604ec09df28 100644
--- a/drivers/staging/most/hdm-dim2/dim2_hal.c
+++ b/drivers/staging/most/hdm-dim2/dim2_hal.c
@@ -18,6 +18,7 @@
#include "dim2_errors.h"
#include "dim2_reg.h"
#include <linux/stddef.h>
+#include <linux/kernel.h>
/*
* Size factor for isochronous DBR buffer.
@@ -49,7 +50,7 @@
#define DBR_SIZE (16 * 1024) /* specified by IP */
#define DBR_BLOCK_SIZE (DBR_SIZE / 32 / DBR_MAP_SIZE)
-#define ROUND_UP_TO(x, d) (((x) + (d) - 1) / (d) * (d))
+#define ROUND_UP_TO(x, d) (DIV_ROUND_UP(x, (d)) * (d))
/* -------------------------------------------------------------------------- */
/* generic helper functions and macros */
@@ -117,7 +118,7 @@ static int alloc_dbr(u16 size)
return DBR_SIZE; /* out of memory */
for (i = 0; i < DBR_MAP_SIZE; i++) {
- u32 const blocks = (size + DBR_BLOCK_SIZE - 1) / DBR_BLOCK_SIZE;
+ u32 const blocks = DIV_ROUND_UP(size, DBR_BLOCK_SIZE);
u32 mask = ~((~(u32)0) << blocks);
do {
@@ -137,7 +138,7 @@ static int alloc_dbr(u16 size)
static void free_dbr(int offs, int size)
{
int block_idx = offs / DBR_BLOCK_SIZE;
- u32 const blocks = (size + DBR_BLOCK_SIZE - 1) / DBR_BLOCK_SIZE;
+ u32 const blocks = DIV_ROUND_UP(size, DBR_BLOCK_SIZE);
u32 mask = ~((~(u32)0) << blocks);
mask <<= block_idx % 32;
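/*
 * Editor's note: an illustrative check, not part of the patch, that the
 * DIV_ROUND_UP() rewrite in ROUND_UP_TO(), alloc_dbr() and free_dbr()
 * above preserves the old open-coded rounding.  The sizes used are
 * arbitrary example values.
 */
#include <linux/bug.h>
#include <linux/kernel.h>

#define OLD_ROUND_UP_TO(x, d)	(((x) + (d) - 1) / (d) * (d))
#define NEW_ROUND_UP_TO(x, d)	(DIV_ROUND_UP(x, (d)) * (d))

static void __maybe_unused dim2_rounding_example(void)
{
	/* a partial block rounds up to one full block in both forms */
	BUILD_BUG_ON(OLD_ROUND_UP_TO(100, 128) != 128);
	BUILD_BUG_ON(NEW_ROUND_UP_TO(100, 128) != 128);
	/* an exact multiple is left unchanged by both forms */
	BUILD_BUG_ON(OLD_ROUND_UP_TO(256, 128) != 256);
	BUILD_BUG_ON(NEW_ROUND_UP_TO(256, 128) != 256);
}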
diff --git a/drivers/staging/most/hdm-usb/hdm_usb.c b/drivers/staging/most/hdm-usb/hdm_usb.c
index 2bfea9b48366..a95b5910d9fc 100644
--- a/drivers/staging/most/hdm-usb/hdm_usb.c
+++ b/drivers/staging/most/hdm-usb/hdm_usb.c
@@ -281,7 +281,6 @@ static int hdm_add_padding(struct most_dev *mdev, int channel, struct mbo *mbo)
struct most_channel_config *conf = &mdev->conf[channel];
unsigned int frame_size = get_stream_frame_size(conf);
unsigned int j, num_frames;
- u16 rd_addr, wr_addr;
if (!frame_size)
return -EIO;
@@ -293,13 +292,10 @@ static int hdm_add_padding(struct most_dev *mdev, int channel, struct mbo *mbo)
return -EIO;
}
- for (j = 1; j < num_frames; j++) {
- wr_addr = (num_frames - j) * USB_MTU;
- rd_addr = (num_frames - j) * frame_size;
- memmove(mbo->virt_address + wr_addr,
- mbo->virt_address + rd_addr,
+ for (j = num_frames - 1; j > 0; j--)
+ memmove(mbo->virt_address + j * USB_MTU,
+ mbo->virt_address + j * frame_size,
frame_size);
- }
mbo->buffer_length = num_frames * USB_MTU;
return 0;
}
@@ -649,8 +645,6 @@ static int hdm_configure_channel(struct most_interface *iface, int channel,
{
unsigned int num_frames;
unsigned int frame_size;
- unsigned int temp_size;
- unsigned int tail_space;
struct most_dev *mdev = to_mdev(iface);
struct device *dev = &mdev->usb_device->dev;
@@ -685,7 +679,6 @@ static int hdm_configure_channel(struct most_interface *iface, int channel,
}
mdev->padding_active[channel] = true;
- temp_size = conf->buffer_size;
frame_size = get_stream_frame_size(conf);
if (frame_size == 0 || frame_size > USB_MTU) {
@@ -693,25 +686,19 @@ static int hdm_configure_channel(struct most_interface *iface, int channel,
return -EINVAL;
}
+ num_frames = conf->buffer_size / frame_size;
+
if (conf->buffer_size % frame_size) {
- u16 tmp_val;
-
- tmp_val = conf->buffer_size / frame_size;
- conf->buffer_size = tmp_val * frame_size;
- dev_notice(dev,
- "Channel %d - rounding buffer size to %d bytes, channel config says %d bytes\n",
- channel,
- conf->buffer_size,
- temp_size);
- }
+ u16 old_size = conf->buffer_size;
- num_frames = conf->buffer_size / frame_size;
- tail_space = num_frames * (USB_MTU - frame_size);
- temp_size += tail_space;
+ conf->buffer_size = num_frames * frame_size;
+ dev_warn(dev, "%s: fixed buffer size (%d -> %d)\n",
+ mdev->suffix[channel], old_size, conf->buffer_size);
+ }
/* calculate extra length to comply w/ HW padding */
- conf->extra_len = (DIV_ROUND_UP(temp_size, USB_MTU) * USB_MTU)
- - conf->buffer_size;
+ conf->extra_len = num_frames * (USB_MTU - frame_size);
+
exit:
mdev->conf[channel] = *conf;
if (conf->data_type == MOST_CH_ASYNC) {
@@ -1018,7 +1005,7 @@ static ssize_t store_value(struct most_dci_obj *dci_obj,
err = drci_wr_reg(usb_dev, dci_obj->reg_addr, val);
else if (!strcmp(name, "sync_ep"))
err = start_sync_ep(usb_dev, val);
- else if (!get_static_reg_addr(ro_regs, name, &reg_addr))
+ else if (!get_static_reg_addr(rw_regs, name, &reg_addr))
err = drci_wr_reg(usb_dev, reg_addr, val);
else
return -EFAULT;
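/*
 * Editor's note: a worked example, not part of the patch, of the buffer
 * geometry that the hdm_configure_channel() and hdm_add_padding() hunks
 * above compute.  USB_MTU = 512 and frame_size = 188 are assumed example
 * values, not taken from this diff.
 */
#include <linux/kernel.h>

static void __maybe_unused hdm_padding_example(void)
{
	const unsigned int usb_mtu = 512;	/* assumed USB_MTU */
	const unsigned int frame_size = 188;	/* assumed stream frame size */
	unsigned int buffer_size = 1000;	/* requested by the channel config */
	unsigned int num_frames, extra_len;

	num_frames = buffer_size / frame_size;		/* 1000 / 188 = 5 */
	buffer_size = num_frames * frame_size;		/* rounded down to 940 */
	extra_len = num_frames * (usb_mtu - frame_size); /* 5 * 324 = 1620 */

	/*
	 * hdm_add_padding() then moves frame j from offset j * frame_size
	 * to offset j * usb_mtu, iterating from the last frame downwards so
	 * that no frame is overwritten before it has been copied.
	 */
	(void)extra_len;
}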
diff --git a/drivers/staging/most/mostcore/core.c b/drivers/staging/most/mostcore/core.c
index 191404bc5906..069269db394c 100644
--- a/drivers/staging/most/mostcore/core.c
+++ b/drivers/staging/most/mostcore/core.c
@@ -82,7 +82,7 @@ struct most_inst_obj {
static const struct {
int most_ch_data_type;
- char *name;
+ const char *name;
} ch_data_type[] = {
{ MOST_CH_CONTROL, "control\n" },
{ MOST_CH_ASYNC, "async\n" },
@@ -127,10 +127,6 @@ struct most_c_attr {
#define to_channel_attr(a) container_of(a, struct most_c_attr, attr)
-#define MOST_CHNL_ATTR(_name, _mode, _show, _store) \
- struct most_c_attr most_chnl_attr_##_name = \
- __ATTR(_name, _mode, _show, _store)
-
/**
* channel_attr_show - show function of channel object
* @kobj: pointer to its kobject
@@ -256,7 +252,7 @@ static void most_channel_release(struct kobject *kobj)
kfree(c);
}
-static ssize_t show_available_directions(struct most_c_obj *c,
+static ssize_t available_directions_show(struct most_c_obj *c,
struct most_c_attr *attr,
char *buf)
{
@@ -271,7 +267,7 @@ static ssize_t show_available_directions(struct most_c_obj *c,
return strlen(buf);
}
-static ssize_t show_available_datatypes(struct most_c_obj *c,
+static ssize_t available_datatypes_show(struct most_c_obj *c,
struct most_c_attr *attr,
char *buf)
{
@@ -290,10 +286,9 @@ static ssize_t show_available_datatypes(struct most_c_obj *c,
return strlen(buf);
}
-static
-ssize_t show_number_of_packet_buffers(struct most_c_obj *c,
- struct most_c_attr *attr,
- char *buf)
+static ssize_t number_of_packet_buffers_show(struct most_c_obj *c,
+ struct most_c_attr *attr,
+ char *buf)
{
unsigned int i = c->channel_id;
@@ -301,10 +296,9 @@ ssize_t show_number_of_packet_buffers(struct most_c_obj *c,
c->iface->channel_vector[i].num_buffers_packet);
}
-static
-ssize_t show_number_of_stream_buffers(struct most_c_obj *c,
- struct most_c_attr *attr,
- char *buf)
+static ssize_t number_of_stream_buffers_show(struct most_c_obj *c,
+ struct most_c_attr *attr,
+ char *buf)
{
unsigned int i = c->channel_id;
@@ -312,10 +306,9 @@ ssize_t show_number_of_stream_buffers(struct most_c_obj *c,
c->iface->channel_vector[i].num_buffers_streaming);
}
-static
-ssize_t show_size_of_packet_buffer(struct most_c_obj *c,
- struct most_c_attr *attr,
- char *buf)
+static ssize_t size_of_packet_buffer_show(struct most_c_obj *c,
+ struct most_c_attr *attr,
+ char *buf)
{
unsigned int i = c->channel_id;
@@ -323,10 +316,9 @@ ssize_t show_size_of_packet_buffer(struct most_c_obj *c,
c->iface->channel_vector[i].buffer_size_packet);
}
-static
-ssize_t show_size_of_stream_buffer(struct most_c_obj *c,
- struct most_c_attr *attr,
- char *buf)
+static ssize_t size_of_stream_buffer_show(struct most_c_obj *c,
+ struct most_c_attr *attr,
+ char *buf)
{
unsigned int i = c->channel_id;
@@ -334,32 +326,21 @@ ssize_t show_size_of_stream_buffer(struct most_c_obj *c,
c->iface->channel_vector[i].buffer_size_streaming);
}
-static ssize_t show_channel_starving(struct most_c_obj *c,
+static ssize_t channel_starving_show(struct most_c_obj *c,
struct most_c_attr *attr,
char *buf)
{
return snprintf(buf, PAGE_SIZE, "%d\n", c->is_starving);
}
-#define create_show_channel_attribute(val) \
- static MOST_CHNL_ATTR(val, 0444, show_##val, NULL)
-
-create_show_channel_attribute(available_directions);
-create_show_channel_attribute(available_datatypes);
-create_show_channel_attribute(number_of_packet_buffers);
-create_show_channel_attribute(number_of_stream_buffers);
-create_show_channel_attribute(size_of_stream_buffer);
-create_show_channel_attribute(size_of_packet_buffer);
-create_show_channel_attribute(channel_starving);
-
-static ssize_t show_set_number_of_buffers(struct most_c_obj *c,
+static ssize_t set_number_of_buffers_show(struct most_c_obj *c,
struct most_c_attr *attr,
char *buf)
{
return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.num_buffers);
}
-static ssize_t store_set_number_of_buffers(struct most_c_obj *c,
+static ssize_t set_number_of_buffers_store(struct most_c_obj *c,
struct most_c_attr *attr,
const char *buf,
size_t count)
@@ -371,14 +352,14 @@ static ssize_t store_set_number_of_buffers(struct most_c_obj *c,
return count;
}
-static ssize_t show_set_buffer_size(struct most_c_obj *c,
+static ssize_t set_buffer_size_show(struct most_c_obj *c,
struct most_c_attr *attr,
char *buf)
{
return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.buffer_size);
}
-static ssize_t store_set_buffer_size(struct most_c_obj *c,
+static ssize_t set_buffer_size_store(struct most_c_obj *c,
struct most_c_attr *attr,
const char *buf,
size_t count)
@@ -390,7 +371,7 @@ static ssize_t store_set_buffer_size(struct most_c_obj *c,
return count;
}
-static ssize_t show_set_direction(struct most_c_obj *c,
+static ssize_t set_direction_show(struct most_c_obj *c,
struct most_c_attr *attr,
char *buf)
{
@@ -401,7 +382,7 @@ static ssize_t show_set_direction(struct most_c_obj *c,
return snprintf(buf, PAGE_SIZE, "unconfigured\n");
}
-static ssize_t store_set_direction(struct most_c_obj *c,
+static ssize_t set_direction_store(struct most_c_obj *c,
struct most_c_attr *attr,
const char *buf,
size_t count)
@@ -421,7 +402,7 @@ static ssize_t store_set_direction(struct most_c_obj *c,
return count;
}
-static ssize_t show_set_datatype(struct most_c_obj *c,
+static ssize_t set_datatype_show(struct most_c_obj *c,
struct most_c_attr *attr,
char *buf)
{
@@ -434,7 +415,7 @@ static ssize_t show_set_datatype(struct most_c_obj *c,
return snprintf(buf, PAGE_SIZE, "unconfigured\n");
}
-static ssize_t store_set_datatype(struct most_c_obj *c,
+static ssize_t set_datatype_store(struct most_c_obj *c,
struct most_c_attr *attr,
const char *buf,
size_t count)
@@ -455,14 +436,14 @@ static ssize_t store_set_datatype(struct most_c_obj *c,
return count;
}
-static ssize_t show_set_subbuffer_size(struct most_c_obj *c,
+static ssize_t set_subbuffer_size_show(struct most_c_obj *c,
struct most_c_attr *attr,
char *buf)
{
return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.subbuffer_size);
}
-static ssize_t store_set_subbuffer_size(struct most_c_obj *c,
+static ssize_t set_subbuffer_size_store(struct most_c_obj *c,
struct most_c_attr *attr,
const char *buf,
size_t count)
@@ -474,14 +455,14 @@ static ssize_t store_set_subbuffer_size(struct most_c_obj *c,
return count;
}
-static ssize_t show_set_packets_per_xact(struct most_c_obj *c,
+static ssize_t set_packets_per_xact_show(struct most_c_obj *c,
struct most_c_attr *attr,
char *buf)
{
return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.packets_per_xact);
}
-static ssize_t store_set_packets_per_xact(struct most_c_obj *c,
+static ssize_t set_packets_per_xact_store(struct most_c_obj *c,
struct most_c_attr *attr,
const char *buf,
size_t count)
@@ -493,33 +474,39 @@ static ssize_t store_set_packets_per_xact(struct most_c_obj *c,
return count;
}
-#define create_channel_attribute(value) \
- static MOST_CHNL_ATTR(value, 0644, show_##value, store_##value)
-
-create_channel_attribute(set_buffer_size);
-create_channel_attribute(set_number_of_buffers);
-create_channel_attribute(set_direction);
-create_channel_attribute(set_datatype);
-create_channel_attribute(set_subbuffer_size);
-create_channel_attribute(set_packets_per_xact);
+static struct most_c_attr most_c_attrs[] = {
+ __ATTR_RO(available_directions),
+ __ATTR_RO(available_datatypes),
+ __ATTR_RO(number_of_packet_buffers),
+ __ATTR_RO(number_of_stream_buffers),
+ __ATTR_RO(size_of_stream_buffer),
+ __ATTR_RO(size_of_packet_buffer),
+ __ATTR_RO(channel_starving),
+ __ATTR_RW(set_buffer_size),
+ __ATTR_RW(set_number_of_buffers),
+ __ATTR_RW(set_direction),
+ __ATTR_RW(set_datatype),
+ __ATTR_RW(set_subbuffer_size),
+ __ATTR_RW(set_packets_per_xact),
+};
/**
* most_channel_def_attrs - array of default attributes of channel object
*/
static struct attribute *most_channel_def_attrs[] = {
- &most_chnl_attr_available_directions.attr,
- &most_chnl_attr_available_datatypes.attr,
- &most_chnl_attr_number_of_packet_buffers.attr,
- &most_chnl_attr_number_of_stream_buffers.attr,
- &most_chnl_attr_size_of_packet_buffer.attr,
- &most_chnl_attr_size_of_stream_buffer.attr,
- &most_chnl_attr_set_number_of_buffers.attr,
- &most_chnl_attr_set_buffer_size.attr,
- &most_chnl_attr_set_direction.attr,
- &most_chnl_attr_set_datatype.attr,
- &most_chnl_attr_set_subbuffer_size.attr,
- &most_chnl_attr_set_packets_per_xact.attr,
- &most_chnl_attr_channel_starving.attr,
+ &most_c_attrs[0].attr,
+ &most_c_attrs[1].attr,
+ &most_c_attrs[2].attr,
+ &most_c_attrs[3].attr,
+ &most_c_attrs[4].attr,
+ &most_c_attrs[5].attr,
+ &most_c_attrs[6].attr,
+ &most_c_attrs[7].attr,
+ &most_c_attrs[8].attr,
+ &most_c_attrs[9].attr,
+ &most_c_attrs[10].attr,
+ &most_c_attrs[11].attr,
+ &most_c_attrs[12].attr,
NULL,
};
@@ -562,9 +549,6 @@ create_most_c_obj(const char *name, struct kobject *parent)
/* ___ ___
* ___I N S T A N C E___
*/
-#define MOST_INST_ATTR(_name, _mode, _show, _store) \
- struct most_inst_attribute most_inst_attr_##_name = \
- __ATTR(_name, _mode, _show, _store)
static struct list_head instance_list;
@@ -652,7 +636,7 @@ static void most_inst_release(struct kobject *kobj)
kfree(inst);
}
-static ssize_t show_description(struct most_inst_obj *instance_obj,
+static ssize_t description_show(struct most_inst_obj *instance_obj,
struct most_inst_attribute *attr,
char *buf)
{
@@ -660,7 +644,7 @@ static ssize_t show_description(struct most_inst_obj *instance_obj,
instance_obj->iface->description);
}
-static ssize_t show_interface(struct most_inst_obj *instance_obj,
+static ssize_t interface_show(struct most_inst_obj *instance_obj,
struct most_inst_attribute *attr,
char *buf)
{
@@ -687,11 +671,11 @@ static ssize_t show_interface(struct most_inst_obj *instance_obj,
return snprintf(buf, PAGE_SIZE, "unknown\n");
}
-#define create_inst_attribute(value) \
- static MOST_INST_ATTR(value, 0444, show_##value, NULL)
+static struct most_inst_attribute most_inst_attr_description =
+ __ATTR_RO(description);
-create_inst_attribute(description);
-create_inst_attribute(interface);
+static struct most_inst_attribute most_inst_attr_interface =
+ __ATTR_RO(interface);
static struct attribute *most_inst_def_attrs[] = {
&most_inst_attr_description.attr,
@@ -847,9 +831,9 @@ static void most_aim_release(struct kobject *kobj)
kfree(aim_obj);
}
-static ssize_t add_link_show(struct most_aim_obj *aim_obj,
- struct most_aim_attribute *attr,
- char *buf)
+static ssize_t links_show(struct most_aim_obj *aim_obj,
+ struct most_aim_attribute *attr,
+ char *buf)
{
struct most_c_obj *c;
struct most_inst_obj *i;
@@ -943,7 +927,7 @@ most_c_obj *get_channel_by_name(char *mdev, char *mdev_ch)
}
/**
- * store_add_link - store() function for add_link attribute
+ * add_link_store - store() function for add_link attribute
* @aim_obj: pointer to AIM object
* @attr: its attributes
* @buf: buffer
@@ -1013,11 +997,8 @@ static ssize_t add_link_store(struct most_aim_obj *aim_obj,
return len;
}
-static struct most_aim_attribute most_aim_attr_add_link =
- __ATTR_RW(add_link);
-
/**
- * store_remove_link - store function for remove_link attribute
+ * remove_link_store - store function for remove_link attribute
* @aim_obj: pointer to AIM object
* @attr: its attributes
* @buf: buffer
@@ -1056,12 +1037,16 @@ static ssize_t remove_link_store(struct most_aim_obj *aim_obj,
return len;
}
-static struct most_aim_attribute most_aim_attr_remove_link =
- __ATTR_WO(remove_link);
+static struct most_aim_attribute most_aim_attrs[] = {
+ __ATTR_RO(links),
+ __ATTR_WO(add_link),
+ __ATTR_WO(remove_link),
+};
static struct attribute *most_aim_def_attrs[] = {
- &most_aim_attr_add_link.attr,
- &most_aim_attr_remove_link.attr,
+ &most_aim_attrs[0].attr,
+ &most_aim_attrs[1].attr,
+ &most_aim_attrs[2].attr,
NULL,
};
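/*
 * Editor's note on the sysfs attribute conversion above; this block is
 * not part of the patch.  The __ATTR_RO()/__ATTR_RW()/__ATTR_WO() helpers
 * used in most_c_attrs[] and most_aim_attrs[] expect callbacks named
 * <attr>_show() and/or <attr>_store(), which is why the show_xxx() and
 * store_xxx() functions in core.c are renamed to xxx_show()/xxx_store().
 * A simplified sketch of the read-only variant (not the exact kernel
 * definition):
 */
#include <linux/stringify.h>

#define EXAMPLE_ATTR_RO(_name) {					\
	.attr = { .name = __stringify(_name), .mode = 0444 },		\
	.show = _name##_show,						\
}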
diff --git a/drivers/staging/nvec/nvec-keytable.h b/drivers/staging/nvec/nvec-keytable.h
index 1dc22cb8812a..7008c96bdbbe 100644
--- a/drivers/staging/nvec/nvec-keytable.h
+++ b/drivers/staging/nvec/nvec-keytable.h
@@ -17,8 +17,7 @@
* more details.
*
* You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ * with this program; if not, see http://www.gnu.org/licenses
*/
static unsigned short code_tab_102us[] = {
diff --git a/drivers/staging/nvec/nvec_kbd.c b/drivers/staging/nvec/nvec_kbd.c
index e881e6b26a4c..a01f486621eb 100644
--- a/drivers/staging/nvec/nvec_kbd.c
+++ b/drivers/staging/nvec/nvec_kbd.c
@@ -58,7 +58,7 @@ static int nvec_keys_notifier(struct notifier_block *nb,
unsigned long event_type, void *data)
{
int code, state;
- unsigned char *msg = (unsigned char *)data;
+ unsigned char *msg = data;
if (event_type == NVEC_KB_EVT) {
int _size = (msg[0] & (3 << 5)) >> 5;
diff --git a/drivers/staging/olpc_dcon/olpc_dcon.c b/drivers/staging/olpc_dcon/olpc_dcon.c
index 684815c98789..f7f3a780ec10 100644
--- a/drivers/staging/olpc_dcon/olpc_dcon.c
+++ b/drivers/staging/olpc_dcon/olpc_dcon.c
@@ -290,7 +290,7 @@ static void dcon_source_switch(struct work_struct *work)
switch (source) {
case DCON_SOURCE_CPU:
- pr_info("dcon_source_switch to CPU\n");
+ pr_info("%s to CPU\n", __func__);
/* Enable the scanline interrupt bit */
if (dcon_write(dcon, DCON_REG_MODE,
dcon->disp_mode | MODE_SCAN_INT))
@@ -330,7 +330,7 @@ static void dcon_source_switch(struct work_struct *work)
{
ktime_t delta_t;
- pr_info("dcon_source_switch to DCON\n");
+ pr_info("%s to DCON\n", __func__);
/* Clear DCONLOAD - this implies that the DCON is in control */
pdata->set_dconload(0);
diff --git a/drivers/staging/olpc_dcon/olpc_dcon_xo_1_5.c b/drivers/staging/olpc_dcon/olpc_dcon_xo_1_5.c
index 75c3c2fe9560..64584425b01c 100644
--- a/drivers/staging/olpc_dcon/olpc_dcon_xo_1_5.c
+++ b/drivers/staging/olpc_dcon/olpc_dcon_xo_1_5.c
@@ -64,7 +64,7 @@ static int dcon_init_xo_1_5(struct dcon_priv *dcon)
dcon_clear_irq();
/* set PMIO_Rx52[6] to enable SCI/SMI on gpio12 */
- outb(inb(VX855_GPI_SCI_SMI)|BIT_GPIO12, VX855_GPI_SCI_SMI);
+ outb(inb(VX855_GPI_SCI_SMI) | BIT_GPIO12, VX855_GPI_SCI_SMI);
/* Determine the current state of DCONLOAD, likely set by firmware */
/* GPIO1 */
@@ -129,7 +129,7 @@ static void dcon_wiggle_xo_1_5(void)
udelay(5);
/* set PMIO_Rx52[6] to enable SCI/SMI on gpio12 */
- outb(inb(VX855_GPI_SCI_SMI)|BIT_GPIO12, VX855_GPI_SCI_SMI);
+ outb(inb(VX855_GPI_SCI_SMI) | BIT_GPIO12, VX855_GPI_SCI_SMI);
}
static void dcon_set_dconload_xo_1_5(int val)
diff --git a/drivers/staging/rtl8188eu/Kconfig b/drivers/staging/rtl8188eu/Kconfig
index 94f38793ab47..cb836c59d564 100644
--- a/drivers/staging/rtl8188eu/Kconfig
+++ b/drivers/staging/rtl8188eu/Kconfig
@@ -1,6 +1,7 @@
config R8188EU
tristate "Realtek RTL8188EU Wireless LAN NIC driver"
depends on WLAN && USB && CFG80211
+ depends on m
select WIRELESS_EXT
select WEXT_PRIV
---help---
diff --git a/drivers/staging/rtl8188eu/core/rtw_ap.c b/drivers/staging/rtl8188eu/core/rtw_ap.c
index 1c8fa3a1f5bb..519b4d3584a2 100644
--- a/drivers/staging/rtl8188eu/core/rtw_ap.c
+++ b/drivers/staging/rtl8188eu/core/rtw_ap.c
@@ -30,7 +30,6 @@ void init_mlme_ap_info(struct adapter *padapter)
struct sta_priv *pstapriv = &padapter->stapriv;
struct wlan_acl_pool *pacl_list = &pstapriv->acl_list;
-
spin_lock_init(&pmlmepriv->bcn_update_lock);
/* for ACL */
@@ -448,10 +447,8 @@ void expire_timeout_chk(struct adapter *padapter)
void add_RATid(struct adapter *padapter, struct sta_info *psta, u8 rssi_level)
{
int i;
- u8 rf_type;
u32 init_rate = 0;
unsigned char sta_band = 0, raid, shortGIrate = false;
- unsigned char limit;
unsigned int tx_ra_bitmap = 0;
struct ht_priv *psta_ht = NULL;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
@@ -472,16 +469,9 @@ void add_RATid(struct adapter *padapter, struct sta_info *psta, u8 rssi_level)
}
/* n mode ra_bitmap */
if (psta_ht->ht_option) {
- rtw_hal_get_hwreg(padapter, HW_VAR_RF_TYPE, (u8 *)(&rf_type));
- if (rf_type == RF_2T2R)
- limit = 16;/* 2R */
- else
- limit = 8;/* 1R */
-
- for (i = 0; i < limit; i++) {
- if (psta_ht->ht_cap.mcs.rx_mask[i / 8] & BIT(i % 8))
+ for (i = 0; i < 8; i++)
+ if (psta_ht->ht_cap.mcs.rx_mask[0] & BIT(i))
tx_ra_bitmap |= BIT(i + 12);
- }
/* max short GI rate */
shortGIrate = psta_ht->sgi;
@@ -729,7 +719,7 @@ static void start_bss_network(struct adapter *padapter, u8 *pbuf)
u8 val8, cur_channel, cur_bwmode, cur_ch_offset;
u16 bcn_interval;
u32 acparm;
- int ie_len;
+ uint ie_len;
struct registry_priv *pregpriv = &padapter->registrypriv;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct security_priv *psecuritypriv = &padapter->securitypriv;
@@ -888,7 +878,7 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len)
return _FAIL;
- if (len > MAX_IE_SZ)
+ if (len < 0 || len > MAX_IE_SZ)
return _FAIL;
pbss_network->IELength = len;
@@ -1033,15 +1023,12 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len)
p = rtw_get_ie(ie + _BEACON_IE_OFFSET_, _HT_CAPABILITY_IE_, &ie_len,
(pbss_network->IELength - _BEACON_IE_OFFSET_));
if (p && ie_len > 0) {
- u8 rf_type;
struct ieee80211_ht_cap *pht_cap = (struct ieee80211_ht_cap *)(p + 2);
pHT_caps_ie = p;
ht_cap = true;
network_type |= WIRELESS_11_24N;
- rtw_hal_get_hwreg(padapter, HW_VAR_RF_TYPE, (u8 *)(&rf_type));
-
if ((psecuritypriv->wpa_pairwise_cipher & WPA_CIPHER_CCMP) ||
(psecuritypriv->wpa2_pairwise_cipher & WPA_CIPHER_CCMP))
pht_cap->ampdu_params_info |= (IEEE80211_HT_CAP_AMPDU_DENSITY & (0x07 << 2));
@@ -1051,10 +1038,8 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len)
/* set Max Rx AMPDU size to 64K */
pht_cap->ampdu_params_info |= (IEEE80211_HT_CAP_AMPDU_FACTOR & 0x03);
- if (rf_type == RF_1T1R) {
- pht_cap->mcs.rx_mask[0] = 0xff;
- pht_cap->mcs.rx_mask[1] = 0x0;
- }
+ pht_cap->mcs.rx_mask[0] = 0xff;
+ pht_cap->mcs.rx_mask[1] = 0x0;
memcpy(&pmlmepriv->htpriv.ht_cap, p+2, ie_len);
}
diff --git a/drivers/staging/rtl8188eu/core/rtw_cmd.c b/drivers/staging/rtl8188eu/core/rtw_cmd.c
index 14979666dadd..9754322b506e 100644
--- a/drivers/staging/rtl8188eu/core/rtw_cmd.c
+++ b/drivers/staging/rtl8188eu/core/rtw_cmd.c
@@ -1295,7 +1295,7 @@ void rtw_setstaKey_cmdrsp_callback(struct adapter *padapter, struct cmd_obj *pc
if (psta == NULL) {
- RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_, ("\nERROR: rtw_setstaKey_cmdrsp_callback => can't get sta_info\n\n"));
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_, ("\nERROR: %s => can't get sta_info\n\n", __func__));
goto exit;
}
exit:
@@ -1312,7 +1312,7 @@ void rtw_setassocsta_cmdrsp_callback(struct adapter *padapter, struct cmd_obj *
if (psta == NULL) {
- RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_, ("\nERROR: setassocsta_cmdrsp_callbac => can't get sta_info\n\n"));
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_, ("\nERROR: %s => can't get sta_info\n\n", __func__));
goto exit;
}
diff --git a/drivers/staging/rtl8188eu/core/rtw_ieee80211.c b/drivers/staging/rtl8188eu/core/rtw_ieee80211.c
index d1cd34011602..d1dafe0f20c6 100644
--- a/drivers/staging/rtl8188eu/core/rtw_ieee80211.c
+++ b/drivers/staging/rtl8188eu/core/rtw_ieee80211.c
@@ -158,7 +158,7 @@ u8 *rtw_set_ie
/*----------------------------------------------------------------------------
index: the information element id index, limit is the limit for search
-----------------------------------------------------------------------------*/
-u8 *rtw_get_ie(u8 *pbuf, int index, int *len, int limit)
+u8 *rtw_get_ie(u8 *pbuf, int index, uint *len, int limit)
{
int tmp, i;
u8 *p;
@@ -226,7 +226,8 @@ uint rtw_get_rateset_len(u8 *rateset)
int rtw_generate_ie(struct registry_priv *pregistrypriv)
{
u8 wireless_mode;
- int sz = 0, rateLen;
+ int rateLen;
+ uint sz = 0;
struct wlan_bssid_ex *pdev_network = &pregistrypriv->dev_network;
u8 *ie = pdev_network->IEs;
@@ -291,9 +292,9 @@ int rtw_generate_ie(struct registry_priv *pregistrypriv)
return sz;
}
-unsigned char *rtw_get_wpa_ie(unsigned char *pie, int *wpa_ie_len, int limit)
+unsigned char *rtw_get_wpa_ie(unsigned char *pie, uint *wpa_ie_len, int limit)
{
- int len;
+ uint len;
u16 val16;
__le16 le_tmp;
unsigned char wpa_oui_type[] = {0x00, 0x50, 0xf2, 0x01};
@@ -331,7 +332,7 @@ check_next_ie:
return NULL;
}
-unsigned char *rtw_get_wpa2_ie(unsigned char *pie, int *rsn_ie_len, int limit)
+unsigned char *rtw_get_wpa2_ie(unsigned char *pie, uint *rsn_ie_len, int limit)
{
return rtw_get_ie(pie, _WPA2_IE_ID_, rsn_ie_len, limit);
@@ -1000,7 +1001,7 @@ int ieee80211_get_hdrlen(u16 fc)
static int rtw_get_cipher_info(struct wlan_network *pnetwork)
{
- int wpa_ielen;
+ uint wpa_ielen;
unsigned char *pbuf;
int group_cipher = 0, pairwise_cipher = 0, is8021x = 0;
int ret = _FAIL;
@@ -1045,7 +1046,7 @@ void rtw_get_bcn_info(struct wlan_network *pnetwork)
__le16 le_tmp;
u16 wpa_len = 0, rsn_len = 0;
struct HT_info_element *pht_info = NULL;
- int len;
+ uint len;
unsigned char *p;
memcpy(&le_tmp, rtw_get_capability_from_ie(pnetwork->network.IEs), 2);
diff --git a/drivers/staging/rtl8188eu/core/rtw_ioctl_set.c b/drivers/staging/rtl8188eu/core/rtw_ioctl_set.c
index 67508a6cf0e5..d8d88b5f68e5 100644
--- a/drivers/staging/rtl8188eu/core/rtw_ioctl_set.c
+++ b/drivers/staging/rtl8188eu/core/rtw_ioctl_set.c
@@ -569,7 +569,6 @@ u16 rtw_get_cur_max_rate(struct adapter *adapter)
struct registry_priv *pregistrypriv = &adapter->registrypriv;
struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
struct wlan_bssid_ex *pcur_bss = &pmlmepriv->cur_network.network;
- u8 rf_type = 0;
u8 bw_40MHz = 0, short_GI_20 = 0, short_GI_40 = 0;
u32 ht_ielen = 0;
@@ -586,9 +585,8 @@ u16 rtw_get_cur_max_rate(struct adapter *adapter)
short_GI_20 = (le16_to_cpu(pmlmeinfo->HT_caps.cap_info) & IEEE80211_HT_CAP_SGI_20) ? 1 : 0;
short_GI_40 = (le16_to_cpu(pmlmeinfo->HT_caps.cap_info) & IEEE80211_HT_CAP_SGI_40) ? 1 : 0;
- rtw_hal_get_hwreg(adapter, HW_VAR_RF_TYPE, (u8 *)(&rf_type));
max_rate = rtw_mcs_rate(
- rf_type,
+ RF_1T1R,
bw_40MHz & (pregistrypriv->cbw40_enable),
short_GI_20,
short_GI_40,
diff --git a/drivers/staging/rtl8188eu/core/rtw_mlme.c b/drivers/staging/rtl8188eu/core/rtw_mlme.c
index a71928952eca..301085a459c9 100644
--- a/drivers/staging/rtl8188eu/core/rtw_mlme.c
+++ b/drivers/staging/rtl8188eu/core/rtw_mlme.c
@@ -28,7 +28,6 @@
#include <rtw_ioctl_set.h>
#include <linux/vmalloc.h>
-extern unsigned char MCS_rate_2R[16];
extern unsigned char MCS_rate_1R[16];
int rtw_init_mlme_priv(struct adapter *padapter)
@@ -56,7 +55,7 @@ int rtw_init_mlme_priv(struct adapter *padapter)
pbuf = vzalloc(MAX_BSS_CNT * (sizeof(struct wlan_network)));
- if (pbuf == NULL) {
+ if (!pbuf) {
res = _FAIL;
goto exit;
}
@@ -148,7 +147,7 @@ static void _rtw_free_network(struct mlme_priv *pmlmepriv, struct wlan_network *
u32 lifetime = SCANQUEUE_LIFETIME;
struct __queue *free_queue = &(pmlmepriv->free_bss_pool);
- if (pnetwork == NULL)
+ if (!pnetwork)
return;
if (pnetwork->fixed)
@@ -172,7 +171,7 @@ void _rtw_free_network_nolock(struct mlme_priv *pmlmepriv, struct wlan_network *
{
struct __queue *free_queue = &(pmlmepriv->free_bss_pool);
- if (pnetwork == NULL)
+ if (!pnetwork)
return;
if (pnetwork->fixed)
return;
@@ -181,10 +180,10 @@ void _rtw_free_network_nolock(struct mlme_priv *pmlmepriv, struct wlan_network *
}
/*
- return the wlan_network with the matching addr
-
- Shall be calle under atomic context... to avoid possible racing condition...
-*/
+ * return the wlan_network with the matching addr
+ *
+ * Shall be called under atomic context... to avoid possible racing condition...
+ */
struct wlan_network *rtw_find_network(struct __queue *scanned_queue, u8 *addr)
{
struct list_head *phead, *plist;
@@ -322,7 +321,6 @@ int is_same_network(struct wlan_bssid_ex *src, struct wlan_bssid_ex *dst)
memcpy((u8 *)&le_scap, rtw_get_capability_from_ie(src->IEs), 2);
memcpy((u8 *)&le_dcap, rtw_get_capability_from_ie(dst->IEs), 2);
-
s_cap = le16_to_cpu(le_scap);
d_cap = le16_to_cpu(le_dcap);
@@ -347,7 +345,7 @@ struct wlan_network *rtw_get_oldest_wlan_network(struct __queue *scanned_queue)
pwlan = container_of(plist, struct wlan_network, list);
if (!pwlan->fixed) {
- if (oldest == NULL || time_after(oldest->last_scanned, pwlan->last_scanned))
+ if (!oldest || time_after(oldest->last_scanned, pwlan->last_scanned))
oldest = pwlan;
}
}
@@ -392,7 +390,6 @@ void update_network(struct wlan_bssid_ex *dst, struct wlan_bssid_ex *src,
dst->PhyInfo.SignalStrength = ss_final;
dst->PhyInfo.SignalQuality = sq_final;
dst->Rssi = rssi_final;
-
}
static void update_current_network(struct adapter *adapter, struct wlan_bssid_ex *pnetwork)
@@ -408,8 +405,8 @@ static void update_current_network(struct adapter *adapter, struct wlan_bssid_ex
}
/*
-Caller must hold pmlmepriv->lock first.
-*/
+ * Caller must hold pmlmepriv->lock first.
+ */
void rtw_update_scanned_network(struct adapter *adapter, struct wlan_bssid_ex *target)
{
struct list_head *plist, *phead;
@@ -434,7 +431,8 @@ void rtw_update_scanned_network(struct adapter *adapter, struct wlan_bssid_ex *t
plist = plist->next;
}
/* If we didn't find a match, then get a new network slot to initialize
- * with this beacon's information */
+ * with this beacon's information
+ */
if (phead == plist) {
if (list_empty(&(pmlmepriv->free_bss_pool.queue))) {
/* If there are no more slots, expire the oldest */
@@ -458,7 +456,7 @@ void rtw_update_scanned_network(struct adapter *adapter, struct wlan_bssid_ex *t
pnetwork = rtw_alloc_network(pmlmepriv); /* will update scan_time */
- if (pnetwork == NULL) {
+ if (!pnetwork) {
RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("\n\n\nsomething wrong here\n\n\n"));
goto exit;
}
@@ -493,7 +491,6 @@ void rtw_update_scanned_network(struct adapter *adapter, struct wlan_bssid_ex *t
exit:
spin_unlock_bh(&queue->lock);
-
}
static void rtw_add_network(struct adapter *adapter,
@@ -527,7 +524,7 @@ static int rtw_is_desired_network(struct adapter *adapter, struct wlan_network *
privacy = pnetwork->network.Privacy;
if (check_fwstate(pmlmepriv, WIFI_UNDER_WPS)) {
- if (rtw_get_wps_ie(pnetwork->network.IEs+_FIXED_IE_LENGTH_, pnetwork->network.IELength-_FIXED_IE_LENGTH_, NULL, &wps_ielen) != NULL)
+ if (rtw_get_wps_ie(pnetwork->network.IEs+_FIXED_IE_LENGTH_, pnetwork->network.IELength-_FIXED_IE_LENGTH_, NULL, &wps_ielen))
return true;
else
return false;
@@ -548,7 +545,6 @@ static int rtw_is_desired_network(struct adapter *adapter, struct wlan_network *
bselected = false;
}
-
return bselected;
}
@@ -558,7 +554,6 @@ void rtw_atimdone_event_callback(struct adapter *adapter, u8 *pbuf)
RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("receive atimdone_evet\n"));
}
-
void rtw_survey_event_callback(struct adapter *adapter, u8 *pbuf)
{
u32 len;
@@ -663,7 +658,7 @@ void rtw_surveydone_event_callback(struct adapter *adapter, u8 *pbuf)
set_fwstate(pmlmepriv, _FW_UNDER_LINKING);
pmlmepriv->to_join = false;
s_ret = rtw_select_and_join_from_scanned_queue(pmlmepriv);
- if (_SUCCESS == s_ret) {
+ if (s_ret == _SUCCESS) {
mod_timer(&pmlmepriv->assoc_timer,
jiffies + msecs_to_jiffies(MAX_JOIN_TIMEOUT));
} else if (s_ret == 2) { /* there is no need to wait for join */
@@ -673,7 +668,7 @@ void rtw_surveydone_event_callback(struct adapter *adapter, u8 *pbuf)
DBG_88E("try_to_join, but select scanning queue fail, to_roaming:%d\n", pmlmepriv->to_roaming);
if (pmlmepriv->to_roaming != 0) {
if (--pmlmepriv->to_roaming == 0 ||
- _SUCCESS != rtw_sitesurvey_cmd(adapter, &pmlmepriv->assoc_ssid, 1, NULL, 0)) {
+ rtw_sitesurvey_cmd(adapter, &pmlmepriv->assoc_ssid, 1, NULL, 0) != _SUCCESS) {
pmlmepriv->to_roaming = 0;
rtw_free_assoc_resources(adapter);
rtw_indicate_disconnect(adapter);
@@ -726,8 +721,8 @@ static void free_scanqueue(struct mlme_priv *pmlmepriv)
}
/*
-*rtw_free_assoc_resources: the caller has to lock pmlmepriv->lock
-*/
+ * rtw_free_assoc_resources: the caller has to lock pmlmepriv->lock
+ */
void rtw_free_assoc_resources(struct adapter *adapter)
{
struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
@@ -738,8 +733,8 @@ void rtw_free_assoc_resources(struct adapter *adapter)
}
/*
-*rtw_free_assoc_resources_locked: the caller has to lock pmlmepriv->lock
-*/
+ * rtw_free_assoc_resources_locked: the caller has to lock pmlmepriv->lock
+ */
void rtw_free_assoc_resources_locked(struct adapter *adapter)
{
struct wlan_network *pwlan = NULL;
@@ -789,8 +784,8 @@ void rtw_free_assoc_resources_locked(struct adapter *adapter)
}
/*
-*rtw_indicate_connect: the caller has to lock pmlmepriv->lock
-*/
+ * rtw_indicate_connect: the caller has to lock pmlmepriv->lock
+ */
void rtw_indicate_connect(struct adapter *padapter)
{
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
@@ -815,8 +810,8 @@ void rtw_indicate_connect(struct adapter *padapter)
}
/*
-*rtw_indicate_disconnect: the caller has to lock pmlmepriv->lock
-*/
+ * rtw_indicate_disconnect: the caller has to lock pmlmepriv->lock
+ */
void rtw_indicate_disconnect(struct adapter *padapter)
{
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
@@ -825,7 +820,6 @@ void rtw_indicate_disconnect(struct adapter *padapter)
_clr_fwstate_(pmlmepriv, _FW_UNDER_LINKING | WIFI_UNDER_WPS);
-
if (pmlmepriv->to_roaming > 0)
_clr_fwstate_(pmlmepriv, _FW_LINKED);
@@ -877,7 +871,7 @@ static struct sta_info *rtw_joinbss_update_stainfo(struct adapter *padapter, str
struct sta_priv *pstapriv = &padapter->stapriv;
psta = rtw_get_stainfo(pstapriv, pnetwork->network.MacAddress);
- if (psta == NULL)
+ if (!psta)
psta = rtw_alloc_stainfo(pstapriv, pnetwork->network.MacAddress);
if (psta) { /* update ptarget_sta */
@@ -959,7 +953,6 @@ static void rtw_joinbss_update_network(struct adapter *padapter, struct wlan_net
cur_network->aid = pnetwork->join_res;
-
rtw_set_signal_stat_timer(&padapter->recvpriv);
padapter->recvpriv.signal_strength = ptarget_wlan->network.PhyInfo.SignalStrength;
padapter->recvpriv.signal_qual = ptarget_wlan->network.PhyInfo.SignalQuality;
@@ -1011,7 +1004,6 @@ void rtw_joinbss_event_prehandle(struct adapter *adapter, u8 *pbuf)
rtw_get_encrypt_decrypt_from_registrypriv(adapter);
-
if (pmlmepriv->assoc_ssid.SsidLength == 0)
RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("@@@@@ joinbss event call back for Any SSid\n"));
else
@@ -1071,11 +1063,10 @@ void rtw_joinbss_event_prehandle(struct adapter *adapter, u8 *pbuf)
goto ignore_joinbss_callback;
}
-
/* s3. find ptarget_sta & update ptarget_sta after update cur_network only for station mode */
if (check_fwstate(pmlmepriv, WIFI_STATION_STATE) == true) {
ptarget_sta = rtw_joinbss_update_stainfo(adapter, pnetwork);
- if (ptarget_sta == NULL) {
+ if (!ptarget_sta) {
RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("Can't update stainfo when joinbss_event callback\n"));
spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
goto ignore_joinbss_callback;
@@ -1145,7 +1136,7 @@ static u8 search_max_mac_id(struct adapter *padapter)
#if defined(CONFIG_88EU_AP_MODE)
if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
for (aid = (pstapriv->max_num_sta); aid > 0; aid--) {
- if (pstapriv->sta_aid[aid-1] != NULL)
+ if (pstapriv->sta_aid[aid-1])
break;
}
mac_id = aid + 1;
@@ -1166,7 +1157,7 @@ void rtw_stassoc_hw_rpt(struct adapter *adapter, struct sta_info *psta)
u16 media_status;
u8 macid;
- if (psta == NULL)
+ if (!psta)
return;
macid = search_max_mac_id(adapter);
@@ -1198,13 +1189,13 @@ void rtw_stassoc_event_callback(struct adapter *adapter, u8 *pbuf)
#endif
/* for AD-HOC mode */
psta = rtw_get_stainfo(&adapter->stapriv, pstassoc->macaddr);
- if (psta != NULL) {
+ if (psta) {
/* the sta have been in sta_info_queue => do nothing */
RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("Error: rtw_stassoc_event_callback: sta has been in sta_hash_queue\n"));
return; /* between drv has received this event before and fw have not yet to set key to CAM_ENTRY) */
}
psta = rtw_alloc_stainfo(&adapter->stapriv, pstassoc->macaddr);
- if (psta == NULL) {
+ if (!psta) {
RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("Can't alloc sta_info when rtw_stassoc_event_callback\n"));
return;
}
@@ -1338,9 +1329,9 @@ void rtw_cpwm_event_callback(struct adapter *padapter, u8 *pbuf)
}
/*
-* _rtw_join_timeout_handler - Timeout/faliure handler for CMD JoinBss
-* @adapter: pointer to struct adapter structure
-*/
+ * _rtw_join_timeout_handler - Timeout/faliure handler for CMD JoinBss
+ * @adapter: pointer to struct adapter structure
+ */
void _rtw_join_timeout_handler (unsigned long data)
{
struct adapter *adapter = (struct adapter *)data;
@@ -1352,7 +1343,6 @@ void _rtw_join_timeout_handler (unsigned long data)
if (adapter->bDriverStopped || adapter->bSurpriseRemoved)
return;
-
spin_lock_bh(&pmlmepriv->lock);
if (pmlmepriv->to_roaming > 0) { /* join timeout caused by roaming */
@@ -1361,7 +1351,7 @@ void _rtw_join_timeout_handler (unsigned long data)
if (pmlmepriv->to_roaming != 0) { /* try another , */
DBG_88E("%s try another roaming\n", __func__);
do_join_r = rtw_do_join(adapter);
- if (_SUCCESS != do_join_r) {
+ if (do_join_r != _SUCCESS) {
DBG_88E("%s roaming do_join return %d\n", __func__, do_join_r);
continue;
}
@@ -1380,9 +1370,9 @@ void _rtw_join_timeout_handler (unsigned long data)
}
/*
-* rtw_scan_timeout_handler - Timeout/Faliure handler for CMD SiteSurvey
-* @adapter: pointer to struct adapter structure
-*/
+ * rtw_scan_timeout_handler - Timeout/Faliure handler for CMD SiteSurvey
+ * @adapter: pointer to struct adapter structure
+ */
void rtw_scan_timeout_handler (unsigned long data)
{
struct adapter *adapter = (struct adapter *)data;
@@ -1437,10 +1427,10 @@ exit:
#define RTW_SCAN_RESULT_EXPIRE 2000
/*
-* Select a new join candidate from the original @param candidate and @param competitor
-* @return true: candidate is updated
-* @return false: candidate is not updated
-*/
+ * Select a new join candidate from the original @param candidate and @param competitor
+ * @return true: candidate is updated
+ * @return false: candidate is not updated
+ */
static int rtw_check_join_candidate(struct mlme_priv *pmlmepriv
, struct wlan_network **candidate, struct wlan_network *competitor)
{
@@ -1448,7 +1438,6 @@ static int rtw_check_join_candidate(struct mlme_priv *pmlmepriv
unsigned long since_scan;
struct adapter *adapter = container_of(pmlmepriv, struct adapter, mlmepriv);
-
/* check bssid, if needed */
if (pmlmepriv->assoc_by_bssid) {
if (memcmp(competitor->network.MacAddress, pmlmepriv->assoc_bssid, ETH_ALEN))
@@ -1491,11 +1480,11 @@ exit:
}
/*
-Calling context:
-The caller of the sub-routine will be in critical section...
-The caller must hold the following spinlock
-pmlmepriv->lock
-*/
+ * Calling context:
+ * The caller of the sub-routine will be in critical section...
+ * The caller must hold the following spinlock
+ * pmlmepriv->lock
+ */
int rtw_select_and_join_from_scanned_queue(struct mlme_priv *pmlmepriv)
{
@@ -1521,7 +1510,7 @@ int rtw_select_and_join_from_scanned_queue(struct mlme_priv *pmlmepriv)
pmlmepriv->pscanned = pmlmepriv->pscanned->next;
rtw_check_join_candidate(pmlmepriv, &candidate, pnetwork);
}
- if (candidate == NULL) {
+ if (!candidate) {
DBG_88E("%s: return _FAIL(candidate==NULL)\n", __func__);
ret = _FAIL;
goto exit;
@@ -1531,7 +1520,6 @@ int rtw_select_and_join_from_scanned_queue(struct mlme_priv *pmlmepriv)
candidate->network.Configuration.DSConfig);
}
-
/* check for situation of _FW_LINKED */
if (check_fwstate(pmlmepriv, _FW_LINKED) == true) {
DBG_88E("%s: _FW_LINKED while ask_for_joinbss!!!\n", __func__);
@@ -1547,8 +1535,8 @@ int rtw_select_and_join_from_scanned_queue(struct mlme_priv *pmlmepriv)
rtw_hal_get_def_var(adapter, HAL_DEF_CURRENT_ANTENNA, &(cur_ant));
DBG_88E("#### Opt_Ant_(%s), cur_Ant(%s)\n",
- (2 == candidate->network.PhyInfo.Optimum_antenna) ? "A" : "B",
- (2 == cur_ant) ? "A" : "B"
+ (candidate->network.PhyInfo.Optimum_antenna == 2) ? "A" : "B",
+ (cur_ant == 2) ? "A" : "B"
);
}
@@ -1929,7 +1917,6 @@ unsigned int rtw_restructure_ht_ie(struct adapter *padapter, u8 *in_ie, u8 *out_
struct ht_priv *phtpriv = &pmlmepriv->htpriv;
u32 rx_packet_offset, max_recvbuf_sz;
-
phtpriv->ht_option = false;
p = rtw_get_ie(in_ie+12, _HT_CAPABILITY_IE_, &ielen, in_len-12);
@@ -2018,17 +2005,10 @@ void rtw_update_ht_cap(struct adapter *padapter, u8 *pie, uint ie_len)
(le16_to_cpu(pmlmeinfo->HT_caps.cap_info) & BIT(1)) &&
(pmlmeinfo->HT_info.infos[0] & BIT(2))) {
int i;
- u8 rf_type;
-
- rtw_hal_get_hwreg(padapter, HW_VAR_RF_TYPE, (u8 *)(&rf_type));
/* update the MCS rates */
- for (i = 0; i < 16; i++) {
- if ((rf_type == RF_1T1R) || (rf_type == RF_1T2R))
- ((u8 *)&pmlmeinfo->HT_caps.mcs)[i] &= MCS_rate_1R[i];
- else
- ((u8 *)&pmlmeinfo->HT_caps.mcs)[i] &= MCS_rate_2R[i];
- }
+ for (i = 0; i < 16; i++)
+ ((u8 *)&pmlmeinfo->HT_caps.mcs)[i] &= MCS_rate_1R[i];
/* switch to the 40M Hz mode according to the AP */
pmlmeext->cur_bwmode = HT_CHANNEL_WIDTH_40;
switch ((pmlmeinfo->HT_info.infos[0] & 0x3)) {
@@ -2072,7 +2052,7 @@ void rtw_issue_addbareq_cmd(struct adapter *padapter, struct xmit_frame *pxmitfr
else
psta = rtw_get_stainfo(&padapter->stapriv, pattrib->ra);
- if (psta == NULL)
+ if (!psta)
return;
phtpriv = &psta->htpriv;
@@ -2081,7 +2061,7 @@ void rtw_issue_addbareq_cmd(struct adapter *padapter, struct xmit_frame *pxmitfr
issued = (phtpriv->agg_enable_bitmap>>priority)&0x1;
issued |= (phtpriv->candidate_tid_bitmap>>priority)&0x1;
- if (0 == issued) {
+ if (issued == 0) {
DBG_88E("rtw_issue_addbareq_cmd, p=%d\n", priority);
psta->htpriv.candidate_tid_bitmap |= BIT((u8)priority);
rtw_addbareq_cmd(padapter, (u8)priority, pattrib->ra);
@@ -2097,6 +2077,7 @@ void rtw_roaming(struct adapter *padapter, struct wlan_network *tgt_network)
_rtw_roaming(padapter, tgt_network);
spin_unlock_bh(&pmlmepriv->lock);
}
+
void _rtw_roaming(struct adapter *padapter, struct wlan_network *tgt_network)
{
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
@@ -2104,12 +2085,12 @@ void _rtw_roaming(struct adapter *padapter, struct wlan_network *tgt_network)
struct wlan_network *pnetwork;
- if (tgt_network != NULL)
+ if (tgt_network)
pnetwork = tgt_network;
else
pnetwork = &pmlmepriv->cur_network;
- if (0 < pmlmepriv->to_roaming) {
+ if (pmlmepriv->to_roaming > 0) {
DBG_88E("roaming from %s(%pM length:%d\n",
pnetwork->network.Ssid.Ssid, pnetwork->network.MacAddress,
pnetwork->network.Ssid.SsidLength);
@@ -2119,13 +2100,13 @@ void _rtw_roaming(struct adapter *padapter, struct wlan_network *tgt_network)
while (1) {
do_join_r = rtw_do_join(padapter);
- if (_SUCCESS == do_join_r) {
+ if (do_join_r == _SUCCESS) {
break;
} else {
DBG_88E("roaming do_join return %d\n", do_join_r);
pmlmepriv->to_roaming--;
- if (0 < pmlmepriv->to_roaming) {
+ if (pmlmepriv->to_roaming > 0) {
continue;
} else {
DBG_88E("%s(%d) -to roaming fail, indicate_disconnect\n", __func__, __LINE__);
diff --git a/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c b/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
index f45af407f76d..88a3a2b9c144 100644
--- a/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
+++ b/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
@@ -47,7 +47,6 @@ extern unsigned char REALTEK_96B_IE[];
/********************************************************
MCS rate definitions
*********************************************************/
-unsigned char MCS_rate_2R[16] = {0xff, 0xff, 0x0, 0x0, 0x01, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0};
unsigned char MCS_rate_1R[16] = {0xff, 0x00, 0x0, 0x0, 0x01, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0};
/********************************************************
@@ -287,7 +286,7 @@ static s32 dump_mgntframe_and_wait_ack(struct adapter *padapter,
static int update_hidden_ssid(u8 *ies, u32 ies_len, u8 hidden_ssid_mode)
{
u8 *ssid_ie;
- int ssid_len_ori;
+ uint ssid_len_ori;
int len_diff = 0;
ssid_ie = rtw_get_ie(ies, WLAN_EID_SSID, &ssid_len_ori, ies_len);
@@ -1027,7 +1026,7 @@ static void issue_assocreq(struct adapter *padapter)
struct ieee80211_hdr *pwlanhdr;
__le16 *fctrl;
unsigned int i, j, ie_len, index = 0;
- unsigned char rf_type, bssrate[NumRates], sta_bssrate[NumRates];
+ unsigned char bssrate[NumRates], sta_bssrate[NumRates];
struct ndis_802_11_var_ie *pIE;
struct registry_priv *pregpriv = &padapter->registrypriv;
struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
@@ -1150,25 +1149,9 @@ static void issue_assocreq(struct adapter *padapter)
/* todo: disable SM power save mode */
pmlmeinfo->HT_caps.cap_info |= cpu_to_le16(0x000c);
- rtw_hal_get_hwreg(padapter, HW_VAR_RF_TYPE, (u8 *)(&rf_type));
- switch (rf_type) {
- case RF_1T1R:
- if (pregpriv->rx_stbc)
- pmlmeinfo->HT_caps.cap_info |= cpu_to_le16(0x0100);/* RX STBC One spatial stream */
- memcpy((u8 *)&pmlmeinfo->HT_caps.mcs, MCS_rate_1R, 16);
- break;
- case RF_2T2R:
- case RF_1T2R:
- default:
- if ((pregpriv->rx_stbc == 0x3) ||/* enable for 2.4/5 GHz */
- ((pmlmeext->cur_wireless_mode & WIRELESS_11_24N) && (pregpriv->rx_stbc == 0x1)) || /* enable for 2.4GHz */
- (pregpriv->wifi_spec == 1)) {
- DBG_88E("declare supporting RX STBC\n");
- pmlmeinfo->HT_caps.cap_info |= cpu_to_le16(0x0200);/* RX STBC two spatial stream */
- }
- memcpy(&pmlmeinfo->HT_caps.mcs, MCS_rate_2R, 16);
- break;
- }
+ if (pregpriv->rx_stbc)
+ pmlmeinfo->HT_caps.cap_info |= cpu_to_le16(0x0100);/* RX STBC One spatial stream */
+ memcpy((u8 *)&pmlmeinfo->HT_caps.mcs, MCS_rate_1R, 16);
pframe = rtw_set_ie(pframe, _HT_CAPABILITY_IE_, ie_len, (u8 *)(&(pmlmeinfo->HT_caps)), &(pattrib->pktlen));
}
}
@@ -1803,7 +1786,7 @@ static void issue_action_BSSCoexistPacket(struct adapter *padapter)
plist = phead->next;
while (phead != plist) {
- int len;
+ uint len;
u8 *p;
struct wlan_bssid_ex *pbss_network;
@@ -2573,7 +2556,7 @@ static unsigned int OnProbeReq(struct adapter *padapter,
!check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE|WIFI_AP_STATE))
return _SUCCESS;
- p = rtw_get_ie(pframe + WLAN_HDR_A3_LEN + _PROBEREQ_IE_OFFSET_, _SSID_IE_, (int *)&ielen,
+ p = rtw_get_ie(pframe + WLAN_HDR_A3_LEN + _PROBEREQ_IE_OFFSET_, _SSID_IE_, &ielen,
len - WLAN_HDR_A3_LEN - _PROBEREQ_IE_OFFSET_);
/* check (wildcard) SSID */
@@ -2810,7 +2793,7 @@ static unsigned int OnAuth(struct adapter *padapter,
/* checking for challenging txt... */
DBG_88E("checking for challenging txt...\n");
- p = rtw_get_ie(pframe + WLAN_HDR_A3_LEN + 4 + _AUTH_IE_OFFSET_, _CHLGETXT_IE_, (int *)&ie_len,
+ p = rtw_get_ie(pframe + WLAN_HDR_A3_LEN + 4 + _AUTH_IE_OFFSET_, _CHLGETXT_IE_, &ie_len,
len - WLAN_HDR_A3_LEN - _AUTH_IE_OFFSET_ - 4);
if ((p == NULL) || (ie_len <= 0)) {
@@ -2904,7 +2887,7 @@ static unsigned int OnAuthClient(struct adapter *padapter,
if (seq == 2) {
if (pmlmeinfo->auth_algo == dot11AuthAlgrthm_Shared) {
/* legendary shared system */
- p = rtw_get_ie(pframe + WLAN_HDR_A3_LEN + _AUTH_IE_OFFSET_, _CHLGETXT_IE_, (int *)&len,
+ p = rtw_get_ie(pframe + WLAN_HDR_A3_LEN + _AUTH_IE_OFFSET_, _CHLGETXT_IE_, &len,
pkt_len - WLAN_HDR_A3_LEN - _AUTH_IE_OFFSET_);
if (p == NULL)
@@ -2948,7 +2931,7 @@ static unsigned int OnAssocReq(struct adapter *padapter,
struct sta_info *pstat;
unsigned char reassoc, *p, *pos, *wpa_ie;
unsigned char WMM_IE[] = {0x00, 0x50, 0xf2, 0x02, 0x00, 0x01};
- int i, ie_len, wpa_ie_len, left;
+ int i, wpa_ie_len, left;
unsigned char supportRate[16];
int supportRateNum;
unsigned short status = _STATS_SUCCESSFUL_;
@@ -2960,7 +2943,7 @@ static unsigned int OnAssocReq(struct adapter *padapter,
struct wlan_bssid_ex *cur = &(pmlmeinfo->network);
struct sta_priv *pstapriv = &padapter->stapriv;
u8 *pframe = precv_frame->pkt->data;
- uint pkt_len = precv_frame->pkt->len;
+ uint ie_len, pkt_len = precv_frame->pkt->len;
if ((pmlmeinfo->state&0x03) != WIFI_FW_AP_STATE)
return _FAIL;
diff --git a/drivers/staging/rtl8188eu/core/rtw_recv.c b/drivers/staging/rtl8188eu/core/rtw_recv.c
index 53dc33c3f913..c6c4404e717b 100644
--- a/drivers/staging/rtl8188eu/core/rtw_recv.c
+++ b/drivers/staging/rtl8188eu/core/rtw_recv.c
@@ -392,7 +392,7 @@ static struct recv_frame *decryptor(struct adapter *padapter,
}
}
- if ((prxattrib->encrypt > 0) && ((prxattrib->bdecrypted == 0) || (psecuritypriv->sw_decrypt))) {
+ if ((prxattrib->encrypt > 0) && (prxattrib->bdecrypted == 0)) {
psecuritypriv->hw_decrypted = false;
switch (prxattrib->encrypt) {
@@ -452,7 +452,7 @@ static struct recv_frame *portctrl(struct adapter *adapter,
if (auth_alg == 2) {
/* get ether_type */
- ptr = ptr + pfhdr->attrib.hdrlen + LLC_HEADER_SIZE;
+ ptr = ptr + pfhdr->attrib.hdrlen + LLC_HEADER_SIZE + pfhdr->attrib.iv_len;
memcpy(&be_tmp, ptr, 2);
ether_type = ntohs(be_tmp);
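
The portctrl() hunk above is a functional fix, not a style change: in an encrypted data frame the EtherType sits after the 802.11 MAC header, the per-frame security IV (typically 4 or 8 bytes, depending on the cipher) and the LLC/SNAP header, so the read offset has to include iv_len as well. A hypothetical helper spelling out the same offset arithmetic (the field widths come from the driver's rx attributes; the function name is made up for illustration):

    static u16 read_ether_type(const u8 *frame, u32 hdrlen, u32 iv_len,
                               u32 llc_len)
    {
        __be16 be_tmp;

        /* MAC header, then IV, then LLC/SNAP, then the 2-byte EtherType */
        memcpy(&be_tmp, frame + hdrlen + iv_len + llc_len, sizeof(be_tmp));
        return ntohs(be_tmp);
    }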
@@ -1979,7 +1979,7 @@ static int recv_func(struct adapter *padapter, struct recv_frame *rframe)
/* check if need to enqueue into uc_swdec_pending_queue*/
if (check_fwstate(mlmepriv, WIFI_STATION_STATE) &&
!IS_MCAST(prxattrib->ra) && prxattrib->encrypt > 0 &&
- (prxattrib->bdecrypted == 0 || psecuritypriv->sw_decrypt) &&
+ prxattrib->bdecrypted == 0 &&
!is_wep_enc(psecuritypriv->dot11PrivacyAlgrthm) &&
!psecuritypriv->busetkipkey) {
rtw_enqueue_recvframe(rframe, &padapter->recvpriv.uc_swdec_pending_queue);
@@ -2050,19 +2050,13 @@ static void rtw_signal_stat_timer_hdl(unsigned long data)
if (check_fwstate(&adapter->mlmepriv, _FW_UNDER_SURVEY) == false) {
tmp_s = avg_signal_strength +
(_alpha - 1) * recvpriv->signal_strength;
- if (tmp_s % _alpha)
- tmp_s = tmp_s / _alpha + 1;
- else
- tmp_s = tmp_s / _alpha;
+ tmp_s = DIV_ROUND_UP(tmp_s, _alpha);
if (tmp_s > 100)
tmp_s = 100;
tmp_q = avg_signal_qual +
(_alpha - 1) * recvpriv->signal_qual;
- if (tmp_q % _alpha)
- tmp_q = tmp_q / _alpha + 1;
- else
- tmp_q = tmp_q / _alpha;
+ tmp_q = DIV_ROUND_UP(tmp_q, _alpha);
if (tmp_q > 100)
tmp_q = 100;
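
The last hunk replaces the open-coded "divide and round up" in rtw_signal_stat_timer_hdl() with the kernel's DIV_ROUND_UP() helper, which expands to ((n) + (d) - 1) / (d) and therefore adds one exactly when the division leaves a remainder, the same result the removed branch produced for these non-negative averages. For instance:

    /* old form: 21 % 5 != 0, so 21 / 5 + 1 == 5          */
    /* new form: DIV_ROUND_UP(21, 5) == (21 + 5 - 1) / 5 == 5 */
    tmp_s = DIV_ROUND_UP(tmp_s, _alpha);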
diff --git a/drivers/staging/rtl8188eu/core/rtw_wlan_util.c b/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
index f6f1b09466a8..2db8a5d11c0d 100644
--- a/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
+++ b/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
@@ -708,7 +708,6 @@ static void bwmode_update_check(struct adapter *padapter, struct ndis_802_11_var
void HT_caps_handler(struct adapter *padapter, struct ndis_802_11_var_ie *pIE)
{
unsigned int i;
- u8 rf_type;
u8 max_AMPDU_len, min_MPDU_spacing;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
@@ -744,15 +743,9 @@ void HT_caps_handler(struct adapter *padapter, struct ndis_802_11_var_ie *pIE)
}
}
- rtw_hal_get_hwreg(padapter, HW_VAR_RF_TYPE, (u8 *)(&rf_type));
-
/* update the MCS rates */
- for (i = 0; i < 16; i++) {
- if ((rf_type == RF_1T1R) || (rf_type == RF_1T2R))
- ((u8 *)&pmlmeinfo->HT_caps.mcs)[i] &= MCS_rate_1R[i];
- else
- ((u8 *)&pmlmeinfo->HT_caps.mcs)[i] &= MCS_rate_2R[i];
- }
+ for (i = 0; i < 16; i++)
+ ((u8 *)&pmlmeinfo->HT_caps.mcs)[i] &= MCS_rate_1R[i];
}
void HT_info_handler(struct adapter *padapter, struct ndis_802_11_var_ie *pIE)
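
HT_caps_handler() here and rtw_update_ht_cap() in rtw_mlme.c make the same simplification: because the RTL8188EU is a single-stream (1T1R) part (the HW_VAR_RF_TYPE case of rtw_hal_get_hwreg(), removed later in usb_halinit.c, always reported RF_1T1R), the rf_type query and the MCS_rate_2R table are dropped and the peer's Rx MCS bitmap is always masked with MCS_rate_1R. A rough sketch of what that mask keeps, where mcs_map stands for the 16-byte MCS set taken from the HT capabilities element:

    unsigned char MCS_rate_1R[16] = {0xff, 0x00, 0x0, 0x0, 0x01, 0x0, 0x0, 0x0,
                                     0x0,  0x0,  0x0, 0x0, 0x0,  0x0, 0x0, 0x0};
    int i;

    /* byte 0 keeps MCS 0-7 (one spatial stream), the 0x01 in byte 4 keeps
     * MCS 32; every multi-stream rate is cleared */
    for (i = 0; i < 16; i++)
        mcs_map[i] &= MCS_rate_1R[i];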
diff --git a/drivers/staging/rtl8188eu/core/rtw_xmit.c b/drivers/staging/rtl8188eu/core/rtw_xmit.c
index 630fdc33d58a..be2f46eb9f78 100644
--- a/drivers/staging/rtl8188eu/core/rtw_xmit.c
+++ b/drivers/staging/rtl8188eu/core/rtw_xmit.c
@@ -587,15 +587,13 @@ static s32 update_attrib(struct adapter *padapter, struct sk_buff *pkt, struct p
}
RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_,
- ("update_attrib: encrypt=%d securitypriv.sw_encrypt=%d\n",
- pattrib->encrypt, padapter->securitypriv.sw_encrypt));
+ ("update_attrib: encrypt=%d\n", pattrib->encrypt));
- if (pattrib->encrypt &&
- (padapter->securitypriv.sw_encrypt || !psecuritypriv->hw_decrypted)) {
+ if (pattrib->encrypt && !psecuritypriv->hw_decrypted) {
pattrib->bswenc = true;
RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_,
- ("update_attrib: encrypt=%d securitypriv.hw_decrypted=%d bswenc = true\n",
- pattrib->encrypt, padapter->securitypriv.sw_encrypt));
+ ("update_attrib: encrypt=%d bswenc = true\n",
+ pattrib->encrypt));
} else {
pattrib->bswenc = false;
RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("update_attrib: bswenc = false\n"));
@@ -1141,9 +1139,8 @@ s32 rtw_put_snap(u8 *data, u16 h_proto)
void rtw_update_protection(struct adapter *padapter, u8 *ie, uint ie_len)
{
- uint protection;
+ uint protection, erp_len;
u8 *perp;
- int erp_len;
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
struct registry_priv *pregistrypriv = &padapter->registrypriv;
@@ -1763,20 +1760,20 @@ int xmitframe_enqueue_for_sleeping_sta(struct adapter *padapter, struct xmit_fra
switch (pattrib->priority) {
case 1:
case 2:
- wmmps_ac = psta->uapsd_bk&BIT(0);
+ wmmps_ac = psta->uapsd_bk & BIT(0);
break;
case 4:
case 5:
- wmmps_ac = psta->uapsd_vi&BIT(0);
+ wmmps_ac = psta->uapsd_vi & BIT(0);
break;
case 6:
case 7:
- wmmps_ac = psta->uapsd_vo&BIT(0);
+ wmmps_ac = psta->uapsd_vo & BIT(0);
break;
case 0:
case 3:
default:
- wmmps_ac = psta->uapsd_be&BIT(0);
+ wmmps_ac = psta->uapsd_be & BIT(0);
break;
}
@@ -1890,20 +1887,20 @@ void wakeup_sta_to_xmit(struct adapter *padapter, struct sta_info *psta)
switch (pxmitframe->attrib.priority) {
case 1:
case 2:
- wmmps_ac = psta->uapsd_bk&BIT(1);
+ wmmps_ac = psta->uapsd_bk & BIT(1);
break;
case 4:
case 5:
- wmmps_ac = psta->uapsd_vi&BIT(1);
+ wmmps_ac = psta->uapsd_vi & BIT(1);
break;
case 6:
case 7:
- wmmps_ac = psta->uapsd_vo&BIT(1);
+ wmmps_ac = psta->uapsd_vo & BIT(1);
break;
case 0:
case 3:
default:
- wmmps_ac = psta->uapsd_be&BIT(1);
+ wmmps_ac = psta->uapsd_be & BIT(1);
break;
}
@@ -2016,20 +2013,20 @@ void xmit_delivery_enabled_frames(struct adapter *padapter, struct sta_info *pst
switch (pxmitframe->attrib.priority) {
case 1:
case 2:
- wmmps_ac = psta->uapsd_bk&BIT(1);
+ wmmps_ac = psta->uapsd_bk & BIT(1);
break;
case 4:
case 5:
- wmmps_ac = psta->uapsd_vi&BIT(1);
+ wmmps_ac = psta->uapsd_vi & BIT(1);
break;
case 6:
case 7:
- wmmps_ac = psta->uapsd_vo&BIT(1);
+ wmmps_ac = psta->uapsd_vo & BIT(1);
break;
case 0:
case 3:
default:
- wmmps_ac = psta->uapsd_be&BIT(1);
+ wmmps_ac = psta->uapsd_be & BIT(1);
break;
}
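
The update_attrib() hunk at the top of the rtw_xmit.c diff belongs to a wider cleanup in this series: the default-off rtw_software_encrypt/rtw_software_decrypt module parameters and the sw_encrypt/sw_decrypt fields they fed (removed from registry_priv, security_priv and os_intfs.c further down) are gone, so the software-encryption decision now depends only on whether hardware decryption is in effect, and rtw_dump_xframe() in rtl8188eu_xmit.c always reserves room for the ICV. The transmit-side decision reduces to the equivalent of:

    /* sketch of the simplified update_attrib() logic */
    pattrib->bswenc = pattrib->encrypt && !psecuritypriv->hw_decrypted;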
diff --git a/drivers/staging/rtl8188eu/hal/Hal8188ERateAdaptive.c b/drivers/staging/rtl8188eu/hal/Hal8188ERateAdaptive.c
index 201c15b07f9e..81bf4944ef44 100644
--- a/drivers/staging/rtl8188eu/hal/Hal8188ERateAdaptive.c
+++ b/drivers/staging/rtl8188eu/hal/Hal8188ERateAdaptive.c
@@ -733,7 +733,7 @@ void ODM_RA_TxRPT2Handle_8188E(struct odm_dm_struct *dm_odm, u8 *TxRPT_Buf, u16
pRAInfo->RTY[0], pRAInfo->RTY[1],
pRAInfo->RTY[2], pRAInfo->RTY[3],
pRAInfo->RTY[4], pRAInfo->DROP,
- macid_entry0 , macid_entry1));
+ macid_entry0, macid_entry1));
if (pRAInfo->PTActive) {
if (pRAInfo->RAstage < 5)
odm_RateDecision_8188E(dm_odm, pRAInfo);
diff --git a/drivers/staging/rtl8188eu/hal/odm.c b/drivers/staging/rtl8188eu/hal/odm.c
index 16476e735011..ec8aae76bf40 100644
--- a/drivers/staging/rtl8188eu/hal/odm.c
+++ b/drivers/staging/rtl8188eu/hal/odm.c
@@ -832,6 +832,7 @@ void odm_RefreshRateAdaptiveMaskCE(struct odm_dm_struct *pDM_Odm)
for (i = 0; i < ODM_ASSOCIATE_ENTRY_NUM; i++) {
struct sta_info *pstat = pDM_Odm->pODM_StaInfo[i];
+
if (IS_STA_VALID(pstat)) {
if (ODM_RAStateCheck(pDM_Odm, pstat->rssi_stat.UndecoratedSmoothedPWDB, false, &pstat->rssi_level)) {
ODM_RT_TRACE(pDM_Odm, ODM_COMP_RA_MASK, ODM_DBG_LOUD,
@@ -896,6 +897,7 @@ void odm_DynamicTxPowerInit(struct odm_dm_struct *pDM_Odm)
{
struct adapter *Adapter = pDM_Odm->Adapter;
struct dm_priv *pdmpriv = &Adapter->HalData->dmpriv;
+
pdmpriv->bDynamicTxPowerEnable = false;
pdmpriv->LastDTPLvl = TxHighPwrLevel_Normal;
pdmpriv->DynamicTxHighPowerLvl = TxHighPwrLevel_Normal;
@@ -1052,6 +1054,7 @@ void odm_HwAntDiv(struct odm_dm_struct *pDM_Odm)
void ODM_EdcaTurboInit(struct odm_dm_struct *pDM_Odm)
{
struct adapter *Adapter = pDM_Odm->Adapter;
+
pDM_Odm->DM_EDCA_Table.bCurrentTurboEDCA = false;
pDM_Odm->DM_EDCA_Table.bIsCurRDLState = false;
Adapter->recvpriv.bIsAnyNonBEPkts = false;
diff --git a/drivers/staging/rtl8188eu/hal/odm_RTL8188E.c b/drivers/staging/rtl8188eu/hal/odm_RTL8188E.c
index dd9b902c8ae3..91e0f6cee8f4 100644
--- a/drivers/staging/rtl8188eu/hal/odm_RTL8188E.c
+++ b/drivers/staging/rtl8188eu/hal/odm_RTL8188E.c
@@ -81,9 +81,9 @@ static void dm_trx_hw_antenna_div_init(struct odm_dm_struct *dm_odm)
/* antenna mapping table */
if (!dm_odm->bIsMPChip) { /* testchip */
- phy_set_bb_reg(adapter, ODM_REG_RX_DEFUALT_A_11N,
+ phy_set_bb_reg(adapter, ODM_REG_RX_DEFAULT_A_11N,
BIT(10) | BIT(9) | BIT(8), 1);
- phy_set_bb_reg(adapter, ODM_REG_RX_DEFUALT_A_11N,
+ phy_set_bb_reg(adapter, ODM_REG_RX_DEFAULT_A_11N,
BIT(13) | BIT(12) | BIT(11), 2);
} else { /* MPchip */
phy_set_bb_reg(adapter, ODM_REG_ANT_MAPPING1_11N, bMaskDWord,
@@ -248,6 +248,7 @@ void rtl88eu_dm_ant_sel_statistics(struct odm_dm_struct *dm_odm,
u8 antsel_tr_mux, u32 mac_id, u8 rx_pwdb_all)
{
struct fast_ant_train *dm_fat_tbl = &dm_odm->DM_FatTable;
+
if (dm_odm->AntDivType == CG_TRX_HW_ANTDIV) {
if (antsel_tr_mux == MAIN_ANT_CG_TRX) {
dm_fat_tbl->MainAnt_Sum[mac_id] += rx_pwdb_all;
diff --git a/drivers/staging/rtl8188eu/hal/phy.c b/drivers/staging/rtl8188eu/hal/phy.c
index 35c91e06cc47..054f5996f60d 100644
--- a/drivers/staging/rtl8188eu/hal/phy.c
+++ b/drivers/staging/rtl8188eu/hal/phy.c
@@ -1005,6 +1005,7 @@ static void phy_iq_calibrate(struct adapter *adapt, s32 result[][8],
rFPGA0_XB_RFInterfaceOE, rFPGA0_RFMOD};
u32 retry_count = 9;
+
if (*(dm_odm->mp_mode) == 1)
retry_count = 9;
else
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c b/drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c
index d0f59b7836f1..9b7ba9bffb0d 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c
@@ -210,6 +210,7 @@ void rtl8188e_set_FwMediaStatus_cmd(struct adapter *adapt, __le16 mstatus_rpt)
{
u8 opmode, macid;
u16 mst_rpt = le16_to_cpu(mstatus_rpt);
+
opmode = (u8)mst_rpt;
macid = (u8)(mst_rpt >> 8);
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c b/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c
index 0ce7db723a5d..3673f573ac3d 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c
@@ -70,6 +70,7 @@ s32 iol_execute(struct adapter *padapter, u8 control)
static s32 iol_InitLLTTable(struct adapter *padapter, u8 txpktbuf_bndy)
{
s32 rst = _SUCCESS;
+
iol_mode_enable(padapter, 1);
usb_write8(padapter, REG_TDECTRL+1, txpktbuf_bndy);
rst = iol_execute(padapter, CMD_INIT_LLT);
@@ -459,7 +460,7 @@ void Hal_ReadPowerSavingMode88E(struct adapter *padapter, u8 *hwinfo, bool AutoL
padapter->pwrctrlpriv.bSupportRemoteWakeup = (hwinfo[EEPROM_USB_OPTIONAL_FUNCTION0] & BIT(1)) ? true : false;
DBG_88E("%s...bHWPwrPindetect(%x)-bHWPowerdown(%x) , bSupportRemoteWakeup(%x)\n", __func__,
- padapter->pwrctrlpriv.bHWPwrPindetect, padapter->pwrctrlpriv.bHWPowerdown , padapter->pwrctrlpriv.bSupportRemoteWakeup);
+ padapter->pwrctrlpriv.bHWPwrPindetect, padapter->pwrctrlpriv.bHWPowerdown, padapter->pwrctrlpriv.bSupportRemoteWakeup);
DBG_88E("### PS params => power_mgnt(%x), usbss_enable(%x) ###\n", padapter->registrypriv.power_mgnt, padapter->registrypriv.usbss_enable);
}
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c b/drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c
index 53e312aaefb5..a9912b60eb59 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c
@@ -346,7 +346,7 @@ static s32 rtw_dump_xframe(struct adapter *adapt, struct xmit_frame *pxmitframe)
struct xmit_buf *pxmitbuf = pxmitframe->pxmitbuf;
struct pkt_attrib *pattrib = &pxmitframe->attrib;
struct xmit_priv *pxmitpriv = &adapt->xmitpriv;
- struct security_priv *psecuritypriv = &adapt->securitypriv;
+
if ((pxmitframe->frame_tag == DATA_FRAMETAG) &&
(pxmitframe->attrib.ether_type != 0x0806) &&
(pxmitframe->attrib.ether_type != 0x888e) &&
@@ -365,7 +365,7 @@ static s32 rtw_dump_xframe(struct adapter *adapt, struct xmit_frame *pxmitframe)
RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("pattrib->nr_frags=%d\n", pattrib->nr_frags));
sz = pxmitpriv->frag_len;
- sz = sz - 4 - (psecuritypriv->sw_encrypt ? 0 : pattrib->icv_len);
+ sz = sz - 4 - pattrib->icv_len;
} else {
/* no frag */
sz = pattrib->last_txcmdsz;
diff --git a/drivers/staging/rtl8188eu/hal/usb_halinit.c b/drivers/staging/rtl8188eu/hal/usb_halinit.c
index 3675edb61942..674ac5396d00 100644
--- a/drivers/staging/rtl8188eu/hal/usb_halinit.c
+++ b/drivers/staging/rtl8188eu/hal/usb_halinit.c
@@ -337,6 +337,7 @@ static void _InitTransferPageSize(struct adapter *Adapter)
/* Tx page size is always 128. */
u8 value8;
+
value8 = _PSRX(PBP_128) | _PSTX(PBP_128);
usb_write8(Adapter, REG_PBP, value8);
}
@@ -1361,6 +1362,7 @@ void rtw_hal_set_hwreg(struct adapter *Adapter, u8 variable, u8 *val)
if (*((u8 *)val)) { /* under sitesurvey */
/* config RCR to receive different BSSID & not to receive data frame */
u32 v = usb_read32(Adapter, REG_RCR);
+
v &= ~(RCR_CBSSID_BCN);
usb_write32(Adapter, REG_RCR, v);
/* reject all data frame */
@@ -1514,6 +1516,7 @@ void rtw_hal_set_hwreg(struct adapter *Adapter, u8 variable, u8 *val)
case HW_VAR_CAM_WRITE:
{
u32 cmd;
+
u32 *cam_val = (u32 *)val;
usb_write32(Adapter, WCAMI, cam_val[0]);
@@ -1618,6 +1621,7 @@ void rtw_hal_set_hwreg(struct adapter *Adapter, u8 variable, u8 *val)
case HW_VAR_RXDMA_AGG_PG_TH:
{
u8 threshold = *((u8 *)val);
+
if (threshold == 0)
threshold = haldata->UsbRxAggPageCount;
usb_write8(Adapter, REG_RXDMA_AGG_PG_TH, threshold);
@@ -1639,6 +1643,7 @@ void rtw_hal_set_hwreg(struct adapter *Adapter, u8 variable, u8 *val)
case HW_VAR_H2C_FW_JOINBSSRPT:
{
u8 mstatus = (*(u8 *)val);
+
rtl8188e_set_FwJoinBssReport_cmd(Adapter, mstatus);
}
break;
@@ -1661,6 +1666,7 @@ void rtw_hal_set_hwreg(struct adapter *Adapter, u8 variable, u8 *val)
case HW_VAR_RPT_TIMER_SETTING:
{
u16 min_rpt_time = (*(u16 *)val);
+
ODM_RA_Set_TxRPT_Time(podmpriv, min_rpt_time);
}
break;
@@ -1717,6 +1723,7 @@ void rtw_hal_set_hwreg(struct adapter *Adapter, u8 variable, u8 *val)
case HW_VAR_TX_RPT_MAX_MACID:
{
u8 maxMacid = *val;
+
DBG_88E("### MacID(%d),Set Max Tx RPT MID(%d)\n", maxMacid, maxMacid+1);
usb_write8(Adapter, REG_TX_RPT_CTRL+1, maxMacid+1);
}
@@ -1745,9 +1752,6 @@ void rtw_hal_get_hwreg(struct adapter *Adapter, u8 variable, u8 *val)
/* BCN_VALID, BIT16 of REG_TDECTRL = BIT0 of REG_TDECTRL+2 */
val[0] = (BIT(0) & usb_read8(Adapter, REG_TDECTRL+2)) ? true : false;
break;
- case HW_VAR_RF_TYPE:
- val[0] = RF_1T1R;
- break;
case HW_VAR_FWLPS_RF_ON:
{
/* When we halt NIC, we should check if FW LPS is leave. */
@@ -1757,6 +1761,7 @@ void rtw_hal_get_hwreg(struct adapter *Adapter, u8 variable, u8 *val)
val[0] = true;
} else {
u32 valRCR;
+
valRCR = usb_read32(Adapter, REG_RCR);
valRCR &= 0x00070000;
if (valRCR)
@@ -1802,6 +1807,7 @@ u8 rtw_hal_get_def_var(
struct mlme_priv *pmlmepriv = &Adapter->mlmepriv;
struct sta_priv *pstapriv = &Adapter->stapriv;
struct sta_info *psta;
+
psta = rtw_get_stainfo(pstapriv, pmlmepriv->cur_network.network.MacAddress);
if (psta)
*((int *)pValue) = psta->rssi_stat.UndecoratedSmoothedPWDB;
@@ -1828,18 +1834,21 @@ u8 rtw_hal_get_def_var(
case HAL_DEF_RA_DECISION_RATE:
{
u8 MacID = *((u8 *)pValue);
+
*((u8 *)pValue) = ODM_RA_GetDecisionRate_8188E(&haldata->odmpriv, MacID);
}
break;
case HAL_DEF_RA_SGI:
{
u8 MacID = *((u8 *)pValue);
+
*((u8 *)pValue) = ODM_RA_GetShortGI_8188E(&haldata->odmpriv, MacID);
}
break;
case HAL_DEF_PT_PWR_STATUS:
{
u8 MacID = *((u8 *)pValue);
+
*((u8 *)pValue) = ODM_RA_GetHwPwrStatus_8188E(&haldata->odmpriv, MacID);
}
break;
@@ -1849,6 +1858,7 @@ u8 rtw_hal_get_def_var(
case HW_DEF_RA_INFO_DUMP:
{
u8 entry_id = *((u8 *)pValue);
+
if (check_fwstate(&Adapter->mlmepriv, _FW_LINKED)) {
DBG_88E("============ RA status check ===================\n");
DBG_88E("Mac_id:%d , RateID = %d, RAUseRate = 0x%08x, RateSGI = %d, DecisionRate = 0x%02x ,PTStage = %d\n",
@@ -1864,6 +1874,7 @@ u8 rtw_hal_get_def_var(
case HW_DEF_ODM_DBG_FLAG:
{
struct odm_dm_struct *dm_ocm = &haldata->odmpriv;
+
pr_info("dm_ocm->DebugComponents = 0x%llx\n", dm_ocm->DebugComponents);
}
break;
diff --git a/drivers/staging/rtl8188eu/include/Hal8188EPhyReg.h b/drivers/staging/rtl8188eu/include/Hal8188EPhyReg.h
index 344c73d1081b..04159a9f90d3 100644
--- a/drivers/staging/rtl8188eu/include/Hal8188EPhyReg.h
+++ b/drivers/staging/rtl8188eu/include/Hal8188EPhyReg.h
@@ -179,7 +179,7 @@
/* RxIQ DC offset, Rx digital filter, DC notch filter */
#define rOFDM0_XARxAFE 0xc10
-#define rOFDM0_XARxIQImbalance 0xc14 /* RxIQ imblance matrix */
+#define rOFDM0_XARxIQImbalance 0xc14 /* RxIQ imbalance matrix */
#define rOFDM0_XBRxAFE 0xc18
#define rOFDM0_XBRxIQImbalance 0xc1c
#define rOFDM0_XCRxAFE 0xc20
diff --git a/drivers/staging/rtl8188eu/include/drv_types.h b/drivers/staging/rtl8188eu/include/drv_types.h
index 0fd2a2d9be20..c3517c0903ca 100644
--- a/drivers/staging/rtl8188eu/include/drv_types.h
+++ b/drivers/staging/rtl8188eu/include/drv_types.h
@@ -65,8 +65,6 @@ struct registry_priv {
u8 ips_mode;
u8 smart_ps;
u8 mp_mode;
- u8 software_encrypt;
- u8 software_decrypt;
u8 acm_method;
/* UAPSD */
u8 wmm_enable;
diff --git a/drivers/staging/rtl8188eu/include/hal_intf.h b/drivers/staging/rtl8188eu/include/hal_intf.h
index e1114a95d442..dfdbd0254886 100644
--- a/drivers/staging/rtl8188eu/include/hal_intf.h
+++ b/drivers/staging/rtl8188eu/include/hal_intf.h
@@ -57,7 +57,6 @@ enum hw_variables {
HW_VAR_ACK_PREAMBLE,
HW_VAR_SEC_CFG,
HW_VAR_BCN_VALID,
- HW_VAR_RF_TYPE,
HW_VAR_DM_FUNC_OP,
HW_VAR_DM_FUNC_SET,
HW_VAR_DM_FUNC_CLR,
diff --git a/drivers/staging/rtl8188eu/include/ieee80211.h b/drivers/staging/rtl8188eu/include/ieee80211.h
index fc58621368c1..22ab0c43e376 100644
--- a/drivers/staging/rtl8188eu/include/ieee80211.h
+++ b/drivers/staging/rtl8188eu/include/ieee80211.h
@@ -857,12 +857,12 @@ enum secondary_ch_offset {
SCB = 3, /* secondary channel below */
};
-u8 *rtw_get_ie(u8 *pbuf, int index, int *len, int limit);
+u8 *rtw_get_ie(u8 *pbuf, int index, uint *len, int limit);
void rtw_set_supported_rate(u8 *SupportedRates, uint mode);
-unsigned char *rtw_get_wpa_ie(unsigned char *pie, int *wpa_ie_len, int limit);
-unsigned char *rtw_get_wpa2_ie(unsigned char *pie, int *rsn_ie_len, int limit);
+unsigned char *rtw_get_wpa_ie(unsigned char *pie, uint *wpa_ie_len, int limit);
+unsigned char *rtw_get_wpa2_ie(unsigned char *pie, uint *rsn_ie_len, int limit);
int rtw_get_wpa_cipher_suite(u8 *s);
int rtw_get_wpa2_cipher_suite(u8 *s);
int rtw_get_wapi_ie(u8 *in_ie, uint in_len, u8 *wapi_ie, u16 *wapi_len);
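
The prototype changes above make the IE-length out-parameters unsigned, matching the uint lengths the callers already track; this is what lets OnProbeReq(), OnAuth() and OnAuthClient() in rtw_mlme_ext.c drop their (int *)&ielen casts earlier in this diff. A minimal usage sketch under the new signature, where ies and ies_len stand for any information-element buffer and its length:

    uint ssid_len = 0;
    u8 *ssid_ie;

    ssid_ie = rtw_get_ie(ies, WLAN_EID_SSID, &ssid_len, ies_len);
    if (!ssid_ie || ssid_len == 0)
        DBG_88E("no SSID element found\n");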
diff --git a/drivers/staging/rtl8188eu/include/odm_RegDefine11N.h b/drivers/staging/rtl8188eu/include/odm_RegDefine11N.h
index c82c09013487..f46f7d43ce00 100644
--- a/drivers/staging/rtl8188eu/include/odm_RegDefine11N.h
+++ b/drivers/staging/rtl8188eu/include/odm_RegDefine11N.h
@@ -41,8 +41,8 @@
#define ODM_REG_TX_ANT_CTRL_11N 0x80C
#define ODM_REG_BB_PWR_SAV5_11N 0x818
#define ODM_REG_CCK_RPT_FORMAT_11N 0x824
-#define ODM_REG_RX_DEFUALT_A_11N 0x858
-#define ODM_REG_RX_DEFUALT_B_11N 0x85A
+#define ODM_REG_RX_DEFAULT_A_11N 0x858
+#define ODM_REG_RX_DEFAULT_B_11N 0x85A
#define ODM_REG_BB_PWR_SAV3_11N 0x85C
#define ODM_REG_ANTSEL_CTRL_11N 0x860
#define ODM_REG_RX_ANT_CTRL_11N 0x864
diff --git a/drivers/staging/rtl8188eu/include/odm_debug.h b/drivers/staging/rtl8188eu/include/odm_debug.h
index 52e51f19f752..687ff3e9c09a 100644
--- a/drivers/staging/rtl8188eu/include/odm_debug.h
+++ b/drivers/staging/rtl8188eu/include/odm_debug.h
@@ -37,7 +37,7 @@
/* resource allocation failed, unexpected HW behavior, HW BUG and so on. */
#define ODM_DBG_SERIOUS 2
-/* Abnormal, rare, or unexpeted cases. */
+/* Abnormal, rare, or unexpected cases. */
/* For example, IRP/Packet/OID canceled, device suprisely unremoved and so on. */
#define ODM_DBG_WARNING 3
diff --git a/drivers/staging/rtl8188eu/include/pwrseq.h b/drivers/staging/rtl8188eu/include/pwrseq.h
index afd61cf4cb15..addf90b60ce9 100644
--- a/drivers/staging/rtl8188eu/include/pwrseq.h
+++ b/drivers/staging/rtl8188eu/include/pwrseq.h
@@ -29,7 +29,7 @@
4: LPS--Low Power State
5: SUS--Suspend
- The transision from different states are defined below
+ The transition from different states are defined below
TRANS_CARDEMU_TO_ACT
TRANS_ACT_TO_CARDEMU
TRANS_CARDEMU_TO_SUS
diff --git a/drivers/staging/rtl8188eu/include/rtl8188e_spec.h b/drivers/staging/rtl8188eu/include/rtl8188e_spec.h
index fb82f663b1f5..c93e19d1c50f 100644
--- a/drivers/staging/rtl8188eu/include/rtl8188e_spec.h
+++ b/drivers/staging/rtl8188eu/include/rtl8188e_spec.h
@@ -1350,7 +1350,7 @@ Current IOREG MAP
#define EEPROM_Default_CrystalCap_88E 0x20
#define EEPROM_Default_ThermalMeter_88E 0x18
-/* New EFUSE deafult value */
+/* New EFUSE default value */
#define EEPROM_DEFAULT_24G_INDEX 0x2D
#define EEPROM_DEFAULT_24G_HT20_DIFF 0X02
#define EEPROM_DEFAULT_24G_OFDM_DIFF 0X04
diff --git a/drivers/staging/rtl8188eu/include/rtw_cmd.h b/drivers/staging/rtl8188eu/include/rtw_cmd.h
index 18a6530c9dde..f79feeb4e38f 100644
--- a/drivers/staging/rtl8188eu/include/rtw_cmd.h
+++ b/drivers/staging/rtl8188eu/include/rtw_cmd.h
@@ -214,7 +214,7 @@ struct set_assocsta_rsp {
mac[0] == 0
==> CMD mode, return H2C_SUCCESS.
- The following condition must be ture under CMD mode
+ The following condition must be true under CMD mode
mac[1] == mac[4], mac[2] == mac[3], mac[0]=mac[5]= 0;
s0 == 0x1234, s1 == 0xabcd, w0 == 0x78563412, w1 == 0x5aa5def7;
s2 == (b1 << 8 | b0);
diff --git a/drivers/staging/rtl8188eu/include/rtw_ioctl.h b/drivers/staging/rtl8188eu/include/rtw_ioctl.h
index a6b1c854a061..0fa78ed2c1ab 100644
--- a/drivers/staging/rtl8188eu/include/rtw_ioctl.h
+++ b/drivers/staging/rtl8188eu/include/rtw_ioctl.h
@@ -58,12 +58,6 @@
#define OID_MP_SEG3 0xFF818700
#define OID_MP_SEG4 0xFF011100
-#define DEBUG_OID(dbg, str) \
- if ((!dbg)) { \
- RT_TRACE(_module_rtl871x_ioctl_c_, _drv_info_, \
- ("%s(%d): %s", __func__, __line__, str)); \
- }
-
enum oid_type {
QUERY_OID,
SET_OID
diff --git a/drivers/staging/rtl8188eu/include/rtw_mlme.h b/drivers/staging/rtl8188eu/include/rtw_mlme.h
index 7324a95bb162..5c5d0ae8bdd1 100644
--- a/drivers/staging/rtl8188eu/include/rtw_mlme.h
+++ b/drivers/staging/rtl8188eu/include/rtw_mlme.h
@@ -216,7 +216,6 @@ void hostapd_mode_unload(struct adapter *padapter);
extern unsigned char WPA_TKIP_CIPHER[4];
extern unsigned char RSN_TKIP_CIPHER[4];
extern unsigned char REALTEK_96B_IE[];
-extern unsigned char MCS_rate_2R[16];
extern unsigned char MCS_rate_1R[16];
void rtw_joinbss_event_prehandle(struct adapter *adapter, u8 *pbuf);
diff --git a/drivers/staging/rtl8188eu/include/rtw_mp_phy_regdef.h b/drivers/staging/rtl8188eu/include/rtw_mp_phy_regdef.h
index 02b300217185..4872a21b3103 100644
--- a/drivers/staging/rtl8188eu/include/rtw_mp_phy_regdef.h
+++ b/drivers/staging/rtl8188eu/include/rtw_mp_phy_regdef.h
@@ -198,7 +198,7 @@
#define rOFDM0_TRSWIsolation 0xc0c
#define rOFDM0_XARxAFE 0xc10 /* RxIQ DC offset, Rx digital filter, DC notch filter */
-#define rOFDM0_XARxIQImbalance 0xc14 /* RxIQ imblance matrix */
+#define rOFDM0_XARxIQImbalance 0xc14 /* RxIQ imbalance matrix */
#define rOFDM0_XBRxAFE 0xc18
#define rOFDM0_XBRxIQImbalance 0xc1c
#define rOFDM0_XCRxAFE 0xc20
diff --git a/drivers/staging/rtl8188eu/include/rtw_recv.h b/drivers/staging/rtl8188eu/include/rtw_recv.h
index b369f08d4a15..121150860450 100644
--- a/drivers/staging/rtl8188eu/include/rtw_recv.h
+++ b/drivers/staging/rtl8188eu/include/rtw_recv.h
@@ -106,7 +106,7 @@ struct rx_pkt_attrib {
u8 privacy; /* in frame_ctrl field */
u8 bdecrypted;
u8 encrypt; /* when 0 indicate no encrypt. when non-zero,
- * indicate the encrypt algorith */
+ * indicate the encrypt algorithm */
u8 iv_len;
u8 icv_len;
u8 crc_err;
@@ -176,7 +176,7 @@ struct recv_priv {
struct sk_buff_head rx_skb_queue;
struct recv_buf *precv_buf; /* 4 alignment */
struct __queue free_recv_buf_queue;
- /* For display the phy informatiom */
+ /* For display the phy information */
s8 rssi;
s8 rxpwdb;
u8 signal_strength;
diff --git a/drivers/staging/rtl8188eu/include/rtw_security.h b/drivers/staging/rtl8188eu/include/rtw_security.h
index 7100d6b01b32..74fe664787e5 100644
--- a/drivers/staging/rtl8188eu/include/rtw_security.h
+++ b/drivers/staging/rtl8188eu/include/rtw_security.h
@@ -133,8 +133,6 @@ struct security_priv {
u8 busetkipkey;
u8 bcheck_grpkey;
u8 bgrpkey_handshake;
- s32 sw_encrypt;/* from registry_priv */
- s32 sw_decrypt;/* from registry_priv */
s32 hw_decrypted;/* if the rx packets is hw_decrypted==false,i
* it means the hw has not been ready. */
diff --git a/drivers/staging/rtl8188eu/os_dep/os_intfs.c b/drivers/staging/rtl8188eu/os_dep/os_intfs.c
index 5f6a24546a91..add1ba00f3e9 100644
--- a/drivers/staging/rtl8188eu/os_dep/os_intfs.c
+++ b/drivers/staging/rtl8188eu/os_dep/os_intfs.c
@@ -49,9 +49,6 @@ MODULE_PARM_DESC(rtw_ips_mode, "The default IPS mode");
static int rtw_debug = 1;
-static int rtw_software_encrypt;
-static int rtw_software_decrypt;
-
static int rtw_acm_method;/* 0:By SW 1:By HW. */
static int rtw_wmm_enable = 1;/* default is set to enable the wmm. */
@@ -166,8 +163,6 @@ static void loadparam(struct adapter *padapter, struct net_device *pnetdev)
registry_par->power_mgnt = (u8)rtw_power_mgnt;
registry_par->ips_mode = (u8)rtw_ips_mode;
registry_par->mp_mode = 0;
- registry_par->software_encrypt = (u8)rtw_software_encrypt;
- registry_par->software_decrypt = (u8)rtw_software_decrypt;
registry_par->acm_method = (u8)rtw_acm_method;
/* UAPSD */
@@ -393,8 +388,6 @@ static u8 rtw_init_default_value(struct adapter *padapter)
/* security_priv */
psecuritypriv->binstallGrpkey = _FAIL;
- psecuritypriv->sw_encrypt = pregistrypriv->software_encrypt;
- psecuritypriv->sw_decrypt = pregistrypriv->software_decrypt;
psecuritypriv->dot11AuthAlgrthm = dot11AuthAlgrthm_Open;
psecuritypriv->dot11PrivacyAlgrthm = _NO_PRIVACY_;
psecuritypriv->dot11PrivacyKeyIndex = 0;
diff --git a/drivers/staging/rtl8188eu/os_dep/xmit_linux.c b/drivers/staging/rtl8188eu/os_dep/xmit_linux.c
index e097c619ed1b..8bf8248e4ac7 100644
--- a/drivers/staging/rtl8188eu/os_dep/xmit_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/xmit_linux.c
@@ -72,7 +72,7 @@ int rtw_os_xmit_resource_alloc(struct adapter *padapter, struct xmit_buf *pxmitb
for (i = 0; i < 8; i++) {
pxmitbuf->pxmit_urb[i] = usb_alloc_urb(0, GFP_KERNEL);
- if (pxmitbuf->pxmit_urb[i] == NULL) {
+ if (!pxmitbuf->pxmit_urb[i]) {
DBG_88E("pxmitbuf->pxmit_urb[i]==NULL");
return _FAIL;
}
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8190P_def.h b/drivers/staging/rtl8192e/rtl8192e/r8190P_def.h
index 34453e38ba93..03421033d14a 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8190P_def.h
+++ b/drivers/staging/rtl8192e/rtl8192e/r8190P_def.h
@@ -11,7 +11,7 @@
*
* Contact Information:
* wlanfae <wlanfae@realtek.com>
-******************************************************************************/
+ *****************************************************************************/
#ifndef R8190P_DEF_H
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8190P_rtl8256.c b/drivers/staging/rtl8192e/rtl8192e/r8190P_rtl8256.c
index 81b3cf6588d4..85f93056d28b 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8190P_rtl8256.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8190P_rtl8256.c
@@ -11,7 +11,7 @@
*
* Contact Information:
* wlanfae <wlanfae@realtek.com>
-******************************************************************************/
+ *****************************************************************************/
#include "rtl_core.h"
#include "r8192E_phyreg.h"
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8190P_rtl8256.h b/drivers/staging/rtl8192e/rtl8192e/r8190P_rtl8256.h
index 7873a73a2f76..bbea13b452b2 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8190P_rtl8256.h
+++ b/drivers/staging/rtl8192e/rtl8192e/r8190P_rtl8256.h
@@ -11,7 +11,7 @@
*
* Contact Information:
* wlanfae <wlanfae@realtek.com>
-******************************************************************************/
+ *****************************************************************************/
#ifndef RTL8225H
#define RTL8225H
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.c
index 757ffd4f2f89..467287ae6c1c 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.c
@@ -11,7 +11,7 @@
*
* Contact Information:
* wlanfae <wlanfae@realtek.com>
-******************************************************************************/
+ *****************************************************************************/
#include "rtl_core.h"
#include "r8192E_hw.h"
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.h b/drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.h
index 7dd15d993481..a8c63ad2ac2e 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.h
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.h
@@ -11,7 +11,7 @@
*
* Contact Information:
* wlanfae <wlanfae@realtek.com>
-******************************************************************************/
+ *****************************************************************************/
#ifndef R819XUSB_CMDPKT_H
#define R819XUSB_CMDPKT_H
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
index 8d6bca61e7aa..4723a0bd5067 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
@@ -17,7 +17,7 @@
*
* Contact Information:
* wlanfae <wlanfae@realtek.com>
-******************************************************************************/
+ *****************************************************************************/
#include "rtl_core.h"
#include "r8192E_phy.h"
#include "r8192E_phyreg.h"
@@ -135,7 +135,7 @@ void rtl92e_set_reg(struct net_device *dev, u8 variable, u8 *val)
{
u32 RegRCR, Type;
- Type = ((u8 *)(val))[0];
+ Type = val[0];
RegRCR = rtl92e_readl(dev, RCR);
priv->ReceiveConfig = RegRCR;
@@ -161,7 +161,7 @@ void rtl92e_set_reg(struct net_device *dev, u8 variable, u8 *val)
{
u32 regTmp;
- priv->short_preamble = (bool)(*(u8 *)val);
+ priv->short_preamble = (bool)*val;
regTmp = priv->basic_rate;
if (priv->short_preamble)
regTmp |= BRSR_AckShortPmb;
@@ -175,7 +175,7 @@ void rtl92e_set_reg(struct net_device *dev, u8 variable, u8 *val)
case HW_VAR_AC_PARAM:
{
- u8 pAcParam = *((u8 *)val);
+ u8 pAcParam = *val;
u32 eACI = pAcParam;
u8 u1bAIFS;
u32 u4bAcParam;
@@ -221,7 +221,7 @@ void rtl92e_set_reg(struct net_device *dev, u8 variable, u8 *val)
break;
}
priv->rtllib->SetHwRegHandler(dev, HW_VAR_ACM_CTRL,
- (u8 *)(&pAcParam));
+ &pAcParam);
break;
}
@@ -229,7 +229,7 @@ void rtl92e_set_reg(struct net_device *dev, u8 variable, u8 *val)
{
struct rtllib_qos_parameters *qos_parameters =
&priv->rtllib->current_network.qos_data.parameters;
- u8 pAcParam = *((u8 *)val);
+ u8 pAcParam = *val;
u32 eACI = pAcParam;
union aci_aifsn *pAciAifsn = (union aci_aifsn *) &
(qos_parameters->aifs[0]);
@@ -293,7 +293,7 @@ void rtl92e_set_reg(struct net_device *dev, u8 variable, u8 *val)
case HW_VAR_RF_TIMING:
{
- u8 Rf_Timing = *((u8 *)val);
+ u8 Rf_Timing = *val;
rtl92e_writeb(dev, rFPGA0_RFTiming1, Rf_Timing);
break;
@@ -372,7 +372,7 @@ static void _rtl92e_read_eeprom_info(struct net_device *dev)
if (!priv->AutoloadFailFlag) {
for (i = 0; i < 6; i += 2) {
usValue = rtl92e_eeprom_read(dev,
- (u16)((EEPROM_NODE_ADDRESS_BYTE_0 + i) >> 1));
+ (EEPROM_NODE_ADDRESS_BYTE_0 + i) >> 1);
*(u16 *)(&dev->dev_addr[i]) = usValue;
}
} else {
@@ -436,8 +436,7 @@ static void _rtl92e_read_eeprom_info(struct net_device *dev)
for (i = 0; i < 14; i += 2) {
if (!priv->AutoloadFailFlag)
usValue = rtl92e_eeprom_read(dev,
- (u16)((EEPROM_TxPwIndex_CCK +
- i) >> 1));
+ (EEPROM_TxPwIndex_CCK + i) >> 1);
else
usValue = EEPROM_Default_TxPower;
*((u16 *)(&priv->EEPROMTxPowerLevelCCK[i])) =
@@ -452,8 +451,7 @@ static void _rtl92e_read_eeprom_info(struct net_device *dev)
for (i = 0; i < 14; i += 2) {
if (!priv->AutoloadFailFlag)
usValue = rtl92e_eeprom_read(dev,
- (u16)((EEPROM_TxPwIndex_OFDM_24G
- + i) >> 1));
+ (EEPROM_TxPwIndex_OFDM_24G + i) >> 1);
else
usValue = EEPROM_Default_TxPower;
*((u16 *)(&priv->EEPROMTxPowerLevelOFDM24G[i]))
@@ -1650,15 +1648,11 @@ static void _rtl92e_query_rxphystatus(
evm = rtl92e_evm_db_to_percent(rx_evmX);
if (bpacket_match_bssid) {
if (i == 0) {
- pstats->SignalQuality = (u8)(evm &
- 0xff);
- precord_stats->SignalQuality = (u8)(evm
- & 0xff);
+ pstats->SignalQuality = evm & 0xff;
+ precord_stats->SignalQuality = evm & 0xff;
}
- pstats->RxMIMOSignalQuality[i] = (u8)(evm &
- 0xff);
- precord_stats->RxMIMOSignalQuality[i] = (u8)(evm
- & 0xff);
+ pstats->RxMIMOSignalQuality[i] = evm & 0xff;
+ precord_stats->RxMIMOSignalQuality[i] = evm & 0xff;
}
}
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.h b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.h
index 6bb58193fc5c..f4233bb12f81 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.h
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.h
@@ -17,7 +17,7 @@
*
* Contact Information:
* wlanfae <wlanfae@realtek.com>
-******************************************************************************/
+ *****************************************************************************/
#ifndef _RTL8192E_H
#define _RTL8192E_H
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.c
index bbe399010be1..3c7831250987 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.c
@@ -11,7 +11,7 @@
*
* Contact Information:
* wlanfae <wlanfae@realtek.com>
-******************************************************************************/
+ *****************************************************************************/
#include "rtl_core.h"
#include "r8192E_hw.h"
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.h b/drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.h
index b48ec9410c09..61c8dac826a8 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.h
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.h
@@ -11,7 +11,7 @@
*
* Contact Information:
* wlanfae <wlanfae@realtek.com>
-******************************************************************************/
+ *****************************************************************************/
#ifndef __INC_FIRMWARE_H
#define __INC_FIRMWARE_H
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_hw.h b/drivers/staging/rtl8192e/rtl8192e/r8192E_hw.h
index d298023ef079..5c20cb476281 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_hw.h
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_hw.h
@@ -11,7 +11,7 @@
*
* Contact Information:
* wlanfae <wlanfae@realtek.com>
-******************************************************************************/
+ *****************************************************************************/
#ifndef R8180_HW
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_hwimg.h b/drivers/staging/rtl8192e/rtl8192e/r8192E_hwimg.h
index d96b87d77e7a..4e2bbab6a413 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_hwimg.h
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_hwimg.h
@@ -11,7 +11,7 @@
*
* Contact Information:
* wlanfae <wlanfae@realtek.com>
-******************************************************************************/
+ *****************************************************************************/
#ifndef __INC_HAL8192PciE_FW_IMG_H
#define __INC_HAL8192PciE_FW_IMG_H
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c
index dde492261451..73497d559b77 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c
@@ -11,7 +11,7 @@
*
* Contact Information:
* wlanfae <wlanfae@realtek.com>
-******************************************************************************/
+ *****************************************************************************/
#include <linux/bitops.h>
#include "rtl_core.h"
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.h b/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.h
index 9ddfd4e3adb3..b534d72bf708 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.h
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.h
@@ -11,7 +11,7 @@
*
* Contact Information:
* wlanfae <wlanfae@realtek.com>
-******************************************************************************/
+ *****************************************************************************/
#ifndef _R819XU_PHY_H
#define _R819XU_PHY_H
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_phyreg.h b/drivers/staging/rtl8192e/rtl8192e/r8192E_phyreg.h
index 50c79d3dc782..03d6d70b2d28 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_phyreg.h
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_phyreg.h
@@ -11,7 +11,7 @@
*
* Contact Information:
* wlanfae <wlanfae@realtek.com>
-******************************************************************************/
+ *****************************************************************************/
#ifndef _R819XU_PHYREG_H
#define _R819XU_PHYREG_H
@@ -692,7 +692,7 @@
* #define bRxPath4 0x08
* #define bTxPath1 0x10
* #define bTxPath2 0x20
-*/
+ */
#define bHTDetect 0x100
#define bCFOEn 0x10000
#define bCFOValue 0xfff00000
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_cam.c b/drivers/staging/rtl8192e/rtl8192e/rtl_cam.c
index 30f65af4d614..c62481fcf0b1 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_cam.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_cam.c
@@ -17,7 +17,7 @@
*
* Contact Information:
* wlanfae <wlanfae@realtek.com>
-******************************************************************************/
+ *****************************************************************************/
#include "rtl_core.h"
#include "r8192E_phy.h"
#include "r8192E_phyreg.h"
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_cam.h b/drivers/staging/rtl8192e/rtl8192e/rtl_cam.h
index aa12941dc685..12f01f196752 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_cam.h
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_cam.h
@@ -17,7 +17,7 @@
*
* Contact Information:
* wlanfae <wlanfae@realtek.com>
-******************************************************************************/
+ *****************************************************************************/
#ifndef _RTL_CAM_H
#define _RTL_CAM_H
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
index 4c0caa6701a9..a4d1bac4a844 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
@@ -17,7 +17,7 @@
*
* Contact Information:
* wlanfae <wlanfae@realtek.com>
-******************************************************************************/
+ *****************************************************************************/
#include <linux/uaccess.h>
#include <linux/pci.h>
#include <linux/vmalloc.h>
@@ -101,8 +101,8 @@ static int _rtl92e_down(struct net_device *dev, bool shutdownrf);
static void _rtl92e_restart(void *data);
/****************************************************************************
- -----------------------------IO STUFF-------------------------
-*****************************************************************************/
+ * -----------------------------IO STUFF-------------------------
+ ****************************************************************************/
u8 rtl92e_readb(struct net_device *dev, int x)
{
@@ -141,8 +141,8 @@ void rtl92e_writew(struct net_device *dev, int x, u16 y)
}
/****************************************************************************
- -----------------------------GENERAL FUNCTION-------------------------
-*****************************************************************************/
+ * -----------------------------GENERAL FUNCTION-------------------------
+ ****************************************************************************/
bool rtl92e_set_rf_state(struct net_device *dev,
enum rt_rf_power_state StateToSet,
RT_RF_CHANGE_SOURCE ChangeSource)
@@ -346,7 +346,7 @@ static void _rtl92e_update_cap(struct net_device *dev, u16 cap)
}
}
- if (net->mode & (IEEE_G|IEEE_N_24G)) {
+ if (net->mode & (IEEE_G | IEEE_N_24G)) {
u8 slot_time_val;
u8 CurSlotTime = priv->slot_time;
@@ -477,7 +477,7 @@ static int _rtl92e_qos_assoc_resp(struct r8192_priv *priv,
u32 size = sizeof(struct rtllib_qos_parameters);
int set_qos_param = 0;
- if ((priv == NULL) || (network == NULL))
+ if (!priv || !network)
return 0;
if (priv->rtllib->state != RTLLIB_LINKED)
@@ -680,7 +680,7 @@ static u8 _rtl92e_get_supported_wireless_mode(struct net_device *dev)
case RF_8256:
case RF_6052:
case RF_PSEUDO_11N:
- ret = (WIRELESS_MODE_N_24G|WIRELESS_MODE_G | WIRELESS_MODE_B);
+ ret = (WIRELESS_MODE_N_24G | WIRELESS_MODE_G | WIRELESS_MODE_B);
break;
case RF_8258:
ret = (WIRELESS_MODE_A | WIRELESS_MODE_N_5G);
@@ -742,7 +742,7 @@ static int _rtl92e_sta_up(struct net_device *dev, bool is_silent_reset)
{
struct r8192_priv *priv = rtllib_priv(dev);
struct rt_pwr_save_ctrl *pPSC = (struct rt_pwr_save_ctrl *)
- (&(priv->rtllib->PowerSaveControl));
+ (&priv->rtllib->PowerSaveControl);
bool init_status = true;
priv->bDriverIsGoingToUnload = false;
@@ -790,7 +790,7 @@ static int _rtl92e_sta_down(struct net_device *dev, bool shutdownrf)
if (priv->up == 0)
return -1;
- if (priv->rtllib->rtllib_ips_leave != NULL)
+ if (priv->rtllib->rtllib_ips_leave)
priv->rtllib->rtllib_ips_leave(dev);
if (priv->rtllib->state == RTLLIB_LINKED)
@@ -888,7 +888,7 @@ static void _rtl92e_init_priv_constant(struct net_device *dev)
{
struct r8192_priv *priv = rtllib_priv(dev);
struct rt_pwr_save_ctrl *pPSC = (struct rt_pwr_save_ctrl *)
- &(priv->rtllib->PowerSaveControl);
+ &priv->rtllib->PowerSaveControl;
pPSC->RegMaxLPSAwakeIntvl = 5;
}
@@ -1015,9 +1015,9 @@ static void _rtl92e_init_priv_task(struct net_device *dev)
(void *)_rtl92e_update_beacon, dev);
INIT_WORK_RSL(&priv->qos_activate, (void *)_rtl92e_qos_activate, dev);
INIT_DELAYED_WORK_RSL(&priv->rtllib->hw_wakeup_wq,
- (void *) rtl92e_hw_wakeup_wq, dev);
+ (void *)rtl92e_hw_wakeup_wq, dev);
INIT_DELAYED_WORK_RSL(&priv->rtllib->hw_sleep_wq,
- (void *) rtl92e_hw_sleep_wq, dev);
+ (void *)rtl92e_hw_sleep_wq, dev);
tasklet_init(&priv->irq_rx_tasklet,
(void(*)(unsigned long))_rtl92e_irq_rx_tasklet,
(unsigned long)priv);
@@ -1035,8 +1035,8 @@ static short _rtl92e_get_channel_map(struct net_device *dev)
struct r8192_priv *priv = rtllib_priv(dev);
- if ((priv->rf_chip != RF_8225) && (priv->rf_chip != RF_8256)
- && (priv->rf_chip != RF_6052)) {
+ if ((priv->rf_chip != RF_8225) && (priv->rf_chip != RF_8256) &&
+ (priv->rf_chip != RF_6052)) {
netdev_err(dev, "%s: unknown rf chip, can't set channel map\n",
__func__);
return -1;
@@ -1062,7 +1062,7 @@ static short _rtl92e_init(struct net_device *dev)
{
struct r8192_priv *priv = rtllib_priv(dev);
- memset(&(priv->stats), 0, sizeof(struct rt_stats));
+ memset(&priv->stats, 0, sizeof(struct rt_stats));
_rtl92e_init_priv_handler(dev);
_rtl92e_init_priv_constant(dev);
@@ -1077,7 +1077,7 @@ static short _rtl92e_init(struct net_device *dev)
setup_timer(&priv->watch_dog_timer,
_rtl92e_watchdog_timer_cb,
- (unsigned long) dev);
+ (unsigned long)dev);
setup_timer(&priv->gpio_polling_timer,
rtl92e_check_rfctrl_gpio_timer,
@@ -1102,8 +1102,8 @@ static short _rtl92e_init(struct net_device *dev)
}
/***************************************************************************
- -------------------------------WATCHDOG STUFF---------------------------
-***************************************************************************/
+ * -------------------------------WATCHDOG STUFF---------------------------
+ **************************************************************************/
static short _rtl92e_is_tx_queue_empty(struct net_device *dev)
{
int i = 0;
@@ -1134,7 +1134,7 @@ static enum reset_type _rtl92e_tx_check_stuck(struct net_device *dev)
switch (priv->rtllib->ps) {
case RTLLIB_PS_DISABLED:
break;
- case (RTLLIB_PS_MBCAST|RTLLIB_PS_UNICAST):
+ case (RTLLIB_PS_MBCAST | RTLLIB_PS_UNICAST):
break;
default:
break;
@@ -1387,7 +1387,7 @@ static void _rtl92e_watchdog_wq_cb(void *data)
static u8 check_reset_cnt;
unsigned long flags;
struct rt_pwr_save_ctrl *pPSC = (struct rt_pwr_save_ctrl *)
- (&(priv->rtllib->PowerSaveControl));
+ (&priv->rtllib->PowerSaveControl);
bool bBusyTraffic = false;
bool bHigherBusyTraffic = false;
bool bHigherBusyRxTraffic = false;
@@ -1469,7 +1469,7 @@ static void _rtl92e_watchdog_wq_cb(void *data)
_rtl92e_update_rxcounts(priv, &TotalRxBcnNum, &TotalRxDataNum);
- if ((TotalRxBcnNum+TotalRxDataNum) == 0)
+ if ((TotalRxBcnNum + TotalRxDataNum) == 0)
priv->check_roaming_cnt++;
else
priv->check_roaming_cnt = 0;
@@ -1497,7 +1497,7 @@ static void _rtl92e_watchdog_wq_cb(void *data)
notify_wx_assoc_event(ieee);
if (!(ieee->rtllib_ap_sec_type(ieee) &
- (SEC_ALG_CCMP|SEC_ALG_TKIP)))
+ (SEC_ALG_CCMP | SEC_ALG_TKIP)))
schedule_delayed_work(
&ieee->associate_procedure_wq, 0);
@@ -1541,8 +1541,8 @@ static void _rtl92e_watchdog_timer_cb(unsigned long data)
}
/****************************************************************************
- ---------------------------- NIC TX/RX STUFF---------------------------
-*****************************************************************************/
+ * ---------------------------- NIC TX/RX STUFF---------------------------
+ ****************************************************************************/
void rtl92e_rx_enable(struct net_device *dev)
{
struct r8192_priv *priv = rtllib_priv(dev);
@@ -1603,7 +1603,7 @@ static void _rtl92e_free_tx_ring(struct net_device *dev, unsigned int prio)
ring->idx = (ring->idx + 1) % ring->entries;
}
- pci_free_consistent(priv->pdev, sizeof(*ring->desc)*ring->entries,
+ pci_free_consistent(priv->pdev, sizeof(*ring->desc) * ring->entries,
ring->desc, ring->dma);
ring->desc = NULL;
}
@@ -1712,7 +1712,7 @@ static void _rtl92e_tx_cmd(struct net_device *dev, struct sk_buff *skb)
ring = &priv->tx_ring[TXCMD_QUEUE];
idx = (ring->idx + skb_queue_len(&ring->queue)) % ring->entries;
- entry = (struct tx_desc_cmd *) &ring->desc[idx];
+ entry = (struct tx_desc_cmd *)&ring->desc[idx];
tcb_desc = (struct cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
@@ -2031,7 +2031,7 @@ static void _rtl92e_rx_normal(struct net_device *dev)
struct rtllib_rx_stats stats = {
.signal = 0,
- .noise = (u8) -98,
+ .noise = (u8)-98,
.rate = 0,
.freq = RTLLIB_24GHZ_BAND,
};
@@ -2104,7 +2104,7 @@ static void _rtl92e_rx_normal(struct net_device *dev)
priv->rx_buf[rx_queue_idx][priv->rx_idx[rx_queue_idx]] =
skb;
- *((dma_addr_t *) skb->cb) = pci_map_single(priv->pdev,
+ *((dma_addr_t *)skb->cb) = pci_map_single(priv->pdev,
skb_tail_pointer_rsl(skb),
priv->rxbuffersize,
PCI_DMA_FROMDEVICE);
@@ -2117,7 +2117,7 @@ done:
pdesc->BufferAddress = *((dma_addr_t *)skb->cb);
pdesc->OWN = 1;
pdesc->Length = priv->rxbuffersize;
- if (priv->rx_idx[rx_queue_idx] == priv->rxringcount-1)
+ if (priv->rx_idx[rx_queue_idx] == priv->rxringcount - 1)
pdesc->EOR = 1;
priv->rx_idx[rx_queue_idx] = (priv->rx_idx[rx_queue_idx] + 1) %
priv->rxringcount;
@@ -2156,8 +2156,8 @@ static void _rtl92e_irq_rx_tasklet(struct r8192_priv *priv)
}
/****************************************************************************
- ---------------------------- NIC START/CLOSE STUFF---------------------------
-*****************************************************************************/
+ * ---------------------------- NIC START/CLOSE STUFF---------------------------
+ ****************************************************************************/
static void _rtl92e_cancel_deferred_work(struct r8192_priv *priv)
{
cancel_delayed_work_sync(&priv->watch_dog_wq);
@@ -2348,8 +2348,9 @@ static int _rtl92e_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
0, key);
}
}
- if ((ieee->pairwise_key_type == KEY_TYPE_CCMP)
- && ieee->pHTInfo->bCurrentHTSupport) {
+ if ((ieee->pairwise_key_type ==
+ KEY_TYPE_CCMP) &&
+ ieee->pHTInfo->bCurrentHTSupport) {
rtl92e_writeb(dev, 0x173, 1);
}
@@ -2535,8 +2536,8 @@ done:
/****************************************************************************
- ---------------------------- PCI_STUFF---------------------------
-*****************************************************************************/
+ * ---------------------------- PCI_STUFF---------------------------
+ ****************************************************************************/
static const struct net_device_ops rtl8192_netdev_ops = {
.ndo_open = _rtl92e_open,
.ndo_stop = _rtl92e_close,
@@ -2726,7 +2727,7 @@ bool rtl92e_enable_nic(struct net_device *dev)
bool init_status = true;
struct r8192_priv *priv = rtllib_priv(dev);
struct rt_pwr_save_ctrl *pPSC = (struct rt_pwr_save_ctrl *)
- (&(priv->rtllib->PowerSaveControl));
+ (&priv->rtllib->PowerSaveControl);
if (!priv->up) {
netdev_warn(dev, "%s(): Driver is already down!\n", __func__);
@@ -2751,6 +2752,7 @@ bool rtl92e_enable_nic(struct net_device *dev)
RT_TRACE(COMP_PS, "<===========%s()\n", __func__);
return init_status;
}
+
bool rtl92e_disable_nic(struct net_device *dev)
{
struct r8192_priv *priv = rtllib_priv(dev);
@@ -2785,8 +2787,8 @@ void rtl92e_check_rfctrl_gpio_timer(unsigned long data)
}
/***************************************************************************
- ------------------- module init / exit stubs ----------------
-****************************************************************************/
+ * ------------------- module init / exit stubs ----------------
+ ***************************************************************************/
MODULE_DESCRIPTION("Linux driver for Realtek RTL819x WiFi cards");
MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.h b/drivers/staging/rtl8192e/rtl8192e/rtl_core.h
index babc0b3bce95..0335823e2766 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.h
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.h
@@ -17,7 +17,7 @@
*
* Contact Information:
* wlanfae <wlanfae@realtek.com>
-******************************************************************************/
+ *****************************************************************************/
#ifndef _RTL_CORE_H
#define _RTL_CORE_H
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
index dbb58fb16482..1a43c684f9f3 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
@@ -11,7 +11,7 @@
*
* Contact Information:
* wlanfae <wlanfae@realtek.com>
-******************************************************************************/
+ *****************************************************************************/
#include "rtl_core.h"
#include "rtl_dm.h"
#include "r8192E_hw.h"
@@ -886,11 +886,14 @@ static void _rtl92e_dm_tx_power_tracking_cb_thermal(struct net_device *dev)
if (tmpCCK40Mindex >= CCK_Table_length)
tmpCCK40Mindex = CCK_Table_length-1;
} else {
- tmpval = ((u8)tmpRegA - priv->ThermalMeter[0]);
- if (tmpval >= 6)
- tmpOFDMindex = tmpCCK20Mindex = 0;
- else
- tmpOFDMindex = tmpCCK20Mindex = 6 - tmpval;
+ tmpval = (u8)tmpRegA - priv->ThermalMeter[0];
+ if (tmpval >= 6) {
+ tmpOFDMindex = 0;
+ tmpCCK20Mindex = 0;
+ } else {
+ tmpOFDMindex = 6 - tmpval;
+ tmpCCK20Mindex = 6 - tmpval;
+ }
tmpCCK40Mindex = 0;
}
if (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20)
@@ -1330,7 +1333,7 @@ static void _rtl92e_dm_ctrl_initgain_byrssi(struct net_device *dev)
* When Who Remark
* 03/04/2009 hpfan Create Version 0.
*
- *---------------------------------------------------------------------------*/
+ ******************************************************************************/
static void _rtl92e_dm_ctrl_initgain_byrssi_driver(struct net_device *dev)
{
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_dm.h b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.h
index 756a0dd00d56..52a4a1522bae 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_dm.h
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.h
@@ -11,7 +11,7 @@
*
* Contact Information:
* wlanfae <wlanfae@realtek.com>
-******************************************************************************/
+ *****************************************************************************/
#ifndef __R8192UDM_H__
#define __R8192UDM_H__
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_eeprom.c b/drivers/staging/rtl8192e/rtl8192e/rtl_eeprom.c
index 162e06c0883c..e1d305d4fa20 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_eeprom.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_eeprom.c
@@ -17,7 +17,7 @@
*
* Contact Information:
* wlanfae <wlanfae@realtek.com>
-******************************************************************************/
+ *****************************************************************************/
#include "rtl_core.h"
#include "rtl_eeprom.h"
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_eeprom.h b/drivers/staging/rtl8192e/rtl8192e/rtl_eeprom.h
index d63e8b0c185b..6212e5eadede 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_eeprom.h
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_eeprom.h
@@ -17,7 +17,7 @@
*
* Contact Information:
* wlanfae <wlanfae@realtek.com>
-******************************************************************************/
+ *****************************************************************************/
#define EPROM_DELAY 10
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_pm.c b/drivers/staging/rtl8192e/rtl8192e/rtl_pm.c
index 9e04dc29fbbb..3e3273d3e043 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_pm.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_pm.c
@@ -11,7 +11,7 @@
*
* Contact Information:
* wlanfae <wlanfae@realtek.com>
-******************************************************************************/
+ *****************************************************************************/
#include "rtl_core.h"
#include "r8192E_hw.h"
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_pm.h b/drivers/staging/rtl8192e/rtl8192e/rtl_pm.h
index 7625e3f31ea8..03fe79ff5a1b 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_pm.h
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_pm.h
@@ -11,7 +11,7 @@
*
* Contact Information:
* wlanfae <wlanfae@realtek.com>
-******************************************************************************/
+ *****************************************************************************/
#ifndef R8192E_PM_H
#define R8192E_PM_H
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_ps.c b/drivers/staging/rtl8192e/rtl8192e/rtl_ps.c
index aa4b015c3cc7..9281116366d2 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_ps.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_ps.c
@@ -35,7 +35,7 @@ static void _rtl92e_hw_sleep(struct net_device *dev)
if (priv->RFChangeInProgress) {
spin_unlock_irqrestore(&priv->rf_ps_lock, flags);
RT_TRACE(COMP_DBG,
- "_rtl92e_hw_sleep(): RF Change in progress!\n");
+ "%s(): RF Change in progress!\n", __func__);
return;
}
spin_unlock_irqrestore(&priv->rf_ps_lock, flags);
@@ -62,7 +62,7 @@ void rtl92e_hw_wakeup(struct net_device *dev)
if (priv->RFChangeInProgress) {
spin_unlock_irqrestore(&priv->rf_ps_lock, flags);
RT_TRACE(COMP_DBG,
- "rtl92e_hw_wakeup(): RF Change in progress!\n");
+ "%s(): RF Change in progress!\n", __func__);
schedule_delayed_work(&priv->rtllib->hw_wakeup_wq,
msecs_to_jiffies(10));
return;
@@ -121,15 +121,15 @@ static void _rtl92e_ps_update_rf_state(struct net_device *dev)
struct rt_pwr_save_ctrl *pPSC = (struct rt_pwr_save_ctrl *)
&(priv->rtllib->PowerSaveControl);
- RT_TRACE(COMP_PS, "_rtl92e_ps_update_rf_state() --------->\n");
+ RT_TRACE(COMP_PS, "%s() --------->\n", __func__);
pPSC->bSwRfProcessing = true;
- RT_TRACE(COMP_PS, "_rtl92e_ps_update_rf_state(): Set RF to %s.\n",
+ RT_TRACE(COMP_PS, "%s(): Set RF to %s.\n", __func__,
pPSC->eInactivePowerState == eRfOff ? "OFF" : "ON");
rtl92e_set_rf_state(dev, pPSC->eInactivePowerState, RF_CHANGE_BY_IPS);
pPSC->bSwRfProcessing = false;
- RT_TRACE(COMP_PS, "_rtl92e_ps_update_rf_state() <---------\n");
+ RT_TRACE(COMP_PS, "%s() <---------\n", __func__);
}
void rtl92e_ips_enter(struct net_device *dev)
@@ -144,7 +144,7 @@ void rtl92e_ips_enter(struct net_device *dev)
if (rtState == eRfOn && !pPSC->bSwRfProcessing &&
(priv->rtllib->state != RTLLIB_LINKED) &&
(priv->rtllib->iw_mode != IW_MODE_MASTER)) {
- RT_TRACE(COMP_PS, "rtl92e_ips_enter(): Turn off RF.\n");
+ RT_TRACE(COMP_PS, "%s(): Turn off RF.\n", __func__);
pPSC->eInactivePowerState = eRfOff;
priv->isRFOff = true;
priv->bInPowerSaveMode = true;
@@ -164,7 +164,7 @@ void rtl92e_ips_leave(struct net_device *dev)
rtState = priv->rtllib->eRFPowerState;
if (rtState != eRfOn && !pPSC->bSwRfProcessing &&
priv->rtllib->RfOffReason <= RF_CHANGE_BY_IPS) {
- RT_TRACE(COMP_PS, "rtl92e_ips_leave(): Turn on RF.\n");
+ RT_TRACE(COMP_PS, "%s(): Turn on RF.\n", __func__);
pPSC->eInactivePowerState = eRfOn;
priv->bInPowerSaveMode = false;
_rtl92e_ps_update_rf_state(dev);
@@ -247,7 +247,7 @@ void rtl92e_leisure_ps_enter(struct net_device *dev)
struct rt_pwr_save_ctrl *pPSC = (struct rt_pwr_save_ctrl *)
&(priv->rtllib->PowerSaveControl);
- RT_TRACE(COMP_PS, "rtl92e_leisure_ps_enter()...\n");
+ RT_TRACE(COMP_PS, "%s()...\n", __func__);
RT_TRACE(COMP_PS,
"pPSC->bLeisurePs = %d, ieee->ps = %d,pPSC->LpsIdleCount is %d,RT_CHECK_FOR_HANG_PERIOD is %d\n",
pPSC->bLeisurePs, priv->rtllib->ps, pPSC->LpsIdleCount,
@@ -265,7 +265,7 @@ void rtl92e_leisure_ps_enter(struct net_device *dev)
if (priv->rtllib->ps == RTLLIB_PS_DISABLED) {
RT_TRACE(COMP_LPS,
- "rtl92e_leisure_ps_enter(): Enter 802.11 power save mode...\n");
+ "%s(): Enter 802.11 power save mode...\n", __func__);
if (!pPSC->bFwCtrlLPS) {
if (priv->rtllib->SetFwCmdHandler)
@@ -287,14 +287,14 @@ void rtl92e_leisure_ps_leave(struct net_device *dev)
&(priv->rtllib->PowerSaveControl);
- RT_TRACE(COMP_PS, "rtl92e_leisure_ps_leave()...\n");
+ RT_TRACE(COMP_PS, "%s()...\n", __func__);
RT_TRACE(COMP_PS, "pPSC->bLeisurePs = %d, ieee->ps = %d\n",
pPSC->bLeisurePs, priv->rtllib->ps);
if (pPSC->bLeisurePs) {
if (priv->rtllib->ps != RTLLIB_PS_DISABLED) {
RT_TRACE(COMP_LPS,
- "rtl92e_leisure_ps_leave(): Busy Traffic , Leave 802.11 power save..\n");
+ "%s(): Busy Traffic , Leave 802.11 power save..\n", __func__);
_rtl92e_ps_set_mode(dev, RTLLIB_PS_DISABLED);
if (!pPSC->bFwCtrlLPS) {
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c b/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c
index 7413a100ca19..f802f60281f8 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c
@@ -11,7 +11,7 @@
*
* Contact Information:
* wlanfae <wlanfae@realtek.com>
-******************************************************************************/
+ *****************************************************************************/
#include <linux/string.h>
#include "rtl_core.h"
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_wx.h b/drivers/staging/rtl8192e/rtl8192e/rtl_wx.h
index 7ecf6c5cf58b..c313fb79de4d 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_wx.h
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_wx.h
@@ -11,7 +11,7 @@
*
* Contact Information:
* wlanfae <wlanfae@realtek.com>
-******************************************************************************/
+ *****************************************************************************/
#ifndef R819x_WX_H
#define R819x_WX_H
diff --git a/drivers/staging/rtl8192e/rtl819x_BA.h b/drivers/staging/rtl8192e/rtl819x_BA.h
index 5002b4d1fe51..978c9a54043e 100644
--- a/drivers/staging/rtl8192e/rtl819x_BA.h
+++ b/drivers/staging/rtl8192e/rtl819x_BA.h
@@ -11,7 +11,7 @@
*
* Contact Information:
* wlanfae <wlanfae@realtek.com>
-******************************************************************************/
+ *****************************************************************************/
#ifndef _BATYPE_H_
#define _BATYPE_H_
diff --git a/drivers/staging/rtl8192e/rtl819x_BAProc.c b/drivers/staging/rtl8192e/rtl819x_BAProc.c
index 20260af49ee7..1d3963136295 100644
--- a/drivers/staging/rtl8192e/rtl819x_BAProc.c
+++ b/drivers/staging/rtl8192e/rtl819x_BAProc.c
@@ -88,7 +88,7 @@ static struct sk_buff *rtllib_ADDBA(struct rtllib_device *ieee, u8 *Dst,
return NULL;
}
skb = dev_alloc_skb(len + sizeof(struct rtllib_hdr_3addr));
- if (skb == NULL)
+ if (!skb)
return NULL;
memset(skb->data, 0, sizeof(struct rtllib_hdr_3addr));
@@ -154,7 +154,7 @@ static struct sk_buff *rtllib_DELBA(struct rtllib_device *ieee, u8 *dst,
DelbaParamSet.field.TID = pBA->BaParamSet.field.TID;
skb = dev_alloc_skb(len + sizeof(struct rtllib_hdr_3addr));
- if (skb == NULL)
+ if (!skb)
return NULL;
skb_reserve(skb, ieee->tx_headroom);
diff --git a/drivers/staging/rtl8192e/rtl819x_HT.h b/drivers/staging/rtl8192e/rtl819x_HT.h
index 6eb018f5feaa..24e86620c94c 100644
--- a/drivers/staging/rtl8192e/rtl819x_HT.h
+++ b/drivers/staging/rtl8192e/rtl819x_HT.h
@@ -11,7 +11,7 @@
*
* Contact Information:
* wlanfae <wlanfae@realtek.com>
-******************************************************************************/
+ *****************************************************************************/
#ifndef _RTL819XU_HTTYPE_H_
#define _RTL819XU_HTTYPE_H_
diff --git a/drivers/staging/rtl8192e/rtl819x_HTProc.c b/drivers/staging/rtl8192e/rtl819x_HTProc.c
index cded0f43cd33..4ae1d382ac5c 100644
--- a/drivers/staging/rtl8192e/rtl819x_HTProc.c
+++ b/drivers/staging/rtl8192e/rtl819x_HTProc.c
@@ -489,7 +489,7 @@ u8 HTGetHighestMCSRate(struct rtllib_device *ieee, u8 *pMCSRateSet,
if ((bitMap%2) != 0) {
if (HTMcsToDataRate(ieee, (8*i+j)) >
HTMcsToDataRate(ieee, mcsRate))
- mcsRate = (8*i+j);
+ mcsRate = 8 * i + j;
}
bitMap >>= 1;
}
diff --git a/drivers/staging/rtl8192e/rtl819x_Qos.h b/drivers/staging/rtl8192e/rtl819x_Qos.h
index 61da8f7475bb..576241233a35 100644
--- a/drivers/staging/rtl8192e/rtl819x_Qos.h
+++ b/drivers/staging/rtl8192e/rtl819x_Qos.h
@@ -11,7 +11,7 @@
*
* Contact Information:
* wlanfae <wlanfae@realtek.com>
-******************************************************************************/
+ *****************************************************************************/
#ifndef __INC_QOS_TYPE_H
#define __INC_QOS_TYPE_H
diff --git a/drivers/staging/rtl8192e/rtl819x_TS.h b/drivers/staging/rtl8192e/rtl819x_TS.h
index 2cabf4026e77..654c223030e3 100644
--- a/drivers/staging/rtl8192e/rtl819x_TS.h
+++ b/drivers/staging/rtl8192e/rtl819x_TS.h
@@ -11,7 +11,7 @@
*
* Contact Information:
* wlanfae <wlanfae@realtek.com>
-******************************************************************************/
+ *****************************************************************************/
#ifndef _TSTYPE_H_
#define _TSTYPE_H_
#include "rtl819x_Qos.h"
diff --git a/drivers/staging/rtl8192e/rtllib.h b/drivers/staging/rtl8192e/rtllib.h
index b895a537d3e4..827651b62791 100644
--- a/drivers/staging/rtl8192e/rtllib.h
+++ b/drivers/staging/rtl8192e/rtllib.h
@@ -619,7 +619,8 @@ struct ieee_ibss_seq {
/* NOTE: This data is for statistical purposes; not all hardware provides this
* information for frames received. Not setting these will not cause
- * any adverse affects. */
+ * any adverse affects.
+ */
struct rtllib_rx_stats {
u64 mac_time;
s8 rssi;
@@ -1138,7 +1139,8 @@ enum {WMM_all_frame, WMM_two_frame, WMM_four_frame, WMM_six_frame};
#define ETHER_ADDR_LEN 6 /* length of an Ethernet address */
#define ETHERNET_HEADER_SIZE 14 /* length of two Ethernet address
- * plus ether type*/
+ * plus ether type
+ */
enum erp_t {
ERP_NonERPpresent = 0x01,
diff --git a/drivers/staging/rtl8192e/rtllib_debug.h b/drivers/staging/rtl8192e/rtllib_debug.h
index f1c39c3f9d26..7b0e6e9c4456 100644
--- a/drivers/staging/rtl8192e/rtllib_debug.h
+++ b/drivers/staging/rtl8192e/rtllib_debug.h
@@ -17,7 +17,7 @@
*
* Contact Information:
* wlanfae <wlanfae@realtek.com>
-******************************************************************************/
+ *****************************************************************************/
#ifndef _RTL_DEBUG_H
#define _RTL_DEBUG_H
diff --git a/drivers/staging/rtl8192e/rtllib_module.c b/drivers/staging/rtl8192e/rtllib_module.c
index 9d5788e04dd5..cdf4c9060c51 100644
--- a/drivers/staging/rtl8192e/rtllib_module.c
+++ b/drivers/staging/rtl8192e/rtllib_module.c
@@ -1,30 +1,30 @@
/*******************************************************************************
-
- Copyright(c) 2004 Intel Corporation. All rights reserved.
-
- Portions of this file are based on the WEP enablement code provided by the
- Host AP project hostap-drivers v0.1.3
- Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen
- <jkmaline@cc.hut.fi>
- Copyright (c) 2002-2003, Jouni Malinen <jkmaline@cc.hut.fi>
-
- This program is free software; you can redistribute it and/or modify it
- under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- The full GNU General Public License is included in this distribution in the
- file called LICENSE.
-
- Contact Information:
- James P. Ketrenos <ipw2100-admin@linux.intel.com>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+ *
+ * Copyright(c) 2004 Intel Corporation. All rights reserved.
+ *
+ * Portions of this file are based on the WEP enablement code provided by the
+ * Host AP project hostap-drivers v0.1.3
+ * Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen
+ * <jkmaline@cc.hut.fi>
+ * Copyright (c) 2002-2003, Jouni Malinen <jkmaline@cc.hut.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * James P. Ketrenos <ipw2100-admin@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
#include <linux/compiler.h>
#include <linux/errno.h>
diff --git a/drivers/staging/rtl8192e/rtllib_wx.c b/drivers/staging/rtl8192e/rtllib_wx.c
index b1500ee9a5cf..c5c0d9676dc8 100644
--- a/drivers/staging/rtl8192e/rtllib_wx.c
+++ b/drivers/staging/rtl8192e/rtllib_wx.c
@@ -1,30 +1,30 @@
/******************************************************************************
-
- Copyright(c) 2004 Intel Corporation. All rights reserved.
-
- Portions of this file are based on the WEP enablement code provided by the
- Host AP project hostap-drivers v0.1.3
- Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen
- <jkmaline@cc.hut.fi>
- Copyright (c) 2002-2003, Jouni Malinen <jkmaline@cc.hut.fi>
-
- This program is free software; you can redistribute it and/or modify it
- under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- The full GNU General Public License is included in this distribution in the
- file called LICENSE.
-
- Contact Information:
- James P. Ketrenos <ipw2100-admin@linux.intel.com>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-******************************************************************************/
+ *
+ * Copyright(c) 2004 Intel Corporation. All rights reserved.
+ *
+ * Portions of this file are based on the WEP enablement code provided by the
+ * Host AP project hostap-drivers v0.1.3
+ * Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen
+ * <jkmaline@cc.hut.fi>
+ * Copyright (c) 2002-2003, Jouni Malinen <jkmaline@cc.hut.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * James P. Ketrenos <ipw2100-admin@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
#include <linux/wireless.h>
#include <linux/kmod.h>
#include <linux/module.h>
@@ -668,7 +668,7 @@ done:
if (ieee->set_security)
ieee->set_security(ieee->dev, &sec);
- if (ieee->reset_on_keychange &&
+ if (ieee->reset_on_keychange &&
ieee->iw_mode != IW_MODE_INFRA &&
ieee->reset_port && ieee->reset_port(dev)) {
netdev_dbg(ieee->dev, "Port reset failed\n");
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211.h b/drivers/staging/rtl8192u/ieee80211/ieee80211.h
index 097147071df0..899c77ed2a43 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211.h
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211.h
@@ -1456,10 +1456,10 @@ enum ieee80211_state {
-typedef struct tx_pending_t{
+struct tx_pending {
int frag;
struct ieee80211_txb *txb;
-}tx_pending_t;
+};
typedef struct _bandwidth_autoswitch {
long threshold_20Mhzto40Mhz;
@@ -1883,7 +1883,7 @@ struct ieee80211_device {
RT_POWER_SAVE_CONTROL PowerSaveControl;
//}
/* used if IEEE_SOFTMAC_TX_QUEUE is set */
- struct tx_pending_t tx_pending;
+ struct tx_pending tx_pending;
/* used if IEEE_SOFTMAC_ASSOCIATE is set */
struct timer_list associate_timer;
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c
index 2453413757b6..5039172409e3 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c
@@ -67,7 +67,7 @@ static void *ieee80211_tkip_init(int key_idx)
struct ieee80211_tkip_data *priv;
priv = kzalloc(sizeof(*priv), GFP_ATOMIC);
- if (priv == NULL)
+ if (!priv)
goto fail;
priv->key_idx = key_idx;
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c
index 0e8c876c1404..7ba4b07aa842 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c
@@ -42,7 +42,7 @@ static void *prism2_wep_init(int keyidx)
struct prism2_wep_data *priv;
priv = kzalloc(sizeof(*priv), GFP_ATOMIC);
- if (priv == NULL)
+ if (!priv)
return NULL;
priv->key_idx = keyidx;
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_module.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_module.c
index 5fdfff0816c5..a791175b86f5 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_module.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_module.c
@@ -283,8 +283,7 @@ int __init ieee80211_debug_init(void)
" proc directory\n");
return -EIO;
}
- e = proc_create("debug_level", S_IRUGO | S_IWUSR,
- ieee80211_proc, &fops);
+ e = proc_create("debug_level", 0644, ieee80211_proc, &fops);
if (!e) {
remove_proc_entry(DRV_NAME, init_net.proc_net);
ieee80211_proc = NULL;
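
The hunk above replaces the symbolic S_IRUGO | S_IWUSR pair with the octal literal 0644, the form checkpatch prefers for permission arguments. As a quick userspace sanity check of the equivalence (illustrative only, not part of the patch; S_IRUGO is a kernel-only shorthand for S_IRUSR | S_IRGRP | S_IROTH, so the individual bits are spelled out here):

#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
	/* S_IRUGO (kernel) == S_IRUSR | S_IRGRP | S_IROTH == 0444 */
	unsigned int mode = S_IRUSR | S_IRGRP | S_IROTH | S_IWUSR;

	printf("0%o\n", mode);	/* prints 0644, the literal used above */
	return 0;
}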
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
index 5241c5003ebf..7a31510f0524 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
@@ -559,10 +559,8 @@ void ieee80211_indicate_packets(struct ieee80211_device *ieee, struct ieee80211_
memcpy(skb_push(sub_skb, ETH_ALEN), prxb->src, ETH_ALEN);
memcpy(skb_push(sub_skb, ETH_ALEN), prxb->dst, ETH_ALEN);
} else {
- u16 len;
/* Leave Ethernet header part of hdr and full payload */
- len = htons(sub_skb->len);
- memcpy(skb_push(sub_skb, 2), &len, 2);
+ put_unaligned_be16(sub_skb->len, skb_push(sub_skb, 2));
memcpy(skb_push(sub_skb, ETH_ALEN), prxb->src, ETH_ALEN);
memcpy(skb_push(sub_skb, ETH_ALEN), prxb->dst, ETH_ALEN);
}
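
The hunk above drops the local htons() temporary and writes the 16-bit length straight into the two pushed header bytes with put_unaligned_be16(), which stores the value most-significant byte first at a possibly unaligned address. A minimal userspace sketch of that store (an illustration of the idiom, not the kernel helper itself):

#include <stdint.h>
#include <stdio.h>

/* Store a 16-bit value big-endian at an arbitrary byte address. */
static void put_be16(uint16_t val, void *p)
{
	uint8_t *b = p;

	b[0] = val >> 8;	/* most significant byte first */
	b[1] = val & 0xff;
}

int main(void)
{
	uint8_t hdr[2];

	put_be16(1500, hdr);			/* e.g. a length field */
	printf("%02x %02x\n", hdr[0], hdr[1]);	/* prints "05 dc" */
	return 0;
}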
@@ -920,7 +918,7 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
int i;
struct ieee80211_rxb *rxb = NULL;
- // cheat the the hdr type
+ // cheat the hdr type
hdr = (struct rtl_80211_hdr_4addr *)skb->data;
stats = &ieee->stats;
@@ -941,7 +939,7 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
if (HTCCheck(ieee, skb->data))
{
if(net_ratelimit())
- printk("find HTCControl\n");
+ printk("find HTCControl\n");
hdrlen += 4;
rx_stats->bContainHTC = true;
}
@@ -1317,7 +1315,7 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
} else {
u16 len;
/* Leave Ethernet header part of hdr and full payload */
- len = htons(sub_skb->len);
+ len = be16_to_cpu(htons(sub_skb->len));
memcpy(skb_push(sub_skb, 2), &len, 2);
memcpy(skb_push(sub_skb, ETH_ALEN), src, ETH_ALEN);
memcpy(skb_push(sub_skb, ETH_ALEN), dst, ETH_ALEN);
@@ -1480,13 +1478,15 @@ static int ieee80211_qos_convert_ac_to_parameters(struct
/* WMM spec P.11: The minimum value for AIFSN shall be 2 */
qos_param->aifs[aci] = (qos_param->aifs[aci] < 2) ? 2:qos_param->aifs[aci];
- qos_param->cw_min[aci] = ac_params->ecw_min_max & 0x0F;
+ qos_param->cw_min[aci] =
+ cpu_to_le16(ac_params->ecw_min_max & 0x0F);
- qos_param->cw_max[aci] = (ac_params->ecw_min_max & 0xF0) >> 4;
+ qos_param->cw_max[aci] =
+ cpu_to_le16((ac_params->ecw_min_max & 0xF0) >> 4);
qos_param->flag[aci] =
(ac_params->aci_aifsn & 0x10) ? 0x01 : 0x00;
- qos_param->tx_op_limit[aci] = le16_to_cpu(ac_params->tx_op_limit);
+ qos_param->tx_op_limit[aci] = ac_params->tx_op_limit;
}
return 0;
}
@@ -2394,39 +2394,41 @@ static inline void ieee80211_process_probe_response(
#ifdef CONFIG_IEEE80211_DEBUG
struct ieee80211_info_element *info_element = &beacon->info_element[0];
#endif
+ int fc = WLAN_FC_GET_STYPE(le16_to_cpu(beacon->header.frame_ctl));
unsigned long flags;
short renew;
+ u16 capability;
//u8 wmm_info;
memset(&network, 0, sizeof(struct ieee80211_network));
+ capability = le16_to_cpu(beacon->capability);
IEEE80211_DEBUG_SCAN(
"'%s' (%pM): %c%c%c%c %c%c%c%c-%c%c%c%c %c%c%c%c\n",
escape_essid(info_element->data, info_element->len),
beacon->header.addr3,
- (beacon->capability & (1<<0xf)) ? '1' : '0',
- (beacon->capability & (1<<0xe)) ? '1' : '0',
- (beacon->capability & (1<<0xd)) ? '1' : '0',
- (beacon->capability & (1<<0xc)) ? '1' : '0',
- (beacon->capability & (1<<0xb)) ? '1' : '0',
- (beacon->capability & (1<<0xa)) ? '1' : '0',
- (beacon->capability & (1<<0x9)) ? '1' : '0',
- (beacon->capability & (1<<0x8)) ? '1' : '0',
- (beacon->capability & (1<<0x7)) ? '1' : '0',
- (beacon->capability & (1<<0x6)) ? '1' : '0',
- (beacon->capability & (1<<0x5)) ? '1' : '0',
- (beacon->capability & (1<<0x4)) ? '1' : '0',
- (beacon->capability & (1<<0x3)) ? '1' : '0',
- (beacon->capability & (1<<0x2)) ? '1' : '0',
- (beacon->capability & (1<<0x1)) ? '1' : '0',
- (beacon->capability & (1<<0x0)) ? '1' : '0');
+ (capability & (1 << 0xf)) ? '1' : '0',
+ (capability & (1 << 0xe)) ? '1' : '0',
+ (capability & (1 << 0xd)) ? '1' : '0',
+ (capability & (1 << 0xc)) ? '1' : '0',
+ (capability & (1 << 0xb)) ? '1' : '0',
+ (capability & (1 << 0xa)) ? '1' : '0',
+ (capability & (1 << 0x9)) ? '1' : '0',
+ (capability & (1 << 0x8)) ? '1' : '0',
+ (capability & (1 << 0x7)) ? '1' : '0',
+ (capability & (1 << 0x6)) ? '1' : '0',
+ (capability & (1 << 0x5)) ? '1' : '0',
+ (capability & (1 << 0x4)) ? '1' : '0',
+ (capability & (1 << 0x3)) ? '1' : '0',
+ (capability & (1 << 0x2)) ? '1' : '0',
+ (capability & (1 << 0x1)) ? '1' : '0',
+ (capability & (1 << 0x0)) ? '1' : '0');
if (ieee80211_network_init(ieee, beacon, &network, stats)) {
IEEE80211_DEBUG_SCAN("Dropped '%s' (%pM) via %s.\n",
escape_essid(info_element->data,
info_element->len),
beacon->header.addr3,
- WLAN_FC_GET_STYPE(beacon->header.frame_ctl) ==
- IEEE80211_STYPE_PROBE_RESP ?
+ fc == IEEE80211_STYPE_PROBE_RESP ?
"PROBE RESPONSE" : "BEACON");
return;
}
@@ -2442,7 +2444,7 @@ static inline void ieee80211_process_probe_response(
return;
if (ieee->bGlobalDomain)
{
- if (WLAN_FC_GET_STYPE(beacon->header.frame_ctl) == IEEE80211_STYPE_PROBE_RESP)
+ if (fc == IEEE80211_STYPE_PROBE_RESP)
{
// Case 1: Country code
if(IS_COUNTRY_IE_VALID(ieee) )
@@ -2549,8 +2551,7 @@ static inline void ieee80211_process_probe_response(
escape_essid(network.ssid,
network.ssid_len),
network.bssid,
- WLAN_FC_GET_STYPE(beacon->header.frame_ctl) ==
- IEEE80211_STYPE_PROBE_RESP ?
+ fc == IEEE80211_STYPE_PROBE_RESP ?
"PROBE RESPONSE" : "BEACON");
#endif
memcpy(target, &network, sizeof(*target));
@@ -2562,8 +2563,7 @@ static inline void ieee80211_process_probe_response(
escape_essid(target->ssid,
target->ssid_len),
target->bssid,
- WLAN_FC_GET_STYPE(beacon->header.frame_ctl) ==
- IEEE80211_STYPE_PROBE_RESP ?
+ fc == IEEE80211_STYPE_PROBE_RESP ?
"PROBE RESPONSE" : "BEACON");
/* we have an entry and we are going to update it. But this entry may
@@ -2600,11 +2600,11 @@ void ieee80211_rx_mgt(struct ieee80211_device *ieee,
struct rtl_80211_hdr_4addr *header,
struct ieee80211_rx_stats *stats)
{
- switch (WLAN_FC_GET_STYPE(header->frame_ctl)) {
+ switch (WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl))) {
case IEEE80211_STYPE_BEACON:
IEEE80211_DEBUG_MGMT("received BEACON (%d)\n",
- WLAN_FC_GET_STYPE(header->frame_ctl));
+ WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)));
IEEE80211_DEBUG_SCAN("Beacon\n");
ieee80211_process_probe_response(
ieee, (struct ieee80211_probe_response *)header, stats);
@@ -2612,7 +2612,7 @@ void ieee80211_rx_mgt(struct ieee80211_device *ieee,
case IEEE80211_STYPE_PROBE_RESP:
IEEE80211_DEBUG_MGMT("received PROBE RESPONSE (%d)\n",
- WLAN_FC_GET_STYPE(header->frame_ctl));
+ WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)));
IEEE80211_DEBUG_SCAN("Probe response\n");
ieee80211_process_probe_response(
ieee, (struct ieee80211_probe_response *)header, stats);
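
Several hunks in this file wrap frame_ctl and capability in le16_to_cpu() before masking them, since 802.11 header fields arrive little-endian on the wire; caching the converted value in the fc and capability locals also avoids repeating the conversion. A userspace sketch of the load (illustrative; the subtype value below is the usual beacon encoding, not a constant taken from the driver's headers):

#include <stdint.h>
#include <stdio.h>

/* Read a 16-bit little-endian field from a raw frame buffer. */
static uint16_t get_le16(const void *p)
{
	const uint8_t *b = p;

	return (uint16_t)b[0] | ((uint16_t)b[1] << 8);
}

int main(void)
{
	/* Frame control of a beacon: type = management, subtype = 8. */
	const uint8_t buf[2] = { 0x80, 0x00 };

	printf("frame_ctl = 0x%04x\n", get_le16(buf));	/* 0x0080 */
	return 0;
}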
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
index 0ea90aae4283..14aea26804f4 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
@@ -466,7 +466,7 @@ void ieee80211_softmac_scan_syncro(struct ieee80211_device *ieee)
goto out;
ieee->set_chan(ieee->dev, ch);
if(channel_map[ch] == 1)
- ieee80211_send_probe_requests(ieee);
+ ieee80211_send_probe_requests(ieee);
/* this prevent excessive time wait when we
* need to wait for a syncro scan to end..
@@ -3025,7 +3025,7 @@ static int ieee80211_wpa_set_encryption(struct ieee80211_device *ieee,
ieee80211_crypt_delayed_deinit(ieee, crypt);
new_crypt = kzalloc(sizeof(*new_crypt), GFP_KERNEL);
- if (new_crypt == NULL) {
+ if (!new_crypt) {
ret = -ENOMEM;
goto done;
}
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c
index 5704e4d7aa68..bdb96a45a9eb 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c
@@ -171,7 +171,7 @@ static inline int ieee80211_put_snap(u8 *data, u16 h_proto)
snap->oui[1] = oui[1];
snap->oui[2] = oui[2];
- *(u16 *)(data + SNAP_SIZE) = htons(h_proto);
+ *(__be16 *)(data + SNAP_SIZE) = htons(h_proto);
return SNAP_SIZE + sizeof(u16);
}
@@ -281,7 +281,6 @@ ieee80211_classify(struct sk_buff *skb, struct ieee80211_network *network)
if (eth->h_proto != htons(ETH_P_IP))
return 0;
-// IEEE80211_DEBUG_DATA(IEEE80211_DL_DATA, skb->data, skb->len);
ip = ip_hdr(skb);
switch (ip->tos & 0xfc) {
case 0x20:
@@ -887,7 +886,6 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
if (tcb_desc->bMulticast || tcb_desc->bBroadcast)
tcb_desc->data_rate = ieee->basic_rate;
else
- //tcb_desc->data_rate = CURRENT_RATE(ieee->current_network.mode, ieee->rate, ieee->HTCurrentOperaRate);
tcb_desc->data_rate = CURRENT_RATE(ieee->mode, ieee->rate, ieee->HTCurrentOperaRate);
ieee80211_qurey_ShortPreambleMode(ieee, tcb_desc);
ieee80211_tx_query_agg_cap(ieee, txb->fragments[0], tcb_desc);
@@ -895,8 +893,6 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
ieee80211_query_BandwidthMode(ieee, tcb_desc);
ieee80211_query_protectionmode(ieee, tcb_desc, txb->fragments[0]);
ieee80211_query_seqnum(ieee, txb->fragments[0], header.addr1);
-// IEEE80211_DEBUG_DATA(IEEE80211_DL_DATA, txb->fragments[0]->data, txb->fragments[0]->len);
- //IEEE80211_DEBUG_DATA(IEEE80211_DL_DATA, tcb_desc, sizeof(cb_desc));
}
spin_unlock_irqrestore(&ieee->lock, flags);
dev_kfree_skb_any(skb);
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c
index e383ec2fb335..c925e53bf013 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c
@@ -362,7 +362,7 @@ int ieee80211_wx_set_encode(struct ieee80211_device *ieee,
/* take WEP into use */
new_crypt = kzalloc(sizeof(struct ieee80211_crypt_data),
GFP_KERNEL);
- if (new_crypt == NULL)
+ if (!new_crypt)
return -ENOMEM;
new_crypt->ops = ieee80211_get_crypto_ops("WEP");
if (!new_crypt->ops) {
@@ -610,7 +610,7 @@ int ieee80211_wx_set_encode_ext(struct ieee80211_device *ieee,
ieee80211_crypt_delayed_deinit(ieee, crypt);
new_crypt = kzalloc(sizeof(*new_crypt), GFP_KERNEL);
- if (new_crypt == NULL) {
+ if (!new_crypt) {
ret = -ENOMEM;
goto done;
}
@@ -665,7 +665,7 @@ done:
if (ieee->set_security)
ieee->set_security(ieee->dev, &sec);
- if (ieee->reset_on_keychange &&
+ if (ieee->reset_on_keychange &&
ieee->iw_mode != IW_MODE_INFRA &&
ieee->reset_port && ieee->reset_port(dev)) {
IEEE80211_DEBUG_WX("%s: reset_port failed\n", dev->name);
diff --git a/drivers/staging/rtl8192u/ieee80211/rtl819x_BA.h b/drivers/staging/rtl8192u/ieee80211/rtl819x_BA.h
index 2c398ca9a8ac..7abedc27d7c1 100644
--- a/drivers/staging/rtl8192u/ieee80211/rtl819x_BA.h
+++ b/drivers/staging/rtl8192u/ieee80211/rtl819x_BA.h
@@ -31,8 +31,8 @@ typedef union _SEQUENCE_CONTROL{
struct {
u16 FragNum:4;
u16 SeqNum:12;
- }field;
-}SEQUENCE_CONTROL, *PSEQUENCE_CONTROL;
+ } field;
+} SEQUENCE_CONTROL, *PSEQUENCE_CONTROL;
typedef union _BA_PARAM_SET {
u8 charData[2];
diff --git a/drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c b/drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c
index 6619b8fb9700..e82b5073c3f1 100644
--- a/drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c
+++ b/drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c
@@ -117,7 +117,7 @@ static struct sk_buff *ieee80211_ADDBA(struct ieee80211_device *ieee, u8 *Dst, P
return NULL;
}
skb = dev_alloc_skb(len + sizeof( struct rtl_80211_hdr_3addr)); //need to add something others? FIXME
- if (skb == NULL) {
+ if (!skb) {
IEEE80211_DEBUG(IEEE80211_DL_ERR, "can't alloc skb for ADDBA_REQ\n");
return NULL;
}
@@ -202,7 +202,7 @@ static struct sk_buff *ieee80211_DELBA(
DelbaParamSet.field.TID = pBA->BaParamSet.field.TID;
skb = dev_alloc_skb(len + sizeof( struct rtl_80211_hdr_3addr)); //need to add something others? FIXME
- if (skb == NULL) {
+ if (!skb) {
IEEE80211_DEBUG(IEEE80211_DL_ERR, "can't alloc skb for ADDBA_REQ\n");
return NULL;
}
diff --git a/drivers/staging/rtl8192u/ieee80211/rtl819x_HTProc.c b/drivers/staging/rtl8192u/ieee80211/rtl819x_HTProc.c
index c27397b14adb..60720997784b 100644
--- a/drivers/staging/rtl8192u/ieee80211/rtl819x_HTProc.c
+++ b/drivers/staging/rtl8192u/ieee80211/rtl819x_HTProc.c
@@ -976,17 +976,16 @@ void HTOnAssocRsp(struct ieee80211_device *ieee)
//
HTSetConnectBwMode(ieee, (HT_CHANNEL_WIDTH)(pPeerHTCap->ChlWidth), (HT_EXTCHNL_OFFSET)(pPeerHTInfo->ExtChlOffset));
-// if (pHTInfo->bCurBW40MHz)
- pHTInfo->bCurTxBW40MHz = ((pPeerHTInfo->RecommemdedTxWidth == 1)?true:false);
+ pHTInfo->bCurTxBW40MHz = (pPeerHTInfo->RecommemdedTxWidth == 1);
//
// Update short GI/ long GI setting
//
// TODO:
- pHTInfo->bCurShortGI20MHz=
- ((pHTInfo->bRegShortGI20MHz)?((pPeerHTCap->ShortGI20Mhz==1)?true:false):false);
- pHTInfo->bCurShortGI40MHz=
- ((pHTInfo->bRegShortGI40MHz)?((pPeerHTCap->ShortGI40Mhz==1)?true:false):false);
+ pHTInfo->bCurShortGI20MHz = pHTInfo->bRegShortGI20MHz &&
+ (pPeerHTCap->ShortGI20Mhz == 1);
+ pHTInfo->bCurShortGI40MHz = pHTInfo->bRegShortGI40MHz &&
+ (pPeerHTCap->ShortGI40Mhz == 1);
//
// Config TX STBC setting
@@ -997,8 +996,8 @@ void HTOnAssocRsp(struct ieee80211_device *ieee)
// Config DSSS/CCK mode in 40MHz mode
//
// TODO:
- pHTInfo->bCurSuppCCK =
- ((pHTInfo->bRegSuppCCK)?((pPeerHTCap->DssCCk==1)?true:false):false);
+ pHTInfo->bCurSuppCCK = pHTInfo->bRegSuppCCK &&
+ (pPeerHTCap->DssCCk == 1);
//
diff --git a/drivers/staging/rtl8192u/r8192U.h b/drivers/staging/rtl8192u/r8192U.h
index a7ba8f37384e..e702afb5a70e 100644
--- a/drivers/staging/rtl8192u/r8192U.h
+++ b/drivers/staging/rtl8192u/r8192U.h
@@ -793,12 +793,12 @@ typedef struct _phy_cck_rx_status_report_819xusb {
} phy_sts_cck_819xusb_t;
-typedef struct _phy_ofdm_rx_status_rxsc_sgien_exintfflag {
+struct phy_ofdm_rx_status_rxsc_sgien_exintfflag {
u8 reserved:4;
u8 rxsc:2;
u8 sgi_en:1;
u8 ex_intf_flag:1;
-} phy_ofdm_rx_status_rxsc_sgien_exintfflag;
+};
typedef enum _RT_CUSTOMER_ID {
RT_CID_DEFAULT = 0,
@@ -1041,10 +1041,10 @@ typedef struct r8192_priv {
u8 rfc_txpowertrackingindex;
u8 rfc_txpowertrackingindex_real;
- s8 cck_present_attentuation;
- u8 cck_present_attentuation_20Mdefault;
- u8 cck_present_attentuation_40Mdefault;
- s8 cck_present_attentuation_difference;
+ s8 cck_present_attenuation;
+ u8 cck_present_attenuation_20Mdefault;
+ u8 cck_present_attenuation_40Mdefault;
+ s8 cck_present_attenuation_difference;
bool btxpower_tracking;
bool bcck_in_ch14;
bool btxpowerdata_readfromEEPORM;
diff --git a/drivers/staging/rtl8192u/r8192U_core.c b/drivers/staging/rtl8192u/r8192U_core.c
index b631990b4969..9f370e8d84d3 100644
--- a/drivers/staging/rtl8192u/r8192U_core.c
+++ b/drivers/staging/rtl8192u/r8192U_core.c
@@ -269,7 +269,7 @@ int write_nic_byte_E(struct net_device *dev, int indx, u8 data)
indx | 0xfe00, 0, usbdata, 1, HZ / 2);
kfree(usbdata);
- if (status < 0){
+ if (status < 0) {
netdev_err(dev, "write_nic_byte_E TimeOut! status: %d\n",
status);
return status;
@@ -1814,7 +1814,7 @@ static void rtl8192_link_change(struct net_device *dev)
}
}
-static struct ieee80211_qos_parameters def_qos_parameters = {
+static const struct ieee80211_qos_parameters def_qos_parameters = {
{cpu_to_le16(3), cpu_to_le16(3), cpu_to_le16(3), cpu_to_le16(3)},
{cpu_to_le16(7), cpu_to_le16(7), cpu_to_le16(7), cpu_to_le16(7)},
{2, 2, 2, 2},/* aifs */
@@ -2519,7 +2519,7 @@ static int rtl8192_read_eeprom_info(struct net_device *dev)
for (i = 0; i < 3; i++) {
if (bLoad_From_EEPOM) {
ret = eprom_read(dev, (EEPROM_TxPwIndex_OFDM_24G + i) >> 1);
- if ( ret < 0)
+ if (ret < 0)
return ret;
if (((EEPROM_TxPwIndex_OFDM_24G + i) % 2) == 0)
tmpValue = (u16)ret & 0x00ff;
@@ -3023,14 +3023,14 @@ static bool rtl8192_adapter_start(struct net_device *dev)
for (i = 0; i < CCKTxBBGainTableLength; i++) {
if (TempCCk == priv->cck_txbbgain_table[i].ccktxbb_valuearray[0]) {
- priv->cck_present_attentuation_20Mdefault = (u8)i;
+ priv->cck_present_attenuation_20Mdefault = (u8)i;
break;
}
}
- priv->cck_present_attentuation_40Mdefault = 0;
- priv->cck_present_attentuation_difference = 0;
- priv->cck_present_attentuation =
- priv->cck_present_attentuation_20Mdefault;
+ priv->cck_present_attenuation_40Mdefault = 0;
+ priv->cck_present_attenuation_difference = 0;
+ priv->cck_present_attenuation =
+ priv->cck_present_attenuation_20Mdefault;
}
}
write_nic_byte(dev, 0x87, 0x0);
@@ -4242,7 +4242,7 @@ static void rtl8192_query_rxphystatus(struct r8192_priv *priv,
{
phy_sts_ofdm_819xusb_t *pofdm_buf;
phy_sts_cck_819xusb_t *pcck_buf;
- phy_ofdm_rx_status_rxsc_sgien_exintfflag *prxsc;
+ struct phy_ofdm_rx_status_rxsc_sgien_exintfflag *prxsc;
u8 *prxpkt;
u8 i, max_spatial_stream, tmp_rxsnr, tmp_rxevm, rxsc_sgien_exflg;
s8 rx_pwr[4], rx_pwr_all = 0;
@@ -4432,7 +4432,7 @@ static void rtl8192_query_rxphystatus(struct r8192_priv *priv,
/* record rx statistics for debug */
rxsc_sgien_exflg = pofdm_buf->rxsc_sgien_exflg;
- prxsc = (phy_ofdm_rx_status_rxsc_sgien_exintfflag *)
+ prxsc = (struct phy_ofdm_rx_status_rxsc_sgien_exintfflag *)
&rxsc_sgien_exflg;
if (pdrvinfo->BW) /* 40M channel */
priv->stats.received_bwtype[1 + prxsc->rxsc]++;
diff --git a/drivers/staging/rtl8192u/r8192U_dm.c b/drivers/staging/rtl8192u/r8192U_dm.c
index 9209aad0515e..975f707827e1 100644
--- a/drivers/staging/rtl8192u/r8192U_dm.c
+++ b/drivers/staging/rtl8192u/r8192U_dm.c
@@ -598,8 +598,8 @@ static void dm_TXPowerTrackingCallback_TSSI(struct net_device *dev)
RT_TRACE(COMP_POWER_TRACKING, "tx power track is done\n");
RT_TRACE(COMP_POWER_TRACKING, "priv->rfa_txpowertrackingindex = %d\n", priv->rfa_txpowertrackingindex);
RT_TRACE(COMP_POWER_TRACKING, "priv->rfa_txpowertrackingindex_real = %d\n", priv->rfa_txpowertrackingindex_real);
- RT_TRACE(COMP_POWER_TRACKING, "priv->cck_present_attentuation_difference = %d\n", priv->cck_present_attentuation_difference);
- RT_TRACE(COMP_POWER_TRACKING, "priv->cck_present_attentuation = %d\n", priv->cck_present_attentuation);
+ RT_TRACE(COMP_POWER_TRACKING, "priv->cck_present_attenuation_difference = %d\n", priv->cck_present_attenuation_difference);
+ RT_TRACE(COMP_POWER_TRACKING, "priv->cck_present_attenuation = %d\n", priv->cck_present_attenuation);
return;
}
if (Avg_TSSI_Meas_from_driver < TSSI_13dBm - E_FOR_TX_POWER_TRACK) {
@@ -618,17 +618,17 @@ static void dm_TXPowerTrackingCallback_TSSI(struct net_device *dev)
}
}
- priv->cck_present_attentuation_difference
+ priv->cck_present_attenuation_difference
= priv->rfa_txpowertrackingindex - priv->rfa_txpowertracking_default;
if (priv->CurrentChannelBW == HT_CHANNEL_WIDTH_20)
- priv->cck_present_attentuation
- = priv->cck_present_attentuation_20Mdefault + priv->cck_present_attentuation_difference;
+ priv->cck_present_attenuation
+ = priv->cck_present_attenuation_20Mdefault + priv->cck_present_attenuation_difference;
else
- priv->cck_present_attentuation
- = priv->cck_present_attentuation_40Mdefault + priv->cck_present_attentuation_difference;
+ priv->cck_present_attenuation
+ = priv->cck_present_attenuation_40Mdefault + priv->cck_present_attenuation_difference;
- if (priv->cck_present_attentuation > -1 && priv->cck_present_attentuation < 23) {
+ if (priv->cck_present_attenuation > -1 && priv->cck_present_attenuation < 23) {
if (priv->ieee80211->current_network.channel == 14 && !priv->bcck_in_ch14) {
priv->bcck_in_ch14 = true;
dm_cck_txpower_adjust(dev, priv->bcck_in_ch14);
@@ -640,10 +640,10 @@ static void dm_TXPowerTrackingCallback_TSSI(struct net_device *dev)
}
RT_TRACE(COMP_POWER_TRACKING, "priv->rfa_txpowertrackingindex = %d\n", priv->rfa_txpowertrackingindex);
RT_TRACE(COMP_POWER_TRACKING, "priv->rfa_txpowertrackingindex_real = %d\n", priv->rfa_txpowertrackingindex_real);
- RT_TRACE(COMP_POWER_TRACKING, "priv->cck_present_attentuation_difference = %d\n", priv->cck_present_attentuation_difference);
- RT_TRACE(COMP_POWER_TRACKING, "priv->cck_present_attentuation = %d\n", priv->cck_present_attentuation);
+ RT_TRACE(COMP_POWER_TRACKING, "priv->cck_present_attenuation_difference = %d\n", priv->cck_present_attenuation_difference);
+ RT_TRACE(COMP_POWER_TRACKING, "priv->cck_present_attenuation = %d\n", priv->cck_present_attenuation);
- if (priv->cck_present_attentuation_difference <= -12 || priv->cck_present_attentuation_difference >= 24) {
+ if (priv->cck_present_attenuation_difference <= -12 || priv->cck_present_attenuation_difference >= 24) {
priv->ieee80211->bdynamic_txpower_enable = true;
write_nic_byte(dev, 0x1ba, 0);
RT_TRACE(COMP_POWER_TRACKING, "tx power track--->limited\n");
@@ -725,10 +725,15 @@ static void dm_TXPowerTrackingCallback_ThermalMeter(struct net_device *dev)
} else {
tmpval = (u8)tmpRegA - priv->ThermalMeter[0];
- if (tmpval >= 6) /* higher temperature */
- tmpOFDMindex = tmpCCK20Mindex = 0; /* max to +6dB */
- else
- tmpOFDMindex = tmpCCK20Mindex = 6 - tmpval;
+ if (tmpval >= 6) {
+ /* higher temperature */
+ tmpOFDMindex = 0;
+ tmpCCK20Mindex = 0;
+ } else {
+ /* max to +6dB */
+ tmpOFDMindex = 6 - tmpval;
+ tmpCCK20Mindex = 6 - tmpval;
+ }
tmpCCK40Mindex = 0;
}
/*DbgPrint("%ddb, tmpOFDMindex = %d, tmpCCK20Mindex = %d, tmpCCK40Mindex = %d",
@@ -1379,35 +1384,35 @@ static void dm_CCKTxPowerAdjust_TSSI(struct net_device *dev, bool bInCH14)
TempVal = 0;
if (!bInCH14) {
/* Write 0xa22 0xa23 */
- TempVal = priv->cck_txbbgain_table[priv->cck_present_attentuation].ccktxbb_valuearray[0] +
- (priv->cck_txbbgain_table[priv->cck_present_attentuation].ccktxbb_valuearray[1]<<8);
+ TempVal = priv->cck_txbbgain_table[priv->cck_present_attenuation].ccktxbb_valuearray[0] +
+ (priv->cck_txbbgain_table[priv->cck_present_attenuation].ccktxbb_valuearray[1]<<8);
rtl8192_setBBreg(dev, rCCK0_TxFilter1, bMaskHWord, TempVal);
/* Write 0xa24 ~ 0xa27 */
- TempVal = priv->cck_txbbgain_table[priv->cck_present_attentuation].ccktxbb_valuearray[2] +
- (priv->cck_txbbgain_table[priv->cck_present_attentuation].ccktxbb_valuearray[3]<<8) +
- (priv->cck_txbbgain_table[priv->cck_present_attentuation].ccktxbb_valuearray[4]<<16)+
- (priv->cck_txbbgain_table[priv->cck_present_attentuation].ccktxbb_valuearray[5]<<24);
+ TempVal = priv->cck_txbbgain_table[priv->cck_present_attenuation].ccktxbb_valuearray[2] +
+ (priv->cck_txbbgain_table[priv->cck_present_attenuation].ccktxbb_valuearray[3]<<8) +
+ (priv->cck_txbbgain_table[priv->cck_present_attenuation].ccktxbb_valuearray[4]<<16)+
+ (priv->cck_txbbgain_table[priv->cck_present_attenuation].ccktxbb_valuearray[5]<<24);
rtl8192_setBBreg(dev, rCCK0_TxFilter2, bMaskDWord, TempVal);
/* Write 0xa28 0xa29 */
- TempVal = priv->cck_txbbgain_table[priv->cck_present_attentuation].ccktxbb_valuearray[6] +
- (priv->cck_txbbgain_table[priv->cck_present_attentuation].ccktxbb_valuearray[7]<<8);
+ TempVal = priv->cck_txbbgain_table[priv->cck_present_attenuation].ccktxbb_valuearray[6] +
+ (priv->cck_txbbgain_table[priv->cck_present_attenuation].ccktxbb_valuearray[7]<<8);
rtl8192_setBBreg(dev, rCCK0_DebugPort, bMaskLWord, TempVal);
} else {
- TempVal = priv->cck_txbbgain_ch14_table[priv->cck_present_attentuation].ccktxbb_valuearray[0] +
- (priv->cck_txbbgain_ch14_table[priv->cck_present_attentuation].ccktxbb_valuearray[1]<<8);
+ TempVal = priv->cck_txbbgain_ch14_table[priv->cck_present_attenuation].ccktxbb_valuearray[0] +
+ (priv->cck_txbbgain_ch14_table[priv->cck_present_attenuation].ccktxbb_valuearray[1]<<8);
rtl8192_setBBreg(dev, rCCK0_TxFilter1, bMaskHWord, TempVal);
/* Write 0xa24 ~ 0xa27 */
- TempVal = priv->cck_txbbgain_ch14_table[priv->cck_present_attentuation].ccktxbb_valuearray[2] +
- (priv->cck_txbbgain_ch14_table[priv->cck_present_attentuation].ccktxbb_valuearray[3]<<8) +
- (priv->cck_txbbgain_ch14_table[priv->cck_present_attentuation].ccktxbb_valuearray[4]<<16)+
- (priv->cck_txbbgain_ch14_table[priv->cck_present_attentuation].ccktxbb_valuearray[5]<<24);
+ TempVal = priv->cck_txbbgain_ch14_table[priv->cck_present_attenuation].ccktxbb_valuearray[2] +
+ (priv->cck_txbbgain_ch14_table[priv->cck_present_attenuation].ccktxbb_valuearray[3]<<8) +
+ (priv->cck_txbbgain_ch14_table[priv->cck_present_attenuation].ccktxbb_valuearray[4]<<16)+
+ (priv->cck_txbbgain_ch14_table[priv->cck_present_attenuation].ccktxbb_valuearray[5]<<24);
rtl8192_setBBreg(dev, rCCK0_TxFilter2, bMaskDWord, TempVal);
/* Write 0xa28 0xa29 */
- TempVal = priv->cck_txbbgain_ch14_table[priv->cck_present_attentuation].ccktxbb_valuearray[6] +
- (priv->cck_txbbgain_ch14_table[priv->cck_present_attentuation].ccktxbb_valuearray[7]<<8);
+ TempVal = priv->cck_txbbgain_ch14_table[priv->cck_present_attenuation].ccktxbb_valuearray[6] +
+ (priv->cck_txbbgain_ch14_table[priv->cck_present_attenuation].ccktxbb_valuearray[7]<<8);
rtl8192_setBBreg(dev, rCCK0_DebugPort, bMaskLWord, TempVal);
}
@@ -1490,7 +1495,7 @@ static void dm_txpower_reset_recovery(
RT_TRACE(COMP_POWER_TRACKING, "Reset Recovery: Fill in 0xc80 is %08x\n", priv->txbbgain_table[priv->rfa_txpowertrackingindex].txbbgain_value);
RT_TRACE(COMP_POWER_TRACKING, "Reset Recovery: Fill in RFA_txPowerTrackingIndex is %x\n", priv->rfa_txpowertrackingindex);
RT_TRACE(COMP_POWER_TRACKING, "Reset Recovery : RF A I/Q Amplify Gain is %ld\n", priv->txbbgain_table[priv->rfa_txpowertrackingindex].txbb_iq_amplifygain);
- RT_TRACE(COMP_POWER_TRACKING, "Reset Recovery: CCK Attenuation is %d dB\n", priv->cck_present_attentuation);
+ RT_TRACE(COMP_POWER_TRACKING, "Reset Recovery: CCK Attenuation is %d dB\n", priv->cck_present_attenuation);
dm_cck_txpower_adjust(dev, priv->bcck_in_ch14);
rtl8192_setBBreg(dev, rOFDM0_XCTxIQImbalance, bMaskDWord, priv->txbbgain_table[priv->rfc_txpowertrackingindex].txbbgain_value);
@@ -2304,10 +2309,10 @@ static void dm_check_edca_turbo(
/* For Each time updating EDCA parameter, reset EDCA turbo mode status. */
dm_init_edca_turbo(dev);
u1bAIFS = qos_parameters->aifs[0] * ((mode&(IEEE_G|IEEE_N_24G)) ? 9 : 20) + aSifsTime;
- u4bAcParam = (((u32)(qos_parameters->tx_op_limit[0])) << AC_PARAM_TXOP_LIMIT_OFFSET)|
- (((u32)(qos_parameters->cw_max[0])) << AC_PARAM_ECW_MAX_OFFSET)|
- (((u32)(qos_parameters->cw_min[0])) << AC_PARAM_ECW_MIN_OFFSET)|
- ((u32)u1bAIFS << AC_PARAM_AIFS_OFFSET);
+ u4bAcParam = (((le16_to_cpu(qos_parameters->tx_op_limit[0])) << AC_PARAM_TXOP_LIMIT_OFFSET)|
+ ((le16_to_cpu(qos_parameters->cw_max[0])) << AC_PARAM_ECW_MAX_OFFSET)|
+ ((le16_to_cpu(qos_parameters->cw_min[0])) << AC_PARAM_ECW_MIN_OFFSET)|
+ ((u32)u1bAIFS << AC_PARAM_AIFS_OFFSET));
/*write_nic_dword(dev, WDCAPARA_ADD[i], u4bAcParam);*/
write_nic_dword(dev, EDCAPARA_BE, u4bAcParam);
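
The EDCA hunk above converts the le16 per-AC fields to host order before shifting them, together with the computed AIFS time, into the 32-bit value written to EDCAPARA_BE. A standalone sketch of that packing; the *_OFFSET values below are assumptions picked to mirror the AC_PARAM_* names in the diff, not copied from the driver's headers:

#include <stdint.h>
#include <stdio.h>

/* Assumed illustrative bit offsets for the packed EDCA register image. */
#define AC_PARAM_TXOP_LIMIT_OFFSET	16
#define AC_PARAM_ECW_MAX_OFFSET		12
#define AC_PARAM_ECW_MIN_OFFSET		8
#define AC_PARAM_AIFS_OFFSET		0

static uint32_t pack_edca(uint16_t txop, uint8_t ecw_max, uint8_t ecw_min,
			  uint8_t aifs_us)
{
	return ((uint32_t)txop << AC_PARAM_TXOP_LIMIT_OFFSET) |
	       ((uint32_t)ecw_max << AC_PARAM_ECW_MAX_OFFSET) |
	       ((uint32_t)ecw_min << AC_PARAM_ECW_MIN_OFFSET) |
	       ((uint32_t)aifs_us << AC_PARAM_AIFS_OFFSET);
}

int main(void)
{
	/* AIFSN 2, 9 us slots, 10 us SIFS -> 28 us, as in the u1bAIFS math. */
	printf("0x%08x\n", pack_edca(0, 10, 4, 2 * 9 + 10));	/* 0x0000a41c */
	return 0;
}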
diff --git a/drivers/staging/rtl8192u/r819xU_cmdpkt.c b/drivers/staging/rtl8192u/r819xU_cmdpkt.c
index 3e0731b04619..bb6d8bd6c7ac 100644
--- a/drivers/staging/rtl8192u/r819xU_cmdpkt.c
+++ b/drivers/staging/rtl8192u/r819xU_cmdpkt.c
@@ -495,7 +495,7 @@ u32 cmpk_message_handle_rx(struct net_device *dev,
u8 element_id;
u8 *pcmd_buff;
- /* 0. Check inpt arguments. If is is a command queue message or
+ /* 0. Check inpt arguments. It is a command queue message or
* pointer is null.
*/
if (pstats == NULL)
diff --git a/drivers/staging/rtl8192u/r819xU_phy.c b/drivers/staging/rtl8192u/r819xU_phy.c
index c99130fdb8ee..3874f8307117 100644
--- a/drivers/staging/rtl8192u/r819xU_phy.c
+++ b/drivers/staging/rtl8192u/r819xU_phy.c
@@ -1559,17 +1559,17 @@ void rtl8192_SetBWModeWorkItem(struct net_device *dev)
0x00100000, 1);
/* Correct the tx power for CCK rate in 20M. */
- priv->cck_present_attentuation =
- priv->cck_present_attentuation_20Mdefault +
- priv->cck_present_attentuation_difference;
-
- if (priv->cck_present_attentuation > 22)
- priv->cck_present_attentuation = 22;
- if (priv->cck_present_attentuation < 0)
- priv->cck_present_attentuation = 0;
+ priv->cck_present_attenuation =
+ priv->cck_present_attenuation_20Mdefault +
+ priv->cck_present_attenuation_difference;
+
+ if (priv->cck_present_attenuation > 22)
+ priv->cck_present_attenuation = 22;
+ if (priv->cck_present_attenuation < 0)
+ priv->cck_present_attenuation = 0;
RT_TRACE(COMP_INIT,
"20M, pHalData->CCKPresentAttentuation = %d\n",
- priv->cck_present_attentuation);
+ priv->cck_present_attenuation);
if (priv->chan == 14 && !priv->bcck_in_ch14) {
priv->bcck_in_ch14 = true;
@@ -1590,18 +1590,18 @@ void rtl8192_SetBWModeWorkItem(struct net_device *dev)
rtl8192_setBBreg(dev, rFPGA0_AnalogParameter1, 0x00100000, 0);
rtl8192_setBBreg(dev, rOFDM1_LSTF, 0xC00,
priv->nCur40MhzPrimeSC);
- priv->cck_present_attentuation =
- priv->cck_present_attentuation_40Mdefault +
- priv->cck_present_attentuation_difference;
+ priv->cck_present_attenuation =
+ priv->cck_present_attenuation_40Mdefault +
+ priv->cck_present_attenuation_difference;
- if (priv->cck_present_attentuation > 22)
- priv->cck_present_attentuation = 22;
- if (priv->cck_present_attentuation < 0)
- priv->cck_present_attentuation = 0;
+ if (priv->cck_present_attenuation > 22)
+ priv->cck_present_attenuation = 22;
+ if (priv->cck_present_attenuation < 0)
+ priv->cck_present_attenuation = 0;
RT_TRACE(COMP_INIT,
"40M, pHalData->CCKPresentAttentuation = %d\n",
- priv->cck_present_attentuation);
+ priv->cck_present_attenuation);
if (priv->chan == 14 && !priv->bcck_in_ch14) {
priv->bcck_in_ch14 = true;
dm_cck_txpower_adjust(dev, priv->bcck_in_ch14);
diff --git a/drivers/staging/rtl8712/ieee80211.c b/drivers/staging/rtl8712/ieee80211.c
index d84da2b6d6b3..f35121eedac6 100644
--- a/drivers/staging/rtl8712/ieee80211.c
+++ b/drivers/staging/rtl8712/ieee80211.c
@@ -288,8 +288,9 @@ int r8712_parse_wpa_ie(u8 *wpa_ie, int wpa_ie_len, int *group_cipher,
/* No WPA IE - fail silently */
return _FAIL;
}
- if ((*wpa_ie != _WPA_IE_ID_) || (*(wpa_ie + 1) != (u8)(wpa_ie_len - 2))
- || (memcmp(wpa_ie + 2, (void *)WPA_OUI_TYPE, WPA_SELECTOR_LEN)))
+ if ((*wpa_ie != _WPA_IE_ID_) ||
+ (*(wpa_ie + 1) != (u8)(wpa_ie_len - 2)) ||
+ (memcmp(wpa_ie + 2, (void *)WPA_OUI_TYPE, WPA_SELECTOR_LEN)))
return _FAIL;
pos = wpa_ie;
pos += 8;
diff --git a/drivers/staging/rtl8712/mlme_linux.c b/drivers/staging/rtl8712/mlme_linux.c
index 999c16d9c6c1..20372659d15d 100644
--- a/drivers/staging/rtl8712/mlme_linux.c
+++ b/drivers/staging/rtl8712/mlme_linux.c
@@ -110,12 +110,12 @@ void r8712_os_indicate_disconnect(struct _adapter *adapter)
* disconnect with AP for 60 seconds.
*/
- memcpy(&backupPMKIDList[0], &adapter->securitypriv.
- PMKIDList[0], sizeof(struct RT_PMKID_LIST) *
- NUM_PMKID_CACHE);
+ memcpy(&backupPMKIDList[0],
+ &adapter->securitypriv.PMKIDList[0],
+ sizeof(struct RT_PMKID_LIST) * NUM_PMKID_CACHE);
backupPMKIDIndex = adapter->securitypriv.PMKIDIndex;
- backupTKIPCountermeasure = adapter->securitypriv.
- btkip_countermeasure;
+ backupTKIPCountermeasure =
+ adapter->securitypriv.btkip_countermeasure;
memset((unsigned char *)&adapter->securitypriv, 0,
sizeof(struct security_priv));
setup_timer(&adapter->securitypriv.tkip_timer,
diff --git a/drivers/staging/rtl8712/os_intfs.c b/drivers/staging/rtl8712/os_intfs.c
index cbe4de05d26b..8836b31b4ef8 100644
--- a/drivers/staging/rtl8712/os_intfs.c
+++ b/drivers/staging/rtl8712/os_intfs.c
@@ -13,10 +13,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* Modifications for inclusion into the Linux staging tree are
* Copyright(c) 2010 Larry Finger. All rights reserved.
*
@@ -168,7 +164,7 @@ static void loadparam(struct _adapter *padapter, struct net_device *pnetdev)
registry_par->ampdu_enable = (u8)ampdu_enable;
registry_par->rf_config = (u8)rf_config;
registry_par->low_power = (u8)low_power;
- registry_par->wifi_test = (u8) wifi_test;
+ registry_par->wifi_test = (u8)wifi_test;
r8712_initmac = initmac;
}
@@ -185,8 +181,8 @@ static int r871x_net_set_mac_address(struct net_device *pnetdev, void *p)
static struct net_device_stats *r871x_net_get_stats(struct net_device *pnetdev)
{
struct _adapter *padapter = netdev_priv(pnetdev);
- struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
- struct recv_priv *precvpriv = &(padapter->recvpriv);
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+ struct recv_priv *precvpriv = &padapter->recvpriv;
padapter->stats.tx_packets = pxmitpriv->tx_pkts;
padapter->stats.rx_packets = precvpriv->rx_pkts;
@@ -392,7 +388,7 @@ static int netdev_open(struct net_device *pnetdev)
if (!r8712_initmac)
/* Use the mac address stored in the Efuse */
memcpy(pnetdev->dev_addr,
- padapter->eeprompriv.mac_addr, ETH_ALEN);
+ padapter->eeprompriv.mac_addr, ETH_ALEN);
else {
/* We have to inform f/w to use user-supplied MAC
* address.
@@ -409,7 +405,7 @@ static int netdev_open(struct net_device *pnetdev)
* users specify.
*/
memcpy(padapter->eeprompriv.mac_addr,
- pnetdev->dev_addr, ETH_ALEN);
+ pnetdev->dev_addr, ETH_ALEN);
}
if (start_drv_threads(padapter) != _SUCCESS)
goto netdev_open_error;
diff --git a/drivers/staging/rtl8712/rtl8712_led.c b/drivers/staging/rtl8712/rtl8712_led.c
index 317aeeed38e8..da1d4a641dcd 100644
--- a/drivers/staging/rtl8712/rtl8712_led.c
+++ b/drivers/staging/rtl8712/rtl8712_led.c
@@ -1734,7 +1734,7 @@ static void SwLedControlMode6(struct _adapter *padapter,
case LED_CTL_LINK: /*solid blue*/
case LED_CTL_SITE_SURVEY:
if (IS_LED_WPS_BLINKING(pLed))
- return;
+ return;
pLed->CurrLedState = LED_STATE_ON;
pLed->BlinkingLedState = LED_STATE_ON;
pLed->bLedBlinkInProgress = false;
diff --git a/drivers/staging/rtl8712/rtl8712_recv.c b/drivers/staging/rtl8712/rtl8712_recv.c
index 20fe45a43e53..266ffefd55ed 100644
--- a/drivers/staging/rtl8712/rtl8712_recv.c
+++ b/drivers/staging/rtl8712/rtl8712_recv.c
@@ -444,9 +444,9 @@ void r8712_rxcmd_event_hdl(struct _adapter *padapter, void *prxcmdbuf)
u16 cmd_len, drvinfo_sz;
struct recv_stat *prxstat;
- poffset = (u8 *)prxcmdbuf;
+ poffset = prxcmdbuf;
voffset = *(__le32 *)poffset;
- prxstat = (struct recv_stat *)prxcmdbuf;
+ prxstat = prxcmdbuf;
drvinfo_sz = (le32_to_cpu(prxstat->rxdw0) & 0x000f0000) >> 16;
drvinfo_sz <<= 3;
poffset += RXDESC_SIZE + drvinfo_sz;
@@ -634,8 +634,7 @@ _err_exit:
void r8712_reordering_ctrl_timeout_handler(void *pcontext)
{
unsigned long irql;
- struct recv_reorder_ctrl *preorder_ctrl =
- (struct recv_reorder_ctrl *)pcontext;
+ struct recv_reorder_ctrl *preorder_ctrl = pcontext;
struct _adapter *padapter = preorder_ctrl->padapter;
struct __queue *ppending_recvframe_queue =
&preorder_ctrl->pending_recvframe_queue;
@@ -976,7 +975,7 @@ int recv_func(struct _adapter *padapter, void *pcontext)
struct __queue *pfree_recv_queue = &padapter->recvpriv.free_recv_queue;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- prframe = (union recv_frame *)pcontext;
+ prframe = pcontext;
orig_prframe = prframe;
pattrib = &prframe->u.hdr.attrib;
if (check_fwstate(pmlmepriv, WIFI_MP_STATE)) {
@@ -1124,7 +1123,7 @@ _exit_recvbuf2recvframe:
static void recv_tasklet(void *priv)
{
struct sk_buff *pskb;
- struct _adapter *padapter = (struct _adapter *)priv;
+ struct _adapter *padapter = priv;
struct recv_priv *precvpriv = &padapter->recvpriv;
while (NULL != (pskb = skb_dequeue(&precvpriv->rx_skb_queue))) {
diff --git a/drivers/staging/rtl8712/rtl871x_cmd.h b/drivers/staging/rtl8712/rtl871x_cmd.h
index 4734ca856aa2..24da2ccea04f 100644
--- a/drivers/staging/rtl8712/rtl871x_cmd.h
+++ b/drivers/staging/rtl8712/rtl871x_cmd.h
@@ -144,7 +144,7 @@ struct disconnect_parm {
* #define IW_MODE_REPEAT 4 // Wireless Repeater (forwarder)
* #define IW_MODE_SECOND 5 // Secondary master/repeater (backup)
* #define IW_MODE_MONITOR 6 // Passive monitor (listen only)
-*/
+ */
struct setopmode_parm {
u8 mode;
u8 rsvd[3];
diff --git a/drivers/staging/rtl8712/rtl871x_event.h b/drivers/staging/rtl8712/rtl871x_event.h
index 5db8620980e5..517137906e6c 100644
--- a/drivers/staging/rtl8712/rtl871x_event.h
+++ b/drivers/staging/rtl8712/rtl871x_event.h
@@ -34,7 +34,7 @@
/*
* Used to report a bss has been scanned
-*/
+ */
struct survey_event {
struct wlan_bssid_ex bss;
};
@@ -42,7 +42,7 @@ struct survey_event {
/*
* Used to report that the requested site survey has been done.
* bss_cnt indicates the number of bss that has been reported.
-*/
+ */
struct surveydone_event {
unsigned int bss_cnt;
@@ -54,7 +54,7 @@ struct surveydone_event {
* -1: authentication fail
* -2: association fail
* > 0: TID
-*/
+ */
struct joinbss_event {
struct wlan_network network;
};
@@ -62,7 +62,7 @@ struct joinbss_event {
/*
* Used to report a given STA has joinned the created BSS.
* It is used in AP/Ad-HoC(M) mode.
-*/
+ */
struct stassoc_event {
unsigned char macaddr[6];
unsigned char rsvd[2];
diff --git a/drivers/staging/rtl8712/rtl871x_io.h b/drivers/staging/rtl8712/rtl871x_io.h
index 26dd24cddd38..dd054d7367b3 100644
--- a/drivers/staging/rtl8712/rtl871x_io.h
+++ b/drivers/staging/rtl8712/rtl871x_io.h
@@ -49,9 +49,9 @@
#define _IO_CMDMASK_ (0x1F80)
/*
- For prompt mode accessing, caller shall free io_req
- Otherwise, io_handler will free io_req
-*/
+ * For prompt mode accessing, caller shall free io_req
+ * Otherwise, io_handler will free io_req
+ */
/* IO STATUS TYPE */
#define _IO_ERR_ BIT(2)
#define _IO_SUCCESS_ BIT(1)
@@ -69,8 +69,8 @@
#define IO_WR16_ASYNC (_IO_WRITE_ | _IO_HW_)
#define IO_WR8_ASYNC (_IO_WRITE_ | _IO_BYTE_)
/*
- Only Sync. burst accessing is provided.
-*/
+ * Only Sync. burst accessing is provided.
+ */
#define IO_WR_BURST(x) (IO_WRITE_ | _IO_SYNC_ | _IO_BURST_ | \
((x) & _IOSZ_MASK_))
#define IO_RD_BURST(x) (_IO_SYNC_ | _IO_BURST_ | ((x) & _IOSZ_MASK_))
@@ -218,8 +218,8 @@ struct reg_protocol_wt {
};
/*
-Below is the data structure used by _io_handler
-*/
+ * Below is the data structure used by _io_handler
+ */
struct io_queue {
spinlock_t lock;
diff --git a/drivers/staging/rtl8712/rtl871x_ioctl_linux.c b/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
index f4167f14af70..e30a5be5f318 100644
--- a/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
+++ b/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
@@ -100,10 +100,10 @@ static inline void handle_pairwise_key(struct sta_info *psta,
memcpy(psta->x_UncstKey.skey, param->u.crypt.key,
(param->u.crypt. key_len > 16 ? 16 : param->u.crypt.key_len));
if (strcmp(param->u.crypt.alg, "TKIP") == 0) { /* set mic key */
- memcpy(psta->tkiptxmickey. skey, &(param->u.crypt.
- key[16]), 8);
- memcpy(psta->tkiprxmickey. skey, &(param->u.crypt.
- key[24]), 8);
+ memcpy(psta->tkiptxmickey. skey,
+ &(param->u.crypt.key[16]), 8);
+ memcpy(psta->tkiprxmickey. skey,
+ &(param->u.crypt.key[24]), 8);
padapter->securitypriv. busetkipkey = false;
mod_timer(&padapter->securitypriv.tkip_timer,
jiffies + msecs_to_jiffies(50));
@@ -378,13 +378,12 @@ static int wpa_set_encryption(struct net_device *dev, struct ieee_param *param,
if (param_len != (u32)((u8 *) param->u.crypt.key - (u8 *)param) +
param->u.crypt.key_len)
return -EINVAL;
- if (is_broadcast_ether_addr(param->sta_addr)) {
- if (param->u.crypt.idx >= WEP_KEYS) {
- /* for large key indices, set the default (0) */
- param->u.crypt.idx = 0;
- }
- } else {
+ if (!is_broadcast_ether_addr(param->sta_addr))
return -EINVAL;
+
+ if (param->u.crypt.idx >= WEP_KEYS) {
+ /* for large key indices, set the default (0) */
+ param->u.crypt.idx = 0;
}
if (strcmp(param->u.crypt.alg, "WEP") == 0) {
netdev_info(dev, "r8712u: %s: crypt.alg = WEP\n", __func__);
@@ -396,23 +395,19 @@ static int wpa_set_encryption(struct net_device *dev, struct ieee_param *param,
wep_key_len = param->u.crypt.key_len;
if (wep_key_idx >= WEP_KEYS)
wep_key_idx = 0;
- if (wep_key_len > 0) {
- wep_key_len = wep_key_len <= 5 ? 5 : 13;
- pwep = kzalloc(sizeof(*pwep), GFP_ATOMIC);
- if (!pwep)
- return -ENOMEM;
- pwep->KeyLength = wep_key_len;
- pwep->Length = wep_key_len +
- FIELD_OFFSET(struct NDIS_802_11_WEP,
- KeyMaterial);
- if (wep_key_len == 13) {
- padapter->securitypriv.PrivacyAlgrthm =
- _WEP104_;
- padapter->securitypriv.XGrpPrivacy =
- _WEP104_;
- }
- } else {
+ if (wep_key_len <= 0)
return -EINVAL;
+
+ wep_key_len = wep_key_len <= 5 ? 5 : 13;
+ pwep = kzalloc(sizeof(*pwep), GFP_ATOMIC);
+ if (!pwep)
+ return -ENOMEM;
+ pwep->KeyLength = wep_key_len;
+ pwep->Length = wep_key_len +
+ FIELD_OFFSET(struct NDIS_802_11_WEP, KeyMaterial);
+ if (wep_key_len == 13) {
+ padapter->securitypriv.PrivacyAlgrthm = _WEP104_;
+ padapter->securitypriv.XGrpPrivacy = _WEP104_;
}
pwep->KeyIndex = wep_key_idx;
pwep->KeyIndex |= 0x80000000;
@@ -700,14 +695,14 @@ static int r8711_wx_get_freq(struct net_device *dev,
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct wlan_bssid_ex *pcur_bss = &pmlmepriv->cur_network.network;
- if (check_fwstate(pmlmepriv, _FW_LINKED)) {
- wrqu->freq.m = ieee80211_wlan_frequencies[
- pcur_bss->Configuration.DSConfig - 1] * 100000;
- wrqu->freq.e = 1;
- wrqu->freq.i = pcur_bss->Configuration.DSConfig;
- } else {
+ if (!check_fwstate(pmlmepriv, _FW_LINKED))
return -ENOLINK;
- }
+
+ wrqu->freq.m = ieee80211_wlan_frequencies[
+ pcur_bss->Configuration.DSConfig - 1] * 100000;
+ wrqu->freq.e = 1;
+ wrqu->freq.i = pcur_bss->Configuration.DSConfig;
+
return 0;
}
@@ -1411,44 +1406,41 @@ static int r8711_wx_get_rate(struct net_device *dev,
u16 mcs_rate = 0;
i = 0;
- if (check_fwstate(pmlmepriv, _FW_LINKED | WIFI_ADHOC_MASTER_STATE)) {
- p = r8712_get_ie(&pcur_bss->IEs[12],
- _HT_CAPABILITY_IE_, &ht_ielen,
- pcur_bss->IELength - 12);
- if (p && ht_ielen > 0) {
- ht_cap = true;
- pht_capie = (struct ieee80211_ht_cap *)(p + 2);
- memcpy(&mcs_rate, pht_capie->supp_mcs_set, 2);
- bw_40MHz = (le16_to_cpu(pht_capie->cap_info) &
- IEEE80211_HT_CAP_SUP_WIDTH) ? 1 : 0;
- short_GI = (le16_to_cpu(pht_capie->cap_info) &
- (IEEE80211_HT_CAP_SGI_20 |
- IEEE80211_HT_CAP_SGI_40)) ? 1 : 0;
- }
- while ((pcur_bss->rates[i] != 0) &&
- (pcur_bss->rates[i] != 0xFF)) {
- rate = pcur_bss->rates[i] & 0x7F;
- if (rate > max_rate)
- max_rate = rate;
- wrqu->bitrate.fixed = 0; /* no auto select */
- wrqu->bitrate.value = rate * 500000;
- i++;
- }
- if (ht_cap) {
- if (mcs_rate & 0x8000 /* MCS15 */
- &&
- rf_type == RTL8712_RF_2T2R)
- max_rate = (bw_40MHz) ? ((short_GI) ? 300 :
- 270) : ((short_GI) ? 144 : 130);
- else /* default MCS7 */
- max_rate = (bw_40MHz) ? ((short_GI) ? 150 :
- 135) : ((short_GI) ? 72 : 65);
- max_rate *= 2; /* Mbps/2 */
- }
- wrqu->bitrate.value = max_rate * 500000;
- } else {
+ if (!check_fwstate(pmlmepriv, _FW_LINKED | WIFI_ADHOC_MASTER_STATE))
return -ENOLINK;
- }
+ p = r8712_get_ie(&pcur_bss->IEs[12], _HT_CAPABILITY_IE_, &ht_ielen,
+ pcur_bss->IELength - 12);
+ if (p && ht_ielen > 0) {
+ ht_cap = true;
+ pht_capie = (struct ieee80211_ht_cap *)(p + 2);
+ memcpy(&mcs_rate, pht_capie->supp_mcs_set, 2);
+ bw_40MHz = (le16_to_cpu(pht_capie->cap_info) &
+ IEEE80211_HT_CAP_SUP_WIDTH) ? 1 : 0;
+ short_GI = (le16_to_cpu(pht_capie->cap_info) &
+ (IEEE80211_HT_CAP_SGI_20 |
+ IEEE80211_HT_CAP_SGI_40)) ? 1 : 0;
+ }
+ while ((pcur_bss->rates[i] != 0) &&
+ (pcur_bss->rates[i] != 0xFF)) {
+ rate = pcur_bss->rates[i] & 0x7F;
+ if (rate > max_rate)
+ max_rate = rate;
+ wrqu->bitrate.fixed = 0; /* no auto select */
+ wrqu->bitrate.value = rate * 500000;
+ i++;
+ }
+ if (ht_cap) {
+ if (mcs_rate & 0x8000 /* MCS15 */
+ &&
+ rf_type == RTL8712_RF_2T2R)
+ max_rate = (bw_40MHz) ? ((short_GI) ? 300 : 270) :
+ ((short_GI) ? 144 : 130);
+ else /* default MCS7 */
+ max_rate = (bw_40MHz) ? ((short_GI) ? 150 : 135) :
+ ((short_GI) ? 72 : 65);
+ max_rate *= 2; /* Mbps/2 */
+ }
+ wrqu->bitrate.value = max_rate * 500000;
return 0;
}
@@ -1973,13 +1965,12 @@ static int r871x_get_ap_info(struct net_device *dev,
break;
}
pdata->flags = 0;
- if (pdata->length >= 32) {
- if (copy_from_user(data, pdata->pointer, 32))
- return -EINVAL;
- data[32] = 0;
- } else {
+ if (pdata->length < 32)
return -EINVAL;
- }
+ if (copy_from_user(data, pdata->pointer, 32))
+ return -EINVAL;
+ data[32] = 0;
+
spin_lock_irqsave(&(pmlmepriv->scanned_queue.lock), irqL);
phead = &queue->queue;
plist = phead->next;
diff --git a/drivers/staging/rtl8712/rtl871x_ioctl_rtl.c b/drivers/staging/rtl8712/rtl871x_ioctl_rtl.c
index c7f2e5167cb7..ca769f781e96 100644
--- a/drivers/staging/rtl8712/rtl871x_ioctl_rtl.c
+++ b/drivers/staging/rtl8712/rtl871x_ioctl_rtl.c
@@ -282,8 +282,7 @@ uint oid_rt_get_total_rx_bytes_hdl(struct oid_par_priv
if (poid_par_priv->information_buf_len >= sizeof(u32)) {
*(u32 *)poid_par_priv->information_buf =
padapter->recvpriv.rx_bytes;
- *poid_par_priv->bytes_rw = poid_par_priv->
- information_buf_len;
+ *poid_par_priv->bytes_rw = poid_par_priv->information_buf_len;
} else {
return RNDIS_STATUS_INVALID_LENGTH;
}
@@ -325,8 +324,7 @@ uint oid_rt_get_channel_hdl(struct oid_par_priv *poid_par_priv)
check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE))
pnic_Config = &pmlmepriv->cur_network.network.Configuration;
else
- pnic_Config = &padapter->registrypriv.dev_network.
- Configuration;
+ pnic_Config = &padapter->registrypriv.dev_network.Configuration;
channelnum = pnic_Config->DSConfig;
*(u32 *)poid_par_priv->information_buf = channelnum;
*poid_par_priv->bytes_rw = poid_par_priv->information_buf_len;
@@ -483,8 +481,8 @@ uint oid_rt_pro_rf_read_registry_hdl(struct oid_par_priv *poid_par_priv)
*/
if (!r8712_getrfreg_cmd(Adapter,
*(unsigned char *)poid_par_priv->information_buf,
- (unsigned char *)&Adapter->mppriv.workparam.
- io_value))
+ (unsigned char *)&Adapter->mppriv.workparam.io_value
+ ))
status = RNDIS_STATUS_NOT_ACCEPTED;
}
} else {
diff --git a/drivers/staging/rtl8712/rtl871x_mlme.h b/drivers/staging/rtl8712/rtl871x_mlme.h
index 53a23234c598..b21f28140f53 100644
--- a/drivers/staging/rtl8712/rtl871x_mlme.h
+++ b/drivers/staging/rtl8712/rtl871x_mlme.h
@@ -68,14 +68,14 @@
#define _FW_UNDER_SURVEY WIFI_SITE_MONITOR
/*
-there are several "locks" in mlme_priv,
-since mlme_priv is a shared resource between many threads,
-like ISR/Call-Back functions, the OID handlers, and even timer functions.
-Each _queue has its own locks, already.
-Other items are protected by mlme_priv.lock.
-To avoid possible dead lock, any thread trying to modify mlme_priv
-SHALL not lock up more than one lock at a time!
-*/
+ * there are several "locks" in mlme_priv,
+ * since mlme_priv is a shared resource between many threads,
+ * like ISR/Call-Back functions, the OID handlers, and even timer functions.
+ * Each _queue has its own locks, already.
+ * Other items are protected by mlme_priv.lock.
+ * To avoid possible dead lock, any thread trying to modify mlme_priv
+ * SHALL not lock up more than one lock at a time!
+ */
#define traffic_threshold 10
#define traffic_scan_period 500
diff --git a/drivers/staging/rtl8712/rtl871x_mp_ioctl.h b/drivers/staging/rtl8712/rtl871x_mp_ioctl.h
index 1102451a733d..741006f1e45a 100644
--- a/drivers/staging/rtl8712/rtl871x_mp_ioctl.h
+++ b/drivers/staging/rtl8712/rtl871x_mp_ioctl.h
@@ -150,103 +150,126 @@ uint oid_rt_get_power_mode_hdl(
#ifdef _RTL871X_MP_IOCTL_C_ /* CAUTION!!! */
/* This ifdef _MUST_ be left in!! */
static const struct oid_obj_priv oid_rtl_seg_81_80_00[] = {
- {1, oid_null_function}, /*0x00 OID_RT_PRO_RESET_DUT */
- {1, oid_rt_pro_set_data_rate_hdl}, /*0x01*/
- {1, oid_rt_pro_start_test_hdl}, /*0x02*/
- {1, oid_rt_pro_stop_test_hdl}, /*0x03*/
- {1, oid_null_function}, /*0x04 OID_RT_PRO_SET_PREAMBLE*/
- {1, oid_null_function}, /*0x05 OID_RT_PRO_SET_SCRAMBLER*/
- {1, oid_null_function}, /*0x06 OID_RT_PRO_SET_FILTER_BB*/
- {1, oid_null_function}, /*0x07
- * OID_RT_PRO_SET_MANUAL_DIVERS_BB
- */
- {1, oid_rt_pro_set_channel_direct_call_hdl}, /*0x08*/
- {1, oid_null_function}, /*0x09
- * OID_RT_PRO_SET_SLEEP_MODE_DIRECT_CALL
- */
- {1, oid_null_function}, /*0x0A
- * OID_RT_PRO_SET_WAKE_MODE_DIRECT_CALL
- */
- {1, oid_rt_pro_set_continuous_tx_hdl}, /*0x0B
- * OID_RT_PRO_SET_TX_CONTINUOUS_DIRECT_CALL
- */
- {1, oid_rt_pro_set_single_carrier_tx_hdl}, /*0x0C
- * OID_RT_PRO_SET_SINGLE_CARRIER_TX_CONTINUOUS
- */
- {1, oid_null_function}, /*0x0D
- * OID_RT_PRO_SET_TX_ANTENNA_BB
- */
- {1, oid_rt_pro_set_antenna_bb_hdl}, /*0x0E*/
- {1, oid_null_function}, /*0x0F OID_RT_PRO_SET_CR_SCRAMBLER*/
- {1, oid_null_function}, /*0x10 OID_RT_PRO_SET_CR_NEW_FILTER*/
- {1, oid_rt_pro_set_tx_power_control_hdl}, /*0x11
- * OID_RT_PRO_SET_TX_POWER_CONTROL
- */
- {1, oid_null_function}, /*0x12 OID_RT_PRO_SET_CR_TX_CONFIG*/
- {1, oid_null_function}, /*0x13
- * OID_RT_PRO_GET_TX_POWER_CONTROL
- */
- {1, oid_null_function}, /*0x14
- * OID_RT_PRO_GET_CR_SIGNAL_QUALITY
- */
- {1, oid_null_function}, /*0x15 OID_RT_PRO_SET_CR_SETPOINT*/
- {1, oid_null_function}, /*0x16 OID_RT_PRO_SET_INTEGRATOR*/
- {1, oid_null_function}, /*0x17 OID_RT_PRO_SET_SIGNAL_QUALITY*/
- {1, oid_null_function}, /*0x18 OID_RT_PRO_GET_INTEGRATOR*/
- {1, oid_null_function}, /*0x19 OID_RT_PRO_GET_SIGNAL_QUALITY*/
- {1, oid_null_function}, /*0x1A OID_RT_PRO_QUERY_EEPROM_TYPE*/
- {1, oid_null_function}, /*0x1B OID_RT_PRO_WRITE_MAC_ADDRESS*/
- {1, oid_null_function}, /*0x1C OID_RT_PRO_READ_MAC_ADDRESS*/
- {1, oid_null_function}, /*0x1D OID_RT_PRO_WRITE_CIS_DATA*/
- {1, oid_null_function}, /*0x1E OID_RT_PRO_READ_CIS_DATA*/
- {1, oid_null_function} /*0x1F OID_RT_PRO_WRITE_POWER_CONTROL*/
+ /* 0x00 OID_RT_PRO_RESET_DUT */
+ {1, oid_null_function},
+ /* 0x01 */
+ {1, oid_rt_pro_set_data_rate_hdl},
+ /* 0x02 */
+ {1, oid_rt_pro_start_test_hdl},
+ /* 0x03 */
+ {1, oid_rt_pro_stop_test_hdl},
+ /* 0x04 OID_RT_PRO_SET_PREAMBLE */
+ {1, oid_null_function},
+ /* 0x05 OID_RT_PRO_SET_SCRAMBLER */
+ {1, oid_null_function},
+ /* 0x06 OID_RT_PRO_SET_FILTER_BB */
+ {1, oid_null_function},
+ /* 0x07 OID_RT_PRO_SET_MANUAL_DIVERS_BB */
+ {1, oid_null_function},
+ /* 0x08 */
+ {1, oid_rt_pro_set_channel_direct_call_hdl},
+ /* 0x09 OID_RT_PRO_SET_SLEEP_MODE_DIRECT_CALL */
+ {1, oid_null_function},
+ /* 0x0A OID_RT_PRO_SET_WAKE_MODE_DIRECT_CALL */
+ {1, oid_null_function},
+ /* 0x0B OID_RT_PRO_SET_TX_CONTINUOUS_DIRECT_CALL */
+ {1, oid_rt_pro_set_continuous_tx_hdl},
+ /* 0x0C OID_RT_PRO_SET_SINGLE_CARRIER_TX_CONTINUOUS */
+ {1, oid_rt_pro_set_single_carrier_tx_hdl},
+ /* 0x0D OID_RT_PRO_SET_TX_ANTENNA_BB */
+ {1, oid_null_function},
+ /* 0x0E */
+ {1, oid_rt_pro_set_antenna_bb_hdl},
+ /* 0x0F OID_RT_PRO_SET_CR_SCRAMBLER */
+ {1, oid_null_function},
+ /* 0x10 OID_RT_PRO_SET_CR_NEW_FILTER */
+ {1, oid_null_function},
+ /* 0x11 OID_RT_PRO_SET_TX_POWER_CONTROL */
+ {1, oid_rt_pro_set_tx_power_control_hdl},
+ /* 0x12 OID_RT_PRO_SET_CR_TX_CONFIG */
+ {1, oid_null_function},
+ /* 0x13 OID_RT_PRO_GET_TX_POWER_CONTROL */
+ {1, oid_null_function},
+ /* 0x14 OID_RT_PRO_GET_CR_SIGNAL_QUALITY */
+ {1, oid_null_function},
+ /* 0x15 OID_RT_PRO_SET_CR_SETPOINT */
+ {1, oid_null_function},
+ /* 0x16 OID_RT_PRO_SET_INTEGRATOR */
+ {1, oid_null_function},
+ /* 0x17 OID_RT_PRO_SET_SIGNAL_QUALITY */
+ {1, oid_null_function},
+ /* 0x18 OID_RT_PRO_GET_INTEGRATOR */
+ {1, oid_null_function},
+ /* 0x19 OID_RT_PRO_GET_SIGNAL_QUALITY */
+ {1, oid_null_function},
+ /* 0x1A OID_RT_PRO_QUERY_EEPROM_TYPE */
+ {1, oid_null_function},
+ /* 0x1B OID_RT_PRO_WRITE_MAC_ADDRESS */
+ {1, oid_null_function},
+ /* 0x1C OID_RT_PRO_READ_MAC_ADDRESS */
+ {1, oid_null_function},
+ /* 0x1D OID_RT_PRO_WRITE_CIS_DATA */
+ {1, oid_null_function},
+ /* 0x1E OID_RT_PRO_READ_CIS_DATA */
+ {1, oid_null_function},
+ /* 0x1F OID_RT_PRO_WRITE_POWER_CONTROL */
+ {1, oid_null_function}
};
static const struct oid_obj_priv oid_rtl_seg_81_80_20[] = {
- {1, oid_null_function}, /*0x20 OID_RT_PRO_READ_POWER_CONTROL*/
- {1, oid_null_function}, /*0x21 OID_RT_PRO_WRITE_EEPROM*/
- {1, oid_null_function}, /*0x22 OID_RT_PRO_READ_EEPROM*/
- {1, oid_rt_pro_reset_tx_packet_sent_hdl}, /*0x23*/
- {1, oid_rt_pro_query_tx_packet_sent_hdl}, /*0x24*/
- {1, oid_rt_pro_reset_rx_packet_received_hdl}, /*0x25*/
- {1, oid_rt_pro_query_rx_packet_received_hdl}, /*0x26*/
- {1, oid_rt_pro_query_rx_packet_crc32_error_hdl},/*0x27*/
- {1, oid_null_function}, /*0x28
- *OID_RT_PRO_QUERY_CURRENT_ADDRESS
- */
- {1, oid_null_function}, /*0x29
- *OID_RT_PRO_QUERY_PERMANENT_ADDRESS
- */
- {1, oid_null_function}, /*0x2A
- *OID_RT_PRO_SET_PHILIPS_RF_PARAMETERS
- */
- {1, oid_rt_pro_set_carrier_suppression_tx_hdl},/*0x2B
- *OID_RT_PRO_SET_CARRIER_SUPPRESSION_TX
- */
- {1, oid_null_function}, /*0x2C OID_RT_PRO_RECEIVE_PACKET*/
- {1, oid_null_function}, /*0x2D OID_RT_PRO_WRITE_EEPROM_BYTE*/
- {1, oid_null_function}, /*0x2E OID_RT_PRO_READ_EEPROM_BYTE*/
- {1, oid_rt_pro_set_modulation_hdl} /*0x2F*/
+ /* 0x20 OID_RT_PRO_READ_POWER_CONTROL */
+ {1, oid_null_function},
+ /* 0x21 OID_RT_PRO_WRITE_EEPROM */
+ {1, oid_null_function},
+ /* 0x22 OID_RT_PRO_READ_EEPROM */
+ {1, oid_null_function},
+ /* 0x23 */
+ {1, oid_rt_pro_reset_tx_packet_sent_hdl},
+ /* 0x24 */
+ {1, oid_rt_pro_query_tx_packet_sent_hdl},
+ /* 0x25 */
+ {1, oid_rt_pro_reset_rx_packet_received_hdl},
+ /* 0x26 */
+ {1, oid_rt_pro_query_rx_packet_received_hdl},
+ /* 0x27 */
+ {1, oid_rt_pro_query_rx_packet_crc32_error_hdl},
+ /* 0x28 OID_RT_PRO_QUERY_CURRENT_ADDRESS */
+ {1, oid_null_function},
+ /* 0x29 OID_RT_PRO_QUERY_PERMANENT_ADDRESS */
+ {1, oid_null_function},
+ /* 0x2A OID_RT_PRO_SET_PHILIPS_RF_PARAMETERS */
+ {1, oid_null_function},
+ /* 0x2B OID_RT_PRO_SET_CARRIER_SUPPRESSION_TX */
+ {1, oid_rt_pro_set_carrier_suppression_tx_hdl},
+ /* 0x2C OID_RT_PRO_RECEIVE_PACKET */
+ {1, oid_null_function},
+ /* 0x2D OID_RT_PRO_WRITE_EEPROM_BYTE */
+ {1, oid_null_function},
+ /* 0x2E OID_RT_PRO_READ_EEPROM_BYTE */
+ {1, oid_null_function},
+ /* 0x2F */
+ {1, oid_rt_pro_set_modulation_hdl}
};
static const struct oid_obj_priv oid_rtl_seg_81_80_40[] = {
- {1, oid_null_function}, /*0x40*/
- {1, oid_null_function}, /*0x41*/
- {1, oid_null_function}, /*0x42*/
- {1, oid_rt_pro_set_single_tone_tx_hdl}, /*0x43*/
- {1, oid_null_function}, /*0x44*/
- {1, oid_null_function} /*0x45*/
+ {1, oid_null_function}, /* 0x40 */
+ {1, oid_null_function}, /* 0x41 */
+ {1, oid_null_function}, /* 0x42 */
+ {1, oid_rt_pro_set_single_tone_tx_hdl}, /* 0x43 */
+ {1, oid_null_function}, /* 0x44 */
+ {1, oid_null_function} /* 0x45 */
};
static const struct oid_obj_priv oid_rtl_seg_81_80_80[] = {
- {1, oid_null_function}, /*0x80 OID_RT_DRIVER_OPTION*/
- {1, oid_null_function}, /*0x81 OID_RT_RF_OFF*/
- {1, oid_null_function} /*0x82 OID_RT_AUTH_STATUS*/
+ {1, oid_null_function}, /* 0x80 OID_RT_DRIVER_OPTION */
+ {1, oid_null_function}, /* 0x81 OID_RT_RF_OFF */
+ {1, oid_null_function} /* 0x82 OID_RT_AUTH_STATUS */
};
static const struct oid_obj_priv oid_rtl_seg_81_85[] = {
- {1, oid_rt_wireless_mode_hdl} /*0x00 OID_RT_WIRELESS_MODE*/
+ /* 0x00 OID_RT_WIRELESS_MODE */
+ {1, oid_rt_wireless_mode_hdl}
};
#else /* _RTL871X_MP_IOCTL_C_ */
diff --git a/drivers/staging/rtl8712/rtl871x_pwrctrl.h b/drivers/staging/rtl8712/rtl871x_pwrctrl.h
index c82fdf85d474..bd2c3a2df48b 100644
--- a/drivers/staging/rtl8712/rtl871x_pwrctrl.h
+++ b/drivers/staging/rtl8712/rtl871x_pwrctrl.h
@@ -48,11 +48,11 @@ enum Power_Mgnt {
};
/*
- BIT[2:0] = HW state
- BIT[3] = Protocol PS state, 0: register active state,
- 1: register sleep state
- BIT[4] = sub-state
-*/
+ * BIT[2:0] = HW state
+ * BIT[3] = Protocol PS state, 0: register active state,
+ * 1: register sleep state
+ * BIT[4] = sub-state
+ */
#define PS_DPS BIT(0)
#define PS_LCLK (PS_DPS)
diff --git a/drivers/staging/rtl8712/rtl871x_recv.h b/drivers/staging/rtl8712/rtl871x_recv.h
index f419943ad75d..9de06c5fe620 100644
--- a/drivers/staging/rtl8712/rtl871x_recv.h
+++ b/drivers/staging/rtl8712/rtl871x_recv.h
@@ -74,12 +74,12 @@ struct rx_pkt_attrib {
};
/*
-accesser of recv_priv: recv_entry(dispatch / passive level);
-recv_thread(passive) ; returnpkt(dispatch)
-; halt(passive) ;
-
-using enter_critical section to protect
-*/
+ * accesser of recv_priv: recv_entry(dispatch / passive level);
+ * recv_thread(passive) ; returnpkt(dispatch)
+ * ; halt(passive) ;
+ *
+ * using enter_critical section to protect
+ */
struct recv_priv {
spinlock_t lock;
struct __queue free_recv_queue;
diff --git a/drivers/staging/rtl8712/rtl871x_xmit.c b/drivers/staging/rtl8712/rtl871x_xmit.c
index de88819faf05..eda2aee02ff8 100644
--- a/drivers/staging/rtl8712/rtl871x_xmit.c
+++ b/drivers/staging/rtl8712/rtl871x_xmit.c
@@ -213,8 +213,9 @@ sint r8712_update_attrib(struct _adapter *padapter, _pkt *pkt,
if (padapter->pwrctrlpriv.pwr_mode !=
padapter->registrypriv.power_mgnt) {
del_timer_sync(&pmlmepriv->dhcp_timer);
- r8712_set_ps_mode(padapter, padapter->registrypriv.
- power_mgnt, padapter->registrypriv.smart_ps);
+ r8712_set_ps_mode(padapter,
+ padapter->registrypriv.power_mgnt,
+ padapter->registrypriv.smart_ps);
}
}
}
@@ -416,15 +417,13 @@ static sint xmitframe_addmic(struct _adapter *padapter,
&pframe[10], 6);
}
if (pqospriv->qos_option == 1)
- priority[0] = (u8)pxmitframe->
- attrib.priority;
+ priority[0] = (u8)pxmitframe->attrib.priority;
r8712_secmicappend(&micdata, &priority[0], 4);
payload = pframe;
for (curfragnum = 0; curfragnum < pattrib->nr_frags;
curfragnum++) {
payload = (u8 *)RND4((addr_t)(payload));
- payload = payload + pattrib->
- hdrlen + pattrib->iv_len;
+ payload += pattrib->hdrlen + pattrib->iv_len;
if ((curfragnum + 1) == pattrib->nr_frags) {
length = pattrib->last_txcmdsz -
pattrib->hdrlen -
diff --git a/drivers/staging/rtl8712/wifi.h b/drivers/staging/rtl8712/wifi.h
index 74dfc9b0e494..556367bfbe8a 100644
--- a/drivers/staging/rtl8712/wifi.h
+++ b/drivers/staging/rtl8712/wifi.h
@@ -370,7 +370,7 @@ static inline unsigned char *get_hdr_bssid(unsigned char *pframe)
/*-----------------------------------------------------------------------------
- Below is for the security related definition
+ * Below is for the security related definition
*-----------------------------------------------------------------------------
*/
#define _RESERVED_FRAME_TYPE_ 0
@@ -415,7 +415,7 @@ static inline unsigned char *get_hdr_bssid(unsigned char *pframe)
/* ---------------------------------------------------------------------------
- Below is the fixed elements...
+ * Below is the fixed elements...
* ---------------------------------------------------------------------------
*/
#define _AUTH_ALGM_NUM_ 2
@@ -444,14 +444,14 @@ static inline unsigned char *get_hdr_bssid(unsigned char *pframe)
#define cap_ShortPremble BIT(5)
/*-----------------------------------------------------------------------------
- Below is the definition for 802.11i / 802.1x
+ * Below is the definition for 802.11i / 802.1x
*------------------------------------------------------------------------------
*/
#define _IEEE8021X_MGT_ 1 /*WPA */
#define _IEEE8021X_PSK_ 2 /* WPA with pre-shared key */
/*-----------------------------------------------------------------------------
- Below is the definition for WMM
+ * Below is the definition for WMM
*------------------------------------------------------------------------------
*/
#define _WMM_IE_Length_ 7 /* for WMM STA */
@@ -459,7 +459,7 @@ static inline unsigned char *get_hdr_bssid(unsigned char *pframe)
/*-----------------------------------------------------------------------------
- Below is the definition for 802.11n
+ * Below is the definition for 802.11n
*------------------------------------------------------------------------------
*/
@@ -498,7 +498,7 @@ struct ieee80211_bar {
#define IEEE80211_BAR_CTRL_CBMTID_COMPRESSED_BA 0x0004
- /**
+/*
* struct ieee80211_ht_cap - HT capabilities
*
* This structure refers to "HT capabilities element" as
diff --git a/drivers/staging/rtl8712/wlan_bssdef.h b/drivers/staging/rtl8712/wlan_bssdef.h
index c0654ae4d70d..9dc9ce5a2ccc 100644
--- a/drivers/staging/rtl8712/wlan_bssdef.h
+++ b/drivers/staging/rtl8712/wlan_bssdef.h
@@ -53,9 +53,9 @@ struct NDIS_802_11_CONFIGURATION_FH {
};
/*
- FW will only save the channel number in DSConfig.
- ODI Handler will convert the channel number to freq. number.
-*/
+ * FW will only save the channel number in DSConfig.
+ * ODI Handler will convert the channel number to freq. number.
+ */
struct NDIS_802_11_CONFIGURATION {
u32 Length; /* Length of structure */
u32 BeaconPeriod; /* units are Kusec */
diff --git a/drivers/staging/rtl8723bs/Kconfig b/drivers/staging/rtl8723bs/Kconfig
new file mode 100644
index 000000000000..deae0427ba6c
--- /dev/null
+++ b/drivers/staging/rtl8723bs/Kconfig
@@ -0,0 +1,11 @@
+config RTL8723BS
+ tristate "Realtek RTL8723BS SDIO Wireless LAN NIC driver"
+ depends on WLAN && MMC && CFG80211
+ depends on m
+ select WIRELESS_EXT
+ select WEXT_PRIV
+ ---help---
+ This option enables support for RTL8723BS SDIO drivers, such as
+ the wifi found on the 1st gen Intel Compute Stick, the CHIP
+ and many other Intel Atom and ARM based devices.
+ If built as a module, it will be called r8723bs.
diff --git a/drivers/staging/rtl8723bs/Makefile b/drivers/staging/rtl8723bs/Makefile
new file mode 100644
index 000000000000..4e7b460a9c73
--- /dev/null
+++ b/drivers/staging/rtl8723bs/Makefile
@@ -0,0 +1,70 @@
+r8723bs-y = \
+ core/rtw_ap.o \
+ core/rtw_btcoex.o \
+ core/rtw_cmd.o \
+ core/rtw_debug.o \
+ core/rtw_efuse.o \
+ core/rtw_io.o \
+ core/rtw_ioctl_set.o \
+ core/rtw_ieee80211.o \
+ core/rtw_mlme.o \
+ core/rtw_mlme_ext.o \
+ core/rtw_odm.o \
+ core/rtw_pwrctrl.o \
+ core/rtw_recv.o \
+ core/rtw_rf.o \
+ core/rtw_security.o \
+ core/rtw_sta_mgt.o \
+ core/rtw_wlan_util.o \
+ core/rtw_xmit.o \
+ hal/hal_intf.o \
+ hal/hal_com.o \
+ hal/hal_com_phycfg.o \
+ hal/hal_btcoex.o \
+ hal/hal_sdio.o \
+ hal/Hal8723BPwrSeq.o \
+ hal/HalPhyRf.o \
+ hal/HalPwrSeqCmd.o \
+ hal/odm.o \
+ hal/odm_CfoTracking.o \
+ hal/odm_debug.o \
+ hal/odm_DIG.o \
+ hal/odm_DynamicBBPowerSaving.o \
+ hal/odm_DynamicTxPower.o \
+ hal/odm_EdcaTurboCheck.o \
+ hal/odm_HWConfig.o \
+ hal/odm_NoiseMonitor.o \
+ hal/odm_PathDiv.o \
+ hal/odm_RegConfig8723B.o \
+ hal/odm_RTL8723B.o \
+ hal/rtl8723b_cmd.o \
+ hal/rtl8723b_dm.o \
+ hal/rtl8723b_hal_init.o \
+ hal/rtl8723b_phycfg.o \
+ hal/rtl8723b_rf6052.o \
+ hal/rtl8723b_rxdesc.o \
+ hal/rtl8723bs_recv.o \
+ hal/rtl8723bs_xmit.o \
+ hal/sdio_halinit.o \
+ hal/sdio_ops.o \
+ hal/HalBtc8723b1Ant.o \
+ hal/HalBtc8723b2Ant.o \
+ hal/HalHWImg8723B_BB.o \
+ hal/HalHWImg8723B_MAC.o \
+ hal/HalHWImg8723B_RF.o \
+ hal/HalPhyRf_8723B.o \
+ os_dep/ioctl_cfg80211.o \
+ os_dep/ioctl_linux.o \
+ os_dep/mlme_linux.o \
+ os_dep/osdep_service.o \
+ os_dep/os_intfs.o \
+ os_dep/recv_linux.o \
+ os_dep/rtw_proc.o \
+ os_dep/sdio_intf.o \
+ os_dep/sdio_ops_linux.o \
+ os_dep/wifi_regd.o \
+ os_dep/xmit_linux.o
+
+obj-$(CONFIG_RTL8723BS) := r8723bs.o
+
+ccflags-y += -I$(srctree)/$(src)/include -I$(srctree)/$(src)/hal
diff --git a/drivers/staging/rtl8723bs/TODO b/drivers/staging/rtl8723bs/TODO
new file mode 100644
index 000000000000..80dbdaca3a8f
--- /dev/null
+++ b/drivers/staging/rtl8723bs/TODO
@@ -0,0 +1,16 @@
+TODO:
+- find and remove code blocks guarded by never set CONFIG_FOO defines
+- find and remove remaining code valid only for 5 GHz. Most of the obvious
+ ones have been removed, but things like channel > 14 still exist.
+- find and remove any code for other chips that is left over
+- convert any remaining unusual variable types
+- find code that can use %pM and %Nph formatting
+- checkpatch.pl fixes - most of the remaining ones are lines too long. Many
+ of them will require refactoring
+- merge Realtek's bugfixes and new features into the driver
+- switch to use LIB80211
+- switch to use MAC80211
+
+Please send any patches to Greg Kroah-Hartman <gregkh@linuxfoundation.org>,
+Bastien Nocera <hadess@hadess.net>, Hans de Goede <hdegoede@redhat.com>
+and Larry Finger <Larry.Finger@lwfinger.net>.
diff --git a/drivers/staging/rtl8723bs/core/rtw_ap.c b/drivers/staging/rtl8723bs/core/rtw_ap.c
new file mode 100644
index 000000000000..d3007c1c45e3
--- /dev/null
+++ b/drivers/staging/rtl8723bs/core/rtw_ap.c
@@ -0,0 +1,2678 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#define _RTW_AP_C_
+
+#include <drv_types.h>
+#include <rtw_debug.h>
+
+extern unsigned char RTW_WPA_OUI[];
+extern unsigned char WMM_OUI[];
+extern unsigned char WPS_OUI[];
+extern unsigned char P2P_OUI[];
+extern unsigned char WFD_OUI[];
+
+void init_mlme_ap_info(struct adapter *padapter)
+{
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct wlan_acl_pool *pacl_list = &pstapriv->acl_list;
+
+
+ spin_lock_init(&pmlmepriv->bcn_update_lock);
+
+ /* for ACL */
+ _rtw_init_queue(&pacl_list->acl_node_q);
+
+ /* pmlmeext->bstart_bss = false; */
+
+ start_ap_mode(padapter);
+}
+
+void free_mlme_ap_info(struct adapter *padapter)
+{
+ struct sta_info *psta = NULL;
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+
+ /* stop_ap_mode(padapter); */
+
+ pmlmepriv->update_bcn = false;
+ pmlmeext->bstart_bss = false;
+
+ rtw_sta_flush(padapter);
+
+ pmlmeinfo->state = _HW_STATE_NOLINK_;
+
+ /* free_assoc_sta_resources */
+ rtw_free_all_stainfo(padapter);
+
+ /* free bc/mc sta_info */
+ psta = rtw_get_bcmc_stainfo(padapter);
+ rtw_free_stainfo(padapter, psta);
+}
+
+static void update_BCNTIM(struct adapter *padapter)
+{
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct wlan_bssid_ex *pnetwork_mlmeext = &(pmlmeinfo->network);
+ unsigned char *pie = pnetwork_mlmeext->IEs;
+
+ /* DBG_871X("%s\n", __func__); */
+
+ /* update TIM IE */
+ /* if (pstapriv->tim_bitmap) */
+ if (true) {
+
+ u8 *p, *dst_ie, *premainder_ie = NULL, *pbackup_remainder_ie = NULL;
+ __le16 tim_bitmap_le;
+ uint offset, tmp_len, tim_ielen, tim_ie_offset, remainder_ielen;
+
+ tim_bitmap_le = cpu_to_le16(pstapriv->tim_bitmap);
+
+ p = rtw_get_ie(
+ pie + _FIXED_IE_LENGTH_,
+ _TIM_IE_,
+ &tim_ielen,
+ pnetwork_mlmeext->IELength - _FIXED_IE_LENGTH_
+ );
+ if (p != NULL && tim_ielen > 0) {
+
+ tim_ielen += 2;
+
+ premainder_ie = p+tim_ielen;
+
+ tim_ie_offset = (sint)(p - pie);
+
+ remainder_ielen = pnetwork_mlmeext->IELength - tim_ie_offset - tim_ielen;
+
+ /* append TIM IE from dst_ie offset */
+ dst_ie = p;
+ } else{
+
+
+ tim_ielen = 0;
+
+			/* calculate head_len */
+ offset = _FIXED_IE_LENGTH_;
+
+ /* get ssid_ie len */
+ p = rtw_get_ie(
+ pie + _BEACON_IE_OFFSET_,
+ _SSID_IE_,
+ &tmp_len,
+ (pnetwork_mlmeext->IELength - _BEACON_IE_OFFSET_)
+ );
+ if (p != NULL)
+ offset += tmp_len+2;
+
+ /* get supported rates len */
+ p = rtw_get_ie(
+ pie + _BEACON_IE_OFFSET_,
+ _SUPPORTEDRATES_IE_, &tmp_len,
+ (pnetwork_mlmeext->IELength - _BEACON_IE_OFFSET_)
+ );
+ if (p != NULL)
+ offset += tmp_len+2;
+
+
+ /* DS Parameter Set IE, len =3 */
+ offset += 3;
+
+ premainder_ie = pie + offset;
+
+ remainder_ielen = pnetwork_mlmeext->IELength - offset - tim_ielen;
+
+ /* append TIM IE from offset */
+ dst_ie = pie + offset;
+
+ }
+
+
+ if (remainder_ielen > 0) {
+
+ pbackup_remainder_ie = rtw_malloc(remainder_ielen);
+ if (pbackup_remainder_ie && premainder_ie)
+ memcpy(pbackup_remainder_ie, premainder_ie, remainder_ielen);
+ }
+
+ *dst_ie++ = _TIM_IE_;
+
+ if ((pstapriv->tim_bitmap&0xff00) && (pstapriv->tim_bitmap&0x00fe))
+ tim_ielen = 5;
+ else
+ tim_ielen = 4;
+
+ *dst_ie++ = tim_ielen;
+
+ *dst_ie++ = 0;/* DTIM count */
+		*dst_ie++ = 1;/* DTIM period */
+
+ if (pstapriv->tim_bitmap&BIT(0))/* for bc/mc frames */
+ *dst_ie++ = BIT(0);/* bitmap ctrl */
+ else
+ *dst_ie++ = 0;
+
+ if (tim_ielen == 4) {
+
+ __le16 pvb;
+
+ if (pstapriv->tim_bitmap&0xff00)
+ pvb = cpu_to_le16(pstapriv->tim_bitmap >> 8);
+ else
+ pvb = tim_bitmap_le;
+
+ *dst_ie++ = le16_to_cpu(pvb);
+
+ } else if (tim_ielen == 5) {
+
+
+ memcpy(dst_ie, &tim_bitmap_le, 2);
+ dst_ie += 2;
+ }
+
+ /* copy remainder IE */
+ if (pbackup_remainder_ie) {
+
+ memcpy(dst_ie, pbackup_remainder_ie, remainder_ielen);
+
+ kfree(pbackup_remainder_ie);
+ }
+
+ offset = (uint)(dst_ie - pie);
+ pnetwork_mlmeext->IELength = offset + remainder_ielen;
+
+ }
+}
+
+u8 chk_sta_is_alive(struct sta_info *psta);
+u8 chk_sta_is_alive(struct sta_info *psta)
+{
+ #ifdef DBG_EXPIRATION_CHK
+ DBG_871X(
+ "sta:"MAC_FMT", rssi:%d, rx:"STA_PKTS_FMT", expire_to:%u, %s%ssq_len:%u\n"
+ , MAC_ARG(psta->hwaddr)
+ , psta->rssi_stat.UndecoratedSmoothedPWDB
+ /* STA_RX_PKTS_ARG(psta) */
+ , STA_RX_PKTS_DIFF_ARG(psta)
+ , psta->expire_to
+ , psta->state&WIFI_SLEEP_STATE?"PS, ":""
+ , psta->state&WIFI_STA_ALIVE_CHK_STATE?"SAC, ":""
+ , psta->sleepq_len
+ );
+ #endif
+
+ sta_update_last_rx_pkts(psta);
+
+ return true;
+}
+
+void expire_timeout_chk(struct adapter *padapter)
+{
+ struct list_head *phead, *plist;
+ u8 updated = false;
+ struct sta_info *psta = NULL;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ u8 chk_alive_num = 0;
+ char chk_alive_list[NUM_STA];
+ int i;
+
+
+ spin_lock_bh(&pstapriv->auth_list_lock);
+
+ phead = &pstapriv->auth_list;
+ plist = get_next(phead);
+
+ /* check auth_queue */
+ #ifdef DBG_EXPIRATION_CHK
+ if (phead != plist) {
+ DBG_871X(FUNC_NDEV_FMT" auth_list, cnt:%u\n"
+ , FUNC_NDEV_ARG(padapter->pnetdev), pstapriv->auth_list_cnt);
+ }
+ #endif
+ while (phead != plist) {
+
+ psta = LIST_CONTAINOR(plist, struct sta_info, auth_list);
+
+ plist = get_next(plist);
+
+ if (psta->expire_to > 0) {
+
+ psta->expire_to--;
+ if (psta->expire_to == 0) {
+
+ list_del_init(&psta->auth_list);
+ pstapriv->auth_list_cnt--;
+
+ DBG_871X(
+ "auth expire %02X%02X%02X%02X%02X%02X\n",
+ psta->hwaddr[0],
+ psta->hwaddr[1],
+ psta->hwaddr[2],
+ psta->hwaddr[3],
+ psta->hwaddr[4],
+ psta->hwaddr[5]
+ );
+
+ spin_unlock_bh(&pstapriv->auth_list_lock);
+
+ rtw_free_stainfo(padapter, psta);
+
+ spin_lock_bh(&pstapriv->auth_list_lock);
+ }
+ }
+
+ }
+
+ spin_unlock_bh(&pstapriv->auth_list_lock);
+ psta = NULL;
+
+
+ spin_lock_bh(&pstapriv->asoc_list_lock);
+
+ phead = &pstapriv->asoc_list;
+ plist = get_next(phead);
+
+ /* check asoc_queue */
+ #ifdef DBG_EXPIRATION_CHK
+ if (phead != plist) {
+ DBG_871X(FUNC_NDEV_FMT" asoc_list, cnt:%u\n"
+ , FUNC_NDEV_ARG(padapter->pnetdev), pstapriv->asoc_list_cnt);
+ }
+ #endif
+ while (phead != plist) {
+
+ psta = LIST_CONTAINOR(plist, struct sta_info, asoc_list);
+ plist = get_next(plist);
+#ifdef CONFIG_AUTO_AP_MODE
+ if (psta->isrc)
+ continue;
+#endif
+ if (chk_sta_is_alive(psta) || !psta->expire_to) {
+ psta->expire_to = pstapriv->expire_to;
+ psta->keep_alive_trycnt = 0;
+ psta->under_exist_checking = 0;
+ } else {
+ if (psta->expire_to > 0)
+ psta->expire_to--;
+ }
+
+ if (psta->expire_to == 0) {
+
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+
+ if (padapter->registrypriv.wifi_spec == 1) {
+
+ psta->expire_to = pstapriv->expire_to;
+ continue;
+ }
+
+ if (psta->state & WIFI_SLEEP_STATE) {
+ if (!(psta->state & WIFI_STA_ALIVE_CHK_STATE)) {
+					/* to check if alive by other methods if station is in ps mode. */
+ psta->expire_to = pstapriv->expire_to;
+ psta->state |= WIFI_STA_ALIVE_CHK_STATE;
+
+ /* DBG_871X("alive chk, sta:" MAC_FMT " is at ps mode!\n", MAC_ARG(psta->hwaddr)); */
+
+ /* to update bcn with tim_bitmap for this station */
+ pstapriv->tim_bitmap |= BIT(psta->aid);
+ update_beacon(padapter, _TIM_IE_, NULL, true);
+
+ if (!pmlmeext->active_keep_alive_check)
+ continue;
+ }
+ }
+ if (pmlmeext->active_keep_alive_check) {
+ int stainfo_offset;
+
+ stainfo_offset = rtw_stainfo_offset(pstapriv, psta);
+ if (stainfo_offset_valid(stainfo_offset))
+ chk_alive_list[chk_alive_num++] = stainfo_offset;
+
+
+ continue;
+ }
+ list_del_init(&psta->asoc_list);
+ pstapriv->asoc_list_cnt--;
+ DBG_871X(
+ "asoc expire "MAC_FMT", state = 0x%x\n",
+ MAC_ARG(psta->hwaddr),
+ psta->state
+ );
+ updated = ap_free_sta(padapter, psta, false, WLAN_REASON_DEAUTH_LEAVING);
+ } else{
+
+
+ /* TODO: Aging mechanism to digest frames in sleep_q to avoid running out of xmitframe */
+ if (psta->sleepq_len > (NR_XMITFRAME/pstapriv->asoc_list_cnt)
+ && padapter->xmitpriv.free_xmitframe_cnt < ((
+ NR_XMITFRAME/pstapriv->asoc_list_cnt
+ )/2)
+ ) {
+ DBG_871X(
+ "%s sta:"MAC_FMT", sleepq_len:%u, free_xmitframe_cnt:%u, asoc_list_cnt:%u, clear sleep_q\n",
+ __func__,
+ MAC_ARG(psta->hwaddr),
+ psta->sleepq_len,
+ padapter->xmitpriv.free_xmitframe_cnt,
+ pstapriv->asoc_list_cnt
+ );
+ wakeup_sta_to_xmit(padapter, psta);
+ }
+ }
+ }
+
+ spin_unlock_bh(&pstapriv->asoc_list_lock);
+
+ if (chk_alive_num) {
+ u8 backup_oper_channel = 0;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+
+		/* switch to the correct channel of the current network before issuing keep-alive frames */
+ if (rtw_get_oper_ch(padapter) != pmlmeext->cur_channel) {
+ backup_oper_channel = rtw_get_oper_ch(padapter);
+ SelectChannel(padapter, pmlmeext->cur_channel);
+ }
+
+ /* issue null data to check sta alive*/
+ for (i = 0; i < chk_alive_num; i++) {
+ int ret = _FAIL;
+
+ psta = rtw_get_stainfo_by_offset(pstapriv, chk_alive_list[i]);
+ if (!(psta->state & _FW_LINKED))
+ continue;
+
+ if (psta->state & WIFI_SLEEP_STATE)
+ ret = issue_nulldata(padapter, psta->hwaddr, 0, 1, 50);
+ else
+ ret = issue_nulldata(padapter, psta->hwaddr, 0, 3, 50);
+
+ psta->keep_alive_trycnt++;
+ if (ret == _SUCCESS) {
+ DBG_871X(
+ "asoc check, sta(" MAC_FMT ") is alive\n",
+ MAC_ARG(psta->hwaddr)
+ );
+ psta->expire_to = pstapriv->expire_to;
+ psta->keep_alive_trycnt = 0;
+ continue;
+ } else if (psta->keep_alive_trycnt <= 3) {
+
+ DBG_871X(
+ "ack check for asoc expire, keep_alive_trycnt =%d\n",
+ psta->keep_alive_trycnt);
+ psta->expire_to = 1;
+ continue;
+ }
+
+ psta->keep_alive_trycnt = 0;
+ DBG_871X(
+ "asoc expire "MAC_FMT", state = 0x%x\n",
+ MAC_ARG(psta->hwaddr),
+ psta->state);
+ spin_lock_bh(&pstapriv->asoc_list_lock);
+ if (list_empty(&psta->asoc_list) == false) {
+ list_del_init(&psta->asoc_list);
+ pstapriv->asoc_list_cnt--;
+ updated = ap_free_sta(padapter, psta, false, WLAN_REASON_DEAUTH_LEAVING);
+ }
+ spin_unlock_bh(&pstapriv->asoc_list_lock);
+ }
+
+ if (backup_oper_channel > 0) /* back to the original operation channel */
+ SelectChannel(padapter, backup_oper_channel);
+ }
+
+ associated_clients_update(padapter, updated);
+}
+
+void add_RATid(struct adapter *padapter, struct sta_info *psta, u8 rssi_level)
+{
+ unsigned char sta_band = 0, shortGIrate = false;
+ unsigned int tx_ra_bitmap = 0;
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct wlan_bssid_ex
+ *pcur_network = (struct wlan_bssid_ex *)&pmlmepriv->cur_network.network;
+
+ if (!psta)
+ return;
+
+ if (!(psta->state & _FW_LINKED))
+ return;
+
+ rtw_hal_update_sta_rate_mask(padapter, psta);
+ tx_ra_bitmap = psta->ra_mask;
+
+ shortGIrate = query_ra_short_GI(psta);
+
+ if (pcur_network->Configuration.DSConfig > 14) {
+
+ if (tx_ra_bitmap & 0xffff000)
+ sta_band |= WIRELESS_11_5N;
+
+ if (tx_ra_bitmap & 0xff0)
+ sta_band |= WIRELESS_11A;
+ } else {
+ if (tx_ra_bitmap & 0xffff000)
+ sta_band |= WIRELESS_11_24N;
+
+ if (tx_ra_bitmap & 0xff0)
+ sta_band |= WIRELESS_11G;
+
+ if (tx_ra_bitmap & 0x0f)
+ sta_band |= WIRELESS_11B;
+ }
+
+ psta->wireless_mode = sta_band;
+ psta->raid = rtw_hal_networktype_to_raid(padapter, psta);
+
+ if (psta->aid < NUM_STA) {
+
+ u8 arg[4] = {0};
+
+ arg[0] = psta->mac_id;
+ arg[1] = psta->raid;
+ arg[2] = shortGIrate;
+ arg[3] = psta->init_rate;
+
+ DBG_871X("%s => mac_id:%d , raid:%d , shortGIrate =%d, bitmap = 0x%x\n",
+ __func__, psta->mac_id, psta->raid, shortGIrate, tx_ra_bitmap);
+
+ rtw_hal_add_ra_tid(padapter, tx_ra_bitmap, arg, rssi_level);
+ } else{
+
+
+ DBG_871X("station aid %d exceed the max number\n", psta->aid);
+ }
+
+}
+
+void update_bmc_sta(struct adapter *padapter)
+{
+ unsigned char network_type;
+ int supportRateNum = 0;
+ unsigned int tx_ra_bitmap = 0;
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct wlan_bssid_ex
+ *pcur_network = (struct wlan_bssid_ex *)&pmlmepriv->cur_network.network;
+ struct sta_info *psta = rtw_get_bcmc_stainfo(padapter);
+
+ if (psta) {
+
+ psta->aid = 0;/* default set to 0 */
+ /* psta->mac_id = psta->aid+4; */
+ psta->mac_id = psta->aid + 1;/* mac_id = 1 for bc/mc stainfo */
+
+ pmlmeinfo->FW_sta_info[psta->mac_id].psta = psta;
+
+ psta->qos_option = 0;
+ psta->htpriv.ht_option = false;
+
+ psta->ieee8021x_blocked = 0;
+
+ memset((void *)&psta->sta_stats, 0, sizeof(struct stainfo_stats));
+
+ /* psta->dot118021XPrivacy = _NO_PRIVACY_;//!!! remove it, because it has been set before this. */
+
+ /* prepare for add_RATid */
+ supportRateNum = rtw_get_rateset_len((u8 *)&pcur_network->SupportedRates);
+ network_type = rtw_check_network_type(
+ (u8 *)&pcur_network->SupportedRates,
+ supportRateNum,
+ pcur_network->Configuration.DSConfig
+ );
+ if (IsSupportedTxCCK(network_type)) {
+ network_type = WIRELESS_11B;
+ } else if (network_type == WIRELESS_INVALID) { /* error handling */
+
+ if (pcur_network->Configuration.DSConfig > 14)
+ network_type = WIRELESS_11A;
+ else
+ network_type = WIRELESS_11B;
+ }
+ update_sta_basic_rate(psta, network_type);
+ psta->wireless_mode = network_type;
+
+ rtw_hal_update_sta_rate_mask(padapter, psta);
+ tx_ra_bitmap = psta->ra_mask;
+
+ psta->raid = rtw_hal_networktype_to_raid(padapter, psta);
+
+ /* ap mode */
+ rtw_hal_set_odm_var(padapter, HAL_ODM_STA_INFO, psta, true);
+
+ /* if (pHalData->fw_ractrl == true) */
+ {
+ u8 arg[4] = {0};
+
+ arg[0] = psta->mac_id;
+ arg[1] = psta->raid;
+ arg[2] = 0;
+ arg[3] = psta->init_rate;
+
+ DBG_871X("%s => mac_id:%d , raid:%d , bitmap = 0x%x\n",
+ __func__, psta->mac_id, psta->raid, tx_ra_bitmap);
+
+ rtw_hal_add_ra_tid(padapter, tx_ra_bitmap, arg, 0);
+ }
+
+ rtw_sta_media_status_rpt(padapter, psta, 1);
+
+ spin_lock_bh(&psta->lock);
+ psta->state = _FW_LINKED;
+ spin_unlock_bh(&psta->lock);
+
+ } else{
+
+
+ DBG_871X("add_RATid_bmc_sta error!\n");
+ }
+
+}
+
+/* notes: */
+/* AID: 1~MAX for sta and 0 for bc/mc in ap/adhoc mode */
+/* MAC_ID = AID+1 for sta in ap/adhoc mode */
+/* MAC_ID = 1 for bc/mc for sta/ap/adhoc */
+/* MAC_ID = 0 for bssid for sta/ap/adhoc */
+/* CAM_ID = 0~3 for default key, cmd_id =macid + 3, macid =aid+1; */
+
+void update_sta_info_apmode(struct adapter *padapter, struct sta_info *psta)
+{
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+ struct ht_priv *phtpriv_ap = &pmlmepriv->htpriv;
+ struct ht_priv *phtpriv_sta = &psta->htpriv;
+ u8 cur_ldpc_cap = 0, cur_stbc_cap = 0, cur_beamform_cap = 0;
+ /* set intf_tag to if1 */
+ /* psta->intf_tag = 0; */
+
+ DBG_871X("%s\n", __func__);
+
+ /* psta->mac_id = psta->aid+4; */
+ /* psta->mac_id = psta->aid+1;//alloc macid when call rtw_alloc_stainfo(), */
+ /* release macid when call rtw_free_stainfo() */
+
+ /* ap mode */
+ rtw_hal_set_odm_var(padapter, HAL_ODM_STA_INFO, psta, true);
+
+ if (psecuritypriv->dot11AuthAlgrthm == dot11AuthAlgrthm_8021X)
+ psta->ieee8021x_blocked = true;
+ else
+ psta->ieee8021x_blocked = false;
+
+
+ /* update sta's cap */
+
+ /* ERP */
+ VCS_update(padapter, psta);
+
+ /* HT related cap */
+ if (phtpriv_sta->ht_option) {
+
+ /* check if sta supports rx ampdu */
+ phtpriv_sta->ampdu_enable = phtpriv_ap->ampdu_enable;
+
+ phtpriv_sta->rx_ampdu_min_spacing = (
+ phtpriv_sta->ht_cap.ampdu_params_info&IEEE80211_HT_CAP_AMPDU_DENSITY
+ )>>2;
+
+ /* bwmode */
+ if ((
+ phtpriv_sta->ht_cap.cap_info & phtpriv_ap->ht_cap.cap_info
+ ) & cpu_to_le16(IEEE80211_HT_CAP_SUP_WIDTH))
+ psta->bw_mode = CHANNEL_WIDTH_40;
+ else
+ psta->bw_mode = CHANNEL_WIDTH_20;
+
+ if (pmlmeext->cur_bwmode < psta->bw_mode)
+ psta->bw_mode = pmlmeext->cur_bwmode;
+
+ phtpriv_sta->ch_offset = pmlmeext->cur_ch_offset;
+
+
+		/* check if sta supports Short GI 20M */
+ if ((
+ phtpriv_sta->ht_cap.cap_info & phtpriv_ap->ht_cap.cap_info
+ ) & cpu_to_le16(IEEE80211_HT_CAP_SGI_20))
+ phtpriv_sta->sgi_20m = true;
+
+		/* check if sta supports Short GI 40M */
+ if ((
+ phtpriv_sta->ht_cap.cap_info & phtpriv_ap->ht_cap.cap_info
+ ) & cpu_to_le16(IEEE80211_HT_CAP_SGI_40)) {
+
+ if (psta->bw_mode == CHANNEL_WIDTH_40) /* according to psta->bw_mode */
+ phtpriv_sta->sgi_40m = true;
+ else
+ phtpriv_sta->sgi_40m = false;
+ }
+
+ psta->qos_option = true;
+
+ /* B0 Config LDPC Coding Capability */
+ if (TEST_FLAG(phtpriv_ap->ldpc_cap, LDPC_HT_ENABLE_TX) &&
+ GET_HT_CAPABILITY_ELE_LDPC_CAP((u8 *)(&phtpriv_sta->ht_cap))) {
+
+ SET_FLAG(cur_ldpc_cap, (LDPC_HT_ENABLE_TX | LDPC_HT_CAP_TX));
+ DBG_871X("Enable HT Tx LDPC for STA(%d)\n", psta->aid);
+ }
+
+ /* B7 B8 B9 Config STBC setting */
+ if (TEST_FLAG(phtpriv_ap->stbc_cap, STBC_HT_ENABLE_TX) &&
+ GET_HT_CAPABILITY_ELE_RX_STBC((u8 *)(&phtpriv_sta->ht_cap))) {
+
+ SET_FLAG(cur_stbc_cap, (STBC_HT_ENABLE_TX | STBC_HT_CAP_TX));
+ DBG_871X("Enable HT Tx STBC for STA(%d)\n", psta->aid);
+ }
+ } else{
+
+
+ phtpriv_sta->ampdu_enable = false;
+
+ phtpriv_sta->sgi_20m = false;
+ phtpriv_sta->sgi_40m = false;
+ psta->bw_mode = CHANNEL_WIDTH_20;
+ phtpriv_sta->ch_offset = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
+ }
+
+ phtpriv_sta->ldpc_cap = cur_ldpc_cap;
+ phtpriv_sta->stbc_cap = cur_stbc_cap;
+ phtpriv_sta->beamform_cap = cur_beamform_cap;
+
+ /* Rx AMPDU */
+ send_delba(padapter, 0, psta->hwaddr);/* recipient */
+
+ /* TX AMPDU */
+ send_delba(padapter, 1, psta->hwaddr);/* originator */
+ phtpriv_sta->agg_enable_bitmap = 0x0;/* reset */
+ phtpriv_sta->candidate_tid_bitmap = 0x0;/* reset */
+
+ update_ldpc_stbc_cap(psta);
+
+ /* todo: init other variables */
+
+ memset((void *)&psta->sta_stats, 0, sizeof(struct stainfo_stats));
+
+
+ /* add ratid */
+ /* add_RATid(padapter, psta);//move to ap_sta_info_defer_update() */
+
+
+ spin_lock_bh(&psta->lock);
+ psta->state |= _FW_LINKED;
+ spin_unlock_bh(&psta->lock);
+
+
+}
+
+static void update_ap_info(struct adapter *padapter, struct sta_info *psta)
+{
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct wlan_bssid_ex
+ *pnetwork = (struct wlan_bssid_ex *)&pmlmepriv->cur_network.network;
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+ struct ht_priv *phtpriv_ap = &pmlmepriv->htpriv;
+
+ psta->wireless_mode = pmlmeext->cur_wireless_mode;
+
+ psta->bssratelen = rtw_get_rateset_len(pnetwork->SupportedRates);
+ memcpy(psta->bssrateset, pnetwork->SupportedRates, psta->bssratelen);
+
+ /* HT related cap */
+ if (phtpriv_ap->ht_option) {
+
+ /* check if sta supports rx ampdu */
+ /* phtpriv_ap->ampdu_enable = phtpriv_ap->ampdu_enable; */
+
+		/* check if sta supports Short GI 20M */
+ if ((phtpriv_ap->ht_cap.cap_info) & cpu_to_le16(IEEE80211_HT_CAP_SGI_20))
+ phtpriv_ap->sgi_20m = true;
+
+		/* check if sta supports Short GI 40M */
+ if ((phtpriv_ap->ht_cap.cap_info) & cpu_to_le16(IEEE80211_HT_CAP_SGI_40))
+ phtpriv_ap->sgi_40m = true;
+
+
+ psta->qos_option = true;
+ } else{
+
+
+ phtpriv_ap->ampdu_enable = false;
+
+ phtpriv_ap->sgi_20m = false;
+ phtpriv_ap->sgi_40m = false;
+ }
+
+ psta->bw_mode = pmlmeext->cur_bwmode;
+ phtpriv_ap->ch_offset = pmlmeext->cur_ch_offset;
+
+ phtpriv_ap->agg_enable_bitmap = 0x0;/* reset */
+ phtpriv_ap->candidate_tid_bitmap = 0x0;/* reset */
+
+ memcpy(&psta->htpriv, &pmlmepriv->htpriv, sizeof(struct ht_priv));
+}
+
+static void update_hw_ht_param(struct adapter *padapter)
+{
+ unsigned char max_AMPDU_len;
+ unsigned char min_MPDU_spacing;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+
+ DBG_871X("%s\n", __func__);
+
+
+ /* handle A-MPDU parameter field */
+ /*
+ AMPDU_para [1:0]:Max AMPDU Len => 0:8k , 1:16k, 2:32k, 3:64k
+ AMPDU_para [4:2]:Min MPDU Start Spacing
+ */
+ max_AMPDU_len = pmlmeinfo->HT_caps.u.HT_cap_element.AMPDU_para & 0x03;
+
+ min_MPDU_spacing = (
+ pmlmeinfo->HT_caps.u.HT_cap_element.AMPDU_para & 0x1c
+ ) >> 2;
+
+ rtw_hal_set_hwreg(
+ padapter,
+ HW_VAR_AMPDU_MIN_SPACE,
+ (u8 *)(&min_MPDU_spacing)
+ );
+
+ rtw_hal_set_hwreg(padapter, HW_VAR_AMPDU_FACTOR, (u8 *)(&max_AMPDU_len));
+
+ /* */
+ /* Config SM Power Save setting */
+ /* */
+ pmlmeinfo->SM_PS = (le16_to_cpu(
+ pmlmeinfo->HT_caps.u.HT_cap_element.HT_caps_info
+ ) & 0x0C) >> 2;
+ if (pmlmeinfo->SM_PS == WLAN_HT_CAP_SM_PS_STATIC)
+ DBG_871X("%s(): WLAN_HT_CAP_SM_PS_STATIC\n", __func__);
+
+ /* */
+ /* Config current HT Protection mode. */
+ /* */
+ /* pmlmeinfo->HT_protection = pmlmeinfo->HT_info.infos[1] & 0x3; */
+
+}
+
+void start_bss_network(struct adapter *padapter, u8 *pbuf)
+{
+ u8 *p;
+ u8 val8, cur_channel, cur_bwmode, cur_ch_offset;
+ u16 bcn_interval;
+ u32 acparm;
+ int ie_len;
+ struct registry_priv *pregpriv = &padapter->registrypriv;
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct security_priv *psecuritypriv = &(padapter->securitypriv);
+ struct wlan_bssid_ex
+ *pnetwork = (struct wlan_bssid_ex *)&pmlmepriv->cur_network.network;
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct wlan_bssid_ex *pnetwork_mlmeext = &(pmlmeinfo->network);
+ struct HT_info_element *pht_info = NULL;
+ u8 cbw40_enable = 0;
+
+ /* DBG_871X("%s\n", __func__); */
+
+ bcn_interval = (u16)pnetwork->Configuration.BeaconPeriod;
+ cur_channel = pnetwork->Configuration.DSConfig;
+ cur_bwmode = CHANNEL_WIDTH_20;
+ cur_ch_offset = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
+
+
+ /* check if there is a WPS IE; */
+ /* if there is a WPS IE in the beacon, hostapd will update the beacon twice when starting, */
+ /* and the first time the security IE (RSN/WPA IE) will not be included in the beacon. */
+ if (NULL == rtw_get_wps_ie(
+ pnetwork->IEs+_FIXED_IE_LENGTH_,
+ pnetwork->IELength-_FIXED_IE_LENGTH_,
+ NULL,
+ NULL
+ ))
+ pmlmeext->bstart_bss = true;
+
+
+ /* todo: update wmm, ht cap */
+ /* pmlmeinfo->WMM_enable; */
+ /* pmlmeinfo->HT_enable; */
+ if (pmlmepriv->qospriv.qos_option)
+ pmlmeinfo->WMM_enable = true;
+ if (pmlmepriv->htpriv.ht_option) {
+
+ pmlmeinfo->WMM_enable = true;
+ pmlmeinfo->HT_enable = true;
+ /* pmlmeinfo->HT_info_enable = true; */
+ /* pmlmeinfo->HT_caps_enable = true; */
+
+ update_hw_ht_param(padapter);
+ }
+
+ if (pmlmepriv->cur_network.join_res != true) { /* setting only at first time */
+
+ /* WEP Key will be set before this function, do not clear CAM. */
+ if (
+ (psecuritypriv->dot11PrivacyAlgrthm != _WEP40_) &&
+ (psecuritypriv->dot11PrivacyAlgrthm != _WEP104_)
+ )
+ flush_all_cam_entry(padapter); /* clear CAM */
+ }
+
+ /* set MSR to AP_Mode */
+ Set_MSR(padapter, _HW_STATE_AP_);
+
+ /* Set BSSID REG */
+ rtw_hal_set_hwreg(padapter, HW_VAR_BSSID, pnetwork->MacAddress);
+
+ /* Set EDCA param reg */
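+ /* each acparm value is assumed to follow the usual Realtek EDCA register layout: AIFS in byte 0, ECWmin/ECWmax in byte 1, TXOP limit in bytes 2-3 */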
+ acparm = 0x002F3217; /* VO */
+ rtw_hal_set_hwreg(padapter, HW_VAR_AC_PARAM_VO, (u8 *)(&acparm));
+ acparm = 0x005E4317; /* VI */
+ rtw_hal_set_hwreg(padapter, HW_VAR_AC_PARAM_VI, (u8 *)(&acparm));
+ /* acparm = 0x00105320; // BE */
+ acparm = 0x005ea42b;
+ rtw_hal_set_hwreg(padapter, HW_VAR_AC_PARAM_BE, (u8 *)(&acparm));
+ acparm = 0x0000A444; /* BK */
+ rtw_hal_set_hwreg(padapter, HW_VAR_AC_PARAM_BK, (u8 *)(&acparm));
+
+ /* Set Security */
+ val8 = (
+ psecuritypriv->dot11AuthAlgrthm == dot11AuthAlgrthm_8021X
+ ) ? 0xcc : 0xcf;
+ rtw_hal_set_hwreg(padapter, HW_VAR_SEC_CFG, (u8 *)(&val8));
+
+ /* Beacon Control related register */
+ rtw_hal_set_hwreg(padapter, HW_VAR_BEACON_INTERVAL, (u8 *)(&bcn_interval));
+
+ rtw_hal_set_hwreg(padapter, HW_VAR_DO_IQK, NULL);
+
+ if (pmlmepriv->cur_network.join_res != true) { /* setting only at first time */
+
+ /* u32 initialgain; */
+
+ /* initialgain = 0x1e; */
+
+
+ /* disable dynamic functions, such as high power, DIG */
+ /* Save_DM_Func_Flag(padapter); */
+ /* Switch_DM_Func(padapter, DYNAMIC_FUNC_DISABLE, false); */
+
+ /* turn on all dynamic functions */
+ Switch_DM_Func(padapter, DYNAMIC_ALL_FUNC_ENABLE, true);
+
+ /* rtw_hal_set_hwreg(padapter, HW_VAR_INITIAL_GAIN, (u8 *)(&initialgain)); */
+
+ }
+
+ /* set channel, bwmode */
+ p = rtw_get_ie(
+ (pnetwork->IEs + sizeof(struct ndis_802_11_fix_ie)),
+ _HT_ADD_INFO_IE_,
+ &ie_len,
+ (pnetwork->IELength - sizeof(struct ndis_802_11_fix_ie))
+ );
+ if (p && ie_len) {
+
+ pht_info = (struct HT_info_element *)(p+2);
+
+ if (cur_channel > 14) {
+ if ((pregpriv->bw_mode & 0xf0) > 0)
+ cbw40_enable = 1;
+ } else {
+ if ((pregpriv->bw_mode & 0x0f) > 0)
+ cbw40_enable = 1;
+ }
+
+ if ((cbw40_enable) && (pht_info->infos[0] & BIT(2))) {
+
+ /* switch to the 40M Hz mode */
+ /* pmlmeext->cur_bwmode = CHANNEL_WIDTH_40; */
+ cur_bwmode = CHANNEL_WIDTH_40;
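+ /* secondary channel offset from the HT Operation IE: 1 = secondary channel above the primary (SCA), 3 = below (SCB) */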
+ switch (pht_info->infos[0] & 0x3) {
+
+ case 1:
+ /* pmlmeext->cur_ch_offset = HAL_PRIME_CHNL_OFFSET_LOWER; */
+ cur_ch_offset = HAL_PRIME_CHNL_OFFSET_LOWER;
+ break;
+
+ case 3:
+ /* pmlmeext->cur_ch_offset = HAL_PRIME_CHNL_OFFSET_UPPER; */
+ cur_ch_offset = HAL_PRIME_CHNL_OFFSET_UPPER;
+ break;
+
+ default:
+ /* pmlmeext->cur_ch_offset = HAL_PRIME_CHNL_OFFSET_DONT_CARE; */
+ cur_ch_offset = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
+ break;
+ }
+
+ }
+
+ }
+
+ set_channel_bwmode(padapter, cur_channel, cur_ch_offset, cur_bwmode);
+ DBG_871X(
+ "CH =%d, BW =%d, offset =%d\n",
+ cur_channel,
+ cur_bwmode,
+ cur_ch_offset
+ );
+ pmlmeext->cur_channel = cur_channel;
+ pmlmeext->cur_bwmode = cur_bwmode;
+ pmlmeext->cur_ch_offset = cur_ch_offset;
+ pmlmeext->cur_wireless_mode = pmlmepriv->cur_network.network_type;
+
+ /* let pnetwork_mlmeext == pnetwork_mlme. */
+ memcpy(pnetwork_mlmeext, pnetwork, pnetwork->Length);
+
+ /* update cur_wireless_mode */
+ update_wireless_mode(padapter);
+
+ /* update RRSR after set channel and bandwidth */
+ UpdateBrateTbl(padapter, pnetwork->SupportedRates);
+ rtw_hal_set_hwreg(padapter, HW_VAR_BASIC_RATE, pnetwork->SupportedRates);
+
+ /* update capability after cur_wireless_mode is updated */
+ update_capinfo(
+ padapter,
+ rtw_get_capability((struct wlan_bssid_ex *)pnetwork)
+ );
+
+
+ if (true == pmlmeext->bstart_bss) {
+
+ update_beacon(padapter, _TIM_IE_, NULL, true);
+
+#ifndef CONFIG_INTERRUPT_BASED_TXBCN /* in the other case the beacon is transmitted when the bcn interrupt comes in. */
+ /* issue beacon frame */
+ if (send_beacon(padapter) == _FAIL)
+ DBG_871X("issue_beacon, fail!\n");
+
+#endif /* CONFIG_INTERRUPT_BASED_TXBCN */
+
+ }
+
+
+ /* update bc/mc sta_info */
+ update_bmc_sta(padapter);
+
+ /* pmlmeext->bstart_bss = true; */
+
+}
+
+int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len)
+{
+ int ret = _SUCCESS;
+ u8 *p;
+ u8 *pHT_caps_ie = NULL;
+ u8 *pHT_info_ie = NULL;
+ struct sta_info *psta = NULL;
+ u16 cap, ht_cap = false;
+ uint ie_len = 0;
+ int group_cipher, pairwise_cipher;
+ u8 channel, network_type, supportRate[NDIS_802_11_LENGTH_RATES_EX];
+ int supportRateNum = 0;
+ u8 OUI1[] = {0x00, 0x50, 0xf2, 0x01};
+ u8 WMM_PARA_IE[] = {0x00, 0x50, 0xf2, 0x02, 0x01, 0x01};
+ struct registry_priv *pregistrypriv = &padapter->registrypriv;
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct wlan_bssid_ex
+ *pbss_network = (struct wlan_bssid_ex *)&pmlmepriv->cur_network.network;
+ u8 *ie = pbss_network->IEs;
+
+ /* SSID */
+ /* Supported rates */
+ /* DS Params */
+ /* WLAN_EID_COUNTRY */
+ /* ERP Information element */
+ /* Extended supported rates */
+ /* WPA/WPA2 */
+ /* Wi-Fi Wireless Multimedia Extensions */
+ /* ht_capab, ht_oper */
+ /* WPS IE */
+
+ DBG_871X("%s, len =%d\n", __func__, len);
+
+ if (check_fwstate(pmlmepriv, WIFI_AP_STATE) != true)
+ return _FAIL;
+
+
+ if (len > MAX_IE_SZ)
+ return _FAIL;
+
+ pbss_network->IELength = len;
+
+ memset(ie, 0, MAX_IE_SZ);
+
+ memcpy(ie, pbuf, pbss_network->IELength);
+
+
+ if (pbss_network->InfrastructureMode != Ndis802_11APMode)
+ return _FAIL;
+
+ pbss_network->Rssi = 0;
+
+ memcpy(pbss_network->MacAddress, myid(&(padapter->eeprompriv)), ETH_ALEN);
+
+ /* beacon interval */
+ p = rtw_get_beacon_interval_from_ie(ie);/* ie + 8; 8: TimeStamp, 2: Beacon Interval, 2: Capability */
+ /* pbss_network->Configuration.BeaconPeriod = le16_to_cpu(*(unsigned short*)p); */
+ pbss_network->Configuration.BeaconPeriod = RTW_GET_LE16(p);
+
+ /* capability */
+ /* cap = *(unsigned short *)rtw_get_capability_from_ie(ie); */
+ /* cap = le16_to_cpu(cap); */
+ cap = RTW_GET_LE16(ie);
+
+ /* SSID */
+ p = rtw_get_ie(
+ ie + _BEACON_IE_OFFSET_,
+ _SSID_IE_,
+ &ie_len,
+ (pbss_network->IELength - _BEACON_IE_OFFSET_)
+ );
+ if (p && ie_len > 0) {
+
+ memset(&pbss_network->Ssid, 0, sizeof(struct ndis_802_11_ssid));
+ memcpy(pbss_network->Ssid.Ssid, (p + 2), ie_len);
+ pbss_network->Ssid.SsidLength = ie_len;
+ }
+
+ /* channel */
+ channel = 0;
+ pbss_network->Configuration.Length = 0;
+ p = rtw_get_ie(
+ ie + _BEACON_IE_OFFSET_,
+ _DSSET_IE_, &ie_len,
+ (pbss_network->IELength - _BEACON_IE_OFFSET_)
+ );
+ if (p && ie_len > 0)
+ channel = *(p + 2);
+
+ pbss_network->Configuration.DSConfig = channel;
+
+
+ memset(supportRate, 0, NDIS_802_11_LENGTH_RATES_EX);
+ /* get supported rates */
+ p = rtw_get_ie(
+ ie + _BEACON_IE_OFFSET_,
+ _SUPPORTEDRATES_IE_,
+ &ie_len,
+ (pbss_network->IELength - _BEACON_IE_OFFSET_)
+ );
+ if (p != NULL) {
+
+ memcpy(supportRate, p+2, ie_len);
+ supportRateNum = ie_len;
+ }
+
+ /* get ext_supported rates */
+ p = rtw_get_ie(
+ ie + _BEACON_IE_OFFSET_,
+ _EXT_SUPPORTEDRATES_IE_,
+ &ie_len,
+ pbss_network->IELength - _BEACON_IE_OFFSET_
+ );
+ if (p != NULL) {
+
+ memcpy(supportRate+supportRateNum, p+2, ie_len);
+ supportRateNum += ie_len;
+
+ }
+
+ network_type = rtw_check_network_type(supportRate, supportRateNum, channel);
+
+ rtw_set_supported_rate(pbss_network->SupportedRates, network_type);
+
+
+ /* parsing ERP_IE */
+ p = rtw_get_ie(
+ ie + _BEACON_IE_OFFSET_,
+ _ERPINFO_IE_,
+ &ie_len,
+ (pbss_network->IELength - _BEACON_IE_OFFSET_)
+ );
+ if (p && ie_len > 0)
+ ERP_IE_handler(padapter, (struct ndis_80211_var_ie *)p);
+
+ /* update privacy/security */
+ if (cap & BIT(4))
+ pbss_network->Privacy = 1;
+ else
+ pbss_network->Privacy = 0;
+
+ psecuritypriv->wpa_psk = 0;
+
+ /* wpa2 */
+ group_cipher = 0; pairwise_cipher = 0;
+ psecuritypriv->wpa2_group_cipher = _NO_PRIVACY_;
+ psecuritypriv->wpa2_pairwise_cipher = _NO_PRIVACY_;
+ p = rtw_get_ie(
+ ie + _BEACON_IE_OFFSET_,
+ _RSN_IE_2_,
+ &ie_len,
+ (pbss_network->IELength - _BEACON_IE_OFFSET_)
+ );
+ if (p && ie_len > 0) {
+
+ if (rtw_parse_wpa2_ie(
+ p,
+ ie_len+2,
+ &group_cipher,
+ &pairwise_cipher,
+ NULL
+ ) == _SUCCESS) {
+
+ psecuritypriv->dot11AuthAlgrthm = dot11AuthAlgrthm_8021X;
+
+ psecuritypriv->dot8021xalg = 1;/* psk, todo:802.1x */
+ psecuritypriv->wpa_psk |= BIT(1);
+
+ psecuritypriv->wpa2_group_cipher = group_cipher;
+ psecuritypriv->wpa2_pairwise_cipher = pairwise_cipher;
+ }
+
+ }
+
+ /* wpa */
+ ie_len = 0;
+ group_cipher = 0; pairwise_cipher = 0;
+ psecuritypriv->wpa_group_cipher = _NO_PRIVACY_;
+ psecuritypriv->wpa_pairwise_cipher = _NO_PRIVACY_;
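+ /* walk the vendor-specific IEs for one carrying the WPA OUI 00:50:F2 with OUI type 1 */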
+ for (p = ie + _BEACON_IE_OFFSET_; ; p += (ie_len + 2)) {
+
+ p = rtw_get_ie(
+ p,
+ _SSN_IE_1_,
+ &ie_len,
+ (pbss_network->IELength - _BEACON_IE_OFFSET_ - (ie_len + 2))
+ );
+ if ((p) && (!memcmp(p+2, OUI1, 4))) {
+
+ if (rtw_parse_wpa_ie(
+ p,
+ ie_len+2,
+ &group_cipher,
+ &pairwise_cipher,
+ NULL
+ ) == _SUCCESS) {
+
+ psecuritypriv->dot11AuthAlgrthm = dot11AuthAlgrthm_8021X;
+
+ psecuritypriv->dot8021xalg = 1;/* psk, todo:802.1x */
+
+ psecuritypriv->wpa_psk |= BIT(0);
+
+ psecuritypriv->wpa_group_cipher = group_cipher;
+ psecuritypriv->wpa_pairwise_cipher = pairwise_cipher;
+ }
+
+ break;
+
+ }
+
+ if ((p == NULL) || (ie_len == 0))
+ break;
+
+
+ }
+
+ /* wmm */
+ ie_len = 0;
+ pmlmepriv->qospriv.qos_option = 0;
+ if (pregistrypriv->wmm_enable) {
+
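+ /* look for the WMM Parameter element: OUI 00:50:F2, OUI type 2, subtype 1, version 1 */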
+ for (p = ie + _BEACON_IE_OFFSET_; ; p += (ie_len + 2)) {
+
+ p = rtw_get_ie(
+ p,
+ _VENDOR_SPECIFIC_IE_,
+ &ie_len,
+ (pbss_network->IELength - _BEACON_IE_OFFSET_ - (ie_len + 2))
+ );
+ if ((p) && !memcmp(p+2, WMM_PARA_IE, 6)) {
+
+ pmlmepriv->qospriv.qos_option = 1;
+
+ *(p+8) |= BIT(7);/* QoS Info, support U-APSD */
+
+ /* disable all ACM bits since the WMM admission control is not supported */
+ *(p + 10) &= ~BIT(4); /* BE */
+ *(p + 14) &= ~BIT(4); /* BK */
+ *(p + 18) &= ~BIT(4); /* VI */
+ *(p + 22) &= ~BIT(4); /* VO */
+
+ break;
+ }
+
+ if ((p == NULL) || (ie_len == 0))
+ break;
+
+ }
+ }
+
+ /* parsing HT_CAP_IE */
+ p = rtw_get_ie(
+ ie + _BEACON_IE_OFFSET_,
+ _HT_CAPABILITY_IE_,
+ &ie_len,
+ (pbss_network->IELength - _BEACON_IE_OFFSET_)
+ );
+ if (p && ie_len > 0) {
+
+ u8 rf_type = 0;
+ u8 max_rx_ampdu_factor = 0;
+ struct rtw_ieee80211_ht_cap *pht_cap = (struct rtw_ieee80211_ht_cap *)(p+2);
+
+ pHT_caps_ie = p;
+
+ ht_cap = true;
+ network_type |= WIRELESS_11_24N;
+
+ rtw_ht_use_default_setting(padapter);
+
+ if (pmlmepriv->htpriv.sgi_20m == false)
+ pht_cap->cap_info &= cpu_to_le16(~(IEEE80211_HT_CAP_SGI_20));
+
+ if (pmlmepriv->htpriv.sgi_40m == false)
+ pht_cap->cap_info &= cpu_to_le16(~(IEEE80211_HT_CAP_SGI_40));
+
+ if (!TEST_FLAG(pmlmepriv->htpriv.ldpc_cap, LDPC_HT_ENABLE_RX))
+ pht_cap->cap_info &= cpu_to_le16(~(IEEE80211_HT_CAP_LDPC_CODING));
+
+
+ if (!TEST_FLAG(pmlmepriv->htpriv.stbc_cap, STBC_HT_ENABLE_TX))
+ pht_cap->cap_info &= cpu_to_le16(~(IEEE80211_HT_CAP_TX_STBC));
+
+
+ if (!TEST_FLAG(pmlmepriv->htpriv.stbc_cap, STBC_HT_ENABLE_RX))
+ pht_cap->cap_info &= cpu_to_le16(~(IEEE80211_HT_CAP_RX_STBC_3R));
+
+
+ pht_cap->ampdu_params_info &= ~(
+ IEEE80211_HT_CAP_AMPDU_FACTOR|IEEE80211_HT_CAP_AMPDU_DENSITY
+ );
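+ /* with a CCMP pairwise cipher advertise the maximum MPDU start spacing (density 7), otherwise leave the density at 0 */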
+
+ if ((psecuritypriv->wpa_pairwise_cipher & WPA_CIPHER_CCMP) ||
+ (psecuritypriv->wpa2_pairwise_cipher & WPA_CIPHER_CCMP)) {
+
+ pht_cap->ampdu_params_info |= (IEEE80211_HT_CAP_AMPDU_DENSITY&(0x07<<2));
+ } else {
+
+
+ pht_cap->ampdu_params_info |= (IEEE80211_HT_CAP_AMPDU_DENSITY&0x00);
+ }
+
+ rtw_hal_get_def_var(
+ padapter,
+ HW_VAR_MAX_RX_AMPDU_FACTOR,
+ &max_rx_ampdu_factor
+ );
+ pht_cap->ampdu_params_info |= (
+ IEEE80211_HT_CAP_AMPDU_FACTOR & max_rx_ampdu_factor
+ ); /* set Max Rx AMPDU size to 64K */
+
+ rtw_hal_get_hwreg(padapter, HW_VAR_RF_TYPE, (u8 *)(&rf_type));
+ if (rf_type == RF_1T1R) {
+
+ pht_cap->supp_mcs_set[0] = 0xff;
+ pht_cap->supp_mcs_set[1] = 0x0;
+ }
+
+ memcpy(&pmlmepriv->htpriv.ht_cap, p+2, ie_len);
+
+ }
+
+ /* parsing HT_INFO_IE */
+ p = rtw_get_ie(
+ ie + _BEACON_IE_OFFSET_,
+ _HT_ADD_INFO_IE_,
+ &ie_len,
+ (pbss_network->IELength - _BEACON_IE_OFFSET_)
+ );
+ if (p && ie_len > 0)
+ pHT_info_ie = p;
+
+
+ switch (network_type) {
+
+ case WIRELESS_11B:
+ pbss_network->NetworkTypeInUse = Ndis802_11DS;
+ break;
+ case WIRELESS_11G:
+ case WIRELESS_11BG:
+ case WIRELESS_11G_24N:
+ case WIRELESS_11BG_24N:
+ pbss_network->NetworkTypeInUse = Ndis802_11OFDM24;
+ break;
+ case WIRELESS_11A:
+ pbss_network->NetworkTypeInUse = Ndis802_11OFDM5;
+ break;
+ default:
+ pbss_network->NetworkTypeInUse = Ndis802_11OFDM24;
+ break;
+ }
+
+ pmlmepriv->cur_network.network_type = network_type;
+
+ pmlmepriv->htpriv.ht_option = false;
+
+ if ((psecuritypriv->wpa2_pairwise_cipher&WPA_CIPHER_TKIP) ||
+ (psecuritypriv->wpa_pairwise_cipher&WPA_CIPHER_TKIP)) {
+
+ /* todo: */
+ /* ht_cap = false; */
+ }
+
+ /* ht_cap */
+ if (pregistrypriv->ht_enable && ht_cap == true) {
+
+ pmlmepriv->htpriv.ht_option = true;
+ pmlmepriv->qospriv.qos_option = 1;
+
+ if (pregistrypriv->ampdu_enable == 1)
+ pmlmepriv->htpriv.ampdu_enable = true;
+
+
+ HT_caps_handler(padapter, (struct ndis_80211_var_ie *)pHT_caps_ie);
+
+ HT_info_handler(padapter, (struct ndis_80211_var_ie *)pHT_info_ie);
+ }
+
+ pbss_network->Length = get_wlan_bssid_ex_sz(
+ (struct wlan_bssid_ex *)pbss_network
+ );
+
+ /* issue beacon to start bss network */
+ /* start_bss_network(padapter, (u8 *)pbss_network); */
+ rtw_startbss_cmd(padapter, RTW_CMDF_WAIT_ACK);
+
+
+ /* alloc sta_info for ap itself */
+ psta = rtw_get_stainfo(&padapter->stapriv, pbss_network->MacAddress);
+ if (!psta) {
+
+ psta = rtw_alloc_stainfo(&padapter->stapriv, pbss_network->MacAddress);
+ if (psta == NULL)
+ return _FAIL;
+
+ }
+
+ /* update AP's sta info */
+ update_ap_info(padapter, psta);
+
+ psta->state |= WIFI_AP_STATE; /* Aries, add, fix bug of flush_cam_entry at STOP AP mode , 0724 */
+ rtw_indicate_connect(padapter);
+
+ pmlmepriv->cur_network.join_res = true;/* used to check if the beacon has already been set */
+
+ /* update bc/mc sta_info */
+ /* update_bmc_sta(padapter); */
+
+ return ret;
+
+}
+
+void rtw_set_macaddr_acl(struct adapter *padapter, int mode)
+{
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct wlan_acl_pool *pacl_list = &pstapriv->acl_list;
+
+ DBG_871X("%s, mode =%d\n", __func__, mode);
+
+ pacl_list->mode = mode;
+}
+
+int rtw_acl_add_sta(struct adapter *padapter, u8 *addr)
+{
+ struct list_head *plist, *phead;
+ u8 added = false;
+ int i, ret = 0;
+ struct rtw_wlan_acl_node *paclnode;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct wlan_acl_pool *pacl_list = &pstapriv->acl_list;
+ struct __queue *pacl_node_q = &pacl_list->acl_node_q;
+
+ DBG_871X(
+ "%s(acl_num =%d) =" MAC_FMT "\n",
+ __func__,
+ pacl_list->num,
+ MAC_ARG(addr)
+ );
+
+ if ((NUM_ACL-1) < pacl_list->num)
+ return (-1);
+
+
+ spin_lock_bh(&(pacl_node_q->lock));
+
+ phead = get_list_head(pacl_node_q);
+ plist = get_next(phead);
+
+ while (phead != plist) {
+
+ paclnode = LIST_CONTAINOR(plist, struct rtw_wlan_acl_node, list);
+ plist = get_next(plist);
+
+ if (!memcmp(paclnode->addr, addr, ETH_ALEN)) {
+
+ if (paclnode->valid == true) {
+
+ added = true;
+ DBG_871X("%s, sta has been added\n", __func__);
+ break;
+ }
+ }
+ }
+
+ spin_unlock_bh(&(pacl_node_q->lock));
+
+
+ if (added == true)
+ return ret;
+
+
+ spin_lock_bh(&(pacl_node_q->lock));
+
+ for (i = 0; i < NUM_ACL; i++) {
+
+ paclnode = &pacl_list->aclnode[i];
+
+ if (paclnode->valid == false) {
+
+ INIT_LIST_HEAD(&paclnode->list);
+
+ memcpy(paclnode->addr, addr, ETH_ALEN);
+
+ paclnode->valid = true;
+
+ list_add_tail(&paclnode->list, get_list_head(pacl_node_q));
+
+ pacl_list->num++;
+
+ break;
+ }
+ }
+
+ DBG_871X("%s, acl_num =%d\n", __func__, pacl_list->num);
+
+ spin_unlock_bh(&(pacl_node_q->lock));
+
+ return ret;
+}
+
+int rtw_acl_remove_sta(struct adapter *padapter, u8 *addr)
+{
+ struct list_head *plist, *phead;
+ int ret = 0;
+ struct rtw_wlan_acl_node *paclnode;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct wlan_acl_pool *pacl_list = &pstapriv->acl_list;
+ struct __queue *pacl_node_q = &pacl_list->acl_node_q;
+ u8 baddr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; /* baddr is used for clearing the acl_list */
+
+ DBG_871X(
+ "%s(acl_num =%d) =" MAC_FMT "\n",
+ __func__,
+ pacl_list->num,
+ MAC_ARG(addr)
+ );
+
+ spin_lock_bh(&(pacl_node_q->lock));
+
+ phead = get_list_head(pacl_node_q);
+ plist = get_next(phead);
+
+ while (phead != plist) {
+
+ paclnode = LIST_CONTAINOR(plist, struct rtw_wlan_acl_node, list);
+ plist = get_next(plist);
+
+ if (
+ !memcmp(paclnode->addr, addr, ETH_ALEN) ||
+ !memcmp(baddr, addr, ETH_ALEN)
+ ) {
+
+ if (paclnode->valid == true) {
+
+ paclnode->valid = false;
+
+ list_del_init(&paclnode->list);
+
+ pacl_list->num--;
+ }
+ }
+ }
+
+ spin_unlock_bh(&(pacl_node_q->lock));
+
+ DBG_871X("%s, acl_num =%d\n", __func__, pacl_list->num);
+
+ return ret;
+
+}
+
+u8 rtw_ap_set_pairwise_key(struct adapter *padapter, struct sta_info *psta)
+{
+ struct cmd_obj *ph2c;
+ struct set_stakey_parm *psetstakey_para;
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+ u8 res = _SUCCESS;
+
+ ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ if (ph2c == NULL) {
+ res = _FAIL;
+ goto exit;
+ }
+
+ psetstakey_para = (struct set_stakey_parm *)rtw_zmalloc(
+ sizeof(struct set_stakey_parm)
+ );
+ if (psetstakey_para == NULL) {
+ kfree((u8 *) ph2c);
+ res = _FAIL;
+ goto exit;
+ }
+
+ init_h2fwcmd_w_parm_no_rsp(ph2c, psetstakey_para, _SetStaKey_CMD_);
+
+
+ psetstakey_para->algorithm = (u8)psta->dot118021XPrivacy;
+
+ memcpy(psetstakey_para->addr, psta->hwaddr, ETH_ALEN);
+
+ memcpy(psetstakey_para->key, &psta->dot118021x_UncstKey, 16);
+
+
+ res = rtw_enqueue_cmd(pcmdpriv, ph2c);
+
+exit:
+
+ return res;
+
+}
+
+static int rtw_ap_set_key(
+ struct adapter *padapter,
+ u8 *key,
+ u8 alg,
+ int keyid,
+ u8 set_tx
+)
+{
+ u8 keylen;
+ struct cmd_obj *pcmd;
+ struct setkey_parm *psetkeyparm;
+ struct cmd_priv *pcmdpriv = &(padapter->cmdpriv);
+ int res = _SUCCESS;
+
+ /* DBG_871X("%s\n", __func__); */
+
+ pcmd = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ if (pcmd == NULL) {
+ res = _FAIL;
+ goto exit;
+ }
+ psetkeyparm = (struct setkey_parm *)rtw_zmalloc(sizeof(struct setkey_parm));
+ if (psetkeyparm == NULL) {
+ kfree((unsigned char *)pcmd);
+ res = _FAIL;
+ goto exit;
+ }
+
+ memset(psetkeyparm, 0, sizeof(struct setkey_parm));
+
+ psetkeyparm->keyid = (u8)keyid;
+ if (is_wep_enc(alg))
+ padapter->securitypriv.key_mask |= BIT(psetkeyparm->keyid);
+
+ psetkeyparm->algorithm = alg;
+
+ psetkeyparm->set_tx = set_tx;
+
+ switch (alg) {
+
+ case _WEP40_:
+ keylen = 5;
+ break;
+ case _WEP104_:
+ keylen = 13;
+ break;
+ case _TKIP_:
+ case _TKIP_WTMIC_:
+ case _AES_:
+ default:
+ keylen = 16;
+ }
+
+ memcpy(&(psetkeyparm->key[0]), key, keylen);
+
+ pcmd->cmdcode = _SetKey_CMD_;
+ pcmd->parmbuf = (u8 *)psetkeyparm;
+ pcmd->cmdsz = (sizeof(struct setkey_parm));
+ pcmd->rsp = NULL;
+ pcmd->rspsz = 0;
+
+
+ INIT_LIST_HEAD(&pcmd->list);
+
+ res = rtw_enqueue_cmd(pcmdpriv, pcmd);
+
+exit:
+
+ return res;
+}
+
+int rtw_ap_set_group_key(struct adapter *padapter, u8 *key, u8 alg, int keyid)
+{
+ DBG_871X("%s\n", __func__);
+
+ return rtw_ap_set_key(padapter, key, alg, keyid, 1);
+}
+
+int rtw_ap_set_wep_key(
+ struct adapter *padapter,
+ u8 *key,
+ u8 keylen,
+ int keyid,
+ u8 set_tx
+)
+{
+ u8 alg;
+
+ switch (keylen) {
+
+ case 5:
+ alg = _WEP40_;
+ break;
+ case 13:
+ alg = _WEP104_;
+ break;
+ default:
+ alg = _NO_PRIVACY_;
+ }
+
+ DBG_871X("%s\n", __func__);
+
+ return rtw_ap_set_key(padapter, key, alg, keyid, set_tx);
+}
+
+static void update_bcn_fixed_ie(struct adapter *padapter)
+{
+ DBG_871X("%s\n", __func__);
+
+}
+
+static void update_bcn_erpinfo_ie(struct adapter *padapter)
+{
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct wlan_bssid_ex *pnetwork = &(pmlmeinfo->network);
+ unsigned char *p, *ie = pnetwork->IEs;
+ u32 len = 0;
+
+ DBG_871X("%s, ERP_enable =%d\n", __func__, pmlmeinfo->ERP_enable);
+
+ if (!pmlmeinfo->ERP_enable)
+ return;
+
+ /* parsing ERP_IE */
+ p = rtw_get_ie(
+ ie + _BEACON_IE_OFFSET_,
+ _ERPINFO_IE_,
+ &len,
+ (pnetwork->IELength - _BEACON_IE_OFFSET_)
+ );
+ if (p && len > 0) {
+
+ struct ndis_80211_var_ie *pIE = (struct ndis_80211_var_ie *)p;
+
+ if (pmlmepriv->num_sta_non_erp == 1)
+ pIE->data[0] |= RTW_ERP_INFO_NON_ERP_PRESENT|RTW_ERP_INFO_USE_PROTECTION;
+ else
+ pIE->data[0] &= ~(
+ RTW_ERP_INFO_NON_ERP_PRESENT|RTW_ERP_INFO_USE_PROTECTION
+ );
+
+ if (pmlmepriv->num_sta_no_short_preamble > 0)
+ pIE->data[0] |= RTW_ERP_INFO_BARKER_PREAMBLE_MODE;
+ else
+ pIE->data[0] &= ~(RTW_ERP_INFO_BARKER_PREAMBLE_MODE);
+
+ ERP_IE_handler(padapter, pIE);
+ }
+
+}
+
+static void update_bcn_htcap_ie(struct adapter *padapter)
+{
+ DBG_871X("%s\n", __func__);
+
+}
+
+static void update_bcn_htinfo_ie(struct adapter *padapter)
+{
+ DBG_871X("%s\n", __func__);
+
+}
+
+static void update_bcn_rsn_ie(struct adapter *padapter)
+{
+ DBG_871X("%s\n", __func__);
+
+}
+
+static void update_bcn_wpa_ie(struct adapter *padapter)
+{
+ DBG_871X("%s\n", __func__);
+
+}
+
+static void update_bcn_wmm_ie(struct adapter *padapter)
+{
+ DBG_871X("%s\n", __func__);
+
+}
+
+static void update_bcn_wps_ie(struct adapter *padapter)
+{
+ u8 *pwps_ie = NULL;
+ u8 *pwps_ie_src;
+ u8 *premainder_ie;
+ u8 *pbackup_remainder_ie = NULL;
+
+ uint wps_ielen = 0, wps_offset, remainder_ielen;
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct wlan_bssid_ex *pnetwork = &(pmlmeinfo->network);
+ unsigned char *ie = pnetwork->IEs;
+ u32 ielen = pnetwork->IELength;
+
+
+ DBG_871X("%s\n", __func__);
+
+ pwps_ie = rtw_get_wps_ie(
+ ie+_FIXED_IE_LENGTH_,
+ ielen-_FIXED_IE_LENGTH_,
+ NULL,
+ &wps_ielen
+ );
+
+ if (pwps_ie == NULL || wps_ielen == 0)
+ return;
+
+ pwps_ie_src = pmlmepriv->wps_beacon_ie;
+ if (pwps_ie_src == NULL)
+ return;
+
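+ /* splice the stored beacon WPS IE over the one currently in the IE buffer, keeping the IEs that follow it intact */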
+ wps_offset = (uint)(pwps_ie-ie);
+
+ premainder_ie = pwps_ie + wps_ielen;
+
+ remainder_ielen = ielen - wps_offset - wps_ielen;
+
+ if (remainder_ielen > 0) {
+
+ pbackup_remainder_ie = rtw_malloc(remainder_ielen);
+ if (pbackup_remainder_ie)
+ memcpy(pbackup_remainder_ie, premainder_ie, remainder_ielen);
+ }
+
+ wps_ielen = (uint)pwps_ie_src[1];/* to get ie data len */
+ if ((wps_offset+wps_ielen+2+remainder_ielen) <= MAX_IE_SZ) {
+
+ memcpy(pwps_ie, pwps_ie_src, wps_ielen+2);
+ pwps_ie += (wps_ielen+2);
+
+ if (pbackup_remainder_ie)
+ memcpy(pwps_ie, pbackup_remainder_ie, remainder_ielen);
+
+ /* update IELength */
+ pnetwork->IELength = wps_offset + (wps_ielen+2) + remainder_ielen;
+ }
+
+ kfree(pbackup_remainder_ie);
+
+ /* deal with the case without set_tx_beacon_cmd() in update_beacon() */
+#if defined(CONFIG_INTERRUPT_BASED_TXBCN)
+ if ((pmlmeinfo->state&0x03) == WIFI_FW_AP_STATE) {
+
+ u8 sr = 0;
+
+ rtw_get_wps_attr_content(
+ pwps_ie_src,
+ wps_ielen,
+ WPS_ATTR_SELECTED_REGISTRAR,
+ (u8 *)(&sr),
+ NULL
+ );
+
+ if (sr) {
+ set_fwstate(pmlmepriv, WIFI_UNDER_WPS);
+ DBG_871X("%s, set WIFI_UNDER_WPS\n", __func__);
+ }
+ }
+#endif
+}
+
+static void update_bcn_p2p_ie(struct adapter *padapter)
+{
+
+}
+
+static void update_bcn_vendor_spec_ie(struct adapter *padapter, u8 *oui)
+{
+ DBG_871X("%s\n", __func__);
+
+ if (!memcmp(RTW_WPA_OUI, oui, 4))
+ update_bcn_wpa_ie(padapter);
+
+ else if (!memcmp(WMM_OUI, oui, 4))
+ update_bcn_wmm_ie(padapter);
+
+ else if (!memcmp(WPS_OUI, oui, 4))
+ update_bcn_wps_ie(padapter);
+
+ else if (!memcmp(P2P_OUI, oui, 4))
+ update_bcn_p2p_ie(padapter);
+
+ else
+ DBG_871X("unknown OUI type!\n");
+
+
+
+}
+
+void update_beacon(struct adapter *padapter, u8 ie_id, u8 *oui, u8 tx)
+{
+ struct mlme_priv *pmlmepriv;
+ struct mlme_ext_priv *pmlmeext;
+ /* struct mlme_ext_info *pmlmeinfo; */
+
+ /* DBG_871X("%s\n", __func__); */
+
+ if (!padapter)
+ return;
+
+ pmlmepriv = &(padapter->mlmepriv);
+ pmlmeext = &(padapter->mlmeextpriv);
+ /* pmlmeinfo = &(pmlmeext->mlmext_info); */
+
+ if (false == pmlmeext->bstart_bss)
+ return;
+
+ spin_lock_bh(&pmlmepriv->bcn_update_lock);
+
+ switch (ie_id) {
+
+ case 0xFF:
+
+ update_bcn_fixed_ie(padapter);/* 8: TimeStamp, 2: Beacon Interval, 2: Capability */
+
+ break;
+
+ case _TIM_IE_:
+
+ update_BCNTIM(padapter);
+
+ break;
+
+ case _ERPINFO_IE_:
+
+ update_bcn_erpinfo_ie(padapter);
+
+ break;
+
+ case _HT_CAPABILITY_IE_:
+
+ update_bcn_htcap_ie(padapter);
+
+ break;
+
+ case _RSN_IE_2_:
+
+ update_bcn_rsn_ie(padapter);
+
+ break;
+
+ case _HT_ADD_INFO_IE_:
+
+ update_bcn_htinfo_ie(padapter);
+
+ break;
+
+ case _VENDOR_SPECIFIC_IE_:
+
+ update_bcn_vendor_spec_ie(padapter, oui);
+
+ break;
+
+ default:
+ break;
+ }
+
+ pmlmepriv->update_bcn = true;
+
+ spin_unlock_bh(&pmlmepriv->bcn_update_lock);
+
+#ifndef CONFIG_INTERRUPT_BASED_TXBCN
+ if (tx) {
+
+ /* send_beacon(padapter);//send_beacon must execute on TSR level */
+ set_tx_beacon_cmd(padapter);
+ }
+#endif /* CONFIG_INTERRUPT_BASED_TXBCN */
+
+}
+
+/*
+op_mode
+Set to 0 (HT pure) under the following conditions
+ - all STAs in the BSS are 20/40 MHz HT in a 20/40 MHz BSS, or
+ - all STAs in the BSS are 20 MHz HT in a 20 MHz BSS
+Set to 1 (HT non-member protection) if there may be non-HT STAs
+ in both the primary and the secondary channel
+Set to 2 if only HT STAs are associated in the BSS
+ and at least one 20 MHz HT STA is associated
+Set to 3 (HT mixed mode) when one or more non-HT STAs are associated
+ (currently a non-GF HT station is also considered a non-HT STA)
+*/
+static int rtw_ht_operation_update(struct adapter *padapter)
+{
+ u16 cur_op_mode, new_op_mode;
+ int op_mode_changes = 0;
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct ht_priv *phtpriv_ap = &pmlmepriv->htpriv;
+
+ if (pmlmepriv->htpriv.ht_option == true)
+ return 0;
+
+ /* if (!iface->conf->ieee80211n || iface->conf->ht_op_mode_fixed) */
+ /* return 0; */
+
+ DBG_871X("%s current operation mode = 0x%X\n",
+ __func__, pmlmepriv->ht_op_mode);
+
+ if (!(pmlmepriv->ht_op_mode & HT_INFO_OPERATION_MODE_NON_GF_DEVS_PRESENT)
+ && pmlmepriv->num_sta_ht_no_gf) {
+ pmlmepriv->ht_op_mode |=
+ HT_INFO_OPERATION_MODE_NON_GF_DEVS_PRESENT;
+ op_mode_changes++;
+ } else if ((pmlmepriv->ht_op_mode &
+ HT_INFO_OPERATION_MODE_NON_GF_DEVS_PRESENT) &&
+ pmlmepriv->num_sta_ht_no_gf == 0) {
+ pmlmepriv->ht_op_mode &=
+ ~HT_INFO_OPERATION_MODE_NON_GF_DEVS_PRESENT;
+ op_mode_changes++;
+ }
+
+ if (!(pmlmepriv->ht_op_mode & HT_INFO_OPERATION_MODE_NON_HT_STA_PRESENT) &&
+ (pmlmepriv->num_sta_no_ht || pmlmepriv->olbc_ht)) {
+ pmlmepriv->ht_op_mode |= HT_INFO_OPERATION_MODE_NON_HT_STA_PRESENT;
+ op_mode_changes++;
+ } else if ((pmlmepriv->ht_op_mode &
+ HT_INFO_OPERATION_MODE_NON_HT_STA_PRESENT) &&
+ (pmlmepriv->num_sta_no_ht == 0 && !pmlmepriv->olbc_ht)) {
+ pmlmepriv->ht_op_mode &=
+ ~HT_INFO_OPERATION_MODE_NON_HT_STA_PRESENT;
+ op_mode_changes++;
+ }
+
+ /* Note: currently we switch to the MIXED op mode if HT non-greenfield
+ * station is associated. Probably it's a theoretical case, since
+ * it looks like all known HT STAs support greenfield.
+ */
+ new_op_mode = 0;
+ if (pmlmepriv->num_sta_no_ht ||
+ (pmlmepriv->ht_op_mode & HT_INFO_OPERATION_MODE_NON_GF_DEVS_PRESENT))
+ new_op_mode = OP_MODE_MIXED;
+ else if (
+ (le16_to_cpu(phtpriv_ap->ht_cap.cap_info) & IEEE80211_HT_CAP_SUP_WIDTH)
+ && pmlmepriv->num_sta_ht_20mhz)
+ new_op_mode = OP_MODE_20MHZ_HT_STA_ASSOCED;
+ else if (pmlmepriv->olbc_ht)
+ new_op_mode = OP_MODE_MAY_BE_LEGACY_STAS;
+ else
+ new_op_mode = OP_MODE_PURE;
+
+ cur_op_mode = pmlmepriv->ht_op_mode & HT_INFO_OPERATION_MODE_OP_MODE_MASK;
+ if (cur_op_mode != new_op_mode) {
+ pmlmepriv->ht_op_mode &= ~HT_INFO_OPERATION_MODE_OP_MODE_MASK;
+ pmlmepriv->ht_op_mode |= new_op_mode;
+ op_mode_changes++;
+ }
+
+ DBG_871X("%s new operation mode = 0x%X changes =%d\n",
+ __func__, pmlmepriv->ht_op_mode, op_mode_changes);
+
+ return op_mode_changes;
+
+}
+
+void associated_clients_update(struct adapter *padapter, u8 updated)
+{
+ /* update associated stations' cap. */
+ if (updated == true) {
+
+ struct list_head *phead, *plist;
+ struct sta_info *psta = NULL;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+
+ spin_lock_bh(&pstapriv->asoc_list_lock);
+
+ phead = &pstapriv->asoc_list;
+ plist = get_next(phead);
+
+ /* check asoc_queue */
+ while (phead != plist) {
+
+ psta = LIST_CONTAINOR(plist, struct sta_info, asoc_list);
+
+ plist = get_next(plist);
+
+ VCS_update(padapter, psta);
+ }
+
+ spin_unlock_bh(&pstapriv->asoc_list_lock);
+
+ }
+
+}
+
+/* called > TSR LEVEL for USB or SDIO Interface */
+void bss_cap_update_on_sta_join(struct adapter *padapter, struct sta_info *psta)
+{
+ u8 beacon_updated = false;
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+
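+ /* track per-BSS counts of stations without short preamble, ERP, short slot time, HT or greenfield support, and refresh the beacon whenever the first or last such station joins */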
+ if (!(psta->flags & WLAN_STA_SHORT_PREAMBLE)) {
+
+ if (!psta->no_short_preamble_set) {
+
+ psta->no_short_preamble_set = 1;
+
+ pmlmepriv->num_sta_no_short_preamble++;
+
+ if ((pmlmeext->cur_wireless_mode > WIRELESS_11B) &&
+ (pmlmepriv->num_sta_no_short_preamble == 1)) {
+
+ beacon_updated = true;
+ update_beacon(padapter, 0xFF, NULL, true);
+ }
+
+ }
+ } else {
+
+
+ if (psta->no_short_preamble_set) {
+
+ psta->no_short_preamble_set = 0;
+
+ pmlmepriv->num_sta_no_short_preamble--;
+
+ if ((pmlmeext->cur_wireless_mode > WIRELESS_11B) &&
+ (pmlmepriv->num_sta_no_short_preamble == 0)) {
+
+ beacon_updated = true;
+ update_beacon(padapter, 0xFF, NULL, true);
+ }
+
+ }
+ }
+
+ if (psta->flags & WLAN_STA_NONERP) {
+
+ if (!psta->nonerp_set) {
+
+ psta->nonerp_set = 1;
+
+ pmlmepriv->num_sta_non_erp++;
+
+ if (pmlmepriv->num_sta_non_erp == 1) {
+
+ beacon_updated = true;
+ update_beacon(padapter, _ERPINFO_IE_, NULL, true);
+ }
+ }
+
+ } else {
+
+
+ if (psta->nonerp_set) {
+
+ psta->nonerp_set = 0;
+
+ pmlmepriv->num_sta_non_erp--;
+
+ if (pmlmepriv->num_sta_non_erp == 0) {
+
+ beacon_updated = true;
+ update_beacon(padapter, _ERPINFO_IE_, NULL, true);
+ }
+ }
+
+ }
+
+
+ if (!(psta->capability & WLAN_CAPABILITY_SHORT_SLOT)) {
+
+ if (!psta->no_short_slot_time_set) {
+
+ psta->no_short_slot_time_set = 1;
+
+ pmlmepriv->num_sta_no_short_slot_time++;
+
+ if ((pmlmeext->cur_wireless_mode > WIRELESS_11B) &&
+ (pmlmepriv->num_sta_no_short_slot_time == 1)) {
+
+ beacon_updated = true;
+ update_beacon(padapter, 0xFF, NULL, true);
+ }
+
+ }
+ } else {
+
+
+ if (psta->no_short_slot_time_set) {
+
+ psta->no_short_slot_time_set = 0;
+
+ pmlmepriv->num_sta_no_short_slot_time--;
+
+ if ((pmlmeext->cur_wireless_mode > WIRELESS_11B) &&
+ (pmlmepriv->num_sta_no_short_slot_time == 0)) {
+
+ beacon_updated = true;
+ update_beacon(padapter, 0xFF, NULL, true);
+ }
+ }
+ }
+
+ if (psta->flags & WLAN_STA_HT) {
+
+ u16 ht_capab = le16_to_cpu(psta->htpriv.ht_cap.cap_info);
+
+ DBG_871X("HT: STA " MAC_FMT " HT Capabilities "
+ "Info: 0x%04x\n", MAC_ARG(psta->hwaddr), ht_capab);
+
+ if (psta->no_ht_set) {
+ psta->no_ht_set = 0;
+ pmlmepriv->num_sta_no_ht--;
+ }
+
+ if ((ht_capab & IEEE80211_HT_CAP_GRN_FLD) == 0) {
+ if (!psta->no_ht_gf_set) {
+ psta->no_ht_gf_set = 1;
+ pmlmepriv->num_sta_ht_no_gf++;
+ }
+ DBG_871X("%s STA " MAC_FMT " - no "
+ "greenfield, num of non-gf stations %d\n",
+ __func__, MAC_ARG(psta->hwaddr),
+ pmlmepriv->num_sta_ht_no_gf);
+ }
+
+ if ((ht_capab & IEEE80211_HT_CAP_SUP_WIDTH) == 0) {
+ if (!psta->ht_20mhz_set) {
+ psta->ht_20mhz_set = 1;
+ pmlmepriv->num_sta_ht_20mhz++;
+ }
+ DBG_871X("%s STA " MAC_FMT " - 20 MHz HT, "
+ "num of 20MHz HT STAs %d\n",
+ __func__, MAC_ARG(psta->hwaddr),
+ pmlmepriv->num_sta_ht_20mhz);
+ }
+
+ } else {
+
+
+ if (!psta->no_ht_set) {
+ psta->no_ht_set = 1;
+ pmlmepriv->num_sta_no_ht++;
+ }
+ if (pmlmepriv->htpriv.ht_option == true) {
+ DBG_871X("%s STA " MAC_FMT
+ " - no HT, num of non-HT stations %d\n",
+ __func__, MAC_ARG(psta->hwaddr),
+ pmlmepriv->num_sta_no_ht);
+ }
+ }
+
+ if (rtw_ht_operation_update(padapter) > 0) {
+
+ update_beacon(padapter, _HT_CAPABILITY_IE_, NULL, false);
+ update_beacon(padapter, _HT_ADD_INFO_IE_, NULL, true);
+ }
+
+ /* update associated stations' cap. */
+ associated_clients_update(padapter, beacon_updated);
+
+ DBG_871X("%s, updated =%d\n", __func__, beacon_updated);
+
+}
+
+u8 bss_cap_update_on_sta_leave(struct adapter *padapter, struct sta_info *psta)
+{
+ u8 beacon_updated = false;
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+
+ if (!psta)
+ return beacon_updated;
+
+ if (psta->no_short_preamble_set) {
+ psta->no_short_preamble_set = 0;
+ pmlmepriv->num_sta_no_short_preamble--;
+ if (pmlmeext->cur_wireless_mode > WIRELESS_11B
+ && pmlmepriv->num_sta_no_short_preamble == 0){
+
+ beacon_updated = true;
+ update_beacon(padapter, 0xFF, NULL, true);
+ }
+ }
+
+ if (psta->nonerp_set) {
+ psta->nonerp_set = 0;
+ pmlmepriv->num_sta_non_erp--;
+ if (pmlmepriv->num_sta_non_erp == 0) {
+
+ beacon_updated = true;
+ update_beacon(padapter, _ERPINFO_IE_, NULL, true);
+ }
+ }
+
+ if (psta->no_short_slot_time_set) {
+ psta->no_short_slot_time_set = 0;
+ pmlmepriv->num_sta_no_short_slot_time--;
+ if (pmlmeext->cur_wireless_mode > WIRELESS_11B
+ && pmlmepriv->num_sta_no_short_slot_time == 0){
+
+ beacon_updated = true;
+ update_beacon(padapter, 0xFF, NULL, true);
+ }
+ }
+
+ if (psta->no_ht_gf_set) {
+ psta->no_ht_gf_set = 0;
+ pmlmepriv->num_sta_ht_no_gf--;
+ }
+
+ if (psta->no_ht_set) {
+ psta->no_ht_set = 0;
+ pmlmepriv->num_sta_no_ht--;
+ }
+
+ if (psta->ht_20mhz_set) {
+ psta->ht_20mhz_set = 0;
+ pmlmepriv->num_sta_ht_20mhz--;
+ }
+
+ if (rtw_ht_operation_update(padapter) > 0) {
+
+ update_beacon(padapter, _HT_CAPABILITY_IE_, NULL, false);
+ update_beacon(padapter, _HT_ADD_INFO_IE_, NULL, true);
+ }
+
+ /* update associated stations' cap. */
+ /* associated_clients_update(padapter, beacon_updated); //moved to avoid deadlock */
+
+ DBG_871X("%s, updated =%d\n", __func__, beacon_updated);
+
+ return beacon_updated;
+
+}
+
+u8 ap_free_sta(
+ struct adapter *padapter,
+ struct sta_info *psta,
+ bool active,
+ u16 reason
+)
+{
+ u8 beacon_updated = false;
+
+ if (!psta)
+ return beacon_updated;
+
+ if (active == true) {
+
+ /* tear down Rx AMPDU */
+ send_delba(padapter, 0, psta->hwaddr);/* recipient */
+
+ /* tear down TX AMPDU */
+ send_delba(padapter, 1, psta->hwaddr);/* originator */
+
+ issue_deauth(padapter, psta->hwaddr, reason);
+ }
+
+ psta->htpriv.agg_enable_bitmap = 0x0;/* reset */
+ psta->htpriv.candidate_tid_bitmap = 0x0;/* reset */
+
+
+ /* report_del_sta_event(padapter, psta->hwaddr, reason); */
+
+ /* clear cam entry / key */
+ rtw_clearstakey_cmd(padapter, psta, true);
+
+
+ spin_lock_bh(&psta->lock);
+ psta->state &= ~_FW_LINKED;
+ spin_unlock_bh(&psta->lock);
+
+ rtw_cfg80211_indicate_sta_disassoc(padapter, psta->hwaddr, reason);
+
+ report_del_sta_event(padapter, psta->hwaddr, reason);
+
+ beacon_updated = bss_cap_update_on_sta_leave(padapter, psta);
+
+ rtw_free_stainfo(padapter, psta);
+
+
+ return beacon_updated;
+
+}
+
+int rtw_sta_flush(struct adapter *padapter)
+{
+ struct list_head *phead, *plist;
+ int ret = 0;
+ struct sta_info *psta = NULL;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ u8 bc_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+
+ DBG_871X(FUNC_NDEV_FMT"\n", FUNC_NDEV_ARG(padapter->pnetdev));
+
+ if ((pmlmeinfo->state&0x03) != WIFI_FW_AP_STATE)
+ return ret;
+
+
+ spin_lock_bh(&pstapriv->asoc_list_lock);
+ phead = &pstapriv->asoc_list;
+ plist = get_next(phead);
+
+ /* free sta asoc_queue */
+ while (phead != plist) {
+
+ psta = LIST_CONTAINOR(plist, struct sta_info, asoc_list);
+
+ plist = get_next(plist);
+
+ list_del_init(&psta->asoc_list);
+ pstapriv->asoc_list_cnt--;
+
+ /* spin_unlock_bh(&pstapriv->asoc_list_lock); */
+ ap_free_sta(padapter, psta, true, WLAN_REASON_DEAUTH_LEAVING);
+ /* spin_lock_bh(&pstapriv->asoc_list_lock); */
+ }
+ spin_unlock_bh(&pstapriv->asoc_list_lock);
+
+
+ issue_deauth(padapter, bc_addr, WLAN_REASON_DEAUTH_LEAVING);
+
+ associated_clients_update(padapter, true);
+
+ return ret;
+
+}
+
+/* called > TSR LEVEL for USB or SDIO Interface */
+void sta_info_update(struct adapter *padapter, struct sta_info *psta)
+{
+ int flags = psta->flags;
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+
+
+ /* update wmm cap. */
+ if (WLAN_STA_WME&flags)
+ psta->qos_option = 1;
+ else
+ psta->qos_option = 0;
+
+ if (pmlmepriv->qospriv.qos_option == 0)
+ psta->qos_option = 0;
+
+ /* update 802.11n ht cap. */
+ if (WLAN_STA_HT&flags) {
+
+ psta->htpriv.ht_option = true;
+ psta->qos_option = 1;
+ } else {
+
+
+ psta->htpriv.ht_option = false;
+ }
+
+ if (pmlmepriv->htpriv.ht_option == false)
+ psta->htpriv.ht_option = false;
+
+ update_sta_info_apmode(padapter, psta);
+
+
+}
+
+/* called >= TSR LEVEL for USB or SDIO Interface */
+void ap_sta_info_defer_update(struct adapter *padapter, struct sta_info *psta)
+{
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+
+ if (psta->state & _FW_LINKED) {
+
+ pmlmeinfo->FW_sta_info[psta->mac_id].psta = psta;
+
+ /* add ratid */
+ add_RATid(padapter, psta, 0);/* DM_RATR_STA_INIT */
+ }
+}
+/* restore hw setting from sw data structures */
+void rtw_ap_restore_network(struct adapter *padapter)
+{
+ struct mlme_priv *mlmepriv = &padapter->mlmepriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct sta_info *psta;
+ struct security_priv *psecuritypriv = &(padapter->securitypriv);
+ struct list_head *phead, *plist;
+ u8 chk_alive_num = 0;
+ char chk_alive_list[NUM_STA];
+ int i;
+
+ rtw_setopmode_cmd(padapter, Ndis802_11APMode, false);
+
+ set_channel_bwmode(
+ padapter,
+ pmlmeext->cur_channel,
+ pmlmeext->cur_ch_offset,
+ pmlmeext->cur_bwmode
+ );
+
+ start_bss_network(padapter, (u8 *)&mlmepriv->cur_network.network);
+
+ if ((padapter->securitypriv.dot11PrivacyAlgrthm == _TKIP_) ||
+ (padapter->securitypriv.dot11PrivacyAlgrthm == _AES_)) {
+
+ /* restore group key, WEP keys are restored in ips_leave() */
+ rtw_set_key(
+ padapter,
+ psecuritypriv,
+ psecuritypriv->dot118021XGrpKeyid,
+ 0,
+ false
+ );
+ }
+
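+ /* snapshot the offsets of the associated stations under the lock, then restore their RA entries and pairwise keys outside it */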
+ spin_lock_bh(&pstapriv->asoc_list_lock);
+
+ phead = &pstapriv->asoc_list;
+ plist = get_next(phead);
+
+ while (phead != plist) {
+ int stainfo_offset;
+
+ psta = LIST_CONTAINOR(plist, struct sta_info, asoc_list);
+ plist = get_next(plist);
+
+ stainfo_offset = rtw_stainfo_offset(pstapriv, psta);
+ if (stainfo_offset_valid(stainfo_offset))
+ chk_alive_list[chk_alive_num++] = stainfo_offset;
+
+ }
+
+ spin_unlock_bh(&pstapriv->asoc_list_lock);
+
+ for (i = 0; i < chk_alive_num; i++) {
+ psta = rtw_get_stainfo_by_offset(pstapriv, chk_alive_list[i]);
+
+ if (psta == NULL) {
+ DBG_871X(FUNC_ADPT_FMT" sta_info is null\n", FUNC_ADPT_ARG(padapter));
+ } else if (psta->state & _FW_LINKED) {
+ rtw_sta_media_status_rpt(padapter, psta, 1);
+ Update_RA_Entry(padapter, psta);
+ /* pairwise key */
+ /* per sta pairwise key and settings */
+ if ((padapter->securitypriv.dot11PrivacyAlgrthm == _TKIP_) ||
+ (padapter->securitypriv.dot11PrivacyAlgrthm == _AES_)) {
+
+ rtw_setstakey_cmd(padapter, psta, true, false);
+ }
+ }
+ }
+
+}
+
+void start_ap_mode(struct adapter *padapter)
+{
+ int i;
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct wlan_acl_pool *pacl_list = &pstapriv->acl_list;
+
+ pmlmepriv->update_bcn = false;
+
+ /* init_mlme_ap_info(padapter); */
+ pmlmeext->bstart_bss = false;
+
+ pmlmepriv->num_sta_non_erp = 0;
+
+ pmlmepriv->num_sta_no_short_slot_time = 0;
+
+ pmlmepriv->num_sta_no_short_preamble = 0;
+
+ pmlmepriv->num_sta_ht_no_gf = 0;
+ pmlmepriv->num_sta_no_ht = 0;
+ pmlmepriv->num_sta_ht_20mhz = 0;
+
+ pmlmepriv->olbc = false;
+
+ pmlmepriv->olbc_ht = false;
+
+ pmlmepriv->ht_op_mode = 0;
+
+ for (i = 0; i < NUM_STA; i++)
+ pstapriv->sta_aid[i] = NULL;
+
+ pmlmepriv->wps_beacon_ie = NULL;
+ pmlmepriv->wps_probe_resp_ie = NULL;
+ pmlmepriv->wps_assoc_resp_ie = NULL;
+
+ pmlmepriv->p2p_beacon_ie = NULL;
+ pmlmepriv->p2p_probe_resp_ie = NULL;
+
+
+ /* for ACL */
+ INIT_LIST_HEAD(&(pacl_list->acl_node_q.queue));
+ pacl_list->num = 0;
+ pacl_list->mode = 0;
+ for (i = 0; i < NUM_ACL; i++) {
+ INIT_LIST_HEAD(&pacl_list->aclnode[i].list);
+ pacl_list->aclnode[i].valid = false;
+ }
+
+}
+
+void stop_ap_mode(struct adapter *padapter)
+{
+ struct list_head *phead, *plist;
+ struct rtw_wlan_acl_node *paclnode;
+ struct sta_info *psta = NULL;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct wlan_acl_pool *pacl_list = &pstapriv->acl_list;
+ struct __queue *pacl_node_q = &pacl_list->acl_node_q;
+
+ pmlmepriv->update_bcn = false;
+ pmlmeext->bstart_bss = false;
+
+ /* reset and init security priv; this could be refined with rtw_reset_securitypriv */
+ memset(
+ (unsigned char *)&padapter->securitypriv,
+ 0,
+ sizeof(struct security_priv)
+ );
+ padapter->securitypriv.ndisauthtype = Ndis802_11AuthModeOpen;
+ padapter->securitypriv.ndisencryptstatus = Ndis802_11WEPDisabled;
+
+ /* for ACL */
+ spin_lock_bh(&(pacl_node_q->lock));
+ phead = get_list_head(pacl_node_q);
+ plist = get_next(phead);
+ while (phead != plist) {
+
+ paclnode = LIST_CONTAINOR(plist, struct rtw_wlan_acl_node, list);
+ plist = get_next(plist);
+
+ if (paclnode->valid == true) {
+
+ paclnode->valid = false;
+
+ list_del_init(&paclnode->list);
+
+ pacl_list->num--;
+ }
+ }
+ spin_unlock_bh(&(pacl_node_q->lock));
+
+ DBG_871X("%s, free acl_node_queue, num =%d\n", __func__, pacl_list->num);
+
+ rtw_sta_flush(padapter);
+
+ /* free_assoc_sta_resources */
+ rtw_free_all_stainfo(padapter);
+
+ psta = rtw_get_bcmc_stainfo(padapter);
+ rtw_free_stainfo(padapter, psta);
+
+ rtw_init_bcmc_stainfo(padapter);
+
+ rtw_free_mlme_priv_ie_data(pmlmepriv);
+
+ rtw_btcoex_MediaStatusNotify(padapter, 0); /* disconnect */
+}
diff --git a/drivers/staging/rtl8723bs/core/rtw_btcoex.c b/drivers/staging/rtl8723bs/core/rtw_btcoex.c
new file mode 100644
index 000000000000..3c5cb78b52ea
--- /dev/null
+++ b/drivers/staging/rtl8723bs/core/rtw_btcoex.c
@@ -0,0 +1,243 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2013 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#include <drv_types.h>
+#include <rtw_debug.h>
+#include <rtw_btcoex.h>
+#include <hal_btcoex.h>
+
+
+void rtw_btcoex_Initialize(struct adapter *padapter)
+{
+ hal_btcoex_Initialize(padapter);
+}
+
+void rtw_btcoex_PowerOnSetting(struct adapter *padapter)
+{
+ hal_btcoex_PowerOnSetting(padapter);
+}
+
+void rtw_btcoex_HAL_Initialize(struct adapter *padapter, u8 bWifiOnly)
+{
+ hal_btcoex_InitHwConfig(padapter, bWifiOnly);
+}
+
+void rtw_btcoex_IpsNotify(struct adapter *padapter, u8 type)
+{
+ hal_btcoex_IpsNotify(padapter, type);
+}
+
+void rtw_btcoex_LpsNotify(struct adapter *padapter, u8 type)
+{
+ hal_btcoex_LpsNotify(padapter, type);
+}
+
+void rtw_btcoex_ScanNotify(struct adapter *padapter, u8 type)
+{
+ hal_btcoex_ScanNotify(padapter, type);
+}
+
+void rtw_btcoex_ConnectNotify(struct adapter *padapter, u8 action)
+{
+ hal_btcoex_ConnectNotify(padapter, action);
+}
+
+void rtw_btcoex_MediaStatusNotify(struct adapter *padapter, u8 mediaStatus)
+{
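+ /* in AP mode, re-download the firmware reserved page on a connect event before notifying the coex engine */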
+ if ((RT_MEDIA_CONNECT == mediaStatus)
+ && (check_fwstate(&padapter->mlmepriv, WIFI_AP_STATE) == true)) {
+ rtw_hal_set_hwreg(padapter, HW_VAR_DL_RSVD_PAGE, NULL);
+ }
+
+ hal_btcoex_MediaStatusNotify(padapter, mediaStatus);
+}
+
+void rtw_btcoex_SpecialPacketNotify(struct adapter *padapter, u8 pktType)
+{
+ hal_btcoex_SpecialPacketNotify(padapter, pktType);
+}
+
+void rtw_btcoex_IQKNotify(struct adapter *padapter, u8 state)
+{
+ hal_btcoex_IQKNotify(padapter, state);
+}
+
+void rtw_btcoex_BtInfoNotify(struct adapter *padapter, u8 length, u8 *tmpBuf)
+{
+ hal_btcoex_BtInfoNotify(padapter, length, tmpBuf);
+}
+
+void rtw_btcoex_SuspendNotify(struct adapter *padapter, u8 state)
+{
+ hal_btcoex_SuspendNotify(padapter, state);
+}
+
+void rtw_btcoex_HaltNotify(struct adapter *padapter)
+{
+ if (false == padapter->bup) {
+ DBG_871X(FUNC_ADPT_FMT ": bup =%d Skip!\n",
+ FUNC_ADPT_ARG(padapter), padapter->bup);
+
+ return;
+ }
+
+ if (true == padapter->bSurpriseRemoved) {
+ DBG_871X(FUNC_ADPT_FMT ": bSurpriseRemoved =%d Skip!\n",
+ FUNC_ADPT_ARG(padapter), padapter->bSurpriseRemoved);
+
+ return;
+ }
+
+ hal_btcoex_HaltNotify(padapter);
+}
+
+u8 rtw_btcoex_IsBtDisabled(struct adapter *padapter)
+{
+ return hal_btcoex_IsBtDisabled(padapter);
+}
+
+void rtw_btcoex_Handler(struct adapter *padapter)
+{
+ hal_btcoex_Hanlder(padapter);
+}
+
+s32 rtw_btcoex_IsBTCoexCtrlAMPDUSize(struct adapter *padapter)
+{
+ s32 coexctrl;
+
+ coexctrl = hal_btcoex_IsBTCoexCtrlAMPDUSize(padapter);
+
+ return coexctrl;
+}
+
+void rtw_btcoex_SetManualControl(struct adapter *padapter, u8 manual)
+{
+ if (true == manual) {
+ hal_btcoex_SetManualControl(padapter, true);
+ } else {
+ hal_btcoex_SetManualControl(padapter, false);
+ }
+}
+
+u8 rtw_btcoex_IsBtControlLps(struct adapter *padapter)
+{
+ return hal_btcoex_IsBtControlLps(padapter);
+}
+
+u8 rtw_btcoex_IsLpsOn(struct adapter *padapter)
+{
+ return hal_btcoex_IsLpsOn(padapter);
+}
+
+u8 rtw_btcoex_RpwmVal(struct adapter *padapter)
+{
+ return hal_btcoex_RpwmVal(padapter);
+}
+
+u8 rtw_btcoex_LpsVal(struct adapter *padapter)
+{
+ return hal_btcoex_LpsVal(padapter);
+}
+
+void rtw_btcoex_SetBTCoexist(struct adapter *padapter, u8 bBtExist)
+{
+ hal_btcoex_SetBTCoexist(padapter, bBtExist);
+}
+
+void rtw_btcoex_SetChipType(struct adapter *padapter, u8 chipType)
+{
+ hal_btcoex_SetChipType(padapter, chipType);
+}
+
+void rtw_btcoex_SetPGAntNum(struct adapter *padapter, u8 antNum)
+{
+ hal_btcoex_SetPgAntNum(padapter, antNum);
+}
+
+void rtw_btcoex_SetSingleAntPath(struct adapter *padapter, u8 singleAntPath)
+{
+ hal_btcoex_SetSingleAntPath(padapter, singleAntPath);
+}
+
+u32 rtw_btcoex_GetRaMask(struct adapter *padapter)
+{
+ return hal_btcoex_GetRaMask(padapter);
+}
+
+void rtw_btcoex_RecordPwrMode(struct adapter *padapter, u8 *pCmdBuf, u8 cmdLen)
+{
+ hal_btcoex_RecordPwrMode(padapter, pCmdBuf, cmdLen);
+}
+
+void rtw_btcoex_DisplayBtCoexInfo(struct adapter *padapter, u8 *pbuf, u32 bufsize)
+{
+ hal_btcoex_DisplayBtCoexInfo(padapter, pbuf, bufsize);
+}
+
+void rtw_btcoex_SetDBG(struct adapter *padapter, u32 *pDbgModule)
+{
+ hal_btcoex_SetDBG(padapter, pDbgModule);
+}
+
+u32 rtw_btcoex_GetDBG(struct adapter *padapter, u8 *pStrBuf, u32 bufSize)
+{
+ return hal_btcoex_GetDBG(padapter, pStrBuf, bufSize);
+}
+
+/* ================================================== */
+/* Below Functions are called by BT-Coex */
+/* ================================================== */
+void rtw_btcoex_RejectApAggregatedPacket(struct adapter *padapter, u8 enable)
+{
+ struct mlme_ext_info *pmlmeinfo;
+ struct sta_info *psta;
+
+ pmlmeinfo = &padapter->mlmeextpriv.mlmext_info;
+ psta = rtw_get_stainfo(&padapter->stapriv, get_bssid(&padapter->mlmepriv));
+
+ if (true == enable) {
+ pmlmeinfo->bAcceptAddbaReq = false;
+ if (psta)
+ send_delba(padapter, 0, psta->hwaddr);
+ } else {
+ pmlmeinfo->bAcceptAddbaReq = true;
+ }
+}
+
+void rtw_btcoex_LPS_Enter(struct adapter *padapter)
+{
+ struct pwrctrl_priv *pwrpriv;
+ u8 lpsVal;
+
+
+ pwrpriv = adapter_to_pwrctl(padapter);
+
+ pwrpriv->bpower_saving = true;
+ lpsVal = rtw_btcoex_LpsVal(padapter);
+ rtw_set_ps_mode(padapter, PS_MODE_MIN, 0, lpsVal, "BTCOEX");
+}
+
+void rtw_btcoex_LPS_Leave(struct adapter *padapter)
+{
+ struct pwrctrl_priv *pwrpriv;
+
+
+ pwrpriv = adapter_to_pwrctl(padapter);
+
+ if (pwrpriv->pwr_mode != PS_MODE_ACTIVE) {
+ rtw_set_ps_mode(padapter, PS_MODE_ACTIVE, 0, 0, "BTCOEX");
+ LPS_RF_ON_check(padapter, 100);
+ pwrpriv->bpower_saving = false;
+ }
+}
diff --git a/drivers/staging/rtl8723bs/core/rtw_cmd.c b/drivers/staging/rtl8723bs/core/rtw_cmd.c
new file mode 100644
index 000000000000..080c81b9aa94
--- /dev/null
+++ b/drivers/staging/rtl8723bs/core/rtw_cmd.c
@@ -0,0 +1,2226 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#define _RTW_CMD_C_
+
+#include <drv_types.h>
+#include <rtw_debug.h>
+#include <linux/jiffies.h>
+
+static struct _cmd_callback rtw_cmd_callback[] = {
+ {GEN_CMD_CODE(_Read_MACREG), NULL}, /*0*/
+ {GEN_CMD_CODE(_Write_MACREG), NULL},
+ {GEN_CMD_CODE(_Read_BBREG), &rtw_getbbrfreg_cmdrsp_callback},
+ {GEN_CMD_CODE(_Write_BBREG), NULL},
+ {GEN_CMD_CODE(_Read_RFREG), &rtw_getbbrfreg_cmdrsp_callback},
+ {GEN_CMD_CODE(_Write_RFREG), NULL}, /*5*/
+ {GEN_CMD_CODE(_Read_EEPROM), NULL},
+ {GEN_CMD_CODE(_Write_EEPROM), NULL},
+ {GEN_CMD_CODE(_Read_EFUSE), NULL},
+ {GEN_CMD_CODE(_Write_EFUSE), NULL},
+
+ {GEN_CMD_CODE(_Read_CAM), NULL}, /*10*/
+ {GEN_CMD_CODE(_Write_CAM), NULL},
+ {GEN_CMD_CODE(_setBCNITV), NULL},
+ {GEN_CMD_CODE(_setMBIDCFG), NULL},
+ {GEN_CMD_CODE(_JoinBss), &rtw_joinbss_cmd_callback}, /*14*/
+ {GEN_CMD_CODE(_DisConnect), &rtw_disassoc_cmd_callback}, /*15*/
+ {GEN_CMD_CODE(_CreateBss), &rtw_createbss_cmd_callback},
+ {GEN_CMD_CODE(_SetOpMode), NULL},
+ {GEN_CMD_CODE(_SiteSurvey), &rtw_survey_cmd_callback}, /*18*/
+ {GEN_CMD_CODE(_SetAuth), NULL},
+
+ {GEN_CMD_CODE(_SetKey), NULL}, /*20*/
+ {GEN_CMD_CODE(_SetStaKey), &rtw_setstaKey_cmdrsp_callback},
+ {GEN_CMD_CODE(_SetAssocSta), &rtw_setassocsta_cmdrsp_callback},
+ {GEN_CMD_CODE(_DelAssocSta), NULL},
+ {GEN_CMD_CODE(_SetStaPwrState), NULL},
+ {GEN_CMD_CODE(_SetBasicRate), NULL}, /*25*/
+ {GEN_CMD_CODE(_GetBasicRate), NULL},
+ {GEN_CMD_CODE(_SetDataRate), NULL},
+ {GEN_CMD_CODE(_GetDataRate), NULL},
+ {GEN_CMD_CODE(_SetPhyInfo), NULL},
+
+ {GEN_CMD_CODE(_GetPhyInfo), NULL}, /*30*/
+ {GEN_CMD_CODE(_SetPhy), NULL},
+ {GEN_CMD_CODE(_GetPhy), NULL},
+ {GEN_CMD_CODE(_readRssi), NULL},
+ {GEN_CMD_CODE(_readGain), NULL},
+ {GEN_CMD_CODE(_SetAtim), NULL}, /*35*/
+ {GEN_CMD_CODE(_SetPwrMode), NULL},
+ {GEN_CMD_CODE(_JoinbssRpt), NULL},
+ {GEN_CMD_CODE(_SetRaTable), NULL},
+ {GEN_CMD_CODE(_GetRaTable), NULL},
+
+ {GEN_CMD_CODE(_GetCCXReport), NULL}, /*40*/
+ {GEN_CMD_CODE(_GetDTMReport), NULL},
+ {GEN_CMD_CODE(_GetTXRateStatistics), NULL},
+ {GEN_CMD_CODE(_SetUsbSuspend), NULL},
+ {GEN_CMD_CODE(_SetH2cLbk), NULL},
+ {GEN_CMD_CODE(_AddBAReq), NULL}, /*45*/
+ {GEN_CMD_CODE(_SetChannel), NULL}, /*46*/
+ {GEN_CMD_CODE(_SetTxPower), NULL},
+ {GEN_CMD_CODE(_SwitchAntenna), NULL},
+ {GEN_CMD_CODE(_SetCrystalCap), NULL},
+ {GEN_CMD_CODE(_SetSingleCarrierTx), NULL}, /*50*/
+
+ {GEN_CMD_CODE(_SetSingleToneTx), NULL}, /*51*/
+ {GEN_CMD_CODE(_SetCarrierSuppressionTx), NULL},
+ {GEN_CMD_CODE(_SetContinuousTx), NULL},
+ {GEN_CMD_CODE(_SwitchBandwidth), NULL}, /*54*/
+ {GEN_CMD_CODE(_TX_Beacon), NULL},/*55*/
+
+ {GEN_CMD_CODE(_Set_MLME_EVT), NULL},/*56*/
+ {GEN_CMD_CODE(_Set_Drv_Extra), NULL},/*57*/
+ {GEN_CMD_CODE(_Set_H2C_MSG), NULL},/*58*/
+ {GEN_CMD_CODE(_SetChannelPlan), NULL},/*59*/
+ {GEN_CMD_CODE(_LedBlink), NULL},/*60*/
+
+ {GEN_CMD_CODE(_SetChannelSwitch), NULL},/*61*/
+ {GEN_CMD_CODE(_TDLS), NULL},/*62*/
+ {GEN_CMD_CODE(_ChkBMCSleepq), NULL}, /*63*/
+
+ {GEN_CMD_CODE(_RunInThreadCMD), NULL},/*64*/
+};
+
+static struct cmd_hdl wlancmds[] = {
+ GEN_DRV_CMD_HANDLER(0, NULL) /*0*/
+ GEN_DRV_CMD_HANDLER(0, NULL)
+ GEN_DRV_CMD_HANDLER(0, NULL)
+ GEN_DRV_CMD_HANDLER(0, NULL)
+ GEN_DRV_CMD_HANDLER(0, NULL)
+ GEN_DRV_CMD_HANDLER(0, NULL)
+ GEN_MLME_EXT_HANDLER(0, NULL)
+ GEN_MLME_EXT_HANDLER(0, NULL)
+ GEN_MLME_EXT_HANDLER(0, NULL)
+ GEN_MLME_EXT_HANDLER(0, NULL)
+ GEN_MLME_EXT_HANDLER(0, NULL) /*10*/
+ GEN_MLME_EXT_HANDLER(0, NULL)
+ GEN_MLME_EXT_HANDLER(0, NULL)
+ GEN_MLME_EXT_HANDLER(0, NULL)
+ GEN_MLME_EXT_HANDLER(sizeof(struct joinbss_parm), join_cmd_hdl) /*14*/
+ GEN_MLME_EXT_HANDLER(sizeof(struct disconnect_parm), disconnect_hdl)
+ GEN_MLME_EXT_HANDLER(sizeof(struct createbss_parm), createbss_hdl)
+ GEN_MLME_EXT_HANDLER(sizeof(struct setopmode_parm), setopmode_hdl)
+ GEN_MLME_EXT_HANDLER(sizeof(struct sitesurvey_parm), sitesurvey_cmd_hdl) /*18*/
+ GEN_MLME_EXT_HANDLER(sizeof(struct setauth_parm), setauth_hdl)
+ GEN_MLME_EXT_HANDLER(sizeof(struct setkey_parm), setkey_hdl) /*20*/
+ GEN_MLME_EXT_HANDLER(sizeof(struct set_stakey_parm), set_stakey_hdl)
+ GEN_MLME_EXT_HANDLER(sizeof(struct set_assocsta_parm), NULL)
+ GEN_MLME_EXT_HANDLER(sizeof(struct del_assocsta_parm), NULL)
+ GEN_MLME_EXT_HANDLER(sizeof(struct setstapwrstate_parm), NULL)
+ GEN_MLME_EXT_HANDLER(sizeof(struct setbasicrate_parm), NULL)
+ GEN_MLME_EXT_HANDLER(sizeof(struct getbasicrate_parm), NULL)
+ GEN_MLME_EXT_HANDLER(sizeof(struct setdatarate_parm), NULL)
+ GEN_MLME_EXT_HANDLER(sizeof(struct getdatarate_parm), NULL)
+ GEN_MLME_EXT_HANDLER(sizeof(struct setphyinfo_parm), NULL)
+ GEN_MLME_EXT_HANDLER(sizeof(struct getphyinfo_parm), NULL) /*30*/
+ GEN_MLME_EXT_HANDLER(sizeof(struct setphy_parm), NULL)
+ GEN_MLME_EXT_HANDLER(sizeof(struct getphy_parm), NULL)
+ GEN_MLME_EXT_HANDLER(0, NULL)
+ GEN_MLME_EXT_HANDLER(0, NULL)
+ GEN_MLME_EXT_HANDLER(0, NULL)
+ GEN_MLME_EXT_HANDLER(0, NULL)
+ GEN_MLME_EXT_HANDLER(0, NULL)
+ GEN_MLME_EXT_HANDLER(0, NULL)
+ GEN_MLME_EXT_HANDLER(0, NULL)
+ GEN_MLME_EXT_HANDLER(0, NULL) /*40*/
+ GEN_MLME_EXT_HANDLER(0, NULL)
+ GEN_MLME_EXT_HANDLER(0, NULL)
+ GEN_MLME_EXT_HANDLER(0, NULL)
+ GEN_MLME_EXT_HANDLER(0, NULL)
+ GEN_MLME_EXT_HANDLER(sizeof(struct addBaReq_parm), add_ba_hdl)
+ GEN_MLME_EXT_HANDLER(sizeof(struct set_ch_parm), set_ch_hdl) /* 46 */
+ GEN_MLME_EXT_HANDLER(0, NULL)
+ GEN_MLME_EXT_HANDLER(0, NULL)
+ GEN_MLME_EXT_HANDLER(0, NULL)
+ GEN_MLME_EXT_HANDLER(0, NULL) /*50*/
+ GEN_MLME_EXT_HANDLER(0, NULL)
+ GEN_MLME_EXT_HANDLER(0, NULL)
+ GEN_MLME_EXT_HANDLER(0, NULL)
+ GEN_MLME_EXT_HANDLER(0, NULL)
+ GEN_MLME_EXT_HANDLER(sizeof(struct Tx_Beacon_param), tx_beacon_hdl) /*55*/
+
+ GEN_MLME_EXT_HANDLER(0, mlme_evt_hdl) /*56*/
+ GEN_MLME_EXT_HANDLER(0, rtw_drvextra_cmd_hdl) /*57*/
+
+ GEN_MLME_EXT_HANDLER(0, h2c_msg_hdl) /*58*/
+ GEN_MLME_EXT_HANDLER(sizeof(struct SetChannelPlan_param), set_chplan_hdl) /*59*/
+ GEN_MLME_EXT_HANDLER(sizeof(struct LedBlink_param), led_blink_hdl) /*60*/
+
+ GEN_MLME_EXT_HANDLER(sizeof(struct SetChannelSwitch_param), set_csa_hdl) /*61*/
+ GEN_MLME_EXT_HANDLER(sizeof(struct TDLSoption_param), tdls_hdl) /*62*/
+ GEN_MLME_EXT_HANDLER(0, chk_bmc_sleepq_hdl) /*63*/
+	GEN_MLME_EXT_HANDLER(sizeof(struct RunInThread_param), run_in_thread_hdl) /*64*/
+};
+
+/*
+ * The caller and rtw_cmd_thread protect cmd_q with a spin_lock;
+ * no irqsave is necessary.
+ */
+
+sint _rtw_init_cmd_priv(struct cmd_priv *pcmdpriv)
+{
+ sint res = _SUCCESS;
+
+ sema_init(&(pcmdpriv->cmd_queue_sema), 0);
+ /* sema_init(&(pcmdpriv->cmd_done_sema), 0); */
+ sema_init(&(pcmdpriv->terminate_cmdthread_sema), 0);
+
+
+ _rtw_init_queue(&(pcmdpriv->cmd_queue));
+
+	/* allocate DMA-able/non-paged memory for cmd_buf and rsp_buf */
+
+ pcmdpriv->cmd_seq = 1;
+
+ pcmdpriv->cmd_allocated_buf = rtw_zmalloc(MAX_CMDSZ + CMDBUFF_ALIGN_SZ);
+
+ if (pcmdpriv->cmd_allocated_buf == NULL) {
+ res = _FAIL;
+ goto exit;
+ }
+
+ pcmdpriv->cmd_buf = pcmdpriv->cmd_allocated_buf + CMDBUFF_ALIGN_SZ - ((SIZE_PTR)(pcmdpriv->cmd_allocated_buf) & (CMDBUFF_ALIGN_SZ-1));
+
+ pcmdpriv->rsp_allocated_buf = rtw_zmalloc(MAX_RSPSZ + 4);
+
+ if (pcmdpriv->rsp_allocated_buf == NULL) {
+ res = _FAIL;
+ goto exit;
+ }
+
+ pcmdpriv->rsp_buf = pcmdpriv->rsp_allocated_buf + 4 - ((SIZE_PTR)(pcmdpriv->rsp_allocated_buf) & 3);
+
+ pcmdpriv->cmd_issued_cnt = pcmdpriv->cmd_done_cnt = pcmdpriv->rsp_cnt = 0;
+
+ mutex_init(&pcmdpriv->sctx_mutex);
+exit:
+ return res;
+}
+
+static void c2h_wk_callback(_workitem *work);
+sint _rtw_init_evt_priv(struct evt_priv *pevtpriv)
+{
+ sint res = _SUCCESS;
+
+	/* init the event sequence number, counters and the C2H work item/queue */
+ atomic_set(&pevtpriv->event_seq, 0);
+ pevtpriv->evt_done_cnt = 0;
+
+ _init_workitem(&pevtpriv->c2h_wk, c2h_wk_callback, NULL);
+ pevtpriv->c2h_wk_alive = false;
+ pevtpriv->c2h_queue = rtw_cbuf_alloc(C2H_QUEUE_MAX_LEN+1);
+
+ return res;
+}
+
+void _rtw_free_evt_priv(struct evt_priv *pevtpriv)
+{
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_info_, ("+_rtw_free_evt_priv\n"));
+
+ _cancel_workitem_sync(&pevtpriv->c2h_wk);
+ while (pevtpriv->c2h_wk_alive)
+ msleep(10);
+
+ while (!rtw_cbuf_empty(pevtpriv->c2h_queue)) {
+ void *c2h = rtw_cbuf_pop(pevtpriv->c2h_queue);
+ if (c2h != NULL && c2h != (void *)pevtpriv) {
+ kfree(c2h);
+ }
+ }
+ kfree(pevtpriv->c2h_queue);
+
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_info_, ("-_rtw_free_evt_priv\n"));
+}
+
+void _rtw_free_cmd_priv(struct cmd_priv *pcmdpriv)
+{
+ if (pcmdpriv) {
+ kfree(pcmdpriv->cmd_allocated_buf);
+
+ kfree(pcmdpriv->rsp_allocated_buf);
+
+ mutex_destroy(&pcmdpriv->sctx_mutex);
+ }
+}
+
+/*
+ * Calling context:
+ *
+ * rtw_enqueue_cmd can only be called from kernel thread context,
+ * since only spin_lock is used.
+ *
+ * ISR/callback functions must not call this function.
+ */
+
+sint _rtw_enqueue_cmd(struct __queue *queue, struct cmd_obj *obj)
+{
+ _irqL irqL;
+
+ if (obj == NULL)
+ goto exit;
+
+ /* spin_lock_bh(&queue->lock); */
+ spin_lock_irqsave(&queue->lock, irqL);
+
+ list_add_tail(&obj->list, &queue->queue);
+
+ /* spin_unlock_bh(&queue->lock); */
+ spin_unlock_irqrestore(&queue->lock, irqL);
+
+exit:
+ return _SUCCESS;
+}
+
+struct cmd_obj *_rtw_dequeue_cmd(struct __queue *queue)
+{
+ _irqL irqL;
+ struct cmd_obj *obj;
+
+ /* spin_lock_bh(&(queue->lock)); */
+ spin_lock_irqsave(&queue->lock, irqL);
+ if (list_empty(&(queue->queue)))
+ obj = NULL;
+	else {
+ obj = LIST_CONTAINOR(get_next(&(queue->queue)), struct cmd_obj, list);
+ list_del_init(&obj->list);
+ }
+
+ /* spin_unlock_bh(&(queue->lock)); */
+ spin_unlock_irqrestore(&queue->lock, irqL);
+
+ return obj;
+}
+
+u32 rtw_init_cmd_priv(struct cmd_priv *pcmdpriv)
+{
+ u32 res;
+
+ res = _rtw_init_cmd_priv(pcmdpriv);
+ return res;
+}
+
+u32 rtw_init_evt_priv(struct evt_priv *pevtpriv)
+{
+ int res;
+
+ res = _rtw_init_evt_priv(pevtpriv);
+ return res;
+}
+
+void rtw_free_evt_priv(struct evt_priv *pevtpriv)
+{
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_info_, ("rtw_free_evt_priv\n"));
+ _rtw_free_evt_priv(pevtpriv);
+}
+
+void rtw_free_cmd_priv(struct cmd_priv *pcmdpriv)
+{
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_info_, ("rtw_free_cmd_priv\n"));
+ _rtw_free_cmd_priv(pcmdpriv);
+}
+
+int rtw_cmd_filter(struct cmd_priv *pcmdpriv, struct cmd_obj *cmd_obj);
+int rtw_cmd_filter(struct cmd_priv *pcmdpriv, struct cmd_obj *cmd_obj)
+{
+ u8 bAllow = false; /* set to true to allow enqueuing cmd when hw_init_completed is false */
+
+ if (cmd_obj->cmdcode == GEN_CMD_CODE(_SetChannelPlan))
+ bAllow = true;
+
+ if ((pcmdpriv->padapter->hw_init_completed == false && bAllow == false)
+		|| atomic_read(&(pcmdpriv->cmdthd_running)) == false /* cmd_thread not running */
+ ) {
+ /* DBG_871X("%s:%s: drop cmdcode:%u, hw_init_completed:%u, cmdthd_running:%u\n", caller_func, __func__, */
+ /* cmd_obj->cmdcode, */
+ /* pcmdpriv->padapter->hw_init_completed, */
+ /* pcmdpriv->cmdthd_running */
+ /* */
+
+ return _FAIL;
+ }
+ return _SUCCESS;
+}
+
+
+
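+/*
+ * Illustrative producer-side sketch (error handling omitted; the real callers
+ * below, e.g. rtw_addbareq_cmd(), follow this pattern):
+ *
+ *	ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ *	parm = (struct addBaReq_parm *)rtw_zmalloc(sizeof(struct addBaReq_parm));
+ *	init_h2fwcmd_w_parm_no_rsp(ph2c, parm, GEN_CMD_CODE(_AddBAReq));
+ *	res = rtw_enqueue_cmd(pcmdpriv, ph2c);
+ *
+ * On _SUCCESS the cmd_queue_sema is signalled and rtw_cmd_thread() picks the
+ * command up; if the command is filtered out, rtw_enqueue_cmd() frees the
+ * cmd_obj itself.
+ */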
+u32 rtw_enqueue_cmd(struct cmd_priv *pcmdpriv, struct cmd_obj *cmd_obj)
+{
+ int res = _FAIL;
+ struct adapter *padapter = pcmdpriv->padapter;
+
+ if (cmd_obj == NULL) {
+ goto exit;
+ }
+
+ cmd_obj->padapter = padapter;
+
+ res = rtw_cmd_filter(pcmdpriv, cmd_obj);
+ if (_FAIL == res) {
+ rtw_free_cmd_obj(cmd_obj);
+ goto exit;
+ }
+
+ res = _rtw_enqueue_cmd(&pcmdpriv->cmd_queue, cmd_obj);
+
+ if (res == _SUCCESS)
+ up(&pcmdpriv->cmd_queue_sema);
+
+exit:
+ return res;
+}
+
+struct cmd_obj *rtw_dequeue_cmd(struct cmd_priv *pcmdpriv)
+{
+ struct cmd_obj *cmd_obj;
+
+ cmd_obj = _rtw_dequeue_cmd(&pcmdpriv->cmd_queue);
+
+ return cmd_obj;
+}
+
+void rtw_free_cmd_obj(struct cmd_obj *pcmd)
+{
+ if ((pcmd->cmdcode != _JoinBss_CMD_) &&
+ (pcmd->cmdcode != _CreateBss_CMD_)) {
+ /* free parmbuf in cmd_obj */
+ kfree((unsigned char *)pcmd->parmbuf);
+ }
+
+ if (pcmd->rsp != NULL) {
+ if (pcmd->rspsz != 0) {
+ /* free rsp in cmd_obj */
+ kfree((unsigned char *)pcmd->rsp);
+ }
+ }
+
+ /* free cmd_obj */
+ kfree((unsigned char *)pcmd);
+}
+
+
+void rtw_stop_cmd_thread(struct adapter *adapter)
+{
+ if (adapter->cmdThread &&
+ atomic_read(&(adapter->cmdpriv.cmdthd_running)) == true &&
+ adapter->cmdpriv.stop_req == 0) {
+ adapter->cmdpriv.stop_req = 1;
+ up(&adapter->cmdpriv.cmd_queue_sema);
+ down(&adapter->cmdpriv.terminate_cmdthread_sema);
+ }
+}
+
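+/*
+ * Command thread main loop (summary): wait on cmd_queue_sema, dequeue a
+ * cmd_obj, run it through rtw_cmd_filter(), copy its parameter buffer into
+ * cmd_buf, dispatch via wlancmds[cmdcode].h2cfuns, and finally hand the
+ * result to rtw_cmd_callback[cmdcode].callback (or free the cmd_obj when no
+ * callback is registered).
+ */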
+int rtw_cmd_thread(void *context)
+{
+ u8 ret;
+ struct cmd_obj *pcmd;
+ u8 *pcmdbuf, *prspbuf;
+ unsigned long cmd_start_time;
+ unsigned long cmd_process_time;
+ u8 (*cmd_hdl)(struct adapter *padapter, u8 *pbuf);
+ void (*pcmd_callback)(struct adapter *dev, struct cmd_obj *pcmd);
+ struct adapter *padapter = (struct adapter *)context;
+ struct cmd_priv *pcmdpriv = &(padapter->cmdpriv);
+ struct drvextra_cmd_parm *extra_parm = NULL;
+
+ thread_enter("RTW_CMD_THREAD");
+
+ pcmdbuf = pcmdpriv->cmd_buf;
+ prspbuf = pcmdpriv->rsp_buf;
+
+ pcmdpriv->stop_req = 0;
+ atomic_set(&(pcmdpriv->cmdthd_running), true);
+ up(&pcmdpriv->terminate_cmdthread_sema);
+
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_info_, ("start r871x rtw_cmd_thread !!!!\n"));
+
+ while (1) {
+ if (down_interruptible(&pcmdpriv->cmd_queue_sema)) {
+ DBG_871X_LEVEL(_drv_always_, FUNC_ADPT_FMT" down_interruptible(&pcmdpriv->cmd_queue_sema) return != 0, break\n", FUNC_ADPT_ARG(padapter));
+ break;
+ }
+
+ if ((padapter->bDriverStopped == true) || (padapter->bSurpriseRemoved == true)) {
+ DBG_871X_LEVEL(_drv_always_, "%s: DriverStopped(%d) SurpriseRemoved(%d) break at line %d\n",
+ __func__, padapter->bDriverStopped, padapter->bSurpriseRemoved, __LINE__);
+ break;
+ }
+
+ if (pcmdpriv->stop_req) {
+ DBG_871X_LEVEL(_drv_always_, FUNC_ADPT_FMT" stop_req:%u, break\n", FUNC_ADPT_ARG(padapter), pcmdpriv->stop_req);
+ break;
+ }
+
+ if (list_empty(&(pcmdpriv->cmd_queue.queue))) {
+ /* DBG_871X("%s: cmd queue is empty!\n", __func__); */
+ continue;
+ }
+
+ if (rtw_register_cmd_alive(padapter) != _SUCCESS) {
+ RT_TRACE(_module_hal_xmit_c_, _drv_notice_,
+ ("%s: wait to leave LPS_LCLK\n", __func__));
+ continue;
+ }
+
+_next:
+ if ((padapter->bDriverStopped == true) || (padapter->bSurpriseRemoved == true)) {
+ DBG_871X_LEVEL(_drv_always_, "%s: DriverStopped(%d) SurpriseRemoved(%d) break at line %d\n",
+ __func__, padapter->bDriverStopped, padapter->bSurpriseRemoved, __LINE__);
+ break;
+ }
+
+ pcmd = rtw_dequeue_cmd(pcmdpriv);
+ if (!pcmd) {
+ rtw_unregister_cmd_alive(padapter);
+ continue;
+ }
+
+ cmd_start_time = jiffies;
+
+ if (_FAIL == rtw_cmd_filter(pcmdpriv, pcmd)) {
+ pcmd->res = H2C_DROPPED;
+ goto post_process;
+ }
+
+ pcmdpriv->cmd_issued_cnt++;
+
+		pcmd->cmdsz = _RND4((pcmd->cmdsz)); /* round cmdsz up to a multiple of 4 */
+
+ memcpy(pcmdbuf, pcmd->parmbuf, pcmd->cmdsz);
+
+ if (pcmd->cmdcode < ARRAY_SIZE(wlancmds)) {
+ cmd_hdl = wlancmds[pcmd->cmdcode].h2cfuns;
+
+ if (cmd_hdl) {
+ ret = cmd_hdl(pcmd->padapter, pcmdbuf);
+ pcmd->res = ret;
+ }
+
+ pcmdpriv->cmd_seq++;
+		} else {
+ pcmd->res = H2C_PARAMETERS_ERROR;
+ }
+
+ cmd_hdl = NULL;
+
+post_process:
+
+ if (mutex_lock_interruptible(&(pcmd->padapter->cmdpriv.sctx_mutex)) == 0) {
+ if (pcmd->sctx) {
+ DBG_871X_LEVEL(_drv_always_, FUNC_ADPT_FMT" pcmd->sctx\n",
+ FUNC_ADPT_ARG(pcmd->padapter));
+
+ if (pcmd->res == H2C_SUCCESS)
+ rtw_sctx_done(&pcmd->sctx);
+ else
+ rtw_sctx_done_err(&pcmd->sctx, RTW_SCTX_DONE_CMD_ERROR);
+ }
+ mutex_unlock(&(pcmd->padapter->cmdpriv.sctx_mutex));
+ }
+
+ cmd_process_time = jiffies_to_msecs(jiffies - cmd_start_time);
+ if (cmd_process_time > 1000) {
+ if (pcmd->cmdcode == GEN_CMD_CODE(_Set_Drv_Extra)) {
+ DBG_871X(ADPT_FMT" cmd =%d process_time =%lu > 1 sec\n",
+ ADPT_ARG(pcmd->padapter), pcmd->cmdcode, cmd_process_time);
+ /* rtw_warn_on(1); */
+ } else if (pcmd->cmdcode == GEN_CMD_CODE(_Set_MLME_EVT)) {
+ DBG_871X(ADPT_FMT" cmd =%d, process_time =%lu > 1 sec\n",
+ ADPT_ARG(pcmd->padapter), pcmd->cmdcode, cmd_process_time);
+ /* rtw_warn_on(1); */
+ } else {
+ DBG_871X(ADPT_FMT" cmd =%d, process_time =%lu > 1 sec\n",
+ ADPT_ARG(pcmd->padapter), pcmd->cmdcode, cmd_process_time);
+ /* rtw_warn_on(1); */
+ }
+ }
+
+		/* call the callback function for post-processing */
+		if (pcmd->cmdcode < ARRAY_SIZE(rtw_cmd_callback)) {
+			pcmd_callback = rtw_cmd_callback[pcmd->cmdcode].callback;
+			if (pcmd_callback == NULL) {
+				RT_TRACE(_module_rtl871x_cmd_c_, _drv_info_, ("mlme_cmd_hdl(): pcmd_callback = 0x%p, cmdcode = 0x%x\n", pcmd_callback, pcmd->cmdcode));
+				rtw_free_cmd_obj(pcmd);
+			} else {
+				/* todo: !!! fill rsp_buf to pcmd->rsp if (pcmd->rsp != NULL) */
+				pcmd_callback(pcmd->padapter, pcmd); /* note: the callback is responsible for freeing cmd_obj (see rtw_cmd_callback) */
+			}
+		} else {
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_, ("%s: cmdcode = 0x%x callback not defined!\n", __func__, pcmd->cmdcode));
+ rtw_free_cmd_obj(pcmd);
+ }
+
+ flush_signals_thread();
+
+ goto _next;
+
+ }
+
+ /* free all cmd_obj resources */
+ do {
+ pcmd = rtw_dequeue_cmd(pcmdpriv);
+ if (pcmd == NULL) {
+ rtw_unregister_cmd_alive(padapter);
+ break;
+ }
+
+ /* DBG_871X("%s: leaving... drop cmdcode:%u size:%d\n", __func__, pcmd->cmdcode, pcmd->cmdsz); */
+
+ if (pcmd->cmdcode == GEN_CMD_CODE(_Set_Drv_Extra)) {
+ extra_parm = (struct drvextra_cmd_parm *)pcmd->parmbuf;
+ if (extra_parm->pbuf && extra_parm->size > 0) {
+ kfree(extra_parm->pbuf);
+ }
+ }
+
+ rtw_free_cmd_obj(pcmd);
+ } while (1);
+
+ up(&pcmdpriv->terminate_cmdthread_sema);
+ atomic_set(&(pcmdpriv->cmdthd_running), false);
+
+ thread_exit();
+}
+
+/*
+ * rtw_sitesurvey_cmd()
+ *
+ * NOTE: the caller must already hold pmlmepriv->lock before calling this
+ * function.
+ */
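+/*
+ * Illustrative calling pattern (sketch only; the actual callers live in the
+ * ioctl/scan paths of this driver):
+ *
+ *	spin_lock_bh(&pmlmepriv->lock);
+ *	res = rtw_sitesurvey_cmd(padapter, ssid, ssid_num, NULL, 0);
+ *	spin_unlock_bh(&pmlmepriv->lock);
+ */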
+u8 rtw_sitesurvey_cmd(struct adapter *padapter, struct ndis_802_11_ssid *ssid, int ssid_num,
+ struct rtw_ieee80211_channel *ch, int ch_num)
+{
+ u8 res = _FAIL;
+ struct cmd_obj *ph2c;
+ struct sitesurvey_parm *psurveyPara;
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+
+ if (check_fwstate(pmlmepriv, _FW_LINKED) == true) {
+ rtw_lps_ctrl_wk_cmd(padapter, LPS_CTRL_SCAN, 1);
+ }
+
+ ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ if (ph2c == NULL)
+ return _FAIL;
+
+ psurveyPara = (struct sitesurvey_parm *)rtw_zmalloc(sizeof(struct sitesurvey_parm));
+ if (psurveyPara == NULL) {
+ kfree((unsigned char *) ph2c);
+ return _FAIL;
+ }
+
+ rtw_free_network_queue(padapter, false);
+
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_info_, ("%s: flush network queue\n", __func__));
+
+ init_h2fwcmd_w_parm_no_rsp(ph2c, psurveyPara, GEN_CMD_CODE(_SiteSurvey));
+
+ /* psurveyPara->bsslimit = 48; */
+ psurveyPara->scan_mode = pmlmepriv->scan_mode;
+
+ /* prepare ssid list */
+ if (ssid) {
+ int i;
+ for (i = 0; i < ssid_num && i < RTW_SSID_SCAN_AMOUNT; i++) {
+ if (ssid[i].SsidLength) {
+ memcpy(&psurveyPara->ssid[i], &ssid[i], sizeof(struct ndis_802_11_ssid));
+ psurveyPara->ssid_num++;
+
+ DBG_871X(FUNC_ADPT_FMT" ssid:(%s, %d)\n", FUNC_ADPT_ARG(padapter),
+ psurveyPara->ssid[i].Ssid, psurveyPara->ssid[i].SsidLength);
+ }
+ }
+ }
+
+ /* prepare channel list */
+ if (ch) {
+ int i;
+ for (i = 0; i < ch_num && i < RTW_CHANNEL_SCAN_AMOUNT; i++) {
+ if (ch[i].hw_value && !(ch[i].flags & RTW_IEEE80211_CHAN_DISABLED)) {
+ memcpy(&psurveyPara->ch[i], &ch[i], sizeof(struct rtw_ieee80211_channel));
+ psurveyPara->ch_num++;
+
+ DBG_871X(FUNC_ADPT_FMT" ch:%u\n", FUNC_ADPT_ARG(padapter),
+ psurveyPara->ch[i].hw_value);
+ }
+ }
+ }
+
+ set_fwstate(pmlmepriv, _FW_UNDER_SURVEY);
+
+ res = rtw_enqueue_cmd(pcmdpriv, ph2c);
+
+ if (res == _SUCCESS) {
+
+ pmlmepriv->scan_start_time = jiffies;
+ _set_timer(&pmlmepriv->scan_to_timer, SCANNING_TIMEOUT);
+ } else {
+ _clr_fwstate_(pmlmepriv, _FW_UNDER_SURVEY);
+ }
+ return res;
+}
+
+u8 rtw_setdatarate_cmd(struct adapter *padapter, u8 *rateset)
+{
+ struct cmd_obj *ph2c;
+ struct setdatarate_parm *pbsetdataratepara;
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+ u8 res = _SUCCESS;
+
+ ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ if (ph2c == NULL) {
+ res = _FAIL;
+ goto exit;
+ }
+
+ pbsetdataratepara = (struct setdatarate_parm *)rtw_zmalloc(sizeof(struct setdatarate_parm));
+ if (pbsetdataratepara == NULL) {
+ kfree((u8 *) ph2c);
+ res = _FAIL;
+ goto exit;
+ }
+
+ init_h2fwcmd_w_parm_no_rsp(ph2c, pbsetdataratepara, GEN_CMD_CODE(_SetDataRate));
+ pbsetdataratepara->mac_id = 5;
+ memcpy(pbsetdataratepara->datarates, rateset, NumRates);
+
+ res = rtw_enqueue_cmd(pcmdpriv, ph2c);
+exit:
+ return res;
+}
+
+void rtw_getbbrfreg_cmdrsp_callback(struct adapter *padapter, struct cmd_obj *pcmd)
+{
+ /* rtw_free_cmd_obj(pcmd); */
+ kfree((unsigned char *) pcmd->parmbuf);
+ kfree((unsigned char *) pcmd);
+}
+
+u8 rtw_createbss_cmd(struct adapter *padapter)
+{
+ struct cmd_obj *pcmd;
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct wlan_bssid_ex *pdev_network = &padapter->registrypriv.dev_network;
+ u8 res = _SUCCESS;
+
+ if (pmlmepriv->assoc_ssid.SsidLength == 0) {
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_info_, (" createbss for Any SSid:%s\n", pmlmepriv->assoc_ssid.Ssid));
+ } else {
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_info_, (" createbss for SSid:%s\n", pmlmepriv->assoc_ssid.Ssid));
+ }
+
+ pcmd = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ if (pcmd == NULL) {
+ res = _FAIL;
+ goto exit;
+ }
+
+ INIT_LIST_HEAD(&pcmd->list);
+ pcmd->cmdcode = _CreateBss_CMD_;
+ pcmd->parmbuf = (unsigned char *)pdev_network;
+ pcmd->cmdsz = get_wlan_bssid_ex_sz((struct wlan_bssid_ex *)pdev_network);
+ pcmd->rsp = NULL;
+ pcmd->rspsz = 0;
+
+ pdev_network->Length = pcmd->cmdsz;
+
+ res = rtw_enqueue_cmd(pcmdpriv, pcmd);
+
+exit:
+ return res;
+}
+
+u8 rtw_startbss_cmd(struct adapter *padapter, int flags)
+{
+ struct cmd_obj *pcmd;
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+ struct submit_ctx sctx;
+ u8 res = _SUCCESS;
+
+ if (flags & RTW_CMDF_DIRECTLY) {
+ /* no need to enqueue, do the cmd hdl directly and free cmd parameter */
+ start_bss_network(padapter, (u8 *)&(padapter->mlmepriv.cur_network.network));
+ } else {
+ /* need enqueue, prepare cmd_obj and enqueue */
+ pcmd = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ if (pcmd == NULL) {
+ res = _FAIL;
+ goto exit;
+ }
+
+ INIT_LIST_HEAD(&pcmd->list);
+ pcmd->cmdcode = GEN_CMD_CODE(_CreateBss);
+ pcmd->parmbuf = NULL;
+ pcmd->cmdsz = 0;
+ pcmd->rsp = NULL;
+ pcmd->rspsz = 0;
+
+ if (flags & RTW_CMDF_WAIT_ACK) {
+ pcmd->sctx = &sctx;
+ rtw_sctx_init(&sctx, 2000);
+ }
+
+ res = rtw_enqueue_cmd(pcmdpriv, pcmd);
+
+ if (res == _SUCCESS && (flags & RTW_CMDF_WAIT_ACK)) {
+ rtw_sctx_wait(&sctx, __func__);
+ if (mutex_lock_interruptible(&pcmdpriv->sctx_mutex) == 0) {
+ if (sctx.status == RTW_SCTX_SUBMITTED)
+ pcmd->sctx = NULL;
+ mutex_unlock(&pcmdpriv->sctx_mutex);
+ }
+ }
+ }
+
+exit:
+ return res;
+}
+
+u8 rtw_joinbss_cmd(struct adapter *padapter, struct wlan_network *pnetwork)
+{
+ u8 *auth, res = _SUCCESS;
+ uint t_len = 0;
+ struct wlan_bssid_ex *psecnetwork;
+ struct cmd_obj *pcmd;
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct qos_priv *pqospriv = &pmlmepriv->qospriv;
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
+ struct registry_priv *pregistrypriv = &padapter->registrypriv;
+ struct ht_priv *phtpriv = &pmlmepriv->htpriv;
+ enum NDIS_802_11_NETWORK_INFRASTRUCTURE ndis_network_mode = pnetwork->network.InfrastructureMode;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ u32 tmp_len;
+ u8 *ptmp = NULL;
+
+ if (pmlmepriv->assoc_ssid.SsidLength == 0) {
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_info_, ("+Join cmd: Any SSid\n"));
+ } else {
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_notice_, ("+Join cmd: SSid =[%s]\n", pmlmepriv->assoc_ssid.Ssid));
+ }
+
+ pcmd = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ if (pcmd == NULL) {
+ res = _FAIL;
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_, ("rtw_joinbss_cmd: memory allocate for cmd_obj fail!!!\n"));
+ goto exit;
+ }
+	/* the IEs buffer has a fixed size */
+ t_len = sizeof(struct wlan_bssid_ex);
+
+
+	/* set fw_state here for a hidden AP */
+ if (check_fwstate(pmlmepriv, WIFI_STATION_STATE|WIFI_ADHOC_STATE) != true) {
+ switch (ndis_network_mode) {
+ case Ndis802_11IBSS:
+ set_fwstate(pmlmepriv, WIFI_ADHOC_STATE);
+ break;
+
+ case Ndis802_11Infrastructure:
+ set_fwstate(pmlmepriv, WIFI_STATION_STATE);
+ break;
+
+ case Ndis802_11APMode:
+ case Ndis802_11AutoUnknown:
+ case Ndis802_11InfrastructureMax:
+ break;
+
+ }
+ }
+
+ psecnetwork = (struct wlan_bssid_ex *)&psecuritypriv->sec_bss;
+ if (psecnetwork == NULL) {
+ if (pcmd != NULL)
+ kfree((unsigned char *)pcmd);
+
+ res = _FAIL;
+
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_, ("rtw_joinbss_cmd :psecnetwork == NULL!!!\n"));
+
+ goto exit;
+ }
+
+ memset(psecnetwork, 0, t_len);
+
+ memcpy(psecnetwork, &pnetwork->network, get_wlan_bssid_ex_sz(&pnetwork->network));
+
+ auth = &psecuritypriv->authenticator_ie[0];
+ psecuritypriv->authenticator_ie[0] = (unsigned char)psecnetwork->IELength;
+
+ if ((psecnetwork->IELength-12) < (256-1)) {
+ memcpy(&psecuritypriv->authenticator_ie[1], &psecnetwork->IEs[12], psecnetwork->IELength-12);
+ } else {
+ memcpy(&psecuritypriv->authenticator_ie[1], &psecnetwork->IEs[12], (256-1));
+ }
+
+ psecnetwork->IELength = 0;
+ /* Added by Albert 2009/02/18 */
+	/* If the driver does not want to use the bssid to create the connection, */
+	/* we have to copy the connecting AP's MAC address so that the driver */
+	/* still has the bssid information for PMKID list searching. */
+
+ if (pmlmepriv->assoc_by_bssid == false) {
+ memcpy(&pmlmepriv->assoc_bssid[0], &pnetwork->network.MacAddress[0], ETH_ALEN);
+ }
+
+ psecnetwork->IELength = rtw_restruct_sec_ie(padapter, &pnetwork->network.IEs[0], &psecnetwork->IEs[0], pnetwork->network.IELength);
+
+
+ pqospriv->qos_option = 0;
+
+ if (pregistrypriv->wmm_enable) {
+ tmp_len = rtw_restruct_wmm_ie(padapter, &pnetwork->network.IEs[0], &psecnetwork->IEs[0], pnetwork->network.IELength, psecnetwork->IELength);
+
+ if (psecnetwork->IELength != tmp_len) {
+ psecnetwork->IELength = tmp_len;
+ pqospriv->qos_option = 1; /* There is WMM IE in this corresp. beacon */
+		} else {
+ pqospriv->qos_option = 0;/* There is no WMM IE in this corresp. beacon */
+ }
+ }
+
+ phtpriv->ht_option = false;
+ ptmp = rtw_get_ie(&pnetwork->network.IEs[12], _HT_CAPABILITY_IE_, &tmp_len, pnetwork->network.IELength-12);
+ if (pregistrypriv->ht_enable && ptmp && tmp_len > 0) {
+ /* Added by Albert 2010/06/23 */
+		/* For WEP mode, we will use b/g mode for the connection to avoid some IOT issues, */
+		/* especially with the Realtek 8192u SoftAP. */
+ if ((padapter->securitypriv.dot11PrivacyAlgrthm != _WEP40_) &&
+ (padapter->securitypriv.dot11PrivacyAlgrthm != _WEP104_) &&
+ (padapter->securitypriv.dot11PrivacyAlgrthm != _TKIP_)) {
+ rtw_ht_use_default_setting(padapter);
+
+ rtw_build_wmm_ie_ht(padapter, &psecnetwork->IEs[12], &psecnetwork->IELength);
+
+ /* rtw_restructure_ht_ie */
+ rtw_restructure_ht_ie(padapter, &pnetwork->network.IEs[12], &psecnetwork->IEs[0],
+ pnetwork->network.IELength-12, &psecnetwork->IELength,
+ pnetwork->network.Configuration.DSConfig);
+ }
+ }
+
+ rtw_append_exented_cap(padapter, &psecnetwork->IEs[0], &psecnetwork->IELength);
+
+ pmlmeinfo->assoc_AP_vendor = check_assoc_AP(pnetwork->network.IEs, pnetwork->network.IELength);
+
+ pcmd->cmdsz = get_wlan_bssid_ex_sz(psecnetwork);/* get cmdsz before endian conversion */
+
+ INIT_LIST_HEAD(&pcmd->list);
+ pcmd->cmdcode = _JoinBss_CMD_;/* GEN_CMD_CODE(_JoinBss) */
+ pcmd->parmbuf = (unsigned char *)psecnetwork;
+ pcmd->rsp = NULL;
+ pcmd->rspsz = 0;
+
+ res = rtw_enqueue_cmd(pcmdpriv, pcmd);
+
+exit:
+ return res;
+}
+
+u8 rtw_disassoc_cmd(struct adapter *padapter, u32 deauth_timeout_ms, bool enqueue) /* for sta_mode */
+{
+ struct cmd_obj *cmdobj = NULL;
+ struct disconnect_parm *param = NULL;
+ struct cmd_priv *cmdpriv = &padapter->cmdpriv;
+ u8 res = _SUCCESS;
+
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_notice_, ("+rtw_disassoc_cmd\n"));
+
+ /* prepare cmd parameter */
+ param = (struct disconnect_parm *)rtw_zmalloc(sizeof(*param));
+ if (param == NULL) {
+ res = _FAIL;
+ goto exit;
+ }
+ param->deauth_timeout_ms = deauth_timeout_ms;
+
+ if (enqueue) {
+ /* need enqueue, prepare cmd_obj and enqueue */
+ cmdobj = (struct cmd_obj *)rtw_zmalloc(sizeof(*cmdobj));
+ if (cmdobj == NULL) {
+ res = _FAIL;
+ kfree((u8 *)param);
+ goto exit;
+ }
+ init_h2fwcmd_w_parm_no_rsp(cmdobj, param, _DisConnect_CMD_);
+ res = rtw_enqueue_cmd(cmdpriv, cmdobj);
+ } else {
+ /* no need to enqueue, do the cmd hdl directly and free cmd parameter */
+ if (H2C_SUCCESS != disconnect_hdl(padapter, (u8 *)param))
+ res = _FAIL;
+ kfree((u8 *)param);
+ }
+
+exit:
+ return res;
+}
+
+u8 rtw_setopmode_cmd(struct adapter *padapter, enum NDIS_802_11_NETWORK_INFRASTRUCTURE networktype, bool enqueue)
+{
+ struct cmd_obj *ph2c;
+ struct setopmode_parm *psetop;
+
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+ u8 res = _SUCCESS;
+
+ psetop = (struct setopmode_parm *)rtw_zmalloc(sizeof(struct setopmode_parm));
+
+ if (psetop == NULL) {
+ res = _FAIL;
+ goto exit;
+ }
+ psetop->mode = (u8)networktype;
+
+ if (enqueue) {
+ ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ if (ph2c == NULL) {
+ kfree((u8 *)psetop);
+ res = _FAIL;
+ goto exit;
+ }
+
+ init_h2fwcmd_w_parm_no_rsp(ph2c, psetop, _SetOpMode_CMD_);
+ res = rtw_enqueue_cmd(pcmdpriv, ph2c);
+	} else {
+ setopmode_hdl(padapter, (u8 *)psetop);
+ kfree((u8 *)psetop);
+ }
+exit:
+ return res;
+}
+
+u8 rtw_setstakey_cmd(struct adapter *padapter, struct sta_info *sta, u8 unicast_key, bool enqueue)
+{
+ struct cmd_obj *ph2c;
+ struct set_stakey_parm *psetstakey_para;
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+ struct set_stakey_rsp *psetstakey_rsp = NULL;
+
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
+ u8 res = _SUCCESS;
+
+ psetstakey_para = (struct set_stakey_parm *)rtw_zmalloc(sizeof(struct set_stakey_parm));
+ if (psetstakey_para == NULL) {
+ res = _FAIL;
+ goto exit;
+ }
+
+ memcpy(psetstakey_para->addr, sta->hwaddr, ETH_ALEN);
+
+ if (check_fwstate(pmlmepriv, WIFI_STATION_STATE)) {
+ psetstakey_para->algorithm = (unsigned char) psecuritypriv->dot11PrivacyAlgrthm;
+ } else {
+ GET_ENCRY_ALGO(psecuritypriv, sta, psetstakey_para->algorithm, false);
+ }
+
+ if (unicast_key == true) {
+ memcpy(&psetstakey_para->key, &sta->dot118021x_UncstKey, 16);
+	} else {
+ memcpy(&psetstakey_para->key, &psecuritypriv->dot118021XGrpKey[psecuritypriv->dot118021XGrpKeyid].skey, 16);
+ }
+
+	/* jeff: set this because at least the sw key is ready */
+ padapter->securitypriv.busetkipkey = true;
+
+ if (enqueue) {
+ ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ if (ph2c == NULL) {
+ kfree((u8 *) psetstakey_para);
+ res = _FAIL;
+ goto exit;
+ }
+
+ psetstakey_rsp = (struct set_stakey_rsp *)rtw_zmalloc(sizeof(struct set_stakey_rsp));
+ if (psetstakey_rsp == NULL) {
+ kfree((u8 *) ph2c);
+ kfree((u8 *) psetstakey_para);
+ res = _FAIL;
+ goto exit;
+ }
+
+ init_h2fwcmd_w_parm_no_rsp(ph2c, psetstakey_para, _SetStaKey_CMD_);
+ ph2c->rsp = (u8 *) psetstakey_rsp;
+ ph2c->rspsz = sizeof(struct set_stakey_rsp);
+ res = rtw_enqueue_cmd(pcmdpriv, ph2c);
+	} else {
+ set_stakey_hdl(padapter, (u8 *)psetstakey_para);
+ kfree((u8 *) psetstakey_para);
+ }
+exit:
+ return res;
+}
+
+u8 rtw_clearstakey_cmd(struct adapter *padapter, struct sta_info *sta, u8 enqueue)
+{
+ struct cmd_obj *ph2c;
+ struct set_stakey_parm *psetstakey_para;
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+ struct set_stakey_rsp *psetstakey_rsp = NULL;
+ s16 cam_id = 0;
+ u8 res = _SUCCESS;
+
+ if (!enqueue) {
+ while ((cam_id = rtw_camid_search(padapter, sta->hwaddr, -1)) >= 0) {
+ DBG_871X_LEVEL(_drv_always_, "clear key for addr:"MAC_FMT", camid:%d\n", MAC_ARG(sta->hwaddr), cam_id);
+ clear_cam_entry(padapter, cam_id);
+ rtw_camid_free(padapter, cam_id);
+ }
+	} else {
+ ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ if (ph2c == NULL) {
+ res = _FAIL;
+ goto exit;
+ }
+
+ psetstakey_para = (struct set_stakey_parm *)rtw_zmalloc(sizeof(struct set_stakey_parm));
+ if (psetstakey_para == NULL) {
+ kfree((u8 *) ph2c);
+ res = _FAIL;
+ goto exit;
+ }
+
+ psetstakey_rsp = (struct set_stakey_rsp *)rtw_zmalloc(sizeof(struct set_stakey_rsp));
+ if (psetstakey_rsp == NULL) {
+ kfree((u8 *) ph2c);
+ kfree((u8 *) psetstakey_para);
+ res = _FAIL;
+ goto exit;
+ }
+
+ init_h2fwcmd_w_parm_no_rsp(ph2c, psetstakey_para, _SetStaKey_CMD_);
+ ph2c->rsp = (u8 *) psetstakey_rsp;
+ ph2c->rspsz = sizeof(struct set_stakey_rsp);
+
+ memcpy(psetstakey_para->addr, sta->hwaddr, ETH_ALEN);
+
+ psetstakey_para->algorithm = _NO_PRIVACY_;
+
+ res = rtw_enqueue_cmd(pcmdpriv, ph2c);
+
+ }
+
+exit:
+ return res;
+}
+
+u8 rtw_addbareq_cmd(struct adapter *padapter, u8 tid, u8 *addr)
+{
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+ struct cmd_obj *ph2c;
+ struct addBaReq_parm *paddbareq_parm;
+
+ u8 res = _SUCCESS;
+
+ ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ if (ph2c == NULL) {
+ res = _FAIL;
+ goto exit;
+ }
+
+ paddbareq_parm = (struct addBaReq_parm *)rtw_zmalloc(sizeof(struct addBaReq_parm));
+ if (paddbareq_parm == NULL) {
+ kfree((unsigned char *)ph2c);
+ res = _FAIL;
+ goto exit;
+ }
+
+ paddbareq_parm->tid = tid;
+ memcpy(paddbareq_parm->addr, addr, ETH_ALEN);
+
+ init_h2fwcmd_w_parm_no_rsp(ph2c, paddbareq_parm, GEN_CMD_CODE(_AddBAReq));
+
+ /* DBG_871X("rtw_addbareq_cmd, tid =%d\n", tid); */
+
+ /* rtw_enqueue_cmd(pcmdpriv, ph2c); */
+ res = rtw_enqueue_cmd(pcmdpriv, ph2c);
+
+exit:
+ return res;
+}
+/* added for CONFIG_IEEE80211W; non-11w can use it too */
+u8 rtw_reset_securitypriv_cmd(struct adapter *padapter)
+{
+ struct cmd_obj *ph2c;
+ struct drvextra_cmd_parm *pdrvextra_cmd_parm;
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+ u8 res = _SUCCESS;
+
+ ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ if (ph2c == NULL) {
+ res = _FAIL;
+ goto exit;
+ }
+
+ pdrvextra_cmd_parm = (struct drvextra_cmd_parm *)rtw_zmalloc(sizeof(struct drvextra_cmd_parm));
+ if (pdrvextra_cmd_parm == NULL) {
+ kfree((unsigned char *)ph2c);
+ res = _FAIL;
+ goto exit;
+ }
+
+ pdrvextra_cmd_parm->ec_id = RESET_SECURITYPRIV;
+ pdrvextra_cmd_parm->type = 0;
+ pdrvextra_cmd_parm->size = 0;
+ pdrvextra_cmd_parm->pbuf = NULL;
+
+ init_h2fwcmd_w_parm_no_rsp(ph2c, pdrvextra_cmd_parm, GEN_CMD_CODE(_Set_Drv_Extra));
+
+
+ /* rtw_enqueue_cmd(pcmdpriv, ph2c); */
+ res = rtw_enqueue_cmd(pcmdpriv, ph2c);
+
+exit:
+ return res;
+}
+
+u8 rtw_free_assoc_resources_cmd(struct adapter *padapter)
+{
+ struct cmd_obj *ph2c;
+ struct drvextra_cmd_parm *pdrvextra_cmd_parm;
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+ u8 res = _SUCCESS;
+
+ ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ if (ph2c == NULL) {
+ res = _FAIL;
+ goto exit;
+ }
+
+ pdrvextra_cmd_parm = (struct drvextra_cmd_parm *)rtw_zmalloc(sizeof(struct drvextra_cmd_parm));
+ if (pdrvextra_cmd_parm == NULL) {
+ kfree((unsigned char *)ph2c);
+ res = _FAIL;
+ goto exit;
+ }
+
+ pdrvextra_cmd_parm->ec_id = FREE_ASSOC_RESOURCES;
+ pdrvextra_cmd_parm->type = 0;
+ pdrvextra_cmd_parm->size = 0;
+ pdrvextra_cmd_parm->pbuf = NULL;
+
+ init_h2fwcmd_w_parm_no_rsp(ph2c, pdrvextra_cmd_parm, GEN_CMD_CODE(_Set_Drv_Extra));
+
+
+ /* rtw_enqueue_cmd(pcmdpriv, ph2c); */
+ res = rtw_enqueue_cmd(pcmdpriv, ph2c);
+
+exit:
+ return res;
+}
+
+u8 rtw_dynamic_chk_wk_cmd(struct adapter *padapter)
+{
+ struct cmd_obj *ph2c;
+ struct drvextra_cmd_parm *pdrvextra_cmd_parm;
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+ u8 res = _SUCCESS;
+
+ /* only primary padapter does this cmd */
+ ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ if (ph2c == NULL) {
+ res = _FAIL;
+ goto exit;
+ }
+
+ pdrvextra_cmd_parm = (struct drvextra_cmd_parm *)rtw_zmalloc(sizeof(struct drvextra_cmd_parm));
+ if (pdrvextra_cmd_parm == NULL) {
+ kfree((unsigned char *)ph2c);
+ res = _FAIL;
+ goto exit;
+ }
+
+ pdrvextra_cmd_parm->ec_id = DYNAMIC_CHK_WK_CID;
+ pdrvextra_cmd_parm->type = 0;
+ pdrvextra_cmd_parm->size = 0;
+ pdrvextra_cmd_parm->pbuf = NULL;
+ init_h2fwcmd_w_parm_no_rsp(ph2c, pdrvextra_cmd_parm, GEN_CMD_CODE(_Set_Drv_Extra));
+
+
+ /* rtw_enqueue_cmd(pcmdpriv, ph2c); */
+ res = rtw_enqueue_cmd(pcmdpriv, ph2c);
+
+exit:
+ return res;
+}
+
+u8 rtw_set_chplan_cmd(struct adapter *padapter, u8 chplan, u8 enqueue, u8 swconfig)
+{
+ struct cmd_obj *pcmdobj;
+ struct SetChannelPlan_param *setChannelPlan_param;
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+
+ u8 res = _SUCCESS;
+
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_notice_, ("+rtw_set_chplan_cmd\n"));
+
+ /* check if allow software config */
+ if (swconfig && rtw_hal_is_disable_sw_channel_plan(padapter) == true) {
+ res = _FAIL;
+ goto exit;
+ }
+
+ /* check input parameter */
+ if (!rtw_is_channel_plan_valid(chplan)) {
+ res = _FAIL;
+ goto exit;
+ }
+
+ /* prepare cmd parameter */
+ setChannelPlan_param = (struct SetChannelPlan_param *)rtw_zmalloc(sizeof(struct SetChannelPlan_param));
+ if (setChannelPlan_param == NULL) {
+ res = _FAIL;
+ goto exit;
+ }
+ setChannelPlan_param->channel_plan = chplan;
+
+ if (enqueue) {
+ /* need enqueue, prepare cmd_obj and enqueue */
+ pcmdobj = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ if (pcmdobj == NULL) {
+ kfree((u8 *)setChannelPlan_param);
+ res = _FAIL;
+ goto exit;
+ }
+
+ init_h2fwcmd_w_parm_no_rsp(pcmdobj, setChannelPlan_param, GEN_CMD_CODE(_SetChannelPlan));
+ res = rtw_enqueue_cmd(pcmdpriv, pcmdobj);
+	} else {
+ /* no need to enqueue, do the cmd hdl directly and free cmd parameter */
+ if (H2C_SUCCESS != set_chplan_hdl(padapter, (unsigned char *)setChannelPlan_param))
+ res = _FAIL;
+
+ kfree((u8 *)setChannelPlan_param);
+ }
+
+ /* do something based on res... */
+ if (res == _SUCCESS)
+ padapter->mlmepriv.ChannelPlan = chplan;
+
+exit:
+ return res;
+}
+
+static void collect_traffic_statistics(struct adapter *padapter)
+{
+ struct dvobj_priv *pdvobjpriv = adapter_to_dvobj(padapter);
+
+ /* Tx */
+ pdvobjpriv->traffic_stat.tx_bytes = padapter->xmitpriv.tx_bytes;
+ pdvobjpriv->traffic_stat.tx_pkts = padapter->xmitpriv.tx_pkts;
+ pdvobjpriv->traffic_stat.tx_drop = padapter->xmitpriv.tx_drop;
+
+ /* Rx */
+ pdvobjpriv->traffic_stat.rx_bytes = padapter->recvpriv.rx_bytes;
+ pdvobjpriv->traffic_stat.rx_pkts = padapter->recvpriv.rx_pkts;
+ pdvobjpriv->traffic_stat.rx_drop = padapter->recvpriv.rx_drop;
+
+ /* Calculate throughput in last interval */
+ pdvobjpriv->traffic_stat.cur_tx_bytes = pdvobjpriv->traffic_stat.tx_bytes - pdvobjpriv->traffic_stat.last_tx_bytes;
+ pdvobjpriv->traffic_stat.cur_rx_bytes = pdvobjpriv->traffic_stat.rx_bytes - pdvobjpriv->traffic_stat.last_rx_bytes;
+ pdvobjpriv->traffic_stat.last_tx_bytes = pdvobjpriv->traffic_stat.tx_bytes;
+ pdvobjpriv->traffic_stat.last_rx_bytes = pdvobjpriv->traffic_stat.rx_bytes;
+
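+	/*
+	 * Rough throughput in Mbit/s: bytes accumulated over the watchdog
+	 * period (assumed to be 2 seconds, hence the /2) times 8 bits,
+	 * divided by 1024*1024.
+	 */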
+ pdvobjpriv->traffic_stat.cur_tx_tp = (u32)(pdvobjpriv->traffic_stat.cur_tx_bytes * 8/2/1024/1024);
+ pdvobjpriv->traffic_stat.cur_rx_tp = (u32)(pdvobjpriv->traffic_stat.cur_rx_bytes * 8/2/1024/1024);
+}
+
+u8 traffic_status_watchdog(struct adapter *padapter, u8 from_timer)
+{
+ u8 bEnterPS = false;
+ u16 BusyThresholdHigh = 25;
+ u16 BusyThresholdLow = 10;
+ u16 BusyThreshold = BusyThresholdHigh;
+ u8 bBusyTraffic = false, bTxBusyTraffic = false, bRxBusyTraffic = false;
+ u8 bHigherBusyTraffic = false, bHigherBusyRxTraffic = false, bHigherBusyTxTraffic = false;
+
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+
+ collect_traffic_statistics(padapter);
+
+ /* */
+ /* Determine if our traffic is busy now */
+ /* */
+ if ((check_fwstate(pmlmepriv, _FW_LINKED) == true)
+ /*&& !MgntInitAdapterInProgress(pMgntInfo)*/) {
+ /* if we raise bBusyTraffic in last watchdog, using lower threshold. */
+ if (pmlmepriv->LinkDetectInfo.bBusyTraffic)
+ BusyThreshold = BusyThresholdLow;
+
+ if (pmlmepriv->LinkDetectInfo.NumRxOkInPeriod > BusyThreshold ||
+ pmlmepriv->LinkDetectInfo.NumTxOkInPeriod > BusyThreshold) {
+ bBusyTraffic = true;
+
+ if (pmlmepriv->LinkDetectInfo.NumRxOkInPeriod > pmlmepriv->LinkDetectInfo.NumTxOkInPeriod)
+ bRxBusyTraffic = true;
+ else
+ bTxBusyTraffic = true;
+ }
+
+ /* Higher Tx/Rx data. */
+ if (pmlmepriv->LinkDetectInfo.NumRxOkInPeriod > 4000 ||
+ pmlmepriv->LinkDetectInfo.NumTxOkInPeriod > 4000) {
+ bHigherBusyTraffic = true;
+
+ if (pmlmepriv->LinkDetectInfo.NumRxOkInPeriod > pmlmepriv->LinkDetectInfo.NumTxOkInPeriod)
+ bHigherBusyRxTraffic = true;
+ else
+ bHigherBusyTxTraffic = true;
+ }
+
+ /* check traffic for powersaving. */
+ if (((pmlmepriv->LinkDetectInfo.NumRxUnicastOkInPeriod + pmlmepriv->LinkDetectInfo.NumTxOkInPeriod) > 8) ||
+ (pmlmepriv->LinkDetectInfo.NumRxUnicastOkInPeriod > 2)) {
+ /* DBG_871X("(-)Tx = %d, Rx = %d\n", pmlmepriv->LinkDetectInfo.NumTxOkInPeriod, pmlmepriv->LinkDetectInfo.NumRxUnicastOkInPeriod); */
+ bEnterPS = false;
+
+ if (bBusyTraffic == true) {
+ if (pmlmepriv->LinkDetectInfo.TrafficTransitionCount <= 4)
+ pmlmepriv->LinkDetectInfo.TrafficTransitionCount = 4;
+
+ pmlmepriv->LinkDetectInfo.TrafficTransitionCount++;
+
+ /* DBG_871X("Set TrafficTransitionCount to %d\n", pmlmepriv->LinkDetectInfo.TrafficTransitionCount); */
+
+ if (pmlmepriv->LinkDetectInfo.TrafficTransitionCount > 30/*TrafficTransitionLevel*/) {
+ pmlmepriv->LinkDetectInfo.TrafficTransitionCount = 30;
+ }
+ }
+		} else {
+ /* DBG_871X("(+)Tx = %d, Rx = %d\n", pmlmepriv->LinkDetectInfo.NumTxOkInPeriod, pmlmepriv->LinkDetectInfo.NumRxUnicastOkInPeriod); */
+
+ if (pmlmepriv->LinkDetectInfo.TrafficTransitionCount >= 2)
+ pmlmepriv->LinkDetectInfo.TrafficTransitionCount -= 2;
+ else
+ pmlmepriv->LinkDetectInfo.TrafficTransitionCount = 0;
+
+ if (pmlmepriv->LinkDetectInfo.TrafficTransitionCount == 0)
+ bEnterPS = true;
+ }
+
+		/* LeisurePS only works in infra mode. */
+ if (bEnterPS) {
+ if (!from_timer)
+ LPS_Enter(padapter, "TRAFFIC_IDLE");
+ } else {
+ if (!from_timer)
+ LPS_Leave(padapter, "TRAFFIC_BUSY");
+ else
+ rtw_lps_ctrl_wk_cmd(padapter, LPS_CTRL_TRAFFIC_BUSY, 1);
+ }
+	} else {
+ struct dvobj_priv *dvobj = adapter_to_dvobj(padapter);
+ int n_assoc_iface = 0;
+
+ if (check_fwstate(&(dvobj->padapters->mlmepriv), WIFI_ASOC_STATE))
+ n_assoc_iface++;
+
+ if (!from_timer && n_assoc_iface == 0)
+ LPS_Leave(padapter, "NON_LINKED");
+ }
+
+ pmlmepriv->LinkDetectInfo.NumRxOkInPeriod = 0;
+ pmlmepriv->LinkDetectInfo.NumTxOkInPeriod = 0;
+ pmlmepriv->LinkDetectInfo.NumRxUnicastOkInPeriod = 0;
+ pmlmepriv->LinkDetectInfo.bBusyTraffic = bBusyTraffic;
+ pmlmepriv->LinkDetectInfo.bTxBusyTraffic = bTxBusyTraffic;
+ pmlmepriv->LinkDetectInfo.bRxBusyTraffic = bRxBusyTraffic;
+ pmlmepriv->LinkDetectInfo.bHigherBusyTraffic = bHigherBusyTraffic;
+ pmlmepriv->LinkDetectInfo.bHigherBusyRxTraffic = bHigherBusyRxTraffic;
+ pmlmepriv->LinkDetectInfo.bHigherBusyTxTraffic = bHigherBusyTxTraffic;
+
+ return bEnterPS;
+
+}
+
+static void dynamic_chk_wk_hdl(struct adapter *padapter)
+{
+ struct mlme_priv *pmlmepriv;
+ pmlmepriv = &(padapter->mlmepriv);
+
+ if (check_fwstate(pmlmepriv, WIFI_AP_STATE) == true) {
+ expire_timeout_chk(padapter);
+ }
+
+ /* for debug purpose */
+ _linked_info_dump(padapter);
+
+
+ /* if (check_fwstate(pmlmepriv, _FW_UNDER_LINKING|_FW_UNDER_SURVEY) ==false) */
+ {
+ linked_status_chk(padapter);
+ traffic_status_watchdog(padapter, 0);
+ }
+
+ rtw_hal_dm_watchdog(padapter);
+
+ /* check_hw_pbc(padapter, pdrvextra_cmd->pbuf, pdrvextra_cmd->type); */
+
+ /* */
+ /* BT-Coexist */
+ /* */
+ rtw_btcoex_Handler(padapter);
+
+
+ /* always call rtw_ps_processor() at last one. */
+ if (is_primary_adapter(padapter))
+ rtw_ps_processor(padapter);
+}
+
+void lps_ctrl_wk_hdl(struct adapter *padapter, u8 lps_ctrl_type);
+void lps_ctrl_wk_hdl(struct adapter *padapter, u8 lps_ctrl_type)
+{
+ struct pwrctrl_priv *pwrpriv = adapter_to_pwrctl(padapter);
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ u8 mstatus;
+
+ if ((check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) == true)
+ || (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) == true)) {
+ return;
+ }
+
+ switch (lps_ctrl_type) {
+ case LPS_CTRL_SCAN:
+ /* DBG_871X("LPS_CTRL_SCAN\n"); */
+ rtw_btcoex_ScanNotify(padapter, true);
+
+ if (check_fwstate(pmlmepriv, _FW_LINKED) == true) {
+ /* connect */
+ LPS_Leave(padapter, "LPS_CTRL_SCAN");
+ }
+ break;
+ case LPS_CTRL_JOINBSS:
+ /* DBG_871X("LPS_CTRL_JOINBSS\n"); */
+ LPS_Leave(padapter, "LPS_CTRL_JOINBSS");
+ break;
+ case LPS_CTRL_CONNECT:
+ /* DBG_871X("LPS_CTRL_CONNECT\n"); */
+ mstatus = 1;/* connect */
+ /* Reset LPS Setting */
+ pwrpriv->LpsIdleCount = 0;
+ rtw_hal_set_hwreg(padapter, HW_VAR_H2C_FW_JOINBSSRPT, (u8 *)(&mstatus));
+ rtw_btcoex_MediaStatusNotify(padapter, mstatus);
+ break;
+ case LPS_CTRL_DISCONNECT:
+ /* DBG_871X("LPS_CTRL_DISCONNECT\n"); */
+ mstatus = 0;/* disconnect */
+ rtw_btcoex_MediaStatusNotify(padapter, mstatus);
+ LPS_Leave(padapter, "LPS_CTRL_DISCONNECT");
+ rtw_hal_set_hwreg(padapter, HW_VAR_H2C_FW_JOINBSSRPT, (u8 *)(&mstatus));
+ break;
+ case LPS_CTRL_SPECIAL_PACKET:
+ /* DBG_871X("LPS_CTRL_SPECIAL_PACKET\n"); */
+ pwrpriv->DelayLPSLastTimeStamp = jiffies;
+ rtw_btcoex_SpecialPacketNotify(padapter, PACKET_DHCP);
+ LPS_Leave(padapter, "LPS_CTRL_SPECIAL_PACKET");
+ break;
+ case LPS_CTRL_LEAVE:
+ /* DBG_871X("LPS_CTRL_LEAVE\n"); */
+ LPS_Leave(padapter, "LPS_CTRL_LEAVE");
+ break;
+ case LPS_CTRL_TRAFFIC_BUSY:
+		LPS_Leave(padapter, "LPS_CTRL_TRAFFIC_BUSY");
+		break;
+	default:
+ break;
+ }
+}
+
+u8 rtw_lps_ctrl_wk_cmd(struct adapter *padapter, u8 lps_ctrl_type, u8 enqueue)
+{
+ struct cmd_obj *ph2c;
+ struct drvextra_cmd_parm *pdrvextra_cmd_parm;
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+ /* struct pwrctrl_priv *pwrctrlpriv = adapter_to_pwrctl(padapter); */
+ u8 res = _SUCCESS;
+
+ /* if (!pwrctrlpriv->bLeisurePs) */
+ /* return res; */
+
+ if (enqueue) {
+ ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ if (ph2c == NULL) {
+ res = _FAIL;
+ goto exit;
+ }
+
+ pdrvextra_cmd_parm = (struct drvextra_cmd_parm *)rtw_zmalloc(sizeof(struct drvextra_cmd_parm));
+ if (pdrvextra_cmd_parm == NULL) {
+ kfree((unsigned char *)ph2c);
+ res = _FAIL;
+ goto exit;
+ }
+
+ pdrvextra_cmd_parm->ec_id = LPS_CTRL_WK_CID;
+ pdrvextra_cmd_parm->type = lps_ctrl_type;
+ pdrvextra_cmd_parm->size = 0;
+ pdrvextra_cmd_parm->pbuf = NULL;
+
+ init_h2fwcmd_w_parm_no_rsp(ph2c, pdrvextra_cmd_parm, GEN_CMD_CODE(_Set_Drv_Extra));
+
+ res = rtw_enqueue_cmd(pcmdpriv, ph2c);
+	} else {
+ lps_ctrl_wk_hdl(padapter, lps_ctrl_type);
+ }
+
+exit:
+ return res;
+}
+
+static void rtw_dm_in_lps_hdl(struct adapter *padapter)
+{
+ rtw_hal_set_hwreg(padapter, HW_VAR_DM_IN_LPS, NULL);
+}
+
+u8 rtw_dm_in_lps_wk_cmd(struct adapter *padapter)
+{
+ struct cmd_obj *ph2c;
+ struct drvextra_cmd_parm *pdrvextra_cmd_parm;
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+ u8 res = _SUCCESS;
+
+
+ ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ if (ph2c == NULL) {
+ res = _FAIL;
+ goto exit;
+ }
+
+ pdrvextra_cmd_parm = (struct drvextra_cmd_parm *)rtw_zmalloc(sizeof(struct drvextra_cmd_parm));
+ if (pdrvextra_cmd_parm == NULL) {
+ kfree((unsigned char *)ph2c);
+ res = _FAIL;
+ goto exit;
+ }
+
+ pdrvextra_cmd_parm->ec_id = DM_IN_LPS_WK_CID;
+ pdrvextra_cmd_parm->type = 0;
+ pdrvextra_cmd_parm->size = 0;
+ pdrvextra_cmd_parm->pbuf = NULL;
+
+ init_h2fwcmd_w_parm_no_rsp(ph2c, pdrvextra_cmd_parm, GEN_CMD_CODE(_Set_Drv_Extra));
+
+ res = rtw_enqueue_cmd(pcmdpriv, ph2c);
+
+exit:
+
+ return res;
+
+}
+
+static void rtw_lps_change_dtim_hdl(struct adapter *padapter, u8 dtim)
+{
+ struct pwrctrl_priv *pwrpriv = adapter_to_pwrctl(padapter);
+
+ if (dtim <= 0 || dtim > 16)
+ return;
+
+ if (rtw_btcoex_IsBtControlLps(padapter) == true)
+ return;
+
+ down(&pwrpriv->lock);
+
+ if (pwrpriv->dtim != dtim) {
+ DBG_871X("change DTIM from %d to %d, bFwCurrentInPSMode =%d, ps_mode =%d\n", pwrpriv->dtim, dtim,
+ pwrpriv->bFwCurrentInPSMode, pwrpriv->pwr_mode);
+
+ pwrpriv->dtim = dtim;
+ }
+
+ if ((pwrpriv->bFwCurrentInPSMode == true) && (pwrpriv->pwr_mode > PS_MODE_ACTIVE)) {
+ u8 ps_mode = pwrpriv->pwr_mode;
+
+ /* DBG_871X("change DTIM from %d to %d, ps_mode =%d\n", pwrpriv->dtim, dtim, ps_mode); */
+
+ rtw_hal_set_hwreg(padapter, HW_VAR_H2C_FW_PWRMODE, (u8 *)(&ps_mode));
+ }
+
+ up(&pwrpriv->lock);
+}
+
+static void rtw_dm_ra_mask_hdl(struct adapter *padapter, struct sta_info *psta)
+{
+ if (psta) {
+ set_sta_rate(padapter, psta);
+ }
+}
+
+u8 rtw_dm_ra_mask_wk_cmd(struct adapter *padapter, u8 *psta)
+{
+ struct cmd_obj *ph2c;
+ struct drvextra_cmd_parm *pdrvextra_cmd_parm;
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+ u8 res = _SUCCESS;
+
+
+ ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ if (ph2c == NULL) {
+ res = _FAIL;
+ goto exit;
+ }
+
+ pdrvextra_cmd_parm = (struct drvextra_cmd_parm *)rtw_zmalloc(sizeof(struct drvextra_cmd_parm));
+ if (pdrvextra_cmd_parm == NULL) {
+ kfree((unsigned char *)ph2c);
+ res = _FAIL;
+ goto exit;
+ }
+
+ pdrvextra_cmd_parm->ec_id = DM_RA_MSK_WK_CID;
+ pdrvextra_cmd_parm->type = 0;
+ pdrvextra_cmd_parm->size = 0;
+ pdrvextra_cmd_parm->pbuf = psta;
+
+ init_h2fwcmd_w_parm_no_rsp(ph2c, pdrvextra_cmd_parm, GEN_CMD_CODE(_Set_Drv_Extra));
+
+ res = rtw_enqueue_cmd(pcmdpriv, ph2c);
+
+exit:
+
+ return res;
+
+}
+
+static void power_saving_wk_hdl(struct adapter *padapter)
+{
+ rtw_ps_processor(padapter);
+}
+
+/* added for CONFIG_IEEE80211W; non-11w can use it too */
+static void reset_securitypriv_hdl(struct adapter *padapter)
+{
+ rtw_reset_securitypriv(padapter);
+}
+
+static void free_assoc_resources_hdl(struct adapter *padapter)
+{
+ rtw_free_assoc_resources(padapter, 1);
+}
+
+u8 rtw_ps_cmd(struct adapter *padapter)
+{
+ struct cmd_obj *ppscmd;
+ struct drvextra_cmd_parm *pdrvextra_cmd_parm;
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+ u8 res = _SUCCESS;
+
+ ppscmd = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ if (ppscmd == NULL) {
+ res = _FAIL;
+ goto exit;
+ }
+
+ pdrvextra_cmd_parm = (struct drvextra_cmd_parm *)rtw_zmalloc(sizeof(struct drvextra_cmd_parm));
+ if (pdrvextra_cmd_parm == NULL) {
+ kfree((unsigned char *)ppscmd);
+ res = _FAIL;
+ goto exit;
+ }
+
+ pdrvextra_cmd_parm->ec_id = POWER_SAVING_CTRL_WK_CID;
+ pdrvextra_cmd_parm->type = 0;
+ pdrvextra_cmd_parm->size = 0;
+ pdrvextra_cmd_parm->pbuf = NULL;
+ init_h2fwcmd_w_parm_no_rsp(ppscmd, pdrvextra_cmd_parm, GEN_CMD_CODE(_Set_Drv_Extra));
+
+ res = rtw_enqueue_cmd(pcmdpriv, ppscmd);
+
+exit:
+ return res;
+}
+
+u32 g_wait_hiq_empty = 0;
+
+static void rtw_chk_hi_queue_hdl(struct adapter *padapter)
+{
+ struct sta_info *psta_bmc;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ unsigned long start = jiffies;
+ u8 empty = false;
+
+ psta_bmc = rtw_get_bcmc_stainfo(padapter);
+ if (!psta_bmc)
+ return;
+
+ rtw_hal_get_hwreg(padapter, HW_VAR_CHK_HI_QUEUE_EMPTY, &empty);
+
+ while (false == empty && jiffies_to_msecs(jiffies - start) < g_wait_hiq_empty) {
+ msleep(100);
+ rtw_hal_get_hwreg(padapter, HW_VAR_CHK_HI_QUEUE_EMPTY, &empty);
+ }
+
+ if (psta_bmc->sleepq_len == 0) {
+ if (empty == _SUCCESS) {
+ bool update_tim = false;
+
+ if (pstapriv->tim_bitmap & BIT(0))
+ update_tim = true;
+
+ pstapriv->tim_bitmap &= ~BIT(0);
+ pstapriv->sta_dz_bitmap &= ~BIT(0);
+
+ if (update_tim == true)
+ update_beacon(padapter, _TIM_IE_, NULL, true);
+		} else { /* re-check again */
+ rtw_chk_hi_queue_cmd(padapter);
+ }
+
+ }
+
+}
+
+u8 rtw_chk_hi_queue_cmd(struct adapter *padapter)
+{
+ struct cmd_obj *ph2c;
+ struct drvextra_cmd_parm *pdrvextra_cmd_parm;
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+ u8 res = _SUCCESS;
+
+ ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ if (ph2c == NULL) {
+ res = _FAIL;
+ goto exit;
+ }
+
+ pdrvextra_cmd_parm = (struct drvextra_cmd_parm *)rtw_zmalloc(sizeof(struct drvextra_cmd_parm));
+ if (pdrvextra_cmd_parm == NULL) {
+ kfree((unsigned char *)ph2c);
+ res = _FAIL;
+ goto exit;
+ }
+
+ pdrvextra_cmd_parm->ec_id = CHECK_HIQ_WK_CID;
+ pdrvextra_cmd_parm->type = 0;
+ pdrvextra_cmd_parm->size = 0;
+ pdrvextra_cmd_parm->pbuf = NULL;
+
+ init_h2fwcmd_w_parm_no_rsp(ph2c, pdrvextra_cmd_parm, GEN_CMD_CODE(_Set_Drv_Extra));
+
+ res = rtw_enqueue_cmd(pcmdpriv, ph2c);
+
+exit:
+
+ return res;
+
+}
+
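+/*
+ * Layout of the BT info report delivered by the BT firmware; parsed by
+ * rtw_btinfo_hdl() below (cid BTINFO_WIFI_FETCH or BTINFO_BT_AUTO_RPT).
+ */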
+struct btinfo {
+ u8 cid;
+ u8 len;
+
+ u8 bConnection:1;
+ u8 bSCOeSCO:1;
+ u8 bInQPage:1;
+ u8 bACLBusy:1;
+ u8 bSCOBusy:1;
+ u8 bHID:1;
+ u8 bA2DP:1;
+ u8 bFTP:1;
+
+ u8 retry_cnt:4;
+ u8 rsvd_34:1;
+ u8 rsvd_35:1;
+ u8 rsvd_36:1;
+ u8 rsvd_37:1;
+
+ u8 rssi;
+
+ u8 rsvd_50:1;
+ u8 rsvd_51:1;
+ u8 rsvd_52:1;
+ u8 rsvd_53:1;
+ u8 rsvd_54:1;
+ u8 rsvd_55:1;
+ u8 eSCO_SCO:1;
+ u8 Master_Slave:1;
+
+ u8 rsvd_6;
+ u8 rsvd_7;
+};
+
+static void rtw_btinfo_hdl(struct adapter *adapter, u8 *buf, u16 buf_len)
+{
+ #define BTINFO_WIFI_FETCH 0x23
+ #define BTINFO_BT_AUTO_RPT 0x27
+ struct btinfo *info = (struct btinfo *)buf;
+ u8 cmd_idx;
+ u8 len;
+
+ cmd_idx = info->cid;
+
+ if (info->len > buf_len-2) {
+ rtw_warn_on(1);
+ len = buf_len-2;
+ } else {
+ len = info->len;
+ }
+
+/* define DBG_PROC_SET_BTINFO_EVT */
+#ifdef DBG_PROC_SET_BTINFO_EVT
+ btinfo_evt_dump(RTW_DBGDUMP, info);
+#endif
+
+	/* transform BT-FW btinfo into WiFi FW C2H format and notify */
+ if (cmd_idx == BTINFO_WIFI_FETCH)
+ buf[1] = 0;
+ else if (cmd_idx == BTINFO_BT_AUTO_RPT)
+ buf[1] = 2;
+ rtw_btcoex_BtInfoNotify(adapter, len+1, &buf[1]);
+}
+
+u8 rtw_c2h_packet_wk_cmd(struct adapter *padapter, u8 *pbuf, u16 length)
+{
+ struct cmd_obj *ph2c;
+ struct drvextra_cmd_parm *pdrvextra_cmd_parm;
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+ u8 res = _SUCCESS;
+
+ ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ if (ph2c == NULL) {
+ res = _FAIL;
+ goto exit;
+ }
+
+ pdrvextra_cmd_parm = (struct drvextra_cmd_parm *)rtw_zmalloc(sizeof(struct drvextra_cmd_parm));
+ if (pdrvextra_cmd_parm == NULL) {
+ kfree((u8 *)ph2c);
+ res = _FAIL;
+ goto exit;
+ }
+
+ pdrvextra_cmd_parm->ec_id = C2H_WK_CID;
+ pdrvextra_cmd_parm->type = 0;
+ pdrvextra_cmd_parm->size = length;
+ pdrvextra_cmd_parm->pbuf = pbuf;
+
+ init_h2fwcmd_w_parm_no_rsp(ph2c, pdrvextra_cmd_parm, GEN_CMD_CODE(_Set_Drv_Extra));
+
+ res = rtw_enqueue_cmd(pcmdpriv, ph2c);
+
+exit:
+ return res;
+}
+
+/* Don't do register R/W in this function, because the SDIO interrupt has claimed the host; */
+/* otherwise a deadlock will happen and cause special-systemserver-died in Android. */
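+/* The C2H event is therefore only queued here; the actual processing happens */
+/* later in rtw_cmd_thread() context via the C2H_WK_CID case of rtw_drvextra_cmd_hdl(). */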
+u8 rtw_c2h_wk_cmd(struct adapter *padapter, u8 *c2h_evt)
+{
+ struct cmd_obj *ph2c;
+ struct drvextra_cmd_parm *pdrvextra_cmd_parm;
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+ u8 res = _SUCCESS;
+
+ ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ if (ph2c == NULL) {
+ res = _FAIL;
+ goto exit;
+ }
+
+ pdrvextra_cmd_parm = (struct drvextra_cmd_parm *)rtw_zmalloc(sizeof(struct drvextra_cmd_parm));
+ if (pdrvextra_cmd_parm == NULL) {
+ kfree((u8 *)ph2c);
+ res = _FAIL;
+ goto exit;
+ }
+
+ pdrvextra_cmd_parm->ec_id = C2H_WK_CID;
+ pdrvextra_cmd_parm->type = 0;
+	pdrvextra_cmd_parm->size = c2h_evt ? 16 : 0;
+ pdrvextra_cmd_parm->pbuf = c2h_evt;
+
+ init_h2fwcmd_w_parm_no_rsp(ph2c, pdrvextra_cmd_parm, GEN_CMD_CODE(_Set_Drv_Extra));
+
+ res = rtw_enqueue_cmd(pcmdpriv, ph2c);
+
+exit:
+
+ return res;
+}
+
+static void c2h_wk_callback(_workitem *work)
+{
+ struct evt_priv *evtpriv = container_of(work, struct evt_priv, c2h_wk);
+ struct adapter *adapter = container_of(evtpriv, struct adapter, evtpriv);
+ u8 *c2h_evt;
+ c2h_id_filter ccx_id_filter = rtw_hal_c2h_id_filter_ccx(adapter);
+
+ evtpriv->c2h_wk_alive = true;
+
+ while (!rtw_cbuf_empty(evtpriv->c2h_queue)) {
+ c2h_evt = (u8 *)rtw_cbuf_pop(evtpriv->c2h_queue);
+ if (c2h_evt != NULL) {
+ /* This C2H event is read, clear it */
+ c2h_evt_clear(adapter);
+		} else {
+ c2h_evt = (u8 *)rtw_malloc(16);
+ if (c2h_evt != NULL) {
+ /* This C2H event is not read, read & clear now */
+ if (rtw_hal_c2h_evt_read(adapter, c2h_evt) != _SUCCESS) {
+ kfree(c2h_evt);
+ continue;
+ }
+ }
+ }
+
+ /* Special pointer to trigger c2h_evt_clear only */
+ if ((void *)c2h_evt == (void *)evtpriv)
+ continue;
+
+ if (!rtw_hal_c2h_valid(adapter, c2h_evt)) {
+ kfree(c2h_evt);
+ continue;
+ }
+
+ if (ccx_id_filter(c2h_evt) == true) {
+ /* Handle CCX report here */
+ rtw_hal_c2h_handler(adapter, c2h_evt);
+ kfree(c2h_evt);
+		} else {
+ /* Enqueue into cmd_thread for others */
+ rtw_c2h_wk_cmd(adapter, c2h_evt);
+ }
+ }
+
+ evtpriv->c2h_wk_alive = false;
+}
+
+u8 rtw_drvextra_cmd_hdl(struct adapter *padapter, unsigned char *pbuf)
+{
+ struct drvextra_cmd_parm *pdrvextra_cmd;
+
+ if (!pbuf)
+ return H2C_PARAMETERS_ERROR;
+
+ pdrvextra_cmd = (struct drvextra_cmd_parm *)pbuf;
+
+ switch (pdrvextra_cmd->ec_id) {
+	case DYNAMIC_CHK_WK_CID: /* only the primary padapter issues this cmd, but dynamic_chk_wk_hdl() runs for both interfaces */
+ dynamic_chk_wk_hdl(padapter);
+ break;
+ case POWER_SAVING_CTRL_WK_CID:
+ power_saving_wk_hdl(padapter);
+ break;
+ case LPS_CTRL_WK_CID:
+ lps_ctrl_wk_hdl(padapter, (u8)pdrvextra_cmd->type);
+ break;
+ case DM_IN_LPS_WK_CID:
+ rtw_dm_in_lps_hdl(padapter);
+ break;
+ case LPS_CHANGE_DTIM_CID:
+ rtw_lps_change_dtim_hdl(padapter, (u8)pdrvextra_cmd->type);
+ break;
+ case CHECK_HIQ_WK_CID:
+ rtw_chk_hi_queue_hdl(padapter);
+ break;
+#ifdef CONFIG_INTEL_WIDI
+ case INTEl_WIDI_WK_CID:
+ intel_widi_wk_hdl(padapter, pdrvextra_cmd->type, pdrvextra_cmd->pbuf);
+ break;
+#endif /* CONFIG_INTEL_WIDI */
+	/* added for CONFIG_IEEE80211W; non-11w can use it too */
+ case RESET_SECURITYPRIV:
+ reset_securitypriv_hdl(padapter);
+ break;
+ case FREE_ASSOC_RESOURCES:
+ free_assoc_resources_hdl(padapter);
+ break;
+ case C2H_WK_CID:
+ rtw_hal_set_hwreg_with_buf(padapter, HW_VAR_C2H_HANDLE, pdrvextra_cmd->pbuf, pdrvextra_cmd->size);
+ break;
+ case DM_RA_MSK_WK_CID:
+ rtw_dm_ra_mask_hdl(padapter, (struct sta_info *)pdrvextra_cmd->pbuf);
+ break;
+ case BTINFO_WK_CID:
+ rtw_btinfo_hdl(padapter, pdrvextra_cmd->pbuf, pdrvextra_cmd->size);
+ break;
+ default:
+ break;
+ }
+
+ if (pdrvextra_cmd->pbuf && pdrvextra_cmd->size > 0) {
+ kfree(pdrvextra_cmd->pbuf);
+ }
+
+ return H2C_SUCCESS;
+}
+
+void rtw_survey_cmd_callback(struct adapter *padapter, struct cmd_obj *pcmd)
+{
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+
+ if (pcmd->res == H2C_DROPPED) {
+ /* TODO: cancel timer and do timeout handler directly... */
+		/* need to make the timeout handler OS-independent */
+ _set_timer(&pmlmepriv->scan_to_timer, 1);
+ } else if (pcmd->res != H2C_SUCCESS) {
+ _set_timer(&pmlmepriv->scan_to_timer, 1);
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_, ("\n ********Error: MgntActrtw_set_802_11_bssid_LIST_SCAN Fail ************\n\n."));
+ }
+
+ /* free cmd */
+ rtw_free_cmd_obj(pcmd);
+}
+
+void rtw_disassoc_cmd_callback(struct adapter *padapter, struct cmd_obj *pcmd)
+{
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+
+ if (pcmd->res != H2C_SUCCESS) {
+ spin_lock_bh(&pmlmepriv->lock);
+ set_fwstate(pmlmepriv, _FW_LINKED);
+ spin_unlock_bh(&pmlmepriv->lock);
+
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_, ("\n ***Error: disconnect_cmd_callback Fail ***\n."));
+ return;
+ }
+ /* free cmd */
+ rtw_free_cmd_obj(pcmd);
+}
+
+void rtw_joinbss_cmd_callback(struct adapter *padapter, struct cmd_obj *pcmd)
+{
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+
+ if (pcmd->res == H2C_DROPPED) {
+ /* TODO: cancel timer and do timeout handler directly... */
+		/* need to make the timeout handler OS-independent */
+ _set_timer(&pmlmepriv->assoc_timer, 1);
+ } else if (pcmd->res != H2C_SUCCESS) {
+ _set_timer(&pmlmepriv->assoc_timer, 1);
+ }
+
+ rtw_free_cmd_obj(pcmd);
+}
+
+void rtw_createbss_cmd_callback(struct adapter *padapter, struct cmd_obj *pcmd)
+{
+ u8 timer_cancelled;
+ struct sta_info *psta = NULL;
+ struct wlan_network *pwlan = NULL;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct wlan_bssid_ex *pnetwork = (struct wlan_bssid_ex *)pcmd->parmbuf;
+ struct wlan_network *tgt_network = &(pmlmepriv->cur_network);
+
+ if (pcmd->parmbuf == NULL)
+ goto exit;
+
+ if ((pcmd->res != H2C_SUCCESS)) {
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_, ("\n ********Error: rtw_createbss_cmd_callback Fail ************\n\n."));
+ _set_timer(&pmlmepriv->assoc_timer, 1);
+ }
+
+ _cancel_timer(&pmlmepriv->assoc_timer, &timer_cancelled);
+
+ spin_lock_bh(&pmlmepriv->lock);
+
+
+ if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
+ psta = rtw_get_stainfo(&padapter->stapriv, pnetwork->MacAddress);
+ if (!psta) {
+ psta = rtw_alloc_stainfo(&padapter->stapriv, pnetwork->MacAddress);
+ if (psta == NULL) {
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_, ("\nCan't alloc sta_info when createbss_cmd_callback\n"));
+ goto createbss_cmd_fail;
+ }
+ }
+
+ rtw_indicate_connect(padapter);
+	} else {
+ pwlan = _rtw_alloc_network(pmlmepriv);
+ spin_lock_bh(&(pmlmepriv->scanned_queue.lock));
+ if (pwlan == NULL) {
+ pwlan = rtw_get_oldest_wlan_network(&pmlmepriv->scanned_queue);
+ if (pwlan == NULL) {
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_, ("\n Error: can't get pwlan in rtw_joinbss_event_callback\n"));
+ spin_unlock_bh(&(pmlmepriv->scanned_queue.lock));
+ goto createbss_cmd_fail;
+ }
+ pwlan->last_scanned = jiffies;
+		} else {
+ list_add_tail(&(pwlan->list), &pmlmepriv->scanned_queue.queue);
+ }
+
+ pnetwork->Length = get_wlan_bssid_ex_sz(pnetwork);
+ memcpy(&(pwlan->network), pnetwork, pnetwork->Length);
+ /* pwlan->fixed = true; */
+
+ /* list_add_tail(&(pwlan->list), &pmlmepriv->scanned_queue.queue); */
+
+ /* copy pdev_network information to pmlmepriv->cur_network */
+ memcpy(&tgt_network->network, pnetwork, (get_wlan_bssid_ex_sz(pnetwork)));
+
+ /* reset DSConfig */
+ /* tgt_network->network.Configuration.DSConfig = (u32)rtw_ch2freq(pnetwork->Configuration.DSConfig); */
+
+ _clr_fwstate_(pmlmepriv, _FW_UNDER_LINKING);
+
+ spin_unlock_bh(&(pmlmepriv->scanned_queue.lock));
+		/* we will set _FW_LINKED when there is one more sta to join us (rtw_stassoc_event_callback) */
+
+ }
+
+createbss_cmd_fail:
+
+ spin_unlock_bh(&pmlmepriv->lock);
+exit:
+ rtw_free_cmd_obj(pcmd);
+}
+
+void rtw_setstaKey_cmdrsp_callback(struct adapter *padapter, struct cmd_obj *pcmd)
+{
+
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct set_stakey_rsp *psetstakey_rsp = (struct set_stakey_rsp *) (pcmd->rsp);
+ struct sta_info *psta = rtw_get_stainfo(pstapriv, psetstakey_rsp->addr);
+
+ if (psta == NULL) {
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_, ("\nERROR: rtw_setstaKey_cmdrsp_callback => can't get sta_info\n\n"));
+ goto exit;
+ }
+exit:
+ rtw_free_cmd_obj(pcmd);
+}
+
+void rtw_setassocsta_cmdrsp_callback(struct adapter *padapter, struct cmd_obj *pcmd)
+{
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct set_assocsta_parm *passocsta_parm = (struct set_assocsta_parm *)(pcmd->parmbuf);
+ struct set_assocsta_rsp *passocsta_rsp = (struct set_assocsta_rsp *) (pcmd->rsp);
+ struct sta_info *psta = rtw_get_stainfo(pstapriv, passocsta_parm->addr);
+
+ if (psta == NULL) {
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_, ("\nERROR: setassocsta_cmdrsp_callback => can't get sta_info\n\n"));
+ goto exit;
+ }
+
+ psta->aid = psta->mac_id = passocsta_rsp->cam_id;
+
+ spin_lock_bh(&pmlmepriv->lock);
+
+ if ((check_fwstate(pmlmepriv, WIFI_MP_STATE) == true) && (check_fwstate(pmlmepriv, _FW_UNDER_LINKING) == true))
+ _clr_fwstate_(pmlmepriv, _FW_UNDER_LINKING);
+
+ set_fwstate(pmlmepriv, _FW_LINKED);
+ spin_unlock_bh(&pmlmepriv->lock);
+
+exit:
+ rtw_free_cmd_obj(pcmd);
+}
diff --git a/drivers/staging/rtl8723bs/core/rtw_debug.c b/drivers/staging/rtl8723bs/core/rtw_debug.c
new file mode 100644
index 000000000000..3db02e9f27ab
--- /dev/null
+++ b/drivers/staging/rtl8723bs/core/rtw_debug.c
@@ -0,0 +1,1447 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#define _RTW_DEBUG_C_
+
+#include <drv_types.h>
+#include <rtw_debug.h>
+
+u32 GlobalDebugLevel = _drv_err_;
+
+#ifdef DEBUG_RTL871X
+
+ u64 GlobalDebugComponents = \
+ _module_rtl871x_xmit_c_ |
+ _module_xmit_osdep_c_ |
+ _module_rtl871x_recv_c_ |
+ _module_recv_osdep_c_ |
+ _module_rtl871x_mlme_c_ |
+ _module_mlme_osdep_c_ |
+ _module_rtl871x_sta_mgt_c_ |
+ _module_rtl871x_cmd_c_ |
+ _module_cmd_osdep_c_ |
+ _module_rtl871x_io_c_ |
+ _module_io_osdep_c_ |
+ _module_os_intfs_c_|
+ _module_rtl871x_security_c_|
+ _module_rtl871x_eeprom_c_|
+ _module_hal_init_c_|
+ _module_hci_hal_init_c_|
+ _module_rtl871x_ioctl_c_|
+ _module_rtl871x_ioctl_set_c_|
+ _module_rtl871x_ioctl_query_c_|
+ _module_rtl871x_pwrctrl_c_|
+ _module_hci_intfs_c_|
+ _module_hci_ops_c_|
+ _module_hci_ops_os_c_|
+ _module_rtl871x_ioctl_os_c|
+ _module_rtl8712_cmd_c_|
+ _module_hal_xmit_c_|
+ _module_rtl8712_recv_c_ |
+ _module_mp_ |
+ _module_efuse_;
+
+#endif /* DEBUG_RTL871X */
+
+#include <rtw_version.h>
+
+void dump_drv_version(void *sel)
+{
+ DBG_871X_SEL_NL(sel, "%s %s\n", "rtl8723bs", DRIVERVERSION);
+}
+
+void dump_log_level(void *sel)
+{
+ DBG_871X_SEL_NL(sel, "log_level:%d\n", GlobalDebugLevel);
+}
+
+void sd_f0_reg_dump(void *sel, struct adapter *adapter)
+{
+ int i;
+
+ for (i = 0x0; i <= 0xff; i++) {
+ if (i%16 == 0)
+ DBG_871X_SEL_NL(sel, "0x%02x ", i);
+
+ DBG_871X_SEL(sel, "%02x ", rtw_sd_f0_read8(adapter, i));
+
+ if (i%16 == 15)
+ DBG_871X_SEL(sel, "\n");
+ else if (i%8 == 7)
+ DBG_871X_SEL(sel, "\t");
+ }
+}
+
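+/* Dump MAC registers 0x000-0x7fc as 32-bit reads, four values per output line. */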
+void mac_reg_dump(void *sel, struct adapter *adapter)
+{
+ int i, j = 1;
+
+ DBG_871X_SEL_NL(sel, "======= MAC REG =======\n");
+
+ for (i = 0x0; i < 0x800; i += 4) {
+ if (j%4 == 1)
+ DBG_871X_SEL_NL(sel, "0x%03x", i);
+ DBG_871X_SEL(sel, " 0x%08x ", rtw_read32(adapter, i));
+ if ((j++)%4 == 0)
+ DBG_871X_SEL(sel, "\n");
+ }
+}
+
+void bb_reg_dump(void *sel, struct adapter *adapter)
+{
+ int i, j = 1;
+
+ DBG_871X_SEL_NL(sel, "======= BB REG =======\n");
+ for (i = 0x800; i < 0x1000 ; i += 4) {
+ if (j%4 == 1)
+ DBG_871X_SEL_NL(sel, "0x%03x", i);
+ DBG_871X_SEL(sel, " 0x%08x ", rtw_read32(adapter, i));
+ if ((j++)%4 == 0)
+ DBG_871X_SEL(sel, "\n");
+ }
+}
+
+void rf_reg_dump(void *sel, struct adapter *adapter)
+{
+ int i, j = 1, path;
+ u32 value;
+ u8 rf_type = 0;
+ u8 path_nums = 0;
+
+ rtw_hal_get_hwreg(adapter, HW_VAR_RF_TYPE, (u8 *)(&rf_type));
+ if (rf_type == RF_1T2R || rf_type == RF_1T1R)
+ path_nums = 1;
+ else
+ path_nums = 2;
+
+ DBG_871X_SEL_NL(sel, "======= RF REG =======\n");
+
+ for (path = 0; path < path_nums; path++) {
+ DBG_871X_SEL_NL(sel, "RF_Path(%x)\n", path);
+ for (i = 0; i < 0x100; i++) {
+ value = rtw_hal_read_rfreg(adapter, path, i, 0xffffffff);
+ if (j%4 == 1)
+ DBG_871X_SEL_NL(sel, "0x%02x ", i);
+ DBG_871X_SEL(sel, " 0x%08x ", value);
+ if ((j++)%4 == 0)
+ DBG_871X_SEL(sel, "\n");
+ }
+ }
+}
+
+#ifdef PROC_DEBUG
+ssize_t proc_set_write_reg(struct file *file, const char __user *buffer, size_t count, loff_t *pos, void *data)
+{
+ struct net_device *dev = data;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ char tmp[32];
+ u32 addr, val, len;
+
+ if (count < 3) {
+ DBG_871X("argument size is less than 3\n");
+ return -EFAULT;
+ }
+
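+ /* expected input: "<addr> <val> <len>" in hex; len selects an 8-, 16- or 32-bit register write */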
+ if (buffer && !copy_from_user(tmp, buffer, sizeof(tmp))) {
+
+ int num = sscanf(tmp, "%x %x %x", &addr, &val, &len);
+
+ if (num != 3) {
+ DBG_871X("invalid write_reg parameter!\n");
+ return count;
+ }
+
+ switch (len) {
+ case 1:
+ rtw_write8(padapter, addr, (u8)val);
+ break;
+ case 2:
+ rtw_write16(padapter, addr, (u16)val);
+ break;
+ case 4:
+ rtw_write32(padapter, addr, val);
+ break;
+ default:
+ DBG_871X("error write length =%d", len);
+ break;
+ }
+
+ }
+
+ return count;
+
+}
+
+static u32 proc_get_read_addr = 0xeeeeeeee;
+static u32 proc_get_read_len = 0x4;
+
+int proc_get_read_reg(struct seq_file *m, void *v)
+{
+ struct net_device *dev = m->private;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+
+ if (proc_get_read_addr == 0xeeeeeeee) {
+ DBG_871X_SEL_NL(m, "address not initialized\n");
+ return 0;
+ }
+
+ switch (proc_get_read_len) {
+ case 1:
+ DBG_871X_SEL_NL(m, "rtw_read8(0x%x) = 0x%x\n", proc_get_read_addr, rtw_read8(padapter, proc_get_read_addr));
+ break;
+ case 2:
+ DBG_871X_SEL_NL(m, "rtw_read16(0x%x) = 0x%x\n", proc_get_read_addr, rtw_read16(padapter, proc_get_read_addr));
+ break;
+ case 4:
+ DBG_871X_SEL_NL(m, "rtw_read32(0x%x) = 0x%x\n", proc_get_read_addr, rtw_read32(padapter, proc_get_read_addr));
+ break;
+ default:
+ DBG_871X_SEL_NL(m, "error read length =%d\n", proc_get_read_len);
+ break;
+ }
+
+ return 0;
+}
+
+ssize_t proc_set_read_reg(struct file *file, const char __user *buffer, size_t count, loff_t *pos, void *data)
+{
+ char tmp[16];
+ u32 addr, len;
+
+ if (count < 2) {
+ DBG_871X("argument size is less than 2\n");
+ return -EFAULT;
+ }
+
+ if (buffer && !copy_from_user(tmp, buffer, sizeof(tmp))) {
+
+ int num = sscanf(tmp, "%x %x", &addr, &len);
+
+ if (num != 2) {
+ DBG_871X("invalid read_reg parameter!\n");
+ return count;
+ }
+
+ proc_get_read_addr = addr;
+
+ proc_get_read_len = len;
+ }
+
+ return count;
+
+}
+
+int proc_get_fwstate(struct seq_file *m, void *v)
+{
+ struct net_device *dev = m->private;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+
+ DBG_871X_SEL_NL(m, "fwstate = 0x%x\n", get_fwstate(pmlmepriv));
+
+ return 0;
+}
+
+int proc_get_sec_info(struct seq_file *m, void *v)
+{
+ struct net_device *dev = m->private;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct security_priv *sec = &padapter->securitypriv;
+
+ DBG_871X_SEL_NL(m, "auth_alg = 0x%x, enc_alg = 0x%x, auth_type = 0x%x, enc_type = 0x%x\n",
+ sec->dot11AuthAlgrthm, sec->dot11PrivacyAlgrthm,
+ sec->ndisauthtype, sec->ndisencryptstatus);
+
+ DBG_871X_SEL_NL(m, "hw_decrypted =%d\n", sec->hw_decrypted);
+
+#ifdef DBG_SW_SEC_CNT
+ DBG_871X_SEL_NL(m, "wep_sw_enc_cnt =%llu, %llu, %llu\n"
+ , sec->wep_sw_enc_cnt_bc, sec->wep_sw_enc_cnt_mc, sec->wep_sw_enc_cnt_uc);
+ DBG_871X_SEL_NL(m, "wep_sw_dec_cnt =%llu, %llu, %llu\n"
+ , sec->wep_sw_dec_cnt_bc, sec->wep_sw_dec_cnt_mc, sec->wep_sw_dec_cnt_uc);
+
+ DBG_871X_SEL_NL(m, "tkip_sw_enc_cnt =%llu, %llu, %llu\n"
+ , sec->tkip_sw_enc_cnt_bc, sec->tkip_sw_enc_cnt_mc, sec->tkip_sw_enc_cnt_uc);
+ DBG_871X_SEL_NL(m, "tkip_sw_dec_cnt =%llu, %llu, %llu\n"
+ , sec->tkip_sw_dec_cnt_bc, sec->tkip_sw_dec_cnt_mc, sec->tkip_sw_dec_cnt_uc);
+
+ DBG_871X_SEL_NL(m, "aes_sw_enc_cnt =%llu, %llu, %llu\n"
+ , sec->aes_sw_enc_cnt_bc, sec->aes_sw_enc_cnt_mc, sec->aes_sw_enc_cnt_uc);
+ DBG_871X_SEL_NL(m, "aes_sw_dec_cnt =%llu, %llu, %llu\n"
+ , sec->aes_sw_dec_cnt_bc, sec->aes_sw_dec_cnt_mc, sec->aes_sw_dec_cnt_uc);
+#endif /* DBG_SW_SEC_CNT */
+
+ return 0;
+}
+
+int proc_get_mlmext_state(struct seq_file *m, void *v)
+{
+ struct net_device *dev = m->private;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+
+ DBG_871X_SEL_NL(m, "pmlmeinfo->state = 0x%x\n", pmlmeinfo->state);
+
+ return 0;
+}
+
+int proc_get_roam_flags(struct seq_file *m, void *v)
+{
+ struct net_device *dev = m->private;
+ struct adapter *adapter = (struct adapter *)rtw_netdev_priv(dev);
+
+ DBG_871X_SEL_NL(m, "0x%02x\n", rtw_roam_flags(adapter));
+
+ return 0;
+}
+
+ssize_t proc_set_roam_flags(struct file *file, const char __user *buffer, size_t count, loff_t *pos, void *data)
+{
+ struct net_device *dev = data;
+ struct adapter *adapter = (struct adapter *)rtw_netdev_priv(dev);
+
+ char tmp[32];
+ u8 flags;
+
+ if (count < 1)
+ return -EFAULT;
+
+ if (buffer && !copy_from_user(tmp, buffer, sizeof(tmp))) {
+
+ int num = sscanf(tmp, "%hhx", &flags);
+
+ if (num == 1)
+ rtw_assign_roam_flags(adapter, flags);
+ }
+
+ return count;
+
+}
+
+int proc_get_roam_param(struct seq_file *m, void *v)
+{
+ struct net_device *dev = m->private;
+ struct adapter *adapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct mlme_priv *mlme = &adapter->mlmepriv;
+
+ DBG_871X_SEL_NL(m, "%12s %12s %11s\n", "rssi_diff_th", "scanr_exp_ms", "scan_int_ms");
+ DBG_871X_SEL_NL(m, "%-12u %-12u %-11u\n"
+ , mlme->roam_rssi_diff_th
+ , mlme->roam_scanr_exp_ms
+ , mlme->roam_scan_int_ms
+ );
+
+ return 0;
+}
+
+ssize_t proc_set_roam_param(struct file *file, const char __user *buffer, size_t count, loff_t *pos, void *data)
+{
+ struct net_device *dev = data;
+ struct adapter *adapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct mlme_priv *mlme = &adapter->mlmepriv;
+
+ char tmp[32];
+ u8 rssi_diff_th;
+ u32 scanr_exp_ms;
+ u32 scan_int_ms;
+
+ if (count < 1)
+ return -EFAULT;
+
+ if (buffer && !copy_from_user(tmp, buffer, sizeof(tmp))) {
+
+ int num = sscanf(tmp, "%hhu %u %u", &rssi_diff_th, &scanr_exp_ms, &scan_int_ms);
+
+ if (num >= 1)
+ mlme->roam_rssi_diff_th = rssi_diff_th;
+ if (num >= 2)
+ mlme->roam_scanr_exp_ms = scanr_exp_ms;
+ if (num >= 3)
+ mlme->roam_scan_int_ms = scan_int_ms;
+ }
+
+ return count;
+
+}
+
+ssize_t proc_set_roam_tgt_addr(struct file *file, const char __user *buffer, size_t count, loff_t *pos, void *data)
+{
+ struct net_device *dev = data;
+ struct adapter *adapter = (struct adapter *)rtw_netdev_priv(dev);
+
+ char tmp[32];
+ u8 addr[ETH_ALEN];
+
+ if (count < 1)
+ return -EFAULT;
+
+ if (buffer && !copy_from_user(tmp, buffer, sizeof(tmp))) {
+
+ int num = sscanf(tmp, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx", addr, addr+1, addr+2, addr+3, addr+4, addr+5);
+ if (num == 6)
+ memcpy(adapter->mlmepriv.roam_tgt_addr, addr, ETH_ALEN);
+
+ DBG_871X("set roam_tgt_addr to "MAC_FMT"\n", MAC_ARG(adapter->mlmepriv.roam_tgt_addr));
+ }
+
+ return count;
+}
+
+int proc_get_qos_option(struct seq_file *m, void *v)
+{
+ struct net_device *dev = m->private;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+
+ DBG_871X_SEL_NL(m, "qos_option =%d\n", pmlmepriv->qospriv.qos_option);
+
+ return 0;
+}
+
+int proc_get_ht_option(struct seq_file *m, void *v)
+{
+ struct net_device *dev = m->private;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+
+ DBG_871X_SEL_NL(m, "ht_option =%d\n", pmlmepriv->htpriv.ht_option);
+
+ return 0;
+}
+
+int proc_get_rf_info(struct seq_file *m, void *v)
+{
+ struct net_device *dev = m->private;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+
+ DBG_871X_SEL_NL(m, "cur_ch =%d, cur_bw =%d, cur_ch_offet =%d\n",
+ pmlmeext->cur_channel, pmlmeext->cur_bwmode, pmlmeext->cur_ch_offset);
+
+ DBG_871X_SEL_NL(m, "oper_ch =%d, oper_bw =%d, oper_ch_offet =%d\n",
+ rtw_get_oper_ch(padapter), rtw_get_oper_bw(padapter), rtw_get_oper_choffset(padapter));
+
+ return 0;
+}
+
+int proc_get_survey_info(struct seq_file *m, void *v)
+{
+ struct net_device *dev = m->private;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct __queue *queue = &(pmlmepriv->scanned_queue);
+ struct wlan_network *pnetwork = NULL;
+ struct list_head *plist, *phead;
+ s32 notify_signal;
+ s16 notify_noise = 0;
+ u16 index = 0;
+
+ spin_lock_bh(&(pmlmepriv->scanned_queue.lock));
+ phead = get_list_head(queue);
+ plist = phead ? get_next(phead) : NULL;
+ if ((!phead) || (!plist)) {
+ spin_unlock_bh(&(pmlmepriv->scanned_queue.lock));
+ return 0;
+ }
+
+ DBG_871X_SEL_NL(m, "%5s %-17s %3s %-3s %-4s %-4s %5s %s\n", "index", "bssid", "ch", "RSSI", "SdBm", "Noise", "age", "ssid");
+ while (1) {
+ if (phead == plist)
+ break;
+
+ pnetwork = LIST_CONTAINOR(plist, struct wlan_network, list);
+
+ if (!pnetwork)
+ break;
+
+ if (check_fwstate(pmlmepriv, _FW_LINKED) == true &&
+ is_same_network(&pmlmepriv->cur_network.network, &pnetwork->network, 0)) {
+ notify_signal = translate_percentage_to_dbm(padapter->recvpriv.signal_strength);/*dbm*/
+ } else {
+ notify_signal = translate_percentage_to_dbm(pnetwork->network.PhyInfo.SignalStrength);/*dbm*/
+ }
+
+ #if defined(CONFIG_SIGNAL_DISPLAY_DBM) && defined(CONFIG_BACKGROUND_NOISE_MONITOR)
+ rtw_hal_get_odm_var(padapter, HAL_ODM_NOISE_MONITOR, &(pnetwork->network.Configuration.DSConfig), &(notify_noise));
+ #endif
+
+ DBG_871X_SEL_NL(m, "%5d "MAC_FMT" %3d %3d %4d %4d %5d %s\n",
+ ++index,
+ MAC_ARG(pnetwork->network.MacAddress),
+ pnetwork->network.Configuration.DSConfig,
+ (int)pnetwork->network.Rssi,
+ notify_signal,
+ notify_noise,
+ jiffies_to_msecs(jiffies - pnetwork->last_scanned),
+ /*translate_percentage_to_dbm(pnetwork->network.PhyInfo.SignalStrength),*/
+ pnetwork->network.Ssid.Ssid);
+ plist = get_next(plist);
+ }
+ spin_unlock_bh(&(pmlmepriv->scanned_queue.lock));
+
+ return 0;
+}
+
+int proc_get_ap_info(struct seq_file *m, void *v)
+{
+ struct net_device *dev = m->private;
+ struct sta_info *psta;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct wlan_network *cur_network = &(pmlmepriv->cur_network);
+ struct sta_priv *pstapriv = &padapter->stapriv;
+
+ psta = rtw_get_stainfo(pstapriv, cur_network->network.MacAddress);
+ if (psta) {
+ int i;
+ struct recv_reorder_ctrl *preorder_ctrl;
+
+ DBG_871X_SEL_NL(m, "SSID =%s\n", cur_network->network.Ssid.Ssid);
+ DBG_871X_SEL_NL(m, "sta's macaddr:" MAC_FMT "\n", MAC_ARG(psta->hwaddr));
+ DBG_871X_SEL_NL(m, "cur_channel =%d, cur_bwmode =%d, cur_ch_offset =%d\n", pmlmeext->cur_channel, pmlmeext->cur_bwmode, pmlmeext->cur_ch_offset);
+ DBG_871X_SEL_NL(m, "wireless_mode = 0x%x, rtsen =%d, cts2slef =%d\n", psta->wireless_mode, psta->rtsen, psta->cts2self);
+ DBG_871X_SEL_NL(m, "state = 0x%x, aid =%d, macid =%d, raid =%d\n", psta->state, psta->aid, psta->mac_id, psta->raid);
+ DBG_871X_SEL_NL(m, "qos_en =%d, ht_en =%d, init_rate =%d\n", psta->qos_option, psta->htpriv.ht_option, psta->init_rate);
+ DBG_871X_SEL_NL(m, "bwmode =%d, ch_offset =%d, sgi_20m =%d, sgi_40m =%d\n", psta->bw_mode, psta->htpriv.ch_offset, psta->htpriv.sgi_20m, psta->htpriv.sgi_40m);
+ DBG_871X_SEL_NL(m, "ampdu_enable = %d\n", psta->htpriv.ampdu_enable);
+ DBG_871X_SEL_NL(m, "agg_enable_bitmap =%x, candidate_tid_bitmap =%x\n", psta->htpriv.agg_enable_bitmap, psta->htpriv.candidate_tid_bitmap);
+ DBG_871X_SEL_NL(m, "ldpc_cap = 0x%x, stbc_cap = 0x%x, beamform_cap = 0x%x\n", psta->htpriv.ldpc_cap, psta->htpriv.stbc_cap, psta->htpriv.beamform_cap);
+
+ for (i = 0; i < 16; i++) {
+ preorder_ctrl = &psta->recvreorder_ctrl[i];
+ if (preorder_ctrl->enable) {
+ DBG_871X_SEL_NL(m, "tid =%d, indicate_seq =%d\n", i, preorder_ctrl->indicate_seq);
+ }
+ }
+
+ } else {
+ DBG_871X_SEL_NL(m, "can't get sta's macaddr, cur_network's macaddr:" MAC_FMT "\n", MAC_ARG(cur_network->network.MacAddress));
+ }
+
+ return 0;
+}
+
+int proc_get_adapter_state(struct seq_file *m, void *v)
+{
+ struct net_device *dev = m->private;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+
+ DBG_871X_SEL_NL(m, "name =%s, bSurpriseRemoved =%d, bDriverStopped =%d\n",
+ dev->name, padapter->bSurpriseRemoved, padapter->bDriverStopped);
+
+ return 0;
+}
+
+int proc_get_trx_info(struct seq_file *m, void *v)
+{
+ struct net_device *dev = m->private;
+ int i;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+ struct recv_priv *precvpriv = &padapter->recvpriv;
+ struct hw_xmit *phwxmit;
+
+ DBG_871X_SEL_NL(m, "free_xmitbuf_cnt =%d, free_xmitframe_cnt =%d\n"
+ , pxmitpriv->free_xmitbuf_cnt, pxmitpriv->free_xmitframe_cnt);
+ DBG_871X_SEL_NL(m, "free_ext_xmitbuf_cnt =%d, free_xframe_ext_cnt =%d\n"
+ , pxmitpriv->free_xmit_extbuf_cnt, pxmitpriv->free_xframe_ext_cnt);
+ DBG_871X_SEL_NL(m, "free_recvframe_cnt =%d\n"
+ , precvpriv->free_recvframe_cnt);
+
+ for (i = 0; i < 4; i++) {
+ phwxmit = pxmitpriv->hwxmits + i;
+ DBG_871X_SEL_NL(m, "%d, hwq.accnt =%d\n", i, phwxmit->accnt);
+ }
+
+ return 0;
+}
+
+int proc_get_rate_ctl(struct seq_file *m, void *v)
+{
+ struct net_device *dev = m->private;
+ struct adapter *adapter = (struct adapter *)rtw_netdev_priv(dev);
+
+ if (adapter->fix_rate != 0xff) {
+ DBG_871X_SEL_NL(m, "FIX\n");
+ DBG_871X_SEL_NL(m, "0x%02x\n", adapter->fix_rate);
+ } else {
+ DBG_871X_SEL_NL(m, "RA\n");
+ }
+
+ return 0;
+}
+
+ssize_t proc_set_rate_ctl(struct file *file, const char __user *buffer, size_t count, loff_t *pos, void *data)
+{
+ struct net_device *dev = data;
+ struct adapter *adapter = (struct adapter *)rtw_netdev_priv(dev);
+ char tmp[32];
+ u8 fix_rate;
+
+ if (count < 1)
+ return -EFAULT;
+
+ if (buffer && !copy_from_user(tmp, buffer, sizeof(tmp))) {
+
+ int num = sscanf(tmp, "%hhx", &fix_rate);
+
+ if (num >= 1)
+ adapter->fix_rate = fix_rate;
+ }
+
+ return count;
+}
+
+ssize_t proc_set_fwdl_test_case(struct file *file, const char __user *buffer, size_t count, loff_t *pos, void *data)
+{
+ char tmp[32];
+
+ if (count < 1)
+ return -EFAULT;
+
+ if (buffer && !copy_from_user(tmp, buffer, sizeof(tmp))) {
+ sscanf(tmp, "%hhu %hhu", &g_fwdl_chksum_fail, &g_fwdl_wintint_rdy_fail);
+ }
+
+ return count;
+}
+
+ssize_t proc_set_wait_hiq_empty(struct file *file, const char __user *buffer, size_t count, loff_t *pos, void *data)
+{
+ char tmp[32];
+
+ if (count < 1)
+ return -EFAULT;
+
+ if (buffer && !copy_from_user(tmp, buffer, sizeof(tmp))) {
+ sscanf(tmp, "%u", &g_wait_hiq_empty);
+ }
+
+ return count;
+}
+
+int proc_get_suspend_resume_info(struct seq_file *m, void *v)
+{
+ struct net_device *dev = m->private;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct dvobj_priv *dvobj = padapter->dvobj;
+ struct debug_priv *pdbgpriv = &dvobj->drv_dbg;
+
+ DBG_871X_SEL_NL(m, "dbg_sdio_alloc_irq_cnt =%d\n", pdbgpriv->dbg_sdio_alloc_irq_cnt);
+ DBG_871X_SEL_NL(m, "dbg_sdio_free_irq_cnt =%d\n", pdbgpriv->dbg_sdio_free_irq_cnt);
+ DBG_871X_SEL_NL(m, "dbg_sdio_alloc_irq_error_cnt =%d\n", pdbgpriv->dbg_sdio_alloc_irq_error_cnt);
+ DBG_871X_SEL_NL(m, "dbg_sdio_free_irq_error_cnt =%d\n", pdbgpriv->dbg_sdio_free_irq_error_cnt);
+ DBG_871X_SEL_NL(m, "dbg_sdio_init_error_cnt =%d\n", pdbgpriv->dbg_sdio_init_error_cnt);
+ DBG_871X_SEL_NL(m, "dbg_sdio_deinit_error_cnt =%d\n", pdbgpriv->dbg_sdio_deinit_error_cnt);
+ DBG_871X_SEL_NL(m, "dbg_suspend_error_cnt =%d\n", pdbgpriv->dbg_suspend_error_cnt);
+ DBG_871X_SEL_NL(m, "dbg_suspend_cnt =%d\n", pdbgpriv->dbg_suspend_cnt);
+ DBG_871X_SEL_NL(m, "dbg_resume_cnt =%d\n", pdbgpriv->dbg_resume_cnt);
+ DBG_871X_SEL_NL(m, "dbg_resume_error_cnt =%d\n", pdbgpriv->dbg_resume_error_cnt);
+ DBG_871X_SEL_NL(m, "dbg_deinit_fail_cnt =%d\n", pdbgpriv->dbg_deinit_fail_cnt);
+ DBG_871X_SEL_NL(m, "dbg_carddisable_cnt =%d\n", pdbgpriv->dbg_carddisable_cnt);
+ DBG_871X_SEL_NL(m, "dbg_ps_insuspend_cnt =%d\n", pdbgpriv->dbg_ps_insuspend_cnt);
+ DBG_871X_SEL_NL(m, "dbg_dev_unload_inIPS_cnt =%d\n", pdbgpriv->dbg_dev_unload_inIPS_cnt);
+ DBG_871X_SEL_NL(m, "dbg_scan_pwr_state_cnt =%d\n", pdbgpriv->dbg_scan_pwr_state_cnt);
+ DBG_871X_SEL_NL(m, "dbg_downloadfw_pwr_state_cnt =%d\n", pdbgpriv->dbg_downloadfw_pwr_state_cnt);
+ DBG_871X_SEL_NL(m, "dbg_carddisable_error_cnt =%d\n", pdbgpriv->dbg_carddisable_error_cnt);
+ DBG_871X_SEL_NL(m, "dbg_fw_read_ps_state_fail_cnt =%d\n", pdbgpriv->dbg_fw_read_ps_state_fail_cnt);
+ DBG_871X_SEL_NL(m, "dbg_leave_ips_fail_cnt =%d\n", pdbgpriv->dbg_leave_ips_fail_cnt);
+ DBG_871X_SEL_NL(m, "dbg_leave_lps_fail_cnt =%d\n", pdbgpriv->dbg_leave_lps_fail_cnt);
+ DBG_871X_SEL_NL(m, "dbg_h2c_leave32k_fail_cnt =%d\n", pdbgpriv->dbg_h2c_leave32k_fail_cnt);
+ DBG_871X_SEL_NL(m, "dbg_diswow_dload_fw_fail_cnt =%d\n", pdbgpriv->dbg_diswow_dload_fw_fail_cnt);
+ DBG_871X_SEL_NL(m, "dbg_enwow_dload_fw_fail_cnt =%d\n", pdbgpriv->dbg_enwow_dload_fw_fail_cnt);
+ DBG_871X_SEL_NL(m, "dbg_ips_drvopen_fail_cnt =%d\n", pdbgpriv->dbg_ips_drvopen_fail_cnt);
+ DBG_871X_SEL_NL(m, "dbg_poll_fail_cnt =%d\n", pdbgpriv->dbg_poll_fail_cnt);
+ DBG_871X_SEL_NL(m, "dbg_rpwm_toogle_cnt =%d\n", pdbgpriv->dbg_rpwm_toogle_cnt);
+ DBG_871X_SEL_NL(m, "dbg_rpwm_timeout_fail_cnt =%d\n", pdbgpriv->dbg_rpwm_timeout_fail_cnt);
+
+ return 0;
+}
+
+#ifdef CONFIG_DBG_COUNTER
+
+int proc_get_rx_logs(struct seq_file *m, void *v)
+{
+ struct net_device *dev = m->private;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct rx_logs *rx_logs = &padapter->rx_logs;
+
+ DBG_871X_SEL_NL(m,
+ "intf_rx =%d\n"
+ "intf_rx_err_recvframe =%d\n"
+ "intf_rx_err_skb =%d\n"
+ "intf_rx_report =%d\n"
+ "core_rx =%d\n"
+ "core_rx_pre =%d\n"
+ "core_rx_pre_ver_err =%d\n"
+ "core_rx_pre_mgmt =%d\n"
+ "core_rx_pre_mgmt_err_80211w =%d\n"
+ "core_rx_pre_mgmt_err =%d\n"
+ "core_rx_pre_ctrl =%d\n"
+ "core_rx_pre_ctrl_err =%d\n"
+ "core_rx_pre_data =%d\n"
+ "core_rx_pre_data_wapi_seq_err =%d\n"
+ "core_rx_pre_data_wapi_key_err =%d\n"
+ "core_rx_pre_data_handled =%d\n"
+ "core_rx_pre_data_err =%d\n"
+ "core_rx_pre_data_unknown =%d\n"
+ "core_rx_pre_unknown =%d\n"
+ "core_rx_enqueue =%d\n"
+ "core_rx_dequeue =%d\n"
+ "core_rx_post =%d\n"
+ "core_rx_post_decrypt =%d\n"
+ "core_rx_post_decrypt_wep =%d\n"
+ "core_rx_post_decrypt_tkip =%d\n"
+ "core_rx_post_decrypt_aes =%d\n"
+ "core_rx_post_decrypt_wapi =%d\n"
+ "core_rx_post_decrypt_hw =%d\n"
+ "core_rx_post_decrypt_unknown =%d\n"
+ "core_rx_post_decrypt_err =%d\n"
+ "core_rx_post_defrag_err =%d\n"
+ "core_rx_post_portctrl_err =%d\n"
+ "core_rx_post_indicate =%d\n"
+ "core_rx_post_indicate_in_oder =%d\n"
+ "core_rx_post_indicate_reoder =%d\n"
+ "core_rx_post_indicate_err =%d\n"
+ "os_indicate =%d\n"
+ "os_indicate_ap_mcast =%d\n"
+ "os_indicate_ap_forward =%d\n"
+ "os_indicate_ap_self =%d\n"
+ "os_indicate_err =%d\n"
+ "os_netif_ok =%d\n"
+ "os_netif_err =%d\n",
+ rx_logs->intf_rx,
+ rx_logs->intf_rx_err_recvframe,
+ rx_logs->intf_rx_err_skb,
+ rx_logs->intf_rx_report,
+ rx_logs->core_rx,
+ rx_logs->core_rx_pre,
+ rx_logs->core_rx_pre_ver_err,
+ rx_logs->core_rx_pre_mgmt,
+ rx_logs->core_rx_pre_mgmt_err_80211w,
+ rx_logs->core_rx_pre_mgmt_err,
+ rx_logs->core_rx_pre_ctrl,
+ rx_logs->core_rx_pre_ctrl_err,
+ rx_logs->core_rx_pre_data,
+ rx_logs->core_rx_pre_data_wapi_seq_err,
+ rx_logs->core_rx_pre_data_wapi_key_err,
+ rx_logs->core_rx_pre_data_handled,
+ rx_logs->core_rx_pre_data_err,
+ rx_logs->core_rx_pre_data_unknown,
+ rx_logs->core_rx_pre_unknown,
+ rx_logs->core_rx_enqueue,
+ rx_logs->core_rx_dequeue,
+ rx_logs->core_rx_post,
+ rx_logs->core_rx_post_decrypt,
+ rx_logs->core_rx_post_decrypt_wep,
+ rx_logs->core_rx_post_decrypt_tkip,
+ rx_logs->core_rx_post_decrypt_aes,
+ rx_logs->core_rx_post_decrypt_wapi,
+ rx_logs->core_rx_post_decrypt_hw,
+ rx_logs->core_rx_post_decrypt_unknown,
+ rx_logs->core_rx_post_decrypt_err,
+ rx_logs->core_rx_post_defrag_err,
+ rx_logs->core_rx_post_portctrl_err,
+ rx_logs->core_rx_post_indicate,
+ rx_logs->core_rx_post_indicate_in_oder,
+ rx_logs->core_rx_post_indicate_reoder,
+ rx_logs->core_rx_post_indicate_err,
+ rx_logs->os_indicate,
+ rx_logs->os_indicate_ap_mcast,
+ rx_logs->os_indicate_ap_forward,
+ rx_logs->os_indicate_ap_self,
+ rx_logs->os_indicate_err,
+ rx_logs->os_netif_ok,
+ rx_logs->os_netif_err
+ );
+
+ return 0;
+}
+
+int proc_get_tx_logs(struct seq_file *m, void *v)
+{
+ struct net_device *dev = m->private;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct tx_logs *tx_logs = &padapter->tx_logs;
+
+ DBG_871X_SEL_NL(m,
+ "os_tx =%d\n"
+ "os_tx_err_up =%d\n"
+ "os_tx_err_xmit =%d\n"
+ "os_tx_m2u =%d\n"
+ "os_tx_m2u_ignore_fw_linked =%d\n"
+ "os_tx_m2u_ignore_self =%d\n"
+ "os_tx_m2u_entry =%d\n"
+ "os_tx_m2u_entry_err_xmit =%d\n"
+ "os_tx_m2u_entry_err_skb =%d\n"
+ "os_tx_m2u_stop =%d\n"
+ "core_tx =%d\n"
+ "core_tx_err_pxmitframe =%d\n"
+ "core_tx_err_brtx =%d\n"
+ "core_tx_upd_attrib =%d\n"
+ "core_tx_upd_attrib_adhoc =%d\n"
+ "core_tx_upd_attrib_sta =%d\n"
+ "core_tx_upd_attrib_ap =%d\n"
+ "core_tx_upd_attrib_unknown =%d\n"
+ "core_tx_upd_attrib_dhcp =%d\n"
+ "core_tx_upd_attrib_icmp =%d\n"
+ "core_tx_upd_attrib_active =%d\n"
+ "core_tx_upd_attrib_err_ucast_sta =%d\n"
+ "core_tx_upd_attrib_err_ucast_ap_link =%d\n"
+ "core_tx_upd_attrib_err_sta =%d\n"
+ "core_tx_upd_attrib_err_link =%d\n"
+ "core_tx_upd_attrib_err_sec =%d\n"
+ "core_tx_ap_enqueue_warn_fwstate =%d\n"
+ "core_tx_ap_enqueue_warn_sta =%d\n"
+ "core_tx_ap_enqueue_warn_nosta =%d\n"
+ "core_tx_ap_enqueue_warn_link =%d\n"
+ "core_tx_ap_enqueue_warn_trigger =%d\n"
+ "core_tx_ap_enqueue_mcast =%d\n"
+ "core_tx_ap_enqueue_ucast =%d\n"
+ "core_tx_ap_enqueue =%d\n"
+ "intf_tx =%d\n"
+ "intf_tx_pending_ac =%d\n"
+ "intf_tx_pending_fw_under_survey =%d\n"
+ "intf_tx_pending_fw_under_linking =%d\n"
+ "intf_tx_pending_xmitbuf =%d\n"
+ "intf_tx_enqueue =%d\n"
+ "core_tx_enqueue =%d\n"
+ "core_tx_enqueue_class =%d\n"
+ "core_tx_enqueue_class_err_sta =%d\n"
+ "core_tx_enqueue_class_err_nosta =%d\n"
+ "core_tx_enqueue_class_err_fwlink =%d\n"
+ "intf_tx_direct =%d\n"
+ "intf_tx_direct_err_coalesce =%d\n"
+ "intf_tx_dequeue =%d\n"
+ "intf_tx_dequeue_err_coalesce =%d\n"
+ "intf_tx_dump_xframe =%d\n"
+ "intf_tx_dump_xframe_err_txdesc =%d\n"
+ "intf_tx_dump_xframe_err_port =%d\n",
+ tx_logs->os_tx,
+ tx_logs->os_tx_err_up,
+ tx_logs->os_tx_err_xmit,
+ tx_logs->os_tx_m2u,
+ tx_logs->os_tx_m2u_ignore_fw_linked,
+ tx_logs->os_tx_m2u_ignore_self,
+ tx_logs->os_tx_m2u_entry,
+ tx_logs->os_tx_m2u_entry_err_xmit,
+ tx_logs->os_tx_m2u_entry_err_skb,
+ tx_logs->os_tx_m2u_stop,
+ tx_logs->core_tx,
+ tx_logs->core_tx_err_pxmitframe,
+ tx_logs->core_tx_err_brtx,
+ tx_logs->core_tx_upd_attrib,
+ tx_logs->core_tx_upd_attrib_adhoc,
+ tx_logs->core_tx_upd_attrib_sta,
+ tx_logs->core_tx_upd_attrib_ap,
+ tx_logs->core_tx_upd_attrib_unknown,
+ tx_logs->core_tx_upd_attrib_dhcp,
+ tx_logs->core_tx_upd_attrib_icmp,
+ tx_logs->core_tx_upd_attrib_active,
+ tx_logs->core_tx_upd_attrib_err_ucast_sta,
+ tx_logs->core_tx_upd_attrib_err_ucast_ap_link,
+ tx_logs->core_tx_upd_attrib_err_sta,
+ tx_logs->core_tx_upd_attrib_err_link,
+ tx_logs->core_tx_upd_attrib_err_sec,
+ tx_logs->core_tx_ap_enqueue_warn_fwstate,
+ tx_logs->core_tx_ap_enqueue_warn_sta,
+ tx_logs->core_tx_ap_enqueue_warn_nosta,
+ tx_logs->core_tx_ap_enqueue_warn_link,
+ tx_logs->core_tx_ap_enqueue_warn_trigger,
+ tx_logs->core_tx_ap_enqueue_mcast,
+ tx_logs->core_tx_ap_enqueue_ucast,
+ tx_logs->core_tx_ap_enqueue,
+ tx_logs->intf_tx,
+ tx_logs->intf_tx_pending_ac,
+ tx_logs->intf_tx_pending_fw_under_survey,
+ tx_logs->intf_tx_pending_fw_under_linking,
+ tx_logs->intf_tx_pending_xmitbuf,
+ tx_logs->intf_tx_enqueue,
+ tx_logs->core_tx_enqueue,
+ tx_logs->core_tx_enqueue_class,
+ tx_logs->core_tx_enqueue_class_err_sta,
+ tx_logs->core_tx_enqueue_class_err_nosta,
+ tx_logs->core_tx_enqueue_class_err_fwlink,
+ tx_logs->intf_tx_direct,
+ tx_logs->intf_tx_direct_err_coalesce,
+ tx_logs->intf_tx_dequeue,
+ tx_logs->intf_tx_dequeue_err_coalesce,
+ tx_logs->intf_tx_dump_xframe,
+ tx_logs->intf_tx_dump_xframe_err_txdesc,
+ tx_logs->intf_tx_dump_xframe_err_port
+ );
+
+ return 0;
+}
+
+int proc_get_int_logs(struct seq_file *m, void *v)
+{
+ struct net_device *dev = m->private;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+
+ DBG_871X_SEL_NL(m,
+ "all =%d\n"
+ "err =%d\n"
+ "tbdok =%d\n"
+ "tbder =%d\n"
+ "bcnderr =%d\n"
+ "bcndma =%d\n"
+ "bcndma_e =%d\n"
+ "rx =%d\n"
+ "rx_rdu =%d\n"
+ "rx_fovw =%d\n"
+ "txfovw =%d\n"
+ "mgntok =%d\n"
+ "highdok =%d\n"
+ "bkdok =%d\n"
+ "bedok =%d\n"
+ "vidok =%d\n"
+ "vodok =%d\n",
+ padapter->int_logs.all,
+ padapter->int_logs.err,
+ padapter->int_logs.tbdok,
+ padapter->int_logs.tbder,
+ padapter->int_logs.bcnderr,
+ padapter->int_logs.bcndma,
+ padapter->int_logs.bcndma_e,
+ padapter->int_logs.rx,
+ padapter->int_logs.rx_rdu,
+ padapter->int_logs.rx_fovw,
+ padapter->int_logs.txfovw,
+ padapter->int_logs.mgntok,
+ padapter->int_logs.highdok,
+ padapter->int_logs.bkdok,
+ padapter->int_logs.bedok,
+ padapter->int_logs.vidok,
+ padapter->int_logs.vodok
+ );
+
+ return 0;
+}
+
+#endif /* CONFIG_DBG_COUNTER*/
+
+int proc_get_rx_signal(struct seq_file *m, void *v)
+{
+ struct net_device *dev = m->private;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+
+ DBG_871X_SEL_NL(m, "rssi:%d\n", padapter->recvpriv.rssi);
+ /*DBG_871X_SEL_NL(m, "rxpwdb:%d\n", padapter->recvpriv.rxpwdb);*/
+ DBG_871X_SEL_NL(m, "signal_strength:%u\n", padapter->recvpriv.signal_strength);
+ DBG_871X_SEL_NL(m, "signal_qual:%u\n", padapter->recvpriv.signal_qual);
+ DBG_871X_SEL_NL(m, "noise:%d\n", padapter->recvpriv.noise);
+ rtw_odm_get_perpkt_rssi(m, padapter);
+ #ifdef DBG_RX_SIGNAL_DISPLAY_RAW_DATA
+ rtw_get_raw_rssi_info(m, padapter);
+ #endif
+ return 0;
+}
+
+int proc_get_hw_status(struct seq_file *m, void *v)
+{
+ struct net_device *dev = m->private;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct dvobj_priv *dvobj = padapter->dvobj;
+ struct debug_priv *pdbgpriv = &dvobj->drv_dbg;
+
+ DBG_871X_SEL_NL(m, "RX FIFO full count: last_time =%lld, current_time =%lld, differential =%lld\n"
+ , pdbgpriv->dbg_rx_fifo_last_overflow, pdbgpriv->dbg_rx_fifo_curr_overflow, pdbgpriv->dbg_rx_fifo_diff_overflow);
+
+ return 0;
+}
+
+ssize_t proc_set_rx_signal(struct file *file, const char __user *buffer, size_t count, loff_t *pos, void *data)
+{
+ struct net_device *dev = data;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ char tmp[32];
+ u32 is_signal_dbg, signal_strength;
+
+ if (count < 1)
+ return -EFAULT;
+
+ if (buffer && !copy_from_user(tmp, buffer, sizeof(tmp))) {
+
+ int num = sscanf(tmp, "%u %u", &is_signal_dbg, &signal_strength);
+
+ is_signal_dbg = is_signal_dbg == 0 ? 0 : 1;
+
+ if (is_signal_dbg && num != 2)
+ return count;
+
+ signal_strength = signal_strength > 100 ? 100 : signal_strength;
+
+ padapter->recvpriv.is_signal_dbg = is_signal_dbg;
+ padapter->recvpriv.signal_strength_dbg = signal_strength;
+
+ if (is_signal_dbg)
+ DBG_871X("set %s %u\n", "DBG_SIGNAL_STRENGTH", signal_strength);
+ else
+ DBG_871X("set %s\n", "HW_SIGNAL_STRENGTH");
+
+ }
+
+ return count;
+
+}
+
+int proc_get_ht_enable(struct seq_file *m, void *v)
+{
+ struct net_device *dev = m->private;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct registry_priv *pregpriv = &padapter->registrypriv;
+
+ if (pregpriv)
+ DBG_871X_SEL_NL(m, "%d\n", pregpriv->ht_enable);
+
+ return 0;
+}
+
+ssize_t proc_set_ht_enable(struct file *file, const char __user *buffer, size_t count, loff_t *pos, void *data)
+{
+ struct net_device *dev = data;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct registry_priv *pregpriv = &padapter->registrypriv;
+ char tmp[32];
+ u32 mode;
+
+ if (count < 1)
+ return -EFAULT;
+
+ if (buffer && !copy_from_user(tmp, buffer, sizeof(tmp))) {
+ sscanf(tmp, "%d ", &mode);
+
+ if (pregpriv && mode < 2) {
+ pregpriv->ht_enable = mode;
+ printk("ht_enable =%d\n", pregpriv->ht_enable);
+ }
+ }
+
+ return count;
+
+}
+
+int proc_get_bw_mode(struct seq_file *m, void *v)
+{
+ struct net_device *dev = m->private;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct registry_priv *pregpriv = &padapter->registrypriv;
+
+ if (pregpriv)
+ DBG_871X_SEL_NL(m, "0x%02x\n", pregpriv->bw_mode);
+
+ return 0;
+}
+
+ssize_t proc_set_bw_mode(struct file *file, const char __user *buffer, size_t count, loff_t *pos, void *data)
+{
+ struct net_device *dev = data;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct registry_priv *pregpriv = &padapter->registrypriv;
+ char tmp[32];
+ u32 mode;
+
+ if (count < 1)
+ return -EFAULT;
+
+ if (buffer && !copy_from_user(tmp, buffer, sizeof(tmp))) {
+ sscanf(tmp, "%d ", &mode);
+
+ if (pregpriv && mode < 2) {
+
+ pregpriv->bw_mode = mode;
+ printk("bw_mode =%d\n", mode);
+
+ }
+ }
+
+ return count;
+
+}
+
+int proc_get_ampdu_enable(struct seq_file *m, void *v)
+{
+ struct net_device *dev = m->private;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct registry_priv *pregpriv = &padapter->registrypriv;
+
+ if (pregpriv)
+ DBG_871X_SEL_NL(m, "%d\n", pregpriv->ampdu_enable);
+
+ return 0;
+}
+
+ssize_t proc_set_ampdu_enable(struct file *file, const char __user *buffer, size_t count, loff_t *pos, void *data)
+{
+ struct net_device *dev = data;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct registry_priv *pregpriv = &padapter->registrypriv;
+ char tmp[32];
+ u32 mode;
+
+ if (count < 1)
+ return -EFAULT;
+
+ if (buffer && !copy_from_user(tmp, buffer, sizeof(tmp))) {
+
+ sscanf(tmp, "%d ", &mode);
+
+ if (pregpriv && mode < 3) {
+ pregpriv->ampdu_enable = mode;
+ printk("ampdu_enable =%d\n", mode);
+ }
+
+ }
+
+ return count;
+
+}
+
+int proc_get_rx_ampdu(struct seq_file *m, void *v)
+{
+ struct net_device *dev = m->private;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct registry_priv *pregpriv = &padapter->registrypriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+
+ if (pregpriv)
+ DBG_871X_SEL_NL(m,
+ "bAcceptAddbaReq = %d , 0:Reject AP's Add BA req, 1:Accept AP's Add BA req.\n", pmlmeinfo->bAcceptAddbaReq
+ );
+
+ return 0;
+}
+
+ssize_t proc_set_rx_ampdu(struct file *file, const char __user *buffer, size_t count, loff_t *pos, void *data)
+{
+ struct net_device *dev = data;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct registry_priv *pregpriv = &padapter->registrypriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ char tmp[32];
+ u32 mode;
+
+ if (count < 1)
+ return -EFAULT;
+
+ if (buffer && !copy_from_user(tmp, buffer, sizeof(tmp))) {
+
+ sscanf(tmp, "%d ", &mode);
+
+ if (pregpriv && mode < 2) {
+ pmlmeinfo->bAcceptAddbaReq = mode;
+ DBG_871X("pmlmeinfo->bAcceptAddbaReq =%d\n", pmlmeinfo->bAcceptAddbaReq);
+ if (mode == 0) {
+ /*tear down Rx AMPDU*/
+ send_delba(padapter, 0, get_my_bssid(&(pmlmeinfo->network)));/* recipient*/
+ }
+ }
+
+ }
+
+ return count;
+}
+
+int proc_get_en_fwps(struct seq_file *m, void *v)
+{
+ struct net_device *dev = m->private;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct registry_priv *pregpriv = &padapter->registrypriv;
+
+ if (pregpriv)
+ DBG_871X_SEL_NL(m, "check_fw_ps = %d , 1:enable get FW PS state , 0: disable get FW PS state\n"
+ , pregpriv->check_fw_ps);
+
+ return 0;
+}
+
+ssize_t proc_set_en_fwps(struct file *file, const char __user *buffer, size_t count, loff_t *pos, void *data)
+{
+ struct net_device *dev = data;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct registry_priv *pregpriv = &padapter->registrypriv;
+ char tmp[32];
+ u32 mode;
+
+ if (count < 1)
+ return -EFAULT;
+
+ if (buffer && !copy_from_user(tmp, buffer, sizeof(tmp))) {
+ sscanf(tmp, "%d ", &mode);
+
+ if (pregpriv && mode < 2) {
+ pregpriv->check_fw_ps = mode;
+ DBG_871X("pregpriv->check_fw_ps =%d\n", pregpriv->check_fw_ps);
+ }
+ }
+ return count;
+}
+
+int proc_get_rx_stbc(struct seq_file *m, void *v)
+{
+ struct net_device *dev = m->private;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct registry_priv *pregpriv = &padapter->registrypriv;
+
+ if (pregpriv)
+ DBG_871X_SEL_NL(m, "%d\n", pregpriv->rx_stbc);
+
+ return 0;
+}
+
+ssize_t proc_set_rx_stbc(struct file *file, const char __user *buffer, size_t count, loff_t *pos, void *data)
+{
+ struct net_device *dev = data;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct registry_priv *pregpriv = &padapter->registrypriv;
+ char tmp[32];
+ u32 mode;
+
+ if (count < 1)
+ return -EFAULT;
+
+ if (buffer && !copy_from_user(tmp, buffer, sizeof(tmp))) {
+ sscanf(tmp, "%d ", &mode);
+
+ if (pregpriv && (mode == 0 || mode == 1 ||
+ mode == 2 || mode == 3)) {
+ pregpriv->rx_stbc = mode;
+ printk("rx_stbc =%d\n", mode);
+ }
+ }
+
+ return count;
+
+}
+
+int proc_get_rssi_disp(struct seq_file *m, void *v)
+{
+ return 0;
+}
+
+ssize_t proc_set_rssi_disp(struct file *file, const char __user *buffer, size_t count, loff_t *pos, void *data)
+{
+ struct net_device *dev = data;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ char tmp[32];
+ u32 enable = 0;
+
+ if (count < 1) {
+ DBG_8192C("argument size is less than 1\n");
+ return -EFAULT;
+ }
+
+ if (buffer && !copy_from_user(tmp, buffer, sizeof(tmp))) {
+ int num = sscanf(tmp, "%x", &enable);
+
+ if (num != 1) {
+ DBG_8192C("invalid set_rssi_disp parameter!\n");
+ return count;
+ }
+
+ if (enable) {
+ DBG_8192C("Linked info Function Enable\n");
+ padapter->bLinkInfoDump = enable;
+ } else {
+ DBG_8192C("Linked info Function Disable\n");
+ padapter->bLinkInfoDump = 0;
+ }
+ }
+ return count;
+}
+
+int proc_get_all_sta_info(struct seq_file *m, void *v)
+{
+ struct net_device *dev = m->private;
+ struct sta_info *psta;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ int i, j;
+ struct list_head *plist, *phead;
+ struct recv_reorder_ctrl *preorder_ctrl;
+
+ DBG_871X_SEL_NL(m, "sta_dz_bitmap = 0x%x, tim_bitmap = 0x%x\n", pstapriv->sta_dz_bitmap, pstapriv->tim_bitmap);
+
+ spin_lock_bh(&pstapriv->sta_hash_lock);
+
+ for (i = 0; i < NUM_STA; i++) {
+ phead = &(pstapriv->sta_hash[i]);
+ plist = get_next(phead);
+
+ while (phead != plist) {
+ psta = LIST_CONTAINOR(plist, struct sta_info, hash_list);
+
+ plist = get_next(plist);
+
+ DBG_871X_SEL_NL(m, "==============================\n");
+ DBG_871X_SEL_NL(m, "sta's macaddr:" MAC_FMT "\n",
+ MAC_ARG(psta->hwaddr));
+ DBG_871X_SEL_NL(m, "rtsen =%d, cts2slef =%d\n",
+ psta->rtsen, psta->cts2self);
+ DBG_871X_SEL_NL(m, "state = 0x%x, aid =%d, macid =%d, raid =%d\n",
+ psta->state, psta->aid, psta->mac_id,
+ psta->raid);
+ DBG_871X_SEL_NL(m, "qos_en =%d, ht_en =%d, init_rate =%d\n",
+ psta->qos_option,
+ psta->htpriv.ht_option,
+ psta->init_rate);
+ DBG_871X_SEL_NL(m, "bwmode =%d, ch_offset =%d, sgi_20m =%d, sgi_40m =%d\n",
+ psta->bw_mode, psta->htpriv.ch_offset,
+ psta->htpriv.sgi_20m,
+ psta->htpriv.sgi_40m);
+ DBG_871X_SEL_NL(m, "ampdu_enable = %d\n",
+ psta->htpriv.ampdu_enable);
+ DBG_871X_SEL_NL(m, "agg_enable_bitmap =%x, candidate_tid_bitmap =%x\n",
+ psta->htpriv.agg_enable_bitmap,
+ psta->htpriv.candidate_tid_bitmap);
+ DBG_871X_SEL_NL(m, "sleepq_len =%d\n",
+ psta->sleepq_len);
+ DBG_871X_SEL_NL(m, "sta_xmitpriv.vo_q_qcnt =%d\n",
+ psta->sta_xmitpriv.vo_q.qcnt);
+ DBG_871X_SEL_NL(m, "sta_xmitpriv.vi_q_qcnt =%d\n",
+ psta->sta_xmitpriv.vi_q.qcnt);
+ DBG_871X_SEL_NL(m, "sta_xmitpriv.be_q_qcnt =%d\n",
+ psta->sta_xmitpriv.be_q.qcnt);
+ DBG_871X_SEL_NL(m, "sta_xmitpriv.bk_q_qcnt =%d\n",
+ psta->sta_xmitpriv.bk_q.qcnt);
+
+ DBG_871X_SEL_NL(m, "capability = 0x%x\n",
+ psta->capability);
+ DBG_871X_SEL_NL(m, "flags = 0x%x\n", psta->flags);
+ DBG_871X_SEL_NL(m, "wpa_psk = 0x%x\n", psta->wpa_psk);
+ DBG_871X_SEL_NL(m, "wpa2_group_cipher = 0x%x\n",
+ psta->wpa2_group_cipher);
+ DBG_871X_SEL_NL(m, "wpa2_pairwise_cipher = 0x%x\n",
+ psta->wpa2_pairwise_cipher);
+ DBG_871X_SEL_NL(m, "qos_info = 0x%x\n", psta->qos_info);
+ DBG_871X_SEL_NL(m, "dot118021XPrivacy = 0x%x\n",
+ psta->dot118021XPrivacy);
+
+ for (j = 0; j < 16; j++) {
+ preorder_ctrl = &psta->recvreorder_ctrl[j];
+ if (preorder_ctrl->enable)
+ DBG_871X_SEL_NL(m, "tid =%d, indicate_seq =%d\n",
+ j, preorder_ctrl->indicate_seq);
+ }
+ DBG_871X_SEL_NL(m, "==============================\n");
+ }
+ }
+
+ spin_unlock_bh(&pstapriv->sta_hash_lock);
+
+ return 0;
+}
+
+int proc_get_btcoex_dbg(struct seq_file *m, void *v)
+{
+ struct net_device *dev = m->private;
+ struct adapter *padapter;
+ char buf[512] = {0};
+
+ padapter = (struct adapter *)rtw_netdev_priv(dev);
+
+ rtw_btcoex_GetDBG(padapter, buf, 512);
+
+ DBG_871X_SEL(m, "%s", buf);
+
+ return 0;
+}
+
+ssize_t proc_set_btcoex_dbg(struct file *file, const char __user *buffer, size_t count, loff_t *pos, void *data)
+{
+ struct net_device *dev = data;
+ struct adapter *padapter;
+ u8 tmp[80] = {0};
+ u32 module[2] = {0};
+ u32 num;
+
+ padapter = (struct adapter *)rtw_netdev_priv(dev);
+
+/* DBG_871X("+" FUNC_ADPT_FMT "\n", FUNC_ADPT_ARG(padapter));*/
+
+ if (!buffer) {
+ DBG_871X(FUNC_ADPT_FMT ": input buffer is NULL!\n",
+ FUNC_ADPT_ARG(padapter));
+
+ return -EFAULT;
+ }
+
+ if (count < 1) {
+ DBG_871X(FUNC_ADPT_FMT ": input length is 0!\n",
+ FUNC_ADPT_ARG(padapter));
+
+ return -EFAULT;
+ }
+
+ num = count;
+ if (num > (sizeof(tmp) - 1))
+ num = (sizeof(tmp) - 1);
+
+ if (copy_from_user(tmp, buffer, num)) {
+ DBG_871X(FUNC_ADPT_FMT ": copy buffer from user space FAIL!\n",
+ FUNC_ADPT_ARG(padapter));
+
+ return -EFAULT;
+ }
+
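+ /* one hex value: 0 clears all BTCOEX debug modules, non-zero enables them all; two values set the mask pair directly */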
+ num = sscanf(tmp, "%x %x", module, module+1);
+ if (num == 1) {
+ if (module[0] == 0)
+ memset(module, 0, sizeof(module));
+ else
+ memset(module, 0xFF, sizeof(module));
+ } else if (num != 2) {
+ DBG_871X(FUNC_ADPT_FMT ": input(\"%s\") format incorrect!\n",
+ FUNC_ADPT_ARG(padapter), tmp);
+
+ if (num == 0)
+ return -EFAULT;
+ }
+
+ DBG_871X(FUNC_ADPT_FMT ": input 0x%08X 0x%08X\n",
+ FUNC_ADPT_ARG(padapter), module[0], module[1]);
+ rtw_btcoex_SetDBG(padapter, module);
+
+ return count;
+}
+
+int proc_get_btcoex_info(struct seq_file *m, void *v)
+{
+ struct net_device *dev = m->private;
+ struct adapter *padapter;
+ const u32 bufsize = 30*100;
+ u8 *pbuf = NULL;
+
+ padapter = (struct adapter *)rtw_netdev_priv(dev);
+
+ pbuf = rtw_zmalloc(bufsize);
+ if (!pbuf)
+ return -ENOMEM;
+
+ rtw_btcoex_DisplayBtCoexInfo(padapter, pbuf, bufsize);
+
+ DBG_871X_SEL(m, "%s\n", pbuf);
+
+ kfree(pbuf);
+
+ return 0;
+}
+
+#endif
diff --git a/drivers/staging/rtl8723bs/core/rtw_eeprom.c b/drivers/staging/rtl8723bs/core/rtw_eeprom.c
new file mode 100644
index 000000000000..35031a7a5dc5
--- /dev/null
+++ b/drivers/staging/rtl8723bs/core/rtw_eeprom.c
@@ -0,0 +1,369 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#define _RTW_EEPROM_C_
+
+#include <drv_conf.h>
+#include <osdep_service.h>
+#include <drv_types.h>
+
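+/* up_clk()/down_clk(): raise/lower the EEPROM serial clock line (_EESK) and wait one CLOCK_RATE period. */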
+void up_clk(_adapter *padapter, u16 *x)
+{
+_func_enter_;
+ *x = *x | _EESK;
+ rtw_write8(padapter, EE_9346CR, (u8)*x);
+ udelay(CLOCK_RATE);
+
+_func_exit_;
+
+}
+
+void down_clk(_adapter *padapter, u16 *x)
+{
+_func_enter_;
+ *x = *x & ~_EESK;
+ rtw_write8(padapter, EE_9346CR, (u8)*x);
+ udelay(CLOCK_RATE);
+_func_exit_;
+}
+
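+/* Clock 'count' bits of 'data' out to the serial EEPROM on _EEDI, MSB first, one _EESK cycle per bit. */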
+void shift_out_bits(_adapter *padapter, u16 data, u16 count)
+{
+ u16 x, mask;
+_func_enter_;
+
+ if (padapter->bSurpriseRemoved == true) {
+ RT_TRACE(_module_rtl871x_eeprom_c_, _drv_err_, ("padapter->bSurpriseRemoved==true"));
+ goto out;
+ }
+ mask = 0x01 << (count - 1);
+ x = rtw_read8(padapter, EE_9346CR);
+
+ x &= ~(_EEDO | _EEDI);
+
+ do {
+ x &= ~_EEDI;
+ if (data & mask)
+ x |= _EEDI;
+ if (padapter->bSurpriseRemoved == true) {
+ RT_TRACE(_module_rtl871x_eeprom_c_, _drv_err_, ("padapter->bSurpriseRemoved==true"));
+ goto out;
+ }
+ rtw_write8(padapter, EE_9346CR, (u8)x);
+ udelay(CLOCK_RATE);
+ up_clk(padapter, &x);
+ down_clk(padapter, &x);
+ mask = mask >> 1;
+ } while (mask);
+ if (padapter->bSurpriseRemoved == true) {
+ RT_TRACE(_module_rtl871x_eeprom_c_, _drv_err_, ("padapter->bSurpriseRemoved==true"));
+ goto out;
+ }
+ x &= ~_EEDI;
+ rtw_write8(padapter, EE_9346CR, (u8)x);
+out:
+_func_exit_;
+}
+
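+/* Clock 16 bits in from the serial EEPROM, MSB first, sampling _EEDO after each rising clock edge. */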
+u16 shift_in_bits(_adapter *padapter)
+{
+ u16 x, d = 0, i;
+_func_enter_;
+ if (padapter->bSurpriseRemoved == true) {
+ RT_TRACE(_module_rtl871x_eeprom_c_, _drv_err_, ("padapter->bSurpriseRemoved==true"));
+ goto out;
+ }
+ x = rtw_read8(padapter, EE_9346CR);
+
+ x &= ~(_EEDO | _EEDI);
+ d = 0;
+
+ for (i = 0; i < 16; i++) {
+ d = d << 1;
+ up_clk(padapter, &x);
+ if (padapter->bSurpriseRemoved == true) {
+ RT_TRACE(_module_rtl871x_eeprom_c_, _drv_err_, ("padapter->bSurpriseRemoved==true"));
+ goto out;
+ }
+ x = rtw_read8(padapter, EE_9346CR);
+
+ x &= ~(_EEDI);
+ if (x & _EEDO)
+ d |= 1;
+
+ down_clk(padapter, &x);
+ }
+out:
+_func_exit_;
+
+ return d;
+}
+
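+/* Drop chip select and clock, then re-assert _EECS to terminate the current EEPROM command. */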
+void standby(_adapter *padapter)
+{
+ u8 x;
+_func_enter_;
+ x = rtw_read8(padapter, EE_9346CR);
+
+ x &= ~(_EECS | _EESK);
+ rtw_write8(padapter, EE_9346CR, x);
+
+ udelay(CLOCK_RATE);
+ x |= _EECS;
+ rtw_write8(padapter, EE_9346CR, x);
+ udelay(CLOCK_RATE);
+_func_exit_;
+}
+
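+/* Poll _EEDO for up to 200 clock periods until the EEPROM signals that its programming cycle is done. */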
+u16 wait_eeprom_cmd_done(_adapter *padapter)
+{
+ u8 x;
+ u16 i, res = false;
+_func_enter_;
+ standby(padapter);
+ for (i = 0; i < 200; i++) {
+ x = rtw_read8(padapter, EE_9346CR);
+ if (x & _EEDO) {
+ res = true;
+ goto exit;
+ }
+ udelay(CLOCK_RATE);
+ }
+exit:
+_func_exit_;
+ return res;
+}
+
+void eeprom_clean(_adapter *padapter)
+{
+ u16 x;
+_func_enter_;
+ if (padapter->bSurpriseRemoved == true) {
+ RT_TRACE(_module_rtl871x_eeprom_c_, _drv_err_, ("padapter->bSurpriseRemoved==true"));
+ goto out;
+ }
+ x = rtw_read8(padapter, EE_9346CR);
+ if (padapter->bSurpriseRemoved == true) {
+ RT_TRACE(_module_rtl871x_eeprom_c_, _drv_err_, ("padapter->bSurpriseRemoved==true"));
+ goto out;
+ }
+ x &= ~(_EECS | _EEDI);
+ rtw_write8(padapter, EE_9346CR, (u8)x);
+ if (padapter->bSurpriseRemoved == true) {
+ RT_TRACE(_module_rtl871x_eeprom_c_, _drv_err_, ("padapter->bSurpriseRemoved==true"));
+ goto out;
+ }
+ up_clk(padapter, &x);
+ if (padapter->bSurpriseRemoved == true) {
+ RT_TRACE(_module_rtl871x_eeprom_c_, _drv_err_, ("padapter->bSurpriseRemoved==true"));
+ goto out;
+ }
+ down_clk(padapter, &x);
+out:
+_func_exit_;
+}
+
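+/* Write one 16-bit word: enable writes (EWEN), shift out the WRITE opcode, address and data, wait for completion, then disable writes (EWDS). */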
+void eeprom_write16(_adapter *padapter, u16 reg, u16 data)
+{
+ u8 x;
+
+_func_enter_;
+
+ x = rtw_read8(padapter, EE_9346CR);
+
+ x &= ~(_EEDI | _EEDO | _EESK | _EEM0);
+ x |= _EEM1 | _EECS;
+ rtw_write8(padapter, EE_9346CR, x);
+
+ shift_out_bits(padapter, EEPROM_EWEN_OPCODE, 5);
+
+ if (padapter->EepromAddressSize == 8) /*CF+ and SDIO*/
+ shift_out_bits(padapter, 0, 6);
+ else /*USB*/
+ shift_out_bits(padapter, 0, 4);
+
+ standby(padapter);
+
+/* Commented out by rcnjko, 2004.0
+* Erase this particular word. Write the erase opcode and register
+* number in that order. The opcode is 3bits in length; reg is 6 bits long.
+* shift_out_bits(Adapter, EEPROM_ERASE_OPCODE, 3);
+* shift_out_bits(Adapter, reg, Adapter->EepromAddressSize);
+*
+* if (wait_eeprom_cmd_done(Adapter ) == false)
+* {
+* return;
+* }
+*/
+
+ standby(padapter);
+
+ /* write the new word to the EEPROM*/
+
+ /* send the write opcode to the EEPROM */
+ shift_out_bits(padapter, EEPROM_WRITE_OPCODE, 3);
+
+ /* select which word in the EEPROM that we are writing to.*/
+ shift_out_bits(padapter, reg, padapter->EepromAddressSize);
+
+ /* write the data to the selected EEPROM word.*/
+ shift_out_bits(padapter, data, 16);
+
+ if (wait_eeprom_cmd_done(padapter) == false)
+ goto exit;
+
+ standby(padapter);
+
+ shift_out_bits(padapter, EEPROM_EWDS_OPCODE, 5);
+ shift_out_bits(padapter, reg, 4);
+
+ eeprom_clean(padapter);
+exit:
+_func_exit_;
+ return;
+}
+
+u16 eeprom_read16(_adapter *padapter, u16 reg) /*ReadEEprom*/
+{
+
+ u16 x;
+ u16 data = 0;
+
+_func_enter_;
+
+ if (padapter->bSurpriseRemoved == true) {
+ RT_TRACE(_module_rtl871x_eeprom_c_, _drv_err_, ("padapter->bSurpriseRemoved==true"));
+ goto out;
+ }
+ /* select EEPROM, reset bits, set _EECS*/
+ x = rtw_read8(padapter, EE_9346CR);
+
+ if (padapter->bSurpriseRemoved == true) {
+ RT_TRACE(_module_rtl871x_eeprom_c_, _drv_err_, ("padapter->bSurpriseRemoved==true"));
+ goto out;
+ }
+
+ x &= ~(_EEDI | _EEDO | _EESK | _EEM0);
+ x |= _EEM1 | _EECS;
+ rtw_write8(padapter, EE_9346CR, (unsigned char)x);
+
+ /* write the read opcode and register number in that order*/
+ /* The opcode is 3bits in length, reg is 6 bits long*/
+ shift_out_bits(padapter, EEPROM_READ_OPCODE, 3);
+ shift_out_bits(padapter, reg, padapter->EepromAddressSize);
+
+ /* Now read the data (16 bits) in from the selected EEPROM word*/
+ data = shift_in_bits(padapter);
+
+ eeprom_clean(padapter);
+out:
+_func_exit_;
+ return data;
+
+
+}
+
+/* From even offset */
+void eeprom_read_sz(_adapter *padapter, u16 reg, u8 *data, u32 sz)
+{
+
+ u16 x, data16;
+ u32 i;
+_func_enter_;
+ if (padapter->bSurpriseRemoved == true) {
+ RT_TRACE(_module_rtl871x_eeprom_c_, _drv_err_, ("padapter->bSurpriseRemoved==true"));
+ goto out;
+ }
+ /* select EEPROM, reset bits, set _EECS*/
+ x = rtw_read8(padapter, EE_9346CR);
+
+ if (padapter->bSurpriseRemoved == true) {
+ RT_TRACE(_module_rtl871x_eeprom_c_, _drv_err_, ("padapter->bSurpriseRemoved==true"));
+ goto out;
+ }
+
+ x &= ~(_EEDI | _EEDO | _EESK | _EEM0);
+ x |= _EEM1 | _EECS;
+ rtw_write8(padapter, EE_9346CR, (unsigned char)x);
+
+ /* write the read opcode and register number in that order*/
+ /* The opcode is 3bits in length, reg is 6 bits long*/
+ shift_out_bits(padapter, EEPROM_READ_OPCODE, 3);
+ shift_out_bits(padapter, reg, padapter->EepromAddressSize);
+
+
+ for (i = 0; i < sz; i += 2) {
+ data16 = shift_in_bits(padapter);
+ data[i] = data16 & 0xff;
+ data[i+1] = data16 >> 8;
+ }
+
+ eeprom_clean(padapter);
+out:
+_func_exit_;
+
+
+
+}
+
+
+/* addr_off: byte offset of the entry in the eeprom (not the 16-bit word index (reg); that is, addr_off != reg) */
+u8 eeprom_read(_adapter *padapter, u32 addr_off, u8 sz, u8 *rbuf)
+{
+ u8 quotient, remainder, addr_2align_odd;
+ u16 reg, stmp, i = 0, idx = 0;
+_func_enter_;
+ reg = (u16)(addr_off >> 1);
+ addr_2align_odd = (u8)(addr_off & 0x1);
+
+ /* reads that start at the high (odd) byte: e.g. 1, 3, 5, 7, 9, ... */
+ if (addr_2align_odd) {
+ stmp = eeprom_read16(padapter, reg);
+ rbuf[idx++] = (u8) ((stmp>>8)&0xff); /* return high part of the short */
+ reg++; sz--;
+ }
+
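+ /* read the remaining bytes as full 16-bit words, plus one trailing low byte if sz ends up odd */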
+ quotient = sz >> 1;
+ remainder = sz & 0x1;
+
+ for (i = 0; i < quotient; i++) {
+ stmp = eeprom_read16(padapter, reg+i);
+ rbuf[idx++] = (u8) (stmp&0xff);
+ rbuf[idx++] = (u8) ((stmp>>8)&0xff);
+ }
+
+ reg = reg+i;
+ if (remainder) { /* read ends at the low (even) byte of a short: 0, 2, 4, 6, ... */
+ stmp = eeprom_read16(padapter, reg);
+ rbuf[idx] = (u8)(stmp & 0xff);
+ }
+_func_exit_;
+ return true;
+}
+
+void read_eeprom_content(_adapter *padapter)
+{
+
+_func_enter_;
+
+
+_func_exit_;
+}
diff --git a/drivers/staging/rtl8723bs/core/rtw_efuse.c b/drivers/staging/rtl8723bs/core/rtw_efuse.c
new file mode 100644
index 000000000000..8e29802fc67f
--- /dev/null
+++ b/drivers/staging/rtl8723bs/core/rtw_efuse.c
@@ -0,0 +1,635 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#define _RTW_EFUSE_C_
+
+#include <drv_types.h>
+#include <rtw_debug.h>
+#include <hal_data.h>
+#include <linux/jiffies.h>
+
+
+/*------------------------Define local variable------------------------------*/
+u8 fakeEfuseBank = 0;
+u32 fakeEfuseUsedBytes = 0;
+u8 fakeEfuseContent[EFUSE_MAX_HW_SIZE] = {0};
+u8 fakeEfuseInitMap[EFUSE_MAX_MAP_LEN] = {0};
+u8 fakeEfuseModifiedMap[EFUSE_MAX_MAP_LEN] = {0};
+
+u32 BTEfuseUsedBytes = 0;
+u8 BTEfuseContent[EFUSE_MAX_BT_BANK][EFUSE_MAX_HW_SIZE];
+u8 BTEfuseInitMap[EFUSE_BT_MAX_MAP_LEN] = {0};
+u8 BTEfuseModifiedMap[EFUSE_BT_MAX_MAP_LEN] = {0};
+
+u32 fakeBTEfuseUsedBytes = 0;
+u8 fakeBTEfuseContent[EFUSE_MAX_BT_BANK][EFUSE_MAX_HW_SIZE];
+u8 fakeBTEfuseInitMap[EFUSE_BT_MAX_MAP_LEN] = {0};
+u8 fakeBTEfuseModifiedMap[EFUSE_BT_MAX_MAP_LEN] = {0};
+/*------------------------Define local variable------------------------------*/
+
+/* */
+#define REG_EFUSE_CTRL 0x0030
+#define EFUSE_CTRL REG_EFUSE_CTRL /* E-Fuse Control. */
+/* */
+
+bool
+Efuse_Read1ByteFromFakeContent(
+ struct adapter *padapter,
+ u16 Offset,
+ u8 *Value);
+bool
+Efuse_Read1ByteFromFakeContent(
+ struct adapter *padapter,
+ u16 Offset,
+ u8 *Value)
+{
+ if (Offset >= EFUSE_MAX_HW_SIZE)
+ return false;
+ /* DbgPrint("Read fake content, offset = %d\n", Offset); */
+ if (fakeEfuseBank == 0)
+ *Value = fakeEfuseContent[Offset];
+ else
+ *Value = fakeBTEfuseContent[fakeEfuseBank-1][Offset];
+ return true;
+}
+
+bool
+Efuse_Write1ByteToFakeContent(
+ struct adapter *padapter,
+ u16 Offset,
+ u8 Value);
+bool
+Efuse_Write1ByteToFakeContent(
+ struct adapter *padapter,
+ u16 Offset,
+ u8 Value)
+{
+ if (Offset >= EFUSE_MAX_HW_SIZE)
+ return false;
+
+ if (fakeEfuseBank == 0)
+ fakeEfuseContent[Offset] = Value;
+ else
+ fakeBTEfuseContent[fakeEfuseBank-1][Offset] = Value;
+ return true;
+}
+
+/*-----------------------------------------------------------------------------
+ * Function: Efuse_PowerSwitch
+ *
+ * Overview: When we want to enable write operation, we should change to
+ * pwr on state. When we stop write, we should switch to 500k mode
+ * and disable LDO 2.5V.
+ *
+ * Input: NONE
+ *
+ * Output: NONE
+ *
+ * Return: NONE
+ *
+ * Revised History:
+ * When Who Remark
+ * 11/17/2008 MHC Create Version 0.
+ *
+ *---------------------------------------------------------------------------*/
+void
+Efuse_PowerSwitch(
+struct adapter *padapter,
+u8 bWrite,
+u8 PwrState)
+{
+ padapter->HalFunc.EfusePowerSwitch(padapter, bWrite, PwrState);
+}
+
+/*-----------------------------------------------------------------------------
+ * Function: Efuse_GetCurrentSize
+ *
+ * Overview: Get current efuse size!!!
+ *
+ * Input: NONE
+ *
+ * Output: NONE
+ *
+ * Return: NONE
+ *
+ * Revised History:
+ * When Who Remark
+ * 11/16/2008 MHC Create Version 0.
+ *
+ *---------------------------------------------------------------------------*/
+u16
+Efuse_GetCurrentSize(
+ struct adapter *padapter,
+ u8 efuseType,
+ bool bPseudoTest)
+{
+ u16 ret = 0;
+
+ ret = padapter->HalFunc.EfuseGetCurrentSize(padapter, efuseType, bPseudoTest);
+
+ return ret;
+}
+
+/* 11/16/2008 MH Add description. Get the number of enabled words in the current efuse area. */
+u8
+Efuse_CalculateWordCnts(u8 word_en)
+{
+ u8 word_cnts = 0;
+ if (!(word_en & BIT(0)))
+ word_cnts++; /* 0 : write enable */
+ if (!(word_en & BIT(1)))
+ word_cnts++;
+ if (!(word_en & BIT(2)))
+ word_cnts++;
+ if (!(word_en & BIT(3)))
+ word_cnts++;
+ return word_cnts;
+}
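+
+/*
+ * Example: the word_en bits are active low, so word_en = 0x9 (binary 1001)
+ * leaves bits 1 and 2 clear, meaning words 1 and 2 of the 4-word section are
+ * enabled and Efuse_CalculateWordCnts() returns 2.
+ */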
+
+/* */
+/* Description: */
+/*	1. Execute E-Fuse read byte operation according to the map offset and */
+/*	   save to the E-Fuse table. */
+/*	2. Referred from SD1 Richard. */
+/* */
+/* Assumption: */
+/* 1. Boot from E-Fuse and successfully auto-load. */
+/* 2. PASSIVE_LEVEL (USB interface) */
+/* */
+/* Created by Roger, 2008.10.21. */
+/* */
+/* 2008/12/12 MH 1. Reorganize code flow and reserve bytes. and add description. */
+/* 2. Add efuse utilization collect. */
+/* 2008/12/22 MH Read Efuse must check if we write section 1 data again!!! Sec1 */
+/* write addr must be after sec5. */
+/* */
+
+void
+efuse_ReadEFuse(
+ struct adapter *Adapter,
+ u8 efuseType,
+ u16 _offset,
+ u16 _size_byte,
+ u8 *pbuf,
+bool bPseudoTest
+ );
+void
+efuse_ReadEFuse(
+ struct adapter *Adapter,
+ u8 efuseType,
+ u16 _offset,
+ u16 _size_byte,
+ u8 *pbuf,
+bool bPseudoTest
+ )
+{
+ Adapter->HalFunc.ReadEFuse(Adapter, efuseType, _offset, _size_byte, pbuf, bPseudoTest);
+}
+
+void
+EFUSE_GetEfuseDefinition(
+ struct adapter *padapter,
+ u8 efuseType,
+ u8 type,
+ void *pOut,
+ bool bPseudoTest
+ )
+{
+ padapter->HalFunc.EFUSEGetEfuseDefinition(padapter, efuseType, type, pOut, bPseudoTest);
+}
+
+/*-----------------------------------------------------------------------------
+ * Function: EFUSE_Read1Byte
+ *
+ * Overview:	Copy from WMAC for EFUSE read 1 byte.
+ *
+ * Input: NONE
+ *
+ * Output: NONE
+ *
+ * Return:		The byte read, or 0xFF if Address is out of range
+ *
+ * Revised History:
+ * When Who Remark
+ * 09/23/2008 MHC Copy from WMAC.
+ *
+ *---------------------------------------------------------------------------*/
+u8
+EFUSE_Read1Byte(
+struct adapter *Adapter,
+u16 Address)
+{
+ u8 data;
+	u8 Bytetemp = 0x00;
+	u8 temp = 0x00;
+ u32 k = 0;
+ u16 contentLen = 0;
+
+ EFUSE_GetEfuseDefinition(Adapter, EFUSE_WIFI, TYPE_EFUSE_REAL_CONTENT_LEN, (void *)&contentLen, false);
+
+ if (Address < contentLen) {/* E-fuse 512Byte */
+ /* Write E-fuse Register address bit0~7 */
+ temp = Address & 0xFF;
+ rtw_write8(Adapter, EFUSE_CTRL+1, temp);
+ Bytetemp = rtw_read8(Adapter, EFUSE_CTRL+2);
+ /* Write E-fuse Register address bit8~9 */
+ temp = ((Address >> 8) & 0x03) | (Bytetemp & 0xFC);
+ rtw_write8(Adapter, EFUSE_CTRL+2, temp);
+
+ /* Write 0x30[31]= 0 */
+ Bytetemp = rtw_read8(Adapter, EFUSE_CTRL+3);
+ temp = Bytetemp & 0x7F;
+ rtw_write8(Adapter, EFUSE_CTRL+3, temp);
+
+ /* Wait Write-ready (0x30[31]= 1) */
+ Bytetemp = rtw_read8(Adapter, EFUSE_CTRL+3);
+ while (!(Bytetemp & 0x80)) {
+ Bytetemp = rtw_read8(Adapter, EFUSE_CTRL+3);
+ k++;
+ if (k == 1000) {
+ k = 0;
+ break;
+ }
+ }
+ data = rtw_read8(Adapter, EFUSE_CTRL);
+ return data;
+ } else
+ return 0xFF;
+
+} /* EFUSE_Read1Byte */
+
+/* 11/16/2008 MH Read one byte from real Efuse. */
+u8
+efuse_OneByteRead(
+struct adapter *padapter,
+u16 addr,
+u8 *data,
+bool bPseudoTest)
+{
+ u32 tmpidx = 0;
+ u8 bResult;
+ u8 readbyte;
+
+ /* DBG_871X("===> EFUSE_OneByteRead(), addr = %x\n", addr); */
+ /* DBG_871X("===> EFUSE_OneByteRead() start, 0x34 = 0x%X\n", rtw_read32(padapter, EFUSE_TEST)); */
+
+ if (bPseudoTest) {
+ bResult = Efuse_Read1ByteFromFakeContent(padapter, addr, data);
+ return bResult;
+ }
+
+	/* <20130121, Kordan> For SMIC EFUSE specification. */
+ /* 0x34[11]: SW force PGMEN input of efuse to high. (for the bank selected by 0x34[9:8]) */
+ /* PHY_SetMacReg(padapter, 0x34, BIT11, 0); */
+ rtw_write16(padapter, 0x34, rtw_read16(padapter, 0x34) & (~BIT11));
+
+ /* -----------------e-fuse reg ctrl --------------------------------- */
+ /* address */
+ rtw_write8(padapter, EFUSE_CTRL+1, (u8)(addr&0xff));
+ rtw_write8(padapter, EFUSE_CTRL+2, ((u8)((addr>>8) & 0x03)) |
+ (rtw_read8(padapter, EFUSE_CTRL+2)&0xFC));
+
+ /* rtw_write8(padapter, EFUSE_CTRL+3, 0x72); read cmd */
+ /* Write bit 32 0 */
+ readbyte = rtw_read8(padapter, EFUSE_CTRL+3);
+ rtw_write8(padapter, EFUSE_CTRL+3, (readbyte & 0x7f));
+
+ while (!(0x80 & rtw_read8(padapter, EFUSE_CTRL+3)) && (tmpidx < 1000)) {
+ mdelay(1);
+ tmpidx++;
+ }
+	if (tmpidx < 1000) {
+ *data = rtw_read8(padapter, EFUSE_CTRL);
+ bResult = true;
+ } else{
+ *data = 0xff;
+ bResult = false;
+ DBG_871X("%s: [ERROR] addr = 0x%x bResult =%d time out 1s !!!\n", __func__, addr, bResult);
+ DBG_871X("%s: [ERROR] EFUSE_CTRL = 0x%08x !!!\n", __func__, rtw_read32(padapter, EFUSE_CTRL));
+ }
+
+ return bResult;
+}
+
+/* 11/16/2008 MH Write one byte to real Efuse. */
+u8
+efuse_OneByteWrite(
+struct adapter *padapter,
+u16 addr,
+u8 data,
+bool bPseudoTest)
+{
+ u8 tmpidx = 0;
+ u8 bResult = false;
+ u32 efuseValue = 0;
+
+ /* DBG_871X("===> EFUSE_OneByteWrite(), addr = %x data =%x\n", addr, data); */
+ /* DBG_871X("===> EFUSE_OneByteWrite() start, 0x34 = 0x%X\n", rtw_read32(padapter, EFUSE_TEST)); */
+
+ if (bPseudoTest) {
+ bResult = Efuse_Write1ByteToFakeContent(padapter, addr, data);
+ return bResult;
+ }
+
+
+ /* -----------------e-fuse reg ctrl --------------------------------- */
+ /* address */
+
+
+ efuseValue = rtw_read32(padapter, EFUSE_CTRL);
+ efuseValue |= (BIT21|BIT31);
+ efuseValue &= ~(0x3FFFF);
+ efuseValue |= ((addr<<8 | data) & 0x3FFFF);
+
+
+ /* <20130227, Kordan> 8192E MP chip A-cut had better not set 0x34[11] until B-Cut. */
+
+	/* <20130121, Kordan> For SMIC EFUSE specification. */
+ /* 0x34[11]: SW force PGMEN input of efuse to high. (for the bank selected by 0x34[9:8]) */
+ /* PHY_SetMacReg(padapter, 0x34, BIT11, 1); */
+ rtw_write16(padapter, 0x34, rtw_read16(padapter, 0x34) | (BIT11));
+ rtw_write32(padapter, EFUSE_CTRL, 0x90600000|((addr<<8 | data)));
+
+ while ((0x80 & rtw_read8(padapter, EFUSE_CTRL+3)) && (tmpidx < 100)) {
+ mdelay(1);
+ tmpidx++;
+ }
+
+ if (tmpidx < 100) {
+ bResult = true;
+ } else{
+ bResult = false;
+ DBG_871X("%s: [ERROR] addr = 0x%x , efuseValue = 0x%x , bResult =%d time out 1s !!!\n",
+ __func__, addr, efuseValue, bResult);
+ DBG_871X("%s: [ERROR] EFUSE_CTRL = 0x%08x !!!\n", __func__, rtw_read32(padapter, EFUSE_CTRL));
+ }
+
+ /* disable Efuse program enable */
+ PHY_SetMacReg(padapter, EFUSE_TEST, BIT(11), 0);
+
+ return bResult;
+}
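+
+/*
+ * Illustration of the write control word built above: for addr = 0x12 and
+ * data = 0xAB, (addr << 8 | data) = 0x12AB, so EFUSE_CTRL is written with
+ * 0x90600000 | 0x12AB = 0x906012AB; bit 31 then reads back as 1 until the
+ * hardware finishes programming the byte.
+ */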
+
+int
+Efuse_PgPacketRead(struct adapter *padapter,
+ u8 offset,
+ u8 *data,
+ bool bPseudoTest)
+{
+ int ret = 0;
+
+ ret = padapter->HalFunc.Efuse_PgPacketRead(padapter, offset, data, bPseudoTest);
+
+ return ret;
+}
+
+int
+Efuse_PgPacketWrite(struct adapter *padapter,
+ u8 offset,
+ u8 word_en,
+ u8 *data,
+ bool bPseudoTest)
+{
+ int ret;
+
+ ret = padapter->HalFunc.Efuse_PgPacketWrite(padapter, offset, word_en, data, bPseudoTest);
+
+ return ret;
+}
+
+/*-----------------------------------------------------------------------------
+ * Function: efuse_WordEnableDataRead
+ *
+ * Overview: Read allowed word in current efuse section data.
+ *
+ * Input: NONE
+ *
+ * Output: NONE
+ *
+ * Return: NONE
+ *
+ * Revised History:
+ * When Who Remark
+ * 11/16/2008 MHC Create Version 0.
+ * 11/21/2008 MHC Fix Write bug when we only enable late word.
+ *
+ *---------------------------------------------------------------------------*/
+void
+efuse_WordEnableDataRead(u8 word_en,
+ u8 *sourdata,
+ u8 *targetdata)
+{
+ if (!(word_en&BIT(0))) {
+ targetdata[0] = sourdata[0];
+ targetdata[1] = sourdata[1];
+ }
+ if (!(word_en&BIT(1))) {
+ targetdata[2] = sourdata[2];
+ targetdata[3] = sourdata[3];
+ }
+ if (!(word_en&BIT(2))) {
+ targetdata[4] = sourdata[4];
+ targetdata[5] = sourdata[5];
+ }
+ if (!(word_en&BIT(3))) {
+ targetdata[6] = sourdata[6];
+ targetdata[7] = sourdata[7];
+ }
+}
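+
+/*
+ * Example: word i of an 8-byte section maps to bytes 2*i and 2*i+1, and the
+ * word_en bits are active low. With word_en = 0x0C (binary 1100) only words
+ * 0 and 1 are enabled, so efuse_WordEnableDataRead() copies sourdata[0..3]
+ * into targetdata[0..3] and leaves bytes 4..7 untouched.
+ */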
+
+
+u8
+Efuse_WordEnableDataWrite(struct adapter *padapter,
+ u16 efuse_addr,
+ u8 word_en,
+ u8 *data,
+ bool bPseudoTest)
+{
+ u8 ret = 0;
+
+ ret = padapter->HalFunc.Efuse_WordEnableDataWrite(padapter, efuse_addr, word_en, data, bPseudoTest);
+
+ return ret;
+}
+
+/*-----------------------------------------------------------------------------
+ * Function: Efuse_ReadAllMap
+ *
+ * Overview: Read All Efuse content
+ *
+ * Input: NONE
+ *
+ * Output: NONE
+ *
+ * Return: NONE
+ *
+ * Revised History:
+ * When Who Remark
+ * 11/11/2008 MHC Create Version 0.
+ *
+ *---------------------------------------------------------------------------*/
+void
+Efuse_ReadAllMap(
+ struct adapter *padapter,
+ u8 efuseType,
+ u8 *Efuse,
+ bool bPseudoTest);
+void
+Efuse_ReadAllMap(
+ struct adapter *padapter,
+ u8 efuseType,
+ u8 *Efuse,
+ bool bPseudoTest)
+{
+ u16 mapLen = 0;
+
+ Efuse_PowerSwitch(padapter, false, true);
+
+ EFUSE_GetEfuseDefinition(padapter, efuseType, TYPE_EFUSE_MAP_LEN, (void *)&mapLen, bPseudoTest);
+
+ efuse_ReadEFuse(padapter, efuseType, 0, mapLen, Efuse, bPseudoTest);
+
+ Efuse_PowerSwitch(padapter, false, false);
+}
+
+/*-----------------------------------------------------------------------------
+ * Function: efuse_ShadowRead1Byte
+ * efuse_ShadowRead2Byte
+ * efuse_ShadowRead4Byte
+ *
+ * Overview: Read from efuse init map by one/two/four bytes !!!!!
+ *
+ * Input: NONE
+ *
+ * Output: NONE
+ *
+ * Return: NONE
+ *
+ * Revised History:
+ * When Who Remark
+ * 11/12/2008 MHC Create Version 0.
+ *
+ *---------------------------------------------------------------------------*/
+static void
+efuse_ShadowRead1Byte(
+struct adapter *padapter,
+u16 Offset,
+ u8 *Value)
+{
+ struct eeprom_priv *pEEPROM = GET_EEPROM_EFUSE_PRIV(padapter);
+
+ *Value = pEEPROM->efuse_eeprom_data[Offset];
+
+} /* EFUSE_ShadowRead1Byte */
+
+/* Read Two Bytes */
+static void
+efuse_ShadowRead2Byte(
+struct adapter *padapter,
+u16 Offset,
+ u16 *Value)
+{
+ struct eeprom_priv *pEEPROM = GET_EEPROM_EFUSE_PRIV(padapter);
+
+ *Value = pEEPROM->efuse_eeprom_data[Offset];
+ *Value |= pEEPROM->efuse_eeprom_data[Offset+1]<<8;
+
+} /* EFUSE_ShadowRead2Byte */
+
+/* Read Four Bytes */
+static void
+efuse_ShadowRead4Byte(
+struct adapter *padapter,
+u16 Offset,
+ u32 *Value)
+{
+ struct eeprom_priv *pEEPROM = GET_EEPROM_EFUSE_PRIV(padapter);
+
+ *Value = pEEPROM->efuse_eeprom_data[Offset];
+ *Value |= pEEPROM->efuse_eeprom_data[Offset+1]<<8;
+ *Value |= pEEPROM->efuse_eeprom_data[Offset+2]<<16;
+ *Value |= pEEPROM->efuse_eeprom_data[Offset+3]<<24;
+
+} /* efuse_ShadowRead4Byte */
+
+/*-----------------------------------------------------------------------------
+ * Function: EFUSE_ShadowMapUpdate
+ *
+ * Overview: Transfer current EFUSE content to shadow init and modify map.
+ *
+ * Input: NONE
+ *
+ * Output: NONE
+ *
+ * Return: NONE
+ *
+ * Revised History:
+ * When Who Remark
+ * 11/13/2008 MHC Create Version 0.
+ *
+ *---------------------------------------------------------------------------*/
+void EFUSE_ShadowMapUpdate(
+ struct adapter *padapter,
+ u8 efuseType,
+ bool bPseudoTest)
+{
+ struct eeprom_priv *pEEPROM = GET_EEPROM_EFUSE_PRIV(padapter);
+ u16 mapLen = 0;
+
+ EFUSE_GetEfuseDefinition(padapter, efuseType, TYPE_EFUSE_MAP_LEN, (void *)&mapLen, bPseudoTest);
+
+ if (pEEPROM->bautoload_fail_flag == true) {
+ memset(pEEPROM->efuse_eeprom_data, 0xFF, mapLen);
+ } else{
+ Efuse_ReadAllMap(padapter, efuseType, pEEPROM->efuse_eeprom_data, bPseudoTest);
+ }
+
+ /* PlatformMoveMemory((void *)&pHalData->EfuseMap[EFUSE_MODIFY_MAP][0], */
+ /* void *)&pHalData->EfuseMap[EFUSE_INIT_MAP][0], mapLen); */
+} /* EFUSE_ShadowMapUpdate */
+
+
+/*-----------------------------------------------------------------------------
+ * Function: EFUSE_ShadowRead
+ *
+ * Overview: Read from efuse init map !!!!!
+ *
+ * Input: NONE
+ *
+ * Output: NONE
+ *
+ * Return: NONE
+ *
+ * Revised History:
+ * When Who Remark
+ * 11/12/2008 MHC Create Version 0.
+ *
+ *---------------------------------------------------------------------------*/
+void
+EFUSE_ShadowRead(
+ struct adapter *padapter,
+ u8 Type,
+ u16 Offset,
+ u32 *Value)
+{
+ if (Type == 1)
+ efuse_ShadowRead1Byte(padapter, Offset, (u8 *)Value);
+ else if (Type == 2)
+ efuse_ShadowRead2Byte(padapter, Offset, (u16 *)Value);
+ else if (Type == 4)
+ efuse_ShadowRead4Byte(padapter, Offset, (u32 *)Value);
+
+} /* EFUSE_ShadowRead*/
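+
+/*
+ * Usage sketch (the offset below is made up for illustration): a caller that
+ * needs a 16-bit field from the shadow map can do
+ *
+ *	u32 val = 0;
+ *
+ *	EFUSE_ShadowRead(padapter, 2, 0x0a, &val);
+ *
+ * Type selects the access width (1, 2 or 4 bytes); the value comes from the
+ * efuse_eeprom_data shadow filled by EFUSE_ShadowMapUpdate(), not from the
+ * hardware.
+ */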
diff --git a/drivers/staging/rtl8723bs/core/rtw_ieee80211.c b/drivers/staging/rtl8723bs/core/rtw_ieee80211.c
new file mode 100644
index 000000000000..7b37e085b793
--- /dev/null
+++ b/drivers/staging/rtl8723bs/core/rtw_ieee80211.c
@@ -0,0 +1,1430 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#define _IEEE80211_C
+
+#include <drv_types.h>
+#include <rtw_debug.h>
+#include <linux/of.h>
+
+u8 RTW_WPA_OUI_TYPE[] = { 0x00, 0x50, 0xf2, 1 };
+u16 RTW_WPA_VERSION = 1;
+u8 WPA_AUTH_KEY_MGMT_NONE[] = { 0x00, 0x50, 0xf2, 0 };
+u8 WPA_AUTH_KEY_MGMT_UNSPEC_802_1X[] = { 0x00, 0x50, 0xf2, 1 };
+u8 WPA_AUTH_KEY_MGMT_PSK_OVER_802_1X[] = { 0x00, 0x50, 0xf2, 2 };
+u8 WPA_CIPHER_SUITE_NONE[] = { 0x00, 0x50, 0xf2, 0 };
+u8 WPA_CIPHER_SUITE_WEP40[] = { 0x00, 0x50, 0xf2, 1 };
+u8 WPA_CIPHER_SUITE_TKIP[] = { 0x00, 0x50, 0xf2, 2 };
+u8 WPA_CIPHER_SUITE_WRAP[] = { 0x00, 0x50, 0xf2, 3 };
+u8 WPA_CIPHER_SUITE_CCMP[] = { 0x00, 0x50, 0xf2, 4 };
+u8 WPA_CIPHER_SUITE_WEP104[] = { 0x00, 0x50, 0xf2, 5 };
+
+u16 RSN_VERSION_BSD = 1;
+u8 RSN_AUTH_KEY_MGMT_UNSPEC_802_1X[] = { 0x00, 0x0f, 0xac, 1 };
+u8 RSN_AUTH_KEY_MGMT_PSK_OVER_802_1X[] = { 0x00, 0x0f, 0xac, 2 };
+u8 RSN_CIPHER_SUITE_NONE[] = { 0x00, 0x0f, 0xac, 0 };
+u8 RSN_CIPHER_SUITE_WEP40[] = { 0x00, 0x0f, 0xac, 1 };
+u8 RSN_CIPHER_SUITE_TKIP[] = { 0x00, 0x0f, 0xac, 2 };
+u8 RSN_CIPHER_SUITE_WRAP[] = { 0x00, 0x0f, 0xac, 3 };
+u8 RSN_CIPHER_SUITE_CCMP[] = { 0x00, 0x0f, 0xac, 4 };
+u8 RSN_CIPHER_SUITE_WEP104[] = { 0x00, 0x0f, 0xac, 5 };
+/* */
+/* for adhoc-master to generate ie and provide supported-rate to fw */
+/* */
+
+static u8 WIFI_CCKRATES[] = {
+ (IEEE80211_CCK_RATE_1MB | IEEE80211_BASIC_RATE_MASK),
+ (IEEE80211_CCK_RATE_2MB | IEEE80211_BASIC_RATE_MASK),
+ (IEEE80211_CCK_RATE_5MB | IEEE80211_BASIC_RATE_MASK),
+ (IEEE80211_CCK_RATE_11MB | IEEE80211_BASIC_RATE_MASK)
+};
+
+static u8 WIFI_OFDMRATES[] = {
+ (IEEE80211_OFDM_RATE_6MB),
+ (IEEE80211_OFDM_RATE_9MB),
+ (IEEE80211_OFDM_RATE_12MB),
+ (IEEE80211_OFDM_RATE_18MB),
+ (IEEE80211_OFDM_RATE_24MB),
+ IEEE80211_OFDM_RATE_36MB,
+ IEEE80211_OFDM_RATE_48MB,
+ IEEE80211_OFDM_RATE_54MB
+};
+
+
+int rtw_get_bit_value_from_ieee_value(u8 val)
+{
+ unsigned char dot11_rate_table[] = {2, 4, 11, 22, 12, 18, 24, 36, 48, 72, 96, 108, 0}; /* last element must be zero!! */
+
+ int i = 0;
+ while (dot11_rate_table[i] != 0) {
+ if (dot11_rate_table[i] == val)
+ return BIT(i);
+ i++;
+ }
+ return 0;
+}
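+
+/*
+ * The dot11 rate values above are in 500 kbps units (2 = 1 Mbps, 22 = 11
+ * Mbps, 108 = 54 Mbps); the function returns a one-hot bit for the rate's
+ * position in the table, e.g. rtw_get_bit_value_from_ieee_value(22) = BIT(3).
+ */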
+
+uint rtw_is_cckrates_included(u8 *rate)
+{
+ u32 i = 0;
+
+ while (rate[i] != 0) {
+ if ((((rate[i]) & 0x7f) == 2) || (((rate[i]) & 0x7f) == 4) ||
+ (((rate[i]) & 0x7f) == 11) || (((rate[i]) & 0x7f) == 22))
+ return true;
+ i++;
+ }
+
+ return false;
+}
+
+uint rtw_is_cckratesonly_included(u8 *rate)
+{
+ u32 i = 0;
+
+
+ while (rate[i] != 0) {
+ if ((((rate[i]) & 0x7f) != 2) && (((rate[i]) & 0x7f) != 4) &&
+ (((rate[i]) & 0x7f) != 11) && (((rate[i]) & 0x7f) != 22))
+ return false;
+ i++;
+ }
+
+ return true;
+}
+
+int rtw_check_network_type(unsigned char *rate, int ratelen, int channel)
+{
+ if (channel > 14) {
+ if ((rtw_is_cckrates_included(rate)) == true)
+ return WIRELESS_INVALID;
+ else
+ return WIRELESS_11A;
+ } else{ /* could be pure B, pure G, or B/G */
+ if ((rtw_is_cckratesonly_included(rate)) == true)
+ return WIRELESS_11B;
+ else if ((rtw_is_cckrates_included(rate)) == true)
+ return WIRELESS_11BG;
+ else
+ return WIRELESS_11G;
+ }
+
+}
+
+u8 *rtw_set_fixed_ie(unsigned char *pbuf, unsigned int len, unsigned char *source,
+ unsigned int *frlen)
+{
+ memcpy((void *)pbuf, (void *)source, len);
+ *frlen = *frlen + len;
+ return (pbuf + len);
+}
+
+/* rtw_set_ie will update frame length */
+u8 *rtw_set_ie
+(
+ u8 *pbuf,
+ sint index,
+ uint len,
+ u8 *source,
+ uint *frlen /* frame length */
+)
+{
+ *pbuf = (u8)index;
+
+ *(pbuf + 1) = (u8)len;
+
+ if (len > 0)
+ memcpy((void *)(pbuf + 2), (void *)source, len);
+
+ *frlen = *frlen + (len + 2);
+
+ return (pbuf + len + 2);
+}
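+
+/*
+ * Example: rtw_set_ie(p, _SSID_IE_, 4, "test", &frlen) with element ID 0
+ * emits the TLV 0x00 0x04 't' 'e' 's' 't' at p, adds 6 to frlen and returns
+ * p + 6, ready for the next element.
+ */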
+
+/*----------------------------------------------------------------------------
+index: the information element id index, limit is the limit for search
+-----------------------------------------------------------------------------*/
+u8 *rtw_get_ie(u8 *pbuf, sint index, sint *len, sint limit)
+{
+ sint tmp, i;
+ u8 *p;
+
+ if (limit < 1) {
+ return NULL;
+ }
+
+ p = pbuf;
+ i = 0;
+ *len = 0;
+ while (1) {
+ if (*p == index) {
+ *len = *(p + 1);
+ return p;
+ } else{
+ tmp = *(p + 1);
+ p += (tmp + 2);
+ i += (tmp + 2);
+ }
+ if (i >= limit)
+ break;
+ }
+ return NULL;
+}
+
+/**
+ * rtw_get_ie_ex - Search specific IE from a series of IEs
+ * @in_ie: Address of IEs to search
+ * @in_len: Length limit from in_ie
+ * @eid: Element ID to match
+ * @oui: OUI to match
+ * @oui_len: OUI length
+ * @ie: If not NULL and the specific IE is found, the IE will be copied to the buf starting from the specific IE
+ * @ielen: If not NULL and the specific IE is found, will set to the length of the entire IE
+ *
+ * Returns: The address of the specific IE found, or NULL
+ */
+u8 *rtw_get_ie_ex(u8 *in_ie, uint in_len, u8 eid, u8 *oui, u8 oui_len, u8 *ie, uint *ielen)
+{
+ uint cnt;
+ u8 *target_ie = NULL;
+
+
+ if (ielen)
+ *ielen = 0;
+
+ if (!in_ie || in_len <= 0)
+ return target_ie;
+
+ cnt = 0;
+
+ while (cnt < in_len) {
+ if (eid == in_ie[cnt]
+ && (!oui || !memcmp(&in_ie[cnt+2], oui, oui_len))) {
+ target_ie = &in_ie[cnt];
+
+ if (ie)
+ memcpy(ie, &in_ie[cnt], in_ie[cnt+1]+2);
+
+ if (ielen)
+ *ielen = in_ie[cnt+1]+2;
+
+ break;
+ } else{
+ cnt += in_ie[cnt+1]+2; /* goto next */
+ }
+
+ }
+
+ return target_ie;
+}
+
+/**
+ * rtw_ies_remove_ie - Find matching IEs and remove
+ * @ies: Address of IEs to search
+ * @ies_len: Pointer of length of ies, will update to new length
+ * @offset: The offset from which to start the search
+ * @eid: Element ID to match
+ * @oui: OUI to match
+ * @oui_len: OUI length
+ *
+ * Returns: _SUCCESS: ies is updated, _FAIL: not updated
+ */
+int rtw_ies_remove_ie(u8 *ies, uint *ies_len, uint offset, u8 eid, u8 *oui, u8 oui_len)
+{
+ int ret = _FAIL;
+ u8 *target_ie;
+ u32 target_ielen;
+ u8 *start;
+ uint search_len;
+
+ if (!ies || !ies_len || *ies_len <= offset)
+ goto exit;
+
+ start = ies + offset;
+ search_len = *ies_len - offset;
+
+ while (1) {
+ target_ie = rtw_get_ie_ex(start, search_len, eid, oui, oui_len, NULL, &target_ielen);
+ if (target_ie && target_ielen) {
+ u8 buf[MAX_IE_SZ] = {0};
+ u8 *remain_ies = target_ie + target_ielen;
+ uint remain_len = search_len - (remain_ies - start);
+
+ memcpy(buf, remain_ies, remain_len);
+ memcpy(target_ie, buf, remain_len);
+ *ies_len = *ies_len - target_ielen;
+ ret = _SUCCESS;
+
+ start = target_ie;
+ search_len = remain_len;
+ } else {
+ break;
+ }
+ }
+exit:
+ return ret;
+}
+
+void rtw_set_supported_rate(u8 *SupportedRates, uint mode)
+{
+ memset(SupportedRates, 0, NDIS_802_11_LENGTH_RATES_EX);
+
+ switch (mode) {
+ case WIRELESS_11B:
+ memcpy(SupportedRates, WIFI_CCKRATES, IEEE80211_CCK_RATE_LEN);
+ break;
+
+ case WIRELESS_11G:
+ case WIRELESS_11A:
+ case WIRELESS_11_5N:
+ case WIRELESS_11A_5N:/* Todo: no basic rate for ofdm ? */
+ case WIRELESS_11_5AC:
+ memcpy(SupportedRates, WIFI_OFDMRATES, IEEE80211_NUM_OFDM_RATESLEN);
+ break;
+
+ case WIRELESS_11BG:
+ case WIRELESS_11G_24N:
+ case WIRELESS_11_24N:
+ case WIRELESS_11BG_24N:
+ memcpy(SupportedRates, WIFI_CCKRATES, IEEE80211_CCK_RATE_LEN);
+ memcpy(SupportedRates + IEEE80211_CCK_RATE_LEN, WIFI_OFDMRATES, IEEE80211_NUM_OFDM_RATESLEN);
+ break;
+
+ }
+}
+
+uint rtw_get_rateset_len(u8 *rateset)
+{
+ uint i = 0;
+
+ while (1) {
+ if ((rateset[i]) == 0)
+ break;
+
+ if (i > 12)
+ break;
+
+ i++;
+ }
+ return i;
+}
+
+int rtw_generate_ie(struct registry_priv *pregistrypriv)
+{
+ u8 wireless_mode;
+ int sz = 0, rateLen;
+ struct wlan_bssid_ex *pdev_network = &pregistrypriv->dev_network;
+ u8 *ie = pdev_network->IEs;
+
+ /* timestamp will be inserted by hardware */
+ sz += 8;
+ ie += sz;
+
+ /* beacon interval : 2bytes */
+ *(__le16 *)ie = cpu_to_le16((u16)pdev_network->Configuration.BeaconPeriod);/* BCN_INTERVAL; */
+ sz += 2;
+ ie += 2;
+
+ /* capability info */
+ *(u16 *)ie = 0;
+
+ *(__le16 *)ie |= cpu_to_le16(cap_IBSS);
+
+ if (pregistrypriv->preamble == PREAMBLE_SHORT)
+ *(__le16 *)ie |= cpu_to_le16(cap_ShortPremble);
+
+ if (pdev_network->Privacy)
+ *(__le16 *)ie |= cpu_to_le16(cap_Privacy);
+
+ sz += 2;
+ ie += 2;
+
+ /* SSID */
+ ie = rtw_set_ie(ie, _SSID_IE_, pdev_network->Ssid.SsidLength, pdev_network->Ssid.Ssid, &sz);
+
+ /* supported rates */
+ if (pregistrypriv->wireless_mode == WIRELESS_11ABGN) {
+ if (pdev_network->Configuration.DSConfig > 14)
+ wireless_mode = WIRELESS_11A_5N;
+ else
+ wireless_mode = WIRELESS_11BG_24N;
+ } else{
+ wireless_mode = pregistrypriv->wireless_mode;
+ }
+
+ rtw_set_supported_rate(pdev_network->SupportedRates, wireless_mode);
+
+ rateLen = rtw_get_rateset_len(pdev_network->SupportedRates);
+
+ if (rateLen > 8) {
+ ie = rtw_set_ie(ie, _SUPPORTEDRATES_IE_, 8, pdev_network->SupportedRates, &sz);
+ /* ie = rtw_set_ie(ie, _EXT_SUPPORTEDRATES_IE_, (rateLen - 8), (pdev_network->SupportedRates + 8), &sz); */
+ } else{
+ ie = rtw_set_ie(ie, _SUPPORTEDRATES_IE_, rateLen, pdev_network->SupportedRates, &sz);
+ }
+
+ /* DS parameter set */
+ ie = rtw_set_ie(ie, _DSSET_IE_, 1, (u8 *)&(pdev_network->Configuration.DSConfig), &sz);
+
+
+ /* IBSS Parameter Set */
+
+ ie = rtw_set_ie(ie, _IBSS_PARA_IE_, 2, (u8 *)&(pdev_network->Configuration.ATIMWindow), &sz);
+
+ if (rateLen > 8) {
+ ie = rtw_set_ie(ie, _EXT_SUPPORTEDRATES_IE_, (rateLen - 8), (pdev_network->SupportedRates + 8), &sz);
+ }
+
+ /* HT Cap. */
+ if (((pregistrypriv->wireless_mode&WIRELESS_11_5N) || (pregistrypriv->wireless_mode&WIRELESS_11_24N))
+ && (pregistrypriv->ht_enable == true)) {
+ /* todo: */
+ }
+
+ /* pdev_network->IELength = sz; update IELength */
+
+ /* return _SUCCESS; */
+
+ return sz;
+}
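+
+/*
+ * Layout of the IE buffer built above: an 8-byte timestamp placeholder, a
+ * 2-byte beacon interval, a 2-byte capability field, then the SSID element,
+ * up to 8 supported rates, the DS parameter set, the IBSS parameter set and,
+ * when more than 8 rates exist, an extended supported rates element; the
+ * return value is the total number of bytes written.
+ */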
+
+unsigned char *rtw_get_wpa_ie(unsigned char *pie, int *wpa_ie_len, int limit)
+{
+ int len;
+ u16 val16;
+ unsigned char wpa_oui_type[] = {0x00, 0x50, 0xf2, 0x01};
+ u8 *pbuf = pie;
+ int limit_new = limit;
+ __le16 le_tmp;
+
+ while (1) {
+ pbuf = rtw_get_ie(pbuf, _WPA_IE_ID_, &len, limit_new);
+
+ if (pbuf) {
+
+ /* check if oui matches... */
+ if (memcmp((pbuf + 2), wpa_oui_type, sizeof(wpa_oui_type))) {
+
+ goto check_next_ie;
+ }
+
+ /* check version... */
+ memcpy((u8 *)&le_tmp, (pbuf + 6), sizeof(val16));
+
+ val16 = le16_to_cpu(le_tmp);
+ if (val16 != 0x0001)
+ goto check_next_ie;
+
+ *wpa_ie_len = *(pbuf + 1);
+
+ return pbuf;
+
+ } else{
+
+ *wpa_ie_len = 0;
+ return NULL;
+ }
+
+check_next_ie:
+
+ limit_new = limit - (pbuf - pie) - 2 - len;
+
+ if (limit_new <= 0)
+ break;
+
+ pbuf += (2 + len);
+
+ }
+
+ *wpa_ie_len = 0;
+
+ return NULL;
+
+}
+
+unsigned char *rtw_get_wpa2_ie(unsigned char *pie, int *rsn_ie_len, int limit)
+{
+
+ return rtw_get_ie(pie, _WPA2_IE_ID_, rsn_ie_len, limit);
+
+}
+
+int rtw_get_wpa_cipher_suite(u8 *s)
+{
+ if (!memcmp(s, WPA_CIPHER_SUITE_NONE, WPA_SELECTOR_LEN))
+ return WPA_CIPHER_NONE;
+ if (!memcmp(s, WPA_CIPHER_SUITE_WEP40, WPA_SELECTOR_LEN))
+ return WPA_CIPHER_WEP40;
+ if (!memcmp(s, WPA_CIPHER_SUITE_TKIP, WPA_SELECTOR_LEN))
+ return WPA_CIPHER_TKIP;
+ if (!memcmp(s, WPA_CIPHER_SUITE_CCMP, WPA_SELECTOR_LEN))
+ return WPA_CIPHER_CCMP;
+ if (!memcmp(s, WPA_CIPHER_SUITE_WEP104, WPA_SELECTOR_LEN))
+ return WPA_CIPHER_WEP104;
+
+ return 0;
+}
+
+int rtw_get_wpa2_cipher_suite(u8 *s)
+{
+ if (!memcmp(s, RSN_CIPHER_SUITE_NONE, RSN_SELECTOR_LEN))
+ return WPA_CIPHER_NONE;
+ if (!memcmp(s, RSN_CIPHER_SUITE_WEP40, RSN_SELECTOR_LEN))
+ return WPA_CIPHER_WEP40;
+ if (!memcmp(s, RSN_CIPHER_SUITE_TKIP, RSN_SELECTOR_LEN))
+ return WPA_CIPHER_TKIP;
+ if (!memcmp(s, RSN_CIPHER_SUITE_CCMP, RSN_SELECTOR_LEN))
+ return WPA_CIPHER_CCMP;
+ if (!memcmp(s, RSN_CIPHER_SUITE_WEP104, RSN_SELECTOR_LEN))
+ return WPA_CIPHER_WEP104;
+
+ return 0;
+}
+
+
+int rtw_parse_wpa_ie(u8 *wpa_ie, int wpa_ie_len, int *group_cipher, int *pairwise_cipher, int *is_8021x)
+{
+ int i, ret = _SUCCESS;
+ int left, count;
+ u8 *pos;
+ u8 SUITE_1X[4] = {0x00, 0x50, 0xf2, 1};
+
+ if (wpa_ie_len <= 0) {
+ /* No WPA IE - fail silently */
+ return _FAIL;
+ }
+
+
+ if ((*wpa_ie != _WPA_IE_ID_) || (*(wpa_ie+1) != (u8)(wpa_ie_len - 2)) ||
+ (memcmp(wpa_ie+2, RTW_WPA_OUI_TYPE, WPA_SELECTOR_LEN))) {
+ return _FAIL;
+ }
+
+ pos = wpa_ie;
+
+ pos += 8;
+ left = wpa_ie_len - 8;
+
+
+ /* group_cipher */
+ if (left >= WPA_SELECTOR_LEN) {
+
+ *group_cipher = rtw_get_wpa_cipher_suite(pos);
+
+ pos += WPA_SELECTOR_LEN;
+ left -= WPA_SELECTOR_LEN;
+
+ } else if (left > 0) {
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("%s: ie length mismatch, %u too much", __func__, left));
+
+ return _FAIL;
+ }
+
+
+ /* pairwise_cipher */
+ if (left >= 2) {
+ /* count = le16_to_cpu(*(u16*)pos); */
+ count = RTW_GET_LE16(pos);
+ pos += 2;
+ left -= 2;
+
+ if (count == 0 || left < count * WPA_SELECTOR_LEN) {
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("%s: ie count botch (pairwise), "
+ "count %u left %u", __func__, count, left));
+ return _FAIL;
+ }
+
+ for (i = 0; i < count; i++) {
+ *pairwise_cipher |= rtw_get_wpa_cipher_suite(pos);
+
+ pos += WPA_SELECTOR_LEN;
+ left -= WPA_SELECTOR_LEN;
+ }
+
+ } else if (left == 1) {
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("%s: ie too short (for key mgmt)", __func__));
+ return _FAIL;
+ }
+
+ if (is_8021x) {
+ if (left >= 6) {
+ pos += 2;
+ if (!memcmp(pos, SUITE_1X, 4)) {
+				RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("%s : there is 802.1x auth\n", __func__));
+ *is_8021x = 1;
+ }
+ }
+ }
+
+ return ret;
+
+}
+
+int rtw_parse_wpa2_ie(u8 *rsn_ie, int rsn_ie_len, int *group_cipher, int *pairwise_cipher, int *is_8021x)
+{
+ int i, ret = _SUCCESS;
+ int left, count;
+ u8 *pos;
+ u8 SUITE_1X[4] = {0x00, 0x0f, 0xac, 0x01};
+
+ if (rsn_ie_len <= 0) {
+ /* No RSN IE - fail silently */
+ return _FAIL;
+ }
+
+
+ if ((*rsn_ie != _WPA2_IE_ID_) || (*(rsn_ie+1) != (u8)(rsn_ie_len - 2))) {
+ return _FAIL;
+ }
+
+ pos = rsn_ie;
+ pos += 4;
+ left = rsn_ie_len - 4;
+
+ /* group_cipher */
+ if (left >= RSN_SELECTOR_LEN) {
+
+ *group_cipher = rtw_get_wpa2_cipher_suite(pos);
+
+ pos += RSN_SELECTOR_LEN;
+ left -= RSN_SELECTOR_LEN;
+
+ } else if (left > 0) {
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("%s: ie length mismatch, %u too much", __func__, left));
+ return _FAIL;
+ }
+
+ /* pairwise_cipher */
+ if (left >= 2) {
+ /* count = le16_to_cpu(*(u16*)pos); */
+ count = RTW_GET_LE16(pos);
+ pos += 2;
+ left -= 2;
+
+ if (count == 0 || left < count * RSN_SELECTOR_LEN) {
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("%s: ie count botch (pairwise), "
+ "count %u left %u", __func__, count, left));
+ return _FAIL;
+ }
+
+ for (i = 0; i < count; i++) {
+ *pairwise_cipher |= rtw_get_wpa2_cipher_suite(pos);
+
+ pos += RSN_SELECTOR_LEN;
+ left -= RSN_SELECTOR_LEN;
+ }
+
+ } else if (left == 1) {
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("%s: ie too short (for key mgmt)", __func__));
+
+ return _FAIL;
+ }
+
+ if (is_8021x) {
+ if (left >= 6) {
+ pos += 2;
+ if (!memcmp(pos, SUITE_1X, 4)) {
+				RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("%s(): there is 802.1x auth\n", __func__));
+ *is_8021x = 1;
+ }
+ }
+ }
+
+ return ret;
+
+}
+
+/* ifdef CONFIG_WAPI_SUPPORT */
+int rtw_get_wapi_ie(u8 *in_ie, uint in_len, u8 *wapi_ie, u16 *wapi_len)
+{
+ int len = 0;
+ u8 authmode, i;
+ uint cnt;
+ u8 wapi_oui1[4] = {0x0, 0x14, 0x72, 0x01};
+ u8 wapi_oui2[4] = {0x0, 0x14, 0x72, 0x02};
+
+ if (wapi_len)
+ *wapi_len = 0;
+
+ if (!in_ie || in_len <= 0)
+ return len;
+
+ cnt = (_TIMESTAMP_ + _BEACON_ITERVAL_ + _CAPABILITY_);
+
+ while (cnt < in_len) {
+ authmode = in_ie[cnt];
+
+ /* if (authmode == _WAPI_IE_) */
+ if (authmode == _WAPI_IE_ && (!memcmp(&in_ie[cnt+6], wapi_oui1, 4) ||
+ !memcmp(&in_ie[cnt+6], wapi_oui2, 4))) {
+ if (wapi_ie) {
+ memcpy(wapi_ie, &in_ie[cnt], in_ie[cnt+1]+2);
+
+ for (i = 0; i < (in_ie[cnt+1]+2); i = i+8) {
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("\n %2x,%2x,%2x,%2x,%2x,%2x,%2x,%2x\n",
+ wapi_ie[i], wapi_ie[i+1], wapi_ie[i+2], wapi_ie[i+3], wapi_ie[i+4],
+ wapi_ie[i+5], wapi_ie[i+6], wapi_ie[i+7]));
+ }
+ }
+
+ if (wapi_len)
+ *wapi_len = in_ie[cnt+1]+2;
+
+ cnt += in_ie[cnt+1]+2; /* get next */
+ } else{
+ cnt += in_ie[cnt+1]+2; /* get next */
+ }
+ }
+
+ if (wapi_len)
+ len = *wapi_len;
+
+ return len;
+}
+/* endif */
+
+int rtw_get_sec_ie(u8 *in_ie, uint in_len, u8 *rsn_ie, u16 *rsn_len, u8 *wpa_ie, u16 *wpa_len)
+{
+ u8 authmode, sec_idx, i;
+ u8 wpa_oui[4] = {0x0, 0x50, 0xf2, 0x01};
+ uint cnt;
+
+ /* Search required WPA or WPA2 IE and copy to sec_ie[ ] */
+
+ cnt = (_TIMESTAMP_ + _BEACON_ITERVAL_ + _CAPABILITY_);
+
+ sec_idx = 0;
+
+ while (cnt < in_len) {
+ authmode = in_ie[cnt];
+
+ if ((authmode == _WPA_IE_ID_) && (!memcmp(&in_ie[cnt+2], &wpa_oui[0], 4))) {
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("\n rtw_get_wpa_ie: sec_idx =%d in_ie[cnt+1]+2 =%d\n", sec_idx, in_ie[cnt+1]+2));
+
+ if (wpa_ie) {
+ memcpy(wpa_ie, &in_ie[cnt], in_ie[cnt+1]+2);
+
+ for (i = 0; i < (in_ie[cnt+1]+2); i = i+8) {
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("\n %2x,%2x,%2x,%2x,%2x,%2x,%2x,%2x\n",
+ wpa_ie[i], wpa_ie[i+1], wpa_ie[i+2], wpa_ie[i+3], wpa_ie[i+4],
+ wpa_ie[i+5], wpa_ie[i+6], wpa_ie[i+7]));
+ }
+ }
+
+ *wpa_len = in_ie[cnt+1]+2;
+ cnt += in_ie[cnt+1]+2; /* get next */
+ } else{
+ if (authmode == _WPA2_IE_ID_) {
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("\n get_rsn_ie: sec_idx =%d in_ie[cnt+1]+2 =%d\n", sec_idx, in_ie[cnt+1]+2));
+
+ if (rsn_ie) {
+ memcpy(rsn_ie, &in_ie[cnt], in_ie[cnt+1]+2);
+
+ for (i = 0; i < (in_ie[cnt+1]+2); i = i+8) {
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("\n %2x,%2x,%2x,%2x,%2x,%2x,%2x,%2x\n",
+ rsn_ie[i], rsn_ie[i+1], rsn_ie[i+2], rsn_ie[i+3], rsn_ie[i+4],
+ rsn_ie[i+5], rsn_ie[i+6], rsn_ie[i+7]));
+ }
+ }
+
+ *rsn_len = in_ie[cnt+1]+2;
+ cnt += in_ie[cnt+1]+2; /* get next */
+ } else{
+ cnt += in_ie[cnt+1]+2; /* get next */
+ }
+ }
+
+ }
+
+ return (*rsn_len + *wpa_len);
+}
+
+u8 rtw_is_wps_ie(u8 *ie_ptr, uint *wps_ielen)
+{
+ u8 match = false;
+ u8 eid, wps_oui[4] = {0x0, 0x50, 0xf2, 0x04};
+
+ if (ie_ptr == NULL)
+ return match;
+
+ eid = ie_ptr[0];
+
+ if ((eid == _WPA_IE_ID_) && (!memcmp(&ie_ptr[2], wps_oui, 4))) {
+ /* DBG_8192C("==> found WPS_IE.....\n"); */
+ *wps_ielen = ie_ptr[1]+2;
+ match = true;
+ }
+ return match;
+}
+
+/**
+ * rtw_get_wps_ie - Search WPS IE from a series of IEs
+ * @in_ie: Address of IEs to search
+ * @in_len: Length limit from in_ie
+ * @wps_ie: If not NULL and WPS IE is found, WPS IE will be copied to the buf starting from wps_ie
+ * @wps_ielen: If not NULL and WPS IE is found, will set to the length of the entire WPS IE
+ *
+ * Returns: The address of the WPS IE found, or NULL
+ */
+u8 *rtw_get_wps_ie(u8 *in_ie, uint in_len, u8 *wps_ie, uint *wps_ielen)
+{
+ uint cnt;
+ u8 *wpsie_ptr = NULL;
+ u8 eid, wps_oui[4] = {0x0, 0x50, 0xf2, 0x04};
+
+ if (wps_ielen)
+ *wps_ielen = 0;
+
+ if (!in_ie || in_len <= 0)
+ return wpsie_ptr;
+
+ cnt = 0;
+
+ while (cnt < in_len) {
+ eid = in_ie[cnt];
+
+ if ((eid == _WPA_IE_ID_) && (!memcmp(&in_ie[cnt+2], wps_oui, 4))) {
+ wpsie_ptr = &in_ie[cnt];
+
+ if (wps_ie)
+ memcpy(wps_ie, &in_ie[cnt], in_ie[cnt+1]+2);
+
+ if (wps_ielen)
+ *wps_ielen = in_ie[cnt+1]+2;
+
+ cnt += in_ie[cnt+1]+2;
+
+ break;
+ } else{
+ cnt += in_ie[cnt+1]+2; /* goto next */
+ }
+
+ }
+
+ return wpsie_ptr;
+}
+
+/**
+ * rtw_get_wps_attr - Search a specific WPS attribute from a given WPS IE
+ * @wps_ie: Address of WPS IE to search
+ * @wps_ielen: Length limit from wps_ie
+ * @target_attr_id: The attribute ID of WPS attribute to search
+ * @buf_attr: If not NULL and the WPS attribute is found, WPS attribute will be copied to the buf starting from buf_attr
+ * @len_attr: If not NULL and the WPS attribute is found, will set to the length of the entire WPS attribute
+ *
+ * Returns: the address of the specific WPS attribute found, or NULL
+ */
+u8 *rtw_get_wps_attr(u8 *wps_ie, uint wps_ielen, u16 target_attr_id, u8 *buf_attr, u32 *len_attr)
+{
+ u8 *attr_ptr = NULL;
+ u8 *target_attr_ptr = NULL;
+ u8 wps_oui[4] = {0x00, 0x50, 0xF2, 0x04};
+
+ if (len_attr)
+ *len_attr = 0;
+
+ if ((wps_ie[0] != _VENDOR_SPECIFIC_IE_) ||
+ (memcmp(wps_ie + 2, wps_oui, 4))) {
+ return attr_ptr;
+ }
+
+ /* 6 = 1(Element ID) + 1(Length) + 4(WPS OUI) */
+ attr_ptr = wps_ie + 6; /* goto first attr */
+
+ while (attr_ptr - wps_ie < wps_ielen) {
+ /* 4 = 2(Attribute ID) + 2(Length) */
+ u16 attr_id = RTW_GET_BE16(attr_ptr);
+ u16 attr_data_len = RTW_GET_BE16(attr_ptr + 2);
+ u16 attr_len = attr_data_len + 4;
+
+ /* DBG_871X("%s attr_ptr:%p, id:%u, length:%u\n", __func__, attr_ptr, attr_id, attr_data_len); */
+ if (attr_id == target_attr_id) {
+ target_attr_ptr = attr_ptr;
+
+ if (buf_attr)
+ memcpy(buf_attr, attr_ptr, attr_len);
+
+ if (len_attr)
+ *len_attr = attr_len;
+
+ break;
+ } else{
+ attr_ptr += attr_len; /* goto next */
+ }
+
+ }
+
+ return target_attr_ptr;
+}
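+
+/*
+ * WPS attributes inside the IE are big-endian TLVs: a 2-byte attribute ID
+ * followed by a 2-byte data length and the data itself, so an attribute with
+ * a 2-byte value occupies 2 + 2 + 2 = 6 bytes and attr_len above is the data
+ * length plus 4.
+ */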
+
+/**
+ * rtw_get_wps_attr_content - Search a specific WPS attribute content from a given WPS IE
+ * @wps_ie: Address of WPS IE to search
+ * @wps_ielen: Length limit from wps_ie
+ * @target_attr_id: The attribute ID of WPS attribute to search
+ * @buf_content: If not NULL and the WPS attribute is found, WPS attribute content will be copied to the buf starting from buf_content
+ * @len_content: If not NULL and the WPS attribute is found, will set to the length of the WPS attribute content
+ *
+ * Returns: the address of the specific WPS attribute content found, or NULL
+ */
+u8 *rtw_get_wps_attr_content(u8 *wps_ie, uint wps_ielen, u16 target_attr_id, u8 *buf_content, uint *len_content)
+{
+ u8 *attr_ptr;
+ u32 attr_len;
+
+ if (len_content)
+ *len_content = 0;
+
+ attr_ptr = rtw_get_wps_attr(wps_ie, wps_ielen, target_attr_id, NULL, &attr_len);
+
+ if (attr_ptr && attr_len) {
+ if (buf_content)
+ memcpy(buf_content, attr_ptr+4, attr_len-4);
+
+ if (len_content)
+ *len_content = attr_len-4;
+
+ return attr_ptr+4;
+ }
+
+ return NULL;
+}
+
+static int rtw_ieee802_11_parse_vendor_specific(u8 *pos, uint elen,
+ struct rtw_ieee802_11_elems *elems,
+ int show_errors)
+{
+ unsigned int oui;
+
+	/* first 3 bytes in vendor specific information element are the IEEE
+	 * OUI of the vendor. The following byte is used as a vendor specific
+	 * sub-type. */
+ if (elen < 4) {
+ if (show_errors) {
+ DBG_871X("short vendor specific "
+ "information element ignored (len =%lu)\n",
+ (unsigned long) elen);
+ }
+ return -1;
+ }
+
+ oui = RTW_GET_BE24(pos);
+ switch (oui) {
+ case OUI_MICROSOFT:
+ /* Microsoft/Wi-Fi information elements are further typed and
+ * subtyped */
+ switch (pos[3]) {
+ case 1:
+ /* Microsoft OUI (00:50:F2) with OUI Type 1:
+ * real WPA information element */
+ elems->wpa_ie = pos;
+ elems->wpa_ie_len = elen;
+ break;
+ case WME_OUI_TYPE: /* this is a Wi-Fi WME info. element */
+ if (elen < 5) {
+ DBG_871X("short WME "
+ "information element ignored "
+ "(len =%lu)\n",
+ (unsigned long) elen);
+ return -1;
+ }
+ switch (pos[4]) {
+ case WME_OUI_SUBTYPE_INFORMATION_ELEMENT:
+ case WME_OUI_SUBTYPE_PARAMETER_ELEMENT:
+ elems->wme = pos;
+ elems->wme_len = elen;
+ break;
+ case WME_OUI_SUBTYPE_TSPEC_ELEMENT:
+ elems->wme_tspec = pos;
+ elems->wme_tspec_len = elen;
+ break;
+ default:
+ DBG_871X("unknown WME "
+ "information element ignored "
+ "(subtype =%d len =%lu)\n",
+ pos[4], (unsigned long) elen);
+ return -1;
+ }
+ break;
+ case 4:
+ /* Wi-Fi Protected Setup (WPS) IE */
+ elems->wps_ie = pos;
+ elems->wps_ie_len = elen;
+ break;
+ default:
+ DBG_871X("Unknown Microsoft "
+ "information element ignored "
+ "(type =%d len =%lu)\n",
+ pos[3], (unsigned long) elen);
+ return -1;
+ }
+ break;
+
+ case OUI_BROADCOM:
+ switch (pos[3]) {
+ case VENDOR_HT_CAPAB_OUI_TYPE:
+ elems->vendor_ht_cap = pos;
+ elems->vendor_ht_cap_len = elen;
+ break;
+ default:
+ DBG_871X("Unknown Broadcom "
+ "information element ignored "
+ "(type =%d len =%lu)\n",
+ pos[3], (unsigned long) elen);
+ return -1;
+ }
+ break;
+
+ default:
+ DBG_871X("unknown vendor specific information "
+ "element ignored (vendor OUI %02x:%02x:%02x "
+ "len =%lu)\n",
+ pos[0], pos[1], pos[2], (unsigned long) elen);
+ return -1;
+ }
+
+ return 0;
+
+}
+
+/**
+ * ieee802_11_parse_elems - Parse information elements in management frames
+ * @start: Pointer to the start of IEs
+ * @len: Length of IE buffer in octets
+ * @elems: Data structure for parsed elements
+ * @show_errors: Whether to show parsing errors in debug log
+ * Returns: Parsing result
+ */
+ParseRes rtw_ieee802_11_parse_elems(u8 *start, uint len,
+ struct rtw_ieee802_11_elems *elems,
+ int show_errors)
+{
+ uint left = len;
+ u8 *pos = start;
+ int unknown = 0;
+
+ memset(elems, 0, sizeof(*elems));
+
+ while (left >= 2) {
+ u8 id, elen;
+
+ id = *pos++;
+ elen = *pos++;
+ left -= 2;
+
+ if (elen > left) {
+ if (show_errors) {
+ DBG_871X("IEEE 802.11 element "
+ "parse failed (id =%d elen =%d "
+ "left =%lu)\n",
+ id, elen, (unsigned long) left);
+ }
+ return ParseFailed;
+ }
+
+ switch (id) {
+ case WLAN_EID_SSID:
+ elems->ssid = pos;
+ elems->ssid_len = elen;
+ break;
+ case WLAN_EID_SUPP_RATES:
+ elems->supp_rates = pos;
+ elems->supp_rates_len = elen;
+ break;
+ case WLAN_EID_FH_PARAMS:
+ elems->fh_params = pos;
+ elems->fh_params_len = elen;
+ break;
+ case WLAN_EID_DS_PARAMS:
+ elems->ds_params = pos;
+ elems->ds_params_len = elen;
+ break;
+ case WLAN_EID_CF_PARAMS:
+ elems->cf_params = pos;
+ elems->cf_params_len = elen;
+ break;
+ case WLAN_EID_TIM:
+ elems->tim = pos;
+ elems->tim_len = elen;
+ break;
+ case WLAN_EID_IBSS_PARAMS:
+ elems->ibss_params = pos;
+ elems->ibss_params_len = elen;
+ break;
+ case WLAN_EID_CHALLENGE:
+ elems->challenge = pos;
+ elems->challenge_len = elen;
+ break;
+ case WLAN_EID_ERP_INFO:
+ elems->erp_info = pos;
+ elems->erp_info_len = elen;
+ break;
+ case WLAN_EID_EXT_SUPP_RATES:
+ elems->ext_supp_rates = pos;
+ elems->ext_supp_rates_len = elen;
+ break;
+ case WLAN_EID_VENDOR_SPECIFIC:
+ if (rtw_ieee802_11_parse_vendor_specific(pos, elen,
+ elems,
+ show_errors))
+ unknown++;
+ break;
+ case WLAN_EID_RSN:
+ elems->rsn_ie = pos;
+ elems->rsn_ie_len = elen;
+ break;
+ case WLAN_EID_PWR_CAPABILITY:
+ elems->power_cap = pos;
+ elems->power_cap_len = elen;
+ break;
+ case WLAN_EID_SUPPORTED_CHANNELS:
+ elems->supp_channels = pos;
+ elems->supp_channels_len = elen;
+ break;
+ case WLAN_EID_MOBILITY_DOMAIN:
+ elems->mdie = pos;
+ elems->mdie_len = elen;
+ break;
+ case WLAN_EID_FAST_BSS_TRANSITION:
+ elems->ftie = pos;
+ elems->ftie_len = elen;
+ break;
+ case WLAN_EID_TIMEOUT_INTERVAL:
+ elems->timeout_int = pos;
+ elems->timeout_int_len = elen;
+ break;
+ case WLAN_EID_HT_CAP:
+ elems->ht_capabilities = pos;
+ elems->ht_capabilities_len = elen;
+ break;
+ case WLAN_EID_HT_OPERATION:
+ elems->ht_operation = pos;
+ elems->ht_operation_len = elen;
+ break;
+ case WLAN_EID_VHT_CAPABILITY:
+ elems->vht_capabilities = pos;
+ elems->vht_capabilities_len = elen;
+ break;
+ case WLAN_EID_VHT_OPERATION:
+ elems->vht_operation = pos;
+ elems->vht_operation_len = elen;
+ break;
+ case WLAN_EID_VHT_OP_MODE_NOTIFY:
+ elems->vht_op_mode_notify = pos;
+ elems->vht_op_mode_notify_len = elen;
+ break;
+ default:
+ unknown++;
+ if (!show_errors)
+ break;
+ DBG_871X("IEEE 802.11 element parse "
+ "ignored unknown element (id =%d elen =%d)\n",
+ id, elen);
+ break;
+ }
+
+ left -= elen;
+ pos += elen;
+ }
+
+ if (left)
+ return ParseFailed;
+
+ return unknown ? ParseUnknown : ParseOK;
+
+}
+
+static u8 key_char2num(u8 ch);
+static u8 key_char2num(u8 ch)
+{
+ if ((ch >= '0') && (ch <= '9'))
+ return ch - '0';
+ else if ((ch >= 'a') && (ch <= 'f'))
+ return ch - 'a' + 10;
+ else if ((ch >= 'A') && (ch <= 'F'))
+ return ch - 'A' + 10;
+ else
+ return 0xff;
+}
+
+u8 key_2char2num(u8 hch, u8 lch);
+u8 key_2char2num(u8 hch, u8 lch)
+{
+ return ((key_char2num(hch) << 4) | key_char2num(lch));
+}
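+
+/*
+ * Example: key_2char2num('1', 'a') = 0x1a. rtw_macaddr_cfg() below uses this
+ * to parse a user supplied "xx:xx:xx:xx:xx:xx" style string, stepping 3
+ * characters per octet so the separator between octets is skipped.
+ */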
+
+void rtw_macaddr_cfg(struct device *dev, u8 *mac_addr)
+{
+ u8 mac[ETH_ALEN];
+ struct device_node *np = dev->of_node;
+ const unsigned char *addr;
+ int len;
+
+ if (mac_addr == NULL)
+ return;
+
+ if (rtw_initmac) { /* Users specify the mac address */
+ int jj, kk;
+
+ for (jj = 0, kk = 0; jj < ETH_ALEN; jj++, kk += 3) {
+ mac[jj] = key_2char2num(rtw_initmac[kk], rtw_initmac[kk + 1]);
+ }
+ memcpy(mac_addr, mac, ETH_ALEN);
+ } else{ /* Use the mac address stored in the Efuse */
+ memcpy(mac, mac_addr, ETH_ALEN);
+ }
+
+ if (((mac[0] == 0xff) && (mac[1] == 0xff) && (mac[2] == 0xff) &&
+ (mac[3] == 0xff) && (mac[4] == 0xff) && (mac[5] == 0xff)) ||
+ ((mac[0] == 0x00) && (mac[1] == 0x00) && (mac[2] == 0x00) &&
+ (mac[3] == 0x00) && (mac[4] == 0x00) && (mac[5] == 0x00))) {
+ if (np &&
+ (addr = of_get_property(np, "local-mac-address", &len)) &&
+ len == ETH_ALEN) {
+ memcpy(mac_addr, addr, ETH_ALEN);
+ } else {
+ mac[0] = 0x00;
+ mac[1] = 0xe0;
+ mac[2] = 0x4c;
+ mac[3] = 0x87;
+ mac[4] = 0x00;
+ mac[5] = 0x00;
+			/* use default mac address */
+ memcpy(mac_addr, mac, ETH_ALEN);
+ DBG_871X("MAC Address from efuse error, assign default one !!!\n");
+ }
+ }
+
+ DBG_871X("rtw_macaddr_cfg MAC Address = "MAC_FMT"\n", MAC_ARG(mac_addr));
+}
+
+static int rtw_get_cipher_info(struct wlan_network *pnetwork)
+{
+ u32 wpa_ielen;
+ unsigned char *pbuf;
+ int group_cipher = 0, pairwise_cipher = 0, is8021x = 0;
+ int ret = _FAIL;
+ pbuf = rtw_get_wpa_ie(&pnetwork->network.IEs[12], &wpa_ielen, pnetwork->network.IELength-12);
+
+ if (pbuf && (wpa_ielen > 0)) {
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("rtw_get_cipher_info: wpa_ielen: %d", wpa_ielen));
+ if (_SUCCESS == rtw_parse_wpa_ie(pbuf, wpa_ielen+2, &group_cipher, &pairwise_cipher, &is8021x)) {
+
+ pnetwork->BcnInfo.pairwise_cipher = pairwise_cipher;
+ pnetwork->BcnInfo.group_cipher = group_cipher;
+ pnetwork->BcnInfo.is_8021x = is8021x;
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("%s: pnetwork->pairwise_cipher: %d, is_8021x is %d",
+ __func__, pnetwork->BcnInfo.pairwise_cipher, pnetwork->BcnInfo.is_8021x));
+ ret = _SUCCESS;
+ }
+ } else {
+
+ pbuf = rtw_get_wpa2_ie(&pnetwork->network.IEs[12], &wpa_ielen, pnetwork->network.IELength-12);
+
+ if (pbuf && (wpa_ielen > 0)) {
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("get RSN IE\n"));
+ if (_SUCCESS == rtw_parse_wpa2_ie(pbuf, wpa_ielen+2, &group_cipher, &pairwise_cipher, &is8021x)) {
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("get RSN IE OK!!!\n"));
+ pnetwork->BcnInfo.pairwise_cipher = pairwise_cipher;
+ pnetwork->BcnInfo.group_cipher = group_cipher;
+ pnetwork->BcnInfo.is_8021x = is8021x;
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("%s: pnetwork->pairwise_cipher: %d,"
+ "pnetwork->group_cipher is %d, is_8021x is %d", __func__, pnetwork->BcnInfo.pairwise_cipher,
+ pnetwork->BcnInfo.group_cipher, pnetwork->BcnInfo.is_8021x));
+ ret = _SUCCESS;
+ }
+ }
+ }
+
+ return ret;
+}
+
+void rtw_get_bcn_info(struct wlan_network *pnetwork)
+{
+ unsigned short cap = 0;
+ u8 bencrypt = 0;
+ /* u8 wpa_ie[255], rsn_ie[255]; */
+ u16 wpa_len = 0, rsn_len = 0;
+ struct HT_info_element *pht_info = NULL;
+ struct ieee80211_ht_cap *pht_cap = NULL;
+ unsigned int len;
+ unsigned char *p;
+ __le16 le_cap;
+
+ memcpy((u8 *)&le_cap, rtw_get_capability_from_ie(pnetwork->network.IEs), 2);
+ cap = le16_to_cpu(le_cap);
+ if (cap & WLAN_CAPABILITY_PRIVACY) {
+ bencrypt = 1;
+ pnetwork->network.Privacy = 1;
+ } else {
+ pnetwork->BcnInfo.encryp_protocol = ENCRYP_PROTOCOL_OPENSYS;
+ }
+ rtw_get_sec_ie(pnetwork->network.IEs, pnetwork->network.IELength, NULL, &rsn_len, NULL, &wpa_len);
+	RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("rtw_get_bcn_info: ssid =%s\n", pnetwork->network.Ssid.Ssid));
+	RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("rtw_get_bcn_info: wpa_len =%d rsn_len =%d\n", wpa_len, rsn_len));
+
+ if (rsn_len > 0) {
+ pnetwork->BcnInfo.encryp_protocol = ENCRYP_PROTOCOL_WPA2;
+ } else if (wpa_len > 0) {
+ pnetwork->BcnInfo.encryp_protocol = ENCRYP_PROTOCOL_WPA;
+ } else {
+ if (bencrypt)
+ pnetwork->BcnInfo.encryp_protocol = ENCRYP_PROTOCOL_WEP;
+ }
+	RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("rtw_get_bcn_info: pnetwork->encryp_protocol is %x\n",
+				pnetwork->BcnInfo.encryp_protocol));
+ rtw_get_cipher_info(pnetwork);
+
+ /* get bwmode and ch_offset */
+ /* parsing HT_CAP_IE */
+ p = rtw_get_ie(pnetwork->network.IEs + _FIXED_IE_LENGTH_, _HT_CAPABILITY_IE_, &len, pnetwork->network.IELength - _FIXED_IE_LENGTH_);
+ if (p && len > 0) {
+ pht_cap = (struct ieee80211_ht_cap *)(p + 2);
+ pnetwork->BcnInfo.ht_cap_info = le16_to_cpu(pht_cap->cap_info);
+ } else {
+ pnetwork->BcnInfo.ht_cap_info = 0;
+ }
+ /* parsing HT_INFO_IE */
+ p = rtw_get_ie(pnetwork->network.IEs + _FIXED_IE_LENGTH_, _HT_ADD_INFO_IE_, &len, pnetwork->network.IELength - _FIXED_IE_LENGTH_);
+ if (p && len > 0) {
+ pht_info = (struct HT_info_element *)(p + 2);
+ pnetwork->BcnInfo.ht_info_infos_0 = pht_info->infos[0];
+ } else {
+ pnetwork->BcnInfo.ht_info_infos_0 = 0;
+ }
+}
+
+/* show MCS rate, unit: 100Kbps */
+u16 rtw_mcs_rate(u8 rf_type, u8 bw_40MHz, u8 short_GI, unsigned char *MCS_rate)
+{
+ u16 max_rate = 0;
+
+ if (rf_type == RF_1T1R) {
+ if (MCS_rate[0] & BIT(7))
+ max_rate = (bw_40MHz) ? ((short_GI)?1500:1350):((short_GI)?722:650);
+ else if (MCS_rate[0] & BIT(6))
+ max_rate = (bw_40MHz) ? ((short_GI)?1350:1215):((short_GI)?650:585);
+ else if (MCS_rate[0] & BIT(5))
+ max_rate = (bw_40MHz) ? ((short_GI)?1200:1080):((short_GI)?578:520);
+ else if (MCS_rate[0] & BIT(4))
+ max_rate = (bw_40MHz) ? ((short_GI)?900:810):((short_GI)?433:390);
+ else if (MCS_rate[0] & BIT(3))
+ max_rate = (bw_40MHz) ? ((short_GI)?600:540):((short_GI)?289:260);
+ else if (MCS_rate[0] & BIT(2))
+ max_rate = (bw_40MHz) ? ((short_GI)?450:405):((short_GI)?217:195);
+ else if (MCS_rate[0] & BIT(1))
+ max_rate = (bw_40MHz) ? ((short_GI)?300:270):((short_GI)?144:130);
+ else if (MCS_rate[0] & BIT(0))
+ max_rate = (bw_40MHz) ? ((short_GI)?150:135):((short_GI)?72:65);
+ } else{
+ if (MCS_rate[1]) {
+ if (MCS_rate[1] & BIT(7))
+ max_rate = (bw_40MHz) ? ((short_GI)?3000:2700):((short_GI)?1444:1300);
+ else if (MCS_rate[1] & BIT(6))
+ max_rate = (bw_40MHz) ? ((short_GI)?2700:2430):((short_GI)?1300:1170);
+ else if (MCS_rate[1] & BIT(5))
+ max_rate = (bw_40MHz) ? ((short_GI)?2400:2160):((short_GI)?1156:1040);
+ else if (MCS_rate[1] & BIT(4))
+ max_rate = (bw_40MHz) ? ((short_GI)?1800:1620):((short_GI)?867:780);
+ else if (MCS_rate[1] & BIT(3))
+ max_rate = (bw_40MHz) ? ((short_GI)?1200:1080):((short_GI)?578:520);
+ else if (MCS_rate[1] & BIT(2))
+ max_rate = (bw_40MHz) ? ((short_GI)?900:810):((short_GI)?433:390);
+ else if (MCS_rate[1] & BIT(1))
+ max_rate = (bw_40MHz) ? ((short_GI)?600:540):((short_GI)?289:260);
+ else if (MCS_rate[1] & BIT(0))
+ max_rate = (bw_40MHz) ? ((short_GI)?300:270):((short_GI)?144:130);
+ } else{
+ if (MCS_rate[0] & BIT(7))
+ max_rate = (bw_40MHz) ? ((short_GI)?1500:1350):((short_GI)?722:650);
+ else if (MCS_rate[0] & BIT(6))
+ max_rate = (bw_40MHz) ? ((short_GI)?1350:1215):((short_GI)?650:585);
+ else if (MCS_rate[0] & BIT(5))
+ max_rate = (bw_40MHz) ? ((short_GI)?1200:1080):((short_GI)?578:520);
+ else if (MCS_rate[0] & BIT(4))
+ max_rate = (bw_40MHz) ? ((short_GI)?900:810):((short_GI)?433:390);
+ else if (MCS_rate[0] & BIT(3))
+ max_rate = (bw_40MHz) ? ((short_GI)?600:540):((short_GI)?289:260);
+ else if (MCS_rate[0] & BIT(2))
+ max_rate = (bw_40MHz) ? ((short_GI)?450:405):((short_GI)?217:195);
+ else if (MCS_rate[0] & BIT(1))
+ max_rate = (bw_40MHz) ? ((short_GI)?300:270):((short_GI)?144:130);
+ else if (MCS_rate[0] & BIT(0))
+ max_rate = (bw_40MHz) ? ((short_GI)?150:135):((short_GI)?72:65);
+ }
+ }
+ return max_rate;
+}
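+
+/*
+ * The values above are in 100 kbps units, e.g. for a 1T1R device advertising
+ * MCS 7 (MCS_rate[0] & BIT(7)) with a 40 MHz channel and short GI the
+ * function returns 1500, i.e. 150 Mbps.
+ */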
+
+int rtw_action_frame_parse(const u8 *frame, u32 frame_len, u8 *category, u8 *action)
+{
+ const u8 *frame_body = frame + sizeof(struct ieee80211_hdr_3addr);
+ u16 fc;
+ u8 c;
+ u8 a = ACT_PUBLIC_MAX;
+
+ fc = le16_to_cpu(((struct ieee80211_hdr_3addr *)frame)->frame_control);
+
+ if ((fc & (RTW_IEEE80211_FCTL_FTYPE|RTW_IEEE80211_FCTL_STYPE))
+ != (RTW_IEEE80211_FTYPE_MGMT|RTW_IEEE80211_STYPE_ACTION)
+ ) {
+ return false;
+ }
+
+ c = frame_body[0];
+
+ switch (c) {
+ case RTW_WLAN_CATEGORY_P2P: /* vendor-specific */
+ break;
+ default:
+ a = frame_body[1];
+ }
+
+ if (category)
+ *category = c;
+ if (action)
+ *action = a;
+
+ return true;
+}
+
+static const char *_action_public_str[] = {
+ "ACT_PUB_BSSCOEXIST",
+ "ACT_PUB_DSE_ENABLE",
+ "ACT_PUB_DSE_DEENABLE",
+ "ACT_PUB_DSE_REG_LOCATION",
+ "ACT_PUB_EXT_CHL_SWITCH",
+ "ACT_PUB_DSE_MSR_REQ",
+ "ACT_PUB_DSE_MSR_RPRT",
+ "ACT_PUB_MP",
+ "ACT_PUB_DSE_PWR_CONSTRAINT",
+ "ACT_PUB_VENDOR",
+ "ACT_PUB_GAS_INITIAL_REQ",
+ "ACT_PUB_GAS_INITIAL_RSP",
+ "ACT_PUB_GAS_COMEBACK_REQ",
+ "ACT_PUB_GAS_COMEBACK_RSP",
+ "ACT_PUB_TDLS_DISCOVERY_RSP",
+ "ACT_PUB_LOCATION_TRACK",
+ "ACT_PUB_RSVD",
+};
+
+const char *action_public_str(u8 action)
+{
+ action = (action >= ACT_PUBLIC_MAX) ? ACT_PUBLIC_MAX : action;
+ return _action_public_str[action];
+}
diff --git a/drivers/staging/rtl8723bs/core/rtw_io.c b/drivers/staging/rtl8723bs/core/rtw_io.c
new file mode 100644
index 000000000000..6bd5a4741e79
--- /dev/null
+++ b/drivers/staging/rtl8723bs/core/rtw_io.c
@@ -0,0 +1,203 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+/*
+
+The purpose of rtw_io.c
+
+a. provides the API
+
+b. provides the protocol engine
+
+c. provides the software interface between caller and the hardware interface
+
+
+Compiler Flag Option:
+
+1. CONFIG_SDIO_HCI:
+ a. USE_SYNC_IRP: Only sync operations are provided.
+	b. USE_ASYNC_IRP: Both sync/async operations are provided.
+
+jackson@realtek.com.tw
+
+*/
+
+#define _RTW_IO_C_
+
+#include <drv_types.h>
+#include <rtw_debug.h>
+
+#define rtw_le16_to_cpu(val) val
+#define rtw_le32_to_cpu(val) val
+#define rtw_cpu_to_le16(val) val
+#define rtw_cpu_to_le32(val) val
+
+u8 _rtw_read8(struct adapter *adapter, u32 addr)
+{
+ u8 r_val;
+ /* struct io_queue *pio_queue = (struct io_queue *)adapter->pio_queue; */
+ struct io_priv *pio_priv = &adapter->iopriv;
+ struct intf_hdl *pintfhdl = &(pio_priv->intf);
+ u8 (*_read8)(struct intf_hdl *pintfhdl, u32 addr);
+
+ _read8 = pintfhdl->io_ops._read8;
+
+ r_val = _read8(pintfhdl, addr);
+ return r_val;
+}
+
+u16 _rtw_read16(struct adapter *adapter, u32 addr)
+{
+ u16 r_val;
+ /* struct io_queue *pio_queue = (struct io_queue *)adapter->pio_queue; */
+ struct io_priv *pio_priv = &adapter->iopriv;
+ struct intf_hdl *pintfhdl = &(pio_priv->intf);
+ u16 (*_read16)(struct intf_hdl *pintfhdl, u32 addr);
+
+ _read16 = pintfhdl->io_ops._read16;
+
+ r_val = _read16(pintfhdl, addr);
+ return rtw_le16_to_cpu(r_val);
+}
+
+u32 _rtw_read32(struct adapter *adapter, u32 addr)
+{
+ u32 r_val;
+ /* struct io_queue *pio_queue = (struct io_queue *)adapter->pio_queue; */
+ struct io_priv *pio_priv = &adapter->iopriv;
+ struct intf_hdl *pintfhdl = &(pio_priv->intf);
+ u32 (*_read32)(struct intf_hdl *pintfhdl, u32 addr);
+
+ _read32 = pintfhdl->io_ops._read32;
+
+ r_val = _read32(pintfhdl, addr);
+ return rtw_le32_to_cpu(r_val);
+
+}
+
+int _rtw_write8(struct adapter *adapter, u32 addr, u8 val)
+{
+ /* struct io_queue *pio_queue = (struct io_queue *)adapter->pio_queue; */
+ struct io_priv *pio_priv = &adapter->iopriv;
+ struct intf_hdl *pintfhdl = &(pio_priv->intf);
+ int (*_write8)(struct intf_hdl *pintfhdl, u32 addr, u8 val);
+ int ret;
+
+ _write8 = pintfhdl->io_ops._write8;
+
+ ret = _write8(pintfhdl, addr, val);
+
+ return RTW_STATUS_CODE(ret);
+}
+int _rtw_write16(struct adapter *adapter, u32 addr, u16 val)
+{
+ /* struct io_queue *pio_queue = (struct io_queue *)adapter->pio_queue; */
+ struct io_priv *pio_priv = &adapter->iopriv;
+ struct intf_hdl *pintfhdl = &(pio_priv->intf);
+ int (*_write16)(struct intf_hdl *pintfhdl, u32 addr, u16 val);
+ int ret;
+
+ _write16 = pintfhdl->io_ops._write16;
+
+ ret = _write16(pintfhdl, addr, val);
+ return RTW_STATUS_CODE(ret);
+}
+int _rtw_write32(struct adapter *adapter, u32 addr, u32 val)
+{
+ /* struct io_queue *pio_queue = (struct io_queue *)adapter->pio_queue; */
+ struct io_priv *pio_priv = &adapter->iopriv;
+ struct intf_hdl *pintfhdl = &(pio_priv->intf);
+ int (*_write32)(struct intf_hdl *pintfhdl, u32 addr, u32 val);
+ int ret;
+
+ _write32 = pintfhdl->io_ops._write32;
+
+ ret = _write32(pintfhdl, addr, val);
+
+ return RTW_STATUS_CODE(ret);
+}
+
+u8 _rtw_sd_f0_read8(struct adapter *adapter, u32 addr)
+{
+ u8 r_val = 0x00;
+ struct io_priv *pio_priv = &adapter->iopriv;
+ struct intf_hdl *pintfhdl = &(pio_priv->intf);
+ u8 (*_sd_f0_read8)(struct intf_hdl *pintfhdl, u32 addr);
+
+ _sd_f0_read8 = pintfhdl->io_ops._sd_f0_read8;
+
+ if (_sd_f0_read8)
+ r_val = _sd_f0_read8(pintfhdl, addr);
+ else
+ DBG_871X_LEVEL(_drv_warning_, FUNC_ADPT_FMT" _sd_f0_read8 callback is NULL\n", FUNC_ADPT_ARG(adapter));
+
+ return r_val;
+}
+
+u32 _rtw_write_port(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem)
+{
+ u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *pmem);
+ struct io_priv *pio_priv = &adapter->iopriv;
+ struct intf_hdl *pintfhdl = &(pio_priv->intf);
+ u32 ret = _SUCCESS;
+
+ _write_port = pintfhdl->io_ops._write_port;
+
+ ret = _write_port(pintfhdl, addr, cnt, pmem);
+
+ return ret;
+}
+
+int rtw_init_io_priv(struct adapter *padapter, void (*set_intf_ops)(struct adapter *padapter, struct _io_ops *pops))
+{
+ struct io_priv *piopriv = &padapter->iopriv;
+ struct intf_hdl *pintf = &piopriv->intf;
+
+ if (set_intf_ops == NULL)
+ return _FAIL;
+
+ piopriv->padapter = padapter;
+ pintf->padapter = padapter;
+ pintf->pintf_dev = adapter_to_dvobj(padapter);
+
+ set_intf_ops(padapter, &pintf->io_ops);
+
+ return _SUCCESS;
+}
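+
+/*
+ * Sketch of the registration flow: a bus glue layer (e.g. the SDIO interface
+ * code) supplies a set_intf_ops callback that fills struct _io_ops with its
+ * _read8/_write8 style handlers, and rtw_init_io_priv() wires that table
+ * into adapter->iopriv so the _rtw_read8/16/32 and _rtw_write8/16/32
+ * wrappers above can dispatch to it.
+ */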
+
+/*
+ * Increment the continual_io_error counter of @dvobj and check whether it
+ * exceeds MAX_CONTINUAL_IO_ERR.
+ * Return: true if the limit is exceeded, false otherwise.
+ */
+int rtw_inc_and_chk_continual_io_error(struct dvobj_priv *dvobj)
+{
+ int ret = false;
+ int value = atomic_inc_return(&dvobj->continual_io_error);
+ if (value > MAX_CONTINUAL_IO_ERR) {
+ DBG_871X("[dvobj:%p][ERROR] continual_io_error:%d > %d\n", dvobj, value, MAX_CONTINUAL_IO_ERR);
+ ret = true;
+ } else {
+ /* DBG_871X("[dvobj:%p] continual_io_error:%d\n", dvobj, value); */
+ }
+ return ret;
+}
+
+/*
+ * Reset the continual_io_error counter of @dvobj to 0.
+ */
+void rtw_reset_continual_io_error(struct dvobj_priv *dvobj)
+{
+ atomic_set(&dvobj->continual_io_error, 0);
+}
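+
+/*
+ * Intended pairing (a sketch, not mandated by this file): bus I/O paths call
+ * rtw_inc_and_chk_continual_io_error() on each failed transfer, treat a true
+ * return as the point to stop trusting the device, and call
+ * rtw_reset_continual_io_error() after any successful transfer.
+ */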
diff --git a/drivers/staging/rtl8723bs/core/rtw_ioctl_set.c b/drivers/staging/rtl8723bs/core/rtw_ioctl_set.c
new file mode 100644
index 000000000000..e0793f8d329d
--- /dev/null
+++ b/drivers/staging/rtl8723bs/core/rtw_ioctl_set.c
@@ -0,0 +1,698 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#define _RTW_IOCTL_SET_C_
+
+#include <drv_types.h>
+#include <rtw_debug.h>
+
+#define IS_MAC_ADDRESS_BROADCAST(addr) \
+(\
+ ((addr[0] == 0xff) && (addr[1] == 0xff) && \
+ (addr[2] == 0xff) && (addr[3] == 0xff) && \
+ (addr[4] == 0xff) && (addr[5] == 0xff)) ? true : false \
+)
+
+u8 rtw_validate_bssid(u8 *bssid)
+{
+ u8 ret = true;
+
+ if (is_zero_mac_addr(bssid)
+ || is_broadcast_mac_addr(bssid)
+ || is_multicast_mac_addr(bssid)
+ ) {
+ ret = false;
+ }
+
+ return ret;
+}
+
+u8 rtw_validate_ssid(struct ndis_802_11_ssid *ssid)
+{
+ u8 ret = true;
+
+ if (ssid->SsidLength > 32) {
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("ssid length >32\n"));
+ ret = false;
+ goto exit;
+ }
+
+#ifdef CONFIG_VALIDATE_SSID
+ for (u32 i = 0; i < ssid->SsidLength; i++) {
+ /* wifi, printable ascii code must be supported */
+ if (!((ssid->Ssid[i] >= 0x20) && (ssid->Ssid[i] <= 0x7e))) {
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("ssid has non-printable ascii\n"));
+ ret = false;
+ break;
+ }
+ }
+#endif /* CONFIG_VALIDATE_SSID */
+
+exit:
+ return ret;
+}
+
+u8 rtw_do_join(struct adapter *padapter);
+u8 rtw_do_join(struct adapter *padapter)
+{
+ struct list_head *plist, *phead;
+ u8 *pibss = NULL;
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct __queue *queue = &(pmlmepriv->scanned_queue);
+ u8 ret = _SUCCESS;
+
+ spin_lock_bh(&(pmlmepriv->scanned_queue.lock));
+ phead = get_list_head(queue);
+ plist = get_next(phead);
+
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_, ("\n rtw_do_join: phead = %p; plist = %p\n\n\n", phead, plist));
+
+ pmlmepriv->cur_network.join_res = -2;
+
+ set_fwstate(pmlmepriv, _FW_UNDER_LINKING);
+
+ pmlmepriv->pscanned = plist;
+
+ pmlmepriv->to_join = true;
+
+ if (list_empty(&queue->queue)) {
+ spin_unlock_bh(&(pmlmepriv->scanned_queue.lock));
+ _clr_fwstate_(pmlmepriv, _FW_UNDER_LINKING);
+
+ /* set_ssid/set_bssid called rtw_do_join() but the scanned queue is empty, */
+ /* so try to issue a sitesurvey first */
+
+ if (pmlmepriv->LinkDetectInfo.bBusyTraffic == false
+ || rtw_to_roam(padapter) > 0
+ ) {
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_, ("rtw_do_join(): site survey if scanned_queue is empty\n."));
+ /* submit site_survey_cmd */
+ ret = rtw_sitesurvey_cmd(padapter, &pmlmepriv->assoc_ssid, 1, NULL, 0);
+ if (_SUCCESS != ret) {
+ pmlmepriv->to_join = false;
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("rtw_do_join(): site survey return error\n."));
+ }
+ } else {
+ pmlmepriv->to_join = false;
+ ret = _FAIL;
+ }
+
+ goto exit;
+ } else {
+ int select_ret;
+ spin_unlock_bh(&(pmlmepriv->scanned_queue.lock));
+ select_ret = rtw_select_and_join_from_scanned_queue(pmlmepriv);
+ if (select_ret == _SUCCESS) {
+ pmlmepriv->to_join = false;
+ _set_timer(&pmlmepriv->assoc_timer, MAX_JOIN_TIMEOUT);
+ } else {
+ if (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) == true) {
+ /* submit createbss_cmd to change to an ADHOC_MASTER */
+
+ /* pmlmepriv->lock has been acquired by caller... */
+ struct wlan_bssid_ex *pdev_network = &(padapter->registrypriv.dev_network);
+
+ pmlmepriv->fw_state = WIFI_ADHOC_MASTER_STATE;
+
+ pibss = padapter->registrypriv.dev_network.MacAddress;
+
+ memset(&pdev_network->Ssid, 0, sizeof(struct ndis_802_11_ssid));
+ memcpy(&pdev_network->Ssid, &pmlmepriv->assoc_ssid, sizeof(struct ndis_802_11_ssid));
+
+ rtw_update_registrypriv_dev_network(padapter);
+
+ rtw_generate_random_ibss(pibss);
+
+ if (rtw_createbss_cmd(padapter) != _SUCCESS) {
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("***Error =>do_goin: rtw_createbss_cmd status FAIL***\n "));
+ ret = false;
+ goto exit;
+ }
+
+ pmlmepriv->to_join = false;
+
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_, ("***Error => rtw_select_and_join_from_scanned_queue FAIL under STA_Mode***\n "));
+
+ } else {
+ /* can't associate; reset under-linking */
+ _clr_fwstate_(pmlmepriv, _FW_UNDER_LINKING);
+
+ /* set_ssid/set_bssid called rtw_do_join() but there is no desired bss in the scanned queue, */
+ /* so try to issue a sitesurvey first */
+ if (pmlmepriv->LinkDetectInfo.bBusyTraffic == false
+ || rtw_to_roam(padapter) > 0
+ ) {
+ /* DBG_871X("rtw_do_join() when no desired bss in scanning queue\n"); */
+ ret = rtw_sitesurvey_cmd(padapter, &pmlmepriv->assoc_ssid, 1, NULL, 0);
+ if (_SUCCESS != ret) {
+ pmlmepriv->to_join = false;
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("do_join(): site survey return error\n."));
+ }
+ } else {
+ ret = _FAIL;
+ pmlmepriv->to_join = false;
+ }
+ }
+
+ }
+
+ }
+
+exit:
+ return ret;
+}
+
+u8 rtw_set_802_11_bssid(struct adapter *padapter, u8 *bssid)
+{
+ u8 status = _SUCCESS;
+
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+
+ DBG_871X_LEVEL(_drv_always_, "set bssid:%pM\n", bssid);
+
+ if ((bssid[0] == 0x00 && bssid[1] == 0x00 && bssid[2] == 0x00 && bssid[3] == 0x00 && bssid[4] == 0x00 && bssid[5] == 0x00) ||
+ (bssid[0] == 0xFF && bssid[1] == 0xFF && bssid[2] == 0xFF && bssid[3] == 0xFF && bssid[4] == 0xFF && bssid[5] == 0xFF)) {
+ status = _FAIL;
+ goto exit;
+ }
+
+ spin_lock_bh(&pmlmepriv->lock);
+
+
+ DBG_871X("Set BSSID under fw_state = 0x%08x\n", get_fwstate(pmlmepriv));
+ if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY) == true) {
+ goto handle_tkip_countermeasure;
+ } else if (check_fwstate(pmlmepriv, _FW_UNDER_LINKING) == true) {
+ goto release_mlme_lock;
+ }
+
+ if (check_fwstate(pmlmepriv, _FW_LINKED|WIFI_ADHOC_MASTER_STATE) == true) {
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_, ("set_bssid: _FW_LINKED||WIFI_ADHOC_MASTER_STATE\n"));
+
+ if (!memcmp(&pmlmepriv->cur_network.network.MacAddress, bssid, ETH_ALEN)) {
+ if (check_fwstate(pmlmepriv, WIFI_STATION_STATE) == false)
+ goto release_mlme_lock;/* it means driver is in WIFI_ADHOC_MASTER_STATE, we needn't create bss again. */
+ } else {
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_, ("Set BSSID not the same bssid\n"));
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_, ("set_bssid ="MAC_FMT"\n", MAC_ARG(bssid)));
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_, ("cur_bssid ="MAC_FMT"\n", MAC_ARG(pmlmepriv->cur_network.network.MacAddress)));
+
+ rtw_disassoc_cmd(padapter, 0, true);
+
+ if (check_fwstate(pmlmepriv, _FW_LINKED) == true)
+ rtw_indicate_disconnect(padapter);
+
+ rtw_free_assoc_resources(padapter, 1);
+
+ if ((check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) == true)) {
+ _clr_fwstate_(pmlmepriv, WIFI_ADHOC_MASTER_STATE);
+ set_fwstate(pmlmepriv, WIFI_ADHOC_STATE);
+ }
+ }
+ }
+
+handle_tkip_countermeasure:
+ if (rtw_handle_tkip_countermeasure(padapter, __func__) == _FAIL) {
+ status = _FAIL;
+ goto release_mlme_lock;
+ }
+
+ memset(&pmlmepriv->assoc_ssid, 0, sizeof(struct ndis_802_11_ssid));
+ memcpy(&pmlmepriv->assoc_bssid, bssid, ETH_ALEN);
+ pmlmepriv->assoc_by_bssid = true;
+
+ if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY) == true) {
+ pmlmepriv->to_join = true;
+ } else {
+ status = rtw_do_join(padapter);
+ }
+
+release_mlme_lock:
+ spin_unlock_bh(&pmlmepriv->lock);
+
+exit:
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_,
+ ("rtw_set_802_11_bssid: status =%d\n", status));
+
+ return status;
+}
+
+u8 rtw_set_802_11_ssid(struct adapter *padapter, struct ndis_802_11_ssid *ssid)
+{
+ u8 status = _SUCCESS;
+
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct wlan_network *pnetwork = &pmlmepriv->cur_network;
+
+ DBG_871X_LEVEL(_drv_always_, "set ssid [%s] fw_state = 0x%08x\n",
+ ssid->Ssid, get_fwstate(pmlmepriv));
+
+ if (padapter->hw_init_completed == false) {
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_,
+ ("set_ssid: hw_init_completed ==false =>exit!!!\n"));
+ status = _FAIL;
+ goto exit;
+ }
+
+ spin_lock_bh(&pmlmepriv->lock);
+
+ DBG_871X("Set SSID under fw_state = 0x%08x\n", get_fwstate(pmlmepriv));
+ if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY) == true) {
+ goto handle_tkip_countermeasure;
+ } else if (check_fwstate(pmlmepriv, _FW_UNDER_LINKING) == true) {
+ goto release_mlme_lock;
+ }
+
+ if (check_fwstate(pmlmepriv, _FW_LINKED|WIFI_ADHOC_MASTER_STATE) == true) {
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_,
+ ("set_ssid: _FW_LINKED||WIFI_ADHOC_MASTER_STATE\n"));
+
+ if ((pmlmepriv->assoc_ssid.SsidLength == ssid->SsidLength) &&
+ (!memcmp(&pmlmepriv->assoc_ssid.Ssid, ssid->Ssid, ssid->SsidLength))) {
+ if ((check_fwstate(pmlmepriv, WIFI_STATION_STATE) == false)) {
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_,
+ ("Set SSID is the same ssid, fw_state = 0x%08x\n",
+ get_fwstate(pmlmepriv)));
+
+ if (rtw_is_same_ibss(padapter, pnetwork) == false) {
+ /* if in WIFI_ADHOC_MASTER_STATE | WIFI_ADHOC_STATE, create bss or rejoin again */
+ rtw_disassoc_cmd(padapter, 0, true);
+
+ if (check_fwstate(pmlmepriv, _FW_LINKED) == true)
+ rtw_indicate_disconnect(padapter);
+
+ rtw_free_assoc_resources(padapter, 1);
+
+ if (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) == true) {
+ _clr_fwstate_(pmlmepriv, WIFI_ADHOC_MASTER_STATE);
+ set_fwstate(pmlmepriv, WIFI_ADHOC_STATE);
+ }
+ } else {
+ goto release_mlme_lock;/* it means driver is in WIFI_ADHOC_MASTER_STATE, we needn't create bss again. */
+ }
+ } else {
+ rtw_lps_ctrl_wk_cmd(padapter, LPS_CTRL_JOINBSS, 1);
+ }
+ } else {
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_, ("Set SSID not the same ssid\n"));
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_, ("set_ssid =[%s] len = 0x%x\n", ssid->Ssid, (unsigned int)ssid->SsidLength));
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_, ("assoc_ssid =[%s] len = 0x%x\n", pmlmepriv->assoc_ssid.Ssid, (unsigned int)pmlmepriv->assoc_ssid.SsidLength));
+
+ rtw_disassoc_cmd(padapter, 0, true);
+
+ if (check_fwstate(pmlmepriv, _FW_LINKED) == true)
+ rtw_indicate_disconnect(padapter);
+
+ rtw_free_assoc_resources(padapter, 1);
+
+ if (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) == true) {
+ _clr_fwstate_(pmlmepriv, WIFI_ADHOC_MASTER_STATE);
+ set_fwstate(pmlmepriv, WIFI_ADHOC_STATE);
+ }
+ }
+ }
+
+handle_tkip_countermeasure:
+ if (rtw_handle_tkip_countermeasure(padapter, __func__) == _FAIL) {
+ status = _FAIL;
+ goto release_mlme_lock;
+ }
+
+ if (rtw_validate_ssid(ssid) == false) {
+ status = _FAIL;
+ goto release_mlme_lock;
+ }
+
+ memcpy(&pmlmepriv->assoc_ssid, ssid, sizeof(struct ndis_802_11_ssid));
+ pmlmepriv->assoc_by_bssid = false;
+
+ if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY) == true) {
+ pmlmepriv->to_join = true;
+ } else {
+ status = rtw_do_join(padapter);
+ }
+
+release_mlme_lock:
+ spin_unlock_bh(&pmlmepriv->lock);
+
+exit:
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_,
+ ("-rtw_set_802_11_ssid: status =%d\n", status));
+
+ return status;
+}
+
+u8 rtw_set_802_11_connect(struct adapter *padapter, u8 *bssid, struct ndis_802_11_ssid *ssid)
+{
+ u8 status = _SUCCESS;
+ bool bssid_valid = true;
+ bool ssid_valid = true;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+
+ if (!ssid || rtw_validate_ssid(ssid) == false)
+ ssid_valid = false;
+
+ if (!bssid || rtw_validate_bssid(bssid) == false)
+ bssid_valid = false;
+
+ if (ssid_valid == false && bssid_valid == false) {
+ DBG_871X(FUNC_ADPT_FMT" ssid:%p, ssid_valid:%d, bssid:%p, bssid_valid:%d\n",
+ FUNC_ADPT_ARG(padapter), ssid, ssid_valid, bssid, bssid_valid);
+ status = _FAIL;
+ goto exit;
+ }
+
+ if (padapter->hw_init_completed == false) {
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_,
+ ("set_ssid: hw_init_completed ==false =>exit!!!\n"));
+ status = _FAIL;
+ goto exit;
+ }
+
+ spin_lock_bh(&pmlmepriv->lock);
+
+ DBG_871X_LEVEL(_drv_always_, FUNC_ADPT_FMT" fw_state = 0x%08x\n",
+ FUNC_ADPT_ARG(padapter), get_fwstate(pmlmepriv));
+
+ if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY) == true) {
+ goto handle_tkip_countermeasure;
+ } else if (check_fwstate(pmlmepriv, _FW_UNDER_LINKING) == true) {
+ goto release_mlme_lock;
+ }
+
+handle_tkip_countermeasure:
+ if (rtw_handle_tkip_countermeasure(padapter, __func__) == _FAIL) {
+ status = _FAIL;
+ goto release_mlme_lock;
+ }
+
+ if (ssid && ssid_valid)
+ memcpy(&pmlmepriv->assoc_ssid, ssid, sizeof(struct ndis_802_11_ssid));
+ else
+ memset(&pmlmepriv->assoc_ssid, 0, sizeof(struct ndis_802_11_ssid));
+
+ if (bssid && bssid_valid) {
+ memcpy(&pmlmepriv->assoc_bssid, bssid, ETH_ALEN);
+ pmlmepriv->assoc_by_bssid = true;
+ } else {
+ pmlmepriv->assoc_by_bssid = false;
+ }
+
+ if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY) == true) {
+ pmlmepriv->to_join = true;
+ } else {
+ status = rtw_do_join(padapter);
+ }
+
+release_mlme_lock:
+ spin_unlock_bh(&pmlmepriv->lock);
+
+exit:
+ return status;
+}
+
+u8 rtw_set_802_11_infrastructure_mode(struct adapter *padapter,
+ enum NDIS_802_11_NETWORK_INFRASTRUCTURE networktype)
+{
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct wlan_network *cur_network = &pmlmepriv->cur_network;
+ enum NDIS_802_11_NETWORK_INFRASTRUCTURE *pold_state = &(cur_network->network.InfrastructureMode);
+
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_notice_,
+ ("+rtw_set_802_11_infrastructure_mode: old =%d new =%d fw_state = 0x%08x\n",
+ *pold_state, networktype, get_fwstate(pmlmepriv)));
+
+ if (*pold_state != networktype) {
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_, (" change mode!"));
+ /* DBG_871X("change mode, old_mode =%d, new_mode =%d, fw_state = 0x%x\n", *pold_state, networktype, get_fwstate(pmlmepriv)); */
+
+ if (*pold_state == Ndis802_11APMode) {
+ /* change to other mode from Ndis802_11APMode */
+ cur_network->join_res = -1;
+
+ stop_ap_mode(padapter);
+ }
+
+ spin_lock_bh(&pmlmepriv->lock);
+
+ if ((check_fwstate(pmlmepriv, _FW_LINKED) == true) || (*pold_state == Ndis802_11IBSS))
+ rtw_disassoc_cmd(padapter, 0, true);
+
+ if ((check_fwstate(pmlmepriv, _FW_LINKED) == true) ||
+ (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) == true))
+ rtw_free_assoc_resources(padapter, 1);
+
+ if ((*pold_state == Ndis802_11Infrastructure) || (*pold_state == Ndis802_11IBSS)) {
+ if (check_fwstate(pmlmepriv, _FW_LINKED) == true) {
+ rtw_indicate_disconnect(padapter); /* will clear Linked_state; before this function, we must have checked whether to issue disassoc_cmd or not */
+ }
+ }
+
+ *pold_state = networktype;
+
+ _clr_fwstate_(pmlmepriv, ~WIFI_NULL_STATE);
+
+ switch (networktype) {
+ case Ndis802_11IBSS:
+ set_fwstate(pmlmepriv, WIFI_ADHOC_STATE);
+ break;
+
+ case Ndis802_11Infrastructure:
+ set_fwstate(pmlmepriv, WIFI_STATION_STATE);
+ break;
+
+ case Ndis802_11APMode:
+ set_fwstate(pmlmepriv, WIFI_AP_STATE);
+ start_ap_mode(padapter);
+ /* rtw_indicate_connect(padapter); */
+
+ break;
+
+ case Ndis802_11AutoUnknown:
+ case Ndis802_11InfrastructureMax:
+ break;
+ }
+
+ /* SecClearAllKeys(adapter); */
+
+ /* RT_TRACE(COMP_OID_SET, DBG_LOUD, ("set_infrastructure: fw_state:%x after changing mode\n", */
+ /* get_fwstate(pmlmepriv))); */
+
+ spin_unlock_bh(&pmlmepriv->lock);
+ }
+ return true;
+}
+
+
+u8 rtw_set_802_11_disassociate(struct adapter *padapter)
+{
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+
+ spin_lock_bh(&pmlmepriv->lock);
+
+ if (check_fwstate(pmlmepriv, _FW_LINKED) == true) {
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_, ("MgntActrtw_set_802_11_disassociate: rtw_indicate_disconnect\n"));
+
+ rtw_disassoc_cmd(padapter, 0, true);
+ rtw_indicate_disconnect(padapter);
+ /* modified for CONFIG_IEEE80211W; non-11w can also use it */
+ rtw_free_assoc_resources_cmd(padapter);
+ if (_FAIL == rtw_pwr_wakeup(padapter))
+ DBG_871X("%s(): rtw_pwr_wakeup fail !!!\n", __func__);
+ }
+
+ spin_unlock_bh(&pmlmepriv->lock);
+
+ return true;
+}
+
+u8 rtw_set_802_11_bssid_list_scan(struct adapter *padapter, struct ndis_802_11_ssid *pssid, int ssid_max_num)
+{
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ u8 res = true;
+
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("+rtw_set_802_11_bssid_list_scan(), fw_state =%x\n", get_fwstate(pmlmepriv)));
+
+ if (padapter == NULL) {
+ res = false;
+ goto exit;
+ }
+ if (padapter->hw_init_completed == false) {
+ res = false;
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("\n ===rtw_set_802_11_bssid_list_scan:hw_init_completed ==false ===\n"));
+ goto exit;
+ }
+
+ if ((check_fwstate(pmlmepriv, _FW_UNDER_SURVEY|_FW_UNDER_LINKING) == true) ||
+ (pmlmepriv->LinkDetectInfo.bBusyTraffic == true)) {
+ /* Scan or linking is in progress, do nothing. */
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("rtw_set_802_11_bssid_list_scan fail since fw_state = %x\n", get_fwstate(pmlmepriv)));
+ res = true;
+
+ if (check_fwstate(pmlmepriv, (_FW_UNDER_SURVEY|_FW_UNDER_LINKING)) == true) {
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("\n###_FW_UNDER_SURVEY|_FW_UNDER_LINKING\n\n"));
+ } else {
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("\n###pmlmepriv->sitesurveyctrl.traffic_busy ==true\n\n"));
+ }
+ } else {
+ if (rtw_is_scan_deny(padapter)) {
+ DBG_871X(FUNC_ADPT_FMT": scan deny\n", FUNC_ADPT_ARG(padapter));
+ indicate_wx_scan_complete_event(padapter);
+ return _SUCCESS;
+ }
+
+ spin_lock_bh(&pmlmepriv->lock);
+
+ res = rtw_sitesurvey_cmd(padapter, pssid, ssid_max_num, NULL, 0);
+
+ spin_unlock_bh(&pmlmepriv->lock);
+ }
+exit:
+
+ return res;
+}
+
+u8 rtw_set_802_11_authentication_mode(struct adapter *padapter, enum NDIS_802_11_AUTHENTICATION_MODE authmode)
+{
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
+ int res;
+ u8 ret;
+
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_, ("set_802_11_auth.mode(): mode =%x\n", authmode));
+
+ psecuritypriv->ndisauthtype = authmode;
+
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_, ("rtw_set_802_11_authentication_mode:psecuritypriv->ndisauthtype =%d", psecuritypriv->ndisauthtype));
+
+ if (psecuritypriv->ndisauthtype > 3)
+ psecuritypriv->dot11AuthAlgrthm = dot11AuthAlgrthm_8021X;
+
+ res = rtw_set_auth(padapter, psecuritypriv);
+
+ if (res == _SUCCESS)
+ ret = true;
+ else
+ ret = false;
+
+ return ret;
+}
+
+u8 rtw_set_802_11_add_wep(struct adapter *padapter, struct ndis_802_11_wep *wep)
+{
+
+ u8 bdefaultkey;
+ u8 btransmitkey;
+ sint keyid, res;
+ struct security_priv *psecuritypriv = &(padapter->securitypriv);
+ u8 ret = _SUCCESS;
+
+ bdefaultkey = (wep->KeyIndex & 0x40000000) > 0 ? false : true; /* for ??? */
+ btransmitkey = (wep->KeyIndex & 0x80000000) > 0 ? true : false; /* for ??? */
+ keyid = wep->KeyIndex & 0x3fffffff;
+
+ if (keyid >= 4) {
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("MgntActrtw_set_802_11_add_wep:keyid>4 =>fail\n"));
+ ret = false;
+ goto exit;
+ }
+
+ switch (wep->KeyLength) {
+ case 5:
+ psecuritypriv->dot11PrivacyAlgrthm = _WEP40_;
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_, ("MgntActrtw_set_802_11_add_wep:wep->KeyLength =5\n"));
+ break;
+ case 13:
+ psecuritypriv->dot11PrivacyAlgrthm = _WEP104_;
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_, ("MgntActrtw_set_802_11_add_wep:wep->KeyLength = 13\n"));
+ break;
+ default:
+ psecuritypriv->dot11PrivacyAlgrthm = _NO_PRIVACY_;
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_, ("MgntActrtw_set_802_11_add_wep:wep->KeyLength!=5 or 13\n"));
+ break;
+ }
+
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_,
+ ("rtw_set_802_11_add_wep:before memcpy, wep->KeyLength = 0x%x wep->KeyIndex = 0x%x keyid =%x\n",
+ wep->KeyLength, wep->KeyIndex, keyid));
+
+ memcpy(&(psecuritypriv->dot11DefKey[keyid].skey[0]), &(wep->KeyMaterial), wep->KeyLength);
+
+ psecuritypriv->dot11DefKeylen[keyid] = wep->KeyLength;
+
+ psecuritypriv->dot11PrivacyKeyIndex = keyid;
+
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_, ("rtw_set_802_11_add_wep:security key material : %x %x %x %x %x %x %x %x %x %x %x %x %x\n",
+ psecuritypriv->dot11DefKey[keyid].skey[0], psecuritypriv->dot11DefKey[keyid].skey[1], psecuritypriv->dot11DefKey[keyid].skey[2],
+ psecuritypriv->dot11DefKey[keyid].skey[3], psecuritypriv->dot11DefKey[keyid].skey[4], psecuritypriv->dot11DefKey[keyid].skey[5],
+ psecuritypriv->dot11DefKey[keyid].skey[6], psecuritypriv->dot11DefKey[keyid].skey[7], psecuritypriv->dot11DefKey[keyid].skey[8],
+ psecuritypriv->dot11DefKey[keyid].skey[9], psecuritypriv->dot11DefKey[keyid].skey[10], psecuritypriv->dot11DefKey[keyid].skey[11],
+ psecuritypriv->dot11DefKey[keyid].skey[12]));
+
+ res = rtw_set_key(padapter, psecuritypriv, keyid, 1, true);
+
+ if (res == _FAIL)
+ ret = false;
+exit:
+
+ return ret;
+}
+
+/*
+* rtw_get_cur_max_rate - get the current maximum supported rate
+* @adapter: pointer to struct adapter structure
+*
+* Return the current max rate in units of 100Kbps, or 0 on failure
+*/
+u16 rtw_get_cur_max_rate(struct adapter *adapter)
+{
+ int i = 0;
+ u16 rate = 0, max_rate = 0;
+ struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
+ struct wlan_bssid_ex *pcur_bss = &pmlmepriv->cur_network.network;
+ struct sta_info *psta = NULL;
+ u8 short_GI = 0;
+ u8 rf_type = 0;
+
+ if ((check_fwstate(pmlmepriv, _FW_LINKED) != true)
+ && (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) != true))
+ return 0;
+
+ psta = rtw_get_stainfo(&adapter->stapriv, get_bssid(pmlmepriv));
+ if (psta == NULL)
+ return 0;
+
+ short_GI = query_ra_short_GI(psta);
+
+ if (IsSupportedHT(psta->wireless_mode)) {
+ rtw_hal_get_hwreg(adapter, HW_VAR_RF_TYPE, (u8 *)(&rf_type));
+
+ max_rate = rtw_mcs_rate(
+ rf_type,
+ ((psta->bw_mode == CHANNEL_WIDTH_40) ? 1 : 0),
+ short_GI,
+ psta->htpriv.ht_cap.supp_mcs_set
+ );
+ } else {
+ while ((pcur_bss->SupportedRates[i] != 0) && (pcur_bss->SupportedRates[i] != 0xFF)) {
+ rate = pcur_bss->SupportedRates[i]&0x7F;
+ if (rate > max_rate)
+ max_rate = rate;
+ i++;
+ }
+
+ max_rate = max_rate*10/2;
+ }
+
+ return max_rate;
+}
diff --git a/drivers/staging/rtl8723bs/core/rtw_mlme.c b/drivers/staging/rtl8723bs/core/rtw_mlme.c
new file mode 100644
index 000000000000..9e355734f0c0
--- /dev/null
+++ b/drivers/staging/rtl8723bs/core/rtw_mlme.c
@@ -0,0 +1,3150 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#define _RTW_MLME_C_
+
+#include <drv_types.h>
+#include <rtw_debug.h>
+#include <linux/jiffies.h>
+
+extern u8 rtw_do_join(struct adapter *padapter);
+
+sint _rtw_init_mlme_priv(struct adapter *padapter)
+{
+ sint i;
+ u8 *pbuf;
+ struct wlan_network *pnetwork;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ sint res = _SUCCESS;
+
+ /* We don't need to memset padapter->XXX to zero, because adapter is allocated by vzalloc(). */
+ /* memset((u8 *)pmlmepriv, 0, sizeof(struct mlme_priv)); */
+
+ pmlmepriv->nic_hdl = (u8 *)padapter;
+
+ pmlmepriv->pscanned = NULL;
+ pmlmepriv->fw_state = WIFI_STATION_STATE; /* Must sync with rtw_wdev_alloc() */
+ /* wdev->iftype = NL80211_IFTYPE_STATION */
+ pmlmepriv->cur_network.network.InfrastructureMode = Ndis802_11AutoUnknown;
+ pmlmepriv->scan_mode = SCAN_ACTIVE;/* 1: active, 0: passive. Maybe someday we should rename this variable to "active_mode" (Jeff) */
+
+ spin_lock_init(&(pmlmepriv->lock));
+ _rtw_init_queue(&(pmlmepriv->free_bss_pool));
+ _rtw_init_queue(&(pmlmepriv->scanned_queue));
+
+ set_scanned_network_val(pmlmepriv, 0);
+
+ memset(&pmlmepriv->assoc_ssid, 0, sizeof(struct ndis_802_11_ssid));
+
+ pbuf = vzalloc(MAX_BSS_CNT * (sizeof(struct wlan_network)));
+
+ if (pbuf == NULL) {
+ res = _FAIL;
+ goto exit;
+ }
+ pmlmepriv->free_bss_buf = pbuf;
+
+ pnetwork = (struct wlan_network *)pbuf;
+
+ for (i = 0; i < MAX_BSS_CNT; i++) {
+ INIT_LIST_HEAD(&(pnetwork->list));
+
+ list_add_tail(&(pnetwork->list), &(pmlmepriv->free_bss_pool.queue));
+
+ pnetwork++;
+ }
+
+ /* allocate DMA-able/Non-Page memory for cmd_buf and rsp_buf */
+
+ rtw_clear_scan_deny(padapter);
+
+ #define RTW_ROAM_SCAN_RESULT_EXP_MS 5000
+ #define RTW_ROAM_RSSI_DIFF_TH 10
+ #define RTW_ROAM_SCAN_INTERVAL_MS 10000
+
+ pmlmepriv->roam_flags = 0
+ | RTW_ROAM_ON_EXPIRED
+ | RTW_ROAM_ON_RESUME
+ #ifdef CONFIG_LAYER2_ROAMING_ACTIVE /* FIXME */
+ | RTW_ROAM_ACTIVE
+ #endif
+ ;
+
+ pmlmepriv->roam_scanr_exp_ms = RTW_ROAM_SCAN_RESULT_EXP_MS;
+ pmlmepriv->roam_rssi_diff_th = RTW_ROAM_RSSI_DIFF_TH;
+ pmlmepriv->roam_scan_int_ms = RTW_ROAM_SCAN_INTERVAL_MS;
+
+ rtw_init_mlme_timer(padapter);
+
+exit:
+
+ return res;
+}
+
+static void rtw_free_mlme_ie_data(u8 **ppie, u32 *plen)
+{
+ if (*ppie) {
+ kfree(*ppie);
+ *plen = 0;
+ *ppie = NULL;
+ }
+}
+
+void rtw_free_mlme_priv_ie_data(struct mlme_priv *pmlmepriv)
+{
+ rtw_buf_free(&pmlmepriv->assoc_req, &pmlmepriv->assoc_req_len);
+ rtw_buf_free(&pmlmepriv->assoc_rsp, &pmlmepriv->assoc_rsp_len);
+ rtw_free_mlme_ie_data(&pmlmepriv->wps_beacon_ie, &pmlmepriv->wps_beacon_ie_len);
+ rtw_free_mlme_ie_data(&pmlmepriv->wps_probe_req_ie, &pmlmepriv->wps_probe_req_ie_len);
+ rtw_free_mlme_ie_data(&pmlmepriv->wps_probe_resp_ie, &pmlmepriv->wps_probe_resp_ie_len);
+ rtw_free_mlme_ie_data(&pmlmepriv->wps_assoc_resp_ie, &pmlmepriv->wps_assoc_resp_ie_len);
+
+ rtw_free_mlme_ie_data(&pmlmepriv->p2p_beacon_ie, &pmlmepriv->p2p_beacon_ie_len);
+ rtw_free_mlme_ie_data(&pmlmepriv->p2p_probe_req_ie, &pmlmepriv->p2p_probe_req_ie_len);
+ rtw_free_mlme_ie_data(&pmlmepriv->p2p_probe_resp_ie, &pmlmepriv->p2p_probe_resp_ie_len);
+ rtw_free_mlme_ie_data(&pmlmepriv->p2p_go_probe_resp_ie, &pmlmepriv->p2p_go_probe_resp_ie_len);
+ rtw_free_mlme_ie_data(&pmlmepriv->p2p_assoc_req_ie, &pmlmepriv->p2p_assoc_req_ie_len);
+}
+
+void _rtw_free_mlme_priv(struct mlme_priv *pmlmepriv)
+{
+ rtw_free_mlme_priv_ie_data(pmlmepriv);
+
+ if (pmlmepriv) {
+ if (pmlmepriv->free_bss_buf) {
+ vfree(pmlmepriv->free_bss_buf);
+ }
+ }
+}
+
+/*
+struct wlan_network *_rtw_dequeue_network(struct __queue *queue)
+{
+ _irqL irqL;
+
+ struct wlan_network *pnetwork;
+
+ spin_lock_bh(&queue->lock);
+
+ if (list_empty(&queue->queue))
+
+ pnetwork = NULL;
+
+ else
+ {
+ pnetwork = LIST_CONTAINOR(get_next(&queue->queue), struct wlan_network, list);
+
+ list_del_init(&(pnetwork->list));
+ }
+
+ spin_unlock_bh(&queue->lock);
+
+ return pnetwork;
+}
+*/
+
+struct wlan_network *_rtw_alloc_network(struct mlme_priv *pmlmepriv)/* _queue *free_queue) */
+{
+ struct wlan_network *pnetwork;
+ struct __queue *free_queue = &pmlmepriv->free_bss_pool;
+ struct list_head *plist = NULL;
+
+ spin_lock_bh(&free_queue->lock);
+
+ if (list_empty(&free_queue->queue)) {
+ pnetwork = NULL;
+ goto exit;
+ }
+ plist = get_next(&(free_queue->queue));
+
+ pnetwork = LIST_CONTAINOR(plist, struct wlan_network, list);
+
+ list_del_init(&pnetwork->list);
+
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("_rtw_alloc_network: ptr =%p\n", plist));
+ pnetwork->network_type = 0;
+ pnetwork->fixed = false;
+ pnetwork->last_scanned = jiffies;
+ pnetwork->aid = 0;
+ pnetwork->join_res = 0;
+
+ pmlmepriv->num_of_scanned++;
+
+exit:
+ spin_unlock_bh(&free_queue->lock);
+
+ return pnetwork;
+}
+
+void _rtw_free_network(struct mlme_priv *pmlmepriv, struct wlan_network *pnetwork, u8 isfreeall)
+{
+ unsigned int delta_time;
+ u32 lifetime = SCANQUEUE_LIFETIME;
+/* _irqL irqL; */
+ struct __queue *free_queue = &(pmlmepriv->free_bss_pool);
+
+ if (pnetwork == NULL)
+ return;
+
+ if (pnetwork->fixed == true)
+ return;
+
+ if ((check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) == true) ||
+ (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) == true))
+ lifetime = 1;
+
+ if (!isfreeall) {
+ delta_time = jiffies_to_msecs(jiffies - pnetwork->last_scanned);
+ if (delta_time < lifetime)/* unit:msec */
+ return;
+ }
+
+ spin_lock_bh(&free_queue->lock);
+
+ list_del_init(&(pnetwork->list));
+
+ list_add_tail(&(pnetwork->list), &(free_queue->queue));
+
+ pmlmepriv->num_of_scanned--;
+
+
+ /* DBG_871X("_rtw_free_network:SSID =%s\n", pnetwork->network.Ssid.Ssid); */
+
+ spin_unlock_bh(&free_queue->lock);
+}
+
+void _rtw_free_network_nolock(struct mlme_priv *pmlmepriv, struct wlan_network *pnetwork)
+{
+
+ struct __queue *free_queue = &(pmlmepriv->free_bss_pool);
+
+ if (pnetwork == NULL)
+ return;
+
+ if (pnetwork->fixed == true)
+ return;
+
+ /* spin_lock_irqsave(&free_queue->lock, irqL); */
+
+ list_del_init(&(pnetwork->list));
+
+ list_add_tail(&(pnetwork->list), get_list_head(free_queue));
+
+ pmlmepriv->num_of_scanned--;
+
+ /* spin_unlock_irqrestore(&free_queue->lock, irqL); */
+}
+
+/*
+ return the wlan_network with the matching addr
+
+ Shall be called under atomic context... to avoid a possible race condition...
+*/
+struct wlan_network *_rtw_find_network(struct __queue *scanned_queue, u8 *addr)
+{
+ struct list_head *phead, *plist;
+ struct wlan_network *pnetwork = NULL;
+ u8 zero_addr[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
+
+ if (!memcmp(zero_addr, addr, ETH_ALEN)) {
+ pnetwork = NULL;
+ goto exit;
+ }
+
+ /* spin_lock_bh(&scanned_queue->lock); */
+
+ phead = get_list_head(scanned_queue);
+ plist = get_next(phead);
+
+ while (plist != phead) {
+ pnetwork = LIST_CONTAINOR(plist, struct wlan_network, list);
+
+ if (!memcmp(addr, pnetwork->network.MacAddress, ETH_ALEN))
+ break;
+
+ plist = get_next(plist);
+ }
+
+ if (plist == phead)
+ pnetwork = NULL;
+
+ /* spin_unlock_bh(&scanned_queue->lock); */
+
+exit:
+ return pnetwork;
+}
+
+void _rtw_free_network_queue(struct adapter *padapter, u8 isfreeall)
+{
+ struct list_head *phead, *plist;
+ struct wlan_network *pnetwork;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct __queue *scanned_queue = &pmlmepriv->scanned_queue;
+
+ spin_lock_bh(&scanned_queue->lock);
+
+ phead = get_list_head(scanned_queue);
+ plist = get_next(phead);
+
+ while (phead != plist) {
+
+ pnetwork = LIST_CONTAINOR(plist, struct wlan_network, list);
+
+ plist = get_next(plist);
+
+ _rtw_free_network(pmlmepriv, pnetwork, isfreeall);
+
+ }
+
+ spin_unlock_bh(&scanned_queue->lock);
+}
+
+
+
+
+sint rtw_if_up(struct adapter *padapter)
+{
+
+ sint res;
+
+ if (padapter->bDriverStopped || padapter->bSurpriseRemoved ||
+ (check_fwstate(&padapter->mlmepriv, _FW_LINKED) == false)) {
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("rtw_if_up:bDriverStopped(%d) OR bSurpriseRemoved(%d)", padapter->bDriverStopped, padapter->bSurpriseRemoved));
+ res = false;
+ } else
+ res = true;
+ return res;
+}
+
+
+void rtw_generate_random_ibss(u8 *pibss)
+{
+ unsigned long curtime = jiffies;
+
+ pibss[0] = 0x02; /* in ad-hoc mode bit 1 must be set to 1 */
+ pibss[1] = 0x11;
+ pibss[2] = 0x87;
+ pibss[3] = (u8)(curtime & 0xff) ;/* p[0]; */
+ pibss[4] = (u8)((curtime>>8) & 0xff) ;/* p[1]; */
+ pibss[5] = (u8)((curtime>>16) & 0xff) ;/* p[2]; */
+ return;
+}
+
+u8 *rtw_get_capability_from_ie(u8 *ie)
+{
+ return (ie + 8 + 2);
+}
+
+
+u16 rtw_get_capability(struct wlan_bssid_ex *bss)
+{
+ __le16 val;
+
+ memcpy((u8 *)&val, rtw_get_capability_from_ie(bss->IEs), 2);
+
+ return le16_to_cpu(val);
+}
+
+u8 *rtw_get_beacon_interval_from_ie(u8 *ie)
+{
+ return (ie + 8);
+}
+
+
+int rtw_init_mlme_priv(struct adapter *padapter)/* struct mlme_priv *pmlmepriv) */
+{
+ int res;
+
+ res = _rtw_init_mlme_priv(padapter);/* (pmlmepriv); */
+ return res;
+}
+
+void rtw_free_mlme_priv(struct mlme_priv *pmlmepriv)
+{
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("rtw_free_mlme_priv\n"));
+ _rtw_free_mlme_priv(pmlmepriv);
+}
+
+/*
+static struct wlan_network *rtw_dequeue_network(struct __queue *queue)
+{
+ struct wlan_network *pnetwork;
+
+ pnetwork = _rtw_dequeue_network(queue);
+ return pnetwork;
+}
+*/
+
+struct wlan_network *rtw_alloc_network(struct mlme_priv *pmlmepriv);
+struct wlan_network *rtw_alloc_network(struct mlme_priv *pmlmepriv)/* _queue *free_queue) */
+{
+ struct wlan_network *pnetwork;
+
+ pnetwork = _rtw_alloc_network(pmlmepriv);
+ return pnetwork;
+}
+
+void rtw_free_network_nolock(struct adapter *padapter, struct wlan_network *pnetwork);
+void rtw_free_network_nolock(struct adapter *padapter, struct wlan_network *pnetwork)
+{
+ /* RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("rtw_free_network ==> ssid = %s\n\n" , pnetwork->network.Ssid.Ssid)); */
+ _rtw_free_network_nolock(&(padapter->mlmepriv), pnetwork);
+ rtw_cfg80211_unlink_bss(padapter, pnetwork);
+}
+
+
+void rtw_free_network_queue(struct adapter *dev, u8 isfreeall)
+{
+ _rtw_free_network_queue(dev, isfreeall);
+}
+
+/*
+ return the wlan_network with the matching addr
+
+ Shall be called under atomic context... to avoid a possible race condition...
+*/
+struct wlan_network *rtw_find_network(struct __queue *scanned_queue, u8 *addr)
+{
+ struct wlan_network *pnetwork = _rtw_find_network(scanned_queue, addr);
+
+ return pnetwork;
+}
+
+int rtw_is_same_ibss(struct adapter *adapter, struct wlan_network *pnetwork)
+{
+ int ret = true;
+ struct security_priv *psecuritypriv = &adapter->securitypriv;
+
+ if ((psecuritypriv->dot11PrivacyAlgrthm != _NO_PRIVACY_) &&
+ (pnetwork->network.Privacy == 0))
+ ret = false;
+ else if ((psecuritypriv->dot11PrivacyAlgrthm == _NO_PRIVACY_) &&
+ (pnetwork->network.Privacy == 1))
+ ret = false;
+ else
+ ret = true;
+
+ return ret;
+
+}
+
+inline int is_same_ess(struct wlan_bssid_ex *a, struct wlan_bssid_ex *b)
+{
+ /* RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("(%s,%d)(%s,%d)\n", */
+ /* a->Ssid.Ssid, a->Ssid.SsidLength, b->Ssid.Ssid, b->Ssid.SsidLength)); */
+ return (a->Ssid.SsidLength == b->Ssid.SsidLength)
+ && !memcmp(a->Ssid.Ssid, b->Ssid.Ssid, a->Ssid.SsidLength);
+}
+
+int is_same_network(struct wlan_bssid_ex *src, struct wlan_bssid_ex *dst, u8 feature)
+{
+ u16 s_cap, d_cap;
+ __le16 tmps, tmpd;
+
+ if (rtw_bug_check(dst, src, &s_cap, &d_cap) == false)
+ return false;
+
+ memcpy((u8 *)&tmps, rtw_get_capability_from_ie(src->IEs), 2);
+ memcpy((u8 *)&tmpd, rtw_get_capability_from_ie(dst->IEs), 2);
+
+
+ s_cap = le16_to_cpu(tmps);
+ d_cap = le16_to_cpu(tmpd);
+
+ return ((src->Ssid.SsidLength == dst->Ssid.SsidLength) &&
+ /* (src->Configuration.DSConfig == dst->Configuration.DSConfig) && */
+ ((!memcmp(src->MacAddress, dst->MacAddress, ETH_ALEN))) &&
+ ((!memcmp(src->Ssid.Ssid, dst->Ssid.Ssid, src->Ssid.SsidLength))) &&
+ ((s_cap & WLAN_CAPABILITY_IBSS) ==
+ (d_cap & WLAN_CAPABILITY_IBSS)) &&
+ ((s_cap & WLAN_CAPABILITY_BSS) ==
+ (d_cap & WLAN_CAPABILITY_BSS)));
+
+}
+
+struct wlan_network *_rtw_find_same_network(struct __queue *scanned_queue, struct wlan_network *network)
+{
+ struct list_head *phead, *plist;
+ struct wlan_network *found = NULL;
+
+ phead = get_list_head(scanned_queue);
+ plist = get_next(phead);
+
+ while (plist != phead) {
+ found = LIST_CONTAINOR(plist, struct wlan_network, list);
+
+ if (is_same_network(&network->network, &found->network, 0))
+ break;
+
+ plist = get_next(plist);
+ }
+
+ if (plist == phead)
+ found = NULL;
+
+ return found;
+}
+
+struct wlan_network *rtw_get_oldest_wlan_network(struct __queue *scanned_queue)
+{
+ struct list_head *plist, *phead;
+
+
+ struct wlan_network *pwlan = NULL;
+ struct wlan_network *oldest = NULL;
+
+ phead = get_list_head(scanned_queue);
+
+ plist = get_next(phead);
+
+ while (1) {
+
+ if (phead == plist)
+ break;
+
+ pwlan = LIST_CONTAINOR(plist, struct wlan_network, list);
+
+ if (pwlan->fixed != true) {
+ if (oldest == NULL || time_after(oldest->last_scanned, pwlan->last_scanned))
+ oldest = pwlan;
+ }
+
+ plist = get_next(plist);
+ }
+ return oldest;
+
+}
+
+void update_network(struct wlan_bssid_ex *dst, struct wlan_bssid_ex *src,
+ struct adapter *padapter, bool update_ie)
+{
+ long rssi_ori = dst->Rssi;
+
+ u8 sq_smp = src->PhyInfo.SignalQuality;
+
+ u8 ss_final;
+ u8 sq_final;
+ long rssi_final;
+
+ #if defined(DBG_RX_SIGNAL_DISPLAY_SSID_MONITORED) && 1
+ if (strcmp(dst->Ssid.Ssid, DBG_RX_SIGNAL_DISPLAY_SSID_MONITORED) == 0) {
+ DBG_871X(FUNC_ADPT_FMT" %s("MAC_FMT", ch%u) ss_ori:%3u, sq_ori:%3u, rssi_ori:%3ld, ss_smp:%3u, sq_smp:%3u, rssi_smp:%3ld\n"
+ , FUNC_ADPT_ARG(padapter)
+ , src->Ssid.Ssid, MAC_ARG(src->MacAddress), src->Configuration.DSConfig
+ , dst->PhyInfo.SignalStrength, dst->PhyInfo.SignalQuality, rssi_ori
+ , src->PhyInfo.SignalStrength, sq_smp, src->Rssi
+ );
+ }
+ #endif
+
+ /* The rule below is 1/5 for sample value, 4/5 for history value */
+ if (check_fwstate(&padapter->mlmepriv, _FW_LINKED) && is_same_network(&(padapter->mlmepriv.cur_network.network), src, 0)) {
+ /* Take the recvpriv's value for the connected AP*/
+ ss_final = padapter->recvpriv.signal_strength;
+ sq_final = padapter->recvpriv.signal_qual;
+ /* the rssi value here is undecorated, and will be used for antenna diversity */
+ if (sq_smp != 101) /* from the right channel */
+ rssi_final = (src->Rssi+dst->Rssi*4)/5;
+ else
+ rssi_final = rssi_ori;
+ } else {
+ if (sq_smp != 101) { /* from the right channel */
+ ss_final = ((u32)(src->PhyInfo.SignalStrength)+(u32)(dst->PhyInfo.SignalStrength)*4)/5;
+ sq_final = ((u32)(src->PhyInfo.SignalQuality)+(u32)(dst->PhyInfo.SignalQuality)*4)/5;
+ rssi_final = (src->Rssi+dst->Rssi*4)/5;
+ } else {
+ /* bss info not received from the right channel, use the original RX signal info */
+ ss_final = dst->PhyInfo.SignalStrength;
+ sq_final = dst->PhyInfo.SignalQuality;
+ rssi_final = dst->Rssi;
+ }
+
+ }
+
+ if (update_ie) {
+ dst->Reserved[0] = src->Reserved[0];
+ dst->Reserved[1] = src->Reserved[1];
+ memcpy((u8 *)dst, (u8 *)src, get_wlan_bssid_ex_sz(src));
+ }
+
+ dst->PhyInfo.SignalStrength = ss_final;
+ dst->PhyInfo.SignalQuality = sq_final;
+ dst->Rssi = rssi_final;
+
+ #if defined(DBG_RX_SIGNAL_DISPLAY_SSID_MONITORED) && 1
+ if (strcmp(dst->Ssid.Ssid, DBG_RX_SIGNAL_DISPLAY_SSID_MONITORED) == 0) {
+ DBG_871X(FUNC_ADPT_FMT" %s("MAC_FMT"), SignalStrength:%u, SignalQuality:%u, RawRSSI:%ld\n"
+ , FUNC_ADPT_ARG(padapter)
+ , dst->Ssid.Ssid, MAC_ARG(dst->MacAddress), dst->PhyInfo.SignalStrength, dst->PhyInfo.SignalQuality, dst->Rssi);
+ }
+ #endif
+}
+
+static void update_current_network(struct adapter *adapter, struct wlan_bssid_ex *pnetwork)
+{
+ struct mlme_priv *pmlmepriv = &(adapter->mlmepriv);
+
+ rtw_bug_check(&(pmlmepriv->cur_network.network),
+ &(pmlmepriv->cur_network.network),
+ &(pmlmepriv->cur_network.network),
+ &(pmlmepriv->cur_network.network));
+
+ if ((check_fwstate(pmlmepriv, _FW_LINKED) == true) && (is_same_network(&(pmlmepriv->cur_network.network), pnetwork, 0))) {
+ /* RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_,"Same Network\n"); */
+
+ /* if (pmlmepriv->cur_network.network.IELength<= pnetwork->IELength) */
+ {
+ update_network(&(pmlmepriv->cur_network.network), pnetwork, adapter, true);
+ rtw_update_protection(adapter, (pmlmepriv->cur_network.network.IEs) + sizeof(struct ndis_802_11_fix_ie),
+ pmlmepriv->cur_network.network.IELength);
+ }
+ }
+}
+
+
+/*
+
+Caller must hold pmlmepriv->lock first.
+
+
+*/
+void rtw_update_scanned_network(struct adapter *adapter, struct wlan_bssid_ex *target)
+{
+ struct list_head *plist, *phead;
+ u32 bssid_ex_sz;
+ struct mlme_priv *pmlmepriv = &(adapter->mlmepriv);
+ struct __queue *queue = &(pmlmepriv->scanned_queue);
+ struct wlan_network *pnetwork = NULL;
+ struct wlan_network *oldest = NULL;
+ int target_find = 0;
+ u8 feature = 0;
+
+ spin_lock_bh(&queue->lock);
+ phead = get_list_head(queue);
+ plist = get_next(phead);
+
+ while (1) {
+ if (phead == plist)
+ break;
+
+ pnetwork = LIST_CONTAINOR(plist, struct wlan_network, list);
+
+ rtw_bug_check(pnetwork, pnetwork, pnetwork, pnetwork);
+
+ if (is_same_network(&(pnetwork->network), target, feature)) {
+ target_find = 1;
+ break;
+ }
+
+ if (rtw_roam_flags(adapter)) {
+ /* TODO: don't select a network in the same ESS as oldest if it's new enough */
+ }
+
+ if (oldest == NULL || time_after(oldest->last_scanned, pnetwork->last_scanned))
+ oldest = pnetwork;
+
+ plist = get_next(plist);
+
+ }
+
+
+ /* If we didn't find a match, then get a new network slot to initialize
+ * with this beacon's information */
+ /* if (phead == plist) { */
+ if (!target_find) {
+ if (list_empty(&pmlmepriv->free_bss_pool.queue)) {
+ /* If there are no more slots, expire the oldest */
+ /* list_del_init(&oldest->list); */
+ pnetwork = oldest;
+ if (pnetwork == NULL) {
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("\n\n\nsomething wrong here\n\n\n"));
+ goto exit;
+ }
+ memcpy(&(pnetwork->network), target, get_wlan_bssid_ex_sz(target));
+ /* variable initialize */
+ pnetwork->fixed = false;
+ pnetwork->last_scanned = jiffies;
+
+ pnetwork->network_type = 0;
+ pnetwork->aid = 0;
+ pnetwork->join_res = 0;
+
+ /* bss info not received from the right channel */
+ if (pnetwork->network.PhyInfo.SignalQuality == 101)
+ pnetwork->network.PhyInfo.SignalQuality = 0;
+ } else {
+ /* Otherwise just pull from the free list */
+
+ pnetwork = rtw_alloc_network(pmlmepriv); /* will update scan_time */
+
+ if (pnetwork == NULL) {
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("\n\n\nsomething wrong here\n\n\n"));
+ goto exit;
+ }
+
+ bssid_ex_sz = get_wlan_bssid_ex_sz(target);
+ target->Length = bssid_ex_sz;
+ memcpy(&(pnetwork->network), target, bssid_ex_sz);
+
+ pnetwork->last_scanned = jiffies;
+
+ /* bss info not received from the right channel */
+ if (pnetwork->network.PhyInfo.SignalQuality == 101)
+ pnetwork->network.PhyInfo.SignalQuality = 0;
+
+ list_add_tail(&(pnetwork->list), &(queue->queue));
+
+ }
+ } else {
+ /* we have an entry and we are going to update it. But this entry may
+ * already be expired. In this case we do the same as if we had found a
+ * new net and call the new_net handler
+ */
+ bool update_ie = true;
+
+ pnetwork->last_scanned = jiffies;
+
+ /* target->Reserved[0] == 1 means that the scanned network is a beacon frame. */
+ if ((pnetwork->network.IELength > target->IELength) && (target->Reserved[0] == 1))
+ update_ie = false;
+
+ /* probe resp(3) > beacon(1) > probe req(2) */
+ if ((target->Reserved[0] != 2) &&
+ (target->Reserved[0] >= pnetwork->network.Reserved[0])
+ ) {
+ update_ie = true;
+ } else {
+ update_ie = false;
+ }
+
+ update_network(&(pnetwork->network), target, adapter, update_ie);
+ }
+
+exit:
+ spin_unlock_bh(&queue->lock);
+}
+
+void rtw_add_network(struct adapter *adapter, struct wlan_bssid_ex *pnetwork);
+void rtw_add_network(struct adapter *adapter, struct wlan_bssid_ex *pnetwork)
+{
+ /* struct __queue *queue = &(pmlmepriv->scanned_queue); */
+
+ /* spin_lock_bh(&queue->lock); */
+
+ update_current_network(adapter, pnetwork);
+
+ rtw_update_scanned_network(adapter, pnetwork);
+
+ /* spin_unlock_bh(&queue->lock); */
+}
+
+/* select the desired network based on the capability of the (i)bss. */
+/* check items: (1) security */
+/* (2) network_type */
+/* (3) WMM */
+/* (4) HT */
+/* (5) others */
+int rtw_is_desired_network(struct adapter *adapter, struct wlan_network *pnetwork);
+int rtw_is_desired_network(struct adapter *adapter, struct wlan_network *pnetwork)
+{
+ struct security_priv *psecuritypriv = &adapter->securitypriv;
+ struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
+ u32 desired_encmode;
+ u32 privacy;
+
+ /* u8 wps_ie[512]; */
+ uint wps_ielen;
+
+ int bselected = true;
+
+ desired_encmode = psecuritypriv->ndisencryptstatus;
+ privacy = pnetwork->network.Privacy;
+
+ if (check_fwstate(pmlmepriv, WIFI_UNDER_WPS)) {
+ if (rtw_get_wps_ie(pnetwork->network.IEs+_FIXED_IE_LENGTH_, pnetwork->network.IELength-_FIXED_IE_LENGTH_, NULL, &wps_ielen) != NULL)
+ return true;
+ else
+ return false;
+
+ }
+ if (adapter->registrypriv.wifi_spec == 1) { /* for correct flow of 8021X to do.... */
+ u8 *p = NULL;
+ uint ie_len = 0;
+
+ if ((desired_encmode == Ndis802_11EncryptionDisabled) && (privacy != 0))
+ bselected = false;
+
+ if (psecuritypriv->ndisauthtype == Ndis802_11AuthModeWPA2PSK) {
+ p = rtw_get_ie(pnetwork->network.IEs + _BEACON_IE_OFFSET_, _RSN_IE_2_, &ie_len, (pnetwork->network.IELength - _BEACON_IE_OFFSET_));
+ if (p && ie_len > 0) {
+ bselected = true;
+ } else {
+ bselected = false;
+ }
+ }
+ }
+
+
+ if ((desired_encmode != Ndis802_11EncryptionDisabled) && (privacy == 0)) {
+ DBG_871X("desired_encmode: %d, privacy: %d\n", desired_encmode, privacy);
+ bselected = false;
+ }
+
+ if (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) == true) {
+ if (pnetwork->network.InfrastructureMode != pmlmepriv->cur_network.network.InfrastructureMode)
+ bselected = false;
+ }
+
+
+ return bselected;
+}
+
+/* TODO: Perry : For Power Management */
+void rtw_atimdone_event_callback(struct adapter *adapter, u8 *pbuf)
+{
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("receive atimdone_evet\n"));
+}
+
+
+void rtw_survey_event_callback(struct adapter *adapter, u8 *pbuf)
+{
+ u32 len;
+ struct wlan_bssid_ex *pnetwork;
+ struct mlme_priv *pmlmepriv = &(adapter->mlmepriv);
+
+ pnetwork = (struct wlan_bssid_ex *)pbuf;
+
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("rtw_survey_event_callback, ssid =%s\n", pnetwork->Ssid.Ssid));
+
+ len = get_wlan_bssid_ex_sz(pnetwork);
+ if (len > (sizeof(struct wlan_bssid_ex))) {
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("\n ****rtw_survey_event_callback: return a wrong bss ***\n"));
+ return;
+ }
+
+
+ spin_lock_bh(&pmlmepriv->lock);
+
+ /* update IBSS network's timestamp */
+ if ((check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE)) == true) {
+ /* RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_,"rtw_survey_event_callback : WIFI_ADHOC_MASTER_STATE\n\n"); */
+ if (!memcmp(&(pmlmepriv->cur_network.network.MacAddress), pnetwork->MacAddress, ETH_ALEN)) {
+ struct wlan_network *ibss_wlan = NULL;
+
+ memcpy(pmlmepriv->cur_network.network.IEs, pnetwork->IEs, 8);
+ spin_lock_bh(&(pmlmepriv->scanned_queue.lock));
+ ibss_wlan = rtw_find_network(&pmlmepriv->scanned_queue, pnetwork->MacAddress);
+ if (ibss_wlan) {
+ memcpy(ibss_wlan->network.IEs, pnetwork->IEs, 8);
+ spin_unlock_bh(&(pmlmepriv->scanned_queue.lock));
+ goto exit;
+ }
+ spin_unlock_bh(&(pmlmepriv->scanned_queue.lock));
+ }
+ }
+
+ /* lock pmlmepriv->lock when accessing network_q */
+ if ((check_fwstate(pmlmepriv, _FW_UNDER_LINKING)) == false) {
+ if (pnetwork->Ssid.Ssid[0] == 0) {
+ pnetwork->Ssid.SsidLength = 0;
+ }
+ rtw_add_network(adapter, pnetwork);
+ }
+
+exit:
+
+ spin_unlock_bh(&pmlmepriv->lock);
+
+ return;
+}
+
+
+
+void rtw_surveydone_event_callback(struct adapter *adapter, u8 *pbuf)
+{
+ u8 timer_cancelled = false;
+ struct mlme_priv *pmlmepriv = &(adapter->mlmepriv);
+
+ spin_lock_bh(&pmlmepriv->lock);
+ if (pmlmepriv->wps_probe_req_ie) {
+ pmlmepriv->wps_probe_req_ie_len = 0;
+ kfree(pmlmepriv->wps_probe_req_ie);
+ pmlmepriv->wps_probe_req_ie = NULL;
+ }
+
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("rtw_surveydone_event_callback: fw_state:%x\n\n", get_fwstate(pmlmepriv)));
+
+ if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY)) {
+ /* u8 timer_cancelled; */
+
+ timer_cancelled = true;
+ /* _cancel_timer(&pmlmepriv->scan_to_timer, &timer_cancelled); */
+
+ _clr_fwstate_(pmlmepriv, _FW_UNDER_SURVEY);
+ } else {
+
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("nic status =%x, survey done event comes too late!\n", get_fwstate(pmlmepriv)));
+ }
+ spin_unlock_bh(&pmlmepriv->lock);
+
+ if (timer_cancelled)
+ _cancel_timer(&pmlmepriv->scan_to_timer, &timer_cancelled);
+
+
+ spin_lock_bh(&pmlmepriv->lock);
+
+ rtw_set_signal_stat_timer(&adapter->recvpriv);
+
+ if (pmlmepriv->to_join == true) {
+ if ((check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) == true)) {
+ if (check_fwstate(pmlmepriv, _FW_LINKED) == false) {
+ set_fwstate(pmlmepriv, _FW_UNDER_LINKING);
+
+ if (rtw_select_and_join_from_scanned_queue(pmlmepriv) == _SUCCESS) {
+ _set_timer(&pmlmepriv->assoc_timer, MAX_JOIN_TIMEOUT);
+ } else {
+ struct wlan_bssid_ex *pdev_network = &(adapter->registrypriv.dev_network);
+ u8 *pibss = adapter->registrypriv.dev_network.MacAddress;
+
+ /* pmlmepriv->fw_state ^= _FW_UNDER_SURVEY; because we don't set assoc_timer */
+ _clr_fwstate_(pmlmepriv, _FW_UNDER_SURVEY);
+
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("switching to adhoc master\n"));
+
+ memset(&pdev_network->Ssid, 0, sizeof(struct ndis_802_11_ssid));
+ memcpy(&pdev_network->Ssid, &pmlmepriv->assoc_ssid, sizeof(struct ndis_802_11_ssid));
+
+ rtw_update_registrypriv_dev_network(adapter);
+ rtw_generate_random_ibss(pibss);
+
+ pmlmepriv->fw_state = WIFI_ADHOC_MASTER_STATE;
+
+ if (rtw_createbss_cmd(adapter) != _SUCCESS) {
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("Error =>rtw_createbss_cmd status FAIL\n"));
+ }
+
+ pmlmepriv->to_join = false;
+ }
+ }
+ } else {
+ int s_ret;
+ set_fwstate(pmlmepriv, _FW_UNDER_LINKING);
+ pmlmepriv->to_join = false;
+ s_ret = rtw_select_and_join_from_scanned_queue(pmlmepriv);
+ if (_SUCCESS == s_ret) {
+ _set_timer(&pmlmepriv->assoc_timer, MAX_JOIN_TIMEOUT);
+ } else if (s_ret == 2) {/* there is no need to wait for join */
+ _clr_fwstate_(pmlmepriv, _FW_UNDER_LINKING);
+ rtw_indicate_connect(adapter);
+ } else {
+ DBG_871X("try_to_join, but select scanning queue fail, to_roam:%d\n", rtw_to_roam(adapter));
+
+ if (rtw_to_roam(adapter) != 0) {
+ if (rtw_dec_to_roam(adapter) == 0
+ || _SUCCESS != rtw_sitesurvey_cmd(adapter, &pmlmepriv->assoc_ssid, 1, NULL, 0)
+ ) {
+ rtw_set_to_roam(adapter, 0);
+#ifdef CONFIG_INTEL_WIDI
+ if (adapter->mlmepriv.widi_state == INTEL_WIDI_STATE_ROAMING) {
+ memset(pmlmepriv->sa_ext, 0x00, L2SDTA_SERVICE_VE_LEN);
+ intel_widi_wk_cmd(adapter, INTEL_WIDI_LISTEN_WK, NULL, 0);
+ DBG_871X("change to widi listen\n");
+ }
+#endif /* CONFIG_INTEL_WIDI */
+ rtw_free_assoc_resources(adapter, 1);
+ rtw_indicate_disconnect(adapter);
+ } else {
+ pmlmepriv->to_join = true;
+ }
+ } else
+ rtw_indicate_disconnect(adapter);
+
+ _clr_fwstate_(pmlmepriv, _FW_UNDER_LINKING);
+ }
+ }
+ } else {
+ if (rtw_chk_roam_flags(adapter, RTW_ROAM_ACTIVE)) {
+ if (check_fwstate(pmlmepriv, WIFI_STATION_STATE)
+ && check_fwstate(pmlmepriv, _FW_LINKED)) {
+ if (rtw_select_roaming_candidate(pmlmepriv) == _SUCCESS) {
+ receive_disconnect(adapter, pmlmepriv->cur_network.network.MacAddress
+ , WLAN_REASON_ACTIVE_ROAM);
+ }
+ }
+ }
+ }
+
+ /* DBG_871X("scan complete in %dms\n", jiffies_to_msecs(jiffies - pmlmepriv->scan_start_time)); */
+
+ spin_unlock_bh(&pmlmepriv->lock);
+
+ rtw_os_xmit_schedule(adapter);
+
+ rtw_cfg80211_surveydone_event_callback(adapter);
+
+ rtw_indicate_scan_done(adapter, false);
+}
+
+void rtw_dummy_event_callback(struct adapter *adapter, u8 *pbuf)
+{
+}
+
+void rtw_fwdbg_event_callback(struct adapter *adapter, u8 *pbuf)
+{
+}
+
+static void free_scanqueue(struct mlme_priv *pmlmepriv)
+{
+ struct __queue *free_queue = &pmlmepriv->free_bss_pool;
+ struct __queue *scan_queue = &pmlmepriv->scanned_queue;
+ struct list_head *plist, *phead, *ptemp;
+
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_notice_, ("+free_scanqueue\n"));
+ spin_lock_bh(&scan_queue->lock);
+ spin_lock_bh(&free_queue->lock);
+
+ phead = get_list_head(scan_queue);
+ plist = get_next(phead);
+
+ while (plist != phead) {
+ ptemp = get_next(plist);
+ list_del_init(plist);
+ list_add_tail(plist, &free_queue->queue);
+ plist = ptemp;
+ pmlmepriv->num_of_scanned--;
+ }
+
+ spin_unlock_bh(&free_queue->lock);
+ spin_unlock_bh(&scan_queue->lock);
+}
+
+static void rtw_reset_rx_info(struct debug_priv *pdbgpriv)
+{
+ pdbgpriv->dbg_rx_ampdu_drop_count = 0;
+ pdbgpriv->dbg_rx_ampdu_forced_indicate_count = 0;
+ pdbgpriv->dbg_rx_ampdu_loss_count = 0;
+ pdbgpriv->dbg_rx_dup_mgt_frame_drop_count = 0;
+ pdbgpriv->dbg_rx_ampdu_window_shift_cnt = 0;
+}
+
+static void find_network(struct adapter *adapter)
+{
+ struct wlan_network *pwlan = NULL;
+ struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
+ struct wlan_network *tgt_network = &pmlmepriv->cur_network;
+
+ pwlan = rtw_find_network(&pmlmepriv->scanned_queue, tgt_network->network.MacAddress);
+ if (pwlan)
+ pwlan->fixed = false;
+ else
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("rtw_free_assoc_resources : pwlan == NULL\n\n"));
+
+
+ if (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) &&
+ (adapter->stapriv.asoc_sta_count == 1))
+ rtw_free_network_nolock(adapter, pwlan);
+}
+
+/*
+*rtw_free_assoc_resources: the caller has to lock pmlmepriv->lock
+*/
+void rtw_free_assoc_resources(struct adapter *adapter, int lock_scanned_queue)
+{
+ struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
+ struct wlan_network *tgt_network = &pmlmepriv->cur_network;
+ struct sta_priv *pstapriv = &adapter->stapriv;
+ struct dvobj_priv *psdpriv = adapter->dvobj;
+ struct debug_priv *pdbgpriv = &psdpriv->drv_dbg;
+
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_notice_, ("+rtw_free_assoc_resources\n"));
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("tgt_network->network.MacAddress ="MAC_FMT" ssid =%s\n",
+ MAC_ARG(tgt_network->network.MacAddress), tgt_network->network.Ssid.Ssid));
+
+ if (check_fwstate(pmlmepriv, WIFI_STATION_STATE|WIFI_AP_STATE)) {
+ struct sta_info *psta;
+
+ psta = rtw_get_stainfo(&adapter->stapriv, tgt_network->network.MacAddress);
+ spin_lock_bh(&(pstapriv->sta_hash_lock));
+ rtw_free_stainfo(adapter, psta);
+
+ spin_unlock_bh(&(pstapriv->sta_hash_lock));
+
+ }
+
+ if (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE|WIFI_ADHOC_MASTER_STATE|WIFI_AP_STATE)) {
+ struct sta_info *psta;
+
+ rtw_free_all_stainfo(adapter);
+
+ psta = rtw_get_bcmc_stainfo(adapter);
+ rtw_free_stainfo(adapter, psta);
+
+ rtw_init_bcmc_stainfo(adapter);
+ }
+
+ find_network(adapter);
+
+ if (lock_scanned_queue)
+ adapter->securitypriv.key_mask = 0;
+
+ rtw_reset_rx_info(pdbgpriv);
+}
+
+/*
+*rtw_indicate_connect: the caller has to lock pmlmepriv->lock
+*/
+void rtw_indicate_connect(struct adapter *padapter)
+{
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("+rtw_indicate_connect\n"));
+
+ pmlmepriv->to_join = false;
+
+ if (!check_fwstate(&padapter->mlmepriv, _FW_LINKED)) {
+
+ set_fwstate(pmlmepriv, _FW_LINKED);
+
+ rtw_os_indicate_connect(padapter);
+ }
+
+ rtw_set_to_roam(padapter, 0);
+#ifdef CONFIG_INTEL_WIDI
+ if (padapter->mlmepriv.widi_state == INTEL_WIDI_STATE_ROAMING) {
+ memset(pmlmepriv->sa_ext, 0x00, L2SDTA_SERVICE_VE_LEN);
+ intel_widi_wk_cmd(padapter, INTEL_WIDI_LISTEN_WK, NULL, 0);
+ DBG_871X("change to widi listen\n");
+ }
+#endif /* CONFIG_INTEL_WIDI */
+
+ rtw_set_scan_deny(padapter, 3000);
+
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("-rtw_indicate_connect: fw_state = 0x%08x\n", get_fwstate(pmlmepriv)));
+}
+
+/*
+*rtw_indicate_disconnect: the caller has to lock pmlmepriv->lock
+*/
+void rtw_indicate_disconnect(struct adapter *padapter)
+{
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("+rtw_indicate_disconnect\n"));
+
+ _clr_fwstate_(pmlmepriv, _FW_UNDER_LINKING|WIFI_UNDER_WPS);
+
+ /* DBG_871X("clear wps when %s\n", __func__); */
+
+ if (rtw_to_roam(padapter) > 0)
+ _clr_fwstate_(pmlmepriv, _FW_LINKED);
+
+ if (check_fwstate(&padapter->mlmepriv, _FW_LINKED)
+ || (rtw_to_roam(padapter) <= 0)
+ ) {
+ rtw_os_indicate_disconnect(padapter);
+
+ /* set ips_deny_time to avoid enter IPS before LPS leave */
+ rtw_set_ips_deny(padapter, 3000);
+
+ _clr_fwstate_(pmlmepriv, _FW_LINKED);
+
+ rtw_clear_scan_deny(padapter);
+ }
+
+ rtw_lps_ctrl_wk_cmd(padapter, LPS_CTRL_DISCONNECT, 1);
+}
+
+inline void rtw_indicate_scan_done(struct adapter *padapter, bool aborted)
+{
+ DBG_871X(FUNC_ADPT_FMT"\n", FUNC_ADPT_ARG(padapter));
+
+ rtw_os_indicate_scan_done(padapter, aborted);
+
+ if (is_primary_adapter(padapter) &&
+ (!adapter_to_pwrctl(padapter)->bInSuspend) &&
+ (!check_fwstate(&padapter->mlmepriv,
+ WIFI_ASOC_STATE|WIFI_UNDER_LINKING))) {
+ struct pwrctrl_priv *pwrpriv;
+
+ pwrpriv = adapter_to_pwrctl(padapter);
+ rtw_set_ips_deny(padapter, 0);
+ _set_timer(&padapter->mlmepriv.dynamic_chk_timer, 1);
+ }
+}
+
+void rtw_scan_abort(struct adapter *adapter)
+{
+ unsigned long start;
+ struct mlme_priv *pmlmepriv = &(adapter->mlmepriv);
+ struct mlme_ext_priv *pmlmeext = &(adapter->mlmeextpriv);
+
+ start = jiffies;
+ pmlmeext->scan_abort = true;
+ while (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY)
+ && jiffies_to_msecs(start) <= 200) {
+
+ if (adapter->bDriverStopped || adapter->bSurpriseRemoved)
+ break;
+
+ DBG_871X(FUNC_NDEV_FMT"fw_state = _FW_UNDER_SURVEY!\n", FUNC_NDEV_ARG(adapter->pnetdev));
+ msleep(20);
+ }
+
+ if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY)) {
+ if (!adapter->bDriverStopped && !adapter->bSurpriseRemoved)
+ DBG_871X(FUNC_NDEV_FMT"waiting for scan_abort time out!\n", FUNC_NDEV_ARG(adapter->pnetdev));
+ rtw_indicate_scan_done(adapter, true);
+ }
+ pmlmeext->scan_abort = false;
+}
+
+static struct sta_info *rtw_joinbss_update_stainfo(struct adapter *padapter, struct wlan_network *pnetwork)
+{
+ int i;
+ struct sta_info *bmc_sta, *psta = NULL;
+ struct recv_reorder_ctrl *preorder_ctrl;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+
+ psta = rtw_get_stainfo(pstapriv, pnetwork->network.MacAddress);
+ if (psta == NULL) {
+ psta = rtw_alloc_stainfo(pstapriv, pnetwork->network.MacAddress);
+ }
+
+ if (psta) { /* update ptarget_sta */
+
+ DBG_871X("%s\n", __func__);
+
+ psta->aid = pnetwork->join_res;
+
+ update_sta_info(padapter, psta);
+
+ /* update station supportRate */
+ psta->bssratelen = rtw_get_rateset_len(pnetwork->network.SupportedRates);
+ memcpy(psta->bssrateset, pnetwork->network.SupportedRates, psta->bssratelen);
+ rtw_hal_update_sta_rate_mask(padapter, psta);
+
+ psta->wireless_mode = pmlmeext->cur_wireless_mode;
+ psta->raid = rtw_hal_networktype_to_raid(padapter, psta);
+
+
+ /* sta mode */
+ rtw_hal_set_odm_var(padapter, HAL_ODM_STA_INFO, psta, true);
+
+ /* security related */
+ if (padapter->securitypriv.dot11AuthAlgrthm == dot11AuthAlgrthm_8021X) {
+ padapter->securitypriv.binstallGrpkey = false;
+ padapter->securitypriv.busetkipkey = false;
+ padapter->securitypriv.bgrpkey_handshake = false;
+
+ psta->ieee8021x_blocked = true;
+ psta->dot118021XPrivacy = padapter->securitypriv.dot11PrivacyAlgrthm;
+
+ memset((u8 *)&psta->dot118021x_UncstKey, 0, sizeof(union Keytype));
+
+ memset((u8 *)&psta->dot11tkiprxmickey, 0, sizeof(union Keytype));
+ memset((u8 *)&psta->dot11tkiptxmickey, 0, sizeof(union Keytype));
+
+ memset((u8 *)&psta->dot11txpn, 0, sizeof(union pn48));
+ psta->dot11txpn.val = psta->dot11txpn.val + 1;
+ memset((u8 *)&psta->dot11wtxpn, 0, sizeof(union pn48));
+ memset((u8 *)&psta->dot11rxpn, 0, sizeof(union pn48));
+ }
+
+ /* Commented by Albert 2012/07/21 */
+ /* When doing the WPS, the wps_ie_len won't equal to 0 */
+ /* And the Wi-Fi driver shouldn't allow the data packet to be transmitted. */
+ if (padapter->securitypriv.wps_ie_len != 0) {
+ psta->ieee8021x_blocked = true;
+ padapter->securitypriv.wps_ie_len = 0;
+ }
+
+
+ /* for A-MPDU Rx reordering buffer control for bmc_sta & sta_info */
+ /* if A-MPDU Rx is enabled, resetting rx_ordering_ctrl wstart_b(indicate_seq) to default value = 0xffff */
+ /* todo: check if AP can send A-MPDU packets */
+ for (i = 0; i < 16 ; i++) {
+ /* preorder_ctrl = &precvpriv->recvreorder_ctrl[i]; */
+ preorder_ctrl = &psta->recvreorder_ctrl[i];
+ preorder_ctrl->enable = false;
+ preorder_ctrl->indicate_seq = 0xffff;
+ #ifdef DBG_RX_SEQ
+ DBG_871X("DBG_RX_SEQ %s:%d indicate_seq:%u\n", __func__, __LINE__,
+ preorder_ctrl->indicate_seq);
+ #endif
+ preorder_ctrl->wend_b = 0xffff;
+ preorder_ctrl->wsize_b = 64;/* max_ampdu_sz;ex. 32(kbytes) -> wsize_b =32 */
+ }
+
+
+ bmc_sta = rtw_get_bcmc_stainfo(padapter);
+ if (bmc_sta) {
+ for (i = 0; i < 16 ; i++) {
+ /* preorder_ctrl = &precvpriv->recvreorder_ctrl[i]; */
+ preorder_ctrl = &bmc_sta->recvreorder_ctrl[i];
+ preorder_ctrl->enable = false;
+ preorder_ctrl->indicate_seq = 0xffff;
+ #ifdef DBG_RX_SEQ
+ DBG_871X("DBG_RX_SEQ %s:%d indicate_seq:%u\n", __func__, __LINE__,
+ preorder_ctrl->indicate_seq);
+ #endif
+ preorder_ctrl->wend_b = 0xffff;
+ preorder_ctrl->wsize_b = 64;/* max_ampdu_sz;ex. 32(kbytes) -> wsize_b =32 */
+ }
+ }
+ }
+
+ return psta;
+
+}
+
+/* pnetwork : returns from rtw_joinbss_event_callback */
+/* ptarget_wlan: found from scanned_queue */
+static void rtw_joinbss_update_network(struct adapter *padapter, struct wlan_network *ptarget_wlan, struct wlan_network *pnetwork)
+{
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct wlan_network *cur_network = &(pmlmepriv->cur_network);
+
+ DBG_871X("%s\n", __func__);
+
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("\nfw_state:%x, BSSID:"MAC_FMT"\n"
+ , get_fwstate(pmlmepriv), MAC_ARG(pnetwork->network.MacAddress)));
+
+
+ /* why not use ptarget_wlan?? */
+ memcpy(&cur_network->network, &pnetwork->network, pnetwork->network.Length);
+ /* some IEs in pnetwork is wrong, so we should use ptarget_wlan IEs */
+ cur_network->network.IELength = ptarget_wlan->network.IELength;
+ memcpy(&cur_network->network.IEs[0], &ptarget_wlan->network.IEs[0], MAX_IE_SZ);
+
+ cur_network->aid = pnetwork->join_res;
+
+
+ rtw_set_signal_stat_timer(&padapter->recvpriv);
+
+ padapter->recvpriv.signal_strength = ptarget_wlan->network.PhyInfo.SignalStrength;
+ padapter->recvpriv.signal_qual = ptarget_wlan->network.PhyInfo.SignalQuality;
+ /* the ptarget_wlan->network.Rssi is raw data, so use ptarget_wlan->network.PhyInfo.SignalStrength instead (already scaled) */
+ padapter->recvpriv.rssi = translate_percentage_to_dbm(ptarget_wlan->network.PhyInfo.SignalStrength);
+ #if defined(DBG_RX_SIGNAL_DISPLAY_PROCESSING) && 1
+ DBG_871X(FUNC_ADPT_FMT" signal_strength:%3u, rssi:%3d, signal_qual:%3u"
+ "\n"
+ , FUNC_ADPT_ARG(padapter)
+ , padapter->recvpriv.signal_strength
+ , padapter->recvpriv.rssi
+ , padapter->recvpriv.signal_qual
+ );
+ #endif
+
+ rtw_set_signal_stat_timer(&padapter->recvpriv);
+
+ /* update fw_state will clr _FW_UNDER_LINKING here indirectly */
+ switch (pnetwork->network.InfrastructureMode) {
+ case Ndis802_11Infrastructure:
+
+ if (pmlmepriv->fw_state&WIFI_UNDER_WPS)
+ pmlmepriv->fw_state = WIFI_STATION_STATE|WIFI_UNDER_WPS;
+ else
+ pmlmepriv->fw_state = WIFI_STATION_STATE;
+
+ break;
+ case Ndis802_11IBSS:
+ pmlmepriv->fw_state = WIFI_ADHOC_STATE;
+ break;
+ default:
+ pmlmepriv->fw_state = WIFI_NULL_STATE;
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("Invalid network_mode\n"));
+ break;
+ }
+
+ rtw_update_protection(padapter, (cur_network->network.IEs) + sizeof(struct ndis_802_11_fix_ie),
+ (cur_network->network.IELength));
+
+ rtw_update_ht_cap(padapter, cur_network->network.IEs, cur_network->network.IELength, (u8) cur_network->network.Configuration.DSConfig);
+}
+
+/* Notes: the function could be > passive_level (the same context as Rx tasklet) */
+/* pnetwork : returns from rtw_joinbss_event_callback */
+/* ptarget_wlan: found from scanned_queue */
+/* if join_res > 0, for (fw_state ==WIFI_STATION_STATE), we check if "ptarget_sta" & "ptarget_wlan" exist. */
+/* if join_res > 0, for (fw_state ==WIFI_ADHOC_STATE), we only check if "ptarget_wlan" exist. */
+/* if join_res > 0, update "cur_network->network" from "pnetwork->network" if (ptarget_wlan != NULL). */
+/* */
+/* define REJOIN */
+void rtw_joinbss_event_prehandle(struct adapter *adapter, u8 *pbuf)
+{
+ static u8 retry = 0;
+ u8 timer_cancelled;
+ struct sta_info *ptarget_sta = NULL, *pcur_sta = NULL;
+ struct sta_priv *pstapriv = &adapter->stapriv;
+ struct mlme_priv *pmlmepriv = &(adapter->mlmepriv);
+ struct wlan_network *pnetwork = (struct wlan_network *)pbuf;
+ struct wlan_network *cur_network = &(pmlmepriv->cur_network);
+ struct wlan_network *pcur_wlan = NULL, *ptarget_wlan = NULL;
+ unsigned int the_same_macaddr = false;
+
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("joinbss event call back received with res =%d\n", pnetwork->join_res));
+
+ rtw_get_encrypt_decrypt_from_registrypriv(adapter);
+
+
+ if (pmlmepriv->assoc_ssid.SsidLength == 0) {
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("@@@@@ joinbss event call back for Any SSid\n"));
+ } else{
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("@@@@@ rtw_joinbss_event_callback for SSid:%s\n", pmlmepriv->assoc_ssid.Ssid));
+ }
+
+ the_same_macaddr = !memcmp(pnetwork->network.MacAddress, cur_network->network.MacAddress, ETH_ALEN);
+
+ pnetwork->network.Length = get_wlan_bssid_ex_sz(&pnetwork->network);
+ if (pnetwork->network.Length > sizeof(struct wlan_bssid_ex)) {
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("\n\n ***joinbss_evt_callback return a wrong bss ***\n\n"));
+ return;
+ }
+
+ spin_lock_bh(&pmlmepriv->lock);
+
+ pmlmepriv->LinkDetectInfo.TrafficTransitionCount = 0;
+ pmlmepriv->LinkDetectInfo.LowPowerTransitionCount = 0;
+
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("\n rtw_joinbss_event_callback !! spin_lock_irqsave\n"));
+
+ if (pnetwork->join_res > 0) {
+ spin_lock_bh(&(pmlmepriv->scanned_queue.lock));
+ retry = 0;
+ if (check_fwstate(pmlmepriv, _FW_UNDER_LINKING)) {
+ /* s1. find ptarget_wlan */
+ if (check_fwstate(pmlmepriv, _FW_LINKED)) {
+ if (the_same_macaddr == true) {
+ ptarget_wlan = rtw_find_network(&pmlmepriv->scanned_queue, cur_network->network.MacAddress);
+ } else{
+ pcur_wlan = rtw_find_network(&pmlmepriv->scanned_queue, cur_network->network.MacAddress);
+ if (pcur_wlan)
+ pcur_wlan->fixed = false;
+
+ pcur_sta = rtw_get_stainfo(pstapriv, cur_network->network.MacAddress);
+ if (pcur_sta)
+ rtw_free_stainfo(adapter, pcur_sta);
+
+ ptarget_wlan = rtw_find_network(&pmlmepriv->scanned_queue, pnetwork->network.MacAddress);
+ if (check_fwstate(pmlmepriv, WIFI_STATION_STATE) == true) {
+ if (ptarget_wlan)
+ ptarget_wlan->fixed = true;
+ }
+ }
+
+ } else{
+ ptarget_wlan = _rtw_find_same_network(&pmlmepriv->scanned_queue, pnetwork);
+ if (check_fwstate(pmlmepriv, WIFI_STATION_STATE) == true) {
+ if (ptarget_wlan)
+ ptarget_wlan->fixed = true;
+ }
+ }
+
+ /* s2. update cur_network */
+ if (ptarget_wlan) {
+ rtw_joinbss_update_network(adapter, ptarget_wlan, pnetwork);
+ } else{
+ DBG_871X_LEVEL(_drv_always_, "Can't find ptarget_wlan when joinbss_event callback\n");
+ spin_unlock_bh(&(pmlmepriv->scanned_queue.lock));
+ goto ignore_joinbss_callback;
+ }
+
+
+ /* s3. find ptarget_sta & update ptarget_sta after update cur_network only for station mode */
+ if (check_fwstate(pmlmepriv, WIFI_STATION_STATE) == true) {
+ ptarget_sta = rtw_joinbss_update_stainfo(adapter, pnetwork);
+ if (ptarget_sta == NULL) {
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("Can't update stainfo when joinbss_event callback\n"));
+ spin_unlock_bh(&(pmlmepriv->scanned_queue.lock));
+ goto ignore_joinbss_callback;
+ }
+ }
+
+ /* s4. indicate connect */
+ if (check_fwstate(pmlmepriv, WIFI_STATION_STATE) == true) {
+ pmlmepriv->cur_network_scanned = ptarget_wlan;
+ rtw_indicate_connect(adapter);
+ } else{
+ /* adhoc mode will call rtw_indicate_connect() from rtw_stassoc_event_callback() */
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("adhoc mode, fw_state:%x", get_fwstate(pmlmepriv)));
+ }
+
+
+ /* s5. Cancel assoc_timer */
+ _cancel_timer(&pmlmepriv->assoc_timer, &timer_cancelled);
+
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("Cancle assoc_timer\n"));
+
+ } else{
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("rtw_joinbss_event_callback err: fw_state:%x", get_fwstate(pmlmepriv)));
+ spin_unlock_bh(&(pmlmepriv->scanned_queue.lock));
+ goto ignore_joinbss_callback;
+ }
+
+ spin_unlock_bh(&(pmlmepriv->scanned_queue.lock));
+
+ } else if (pnetwork->join_res == -4) {
+ rtw_reset_securitypriv(adapter);
+ _set_timer(&pmlmepriv->assoc_timer, 1);
+
+ /* rtw_free_assoc_resources(adapter, 1); */
+
+ if ((check_fwstate(pmlmepriv, _FW_UNDER_LINKING)) == true) {
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("fail! clear _FW_UNDER_LINKING ^^^fw_state =%x\n", get_fwstate(pmlmepriv)));
+ _clr_fwstate_(pmlmepriv, _FW_UNDER_LINKING);
+ }
+
+ } else{/* if join_res < 0 (join fails), then try again */
+
+ #ifdef REJOIN
+ res = _FAIL;
+ if (retry < 2) {
+ res = rtw_select_and_join_from_scanned_queue(pmlmepriv);
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("rtw_select_and_join_from_scanned_queue again! res:%d\n", res));
+ }
+
+ if (res == _SUCCESS) {
+ /* extend time of assoc_timer */
+ _set_timer(&pmlmepriv->assoc_timer, MAX_JOIN_TIMEOUT);
+ retry++;
+ } else if (res == 2) {/* there is no need to wait for join */
+ _clr_fwstate_(pmlmepriv, _FW_UNDER_LINKING);
+ rtw_indicate_connect(adapter);
+ } else{
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("Set Assoc_Timer = 1; can't find match ssid in scanned_q\n"));
+ #endif
+
+ _set_timer(&pmlmepriv->assoc_timer, 1);
+ /* rtw_free_assoc_resources(adapter, 1); */
+ _clr_fwstate_(pmlmepriv, _FW_UNDER_LINKING);
+
+ #ifdef REJOIN
+ retry = 0;
+ }
+ #endif
+ }
+
+ignore_joinbss_callback:
+
+ spin_unlock_bh(&pmlmepriv->lock);
+}
+
+void rtw_joinbss_event_callback(struct adapter *adapter, u8 *pbuf)
+{
+ struct wlan_network *pnetwork = (struct wlan_network *)pbuf;
+
+ mlmeext_joinbss_event_callback(adapter, pnetwork->join_res);
+
+ rtw_os_xmit_schedule(adapter);
+}
+
+/* FOR STA, AP , AD-HOC mode */
+void rtw_sta_media_status_rpt(struct adapter *adapter, struct sta_info *psta, u32 mstatus)
+{
+ u16 media_status_rpt;
+
+ if (psta == NULL)
+ return;
+
+ media_status_rpt = (u16)((psta->mac_id<<8)|mstatus); /* MACID|OPMODE:1 connect */
+ rtw_hal_set_hwreg(adapter, HW_VAR_H2C_MEDIA_STATUS_RPT, (u8 *)&media_status_rpt);
+}
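+
+/*
+* Encoding sketch for the report built above: the low byte carries the
+* opmode/status and the high byte the MAC ID, e.g. mac_id 5 with mstatus 1
+* (connect) gives media_status_rpt = 0x0501.
+*/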
+
+void rtw_stassoc_event_callback(struct adapter *adapter, u8 *pbuf)
+{
+ struct sta_info *psta;
+ struct mlme_priv *pmlmepriv = &(adapter->mlmepriv);
+ struct stassoc_event *pstassoc = (struct stassoc_event *)pbuf;
+ struct wlan_network *cur_network = &(pmlmepriv->cur_network);
+ struct wlan_network *ptarget_wlan = NULL;
+
+ if (rtw_access_ctrl(adapter, pstassoc->macaddr) == false)
+ return;
+
+ if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
+ psta = rtw_get_stainfo(&adapter->stapriv, pstassoc->macaddr);
+ if (psta) {
+ u8 *passoc_req = NULL;
+ u32 assoc_req_len = 0;
+
+ rtw_sta_media_status_rpt(adapter, psta, 1);
+
+#ifndef CONFIG_AUTO_AP_MODE
+
+ ap_sta_info_defer_update(adapter, psta);
+
+ /* report to upper layer */
+ DBG_871X("indicate_sta_assoc_event to upper layer - hostapd\n");
+ spin_lock_bh(&psta->lock);
+ if (psta->passoc_req && psta->assoc_req_len > 0) {
+ passoc_req = rtw_zmalloc(psta->assoc_req_len);
+ if (passoc_req) {
+ assoc_req_len = psta->assoc_req_len;
+ memcpy(passoc_req, psta->passoc_req, assoc_req_len);
+
+ kfree(psta->passoc_req);
+ psta->passoc_req = NULL;
+ psta->assoc_req_len = 0;
+ }
+ }
+ spin_unlock_bh(&psta->lock);
+
+ if (passoc_req && assoc_req_len > 0) {
+ rtw_cfg80211_indicate_sta_assoc(adapter, passoc_req, assoc_req_len);
+
+ kfree(passoc_req);
+ }
+#endif /* CONFIG_AUTO_AP_MODE */
+ }
+ return;
+ }
+
+ /* for AD-HOC mode */
+ psta = rtw_get_stainfo(&adapter->stapriv, pstassoc->macaddr);
+ if (psta != NULL) {
+ /* the sta has already been in sta_info_queue => do nothing */
+
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("Error: rtw_stassoc_event_callback: sta has been in sta_hash_queue\n"));
+
+ return; /* the driver has already received this event, but the fw has not yet set the key to CAM_ENTRY */
+ }
+
+ psta = rtw_alloc_stainfo(&adapter->stapriv, pstassoc->macaddr);
+ if (psta == NULL) {
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("Can't alloc sta_info when rtw_stassoc_event_callback\n"));
+ return;
+ }
+
+ /* to do : init sta_info variable */
+ psta->qos_option = 0;
+ psta->mac_id = (uint)pstassoc->cam_id;
+ /* psta->aid = (uint)pstassoc->cam_id; */
+ DBG_871X("%s\n", __func__);
+ /* for ad-hoc mode */
+ rtw_hal_set_odm_var(adapter, HAL_ODM_STA_INFO, psta, true);
+
+ rtw_sta_media_status_rpt(adapter, psta, 1);
+
+ if (adapter->securitypriv.dot11AuthAlgrthm == dot11AuthAlgrthm_8021X)
+ psta->dot118021XPrivacy = adapter->securitypriv.dot11PrivacyAlgrthm;
+
+
+ psta->ieee8021x_blocked = false;
+
+ spin_lock_bh(&pmlmepriv->lock);
+
+ if ((check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) == true) ||
+ (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) == true)) {
+ if (adapter->stapriv.asoc_sta_count == 2) {
+ spin_lock_bh(&(pmlmepriv->scanned_queue.lock));
+ ptarget_wlan = rtw_find_network(&pmlmepriv->scanned_queue, cur_network->network.MacAddress);
+ pmlmepriv->cur_network_scanned = ptarget_wlan;
+ if (ptarget_wlan)
+ ptarget_wlan->fixed = true;
+ spin_unlock_bh(&(pmlmepriv->scanned_queue.lock));
+ /* a sta + bc/mc_stainfo (not Ibss_stainfo) */
+ rtw_indicate_connect(adapter);
+ }
+ }
+
+ spin_unlock_bh(&pmlmepriv->lock);
+
+
+ mlmeext_sta_add_event_callback(adapter, psta);
+}
+
+void rtw_stadel_event_callback(struct adapter *adapter, u8 *pbuf)
+{
+ int mac_id = (-1);
+ struct sta_info *psta;
+ struct wlan_network *pwlan = NULL;
+ struct wlan_bssid_ex *pdev_network = NULL;
+ u8 *pibss = NULL;
+ struct mlme_priv *pmlmepriv = &(adapter->mlmepriv);
+ struct stadel_event *pstadel = (struct stadel_event *)pbuf;
+ struct wlan_network *tgt_network = &(pmlmepriv->cur_network);
+ struct mlme_ext_priv *pmlmeext = &adapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+
+ psta = rtw_get_stainfo(&adapter->stapriv, pstadel->macaddr);
+ if (psta)
+ mac_id = psta->mac_id;
+ else
+ mac_id = pstadel->mac_id;
+
+ DBG_871X("%s(mac_id =%d) =" MAC_FMT "\n", __func__, mac_id, MAC_ARG(pstadel->macaddr));
+
+ if (mac_id >= 0) {
+ u16 media_status;
+ media_status = (mac_id<<8)|0; /* MACID|OPMODE:0 means disconnect */
+ /* for STA, AP, ADHOC mode, report disconnect status to FW */
+ rtw_hal_set_hwreg(adapter, HW_VAR_H2C_MEDIA_STATUS_RPT, (u8 *)&media_status);
+ }
+
+ /* if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) */
+ if ((pmlmeinfo->state&0x03) == WIFI_FW_AP_STATE)
+ return;
+
+
+ mlmeext_sta_del_event_callback(adapter);
+
+ spin_lock_bh(&pmlmepriv->lock);
+
+ if (check_fwstate(pmlmepriv, WIFI_STATION_STATE)) {
+ u16 reason = *((unsigned short *)(pstadel->rsvd));
+ bool roam = false;
+ struct wlan_network *roam_target = NULL;
+
+ if (adapter->registrypriv.wifi_spec == 1) {
+ roam = false;
+ } else if (reason == WLAN_REASON_EXPIRATION_CHK && rtw_chk_roam_flags(adapter, RTW_ROAM_ON_EXPIRED)) {
+ roam = true;
+ } else if (reason == WLAN_REASON_ACTIVE_ROAM && rtw_chk_roam_flags(adapter, RTW_ROAM_ACTIVE)) {
+ roam = true;
+ roam_target = pmlmepriv->roam_network;
+ }
+#ifdef CONFIG_INTEL_WIDI
+ else if (adapter->mlmepriv.widi_state == INTEL_WIDI_STATE_CONNECTED) {
+ roam = true;
+ }
+#endif /* CONFIG_INTEL_WIDI */
+
+ if (roam == true) {
+ if (rtw_to_roam(adapter) > 0)
+ rtw_dec_to_roam(adapter); /* this stadel_event is caused by roaming, decrease to_roam */
+ else if (rtw_to_roam(adapter) == 0)
+ rtw_set_to_roam(adapter, adapter->registrypriv.max_roaming_times);
+ } else {
+ rtw_set_to_roam(adapter, 0);
+ }
+
+ rtw_free_uc_swdec_pending_queue(adapter);
+
+ rtw_free_assoc_resources(adapter, 1);
+ rtw_indicate_disconnect(adapter);
+
+ spin_lock_bh(&(pmlmepriv->scanned_queue.lock));
+ /* remove the network entry in scanned_queue */
+ pwlan = rtw_find_network(&pmlmepriv->scanned_queue, tgt_network->network.MacAddress);
+ if (pwlan) {
+ pwlan->fixed = false;
+ rtw_free_network_nolock(adapter, pwlan);
+ }
+ spin_unlock_bh(&(pmlmepriv->scanned_queue.lock));
+
+#ifdef CONFIG_INTEL_WIDI
+ if (!rtw_to_roam(adapter))
+ process_intel_widi_disconnect(adapter, 1);
+#endif /* CONFIG_INTEL_WIDI */
+
+ _rtw_roaming(adapter, roam_target);
+ }
+
+ if (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) ||
+ check_fwstate(pmlmepriv, WIFI_ADHOC_STATE)) {
+
+ rtw_free_stainfo(adapter, psta);
+
+ if (adapter->stapriv.asoc_sta_count == 1) {/* a sta + bc/mc_stainfo (not Ibss_stainfo) */
+ /* rtw_indicate_disconnect(adapter);removed@20091105 */
+ spin_lock_bh(&(pmlmepriv->scanned_queue.lock));
+ /* free old ibss network */
+ /* pwlan = rtw_find_network(&pmlmepriv->scanned_queue, pstadel->macaddr); */
+ pwlan = rtw_find_network(&pmlmepriv->scanned_queue, tgt_network->network.MacAddress);
+ if (pwlan) {
+ pwlan->fixed = false;
+ rtw_free_network_nolock(adapter, pwlan);
+ }
+ spin_unlock_bh(&(pmlmepriv->scanned_queue.lock));
+ /* re-create ibss */
+ pdev_network = &(adapter->registrypriv.dev_network);
+ pibss = adapter->registrypriv.dev_network.MacAddress;
+
+ memcpy(pdev_network, &tgt_network->network, get_wlan_bssid_ex_sz(&tgt_network->network));
+
+ memset(&pdev_network->Ssid, 0, sizeof(struct ndis_802_11_ssid));
+ memcpy(&pdev_network->Ssid, &pmlmepriv->assoc_ssid, sizeof(struct ndis_802_11_ssid));
+
+ rtw_update_registrypriv_dev_network(adapter);
+
+ rtw_generate_random_ibss(pibss);
+
+ if (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE)) {
+ set_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE);
+ _clr_fwstate_(pmlmepriv, WIFI_ADHOC_STATE);
+ }
+
+ if (rtw_createbss_cmd(adapter) != _SUCCESS) {
+
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("***Error =>stadel_event_callback: rtw_createbss_cmd status FAIL***\n "));
+
+ }
+
+
+ }
+
+ }
+
+ spin_unlock_bh(&pmlmepriv->lock);
+}
+
+void rtw_cpwm_event_callback(struct adapter *padapter, u8 *pbuf)
+{
+ struct reportpwrstate_parm *preportpwrstate;
+
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("+rtw_cpwm_event_callback !!!\n"));
+ preportpwrstate = (struct reportpwrstate_parm *)pbuf;
+ preportpwrstate->state |= (u8)(adapter_to_pwrctl(padapter)->cpwm_tog + 0x80);
+ cpwm_int_hdl(padapter, preportpwrstate);
+}
+
+
+void rtw_wmm_event_callback(struct adapter *padapter, u8 *pbuf)
+{
+ WMMOnAssocRsp(padapter);
+}
+
+/*
+* _rtw_join_timeout_handler - Timeout/failure handler for CMD JoinBss
+* @adapter: pointer to struct adapter structure
+*/
+void _rtw_join_timeout_handler (struct adapter *adapter)
+{
+ struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
+
+ DBG_871X("%s, fw_state =%x\n", __func__, get_fwstate(pmlmepriv));
+
+ if (adapter->bDriverStopped || adapter->bSurpriseRemoved)
+ return;
+
+ spin_lock_bh(&pmlmepriv->lock);
+
+ if (rtw_to_roam(adapter) > 0) { /* join timeout caused by roaming */
+ while (1) {
+ rtw_dec_to_roam(adapter);
+ if (rtw_to_roam(adapter) != 0) { /* try another */
+ int do_join_r;
+ DBG_871X("%s try another roaming\n", __func__);
+ do_join_r = rtw_do_join(adapter);
+ if (_SUCCESS != do_join_r) {
+ DBG_871X("%s roaming do_join return %d\n", __func__, do_join_r);
+ continue;
+ }
+ break;
+ } else {
+#ifdef CONFIG_INTEL_WIDI
+ if (adapter->mlmepriv.widi_state == INTEL_WIDI_STATE_ROAMING) {
+ memset(pmlmepriv->sa_ext, 0x00, L2SDTA_SERVICE_VE_LEN);
+ intel_widi_wk_cmd(adapter, INTEL_WIDI_LISTEN_WK, NULL, 0);
+ DBG_871X("change to widi listen\n");
+ }
+#endif /* CONFIG_INTEL_WIDI */
+ DBG_871X("%s We've try roaming but fail\n", __func__);
+ rtw_indicate_disconnect(adapter);
+ break;
+ }
+ }
+
+ } else{
+ rtw_indicate_disconnect(adapter);
+ free_scanqueue(pmlmepriv);/* */
+
+ /* indicate disconnect for the case that join_timeout and check_fwstate != FW_LINKED */
+ rtw_cfg80211_indicate_disconnect(adapter);
+
+ }
+
+ spin_unlock_bh(&pmlmepriv->lock);
+}
+
+/*
+* rtw_scan_timeout_handler - Timeout/Failure handler for CMD SiteSurvey
+* @adapter: pointer to struct adapter structure
+*/
+void rtw_scan_timeout_handler (struct adapter *adapter)
+{
+ struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
+
+ DBG_871X(FUNC_ADPT_FMT" fw_state =%x\n", FUNC_ADPT_ARG(adapter), get_fwstate(pmlmepriv));
+
+ spin_lock_bh(&pmlmepriv->lock);
+
+ _clr_fwstate_(pmlmepriv, _FW_UNDER_SURVEY);
+
+ spin_unlock_bh(&pmlmepriv->lock);
+
+ rtw_indicate_scan_done(adapter, true);
+}
+
+void rtw_mlme_reset_auto_scan_int(struct adapter *adapter)
+{
+ struct mlme_priv *mlme = &adapter->mlmepriv;
+ struct mlme_ext_priv *pmlmeext = &adapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+
+ if (pmlmeinfo->VHT_enable) /* disable auto scan when connect to 11AC AP */
+ mlme->auto_scan_int_ms = 0;
+ else if (adapter->registrypriv.wifi_spec && is_client_associated_to_ap(adapter) == true)
+ mlme->auto_scan_int_ms = 60*1000;
+ else if (rtw_chk_roam_flags(adapter, RTW_ROAM_ACTIVE)) {
+ if (check_fwstate(mlme, WIFI_STATION_STATE) && check_fwstate(mlme, _FW_LINKED))
+ mlme->auto_scan_int_ms = mlme->roam_scan_int_ms;
+ } else
+ mlme->auto_scan_int_ms = 0; /* disabled */
+
+ return;
+}
+
+static void rtw_auto_scan_handler(struct adapter *padapter)
+{
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+
+ rtw_mlme_reset_auto_scan_int(padapter);
+
+ if (pmlmepriv->auto_scan_int_ms != 0
+ && jiffies_to_msecs(jiffies - pmlmepriv->scan_start_time) > pmlmepriv->auto_scan_int_ms) {
+
+ if (!padapter->registrypriv.wifi_spec) {
+ if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY|_FW_UNDER_LINKING) == true) {
+ DBG_871X(FUNC_ADPT_FMT" _FW_UNDER_SURVEY|_FW_UNDER_LINKING\n", FUNC_ADPT_ARG(padapter));
+ goto exit;
+ }
+
+ if (pmlmepriv->LinkDetectInfo.bBusyTraffic == true) {
+ DBG_871X(FUNC_ADPT_FMT" exit BusyTraffic\n", FUNC_ADPT_ARG(padapter));
+ goto exit;
+ }
+ }
+
+ DBG_871X(FUNC_ADPT_FMT"\n", FUNC_ADPT_ARG(padapter));
+
+ rtw_set_802_11_bssid_list_scan(padapter, NULL, 0);
+ }
+
+exit:
+ return;
+}
+
+void rtw_dynamic_check_timer_handlder(struct adapter *adapter)
+{
+ if (!adapter)
+ return;
+
+ if (adapter->hw_init_completed == false)
+ return;
+
+ if ((adapter->bDriverStopped == true) || (adapter->bSurpriseRemoved == true))
+ return;
+
+ if (adapter->net_closed == true)
+ return;
+
+ if (is_primary_adapter(adapter))
+ DBG_871X("IsBtDisabled =%d, IsBtControlLps =%d\n", rtw_btcoex_IsBtDisabled(adapter), rtw_btcoex_IsBtControlLps(adapter));
+
+ if ((adapter_to_pwrctl(adapter)->bFwCurrentInPSMode == true)
+ && (rtw_btcoex_IsBtControlLps(adapter) == false)
+ ) {
+ u8 bEnterPS;
+
+ linked_status_chk(adapter);
+
+ bEnterPS = traffic_status_watchdog(adapter, 1);
+ if (bEnterPS) {
+ /* rtw_lps_ctrl_wk_cmd(adapter, LPS_CTRL_ENTER, 1); */
+ rtw_hal_dm_watchdog_in_lps(adapter);
+ } else{
+ /* call rtw_lps_ctrl_wk_cmd(padapter, LPS_CTRL_LEAVE, 1) in traffic_status_watchdog() */
+ }
+
+ } else{
+ if (is_primary_adapter(adapter)) {
+ rtw_dynamic_chk_wk_cmd(adapter);
+ }
+ }
+
+ /* auto site survey */
+ rtw_auto_scan_handler(adapter);
+}
+
+
+inline bool rtw_is_scan_deny(struct adapter *adapter)
+{
+ struct mlme_priv *mlmepriv = &adapter->mlmepriv;
+ return (atomic_read(&mlmepriv->set_scan_deny) != 0) ? true : false;
+}
+
+inline void rtw_clear_scan_deny(struct adapter *adapter)
+{
+ struct mlme_priv *mlmepriv = &adapter->mlmepriv;
+ atomic_set(&mlmepriv->set_scan_deny, 0);
+
+ DBG_871X(FUNC_ADPT_FMT"\n", FUNC_ADPT_ARG(adapter));
+}
+
+void rtw_set_scan_deny_timer_hdl(struct adapter *adapter)
+{
+ rtw_clear_scan_deny(adapter);
+}
+
+void rtw_set_scan_deny(struct adapter *adapter, u32 ms)
+{
+ struct mlme_priv *mlmepriv = &adapter->mlmepriv;
+
+ DBG_871X(FUNC_ADPT_FMT"\n", FUNC_ADPT_ARG(adapter));
+ atomic_set(&mlmepriv->set_scan_deny, 1);
+ _set_timer(&mlmepriv->set_scan_deny_timer, ms);
+}
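+
+/*
+* Usage sketch (the caller side is assumed, not shown in this file): open a
+* short deny window, e.g. the 3000 ms used after association above, reject
+* new scan requests while it is active, and let set_scan_deny_timer clear it
+* again via rtw_set_scan_deny_timer_hdl():
+*
+*  rtw_set_scan_deny(adapter, 3000);
+*  ...
+*  if (rtw_is_scan_deny(adapter))
+*      return;
+*/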
+
+/*
+* Select a new roaming candidate from the original @param candidate and @param competitor
+* @return true: candidate is updated
+* @return false: candidate is not updated
+*/
+static int rtw_check_roaming_candidate(struct mlme_priv *mlme
+ , struct wlan_network **candidate, struct wlan_network *competitor)
+{
+ int updated = false;
+ struct adapter *adapter = container_of(mlme, struct adapter, mlmepriv);
+
+ if (is_same_ess(&competitor->network, &mlme->cur_network.network) == false)
+ goto exit;
+
+ if (rtw_is_desired_network(adapter, competitor) == false)
+ goto exit;
+
+ DBG_871X("roam candidate:%s %s("MAC_FMT", ch%3u) rssi:%d, age:%5d\n",
+ (competitor == mlme->cur_network_scanned)?"*":" ",
+ competitor->network.Ssid.Ssid,
+ MAC_ARG(competitor->network.MacAddress),
+ competitor->network.Configuration.DSConfig,
+ (int)competitor->network.Rssi,
+ jiffies_to_msecs(jiffies - competitor->last_scanned)
+ );
+
+ /* got specific addr to roam */
+ if (!is_zero_mac_addr(mlme->roam_tgt_addr)) {
+ if (!memcmp(mlme->roam_tgt_addr, competitor->network.MacAddress, ETH_ALEN))
+ goto update;
+ else
+ goto exit;
+ }
+ if (jiffies_to_msecs(jiffies - competitor->last_scanned) >= mlme->roam_scanr_exp_ms)
+ goto exit;
+
+ if (competitor->network.Rssi - mlme->cur_network_scanned->network.Rssi < mlme->roam_rssi_diff_th)
+ goto exit;
+
+ if (*candidate != NULL && (*candidate)->network.Rssi >= competitor->network.Rssi)
+ goto exit;
+
+update:
+ *candidate = competitor;
+ updated = true;
+
+exit:
+ return updated;
+}
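+
+/*
+* Worked example (numbers are illustrative assumptions): with
+* roam_rssi_diff_th = 10 and roam_scanr_exp_ms = 10000, a same-ESS competitor
+* scanned 2 s ago whose Rssi beats the currently associated AP by 12 becomes
+* the new candidate, while one last scanned 15 s ago is rejected as expired.
+*/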
+
+int rtw_select_roaming_candidate(struct mlme_priv *mlme)
+{
+ int ret = _FAIL;
+ struct list_head *phead;
+ struct adapter *adapter;
+ struct __queue *queue = &(mlme->scanned_queue);
+ struct wlan_network *pnetwork = NULL;
+ struct wlan_network *candidate = NULL;
+
+ if (mlme->cur_network_scanned == NULL) {
+ rtw_warn_on(1);
+ return ret;
+ }
+
+ spin_lock_bh(&(mlme->scanned_queue.lock));
+ phead = get_list_head(queue);
+ adapter = (struct adapter *)mlme->nic_hdl;
+
+ mlme->pscanned = get_next(phead);
+
+ while (phead != mlme->pscanned) {
+
+ pnetwork = LIST_CONTAINOR(mlme->pscanned, struct wlan_network, list);
+ if (pnetwork == NULL) {
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("%s return _FAIL:(pnetwork == NULL)\n", __func__));
+ ret = _FAIL;
+ goto exit;
+ }
+
+ mlme->pscanned = get_next(mlme->pscanned);
+
+ DBG_871X("%s("MAC_FMT", ch%u) rssi:%d\n"
+ , pnetwork->network.Ssid.Ssid
+ , MAC_ARG(pnetwork->network.MacAddress)
+ , pnetwork->network.Configuration.DSConfig
+ , (int)pnetwork->network.Rssi);
+
+ rtw_check_roaming_candidate(mlme, &candidate, pnetwork);
+
+ }
+
+ if (candidate == NULL) {
+ DBG_871X("%s: return _FAIL(candidate == NULL)\n", __func__);
+ ret = _FAIL;
+ goto exit;
+ } else {
+ DBG_871X("%s: candidate: %s("MAC_FMT", ch:%u)\n", __func__,
+ candidate->network.Ssid.Ssid, MAC_ARG(candidate->network.MacAddress),
+ candidate->network.Configuration.DSConfig);
+
+ mlme->roam_network = candidate;
+
+ if (!memcmp(candidate->network.MacAddress, mlme->roam_tgt_addr, ETH_ALEN))
+ memset(mlme->roam_tgt_addr, 0, ETH_ALEN);
+ }
+
+ ret = _SUCCESS;
+exit:
+ spin_unlock_bh(&(mlme->scanned_queue.lock));
+
+ return ret;
+}
+
+/*
+* Select a new join candidate from the original @param candidate and @param competitor
+* @return true: candidate is updated
+* @return false: candidate is not updated
+*/
+static int rtw_check_join_candidate(struct mlme_priv *mlme
+ , struct wlan_network **candidate, struct wlan_network *competitor)
+{
+ int updated = false;
+ struct adapter *adapter = container_of(mlme, struct adapter, mlmepriv);
+
+
+ /* check bssid, if needed */
+ if (mlme->assoc_by_bssid == true) {
+ if (memcmp(competitor->network.MacAddress, mlme->assoc_bssid, ETH_ALEN))
+ goto exit;
+ }
+
+ /* check ssid, if needed */
+ if (mlme->assoc_ssid.Ssid[0] && mlme->assoc_ssid.SsidLength) {
+ if (competitor->network.Ssid.SsidLength != mlme->assoc_ssid.SsidLength
+ || memcmp(competitor->network.Ssid.Ssid, mlme->assoc_ssid.Ssid, mlme->assoc_ssid.SsidLength)
+ )
+ goto exit;
+ }
+
+ if (rtw_is_desired_network(adapter, competitor) == false)
+ goto exit;
+
+ if (rtw_to_roam(adapter) > 0) {
+ if (jiffies_to_msecs(jiffies - competitor->last_scanned) >= mlme->roam_scanr_exp_ms
+ || is_same_ess(&competitor->network, &mlme->cur_network.network) == false
+ )
+ goto exit;
+ }
+
+ if (*candidate == NULL || (*candidate)->network.Rssi < competitor->network.Rssi) {
+ *candidate = competitor;
+ updated = true;
+ }
+
+ if (updated) {
+ DBG_871X("[by_bssid:%u][assoc_ssid:%s]"
+ "[to_roam:%u] "
+ "new candidate: %s("MAC_FMT", ch%u) rssi:%d\n",
+ mlme->assoc_by_bssid,
+ mlme->assoc_ssid.Ssid,
+ rtw_to_roam(adapter),
+ (*candidate)->network.Ssid.Ssid,
+ MAC_ARG((*candidate)->network.MacAddress),
+ (*candidate)->network.Configuration.DSConfig,
+ (int)(*candidate)->network.Rssi
+ );
+ }
+
+exit:
+ return updated;
+}
+
+/*
+Calling context:
+The caller of the sub-routine will be in critical section...
+
+The caller must hold the following spinlock
+
+pmlmepriv->lock
+
+
+*/
+
+int rtw_select_and_join_from_scanned_queue(struct mlme_priv *pmlmepriv)
+{
+ int ret;
+ struct list_head *phead;
+ struct adapter *adapter;
+ struct __queue *queue = &(pmlmepriv->scanned_queue);
+ struct wlan_network *pnetwork = NULL;
+ struct wlan_network *candidate = NULL;
+
+ adapter = (struct adapter *)pmlmepriv->nic_hdl;
+
+ spin_lock_bh(&(pmlmepriv->scanned_queue.lock));
+
+ if (pmlmepriv->roam_network) {
+ candidate = pmlmepriv->roam_network;
+ pmlmepriv->roam_network = NULL;
+ goto candidate_exist;
+ }
+
+ phead = get_list_head(queue);
+ pmlmepriv->pscanned = get_next(phead);
+
+ while (phead != pmlmepriv->pscanned) {
+
+ pnetwork = LIST_CONTAINOR(pmlmepriv->pscanned, struct wlan_network, list);
+ if (pnetwork == NULL) {
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("%s return _FAIL:(pnetwork == NULL)\n", __func__));
+ ret = _FAIL;
+ goto exit;
+ }
+
+ pmlmepriv->pscanned = get_next(pmlmepriv->pscanned);
+
+ DBG_871X("%s("MAC_FMT", ch%u) rssi:%d\n"
+ , pnetwork->network.Ssid.Ssid
+ , MAC_ARG(pnetwork->network.MacAddress)
+ , pnetwork->network.Configuration.DSConfig
+ , (int)pnetwork->network.Rssi);
+
+ rtw_check_join_candidate(pmlmepriv, &candidate, pnetwork);
+
+ }
+
+ if (candidate == NULL) {
+ DBG_871X("%s: return _FAIL(candidate == NULL)\n", __func__);
+#ifdef CONFIG_WOWLAN
+ _clr_fwstate_(pmlmepriv, _FW_LINKED|_FW_UNDER_LINKING);
+#endif
+ ret = _FAIL;
+ goto exit;
+ } else {
+ DBG_871X("%s: candidate: %s("MAC_FMT", ch:%u)\n", __func__,
+ candidate->network.Ssid.Ssid, MAC_ARG(candidate->network.MacAddress),
+ candidate->network.Configuration.DSConfig);
+ goto candidate_exist;
+ }
+
+candidate_exist:
+
+ /* check for situation of _FW_LINKED */
+ if (check_fwstate(pmlmepriv, _FW_LINKED) == true) {
+ DBG_871X("%s: _FW_LINKED while ask_for_joinbss!!!\n", __func__);
+
+ rtw_disassoc_cmd(adapter, 0, true);
+ rtw_indicate_disconnect(adapter);
+ rtw_free_assoc_resources(adapter, 0);
+ }
+
+ set_fwstate(pmlmepriv, _FW_UNDER_LINKING);
+ ret = rtw_joinbss_cmd(adapter, candidate);
+
+exit:
+ spin_unlock_bh(&(pmlmepriv->scanned_queue.lock));
+ return ret;
+}
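+
+/*
+* Calling sketch, following the locking note above the function (the caller
+* already holds pmlmepriv->lock; the caller code itself is assumed, not shown
+* here):
+*
+*  spin_lock_bh(&pmlmepriv->lock);
+*  ret = rtw_select_and_join_from_scanned_queue(pmlmepriv);
+*  spin_unlock_bh(&pmlmepriv->lock);
+*/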
+
+sint rtw_set_auth(struct adapter *adapter, struct security_priv *psecuritypriv)
+{
+ struct cmd_obj *pcmd;
+ struct setauth_parm *psetauthparm;
+ struct cmd_priv *pcmdpriv = &(adapter->cmdpriv);
+ sint res = _SUCCESS;
+
+ pcmd = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ if (pcmd == NULL) {
+ res = _FAIL; /* try again */
+ goto exit;
+ }
+
+ psetauthparm = (struct setauth_parm *)rtw_zmalloc(sizeof(struct setauth_parm));
+ if (psetauthparm == NULL) {
+ kfree((unsigned char *)pcmd);
+ res = _FAIL;
+ goto exit;
+ }
+
+ memset(psetauthparm, 0, sizeof(struct setauth_parm));
+ psetauthparm->mode = (unsigned char)psecuritypriv->dot11AuthAlgrthm;
+
+ pcmd->cmdcode = _SetAuth_CMD_;
+ pcmd->parmbuf = (unsigned char *)psetauthparm;
+ pcmd->cmdsz = (sizeof(struct setauth_parm));
+ pcmd->rsp = NULL;
+ pcmd->rspsz = 0;
+
+
+ INIT_LIST_HEAD(&pcmd->list);
+
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("after enqueue set_auth_cmd, auth_mode =%x\n", psecuritypriv->dot11AuthAlgrthm));
+
+ res = rtw_enqueue_cmd(pcmdpriv, pcmd);
+
+exit:
+ return res;
+}
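+
+/*
+* Usage sketch (hypothetical caller): queue the configured auth algorithm
+* before joining; _FAIL here only means the command node could not be
+* allocated, so the call may simply be retried:
+*
+*  if (rtw_set_auth(adapter, &adapter->securitypriv) != _SUCCESS)
+*      return _FAIL;
+*/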
+
+sint rtw_set_key(struct adapter *adapter, struct security_priv *psecuritypriv, sint keyid, u8 set_tx, bool enqueue)
+{
+ u8 keylen;
+ struct cmd_obj *pcmd;
+ struct setkey_parm *psetkeyparm;
+ struct cmd_priv *pcmdpriv = &(adapter->cmdpriv);
+ sint res = _SUCCESS;
+
+ psetkeyparm = (struct setkey_parm *)rtw_zmalloc(sizeof(struct setkey_parm));
+ if (psetkeyparm == NULL) {
+ res = _FAIL;
+ goto exit;
+ }
+ memset(psetkeyparm, 0, sizeof(struct setkey_parm));
+
+ if (psecuritypriv->dot11AuthAlgrthm == dot11AuthAlgrthm_8021X) {
+ psetkeyparm->algorithm = (unsigned char)psecuritypriv->dot118021XGrpPrivacy;
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("\n rtw_set_key: psetkeyparm->algorithm =(unsigned char)psecuritypriv->dot118021XGrpPrivacy =%d\n", psetkeyparm->algorithm));
+ } else {
+ psetkeyparm->algorithm = (u8)psecuritypriv->dot11PrivacyAlgrthm;
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("\n rtw_set_key: psetkeyparm->algorithm =(u8)psecuritypriv->dot11PrivacyAlgrthm =%d\n", psetkeyparm->algorithm));
+
+ }
+ psetkeyparm->keyid = (u8)keyid;/* 0~3 */
+ psetkeyparm->set_tx = set_tx;
+ if (is_wep_enc(psetkeyparm->algorithm))
+ adapter->securitypriv.key_mask |= BIT(psetkeyparm->keyid);
+
+ DBG_871X("==> rtw_set_key algorithm(%x), keyid(%x), key_mask(%x)\n", psetkeyparm->algorithm, psetkeyparm->keyid, adapter->securitypriv.key_mask);
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("\n rtw_set_key: psetkeyparm->algorithm =%d psetkeyparm->keyid =(u8)keyid =%d\n", psetkeyparm->algorithm, keyid));
+
+ switch (psetkeyparm->algorithm) {
+
+ case _WEP40_:
+ keylen = 5;
+ memcpy(&(psetkeyparm->key[0]), &(psecuritypriv->dot11DefKey[keyid].skey[0]), keylen);
+ break;
+ case _WEP104_:
+ keylen = 13;
+ memcpy(&(psetkeyparm->key[0]), &(psecuritypriv->dot11DefKey[keyid].skey[0]), keylen);
+ break;
+ case _TKIP_:
+ keylen = 16;
+ memcpy(&psetkeyparm->key, &psecuritypriv->dot118021XGrpKey[keyid], keylen);
+ psetkeyparm->grpkey = 1;
+ break;
+ case _AES_:
+ keylen = 16;
+ memcpy(&psetkeyparm->key, &psecuritypriv->dot118021XGrpKey[keyid], keylen);
+ psetkeyparm->grpkey = 1;
+ break;
+ default:
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("\n rtw_set_key:psecuritypriv->dot11PrivacyAlgrthm = %x (must be 1 or 2 or 4 or 5)\n", psecuritypriv->dot11PrivacyAlgrthm));
+ res = _FAIL;
+ kfree((unsigned char *)psetkeyparm);
+ goto exit;
+ }
+
+
+ if (enqueue) {
+ pcmd = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ if (pcmd == NULL) {
+ kfree((unsigned char *)psetkeyparm);
+ res = _FAIL; /* try again */
+ goto exit;
+ }
+
+ pcmd->cmdcode = _SetKey_CMD_;
+ pcmd->parmbuf = (u8 *)psetkeyparm;
+ pcmd->cmdsz = (sizeof(struct setkey_parm));
+ pcmd->rsp = NULL;
+ pcmd->rspsz = 0;
+
+ INIT_LIST_HEAD(&pcmd->list);
+
+ /* sema_init(&(pcmd->cmd_sem), 0); */
+
+ res = rtw_enqueue_cmd(pcmdpriv, pcmd);
+ } else{
+ setkey_hdl(adapter, (u8 *)psetkeyparm);
+ kfree((u8 *) psetkeyparm);
+ }
+exit:
+ return res;
+}
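+
+/*
+* Usage sketch (hypothetical caller): install default key 0 as the TX key via
+* the command queue; for WEP the key material is read from
+* psecuritypriv->dot11DefKey[keyid] inside the switch above:
+*
+*  rtw_set_key(adapter, &adapter->securitypriv, 0, 1, true);
+*/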
+
+/* adjust IEs for rtw_joinbss_cmd in WMM */
+int rtw_restruct_wmm_ie(struct adapter *adapter, u8 *in_ie, u8 *out_ie, uint in_len, uint initial_out_len)
+{
+ unsigned int ielength = 0;
+ unsigned int i, j;
+
+ i = 12; /* after the fixed IE */
+ while (i < in_len) {
+ ielength = initial_out_len;
+
+ if (i + 5 < in_len && in_ie[i] == 0xDD && in_ie[i+2] == 0x00 && in_ie[i+3] == 0x50 && in_ie[i+4] == 0xF2 && in_ie[i+5] == 0x02) { /* WMM element ID and OUI */
+ for (j = i; j < i + 9; j++) {
+ out_ie[ielength] = in_ie[j];
+ ielength++;
+ }
+ out_ie[initial_out_len + 1] = 0x07;
+ out_ie[initial_out_len + 6] = 0x00;
+ out_ie[initial_out_len + 8] = 0x00;
+
+ break;
+ }
+
+ i += (in_ie[i+1]+2); /* to the next IE element */
+ }
+
+ return ielength;
+
+}
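+
+/*
+* Layout sketch of the 9-byte element emitted above: a WMM Information
+* Element (vendor IE 0xDD, OUI 00:50:F2, OUI type 0x02) whose length is
+* forced to 7, OUI sub-type forced to 0 and QoS-info byte cleared:
+*
+*  DD 07 00 50 F2 02 00 vv 00   (vv = version byte copied from the AP)
+*/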
+
+
+/* */
+/* Ported from 8185: IsInPreAuthKeyList(). (Renamed from SecIsInPreAuthKeyList(), 2006-10-13.) */
+/* Added by Annie, 2006-05-07. */
+/* */
+/* Search by BSSID, */
+/* Return Value: */
+/* -1 :if there is no pre-auth key in the table */
+/* >= 0 :if there is pre-auth key, and return the entry id */
+/* */
+/* */
+
+static int SecIsInPMKIDList(struct adapter *Adapter, u8 *bssid)
+{
+ struct security_priv *psecuritypriv = &Adapter->securitypriv;
+ int i = 0;
+
+ do {
+ if ((psecuritypriv->PMKIDList[i].bUsed) &&
+ (!memcmp(psecuritypriv->PMKIDList[i].Bssid, bssid, ETH_ALEN))) {
+ break;
+ } else{
+ i++;
+ /* continue; */
+ }
+
+ } while (i < NUM_PMKID_CACHE);
+
+ if (i == NUM_PMKID_CACHE) {
+ i = -1;/* Could not find. */
+ } else {
+ /* There is one Pre-Authentication Key for the specific BSSID. */
+ }
+
+ return i;
+
+}
+
+/* */
+/* Check the RSN IE length */
+/* If the RSN IE length is <= 20, the RSN IE does not include the PMKID information */
+/* Elements 0-11 in the array are the fixed IE */
+/* Element 12 in the array is the IE ID and element 13 is the IE length */
+/* */
+
+static int rtw_append_pmkid(struct adapter *Adapter, int iEntry, u8 *ie, uint ie_len)
+{
+ struct security_priv *psecuritypriv = &Adapter->securitypriv;
+
+ if (ie[13] <= 20) {
+ /* The RSN IE didn't include the PMK ID, append the PMK information */
+ ie[ie_len] = 1;
+ ie_len++;
+ ie[ie_len] = 0; /* PMKID count = 1 (bytes 0x01 0x00, little-endian) */
+ ie_len++;
+ memcpy(&ie[ie_len], &psecuritypriv->PMKIDList[iEntry].PMKID, 16);
+
+ ie_len += 16;
+ ie[13] += 18;/* PMKID length = 2+16 */
+
+ }
+ return ie_len;
+}
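+
+/*
+* Resulting tail, sketched: a PMKID Count of 1 (bytes 0x01 0x00,
+* little-endian) followed by one cached 16-byte PMKID, so ie[13] (the RSN IE
+* length) grows by 2 + 16 = 18 as done above.
+*/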
+
+sint rtw_restruct_sec_ie(struct adapter *adapter, u8 *in_ie, u8 *out_ie, uint in_len)
+{
+ u8 authmode = 0x0;
+ uint ielength;
+ int iEntry;
+
+ struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
+ struct security_priv *psecuritypriv = &adapter->securitypriv;
+ uint ndisauthmode = psecuritypriv->ndisauthtype;
+
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_notice_,
+ ("+rtw_restruct_sec_ie: ndisauthmode =%d\n", ndisauthmode));
+
+ /* copy fixed ie only */
+ memcpy(out_ie, in_ie, 12);
+ ielength = 12;
+ if ((ndisauthmode == Ndis802_11AuthModeWPA) || (ndisauthmode == Ndis802_11AuthModeWPAPSK))
+ authmode = _WPA_IE_ID_;
+ if ((ndisauthmode == Ndis802_11AuthModeWPA2) || (ndisauthmode == Ndis802_11AuthModeWPA2PSK))
+ authmode = _WPA2_IE_ID_;
+
+ if (check_fwstate(pmlmepriv, WIFI_UNDER_WPS)) {
+ memcpy(out_ie+ielength, psecuritypriv->wps_ie, psecuritypriv->wps_ie_len);
+
+ ielength += psecuritypriv->wps_ie_len;
+ } else if ((authmode == _WPA_IE_ID_) || (authmode == _WPA2_IE_ID_)) {
+ /* copy RSN or SSN */
+ memcpy(&out_ie[ielength], &psecuritypriv->supplicant_ie[0], psecuritypriv->supplicant_ie[1]+2);
+ /* debug for CONFIG_IEEE80211W
+ {
+ int jj;
+ printk("supplicant_ie_length =%d &&&&&&&&&&&&&&&&&&&\n", psecuritypriv->supplicant_ie[1]+2);
+ for (jj = 0; jj < psecuritypriv->supplicant_ie[1]+2; jj++)
+ printk(" %02x ", psecuritypriv->supplicant_ie[jj]);
+ printk("\n");
+ }*/
+ ielength += psecuritypriv->supplicant_ie[1]+2;
+ rtw_report_sec_ie(adapter, authmode, psecuritypriv->supplicant_ie);
+ }
+
+ iEntry = SecIsInPMKIDList(adapter, pmlmepriv->assoc_bssid);
+ if (iEntry < 0) {
+ return ielength;
+ } else{
+ if (authmode == _WPA2_IE_ID_)
+ ielength = rtw_append_pmkid(adapter, iEntry, out_ie, ielength);
+ }
+ return ielength;
+}
+
+void rtw_init_registrypriv_dev_network(struct adapter *adapter)
+{
+ struct registry_priv *pregistrypriv = &adapter->registrypriv;
+ struct eeprom_priv *peepriv = &adapter->eeprompriv;
+ struct wlan_bssid_ex *pdev_network = &pregistrypriv->dev_network;
+ u8 *myhwaddr = myid(peepriv);
+
+ memcpy(pdev_network->MacAddress, myhwaddr, ETH_ALEN);
+
+ memcpy(&pdev_network->Ssid, &pregistrypriv->ssid, sizeof(struct ndis_802_11_ssid));
+
+ pdev_network->Configuration.Length = sizeof(struct ndis_802_11_conf);
+ pdev_network->Configuration.BeaconPeriod = 100;
+ pdev_network->Configuration.FHConfig.Length = 0;
+ pdev_network->Configuration.FHConfig.HopPattern = 0;
+ pdev_network->Configuration.FHConfig.HopSet = 0;
+ pdev_network->Configuration.FHConfig.DwellTime = 0;
+}
+
+void rtw_update_registrypriv_dev_network(struct adapter *adapter)
+{
+ int sz = 0;
+ struct registry_priv *pregistrypriv = &adapter->registrypriv;
+ struct wlan_bssid_ex *pdev_network = &pregistrypriv->dev_network;
+ struct security_priv *psecuritypriv = &adapter->securitypriv;
+ struct wlan_network *cur_network = &adapter->mlmepriv.cur_network;
+ /* struct xmit_priv *pxmitpriv = &adapter->xmitpriv; */
+
+ pdev_network->Privacy = (psecuritypriv->dot11PrivacyAlgrthm > 0 ? 1 : 0) ; /* adhoc no 802.1x */
+
+ pdev_network->Rssi = 0;
+
+ switch (pregistrypriv->wireless_mode) {
+ case WIRELESS_11B:
+ pdev_network->NetworkTypeInUse = (Ndis802_11DS);
+ break;
+ case WIRELESS_11G:
+ case WIRELESS_11BG:
+ case WIRELESS_11_24N:
+ case WIRELESS_11G_24N:
+ case WIRELESS_11BG_24N:
+ pdev_network->NetworkTypeInUse = (Ndis802_11OFDM24);
+ break;
+ case WIRELESS_11A:
+ case WIRELESS_11A_5N:
+ pdev_network->NetworkTypeInUse = (Ndis802_11OFDM5);
+ break;
+ case WIRELESS_11ABGN:
+ if (pregistrypriv->channel > 14)
+ pdev_network->NetworkTypeInUse = (Ndis802_11OFDM5);
+ else
+ pdev_network->NetworkTypeInUse = (Ndis802_11OFDM24);
+ break;
+ default:
+ /* TODO */
+ break;
+ }
+
+ pdev_network->Configuration.DSConfig = (pregistrypriv->channel);
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("pregistrypriv->channel =%d, pdev_network->Configuration.DSConfig = 0x%x\n", pregistrypriv->channel, pdev_network->Configuration.DSConfig));
+
+ if (cur_network->network.InfrastructureMode == Ndis802_11IBSS)
+ pdev_network->Configuration.ATIMWindow = (0);
+
+ pdev_network->InfrastructureMode = (cur_network->network.InfrastructureMode);
+
+ /* 1. Supported rates */
+ /* 2. IE */
+
+ /* rtw_set_supported_rate(pdev_network->SupportedRates, pregistrypriv->wireless_mode) ; will be called in rtw_generate_ie */
+ sz = rtw_generate_ie(pregistrypriv);
+
+ pdev_network->IELength = sz;
+
+ pdev_network->Length = get_wlan_bssid_ex_sz((struct wlan_bssid_ex *)pdev_network);
+
+ /* notes: translate IELength & Length after assign the Length to cmdsz in createbss_cmd(); */
+ /* pdev_network->IELength = cpu_to_le32(sz); */
+}
+
+void rtw_get_encrypt_decrypt_from_registrypriv(struct adapter *adapter)
+{
+}
+
+/* the function is at passive_level */
+void rtw_joinbss_reset(struct adapter *padapter)
+{
+ u8 threshold;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+
+ struct ht_priv *phtpriv = &pmlmepriv->htpriv;
+
+ /* todo: if you want to do something io/reg/hw setting before join_bss, please add code here */
+
+ pmlmepriv->num_FortyMHzIntolerant = 0;
+
+ pmlmepriv->num_sta_no_ht = 0;
+
+ phtpriv->ampdu_enable = false;/* reset to disabled */
+
+ /* TH = 1 => invalidate (disable) USB RX aggregation */
+ /* TH = 0 => validate (enable) USB RX aggregation, use init value. */
+ if (phtpriv->ht_option) {
+ if (padapter->registrypriv.wifi_spec == 1)
+ threshold = 1;
+ else
+ threshold = 0;
+ rtw_hal_set_hwreg(padapter, HW_VAR_RXDMA_AGG_PG_TH, (u8 *)(&threshold));
+ } else{
+ threshold = 1;
+ rtw_hal_set_hwreg(padapter, HW_VAR_RXDMA_AGG_PG_TH, (u8 *)(&threshold));
+ }
+}
+
+void rtw_ht_use_default_setting(struct adapter *padapter)
+{
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct ht_priv *phtpriv = &pmlmepriv->htpriv;
+ struct registry_priv *pregistrypriv = &padapter->registrypriv;
+ bool bHwLDPCSupport = false, bHwSTBCSupport = false;
+ bool bHwSupportBeamformer = false, bHwSupportBeamformee = false;
+
+ if (pregistrypriv->wifi_spec)
+ phtpriv->bss_coexist = 1;
+ else
+ phtpriv->bss_coexist = 0;
+
+ phtpriv->sgi_40m = TEST_FLAG(pregistrypriv->short_gi, BIT1) ? true : false;
+ phtpriv->sgi_20m = TEST_FLAG(pregistrypriv->short_gi, BIT0) ? true : false;
+
+ /* LDPC support */
+ rtw_hal_get_def_var(padapter, HAL_DEF_RX_LDPC, (u8 *)&bHwLDPCSupport);
+ CLEAR_FLAGS(phtpriv->ldpc_cap);
+ if (bHwLDPCSupport) {
+ if (TEST_FLAG(pregistrypriv->ldpc_cap, BIT4))
+ SET_FLAG(phtpriv->ldpc_cap, LDPC_HT_ENABLE_RX);
+ }
+ rtw_hal_get_def_var(padapter, HAL_DEF_TX_LDPC, (u8 *)&bHwLDPCSupport);
+ if (bHwLDPCSupport) {
+ if (TEST_FLAG(pregistrypriv->ldpc_cap, BIT5))
+ SET_FLAG(phtpriv->ldpc_cap, LDPC_HT_ENABLE_TX);
+ }
+ if (phtpriv->ldpc_cap)
+ DBG_871X("[HT] Support LDPC = 0x%02X\n", phtpriv->ldpc_cap);
+
+ /* STBC */
+ rtw_hal_get_def_var(padapter, HAL_DEF_TX_STBC, (u8 *)&bHwSTBCSupport);
+ CLEAR_FLAGS(phtpriv->stbc_cap);
+ if (bHwSTBCSupport) {
+ if (TEST_FLAG(pregistrypriv->stbc_cap, BIT5))
+ SET_FLAG(phtpriv->stbc_cap, STBC_HT_ENABLE_TX);
+ }
+ rtw_hal_get_def_var(padapter, HAL_DEF_RX_STBC, (u8 *)&bHwSTBCSupport);
+ if (bHwSTBCSupport) {
+ if (TEST_FLAG(pregistrypriv->stbc_cap, BIT4))
+ SET_FLAG(phtpriv->stbc_cap, STBC_HT_ENABLE_RX);
+ }
+ if (phtpriv->stbc_cap)
+ DBG_871X("[HT] Support STBC = 0x%02X\n", phtpriv->stbc_cap);
+
+ /* Beamforming setting */
+ rtw_hal_get_def_var(padapter, HAL_DEF_EXPLICIT_BEAMFORMER, (u8 *)&bHwSupportBeamformer);
+ rtw_hal_get_def_var(padapter, HAL_DEF_EXPLICIT_BEAMFORMEE, (u8 *)&bHwSupportBeamformee);
+ CLEAR_FLAGS(phtpriv->beamform_cap);
+ if (TEST_FLAG(pregistrypriv->beamform_cap, BIT4) && bHwSupportBeamformer) {
+ SET_FLAG(phtpriv->beamform_cap, BEAMFORMING_HT_BEAMFORMER_ENABLE);
+ DBG_871X("[HT] Support Beamformer\n");
+ }
+ if (TEST_FLAG(pregistrypriv->beamform_cap, BIT5) && bHwSupportBeamformee) {
+ SET_FLAG(phtpriv->beamform_cap, BEAMFORMING_HT_BEAMFORMEE_ENABLE);
+ DBG_871X("[HT] Support Beamformee\n");
+ }
+}
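+
+/*
+* Registry bits consumed above: short_gi BIT0/BIT1 select short GI for
+* 20/40 MHz, ldpc_cap BIT4/BIT5 enable LDPC RX/TX, stbc_cap BIT4/BIT5 enable
+* STBC RX/TX, and beamform_cap BIT4/BIT5 enable the HT beamformer/beamformee
+* roles, each gated by the matching HAL capability query.
+*/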
+
+void rtw_build_wmm_ie_ht(struct adapter *padapter, u8 *out_ie, uint *pout_len)
+{
+ unsigned char WMM_IE[] = {0x00, 0x50, 0xf2, 0x02, 0x00, 0x01, 0x00};
+ int out_len;
+ u8 *pframe;
+
+ if (padapter->mlmepriv.qospriv.qos_option == 0) {
+ out_len = *pout_len;
+ pframe = rtw_set_ie(out_ie+out_len, _VENDOR_SPECIFIC_IE_,
+ _WMM_IE_Length_, WMM_IE, pout_len);
+
+ padapter->mlmepriv.qospriv.qos_option = 1;
+ }
+}
+
+/* the function is >= passive_level */
+unsigned int rtw_restructure_ht_ie(struct adapter *padapter, u8 *in_ie, u8 *out_ie, uint in_len, uint *pout_len, u8 channel)
+{
+ u32 ielen, out_len;
+ enum HT_CAP_AMPDU_FACTOR max_rx_ampdu_factor;
+ unsigned char *p, *pframe;
+ struct rtw_ieee80211_ht_cap ht_capie;
+ u8 cbw40_enable = 0, stbc_rx_enable = 0, rf_type = 0, operation_bw = 0;
+ struct registry_priv *pregistrypriv = &padapter->registrypriv;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct ht_priv *phtpriv = &pmlmepriv->htpriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+
+ phtpriv->ht_option = false;
+
+ out_len = *pout_len;
+
+ memset(&ht_capie, 0, sizeof(struct rtw_ieee80211_ht_cap));
+
+ ht_capie.cap_info = cpu_to_le16(IEEE80211_HT_CAP_DSSSCCK40);
+
+ if (phtpriv->sgi_20m)
+ ht_capie.cap_info |= cpu_to_le16(IEEE80211_HT_CAP_SGI_20);
+
+ /* Get HT BW */
+ if (in_ie == NULL) {
+ /* TDLS: TODO 20/40 issue */
+ if (check_fwstate(pmlmepriv, WIFI_STATION_STATE)) {
+ operation_bw = padapter->mlmeextpriv.cur_bwmode;
+ if (operation_bw > CHANNEL_WIDTH_40)
+ operation_bw = CHANNEL_WIDTH_40;
+ } else
+ /* TDLS: TODO 40? */
+ operation_bw = CHANNEL_WIDTH_40;
+ } else {
+ p = rtw_get_ie(in_ie, _HT_ADD_INFO_IE_, &ielen, in_len);
+ if (p && (ielen == sizeof(struct ieee80211_ht_addt_info))) {
+ struct HT_info_element *pht_info = (struct HT_info_element *)(p+2);
+ if (pht_info->infos[0] & BIT(2)) {
+ switch (pht_info->infos[0] & 0x3) {
+ case 1:
+ case 3:
+ operation_bw = CHANNEL_WIDTH_40;
+ break;
+ default:
+ operation_bw = CHANNEL_WIDTH_20;
+ break;
+ }
+ } else {
+ operation_bw = CHANNEL_WIDTH_20;
+ }
+ }
+ }
+
+ /* disable 40MHz support when gd_bw_40MHz_en = 0 */
+ if (channel > 14) {
+ if ((pregistrypriv->bw_mode & 0xf0) > 0)
+ cbw40_enable = 1;
+ } else {
+ if ((pregistrypriv->bw_mode & 0x0f) > 0)
+ cbw40_enable = 1;
+ }
+
+ if ((cbw40_enable == 1) && (operation_bw == CHANNEL_WIDTH_40)) {
+ ht_capie.cap_info |= cpu_to_le16(IEEE80211_HT_CAP_SUP_WIDTH);
+ if (phtpriv->sgi_40m)
+ ht_capie.cap_info |= cpu_to_le16(IEEE80211_HT_CAP_SGI_40);
+ }
+
+ if (TEST_FLAG(phtpriv->stbc_cap, STBC_HT_ENABLE_TX))
+ ht_capie.cap_info |= cpu_to_le16(IEEE80211_HT_CAP_TX_STBC);
+
+ /* todo: disable SM power save mode */
+ ht_capie.cap_info |= cpu_to_le16(IEEE80211_HT_CAP_SM_PS);
+
+ if (TEST_FLAG(phtpriv->stbc_cap, STBC_HT_ENABLE_RX)) {
+ if ((channel <= 14 && pregistrypriv->rx_stbc == 0x1) || /* enable for 2.4GHz */
+ (pregistrypriv->wifi_spec == 1)) {
+ stbc_rx_enable = 1;
+ DBG_871X("declare supporting RX STBC\n");
+ }
+ }
+
+ /* fill default supported_mcs_set */
+ memcpy(ht_capie.supp_mcs_set, pmlmeext->default_supported_mcs_set, 16);
+
+ /* update default supported_mcs_set */
+ rtw_hal_get_hwreg(padapter, HW_VAR_RF_TYPE, (u8 *)(&rf_type));
+
+ switch (rf_type) {
+ case RF_1T1R:
+ if (stbc_rx_enable)
+ ht_capie.cap_info |= cpu_to_le16(IEEE80211_HT_CAP_RX_STBC_1R);/* RX STBC One spatial stream */
+
+ set_mcs_rate_by_mask(ht_capie.supp_mcs_set, MCS_RATE_1R);
+ break;
+
+ case RF_2T2R:
+ case RF_1T2R:
+ default:
+ if (stbc_rx_enable)
+ ht_capie.cap_info |= cpu_to_le16(IEEE80211_HT_CAP_RX_STBC_2R);/* RX STBC two spatial stream */
+
+ #ifdef CONFIG_DISABLE_MCS13TO15
+ if (((cbw40_enable == 1) && (operation_bw == CHANNEL_WIDTH_40)) && (pregistrypriv->wifi_spec != 1))
+ set_mcs_rate_by_mask(ht_capie.supp_mcs_set, MCS_RATE_2R_13TO15_OFF);
+ else
+ set_mcs_rate_by_mask(ht_capie.supp_mcs_set, MCS_RATE_2R);
+ #else /* CONFIG_DISABLE_MCS13TO15 */
+ set_mcs_rate_by_mask(ht_capie.supp_mcs_set, MCS_RATE_2R);
+ #endif /* CONFIG_DISABLE_MCS13TO15 */
+ break;
+ }
+
+ {
+ u32 rx_packet_offset, max_recvbuf_sz;
+ rtw_hal_get_def_var(padapter, HAL_DEF_RX_PACKET_OFFSET, &rx_packet_offset);
+ rtw_hal_get_def_var(padapter, HAL_DEF_MAX_RECVBUF_SZ, &max_recvbuf_sz);
+ }
+
+ if (padapter->driver_rx_ampdu_factor != 0xFF)
+ max_rx_ampdu_factor =
+ (enum HT_CAP_AMPDU_FACTOR)padapter->driver_rx_ampdu_factor;
+ else
+ rtw_hal_get_def_var(padapter, HW_VAR_MAX_RX_AMPDU_FACTOR,
+ &max_rx_ampdu_factor);
+
+ /* rtw_hal_get_def_var(padapter, HW_VAR_MAX_RX_AMPDU_FACTOR, &max_rx_ampdu_factor); */
+ ht_capie.ampdu_params_info = (max_rx_ampdu_factor&0x03);
+
+ if (padapter->securitypriv.dot11PrivacyAlgrthm == _AES_)
+ ht_capie.ampdu_params_info |= (IEEE80211_HT_CAP_AMPDU_DENSITY&(0x07<<2));
+ else
+ ht_capie.ampdu_params_info |= (IEEE80211_HT_CAP_AMPDU_DENSITY&0x00);
+
+ pframe = rtw_set_ie(out_ie+out_len, _HT_CAPABILITY_IE_,
+ sizeof(struct rtw_ieee80211_ht_cap), (unsigned char *)&ht_capie, pout_len);
+
+ phtpriv->ht_option = true;
+
+ if (in_ie != NULL) {
+ p = rtw_get_ie(in_ie, _HT_ADD_INFO_IE_, &ielen, in_len);
+ if (p && (ielen == sizeof(struct ieee80211_ht_addt_info))) {
+ out_len = *pout_len;
+ pframe = rtw_set_ie(out_ie+out_len, _HT_ADD_INFO_IE_, ielen, p+2, pout_len);
+ }
+ }
+
+ return phtpriv->ht_option;
+
+}
+
+/* the function is > passive_level (in critical_section) */
+void rtw_update_ht_cap(struct adapter *padapter, u8 *pie, uint ie_len, u8 channel)
+{
+ u8 *p, max_ampdu_sz;
+ int len;
+ /* struct sta_info *bmc_sta, *psta; */
+ struct rtw_ieee80211_ht_cap *pht_capie;
+ struct ieee80211_ht_addt_info *pht_addtinfo;
+ /* struct recv_reorder_ctrl *preorder_ctrl; */
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct ht_priv *phtpriv = &pmlmepriv->htpriv;
+ /* struct recv_priv *precvpriv = &padapter->recvpriv; */
+ struct registry_priv *pregistrypriv = &padapter->registrypriv;
+ /* struct wlan_network *pcur_network = &(pmlmepriv->cur_network);; */
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ u8 cbw40_enable = 0;
+
+
+ if (!phtpriv->ht_option)
+ return;
+
+ if ((!pmlmeinfo->HT_info_enable) || (!pmlmeinfo->HT_caps_enable))
+ return;
+
+ DBG_871X("+rtw_update_ht_cap()\n");
+
+ /* maybe needs check if ap supports rx ampdu. */
+ if ((phtpriv->ampdu_enable == false) && (pregistrypriv->ampdu_enable == 1)) {
+ if (pregistrypriv->wifi_spec == 1) {
+ /* remove this part because testbed AP should disable RX AMPDU */
+ /* phtpriv->ampdu_enable = false; */
+ phtpriv->ampdu_enable = true;
+ } else {
+ phtpriv->ampdu_enable = true;
+ }
+ } else if (pregistrypriv->ampdu_enable == 2) {
+ /* remove this part because testbed AP should disable RX AMPDU */
+ /* phtpriv->ampdu_enable = true; */
+ }
+
+
+ /* check Max Rx A-MPDU Size */
+ len = 0;
+ p = rtw_get_ie(pie+sizeof(struct ndis_802_11_fix_ie), _HT_CAPABILITY_IE_, &len, ie_len-sizeof(struct ndis_802_11_fix_ie));
+ if (p && len > 0) {
+ pht_capie = (struct rtw_ieee80211_ht_cap *)(p+2);
+ max_ampdu_sz = (pht_capie->ampdu_params_info & IEEE80211_HT_CAP_AMPDU_FACTOR);
+ max_ampdu_sz = 1 << (max_ampdu_sz+3); /* max_ampdu_sz (kbytes); */
+
+ /* DBG_871X("rtw_update_ht_cap(): max_ampdu_sz =%d\n", max_ampdu_sz); */
+ phtpriv->rx_ampdu_maxlen = max_ampdu_sz;
+
+ }
+
+
+ len = 0;
+ p = rtw_get_ie(pie+sizeof(struct ndis_802_11_fix_ie), _HT_ADD_INFO_IE_, &len, ie_len-sizeof(struct ndis_802_11_fix_ie));
+ if (p && len > 0) {
+ pht_addtinfo = (struct ieee80211_ht_addt_info *)(p+2);
+ /* todo: */
+ }
+
+ if (channel > 14) {
+ if ((pregistrypriv->bw_mode & 0xf0) > 0)
+ cbw40_enable = 1;
+ } else {
+ if ((pregistrypriv->bw_mode & 0x0f) > 0)
+ cbw40_enable = 1;
+ }
+
+ /* update cur_bwmode & cur_ch_offset */
+ if ((cbw40_enable) &&
+ (le16_to_cpu(pmlmeinfo->HT_caps.u.HT_cap_element.HT_caps_info) &
+ BIT(1)) && (pmlmeinfo->HT_info.infos[0] & BIT(2))) {
+ int i;
+ u8 rf_type;
+
+ rtw_hal_get_hwreg(padapter, HW_VAR_RF_TYPE, (u8 *)(&rf_type));
+
+ /* update the MCS set */
+ for (i = 0; i < 16; i++)
+ pmlmeinfo->HT_caps.u.HT_cap_element.MCS_rate[i] &= pmlmeext->default_supported_mcs_set[i];
+
+ /* update the MCS rates */
+ switch (rf_type) {
+ case RF_1T1R:
+ case RF_1T2R:
+ set_mcs_rate_by_mask(pmlmeinfo->HT_caps.u.HT_cap_element.MCS_rate, MCS_RATE_1R);
+ break;
+ case RF_2T2R:
+ default:
+#ifdef CONFIG_DISABLE_MCS13TO15
+ if (pmlmeext->cur_bwmode == CHANNEL_WIDTH_40 && pregistrypriv->wifi_spec != 1)
+ set_mcs_rate_by_mask(pmlmeinfo->HT_caps.u.HT_cap_element.MCS_rate, MCS_RATE_2R_13TO15_OFF);
+ else
+ set_mcs_rate_by_mask(pmlmeinfo->HT_caps.u.HT_cap_element.MCS_rate, MCS_RATE_2R);
+#else /* CONFIG_DISABLE_MCS13TO15 */
+ set_mcs_rate_by_mask(pmlmeinfo->HT_caps.u.HT_cap_element.MCS_rate, MCS_RATE_2R);
+#endif /* CONFIG_DISABLE_MCS13TO15 */
+ }
+
+ /* switch to 40MHz mode according to the AP */
+ /* pmlmeext->cur_bwmode = CHANNEL_WIDTH_40; */
+ switch ((pmlmeinfo->HT_info.infos[0] & 0x3)) {
+ case EXTCHNL_OFFSET_UPPER:
+ pmlmeext->cur_ch_offset = HAL_PRIME_CHNL_OFFSET_LOWER;
+ break;
+
+ case EXTCHNL_OFFSET_LOWER:
+ pmlmeext->cur_ch_offset = HAL_PRIME_CHNL_OFFSET_UPPER;
+ break;
+
+ default:
+ pmlmeext->cur_ch_offset = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
+ break;
+ }
+ }
+
+ /* */
+ /* Config SM Power Save setting */
+ /* */
+ pmlmeinfo->SM_PS =
+ (le16_to_cpu(pmlmeinfo->HT_caps.u.HT_cap_element.HT_caps_info) &
+ 0x0C) >> 2;
+ if (pmlmeinfo->SM_PS == WLAN_HT_CAP_SM_PS_STATIC)
+ DBG_871X("%s(): WLAN_HT_CAP_SM_PS_STATIC\n", __func__);
+
+ /* */
+ /* Config current HT Protection mode. */
+ /* */
+ pmlmeinfo->HT_protection = pmlmeinfo->HT_info.infos[1] & 0x3;
+}
+
+void rtw_issue_addbareq_cmd(struct adapter *padapter, struct xmit_frame *pxmitframe)
+{
+ u8 issued;
+ int priority;
+ struct sta_info *psta = NULL;
+ struct ht_priv *phtpriv;
+ struct pkt_attrib *pattrib = &pxmitframe->attrib;
+ s32 bmcst = IS_MCAST(pattrib->ra);
+
+ /* if (bmcst || (padapter->mlmepriv.LinkDetectInfo.bTxBusyTraffic == false)) */
+ if (bmcst || (padapter->mlmepriv.LinkDetectInfo.NumTxOkInPeriod < 100))
+ return;
+
+ priority = pattrib->priority;
+
+ psta = rtw_get_stainfo(&padapter->stapriv, pattrib->ra);
+ if (pattrib->psta != psta) {
+ DBG_871X("%s, pattrib->psta(%p) != psta(%p)\n", __func__, pattrib->psta, psta);
+ return;
+ }
+
+ if (psta == NULL) {
+ DBG_871X("%s, psta ==NUL\n", __func__);
+ return;
+ }
+
+ if (!(psta->state & _FW_LINKED)) {
+ DBG_871X("%s, psta->state(0x%x) != _FW_LINKED\n", __func__, psta->state);
+ return;
+ }
+
+
+ phtpriv = &psta->htpriv;
+
+ if ((phtpriv->ht_option == true) && (phtpriv->ampdu_enable == true)) {
+ issued = (phtpriv->agg_enable_bitmap>>priority)&0x1;
+ issued |= (phtpriv->candidate_tid_bitmap>>priority)&0x1;
+
+ if (0 == issued) {
+ DBG_871X("rtw_issue_addbareq_cmd, p =%d\n", priority);
+ psta->htpriv.candidate_tid_bitmap |= BIT((u8)priority);
+ rtw_addbareq_cmd(padapter, (u8) priority, pattrib->ra);
+ }
+ }
+
+}
+
+void rtw_append_exented_cap(struct adapter *padapter, u8 *out_ie, uint *pout_len)
+{
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct ht_priv *phtpriv = &pmlmepriv->htpriv;
+ u8 cap_content[8] = {0};
+ u8 *pframe;
+
+
+ if (phtpriv->bss_coexist) {
+ SET_EXT_CAPABILITY_ELE_BSS_COEXIST(cap_content, 1);
+ }
+
+ pframe = rtw_set_ie(out_ie + *pout_len, EID_EXTCapability, 8, cap_content, pout_len);
+}
+
+inline void rtw_set_to_roam(struct adapter *adapter, u8 to_roam)
+{
+ if (to_roam == 0)
+ adapter->mlmepriv.to_join = false;
+ adapter->mlmepriv.to_roam = to_roam;
+}
+
+inline u8 rtw_dec_to_roam(struct adapter *adapter)
+{
+ adapter->mlmepriv.to_roam--;
+ return adapter->mlmepriv.to_roam;
+}
+
+inline u8 rtw_to_roam(struct adapter *adapter)
+{
+ return adapter->mlmepriv.to_roam;
+}
+
+void rtw_roaming(struct adapter *padapter, struct wlan_network *tgt_network)
+{
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+
+ spin_lock_bh(&pmlmepriv->lock);
+ _rtw_roaming(padapter, tgt_network);
+ spin_unlock_bh(&pmlmepriv->lock);
+}
+void _rtw_roaming(struct adapter *padapter, struct wlan_network *tgt_network)
+{
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct wlan_network *cur_network = &pmlmepriv->cur_network;
+ int do_join_r;
+
+ if (0 < rtw_to_roam(padapter)) {
+ DBG_871X("roaming from %s("MAC_FMT"), length:%d\n",
+ cur_network->network.Ssid.Ssid, MAC_ARG(cur_network->network.MacAddress),
+ cur_network->network.Ssid.SsidLength);
+ memcpy(&pmlmepriv->assoc_ssid, &cur_network->network.Ssid, sizeof(struct ndis_802_11_ssid));
+
+ pmlmepriv->assoc_by_bssid = false;
+
+ while (1) {
+ do_join_r = rtw_do_join(padapter);
+ if (_SUCCESS == do_join_r) {
+ break;
+ } else {
+ DBG_871X("roaming do_join return %d\n", do_join_r);
+ rtw_dec_to_roam(padapter);
+
+ if (rtw_to_roam(padapter) > 0) {
+ continue;
+ } else {
+ DBG_871X("%s(%d) -to roaming fail, indicate_disconnect\n", __func__, __LINE__);
+ rtw_indicate_disconnect(padapter);
+ break;
+ }
+ }
+ }
+ }
+
+}
+
+sint rtw_linked_check(struct adapter *padapter)
+{
+ if ((check_fwstate(&padapter->mlmepriv, WIFI_AP_STATE) == true) ||
+ (check_fwstate(&padapter->mlmepriv, WIFI_ADHOC_STATE|WIFI_ADHOC_MASTER_STATE) == true)) {
+ if (padapter->stapriv.asoc_sta_count > 2)
+ return true;
+ } else{ /* Station mode */
+ if (check_fwstate(&padapter->mlmepriv, _FW_LINKED) == true)
+ return true;
+ }
+ return false;
+}
diff --git a/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c b/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
new file mode 100644
index 000000000000..17d881d66910
--- /dev/null
+++ b/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
@@ -0,0 +1,6941 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#define _RTW_MLME_EXT_C_
+
+#include <drv_types.h>
+#include <rtw_debug.h>
+#include <rtw_wifi_regd.h>
+
+
+static struct mlme_handler mlme_sta_tbl[] = {
+ {WIFI_ASSOCREQ, "OnAssocReq", &OnAssocReq},
+ {WIFI_ASSOCRSP, "OnAssocRsp", &OnAssocRsp},
+ {WIFI_REASSOCREQ, "OnReAssocReq", &OnAssocReq},
+ {WIFI_REASSOCRSP, "OnReAssocRsp", &OnAssocRsp},
+ {WIFI_PROBEREQ, "OnProbeReq", &OnProbeReq},
+ {WIFI_PROBERSP, "OnProbeRsp", &OnProbeRsp},
+
+ /*----------------------------------------------------------
+ below 2 are reserved
+ -----------------------------------------------------------*/
+ {0, "DoReserved", &DoReserved},
+ {0, "DoReserved", &DoReserved},
+ {WIFI_BEACON, "OnBeacon", &OnBeacon},
+ {WIFI_ATIM, "OnATIM", &OnAtim},
+ {WIFI_DISASSOC, "OnDisassoc", &OnDisassoc},
+ {WIFI_AUTH, "OnAuth", &OnAuthClient},
+ {WIFI_DEAUTH, "OnDeAuth", &OnDeAuth},
+ {WIFI_ACTION, "OnAction", &OnAction},
+ {WIFI_ACTION_NOACK, "OnActionNoAck", &OnAction},
+};
+
+static struct action_handler OnAction_tbl[] = {
+ {RTW_WLAN_CATEGORY_SPECTRUM_MGMT, "ACTION_SPECTRUM_MGMT", on_action_spct},
+ {RTW_WLAN_CATEGORY_QOS, "ACTION_QOS", &DoReserved},
+ {RTW_WLAN_CATEGORY_DLS, "ACTION_DLS", &DoReserved},
+ {RTW_WLAN_CATEGORY_BACK, "ACTION_BACK", &OnAction_back},
+ {RTW_WLAN_CATEGORY_PUBLIC, "ACTION_PUBLIC", on_action_public},
+ {RTW_WLAN_CATEGORY_RADIO_MEASUREMENT, "ACTION_RADIO_MEASUREMENT", &DoReserved},
+ {RTW_WLAN_CATEGORY_FT, "ACTION_FT", &DoReserved},
+ {RTW_WLAN_CATEGORY_HT, "ACTION_HT", &OnAction_ht},
+ {RTW_WLAN_CATEGORY_SA_QUERY, "ACTION_SA_QUERY", &OnAction_sa_query},
+ {RTW_WLAN_CATEGORY_UNPROTECTED_WNM, "ACTION_UNPROTECTED_WNM", &DoReserved},
+ {RTW_WLAN_CATEGORY_SELF_PROTECTED, "ACTION_SELF_PROTECTED", &DoReserved},
+ {RTW_WLAN_CATEGORY_WMM, "ACTION_WMM", &DoReserved},
+ {RTW_WLAN_CATEGORY_VHT, "ACTION_VHT", &DoReserved},
+ {RTW_WLAN_CATEGORY_P2P, "ACTION_P2P", &DoReserved},
+};
+
+
+static u8 null_addr[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
+
+/**************************************************
+OUI definitions for the vendor specific IE
+***************************************************/
+unsigned char RTW_WPA_OUI[] = {0x00, 0x50, 0xf2, 0x01};
+unsigned char WMM_OUI[] = {0x00, 0x50, 0xf2, 0x02};
+unsigned char WPS_OUI[] = {0x00, 0x50, 0xf2, 0x04};
+unsigned char P2P_OUI[] = {0x50, 0x6F, 0x9A, 0x09};
+unsigned char WFD_OUI[] = {0x50, 0x6F, 0x9A, 0x0A};
+
+unsigned char WMM_INFO_OUI[] = {0x00, 0x50, 0xf2, 0x02, 0x00, 0x01};
+unsigned char WMM_PARA_OUI[] = {0x00, 0x50, 0xf2, 0x02, 0x01, 0x01};
+
+static unsigned char REALTEK_96B_IE[] = {0x00, 0xe0, 0x4c, 0x02, 0x01, 0x20};
+
+/********************************************************
+ChannelPlan definitions
+*********************************************************/
+static RT_CHANNEL_PLAN_2G RTW_ChannelPlan2G[RT_CHANNEL_DOMAIN_2G_MAX] = {
+ {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13}, 13}, /* 0x00, RT_CHANNEL_DOMAIN_2G_WORLD , Passive scan CH 12, 13 */
+ {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13}, 13}, /* 0x01, RT_CHANNEL_DOMAIN_2G_ETSI1 */
+ {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}, 11}, /* 0x02, RT_CHANNEL_DOMAIN_2G_FCC1 */
+ {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14}, 14}, /* 0x03, RT_CHANNEL_DOMAIN_2G_MIKK1 */
+ {{10, 11, 12, 13}, 4}, /* 0x04, RT_CHANNEL_DOMAIN_2G_ETSI2 */
+ {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14}, 14}, /* 0x05, RT_CHANNEL_DOMAIN_2G_GLOBAL , Passive scan CH 12, 13, 14 */
+ {{}, 0}, /* 0x06, RT_CHANNEL_DOMAIN_2G_NULL */
+};
+
+static RT_CHANNEL_PLAN_5G RTW_ChannelPlan5G[RT_CHANNEL_DOMAIN_5G_MAX] = {
+ {{}, 0}, /* 0x00, RT_CHANNEL_DOMAIN_5G_NULL */
+ {{36, 40, 44, 48, 52, 56, 60, 64, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140}, 19}, /* 0x01, RT_CHANNEL_DOMAIN_5G_ETSI1 */
+ {{36, 40, 44, 48, 52, 56, 60, 64, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 149, 153, 157, 161, 165}, 24}, /* 0x02, RT_CHANNEL_DOMAIN_5G_ETSI2 */
+ {{36, 40, 44, 48, 52, 56, 60, 64, 100, 104, 108, 112, 116, 120, 124, 128, 132, 149, 153, 157, 161, 165}, 22}, /* 0x03, RT_CHANNEL_DOMAIN_5G_ETSI3 */
+ {{36, 40, 44, 48, 52, 56, 60, 64, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 149, 153, 157, 161, 165}, 24}, /* 0x04, RT_CHANNEL_DOMAIN_5G_FCC1 */
+ {{36, 40, 44, 48, 149, 153, 157, 161, 165}, 9}, /* 0x05, RT_CHANNEL_DOMAIN_5G_FCC2 */
+ {{36, 40, 44, 48, 52, 56, 60, 64, 149, 153, 157, 161, 165}, 13}, /* 0x06, RT_CHANNEL_DOMAIN_5G_FCC3 */
+ {{36, 40, 44, 48, 52, 56, 60, 64, 149, 153, 157, 161}, 12}, /* 0x07, RT_CHANNEL_DOMAIN_5G_FCC4 */
+ {{149, 153, 157, 161, 165}, 5}, /* 0x08, RT_CHANNEL_DOMAIN_5G_FCC5 */
+ {{36, 40, 44, 48, 52, 56, 60, 64}, 8}, /* 0x09, RT_CHANNEL_DOMAIN_5G_FCC6 */
+ {{36, 40, 44, 48, 52, 56, 60, 64, 100, 104, 108, 112, 116, 136, 140, 149, 153, 157, 161, 165}, 20}, /* 0x0A, RT_CHANNEL_DOMAIN_5G_FCC7_IC1 */
+ {{36, 40, 44, 48, 52, 56, 60, 64, 100, 104, 108, 112, 116, 120, 124, 149, 153, 157, 161, 165}, 20}, /* 0x0B, RT_CHANNEL_DOMAIN_5G_KCC1 */
+ {{36, 40, 44, 48, 52, 56, 60, 64, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140}, 19}, /* 0x0C, RT_CHANNEL_DOMAIN_5G_MKK1 */
+ {{36, 40, 44, 48, 52, 56, 60, 64}, 8}, /* 0x0D, RT_CHANNEL_DOMAIN_5G_MKK2 */
+ {{100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140}, 11}, /* 0x0E, RT_CHANNEL_DOMAIN_5G_MKK3 */
+ {{56, 60, 64, 100, 104, 108, 112, 116, 136, 140, 149, 153, 157, 161, 165}, 15}, /* 0x0F, RT_CHANNEL_DOMAIN_5G_NCC1 */
+ {{56, 60, 64, 149, 153, 157, 161, 165}, 8}, /* 0x10, RT_CHANNEL_DOMAIN_5G_NCC2 */
+ {{149, 153, 157, 161, 165}, 5}, /* 0x11, RT_CHANNEL_DOMAIN_5G_NCC3 */
+ {{36, 40, 44, 48}, 4}, /* 0x12, RT_CHANNEL_DOMAIN_5G_ETSI4 */
+ {{36, 40, 44, 48, 52, 56, 60, 64, 100, 104, 108, 112, 116, 136, 140, 149, 153, 157, 161, 165}, 20}, /* 0x13, RT_CHANNEL_DOMAIN_5G_ETSI5 */
+ {{149, 153, 157, 161}, 4}, /* 0x14, RT_CHANNEL_DOMAIN_5G_FCC8 */
+ {{36, 40, 44, 48, 52, 56, 60, 64}, 8}, /* 0x15, RT_CHANNEL_DOMAIN_5G_ETSI6 */
+ {{36, 40, 44, 48, 52, 56, 60, 64, 149, 153, 157, 161, 165}, 13}, /* 0x16, RT_CHANNEL_DOMAIN_5G_ETSI7 */
+ {{36, 40, 44, 48, 149, 153, 157, 161, 165}, 9}, /* 0x17, RT_CHANNEL_DOMAIN_5G_ETSI8 */
+ {{100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140}, 11}, /* 0x18, RT_CHANNEL_DOMAIN_5G_ETSI9 */
+ {{149, 153, 157, 161, 165}, 5}, /* 0x19, RT_CHANNEL_DOMAIN_5G_ETSI10 */
+ {{36, 40, 44, 48, 52, 56, 60, 64, 132, 136, 140, 149, 153, 157, 161, 165}, 16}, /* 0x1A, RT_CHANNEL_DOMAIN_5G_ETSI11 */
+ {{52, 56, 60, 64, 100, 104, 108, 112, 116, 132, 136, 140, 149, 153, 157, 161, 165}, 17}, /* 0x1B, RT_CHANNEL_DOMAIN_5G_NCC4 */
+ {{149, 153, 157, 161}, 4}, /* 0x1C, RT_CHANNEL_DOMAIN_5G_ETSI12 */
+ {{36, 40, 44, 48, 100, 104, 108, 112, 116, 132, 136, 140, 149, 153, 157, 161, 165}, 17}, /* 0x1D, RT_CHANNEL_DOMAIN_5G_FCC9 */
+ {{36, 40, 44, 48, 100, 104, 108, 112, 116, 132, 136, 140}, 12}, /* 0x1E, RT_CHANNEL_DOMAIN_5G_ETSI13 */
+ {{36, 40, 44, 48, 52, 56, 60, 64, 100, 104, 108, 112, 116, 132, 136, 140, 149, 153, 157, 161}, 20}, /* 0x1F, RT_CHANNEL_DOMAIN_5G_FCC10 */
+
+ /* Driver-defined entries for compatibility with the old channel plans; remember to update when new channel plans are defined ===== */
+ {{36, 40, 44, 48, 52, 56, 60, 64, 100, 104, 108, 112, 116, 132, 136, 140, 149, 153, 157, 161, 165}, 21}, /* 0x20, RT_CHANNEL_DOMAIN_5G_FCC */
+ {{36, 40, 44, 48}, 4}, /* 0x21, RT_CHANNEL_DOMAIN_5G_JAPAN_NO_DFS */
+ {{36, 40, 44, 48, 149, 153, 157, 161}, 8}, /* 0x22, RT_CHANNEL_DOMAIN_5G_FCC4_NO_DFS */
+};
+
+static RT_CHANNEL_PLAN_MAP RTW_ChannelPlanMap[RT_CHANNEL_DOMAIN_MAX] = {
+ /* 0x00 ~ 0x1F , Old Define ===== */
+ {0x02, 0x20}, /* 0x00, RT_CHANNEL_DOMAIN_FCC */
+ {0x02, 0x0A}, /* 0x01, RT_CHANNEL_DOMAIN_IC */
+ {0x01, 0x01}, /* 0x02, RT_CHANNEL_DOMAIN_ETSI */
+ {0x01, 0x00}, /* 0x03, RT_CHANNEL_DOMAIN_SPAIN */
+ {0x01, 0x00}, /* 0x04, RT_CHANNEL_DOMAIN_FRANCE */
+ {0x03, 0x00}, /* 0x05, RT_CHANNEL_DOMAIN_MKK */
+ {0x03, 0x00}, /* 0x06, RT_CHANNEL_DOMAIN_MKK1 */
+ {0x01, 0x09}, /* 0x07, RT_CHANNEL_DOMAIN_ISRAEL */
+ {0x03, 0x09}, /* 0x08, RT_CHANNEL_DOMAIN_TELEC */
+ {0x03, 0x00}, /* 0x09, RT_CHANNEL_DOMAIN_GLOBAL_DOAMIN */
+ {0x00, 0x00}, /* 0x0A, RT_CHANNEL_DOMAIN_WORLD_WIDE_13 */
+ {0x02, 0x0F}, /* 0x0B, RT_CHANNEL_DOMAIN_TAIWAN */
+ {0x01, 0x08}, /* 0x0C, RT_CHANNEL_DOMAIN_CHINA */
+ {0x02, 0x06}, /* 0x0D, RT_CHANNEL_DOMAIN_SINGAPORE_INDIA_MEXICO */
+ {0x02, 0x0B}, /* 0x0E, RT_CHANNEL_DOMAIN_KOREA */
+ {0x02, 0x09}, /* 0x0F, RT_CHANNEL_DOMAIN_TURKEY */
+ {0x01, 0x01}, /* 0x10, RT_CHANNEL_DOMAIN_JAPAN */
+ {0x02, 0x05}, /* 0x11, RT_CHANNEL_DOMAIN_FCC_NO_DFS */
+ {0x01, 0x21}, /* 0x12, RT_CHANNEL_DOMAIN_JAPAN_NO_DFS */
+ {0x00, 0x04}, /* 0x13, RT_CHANNEL_DOMAIN_WORLD_WIDE_5G */
+ {0x02, 0x10}, /* 0x14, RT_CHANNEL_DOMAIN_TAIWAN_NO_DFS */
+ {0x00, 0x21}, /* 0x15, RT_CHANNEL_DOMAIN_ETSI_NO_DFS */
+ {0x00, 0x22}, /* 0x16, RT_CHANNEL_DOMAIN_KOREA_NO_DFS */
+ {0x03, 0x21}, /* 0x17, RT_CHANNEL_DOMAIN_JAPAN_NO_DFS */
+ {0x06, 0x08}, /* 0x18, RT_CHANNEL_DOMAIN_PAKISTAN_NO_DFS */
+ {0x02, 0x08}, /* 0x19, RT_CHANNEL_DOMAIN_TAIWAN2_NO_DFS */
+ {0x00, 0x00}, /* 0x1A, */
+ {0x00, 0x00}, /* 0x1B, */
+ {0x00, 0x00}, /* 0x1C, */
+ {0x00, 0x00}, /* 0x1D, */
+ {0x00, 0x00}, /* 0x1E, */
+ {0x06, 0x04}, /* 0x1F, RT_CHANNEL_DOMAIN_WORLD_WIDE_ONLY_5G */
+ /* 0x20 ~ 0x7F , New Define ===== */
+ {0x00, 0x00}, /* 0x20, RT_CHANNEL_DOMAIN_WORLD_NULL */
+ {0x01, 0x00}, /* 0x21, RT_CHANNEL_DOMAIN_ETSI1_NULL */
+ {0x02, 0x00}, /* 0x22, RT_CHANNEL_DOMAIN_FCC1_NULL */
+ {0x03, 0x00}, /* 0x23, RT_CHANNEL_DOMAIN_MKK1_NULL */
+ {0x04, 0x00}, /* 0x24, RT_CHANNEL_DOMAIN_ETSI2_NULL */
+ {0x02, 0x04}, /* 0x25, RT_CHANNEL_DOMAIN_FCC1_FCC1 */
+ {0x00, 0x01}, /* 0x26, RT_CHANNEL_DOMAIN_WORLD_ETSI1 */
+ {0x03, 0x0C}, /* 0x27, RT_CHANNEL_DOMAIN_MKK1_MKK1 */
+ {0x00, 0x0B}, /* 0x28, RT_CHANNEL_DOMAIN_WORLD_KCC1 */
+ {0x00, 0x05}, /* 0x29, RT_CHANNEL_DOMAIN_WORLD_FCC2 */
+ {0x00, 0x00}, /* 0x2A, */
+ {0x00, 0x00}, /* 0x2B, */
+ {0x00, 0x00}, /* 0x2C, */
+ {0x00, 0x00}, /* 0x2D, */
+ {0x00, 0x00}, /* 0x2E, */
+ {0x00, 0x00}, /* 0x2F, */
+ {0x00, 0x06}, /* 0x30, RT_CHANNEL_DOMAIN_WORLD_FCC3 */
+ {0x00, 0x07}, /* 0x31, RT_CHANNEL_DOMAIN_WORLD_FCC4 */
+ {0x00, 0x08}, /* 0x32, RT_CHANNEL_DOMAIN_WORLD_FCC5 */
+ {0x00, 0x09}, /* 0x33, RT_CHANNEL_DOMAIN_WORLD_FCC6 */
+ {0x02, 0x0A}, /* 0x34, RT_CHANNEL_DOMAIN_FCC1_FCC7 */
+ {0x00, 0x02}, /* 0x35, RT_CHANNEL_DOMAIN_WORLD_ETSI2 */
+ {0x00, 0x03}, /* 0x36, RT_CHANNEL_DOMAIN_WORLD_ETSI3 */
+ {0x03, 0x0D}, /* 0x37, RT_CHANNEL_DOMAIN_MKK1_MKK2 */
+ {0x03, 0x0E}, /* 0x38, RT_CHANNEL_DOMAIN_MKK1_MKK3 */
+ {0x02, 0x0F}, /* 0x39, RT_CHANNEL_DOMAIN_FCC1_NCC1 */
+ {0x00, 0x00}, /* 0x3A, */
+ {0x00, 0x00}, /* 0x3B, */
+ {0x00, 0x00}, /* 0x3C, */
+ {0x00, 0x00}, /* 0x3D, */
+ {0x00, 0x00}, /* 0x3E, */
+ {0x00, 0x00}, /* 0x3F, */
+ {0x02, 0x10}, /* 0x40, RT_CHANNEL_DOMAIN_FCC1_NCC2 */
+ {0x05, 0x00}, /* 0x41, RT_CHANNEL_DOMAIN_GLOBAL_NULL */
+ {0x01, 0x12}, /* 0x42, RT_CHANNEL_DOMAIN_ETSI1_ETSI4 */
+ {0x02, 0x05}, /* 0x43, RT_CHANNEL_DOMAIN_FCC1_FCC2 */
+ {0x02, 0x11}, /* 0x44, RT_CHANNEL_DOMAIN_FCC1_NCC3 */
+ {0x00, 0x13}, /* 0x45, RT_CHANNEL_DOMAIN_WORLD_ETSI5 */
+ {0x02, 0x14}, /* 0x46, RT_CHANNEL_DOMAIN_FCC1_FCC8 */
+ {0x00, 0x15}, /* 0x47, RT_CHANNEL_DOMAIN_WORLD_ETSI6 */
+ {0x00, 0x16}, /* 0x48, RT_CHANNEL_DOMAIN_WORLD_ETSI7 */
+ {0x00, 0x17}, /* 0x49, RT_CHANNEL_DOMAIN_WORLD_ETSI8 */
+ {0x00, 0x18}, /* 0x50, RT_CHANNEL_DOMAIN_WORLD_ETSI9 */
+ {0x00, 0x19}, /* 0x51, RT_CHANNEL_DOMAIN_WORLD_ETSI10 */
+ {0x00, 0x1A}, /* 0x52, RT_CHANNEL_DOMAIN_WORLD_ETSI11 */
+ {0x02, 0x1B}, /* 0x53, RT_CHANNEL_DOMAIN_FCC1_NCC4 */
+ {0x00, 0x1C}, /* 0x54, RT_CHANNEL_DOMAIN_WORLD_ETSI12 */
+ {0x02, 0x1D}, /* 0x55, RT_CHANNEL_DOMAIN_FCC1_FCC9 */
+ {0x00, 0x1E}, /* 0x56, RT_CHANNEL_DOMAIN_WORLD_ETSI13 */
+ {0x02, 0x1F}, /* 0x57, RT_CHANNEL_DOMAIN_FCC1_FCC10 */
+};
+
+static RT_CHANNEL_PLAN_MAP RTW_CHANNEL_PLAN_MAP_REALTEK_DEFINE = {0x03, 0x02}; /* use the combination covering the maximum number of channels */
+
+/*
+ * Search for channel @ch in the given channel set @ch_set
+ * @ch_set: the given channel set
+ * @ch: the given channel number
+ *
+ * Return the index of the channel in @ch_set, or -1 if not found
+ */
+int rtw_ch_set_search_ch(RT_CHANNEL_INFO *ch_set, const u32 ch)
+{
+ int i;
+ for (i = 0; ch_set[i].ChannelNum != 0; i++) {
+ if (ch == ch_set[i].ChannelNum)
+ break;
+ }
+
+ if (i >= ch_set[i].ChannelNum)
+ return -1;
+ return i;
+}
+
+/*
+ * Check whether channel @ch fits the setband setting of @adapter
+ * @adapter: the given adapter
+ * @ch: the given channel number
+ *
+ * Return true if the channel is valid for the setting, false otherwise
+ */
+bool rtw_mlme_band_check(struct adapter *adapter, const u32 ch)
+{
+ if (adapter->setband == GHZ24_50 /* 2.4G and 5G */
+ || (adapter->setband == GHZ_24 && ch < 35) /* 2.4G only */
+ || (adapter->setband == GHZ_50 && ch > 35) /* 5G only */
+ ) {
+ return true;
+ }
+ return false;
+}
+
+/****************************************************************************
+
+Following are the initialization functions for WiFi MLME
+
+*****************************************************************************/
+
+int init_hw_mlme_ext(struct adapter *padapter)
+{
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+
+ set_channel_bwmode(padapter, pmlmeext->cur_channel, pmlmeext->cur_ch_offset, pmlmeext->cur_bwmode);
+ return _SUCCESS;
+}
+
+void init_mlme_default_rate_set(struct adapter *padapter)
+{
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+
+ unsigned char mixed_datarate[NumRates] = {_1M_RATE_, _2M_RATE_, _5M_RATE_, _11M_RATE_, _6M_RATE_, _9M_RATE_, _12M_RATE_, _18M_RATE_, _24M_RATE_, _36M_RATE_, _48M_RATE_, _54M_RATE_, 0xff};
+ unsigned char mixed_basicrate[NumRates] = {_1M_RATE_, _2M_RATE_, _5M_RATE_, _11M_RATE_, _6M_RATE_, _12M_RATE_, _24M_RATE_, 0xff,};
+ unsigned char supported_mcs_set[16] = {0xff, 0xff, 0x00, 0x00, 0x01, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0};
+
+ memcpy(pmlmeext->datarate, mixed_datarate, NumRates);
+ memcpy(pmlmeext->basicrate, mixed_basicrate, NumRates);
+
+ memcpy(pmlmeext->default_supported_mcs_set, supported_mcs_set, sizeof(pmlmeext->default_supported_mcs_set));
+}
+
+static void init_mlme_ext_priv_value(struct adapter *padapter)
+{
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+
+ atomic_set(&pmlmeext->event_seq, 0);
+ pmlmeext->mgnt_seq = 0;/* reset to zero when disconnect at client mode */
+ pmlmeext->sa_query_seq = 0;
+ pmlmeext->mgnt_80211w_IPN = 0;
+ pmlmeext->mgnt_80211w_IPN_rx = 0;
+ pmlmeext->cur_channel = padapter->registrypriv.channel;
+ pmlmeext->cur_bwmode = CHANNEL_WIDTH_20;
+ pmlmeext->cur_ch_offset = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
+
+ pmlmeext->retry = 0;
+
+ pmlmeext->cur_wireless_mode = padapter->registrypriv.wireless_mode;
+
+ init_mlme_default_rate_set(padapter);
+
+ if (pmlmeext->cur_channel > 14)
+ pmlmeext->tx_rate = IEEE80211_OFDM_RATE_6MB;
+ else
+ pmlmeext->tx_rate = IEEE80211_CCK_RATE_1MB;
+
+ pmlmeext->sitesurvey_res.state = SCAN_DISABLE;
+ pmlmeext->sitesurvey_res.channel_idx = 0;
+ pmlmeext->sitesurvey_res.bss_cnt = 0;
+ pmlmeext->scan_abort = false;
+
+ pmlmeinfo->state = WIFI_FW_NULL_STATE;
+ pmlmeinfo->reauth_count = 0;
+ pmlmeinfo->reassoc_count = 0;
+ pmlmeinfo->link_count = 0;
+ pmlmeinfo->auth_seq = 0;
+ pmlmeinfo->auth_algo = dot11AuthAlgrthm_Open;
+ pmlmeinfo->key_index = 0;
+ pmlmeinfo->iv = 0;
+
+ pmlmeinfo->enc_algo = _NO_PRIVACY_;
+ pmlmeinfo->authModeToggle = 0;
+
+ memset(pmlmeinfo->chg_txt, 0, 128);
+
+ pmlmeinfo->slotTime = SHORT_SLOT_TIME;
+ pmlmeinfo->preamble_mode = PREAMBLE_AUTO;
+
+ pmlmeinfo->dialogToken = 0;
+
+ pmlmeext->action_public_rxseq = 0xffff;
+ pmlmeext->action_public_dialog_token = 0xff;
+}
+
+static int has_channel(RT_CHANNEL_INFO *channel_set,
+ u8 chanset_size,
+ u8 chan) {
+ int i;
+
+ for (i = 0; i < chanset_size; i++) {
+ if (channel_set[i].ChannelNum == chan) {
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+static void init_channel_list(struct adapter *padapter, RT_CHANNEL_INFO *channel_set,
+ u8 chanset_size,
+ struct p2p_channels *channel_list) {
+
+ struct p2p_oper_class_map op_class[] = {
+ { IEEE80211G, 81, 1, 13, 1, BW20 },
+ { IEEE80211G, 82, 14, 14, 1, BW20 },
+ { IEEE80211A, 115, 36, 48, 4, BW20 },
+ { IEEE80211A, 116, 36, 44, 8, BW40PLUS },
+ { IEEE80211A, 117, 40, 48, 8, BW40MINUS },
+ { IEEE80211A, 124, 149, 161, 4, BW20 },
+ { IEEE80211A, 125, 149, 169, 4, BW20 },
+ { IEEE80211A, 126, 149, 157, 8, BW40PLUS },
+ { IEEE80211A, 127, 153, 161, 8, BW40MINUS },
+ { -1, 0, 0, 0, 0, BW20 }
+ };
+
+ int cla, op;
+
+ cla = 0;
+
+ for (op = 0; op_class[op].op_class; op++) {
+ u8 ch;
+ struct p2p_oper_class_map *o = &op_class[op];
+ struct p2p_reg_class *reg = NULL;
+
+ for (ch = o->min_chan; ch <= o->max_chan; ch += o->inc) {
+ if (!has_channel(channel_set, chanset_size, ch)) {
+ continue;
+ }
+
+ if ((0 == padapter->registrypriv.ht_enable) && (8 == o->inc))
+ continue;
+
+ if ((0 < (padapter->registrypriv.bw_mode & 0xf0)) &&
+ ((BW40MINUS == o->bw) || (BW40PLUS == o->bw)))
+ continue;
+
+ if (reg == NULL) {
+ reg = &channel_list->reg_class[cla];
+ cla++;
+ reg->reg_class = o->op_class;
+ reg->channels = 0;
+ }
+ reg->channel[reg->channels] = ch;
+ reg->channels++;
+ }
+ }
+ channel_list->reg_classes = cla;
+
+}
+
+static u8 init_channel_set(struct adapter *padapter, u8 ChannelPlan, RT_CHANNEL_INFO *channel_set)
+{
+ u8 index, chanset_size = 0;
+ u8 b5GBand = false, b2_4GBand = false;
+ u8 Index2G = 0, Index5G = 0;
+
+ memset(channel_set, 0, sizeof(RT_CHANNEL_INFO)*MAX_CHANNEL_NUM);
+
+ if (ChannelPlan >= RT_CHANNEL_DOMAIN_MAX && ChannelPlan != RT_CHANNEL_DOMAIN_REALTEK_DEFINE) {
+ DBG_871X("ChannelPlan ID %x error !!!!!\n", ChannelPlan);
+ return chanset_size;
+ }
+
+ if (IsSupported24G(padapter->registrypriv.wireless_mode)) {
+ b2_4GBand = true;
+ if (RT_CHANNEL_DOMAIN_REALTEK_DEFINE == ChannelPlan)
+ Index2G = RTW_CHANNEL_PLAN_MAP_REALTEK_DEFINE.Index2G;
+ else
+ Index2G = RTW_ChannelPlanMap[ChannelPlan].Index2G;
+ }
+
+ if (b2_4GBand) {
+ for (index = 0; index < RTW_ChannelPlan2G[Index2G].Len; index++) {
+ channel_set[chanset_size].ChannelNum = RTW_ChannelPlan2G[Index2G].Channel[index];
+
+ if ((RT_CHANNEL_DOMAIN_GLOBAL_DOAMIN == ChannelPlan) ||/* Channel 1~11 is active, and 12~14 is passive */
+ (RT_CHANNEL_DOMAIN_GLOBAL_NULL == ChannelPlan)) {
+ if (channel_set[chanset_size].ChannelNum >= 1 && channel_set[chanset_size].ChannelNum <= 11)
+ channel_set[chanset_size].ScanType = SCAN_ACTIVE;
+ else if ((channel_set[chanset_size].ChannelNum >= 12 && channel_set[chanset_size].ChannelNum <= 14))
+ channel_set[chanset_size].ScanType = SCAN_PASSIVE;
+ } else if (RT_CHANNEL_DOMAIN_WORLD_WIDE_13 == ChannelPlan ||
+ RT_CHANNEL_DOMAIN_WORLD_WIDE_5G == ChannelPlan ||
+ RT_CHANNEL_DOMAIN_2G_WORLD == Index2G) { /* channel 12~13, passive scan */
+ if (channel_set[chanset_size].ChannelNum <= 11)
+ channel_set[chanset_size].ScanType = SCAN_ACTIVE;
+ else
+ channel_set[chanset_size].ScanType = SCAN_PASSIVE;
+ } else
+ channel_set[chanset_size].ScanType = SCAN_ACTIVE;
+
+ chanset_size++;
+ }
+ }
+
+ if (b5GBand) {
+ for (index = 0; index < RTW_ChannelPlan5G[Index5G].Len; index++) {
+ if (RTW_ChannelPlan5G[Index5G].Channel[index] <= 48
+ || RTW_ChannelPlan5G[Index5G].Channel[index] >= 149) {
+ channel_set[chanset_size].ChannelNum = RTW_ChannelPlan5G[Index5G].Channel[index];
+ if (RT_CHANNEL_DOMAIN_WORLD_WIDE_5G == ChannelPlan)/* passive scan for all 5G channels */
+ channel_set[chanset_size].ScanType = SCAN_PASSIVE;
+ else
+ channel_set[chanset_size].ScanType = SCAN_ACTIVE;
+ DBG_871X("%s(): channel_set[%d].ChannelNum = %d\n", __func__, chanset_size, channel_set[chanset_size].ChannelNum);
+ chanset_size++;
+ }
+ }
+ }
+
+ DBG_871X("%s ChannelPlan ID %x Chan num:%d \n", __func__, ChannelPlan, chanset_size);
+ return chanset_size;
+}
+
+int init_mlme_ext_priv(struct adapter *padapter)
+{
+ int res = _SUCCESS;
+ struct registry_priv *pregistrypriv = &padapter->registrypriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+
+ /* We don't need to memset padapter->XXX to zero, because adapter is allocated by vzalloc(). */
+ /* memset((u8 *)pmlmeext, 0, sizeof(struct mlme_ext_priv)); */
+
+ pmlmeext->padapter = padapter;
+
+ /* fill_fwpriv(padapter, &(pmlmeext->fwpriv)); */
+
+ init_mlme_ext_priv_value(padapter);
+ pmlmeinfo->bAcceptAddbaReq = pregistrypriv->bAcceptAddbaReq;
+
+ init_mlme_ext_timer(padapter);
+
+ init_mlme_ap_info(padapter);
+
+ pmlmeext->max_chan_nums = init_channel_set(padapter, pmlmepriv->ChannelPlan, pmlmeext->channel_set);
+ init_channel_list(padapter, pmlmeext->channel_set, pmlmeext->max_chan_nums, &pmlmeext->channel_list);
+ pmlmeext->last_scan_time = 0;
+ pmlmeext->chan_scan_time = SURVEY_TO;
+ pmlmeext->mlmeext_init = true;
+ pmlmeext->active_keep_alive_check = true;
+
+#ifdef DBG_FIXED_CHAN
+ pmlmeext->fixed_chan = 0xFF;
+#endif
+
+ return res;
+
+}
+
+void free_mlme_ext_priv(struct mlme_ext_priv *pmlmeext)
+{
+ struct adapter *padapter = pmlmeext->padapter;
+
+ if (!padapter)
+ return;
+
+ if (padapter->bDriverStopped == true) {
+ del_timer_sync(&pmlmeext->survey_timer);
+ del_timer_sync(&pmlmeext->link_timer);
+ /* del_timer_sync(&pmlmeext->ADDBA_timer); */
+ }
+}
+
+static void _mgt_dispatcher(struct adapter *padapter, struct mlme_handler *ptable, union recv_frame *precv_frame)
+{
+ u8 bc_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+ u8 *pframe = precv_frame->u.hdr.rx_data;
+
+ if (ptable->func) {
+ /* only accept frames whose RA (A1) is our address or the broadcast address. */
+ if (memcmp(GetAddr1Ptr(pframe), myid(&padapter->eeprompriv), ETH_ALEN) &&
+ memcmp(GetAddr1Ptr(pframe), bc_addr, ETH_ALEN))
+ return;
+
+ ptable->func(padapter, precv_frame);
+ }
+}
+
+void mgt_dispatcher(struct adapter *padapter, union recv_frame *precv_frame)
+{
+ int index;
+ struct mlme_handler *ptable;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ u8 bc_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+ u8 *pframe = precv_frame->u.hdr.rx_data;
+ struct sta_info *psta = rtw_get_stainfo(&padapter->stapriv, GetAddr2Ptr(pframe));
+ struct dvobj_priv *psdpriv = padapter->dvobj;
+ struct debug_priv *pdbgpriv = &psdpriv->drv_dbg;
+
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_,
+ ("+mgt_dispatcher: type(0x%x) subtype(0x%x)\n",
+ GetFrameType(pframe), GetFrameSubType(pframe)));
+
+ if (GetFrameType(pframe) != WIFI_MGT_TYPE) {
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("mgt_dispatcher: type(0x%x) error!\n", GetFrameType(pframe)));
+ return;
+ }
+
+ /* only accept frames whose RA (A1) is our address or the broadcast address. */
+ if (memcmp(GetAddr1Ptr(pframe), myid(&padapter->eeprompriv), ETH_ALEN) &&
+ memcmp(GetAddr1Ptr(pframe), bc_addr, ETH_ALEN)) {
+ return;
+ }
+
+ ptable = mlme_sta_tbl;
+
+ index = GetFrameSubType(pframe) >> 4;
+
+ if (index >= (sizeof(mlme_sta_tbl) / sizeof(struct mlme_handler))) {
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("Currently we do not support reserved sub-fr-type =%d\n", index));
+ return;
+ }
+ ptable += index;
+
+ if (psta != NULL) {
+ if (GetRetry(pframe)) {
+ if (precv_frame->u.hdr.attrib.seq_num == psta->RxMgmtFrameSeqNum) {
+ /* drop the duplicate management frame */
+ pdbgpriv->dbg_rx_dup_mgt_frame_drop_count++;
+ DBG_871X("Drop duplicate management frame with seq_num = %d.\n", precv_frame->u.hdr.attrib.seq_num);
+ return;
+ }
+ }
+ psta->RxMgmtFrameSeqNum = precv_frame->u.hdr.attrib.seq_num;
+ }
+
+ switch (GetFrameSubType(pframe)) {
+ case WIFI_AUTH:
+ if (check_fwstate(pmlmepriv, WIFI_AP_STATE) == true)
+ ptable->func = &OnAuth;
+ else
+ ptable->func = &OnAuthClient;
+ /* fall through */
+ case WIFI_ASSOCREQ:
+ case WIFI_REASSOCREQ:
+ _mgt_dispatcher(padapter, ptable, precv_frame);
+ break;
+ case WIFI_PROBEREQ:
+ _mgt_dispatcher(padapter, ptable, precv_frame);
+ break;
+ case WIFI_BEACON:
+ _mgt_dispatcher(padapter, ptable, precv_frame);
+ break;
+ case WIFI_ACTION:
+ /* if (check_fwstate(pmlmepriv, WIFI_AP_STATE) == true) */
+ _mgt_dispatcher(padapter, ptable, precv_frame);
+ break;
+ default:
+ _mgt_dispatcher(padapter, ptable, precv_frame);
+ break;
+ }
+}
+
+/****************************************************************************
+
+Following are the callback functions for each subtype of the management frames
+
+*****************************************************************************/
+
+unsigned int OnProbeReq(struct adapter *padapter, union recv_frame *precv_frame)
+{
+ unsigned int ielen;
+ unsigned char *p;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct wlan_bssid_ex *cur = &(pmlmeinfo->network);
+ u8 *pframe = precv_frame->u.hdr.rx_data;
+ uint len = precv_frame->u.hdr.len;
+ u8 is_valid_p2p_probereq = false;
+
+ if (check_fwstate(pmlmepriv, WIFI_STATION_STATE))
+ return _SUCCESS;
+
+ if (check_fwstate(pmlmepriv, _FW_LINKED) == false &&
+ check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE|WIFI_AP_STATE) == false) {
+ return _SUCCESS;
+ }
+
+
+ /* DBG_871X("+OnProbeReq\n"); */
+
+#ifdef CONFIG_AUTO_AP_MODE
+ if (check_fwstate(pmlmepriv, _FW_LINKED) == true &&
+ pmlmepriv->cur_network.join_res == true) {
+ struct sta_info *psta;
+ u8 *mac_addr, *peer_addr;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ u8 RC_OUI[4] = {0x00, 0xE0, 0x4C, 0x0A};
+ /* EID[1] + EID_LEN[1] + RC_OUI[4] + MAC[6] + PairingID[2] + ChannelNum[2] */
+
+ p = rtw_get_ie(pframe + WLAN_HDR_A3_LEN + _PROBEREQ_IE_OFFSET_, _VENDOR_SPECIFIC_IE_, (int *)&ielen,
+ len - WLAN_HDR_A3_LEN - _PROBEREQ_IE_OFFSET_);
+
+ if (!p || ielen != 14)
+ goto _non_rc_device;
+
+ if (memcmp(p+2, RC_OUI, sizeof(RC_OUI)))
+ goto _non_rc_device;
+
+ if (memcmp(p+6, get_sa(pframe), ETH_ALEN)) {
+ DBG_871X("%s, do rc pairing ("MAC_FMT"), but mac addr mismatch!("MAC_FMT")\n", __func__,
+ MAC_ARG(get_sa(pframe)), MAC_ARG(p+6));
+
+ goto _non_rc_device;
+ }
+
+ DBG_871X("%s, got the pairing device("MAC_FMT")\n", __func__, MAC_ARG(get_sa(pframe)));
+
+ /* look up the station entry; create it if it does not exist */
+ psta = rtw_get_stainfo(pstapriv, get_sa(pframe));
+ if (psta == NULL) {
+ /* allocate a new one */
+ DBG_871X("going to alloc stainfo for rc ="MAC_FMT"\n", MAC_ARG(get_sa(pframe)));
+ psta = rtw_alloc_stainfo(pstapriv, get_sa(pframe));
+ if (psta == NULL) {
+ /* TODO: */
+ DBG_871X(" Exceed the upper limit of supported clients...\n");
+ return _SUCCESS;
+ }
+
+ spin_lock_bh(&pstapriv->asoc_list_lock);
+ if (list_empty(&psta->asoc_list)) {
+ psta->expire_to = pstapriv->expire_to;
+ list_add_tail(&psta->asoc_list, &pstapriv->asoc_list);
+ pstapriv->asoc_list_cnt++;
+ }
+ spin_unlock_bh(&pstapriv->asoc_list_lock);
+
+ /* generate pairing ID */
+ mac_addr = myid(&(padapter->eeprompriv));
+ peer_addr = psta->hwaddr;
+ psta->pid = (u16)(((mac_addr[4]<<8) + mac_addr[5]) + ((peer_addr[4]<<8) + peer_addr[5]));
+
+ /* update peer stainfo */
+ psta->isrc = true;
+ /* psta->aid = 0; */
+ /* psta->mac_id = 2; */
+
+ /* get a unique AID */
+ if (psta->aid > 0) {
+ DBG_871X("old AID %d\n", psta->aid);
+ } else {
+ for (psta->aid = 1; psta->aid <= NUM_STA; psta->aid++)
+ if (pstapriv->sta_aid[psta->aid - 1] == NULL)
+ break;
+
+ if (psta->aid > pstapriv->max_num_sta) {
+ psta->aid = 0;
+ DBG_871X("no room for more AIDs\n");
+ return _SUCCESS;
+ } else {
+ pstapriv->sta_aid[psta->aid - 1] = psta;
+ DBG_871X("allocate new AID = (%d)\n", psta->aid);
+ }
+ }
+
+ psta->qos_option = 1;
+ psta->bw_mode = CHANNEL_WIDTH_20;
+ psta->ieee8021x_blocked = false;
+ psta->htpriv.ht_option = true;
+ psta->htpriv.ampdu_enable = false;
+ psta->htpriv.sgi_20m = false;
+ psta->htpriv.sgi_40m = false;
+ psta->htpriv.ch_offset = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
+ psta->htpriv.agg_enable_bitmap = 0x0;/* reset */
+ psta->htpriv.candidate_tid_bitmap = 0x0;/* reset */
+
+ rtw_hal_set_odm_var(padapter, HAL_ODM_STA_INFO, psta, true);
+
+ memset((void *)&psta->sta_stats, 0, sizeof(struct stainfo_stats));
+
+ spin_lock_bh(&psta->lock);
+ psta->state |= _FW_LINKED;
+ spin_unlock_bh(&psta->lock);
+
+ report_add_sta_event(padapter, psta->hwaddr, psta->aid);
+
+ }
+
+ issue_probersp(padapter, get_sa(pframe), false);
+
+ return _SUCCESS;
+
+ }
+
+_non_rc_device:
+
+ return _SUCCESS;
+
+#endif /* CONFIG_AUTO_AP_MODE */
+
+ p = rtw_get_ie(pframe + WLAN_HDR_A3_LEN + _PROBEREQ_IE_OFFSET_, _SSID_IE_, (int *)&ielen,
+ len - WLAN_HDR_A3_LEN - _PROBEREQ_IE_OFFSET_);
+
+
+ /* check (wildcard) SSID */
+ if (p != NULL) {
+ if (is_valid_p2p_probereq == true)
+ goto _issue_probersp;
+
+ if ((ielen != 0 && false == !memcmp((void *)(p+2), (void *)cur->Ssid.Ssid, cur->Ssid.SsidLength))
+ || (ielen == 0 && pmlmeinfo->hidden_ssid_mode)
+ )
+ return _SUCCESS;
+
+_issue_probersp:
+ if (((check_fwstate(pmlmepriv, _FW_LINKED) == true &&
+ pmlmepriv->cur_network.join_res == true)) || check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE)) {
+ /* DBG_871X("+issue_probersp during ap mode\n"); */
+ issue_probersp(padapter, get_sa(pframe), is_valid_p2p_probereq);
+ }
+
+ }
+
+ return _SUCCESS;
+
+}
+
+unsigned int OnProbeRsp(struct adapter *padapter, union recv_frame *precv_frame)
+{
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+
+ if (pmlmeext->sitesurvey_res.state == SCAN_PROCESS) {
+ report_survey_event(padapter, precv_frame);
+ return _SUCCESS;
+ }
+
+ return _SUCCESS;
+
+}
+
+unsigned int OnBeacon(struct adapter *padapter, union recv_frame *precv_frame)
+{
+ int cam_idx;
+ struct sta_info *psta;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ u8 *pframe = precv_frame->u.hdr.rx_data;
+ uint len = precv_frame->u.hdr.len;
+ struct wlan_bssid_ex *pbss;
+ int ret = _SUCCESS;
+ u8 *p = NULL;
+ u32 ielen = 0;
+
+ p = rtw_get_ie(pframe + sizeof(struct ieee80211_hdr_3addr) + _BEACON_IE_OFFSET_, _EXT_SUPPORTEDRATES_IE_, &ielen, precv_frame->u.hdr.len - sizeof(struct ieee80211_hdr_3addr) - _BEACON_IE_OFFSET_);
+ if ((p != NULL) && (ielen > 0)) {
+ if ((*(p + 1 + ielen) == 0x2D) && (*(p + 2 + ielen) != 0x2D)) {
+ /* Invalid value 0x2D is detected in Extended Supported Rates (ESR) IE. Try to fix the IE length to avoid failed Beacon parsing. */
+ DBG_871X("[WIFIDBG] Error in ESR IE is detected in Beacon of BSSID:"MAC_FMT". Fix the length of ESR IE to avoid failed Beacon parsing.\n", MAC_ARG(GetAddr3Ptr(pframe)));
+ *(p + 1) = ielen - 1;
+ }
+ }
+
+ if (pmlmeext->sitesurvey_res.state == SCAN_PROCESS) {
+ report_survey_event(padapter, precv_frame);
+ return _SUCCESS;
+ }
+
+ if (!memcmp(GetAddr3Ptr(pframe), get_my_bssid(&pmlmeinfo->network), ETH_ALEN)) {
+ if (pmlmeinfo->state & WIFI_FW_AUTH_NULL) {
+ /* update the current network before auth, otherwise some IEs may be wrong */
+ pbss = (struct wlan_bssid_ex *)rtw_malloc(sizeof(struct wlan_bssid_ex));
+ if (pbss) {
+ if (collect_bss_info(padapter, precv_frame, pbss) == _SUCCESS) {
+ update_network(&(pmlmepriv->cur_network.network), pbss, padapter, true);
+ rtw_get_bcn_info(&(pmlmepriv->cur_network));
+ }
+ kfree((u8 *)pbss);
+ }
+
+ /* check the vendor of the assoc AP */
+ pmlmeinfo->assoc_AP_vendor = check_assoc_AP(pframe+sizeof(struct ieee80211_hdr_3addr), len-sizeof(struct ieee80211_hdr_3addr));
+
+ /* update TSF Value */
+ update_TSF(pmlmeext, pframe, len);
+
+ /* reset for adaptive_early_32k */
+ pmlmeext->adaptive_tsf_done = false;
+ pmlmeext->DrvBcnEarly = 0xff;
+ pmlmeext->DrvBcnTimeOut = 0xff;
+ pmlmeext->bcn_cnt = 0;
+ memset(pmlmeext->bcn_delay_cnt, 0, sizeof(pmlmeext->bcn_delay_cnt));
+ memset(pmlmeext->bcn_delay_ratio, 0, sizeof(pmlmeext->bcn_delay_ratio));
+
+ /* start auth */
+ start_clnt_auth(padapter);
+
+ return _SUCCESS;
+ }
+
+ if (((pmlmeinfo->state&0x03) == WIFI_FW_STATION_STATE) && (pmlmeinfo->state & WIFI_FW_ASSOC_SUCCESS)) {
+ psta = rtw_get_stainfo(pstapriv, GetAddr2Ptr(pframe));
+ if (psta != NULL) {
+ ret = rtw_check_bcn_info(padapter, pframe, len);
+ if (!ret) {
+ DBG_871X_LEVEL(_drv_always_, "ap has changed, disconnect now\n ");
+ receive_disconnect(padapter, pmlmeinfo->network.MacAddress, 0);
+ return _SUCCESS;
+ }
+ /* update WMM, ERP in the beacon */
+ /* todo: use a timer instead of counting the number of received beacons */
+ if ((sta_rx_pkts(psta) & 0xf) == 0)
+ /* DBG_871X("update_bcn_info\n"); */
+ update_beacon_info(padapter, pframe, len, psta);
+
+ adaptive_early_32k(pmlmeext, pframe, len);
+ }
+ } else if ((pmlmeinfo->state&0x03) == WIFI_FW_ADHOC_STATE) {
+ psta = rtw_get_stainfo(pstapriv, GetAddr2Ptr(pframe));
+ if (psta != NULL) {
+ /* update WMM, ERP in the beacon */
+ /* todo: use a timer instead of counting the number of received beacons */
+ if ((sta_rx_pkts(psta) & 0xf) == 0) {
+ /* DBG_871X("update_bcn_info\n"); */
+ update_beacon_info(padapter, pframe, len, psta);
+ }
+ } else{
+ /* allocate a new CAM entry for IBSS station */
+ cam_idx = allocate_fw_sta_entry(padapter);
+ if (cam_idx == NUM_STA)
+ goto _END_ONBEACON_;
+
+ /* get supported rate */
+ if (update_sta_support_rate(padapter, (pframe + WLAN_HDR_A3_LEN + _BEACON_IE_OFFSET_), (len - WLAN_HDR_A3_LEN - _BEACON_IE_OFFSET_), cam_idx) == _FAIL) {
+ pmlmeinfo->FW_sta_info[cam_idx].status = 0;
+ goto _END_ONBEACON_;
+ }
+
+ /* update TSF Value */
+ update_TSF(pmlmeext, pframe, len);
+
+ /* report sta add event */
+ report_add_sta_event(padapter, GetAddr2Ptr(pframe), cam_idx);
+ }
+ }
+ }
+
+_END_ONBEACON_:
+
+ return _SUCCESS;
+
+}
+
+unsigned int OnAuth(struct adapter *padapter, union recv_frame *precv_frame)
+{
+ unsigned int auth_mode, seq, ie_len;
+ unsigned char *sa, *p;
+ u16 algorithm;
+ int status;
+ static struct sta_info stat;
+ struct sta_info *pstat = NULL;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ u8 *pframe = precv_frame->u.hdr.rx_data;
+ uint len = precv_frame->u.hdr.len;
+ u8 offset = 0;
+
+ if ((pmlmeinfo->state&0x03) != WIFI_FW_AP_STATE)
+ return _FAIL;
+
+ DBG_871X("+OnAuth\n");
+
+ sa = GetAddr2Ptr(pframe);
+
+ auth_mode = psecuritypriv->dot11AuthAlgrthm;
+
+ if (GetPrivacy(pframe)) {
+ u8 *iv;
+ struct rx_pkt_attrib *prxattrib = &(precv_frame->u.hdr.attrib);
+
+ prxattrib->hdrlen = WLAN_HDR_A3_LEN;
+ prxattrib->encrypt = _WEP40_;
+
+ iv = pframe+prxattrib->hdrlen;
+ prxattrib->key_index = ((iv[3]>>6)&0x3);
+
+ prxattrib->iv_len = 4;
+ prxattrib->icv_len = 4;
+
+ rtw_wep_decrypt(padapter, (u8 *)precv_frame);
+
+ offset = 4;
+ }
+
+ algorithm = le16_to_cpu(*(__le16 *)((SIZE_PTR)pframe + WLAN_HDR_A3_LEN + offset));
+ seq = le16_to_cpu(*(__le16 *)((SIZE_PTR)pframe + WLAN_HDR_A3_LEN + offset + 2));
+
+ DBG_871X("auth alg =%x, seq =%X\n", algorithm, seq);
+
+ if (auth_mode == 2 &&
+ psecuritypriv->dot11PrivacyAlgrthm != _WEP40_ &&
+ psecuritypriv->dot11PrivacyAlgrthm != _WEP104_)
+ auth_mode = 0;
+
+ if ((algorithm > 0 && auth_mode == 0) || /* rx a shared-key auth but shared not enabled */
+ (algorithm == 0 && auth_mode == 1)) { /* rx a open-system auth but shared-key is enabled */
+ DBG_871X("auth rejected due to bad alg [alg =%d, auth_mib =%d] %02X%02X%02X%02X%02X%02X\n",
+ algorithm, auth_mode, sa[0], sa[1], sa[2], sa[3], sa[4], sa[5]);
+
+ status = _STATS_NO_SUPP_ALG_;
+
+ goto auth_fail;
+ }
+
+ if (rtw_access_ctrl(padapter, sa) == false) {
+ status = _STATS_UNABLE_HANDLE_STA_;
+ goto auth_fail;
+ }
+
+ pstat = rtw_get_stainfo(pstapriv, sa);
+ if (pstat == NULL) {
+
+ /* allocate a new one */
+ DBG_871X("going to alloc stainfo for sa ="MAC_FMT"\n", MAC_ARG(sa));
+ pstat = rtw_alloc_stainfo(pstapriv, sa);
+ if (pstat == NULL) {
+ DBG_871X(" Exceed the upper limit of supported clients...\n");
+ status = _STATS_UNABLE_HANDLE_STA_;
+ goto auth_fail;
+ }
+
+ pstat->state = WIFI_FW_AUTH_NULL;
+ pstat->auth_seq = 0;
+
+ /* pstat->flags = 0; */
+ /* pstat->capability = 0; */
+ } else{
+
+ spin_lock_bh(&pstapriv->asoc_list_lock);
+ if (list_empty(&pstat->asoc_list) == false) {
+ list_del_init(&pstat->asoc_list);
+ pstapriv->asoc_list_cnt--;
+ if (pstat->expire_to > 0) {
+ /* TODO: STA re_auth within expire_to */
+ }
+ }
+ spin_unlock_bh(&pstapriv->asoc_list_lock);
+
+ if (seq == 1) {
+ /* TODO: STA re_auth and auth timeout */
+ }
+ }
+
+ spin_lock_bh(&pstapriv->auth_list_lock);
+ if (list_empty(&pstat->auth_list)) {
+
+ list_add_tail(&pstat->auth_list, &pstapriv->auth_list);
+ pstapriv->auth_list_cnt++;
+ }
+ spin_unlock_bh(&pstapriv->auth_list_lock);
+
+ if (pstat->auth_seq == 0)
+ pstat->expire_to = pstapriv->auth_to;
+
+
+ if ((pstat->auth_seq + 1) != seq) {
+ DBG_871X("(1)auth rejected because out of seq [rx_seq =%d, exp_seq =%d]!\n",
+ seq, pstat->auth_seq+1);
+ status = _STATS_OUT_OF_AUTH_SEQ_;
+ goto auth_fail;
+ }
+
+ if (algorithm == 0 && (auth_mode == 0 || auth_mode == 2 || auth_mode == 3)) {
+ if (seq == 1) {
+ pstat->state &= ~WIFI_FW_AUTH_NULL;
+ pstat->state |= WIFI_FW_AUTH_SUCCESS;
+ pstat->expire_to = pstapriv->assoc_to;
+ pstat->authalg = algorithm;
+ } else{
+ DBG_871X("(2)auth rejected because out of seq [rx_seq =%d, exp_seq =%d]!\n",
+ seq, pstat->auth_seq+1);
+ status = _STATS_OUT_OF_AUTH_SEQ_;
+ goto auth_fail;
+ }
+ } else{ /* shared system or auto authentication */
+ if (seq == 1) {
+ /* prepare the challenge text... */
+ memset((void *)pstat->chg_txt, 78, 128);
+
+ pstat->state &= ~WIFI_FW_AUTH_NULL;
+ pstat->state |= WIFI_FW_AUTH_STATE;
+ pstat->authalg = algorithm;
+ pstat->auth_seq = 2;
+ } else if (seq == 3) {
+ /* checking the challenge text... */
+ DBG_871X("checking the challenge text...\n");
+
+ p = rtw_get_ie(pframe + WLAN_HDR_A3_LEN + 4 + _AUTH_IE_OFFSET_, _CHLGETXT_IE_, (int *)&ie_len,
+ len - WLAN_HDR_A3_LEN - _AUTH_IE_OFFSET_ - 4);
+
+ if ((p == NULL) || (ie_len <= 0)) {
+ DBG_871X("auth rejected because challenge failure!(1)\n");
+ status = _STATS_CHALLENGE_FAIL_;
+ goto auth_fail;
+ }
+
+ if (!memcmp((void *)(p + 2), pstat->chg_txt, 128)) {
+ pstat->state &= (~WIFI_FW_AUTH_STATE);
+ pstat->state |= WIFI_FW_AUTH_SUCCESS;
+ /* the challenge text is correct... */
+ pstat->expire_to = pstapriv->assoc_to;
+ } else{
+ DBG_871X("auth rejected because challenge failure!\n");
+ status = _STATS_CHALLENGE_FAIL_;
+ goto auth_fail;
+ }
+ } else{
+ DBG_871X("(3)auth rejected because out of seq [rx_seq =%d, exp_seq =%d]!\n",
+ seq, pstat->auth_seq+1);
+ status = _STATS_OUT_OF_AUTH_SEQ_;
+ goto auth_fail;
+ }
+ }
+
+
+ /* Now, we are going to issue_auth... */
+ pstat->auth_seq = seq + 1;
+
+ issue_auth(padapter, pstat, (unsigned short)(_STATS_SUCCESSFUL_));
+
+ if (pstat->state & WIFI_FW_AUTH_SUCCESS)
+ pstat->auth_seq = 0;
+
+
+ return _SUCCESS;
+
+auth_fail:
+
+ if (pstat)
+ rtw_free_stainfo(padapter, pstat);
+
+ pstat = &stat;
+ memset((char *)pstat, '\0', sizeof(stat));
+ pstat->auth_seq = 2;
+ memcpy(pstat->hwaddr, sa, 6);
+
+ issue_auth(padapter, pstat, (unsigned short)status);
+
+ return _FAIL;
+
+}
+
+unsigned int OnAuthClient(struct adapter *padapter, union recv_frame *precv_frame)
+{
+ unsigned int seq, len, status, algthm, offset;
+ unsigned char *p;
+ unsigned int go2asoc = 0;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ u8 *pframe = precv_frame->u.hdr.rx_data;
+ uint pkt_len = precv_frame->u.hdr.len;
+
+ DBG_871X("%s\n", __func__);
+
+ /* check A1 matches or not */
+ if (memcmp(myid(&(padapter->eeprompriv)), get_da(pframe), ETH_ALEN))
+ return _SUCCESS;
+
+ if (!(pmlmeinfo->state & WIFI_FW_AUTH_STATE))
+ return _SUCCESS;
+
+ offset = (GetPrivacy(pframe)) ? 4 : 0;
+
+ algthm = le16_to_cpu(*(__le16 *)((SIZE_PTR)pframe + WLAN_HDR_A3_LEN + offset));
+ seq = le16_to_cpu(*(__le16 *)((SIZE_PTR)pframe + WLAN_HDR_A3_LEN + offset + 2));
+ status = le16_to_cpu(*(__le16 *)((SIZE_PTR)pframe + WLAN_HDR_A3_LEN + offset + 4));
+
+ if (status != 0) {
+ DBG_871X("clnt auth fail, status: %d\n", status);
+ if (status == 13) { /* pmlmeinfo->auth_algo == dot11AuthAlgrthm_Auto) */
+ if (pmlmeinfo->auth_algo == dot11AuthAlgrthm_Shared)
+ pmlmeinfo->auth_algo = dot11AuthAlgrthm_Open;
+ else
+ pmlmeinfo->auth_algo = dot11AuthAlgrthm_Shared;
+ /* pmlmeinfo->reauth_count = 0; */
+ }
+
+ set_link_timer(pmlmeext, 1);
+ goto authclnt_fail;
+ }
+
+ if (seq == 2) {
+ if (pmlmeinfo->auth_algo == dot11AuthAlgrthm_Shared) {
+ /* legacy shared-key system */
+ p = rtw_get_ie(pframe + WLAN_HDR_A3_LEN + _AUTH_IE_OFFSET_, _CHLGETXT_IE_, (int *)&len,
+ pkt_len - WLAN_HDR_A3_LEN - _AUTH_IE_OFFSET_);
+
+ if (p == NULL) {
+ /* DBG_871X("marc: no challenge text?\n"); */
+ goto authclnt_fail;
+ }
+
+ memcpy((void *)(pmlmeinfo->chg_txt), (void *)(p + 2), len);
+ pmlmeinfo->auth_seq = 3;
+ issue_auth(padapter, NULL, 0);
+ set_link_timer(pmlmeext, REAUTH_TO);
+
+ return _SUCCESS;
+ } else{
+ /* open system */
+ go2asoc = 1;
+ }
+ } else if (seq == 4) {
+ if (pmlmeinfo->auth_algo == dot11AuthAlgrthm_Shared) {
+ go2asoc = 1;
+ } else{
+ goto authclnt_fail;
+ }
+ } else{
+ /* this is also illegal */
+ /* DBG_871X("marc: clnt auth failed due to illegal seq =%x\n", seq); */
+ goto authclnt_fail;
+ }
+
+ if (go2asoc) {
+ DBG_871X_LEVEL(_drv_always_, "auth success, start assoc\n");
+ start_clnt_assoc(padapter);
+ return _SUCCESS;
+ }
+
+authclnt_fail:
+
+ /* pmlmeinfo->state &= ~(WIFI_FW_AUTH_STATE); */
+
+ return _FAIL;
+
+}
+
+unsigned int OnAssocReq(struct adapter *padapter, union recv_frame *precv_frame)
+{
+ u16 capab_info, listen_interval;
+ struct rtw_ieee802_11_elems elems;
+ struct sta_info *pstat;
+ unsigned char reassoc, *p, *pos, *wpa_ie;
+ unsigned char WMM_IE[] = {0x00, 0x50, 0xf2, 0x02, 0x00, 0x01};
+ int i, ie_len, wpa_ie_len, left;
+ unsigned char supportRate[16];
+ int supportRateNum;
+ unsigned short status = _STATS_SUCCESSFUL_;
+ unsigned short frame_type, ie_offset = 0;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct wlan_bssid_ex *cur = &(pmlmeinfo->network);
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ u8 *pframe = precv_frame->u.hdr.rx_data;
+ uint pkt_len = precv_frame->u.hdr.len;
+
+ if ((pmlmeinfo->state&0x03) != WIFI_FW_AP_STATE)
+ return _FAIL;
+
+ frame_type = GetFrameSubType(pframe);
+ if (frame_type == WIFI_ASSOCREQ) {
+ reassoc = 0;
+ ie_offset = _ASOCREQ_IE_OFFSET_;
+ } else{ /* WIFI_REASSOCREQ */
+ reassoc = 1;
+ ie_offset = _REASOCREQ_IE_OFFSET_;
+ }
+
+
+ if (pkt_len < IEEE80211_3ADDR_LEN + ie_offset) {
+ DBG_871X("handle_assoc(reassoc =%d) - too short payload (len =%lu)"
+ "\n", reassoc, (unsigned long)pkt_len);
+ return _FAIL;
+ }
+
+ pstat = rtw_get_stainfo(pstapriv, GetAddr2Ptr(pframe));
+ if (pstat == (struct sta_info *)NULL) {
+ status = _RSON_CLS2_;
+ goto asoc_class2_error;
+ }
+
+ capab_info = RTW_GET_LE16(pframe + WLAN_HDR_A3_LEN);
+ /* capab_info = le16_to_cpu(*(unsigned short *)(pframe + WLAN_HDR_A3_LEN)); */
+ /* listen_interval = le16_to_cpu(*(unsigned short *)(pframe + WLAN_HDR_A3_LEN+2)); */
+ listen_interval = RTW_GET_LE16(pframe + WLAN_HDR_A3_LEN+2);
+
+ left = pkt_len - (IEEE80211_3ADDR_LEN + ie_offset);
+ pos = pframe + (IEEE80211_3ADDR_LEN + ie_offset);
+
+
+ DBG_871X("%s\n", __func__);
+
+ /* check if this station has been successfully authenticated/associated */
+ if (!((pstat->state) & WIFI_FW_AUTH_SUCCESS)) {
+ if (!((pstat->state) & WIFI_FW_ASSOC_SUCCESS)) {
+ status = _RSON_CLS2_;
+ goto asoc_class2_error;
+ } else{
+ pstat->state &= (~WIFI_FW_ASSOC_SUCCESS);
+ pstat->state |= WIFI_FW_ASSOC_STATE;
+ }
+ } else{
+ pstat->state &= (~WIFI_FW_AUTH_SUCCESS);
+ pstat->state |= WIFI_FW_ASSOC_STATE;
+ }
+
+
+ pstat->capability = capab_info;
+
+ /* now parse all ieee802_11 ie to point to elems */
+ if (rtw_ieee802_11_parse_elems(pos, left, &elems, 1) == ParseFailed ||
+ !elems.ssid) {
+ DBG_871X("STA " MAC_FMT " sent invalid association request\n",
+ MAC_ARG(pstat->hwaddr));
+ status = _STATS_FAILURE_;
+ goto OnAssocReqFail;
+ }
+
+
+ /* now we should check all the fields... */
+ /* checking SSID */
+ p = rtw_get_ie(pframe + WLAN_HDR_A3_LEN + ie_offset, _SSID_IE_, &ie_len,
+ pkt_len - WLAN_HDR_A3_LEN - ie_offset);
+ if (p == NULL) {
+ status = _STATS_FAILURE_;
+ }
+
+ if (ie_len == 0) /* broadcast ssid, however it is not allowed in assocreq */
+ status = _STATS_FAILURE_;
+ else {
+ /* check if ssid match */
+ if (memcmp((void *)(p+2), cur->Ssid.Ssid, cur->Ssid.SsidLength))
+ status = _STATS_FAILURE_;
+
+ if (ie_len != cur->Ssid.SsidLength)
+ status = _STATS_FAILURE_;
+ }
+
+ if (_STATS_SUCCESSFUL_ != status)
+ goto OnAssocReqFail;
+
+ /* check if the supported rate is ok */
+ p = rtw_get_ie(pframe + WLAN_HDR_A3_LEN + ie_offset, _SUPPORTEDRATES_IE_, &ie_len, pkt_len - WLAN_HDR_A3_LEN - ie_offset);
+ if (p == NULL) {
+ DBG_871X("Rx a sta assoc-req which supported rate is empty!\n");
+ /* use our own rate set as the station's rate set */
+ /* memcpy(supportRate, AP_BSSRATE, AP_BSSRATE_LEN); */
+ /* supportRateNum = AP_BSSRATE_LEN; */
+
+ status = _STATS_FAILURE_;
+ goto OnAssocReqFail;
+ } else {
+ memcpy(supportRate, p+2, ie_len);
+ supportRateNum = ie_len;
+
+ p = rtw_get_ie(pframe + WLAN_HDR_A3_LEN + ie_offset, _EXT_SUPPORTEDRATES_IE_, &ie_len,
+ pkt_len - WLAN_HDR_A3_LEN - ie_offset);
+ if (p != NULL) {
+
+ if (supportRateNum <= sizeof(supportRate)) {
+ memcpy(supportRate+supportRateNum, p+2, ie_len);
+ supportRateNum += ie_len;
+ }
+ }
+ }
+
+ /* todo: mask supportRate between AP & STA -> move to update raid */
+ /* get_matched_rate(pmlmeext, supportRate, &supportRateNum, 0); */
+
+ /* update station supportRate */
+ pstat->bssratelen = supportRateNum;
+ memcpy(pstat->bssrateset, supportRate, supportRateNum);
+ UpdateBrateTblForSoftAP(pstat->bssrateset, pstat->bssratelen);
+
+ /* check RSN/WPA/WPS */
+ pstat->dot8021xalg = 0;
+ pstat->wpa_psk = 0;
+ pstat->wpa_group_cipher = 0;
+ pstat->wpa2_group_cipher = 0;
+ pstat->wpa_pairwise_cipher = 0;
+ pstat->wpa2_pairwise_cipher = 0;
+ memset(pstat->wpa_ie, 0, sizeof(pstat->wpa_ie));
+ if ((psecuritypriv->wpa_psk & BIT(1)) && elems.rsn_ie) {
+
+ int group_cipher = 0, pairwise_cipher = 0;
+
+ wpa_ie = elems.rsn_ie;
+ wpa_ie_len = elems.rsn_ie_len;
+
+ if (rtw_parse_wpa2_ie(wpa_ie-2, wpa_ie_len+2, &group_cipher, &pairwise_cipher, NULL) == _SUCCESS) {
+ pstat->dot8021xalg = 1;/* psk, todo:802.1x */
+ pstat->wpa_psk |= BIT(1);
+
+ pstat->wpa2_group_cipher = group_cipher&psecuritypriv->wpa2_group_cipher;
+ pstat->wpa2_pairwise_cipher = pairwise_cipher&psecuritypriv->wpa2_pairwise_cipher;
+
+ if (!pstat->wpa2_group_cipher)
+ status = WLAN_STATUS_GROUP_CIPHER_NOT_VALID;
+
+ if (!pstat->wpa2_pairwise_cipher)
+ status = WLAN_STATUS_PAIRWISE_CIPHER_NOT_VALID;
+ } else{
+ status = WLAN_STATUS_INVALID_IE;
+ }
+
+ } else if ((psecuritypriv->wpa_psk & BIT(0)) && elems.wpa_ie) {
+
+ int group_cipher = 0, pairwise_cipher = 0;
+
+ wpa_ie = elems.wpa_ie;
+ wpa_ie_len = elems.wpa_ie_len;
+
+ if (rtw_parse_wpa_ie(wpa_ie-2, wpa_ie_len+2, &group_cipher, &pairwise_cipher, NULL) == _SUCCESS) {
+ pstat->dot8021xalg = 1;/* psk, todo:802.1x */
+ pstat->wpa_psk |= BIT(0);
+
+ pstat->wpa_group_cipher = group_cipher&psecuritypriv->wpa_group_cipher;
+ pstat->wpa_pairwise_cipher = pairwise_cipher&psecuritypriv->wpa_pairwise_cipher;
+
+ if (!pstat->wpa_group_cipher)
+ status = WLAN_STATUS_GROUP_CIPHER_NOT_VALID;
+
+ if (!pstat->wpa_pairwise_cipher)
+ status = WLAN_STATUS_PAIRWISE_CIPHER_NOT_VALID;
+
+ } else{
+ status = WLAN_STATUS_INVALID_IE;
+ }
+
+ } else {
+ wpa_ie = NULL;
+ wpa_ie_len = 0;
+ }
+
+ if (_STATS_SUCCESSFUL_ != status)
+ goto OnAssocReqFail;
+
+ pstat->flags &= ~(WLAN_STA_WPS | WLAN_STA_MAYBE_WPS);
+ if (wpa_ie == NULL) {
+ if (elems.wps_ie) {
+ DBG_871X("STA included WPS IE in "
+ "(Re)Association Request - assume WPS is "
+ "used\n");
+ pstat->flags |= WLAN_STA_WPS;
+ /* wpabuf_free(sta->wps_ie); */
+ /* sta->wps_ie = wpabuf_alloc_copy(elems.wps_ie + 4, */
+ /* elems.wps_ie_len - 4); */
+ } else {
+ DBG_871X("STA did not include WPA/RSN IE "
+ "in (Re)Association Request - possible WPS "
+ "use\n");
+ pstat->flags |= WLAN_STA_MAYBE_WPS;
+ }
+
+
+ /* the AP supports WPA/RSN and the STA is going to do WPS, but the AP is not ready, */
+ /* i.e. the selected registrar of the AP is false */
+ if ((psecuritypriv->wpa_psk > 0)
+ && (pstat->flags & (WLAN_STA_WPS|WLAN_STA_MAYBE_WPS))) {
+ if (pmlmepriv->wps_beacon_ie) {
+ u8 selected_registrar = 0;
+
+ rtw_get_wps_attr_content(pmlmepriv->wps_beacon_ie, pmlmepriv->wps_beacon_ie_len, WPS_ATTR_SELECTED_REGISTRAR, &selected_registrar, NULL);
+
+ if (!selected_registrar) {
+ DBG_871X("selected_registrar is false , or AP is not ready to do WPS\n");
+
+ status = _STATS_UNABLE_HANDLE_STA_;
+
+ goto OnAssocReqFail;
+ }
+ }
+ }
+
+ } else{
+ int copy_len;
+
+ if (psecuritypriv->wpa_psk == 0) {
+ DBG_871X("STA " MAC_FMT ": WPA/RSN IE in association "
+ "request, but AP don't support WPA/RSN\n", MAC_ARG(pstat->hwaddr));
+
+ status = WLAN_STATUS_INVALID_IE;
+
+ goto OnAssocReqFail;
+
+ }
+
+ if (elems.wps_ie) {
+ DBG_871X("STA included WPS IE in "
+ "(Re)Association Request - WPS is "
+ "used\n");
+ pstat->flags |= WLAN_STA_WPS;
+ copy_len = 0;
+ } else{
+ copy_len = ((wpa_ie_len+2) > sizeof(pstat->wpa_ie)) ? (sizeof(pstat->wpa_ie)):(wpa_ie_len+2);
+ }
+
+
+ if (copy_len > 0)
+ memcpy(pstat->wpa_ie, wpa_ie-2, copy_len);
+
+ }
+
+
+ /* check if there is a WMM IE & WMM-PS support */
+ pstat->flags &= ~WLAN_STA_WME;
+ pstat->qos_option = 0;
+ pstat->qos_info = 0;
+ pstat->has_legacy_ac = true;
+ pstat->uapsd_vo = 0;
+ pstat->uapsd_vi = 0;
+ pstat->uapsd_be = 0;
+ pstat->uapsd_bk = 0;
+ if (pmlmepriv->qospriv.qos_option) {
+ p = pframe + WLAN_HDR_A3_LEN + ie_offset; ie_len = 0;
+ for (;;) {
+ p = rtw_get_ie(p, _VENDOR_SPECIFIC_IE_, &ie_len, pkt_len - WLAN_HDR_A3_LEN - ie_offset);
+ if (p != NULL) {
+ if (!memcmp(p+2, WMM_IE, 6)) {
+
+ pstat->flags |= WLAN_STA_WME;
+
+ pstat->qos_option = 1;
+ pstat->qos_info = *(p+8);
+
+ pstat->max_sp_len = (pstat->qos_info>>5)&0x3;
+
+ if ((pstat->qos_info&0xf) != 0xf)
+ pstat->has_legacy_ac = true;
+ else
+ pstat->has_legacy_ac = false;
+
+ if (pstat->qos_info&0xf) {
+ if (pstat->qos_info&BIT(0))
+ pstat->uapsd_vo = BIT(0)|BIT(1);
+ else
+ pstat->uapsd_vo = 0;
+
+ if (pstat->qos_info&BIT(1))
+ pstat->uapsd_vi = BIT(0)|BIT(1);
+ else
+ pstat->uapsd_vi = 0;
+
+ if (pstat->qos_info&BIT(2))
+ pstat->uapsd_bk = BIT(0)|BIT(1);
+ else
+ pstat->uapsd_bk = 0;
+
+ if (pstat->qos_info&BIT(3))
+ pstat->uapsd_be = BIT(0)|BIT(1);
+ else
+ pstat->uapsd_be = 0;
+
+ }
+
+ break;
+ }
+ } else {
+ break;
+ }
+ p = p + ie_len + 2;
+ }
+ }
+
+ /* save HT capabilities in the sta object */
+ memset(&pstat->htpriv.ht_cap, 0, sizeof(struct rtw_ieee80211_ht_cap));
+ if (elems.ht_capabilities && elems.ht_capabilities_len >= sizeof(struct rtw_ieee80211_ht_cap)) {
+ pstat->flags |= WLAN_STA_HT;
+
+ pstat->flags |= WLAN_STA_WME;
+
+ memcpy(&pstat->htpriv.ht_cap, elems.ht_capabilities, sizeof(struct rtw_ieee80211_ht_cap));
+
+ } else
+ pstat->flags &= ~WLAN_STA_HT;
+
+
+ if ((pmlmepriv->htpriv.ht_option == false) && (pstat->flags&WLAN_STA_HT)) {
+ status = _STATS_FAILURE_;
+ goto OnAssocReqFail;
+ }
+
+
+ if ((pstat->flags & WLAN_STA_HT) &&
+ ((pstat->wpa2_pairwise_cipher&WPA_CIPHER_TKIP) ||
+ (pstat->wpa_pairwise_cipher&WPA_CIPHER_TKIP))) {
+ DBG_871X("HT: " MAC_FMT " tried to "
+ "use TKIP with HT association\n", MAC_ARG(pstat->hwaddr));
+
+ /* status = WLAN_STATUS_CIPHER_REJECTED_PER_POLICY; */
+ /* goto OnAssocReqFail; */
+ }
+ pstat->flags |= WLAN_STA_NONERP;
+ for (i = 0; i < pstat->bssratelen; i++) {
+ if ((pstat->bssrateset[i] & 0x7f) > 22) {
+ pstat->flags &= ~WLAN_STA_NONERP;
+ break;
+ }
+ }
+
+ if (pstat->capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
+ pstat->flags |= WLAN_STA_SHORT_PREAMBLE;
+ else
+ pstat->flags &= ~WLAN_STA_SHORT_PREAMBLE;
+
+
+
+ if (status != _STATS_SUCCESSFUL_)
+ goto OnAssocReqFail;
+
+ /* TODO: identify_proprietary_vendor_ie(); */
+ /* Realtek proprietary IE */
+ /* identify if this is Broadcom sta */
+ /* identify if this is ralink sta */
+ /* Customer proprietary IE */
+
+
+
+ /* get a unique AID */
+ if (pstat->aid > 0) {
+ DBG_871X(" old AID %d\n", pstat->aid);
+ } else {
+ for (pstat->aid = 1; pstat->aid < NUM_STA; pstat->aid++)
+ if (pstapriv->sta_aid[pstat->aid - 1] == NULL)
+ break;
+
+ /* if (pstat->aid > NUM_STA) { */
+ if (pstat->aid > pstapriv->max_num_sta) {
+
+ pstat->aid = 0;
+
+ DBG_871X(" no room for more AIDs\n");
+
+ status = WLAN_STATUS_AP_UNABLE_TO_HANDLE_NEW_STA;
+
+ goto OnAssocReqFail;
+
+
+ } else {
+ pstapriv->sta_aid[pstat->aid - 1] = pstat;
+ DBG_871X("allocate new AID = (%d)\n", pstat->aid);
+ }
+ }
+
+
+ pstat->state &= (~WIFI_FW_ASSOC_STATE);
+ pstat->state |= WIFI_FW_ASSOC_SUCCESS;
+
+ spin_lock_bh(&pstapriv->auth_list_lock);
+ if (!list_empty(&pstat->auth_list)) {
+ list_del_init(&pstat->auth_list);
+ pstapriv->auth_list_cnt--;
+ }
+ spin_unlock_bh(&pstapriv->auth_list_lock);
+
+ spin_lock_bh(&pstapriv->asoc_list_lock);
+ if (list_empty(&pstat->asoc_list)) {
+ pstat->expire_to = pstapriv->expire_to;
+ list_add_tail(&pstat->asoc_list, &pstapriv->asoc_list);
+ pstapriv->asoc_list_cnt++;
+ }
+ spin_unlock_bh(&pstapriv->asoc_list_lock);
+
+ /* now the station is qualified to join our BSS... */
+	if (pstat && (pstat->state & WIFI_FW_ASSOC_SUCCESS) && status == _STATS_SUCCESSFUL_) {
+ /* 1 bss_cap_update & sta_info_update */
+ bss_cap_update_on_sta_join(padapter, pstat);
+ sta_info_update(padapter, pstat);
+
+ /* 2 issue assoc rsp before notify station join event. */
+ if (frame_type == WIFI_ASSOCREQ)
+ issue_asocrsp(padapter, status, pstat, WIFI_ASSOCRSP);
+ else
+ issue_asocrsp(padapter, status, pstat, WIFI_REASSOCRSP);
+
+ spin_lock_bh(&pstat->lock);
+ if (pstat->passoc_req) {
+ kfree(pstat->passoc_req);
+ pstat->passoc_req = NULL;
+ pstat->assoc_req_len = 0;
+ }
+
+ pstat->passoc_req = rtw_zmalloc(pkt_len);
+ if (pstat->passoc_req) {
+ memcpy(pstat->passoc_req, pframe, pkt_len);
+ pstat->assoc_req_len = pkt_len;
+ }
+ spin_unlock_bh(&pstat->lock);
+
+ /* 3-(1) report sta add event */
+ report_add_sta_event(padapter, pstat->hwaddr, pstat->aid);
+ }
+
+ return _SUCCESS;
+
+asoc_class2_error:
+
+ issue_deauth(padapter, (void *)GetAddr2Ptr(pframe), status);
+
+ return _FAIL;
+
+OnAssocReqFail:
+
+ pstat->aid = 0;
+ if (frame_type == WIFI_ASSOCREQ)
+ issue_asocrsp(padapter, status, pstat, WIFI_ASSOCRSP);
+ else
+ issue_asocrsp(padapter, status, pstat, WIFI_REASSOCRSP);
+
+ return _FAIL;
+}
+
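+/*
+ * Handle a (re)association response from the AP we are joining: check the
+ * destination address and MLME state, parse the status code, capability and
+ * AID, walk the IEs (WMM, HT, ERP) and report the join result.
+ */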
+unsigned int OnAssocRsp(struct adapter *padapter, union recv_frame *precv_frame)
+{
+ uint i;
+ int res;
+ unsigned short status;
+ struct ndis_80211_var_ie *pIE;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ /* struct wlan_bssid_ex *cur_network = &(pmlmeinfo->network); */
+ u8 *pframe = precv_frame->u.hdr.rx_data;
+ uint pkt_len = precv_frame->u.hdr.len;
+
+ DBG_871X("%s\n", __func__);
+
+ /* check A1 matches or not */
+ if (memcmp(myid(&(padapter->eeprompriv)), get_da(pframe), ETH_ALEN))
+ return _SUCCESS;
+
+ if (!(pmlmeinfo->state & (WIFI_FW_AUTH_SUCCESS | WIFI_FW_ASSOC_STATE)))
+ return _SUCCESS;
+
+ if (pmlmeinfo->state & WIFI_FW_ASSOC_SUCCESS)
+ return _SUCCESS;
+
+ del_timer_sync(&pmlmeext->link_timer);
+
+ /* status */
+ status = le16_to_cpu(*(__le16 *)(pframe + WLAN_HDR_A3_LEN + 2));
+ if (status > 0) {
+ DBG_871X("assoc reject, status code: %d\n", status);
+ pmlmeinfo->state = WIFI_FW_NULL_STATE;
+ res = -4;
+ goto report_assoc_result;
+ }
+
+ /* get capabilities */
+ pmlmeinfo->capability = le16_to_cpu(*(__le16 *)(pframe + WLAN_HDR_A3_LEN));
+
+ /* set slot time */
+ pmlmeinfo->slotTime = (pmlmeinfo->capability & BIT(10)) ? 9 : 20;
+
+ /* AID */
+ res = pmlmeinfo->aid = (int)(le16_to_cpu(*(__le16 *)(pframe + WLAN_HDR_A3_LEN + 4))&0x3fff);
+
+ /* following are moved to join event callback function */
+ /* to handle HT, WMM, rate adaptive, update MAC reg */
+ /* for not to handle the synchronous IO in the tasklet */
+ for (i = (6 + WLAN_HDR_A3_LEN); i < pkt_len;) {
+ pIE = (struct ndis_80211_var_ie *)(pframe + i);
+
+ switch (pIE->ElementID) {
+ case _VENDOR_SPECIFIC_IE_:
+ if (!memcmp(pIE->data, WMM_PARA_OUI, 6)) /* WMM */
+ WMM_param_handler(padapter, pIE);
+ break;
+
+ case _HT_CAPABILITY_IE_: /* HT caps */
+ HT_caps_handler(padapter, pIE);
+ break;
+
+ case _HT_EXTRA_INFO_IE_: /* HT info */
+ HT_info_handler(padapter, pIE);
+ break;
+
+		case _ERPINFO_IE_:
+			ERP_IE_handler(padapter, pIE);
+			break;
+
+ default:
+ break;
+ }
+
+ i += (pIE->Length + 2);
+ }
+
+ pmlmeinfo->state &= (~WIFI_FW_ASSOC_STATE);
+ pmlmeinfo->state |= WIFI_FW_ASSOC_SUCCESS;
+
+ /* Update Basic Rate Table for spec, 2010-12-28 , by thomas */
+ UpdateBrateTbl(padapter, pmlmeinfo->network.SupportedRates);
+
+report_assoc_result:
+ if (res > 0) {
+ rtw_buf_update(&pmlmepriv->assoc_rsp, &pmlmepriv->assoc_rsp_len, pframe, pkt_len);
+ } else {
+ rtw_buf_free(&pmlmepriv->assoc_rsp, &pmlmepriv->assoc_rsp_len);
+ }
+
+ report_join_res(padapter, res);
+
+ return _SUCCESS;
+}
+
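+/*
+ * Handle a received deauthentication frame. In AP mode the sending station is
+ * removed from the association list and freed; in station mode the disconnect
+ * is reported upwards, except for certain spurious deauth reasons received
+ * while we are still authenticating or associating.
+ */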
+unsigned int OnDeAuth(struct adapter *padapter, union recv_frame *precv_frame)
+{
+ unsigned short reason;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ u8 *pframe = precv_frame->u.hdr.rx_data;
+
+ /* check A3 */
+ if (memcmp(GetAddr3Ptr(pframe), get_my_bssid(&pmlmeinfo->network), ETH_ALEN))
+ return _SUCCESS;
+
+ reason = le16_to_cpu(*(__le16 *)(pframe + WLAN_HDR_A3_LEN));
+
+ DBG_871X("%s Reason code(%d)\n", __func__, reason);
+
+	if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
+ struct sta_info *psta;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+
+ /* spin_lock_bh(&(pstapriv->sta_hash_lock)); */
+ /* rtw_free_stainfo(padapter, psta); */
+ /* spin_unlock_bh(&(pstapriv->sta_hash_lock)); */
+
+ DBG_871X_LEVEL(_drv_always_, "ap recv deauth reason code(%d) sta:%pM\n",
+ reason, GetAddr2Ptr(pframe));
+
+ psta = rtw_get_stainfo(pstapriv, GetAddr2Ptr(pframe));
+ if (psta) {
+ u8 updated = false;
+
+ spin_lock_bh(&pstapriv->asoc_list_lock);
+			if (!list_empty(&psta->asoc_list)) {
+ list_del_init(&psta->asoc_list);
+ pstapriv->asoc_list_cnt--;
+ updated = ap_free_sta(padapter, psta, false, reason);
+
+ }
+ spin_unlock_bh(&pstapriv->asoc_list_lock);
+
+ associated_clients_update(padapter, updated);
+ }
+
+
+ return _SUCCESS;
+	} else {
+ int ignore_received_deauth = 0;
+
+ /* Commented by Albert 20130604 */
+ /* Before sending the auth frame to start the STA/GC mode connection with AP/GO, */
+ /* we will send the deauth first. */
+		/* However, Win8.1 with BRCM Wi-Fi will send a deauth with reason code 6 to us after receiving our deauth. */
+ /* Added the following code to avoid this case. */
+ if ((pmlmeinfo->state & WIFI_FW_AUTH_STATE) ||
+ (pmlmeinfo->state & WIFI_FW_ASSOC_STATE)) {
+ if (reason == WLAN_REASON_CLASS2_FRAME_FROM_NONAUTH_STA) {
+ ignore_received_deauth = 1;
+			} else if (reason == WLAN_REASON_PREV_AUTH_NOT_VALID) {
+ /* TODO: 802.11r */
+ ignore_received_deauth = 1;
+ }
+ }
+
+ DBG_871X_LEVEL(_drv_always_, "sta recv deauth reason code(%d) sta:%pM, ignore = %d\n",
+ reason, GetAddr3Ptr(pframe), ignore_received_deauth);
+
+		if (!ignore_received_deauth)
+			receive_disconnect(padapter, GetAddr3Ptr(pframe), reason);
+ }
+ pmlmepriv->LinkDetectInfo.bBusyTraffic = false;
+ return _SUCCESS;
+
+}
+
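+/*
+ * Handle a received disassociation frame: free the sending station's
+ * association entry in AP mode, otherwise report the disconnect to the MLME
+ * layer.
+ */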
+unsigned int OnDisassoc(struct adapter *padapter, union recv_frame *precv_frame)
+{
+ unsigned short reason;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ u8 *pframe = precv_frame->u.hdr.rx_data;
+
+ /* check A3 */
+ if (memcmp(GetAddr3Ptr(pframe), get_my_bssid(&pmlmeinfo->network), ETH_ALEN))
+ return _SUCCESS;
+
+ reason = le16_to_cpu(*(__le16 *)(pframe + WLAN_HDR_A3_LEN));
+
+ DBG_871X("%s Reason code(%d)\n", __func__, reason);
+
+	if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
+ struct sta_info *psta;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+
+ /* spin_lock_bh(&(pstapriv->sta_hash_lock)); */
+ /* rtw_free_stainfo(padapter, psta); */
+ /* spin_unlock_bh(&(pstapriv->sta_hash_lock)); */
+
+ DBG_871X_LEVEL(_drv_always_, "ap recv disassoc reason code(%d) sta:%pM\n",
+ reason, GetAddr2Ptr(pframe));
+
+ psta = rtw_get_stainfo(pstapriv, GetAddr2Ptr(pframe));
+ if (psta) {
+ u8 updated = false;
+
+ spin_lock_bh(&pstapriv->asoc_list_lock);
+			if (!list_empty(&psta->asoc_list)) {
+ list_del_init(&psta->asoc_list);
+ pstapriv->asoc_list_cnt--;
+ updated = ap_free_sta(padapter, psta, false, reason);
+
+ }
+ spin_unlock_bh(&pstapriv->asoc_list_lock);
+
+ associated_clients_update(padapter, updated);
+ }
+
+ return _SUCCESS;
+	} else {
+ DBG_871X_LEVEL(_drv_always_, "sta recv disassoc reason code(%d) sta:%pM\n",
+ reason, GetAddr3Ptr(pframe));
+
+ receive_disconnect(padapter, GetAddr3Ptr(pframe), reason);
+ }
+ pmlmepriv->LinkDetectInfo.bBusyTraffic = false;
+ return _SUCCESS;
+
+}
+
+unsigned int OnAtim(struct adapter *padapter, union recv_frame *precv_frame)
+{
+ DBG_871X("%s\n", __func__);
+ return _SUCCESS;
+}
+
+unsigned int on_action_spct(struct adapter *padapter, union recv_frame *precv_frame)
+{
+ unsigned int ret = _FAIL;
+ struct sta_info *psta = NULL;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ u8 *pframe = precv_frame->u.hdr.rx_data;
+ u8 *frame_body = (u8 *)(pframe + sizeof(struct ieee80211_hdr_3addr));
+ u8 category;
+ u8 action;
+
+ DBG_871X(FUNC_NDEV_FMT"\n", FUNC_NDEV_ARG(padapter->pnetdev));
+
+ psta = rtw_get_stainfo(pstapriv, GetAddr2Ptr(pframe));
+
+ if (!psta)
+ goto exit;
+
+ category = frame_body[0];
+ if (category != RTW_WLAN_CATEGORY_SPECTRUM_MGMT)
+ goto exit;
+
+ action = frame_body[1];
+ switch (action) {
+ case RTW_WLAN_ACTION_SPCT_MSR_REQ:
+ case RTW_WLAN_ACTION_SPCT_MSR_RPRT:
+ case RTW_WLAN_ACTION_SPCT_TPC_REQ:
+ case RTW_WLAN_ACTION_SPCT_TPC_RPRT:
+ case RTW_WLAN_ACTION_SPCT_CHL_SWITCH:
+ break;
+ default:
+ break;
+ }
+
+exit:
+ return ret;
+}
+
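+/*
+ * Handle Block Ack action frames (ADDBA request/response and DELBA) and update
+ * the per-TID aggregation and reorder state of the sending station.
+ */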
+unsigned int OnAction_back(struct adapter *padapter, union recv_frame *precv_frame)
+{
+ u8 *addr;
+ struct sta_info *psta = NULL;
+ struct recv_reorder_ctrl *preorder_ctrl;
+ unsigned char *frame_body;
+ unsigned char category, action;
+ unsigned short tid, status, reason_code = 0;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ u8 *pframe = precv_frame->u.hdr.rx_data;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+
+ DBG_871X("%s\n", __func__);
+
+ /* check RA matches or not */
+ if (memcmp(myid(&(padapter->eeprompriv)), GetAddr1Ptr(pframe), ETH_ALEN))/* for if1, sta/ap mode */
+ return _SUCCESS;
+
+ if ((pmlmeinfo->state&0x03) != WIFI_FW_AP_STATE)
+ if (!(pmlmeinfo->state & WIFI_FW_ASSOC_SUCCESS))
+ return _SUCCESS;
+
+ addr = GetAddr2Ptr(pframe);
+ psta = rtw_get_stainfo(pstapriv, addr);
+
+ if (psta == NULL)
+ return _SUCCESS;
+
+ frame_body = (unsigned char *)(pframe + sizeof(struct ieee80211_hdr_3addr));
+
+ category = frame_body[0];
+ if (category == RTW_WLAN_CATEGORY_BACK) {/* representing Block Ack */
+ if (!pmlmeinfo->HT_enable) {
+ return _SUCCESS;
+ }
+
+ action = frame_body[1];
+ DBG_871X("%s, action =%d\n", __func__, action);
+ switch (action) {
+ case RTW_WLAN_ACTION_ADDBA_REQ: /* ADDBA request */
+
+ memcpy(&(pmlmeinfo->ADDBA_req), &(frame_body[2]), sizeof(struct ADDBA_request));
+ /* process_addba_req(padapter, (u8 *)&(pmlmeinfo->ADDBA_req), GetAddr3Ptr(pframe)); */
+ process_addba_req(padapter, (u8 *)&(pmlmeinfo->ADDBA_req), addr);
+
+			if (pmlmeinfo->bAcceptAddbaReq)
+				issue_action_BA(padapter, addr, RTW_WLAN_ACTION_ADDBA_RESP, 0);
+			else
+				issue_action_BA(padapter, addr, RTW_WLAN_ACTION_ADDBA_RESP, 37); /* reject ADDBA Req */
+
+ break;
+
+ case RTW_WLAN_ACTION_ADDBA_RESP: /* ADDBA response */
+ status = RTW_GET_LE16(&frame_body[3]);
+ tid = ((frame_body[5] >> 2) & 0x7);
+
+ if (status == 0) {
+ /* successful */
+ DBG_871X("agg_enable for TID =%d\n", tid);
+ psta->htpriv.agg_enable_bitmap |= 1 << tid;
+ psta->htpriv.candidate_tid_bitmap &= ~BIT(tid);
+			} else {
+ psta->htpriv.agg_enable_bitmap &= ~BIT(tid);
+ }
+
+ if (psta->state & WIFI_STA_ALIVE_CHK_STATE) {
+ DBG_871X("%s alive check - rx ADDBA response\n", __func__);
+ psta->htpriv.agg_enable_bitmap &= ~BIT(tid);
+ psta->expire_to = pstapriv->expire_to;
+ psta->state ^= WIFI_STA_ALIVE_CHK_STATE;
+ }
+
+ /* DBG_871X("marc: ADDBA RSP: %x\n", pmlmeinfo->agg_enable_bitmap); */
+ break;
+
+ case RTW_WLAN_ACTION_DELBA: /* DELBA */
+ if ((frame_body[3] & BIT(3)) == 0) {
+ psta->htpriv.agg_enable_bitmap &= ~(1 << ((frame_body[3] >> 4) & 0xf));
+ psta->htpriv.candidate_tid_bitmap &= ~(1 << ((frame_body[3] >> 4) & 0xf));
+
+ /* reason_code = frame_body[4] | (frame_body[5] << 8); */
+ reason_code = RTW_GET_LE16(&frame_body[4]);
+ } else if ((frame_body[3] & BIT(3)) == BIT(3)) {
+ tid = (frame_body[3] >> 4) & 0x0F;
+
+ preorder_ctrl = &psta->recvreorder_ctrl[tid];
+ preorder_ctrl->enable = false;
+ preorder_ctrl->indicate_seq = 0xffff;
+ #ifdef DBG_RX_SEQ
+ DBG_871X("DBG_RX_SEQ %s:%d indicate_seq:%u\n", __func__, __LINE__,
+ preorder_ctrl->indicate_seq);
+ #endif
+ }
+
+ DBG_871X("%s(): DELBA: %x(%x)\n", __func__, pmlmeinfo->agg_enable_bitmap, reason_code);
+ /* todo: how to notify the host while receiving DELETE BA */
+ break;
+
+ default:
+ break;
+ }
+ }
+ return _SUCCESS;
+}
+
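+/*
+ * Drop retransmitted public action frames: a retried frame whose sequence
+ * control (and, when given, dialog token) matches the last cached one is
+ * rejected; otherwise the new values are cached.
+ */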
+static s32 rtw_action_public_decache(union recv_frame *recv_frame, s32 token)
+{
+ struct adapter *adapter = recv_frame->u.hdr.adapter;
+ struct mlme_ext_priv *mlmeext = &(adapter->mlmeextpriv);
+ u8 *frame = recv_frame->u.hdr.rx_data;
+ u16 seq_ctrl = ((recv_frame->u.hdr.attrib.seq_num&0xffff) << 4) |
+ (recv_frame->u.hdr.attrib.frag_num & 0xf);
+
+ if (GetRetry(frame)) {
+ if (token >= 0) {
+ if ((seq_ctrl == mlmeext->action_public_rxseq)
+ && (token == mlmeext->action_public_dialog_token)) {
+ DBG_871X(FUNC_ADPT_FMT" seq_ctrl = 0x%x, rxseq = 0x%x, token:%d\n",
+ FUNC_ADPT_ARG(adapter), seq_ctrl, mlmeext->action_public_rxseq, token);
+ return _FAIL;
+ }
+ } else {
+ if (seq_ctrl == mlmeext->action_public_rxseq) {
+ DBG_871X(FUNC_ADPT_FMT" seq_ctrl = 0x%x, rxseq = 0x%x\n",
+ FUNC_ADPT_ARG(adapter), seq_ctrl, mlmeext->action_public_rxseq);
+ return _FAIL;
+ }
+ }
+ }
+
+ mlmeext->action_public_rxseq = seq_ctrl;
+
+ if (token >= 0)
+ mlmeext->action_public_dialog_token = token;
+
+ return _SUCCESS;
+}
+
+static unsigned int on_action_public_p2p(union recv_frame *precv_frame)
+{
+ u8 *pframe = precv_frame->u.hdr.rx_data;
+ u8 *frame_body;
+ u8 dialogToken = 0;
+
+ frame_body = (unsigned char *)(pframe + sizeof(struct ieee80211_hdr_3addr));
+
+ dialogToken = frame_body[7];
+
+ if (rtw_action_public_decache(precv_frame, dialogToken) == _FAIL)
+ return _FAIL;
+
+ return _SUCCESS;
+}
+
+static unsigned int on_action_public_vendor(union recv_frame *precv_frame)
+{
+ unsigned int ret = _FAIL;
+ u8 *pframe = precv_frame->u.hdr.rx_data;
+ u8 *frame_body = pframe + sizeof(struct ieee80211_hdr_3addr);
+
+ if (!memcmp(frame_body + 2, P2P_OUI, 4)) {
+ ret = on_action_public_p2p(precv_frame);
+ }
+
+ return ret;
+}
+
+static unsigned int on_action_public_default(union recv_frame *precv_frame, u8 action)
+{
+ unsigned int ret = _FAIL;
+ u8 *pframe = precv_frame->u.hdr.rx_data;
+ uint frame_len = precv_frame->u.hdr.len;
+ u8 *frame_body = pframe + sizeof(struct ieee80211_hdr_3addr);
+ u8 token;
+ struct adapter *adapter = precv_frame->u.hdr.adapter;
+ int cnt = 0;
+ char msg[64];
+
+ token = frame_body[2];
+
+ if (rtw_action_public_decache(precv_frame, token) == _FAIL)
+ goto exit;
+
+ cnt += sprintf((msg+cnt), "%s(token:%u)", action_public_str(action), token);
+ rtw_cfg80211_rx_action(adapter, pframe, frame_len, msg);
+
+ ret = _SUCCESS;
+
+exit:
+ return ret;
+}
+
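+/* Dispatch received public action frames: vendor-specific (P2P) frames go to
+ * the vendor handler, everything else to the default handler. */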
+unsigned int on_action_public(struct adapter *padapter, union recv_frame *precv_frame)
+{
+ unsigned int ret = _FAIL;
+ u8 *pframe = precv_frame->u.hdr.rx_data;
+ u8 *frame_body = pframe + sizeof(struct ieee80211_hdr_3addr);
+ u8 category, action;
+
+ /* check RA matches or not */
+ if (memcmp(myid(&(padapter->eeprompriv)), GetAddr1Ptr(pframe), ETH_ALEN))
+ goto exit;
+
+ category = frame_body[0];
+ if (category != RTW_WLAN_CATEGORY_PUBLIC)
+ goto exit;
+
+ action = frame_body[1];
+ switch (action) {
+ case ACT_PUBLIC_VENDOR:
+ ret = on_action_public_vendor(precv_frame);
+ break;
+ default:
+ ret = on_action_public_default(precv_frame, action);
+ break;
+ }
+
+exit:
+ return ret;
+}
+
+unsigned int OnAction_ht(struct adapter *padapter, union recv_frame *precv_frame)
+{
+ u8 *pframe = precv_frame->u.hdr.rx_data;
+ u8 *frame_body = pframe + sizeof(struct ieee80211_hdr_3addr);
+ u8 category, action;
+
+ /* check RA matches or not */
+ if (memcmp(myid(&(padapter->eeprompriv)), GetAddr1Ptr(pframe), ETH_ALEN))
+ goto exit;
+
+ category = frame_body[0];
+ if (category != RTW_WLAN_CATEGORY_HT)
+ goto exit;
+
+ action = frame_body[1];
+ switch (action) {
+ case RTW_WLAN_ACTION_HT_COMPRESS_BEAMFORMING:
+ break;
+ default:
+ break;
+ }
+
+exit:
+
+ return _SUCCESS;
+}
+
+unsigned int OnAction_sa_query(struct adapter *padapter, union recv_frame *precv_frame)
+{
+ u8 *pframe = precv_frame->u.hdr.rx_data;
+ struct rx_pkt_attrib *pattrib = &precv_frame->u.hdr.attrib;
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+ unsigned short tid;
+ /* Baron */
+
+ DBG_871X("OnAction_sa_query\n");
+
+ switch (pframe[WLAN_HDR_A3_LEN+1]) {
+ case 0: /* SA Query req */
+ memcpy(&tid, &pframe[WLAN_HDR_A3_LEN+2], sizeof(unsigned short));
+ DBG_871X("OnAction_sa_query request, action =%d, tid =%04x\n", pframe[WLAN_HDR_A3_LEN+1], tid);
+ issue_action_SA_Query(padapter, GetAddr2Ptr(pframe), 1, tid);
+ break;
+
+ case 1: /* SA Query rsp */
+ del_timer_sync(&pmlmeext->sa_query_timer);
+ DBG_871X("OnAction_sa_query response, action =%d, tid =%04x, cancel timer\n", pframe[WLAN_HDR_A3_LEN+1], pframe[WLAN_HDR_A3_LEN+2]);
+ break;
+ default:
+ break;
+ }
+ if (0) {
+ int pp;
+ printk("pattrib->pktlen = %d =>", pattrib->pkt_len);
+ for (pp = 0; pp < pattrib->pkt_len; pp++)
+ printk(" %02x ", pframe[pp]);
+ printk("\n");
+ }
+
+ return _SUCCESS;
+}
+
+unsigned int OnAction(struct adapter *padapter, union recv_frame *precv_frame)
+{
+ int i;
+ unsigned char category;
+ struct action_handler *ptable;
+ unsigned char *frame_body;
+ u8 *pframe = precv_frame->u.hdr.rx_data;
+
+ frame_body = (unsigned char *)(pframe + sizeof(struct ieee80211_hdr_3addr));
+
+ category = frame_body[0];
+
+	for (i = 0; i < ARRAY_SIZE(OnAction_tbl); i++) {
+ ptable = &OnAction_tbl[i];
+
+ if (category == ptable->num)
+ ptable->func(padapter, precv_frame);
+
+ }
+
+ return _SUCCESS;
+
+}
+
+unsigned int DoReserved(struct adapter *padapter, union recv_frame *precv_frame)
+{
+
+ /* DBG_871X("rcvd mgt frame(%x, %x)\n", (GetFrameSubType(pframe) >> 4), *(unsigned int *)GetAddr1Ptr(pframe)); */
+ return _SUCCESS;
+}
+
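+/* Allocate a management xmit frame together with an extended xmit buffer and
+ * tie the two together. */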
+static struct xmit_frame *_alloc_mgtxmitframe(struct xmit_priv *pxmitpriv, bool once)
+{
+ struct xmit_frame *pmgntframe;
+ struct xmit_buf *pxmitbuf;
+
+ if (once)
+ pmgntframe = rtw_alloc_xmitframe_once(pxmitpriv);
+ else
+ pmgntframe = rtw_alloc_xmitframe_ext(pxmitpriv);
+
+ if (pmgntframe == NULL) {
+ DBG_871X(FUNC_ADPT_FMT" alloc xmitframe fail, once:%d\n", FUNC_ADPT_ARG(pxmitpriv->adapter), once);
+ goto exit;
+ }
+
+ pxmitbuf = rtw_alloc_xmitbuf_ext(pxmitpriv);
+ if (pxmitbuf == NULL) {
+ DBG_871X(FUNC_ADPT_FMT" alloc xmitbuf fail\n", FUNC_ADPT_ARG(pxmitpriv->adapter));
+ rtw_free_xmitframe(pxmitpriv, pmgntframe);
+ pmgntframe = NULL;
+ goto exit;
+ }
+
+ pmgntframe->frame_tag = MGNT_FRAMETAG;
+ pmgntframe->pxmitbuf = pxmitbuf;
+ pmgntframe->buf_addr = pxmitbuf->pbuf;
+ pxmitbuf->priv_data = pmgntframe;
+
+exit:
+ return pmgntframe;
+
+}
+
+inline struct xmit_frame *alloc_mgtxmitframe(struct xmit_priv *pxmitpriv)
+{
+ return _alloc_mgtxmitframe(pxmitpriv, false);
+}
+
+/****************************************************************************
+
+Following are some TX functions for WiFi MLME
+
+*****************************************************************************/
+
+void update_mgnt_tx_rate(struct adapter *padapter, u8 rate)
+{
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+
+ pmlmeext->tx_rate = rate;
+ /* DBG_871X("%s(): rate = %x\n", __func__, rate); */
+}
+
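+/*
+ * Fill the default TX attributes for a management frame: single fragment, no
+ * privacy, 20MHz bandwidth, rate taken from pmlmeext->tx_rate (CCK 1M selects
+ * the 11B rate id, otherwise 11G).
+ */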
+void update_mgntframe_attrib(struct adapter *padapter, struct pkt_attrib *pattrib)
+{
+ u8 wireless_mode;
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+
+ /* memset((u8 *)(pattrib), 0, sizeof(struct pkt_attrib)); */
+
+ pattrib->hdrlen = 24;
+ pattrib->nr_frags = 1;
+ pattrib->priority = 7;
+ pattrib->mac_id = 0;
+ pattrib->qsel = 0x12;
+
+ pattrib->pktlen = 0;
+
+ if (pmlmeext->tx_rate == IEEE80211_CCK_RATE_1MB)
+ wireless_mode = WIRELESS_11B;
+ else
+ wireless_mode = WIRELESS_11G;
+ pattrib->raid = rtw_get_mgntframe_raid(padapter, wireless_mode);
+ pattrib->rate = pmlmeext->tx_rate;
+
+ pattrib->encrypt = _NO_PRIVACY_;
+ pattrib->bswenc = false;
+
+ pattrib->qos_en = false;
+ pattrib->ht_en = false;
+ pattrib->bwmode = CHANNEL_WIDTH_20;
+ pattrib->ch_offset = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
+ pattrib->sgi = false;
+
+ pattrib->seqnum = pmlmeext->mgnt_seq;
+
+ pattrib->retry_ctrl = true;
+
+ pattrib->mbssid = 0;
+
+}
+
+void update_mgntframe_attrib_addr(struct adapter *padapter, struct xmit_frame *pmgntframe)
+{
+ u8 *pframe;
+ struct pkt_attrib *pattrib = &pmgntframe->attrib;
+
+ pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
+
+ memcpy(pattrib->ra, GetAddr1Ptr(pframe), ETH_ALEN);
+ memcpy(pattrib->ta, GetAddr2Ptr(pframe), ETH_ALEN);
+}
+
+void dump_mgntframe(struct adapter *padapter, struct xmit_frame *pmgntframe)
+{
+	if (padapter->bSurpriseRemoved || padapter->bDriverStopped) {
+ rtw_free_xmitbuf(&padapter->xmitpriv, pmgntframe->pxmitbuf);
+ rtw_free_xmitframe(&padapter->xmitpriv, pmgntframe);
+ return;
+ }
+
+ rtw_hal_mgnt_xmit(padapter, pmgntframe);
+}
+
+s32 dump_mgntframe_and_wait(struct adapter *padapter, struct xmit_frame *pmgntframe, int timeout_ms)
+{
+ s32 ret = _FAIL;
+ _irqL irqL;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+ struct xmit_buf *pxmitbuf = pmgntframe->pxmitbuf;
+ struct submit_ctx sctx;
+
+	if (padapter->bSurpriseRemoved || padapter->bDriverStopped) {
+ rtw_free_xmitbuf(&padapter->xmitpriv, pmgntframe->pxmitbuf);
+ rtw_free_xmitframe(&padapter->xmitpriv, pmgntframe);
+ return ret;
+ }
+
+ rtw_sctx_init(&sctx, timeout_ms);
+ pxmitbuf->sctx = &sctx;
+
+ ret = rtw_hal_mgnt_xmit(padapter, pmgntframe);
+
+ if (ret == _SUCCESS)
+ ret = rtw_sctx_wait(&sctx, __func__);
+
+ spin_lock_irqsave(&pxmitpriv->lock_sctx, irqL);
+ pxmitbuf->sctx = NULL;
+ spin_unlock_irqrestore(&pxmitpriv->lock_sctx, irqL);
+
+ return ret;
+}
+
+s32 dump_mgntframe_and_wait_ack(struct adapter *padapter, struct xmit_frame *pmgntframe)
+{
+	static u8 seq_no;
+ s32 ret = _FAIL;
+ u32 timeout_ms = 500;/* 500ms */
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+
+	if (padapter->bSurpriseRemoved || padapter->bDriverStopped) {
+ rtw_free_xmitbuf(&padapter->xmitpriv, pmgntframe->pxmitbuf);
+ rtw_free_xmitframe(&padapter->xmitpriv, pmgntframe);
+ return -1;
+ }
+
+ if (mutex_lock_interruptible(&pxmitpriv->ack_tx_mutex) == 0) {
+ pxmitpriv->ack_tx = true;
+ pxmitpriv->seq_no = seq_no++;
+ pmgntframe->ack_report = 1;
+ if (rtw_hal_mgnt_xmit(padapter, pmgntframe) == _SUCCESS) {
+ ret = rtw_ack_tx_wait(pxmitpriv, timeout_ms);
+ }
+
+ pxmitpriv->ack_tx = false;
+ mutex_unlock(&pxmitpriv->ack_tx_mutex);
+ }
+
+ return ret;
+}
+
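+/*
+ * Rewrite the SSID IE in beacon IEs according to the hidden SSID mode:
+ * mode 1 strips the SSID bytes (zero-length IE), mode 2 zeroes them out in
+ * place. Returns the resulting change in total IE length.
+ */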
+static int update_hidden_ssid(u8 *ies, u32 ies_len, u8 hidden_ssid_mode)
+{
+ u8 *ssid_ie;
+ sint ssid_len_ori;
+ int len_diff = 0;
+
+ ssid_ie = rtw_get_ie(ies, WLAN_EID_SSID, &ssid_len_ori, ies_len);
+
+ /* DBG_871X("%s hidden_ssid_mode:%u, ssid_ie:%p, ssid_len_ori:%d\n", __func__, hidden_ssid_mode, ssid_ie, ssid_len_ori); */
+
+ if (ssid_ie && ssid_len_ori > 0) {
+ switch (hidden_ssid_mode) {
+ case 1:
+ {
+ u8 *next_ie = ssid_ie + 2 + ssid_len_ori;
+ u32 remain_len = 0;
+
+ remain_len = ies_len - (next_ie-ies);
+
+ ssid_ie[1] = 0;
+ memcpy(ssid_ie+2, next_ie, remain_len);
+ len_diff -= ssid_len_ori;
+
+ break;
+ }
+ case 2:
+ memset(&ssid_ie[2], 0, ssid_len_ori);
+ break;
+ default:
+ break;
+ }
+ }
+
+ return len_diff;
+}
+
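+/*
+ * Build and transmit a beacon frame. In AP mode the stored network IEs are
+ * copied (with the SSID hidden if configured); in ad-hoc mode the beacon is
+ * constructed field by field.
+ */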
+void issue_beacon(struct adapter *padapter, int timeout_ms)
+{
+ struct xmit_frame *pmgntframe;
+ struct pkt_attrib *pattrib;
+ unsigned char *pframe;
+ struct ieee80211_hdr *pwlanhdr;
+ __le16 *fctrl;
+ unsigned int rate_len;
+ struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct wlan_bssid_ex *cur_network = &(pmlmeinfo->network);
+ u8 bc_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+
+ /* DBG_871X("%s\n", __func__); */
+
+ pmgntframe = alloc_mgtxmitframe(pxmitpriv);
+ if (pmgntframe == NULL) {
+ DBG_871X("%s, alloc mgnt frame fail\n", __func__);
+ return;
+ }
+
+ spin_lock_bh(&pmlmepriv->bcn_update_lock);
+
+ /* update attribute */
+ pattrib = &pmgntframe->attrib;
+ update_mgntframe_attrib(padapter, pattrib);
+ pattrib->qsel = 0x10;
+
+ memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
+
+ pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
+
+
+ fctrl = &(pwlanhdr->frame_control);
+ *(fctrl) = 0;
+
+ memcpy(pwlanhdr->addr1, bc_addr, ETH_ALEN);
+ memcpy(pwlanhdr->addr2, myid(&(padapter->eeprompriv)), ETH_ALEN);
+ memcpy(pwlanhdr->addr3, get_my_bssid(cur_network), ETH_ALEN);
+
+ SetSeqNum(pwlanhdr, 0/*pmlmeext->mgnt_seq*/);
+ /* pmlmeext->mgnt_seq++; */
+ SetFrameSubType(pframe, WIFI_BEACON);
+
+ pframe += sizeof(struct ieee80211_hdr_3addr);
+ pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
+
+ if ((pmlmeinfo->state&0x03) == WIFI_FW_AP_STATE) {
+ /* DBG_871X("ie len =%d\n", cur_network->IELength); */
+ {
+ int len_diff;
+ memcpy(pframe, cur_network->IEs, cur_network->IELength);
+ len_diff = update_hidden_ssid(
+ pframe+_BEACON_IE_OFFSET_
+ , cur_network->IELength-_BEACON_IE_OFFSET_
+ , pmlmeinfo->hidden_ssid_mode
+ );
+ pframe += (cur_network->IELength+len_diff);
+ pattrib->pktlen += (cur_network->IELength+len_diff);
+ }
+
+ {
+ u8 *wps_ie;
+ uint wps_ielen;
+ u8 sr = 0;
+ wps_ie = rtw_get_wps_ie(pmgntframe->buf_addr+TXDESC_OFFSET+sizeof(struct ieee80211_hdr_3addr)+_BEACON_IE_OFFSET_,
+ pattrib->pktlen-sizeof(struct ieee80211_hdr_3addr)-_BEACON_IE_OFFSET_, NULL, &wps_ielen);
+ if (wps_ie && wps_ielen > 0) {
+ rtw_get_wps_attr_content(wps_ie, wps_ielen, WPS_ATTR_SELECTED_REGISTRAR, (u8 *)(&sr), NULL);
+ }
+ if (sr != 0)
+ set_fwstate(pmlmepriv, WIFI_UNDER_WPS);
+ else
+ _clr_fwstate_(pmlmepriv, WIFI_UNDER_WPS);
+ }
+
+ goto _issue_bcn;
+
+ }
+
+ /* below for ad-hoc mode */
+
+ /* timestamp will be inserted by hardware */
+ pframe += 8;
+ pattrib->pktlen += 8;
+
+ /* beacon interval: 2 bytes */
+
+ memcpy(pframe, (unsigned char *)(rtw_get_beacon_interval_from_ie(cur_network->IEs)), 2);
+
+ pframe += 2;
+ pattrib->pktlen += 2;
+
+ /* capability info: 2 bytes */
+
+ memcpy(pframe, (unsigned char *)(rtw_get_capability_from_ie(cur_network->IEs)), 2);
+
+ pframe += 2;
+ pattrib->pktlen += 2;
+
+ /* SSID */
+ pframe = rtw_set_ie(pframe, _SSID_IE_, cur_network->Ssid.SsidLength, cur_network->Ssid.Ssid, &pattrib->pktlen);
+
+ /* supported rates... */
+ rate_len = rtw_get_rateset_len(cur_network->SupportedRates);
+ pframe = rtw_set_ie(pframe, _SUPPORTEDRATES_IE_, ((rate_len > 8) ? 8 : rate_len), cur_network->SupportedRates, &pattrib->pktlen);
+
+ /* DS parameter set */
+ pframe = rtw_set_ie(pframe, _DSSET_IE_, 1, (unsigned char *)&(cur_network->Configuration.DSConfig), &pattrib->pktlen);
+
+ /* if ((pmlmeinfo->state&0x03) == WIFI_FW_ADHOC_STATE) */
+ {
+ u8 erpinfo = 0;
+ u32 ATIMWindow;
+ /* IBSS Parameter Set... */
+ /* ATIMWindow = cur->Configuration.ATIMWindow; */
+ ATIMWindow = 0;
+ pframe = rtw_set_ie(pframe, _IBSS_PARA_IE_, 2, (unsigned char *)(&ATIMWindow), &pattrib->pktlen);
+
+ /* ERP IE */
+ pframe = rtw_set_ie(pframe, _ERPINFO_IE_, 1, &erpinfo, &pattrib->pktlen);
+ }
+
+
+	/* EXTENDED SUPPORTED RATES */
+ if (rate_len > 8) {
+ pframe = rtw_set_ie(pframe, _EXT_SUPPORTEDRATES_IE_, (rate_len - 8), (cur_network->SupportedRates + 8), &pattrib->pktlen);
+ }
+
+
+ /* todo:HT for adhoc */
+
+_issue_bcn:
+
+ pmlmepriv->update_bcn = false;
+
+ spin_unlock_bh(&pmlmepriv->bcn_update_lock);
+
+ if ((pattrib->pktlen + TXDESC_SIZE) > 512) {
+ DBG_871X("beacon frame too large\n");
+ return;
+ }
+
+ pattrib->last_txcmdsz = pattrib->pktlen;
+
+ /* DBG_871X("issue bcn_sz =%d\n", pattrib->last_txcmdsz); */
+ if (timeout_ms > 0)
+ dump_mgntframe_and_wait(padapter, pmgntframe, timeout_ms);
+ else
+ dump_mgntframe(padapter, pmgntframe);
+
+}
+
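+/*
+ * Build and transmit a probe response. In AP mode the network IEs are copied,
+ * the configured WPS probe-response IE is inserted if present and the SSID IE
+ * is refreshed from cur_network->Ssid; otherwise an ad-hoc style response is
+ * built field by field.
+ */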
+void issue_probersp(struct adapter *padapter, unsigned char *da, u8 is_valid_p2p_probereq)
+{
+ struct xmit_frame *pmgntframe;
+ struct pkt_attrib *pattrib;
+ unsigned char *pframe;
+ struct ieee80211_hdr *pwlanhdr;
+ __le16 *fctrl;
+ unsigned char *mac, *bssid;
+ struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
+
+ u8 *pwps_ie;
+ uint wps_ielen;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct wlan_bssid_ex *cur_network = &(pmlmeinfo->network);
+ unsigned int rate_len;
+
+ /* DBG_871X("%s\n", __func__); */
+
+ if (da == NULL)
+ return;
+
+ pmgntframe = alloc_mgtxmitframe(pxmitpriv);
+ if (pmgntframe == NULL) {
+ DBG_871X("%s, alloc mgnt frame fail\n", __func__);
+ return;
+ }
+
+
+ /* update attribute */
+ pattrib = &pmgntframe->attrib;
+ update_mgntframe_attrib(padapter, pattrib);
+
+ memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
+
+ pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
+
+ mac = myid(&(padapter->eeprompriv));
+ bssid = cur_network->MacAddress;
+
+ fctrl = &(pwlanhdr->frame_control);
+ *(fctrl) = 0;
+ memcpy(pwlanhdr->addr1, da, ETH_ALEN);
+ memcpy(pwlanhdr->addr2, mac, ETH_ALEN);
+ memcpy(pwlanhdr->addr3, bssid, ETH_ALEN);
+
+ SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
+ pmlmeext->mgnt_seq++;
+ SetFrameSubType(fctrl, WIFI_PROBERSP);
+
+ pattrib->hdrlen = sizeof(struct ieee80211_hdr_3addr);
+ pattrib->pktlen = pattrib->hdrlen;
+ pframe += pattrib->hdrlen;
+
+
+ if (cur_network->IELength > MAX_IE_SZ)
+ return;
+
+ if ((pmlmeinfo->state&0x03) == WIFI_FW_AP_STATE) {
+ pwps_ie = rtw_get_wps_ie(cur_network->IEs+_FIXED_IE_LENGTH_, cur_network->IELength-_FIXED_IE_LENGTH_, NULL, &wps_ielen);
+
+		/* insert & update wps_probe_resp_ie */
+ if ((pmlmepriv->wps_probe_resp_ie != NULL) && pwps_ie && (wps_ielen > 0)) {
+ uint wps_offset, remainder_ielen;
+ u8 *premainder_ie;
+
+ wps_offset = (uint)(pwps_ie - cur_network->IEs);
+
+ premainder_ie = pwps_ie + wps_ielen;
+
+ remainder_ielen = cur_network->IELength - wps_offset - wps_ielen;
+
+ memcpy(pframe, cur_network->IEs, wps_offset);
+ pframe += wps_offset;
+ pattrib->pktlen += wps_offset;
+
+ wps_ielen = (uint)pmlmepriv->wps_probe_resp_ie[1];/* to get ie data len */
+ if ((wps_offset+wps_ielen+2) <= MAX_IE_SZ) {
+ memcpy(pframe, pmlmepriv->wps_probe_resp_ie, wps_ielen+2);
+ pframe += wps_ielen+2;
+ pattrib->pktlen += wps_ielen+2;
+ }
+
+ if ((wps_offset+wps_ielen+2+remainder_ielen) <= MAX_IE_SZ) {
+ memcpy(pframe, premainder_ie, remainder_ielen);
+ pframe += remainder_ielen;
+ pattrib->pktlen += remainder_ielen;
+ }
+		} else {
+ memcpy(pframe, cur_network->IEs, cur_network->IELength);
+ pframe += cur_network->IELength;
+ pattrib->pktlen += cur_network->IELength;
+ }
+
+ /* retrieve SSID IE from cur_network->Ssid */
+ {
+ u8 *ssid_ie;
+ sint ssid_ielen;
+ sint ssid_ielen_diff;
+ u8 buf[MAX_IE_SZ];
+ u8 *ies = pmgntframe->buf_addr+TXDESC_OFFSET+sizeof(struct ieee80211_hdr_3addr);
+
+ ssid_ie = rtw_get_ie(ies+_FIXED_IE_LENGTH_, _SSID_IE_, &ssid_ielen,
+ (pframe-ies)-_FIXED_IE_LENGTH_);
+
+ ssid_ielen_diff = cur_network->Ssid.SsidLength - ssid_ielen;
+
+ if (ssid_ie && cur_network->Ssid.SsidLength) {
+ uint remainder_ielen;
+ u8 *remainder_ie;
+ remainder_ie = ssid_ie+2;
+ remainder_ielen = (pframe-remainder_ie);
+
+ if (remainder_ielen > MAX_IE_SZ) {
+ DBG_871X_LEVEL(_drv_warning_, FUNC_ADPT_FMT" remainder_ielen > MAX_IE_SZ\n", FUNC_ADPT_ARG(padapter));
+ remainder_ielen = MAX_IE_SZ;
+ }
+
+ memcpy(buf, remainder_ie, remainder_ielen);
+ memcpy(remainder_ie+ssid_ielen_diff, buf, remainder_ielen);
+ *(ssid_ie+1) = cur_network->Ssid.SsidLength;
+ memcpy(ssid_ie+2, cur_network->Ssid.Ssid, cur_network->Ssid.SsidLength);
+
+ pframe += ssid_ielen_diff;
+ pattrib->pktlen += ssid_ielen_diff;
+ }
+ }
+	} else {
+ /* timestamp will be inserted by hardware */
+ pframe += 8;
+ pattrib->pktlen += 8;
+
+ /* beacon interval: 2 bytes */
+
+ memcpy(pframe, (unsigned char *)(rtw_get_beacon_interval_from_ie(cur_network->IEs)), 2);
+
+ pframe += 2;
+ pattrib->pktlen += 2;
+
+ /* capability info: 2 bytes */
+
+ memcpy(pframe, (unsigned char *)(rtw_get_capability_from_ie(cur_network->IEs)), 2);
+
+ pframe += 2;
+ pattrib->pktlen += 2;
+
+ /* below for ad-hoc mode */
+
+ /* SSID */
+ pframe = rtw_set_ie(pframe, _SSID_IE_, cur_network->Ssid.SsidLength, cur_network->Ssid.Ssid, &pattrib->pktlen);
+
+ /* supported rates... */
+ rate_len = rtw_get_rateset_len(cur_network->SupportedRates);
+ pframe = rtw_set_ie(pframe, _SUPPORTEDRATES_IE_, ((rate_len > 8) ? 8 : rate_len), cur_network->SupportedRates, &pattrib->pktlen);
+
+ /* DS parameter set */
+ pframe = rtw_set_ie(pframe, _DSSET_IE_, 1, (unsigned char *)&(cur_network->Configuration.DSConfig), &pattrib->pktlen);
+
+ if ((pmlmeinfo->state&0x03) == WIFI_FW_ADHOC_STATE) {
+ u8 erpinfo = 0;
+ u32 ATIMWindow;
+ /* IBSS Parameter Set... */
+ /* ATIMWindow = cur->Configuration.ATIMWindow; */
+ ATIMWindow = 0;
+ pframe = rtw_set_ie(pframe, _IBSS_PARA_IE_, 2, (unsigned char *)(&ATIMWindow), &pattrib->pktlen);
+
+ /* ERP IE */
+ pframe = rtw_set_ie(pframe, _ERPINFO_IE_, 1, &erpinfo, &pattrib->pktlen);
+ }
+
+
+		/* EXTENDED SUPPORTED RATES */
+ if (rate_len > 8) {
+ pframe = rtw_set_ie(pframe, _EXT_SUPPORTEDRATES_IE_, (rate_len - 8), (cur_network->SupportedRates + 8), &pattrib->pktlen);
+ }
+
+
+ /* todo:HT for adhoc */
+
+ }
+
+#ifdef CONFIG_AUTO_AP_MODE
+{
+ struct sta_info *psta;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+
+ DBG_871X("(%s)\n", __func__);
+
+ /* check rc station */
+ psta = rtw_get_stainfo(pstapriv, da);
+ if (psta && psta->isrc && psta->pid > 0) {
+ u8 RC_OUI[4] = {0x00, 0xE0, 0x4C, 0x0A};
+ u8 RC_INFO[14] = {0};
+ /* EID[1] + EID_LEN[1] + RC_OUI[4] + MAC[6] + PairingID[2] + ChannelNum[2] */
+ u16 cu_ch = (u16)cur_network->Configuration.DSConfig;
+
+ DBG_871X("%s, reply rc(pid = 0x%x) device "MAC_FMT" in ch =%d\n", __func__,
+ psta->pid, MAC_ARG(psta->hwaddr), cu_ch);
+
+ /* append vendor specific ie */
+ memcpy(RC_INFO, RC_OUI, sizeof(RC_OUI));
+ memcpy(&RC_INFO[4], mac, ETH_ALEN);
+ memcpy(&RC_INFO[10], (u8 *)&psta->pid, 2);
+ memcpy(&RC_INFO[12], (u8 *)&cu_ch, 2);
+
+ pframe = rtw_set_ie(pframe, _VENDOR_SPECIFIC_IE_, sizeof(RC_INFO), RC_INFO, &pattrib->pktlen);
+ }
+}
+#endif /* CONFIG_AUTO_AP_MODE */
+
+
+ pattrib->last_txcmdsz = pattrib->pktlen;
+
+
+ dump_mgntframe(padapter, pmgntframe);
+
+ return;
+
+}
+
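+/*
+ * Build and transmit a probe request (unicast when da is given, otherwise
+ * broadcast), optionally appending the WPS probe-request IE and optionally
+ * waiting for the TX ack report.
+ */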
+static int _issue_probereq(struct adapter *padapter, struct ndis_802_11_ssid *pssid, u8 *da, u8 ch, bool append_wps, int wait_ack)
+{
+ int ret = _FAIL;
+ struct xmit_frame *pmgntframe;
+ struct pkt_attrib *pattrib;
+ unsigned char *pframe;
+ struct ieee80211_hdr *pwlanhdr;
+ __le16 *fctrl;
+ unsigned char *mac;
+ unsigned char bssrate[NumRates];
+ struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+ int bssrate_len = 0;
+ u8 bc_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_notice_, ("+issue_probereq\n"));
+
+ pmgntframe = alloc_mgtxmitframe(pxmitpriv);
+ if (pmgntframe == NULL)
+ goto exit;
+
+ /* update attribute */
+ pattrib = &pmgntframe->attrib;
+ update_mgntframe_attrib(padapter, pattrib);
+
+
+ memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
+
+ pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
+
+ mac = myid(&(padapter->eeprompriv));
+
+ fctrl = &(pwlanhdr->frame_control);
+ *(fctrl) = 0;
+
+ if (da) {
+ /* unicast probe request frame */
+ memcpy(pwlanhdr->addr1, da, ETH_ALEN);
+ memcpy(pwlanhdr->addr3, da, ETH_ALEN);
+	} else {
+ /* broadcast probe request frame */
+ memcpy(pwlanhdr->addr1, bc_addr, ETH_ALEN);
+ memcpy(pwlanhdr->addr3, bc_addr, ETH_ALEN);
+ }
+
+ memcpy(pwlanhdr->addr2, mac, ETH_ALEN);
+
+ SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
+ pmlmeext->mgnt_seq++;
+ SetFrameSubType(pframe, WIFI_PROBEREQ);
+
+ pframe += sizeof(struct ieee80211_hdr_3addr);
+ pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
+
+ if (pssid)
+ pframe = rtw_set_ie(pframe, _SSID_IE_, pssid->SsidLength, pssid->Ssid, &(pattrib->pktlen));
+ else
+ pframe = rtw_set_ie(pframe, _SSID_IE_, 0, NULL, &(pattrib->pktlen));
+
+ get_rate_set(padapter, bssrate, &bssrate_len);
+
+ if (bssrate_len > 8) {
+ pframe = rtw_set_ie(pframe, _SUPPORTEDRATES_IE_, 8, bssrate, &(pattrib->pktlen));
+ pframe = rtw_set_ie(pframe, _EXT_SUPPORTEDRATES_IE_, (bssrate_len - 8), (bssrate + 8), &(pattrib->pktlen));
+	} else {
+ pframe = rtw_set_ie(pframe, _SUPPORTEDRATES_IE_, bssrate_len, bssrate, &(pattrib->pktlen));
+ }
+
+ if (ch)
+ pframe = rtw_set_ie(pframe, _DSSET_IE_, 1, &ch, &pattrib->pktlen);
+
+ if (append_wps) {
+ /* add wps_ie for wps2.0 */
+ if (pmlmepriv->wps_probe_req_ie_len > 0 && pmlmepriv->wps_probe_req_ie) {
+ memcpy(pframe, pmlmepriv->wps_probe_req_ie, pmlmepriv->wps_probe_req_ie_len);
+ pframe += pmlmepriv->wps_probe_req_ie_len;
+ pattrib->pktlen += pmlmepriv->wps_probe_req_ie_len;
+ }
+ }
+
+ pattrib->last_txcmdsz = pattrib->pktlen;
+
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_notice_, ("issuing probe_req, tx_len =%d\n", pattrib->last_txcmdsz));
+
+ if (wait_ack) {
+ ret = dump_mgntframe_and_wait_ack(padapter, pmgntframe);
+ } else {
+ dump_mgntframe(padapter, pmgntframe);
+ ret = _SUCCESS;
+ }
+
+exit:
+ return ret;
+}
+
+inline void issue_probereq(struct adapter *padapter, struct ndis_802_11_ssid *pssid, u8 *da)
+{
+ _issue_probereq(padapter, pssid, da, 0, 1, false);
+}
+
+int issue_probereq_ex(struct adapter *padapter, struct ndis_802_11_ssid *pssid, u8 *da, u8 ch, bool append_wps,
+ int try_cnt, int wait_ms)
+{
+ int ret;
+ int i = 0;
+
+ do {
+		ret = _issue_probereq(padapter, pssid, da, ch, append_wps, wait_ms > 0);
+
+ i++;
+
+ if (padapter->bDriverStopped || padapter->bSurpriseRemoved)
+ break;
+
+ if (i < try_cnt && wait_ms > 0 && ret == _FAIL)
+ msleep(wait_ms);
+
+ } while ((i < try_cnt) && ((ret == _FAIL) || (wait_ms == 0)));
+
+ if (ret != _FAIL) {
+ ret = _SUCCESS;
+ #ifndef DBG_XMIT_ACK
+ goto exit;
+ #endif
+ }
+
+ if (try_cnt && wait_ms) {
+ if (da)
+ DBG_871X(FUNC_ADPT_FMT" to "MAC_FMT", ch:%u%s, %d/%d in %u ms\n",
+ FUNC_ADPT_ARG(padapter), MAC_ARG(da), rtw_get_oper_ch(padapter),
+ ret == _SUCCESS?", acked":"", i, try_cnt, (i + 1) * wait_ms);
+ else
+ DBG_871X(FUNC_ADPT_FMT", ch:%u%s, %d/%d in %u ms\n",
+ FUNC_ADPT_ARG(padapter), rtw_get_oper_ch(padapter),
+ ret == _SUCCESS?", acked":"", i, try_cnt, (i + 1) * wait_ms);
+ }
+exit:
+ return ret;
+}
+
+/* if psta == NULL, it indicates we are the station (client) now... */
+void issue_auth(struct adapter *padapter, struct sta_info *psta, unsigned short status)
+{
+ struct xmit_frame *pmgntframe;
+ struct pkt_attrib *pattrib;
+ unsigned char *pframe;
+ struct ieee80211_hdr *pwlanhdr;
+ __le16 *fctrl;
+ unsigned int val32;
+ unsigned short val16;
+ int use_shared_key = 0;
+ struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ __le16 le_tmp;
+
+ pmgntframe = alloc_mgtxmitframe(pxmitpriv);
+ if (pmgntframe == NULL)
+ return;
+
+ /* update attribute */
+ pattrib = &pmgntframe->attrib;
+ update_mgntframe_attrib(padapter, pattrib);
+
+ memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
+
+ pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
+
+ fctrl = &(pwlanhdr->frame_control);
+ *(fctrl) = 0;
+
+ SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
+ pmlmeext->mgnt_seq++;
+ SetFrameSubType(pframe, WIFI_AUTH);
+
+ pframe += sizeof(struct ieee80211_hdr_3addr);
+ pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
+
+
+ if (psta) { /* for AP mode */
+ memcpy(pwlanhdr->addr1, psta->hwaddr, ETH_ALEN);
+ memcpy(pwlanhdr->addr2, myid(&(padapter->eeprompriv)), ETH_ALEN);
+ memcpy(pwlanhdr->addr3, myid(&(padapter->eeprompriv)), ETH_ALEN);
+
+ /* setting auth algo number */
+ val16 = (u16)psta->authalg;
+
+ if (status != _STATS_SUCCESSFUL_)
+ val16 = 0;
+
+ if (val16)
+ use_shared_key = 1;
+
+ le_tmp = cpu_to_le16(val16);
+
+ pframe = rtw_set_fixed_ie(pframe, _AUTH_ALGM_NUM_, (unsigned char *)&le_tmp, &(pattrib->pktlen));
+
+ /* setting auth seq number */
+ val16 = (u16)psta->auth_seq;
+ le_tmp = cpu_to_le16(val16);
+ pframe = rtw_set_fixed_ie(pframe, _AUTH_SEQ_NUM_, (unsigned char *)&le_tmp, &(pattrib->pktlen));
+
+ /* setting status code... */
+ val16 = status;
+ le_tmp = cpu_to_le16(val16);
+ pframe = rtw_set_fixed_ie(pframe, _STATUS_CODE_, (unsigned char *)&le_tmp, &(pattrib->pktlen));
+
+ /* added challenging text... */
+ if ((psta->auth_seq == 2) && (psta->state & WIFI_FW_AUTH_STATE) && (use_shared_key == 1))
+ pframe = rtw_set_ie(pframe, _CHLGETXT_IE_, 128, psta->chg_txt, &(pattrib->pktlen));
+
+	} else {
+ memcpy(pwlanhdr->addr1, get_my_bssid(&pmlmeinfo->network), ETH_ALEN);
+ memcpy(pwlanhdr->addr2, myid(&padapter->eeprompriv), ETH_ALEN);
+ memcpy(pwlanhdr->addr3, get_my_bssid(&pmlmeinfo->network), ETH_ALEN);
+
+ /* setting auth algo number */
+ val16 = (pmlmeinfo->auth_algo == dot11AuthAlgrthm_Shared) ? 1 : 0;/* 0:OPEN System, 1:Shared key */
+ if (val16) {
+ use_shared_key = 1;
+ }
+ le_tmp = cpu_to_le16(val16);
+ /* DBG_871X("%s auth_algo = %s auth_seq =%d\n", __func__, (pmlmeinfo->auth_algo == 0)?"OPEN":"SHARED", pmlmeinfo->auth_seq); */
+
+ /* setting IV for auth seq #3 */
+ if ((pmlmeinfo->auth_seq == 3) && (pmlmeinfo->state & WIFI_FW_AUTH_STATE) && (use_shared_key == 1)) {
+ __le32 le_tmp32;
+
+ /* DBG_871X("==> iv(%d), key_index(%d)\n", pmlmeinfo->iv, pmlmeinfo->key_index); */
+ val32 = ((pmlmeinfo->iv++) | (pmlmeinfo->key_index << 30));
+ le_tmp32 = cpu_to_le32(val32);
+ pframe = rtw_set_fixed_ie(pframe, 4, (unsigned char *)&le_tmp32, &(pattrib->pktlen));
+
+ pattrib->iv_len = 4;
+ }
+
+ pframe = rtw_set_fixed_ie(pframe, _AUTH_ALGM_NUM_, (unsigned char *)&le_tmp, &(pattrib->pktlen));
+
+ /* setting auth seq number */
+ le_tmp = cpu_to_le16(pmlmeinfo->auth_seq);
+ pframe = rtw_set_fixed_ie(pframe, _AUTH_SEQ_NUM_, (unsigned char *)&le_tmp, &(pattrib->pktlen));
+
+
+ /* setting status code... */
+ le_tmp = cpu_to_le16(status);
+ pframe = rtw_set_fixed_ie(pframe, _STATUS_CODE_, (unsigned char *)&le_tmp, &(pattrib->pktlen));
+
+ /* then checking to see if sending challenging text... */
+ if ((pmlmeinfo->auth_seq == 3) && (pmlmeinfo->state & WIFI_FW_AUTH_STATE) && (use_shared_key == 1)) {
+ pframe = rtw_set_ie(pframe, _CHLGETXT_IE_, 128, pmlmeinfo->chg_txt, &(pattrib->pktlen));
+
+ SetPrivacy(fctrl);
+
+ pattrib->hdrlen = sizeof(struct ieee80211_hdr_3addr);
+
+ pattrib->encrypt = _WEP40_;
+
+ pattrib->icv_len = 4;
+
+ pattrib->pktlen += pattrib->icv_len;
+
+ }
+
+ }
+
+ pattrib->last_txcmdsz = pattrib->pktlen;
+
+ rtw_wep_encrypt(padapter, (u8 *)pmgntframe);
+ DBG_871X("%s\n", __func__);
+ dump_mgntframe(padapter, pmgntframe);
+
+ return;
+}
+
+
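+/*
+ * Build and transmit a (re)association response to a station in AP mode,
+ * carrying capability, status, AID, rates and the HT/WMM/WPS IEs taken from
+ * the current network IEs.
+ */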
+void issue_asocrsp(struct adapter *padapter, unsigned short status, struct sta_info *pstat, int pkt_type)
+{
+ struct xmit_frame *pmgntframe;
+ struct ieee80211_hdr *pwlanhdr;
+ struct pkt_attrib *pattrib;
+ unsigned char *pbuf, *pframe;
+ unsigned short val;
+ __le16 *fctrl;
+ struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct wlan_bssid_ex *pnetwork = &(pmlmeinfo->network);
+ u8 *ie = pnetwork->IEs;
+ __le16 lestatus, le_tmp;
+
+ DBG_871X("%s\n", __func__);
+
+ pmgntframe = alloc_mgtxmitframe(pxmitpriv);
+ if (pmgntframe == NULL)
+ return;
+
+ /* update attribute */
+ pattrib = &pmgntframe->attrib;
+ update_mgntframe_attrib(padapter, pattrib);
+
+
+ memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
+
+ pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
+
+ fctrl = &(pwlanhdr->frame_control);
+ *(fctrl) = 0;
+
+ memcpy((void *)GetAddr1Ptr(pwlanhdr), pstat->hwaddr, ETH_ALEN);
+ memcpy((void *)GetAddr2Ptr(pwlanhdr), myid(&(padapter->eeprompriv)), ETH_ALEN);
+ memcpy((void *)GetAddr3Ptr(pwlanhdr), get_my_bssid(&(pmlmeinfo->network)), ETH_ALEN);
+
+
+ SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
+ pmlmeext->mgnt_seq++;
+ if ((pkt_type == WIFI_ASSOCRSP) || (pkt_type == WIFI_REASSOCRSP))
+ SetFrameSubType(pwlanhdr, pkt_type);
+ else
+ return;
+
+ pattrib->hdrlen = sizeof(struct ieee80211_hdr_3addr);
+ pattrib->pktlen += pattrib->hdrlen;
+ pframe += pattrib->hdrlen;
+
+ /* capability */
+ val = *(unsigned short *)rtw_get_capability_from_ie(ie);
+
+ pframe = rtw_set_fixed_ie(pframe, _CAPABILITY_, (unsigned char *)&val, &(pattrib->pktlen));
+
+ lestatus = cpu_to_le16(status);
+ pframe = rtw_set_fixed_ie(pframe, _STATUS_CODE_, (unsigned char *)&lestatus, &(pattrib->pktlen));
+
+ le_tmp = cpu_to_le16(pstat->aid | BIT(14) | BIT(15));
+ pframe = rtw_set_fixed_ie(pframe, _ASOC_ID_, (unsigned char *)&le_tmp, &(pattrib->pktlen));
+
+ if (pstat->bssratelen <= 8) {
+ pframe = rtw_set_ie(pframe, _SUPPORTEDRATES_IE_, pstat->bssratelen, pstat->bssrateset, &(pattrib->pktlen));
+	} else {
+ pframe = rtw_set_ie(pframe, _SUPPORTEDRATES_IE_, 8, pstat->bssrateset, &(pattrib->pktlen));
+ pframe = rtw_set_ie(pframe, _EXT_SUPPORTEDRATES_IE_, (pstat->bssratelen-8), pstat->bssrateset+8, &(pattrib->pktlen));
+ }
+
+ if ((pstat->flags & WLAN_STA_HT) && (pmlmepriv->htpriv.ht_option)) {
+ uint ie_len = 0;
+
+ /* FILL HT CAP INFO IE */
+ /* p = hostapd_eid_ht_capabilities_info(hapd, p); */
+ pbuf = rtw_get_ie(ie + _BEACON_IE_OFFSET_, _HT_CAPABILITY_IE_, &ie_len, (pnetwork->IELength - _BEACON_IE_OFFSET_));
+ if (pbuf && ie_len > 0) {
+ memcpy(pframe, pbuf, ie_len+2);
+ pframe += (ie_len+2);
+ pattrib->pktlen += (ie_len+2);
+ }
+
+ /* FILL HT ADD INFO IE */
+ /* p = hostapd_eid_ht_operation(hapd, p); */
+ pbuf = rtw_get_ie(ie + _BEACON_IE_OFFSET_, _HT_ADD_INFO_IE_, &ie_len, (pnetwork->IELength - _BEACON_IE_OFFSET_));
+ if (pbuf && ie_len > 0) {
+ memcpy(pframe, pbuf, ie_len+2);
+ pframe += (ie_len+2);
+ pattrib->pktlen += (ie_len+2);
+ }
+
+ }
+
+ /* FILL WMM IE */
+ if ((pstat->flags & WLAN_STA_WME) && (pmlmepriv->qospriv.qos_option)) {
+ uint ie_len = 0;
+ unsigned char WMM_PARA_IE[] = {0x00, 0x50, 0xf2, 0x02, 0x01, 0x01};
+
+ for (pbuf = ie + _BEACON_IE_OFFSET_; ; pbuf += (ie_len + 2)) {
+ pbuf = rtw_get_ie(pbuf, _VENDOR_SPECIFIC_IE_, &ie_len, (pnetwork->IELength - _BEACON_IE_OFFSET_ - (ie_len + 2)));
+ if (pbuf && !memcmp(pbuf+2, WMM_PARA_IE, 6)) {
+ memcpy(pframe, pbuf, ie_len+2);
+ pframe += (ie_len+2);
+ pattrib->pktlen += (ie_len+2);
+
+ break;
+ }
+
+ if ((pbuf == NULL) || (ie_len == 0)) {
+ break;
+ }
+ }
+
+ }
+
+
+ if (pmlmeinfo->assoc_AP_vendor == HT_IOT_PEER_REALTEK) {
+ pframe = rtw_set_ie(pframe, _VENDOR_SPECIFIC_IE_, 6, REALTEK_96B_IE, &(pattrib->pktlen));
+ }
+
+	/* add WPS IE for WPS 2.0 */
+ if (pmlmepriv->wps_assoc_resp_ie && pmlmepriv->wps_assoc_resp_ie_len > 0) {
+ memcpy(pframe, pmlmepriv->wps_assoc_resp_ie, pmlmepriv->wps_assoc_resp_ie_len);
+
+ pframe += pmlmepriv->wps_assoc_resp_ie_len;
+ pattrib->pktlen += pmlmepriv->wps_assoc_resp_ie_len;
+ }
+
+ pattrib->last_txcmdsz = pattrib->pktlen;
+
+ dump_mgntframe(padapter, pmgntframe);
+}
+
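+/*
+ * Build and transmit an association request to the AP we are joining: the
+ * advertised rates are intersected with our own rate set and the vendor
+ * specific (WPA/WMM/WPS), RSN, HT and extended capability IEs are copied from
+ * the target network. On success the frame is saved in pmlmepriv->assoc_req.
+ */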
+void issue_assocreq(struct adapter *padapter)
+{
+ int ret = _FAIL;
+ struct xmit_frame *pmgntframe;
+ struct pkt_attrib *pattrib;
+ unsigned char *pframe;
+ struct ieee80211_hdr *pwlanhdr;
+ __le16 *fctrl;
+ __le16 val16;
+ unsigned int i, j, index = 0;
+ unsigned char bssrate[NumRates], sta_bssrate[NumRates];
+ struct ndis_80211_var_ie *pIE;
+ struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ int bssrate_len = 0, sta_bssrate_len = 0;
+ u8 vs_ie_length = 0;
+
+ pmgntframe = alloc_mgtxmitframe(pxmitpriv);
+ if (pmgntframe == NULL)
+ goto exit;
+
+ /* update attribute */
+ pattrib = &pmgntframe->attrib;
+ update_mgntframe_attrib(padapter, pattrib);
+
+
+ memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
+
+ pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
+
+ fctrl = &(pwlanhdr->frame_control);
+ *(fctrl) = 0;
+ memcpy(pwlanhdr->addr1, get_my_bssid(&(pmlmeinfo->network)), ETH_ALEN);
+ memcpy(pwlanhdr->addr2, myid(&(padapter->eeprompriv)), ETH_ALEN);
+ memcpy(pwlanhdr->addr3, get_my_bssid(&(pmlmeinfo->network)), ETH_ALEN);
+
+ SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
+ pmlmeext->mgnt_seq++;
+ SetFrameSubType(pframe, WIFI_ASSOCREQ);
+
+ pframe += sizeof(struct ieee80211_hdr_3addr);
+ pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
+
+ /* caps */
+ memcpy(pframe, rtw_get_capability_from_ie(pmlmeinfo->network.IEs), 2);
+
+ pframe += 2;
+ pattrib->pktlen += 2;
+
+ /* listen interval */
+ /* todo: listen interval for power saving */
+ val16 = cpu_to_le16(3);
+ memcpy(pframe, (unsigned char *)&val16, 2);
+ pframe += 2;
+ pattrib->pktlen += 2;
+
+ /* SSID */
+ pframe = rtw_set_ie(pframe, _SSID_IE_, pmlmeinfo->network.Ssid.SsidLength, pmlmeinfo->network.Ssid.Ssid, &(pattrib->pktlen));
+
+ /* supported rate & extended supported rate */
+
+ /* Check if the AP's supported rates are also supported by STA. */
+ get_rate_set(padapter, sta_bssrate, &sta_bssrate_len);
+ /* DBG_871X("sta_bssrate_len =%d\n", sta_bssrate_len); */
+
+	if (pmlmeext->cur_channel == 14) /* for JAPAN, channel 14 can only use B mode (CCK) */
+ sta_bssrate_len = 4;
+
+
+ /* for (i = 0; i < sta_bssrate_len; i++) { */
+ /* DBG_871X("sta_bssrate[%d]=%02X\n", i, sta_bssrate[i]); */
+ /* */
+
+ for (i = 0; i < NDIS_802_11_LENGTH_RATES_EX; i++) {
+ if (pmlmeinfo->network.SupportedRates[i] == 0)
+ break;
+ DBG_871X("network.SupportedRates[%d]=%02X\n", i, pmlmeinfo->network.SupportedRates[i]);
+ }
+
+
+ for (i = 0; i < NDIS_802_11_LENGTH_RATES_EX; i++) {
+ if (pmlmeinfo->network.SupportedRates[i] == 0)
+ break;
+
+
+ /* Check if the AP's supported rates are also supported by STA. */
+ for (j = 0; j < sta_bssrate_len; j++) {
+ /* Avoid the proprietary data rate (22Mbps) of Handlink WSG-4000 AP */
+ if ((pmlmeinfo->network.SupportedRates[i]|IEEE80211_BASIC_RATE_MASK)
+ == (sta_bssrate[j]|IEEE80211_BASIC_RATE_MASK)) {
+ /* DBG_871X("match i = %d, j =%d\n", i, j); */
+ break;
+ } else {
+ /* DBG_871X("not match: %02X != %02X\n", (pmlmeinfo->network.SupportedRates[i]|IEEE80211_BASIC_RATE_MASK), (sta_bssrate[j]|IEEE80211_BASIC_RATE_MASK)); */
+ }
+ }
+
+ if (j == sta_bssrate_len) {
+ /* the rate is not supported by STA */
+ DBG_871X("%s(): the rate[%d]=%02X is not supported by STA!\n", __func__, i, pmlmeinfo->network.SupportedRates[i]);
+ } else {
+ /* the rate is supported by STA */
+ bssrate[index++] = pmlmeinfo->network.SupportedRates[i];
+ }
+ }
+
+ bssrate_len = index;
+ DBG_871X("bssrate_len = %d\n", bssrate_len);
+
+ if (bssrate_len == 0) {
+ rtw_free_xmitbuf(pxmitpriv, pmgntframe->pxmitbuf);
+ rtw_free_xmitframe(pxmitpriv, pmgntframe);
+		goto exit; /* don't connect to the AP if there is no mutually supported rate */
+ }
+
+
+ if (bssrate_len > 8) {
+ pframe = rtw_set_ie(pframe, _SUPPORTEDRATES_IE_, 8, bssrate, &(pattrib->pktlen));
+ pframe = rtw_set_ie(pframe, _EXT_SUPPORTEDRATES_IE_, (bssrate_len - 8), (bssrate + 8), &(pattrib->pktlen));
+ } else
+ pframe = rtw_set_ie(pframe, _SUPPORTEDRATES_IE_, bssrate_len, bssrate, &(pattrib->pktlen));
+
+ /* vendor specific IE, such as WPA, WMM, WPS */
+ for (i = sizeof(struct ndis_802_11_fix_ie); i < pmlmeinfo->network.IELength;) {
+ pIE = (struct ndis_80211_var_ie *)(pmlmeinfo->network.IEs + i);
+
+ switch (pIE->ElementID) {
+ case _VENDOR_SPECIFIC_IE_:
+ if ((!memcmp(pIE->data, RTW_WPA_OUI, 4)) ||
+ (!memcmp(pIE->data, WMM_OUI, 4)) ||
+ (!memcmp(pIE->data, WPS_OUI, 4))) {
+ vs_ie_length = pIE->Length;
+ if ((!padapter->registrypriv.wifi_spec) && (!memcmp(pIE->data, WPS_OUI, 4))) {
+ /* Commented by Kurt 20110629 */
+					/* In some older APs, the WPS handshake */
+					/* would fail if we append vendor extension information to the AP */
+
+ vs_ie_length = 14;
+ }
+
+ pframe = rtw_set_ie(pframe, _VENDOR_SPECIFIC_IE_, vs_ie_length, pIE->data, &(pattrib->pktlen));
+ }
+ break;
+
+ case EID_WPA2:
+ pframe = rtw_set_ie(pframe, EID_WPA2, pIE->Length, pIE->data, &(pattrib->pktlen));
+ break;
+ case EID_HTCapability:
+			if (padapter->mlmepriv.htpriv.ht_option) {
+				if (!is_ap_in_tkip(padapter)) {
+ memcpy(&(pmlmeinfo->HT_caps), pIE->data, sizeof(struct HT_caps_element));
+ pframe = rtw_set_ie(pframe, EID_HTCapability, pIE->Length, (u8 *)(&(pmlmeinfo->HT_caps)), &(pattrib->pktlen));
+ }
+ }
+ break;
+
+ case EID_EXTCapability:
+			if (padapter->mlmepriv.htpriv.ht_option)
+ pframe = rtw_set_ie(pframe, EID_EXTCapability, pIE->Length, pIE->data, &(pattrib->pktlen));
+ break;
+ default:
+ break;
+ }
+
+ i += (pIE->Length + 2);
+ }
+
+ if (pmlmeinfo->assoc_AP_vendor == HT_IOT_PEER_REALTEK)
+ pframe = rtw_set_ie(pframe, _VENDOR_SPECIFIC_IE_, 6, REALTEK_96B_IE, &(pattrib->pktlen));
+
+
+ pattrib->last_txcmdsz = pattrib->pktlen;
+ dump_mgntframe(padapter, pmgntframe);
+
+ ret = _SUCCESS;
+
+exit:
+ if (ret == _SUCCESS)
+ rtw_buf_update(&pmlmepriv->assoc_req, &pmlmepriv->assoc_req_len, (u8 *)pwlanhdr, pattrib->pktlen);
+ else
+ rtw_buf_free(&pmlmepriv->assoc_req, &pmlmepriv->assoc_req_len);
+
+ return;
+}
+
+/* when wait_ack is true, this function should be called in process context */
+static int _issue_nulldata(struct adapter *padapter, unsigned char *da, unsigned int power_mode, int wait_ack)
+{
+ int ret = _FAIL;
+ struct xmit_frame *pmgntframe;
+ struct pkt_attrib *pattrib;
+ unsigned char *pframe;
+ struct ieee80211_hdr *pwlanhdr;
+ __le16 *fctrl;
+ struct xmit_priv *pxmitpriv;
+ struct mlme_ext_priv *pmlmeext;
+ struct mlme_ext_info *pmlmeinfo;
+
+ /* DBG_871X("%s:%d\n", __func__, power_mode); */
+
+ if (!padapter)
+ goto exit;
+
+ pxmitpriv = &(padapter->xmitpriv);
+ pmlmeext = &(padapter->mlmeextpriv);
+ pmlmeinfo = &(pmlmeext->mlmext_info);
+
+ pmgntframe = alloc_mgtxmitframe(pxmitpriv);
+ if (pmgntframe == NULL)
+ goto exit;
+
+ /* update attribute */
+ pattrib = &pmgntframe->attrib;
+ update_mgntframe_attrib(padapter, pattrib);
+ pattrib->retry_ctrl = false;
+
+ memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
+
+ pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
+
+ fctrl = &(pwlanhdr->frame_control);
+ *(fctrl) = 0;
+
+ if ((pmlmeinfo->state&0x03) == WIFI_FW_AP_STATE)
+ SetFrDs(fctrl);
+ else if ((pmlmeinfo->state&0x03) == WIFI_FW_STATION_STATE)
+ SetToDs(fctrl);
+
+ if (power_mode)
+ SetPwrMgt(fctrl);
+
+ memcpy(pwlanhdr->addr1, da, ETH_ALEN);
+ memcpy(pwlanhdr->addr2, myid(&(padapter->eeprompriv)), ETH_ALEN);
+ memcpy(pwlanhdr->addr3, get_my_bssid(&(pmlmeinfo->network)), ETH_ALEN);
+
+ SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
+ pmlmeext->mgnt_seq++;
+ SetFrameSubType(pframe, WIFI_DATA_NULL);
+
+ pframe += sizeof(struct ieee80211_hdr_3addr);
+ pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
+
+ pattrib->last_txcmdsz = pattrib->pktlen;
+
+ if (wait_ack) {
+ ret = dump_mgntframe_and_wait_ack(padapter, pmgntframe);
+	} else {
+ dump_mgntframe(padapter, pmgntframe);
+ ret = _SUCCESS;
+ }
+
+exit:
+ return ret;
+}
+
+/*
+ * [IMPORTANT] Don't call this function in interrupt context
+ *
+ * When wait_ms > 0, this function should be called in process context
+ * da == NULL for station mode
+ */
+int issue_nulldata(struct adapter *padapter, unsigned char *da, unsigned int power_mode, int try_cnt, int wait_ms)
+{
+ int ret;
+ int i = 0;
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct sta_info *psta;
+
+
+	/* da == NULL, assume it's a null data frame from sta to ap */
+ if (da == NULL)
+ da = get_my_bssid(&(pmlmeinfo->network));
+
+ psta = rtw_get_stainfo(&padapter->stapriv, da);
+ if (psta) {
+ if (power_mode)
+ rtw_hal_macid_sleep(padapter, psta->mac_id);
+ else
+ rtw_hal_macid_wakeup(padapter, psta->mac_id);
+ } else {
+ DBG_871X(FUNC_ADPT_FMT ": Can't find sta info for " MAC_FMT ", skip macid %s!!\n",
+ FUNC_ADPT_ARG(padapter), MAC_ARG(da), power_mode?"sleep":"wakeup");
+ rtw_warn_on(1);
+ }
+
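+	/*
+	 * Retry loop: when wait_ms > 0 each attempt waits for a TX ACK report
+	 * and failures are retried up to try_cnt times with wait_ms sleeps in
+	 * between; when wait_ms == 0, try_cnt frames are issued back to back
+	 * without waiting for an ACK.
+	 */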
+ do {
+ ret = _issue_nulldata(padapter, da, power_mode, wait_ms > 0?true:false);
+
+ i++;
+
+ if (padapter->bDriverStopped || padapter->bSurpriseRemoved)
+ break;
+
+ if (i < try_cnt && wait_ms > 0 && ret == _FAIL)
+ msleep(wait_ms);
+
+ } while ((i < try_cnt) && ((ret == _FAIL) || (wait_ms == 0)));
+
+ if (ret != _FAIL) {
+ ret = _SUCCESS;
+ #ifndef DBG_XMIT_ACK
+ goto exit;
+ #endif
+ }
+
+ if (try_cnt && wait_ms) {
+ if (da)
+ DBG_871X(FUNC_ADPT_FMT" to "MAC_FMT", ch:%u%s, %d/%d in %u ms\n",
+ FUNC_ADPT_ARG(padapter), MAC_ARG(da), rtw_get_oper_ch(padapter),
+ ret == _SUCCESS?", acked":"", i, try_cnt, (i + 1) * wait_ms);
+ else
+ DBG_871X(FUNC_ADPT_FMT", ch:%u%s, %d/%d in %u ms\n",
+ FUNC_ADPT_ARG(padapter), rtw_get_oper_ch(padapter),
+ ret == _SUCCESS?", acked":"", i, try_cnt, (i + 1) * wait_ms);
+ }
+exit:
+ return ret;
+}
+
+/*
+ * [IMPORTANT] This function run in interrupt context
+ *
+ * The null data packet would be sent without power bit,
+ * and not guarantee success.
+ */
+s32 issue_nulldata_in_interrupt(struct adapter *padapter, u8 *da)
+{
+ int ret;
+ struct mlme_ext_priv *pmlmeext;
+ struct mlme_ext_info *pmlmeinfo;
+
+
+ pmlmeext = &padapter->mlmeextpriv;
+ pmlmeinfo = &pmlmeext->mlmext_info;
+
+	/* da == NULL, assume it's a null data frame from STA to AP */
+ if (da == NULL)
+ da = get_my_bssid(&(pmlmeinfo->network));
+
+ ret = _issue_nulldata(padapter, da, 0, false);
+
+ return ret;
+}
+
+/* when wait_ack is true, this function should be called in process context */
+static int _issue_qos_nulldata(struct adapter *padapter, unsigned char *da, u16 tid, int wait_ack)
+{
+ int ret = _FAIL;
+ struct xmit_frame *pmgntframe;
+ struct pkt_attrib *pattrib;
+ unsigned char *pframe;
+ struct ieee80211_hdr *pwlanhdr;
+ __le16 *fctrl;
+ u16 *qc;
+ struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+
+ DBG_871X("%s\n", __func__);
+
+ pmgntframe = alloc_mgtxmitframe(pxmitpriv);
+ if (pmgntframe == NULL)
+ goto exit;
+
+ /* update attribute */
+ pattrib = &pmgntframe->attrib;
+ update_mgntframe_attrib(padapter, pattrib);
+
+ pattrib->hdrlen += 2;
+ pattrib->qos_en = true;
+ pattrib->eosp = 1;
+ pattrib->ack_policy = 0;
+ pattrib->mdata = 0;
+
+ memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
+
+ pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
+
+ fctrl = &(pwlanhdr->frame_control);
+ *(fctrl) = 0;
+
+ if ((pmlmeinfo->state&0x03) == WIFI_FW_AP_STATE)
+ SetFrDs(fctrl);
+ else if ((pmlmeinfo->state&0x03) == WIFI_FW_STATION_STATE)
+ SetToDs(fctrl);
+
+ if (pattrib->mdata)
+ SetMData(fctrl);
+
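+	/* the 2-byte QoS Control field sits in the last two bytes of the header (hdrlen was extended by 2 above) */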
+ qc = (unsigned short *)(pframe + pattrib->hdrlen - 2);
+
+ SetPriority(qc, tid);
+
+ SetEOSP(qc, pattrib->eosp);
+
+ SetAckpolicy(qc, pattrib->ack_policy);
+
+ memcpy(pwlanhdr->addr1, da, ETH_ALEN);
+ memcpy(pwlanhdr->addr2, myid(&(padapter->eeprompriv)), ETH_ALEN);
+ memcpy(pwlanhdr->addr3, get_my_bssid(&(pmlmeinfo->network)), ETH_ALEN);
+
+ SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
+ pmlmeext->mgnt_seq++;
+ SetFrameSubType(pframe, WIFI_QOS_DATA_NULL);
+
+ pframe += sizeof(struct ieee80211_qos_hdr);
+ pattrib->pktlen = sizeof(struct ieee80211_qos_hdr);
+
+ pattrib->last_txcmdsz = pattrib->pktlen;
+
+ if (wait_ack) {
+ ret = dump_mgntframe_and_wait_ack(padapter, pmgntframe);
+ } else{
+ dump_mgntframe(padapter, pmgntframe);
+ ret = _SUCCESS;
+ }
+
+exit:
+ return ret;
+}
+
+/* when wait_ms > 0, this function should be called in process context */
+/* da == NULL for station mode */
+int issue_qos_nulldata(struct adapter *padapter, unsigned char *da, u16 tid, int try_cnt, int wait_ms)
+{
+ int ret;
+ int i = 0;
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+
+	/* da == NULL, assume it's a null data frame from STA to AP */
+ if (da == NULL)
+ da = get_my_bssid(&(pmlmeinfo->network));
+
+ do {
+ ret = _issue_qos_nulldata(padapter, da, tid, wait_ms > 0?true:false);
+
+ i++;
+
+ if (padapter->bDriverStopped || padapter->bSurpriseRemoved)
+ break;
+
+ if (i < try_cnt && wait_ms > 0 && ret == _FAIL)
+ msleep(wait_ms);
+
+ } while ((i < try_cnt) && ((ret == _FAIL) || (wait_ms == 0)));
+
+ if (ret != _FAIL) {
+ ret = _SUCCESS;
+ #ifndef DBG_XMIT_ACK
+ goto exit;
+ #endif
+ }
+
+ if (try_cnt && wait_ms) {
+ if (da)
+ DBG_871X(FUNC_ADPT_FMT" to "MAC_FMT", ch:%u%s, %d/%d in %u ms\n",
+ FUNC_ADPT_ARG(padapter), MAC_ARG(da), rtw_get_oper_ch(padapter),
+ ret == _SUCCESS?", acked":"", i, try_cnt, (i + 1) * wait_ms);
+ else
+ DBG_871X(FUNC_ADPT_FMT", ch:%u%s, %d/%d in %u ms\n",
+ FUNC_ADPT_ARG(padapter), rtw_get_oper_ch(padapter),
+ ret == _SUCCESS?", acked":"", i, try_cnt, (i + 1) * wait_ms);
+ }
+exit:
+ return ret;
+}
+
+static int _issue_deauth(struct adapter *padapter, unsigned char *da, unsigned short reason, u8 wait_ack)
+{
+ struct xmit_frame *pmgntframe;
+ struct pkt_attrib *pattrib;
+ unsigned char *pframe;
+ struct ieee80211_hdr *pwlanhdr;
+ __le16 *fctrl;
+ struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ int ret = _FAIL;
+ __le16 le_tmp;
+
+ /* DBG_871X("%s to "MAC_FMT"\n", __func__, MAC_ARG(da)); */
+
+ pmgntframe = alloc_mgtxmitframe(pxmitpriv);
+ if (pmgntframe == NULL) {
+ goto exit;
+ }
+
+ /* update attribute */
+ pattrib = &pmgntframe->attrib;
+ update_mgntframe_attrib(padapter, pattrib);
+ pattrib->retry_ctrl = false;
+
+ memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
+
+ pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
+
+ fctrl = &(pwlanhdr->frame_control);
+ *(fctrl) = 0;
+
+ memcpy(pwlanhdr->addr1, da, ETH_ALEN);
+ memcpy(pwlanhdr->addr2, myid(&(padapter->eeprompriv)), ETH_ALEN);
+ memcpy(pwlanhdr->addr3, get_my_bssid(&(pmlmeinfo->network)), ETH_ALEN);
+
+ SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
+ pmlmeext->mgnt_seq++;
+ SetFrameSubType(pframe, WIFI_DEAUTH);
+
+ pframe += sizeof(struct ieee80211_hdr_3addr);
+ pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
+
+ le_tmp = cpu_to_le16(reason);
+ pframe = rtw_set_fixed_ie(pframe, _RSON_CODE_, (unsigned char *)&le_tmp, &(pattrib->pktlen));
+
+ pattrib->last_txcmdsz = pattrib->pktlen;
+
+
+ if (wait_ack) {
+ ret = dump_mgntframe_and_wait_ack(padapter, pmgntframe);
+ } else{
+ dump_mgntframe(padapter, pmgntframe);
+ ret = _SUCCESS;
+ }
+
+exit:
+ return ret;
+}
+
+int issue_deauth(struct adapter *padapter, unsigned char *da, unsigned short reason)
+{
+ DBG_871X("%s to "MAC_FMT"\n", __func__, MAC_ARG(da));
+ return _issue_deauth(padapter, da, reason, false);
+}
+
+int issue_deauth_ex(struct adapter *padapter, u8 *da, unsigned short reason, int try_cnt,
+ int wait_ms)
+{
+ int ret;
+ int i = 0;
+
+ do {
+ ret = _issue_deauth(padapter, da, reason, wait_ms > 0?true:false);
+
+ i++;
+
+ if (padapter->bDriverStopped || padapter->bSurpriseRemoved)
+ break;
+
+ if (i < try_cnt && wait_ms > 0 && ret == _FAIL)
+ msleep(wait_ms);
+
+ } while ((i < try_cnt) && ((ret == _FAIL) || (wait_ms == 0)));
+
+ if (ret != _FAIL) {
+ ret = _SUCCESS;
+ #ifndef DBG_XMIT_ACK
+ goto exit;
+ #endif
+ }
+
+ if (try_cnt && wait_ms) {
+ if (da)
+ DBG_871X(FUNC_ADPT_FMT" to "MAC_FMT", ch:%u%s, %d/%d in %u ms\n",
+ FUNC_ADPT_ARG(padapter), MAC_ARG(da), rtw_get_oper_ch(padapter),
+ ret == _SUCCESS?", acked":"", i, try_cnt, (i + 1) * wait_ms);
+ else
+ DBG_871X(FUNC_ADPT_FMT", ch:%u%s, %d/%d in %u ms\n",
+ FUNC_ADPT_ARG(padapter), rtw_get_oper_ch(padapter),
+ ret == _SUCCESS?", acked":"", i, try_cnt, (i + 1) * wait_ms);
+ }
+exit:
+ return ret;
+}
+
+void issue_action_SA_Query(struct adapter *padapter, unsigned char *raddr, unsigned char action, unsigned short tid)
+{
+ u8 category = RTW_WLAN_CATEGORY_SA_QUERY;
+ struct xmit_frame *pmgntframe;
+ struct pkt_attrib *pattrib;
+ u8 *pframe;
+ struct ieee80211_hdr *pwlanhdr;
+ __le16 *fctrl;
+ struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ __le16 le_tmp;
+
+ DBG_871X("%s\n", __func__);
+
+ pmgntframe = alloc_mgtxmitframe(pxmitpriv);
+ if (pmgntframe == NULL) {
+ DBG_871X("%s: alloc_mgtxmitframe fail\n", __func__);
+ return;
+ }
+
+ /* update attribute */
+ pattrib = &pmgntframe->attrib;
+ update_mgntframe_attrib(padapter, pattrib);
+
+ memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
+
+ pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
+
+ fctrl = &(pwlanhdr->frame_control);
+ *(fctrl) = 0;
+
+ if (raddr)
+ memcpy(pwlanhdr->addr1, raddr, ETH_ALEN);
+ else
+ memcpy(pwlanhdr->addr1, get_my_bssid(&(pmlmeinfo->network)), ETH_ALEN);
+ memcpy(pwlanhdr->addr2, myid(&(padapter->eeprompriv)), ETH_ALEN);
+ memcpy(pwlanhdr->addr3, get_my_bssid(&(pmlmeinfo->network)), ETH_ALEN);
+
+ SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
+ pmlmeext->mgnt_seq++;
+ SetFrameSubType(pframe, WIFI_ACTION);
+
+ pframe += sizeof(struct ieee80211_hdr_3addr);
+ pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
+
+ pframe = rtw_set_fixed_ie(pframe, 1, &category, &pattrib->pktlen);
+ pframe = rtw_set_fixed_ie(pframe, 1, &action, &pattrib->pktlen);
+
+ switch (action) {
+ case 0: /* SA Query req */
+ pframe = rtw_set_fixed_ie(pframe, 2, (unsigned char *)&pmlmeext->sa_query_seq, &pattrib->pktlen);
+ pmlmeext->sa_query_seq++;
+		/* send SA Query request to the AP; the AP should reply with an SA Query response within 1 second */
+ set_sa_query_timer(pmlmeext, 1000);
+ break;
+
+ case 1: /* SA Query rsp */
+ le_tmp = cpu_to_le16(tid);
+ pframe = rtw_set_fixed_ie(pframe, 2, (unsigned char *)&le_tmp, &pattrib->pktlen);
+ break;
+ default:
+ break;
+ }
+
+ pattrib->last_txcmdsz = pattrib->pktlen;
+
+ dump_mgntframe(padapter, pmgntframe);
+}
+
+void issue_action_BA(struct adapter *padapter, unsigned char *raddr, unsigned char action, unsigned short status)
+{
+ u8 category = RTW_WLAN_CATEGORY_BACK;
+ u16 start_seq;
+ u16 BA_para_set;
+ u16 reason_code;
+ u16 BA_timeout_value;
+ u16 BA_starting_seqctrl = 0;
+ enum HT_CAP_AMPDU_FACTOR max_rx_ampdu_factor;
+ struct xmit_frame *pmgntframe;
+ struct pkt_attrib *pattrib;
+ u8 *pframe;
+ struct ieee80211_hdr *pwlanhdr;
+ __le16 *fctrl;
+ struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct sta_info *psta;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct registry_priv *pregpriv = &padapter->registrypriv;
+ __le16 le_tmp;
+
+ DBG_871X("%s, category =%d, action =%d, status =%d\n", __func__, category, action, status);
+
+ pmgntframe = alloc_mgtxmitframe(pxmitpriv);
+ if (pmgntframe == NULL)
+ return;
+
+ /* update attribute */
+ pattrib = &pmgntframe->attrib;
+ update_mgntframe_attrib(padapter, pattrib);
+
+ memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
+
+ pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
+
+ fctrl = &(pwlanhdr->frame_control);
+ *(fctrl) = 0;
+
+ /* memcpy(pwlanhdr->addr1, get_my_bssid(&(pmlmeinfo->network)), ETH_ALEN); */
+ memcpy(pwlanhdr->addr1, raddr, ETH_ALEN);
+ memcpy(pwlanhdr->addr2, myid(&(padapter->eeprompriv)), ETH_ALEN);
+ memcpy(pwlanhdr->addr3, get_my_bssid(&(pmlmeinfo->network)), ETH_ALEN);
+
+ SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
+ pmlmeext->mgnt_seq++;
+ SetFrameSubType(pframe, WIFI_ACTION);
+
+ pframe += sizeof(struct ieee80211_hdr_3addr);
+ pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
+
+ pframe = rtw_set_fixed_ie(pframe, 1, &(category), &(pattrib->pktlen));
+ pframe = rtw_set_fixed_ie(pframe, 1, &(action), &(pattrib->pktlen));
+
+ if (category == 3) {
+ switch (action) {
+ case 0: /* ADDBA req */
+ do {
+ pmlmeinfo->dialogToken++;
+ } while (pmlmeinfo->dialogToken == 0);
+ pframe = rtw_set_fixed_ie(pframe, 1, &(pmlmeinfo->dialogToken), &(pattrib->pktlen));
+
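+			/* Block Ack Parameter Set: bit 0 A-MSDU supported, bit 1 BA policy (immediate), bits 2-5 TID, bits 6-15 buffer size */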
+ if (rtw_btcoex_IsBTCoexCtrlAMPDUSize(padapter)) {
+ /* A-MSDU NOT Supported */
+ BA_para_set = 0;
+ /* immediate Block Ack */
+ BA_para_set |= (1 << 1) & IEEE80211_ADDBA_PARAM_POLICY_MASK;
+ /* TID */
+ BA_para_set |= (status << 2) & IEEE80211_ADDBA_PARAM_TID_MASK;
+ /* max buffer size is 8 MSDU */
+ BA_para_set |= (8 << 6) & RTW_IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK;
+ } else {
+ BA_para_set = (0x1002 | ((status & 0xf) << 2)); /* immediate ack & 64 buffer size */
+ }
+ le_tmp = cpu_to_le16(BA_para_set);
+ pframe = rtw_set_fixed_ie(pframe, 2, (unsigned char *)(&(le_tmp)), &(pattrib->pktlen));
+
+			BA_timeout_value = 5000; /* 5000 TUs */
+ le_tmp = cpu_to_le16(BA_timeout_value);
+ pframe = rtw_set_fixed_ie(pframe, 2, (unsigned char *)(&(le_tmp)), &(pattrib->pktlen));
+
+ /* if ((psta = rtw_get_stainfo(pstapriv, pmlmeinfo->network.MacAddress)) != NULL) */
+ psta = rtw_get_stainfo(pstapriv, raddr);
+ if (psta != NULL) {
+ start_seq = (psta->sta_xmitpriv.txseq_tid[status & 0x07]&0xfff) + 1;
+
+ DBG_871X("BA_starting_seqctrl = %d for TID =%d\n", start_seq, status & 0x07);
+
+ psta->BA_starting_seqctrl[status & 0x07] = start_seq;
+
+ BA_starting_seqctrl = start_seq << 4;
+ }
+
+ le_tmp = cpu_to_le16(BA_starting_seqctrl);
+ pframe = rtw_set_fixed_ie(pframe, 2, (unsigned char *)(&(le_tmp)), &(pattrib->pktlen));
+ break;
+
+ case 1: /* ADDBA rsp */
+ pframe = rtw_set_fixed_ie(pframe, 1, &(pmlmeinfo->ADDBA_req.dialog_token), &(pattrib->pktlen));
+ pframe = rtw_set_fixed_ie(pframe, 2, (unsigned char *)(&status), &(pattrib->pktlen));
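+			/* 0xFF means the driver does not force an RX A-MPDU factor; query the HAL for its default instead */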
+ if (padapter->driver_rx_ampdu_factor != 0xFF)
+ max_rx_ampdu_factor =
+ (enum HT_CAP_AMPDU_FACTOR)padapter->driver_rx_ampdu_factor;
+ else
+ rtw_hal_get_def_var(padapter,
+ HW_VAR_MAX_RX_AMPDU_FACTOR, &max_rx_ampdu_factor);
+
+ if (MAX_AMPDU_FACTOR_64K == max_rx_ampdu_factor)
+ BA_para_set = ((le16_to_cpu(pmlmeinfo->ADDBA_req.BA_para_set) & 0x3f) | 0x1000); /* 64 buffer size */
+ else if (MAX_AMPDU_FACTOR_32K == max_rx_ampdu_factor)
+ BA_para_set = ((le16_to_cpu(pmlmeinfo->ADDBA_req.BA_para_set) & 0x3f) | 0x0800); /* 32 buffer size */
+ else if (MAX_AMPDU_FACTOR_16K == max_rx_ampdu_factor)
+ BA_para_set = ((le16_to_cpu(pmlmeinfo->ADDBA_req.BA_para_set) & 0x3f) | 0x0400); /* 16 buffer size */
+ else if (MAX_AMPDU_FACTOR_8K == max_rx_ampdu_factor)
+ BA_para_set = ((le16_to_cpu(pmlmeinfo->ADDBA_req.BA_para_set) & 0x3f) | 0x0200); /* 8 buffer size */
+ else
+ BA_para_set = ((le16_to_cpu(pmlmeinfo->ADDBA_req.BA_para_set) & 0x3f) | 0x1000); /* 64 buffer size */
+
+ if (rtw_btcoex_IsBTCoexCtrlAMPDUSize(padapter) &&
+ padapter->driver_rx_ampdu_factor == 0xFF) {
+ /* max buffer size is 8 MSDU */
+ BA_para_set &= ~RTW_IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK;
+ BA_para_set |= (8 << 6) & RTW_IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK;
+ }
+
+ if (pregpriv->ampdu_amsdu == 0)/* disabled */
+ le_tmp = cpu_to_le16(BA_para_set & ~BIT(0));
+ else if (pregpriv->ampdu_amsdu == 1)/* enabled */
+ le_tmp = cpu_to_le16(BA_para_set | BIT(0));
+ else /* auto */
+ le_tmp = cpu_to_le16(BA_para_set);
+
+ pframe = rtw_set_fixed_ie(pframe, 2, (unsigned char *)(&(le_tmp)), &(pattrib->pktlen));
+ pframe = rtw_set_fixed_ie(pframe, 2, (unsigned char *)(&(pmlmeinfo->ADDBA_req.BA_timeout_value)), &(pattrib->pktlen));
+ break;
+ case 2:/* DELBA */
+ BA_para_set = (status & 0x1F) << 3;
+ le_tmp = cpu_to_le16(BA_para_set);
+ pframe = rtw_set_fixed_ie(pframe, 2, (unsigned char *)(&(le_tmp)), &(pattrib->pktlen));
+
+ reason_code = 37;
+ le_tmp = cpu_to_le16(reason_code);
+ pframe = rtw_set_fixed_ie(pframe, 2, (unsigned char *)(&(le_tmp)), &(pattrib->pktlen));
+ break;
+ default:
+ break;
+ }
+ }
+
+ pattrib->last_txcmdsz = pattrib->pktlen;
+
+ dump_mgntframe(padapter, pmgntframe);
+}
+
+static void issue_action_BSSCoexistPacket(struct adapter *padapter)
+{
+ struct list_head *plist, *phead;
+ unsigned char category, action;
+ struct xmit_frame *pmgntframe;
+ struct pkt_attrib *pattrib;
+ unsigned char *pframe;
+ struct ieee80211_hdr *pwlanhdr;
+ __le16 *fctrl;
+ struct wlan_network *pnetwork = NULL;
+ struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct __queue *queue = &(pmlmepriv->scanned_queue);
+ u8 InfoContent[16] = {0};
+ u8 ICS[8][15];
+
+ if ((pmlmepriv->num_FortyMHzIntolerant == 0) || (pmlmepriv->num_sta_no_ht == 0))
+ return;
+
+ if (true == pmlmeinfo->bwmode_updated)
+ return;
+
+
+ DBG_871X("%s\n", __func__);
+
+
+ category = RTW_WLAN_CATEGORY_PUBLIC;
+ action = ACT_PUBLIC_BSSCOEXIST;
+
+ pmgntframe = alloc_mgtxmitframe(pxmitpriv);
+ if (pmgntframe == NULL) {
+ return;
+ }
+
+ /* update attribute */
+ pattrib = &pmgntframe->attrib;
+ update_mgntframe_attrib(padapter, pattrib);
+
+ memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
+
+ pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
+
+ fctrl = &(pwlanhdr->frame_control);
+ *(fctrl) = 0;
+
+ memcpy(pwlanhdr->addr1, get_my_bssid(&(pmlmeinfo->network)), ETH_ALEN);
+ memcpy(pwlanhdr->addr2, myid(&(padapter->eeprompriv)), ETH_ALEN);
+ memcpy(pwlanhdr->addr3, get_my_bssid(&(pmlmeinfo->network)), ETH_ALEN);
+
+ SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
+ pmlmeext->mgnt_seq++;
+ SetFrameSubType(pframe, WIFI_ACTION);
+
+ pframe += sizeof(struct ieee80211_hdr_3addr);
+ pattrib->pktlen = sizeof(struct ieee80211_hdr_3addr);
+
+ pframe = rtw_set_fixed_ie(pframe, 1, &(category), &(pattrib->pktlen));
+ pframe = rtw_set_fixed_ie(pframe, 1, &(action), &(pattrib->pktlen));
+
+
+ /* */
+ if (pmlmepriv->num_FortyMHzIntolerant > 0) {
+ u8 iedata = 0;
+
+ iedata |= BIT(2);/* 20 MHz BSS Width Request */
+
+ pframe = rtw_set_ie(pframe, EID_BSSCoexistence, 1, &iedata, &(pattrib->pktlen));
+
+ }
+
+
+ /* */
+ memset(ICS, 0, sizeof(ICS));
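+	/* ICS[i][j] marks channel j as hosting a non-HT BSS for entry i; ICS[i][0] flags the entry as in use (only entry 0 is filled below) */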
+ if (pmlmepriv->num_sta_no_ht > 0) {
+ int i;
+
+ spin_lock_bh(&(pmlmepriv->scanned_queue.lock));
+
+ phead = get_list_head(queue);
+ plist = get_next(phead);
+
+ while (1) {
+ int len;
+ u8 *p;
+ struct wlan_bssid_ex *pbss_network;
+
+ if (phead == plist)
+ break;
+
+ pnetwork = LIST_CONTAINOR(plist, struct wlan_network, list);
+
+ plist = get_next(plist);
+
+ pbss_network = (struct wlan_bssid_ex *)&pnetwork->network;
+
+ p = rtw_get_ie(pbss_network->IEs + _FIXED_IE_LENGTH_, _HT_CAPABILITY_IE_, &len, pbss_network->IELength - _FIXED_IE_LENGTH_);
+ if ((p == NULL) || (len == 0)) {/* non-HT */
+
+ if ((pbss_network->Configuration.DSConfig <= 0) || (pbss_network->Configuration.DSConfig > 14))
+ continue;
+
+ ICS[0][pbss_network->Configuration.DSConfig] = 1;
+
+ if (ICS[0][0] == 0)
+ ICS[0][0] = 1;
+ }
+
+ }
+
+ spin_unlock_bh(&(pmlmepriv->scanned_queue.lock));
+
+
+ for (i = 0; i < 8; i++) {
+ if (ICS[i][0] == 1) {
+ int j, k = 0;
+
+ InfoContent[k] = i;
+ /* SET_BSS_INTOLERANT_ELE_REG_CLASS(InfoContent, i); */
+ k++;
+
+ for (j = 1; j <= 14; j++) {
+ if (ICS[i][j] == 1) {
+ if (k < 16) {
+ InfoContent[k] = j; /* channel number */
+ /* SET_BSS_INTOLERANT_ELE_CHANNEL(InfoContent+k, j); */
+ k++;
+ }
+ }
+ }
+
+ pframe = rtw_set_ie(pframe, EID_BSSIntolerantChlReport, k, InfoContent, &(pattrib->pktlen));
+
+ }
+
+ }
+
+
+ }
+
+
+ pattrib->last_txcmdsz = pattrib->pktlen;
+
+ dump_mgntframe(padapter, pmgntframe);
+}
+
+unsigned int send_delba(struct adapter *padapter, u8 initiator, u8 *addr)
+{
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct sta_info *psta = NULL;
+ /* struct recv_reorder_ctrl *preorder_ctrl; */
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ u16 tid;
+
+ if ((pmlmeinfo->state&0x03) != WIFI_FW_AP_STATE)
+ if (!(pmlmeinfo->state & WIFI_FW_ASSOC_SUCCESS))
+ return _SUCCESS;
+
+ psta = rtw_get_stainfo(pstapriv, addr);
+ if (psta == NULL)
+ return _SUCCESS;
+
+ /* DBG_871X("%s:%s\n", __func__, (initiator == 0)?"RX_DIR":"TX_DIR"); */
+
+ if (initiator == 0) {/* recipient */
+ for (tid = 0; tid < MAXTID; tid++) {
+ if (psta->recvreorder_ctrl[tid].enable == true) {
+ DBG_871X("rx agg disable tid(%d)\n", tid);
+ issue_action_BA(padapter, addr, RTW_WLAN_ACTION_DELBA, (((tid << 1) | initiator)&0x1F));
+ psta->recvreorder_ctrl[tid].enable = false;
+ psta->recvreorder_ctrl[tid].indicate_seq = 0xffff;
+ #ifdef DBG_RX_SEQ
+ DBG_871X("DBG_RX_SEQ %s:%d indicate_seq:%u\n", __func__, __LINE__,
+ psta->recvreorder_ctrl[tid].indicate_seq);
+ #endif
+ }
+ }
+ } else if (initiator == 1) {/* originator */
+ /* DBG_871X("tx agg_enable_bitmap(0x%08x)\n", psta->htpriv.agg_enable_bitmap); */
+ for (tid = 0; tid < MAXTID; tid++) {
+ if (psta->htpriv.agg_enable_bitmap & BIT(tid)) {
+ DBG_871X("tx agg disable tid(%d)\n", tid);
+ issue_action_BA(padapter, addr, RTW_WLAN_ACTION_DELBA, (((tid << 1) | initiator)&0x1F));
+ psta->htpriv.agg_enable_bitmap &= ~BIT(tid);
+ psta->htpriv.candidate_tid_bitmap &= ~BIT(tid);
+
+ }
+ }
+ }
+
+ return _SUCCESS;
+
+}
+
+unsigned int send_beacon(struct adapter *padapter)
+{
+ u8 bxmitok = false;
+ int issue = 0;
+ int poll = 0;
+ unsigned long start = jiffies;
+
+ rtw_hal_set_hwreg(padapter, HW_VAR_BCN_VALID, NULL);
+ rtw_hal_set_hwreg(padapter, HW_VAR_DL_BCN_SEL, NULL);
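+	/* download the beacon and poll HW_VAR_BCN_VALID; retry the download up to 100 times, polling in batches of 10 with yield() in between */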
+ do {
+ issue_beacon(padapter, 100);
+ issue++;
+ do {
+ yield();
+ rtw_hal_get_hwreg(padapter, HW_VAR_BCN_VALID, (u8 *)(&bxmitok));
+ poll++;
+ } while ((poll%10) != 0 && false == bxmitok && !padapter->bSurpriseRemoved && !padapter->bDriverStopped);
+
+ } while (false == bxmitok && issue < 100 && !padapter->bSurpriseRemoved && !padapter->bDriverStopped);
+
+ if (padapter->bSurpriseRemoved || padapter->bDriverStopped) {
+ return _FAIL;
+ }
+
+
+ if (false == bxmitok) {
+ DBG_871X("%s fail! %u ms\n", __func__, jiffies_to_msecs(jiffies - start));
+ return _FAIL;
+ } else{
+ unsigned long passing_time = jiffies_to_msecs(jiffies - start);
+
+ if (passing_time > 100 || issue > 3)
+ DBG_871X("%s success, issue:%d, poll:%d, %lu ms\n", __func__, issue, poll, passing_time);
+ /* else */
+ /* DBG_871X("%s success, issue:%d, poll:%d, %u ms\n", __func__, issue, poll, passing_time); */
+
+ return _SUCCESS;
+ }
+}
+
+/****************************************************************************
+
+Following are some utility functions for WiFi MLME
+
+*****************************************************************************/
+
+void site_survey(struct adapter *padapter)
+{
+ unsigned char survey_channel = 0, val8;
+ RT_SCAN_TYPE ScanType = SCAN_PASSIVE;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ u32 initialgain = 0;
+ u32 channel_scan_time_ms = 0;
+
+ {
+ struct rtw_ieee80211_channel *ch;
+ if (pmlmeext->sitesurvey_res.channel_idx < pmlmeext->sitesurvey_res.ch_num) {
+ ch = &pmlmeext->sitesurvey_res.ch[pmlmeext->sitesurvey_res.channel_idx];
+ survey_channel = ch->hw_value;
+ ScanType = (ch->flags & RTW_IEEE80211_CHAN_PASSIVE_SCAN) ? SCAN_PASSIVE : SCAN_ACTIVE;
+ }
+ }
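+	/* survey_channel stays 0 once channel_idx has reached ch_num, which drives the scan-complete branch below */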
+
+ DBG_871X(FUNC_ADPT_FMT" ch:%u (cnt:%u) at %dms, %c%c%c\n"
+ , FUNC_ADPT_ARG(padapter)
+ , survey_channel
+ , pmlmeext->sitesurvey_res.channel_idx
+ , jiffies_to_msecs(jiffies - padapter->mlmepriv.scan_start_time)
+ , ScanType?'A':'P', pmlmeext->sitesurvey_res.scan_mode?'A':'P'
+ , pmlmeext->sitesurvey_res.ssid[0].SsidLength?'S':' '
+ );
+#ifdef DBG_FIXED_CHAN
+ DBG_871X(FUNC_ADPT_FMT" fixed_chan:%u\n", pmlmeext->fixed_chan);
+#endif
+
+ if (survey_channel != 0) {
+ /* PAUSE 4-AC Queue when site_survey */
+ /* rtw_hal_get_hwreg(padapter, HW_VAR_TXPAUSE, (u8 *)(&val8)); */
+ /* val8 |= 0x0f; */
+ /* rtw_hal_set_hwreg(padapter, HW_VAR_TXPAUSE, (u8 *)(&val8)); */
+ if (pmlmeext->sitesurvey_res.channel_idx == 0) {
+#ifdef DBG_FIXED_CHAN
+ if (pmlmeext->fixed_chan != 0xff)
+ set_channel_bwmode(padapter, pmlmeext->fixed_chan, HAL_PRIME_CHNL_OFFSET_DONT_CARE, CHANNEL_WIDTH_20);
+ else
+#endif
+ set_channel_bwmode(padapter, survey_channel, HAL_PRIME_CHNL_OFFSET_DONT_CARE, CHANNEL_WIDTH_20);
+ } else{
+#ifdef DBG_FIXED_CHAN
+ if (pmlmeext->fixed_chan != 0xff)
+ SelectChannel(padapter, pmlmeext->fixed_chan);
+ else
+#endif
+ SelectChannel(padapter, survey_channel);
+ }
+
+ if (ScanType == SCAN_ACTIVE) { /* obey the channel plan setting... */
+ {
+ int i;
+ for (i = 0; i < RTW_SSID_SCAN_AMOUNT; i++) {
+ if (pmlmeext->sitesurvey_res.ssid[i].SsidLength) {
+					/* IOT issue: when wifi_spec is not set, send one probe req without WPS IE. */
+ if (padapter->registrypriv.wifi_spec)
+ issue_probereq(padapter, &(pmlmeext->sitesurvey_res.ssid[i]), NULL);
+ else
+ issue_probereq_ex(padapter, &(pmlmeext->sitesurvey_res.ssid[i]), NULL, 0, 0, 0, 0);
+ issue_probereq(padapter, &(pmlmeext->sitesurvey_res.ssid[i]), NULL);
+ }
+ }
+
+ if (pmlmeext->sitesurvey_res.scan_mode == SCAN_ACTIVE) {
+				/* IOT issue: when wifi_spec is not set, send one probe req without WPS IE. */
+ if (padapter->registrypriv.wifi_spec)
+ issue_probereq(padapter, NULL, NULL);
+ else
+ issue_probereq_ex(padapter, NULL, NULL, 0, 0, 0, 0);
+ issue_probereq(padapter, NULL, NULL);
+ }
+ }
+ }
+
+ channel_scan_time_ms = pmlmeext->chan_scan_time;
+
+ set_survey_timer(pmlmeext, channel_scan_time_ms);
+#if defined(CONFIG_SIGNAL_DISPLAY_DBM) && defined(CONFIG_BACKGROUND_NOISE_MONITOR)
+ {
+ struct noise_info info;
+ info.bPauseDIG = false;
+ info.IGIValue = 0;
+ info.max_time = channel_scan_time_ms/2;/* ms */
+ info.chan = survey_channel;
+ rtw_hal_set_odm_var(padapter, HAL_ODM_NOISE_MONITOR, &info, false);
+ }
+#endif
+
+ } else{
+
+ /* channel number is 0 or this channel is not valid. */
+
+ {
+ pmlmeext->sitesurvey_res.state = SCAN_COMPLETE;
+
+ /* switch back to the original channel */
+ /* SelectChannel(padapter, pmlmeext->cur_channel, pmlmeext->cur_ch_offset); */
+
+ set_channel_bwmode(padapter, pmlmeext->cur_channel, pmlmeext->cur_ch_offset, pmlmeext->cur_bwmode);
+
+ /* flush 4-AC Queue after site_survey */
+ /* val8 = 0; */
+ /* rtw_hal_set_hwreg(padapter, HW_VAR_TXPAUSE, (u8 *)(&val8)); */
+
+ /* config MSR */
+ Set_MSR(padapter, (pmlmeinfo->state & 0x3));
+
+ initialgain = 0xff; /* restore RX GAIN */
+ rtw_hal_set_hwreg(padapter, HW_VAR_INITIAL_GAIN, (u8 *)(&initialgain));
+ /* turn on dynamic functions */
+ Restore_DM_Func_Flag(padapter);
+ /* Switch_DM_Func(padapter, DYNAMIC_ALL_FUNC_ENABLE, true); */
+
+ if (is_client_associated_to_ap(padapter) == true)
+ issue_nulldata(padapter, NULL, 0, 3, 500);
+
+ val8 = 0; /* survey done */
+ rtw_hal_set_hwreg(padapter, HW_VAR_MLME_SITESURVEY, (u8 *)(&val8));
+
+ report_surveydone_event(padapter);
+
+ pmlmeext->chan_scan_time = SURVEY_TO;
+ pmlmeext->sitesurvey_res.state = SCAN_DISABLE;
+
+ issue_action_BSSCoexistPacket(padapter);
+ issue_action_BSSCoexistPacket(padapter);
+ issue_action_BSSCoexistPacket(padapter);
+ }
+ }
+
+ return;
+
+}
+
+/* collect bss info from Beacon and Probe request/response frames. */
+u8 collect_bss_info(struct adapter *padapter, union recv_frame *precv_frame, struct wlan_bssid_ex *bssid)
+{
+ int i;
+ u32 len;
+ u8 *p;
+ u16 val16, subtype;
+ u8 *pframe = precv_frame->u.hdr.rx_data;
+ u32 packet_len = precv_frame->u.hdr.len;
+ u8 ie_offset;
+ struct registry_priv *pregistrypriv = &padapter->registrypriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ __le32 le32_tmp;
+
+ len = packet_len - sizeof(struct ieee80211_hdr_3addr);
+
+ if (len > MAX_IE_SZ) {
+ /* DBG_871X("IE too long for survey event\n"); */
+ return _FAIL;
+ }
+
+ memset(bssid, 0, sizeof(struct wlan_bssid_ex));
+
+ subtype = GetFrameSubType(pframe);
+
+ if (subtype == WIFI_BEACON) {
+ bssid->Reserved[0] = 1;
+ ie_offset = _BEACON_IE_OFFSET_;
+ } else {
+		/* FIXME: more types */
+ if (subtype == WIFI_PROBERSP) {
+ ie_offset = _PROBERSP_IE_OFFSET_;
+ bssid->Reserved[0] = 3;
+ } else if (subtype == WIFI_PROBEREQ) {
+ ie_offset = _PROBEREQ_IE_OFFSET_;
+ bssid->Reserved[0] = 2;
+ } else {
+ bssid->Reserved[0] = 0;
+ ie_offset = _FIXED_IE_LENGTH_;
+ }
+ }
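+	/* bssid->Reserved[0] records the frame source: 1 = beacon, 2 = probe request, 3 = probe response, 0 = unknown */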
+
+ bssid->Length = sizeof(struct wlan_bssid_ex) - MAX_IE_SZ + len;
+
+ /* below is to copy the information element */
+ bssid->IELength = len;
+ memcpy(bssid->IEs, (pframe + sizeof(struct ieee80211_hdr_3addr)), bssid->IELength);
+
+ /* get the signal strength */
+	bssid->Rssi = precv_frame->u.hdr.attrib.phy_info.RecvSignalPower; /* in dBm, raw data */
+ bssid->PhyInfo.SignalQuality = precv_frame->u.hdr.attrib.phy_info.SignalQuality;/* in percentage */
+ bssid->PhyInfo.SignalStrength = precv_frame->u.hdr.attrib.phy_info.SignalStrength;/* in percentage */
+
+ /* checking SSID */
+ p = rtw_get_ie(bssid->IEs + ie_offset, _SSID_IE_, &len, bssid->IELength - ie_offset);
+ if (p == NULL) {
+ DBG_871X("marc: cannot find SSID for survey event\n");
+ return _FAIL;
+ }
+
+ if (*(p + 1)) {
+ if (len > NDIS_802_11_LENGTH_SSID) {
+ DBG_871X("%s()-%d: IE too long (%d) for survey event\n", __func__, __LINE__, len);
+ return _FAIL;
+ }
+ memcpy(bssid->Ssid.Ssid, (p + 2), *(p + 1));
+ bssid->Ssid.SsidLength = *(p + 1);
+ } else
+ bssid->Ssid.SsidLength = 0;
+
+ memset(bssid->SupportedRates, 0, NDIS_802_11_LENGTH_RATES_EX);
+
+ /* checking rate info... */
+ i = 0;
+ p = rtw_get_ie(bssid->IEs + ie_offset, _SUPPORTEDRATES_IE_, &len, bssid->IELength - ie_offset);
+ if (p != NULL) {
+ if (len > NDIS_802_11_LENGTH_RATES_EX) {
+ DBG_871X("%s()-%d: IE too long (%d) for survey event\n", __func__, __LINE__, len);
+ return _FAIL;
+ }
+ memcpy(bssid->SupportedRates, (p + 2), len);
+ i = len;
+ }
+
+ p = rtw_get_ie(bssid->IEs + ie_offset, _EXT_SUPPORTEDRATES_IE_, &len, bssid->IELength - ie_offset);
+ if (p != NULL) {
+ if (len > (NDIS_802_11_LENGTH_RATES_EX-i)) {
+ DBG_871X("%s()-%d: IE too long (%d) for survey event\n", __func__, __LINE__, len);
+ return _FAIL;
+ }
+ memcpy(bssid->SupportedRates + i, (p + 2), len);
+ }
+
+ bssid->NetworkTypeInUse = Ndis802_11OFDM24;
+
+ if (bssid->IELength < 12)
+ return _FAIL;
+
+ /* Checking for DSConfig */
+ p = rtw_get_ie(bssid->IEs + ie_offset, _DSSET_IE_, &len, bssid->IELength - ie_offset);
+
+ bssid->Configuration.DSConfig = 0;
+ bssid->Configuration.Length = 0;
+
+ if (p) {
+ bssid->Configuration.DSConfig = *(p + 2);
+ } else {
+		/* In 5G, some APs do not have a DSSET IE */
+ /* checking HT info for channel */
+ p = rtw_get_ie(bssid->IEs + ie_offset, _HT_ADD_INFO_IE_, &len, bssid->IELength - ie_offset);
+ if (p) {
+ struct HT_info_element *HT_info = (struct HT_info_element *)(p + 2);
+ bssid->Configuration.DSConfig = HT_info->primary_channel;
+ } else { /* use current channel */
+ bssid->Configuration.DSConfig = rtw_get_oper_ch(padapter);
+ }
+ }
+
+	/* the beacon interval field is only 2 bytes; clear le32_tmp so the upper bytes are not left uninitialized */
+	le32_tmp = 0;
+	memcpy(&le32_tmp, rtw_get_beacon_interval_from_ie(bssid->IEs), 2);
+	bssid->Configuration.BeaconPeriod = le32_to_cpu(le32_tmp);
+
+ val16 = rtw_get_capability((struct wlan_bssid_ex *)bssid);
+
+ if (val16 & BIT(0)) {
+ bssid->InfrastructureMode = Ndis802_11Infrastructure;
+ memcpy(bssid->MacAddress, GetAddr2Ptr(pframe), ETH_ALEN);
+ } else {
+ bssid->InfrastructureMode = Ndis802_11IBSS;
+ memcpy(bssid->MacAddress, GetAddr3Ptr(pframe), ETH_ALEN);
+ }
+
+ if (val16 & BIT(4))
+ bssid->Privacy = 1;
+ else
+ bssid->Privacy = 0;
+
+ bssid->Configuration.ATIMWindow = 0;
+
+ /* 20/40 BSS Coexistence check */
+ if ((pregistrypriv->wifi_spec == 1) && (false == pmlmeinfo->bwmode_updated)) {
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+
+ p = rtw_get_ie(bssid->IEs + ie_offset, _HT_CAPABILITY_IE_, &len, bssid->IELength - ie_offset);
+ if (p && len > 0) {
+ struct HT_caps_element *pHT_caps;
+ pHT_caps = (struct HT_caps_element *)(p + 2);
+
+ if (le16_to_cpu(pHT_caps->u.HT_cap_element.HT_caps_info) & BIT(14))
+ pmlmepriv->num_FortyMHzIntolerant++;
+ } else
+ pmlmepriv->num_sta_no_ht++;
+ }
+
+#ifdef CONFIG_INTEL_WIDI
+ /* process_intel_widi_query_or_tigger(padapter, bssid); */
+ if (process_intel_widi_query_or_tigger(padapter, bssid))
+ return _FAIL;
+#endif /* CONFIG_INTEL_WIDI */
+
+ #if defined(DBG_RX_SIGNAL_DISPLAY_SSID_MONITORED) & 1
+ if (strcmp(bssid->Ssid.Ssid, DBG_RX_SIGNAL_DISPLAY_SSID_MONITORED) == 0) {
+ DBG_871X("Receiving %s("MAC_FMT", DSConfig:%u) from ch%u with ss:%3u, sq:%3u, RawRSSI:%3ld\n"
+ , bssid->Ssid.Ssid, MAC_ARG(bssid->MacAddress), bssid->Configuration.DSConfig
+ , rtw_get_oper_ch(padapter)
+ , bssid->PhyInfo.SignalStrength, bssid->PhyInfo.SignalQuality, bssid->Rssi
+ );
+ }
+ #endif
+
+	/* mark bss info received on a nearby channel as SignalQuality 101 */
+ if (bssid->Configuration.DSConfig != rtw_get_oper_ch(padapter))
+ bssid->PhyInfo.SignalQuality = 101;
+
+ return _SUCCESS;
+}
+
+void start_create_ibss(struct adapter *padapter)
+{
+ unsigned short caps;
+ u8 val8;
+ u8 join_type;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct wlan_bssid_ex *pnetwork = (struct wlan_bssid_ex *)(&(pmlmeinfo->network));
+ pmlmeext->cur_channel = (u8)pnetwork->Configuration.DSConfig;
+ pmlmeinfo->bcn_interval = get_beacon_interval(pnetwork);
+
+ /* update wireless mode */
+ update_wireless_mode(padapter);
+
+	/* update capability */
+ caps = rtw_get_capability((struct wlan_bssid_ex *)pnetwork);
+ update_capinfo(padapter, caps);
+ if (caps&cap_IBSS) {/* adhoc master */
+ val8 = 0xcf;
+ rtw_hal_set_hwreg(padapter, HW_VAR_SEC_CFG, (u8 *)(&val8));
+
+ rtw_hal_set_hwreg(padapter, HW_VAR_DO_IQK, NULL);
+
+ /* switch channel */
+ /* SelectChannel(padapter, pmlmeext->cur_channel, HAL_PRIME_CHNL_OFFSET_DONT_CARE); */
+ set_channel_bwmode(padapter, pmlmeext->cur_channel, HAL_PRIME_CHNL_OFFSET_DONT_CARE, CHANNEL_WIDTH_20);
+
+ beacon_timing_control(padapter);
+
+ /* set msr to WIFI_FW_ADHOC_STATE */
+ pmlmeinfo->state = WIFI_FW_ADHOC_STATE;
+ Set_MSR(padapter, (pmlmeinfo->state & 0x3));
+
+ /* issue beacon */
+ if (send_beacon(padapter) == _FAIL) {
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("issuing beacon frame fail....\n"));
+
+ report_join_res(padapter, -1);
+ pmlmeinfo->state = WIFI_FW_NULL_STATE;
+ } else{
+ rtw_hal_set_hwreg(padapter, HW_VAR_BSSID, padapter->registrypriv.dev_network.MacAddress);
+ join_type = 0;
+ rtw_hal_set_hwreg(padapter, HW_VAR_MLME_JOIN, (u8 *)(&join_type));
+
+ report_join_res(padapter, 1);
+ pmlmeinfo->state |= WIFI_FW_ASSOC_SUCCESS;
+ rtw_indicate_connect(padapter);
+ }
+ } else{
+ DBG_871X("start_create_ibss, invalid cap:%x\n", caps);
+ return;
+ }
+ /* update bc/mc sta_info */
+ update_bmc_sta(padapter);
+
+}
+
+void start_clnt_join(struct adapter *padapter)
+{
+ unsigned short caps;
+ u8 val8;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct wlan_bssid_ex *pnetwork = (struct wlan_bssid_ex *)(&(pmlmeinfo->network));
+ int beacon_timeout;
+
+ /* update wireless mode */
+ update_wireless_mode(padapter);
+
+	/* update capability */
+ caps = rtw_get_capability((struct wlan_bssid_ex *)pnetwork);
+ update_capinfo(padapter, caps);
+ if (caps&cap_ESS) {
+ Set_MSR(padapter, WIFI_FW_STATION_STATE);
+
+ val8 = (pmlmeinfo->auth_algo == dot11AuthAlgrthm_8021X) ? 0xcc : 0xcf;
+
+ rtw_hal_set_hwreg(padapter, HW_VAR_SEC_CFG, (u8 *)(&val8));
+
+		/* Because the AP may not have received our previous deauth, */
+		/* it may: 1) not respond to auth or 2) deauth us after the link is complete. */
+		/* Issue a deauth before issuing auth to deal with this situation. */
+
+ /* Commented by Albert 2012/07/21 */
+ /* For the Win8 P2P connection, it will be hard to have a successful connection if this Wi-Fi doesn't connect to it. */
+ {
+			/* To avoid failing to connect to the AP during the resume process, change the retry count from 5 to 1 */
+ issue_deauth_ex(padapter, pnetwork->MacAddress, WLAN_REASON_DEAUTH_LEAVING, 1, 100);
+ }
+
+ /* here wait for receiving the beacon to start auth */
+ /* and enable a timer */
+ beacon_timeout = decide_wait_for_beacon_timeout(pmlmeinfo->bcn_interval);
+ set_link_timer(pmlmeext, beacon_timeout);
+ _set_timer(&padapter->mlmepriv.assoc_timer,
+ (REAUTH_TO * REAUTH_LIMIT) + (REASSOC_TO*REASSOC_LIMIT) + beacon_timeout);
+
+ pmlmeinfo->state = WIFI_FW_AUTH_NULL | WIFI_FW_STATION_STATE;
+ } else if (caps&cap_IBSS) { /* adhoc client */
+ Set_MSR(padapter, WIFI_FW_ADHOC_STATE);
+
+ val8 = 0xcf;
+ rtw_hal_set_hwreg(padapter, HW_VAR_SEC_CFG, (u8 *)(&val8));
+
+ beacon_timing_control(padapter);
+
+ pmlmeinfo->state = WIFI_FW_ADHOC_STATE;
+
+ report_join_res(padapter, 1);
+ } else{
+ /* DBG_871X("marc: invalid cap:%x\n", caps); */
+ return;
+ }
+
+}
+
+void start_clnt_auth(struct adapter *padapter)
+{
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+
+ del_timer_sync(&pmlmeext->link_timer);
+
+ pmlmeinfo->state &= (~WIFI_FW_AUTH_NULL);
+ pmlmeinfo->state |= WIFI_FW_AUTH_STATE;
+
+ pmlmeinfo->auth_seq = 1;
+ pmlmeinfo->reauth_count = 0;
+ pmlmeinfo->reassoc_count = 0;
+ pmlmeinfo->link_count = 0;
+ pmlmeext->retry = 0;
+
+
+ DBG_871X_LEVEL(_drv_always_, "start auth\n");
+ issue_auth(padapter, NULL, 0);
+
+ set_link_timer(pmlmeext, REAUTH_TO);
+
+}
+
+
+void start_clnt_assoc(struct adapter *padapter)
+{
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+
+ del_timer_sync(&pmlmeext->link_timer);
+
+ pmlmeinfo->state &= (~(WIFI_FW_AUTH_NULL | WIFI_FW_AUTH_STATE));
+ pmlmeinfo->state |= (WIFI_FW_AUTH_SUCCESS | WIFI_FW_ASSOC_STATE);
+
+ issue_assocreq(padapter);
+
+ set_link_timer(pmlmeext, REASSOC_TO);
+}
+
+unsigned int receive_disconnect(struct adapter *padapter, unsigned char *MacAddr, unsigned short reason)
+{
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+
+ /* check A3 */
+ if (!(!memcmp(MacAddr, get_my_bssid(&pmlmeinfo->network), ETH_ALEN)))
+ return _SUCCESS;
+
+ DBG_871X("%s\n", __func__);
+
+ if ((pmlmeinfo->state&0x03) == WIFI_FW_STATION_STATE) {
+ if (pmlmeinfo->state & WIFI_FW_ASSOC_SUCCESS) {
+ pmlmeinfo->state = WIFI_FW_NULL_STATE;
+ report_del_sta_event(padapter, MacAddr, reason);
+
+ } else if (pmlmeinfo->state & WIFI_FW_LINKING_STATE) {
+ pmlmeinfo->state = WIFI_FW_NULL_STATE;
+ report_join_res(padapter, -2);
+ }
+ }
+
+ return _SUCCESS;
+}
+
+static void process_80211d(struct adapter *padapter, struct wlan_bssid_ex *bssid)
+{
+ struct registry_priv *pregistrypriv;
+ struct mlme_ext_priv *pmlmeext;
+ RT_CHANNEL_INFO *chplan_new;
+ u8 channel;
+ u8 i;
+
+
+ pregistrypriv = &padapter->registrypriv;
+ pmlmeext = &padapter->mlmeextpriv;
+
+ /* Adjust channel plan by AP Country IE */
+ if (pregistrypriv->enable80211d &&
+ (!pmlmeext->update_channel_plan_by_ap_done)) {
+ u8 *ie, *p;
+ u32 len;
+ RT_CHANNEL_PLAN chplan_ap;
+ RT_CHANNEL_INFO chplan_sta[MAX_CHANNEL_NUM];
+ u8 country[4];
+ u8 fcn; /* first channel number */
+		u8 noc; /* number of channels */
+ u8 j, k;
+
+ ie = rtw_get_ie(bssid->IEs + _FIXED_IE_LENGTH_, _COUNTRY_IE_, &len, bssid->IELength - _FIXED_IE_LENGTH_);
+ if (!ie)
+ return;
+ if (len < 6)
+ return;
+
+ ie += 2;
+ p = ie;
+ ie += len;
+
+ memset(country, 0, 4);
+ memcpy(country, p, 3);
+ p += 3;
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_notice_,
+ ("%s: 802.11d country =%s\n", __func__, country));
+
+ i = 0;
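+		/* each country IE channel triplet is: first channel number, number of channels, max TX power (the power byte is skipped here) */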
+ while ((ie - p) >= 3) {
+ fcn = *(p++);
+ noc = *(p++);
+ p++;
+
+ for (j = 0; j < noc; j++) {
+ if (fcn <= 14)
+ channel = fcn + j; /* 2.4 GHz */
+ else
+ channel = fcn + j*4; /* 5 GHz */
+
+ chplan_ap.Channel[i++] = channel;
+ }
+ }
+ chplan_ap.Len = i;
+
+#ifdef DEBUG_RTL871X
+ i = 0;
+ DBG_871X("%s: AP[%s] channel plan {", __func__, bssid->Ssid.Ssid);
+ while ((i < chplan_ap.Len) && (chplan_ap.Channel[i] != 0)) {
+ DBG_8192C("%02d,", chplan_ap.Channel[i]);
+ i++;
+ }
+ DBG_871X("}\n");
+#endif
+
+ memcpy(chplan_sta, pmlmeext->channel_set, sizeof(chplan_sta));
+#ifdef DEBUG_RTL871X
+ i = 0;
+ DBG_871X("%s: STA channel plan {", __func__);
+ while ((i < MAX_CHANNEL_NUM) && (chplan_sta[i].ChannelNum != 0)) {
+ DBG_871X("%02d(%c),", chplan_sta[i].ChannelNum, chplan_sta[i].ScanType == SCAN_PASSIVE?'p':'a');
+ i++;
+ }
+ DBG_871X("}\n");
+#endif
+
+ memset(pmlmeext->channel_set, 0, sizeof(pmlmeext->channel_set));
+ chplan_new = pmlmeext->channel_set;
+
+ i = j = k = 0;
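+		/* merge the sorted STA and AP channel lists: channels present in both become active scan, STA-only channels become passive scan, AP-only channels are added as active scan */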
+ if (pregistrypriv->wireless_mode & WIRELESS_11G) {
+ do {
+ if ((i == MAX_CHANNEL_NUM) ||
+ (chplan_sta[i].ChannelNum == 0) ||
+ (chplan_sta[i].ChannelNum > 14))
+ break;
+
+ if ((j == chplan_ap.Len) || (chplan_ap.Channel[j] > 14))
+ break;
+
+ if (chplan_sta[i].ChannelNum == chplan_ap.Channel[j]) {
+ chplan_new[k].ChannelNum = chplan_ap.Channel[j];
+ chplan_new[k].ScanType = SCAN_ACTIVE;
+ i++;
+ j++;
+ k++;
+ } else if (chplan_sta[i].ChannelNum < chplan_ap.Channel[j]) {
+ chplan_new[k].ChannelNum = chplan_sta[i].ChannelNum;
+/* chplan_new[k].ScanType = chplan_sta[i].ScanType; */
+ chplan_new[k].ScanType = SCAN_PASSIVE;
+ i++;
+ k++;
+ } else if (chplan_sta[i].ChannelNum > chplan_ap.Channel[j]) {
+ chplan_new[k].ChannelNum = chplan_ap.Channel[j];
+ chplan_new[k].ScanType = SCAN_ACTIVE;
+ j++;
+ k++;
+ }
+ } while (1);
+
+			/* change channels the AP does not support to passive scan */
+ while ((i < MAX_CHANNEL_NUM) &&
+ (chplan_sta[i].ChannelNum != 0) &&
+ (chplan_sta[i].ChannelNum <= 14)) {
+
+ chplan_new[k].ChannelNum = chplan_sta[i].ChannelNum;
+/* chplan_new[k].ScanType = chplan_sta[i].ScanType; */
+ chplan_new[k].ScanType = SCAN_PASSIVE;
+ i++;
+ k++;
+ }
+
+			/* add channels the AP supports */
+ while ((j < chplan_ap.Len) && (chplan_ap.Channel[j] <= 14)) {
+ chplan_new[k].ChannelNum = chplan_ap.Channel[j];
+ chplan_new[k].ScanType = SCAN_ACTIVE;
+ j++;
+ k++;
+ }
+ } else{
+ /* keep original STA 2.4G channel plan */
+ while ((i < MAX_CHANNEL_NUM) &&
+ (chplan_sta[i].ChannelNum != 0) &&
+ (chplan_sta[i].ChannelNum <= 14)) {
+ chplan_new[k].ChannelNum = chplan_sta[i].ChannelNum;
+ chplan_new[k].ScanType = chplan_sta[i].ScanType;
+ i++;
+ k++;
+ }
+
+ /* skip AP 2.4G channel plan */
+ while ((j < chplan_ap.Len) && (chplan_ap.Channel[j] <= 14)) {
+ j++;
+ }
+ }
+
+ if (pregistrypriv->wireless_mode & WIRELESS_11A) {
+ do {
+ if ((i == MAX_CHANNEL_NUM) ||
+ (chplan_sta[i].ChannelNum == 0))
+ break;
+
+ if ((j == chplan_ap.Len) || (chplan_ap.Channel[j] == 0))
+ break;
+
+ if (chplan_sta[i].ChannelNum == chplan_ap.Channel[j]) {
+ chplan_new[k].ChannelNum = chplan_ap.Channel[j];
+ chplan_new[k].ScanType = SCAN_ACTIVE;
+ i++;
+ j++;
+ k++;
+ } else if (chplan_sta[i].ChannelNum < chplan_ap.Channel[j]) {
+ chplan_new[k].ChannelNum = chplan_sta[i].ChannelNum;
+/* chplan_new[k].ScanType = chplan_sta[i].ScanType; */
+ chplan_new[k].ScanType = SCAN_PASSIVE;
+ i++;
+ k++;
+ } else if (chplan_sta[i].ChannelNum > chplan_ap.Channel[j]) {
+ chplan_new[k].ChannelNum = chplan_ap.Channel[j];
+ chplan_new[k].ScanType = SCAN_ACTIVE;
+ j++;
+ k++;
+ }
+ } while (1);
+
+			/* change channels the AP does not support to passive scan */
+ while ((i < MAX_CHANNEL_NUM) && (chplan_sta[i].ChannelNum != 0)) {
+ chplan_new[k].ChannelNum = chplan_sta[i].ChannelNum;
+/* chplan_new[k].ScanType = chplan_sta[i].ScanType; */
+ chplan_new[k].ScanType = SCAN_PASSIVE;
+ i++;
+ k++;
+ }
+
+			/* add channels the AP supports */
+ while ((j < chplan_ap.Len) && (chplan_ap.Channel[j] != 0)) {
+ chplan_new[k].ChannelNum = chplan_ap.Channel[j];
+ chplan_new[k].ScanType = SCAN_ACTIVE;
+ j++;
+ k++;
+ }
+ } else{
+ /* keep original STA 5G channel plan */
+ while ((i < MAX_CHANNEL_NUM) && (chplan_sta[i].ChannelNum != 0)) {
+ chplan_new[k].ChannelNum = chplan_sta[i].ChannelNum;
+ chplan_new[k].ScanType = chplan_sta[i].ScanType;
+ i++;
+ k++;
+ }
+ }
+
+ pmlmeext->update_channel_plan_by_ap_done = 1;
+
+#ifdef DEBUG_RTL871X
+ k = 0;
+ DBG_871X("%s: new STA channel plan {", __func__);
+ while ((k < MAX_CHANNEL_NUM) && (chplan_new[k].ChannelNum != 0)) {
+ DBG_871X("%02d(%c),", chplan_new[k].ChannelNum, chplan_new[k].ScanType == SCAN_PASSIVE?'p':'c');
+ k++;
+ }
+ DBG_871X("}\n");
+#endif
+ }
+
+ /* If channel is used by AP, set channel scan type to active */
+ channel = bssid->Configuration.DSConfig;
+ chplan_new = pmlmeext->channel_set;
+ i = 0;
+ while ((i < MAX_CHANNEL_NUM) && (chplan_new[i].ChannelNum != 0)) {
+ if (chplan_new[i].ChannelNum == channel) {
+ if (chplan_new[i].ScanType == SCAN_PASSIVE) {
+				/* 5G Band 2, 3 (DFS) channels don't change to active scan */
+ if (channel >= 52 && channel <= 144)
+ break;
+
+ chplan_new[i].ScanType = SCAN_ACTIVE;
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_notice_,
+ ("%s: change channel %d scan type from passive to active\n",
+ __func__, channel));
+ }
+ break;
+ }
+ i++;
+ }
+}
+
+/****************************************************************************
+
+Following are the functions to report events
+
+*****************************************************************************/
+
+void report_survey_event(struct adapter *padapter, union recv_frame *precv_frame)
+{
+ struct cmd_obj *pcmd_obj;
+ u8 *pevtcmd;
+ u32 cmdsz;
+ struct survey_event *psurvey_evt;
+ struct C2HEvent_Header *pc2h_evt_hdr;
+ struct mlme_ext_priv *pmlmeext;
+ struct cmd_priv *pcmdpriv;
+ /* u8 *pframe = precv_frame->u.hdr.rx_data; */
+ /* uint len = precv_frame->u.hdr.len; */
+
+ if (!padapter)
+ return;
+
+ pmlmeext = &padapter->mlmeextpriv;
+ pcmdpriv = &padapter->cmdpriv;
+
+ pcmd_obj = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ if (pcmd_obj == NULL)
+ return;
+
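+	/* the event is packed as [C2HEvent_Header][survey_event] and queued as a _Set_MLME_EVT command */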
+ cmdsz = (sizeof(struct survey_event) + sizeof(struct C2HEvent_Header));
+ pevtcmd = (u8 *)rtw_zmalloc(cmdsz);
+ if (pevtcmd == NULL) {
+ kfree((u8 *)pcmd_obj);
+ return;
+ }
+
+ INIT_LIST_HEAD(&pcmd_obj->list);
+
+ pcmd_obj->cmdcode = GEN_CMD_CODE(_Set_MLME_EVT);
+ pcmd_obj->cmdsz = cmdsz;
+ pcmd_obj->parmbuf = pevtcmd;
+
+ pcmd_obj->rsp = NULL;
+ pcmd_obj->rspsz = 0;
+
+ pc2h_evt_hdr = (struct C2HEvent_Header *)(pevtcmd);
+ pc2h_evt_hdr->len = sizeof(struct survey_event);
+ pc2h_evt_hdr->ID = GEN_EVT_CODE(_Survey);
+ pc2h_evt_hdr->seq = atomic_inc_return(&pmlmeext->event_seq);
+
+ psurvey_evt = (struct survey_event *)(pevtcmd + sizeof(struct C2HEvent_Header));
+
+ if (collect_bss_info(padapter, precv_frame, (struct wlan_bssid_ex *)&psurvey_evt->bss) == _FAIL) {
+ kfree((u8 *)pcmd_obj);
+ kfree((u8 *)pevtcmd);
+ return;
+ }
+
+ process_80211d(padapter, &psurvey_evt->bss);
+
+ rtw_enqueue_cmd(pcmdpriv, pcmd_obj);
+
+ pmlmeext->sitesurvey_res.bss_cnt++;
+
+ return;
+
+}
+
+void report_surveydone_event(struct adapter *padapter)
+{
+ struct cmd_obj *pcmd_obj;
+ u8 *pevtcmd;
+ u32 cmdsz;
+ struct surveydone_event *psurveydone_evt;
+ struct C2HEvent_Header *pc2h_evt_hdr;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+
+ pcmd_obj = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ if (pcmd_obj == NULL)
+ return;
+
+ cmdsz = (sizeof(struct surveydone_event) + sizeof(struct C2HEvent_Header));
+ pevtcmd = (u8 *)rtw_zmalloc(cmdsz);
+ if (pevtcmd == NULL) {
+ kfree((u8 *)pcmd_obj);
+ return;
+ }
+
+ INIT_LIST_HEAD(&pcmd_obj->list);
+
+ pcmd_obj->cmdcode = GEN_CMD_CODE(_Set_MLME_EVT);
+ pcmd_obj->cmdsz = cmdsz;
+ pcmd_obj->parmbuf = pevtcmd;
+
+ pcmd_obj->rsp = NULL;
+ pcmd_obj->rspsz = 0;
+
+ pc2h_evt_hdr = (struct C2HEvent_Header *)(pevtcmd);
+ pc2h_evt_hdr->len = sizeof(struct surveydone_event);
+ pc2h_evt_hdr->ID = GEN_EVT_CODE(_SurveyDone);
+ pc2h_evt_hdr->seq = atomic_inc_return(&pmlmeext->event_seq);
+
+ psurveydone_evt = (struct surveydone_event *)(pevtcmd + sizeof(struct C2HEvent_Header));
+ psurveydone_evt->bss_cnt = pmlmeext->sitesurvey_res.bss_cnt;
+
+ DBG_871X("survey done event(%x) band:%d for "ADPT_FMT"\n", psurveydone_evt->bss_cnt, padapter->setband, ADPT_ARG(padapter));
+
+ rtw_enqueue_cmd(pcmdpriv, pcmd_obj);
+
+ return;
+
+}
+
+void report_join_res(struct adapter *padapter, int res)
+{
+ struct cmd_obj *pcmd_obj;
+ u8 *pevtcmd;
+ u32 cmdsz;
+ struct joinbss_event *pjoinbss_evt;
+ struct C2HEvent_Header *pc2h_evt_hdr;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+
+ pcmd_obj = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ if (pcmd_obj == NULL)
+ return;
+
+ cmdsz = (sizeof(struct joinbss_event) + sizeof(struct C2HEvent_Header));
+ pevtcmd = (u8 *)rtw_zmalloc(cmdsz);
+ if (pevtcmd == NULL) {
+ kfree((u8 *)pcmd_obj);
+ return;
+ }
+
+ INIT_LIST_HEAD(&pcmd_obj->list);
+
+ pcmd_obj->cmdcode = GEN_CMD_CODE(_Set_MLME_EVT);
+ pcmd_obj->cmdsz = cmdsz;
+ pcmd_obj->parmbuf = pevtcmd;
+
+ pcmd_obj->rsp = NULL;
+ pcmd_obj->rspsz = 0;
+
+ pc2h_evt_hdr = (struct C2HEvent_Header *)(pevtcmd);
+ pc2h_evt_hdr->len = sizeof(struct joinbss_event);
+ pc2h_evt_hdr->ID = GEN_EVT_CODE(_JoinBss);
+ pc2h_evt_hdr->seq = atomic_inc_return(&pmlmeext->event_seq);
+
+ pjoinbss_evt = (struct joinbss_event *)(pevtcmd + sizeof(struct C2HEvent_Header));
+ memcpy((unsigned char *)(&(pjoinbss_evt->network.network)), &(pmlmeinfo->network), sizeof(struct wlan_bssid_ex));
+ pjoinbss_evt->network.join_res = pjoinbss_evt->network.aid = res;
+
+ DBG_871X("report_join_res(%d)\n", res);
+
+
+ rtw_joinbss_event_prehandle(padapter, (u8 *)&pjoinbss_evt->network);
+
+
+ rtw_enqueue_cmd(pcmdpriv, pcmd_obj);
+
+ return;
+
+}
+
+void report_wmm_edca_update(struct adapter *padapter)
+{
+ struct cmd_obj *pcmd_obj;
+ u8 *pevtcmd;
+ u32 cmdsz;
+ struct wmm_event *pwmm_event;
+ struct C2HEvent_Header *pc2h_evt_hdr;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+
+ pcmd_obj = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ if (pcmd_obj == NULL)
+ return;
+
+ cmdsz = (sizeof(struct wmm_event) + sizeof(struct C2HEvent_Header));
+ pevtcmd = (u8 *)rtw_zmalloc(cmdsz);
+ if (pevtcmd == NULL) {
+ kfree((u8 *)pcmd_obj);
+ return;
+ }
+
+ INIT_LIST_HEAD(&pcmd_obj->list);
+
+ pcmd_obj->cmdcode = GEN_CMD_CODE(_Set_MLME_EVT);
+ pcmd_obj->cmdsz = cmdsz;
+ pcmd_obj->parmbuf = pevtcmd;
+
+ pcmd_obj->rsp = NULL;
+ pcmd_obj->rspsz = 0;
+
+ pc2h_evt_hdr = (struct C2HEvent_Header *)(pevtcmd);
+ pc2h_evt_hdr->len = sizeof(struct wmm_event);
+ pc2h_evt_hdr->ID = GEN_EVT_CODE(_WMM);
+ pc2h_evt_hdr->seq = atomic_inc_return(&pmlmeext->event_seq);
+
+ pwmm_event = (struct wmm_event *)(pevtcmd + sizeof(struct C2HEvent_Header));
+ pwmm_event->wmm = 0;
+
+ rtw_enqueue_cmd(pcmdpriv, pcmd_obj);
+
+ return;
+
+}
+
+void report_del_sta_event(struct adapter *padapter, unsigned char *MacAddr, unsigned short reason)
+{
+ struct cmd_obj *pcmd_obj;
+ u8 *pevtcmd;
+ u32 cmdsz;
+ struct sta_info *psta;
+ int mac_id;
+ struct stadel_event *pdel_sta_evt;
+ struct C2HEvent_Header *pc2h_evt_hdr;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+
+ pcmd_obj = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ if (pcmd_obj == NULL) {
+ return;
+ }
+
+ cmdsz = (sizeof(struct stadel_event) + sizeof(struct C2HEvent_Header));
+ pevtcmd = (u8 *)rtw_zmalloc(cmdsz);
+ if (pevtcmd == NULL) {
+ kfree((u8 *)pcmd_obj);
+ return;
+ }
+
+ INIT_LIST_HEAD(&pcmd_obj->list);
+
+ pcmd_obj->cmdcode = GEN_CMD_CODE(_Set_MLME_EVT);
+ pcmd_obj->cmdsz = cmdsz;
+ pcmd_obj->parmbuf = pevtcmd;
+
+ pcmd_obj->rsp = NULL;
+ pcmd_obj->rspsz = 0;
+
+ pc2h_evt_hdr = (struct C2HEvent_Header *)(pevtcmd);
+ pc2h_evt_hdr->len = sizeof(struct stadel_event);
+ pc2h_evt_hdr->ID = GEN_EVT_CODE(_DelSTA);
+ pc2h_evt_hdr->seq = atomic_inc_return(&pmlmeext->event_seq);
+
+ pdel_sta_evt = (struct stadel_event *)(pevtcmd + sizeof(struct C2HEvent_Header));
+ memcpy((unsigned char *)(&(pdel_sta_evt->macaddr)), MacAddr, ETH_ALEN);
+ memcpy((unsigned char *)(pdel_sta_evt->rsvd), (unsigned char *)(&reason), 2);
+
+
+ psta = rtw_get_stainfo(&padapter->stapriv, MacAddr);
+ if (psta)
+ mac_id = (int)psta->mac_id;
+ else
+ mac_id = (-1);
+
+ pdel_sta_evt->mac_id = mac_id;
+
+ DBG_871X("report_del_sta_event: delete STA, mac_id =%d\n", mac_id);
+
+ rtw_enqueue_cmd(pcmdpriv, pcmd_obj);
+
+ return;
+}
+
+void report_add_sta_event(struct adapter *padapter, unsigned char *MacAddr, int cam_idx)
+{
+ struct cmd_obj *pcmd_obj;
+ u8 *pevtcmd;
+ u32 cmdsz;
+ struct stassoc_event *padd_sta_evt;
+ struct C2HEvent_Header *pc2h_evt_hdr;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+
+ pcmd_obj = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ if (pcmd_obj == NULL)
+ return;
+
+ cmdsz = (sizeof(struct stassoc_event) + sizeof(struct C2HEvent_Header));
+ pevtcmd = (u8 *)rtw_zmalloc(cmdsz);
+ if (pevtcmd == NULL) {
+ kfree((u8 *)pcmd_obj);
+ return;
+ }
+
+ INIT_LIST_HEAD(&pcmd_obj->list);
+
+ pcmd_obj->cmdcode = GEN_CMD_CODE(_Set_MLME_EVT);
+ pcmd_obj->cmdsz = cmdsz;
+ pcmd_obj->parmbuf = pevtcmd;
+
+ pcmd_obj->rsp = NULL;
+ pcmd_obj->rspsz = 0;
+
+ pc2h_evt_hdr = (struct C2HEvent_Header *)(pevtcmd);
+ pc2h_evt_hdr->len = sizeof(struct stassoc_event);
+ pc2h_evt_hdr->ID = GEN_EVT_CODE(_AddSTA);
+ pc2h_evt_hdr->seq = atomic_inc_return(&pmlmeext->event_seq);
+
+ padd_sta_evt = (struct stassoc_event *)(pevtcmd + sizeof(struct C2HEvent_Header));
+ memcpy((unsigned char *)(&(padd_sta_evt->macaddr)), MacAddr, ETH_ALEN);
+ padd_sta_evt->cam_id = cam_idx;
+
+ DBG_871X("report_add_sta_event: add STA\n");
+
+ rtw_enqueue_cmd(pcmdpriv, pcmd_obj);
+
+ return;
+}
+
+
+bool rtw_port_switch_chk(struct adapter *adapter)
+{
+ bool switch_needed = false;
+ return switch_needed;
+}
+
+/****************************************************************************
+
+Following are the event callback functions
+
+*****************************************************************************/
+
+/* for sta/adhoc mode */
+void update_sta_info(struct adapter *padapter, struct sta_info *psta)
+{
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+
+ /* ERP */
+ VCS_update(padapter, psta);
+
+ /* HT */
+ if (pmlmepriv->htpriv.ht_option) {
+ psta->htpriv.ht_option = true;
+
+ psta->htpriv.ampdu_enable = pmlmepriv->htpriv.ampdu_enable;
+
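+		/* A-MPDU parameters field: bits 0-1 max RX A-MPDU factor, bits 2-4 minimum MPDU start spacing (density) */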
+ psta->htpriv.rx_ampdu_min_spacing = (pmlmeinfo->HT_caps.u.HT_cap_element.AMPDU_para&IEEE80211_HT_CAP_AMPDU_DENSITY)>>2;
+
+ if (support_short_GI(padapter, &(pmlmeinfo->HT_caps), CHANNEL_WIDTH_20))
+ psta->htpriv.sgi_20m = true;
+
+ if (support_short_GI(padapter, &(pmlmeinfo->HT_caps), CHANNEL_WIDTH_40))
+ psta->htpriv.sgi_40m = true;
+
+ psta->qos_option = true;
+
+ psta->htpriv.ldpc_cap = pmlmepriv->htpriv.ldpc_cap;
+ psta->htpriv.stbc_cap = pmlmepriv->htpriv.stbc_cap;
+ psta->htpriv.beamform_cap = pmlmepriv->htpriv.beamform_cap;
+
+ memcpy(&psta->htpriv.ht_cap, &pmlmeinfo->HT_caps, sizeof(struct rtw_ieee80211_ht_cap));
+ } else{
+ psta->htpriv.ht_option = false;
+
+ psta->htpriv.ampdu_enable = false;
+
+ psta->htpriv.sgi_20m = false;
+ psta->htpriv.sgi_40m = false;
+ psta->qos_option = false;
+
+ }
+
+ psta->htpriv.ch_offset = pmlmeext->cur_ch_offset;
+
+ psta->htpriv.agg_enable_bitmap = 0x0;/* reset */
+ psta->htpriv.candidate_tid_bitmap = 0x0;/* reset */
+
+ psta->bw_mode = pmlmeext->cur_bwmode;
+
+ /* QoS */
+ if (pmlmepriv->qospriv.qos_option)
+ psta->qos_option = true;
+
+ update_ldpc_stbc_cap(psta);
+
+ spin_lock_bh(&psta->lock);
+ psta->state = _FW_LINKED;
+ spin_unlock_bh(&psta->lock);
+
+}
+
+static void rtw_mlmeext_disconnect(struct adapter *padapter)
+{
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct wlan_bssid_ex *pnetwork = (struct wlan_bssid_ex *)(&(pmlmeinfo->network));
+ u8 state_backup = (pmlmeinfo->state&0x03);
+
+ /* set_opmode_cmd(padapter, infra_client_with_mlme); */
+
+ /*
+	 * For safety, prevent the macid from staying asleep.
+	 * If we can be sure all power mode enter/leave calls are paired,
+ * this check can be removed.
+ * Lucas@20131113
+ */
+ /* wakeup macid after disconnect. */
+ {
+ struct sta_info *psta;
+ psta = rtw_get_stainfo(&padapter->stapriv, get_my_bssid(pnetwork));
+ if (psta)
+ rtw_hal_macid_wakeup(padapter, psta->mac_id);
+ }
+
+ rtw_hal_set_hwreg(padapter, HW_VAR_MLME_DISCONNECT, NULL);
+ rtw_hal_set_hwreg(padapter, HW_VAR_BSSID, null_addr);
+
+ /* set MSR to no link state -> infra. mode */
+ Set_MSR(padapter, _HW_STATE_STATION_);
+
+ pmlmeinfo->state = WIFI_FW_NULL_STATE;
+
+ if (state_backup == WIFI_FW_STATION_STATE) {
+ if (rtw_port_switch_chk(padapter) == true) {
+ rtw_hal_set_hwreg(padapter, HW_VAR_PORT_SWITCH, NULL);
+ {
+ struct adapter *port0_iface = dvobj_get_port0_adapter(adapter_to_dvobj(padapter));
+ if (port0_iface)
+ rtw_lps_ctrl_wk_cmd(port0_iface, LPS_CTRL_CONNECT, 0);
+ }
+ }
+ }
+
+ /* switch to the 20 MHz mode after disconnect */
+ pmlmeext->cur_bwmode = CHANNEL_WIDTH_20;
+ pmlmeext->cur_ch_offset = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
+
+ set_channel_bwmode(padapter, pmlmeext->cur_channel, pmlmeext->cur_ch_offset, pmlmeext->cur_bwmode);
+
+ flush_all_cam_entry(padapter);
+
+ del_timer_sync(&pmlmeext->link_timer);
+
+ /* pmlmepriv->LinkDetectInfo.TrafficBusyState = false; */
+ pmlmepriv->LinkDetectInfo.TrafficTransitionCount = 0;
+ pmlmepriv->LinkDetectInfo.LowPowerTransitionCount = 0;
+
+}
+
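+/*
+ * Handle the join result. On failure only the hardware join state and BSSID
+ * are cleared; on success the basic rates, beacon interval, capability,
+ * WMM/EDCA and HT parameters are programmed, the per-station rate is set and
+ * the station's macid is woken up so data frames can flow.
+ */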
+void mlmeext_joinbss_event_callback(struct adapter *padapter, int join_res)
+{
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct wlan_bssid_ex *cur_network = &(pmlmeinfo->network);
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ u8 join_type;
+ struct sta_info *psta;
+ if (join_res < 0) {
+ join_type = 1;
+ rtw_hal_set_hwreg(padapter, HW_VAR_MLME_JOIN, (u8 *)(&join_type));
+ rtw_hal_set_hwreg(padapter, HW_VAR_BSSID, null_addr);
+
+ goto exit_mlmeext_joinbss_event_callback;
+ }
+
+ if ((pmlmeinfo->state&0x03) == WIFI_FW_ADHOC_STATE)
+ /* update bc/mc sta_info */
+ update_bmc_sta(padapter);
+
+
+ /* turn on dynamic functions */
+ Switch_DM_Func(padapter, DYNAMIC_ALL_FUNC_ENABLE, true);
+
+ /* update IOT-related issues */
+ update_IOT_info(padapter);
+
+ rtw_hal_set_hwreg(padapter, HW_VAR_BASIC_RATE, cur_network->SupportedRates);
+
+ /* BCN interval */
+ rtw_hal_set_hwreg(padapter, HW_VAR_BEACON_INTERVAL, (u8 *)(&pmlmeinfo->bcn_interval));
+
+ /* update capability */
+ update_capinfo(padapter, pmlmeinfo->capability);
+
+ /* WMM, Update EDCA param */
+ WMMOnAssocRsp(padapter);
+
+ /* HT */
+ HTOnAssocRsp(padapter);
+
+ /* Set cur_channel&cur_bwmode&cur_ch_offset */
+ set_channel_bwmode(padapter, pmlmeext->cur_channel, pmlmeext->cur_ch_offset, pmlmeext->cur_bwmode);
+
+ psta = rtw_get_stainfo(pstapriv, cur_network->MacAddress);
+ if (psta) { /* only for infra. mode */
+
+ pmlmeinfo->FW_sta_info[psta->mac_id].psta = psta;
+
+ /* DBG_871X("set_sta_rate\n"); */
+
+ psta->wireless_mode = pmlmeext->cur_wireless_mode;
+
+ /* set per sta rate after updating HT cap. */
+ set_sta_rate(padapter, psta);
+
+ rtw_sta_media_status_rpt(padapter, psta, 1);
+
+ /* wake up the macid after joining the BSS successfully to ensure
+    that subsequent data frames can be sent out normally */
+ rtw_hal_macid_wakeup(padapter, psta->mac_id);
+ }
+
+ if (rtw_port_switch_chk(padapter) == true)
+ rtw_hal_set_hwreg(padapter, HW_VAR_PORT_SWITCH, NULL);
+
+ join_type = 2;
+ rtw_hal_set_hwreg(padapter, HW_VAR_MLME_JOIN, (u8 *)(&join_type));
+
+ if ((pmlmeinfo->state&0x03) == WIFI_FW_STATION_STATE) {
+ /* correcting TSF */
+ correct_TSF(padapter, pmlmeext);
+
+ /* set_link_timer(pmlmeext, DISCONNECT_TO); */
+ }
+
+ if (get_iface_type(padapter) == IFACE_PORT0)
+ rtw_lps_ctrl_wk_cmd(padapter, LPS_CTRL_CONNECT, 0);
+
+exit_mlmeext_joinbss_event_callback:
+
+ DBG_871X("=>%s\n", __func__);
+
+}
+
+/* currently only adhoc mode will go here */
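+/*
+ * For an ad-hoc client this corrects the TSF and starts beaconing before
+ * recording the new peer, refreshing its rate set and rate-adaptive entry.
+ */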
+void mlmeext_sta_add_event_callback(struct adapter *padapter, struct sta_info *psta)
+{
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ u8 join_type;
+
+ DBG_871X("%s\n", __func__);
+
+ if ((pmlmeinfo->state&0x03) == WIFI_FW_ADHOC_STATE) {
+ if (pmlmeinfo->state & WIFI_FW_ASSOC_SUCCESS) { /* adhoc master or sta_count>1 */
+
+ /* nothing to do */
+ } else { /* adhoc client */
+ /* update TSF Value */
+ /* update_TSF(pmlmeext, pframe, len); */
+
+ /* correcting TSF */
+ correct_TSF(padapter, pmlmeext);
+
+ /* start beacon */
+ if (send_beacon(padapter) == _FAIL) {
+ pmlmeinfo->FW_sta_info[psta->mac_id].status = 0;
+
+ pmlmeinfo->state ^= WIFI_FW_ADHOC_STATE;
+
+ return;
+ }
+
+ pmlmeinfo->state |= WIFI_FW_ASSOC_SUCCESS;
+
+ }
+
+ join_type = 2;
+ rtw_hal_set_hwreg(padapter, HW_VAR_MLME_JOIN, (u8 *)(&join_type));
+ }
+
+ pmlmeinfo->FW_sta_info[psta->mac_id].psta = psta;
+
+ psta->bssratelen = rtw_get_rateset_len(pmlmeinfo->FW_sta_info[psta->mac_id].SupportedRates);
+ memcpy(psta->bssrateset, pmlmeinfo->FW_sta_info[psta->mac_id].SupportedRates, psta->bssratelen);
+
+ /* update adhoc sta_info */
+ update_sta_info(padapter, psta);
+
+ rtw_hal_update_sta_rate_mask(padapter, psta);
+
+ /* ToDo: HT for Ad-hoc */
+ psta->wireless_mode = rtw_check_network_type(psta->bssrateset, psta->bssratelen, pmlmeext->cur_channel);
+ psta->raid = rtw_hal_networktype_to_raid(padapter, psta);
+
+ /* rate adaptive */
+ Update_RA_Entry(padapter, psta);
+}
+
+void mlmeext_sta_del_event_callback(struct adapter *padapter)
+{
+ if (is_client_associated_to_ap(padapter) || is_IBSS_empty(padapter))
+ rtw_mlmeext_disconnect(padapter);
+}
+
+/****************************************************************************
+
+Following are the functions for the timer handlers
+
+*****************************************************************************/
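+/*
+ * When bLinkInfoDump is enabled, dump the undecorated smoothed PWDB (signal
+ * strength) of the AP in station mode, or of every associated station in AP
+ * mode, plus per-macid rate-adaptation and rx info from the hal layer.
+ */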
+void _linked_info_dump(struct adapter *padapter)
+{
+ int i;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ int UndecoratedSmoothedPWDB;
+ struct dvobj_priv *pdvobj = adapter_to_dvobj(padapter);
+
+ if (padapter->bLinkInfoDump) {
+
+ DBG_871X("\n ============["ADPT_FMT"] linked status check ===================\n", ADPT_ARG(padapter));
+
+ if ((pmlmeinfo->state&0x03) == WIFI_FW_STATION_STATE) {
+ rtw_hal_get_def_var(padapter, HAL_DEF_UNDERCORATEDSMOOTHEDPWDB, &UndecoratedSmoothedPWDB);
+
+ DBG_871X("AP[" MAC_FMT "] - UndecoratedSmoothedPWDB:%d\n",
+ MAC_ARG(padapter->mlmepriv.cur_network.network.MacAddress), UndecoratedSmoothedPWDB);
+ } else if ((pmlmeinfo->state&0x03) == _HW_STATE_AP_) {
+ struct list_head *phead, *plist;
+
+ struct sta_info *psta = NULL;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+
+ spin_lock_bh(&pstapriv->asoc_list_lock);
+ phead = &pstapriv->asoc_list;
+ plist = get_next(phead);
+ while (phead != plist) {
+ psta = LIST_CONTAINOR(plist, struct sta_info, asoc_list);
+ plist = get_next(plist);
+
+ DBG_871X("STA[" MAC_FMT "]:UndecoratedSmoothedPWDB:%d\n",
+ MAC_ARG(psta->hwaddr), psta->rssi_stat.UndecoratedSmoothedPWDB);
+ }
+ spin_unlock_bh(&pstapriv->asoc_list_lock);
+
+ }
+ for (i = 0; i < NUM_STA; i++) {
+ if (pdvobj->macid[i] == true) {
+ if (i != 1) /* skip bc/mc sta */
+ /* tx info ============ */
+ rtw_hal_get_def_var(padapter, HW_DEF_RA_INFO_DUMP, &i);
+ }
+ }
+ rtw_hal_set_def_var(padapter, HAL_DEF_DBG_RX_INFO_DUMP, NULL);
+
+
+ }
+
+
+}
+
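+/*
+ * The AP is considered alive if any data, beacon or probe-response frames
+ * were received from it since the previous check; the per-station rx counters
+ * are then snapshotted for the next round.
+ */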
+static u8 chk_ap_is_alive(struct adapter *padapter, struct sta_info *psta)
+{
+ u8 ret = false;
+
+ #ifdef DBG_EXPIRATION_CHK
+ DBG_871X(FUNC_ADPT_FMT" rx:"STA_PKTS_FMT", beacon:%llu, probersp_to_self:%llu"
+ /*", probersp_bm:%llu, probersp_uo:%llu, probereq:%llu, BI:%u"*/
+ ", retry:%u\n"
+ , FUNC_ADPT_ARG(padapter)
+ , STA_RX_PKTS_DIFF_ARG(psta)
+ , psta->sta_stats.rx_beacon_pkts - psta->sta_stats.last_rx_beacon_pkts
+ , psta->sta_stats.rx_probersp_pkts - psta->sta_stats.last_rx_probersp_pkts
+ /*, psta->sta_stats.rx_probersp_bm_pkts - psta->sta_stats.last_rx_probersp_bm_pkts
+ , psta->sta_stats.rx_probersp_uo_pkts - psta->sta_stats.last_rx_probersp_uo_pkts
+ , psta->sta_stats.rx_probereq_pkts - psta->sta_stats.last_rx_probereq_pkts
+ , pmlmeinfo->bcn_interval*/
+ , pmlmeext->retry
+ );
+
+ DBG_871X(FUNC_ADPT_FMT" tx_pkts:%llu, link_count:%u\n", FUNC_ADPT_ARG(padapter)
+ , padapter->xmitpriv.tx_pkts
+ , pmlmeinfo->link_count
+ );
+ #endif
+
+ if ((sta_rx_data_pkts(psta) == sta_last_rx_data_pkts(psta))
+ && sta_rx_beacon_pkts(psta) == sta_last_rx_beacon_pkts(psta)
+ && sta_rx_probersp_pkts(psta) == sta_last_rx_probersp_pkts(psta)
+ ) {
+ ret = false;
+ } else {
+ ret = true;
+ }
+
+ sta_update_last_rx_pkts(psta);
+
+ return ret;
+}
+
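+/*
+ * Periodic keep-alive check. When associated to an AP, rx silence triggers
+ * unicast probe requests and tx silence triggers a null-data frame; after
+ * more than rx_chk_limit silent rounds the connection is reported as lost.
+ * In IBSS mode, peers that stay silent for several rounds are deleted.
+ */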
+void linked_status_chk(struct adapter *padapter)
+{
+ u32 i;
+ struct sta_info *psta;
+ struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct sta_priv *pstapriv = &padapter->stapriv;
+
+
+ if (is_client_associated_to_ap(padapter)) {
+ /* linked infrastructure client mode */
+
+ int tx_chk = _SUCCESS, rx_chk = _SUCCESS;
+ int rx_chk_limit;
+ int link_count_limit;
+
+ #if defined(DBG_ROAMING_TEST)
+ rx_chk_limit = 1;
+ #else
+ rx_chk_limit = 8;
+ #endif
+ link_count_limit = 7; /* 16 sec */
+
+ /* Marked by Kurt 20130715 */
+ /* For WiDi 3.5 and later, the WiDi sink is not asked to roam, so the rx limit cannot be checked that strictly. */
+ /* todo: check why rx_chk would be false while under a miracast session */
+ /* ifdef CONFIG_INTEL_WIDI */
+ /* if (padapter->mlmepriv.widi_state != INTEL_WIDI_STATE_NONE) */
+ /* rx_chk_limit = 1; */
+ /* endif */
+
+ psta = rtw_get_stainfo(pstapriv, pmlmeinfo->network.MacAddress);
+ if (psta != NULL) {
+ if (chk_ap_is_alive(padapter, psta) == false)
+ rx_chk = _FAIL;
+
+ if (pxmitpriv->last_tx_pkts == pxmitpriv->tx_pkts)
+ tx_chk = _FAIL;
+
+ {
+ if (rx_chk != _SUCCESS) {
+ if (pmlmeext->retry == 0) {
+ #ifdef DBG_EXPIRATION_CHK
+ DBG_871X("issue_probereq to trigger probersp, retry =%d\n", pmlmeext->retry);
+ #endif
+ issue_probereq_ex(padapter, &pmlmeinfo->network.Ssid, pmlmeinfo->network.MacAddress, 0, 0, 0, 0);
+ issue_probereq_ex(padapter, &pmlmeinfo->network.Ssid, pmlmeinfo->network.MacAddress, 0, 0, 0, 0);
+ issue_probereq_ex(padapter, &pmlmeinfo->network.Ssid, pmlmeinfo->network.MacAddress, 0, 0, 0, 0);
+ }
+ }
+
+ if (tx_chk != _SUCCESS && pmlmeinfo->link_count++ == link_count_limit) {
+ #ifdef DBG_EXPIRATION_CHK
+ DBG_871X("%s issue_nulldata 0\n", __func__);
+ #endif
+ tx_chk = issue_nulldata_in_interrupt(padapter, NULL);
+ }
+ }
+
+ if (rx_chk == _FAIL) {
+ pmlmeext->retry++;
+ if (pmlmeext->retry > rx_chk_limit) {
+ DBG_871X_LEVEL(_drv_always_, FUNC_ADPT_FMT" disconnect or roaming\n",
+ FUNC_ADPT_ARG(padapter));
+ receive_disconnect(padapter, pmlmeinfo->network.MacAddress
+ , WLAN_REASON_EXPIRATION_CHK);
+ return;
+ }
+ } else {
+ pmlmeext->retry = 0;
+ }
+
+ if (tx_chk == _FAIL) {
+ pmlmeinfo->link_count %= (link_count_limit+1);
+ } else {
+ pxmitpriv->last_tx_pkts = pxmitpriv->tx_pkts;
+ pmlmeinfo->link_count = 0;
+ }
+
+ } /* end of if ((psta = rtw_get_stainfo(pstapriv, passoc_res->network.MacAddress)) != NULL) */
+ } else if (is_client_associated_to_ibss(padapter)) {
+ /* linked IBSS mode */
+ /* for each assoc list entry to check the rx pkt counter */
+ for (i = IBSS_START_MAC_ID; i < NUM_STA; i++) {
+ if (pmlmeinfo->FW_sta_info[i].status == 1) {
+ psta = pmlmeinfo->FW_sta_info[i].psta;
+
+ if (NULL == psta)
+ continue;
+
+ if (pmlmeinfo->FW_sta_info[i].rx_pkt == sta_rx_pkts(psta)) {
+
+ if (pmlmeinfo->FW_sta_info[i].retry < 3) {
+ pmlmeinfo->FW_sta_info[i].retry++;
+ } else {
+ pmlmeinfo->FW_sta_info[i].retry = 0;
+ pmlmeinfo->FW_sta_info[i].status = 0;
+ report_del_sta_event(padapter, psta->hwaddr
+ , 65535/* indicate disconnect caused by no rx */
+ );
+ }
+ } else {
+ pmlmeinfo->FW_sta_info[i].retry = 0;
+ pmlmeinfo->FW_sta_info[i].rx_pkt = (u32)sta_rx_pkts(psta);
+ }
+ }
+ }
+
+ /* set_link_timer(pmlmeext, DISCONNECT_TO); */
+
+ }
+
+}
+
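+/*
+ * Survey timer: while a scan is in progress, advance the channel index (or
+ * jump to the last channel if the scan was aborted) and queue the next
+ * _SiteSurvey command.
+ */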
+void survey_timer_hdl(struct adapter *padapter)
+{
+ struct cmd_obj *ph2c;
+ struct sitesurvey_parm *psurveyPara;
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+
+ /* DBG_871X("marc: survey timer\n"); */
+
+ /* issue rtw_sitesurvey_cmd */
+ if (pmlmeext->sitesurvey_res.state > SCAN_START) {
+ if (pmlmeext->sitesurvey_res.state == SCAN_PROCESS) {
+ pmlmeext->sitesurvey_res.channel_idx++;
+ }
+
+ if (pmlmeext->scan_abort == true) {
+ pmlmeext->sitesurvey_res.channel_idx = pmlmeext->sitesurvey_res.ch_num;
+ DBG_871X("%s idx:%d\n", __func__
+ , pmlmeext->sitesurvey_res.channel_idx
+ );
+
+ pmlmeext->scan_abort = false;/* reset */
+ }
+
+ ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ if (ph2c == NULL) {
+ goto exit_survey_timer_hdl;
+ }
+
+ psurveyPara = (struct sitesurvey_parm *)rtw_zmalloc(sizeof(struct sitesurvey_parm));
+ if (psurveyPara == NULL) {
+ kfree((unsigned char *)ph2c);
+ goto exit_survey_timer_hdl;
+ }
+
+ init_h2fwcmd_w_parm_no_rsp(ph2c, psurveyPara, GEN_CMD_CODE(_SiteSurvey));
+ rtw_enqueue_cmd(pcmdpriv, ph2c);
+ }
+
+
+exit_survey_timer_hdl:
+
+ return;
+}
+
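+/*
+ * Link timer: handles the no-beacon, authentication and association timeouts,
+ * retrying auth/assoc up to REAUTH_LIMIT/REASSOC_LIMIT times before reporting
+ * the join as failed.
+ */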
+void link_timer_hdl(struct adapter *padapter)
+{
+ /* static unsigned int rx_pkt = 0; */
+ /* static u64 tx_cnt = 0; */
+ /* struct xmit_priv *pxmitpriv = &(padapter->xmitpriv); */
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ /* struct sta_priv *pstapriv = &padapter->stapriv; */
+
+
+ if (pmlmeinfo->state & WIFI_FW_AUTH_NULL) {
+ DBG_871X("link_timer_hdl:no beacon while connecting\n");
+ pmlmeinfo->state = WIFI_FW_NULL_STATE;
+ report_join_res(padapter, -3);
+ } else if (pmlmeinfo->state & WIFI_FW_AUTH_STATE) {
+ /* re-auth timer */
+ if (++pmlmeinfo->reauth_count > REAUTH_LIMIT) {
+ /* if (pmlmeinfo->auth_algo != dot11AuthAlgrthm_Auto) */
+ /* */
+ pmlmeinfo->state = 0;
+ report_join_res(padapter, -1);
+ return;
+ /* */
+ /* else */
+ /* */
+ /* pmlmeinfo->auth_algo = dot11AuthAlgrthm_Shared; */
+ /* pmlmeinfo->reauth_count = 0; */
+ /* */
+ }
+
+ DBG_871X("link_timer_hdl: auth timeout and try again\n");
+ pmlmeinfo->auth_seq = 1;
+ issue_auth(padapter, NULL, 0);
+ set_link_timer(pmlmeext, REAUTH_TO);
+ } else if (pmlmeinfo->state & WIFI_FW_ASSOC_STATE) {
+ /* re-assoc timer */
+ if (++pmlmeinfo->reassoc_count > REASSOC_LIMIT) {
+ pmlmeinfo->state = WIFI_FW_NULL_STATE;
+ report_join_res(padapter, -2);
+ return;
+ }
+
+ DBG_871X("link_timer_hdl: assoc timeout and try again\n");
+ issue_assocreq(padapter);
+ set_link_timer(pmlmeext, REASSOC_TO);
+ }
+
+ return;
+}
+
+void addba_timer_hdl(struct sta_info *psta)
+{
+ struct ht_priv *phtpriv;
+
+ if (!psta)
+ return;
+
+ phtpriv = &psta->htpriv;
+
+ if ((phtpriv->ht_option == true) && (phtpriv->ampdu_enable == true)) {
+ if (phtpriv->candidate_tid_bitmap)
+ phtpriv->candidate_tid_bitmap = 0x0;
+
+ }
+}
+
+void sa_query_timer_hdl(struct adapter *padapter)
+{
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ /* disconnect */
+ spin_lock_bh(&pmlmepriv->lock);
+
+ if (check_fwstate(pmlmepriv, _FW_LINKED) == true) {
+ rtw_disassoc_cmd(padapter, 0, true);
+ rtw_indicate_disconnect(padapter);
+ rtw_free_assoc_resources(padapter, 1);
+ }
+
+ spin_unlock_bh(&pmlmepriv->lock);
+ DBG_871X("SA query timeout disconnect\n");
+}
+
+u8 NULL_hdl(struct adapter *padapter, u8 *pbuf)
+{
+ return H2C_SUCCESS;
+}
+
+#ifdef CONFIG_AUTO_AP_MODE
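+/*
+ * Build a minimal hidden-SSID beacon (timestamp placeholder, 100 TU beacon
+ * interval, ESS/short-preamble capability, supported rates and DS parameter
+ * set) and hand it to rtw_check_beacon_data() to start AP beaconing.
+ */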
+static int rtw_auto_ap_start_beacon(struct adapter *adapter)
+{
+ int ret = 0;
+ u8 *pbuf = NULL;
+ uint len;
+ u8 supportRate[16];
+ int sz = 0, rateLen;
+ u8 *ie;
+ u8 wireless_mode, oper_channel;
+ u8 ssid[3] = {0}; /* hidden ssid */
+ u32 ssid_len = sizeof(ssid);
+ struct mlme_priv *pmlmepriv = &(adapter->mlmepriv);
+
+
+ if (check_fwstate(pmlmepriv, WIFI_AP_STATE) != true)
+ return -EINVAL;
+
+
+ len = 128;
+ pbuf = rtw_zmalloc(len);
+ if (!pbuf)
+ return -ENOMEM;
+
+
+ /* generate beacon */
+ ie = pbuf;
+
+ /* timestamp will be inserted by hardware */
+ sz += 8;
+ ie += sz;
+
+ /* beacon interval : 2bytes */
+ *(u16 *)ie = cpu_to_le16((u16)100);/* BCN_INTERVAL = 100; */
+ sz += 2;
+ ie += 2;
+
+ /* capability info */
+ *(u16 *)ie = 0;
+ *(u16 *)ie |= cpu_to_le16(cap_ESS);
+ *(u16 *)ie |= cpu_to_le16(cap_ShortPremble);
+ /* u16*)ie |= cpu_to_le16(cap_Privacy); */
+ sz += 2;
+ ie += 2;
+
+ /* SSID */
+ ie = rtw_set_ie(ie, _SSID_IE_, ssid_len, ssid, &sz);
+
+ /* supported rates */
+ wireless_mode = WIRELESS_11BG_24N;
+ rtw_set_supported_rate(supportRate, wireless_mode);
+ rateLen = rtw_get_rateset_len(supportRate);
+ if (rateLen > 8) {
+ ie = rtw_set_ie(ie, _SUPPORTEDRATES_IE_, 8, supportRate, &sz);
+ } else {
+ ie = rtw_set_ie(ie, _SUPPORTEDRATES_IE_, rateLen, supportRate, &sz);
+ }
+
+
+ /* DS parameter set */
+ if (check_buddy_fwstate(adapter, _FW_LINKED) &&
+ check_buddy_fwstate(adapter, WIFI_STATION_STATE)) {
+ struct adapter *pbuddy_adapter = adapter->pbuddy_adapter;
+ struct mlme_ext_priv *pbuddy_mlmeext = &pbuddy_adapter->mlmeextpriv;
+
+ oper_channel = pbuddy_mlmeext->cur_channel;
+ } else {
+ oper_channel = adapter_to_dvobj(adapter)->oper_channel;
+ }
+ ie = rtw_set_ie(ie, _DSSET_IE_, 1, &oper_channel, &sz);
+
+ /* ext supported rates */
+ if (rateLen > 8) {
+ ie = rtw_set_ie(ie, _EXT_SUPPORTEDRATES_IE_, (rateLen - 8), (supportRate + 8), &sz);
+ }
+
+ DBG_871X("%s, start auto ap beacon sz =%d\n", __func__, sz);
+
+ /* launch ap mode & start to issue beacon */
+ if (rtw_check_beacon_data(adapter, pbuf, sz) != _SUCCESS)
+ ret = -EINVAL;
+
+
+ kfree(pbuf);
+
+ return ret;
+
+}
+#endif/* CONFIG_AUTO_AP_MODE */
+
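+/*
+ * Switch the interface between AP, station, ad-hoc and no-link operating
+ * modes, updating the firmware state and the hardware opmode accordingly.
+ */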
+u8 setopmode_hdl(struct adapter *padapter, u8 *pbuf)
+{
+ u8 type;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct setopmode_parm *psetop = (struct setopmode_parm *)pbuf;
+
+ if (psetop->mode == Ndis802_11APMode) {
+ pmlmeinfo->state = WIFI_FW_AP_STATE;
+ type = _HW_STATE_AP_;
+ /* start_ap_mode(padapter); */
+ } else if (psetop->mode == Ndis802_11Infrastructure) {
+ pmlmeinfo->state &= ~(BIT(0)|BIT(1));/* clear state */
+ pmlmeinfo->state |= WIFI_FW_STATION_STATE;/* set to STATION_STATE */
+ type = _HW_STATE_STATION_;
+ } else if (psetop->mode == Ndis802_11IBSS) {
+ type = _HW_STATE_ADHOC_;
+ } else {
+ type = _HW_STATE_NOLINK_;
+ }
+
+ rtw_hal_set_hwreg(padapter, HW_VAR_SET_OPMODE, (u8 *)(&type));
+ /* Set_NETYPE0_MSR(padapter, type); */
+
+
+#ifdef CONFIG_AUTO_AP_MODE
+ if (psetop->mode == Ndis802_11APMode)
+ rtw_auto_ap_start_beacon(padapter);
+#endif
+
+ if (rtw_port_switch_chk(padapter) == true) {
+ rtw_hal_set_hwreg(padapter, HW_VAR_PORT_SWITCH, NULL);
+
+ if (psetop->mode == Ndis802_11APMode)
+ adapter_to_pwrctl(padapter)->fw_psmode_iface_id = 0xff; /* AP mode won't download rsvd pages */
+ else if (psetop->mode == Ndis802_11Infrastructure) {
+ struct adapter *port0_iface = dvobj_get_port0_adapter(adapter_to_dvobj(padapter));
+ if (port0_iface)
+ rtw_lps_ctrl_wk_cmd(port0_iface, LPS_CTRL_CONNECT, 0);
+ }
+ }
+
+ if (psetop->mode == Ndis802_11APMode) {
+ /* Do this after port switch to */
+ /* prevent from downloading rsvd page to wrong port */
+ rtw_btcoex_MediaStatusNotify(padapter, 1); /* connect */
+ }
+
+ return H2C_SUCCESS;
+
+}
+
+u8 createbss_hdl(struct adapter *padapter, u8 *pbuf)
+{
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct wlan_bssid_ex *pnetwork = (struct wlan_bssid_ex *)(&(pmlmeinfo->network));
+ struct joinbss_parm *pparm = (struct joinbss_parm *)pbuf;
+ /* u32 initialgain; */
+
+ if (pmlmeinfo->state == WIFI_FW_AP_STATE) {
+ struct wlan_bssid_ex *network = &padapter->mlmepriv.cur_network.network;
+ start_bss_network(padapter, (u8 *)network);
+ return H2C_SUCCESS;
+ }
+
+ /* below is for ad-hoc master */
+ if (pparm->network.InfrastructureMode == Ndis802_11IBSS) {
+ rtw_joinbss_reset(padapter);
+
+ pmlmeext->cur_bwmode = CHANNEL_WIDTH_20;
+ pmlmeext->cur_ch_offset = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
+ pmlmeinfo->ERP_enable = 0;
+ pmlmeinfo->WMM_enable = 0;
+ pmlmeinfo->HT_enable = 0;
+ pmlmeinfo->HT_caps_enable = 0;
+ pmlmeinfo->HT_info_enable = 0;
+ pmlmeinfo->agg_enable_bitmap = 0;
+ pmlmeinfo->candidate_tid_bitmap = 0;
+
+ /* disable dynamic functions, such as high power, DIG */
+ Save_DM_Func_Flag(padapter);
+ Switch_DM_Func(padapter, DYNAMIC_FUNC_DISABLE, false);
+
+ /* config the initial gain under linking, need to write the BB registers */
+ /* initialgain = 0x1E; */
+ /* rtw_hal_set_hwreg(padapter, HW_VAR_INITIAL_GAIN, (u8 *)(&initialgain)); */
+
+ /* cancel link timer */
+ del_timer_sync(&pmlmeext->link_timer);
+
+ /* clear CAM */
+ flush_all_cam_entry(padapter);
+
+ memcpy(pnetwork, pbuf, FIELD_OFFSET(struct wlan_bssid_ex, IELength));
+ pnetwork->IELength = ((struct wlan_bssid_ex *)pbuf)->IELength;
+
+ if (pnetwork->IELength > MAX_IE_SZ)/* Check pbuf->IELength */
+ return H2C_PARAMETERS_ERROR;
+
+ memcpy(pnetwork->IEs, ((struct wlan_bssid_ex *)pbuf)->IEs, pnetwork->IELength);
+
+ start_create_ibss(padapter);
+
+ }
+
+ return H2C_SUCCESS;
+
+}
+
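+/*
+ * Start joining the BSS described by pbuf: reset any previous join state,
+ * copy the target network, parse its WMM/HT IEs to choose the channel
+ * bandwidth and offset, then program the BSSID and start the client join.
+ */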
+u8 join_cmd_hdl(struct adapter *padapter, u8 *pbuf)
+{
+ u8 join_type;
+ struct ndis_80211_var_ie *pIE;
+ struct registry_priv *pregpriv = &padapter->registrypriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct wlan_bssid_ex *pnetwork = (struct wlan_bssid_ex *)(&(pmlmeinfo->network));
+ u32 i;
+ u8 cbw40_enable = 0;
+ /* u32 initialgain; */
+ /* u32 acparm; */
+ u8 ch, bw, offset;
+
+ /* check already connecting to AP or not */
+ if (pmlmeinfo->state & WIFI_FW_ASSOC_SUCCESS) {
+ if (pmlmeinfo->state & WIFI_FW_STATION_STATE) {
+ issue_deauth_ex(padapter, pnetwork->MacAddress, WLAN_REASON_DEAUTH_LEAVING, 1, 100);
+ }
+ pmlmeinfo->state = WIFI_FW_NULL_STATE;
+
+ /* clear CAM */
+ flush_all_cam_entry(padapter);
+
+ del_timer_sync(&pmlmeext->link_timer);
+
+ /* set MSR to nolink -> infra. mode */
+ /* Set_MSR(padapter, _HW_STATE_NOLINK_); */
+ Set_MSR(padapter, _HW_STATE_STATION_);
+
+
+ rtw_hal_set_hwreg(padapter, HW_VAR_MLME_DISCONNECT, NULL);
+ }
+
+ rtw_joinbss_reset(padapter);
+
+ pmlmeext->cur_bwmode = CHANNEL_WIDTH_20;
+ pmlmeext->cur_ch_offset = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
+ pmlmeinfo->ERP_enable = 0;
+ pmlmeinfo->WMM_enable = 0;
+ pmlmeinfo->HT_enable = 0;
+ pmlmeinfo->HT_caps_enable = 0;
+ pmlmeinfo->HT_info_enable = 0;
+ pmlmeinfo->agg_enable_bitmap = 0;
+ pmlmeinfo->candidate_tid_bitmap = 0;
+ pmlmeinfo->bwmode_updated = false;
+ /* pmlmeinfo->assoc_AP_vendor = HT_IOT_PEER_MAX; */
+ pmlmeinfo->VHT_enable = 0;
+
+ memcpy(pnetwork, pbuf, FIELD_OFFSET(struct wlan_bssid_ex, IELength));
+ pnetwork->IELength = ((struct wlan_bssid_ex *)pbuf)->IELength;
+
+ if (pnetwork->IELength > MAX_IE_SZ)/* Check pbuf->IELength */
+ return H2C_PARAMETERS_ERROR;
+
+ memcpy(pnetwork->IEs, ((struct wlan_bssid_ex *)pbuf)->IEs, pnetwork->IELength);
+
+ pmlmeext->cur_channel = (u8)pnetwork->Configuration.DSConfig;
+ pmlmeinfo->bcn_interval = get_beacon_interval(pnetwork);
+
+ /* Check AP vendor to move rtw_joinbss_cmd() */
+ /* pmlmeinfo->assoc_AP_vendor = check_assoc_AP(pnetwork->IEs, pnetwork->IELength); */
+
+ /* sizeof(struct ndis_802_11_fix_ie) */
+ for (i = _FIXED_IE_LENGTH_; i < pnetwork->IELength;) {
+ pIE = (struct ndis_80211_var_ie *)(pnetwork->IEs + i);
+
+ switch (pIE->ElementID) {
+ case _VENDOR_SPECIFIC_IE_:/* Get WMM IE. */
+ if (!memcmp(pIE->data, WMM_OUI, 4))
+ WMM_param_handler(padapter, pIE);
+ break;
+
+ case _HT_CAPABILITY_IE_: /* Get HT Cap IE. */
+ pmlmeinfo->HT_caps_enable = 1;
+ break;
+
+ case _HT_EXTRA_INFO_IE_: /* Get HT Info IE. */
+ pmlmeinfo->HT_info_enable = 1;
+
+ /* special case for Cisco APs, because a Cisco AP may issue the assoc rsp using an MCS rate @40MHz or @20MHz */
+ {
+ struct HT_info_element *pht_info = (struct HT_info_element *)(pIE->data);
+
+ if (pnetwork->Configuration.DSConfig > 14) {
+ if ((pregpriv->bw_mode >> 4) > CHANNEL_WIDTH_20)
+ cbw40_enable = 1;
+ } else {
+ if ((pregpriv->bw_mode & 0x0f) > CHANNEL_WIDTH_20)
+ cbw40_enable = 1;
+ }
+
+ if ((cbw40_enable) && (pht_info->infos[0] & BIT(2))) {
+ /* switch to the 40 MHz mode according to the AP */
+ pmlmeext->cur_bwmode = CHANNEL_WIDTH_40;
+ switch (pht_info->infos[0] & 0x3) {
+ case 1:
+ pmlmeext->cur_ch_offset = HAL_PRIME_CHNL_OFFSET_LOWER;
+ break;
+
+ case 3:
+ pmlmeext->cur_ch_offset = HAL_PRIME_CHNL_OFFSET_UPPER;
+ break;
+
+ default:
+ pmlmeext->cur_ch_offset = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
+ pmlmeext->cur_bwmode = CHANNEL_WIDTH_20;
+ break;
+ }
+
+ DBG_871X("set HT ch/bw before connected\n");
+ }
+ }
+ break;
+ default:
+ break;
+ }
+
+ i += (pIE->Length + 2);
+ }
+
+ /* check channel, bandwidth, offset and switch */
+ if (rtw_chk_start_clnt_join(padapter, &ch, &bw, &offset) == _FAIL) {
+ report_join_res(padapter, (-4));
+ return H2C_SUCCESS;
+ }
+
+ /* disable dynamic functions, such as high power, DIG */
+ /* Switch_DM_Func(padapter, DYNAMIC_FUNC_DISABLE, false); */
+
+ /* config the initial gain under linking, need to write the BB registers */
+ /* initialgain = 0x1E; */
+ /* rtw_hal_set_hwreg(padapter, HW_VAR_INITIAL_GAIN, (u8 *)(&initialgain)); */
+
+ rtw_hal_set_hwreg(padapter, HW_VAR_BSSID, pmlmeinfo->network.MacAddress);
+ join_type = 0;
+ rtw_hal_set_hwreg(padapter, HW_VAR_MLME_JOIN, (u8 *)(&join_type));
+ rtw_hal_set_hwreg(padapter, HW_VAR_DO_IQK, NULL);
+
+ set_channel_bwmode(padapter, ch, offset, bw);
+
+ /* cancel link timer */
+ del_timer_sync(&pmlmeext->link_timer);
+
+ start_clnt_join(padapter);
+
+ return H2C_SUCCESS;
+
+}
+
+u8 disconnect_hdl(struct adapter *padapter, unsigned char *pbuf)
+{
+ struct disconnect_parm *param = (struct disconnect_parm *)pbuf;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct wlan_bssid_ex *pnetwork = (struct wlan_bssid_ex *)(&(pmlmeinfo->network));
+ u8 val8;
+
+ if (is_client_associated_to_ap(padapter)) {
+ issue_deauth_ex(padapter, pnetwork->MacAddress, WLAN_REASON_DEAUTH_LEAVING, param->deauth_timeout_ms/100, 100);
+ }
+
+ if (((pmlmeinfo->state&0x03) == WIFI_FW_ADHOC_STATE) || ((pmlmeinfo->state&0x03) == WIFI_FW_AP_STATE)) {
+ /* Stop BCN */
+ val8 = 0;
+ rtw_hal_set_hwreg(padapter, HW_VAR_BCN_FUNC, (u8 *)(&val8));
+ }
+
+ rtw_mlmeext_disconnect(padapter);
+
+ rtw_free_uc_swdec_pending_queue(padapter);
+
+ return H2C_SUCCESS;
+}
+
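+/*
+ * Build the scan channel list "out" from the requested channels "in", keeping
+ * only channels that are present in the driver's channel set, not disabled
+ * and allowed by the band check; if none qualify, fall back to the full
+ * channel set. Returns the number of channels written to "out".
+ */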
+static int rtw_scan_ch_decision(struct adapter *padapter, struct rtw_ieee80211_channel *out,
+ u32 out_num, struct rtw_ieee80211_channel *in, u32 in_num)
+{
+ int i, j;
+ int set_idx;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+
+ /* clear first */
+ memset(out, 0, sizeof(struct rtw_ieee80211_channel)*out_num);
+
+ /* acquire channels from in */
+ j = 0;
+ for (i = 0; i < in_num; i++) {
+
+ DBG_871X(FUNC_ADPT_FMT" "CHAN_FMT"\n", FUNC_ADPT_ARG(padapter), CHAN_ARG(&in[i]));
+
+ set_idx = rtw_ch_set_search_ch(pmlmeext->channel_set, in[i].hw_value);
+ if (in[i].hw_value && !(in[i].flags & RTW_IEEE80211_CHAN_DISABLED)
+ && set_idx >= 0
+ && rtw_mlme_band_check(padapter, in[i].hw_value) == true
+ ) {
+ if (j >= out_num) {
+ DBG_871X_LEVEL(_drv_always_, FUNC_ADPT_FMT" out_num:%u not enough\n",
+ FUNC_ADPT_ARG(padapter), out_num);
+ break;
+ }
+
+ memcpy(&out[j], &in[i], sizeof(struct rtw_ieee80211_channel));
+
+ if (pmlmeext->channel_set[set_idx].ScanType == SCAN_PASSIVE)
+ out[j].flags |= RTW_IEEE80211_CHAN_PASSIVE_SCAN;
+
+ j++;
+ }
+ if (j >= out_num)
+ break;
+ }
+
+ /* if out is empty, use channel_set as default */
+ if (j == 0) {
+ for (i = 0; i < pmlmeext->max_chan_nums; i++) {
+
+ DBG_871X(FUNC_ADPT_FMT" ch:%u\n", FUNC_ADPT_ARG(padapter), pmlmeext->channel_set[i].ChannelNum);
+
+ if (rtw_mlme_band_check(padapter, pmlmeext->channel_set[i].ChannelNum) == true) {
+
+ if (j >= out_num) {
+ DBG_871X_LEVEL(_drv_always_, FUNC_ADPT_FMT" out_num:%u not enough\n",
+ FUNC_ADPT_ARG(padapter), out_num);
+ break;
+ }
+
+ out[j].hw_value = pmlmeext->channel_set[i].ChannelNum;
+
+ if (pmlmeext->channel_set[i].ScanType == SCAN_PASSIVE)
+ out[j].flags |= RTW_IEEE80211_CHAN_PASSIVE_SCAN;
+
+ j++;
+ }
+ }
+ }
+
+ return j;
+}
+
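+/*
+ * Start a site survey: record the requested SSIDs and channel list, issue
+ * null-data toward the AP if currently associated (delaying the scan to
+ * protect it), then disable the dynamic hal functions and begin scanning.
+ */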
+u8 sitesurvey_cmd_hdl(struct adapter *padapter, u8 *pbuf)
+{
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct sitesurvey_parm *pparm = (struct sitesurvey_parm *)pbuf;
+ u8 bdelayscan = false;
+ u8 val8;
+ u32 initialgain;
+ u32 i;
+
+ if (pmlmeext->sitesurvey_res.state == SCAN_DISABLE) {
+ pmlmeext->sitesurvey_res.state = SCAN_START;
+ pmlmeext->sitesurvey_res.bss_cnt = 0;
+ pmlmeext->sitesurvey_res.channel_idx = 0;
+
+ for (i = 0; i < RTW_SSID_SCAN_AMOUNT; i++) {
+ if (pparm->ssid[i].SsidLength) {
+ memcpy(pmlmeext->sitesurvey_res.ssid[i].Ssid, pparm->ssid[i].Ssid, IW_ESSID_MAX_SIZE);
+ pmlmeext->sitesurvey_res.ssid[i].SsidLength = pparm->ssid[i].SsidLength;
+ } else {
+ pmlmeext->sitesurvey_res.ssid[i].SsidLength = 0;
+ }
+ }
+
+ pmlmeext->sitesurvey_res.ch_num = rtw_scan_ch_decision(padapter
+ , pmlmeext->sitesurvey_res.ch, RTW_CHANNEL_SCAN_AMOUNT
+ , pparm->ch, pparm->ch_num
+ );
+
+ pmlmeext->sitesurvey_res.scan_mode = pparm->scan_mode;
+
+ /* issue null data if associating to the AP */
+ if (is_client_associated_to_ap(padapter) == true) {
+ pmlmeext->sitesurvey_res.state = SCAN_TXNULL;
+
+ issue_nulldata(padapter, NULL, 1, 3, 500);
+
+ bdelayscan = true;
+ }
+ if (bdelayscan) {
+ /* delay 50ms to protect nulldata(1). */
+ set_survey_timer(pmlmeext, 50);
+ return H2C_SUCCESS;
+ }
+ }
+
+ if ((pmlmeext->sitesurvey_res.state == SCAN_START) || (pmlmeext->sitesurvey_res.state == SCAN_TXNULL)) {
+ /* disable dynamic functions, such as high power, DIG */
+ Save_DM_Func_Flag(padapter);
+ Switch_DM_Func(padapter, DYNAMIC_FUNC_DISABLE, false);
+
+ /* config the initial gain under scanning, need to write the BB registers */
+ initialgain = 0x1e;
+
+ rtw_hal_set_hwreg(padapter, HW_VAR_INITIAL_GAIN, (u8 *)(&initialgain));
+
+ /* set MSR to no link state */
+ Set_MSR(padapter, _HW_STATE_NOLINK_);
+
+ val8 = 1; /* under site survey */
+ rtw_hal_set_hwreg(padapter, HW_VAR_MLME_SITESURVEY, (u8 *)(&val8));
+
+ pmlmeext->sitesurvey_res.state = SCAN_PROCESS;
+ }
+
+ site_survey(padapter);
+
+ return H2C_SUCCESS;
+
+}
+
+u8 setauth_hdl(struct adapter *padapter, unsigned char *pbuf)
+{
+ struct setauth_parm *pparm = (struct setauth_parm *)pbuf;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+
+ if (pparm->mode < 4)
+ pmlmeinfo->auth_algo = pparm->mode;
+
+ return H2C_SUCCESS;
+}
+
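+/*
+ * Install a group/default key: allocate a CAM slot, write the key (addressed
+ * by the BSSID for non-default slots) and, for slots 0-3, enable the
+ * default-key hardware configuration.
+ */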
+u8 setkey_hdl(struct adapter *padapter, u8 *pbuf)
+{
+ u16 ctrl = 0;
+ s16 cam_id = 0;
+ struct setkey_parm *pparm = (struct setkey_parm *)pbuf;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ unsigned char null_addr[] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
+ u8 *addr;
+
+ /* main tx key for wep. */
+ if (pparm->set_tx)
+ pmlmeinfo->key_index = pparm->keyid;
+
+ cam_id = rtw_camid_alloc(padapter, NULL, pparm->keyid);
+
+ if (cam_id >= 0) {
+ if (cam_id > 3) /* not default key, searched by A2 */
+ addr = get_bssid(&padapter->mlmepriv);
+ else
+ addr = null_addr;
+
+ ctrl = BIT(15) | BIT6 | ((pparm->algorithm) << 2) | pparm->keyid;
+ write_cam(padapter, cam_id, ctrl, addr, pparm->key);
+ DBG_871X_LEVEL(_drv_always_, "set group key camid:%d, addr:"MAC_FMT", kid:%d, type:%s\n"
+ , cam_id, MAC_ARG(addr), pparm->keyid, security_type_str(pparm->algorithm));
+ }
+
+ if (cam_id >= 0 && cam_id <= 3)
+ rtw_hal_set_hwreg(padapter, HW_VAR_SEC_DK_CFG, (u8 *)true);
+
+ /* allow multicast packets to driver */
+ padapter->HalFunc.SetHwRegHandler(padapter, HW_VAR_ON_RCR_AM, null_addr);
+
+ return H2C_SUCCESS;
+}
+
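+/*
+ * Install (or, for _NO_PRIVACY_, remove) the per-station pairwise key: look
+ * up the station, allocate a CAM entry and write the key together with its
+ * algorithm and key id.
+ */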
+u8 set_stakey_hdl(struct adapter *padapter, u8 *pbuf)
+{
+ u16 ctrl = 0;
+ s16 cam_id = 0;
+ u8 ret = H2C_SUCCESS;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct set_stakey_parm *pparm = (struct set_stakey_parm *)pbuf;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct sta_info *psta;
+
+ if (pparm->algorithm == _NO_PRIVACY_)
+ goto write_to_cam;
+
+ psta = rtw_get_stainfo(pstapriv, pparm->addr);
+ if (!psta) {
+ DBG_871X_LEVEL(_drv_always_, "%s sta:"MAC_FMT" not found\n", __func__, MAC_ARG(pparm->addr));
+ ret = H2C_REJECTED;
+ goto exit;
+ }
+
+ pmlmeinfo->enc_algo = pparm->algorithm;
+ cam_id = rtw_camid_alloc(padapter, psta, 0);
+ if (cam_id < 0)
+ goto exit;
+
+write_to_cam:
+ if (pparm->algorithm == _NO_PRIVACY_) {
+ while ((cam_id = rtw_camid_search(padapter, pparm->addr, -1)) >= 0) {
+ DBG_871X_LEVEL(_drv_always_, "clear key for addr:"MAC_FMT", camid:%d\n", MAC_ARG(pparm->addr), cam_id);
+ clear_cam_entry(padapter, cam_id);
+ rtw_camid_free(padapter, cam_id);
+ }
+ } else {
+ DBG_871X_LEVEL(_drv_always_, "set pairwise key camid:%d, addr:"MAC_FMT", kid:%d, type:%s\n",
+ cam_id, MAC_ARG(pparm->addr), pparm->keyid, security_type_str(pparm->algorithm));
+ ctrl = BIT(15) | ((pparm->algorithm) << 2) | pparm->keyid;
+ write_cam(padapter, cam_id, ctrl, pparm->addr, pparm->key);
+ }
+ ret = H2C_SUCCESS_RSP;
+
+exit:
+ return ret;
+}
+
+u8 add_ba_hdl(struct adapter *padapter, unsigned char *pbuf)
+{
+ struct addBaReq_parm *pparm = (struct addBaReq_parm *)pbuf;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+
+ struct sta_info *psta = rtw_get_stainfo(&padapter->stapriv, pparm->addr);
+
+ if (!psta)
+ return H2C_SUCCESS;
+
+ if (((pmlmeinfo->state & WIFI_FW_ASSOC_SUCCESS) && (pmlmeinfo->HT_enable)) ||
+ ((pmlmeinfo->state&0x03) == WIFI_FW_AP_STATE)) {
+ /* pmlmeinfo->ADDBA_retry_count = 0; */
+ /* pmlmeinfo->candidate_tid_bitmap |= (0x1 << pparm->tid); */
+ /* psta->htpriv.candidate_tid_bitmap |= BIT(pparm->tid); */
+ issue_action_BA(padapter, pparm->addr, RTW_WLAN_ACTION_ADDBA_REQ, (u16)pparm->tid);
+ /* _set_timer(&pmlmeext->ADDBA_timer, ADDBA_TO); */
+ _set_timer(&psta->addba_retry_timer, ADDBA_TO);
+ } else {
+ psta->htpriv.candidate_tid_bitmap &= ~BIT(pparm->tid);
+ }
+ return H2C_SUCCESS;
+}
+
+
+u8 chk_bmc_sleepq_cmd(struct adapter *padapter)
+{
+ struct cmd_obj *ph2c;
+ struct cmd_priv *pcmdpriv = &(padapter->cmdpriv);
+ u8 res = _SUCCESS;
+
+ ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ if (ph2c == NULL) {
+ res = _FAIL;
+ goto exit;
+ }
+
+ init_h2fwcmd_w_parm_no_parm_rsp(ph2c, GEN_CMD_CODE(_ChkBMCSleepq));
+
+ res = rtw_enqueue_cmd(pcmdpriv, ph2c);
+
+exit:
+ return res;
+}
+
+u8 set_tx_beacon_cmd(struct adapter *padapter)
+{
+ struct cmd_obj *ph2c;
+ struct Tx_Beacon_param *ptxBeacon_parm;
+ struct cmd_priv *pcmdpriv = &(padapter->cmdpriv);
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ u8 res = _SUCCESS;
+ int len_diff = 0;
+
+ ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ if (ph2c == NULL) {
+ res = _FAIL;
+ goto exit;
+ }
+
+ ptxBeacon_parm = (struct Tx_Beacon_param *)rtw_zmalloc(sizeof(struct Tx_Beacon_param));
+ if (ptxBeacon_parm == NULL) {
+ kfree((unsigned char *)ph2c);
+ res = _FAIL;
+ goto exit;
+ }
+
+ memcpy(&(ptxBeacon_parm->network), &(pmlmeinfo->network), sizeof(struct wlan_bssid_ex));
+
+ len_diff = update_hidden_ssid(
+ ptxBeacon_parm->network.IEs+_BEACON_IE_OFFSET_
+ , ptxBeacon_parm->network.IELength-_BEACON_IE_OFFSET_
+ , pmlmeinfo->hidden_ssid_mode
+ );
+ ptxBeacon_parm->network.IELength += len_diff;
+
+ init_h2fwcmd_w_parm_no_rsp(ph2c, ptxBeacon_parm, GEN_CMD_CODE(_TX_Beacon));
+
+ res = rtw_enqueue_cmd(pcmdpriv, ph2c);
+
+exit:
+ return res;
+}
+
+
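+/*
+ * C2H event dispatcher: decode the event header (size, sequence, code),
+ * validate the code and parameter size against the wlanevents table, then
+ * call the registered event callback with the payload.
+ */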
+u8 mlme_evt_hdl(struct adapter *padapter, unsigned char *pbuf)
+{
+ u8 evt_code, evt_seq;
+ u16 evt_sz;
+ uint *peventbuf;
+ void (*event_callback)(struct adapter *dev, u8 *pbuf);
+ struct evt_priv *pevt_priv = &(padapter->evtpriv);
+
+ if (pbuf == NULL)
+ goto _abort_event_;
+
+ peventbuf = (uint *)pbuf;
+ evt_sz = (u16)(*peventbuf&0xffff);
+ evt_seq = (u8)((*peventbuf>>24)&0x7f);
+ evt_code = (u8)((*peventbuf>>16)&0xff);
+
+
+ #ifdef CHECK_EVENT_SEQ
+ /* checking event sequence... */
+ if (evt_seq != (atomic_read(&pevt_priv->event_seq) & 0x7f)) {
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_info_,
+ ("Event Seq Error! %d vs %d\n", (evt_seq & 0x7f),
+ (atomic_read(&pevt_priv->event_seq) & 0x7f)));
+
+ pevt_priv->event_seq = (evt_seq+1)&0x7f;
+
+ goto _abort_event_;
+ }
+ #endif
+
+ /* checking if event code is valid */
+ if (evt_code >= MAX_C2HEVT) {
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_, ("\nEvent Code(%d) mismatch!\n", evt_code));
+ goto _abort_event_;
+ }
+
+ /* checking if event size match the event parm size */
+ if ((wlanevents[evt_code].parmsize != 0) &&
+ (wlanevents[evt_code].parmsize != evt_sz)) {
+
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_, ("\nEvent(%d) Parm Size mismatch (%d vs %d)!\n",
+ evt_code, wlanevents[evt_code].parmsize, evt_sz));
+ goto _abort_event_;
+
+ }
+
+ atomic_inc(&pevt_priv->event_seq);
+
+ peventbuf += 2;
+
+ if (peventbuf) {
+ event_callback = wlanevents[evt_code].event_callback;
+ event_callback(padapter, (u8 *)peventbuf);
+
+ pevt_priv->evt_done_cnt++;
+ }
+
+
+_abort_event_:
+
+
+ return H2C_SUCCESS;
+
+}
+
+u8 h2c_msg_hdl(struct adapter *padapter, unsigned char *pbuf)
+{
+ if (!pbuf)
+ return H2C_PARAMETERS_ERROR;
+
+ return H2C_SUCCESS;
+}
+
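+/*
+ * Flush broadcast/multicast frames queued while stations slept: when the
+ * BC/MC TIM bit is set, wait out the ATIM window, then move the bc/mc sleep
+ * queue to the high queue with the "more data" bit set on all but the last
+ * frame.
+ */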
+u8 chk_bmc_sleepq_hdl(struct adapter *padapter, unsigned char *pbuf)
+{
+ struct sta_info *psta_bmc;
+ struct list_head *xmitframe_plist, *xmitframe_phead;
+ struct xmit_frame *pxmitframe = NULL;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+
+ /* for BC/MC Frames */
+ psta_bmc = rtw_get_bcmc_stainfo(padapter);
+ if (!psta_bmc)
+ return H2C_SUCCESS;
+
+ if ((pstapriv->tim_bitmap&BIT(0)) && (psta_bmc->sleepq_len > 0)) {
+ msleep(10); /* 10ms, ATIM(HIQ) window */
+
+ /* spin_lock_bh(&psta_bmc->sleep_q.lock); */
+ spin_lock_bh(&pxmitpriv->lock);
+
+ xmitframe_phead = get_list_head(&psta_bmc->sleep_q);
+ xmitframe_plist = get_next(xmitframe_phead);
+
+ while (xmitframe_phead != xmitframe_plist) {
+ pxmitframe = LIST_CONTAINOR(xmitframe_plist, struct xmit_frame, list);
+
+ xmitframe_plist = get_next(xmitframe_plist);
+
+ list_del_init(&pxmitframe->list);
+
+ psta_bmc->sleepq_len--;
+ if (psta_bmc->sleepq_len > 0)
+ pxmitframe->attrib.mdata = 1;
+ else
+ pxmitframe->attrib.mdata = 0;
+
+ pxmitframe->attrib.triggered = 1;
+
+ if (xmitframe_hiq_filter(pxmitframe) == true)
+ pxmitframe->attrib.qsel = 0x11;/* HIQ */
+
+ rtw_hal_xmitframe_enqueue(padapter, pxmitframe);
+ }
+
+ /* spin_unlock_bh(&psta_bmc->sleep_q.lock); */
+ spin_unlock_bh(&pxmitpriv->lock);
+
+ /* check hi queue and bmc_sleepq */
+ rtw_chk_hi_queue_cmd(padapter);
+ }
+
+ return H2C_SUCCESS;
+}
+
+u8 tx_beacon_hdl(struct adapter *padapter, unsigned char *pbuf)
+{
+ if (send_beacon(padapter) == _FAIL) {
+ DBG_871X("issue_beacon, fail!\n");
+ return H2C_PARAMETERS_ERROR;
+ }
+
+ /* tx bc/mc frames after update TIM */
+ chk_bmc_sleepq_hdl(padapter, NULL);
+
+ return H2C_SUCCESS;
+}
+
+int rtw_chk_start_clnt_join(struct adapter *padapter, u8 *ch, u8 *bw, u8 *offset)
+{
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ unsigned char cur_ch = pmlmeext->cur_channel;
+ unsigned char cur_bw = pmlmeext->cur_bwmode;
+ unsigned char cur_ch_offset = pmlmeext->cur_ch_offset;
+ bool connect_allow = true;
+
+ if (!ch || !bw || !offset) {
+ rtw_warn_on(1);
+ connect_allow = false;
+ }
+
+ if (connect_allow == true) {
+ DBG_871X("start_join_set_ch_bw: ch =%d, bwmode =%d, ch_offset =%d\n", cur_ch, cur_bw, cur_ch_offset);
+ *ch = cur_ch;
+ *bw = cur_bw;
+ *offset = cur_ch_offset;
+ }
+
+ return connect_allow == true ? _SUCCESS : _FAIL;
+}
+
+/* Find the common ch, bw and ch_offset among all linked/linking interfaces */
+int rtw_get_ch_setting_union(struct adapter *adapter, u8 *ch, u8 *bw, u8 *offset)
+{
+ struct dvobj_priv *dvobj = adapter_to_dvobj(adapter);
+ struct adapter *iface;
+ struct mlme_ext_priv *mlmeext;
+ u8 ch_ret = 0;
+ u8 bw_ret = CHANNEL_WIDTH_20;
+ u8 offset_ret = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
+
+ if (ch)
+ *ch = 0;
+ if (bw)
+ *bw = CHANNEL_WIDTH_20;
+ if (offset)
+ *offset = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
+
+ iface = dvobj->padapters;
+ mlmeext = &iface->mlmeextpriv;
+
+ if (!check_fwstate(&iface->mlmepriv, _FW_LINKED|_FW_UNDER_LINKING))
+ return 0;
+
+ ch_ret = mlmeext->cur_channel;
+ bw_ret = mlmeext->cur_bwmode;
+ offset_ret = mlmeext->cur_ch_offset;
+
+ return 1;
+}
+
+u8 set_ch_hdl(struct adapter *padapter, u8 *pbuf)
+{
+ struct set_ch_parm *set_ch_parm;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+
+ if (!pbuf)
+ return H2C_PARAMETERS_ERROR;
+
+ set_ch_parm = (struct set_ch_parm *)pbuf;
+
+ DBG_871X(FUNC_NDEV_FMT" ch:%u, bw:%u, ch_offset:%u\n",
+ FUNC_NDEV_ARG(padapter->pnetdev),
+ set_ch_parm->ch, set_ch_parm->bw, set_ch_parm->ch_offset);
+
+ pmlmeext->cur_channel = set_ch_parm->ch;
+ pmlmeext->cur_ch_offset = set_ch_parm->ch_offset;
+ pmlmeext->cur_bwmode = set_ch_parm->bw;
+
+ set_channel_bwmode(padapter, set_ch_parm->ch, set_ch_parm->ch_offset, set_ch_parm->bw);
+
+ return H2C_SUCCESS;
+}
+
+u8 set_chplan_hdl(struct adapter *padapter, unsigned char *pbuf)
+{
+ struct SetChannelPlan_param *setChannelPlan_param;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+
+ if (!pbuf)
+ return H2C_PARAMETERS_ERROR;
+
+ setChannelPlan_param = (struct SetChannelPlan_param *)pbuf;
+
+ pmlmeext->max_chan_nums = init_channel_set(padapter, setChannelPlan_param->channel_plan, pmlmeext->channel_set);
+ init_channel_list(padapter, pmlmeext->channel_set, pmlmeext->max_chan_nums, &pmlmeext->channel_list);
+
+ if ((padapter->rtw_wdev != NULL) && (padapter->rtw_wdev->wiphy)) {
+ struct regulatory_request request;
+ request.initiator = NL80211_REGDOM_SET_BY_DRIVER;
+ rtw_reg_notifier(padapter->rtw_wdev->wiphy, &request);
+ }
+
+ return H2C_SUCCESS;
+}
+
+u8 led_blink_hdl(struct adapter *padapter, unsigned char *pbuf)
+{
+ struct LedBlink_param *ledBlink_param;
+
+ if (!pbuf)
+ return H2C_PARAMETERS_ERROR;
+
+ ledBlink_param = (struct LedBlink_param *)pbuf;
+ return H2C_SUCCESS;
+}
+
+u8 set_csa_hdl(struct adapter *padapter, unsigned char *pbuf)
+{
+ return H2C_REJECTED;
+}
+
+/* TDLS_ESTABLISHED : write RCR DATA BIT */
+/* TDLS_CS_OFF : go back to the channel linked with AP, terminating channel switch procedure */
+/* TDLS_INIT_CH_SEN : init channel sensing, receive all data and mgnt frame */
+/* TDLS_DONE_CH_SEN: channel sensing and report candidate channel */
+/* TDLS_OFF_CH : first time set channel to off channel */
+/* TDLS_BASE_CH : go back to the channel linked with the AP when the base channel is set as the target channel */
+/* TDLS_P_OFF_CH : periodically go to off channel */
+/* TDLS_P_BASE_CH : periodically go back to base channel */
+/* TDLS_RS_RCR : restore RCR */
+/* TDLS_TEAR_STA : free tdls sta */
+u8 tdls_hdl(struct adapter *padapter, unsigned char *pbuf)
+{
+ return H2C_REJECTED;
+}
+
+u8 run_in_thread_hdl(struct adapter *padapter, u8 *pbuf)
+{
+ struct RunInThread_param *p;
+
+
+ if (NULL == pbuf)
+ return H2C_PARAMETERS_ERROR;
+ p = (struct RunInThread_param *)pbuf;
+
+ if (p->func)
+ p->func(p->context);
+
+ return H2C_SUCCESS;
+}
diff --git a/drivers/staging/rtl8723bs/core/rtw_odm.c b/drivers/staging/rtl8723bs/core/rtw_odm.c
new file mode 100644
index 000000000000..3144e8ec2fa2
--- /dev/null
+++ b/drivers/staging/rtl8723bs/core/rtw_odm.c
@@ -0,0 +1,197 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2013 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+
+#include <drv_types.h>
+#include <rtw_debug.h>
+#include <rtw_odm.h>
+#include <hal_data.h>
+
+static const char *odm_comp_str[] = {
+ /* BIT0 */"ODM_COMP_DIG",
+ /* BIT1 */"ODM_COMP_RA_MASK",
+ /* BIT2 */"ODM_COMP_DYNAMIC_TXPWR",
+ /* BIT3 */"ODM_COMP_FA_CNT",
+ /* BIT4 */"ODM_COMP_RSSI_MONITOR",
+ /* BIT5 */"ODM_COMP_CCK_PD",
+ /* BIT6 */"ODM_COMP_ANT_DIV",
+ /* BIT7 */"ODM_COMP_PWR_SAVE",
+ /* BIT8 */"ODM_COMP_PWR_TRAIN",
+ /* BIT9 */"ODM_COMP_RATE_ADAPTIVE",
+ /* BIT10 */"ODM_COMP_PATH_DIV",
+ /* BIT11 */"ODM_COMP_PSD",
+ /* BIT12 */"ODM_COMP_DYNAMIC_PRICCA",
+ /* BIT13 */"ODM_COMP_RXHP",
+ /* BIT14 */"ODM_COMP_MP",
+ /* BIT15 */"ODM_COMP_DYNAMIC_ATC",
+ /* BIT16 */"ODM_COMP_EDCA_TURBO",
+ /* BIT17 */"ODM_COMP_EARLY_MODE",
+ /* BIT18 */NULL,
+ /* BIT19 */NULL,
+ /* BIT20 */NULL,
+ /* BIT21 */NULL,
+ /* BIT22 */NULL,
+ /* BIT23 */NULL,
+ /* BIT24 */"ODM_COMP_TX_PWR_TRACK",
+ /* BIT25 */"ODM_COMP_RX_GAIN_TRACK",
+ /* BIT26 */"ODM_COMP_CALIBRATION",
+ /* BIT27 */NULL,
+ /* BIT28 */NULL,
+ /* BIT29 */NULL,
+ /* BIT30 */"ODM_COMP_COMMON",
+ /* BIT31 */"ODM_COMP_INIT",
+};
+
+#define RTW_ODM_COMP_MAX 32
+
+static const char *odm_ability_str[] = {
+ /* BIT0 */"ODM_BB_DIG",
+ /* BIT1 */"ODM_BB_RA_MASK",
+ /* BIT2 */"ODM_BB_DYNAMIC_TXPWR",
+ /* BIT3 */"ODM_BB_FA_CNT",
+ /* BIT4 */"ODM_BB_RSSI_MONITOR",
+ /* BIT5 */"ODM_BB_CCK_PD",
+ /* BIT6 */"ODM_BB_ANT_DIV",
+ /* BIT7 */"ODM_BB_PWR_SAVE",
+ /* BIT8 */"ODM_BB_PWR_TRAIN",
+ /* BIT9 */"ODM_BB_RATE_ADAPTIVE",
+ /* BIT10 */"ODM_BB_PATH_DIV",
+ /* BIT11 */"ODM_BB_PSD",
+ /* BIT12 */"ODM_BB_RXHP",
+ /* BIT13 */"ODM_BB_ADAPTIVITY",
+ /* BIT14 */"ODM_BB_DYNAMIC_ATC",
+ /* BIT15 */NULL,
+ /* BIT16 */"ODM_MAC_EDCA_TURBO",
+ /* BIT17 */"ODM_MAC_EARLY_MODE",
+ /* BIT18 */NULL,
+ /* BIT19 */NULL,
+ /* BIT20 */NULL,
+ /* BIT21 */NULL,
+ /* BIT22 */NULL,
+ /* BIT23 */NULL,
+ /* BIT24 */"ODM_RF_TX_PWR_TRACK",
+ /* BIT25 */"ODM_RF_RX_GAIN_TRACK",
+ /* BIT26 */"ODM_RF_CALIBRATION",
+};
+
+#define RTW_ODM_ABILITY_MAX 27
+
+static const char *odm_dbg_level_str[] = {
+ NULL,
+ "ODM_DBG_OFF",
+ "ODM_DBG_SERIOUS",
+ "ODM_DBG_WARNING",
+ "ODM_DBG_LOUD",
+ "ODM_DBG_TRACE",
+};
+
+#define RTW_ODM_DBG_LEVEL_NUM 6
+
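+/*
+ * Dump the ODM debug-component bitmap: each bit is printed next to its name
+ * from odm_comp_str[], with a '+' marker on the components currently enabled.
+ */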
+void rtw_odm_dbg_comp_msg(void *sel, struct adapter *adapter)
+{
+ u64 dbg_comp;
+ int i;
+
+ rtw_hal_get_def_var(adapter, HW_DEF_ODM_DBG_FLAG, &dbg_comp);
+ DBG_871X_SEL_NL(sel, "odm.DebugComponents = 0x%016llx\n", dbg_comp);
+ for (i = 0; i < RTW_ODM_COMP_MAX; i++) {
+ if (odm_comp_str[i])
+ DBG_871X_SEL_NL(sel, "%cBIT%-2d %s\n",
+ (BIT0 << i) & dbg_comp ? '+' : ' ',
+ i, odm_comp_str[i]);
+ }
+}
+
+inline void rtw_odm_dbg_comp_set(struct adapter *adapter, u64 comps)
+{
+ rtw_hal_set_def_var(adapter, HW_DEF_ODM_DBG_FLAG, &comps);
+}
+
+void rtw_odm_dbg_level_msg(void *sel, struct adapter *adapter)
+{
+ u32 dbg_level;
+ int i;
+
+ rtw_hal_get_def_var(adapter, HW_DEF_ODM_DBG_LEVEL, &dbg_level);
+ DBG_871X_SEL_NL(sel, "odm.DebugLevel = %u\n", dbg_level);
+ for (i = 0; i < RTW_ODM_DBG_LEVEL_NUM; i++) {
+ if (odm_dbg_level_str[i])
+ DBG_871X_SEL_NL(sel, "%u %s\n", i, odm_dbg_level_str[i]);
+ }
+}
+
+inline void rtw_odm_dbg_level_set(struct adapter *adapter, u32 level)
+{
+ rtw_hal_set_def_var(adapter, HW_DEF_ODM_DBG_LEVEL, &level);
+}
+
+void rtw_odm_ability_msg(void *sel, struct adapter *adapter)
+{
+ u32 ability = 0;
+ int i;
+
+ rtw_hal_get_hwreg(adapter, HW_VAR_DM_FLAG, (u8 *)&ability);
+ DBG_871X_SEL_NL(sel, "odm.SupportAbility = 0x%08x\n", ability);
+ for (i = 0; i < RTW_ODM_ABILITY_MAX; i++) {
+ if (odm_ability_str[i])
+ DBG_871X_SEL_NL(sel, "%cBIT%-2d %s\n",
+ (BIT0 << i) & ability ? '+' : ' ', i,
+ odm_ability_str[i]);
+ }
+}
+
+inline void rtw_odm_ability_set(struct adapter *adapter, u32 ability)
+{
+ rtw_hal_set_hwreg(adapter, HW_VAR_DM_FLAG, (u8 *)&ability);
+}
+
+void rtw_odm_adaptivity_parm_msg(void *sel, struct adapter *adapter)
+{
+ struct hal_com_data *pHalData = GET_HAL_DATA(adapter);
+ DM_ODM_T *odm = &pHalData->odmpriv;
+
+ DBG_871X_SEL_NL(sel, "%10s %16s %8s %10s %11s %14s\n"
+ , "TH_L2H_ini", "TH_EDCCA_HL_diff", "IGI_Base", "ForceEDCCA", "AdapEn_RSSI", "IGI_LowerBound");
+ DBG_871X_SEL_NL(sel, "0x%-8x %-16d 0x%-6x %-10d %-11u %-14u\n"
+ , (u8)odm->TH_L2H_ini
+ , odm->TH_EDCCA_HL_diff
+ , odm->IGI_Base
+ , odm->ForceEDCCA
+ , odm->AdapEn_RSSI
+ , odm->IGI_LowerBound
+ );
+}
+
+void rtw_odm_adaptivity_parm_set(struct adapter *adapter, s8 TH_L2H_ini, s8 TH_EDCCA_HL_diff,
+ s8 IGI_Base, bool ForceEDCCA, u8 AdapEn_RSSI, u8 IGI_LowerBound)
+{
+ struct hal_com_data *pHalData = GET_HAL_DATA(adapter);
+ DM_ODM_T *odm = &pHalData->odmpriv;
+
+ odm->TH_L2H_ini = TH_L2H_ini;
+ odm->TH_EDCCA_HL_diff = TH_EDCCA_HL_diff;
+ odm->IGI_Base = IGI_Base;
+ odm->ForceEDCCA = ForceEDCCA;
+ odm->AdapEn_RSSI = AdapEn_RSSI;
+ odm->IGI_LowerBound = IGI_LowerBound;
+}
+
+void rtw_odm_get_perpkt_rssi(void *sel, struct adapter *adapter)
+{
+ struct hal_com_data *hal_data = GET_HAL_DATA(adapter);
+ DM_ODM_T *odm = &(hal_data->odmpriv);
+
+ DBG_871X_SEL_NL(sel, "RxRate = %s, RSSI_A = %d(%%), RSSI_B = %d(%%)\n",
+ HDATA_RATE(odm->RxRate), odm->RSSI_A, odm->RSSI_B);
+}
diff --git a/drivers/staging/rtl8723bs/core/rtw_pwrctrl.c b/drivers/staging/rtl8723bs/core/rtw_pwrctrl.c
new file mode 100644
index 000000000000..f708dbf5bfd4
--- /dev/null
+++ b/drivers/staging/rtl8723bs/core/rtw_pwrctrl.c
@@ -0,0 +1,1421 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#define _RTW_PWRCTRL_C_
+
+#include <drv_types.h>
+#include <rtw_debug.h>
+#include <hal_data.h>
+#include <linux/jiffies.h>
+
+
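+/*
+ * Enter inactive power save: with the radio scheduled to go off, power the
+ * chip down via rtw_ips_pwr_down(), keeping the firmware alive for
+ * IPS_LEVEL_2. Callers must hold pwrpriv->lock (ips_enter() takes it).
+ */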
+void _ips_enter(struct adapter *padapter)
+{
+ struct pwrctrl_priv *pwrpriv = adapter_to_pwrctl(padapter);
+
+ pwrpriv->bips_processing = true;
+
+ /* sync ips_mode with the requested mode */
+ pwrpriv->ips_mode = pwrpriv->ips_mode_req;
+
+ pwrpriv->ips_enter_cnts++;
+ DBG_871X("==>ips_enter cnts:%d\n", pwrpriv->ips_enter_cnts);
+
+ if (rf_off == pwrpriv->change_rfpwrstate) {
+ pwrpriv->bpower_saving = true;
+ DBG_871X_LEVEL(_drv_always_, "nolinked power save enter\n");
+
+ if (pwrpriv->ips_mode == IPS_LEVEL_2)
+ pwrpriv->bkeepfwalive = true;
+
+ rtw_ips_pwr_down(padapter);
+ pwrpriv->rf_pwrstate = rf_off;
+ }
+ pwrpriv->bips_processing = false;
+
+}
+
+void ips_enter(struct adapter *padapter)
+{
+ struct pwrctrl_priv *pwrpriv = adapter_to_pwrctl(padapter);
+
+
+ rtw_btcoex_IpsNotify(padapter, pwrpriv->ips_mode_req);
+
+ down(&pwrpriv->lock);
+ _ips_enter(padapter);
+ up(&pwrpriv->lock);
+}
+
+int _ips_leave(struct adapter *padapter)
+{
+ struct pwrctrl_priv *pwrpriv = adapter_to_pwrctl(padapter);
+ int result = _SUCCESS;
+
+ if ((pwrpriv->rf_pwrstate == rf_off) && (!pwrpriv->bips_processing)) {
+ pwrpriv->bips_processing = true;
+ pwrpriv->change_rfpwrstate = rf_on;
+ pwrpriv->ips_leave_cnts++;
+ DBG_871X("==>ips_leave cnts:%d\n", pwrpriv->ips_leave_cnts);
+
+ result = rtw_ips_pwr_up(padapter);
+ if (result == _SUCCESS) {
+ pwrpriv->rf_pwrstate = rf_on;
+ }
+ DBG_871X_LEVEL(_drv_always_, "nolinked power save leave\n");
+
+ DBG_871X("==> ips_leave.....LED(0x%08x)...\n", rtw_read32(padapter, 0x4c));
+ pwrpriv->bips_processing = false;
+
+ pwrpriv->bkeepfwalive = false;
+ pwrpriv->bpower_saving = false;
+ }
+
+ return result;
+}
+
+int ips_leave(struct adapter *padapter)
+{
+ struct pwrctrl_priv *pwrpriv = adapter_to_pwrctl(padapter);
+ int ret;
+
+ if (!is_primary_adapter(padapter))
+ return _SUCCESS;
+
+ down(&pwrpriv->lock);
+ ret = _ips_leave(padapter);
+ up(&pwrpriv->lock);
+
+ if (_SUCCESS == ret)
+ rtw_btcoex_IpsNotify(padapter, IPS_NONE);
+
+ return ret;
+}
+
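+/*
+ * IPS entry is allowed only when neither this adapter nor its buddy is
+ * associated, scanning, linking or in AP/ad-hoc mode, the ips_deny_time has
+ * passed, and all xmit buffers have been returned (nothing left to transmit).
+ */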
+static bool rtw_pwr_unassociated_idle(struct adapter *adapter)
+{
+ struct adapter *buddy = adapter->pbuddy_adapter;
+ struct mlme_priv *pmlmepriv = &(adapter->mlmepriv);
+ struct xmit_priv *pxmit_priv = &adapter->xmitpriv;
+
+ bool ret = false;
+
+ if (adapter_to_pwrctl(adapter)->bpower_saving == true) {
+ /* DBG_871X("%s: already in LPS or IPS mode\n", __func__); */
+ goto exit;
+ }
+
+ if (time_before(jiffies, adapter_to_pwrctl(adapter)->ips_deny_time)) {
+ /* DBG_871X("%s ips_deny_time\n", __func__); */
+ goto exit;
+ }
+
+ if (check_fwstate(pmlmepriv, WIFI_ASOC_STATE|WIFI_SITE_MONITOR)
+ || check_fwstate(pmlmepriv, WIFI_UNDER_LINKING|WIFI_UNDER_WPS)
+ || check_fwstate(pmlmepriv, WIFI_AP_STATE)
+ || check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE|WIFI_ADHOC_STATE)
+ )
+ goto exit;
+
+ /* consider buddy, if exist */
+ if (buddy) {
+ struct mlme_priv *b_pmlmepriv = &(buddy->mlmepriv);
+
+ if (check_fwstate(b_pmlmepriv, WIFI_ASOC_STATE|WIFI_SITE_MONITOR)
+ || check_fwstate(b_pmlmepriv, WIFI_UNDER_LINKING|WIFI_UNDER_WPS)
+ || check_fwstate(b_pmlmepriv, WIFI_AP_STATE)
+ || check_fwstate(b_pmlmepriv, WIFI_ADHOC_MASTER_STATE|WIFI_ADHOC_STATE)
+ )
+ goto exit;
+ }
+
+ if (pxmit_priv->free_xmitbuf_cnt != NR_XMITBUFF ||
+ pxmit_priv->free_xmit_extbuf_cnt != NR_XMIT_EXTBUFF) {
+ DBG_871X_LEVEL(_drv_always_, "There are some pkts to transmit\n");
+ DBG_871X_LEVEL(_drv_always_, "free_xmitbuf_cnt: %d, free_xmit_extbuf_cnt: %d\n",
+ pxmit_priv->free_xmitbuf_cnt, pxmit_priv->free_xmit_extbuf_cnt);
+ goto exit;
+ }
+
+ ret = true;
+
+exit:
+ return ret;
+}
+
+
+/*
+ * ATTENTION:
+ * rtw_ps_processor() doesn't handle LPS.
+ */
+void rtw_ps_processor(struct adapter *padapter)
+{
+ struct pwrctrl_priv *pwrpriv = adapter_to_pwrctl(padapter);
+ struct dvobj_priv *psdpriv = padapter->dvobj;
+ struct debug_priv *pdbgpriv = &psdpriv->drv_dbg;
+ u32 ps_deny = 0;
+
+ down(&adapter_to_pwrctl(padapter)->lock);
+ ps_deny = rtw_ps_deny_get(padapter);
+ up(&adapter_to_pwrctl(padapter)->lock);
+ if (ps_deny != 0) {
+ DBG_871X(FUNC_ADPT_FMT ": ps_deny = 0x%08X, skip power save!\n",
+ FUNC_ADPT_ARG(padapter), ps_deny);
+ goto exit;
+ }
+
+ if (pwrpriv->bInSuspend == true) {/* system suspend or autosuspend */
+ pdbgpriv->dbg_ps_insuspend_cnt++;
+ DBG_871X("%s, pwrpriv->bInSuspend == true ignore this process\n", __func__);
+ return;
+ }
+
+ pwrpriv->ps_processing = true;
+
+ if (pwrpriv->ips_mode_req == IPS_NONE)
+ goto exit;
+
+ if (rtw_pwr_unassociated_idle(padapter) == false)
+ goto exit;
+
+ if ((pwrpriv->rf_pwrstate == rf_on) && ((pwrpriv->pwr_state_check_cnts%4) == 0)) {
+ DBG_871X("==>%s\n", __func__);
+ pwrpriv->change_rfpwrstate = rf_off;
+ ips_enter(padapter);
+ }
+exit:
+ pwrpriv->ps_processing = false;
+ return;
+}
+
+void pwr_state_check_handler(RTW_TIMER_HDL_ARGS);
+void pwr_state_check_handler(RTW_TIMER_HDL_ARGS)
+{
+ struct adapter *padapter = (struct adapter *)FunctionContext;
+ rtw_ps_cmd(padapter);
+}
+
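+/*
+ * Decide whether to leave leisure power save based on traffic: more than 8
+ * packets transmitted within the ~2 s watchdog period, or more than 4 unicast
+ * frames received in the last check period, queues an LPS_CTRL_LEAVE command.
+ */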
+void traffic_check_for_leave_lps(struct adapter *padapter, u8 tx, u32 tx_packets)
+{
+ static unsigned long start_time;
+ static u32 xmit_cnt;
+ u8 bLeaveLPS = false;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+
+
+
+ if (tx) { /* from tx */
+ xmit_cnt += tx_packets;
+
+ if (start_time == 0)
+ start_time = jiffies;
+
+ if (jiffies_to_msecs(jiffies - start_time) > 2000) { /* 2 sec == watch dog timer */
+ if (xmit_cnt > 8) {
+ if ((adapter_to_pwrctl(padapter)->bLeisurePs)
+ && (adapter_to_pwrctl(padapter)->pwr_mode != PS_MODE_ACTIVE)
+ && (rtw_btcoex_IsBtControlLps(padapter) == false)
+ ) {
+ DBG_871X("leave lps via Tx = %d\n", xmit_cnt);
+ bLeaveLPS = true;
+ }
+ }
+
+ start_time = jiffies;
+ xmit_cnt = 0;
+ }
+
+ } else { /* from rx path */
+ if (pmlmepriv->LinkDetectInfo.NumRxUnicastOkInPeriod > 4/*2*/) {
+ if ((adapter_to_pwrctl(padapter)->bLeisurePs)
+ && (adapter_to_pwrctl(padapter)->pwr_mode != PS_MODE_ACTIVE)
+ && (rtw_btcoex_IsBtControlLps(padapter) == false)
+ ) {
+ DBG_871X("leave lps via Rx = %d\n", pmlmepriv->LinkDetectInfo.NumRxUnicastOkInPeriod);
+ bLeaveLPS = true;
+ }
+ }
+ }
+
+ if (bLeaveLPS)
+ /* DBG_871X("leave lps via %s, Tx = %d, Rx = %d\n", tx?"Tx":"Rx", pmlmepriv->LinkDetectInfo.NumTxOkInPeriod, pmlmepriv->LinkDetectInfo.NumRxUnicastOkInPeriod); */
+ /* rtw_lps_ctrl_wk_cmd(padapter, LPS_CTRL_LEAVE, 1); */
+ rtw_lps_ctrl_wk_cmd(padapter, LPS_CTRL_LEAVE, tx?0:1);
+}
+
+/*
+ * Description:
+ *	This function MUST be called under power lock protection.
+ *
+ * Parameters:
+ *	padapter
+ *	pslv	power state level; can only be PS_STATE_S0 ~ PS_STATE_S4
+ */
+void rtw_set_rpwm(struct adapter *padapter, u8 pslv)
+{
+ u8 rpwm;
+ struct pwrctrl_priv *pwrpriv = adapter_to_pwrctl(padapter);
+ u8 cpwm_orig;
+
+ pslv = PS_STATE(pslv);
+
+ if (pwrpriv->brpwmtimeout == true) {
+ DBG_871X("%s: RPWM timeout, force to set RPWM(0x%02X) again!\n", __func__, pslv);
+ } else {
+ if ((pwrpriv->rpwm == pslv)
+ || ((pwrpriv->rpwm >= PS_STATE_S2) && (pslv >= PS_STATE_S2))) {
+ RT_TRACE(_module_rtl871x_pwrctrl_c_, _drv_err_,
+ ("%s: Already set rpwm[0x%02X], new = 0x%02X!\n", __func__, pwrpriv->rpwm, pslv));
+ return;
+ }
+ }
+
+ if ((padapter->bSurpriseRemoved == true) ||
+ (padapter->hw_init_completed == false)) {
+ RT_TRACE(_module_rtl871x_pwrctrl_c_, _drv_err_,
+ ("%s: SurpriseRemoved(%d) hw_init_completed(%d)\n",
+ __func__, padapter->bSurpriseRemoved, padapter->hw_init_completed));
+
+ pwrpriv->cpwm = PS_STATE_S4;
+
+ return;
+ }
+
+ if (padapter->bDriverStopped == true) {
+ RT_TRACE(_module_rtl871x_pwrctrl_c_, _drv_err_,
+ ("%s: change power state(0x%02X) when DriverStopped\n", __func__, pslv));
+
+ if (pslv < PS_STATE_S2) {
+ RT_TRACE(_module_rtl871x_pwrctrl_c_, _drv_err_,
+ ("%s: Reject to enter PS_STATE(0x%02X) lower than S2 when DriverStopped!!\n", __func__, pslv));
+ return;
+ }
+ }
+
+ rpwm = pslv | pwrpriv->tog;
+ /* an ACK is needed only when going from PS_STATE S0/S1 to S2 or higher */
+ if ((pwrpriv->cpwm < PS_STATE_S2) && (pslv >= PS_STATE_S2))
+ rpwm |= PS_ACK;
+ RT_TRACE(_module_rtl871x_pwrctrl_c_, _drv_notice_,
+ ("rtw_set_rpwm: rpwm = 0x%02x cpwm = 0x%02x\n", rpwm, pwrpriv->cpwm));
+
+ pwrpriv->rpwm = pslv;
+
+ cpwm_orig = 0;
+ if (rpwm & PS_ACK)
+ rtw_hal_get_hwreg(padapter, HW_VAR_CPWM, &cpwm_orig);
+
+ if (rpwm & PS_ACK)
+ _set_timer(&pwrpriv->pwr_rpwm_timer, LPS_RPWM_WAIT_MS);
+ rtw_hal_set_hwreg(padapter, HW_VAR_SET_RPWM, (u8 *)(&rpwm));
+
+ pwrpriv->tog += 0x80;
+
+ /* No LPS 32K, No Ack */
+ if (rpwm & PS_ACK) {
+ unsigned long start_time;
+ u8 cpwm_now;
+ u8 poll_cnt = 0;
+
+ start_time = jiffies;
+
+ /* polling cpwm */
+ do {
+ mdelay(1);
+ poll_cnt++;
+ rtw_hal_get_hwreg(padapter, HW_VAR_CPWM, &cpwm_now);
+ if ((cpwm_orig ^ cpwm_now) & 0x80) {
+ pwrpriv->cpwm = PS_STATE_S4;
+ pwrpriv->cpwm_tog = cpwm_now & PS_TOGGLE;
+ break;
+ }
+
+ if (jiffies_to_msecs(jiffies - start_time) > LPS_RPWM_WAIT_MS) {
+ DBG_871X("%s: polling cpwm timeout! poll_cnt =%d, cpwm_orig =%02x, cpwm_now =%02x\n", __func__, poll_cnt, cpwm_orig, cpwm_now);
+ _set_timer(&pwrpriv->pwr_rpwm_timer, 1);
+ break;
+ }
+ } while (1);
+ } else
+ pwrpriv->cpwm = pslv;
+}
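+
+/*
+ * Usage sketch (illustration only): as the comment above requires, callers
+ * take the power lock around rtw_set_rpwm(), e.g. to request full power (S4);
+ * LeaveAllPowerSaveModeDirect() later in this file follows this pattern:
+ *
+ *	down(&pwrpriv->lock);
+ *	rtw_set_rpwm(padapter, PS_STATE_S4);
+ *	up(&pwrpriv->lock);
+ */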
+
+static u8 PS_RDY_CHECK(struct adapter *padapter)
+{
+ unsigned long curr_time, delta_time;
+ struct pwrctrl_priv *pwrpriv = adapter_to_pwrctl(padapter);
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+
+#if defined(CONFIG_WOWLAN) || defined(CONFIG_AP_WOWLAN)
+ if (true == pwrpriv->bInSuspend && pwrpriv->wowlan_mode)
+ return true;
+ else if (true == pwrpriv->bInSuspend && pwrpriv->wowlan_ap_mode)
+ return true;
+ else if (true == pwrpriv->bInSuspend)
+ return false;
+#else
+ if (true == pwrpriv->bInSuspend)
+ return false;
+#endif
+
+ curr_time = jiffies;
+
+ delta_time = curr_time - pwrpriv->DelayLPSLastTimeStamp;
+
+ if (delta_time < LPS_DELAY_TIME)
+ return false;
+
+ if (check_fwstate(pmlmepriv, WIFI_SITE_MONITOR)
+ || check_fwstate(pmlmepriv, WIFI_UNDER_LINKING|WIFI_UNDER_WPS)
+ || check_fwstate(pmlmepriv, WIFI_AP_STATE)
+ || check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE|WIFI_ADHOC_STATE)
+ || rtw_is_scan_deny(padapter)
+ )
+ return false;
+
+ if ((padapter->securitypriv.dot11AuthAlgrthm == dot11AuthAlgrthm_8021X) && (padapter->securitypriv.binstallGrpkey == false)) {
+ DBG_871X("Group handshake still in progress !!!\n");
+ return false;
+ }
+
+ if (!rtw_cfg80211_pwr_mgmt(padapter))
+ return false;
+
+ return true;
+}
+
+void rtw_set_ps_mode(struct adapter *padapter, u8 ps_mode, u8 smart_ps, u8 bcn_ant_mode, const char *msg)
+{
+ struct pwrctrl_priv *pwrpriv = adapter_to_pwrctl(padapter);
+#if defined(CONFIG_WOWLAN) || defined(CONFIG_AP_WOWLAN)
+ struct debug_priv *pdbgpriv = &padapter->dvobj->drv_dbg;
+#endif
+
+ RT_TRACE(_module_rtl871x_pwrctrl_c_, _drv_notice_,
+ ("%s: PowerMode =%d Smart_PS =%d\n",
+ __func__, ps_mode, smart_ps));
+
+ if (ps_mode > PM_Card_Disable) {
+ RT_TRACE(_module_rtl871x_pwrctrl_c_, _drv_err_, ("ps_mode:%d error\n", ps_mode));
+ return;
+ }
+
+ if (pwrpriv->pwr_mode == ps_mode)
+ if (PS_MODE_ACTIVE == ps_mode)
+ return;
+
+
+ down(&pwrpriv->lock);
+
+ /* if (pwrpriv->pwr_mode == PS_MODE_ACTIVE) */
+ if (ps_mode == PS_MODE_ACTIVE) {
+ if ((rtw_btcoex_IsBtControlLps(padapter) == false)
+ || ((rtw_btcoex_IsBtControlLps(padapter) == true)
+ && (rtw_btcoex_IsLpsOn(padapter) == false))
+ ) {
+ DBG_871X(FUNC_ADPT_FMT" Leave 802.11 power save - %s\n",
+ FUNC_ADPT_ARG(padapter), msg);
+
+ pwrpriv->pwr_mode = ps_mode;
+ rtw_set_rpwm(padapter, PS_STATE_S4);
+
+#if defined(CONFIG_WOWLAN) || defined(CONFIG_AP_WOWLAN)
+ if (pwrpriv->wowlan_mode == true ||
+ pwrpriv->wowlan_ap_mode == true) {
+ unsigned long start_time;
+ u32 delay_ms;
+ u8 val8;
+ delay_ms = 20;
+ start_time = jiffies;
+ do {
+ rtw_hal_get_hwreg(padapter, HW_VAR_SYS_CLKR, &val8);
+ if (!(val8 & BIT(4))) { /* 0x08 bit4 = 1 --> in 32k, bit4 = 0 --> leave 32k */
+ pwrpriv->cpwm = PS_STATE_S4;
+ break;
+ }
+ if (jiffies_to_msecs(jiffies - start_time) > delay_ms) {
+ DBG_871X("%s: Wait for FW 32K leave more than %u ms!!!\n",
+ __func__, delay_ms);
+ pdbgpriv->dbg_wow_leave_ps_fail_cnt++;
+ break;
+ }
+ msleep(1);
+ } while (1);
+ }
+#endif
+ rtw_hal_set_hwreg(padapter, HW_VAR_H2C_FW_PWRMODE, (u8 *)(&ps_mode));
+ pwrpriv->bFwCurrentInPSMode = false;
+
+ rtw_btcoex_LpsNotify(padapter, ps_mode);
+ }
+ } else{
+ if ((PS_RDY_CHECK(padapter) && check_fwstate(&padapter->mlmepriv, WIFI_ASOC_STATE))
+ || ((rtw_btcoex_IsBtControlLps(padapter) == true)
+ && (rtw_btcoex_IsLpsOn(padapter) == true))
+ ) {
+ u8 pslv;
+
+ DBG_871X(FUNC_ADPT_FMT" Enter 802.11 power save - %s\n",
+ FUNC_ADPT_ARG(padapter), msg);
+
+ rtw_btcoex_LpsNotify(padapter, ps_mode);
+
+ pwrpriv->bFwCurrentInPSMode = true;
+ pwrpriv->pwr_mode = ps_mode;
+ pwrpriv->smart_ps = smart_ps;
+ pwrpriv->bcn_ant_mode = bcn_ant_mode;
+ rtw_hal_set_hwreg(padapter, HW_VAR_H2C_FW_PWRMODE, (u8 *)(&ps_mode));
+
+ pslv = PS_STATE_S2;
+ if (pwrpriv->alives == 0)
+ pslv = PS_STATE_S0;
+
+ if ((rtw_btcoex_IsBtDisabled(padapter) == false)
+ && (rtw_btcoex_IsBtControlLps(padapter) == true)) {
+ u8 val8;
+
+ val8 = rtw_btcoex_LpsVal(padapter);
+ if (val8 & BIT(4))
+ pslv = PS_STATE_S2;
+ }
+
+ rtw_set_rpwm(padapter, pslv);
+ }
+ }
+
+ up(&pwrpriv->lock);
+}
+
+/*
+ * Return:
+ *0: Leave OK
+ *-1: Timeout
+ *-2: Other error
+ */
+s32 LPS_RF_ON_check(struct adapter *padapter, u32 delay_ms)
+{
+ unsigned long start_time;
+ u8 bAwake = false;
+ s32 err = 0;
+
+
+ start_time = jiffies;
+ while (1) {
+ rtw_hal_get_hwreg(padapter, HW_VAR_FWLPS_RF_ON, &bAwake);
+ if (true == bAwake)
+ break;
+
+ if (true == padapter->bSurpriseRemoved) {
+ err = -2;
+ DBG_871X("%s: device surprise removed!!\n", __func__);
+ break;
+ }
+
+ if (jiffies_to_msecs(jiffies - start_time) > delay_ms) {
+ err = -1;
+ DBG_871X("%s: Wait for FW LPS leave more than %u ms!!!\n", __func__, delay_ms);
+ break;
+ }
+ msleep(1);
+ }
+
+ return err;
+}
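+
+/*
+ * Usage sketch (illustration only): per the return values documented above,
+ * a caller can distinguish timeout from device removal:
+ *
+ *	s32 err = LPS_RF_ON_check(padapter, 100);
+ *
+ *	if (err == -1)
+ *		DBG_871X("FW did not leave LPS within 100 ms\n");
+ *	else if (err == -2)
+ *		DBG_871X("device removed while waiting\n");
+ *
+ * LPS_Leave() below calls it with LPS_LEAVE_TIMEOUT_MS after switching back
+ * to PS_MODE_ACTIVE.
+ */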
+
+/* */
+/* Description: */
+/* Enter the leisure power save mode. */
+/* */
+void LPS_Enter(struct adapter *padapter, const char *msg)
+{
+ struct dvobj_priv *dvobj = adapter_to_dvobj(padapter);
+ struct pwrctrl_priv *pwrpriv = dvobj_to_pwrctl(dvobj);
+ int n_assoc_iface = 0;
+ char buf[32] = {0};
+
+ if (rtw_btcoex_IsBtControlLps(padapter) == true)
+ return;
+
+ /* Skip the LPS enter request if the number of associated adapters is not 1 */
+ if (check_fwstate(&(dvobj->padapters->mlmepriv), WIFI_ASOC_STATE))
+ n_assoc_iface++;
+ if (n_assoc_iface != 1)
+ return;
+
+ /* Skip lps enter request for adapter not port0 */
+ if (get_iface_type(padapter) != IFACE_PORT0)
+ return;
+
+ if (PS_RDY_CHECK(dvobj->padapters) == false)
+ return;
+
+ if (pwrpriv->bLeisurePs) {
+ /* Idle for a while if we connect to AP a while ago. */
+ if (pwrpriv->LpsIdleCount >= 2) { /* 4 Sec */
+ if (pwrpriv->pwr_mode == PS_MODE_ACTIVE) {
+ sprintf(buf, "WIFI-%s", msg);
+ pwrpriv->bpower_saving = true;
+ rtw_set_ps_mode(padapter, pwrpriv->power_mgnt, padapter->registrypriv.smart_ps, 0, buf);
+ }
+ } else
+ pwrpriv->LpsIdleCount++;
+ }
+
+/* DBG_871X("-LeisurePSEnter\n"); */
+}
+
+/* */
+/* Description: */
+/* Leave the leisure power save mode. */
+/* */
+void LPS_Leave(struct adapter *padapter, const char *msg)
+{
+#define LPS_LEAVE_TIMEOUT_MS 100
+
+ struct dvobj_priv *dvobj = adapter_to_dvobj(padapter);
+ struct pwrctrl_priv *pwrpriv = dvobj_to_pwrctl(dvobj);
+ char buf[32] = {0};
+
+/* DBG_871X("+LeisurePSLeave\n"); */
+
+ if (rtw_btcoex_IsBtControlLps(padapter) == true)
+ return;
+
+ if (pwrpriv->bLeisurePs) {
+ if (pwrpriv->pwr_mode != PS_MODE_ACTIVE) {
+ sprintf(buf, "WIFI-%s", msg);
+ rtw_set_ps_mode(padapter, PS_MODE_ACTIVE, 0, 0, buf);
+
+ if (pwrpriv->pwr_mode == PS_MODE_ACTIVE)
+ LPS_RF_ON_check(padapter, LPS_LEAVE_TIMEOUT_MS);
+ }
+ }
+
+ pwrpriv->bpower_saving = false;
+/* DBG_871X("-LeisurePSLeave\n"); */
+
+}
+
+void LeaveAllPowerSaveModeDirect(struct adapter *Adapter)
+{
+ struct adapter *pri_padapter = GET_PRIMARY_ADAPTER(Adapter);
+ struct mlme_priv *pmlmepriv = &(Adapter->mlmepriv);
+ struct pwrctrl_priv *pwrpriv = adapter_to_pwrctl(Adapter);
+
+ DBG_871X("%s.....\n", __func__);
+
+ if (true == Adapter->bSurpriseRemoved) {
+ DBG_871X(FUNC_ADPT_FMT ": bSurpriseRemoved =%d Skip!\n",
+ FUNC_ADPT_ARG(Adapter), Adapter->bSurpriseRemoved);
+ return;
+ }
+
+ if ((check_fwstate(pmlmepriv, _FW_LINKED) == true)) { /* connect */
+
+ if (pwrpriv->pwr_mode == PS_MODE_ACTIVE) {
+ DBG_871X("%s: Driver Already Leave LPS\n", __func__);
+ return;
+ }
+
+ down(&pwrpriv->lock);
+
+ rtw_set_rpwm(Adapter, PS_STATE_S4);
+
+ up(&pwrpriv->lock);
+
+ rtw_lps_ctrl_wk_cmd(pri_padapter, LPS_CTRL_LEAVE, 0);
+ } else{
+ if (pwrpriv->rf_pwrstate == rf_off)
+ if (false == ips_leave(pri_padapter))
+ DBG_871X("======> ips_leave fail.............\n");
+ }
+}
+
+/* */
+/* Description: Leave all power save mode: LPS, FwLPS, IPS if needed. */
+/* Move code to function by tynli. 2010.03.26. */
+/* */
+void LeaveAllPowerSaveMode(struct adapter *Adapter)
+{
+ struct dvobj_priv *dvobj = adapter_to_dvobj(Adapter);
+ u8 enqueue = 0;
+ int n_assoc_iface = 0;
+
+ if (!Adapter->bup) {
+ DBG_871X(FUNC_ADPT_FMT ": bup =%d Skip!\n",
+ FUNC_ADPT_ARG(Adapter), Adapter->bup);
+ return;
+ }
+
+ if (Adapter->bSurpriseRemoved) {
+ DBG_871X(FUNC_ADPT_FMT ": bSurpriseRemoved =%d Skip!\n",
+ FUNC_ADPT_ARG(Adapter), Adapter->bSurpriseRemoved);
+ return;
+ }
+
+ if (check_fwstate(&(dvobj->padapters->mlmepriv), WIFI_ASOC_STATE))
+ n_assoc_iface++;
+
+ if (n_assoc_iface) { /* connect */
+ enqueue = 1;
+
+ rtw_lps_ctrl_wk_cmd(Adapter, LPS_CTRL_LEAVE, enqueue);
+
+ LPS_Leave_check(Adapter);
+ } else {
+ if (adapter_to_pwrctl(Adapter)->rf_pwrstate == rf_off) {
+ if (false == ips_leave(Adapter))
+ DBG_871X("======> ips_leave fail.............\n");
+ }
+ }
+}
+
+void LPS_Leave_check(
+ struct adapter *padapter)
+{
+ struct pwrctrl_priv *pwrpriv;
+ unsigned long start_time;
+ u8 bReady;
+
+ pwrpriv = adapter_to_pwrctl(padapter);
+
+ bReady = false;
+ start_time = jiffies;
+
+ yield();
+
+ while (1) {
+ down(&pwrpriv->lock);
+
+ if ((padapter->bSurpriseRemoved == true)
+ || (padapter->hw_init_completed == false)
+ || (pwrpriv->pwr_mode == PS_MODE_ACTIVE)
+ )
+ bReady = true;
+
+ up(&pwrpriv->lock);
+
+ if (true == bReady)
+ break;
+
+ if (jiffies_to_msecs(jiffies - start_time) > 100) {
+ DBG_871X("Wait for cpwm event than 100 ms!!!\n");
+ break;
+ }
+ msleep(1);
+ }
+}
+
+/*
+ * Caller: ISR handler...
+ *
+ * This will be called when the CPWM interrupt fires.
+ *
+ * It is used to update the driver's cpwm; the driver then decides whether to
+ * raise or lower the power level.
+ */
+void cpwm_int_hdl(
+ struct adapter *padapter,
+ struct reportpwrstate_parm *preportpwrstate)
+{
+ struct pwrctrl_priv *pwrpriv;
+
+ pwrpriv = adapter_to_pwrctl(padapter);
+
+ down(&pwrpriv->lock);
+
+ if (pwrpriv->rpwm < PS_STATE_S2) {
+ DBG_871X("%s: Redundant CPWM Int. RPWM = 0x%02X CPWM = 0x%02x\n", __func__, pwrpriv->rpwm, pwrpriv->cpwm);
+ up(&pwrpriv->lock);
+ goto exit;
+ }
+
+ pwrpriv->cpwm = PS_STATE(preportpwrstate->state);
+ pwrpriv->cpwm_tog = preportpwrstate->state & PS_TOGGLE;
+
+ if (pwrpriv->cpwm >= PS_STATE_S2) {
+ if (pwrpriv->alives & CMD_ALIVE)
+ up(&padapter->cmdpriv.cmd_queue_sema);
+
+ if (pwrpriv->alives & XMIT_ALIVE)
+ up(&padapter->xmitpriv.xmit_sema);
+ }
+
+ up(&pwrpriv->lock);
+
+exit:
+ RT_TRACE(_module_rtl871x_pwrctrl_c_, _drv_notice_,
+ ("cpwm_int_hdl: cpwm = 0x%02x\n", pwrpriv->cpwm));
+}
+
+static void cpwm_event_callback(struct work_struct *work)
+{
+ struct pwrctrl_priv *pwrpriv = container_of(work, struct pwrctrl_priv, cpwm_event);
+ struct dvobj_priv *dvobj = pwrctl_to_dvobj(pwrpriv);
+ struct adapter *adapter = dvobj->if1;
+ struct reportpwrstate_parm report;
+
+ /* DBG_871X("%s\n", __func__); */
+
+ report.state = PS_STATE_S2;
+ cpwm_int_hdl(adapter, &report);
+}
+
+static void rpwmtimeout_workitem_callback(struct work_struct *work)
+{
+ struct adapter *padapter;
+ struct dvobj_priv *dvobj;
+ struct pwrctrl_priv *pwrpriv;
+
+
+ pwrpriv = container_of(work, struct pwrctrl_priv, rpwmtimeoutwi);
+ dvobj = pwrctl_to_dvobj(pwrpriv);
+ padapter = dvobj->if1;
+/* DBG_871X("+%s: rpwm = 0x%02X cpwm = 0x%02X\n", __func__, pwrpriv->rpwm, pwrpriv->cpwm); */
+
+ down(&pwrpriv->lock);
+ if ((pwrpriv->rpwm == pwrpriv->cpwm) || (pwrpriv->cpwm >= PS_STATE_S2)) {
+ DBG_871X("%s: rpwm = 0x%02X cpwm = 0x%02X CPWM done!\n", __func__, pwrpriv->rpwm, pwrpriv->cpwm);
+ goto exit;
+ }
+ up(&pwrpriv->lock);
+
+ if (rtw_read8(padapter, 0x100) != 0xEA) {
+ struct reportpwrstate_parm report;
+
+ report.state = PS_STATE_S2;
+ DBG_871X("\n%s: FW already leave 32K!\n\n", __func__);
+ cpwm_int_hdl(padapter, &report);
+
+ return;
+ }
+
+ down(&pwrpriv->lock);
+
+ if ((pwrpriv->rpwm == pwrpriv->cpwm) || (pwrpriv->cpwm >= PS_STATE_S2)) {
+ DBG_871X("%s: cpwm =%d, nothing to do!\n", __func__, pwrpriv->cpwm);
+ goto exit;
+ }
+ pwrpriv->brpwmtimeout = true;
+ rtw_set_rpwm(padapter, pwrpriv->rpwm);
+ pwrpriv->brpwmtimeout = false;
+
+exit:
+ up(&pwrpriv->lock);
+}
+
+/*
+ * This function is a timer handler, can't do any IO in it.
+ */
+static void pwr_rpwm_timeout_handler(void *FunctionContext)
+{
+ struct adapter *padapter;
+ struct pwrctrl_priv *pwrpriv;
+
+
+ padapter = (struct adapter *)FunctionContext;
+ pwrpriv = adapter_to_pwrctl(padapter);
+ DBG_871X("+%s: rpwm = 0x%02X cpwm = 0x%02X\n", __func__, pwrpriv->rpwm, pwrpriv->cpwm);
+
+ if ((pwrpriv->rpwm == pwrpriv->cpwm) || (pwrpriv->cpwm >= PS_STATE_S2)) {
+ DBG_871X("+%s: cpwm =%d, nothing to do!\n", __func__, pwrpriv->cpwm);
+ return;
+ }
+
+ _set_workitem(&pwrpriv->rpwmtimeoutwi);
+}
+
+static __inline void register_task_alive(struct pwrctrl_priv *pwrctrl, u32 tag)
+{
+ pwrctrl->alives |= tag;
+}
+
+static __inline void unregister_task_alive(struct pwrctrl_priv *pwrctrl, u32 tag)
+{
+ pwrctrl->alives &= ~tag;
+}
+
+
+/*
+ * Description:
+ *	Check if the fw_pwrstate is okay for I/O.
+ *	If not (cpwm is less than S2), the subroutine
+ *	will raise the cpwm to be greater than or equal to S2.
+ *
+ *	Calling Context: Passive
+ *
+ *	Constraint:
+ *	1. this function will request pwrctrl->lock
+ *
+ * Return Value:
+ *	_SUCCESS	hardware is ready for I/O
+ *	_FAIL		cannot perform I/O right now
+ *
+ * (A usage sketch follows rtw_unregister_task_alive() below.)
+ */
+s32 rtw_register_task_alive(struct adapter *padapter, u32 task)
+{
+ s32 res;
+ struct pwrctrl_priv *pwrctrl;
+ u8 pslv;
+
+ res = _SUCCESS;
+ pwrctrl = adapter_to_pwrctl(padapter);
+ pslv = PS_STATE_S2;
+
+ down(&pwrctrl->lock);
+
+ register_task_alive(pwrctrl, task);
+
+ if (pwrctrl->bFwCurrentInPSMode == true) {
+ RT_TRACE(_module_rtl871x_pwrctrl_c_, _drv_notice_,
+ ("%s: task = 0x%x cpwm = 0x%02x alives = 0x%08x\n",
+ __func__, task, pwrctrl->cpwm, pwrctrl->alives));
+
+ if (pwrctrl->cpwm < pslv) {
+ if (pwrctrl->cpwm < PS_STATE_S2)
+ res = _FAIL;
+ if (pwrctrl->rpwm < pslv)
+ rtw_set_rpwm(padapter, pslv);
+ }
+ }
+
+ up(&pwrctrl->lock);
+
+ if (_FAIL == res)
+ if (pwrctrl->cpwm >= PS_STATE_S2)
+ res = _SUCCESS;
+
+ return res;
+}
+
+/*
+ * Description:
+ *	When the task is done, call this function to power the firmware down again.
+ *
+ * Constraint:
+ *	1. this function will request pwrctrl->lock
+ *
+ * Return Value:
+ *	none
+ */
+void rtw_unregister_task_alive(struct adapter *padapter, u32 task)
+{
+ struct pwrctrl_priv *pwrctrl;
+ u8 pslv;
+
+ pwrctrl = adapter_to_pwrctl(padapter);
+ pslv = PS_STATE_S0;
+
+ if ((rtw_btcoex_IsBtDisabled(padapter) == false)
+ && (rtw_btcoex_IsBtControlLps(padapter) == true)) {
+ u8 val8;
+
+ val8 = rtw_btcoex_LpsVal(padapter);
+ if (val8 & BIT(4))
+ pslv = PS_STATE_S2;
+ }
+
+ down(&pwrctrl->lock);
+
+ unregister_task_alive(pwrctrl, task);
+
+ if ((pwrctrl->pwr_mode != PS_MODE_ACTIVE)
+ && (pwrctrl->bFwCurrentInPSMode == true)) {
+ RT_TRACE(_module_rtl871x_pwrctrl_c_, _drv_notice_,
+ ("%s: cpwm = 0x%02x alives = 0x%08x\n",
+ __func__, pwrctrl->cpwm, pwrctrl->alives));
+
+ if (pwrctrl->cpwm > pslv)
+ if ((pslv >= PS_STATE_S2) || (pwrctrl->alives == 0))
+ rtw_set_rpwm(padapter, pslv);
+
+ }
+
+ up(&pwrctrl->lock);
+}
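+
+/*
+ * Usage sketch (illustration only): the intended pairing of the two functions
+ * above for a task that needs the firmware awake; XMIT_ALIVE and CMD_ALIVE
+ * are the task tags used elsewhere in this file:
+ *
+ *	if (rtw_register_task_alive(padapter, XMIT_ALIVE) == _SUCCESS) {
+ *		... perform I/O while cpwm >= PS_STATE_S2 ...
+ *	}
+ *	rtw_unregister_task_alive(padapter, XMIT_ALIVE);
+ */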
+
+/*
+ * Caller: rtw_xmit_thread
+ *
+ * Check if the fw_pwrstate is okay for xmit.
+ * If not (cpwm is less than S2), the subroutine
+ * will raise the cpwm to be greater than or equal to S2.
+ *
+ * Calling Context: Passive
+ *
+ * Return Value:
+ *	_SUCCESS	rtw_xmit_thread can write fifo/txcmd afterwards.
+ *	_FAIL		rtw_xmit_thread cannot do anything.
+ */
+s32 rtw_register_tx_alive(struct adapter *padapter)
+{
+ s32 res;
+ struct pwrctrl_priv *pwrctrl;
+ u8 pslv;
+
+ res = _SUCCESS;
+ pwrctrl = adapter_to_pwrctl(padapter);
+ pslv = PS_STATE_S2;
+
+ down(&pwrctrl->lock);
+
+ register_task_alive(pwrctrl, XMIT_ALIVE);
+
+ if (pwrctrl->bFwCurrentInPSMode == true) {
+ RT_TRACE(_module_rtl871x_pwrctrl_c_, _drv_notice_,
+ ("rtw_register_tx_alive: cpwm = 0x%02x alives = 0x%08x\n",
+ pwrctrl->cpwm, pwrctrl->alives));
+
+ if (pwrctrl->cpwm < pslv) {
+ if (pwrctrl->cpwm < PS_STATE_S2)
+ res = _FAIL;
+ if (pwrctrl->rpwm < pslv)
+ rtw_set_rpwm(padapter, pslv);
+ }
+ }
+
+ up(&pwrctrl->lock);
+
+ if (_FAIL == res)
+ if (pwrctrl->cpwm >= PS_STATE_S2)
+ res = _SUCCESS;
+
+ return res;
+}
+
+/*
+ * Caller: rtw_cmd_thread
+ *
+ * Check if the fw_pwrstate is okay for issuing cmds.
+ * If not (cpwm is less than S2), the subroutine
+ * will raise the cpwm to be greater than or equal to S2.
+ *
+ * Calling Context: Passive
+ *
+ * Return Value:
+ *	_SUCCESS	rtw_cmd_thread can issue cmds to firmware afterwards.
+ *	_FAIL		rtw_cmd_thread cannot do anything.
+ */
+s32 rtw_register_cmd_alive(struct adapter *padapter)
+{
+ s32 res;
+ struct pwrctrl_priv *pwrctrl;
+ u8 pslv;
+
+ res = _SUCCESS;
+ pwrctrl = adapter_to_pwrctl(padapter);
+ pslv = PS_STATE_S2;
+
+ down(&pwrctrl->lock);
+
+ register_task_alive(pwrctrl, CMD_ALIVE);
+
+ if (pwrctrl->bFwCurrentInPSMode == true) {
+ RT_TRACE(_module_rtl871x_pwrctrl_c_, _drv_info_,
+ ("rtw_register_cmd_alive: cpwm = 0x%02x alives = 0x%08x\n",
+ pwrctrl->cpwm, pwrctrl->alives));
+
+ if (pwrctrl->cpwm < pslv) {
+ if (pwrctrl->cpwm < PS_STATE_S2)
+ res = _FAIL;
+ if (pwrctrl->rpwm < pslv)
+ rtw_set_rpwm(padapter, pslv);
+ }
+ }
+
+ up(&pwrctrl->lock);
+
+ if (_FAIL == res)
+ if (pwrctrl->cpwm >= PS_STATE_S2)
+ res = _SUCCESS;
+
+ return res;
+}
+
+/*
+ * Caller: ISR
+ *
+ * When the ISR has finished TX and there are no more packets to transmit,
+ * the driver shall call this function to power the firmware down again.
+ */
+void rtw_unregister_tx_alive(struct adapter *padapter)
+{
+ struct pwrctrl_priv *pwrctrl;
+ u8 pslv;
+
+ pwrctrl = adapter_to_pwrctl(padapter);
+ pslv = PS_STATE_S0;
+
+ if ((rtw_btcoex_IsBtDisabled(padapter) == false)
+ && (rtw_btcoex_IsBtControlLps(padapter) == true)) {
+ u8 val8;
+
+ val8 = rtw_btcoex_LpsVal(padapter);
+ if (val8 & BIT(4))
+ pslv = PS_STATE_S2;
+ }
+
+ down(&pwrctrl->lock);
+
+ unregister_task_alive(pwrctrl, XMIT_ALIVE);
+
+ if ((pwrctrl->pwr_mode != PS_MODE_ACTIVE)
+ && (pwrctrl->bFwCurrentInPSMode == true)) {
+ RT_TRACE(_module_rtl871x_pwrctrl_c_, _drv_notice_,
+ ("%s: cpwm = 0x%02x alives = 0x%08x\n",
+ __func__, pwrctrl->cpwm, pwrctrl->alives));
+
+ if (pwrctrl->cpwm > pslv)
+ if ((pslv >= PS_STATE_S2) || (pwrctrl->alives == 0))
+ rtw_set_rpwm(padapter, pslv);
+ }
+
+ up(&pwrctrl->lock);
+}
+
+/*
+ * Caller: ISR
+ *
+ * When all commands have been done and there are no more commands to issue,
+ * the driver shall call this function to power the firmware down again.
+ */
+void rtw_unregister_cmd_alive(struct adapter *padapter)
+{
+ struct pwrctrl_priv *pwrctrl;
+ u8 pslv;
+
+ pwrctrl = adapter_to_pwrctl(padapter);
+ pslv = PS_STATE_S0;
+
+ if ((rtw_btcoex_IsBtDisabled(padapter) == false)
+ && (rtw_btcoex_IsBtControlLps(padapter) == true)) {
+ u8 val8;
+
+ val8 = rtw_btcoex_LpsVal(padapter);
+ if (val8 & BIT(4))
+ pslv = PS_STATE_S2;
+ }
+
+ down(&pwrctrl->lock);
+
+ unregister_task_alive(pwrctrl, CMD_ALIVE);
+
+ if ((pwrctrl->pwr_mode != PS_MODE_ACTIVE)
+ && (pwrctrl->bFwCurrentInPSMode == true)) {
+ RT_TRACE(_module_rtl871x_pwrctrl_c_, _drv_info_,
+ ("%s: cpwm = 0x%02x alives = 0x%08x\n",
+ __func__, pwrctrl->cpwm, pwrctrl->alives));
+
+ if (pwrctrl->cpwm > pslv) {
+ if ((pslv >= PS_STATE_S2) || (pwrctrl->alives == 0))
+ rtw_set_rpwm(padapter, pslv);
+ }
+ }
+
+ up(&pwrctrl->lock);
+}
+
+void rtw_init_pwrctrl_priv(struct adapter *padapter)
+{
+ struct pwrctrl_priv *pwrctrlpriv = adapter_to_pwrctl(padapter);
+
+ sema_init(&pwrctrlpriv->lock, 1);
+ sema_init(&pwrctrlpriv->check_32k_lock, 1);
+ pwrctrlpriv->rf_pwrstate = rf_on;
+ pwrctrlpriv->ips_enter_cnts = 0;
+ pwrctrlpriv->ips_leave_cnts = 0;
+ pwrctrlpriv->bips_processing = false;
+
+ pwrctrlpriv->ips_mode = padapter->registrypriv.ips_mode;
+ pwrctrlpriv->ips_mode_req = padapter->registrypriv.ips_mode;
+
+ pwrctrlpriv->pwr_state_check_interval = RTW_PWR_STATE_CHK_INTERVAL;
+ pwrctrlpriv->pwr_state_check_cnts = 0;
+ pwrctrlpriv->bInternalAutoSuspend = false;
+ pwrctrlpriv->bInSuspend = false;
+ pwrctrlpriv->bkeepfwalive = false;
+
+ pwrctrlpriv->LpsIdleCount = 0;
+ pwrctrlpriv->power_mgnt = padapter->registrypriv.power_mgnt;/* PS_MODE_MIN; */
+ pwrctrlpriv->bLeisurePs = (PS_MODE_ACTIVE != pwrctrlpriv->power_mgnt)?true:false;
+
+ pwrctrlpriv->bFwCurrentInPSMode = false;
+
+ pwrctrlpriv->rpwm = 0;
+ pwrctrlpriv->cpwm = PS_STATE_S4;
+
+ pwrctrlpriv->pwr_mode = PS_MODE_ACTIVE;
+ pwrctrlpriv->smart_ps = padapter->registrypriv.smart_ps;
+ pwrctrlpriv->bcn_ant_mode = 0;
+ pwrctrlpriv->dtim = 0;
+
+ pwrctrlpriv->tog = 0x80;
+
+ rtw_hal_set_hwreg(padapter, HW_VAR_SET_RPWM, (u8 *)(&pwrctrlpriv->rpwm));
+
+ _init_workitem(&pwrctrlpriv->cpwm_event, cpwm_event_callback, NULL);
+
+ pwrctrlpriv->brpwmtimeout = false;
+ _init_workitem(&pwrctrlpriv->rpwmtimeoutwi, rpwmtimeout_workitem_callback, NULL);
+ _init_timer(&pwrctrlpriv->pwr_rpwm_timer, padapter->pnetdev, pwr_rpwm_timeout_handler, padapter);
+
+ rtw_init_timer(&pwrctrlpriv->pwr_state_check_timer, padapter, pwr_state_check_handler);
+
+ pwrctrlpriv->wowlan_mode = false;
+ pwrctrlpriv->wowlan_ap_mode = false;
+
+#ifdef CONFIG_PNO_SUPPORT
+ pwrctrlpriv->pno_inited = false;
+ pwrctrlpriv->pnlo_info = NULL;
+ pwrctrlpriv->pscan_info = NULL;
+ pwrctrlpriv->pno_ssid_list = NULL;
+ pwrctrlpriv->pno_in_resume = true;
+#endif
+}
+
+
+void rtw_free_pwrctrl_priv(struct adapter *adapter)
+{
+ /* memset((unsigned char *)pwrctrlpriv, 0, sizeof(struct pwrctrl_priv)); */
+
+#ifdef CONFIG_PNO_SUPPORT
+ /* only needed for the PNO leak checks below */
+ struct pwrctrl_priv *pwrctrlpriv = adapter_to_pwrctl(adapter);
+
+ if (pwrctrlpriv->pnlo_info != NULL)
+ printk("****** pnlo_info memory leak********\n");
+
+ if (pwrctrlpriv->pscan_info != NULL)
+ printk("****** pscan_info memory leak********\n");
+
+ if (pwrctrlpriv->pno_ssid_list != NULL)
+ printk("****** pno_ssid_list memory leak********\n");
+#endif
+}
+
+inline void rtw_set_ips_deny(struct adapter *padapter, u32 ms)
+{
+ struct pwrctrl_priv *pwrpriv = adapter_to_pwrctl(padapter);
+ pwrpriv->ips_deny_time = jiffies + msecs_to_jiffies(ms);
+}
+
+/*
+ * rtw_pwr_wakeup - Wake the NIC up from: 1) IPS 2) USB autosuspend
+ * @adapter: pointer to struct adapter structure
+ * @ips_deffer_ms: number of ms during which falling back into IPS is prevented after wakeup
+ * Return _SUCCESS or _FAIL
+ * (A usage sketch follows the function.)
+ */
+
+int _rtw_pwr_wakeup(struct adapter *padapter, u32 ips_deffer_ms, const char *caller)
+{
+ struct dvobj_priv *dvobj = adapter_to_dvobj(padapter);
+ struct pwrctrl_priv *pwrpriv = dvobj_to_pwrctl(dvobj);
+ struct mlme_priv *pmlmepriv;
+ int ret = _SUCCESS;
+ unsigned long start = jiffies;
+ unsigned long deny_time = jiffies + msecs_to_jiffies(ips_deffer_ms);
+
+ /* for LPS */
+ LeaveAllPowerSaveMode(padapter);
+
+ /* IPS still bound with primary adapter */
+ padapter = GET_PRIMARY_ADAPTER(padapter);
+ pmlmepriv = &padapter->mlmepriv;
+
+ if (time_before(pwrpriv->ips_deny_time, deny_time))
+ pwrpriv->ips_deny_time = deny_time;
+
+
+ if (pwrpriv->ps_processing) {
+ DBG_871X("%s wait ps_processing...\n", __func__);
+ while (pwrpriv->ps_processing && jiffies_to_msecs(jiffies - start) <= 3000)
+ msleep(10);
+ if (pwrpriv->ps_processing)
+ DBG_871X("%s wait ps_processing timeout\n", __func__);
+ else
+ DBG_871X("%s wait ps_processing done\n", __func__);
+ }
+
+ if (pwrpriv->bInternalAutoSuspend == false && pwrpriv->bInSuspend) {
+ DBG_871X("%s wait bInSuspend...\n", __func__);
+ while (pwrpriv->bInSuspend
+ && jiffies_to_msecs(jiffies - start) <= 3000
+ ) {
+ msleep(10);
+ }
+ if (pwrpriv->bInSuspend)
+ DBG_871X("%s wait bInSuspend timeout\n", __func__);
+ else
+ DBG_871X("%s wait bInSuspend done\n", __func__);
+ }
+
+ /* System suspend is not allowed to wakeup */
+ if ((pwrpriv->bInternalAutoSuspend == false) && (true == pwrpriv->bInSuspend)) {
+ ret = _FAIL;
+ goto exit;
+ }
+
+ /* block??? */
+ if ((pwrpriv->bInternalAutoSuspend == true) && (padapter->net_closed == true)) {
+ ret = _FAIL;
+ goto exit;
+ }
+
+ /* I think this should be checked in the IPS, LPS, autosuspend functions... */
+ if (check_fwstate(pmlmepriv, _FW_LINKED) == true) {
+ ret = _SUCCESS;
+ goto exit;
+ }
+
+ if (rf_off == pwrpriv->rf_pwrstate) {
+ DBG_8192C("%s call ips_leave....\n", __func__);
+ if (_FAIL == ips_leave(padapter)) {
+ DBG_8192C("======> ips_leave fail.............\n");
+ ret = _FAIL;
+ goto exit;
+ }
+ }
+
+ /* TODO: the following checking need to be merged... */
+ if (padapter->bDriverStopped
+ || !padapter->bup
+ || !padapter->hw_init_completed
+ ) {
+ DBG_8192C("%s: bDriverStopped =%d, bup =%d, hw_init_completed =%u\n"
+ , caller
+ , padapter->bDriverStopped
+ , padapter->bup
+ , padapter->hw_init_completed);
+ ret = _FAIL;
+ goto exit;
+ }
+
+exit:
+ deny_time = jiffies + msecs_to_jiffies(ips_deffer_ms);
+ if (time_before(pwrpriv->ips_deny_time, deny_time))
+ pwrpriv->ips_deny_time = deny_time;
+ return ret;
+
+}
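+
+/*
+ * Usage sketch (illustration only): callers typically wake the NIC before
+ * touching the hardware and bail out on failure, as rtw_pm_set_ips() below
+ * does. rtw_pwr_wakeup() is assumed here to be a wrapper around
+ * _rtw_pwr_wakeup() that supplies the default deferral time and caller name:
+ *
+ *	if (_FAIL == rtw_pwr_wakeup(padapter))
+ *		return -EFAULT;
+ */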
+
+int rtw_pm_set_lps(struct adapter *padapter, u8 mode)
+{
+ int ret = 0;
+ struct pwrctrl_priv *pwrctrlpriv = adapter_to_pwrctl(padapter);
+
+ if (mode < PS_MODE_NUM) {
+ if (pwrctrlpriv->power_mgnt != mode) {
+ if (PS_MODE_ACTIVE == mode)
+ LeaveAllPowerSaveMode(padapter);
+ else
+ pwrctrlpriv->LpsIdleCount = 2;
+
+ pwrctrlpriv->power_mgnt = mode;
+ pwrctrlpriv->bLeisurePs = (PS_MODE_ACTIVE != pwrctrlpriv->power_mgnt)?true:false;
+ }
+ } else
+ ret = -EINVAL;
+
+ return ret;
+}
+
+int rtw_pm_set_ips(struct adapter *padapter, u8 mode)
+{
+ struct pwrctrl_priv *pwrctrlpriv = adapter_to_pwrctl(padapter);
+
+ if (mode == IPS_NORMAL || mode == IPS_LEVEL_2) {
+ rtw_ips_mode_req(pwrctrlpriv, mode);
+ DBG_871X("%s %s\n", __func__, mode == IPS_NORMAL?"IPS_NORMAL":"IPS_LEVEL_2");
+ return 0;
+ } else if (mode == IPS_NONE) {
+ rtw_ips_mode_req(pwrctrlpriv, mode);
+ DBG_871X("%s %s\n", __func__, "IPS_NONE");
+ if ((padapter->bSurpriseRemoved == 0) && (_FAIL == rtw_pwr_wakeup(padapter)))
+ return -EFAULT;
+ } else
+ return -EINVAL;
+
+ return 0;
+}
+
+/*
+ * ATTENTION:
+ *This function will request pwrctrl LOCK!
+ */
+void rtw_ps_deny(struct adapter *padapter, enum PS_DENY_REASON reason)
+{
+ struct pwrctrl_priv *pwrpriv;
+
+/* DBG_871X("+" FUNC_ADPT_FMT ": Request PS deny for %d (0x%08X)\n", */
+/* FUNC_ADPT_ARG(padapter), reason, BIT(reason)); */
+
+ pwrpriv = adapter_to_pwrctl(padapter);
+
+ down(&pwrpriv->lock);
+ if (pwrpriv->ps_deny & BIT(reason)) {
+ DBG_871X(FUNC_ADPT_FMT ": [WARNING] Reason %d had been set before!!\n",
+ FUNC_ADPT_ARG(padapter), reason);
+ }
+ pwrpriv->ps_deny |= BIT(reason);
+ up(&pwrpriv->lock);
+
+/* DBG_871X("-" FUNC_ADPT_FMT ": Now PS deny for 0x%08X\n", */
+/* FUNC_ADPT_ARG(padapter), pwrpriv->ps_deny); */
+}
+
+/*
+ * ATTENTION:
+ *This function will request pwrctrl LOCK!
+ */
+void rtw_ps_deny_cancel(struct adapter *padapter, enum PS_DENY_REASON reason)
+{
+ struct pwrctrl_priv *pwrpriv;
+
+
+/* DBG_871X("+" FUNC_ADPT_FMT ": Cancel PS deny for %d(0x%08X)\n", */
+/* FUNC_ADPT_ARG(padapter), reason, BIT(reason)); */
+
+ pwrpriv = adapter_to_pwrctl(padapter);
+
+ down(&pwrpriv->lock);
+ if ((pwrpriv->ps_deny & BIT(reason)) == 0) {
+ DBG_871X(FUNC_ADPT_FMT ": [ERROR] Reason %d had been canceled before!!\n",
+ FUNC_ADPT_ARG(padapter), reason);
+ }
+ pwrpriv->ps_deny &= ~BIT(reason);
+ up(&pwrpriv->lock);
+
+/* DBG_871X("-" FUNC_ADPT_FMT ": Now PS deny for 0x%08X\n", */
+/* FUNC_ADPT_ARG(padapter), pwrpriv->ps_deny); */
+}
+
+/*
+ * ATTENTION:
+ *	The pwrctrl lock should already be held before calling this function,
+ *	otherwise it may return an incorrect value.
+ */
+u32 rtw_ps_deny_get(struct adapter *padapter)
+{
+ u32 deny;
+
+
+ deny = adapter_to_pwrctl(padapter)->ps_deny;
+
+ return deny;
+}
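+
+/*
+ * Usage sketch (illustration only): a code path that must keep the chip out
+ * of IPS/LPS takes a deny reason and releases it when done (PS_DENY_SCAN is
+ * assumed here to be one of the PS_DENY_REASON values); rtw_ps_cmd() above
+ * then reads the non-zero mask via rtw_ps_deny_get() and skips power save:
+ *
+ *	rtw_ps_deny(padapter, PS_DENY_SCAN);
+ *	... section that must not enter power save ...
+ *	rtw_ps_deny_cancel(padapter, PS_DENY_SCAN);
+ */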
diff --git a/drivers/staging/rtl8723bs/core/rtw_recv.c b/drivers/staging/rtl8723bs/core/rtw_recv.c
new file mode 100644
index 000000000000..695a5c958c80
--- /dev/null
+++ b/drivers/staging/rtl8723bs/core/rtw_recv.c
@@ -0,0 +1,2689 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#define _RTW_RECV_C_
+
+#include <drv_types.h>
+#include <rtw_debug.h>
+#include <linux/jiffies.h>
+#include <rtw_recv.h>
+
+static u8 SNAP_ETH_TYPE_IPX[2] = {0x81, 0x37};
+static u8 SNAP_ETH_TYPE_APPLETALK_AARP[2] = {0x80, 0xf3};
+
+u8 rtw_rfc1042_header[] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };
+/* Bridge-Tunnel header (for EtherTypes ETH_P_AARP and ETH_P_IPX) */
+u8 rtw_bridge_tunnel_header[] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 };
+
+void rtw_signal_stat_timer_hdl(RTW_TIMER_HDL_ARGS);
+
+void _rtw_init_sta_recv_priv(struct sta_recv_priv *psta_recvpriv)
+{
+ memset((u8 *)psta_recvpriv, 0, sizeof(struct sta_recv_priv));
+
+ spin_lock_init(&psta_recvpriv->lock);
+
+ /* for (i = 0; i<MAX_RX_NUMBLKS; i++) */
+ /* _rtw_init_queue(&psta_recvpriv->blk_strms[i]); */
+
+ _rtw_init_queue(&psta_recvpriv->defrag_q);
+}
+
+sint _rtw_init_recv_priv(struct recv_priv *precvpriv, struct adapter *padapter)
+{
+ sint i;
+ union recv_frame *precvframe;
+ sint res = _SUCCESS;
+
+ /* We don't need to memset padapter->XXX to zero, because adapter is allocated by vzalloc(). */
+ /* memset((unsigned char *)precvpriv, 0, sizeof (struct recv_priv)); */
+
+ spin_lock_init(&precvpriv->lock);
+
+ _rtw_init_queue(&precvpriv->free_recv_queue);
+ _rtw_init_queue(&precvpriv->recv_pending_queue);
+ _rtw_init_queue(&precvpriv->uc_swdec_pending_queue);
+
+ precvpriv->adapter = padapter;
+
+ precvpriv->free_recvframe_cnt = NR_RECVFRAME;
+
+ precvpriv->pallocated_frame_buf = vzalloc(NR_RECVFRAME * sizeof(union recv_frame) + RXFRAME_ALIGN_SZ);
+
+ if (precvpriv->pallocated_frame_buf == NULL) {
+ res = _FAIL;
+ goto exit;
+ }
+ /* memset(precvpriv->pallocated_frame_buf, 0, NR_RECVFRAME * sizeof(union recv_frame) + RXFRAME_ALIGN_SZ); */
+
+ precvpriv->precv_frame_buf = (u8 *)N_BYTE_ALIGMENT((SIZE_PTR)(precvpriv->pallocated_frame_buf), RXFRAME_ALIGN_SZ);
+ /* precvpriv->precv_frame_buf = precvpriv->pallocated_frame_buf + RXFRAME_ALIGN_SZ - */
+ /* ((SIZE_PTR) (precvpriv->pallocated_frame_buf) &(RXFRAME_ALIGN_SZ-1)); */
+
+ precvframe = (union recv_frame *) precvpriv->precv_frame_buf;
+
+
+ for (i = 0; i < NR_RECVFRAME; i++) {
+ INIT_LIST_HEAD(&(precvframe->u.list));
+
+ list_add_tail(&(precvframe->u.list), &(precvpriv->free_recv_queue.queue));
+
+ res = rtw_os_recv_resource_alloc(padapter, precvframe);
+
+ precvframe->u.hdr.len = 0;
+
+ precvframe->u.hdr.adapter = padapter;
+ precvframe++;
+
+ }
+
+ res = rtw_hal_init_recv_priv(padapter);
+
+ rtw_init_timer(&precvpriv->signal_stat_timer, padapter, rtw_signal_stat_timer_hdl);
+
+ precvpriv->signal_stat_sampling_interval = 2000; /* ms */
+
+ rtw_set_signal_stat_timer(precvpriv);
+
+exit:
+ return res;
+}
+
+void _rtw_free_recv_priv(struct recv_priv *precvpriv)
+{
+ struct adapter *padapter = precvpriv->adapter;
+
+ rtw_free_uc_swdec_pending_queue(padapter);
+
+ rtw_os_recv_resource_free(precvpriv);
+
+ if (precvpriv->pallocated_frame_buf)
+ vfree(precvpriv->pallocated_frame_buf);
+
+ rtw_hal_free_recv_priv(padapter);
+}
+
+union recv_frame *_rtw_alloc_recvframe(struct __queue *pfree_recv_queue)
+{
+
+ union recv_frame *precvframe;
+ struct list_head *plist, *phead;
+ struct adapter *padapter;
+ struct recv_priv *precvpriv;
+
+ if (list_empty(&pfree_recv_queue->queue))
+ precvframe = NULL;
+ else{
+ phead = get_list_head(pfree_recv_queue);
+
+ plist = get_next(phead);
+
+ precvframe = LIST_CONTAINOR(plist, union recv_frame, u);
+
+ list_del_init(&precvframe->u.hdr.list);
+ padapter = precvframe->u.hdr.adapter;
+ if (padapter != NULL) {
+ precvpriv = &padapter->recvpriv;
+ if (pfree_recv_queue == &precvpriv->free_recv_queue)
+ precvpriv->free_recvframe_cnt--;
+ }
+ }
+ return precvframe;
+}
+
+union recv_frame *rtw_alloc_recvframe(struct __queue *pfree_recv_queue)
+{
+ union recv_frame *precvframe;
+
+ spin_lock_bh(&pfree_recv_queue->lock);
+
+ precvframe = _rtw_alloc_recvframe(pfree_recv_queue);
+
+ spin_unlock_bh(&pfree_recv_queue->lock);
+
+ return precvframe;
+}
+
+int rtw_free_recvframe(union recv_frame *precvframe, struct __queue *pfree_recv_queue)
+{
+ struct adapter *padapter = precvframe->u.hdr.adapter;
+ struct recv_priv *precvpriv = &padapter->recvpriv;
+
+ rtw_os_free_recvframe(precvframe);
+
+
+ spin_lock_bh(&pfree_recv_queue->lock);
+
+ list_del_init(&(precvframe->u.hdr.list));
+
+ precvframe->u.hdr.len = 0;
+
+ list_add_tail(&(precvframe->u.hdr.list), get_list_head(pfree_recv_queue));
+
+ if (padapter != NULL) {
+ if (pfree_recv_queue == &precvpriv->free_recv_queue)
+ precvpriv->free_recvframe_cnt++;
+ }
+ spin_unlock_bh(&pfree_recv_queue->lock);
+ return _SUCCESS;
+}
+
+
+
+
+sint _rtw_enqueue_recvframe(union recv_frame *precvframe, struct __queue *queue)
+{
+
+ struct adapter *padapter = precvframe->u.hdr.adapter;
+ struct recv_priv *precvpriv = &padapter->recvpriv;
+
+ /* INIT_LIST_HEAD(&(precvframe->u.hdr.list)); */
+ list_del_init(&(precvframe->u.hdr.list));
+
+
+ list_add_tail(&(precvframe->u.hdr.list), get_list_head(queue));
+
+ if (padapter != NULL)
+ if (queue == &precvpriv->free_recv_queue)
+ precvpriv->free_recvframe_cnt++;
+
+ return _SUCCESS;
+}
+
+sint rtw_enqueue_recvframe(union recv_frame *precvframe, struct __queue *queue)
+{
+ sint ret;
+
+ /* _spinlock(&pfree_recv_queue->lock); */
+ spin_lock_bh(&queue->lock);
+ ret = _rtw_enqueue_recvframe(precvframe, queue);
+ /* spin_unlock(&pfree_recv_queue->lock); */
+ spin_unlock_bh(&queue->lock);
+
+ return ret;
+}
+
+/*
+sint rtw_enqueue_recvframe(union recv_frame *precvframe, struct __queue *queue)
+{
+ return rtw_free_recvframe(precvframe, queue);
+}
+*/
+
+
+
+
+/*
+ * Caller: defrag; recvframe_chk_defrag in recv_thread (passive)
+ * pframequeue: defrag_queue; will be accessed in recv_thread (passive)
+ *
+ * A spinlock is used for protection.
+ */
+
+void rtw_free_recvframe_queue(struct __queue *pframequeue, struct __queue *pfree_recv_queue)
+{
+ union recv_frame *precvframe;
+ struct list_head *plist, *phead;
+
+ spin_lock(&pframequeue->lock);
+
+ phead = get_list_head(pframequeue);
+ plist = get_next(phead);
+
+ while (phead != plist) {
+ precvframe = LIST_CONTAINOR(plist, union recv_frame, u);
+
+ plist = get_next(plist);
+
+ rtw_free_recvframe(precvframe, pfree_recv_queue);
+ }
+
+ spin_unlock(&pframequeue->lock);
+}
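+
+/*
+ * Usage sketch (illustration only, field names assumed): flushing a
+ * per-station defrag queue back into the free pool, as described in the
+ * comment above:
+ *
+ *	rtw_free_recvframe_queue(&psta->sta_recvpriv.defrag_q,
+ *				 &padapter->recvpriv.free_recv_queue);
+ */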
+
+u32 rtw_free_uc_swdec_pending_queue(struct adapter *adapter)
+{
+ u32 cnt = 0;
+ union recv_frame *pending_frame;
+ while ((pending_frame = rtw_alloc_recvframe(&adapter->recvpriv.uc_swdec_pending_queue))) {
+ rtw_free_recvframe(pending_frame, &adapter->recvpriv.free_recv_queue);
+ cnt++;
+ }
+
+ if (cnt)
+ DBG_871X(FUNC_ADPT_FMT" dequeue %d\n", FUNC_ADPT_ARG(adapter), cnt);
+
+ return cnt;
+}
+
+
+sint rtw_enqueue_recvbuf_to_head(struct recv_buf *precvbuf, struct __queue *queue)
+{
+ spin_lock_bh(&queue->lock);
+
+ list_del_init(&precvbuf->list);
+ list_add(&precvbuf->list, get_list_head(queue));
+
+ spin_unlock_bh(&queue->lock);
+
+ return _SUCCESS;
+}
+
+sint rtw_enqueue_recvbuf(struct recv_buf *precvbuf, struct __queue *queue)
+{
+ spin_lock_bh(&queue->lock);
+
+ list_del_init(&precvbuf->list);
+
+ list_add_tail(&precvbuf->list, get_list_head(queue));
+ spin_unlock_bh(&queue->lock);
+ return _SUCCESS;
+
+}
+
+struct recv_buf *rtw_dequeue_recvbuf(struct __queue *queue)
+{
+ struct recv_buf *precvbuf;
+ struct list_head *plist, *phead;
+
+ spin_lock_bh(&queue->lock);
+
+ if (list_empty(&queue->queue))
+ precvbuf = NULL;
+ else{
+ phead = get_list_head(queue);
+
+ plist = get_next(phead);
+
+ precvbuf = LIST_CONTAINOR(plist, struct recv_buf, list);
+
+ list_del_init(&precvbuf->list);
+
+ }
+
+ spin_unlock_bh(&queue->lock);
+
+ return precvbuf;
+
+}
+
+sint recvframe_chkmic(struct adapter *adapter, union recv_frame *precvframe);
+sint recvframe_chkmic(struct adapter *adapter, union recv_frame *precvframe)
+{
+
+ sint i, res = _SUCCESS;
+ u32 datalen;
+ u8 miccode[8];
+ u8 bmic_err = false, brpt_micerror = true;
+ u8 *pframe, *payload, *pframemic;
+ u8 *mickey;
+ /* u8 *iv, rxdata_key_idx = 0; */
+ struct sta_info *stainfo;
+ struct rx_pkt_attrib *prxattrib = &precvframe->u.hdr.attrib;
+ struct security_priv *psecuritypriv = &adapter->securitypriv;
+
+ struct mlme_ext_priv *pmlmeext = &adapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+
+ stainfo = rtw_get_stainfo(&adapter->stapriv, &prxattrib->ta[0]);
+
+ if (prxattrib->encrypt == _TKIP_) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("\n recvframe_chkmic:prxattrib->encrypt == _TKIP_\n"));
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("\n recvframe_chkmic:da = 0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x\n",
+ prxattrib->ra[0], prxattrib->ra[1], prxattrib->ra[2], prxattrib->ra[3], prxattrib->ra[4], prxattrib->ra[5]));
+
+ /* calculate mic code */
+ if (stainfo != NULL) {
+ if (IS_MCAST(prxattrib->ra)) {
+ /* mickey =&psecuritypriv->dot118021XGrprxmickey.skey[0]; */
+ /* iv = precvframe->u.hdr.rx_data+prxattrib->hdrlen; */
+ /* rxdata_key_idx =(((iv[3])>>6)&0x3) ; */
+ mickey = &psecuritypriv->dot118021XGrprxmickey[prxattrib->key_index].skey[0];
+
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("\n recvframe_chkmic: bcmc key\n"));
+ /* DBG_871X("\n recvframe_chkmic: bcmc key psecuritypriv->dot118021XGrpKeyid(%d), pmlmeinfo->key_index(%d) , recv key_id(%d)\n", */
+ /* psecuritypriv->dot118021XGrpKeyid, pmlmeinfo->key_index, rxdata_key_idx); */
+
+ if (psecuritypriv->binstallGrpkey == false) {
+ res = _FAIL;
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("\n recvframe_chkmic:didn't install group key!!!!!!!!!!\n"));
+ DBG_871X("\n recvframe_chkmic:didn't install group key!!!!!!!!!!\n");
+ goto exit;
+ }
+ } else {
+ mickey = &stainfo->dot11tkiprxmickey.skey[0];
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("\n recvframe_chkmic: unicast key\n"));
+ }
+
+ datalen = precvframe->u.hdr.len-prxattrib->hdrlen-prxattrib->iv_len-prxattrib->icv_len-8;/* icv_len included the mic code */
+ pframe = precvframe->u.hdr.rx_data;
+ payload = pframe+prxattrib->hdrlen+prxattrib->iv_len;
+
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("\n prxattrib->iv_len =%d prxattrib->icv_len =%d\n", prxattrib->iv_len, prxattrib->icv_len));
+
+
+ rtw_seccalctkipmic(mickey, pframe, payload, datalen, &miccode[0], (unsigned char)prxattrib->priority); /* care the length of the data */
+
+ pframemic = payload+datalen;
+
+ bmic_err = false;
+
+ for (i = 0; i < 8; i++) {
+ if (miccode[i] != *(pframemic+i)) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("recvframe_chkmic:miccode[%d](%02x) != *(pframemic+%d)(%02x) ", i, miccode[i], i, *(pframemic+i)));
+ bmic_err = true;
+ }
+ }
+
+
+ if (bmic_err == true) {
+
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("\n *(pframemic-8)-*(pframemic-1) = 0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x\n",
+ *(pframemic-8), *(pframemic-7), *(pframemic-6), *(pframemic-5), *(pframemic-4), *(pframemic-3), *(pframemic-2), *(pframemic-1)));
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("\n *(pframemic-16)-*(pframemic-9) = 0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x\n",
+ *(pframemic-16), *(pframemic-15), *(pframemic-14), *(pframemic-13), *(pframemic-12), *(pframemic-11), *(pframemic-10), *(pframemic-9)));
+
+ {
+ uint i;
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("\n ======demp packet (len =%d) ======\n", precvframe->u.hdr.len));
+ for (i = 0; i < precvframe->u.hdr.len; i = i+8) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x",
+ *(precvframe->u.hdr.rx_data+i), *(precvframe->u.hdr.rx_data+i+1),
+ *(precvframe->u.hdr.rx_data+i+2), *(precvframe->u.hdr.rx_data+i+3),
+ *(precvframe->u.hdr.rx_data+i+4), *(precvframe->u.hdr.rx_data+i+5),
+ *(precvframe->u.hdr.rx_data+i+6), *(precvframe->u.hdr.rx_data+i+7)));
+ }
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("\n ======demp packet end [len =%d]======\n", precvframe->u.hdr.len));
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("\n hrdlen =%d,\n", prxattrib->hdrlen));
+ }
+
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("ra = 0x%.2x 0x%.2x 0x%.2x 0x%.2x 0x%.2x 0x%.2x psecuritypriv->binstallGrpkey =%d ",
+ prxattrib->ra[0], prxattrib->ra[1], prxattrib->ra[2],
+ prxattrib->ra[3], prxattrib->ra[4], prxattrib->ra[5], psecuritypriv->binstallGrpkey));
+
+ /* double-check key_index because of a timing issue; */
+ /* comparing against psecuritypriv->dot118021XGrpKeyid can also hit the same timing issue */
+ if ((IS_MCAST(prxattrib->ra) == true) && (prxattrib->key_index != pmlmeinfo->key_index))
+ brpt_micerror = false;
+
+ if ((prxattrib->bdecrypted == true) && (brpt_micerror == true)) {
+ rtw_handle_tkip_mic_err(adapter, (u8)IS_MCAST(prxattrib->ra));
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, (" mic error :prxattrib->bdecrypted =%d ", prxattrib->bdecrypted));
+ DBG_871X(" mic error :prxattrib->bdecrypted =%d\n", prxattrib->bdecrypted);
+ } else{
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, (" mic error :prxattrib->bdecrypted =%d ", prxattrib->bdecrypted));
+ DBG_871X(" mic error :prxattrib->bdecrypted =%d\n", prxattrib->bdecrypted);
+ }
+
+ res = _FAIL;
+
+ } else {
+ /* mic checked ok */
+ if ((psecuritypriv->bcheck_grpkey == false) && (IS_MCAST(prxattrib->ra) == true)) {
+ psecuritypriv->bcheck_grpkey = true;
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("psecuritypriv->bcheck_grpkey =true"));
+ }
+ }
+
+ } else
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("recvframe_chkmic: rtw_get_stainfo == NULL!!!\n"));
+
+ recvframe_pull_tail(precvframe, 8);
+
+ }
+
+exit:
+ return res;
+
+}
+
+/* decrypt and set the ivlen, icvlen of the recv_frame */
+union recv_frame *decryptor(struct adapter *padapter, union recv_frame *precv_frame);
+union recv_frame *decryptor(struct adapter *padapter, union recv_frame *precv_frame)
+{
+
+ struct rx_pkt_attrib *prxattrib = &precv_frame->u.hdr.attrib;
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
+ union recv_frame *return_packet = precv_frame;
+ u32 res = _SUCCESS;
+
+ DBG_COUNTER(padapter->rx_logs.core_rx_post_decrypt);
+
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("prxstat->decrypted =%x prxattrib->encrypt = 0x%03x\n", prxattrib->bdecrypted, prxattrib->encrypt));
+
+ if (prxattrib->encrypt > 0) {
+ u8 *iv = precv_frame->u.hdr.rx_data+prxattrib->hdrlen;
+ prxattrib->key_index = (((iv[3])>>6)&0x3);
+
+ if (prxattrib->key_index > WEP_KEYS) {
+ DBG_871X("prxattrib->key_index(%d) > WEP_KEYS\n", prxattrib->key_index);
+
+ switch (prxattrib->encrypt) {
+ case _WEP40_:
+ case _WEP104_:
+ prxattrib->key_index = psecuritypriv->dot11PrivacyKeyIndex;
+ break;
+ case _TKIP_:
+ case _AES_:
+ default:
+ prxattrib->key_index = psecuritypriv->dot118021XGrpKeyid;
+ break;
+ }
+ }
+ }
+
+ if ((prxattrib->encrypt > 0) && ((prxattrib->bdecrypted == 0) || (psecuritypriv->sw_decrypt == true))) {
+ psecuritypriv->hw_decrypted = false;
+
+ #ifdef DBG_RX_DECRYPTOR
+ DBG_871X("[%s] %d:prxstat->bdecrypted:%d, prxattrib->encrypt:%d, Setting psecuritypriv->hw_decrypted = %d\n",
+ __func__,
+ __LINE__,
+ prxattrib->bdecrypted,
+ prxattrib->encrypt,
+ psecuritypriv->hw_decrypted);
+ #endif
+
+ switch (prxattrib->encrypt) {
+ case _WEP40_:
+ case _WEP104_:
+ DBG_COUNTER(padapter->rx_logs.core_rx_post_decrypt_wep);
+ rtw_wep_decrypt(padapter, (u8 *)precv_frame);
+ break;
+ case _TKIP_:
+ DBG_COUNTER(padapter->rx_logs.core_rx_post_decrypt_tkip);
+ res = rtw_tkip_decrypt(padapter, (u8 *)precv_frame);
+ break;
+ case _AES_:
+ DBG_COUNTER(padapter->rx_logs.core_rx_post_decrypt_aes);
+ res = rtw_aes_decrypt(padapter, (u8 *)precv_frame);
+ break;
+ default:
+ break;
+ }
+ } else if (prxattrib->bdecrypted == 1
+ && prxattrib->encrypt > 0
+ && (psecuritypriv->busetkipkey == 1 || prxattrib->encrypt != _TKIP_)
+ ) {
+ DBG_COUNTER(padapter->rx_logs.core_rx_post_decrypt_hw);
+
+ psecuritypriv->hw_decrypted = true;
+ #ifdef DBG_RX_DECRYPTOR
+ DBG_871X("[%s] %d:prxstat->bdecrypted:%d, prxattrib->encrypt:%d, Setting psecuritypriv->hw_decrypted = %d\n",
+ __func__,
+ __LINE__,
+ prxattrib->bdecrypted,
+ prxattrib->encrypt,
+ psecuritypriv->hw_decrypted);
+
+ #endif
+ } else {
+ DBG_COUNTER(padapter->rx_logs.core_rx_post_decrypt_unknown);
+ #ifdef DBG_RX_DECRYPTOR
+ DBG_871X("[%s] %d:prxstat->bdecrypted:%d, prxattrib->encrypt:%d, Setting psecuritypriv->hw_decrypted = %d\n",
+ __func__,
+ __LINE__,
+ prxattrib->bdecrypted,
+ prxattrib->encrypt,
+ psecuritypriv->hw_decrypted);
+ #endif
+ }
+
+ if (res == _FAIL) {
+ rtw_free_recvframe(return_packet, &padapter->recvpriv.free_recv_queue);
+ return_packet = NULL;
+ } else
+ prxattrib->bdecrypted = true;
+
+ return return_packet;
+}
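+
+/*
+ * Worked example (illustration only): the key index recovered in decryptor()
+ * above lives in bits 7:6 of the 4th IV byte, so iv[3] = 0x40 gives
+ * key_index 1 and iv[3] = 0xC0 gives key_index 3.
+ */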
+
+/* set the security information in the recv_frame */
+union recv_frame *portctrl(struct adapter *adapter, union recv_frame *precv_frame);
+union recv_frame *portctrl(struct adapter *adapter, union recv_frame *precv_frame)
+{
+ u8 *psta_addr = NULL;
+ u8 *ptr;
+ uint auth_alg;
+ struct recv_frame_hdr *pfhdr;
+ struct sta_info *psta;
+ struct sta_priv *pstapriv;
+ union recv_frame *prtnframe;
+ u16 ether_type = 0;
+ u16 eapol_type = 0x888e;/* for Funia BD's WPA issue */
+ struct rx_pkt_attrib *pattrib;
+
+ pstapriv = &adapter->stapriv;
+
+ auth_alg = adapter->securitypriv.dot11AuthAlgrthm;
+
+ ptr = get_recvframe_data(precv_frame);
+ pfhdr = &precv_frame->u.hdr;
+ pattrib = &pfhdr->attrib;
+ psta_addr = pattrib->ta;
+
+ prtnframe = NULL;
+
+ psta = rtw_get_stainfo(pstapriv, psta_addr);
+
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("########portctrl:adapter->securitypriv.dot11AuthAlgrthm =%d\n", adapter->securitypriv.dot11AuthAlgrthm));
+
+ if (auth_alg == 2) {
+ if ((psta != NULL) && (psta->ieee8021x_blocked)) {
+ __be16 be_tmp;
+
+ /* blocked */
+ /* only accept EAPOL frame */
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("########portctrl:psta->ieee8021x_blocked == 1\n"));
+
+ prtnframe = precv_frame;
+
+ /* get ether_type */
+ ptr = ptr+pfhdr->attrib.hdrlen+pfhdr->attrib.iv_len+LLC_HEADER_SIZE;
+ memcpy(&be_tmp, ptr, 2);
+ ether_type = ntohs(be_tmp);
+
+ if (ether_type == eapol_type)
+ prtnframe = precv_frame;
+ else {
+ /* free this frame */
+ rtw_free_recvframe(precv_frame, &adapter->recvpriv.free_recv_queue);
+ prtnframe = NULL;
+ }
+ } else{
+ /* allowed */
+ /* check decryption status, and decrypt the frame if needed */
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("########portctrl:psta->ieee8021x_blocked == 0\n"));
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("portctrl:precv_frame->hdr.attrib.privacy =%x\n", precv_frame->u.hdr.attrib.privacy));
+
+ if (pattrib->bdecrypted == 0)
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("portctrl:prxstat->decrypted =%x\n", pattrib->bdecrypted));
+
+ prtnframe = precv_frame;
+ /* check is the EAPOL frame or not (Rekey) */
+ /* if (ether_type == eapol_type) { */
+ /* RT_TRACE(_module_rtl871x_recv_c_, _drv_notice_, ("########portctrl:ether_type == 0x888e\n")); */
+ /* check Rekey */
+
+ /* prtnframe =precv_frame; */
+ /* */
+ /* else { */
+ /* RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("########portctrl:ether_type = 0x%04x\n", ether_type)); */
+ /* */
+ }
+ } else
+ prtnframe = precv_frame;
+
+ return prtnframe;
+}
+
+sint recv_decache(union recv_frame *precv_frame, u8 bretry, struct stainfo_rxcache *prxcache);
+sint recv_decache(union recv_frame *precv_frame, u8 bretry, struct stainfo_rxcache *prxcache)
+{
+ sint tid = precv_frame->u.hdr.attrib.priority;
+
+ u16 seq_ctrl = ((precv_frame->u.hdr.attrib.seq_num&0xffff) << 4) |
+ (precv_frame->u.hdr.attrib.frag_num & 0xf);
+
+ if (tid > 15) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_notice_, ("recv_decache, (tid>15)! seq_ctrl = 0x%x, tid = 0x%x\n", seq_ctrl, tid));
+
+ return _FAIL;
+ }
+
+ if (1) { /* if (bretry) */
+ if (seq_ctrl == prxcache->tid_rxseq[tid]) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_notice_, ("recv_decache, seq_ctrl = 0x%x, tid = 0x%x, tid_rxseq = 0x%x\n", seq_ctrl, tid, prxcache->tid_rxseq[tid]));
+
+ return _FAIL;
+ }
+ }
+
+ prxcache->tid_rxseq[tid] = seq_ctrl;
+
+ return _SUCCESS;
+
+}
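+
+/*
+ * Worked example (illustration only): recv_decache() above folds the sequence
+ * and fragment numbers into one cache key, e.g. seq_num = 100, frag_num = 2
+ * gives seq_ctrl = (100 << 4) | 2 = 1602; a retry carrying the same pair is
+ * treated as a duplicate and dropped.
+ */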
+
+void process_pwrbit_data(struct adapter *padapter, union recv_frame *precv_frame);
+void process_pwrbit_data(struct adapter *padapter, union recv_frame *precv_frame)
+{
+ unsigned char pwrbit;
+ u8 *ptr = precv_frame->u.hdr.rx_data;
+ struct rx_pkt_attrib *pattrib = &precv_frame->u.hdr.attrib;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct sta_info *psta = NULL;
+
+ psta = rtw_get_stainfo(pstapriv, pattrib->src);
+
+ pwrbit = GetPwrMgt(ptr);
+
+ if (psta) {
+ if (pwrbit) {
+ if (!(psta->state & WIFI_SLEEP_STATE)) {
+ /* psta->state |= WIFI_SLEEP_STATE; */
+ /* pstapriv->sta_dz_bitmap |= BIT(psta->aid); */
+
+ stop_sta_xmit(padapter, psta);
+
+ /* DBG_871X("to sleep, sta_dz_bitmap =%x\n", pstapriv->sta_dz_bitmap); */
+ }
+ } else{
+ if (psta->state & WIFI_SLEEP_STATE) {
+ /* psta->state ^= WIFI_SLEEP_STATE; */
+ /* pstapriv->sta_dz_bitmap &= ~BIT(psta->aid); */
+
+ wakeup_sta_to_xmit(padapter, psta);
+
+ /* DBG_871X("to wakeup, sta_dz_bitmap =%x\n", pstapriv->sta_dz_bitmap); */
+ }
+ }
+
+ }
+}
+
+void process_wmmps_data(struct adapter *padapter, union recv_frame *precv_frame);
+void process_wmmps_data(struct adapter *padapter, union recv_frame *precv_frame)
+{
+ struct rx_pkt_attrib *pattrib = &precv_frame->u.hdr.attrib;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct sta_info *psta = NULL;
+
+ psta = rtw_get_stainfo(pstapriv, pattrib->src);
+
+ if (!psta)
+ return;
+
+ if (!psta->qos_option)
+ return;
+
+ if (!(psta->qos_info&0xf))
+ return;
+
+ if (psta->state&WIFI_SLEEP_STATE) {
+ u8 wmmps_ac = 0;
+
+ switch (pattrib->priority) {
+ case 1:
+ case 2:
+ wmmps_ac = psta->uapsd_bk&BIT(1);
+ break;
+ case 4:
+ case 5:
+ wmmps_ac = psta->uapsd_vi&BIT(1);
+ break;
+ case 6:
+ case 7:
+ wmmps_ac = psta->uapsd_vo&BIT(1);
+ break;
+ case 0:
+ case 3:
+ default:
+ wmmps_ac = psta->uapsd_be&BIT(1);
+ break;
+ }
+
+ if (wmmps_ac) {
+ if (psta->sleepq_ac_len > 0)
+ /* process received triggered frame */
+ xmit_delivery_enabled_frames(padapter, psta);
+ else
+ /* issue one qos null frame with More data bit = 0 and the EOSP bit set (= 1) */
+ issue_qos_nulldata(padapter, psta->hwaddr, (u16)pattrib->priority, 0, 0);
+ }
+ }
+}
+
+void count_rx_stats(struct adapter *padapter, union recv_frame *prframe, struct sta_info *sta);
+void count_rx_stats(struct adapter *padapter, union recv_frame *prframe, struct sta_info *sta)
+{
+ int sz;
+ struct sta_info *psta = NULL;
+ struct stainfo_stats *pstats = NULL;
+ struct rx_pkt_attrib *pattrib = &prframe->u.hdr.attrib;
+ struct recv_priv *precvpriv = &padapter->recvpriv;
+
+ sz = get_recvframe_len(prframe);
+ precvpriv->rx_bytes += sz;
+
+ padapter->mlmepriv.LinkDetectInfo.NumRxOkInPeriod++;
+
+ if ((!MacAddr_isBcst(pattrib->dst)) && (!IS_MCAST(pattrib->dst))) {
+ padapter->mlmepriv.LinkDetectInfo.NumRxUnicastOkInPeriod++;
+ }
+
+ if (sta)
+ psta = sta;
+ else
+ psta = prframe->u.hdr.psta;
+
+ if (psta) {
+ pstats = &psta->sta_stats;
+
+ pstats->rx_data_pkts++;
+ pstats->rx_bytes += sz;
+ }
+
+ traffic_check_for_leave_lps(padapter, false, 0);
+}
+
+sint sta2sta_data_frame(
+ struct adapter *adapter,
+ union recv_frame *precv_frame,
+ struct sta_info **psta
+);
+sint sta2sta_data_frame(
+ struct adapter *adapter,
+ union recv_frame *precv_frame,
+ struct sta_info **psta
+)
+{
+ u8 *ptr = precv_frame->u.hdr.rx_data;
+ sint ret = _SUCCESS;
+ struct rx_pkt_attrib *pattrib = &precv_frame->u.hdr.attrib;
+ struct sta_priv *pstapriv = &adapter->stapriv;
+ struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
+ u8 *mybssid = get_bssid(pmlmepriv);
+ u8 *myhwaddr = myid(&adapter->eeprompriv);
+ u8 *sta_addr = NULL;
+ sint bmcast = IS_MCAST(pattrib->dst);
+
+ /* DBG_871X("[%s] %d, seqnum:%d\n", __func__, __LINE__, pattrib->seq_num); */
+
+ if ((check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) == true) ||
+ (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) == true)) {
+
+ /* filter packets that SA is myself or multicast or broadcast */
+ if (!memcmp(myhwaddr, pattrib->src, ETH_ALEN)) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, (" SA ==myself\n"));
+ ret = _FAIL;
+ goto exit;
+ }
+
+ if ((memcmp(myhwaddr, pattrib->dst, ETH_ALEN)) && (!bmcast)) {
+ ret = _FAIL;
+ goto exit;
+ }
+
+ if (!memcmp(pattrib->bssid, "\x0\x0\x0\x0\x0\x0", ETH_ALEN) ||
+ !memcmp(mybssid, "\x0\x0\x0\x0\x0\x0", ETH_ALEN) ||
+ (memcmp(pattrib->bssid, mybssid, ETH_ALEN))) {
+ ret = _FAIL;
+ goto exit;
+ }
+
+ sta_addr = pattrib->src;
+
+ } else if (check_fwstate(pmlmepriv, WIFI_STATION_STATE) == true) {
+ /* For Station mode, SA and BSSID should always be the BSSID, and DA is my MAC address */
+ if (memcmp(pattrib->bssid, pattrib->src, ETH_ALEN)) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("bssid != TA under STATION_MODE; drop pkt\n"));
+ ret = _FAIL;
+ goto exit;
+ }
+
+ sta_addr = pattrib->bssid;
+ } else if (check_fwstate(pmlmepriv, WIFI_AP_STATE) == true) {
+ if (bmcast) {
+ /* For AP mode, if DA == MCAST, then BSSID should be also MCAST */
+ if (!IS_MCAST(pattrib->bssid)) {
+ ret = _FAIL;
+ goto exit;
+ }
+ } else{ /* not mc-frame */
+ /* For AP mode, if DA is non-MCAST, then it must be BSSID, and bssid == BSSID */
+ if (memcmp(pattrib->bssid, pattrib->dst, ETH_ALEN)) {
+ ret = _FAIL;
+ goto exit;
+ }
+
+ sta_addr = pattrib->src;
+ }
+
+ } else if (check_fwstate(pmlmepriv, WIFI_MP_STATE) == true) {
+ memcpy(pattrib->dst, GetAddr1Ptr(ptr), ETH_ALEN);
+ memcpy(pattrib->src, GetAddr2Ptr(ptr), ETH_ALEN);
+ memcpy(pattrib->bssid, GetAddr3Ptr(ptr), ETH_ALEN);
+ memcpy(pattrib->ra, pattrib->dst, ETH_ALEN);
+ memcpy(pattrib->ta, pattrib->src, ETH_ALEN);
+
+ sta_addr = mybssid;
+ } else
+ ret = _FAIL;
+
+
+
+ if (bmcast)
+ *psta = rtw_get_bcmc_stainfo(adapter);
+ else
+ *psta = rtw_get_stainfo(pstapriv, sta_addr); /* get ap_info */
+
+ if (*psta == NULL) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("can't get psta under sta2sta_data_frame ; drop pkt\n"));
+ ret = _FAIL;
+ goto exit;
+ }
+
+exit:
+ return ret;
+}
+
+sint ap2sta_data_frame(
+ struct adapter *adapter,
+ union recv_frame *precv_frame,
+ struct sta_info **psta);
+sint ap2sta_data_frame(
+ struct adapter *adapter,
+ union recv_frame *precv_frame,
+ struct sta_info **psta)
+{
+ u8 *ptr = precv_frame->u.hdr.rx_data;
+ struct rx_pkt_attrib *pattrib = &precv_frame->u.hdr.attrib;
+ sint ret = _SUCCESS;
+ struct sta_priv *pstapriv = &adapter->stapriv;
+ struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
+ u8 *mybssid = get_bssid(pmlmepriv);
+ u8 *myhwaddr = myid(&adapter->eeprompriv);
+ sint bmcast = IS_MCAST(pattrib->dst);
+
+ if ((check_fwstate(pmlmepriv, WIFI_STATION_STATE) == true)
+ && (check_fwstate(pmlmepriv, _FW_LINKED) == true
+ || check_fwstate(pmlmepriv, _FW_UNDER_LINKING) == true)
+ ) {
+
+ /* filter packets that SA is myself or multicast or broadcast */
+ if (!memcmp(myhwaddr, pattrib->src, ETH_ALEN)) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, (" SA ==myself\n"));
+ #ifdef DBG_RX_DROP_FRAME
+ DBG_871X("DBG_RX_DROP_FRAME %s SA ="MAC_FMT", myhwaddr ="MAC_FMT"\n",
+ __func__, MAC_ARG(pattrib->src), MAC_ARG(myhwaddr));
+ #endif
+ ret = _FAIL;
+ goto exit;
+ }
+
+ /* da should be for me */
+ if ((memcmp(myhwaddr, pattrib->dst, ETH_ALEN)) && (!bmcast)) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
+ (" ap2sta_data_frame: compare DA fail; DA ="MAC_FMT"\n", MAC_ARG(pattrib->dst)));
+ #ifdef DBG_RX_DROP_FRAME
+ DBG_871X("DBG_RX_DROP_FRAME %s DA ="MAC_FMT"\n", __func__, MAC_ARG(pattrib->dst));
+ #endif
+ ret = _FAIL;
+ goto exit;
+ }
+
+
+ /* check BSSID */
+ if (!memcmp(pattrib->bssid, "\x0\x0\x0\x0\x0\x0", ETH_ALEN) ||
+ !memcmp(mybssid, "\x0\x0\x0\x0\x0\x0", ETH_ALEN) ||
+ (memcmp(pattrib->bssid, mybssid, ETH_ALEN))) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
+ (" ap2sta_data_frame: compare BSSID fail ; BSSID ="MAC_FMT"\n", MAC_ARG(pattrib->bssid)));
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("mybssid ="MAC_FMT"\n", MAC_ARG(mybssid)));
+ #ifdef DBG_RX_DROP_FRAME
+ DBG_871X("DBG_RX_DROP_FRAME %s BSSID ="MAC_FMT", mybssid ="MAC_FMT"\n",
+ __func__, MAC_ARG(pattrib->bssid), MAC_ARG(mybssid));
+ DBG_871X("this adapter = %d\n", adapter->adapter_type);
+ #endif
+
+ if (!bmcast) {
+ DBG_871X("issue_deauth to the nonassociated ap =" MAC_FMT " for the reason(7)\n", MAC_ARG(pattrib->bssid));
+ issue_deauth(adapter, pattrib->bssid, WLAN_REASON_CLASS3_FRAME_FROM_NONASSOC_STA);
+ }
+
+ ret = _FAIL;
+ goto exit;
+ }
+
+ if (bmcast)
+ *psta = rtw_get_bcmc_stainfo(adapter);
+ else
+ *psta = rtw_get_stainfo(pstapriv, pattrib->bssid); /* get ap_info */
+
+ if (*psta == NULL) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("ap2sta: can't get psta under STATION_MODE ; drop pkt\n"));
+ #ifdef DBG_RX_DROP_FRAME
+ DBG_871X("DBG_RX_DROP_FRAME %s can't get psta under STATION_MODE ; drop pkt\n", __func__);
+ #endif
+ ret = _FAIL;
+ goto exit;
+ }
+
+ if ((GetFrameSubType(ptr) & WIFI_QOS_DATA_TYPE) == WIFI_QOS_DATA_TYPE) {
+ }
+
+ if (GetFrameSubType(ptr) & BIT(6)) {
+ /* No data, will not be indicated to the upper layer, temporarily count it here */
+ count_rx_stats(adapter, precv_frame, *psta);
+ ret = RTW_RX_HANDLED;
+ goto exit;
+ }
+
+ } else if ((check_fwstate(pmlmepriv, WIFI_MP_STATE) == true) &&
+ (check_fwstate(pmlmepriv, _FW_LINKED) == true)) {
+ memcpy(pattrib->dst, GetAddr1Ptr(ptr), ETH_ALEN);
+ memcpy(pattrib->src, GetAddr2Ptr(ptr), ETH_ALEN);
+ memcpy(pattrib->bssid, GetAddr3Ptr(ptr), ETH_ALEN);
+ memcpy(pattrib->ra, pattrib->dst, ETH_ALEN);
+ memcpy(pattrib->ta, pattrib->src, ETH_ALEN);
+
+ /* */
+ memcpy(pattrib->bssid, mybssid, ETH_ALEN);
+
+
+ *psta = rtw_get_stainfo(pstapriv, pattrib->bssid); /* get sta_info */
+ if (*psta == NULL) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("can't get psta under MP_MODE ; drop pkt\n"));
+ #ifdef DBG_RX_DROP_FRAME
+ DBG_871X("DBG_RX_DROP_FRAME %s can't get psta under WIFI_MP_STATE ; drop pkt\n", __func__);
+ #endif
+ ret = _FAIL;
+ goto exit;
+ }
+
+
+ } else if (check_fwstate(pmlmepriv, WIFI_AP_STATE) == true) {
+ /* Special case */
+ ret = RTW_RX_HANDLED;
+ goto exit;
+ } else {
+ if (!memcmp(myhwaddr, pattrib->dst, ETH_ALEN) && (!bmcast)) {
+ *psta = rtw_get_stainfo(pstapriv, pattrib->bssid); /* get sta_info */
+ if (*psta == NULL) {
+
+ /* for AP multicast issue, modified by yiwei */
+ static unsigned long send_issue_deauth_time = 0;
+
+ /* DBG_871X("After send deauth , %u ms has elapsed.\n", jiffies_to_msecs(jiffies - send_issue_deauth_time)); */
+
+ if (jiffies_to_msecs(jiffies - send_issue_deauth_time) > 10000 || send_issue_deauth_time == 0) {
+ send_issue_deauth_time = jiffies;
+
+ DBG_871X("issue_deauth to the ap =" MAC_FMT " for the reason(7)\n", MAC_ARG(pattrib->bssid));
+
+ issue_deauth(adapter, pattrib->bssid, WLAN_REASON_CLASS3_FRAME_FROM_NONASSOC_STA);
+ }
+ }
+ }
+
+ ret = _FAIL;
+ #ifdef DBG_RX_DROP_FRAME
+ DBG_871X("DBG_RX_DROP_FRAME %s fw_state:0x%x\n", __func__, get_fwstate(pmlmepriv));
+ #endif
+ }
+
+exit:
+ return ret;
+}
+
+sint sta2ap_data_frame(
+ struct adapter *adapter,
+ union recv_frame *precv_frame,
+ struct sta_info **psta);
+sint sta2ap_data_frame(
+ struct adapter *adapter,
+ union recv_frame *precv_frame,
+ struct sta_info **psta)
+{
+ u8 *ptr = precv_frame->u.hdr.rx_data;
+ struct rx_pkt_attrib *pattrib = &precv_frame->u.hdr.attrib;
+ struct sta_priv *pstapriv = &adapter->stapriv;
+ struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
+ unsigned char *mybssid = get_bssid(pmlmepriv);
+ sint ret = _SUCCESS;
+
+ if (check_fwstate(pmlmepriv, WIFI_AP_STATE) == true) {
+ /* For AP mode, RA = BSSID, TA = STA (SRC_ADDR), A3 = DST_ADDR */
+ if (memcmp(pattrib->bssid, mybssid, ETH_ALEN)) {
+ ret = _FAIL;
+ goto exit;
+ }
+
+ *psta = rtw_get_stainfo(pstapriv, pattrib->src);
+ if (*psta == NULL) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("can't get psta under AP_MODE; drop pkt\n"));
+ DBG_871X("issue_deauth to sta =" MAC_FMT " for the reason(7)\n", MAC_ARG(pattrib->src));
+
+ issue_deauth(adapter, pattrib->src, WLAN_REASON_CLASS3_FRAME_FROM_NONASSOC_STA);
+
+ ret = RTW_RX_HANDLED;
+ goto exit;
+ }
+
+ process_pwrbit_data(adapter, precv_frame);
+
+ if ((GetFrameSubType(ptr) & WIFI_QOS_DATA_TYPE) == WIFI_QOS_DATA_TYPE) {
+ process_wmmps_data(adapter, precv_frame);
+ }
+
+ if (GetFrameSubType(ptr) & BIT(6)) {
+ /* No data, will not be indicated to the upper layer, temporarily count it here */
+ count_rx_stats(adapter, precv_frame, *psta);
+ ret = RTW_RX_HANDLED;
+ goto exit;
+ }
+ } else {
+ u8 *myhwaddr = myid(&adapter->eeprompriv);
+ if (memcmp(pattrib->ra, myhwaddr, ETH_ALEN)) {
+ ret = RTW_RX_HANDLED;
+ goto exit;
+ }
+ DBG_871X("issue_deauth to sta =" MAC_FMT " for the reason(7)\n", MAC_ARG(pattrib->src));
+ issue_deauth(adapter, pattrib->src, WLAN_REASON_CLASS3_FRAME_FROM_NONASSOC_STA);
+ ret = RTW_RX_HANDLED;
+ goto exit;
+ }
+
+exit:
+ return ret;
+}
+
+sint validate_recv_ctrl_frame(struct adapter *padapter, union recv_frame *precv_frame);
+sint validate_recv_ctrl_frame(struct adapter *padapter, union recv_frame *precv_frame)
+{
+ struct rx_pkt_attrib *pattrib = &precv_frame->u.hdr.attrib;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ u8 *pframe = precv_frame->u.hdr.rx_data;
+ struct sta_info *psta = NULL;
+ /* uint len = precv_frame->u.hdr.len; */
+
+ /* DBG_871X("+validate_recv_ctrl_frame\n"); */
+
+ if (GetFrameType(pframe) != WIFI_CTRL_TYPE)
+ return _FAIL;
+
+ /* receive the frames that ra(a1) is my address */
+ if (memcmp(GetAddr1Ptr(pframe), myid(&padapter->eeprompriv), ETH_ALEN))
+ return _FAIL;
+
+ psta = rtw_get_stainfo(pstapriv, GetAddr2Ptr(pframe));
+ if (psta == NULL)
+ return _FAIL;
+
+ /* for rx pkt statistics */
+ psta->sta_stats.rx_ctrl_pkts++;
+
+ /* only handle ps-poll */
+ if (GetFrameSubType(pframe) == WIFI_PSPOLL) {
+ u16 aid;
+ u8 wmmps_ac = 0;
+
+ aid = GetAid(pframe);
+ if (psta->aid != aid)
+ return _FAIL;
+
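+ /*
+ * Map the frame's TID (pattrib->priority) to its WMM access
+ * category to pick the matching U-APSD flag: TIDs 1,2 -> BK,
+ * 4,5 -> VI, 6,7 -> VO and 0,3 (the default) -> BE.
+ */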
+ switch (pattrib->priority) {
+ case 1:
+ case 2:
+ wmmps_ac = psta->uapsd_bk&BIT(0);
+ break;
+ case 4:
+ case 5:
+ wmmps_ac = psta->uapsd_vi&BIT(0);
+ break;
+ case 6:
+ case 7:
+ wmmps_ac = psta->uapsd_vo&BIT(0);
+ break;
+ case 0:
+ case 3:
+ default:
+ wmmps_ac = psta->uapsd_be&BIT(0);
+ break;
+ }
+
+ if (wmmps_ac)
+ return _FAIL;
+
+ if (psta->state & WIFI_STA_ALIVE_CHK_STATE) {
+ DBG_871X("%s alive check-rx ps-poll\n", __func__);
+ psta->expire_to = pstapriv->expire_to;
+ psta->state ^= WIFI_STA_ALIVE_CHK_STATE;
+ }
+
+ if ((psta->state&WIFI_SLEEP_STATE) && (pstapriv->sta_dz_bitmap&BIT(psta->aid))) {
+ struct list_head *xmitframe_plist, *xmitframe_phead;
+ struct xmit_frame *pxmitframe = NULL;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+
+ /* spin_lock_bh(&psta->sleep_q.lock); */
+ spin_lock_bh(&pxmitpriv->lock);
+
+ xmitframe_phead = get_list_head(&psta->sleep_q);
+ xmitframe_plist = get_next(xmitframe_phead);
+
+ if (xmitframe_phead != xmitframe_plist) {
+ pxmitframe = LIST_CONTAINOR(xmitframe_plist, struct xmit_frame, list);
+
+ xmitframe_plist = get_next(xmitframe_plist);
+
+ list_del_init(&pxmitframe->list);
+
+ psta->sleepq_len--;
+
+ if (psta->sleepq_len > 0)
+ pxmitframe->attrib.mdata = 1;
+ else
+ pxmitframe->attrib.mdata = 0;
+
+ pxmitframe->attrib.triggered = 1;
+
+ /* DBG_871X("handling ps-poll, q_len =%d, tim =%x\n", psta->sleepq_len, pstapriv->tim_bitmap); */
+
+ rtw_hal_xmitframe_enqueue(padapter, pxmitframe);
+
+ if (psta->sleepq_len == 0) {
+ pstapriv->tim_bitmap &= ~BIT(psta->aid);
+
+ /* DBG_871X("after handling ps-poll, tim =%x\n", pstapriv->tim_bitmap); */
+
+ /* update BCN for TIM IE */
+ /* update_BCNTIM(padapter); */
+ update_beacon(padapter, _TIM_IE_, NULL, true);
+ }
+
+ /* spin_unlock_bh(&psta->sleep_q.lock); */
+ spin_unlock_bh(&pxmitpriv->lock);
+
+ } else {
+ /* spin_unlock_bh(&psta->sleep_q.lock); */
+ spin_unlock_bh(&pxmitpriv->lock);
+
+ /* DBG_871X("no buffered packets to xmit\n"); */
+ if (pstapriv->tim_bitmap&BIT(psta->aid)) {
+ if (psta->sleepq_len == 0) {
+ DBG_871X("no buffered packets to xmit\n");
+
+ /* issue nulldata with More data bit = 0 to indicate we have no buffered packets */
+ issue_nulldata_in_interrupt(padapter, psta->hwaddr);
+ } else {
+ DBG_871X("error! psta->sleepq_len =%d\n", psta->sleepq_len);
+ psta->sleepq_len = 0;
+ }
+
+ pstapriv->tim_bitmap &= ~BIT(psta->aid);
+
+ /* update BCN for TIM IE */
+ /* update_BCNTIM(padapter); */
+ update_beacon(padapter, _TIM_IE_, NULL, true);
+ }
+ }
+ }
+ }
+
+ return _FAIL;
+
+}
+
+union recv_frame *recvframe_chk_defrag(struct adapter *padapter, union recv_frame *precv_frame);
+sint validate_recv_mgnt_frame(struct adapter *padapter, union recv_frame *precv_frame);
+sint validate_recv_mgnt_frame(struct adapter *padapter, union recv_frame *precv_frame)
+{
+ /* struct mlme_priv *pmlmepriv = &adapter->mlmepriv; */
+
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("+validate_recv_mgnt_frame\n"));
+
+ precv_frame = recvframe_chk_defrag(padapter, precv_frame);
+ if (precv_frame == NULL) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_notice_, ("%s: fragment packet\n", __func__));
+ return _SUCCESS;
+ }
+
+ {
+ /* for rx pkt statistics */
+ struct sta_info *psta = rtw_get_stainfo(&padapter->stapriv, GetAddr2Ptr(precv_frame->u.hdr.rx_data));
+ if (psta) {
+ psta->sta_stats.rx_mgnt_pkts++;
+ if (GetFrameSubType(precv_frame->u.hdr.rx_data) == WIFI_BEACON)
+ psta->sta_stats.rx_beacon_pkts++;
+ else if (GetFrameSubType(precv_frame->u.hdr.rx_data) == WIFI_PROBEREQ)
+ psta->sta_stats.rx_probereq_pkts++;
+ else if (GetFrameSubType(precv_frame->u.hdr.rx_data) == WIFI_PROBERSP) {
+ if (!memcmp(padapter->eeprompriv.mac_addr, GetAddr1Ptr(precv_frame->u.hdr.rx_data), ETH_ALEN))
+ psta->sta_stats.rx_probersp_pkts++;
+ else if (is_broadcast_mac_addr(GetAddr1Ptr(precv_frame->u.hdr.rx_data))
+ || is_multicast_mac_addr(GetAddr1Ptr(precv_frame->u.hdr.rx_data)))
+ psta->sta_stats.rx_probersp_bm_pkts++;
+ else
+ psta->sta_stats.rx_probersp_uo_pkts++;
+ }
+ }
+ }
+
+ mgt_dispatcher(padapter, precv_frame);
+
+ return _SUCCESS;
+
+}
+
+sint validate_recv_data_frame(struct adapter *adapter, union recv_frame *precv_frame);
+sint validate_recv_data_frame(struct adapter *adapter, union recv_frame *precv_frame)
+{
+ u8 bretry;
+ u8 *psa, *pda, *pbssid;
+ struct sta_info *psta = NULL;
+ u8 *ptr = precv_frame->u.hdr.rx_data;
+ struct rx_pkt_attrib *pattrib = &precv_frame->u.hdr.attrib;
+ struct security_priv *psecuritypriv = &adapter->securitypriv;
+ sint ret = _SUCCESS;
+
+ bretry = GetRetry(ptr);
+ pda = get_da(ptr);
+ psa = get_sa(ptr);
+ pbssid = get_hdr_bssid(ptr);
+
+ if (pbssid == NULL) {
+ #ifdef DBG_RX_DROP_FRAME
+ DBG_871X("DBG_RX_DROP_FRAME %s pbssid == NULL\n", __func__);
+ #endif
+ ret = _FAIL;
+ goto exit;
+ }
+
+ memcpy(pattrib->dst, pda, ETH_ALEN);
+ memcpy(pattrib->src, psa, ETH_ALEN);
+
+ memcpy(pattrib->bssid, pbssid, ETH_ALEN);
+
+ switch (pattrib->to_fr_ds) {
+ case 0:
+ memcpy(pattrib->ra, pda, ETH_ALEN);
+ memcpy(pattrib->ta, psa, ETH_ALEN);
+ ret = sta2sta_data_frame(adapter, precv_frame, &psta);
+ break;
+
+ case 1:
+ memcpy(pattrib->ra, pda, ETH_ALEN);
+ memcpy(pattrib->ta, pbssid, ETH_ALEN);
+ ret = ap2sta_data_frame(adapter, precv_frame, &psta);
+ break;
+
+ case 2:
+ memcpy(pattrib->ra, pbssid, ETH_ALEN);
+ memcpy(pattrib->ta, psa, ETH_ALEN);
+ ret = sta2ap_data_frame(adapter, precv_frame, &psta);
+ break;
+
+ case 3:
+ memcpy(pattrib->ra, GetAddr1Ptr(ptr), ETH_ALEN);
+ memcpy(pattrib->ta, GetAddr2Ptr(ptr), ETH_ALEN);
+ ret = _FAIL;
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, (" case 3\n"));
+ break;
+
+ default:
+ ret = _FAIL;
+ break;
+
+ }
+
+ if (ret == _FAIL) {
+ #ifdef DBG_RX_DROP_FRAME
+ DBG_871X("DBG_RX_DROP_FRAME %s case:%d, res:%d\n", __func__, pattrib->to_fr_ds, ret);
+ #endif
+ goto exit;
+ } else if (ret == RTW_RX_HANDLED) {
+ goto exit;
+ }
+
+
+ if (psta == NULL) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, (" after to_fr_ds_chk; psta == NULL\n"));
+ #ifdef DBG_RX_DROP_FRAME
+ DBG_871X("DBG_RX_DROP_FRAME %s psta == NULL\n", __func__);
+ #endif
+ ret = _FAIL;
+ goto exit;
+ }
+
+ /* psta->rssi = prxcmd->rssi; */
+ /* psta->signal_quality = prxcmd->sq; */
+ precv_frame->u.hdr.psta = psta;
+
+
+ pattrib->amsdu = 0;
+ pattrib->ack_policy = 0;
+ /* parsing QC field */
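+ /*
+ * Header length bookkeeping (worked example): a basic 3-address
+ * 802.11 header is 24 bytes (2 FC + 2 Duration + 3*6 Address + 2 Seq);
+ * a 4-address header (to_fr_ds == 3) adds 6 bytes, giving 30; a QoS
+ * Control field adds another 2 bytes, hence the 26/32 and 24/30
+ * values assigned below.
+ */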
+ if (pattrib->qos == 1) {
+ pattrib->priority = GetPriority((ptr + 24));
+ pattrib->ack_policy = GetAckpolicy((ptr + 24));
+ pattrib->amsdu = GetAMsdu((ptr + 24));
+ pattrib->hdrlen = pattrib->to_fr_ds == 3 ? 32 : 26;
+
+ if (pattrib->priority != 0 && pattrib->priority != 3)
+ adapter->recvpriv.bIsAnyNonBEPkts = true;
+
+ } else {
+ pattrib->priority = 0;
+ pattrib->hdrlen = pattrib->to_fr_ds == 3 ? 30 : 24;
+ }
+
+
+ if (pattrib->order)/* HT-CTRL 11n */
+ pattrib->hdrlen += 4;
+
+ precv_frame->u.hdr.preorder_ctrl = &psta->recvreorder_ctrl[pattrib->priority];
+
+ /* decache, drop duplicate recv packets */
+ if (recv_decache(precv_frame, bretry, &psta->sta_recvpriv.rxcache) == _FAIL) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("decache : drop pkt\n"));
+ #ifdef DBG_RX_DROP_FRAME
+ DBG_871X("DBG_RX_DROP_FRAME %s recv_decache return _FAIL\n", __func__);
+ #endif
+ ret = _FAIL;
+ goto exit;
+ }
+
+ if (pattrib->privacy) {
+
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("validate_recv_data_frame:pattrib->privacy =%x\n", pattrib->privacy));
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("\n ^^^^^^^^^^^IS_MCAST(pattrib->ra(0x%02x)) =%d^^^^^^^^^^^^^^^6\n", pattrib->ra[0], IS_MCAST(pattrib->ra)));
+
+ GET_ENCRY_ALGO(psecuritypriv, psta, pattrib->encrypt, IS_MCAST(pattrib->ra));
+
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("\n pattrib->encrypt =%d\n", pattrib->encrypt));
+
+ SET_ICE_IV_LEN(pattrib->iv_len, pattrib->icv_len, pattrib->encrypt);
+ } else {
+ pattrib->encrypt = 0;
+ pattrib->iv_len = pattrib->icv_len = 0;
+ }
+
+exit:
+ return ret;
+}
+
+static sint validate_80211w_mgmt(struct adapter *adapter, union recv_frame *precv_frame)
+{
+ struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
+ struct rx_pkt_attrib *pattrib = &precv_frame->u.hdr.attrib;
+ u8 *ptr = precv_frame->u.hdr.rx_data;
+ u8 type;
+ u8 subtype;
+
+ type = GetFrameType(ptr);
+ subtype = GetFrameSubType(ptr); /* bit(7)~bit(2) */
+
+ /* only support station mode */
+ if (check_fwstate(pmlmepriv, WIFI_STATION_STATE) && check_fwstate(pmlmepriv, _FW_LINKED)
+ && adapter->securitypriv.binstallBIPkey == true) {
+ /* unicast management frame decrypt */
+ if (pattrib->privacy && !(IS_MCAST(GetAddr1Ptr(ptr))) &&
+ (subtype == WIFI_DEAUTH || subtype == WIFI_DISASSOC || subtype == WIFI_ACTION)) {
+ u8 *ppp, *mgmt_DATA;
+ u32 data_len = 0;
+ ppp = GetAddr2Ptr(ptr);
+
+ pattrib->bdecrypted = 0;
+ pattrib->encrypt = _AES_;
+ pattrib->hdrlen = sizeof(struct ieee80211_hdr_3addr);
+ /* set iv and icv length */
+ SET_ICE_IV_LEN(pattrib->iv_len, pattrib->icv_len, pattrib->encrypt);
+ memcpy(pattrib->ra, GetAddr1Ptr(ptr), ETH_ALEN);
+ memcpy(pattrib->ta, GetAddr2Ptr(ptr), ETH_ALEN);
+ /* actual management data frame body */
+ data_len = pattrib->pkt_len - pattrib->hdrlen - pattrib->iv_len - pattrib->icv_len;
+ mgmt_DATA = rtw_zmalloc(data_len);
+ if (mgmt_DATA == NULL) {
+ DBG_871X("%s mgmt allocate fail !!!!!!!!!\n", __func__);
+ goto validate_80211w_fail;
+ }
+ precv_frame = decryptor(adapter, precv_frame);
+ /* save actual management data frame body */
+ memcpy(mgmt_DATA, ptr+pattrib->hdrlen+pattrib->iv_len, data_len);
+ /* overwrite the iv field */
+ memcpy(ptr+pattrib->hdrlen, mgmt_DATA, data_len);
+ /* remove the iv and icv length */
+ pattrib->pkt_len = pattrib->pkt_len - pattrib->iv_len - pattrib->icv_len;
+ kfree(mgmt_DATA);
+ if (!precv_frame) {
+ DBG_871X("%s mgmt decrypt fail\n", __func__);
+ goto validate_80211w_fail;
+ }
+ } else if (IS_MCAST(GetAddr1Ptr(ptr)) &&
+ (subtype == WIFI_DEAUTH || subtype == WIFI_DISASSOC)) {
+ sint BIP_ret = _SUCCESS;
+ /* verify BIP MME IE of broadcast/multicast de-auth/disassoc packet */
+ BIP_ret = rtw_BIP_verify(adapter, (u8 *)precv_frame);
+ if (BIP_ret == _FAIL) {
+ /* DBG_871X("802.11w BIP verify fail\n"); */
+ goto validate_80211w_fail;
+ } else if (BIP_ret == RTW_RX_HANDLED) {
+ /* DBG_871X("802.11w recv none protected packet\n"); */
+ /* issue sa query request */
+ issue_action_SA_Query(adapter, NULL, 0, 0);
+ goto validate_80211w_fail;
+ }
+ } else { /* 802.11w protect */
+ if (subtype == WIFI_ACTION) {
+ /* according to the 802.11-2012 standard, these five categories are not robust */
+ if (ptr[WLAN_HDR_A3_LEN] != RTW_WLAN_CATEGORY_PUBLIC &&
+ ptr[WLAN_HDR_A3_LEN] != RTW_WLAN_CATEGORY_HT &&
+ ptr[WLAN_HDR_A3_LEN] != RTW_WLAN_CATEGORY_UNPROTECTED_WNM &&
+ ptr[WLAN_HDR_A3_LEN] != RTW_WLAN_CATEGORY_SELF_PROTECTED &&
+ ptr[WLAN_HDR_A3_LEN] != RTW_WLAN_CATEGORY_P2P) {
+ DBG_871X("action frame category =%d should be robust\n", ptr[WLAN_HDR_A3_LEN]);
+ goto validate_80211w_fail;
+ }
+ } else if (subtype == WIFI_DEAUTH || subtype == WIFI_DISASSOC) {
+ DBG_871X("802.11w recv none protected packet\n");
+ /* issue sa query request */
+ issue_action_SA_Query(adapter, NULL, 0, 0);
+ goto validate_80211w_fail;
+ }
+ }
+ }
+ return _SUCCESS;
+
+validate_80211w_fail:
+ return _FAIL;
+
+}
+
+static inline void dump_rx_packet(u8 *ptr)
+{
+ int i;
+
+ DBG_871X("#############################\n");
+ for (i = 0; i < 64; i = i+8)
+ DBG_871X("%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X:\n", *(ptr+i),
+ *(ptr+i+1), *(ptr+i+2), *(ptr+i+3), *(ptr+i+4), *(ptr+i+5), *(ptr+i+6), *(ptr+i+7));
+ DBG_871X("#############################\n");
+}
+
+sint validate_recv_frame(struct adapter *adapter, union recv_frame *precv_frame);
+sint validate_recv_frame(struct adapter *adapter, union recv_frame *precv_frame)
+{
+ /* check frame subtype, to/from DS, DA and BSSID */
+
+ /* then check whether the rx seq/frag is duplicated */
+
+ u8 type;
+ u8 subtype;
+ sint retval = _SUCCESS;
+ u8 bDumpRxPkt;
+
+ struct rx_pkt_attrib *pattrib = &precv_frame->u.hdr.attrib;
+
+ u8 *ptr = precv_frame->u.hdr.rx_data;
+ u8 ver = (unsigned char) (*ptr)&0x3;
+
+ /* add version chk */
+ if (ver != 0) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("validate_recv_frame fail! (ver != 0)\n"));
+ retval = _FAIL;
+ DBG_COUNTER(adapter->rx_logs.core_rx_pre_ver_err);
+ goto exit;
+ }
+
+ type = GetFrameType(ptr);
+ subtype = GetFrameSubType(ptr); /* bit(7)~bit(2) */
+
+ pattrib->to_fr_ds = get_tofr_ds(ptr);
+
+ pattrib->frag_num = GetFragNum(ptr);
+ pattrib->seq_num = GetSequence(ptr);
+
+ pattrib->pw_save = GetPwrMgt(ptr);
+ pattrib->mfrag = GetMFrag(ptr);
+ pattrib->mdata = GetMData(ptr);
+ pattrib->privacy = GetPrivacy(ptr);
+ pattrib->order = GetOrder(ptr);
+ rtw_hal_get_def_var(adapter, HAL_DEF_DBG_DUMP_RXPKT, &(bDumpRxPkt));
+ if (bDumpRxPkt == 1) /* dump all rx packets */
+ dump_rx_packet(ptr);
+ else if ((bDumpRxPkt == 2) && (type == WIFI_MGT_TYPE))
+ dump_rx_packet(ptr);
+ else if ((bDumpRxPkt == 3) && (type == WIFI_DATA_TYPE))
+ dump_rx_packet(ptr);
+
+ switch (type) {
+ case WIFI_MGT_TYPE: /* mgnt */
+ DBG_COUNTER(adapter->rx_logs.core_rx_pre_mgmt);
+ if (validate_80211w_mgmt(adapter, precv_frame) == _FAIL) {
+ retval = _FAIL;
+ DBG_COUNTER(adapter->rx_logs.core_rx_pre_mgmt_err_80211w);
+ break;
+ }
+
+ retval = validate_recv_mgnt_frame(adapter, precv_frame);
+ if (retval == _FAIL) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("validate_recv_mgnt_frame fail\n"));
+ DBG_COUNTER(adapter->rx_logs.core_rx_pre_mgmt_err);
+ }
+ retval = _FAIL; /* only data frame return _SUCCESS */
+ break;
+ case WIFI_CTRL_TYPE: /* ctrl */
+ DBG_COUNTER(adapter->rx_logs.core_rx_pre_ctrl);
+ retval = validate_recv_ctrl_frame(adapter, precv_frame);
+ if (retval == _FAIL) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("validate_recv_ctrl_frame fail\n"));
+ DBG_COUNTER(adapter->rx_logs.core_rx_pre_ctrl_err);
+ }
+ retval = _FAIL; /* only data frame return _SUCCESS */
+ break;
+ case WIFI_DATA_TYPE: /* data */
+ DBG_COUNTER(adapter->rx_logs.core_rx_pre_data);
+
+ pattrib->qos = (subtype & BIT(7)) ? 1:0;
+ retval = validate_recv_data_frame(adapter, precv_frame);
+ if (retval == _FAIL) {
+ struct recv_priv *precvpriv = &adapter->recvpriv;
+ /* RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("validate_recv_data_frame fail\n")); */
+ precvpriv->rx_drop++;
+ DBG_COUNTER(adapter->rx_logs.core_rx_pre_data_err);
+ } else if (retval == _SUCCESS) {
+#ifdef DBG_RX_DUMP_EAP
+ u8 bDumpRxPkt;
+ u16 eth_type;
+
+ /* dump eapol */
+ rtw_hal_get_def_var(adapter, HAL_DEF_DBG_DUMP_RXPKT, &(bDumpRxPkt));
+ /* get ether_type */
+ memcpy(&eth_type, ptr + pattrib->hdrlen + pattrib->iv_len + LLC_HEADER_SIZE, 2);
+ eth_type = ntohs((unsigned short) eth_type);
+ if ((bDumpRxPkt == 4) && (eth_type == 0x888e))
+ dump_rx_packet(ptr);
+#endif
+ } else
+ DBG_COUNTER(adapter->rx_logs.core_rx_pre_data_handled);
+ break;
+ default:
+ DBG_COUNTER(adapter->rx_logs.core_rx_pre_unknown);
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("validate_recv_frame fail! type = 0x%x\n", type));
+ #ifdef DBG_RX_DROP_FRAME
+ DBG_871X("DBG_RX_DROP_FRAME validate_recv_frame fail! type = 0x%x\n", type);
+ #endif
+ retval = _FAIL;
+ break;
+ }
+
+exit:
+ return retval;
+}
+
+
+/* remove the wlanhdr and add the eth_hdr */
+sint wlanhdr_to_ethhdr(union recv_frame *precvframe);
+sint wlanhdr_to_ethhdr(union recv_frame *precvframe)
+{
+ sint rmv_len;
+ u16 eth_type, len;
+ u8 bsnaphdr;
+ u8 *psnap_type;
+ struct ieee80211_snap_hdr *psnap;
+ __be16 be_tmp;
+ sint ret = _SUCCESS;
+ struct adapter *adapter = precvframe->u.hdr.adapter;
+ struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
+ u8 *ptr = get_recvframe_data(precvframe); /* point to frame_ctrl field */
+ struct rx_pkt_attrib *pattrib = &precvframe->u.hdr.attrib;
+
+ if (pattrib->encrypt) {
+ recvframe_pull_tail(precvframe, pattrib->icv_len);
+ }
+
+ psnap = (struct ieee80211_snap_hdr *)(ptr+pattrib->hdrlen + pattrib->iv_len);
+ psnap_type = ptr+pattrib->hdrlen + pattrib->iv_len+SNAP_SIZE;
+ /* convert hdr + possible LLC headers into Ethernet header */
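+ /*
+ * The LLC/SNAP encapsulation (as in RFC 1042 / bridge-tunnel) is the
+ * bytes AA-AA-03 plus a 3-byte OUI, immediately followed by the
+ * 2-byte EtherType.  When such a header is found (and the EtherType
+ * is not one of the IPX/AARP exceptions checked below), the 6 SNAP
+ * bytes are stripped so that the EtherType ends up right after the
+ * two MAC addresses of the rebuilt Ethernet header.
+ */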
+ /* eth_type = (psnap_type[0] << 8) | psnap_type[1]; */
+ if ((!memcmp(psnap, rtw_rfc1042_header, SNAP_SIZE) &&
+ (memcmp(psnap_type, SNAP_ETH_TYPE_IPX, 2)) &&
+ (memcmp(psnap_type, SNAP_ETH_TYPE_APPLETALK_AARP, 2))) ||
+ /* eth_type != ETH_P_AARP && eth_type != ETH_P_IPX) || */
+ !memcmp(psnap, rtw_bridge_tunnel_header, SNAP_SIZE)) {
+ /* remove RFC1042 or Bridge-Tunnel encapsulation and replace EtherType */
+ bsnaphdr = true;
+ } else
+ /* Leave Ethernet header part of hdr and full payload */
+ bsnaphdr = false;
+
+ rmv_len = pattrib->hdrlen + pattrib->iv_len + (bsnaphdr?SNAP_SIZE:0);
+ len = precvframe->u.hdr.len - rmv_len;
+
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("\n ===pattrib->hdrlen: %x, pattrib->iv_len:%x ===\n\n", pattrib->hdrlen, pattrib->iv_len));
+
+ memcpy(&be_tmp, ptr+rmv_len, 2);
+ eth_type = ntohs(be_tmp); /* pattrib->ether_type */
+ pattrib->eth_type = eth_type;
+
+#ifdef CONFIG_AUTO_AP_MODE
+ if (0x8899 == pattrib->eth_type) {
+ struct sta_info *psta = precvframe->u.hdr.psta;
+
+ DBG_871X("wlan rx: got eth_type = 0x%x\n", pattrib->eth_type);
+
+ if (psta && psta->isrc && psta->pid > 0) {
+ u16 rx_pid;
+
+ rx_pid = *(u16 *)(ptr+rmv_len+2);
+
+ DBG_871X("wlan rx(pid = 0x%x): sta("MAC_FMT") pid = 0x%x\n",
+ rx_pid, MAC_ARG(psta->hwaddr), psta->pid);
+
+ if (rx_pid == psta->pid) {
+ int i;
+ u16 len = *(u16 *)(ptr+rmv_len+4);
+ /* u16 ctrl_type = *(u16*)(ptr+rmv_len+6); */
+
+ /* DBG_871X("RC: len = 0x%x, ctrl_type = 0x%x\n", len, ctrl_type); */
+ DBG_871X("RC: len = 0x%x\n", len);
+
+ for (i = 0; i < len ; i++)
+ DBG_871X("0x%x\n", *(ptr+rmv_len+6+i));
+ /* DBG_871X("0x%x\n", *(ptr+rmv_len+8+i)); */
+
+ DBG_871X("RC-end\n");
+ }
+ }
+ }
+#endif /* CONFIG_AUTO_AP_MODE */
+
+ if ((check_fwstate(pmlmepriv, WIFI_MP_STATE) == true)) {
+ ptr += rmv_len;
+ *ptr = 0x87;
+ *(ptr+1) = 0x12;
+
+ eth_type = 0x8712;
+ /* append rx status for mp test packets */
+ ptr = recvframe_pull(precvframe, (rmv_len-sizeof(struct ethhdr)+2)-24);
+ memcpy(ptr, get_rxmem(precvframe), 24);
+ ptr += 24;
+ } else
+ ptr = recvframe_pull(precvframe, (rmv_len-sizeof(struct ethhdr) + (bsnaphdr?2:0)));
+
+ memcpy(ptr, pattrib->dst, ETH_ALEN);
+ memcpy(ptr+ETH_ALEN, pattrib->src, ETH_ALEN);
+
+ if (!bsnaphdr) {
+ be_tmp = htons(len);
+ memcpy(ptr+12, &be_tmp, 2);
+ }
+
+ return ret;
+}
+
+/* perform defrag */
+static union recv_frame *recvframe_defrag(struct adapter *adapter,
+ struct __queue *defrag_q)
+{
+ struct list_head *plist, *phead;
+ u8 *data, wlanhdr_offset;
+ u8 curfragnum;
+ struct recv_frame_hdr *pfhdr, *pnfhdr;
+ union recv_frame *prframe, *pnextrframe;
+ struct __queue *pfree_recv_queue;
+
+ curfragnum = 0;
+ pfree_recv_queue = &adapter->recvpriv.free_recv_queue;
+
+ phead = get_list_head(defrag_q);
+ plist = get_next(phead);
+ prframe = LIST_CONTAINOR(plist, union recv_frame, u);
+ pfhdr = &prframe->u.hdr;
+ list_del_init(&(prframe->u.list));
+
+ if (curfragnum != pfhdr->attrib.frag_num) {
+ /* the first fragment number must be 0 */
+ /* free the whole queue */
+ rtw_free_recvframe(prframe, pfree_recv_queue);
+ rtw_free_recvframe_queue(defrag_q, pfree_recv_queue);
+
+ return NULL;
+ }
+
+ curfragnum++;
+
+ plist = get_list_head(defrag_q);
+
+ plist = get_next(plist);
+
+ data = get_recvframe_data(prframe);
+
+ while (phead != plist) {
+ pnextrframe = LIST_CONTAINOR(plist, union recv_frame, u);
+ pnfhdr = &pnextrframe->u.hdr;
+
+
+ /* check the fragment sequence (2nd ~n fragment frame) */
+
+ if (curfragnum != pnfhdr->attrib.frag_num) {
+ /* the fragment number must be increasing (after decache) */
+ /* release the defrag_q & prframe */
+ rtw_free_recvframe(prframe, pfree_recv_queue);
+ rtw_free_recvframe_queue(defrag_q, pfree_recv_queue);
+ return NULL;
+ }
+
+ curfragnum++;
+
+ /* copy the 2nd~n fragment frame's payload to the first fragment */
+ /* get the 2nd~last fragment frame's payload */
+
+ wlanhdr_offset = pnfhdr->attrib.hdrlen + pnfhdr->attrib.iv_len;
+
+ recvframe_pull(pnextrframe, wlanhdr_offset);
+
+ /* append to first fragment frame's tail (if privacy frame, pull the ICV) */
+ recvframe_pull_tail(prframe, pfhdr->attrib.icv_len);
+
+ /* memcpy */
+ memcpy(pfhdr->rx_tail, pnfhdr->rx_data, pnfhdr->len);
+
+ recvframe_put(prframe, pnfhdr->len);
+
+ pfhdr->attrib.icv_len = pnfhdr->attrib.icv_len;
+ plist = get_next(plist);
+
+ }
+
+ /* free the defrag_q queue and return the prframe */
+ rtw_free_recvframe_queue(defrag_q, pfree_recv_queue);
+
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("Performed defrag\n"));
+
+ return prframe;
+}
+
+/* check if need to defrag, if needed queue the frame to defrag_q */
+union recv_frame *recvframe_chk_defrag(struct adapter *padapter, union recv_frame *precv_frame)
+{
+ u8 ismfrag;
+ u8 fragnum;
+ u8 *psta_addr;
+ struct recv_frame_hdr *pfhdr;
+ struct sta_info *psta;
+ struct sta_priv *pstapriv;
+ struct list_head *phead;
+ union recv_frame *prtnframe = NULL;
+ struct __queue *pfree_recv_queue, *pdefrag_q;
+
+ pstapriv = &padapter->stapriv;
+
+ pfhdr = &precv_frame->u.hdr;
+
+ pfree_recv_queue = &padapter->recvpriv.free_recv_queue;
+
+ /* need to define struct of wlan header frame ctrl */
+ ismfrag = pfhdr->attrib.mfrag;
+ fragnum = pfhdr->attrib.frag_num;
+
+ psta_addr = pfhdr->attrib.ta;
+ psta = rtw_get_stainfo(pstapriv, psta_addr);
+ if (psta == NULL) {
+ u8 type = GetFrameType(pfhdr->rx_data);
+ if (type != WIFI_DATA_TYPE) {
+ psta = rtw_get_bcmc_stainfo(padapter);
+ pdefrag_q = &psta->sta_recvpriv.defrag_q;
+ } else
+ pdefrag_q = NULL;
+ } else
+ pdefrag_q = &psta->sta_recvpriv.defrag_q;
+
+ if ((ismfrag == 0) && (fragnum == 0))
+ prtnframe = precv_frame;/* isn't a fragment frame */
+
+ if (ismfrag == 1) {
+ /* 0~(n-1) fragment frame */
+ /* enqueue to defrag_q */
+ if (pdefrag_q != NULL) {
+ if (fragnum == 0)
+ /* the first fragment */
+ if (!list_empty(&pdefrag_q->queue))
+ /* free current defrag_q */
+ rtw_free_recvframe_queue(pdefrag_q, pfree_recv_queue);
+
+
+ /* Then enqueue the 0~(n-1) fragment into the defrag_q */
+
+ /* spin_lock(&pdefrag_q->lock); */
+ phead = get_list_head(pdefrag_q);
+ list_add_tail(&pfhdr->list, phead);
+ /* spin_unlock(&pdefrag_q->lock); */
+
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("Enqueue: ismfrag = %d, fragnum = %d\n", ismfrag, fragnum));
+
+ prtnframe = NULL;
+
+ } else {
+ /* can't find this ta's defrag_queue, so free this recv_frame */
+ rtw_free_recvframe(precv_frame, pfree_recv_queue);
+ prtnframe = NULL;
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("Free because pdefrag_q == NULL: ismfrag = %d, fragnum = %d\n", ismfrag, fragnum));
+ }
+
+ }
+
+ if ((ismfrag == 0) && (fragnum != 0)) {
+ /* the last fragment frame */
+ /* enqueue the last fragment */
+ if (pdefrag_q != NULL) {
+ /* spin_lock(&pdefrag_q->lock); */
+ phead = get_list_head(pdefrag_q);
+ list_add_tail(&pfhdr->list, phead);
+ /* spin_unlock(&pdefrag_q->lock); */
+
+ /* call recvframe_defrag to defrag */
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("defrag: ismfrag = %d, fragnum = %d\n", ismfrag, fragnum));
+ precv_frame = recvframe_defrag(padapter, pdefrag_q);
+ prtnframe = precv_frame;
+
+ } else {
+ /* can't find this ta's defrag_queue, so free this recv_frame */
+ rtw_free_recvframe(precv_frame, pfree_recv_queue);
+ prtnframe = NULL;
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("Free because pdefrag_q == NULL: ismfrag = %d, fragnum = %d\n", ismfrag, fragnum));
+ }
+
+ }
+
+
+ if ((prtnframe != NULL) && (prtnframe->u.hdr.attrib.privacy)) {
+ /* after defrag we must check tkip mic code */
+ if (recvframe_chkmic(padapter, prtnframe) == _FAIL) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("recvframe_chkmic(padapter, prtnframe) == _FAIL\n"));
+ rtw_free_recvframe(prtnframe, pfree_recv_queue);
+ prtnframe = NULL;
+ }
+ }
+ return prtnframe;
+}
+
+static int amsdu_to_msdu(struct adapter *padapter, union recv_frame *prframe)
+{
+ int a_len, padding_len;
+ u16 nSubframe_Length;
+ u8 nr_subframes, i;
+ u8 *pdata;
+ _pkt *sub_pkt, *subframes[MAX_SUBFRAME_COUNT];
+ struct recv_priv *precvpriv = &padapter->recvpriv;
+ struct __queue *pfree_recv_queue = &(precvpriv->free_recv_queue);
+ int ret = _SUCCESS;
+
+ nr_subframes = 0;
+
+ recvframe_pull(prframe, prframe->u.hdr.attrib.hdrlen);
+
+ if (prframe->u.hdr.attrib.iv_len > 0)
+ recvframe_pull(prframe, prframe->u.hdr.attrib.iv_len);
+
+ a_len = prframe->u.hdr.len;
+
+ pdata = prframe->u.hdr.rx_data;
+
+ while (a_len > ETH_HLEN) {
+
+ /* Offset 12 denote 2 mac address */
+ nSubframe_Length = RTW_GET_BE16(pdata + 12);
+
+ if (a_len < (ETHERNET_HEADER_SIZE + nSubframe_Length)) {
+ DBG_871X("nRemain_Length is %d and nSubframe_Length is : %d\n", a_len, nSubframe_Length);
+ break;
+ }
+
+ sub_pkt = rtw_os_alloc_msdu_pkt(prframe, nSubframe_Length, pdata);
+ if (sub_pkt == NULL) {
+ DBG_871X("%s(): allocate sub packet fail !!!\n", __func__);
+ break;
+ }
+
+ /* move the data point to data content */
+ pdata += ETH_HLEN;
+ a_len -= ETH_HLEN;
+
+ subframes[nr_subframes++] = sub_pkt;
+
+ if (nr_subframes >= MAX_SUBFRAME_COUNT) {
+ DBG_871X("%s(): too many subframes! Packets dropped!\n", __func__);
+ break;
+ }
+
+ pdata += nSubframe_Length;
+ a_len -= nSubframe_Length;
+ if (a_len != 0) {
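+ /*
+ * Each A-MSDU subframe is padded to a 4-byte boundary.
+ * E.g. nSubframe_Length = 61 with ETH_HLEN = 14 gives
+ * 75 bytes, 75 & 3 = 3, so padding_len = 1; a total that is
+ * already on a 4-byte boundary yields padding_len = 4,
+ * which is reset to 0 below.
+ */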
+ padding_len = 4 - ((nSubframe_Length + ETH_HLEN) & (4-1));
+ if (padding_len == 4) {
+ padding_len = 0;
+ }
+
+ if (a_len < padding_len) {
+ DBG_871X("%s(): a_len < padding_len!\n", __func__);
+ break;
+ }
+ pdata += padding_len;
+ a_len -= padding_len;
+ }
+ }
+
+ for (i = 0; i < nr_subframes; i++) {
+ sub_pkt = subframes[i];
+
+ /* Indicate the packets to the upper layer */
+ if (sub_pkt) {
+ rtw_os_recv_indicate_pkt(padapter, sub_pkt, &prframe->u.hdr.attrib);
+ }
+ }
+
+ prframe->u.hdr.len = 0;
+ rtw_free_recvframe(prframe, pfree_recv_queue);/* free this recv_frame */
+
+ return ret;
+}
+
+int check_indicate_seq(struct recv_reorder_ctrl *preorder_ctrl, u16 seq_num);
+int check_indicate_seq(struct recv_reorder_ctrl *preorder_ctrl, u16 seq_num)
+{
+ struct adapter *padapter = preorder_ctrl->padapter;
+ struct dvobj_priv *psdpriv = padapter->dvobj;
+ struct debug_priv *pdbgpriv = &psdpriv->drv_dbg;
+ u8 wsize = preorder_ctrl->wsize_b;
+ u16 wend = (preorder_ctrl->indicate_seq + wsize - 1) & 0xFFF;/* 4096; */
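+ /*
+ * wend is the last sequence number inside the reorder window,
+ * modulo 4096.  E.g. with indicate_seq = 4090 and wsize = 64,
+ * wend = (4090 + 63) & 0xFFF = 57, i.e. the window wraps past 0.
+ */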
+
+ /* Rx Reorder initialize condition. */
+ if (preorder_ctrl->indicate_seq == 0xFFFF) {
+ preorder_ctrl->indicate_seq = seq_num;
+ #ifdef DBG_RX_SEQ
+ DBG_871X("DBG_RX_SEQ %s:%d init IndicateSeq: %d, NewSeq: %d\n", __func__, __LINE__,
+ preorder_ctrl->indicate_seq, seq_num);
+ #endif
+
+ /* DbgPrint("check_indicate_seq, 1st->indicate_seq =%d\n", precvpriv->indicate_seq); */
+ }
+
+ /* DbgPrint("enter->check_indicate_seq(): IndicateSeq: %d, NewSeq: %d\n", precvpriv->indicate_seq, seq_num); */
+
+ /* Drop packets whose SeqNum is smaller than WinStart */
+ if (SN_LESS(seq_num, preorder_ctrl->indicate_seq)) {
+ /* RT_TRACE(COMP_RX_REORDER, DBG_LOUD, ("CheckRxTsIndicateSeq(): Packet Drop! IndicateSeq: %d, NewSeq: %d\n", pTS->RxIndicateSeq, NewSeqNum)); */
+ /* DbgPrint("CheckRxTsIndicateSeq(): Packet Drop! IndicateSeq: %d, NewSeq: %d\n", precvpriv->indicate_seq, seq_num); */
+
+ #ifdef DBG_RX_DROP_FRAME
+ DBG_871X("%s IndicateSeq: %d > NewSeq: %d\n", __func__,
+ preorder_ctrl->indicate_seq, seq_num);
+ #endif
+
+
+ return false;
+ }
+
+ /* */
+ /* Sliding window manipulation. Conditions includes: */
+ /* 1. Incoming SeqNum is equal to WinStart => Window shift 1 */
+ /* 2. Incoming SeqNum is larger than the WinEnd => Window shift N */
+ /* */
+ if (SN_EQUAL(seq_num, preorder_ctrl->indicate_seq)) {
+ preorder_ctrl->indicate_seq = (preorder_ctrl->indicate_seq + 1) & 0xFFF;
+
+ #ifdef DBG_RX_SEQ
+ DBG_871X("DBG_RX_SEQ %s:%d SN_EQUAL IndicateSeq: %d, NewSeq: %d\n", __func__, __LINE__,
+ preorder_ctrl->indicate_seq, seq_num);
+ #endif
+ } else if (SN_LESS(wend, seq_num)) {
+ /* RT_TRACE(COMP_RX_REORDER, DBG_LOUD, ("CheckRxTsIndicateSeq(): Window Shift! IndicateSeq: %d, NewSeq: %d\n", pTS->RxIndicateSeq, NewSeqNum)); */
+ /* DbgPrint("CheckRxTsIndicateSeq(): Window Shift! IndicateSeq: %d, NewSeq: %d\n", precvpriv->indicate_seq, seq_num); */
+
+ /* boundary situation, when seq_num cross 0xFFF */
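+ /*
+ * E.g. with wsize = 64: seq_num = 200 gives indicate_seq = 137
+ * (window 137..200), while seq_num = 5 gives
+ * indicate_seq = 0xFFF - 58 + 1 = 4038 (window 4038..4095, 0..5).
+ */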
+ if (seq_num >= (wsize - 1))
+ preorder_ctrl->indicate_seq = seq_num + 1 - wsize;
+ else
+ preorder_ctrl->indicate_seq = 0xFFF - (wsize - (seq_num + 1)) + 1;
+ pdbgpriv->dbg_rx_ampdu_window_shift_cnt++;
+ #ifdef DBG_RX_SEQ
+ DBG_871X("DBG_RX_SEQ %s:%d SN_LESS(wend, seq_num) IndicateSeq: %d, NewSeq: %d\n", __func__, __LINE__,
+ preorder_ctrl->indicate_seq, seq_num);
+ #endif
+ }
+
+ /* DbgPrint("exit->check_indicate_seq(): IndicateSeq: %d, NewSeq: %d\n", precvpriv->indicate_seq, seq_num); */
+
+ return true;
+}
+
+int enqueue_reorder_recvframe(struct recv_reorder_ctrl *preorder_ctrl, union recv_frame *prframe);
+int enqueue_reorder_recvframe(struct recv_reorder_ctrl *preorder_ctrl, union recv_frame *prframe)
+{
+ struct rx_pkt_attrib *pattrib = &prframe->u.hdr.attrib;
+ struct __queue *ppending_recvframe_queue = &preorder_ctrl->pending_recvframe_queue;
+ struct list_head *phead, *plist;
+ union recv_frame *pnextrframe;
+ struct rx_pkt_attrib *pnextattrib;
+
+ /* DbgPrint("+enqueue_reorder_recvframe()\n"); */
+
+ /* spin_lock_irqsave(&ppending_recvframe_queue->lock, irql); */
+ /* spin_lock(&ppending_recvframe_queue->lock); */
+
+
+ phead = get_list_head(ppending_recvframe_queue);
+ plist = get_next(phead);
+
+ while (phead != plist) {
+ pnextrframe = LIST_CONTAINOR(plist, union recv_frame, u);
+ pnextattrib = &pnextrframe->u.hdr.attrib;
+
+ if (SN_LESS(pnextattrib->seq_num, pattrib->seq_num))
+ plist = get_next(plist);
+ else if (SN_EQUAL(pnextattrib->seq_num, pattrib->seq_num))
+ /* Duplicate entry is found!! Do not insert current entry. */
+ /* RT_TRACE(COMP_RX_REORDER, DBG_TRACE, ("InsertRxReorderList(): Duplicate packet is dropped!! IndicateSeq: %d, NewSeq: %d\n", pTS->RxIndicateSeq, SeqNum)); */
+ /* spin_unlock_irqrestore(&ppending_recvframe_queue->lock, irql); */
+ return false;
+ else
+ break;
+
+ /* DbgPrint("enqueue_reorder_recvframe():while\n"); */
+
+ }
+
+
+ /* spin_lock_irqsave(&ppending_recvframe_queue->lock, irql); */
+ /* spin_lock(&ppending_recvframe_queue->lock); */
+
+ list_del_init(&(prframe->u.hdr.list));
+
+ list_add_tail(&(prframe->u.hdr.list), plist);
+
+ /* spin_unlock(&ppending_recvframe_queue->lock); */
+ /* spin_unlock_irqrestore(&ppending_recvframe_queue->lock, irql); */
+
+
+ /* RT_TRACE(COMP_RX_REORDER, DBG_TRACE, ("InsertRxReorderList(): Pkt insert into buffer!! IndicateSeq: %d, NewSeq: %d\n", pTS->RxIndicateSeq, SeqNum)); */
+ return true;
+
+}
+
+void recv_indicatepkts_pkt_loss_cnt(struct debug_priv *pdbgpriv, u64 prev_seq, u64 current_seq);
+void recv_indicatepkts_pkt_loss_cnt(struct debug_priv *pdbgpriv, u64 prev_seq, u64 current_seq)
+{
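+ /*
+ * Account for the wrapped distance between the previously indicated
+ * sequence number and the one being forced out now, e.g.
+ * prev_seq = 4094 and current_seq = 2 adds 4096 + 2 - 4094 = 4.
+ */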
+ if (current_seq < prev_seq)
+ pdbgpriv->dbg_rx_ampdu_loss_count += (4096 + current_seq - prev_seq);
+ else
+ pdbgpriv->dbg_rx_ampdu_loss_count += (current_seq - prev_seq);
+
+}
+int recv_indicatepkts_in_order(struct adapter *padapter, struct recv_reorder_ctrl *preorder_ctrl, int bforced);
+int recv_indicatepkts_in_order(struct adapter *padapter, struct recv_reorder_ctrl *preorder_ctrl, int bforced)
+{
+ struct list_head *phead, *plist;
+ union recv_frame *prframe;
+ struct rx_pkt_attrib *pattrib;
+ /* u8 index = 0; */
+ int bPktInBuf = false;
+ struct recv_priv *precvpriv = &padapter->recvpriv;
+ struct __queue *ppending_recvframe_queue = &preorder_ctrl->pending_recvframe_queue;
+ struct dvobj_priv *psdpriv = padapter->dvobj;
+ struct debug_priv *pdbgpriv = &psdpriv->drv_dbg;
+
+ DBG_COUNTER(padapter->rx_logs.core_rx_post_indicate_in_oder);
+
+ /* DbgPrint("+recv_indicatepkts_in_order\n"); */
+
+ /* spin_lock_irqsave(&ppending_recvframe_queue->lock, irql); */
+ /* spin_lock(&ppending_recvframe_queue->lock); */
+
+ phead = get_list_head(ppending_recvframe_queue);
+ plist = get_next(phead);
+
+ /* Handling some condition for forced indicate case. */
+ if (bforced == true) {
+ pdbgpriv->dbg_rx_ampdu_forced_indicate_count++;
+ if (list_empty(phead)) {
+ /* spin_unlock_irqrestore(&ppending_recvframe_queue->lock, irql); */
+ /* spin_unlock(&ppending_recvframe_queue->lock); */
+ return true;
+ }
+
+ prframe = LIST_CONTAINOR(plist, union recv_frame, u);
+ pattrib = &prframe->u.hdr.attrib;
+
+ #ifdef DBG_RX_SEQ
+ DBG_871X("DBG_RX_SEQ %s:%d IndicateSeq: %d, NewSeq: %d\n", __func__, __LINE__,
+ preorder_ctrl->indicate_seq, pattrib->seq_num);
+ #endif
+ recv_indicatepkts_pkt_loss_cnt(pdbgpriv, preorder_ctrl->indicate_seq, pattrib->seq_num);
+ preorder_ctrl->indicate_seq = pattrib->seq_num;
+
+ }
+
+ /* Prepare indication list and indication. */
+ /* Check if there is any packet need indicate. */
+ while (!list_empty(phead)) {
+
+ prframe = LIST_CONTAINOR(plist, union recv_frame, u);
+ pattrib = &prframe->u.hdr.attrib;
+
+ if (!SN_LESS(preorder_ctrl->indicate_seq, pattrib->seq_num)) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_notice_,
+ ("recv_indicatepkts_in_order: indicate =%d seq =%d amsdu =%d\n",
+ preorder_ctrl->indicate_seq, pattrib->seq_num, pattrib->amsdu));
+
+ plist = get_next(plist);
+ list_del_init(&(prframe->u.hdr.list));
+
+ if (SN_EQUAL(preorder_ctrl->indicate_seq, pattrib->seq_num)) {
+ preorder_ctrl->indicate_seq = (preorder_ctrl->indicate_seq + 1) & 0xFFF;
+ #ifdef DBG_RX_SEQ
+ DBG_871X("DBG_RX_SEQ %s:%d IndicateSeq: %d, NewSeq: %d\n", __func__, __LINE__,
+ preorder_ctrl->indicate_seq, pattrib->seq_num);
+ #endif
+ }
+
+ /* Set this as a lock to make sure that only one thread is indicating packet. */
+ /* pTS->RxIndicateState = RXTS_INDICATE_PROCESSING; */
+
+ /* Indicate packets */
+ /* RT_ASSERT((index<=REORDER_WIN_SIZE), ("RxReorderIndicatePacket(): Rx Reorder buffer full!!\n")); */
+
+
+ /* indicate this recv_frame */
+ /* DbgPrint("recv_indicatepkts_in_order, indicate_seq =%d, seq_num =%d\n", precvpriv->indicate_seq, pattrib->seq_num); */
+ if (!pattrib->amsdu) {
+ /* DBG_871X("recv_indicatepkts_in_order, amsdu!= 1, indicate_seq =%d, seq_num =%d\n", preorder_ctrl->indicate_seq, pattrib->seq_num); */
+
+ if ((padapter->bDriverStopped == false) &&
+ (padapter->bSurpriseRemoved == false))
+ rtw_recv_indicatepkt(padapter, prframe);/* indicate this recv_frame */
+
+ } else if (pattrib->amsdu == 1) {
+ if (amsdu_to_msdu(padapter, prframe) != _SUCCESS)
+ rtw_free_recvframe(prframe, &precvpriv->free_recv_queue);
+
+ } else {
+ /* error condition */
+ }
+
+
+ /* Update local variables. */
+ bPktInBuf = false;
+
+ } else {
+ bPktInBuf = true;
+ break;
+ }
+
+ /* DbgPrint("recv_indicatepkts_in_order():while\n"); */
+
+ }
+
+ /* spin_unlock(&ppending_recvframe_queue->lock); */
+ /* spin_unlock_irqrestore(&ppending_recvframe_queue->lock, irql); */
+
+ return bPktInBuf;
+}
+
+int recv_indicatepkt_reorder(struct adapter *padapter, union recv_frame *prframe);
+int recv_indicatepkt_reorder(struct adapter *padapter, union recv_frame *prframe)
+{
+ int retval = _SUCCESS;
+ struct rx_pkt_attrib *pattrib = &prframe->u.hdr.attrib;
+ struct recv_reorder_ctrl *preorder_ctrl = prframe->u.hdr.preorder_ctrl;
+ struct __queue *ppending_recvframe_queue = &preorder_ctrl->pending_recvframe_queue;
+ struct dvobj_priv *psdpriv = padapter->dvobj;
+ struct debug_priv *pdbgpriv = &psdpriv->drv_dbg;
+
+ DBG_COUNTER(padapter->rx_logs.core_rx_post_indicate_reoder);
+
+ if (!pattrib->amsdu) {
+ /* s1. */
+ wlanhdr_to_ethhdr(prframe);
+
+ if (pattrib->qos != 1) {
+ if ((padapter->bDriverStopped == false) &&
+ (padapter->bSurpriseRemoved == false)) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_notice_, ("@@@@ recv_indicatepkt_reorder -recv_func recv_indicatepkt\n"));
+
+ rtw_recv_indicatepkt(padapter, prframe);
+ return _SUCCESS;
+
+ }
+
+ #ifdef DBG_RX_DROP_FRAME
+ DBG_871X("DBG_RX_DROP_FRAME %s pattrib->qos != 1\n", __func__);
+ #endif
+
+ return _FAIL;
+
+ }
+
+ if (preorder_ctrl->enable == false) {
+ /* indicate this recv_frame */
+ preorder_ctrl->indicate_seq = pattrib->seq_num;
+ #ifdef DBG_RX_SEQ
+ DBG_871X("DBG_RX_SEQ %s:%d IndicateSeq: %d, NewSeq: %d\n", __func__, __LINE__,
+ preorder_ctrl->indicate_seq, pattrib->seq_num);
+ #endif
+
+ rtw_recv_indicatepkt(padapter, prframe);
+
+ preorder_ctrl->indicate_seq = (preorder_ctrl->indicate_seq + 1)%4096;
+ #ifdef DBG_RX_SEQ
+ DBG_871X("DBG_RX_SEQ %s:%d IndicateSeq: %d, NewSeq: %d\n", __func__, __LINE__,
+ preorder_ctrl->indicate_seq, pattrib->seq_num);
+ #endif
+
+ return _SUCCESS;
+ }
+ } else if (pattrib->amsdu == 1) { /* temporary filter: A-MSDU inside an A-MPDU is not supported */
+ if (preorder_ctrl->enable == false) {
+ preorder_ctrl->indicate_seq = pattrib->seq_num;
+ #ifdef DBG_RX_SEQ
+ DBG_871X("DBG_RX_SEQ %s:%d IndicateSeq: %d, NewSeq: %d\n", __func__, __LINE__,
+ preorder_ctrl->indicate_seq, pattrib->seq_num);
+ #endif
+
+ retval = amsdu_to_msdu(padapter, prframe);
+
+ preorder_ctrl->indicate_seq = (preorder_ctrl->indicate_seq + 1)%4096;
+ #ifdef DBG_RX_SEQ
+ DBG_871X("DBG_RX_SEQ %s:%d IndicateSeq: %d, NewSeq: %d\n", __func__, __LINE__,
+ preorder_ctrl->indicate_seq, pattrib->seq_num);
+ #endif
+
+ if (retval != _SUCCESS) {
+ #ifdef DBG_RX_DROP_FRAME
+ DBG_871X("DBG_RX_DROP_FRAME %s amsdu_to_msdu fail\n", __func__);
+ #endif
+ }
+
+ return retval;
+ }
+ }
+
+ spin_lock_bh(&ppending_recvframe_queue->lock);
+
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_notice_,
+ ("recv_indicatepkt_reorder: indicate =%d seq =%d\n",
+ preorder_ctrl->indicate_seq, pattrib->seq_num));
+
+ /* s2. check if winstart_b(indicate_seq) needs to be updated */
+ if (!check_indicate_seq(preorder_ctrl, pattrib->seq_num)) {
+ pdbgpriv->dbg_rx_ampdu_drop_count++;
+ #ifdef DBG_RX_DROP_FRAME
+ DBG_871X("DBG_RX_DROP_FRAME %s check_indicate_seq fail\n", __func__);
+ #endif
+ goto _err_exit;
+ }
+
+
+ /* s3. Insert all packet into Reorder Queue to maintain its ordering. */
+ if (!enqueue_reorder_recvframe(preorder_ctrl, prframe)) {
+ /* DbgPrint("recv_indicatepkt_reorder, enqueue_reorder_recvframe fail!\n"); */
+ /* spin_unlock_irqrestore(&ppending_recvframe_queue->lock, irql); */
+ /* return _FAIL; */
+ #ifdef DBG_RX_DROP_FRAME
+ DBG_871X("DBG_RX_DROP_FRAME %s enqueue_reorder_recvframe fail\n", __func__);
+ #endif
+ goto _err_exit;
+ }
+
+
+ /* s4. */
+ /* Indication process. */
+ /* After Packet dropping and Sliding Window shifting as above, we can now just indicate the packets */
+ /* with the SeqNum smaller than latest WinStart and buffer other packets. */
+ /* */
+ /* For Rx Reorder condition: */
+ /* 1. All packets with SeqNum smaller than WinStart => Indicate */
+ /* 2. All packets with SeqNum larger than or equal to WinStart => Buffer it. */
+ /* */
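+ /*
+ * For example, with indicate_seq (WinStart) = 100 and wsize = 64
+ * (WinEnd = 163): a frame with seq 99 is dropped above, seq 100 is
+ * indicated and the window advances to 101, seq 101..163 is
+ * buffered, and seq 164 or later shifts the whole window forward.
+ */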
+
+ /* recv_indicatepkts_in_order(padapter, preorder_ctrl, true); */
+ if (recv_indicatepkts_in_order(padapter, preorder_ctrl, false) == true) {
+ _set_timer(&preorder_ctrl->reordering_ctrl_timer, REORDER_WAIT_TIME);
+ spin_unlock_bh(&ppending_recvframe_queue->lock);
+ } else {
+ spin_unlock_bh(&ppending_recvframe_queue->lock);
+ del_timer_sync(&preorder_ctrl->reordering_ctrl_timer);
+ }
+
+ return _SUCCESS;
+
+_err_exit:
+ spin_unlock_bh(&ppending_recvframe_queue->lock);
+
+ return _FAIL;
+}
+
+
+void rtw_reordering_ctrl_timeout_handler(void *pcontext)
+{
+ struct recv_reorder_ctrl *preorder_ctrl = (struct recv_reorder_ctrl *)pcontext;
+ struct adapter *padapter = preorder_ctrl->padapter;
+ struct __queue *ppending_recvframe_queue = &preorder_ctrl->pending_recvframe_queue;
+
+
+ if (padapter->bDriverStopped || padapter->bSurpriseRemoved)
+ return;
+
+ /* DBG_871X("+rtw_reordering_ctrl_timeout_handler() =>\n"); */
+
+ spin_lock_bh(&ppending_recvframe_queue->lock);
+
+ if (recv_indicatepkts_in_order(padapter, preorder_ctrl, true) == true)
+ _set_timer(&preorder_ctrl->reordering_ctrl_timer, REORDER_WAIT_TIME);
+
+ spin_unlock_bh(&ppending_recvframe_queue->lock);
+
+}
+
+int process_recv_indicatepkts(struct adapter *padapter, union recv_frame *prframe);
+int process_recv_indicatepkts(struct adapter *padapter, union recv_frame *prframe)
+{
+ int retval = _SUCCESS;
+ /* struct recv_priv *precvpriv = &padapter->recvpriv; */
+ /* struct rx_pkt_attrib *pattrib = &prframe->u.hdr.attrib; */
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct ht_priv *phtpriv = &pmlmepriv->htpriv;
+
+ DBG_COUNTER(padapter->rx_logs.core_rx_post_indicate);
+
+ if (phtpriv->ht_option == true) { /* B/G/N Mode */
+ /* prframe->u.hdr.preorder_ctrl = &precvpriv->recvreorder_ctrl[pattrib->priority]; */
+
+ if (recv_indicatepkt_reorder(padapter, prframe) != _SUCCESS) { /* including perform A-MPDU Rx Ordering Buffer Control */
+ #ifdef DBG_RX_DROP_FRAME
+ DBG_871X("DBG_RX_DROP_FRAME %s recv_indicatepkt_reorder error!\n", __func__);
+ #endif
+
+ if ((padapter->bDriverStopped == false) &&
+ (padapter->bSurpriseRemoved == false)) {
+ retval = _FAIL;
+ return retval;
+ }
+ }
+ } else { /* B/G mode */
+ retval = wlanhdr_to_ethhdr(prframe);
+ if (retval != _SUCCESS) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("wlanhdr_to_ethhdr: drop pkt\n"));
+ #ifdef DBG_RX_DROP_FRAME
+ DBG_871X("DBG_RX_DROP_FRAME %s wlanhdr_to_ethhdr error!\n", __func__);
+ #endif
+ return retval;
+ }
+
+ if ((padapter->bDriverStopped == false) && (padapter->bSurpriseRemoved == false)) {
+ /* indicate this recv_frame */
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_notice_, ("@@@@ process_recv_indicatepkts- recv_func recv_indicatepkt\n"));
+ rtw_recv_indicatepkt(padapter, prframe);
+
+
+ } else {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_notice_, ("@@@@ process_recv_indicatepkts- recv_func free_indicatepkt\n"));
+
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_notice_, ("recv_func:bDriverStopped(%d) OR bSurpriseRemoved(%d)", padapter->bDriverStopped, padapter->bSurpriseRemoved));
+ retval = _FAIL;
+ return retval;
+ }
+
+ }
+
+ return retval;
+
+}
+
+static int recv_func_prehandle(struct adapter *padapter, union recv_frame *rframe)
+{
+ int ret = _SUCCESS;
+ struct __queue *pfree_recv_queue = &padapter->recvpriv.free_recv_queue;
+
+ DBG_COUNTER(padapter->rx_logs.core_rx_pre);
+
+ /* check the frame ctrl field and decache */
+ ret = validate_recv_frame(padapter, rframe);
+ if (ret != _SUCCESS) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("recv_func: validate_recv_frame fail! drop pkt\n"));
+ rtw_free_recvframe(rframe, pfree_recv_queue);/* free this recv_frame */
+ goto exit;
+ }
+
+exit:
+ return ret;
+}
+
+static int recv_func_posthandle(struct adapter *padapter, union recv_frame *prframe)
+{
+ int ret = _SUCCESS;
+ union recv_frame *orig_prframe = prframe;
+ struct recv_priv *precvpriv = &padapter->recvpriv;
+ struct __queue *pfree_recv_queue = &padapter->recvpriv.free_recv_queue;
+
+ DBG_COUNTER(padapter->rx_logs.core_rx_post);
+
+ prframe = decryptor(padapter, prframe);
+ if (prframe == NULL) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("decryptor: drop pkt\n"));
+ #ifdef DBG_RX_DROP_FRAME
+ DBG_871X("DBG_RX_DROP_FRAME %s decryptor: drop pkt\n", __func__);
+ #endif
+ ret = _FAIL;
+ DBG_COUNTER(padapter->rx_logs.core_rx_post_decrypt_err);
+ goto _recv_data_drop;
+ }
+
+ prframe = recvframe_chk_defrag(padapter, prframe);
+ if (prframe == NULL) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("recvframe_chk_defrag: drop pkt\n"));
+ #ifdef DBG_RX_DROP_FRAME
+ DBG_871X("DBG_RX_DROP_FRAME %s recvframe_chk_defrag: drop pkt\n", __func__);
+ #endif
+ DBG_COUNTER(padapter->rx_logs.core_rx_post_defrag_err);
+ goto _recv_data_drop;
+ }
+
+ prframe = portctrl(padapter, prframe);
+ if (prframe == NULL) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("portctrl: drop pkt\n"));
+ #ifdef DBG_RX_DROP_FRAME
+ DBG_871X("DBG_RX_DROP_FRAME %s portctrl: drop pkt\n", __func__);
+ #endif
+ ret = _FAIL;
+ DBG_COUNTER(padapter->rx_logs.core_rx_post_portctrl_err);
+ goto _recv_data_drop;
+ }
+
+ count_rx_stats(padapter, prframe, NULL);
+
+ ret = process_recv_indicatepkts(padapter, prframe);
+ if (ret != _SUCCESS) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("recv_func: process_recv_indicatepkts fail!\n"));
+ #ifdef DBG_RX_DROP_FRAME
+ DBG_871X("DBG_RX_DROP_FRAME %s process_recv_indicatepkts fail!\n", __func__);
+ #endif
+ rtw_free_recvframe(orig_prframe, pfree_recv_queue);/* free this recv_frame */
+ DBG_COUNTER(padapter->rx_logs.core_rx_post_indicate_err);
+ goto _recv_data_drop;
+ }
+
+_recv_data_drop:
+ precvpriv->rx_drop++;
+ return ret;
+}
+
+
+int recv_func(struct adapter *padapter, union recv_frame *rframe);
+int recv_func(struct adapter *padapter, union recv_frame *rframe)
+{
+ int ret;
+ struct rx_pkt_attrib *prxattrib = &rframe->u.hdr.attrib;
+ struct recv_priv *recvpriv = &padapter->recvpriv;
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
+ struct mlme_priv *mlmepriv = &padapter->mlmepriv;
+
+ /* check if we need to handle uc_swdec_pending_queue */
+ if (check_fwstate(mlmepriv, WIFI_STATION_STATE) && psecuritypriv->busetkipkey) {
+ union recv_frame *pending_frame;
+ int cnt = 0;
+
+ while ((pending_frame = rtw_alloc_recvframe(&padapter->recvpriv.uc_swdec_pending_queue))) {
+ cnt++;
+ DBG_COUNTER(padapter->rx_logs.core_rx_dequeue);
+ recv_func_posthandle(padapter, pending_frame);
+ }
+
+ if (cnt)
+ DBG_871X(FUNC_ADPT_FMT" dequeue %d from uc_swdec_pending_queue\n",
+ FUNC_ADPT_ARG(padapter), cnt);
+ }
+
+ DBG_COUNTER(padapter->rx_logs.core_rx);
+ ret = recv_func_prehandle(padapter, rframe);
+
+ if (ret == _SUCCESS) {
+
+ /* check if we need to enqueue into uc_swdec_pending_queue */
+ if (check_fwstate(mlmepriv, WIFI_STATION_STATE) &&
+ !IS_MCAST(prxattrib->ra) && prxattrib->encrypt > 0 &&
+ (prxattrib->bdecrypted == 0 || psecuritypriv->sw_decrypt == true) &&
+ psecuritypriv->ndisauthtype == Ndis802_11AuthModeWPAPSK &&
+ !psecuritypriv->busetkipkey) {
+ DBG_COUNTER(padapter->rx_logs.core_rx_enqueue);
+ rtw_enqueue_recvframe(rframe, &padapter->recvpriv.uc_swdec_pending_queue);
+ /* DBG_871X("%s: no key, enqueue uc_swdec_pending_queue\n", __func__); */
+
+ if (recvpriv->free_recvframe_cnt < NR_RECVFRAME/4) {
+ /* to prevent recvframe starvation, pull a frame back from uc_swdec_pending_queue and process it */
+ rframe = rtw_alloc_recvframe(&padapter->recvpriv.uc_swdec_pending_queue);
+ if (rframe)
+ goto do_posthandle;
+ }
+ goto exit;
+ }
+
+do_posthandle:
+ ret = recv_func_posthandle(padapter, rframe);
+ }
+
+exit:
+ return ret;
+}
+
+
+s32 rtw_recv_entry(union recv_frame *precvframe)
+{
+ struct adapter *padapter;
+ struct recv_priv *precvpriv;
+ s32 ret = _SUCCESS;
+
+/* RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("+rtw_recv_entry\n")); */
+
+ padapter = precvframe->u.hdr.adapter;
+
+ precvpriv = &padapter->recvpriv;
+
+ ret = recv_func(padapter, precvframe);
+ if (ret == _FAIL) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("rtw_recv_entry: recv_func return fail!!!\n"));
+ goto _recv_entry_drop;
+ }
+
+
+ precvpriv->rx_pkts++;
+
+ return ret;
+
+_recv_entry_drop:
+
+ /* RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("_recv_entry_drop\n")); */
+
+ return ret;
+}
+
+void rtw_signal_stat_timer_hdl(RTW_TIMER_HDL_ARGS)
+{
+ struct adapter *adapter = (struct adapter *)FunctionContext;
+ struct recv_priv *recvpriv = &adapter->recvpriv;
+
+ u32 tmp_s, tmp_q;
+ u8 avg_signal_strength = 0;
+ u8 avg_signal_qual = 0;
+ u32 num_signal_strength = 0;
+ u32 num_signal_qual = 0;
+ u8 _alpha = 5; /* this value is based on converging_constant = 5000 and sampling_interval = 1000 */
+
+ if (adapter->recvpriv.is_signal_dbg) {
+ /* propagate the user-specified debug value, signal_strength_dbg, to signal_strength and rssi */
+ adapter->recvpriv.signal_strength = adapter->recvpriv.signal_strength_dbg;
+ adapter->recvpriv.rssi = (s8)translate_percentage_to_dbm((u8)adapter->recvpriv.signal_strength_dbg);
+ } else {
+
+ if (recvpriv->signal_strength_data.update_req == 0) {/* update_req is clear, means we got rx */
+ avg_signal_strength = recvpriv->signal_strength_data.avg_val;
+ num_signal_strength = recvpriv->signal_strength_data.total_num;
+ /* after avg_vals are acquired, we can re-stat the signal values */
+ recvpriv->signal_strength_data.update_req = 1;
+ }
+
+ if (recvpriv->signal_qual_data.update_req == 0) {/* update_req is clear, means we got rx */
+ avg_signal_qual = recvpriv->signal_qual_data.avg_val;
+ num_signal_qual = recvpriv->signal_qual_data.total_num;
+ /* after avg_vals are acquired, we can re-stat the signal values */
+ recvpriv->signal_qual_data.update_req = 1;
+ }
+
+ if (num_signal_strength == 0) {
+ if (rtw_get_on_cur_ch_time(adapter) == 0
+ || jiffies_to_msecs(jiffies - rtw_get_on_cur_ch_time(adapter)) < 2 * adapter->mlmeextpriv.mlmext_info.bcn_interval
+ ) {
+ goto set_timer;
+ }
+ }
+
+ if (check_fwstate(&adapter->mlmepriv, _FW_UNDER_SURVEY) == true
+ || check_fwstate(&adapter->mlmepriv, _FW_LINKED) == false
+ ) {
+ goto set_timer;
+ }
+
+ /* update value of signal_strength, rssi, signal_qual */
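+ /*
+ * Exponential moving average with _alpha = 5: the new sample gets a
+ * weight of 1/5 and the previous value 4/5, i.e.
+ * new = (sample + 4 * old) / 5, rounded up when there is a
+ * remainder and clamped to 100.
+ */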
+ tmp_s = (avg_signal_strength+(_alpha-1)*recvpriv->signal_strength);
+ if (tmp_s % _alpha)
+ tmp_s = tmp_s/_alpha + 1;
+ else
+ tmp_s = tmp_s/_alpha;
+ if (tmp_s > 100)
+ tmp_s = 100;
+
+ tmp_q = (avg_signal_qual+(_alpha-1)*recvpriv->signal_qual);
+ if (tmp_q % _alpha)
+ tmp_q = tmp_q/_alpha + 1;
+ else
+ tmp_q = tmp_q/_alpha;
+ if (tmp_q > 100)
+ tmp_q = 100;
+
+ recvpriv->signal_strength = tmp_s;
+ recvpriv->rssi = (s8)translate_percentage_to_dbm(tmp_s);
+ recvpriv->signal_qual = tmp_q;
+
+ #if defined(DBG_RX_SIGNAL_DISPLAY_PROCESSING) && 1
+ DBG_871X(FUNC_ADPT_FMT" signal_strength:%3u, rssi:%3d, signal_qual:%3u"
+ ", num_signal_strength:%u, num_signal_qual:%u"
+ ", on_cur_ch_ms:%d"
+ "\n"
+ , FUNC_ADPT_ARG(adapter)
+ , recvpriv->signal_strength
+ , recvpriv->rssi
+ , recvpriv->signal_qual
+ , num_signal_strength, num_signal_qual
+ , rtw_get_on_cur_ch_time(adapter) ? jiffies_to_msecs(jiffies - rtw_get_on_cur_ch_time(adapter)) : 0
+ );
+ #endif
+ }
+
+set_timer:
+ rtw_set_signal_stat_timer(recvpriv);
+
+}
diff --git a/drivers/staging/rtl8723bs/core/rtw_rf.c b/drivers/staging/rtl8723bs/core/rtw_rf.c
new file mode 100644
index 000000000000..b87ea4e388c0
--- /dev/null
+++ b/drivers/staging/rtl8723bs/core/rtw_rf.c
@@ -0,0 +1,64 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#define _RTW_RF_C_
+
+#include <drv_types.h>
+
+
+struct ch_freq {
+ u32 channel;
+ u32 frequency;
+};
+
+static struct ch_freq ch_freq_map[] = {
+ {1, 2412}, {2, 2417}, {3, 2422}, {4, 2427}, {5, 2432},
+ {6, 2437}, {7, 2442}, {8, 2447}, {9, 2452}, {10, 2457},
+ {11, 2462}, {12, 2467}, {13, 2472}, {14, 2484},
+ /* UNII */
+ {36, 5180}, {40, 5200}, {44, 5220}, {48, 5240}, {52, 5260},
+ {56, 5280}, {60, 5300}, {64, 5320}, {149, 5745}, {153, 5765},
+ {157, 5785}, {161, 5805}, {165, 5825}, {167, 5835}, {169, 5845},
+ {171, 5855}, {173, 5865},
+ /* HiperLAN2 */
+ {100, 5500}, {104, 5520}, {108, 5540}, {112, 5560}, {116, 5580},
+ {120, 5600}, {124, 5620}, {128, 5640}, {132, 5660}, {136, 5680},
+ {140, 5700},
+ /* Japan MMAC */
+ {34, 5170}, {38, 5190}, {42, 5210}, {46, 5230},
+ /* Japan */
+ {184, 4920}, {188, 4940}, {192, 4960}, {196, 4980},
+ {208, 5040},/* Japan, means J08 */
+ {212, 5060},/* Japan, means J12 */
+ {216, 5080},/* Japan, means J16 */
+};
+
+static int ch_freq_map_num = (sizeof(ch_freq_map) / sizeof(struct ch_freq));
+
+u32 rtw_ch2freq(u32 channel)
+{
+ u8 i;
+ u32 freq = 0;
+
+ for (i = 0; i < ch_freq_map_num; i++) {
+ if (channel == ch_freq_map[i].channel) {
+ freq = ch_freq_map[i].frequency;
+ break;
+ }
+ }
+ if (i == ch_freq_map_num)
+ freq = 2412;
+
+ return freq;
+}
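+
+/*
+ * Illustrative behaviour of the lookup above (derived only from the table
+ * and the fallback in this file):
+ *
+ *   rtw_ch2freq(1)   == 2412   2.4 GHz channel 1
+ *   rtw_ch2freq(14)  == 2484   Japan-only 2.4 GHz channel
+ *   rtw_ch2freq(36)  == 5180   first UNII 5 GHz channel
+ *   rtw_ch2freq(200) == 2412   unknown channels fall back to channel 1
+ */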
diff --git a/drivers/staging/rtl8723bs/core/rtw_security.c b/drivers/staging/rtl8723bs/core/rtw_security.c
new file mode 100644
index 000000000000..e832f16997b7
--- /dev/null
+++ b/drivers/staging/rtl8723bs/core/rtw_security.c
@@ -0,0 +1,2437 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#define _RTW_SECURITY_C_
+
+#include <drv_types.h>
+#include <rtw_debug.h>
+
+static const char *_security_type_str[] = {
+ "N/A",
+ "WEP40",
+ "TKIP",
+ "TKIP_WM",
+ "AES",
+ "WEP104",
+ "SMS4",
+ "WEP_WPA",
+ "BIP",
+};
+
+const char *security_type_str(u8 value)
+{
+ if (value <= _BIP_)
+ return _security_type_str[value];
+ return NULL;
+}
+
+#ifdef DBG_SW_SEC_CNT
+#define WEP_SW_ENC_CNT_INC(sec, ra) \
+ if (is_broadcast_mac_addr(ra)) \
+ sec->wep_sw_enc_cnt_bc++; \
+ else if (is_multicast_mac_addr(ra)) \
+ sec->wep_sw_enc_cnt_mc++; \
+ else \
+ sec->wep_sw_enc_cnt_uc++;
+
+#define WEP_SW_DEC_CNT_INC(sec, ra) \
+ if (is_broadcast_mac_addr(ra)) \
+ sec->wep_sw_dec_cnt_bc++; \
+ else if (is_multicast_mac_addr(ra)) \
+ sec->wep_sw_dec_cnt_mc++; \
+ else \
+ sec->wep_sw_dec_cnt_uc++;
+
+#define TKIP_SW_ENC_CNT_INC(sec, ra) \
+ if (is_broadcast_mac_addr(ra)) \
+ sec->tkip_sw_enc_cnt_bc++; \
+ else if (is_multicast_mac_addr(ra)) \
+ sec->tkip_sw_enc_cnt_mc++; \
+ else \
+ sec->tkip_sw_enc_cnt_uc++;
+
+#define TKIP_SW_DEC_CNT_INC(sec, ra) \
+ if (is_broadcast_mac_addr(ra)) \
+ sec->tkip_sw_dec_cnt_bc++; \
+ else if (is_multicast_mac_addr(ra)) \
+ sec->tkip_sw_dec_cnt_mc++; \
+ else \
+ sec->tkip_sw_dec_cnt_uc++;
+
+#define AES_SW_ENC_CNT_INC(sec, ra) \
+ if (is_broadcast_mac_addr(ra)) \
+ sec->aes_sw_enc_cnt_bc++; \
+ else if (is_multicast_mac_addr(ra)) \
+ sec->aes_sw_enc_cnt_mc++; \
+ else \
+ sec->aes_sw_enc_cnt_uc++;
+
+#define AES_SW_DEC_CNT_INC(sec, ra) \
+ if (is_broadcast_mac_addr(ra)) \
+ sec->aes_sw_dec_cnt_bc++; \
+ else if (is_multicast_mac_addr(ra)) \
+ sec->aes_sw_dec_cnt_mc++; \
+ else \
+ sec->aes_sw_dec_cnt_uc++;
+#else
+#define WEP_SW_ENC_CNT_INC(sec, ra)
+#define WEP_SW_DEC_CNT_INC(sec, ra)
+#define TKIP_SW_ENC_CNT_INC(sec, ra)
+#define TKIP_SW_DEC_CNT_INC(sec, ra)
+#define AES_SW_ENC_CNT_INC(sec, ra)
+#define AES_SW_DEC_CNT_INC(sec, ra)
+#endif /* DBG_SW_SEC_CNT */
+
+/* WEP related ===== */
+
+#define CRC32_POLY 0x04c11db7
+
+struct arc4context {
+ u32 x;
+ u32 y;
+ u8 state[256];
+};
+
+
+static void arcfour_init(struct arc4context *parc4ctx, u8 *key, u32 key_len)
+{
+ u32 t, u;
+ u32 keyindex;
+ u32 stateindex;
+ u8 *state;
+ u32 counter;
+
+ state = parc4ctx->state;
+ parc4ctx->x = 0;
+ parc4ctx->y = 0;
+ for (counter = 0; counter < 256; counter++)
+ state[counter] = (u8)counter;
+ keyindex = 0;
+ stateindex = 0;
+ for (counter = 0; counter < 256; counter++) {
+ t = state[counter];
+ stateindex = (stateindex + key[keyindex] + t) & 0xff;
+ u = state[stateindex];
+ state[stateindex] = (u8)t;
+ state[counter] = (u8)u;
+ if (++keyindex >= key_len)
+ keyindex = 0;
+ }
+}
+
+static u32 arcfour_byte(struct arc4context *parc4ctx)
+{
+ u32 x;
+ u32 y;
+ u32 sx, sy;
+ u8 *state;
+
+ state = parc4ctx->state;
+ x = (parc4ctx->x + 1) & 0xff;
+ sx = state[x];
+ y = (sx + parc4ctx->y) & 0xff;
+ sy = state[y];
+ parc4ctx->x = x;
+ parc4ctx->y = y;
+ state[y] = (u8)sx;
+ state[x] = (u8)sy;
+ return state[(sx + sy) & 0xff];
+}
+
+static void arcfour_encrypt(
+ struct arc4context *parc4ctx,
+ u8 *dest,
+ u8 *src,
+ u32 len
+)
+{
+ u32 i;
+
+ for (i = 0; i < len; i++)
+ dest[i] = src[i] ^ (unsigned char)arcfour_byte(parc4ctx);
+}
+
+static sint bcrc32initialized = 0;
+static u32 crc32_table[256];
+
+
+static u8 crc32_reverseBit(u8 data)
+{
+ return((u8)((data<<7)&0x80) | ((data<<5)&0x40) | ((data<<3)&0x20) | ((data<<1)&0x10) | ((data>>1)&0x08) | ((data>>3)&0x04) | ((data>>5)&0x02) | ((data>>7)&0x01));
+}
+
+static void crc32_init(void)
+{
+ if (bcrc32initialized == 1)
+ return;
+ else {
+ sint i, j;
+ u32 c;
+ u8 *p = (u8 *)&c, *p1;
+ u8 k;
+
+ c = 0x12340000;
+
+ for (i = 0; i < 256; ++i) {
+ k = crc32_reverseBit((u8)i);
+ for (c = ((u32)k) << 24, j = 8; j > 0; --j) {
+ c = c & 0x80000000 ? (c << 1) ^ CRC32_POLY : (c << 1);
+ }
+ p1 = (u8 *)&crc32_table[i];
+
+ p1[0] = crc32_reverseBit(p[3]);
+ p1[1] = crc32_reverseBit(p[2]);
+ p1[2] = crc32_reverseBit(p[1]);
+ p1[3] = crc32_reverseBit(p[0]);
+ }
+ bcrc32initialized = 1;
+ }
+}
+
+static __le32 getcrc32(u8 *buf, sint len)
+{
+ u8 *p;
+ u32 crc;
+
+ if (bcrc32initialized == 0)
+ crc32_init();
+
+ crc = 0xffffffff; /* preload shift register, per CRC-32 spec */
+
+ for (p = buf; len > 0; ++p, --len) {
+ crc = crc32_table[(crc ^ *p) & 0xff] ^ (crc >> 8);
+ }
+ return cpu_to_le32(~crc); /* transmit complement, per CRC-32 spec */
+}
+
+
+/*
+ Need to consider the fragment situation
+*/
+void rtw_wep_encrypt(struct adapter *padapter, u8 *pxmitframe)
+{ /* exclude ICV */
+
+ unsigned char crc[4];
+ struct arc4context mycontext;
+
+ sint curfragnum, length;
+ u32 keylength;
+
+ u8 *pframe, *payload, *iv; /* wepkey */
+ u8 wepkey[16];
+ u8 hw_hdr_offset = 0;
+ struct pkt_attrib *pattrib = &((struct xmit_frame *)pxmitframe)->attrib;
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+
+ if (((struct xmit_frame *)pxmitframe)->buf_addr == NULL)
+ return;
+
+ hw_hdr_offset = TXDESC_OFFSET;
+ pframe = ((struct xmit_frame *)pxmitframe)->buf_addr + hw_hdr_offset;
+
+ /* start to encrypt each fragment */
+ if ((pattrib->encrypt == _WEP40_) || (pattrib->encrypt == _WEP104_)) {
+ keylength = psecuritypriv->dot11DefKeylen[psecuritypriv->dot11PrivacyKeyIndex];
+
+ for (curfragnum = 0; curfragnum < pattrib->nr_frags; curfragnum++) {
+ iv = pframe+pattrib->hdrlen;
+ memcpy(&wepkey[0], iv, 3);
+ memcpy(&wepkey[3], &psecuritypriv->dot11DefKey[psecuritypriv->dot11PrivacyKeyIndex].skey[0], keylength);
+ payload = pframe+pattrib->iv_len+pattrib->hdrlen;
+
+ if ((curfragnum+1) == pattrib->nr_frags) { /* the last fragment */
+
+ length = pattrib->last_txcmdsz-pattrib->hdrlen-pattrib->iv_len-pattrib->icv_len;
+
+ *((__le32 *)crc) = getcrc32(payload, length);
+
+ arcfour_init(&mycontext, wepkey, 3+keylength);
+ arcfour_encrypt(&mycontext, payload, payload, length);
+ arcfour_encrypt(&mycontext, payload+length, crc, 4);
+
+ } else {
+ length = pxmitpriv->frag_len-pattrib->hdrlen-pattrib->iv_len-pattrib->icv_len;
+ *((__le32 *)crc) = getcrc32(payload, length);
+ arcfour_init(&mycontext, wepkey, 3+keylength);
+ arcfour_encrypt(&mycontext, payload, payload, length);
+ arcfour_encrypt(&mycontext, payload+length, crc, 4);
+
+ pframe += pxmitpriv->frag_len;
+ pframe = (u8 *)RND4((SIZE_PTR)(pframe));
+ }
+ }
+
+ WEP_SW_ENC_CNT_INC(psecuritypriv, pattrib->ra);
+ }
+}
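+
+/*
+ * Quick reference for the WEP layout handled above and below, derived from
+ * the offsets used in this file (per fragment):
+ *
+ *   | 802.11 hdr | 4-byte IV (3 IV bytes + keyid) | payload | 4-byte ICV |
+ *
+ * The RC4 seed is the 3 IV bytes followed by the 5- or 13-byte default key,
+ * and the ICV is the CRC-32 of the plaintext payload, encrypted together
+ * with it.
+ */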
+
+void rtw_wep_decrypt(struct adapter *padapter, u8 *precvframe)
+{
+ /* exclude ICV */
+ u8 crc[4];
+ struct arc4context mycontext;
+ sint length;
+ u32 keylength;
+ u8 *pframe, *payload, *iv, wepkey[16];
+ u8 keyindex;
+ struct rx_pkt_attrib *prxattrib = &(((union recv_frame *)precvframe)->u.hdr.attrib);
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
+
+ pframe = (unsigned char *)((union recv_frame *)precvframe)->u.hdr.rx_data;
+
+ /* start to decrypt recvframe */
+ if ((prxattrib->encrypt == _WEP40_) || (prxattrib->encrypt == _WEP104_)) {
+ iv = pframe+prxattrib->hdrlen;
+ /* keyindex =(iv[3]&0x3); */
+ keyindex = prxattrib->key_index;
+ keylength = psecuritypriv->dot11DefKeylen[keyindex];
+ memcpy(&wepkey[0], iv, 3);
+ /* memcpy(&wepkey[3], &psecuritypriv->dot11DefKey[psecuritypriv->dot11PrivacyKeyIndex].skey[0], keylength); */
+ memcpy(&wepkey[3], &psecuritypriv->dot11DefKey[keyindex].skey[0], keylength);
+ length = ((union recv_frame *)precvframe)->u.hdr.len-prxattrib->hdrlen-prxattrib->iv_len;
+
+ payload = pframe+prxattrib->iv_len+prxattrib->hdrlen;
+
+ /* decrypt payload include icv */
+ arcfour_init(&mycontext, wepkey, 3+keylength);
+ arcfour_encrypt(&mycontext, payload, payload, length);
+
+ /* calculate icv and compare the icv */
+ *((u32 *)crc) = le32_to_cpu(getcrc32(payload, length-4));
+
+ if (crc[3] != payload[length-1] || crc[2] != payload[length-2] || crc[1] != payload[length-3] || crc[0] != payload[length-4]) {
+ RT_TRACE(_module_rtl871x_security_c_, _drv_err_, ("rtw_wep_decrypt:icv error crc[3](%x)!=payload[length-1](%x) || crc[2](%x)!=payload[length-2](%x) || crc[1](%x)!=payload[length-3](%x) || crc[0](%x)!=payload[length-4](%x)\n",
+ crc[3], payload[length-1], crc[2], payload[length-2], crc[1], payload[length-3], crc[0], payload[length-4]));
+ }
+
+ WEP_SW_DEC_CNT_INC(psecuritypriv, prxattrib->ra);
+ }
+ return;
+}
+
+/* 3 =====TKIP related ===== */
+
+static u32 secmicgetuint32(u8 *p)
+/* Convert from Byte[] to u32 in a portable way */
+{
+ s32 i;
+ u32 res = 0;
+
+ for (i = 0; i < 4; i++) {
+ res |= ((u32)(*p++)) << (8*i);
+ }
+
+ return res;
+}
+
+static void secmicputuint32(u8 *p, u32 val)
+/* Convert from u32 to Byte[] in a portable way */
+{
+ long i;
+
+ for (i = 0; i < 4; i++) {
+ *p++ = (u8) (val & 0xff);
+ val >>= 8;
+ }
+}
+
+static void secmicclear(struct mic_data *pmicdata)
+{
+/* Reset the state to the empty message. */
+ pmicdata->L = pmicdata->K0;
+ pmicdata->R = pmicdata->K1;
+ pmicdata->nBytesInM = 0;
+ pmicdata->M = 0;
+}
+
+void rtw_secmicsetkey(struct mic_data *pmicdata, u8 *key)
+{
+ /* Set the key */
+ pmicdata->K0 = secmicgetuint32(key);
+ pmicdata->K1 = secmicgetuint32(key + 4);
+ /* and reset the message */
+ secmicclear(pmicdata);
+}
+
+void rtw_secmicappendbyte(struct mic_data *pmicdata, u8 b)
+{
+ /* Append the byte to our word-sized buffer */
+ pmicdata->M |= ((unsigned long)b) << (8*pmicdata->nBytesInM);
+ pmicdata->nBytesInM++;
+ /* Process the word if it is full. */
+ if (pmicdata->nBytesInM >= 4) {
+ pmicdata->L ^= pmicdata->M;
+ pmicdata->R ^= ROL32(pmicdata->L, 17);
+ pmicdata->L += pmicdata->R;
+ pmicdata->R ^= ((pmicdata->L & 0xff00ff00) >> 8) | ((pmicdata->L & 0x00ff00ff) << 8);
+ pmicdata->L += pmicdata->R;
+ pmicdata->R ^= ROL32(pmicdata->L, 3);
+ pmicdata->L += pmicdata->R;
+ pmicdata->R ^= ROR32(pmicdata->L, 2);
+ pmicdata->L += pmicdata->R;
+ /* Clear the buffer */
+ pmicdata->M = 0;
+ pmicdata->nBytesInM = 0;
+ }
+}
+
+void rtw_secmicappend(struct mic_data *pmicdata, u8 *src, u32 nbytes)
+{
+ /* This is simple */
+ while (nbytes > 0) {
+ rtw_secmicappendbyte(pmicdata, *src++);
+ nbytes--;
+ }
+}
+
+void rtw_secgetmic(struct mic_data *pmicdata, u8 *dst)
+{
+ /* Append the minimum padding */
+ rtw_secmicappendbyte(pmicdata, 0x5a);
+ rtw_secmicappendbyte(pmicdata, 0);
+ rtw_secmicappendbyte(pmicdata, 0);
+ rtw_secmicappendbyte(pmicdata, 0);
+ rtw_secmicappendbyte(pmicdata, 0);
+ /* and then zeroes until the length is a multiple of 4 */
+ while (pmicdata->nBytesInM != 0) {
+ rtw_secmicappendbyte(pmicdata, 0);
+ }
+ /* The appendByte function has already computed the result. */
+ secmicputuint32(dst, pmicdata->L);
+ secmicputuint32(dst+4, pmicdata->R);
+ /* Reset to the empty message. */
+ secmicclear(pmicdata);
+}
+
+
+void rtw_seccalctkipmic(u8 *key, u8 *header, u8 *data, u32 data_len, u8 *mic_code, u8 pri)
+{
+
+ struct mic_data micdata;
+ u8 priority[4] = {0x0, 0x0, 0x0, 0x0};
+
+ rtw_secmicsetkey(&micdata, key);
+ priority[0] = pri;
+
+ /* Michael MIC pseudo header: DA, SA, 3 x 0, Priority */
+ if (header[1]&1) { /* ToDS == 1 */
+ rtw_secmicappend(&micdata, &header[16], 6); /* DA */
+ if (header[1]&2) /* From Ds == 1 */
+ rtw_secmicappend(&micdata, &header[24], 6);
+ else
+ rtw_secmicappend(&micdata, &header[10], 6);
+ } else { /* ToDS == 0 */
+ rtw_secmicappend(&micdata, &header[4], 6); /* DA */
+ if (header[1]&2) /* From Ds == 1 */
+ rtw_secmicappend(&micdata, &header[16], 6);
+ else
+ rtw_secmicappend(&micdata, &header[10], 6);
+
+ }
+ rtw_secmicappend(&micdata, &priority[0], 4);
+
+
+ rtw_secmicappend(&micdata, data, data_len);
+
+ rtw_secgetmic(&micdata, mic_code);
+}
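+
+/*
+ * Illustrative call sequence for the Michael MIC helpers above (a sketch
+ * with made-up caller-side names, not taken from this patch): given the
+ * 8-byte TKIP TX MIC key, the 802.11 header and the MSDU payload,
+ *
+ *   u8 mic[8];
+ *
+ *   rtw_seccalctkipmic(mic_key, hdr, payload, payload_len, mic, priority);
+ *
+ * builds the Michael pseudo-header (DA, SA, the priority byte plus three
+ * zero bytes), feeds it and the payload through rtw_secmicappend(), and
+ * writes the 8-byte result via rtw_secgetmic().
+ */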
+
+/* macros for extraction/creation of u8/u16 values */
+#define RotR1(v16) ((((v16) >> 1) & 0x7FFF) ^ (((v16) & 1) << 15))
+#define Lo8(v16) ((u8)((v16) & 0x00FF))
+#define Hi8(v16) ((u8)(((v16) >> 8) & 0x00FF))
+#define Lo16(v32) ((u16)((v32) & 0xFFFF))
+#define Hi16(v32) ((u16)(((v32) >> 16) & 0xFFFF))
+#define Mk16(hi, lo) ((lo) ^ (((u16)(hi)) << 8))
+
+/* select the Nth 16-bit word of the temporal key byte array TK[] */
+#define TK16(N) Mk16(tk[2*(N)+1], tk[2*(N)])
+
+/* S-box lookup: 16 bits --> 16 bits */
+#define _S_(v16) (Sbox1[0][Lo8(v16)] ^ Sbox1[1][Hi8(v16)])
+
+/* fixed algorithm "parameters" */
+#define PHASE1_LOOP_CNT 8 /* this needs to be "big enough" */
+#define TA_SIZE 6 /* 48-bit transmitter address */
+#define TK_SIZE 16 /* 128-bit temporal key */
+#define P1K_SIZE 10 /* 80-bit Phase1 key */
+#define RC4_KEY_SIZE 16 /* 128-bit RC4KEY (104 bits unknown) */
+
+
+/* 2-byte by 2-byte subset of the full AES S-box table */
+static const unsigned short Sbox1[2][256] = { /* Sbox for hash (can be in ROM) */
+{
+ 0xC6A5, 0xF884, 0xEE99, 0xF68D, 0xFF0D, 0xD6BD, 0xDEB1, 0x9154,
+ 0x6050, 0x0203, 0xCEA9, 0x567D, 0xE719, 0xB562, 0x4DE6, 0xEC9A,
+ 0x8F45, 0x1F9D, 0x8940, 0xFA87, 0xEF15, 0xB2EB, 0x8EC9, 0xFB0B,
+ 0x41EC, 0xB367, 0x5FFD, 0x45EA, 0x23BF, 0x53F7, 0xE496, 0x9B5B,
+ 0x75C2, 0xE11C, 0x3DAE, 0x4C6A, 0x6C5A, 0x7E41, 0xF502, 0x834F,
+ 0x685C, 0x51F4, 0xD134, 0xF908, 0xE293, 0xAB73, 0x6253, 0x2A3F,
+ 0x080C, 0x9552, 0x4665, 0x9D5E, 0x3028, 0x37A1, 0x0A0F, 0x2FB5,
+ 0x0E09, 0x2436, 0x1B9B, 0xDF3D, 0xCD26, 0x4E69, 0x7FCD, 0xEA9F,
+ 0x121B, 0x1D9E, 0x5874, 0x342E, 0x362D, 0xDCB2, 0xB4EE, 0x5BFB,
+ 0xA4F6, 0x764D, 0xB761, 0x7DCE, 0x527B, 0xDD3E, 0x5E71, 0x1397,
+ 0xA6F5, 0xB968, 0x0000, 0xC12C, 0x4060, 0xE31F, 0x79C8, 0xB6ED,
+ 0xD4BE, 0x8D46, 0x67D9, 0x724B, 0x94DE, 0x98D4, 0xB0E8, 0x854A,
+ 0xBB6B, 0xC52A, 0x4FE5, 0xED16, 0x86C5, 0x9AD7, 0x6655, 0x1194,
+ 0x8ACF, 0xE910, 0x0406, 0xFE81, 0xA0F0, 0x7844, 0x25BA, 0x4BE3,
+ 0xA2F3, 0x5DFE, 0x80C0, 0x058A, 0x3FAD, 0x21BC, 0x7048, 0xF104,
+ 0x63DF, 0x77C1, 0xAF75, 0x4263, 0x2030, 0xE51A, 0xFD0E, 0xBF6D,
+ 0x814C, 0x1814, 0x2635, 0xC32F, 0xBEE1, 0x35A2, 0x88CC, 0x2E39,
+ 0x9357, 0x55F2, 0xFC82, 0x7A47, 0xC8AC, 0xBAE7, 0x322B, 0xE695,
+ 0xC0A0, 0x1998, 0x9ED1, 0xA37F, 0x4466, 0x547E, 0x3BAB, 0x0B83,
+ 0x8CCA, 0xC729, 0x6BD3, 0x283C, 0xA779, 0xBCE2, 0x161D, 0xAD76,
+ 0xDB3B, 0x6456, 0x744E, 0x141E, 0x92DB, 0x0C0A, 0x486C, 0xB8E4,
+ 0x9F5D, 0xBD6E, 0x43EF, 0xC4A6, 0x39A8, 0x31A4, 0xD337, 0xF28B,
+ 0xD532, 0x8B43, 0x6E59, 0xDAB7, 0x018C, 0xB164, 0x9CD2, 0x49E0,
+ 0xD8B4, 0xACFA, 0xF307, 0xCF25, 0xCAAF, 0xF48E, 0x47E9, 0x1018,
+ 0x6FD5, 0xF088, 0x4A6F, 0x5C72, 0x3824, 0x57F1, 0x73C7, 0x9751,
+ 0xCB23, 0xA17C, 0xE89C, 0x3E21, 0x96DD, 0x61DC, 0x0D86, 0x0F85,
+ 0xE090, 0x7C42, 0x71C4, 0xCCAA, 0x90D8, 0x0605, 0xF701, 0x1C12,
+ 0xC2A3, 0x6A5F, 0xAEF9, 0x69D0, 0x1791, 0x9958, 0x3A27, 0x27B9,
+ 0xD938, 0xEB13, 0x2BB3, 0x2233, 0xD2BB, 0xA970, 0x0789, 0x33A7,
+ 0x2DB6, 0x3C22, 0x1592, 0xC920, 0x8749, 0xAAFF, 0x5078, 0xA57A,
+ 0x038F, 0x59F8, 0x0980, 0x1A17, 0x65DA, 0xD731, 0x84C6, 0xD0B8,
+ 0x82C3, 0x29B0, 0x5A77, 0x1E11, 0x7BCB, 0xA8FC, 0x6DD6, 0x2C3A,
+ },
+
+
+ { /* second half of table is the byte-swapped version of the first! */
+ 0xA5C6, 0x84F8, 0x99EE, 0x8DF6, 0x0DFF, 0xBDD6, 0xB1DE, 0x5491,
+ 0x5060, 0x0302, 0xA9CE, 0x7D56, 0x19E7, 0x62B5, 0xE64D, 0x9AEC,
+ 0x458F, 0x9D1F, 0x4089, 0x87FA, 0x15EF, 0xEBB2, 0xC98E, 0x0BFB,
+ 0xEC41, 0x67B3, 0xFD5F, 0xEA45, 0xBF23, 0xF753, 0x96E4, 0x5B9B,
+ 0xC275, 0x1CE1, 0xAE3D, 0x6A4C, 0x5A6C, 0x417E, 0x02F5, 0x4F83,
+ 0x5C68, 0xF451, 0x34D1, 0x08F9, 0x93E2, 0x73AB, 0x5362, 0x3F2A,
+ 0x0C08, 0x5295, 0x6546, 0x5E9D, 0x2830, 0xA137, 0x0F0A, 0xB52F,
+ 0x090E, 0x3624, 0x9B1B, 0x3DDF, 0x26CD, 0x694E, 0xCD7F, 0x9FEA,
+ 0x1B12, 0x9E1D, 0x7458, 0x2E34, 0x2D36, 0xB2DC, 0xEEB4, 0xFB5B,
+ 0xF6A4, 0x4D76, 0x61B7, 0xCE7D, 0x7B52, 0x3EDD, 0x715E, 0x9713,
+ 0xF5A6, 0x68B9, 0x0000, 0x2CC1, 0x6040, 0x1FE3, 0xC879, 0xEDB6,
+ 0xBED4, 0x468D, 0xD967, 0x4B72, 0xDE94, 0xD498, 0xE8B0, 0x4A85,
+ 0x6BBB, 0x2AC5, 0xE54F, 0x16ED, 0xC586, 0xD79A, 0x5566, 0x9411,
+ 0xCF8A, 0x10E9, 0x0604, 0x81FE, 0xF0A0, 0x4478, 0xBA25, 0xE34B,
+ 0xF3A2, 0xFE5D, 0xC080, 0x8A05, 0xAD3F, 0xBC21, 0x4870, 0x04F1,
+ 0xDF63, 0xC177, 0x75AF, 0x6342, 0x3020, 0x1AE5, 0x0EFD, 0x6DBF,
+ 0x4C81, 0x1418, 0x3526, 0x2FC3, 0xE1BE, 0xA235, 0xCC88, 0x392E,
+ 0x5793, 0xF255, 0x82FC, 0x477A, 0xACC8, 0xE7BA, 0x2B32, 0x95E6,
+ 0xA0C0, 0x9819, 0xD19E, 0x7FA3, 0x6644, 0x7E54, 0xAB3B, 0x830B,
+ 0xCA8C, 0x29C7, 0xD36B, 0x3C28, 0x79A7, 0xE2BC, 0x1D16, 0x76AD,
+ 0x3BDB, 0x5664, 0x4E74, 0x1E14, 0xDB92, 0x0A0C, 0x6C48, 0xE4B8,
+ 0x5D9F, 0x6EBD, 0xEF43, 0xA6C4, 0xA839, 0xA431, 0x37D3, 0x8BF2,
+ 0x32D5, 0x438B, 0x596E, 0xB7DA, 0x8C01, 0x64B1, 0xD29C, 0xE049,
+ 0xB4D8, 0xFAAC, 0x07F3, 0x25CF, 0xAFCA, 0x8EF4, 0xE947, 0x1810,
+ 0xD56F, 0x88F0, 0x6F4A, 0x725C, 0x2438, 0xF157, 0xC773, 0x5197,
+ 0x23CB, 0x7CA1, 0x9CE8, 0x213E, 0xDD96, 0xDC61, 0x860D, 0x850F,
+ 0x90E0, 0x427C, 0xC471, 0xAACC, 0xD890, 0x0506, 0x01F7, 0x121C,
+ 0xA3C2, 0x5F6A, 0xF9AE, 0xD069, 0x9117, 0x5899, 0x273A, 0xB927,
+ 0x38D9, 0x13EB, 0xB32B, 0x3322, 0xBBD2, 0x70A9, 0x8907, 0xA733,
+ 0xB62D, 0x223C, 0x9215, 0x20C9, 0x4987, 0xFFAA, 0x7850, 0x7AA5,
+ 0x8F03, 0xF859, 0x8009, 0x171A, 0xDA65, 0x31D7, 0xC684, 0xB8D0,
+ 0xC382, 0xB029, 0x775A, 0x111E, 0xCB7B, 0xFCA8, 0xD66D, 0x3A2C,
+ }
+};
+
+/*
+**********************************************************************
+* Routine: Phase 1 -- generate P1K, given TA, TK, IV32
+*
+* Inputs:
+* tk[] = temporal key [128 bits]
+* ta[] = transmitter's MAC address [ 48 bits]
+* iv32 = upper 32 bits of IV [ 32 bits]
+* Output:
+* p1k[] = Phase 1 key [ 80 bits]
+*
+* Note:
+* This function only needs to be called every 2**16 packets,
+* although in theory it could be called every packet.
+*
+**********************************************************************
+*/
+static void phase1(u16 *p1k, const u8 *tk, const u8 *ta, u32 iv32)
+{
+ sint i;
+
+ /* Initialize the 80 bits of P1K[] from IV32 and TA[0..5] */
+ p1k[0] = Lo16(iv32);
+ p1k[1] = Hi16(iv32);
+ p1k[2] = Mk16(ta[1], ta[0]); /* use TA[] as little-endian */
+ p1k[3] = Mk16(ta[3], ta[2]);
+ p1k[4] = Mk16(ta[5], ta[4]);
+
+ /* Now compute an unbalanced Feistel cipher with 80-bit block */
+ /* size on the 80-bit block P1K[], using the 128-bit key TK[] */
+ for (i = 0; i < PHASE1_LOOP_CNT; i++) {
+ /* Each add operation here is mod 2**16 */
+ p1k[0] += _S_(p1k[4] ^ TK16((i&1)+0));
+ p1k[1] += _S_(p1k[0] ^ TK16((i&1)+2));
+ p1k[2] += _S_(p1k[1] ^ TK16((i&1)+4));
+ p1k[3] += _S_(p1k[2] ^ TK16((i&1)+6));
+ p1k[4] += _S_(p1k[3] ^ TK16((i&1)+0));
+ p1k[4] += (unsigned short)i; /* avoid "slide attacks" */
+ }
+}
+
+
+/*
+**********************************************************************
+* Routine: Phase 2 -- generate RC4KEY, given TK, P1K, IV16
+*
+* Inputs:
+* tk[] = Temporal key [128 bits]
+* p1k[] = Phase 1 output key [ 80 bits]
+* iv16 = low 16 bits of IV counter [ 16 bits]
+* Output:
+* rc4key[] = the key used to encrypt the packet [128 bits]
+*
+* Note:
+* The value {TA, IV32, IV16} for Phase1/Phase2 must be unique
+* across all packets using the same key TK value. Then, for a
+* given value of TK[], this TKIP48 construction guarantees that
+* the final RC4KEY value is unique across all packets.
+*
+* Suggested implementation optimization: if PPK[] is "overlaid"
+* appropriately on RC4KEY[], there is no need for the final
+* for loop below that copies the PPK[] result into RC4KEY[].
+*
+**********************************************************************
+*/
+static void phase2(u8 *rc4key, const u8 *tk, const u16 *p1k, u16 iv16)
+{
+ sint i;
+ u16 PPK[6]; /* temporary key for mixing */
+
+ /* Note: all adds in the PPK[] equations below are mod 2**16 */
+ for (i = 0; i < 5; i++)
+ PPK[i] = p1k[i]; /* first, copy P1K to PPK */
+
+ PPK[5] = p1k[4]+iv16; /* next, add in IV16 */
+
+ /* Bijective non-linear mixing of the 96 bits of PPK[0..5] */
+ PPK[0] += _S_(PPK[5] ^ TK16(0)); /* Mix key in each "round" */
+ PPK[1] += _S_(PPK[0] ^ TK16(1));
+ PPK[2] += _S_(PPK[1] ^ TK16(2));
+ PPK[3] += _S_(PPK[2] ^ TK16(3));
+ PPK[4] += _S_(PPK[3] ^ TK16(4));
+ PPK[5] += _S_(PPK[4] ^ TK16(5)); /* Total # S-box lookups == 6 */
+
+ /* Final sweep: bijective, "linear". Rotates kill LSB correlations */
+ PPK[0] += RotR1(PPK[5] ^ TK16(6));
+ PPK[1] += RotR1(PPK[0] ^ TK16(7)); /* Use all of TK[] in Phase2 */
+ PPK[2] += RotR1(PPK[1]);
+ PPK[3] += RotR1(PPK[2]);
+ PPK[4] += RotR1(PPK[3]);
+ PPK[5] += RotR1(PPK[4]);
+ /* Note: At this point, for a given key TK[0..15], the 96-bit output */
+ /* value PPK[0..5] is guaranteed to be unique, as a function */
+ /* of the 96-bit "input" value {TA, IV32, IV16}. That is, P1K */
+ /* is now a keyed permutation of {TA, IV32, IV16}. */
+
+ /* Set RC4KEY[0..3], which includes "cleartext" portion of RC4 key */
+ rc4key[0] = Hi8(iv16); /* RC4KEY[0..2] is the WEP IV */
+ rc4key[1] = (Hi8(iv16) | 0x20) & 0x7F; /* Help avoid weak (FMS) keys */
+ rc4key[2] = Lo8(iv16);
+ rc4key[3] = Lo8((PPK[5] ^ TK16(0)) >> 1);
+
+
+ /* Copy 96 bits of PPK[0..5] to RC4KEY[4..15] (little-endian) */
+ for (i = 0; i < 6; i++) {
+ rc4key[4+2*i] = Lo8(PPK[i]);
+ rc4key[5+2*i] = Hi8(PPK[i]);
+ }
+}
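+
+/*
+ * Sketch of how the two phases are combined by the TKIP encrypt/decrypt
+ * routines below, per MPDU, with the 48-bit packet number split into
+ * IV32/IV16 (ctx is an illustrative name):
+ *
+ *   u16 p1k[5];
+ *   u8 rc4key[16];
+ *
+ *   phase1(p1k, tk, ta, iv32);      // only changes every 2**16 packets
+ *   phase2(rc4key, tk, p1k, iv16);  // fresh for every packet
+ *   arcfour_init(&ctx, rc4key, 16); // then RC4 over payload + ICV
+ *
+ * The callers in this file keep phase1()'s output in a local ttkey[] buffer
+ * instead of caching it.
+ */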
+
+
+/* The hlen doesn't include the IV */
+u32 rtw_tkip_encrypt(struct adapter *padapter, u8 *pxmitframe)
+{ /* exclude ICV */
+ u16 pnl;
+ u32 pnh;
+ u8 rc4key[16];
+ u8 ttkey[16];
+ u8 crc[4];
+ u8 hw_hdr_offset = 0;
+ struct arc4context mycontext;
+ sint curfragnum, length;
+ u32 prwskeylen;
+
+ u8 *pframe, *payload, *iv, *prwskey;
+ union pn48 dot11txpn;
+ /* struct sta_info *stainfo; */
+ struct pkt_attrib *pattrib = &((struct xmit_frame *)pxmitframe)->attrib;
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+ u32 res = _SUCCESS;
+
+ if (((struct xmit_frame *)pxmitframe)->buf_addr == NULL)
+ return _FAIL;
+
+ hw_hdr_offset = TXDESC_OFFSET;
+ pframe = ((struct xmit_frame *)pxmitframe)->buf_addr + hw_hdr_offset;
+
+ /* 4 start to encrypt each fragment */
+ if (pattrib->encrypt == _TKIP_) {
+
+/*
+ if (pattrib->psta)
+ {
+ stainfo = pattrib->psta;
+ }
+ else
+ {
+ DBG_871X("%s, call rtw_get_stainfo()\n", __func__);
+ stainfo =rtw_get_stainfo(&padapter->stapriv ,&pattrib->ra[0]);
+ }
+*/
+ /* if (stainfo!= NULL) */
+ {
+/*
+ if (!(stainfo->state &_FW_LINKED))
+ {
+ DBG_871X("%s, psta->state(0x%x) != _FW_LINKED\n", __func__, stainfo->state);
+ return _FAIL;
+ }
+*/
+ RT_TRACE(_module_rtl871x_security_c_, _drv_err_, ("rtw_tkip_encrypt: stainfo!= NULL!!!\n"));
+
+ if (IS_MCAST(pattrib->ra))
+ prwskey = psecuritypriv->dot118021XGrpKey[psecuritypriv->dot118021XGrpKeyid].skey;
+ else
+ /* prwskey =&stainfo->dot118021x_UncstKey.skey[0]; */
+ prwskey = pattrib->dot118021x_UncstKey.skey;
+
+ prwskeylen = 16;
+
+ for (curfragnum = 0; curfragnum < pattrib->nr_frags; curfragnum++) {
+ iv = pframe+pattrib->hdrlen;
+ payload = pframe+pattrib->iv_len+pattrib->hdrlen;
+
+ GET_TKIP_PN(iv, dot11txpn);
+
+ pnl = (u16)(dot11txpn.val);
+ pnh = (u32)(dot11txpn.val>>16);
+
+ phase1((u16 *)&ttkey[0], prwskey, &pattrib->ta[0], pnh);
+
+ phase2(&rc4key[0], prwskey, (u16 *)&ttkey[0], pnl);
+
+ if ((curfragnum+1) == pattrib->nr_frags) { /* 4 the last fragment */
+ length = pattrib->last_txcmdsz-pattrib->hdrlen-pattrib->iv_len-pattrib->icv_len;
+ RT_TRACE(_module_rtl871x_security_c_, _drv_info_, ("pattrib->iv_len =%x, pattrib->icv_len =%x\n", pattrib->iv_len, pattrib->icv_len));
+ *((__le32 *)crc) = getcrc32(payload, length);/* modified by Amy*/
+
+ arcfour_init(&mycontext, rc4key, 16);
+ arcfour_encrypt(&mycontext, payload, payload, length);
+ arcfour_encrypt(&mycontext, payload+length, crc, 4);
+
+ } else {
+ length = pxmitpriv->frag_len-pattrib->hdrlen-pattrib->iv_len-pattrib->icv_len;
+ *((__le32 *)crc) = getcrc32(payload, length);/* modified by Amy*/
+ arcfour_init(&mycontext, rc4key, 16);
+ arcfour_encrypt(&mycontext, payload, payload, length);
+ arcfour_encrypt(&mycontext, payload+length, crc, 4);
+
+ pframe += pxmitpriv->frag_len;
+ pframe = (u8 *)RND4((SIZE_PTR)(pframe));
+ }
+ }
+
+ TKIP_SW_ENC_CNT_INC(psecuritypriv, pattrib->ra);
+ }
+/*
+ else {
+ RT_TRACE(_module_rtl871x_security_c_, _drv_err_, ("rtw_tkip_encrypt: stainfo == NULL!!!\n"));
+ DBG_871X("%s, psta ==NUL\n", __func__);
+ res = _FAIL;
+ }
+*/
+
+ }
+ return res;
+}
+
+
+/* The hlen doesn't include the IV */
+u32 rtw_tkip_decrypt(struct adapter *padapter, u8 *precvframe)
+{ /* exclude ICV */
+ u16 pnl;
+ u32 pnh;
+ u8 rc4key[16];
+ u8 ttkey[16];
+ u8 crc[4];
+ struct arc4context mycontext;
+ sint length;
+ u32 prwskeylen;
+
+ u8 *pframe, *payload, *iv, *prwskey;
+ union pn48 dot11txpn;
+ struct sta_info *stainfo;
+ struct rx_pkt_attrib *prxattrib = &((union recv_frame *)precvframe)->u.hdr.attrib;
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
+/* struct recv_priv *precvpriv =&padapter->recvpriv; */
+ u32 res = _SUCCESS;
+
+ pframe = (unsigned char *)((union recv_frame *)precvframe)->u.hdr.rx_data;
+
+ /* 4 start to decrypt recvframe */
+ if (prxattrib->encrypt == _TKIP_) {
+ stainfo = rtw_get_stainfo(&padapter->stapriv, &prxattrib->ta[0]);
+ if (stainfo != NULL) {
+ if (IS_MCAST(prxattrib->ra)) {
+ static unsigned long start = 0;
+ static u32 no_gkey_bc_cnt = 0;
+ static u32 no_gkey_mc_cnt = 0;
+
+ if (psecuritypriv->binstallGrpkey == false) {
+ res = _FAIL;
+
+ if (start == 0)
+ start = jiffies;
+
+ if (is_broadcast_mac_addr(prxattrib->ra))
+ no_gkey_bc_cnt++;
+ else
+ no_gkey_mc_cnt++;
+
+ if (jiffies_to_msecs(jiffies - start) > 1000) {
+ if (no_gkey_bc_cnt || no_gkey_mc_cnt) {
+ DBG_871X_LEVEL(_drv_always_, FUNC_ADPT_FMT" no_gkey_bc_cnt:%u, no_gkey_mc_cnt:%u\n",
+ FUNC_ADPT_ARG(padapter), no_gkey_bc_cnt, no_gkey_mc_cnt);
+ }
+ start = jiffies;
+ no_gkey_bc_cnt = 0;
+ no_gkey_mc_cnt = 0;
+ }
+ goto exit;
+ }
+
+ if (no_gkey_bc_cnt || no_gkey_mc_cnt) {
+ DBG_871X_LEVEL(_drv_always_, FUNC_ADPT_FMT" gkey installed. no_gkey_bc_cnt:%u, no_gkey_mc_cnt:%u\n",
+ FUNC_ADPT_ARG(padapter), no_gkey_bc_cnt, no_gkey_mc_cnt);
+ }
+ start = 0;
+ no_gkey_bc_cnt = 0;
+ no_gkey_mc_cnt = 0;
+
+ /* DBG_871X("rx bc/mc packets, to perform sw rtw_tkip_decrypt\n"); */
+ /* prwskey = psecuritypriv->dot118021XGrpKey[psecuritypriv->dot118021XGrpKeyid].skey; */
+ prwskey = psecuritypriv->dot118021XGrpKey[prxattrib->key_index].skey;
+ prwskeylen = 16;
+ } else {
+ prwskey = &stainfo->dot118021x_UncstKey.skey[0];
+ prwskeylen = 16;
+ }
+
+ iv = pframe+prxattrib->hdrlen;
+ payload = pframe+prxattrib->iv_len+prxattrib->hdrlen;
+ length = ((union recv_frame *)precvframe)->u.hdr.len-prxattrib->hdrlen-prxattrib->iv_len;
+
+ GET_TKIP_PN(iv, dot11txpn);
+
+ pnl = (u16)(dot11txpn.val);
+ pnh = (u32)(dot11txpn.val>>16);
+
+ phase1((u16 *)&ttkey[0], prwskey, &prxattrib->ta[0], pnh);
+ phase2(&rc4key[0], prwskey, (unsigned short *)&ttkey[0], pnl);
+
+ /* 4 decrypt payload include icv */
+
+ arcfour_init(&mycontext, rc4key, 16);
+ arcfour_encrypt(&mycontext, payload, payload, length);
+
+ *((u32 *)crc) = le32_to_cpu(getcrc32(payload, length-4));
+
+ if (crc[3] != payload[length-1] || crc[2] != payload[length-2] || crc[1] != payload[length-3] || crc[0] != payload[length-4]) {
+ RT_TRACE(_module_rtl871x_security_c_, _drv_err_,
+ ("rtw_wep_decrypt:icv error crc[3](%x)!=payload[length-1](%x) || crc[2](%x)!=payload[length-2](%x) || crc[1](%x)!=payload[length-3](%x) || crc[0](%x)!=payload[length-4](%x)\n",
+ crc[3], payload[length-1], crc[2], payload[length-2], crc[1], payload[length-3], crc[0], payload[length-4]));
+ res = _FAIL;
+ }
+
+ TKIP_SW_DEC_CNT_INC(psecuritypriv, prxattrib->ra);
+ } else {
+ RT_TRACE(_module_rtl871x_security_c_, _drv_err_, ("rtw_tkip_decrypt: stainfo == NULL!!!\n"));
+ res = _FAIL;
+ }
+
+ }
+exit:
+ return res;
+
+}
+
+
+/* 3 =====AES related ===== */
+
+
+
+#define MAX_MSG_SIZE 2048
+/*****************************/
+/******** SBOX Table *********/
+/*****************************/
+
+ static u8 sbox_table[256] = {
+ 0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5,
+ 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76,
+ 0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0,
+ 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0,
+ 0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc,
+ 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15,
+ 0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a,
+ 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75,
+ 0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0,
+ 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84,
+ 0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b,
+ 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf,
+ 0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85,
+ 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8,
+ 0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5,
+ 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2,
+ 0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17,
+ 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73,
+ 0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88,
+ 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb,
+ 0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c,
+ 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79,
+ 0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9,
+ 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08,
+ 0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6,
+ 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a,
+ 0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e,
+ 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e,
+ 0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94,
+ 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf,
+ 0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68,
+ 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16
+ };
+
+/*****************************/
+/**** Function Prototypes ****/
+/*****************************/
+
+static void bitwise_xor(u8 *ina, u8 *inb, u8 *out);
+static void construct_mic_iv(
+ u8 *mic_header1,
+ sint qc_exists,
+ sint a4_exists,
+ u8 *mpdu,
+ uint payload_length,
+ u8 *pn_vector,
+ uint frtype
+);/* added for CONFIG_IEEE80211W; non-11w frames can also use this */
+static void construct_mic_header1(
+ u8 *mic_header1,
+ sint header_length,
+ u8 *mpdu,
+ uint frtype
+);/* added for CONFIG_IEEE80211W; non-11w frames can also use this */
+static void construct_mic_header2(
+ u8 *mic_header2,
+ u8 *mpdu,
+ sint a4_exists,
+ sint qc_exists
+);
+static void construct_ctr_preload(
+ u8 *ctr_preload,
+ sint a4_exists,
+ sint qc_exists,
+ u8 *mpdu,
+ u8 *pn_vector,
+ sint c,
+ uint frtype
+);/* added for CONFIG_IEEE80211W; non-11w frames can also use this */
+static void xor_128(u8 *a, u8 *b, u8 *out);
+static void xor_32(u8 *a, u8 *b, u8 *out);
+static u8 sbox(u8 a);
+static void next_key(u8 *key, sint round);
+static void byte_sub(u8 *in, u8 *out);
+static void shift_row(u8 *in, u8 *out);
+static void mix_column(u8 *in, u8 *out);
+static void aes128k128d(u8 *key, u8 *data, u8 *ciphertext);
+
+
+/****************************************/
+/* aes128k128d() */
+/* Performs a 128 bit AES encrypt with */
+/* 128 bit data. */
+/****************************************/
+static void xor_128(u8 *a, u8 *b, u8 *out)
+{
+ sint i;
+
+ for (i = 0; i < 16; i++) {
+ out[i] = a[i] ^ b[i];
+ }
+}
+
+
+static void xor_32(u8 *a, u8 *b, u8 *out)
+{
+ sint i;
+
+ for (i = 0; i < 4; i++) {
+ out[i] = a[i] ^ b[i];
+ }
+}
+
+
+static u8 sbox(u8 a)
+{
+ return sbox_table[(sint)a];
+}
+
+
+static void next_key(u8 *key, sint round)
+{
+ u8 rcon;
+ u8 sbox_key[4];
+ u8 rcon_table[12] = {
+ 0x01, 0x02, 0x04, 0x08,
+ 0x10, 0x20, 0x40, 0x80,
+ 0x1b, 0x36, 0x36, 0x36
+ };
+ sbox_key[0] = sbox(key[13]);
+ sbox_key[1] = sbox(key[14]);
+ sbox_key[2] = sbox(key[15]);
+ sbox_key[3] = sbox(key[12]);
+
+ rcon = rcon_table[round];
+
+ xor_32(&key[0], sbox_key, &key[0]);
+ key[0] = key[0] ^ rcon;
+
+ xor_32(&key[4], &key[0], &key[4]);
+ xor_32(&key[8], &key[4], &key[8]);
+ xor_32(&key[12], &key[8], &key[12]);
+}
+
+
+static void byte_sub(u8 *in, u8 *out)
+{
+ sint i;
+
+ for (i = 0; i < 16; i++) {
+ out[i] = sbox(in[i]);
+ }
+}
+
+
+static void shift_row(u8 *in, u8 *out)
+{
+ out[0] = in[0];
+ out[1] = in[5];
+ out[2] = in[10];
+ out[3] = in[15];
+ out[4] = in[4];
+ out[5] = in[9];
+ out[6] = in[14];
+ out[7] = in[3];
+ out[8] = in[8];
+ out[9] = in[13];
+ out[10] = in[2];
+ out[11] = in[7];
+ out[12] = in[12];
+ out[13] = in[1];
+ out[14] = in[6];
+ out[15] = in[11];
+}
+
+
+static void mix_column(u8 *in, u8 *out)
+{
+ sint i;
+ u8 add1b[4];
+ u8 add1bf7[4];
+ u8 rotl[4];
+ u8 swap_halfs[4];
+ u8 andf7[4];
+ u8 rotr[4];
+ u8 temp[4];
+ u8 tempb[4];
+
+ for (i = 0; i < 4; i++) {
+ if ((in[i] & 0x80) == 0x80)
+ add1b[i] = 0x1b;
+ else
+ add1b[i] = 0x00;
+ }
+
+ swap_halfs[0] = in[2]; /* Swap halves */
+ swap_halfs[1] = in[3];
+ swap_halfs[2] = in[0];
+ swap_halfs[3] = in[1];
+
+ rotl[0] = in[3]; /* Rotate left 8 bits */
+ rotl[1] = in[0];
+ rotl[2] = in[1];
+ rotl[3] = in[2];
+
+ andf7[0] = in[0] & 0x7f;
+ andf7[1] = in[1] & 0x7f;
+ andf7[2] = in[2] & 0x7f;
+ andf7[3] = in[3] & 0x7f;
+
+ for (i = 3; i > 0; i--) { /* logical shift left 1 bit */
+ andf7[i] = andf7[i] << 1;
+ if ((andf7[i-1] & 0x80) == 0x80)
+ andf7[i] = (andf7[i] | 0x01);
+ }
+ andf7[0] = andf7[0] << 1;
+ andf7[0] = andf7[0] & 0xfe;
+
+ xor_32(add1b, andf7, add1bf7);
+
+ xor_32(in, add1bf7, rotr);
+
+ temp[0] = rotr[0]; /* Rotate right 8 bits */
+ rotr[0] = rotr[1];
+ rotr[1] = rotr[2];
+ rotr[2] = rotr[3];
+ rotr[3] = temp[0];
+
+ xor_32(add1bf7, rotr, temp);
+ xor_32(swap_halfs, rotl, tempb);
+ xor_32(temp, tempb, out);
+}
+
+static void aes128k128d(u8 *key, u8 *data, u8 *ciphertext)
+{
+ sint round;
+ sint i;
+ u8 intermediatea[16];
+ u8 intermediateb[16];
+ u8 round_key[16];
+
+ for (i = 0; i < 16; i++)
+ round_key[i] = key[i];
+
+ for (round = 0; round < 11; round++) {
+ if (round == 0) {
+ xor_128(round_key, data, ciphertext);
+ next_key(round_key, round);
+ } else if (round == 10) {
+ byte_sub(ciphertext, intermediatea);
+ shift_row(intermediatea, intermediateb);
+ xor_128(intermediateb, round_key, ciphertext);
+ } else { /* rounds 1 - 9 */
+ byte_sub(ciphertext, intermediatea);
+ shift_row(intermediatea, intermediateb);
+ mix_column(&intermediateb[0], &intermediatea[0]);
+ mix_column(&intermediateb[4], &intermediatea[4]);
+ mix_column(&intermediateb[8], &intermediatea[8]);
+ mix_column(&intermediateb[12], &intermediatea[12]);
+ xor_128(intermediatea, round_key, ciphertext);
+ next_key(round_key, round);
+ }
+ }
+}
+
+
+/************************************************/
+/* construct_mic_iv() */
+/* Builds the MIC IV from header fields and PN */
+/* Baron thinks this function constructs the CCM */
+/* nonce */
+/************************************************/
+static void construct_mic_iv(
+ u8 *mic_iv,
+ sint qc_exists,
+ sint a4_exists,
+ u8 *mpdu,
+ uint payload_length,
+ u8 *pn_vector,
+ uint frtype/* added for CONFIG_IEEE80211W; non-11w frames can also use this */
+)
+{
+ sint i;
+
+ mic_iv[0] = 0x59;
+
+ if (qc_exists && a4_exists)
+ mic_iv[1] = mpdu[30] & 0x0f; /* QoS_TC */
+
+ if (qc_exists && !a4_exists)
+ mic_iv[1] = mpdu[24] & 0x0f; /* mute bits 7-4 */
+
+ if (!qc_exists)
+ mic_iv[1] = 0x00;
+
+ /* 802.11w management frame should set management bit(4) */
+ if (frtype == WIFI_MGT_TYPE)
+ mic_iv[1] |= BIT(4);
+
+ for (i = 2; i < 8; i++)
+ mic_iv[i] = mpdu[i + 8]; /* mic_iv[2:7] = A2[0:5] = mpdu[10:15] */
+ #ifdef CONSISTENT_PN_ORDER
+ for (i = 8; i < 14; i++)
+ mic_iv[i] = pn_vector[i - 8]; /* mic_iv[8:13] = PN[0:5] */
+ #else
+ for (i = 8; i < 14; i++)
+ mic_iv[i] = pn_vector[13 - i]; /* mic_iv[8:13] = PN[5:0] */
+ #endif
+ mic_iv[14] = (unsigned char) (payload_length / 256);
+ mic_iv[15] = (unsigned char) (payload_length % 256);
+}
+
+
+/************************************************/
+/* construct_mic_header1() */
+/* Builds the first MIC header block from */
+/* header fields. */
+/* Build AAD SC, A1, A2 */
+/************************************************/
+static void construct_mic_header1(
+ u8 *mic_header1,
+ sint header_length,
+ u8 *mpdu,
+ uint frtype/* added for CONFIG_IEEE80211W; non-11w frames can also use this */
+)
+{
+ mic_header1[0] = (u8)((header_length - 2) / 256);
+ mic_header1[1] = (u8)((header_length - 2) % 256);
+
+ /* 802.11w management frames don't mask out subtype bits 4, 5, 6 of the frame control field */
+ if (frtype == WIFI_MGT_TYPE)
+ mic_header1[2] = mpdu[0];
+ else
+ mic_header1[2] = mpdu[0] & 0xcf; /* Mute CF poll & CF ack bits */
+
+ mic_header1[3] = mpdu[1] & 0xc7; /* Mute retry, more data and pwr mgt bits */
+ mic_header1[4] = mpdu[4]; /* A1 */
+ mic_header1[5] = mpdu[5];
+ mic_header1[6] = mpdu[6];
+ mic_header1[7] = mpdu[7];
+ mic_header1[8] = mpdu[8];
+ mic_header1[9] = mpdu[9];
+ mic_header1[10] = mpdu[10]; /* A2 */
+ mic_header1[11] = mpdu[11];
+ mic_header1[12] = mpdu[12];
+ mic_header1[13] = mpdu[13];
+ mic_header1[14] = mpdu[14];
+ mic_header1[15] = mpdu[15];
+}
+
+
+/************************************************/
+/* construct_mic_header2() */
+/* Builds the last MIC header block from */
+/* header fields. */
+/************************************************/
+static void construct_mic_header2(
+ u8 *mic_header2,
+ u8 *mpdu,
+ sint a4_exists,
+ sint qc_exists
+)
+{
+ sint i;
+
+ for (i = 0; i < 16; i++)
+ mic_header2[i] = 0x00;
+
+ mic_header2[0] = mpdu[16]; /* A3 */
+ mic_header2[1] = mpdu[17];
+ mic_header2[2] = mpdu[18];
+ mic_header2[3] = mpdu[19];
+ mic_header2[4] = mpdu[20];
+ mic_header2[5] = mpdu[21];
+
+ mic_header2[6] = 0x00;
+ mic_header2[7] = 0x00; /* mpdu[23]; */
+
+
+ if (!qc_exists && a4_exists) {
+ for (i = 0; i < 6; i++)
+ mic_header2[8+i] = mpdu[24+i]; /* A4 */
+
+ }
+
+ if (qc_exists && !a4_exists) {
+ mic_header2[8] = mpdu[24] & 0x0f; /* mute bits 15 - 4 */
+ mic_header2[9] = mpdu[25] & 0x00;
+ }
+
+ if (qc_exists && a4_exists) {
+ for (i = 0; i < 6; i++)
+ mic_header2[8+i] = mpdu[24+i]; /* A4 */
+
+ mic_header2[14] = mpdu[30] & 0x0f;
+ mic_header2[15] = mpdu[31] & 0x00;
+ }
+
+}
+
+/************************************************/
+/* construct_ctr_preload() */
+/* Builds the CTR-mode counter preload from */
+/* header fields and the PN. */
+/* Baron thinks this function constructs the CCM */
+/* nonce */
+/************************************************/
+static void construct_ctr_preload(
+ u8 *ctr_preload,
+ sint a4_exists,
+ sint qc_exists,
+ u8 *mpdu,
+ u8 *pn_vector,
+ sint c,
+ uint frtype /* added for CONFIG_IEEE80211W; non-11w frames can also use this */
+)
+{
+ sint i = 0;
+
+ for (i = 0; i < 16; i++)
+ ctr_preload[i] = 0x00;
+ i = 0;
+
+ ctr_preload[0] = 0x01; /* flag */
+ if (qc_exists && a4_exists)
+ ctr_preload[1] = mpdu[30] & 0x0f; /* QoC_Control */
+ if (qc_exists && !a4_exists)
+ ctr_preload[1] = mpdu[24] & 0x0f;
+
+ /* 802.11w management frame should set management bit(4) */
+ if (frtype == WIFI_MGT_TYPE)
+ ctr_preload[1] |= BIT(4);
+
+ for (i = 2; i < 8; i++)
+ ctr_preload[i] = mpdu[i + 8]; /* ctr_preload[2:7] = A2[0:5] = mpdu[10:15] */
+#ifdef CONSISTENT_PN_ORDER
+ for (i = 8; i < 14; i++)
+ ctr_preload[i] = pn_vector[i - 8]; /* ctr_preload[8:13] = PN[0:5] */
+#else
+ for (i = 8; i < 14; i++)
+ ctr_preload[i] = pn_vector[13 - i]; /* ctr_preload[8:13] = PN[5:0] */
+#endif
+ ctr_preload[14] = (unsigned char) (c / 256); /* Ctr */
+ ctr_preload[15] = (unsigned char) (c % 256);
+}
+
+
+/************************************/
+/* bitwise_xor() */
+/* A 128 bit, bitwise exclusive or */
+/************************************/
+static void bitwise_xor(u8 *ina, u8 *inb, u8 *out)
+{
+ sint i;
+
+ for (i = 0; i < 16; i++) {
+ out[i] = ina[i] ^ inb[i];
+ }
+}
+
+
+static sint aes_cipher(u8 *key, uint hdrlen,
+ u8 *pframe, uint plen)
+{
+ uint qc_exists, a4_exists, i, j, payload_remainder,
+ num_blocks, payload_index;
+
+ u8 pn_vector[6];
+ u8 mic_iv[16];
+ u8 mic_header1[16];
+ u8 mic_header2[16];
+ u8 ctr_preload[16];
+
+ /* Intermediate Buffers */
+ u8 chain_buffer[16];
+ u8 aes_out[16];
+ u8 padded_buffer[16];
+ u8 mic[8];
+ uint frtype = GetFrameType(pframe);
+ uint frsubtype = GetFrameSubType(pframe);
+
+ frsubtype = frsubtype>>4;
+
+
+ memset((void *)mic_iv, 0, 16);
+ memset((void *)mic_header1, 0, 16);
+ memset((void *)mic_header2, 0, 16);
+ memset((void *)ctr_preload, 0, 16);
+ memset((void *)chain_buffer, 0, 16);
+ memset((void *)aes_out, 0, 16);
+ memset((void *)padded_buffer, 0, 16);
+
+ if ((hdrlen == WLAN_HDR_A3_LEN) || (hdrlen == WLAN_HDR_A3_QOS_LEN))
+ a4_exists = 0;
+ else
+ a4_exists = 1;
+
+ if (((frtype|frsubtype) == WIFI_DATA_CFACK) ||
+ ((frtype|frsubtype) == WIFI_DATA_CFPOLL) ||
+ ((frtype|frsubtype) == WIFI_DATA_CFACKPOLL)) {
+ qc_exists = 1;
+ if (hdrlen != WLAN_HDR_A3_QOS_LEN)
+ hdrlen += 2;
+
+ } else if ((frtype == WIFI_DATA) && /* added for CONFIG_IEEE80211W; non-11w frames can also use this */
+ ((frsubtype == 0x08) ||
+ (frsubtype == 0x09) ||
+ (frsubtype == 0x0a) ||
+ (frsubtype == 0x0b))) {
+ if (hdrlen != WLAN_HDR_A3_QOS_LEN)
+ hdrlen += 2;
+
+ qc_exists = 1;
+ } else
+ qc_exists = 0;
+
+ pn_vector[0] = pframe[hdrlen];
+ pn_vector[1] = pframe[hdrlen+1];
+ pn_vector[2] = pframe[hdrlen+4];
+ pn_vector[3] = pframe[hdrlen+5];
+ pn_vector[4] = pframe[hdrlen+6];
+ pn_vector[5] = pframe[hdrlen+7];
+
+ construct_mic_iv(
+ mic_iv,
+ qc_exists,
+ a4_exists,
+ pframe, /* message, */
+ plen,
+ pn_vector,
+ frtype /* add for CONFIG_IEEE80211W, none 11w also can use */
+ );
+
+ construct_mic_header1(
+ mic_header1,
+ hdrlen,
+ pframe, /* message */
+ frtype /* add for CONFIG_IEEE80211W, none 11w also can use */
+ );
+ construct_mic_header2(
+ mic_header2,
+ pframe, /* message, */
+ a4_exists,
+ qc_exists
+ );
+
+
+ payload_remainder = plen % 16;
+ num_blocks = plen / 16;
+
+ /* Find start of payload */
+ payload_index = (hdrlen + 8);
+
+ /* Calculate MIC */
+ aes128k128d(key, mic_iv, aes_out);
+ bitwise_xor(aes_out, mic_header1, chain_buffer);
+ aes128k128d(key, chain_buffer, aes_out);
+ bitwise_xor(aes_out, mic_header2, chain_buffer);
+ aes128k128d(key, chain_buffer, aes_out);
+
+ for (i = 0; i < num_blocks; i++) {
+ bitwise_xor(aes_out, &pframe[payload_index], chain_buffer);/* bitwise_xor(aes_out, &message[payload_index], chain_buffer); */
+
+ payload_index += 16;
+ aes128k128d(key, chain_buffer, aes_out);
+ }
+
+ /* Add on the final payload block if it needs padding */
+ if (payload_remainder > 0) {
+ for (j = 0; j < 16; j++)
+ padded_buffer[j] = 0x00;
+ for (j = 0; j < payload_remainder; j++) {
+ padded_buffer[j] = pframe[payload_index++];/* padded_buffer[j] = message[payload_index++]; */
+ }
+ bitwise_xor(aes_out, padded_buffer, chain_buffer);
+ aes128k128d(key, chain_buffer, aes_out);
+
+ }
+
+ for (j = 0 ; j < 8; j++)
+ mic[j] = aes_out[j];
+
+ /* Insert MIC into payload */
+ for (j = 0; j < 8; j++)
+ pframe[payload_index+j] = mic[j]; /* message[payload_index+j] = mic[j]; */
+
+ payload_index = hdrlen + 8;
+ for (i = 0; i < num_blocks; i++) {
+ construct_ctr_preload(
+ ctr_preload,
+ a4_exists,
+ qc_exists,
+ pframe, /* message, */
+ pn_vector,
+ i+1,
+ frtype
+ ); /* add for CONFIG_IEEE80211W, none 11w also can use */
+ aes128k128d(key, ctr_preload, aes_out);
+ bitwise_xor(aes_out, &pframe[payload_index], chain_buffer);/* bitwise_xor(aes_out, &message[payload_index], chain_buffer); */
+ for (j = 0; j < 16; j++)
+ pframe[payload_index++] = chain_buffer[j];/* for (j = 0; j<16;j++) message[payload_index++] = chain_buffer[j]; */
+ }
+
+ if (payload_remainder > 0) {
+ /* If there is a short final block, then pad it,*/
+ /* encrypt it and copy the unpadded part back */
+ construct_ctr_preload(
+ ctr_preload,
+ a4_exists,
+ qc_exists,
+ pframe, /* message, */
+ pn_vector,
+ num_blocks+1,
+ frtype
+ ); /* add for CONFIG_IEEE80211W, none 11w also can use */
+
+ for (j = 0; j < 16; j++)
+ padded_buffer[j] = 0x00;
+ for (j = 0; j < payload_remainder; j++)
+ padded_buffer[j] = pframe[payload_index+j];/* padded_buffer[j] = message[payload_index+j]; */
+
+ aes128k128d(key, ctr_preload, aes_out);
+ bitwise_xor(aes_out, padded_buffer, chain_buffer);
+ for (j = 0; j < payload_remainder; j++)
+ pframe[payload_index++] = chain_buffer[j];/* for (j = 0; j<payload_remainder;j++) message[payload_index++] = chain_buffer[j]; */
+ }
+
+ /* Encrypt the MIC */
+ construct_ctr_preload(
+ ctr_preload,
+ a4_exists,
+ qc_exists,
+ pframe, /* message, */
+ pn_vector,
+ 0,
+ frtype
+ ); /* add for CONFIG_IEEE80211W, none 11w also can use */
+
+ for (j = 0; j < 16; j++)
+ padded_buffer[j] = 0x00;
+ for (j = 0; j < 8; j++)
+ padded_buffer[j] = pframe[j+hdrlen+8+plen];/* padded_buffer[j] = message[j+hdrlen+8+plen]; */
+
+ aes128k128d(key, ctr_preload, aes_out);
+ bitwise_xor(aes_out, padded_buffer, chain_buffer);
+ for (j = 0; j < 8; j++)
+ pframe[payload_index++] = chain_buffer[j];/* for (j = 0; j<8;j++) message[payload_index++] = chain_buffer[j]; */
+
+ return _SUCCESS;
+}
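+
+/*
+ * Frame layout assumed by aes_cipher()/aes_decipher(), summarising the
+ * offsets used above (not a normative statement of the CCMP format):
+ *
+ *   | 802.11 hdr (hdrlen) | 8-byte CCMP header (PN/ExtIV) | payload | 8-byte MIC |
+ *
+ * The MIC is computed CBC-MAC style over mic_iv, mic_header1/2 and the
+ * payload, and then both payload and MIC are encrypted in CTR mode with the
+ * counter blocks produced by construct_ctr_preload().
+ */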
+
+u32 rtw_aes_encrypt(struct adapter *padapter, u8 *pxmitframe)
+{ /* exclude ICV */
+
+
+ /*static*/
+/* unsigned char message[MAX_MSG_SIZE]; */
+
+ /* Intermediate Buffers */
+ sint curfragnum, length;
+ u32 prwskeylen;
+ u8 *pframe, *prwskey; /* *payload,*iv */
+ u8 hw_hdr_offset = 0;
+ /* struct sta_info *stainfo = NULL; */
+ struct pkt_attrib *pattrib = &((struct xmit_frame *)pxmitframe)->attrib;
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+
+/* uint offset = 0; */
+ u32 res = _SUCCESS;
+
+ if (((struct xmit_frame *)pxmitframe)->buf_addr == NULL)
+ return _FAIL;
+
+ hw_hdr_offset = TXDESC_OFFSET;
+ pframe = ((struct xmit_frame *)pxmitframe)->buf_addr + hw_hdr_offset;
+
+ /* 4 start to encrypt each fragment */
+ if ((pattrib->encrypt == _AES_)) {
+ RT_TRACE(_module_rtl871x_security_c_, _drv_err_, ("rtw_aes_encrypt: stainfo!= NULL!!!\n"));
+
+ if (IS_MCAST(pattrib->ra))
+ prwskey = psecuritypriv->dot118021XGrpKey[psecuritypriv->dot118021XGrpKeyid].skey;
+ else
+ /* prwskey =&stainfo->dot118021x_UncstKey.skey[0]; */
+ prwskey = pattrib->dot118021x_UncstKey.skey;
+
+ prwskeylen = 16;
+
+ for (curfragnum = 0; curfragnum < pattrib->nr_frags; curfragnum++) {
+ if ((curfragnum+1) == pattrib->nr_frags) { /* 4 the last fragment */
+ length = pattrib->last_txcmdsz-pattrib->hdrlen-pattrib->iv_len-pattrib->icv_len;
+
+ aes_cipher(prwskey, pattrib->hdrlen, pframe, length);
+ } else {
+ length = pxmitpriv->frag_len-pattrib->hdrlen-pattrib->iv_len-pattrib->icv_len;
+
+ aes_cipher(prwskey, pattrib->hdrlen, pframe, length);
+ pframe += pxmitpriv->frag_len;
+ pframe = (u8 *)RND4((SIZE_PTR)(pframe));
+ }
+ }
+
+ AES_SW_ENC_CNT_INC(psecuritypriv, pattrib->ra);
+ }
+ return res;
+}
+
+static sint aes_decipher(u8 *key, uint hdrlen,
+ u8 *pframe, uint plen)
+{
+ static u8 message[MAX_MSG_SIZE];
+ uint qc_exists, a4_exists, i, j, payload_remainder,
+ num_blocks, payload_index;
+ sint res = _SUCCESS;
+ u8 pn_vector[6];
+ u8 mic_iv[16];
+ u8 mic_header1[16];
+ u8 mic_header2[16];
+ u8 ctr_preload[16];
+
+ /* Intermediate Buffers */
+ u8 chain_buffer[16];
+ u8 aes_out[16];
+ u8 padded_buffer[16];
+ u8 mic[8];
+
+
+/* uint offset = 0; */
+ uint frtype = GetFrameType(pframe);
+ uint frsubtype = GetFrameSubType(pframe);
+
+ frsubtype = frsubtype>>4;
+
+
+ memset((void *)mic_iv, 0, 16);
+ memset((void *)mic_header1, 0, 16);
+ memset((void *)mic_header2, 0, 16);
+ memset((void *)ctr_preload, 0, 16);
+ memset((void *)chain_buffer, 0, 16);
+ memset((void *)aes_out, 0, 16);
+ memset((void *)padded_buffer, 0, 16);
+
+ /* start to decrypt the payload */
+
+ num_blocks = (plen-8) / 16; /* plen includes the LLC, the payload and the MIC */
+
+ payload_remainder = (plen-8) % 16;
+
+ pn_vector[0] = pframe[hdrlen];
+ pn_vector[1] = pframe[hdrlen+1];
+ pn_vector[2] = pframe[hdrlen+4];
+ pn_vector[3] = pframe[hdrlen+5];
+ pn_vector[4] = pframe[hdrlen+6];
+ pn_vector[5] = pframe[hdrlen+7];
+
+ if ((hdrlen == WLAN_HDR_A3_LEN) || (hdrlen == WLAN_HDR_A3_QOS_LEN))
+ a4_exists = 0;
+ else
+ a4_exists = 1;
+
+ if (((frtype|frsubtype) == WIFI_DATA_CFACK) ||
+ ((frtype|frsubtype) == WIFI_DATA_CFPOLL) ||
+ ((frtype|frsubtype) == WIFI_DATA_CFACKPOLL)) {
+ qc_exists = 1;
+ if (hdrlen != WLAN_HDR_A3_QOS_LEN) {
+ hdrlen += 2;
+ }
+ } else if ((frtype == WIFI_DATA) && /* only for data packets; added for CONFIG_IEEE80211W, non-11w frames can also use this */
+ ((frsubtype == 0x08) ||
+ (frsubtype == 0x09) ||
+ (frsubtype == 0x0a) ||
+ (frsubtype == 0x0b))) {
+ if (hdrlen != WLAN_HDR_A3_QOS_LEN) {
+ hdrlen += 2;
+ }
+ qc_exists = 1;
+ } else
+ qc_exists = 0;
+
+
+ /* now, decrypt pframe with hdrlen offset and plen long */
+
+ payload_index = hdrlen + 8; /* 8 is for extiv */
+
+ for (i = 0; i < num_blocks; i++) {
+ construct_ctr_preload(
+ ctr_preload,
+ a4_exists,
+ qc_exists,
+ pframe,
+ pn_vector,
+ i+1,
+ frtype /* add for CONFIG_IEEE80211W, none 11w also can use */
+ );
+
+ aes128k128d(key, ctr_preload, aes_out);
+ bitwise_xor(aes_out, &pframe[payload_index], chain_buffer);
+
+ for (j = 0; j < 16; j++)
+ pframe[payload_index++] = chain_buffer[j];
+ }
+
+ if (payload_remainder > 0) {
+ /* If there is a short final block, then pad it,*/
+ /* encrypt it and copy the unpadded part back */
+ construct_ctr_preload(
+ ctr_preload,
+ a4_exists,
+ qc_exists,
+ pframe,
+ pn_vector,
+ num_blocks+1,
+ frtype /* add for CONFIG_IEEE80211W, none 11w also can use */
+ );
+
+ for (j = 0; j < 16; j++)
+ padded_buffer[j] = 0x00;
+ for (j = 0; j < payload_remainder; j++) {
+ padded_buffer[j] = pframe[payload_index+j];
+ }
+ aes128k128d(key, ctr_preload, aes_out);
+ bitwise_xor(aes_out, padded_buffer, chain_buffer);
+ for (j = 0; j < payload_remainder; j++)
+ pframe[payload_index++] = chain_buffer[j];
+ }
+
+ /* start to calculate the mic */
+ if ((hdrlen + plen+8) <= MAX_MSG_SIZE)
+ memcpy((void *)message, pframe, (hdrlen + plen+8)); /* 8 is for ext iv len */
+
+
+ pn_vector[0] = pframe[hdrlen];
+ pn_vector[1] = pframe[hdrlen+1];
+ pn_vector[2] = pframe[hdrlen+4];
+ pn_vector[3] = pframe[hdrlen+5];
+ pn_vector[4] = pframe[hdrlen+6];
+ pn_vector[5] = pframe[hdrlen+7];
+
+
+
+ construct_mic_iv(
+ mic_iv,
+ qc_exists,
+ a4_exists,
+ message,
+ plen-8,
+ pn_vector,
+ frtype /* add for CONFIG_IEEE80211W, none 11w also can use */
+ );
+
+ construct_mic_header1(
+ mic_header1,
+ hdrlen,
+ message,
+ frtype /* add for CONFIG_IEEE80211W, none 11w also can use */
+ );
+ construct_mic_header2(
+ mic_header2,
+ message,
+ a4_exists,
+ qc_exists
+ );
+
+
+ payload_remainder = (plen-8) % 16;
+ num_blocks = (plen-8) / 16;
+
+ /* Find start of payload */
+ payload_index = (hdrlen + 8);
+
+ /* Calculate MIC */
+ aes128k128d(key, mic_iv, aes_out);
+ bitwise_xor(aes_out, mic_header1, chain_buffer);
+ aes128k128d(key, chain_buffer, aes_out);
+ bitwise_xor(aes_out, mic_header2, chain_buffer);
+ aes128k128d(key, chain_buffer, aes_out);
+
+ for (i = 0; i < num_blocks; i++) {
+ bitwise_xor(aes_out, &message[payload_index], chain_buffer);
+
+ payload_index += 16;
+ aes128k128d(key, chain_buffer, aes_out);
+ }
+
+ /* Add on the final payload block if it needs padding */
+ if (payload_remainder > 0) {
+ for (j = 0; j < 16; j++)
+ padded_buffer[j] = 0x00;
+ for (j = 0; j < payload_remainder; j++) {
+ padded_buffer[j] = message[payload_index++];
+ }
+ bitwise_xor(aes_out, padded_buffer, chain_buffer);
+ aes128k128d(key, chain_buffer, aes_out);
+
+ }
+
+ for (j = 0; j < 8; j++)
+ mic[j] = aes_out[j];
+
+ /* Insert MIC into payload */
+ for (j = 0; j < 8; j++)
+ message[payload_index+j] = mic[j];
+
+ payload_index = hdrlen + 8;
+ for (i = 0; i < num_blocks; i++) {
+ construct_ctr_preload(
+ ctr_preload,
+ a4_exists,
+ qc_exists,
+ message,
+ pn_vector,
+ i+1,
+ frtype
+ ); /* add for CONFIG_IEEE80211W, none 11w also can use */
+ aes128k128d(key, ctr_preload, aes_out);
+ bitwise_xor(aes_out, &message[payload_index], chain_buffer);
+ for (j = 0; j < 16; j++)
+ message[payload_index++] = chain_buffer[j];
+ }
+
+ if (payload_remainder > 0) {
+ /* If there is a short final block, then pad it,*/
+ /* encrypt it and copy the unpadded part back */
+ construct_ctr_preload(
+ ctr_preload,
+ a4_exists,
+ qc_exists,
+ message,
+ pn_vector,
+ num_blocks+1,
+ frtype
+ ); /* add for CONFIG_IEEE80211W, none 11w also can use */
+
+ for (j = 0; j < 16; j++)
+ padded_buffer[j] = 0x00;
+ for (j = 0; j < payload_remainder; j++) {
+ padded_buffer[j] = message[payload_index+j];
+ }
+ aes128k128d(key, ctr_preload, aes_out);
+ bitwise_xor(aes_out, padded_buffer, chain_buffer);
+ for (j = 0; j < payload_remainder; j++)
+ message[payload_index++] = chain_buffer[j];
+ }
+
+ /* Encrypt the MIC */
+ construct_ctr_preload(
+ ctr_preload,
+ a4_exists,
+ qc_exists,
+ message,
+ pn_vector,
+ 0,
+ frtype
+ ); /* add for CONFIG_IEEE80211W, none 11w also can use */
+
+ for (j = 0; j < 16; j++)
+ padded_buffer[j] = 0x00;
+ for (j = 0; j < 8; j++) {
+ padded_buffer[j] = message[j+hdrlen+8+plen-8];
+ }
+
+ aes128k128d(key, ctr_preload, aes_out);
+ bitwise_xor(aes_out, padded_buffer, chain_buffer);
+ for (j = 0; j < 8; j++)
+ message[payload_index++] = chain_buffer[j];
+
+ /* compare the mic */
+ for (i = 0; i < 8; i++) {
+ if (pframe[hdrlen+8+plen-8+i] != message[hdrlen+8+plen-8+i]) {
+ RT_TRACE(_module_rtl871x_security_c_, _drv_err_, ("aes_decipher:mic check error mic[%d]: pframe(%x) != message(%x)\n",
+ i, pframe[hdrlen+8+plen-8+i], message[hdrlen+8+plen-8+i]));
+ DBG_871X("aes_decipher:mic check error mic[%d]: pframe(%x) != message(%x)\n",
+ i, pframe[hdrlen+8+plen-8+i], message[hdrlen+8+plen-8+i]);
+ res = _FAIL;
+ }
+ }
+ return res;
+}
+
+u32 rtw_aes_decrypt(struct adapter *padapter, u8 *precvframe)
+{ /* exclude ICV */
+
+
+ /*static*/
+/* unsigned char message[MAX_MSG_SIZE]; */
+
+
+ /* Intermediate Buffers */
+
+
+ sint length;
+ u8 *pframe, *prwskey; /* *payload,*iv */
+ struct sta_info *stainfo;
+ struct rx_pkt_attrib *prxattrib = &((union recv_frame *)precvframe)->u.hdr.attrib;
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
+/* struct recv_priv *precvpriv =&padapter->recvpriv; */
+ u32 res = _SUCCESS;
+
+ pframe = (unsigned char *)((union recv_frame *)precvframe)->u.hdr.rx_data;
+ /* 4 start to decrypt recvframe */
+ if ((prxattrib->encrypt == _AES_)) {
+
+ stainfo = rtw_get_stainfo(&padapter->stapriv, &prxattrib->ta[0]);
+ if (stainfo != NULL) {
+ RT_TRACE(_module_rtl871x_security_c_, _drv_err_, ("rtw_aes_decrypt: stainfo!= NULL!!!\n"));
+
+ if (IS_MCAST(prxattrib->ra)) {
+ static unsigned long start = 0;
+ static u32 no_gkey_bc_cnt = 0;
+ static u32 no_gkey_mc_cnt = 0;
+
+ /* DBG_871X("rx bc/mc packets, to perform sw rtw_aes_decrypt\n"); */
+ /* prwskey = psecuritypriv->dot118021XGrpKey[psecuritypriv->dot118021XGrpKeyid].skey; */
+ if (psecuritypriv->binstallGrpkey == false) {
+ res = _FAIL;
+
+ if (start == 0)
+ start = jiffies;
+
+ if (is_broadcast_mac_addr(prxattrib->ra))
+ no_gkey_bc_cnt++;
+ else
+ no_gkey_mc_cnt++;
+
+ if (jiffies_to_msecs(jiffies - start) > 1000) {
+ if (no_gkey_bc_cnt || no_gkey_mc_cnt) {
+ DBG_871X_LEVEL(_drv_always_, FUNC_ADPT_FMT" no_gkey_bc_cnt:%u, no_gkey_mc_cnt:%u\n",
+ FUNC_ADPT_ARG(padapter), no_gkey_bc_cnt, no_gkey_mc_cnt);
+ }
+ start = jiffies;
+ no_gkey_bc_cnt = 0;
+ no_gkey_mc_cnt = 0;
+ }
+
+ goto exit;
+ }
+
+ if (no_gkey_bc_cnt || no_gkey_mc_cnt) {
+ DBG_871X_LEVEL(_drv_always_, FUNC_ADPT_FMT" gkey installed. no_gkey_bc_cnt:%u, no_gkey_mc_cnt:%u\n",
+ FUNC_ADPT_ARG(padapter), no_gkey_bc_cnt, no_gkey_mc_cnt);
+ }
+ start = 0;
+ no_gkey_bc_cnt = 0;
+ no_gkey_mc_cnt = 0;
+
+ prwskey = psecuritypriv->dot118021XGrpKey[prxattrib->key_index].skey;
+ if (psecuritypriv->dot118021XGrpKeyid != prxattrib->key_index) {
+ DBG_871X("not match packet_index =%d, install_index =%d\n"
+ , prxattrib->key_index, psecuritypriv->dot118021XGrpKeyid);
+ res = _FAIL;
+ goto exit;
+ }
+ } else
+ prwskey = &stainfo->dot118021x_UncstKey.skey[0];
+
+
+ length = ((union recv_frame *)precvframe)->u.hdr.len-prxattrib->hdrlen-prxattrib->iv_len;
+
+ res = aes_decipher(prwskey, prxattrib->hdrlen, pframe, length);
+
+ AES_SW_DEC_CNT_INC(psecuritypriv, prxattrib->ra);
+ } else {
+ RT_TRACE(_module_rtl871x_security_c_, _drv_err_, ("rtw_aes_decrypt: stainfo == NULL!!!\n"));
+ res = _FAIL;
+ }
+ }
+exit:
+ return res;
+}
+
+u32 rtw_BIP_verify(struct adapter *padapter, u8 *precvframe)
+{
+ struct rx_pkt_attrib *pattrib = &((union recv_frame *)precvframe)->u.hdr.attrib;
+ u8 *pframe;
+ u8 *BIP_AAD, *p;
+ u32 res = _FAIL;
+ uint len, ori_len;
+ struct ieee80211_hdr *pwlanhdr;
+ u8 mic[16];
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ __le16 le_tmp;
+ __le64 le_tmp64;
+
+ ori_len = pattrib->pkt_len-WLAN_HDR_A3_LEN+BIP_AAD_SIZE;
+ BIP_AAD = rtw_zmalloc(ori_len);
+
+ if (BIP_AAD == NULL) {
+ DBG_871X("BIP AAD allocate fail\n");
+ return _FAIL;
+ }
+ /* PKT start */
+ pframe = (unsigned char *)((union recv_frame *)precvframe)->u.hdr.rx_data;
+ /* mapping to wlan header */
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
+ /* save the frame body + MME */
+ memcpy(BIP_AAD+BIP_AAD_SIZE, pframe+WLAN_HDR_A3_LEN, pattrib->pkt_len-WLAN_HDR_A3_LEN);
+ /* find MME IE pointer */
+ p = rtw_get_ie(BIP_AAD+BIP_AAD_SIZE, _MME_IE_, &len, pattrib->pkt_len-WLAN_HDR_A3_LEN);
+ /* Baron */
+ if (p) {
+ u16 keyid = 0;
+ u64 temp_ipn = 0;
+ /* save packet number */
+ memcpy(&le_tmp64, p+4, 6);
+ temp_ipn = le64_to_cpu(le_tmp64);
+ /* the BIP packet number must be bigger than that of the previous BIP packet */
+ if (temp_ipn <= pmlmeext->mgnt_80211w_IPN_rx) {
+ DBG_871X("replay BIP packet\n");
+ goto BIP_exit;
+ }
+ /* copy key index */
+ memcpy(&le_tmp, p+2, 2);
+ keyid = le16_to_cpu(le_tmp);
+ if (keyid != padapter->securitypriv.dot11wBIPKeyid) {
+ DBG_871X("BIP key index error!\n");
+ goto BIP_exit;
+ }
+ /* clear the MIC field of MME to zero */
+ memset(p+2+len-8, 0, 8);
+
+ /* construct AAD: copy the frame control field */
+ memcpy(BIP_AAD, &pwlanhdr->frame_control, 2);
+ ClearRetry(BIP_AAD);
+ ClearPwrMgt(BIP_AAD);
+ ClearMData(BIP_AAD);
+ /* construct AAD: copy address 1 to address 3 */
+ memcpy(BIP_AAD+2, pwlanhdr->addr1, 18);
+
+ if (omac1_aes_128(padapter->securitypriv.dot11wBIPKey[padapter->securitypriv.dot11wBIPKeyid].skey
+ , BIP_AAD, ori_len, mic))
+ goto BIP_exit;
+
+ /* the MIC field should be the last 8 bytes of the packet (excluding the FCS) */
+ if (!memcmp(mic, pframe+pattrib->pkt_len-8, 8)) {
+ pmlmeext->mgnt_80211w_IPN_rx = temp_ipn;
+ res = _SUCCESS;
+ } else
+ DBG_871X("BIP MIC error!\n");
+
+ } else
+ res = RTW_RX_HANDLED;
+BIP_exit:
+
+ kfree(BIP_AAD);
+ return res;
+}
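For reference, the AAD assembled above follows the 802.11w BIP construction: the frame control field with the Retry, Power Management and More Data bits cleared, then addresses 1-3, then the management frame body with the 8-byte MME MIC zeroed. A minimal sketch of the fixed-size header part, assuming BIP_AAD_SIZE covers exactly these fields:

	struct bip_aad_hdr {          /* hypothetical layout mirroring the memcpy()s above */
		u8 frame_control[2];  /* Retry/PwrMgt/MoreData cleared */
		u8 addr1[ETH_ALEN];
		u8 addr2[ETH_ALEN];
		u8 addr3[ETH_ALEN];
	};                            /* 20 bytes; BIP_AAD_SIZE is assumed to equal this */
	/* full MAC input = struct bip_aad_hdr || frame body with the MME MIC field zeroed */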
+
+/* AES tables*/
+const u32 Te0[256] = {
+ 0xc66363a5U, 0xf87c7c84U, 0xee777799U, 0xf67b7b8dU,
+ 0xfff2f20dU, 0xd66b6bbdU, 0xde6f6fb1U, 0x91c5c554U,
+ 0x60303050U, 0x02010103U, 0xce6767a9U, 0x562b2b7dU,
+ 0xe7fefe19U, 0xb5d7d762U, 0x4dababe6U, 0xec76769aU,
+ 0x8fcaca45U, 0x1f82829dU, 0x89c9c940U, 0xfa7d7d87U,
+ 0xeffafa15U, 0xb25959ebU, 0x8e4747c9U, 0xfbf0f00bU,
+ 0x41adadecU, 0xb3d4d467U, 0x5fa2a2fdU, 0x45afafeaU,
+ 0x239c9cbfU, 0x53a4a4f7U, 0xe4727296U, 0x9bc0c05bU,
+ 0x75b7b7c2U, 0xe1fdfd1cU, 0x3d9393aeU, 0x4c26266aU,
+ 0x6c36365aU, 0x7e3f3f41U, 0xf5f7f702U, 0x83cccc4fU,
+ 0x6834345cU, 0x51a5a5f4U, 0xd1e5e534U, 0xf9f1f108U,
+ 0xe2717193U, 0xabd8d873U, 0x62313153U, 0x2a15153fU,
+ 0x0804040cU, 0x95c7c752U, 0x46232365U, 0x9dc3c35eU,
+ 0x30181828U, 0x379696a1U, 0x0a05050fU, 0x2f9a9ab5U,
+ 0x0e070709U, 0x24121236U, 0x1b80809bU, 0xdfe2e23dU,
+ 0xcdebeb26U, 0x4e272769U, 0x7fb2b2cdU, 0xea75759fU,
+ 0x1209091bU, 0x1d83839eU, 0x582c2c74U, 0x341a1a2eU,
+ 0x361b1b2dU, 0xdc6e6eb2U, 0xb45a5aeeU, 0x5ba0a0fbU,
+ 0xa45252f6U, 0x763b3b4dU, 0xb7d6d661U, 0x7db3b3ceU,
+ 0x5229297bU, 0xdde3e33eU, 0x5e2f2f71U, 0x13848497U,
+ 0xa65353f5U, 0xb9d1d168U, 0x00000000U, 0xc1eded2cU,
+ 0x40202060U, 0xe3fcfc1fU, 0x79b1b1c8U, 0xb65b5bedU,
+ 0xd46a6abeU, 0x8dcbcb46U, 0x67bebed9U, 0x7239394bU,
+ 0x944a4adeU, 0x984c4cd4U, 0xb05858e8U, 0x85cfcf4aU,
+ 0xbbd0d06bU, 0xc5efef2aU, 0x4faaaae5U, 0xedfbfb16U,
+ 0x864343c5U, 0x9a4d4dd7U, 0x66333355U, 0x11858594U,
+ 0x8a4545cfU, 0xe9f9f910U, 0x04020206U, 0xfe7f7f81U,
+ 0xa05050f0U, 0x783c3c44U, 0x259f9fbaU, 0x4ba8a8e3U,
+ 0xa25151f3U, 0x5da3a3feU, 0x804040c0U, 0x058f8f8aU,
+ 0x3f9292adU, 0x219d9dbcU, 0x70383848U, 0xf1f5f504U,
+ 0x63bcbcdfU, 0x77b6b6c1U, 0xafdada75U, 0x42212163U,
+ 0x20101030U, 0xe5ffff1aU, 0xfdf3f30eU, 0xbfd2d26dU,
+ 0x81cdcd4cU, 0x180c0c14U, 0x26131335U, 0xc3ecec2fU,
+ 0xbe5f5fe1U, 0x359797a2U, 0x884444ccU, 0x2e171739U,
+ 0x93c4c457U, 0x55a7a7f2U, 0xfc7e7e82U, 0x7a3d3d47U,
+ 0xc86464acU, 0xba5d5de7U, 0x3219192bU, 0xe6737395U,
+ 0xc06060a0U, 0x19818198U, 0x9e4f4fd1U, 0xa3dcdc7fU,
+ 0x44222266U, 0x542a2a7eU, 0x3b9090abU, 0x0b888883U,
+ 0x8c4646caU, 0xc7eeee29U, 0x6bb8b8d3U, 0x2814143cU,
+ 0xa7dede79U, 0xbc5e5ee2U, 0x160b0b1dU, 0xaddbdb76U,
+ 0xdbe0e03bU, 0x64323256U, 0x743a3a4eU, 0x140a0a1eU,
+ 0x924949dbU, 0x0c06060aU, 0x4824246cU, 0xb85c5ce4U,
+ 0x9fc2c25dU, 0xbdd3d36eU, 0x43acacefU, 0xc46262a6U,
+ 0x399191a8U, 0x319595a4U, 0xd3e4e437U, 0xf279798bU,
+ 0xd5e7e732U, 0x8bc8c843U, 0x6e373759U, 0xda6d6db7U,
+ 0x018d8d8cU, 0xb1d5d564U, 0x9c4e4ed2U, 0x49a9a9e0U,
+ 0xd86c6cb4U, 0xac5656faU, 0xf3f4f407U, 0xcfeaea25U,
+ 0xca6565afU, 0xf47a7a8eU, 0x47aeaee9U, 0x10080818U,
+ 0x6fbabad5U, 0xf0787888U, 0x4a25256fU, 0x5c2e2e72U,
+ 0x381c1c24U, 0x57a6a6f1U, 0x73b4b4c7U, 0x97c6c651U,
+ 0xcbe8e823U, 0xa1dddd7cU, 0xe874749cU, 0x3e1f1f21U,
+ 0x964b4bddU, 0x61bdbddcU, 0x0d8b8b86U, 0x0f8a8a85U,
+ 0xe0707090U, 0x7c3e3e42U, 0x71b5b5c4U, 0xcc6666aaU,
+ 0x904848d8U, 0x06030305U, 0xf7f6f601U, 0x1c0e0e12U,
+ 0xc26161a3U, 0x6a35355fU, 0xae5757f9U, 0x69b9b9d0U,
+ 0x17868691U, 0x99c1c158U, 0x3a1d1d27U, 0x279e9eb9U,
+ 0xd9e1e138U, 0xebf8f813U, 0x2b9898b3U, 0x22111133U,
+ 0xd26969bbU, 0xa9d9d970U, 0x078e8e89U, 0x339494a7U,
+ 0x2d9b9bb6U, 0x3c1e1e22U, 0x15878792U, 0xc9e9e920U,
+ 0x87cece49U, 0xaa5555ffU, 0x50282878U, 0xa5dfdf7aU,
+ 0x038c8c8fU, 0x59a1a1f8U, 0x09898980U, 0x1a0d0d17U,
+ 0x65bfbfdaU, 0xd7e6e631U, 0x844242c6U, 0xd06868b8U,
+ 0x824141c3U, 0x299999b0U, 0x5a2d2d77U, 0x1e0f0f11U,
+ 0x7bb0b0cbU, 0xa85454fcU, 0x6dbbbbd6U, 0x2c16163aU,
+};
+const u32 Td0[256] = {
+ 0x51f4a750U, 0x7e416553U, 0x1a17a4c3U, 0x3a275e96U,
+ 0x3bab6bcbU, 0x1f9d45f1U, 0xacfa58abU, 0x4be30393U,
+ 0x2030fa55U, 0xad766df6U, 0x88cc7691U, 0xf5024c25U,
+ 0x4fe5d7fcU, 0xc52acbd7U, 0x26354480U, 0xb562a38fU,
+ 0xdeb15a49U, 0x25ba1b67U, 0x45ea0e98U, 0x5dfec0e1U,
+ 0xc32f7502U, 0x814cf012U, 0x8d4697a3U, 0x6bd3f9c6U,
+ 0x038f5fe7U, 0x15929c95U, 0xbf6d7aebU, 0x955259daU,
+ 0xd4be832dU, 0x587421d3U, 0x49e06929U, 0x8ec9c844U,
+ 0x75c2896aU, 0xf48e7978U, 0x99583e6bU, 0x27b971ddU,
+ 0xbee14fb6U, 0xf088ad17U, 0xc920ac66U, 0x7dce3ab4U,
+ 0x63df4a18U, 0xe51a3182U, 0x97513360U, 0x62537f45U,
+ 0xb16477e0U, 0xbb6bae84U, 0xfe81a01cU, 0xf9082b94U,
+ 0x70486858U, 0x8f45fd19U, 0x94de6c87U, 0x527bf8b7U,
+ 0xab73d323U, 0x724b02e2U, 0xe31f8f57U, 0x6655ab2aU,
+ 0xb2eb2807U, 0x2fb5c203U, 0x86c57b9aU, 0xd33708a5U,
+ 0x302887f2U, 0x23bfa5b2U, 0x02036abaU, 0xed16825cU,
+ 0x8acf1c2bU, 0xa779b492U, 0xf307f2f0U, 0x4e69e2a1U,
+ 0x65daf4cdU, 0x0605bed5U, 0xd134621fU, 0xc4a6fe8aU,
+ 0x342e539dU, 0xa2f355a0U, 0x058ae132U, 0xa4f6eb75U,
+ 0x0b83ec39U, 0x4060efaaU, 0x5e719f06U, 0xbd6e1051U,
+ 0x3e218af9U, 0x96dd063dU, 0xdd3e05aeU, 0x4de6bd46U,
+ 0x91548db5U, 0x71c45d05U, 0x0406d46fU, 0x605015ffU,
+ 0x1998fb24U, 0xd6bde997U, 0x894043ccU, 0x67d99e77U,
+ 0xb0e842bdU, 0x07898b88U, 0xe7195b38U, 0x79c8eedbU,
+ 0xa17c0a47U, 0x7c420fe9U, 0xf8841ec9U, 0x00000000U,
+ 0x09808683U, 0x322bed48U, 0x1e1170acU, 0x6c5a724eU,
+ 0xfd0efffbU, 0x0f853856U, 0x3daed51eU, 0x362d3927U,
+ 0x0a0fd964U, 0x685ca621U, 0x9b5b54d1U, 0x24362e3aU,
+ 0x0c0a67b1U, 0x9357e70fU, 0xb4ee96d2U, 0x1b9b919eU,
+ 0x80c0c54fU, 0x61dc20a2U, 0x5a774b69U, 0x1c121a16U,
+ 0xe293ba0aU, 0xc0a02ae5U, 0x3c22e043U, 0x121b171dU,
+ 0x0e090d0bU, 0xf28bc7adU, 0x2db6a8b9U, 0x141ea9c8U,
+ 0x57f11985U, 0xaf75074cU, 0xee99ddbbU, 0xa37f60fdU,
+ 0xf701269fU, 0x5c72f5bcU, 0x44663bc5U, 0x5bfb7e34U,
+ 0x8b432976U, 0xcb23c6dcU, 0xb6edfc68U, 0xb8e4f163U,
+ 0xd731dccaU, 0x42638510U, 0x13972240U, 0x84c61120U,
+ 0x854a247dU, 0xd2bb3df8U, 0xaef93211U, 0xc729a16dU,
+ 0x1d9e2f4bU, 0xdcb230f3U, 0x0d8652ecU, 0x77c1e3d0U,
+ 0x2bb3166cU, 0xa970b999U, 0x119448faU, 0x47e96422U,
+ 0xa8fc8cc4U, 0xa0f03f1aU, 0x567d2cd8U, 0x223390efU,
+ 0x87494ec7U, 0xd938d1c1U, 0x8ccaa2feU, 0x98d40b36U,
+ 0xa6f581cfU, 0xa57ade28U, 0xdab78e26U, 0x3fadbfa4U,
+ 0x2c3a9de4U, 0x5078920dU, 0x6a5fcc9bU, 0x547e4662U,
+ 0xf68d13c2U, 0x90d8b8e8U, 0x2e39f75eU, 0x82c3aff5U,
+ 0x9f5d80beU, 0x69d0937cU, 0x6fd52da9U, 0xcf2512b3U,
+ 0xc8ac993bU, 0x10187da7U, 0xe89c636eU, 0xdb3bbb7bU,
+ 0xcd267809U, 0x6e5918f4U, 0xec9ab701U, 0x834f9aa8U,
+ 0xe6956e65U, 0xaaffe67eU, 0x21bccf08U, 0xef15e8e6U,
+ 0xbae79bd9U, 0x4a6f36ceU, 0xea9f09d4U, 0x29b07cd6U,
+ 0x31a4b2afU, 0x2a3f2331U, 0xc6a59430U, 0x35a266c0U,
+ 0x744ebc37U, 0xfc82caa6U, 0xe090d0b0U, 0x33a7d815U,
+ 0xf104984aU, 0x41ecdaf7U, 0x7fcd500eU, 0x1791f62fU,
+ 0x764dd68dU, 0x43efb04dU, 0xccaa4d54U, 0xe49604dfU,
+ 0x9ed1b5e3U, 0x4c6a881bU, 0xc12c1fb8U, 0x4665517fU,
+ 0x9d5eea04U, 0x018c355dU, 0xfa877473U, 0xfb0b412eU,
+ 0xb3671d5aU, 0x92dbd252U, 0xe9105633U, 0x6dd64713U,
+ 0x9ad7618cU, 0x37a10c7aU, 0x59f8148eU, 0xeb133c89U,
+ 0xcea927eeU, 0xb761c935U, 0xe11ce5edU, 0x7a47b13cU,
+ 0x9cd2df59U, 0x55f2733fU, 0x1814ce79U, 0x73c737bfU,
+ 0x53f7cdeaU, 0x5ffdaa5bU, 0xdf3d6f14U, 0x7844db86U,
+ 0xcaaff381U, 0xb968c43eU, 0x3824342cU, 0xc2a3405fU,
+ 0x161dc372U, 0xbce2250cU, 0x283c498bU, 0xff0d9541U,
+ 0x39a80171U, 0x080cb3deU, 0xd8b4e49cU, 0x6456c190U,
+ 0x7bcb8461U, 0xd532b670U, 0x486c5c74U, 0xd0b85742U,
+};
+const u8 Td4s[256] = {
+ 0x52U, 0x09U, 0x6aU, 0xd5U, 0x30U, 0x36U, 0xa5U, 0x38U,
+ 0xbfU, 0x40U, 0xa3U, 0x9eU, 0x81U, 0xf3U, 0xd7U, 0xfbU,
+ 0x7cU, 0xe3U, 0x39U, 0x82U, 0x9bU, 0x2fU, 0xffU, 0x87U,
+ 0x34U, 0x8eU, 0x43U, 0x44U, 0xc4U, 0xdeU, 0xe9U, 0xcbU,
+ 0x54U, 0x7bU, 0x94U, 0x32U, 0xa6U, 0xc2U, 0x23U, 0x3dU,
+ 0xeeU, 0x4cU, 0x95U, 0x0bU, 0x42U, 0xfaU, 0xc3U, 0x4eU,
+ 0x08U, 0x2eU, 0xa1U, 0x66U, 0x28U, 0xd9U, 0x24U, 0xb2U,
+ 0x76U, 0x5bU, 0xa2U, 0x49U, 0x6dU, 0x8bU, 0xd1U, 0x25U,
+ 0x72U, 0xf8U, 0xf6U, 0x64U, 0x86U, 0x68U, 0x98U, 0x16U,
+ 0xd4U, 0xa4U, 0x5cU, 0xccU, 0x5dU, 0x65U, 0xb6U, 0x92U,
+ 0x6cU, 0x70U, 0x48U, 0x50U, 0xfdU, 0xedU, 0xb9U, 0xdaU,
+ 0x5eU, 0x15U, 0x46U, 0x57U, 0xa7U, 0x8dU, 0x9dU, 0x84U,
+ 0x90U, 0xd8U, 0xabU, 0x00U, 0x8cU, 0xbcU, 0xd3U, 0x0aU,
+ 0xf7U, 0xe4U, 0x58U, 0x05U, 0xb8U, 0xb3U, 0x45U, 0x06U,
+ 0xd0U, 0x2cU, 0x1eU, 0x8fU, 0xcaU, 0x3fU, 0x0fU, 0x02U,
+ 0xc1U, 0xafU, 0xbdU, 0x03U, 0x01U, 0x13U, 0x8aU, 0x6bU,
+ 0x3aU, 0x91U, 0x11U, 0x41U, 0x4fU, 0x67U, 0xdcU, 0xeaU,
+ 0x97U, 0xf2U, 0xcfU, 0xceU, 0xf0U, 0xb4U, 0xe6U, 0x73U,
+ 0x96U, 0xacU, 0x74U, 0x22U, 0xe7U, 0xadU, 0x35U, 0x85U,
+ 0xe2U, 0xf9U, 0x37U, 0xe8U, 0x1cU, 0x75U, 0xdfU, 0x6eU,
+ 0x47U, 0xf1U, 0x1aU, 0x71U, 0x1dU, 0x29U, 0xc5U, 0x89U,
+ 0x6fU, 0xb7U, 0x62U, 0x0eU, 0xaaU, 0x18U, 0xbeU, 0x1bU,
+ 0xfcU, 0x56U, 0x3eU, 0x4bU, 0xc6U, 0xd2U, 0x79U, 0x20U,
+ 0x9aU, 0xdbU, 0xc0U, 0xfeU, 0x78U, 0xcdU, 0x5aU, 0xf4U,
+ 0x1fU, 0xddU, 0xa8U, 0x33U, 0x88U, 0x07U, 0xc7U, 0x31U,
+ 0xb1U, 0x12U, 0x10U, 0x59U, 0x27U, 0x80U, 0xecU, 0x5fU,
+ 0x60U, 0x51U, 0x7fU, 0xa9U, 0x19U, 0xb5U, 0x4aU, 0x0dU,
+ 0x2dU, 0xe5U, 0x7aU, 0x9fU, 0x93U, 0xc9U, 0x9cU, 0xefU,
+ 0xa0U, 0xe0U, 0x3bU, 0x4dU, 0xaeU, 0x2aU, 0xf5U, 0xb0U,
+ 0xc8U, 0xebU, 0xbbU, 0x3cU, 0x83U, 0x53U, 0x99U, 0x61U,
+ 0x17U, 0x2bU, 0x04U, 0x7eU, 0xbaU, 0x77U, 0xd6U, 0x26U,
+ 0xe1U, 0x69U, 0x14U, 0x63U, 0x55U, 0x21U, 0x0cU, 0x7dU,
+};
+const u8 rcons[] = {
+ 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1B, 0x36
+ /* for 128-bit blocks, Rijndael never uses more than 10 rcon values */
+};
+
+/**
+ * Expand the 128-bit cipher key into the 44-word encryption key schedule.
+ *
+ * The round count is fixed at 10 for AES-128, so nothing is returned.
+ */
+static void rijndaelKeySetupEnc(u32 rk[/*44*/], const u8 cipherKey[])
+{
+ int i;
+ u32 temp;
+
+ rk[0] = GETU32(cipherKey);
+ rk[1] = GETU32(cipherKey + 4);
+ rk[2] = GETU32(cipherKey + 8);
+ rk[3] = GETU32(cipherKey + 12);
+ for (i = 0; i < 10; i++) {
+ temp = rk[3];
+ rk[4] = rk[0] ^
+ TE421(temp) ^ TE432(temp) ^ TE443(temp) ^ TE414(temp) ^
+ RCON(i);
+ rk[5] = rk[1] ^ rk[4];
+ rk[6] = rk[2] ^ rk[5];
+ rk[7] = rk[3] ^ rk[6];
+ rk += 4;
+ }
+}
+
+static void rijndaelEncrypt(u32 rk[/*44*/], u8 pt[16], u8 ct[16])
+{
+ u32 s0, s1, s2, s3, t0, t1, t2, t3;
+ int Nr = 10;
+ int r;
+
+ /*
+ * map byte array block to cipher state
+ * and add initial round key:
+ */
+ s0 = GETU32(pt) ^ rk[0];
+ s1 = GETU32(pt + 4) ^ rk[1];
+ s2 = GETU32(pt + 8) ^ rk[2];
+ s3 = GETU32(pt + 12) ^ rk[3];
+
+#define ROUND(i, d, s) \
+d##0 = TE0(s##0) ^ TE1(s##1) ^ TE2(s##2) ^ TE3(s##3) ^ rk[4 * i]; \
+d##1 = TE0(s##1) ^ TE1(s##2) ^ TE2(s##3) ^ TE3(s##0) ^ rk[4 * i + 1]; \
+d##2 = TE0(s##2) ^ TE1(s##3) ^ TE2(s##0) ^ TE3(s##1) ^ rk[4 * i + 2]; \
+d##3 = TE0(s##3) ^ TE1(s##0) ^ TE2(s##1) ^ TE3(s##2) ^ rk[4 * i + 3]
+
+ /* Nr - 1 full rounds: */
+ r = Nr >> 1;
+ for (;;) {
+ ROUND(1, t, s);
+ rk += 8;
+ if (--r == 0)
+ break;
+ ROUND(0, s, t);
+ }
+
+#undef ROUND
+
+ /*
+ * apply last round and
+ * map cipher state to byte array block:
+ */
+ s0 = TE41(t0) ^ TE42(t1) ^ TE43(t2) ^ TE44(t3) ^ rk[0];
+ PUTU32(ct, s0);
+ s1 = TE41(t1) ^ TE42(t2) ^ TE43(t3) ^ TE44(t0) ^ rk[1];
+ PUTU32(ct + 4, s1);
+ s2 = TE41(t2) ^ TE42(t3) ^ TE43(t0) ^ TE44(t1) ^ rk[2];
+ PUTU32(ct + 8, s2);
+ s3 = TE41(t3) ^ TE42(t0) ^ TE43(t1) ^ TE44(t2) ^ rk[3];
+ PUTU32(ct + 12, s3);
+}
+
+static void *aes_encrypt_init(u8 *key, size_t len)
+{
+ u32 *rk;
+ if (len != 16)
+ return NULL;
+ rk = (u32 *)rtw_malloc(AES_PRIV_SIZE);
+ if (rk == NULL)
+ return NULL;
+ rijndaelKeySetupEnc(rk, key);
+ return rk;
+}
+
+static void aes_128_encrypt(void *ctx, u8 *plain, u8 *crypt)
+{
+ rijndaelEncrypt(ctx, plain, crypt);
+}
+
+
+static void gf_mulx(u8 *pad)
+{
+ int i, carry;
+
+ carry = pad[0] & 0x80;
+ for (i = 0; i < AES_BLOCK_SIZE - 1; i++)
+ pad[i] = (pad[i] << 1) | (pad[i + 1] >> 7);
+
+ pad[AES_BLOCK_SIZE - 1] <<= 1;
+ if (carry)
+ pad[AES_BLOCK_SIZE - 1] ^= 0x87;
+}
+
+static void aes_encrypt_deinit(void *ctx)
+{
+ memset(ctx, 0, AES_PRIV_SIZE);
+ kfree(ctx);
+}
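Taken together, aes_encrypt_init(), aes_128_encrypt() and aes_encrypt_deinit() form a minimal single-block AES-128 ECB primitive. A usage sketch (the wrapper name and buffers are illustrative, not part of the driver):

	static int aes_128_ecb_one_block(u8 *key16, u8 *in16, u8 *out16)
	{
		void *ctx = aes_encrypt_init(key16, 16); /* builds the 44-word key schedule */

		if (!ctx)
			return -1;
		aes_128_encrypt(ctx, in16, out16);       /* one 16-byte block, no chaining */
		aes_encrypt_deinit(ctx);                 /* zeroes and frees the schedule */
		return 0;
	}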
+
+
+/**
+ * omac1_aes_128_vector - One-Key CBC MAC (OMAC1) hash with AES-128
+ * @key: 128-bit key for the hash operation
+ * @num_elem: Number of elements in the data vector
+ * @addr: Pointers to the data areas
+ * @len: Lengths of the data blocks
+ * @mac: Buffer for MAC (128 bits, i.e., 16 bytes)
+ * Returns: 0 on success, -1 on failure
+ *
+ * This is a mode for using block cipher (AES in this case) for authentication.
+ * OMAC1 was standardized with the name CMAC by NIST in a Special Publication
+ * (SP) 800-38B.
+ */
+static int omac1_aes_128_vector(u8 *key, size_t num_elem,
+ u8 *addr[], size_t *len, u8 *mac)
+{
+ void *ctx;
+ u8 cbc[AES_BLOCK_SIZE], pad[AES_BLOCK_SIZE];
+ u8 *pos, *end;
+ size_t i, e, left, total_len;
+
+ ctx = aes_encrypt_init(key, 16);
+ if (ctx == NULL)
+ return -1;
+ memset(cbc, 0, AES_BLOCK_SIZE);
+
+ total_len = 0;
+ for (e = 0; e < num_elem; e++)
+ total_len += len[e];
+ left = total_len;
+
+ e = 0;
+ pos = addr[0];
+ end = pos + len[0];
+
+ while (left >= AES_BLOCK_SIZE) {
+ for (i = 0; i < AES_BLOCK_SIZE; i++) {
+ cbc[i] ^= *pos++;
+ if (pos >= end) {
+ e++;
+ pos = addr[e];
+ end = pos + len[e];
+ }
+ }
+ if (left > AES_BLOCK_SIZE)
+ aes_128_encrypt(ctx, cbc, cbc);
+ left -= AES_BLOCK_SIZE;
+ }
+
+ memset(pad, 0, AES_BLOCK_SIZE);
+ aes_128_encrypt(ctx, pad, pad);
+ gf_mulx(pad);
+
+ if (left || total_len == 0) {
+ for (i = 0; i < left; i++) {
+ cbc[i] ^= *pos++;
+ if (pos >= end) {
+ e++;
+ pos = addr[e];
+ end = pos + len[e];
+ }
+ }
+ cbc[left] ^= 0x80;
+ gf_mulx(pad);
+ }
+
+ for (i = 0; i < AES_BLOCK_SIZE; i++)
+ pad[i] ^= cbc[i];
+ aes_128_encrypt(ctx, pad, mac);
+ aes_encrypt_deinit(ctx);
+ return 0;
+}
+
+
+/**
+ * omac1_aes_128 - One-Key CBC MAC (OMAC1) hash with AES-128 (aka AES-CMAC)
+ * @key: 128-bit key for the hash operation
+ * @data: Data buffer for which a MAC is determined
+ * @data_len: Length of data buffer in bytes
+ * @mac: Buffer for MAC (128 bits, i.e., 16 bytes)
+ * Returns: 0 on success, -1 on failure
+ *
+ * This is a mode for using block cipher (AES in this case) for authentication.
+ * OMAC1 was standardized with the name CMAC by NIST in a Special Publication
+ * (SP) 800-38B.
+ * Modified for CONFIG_IEEE80211W.
+ */
+int omac1_aes_128(u8 *key, u8 *data, size_t data_len, u8 *mac)
+{
+ return omac1_aes_128_vector(key, 1, &data, &data_len, mac);
+}
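A usage sketch of the wrapper above, as rtw_BIP_verify() uses it: the 16-byte AES-CMAC tag is written to the last argument, and the driver compares only its first 8 bytes against the MIC carried in the MME (buffer names below are illustrative):

	u8 bip_key[16];        /* e.g. dot11wBIPKey[keyid].skey */
	u8 aad_and_body[128];  /* normally the BIP_AAD buffer built above */
	u8 tag[16];

	if (omac1_aes_128(bip_key, aad_and_body, sizeof(aad_and_body), tag) == 0)
		/* tag[0..7] should match the 8-byte MME MIC field */;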
+
+/* Restore HW wep key setting according to key_mask */
+void rtw_sec_restore_wep_key(struct adapter *adapter)
+{
+ struct security_priv *securitypriv = &(adapter->securitypriv);
+ sint keyid;
+
+ if ((_WEP40_ == securitypriv->dot11PrivacyAlgrthm) || (_WEP104_ == securitypriv->dot11PrivacyAlgrthm)) {
+ for (keyid = 0; keyid < 4; keyid++) {
+ if (securitypriv->key_mask & BIT(keyid)) {
+ if (keyid == securitypriv->dot11PrivacyKeyIndex)
+ rtw_set_key(adapter, securitypriv, keyid, 1, false);
+ else
+ rtw_set_key(adapter, securitypriv, keyid, 0, false);
+ }
+ }
+ }
+}
+
+u8 rtw_handle_tkip_countermeasure(struct adapter *adapter, const char *caller)
+{
+ struct security_priv *securitypriv = &(adapter->securitypriv);
+ u8 status = _SUCCESS;
+
+ if (securitypriv->btkip_countermeasure == true) {
+ unsigned long passing_ms = jiffies_to_msecs(jiffies - securitypriv->btkip_countermeasure_time);
+ if (passing_ms > 60*1000) {
+ DBG_871X_LEVEL(_drv_always_, "%s("ADPT_FMT") countermeasure time:%lus > 60s\n",
+ caller, ADPT_ARG(adapter), passing_ms/1000);
+ securitypriv->btkip_countermeasure = false;
+ securitypriv->btkip_countermeasure_time = 0;
+ } else {
+ DBG_871X_LEVEL(_drv_always_, "%s("ADPT_FMT") countermeasure time:%lus < 60s\n",
+ caller, ADPT_ARG(adapter), passing_ms/1000);
+ status = _FAIL;
+ }
+ }
+
+ return status;
+}
diff --git a/drivers/staging/rtl8723bs/core/rtw_sta_mgt.c b/drivers/staging/rtl8723bs/core/rtw_sta_mgt.c
new file mode 100644
index 000000000000..cb43ec90a648
--- /dev/null
+++ b/drivers/staging/rtl8723bs/core/rtw_sta_mgt.c
@@ -0,0 +1,641 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#define _RTW_STA_MGT_C_
+
+#include <drv_types.h>
+#include <rtw_debug.h>
+
+void _rtw_init_stainfo(struct sta_info *psta);
+void _rtw_init_stainfo(struct sta_info *psta)
+{
+ memset((u8 *)psta, 0, sizeof(struct sta_info));
+
+ spin_lock_init(&psta->lock);
+ INIT_LIST_HEAD(&psta->list);
+ INIT_LIST_HEAD(&psta->hash_list);
+ /* INIT_LIST_HEAD(&psta->asoc_list); */
+ /* INIT_LIST_HEAD(&psta->sleep_list); */
+ /* INIT_LIST_HEAD(&psta->wakeup_list); */
+
+ _rtw_init_queue(&psta->sleep_q);
+ psta->sleepq_len = 0;
+
+ _rtw_init_sta_xmit_priv(&psta->sta_xmitpriv);
+ _rtw_init_sta_recv_priv(&psta->sta_recvpriv);
+
+ INIT_LIST_HEAD(&psta->asoc_list);
+
+ INIT_LIST_HEAD(&psta->auth_list);
+
+ psta->expire_to = 0;
+
+ psta->flags = 0;
+
+ psta->capability = 0;
+
+ psta->bpairwise_key_installed = false;
+
+ psta->nonerp_set = 0;
+ psta->no_short_slot_time_set = 0;
+ psta->no_short_preamble_set = 0;
+ psta->no_ht_gf_set = 0;
+ psta->no_ht_set = 0;
+ psta->ht_20mhz_set = 0;
+
+ psta->under_exist_checking = 0;
+
+ psta->keep_alive_trycnt = 0;
+}
+
+u32 _rtw_init_sta_priv(struct sta_priv *pstapriv)
+{
+ struct sta_info *psta;
+ s32 i;
+
+ pstapriv->pallocated_stainfo_buf = vzalloc(sizeof(struct sta_info) * NUM_STA+4);
+
+ if (!pstapriv->pallocated_stainfo_buf)
+ return _FAIL;
+
+ pstapriv->pstainfo_buf = pstapriv->pallocated_stainfo_buf + 4 -
+ ((SIZE_PTR)(pstapriv->pallocated_stainfo_buf) & 3);
+
+ _rtw_init_queue(&pstapriv->free_sta_queue);
+
+ spin_lock_init(&pstapriv->sta_hash_lock);
+
+ /* _rtw_init_queue(&pstapriv->asoc_q); */
+ pstapriv->asoc_sta_count = 0;
+ _rtw_init_queue(&pstapriv->sleep_q);
+ _rtw_init_queue(&pstapriv->wakeup_q);
+
+ psta = (struct sta_info *)(pstapriv->pstainfo_buf);
+
+
+ for (i = 0; i < NUM_STA; i++) {
+ _rtw_init_stainfo(psta);
+
+ INIT_LIST_HEAD(&(pstapriv->sta_hash[i]));
+
+ list_add_tail(&psta->list, get_list_head(&pstapriv->free_sta_queue));
+
+ psta++;
+ }
+
+ pstapriv->sta_dz_bitmap = 0;
+ pstapriv->tim_bitmap = 0;
+
+ INIT_LIST_HEAD(&pstapriv->asoc_list);
+ INIT_LIST_HEAD(&pstapriv->auth_list);
+ spin_lock_init(&pstapriv->asoc_list_lock);
+ spin_lock_init(&pstapriv->auth_list_lock);
+ pstapriv->asoc_list_cnt = 0;
+ pstapriv->auth_list_cnt = 0;
+
+ pstapriv->auth_to = 3; /* 3*2 = 6 sec */
+ pstapriv->assoc_to = 3;
+ pstapriv->expire_to = 3; /* 3*2 = 6 sec */
+ pstapriv->max_num_sta = NUM_STA;
+ return _SUCCESS;
+}
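The pointer arithmetic above rounds pstainfo_buf up to a 4-byte boundary, which is what the 4 extra bytes in the vzalloc() are for. A worked example, assuming an unaligned base address:

	/* if pallocated_stainfo_buf & 3 == 1, then
	 *   pstainfo_buf = base + 4 - 1 = base + 3, which is 4-byte aligned;
	 * if base is already aligned, the sta_info array simply starts at base + 4. */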
+
+inline int rtw_stainfo_offset(struct sta_priv *stapriv, struct sta_info *sta)
+{
+ int offset = (((u8 *)sta) - stapriv->pstainfo_buf)/sizeof(struct sta_info);
+
+ if (!stainfo_offset_valid(offset))
+ DBG_871X("%s invalid offset(%d), out of range!!!", __func__, offset);
+
+ return offset;
+}
+
+inline struct sta_info *rtw_get_stainfo_by_offset(struct sta_priv *stapriv, int offset)
+{
+ if (!stainfo_offset_valid(offset))
+ DBG_871X("%s invalid offset(%d), out of range!!!", __func__, offset);
+
+ return (struct sta_info *)(stapriv->pstainfo_buf + offset * sizeof(struct sta_info));
+}
+
+/* free the lock/semaphore resources of all stainfos */
+void kfree_all_stainfo(struct sta_priv *pstapriv);
+void kfree_all_stainfo(struct sta_priv *pstapriv)
+{
+ struct list_head *plist, *phead;
+ struct sta_info *psta = NULL;
+
+ spin_lock_bh(&pstapriv->sta_hash_lock);
+
+ phead = get_list_head(&pstapriv->free_sta_queue);
+ plist = get_next(phead);
+
+ while (phead != plist) {
+ psta = LIST_CONTAINOR(plist, struct sta_info, list);
+ plist = get_next(plist);
+ }
+
+ spin_unlock_bh(&pstapriv->sta_hash_lock);
+}
+
+void kfree_sta_priv_lock(struct sta_priv *pstapriv);
+void kfree_sta_priv_lock(struct sta_priv *pstapriv)
+{
+ kfree_all_stainfo(pstapriv); /* must be done before freeing sta_hash_lock */
+}
+
+u32 _rtw_free_sta_priv(struct sta_priv *pstapriv)
+{
+ struct list_head *phead, *plist;
+ struct sta_info *psta = NULL;
+ struct recv_reorder_ctrl *preorder_ctrl;
+ int index;
+
+ if (pstapriv) {
+
+ /*delete all reordering_ctrl_timer */
+ spin_lock_bh(&pstapriv->sta_hash_lock);
+ for (index = 0; index < NUM_STA; index++) {
+ phead = &(pstapriv->sta_hash[index]);
+ plist = get_next(phead);
+
+ while (phead != plist) {
+ int i;
+ psta = LIST_CONTAINOR(plist, struct sta_info, hash_list);
+ plist = get_next(plist);
+
+ for (i = 0; i < 16 ; i++) {
+ preorder_ctrl = &psta->recvreorder_ctrl[i];
+ del_timer_sync(&preorder_ctrl->reordering_ctrl_timer);
+ }
+ }
+ }
+ spin_unlock_bh(&pstapriv->sta_hash_lock);
+ /*===============================*/
+
+ kfree_sta_priv_lock(pstapriv);
+
+ if (pstapriv->pallocated_stainfo_buf)
+ vfree(pstapriv->pallocated_stainfo_buf);
+
+ }
+ return _SUCCESS;
+}
+
+/* struct sta_info *rtw_alloc_stainfo(_queue *pfree_sta_queue, unsigned char *hwaddr) */
+struct sta_info *rtw_alloc_stainfo(struct sta_priv *pstapriv, u8 *hwaddr)
+{
+ uint tmp_aid;
+ s32 index;
+ struct list_head *phash_list;
+ struct sta_info *psta;
+ struct __queue *pfree_sta_queue;
+ struct recv_reorder_ctrl *preorder_ctrl;
+ int i = 0;
+ u16 wRxSeqInitialValue = 0xffff;
+
+ pfree_sta_queue = &pstapriv->free_sta_queue;
+
+ /* spin_lock_bh(&(pfree_sta_queue->lock)); */
+ spin_lock_bh(&(pstapriv->sta_hash_lock));
+ if (list_empty(&pfree_sta_queue->queue)) {
+ /* spin_unlock_bh(&(pfree_sta_queue->lock)); */
+ spin_unlock_bh(&(pstapriv->sta_hash_lock));
+ psta = NULL;
+ return psta;
+ } else {
+ psta = LIST_CONTAINOR(get_next(&pfree_sta_queue->queue), struct sta_info, list);
+
+ list_del_init(&(psta->list));
+
+ /* spin_unlock_bh(&(pfree_sta_queue->lock)); */
+
+ tmp_aid = psta->aid;
+
+ _rtw_init_stainfo(psta);
+
+ psta->padapter = pstapriv->padapter;
+
+ memcpy(psta->hwaddr, hwaddr, ETH_ALEN);
+
+ index = wifi_mac_hash(hwaddr);
+
+ RT_TRACE(_module_rtl871x_sta_mgt_c_, _drv_info_, ("rtw_alloc_stainfo: index = %x", index));
+
+ if (index >= NUM_STA) {
+ RT_TRACE(_module_rtl871x_sta_mgt_c_, _drv_err_, ("ERROR => rtw_alloc_stainfo: index >= NUM_STA"));
+ spin_unlock_bh(&(pstapriv->sta_hash_lock));
+ psta = NULL;
+ goto exit;
+ }
+ phash_list = &(pstapriv->sta_hash[index]);
+
+ /* spin_lock_bh(&(pstapriv->sta_hash_lock)); */
+
+ list_add_tail(&psta->hash_list, phash_list);
+
+ pstapriv->asoc_sta_count++;
+
+ /* spin_unlock_bh(&(pstapriv->sta_hash_lock)); */
+
+/* Commented by Albert 2009/08/13 */
+/* For the SMC router, the sequence number of the first packet of the WPS handshake will be 0. */
+/* In that case the packet would be dropped by recv_decache() if 0x00 were used as the default tid_rxseq value, */
+/* so tid_rxseq is initialized to 0xffff instead. */
+
+ for (i = 0; i < 16; i++) {
+ memcpy(&psta->sta_recvpriv.rxcache.tid_rxseq[i], &wRxSeqInitialValue, 2);
+ }
+
+ RT_TRACE(
+ _module_rtl871x_sta_mgt_c_,
+ _drv_info_, (
+ "alloc number_%d stainfo with hwaddr = %x %x %x %x %x %x \n",
+ pstapriv->asoc_sta_count,
+ hwaddr[0],
+ hwaddr[1],
+ hwaddr[2],
+ hwaddr[3],
+ hwaddr[4],
+ hwaddr[5]
+ )
+ );
+
+ init_addba_retry_timer(pstapriv->padapter, psta);
+
+ /* for A-MPDU Rx reordering buffer control */
+ for (i = 0; i < 16 ; i++) {
+ preorder_ctrl = &psta->recvreorder_ctrl[i];
+
+ preorder_ctrl->padapter = pstapriv->padapter;
+
+ preorder_ctrl->enable = false;
+
+ preorder_ctrl->indicate_seq = 0xffff;
+ #ifdef DBG_RX_SEQ
+ DBG_871X("DBG_RX_SEQ %s:%d IndicateSeq: %d\n", __func__, __LINE__,
+ preorder_ctrl->indicate_seq);
+ #endif
+ preorder_ctrl->wend_b = 0xffff;
+ /* preorder_ctrl->wsize_b = (NR_RECVBUFF-2); */
+ preorder_ctrl->wsize_b = 64;/* 64; */
+
+ _rtw_init_queue(&preorder_ctrl->pending_recvframe_queue);
+
+ rtw_init_recv_timer(preorder_ctrl);
+ }
+
+
+ /* init for DM */
+ psta->rssi_stat.UndecoratedSmoothedPWDB = (-1);
+ psta->rssi_stat.UndecoratedSmoothedCCK = (-1);
+
+ /* init for the sequence number of received management frame */
+ psta->RxMgmtFrameSeqNum = 0xffff;
+ spin_unlock_bh(&(pstapriv->sta_hash_lock));
+ /* allocate a mac id for the non-bc/mc station */
+ rtw_alloc_macid(pstapriv->padapter, psta);
+
+ }
+
+exit:
+
+
+ return psta;
+}
+
+/* using pstapriv->sta_hash_lock to protect */
+u32 rtw_free_stainfo(struct adapter *padapter, struct sta_info *psta)
+{
+ int i;
+ struct __queue *pfree_sta_queue;
+ struct recv_reorder_ctrl *preorder_ctrl;
+ struct sta_xmit_priv *pstaxmitpriv;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct hw_xmit *phwxmit;
+
+ if (psta == NULL)
+ goto exit;
+
+
+ spin_lock_bh(&psta->lock);
+ psta->state &= ~_FW_LINKED;
+ spin_unlock_bh(&psta->lock);
+
+ pfree_sta_queue = &pstapriv->free_sta_queue;
+
+
+ pstaxmitpriv = &psta->sta_xmitpriv;
+
+ /* list_del_init(&psta->sleep_list); */
+
+ /* list_del_init(&psta->wakeup_list); */
+
+ spin_lock_bh(&pxmitpriv->lock);
+
+ rtw_free_xmitframe_queue(pxmitpriv, &psta->sleep_q);
+ psta->sleepq_len = 0;
+
+ /* vo */
+ /* spin_lock_bh(&(pxmitpriv->vo_pending.lock)); */
+ rtw_free_xmitframe_queue(pxmitpriv, &pstaxmitpriv->vo_q.sta_pending);
+ list_del_init(&(pstaxmitpriv->vo_q.tx_pending));
+ phwxmit = pxmitpriv->hwxmits;
+ phwxmit->accnt -= pstaxmitpriv->vo_q.qcnt;
+ pstaxmitpriv->vo_q.qcnt = 0;
+ /* spin_unlock_bh(&(pxmitpriv->vo_pending.lock)); */
+
+ /* vi */
+ /* spin_lock_bh(&(pxmitpriv->vi_pending.lock)); */
+ rtw_free_xmitframe_queue(pxmitpriv, &pstaxmitpriv->vi_q.sta_pending);
+ list_del_init(&(pstaxmitpriv->vi_q.tx_pending));
+ phwxmit = pxmitpriv->hwxmits+1;
+ phwxmit->accnt -= pstaxmitpriv->vi_q.qcnt;
+ pstaxmitpriv->vi_q.qcnt = 0;
+ /* spin_unlock_bh(&(pxmitpriv->vi_pending.lock)); */
+
+ /* be */
+ /* spin_lock_bh(&(pxmitpriv->be_pending.lock)); */
+ rtw_free_xmitframe_queue(pxmitpriv, &pstaxmitpriv->be_q.sta_pending);
+ list_del_init(&(pstaxmitpriv->be_q.tx_pending));
+ phwxmit = pxmitpriv->hwxmits+2;
+ phwxmit->accnt -= pstaxmitpriv->be_q.qcnt;
+ pstaxmitpriv->be_q.qcnt = 0;
+ /* spin_unlock_bh(&(pxmitpriv->be_pending.lock)); */
+
+ /* bk */
+ /* spin_lock_bh(&(pxmitpriv->bk_pending.lock)); */
+ rtw_free_xmitframe_queue(pxmitpriv, &pstaxmitpriv->bk_q.sta_pending);
+ list_del_init(&(pstaxmitpriv->bk_q.tx_pending));
+ phwxmit = pxmitpriv->hwxmits+3;
+ phwxmit->accnt -= pstaxmitpriv->bk_q.qcnt;
+ pstaxmitpriv->bk_q.qcnt = 0;
+ /* spin_unlock_bh(&(pxmitpriv->bk_pending.lock)); */
+
+ spin_unlock_bh(&pxmitpriv->lock);
+
+ list_del_init(&psta->hash_list);
+ RT_TRACE(
+ _module_rtl871x_sta_mgt_c_,
+ _drv_err_, (
+ "\n free number_%d stainfo with hwaddr = 0x%.2x 0x%.2x 0x%.2x 0x%.2x 0x%.2x 0x%.2x \n",
+ pstapriv->asoc_sta_count,
+ psta->hwaddr[0],
+ psta->hwaddr[1],
+ psta->hwaddr[2],
+ psta->hwaddr[3],
+ psta->hwaddr[4],
+ psta->hwaddr[5]
+ )
+ );
+ pstapriv->asoc_sta_count--;
+
+
+ /* re-init sta_info; 20061114: will be initialized in alloc_stainfo */
+ /* _rtw_init_sta_xmit_priv(&psta->sta_xmitpriv); */
+ /* _rtw_init_sta_recv_priv(&psta->sta_recvpriv); */
+
+ del_timer_sync(&psta->addba_retry_timer);
+
+ /* for A-MPDU Rx reordering buffer control, cancel reordering_ctrl_timer */
+ for (i = 0; i < 16 ; i++) {
+ struct list_head *phead, *plist;
+ union recv_frame *prframe;
+ struct __queue *ppending_recvframe_queue;
+ struct __queue *pfree_recv_queue = &padapter->recvpriv.free_recv_queue;
+
+ preorder_ctrl = &psta->recvreorder_ctrl[i];
+
+ del_timer_sync(&preorder_ctrl->reordering_ctrl_timer);
+
+
+ ppending_recvframe_queue = &preorder_ctrl->pending_recvframe_queue;
+
+ spin_lock_bh(&ppending_recvframe_queue->lock);
+
+ phead = get_list_head(ppending_recvframe_queue);
+ plist = get_next(phead);
+
+ while (!list_empty(phead)) {
+ prframe = LIST_CONTAINOR(plist, union recv_frame, u);
+
+ plist = get_next(plist);
+
+ list_del_init(&(prframe->u.hdr.list));
+
+ rtw_free_recvframe(prframe, pfree_recv_queue);
+ }
+
+ spin_unlock_bh(&ppending_recvframe_queue->lock);
+
+ }
+
+ if (!(psta->state & WIFI_AP_STATE))
+ rtw_hal_set_odm_var(padapter, HAL_ODM_STA_INFO, psta, false);
+
+
+ /* release the mac id of the non-bc/mc station */
+ rtw_release_macid(pstapriv->padapter, psta);
+
+/*
+ spin_lock_bh(&pstapriv->asoc_list_lock);
+ list_del_init(&psta->asoc_list);
+ spin_unlock_bh(&pstapriv->asoc_list_lock);
+*/
+ spin_lock_bh(&pstapriv->auth_list_lock);
+ if (!list_empty(&psta->auth_list)) {
+ list_del_init(&psta->auth_list);
+ pstapriv->auth_list_cnt--;
+ }
+ spin_unlock_bh(&pstapriv->auth_list_lock);
+
+ psta->expire_to = 0;
+ psta->sleepq_ac_len = 0;
+ psta->qos_info = 0;
+
+ psta->max_sp_len = 0;
+ psta->uapsd_bk = 0;
+ psta->uapsd_be = 0;
+ psta->uapsd_vi = 0;
+ psta->uapsd_vo = 0;
+
+ psta->has_legacy_ac = 0;
+
+ pstapriv->sta_dz_bitmap &= ~BIT(psta->aid);
+ pstapriv->tim_bitmap &= ~BIT(psta->aid);
+
+ if ((psta->aid > 0) && (pstapriv->sta_aid[psta->aid - 1] == psta)) {
+ pstapriv->sta_aid[psta->aid - 1] = NULL;
+ psta->aid = 0;
+ }
+
+ psta->under_exist_checking = 0;
+
+ /* spin_lock_bh(&(pfree_sta_queue->lock)); */
+ list_add_tail(&psta->list, get_list_head(pfree_sta_queue));
+ /* spin_unlock_bh(&(pfree_sta_queue->lock)); */
+
+exit:
+ return _SUCCESS;
+}
+
+/* free all stainfo entries in every sta_hash[] bucket */
+void rtw_free_all_stainfo(struct adapter *padapter)
+{
+ struct list_head *plist, *phead;
+ s32 index;
+ struct sta_info *psta = NULL;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct sta_info *pbcmc_stainfo = rtw_get_bcmc_stainfo(padapter);
+
+ if (pstapriv->asoc_sta_count == 1)
+ return;
+
+ spin_lock_bh(&pstapriv->sta_hash_lock);
+
+ for (index = 0; index < NUM_STA; index++) {
+ phead = &(pstapriv->sta_hash[index]);
+ plist = get_next(phead);
+
+ while (phead != plist) {
+ psta = LIST_CONTAINOR(plist, struct sta_info, hash_list);
+
+ plist = get_next(plist);
+
+ if (pbcmc_stainfo != psta)
+ rtw_free_stainfo(padapter, psta);
+
+ }
+ }
+
+ spin_unlock_bh(&pstapriv->sta_hash_lock);
+}
+
+/* any allocated station can be looked up via the hash list */
+struct sta_info *rtw_get_stainfo(struct sta_priv *pstapriv, u8 *hwaddr)
+{
+ struct list_head *plist, *phead;
+ struct sta_info *psta = NULL;
+ u32 index;
+ u8 *addr;
+ u8 bc_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+
+ if (hwaddr == NULL)
+ return NULL;
+
+ if (IS_MCAST(hwaddr))
+ addr = bc_addr;
+ else
+ addr = hwaddr;
+
+ index = wifi_mac_hash(addr);
+
+ spin_lock_bh(&pstapriv->sta_hash_lock);
+
+ phead = &(pstapriv->sta_hash[index]);
+ plist = get_next(phead);
+
+
+ while (phead != plist) {
+
+ psta = LIST_CONTAINOR(plist, struct sta_info, hash_list);
+
+ if ((!memcmp(psta->hwaddr, addr, ETH_ALEN)))
+ /* found the matching address */
+ break;
+
+ psta = NULL;
+ plist = get_next(plist);
+ }
+
+ spin_unlock_bh(&pstapriv->sta_hash_lock);
+ return psta;
+}
+
+u32 rtw_init_bcmc_stainfo(struct adapter *padapter)
+{
+
+ struct sta_info *psta;
+ struct tx_servq *ptxservq;
+ u32 res = _SUCCESS;
+ NDIS_802_11_MAC_ADDRESS bcast_addr = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ /* struct __queue *pstapending = &padapter->xmitpriv.bm_pending; */
+
+ psta = rtw_alloc_stainfo(pstapriv, bcast_addr);
+
+ if (psta == NULL) {
+ res = _FAIL;
+ RT_TRACE(_module_rtl871x_sta_mgt_c_, _drv_err_, ("rtw_alloc_stainfo fail"));
+ goto exit;
+ }
+
+ /* default broadcast & multicast use macid 1 */
+ psta->mac_id = 1;
+
+ ptxservq = &(psta->sta_xmitpriv.be_q);
+exit:
+ return res;
+}
+
+
+struct sta_info *rtw_get_bcmc_stainfo(struct adapter *padapter)
+{
+ struct sta_info *psta;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ u8 bc_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+
+ psta = rtw_get_stainfo(pstapriv, bc_addr);
+ return psta;
+}
+
+u8 rtw_access_ctrl(struct adapter *padapter, u8 *mac_addr)
+{
+ u8 res = true;
+ struct list_head *plist, *phead;
+ struct rtw_wlan_acl_node *paclnode;
+ u8 match = false;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct wlan_acl_pool *pacl_list = &pstapriv->acl_list;
+ struct __queue *pacl_node_q = &pacl_list->acl_node_q;
+
+ spin_lock_bh(&(pacl_node_q->lock));
+ phead = get_list_head(pacl_node_q);
+ plist = get_next(phead);
+ while (phead != plist) {
+ paclnode = LIST_CONTAINOR(plist, struct rtw_wlan_acl_node, list);
+ plist = get_next(plist);
+
+ if (!memcmp(paclnode->addr, mac_addr, ETH_ALEN))
+ if (paclnode->valid == true) {
+ match = true;
+ break;
+ }
+
+ }
+ spin_unlock_bh(&(pacl_node_q->lock));
+
+
+ if (pacl_list->mode == 1) /* accept unless in deny list */
+ res = (match == true) ? false:true;
+
+ else if (pacl_list->mode == 2)/* deny unless in accept list */
+ res = (match == true) ? true:false;
+ else
+ res = true;
+
+ return res;
+}
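In other words, mode 1 treats the ACL as a deny list and mode 2 treats it as an accept list; any other mode admits every address. The expected results, summarized for a hypothetical ACL entry:

	/* pacl_list->mode == 1 (accept unless listed):
	 *   MAC on the list  -> rtw_access_ctrl() returns false
	 *   MAC not listed   -> true
	 * pacl_list->mode == 2 (deny unless listed):
	 *   MAC on the list  -> true
	 *   MAC not listed   -> false
	 * any other mode     -> true for every MAC */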
diff --git a/drivers/staging/rtl8723bs/core/rtw_wlan_util.c b/drivers/staging/rtl8723bs/core/rtw_wlan_util.c
new file mode 100644
index 000000000000..f485f541e36d
--- /dev/null
+++ b/drivers/staging/rtl8723bs/core/rtw_wlan_util.c
@@ -0,0 +1,2328 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#define _RTW_WLAN_UTIL_C_
+
+#include <drv_types.h>
+#include <rtw_debug.h>
+#include <hal_com_h2c.h>
+
+#if defined(CONFIG_WOWLAN) || defined(CONFIG_AP_WOWLAN)
+#include <linux/inetdevice.h>
+#endif
+
+static unsigned char ARTHEROS_OUI1[] = {0x00, 0x03, 0x7f};
+static unsigned char ARTHEROS_OUI2[] = {0x00, 0x13, 0x74};
+
+static unsigned char BROADCOM_OUI1[] = {0x00, 0x10, 0x18};
+static unsigned char BROADCOM_OUI2[] = {0x00, 0x0a, 0xf7};
+static unsigned char BROADCOM_OUI3[] = {0x00, 0x05, 0xb5};
+
+static unsigned char CISCO_OUI[] = {0x00, 0x40, 0x96};
+static unsigned char MARVELL_OUI[] = {0x00, 0x50, 0x43};
+static unsigned char RALINK_OUI[] = {0x00, 0x0c, 0x43};
+static unsigned char REALTEK_OUI[] = {0x00, 0xe0, 0x4c};
+static unsigned char AIRGOCAP_OUI[] = {0x00, 0x0a, 0xf5};
+static unsigned char RSN_TKIP_CIPHER[4] = {0x00, 0x0f, 0xac, 0x02};
+static unsigned char WPA_TKIP_CIPHER[4] = {0x00, 0x50, 0xf2, 0x02};
+
+extern unsigned char RTW_WPA_OUI[];
+extern unsigned char WPA_TKIP_CIPHER[4];
+
+#define R2T_PHY_DELAY (0)
+
+/* define WAIT_FOR_BCN_TO_MIN (3000) */
+#define WAIT_FOR_BCN_TO_MIN (6000)
+#define WAIT_FOR_BCN_TO_MAX (20000)
+
+#define DISCONNECT_BY_CHK_BCN_FAIL_OBSERV_PERIOD_IN_MS 1000
+#define DISCONNECT_BY_CHK_BCN_FAIL_THRESHOLD 3
+
+static u8 rtw_basic_rate_cck[4] = {
+ IEEE80211_CCK_RATE_1MB|IEEE80211_BASIC_RATE_MASK, IEEE80211_CCK_RATE_2MB|IEEE80211_BASIC_RATE_MASK,
+ IEEE80211_CCK_RATE_5MB|IEEE80211_BASIC_RATE_MASK, IEEE80211_CCK_RATE_11MB|IEEE80211_BASIC_RATE_MASK
+};
+
+static u8 rtw_basic_rate_ofdm[3] = {
+ IEEE80211_OFDM_RATE_6MB|IEEE80211_BASIC_RATE_MASK, IEEE80211_OFDM_RATE_12MB|IEEE80211_BASIC_RATE_MASK,
+ IEEE80211_OFDM_RATE_24MB|IEEE80211_BASIC_RATE_MASK
+};
+
+int cckrates_included(unsigned char *rate, int ratelen)
+{
+ int i;
+
+ for (i = 0; i < ratelen; i++) {
+ if ((((rate[i]) & 0x7f) == 2) || (((rate[i]) & 0x7f) == 4) ||
+ (((rate[i]) & 0x7f) == 11) || (((rate[i]) & 0x7f) == 22))
+ return true;
+ }
+
+ return false;
+
+}
+
+int cckratesonly_included(unsigned char *rate, int ratelen)
+{
+ int i;
+
+ for (i = 0; i < ratelen; i++) {
+ if ((((rate[i]) & 0x7f) != 2) && (((rate[i]) & 0x7f) != 4) &&
+ (((rate[i]) & 0x7f) != 11) && (((rate[i]) & 0x7f) != 22))
+ return false;
+ }
+
+ return true;
+}
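Both helpers rely on the 802.11 supported-rates encoding: the low 7 bits of each byte hold the rate in 500 kbps units (2 = 1 Mbps, 4 = 2 Mbps, 11 = 5.5 Mbps, 22 = 11 Mbps) and bit 7 marks a basic rate. A short illustration with an assumed B/G rate set:

	unsigned char rates_bg[] = { 0x82, 0x84, 0x8b, 0x96, 0x0c, 0x18 }; /* 1/2/5.5/11 (basic) + 6/12 Mbps */

	cckrates_included(rates_bg, sizeof(rates_bg));     /* true: CCK rates are present  */
	cckratesonly_included(rates_bg, sizeof(rates_bg)); /* false: OFDM rates also exist */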
+
+u8 networktype_to_raid_ex(struct adapter *adapter, struct sta_info *psta)
+{
+ u8 raid, cur_rf_type, rf_type = RF_1T1R;
+
+ rtw_hal_get_hwreg(adapter, HW_VAR_RF_TYPE, (u8 *)(&cur_rf_type));
+
+ if (cur_rf_type == RF_1T1R) {
+ rf_type = RF_1T1R;
+ } else if (IsSupportedVHT(psta->wireless_mode)) {
+ if (psta->ra_mask & 0xffc00000)
+ rf_type = RF_2T2R;
+ } else if (IsSupportedHT(psta->wireless_mode)) {
+ if (psta->ra_mask & 0xfff00000)
+ rf_type = RF_2T2R;
+ }
+
+ switch (psta->wireless_mode) {
+ case WIRELESS_11B:
+ raid = RATEID_IDX_B;
+ break;
+ case WIRELESS_11A:
+ case WIRELESS_11G:
+ raid = RATEID_IDX_G;
+ break;
+ case WIRELESS_11BG:
+ raid = RATEID_IDX_BG;
+ break;
+ case WIRELESS_11_24N:
+ case WIRELESS_11_5N:
+ case WIRELESS_11A_5N:
+ case WIRELESS_11G_24N:
+ if (rf_type == RF_2T2R)
+ raid = RATEID_IDX_GN_N2SS;
+ else
+ raid = RATEID_IDX_GN_N1SS;
+ break;
+ case WIRELESS_11B_24N:
+ case WIRELESS_11BG_24N:
+ if (psta->bw_mode == CHANNEL_WIDTH_20) {
+ if (rf_type == RF_2T2R)
+ raid = RATEID_IDX_BGN_20M_2SS_BN;
+ else
+ raid = RATEID_IDX_BGN_20M_1SS_BN;
+ } else {
+ if (rf_type == RF_2T2R)
+ raid = RATEID_IDX_BGN_40M_2SS;
+ else
+ raid = RATEID_IDX_BGN_40M_1SS;
+ }
+ break;
+ default:
+ raid = RATEID_IDX_BGN_40M_2SS;
+ break;
+
+ }
+ return raid;
+
+}
+
+unsigned char ratetbl_val_2wifirate(unsigned char rate);
+unsigned char ratetbl_val_2wifirate(unsigned char rate)
+{
+ unsigned char val = 0;
+
+ switch (rate & 0x7f) {
+ case 0:
+ val = IEEE80211_CCK_RATE_1MB;
+ break;
+
+ case 1:
+ val = IEEE80211_CCK_RATE_2MB;
+ break;
+
+ case 2:
+ val = IEEE80211_CCK_RATE_5MB;
+ break;
+
+ case 3:
+ val = IEEE80211_CCK_RATE_11MB;
+ break;
+
+ case 4:
+ val = IEEE80211_OFDM_RATE_6MB;
+ break;
+
+ case 5:
+ val = IEEE80211_OFDM_RATE_9MB;
+ break;
+
+ case 6:
+ val = IEEE80211_OFDM_RATE_12MB;
+ break;
+
+ case 7:
+ val = IEEE80211_OFDM_RATE_18MB;
+ break;
+
+ case 8:
+ val = IEEE80211_OFDM_RATE_24MB;
+ break;
+
+ case 9:
+ val = IEEE80211_OFDM_RATE_36MB;
+ break;
+
+ case 10:
+ val = IEEE80211_OFDM_RATE_48MB;
+ break;
+
+ case 11:
+ val = IEEE80211_OFDM_RATE_54MB;
+ break;
+
+ }
+
+ return val;
+
+}
+
+int is_basicrate(struct adapter *padapter, unsigned char rate);
+int is_basicrate(struct adapter *padapter, unsigned char rate)
+{
+ int i;
+ unsigned char val;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+
+ for (i = 0; i < NumRates; i++) {
+ val = pmlmeext->basicrate[i];
+
+ if ((val != 0xff) && (val != 0xfe))
+ if (rate == ratetbl_val_2wifirate(val))
+ return true;
+ }
+
+ return false;
+}
+
+unsigned int ratetbl2rateset(struct adapter *padapter, unsigned char *rateset);
+unsigned int ratetbl2rateset(struct adapter *padapter, unsigned char *rateset)
+{
+ int i;
+ unsigned char rate;
+ unsigned int len = 0;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+
+ for (i = 0; i < NumRates; i++) {
+ rate = pmlmeext->datarate[i];
+
+ switch (rate) {
+ case 0xff:
+ return len;
+
+ case 0xfe:
+ continue;
+
+ default:
+ rate = ratetbl_val_2wifirate(rate);
+
+ if (is_basicrate(padapter, rate) == true)
+ rate |= IEEE80211_BASIC_RATE_MASK;
+
+ rateset[len] = rate;
+ len++;
+ break;
+ }
+ }
+ return len;
+}
+
+void get_rate_set(struct adapter *padapter, unsigned char *pbssrate, int *bssrate_len)
+{
+ unsigned char supportedrates[NumRates];
+
+ memset(supportedrates, 0, NumRates);
+ *bssrate_len = ratetbl2rateset(padapter, supportedrates);
+ memcpy(pbssrate, supportedrates, *bssrate_len);
+}
+
+void set_mcs_rate_by_mask(u8 *mcs_set, u32 mask)
+{
+ u8 mcs_rate_1r = (u8)(mask&0xff);
+ u8 mcs_rate_2r = (u8)((mask>>8)&0xff);
+ u8 mcs_rate_3r = (u8)((mask>>16)&0xff);
+ u8 mcs_rate_4r = (u8)((mask>>24)&0xff);
+
+ mcs_set[0] &= mcs_rate_1r;
+ mcs_set[1] &= mcs_rate_2r;
+ mcs_set[2] &= mcs_rate_3r;
+ mcs_set[3] &= mcs_rate_4r;
+}
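The mask packs one byte of the HT MCS map per spatial stream, lowest byte first. For example, with an assumed mask of 0x000000ff only the 1SS rates survive:

	u8 mcs_set[4] = { 0xff, 0xff, 0x00, 0x00 };  /* peer advertises MCS 0-15 */

	set_mcs_rate_by_mask(mcs_set, 0x000000ff);
	/* mcs_set is now { 0xff, 0x00, 0x00, 0x00 }: MCS 0-7 kept, 2SS rows cleared */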
+
+void UpdateBrateTbl(struct adapter *Adapter, u8 *mBratesOS)
+{
+ u8 i;
+ u8 rate;
+
+ /* 1M, 2M, 5.5M, 11M, 6M, 12M, 24M are mandatory. */
+ for (i = 0; i < NDIS_802_11_LENGTH_RATES_EX; i++) {
+ rate = mBratesOS[i] & 0x7f;
+ switch (rate) {
+ case IEEE80211_CCK_RATE_1MB:
+ case IEEE80211_CCK_RATE_2MB:
+ case IEEE80211_CCK_RATE_5MB:
+ case IEEE80211_CCK_RATE_11MB:
+ case IEEE80211_OFDM_RATE_6MB:
+ case IEEE80211_OFDM_RATE_12MB:
+ case IEEE80211_OFDM_RATE_24MB:
+ mBratesOS[i] |= IEEE80211_BASIC_RATE_MASK;
+ break;
+ }
+ }
+
+}
+
+void UpdateBrateTblForSoftAP(u8 *bssrateset, u32 bssratelen)
+{
+ u8 i;
+ u8 rate;
+
+ for (i = 0; i < bssratelen; i++) {
+ rate = bssrateset[i] & 0x7f;
+ switch (rate) {
+ case IEEE80211_CCK_RATE_1MB:
+ case IEEE80211_CCK_RATE_2MB:
+ case IEEE80211_CCK_RATE_5MB:
+ case IEEE80211_CCK_RATE_11MB:
+ bssrateset[i] |= IEEE80211_BASIC_RATE_MASK;
+ break;
+ }
+ }
+
+}
+
+void Save_DM_Func_Flag(struct adapter *padapter)
+{
+ u8 bSaveFlag = true;
+ rtw_hal_set_hwreg(padapter, HW_VAR_DM_FUNC_OP, (u8 *)(&bSaveFlag));
+}
+
+void Restore_DM_Func_Flag(struct adapter *padapter)
+{
+ u8 bSaveFlag = false;
+ rtw_hal_set_hwreg(padapter, HW_VAR_DM_FUNC_OP, (u8 *)(&bSaveFlag));
+}
+
+void Switch_DM_Func(struct adapter *padapter, u32 mode, u8 enable)
+{
+ if (enable == true)
+ rtw_hal_set_hwreg(padapter, HW_VAR_DM_FUNC_SET, (u8 *)(&mode));
+ else
+ rtw_hal_set_hwreg(padapter, HW_VAR_DM_FUNC_CLR, (u8 *)(&mode));
+}
+
+static void Set_NETYPE0_MSR(struct adapter *padapter, u8 type)
+{
+ rtw_hal_set_hwreg(padapter, HW_VAR_MEDIA_STATUS, (u8 *)(&type));
+}
+
+void Set_MSR(struct adapter *padapter, u8 type)
+{
+ Set_NETYPE0_MSR(padapter, type);
+}
+
+inline u8 rtw_get_oper_ch(struct adapter *adapter)
+{
+ return adapter_to_dvobj(adapter)->oper_channel;
+}
+
+inline void rtw_set_oper_ch(struct adapter *adapter, u8 ch)
+{
+#ifdef DBG_CH_SWITCH
+ const int len = 128;
+ char msg[128] = {0};
+ int cnt = 0;
+ int i = 0;
+#endif /* DBG_CH_SWITCH */
+ struct dvobj_priv *dvobj = adapter_to_dvobj(adapter);
+
+ if (dvobj->oper_channel != ch) {
+ dvobj->on_oper_ch_time = jiffies;
+
+#ifdef DBG_CH_SWITCH
+ cnt += snprintf(msg+cnt, len-cnt, "switch to ch %3u", ch);
+
+ for (i = 0; i < dvobj->iface_nums; i++) {
+ struct adapter *iface = dvobj->padapters[i];
+ cnt += snprintf(msg+cnt, len-cnt, " ["ADPT_FMT":", ADPT_ARG(iface));
+ if (iface->mlmeextpriv.cur_channel == ch)
+ cnt += snprintf(msg+cnt, len-cnt, "C");
+ else
+ cnt += snprintf(msg+cnt, len-cnt, "_");
+ if (iface->wdinfo.listen_channel == ch && !rtw_p2p_chk_state(&iface->wdinfo, P2P_STATE_NONE))
+ cnt += snprintf(msg+cnt, len-cnt, "L");
+ else
+ cnt += snprintf(msg+cnt, len-cnt, "_");
+ cnt += snprintf(msg+cnt, len-cnt, "]");
+ }
+
+ DBG_871X(FUNC_ADPT_FMT" %s\n", FUNC_ADPT_ARG(adapter), msg);
+#endif /* DBG_CH_SWITCH */
+ }
+
+ dvobj->oper_channel = ch;
+}
+
+inline u8 rtw_get_oper_bw(struct adapter *adapter)
+{
+ return adapter_to_dvobj(adapter)->oper_bwmode;
+}
+
+inline void rtw_set_oper_bw(struct adapter *adapter, u8 bw)
+{
+ adapter_to_dvobj(adapter)->oper_bwmode = bw;
+}
+
+inline u8 rtw_get_oper_choffset(struct adapter *adapter)
+{
+ return adapter_to_dvobj(adapter)->oper_ch_offset;
+}
+
+inline void rtw_set_oper_choffset(struct adapter *adapter, u8 offset)
+{
+ adapter_to_dvobj(adapter)->oper_ch_offset = offset;
+}
+
+u8 rtw_get_center_ch(u8 channel, u8 chnl_bw, u8 chnl_offset)
+{
+ u8 center_ch = channel;
+
+ if (chnl_bw == CHANNEL_WIDTH_80) {
+ if ((channel == 36) || (channel == 40) || (channel == 44) || (channel == 48))
+ center_ch = 42;
+ if ((channel == 52) || (channel == 56) || (channel == 60) || (channel == 64))
+ center_ch = 58;
+ if ((channel == 100) || (channel == 104) || (channel == 108) || (channel == 112))
+ center_ch = 106;
+ if ((channel == 116) || (channel == 120) || (channel == 124) || (channel == 128))
+ center_ch = 122;
+ if ((channel == 132) || (channel == 136) || (channel == 140) || (channel == 144))
+ center_ch = 138;
+ if ((channel == 149) || (channel == 153) || (channel == 157) || (channel == 161))
+ center_ch = 155;
+ else if (channel <= 14)
+ center_ch = 7;
+ } else if (chnl_bw == CHANNEL_WIDTH_40) {
+ if (chnl_offset == HAL_PRIME_CHNL_OFFSET_LOWER)
+ center_ch = channel + 2;
+ else
+ center_ch = channel - 2;
+ }
+
+ return center_ch;
+}
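A few expected mappings from the helper above, for reference:

	rtw_get_center_ch(36, CHANNEL_WIDTH_80, HAL_PRIME_CHNL_OFFSET_DONT_CARE);  /* 42  */
	rtw_get_center_ch(149, CHANNEL_WIDTH_80, HAL_PRIME_CHNL_OFFSET_DONT_CARE); /* 155 */
	rtw_get_center_ch(1, CHANNEL_WIDTH_40, HAL_PRIME_CHNL_OFFSET_LOWER);       /* 1 + 2 = 3 */
	rtw_get_center_ch(11, CHANNEL_WIDTH_40, HAL_PRIME_CHNL_OFFSET_UPPER);      /* 11 - 2 = 9 */
	rtw_get_center_ch(6, CHANNEL_WIDTH_20, HAL_PRIME_CHNL_OFFSET_DONT_CARE);   /* unchanged: 6 */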
+
+inline unsigned long rtw_get_on_cur_ch_time(struct adapter *adapter)
+{
+ if (adapter->mlmeextpriv.cur_channel == adapter_to_dvobj(adapter)->oper_channel)
+ return adapter_to_dvobj(adapter)->on_oper_ch_time;
+ else
+ return 0;
+}
+
+void SelectChannel(struct adapter *padapter, unsigned char channel)
+{
+ if (mutex_lock_interruptible(&(adapter_to_dvobj(padapter)->setch_mutex)))
+ return;
+
+ /* saved channel info */
+ rtw_set_oper_ch(padapter, channel);
+
+ rtw_hal_set_chan(padapter, channel);
+
+ mutex_unlock(&(adapter_to_dvobj(padapter)->setch_mutex));
+}
+
+void set_channel_bwmode(struct adapter *padapter, unsigned char channel, unsigned char channel_offset, unsigned short bwmode)
+{
+ u8 center_ch, chnl_offset80 = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
+
+ if (padapter->bNotifyChannelChange)
+ DBG_871X("[%s] ch = %d, offset = %d, bwmode = %d\n", __func__, channel, channel_offset, bwmode);
+
+ center_ch = rtw_get_center_ch(channel, bwmode, channel_offset);
+
+ if (bwmode == CHANNEL_WIDTH_80) {
+ if (center_ch > channel)
+ chnl_offset80 = HAL_PRIME_CHNL_OFFSET_LOWER;
+ else if (center_ch < channel)
+ chnl_offset80 = HAL_PRIME_CHNL_OFFSET_UPPER;
+ else
+ chnl_offset80 = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
+ }
+
+ /* set Channel */
+ if (mutex_lock_interruptible(&(adapter_to_dvobj(padapter)->setch_mutex)))
+ return;
+
+ /* saved channel/bw info */
+ rtw_set_oper_ch(padapter, channel);
+ rtw_set_oper_bw(padapter, bwmode);
+ rtw_set_oper_choffset(padapter, channel_offset);
+
+ rtw_hal_set_chnl_bw(padapter, center_ch, bwmode, channel_offset, chnl_offset80); /* set center channel */
+
+ mutex_unlock(&(adapter_to_dvobj(padapter)->setch_mutex));
+}
+
+__inline u8 *get_my_bssid(struct wlan_bssid_ex *pnetwork)
+{
+ return pnetwork->MacAddress;
+}
+
+u16 get_beacon_interval(struct wlan_bssid_ex *bss)
+{
+ __le16 val;
+ memcpy((unsigned char *)&val, rtw_get_beacon_interval_from_ie(bss->IEs), 2);
+
+ return le16_to_cpu(val);
+
+}
+
+int is_client_associated_to_ap(struct adapter *padapter)
+{
+ struct mlme_ext_priv *pmlmeext;
+ struct mlme_ext_info *pmlmeinfo;
+
+ if (!padapter)
+ return _FAIL;
+
+ pmlmeext = &padapter->mlmeextpriv;
+ pmlmeinfo = &(pmlmeext->mlmext_info);
+
+ if ((pmlmeinfo->state & WIFI_FW_ASSOC_SUCCESS) && ((pmlmeinfo->state&0x03) == WIFI_FW_STATION_STATE))
+ return true;
+ else
+ return _FAIL;
+}
+
+int is_client_associated_to_ibss(struct adapter *padapter)
+{
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+
+ if ((pmlmeinfo->state & WIFI_FW_ASSOC_SUCCESS) && ((pmlmeinfo->state&0x03) == WIFI_FW_ADHOC_STATE))
+ return true;
+ else
+ return _FAIL;
+}
+
+int is_IBSS_empty(struct adapter *padapter)
+{
+ unsigned int i;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+
+ for (i = IBSS_START_MAC_ID; i < NUM_STA; i++) {
+ if (pmlmeinfo->FW_sta_info[i].status == 1)
+ return _FAIL;
+ }
+
+ return true;
+
+}
+
+unsigned int decide_wait_for_beacon_timeout(unsigned int bcn_interval)
+{
+ if ((bcn_interval << 2) < WAIT_FOR_BCN_TO_MIN)
+ return WAIT_FOR_BCN_TO_MIN;
+ else if ((bcn_interval << 2) > WAIT_FOR_BCN_TO_MAX)
+ return WAIT_FOR_BCN_TO_MAX;
+ else
+ return ((bcn_interval << 2));
+}
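The timeout is four beacon intervals clamped to the WAIT_FOR_BCN_TO_MIN/MAX window defined above; with the common 100 TU interval the lower clamp dominates:

	decide_wait_for_beacon_timeout(100);  /* 100 << 2 = 400    < 6000        -> 6000  */
	decide_wait_for_beacon_timeout(2000); /* 2000 << 2 = 8000, within range  -> 8000  */
	decide_wait_for_beacon_timeout(6000); /* 6000 << 2 = 24000 > 20000       -> 20000 */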
+
+void invalidate_cam_all(struct adapter *padapter)
+{
+ struct dvobj_priv *dvobj = adapter_to_dvobj(padapter);
+ struct cam_ctl_t *cam_ctl = &dvobj->cam_ctl;
+
+ rtw_hal_set_hwreg(padapter, HW_VAR_CAM_INVALID_ALL, NULL);
+
+ spin_lock_bh(&cam_ctl->lock);
+ cam_ctl->bitmap = 0;
+ memset(dvobj->cam_cache, 0, sizeof(struct cam_entry_cache)*TOTAL_CAM_ENTRY);
+ spin_unlock_bh(&cam_ctl->lock);
+}
+
+static u32 _ReadCAM(struct adapter *padapter, u32 addr)
+{
+ u32 count = 0, cmd;
+ cmd = CAM_POLLINIG | addr;
+ rtw_write32(padapter, RWCAM, cmd);
+
+ do {
+ if (0 == (rtw_read32(padapter, REG_CAMCMD) & CAM_POLLINIG))
+ break;
+ } while (count++ < 100);
+
+ return rtw_read32(padapter, REG_CAMREAD);
+}
+void read_cam(struct adapter *padapter, u8 entry, u8 *get_key)
+{
+ u32 j, addr, cmd;
+ addr = entry << 3;
+
+ /* DBG_8192C("********* DUMP CAM Entry_#%02d***************\n", entry); */
+ for (j = 0; j < 6; j++) {
+ cmd = _ReadCAM(padapter, addr+j);
+ /* DBG_8192C("offset:0x%02x => 0x%08x\n", addr+j, cmd); */
+ if (j > 1) /* get key from cam */
+ memcpy(get_key+(j-2)*4, &cmd, 4);
+ }
+ /* DBG_8192C("*********************************\n"); */
+}
+
+void _write_cam(struct adapter *padapter, u8 entry, u16 ctrl, u8 *mac, u8 *key)
+{
+ unsigned int i, val, addr;
+ int j;
+ u32 cam_val[2];
+
+ addr = entry << 3;
+
+ for (j = 5; j >= 0; j--) {
+ switch (j) {
+ case 0:
+ val = (ctrl | (mac[0] << 16) | (mac[1] << 24));
+ break;
+ case 1:
+ val = (mac[2] | (mac[3] << 8) | (mac[4] << 16) | (mac[5] << 24));
+ break;
+ default:
+ i = (j - 2) << 2;
+ val = (key[i] | (key[i+1] << 8) | (key[i+2] << 16) | (key[i+3] << 24));
+ break;
+ }
+
+ cam_val[0] = val;
+ cam_val[1] = addr + (unsigned int)j;
+
+ rtw_hal_set_hwreg(padapter, HW_VAR_CAM_WRITE, (u8 *)cam_val);
+ }
+}
+
+void _clear_cam_entry(struct adapter *padapter, u8 entry)
+{
+ unsigned char null_sta[] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
+ unsigned char null_key[] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
+
+ _write_cam(padapter, entry, 0, null_sta, null_key);
+}
+
+inline void write_cam(struct adapter *adapter, u8 id, u16 ctrl, u8 *mac, u8 *key)
+{
+ _write_cam(adapter, id, ctrl, mac, key);
+ write_cam_cache(adapter, id, ctrl, mac, key);
+}
+
+inline void clear_cam_entry(struct adapter *adapter, u8 id)
+{
+ _clear_cam_entry(adapter, id);
+ clear_cam_cache(adapter, id);
+}
+
+inline void write_cam_from_cache(struct adapter *adapter, u8 id)
+{
+ struct dvobj_priv *dvobj = adapter_to_dvobj(adapter);
+ struct cam_ctl_t *cam_ctl = &dvobj->cam_ctl;
+ struct cam_entry_cache cache;
+
+ spin_lock_bh(&cam_ctl->lock);
+ memcpy(&cache, &dvobj->cam_cache[id], sizeof(struct cam_entry_cache));
+ spin_unlock_bh(&cam_ctl->lock);
+
+ _write_cam(adapter, id, cache.ctrl, cache.mac, cache.key);
+}
+
+void write_cam_cache(struct adapter *adapter, u8 id, u16 ctrl, u8 *mac, u8 *key)
+{
+ struct dvobj_priv *dvobj = adapter_to_dvobj(adapter);
+ struct cam_ctl_t *cam_ctl = &dvobj->cam_ctl;
+
+ spin_lock_bh(&cam_ctl->lock);
+
+ dvobj->cam_cache[id].ctrl = ctrl;
+ memcpy(dvobj->cam_cache[id].mac, mac, ETH_ALEN);
+ memcpy(dvobj->cam_cache[id].key, key, 16);
+
+ spin_unlock_bh(&cam_ctl->lock);
+}
+
+void clear_cam_cache(struct adapter *adapter, u8 id)
+{
+ struct dvobj_priv *dvobj = adapter_to_dvobj(adapter);
+ struct cam_ctl_t *cam_ctl = &dvobj->cam_ctl;
+
+ spin_lock_bh(&cam_ctl->lock);
+
+ memset(&(dvobj->cam_cache[id]), 0, sizeof(struct cam_entry_cache));
+
+ spin_unlock_bh(&cam_ctl->lock);
+}
+
+static bool _rtw_camid_is_gk(struct adapter *adapter, u8 cam_id)
+{
+ struct dvobj_priv *dvobj = adapter_to_dvobj(adapter);
+ struct cam_ctl_t *cam_ctl = &dvobj->cam_ctl;
+ bool ret = false;
+
+ if (cam_id >= TOTAL_CAM_ENTRY)
+ goto exit;
+
+ if (!(cam_ctl->bitmap & BIT(cam_id)))
+ goto exit;
+
+ ret = (dvobj->cam_cache[cam_id].ctrl&BIT6)?true:false;
+
+exit:
+ return ret;
+}
+
+static s16 _rtw_camid_search(struct adapter *adapter, u8 *addr, s16 kid)
+{
+ struct dvobj_priv *dvobj = adapter_to_dvobj(adapter);
+ int i;
+ s16 cam_id = -1;
+
+ for (i = 0; i < TOTAL_CAM_ENTRY; i++) {
+ if (addr && memcmp(dvobj->cam_cache[i].mac, addr, ETH_ALEN))
+ continue;
+ if (kid >= 0 && kid != (dvobj->cam_cache[i].ctrl&0x03))
+ continue;
+
+ cam_id = i;
+ break;
+ }
+
+ if (addr)
+ DBG_871X(FUNC_ADPT_FMT" addr:"MAC_FMT" kid:%d, return cam_id:%d\n"
+ , FUNC_ADPT_ARG(adapter), MAC_ARG(addr), kid, cam_id);
+ else
+ DBG_871X(FUNC_ADPT_FMT" addr:%p kid:%d, return cam_id:%d\n"
+ , FUNC_ADPT_ARG(adapter), addr, kid, cam_id);
+
+ return cam_id;
+}
+
+s16 rtw_camid_search(struct adapter *adapter, u8 *addr, s16 kid)
+{
+ struct dvobj_priv *dvobj = adapter_to_dvobj(adapter);
+ struct cam_ctl_t *cam_ctl = &dvobj->cam_ctl;
+ s16 cam_id = -1;
+
+ spin_lock_bh(&cam_ctl->lock);
+ cam_id = _rtw_camid_search(adapter, addr, kid);
+ spin_unlock_bh(&cam_ctl->lock);
+
+ return cam_id;
+}
+
+s16 rtw_camid_alloc(struct adapter *adapter, struct sta_info *sta, u8 kid)
+{
+ struct dvobj_priv *dvobj = adapter_to_dvobj(adapter);
+ struct cam_ctl_t *cam_ctl = &dvobj->cam_ctl;
+ s16 cam_id = -1;
+ struct mlme_ext_info *mlmeinfo;
+
+ spin_lock_bh(&cam_ctl->lock);
+
+ mlmeinfo = &adapter->mlmeextpriv.mlmext_info;
+
+ if ((((mlmeinfo->state&0x03) == WIFI_FW_AP_STATE) || ((mlmeinfo->state&0x03) == WIFI_FW_ADHOC_STATE))
+ && !sta) {
+ /* AP/Ad-hoc mode group key: statically allocated to the default key entry by key ID */
+ if (kid > 3) {
+ DBG_871X_LEVEL(_drv_always_, FUNC_ADPT_FMT" group key with invalid key id:%u\n"
+ , FUNC_ADPT_ARG(adapter), kid);
+ rtw_warn_on(1);
+ goto bitmap_handle;
+ }
+
+ cam_id = kid;
+ } else {
+ int i;
+ u8 *addr = sta?sta->hwaddr:NULL;
+
+ if (!sta) {
+ if (!(mlmeinfo->state & WIFI_FW_ASSOC_SUCCESS)) {
+ /* skip STA mode group key setting before connection (e.g. WEP) because the bssid is not ready yet */
+ goto bitmap_handle;
+ }
+
+ addr = get_bssid(&adapter->mlmepriv);
+ }
+
+ i = _rtw_camid_search(adapter, addr, kid);
+ if (i >= 0) {
+ /* Handle the case where a pairwise and a group key share the same key id: the pairwise key takes precedence, and a group key may only overwrite a group entry (e.g. on rekey) */
+ if (sta || _rtw_camid_is_gk(adapter, i) == true)
+ cam_id = i;
+ else
+ DBG_871X_LEVEL(_drv_always_, FUNC_ADPT_FMT" group key id:%u the same key id as pairwise key\n"
+ , FUNC_ADPT_ARG(adapter), kid);
+ goto bitmap_handle;
+ }
+
+ for (i = 4; i < TOTAL_CAM_ENTRY; i++)
+ if (!(cam_ctl->bitmap & BIT(i)))
+ break;
+
+ if (i == TOTAL_CAM_ENTRY) {
+ if (sta)
+ DBG_871X_LEVEL(_drv_always_, FUNC_ADPT_FMT" pairwise key with "MAC_FMT" id:%u no room\n"
+ , FUNC_ADPT_ARG(adapter), MAC_ARG(sta->hwaddr), kid);
+ else
+ DBG_871X_LEVEL(_drv_always_, FUNC_ADPT_FMT" group key id:%u no room\n"
+ , FUNC_ADPT_ARG(adapter), kid);
+ rtw_warn_on(1);
+ goto bitmap_handle;
+ }
+
+ cam_id = i;
+ }
+
+bitmap_handle:
+ if (cam_id >= 0 && cam_id < 32)
+ cam_ctl->bitmap |= BIT(cam_id);
+
+ spin_unlock_bh(&cam_ctl->lock);
+
+ return cam_id;
+}
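+
+/*
+ * Worked example (illustrative bitmap value): CAM entries 0-3 are used for
+ * AP/ad-hoc group keys (cam_id == kid), while new entries for other keys
+ * take the first free bit from index 4 upward. With cam_ctl->bitmap == 0x1B
+ * (entries 0, 1, 3 and 4 in use) the scan returns cam_id 5 and the
+ * bitmap_handle step then sets BIT(5), giving 0x3B.
+ */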
+
+void rtw_camid_free(struct adapter *adapter, u8 cam_id)
+{
+ struct dvobj_priv *dvobj = adapter_to_dvobj(adapter);
+ struct cam_ctl_t *cam_ctl = &dvobj->cam_ctl;
+
+ spin_lock_bh(&cam_ctl->lock);
+
+ if (cam_id < TOTAL_CAM_ENTRY)
+ cam_ctl->bitmap &= ~(BIT(cam_id));
+
+ spin_unlock_bh(&cam_ctl->lock);
+}
+
+int allocate_fw_sta_entry(struct adapter *padapter)
+{
+ unsigned int mac_id;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+
+ for (mac_id = IBSS_START_MAC_ID; mac_id < NUM_STA; mac_id++) {
+ if (pmlmeinfo->FW_sta_info[mac_id].status == 0) {
+ pmlmeinfo->FW_sta_info[mac_id].status = 1;
+ pmlmeinfo->FW_sta_info[mac_id].retry = 0;
+ break;
+ }
+ }
+
+ return mac_id;
+}
+
+void flush_all_cam_entry(struct adapter *padapter)
+{
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+
+ invalidate_cam_all(padapter);
+ /* clear default key related key search setting */
+ rtw_hal_set_hwreg(padapter, HW_VAR_SEC_DK_CFG, (u8 *)false);
+
+ memset((u8 *)(pmlmeinfo->FW_sta_info), 0, sizeof(pmlmeinfo->FW_sta_info));
+
+}
+
+int WMM_param_handler(struct adapter *padapter, struct ndis_80211_var_ie *pIE)
+{
+ /* struct registry_priv *pregpriv = &padapter->registrypriv; */
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+
+ if (pmlmepriv->qospriv.qos_option == 0) {
+ pmlmeinfo->WMM_enable = 0;
+ return false;
+ }
+
+ if (!memcmp(&(pmlmeinfo->WMM_param), (pIE->data + 6), sizeof(struct WMM_para_element)))
+ return false;
+ else
+ memcpy(&(pmlmeinfo->WMM_param), (pIE->data + 6), sizeof(struct WMM_para_element));
+
+ pmlmeinfo->WMM_enable = 1;
+ return true;
+}
+
+void WMMOnAssocRsp(struct adapter *padapter)
+{
+ u8 ACI, ACM, AIFS, ECWMin, ECWMax, aSifsTime;
+ u8 acm_mask;
+ u16 TXOP;
+ u32 acParm, i;
+ u32 edca[4], inx[4];
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+ struct registry_priv *pregpriv = &padapter->registrypriv;
+
+ acm_mask = 0;
+
+ if (pmlmeext->cur_wireless_mode & WIRELESS_11_24N)
+ aSifsTime = 16;
+ else
+ aSifsTime = 10;
+
+ if (pmlmeinfo->WMM_enable == 0) {
+ padapter->mlmepriv.acm_mask = 0;
+
+ AIFS = aSifsTime + (2 * pmlmeinfo->slotTime);
+
+ if (pmlmeext->cur_wireless_mode & (WIRELESS_11G | WIRELESS_11A)) {
+ ECWMin = 4;
+ ECWMax = 10;
+ } else if (pmlmeext->cur_wireless_mode & WIRELESS_11B) {
+ ECWMin = 5;
+ ECWMax = 10;
+ } else {
+ ECWMin = 4;
+ ECWMax = 10;
+ }
+
+ TXOP = 0;
+ acParm = AIFS | (ECWMin << 8) | (ECWMax << 12) | (TXOP << 16);
+ rtw_hal_set_hwreg(padapter, HW_VAR_AC_PARAM_BE, (u8 *)(&acParm));
+ rtw_hal_set_hwreg(padapter, HW_VAR_AC_PARAM_BK, (u8 *)(&acParm));
+ rtw_hal_set_hwreg(padapter, HW_VAR_AC_PARAM_VI, (u8 *)(&acParm));
+
+ ECWMin = 2;
+ ECWMax = 3;
+ TXOP = 0x2f;
+ acParm = AIFS | (ECWMin << 8) | (ECWMax << 12) | (TXOP << 16);
+ rtw_hal_set_hwreg(padapter, HW_VAR_AC_PARAM_VO, (u8 *)(&acParm));
+ } else {
+ edca[0] = edca[1] = edca[2] = edca[3] = 0;
+
+ for (i = 0; i < 4; i++) {
+ ACI = (pmlmeinfo->WMM_param.ac_param[i].ACI_AIFSN >> 5) & 0x03;
+ ACM = (pmlmeinfo->WMM_param.ac_param[i].ACI_AIFSN >> 4) & 0x01;
+
+ /* AIFS = AIFSN * slot time + SIFS - r2t phy delay */
+ AIFS = (pmlmeinfo->WMM_param.ac_param[i].ACI_AIFSN & 0x0f) * pmlmeinfo->slotTime + aSifsTime;
+
+ ECWMin = (pmlmeinfo->WMM_param.ac_param[i].CW & 0x0f);
+ ECWMax = (pmlmeinfo->WMM_param.ac_param[i].CW & 0xf0) >> 4;
+ TXOP = le16_to_cpu(pmlmeinfo->WMM_param.ac_param[i].TXOP_limit);
+
+ acParm = AIFS | (ECWMin << 8) | (ECWMax << 12) | (TXOP << 16);
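+
+ /*
+ * Worked example (typical AP values, for illustration only): a BE AC
+ * with AIFSN = 3, ECWmin = 4, ECWmax = 10 and TXOP = 0, using short
+ * slot time (9 us) and aSifsTime = 16 as set above for 2.4 GHz 11n,
+ * gives AIFS = 16 + 3 * 9 = 43 = 0x2B and
+ * acParm = 0x2B | (4 << 8) | (10 << 12) | (0 << 16) = 0x0000A42B.
+ */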
+
+ switch (ACI) {
+ case 0x0:
+ rtw_hal_set_hwreg(padapter, HW_VAR_AC_PARAM_BE, (u8 *)(&acParm));
+ acm_mask |= (ACM ? BIT(1):0);
+ edca[XMIT_BE_QUEUE] = acParm;
+ break;
+
+ case 0x1:
+ rtw_hal_set_hwreg(padapter, HW_VAR_AC_PARAM_BK, (u8 *)(&acParm));
+ /* acm_mask |= (ACM? BIT(0):0); */
+ edca[XMIT_BK_QUEUE] = acParm;
+ break;
+
+ case 0x2:
+ rtw_hal_set_hwreg(padapter, HW_VAR_AC_PARAM_VI, (u8 *)(&acParm));
+ acm_mask |= (ACM ? BIT(2):0);
+ edca[XMIT_VI_QUEUE] = acParm;
+ break;
+
+ case 0x3:
+ rtw_hal_set_hwreg(padapter, HW_VAR_AC_PARAM_VO, (u8 *)(&acParm));
+ acm_mask |= (ACM ? BIT(3):0);
+ edca[XMIT_VO_QUEUE] = acParm;
+ break;
+ }
+
+ DBG_871X("WMM(%x): %x, %x\n", ACI, ACM, acParm);
+ }
+
+ if (padapter->registrypriv.acm_method == 1)
+ rtw_hal_set_hwreg(padapter, HW_VAR_ACM_CTRL, (u8 *)(&acm_mask));
+ else
+ padapter->mlmepriv.acm_mask = acm_mask;
+
+ inx[0] = 0; inx[1] = 1; inx[2] = 2; inx[3] = 3;
+
+ if (pregpriv->wifi_spec == 1) {
+ u32 j, tmp, change_inx = false;
+
+ /* entry indx: 0->vo, 1->vi, 2->be, 3->bk. */
+ for (i = 0; i < 4; i++) {
+ for (j = i+1; j < 4; j++) {
+ /* compare CW and AIFS */
+ if ((edca[j] & 0xFFFF) < (edca[i] & 0xFFFF)) {
+ change_inx = true;
+ } else if ((edca[j] & 0xFFFF) == (edca[i] & 0xFFFF)) {
+ /* compare TXOP */
+ if ((edca[j] >> 16) > (edca[i] >> 16))
+ change_inx = true;
+ }
+
+ if (change_inx) {
+ tmp = edca[i];
+ edca[i] = edca[j];
+ edca[j] = tmp;
+
+ tmp = inx[i];
+ inx[i] = inx[j];
+ inx[j] = tmp;
+
+ change_inx = false;
+ }
+ }
+ }
+ }
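+
+ /*
+ * For example (illustrative values, packed as in the example above):
+ * a VO entry of 0x002F3222 (AIFSN 2, ECWmin 2, ECWmax 3, TXOP 0x2F)
+ * and a BE entry of 0x0000A42B compare as 0x3222 < 0xA42B on the low
+ * 16 bits, so VO is placed ahead of BE in the wmm_para_seq[]
+ * assignment below.
+ */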
+
+ for (i = 0; i < 4; i++) {
+ pxmitpriv->wmm_para_seq[i] = inx[i];
+ DBG_871X("wmm_para_seq(%d): %d\n", i, pxmitpriv->wmm_para_seq[i]);
+ }
+ }
+}
+
+static void bwmode_update_check(struct adapter *padapter, struct ndis_80211_var_ie *pIE)
+{
+ unsigned char new_bwmode;
+ unsigned char new_ch_offset;
+ struct HT_info_element *pHT_info;
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct registry_priv *pregistrypriv = &padapter->registrypriv;
+ struct ht_priv *phtpriv = &pmlmepriv->htpriv;
+ u8 cbw40_enable = 0;
+
+ if (!pIE)
+ return;
+
+ if (phtpriv->ht_option == false)
+ return;
+
+ if (pmlmeext->cur_bwmode >= CHANNEL_WIDTH_80)
+ return;
+
+ if (pIE->Length > sizeof(struct HT_info_element))
+ return;
+
+ pHT_info = (struct HT_info_element *)pIE->data;
+
+ if (pmlmeext->cur_channel > 14) {
+ if ((pregistrypriv->bw_mode & 0xf0) > 0)
+ cbw40_enable = 1;
+ } else {
+ if ((pregistrypriv->bw_mode & 0x0f) > 0)
+ cbw40_enable = 1;
+ }
+
+ if ((pHT_info->infos[0] & BIT(2)) && cbw40_enable) {
+ new_bwmode = CHANNEL_WIDTH_40;
+
+ switch (pHT_info->infos[0] & 0x3) {
+ case 1:
+ new_ch_offset = HAL_PRIME_CHNL_OFFSET_LOWER;
+ break;
+
+ case 3:
+ new_ch_offset = HAL_PRIME_CHNL_OFFSET_UPPER;
+ break;
+
+ default:
+ new_bwmode = CHANNEL_WIDTH_20;
+ new_ch_offset = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
+ break;
+ }
+ } else {
+ new_bwmode = CHANNEL_WIDTH_20;
+ new_ch_offset = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
+ }
+
+
+ if ((new_bwmode != pmlmeext->cur_bwmode) || (new_ch_offset != pmlmeext->cur_ch_offset)) {
+ pmlmeinfo->bwmode_updated = true;
+
+ pmlmeext->cur_bwmode = new_bwmode;
+ pmlmeext->cur_ch_offset = new_ch_offset;
+
+ /* update HT info also */
+ HT_info_handler(padapter, pIE);
+ } else
+ pmlmeinfo->bwmode_updated = false;
+
+
+ if (true == pmlmeinfo->bwmode_updated) {
+ struct sta_info *psta;
+ struct wlan_bssid_ex *cur_network = &(pmlmeinfo->network);
+ struct sta_priv *pstapriv = &padapter->stapriv;
+
+ /* set_channel_bwmode(padapter, pmlmeext->cur_channel, pmlmeext->cur_ch_offset, pmlmeext->cur_bwmode); */
+
+
+ /* update ap's stainfo */
+ psta = rtw_get_stainfo(pstapriv, cur_network->MacAddress);
+ if (psta) {
+ struct ht_priv *phtpriv_sta = &psta->htpriv;
+
+ if (phtpriv_sta->ht_option) {
+ /* bwmode */
+ psta->bw_mode = pmlmeext->cur_bwmode;
+ phtpriv_sta->ch_offset = pmlmeext->cur_ch_offset;
+ } else {
+ psta->bw_mode = CHANNEL_WIDTH_20;
+ phtpriv_sta->ch_offset = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
+ }
+
+ rtw_dm_ra_mask_wk_cmd(padapter, (u8 *)psta);
+ }
+ }
+}
+
+void HT_caps_handler(struct adapter *padapter, struct ndis_80211_var_ie *pIE)
+{
+ unsigned int i;
+ u8 rf_type;
+ u8 max_AMPDU_len, min_MPDU_spacing;
+ u8 cur_ldpc_cap = 0, cur_stbc_cap = 0;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct ht_priv *phtpriv = &pmlmepriv->htpriv;
+
+ if (pIE == NULL)
+ return;
+
+ if (phtpriv->ht_option == false)
+ return;
+
+ pmlmeinfo->HT_caps_enable = 1;
+
+ for (i = 0; i < (pIE->Length); i++) {
+ if (i != 2) {
+ /* Commented by Albert 2010/07/12 */
+ /* Got the endian issue here. */
+ pmlmeinfo->HT_caps.u.HT_cap[i] &= (pIE->data[i]);
+ } else {
+ /* modify from fw by Thomas 2010/11/17 */
+ if ((pmlmeinfo->HT_caps.u.HT_cap_element.AMPDU_para & 0x3) > (pIE->data[i] & 0x3))
+ max_AMPDU_len = (pIE->data[i] & 0x3);
+ else
+ max_AMPDU_len = (pmlmeinfo->HT_caps.u.HT_cap_element.AMPDU_para & 0x3);
+
+ if ((pmlmeinfo->HT_caps.u.HT_cap_element.AMPDU_para & 0x1c) > (pIE->data[i] & 0x1c))
+ min_MPDU_spacing = (pmlmeinfo->HT_caps.u.HT_cap_element.AMPDU_para & 0x1c);
+ else
+ min_MPDU_spacing = (pIE->data[i] & 0x1c);
+
+ pmlmeinfo->HT_caps.u.HT_cap_element.AMPDU_para = max_AMPDU_len | min_MPDU_spacing;
+ }
+ }
+ rtw_hal_get_hwreg(padapter, HW_VAR_RF_TYPE, (u8 *)(&rf_type));
+
+ /* update the MCS set */
+ for (i = 0; i < 16; i++)
+ pmlmeinfo->HT_caps.u.HT_cap_element.MCS_rate[i] &= pmlmeext->default_supported_mcs_set[i];
+
+ /* update the MCS rates */
+ switch (rf_type) {
+ case RF_1T1R:
+ case RF_1T2R:
+ set_mcs_rate_by_mask(pmlmeinfo->HT_caps.u.HT_cap_element.MCS_rate, MCS_RATE_1R);
+ break;
+ case RF_2T2R:
+ default:
+#ifdef CONFIG_DISABLE_MCS13TO15
+ if (pmlmeext->cur_bwmode == CHANNEL_WIDTH_40 && pregistrypriv->wifi_spec != 1)
+ set_mcs_rate_by_mask(pmlmeinfo->HT_caps.u.HT_cap_element.MCS_rate, MCS_RATE_2R_13TO15_OFF);
+ else
+ set_mcs_rate_by_mask(pmlmeinfo->HT_caps.u.HT_cap_element.MCS_rate, MCS_RATE_2R);
+#else /* CONFIG_DISABLE_MCS13TO15 */
+ set_mcs_rate_by_mask(pmlmeinfo->HT_caps.u.HT_cap_element.MCS_rate, MCS_RATE_2R);
+#endif /* CONFIG_DISABLE_MCS13TO15 */
+ }
+
+ if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
+ /* Config STBC setting */
+ if (TEST_FLAG(phtpriv->stbc_cap, STBC_HT_ENABLE_TX) && GET_HT_CAPABILITY_ELE_TX_STBC(pIE->data)) {
+ SET_FLAG(cur_stbc_cap, STBC_HT_ENABLE_TX);
+ DBG_871X("Enable HT Tx STBC !\n");
+ }
+ phtpriv->stbc_cap = cur_stbc_cap;
+ } else {
+ /* Config LDPC Coding Capability */
+ if (TEST_FLAG(phtpriv->ldpc_cap, LDPC_HT_ENABLE_TX) && GET_HT_CAPABILITY_ELE_LDPC_CAP(pIE->data)) {
+ SET_FLAG(cur_ldpc_cap, (LDPC_HT_ENABLE_TX | LDPC_HT_CAP_TX));
+ DBG_871X("Enable HT Tx LDPC!\n");
+ }
+ phtpriv->ldpc_cap = cur_ldpc_cap;
+
+ /* Config STBC setting */
+ if (TEST_FLAG(phtpriv->stbc_cap, STBC_HT_ENABLE_TX) && GET_HT_CAPABILITY_ELE_RX_STBC(pIE->data)) {
+ SET_FLAG(cur_stbc_cap, (STBC_HT_ENABLE_TX | STBC_HT_CAP_TX));
+ DBG_871X("Enable HT Tx STBC!\n");
+ }
+ phtpriv->stbc_cap = cur_stbc_cap;
+ }
+}
+
+void HT_info_handler(struct adapter *padapter, struct ndis_80211_var_ie *pIE)
+{
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct ht_priv *phtpriv = &pmlmepriv->htpriv;
+
+ if (pIE == NULL)
+ return;
+
+ if (phtpriv->ht_option == false)
+ return;
+
+
+ if (pIE->Length > sizeof(struct HT_info_element))
+ return;
+
+ pmlmeinfo->HT_info_enable = 1;
+ memcpy(&(pmlmeinfo->HT_info), pIE->data, pIE->Length);
+
+ return;
+}
+
+void HTOnAssocRsp(struct adapter *padapter)
+{
+ unsigned char max_AMPDU_len;
+ unsigned char min_MPDU_spacing;
+ /* struct registry_priv *pregpriv = &padapter->registrypriv; */
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+
+ DBG_871X("%s\n", __func__);
+
+ if ((pmlmeinfo->HT_info_enable) && (pmlmeinfo->HT_caps_enable)) {
+ pmlmeinfo->HT_enable = 1;
+ } else {
+ pmlmeinfo->HT_enable = 0;
+ /* set_channel_bwmode(padapter, pmlmeext->cur_channel, pmlmeext->cur_ch_offset, pmlmeext->cur_bwmode); */
+ return;
+ }
+
+ /* handle A-MPDU parameter field */
+ /*
+ AMPDU_para [1:0]:Max AMPDU Len => 0:8k , 1:16k, 2:32k, 3:64k
+ AMPDU_para [4:2]:Min MPDU Start Spacing
+ */
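+ /*
+ * e.g. (illustrative value) AMPDU_para == 0x16: bits [1:0] = 2 -> 32K max
+ * A-MPDU length, bits [4:2] = (0x16 & 0x1c) >> 2 = 5 -> 4 us minimum MPDU
+ * start spacing.
+ */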
+ max_AMPDU_len = pmlmeinfo->HT_caps.u.HT_cap_element.AMPDU_para & 0x03;
+
+ min_MPDU_spacing = (pmlmeinfo->HT_caps.u.HT_cap_element.AMPDU_para & 0x1c) >> 2;
+
+ rtw_hal_set_hwreg(padapter, HW_VAR_AMPDU_MIN_SPACE, (u8 *)(&min_MPDU_spacing));
+
+ rtw_hal_set_hwreg(padapter, HW_VAR_AMPDU_FACTOR, (u8 *)(&max_AMPDU_len));
+}
+
+void ERP_IE_handler(struct adapter *padapter, struct ndis_80211_var_ie *pIE)
+{
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+
+ if (pIE->Length > 1)
+ return;
+
+ pmlmeinfo->ERP_enable = 1;
+ memcpy(&(pmlmeinfo->ERP_IE), pIE->data, pIE->Length);
+}
+
+void VCS_update(struct adapter *padapter, struct sta_info *psta)
+{
+ struct registry_priv *pregpriv = &padapter->registrypriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+
+ switch (pregpriv->vrtl_carrier_sense) {/* 0:off 1:on 2:auto */
+ case 0: /* off */
+ psta->rtsen = 0;
+ psta->cts2self = 0;
+ break;
+
+ case 1: /* on */
+ if (pregpriv->vcs_type == 1) { /* 1:RTS/CTS 2:CTS to self */
+ psta->rtsen = 1;
+ psta->cts2self = 0;
+ } else {
+ psta->rtsen = 0;
+ psta->cts2self = 1;
+ }
+ break;
+
+ case 2: /* auto */
+ default:
+ if ((pmlmeinfo->ERP_enable) && (pmlmeinfo->ERP_IE & BIT(1))) {
+ if (pregpriv->vcs_type == 1) {
+ psta->rtsen = 1;
+ psta->cts2self = 0;
+ } else {
+ psta->rtsen = 0;
+ psta->cts2self = 1;
+ }
+ } else {
+ psta->rtsen = 0;
+ psta->cts2self = 0;
+ }
+ break;
+ }
+}
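+
+/*
+ * In the auto case the decision follows the ERP IE: BIT(1) of the stored
+ * ERP_IE octet is the 802.11 Use_Protection flag, so e.g. (illustrative
+ * value) ERP_IE == 0x02 with vcs_type == 1 selects RTS/CTS for the station.
+ */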
+
+void update_ldpc_stbc_cap(struct sta_info *psta)
+{
+ if (psta->htpriv.ht_option) {
+ if (TEST_FLAG(psta->htpriv.ldpc_cap, LDPC_HT_ENABLE_TX))
+ psta->ldpc = 1;
+
+ if (TEST_FLAG(psta->htpriv.stbc_cap, STBC_HT_ENABLE_TX))
+ psta->stbc = 1;
+ } else {
+ psta->ldpc = 0;
+ psta->stbc = 0;
+ }
+}
+
+int rtw_check_bcn_info(struct adapter *Adapter, u8 *pframe, u32 packet_len)
+{
+ unsigned int len;
+ unsigned char *p;
+ unsigned short val16, subtype;
+ struct wlan_network *cur_network = &(Adapter->mlmepriv.cur_network);
+ /* u8 wpa_ie[255], rsn_ie[255]; */
+ u16 wpa_len = 0, rsn_len = 0;
+ u8 encryp_protocol = 0;
+ struct wlan_bssid_ex *bssid;
+ int group_cipher = 0, pairwise_cipher = 0, is_8021x = 0;
+ unsigned char *pbuf;
+ u32 wpa_ielen = 0;
+ u8 *pbssid = GetAddr3Ptr(pframe);
+ u32 hidden_ssid = 0;
+ struct HT_info_element *pht_info = NULL;
+ struct rtw_ieee80211_ht_cap *pht_cap = NULL;
+ u32 bcn_channel;
+ unsigned short ht_cap_info;
+ unsigned char ht_info_infos_0;
+ struct mlme_priv *pmlmepriv = &Adapter->mlmepriv;
+
+ if (is_client_associated_to_ap(Adapter) == false)
+ return true;
+
+ len = packet_len - sizeof(struct ieee80211_hdr_3addr);
+
+ if (len > MAX_IE_SZ) {
+ DBG_871X("%s IE too long for survey event\n", __func__);
+ return _FAIL;
+ }
+
+ if (memcmp(cur_network->network.MacAddress, pbssid, 6)) {
+ DBG_871X("Oops: rtw_check_network_encrypt linked but recv other bssid bcn\n" MAC_FMT MAC_FMT,
+ MAC_ARG(pbssid), MAC_ARG(cur_network->network.MacAddress));
+ return true;
+ }
+
+ bssid = (struct wlan_bssid_ex *)rtw_zmalloc(sizeof(struct wlan_bssid_ex));
+ if (bssid == NULL) {
+ DBG_871X("%s rtw_zmalloc fail !!!\n", __func__);
+ return true;
+ }
+
+ if ((pmlmepriv->timeBcnInfoChkStart != 0) && (jiffies_to_msecs(jiffies - pmlmepriv->timeBcnInfoChkStart) > DISCONNECT_BY_CHK_BCN_FAIL_OBSERV_PERIOD_IN_MS)) {
+ pmlmepriv->timeBcnInfoChkStart = 0;
+ pmlmepriv->NumOfBcnInfoChkFail = 0;
+ }
+
+ subtype = GetFrameSubType(pframe) >> 4;
+
+ if (subtype == WIFI_BEACON)
+ bssid->Reserved[0] = 1;
+
+ bssid->Length = sizeof(struct wlan_bssid_ex) - MAX_IE_SZ + len;
+
+ /* below is to copy the information element */
+ bssid->IELength = len;
+ memcpy(bssid->IEs, (pframe + sizeof(struct ieee80211_hdr_3addr)), bssid->IELength);
+
+ /* check bw and channel offset */
+ /* parsing HT_CAP_IE */
+ p = rtw_get_ie(bssid->IEs + _FIXED_IE_LENGTH_, _HT_CAPABILITY_IE_, &len, bssid->IELength - _FIXED_IE_LENGTH_);
+ if (p && len > 0) {
+ pht_cap = (struct rtw_ieee80211_ht_cap *)(p + 2);
+ ht_cap_info = le16_to_cpu(pht_cap->cap_info);
+ } else {
+ ht_cap_info = 0;
+ }
+ /* parsing HT_INFO_IE */
+ p = rtw_get_ie(bssid->IEs + _FIXED_IE_LENGTH_, _HT_ADD_INFO_IE_, &len, bssid->IELength - _FIXED_IE_LENGTH_);
+ if (p && len > 0) {
+ pht_info = (struct HT_info_element *)(p + 2);
+ ht_info_infos_0 = pht_info->infos[0];
+ } else {
+ ht_info_infos_0 = 0;
+ }
+ if (ht_cap_info != cur_network->BcnInfo.ht_cap_info ||
+ ((ht_info_infos_0&0x03) != (cur_network->BcnInfo.ht_info_infos_0&0x03))) {
+ DBG_871X("%s bcn now: ht_cap_info:%x ht_info_infos_0:%x\n", __func__,
+ ht_cap_info, ht_info_infos_0);
+ DBG_871X("%s bcn link: ht_cap_info:%x ht_info_infos_0:%x\n", __func__,
+ cur_network->BcnInfo.ht_cap_info, cur_network->BcnInfo.ht_info_infos_0);
+ DBG_871X("%s bw mode change\n", __func__);
+ {
+ /* bcn_info_update */
+ cur_network->BcnInfo.ht_cap_info = ht_cap_info;
+ cur_network->BcnInfo.ht_info_infos_0 = ht_info_infos_0;
+ /* TODO: check whether the related BB registers need to be modified */
+ }
+ /* goto _mismatch; */
+ }
+
+ /* Checking for channel */
+ p = rtw_get_ie(bssid->IEs + _FIXED_IE_LENGTH_, _DSSET_IE_, &len, bssid->IELength - _FIXED_IE_LENGTH_);
+ if (p) {
+ bcn_channel = *(p + 2);
+ } else {/* In 5G, some APs do not carry a DSSET IE, so check the HT info IE for the channel */
+ rtw_get_ie(bssid->IEs + _FIXED_IE_LENGTH_, _HT_ADD_INFO_IE_, &len, bssid->IELength - _FIXED_IE_LENGTH_);
+ if (pht_info) {
+ bcn_channel = pht_info->primary_channel;
+ } else { /* we don't find channel IE, so don't check it */
+ /* DBG_871X("Oops: %s we don't find channel IE, so don't check it\n", __func__); */
+ bcn_channel = Adapter->mlmeextpriv.cur_channel;
+ }
+ }
+ if (bcn_channel != Adapter->mlmeextpriv.cur_channel) {
+ DBG_871X("%s beacon channel:%d cur channel:%d disconnect\n", __func__,
+ bcn_channel, Adapter->mlmeextpriv.cur_channel);
+ goto _mismatch;
+ }
+
+ /* checking SSID */
+ p = rtw_get_ie(bssid->IEs + _FIXED_IE_LENGTH_, _SSID_IE_, &len, bssid->IELength - _FIXED_IE_LENGTH_);
+ if (p == NULL) {
+ DBG_871X("%s marc: cannot find SSID for survey event\n", __func__);
+ hidden_ssid = true;
+ } else {
+ hidden_ssid = false;
+ }
+
+ if (p && !hidden_ssid && (*(p + 1))) {
+ memcpy(bssid->Ssid.Ssid, (p + 2), *(p + 1));
+ bssid->Ssid.SsidLength = *(p + 1);
+ } else {
+ bssid->Ssid.SsidLength = 0;
+ bssid->Ssid.Ssid[0] = '\0';
+ }
+
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("%s bssid.Ssid.Ssid:%s bssid.Ssid.SsidLength:%d "
+ "cur_network->network.Ssid.Ssid:%s len:%d\n", __func__, bssid->Ssid.Ssid,
+ bssid->Ssid.SsidLength, cur_network->network.Ssid.Ssid,
+ cur_network->network.Ssid.SsidLength));
+
+ if (memcmp(bssid->Ssid.Ssid, cur_network->network.Ssid.Ssid, 32) ||
+ bssid->Ssid.SsidLength != cur_network->network.Ssid.SsidLength) {
+ if (bssid->Ssid.Ssid[0] != '\0' && bssid->Ssid.SsidLength != 0) { /* not hidden ssid */
+ DBG_871X("%s(), SSID is not match\n", __func__);
+ goto _mismatch;
+ }
+ }
+
+ /* check encryption info */
+ val16 = rtw_get_capability((struct wlan_bssid_ex *)bssid);
+
+ if (val16 & BIT(4))
+ bssid->Privacy = 1;
+ else
+ bssid->Privacy = 0;
+
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_,
+ ("%s(): cur_network->network.Privacy is %d, bssid.Privacy is %d\n",
+ __func__, cur_network->network.Privacy, bssid->Privacy));
+ if (cur_network->network.Privacy != bssid->Privacy) {
+ DBG_871X("%s(), privacy is not match\n", __func__);
+ goto _mismatch;
+ }
+
+ rtw_get_sec_ie(bssid->IEs, bssid->IELength, NULL, &rsn_len, NULL, &wpa_len);
+
+ if (rsn_len > 0) {
+ encryp_protocol = ENCRYP_PROTOCOL_WPA2;
+ } else if (wpa_len > 0) {
+ encryp_protocol = ENCRYP_PROTOCOL_WPA;
+ } else {
+ if (bssid->Privacy)
+ encryp_protocol = ENCRYP_PROTOCOL_WEP;
+ }
+
+ if (cur_network->BcnInfo.encryp_protocol != encryp_protocol) {
+ DBG_871X("%s(): enctyp is not match\n", __func__);
+ goto _mismatch;
+ }
+
+ if (encryp_protocol == ENCRYP_PROTOCOL_WPA || encryp_protocol == ENCRYP_PROTOCOL_WPA2) {
+ pbuf = rtw_get_wpa_ie(&bssid->IEs[12], &wpa_ielen, bssid->IELength-12);
+ if (pbuf && (wpa_ielen > 0)) {
+ if (_SUCCESS == rtw_parse_wpa_ie(pbuf, wpa_ielen+2, &group_cipher, &pairwise_cipher, &is_8021x)) {
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_,
+ ("%s pnetwork->pairwise_cipher: %d, group_cipher is %d, is_8021x is %d\n", __func__,
+ pairwise_cipher, group_cipher, is_8021x));
+ }
+ } else {
+ pbuf = rtw_get_wpa2_ie(&bssid->IEs[12], &wpa_ielen, bssid->IELength-12);
+
+ if (pbuf && (wpa_ielen > 0)) {
+ if (_SUCCESS == rtw_parse_wpa2_ie(pbuf, wpa_ielen+2, &group_cipher, &pairwise_cipher, &is_8021x)) {
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_,
+ ("%s pnetwork->pairwise_cipher: %d, pnetwork->group_cipher is %d, is_802x is %d\n",
+ __func__, pairwise_cipher, group_cipher, is_8021x));
+ }
+ }
+ }
+
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_,
+ ("%s cur_network->group_cipher is %d: %d\n", __func__, cur_network->BcnInfo.group_cipher, group_cipher));
+ if (pairwise_cipher != cur_network->BcnInfo.pairwise_cipher || group_cipher != cur_network->BcnInfo.group_cipher) {
+ DBG_871X("%s pairwise_cipher(%x:%x) or group_cipher(%x:%x) is not match\n", __func__,
+ pairwise_cipher, cur_network->BcnInfo.pairwise_cipher,
+ group_cipher, cur_network->BcnInfo.group_cipher);
+ goto _mismatch;
+ }
+
+ if (is_8021x != cur_network->BcnInfo.is_8021x) {
+ DBG_871X("%s authentication is not match\n", __func__);
+ goto _mismatch;
+ }
+ }
+
+ kfree((u8 *)bssid);
+ return _SUCCESS;
+
+_mismatch:
+ kfree((u8 *)bssid);
+
+ if (pmlmepriv->NumOfBcnInfoChkFail == 0)
+ pmlmepriv->timeBcnInfoChkStart = jiffies;
+
+ pmlmepriv->NumOfBcnInfoChkFail++;
+ DBG_871X("%s by "ADPT_FMT" - NumOfChkFail = %d (SeqNum of this Beacon frame = %d).\n", __func__, ADPT_ARG(Adapter), pmlmepriv->NumOfBcnInfoChkFail, GetSequence(pframe));
+
+ if ((pmlmepriv->timeBcnInfoChkStart != 0) && (jiffies_to_msecs(jiffies - pmlmepriv->timeBcnInfoChkStart) <= DISCONNECT_BY_CHK_BCN_FAIL_OBSERV_PERIOD_IN_MS)
+ && (pmlmepriv->NumOfBcnInfoChkFail >= DISCONNECT_BY_CHK_BCN_FAIL_THRESHOLD)) {
+ DBG_871X("%s by "ADPT_FMT" - NumOfChkFail = %d >= threshold : %d (in %d ms), return FAIL.\n", __func__, ADPT_ARG(Adapter), pmlmepriv->NumOfBcnInfoChkFail,
+ DISCONNECT_BY_CHK_BCN_FAIL_THRESHOLD, jiffies_to_msecs(jiffies - pmlmepriv->timeBcnInfoChkStart));
+ pmlmepriv->timeBcnInfoChkStart = 0;
+ pmlmepriv->NumOfBcnInfoChkFail = 0;
+ return _FAIL;
+ }
+
+ return _SUCCESS;
+}
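+
+/*
+ * Example of the mismatch window above (the actual limits come from the
+ * DISCONNECT_BY_CHK_BCN_FAIL_* macros; 3 failures / 1000 ms are assumed
+ * here for illustration): three mismatching beacons within one second make
+ * rtw_check_bcn_info() return _FAIL, while isolated mismatches only bump
+ * NumOfBcnInfoChkFail and still return _SUCCESS.
+ */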
+
+void update_beacon_info(struct adapter *padapter, u8 *pframe, uint pkt_len, struct sta_info *psta)
+{
+ unsigned int i;
+ unsigned int len;
+ struct ndis_80211_var_ie *pIE;
+
+ len = pkt_len - (_BEACON_IE_OFFSET_ + WLAN_HDR_A3_LEN);
+
+ for (i = 0; i < len;) {
+ pIE = (struct ndis_80211_var_ie *)(pframe + (_BEACON_IE_OFFSET_ + WLAN_HDR_A3_LEN) + i);
+
+ switch (pIE->ElementID) {
+ case _VENDOR_SPECIFIC_IE_:
+ /* update the WMM parameter set while receiving a beacon */
+ if (!memcmp(pIE->data, WMM_PARA_OUI, 6) && pIE->Length == WLAN_WMM_LEN) /* WMM */
+ if (WMM_param_handler(padapter, pIE))
+ report_wmm_edca_update(padapter);
+
+ break;
+
+ case _HT_EXTRA_INFO_IE_: /* HT info */
+ /* HT_info_handler(padapter, pIE); */
+ bwmode_update_check(padapter, pIE);
+ break;
+
+ case _ERPINFO_IE_:
+ ERP_IE_handler(padapter, pIE);
+ VCS_update(padapter, psta);
+ break;
+
+ default:
+ break;
+ }
+
+ i += (pIE->Length + 2);
+ }
+}
+
+unsigned int is_ap_in_tkip(struct adapter *padapter)
+{
+ u32 i;
+ struct ndis_80211_var_ie *pIE;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct wlan_bssid_ex *cur_network = &(pmlmeinfo->network);
+
+ if (rtw_get_capability((struct wlan_bssid_ex *)cur_network) & WLAN_CAPABILITY_PRIVACY) {
+ for (i = sizeof(struct ndis_802_11_fix_ie); i < pmlmeinfo->network.IELength;) {
+ pIE = (struct ndis_80211_var_ie *)(pmlmeinfo->network.IEs + i);
+
+ switch (pIE->ElementID) {
+ case _VENDOR_SPECIFIC_IE_:
+ if ((!memcmp(pIE->data, RTW_WPA_OUI, 4)) && (!memcmp((pIE->data + 12), WPA_TKIP_CIPHER, 4)))
+ return true;
+
+ break;
+
+ case _RSN_IE_2_:
+ if (!memcmp((pIE->data + 8), RSN_TKIP_CIPHER, 4))
+ return true;
+
+ break;
+
+ default:
+ break;
+ }
+
+ i += (pIE->Length + 2);
+ }
+
+ return false;
+ } else
+ return false;
+
+}
+
+int support_short_GI(struct adapter *padapter, struct HT_caps_element *pHT_caps, u8 bwmode)
+{
+ unsigned char bit_offset;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+
+ if (!(pmlmeinfo->HT_enable))
+ return _FAIL;
+
+ bit_offset = (bwmode & CHANNEL_WIDTH_40) ? 6 : 5;
+
+ if (le16_to_cpu(pHT_caps->u.HT_cap_element.HT_caps_info) & (0x1 << bit_offset))
+ return _SUCCESS;
+ else
+ return _FAIL;
+}
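+
+/*
+ * bit 5 and bit 6 of the HT Capabilities Info field are the short GI bits
+ * for 20 MHz and 40 MHz respectively, so e.g. (illustrative value)
+ * HT_caps_info == 0x0062 advertises short GI for both bandwidths.
+ */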
+
+unsigned char get_highest_rate_idx(u32 mask)
+{
+ int i;
+ unsigned char rate_idx = 0;
+
+ for (i = 31; i >= 0; i--) {
+ if (mask & BIT(i)) {
+ rate_idx = i;
+ break;
+ }
+ }
+
+ return rate_idx;
+}
+
+void Update_RA_Entry(struct adapter *padapter, struct sta_info *psta)
+{
+ rtw_hal_update_ra_mask(psta, 0);
+}
+
+void enable_rate_adaptive(struct adapter *padapter, struct sta_info *psta);
+void enable_rate_adaptive(struct adapter *padapter, struct sta_info *psta)
+{
+ Update_RA_Entry(padapter, psta);
+}
+
+void set_sta_rate(struct adapter *padapter, struct sta_info *psta)
+{
+ /* rate adaptive */
+ enable_rate_adaptive(padapter, psta);
+}
+
+unsigned char check_assoc_AP(u8 *pframe, uint len)
+{
+ unsigned int i;
+ struct ndis_80211_var_ie *pIE;
+
+ for (i = sizeof(struct ndis_802_11_fix_ie); i < len;) {
+ pIE = (struct ndis_80211_var_ie *)(pframe + i);
+
+ switch (pIE->ElementID) {
+ case _VENDOR_SPECIFIC_IE_:
+ if ((!memcmp(pIE->data, ARTHEROS_OUI1, 3)) || (!memcmp(pIE->data, ARTHEROS_OUI2, 3))) {
+ DBG_871X("link to Artheros AP\n");
+ return HT_IOT_PEER_ATHEROS;
+ } else if ((!memcmp(pIE->data, BROADCOM_OUI1, 3))
+ || (!memcmp(pIE->data, BROADCOM_OUI2, 3))
+ || (!memcmp(pIE->data, BROADCOM_OUI3, 3))) {
+ DBG_871X("link to Broadcom AP\n");
+ return HT_IOT_PEER_BROADCOM;
+ } else if (!memcmp(pIE->data, MARVELL_OUI, 3)) {
+ DBG_871X("link to Marvell AP\n");
+ return HT_IOT_PEER_MARVELL;
+ } else if (!memcmp(pIE->data, RALINK_OUI, 3)) {
+ DBG_871X("link to Ralink AP\n");
+ return HT_IOT_PEER_RALINK;
+ } else if (!memcmp(pIE->data, CISCO_OUI, 3)) {
+ DBG_871X("link to Cisco AP\n");
+ return HT_IOT_PEER_CISCO;
+ } else if (!memcmp(pIE->data, REALTEK_OUI, 3)) {
+ u32 Vender = HT_IOT_PEER_REALTEK;
+
+ if (pIE->Length >= 5) {
+ if (pIE->data[4] == 1)
+ /* if (pIE->data[5] & RT_HT_CAP_USE_LONG_PREAMBLE) */
+ /* bssDesc->BssHT.RT2RT_HT_Mode |= RT_HT_CAP_USE_LONG_PREAMBLE; */
+ if (pIE->data[5] & RT_HT_CAP_USE_92SE)
+ /* bssDesc->BssHT.RT2RT_HT_Mode |= RT_HT_CAP_USE_92SE; */
+ Vender = HT_IOT_PEER_REALTEK_92SE;
+
+ if (pIE->data[5] & RT_HT_CAP_USE_SOFTAP)
+ Vender = HT_IOT_PEER_REALTEK_SOFTAP;
+
+ if (pIE->data[4] == 2) {
+ if (pIE->data[6] & RT_HT_CAP_USE_JAGUAR_BCUT) {
+ Vender = HT_IOT_PEER_REALTEK_JAGUAR_BCUTAP;
+ DBG_871X("link to Realtek JAGUAR_BCUTAP\n");
+ }
+ if (pIE->data[6] & RT_HT_CAP_USE_JAGUAR_CCUT) {
+ Vender = HT_IOT_PEER_REALTEK_JAGUAR_CCUTAP;
+ DBG_871X("link to Realtek JAGUAR_CCUTAP\n");
+ }
+ }
+ }
+
+ DBG_871X("link to Realtek AP\n");
+ return Vender;
+ } else if (!memcmp(pIE->data, AIRGOCAP_OUI, 3)) {
+ DBG_871X("link to Airgo Cap\n");
+ return HT_IOT_PEER_AIRGO;
+ } else
+ break;
+
+ default:
+ break;
+ }
+
+ i += (pIE->Length + 2);
+ }
+
+ DBG_871X("link to new AP\n");
+ return HT_IOT_PEER_UNKNOWN;
+}
+
+void update_IOT_info(struct adapter *padapter)
+{
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+
+ switch (pmlmeinfo->assoc_AP_vendor) {
+ case HT_IOT_PEER_MARVELL:
+ pmlmeinfo->turboMode_cts2self = 1;
+ pmlmeinfo->turboMode_rtsen = 0;
+ break;
+
+ case HT_IOT_PEER_RALINK:
+ pmlmeinfo->turboMode_cts2self = 0;
+ pmlmeinfo->turboMode_rtsen = 1;
+ /* disable high power */
+ Switch_DM_Func(padapter, (~DYNAMIC_BB_DYNAMIC_TXPWR), false);
+ break;
+ case HT_IOT_PEER_REALTEK:
+ /* rtw_write16(padapter, 0x4cc, 0xffff); */
+ /* rtw_write16(padapter, 0x546, 0x01c0); */
+ /* disable high power */
+ Switch_DM_Func(padapter, (~DYNAMIC_BB_DYNAMIC_TXPWR), false);
+ break;
+ default:
+ pmlmeinfo->turboMode_cts2self = 0;
+ pmlmeinfo->turboMode_rtsen = 1;
+ break;
+ }
+
+}
+
+void update_capinfo(struct adapter *Adapter, u16 updateCap)
+{
+ struct mlme_ext_priv *pmlmeext = &Adapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ bool ShortPreamble;
+
+ /* Check preamble mode, 2005.01.06, by rcnjko. */
+ /* Mark to update preamble value forever, 2008.03.18 by lanhsin */
+ /* if (pMgntInfo->RegPreambleMode == PREAMBLE_AUTO) */
+ {
+
+ if (updateCap & cShortPreamble) {
+ /* Short Preamble */
+ if (pmlmeinfo->preamble_mode != PREAMBLE_SHORT) { /* PREAMBLE_LONG or PREAMBLE_AUTO */
+ ShortPreamble = true;
+ pmlmeinfo->preamble_mode = PREAMBLE_SHORT;
+ rtw_hal_set_hwreg(Adapter, HW_VAR_ACK_PREAMBLE, (u8 *)&ShortPreamble);
+ }
+ } else {
+ /* Long Preamble */
+ if (pmlmeinfo->preamble_mode != PREAMBLE_LONG) { /* PREAMBLE_SHORT or PREAMBLE_AUTO */
+ ShortPreamble = false;
+ pmlmeinfo->preamble_mode = PREAMBLE_LONG;
+ rtw_hal_set_hwreg(Adapter, HW_VAR_ACK_PREAMBLE, (u8 *)&ShortPreamble);
+ }
+ }
+ }
+
+ if (updateCap & cIBSS)
+ /* Filen: See 802.11-2007 p.91 */
+ pmlmeinfo->slotTime = NON_SHORT_SLOT_TIME;
+ else {
+ /* Filen: See 802.11-2007 p.90 */
+ if (pmlmeext->cur_wireless_mode & (WIRELESS_11_24N | WIRELESS_11A | WIRELESS_11_5N | WIRELESS_11AC))
+ pmlmeinfo->slotTime = SHORT_SLOT_TIME;
+ else if (pmlmeext->cur_wireless_mode & (WIRELESS_11G)) {
+ if ((updateCap & cShortSlotTime) /* && (!(pMgntInfo->pHTInfo->RT2RT_HT_Mode & RT_HT_CAP_USE_LONG_PREAMBLE)) */)
+ /* Short Slot Time */
+ pmlmeinfo->slotTime = SHORT_SLOT_TIME;
+ else
+ /* Long Slot Time */
+ pmlmeinfo->slotTime = NON_SHORT_SLOT_TIME;
+ } else
+ /* B Mode */
+ pmlmeinfo->slotTime = NON_SHORT_SLOT_TIME;
+ }
+
+ rtw_hal_set_hwreg(Adapter, HW_VAR_SLOT_TIME, &pmlmeinfo->slotTime);
+
+}
+
+void update_wireless_mode(struct adapter *padapter)
+{
+ int ratelen, network_type = 0;
+ u32 SIFS_Timer;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct wlan_bssid_ex *cur_network = &(pmlmeinfo->network);
+ unsigned char *rate = cur_network->SupportedRates;
+
+ ratelen = rtw_get_rateset_len(cur_network->SupportedRates);
+
+ if ((pmlmeinfo->HT_info_enable) && (pmlmeinfo->HT_caps_enable))
+ pmlmeinfo->HT_enable = 1;
+
+ if (pmlmeext->cur_channel > 14) {
+ if (pmlmeinfo->VHT_enable)
+ network_type = WIRELESS_11AC;
+ else if (pmlmeinfo->HT_enable)
+ network_type = WIRELESS_11_5N;
+
+ network_type |= WIRELESS_11A;
+ } else {
+ if (pmlmeinfo->VHT_enable)
+ network_type = WIRELESS_11AC;
+ else if (pmlmeinfo->HT_enable)
+ network_type = WIRELESS_11_24N;
+
+ if ((cckratesonly_included(rate, ratelen)) == true)
+ network_type |= WIRELESS_11B;
+ else if ((cckrates_included(rate, ratelen)) == true)
+ network_type |= WIRELESS_11BG;
+ else
+ network_type |= WIRELESS_11G;
+ }
+
+ pmlmeext->cur_wireless_mode = network_type & padapter->registrypriv.wireless_mode;
+
+ SIFS_Timer = 0x0a0a0808; /* 0x0808 -> for CCK, 0x0a0a -> for OFDM */
+ /* change this value if having IOT issues. */
+
+ padapter->HalFunc.SetHwRegHandler(padapter, HW_VAR_RESP_SIFS, (u8 *)&SIFS_Timer);
+
+ padapter->HalFunc.SetHwRegHandler(padapter, HW_VAR_WIRELESS_MODE, (u8 *)&(pmlmeext->cur_wireless_mode));
+
+ if (pmlmeext->cur_wireless_mode & WIRELESS_11B)
+ update_mgnt_tx_rate(padapter, IEEE80211_CCK_RATE_1MB);
+ else
+ update_mgnt_tx_rate(padapter, IEEE80211_OFDM_RATE_6MB);
+}
+
+void update_sta_basic_rate(struct sta_info *psta, u8 wireless_mode)
+{
+ if (IsSupportedTxCCK(wireless_mode)) {
+ /* Only B, B/G, and B/G/N AP could use CCK rate */
+ memcpy(psta->bssrateset, rtw_basic_rate_cck, 4);
+ psta->bssratelen = 4;
+ } else {
+ memcpy(psta->bssrateset, rtw_basic_rate_ofdm, 3);
+ psta->bssratelen = 3;
+ }
+}
+
+int update_sta_support_rate(struct adapter *padapter, u8 *pvar_ie, uint var_ie_len, int cam_idx)
+{
+ unsigned int ie_len;
+ struct ndis_80211_var_ie *pIE;
+ int supportRateNum = 0;
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+
+ pIE = (struct ndis_80211_var_ie *)rtw_get_ie(pvar_ie, _SUPPORTEDRATES_IE_, &ie_len, var_ie_len);
+ if (pIE == NULL)
+ return _FAIL;
+
+ memcpy(pmlmeinfo->FW_sta_info[cam_idx].SupportedRates, pIE->data, ie_len);
+ supportRateNum = ie_len;
+
+ pIE = (struct ndis_80211_var_ie *)rtw_get_ie(pvar_ie, _EXT_SUPPORTEDRATES_IE_, &ie_len, var_ie_len);
+ if (pIE)
+ memcpy((pmlmeinfo->FW_sta_info[cam_idx].SupportedRates + supportRateNum), pIE->data, ie_len);
+
+ return _SUCCESS;
+
+}
+
+void process_addba_req(struct adapter *padapter, u8 *paddba_req, u8 *addr)
+{
+ struct sta_info *psta;
+ u16 tid, start_seq, param;
+ struct recv_reorder_ctrl *preorder_ctrl;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct ADDBA_request *preq = (struct ADDBA_request *)paddba_req;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+
+ psta = rtw_get_stainfo(pstapriv, addr);
+
+ if (psta) {
+ start_seq = le16_to_cpu(preq->BA_starting_seqctrl) >> 4;
+
+ param = le16_to_cpu(preq->BA_para_set);
+ tid = (param>>2)&0x0f;
+
+ preorder_ctrl = &psta->recvreorder_ctrl[tid];
+
+ #ifdef CONFIG_UPDATE_INDICATE_SEQ_WHILE_PROCESS_ADDBA_REQ
+ preorder_ctrl->indicate_seq = start_seq;
+ #ifdef DBG_RX_SEQ
+ DBG_871X("DBG_RX_SEQ %s:%d IndicateSeq: %d, start_seq: %d\n", __func__, __LINE__,
+ preorder_ctrl->indicate_seq, start_seq);
+ #endif
+ #else
+ preorder_ctrl->indicate_seq = 0xffff;
+ #endif
+
+ preorder_ctrl->enable = (pmlmeinfo->bAcceptAddbaReq == true) ? true : false;
+ }
+
+}
+
+void update_TSF(struct mlme_ext_priv *pmlmeext, u8 *pframe, uint len)
+{
+ u8 *pIE;
+ __le32 *pbuf;
+
+ pIE = pframe + sizeof(struct ieee80211_hdr_3addr);
+ pbuf = (__le32 *)pIE;
+
+ pmlmeext->TSFValue = le32_to_cpu(*(pbuf+1));
+
+ pmlmeext->TSFValue = pmlmeext->TSFValue << 32;
+
+ pmlmeext->TSFValue |= le32_to_cpu(*pbuf);
+}
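+
+/*
+ * Worked example (illustrative on-air bytes): a beacon timestamp of
+ * 88 77 66 55 44 33 22 11 decodes as low word 0x55667788 and high word
+ * 0x11223344, so TSFValue becomes 0x1122334455667788.
+ */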
+
+void correct_TSF(struct adapter *padapter, struct mlme_ext_priv *pmlmeext)
+{
+ rtw_hal_set_hwreg(padapter, HW_VAR_CORRECT_TSF, NULL);
+}
+
+void adaptive_early_32k(struct mlme_ext_priv *pmlmeext, u8 *pframe, uint len)
+{
+ int i;
+ u8 *pIE;
+ __le32 *pbuf;
+ u64 tsf = 0;
+ u32 delay_ms;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+
+
+ pmlmeext->bcn_cnt++;
+
+ pIE = pframe + sizeof(struct ieee80211_hdr_3addr);
+ pbuf = (__le32 *)pIE;
+
+ tsf = le32_to_cpu(*(pbuf+1));
+ tsf = tsf << 32;
+ tsf |= le32_to_cpu(*pbuf);
+
+ /* DBG_871X("%s(): tsf_upper = 0x%08x, tsf_lower = 0x%08x\n", __func__, (u32)(tsf>>32), (u32)tsf); */
+
+ /* delay = (timestamp mod 1024*100)/1000 (unit: ms) */
+ /* delay_ms = do_div(tsf, (pmlmeinfo->bcn_interval*1024))/1000; */
+ delay_ms = rtw_modular64(tsf, (pmlmeinfo->bcn_interval*1024));
+ delay_ms = delay_ms/1000;
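+
+ /*
+ * e.g. with bcn_interval = 100 TU the modulus is 102400 us; a TSF of
+ * 1027400 us (illustrative value) leaves a remainder of 3400 us, i.e.
+ * the beacon arrived 3 ms late and bcn_delay_cnt[3] is incremented
+ * below.
+ */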
+
+ if (delay_ms >= 8)
+ pmlmeext->bcn_delay_cnt[8]++;
+ /* pmlmeext->bcn_delay_ratio[8] = (pmlmeext->bcn_delay_cnt[8] * 100) /pmlmeext->bcn_cnt; */
+ else
+ pmlmeext->bcn_delay_cnt[delay_ms]++;
+ /* pmlmeext->bcn_delay_ratio[delay_ms] = (pmlmeext->bcn_delay_cnt[delay_ms] * 100) /pmlmeext->bcn_cnt; */
+
+/*
+ DBG_871X("%s(): (a)bcn_cnt = %d\n", __func__, pmlmeext->bcn_cnt);
+
+
+ for (i = 0; i<9; i++)
+ {
+ DBG_871X("%s():bcn_delay_cnt[%d]=%d, bcn_delay_ratio[%d]=%d\n", __func__, i,
+ pmlmeext->bcn_delay_cnt[i] , i, pmlmeext->bcn_delay_ratio[i]);
+ }
+*/
+
+ /* dump for adaptive_early_32k */
+ if (pmlmeext->bcn_cnt > 100 && (pmlmeext->adaptive_tsf_done == true)) {
+ u8 ratio_20_delay, ratio_80_delay;
+ u8 DrvBcnEarly, DrvBcnTimeOut;
+
+ ratio_20_delay = 0;
+ ratio_80_delay = 0;
+ DrvBcnEarly = 0xff;
+ DrvBcnTimeOut = 0xff;
+
+ DBG_871X("%s(): bcn_cnt = %d\n", __func__, pmlmeext->bcn_cnt);
+
+ for (i = 0; i < 9; i++) {
+ pmlmeext->bcn_delay_ratio[i] = (pmlmeext->bcn_delay_cnt[i] * 100) / pmlmeext->bcn_cnt;
+
+
+ DBG_871X("%s():bcn_delay_cnt[%d]=%d, bcn_delay_ratio[%d]=%d\n", __func__, i,
+ pmlmeext->bcn_delay_cnt[i], i, pmlmeext->bcn_delay_ratio[i]);
+
+ ratio_20_delay += pmlmeext->bcn_delay_ratio[i];
+ ratio_80_delay += pmlmeext->bcn_delay_ratio[i];
+
+ if (ratio_20_delay > 20 && DrvBcnEarly == 0xff) {
+ DrvBcnEarly = i;
+ DBG_871X("%s(): DrvBcnEarly = %d\n", __func__, DrvBcnEarly);
+ }
+
+ if (ratio_80_delay > 80 && DrvBcnTimeOut == 0xff) {
+ DrvBcnTimeOut = i;
+ DBG_871X("%s(): DrvBcnTimeOut = %d\n", __func__, DrvBcnTimeOut);
+ }
+
+ /* reset adaptive_early_32k cnt */
+ pmlmeext->bcn_delay_cnt[i] = 0;
+ pmlmeext->bcn_delay_ratio[i] = 0;
+ }
+
+ pmlmeext->DrvBcnEarly = DrvBcnEarly;
+ pmlmeext->DrvBcnTimeOut = DrvBcnTimeOut;
+
+ pmlmeext->bcn_cnt = 0;
+ }
+
+}
+
+
+void beacon_timing_control(struct adapter *padapter)
+{
+ rtw_hal_bcn_related_reg_setting(padapter);
+}
+
+void rtw_alloc_macid(struct adapter *padapter, struct sta_info *psta)
+{
+ int i;
+ u8 bc_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+ struct dvobj_priv *pdvobj = adapter_to_dvobj(padapter);
+
+
+ if (!memcmp(psta->hwaddr, bc_addr, ETH_ALEN))
+ return;
+
+ if (!memcmp(psta->hwaddr, myid(&padapter->eeprompriv), ETH_ALEN)) {
+ psta->mac_id = NUM_STA;
+ return;
+ }
+
+ spin_lock_bh(&pdvobj->lock);
+ for (i = 0; i < NUM_STA; i++) {
+ if (pdvobj->macid[i] == false) {
+ pdvobj->macid[i] = true;
+ break;
+ }
+ }
+ spin_unlock_bh(&pdvobj->lock);
+
+ if (i > (NUM_STA-1)) {
+ psta->mac_id = NUM_STA;
+ DBG_871X(" no room for more MACIDs\n");
+ } else {
+ psta->mac_id = i;
+ DBG_871X("%s = %d\n", __func__, psta->mac_id);
+ }
+
+}
+
+void rtw_release_macid(struct adapter *padapter, struct sta_info *psta)
+{
+ u8 bc_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+ struct dvobj_priv *pdvobj = adapter_to_dvobj(padapter);
+
+
+ if (!memcmp(psta->hwaddr, bc_addr, ETH_ALEN))
+ return;
+
+ if (!memcmp(psta->hwaddr, myid(&padapter->eeprompriv), ETH_ALEN))
+ return;
+
+ spin_lock_bh(&pdvobj->lock);
+ if (psta->mac_id < NUM_STA && psta->mac_id != 1) {
+ if (pdvobj->macid[psta->mac_id] == true) {
+ DBG_871X("%s = %d\n", __func__, psta->mac_id);
+ pdvobj->macid[psta->mac_id] = false;
+ psta->mac_id = NUM_STA;
+ }
+
+ }
+ spin_unlock_bh(&pdvobj->lock);
+
+}
+/* For 8188E RA */
+u8 rtw_search_max_mac_id(struct adapter *padapter)
+{
+ u8 max_mac_id = 0;
+ struct dvobj_priv *pdvobj = adapter_to_dvobj(padapter);
+ int i;
+ spin_lock_bh(&pdvobj->lock);
+ for (i = (NUM_STA-1); i >= 0 ; i--) {
+ if (pdvobj->macid[i] == true)
+ break;
+ }
+ max_mac_id = i;
+ spin_unlock_bh(&pdvobj->lock);
+
+ return max_mac_id;
+
+}
+
+struct adapter *dvobj_get_port0_adapter(struct dvobj_priv *dvobj)
+{
+ if (get_iface_type(dvobj->padapters) != IFACE_PORT0)
+ return NULL;
+
+ return dvobj->padapters;
+}
+
+#ifdef CONFIG_GPIO_API
+int rtw_get_gpio(struct net_device *netdev, int gpio_num)
+{
+ u8 value;
+ u8 direction;
+ struct adapter *adapter = (struct adapter *)rtw_netdev_priv(netdev);
+ struct pwrctrl_priv *pwrpriv = adapter_to_pwrctl(adapter);
+
+ rtw_ps_deny(adapter, PS_DENY_IOCTL);
+
+ DBG_871X("rf_pwrstate = 0x%02x\n", pwrpriv->rf_pwrstate);
+ LeaveAllPowerSaveModeDirect(adapter);
+
+ /* Read GPIO Direction */
+ direction = (rtw_read8(adapter, REG_GPIO_PIN_CTRL + 2) & BIT(gpio_num)) >> gpio_num;
+
+ /* Read the register value according to the direction */
+ if (direction)
+ value = (rtw_read8(adapter, REG_GPIO_PIN_CTRL + 1) & BIT(gpio_num)) >> gpio_num;
+ else
+ value = (rtw_read8(adapter, REG_GPIO_PIN_CTRL) & BIT(gpio_num)) >> gpio_num;
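+
+ /*
+ * Example (register layout as used above, illustrative values): for
+ * gpio_num == 5, a direction byte at REG_GPIO_PIN_CTRL + 2 of 0x20
+ * marks the pin as output, so the value is taken from bit 5 of the
+ * output byte at REG_GPIO_PIN_CTRL + 1 rather than from the input
+ * byte at REG_GPIO_PIN_CTRL.
+ */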
+
+ rtw_ps_deny_cancel(adapter, PS_DENY_IOCTL);
+ DBG_871X("%s direction =%d value =%d\n", __func__, direction, value);
+
+ return value;
+}
+EXPORT_SYMBOL(rtw_get_gpio);
+
+int rtw_set_gpio_output_value(struct net_device *netdev, int gpio_num, bool isHigh)
+{
+ u8 direction = 0;
+ u8 res = -1;
+ struct adapter *adapter = (struct adapter *)rtw_netdev_priv(netdev);
+
+ /* Check GPIO is 4~7 */
+ if (gpio_num > 7 || gpio_num < 4) {
+ DBG_871X("%s The gpio number does not included 4~7.\n", __func__);
+ return -1;
+ }
+
+ rtw_ps_deny(adapter, PS_DENY_IOCTL);
+
+ LeaveAllPowerSaveModeDirect(adapter);
+
+ /* Read GPIO direction */
+ direction = (rtw_read8(adapter, REG_GPIO_PIN_CTRL + 2) & BIT(gpio_num)) >> gpio_num;
+
+ /* If the GPIO is configured as output, set its value. */
+ if (direction) {
+ if (isHigh)
+ rtw_write8(adapter, REG_GPIO_PIN_CTRL + 1, rtw_read8(adapter, REG_GPIO_PIN_CTRL + 1) | BIT(gpio_num));
+ else
+ rtw_write8(adapter, REG_GPIO_PIN_CTRL + 1, rtw_read8(adapter, REG_GPIO_PIN_CTRL + 1) & ~BIT(gpio_num));
+
+ DBG_871X("%s Set gpio %x[%d]=%d\n", __func__, REG_GPIO_PIN_CTRL+1, gpio_num, isHigh);
+ res = 0;
+ } else {
+ DBG_871X("%s The gpio is input, not be set!\n", __func__);
+ res = -1;
+ }
+
+ rtw_ps_deny_cancel(adapter, PS_DENY_IOCTL);
+ return res;
+}
+EXPORT_SYMBOL(rtw_set_gpio_output_value);
+
+int rtw_config_gpio(struct net_device *netdev, int gpio_num, bool isOutput)
+{
+ struct adapter *adapter = (struct adapter *)rtw_netdev_priv(netdev);
+
+ if (gpio_num > 7 || gpio_num < 4) {
+ DBG_871X("%s The gpio number does not included 4~7.\n", __func__);
+ return -1;
+ }
+
+ DBG_871X("%s gpio_num =%d direction =%d\n", __func__, gpio_num, isOutput);
+
+ rtw_ps_deny(adapter, PS_DENY_IOCTL);
+
+ LeaveAllPowerSaveModeDirect(adapter);
+
+ if (isOutput)
+ rtw_write8(adapter, REG_GPIO_PIN_CTRL + 2, rtw_read8(adapter, REG_GPIO_PIN_CTRL + 2) | BIT(gpio_num));
+ else
+ rtw_write8(adapter, REG_GPIO_PIN_CTRL + 2, rtw_read8(adapter, REG_GPIO_PIN_CTRL + 2) & ~BIT(gpio_num));
+
+ rtw_ps_deny_cancel(adapter, PS_DENY_IOCTL);
+
+ return 0;
+}
+EXPORT_SYMBOL(rtw_config_gpio);
+#endif
+
+#if defined(CONFIG_WOWLAN) || defined(CONFIG_AP_WOWLAN)
+void rtw_get_current_ip_address(struct adapter *padapter, u8 *pcurrentip)
+{
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct in_device *my_ip_ptr = padapter->pnetdev->ip_ptr;
+ u8 ipaddress[4];
+
+ if ((pmlmeinfo->state & WIFI_FW_LINKING_STATE) ||
+ pmlmeinfo->state & WIFI_FW_AP_STATE) {
+ if (my_ip_ptr != NULL) {
+ struct in_ifaddr *my_ifa_list = my_ip_ptr->ifa_list;
+ if (my_ifa_list != NULL) {
+ ipaddress[0] = my_ifa_list->ifa_address & 0xFF;
+ ipaddress[1] = (my_ifa_list->ifa_address >> 8) & 0xFF;
+ ipaddress[2] = (my_ifa_list->ifa_address >> 16) & 0xFF;
+ ipaddress[3] = my_ifa_list->ifa_address >> 24;
+ DBG_871X("%s: %d.%d.%d.%d ==========\n", __func__,
+ ipaddress[0], ipaddress[1], ipaddress[2], ipaddress[3]);
+ memcpy(pcurrentip, ipaddress, 4);
+ }
+ }
+ }
+}
+#endif
+#ifdef CONFIG_WOWLAN
+void rtw_get_sec_iv(struct adapter *padapter, u8 *pcur_dot11txpn, u8 *StaAddr)
+{
+ struct sta_info *psta;
+ struct security_priv *psecpriv = &padapter->securitypriv;
+
+ memset(pcur_dot11txpn, 0, 8);
+ if (NULL == StaAddr)
+ return;
+ psta = rtw_get_stainfo(&padapter->stapriv, StaAddr);
+ DBG_871X("%s(): StaAddr: %02x %02x %02x %02x %02x %02x\n",
+ __func__, StaAddr[0], StaAddr[1], StaAddr[2],
+ StaAddr[3], StaAddr[4], StaAddr[5]);
+
+ if (psta) {
+ if (psecpriv->dot11PrivacyAlgrthm != _NO_PRIVACY_ && psta->dot11txpn.val > 0)
+ psta->dot11txpn.val--;
+ AES_IV(pcur_dot11txpn, psta->dot11txpn, 0);
+
+ DBG_871X("%s(): CurrentIV: %02x %02x %02x %02x %02x %02x %02x %02x\n"
+ , __func__, pcur_dot11txpn[0], pcur_dot11txpn[1],
+ pcur_dot11txpn[2], pcur_dot11txpn[3], pcur_dot11txpn[4],
+ pcur_dot11txpn[5], pcur_dot11txpn[6], pcur_dot11txpn[7]);
+ }
+}
+void rtw_set_sec_pn(struct adapter *padapter)
+{
+ struct sta_info *psta;
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct pwrctrl_priv *pwrpriv = adapter_to_pwrctl(padapter);
+ struct security_priv *psecpriv = &padapter->securitypriv;
+
+ psta = rtw_get_stainfo(&padapter->stapriv,
+ get_my_bssid(&pmlmeinfo->network));
+
+ if (psta) {
+ if (pwrpriv->wowlan_fw_iv > psta->dot11txpn.val) {
+ if (psecpriv->dot11PrivacyAlgrthm != _NO_PRIVACY_)
+ psta->dot11txpn.val = pwrpriv->wowlan_fw_iv + 2;
+ } else {
+ DBG_871X("%s(): FW IV is smaller than driver\n", __func__);
+ psta->dot11txpn.val += 2;
+ }
+ DBG_871X("%s: dot11txpn: 0x%016llx\n", __func__, psta->dot11txpn.val);
+ }
+}
+#endif /* CONFIG_WOWLAN */
+
+#ifdef CONFIG_PNO_SUPPORT
+#define CSCAN_TLV_TYPE_SSID_IE 'S'
+#define CIPHER_IE "key_mgmt ="
+#define CIPHER_NONE "NONE"
+#define CIPHER_WPA_PSK "WPA-PSK"
+#define CIPHER_WPA_EAP "WPA-EAP IEEE8021X"
+
+#endif /* CONFIG_PNO_SUPPORT */
diff --git a/drivers/staging/rtl8723bs/core/rtw_xmit.c b/drivers/staging/rtl8723bs/core/rtw_xmit.c
new file mode 100644
index 000000000000..8f2c9a6658bf
--- /dev/null
+++ b/drivers/staging/rtl8723bs/core/rtw_xmit.c
@@ -0,0 +1,3100 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#define _RTW_XMIT_C_
+
+#include <drv_types.h>
+#include <rtw_debug.h>
+
+static u8 P802_1H_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0xf8 };
+static u8 RFC1042_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0x00 };
+
+static void _init_txservq(struct tx_servq *ptxservq)
+{
+ INIT_LIST_HEAD(&ptxservq->tx_pending);
+ _rtw_init_queue(&ptxservq->sta_pending);
+ ptxservq->qcnt = 0;
+}
+
+void _rtw_init_sta_xmit_priv(struct sta_xmit_priv *psta_xmitpriv)
+{
+ memset((unsigned char *)psta_xmitpriv, 0, sizeof(struct sta_xmit_priv));
+
+ spin_lock_init(&psta_xmitpriv->lock);
+
+ /* for (i = 0 ; i < MAX_NUMBLKS; i++) */
+ /* _init_txservq(&(psta_xmitpriv->blk_q[i])); */
+
+ _init_txservq(&psta_xmitpriv->be_q);
+ _init_txservq(&psta_xmitpriv->bk_q);
+ _init_txservq(&psta_xmitpriv->vi_q);
+ _init_txservq(&psta_xmitpriv->vo_q);
+ INIT_LIST_HEAD(&psta_xmitpriv->legacy_dz);
+ INIT_LIST_HEAD(&psta_xmitpriv->apsd);
+}
+
+s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
+{
+ int i;
+ struct xmit_buf *pxmitbuf;
+ struct xmit_frame *pxframe;
+ sint res = _SUCCESS;
+
+ /* We don't need to memset padapter->XXX to zero, because adapter is allocated by vzalloc(). */
+ /* memset((unsigned char *)pxmitpriv, 0, sizeof(struct xmit_priv)); */
+
+ spin_lock_init(&pxmitpriv->lock);
+ spin_lock_init(&pxmitpriv->lock_sctx);
+ sema_init(&pxmitpriv->xmit_sema, 0);
+ sema_init(&pxmitpriv->terminate_xmitthread_sema, 0);
+
+ /*
+ Please insert all the queue initialization using _rtw_init_queue below
+ */
+
+ pxmitpriv->adapter = padapter;
+
+ /* for (i = 0 ; i < MAX_NUMBLKS; i++) */
+ /* _rtw_init_queue(&pxmitpriv->blk_strms[i]); */
+
+ _rtw_init_queue(&pxmitpriv->be_pending);
+ _rtw_init_queue(&pxmitpriv->bk_pending);
+ _rtw_init_queue(&pxmitpriv->vi_pending);
+ _rtw_init_queue(&pxmitpriv->vo_pending);
+ _rtw_init_queue(&pxmitpriv->bm_pending);
+
+ /* _rtw_init_queue(&pxmitpriv->legacy_dz_queue); */
+ /* _rtw_init_queue(&pxmitpriv->apsd_queue); */
+
+ _rtw_init_queue(&pxmitpriv->free_xmit_queue);
+
+ /*
+ Please allocate memory with sz = sizeof(struct xmit_frame) * NR_XMITFRAME,
+ and initialize free_xmit_frame below.
+ Please also use free_txobj to link up all the xmit_frames...
+ */
+
+ pxmitpriv->pallocated_frame_buf = vzalloc(NR_XMITFRAME * sizeof(struct xmit_frame) + 4);
+
+ if (pxmitpriv->pallocated_frame_buf == NULL) {
+ pxmitpriv->pxmit_frame_buf = NULL;
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("alloc xmit_frame fail!\n"));
+ res = _FAIL;
+ goto exit;
+ }
+ pxmitpriv->pxmit_frame_buf = (u8 *)N_BYTE_ALIGMENT((SIZE_PTR)(pxmitpriv->pallocated_frame_buf), 4);
+ /* pxmitpriv->pxmit_frame_buf = pxmitpriv->pallocated_frame_buf + 4 - */
+ /* ((SIZE_PTR) (pxmitpriv->pallocated_frame_buf) &3); */
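+
+ /*
+ * e.g. (assuming N_BYTE_ALIGMENT rounds up to the next 4-byte boundary)
+ * a hypothetical pallocated_frame_buf of 0x...1006 yields a
+ * pxmit_frame_buf of 0x...1008; the extra 4 bytes in the vzalloc()
+ * above cover this worst-case shift.
+ */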
+
+ pxframe = (struct xmit_frame *) pxmitpriv->pxmit_frame_buf;
+
+ for (i = 0; i < NR_XMITFRAME; i++) {
+ INIT_LIST_HEAD(&(pxframe->list));
+
+ pxframe->padapter = padapter;
+ pxframe->frame_tag = NULL_FRAMETAG;
+
+ pxframe->pkt = NULL;
+
+ pxframe->buf_addr = NULL;
+ pxframe->pxmitbuf = NULL;
+
+ list_add_tail(&(pxframe->list), &(pxmitpriv->free_xmit_queue.queue));
+
+ pxframe++;
+ }
+
+ pxmitpriv->free_xmitframe_cnt = NR_XMITFRAME;
+
+ pxmitpriv->frag_len = MAX_FRAG_THRESHOLD;
+
+
+ /* init xmit_buf */
+ _rtw_init_queue(&pxmitpriv->free_xmitbuf_queue);
+ _rtw_init_queue(&pxmitpriv->pending_xmitbuf_queue);
+
+ pxmitpriv->pallocated_xmitbuf = vzalloc(NR_XMITBUFF * sizeof(struct xmit_buf) + 4);
+
+ if (pxmitpriv->pallocated_xmitbuf == NULL) {
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("alloc xmit_buf fail!\n"));
+ res = _FAIL;
+ goto exit;
+ }
+
+ pxmitpriv->pxmitbuf = (u8 *)N_BYTE_ALIGMENT((SIZE_PTR)(pxmitpriv->pallocated_xmitbuf), 4);
+ /* pxmitpriv->pxmitbuf = pxmitpriv->pallocated_xmitbuf + 4 - */
+ /* ((SIZE_PTR) (pxmitpriv->pallocated_xmitbuf) &3); */
+
+ pxmitbuf = (struct xmit_buf *)pxmitpriv->pxmitbuf;
+
+ for (i = 0; i < NR_XMITBUFF; i++) {
+ INIT_LIST_HEAD(&pxmitbuf->list);
+
+ pxmitbuf->priv_data = NULL;
+ pxmitbuf->padapter = padapter;
+ pxmitbuf->buf_tag = XMITBUF_DATA;
+
+ /* Tx buf allocation may fail sometimes, so sleep and retry. */
+ res = rtw_os_xmit_resource_alloc(padapter, pxmitbuf, (MAX_XMITBUF_SZ + XMITBUF_ALIGN_SZ), true);
+ if (res == _FAIL) {
+ msleep(10);
+ res = rtw_os_xmit_resource_alloc(padapter, pxmitbuf, (MAX_XMITBUF_SZ + XMITBUF_ALIGN_SZ), true);
+ if (res == _FAIL)
+ goto exit;
+ }
+
+ pxmitbuf->phead = pxmitbuf->pbuf;
+ pxmitbuf->pend = pxmitbuf->pbuf + MAX_XMITBUF_SZ;
+ pxmitbuf->len = 0;
+ pxmitbuf->pdata = pxmitbuf->ptail = pxmitbuf->phead;
+
+ pxmitbuf->flags = XMIT_VO_QUEUE;
+
+ list_add_tail(&pxmitbuf->list, &(pxmitpriv->free_xmitbuf_queue.queue));
+ #ifdef DBG_XMIT_BUF
+ pxmitbuf->no = i;
+ #endif
+
+ pxmitbuf++;
+
+ }
+
+ pxmitpriv->free_xmitbuf_cnt = NR_XMITBUFF;
+
+ /* init xframe_ext queue, the same count as extbuf */
+ _rtw_init_queue(&pxmitpriv->free_xframe_ext_queue);
+
+ pxmitpriv->xframe_ext_alloc_addr = vzalloc(NR_XMIT_EXTBUFF * sizeof(struct xmit_frame) + 4);
+
+ if (pxmitpriv->xframe_ext_alloc_addr == NULL) {
+ pxmitpriv->xframe_ext = NULL;
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("alloc xframe_ext fail!\n"));
+ res = _FAIL;
+ goto exit;
+ }
+ pxmitpriv->xframe_ext = (u8 *)N_BYTE_ALIGMENT((SIZE_PTR)(pxmitpriv->xframe_ext_alloc_addr), 4);
+ pxframe = (struct xmit_frame *)pxmitpriv->xframe_ext;
+
+ for (i = 0; i < NR_XMIT_EXTBUFF; i++) {
+ INIT_LIST_HEAD(&(pxframe->list));
+
+ pxframe->padapter = padapter;
+ pxframe->frame_tag = NULL_FRAMETAG;
+
+ pxframe->pkt = NULL;
+
+ pxframe->buf_addr = NULL;
+ pxframe->pxmitbuf = NULL;
+
+ pxframe->ext_tag = 1;
+
+ list_add_tail(&(pxframe->list), &(pxmitpriv->free_xframe_ext_queue.queue));
+
+ pxframe++;
+ }
+ pxmitpriv->free_xframe_ext_cnt = NR_XMIT_EXTBUFF;
+
+ /* Init xmit extension buff */
+ _rtw_init_queue(&pxmitpriv->free_xmit_extbuf_queue);
+
+ pxmitpriv->pallocated_xmit_extbuf = vzalloc(NR_XMIT_EXTBUFF * sizeof(struct xmit_buf) + 4);
+
+ if (pxmitpriv->pallocated_xmit_extbuf == NULL) {
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("alloc xmit_extbuf fail!\n"));
+ res = _FAIL;
+ goto exit;
+ }
+
+ pxmitpriv->pxmit_extbuf = (u8 *)N_BYTE_ALIGMENT((SIZE_PTR)(pxmitpriv->pallocated_xmit_extbuf), 4);
+
+ pxmitbuf = (struct xmit_buf *)pxmitpriv->pxmit_extbuf;
+
+ for (i = 0; i < NR_XMIT_EXTBUFF; i++) {
+ INIT_LIST_HEAD(&pxmitbuf->list);
+
+ pxmitbuf->priv_data = NULL;
+ pxmitbuf->padapter = padapter;
+ pxmitbuf->buf_tag = XMITBUF_MGNT;
+
+ res = rtw_os_xmit_resource_alloc(padapter, pxmitbuf, MAX_XMIT_EXTBUF_SZ + XMITBUF_ALIGN_SZ, true);
+ if (res == _FAIL) {
+ res = _FAIL;
+ goto exit;
+ }
+
+ pxmitbuf->phead = pxmitbuf->pbuf;
+ pxmitbuf->pend = pxmitbuf->pbuf + MAX_XMIT_EXTBUF_SZ;
+ pxmitbuf->len = 0;
+ pxmitbuf->pdata = pxmitbuf->ptail = pxmitbuf->phead;
+
+ list_add_tail(&pxmitbuf->list, &(pxmitpriv->free_xmit_extbuf_queue.queue));
+ #ifdef DBG_XMIT_BUF_EXT
+ pxmitbuf->no = i;
+ #endif
+ pxmitbuf++;
+
+ }
+
+ pxmitpriv->free_xmit_extbuf_cnt = NR_XMIT_EXTBUFF;
+
+ for (i = 0; i < CMDBUF_MAX; i++) {
+ pxmitbuf = &pxmitpriv->pcmd_xmitbuf[i];
+ if (pxmitbuf) {
+ INIT_LIST_HEAD(&pxmitbuf->list);
+
+ pxmitbuf->priv_data = NULL;
+ pxmitbuf->padapter = padapter;
+ pxmitbuf->buf_tag = XMITBUF_CMD;
+
+ res = rtw_os_xmit_resource_alloc(padapter, pxmitbuf, MAX_CMDBUF_SZ+XMITBUF_ALIGN_SZ, true);
+ if (res == _FAIL) {
+ res = _FAIL;
+ goto exit;
+ }
+
+ pxmitbuf->phead = pxmitbuf->pbuf;
+ pxmitbuf->pend = pxmitbuf->pbuf + MAX_CMDBUF_SZ;
+ pxmitbuf->len = 0;
+ pxmitbuf->pdata = pxmitbuf->ptail = pxmitbuf->phead;
+ pxmitbuf->alloc_sz = MAX_CMDBUF_SZ+XMITBUF_ALIGN_SZ;
+ }
+ }
+
+ rtw_alloc_hwxmits(padapter);
+ rtw_init_hwxmits(pxmitpriv->hwxmits, pxmitpriv->hwxmit_entry);
+
+ for (i = 0; i < 4; i++) {
+ pxmitpriv->wmm_para_seq[i] = i;
+ }
+
+ pxmitpriv->ack_tx = false;
+ mutex_init(&pxmitpriv->ack_tx_mutex);
+ rtw_sctx_init(&pxmitpriv->ack_tx_ops, 0);
+
+ rtw_hal_init_xmit_priv(padapter);
+
+exit:
+ return res;
+}
+
+void _rtw_free_xmit_priv(struct xmit_priv *pxmitpriv)
+{
+ int i;
+ struct adapter *padapter = pxmitpriv->adapter;
+ struct xmit_frame *pxmitframe = (struct xmit_frame *) pxmitpriv->pxmit_frame_buf;
+ struct xmit_buf *pxmitbuf = (struct xmit_buf *)pxmitpriv->pxmitbuf;
+
+ rtw_hal_free_xmit_priv(padapter);
+
+ if (pxmitpriv->pxmit_frame_buf == NULL)
+ return;
+
+ for (i = 0; i < NR_XMITFRAME; i++) {
+ rtw_os_xmit_complete(padapter, pxmitframe);
+
+ pxmitframe++;
+ }
+
+ for (i = 0; i < NR_XMITBUFF; i++) {
+ rtw_os_xmit_resource_free(padapter, pxmitbuf, (MAX_XMITBUF_SZ + XMITBUF_ALIGN_SZ), true);
+
+ pxmitbuf++;
+ }
+
+ if (pxmitpriv->pallocated_frame_buf)
+ vfree(pxmitpriv->pallocated_frame_buf);
+
+
+ if (pxmitpriv->pallocated_xmitbuf)
+ vfree(pxmitpriv->pallocated_xmitbuf);
+
+ /* free xframe_ext queue, the same count as extbuf */
+ pxmitframe = (struct xmit_frame *)pxmitpriv->xframe_ext;
+ if (pxmitframe) {
+ for (i = 0; i < NR_XMIT_EXTBUFF; i++) {
+ rtw_os_xmit_complete(padapter, pxmitframe);
+ pxmitframe++;
+ }
+ }
+ if (pxmitpriv->xframe_ext_alloc_addr)
+ vfree(pxmitpriv->xframe_ext_alloc_addr);
+
+ /* free xmit extension buff */
+ pxmitbuf = (struct xmit_buf *)pxmitpriv->pxmit_extbuf;
+ for (i = 0; i < NR_XMIT_EXTBUFF; i++) {
+ rtw_os_xmit_resource_free(padapter, pxmitbuf, (MAX_XMIT_EXTBUF_SZ + XMITBUF_ALIGN_SZ), true);
+
+ pxmitbuf++;
+ }
+
+ if (pxmitpriv->pallocated_xmit_extbuf) {
+ vfree(pxmitpriv->pallocated_xmit_extbuf);
+ }
+
+ for (i = 0; i < CMDBUF_MAX; i++) {
+ pxmitbuf = &pxmitpriv->pcmd_xmitbuf[i];
+ if (pxmitbuf != NULL)
+ rtw_os_xmit_resource_free(padapter, pxmitbuf, MAX_CMDBUF_SZ+XMITBUF_ALIGN_SZ, true);
+ }
+
+ rtw_free_hwxmits(padapter);
+
+ mutex_destroy(&pxmitpriv->ack_tx_mutex);
+}
+
+u8 query_ra_short_GI(struct sta_info *psta)
+{
+ u8 sgi = false, sgi_20m = false, sgi_40m = false, sgi_80m = false;
+
+ sgi_20m = psta->htpriv.sgi_20m;
+ sgi_40m = psta->htpriv.sgi_40m;
+
+ switch (psta->bw_mode) {
+ case CHANNEL_WIDTH_80:
+ sgi = sgi_80m;
+ break;
+ case CHANNEL_WIDTH_40:
+ sgi = sgi_40m;
+ break;
+ case CHANNEL_WIDTH_20:
+ default:
+ sgi = sgi_20m;
+ break;
+ }
+
+ return sgi;
+}
+
+static void update_attrib_vcs_info(struct adapter *padapter, struct xmit_frame *pxmitframe)
+{
+ u32 sz;
+ struct pkt_attrib *pattrib = &pxmitframe->attrib;
+ /* struct sta_info *psta = pattrib->psta; */
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+
+ if (pattrib->nr_frags != 1)
+ sz = padapter->xmitpriv.frag_len;
+ else /* no frag */
+ sz = pattrib->last_txcmdsz;
+
+ /* (1) RTS_Threshold is compared to the MPDU, not MSDU. */
+ /* (2) If there are more than one frag in this MSDU, only the first frag uses protection frame. */
+ /* Other fragments are protected by previous fragment. */
+ /* So we only need to check the length of first fragment. */
+ if (pmlmeext->cur_wireless_mode < WIRELESS_11_24N || padapter->registrypriv.wifi_spec) {
+ if (sz > padapter->registrypriv.rts_thresh)
+ pattrib->vcs_mode = RTS_CTS;
+ else{
+ if (pattrib->rtsen)
+ pattrib->vcs_mode = RTS_CTS;
+ else if (pattrib->cts2self)
+ pattrib->vcs_mode = CTS_TO_SELF;
+ else
+ pattrib->vcs_mode = NONE_VCS;
+ }
+ } else{
+ while (true) {
+ /* IOT action */
+ if ((pmlmeinfo->assoc_AP_vendor == HT_IOT_PEER_ATHEROS) && (pattrib->ampdu_en == true) &&
+ (padapter->securitypriv.dot11PrivacyAlgrthm == _AES_)) {
+ pattrib->vcs_mode = CTS_TO_SELF;
+ break;
+ }
+
+
+ /* check ERP protection */
+ if (pattrib->rtsen || pattrib->cts2self) {
+ if (pattrib->rtsen)
+ pattrib->vcs_mode = RTS_CTS;
+ else if (pattrib->cts2self)
+ pattrib->vcs_mode = CTS_TO_SELF;
+
+ break;
+ }
+
+ /* check HT op mode */
+ if (pattrib->ht_en) {
+ u8 HTOpMode = pmlmeinfo->HT_protection;
+ if ((pmlmeext->cur_bwmode && (HTOpMode == 2 || HTOpMode == 3)) ||
+ (!pmlmeext->cur_bwmode && HTOpMode == 3)) {
+ pattrib->vcs_mode = RTS_CTS;
+ break;
+ }
+ }
+
+ /* check rts */
+ if (sz > padapter->registrypriv.rts_thresh) {
+ pattrib->vcs_mode = RTS_CTS;
+ break;
+ }
+
+ /* to do list: check MIMO power save condition. */
+
+ /* check AMPDU aggregation for TXOP */
+ if (pattrib->ampdu_en == true) {
+ pattrib->vcs_mode = RTS_CTS;
+ break;
+ }
+
+ pattrib->vcs_mode = NONE_VCS;
+ break;
+ }
+ }
+
+ /* for debug: force the driver to control virtual carrier sense. */
+ if (padapter->driver_vcs_en == 1)
+ pattrib->vcs_mode = padapter->driver_vcs_type;
+}
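
The pre-11n branch above boils down to a small pure decision: RTS/CTS when the first fragment exceeds the RTS threshold or RTS is explicitly enabled, CTS-to-self when only cts2self is set, and no protection otherwise. A minimal standalone sketch of that rule (the ex_ names are illustrative, not driver symbols):

	enum ex_vcs_mode { EX_NONE_VCS, EX_RTS_CTS, EX_CTS_TO_SELF };

	static enum ex_vcs_mode ex_legacy_vcs(unsigned int frag_sz, unsigned int rts_thresh,
					      int rtsen, int cts2self)
	{
		if (frag_sz > rts_thresh || rtsen)
			return EX_RTS_CTS;	/* threshold hit or RTS explicitly requested */
		if (cts2self)
			return EX_CTS_TO_SELF;
		return EX_NONE_VCS;
	}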
+
+static void update_attrib_phy_info(struct adapter *padapter, struct pkt_attrib *pattrib, struct sta_info *psta)
+{
+ struct mlme_ext_priv *mlmeext = &padapter->mlmeextpriv;
+
+ pattrib->rtsen = psta->rtsen;
+ pattrib->cts2self = psta->cts2self;
+
+ pattrib->mdata = 0;
+ pattrib->eosp = 0;
+ pattrib->triggered = 0;
+ pattrib->ampdu_spacing = 0;
+
+ /* qos_en, ht_en, init rate, bw, ch_offset, sgi */
+ pattrib->qos_en = psta->qos_option;
+
+ pattrib->raid = psta->raid;
+
+ if (mlmeext->cur_bwmode < psta->bw_mode)
+ pattrib->bwmode = mlmeext->cur_bwmode;
+ else
+ pattrib->bwmode = psta->bw_mode;
+
+ pattrib->sgi = query_ra_short_GI(psta);
+
+ pattrib->ldpc = psta->ldpc;
+ pattrib->stbc = psta->stbc;
+
+ pattrib->ht_en = psta->htpriv.ht_option;
+ pattrib->ch_offset = psta->htpriv.ch_offset;
+ pattrib->ampdu_en = false;
+
+ if (padapter->driver_ampdu_spacing != 0xFF) /* the driver controls the AMPDU density for the peer sta's rx */
+ pattrib->ampdu_spacing = padapter->driver_ampdu_spacing;
+ else
+ pattrib->ampdu_spacing = psta->htpriv.rx_ampdu_min_spacing;
+
+ /* if (pattrib->ht_en && psta->htpriv.ampdu_enable) */
+ /* */
+ /* if (psta->htpriv.agg_enable_bitmap & BIT(pattrib->priority)) */
+ /* pattrib->ampdu_en = true; */
+ /* */
+
+
+ pattrib->retry_ctrl = false;
+
+#ifdef CONFIG_AUTO_AP_MODE
+ if (psta->isrc && psta->pid > 0)
+ pattrib->pctrl = true;
+#endif
+
+}
+
+static s32 update_attrib_sec_info(struct adapter *padapter, struct pkt_attrib *pattrib, struct sta_info *psta)
+{
+ sint res = _SUCCESS;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
+ sint bmcast = IS_MCAST(pattrib->ra);
+
+ memset(pattrib->dot118021x_UncstKey.skey, 0, 16);
+ memset(pattrib->dot11tkiptxmickey.skey, 0, 16);
+ pattrib->mac_id = psta->mac_id;
+
+ if (psta->ieee8021x_blocked == true) {
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("\n psta->ieee8021x_blocked == true\n"));
+
+ pattrib->encrypt = 0;
+
+ if ((pattrib->ether_type != 0x888e) && (check_fwstate(pmlmepriv, WIFI_MP_STATE) == false)) {
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("\npsta->ieee8021x_blocked == true, pattrib->ether_type(%.4x) != 0x888e\n", pattrib->ether_type));
+ #ifdef DBG_TX_DROP_FRAME
+ DBG_871X("DBG_TX_DROP_FRAME %s psta->ieee8021x_blocked == true, pattrib->ether_type(%04x) != 0x888e\n", __func__, pattrib->ether_type);
+ #endif
+ res = _FAIL;
+ goto exit;
+ }
+ } else{
+ GET_ENCRY_ALGO(psecuritypriv, psta, pattrib->encrypt, bmcast);
+
+ switch (psecuritypriv->dot11AuthAlgrthm) {
+ case dot11AuthAlgrthm_Open:
+ case dot11AuthAlgrthm_Shared:
+ case dot11AuthAlgrthm_Auto:
+ pattrib->key_idx = (u8)psecuritypriv->dot11PrivacyKeyIndex;
+ break;
+ case dot11AuthAlgrthm_8021X:
+ if (bmcast)
+ pattrib->key_idx = (u8)psecuritypriv->dot118021XGrpKeyid;
+ else
+ pattrib->key_idx = 0;
+ break;
+ default:
+ pattrib->key_idx = 0;
+ break;
+ }
+
+ /* For WPS 1.0 WEP, driver should not encrypt EAPOL Packet for WPS handshake. */
+ if (((pattrib->encrypt == _WEP40_) || (pattrib->encrypt == _WEP104_)) && (pattrib->ether_type == 0x888e))
+ pattrib->encrypt = _NO_PRIVACY_;
+
+ }
+
+ switch (pattrib->encrypt) {
+ case _WEP40_:
+ case _WEP104_:
+ pattrib->iv_len = 4;
+ pattrib->icv_len = 4;
+ WEP_IV(pattrib->iv, psta->dot11txpn, pattrib->key_idx);
+ break;
+
+ case _TKIP_:
+ pattrib->iv_len = 8;
+ pattrib->icv_len = 4;
+
+ if (psecuritypriv->busetkipkey == _FAIL) {
+ #ifdef DBG_TX_DROP_FRAME
+ DBG_871X("DBG_TX_DROP_FRAME %s psecuritypriv->busetkipkey(%d) == _FAIL drop packet\n", __func__, psecuritypriv->busetkipkey);
+ #endif
+ res = _FAIL;
+ goto exit;
+ }
+
+ if (bmcast)
+ TKIP_IV(pattrib->iv, psta->dot11txpn, pattrib->key_idx);
+ else
+ TKIP_IV(pattrib->iv, psta->dot11txpn, 0);
+
+
+ memcpy(pattrib->dot11tkiptxmickey.skey, psta->dot11tkiptxmickey.skey, 16);
+
+ break;
+
+ case _AES_:
+
+ pattrib->iv_len = 8;
+ pattrib->icv_len = 8;
+
+ if (bmcast)
+ AES_IV(pattrib->iv, psta->dot11txpn, pattrib->key_idx);
+ else
+ AES_IV(pattrib->iv, psta->dot11txpn, 0);
+
+ break;
+
+ default:
+ pattrib->iv_len = 0;
+ pattrib->icv_len = 0;
+ break;
+ }
+
+ if (pattrib->encrypt > 0)
+ memcpy(pattrib->dot118021x_UncstKey.skey, psta->dot118021x_UncstKey.skey, 16);
+
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_,
+ ("update_attrib: encrypt =%d securitypriv.sw_encrypt =%d\n",
+ pattrib->encrypt, padapter->securitypriv.sw_encrypt));
+
+ if (pattrib->encrypt &&
+ ((padapter->securitypriv.sw_encrypt == true) || (psecuritypriv->hw_decrypted == false))) {
+ pattrib->bswenc = true;
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_,
+ ("update_attrib: encrypt =%d securitypriv.hw_decrypted =%d bswenc =true\n",
+ pattrib->encrypt, padapter->securitypriv.sw_encrypt));
+ } else {
+ pattrib->bswenc = false;
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("update_attrib: bswenc =false\n"));
+ }
+
+exit:
+
+ return res;
+
+}
+
+u8 qos_acm(u8 acm_mask, u8 priority)
+{
+ u8 change_priority = priority;
+
+ switch (priority) {
+ case 0:
+ case 3:
+ if (acm_mask & BIT(1))
+ change_priority = 1;
+ break;
+ case 1:
+ case 2:
+ break;
+ case 4:
+ case 5:
+ if (acm_mask & BIT(2))
+ change_priority = 0;
+ break;
+ case 6:
+ case 7:
+ if (acm_mask & BIT(3))
+ change_priority = 5;
+ break;
+ default:
+ DBG_871X("qos_acm(): invalid pattrib->priority: %d!!!\n", priority);
+ break;
+ }
+
+ return change_priority;
+}
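
qos_acm() downgrades a user priority by one access category whenever the AP's ACM mask mandates admission control for that AC: BE (UP 0/3) falls back to BK, VI (UP 4/5) to BE, and VO (UP 6/7) to VI. A hedged usage sketch, assuming bit 1 = BE, bit 2 = VI and bit 3 = VO as in the switch above (ex_acm_mask is a made-up value):

	u8 ex_acm_mask = BIT(3);	/* hypothetical mask: admission control required for VO only */
	u8 up;

	up = qos_acm(ex_acm_mask, 7);	/* VO frame: downgraded to UP 5 (VI) */
	up = qos_acm(ex_acm_mask, 5);	/* VI frame: BIT(2) is clear, stays at UP 5 */
	up = qos_acm(ex_acm_mask, 0);	/* BE frame: BIT(1) is clear, stays at UP 0 */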
+
+static void set_qos(struct pkt_file *ppktfile, struct pkt_attrib *pattrib)
+{
+ struct ethhdr etherhdr;
+ struct iphdr ip_hdr;
+ s32 UserPriority = 0;
+
+
+ _rtw_open_pktfile(ppktfile->pkt, ppktfile);
+ _rtw_pktfile_read(ppktfile, (unsigned char *)&etherhdr, ETH_HLEN);
+
+ /* get UserPriority from IP hdr */
+ if (pattrib->ether_type == 0x0800) {
+ _rtw_pktfile_read(ppktfile, (u8 *)&ip_hdr, sizeof(ip_hdr));
+/* UserPriority = (ntohs(ip_hdr.tos) >> 5) & 0x3; */
+ UserPriority = ip_hdr.tos >> 5;
+ }
+ pattrib->priority = UserPriority;
+ pattrib->hdrlen = WLAN_HDR_A3_QOS_LEN;
+ pattrib->subtype = WIFI_QOS_DATA_TYPE;
+}
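
set_qos() derives the 802.11 user priority directly from the 3-bit IP Precedence field, i.e. the top three bits of the IPv4 TOS/DSCP octet. A self-contained sketch of that extraction (ex_tos_to_up() is an illustrative name only):

	static unsigned int ex_tos_to_up(unsigned char tos)
	{
		return tos >> 5;	/* IP Precedence: 0 (routine) .. 7 (network control) */
	}

	/* e.g. TOS 0xB8 (DSCP EF) -> precedence 5 -> UP 5, the video AC under WMM */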
+
+static s32 update_attrib(struct adapter *padapter, _pkt *pkt, struct pkt_attrib *pattrib)
+{
+ uint i;
+ struct pkt_file pktfile;
+ struct sta_info *psta = NULL;
+ struct ethhdr etherhdr;
+
+ sint bmcast;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct qos_priv *pqospriv = &pmlmepriv->qospriv;
+ sint res = _SUCCESS;
+
+ DBG_COUNTER(padapter->tx_logs.core_tx_upd_attrib);
+
+ _rtw_open_pktfile(pkt, &pktfile);
+ i = _rtw_pktfile_read(&pktfile, (u8 *)&etherhdr, ETH_HLEN);
+
+ pattrib->ether_type = ntohs(etherhdr.h_proto);
+
+
+ memcpy(pattrib->dst, &etherhdr.h_dest, ETH_ALEN);
+ memcpy(pattrib->src, &etherhdr.h_source, ETH_ALEN);
+
+
+ if ((check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) == true) ||
+ (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) == true)) {
+ memcpy(pattrib->ra, pattrib->dst, ETH_ALEN);
+ memcpy(pattrib->ta, pattrib->src, ETH_ALEN);
+ DBG_COUNTER(padapter->tx_logs.core_tx_upd_attrib_adhoc);
+ } else if (check_fwstate(pmlmepriv, WIFI_STATION_STATE)) {
+ memcpy(pattrib->ra, get_bssid(pmlmepriv), ETH_ALEN);
+ memcpy(pattrib->ta, pattrib->src, ETH_ALEN);
+ DBG_COUNTER(padapter->tx_logs.core_tx_upd_attrib_sta);
+ } else if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
+ memcpy(pattrib->ra, pattrib->dst, ETH_ALEN);
+ memcpy(pattrib->ta, get_bssid(pmlmepriv), ETH_ALEN);
+ DBG_COUNTER(padapter->tx_logs.core_tx_upd_attrib_ap);
+ } else
+ DBG_COUNTER(padapter->tx_logs.core_tx_upd_attrib_unknown);
+
+ pattrib->pktlen = pktfile.pkt_len;
+
+ if (ETH_P_IP == pattrib->ether_type) {
+ /* The following is for DHCP and ARP packets: we use CCK 1M to tx these packets and keep LPS awake for some time */
+ /* to prevent the DHCP protocol from failing */
+
+ u8 tmp[24];
+
+ _rtw_pktfile_read(&pktfile, &tmp[0], 24);
+
+ pattrib->dhcp_pkt = 0;
+ if (pktfile.pkt_len > 282) { /* MINIMUM_DHCP_PACKET_SIZE */
+ if (ETH_P_IP == pattrib->ether_type) {/* IP header */
+ if (((tmp[21] == 68) && (tmp[23] == 67)) ||
+ ((tmp[21] == 67) && (tmp[23] == 68))) {
+ /* 68 : UDP BOOTP client */
+ /* 67 : UDP BOOTP server */
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("======================update_attrib: get DHCP Packet\n"));
+ pattrib->dhcp_pkt = 1;
+ DBG_COUNTER(padapter->tx_logs.core_tx_upd_attrib_dhcp);
+ }
+ }
+ }
+
+ /* for parsing ICMP packets */
+ {
+ struct iphdr *piphdr = (struct iphdr *)tmp;
+
+ pattrib->icmp_pkt = 0;
+ if (piphdr->protocol == 0x1) { /* protocol type in ip header 0x1 is ICMP */
+ pattrib->icmp_pkt = 1;
+ DBG_COUNTER(padapter->tx_logs.core_tx_upd_attrib_icmp);
+ }
+ }
+
+
+ } else if (0x888e == pattrib->ether_type) {
+ DBG_871X_LEVEL(_drv_always_, "send eapol packet\n");
+ }
+
+ if ((pattrib->ether_type == 0x888e) || (pattrib->dhcp_pkt == 1))
+ rtw_set_scan_deny(padapter, 3000);
+
+ /* If EAPOL, ARP, or DHCP packet, the driver must be in active mode. */
+ if (pattrib->icmp_pkt == 1)
+ rtw_lps_ctrl_wk_cmd(padapter, LPS_CTRL_LEAVE, 1);
+ else if (pattrib->dhcp_pkt == 1) {
+ DBG_COUNTER(padapter->tx_logs.core_tx_upd_attrib_active);
+ rtw_lps_ctrl_wk_cmd(padapter, LPS_CTRL_SPECIAL_PACKET, 1);
+ }
+
+ bmcast = IS_MCAST(pattrib->ra);
+
+ /* get sta_info */
+ if (bmcast) {
+ psta = rtw_get_bcmc_stainfo(padapter);
+ } else {
+ psta = rtw_get_stainfo(pstapriv, pattrib->ra);
+ if (psta == NULL) { /* if we cannot get psta => drop the pkt */
+ DBG_COUNTER(padapter->tx_logs.core_tx_upd_attrib_err_ucast_sta);
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_alert_, ("\nupdate_attrib => get sta_info fail, ra:" MAC_FMT"\n", MAC_ARG(pattrib->ra)));
+ #ifdef DBG_TX_DROP_FRAME
+ DBG_871X("DBG_TX_DROP_FRAME %s get sta_info fail, ra:" MAC_FMT"\n", __func__, MAC_ARG(pattrib->ra));
+ #endif
+ res = _FAIL;
+ goto exit;
+ } else if ((check_fwstate(pmlmepriv, WIFI_AP_STATE) == true) && (!(psta->state & _FW_LINKED))) {
+ DBG_COUNTER(padapter->tx_logs.core_tx_upd_attrib_err_ucast_ap_link);
+ res = _FAIL;
+ goto exit;
+ }
+ }
+
+ if (psta == NULL) {
+ /* if we cannot get psta => drop the pkt */
+ DBG_COUNTER(padapter->tx_logs.core_tx_upd_attrib_err_sta);
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_alert_, ("\nupdate_attrib => get sta_info fail, ra:" MAC_FMT "\n", MAC_ARG(pattrib->ra)));
+ #ifdef DBG_TX_DROP_FRAME
+ DBG_871X("DBG_TX_DROP_FRAME %s get sta_info fail, ra:" MAC_FMT"\n", __func__, MAC_ARG(pattrib->ra));
+ #endif
+ res = _FAIL;
+ goto exit;
+ }
+
+ if (!(psta->state & _FW_LINKED)) {
+ DBG_COUNTER(padapter->tx_logs.core_tx_upd_attrib_err_link);
+ DBG_871X("%s, psta("MAC_FMT")->state(0x%x) != _FW_LINKED\n", __func__, MAC_ARG(psta->hwaddr), psta->state);
+ return _FAIL;
+ }
+
+
+
+ /* TODO:_lock */
+ if (update_attrib_sec_info(padapter, pattrib, psta) == _FAIL) {
+ DBG_COUNTER(padapter->tx_logs.core_tx_upd_attrib_err_sec);
+ res = _FAIL;
+ goto exit;
+ }
+
+ update_attrib_phy_info(padapter, pattrib, psta);
+
+ /* DBG_8192C("%s ==> mac_id(%d)\n", __func__, pattrib->mac_id); */
+
+ pattrib->psta = psta;
+ /* TODO:_unlock */
+
+ pattrib->pctrl = 0;
+
+ pattrib->ack_policy = 0;
+ /* get ether_hdr_len */
+ pattrib->pkt_hdrlen = ETH_HLEN;/* pattrib->ether_type == 0x8100) ? (14 + 4): 14; vlan tag */
+
+ pattrib->hdrlen = WLAN_HDR_A3_LEN;
+ pattrib->subtype = WIFI_DATA_TYPE;
+ pattrib->priority = 0;
+
+ if (check_fwstate(pmlmepriv, WIFI_AP_STATE|WIFI_ADHOC_STATE|WIFI_ADHOC_MASTER_STATE)) {
+ if (pattrib->qos_en)
+ set_qos(&pktfile, pattrib);
+ } else{
+ if (pqospriv->qos_option) {
+ set_qos(&pktfile, pattrib);
+
+ if (pmlmepriv->acm_mask != 0)
+ pattrib->priority = qos_acm(pmlmepriv->acm_mask, pattrib->priority);
+
+ }
+ }
+
+ /* pattrib->priority = 5; force to use the VI queue, for testing */
+
+ rtw_set_tx_chksum_offload(pkt, pattrib);
+
+exit:
+ return res;
+}
+
+static s32 xmitframe_addmic(struct adapter *padapter, struct xmit_frame *pxmitframe)
+{
+ sint curfragnum, length;
+ u8 *pframe, *payload, mic[8];
+ struct mic_data micdata;
+ /* struct sta_info *stainfo; */
+ struct pkt_attrib *pattrib = &pxmitframe->attrib;
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+ u8 priority[4] = {0x0, 0x0, 0x0, 0x0};
+ u8 hw_hdr_offset = 0;
+ sint bmcst = IS_MCAST(pattrib->ra);
+
+/*
+ if (pattrib->psta)
+ {
+ stainfo = pattrib->psta;
+ }
+ else
+ {
+ DBG_871X("%s, call rtw_get_stainfo()\n", __func__);
+ stainfo =rtw_get_stainfo(&padapter->stapriv ,&pattrib->ra[0]);
+ }
+
+ if (stainfo == NULL)
+ {
+ DBG_871X("%s, psta ==NUL\n", __func__);
+ return _FAIL;
+ }
+
+ if (!(stainfo->state &_FW_LINKED))
+ {
+ DBG_871X("%s, psta->state(0x%x) != _FW_LINKED\n", __func__, stainfo->state);
+ return _FAIL;
+ }
+*/
+
+ hw_hdr_offset = TXDESC_OFFSET;
+
+ if (pattrib->encrypt == _TKIP_) { /* if (psecuritypriv->dot11PrivacyAlgrthm == _TKIP_PRIVACY_) */
+ /* encode mic code */
+ /* if (stainfo!= NULL) */
+ {
+ u8 null_key[16] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0};
+
+ pframe = pxmitframe->buf_addr + hw_hdr_offset;
+
+ if (bmcst) {
+ if (!memcmp(psecuritypriv->dot118021XGrptxmickey[psecuritypriv->dot118021XGrpKeyid].skey, null_key, 16)) {
+ /* DbgPrint("\nxmitframe_addmic:stainfo->dot11tkiptxmickey == 0\n"); */
+ /* msleep(10); */
+ return _FAIL;
+ }
+ /* start to calculate the mic code */
+ rtw_secmicsetkey(&micdata, psecuritypriv->dot118021XGrptxmickey[psecuritypriv->dot118021XGrpKeyid].skey);
+ } else {
+ if (!memcmp(&pattrib->dot11tkiptxmickey.skey[0], null_key, 16)) {
+ /* DbgPrint("\nxmitframe_addmic:stainfo->dot11tkiptxmickey == 0\n"); */
+ /* msleep(10); */
+ return _FAIL;
+ }
+ /* start to calculate the mic code */
+ rtw_secmicsetkey(&micdata, &pattrib->dot11tkiptxmickey.skey[0]);
+ }
+
+ if (pframe[1]&1) { /* ToDS == 1 */
+ rtw_secmicappend(&micdata, &pframe[16], 6); /* DA */
+ if (pframe[1]&2) /* From Ds == 1 */
+ rtw_secmicappend(&micdata, &pframe[24], 6);
+ else
+ rtw_secmicappend(&micdata, &pframe[10], 6);
+ } else { /* ToDS == 0 */
+ rtw_secmicappend(&micdata, &pframe[4], 6); /* DA */
+ if (pframe[1]&2) /* From Ds == 1 */
+ rtw_secmicappend(&micdata, &pframe[16], 6);
+ else
+ rtw_secmicappend(&micdata, &pframe[10], 6);
+
+ }
+
+ /* if (pqospriv->qos_option == 1) */
+ if (pattrib->qos_en)
+ priority[0] = (u8)pxmitframe->attrib.priority;
+
+
+ rtw_secmicappend(&micdata, &priority[0], 4);
+
+ payload = pframe;
+
+ for (curfragnum = 0; curfragnum < pattrib->nr_frags; curfragnum++) {
+ payload = (u8 *)RND4((SIZE_PTR)(payload));
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("===curfragnum =%d, pframe = 0x%.2x, 0x%.2x, 0x%.2x, 0x%.2x, 0x%.2x, 0x%.2x, 0x%.2x, 0x%.2x,!!!\n",
+ curfragnum, *payload, *(payload+1), *(payload+2), *(payload+3), *(payload+4), *(payload+5), *(payload+6), *(payload+7)));
+
+ payload = payload+pattrib->hdrlen+pattrib->iv_len;
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("curfragnum =%d pattrib->hdrlen =%d pattrib->iv_len =%d", curfragnum, pattrib->hdrlen, pattrib->iv_len));
+ if ((curfragnum+1) == pattrib->nr_frags) {
+ length = pattrib->last_txcmdsz-pattrib->hdrlen-pattrib->iv_len-((pattrib->bswenc) ? pattrib->icv_len : 0);
+ rtw_secmicappend(&micdata, payload, length);
+ payload = payload+length;
+ } else{
+ length = pxmitpriv->frag_len-pattrib->hdrlen-pattrib->iv_len-((pattrib->bswenc) ? pattrib->icv_len : 0);
+ rtw_secmicappend(&micdata, payload, length);
+ payload = payload+length+pattrib->icv_len;
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("curfragnum =%d length =%d pattrib->icv_len =%d", curfragnum, length, pattrib->icv_len));
+ }
+ }
+ rtw_secgetmic(&micdata, &(mic[0]));
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("xmitframe_addmic: before add mic code!!!\n"));
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("xmitframe_addmic: pattrib->last_txcmdsz =%d!!!\n", pattrib->last_txcmdsz));
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("xmitframe_addmic: mic[0]= 0x%.2x , mic[1]= 0x%.2x , mic[2]= 0x%.2x , mic[3]= 0x%.2x\n\
+ mic[4]= 0x%.2x , mic[5]= 0x%.2x , mic[6]= 0x%.2x , mic[7]= 0x%.2x !!!!\n",
+ mic[0], mic[1], mic[2], mic[3], mic[4], mic[5], mic[6], mic[7]));
+ /* add mic code and add the mic code length in last_txcmdsz */
+
+ memcpy(payload, &(mic[0]), 8);
+ pattrib->last_txcmdsz += 8;
+
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("\n ========last pkt ========\n"));
+ payload = payload-pattrib->last_txcmdsz+8;
+ for (curfragnum = 0; curfragnum < pattrib->last_txcmdsz; curfragnum = curfragnum+8)
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, (" %.2x, %.2x, %.2x, %.2x, %.2x, %.2x, %.2x, %.2x ",
+ *(payload+curfragnum), *(payload+curfragnum+1), *(payload+curfragnum+2), *(payload+curfragnum+3),
+ *(payload+curfragnum+4), *(payload+curfragnum+5), *(payload+curfragnum+6), *(payload+curfragnum+7)));
+ }
+/*
+ else {
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("xmitframe_addmic: rtw_get_stainfo == NULL!!!\n"));
+ }
+*/
+ }
+ return _SUCCESS;
+}
+
+static s32 xmitframe_swencrypt(struct adapter *padapter, struct xmit_frame *pxmitframe)
+{
+
+ struct pkt_attrib *pattrib = &pxmitframe->attrib;
+ /* struct security_priv *psecuritypriv =&padapter->securitypriv; */
+
+ /* if ((psecuritypriv->sw_encrypt)||(pattrib->bswenc)) */
+ if (pattrib->bswenc) {
+ /* DBG_871X("start xmitframe_swencrypt\n"); */
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_alert_, ("### xmitframe_swencrypt\n"));
+ switch (pattrib->encrypt) {
+ case _WEP40_:
+ case _WEP104_:
+ rtw_wep_encrypt(padapter, (u8 *)pxmitframe);
+ break;
+ case _TKIP_:
+ rtw_tkip_encrypt(padapter, (u8 *)pxmitframe);
+ break;
+ case _AES_:
+ rtw_aes_encrypt(padapter, (u8 *)pxmitframe);
+ break;
+ default:
+ break;
+ }
+
+ } else
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_notice_, ("### xmitframe_hwencrypt\n"));
+
+ return _SUCCESS;
+}
+
+s32 rtw_make_wlanhdr(struct adapter *padapter, u8 *hdr, struct pkt_attrib *pattrib)
+{
+ u16 *qc;
+
+ struct ieee80211_hdr *pwlanhdr = (struct ieee80211_hdr *)hdr;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct qos_priv *pqospriv = &pmlmepriv->qospriv;
+ u8 qos_option = false;
+ sint res = _SUCCESS;
+ __le16 *fctrl = &pwlanhdr->frame_control;
+
+ memset(hdr, 0, WLANHDR_OFFSET);
+
+ SetFrameSubType(fctrl, pattrib->subtype);
+
+ if (pattrib->subtype & WIFI_DATA_TYPE) {
+ if ((check_fwstate(pmlmepriv, WIFI_STATION_STATE) == true)) {
+ /* to_ds = 1, fr_ds = 0; */
+
+ {
+ /* 1. Data transfer to the AP */
+ /* 2. ARP pkts will be relayed by the AP */
+ SetToDs(fctrl);
+ memcpy(pwlanhdr->addr1, get_bssid(pmlmepriv), ETH_ALEN);
+ memcpy(pwlanhdr->addr2, pattrib->src, ETH_ALEN);
+ memcpy(pwlanhdr->addr3, pattrib->dst, ETH_ALEN);
+ }
+
+ if (pqospriv->qos_option)
+ qos_option = true;
+
+ } else if ((check_fwstate(pmlmepriv, WIFI_AP_STATE) == true)) {
+ /* to_ds = 0, fr_ds = 1; */
+ SetFrDs(fctrl);
+ memcpy(pwlanhdr->addr1, pattrib->dst, ETH_ALEN);
+ memcpy(pwlanhdr->addr2, get_bssid(pmlmepriv), ETH_ALEN);
+ memcpy(pwlanhdr->addr3, pattrib->src, ETH_ALEN);
+
+ if (pattrib->qos_en)
+ qos_option = true;
+ } else if ((check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) == true) ||
+ (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) == true)) {
+ memcpy(pwlanhdr->addr1, pattrib->dst, ETH_ALEN);
+ memcpy(pwlanhdr->addr2, pattrib->src, ETH_ALEN);
+ memcpy(pwlanhdr->addr3, get_bssid(pmlmepriv), ETH_ALEN);
+
+ if (pattrib->qos_en)
+ qos_option = true;
+ } else {
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("fw_state:%x is not allowed to xmit frame\n", get_fwstate(pmlmepriv)));
+ res = _FAIL;
+ goto exit;
+ }
+
+ if (pattrib->mdata)
+ SetMData(fctrl);
+
+ if (pattrib->encrypt)
+ SetPrivacy(fctrl);
+
+ if (qos_option) {
+ qc = (unsigned short *)(hdr + pattrib->hdrlen - 2);
+
+ if (pattrib->priority)
+ SetPriority(qc, pattrib->priority);
+
+ SetEOSP(qc, pattrib->eosp);
+
+ SetAckpolicy(qc, pattrib->ack_policy);
+ }
+
+ /* TODO: fill HT Control Field */
+
+ /* Update Seq Num will be handled by f/w */
+ {
+ struct sta_info *psta;
+ psta = rtw_get_stainfo(&padapter->stapriv, pattrib->ra);
+ if (pattrib->psta != psta) {
+ DBG_871X("%s, pattrib->psta(%p) != psta(%p)\n", __func__, pattrib->psta, psta);
+ return _FAIL;
+ }
+
+ if (psta == NULL) {
+ DBG_871X("%s, psta ==NUL\n", __func__);
+ return _FAIL;
+ }
+
+ if (!(psta->state & _FW_LINKED)) {
+ DBG_871X("%s, psta->state(0x%x) != _FW_LINKED\n", __func__, psta->state);
+ return _FAIL;
+ }
+
+
+ if (psta) {
+ psta->sta_xmitpriv.txseq_tid[pattrib->priority]++;
+ psta->sta_xmitpriv.txseq_tid[pattrib->priority] &= 0xFFF;
+ pattrib->seqnum = psta->sta_xmitpriv.txseq_tid[pattrib->priority];
+
+ SetSeqNum(hdr, pattrib->seqnum);
+
+ /* check if enable ampdu */
+ if (pattrib->ht_en && psta->htpriv.ampdu_enable)
+ if (psta->htpriv.agg_enable_bitmap & BIT(pattrib->priority))
+ pattrib->ampdu_en = true;
+
+
+ /* re-check if enable ampdu by BA_starting_seqctrl */
+ if (pattrib->ampdu_en == true) {
+ u16 tx_seq;
+
+ tx_seq = psta->BA_starting_seqctrl[pattrib->priority & 0x0f];
+
+ /* check BA_starting_seqctrl */
+ if (SN_LESS(pattrib->seqnum, tx_seq)) {
+ /* DBG_871X("tx ampdu seqnum(%d) < tx_seq(%d)\n", pattrib->seqnum, tx_seq); */
+ pattrib->ampdu_en = false;/* AGG BK */
+ } else if (SN_EQUAL(pattrib->seqnum, tx_seq)) {
+ psta->BA_starting_seqctrl[pattrib->priority & 0x0f] = (tx_seq+1)&0xfff;
+
+ pattrib->ampdu_en = true;/* AGG EN */
+ } else{
+ /* DBG_871X("tx ampdu over run\n"); */
+ psta->BA_starting_seqctrl[pattrib->priority & 0x0f] = (pattrib->seqnum+1)&0xfff;
+ pattrib->ampdu_en = true;/* AGG EN */
+ }
+
+ }
+ }
+ }
+
+ } else{
+
+ }
+
+exit:
+ return res;
+}
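
The per-TID sequence counter updated above lives in the 12-bit 802.11 sequence-number space (0..4095), and the A-MPDU decision compares it against BA_starting_seqctrl with the modular SN_LESS/SN_EQUAL helpers. A minimal sketch of the wrap-around update (ex_next_seq() is an illustrative name):

	static unsigned short ex_next_seq(unsigned short seq)
	{
		return (seq + 1) & 0xFFF;	/* 802.11 sequence numbers are 12 bits wide */
	}

	/* ex_next_seq(4095) == 0, so comparisons must be done modulo 4096 */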
+
+s32 rtw_txframes_pending(struct adapter *padapter)
+{
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+
+ return ((!list_empty(&pxmitpriv->be_pending.queue)) ||
+ (!list_empty(&pxmitpriv->bk_pending.queue)) ||
+ (!list_empty(&pxmitpriv->vi_pending.queue)) ||
+ (!list_empty(&pxmitpriv->vo_pending.queue)));
+}
+
+/*
+ * Calculate wlan 802.11 packet MAX size from pkt_attrib
+ * This function doesn't consider the fragmentation case
+ */
+u32 rtw_calculate_wlan_pkt_size_by_attribue(struct pkt_attrib *pattrib)
+{
+ u32 len = 0;
+
+ len = pattrib->hdrlen + pattrib->iv_len; /* WLAN Header and IV */
+ len += SNAP_SIZE + sizeof(u16); /* LLC */
+ len += pattrib->pktlen;
+ if (pattrib->encrypt == _TKIP_)
+ len += 8; /* MIC */
+ len += ((pattrib->bswenc) ? pattrib->icv_len : 0); /* ICV */
+
+ return len;
+}
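
As a rough worked example of the arithmetic above, and assuming pktlen is the MSDU payload length: a QoS data frame (26-byte header) protected with CCMP in software (8-byte IV, 8-byte ICV) carrying an 8-byte LLC/SNAP and a 1500-byte payload comes to 26 + 8 + 8 + 1500 + 8 = 1550 bytes; TKIP would add a further 8 bytes for the Michael MIC.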
+
+/*
+
+This sub-routine will perform all the following:
+
+1. remove 802.3 header.
+2. create wlan_header, based on the info in pxmitframe
+3. append sta's iv/ext-iv
+4. append LLC
+5. move frag chunk from pframe to pxmitframe->mem
+6. apply sw-encrypt, if necessary.
+
+*/
+s32 rtw_xmitframe_coalesce(struct adapter *padapter, _pkt *pkt, struct xmit_frame *pxmitframe)
+{
+ struct pkt_file pktfile;
+
+ s32 frg_inx, frg_len, mpdu_len, llc_sz, mem_sz;
+
+ SIZE_PTR addr;
+
+ u8 *pframe, *mem_start;
+ u8 hw_hdr_offset;
+
+ /* struct sta_info *psta; */
+ /* struct sta_priv *pstapriv = &padapter->stapriv; */
+ /* struct mlme_priv *pmlmepriv = &padapter->mlmepriv; */
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+
+ struct pkt_attrib *pattrib = &pxmitframe->attrib;
+
+ u8 *pbuf_start;
+
+ s32 bmcst = IS_MCAST(pattrib->ra);
+ s32 res = _SUCCESS;
+
+/*
+ if (pattrib->psta)
+ {
+ psta = pattrib->psta;
+ } else
+ {
+ DBG_871X("%s, call rtw_get_stainfo()\n", __func__);
+ psta = rtw_get_stainfo(&padapter->stapriv, pattrib->ra);
+ }
+
+ if (psta == NULL)
+ {
+
+ DBG_871X("%s, psta ==NUL\n", __func__);
+ return _FAIL;
+ }
+
+
+ if (!(psta->state &_FW_LINKED))
+ {
+ DBG_871X("%s, psta->state(0x%x) != _FW_LINKED\n", __func__, psta->state);
+ return _FAIL;
+ }
+*/
+ if (pxmitframe->buf_addr == NULL) {
+ DBG_8192C("==> %s buf_addr == NULL\n", __func__);
+ return _FAIL;
+ }
+
+ pbuf_start = pxmitframe->buf_addr;
+
+ hw_hdr_offset = TXDESC_OFFSET;
+ mem_start = pbuf_start + hw_hdr_offset;
+
+ if (rtw_make_wlanhdr(padapter, mem_start, pattrib) == _FAIL) {
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("rtw_xmitframe_coalesce: rtw_make_wlanhdr fail; drop pkt\n"));
+ DBG_8192C("rtw_xmitframe_coalesce: rtw_make_wlanhdr fail; drop pkt\n");
+ res = _FAIL;
+ goto exit;
+ }
+
+ _rtw_open_pktfile(pkt, &pktfile);
+ _rtw_pktfile_read(&pktfile, NULL, pattrib->pkt_hdrlen);
+
+ frg_inx = 0;
+ frg_len = pxmitpriv->frag_len - 4;/* 2346-4 = 2342 */
+
+ while (1) {
+ llc_sz = 0;
+
+ mpdu_len = frg_len;
+
+ pframe = mem_start;
+
+ SetMFrag(mem_start);
+
+ pframe += pattrib->hdrlen;
+ mpdu_len -= pattrib->hdrlen;
+
+ /* add the IV, if necessary... */
+ if (pattrib->iv_len) {
+ memcpy(pframe, pattrib->iv, pattrib->iv_len);
+
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_notice_,
+ ("rtw_xmitframe_coalesce: keyid =%d pattrib->iv[3]=%.2x pframe =%.2x %.2x %.2x %.2x\n",
+ padapter->securitypriv.dot11PrivacyKeyIndex, pattrib->iv[3], *pframe, *(pframe+1), *(pframe+2), *(pframe+3)));
+
+ pframe += pattrib->iv_len;
+
+ mpdu_len -= pattrib->iv_len;
+ }
+
+ if (frg_inx == 0) {
+ llc_sz = rtw_put_snap(pframe, pattrib->ether_type);
+ pframe += llc_sz;
+ mpdu_len -= llc_sz;
+ }
+
+ if ((pattrib->icv_len > 0) && (pattrib->bswenc)) {
+ mpdu_len -= pattrib->icv_len;
+ }
+
+
+ if (bmcst) {
+ /* don't fragment broadcast/multicast packets */
+ mem_sz = _rtw_pktfile_read(&pktfile, pframe, pattrib->pktlen);
+ } else {
+ mem_sz = _rtw_pktfile_read(&pktfile, pframe, mpdu_len);
+ }
+
+ pframe += mem_sz;
+
+ if ((pattrib->icv_len > 0) && (pattrib->bswenc)) {
+ memcpy(pframe, pattrib->icv, pattrib->icv_len);
+ pframe += pattrib->icv_len;
+ }
+
+ frg_inx++;
+
+ if (bmcst || (rtw_endofpktfile(&pktfile) == true)) {
+ pattrib->nr_frags = frg_inx;
+
+ pattrib->last_txcmdsz = pattrib->hdrlen + pattrib->iv_len + ((pattrib->nr_frags == 1) ? llc_sz:0) +
+ ((pattrib->bswenc) ? pattrib->icv_len : 0) + mem_sz;
+
+ ClearMFrag(mem_start);
+
+ break;
+ } else
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("%s: There're still something in packet!\n", __func__));
+
+ addr = (SIZE_PTR)(pframe);
+
+ mem_start = (unsigned char *)RND4(addr) + hw_hdr_offset;
+ memcpy(mem_start, pbuf_start + hw_hdr_offset, pattrib->hdrlen);
+
+ }
+
+ if (xmitframe_addmic(padapter, pxmitframe) == _FAIL) {
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("xmitframe_addmic(padapter, pxmitframe) == _FAIL\n"));
+ DBG_8192C("xmitframe_addmic(padapter, pxmitframe) == _FAIL\n");
+ res = _FAIL;
+ goto exit;
+ }
+
+ xmitframe_swencrypt(padapter, pxmitframe);
+
+ if (bmcst == false)
+ update_attrib_vcs_info(padapter, pxmitframe);
+ else
+ pattrib->vcs_mode = NONE_VCS;
+
+exit:
+ return res;
+}
+
+/* broadcast/multicast management pkts use BIP; unicast management pkts use CCMP encryption */
+s32 rtw_mgmt_xmitframe_coalesce(struct adapter *padapter, _pkt *pkt, struct xmit_frame *pxmitframe)
+{
+ u8 *pframe, *mem_start = NULL, *tmp_buf = NULL;
+ u8 subtype;
+ struct sta_info *psta = NULL;
+ struct pkt_attrib *pattrib = &pxmitframe->attrib;
+ s32 bmcst = IS_MCAST(pattrib->ra);
+ u8 *BIP_AAD = NULL;
+ u8 *MGMT_body = NULL;
+
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct ieee80211_hdr *pwlanhdr;
+ u8 MME[_MME_IE_LENGTH_];
+ u32 ori_len;
+ mem_start = pframe = (u8 *)(pxmitframe->buf_addr) + TXDESC_OFFSET;
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
+
+ ori_len = BIP_AAD_SIZE+pattrib->pktlen;
+ tmp_buf = BIP_AAD = rtw_zmalloc(ori_len);
+ subtype = GetFrameSubType(pframe); /* bit(7)~bit(2) */
+
+ if (BIP_AAD == NULL)
+ return _FAIL;
+
+ spin_lock_bh(&padapter->security_key_mutex);
+
+ /* only support station mode */
+ if (!check_fwstate(pmlmepriv, WIFI_STATION_STATE) || !check_fwstate(pmlmepriv, _FW_LINKED))
+ goto xmitframe_coalesce_success;
+
+ /* IGTK key is not installed; the peer may not support 802.11w */
+ if (padapter->securitypriv.binstallBIPkey != true) {
+ DBG_871X("no instll BIP key\n");
+ goto xmitframe_coalesce_success;
+ }
+ /* station mode doesn't need to TX BIP; the code is just kept ready */
+ if (bmcst) {
+ int frame_body_len;
+ u8 mic[16];
+
+ memset(MME, 0, 18);
+
+ /* other types don't need BIP */
+ if (GetFrameSubType(pframe) != WIFI_DEAUTH && GetFrameSubType(pframe) != WIFI_DISASSOC)
+ goto xmitframe_coalesce_fail;
+
+ MGMT_body = pframe + sizeof(struct ieee80211_hdr_3addr);
+ pframe += pattrib->pktlen;
+
+ /* octets 0 and 1 are the key index; the BIP keyid is 4 or 5, so only octet 0 is needed for the LSB */
+ MME[0] = padapter->securitypriv.dot11wBIPKeyid;
+ /* copy packet number */
+ memcpy(&MME[2], &pmlmeext->mgnt_80211w_IPN, 6);
+ /* increase the packet number */
+ pmlmeext->mgnt_80211w_IPN++;
+
+ /* add MME IE with MIC all zero, MME string doesn't include element id and length */
+ pframe = rtw_set_ie(pframe, _MME_IE_, 16, MME, &(pattrib->pktlen));
+ pattrib->last_txcmdsz = pattrib->pktlen;
+ /* total frame length - header length */
+ frame_body_len = pattrib->pktlen - sizeof(struct ieee80211_hdr_3addr);
+
+ /* construct AAD, copy the frame control field */
+ memcpy(BIP_AAD, &pwlanhdr->frame_control, 2);
+ ClearRetry(BIP_AAD);
+ ClearPwrMgt(BIP_AAD);
+ ClearMData(BIP_AAD);
+ /* construct AAD, copy address 1 to address 3 */
+ memcpy(BIP_AAD+2, pwlanhdr->addr1, 18);
+ /* copy the management frame body */
+ memcpy(BIP_AAD+BIP_AAD_SIZE, MGMT_body, frame_body_len);
+ /* calculate mic */
+ if (omac1_aes_128(padapter->securitypriv.dot11wBIPKey[padapter->securitypriv.dot11wBIPKeyid].skey
+ , BIP_AAD, BIP_AAD_SIZE+frame_body_len, mic))
+ goto xmitframe_coalesce_fail;
+
+ /* copy the correct BIP MIC value; the total is 128 bits, of which we use bits 0~63 */
+ memcpy(pframe-8, mic, 8);
+ } else { /* unicast mgmt frame TX */
+ /* start to encrypt mgmt frame */
+ if (subtype == WIFI_DEAUTH || subtype == WIFI_DISASSOC ||
+ subtype == WIFI_REASSOCREQ || subtype == WIFI_ACTION) {
+ if (pattrib->psta)
+ psta = pattrib->psta;
+ else
+ psta = rtw_get_stainfo(&padapter->stapriv, pattrib->ra);
+
+ if (psta == NULL) {
+
+ DBG_871X("%s, psta ==NUL\n", __func__);
+ goto xmitframe_coalesce_fail;
+ }
+
+ if (!(psta->state & _FW_LINKED) || pxmitframe->buf_addr == NULL) {
+ DBG_871X("%s, not _FW_LINKED or addr null\n", __func__);
+ goto xmitframe_coalesce_fail;
+ }
+
+ /* DBG_871X("%s, action frame category =%d\n", __func__, pframe[WLAN_HDR_A3_LEN]); */
+ /* according to the 802.11-2012 standard, these five types are not robust types */
+ if (subtype == WIFI_ACTION &&
+ (pframe[WLAN_HDR_A3_LEN] == RTW_WLAN_CATEGORY_PUBLIC ||
+ pframe[WLAN_HDR_A3_LEN] == RTW_WLAN_CATEGORY_HT ||
+ pframe[WLAN_HDR_A3_LEN] == RTW_WLAN_CATEGORY_UNPROTECTED_WNM ||
+ pframe[WLAN_HDR_A3_LEN] == RTW_WLAN_CATEGORY_SELF_PROTECTED ||
+ pframe[WLAN_HDR_A3_LEN] == RTW_WLAN_CATEGORY_P2P))
+ goto xmitframe_coalesce_fail;
+ /* before encryption, dump the management packet content */
+ if (pattrib->encrypt > 0)
+ memcpy(pattrib->dot118021x_UncstKey.skey, psta->dot118021x_UncstKey.skey, 16);
+ /* back up the original management packet */
+ memcpy(tmp_buf, pframe, pattrib->pktlen);
+ /* move to data portion */
+ pframe += pattrib->hdrlen;
+
+ /* 802.11w unicast management packet must be _AES_ */
+ pattrib->iv_len = 8;
+ /* it's MIC of AES */
+ pattrib->icv_len = 8;
+
+ switch (pattrib->encrypt) {
+ case _AES_:
+ /* set AES IV header */
+ AES_IV(pattrib->iv, psta->dot11wtxpn, 0);
+ break;
+ default:
+ goto xmitframe_coalesce_fail;
+ }
+ /* insert iv header into management frame */
+ memcpy(pframe, pattrib->iv, pattrib->iv_len);
+ pframe += pattrib->iv_len;
+ /* copy mgmt data portion after CCMP header */
+ memcpy(pframe, tmp_buf+pattrib->hdrlen, pattrib->pktlen-pattrib->hdrlen);
+ /* move pframe to end of mgmt pkt */
+ pframe += pattrib->pktlen-pattrib->hdrlen;
+ /* add 8 bytes CCMP IV header to length */
+ pattrib->pktlen += pattrib->iv_len;
+ if ((pattrib->icv_len > 0) && (pattrib->bswenc)) {
+ memcpy(pframe, pattrib->icv, pattrib->icv_len);
+ pframe += pattrib->icv_len;
+ }
+ /* add 8 bytes MIC */
+ pattrib->pktlen += pattrib->icv_len;
+ /* set final tx command size */
+ pattrib->last_txcmdsz = pattrib->pktlen;
+
+ /* setting the protected bit must come before SW encryption */
+ SetPrivacy(mem_start);
+ /* software encrypt */
+ xmitframe_swencrypt(padapter, pxmitframe);
+ }
+ }
+
+xmitframe_coalesce_success:
+ spin_unlock_bh(&padapter->security_key_mutex);
+ kfree(BIP_AAD);
+ return _SUCCESS;
+
+xmitframe_coalesce_fail:
+ spin_unlock_bh(&padapter->security_key_mutex);
+ kfree(BIP_AAD);
+ return _FAIL;
+}
+
+/* Logical Link Control (LLC) SubNetwork Attachment Point (SNAP) header.
+ * The IEEE LLC/SNAP header contains 8 octets.
+ * The first 3 octets comprise the LLC portion.
+ * The SNAP portion, 5 octets, is divided into two fields:
+ *   Organizationally Unique Identifier (OUI), 3 octets,
+ *   type, defined by that organization, 2 octets.
+ */
+s32 rtw_put_snap(u8 *data, u16 h_proto)
+{
+ struct ieee80211_snap_hdr *snap;
+ u8 *oui;
+
+ snap = (struct ieee80211_snap_hdr *)data;
+ snap->dsap = 0xaa;
+ snap->ssap = 0xaa;
+ snap->ctrl = 0x03;
+
+ if (h_proto == 0x8137 || h_proto == 0x80f3)
+ oui = P802_1H_OUI;
+ else
+ oui = RFC1042_OUI;
+
+ snap->oui[0] = oui[0];
+ snap->oui[1] = oui[1];
+ snap->oui[2] = oui[2];
+
+ *(__be16 *)(data + SNAP_SIZE) = htons(h_proto);
+
+ return SNAP_SIZE + sizeof(u16);
+}
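
For an ordinary IPv4 payload the eight octets written above are AA AA 03 (LLC), 00 00 00 (RFC 1042 OUI) and 08 00 (EtherType); only IPX (0x8137) and AppleTalk AARP (0x80f3) are steered to the 802.1H OUI instead. A short usage sketch (ex_llc is an illustrative buffer name):

	unsigned char ex_llc[8];

	rtw_put_snap(ex_llc, ETH_P_IP);	/* ex_llc now holds AA AA 03 00 00 00 08 00 */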
+
+void rtw_update_protection(struct adapter *padapter, u8 *ie, uint ie_len)
+{
+
+ uint protection;
+ u8 *perp;
+ sint erp_len;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+ struct registry_priv *pregistrypriv = &padapter->registrypriv;
+
+ switch (pxmitpriv->vcs_setting) {
+ case DISABLE_VCS:
+ pxmitpriv->vcs = NONE_VCS;
+ break;
+
+ case ENABLE_VCS:
+ break;
+
+ case AUTO_VCS:
+ default:
+ perp = rtw_get_ie(ie, _ERPINFO_IE_, &erp_len, ie_len);
+ if (perp == NULL)
+ pxmitpriv->vcs = NONE_VCS;
+ else{
+ protection = (*(perp + 2)) & BIT(1);
+ if (protection) {
+ if (pregistrypriv->vcs_type == RTS_CTS)
+ pxmitpriv->vcs = RTS_CTS;
+ else
+ pxmitpriv->vcs = CTS_TO_SELF;
+ } else
+ pxmitpriv->vcs = NONE_VCS;
+ }
+
+ break;
+
+ }
+}
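
In the AUTO_VCS case the decision hinges on the Use_Protection bit, bit 1 of the single ERP Information octet; perp points at the element ID, so perp + 2 is that octet. A minimal sketch of the test (ex_erp_use_protection() is an illustrative name):

	static int ex_erp_use_protection(const unsigned char *erp_ie)
	{
		/* erp_ie[0] = element ID (42), erp_ie[1] = length, erp_ie[2] = ERP info octet */
		return (erp_ie[2] & BIT(1)) != 0;	/* bit 1: Use_Protection */
	}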
+
+void rtw_count_tx_stats(struct adapter *padapter, struct xmit_frame *pxmitframe, int sz)
+{
+ struct sta_info *psta = NULL;
+ struct stainfo_stats *pstats = NULL;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ u8 pkt_num = 1;
+
+ if ((pxmitframe->frame_tag&0x0f) == DATA_FRAMETAG) {
+ pkt_num = pxmitframe->agg_num;
+
+ pmlmepriv->LinkDetectInfo.NumTxOkInPeriod += pkt_num;
+
+ pxmitpriv->tx_pkts += pkt_num;
+
+ pxmitpriv->tx_bytes += sz;
+
+ psta = pxmitframe->attrib.psta;
+ if (psta) {
+ pstats = &psta->sta_stats;
+
+ pstats->tx_pkts += pkt_num;
+
+ pstats->tx_bytes += sz;
+ }
+ }
+}
+
+static struct xmit_buf *__rtw_alloc_cmd_xmitbuf(struct xmit_priv *pxmitpriv,
+ enum cmdbuf_type buf_type)
+{
+ struct xmit_buf *pxmitbuf = NULL;
+
+ pxmitbuf = &pxmitpriv->pcmd_xmitbuf[buf_type];
+ if (pxmitbuf != NULL) {
+ pxmitbuf->priv_data = NULL;
+
+ pxmitbuf->len = 0;
+ pxmitbuf->pdata = pxmitbuf->ptail = pxmitbuf->phead;
+ pxmitbuf->agg_num = 0;
+ pxmitbuf->pg_num = 0;
+
+ if (pxmitbuf->sctx) {
+ DBG_871X("%s pxmitbuf->sctx is not NULL\n", __func__);
+ rtw_sctx_done_err(&pxmitbuf->sctx, RTW_SCTX_DONE_BUF_ALLOC);
+ }
+ } else
+ DBG_871X("%s fail, no xmitbuf available !!!\n", __func__);
+
+ return pxmitbuf;
+}
+
+struct xmit_frame *__rtw_alloc_cmdxmitframe(struct xmit_priv *pxmitpriv,
+ enum cmdbuf_type buf_type)
+{
+ struct xmit_frame *pcmdframe;
+ struct xmit_buf *pxmitbuf;
+
+ pcmdframe = rtw_alloc_xmitframe(pxmitpriv);
+ if (pcmdframe == NULL) {
+ DBG_871X("%s, alloc xmitframe fail\n", __func__);
+ return NULL;
+ }
+
+ pxmitbuf = __rtw_alloc_cmd_xmitbuf(pxmitpriv, buf_type);
+ if (pxmitbuf == NULL) {
+ DBG_871X("%s, alloc xmitbuf fail\n", __func__);
+ rtw_free_xmitframe(pxmitpriv, pcmdframe);
+ return NULL;
+ }
+
+ pcmdframe->frame_tag = MGNT_FRAMETAG;
+
+ pcmdframe->pxmitbuf = pxmitbuf;
+
+ pcmdframe->buf_addr = pxmitbuf->pbuf;
+
+ pxmitbuf->priv_data = pcmdframe;
+
+ return pcmdframe;
+
+}
+
+struct xmit_buf *rtw_alloc_xmitbuf_ext(struct xmit_priv *pxmitpriv)
+{
+ _irqL irqL;
+ struct xmit_buf *pxmitbuf = NULL;
+ struct list_head *plist, *phead;
+ struct __queue *pfree_queue = &pxmitpriv->free_xmit_extbuf_queue;
+
+ spin_lock_irqsave(&pfree_queue->lock, irqL);
+
+ if (list_empty(&pfree_queue->queue)) {
+ pxmitbuf = NULL;
+ } else {
+
+ phead = get_list_head(pfree_queue);
+
+ plist = get_next(phead);
+
+ pxmitbuf = LIST_CONTAINOR(plist, struct xmit_buf, list);
+
+ list_del_init(&(pxmitbuf->list));
+ }
+
+ if (pxmitbuf != NULL) {
+ pxmitpriv->free_xmit_extbuf_cnt--;
+ #ifdef DBG_XMIT_BUF_EXT
+ DBG_871X("DBG_XMIT_BUF_EXT ALLOC no =%d, free_xmit_extbuf_cnt =%d\n", pxmitbuf->no, pxmitpriv->free_xmit_extbuf_cnt);
+ #endif
+
+
+ pxmitbuf->priv_data = NULL;
+
+ pxmitbuf->len = 0;
+ pxmitbuf->pdata = pxmitbuf->ptail = pxmitbuf->phead;
+ pxmitbuf->agg_num = 1;
+
+ if (pxmitbuf->sctx) {
+ DBG_871X("%s pxmitbuf->sctx is not NULL\n", __func__);
+ rtw_sctx_done_err(&pxmitbuf->sctx, RTW_SCTX_DONE_BUF_ALLOC);
+ }
+
+ }
+
+ spin_unlock_irqrestore(&pfree_queue->lock, irqL);
+
+ return pxmitbuf;
+}
+
+s32 rtw_free_xmitbuf_ext(struct xmit_priv *pxmitpriv, struct xmit_buf *pxmitbuf)
+{
+ _irqL irqL;
+ struct __queue *pfree_queue = &pxmitpriv->free_xmit_extbuf_queue;
+
+ if (pxmitbuf == NULL)
+ return _FAIL;
+
+ spin_lock_irqsave(&pfree_queue->lock, irqL);
+
+ list_del_init(&pxmitbuf->list);
+
+ list_add_tail(&(pxmitbuf->list), get_list_head(pfree_queue));
+ pxmitpriv->free_xmit_extbuf_cnt++;
+ #ifdef DBG_XMIT_BUF_EXT
+ DBG_871X("DBG_XMIT_BUF_EXT FREE no =%d, free_xmit_extbuf_cnt =%d\n", pxmitbuf->no, pxmitpriv->free_xmit_extbuf_cnt);
+ #endif
+
+ spin_unlock_irqrestore(&pfree_queue->lock, irqL);
+
+ return _SUCCESS;
+}
+
+struct xmit_buf *rtw_alloc_xmitbuf(struct xmit_priv *pxmitpriv)
+{
+ _irqL irqL;
+ struct xmit_buf *pxmitbuf = NULL;
+ struct list_head *plist, *phead;
+ struct __queue *pfree_xmitbuf_queue = &pxmitpriv->free_xmitbuf_queue;
+
+ /* DBG_871X("+rtw_alloc_xmitbuf\n"); */
+
+ spin_lock_irqsave(&pfree_xmitbuf_queue->lock, irqL);
+
+ if (list_empty(&pfree_xmitbuf_queue->queue)) {
+ pxmitbuf = NULL;
+ } else {
+
+ phead = get_list_head(pfree_xmitbuf_queue);
+
+ plist = get_next(phead);
+
+ pxmitbuf = LIST_CONTAINOR(plist, struct xmit_buf, list);
+
+ list_del_init(&(pxmitbuf->list));
+ }
+
+ if (pxmitbuf != NULL) {
+ pxmitpriv->free_xmitbuf_cnt--;
+ #ifdef DBG_XMIT_BUF
+ DBG_871X("DBG_XMIT_BUF ALLOC no =%d, free_xmitbuf_cnt =%d\n", pxmitbuf->no, pxmitpriv->free_xmitbuf_cnt);
+ #endif
+ /* DBG_871X("alloc, free_xmitbuf_cnt =%d\n", pxmitpriv->free_xmitbuf_cnt); */
+
+ pxmitbuf->priv_data = NULL;
+
+ pxmitbuf->len = 0;
+ pxmitbuf->pdata = pxmitbuf->ptail = pxmitbuf->phead;
+ pxmitbuf->agg_num = 0;
+ pxmitbuf->pg_num = 0;
+
+ if (pxmitbuf->sctx) {
+ DBG_871X("%s pxmitbuf->sctx is not NULL\n", __func__);
+ rtw_sctx_done_err(&pxmitbuf->sctx, RTW_SCTX_DONE_BUF_ALLOC);
+ }
+ }
+ #ifdef DBG_XMIT_BUF
+ else
+ DBG_871X("DBG_XMIT_BUF rtw_alloc_xmitbuf return NULL\n");
+ #endif
+
+ spin_unlock_irqrestore(&pfree_xmitbuf_queue->lock, irqL);
+
+ return pxmitbuf;
+}
+
+s32 rtw_free_xmitbuf(struct xmit_priv *pxmitpriv, struct xmit_buf *pxmitbuf)
+{
+ _irqL irqL;
+ struct __queue *pfree_xmitbuf_queue = &pxmitpriv->free_xmitbuf_queue;
+
+ /* DBG_871X("+rtw_free_xmitbuf\n"); */
+
+ if (pxmitbuf == NULL)
+ return _FAIL;
+
+ if (pxmitbuf->sctx) {
+ DBG_871X("%s pxmitbuf->sctx is not NULL\n", __func__);
+ rtw_sctx_done_err(&pxmitbuf->sctx, RTW_SCTX_DONE_BUF_FREE);
+ }
+
+ if (pxmitbuf->buf_tag == XMITBUF_CMD) {
+ } else if (pxmitbuf->buf_tag == XMITBUF_MGNT) {
+ rtw_free_xmitbuf_ext(pxmitpriv, pxmitbuf);
+ } else{
+ spin_lock_irqsave(&pfree_xmitbuf_queue->lock, irqL);
+
+ list_del_init(&pxmitbuf->list);
+
+ list_add_tail(&(pxmitbuf->list), get_list_head(pfree_xmitbuf_queue));
+
+ pxmitpriv->free_xmitbuf_cnt++;
+ /* DBG_871X("FREE, free_xmitbuf_cnt =%d\n", pxmitpriv->free_xmitbuf_cnt); */
+ #ifdef DBG_XMIT_BUF
+ DBG_871X("DBG_XMIT_BUF FREE no =%d, free_xmitbuf_cnt =%d\n", pxmitbuf->no, pxmitpriv->free_xmitbuf_cnt);
+ #endif
+ spin_unlock_irqrestore(&pfree_xmitbuf_queue->lock, irqL);
+ }
+ return _SUCCESS;
+}
+
+static void rtw_init_xmitframe(struct xmit_frame *pxframe)
+{
+ if (pxframe != NULL) { /* default value setting */
+ pxframe->buf_addr = NULL;
+ pxframe->pxmitbuf = NULL;
+
+ memset(&pxframe->attrib, 0, sizeof(struct pkt_attrib));
+ /* pxframe->attrib.psta = NULL; */
+
+ pxframe->frame_tag = DATA_FRAMETAG;
+
+ pxframe->pg_num = 1;
+ pxframe->agg_num = 1;
+ pxframe->ack_report = 0;
+ }
+}
+
+/*
+Calling context:
+1. OS_TXENTRY
+2. RXENTRY (rx_thread or RX_ISR/RX_CallBack)
+
+If we turn on USE_RXTHREAD, then there is no need for a critical section.
+Otherwise, we must use _enter/_exit critical sections to protect free_xmit_queue...
+
+Must be very cautious...
+
+*/
+struct xmit_frame *rtw_alloc_xmitframe(struct xmit_priv *pxmitpriv)/* _queue *pfree_xmit_queue) */
+{
+ /*
+ Please remember to use the osdep_service API,
+ and lock/unlock or _enter/_exit critical sections to protect
+ pfree_xmit_queue
+ */
+
+ struct xmit_frame *pxframe = NULL;
+ struct list_head *plist, *phead;
+ struct __queue *pfree_xmit_queue = &pxmitpriv->free_xmit_queue;
+
+ spin_lock_bh(&pfree_xmit_queue->lock);
+
+ if (list_empty(&pfree_xmit_queue->queue)) {
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("rtw_alloc_xmitframe:%d\n", pxmitpriv->free_xmitframe_cnt));
+ pxframe = NULL;
+ } else {
+ phead = get_list_head(pfree_xmit_queue);
+
+ plist = get_next(phead);
+
+ pxframe = LIST_CONTAINOR(plist, struct xmit_frame, list);
+
+ list_del_init(&(pxframe->list));
+ pxmitpriv->free_xmitframe_cnt--;
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("rtw_alloc_xmitframe():free_xmitframe_cnt =%d\n", pxmitpriv->free_xmitframe_cnt));
+ }
+
+ spin_unlock_bh(&pfree_xmit_queue->lock);
+
+ rtw_init_xmitframe(pxframe);
+ return pxframe;
+}
+
+struct xmit_frame *rtw_alloc_xmitframe_ext(struct xmit_priv *pxmitpriv)
+{
+ struct xmit_frame *pxframe = NULL;
+ struct list_head *plist, *phead;
+ struct __queue *queue = &pxmitpriv->free_xframe_ext_queue;
+
+ spin_lock_bh(&queue->lock);
+
+ if (list_empty(&queue->queue)) {
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("rtw_alloc_xmitframe_ext:%d\n", pxmitpriv->free_xframe_ext_cnt));
+ pxframe = NULL;
+ } else {
+ phead = get_list_head(queue);
+ plist = get_next(phead);
+ pxframe = LIST_CONTAINOR(plist, struct xmit_frame, list);
+
+ list_del_init(&(pxframe->list));
+ pxmitpriv->free_xframe_ext_cnt--;
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("rtw_alloc_xmitframe_ext():free_xmitframe_cnt =%d\n", pxmitpriv->free_xframe_ext_cnt));
+ }
+
+ spin_unlock_bh(&queue->lock);
+
+ rtw_init_xmitframe(pxframe);
+
+ return pxframe;
+}
+
+struct xmit_frame *rtw_alloc_xmitframe_once(struct xmit_priv *pxmitpriv)
+{
+ struct xmit_frame *pxframe = NULL;
+ u8 *alloc_addr;
+
+ alloc_addr = rtw_zmalloc(sizeof(struct xmit_frame) + 4);
+
+ if (alloc_addr == NULL)
+ goto exit;
+
+ pxframe = (struct xmit_frame *)N_BYTE_ALIGMENT((SIZE_PTR)(alloc_addr), 4);
+ pxframe->alloc_addr = alloc_addr;
+
+ pxframe->padapter = pxmitpriv->adapter;
+ pxframe->frame_tag = NULL_FRAMETAG;
+
+ pxframe->pkt = NULL;
+
+ pxframe->buf_addr = NULL;
+ pxframe->pxmitbuf = NULL;
+
+ rtw_init_xmitframe(pxframe);
+
+ DBG_871X("################## %s ##################\n", __func__);
+
+exit:
+ return pxframe;
+}
+
+s32 rtw_free_xmitframe(struct xmit_priv *pxmitpriv, struct xmit_frame *pxmitframe)
+{
+ struct __queue *queue = NULL;
+ struct adapter *padapter = pxmitpriv->adapter;
+ _pkt *pndis_pkt = NULL;
+
+ if (pxmitframe == NULL) {
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("======rtw_free_xmitframe():pxmitframe == NULL!!!!!!!!!!\n"));
+ goto exit;
+ }
+
+ if (pxmitframe->pkt) {
+ pndis_pkt = pxmitframe->pkt;
+ pxmitframe->pkt = NULL;
+ }
+
+ if (pxmitframe->alloc_addr) {
+ DBG_871X("################## %s with alloc_addr ##################\n", __func__);
+ kfree(pxmitframe->alloc_addr);
+ goto check_pkt_complete;
+ }
+
+ if (pxmitframe->ext_tag == 0)
+ queue = &pxmitpriv->free_xmit_queue;
+ else if (pxmitframe->ext_tag == 1)
+ queue = &pxmitpriv->free_xframe_ext_queue;
+ else {
+
+ }
+
+ spin_lock_bh(&queue->lock);
+
+ list_del_init(&pxmitframe->list);
+ list_add_tail(&pxmitframe->list, get_list_head(queue));
+ if (pxmitframe->ext_tag == 0) {
+ pxmitpriv->free_xmitframe_cnt++;
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_debug_, ("rtw_free_xmitframe():free_xmitframe_cnt =%d\n", pxmitpriv->free_xmitframe_cnt));
+ } else if (pxmitframe->ext_tag == 1) {
+ pxmitpriv->free_xframe_ext_cnt++;
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_debug_, ("rtw_free_xmitframe():free_xframe_ext_cnt =%d\n", pxmitpriv->free_xframe_ext_cnt));
+ } else {
+ }
+
+ spin_unlock_bh(&queue->lock);
+
+check_pkt_complete:
+
+ if (pndis_pkt)
+ rtw_os_pkt_complete(padapter, pndis_pkt);
+
+exit:
+ return _SUCCESS;
+}
+
+void rtw_free_xmitframe_queue(struct xmit_priv *pxmitpriv, struct __queue *pframequeue)
+{
+ struct list_head *plist, *phead;
+ struct xmit_frame *pxmitframe;
+
+ spin_lock_bh(&(pframequeue->lock));
+
+ phead = get_list_head(pframequeue);
+ plist = get_next(phead);
+
+ while (phead != plist) {
+
+ pxmitframe = LIST_CONTAINOR(plist, struct xmit_frame, list);
+
+ plist = get_next(plist);
+
+ rtw_free_xmitframe(pxmitpriv, pxmitframe);
+
+ }
+ spin_unlock_bh(&(pframequeue->lock));
+}
+
+s32 rtw_xmitframe_enqueue(struct adapter *padapter, struct xmit_frame *pxmitframe)
+{
+ DBG_COUNTER(padapter->tx_logs.core_tx_enqueue);
+ if (rtw_xmit_classifier(padapter, pxmitframe) == _FAIL) {
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_,
+ ("rtw_xmitframe_enqueue: drop xmit pkt for classifier fail\n"));
+/* pxmitframe->pkt = NULL; */
+ return _FAIL;
+ }
+
+ return _SUCCESS;
+}
+
+struct tx_servq *rtw_get_sta_pending(struct adapter *padapter, struct sta_info *psta, sint up, u8 *ac)
+{
+ struct tx_servq *ptxservq = NULL;
+
+ switch (up) {
+ case 1:
+ case 2:
+ ptxservq = &(psta->sta_xmitpriv.bk_q);
+ *(ac) = 3;
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("rtw_get_sta_pending : BK\n"));
+ break;
+
+ case 4:
+ case 5:
+ ptxservq = &(psta->sta_xmitpriv.vi_q);
+ *(ac) = 1;
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("rtw_get_sta_pending : VI\n"));
+ break;
+
+ case 6:
+ case 7:
+ ptxservq = &(psta->sta_xmitpriv.vo_q);
+ *(ac) = 0;
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("rtw_get_sta_pending : VO\n"));
+ break;
+
+ case 0:
+ case 3:
+ default:
+ ptxservq = &(psta->sta_xmitpriv.be_q);
+ *(ac) = 2;
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("rtw_get_sta_pending : BE\n"));
+ break;
+
+ }
+
+ return ptxservq;
+}
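
The switch above is the usual WMM user-priority to access-category mapping, with the returned ac index ordered VO = 0, VI = 1, BE = 2, BK = 3. The same mapping as a lookup table (ex_up_to_ac is illustrative only):

	/* index = user priority 0..7, value = ac index used by this driver */
	static const unsigned char ex_up_to_ac[8] = {
		2,	/* UP 0 -> BE */
		3,	/* UP 1 -> BK */
		3,	/* UP 2 -> BK */
		2,	/* UP 3 -> BE */
		1,	/* UP 4 -> VI */
		1,	/* UP 5 -> VI */
		0,	/* UP 6 -> VO */
		0,	/* UP 7 -> VO */
	};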
+
+/*
+ * Enqueue pxmitframe to the proper per-station queue,
+ * and link it onto the corresponding xx_pending list.
+ */
+s32 rtw_xmit_classifier(struct adapter *padapter, struct xmit_frame *pxmitframe)
+{
+ /* _irqL irqL0; */
+ u8 ac_index;
+ struct sta_info *psta;
+ struct tx_servq *ptxservq;
+ struct pkt_attrib *pattrib = &pxmitframe->attrib;
+ struct hw_xmit *phwxmits = padapter->xmitpriv.hwxmits;
+ sint res = _SUCCESS;
+
+ DBG_COUNTER(padapter->tx_logs.core_tx_enqueue_class);
+
+/*
+ if (pattrib->psta) {
+ psta = pattrib->psta;
+ } else {
+ DBG_871X("%s, call rtw_get_stainfo()\n", __func__);
+ psta = rtw_get_stainfo(pstapriv, pattrib->ra);
+ }
+*/
+
+ psta = rtw_get_stainfo(&padapter->stapriv, pattrib->ra);
+ if (pattrib->psta != psta) {
+ DBG_COUNTER(padapter->tx_logs.core_tx_enqueue_class_err_sta);
+ DBG_871X("%s, pattrib->psta(%p) != psta(%p)\n", __func__, pattrib->psta, psta);
+ return _FAIL;
+ }
+
+ if (psta == NULL) {
+ DBG_COUNTER(padapter->tx_logs.core_tx_enqueue_class_err_nosta);
+ res = _FAIL;
+ DBG_8192C("rtw_xmit_classifier: psta == NULL\n");
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("rtw_xmit_classifier: psta == NULL\n"));
+ goto exit;
+ }
+
+ if (!(psta->state & _FW_LINKED)) {
+ DBG_COUNTER(padapter->tx_logs.core_tx_enqueue_class_err_fwlink);
+ DBG_871X("%s, psta->state(0x%x) != _FW_LINKED\n", __func__, psta->state);
+ return _FAIL;
+ }
+
+ ptxservq = rtw_get_sta_pending(padapter, psta, pattrib->priority, (u8 *)(&ac_index));
+
+ /* spin_lock_irqsave(&pstapending->lock, irqL0); */
+
+ if (list_empty(&ptxservq->tx_pending)) {
+ list_add_tail(&ptxservq->tx_pending, get_list_head(phwxmits[ac_index].sta_queue));
+ }
+
+ /* spin_lock_irqsave(&ptxservq->sta_pending.lock, irqL1); */
+
+ list_add_tail(&pxmitframe->list, get_list_head(&ptxservq->sta_pending));
+ ptxservq->qcnt++;
+ phwxmits[ac_index].accnt++;
+
+ /* spin_unlock_irqrestore(&ptxservq->sta_pending.lock, irqL1); */
+
+ /* spin_unlock_irqrestore(&pstapending->lock, irqL0); */
+
+exit:
+
+ return res;
+}
+
+void rtw_alloc_hwxmits(struct adapter *padapter)
+{
+ struct hw_xmit *hwxmits;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+
+ pxmitpriv->hwxmit_entry = HWXMIT_ENTRY;
+
+ pxmitpriv->hwxmits = NULL;
+
+ pxmitpriv->hwxmits = (struct hw_xmit *)rtw_zmalloc(sizeof(struct hw_xmit) * pxmitpriv->hwxmit_entry);
+
+ if (pxmitpriv->hwxmits == NULL) {
+ DBG_871X("alloc hwxmits fail!...\n");
+ return;
+ }
+
+ hwxmits = pxmitpriv->hwxmits;
+
+ if (pxmitpriv->hwxmit_entry == 5) {
+ /* pxmitpriv->bmc_txqueue.head = 0; */
+ /* hwxmits[0] .phwtxqueue = &pxmitpriv->bmc_txqueue; */
+ hwxmits[0] .sta_queue = &pxmitpriv->bm_pending;
+
+ /* pxmitpriv->vo_txqueue.head = 0; */
+ /* hwxmits[1] .phwtxqueue = &pxmitpriv->vo_txqueue; */
+ hwxmits[1] .sta_queue = &pxmitpriv->vo_pending;
+
+ /* pxmitpriv->vi_txqueue.head = 0; */
+ /* hwxmits[2] .phwtxqueue = &pxmitpriv->vi_txqueue; */
+ hwxmits[2] .sta_queue = &pxmitpriv->vi_pending;
+
+ /* pxmitpriv->bk_txqueue.head = 0; */
+ /* hwxmits[3] .phwtxqueue = &pxmitpriv->bk_txqueue; */
+ hwxmits[3] .sta_queue = &pxmitpriv->bk_pending;
+
+ /* pxmitpriv->be_txqueue.head = 0; */
+ /* hwxmits[4] .phwtxqueue = &pxmitpriv->be_txqueue; */
+ hwxmits[4] .sta_queue = &pxmitpriv->be_pending;
+
+ } else if (pxmitpriv->hwxmit_entry == 4) {
+
+ /* pxmitpriv->vo_txqueue.head = 0; */
+ /* hwxmits[0] .phwtxqueue = &pxmitpriv->vo_txqueue; */
+ hwxmits[0] .sta_queue = &pxmitpriv->vo_pending;
+
+ /* pxmitpriv->vi_txqueue.head = 0; */
+ /* hwxmits[1] .phwtxqueue = &pxmitpriv->vi_txqueue; */
+ hwxmits[1] .sta_queue = &pxmitpriv->vi_pending;
+
+ /* pxmitpriv->be_txqueue.head = 0; */
+ /* hwxmits[2] .phwtxqueue = &pxmitpriv->be_txqueue; */
+ hwxmits[2] .sta_queue = &pxmitpriv->be_pending;
+
+ /* pxmitpriv->bk_txqueue.head = 0; */
+ /* hwxmits[3] .phwtxqueue = &pxmitpriv->bk_txqueue; */
+ hwxmits[3] .sta_queue = &pxmitpriv->bk_pending;
+ } else {
+
+ }
+
+
+}
+
+void rtw_free_hwxmits(struct adapter *padapter)
+{
+ struct hw_xmit *hwxmits;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+
+ hwxmits = pxmitpriv->hwxmits;
+ if (hwxmits)
+ kfree((u8 *)hwxmits);
+}
+
+void rtw_init_hwxmits(struct hw_xmit *phwxmit, sint entry)
+{
+ sint i;
+
+ for (i = 0; i < entry; i++, phwxmit++) {
+ /* spin_lock_init(&phwxmit->xmit_lock); */
+ /* INIT_LIST_HEAD(&phwxmit->pending); */
+ /* phwxmit->txcmdcnt = 0; */
+ phwxmit->accnt = 0;
+ }
+}
+
+u32 rtw_get_ff_hwaddr(struct xmit_frame *pxmitframe)
+{
+ u32 addr;
+ struct pkt_attrib *pattrib = &pxmitframe->attrib;
+
+ switch (pattrib->qsel) {
+ case 0:
+ case 3:
+ addr = BE_QUEUE_INX;
+ break;
+ case 1:
+ case 2:
+ addr = BK_QUEUE_INX;
+ break;
+ case 4:
+ case 5:
+ addr = VI_QUEUE_INX;
+ break;
+ case 6:
+ case 7:
+ addr = VO_QUEUE_INX;
+ break;
+ case 0x10:
+ addr = BCN_QUEUE_INX;
+ break;
+ case 0x11:/* BC/MC in PS (HIQ) */
+ addr = HIGH_QUEUE_INX;
+ break;
+ case 0x12:
+ default:
+ addr = MGT_QUEUE_INX;
+ break;
+
+ }
+
+ return addr;
+
+}
+
+static void do_queue_select(struct adapter *padapter, struct pkt_attrib *pattrib)
+{
+ u8 qsel;
+
+ qsel = pattrib->priority;
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("### do_queue_select priority =%d , qsel = %d\n", pattrib->priority, qsel));
+
+ pattrib->qsel = qsel;
+}
+
+/*
+ * The main transmit(tx) entry
+ *
+ * Return
+ * 1: enqueue
+ * 0: success, hardware will handle this xmit frame (packet)
+ * <0: fail
+ */
+s32 rtw_xmit(struct adapter *padapter, _pkt **ppkt)
+{
+ static unsigned long start = 0;
+ static u32 drop_cnt = 0;
+
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+ struct xmit_frame *pxmitframe = NULL;
+
+ s32 res;
+
+ DBG_COUNTER(padapter->tx_logs.core_tx);
+
+ if (start == 0)
+ start = jiffies;
+
+ pxmitframe = rtw_alloc_xmitframe(pxmitpriv);
+
+ if (jiffies_to_msecs(jiffies - start) > 2000) {
+ if (drop_cnt)
+ DBG_871X("DBG_TX_DROP_FRAME %s no more pxmitframe, drop_cnt:%u\n", __func__, drop_cnt);
+ start = jiffies;
+ drop_cnt = 0;
+ }
+
+ if (pxmitframe == NULL) {
+ drop_cnt++;
+ RT_TRACE(_module_xmit_osdep_c_, _drv_err_, ("rtw_xmit: no more pxmitframe\n"));
+ DBG_COUNTER(padapter->tx_logs.core_tx_err_pxmitframe);
+ return -1;
+ }
+
+ res = update_attrib(padapter, *ppkt, &pxmitframe->attrib);
+
+ if (res == _FAIL) {
+ RT_TRACE(_module_xmit_osdep_c_, _drv_err_, ("rtw_xmit: update attrib fail\n"));
+ #ifdef DBG_TX_DROP_FRAME
+ DBG_871X("DBG_TX_DROP_FRAME %s update attrib fail\n", __func__);
+ #endif
+ rtw_free_xmitframe(pxmitpriv, pxmitframe);
+ return -1;
+ }
+ pxmitframe->pkt = *ppkt;
+
+ do_queue_select(padapter, &pxmitframe->attrib);
+
+ spin_lock_bh(&pxmitpriv->lock);
+ if (xmitframe_enqueue_for_sleeping_sta(padapter, pxmitframe) == true) {
+ spin_unlock_bh(&pxmitpriv->lock);
+ DBG_COUNTER(padapter->tx_logs.core_tx_ap_enqueue);
+ return 1;
+ }
+ spin_unlock_bh(&pxmitpriv->lock);
+
+ /* pre_xmitframe */
+ if (rtw_hal_xmit(padapter, pxmitframe) == false)
+ return 1;
+
+ return 0;
+}
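
A hedged caller sketch of the return convention documented above (ex_hard_start_xmit() is an illustrative name, not the driver's real net-device hook):

	static int ex_hard_start_xmit(struct adapter *padapter, _pkt *pkt)
	{
		s32 ret = rtw_xmit(padapter, &pkt);

		if (ret < 0)
			return -1;	/* <0: the frame was dropped */

		/* 1: parked on a pending/sleep queue; 0: handed to the hardware path */
		return 0;
	}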
+
+#define RTW_HIQ_FILTER_ALLOW_ALL 0
+#define RTW_HIQ_FILTER_ALLOW_SPECIAL 1
+#define RTW_HIQ_FILTER_DENY_ALL 2
+
+inline bool xmitframe_hiq_filter(struct xmit_frame *xmitframe)
+{
+ bool allow = false;
+ struct adapter *adapter = xmitframe->padapter;
+ struct registry_priv *registry = &adapter->registrypriv;
+
+ if (registry->hiq_filter == RTW_HIQ_FILTER_ALLOW_SPECIAL) {
+
+ struct pkt_attrib *attrib = &xmitframe->attrib;
+
+ if (attrib->ether_type == 0x0806
+ || attrib->ether_type == 0x888e
+ || attrib->dhcp_pkt
+ ) {
+ DBG_871X(FUNC_ADPT_FMT" ether_type:0x%04x%s\n", FUNC_ADPT_ARG(xmitframe->padapter)
+ , attrib->ether_type, attrib->dhcp_pkt?" DHCP":"");
+ allow = true;
+ }
+ } else if (registry->hiq_filter == RTW_HIQ_FILTER_ALLOW_ALL)
+ allow = true;
+ else if (registry->hiq_filter == RTW_HIQ_FILTER_DENY_ALL) {
+ } else
+ rtw_warn_on(1);
+
+ return allow;
+}
+
+sint xmitframe_enqueue_for_sleeping_sta(struct adapter *padapter, struct xmit_frame *pxmitframe)
+{
+ sint ret = false;
+ struct sta_info *psta = NULL;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct pkt_attrib *pattrib = &pxmitframe->attrib;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ sint bmcst = IS_MCAST(pattrib->ra);
+ bool update_tim = false;
+
+ if (check_fwstate(pmlmepriv, WIFI_AP_STATE) == false) {
+ DBG_COUNTER(padapter->tx_logs.core_tx_ap_enqueue_warn_fwstate);
+ return ret;
+ }
+/*
+ if (pattrib->psta)
+ {
+ psta = pattrib->psta;
+ }
+ else
+ {
+ DBG_871X("%s, call rtw_get_stainfo()\n", __func__);
+ psta =rtw_get_stainfo(pstapriv, pattrib->ra);
+ }
+*/
+ psta = rtw_get_stainfo(&padapter->stapriv, pattrib->ra);
+ if (pattrib->psta != psta) {
+ DBG_COUNTER(padapter->tx_logs.core_tx_ap_enqueue_warn_sta);
+ DBG_871X("%s, pattrib->psta(%p) != psta(%p)\n", __func__, pattrib->psta, psta);
+ return false;
+ }
+
+ if (psta == NULL) {
+ DBG_COUNTER(padapter->tx_logs.core_tx_ap_enqueue_warn_nosta);
+ DBG_871X("%s, psta ==NUL\n", __func__);
+ return false;
+ }
+
+ if (!(psta->state & _FW_LINKED)) {
+ DBG_COUNTER(padapter->tx_logs.core_tx_ap_enqueue_warn_link);
+ DBG_871X("%s, psta->state(0x%x) != _FW_LINKED\n", __func__, psta->state);
+ return false;
+ }
+
+ if (pattrib->triggered == 1) {
+ DBG_COUNTER(padapter->tx_logs.core_tx_ap_enqueue_warn_trigger);
+ /* DBG_871X("directly xmit pspoll_triggered packet\n"); */
+
+ /* pattrib->triggered = 0; */
+ if (bmcst && xmitframe_hiq_filter(pxmitframe) == true)
+ pattrib->qsel = 0x11;/* HIQ */
+
+ return ret;
+ }
+
+
+ if (bmcst) {
+ spin_lock_bh(&psta->sleep_q.lock);
+
+		if (pstapriv->sta_dz_bitmap) { /* if any sta is in ps mode */
+ /* pattrib->qsel = 0x11;HIQ */
+
+ list_del_init(&pxmitframe->list);
+
+ /* spin_lock_bh(&psta->sleep_q.lock); */
+
+ list_add_tail(&pxmitframe->list, get_list_head(&psta->sleep_q));
+
+ psta->sleepq_len++;
+
+ if (!(pstapriv->tim_bitmap & BIT(0)))
+ update_tim = true;
+
+ pstapriv->tim_bitmap |= BIT(0);/* */
+ pstapriv->sta_dz_bitmap |= BIT(0);
+
+ /* DBG_871X("enqueue, sq_len =%d, tim =%x\n", psta->sleepq_len, pstapriv->tim_bitmap); */
+
+ if (update_tim == true) {
+ update_beacon(padapter, _TIM_IE_, NULL, true);
+ } else {
+ chk_bmc_sleepq_cmd(padapter);
+ }
+
+ /* spin_unlock_bh(&psta->sleep_q.lock); */
+
+ ret = true;
+
+ DBG_COUNTER(padapter->tx_logs.core_tx_ap_enqueue_mcast);
+
+ }
+
+ spin_unlock_bh(&psta->sleep_q.lock);
+
+ return ret;
+
+ }
+
+
+ spin_lock_bh(&psta->sleep_q.lock);
+
+ if (psta->state&WIFI_SLEEP_STATE) {
+ u8 wmmps_ac = 0;
+
+ if (pstapriv->sta_dz_bitmap & BIT(psta->aid)) {
+ list_del_init(&pxmitframe->list);
+
+ /* spin_lock_bh(&psta->sleep_q.lock); */
+
+ list_add_tail(&pxmitframe->list, get_list_head(&psta->sleep_q));
+
+ psta->sleepq_len++;
+
+ switch (pattrib->priority) {
+ case 1:
+ case 2:
+ wmmps_ac = psta->uapsd_bk&BIT(0);
+ break;
+ case 4:
+ case 5:
+ wmmps_ac = psta->uapsd_vi&BIT(0);
+ break;
+ case 6:
+ case 7:
+ wmmps_ac = psta->uapsd_vo&BIT(0);
+ break;
+ case 0:
+ case 3:
+ default:
+ wmmps_ac = psta->uapsd_be&BIT(0);
+ break;
+ }
+
+ if (wmmps_ac)
+ psta->sleepq_ac_len++;
+
+ if (((psta->has_legacy_ac) && (!wmmps_ac)) || ((!psta->has_legacy_ac) && (wmmps_ac))) {
+ if (!(pstapriv->tim_bitmap & BIT(psta->aid)))
+ update_tim = true;
+
+ pstapriv->tim_bitmap |= BIT(psta->aid);
+
+ /* DBG_871X("enqueue, sq_len =%d, tim =%x\n", psta->sleepq_len, pstapriv->tim_bitmap); */
+
+ if (update_tim == true)
+ /* DBG_871X("sleepq_len == 1, update BCNTIM\n"); */
+				/* update BCN for TIM IE */
+ update_beacon(padapter, _TIM_IE_, NULL, true);
+ }
+
+ /* spin_unlock_bh(&psta->sleep_q.lock); */
+
+ /* if (psta->sleepq_len > (NR_XMITFRAME>>3)) */
+ /* */
+ /* wakeup_sta_to_xmit(padapter, psta); */
+ /* */
+
+ ret = true;
+
+ DBG_COUNTER(padapter->tx_logs.core_tx_ap_enqueue_ucast);
+ }
+
+ }
+
+ spin_unlock_bh(&psta->sleep_q.lock);
+
+ return ret;
+
+}
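Editor's note on the priority switch above (it recurs in the wakeup paths further down): the 802.11 user priority in pattrib->priority is folded onto the four WMM access categories before the per-AC U-APSD bit is tested, with priorities 1 and 2 mapping to BK, 4 and 5 to VI, 6 and 7 to VO, and 0, 3 or anything else to BE. A minimal standalone sketch of just that mapping, using a hypothetical helper name and an enum instead of the driver's uapsd_* bitfields:

    enum wmm_ac { WMM_AC_BE, WMM_AC_BK, WMM_AC_VI, WMM_AC_VO };

    /* Hypothetical helper: same user-priority to AC mapping as the switch above. */
    static enum wmm_ac up_to_wmm_ac(u8 priority)
    {
    	switch (priority) {
    	case 1:
    	case 2:
    		return WMM_AC_BK;	/* background */
    	case 4:
    	case 5:
    		return WMM_AC_VI;	/* video */
    	case 6:
    	case 7:
    		return WMM_AC_VO;	/* voice */
    	case 0:
    	case 3:
    	default:
    		return WMM_AC_BE;	/* best effort */
    	}
    }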
+
+static void dequeue_xmitframes_to_sleeping_queue(struct adapter *padapter, struct sta_info *psta, struct __queue *pframequeue)
+{
+ sint ret;
+ struct list_head *plist, *phead;
+ u8 ac_index;
+ struct tx_servq *ptxservq;
+ struct pkt_attrib *pattrib;
+ struct xmit_frame *pxmitframe;
+ struct hw_xmit *phwxmits = padapter->xmitpriv.hwxmits;
+
+ phead = get_list_head(pframequeue);
+ plist = get_next(phead);
+
+ while (phead != plist) {
+ pxmitframe = LIST_CONTAINOR(plist, struct xmit_frame, list);
+
+ plist = get_next(plist);
+
+ pattrib = &pxmitframe->attrib;
+
+ pattrib->triggered = 0;
+
+ ret = xmitframe_enqueue_for_sleeping_sta(padapter, pxmitframe);
+
+ if (true == ret) {
+ ptxservq = rtw_get_sta_pending(padapter, psta, pattrib->priority, (u8 *)(&ac_index));
+
+ ptxservq->qcnt--;
+ phwxmits[ac_index].accnt--;
+ } else {
+ /* DBG_871X("xmitframe_enqueue_for_sleeping_sta return false\n"); */
+ }
+
+ }
+
+}
+
+void stop_sta_xmit(struct adapter *padapter, struct sta_info *psta)
+{
+ struct sta_info *psta_bmc;
+ struct sta_xmit_priv *pstaxmitpriv;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+
+ pstaxmitpriv = &psta->sta_xmitpriv;
+
+ /* for BC/MC Frames */
+ psta_bmc = rtw_get_bcmc_stainfo(padapter);
+
+
+ spin_lock_bh(&pxmitpriv->lock);
+
+ psta->state |= WIFI_SLEEP_STATE;
+
+ pstapriv->sta_dz_bitmap |= BIT(psta->aid);
+
+
+
+ dequeue_xmitframes_to_sleeping_queue(padapter, psta, &pstaxmitpriv->vo_q.sta_pending);
+ list_del_init(&(pstaxmitpriv->vo_q.tx_pending));
+
+
+ dequeue_xmitframes_to_sleeping_queue(padapter, psta, &pstaxmitpriv->vi_q.sta_pending);
+ list_del_init(&(pstaxmitpriv->vi_q.tx_pending));
+
+
+ dequeue_xmitframes_to_sleeping_queue(padapter, psta, &pstaxmitpriv->be_q.sta_pending);
+ list_del_init(&(pstaxmitpriv->be_q.tx_pending));
+
+
+ dequeue_xmitframes_to_sleeping_queue(padapter, psta, &pstaxmitpriv->bk_q.sta_pending);
+ list_del_init(&(pstaxmitpriv->bk_q.tx_pending));
+
+ /* for BC/MC Frames */
+ pstaxmitpriv = &psta_bmc->sta_xmitpriv;
+ dequeue_xmitframes_to_sleeping_queue(padapter, psta_bmc, &pstaxmitpriv->be_q.sta_pending);
+ list_del_init(&(pstaxmitpriv->be_q.tx_pending));
+
+ spin_unlock_bh(&pxmitpriv->lock);
+}
+
+void wakeup_sta_to_xmit(struct adapter *padapter, struct sta_info *psta)
+{
+ u8 update_mask = 0, wmmps_ac = 0;
+ struct sta_info *psta_bmc;
+ struct list_head *xmitframe_plist, *xmitframe_phead;
+ struct xmit_frame *pxmitframe = NULL;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+
+ psta_bmc = rtw_get_bcmc_stainfo(padapter);
+
+
+ /* spin_lock_bh(&psta->sleep_q.lock); */
+ spin_lock_bh(&pxmitpriv->lock);
+
+ xmitframe_phead = get_list_head(&psta->sleep_q);
+ xmitframe_plist = get_next(xmitframe_phead);
+
+ while (xmitframe_phead != xmitframe_plist) {
+ pxmitframe = LIST_CONTAINOR(xmitframe_plist, struct xmit_frame, list);
+
+ xmitframe_plist = get_next(xmitframe_plist);
+
+ list_del_init(&pxmitframe->list);
+
+ switch (pxmitframe->attrib.priority) {
+ case 1:
+ case 2:
+ wmmps_ac = psta->uapsd_bk&BIT(1);
+ break;
+ case 4:
+ case 5:
+ wmmps_ac = psta->uapsd_vi&BIT(1);
+ break;
+ case 6:
+ case 7:
+ wmmps_ac = psta->uapsd_vo&BIT(1);
+ break;
+ case 0:
+ case 3:
+ default:
+ wmmps_ac = psta->uapsd_be&BIT(1);
+ break;
+ }
+
+ psta->sleepq_len--;
+ if (psta->sleepq_len > 0)
+ pxmitframe->attrib.mdata = 1;
+ else
+ pxmitframe->attrib.mdata = 0;
+
+ if (wmmps_ac) {
+ psta->sleepq_ac_len--;
+ if (psta->sleepq_ac_len > 0) {
+ pxmitframe->attrib.mdata = 1;
+ pxmitframe->attrib.eosp = 0;
+			} else {
+ pxmitframe->attrib.mdata = 0;
+ pxmitframe->attrib.eosp = 1;
+ }
+ }
+
+ pxmitframe->attrib.triggered = 1;
+
+/*
+ spin_unlock_bh(&psta->sleep_q.lock);
+ if (rtw_hal_xmit(padapter, pxmitframe) == true)
+ {
+ rtw_os_xmit_complete(padapter, pxmitframe);
+ }
+ spin_lock_bh(&psta->sleep_q.lock);
+*/
+ rtw_hal_xmitframe_enqueue(padapter, pxmitframe);
+
+
+ }
+
+ if (psta->sleepq_len == 0) {
+ if (pstapriv->tim_bitmap & BIT(psta->aid)) {
+ /* DBG_871X("wakeup to xmit, qlen == 0, update_BCNTIM, tim =%x\n", pstapriv->tim_bitmap); */
+			/* update BCN for TIM IE */
+ /* update_BCNTIM(padapter); */
+ update_mask = BIT(0);
+ }
+
+ pstapriv->tim_bitmap &= ~BIT(psta->aid);
+
+ if (psta->state&WIFI_SLEEP_STATE)
+ psta->state ^= WIFI_SLEEP_STATE;
+
+ if (psta->state & WIFI_STA_ALIVE_CHK_STATE) {
+ DBG_871X("%s alive check\n", __func__);
+ psta->expire_to = pstapriv->expire_to;
+ psta->state ^= WIFI_STA_ALIVE_CHK_STATE;
+ }
+
+ pstapriv->sta_dz_bitmap &= ~BIT(psta->aid);
+ }
+
+ /* for BC/MC Frames */
+ if (!psta_bmc)
+ goto _exit;
+
+	if ((pstapriv->sta_dz_bitmap&0xfffe) == 0x0) { /* no sta in ps mode */
+ xmitframe_phead = get_list_head(&psta_bmc->sleep_q);
+ xmitframe_plist = get_next(xmitframe_phead);
+
+ while (xmitframe_phead != xmitframe_plist) {
+ pxmitframe = LIST_CONTAINOR(xmitframe_plist, struct xmit_frame, list);
+
+ xmitframe_plist = get_next(xmitframe_plist);
+
+ list_del_init(&pxmitframe->list);
+
+ psta_bmc->sleepq_len--;
+ if (psta_bmc->sleepq_len > 0)
+ pxmitframe->attrib.mdata = 1;
+ else
+ pxmitframe->attrib.mdata = 0;
+
+
+ pxmitframe->attrib.triggered = 1;
+/*
+ spin_unlock_bh(&psta_bmc->sleep_q.lock);
+ if (rtw_hal_xmit(padapter, pxmitframe) == true)
+ {
+ rtw_os_xmit_complete(padapter, pxmitframe);
+ }
+ spin_lock_bh(&psta_bmc->sleep_q.lock);
+
+*/
+ rtw_hal_xmitframe_enqueue(padapter, pxmitframe);
+
+ }
+
+ if (psta_bmc->sleepq_len == 0) {
+ if (pstapriv->tim_bitmap & BIT(0)) {
+ /* DBG_871X("wakeup to xmit, qlen == 0, update_BCNTIM, tim =%x\n", pstapriv->tim_bitmap); */
+				/* update BCN for TIM IE */
+ /* update_BCNTIM(padapter); */
+ update_mask |= BIT(1);
+ }
+ pstapriv->tim_bitmap &= ~BIT(0);
+ pstapriv->sta_dz_bitmap &= ~BIT(0);
+ }
+
+ }
+
+_exit:
+
+ /* spin_unlock_bh(&psta_bmc->sleep_q.lock); */
+ spin_unlock_bh(&pxmitpriv->lock);
+
+ if (update_mask)
+ /* update_BCNTIM(padapter); */
+ /* printk("%s => call update_beacon\n", __func__); */
+ update_beacon(padapter, _TIM_IE_, NULL, true);
+
+}
+
+void xmit_delivery_enabled_frames(struct adapter *padapter, struct sta_info *psta)
+{
+ u8 wmmps_ac = 0;
+ struct list_head *xmitframe_plist, *xmitframe_phead;
+ struct xmit_frame *pxmitframe = NULL;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+
+
+ /* spin_lock_bh(&psta->sleep_q.lock); */
+ spin_lock_bh(&pxmitpriv->lock);
+
+ xmitframe_phead = get_list_head(&psta->sleep_q);
+ xmitframe_plist = get_next(xmitframe_phead);
+
+ while (xmitframe_phead != xmitframe_plist) {
+ pxmitframe = LIST_CONTAINOR(xmitframe_plist, struct xmit_frame, list);
+
+ xmitframe_plist = get_next(xmitframe_plist);
+
+ switch (pxmitframe->attrib.priority) {
+ case 1:
+ case 2:
+ wmmps_ac = psta->uapsd_bk&BIT(1);
+ break;
+ case 4:
+ case 5:
+ wmmps_ac = psta->uapsd_vi&BIT(1);
+ break;
+ case 6:
+ case 7:
+ wmmps_ac = psta->uapsd_vo&BIT(1);
+ break;
+ case 0:
+ case 3:
+ default:
+ wmmps_ac = psta->uapsd_be&BIT(1);
+ break;
+ }
+
+ if (!wmmps_ac)
+ continue;
+
+ list_del_init(&pxmitframe->list);
+
+ psta->sleepq_len--;
+ psta->sleepq_ac_len--;
+
+ if (psta->sleepq_ac_len > 0) {
+ pxmitframe->attrib.mdata = 1;
+ pxmitframe->attrib.eosp = 0;
+		} else {
+ pxmitframe->attrib.mdata = 0;
+ pxmitframe->attrib.eosp = 1;
+ }
+
+ pxmitframe->attrib.triggered = 1;
+ rtw_hal_xmitframe_enqueue(padapter, pxmitframe);
+
+ if ((psta->sleepq_ac_len == 0) && (!psta->has_legacy_ac) && (wmmps_ac)) {
+ pstapriv->tim_bitmap &= ~BIT(psta->aid);
+
+ /* DBG_871X("wakeup to xmit, qlen == 0, update_BCNTIM, tim =%x\n", pstapriv->tim_bitmap); */
+			/* update BCN for TIM IE */
+ /* update_BCNTIM(padapter); */
+ update_beacon(padapter, _TIM_IE_, NULL, true);
+ /* update_mask = BIT(0); */
+ }
+
+ }
+
+ /* spin_unlock_bh(&psta->sleep_q.lock); */
+ spin_unlock_bh(&pxmitpriv->lock);
+
+ return;
+}
+
+void enqueue_pending_xmitbuf(
+ struct xmit_priv *pxmitpriv,
+ struct xmit_buf *pxmitbuf)
+{
+ struct __queue *pqueue;
+ struct adapter *pri_adapter = pxmitpriv->adapter;
+
+ pqueue = &pxmitpriv->pending_xmitbuf_queue;
+
+ spin_lock_bh(&pqueue->lock);
+ list_del_init(&pxmitbuf->list);
+ list_add_tail(&pxmitbuf->list, get_list_head(pqueue));
+ spin_unlock_bh(&pqueue->lock);
+
+ up(&(pri_adapter->xmitpriv.xmit_sema));
+}
+
+void enqueue_pending_xmitbuf_to_head(
+ struct xmit_priv *pxmitpriv,
+ struct xmit_buf *pxmitbuf)
+{
+ struct __queue *pqueue;
+
+ pqueue = &pxmitpriv->pending_xmitbuf_queue;
+
+ spin_lock_bh(&pqueue->lock);
+ list_del_init(&pxmitbuf->list);
+ list_add(&pxmitbuf->list, get_list_head(pqueue));
+ spin_unlock_bh(&pqueue->lock);
+}
+
+struct xmit_buf *dequeue_pending_xmitbuf(
+ struct xmit_priv *pxmitpriv)
+{
+ struct xmit_buf *pxmitbuf;
+ struct __queue *pqueue;
+
+
+ pxmitbuf = NULL;
+ pqueue = &pxmitpriv->pending_xmitbuf_queue;
+
+ spin_lock_bh(&pqueue->lock);
+
+ if (!list_empty(&pqueue->queue)) {
+ struct list_head *plist, *phead;
+
+ phead = get_list_head(pqueue);
+ plist = get_next(phead);
+ pxmitbuf = LIST_CONTAINOR(plist, struct xmit_buf, list);
+ list_del_init(&pxmitbuf->list);
+ }
+
+ spin_unlock_bh(&pqueue->lock);
+
+ return pxmitbuf;
+}
+
+struct xmit_buf *dequeue_pending_xmitbuf_under_survey(
+ struct xmit_priv *pxmitpriv)
+{
+ struct xmit_buf *pxmitbuf;
+ struct __queue *pqueue;
+
+
+ pxmitbuf = NULL;
+ pqueue = &pxmitpriv->pending_xmitbuf_queue;
+
+ spin_lock_bh(&pqueue->lock);
+
+ if (!list_empty(&pqueue->queue)) {
+ struct list_head *plist, *phead;
+ u8 type;
+
+ phead = get_list_head(pqueue);
+ plist = phead;
+ do {
+ plist = get_next(plist);
+ if (plist == phead)
+ break;
+
+ pxmitbuf = LIST_CONTAINOR(plist, struct xmit_buf, list);
+
+ type = GetFrameSubType(pxmitbuf->pbuf + TXDESC_OFFSET);
+
+ if ((type == WIFI_PROBEREQ) ||
+ (type == WIFI_DATA_NULL) ||
+ (type == WIFI_QOS_DATA_NULL)) {
+ list_del_init(&pxmitbuf->list);
+ break;
+ }
+ pxmitbuf = NULL;
+ } while (1);
+ }
+
+ spin_unlock_bh(&pqueue->lock);
+
+ return pxmitbuf;
+}
+
+sint check_pending_xmitbuf(
+ struct xmit_priv *pxmitpriv)
+{
+ struct __queue *pqueue;
+ sint ret = false;
+
+ pqueue = &pxmitpriv->pending_xmitbuf_queue;
+
+ spin_lock_bh(&pqueue->lock);
+
+ if (!list_empty(&pqueue->queue))
+ ret = true;
+
+ spin_unlock_bh(&pqueue->lock);
+
+ return ret;
+}
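Editor's note: enqueue_pending_xmitbuf() above pairs the queue insertion with up() on xmitpriv.xmit_sema, so a transmit thread can sleep until work is queued and then drain it with dequeue_pending_xmitbuf(). The HAL-side consumer is not part of this file, so the loop below is only a hedged sketch of that producer/consumer shape; the helper name and the use of down_interruptible() are assumptions for illustration, not the driver's actual thread handler:

    /* Hypothetical consumer, assuming the semaphore/queue pairing shown above. */
    static void drain_pending_xmitbuf(struct xmit_priv *pxmitpriv)
    {
    	struct xmit_buf *pxmitbuf;

    	/* sleep until enqueue_pending_xmitbuf() signals new work */
    	if (down_interruptible(&pxmitpriv->xmit_sema))
    		return;

    	while ((pxmitbuf = dequeue_pending_xmitbuf(pxmitpriv)) != NULL) {
    		/* hand pxmitbuf to the bus/HAL layer here */
    	}
    }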
+
+int rtw_xmit_thread(void *context)
+{
+ s32 err;
+ struct adapter *padapter;
+
+
+ err = _SUCCESS;
+ padapter = (struct adapter *)context;
+
+ thread_enter("RTW_XMIT_THREAD");
+
+ do {
+ err = rtw_hal_xmit_thread_handler(padapter);
+ flush_signals_thread();
+ } while (_SUCCESS == err);
+
+ up(&padapter->xmitpriv.terminate_xmitthread_sema);
+
+ thread_exit();
+}
+
+void rtw_sctx_init(struct submit_ctx *sctx, int timeout_ms)
+{
+ sctx->timeout_ms = timeout_ms;
+ sctx->submit_time = jiffies;
+ init_completion(&sctx->done);
+ sctx->status = RTW_SCTX_SUBMITTED;
+}
+
+int rtw_sctx_wait(struct submit_ctx *sctx, const char *msg)
+{
+ int ret = _FAIL;
+ unsigned long expire;
+ int status = 0;
+
+ expire = sctx->timeout_ms ? msecs_to_jiffies(sctx->timeout_ms) : MAX_SCHEDULE_TIMEOUT;
+ if (!wait_for_completion_timeout(&sctx->done, expire)) {
+ /* timeout, do something?? */
+ status = RTW_SCTX_DONE_TIMEOUT;
+ DBG_871X("%s timeout: %s\n", __func__, msg);
+ } else {
+ status = sctx->status;
+ }
+
+ if (status == RTW_SCTX_DONE_SUCCESS) {
+ ret = _SUCCESS;
+ }
+
+ return ret;
+}
+
+static bool rtw_sctx_chk_waring_status(int status)
+{
+ switch (status) {
+ case RTW_SCTX_DONE_UNKNOWN:
+ case RTW_SCTX_DONE_BUF_ALLOC:
+ case RTW_SCTX_DONE_BUF_FREE:
+
+ case RTW_SCTX_DONE_DRV_STOP:
+ case RTW_SCTX_DONE_DEV_REMOVE:
+ return true;
+ default:
+ return false;
+ }
+}
+
+void rtw_sctx_done_err(struct submit_ctx **sctx, int status)
+{
+ if (*sctx) {
+ if (rtw_sctx_chk_waring_status(status))
+ DBG_871X("%s status:%d\n", __func__, status);
+ (*sctx)->status = status;
+ complete(&((*sctx)->done));
+ *sctx = NULL;
+ }
+}
+
+void rtw_sctx_done(struct submit_ctx **sctx)
+{
+ rtw_sctx_done_err(sctx, RTW_SCTX_DONE_SUCCESS);
+}
+
+int rtw_ack_tx_wait(struct xmit_priv *pxmitpriv, u32 timeout_ms)
+{
+ struct submit_ctx *pack_tx_ops = &pxmitpriv->ack_tx_ops;
+
+ pack_tx_ops->submit_time = jiffies;
+ pack_tx_ops->timeout_ms = timeout_ms;
+ pack_tx_ops->status = RTW_SCTX_SUBMITTED;
+
+ return rtw_sctx_wait(pack_tx_ops, __func__);
+}
+
+void rtw_ack_tx_done(struct xmit_priv *pxmitpriv, int status)
+{
+ struct submit_ctx *pack_tx_ops = &pxmitpriv->ack_tx_ops;
+
+ if (pxmitpriv->ack_tx) {
+ rtw_sctx_done_err(&pack_tx_ops, status);
+ } else {
+ DBG_871X("%s ack_tx not set\n", __func__);
+ }
+}
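Editor's note: rtw_ack_tx_wait() and rtw_ack_tx_done() above already show the intended pairing of the submit_ctx helpers: one side arms a struct submit_ctx and blocks in rtw_sctx_wait(), while the completing side reports a status through rtw_sctx_done_err(), which fires the completion and clears the caller's pointer. A condensed, hedged sketch of that handshake with hypothetical function names and an illustrative 1000 ms timeout:

    /* Hypothetical submitter: arm the context, start the I/O, block up to 1 s. */
    static int submit_and_wait(struct submit_ctx *sctx)
    {
    	rtw_sctx_init(sctx, 1000);
    	/* ... hand sctx to the asynchronous path that will complete it ... */
    	return rtw_sctx_wait(sctx, __func__); /* _SUCCESS only on RTW_SCTX_DONE_SUCCESS */
    }

    /* Hypothetical completion path: report the result and wake the waiter. */
    static void complete_submit(struct submit_ctx *sctx, int status)
    {
    	/* rtw_sctx_done_err() takes a struct submit_ctx ** and NULLs it */
    	rtw_sctx_done_err(&sctx, status);
    }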
diff --git a/drivers/staging/rtl8723bs/hal/Hal8723BPwrSeq.c b/drivers/staging/rtl8723bs/hal/Hal8723BPwrSeq.c
new file mode 100644
index 000000000000..0376806335d3
--- /dev/null
+++ b/drivers/staging/rtl8723bs/hal/Hal8723BPwrSeq.c
@@ -0,0 +1,138 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+
+/*
+*
+This file includes all kinds of power action events for the RTL8723B
+and the corresponding hardware configurations released from HW SD.
+
+Major Change History:
+ When Who What
+ ---------- --------------- -------------------------------
+ 2011-08-08 Roger Create.
+
+*/
+
+#include "Hal8723BPwrSeq.h"
+
+/* drivers should parse below arrays and do the corresponding actions */
+/* 3 Power on Array */
+WLAN_PWR_CFG rtl8723B_power_on_flow[
+ RTL8723B_TRANS_CARDEMU_TO_ACT_STEPS+
+ RTL8723B_TRANS_END_STEPS
+] = {
+ RTL8723B_TRANS_CARDEMU_TO_ACT
+ RTL8723B_TRANS_END
+};
+
+/* 3Radio off GPIO Array */
+WLAN_PWR_CFG rtl8723B_radio_off_flow[
+ RTL8723B_TRANS_ACT_TO_CARDEMU_STEPS+
+ RTL8723B_TRANS_END_STEPS
+] = {
+ RTL8723B_TRANS_ACT_TO_CARDEMU
+ RTL8723B_TRANS_END
+};
+
+/* 3Card Disable Array */
+WLAN_PWR_CFG rtl8723B_card_disable_flow[
+ RTL8723B_TRANS_ACT_TO_CARDEMU_STEPS+
+ RTL8723B_TRANS_CARDEMU_TO_PDN_STEPS+
+ RTL8723B_TRANS_END_STEPS
+] = {
+ RTL8723B_TRANS_ACT_TO_CARDEMU
+ RTL8723B_TRANS_CARDEMU_TO_CARDDIS
+ RTL8723B_TRANS_END
+};
+
+/* 3 Card Enable Array */
+WLAN_PWR_CFG rtl8723B_card_enable_flow[
+ RTL8723B_TRANS_ACT_TO_CARDEMU_STEPS+
+ RTL8723B_TRANS_CARDEMU_TO_PDN_STEPS+
+ RTL8723B_TRANS_END_STEPS
+] = {
+ RTL8723B_TRANS_CARDDIS_TO_CARDEMU
+ RTL8723B_TRANS_CARDEMU_TO_ACT
+ RTL8723B_TRANS_END
+};
+
+/* 3Suspend Array */
+WLAN_PWR_CFG rtl8723B_suspend_flow[
+ RTL8723B_TRANS_ACT_TO_CARDEMU_STEPS+
+ RTL8723B_TRANS_CARDEMU_TO_SUS_STEPS+
+ RTL8723B_TRANS_END_STEPS
+] = {
+ RTL8723B_TRANS_ACT_TO_CARDEMU
+ RTL8723B_TRANS_CARDEMU_TO_SUS
+ RTL8723B_TRANS_END
+};
+
+/* 3 Resume Array */
+WLAN_PWR_CFG rtl8723B_resume_flow[
+ RTL8723B_TRANS_ACT_TO_CARDEMU_STEPS+
+ RTL8723B_TRANS_CARDEMU_TO_SUS_STEPS+
+ RTL8723B_TRANS_END_STEPS
+] = {
+ RTL8723B_TRANS_SUS_TO_CARDEMU
+ RTL8723B_TRANS_CARDEMU_TO_ACT
+ RTL8723B_TRANS_END
+};
+
+/* 3HWPDN Array */
+WLAN_PWR_CFG rtl8723B_hwpdn_flow[
+ RTL8723B_TRANS_ACT_TO_CARDEMU_STEPS+
+ RTL8723B_TRANS_CARDEMU_TO_PDN_STEPS+
+ RTL8723B_TRANS_END_STEPS
+] = {
+ RTL8723B_TRANS_ACT_TO_CARDEMU
+ RTL8723B_TRANS_CARDEMU_TO_PDN
+ RTL8723B_TRANS_END
+};
+
+/* 3 Enter LPS */
+WLAN_PWR_CFG rtl8723B_enter_lps_flow[
+ RTL8723B_TRANS_ACT_TO_LPS_STEPS+RTL8723B_TRANS_END_STEPS
+] = {
+ /* FW behavior */
+ RTL8723B_TRANS_ACT_TO_LPS
+ RTL8723B_TRANS_END
+};
+
+/* 3 Leave LPS */
+WLAN_PWR_CFG rtl8723B_leave_lps_flow[
+ RTL8723B_TRANS_LPS_TO_ACT_STEPS+RTL8723B_TRANS_END_STEPS
+] = {
+ /* FW behavior */
+ RTL8723B_TRANS_LPS_TO_ACT
+ RTL8723B_TRANS_END
+};
+
+/* 3 Enter SW LPS */
+WLAN_PWR_CFG rtl8723B_enter_swlps_flow[
+ RTL8723B_TRANS_ACT_TO_SWLPS_STEPS+RTL8723B_TRANS_END_STEPS
+] = {
+ /* SW behavior */
+ RTL8723B_TRANS_ACT_TO_SWLPS
+ RTL8723B_TRANS_END
+};
+
+/* 3 Leave SW LPS */
+WLAN_PWR_CFG rtl8723B_leave_swlps_flow[
+ RTL8723B_TRANS_SWLPS_TO_ACT_STEPS+RTL8723B_TRANS_END_STEPS
+] = {
+ /* SW behavior */
+ RTL8723B_TRANS_SWLPS_TO_ACT
+ RTL8723B_TRANS_END
+};
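Editor's note: each flow above is a concatenation of transition-step macros terminated by RTL8723B_TRANS_END, producing one flat WLAN_PWR_CFG table per power transition. The exact WLAN_PWR_CFG layout and command codes live in the power-sequence headers rather than in this file, so the walker below is only a hedged sketch of the general "apply steps until the end marker" shape, using a hypothetical stand-in struct and end-of-table value:

    struct pwr_step {		/* hypothetical stand-in for WLAN_PWR_CFG */
    	u16 offset;
    	u8  cmd;		/* e.g. write / poll / delay / end */
    	u8  msk;
    	u8  value;
    };

    #define PWR_STEP_CMD_END 0xFF	/* hypothetical end-of-table marker */

    static void run_pwr_flow(const struct pwr_step *flow)
    {
    	for (; flow->cmd != PWR_STEP_CMD_END; flow++) {
    		/* apply flow->value under flow->msk at flow->offset,
    		 * or poll/delay, depending on flow->cmd */
    	}
    }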
diff --git a/drivers/staging/rtl8723bs/hal/Hal8723BReg.h b/drivers/staging/rtl8723bs/hal/Hal8723BReg.h
new file mode 100644
index 000000000000..152a198c8f17
--- /dev/null
+++ b/drivers/staging/rtl8723bs/hal/Hal8723BReg.h
@@ -0,0 +1,442 @@
+/*****************************************************************************
+ *Copyright(c) 2009, RealTEK Technology Inc. All Right Reserved.
+ *
+ * Module: __INC_HAL8723BREG_H
+ *
+ *
+ * Note: 1. Define Mac register address and corresponding bit mask map
+ *
+ *
+ * Export: Constants, macro, functions(API), global variables(None).
+ *
+ * Abbrev:
+ *
+ * History:
+ * Date Who Remark
+ *
+ *****************************************************************************/
+#ifndef __INC_HAL8723BREG_H
+#define __INC_HAL8723BREG_H
+
+
+
+/* */
+/* */
+/* */
+
+/* */
+/* */
+/* 0x0000h ~ 0x00FFh System Configuration */
+/* */
+/* */
+#define REG_SYS_ISO_CTRL_8723B 0x0000 /* 2 Byte */
+#define REG_SYS_FUNC_EN_8723B 0x0002 /* 2 Byte */
+#define REG_APS_FSMCO_8723B 0x0004 /* 4 Byte */
+#define REG_SYS_CLKR_8723B 0x0008 /* 2 Byte */
+#define REG_9346CR_8723B 0x000A /* 2 Byte */
+#define REG_EE_VPD_8723B 0x000C /* 2 Byte */
+#define REG_AFE_MISC_8723B 0x0010 /* 1 Byte */
+#define REG_SPS0_CTRL_8723B 0x0011 /* 7 Byte */
+#define REG_SPS_OCP_CFG_8723B 0x0018 /* 4 Byte */
+#define REG_RSV_CTRL_8723B 0x001C /* 3 Byte */
+#define REG_RF_CTRL_8723B 0x001F /* 1 Byte */
+#define REG_LPLDO_CTRL_8723B 0x0023 /* 1 Byte */
+#define REG_AFE_XTAL_CTRL_8723B 0x0024 /* 4 Byte */
+#define REG_AFE_PLL_CTRL_8723B 0x0028 /* 4 Byte */
+#define REG_MAC_PLL_CTRL_EXT_8723B 0x002c /* 4 Byte */
+#define REG_EFUSE_CTRL_8723B 0x0030
+#define REG_EFUSE_TEST_8723B 0x0034
+#define REG_PWR_DATA_8723B 0x0038
+#define REG_CAL_TIMER_8723B 0x003C
+#define REG_ACLK_MON_8723B 0x003E
+#define REG_GPIO_MUXCFG_8723B 0x0040
+#define REG_GPIO_IO_SEL_8723B 0x0042
+#define REG_MAC_PINMUX_CFG_8723B 0x0043
+#define REG_GPIO_PIN_CTRL_8723B 0x0044
+#define REG_GPIO_INTM_8723B 0x0048
+#define REG_LEDCFG0_8723B 0x004C
+#define REG_LEDCFG1_8723B 0x004D
+#define REG_LEDCFG2_8723B 0x004E
+#define REG_LEDCFG3_8723B 0x004F
+#define REG_FSIMR_8723B 0x0050
+#define REG_FSISR_8723B 0x0054
+#define REG_HSIMR_8723B 0x0058
+#define REG_HSISR_8723B 0x005c
+#define REG_GPIO_EXT_CTRL 0x0060
+#define REG_MULTI_FUNC_CTRL_8723B 0x0068
+#define REG_GPIO_STATUS_8723B 0x006C
+#define REG_SDIO_CTRL_8723B 0x0070
+#define REG_OPT_CTRL_8723B 0x0074
+#define REG_AFE_XTAL_CTRL_EXT_8723B 0x0078
+#define REG_MCUFWDL_8723B 0x0080
+#define REG_BT_PATCH_STATUS_8723B 0x0088
+#define REG_HIMR0_8723B 0x00B0
+#define REG_HISR0_8723B 0x00B4
+#define REG_HIMR1_8723B 0x00B8
+#define REG_HISR1_8723B 0x00BC
+#define REG_PMC_DBG_CTRL2_8723B 0x00CC
+#define REG_EFUSE_BURN_GNT_8723B 0x00CF
+#define REG_HPON_FSM_8723B 0x00EC
+#define REG_SYS_CFG_8723B 0x00F0
+#define REG_SYS_CFG1_8723B 0x00FC
+#define REG_ROM_VERSION 0x00FD
+
+/* */
+/* */
+/* 0x0100h ~ 0x01FFh MACTOP General Configuration */
+/* */
+/* */
+#define REG_CR_8723B 0x0100
+#define REG_PBP_8723B 0x0104
+#define REG_PKT_BUFF_ACCESS_CTRL_8723B 0x0106
+#define REG_TRXDMA_CTRL_8723B 0x010C
+#define REG_TRXFF_BNDY_8723B 0x0114
+#define REG_TRXFF_STATUS_8723B 0x0118
+#define REG_RXFF_PTR_8723B 0x011C
+#define REG_CPWM_8723B 0x012F
+#define REG_FWIMR_8723B 0x0130
+#define REG_FWISR_8723B 0x0134
+#define REG_FTIMR_8723B 0x0138
+#define REG_PKTBUF_DBG_CTRL_8723B 0x0140
+#define REG_RXPKTBUF_CTRL_8723B 0x0142
+#define REG_PKTBUF_DBG_DATA_L_8723B 0x0144
+#define REG_PKTBUF_DBG_DATA_H_8723B 0x0148
+
+#define REG_TC0_CTRL_8723B 0x0150
+#define REG_TC1_CTRL_8723B 0x0154
+#define REG_TC2_CTRL_8723B 0x0158
+#define REG_TC3_CTRL_8723B 0x015C
+#define REG_TC4_CTRL_8723B 0x0160
+#define REG_TCUNIT_BASE_8723B 0x0164
+#define REG_RSVD3_8723B 0x0168
+#define REG_C2HEVT_MSG_NORMAL_8723B 0x01A0
+#define REG_C2HEVT_CMD_SEQ_88XX 0x01A1
+#define REG_C2hEVT_CMD_CONTENT_88XX 0x01A2
+#define REG_C2HEVT_CMD_LEN_88XX 0x01AE
+#define REG_C2HEVT_CLEAR_8723B 0x01AF
+#define REG_MCUTST_1_8723B 0x01C0
+#define REG_MCUTST_WOWLAN_8723B 0x01C7
+#define REG_FMETHR_8723B 0x01C8
+#define REG_HMETFR_8723B 0x01CC
+#define REG_HMEBOX_0_8723B 0x01D0
+#define REG_HMEBOX_1_8723B 0x01D4
+#define REG_HMEBOX_2_8723B 0x01D8
+#define REG_HMEBOX_3_8723B 0x01DC
+#define REG_LLT_INIT_8723B 0x01E0
+#define REG_HMEBOX_EXT0_8723B 0x01F0
+#define REG_HMEBOX_EXT1_8723B 0x01F4
+#define REG_HMEBOX_EXT2_8723B 0x01F8
+#define REG_HMEBOX_EXT3_8723B 0x01FC
+
+/* */
+/* */
+/* 0x0200h ~ 0x027Fh TXDMA Configuration */
+/* */
+/* */
+#define REG_RQPN_8723B 0x0200
+#define REG_FIFOPAGE_8723B 0x0204
+#define REG_DWBCN0_CTRL_8723B REG_TDECTRL
+#define REG_TXDMA_OFFSET_CHK_8723B 0x020C
+#define REG_TXDMA_STATUS_8723B 0x0210
+#define REG_RQPN_NPQ_8723B 0x0214
+#define REG_DWBCN1_CTRL_8723B 0x0228
+
+
+/* */
+/* */
+/* 0x0280h ~ 0x02FFh RXDMA Configuration */
+/* */
+/* */
+#define REG_RXDMA_AGG_PG_TH_8723B 0x0280
+#define REG_FW_UPD_RDPTR_8723B 0x0284 /* FW shall update this register before FW writes RXPKT_RELEASE_POLL to 1 */
+#define REG_RXDMA_CONTROL_8723B 0x0286 /* Control the RX DMA. */
+#define REG_RXPKT_NUM_8723B 0x0287 /* The number of packets in RXPKTBUF. */
+#define REG_RXDMA_STATUS_8723B 0x0288
+#define REG_RXDMA_PRO_8723B 0x0290
+#define REG_EARLY_MODE_CONTROL_8723B 0x02BC
+#define REG_RSVD5_8723B 0x02F0
+#define REG_RSVD6_8723B 0x02F4
+
+
+/* */
+/* */
+/* 0x0300h ~ 0x03FFh PCIe */
+/* */
+/* */
+#define REG_PCIE_CTRL_REG_8723B 0x0300
+#define REG_INT_MIG_8723B 0x0304 /* Interrupt Migration */
+#define REG_BCNQ_DESA_8723B 0x0308 /* TX Beacon Descriptor Address */
+#define REG_HQ_DESA_8723B 0x0310 /* TX High Queue Descriptor Address */
+#define REG_MGQ_DESA_8723B 0x0318 /* TX Manage Queue Descriptor Address */
+#define REG_VOQ_DESA_8723B 0x0320 /* TX VO Queue Descriptor Address */
+#define REG_VIQ_DESA_8723B 0x0328 /* TX VI Queue Descriptor Address */
+#define REG_BEQ_DESA_8723B 0x0330 /* TX BE Queue Descriptor Address */
+#define REG_BKQ_DESA_8723B 0x0338 /* TX BK Queue Descriptor Address */
+#define REG_RX_DESA_8723B 0x0340 /* RX Queue Descriptor Address */
+#define REG_DBI_WDATA_8723B 0x0348 /* DBI Write Data */
+#define REG_DBI_RDATA_8723B 0x034C /* DBI Read Data */
+#define REG_DBI_ADDR_8723B 0x0350 /* DBI Address */
+#define REG_DBI_FLAG_8723B 0x0352 /* DBI Read/Write Flag */
+#define REG_MDIO_WDATA_8723B 0x0354 /* MDIO for Write PCIE PHY */
+#define REG_MDIO_RDATA_8723B 0x0356 /* MDIO for Read PCIE PHY */
+#define REG_MDIO_CTL_8723B 0x0358 /* MDIO for Control */
+#define REG_DBG_SEL_8723B 0x0360 /* Debug Selection Register */
+#define REG_PCIE_HRPWM_8723B 0x0361 /* PCIe RPWM */
+#define REG_PCIE_HCPWM_8723B 0x0363 /* PCIe CPWM */
+#define REG_PCIE_MULTIFET_CTRL_8723B 0x036A /* PCIE Multi-Fetch Control */
+
+/* spec version 11 */
+/* */
+/* */
+/* 0x0400h ~ 0x047Fh Protocol Configuration */
+/* */
+/* */
+#define REG_VOQ_INFORMATION_8723B 0x0400
+#define REG_VIQ_INFORMATION_8723B 0x0404
+#define REG_BEQ_INFORMATION_8723B 0x0408
+#define REG_BKQ_INFORMATION_8723B 0x040C
+#define REG_MGQ_INFORMATION_8723B 0x0410
+#define REG_HGQ_INFORMATION_8723B 0x0414
+#define REG_BCNQ_INFORMATION_8723B 0x0418
+#define REG_TXPKT_EMPTY_8723B 0x041A
+
+#define REG_FWHW_TXQ_CTRL_8723B 0x0420
+#define REG_HWSEQ_CTRL_8723B 0x0423
+#define REG_TXPKTBUF_BCNQ_BDNY_8723B 0x0424
+#define REG_TXPKTBUF_MGQ_BDNY_8723B 0x0425
+#define REG_LIFECTRL_CTRL_8723B 0x0426
+#define REG_MULTI_BCNQ_OFFSET_8723B 0x0427
+#define REG_SPEC_SIFS_8723B 0x0428
+#define REG_RL_8723B 0x042A
+#define REG_TXBF_CTRL_8723B 0x042C
+#define REG_DARFRC_8723B 0x0430
+#define REG_RARFRC_8723B 0x0438
+#define REG_RRSR_8723B 0x0440
+#define REG_ARFR0_8723B 0x0444
+#define REG_ARFR1_8723B 0x044C
+#define REG_CCK_CHECK_8723B 0x0454
+#define REG_AMPDU_MAX_TIME_8723B 0x0456
+#define REG_TXPKTBUF_BCNQ_BDNY1_8723B 0x0457
+
+#define REG_AMPDU_MAX_LENGTH_8723B 0x0458
+#define REG_TXPKTBUF_WMAC_LBK_BF_HD_8723B 0x045D
+#define REG_NDPA_OPT_CTRL_8723B 0x045F
+#define REG_FAST_EDCA_CTRL_8723B 0x0460
+#define REG_RD_RESP_PKT_TH_8723B 0x0463
+#define REG_DATA_SC_8723B 0x0483
+#define REG_TXRPT_START_OFFSET 0x04AC
+#define REG_POWER_STAGE1_8723B 0x04B4
+#define REG_POWER_STAGE2_8723B 0x04B8
+#define REG_AMPDU_BURST_MODE_8723B 0x04BC
+#define REG_PKT_VO_VI_LIFE_TIME_8723B 0x04C0
+#define REG_PKT_BE_BK_LIFE_TIME_8723B 0x04C2
+#define REG_STBC_SETTING_8723B 0x04C4
+#define REG_HT_SINGLE_AMPDU_8723B 0x04C7
+#define REG_PROT_MODE_CTRL_8723B 0x04C8
+#define REG_MAX_AGGR_NUM_8723B 0x04CA
+#define REG_RTS_MAX_AGGR_NUM_8723B 0x04CB
+#define REG_BAR_MODE_CTRL_8723B 0x04CC
+#define REG_RA_TRY_RATE_AGG_LMT_8723B 0x04CF
+#define REG_MACID_PKT_DROP0_8723B 0x04D0
+#define REG_MACID_PKT_SLEEP_8723B 0x04D4
+
+/* */
+/* */
+/* 0x0500h ~ 0x05FFh EDCA Configuration */
+/* */
+/* */
+#define REG_EDCA_VO_PARAM_8723B 0x0500
+#define REG_EDCA_VI_PARAM_8723B 0x0504
+#define REG_EDCA_BE_PARAM_8723B 0x0508
+#define REG_EDCA_BK_PARAM_8723B 0x050C
+#define REG_BCNTCFG_8723B 0x0510
+#define REG_PIFS_8723B 0x0512
+#define REG_RDG_PIFS_8723B 0x0513
+#define REG_SIFS_CTX_8723B 0x0514
+#define REG_SIFS_TRX_8723B 0x0516
+#define REG_AGGR_BREAK_TIME_8723B 0x051A
+#define REG_SLOT_8723B 0x051B
+#define REG_TX_PTCL_CTRL_8723B 0x0520
+#define REG_TXPAUSE_8723B 0x0522
+#define REG_DIS_TXREQ_CLR_8723B 0x0523
+#define REG_RD_CTRL_8723B 0x0524
+/* */
+/* Format for offset 540h-542h: */
+/* [3:0]: TBTT prohibit setup in units of 32us. The time for HW getting beacon content before TBTT. */
+/* [7:4]: Reserved. */
+/* [19:8]: TBTT prohibit hold in units of 32us. The time for HW holding to send the beacon packet. */
+/* [23:20]: Reserved */
+/* Description: */
+/* | */
+/* |<--Setup--|--Hold------------>| */
+/* --------------|---------------------- */
+/* | */
+/* TBTT */
+/* Note: We cannot update beacon content to HW or send any AC packets during the time between Setup and Hold. */
+/* Described by Designer Tim and Bruce, 2011-01-14. */
+/* */
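Editor's note, a worked packing example for the layout documented above: bits [3:0] carry the setup time and bits [19:8] the hold time, both in 32 us units, written to the TBTT prohibit register defined just below. The helper name and sample values are illustrative only:

    /* Illustration only: pack a 4-bit setup and a 12-bit hold (32 us units). */
    static u32 tbtt_prohibit_val(u32 setup_32us, u32 hold_32us)
    {
    	return (setup_32us & 0x00F) |		/* [3:0]  setup */
    	       ((hold_32us & 0xFFF) << 8);	/* [19:8] hold  */
    }

    /* e.g. tbtt_prohibit_val(0x4, 0x80) == 0x00008004:
     * setup = 4 * 32 us = 128 us, hold = 0x80 * 32 us = 4096 us */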
+#define REG_TBTT_PROHIBIT_8723B 0x0540
+#define REG_RD_NAV_NXT_8723B 0x0544
+#define REG_NAV_PROT_LEN_8723B 0x0546
+#define REG_BCN_CTRL_8723B 0x0550
+#define REG_BCN_CTRL_1_8723B 0x0551
+#define REG_MBID_NUM_8723B 0x0552
+#define REG_DUAL_TSF_RST_8723B 0x0553
+#define REG_BCN_INTERVAL_8723B 0x0554
+#define REG_DRVERLYINT_8723B 0x0558
+#define REG_BCNDMATIM_8723B 0x0559
+#define REG_ATIMWND_8723B 0x055A
+#define REG_USTIME_TSF_8723B 0x055C
+#define REG_BCN_MAX_ERR_8723B 0x055D
+#define REG_RXTSF_OFFSET_CCK_8723B 0x055E
+#define REG_RXTSF_OFFSET_OFDM_8723B 0x055F
+#define REG_TSFTR_8723B 0x0560
+#define REG_CTWND_8723B 0x0572
+#define REG_SECONDARY_CCA_CTRL_8723B 0x0577
+#define REG_PSTIMER_8723B 0x0580
+#define REG_TIMER0_8723B 0x0584
+#define REG_TIMER1_8723B 0x0588
+#define REG_ACMHWCTRL_8723B 0x05C0
+#define REG_SCH_TXCMD_8723B 0x05F8
+
+/* 0x0600h ~ 0x07FFh WMAC Configuration */
+#define REG_MAC_CR_8723B 0x0600
+#define REG_TCR_8723B 0x0604
+#define REG_RCR_8723B 0x0608
+#define REG_RX_PKT_LIMIT_8723B 0x060C
+#define REG_RX_DLK_TIME_8723B 0x060D
+#define REG_RX_DRVINFO_SZ_8723B 0x060F
+
+#define REG_MACID_8723B 0x0610
+#define REG_BSSID_8723B 0x0618
+#define REG_MAR_8723B 0x0620
+#define REG_MBIDCAMCFG_8723B 0x0628
+
+#define REG_USTIME_EDCA_8723B 0x0638
+#define REG_MAC_SPEC_SIFS_8723B 0x063A
+#define REG_RESP_SIFP_CCK_8723B 0x063C
+#define REG_RESP_SIFS_OFDM_8723B 0x063E
+#define REG_ACKTO_8723B 0x0640
+#define REG_CTS2TO_8723B 0x0641
+#define REG_EIFS_8723B 0x0642
+
+#define REG_NAV_UPPER_8723B 0x0652 /* unit of 128 */
+#define REG_TRXPTCL_CTL_8723B 0x0668
+
+/* Security */
+#define REG_CAMCMD_8723B 0x0670
+#define REG_CAMWRITE_8723B 0x0674
+#define REG_CAMREAD_8723B 0x0678
+#define REG_CAMDBG_8723B 0x067C
+#define REG_SECCFG_8723B 0x0680
+
+/* Power */
+#define REG_WOW_CTRL_8723B 0x0690
+#define REG_PS_RX_INFO_8723B 0x0692
+#define REG_UAPSD_TID_8723B 0x0693
+#define REG_WKFMCAM_CMD_8723B 0x0698
+#define REG_WKFMCAM_NUM_8723B 0x0698
+#define REG_WKFMCAM_RWD_8723B 0x069C
+#define REG_RXFLTMAP0_8723B 0x06A0
+#define REG_RXFLTMAP1_8723B 0x06A2
+#define REG_RXFLTMAP2_8723B 0x06A4
+#define REG_BCN_PSR_RPT_8723B 0x06A8
+#define REG_BT_COEX_TABLE_8723B 0x06C0
+#define REG_BFMER0_INFO_8723B 0x06E4
+#define REG_BFMER1_INFO_8723B 0x06EC
+#define REG_CSI_RPT_PARAM_BW20_8723B 0x06F4
+#define REG_CSI_RPT_PARAM_BW40_8723B 0x06F8
+#define REG_CSI_RPT_PARAM_BW80_8723B 0x06FC
+
+/* Hardware Port 2 */
+#define REG_MACID1_8723B 0x0700
+#define REG_BSSID1_8723B 0x0708
+#define REG_BFMEE_SEL_8723B 0x0714
+#define REG_SND_PTCL_CTRL_8723B 0x0718
+
+
+/* Redefine 8192C register definitions for compatibility */
+
+/* TODO: use these definitions when using the REG_xxx naming rule. */
+/* NOTE: DO NOT remove these definitions. They will be used later. */
+#define EFUSE_CTRL_8723B REG_EFUSE_CTRL_8723B /* E-Fuse Control. */
+#define EFUSE_TEST_8723B REG_EFUSE_TEST_8723B /* E-Fuse Test. */
+#define MSR_8723B (REG_CR_8723B + 2) /* Media Status register */
+#define ISR_8723B REG_HISR0_8723B
+#define TSFR_8723B REG_TSFTR_8723B /* Timing Sync Function Timer Register. */
+
+#define PBP_8723B REG_PBP_8723B
+
+/* Redefine MACID register, to be compatible with prior ICs. */
+#define IDR0_8723B REG_MACID_8723B /* MAC ID Register, Offset 0x0050-0x0053 */
+#define IDR4_8723B (REG_MACID_8723B + 4) /* MAC ID Register, Offset 0x0054-0x0055 */
+
+/* 9. Security Control Registers (Offset:) */
+#define RWCAM_8723B REG_CAMCMD_8723B /* In the 8190 data sheet it is called CAMcmd */
+#define WCAMI_8723B REG_CAMWRITE_8723B /* Software write CAM input content */
+#define RCAMO_8723B REG_CAMREAD_8723B /* Software read/write CAM config */
+#define CAMDBG_8723B REG_CAMDBG_8723B
+#define SECR_8723B REG_SECCFG_8723B /* Security Configuration Register */
+
+/* 8195 IMR/ISR bits (offset 0xB0, 8bits) */
+#define IMR_DISABLED_8723B 0
+/* IMR DW0(0x00B0-00B3) Bit 0-31 */
+#define IMR_TIMER2_8723B BIT31 /* Timeout interrupt 2 */
+#define IMR_TIMER1_8723B BIT30 /* Timeout interrupt 1 */
+#define IMR_PSTIMEOUT_8723B BIT29 /* Power Save Time Out Interrupt */
+#define IMR_GTINT4_8723B BIT28 /* When GTIMER4 expires, this bit is set to 1 */
+#define IMR_GTINT3_8723B BIT27 /* When GTIMER3 expires, this bit is set to 1 */
+#define IMR_TXBCN0ERR_8723B BIT26 /* Transmit Beacon0 Error */
+#define IMR_TXBCN0OK_8723B BIT25 /* Transmit Beacon0 OK */
+#define IMR_TSF_BIT32_TOGGLE_8723B BIT24 /* TSF Timer BIT32 toggle indication interrupt */
+#define IMR_BCNDMAINT0_8723B BIT20 /* Beacon DMA Interrupt 0 */
+#define IMR_BCNDERR0_8723B BIT16 /* Beacon Queue DMA OK0 */
+#define IMR_HSISR_IND_ON_INT_8723B BIT15 /* HSISR Indicator (HSIMR & HSISR is true, this bit is set to 1) */
+#define IMR_BCNDMAINT_E_8723B BIT14 /* Beacon DMA Interrupt Extension for Win7 */
+#define IMR_ATIMEND_8723B BIT12 /* CTWindow End or ATIM Window End */
+#define IMR_C2HCMD_8723B BIT10 /* CPU to Host Command INT Status, Write 1 clear */
+#define IMR_CPWM2_8723B BIT9 /* CPU power Mode exchange INT Status, Write 1 clear */
+#define IMR_CPWM_8723B BIT8 /* CPU power Mode exchange INT Status, Write 1 clear */
+#define IMR_HIGHDOK_8723B BIT7 /* High Queue DMA OK */
+#define IMR_MGNTDOK_8723B BIT6 /* Management Queue DMA OK */
+#define IMR_BKDOK_8723B BIT5 /* AC_BK DMA OK */
+#define IMR_BEDOK_8723B BIT4 /* AC_BE DMA OK */
+#define IMR_VIDOK_8723B BIT3 /* AC_VI DMA OK */
+#define IMR_VODOK_8723B BIT2 /* AC_VO DMA OK */
+#define IMR_RDU_8723B BIT1 /* Rx Descriptor Unavailable */
+#define IMR_ROK_8723B BIT0 /* Receive DMA OK */
+
+/* IMR DW1(0x00B4-00B7) Bit 0-31 */
+#define IMR_BCNDMAINT7_8723B BIT27 /* Beacon DMA Interrupt 7 */
+#define IMR_BCNDMAINT6_8723B BIT26 /* Beacon DMA Interrupt 6 */
+#define IMR_BCNDMAINT5_8723B BIT25 /* Beacon DMA Interrupt 5 */
+#define IMR_BCNDMAINT4_8723B BIT24 /* Beacon DMA Interrupt 4 */
+#define IMR_BCNDMAINT3_8723B BIT23 /* Beacon DMA Interrupt 3 */
+#define IMR_BCNDMAINT2_8723B BIT22 /* Beacon DMA Interrupt 2 */
+#define IMR_BCNDMAINT1_8723B BIT21 /* Beacon DMA Interrupt 1 */
+#define IMR_BCNDOK7_8723B BIT20 /* Beacon Queue DMA OK Interrupt 7 */
+#define IMR_BCNDOK6_8723B BIT19 /* Beacon Queue DMA OK Interrupt 6 */
+#define IMR_BCNDOK5_8723B BIT18 /* Beacon Queue DMA OK Interrupt 5 */
+#define IMR_BCNDOK4_8723B BIT17 /* Beacon Queue DMA OK Interrupt 4 */
+#define IMR_BCNDOK3_8723B BIT16 /* Beacon Queue DMA OK Interrupt 3 */
+#define IMR_BCNDOK2_8723B BIT15 /* Beacon Queue DMA OK Interrupt 2 */
+#define IMR_BCNDOK1_8723B BIT14 /* Beacon Queue DMA OK Interrupt 1 */
+#define IMR_ATIMEND_E_8723B BIT13 /* ATIM Window End Extension for Win7 */
+#define IMR_TXERR_8723B BIT11 /* Tx Error Flag Interrupt Status, write 1 clear. */
+#define IMR_RXERR_8723B BIT10 /* Rx Error Flag INT Status, Write 1 clear */
+#define IMR_TXFOVW_8723B BIT9 /* Transmit FIFO Overflow */
+#define IMR_RXFOVW_8723B BIT8 /* Receive FIFO Overflow */
+
+/* 2 ACMHWCTRL 0x05C0 */
+#define AcmHw_HwEn_8723B BIT(0)
+#define AcmHw_VoqEn_8723B BIT(1)
+#define AcmHw_ViqEn_8723B BIT(2)
+#define AcmHw_BeqEn_8723B BIT(3)
+#define AcmHw_VoqStatus_8723B BIT(5)
+#define AcmHw_ViqStatus_8723B BIT(6)
+#define AcmHw_BeqStatus_8723B BIT(7)
+
+/* 8195 (RCR) Receive Configuration Register (Offset 0x608, 32 bits) */
+#define RCR_TCPOFLD_EN BIT25 /* Enable TCP checksum offload */
+
+#endif /* #ifndef __INC_HAL8723BREG_H */
diff --git a/drivers/staging/rtl8723bs/hal/HalBtc8723b1Ant.c b/drivers/staging/rtl8723bs/hal/HalBtc8723b1Ant.c
new file mode 100644
index 000000000000..86040adb436c
--- /dev/null
+++ b/drivers/staging/rtl8723bs/hal/HalBtc8723b1Ant.c
@@ -0,0 +1,3779 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+
+#include "Mp_Precomp.h"
+
+/* Global variables, these are static variables */
+static COEX_DM_8723B_1ANT GLCoexDm8723b1Ant;
+static PCOEX_DM_8723B_1ANT pCoexDm = &GLCoexDm8723b1Ant;
+static COEX_STA_8723B_1ANT GLCoexSta8723b1Ant;
+static PCOEX_STA_8723B_1ANT pCoexSta = &GLCoexSta8723b1Ant;
+
+static const char *const GLBtInfoSrc8723b1Ant[] = {
+ "BT Info[wifi fw]",
+ "BT Info[bt rsp]",
+ "BT Info[bt auto report]",
+};
+
+static u32 GLCoexVerDate8723b1Ant = 20140507;
+static u32 GLCoexVer8723b1Ant = 0x4e;
+
+/* local function proto type if needed */
+/* local function start with halbtc8723b1ant_ */
+static u8 halbtc8723b1ant_BtRssiState(
+ u8 levelNum, u8 rssiThresh, u8 rssiThresh1
+)
+{
+ s32 btRssi = 0;
+ u8 btRssiState = pCoexSta->preBtRssiState;
+
+ btRssi = pCoexSta->btRssi;
+
+ if (levelNum == 2) {
+ if (
+ (pCoexSta->preBtRssiState == BTC_RSSI_STATE_LOW) ||
+ (pCoexSta->preBtRssiState == BTC_RSSI_STATE_STAY_LOW)
+ ) {
+ if (btRssi >= (rssiThresh+BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT)) {
+
+ btRssiState = BTC_RSSI_STATE_HIGH;
+ BTC_PRINT(
+ BTC_MSG_ALGORITHM,
+ ALGO_BT_RSSI_STATE,
+ ("[BTCoex], BT Rssi state switch to High\n")
+ );
+ } else {
+ btRssiState = BTC_RSSI_STATE_STAY_LOW;
+ BTC_PRINT(
+ BTC_MSG_ALGORITHM,
+ ALGO_BT_RSSI_STATE,
+ ("[BTCoex], BT Rssi state stay at Low\n")
+ );
+ }
+ } else {
+ if (btRssi < rssiThresh) {
+ btRssiState = BTC_RSSI_STATE_LOW;
+ BTC_PRINT(
+ BTC_MSG_ALGORITHM,
+ ALGO_BT_RSSI_STATE,
+ ("[BTCoex], BT Rssi state switch to Low\n")
+ );
+ } else {
+ btRssiState = BTC_RSSI_STATE_STAY_HIGH;
+ BTC_PRINT(
+ BTC_MSG_ALGORITHM,
+ ALGO_BT_RSSI_STATE,
+ ("[BTCoex], BT Rssi state stay at High\n")
+ );
+ }
+ }
+ } else if (levelNum == 3) {
+ if (rssiThresh > rssiThresh1) {
+ BTC_PRINT(
+ BTC_MSG_ALGORITHM,
+ ALGO_BT_RSSI_STATE,
+ ("[BTCoex], BT Rssi thresh error!!\n")
+ );
+ return pCoexSta->preBtRssiState;
+ }
+
+ if (
+ (pCoexSta->preBtRssiState == BTC_RSSI_STATE_LOW) ||
+ (pCoexSta->preBtRssiState == BTC_RSSI_STATE_STAY_LOW)
+ ) {
+ if (btRssi >= (rssiThresh+BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT)) {
+ btRssiState = BTC_RSSI_STATE_MEDIUM;
+ BTC_PRINT(
+ BTC_MSG_ALGORITHM,
+ ALGO_BT_RSSI_STATE,
+ ("[BTCoex], BT Rssi state switch to Medium\n")
+ );
+ } else {
+ btRssiState = BTC_RSSI_STATE_STAY_LOW;
+ BTC_PRINT(
+ BTC_MSG_ALGORITHM,
+ ALGO_BT_RSSI_STATE,
+ ("[BTCoex], BT Rssi state stay at Low\n")
+ );
+ }
+ } else if (
+ (pCoexSta->preBtRssiState == BTC_RSSI_STATE_MEDIUM) ||
+ (pCoexSta->preBtRssiState == BTC_RSSI_STATE_STAY_MEDIUM)
+ ) {
+ if (btRssi >= (rssiThresh1+BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT)) {
+ btRssiState = BTC_RSSI_STATE_HIGH;
+ BTC_PRINT(
+ BTC_MSG_ALGORITHM,
+ ALGO_BT_RSSI_STATE,
+ ("[BTCoex], BT Rssi state switch to High\n")
+ );
+ } else if (btRssi < rssiThresh) {
+ btRssiState = BTC_RSSI_STATE_LOW;
+ BTC_PRINT(
+ BTC_MSG_ALGORITHM,
+ ALGO_BT_RSSI_STATE,
+ ("[BTCoex], BT Rssi state switch to Low\n")
+ );
+ } else {
+ btRssiState = BTC_RSSI_STATE_STAY_MEDIUM;
+ BTC_PRINT(
+ BTC_MSG_ALGORITHM,
+ ALGO_BT_RSSI_STATE,
+ ("[BTCoex], BT Rssi state stay at Medium\n")
+ );
+ }
+ } else {
+ if (btRssi < rssiThresh1) {
+ btRssiState = BTC_RSSI_STATE_MEDIUM;
+ BTC_PRINT(
+ BTC_MSG_ALGORITHM,
+ ALGO_BT_RSSI_STATE,
+ ("[BTCoex], BT Rssi state switch to Medium\n")
+ );
+ } else {
+ btRssiState = BTC_RSSI_STATE_STAY_HIGH;
+ BTC_PRINT(
+ BTC_MSG_ALGORITHM,
+ ALGO_BT_RSSI_STATE,
+ ("[BTCoex], BT Rssi state stay at High\n")
+ );
+ }
+ }
+ }
+
+ pCoexSta->preBtRssiState = btRssiState;
+
+ return btRssiState;
+}
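Editor's note: the state machine above applies a tolerance only on upward transitions. Leaving LOW requires btRssi >= rssiThresh + BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT, falling back requires btRssi < rssiThresh, and the STAY_* states are reported whenever no boundary is crossed. A compact sketch of the same two-level hysteresis with hypothetical names and an illustrative tolerance of 2:

    #define RSSI_TOL 2	/* illustrative tolerance, not the driver's constant */

    enum rssi_state { RSSI_LOW, RSSI_HIGH };

    static enum rssi_state rssi_hysteresis(enum rssi_state prev, s32 rssi, s32 thresh)
    {
    	if (prev == RSSI_LOW)
    		return (rssi >= thresh + RSSI_TOL) ? RSSI_HIGH : RSSI_LOW;
    	return (rssi < thresh) ? RSSI_LOW : RSSI_HIGH;
    }

With thresh = 35, an RSSI hovering at 35 or 36 never flaps: it must reach 37 to switch to HIGH and drop below 35 to return to LOW.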
+
+static void halbtc8723b1ant_UpdateRaMask(
+ PBTC_COEXIST pBtCoexist, bool bForceExec, u32 disRateMask
+)
+{
+ pCoexDm->curRaMask = disRateMask;
+
+ if (bForceExec || (pCoexDm->preRaMask != pCoexDm->curRaMask))
+ pBtCoexist->fBtcSet(
+ pBtCoexist,
+ BTC_SET_ACT_UPDATE_RAMASK,
+ &pCoexDm->curRaMask
+ );
+ pCoexDm->preRaMask = pCoexDm->curRaMask;
+}
+
+static void halbtc8723b1ant_AutoRateFallbackRetry(
+ PBTC_COEXIST pBtCoexist, bool bForceExec, u8 type
+)
+{
+ bool bWifiUnderBMode = false;
+
+ pCoexDm->curArfrType = type;
+
+ if (bForceExec || (pCoexDm->preArfrType != pCoexDm->curArfrType)) {
+ switch (pCoexDm->curArfrType) {
+ case 0: /* normal mode */
+ pBtCoexist->fBtcWrite4Byte(
+ pBtCoexist, 0x430, pCoexDm->backupArfrCnt1
+ );
+ pBtCoexist->fBtcWrite4Byte(
+ pBtCoexist, 0x434, pCoexDm->backupArfrCnt2
+ );
+ break;
+ case 1:
+ pBtCoexist->fBtcGet(
+ pBtCoexist, BTC_GET_BL_WIFI_UNDER_B_MODE, &bWifiUnderBMode
+ );
+ if (bWifiUnderBMode) {
+ pBtCoexist->fBtcWrite4Byte(pBtCoexist, 0x430, 0x0);
+ pBtCoexist->fBtcWrite4Byte(pBtCoexist, 0x434, 0x01010101);
+ } else {
+ pBtCoexist->fBtcWrite4Byte(pBtCoexist, 0x430, 0x0);
+ pBtCoexist->fBtcWrite4Byte(pBtCoexist, 0x434, 0x04030201);
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
+ pCoexDm->preArfrType = pCoexDm->curArfrType;
+}
+
+static void halbtc8723b1ant_RetryLimit(
+ PBTC_COEXIST pBtCoexist, bool bForceExec, u8 type
+)
+{
+ pCoexDm->curRetryLimitType = type;
+
+ if (
+ bForceExec ||
+ (pCoexDm->preRetryLimitType != pCoexDm->curRetryLimitType)
+ ) {
+ switch (pCoexDm->curRetryLimitType) {
+ case 0: /* normal mode */
+ pBtCoexist->fBtcWrite2Byte(
+ pBtCoexist, 0x42a, pCoexDm->backupRetryLimit
+ );
+ break;
+ case 1: /* retry limit =8 */
+ pBtCoexist->fBtcWrite2Byte(pBtCoexist, 0x42a, 0x0808);
+ break;
+ default:
+ break;
+ }
+ }
+
+ pCoexDm->preRetryLimitType = pCoexDm->curRetryLimitType;
+}
+
+static void halbtc8723b1ant_AmpduMaxTime(
+ PBTC_COEXIST pBtCoexist, bool bForceExec, u8 type
+)
+{
+ pCoexDm->curAmpduTimeType = type;
+
+ if (
+ bForceExec || (pCoexDm->preAmpduTimeType != pCoexDm->curAmpduTimeType)
+ ) {
+ switch (pCoexDm->curAmpduTimeType) {
+ case 0: /* normal mode */
+ pBtCoexist->fBtcWrite1Byte(
+ pBtCoexist, 0x456, pCoexDm->backupAmpduMaxTime
+ );
+ break;
+		case 1: /* AMPDU time = 0x38 * 32us */
+ pBtCoexist->fBtcWrite1Byte(pBtCoexist, 0x456, 0x38);
+ break;
+ default:
+ break;
+ }
+ }
+
+ pCoexDm->preAmpduTimeType = pCoexDm->curAmpduTimeType;
+}
+
+static void halbtc8723b1ant_LimitedTx(
+ PBTC_COEXIST pBtCoexist,
+ bool bForceExec,
+ u8 raMaskType,
+ u8 arfrType,
+ u8 retryLimitType,
+ u8 ampduTimeType
+)
+{
+ switch (raMaskType) {
+ case 0: /* normal mode */
+ halbtc8723b1ant_UpdateRaMask(pBtCoexist, bForceExec, 0x0);
+ break;
+ case 1: /* disable cck 1/2 */
+ halbtc8723b1ant_UpdateRaMask(pBtCoexist, bForceExec, 0x00000003);
+ break;
+ case 2: /* disable cck 1/2/5.5, ofdm 6/9/12/18/24, mcs 0/1/2/3/4 */
+ halbtc8723b1ant_UpdateRaMask(pBtCoexist, bForceExec, 0x0001f1f7);
+ break;
+ default:
+ break;
+ }
+
+ halbtc8723b1ant_AutoRateFallbackRetry(pBtCoexist, bForceExec, arfrType);
+ halbtc8723b1ant_RetryLimit(pBtCoexist, bForceExec, retryLimitType);
+ halbtc8723b1ant_AmpduMaxTime(pBtCoexist, bForceExec, ampduTimeType);
+}
+
+static void halbtc8723b1ant_LimitedRx(
+ PBTC_COEXIST pBtCoexist,
+ bool bForceExec,
+ bool bRejApAggPkt,
+ bool bBtCtrlAggBufSize,
+ u8 aggBufSize
+)
+{
+ bool bRejectRxAgg = bRejApAggPkt;
+ bool bBtCtrlRxAggSize = bBtCtrlAggBufSize;
+ u8 rxAggSize = aggBufSize;
+
+ /* */
+ /* Rx Aggregation related setting */
+ /* */
+ pBtCoexist->fBtcSet(
+ pBtCoexist, BTC_SET_BL_TO_REJ_AP_AGG_PKT, &bRejectRxAgg
+ );
+ /* decide BT control aggregation buf size or not */
+ pBtCoexist->fBtcSet(
+ pBtCoexist, BTC_SET_BL_BT_CTRL_AGG_SIZE, &bBtCtrlRxAggSize
+ );
+	/* aggregation buf size, only works when BT controls the Rx aggregation size. */
+ pBtCoexist->fBtcSet(pBtCoexist, BTC_SET_U1_AGG_BUF_SIZE, &rxAggSize);
+ /* real update aggregation setting */
+ pBtCoexist->fBtcSet(pBtCoexist, BTC_SET_ACT_AGGREGATE_CTRL, NULL);
+
+
+}
+
+static void halbtc8723b1ant_QueryBtInfo(PBTC_COEXIST pBtCoexist)
+{
+ u8 H2C_Parameter[1] = {0};
+
+ pCoexSta->bC2hBtInfoReqSent = true;
+
+ H2C_Parameter[0] |= BIT0; /* trigger */
+
+ BTC_PRINT(
+ BTC_MSG_ALGORITHM,
+ ALGO_TRACE_FW_EXEC,
+ ("[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n", H2C_Parameter[0])
+ );
+
+ pBtCoexist->fBtcFillH2c(pBtCoexist, 0x61, 1, H2C_Parameter);
+}
+
+static void halbtc8723b1ant_MonitorBtCtr(PBTC_COEXIST pBtCoexist)
+{
+ u32 regHPTxRx, regLPTxRx, u4Tmp;
+ u32 regHPTx = 0, regHPRx = 0, regLPTx = 0, regLPRx = 0;
+ static u8 NumOfBtCounterChk;
+
+ /* to avoid 0x76e[3] = 1 (WLAN_Act control by PTA) during IPS */
+ /* if (! (pBtCoexist->fBtcRead1Byte(pBtCoexist, 0x76e) & 0x8)) */
+
+ if (pCoexSta->bUnderIps) {
+ pCoexSta->highPriorityTx = 65535;
+ pCoexSta->highPriorityRx = 65535;
+ pCoexSta->lowPriorityTx = 65535;
+ pCoexSta->lowPriorityRx = 65535;
+ return;
+ }
+
+ regHPTxRx = 0x770;
+ regLPTxRx = 0x774;
+
+ u4Tmp = pBtCoexist->fBtcRead4Byte(pBtCoexist, regHPTxRx);
+ regHPTx = u4Tmp & bMaskLWord;
+ regHPRx = (u4Tmp & bMaskHWord)>>16;
+
+ u4Tmp = pBtCoexist->fBtcRead4Byte(pBtCoexist, regLPTxRx);
+ regLPTx = u4Tmp & bMaskLWord;
+ regLPRx = (u4Tmp & bMaskHWord)>>16;
+
+ pCoexSta->highPriorityTx = regHPTx;
+ pCoexSta->highPriorityRx = regHPRx;
+ pCoexSta->lowPriorityTx = regLPTx;
+ pCoexSta->lowPriorityRx = regLPRx;
+
+ if ((pCoexSta->lowPriorityTx >= 1050) && (!pCoexSta->bC2hBtInquiryPage))
+ pCoexSta->popEventCnt++;
+
+ BTC_PRINT(
+ BTC_MSG_ALGORITHM,
+ ALGO_TRACE,
+ (
+ "[BTCoex], Hi-Pri Rx/Tx: %d/%d, Lo-Pri Rx/Tx: %d/%d\n",
+ regHPRx,
+ regHPTx,
+ regLPRx,
+ regLPTx
+ )
+ );
+
+ /* reset counter */
+ pBtCoexist->fBtcWrite1Byte(pBtCoexist, 0x76e, 0xc);
+
+ if ((regHPTx == 0) && (regHPRx == 0) && (regLPTx == 0) && (regLPRx == 0)) {
+ NumOfBtCounterChk++;
+ if (NumOfBtCounterChk >= 3) {
+ halbtc8723b1ant_QueryBtInfo(pBtCoexist);
+ NumOfBtCounterChk = 0;
+ }
+ }
+}
+
+
+static void halbtc8723b1ant_MonitorWiFiCtr(PBTC_COEXIST pBtCoexist)
+{
+ s32 wifiRssi = 0;
+ bool bWifiBusy = false, bWifiUnderBMode = false;
+ static u8 nCCKLockCounter = 0;
+
+ pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_BL_WIFI_BUSY, &bWifiBusy);
+ pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_S4_WIFI_RSSI, &wifiRssi);
+ pBtCoexist->fBtcGet(
+ pBtCoexist, BTC_GET_BL_WIFI_UNDER_B_MODE, &bWifiUnderBMode
+ );
+
+ if (pCoexSta->bUnderIps) {
+ pCoexSta->nCRCOK_CCK = 0;
+ pCoexSta->nCRCOK_11g = 0;
+ pCoexSta->nCRCOK_11n = 0;
+ pCoexSta->nCRCOK_11nAgg = 0;
+
+ pCoexSta->nCRCErr_CCK = 0;
+ pCoexSta->nCRCErr_11g = 0;
+ pCoexSta->nCRCErr_11n = 0;
+ pCoexSta->nCRCErr_11nAgg = 0;
+ } else {
+ pCoexSta->nCRCOK_CCK = pBtCoexist->fBtcRead4Byte(pBtCoexist, 0xf88);
+ pCoexSta->nCRCOK_11g = pBtCoexist->fBtcRead2Byte(pBtCoexist, 0xf94);
+ pCoexSta->nCRCOK_11n = pBtCoexist->fBtcRead2Byte(pBtCoexist, 0xf90);
+ pCoexSta->nCRCOK_11nAgg = pBtCoexist->fBtcRead2Byte(pBtCoexist, 0xfb8);
+
+ pCoexSta->nCRCErr_CCK = pBtCoexist->fBtcRead4Byte(pBtCoexist, 0xf84);
+ pCoexSta->nCRCErr_11g = pBtCoexist->fBtcRead2Byte(pBtCoexist, 0xf96);
+ pCoexSta->nCRCErr_11n = pBtCoexist->fBtcRead2Byte(pBtCoexist, 0xf92);
+ pCoexSta->nCRCErr_11nAgg = pBtCoexist->fBtcRead2Byte(pBtCoexist, 0xfba);
+ }
+
+
+ /* reset counter */
+ pBtCoexist->fBtcWrite1ByteBitMask(pBtCoexist, 0xf16, 0x1, 0x1);
+ pBtCoexist->fBtcWrite1ByteBitMask(pBtCoexist, 0xf16, 0x1, 0x0);
+
+ if (bWifiBusy && (wifiRssi >= 30) && !bWifiUnderBMode) {
+ if (
+ (pCoexDm->btStatus == BT_8723B_1ANT_BT_STATUS_ACL_BUSY) ||
+ (pCoexDm->btStatus == BT_8723B_1ANT_BT_STATUS_ACL_SCO_BUSY) ||
+ (pCoexDm->btStatus == BT_8723B_1ANT_BT_STATUS_SCO_BUSY)
+ ) {
+ if (
+ pCoexSta->nCRCOK_CCK > (
+ pCoexSta->nCRCOK_11g +
+ pCoexSta->nCRCOK_11n +
+ pCoexSta->nCRCOK_11nAgg
+ )
+ ) {
+ if (nCCKLockCounter < 5)
+ nCCKLockCounter++;
+ } else {
+ if (nCCKLockCounter > 0)
+ nCCKLockCounter--;
+ }
+
+ } else {
+ if (nCCKLockCounter > 0)
+ nCCKLockCounter--;
+ }
+ } else {
+ if (nCCKLockCounter > 0)
+ nCCKLockCounter--;
+ }
+
+ if (!pCoexSta->bPreCCKLock) {
+
+ if (nCCKLockCounter >= 5)
+ pCoexSta->bCCKLock = true;
+ else
+ pCoexSta->bCCKLock = false;
+ } else {
+ if (nCCKLockCounter == 0)
+ pCoexSta->bCCKLock = false;
+ else
+ pCoexSta->bCCKLock = true;
+ }
+
+ pCoexSta->bPreCCKLock = pCoexSta->bCCKLock;
+
+
+}
+
+static bool halbtc8723b1ant_IsWifiStatusChanged(PBTC_COEXIST pBtCoexist)
+{
+ static bool bPreWifiBusy = false, bPreUnder4way = false, bPreBtHsOn = false;
+ bool bWifiBusy = false, bUnder4way = false, bBtHsOn = false;
+ bool bWifiConnected = false;
+
+ pBtCoexist->fBtcGet(
+ pBtCoexist, BTC_GET_BL_WIFI_CONNECTED, &bWifiConnected
+ );
+ pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_BL_WIFI_BUSY, &bWifiBusy);
+ pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_BL_HS_OPERATION, &bBtHsOn);
+ pBtCoexist->fBtcGet(
+ pBtCoexist, BTC_GET_BL_WIFI_4_WAY_PROGRESS, &bUnder4way
+ );
+
+ if (bWifiConnected) {
+ if (bWifiBusy != bPreWifiBusy) {
+ bPreWifiBusy = bWifiBusy;
+ return true;
+ }
+
+ if (bUnder4way != bPreUnder4way) {
+ bPreUnder4way = bUnder4way;
+ return true;
+ }
+
+ if (bBtHsOn != bPreBtHsOn) {
+ bPreBtHsOn = bBtHsOn;
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static void halbtc8723b1ant_UpdateBtLinkInfo(PBTC_COEXIST pBtCoexist)
+{
+ PBTC_BT_LINK_INFO pBtLinkInfo = &pBtCoexist->btLinkInfo;
+ bool bBtHsOn = false;
+
+ pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_BL_HS_OPERATION, &bBtHsOn);
+
+ pBtLinkInfo->bBtLinkExist = pCoexSta->bBtLinkExist;
+ pBtLinkInfo->bScoExist = pCoexSta->bScoExist;
+ pBtLinkInfo->bA2dpExist = pCoexSta->bA2dpExist;
+ pBtLinkInfo->bPanExist = pCoexSta->bPanExist;
+ pBtLinkInfo->bHidExist = pCoexSta->bHidExist;
+
+ /* work around for HS mode. */
+ if (bBtHsOn) {
+ pBtLinkInfo->bPanExist = true;
+ pBtLinkInfo->bBtLinkExist = true;
+ }
+
+ /* check if Sco only */
+ if (
+ pBtLinkInfo->bScoExist &&
+ !pBtLinkInfo->bA2dpExist &&
+ !pBtLinkInfo->bPanExist &&
+ !pBtLinkInfo->bHidExist
+ )
+ pBtLinkInfo->bScoOnly = true;
+ else
+ pBtLinkInfo->bScoOnly = false;
+
+ /* check if A2dp only */
+ if (
+ !pBtLinkInfo->bScoExist &&
+ pBtLinkInfo->bA2dpExist &&
+ !pBtLinkInfo->bPanExist &&
+ !pBtLinkInfo->bHidExist
+ )
+ pBtLinkInfo->bA2dpOnly = true;
+ else
+ pBtLinkInfo->bA2dpOnly = false;
+
+ /* check if Pan only */
+ if (
+ !pBtLinkInfo->bScoExist &&
+ !pBtLinkInfo->bA2dpExist &&
+ pBtLinkInfo->bPanExist &&
+ !pBtLinkInfo->bHidExist
+ )
+ pBtLinkInfo->bPanOnly = true;
+ else
+ pBtLinkInfo->bPanOnly = false;
+
+ /* check if Hid only */
+ if (
+ !pBtLinkInfo->bScoExist &&
+ !pBtLinkInfo->bA2dpExist &&
+ !pBtLinkInfo->bPanExist &&
+ pBtLinkInfo->bHidExist
+ )
+ pBtLinkInfo->bHidOnly = true;
+ else
+ pBtLinkInfo->bHidOnly = false;
+}
+
+static u8 halbtc8723b1ant_ActionAlgorithm(PBTC_COEXIST pBtCoexist)
+{
+ PBTC_BT_LINK_INFO pBtLinkInfo = &pBtCoexist->btLinkInfo;
+ bool bBtHsOn = false;
+ u8 algorithm = BT_8723B_1ANT_COEX_ALGO_UNDEFINED;
+ u8 numOfDiffProfile = 0;
+
+ pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_BL_HS_OPERATION, &bBtHsOn);
+
+ if (!pBtLinkInfo->bBtLinkExist) {
+ BTC_PRINT(
+ BTC_MSG_ALGORITHM,
+ ALGO_TRACE,
+ ("[BTCoex], No BT link exists!!!\n")
+ );
+ return algorithm;
+ }
+
+ if (pBtLinkInfo->bScoExist)
+ numOfDiffProfile++;
+ if (pBtLinkInfo->bHidExist)
+ numOfDiffProfile++;
+ if (pBtLinkInfo->bPanExist)
+ numOfDiffProfile++;
+ if (pBtLinkInfo->bA2dpExist)
+ numOfDiffProfile++;
+
+ if (numOfDiffProfile == 1) {
+ if (pBtLinkInfo->bScoExist) {
+ BTC_PRINT(
+ BTC_MSG_ALGORITHM,
+ ALGO_TRACE,
+ ("[BTCoex], BT Profile = SCO only\n")
+ );
+ algorithm = BT_8723B_1ANT_COEX_ALGO_SCO;
+ } else {
+ if (pBtLinkInfo->bHidExist) {
+ BTC_PRINT(
+ BTC_MSG_ALGORITHM,
+ ALGO_TRACE,
+ ("[BTCoex], BT Profile = HID only\n")
+ );
+ algorithm = BT_8723B_1ANT_COEX_ALGO_HID;
+ } else if (pBtLinkInfo->bA2dpExist) {
+ BTC_PRINT(
+ BTC_MSG_ALGORITHM,
+ ALGO_TRACE,
+ ("[BTCoex], BT Profile = A2DP only\n")
+ );
+ algorithm = BT_8723B_1ANT_COEX_ALGO_A2DP;
+ } else if (pBtLinkInfo->bPanExist) {
+ if (bBtHsOn) {
+ BTC_PRINT(
+ BTC_MSG_ALGORITHM,
+ ALGO_TRACE,
+ ("[BTCoex], BT Profile = PAN(HS) only\n")
+ );
+ algorithm = BT_8723B_1ANT_COEX_ALGO_PANHS;
+ } else {
+ BTC_PRINT(
+ BTC_MSG_ALGORITHM,
+ ALGO_TRACE,
+ ("[BTCoex], BT Profile = PAN(EDR) only\n")
+ );
+ algorithm = BT_8723B_1ANT_COEX_ALGO_PANEDR;
+ }
+ }
+ }
+ } else if (numOfDiffProfile == 2) {
+ if (pBtLinkInfo->bScoExist) {
+ if (pBtLinkInfo->bHidExist) {
+ BTC_PRINT(
+ BTC_MSG_ALGORITHM,
+ ALGO_TRACE,
+ ("[BTCoex], BT Profile = SCO + HID\n")
+ );
+ algorithm = BT_8723B_1ANT_COEX_ALGO_HID;
+ } else if (pBtLinkInfo->bA2dpExist) {
+ BTC_PRINT(
+ BTC_MSG_ALGORITHM,
+ ALGO_TRACE,
+ ("[BTCoex], BT Profile = SCO + A2DP ==> SCO\n")
+ );
+ algorithm = BT_8723B_1ANT_COEX_ALGO_SCO;
+ } else if (pBtLinkInfo->bPanExist) {
+ if (bBtHsOn) {
+ BTC_PRINT(
+ BTC_MSG_ALGORITHM,
+ ALGO_TRACE,
+ ("[BTCoex], BT Profile = SCO + PAN(HS)\n")
+ );
+ algorithm = BT_8723B_1ANT_COEX_ALGO_SCO;
+ } else {
+ BTC_PRINT(
+ BTC_MSG_ALGORITHM,
+ ALGO_TRACE,
+ ("[BTCoex], BT Profile = SCO + PAN(EDR)\n")
+ );
+ algorithm = BT_8723B_1ANT_COEX_ALGO_PANEDR_HID;
+ }
+ }
+ } else {
+ if (pBtLinkInfo->bHidExist && pBtLinkInfo->bA2dpExist) {
+ BTC_PRINT(
+ BTC_MSG_ALGORITHM,
+ ALGO_TRACE,
+ ("[BTCoex], BT Profile = HID + A2DP\n")
+ );
+ algorithm = BT_8723B_1ANT_COEX_ALGO_HID_A2DP;
+ } else if (pBtLinkInfo->bHidExist && pBtLinkInfo->bPanExist) {
+ if (bBtHsOn) {
+ BTC_PRINT(
+ BTC_MSG_ALGORITHM,
+ ALGO_TRACE,
+ ("[BTCoex], BT Profile = HID + PAN(HS)\n")
+ );
+ algorithm = BT_8723B_1ANT_COEX_ALGO_HID_A2DP;
+ } else {
+ BTC_PRINT(
+ BTC_MSG_ALGORITHM,
+ ALGO_TRACE,
+ ("[BTCoex], BT Profile = HID + PAN(EDR)\n")
+ );
+ algorithm = BT_8723B_1ANT_COEX_ALGO_PANEDR_HID;
+ }
+ } else if (pBtLinkInfo->bPanExist && pBtLinkInfo->bA2dpExist) {
+ if (bBtHsOn) {
+ BTC_PRINT(
+ BTC_MSG_ALGORITHM,
+ ALGO_TRACE,
+ ("[BTCoex], BT Profile = A2DP + PAN(HS)\n")
+ );
+ algorithm = BT_8723B_1ANT_COEX_ALGO_A2DP_PANHS;
+ } else {
+ BTC_PRINT(
+ BTC_MSG_ALGORITHM,
+ ALGO_TRACE,
+ ("[BTCoex], BT Profile = A2DP + PAN(EDR)\n")
+ );
+ algorithm = BT_8723B_1ANT_COEX_ALGO_PANEDR_A2DP;
+ }
+ }
+ }
+ } else if (numOfDiffProfile == 3) {
+ if (pBtLinkInfo->bScoExist) {
+ if (pBtLinkInfo->bHidExist && pBtLinkInfo->bA2dpExist) {
+ BTC_PRINT(
+ BTC_MSG_ALGORITHM,
+ ALGO_TRACE,
+ ("[BTCoex], BT Profile = SCO + HID + A2DP ==> HID\n")
+ );
+ algorithm = BT_8723B_1ANT_COEX_ALGO_HID;
+ } else if (
+ pBtLinkInfo->bHidExist && pBtLinkInfo->bPanExist
+ ) {
+ if (bBtHsOn) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BT Profile = SCO + HID + PAN(HS)\n"));
+ algorithm = BT_8723B_1ANT_COEX_ALGO_HID_A2DP;
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BT Profile = SCO + HID + PAN(EDR)\n"));
+ algorithm = BT_8723B_1ANT_COEX_ALGO_PANEDR_HID;
+ }
+ } else if (pBtLinkInfo->bPanExist && pBtLinkInfo->bA2dpExist) {
+ if (bBtHsOn) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BT Profile = SCO + A2DP + PAN(HS)\n"));
+ algorithm = BT_8723B_1ANT_COEX_ALGO_SCO;
+ } else {
+ BTC_PRINT(
+ BTC_MSG_ALGORITHM,
+ ALGO_TRACE,
+ ("[BTCoex], BT Profile = SCO + A2DP + PAN(EDR) ==> HID\n")
+ );
+ algorithm = BT_8723B_1ANT_COEX_ALGO_PANEDR_HID;
+ }
+ }
+ } else {
+ if (
+ pBtLinkInfo->bHidExist &&
+ pBtLinkInfo->bPanExist &&
+ pBtLinkInfo->bA2dpExist
+ ) {
+ if (bBtHsOn) {
+ BTC_PRINT(
+ BTC_MSG_ALGORITHM,
+ ALGO_TRACE,
+ ("[BTCoex], BT Profile = HID + A2DP + PAN(HS)\n")
+ );
+ algorithm = BT_8723B_1ANT_COEX_ALGO_HID_A2DP;
+ } else {
+ BTC_PRINT(
+ BTC_MSG_ALGORITHM,
+ ALGO_TRACE,
+ ("[BTCoex], BT Profile = HID + A2DP + PAN(EDR)\n")
+ );
+ algorithm = BT_8723B_1ANT_COEX_ALGO_HID_A2DP_PANEDR;
+ }
+ }
+ }
+ } else if (numOfDiffProfile >= 3) {
+ if (pBtLinkInfo->bScoExist) {
+ if (
+ pBtLinkInfo->bHidExist &&
+ pBtLinkInfo->bPanExist &&
+ pBtLinkInfo->bA2dpExist
+ ) {
+ if (bBtHsOn) {
+ BTC_PRINT(
+ BTC_MSG_ALGORITHM,
+ ALGO_TRACE,
+ ("[BTCoex], Error!!! BT Profile = SCO + HID + A2DP + PAN(HS)\n")
+ );
+
+ } else {
+ BTC_PRINT(
+ BTC_MSG_ALGORITHM,
+ ALGO_TRACE,
+ ("[BTCoex], BT Profile = SCO + HID + A2DP + PAN(EDR) ==>PAN(EDR)+HID\n")
+ );
+ algorithm = BT_8723B_1ANT_COEX_ALGO_PANEDR_HID;
+ }
+ }
+ }
+ }
+
+ return algorithm;
+}
+
+static void halbtc8723b1ant_SetSwPenaltyTxRateAdaptive(
+ PBTC_COEXIST pBtCoexist, bool bLowPenaltyRa
+)
+{
+ u8 H2C_Parameter[6] = {0};
+
+ H2C_Parameter[0] = 0x6; /* opCode, 0x6 = Retry_Penalty */
+
+ if (bLowPenaltyRa) {
+ H2C_Parameter[1] |= BIT0;
+ H2C_Parameter[2] = 0x00; /* normal rate except MCS7/6/5, OFDM54/48/36 */
+ H2C_Parameter[3] = 0xf7; /* MCS7 or OFDM54 */
+ H2C_Parameter[4] = 0xf8; /* MCS6 or OFDM48 */
+ H2C_Parameter[5] = 0xf9; /* MCS5 or OFDM36 */
+ }
+
+ BTC_PRINT(
+ BTC_MSG_ALGORITHM,
+ ALGO_TRACE_FW_EXEC,
+ (
+ "[BTCoex], set WiFi Low-Penalty Retry: %s",
+ (bLowPenaltyRa ? "ON!!" : "OFF!!")
+ )
+ );
+
+ pBtCoexist->fBtcFillH2c(pBtCoexist, 0x69, 6, H2C_Parameter);
+}
+
+static void halbtc8723b1ant_LowPenaltyRa(
+ PBTC_COEXIST pBtCoexist, bool bForceExec, bool bLowPenaltyRa
+)
+{
+ pCoexDm->bCurLowPenaltyRa = bLowPenaltyRa;
+
+ if (!bForceExec) {
+ if (pCoexDm->bPreLowPenaltyRa == pCoexDm->bCurLowPenaltyRa)
+ return;
+ }
+ halbtc8723b1ant_SetSwPenaltyTxRateAdaptive(
+ pBtCoexist, pCoexDm->bCurLowPenaltyRa
+ );
+
+ pCoexDm->bPreLowPenaltyRa = pCoexDm->bCurLowPenaltyRa;
+}
+
+static void halbtc8723b1ant_SetCoexTable(
+ PBTC_COEXIST pBtCoexist,
+ u32 val0x6c0,
+ u32 val0x6c4,
+ u32 val0x6c8,
+ u8 val0x6cc
+)
+{
+ BTC_PRINT(
+ BTC_MSG_ALGORITHM,
+ ALGO_TRACE_SW_EXEC,
+ ("[BTCoex], set coex table, set 0x6c0 = 0x%x\n", val0x6c0)
+ );
+ pBtCoexist->fBtcWrite4Byte(pBtCoexist, 0x6c0, val0x6c0);
+
+ BTC_PRINT(
+ BTC_MSG_ALGORITHM,
+ ALGO_TRACE_SW_EXEC,
+ ("[BTCoex], set coex table, set 0x6c4 = 0x%x\n", val0x6c4)
+ );
+ pBtCoexist->fBtcWrite4Byte(pBtCoexist, 0x6c4, val0x6c4);
+
+ BTC_PRINT(
+ BTC_MSG_ALGORITHM,
+ ALGO_TRACE_SW_EXEC,
+ ("[BTCoex], set coex table, set 0x6c8 = 0x%x\n", val0x6c8)
+ );
+ pBtCoexist->fBtcWrite4Byte(pBtCoexist, 0x6c8, val0x6c8);
+
+ BTC_PRINT(
+ BTC_MSG_ALGORITHM,
+ ALGO_TRACE_SW_EXEC,
+ ("[BTCoex], set coex table, set 0x6cc = 0x%x\n", val0x6cc)
+ );
+ pBtCoexist->fBtcWrite1Byte(pBtCoexist, 0x6cc, val0x6cc);
+}
+
+static void halbtc8723b1ant_CoexTable(
+ PBTC_COEXIST pBtCoexist,
+ bool bForceExec,
+ u32 val0x6c0,
+ u32 val0x6c4,
+ u32 val0x6c8,
+ u8 val0x6cc
+)
+{
+ BTC_PRINT(
+ BTC_MSG_ALGORITHM,
+ ALGO_TRACE_SW,
+ (
+ "[BTCoex], %s write Coex Table 0x6c0 = 0x%x, 0x6c4 = 0x%x, 0x6cc = 0x%x\n",
+ (bForceExec ? "force to" : ""),
+ val0x6c0, val0x6c4, val0x6cc
+ )
+ );
+ pCoexDm->curVal0x6c0 = val0x6c0;
+ pCoexDm->curVal0x6c4 = val0x6c4;
+ pCoexDm->curVal0x6c8 = val0x6c8;
+ pCoexDm->curVal0x6cc = val0x6cc;
+
+ if (!bForceExec) {
+ if (
+ (pCoexDm->preVal0x6c0 == pCoexDm->curVal0x6c0) &&
+ (pCoexDm->preVal0x6c4 == pCoexDm->curVal0x6c4) &&
+ (pCoexDm->preVal0x6c8 == pCoexDm->curVal0x6c8) &&
+ (pCoexDm->preVal0x6cc == pCoexDm->curVal0x6cc)
+ )
+ return;
+ }
+
+ halbtc8723b1ant_SetCoexTable(
+ pBtCoexist, val0x6c0, val0x6c4, val0x6c8, val0x6cc
+ );
+
+ pCoexDm->preVal0x6c0 = pCoexDm->curVal0x6c0;
+ pCoexDm->preVal0x6c4 = pCoexDm->curVal0x6c4;
+ pCoexDm->preVal0x6c8 = pCoexDm->curVal0x6c8;
+ pCoexDm->preVal0x6cc = pCoexDm->curVal0x6cc;
+}
+
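+/* Select one of the predefined coex-table register sets (0x6c0/0x6c4/ */
+/* 0x6c8/0x6cc). The 32-bit patterns below are presumably vendor-tuned */
+/* WiFi/BT arbitration tables; they are written out verbatim. */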
+static void halbtc8723b1ant_CoexTableWithType(
+ PBTC_COEXIST pBtCoexist, bool bForceExec, u8 type
+)
+{
+ BTC_PRINT(
+ BTC_MSG_ALGORITHM,
+ ALGO_TRACE,
+ ("[BTCoex], ********** CoexTable(%d) **********\n", type)
+ );
+
+ pCoexSta->nCoexTableType = type;
+
+ switch (type) {
+ case 0:
+ halbtc8723b1ant_CoexTable(
+ pBtCoexist, bForceExec, 0x55555555, 0x55555555, 0xffffff, 0x3
+ );
+ break;
+ case 1:
+ halbtc8723b1ant_CoexTable(
+ pBtCoexist, bForceExec, 0x55555555, 0x5a5a5a5a, 0xffffff, 0x3
+ );
+ break;
+ case 2:
+ halbtc8723b1ant_CoexTable(
+ pBtCoexist, bForceExec, 0x5a5a5a5a, 0x5a5a5a5a, 0xffffff, 0x3
+ );
+ break;
+ case 3:
+ halbtc8723b1ant_CoexTable(
+ pBtCoexist, bForceExec, 0xaaaa5555, 0xaaaa5a5a, 0xffffff, 0x3
+ );
+ break;
+ case 4:
+ halbtc8723b1ant_CoexTable(
+ pBtCoexist, bForceExec, 0x55555555, 0xaaaa5a5a, 0xffffff, 0x3
+ );
+ break;
+ case 5:
+ halbtc8723b1ant_CoexTable(
+ pBtCoexist, bForceExec, 0x5a5a5a5a, 0xaaaa5a5a, 0xffffff, 0x3
+ );
+ break;
+ case 6:
+ halbtc8723b1ant_CoexTable(
+ pBtCoexist, bForceExec, 0x55555555, 0xaaaaaaaa, 0xffffff, 0x3
+ );
+ break;
+ case 7:
+ halbtc8723b1ant_CoexTable(
+ pBtCoexist, bForceExec, 0xaaaaaaaa, 0xaaaaaaaa, 0xffffff, 0x3
+ );
+ break;
+ default:
+ break;
+ }
+}
+
+static void halbtc8723b1ant_SetFwIgnoreWlanAct(
+ PBTC_COEXIST pBtCoexist, bool bEnable
+)
+{
+ u8 H2C_Parameter[1] = {0};
+
+ if (bEnable)
+ H2C_Parameter[0] |= BIT0; /* function enable */
+
+ BTC_PRINT(
+ BTC_MSG_ALGORITHM,
+ ALGO_TRACE_FW_EXEC,
+ (
+ "[BTCoex], set FW for BT Ignore Wlan_Act, FW write 0x63 = 0x%x\n",
+ H2C_Parameter[0]
+ )
+ );
+
+ pBtCoexist->fBtcFillH2c(pBtCoexist, 0x63, 1, H2C_Parameter);
+}
+
+static void halbtc8723b1ant_IgnoreWlanAct(
+ PBTC_COEXIST pBtCoexist, bool bForceExec, bool bEnable
+)
+{
+ BTC_PRINT(
+ BTC_MSG_ALGORITHM,
+ ALGO_TRACE_FW,
+ (
+ "[BTCoex], %s turn Ignore WlanAct %s\n",
+ (bForceExec ? "force to" : ""),
+ (bEnable ? "ON" : "OFF")
+ )
+ );
+ pCoexDm->bCurIgnoreWlanAct = bEnable;
+
+ if (!bForceExec) {
+ BTC_PRINT(
+ BTC_MSG_ALGORITHM,
+ ALGO_TRACE_FW_DETAIL,
+ (
+ "[BTCoex], bPreIgnoreWlanAct = %d, bCurIgnoreWlanAct = %d!!\n",
+ pCoexDm->bPreIgnoreWlanAct,
+ pCoexDm->bCurIgnoreWlanAct
+ )
+ );
+
+ if (pCoexDm->bPreIgnoreWlanAct == pCoexDm->bCurIgnoreWlanAct)
+ return;
+ }
+ halbtc8723b1ant_SetFwIgnoreWlanAct(pBtCoexist, bEnable);
+
+ pCoexDm->bPreIgnoreWlanAct = pCoexDm->bCurIgnoreWlanAct;
+}
+
+static void halbtc8723b1ant_SetLpsRpwm(
+ PBTC_COEXIST pBtCoexist, u8 lpsVal, u8 rpwmVal
+)
+{
+ u8 lps = lpsVal;
+ u8 rpwm = rpwmVal;
+
+ pBtCoexist->fBtcSet(pBtCoexist, BTC_SET_U1_LPS_VAL, &lps);
+ pBtCoexist->fBtcSet(pBtCoexist, BTC_SET_U1_RPWM_VAL, &rpwm);
+}
+
+static void halbtc8723b1ant_LpsRpwm(
+ PBTC_COEXIST pBtCoexist, bool bForceExec, u8 lpsVal, u8 rpwmVal
+)
+{
+ BTC_PRINT(
+ BTC_MSG_ALGORITHM,
+ ALGO_TRACE_FW,
+ (
+ "[BTCoex], %s set lps/rpwm = 0x%x/0x%x\n",
+ (bForceExec ? "force to" : ""),
+ lpsVal,
+ rpwmVal
+ )
+ );
+ pCoexDm->curLps = lpsVal;
+ pCoexDm->curRpwm = rpwmVal;
+
+ if (!bForceExec) {
+ BTC_PRINT(
+ BTC_MSG_ALGORITHM,
+ ALGO_TRACE_FW_DETAIL,
+ (
+ "[BTCoex], LPS-RxBeaconMode = 0x%x , LPS-RPWM = 0x%x!!\n",
+ pCoexDm->curLps,
+ pCoexDm->curRpwm
+ )
+ );
+
+ if (
+ (pCoexDm->preLps == pCoexDm->curLps) &&
+ (pCoexDm->preRpwm == pCoexDm->curRpwm)
+ ) {
+ BTC_PRINT(
+ BTC_MSG_ALGORITHM,
+ ALGO_TRACE_FW_DETAIL,
+ (
+ "[BTCoex], LPS-RPWM_Last = 0x%x , LPS-RPWM_Now = 0x%x!!\n",
+ pCoexDm->preRpwm,
+ pCoexDm->curRpwm
+ )
+ );
+
+ return;
+ }
+ }
+ halbtc8723b1ant_SetLpsRpwm(pBtCoexist, lpsVal, rpwmVal);
+
+ pCoexDm->preLps = pCoexDm->curLps;
+ pCoexDm->preRpwm = pCoexDm->curRpwm;
+}
+
+static void halbtc8723b1ant_SwMechanism(
+ PBTC_COEXIST pBtCoexist, bool bLowPenaltyRA
+)
+{
+ BTC_PRINT(
+ BTC_MSG_ALGORITHM,
+ ALGO_BT_MONITOR,
+ ("[BTCoex], SM[LpRA] = %d\n", bLowPenaltyRA)
+ );
+
+ halbtc8723b1ant_LowPenaltyRa(pBtCoexist, NORMAL_EXEC, bLowPenaltyRA);
+}
+
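+/* Route the shared antenna to WiFi, BT or PTA control. Old firmware */
+/* (fwVer < 0xc0000) or the PG option selects the external switch path */
+/* (0x92c); otherwise the internal switch (0x948) is used. bInitHwCfg and */
+/* bWifiOff select the one-time init and the power-off sequences. */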
+static void halbtc8723b1ant_SetAntPath(
+ PBTC_COEXIST pBtCoexist, u8 antPosType, bool bInitHwCfg, bool bWifiOff
+)
+{
+ PBTC_BOARD_INFO pBoardInfo = &pBtCoexist->boardInfo;
+ u32 fwVer = 0, u4Tmp = 0, cntBtCalChk = 0;
+ bool bPgExtSwitch = false;
+ bool bUseExtSwitch = false;
+ bool bIsInMpMode = false;
+ u8 H2C_Parameter[2] = {0}, u1Tmp = 0;
+
+ pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_BL_EXT_SWITCH, &bPgExtSwitch);
+ pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_U4_WIFI_FW_VER, &fwVer); /* [31:16]=fw ver, [15:0]=fw sub ver */
+
+ if ((fwVer > 0 && fwVer < 0xc0000) || bPgExtSwitch)
+ bUseExtSwitch = true;
+
+ if (bInitHwCfg) {
+ pBtCoexist->fBtcSetRfReg(pBtCoexist, BTC_RF_A, 0x1, 0xfffff, 0x780); /* WiFi TRx Mask on */
+ pBtCoexist->fBtcSetBtReg(pBtCoexist, BTC_BT_REG_RF, 0x3c, 0x15); /* BT TRx Mask on */
+
+ if (fwVer >= 0x180000) {
+ /* Use H2C to set GNT_BT to HIGH */
+ H2C_Parameter[0] = 1;
+ pBtCoexist->fBtcFillH2c(pBtCoexist, 0x6E, 1, H2C_Parameter);
+ } else /* set grant_bt to high */
+ pBtCoexist->fBtcWrite1Byte(pBtCoexist, 0x765, 0x18);
+
+ /* set wlan_act control by PTA */
+ pBtCoexist->fBtcWrite1Byte(pBtCoexist, 0x76e, 0x4);
+
+ pBtCoexist->fBtcWrite1ByteBitMask(pBtCoexist, 0x67, 0x20, 0x1); /* BT select s0/s1 is controlled by WiFi */
+
+ pBtCoexist->fBtcWrite1ByteBitMask(pBtCoexist, 0x39, 0x8, 0x1);
+ pBtCoexist->fBtcWrite1Byte(pBtCoexist, 0x974, 0xff);
+ pBtCoexist->fBtcWrite1ByteBitMask(pBtCoexist, 0x944, 0x3, 0x3);
+ pBtCoexist->fBtcWrite1Byte(pBtCoexist, 0x930, 0x77);
+ } else if (bWifiOff) {
+ if (fwVer >= 0x180000) {
+ /* Use H2C to set GNT_BT to HIGH */
+ H2C_Parameter[0] = 1;
+ pBtCoexist->fBtcFillH2c(pBtCoexist, 0x6E, 1, H2C_Parameter);
+ } else /* set grant_bt to high */
+ pBtCoexist->fBtcWrite1Byte(pBtCoexist, 0x765, 0x18);
+
+ /* set wlan_act to always low */
+ pBtCoexist->fBtcWrite1Byte(pBtCoexist, 0x76e, 0x4);
+
+ pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_BL_WIFI_IS_IN_MP_MODE, &bIsInMpMode);
+ if (!bIsInMpMode)
+ pBtCoexist->fBtcWrite1ByteBitMask(pBtCoexist, 0x67, 0x20, 0x0); /* BT select s0/s1 is controlled by BT */
+ else
+ pBtCoexist->fBtcWrite1ByteBitMask(pBtCoexist, 0x67, 0x20, 0x1); /* BT select s0/s1 is controlled by WiFi */
+
+ /* 0x4c[24:23]= 00, Set Antenna control by BT_RFE_CTRL BT Vendor 0xac = 0xf002 */
+ u4Tmp = pBtCoexist->fBtcRead4Byte(pBtCoexist, 0x4c);
+ u4Tmp &= ~BIT23;
+ u4Tmp &= ~BIT24;
+ pBtCoexist->fBtcWrite4Byte(pBtCoexist, 0x4c, u4Tmp);
+ } else {
+ /* Use H2C to set GNT_BT to LOW */
+ if (fwVer >= 0x180000) {
+ if (pBtCoexist->fBtcRead1Byte(pBtCoexist, 0x765) != 0) {
+ H2C_Parameter[0] = 0;
+ pBtCoexist->fBtcFillH2c(pBtCoexist, 0x6E, 1, H2C_Parameter);
+ }
+ } else {
+ /* BT calibration check */
+ while (cntBtCalChk <= 20) {
+ u1Tmp = pBtCoexist->fBtcRead1Byte(pBtCoexist, 0x49d);
+ cntBtCalChk++;
+
+ if (u1Tmp & BIT0) {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, ("[BTCoex], ########### BT is calibrating (wait cnt =%d) ###########\n", cntBtCalChk));
+ mdelay(50);
+ } else {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, ("[BTCoex], ********** BT is NOT calibrating (wait cnt =%d)**********\n", cntBtCalChk));
+ break;
+ }
+ }
+
+ /* set grant_bt to PTA */
+ pBtCoexist->fBtcWrite1Byte(pBtCoexist, 0x765, 0x0);
+ }
+
+ if (pBtCoexist->fBtcRead1Byte(pBtCoexist, 0x76e) != 0xc)
+ /* set wlan_act control by PTA */
+ pBtCoexist->fBtcWrite1Byte(pBtCoexist, 0x76e, 0xc);
+ }
+
+ if (bUseExtSwitch) {
+ if (bInitHwCfg) {
+ /* 0x4c[23]= 0, 0x4c[24]= 1 Antenna control by WL/BT */
+ u4Tmp = pBtCoexist->fBtcRead4Byte(pBtCoexist, 0x4c);
+ u4Tmp &= ~BIT23;
+ u4Tmp |= BIT24;
+ pBtCoexist->fBtcWrite4Byte(pBtCoexist, 0x4c, u4Tmp);
+
+ pBtCoexist->fBtcWrite4Byte(pBtCoexist, 0x948, 0x0); /* fixed internal switch S1->WiFi, S0->BT */
+
+ if (pBoardInfo->btdmAntPos == BTC_ANTENNA_AT_MAIN_PORT) {
+ /* tell firmware "no antenna inverse" */
+ H2C_Parameter[0] = 0;
+ H2C_Parameter[1] = 1; /* ext switch type */
+ pBtCoexist->fBtcFillH2c(pBtCoexist, 0x65, 2, H2C_Parameter);
+ } else {
+ /* tell firmware "antenna inverse" */
+ H2C_Parameter[0] = 1;
+ H2C_Parameter[1] = 1; /* ext switch type */
+ pBtCoexist->fBtcFillH2c(pBtCoexist, 0x65, 2, H2C_Parameter);
+ }
+ }
+
+
+ /* ext switch setting */
+ switch (antPosType) {
+ case BTC_ANT_PATH_WIFI:
+ if (pBoardInfo->btdmAntPos == BTC_ANTENNA_AT_MAIN_PORT)
+ pBtCoexist->fBtcWrite1ByteBitMask(pBtCoexist, 0x92c, 0x3, 0x1);
+ else
+ pBtCoexist->fBtcWrite1ByteBitMask(pBtCoexist, 0x92c, 0x3, 0x2);
+ break;
+ case BTC_ANT_PATH_BT:
+ if (pBoardInfo->btdmAntPos == BTC_ANTENNA_AT_MAIN_PORT)
+ pBtCoexist->fBtcWrite1ByteBitMask(pBtCoexist, 0x92c, 0x3, 0x2);
+ else
+ pBtCoexist->fBtcWrite1ByteBitMask(pBtCoexist, 0x92c, 0x3, 0x1);
+ break;
+ default:
+ case BTC_ANT_PATH_PTA:
+ if (pBoardInfo->btdmAntPos == BTC_ANTENNA_AT_MAIN_PORT)
+ pBtCoexist->fBtcWrite1ByteBitMask(pBtCoexist, 0x92c, 0x3, 0x1);
+ else
+ pBtCoexist->fBtcWrite1ByteBitMask(pBtCoexist, 0x92c, 0x3, 0x2);
+ break;
+ }
+
+ } else {
+ if (bInitHwCfg) {
+ /* 0x4c[23]= 1, 0x4c[24]= 0 Antenna control by 0x64 */
+ u4Tmp = pBtCoexist->fBtcRead4Byte(pBtCoexist, 0x4c);
+ u4Tmp |= BIT23;
+ u4Tmp &= ~BIT24;
+ pBtCoexist->fBtcWrite4Byte(pBtCoexist, 0x4c, u4Tmp);
+
+ /* Fix Ext switch Main->S1, Aux->S0 */
+ pBtCoexist->fBtcWrite1ByteBitMask(pBtCoexist, 0x64, 0x1, 0x0);
+
+ if (pBoardInfo->btdmAntPos == BTC_ANTENNA_AT_MAIN_PORT) {
+
+ /* tell firmware "no antenna inverse" */
+ H2C_Parameter[0] = 0;
+ H2C_Parameter[1] = 0; /* internal switch type */
+ pBtCoexist->fBtcFillH2c(pBtCoexist, 0x65, 2, H2C_Parameter);
+ } else {
+
+ /* tell firmware "antenna inverse" */
+ H2C_Parameter[0] = 1;
+ H2C_Parameter[1] = 0; /* internal switch type */
+ pBtCoexist->fBtcFillH2c(pBtCoexist, 0x65, 2, H2C_Parameter);
+ }
+ }
+
+
+ /* internal switch setting */
+ switch (antPosType) {
+ case BTC_ANT_PATH_WIFI:
+ if (pBoardInfo->btdmAntPos == BTC_ANTENNA_AT_MAIN_PORT)
+ pBtCoexist->fBtcWrite4Byte(pBtCoexist, 0x948, 0x0);
+ else
+ pBtCoexist->fBtcWrite4Byte(pBtCoexist, 0x948, 0x280);
+ break;
+ case BTC_ANT_PATH_BT:
+ if (pBoardInfo->btdmAntPos == BTC_ANTENNA_AT_MAIN_PORT)
+ pBtCoexist->fBtcWrite4Byte(pBtCoexist, 0x948, 0x280);
+ else
+ pBtCoexist->fBtcWrite4Byte(pBtCoexist, 0x948, 0x0);
+ break;
+ default:
+ case BTC_ANT_PATH_PTA:
+ if (pBoardInfo->btdmAntPos == BTC_ANTENNA_AT_MAIN_PORT)
+ pBtCoexist->fBtcWrite4Byte(pBtCoexist, 0x948, 0x200);
+ else
+ pBtCoexist->fBtcWrite4Byte(pBtCoexist, 0x948, 0x80);
+ break;
+ }
+ }
+}
+
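+/* Build and send the 5-byte PS-TDMA H2C command (0x60). In AP mode, */
+/* BIT4/BIT5 of byte1 and BIT5/BIT6 of byte5 are adjusted so the firmware */
+/* uses its 1-antenna AP-mode variant of the schedule. */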
+static void halbtc8723b1ant_SetFwPstdma(
+ PBTC_COEXIST pBtCoexist, u8 byte1, u8 byte2, u8 byte3, u8 byte4, u8 byte5
+)
+{
+ u8 H2C_Parameter[5] = {0};
+ u8 realByte1 = byte1, realByte5 = byte5;
+ bool bApEnable = false;
+
+ pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_BL_WIFI_AP_MODE_ENABLE, &bApEnable);
+
+ if (bApEnable) {
+ if (byte1&BIT4 && !(byte1&BIT5)) {
+ BTC_PRINT(
+ BTC_MSG_INTERFACE,
+ INTF_NOTIFY,
+ ("[BTCoex], FW for 1Ant AP mode\n")
+ );
+ realByte1 &= ~BIT4;
+ realByte1 |= BIT5;
+
+ realByte5 |= BIT5;
+ realByte5 &= ~BIT6;
+ }
+ }
+
+ H2C_Parameter[0] = realByte1;
+ H2C_Parameter[1] = byte2;
+ H2C_Parameter[2] = byte3;
+ H2C_Parameter[3] = byte4;
+ H2C_Parameter[4] = realByte5;
+
+ pCoexDm->psTdmaPara[0] = realByte1;
+ pCoexDm->psTdmaPara[1] = byte2;
+ pCoexDm->psTdmaPara[2] = byte3;
+ pCoexDm->psTdmaPara[3] = byte4;
+ pCoexDm->psTdmaPara[4] = realByte5;
+
+ BTC_PRINT(
+ BTC_MSG_ALGORITHM,
+ ALGO_TRACE_FW_EXEC,
+ (
+ "[BTCoex], PS-TDMA H2C cmd = 0x%x%08x\n",
+ H2C_Parameter[0],
+ H2C_Parameter[1]<<24|
+ H2C_Parameter[2]<<16|
+ H2C_Parameter[3]<<8|
+ H2C_Parameter[4]
+ )
+ );
+
+ pBtCoexist->fBtcFillH2c(pBtCoexist, 0x60, 5, H2C_Parameter);
+}
+
+
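+/* Program one of the numbered PS-TDMA schedules referenced by the action */
+/* routines below. For cases 1/2 the WiFi slot length is also adjusted by */
+/* the number of scanned APs, and when LPS is not forced (A2DP-only cases */
+/* 1/2/9/11) the schedule bytes are tweaked as described inline. */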
+static void halbtc8723b1ant_PsTdma(
+ PBTC_COEXIST pBtCoexist, bool bForceExec, bool bTurnOn, u8 type
+)
+{
+ PBTC_BT_LINK_INFO pBtLinkInfo = &pBtCoexist->btLinkInfo;
+ bool bWifiBusy = false;
+ u8 rssiAdjustVal = 0;
+ u8 psTdmaByte4Val = 0x50, psTdmaByte0Val = 0x51, psTdmaByte3Val = 0x10;
+ s8 nWiFiDurationAdjust = 0x0;
+ /* u32 fwVer = 0; */
+
+ /* BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], %s turn %s PS TDMA, type =%d\n", */
+ /* (bForceExec? "force to":""), (bTurnOn? "ON":"OFF"), type)); */
+ pCoexDm->bCurPsTdmaOn = bTurnOn;
+ pCoexDm->curPsTdma = type;
+
+ pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_BL_WIFI_BUSY, &bWifiBusy);
+
+ if (pCoexDm->bCurPsTdmaOn) {
+ BTC_PRINT(
+ BTC_MSG_ALGORITHM,
+ ALGO_TRACE,
+ (
+ "[BTCoex], ********** TDMA(on, %d) **********\n",
+ pCoexDm->curPsTdma
+ )
+ );
+ } else {
+ BTC_PRINT(
+ BTC_MSG_ALGORITHM,
+ ALGO_TRACE,
+ (
+ "[BTCoex], ********** TDMA(off, %d) **********\n",
+ pCoexDm->curPsTdma
+ )
+ );
+ }
+
+ if (!bForceExec) {
+ if (
+ (pCoexDm->bPrePsTdmaOn == pCoexDm->bCurPsTdmaOn) &&
+ (pCoexDm->prePsTdma == pCoexDm->curPsTdma)
+ )
+ return;
+ }
+
+ if (pCoexSta->nScanAPNum <= 5)
+ nWiFiDurationAdjust = 5;
+ else if (pCoexSta->nScanAPNum >= 40)
+ nWiFiDurationAdjust = -15;
+ else if (pCoexSta->nScanAPNum >= 20)
+ nWiFiDurationAdjust = -10;
+
+ if (!pCoexSta->bForceLpsOn) { /* only for A2DP-only case 1/2/9/11 */
+ psTdmaByte0Val = 0x61; /* no null-pkt */
+ psTdmaByte3Val = 0x11; /* no tx-pause at BT-slot */
+ psTdmaByte4Val = 0x10; /* 0x778 = d/1 toggle */
+ }
+
+
+ if (bTurnOn) {
+ if (pBtLinkInfo->bSlaveRole == true)
+ psTdmaByte4Val = psTdmaByte4Val | 0x1; /* 0x778 = 0x1 at wifi slot (no blocking BT Low-Pri pkts) */
+
+
+ switch (type) {
+ default:
+ halbtc8723b1ant_SetFwPstdma(
+ pBtCoexist, 0x51, 0x1a, 0x1a, 0x0, psTdmaByte4Val
+ );
+ break;
+ case 1:
+ halbtc8723b1ant_SetFwPstdma(
+ pBtCoexist,
+ psTdmaByte0Val,
+ 0x3a+nWiFiDurationAdjust,
+ 0x03,
+ psTdmaByte3Val,
+ psTdmaByte4Val
+ );
+ break;
+ case 2:
+ halbtc8723b1ant_SetFwPstdma(
+ pBtCoexist,
+ psTdmaByte0Val,
+ 0x2d+nWiFiDurationAdjust,
+ 0x03,
+ psTdmaByte3Val,
+ psTdmaByte4Val
+ );
+ break;
+ case 3:
+ halbtc8723b1ant_SetFwPstdma(
+ pBtCoexist, 0x51, 0x1d, 0x1d, 0x0, 0x10
+ );
+ break;
+ case 4:
+ halbtc8723b1ant_SetFwPstdma(
+ pBtCoexist, 0x93, 0x15, 0x3, 0x14, 0x0
+ );
+ break;
+ case 5:
+ halbtc8723b1ant_SetFwPstdma(
+ pBtCoexist, 0x61, 0x15, 0x3, 0x11, 0x10
+ );
+ break;
+ case 6:
+ halbtc8723b1ant_SetFwPstdma(
+ pBtCoexist, 0x61, 0x20, 0x3, 0x11, 0x11
+ );
+ break;
+ case 7:
+ halbtc8723b1ant_SetFwPstdma(pBtCoexist, 0x13, 0xc, 0x5, 0x0, 0x0);
+ break;
+ case 8:
+ halbtc8723b1ant_SetFwPstdma(
+ pBtCoexist, 0x93, 0x25, 0x3, 0x10, 0x0
+ );
+ break;
+ case 9:
+ halbtc8723b1ant_SetFwPstdma(
+ pBtCoexist,
+ psTdmaByte0Val,
+ 0x21,
+ 0x3,
+ psTdmaByte3Val,
+ psTdmaByte4Val
+ );
+ break;
+ case 10:
+ halbtc8723b1ant_SetFwPstdma(pBtCoexist, 0x13, 0xa, 0xa, 0x0, 0x40);
+ break;
+ case 11:
+ halbtc8723b1ant_SetFwPstdma(
+ pBtCoexist,
+ psTdmaByte0Val,
+ 0x21,
+ 0x03,
+ psTdmaByte3Val,
+ psTdmaByte4Val
+ );
+ break;
+ case 12:
+ halbtc8723b1ant_SetFwPstdma(
+ pBtCoexist, 0x51, 0x0a, 0x0a, 0x0, 0x50
+ );
+ break;
+ case 13:
+ halbtc8723b1ant_SetFwPstdma(
+ pBtCoexist, 0x51, 0x12, 0x12, 0x0, 0x10
+ );
+ break;
+ case 14:
+ halbtc8723b1ant_SetFwPstdma(
+ pBtCoexist, 0x51, 0x21, 0x3, 0x10, psTdmaByte4Val
+ );
+ break;
+ case 15:
+ halbtc8723b1ant_SetFwPstdma(
+ pBtCoexist, 0x13, 0xa, 0x3, 0x8, 0x0
+ );
+ break;
+ case 16:
+ halbtc8723b1ant_SetFwPstdma(
+ pBtCoexist, 0x93, 0x15, 0x3, 0x10, 0x0
+ );
+ break;
+ case 18:
+ halbtc8723b1ant_SetFwPstdma(
+ pBtCoexist, 0x93, 0x25, 0x3, 0x10, 0x0
+ );
+ break;
+ case 20:
+ halbtc8723b1ant_SetFwPstdma(
+ pBtCoexist, 0x61, 0x3f, 0x03, 0x11, 0x10
+ );
+ break;
+ case 21:
+ halbtc8723b1ant_SetFwPstdma(
+ pBtCoexist, 0x61, 0x25, 0x03, 0x11, 0x11
+ );
+ break;
+ case 22:
+ halbtc8723b1ant_SetFwPstdma(
+ pBtCoexist, 0x61, 0x25, 0x03, 0x11, 0x10
+ );
+ break;
+ case 23:
+ halbtc8723b1ant_SetFwPstdma(
+ pBtCoexist, 0xe3, 0x25, 0x3, 0x31, 0x18
+ );
+ break;
+ case 24:
+ halbtc8723b1ant_SetFwPstdma(
+ pBtCoexist, 0xe3, 0x15, 0x3, 0x31, 0x18
+ );
+ break;
+ case 25:
+ halbtc8723b1ant_SetFwPstdma(
+ pBtCoexist, 0xe3, 0xa, 0x3, 0x31, 0x18
+ );
+ break;
+ case 26:
+ halbtc8723b1ant_SetFwPstdma(
+ pBtCoexist, 0xe3, 0xa, 0x3, 0x31, 0x18
+ );
+ break;
+ case 27:
+ halbtc8723b1ant_SetFwPstdma(
+ pBtCoexist, 0xe3, 0x25, 0x3, 0x31, 0x98
+ );
+ break;
+ case 28:
+ halbtc8723b1ant_SetFwPstdma(
+ pBtCoexist, 0x69, 0x25, 0x3, 0x31, 0x0
+ );
+ break;
+ case 29:
+ halbtc8723b1ant_SetFwPstdma(
+ pBtCoexist, 0xab, 0x1a, 0x1a, 0x1, 0x10
+ );
+ break;
+ case 30:
+ halbtc8723b1ant_SetFwPstdma(
+ pBtCoexist, 0x51, 0x30, 0x3, 0x10, 0x10
+ );
+ break;
+ case 31:
+ halbtc8723b1ant_SetFwPstdma(
+ pBtCoexist, 0xd3, 0x1a, 0x1a, 0x0, 0x58
+ );
+ break;
+ case 32:
+ halbtc8723b1ant_SetFwPstdma(
+ pBtCoexist, 0x61, 0x35, 0x3, 0x11, 0x11
+ );
+ break;
+ case 33:
+ halbtc8723b1ant_SetFwPstdma(
+ pBtCoexist, 0xa3, 0x25, 0x3, 0x30, 0x90
+ );
+ break;
+ case 34:
+ halbtc8723b1ant_SetFwPstdma(
+ pBtCoexist, 0x53, 0x1a, 0x1a, 0x0, 0x10
+ );
+ break;
+ case 35:
+ halbtc8723b1ant_SetFwPstdma(
+ pBtCoexist, 0x63, 0x1a, 0x1a, 0x0, 0x10
+ );
+ break;
+ case 36:
+ halbtc8723b1ant_SetFwPstdma(
+ pBtCoexist, 0xd3, 0x12, 0x3, 0x14, 0x50
+ );
+ break;
+ case 40: /* SoftAP only, no STA associated, BT disabled: TDMA mode for power saving */
+ /* with the screen off, SoftAP mode costs 70-80 mA on a phone */
+ halbtc8723b1ant_SetFwPstdma(
+ pBtCoexist, 0x23, 0x18, 0x00, 0x10, 0x24
+ );
+ break;
+ }
+ } else {
+
+ /* disable PS tdma */
+ switch (type) {
+ case 8: /* PTA Control */
+ halbtc8723b1ant_SetFwPstdma(pBtCoexist, 0x8, 0x0, 0x0, 0x0, 0x0);
+ halbtc8723b1ant_SetAntPath(
+ pBtCoexist, BTC_ANT_PATH_PTA, false, false
+ );
+ break;
+ case 0:
+ default: /* Software control, Antenna at BT side */
+ halbtc8723b1ant_SetFwPstdma(pBtCoexist, 0x0, 0x0, 0x0, 0x0, 0x0);
+ halbtc8723b1ant_SetAntPath(
+ pBtCoexist, BTC_ANT_PATH_BT, false, false
+ );
+ break;
+ case 9: /* Software control, Antenna at WiFi side */
+ halbtc8723b1ant_SetFwPstdma(pBtCoexist, 0x0, 0x0, 0x0, 0x0, 0x0);
+ halbtc8723b1ant_SetAntPath(
+ pBtCoexist, BTC_ANT_PATH_WIFI, false, false
+ );
+ break;
+ }
+ }
+
+ rssiAdjustVal = 0;
+ pBtCoexist->fBtcSet(
+ pBtCoexist, BTC_SET_U1_RSSI_ADJ_VAL_FOR_1ANT_COEX_TYPE, &rssiAdjustVal
+ );
+
+ /* update pre state */
+ pCoexDm->bPrePsTdmaOn = pCoexDm->bCurPsTdmaOn;
+ pCoexDm->prePsTdma = pCoexDm->curPsTdma;
+}
+
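+/* Return true for the "common" idle combinations of WiFi/BT state, in */
+/* which case the caller skips the per-profile coexistence algorithms. */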
+static bool halbtc8723b1ant_IsCommonAction(PBTC_COEXIST pBtCoexist)
+{
+ bool bCommon = false, bWifiConnected = false, bWifiBusy = false;
+
+ pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_BL_WIFI_CONNECTED, &bWifiConnected);
+ pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_BL_WIFI_BUSY, &bWifiBusy);
+
+ if (
+ !bWifiConnected &&
+ BT_8723B_1ANT_BT_STATUS_NON_CONNECTED_IDLE == pCoexDm->btStatus
+ ) {
+ BTC_PRINT(
+ BTC_MSG_ALGORITHM,
+ ALGO_TRACE,
+ ("[BTCoex], Wifi non connected-idle + BT non connected-idle!!\n")
+ );
+
+ /* halbtc8723b1ant_SwMechanism(pBtCoexist, false); */
+
+ bCommon = true;
+ } else if (
+ bWifiConnected &&
+ (BT_8723B_1ANT_BT_STATUS_NON_CONNECTED_IDLE == pCoexDm->btStatus)
+ ) {
+ BTC_PRINT(
+ BTC_MSG_ALGORITHM,
+ ALGO_TRACE,
+ ("[BTCoex], Wifi connected + BT non connected-idle!!\n")
+ );
+
+ /* halbtc8723b1ant_SwMechanism(pBtCoexist, false); */
+
+ bCommon = true;
+ } else if (
+ !bWifiConnected &&
+ (BT_8723B_1ANT_BT_STATUS_CONNECTED_IDLE == pCoexDm->btStatus)
+ ) {
+ BTC_PRINT(
+ BTC_MSG_ALGORITHM,
+ ALGO_TRACE,
+ ("[BTCoex], Wifi non connected-idle + BT connected-idle!!\n")
+ );
+
+ /* halbtc8723b1ant_SwMechanism(pBtCoexist, false); */
+
+ bCommon = true;
+ } else if (
+ bWifiConnected &&
+ (BT_8723B_1ANT_BT_STATUS_CONNECTED_IDLE == pCoexDm->btStatus)
+ ) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Wifi connected + BT connected-idle!!\n"));
+
+ /* halbtc8723b1ant_SwMechanism(pBtCoexist, false); */
+
+ bCommon = true;
+ } else if (
+ !bWifiConnected &&
+ (BT_8723B_1ANT_BT_STATUS_CONNECTED_IDLE != pCoexDm->btStatus)
+ ) {
+ BTC_PRINT(
+ BTC_MSG_ALGORITHM,
+ ALGO_TRACE,
+ ("[BTCoex], Wifi non connected-idle + BT Busy!!\n")
+ );
+
+ /* halbtc8723b1ant_SwMechanism(pBtCoexist, false); */
+
+ bCommon = true;
+ } else {
+ if (bWifiBusy) {
+ BTC_PRINT(
+ BTC_MSG_ALGORITHM,
+ ALGO_TRACE,
+ ("[BTCoex], Wifi Connected-Busy + BT Busy!!\n")
+ );
+ } else {
+ BTC_PRINT(
+ BTC_MSG_ALGORITHM,
+ ALGO_TRACE,
+ ("[BTCoex], Wifi Connected-Idle + BT Busy!!\n")
+ );
+ }
+
+ bCommon = false;
+ }
+
+ return bCommon;
+}
+
+
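+/* Adaptive TDMA tuning for BT ACL traffic: each 2-second sample inspects */
+/* the BT retry count; after enough clean samples the WiFi slot is widened */
+/* (stepping through PS-TDMA cases 11 -> 9 -> 2 -> 1) and on retries it is */
+/* narrowed again. up/dn/m/n/WaitCount implement the hysteresis described */
+/* in the inline comments below. */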
+static void halbtc8723b1ant_TdmaDurationAdjustForAcl(
+ PBTC_COEXIST pBtCoexist, u8 wifiStatus
+)
+{
+ static s32 up, dn, m, n, WaitCount;
+ s32 result; /* 0: no change, +1: increase WiFi duration, -1: decrease WiFi duration */
+ u8 retryCount = 0, btInfoExt;
+ bool bWifiBusy = false;
+
+ BTC_PRINT(
+ BTC_MSG_ALGORITHM,
+ ALGO_TRACE_FW,
+ ("[BTCoex], TdmaDurationAdjustForAcl()\n")
+ );
+
+ if (BT_8723B_1ANT_WIFI_STATUS_CONNECTED_BUSY == wifiStatus)
+ bWifiBusy = true;
+ else
+ bWifiBusy = false;
+
+ if (
+ (BT_8723B_1ANT_WIFI_STATUS_NON_CONNECTED_ASSO_AUTH_SCAN == wifiStatus) ||
+ (BT_8723B_1ANT_WIFI_STATUS_CONNECTED_SCAN == wifiStatus) ||
+ (BT_8723B_1ANT_WIFI_STATUS_CONNECTED_SPECIAL_PKT == wifiStatus)
+ ) {
+ if (
+ pCoexDm->curPsTdma != 1 &&
+ pCoexDm->curPsTdma != 2 &&
+ pCoexDm->curPsTdma != 3 &&
+ pCoexDm->curPsTdma != 9
+ ) {
+ halbtc8723b1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 9);
+ pCoexDm->psTdmaDuAdjType = 9;
+
+ up = 0;
+ dn = 0;
+ m = 1;
+ n = 3;
+ result = 0;
+ WaitCount = 0;
+ }
+ return;
+ }
+
+ if (!pCoexDm->bAutoTdmaAdjust) {
+ pCoexDm->bAutoTdmaAdjust = true;
+ BTC_PRINT(
+ BTC_MSG_ALGORITHM,
+ ALGO_TRACE_FW_DETAIL,
+ ("[BTCoex], first run TdmaDurationAdjust()!!\n")
+ );
+
+ halbtc8723b1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 2);
+ pCoexDm->psTdmaDuAdjType = 2;
+ /* */
+ up = 0;
+ dn = 0;
+ m = 1;
+ n = 3;
+ result = 0;
+ WaitCount = 0;
+ } else {
+ /* acquire the BT TRx retry count from BT_Info byte2 */
+ retryCount = pCoexSta->btRetryCnt;
+ btInfoExt = pCoexSta->btInfoExt;
+ /* BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], retryCount = %d\n", retryCount)); */
+ /* BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], up =%d, dn =%d, m =%d, n =%d, WaitCount =%d\n", */
+ /* up, dn, m, n, WaitCount)); */
+
+ if (pCoexSta->lowPriorityTx > 1050 || pCoexSta->lowPriorityRx > 1250)
+ retryCount++;
+
+ result = 0;
+ WaitCount++;
+
+ if (retryCount == 0) { /* no retry in the last 2-second duration */
+ up++;
+ dn--;
+
+ if (dn <= 0)
+ dn = 0;
+
+ if (up >= n) { /* if the retry count is 0 for n consecutive 2-second intervals, widen the WiFi duration */
+ WaitCount = 0;
+ n = 3;
+ up = 0;
+ dn = 0;
+ result = 1;
+ BTC_PRINT(
+ BTC_MSG_ALGORITHM,
+ ALGO_TRACE_FW_DETAIL,
+ ("[BTCoex], Increase wifi duration!!\n")
+ );
+ }
+ } else if (retryCount <= 3) { /* <=3 retry in the last 2-second duration */
+ up--;
+ dn++;
+
+ if (up <= 0)
+ up = 0;
+
+ if (dn == 2) { /* if the retry count is below 3 for 2 consecutive 2-second intervals, narrow the WiFi duration */
+ if (WaitCount <= 2)
+ m++; /* avoid bouncing back and forth between two levels */
+ else
+ m = 1;
+
+ if (m >= 20) /* cap m at 20: recheck whether to adjust the WiFi duration at most every 120 seconds */
+ m = 20;
+
+ n = 3*m;
+ up = 0;
+ dn = 0;
+ WaitCount = 0;
+ result = -1;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], Decrease wifi duration for retryCounter<3!!\n"));
+ }
+ } else { /* retry count > 3: narrow the WiFi duration as soon as a single interval sees more than 3 retries */
+ if (WaitCount == 1)
+ m++; /* avoid bouncing back and forth between two levels */
+ else
+ m = 1;
+
+ if (m >= 20) /* cap m at 20: recheck whether to adjust the WiFi duration at most every 120 seconds */
+ m = 20;
+
+ n = 3*m;
+ up = 0;
+ dn = 0;
+ WaitCount = 0;
+ result = -1;
+ BTC_PRINT(
+ BTC_MSG_ALGORITHM,
+ ALGO_TRACE_FW_DETAIL,
+ ("[BTCoex], Decrease wifi duration for retryCounter>3!!\n")
+ );
+ }
+
+ if (result == -1) {
+ if (
+ BT_INFO_8723B_1ANT_A2DP_BASIC_RATE(btInfoExt) &&
+ ((pCoexDm->curPsTdma == 1) || (pCoexDm->curPsTdma == 2))
+ ) {
+ halbtc8723b1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 9);
+ pCoexDm->psTdmaDuAdjType = 9;
+ } else if (pCoexDm->curPsTdma == 1) {
+ halbtc8723b1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 2);
+ pCoexDm->psTdmaDuAdjType = 2;
+ } else if (pCoexDm->curPsTdma == 2) {
+ halbtc8723b1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 9);
+ pCoexDm->psTdmaDuAdjType = 9;
+ } else if (pCoexDm->curPsTdma == 9) {
+ halbtc8723b1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 11);
+ pCoexDm->psTdmaDuAdjType = 11;
+ }
+ } else if (result == 1) {
+ if (
+ BT_INFO_8723B_1ANT_A2DP_BASIC_RATE(btInfoExt) &&
+ ((pCoexDm->curPsTdma == 1) || (pCoexDm->curPsTdma == 2))
+ ) {
+ halbtc8723b1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 9);
+ pCoexDm->psTdmaDuAdjType = 9;
+ } else if (pCoexDm->curPsTdma == 11) {
+ halbtc8723b1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 9);
+ pCoexDm->psTdmaDuAdjType = 9;
+ } else if (pCoexDm->curPsTdma == 9) {
+ halbtc8723b1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 2);
+ pCoexDm->psTdmaDuAdjType = 2;
+ } else if (pCoexDm->curPsTdma == 2) {
+ halbtc8723b1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 1);
+ pCoexDm->psTdmaDuAdjType = 1;
+ }
+ } else { /* no change */
+ BTC_PRINT(
+ BTC_MSG_ALGORITHM,
+ ALGO_TRACE_FW_DETAIL,
+ (
+ "[BTCoex], ********** TDMA(on, %d) **********\n",
+ pCoexDm->curPsTdma
+ )
+ );
+ }
+
+ if (
+ pCoexDm->curPsTdma != 1 &&
+ pCoexDm->curPsTdma != 2 &&
+ pCoexDm->curPsTdma != 9 &&
+ pCoexDm->curPsTdma != 11
+ ) /* recover to previous adjust type */
+ halbtc8723b1ant_PsTdma(
+ pBtCoexist, NORMAL_EXEC, true, pCoexDm->psTdmaDuAdjType
+ );
+ }
+}
+
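+/* Turn PS-TDMA off before entering or leaving LPS; nothing is done when */
+/* the power-save state is unchanged. */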
+static void halbtc8723b1ant_PsTdmaCheckForPowerSaveState(
+ PBTC_COEXIST pBtCoexist, bool bNewPsState
+)
+{
+ u8 lpsMode = 0x0;
+
+ pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_U1_LPS_MODE, &lpsMode);
+
+ if (lpsMode) { /* already under LPS state */
+ if (bNewPsState) {
+ /* keep state under LPS, do nothing. */
+ } else /* will leave LPS state, turn off psTdma first */
+ halbtc8723b1ant_PsTdma(pBtCoexist, NORMAL_EXEC, false, 0);
+ } else { /* NO PS state */
+ if (bNewPsState) /* will enter LPS state, turn off psTdma first */
+ halbtc8723b1ant_PsTdma(pBtCoexist, NORMAL_EXEC, false, 0);
+ else {
+ /* keep state under NO PS state, do nothing. */
+ }
+ }
+}
+
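+/* Switch between native power save, forced LPS (with the given lps/rpwm */
+/* values) and LPS-off. For the LPS transitions, PS-TDMA is stopped first */
+/* via halbtc8723b1ant_PsTdmaCheckForPowerSaveState(). */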
+static void halbtc8723b1ant_PowerSaveState(
+ PBTC_COEXIST pBtCoexist, u8 psType, u8 lpsVal, u8 rpwmVal
+)
+{
+ bool bLowPwrDisable = false;
+
+ switch (psType) {
+ case BTC_PS_WIFI_NATIVE:
+ /* recover to original 32k low power setting */
+ bLowPwrDisable = false;
+ pBtCoexist->fBtcSet(
+ pBtCoexist, BTC_SET_ACT_DISABLE_LOW_POWER, &bLowPwrDisable
+ );
+ pBtCoexist->fBtcSet(pBtCoexist, BTC_SET_ACT_NORMAL_LPS, NULL);
+ pCoexSta->bForceLpsOn = false;
+ break;
+ case BTC_PS_LPS_ON:
+ halbtc8723b1ant_PsTdmaCheckForPowerSaveState(pBtCoexist, true);
+ halbtc8723b1ant_LpsRpwm(pBtCoexist, NORMAL_EXEC, lpsVal, rpwmVal);
+ /* when coex force to enter LPS, do not enter 32k low power. */
+ bLowPwrDisable = true;
+ pBtCoexist->fBtcSet(
+ pBtCoexist, BTC_SET_ACT_DISABLE_LOW_POWER, &bLowPwrDisable
+ );
+ /* power save must be executed before psTdma. */
+ pBtCoexist->fBtcSet(pBtCoexist, BTC_SET_ACT_ENTER_LPS, NULL);
+ pCoexSta->bForceLpsOn = true;
+ break;
+ case BTC_PS_LPS_OFF:
+ halbtc8723b1ant_PsTdmaCheckForPowerSaveState(pBtCoexist, false);
+ pBtCoexist->fBtcSet(pBtCoexist, BTC_SET_ACT_LEAVE_LPS, NULL);
+ pCoexSta->bForceLpsOn = false;
+ break;
+ default:
+ break;
+ }
+}
+
+/* */
+/* */
+/* Software Coex Mechanism start */
+/* */
+/* */
+
+/* */
+/* */
+/* Non-Software Coex Mechanism start */
+/* */
+/* */
+static void halbtc8723b1ant_ActionWifiMultiPort(PBTC_COEXIST pBtCoexist)
+{
+ halbtc8723b1ant_PowerSaveState(pBtCoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0);
+
+ halbtc8723b1ant_PsTdma(pBtCoexist, NORMAL_EXEC, false, 8);
+ halbtc8723b1ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 2);
+}
+
+static void halbtc8723b1ant_ActionHs(PBTC_COEXIST pBtCoexist)
+{
+ halbtc8723b1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 5);
+ halbtc8723b1ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 2);
+}
+
+static void halbtc8723b1ant_ActionBtInquiry(PBTC_COEXIST pBtCoexist)
+{
+ PBTC_BT_LINK_INFO pBtLinkInfo = &pBtCoexist->btLinkInfo;
+ bool bWifiConnected = false;
+ bool bApEnable = false;
+ bool bWifiBusy = false;
+ bool bBtBusy = false;
+
+ pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_BL_WIFI_AP_MODE_ENABLE, &bApEnable);
+ pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_BL_WIFI_CONNECTED, &bWifiConnected);
+ pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_BL_WIFI_BUSY, &bWifiBusy);
+ pBtCoexist->fBtcSet(pBtCoexist, BTC_SET_BL_BT_TRAFFIC_BUSY, &bBtBusy);
+
+ if (!bWifiConnected && !pCoexSta->bWiFiIsHighPriTask) {
+ halbtc8723b1ant_PowerSaveState(pBtCoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0);
+ halbtc8723b1ant_PsTdma(pBtCoexist, NORMAL_EXEC, false, 8);
+
+ halbtc8723b1ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 0);
+ } else if (
+ pBtLinkInfo->bScoExist ||
+ pBtLinkInfo->bHidExist ||
+ pBtLinkInfo->bA2dpExist
+ ) {
+ /* SCO/HID/A2DP busy */
+ halbtc8723b1ant_PowerSaveState(pBtCoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0);
+ halbtc8723b1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 32);
+
+ halbtc8723b1ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 4);
+ } else if (pBtLinkInfo->bPanExist || bWifiBusy) {
+ halbtc8723b1ant_PowerSaveState(pBtCoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0);
+ halbtc8723b1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 20);
+
+ halbtc8723b1ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 4);
+ } else {
+ halbtc8723b1ant_PowerSaveState(pBtCoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0);
+ halbtc8723b1ant_PsTdma(pBtCoexist, NORMAL_EXEC, false, 8);
+
+ halbtc8723b1ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 7);
+ }
+}
+
+static void halbtc8723b1ant_ActionBtScoHidOnlyBusy(
+ PBTC_COEXIST pBtCoexist, u8 wifiStatus
+)
+{
+ PBTC_BT_LINK_INFO pBtLinkInfo = &pBtCoexist->btLinkInfo;
+ bool bWifiConnected = false;
+
+ pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_BL_WIFI_CONNECTED, &bWifiConnected);
+
+ /* tdma and coex table */
+
+ if (pBtLinkInfo->bScoExist) {
+ halbtc8723b1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 5);
+ halbtc8723b1ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 5);
+ } else { /* HID */
+ halbtc8723b1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 6);
+ halbtc8723b1ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 5);
+ }
+}
+
+static void halbtc8723b1ant_ActionWifiConnectedBtAclBusy(
+ PBTC_COEXIST pBtCoexist, u8 wifiStatus
+)
+{
+ u8 btRssiState;
+
+ PBTC_BT_LINK_INFO pBtLinkInfo = &pBtCoexist->btLinkInfo;
+ btRssiState = halbtc8723b1ant_BtRssiState(2, 28, 0);
+
+ if ((pCoexSta->lowPriorityRx >= 1000) && (pCoexSta->lowPriorityRx != 65535))
+ pBtLinkInfo->bSlaveRole = true;
+ else
+ pBtLinkInfo->bSlaveRole = false;
+
+ if (pBtLinkInfo->bHidOnly) { /* HID */
+ halbtc8723b1ant_ActionBtScoHidOnlyBusy(pBtCoexist, wifiStatus);
+ pCoexDm->bAutoTdmaAdjust = false;
+ return;
+ } else if (pBtLinkInfo->bA2dpOnly) { /* A2DP */
+ if (BT_8723B_1ANT_WIFI_STATUS_CONNECTED_IDLE == wifiStatus) {
+ halbtc8723b1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 32);
+ halbtc8723b1ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 4);
+ pCoexDm->bAutoTdmaAdjust = false;
+ } else {
+ halbtc8723b1ant_TdmaDurationAdjustForAcl(pBtCoexist, wifiStatus);
+ halbtc8723b1ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 4);
+ pCoexDm->bAutoTdmaAdjust = true;
+ }
+ } else if (pBtLinkInfo->bHidExist && pBtLinkInfo->bA2dpExist) { /* HID+A2DP */
+ halbtc8723b1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 14);
+ pCoexDm->bAutoTdmaAdjust = false;
+
+ halbtc8723b1ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 4);
+ } else if (
+ pBtLinkInfo->bPanOnly ||
+ (pBtLinkInfo->bHidExist && pBtLinkInfo->bPanExist)
+ ) { /* PAN(OPP, FTP), HID+PAN(OPP, FTP) */
+ halbtc8723b1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 3);
+ halbtc8723b1ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 4);
+ pCoexDm->bAutoTdmaAdjust = false;
+ } else if (
+ (pBtLinkInfo->bA2dpExist && pBtLinkInfo->bPanExist) ||
+ (pBtLinkInfo->bHidExist && pBtLinkInfo->bA2dpExist && pBtLinkInfo->bPanExist)
+ ) { /* A2DP+PAN(OPP, FTP), HID+A2DP+PAN(OPP, FTP) */
+ halbtc8723b1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 13);
+ halbtc8723b1ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 4);
+ pCoexDm->bAutoTdmaAdjust = false;
+ } else {
+ /* BT no-profile busy (0x9) */
+ halbtc8723b1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 32);
+ halbtc8723b1ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 4);
+ pCoexDm->bAutoTdmaAdjust = false;
+ }
+}
+
+static void halbtc8723b1ant_ActionWifiNotConnected(PBTC_COEXIST pBtCoexist)
+{
+ /* power save state */
+ halbtc8723b1ant_PowerSaveState(pBtCoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0);
+
+ /* tdma and coex table */
+ halbtc8723b1ant_PsTdma(pBtCoexist, FORCE_EXEC, false, 8);
+ halbtc8723b1ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 0);
+}
+
+static void halbtc8723b1ant_ActionWifiNotConnectedScan(
+ PBTC_COEXIST pBtCoexist
+)
+{
+ PBTC_BT_LINK_INFO pBtLinkInfo = &pBtCoexist->btLinkInfo;
+
+ halbtc8723b1ant_PowerSaveState(pBtCoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0);
+
+ /* tdma and coex table */
+ if (BT_8723B_1ANT_BT_STATUS_ACL_BUSY == pCoexDm->btStatus) {
+ if (pBtLinkInfo->bA2dpExist) {
+ halbtc8723b1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 32);
+ halbtc8723b1ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 4);
+ } else if (pBtLinkInfo->bA2dpExist && pBtLinkInfo->bPanExist) {
+ halbtc8723b1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 22);
+ halbtc8723b1ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 4);
+ } else {
+ halbtc8723b1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 20);
+ halbtc8723b1ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 4);
+ }
+ } else if (
+ (BT_8723B_1ANT_BT_STATUS_SCO_BUSY == pCoexDm->btStatus) ||
+ (BT_8723B_1ANT_BT_STATUS_ACL_SCO_BUSY == pCoexDm->btStatus)
+ ) {
+ halbtc8723b1ant_ActionBtScoHidOnlyBusy(
+ pBtCoexist, BT_8723B_1ANT_WIFI_STATUS_CONNECTED_SCAN
+ );
+ } else {
+ /* Bryant Add */
+ halbtc8723b1ant_PsTdma(pBtCoexist, NORMAL_EXEC, false, 8);
+ halbtc8723b1ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 2);
+ }
+}
+
+static void halbtc8723b1ant_ActionWifiNotConnectedAssoAuth(
+ PBTC_COEXIST pBtCoexist
+)
+{
+ PBTC_BT_LINK_INFO pBtLinkInfo = &pBtCoexist->btLinkInfo;
+
+ halbtc8723b1ant_PowerSaveState(pBtCoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0);
+
+ /* tdma and coex table */
+ if (
+ (pBtLinkInfo->bScoExist) ||
+ (pBtLinkInfo->bHidExist) ||
+ (pBtLinkInfo->bA2dpExist)
+ ) {
+ halbtc8723b1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 32);
+ halbtc8723b1ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 4);
+ } else if (pBtLinkInfo->bPanExist) {
+ halbtc8723b1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 20);
+ halbtc8723b1ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 4);
+ } else {
+ halbtc8723b1ant_PsTdma(pBtCoexist, NORMAL_EXEC, false, 8);
+ halbtc8723b1ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 2);
+ }
+}
+
+static void halbtc8723b1ant_ActionWifiConnectedScan(PBTC_COEXIST pBtCoexist)
+{
+ PBTC_BT_LINK_INFO pBtLinkInfo = &pBtCoexist->btLinkInfo;
+
+ halbtc8723b1ant_PowerSaveState(pBtCoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0);
+
+ /* tdma and coex table */
+ if (BT_8723B_1ANT_BT_STATUS_ACL_BUSY == pCoexDm->btStatus) {
+ if (pBtLinkInfo->bA2dpExist) {
+ halbtc8723b1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 32);
+ halbtc8723b1ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 4);
+ } else if (pBtLinkInfo->bA2dpExist && pBtLinkInfo->bPanExist) {
+ halbtc8723b1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 22);
+ halbtc8723b1ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 4);
+ } else {
+ halbtc8723b1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 20);
+ halbtc8723b1ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 4);
+ }
+ } else if (
+ (BT_8723B_1ANT_BT_STATUS_SCO_BUSY == pCoexDm->btStatus) ||
+ (BT_8723B_1ANT_BT_STATUS_ACL_SCO_BUSY == pCoexDm->btStatus)
+ ) {
+ halbtc8723b1ant_ActionBtScoHidOnlyBusy(
+ pBtCoexist, BT_8723B_1ANT_WIFI_STATUS_CONNECTED_SCAN
+ );
+ } else {
+ /* Bryant Add */
+ halbtc8723b1ant_PsTdma(pBtCoexist, NORMAL_EXEC, false, 8);
+ halbtc8723b1ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 2);
+ }
+}
+
+static void halbtc8723b1ant_ActionWifiConnectedSpecialPacket(
+ PBTC_COEXIST pBtCoexist
+)
+{
+ PBTC_BT_LINK_INFO pBtLinkInfo = &pBtCoexist->btLinkInfo;
+
+ halbtc8723b1ant_PowerSaveState(pBtCoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0);
+
+ /* tdma and coex table */
+ if (
+ (pBtLinkInfo->bScoExist) ||
+ (pBtLinkInfo->bHidExist) ||
+ (pBtLinkInfo->bA2dpExist)
+ ) {
+ halbtc8723b1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 32);
+ halbtc8723b1ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 4);
+ } else if (pBtLinkInfo->bPanExist) {
+ halbtc8723b1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 20);
+ halbtc8723b1ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 4);
+ } else {
+ halbtc8723b1ant_PsTdma(pBtCoexist, NORMAL_EXEC, false, 8);
+ halbtc8723b1ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 2);
+ }
+}
+
+static void halbtc8723b1ant_ActionWifiConnected(PBTC_COEXIST pBtCoexist)
+{
+ bool bWifiBusy = false;
+ bool bScan = false, bLink = false, bRoam = false;
+ bool bUnder4way = false, bApEnable = false;
+
+ BTC_PRINT(
+ BTC_MSG_ALGORITHM,
+ ALGO_TRACE,
+ ("[BTCoex], CoexForWifiConnect() ===>\n")
+ );
+
+ pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_BL_WIFI_4_WAY_PROGRESS, &bUnder4way);
+ if (bUnder4way) {
+ halbtc8723b1ant_ActionWifiConnectedSpecialPacket(pBtCoexist);
+ BTC_PRINT(
+ BTC_MSG_ALGORITHM,
+ ALGO_TRACE,
+ ("[BTCoex], CoexForWifiConnect(), return for wifi is under 4way<===\n")
+ );
+ return;
+ }
+
+ pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_BL_WIFI_SCAN, &bScan);
+ pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_BL_WIFI_LINK, &bLink);
+ pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_BL_WIFI_ROAM, &bRoam);
+ if (bScan || bLink || bRoam) {
+ if (bScan)
+ halbtc8723b1ant_ActionWifiConnectedScan(pBtCoexist);
+ else
+ halbtc8723b1ant_ActionWifiConnectedSpecialPacket(pBtCoexist);
+ BTC_PRINT(
+ BTC_MSG_ALGORITHM,
+ ALGO_TRACE,
+ ("[BTCoex], CoexForWifiConnect(), return for wifi is under scan<===\n")
+ );
+ return;
+ }
+
+ pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_BL_WIFI_AP_MODE_ENABLE, &bApEnable);
+ pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_BL_WIFI_BUSY, &bWifiBusy);
+
+ /* power save state */
+ if (
+ !bApEnable &&
+ BT_8723B_1ANT_BT_STATUS_ACL_BUSY == pCoexDm->btStatus &&
+ !pBtCoexist->btLinkInfo.bHidOnly
+ ) {
+ if (pBtCoexist->btLinkInfo.bA2dpOnly) { /* A2DP */
+ if (!bWifiBusy)
+ halbtc8723b1ant_PowerSaveState(
+ pBtCoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0
+ );
+ else { /* busy */
+ if (pCoexSta->nScanAPNum >= BT_8723B_1ANT_WIFI_NOISY_THRESH) /* no force LPS, no PS-TDMA, use pure TDMA */
+ halbtc8723b1ant_PowerSaveState(
+ pBtCoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0
+ );
+ else
+ halbtc8723b1ant_PowerSaveState(
+ pBtCoexist, BTC_PS_LPS_ON, 0x50, 0x4
+ );
+ }
+ } else if (
+ (pCoexSta->bPanExist == false) &&
+ (pCoexSta->bA2dpExist == false) &&
+ (pCoexSta->bHidExist == false)
+ )
+ halbtc8723b1ant_PowerSaveState(pBtCoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0);
+ else
+ halbtc8723b1ant_PowerSaveState(pBtCoexist, BTC_PS_LPS_ON, 0x50, 0x4);
+ } else
+ halbtc8723b1ant_PowerSaveState(pBtCoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0);
+
+ /* tdma and coex table */
+ if (!bWifiBusy) {
+ if (BT_8723B_1ANT_BT_STATUS_ACL_BUSY == pCoexDm->btStatus) {
+ halbtc8723b1ant_ActionWifiConnectedBtAclBusy(
+ pBtCoexist,
+ BT_8723B_1ANT_WIFI_STATUS_CONNECTED_IDLE
+ );
+ } else if (
+ (BT_8723B_1ANT_BT_STATUS_SCO_BUSY == pCoexDm->btStatus) ||
+ (BT_8723B_1ANT_BT_STATUS_ACL_SCO_BUSY == pCoexDm->btStatus)
+ ) {
+ halbtc8723b1ant_ActionBtScoHidOnlyBusy(pBtCoexist,
+ BT_8723B_1ANT_WIFI_STATUS_CONNECTED_IDLE);
+ } else {
+ halbtc8723b1ant_PsTdma(pBtCoexist, NORMAL_EXEC, false, 8);
+
+ if ((pCoexSta->highPriorityTx) + (pCoexSta->highPriorityRx) <= 60)
+ halbtc8723b1ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 2);
+ else
+ halbtc8723b1ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 7);
+ }
+ } else {
+ if (BT_8723B_1ANT_BT_STATUS_ACL_BUSY == pCoexDm->btStatus) {
+ halbtc8723b1ant_ActionWifiConnectedBtAclBusy(
+ pBtCoexist,
+ BT_8723B_1ANT_WIFI_STATUS_CONNECTED_BUSY
+ );
+ } else if (
+ (BT_8723B_1ANT_BT_STATUS_SCO_BUSY == pCoexDm->btStatus) ||
+ (BT_8723B_1ANT_BT_STATUS_ACL_SCO_BUSY == pCoexDm->btStatus)
+ ) {
+ halbtc8723b1ant_ActionBtScoHidOnlyBusy(
+ pBtCoexist,
+ BT_8723B_1ANT_WIFI_STATUS_CONNECTED_BUSY
+ );
+ } else {
+ halbtc8723b1ant_PsTdma(pBtCoexist, NORMAL_EXEC, false, 8);
+
+ if ((pCoexSta->highPriorityTx) + (pCoexSta->highPriorityRx) <= 60)
+ halbtc8723b1ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 2);
+ else
+ halbtc8723b1ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 7);
+ }
+ }
+}
+
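+/* The software coexistence mechanism currently only logs the selected */
+/* algorithm; the per-algorithm action routines are left commented out and */
+/* the caller treats this as debug output only. */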
+static void halbtc8723b1ant_RunSwCoexistMechanism(PBTC_COEXIST pBtCoexist)
+{
+ u8 algorithm = 0;
+
+ algorithm = halbtc8723b1ant_ActionAlgorithm(pBtCoexist);
+ pCoexDm->curAlgorithm = algorithm;
+
+ if (halbtc8723b1ant_IsCommonAction(pBtCoexist)) {
+
+ } else {
+ switch (pCoexDm->curAlgorithm) {
+ case BT_8723B_1ANT_COEX_ALGO_SCO:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action algorithm = SCO.\n"));
+ /* halbtc8723b1ant_ActionSco(pBtCoexist); */
+ break;
+ case BT_8723B_1ANT_COEX_ALGO_HID:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action algorithm = HID.\n"));
+ /* halbtc8723b1ant_ActionHid(pBtCoexist); */
+ break;
+ case BT_8723B_1ANT_COEX_ALGO_A2DP:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action algorithm = A2DP.\n"));
+ /* halbtc8723b1ant_ActionA2dp(pBtCoexist); */
+ break;
+ case BT_8723B_1ANT_COEX_ALGO_A2DP_PANHS:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action algorithm = A2DP+PAN(HS).\n"));
+ /* halbtc8723b1ant_ActionA2dpPanHs(pBtCoexist); */
+ break;
+ case BT_8723B_1ANT_COEX_ALGO_PANEDR:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action algorithm = PAN(EDR).\n"));
+ /* halbtc8723b1ant_ActionPanEdr(pBtCoexist); */
+ break;
+ case BT_8723B_1ANT_COEX_ALGO_PANHS:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action algorithm = HS mode.\n"));
+ /* halbtc8723b1ant_ActionPanHs(pBtCoexist); */
+ break;
+ case BT_8723B_1ANT_COEX_ALGO_PANEDR_A2DP:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action algorithm = PAN+A2DP.\n"));
+ /* halbtc8723b1ant_ActionPanEdrA2dp(pBtCoexist); */
+ break;
+ case BT_8723B_1ANT_COEX_ALGO_PANEDR_HID:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action algorithm = PAN(EDR)+HID.\n"));
+ /* halbtc8723b1ant_ActionPanEdrHid(pBtCoexist); */
+ break;
+ case BT_8723B_1ANT_COEX_ALGO_HID_A2DP_PANEDR:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action algorithm = HID+A2DP+PAN.\n"));
+ /* halbtc8723b1ant_ActionHidA2dpPanEdr(pBtCoexist); */
+ break;
+ case BT_8723B_1ANT_COEX_ALGO_HID_A2DP:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action algorithm = HID+A2DP.\n"));
+ /* halbtc8723b1ant_ActionHidA2dp(pBtCoexist); */
+ break;
+ default:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action algorithm = coexist All Off!!\n"));
+ break;
+ }
+ pCoexDm->preAlgorithm = pCoexDm->curAlgorithm;
+ }
+}
+
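+/* Top-level coexistence dispatcher: bail out under manual control, a */
+/* stopped coex DM or IPS; handle multi-port / P2P-GO links and BT inquiry */
+/* first, then branch into the not-connected or connected action routines. */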
+static void halbtc8723b1ant_RunCoexistMechanism(PBTC_COEXIST pBtCoexist)
+{
+ PBTC_BT_LINK_INFO pBtLinkInfo = &pBtCoexist->btLinkInfo;
+ bool bWifiConnected = false, bBtHsOn = false;
+ bool bIncreaseScanDevNum = false;
+ bool bBtCtrlAggBufSize = false;
+ u8 aggBufSize = 5;
+ u32 wifiLinkStatus = 0;
+ u32 numOfWifiLink = 0;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], RunCoexistMechanism() ===>\n"));
+
+ if (pBtCoexist->bManualControl) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], RunCoexistMechanism(), return for Manual CTRL <===\n"));
+ return;
+ }
+
+ if (pBtCoexist->bStopCoexDm) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], RunCoexistMechanism(), return for Stop Coex DM <===\n"));
+ return;
+ }
+
+ if (pCoexSta->bUnderIps) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], wifi is under IPS !!!\n"));
+ return;
+ }
+
+ if (
+ (BT_8723B_1ANT_BT_STATUS_ACL_BUSY == pCoexDm->btStatus) ||
+ (BT_8723B_1ANT_BT_STATUS_SCO_BUSY == pCoexDm->btStatus) ||
+ (BT_8723B_1ANT_BT_STATUS_ACL_SCO_BUSY == pCoexDm->btStatus)
+ ){
+ bIncreaseScanDevNum = true;
+ }
+
+ pBtCoexist->fBtcSet(
+ pBtCoexist,
+ BTC_SET_BL_INC_SCAN_DEV_NUM,
+ &bIncreaseScanDevNum
+ );
+ pBtCoexist->fBtcGet(
+ pBtCoexist,
+ BTC_GET_BL_WIFI_CONNECTED,
+ &bWifiConnected
+ );
+
+ pBtCoexist->fBtcGet(
+ pBtCoexist,
+ BTC_GET_U4_WIFI_LINK_STATUS,
+ &wifiLinkStatus
+ );
+ numOfWifiLink = wifiLinkStatus>>16;
+
+ if ((numOfWifiLink >= 2) || (wifiLinkStatus&WIFI_P2P_GO_CONNECTED)) {
+ BTC_PRINT(
+ BTC_MSG_INTERFACE,
+ INTF_NOTIFY,
+ (
+ "############# [BTCoex], Multi-Port numOfWifiLink = %d, wifiLinkStatus = 0x%x\n",
+ numOfWifiLink,
+ wifiLinkStatus
+ )
+ );
+ halbtc8723b1ant_LimitedTx(pBtCoexist, NORMAL_EXEC, 0, 0, 0, 0);
+ halbtc8723b1ant_LimitedRx(pBtCoexist, NORMAL_EXEC, false, bBtCtrlAggBufSize, aggBufSize);
+
+ if ((pBtLinkInfo->bA2dpExist) && (pCoexSta->bC2hBtInquiryPage)) {
+ BTC_PRINT(
+ BTC_MSG_INTERFACE,
+ INTF_NOTIFY,
+ ("############# [BTCoex], BT Is Inquirying\n")
+ );
+ halbtc8723b1ant_ActionBtInquiry(pBtCoexist);
+ } else
+ halbtc8723b1ant_ActionWifiMultiPort(pBtCoexist);
+
+ return;
+ }
+
+ if ((pBtLinkInfo->bBtLinkExist) && (bWifiConnected)) {
+ halbtc8723b1ant_LimitedTx(pBtCoexist, NORMAL_EXEC, 1, 1, 0, 1);
+
+ if (pBtLinkInfo->bScoExist)
+ halbtc8723b1ant_LimitedRx(pBtCoexist, NORMAL_EXEC, false, true, 0x5);
+ else
+ halbtc8723b1ant_LimitedRx(pBtCoexist, NORMAL_EXEC, false, true, 0x8);
+
+ halbtc8723b1ant_SwMechanism(pBtCoexist, true);
+ halbtc8723b1ant_RunSwCoexistMechanism(pBtCoexist); /* just print debug message */
+ } else {
+ halbtc8723b1ant_LimitedTx(pBtCoexist, NORMAL_EXEC, 0, 0, 0, 0);
+
+ halbtc8723b1ant_LimitedRx(pBtCoexist, NORMAL_EXEC, false, false, 0x5);
+
+ halbtc8723b1ant_SwMechanism(pBtCoexist, false);
+ halbtc8723b1ant_RunSwCoexistMechanism(pBtCoexist); /* just print debug message */
+ }
+
+ pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_BL_HS_OPERATION, &bBtHsOn);
+ if (pCoexSta->bC2hBtInquiryPage) {
+ BTC_PRINT(
+ BTC_MSG_INTERFACE,
+ INTF_NOTIFY,
+ ("############# [BTCoex], BT Is Inquirying\n")
+ );
+ halbtc8723b1ant_ActionBtInquiry(pBtCoexist);
+ return;
+ } else if (bBtHsOn) {
+ halbtc8723b1ant_ActionHs(pBtCoexist);
+ return;
+ }
+
+
+ if (!bWifiConnected) {
+ bool bScan = false, bLink = false, bRoam = false;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], wifi is non connected-idle !!!\n"));
+
+ pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_BL_WIFI_SCAN, &bScan);
+ pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_BL_WIFI_LINK, &bLink);
+ pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_BL_WIFI_ROAM, &bRoam);
+
+ if (bScan || bLink || bRoam) {
+ if (bScan)
+ halbtc8723b1ant_ActionWifiNotConnectedScan(pBtCoexist);
+ else
+ halbtc8723b1ant_ActionWifiNotConnectedAssoAuth(pBtCoexist);
+ } else
+ halbtc8723b1ant_ActionWifiNotConnected(pBtCoexist);
+ } else /* wifi LPS/Busy */
+ halbtc8723b1ant_ActionWifiConnected(pBtCoexist);
+}
+
+static void halbtc8723b1ant_InitCoexDm(PBTC_COEXIST pBtCoexist)
+{
+ /* force to reset coex mechanism */
+
+ /* sw all off */
+ halbtc8723b1ant_SwMechanism(pBtCoexist, false);
+
+ /* halbtc8723b1ant_PsTdma(pBtCoexist, FORCE_EXEC, false, 8); */
+ halbtc8723b1ant_CoexTableWithType(pBtCoexist, FORCE_EXEC, 0);
+
+ pCoexSta->popEventCnt = 0;
+}
+
+static void halbtc8723b1ant_InitHwConfig(
+ PBTC_COEXIST pBtCoexist,
+ bool bBackUp,
+ bool bWifiOnly
+)
+{
+ u32 u4Tmp = 0;/* fwVer; */
+ u8 u1Tmpa = 0, u1Tmpb = 0;
+
+ BTC_PRINT(
+ BTC_MSG_INTERFACE,
+ INTF_INIT,
+ ("[BTCoex], 1Ant Init HW Config!!\n")
+ );
+
+ pBtCoexist->fBtcWrite1ByteBitMask(pBtCoexist, 0x550, 0x8, 0x1); /* enable TBTT interrupt */
+
+ /* 0x790[5:0]= 0x5 */
+ pBtCoexist->fBtcWrite1Byte(pBtCoexist, 0x790, 0x5);
+
+ /* Enable counter statistics */
+ pBtCoexist->fBtcWrite1Byte(pBtCoexist, 0x778, 0x1);
+ pBtCoexist->fBtcWrite1ByteBitMask(pBtCoexist, 0x40, 0x20, 0x1);
+
+ /* Antenna config */
+ if (bWifiOnly) {
+ halbtc8723b1ant_SetAntPath(pBtCoexist, BTC_ANT_PATH_WIFI, true, false);
+ halbtc8723b1ant_PsTdma(pBtCoexist, FORCE_EXEC, false, 9);
+ } else
+ halbtc8723b1ant_SetAntPath(pBtCoexist, BTC_ANT_PATH_BT, true, false);
+
+ /* PTA parameter */
+ halbtc8723b1ant_CoexTableWithType(pBtCoexist, FORCE_EXEC, 0);
+
+ u4Tmp = pBtCoexist->fBtcRead4Byte(pBtCoexist, 0x948);
+ u1Tmpa = pBtCoexist->fBtcRead1Byte(pBtCoexist, 0x765);
+ u1Tmpb = pBtCoexist->fBtcRead1Byte(pBtCoexist, 0x67);
+
+ BTC_PRINT(
+ BTC_MSG_INTERFACE,
+ INTF_NOTIFY,
+ (
+ "############# [BTCoex], 0x948 = 0x%x, 0x765 = 0x%x, 0x67 = 0x%x\n",
+ u4Tmp,
+ u1Tmpa,
+ u1Tmpb
+ )
+ );
+}
+
+/* */
+/* workaround functions start with wa_halbtc8723b1ant_ */
+/* */
+/* */
+/* extern functions start with EXhalbtc8723b1ant_ */
+/* */
+void EXhalbtc8723b1ant_PowerOnSetting(PBTC_COEXIST pBtCoexist)
+{
+ PBTC_BOARD_INFO pBoardInfo = &pBtCoexist->boardInfo;
+ u8 u1Tmp = 0x0;
+ u16 u2Tmp = 0x0;
+
+ pBtCoexist->fBtcWrite1Byte(pBtCoexist, 0x67, 0x20);
+
+ /* enable the BB block via REG_SYS_FUNC_EN so that 0x948 can be written correctly. */
+ u2Tmp = pBtCoexist->fBtcRead2Byte(pBtCoexist, 0x2);
+ pBtCoexist->fBtcWrite2Byte(pBtCoexist, 0x2, u2Tmp|BIT0|BIT1);
+
+ /* set GNT_BT = 1 */
+ pBtCoexist->fBtcWrite1Byte(pBtCoexist, 0x765, 0x18);
+ /* set WLAN_ACT = 0 */
+ pBtCoexist->fBtcWrite1Byte(pBtCoexist, 0x76e, 0x4);
+
+ /* */
+ /* S0/S1 setting and local register setting (from this the FW can derive the antenna number, S0/S1, etc.) */
+ /* Local setting bit define */
+ /* BIT0: "0" for no antenna inverse; "1" for antenna inverse */
+ /* BIT1: "0" for internal switch; "1" for external switch */
+ /* BIT2: "0" for one antenna; "1" for two antenna */
+ /* NOTE: the default here is internal switch and one antenna ==> BIT1 = 0 and BIT2 = 0 */
+ if (pBtCoexist->chipInterface == BTC_INTF_USB) {
+ /* fixed at S0 for USB interface */
+ pBtCoexist->fBtcWrite4Byte(pBtCoexist, 0x948, 0x0);
+
+ u1Tmp |= 0x1; /* antenna inverse */
+ pBtCoexist->fBtcWriteLocalReg1Byte(pBtCoexist, 0xfe08, u1Tmp);
+
+ pBoardInfo->btdmAntPos = BTC_ANTENNA_AT_AUX_PORT;
+ } else {
+ /* for PCIE and SDIO interface, we check efuse 0xc3[6] */
+ if (pBoardInfo->singleAntPath == 0) {
+ /* set to S1 */
+ pBtCoexist->fBtcWrite4Byte(pBtCoexist, 0x948, 0x280);
+ pBoardInfo->btdmAntPos = BTC_ANTENNA_AT_MAIN_PORT;
+ } else if (pBoardInfo->singleAntPath == 1) {
+ /* set to S0 */
+ pBtCoexist->fBtcWrite4Byte(pBtCoexist, 0x948, 0x0);
+ u1Tmp |= 0x1; /* antenna inverse */
+ pBoardInfo->btdmAntPos = BTC_ANTENNA_AT_AUX_PORT;
+ }
+
+ if (pBtCoexist->chipInterface == BTC_INTF_PCI)
+ pBtCoexist->fBtcWriteLocalReg1Byte(pBtCoexist, 0x384, u1Tmp);
+ else if (pBtCoexist->chipInterface == BTC_INTF_SDIO)
+ pBtCoexist->fBtcWriteLocalReg1Byte(pBtCoexist, 0x60, u1Tmp);
+ }
+}
+
+void EXhalbtc8723b1ant_InitHwConfig(PBTC_COEXIST pBtCoexist, bool bWifiOnly)
+{
+ halbtc8723b1ant_InitHwConfig(pBtCoexist, true, bWifiOnly);
+}
+
+void EXhalbtc8723b1ant_InitCoexDm(PBTC_COEXIST pBtCoexist)
+{
+ BTC_PRINT(
+ BTC_MSG_INTERFACE,
+ INTF_INIT,
+ ("[BTCoex], Coex Mechanism Init!!\n")
+ );
+
+ pBtCoexist->bStopCoexDm = false;
+
+ halbtc8723b1ant_InitCoexDm(pBtCoexist);
+
+ halbtc8723b1ant_QueryBtInfo(pBtCoexist);
+}
+
+void EXhalbtc8723b1ant_DisplayCoexInfo(PBTC_COEXIST pBtCoexist)
+{
+ PBTC_BOARD_INFO pBoardInfo = &pBtCoexist->boardInfo;
+ PBTC_STACK_INFO pStackInfo = &pBtCoexist->stackInfo;
+ PBTC_BT_LINK_INFO pBtLinkInfo = &pBtCoexist->btLinkInfo;
+ u8 *cliBuf = pBtCoexist->cliBuf;
+ u8 u1Tmp[4], i, btInfoExt, psTdmaCase = 0;
+ u16 u2Tmp[4];
+ u32 u4Tmp[4];
+ bool bRoam = false;
+ bool bScan = false;
+ bool bLink = false;
+ bool bWifiUnder5G = false;
+ bool bWifiUnderBMode = false;
+ bool bBtHsOn = false;
+ bool bWifiBusy = false;
+ s32 wifiRssi = 0, btHsRssi = 0;
+ u32 wifiBw, wifiTrafficDir, faOfdm, faCck, wifiLinkStatus;
+ u8 wifiDot11Chnl, wifiHsChnl;
+ u32 fwVer = 0, btPatchVer = 0;
+ static u8 PopReportIn10s = 0;
+
+ CL_SPRINTF(
+ cliBuf,
+ BT_TMP_BUF_SIZE,
+ "\r\n ============[BT Coexist info]============"
+ );
+ CL_PRINTF(cliBuf);
+
+ if (pBtCoexist->bManualControl) {
+ CL_SPRINTF(
+ cliBuf,
+ BT_TMP_BUF_SIZE,
+ "\r\n ============[Under Manual Control]============"
+ );
+ CL_PRINTF(cliBuf);
+ CL_SPRINTF(cliBuf,
+ BT_TMP_BUF_SIZE,
+ "\r\n =========================================="
+ );
+ CL_PRINTF(cliBuf);
+ }
+ if (pBtCoexist->bStopCoexDm) {
+ CL_SPRINTF(
+ cliBuf,
+ BT_TMP_BUF_SIZE,
+ "\r\n ============[Coex is STOPPED]============"
+ );
+ CL_PRINTF(cliBuf);
+ CL_SPRINTF(
+ cliBuf,
+ BT_TMP_BUF_SIZE,
+ "\r\n =========================================="
+ );
+ CL_PRINTF(cliBuf);
+ }
+
+ CL_SPRINTF(
+ cliBuf,
+ BT_TMP_BUF_SIZE,
+ "\r\n %-35s = %d/ %d/ %d", "Ant PG Num/ Ant Mech/ Ant Pos:", \
+ pBoardInfo->pgAntNum,
+ pBoardInfo->btdmAntNum,
+ pBoardInfo->btdmAntPos
+ );
+ CL_PRINTF(cliBuf);
+
+ CL_SPRINTF(
+ cliBuf,
+ BT_TMP_BUF_SIZE,
+ "\r\n %-35s = %s / %d", "BT stack/ hci ext ver", \
+ ((pStackInfo->bProfileNotified) ? "Yes" : "No"),
+ pStackInfo->hciVersion
+ );
+ CL_PRINTF(cliBuf);
+
+ pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_U4_BT_PATCH_VER, &btPatchVer);
+ pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_U4_WIFI_FW_VER, &fwVer);
+ CL_SPRINTF(
+ cliBuf,
+ BT_TMP_BUF_SIZE,
+ "\r\n %-35s = %d_%x/ 0x%x/ 0x%x(%d)", "CoexVer/ FwVer/ PatchVer", \
+ GLCoexVerDate8723b1Ant,
+ GLCoexVer8723b1Ant,
+ fwVer,
+ btPatchVer,
+ btPatchVer
+ );
+ CL_PRINTF(cliBuf);
+
+ pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_BL_HS_OPERATION, &bBtHsOn);
+ pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_U1_WIFI_DOT11_CHNL, &wifiDot11Chnl);
+ pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_U1_WIFI_HS_CHNL, &wifiHsChnl);
+ CL_SPRINTF(
+ cliBuf,
+ BT_TMP_BUF_SIZE,
+ "\r\n %-35s = %d / %d(%d)", "Dot11 channel / HsChnl(HsMode)", \
+ wifiDot11Chnl,
+ wifiHsChnl,
+ bBtHsOn
+ );
+ CL_PRINTF(cliBuf);
+
+ CL_SPRINTF(
+ cliBuf,
+ BT_TMP_BUF_SIZE,
+ "\r\n %-35s = %02x %02x %02x ", "H2C Wifi inform bt chnl Info", \
+ pCoexDm->wifiChnlInfo[0],
+ pCoexDm->wifiChnlInfo[1],
+ pCoexDm->wifiChnlInfo[2]
+ );
+ CL_PRINTF(cliBuf);
+
+ pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_S4_WIFI_RSSI, &wifiRssi);
+ pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_S4_HS_RSSI, &btHsRssi);
+ CL_SPRINTF(
+ cliBuf,
+ BT_TMP_BUF_SIZE,
+ "\r\n %-35s = %d/ %d", "Wifi rssi/ HS rssi", \
+ wifiRssi-100, btHsRssi-100
+ );
+ CL_PRINTF(cliBuf);
+
+ pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_BL_WIFI_SCAN, &bScan);
+ pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_BL_WIFI_LINK, &bLink);
+ pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_BL_WIFI_ROAM, &bRoam);
+ CL_SPRINTF(
+ cliBuf,
+ BT_TMP_BUF_SIZE,
+ "\r\n %-35s = %d/ %d/ %d/ %s", "Wifi bLink/ bRoam/ bScan/ bHi-Pri", \
+ bLink, bRoam, bScan, ((pCoexSta->bWiFiIsHighPriTask) ? "1" : "0")
+ );
+ CL_PRINTF(cliBuf);
+
+ pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_BL_WIFI_UNDER_5G, &bWifiUnder5G);
+ pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw);
+ pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_BL_WIFI_BUSY, &bWifiBusy);
+ pBtCoexist->fBtcGet(
+ pBtCoexist, BTC_GET_U4_WIFI_TRAFFIC_DIRECTION, &wifiTrafficDir
+ );
+ pBtCoexist->fBtcGet(
+ pBtCoexist, BTC_GET_BL_WIFI_UNDER_B_MODE, &bWifiUnderBMode
+ );
+
+ CL_SPRINTF(
+ cliBuf,
+ BT_TMP_BUF_SIZE,
+ "\r\n %-35s = %s / %s/ %s/ AP =%d/ %s ", "Wifi status", \
+ (bWifiUnder5G ? "5G" : "2.4G"),
+ ((bWifiUnderBMode) ? "11b" : ((BTC_WIFI_BW_LEGACY == wifiBw) ? "11bg" : (((BTC_WIFI_BW_HT40 == wifiBw) ? "HT40" : "HT20")))),
+ ((!bWifiBusy) ? "idle" : ((BTC_WIFI_TRAFFIC_TX == wifiTrafficDir) ? "uplink" : "downlink")),
+ pCoexSta->nScanAPNum,
+ (pCoexSta->bCCKLock) ? "Lock" : "noLock"
+ );
+ CL_PRINTF(cliBuf);
+
+ pBtCoexist->fBtcGet(
+ pBtCoexist, BTC_GET_U4_WIFI_LINK_STATUS, &wifiLinkStatus
+ );
+ CL_SPRINTF(
+ cliBuf,
+ BT_TMP_BUF_SIZE,
+ "\r\n %-35s = %d/ %d/ %d/ %d/ %d", "sta/vwifi/hs/p2pGo/p2pGc", \
+ ((wifiLinkStatus&WIFI_STA_CONNECTED) ? 1 : 0),
+ ((wifiLinkStatus&WIFI_AP_CONNECTED) ? 1 : 0),
+ ((wifiLinkStatus&WIFI_HS_CONNECTED) ? 1 : 0),
+ ((wifiLinkStatus&WIFI_P2P_GO_CONNECTED) ? 1 : 0),
+ ((wifiLinkStatus&WIFI_P2P_GC_CONNECTED) ? 1 : 0)
+ );
+ CL_PRINTF(cliBuf);
+
+
+ PopReportIn10s++;
+ CL_SPRINTF(
+ cliBuf,
+ BT_TMP_BUF_SIZE,
+ "\r\n %-35s = [%s/ %d/ %d/ %d] ", "BT [status/ rssi/ retryCnt/ popCnt]", \
+ ((pBtCoexist->btInfo.bBtDisabled) ? ("disabled") : ((pCoexSta->bC2hBtInquiryPage) ? ("inquiry/page scan") : ((BT_8723B_1ANT_BT_STATUS_NON_CONNECTED_IDLE == pCoexDm->btStatus) ? "non-connected idle" :
+ ((BT_8723B_1ANT_BT_STATUS_CONNECTED_IDLE == pCoexDm->btStatus) ? "connected-idle" : "busy")))),
+ pCoexSta->btRssi, pCoexSta->btRetryCnt, pCoexSta->popEventCnt
+ );
+ CL_PRINTF(cliBuf);
+
+ if (PopReportIn10s >= 5) {
+ pCoexSta->popEventCnt = 0;
+ PopReportIn10s = 0;
+ }
+
+
+ CL_SPRINTF(
+ cliBuf,
+ BT_TMP_BUF_SIZE,
+ "\r\n %-35s = %d / %d / %d / %d", "SCO/HID/PAN/A2DP", \
+ pBtLinkInfo->bScoExist,
+ pBtLinkInfo->bHidExist,
+ pBtLinkInfo->bPanExist,
+ pBtLinkInfo->bA2dpExist
+ );
+ CL_PRINTF(cliBuf);
+
+ if (pStackInfo->bProfileNotified) {
+ pBtCoexist->fBtcDispDbgMsg(pBtCoexist, BTC_DBG_DISP_BT_LINK_INFO);
+ } else {
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s", "BT Role", \
+ (pBtLinkInfo->bSlaveRole) ? "Slave" : "Master");
+ CL_PRINTF(cliBuf);
+ }
+
+
+ btInfoExt = pCoexSta->btInfoExt;
+ CL_SPRINTF(
+ cliBuf,
+ BT_TMP_BUF_SIZE,
+ "\r\n %-35s = %s", "BT Info A2DP rate", \
+ (btInfoExt&BIT0) ? "Basic rate" : "EDR rate"
+ );
+ CL_PRINTF(cliBuf);
+
+ for (i = 0; i < BT_INFO_SRC_8723B_1ANT_MAX; i++) {
+ if (pCoexSta->btInfoC2hCnt[i]) {
+ CL_SPRINTF(
+ cliBuf,
+ BT_TMP_BUF_SIZE,
+ "\r\n %-35s = %02x %02x %02x %02x %02x %02x %02x(%d)", GLBtInfoSrc8723b1Ant[i], \
+ pCoexSta->btInfoC2h[i][0], pCoexSta->btInfoC2h[i][1],
+ pCoexSta->btInfoC2h[i][2], pCoexSta->btInfoC2h[i][3],
+ pCoexSta->btInfoC2h[i][4], pCoexSta->btInfoC2h[i][5],
+ pCoexSta->btInfoC2h[i][6], pCoexSta->btInfoC2hCnt[i]
+ );
+ CL_PRINTF(cliBuf);
+ }
+ }
+ CL_SPRINTF(
+ cliBuf,
+ BT_TMP_BUF_SIZE,
+ "\r\n %-35s = %s/%s, (0x%x/0x%x)", "PS state, IPS/LPS, (lps/rpwm)", \
+ (pCoexSta->bUnderIps ? "IPS ON" : "IPS OFF"),
+ (pCoexSta->bUnderLps ? "LPS ON" : "LPS OFF"),
+ pBtCoexist->btInfo.lpsVal,
+ pBtCoexist->btInfo.rpwmVal
+ );
+ CL_PRINTF(cliBuf);
+ pBtCoexist->fBtcDispDbgMsg(pBtCoexist, BTC_DBG_DISP_FW_PWR_MODE_CMD);
+
+ if (!pBtCoexist->bManualControl) {
+ /* Sw mechanism */
+ CL_SPRINTF(
+ cliBuf,
+ BT_TMP_BUF_SIZE,
+ "\r\n %-35s", "============[Sw mechanism]============"
+ );
+ CL_PRINTF(cliBuf);
+
+ CL_SPRINTF(
+ cliBuf,
+ BT_TMP_BUF_SIZE,
+ "\r\n %-35s = %d", "SM[LowPenaltyRA]", \
+ pCoexDm->bCurLowPenaltyRa
+ );
+ CL_PRINTF(cliBuf);
+
+ CL_SPRINTF(
+ cliBuf,
+ BT_TMP_BUF_SIZE,
+ "\r\n %-35s = %s/ %s/ %d ", "DelBA/ BtCtrlAgg/ AggSize", \
+ (pBtCoexist->btInfo.bRejectAggPkt ? "Yes" : "No"),
+ (pBtCoexist->btInfo.bBtCtrlAggBufSize ? "Yes" : "No"),
+ pBtCoexist->btInfo.aggBufSize
+ );
+ CL_PRINTF(cliBuf);
+ CL_SPRINTF(
+ cliBuf,
+ BT_TMP_BUF_SIZE,
+ "\r\n %-35s = 0x%x ", "Rate Mask", \
+ pBtCoexist->btInfo.raMask
+ );
+ CL_PRINTF(cliBuf);
+
+ /* Fw mechanism */
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s", "============[Fw mechanism]============");
+ CL_PRINTF(cliBuf);
+
+ psTdmaCase = pCoexDm->curPsTdma;
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %02x %02x %02x %02x %02x case-%d (auto:%d)", "PS TDMA", \
+ pCoexDm->psTdmaPara[0], pCoexDm->psTdmaPara[1],
+ pCoexDm->psTdmaPara[2], pCoexDm->psTdmaPara[3],
+ pCoexDm->psTdmaPara[4], psTdmaCase, pCoexDm->bAutoTdmaAdjust);
+ CL_PRINTF(cliBuf);
+
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d", "Coex Table Type", \
+ pCoexSta->nCoexTableType);
+ CL_PRINTF(cliBuf);
+
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d", "IgnWlanAct", \
+ pCoexDm->bCurIgnoreWlanAct);
+ CL_PRINTF(cliBuf);
+
+ /*
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x ", "Latest error condition(should be 0)", \
+ pCoexDm->errorCondition);
+ CL_PRINTF(cliBuf);
+ */
+ }
+
+ /* Hw setting */
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s", "============[Hw setting]============");
+ CL_PRINTF(cliBuf);
+
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/0x%x/0x%x/0x%x", "backup ARFR1/ARFR2/RL/AMaxTime", \
+ pCoexDm->backupArfrCnt1, pCoexDm->backupArfrCnt2, pCoexDm->backupRetryLimit, pCoexDm->backupAmpduMaxTime);
+ CL_PRINTF(cliBuf);
+
+ u4Tmp[0] = pBtCoexist->fBtcRead4Byte(pBtCoexist, 0x430);
+ u4Tmp[1] = pBtCoexist->fBtcRead4Byte(pBtCoexist, 0x434);
+ u2Tmp[0] = pBtCoexist->fBtcRead2Byte(pBtCoexist, 0x42a);
+ u1Tmp[0] = pBtCoexist->fBtcRead1Byte(pBtCoexist, 0x456);
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/0x%x/0x%x/0x%x", "0x430/0x434/0x42a/0x456", \
+ u4Tmp[0], u4Tmp[1], u2Tmp[0], u1Tmp[0]);
+ CL_PRINTF(cliBuf);
+
+ u1Tmp[0] = pBtCoexist->fBtcRead1Byte(pBtCoexist, 0x778);
+ u4Tmp[0] = pBtCoexist->fBtcRead4Byte(pBtCoexist, 0x6cc);
+ u4Tmp[1] = pBtCoexist->fBtcRead4Byte(pBtCoexist, 0x880);
+ CL_SPRINTF(
+ cliBuf, BT_TMP_BUF_SIZE,
+ "\r\n %-35s = 0x%x/ 0x%x/ 0x%x", "0x778/0x6cc/0x880[29:25]", \
+ u1Tmp[0], u4Tmp[0], (u4Tmp[1]&0x3e000000) >> 25
+ );
+ CL_PRINTF(cliBuf);
+
+ u4Tmp[0] = pBtCoexist->fBtcRead4Byte(pBtCoexist, 0x948);
+ u1Tmp[0] = pBtCoexist->fBtcRead1Byte(pBtCoexist, 0x67);
+ u4Tmp[1] = pBtCoexist->fBtcRead4Byte(pBtCoexist, 0x764);
+ u1Tmp[1] = pBtCoexist->fBtcRead1Byte(pBtCoexist, 0x76e);
+ CL_SPRINTF(
+ cliBuf,
+ BT_TMP_BUF_SIZE,
+ "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x", "0x948/ 0x67[5] / 0x764 / 0x76e", \
+ u4Tmp[0], ((u1Tmp[0]&0x20) >> 5), (u4Tmp[1] & 0xffff), u1Tmp[1]
+ );
+ CL_PRINTF(cliBuf);
+
+ u4Tmp[0] = pBtCoexist->fBtcRead4Byte(pBtCoexist, 0x92c);
+ u4Tmp[1] = pBtCoexist->fBtcRead4Byte(pBtCoexist, 0x930);
+ u4Tmp[2] = pBtCoexist->fBtcRead4Byte(pBtCoexist, 0x944);
+ CL_SPRINTF(
+ cliBuf,
+ BT_TMP_BUF_SIZE,
+ "\r\n %-35s = 0x%x/ 0x%x/ 0x%x", "0x92c[1:0]/ 0x930[7:0]/0x944[1:0]", \
+ u4Tmp[0]&0x3, u4Tmp[1]&0xff, u4Tmp[2]&0x3
+ );
+ CL_PRINTF(cliBuf);
+
+ u1Tmp[0] = pBtCoexist->fBtcRead1Byte(pBtCoexist, 0x39);
+ u1Tmp[1] = pBtCoexist->fBtcRead1Byte(pBtCoexist, 0x40);
+ u4Tmp[0] = pBtCoexist->fBtcRead4Byte(pBtCoexist, 0x4c);
+ u1Tmp[2] = pBtCoexist->fBtcRead1Byte(pBtCoexist, 0x64);
+ CL_SPRINTF(
+ cliBuf,
+ BT_TMP_BUF_SIZE,
+ "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x", "0x38[11]/0x40/0x4c[24:23]/0x64[0]", \
+ ((u1Tmp[0] & 0x8)>>3),
+ u1Tmp[1],
+ ((u4Tmp[0]&0x01800000)>>23),
+ u1Tmp[2]&0x1
+ );
+ CL_PRINTF(cliBuf);
+
+ u4Tmp[0] = pBtCoexist->fBtcRead4Byte(pBtCoexist, 0x550);
+ u1Tmp[0] = pBtCoexist->fBtcRead1Byte(pBtCoexist, 0x522);
+ CL_SPRINTF(
+ cliBuf,
+ BT_TMP_BUF_SIZE,
+ "\r\n %-35s = 0x%x/ 0x%x", "0x550(bcn ctrl)/0x522", \
+ u4Tmp[0], u1Tmp[0]
+ );
+ CL_PRINTF(cliBuf);
+
+ u4Tmp[0] = pBtCoexist->fBtcRead4Byte(pBtCoexist, 0xc50);
+ u1Tmp[0] = pBtCoexist->fBtcRead1Byte(pBtCoexist, 0x49c);
+ CL_SPRINTF(
+ cliBuf,
+ BT_TMP_BUF_SIZE,
+ "\r\n %-35s = 0x%x/ 0x%x", "0xc50(dig)/0x49c(null-drop)", \
+ u4Tmp[0]&0xff, u1Tmp[0]
+ );
+ CL_PRINTF(cliBuf);
+
+ u4Tmp[0] = pBtCoexist->fBtcRead4Byte(pBtCoexist, 0xda0);
+ u4Tmp[1] = pBtCoexist->fBtcRead4Byte(pBtCoexist, 0xda4);
+ u4Tmp[2] = pBtCoexist->fBtcRead4Byte(pBtCoexist, 0xda8);
+ u4Tmp[3] = pBtCoexist->fBtcRead4Byte(pBtCoexist, 0xcf0);
+
+ u1Tmp[0] = pBtCoexist->fBtcRead1Byte(pBtCoexist, 0xa5b);
+ u1Tmp[1] = pBtCoexist->fBtcRead1Byte(pBtCoexist, 0xa5c);
+
+ faOfdm =
+ ((u4Tmp[0]&0xffff0000) >> 16) +
+ ((u4Tmp[1]&0xffff0000) >> 16) +
+ (u4Tmp[1] & 0xffff) + (u4Tmp[2] & 0xffff) + \
+ ((u4Tmp[3]&0xffff0000) >> 16) + (u4Tmp[3] & 0xffff);
+ faCck = (u1Tmp[0] << 8) + u1Tmp[1];
+
+ CL_SPRINTF(
+ cliBuf,
+ BT_TMP_BUF_SIZE,
+ "\r\n %-35s = 0x%x/ 0x%x/ 0x%x", "OFDM-CCA/OFDM-FA/CCK-FA", \
+ u4Tmp[0]&0xffff, faOfdm, faCck
+ );
+ CL_PRINTF(cliBuf);
+
+
+ CL_SPRINTF(
+ cliBuf,
+ BT_TMP_BUF_SIZE,
+ "\r\n %-35s = %d/ %d/ %d/ %d", "CRC_OK CCK/11g/11n/11n-Agg", \
+ pCoexSta->nCRCOK_CCK,
+ pCoexSta->nCRCOK_11g,
+ pCoexSta->nCRCOK_11n,
+ pCoexSta->nCRCOK_11nAgg
+ );
+ CL_PRINTF(cliBuf);
+
+ CL_SPRINTF(
+ cliBuf,
+ BT_TMP_BUF_SIZE,
+ "\r\n %-35s = %d/ %d/ %d/ %d", "CRC_Err CCK/11g/11n/11n-Agg", \
+ pCoexSta->nCRCErr_CCK,
+ pCoexSta->nCRCErr_11g,
+ pCoexSta->nCRCErr_11n,
+ pCoexSta->nCRCErr_11nAgg
+ );
+ CL_PRINTF(cliBuf);
+
+ u4Tmp[0] = pBtCoexist->fBtcRead4Byte(pBtCoexist, 0x6c0);
+ u4Tmp[1] = pBtCoexist->fBtcRead4Byte(pBtCoexist, 0x6c4);
+ u4Tmp[2] = pBtCoexist->fBtcRead4Byte(pBtCoexist, 0x6c8);
+ CL_SPRINTF(
+ cliBuf,
+ BT_TMP_BUF_SIZE,
+ "\r\n %-35s = 0x%x/ 0x%x/ 0x%x", "0x6c0/0x6c4/0x6c8(coexTable)", \
+ u4Tmp[0], u4Tmp[1], u4Tmp[2]);
+ CL_PRINTF(cliBuf);
+
+ CL_SPRINTF(
+ cliBuf,
+ BT_TMP_BUF_SIZE,
+ "\r\n %-35s = %d/ %d", "0x770(high-pri rx/tx)", \
+ pCoexSta->highPriorityRx, pCoexSta->highPriorityTx
+ );
+ CL_PRINTF(cliBuf);
+ CL_SPRINTF(
+ cliBuf,
+ BT_TMP_BUF_SIZE,
+ "\r\n %-35s = %d/ %d", "0x774(low-pri rx/tx)", \
+ pCoexSta->lowPriorityRx, pCoexSta->lowPriorityTx
+ );
+ CL_PRINTF(cliBuf);
+
+ pBtCoexist->fBtcDispDbgMsg(pBtCoexist, BTC_DBG_DISP_COEX_STATISTICS);
+}
+
+
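+/*
+ * IPS notify: on IPS enter, stop PS-TDMA, select coex table type 0 and hand
+ * the antenna to BT; on IPS leave, re-run the HW config, coex DM init and a
+ * BT info query. Skipped under manual control or when the coex DM is stopped.
+ */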
+void EXhalbtc8723b1ant_IpsNotify(PBTC_COEXIST pBtCoexist, u8 type)
+{
+ if (pBtCoexist->bManualControl || pBtCoexist->bStopCoexDm)
+ return;
+
+ if (BTC_IPS_ENTER == type) {
+ BTC_PRINT(
+ BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], IPS ENTER notify\n")
+ );
+ pCoexSta->bUnderIps = true;
+
+ halbtc8723b1ant_PsTdma(pBtCoexist, NORMAL_EXEC, false, 0);
+ halbtc8723b1ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 0);
+ halbtc8723b1ant_SetAntPath(pBtCoexist, BTC_ANT_PATH_BT, false, true);
+ } else if (BTC_IPS_LEAVE == type) {
+ BTC_PRINT(
+ BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], IPS LEAVE notify\n")
+ );
+ pCoexSta->bUnderIps = false;
+
+ halbtc8723b1ant_InitHwConfig(pBtCoexist, false, false);
+ halbtc8723b1ant_InitCoexDm(pBtCoexist);
+ halbtc8723b1ant_QueryBtInfo(pBtCoexist);
+ }
+}
+
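+/* LPS notify: only tracks whether Wi-Fi LPS is enabled in pCoexSta->bUnderLps. */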
+void EXhalbtc8723b1ant_LpsNotify(PBTC_COEXIST pBtCoexist, u8 type)
+{
+ if (pBtCoexist->bManualControl || pBtCoexist->bStopCoexDm)
+ return;
+
+ if (BTC_LPS_ENABLE == type) {
+ BTC_PRINT(
+ BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], LPS ENABLE notify\n")
+ );
+ pCoexSta->bUnderLps = true;
+ } else if (BTC_LPS_DISABLE == type) {
+ BTC_PRINT(
+ BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], LPS DISABLE notify\n")
+ );
+ pCoexSta->bUnderLps = false;
+ }
+}
+
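+/*
+ * Scan notify: flag Wi-Fi as a high priority task on scan start (and force
+ * PS-TDMA case 8 to avoid missing scan results), record the AP count on scan
+ * finish, then pick the matching coex action for multi-port, BT inquiry/page,
+ * BT HS or (non-)connected scan states.
+ */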
+void EXhalbtc8723b1ant_ScanNotify(PBTC_COEXIST pBtCoexist, u8 type)
+{
+ bool bWifiConnected = false, bBtHsOn = false;
+ u32 wifiLinkStatus = 0;
+ u32 numOfWifiLink = 0;
+ bool bBtCtrlAggBufSize = false;
+ u8 aggBufSize = 5;
+
+ u8 u1Tmpa, u1Tmpb;
+ u32 u4Tmp;
+
+ if (pBtCoexist->bManualControl || pBtCoexist->bStopCoexDm)
+ return;
+
+ if (BTC_SCAN_START == type) {
+ pCoexSta->bWiFiIsHighPriTask = true;
+ BTC_PRINT(
+ BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], SCAN START notify\n")
+ );
+
+ halbtc8723b1ant_PsTdma(pBtCoexist, FORCE_EXEC, false, 8); /* Force antenna setup for no scan result issue */
+ u4Tmp = pBtCoexist->fBtcRead4Byte(pBtCoexist, 0x948);
+ u1Tmpa = pBtCoexist->fBtcRead1Byte(pBtCoexist, 0x765);
+ u1Tmpb = pBtCoexist->fBtcRead1Byte(pBtCoexist, 0x67);
+
+
+ BTC_PRINT(
+ BTC_MSG_INTERFACE,
+ INTF_NOTIFY,
+ (
+ "[BTCoex], 0x948 = 0x%x, 0x765 = 0x%x, 0x67 = 0x%x\n",
+ u4Tmp,
+ u1Tmpa,
+ u1Tmpb
+ )
+ );
+ } else {
+ pCoexSta->bWiFiIsHighPriTask = false;
+ BTC_PRINT(
+ BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], SCAN FINISH notify\n")
+ );
+
+ pBtCoexist->fBtcGet(
+ pBtCoexist, BTC_GET_U1_AP_NUM, &pCoexSta->nScanAPNum
+ );
+ }
+
+ if (pBtCoexist->btInfo.bBtDisabled)
+ return;
+
+ pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_BL_HS_OPERATION, &bBtHsOn);
+ pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_BL_WIFI_CONNECTED, &bWifiConnected);
+
+ halbtc8723b1ant_QueryBtInfo(pBtCoexist);
+
+ pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_U4_WIFI_LINK_STATUS, &wifiLinkStatus);
+ numOfWifiLink = wifiLinkStatus>>16;
+
+ if (numOfWifiLink >= 2) {
+ halbtc8723b1ant_LimitedTx(pBtCoexist, NORMAL_EXEC, 0, 0, 0, 0);
+ halbtc8723b1ant_LimitedRx(
+ pBtCoexist, NORMAL_EXEC, false, bBtCtrlAggBufSize, aggBufSize
+ );
+ halbtc8723b1ant_ActionWifiMultiPort(pBtCoexist);
+ return;
+ }
+
+ if (pCoexSta->bC2hBtInquiryPage) {
+ halbtc8723b1ant_ActionBtInquiry(pBtCoexist);
+ return;
+ } else if (bBtHsOn) {
+ halbtc8723b1ant_ActionHs(pBtCoexist);
+ return;
+ }
+
+ if (BTC_SCAN_START == type) {
+ /* BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], SCAN START notify\n")); */
+ if (!bWifiConnected) /* non-connected scan */
+ halbtc8723b1ant_ActionWifiNotConnectedScan(pBtCoexist);
+ else /* wifi is connected */
+ halbtc8723b1ant_ActionWifiConnectedScan(pBtCoexist);
+ } else if (BTC_SCAN_FINISH == type) {
+ /* BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], SCAN FINISH notify\n")); */
+ if (!bWifiConnected) /* non-connected scan */
+ halbtc8723b1ant_ActionWifiNotConnected(pBtCoexist);
+ else
+ halbtc8723b1ant_ActionWifiConnected(pBtCoexist);
+ }
+}
+
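+/*
+ * Connect notify: raise Wi-Fi priority and reset the ARP counter on
+ * association start, then dispatch to the multi-port, BT inquiry/page, BT HS,
+ * assoc/auth or (non-)connected coex actions.
+ */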
+void EXhalbtc8723b1ant_ConnectNotify(PBTC_COEXIST pBtCoexist, u8 type)
+{
+ bool bWifiConnected = false, bBtHsOn = false;
+ u32 wifiLinkStatus = 0;
+ u32 numOfWifiLink = 0;
+ bool bBtCtrlAggBufSize = false;
+ u8 aggBufSize = 5;
+
+ if (
+ pBtCoexist->bManualControl ||
+ pBtCoexist->bStopCoexDm ||
+ pBtCoexist->btInfo.bBtDisabled
+ )
+ return;
+
+ if (BTC_ASSOCIATE_START == type) {
+ pCoexSta->bWiFiIsHighPriTask = true;
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], CONNECT START notify\n"));
+ pCoexDm->nArpCnt = 0;
+ } else {
+ pCoexSta->bWiFiIsHighPriTask = false;
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], CONNECT FINISH notify\n"));
+ /* pCoexDm->nArpCnt = 0; */
+ }
+
+ pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_U4_WIFI_LINK_STATUS, &wifiLinkStatus);
+ numOfWifiLink = wifiLinkStatus>>16;
+ if (numOfWifiLink >= 2) {
+ halbtc8723b1ant_LimitedTx(pBtCoexist, NORMAL_EXEC, 0, 0, 0, 0);
+ halbtc8723b1ant_LimitedRx(pBtCoexist, NORMAL_EXEC, false, bBtCtrlAggBufSize, aggBufSize);
+ halbtc8723b1ant_ActionWifiMultiPort(pBtCoexist);
+ return;
+ }
+
+ pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_BL_HS_OPERATION, &bBtHsOn);
+ if (pCoexSta->bC2hBtInquiryPage) {
+ halbtc8723b1ant_ActionBtInquiry(pBtCoexist);
+ return;
+ } else if (bBtHsOn) {
+ halbtc8723b1ant_ActionHs(pBtCoexist);
+ return;
+ }
+
+ if (BTC_ASSOCIATE_START == type) {
+ /* BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], CONNECT START notify\n")); */
+ halbtc8723b1ant_ActionWifiNotConnectedAssoAuth(pBtCoexist);
+ } else if (BTC_ASSOCIATE_FINISH == type) {
+ /* BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], CONNECT FINISH notify\n")); */
+
+ pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_BL_WIFI_CONNECTED, &bWifiConnected);
+ if (!bWifiConnected) /* non-connected scan */
+ halbtc8723b1ant_ActionWifiNotConnected(pBtCoexist);
+ else
+ halbtc8723b1ant_ActionWifiConnected(pBtCoexist);
+ }
+}
+
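+/*
+ * Media status notify: on connect, raise CCK Tx/Rx priority (except in 11b
+ * mode) and back up the ARFR/retry-limit/AMPDU-max-time registers; on
+ * disconnect, clear the CCK priority and the ARP counter. For 2.4G channels
+ * the Wi-Fi channel/bandwidth info is sent to BT via H2C 0x66.
+ */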
+void EXhalbtc8723b1ant_MediaStatusNotify(PBTC_COEXIST pBtCoexist, u8 type)
+{
+ u8 H2C_Parameter[3] = {0};
+ u32 wifiBw;
+ u8 wifiCentralChnl;
+ bool bWifiUnderBMode = false;
+
+ if (
+ pBtCoexist->bManualControl ||
+ pBtCoexist->bStopCoexDm ||
+ pBtCoexist->btInfo.bBtDisabled
+ )
+ return;
+
+ if (BTC_MEDIA_CONNECT == type) {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], MEDIA connect notify\n"));
+
+ pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_BL_WIFI_UNDER_B_MODE, &bWifiUnderBMode);
+
+		/* Set CCK Tx/Rx to high priority except in 11b mode */
+ if (bWifiUnderBMode) {
+ pBtCoexist->fBtcWrite1Byte(pBtCoexist, 0x6cd, 0x00); /* CCK Tx */
+ pBtCoexist->fBtcWrite1Byte(pBtCoexist, 0x6cf, 0x00); /* CCK Rx */
+ } else {
+ pBtCoexist->fBtcWrite1Byte(pBtCoexist, 0x6cd, 0x10); /* CCK Tx */
+ pBtCoexist->fBtcWrite1Byte(pBtCoexist, 0x6cf, 0x10); /* CCK Rx */
+ }
+
+ pCoexDm->backupArfrCnt1 = pBtCoexist->fBtcRead4Byte(pBtCoexist, 0x430);
+ pCoexDm->backupArfrCnt2 = pBtCoexist->fBtcRead4Byte(pBtCoexist, 0x434);
+ pCoexDm->backupRetryLimit = pBtCoexist->fBtcRead2Byte(pBtCoexist, 0x42a);
+ pCoexDm->backupAmpduMaxTime = pBtCoexist->fBtcRead1Byte(pBtCoexist, 0x456);
+ } else {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], MEDIA disconnect notify\n"));
+ pCoexDm->nArpCnt = 0;
+
+ pBtCoexist->fBtcWrite1Byte(pBtCoexist, 0x6cd, 0x0); /* CCK Tx */
+ pBtCoexist->fBtcWrite1Byte(pBtCoexist, 0x6cf, 0x0); /* CCK Rx */
+ }
+
+	/* only for 2.4G do we need to inform BT of the channel mask */
+ pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_U1_WIFI_CENTRAL_CHNL, &wifiCentralChnl);
+ if ((BTC_MEDIA_CONNECT == type) && (wifiCentralChnl <= 14)) {
+ /* H2C_Parameter[0] = 0x1; */
+ H2C_Parameter[0] = 0x0;
+ H2C_Parameter[1] = wifiCentralChnl;
+ pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw);
+
+ if (BTC_WIFI_BW_HT40 == wifiBw)
+ H2C_Parameter[2] = 0x30;
+ else
+ H2C_Parameter[2] = 0x20;
+ }
+
+ pCoexDm->wifiChnlInfo[0] = H2C_Parameter[0];
+ pCoexDm->wifiChnlInfo[1] = H2C_Parameter[1];
+ pCoexDm->wifiChnlInfo[2] = H2C_Parameter[2];
+
+ BTC_PRINT(
+ BTC_MSG_ALGORITHM,
+ ALGO_TRACE_FW_EXEC,
+ (
+ "[BTCoex], FW write 0x66 = 0x%x\n",
+ H2C_Parameter[0]<<16 | H2C_Parameter[1]<<8 | H2C_Parameter[2]
+ )
+ );
+
+ pBtCoexist->fBtcFillH2c(pBtCoexist, 0x66, 3, H2C_Parameter);
+}
+
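+/*
+ * Special packet notify: treat DHCP/EAPOL (and the first ARP packets after
+ * connect) as high priority Wi-Fi traffic and apply the connected
+ * special-packet action, unless multi-port, BT inquiry/page or BT HS handling
+ * takes over first.
+ */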
+void EXhalbtc8723b1ant_SpecialPacketNotify(PBTC_COEXIST pBtCoexist, u8 type)
+{
+ bool bBtHsOn = false;
+ u32 wifiLinkStatus = 0;
+ u32 numOfWifiLink = 0;
+ bool bBtCtrlAggBufSize = false;
+ u8 aggBufSize = 5;
+
+ if (
+ pBtCoexist->bManualControl ||
+ pBtCoexist->bStopCoexDm ||
+ pBtCoexist->btInfo.bBtDisabled
+ )
+ return;
+
+ if (
+ BTC_PACKET_DHCP == type ||
+ BTC_PACKET_EAPOL == type ||
+ BTC_PACKET_ARP == type
+ ) {
+ if (BTC_PACKET_ARP == type) {
+ BTC_PRINT(
+ BTC_MSG_INTERFACE,
+ INTF_NOTIFY,
+ ("[BTCoex], special Packet ARP notify\n")
+ );
+
+ pCoexDm->nArpCnt++;
+ BTC_PRINT(
+ BTC_MSG_INTERFACE,
+ INTF_NOTIFY,
+ ("[BTCoex], ARP Packet Count = %d\n", pCoexDm->nArpCnt)
+ );
+
+			if (pCoexDm->nArpCnt >= 10) /* if ARP pkt count > 10 after connect, do not go to ActionWifiConnectedSpecialPacket(pBtCoexist) */
+ pCoexSta->bWiFiIsHighPriTask = false;
+ else
+ pCoexSta->bWiFiIsHighPriTask = true;
+ } else {
+ pCoexSta->bWiFiIsHighPriTask = true;
+ BTC_PRINT(
+ BTC_MSG_INTERFACE,
+ INTF_NOTIFY,
+ ("[BTCoex], special Packet DHCP or EAPOL notify\n")
+ );
+ }
+ } else {
+ pCoexSta->bWiFiIsHighPriTask = false;
+ BTC_PRINT(
+ BTC_MSG_INTERFACE,
+ INTF_NOTIFY,
+ ("[BTCoex], special Packet [Type = %d] notify\n", type)
+ );
+ }
+
+ pCoexSta->specialPktPeriodCnt = 0;
+
+ pBtCoexist->fBtcGet(
+ pBtCoexist, BTC_GET_U4_WIFI_LINK_STATUS, &wifiLinkStatus
+ );
+ numOfWifiLink = wifiLinkStatus>>16;
+
+ if (numOfWifiLink >= 2) {
+ halbtc8723b1ant_LimitedTx(pBtCoexist, NORMAL_EXEC, 0, 0, 0, 0);
+ halbtc8723b1ant_LimitedRx(
+ pBtCoexist, NORMAL_EXEC, false, bBtCtrlAggBufSize, aggBufSize
+ );
+ halbtc8723b1ant_ActionWifiMultiPort(pBtCoexist);
+ return;
+ }
+
+ pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_BL_HS_OPERATION, &bBtHsOn);
+ if (pCoexSta->bC2hBtInquiryPage) {
+ halbtc8723b1ant_ActionBtInquiry(pBtCoexist);
+ return;
+ } else if (bBtHsOn) {
+ halbtc8723b1ant_ActionHs(pBtCoexist);
+ return;
+ }
+
+ if (
+ BTC_PACKET_DHCP == type ||
+ BTC_PACKET_EAPOL == type ||
+ ((BTC_PACKET_ARP == type) && (pCoexSta->bWiFiIsHighPriTask))
+ )
+ halbtc8723b1ant_ActionWifiConnectedSpecialPacket(pBtCoexist);
+}
+
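+/*
+ * BT info notify: parse the C2H BT info report, update retry count, RSSI,
+ * profile bits and the BT TRx mask, resend the Wi-Fi BW/channel to BT when
+ * requested, classify the BT status (idle/SCO busy/ACL busy) and re-run the
+ * coexist mechanism.
+ */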
+void EXhalbtc8723b1ant_BtInfoNotify(
+ PBTC_COEXIST pBtCoexist, u8 *tmpBuf, u8 length
+)
+{
+ u8 btInfo = 0;
+ u8 i, rspSource = 0;
+ bool bWifiConnected = false;
+ bool bBtBusy = false;
+
+ pCoexSta->bC2hBtInfoReqSent = false;
+
+ rspSource = tmpBuf[0]&0xf;
+ if (rspSource >= BT_INFO_SRC_8723B_1ANT_MAX)
+ rspSource = BT_INFO_SRC_8723B_1ANT_WIFI_FW;
+ pCoexSta->btInfoC2hCnt[rspSource]++;
+
+ BTC_PRINT(
+ BTC_MSG_INTERFACE,
+ INTF_NOTIFY,
+ ("[BTCoex], Bt info[%d], length =%d, hex data =[",
+ rspSource,
+ length)
+ );
+ for (i = 0; i < length; i++) {
+ pCoexSta->btInfoC2h[rspSource][i] = tmpBuf[i];
+ if (i == 1)
+ btInfo = tmpBuf[i];
+ if (i == length-1)
+ BTC_PRINT(
+ BTC_MSG_INTERFACE, INTF_NOTIFY, ("0x%02x]\n", tmpBuf[i])
+ );
+ else
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("0x%02x, ", tmpBuf[i]));
+ }
+
+ if (BT_INFO_SRC_8723B_1ANT_WIFI_FW != rspSource) {
+ pCoexSta->btRetryCnt = pCoexSta->btInfoC2h[rspSource][2]&0xf;
+
+ if (pCoexSta->btRetryCnt >= 1)
+ pCoexSta->popEventCnt++;
+
+ if (pCoexSta->btInfoC2h[rspSource][2]&0x20)
+ pCoexSta->bC2hBtPage = true;
+ else
+ pCoexSta->bC2hBtPage = false;
+
+ pCoexSta->btRssi = pCoexSta->btInfoC2h[rspSource][3]*2-90;
+ /* pCoexSta->btInfoC2h[rspSource][3]*2+10; */
+
+ pCoexSta->btInfoExt = pCoexSta->btInfoC2h[rspSource][4];
+
+ pCoexSta->bBtTxRxMask = (pCoexSta->btInfoC2h[rspSource][2]&0x40);
+ pBtCoexist->fBtcSet(pBtCoexist, BTC_SET_BL_BT_TX_RX_MASK, &pCoexSta->bBtTxRxMask);
+
+ if (!pCoexSta->bBtTxRxMask) {
+			/* BT info is responded by BT FW and BT RF REG 0x3C != 0x15 => Need to switch BT TRx Mask */
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Switch BT TRx Mask since BT RF REG 0x3C != 0x15\n"));
+ pBtCoexist->fBtcSetBtReg(pBtCoexist, BTC_BT_REG_RF, 0x3c, 0x15);
+ }
+
+ /* Here we need to resend some wifi info to BT */
+		/* because BT was reset and lost the info. */
+ if (pCoexSta->btInfoExt & BIT1) {
+ BTC_PRINT(
+ BTC_MSG_ALGORITHM,
+ ALGO_TRACE,
+ ("[BTCoex], BT ext info bit1 check, send wifi BW&Chnl to BT!!\n")
+ );
+ pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_BL_WIFI_CONNECTED, &bWifiConnected);
+ if (bWifiConnected)
+ EXhalbtc8723b1ant_MediaStatusNotify(pBtCoexist, BTC_MEDIA_CONNECT);
+ else
+ EXhalbtc8723b1ant_MediaStatusNotify(pBtCoexist, BTC_MEDIA_DISCONNECT);
+ }
+
+ if (pCoexSta->btInfoExt & BIT3) {
+ if (!pBtCoexist->bManualControl && !pBtCoexist->bStopCoexDm) {
+ BTC_PRINT(
+ BTC_MSG_ALGORITHM,
+ ALGO_TRACE,
+ ("[BTCoex], BT ext info bit3 check, set BT NOT to ignore Wlan active!!\n")
+ );
+ halbtc8723b1ant_IgnoreWlanAct(pBtCoexist, FORCE_EXEC, false);
+ }
+ } else {
+			/* BT already does NOT ignore Wlan active, do nothing here. */
+ }
+ }
+
+ /* check BIT2 first ==> check if bt is under inquiry or page scan */
+ if (btInfo & BT_INFO_8723B_1ANT_B_INQ_PAGE)
+ pCoexSta->bC2hBtInquiryPage = true;
+ else
+ pCoexSta->bC2hBtInquiryPage = false;
+
+ /* set link exist status */
+ if (!(btInfo&BT_INFO_8723B_1ANT_B_CONNECTION)) {
+ pCoexSta->bBtLinkExist = false;
+ pCoexSta->bPanExist = false;
+ pCoexSta->bA2dpExist = false;
+ pCoexSta->bHidExist = false;
+ pCoexSta->bScoExist = false;
+ } else { /* connection exists */
+ pCoexSta->bBtLinkExist = true;
+ if (btInfo & BT_INFO_8723B_1ANT_B_FTP)
+ pCoexSta->bPanExist = true;
+ else
+ pCoexSta->bPanExist = false;
+
+ if (btInfo & BT_INFO_8723B_1ANT_B_A2DP)
+ pCoexSta->bA2dpExist = true;
+ else
+ pCoexSta->bA2dpExist = false;
+
+ if (btInfo & BT_INFO_8723B_1ANT_B_HID)
+ pCoexSta->bHidExist = true;
+ else
+ pCoexSta->bHidExist = false;
+
+ if (btInfo & BT_INFO_8723B_1ANT_B_SCO_ESCO)
+ pCoexSta->bScoExist = true;
+ else
+ pCoexSta->bScoExist = false;
+ }
+
+ halbtc8723b1ant_UpdateBtLinkInfo(pBtCoexist);
+
+	btInfo = btInfo & 0x1f; /* mask profile bit for connected-idle identification (for CSR case: A2DP idle --> 0x41) */
+
+ if (!(btInfo&BT_INFO_8723B_1ANT_B_CONNECTION)) {
+ pCoexDm->btStatus = BT_8723B_1ANT_BT_STATUS_NON_CONNECTED_IDLE;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BtInfoNotify(), BT Non-Connected idle!!!\n"));
+ } else if (btInfo == BT_INFO_8723B_1ANT_B_CONNECTION) {
+		/* connection exists but is not busy */
+ pCoexDm->btStatus = BT_8723B_1ANT_BT_STATUS_CONNECTED_IDLE;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BtInfoNotify(), BT Connected-idle!!!\n"));
+ } else if (
+ (btInfo&BT_INFO_8723B_1ANT_B_SCO_ESCO) ||
+ (btInfo&BT_INFO_8723B_1ANT_B_SCO_BUSY)
+ ) {
+ pCoexDm->btStatus = BT_8723B_1ANT_BT_STATUS_SCO_BUSY;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BtInfoNotify(), BT SCO busy!!!\n"));
+ } else if (btInfo&BT_INFO_8723B_1ANT_B_ACL_BUSY) {
+ if (BT_8723B_1ANT_BT_STATUS_ACL_BUSY != pCoexDm->btStatus)
+ pCoexDm->bAutoTdmaAdjust = false;
+
+ pCoexDm->btStatus = BT_8723B_1ANT_BT_STATUS_ACL_BUSY;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BtInfoNotify(), BT ACL busy!!!\n"));
+ } else {
+ pCoexDm->btStatus = BT_8723B_1ANT_BT_STATUS_MAX;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BtInfoNotify(), BT Non-Defined state!!!\n"));
+ }
+
+ if (
+ (BT_8723B_1ANT_BT_STATUS_ACL_BUSY == pCoexDm->btStatus) ||
+ (BT_8723B_1ANT_BT_STATUS_SCO_BUSY == pCoexDm->btStatus) ||
+ (BT_8723B_1ANT_BT_STATUS_ACL_SCO_BUSY == pCoexDm->btStatus)
+ )
+ bBtBusy = true;
+ else
+ bBtBusy = false;
+ pBtCoexist->fBtcSet(pBtCoexist, BTC_SET_BL_BT_TRAFFIC_BUSY, &bBtBusy);
+
+ halbtc8723b1ant_RunCoexistMechanism(pBtCoexist);
+}
+
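+/*
+ * Halt notify: restore native power save, stop PS-TDMA, hand the antenna to
+ * BT, tell BT to ignore WLAN activity, report media disconnect and stop the
+ * coex DM before the driver goes down.
+ */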
+void EXhalbtc8723b1ant_HaltNotify(PBTC_COEXIST pBtCoexist)
+{
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], Halt notify\n"));
+
+ halbtc8723b1ant_PowerSaveState(pBtCoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0);
+ halbtc8723b1ant_PsTdma(pBtCoexist, FORCE_EXEC, false, 0);
+ halbtc8723b1ant_SetAntPath(pBtCoexist, BTC_ANT_PATH_BT, false, true);
+
+ halbtc8723b1ant_IgnoreWlanAct(pBtCoexist, FORCE_EXEC, true);
+
+ EXhalbtc8723b1ant_MediaStatusNotify(pBtCoexist, BTC_MEDIA_DISCONNECT);
+
+ pBtCoexist->bStopCoexDm = true;
+}
+
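+/*
+ * PNP notify: on sleep, park the coex state (native PS, TDMA off, coex table
+ * type 2, antenna to BT) and stop the coex DM; on wake-up, re-run the HW
+ * config, coex DM init and a BT info query.
+ */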
+void EXhalbtc8723b1ant_PnpNotify(PBTC_COEXIST pBtCoexist, u8 pnpState)
+{
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], Pnp notify\n"));
+
+ if (BTC_WIFI_PNP_SLEEP == pnpState) {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], Pnp notify to SLEEP\n"));
+
+ halbtc8723b1ant_PowerSaveState(pBtCoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0);
+ halbtc8723b1ant_PsTdma(pBtCoexist, NORMAL_EXEC, false, 0);
+ halbtc8723b1ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 2);
+ halbtc8723b1ant_SetAntPath(pBtCoexist, BTC_ANT_PATH_BT, false, true);
+
+ pBtCoexist->bStopCoexDm = true;
+ } else if (BTC_WIFI_PNP_WAKE_UP == pnpState) {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], Pnp notify to WAKE UP\n"));
+ pBtCoexist->bStopCoexDm = false;
+ halbtc8723b1ant_InitHwConfig(pBtCoexist, false, false);
+ halbtc8723b1ant_InitCoexDm(pBtCoexist);
+ halbtc8723b1ant_QueryBtInfo(pBtCoexist);
+ }
+}
+
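+/*
+ * Periodical: print version info for the first few calls, refresh the BT and
+ * Wi-Fi counters, and re-run the coexist mechanism when the Wi-Fi status has
+ * changed or auto TDMA adjustment is active.
+ */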
+void EXhalbtc8723b1ant_Periodical(PBTC_COEXIST pBtCoexist)
+{
+	static u8 disVerInfoCnt;
+ u32 fwVer = 0, btPatchVer = 0;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], ==========================Periodical ===========================\n"));
+
+ if (disVerInfoCnt <= 5) {
+ disVerInfoCnt += 1;
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, ("[BTCoex], ****************************************************************\n"));
+ pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_U4_BT_PATCH_VER, &btPatchVer);
+ pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_U4_WIFI_FW_VER, &fwVer);
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, ("[BTCoex], CoexVer/ FwVer/ PatchVer = %d_%x/ 0x%x/ 0x%x(%d)\n", \
+ GLCoexVerDate8723b1Ant, GLCoexVer8723b1Ant, fwVer, btPatchVer, btPatchVer));
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, ("[BTCoex], ****************************************************************\n"));
+ }
+
+ halbtc8723b1ant_MonitorBtCtr(pBtCoexist);
+ halbtc8723b1ant_MonitorWiFiCtr(pBtCoexist);
+
+ if (
+ halbtc8723b1ant_IsWifiStatusChanged(pBtCoexist) ||
+ pCoexDm->bAutoTdmaAdjust
+ )
+ halbtc8723b1ant_RunCoexistMechanism(pBtCoexist);
+
+ pCoexSta->specialPktPeriodCnt++;
+}
diff --git a/drivers/staging/rtl8723bs/hal/HalBtc8723b1Ant.h b/drivers/staging/rtl8723bs/hal/HalBtc8723b1Ant.h
new file mode 100644
index 000000000000..880bd63d36a5
--- /dev/null
+++ b/drivers/staging/rtl8723bs/hal/HalBtc8723b1Ant.h
@@ -0,0 +1,193 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+/* The following is for 8723B 1ANT BT Co-exist definition */
+#define BT_INFO_8723B_1ANT_B_FTP BIT7
+#define BT_INFO_8723B_1ANT_B_A2DP BIT6
+#define BT_INFO_8723B_1ANT_B_HID BIT5
+#define BT_INFO_8723B_1ANT_B_SCO_BUSY BIT4
+#define BT_INFO_8723B_1ANT_B_ACL_BUSY BIT3
+#define BT_INFO_8723B_1ANT_B_INQ_PAGE BIT2
+#define BT_INFO_8723B_1ANT_B_SCO_ESCO BIT1
+#define BT_INFO_8723B_1ANT_B_CONNECTION BIT0
+
+#define BT_INFO_8723B_1ANT_A2DP_BASIC_RATE(_BT_INFO_EXT_) \
+ (((_BT_INFO_EXT_&BIT0)) ? true : false)
+
+#define BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT 2
+
+#define BT_8723B_1ANT_WIFI_NOISY_THRESH 30 /* max: 255 */
+
+typedef enum _BT_INFO_SRC_8723B_1ANT {
+ BT_INFO_SRC_8723B_1ANT_WIFI_FW = 0x0,
+ BT_INFO_SRC_8723B_1ANT_BT_RSP = 0x1,
+ BT_INFO_SRC_8723B_1ANT_BT_ACTIVE_SEND = 0x2,
+ BT_INFO_SRC_8723B_1ANT_MAX
+} BT_INFO_SRC_8723B_1ANT, *PBT_INFO_SRC_8723B_1ANT;
+
+typedef enum _BT_8723B_1ANT_BT_STATUS {
+ BT_8723B_1ANT_BT_STATUS_NON_CONNECTED_IDLE = 0x0,
+ BT_8723B_1ANT_BT_STATUS_CONNECTED_IDLE = 0x1,
+ BT_8723B_1ANT_BT_STATUS_INQ_PAGE = 0x2,
+ BT_8723B_1ANT_BT_STATUS_ACL_BUSY = 0x3,
+ BT_8723B_1ANT_BT_STATUS_SCO_BUSY = 0x4,
+ BT_8723B_1ANT_BT_STATUS_ACL_SCO_BUSY = 0x5,
+ BT_8723B_1ANT_BT_STATUS_MAX
+} BT_8723B_1ANT_BT_STATUS, *PBT_8723B_1ANT_BT_STATUS;
+
+typedef enum _BT_8723B_1ANT_WIFI_STATUS {
+ BT_8723B_1ANT_WIFI_STATUS_NON_CONNECTED_IDLE = 0x0,
+ BT_8723B_1ANT_WIFI_STATUS_NON_CONNECTED_ASSO_AUTH_SCAN = 0x1,
+ BT_8723B_1ANT_WIFI_STATUS_CONNECTED_SCAN = 0x2,
+ BT_8723B_1ANT_WIFI_STATUS_CONNECTED_SPECIAL_PKT = 0x3,
+ BT_8723B_1ANT_WIFI_STATUS_CONNECTED_IDLE = 0x4,
+ BT_8723B_1ANT_WIFI_STATUS_CONNECTED_BUSY = 0x5,
+ BT_8723B_1ANT_WIFI_STATUS_MAX
+} BT_8723B_1ANT_WIFI_STATUS, *PBT_8723B_1ANT_WIFI_STATUS;
+
+typedef enum _BT_8723B_1ANT_COEX_ALGO {
+ BT_8723B_1ANT_COEX_ALGO_UNDEFINED = 0x0,
+ BT_8723B_1ANT_COEX_ALGO_SCO = 0x1,
+ BT_8723B_1ANT_COEX_ALGO_HID = 0x2,
+ BT_8723B_1ANT_COEX_ALGO_A2DP = 0x3,
+ BT_8723B_1ANT_COEX_ALGO_A2DP_PANHS = 0x4,
+ BT_8723B_1ANT_COEX_ALGO_PANEDR = 0x5,
+ BT_8723B_1ANT_COEX_ALGO_PANHS = 0x6,
+ BT_8723B_1ANT_COEX_ALGO_PANEDR_A2DP = 0x7,
+ BT_8723B_1ANT_COEX_ALGO_PANEDR_HID = 0x8,
+ BT_8723B_1ANT_COEX_ALGO_HID_A2DP_PANEDR = 0x9,
+ BT_8723B_1ANT_COEX_ALGO_HID_A2DP = 0xa,
+ BT_8723B_1ANT_COEX_ALGO_MAX = 0xb,
+} BT_8723B_1ANT_COEX_ALGO, *PBT_8723B_1ANT_COEX_ALGO;
+
+typedef struct _COEX_DM_8723B_1ANT {
+ /* fw mechanism */
+ bool bCurIgnoreWlanAct;
+ bool bPreIgnoreWlanAct;
+ u8 prePsTdma;
+ u8 curPsTdma;
+ u8 psTdmaPara[5];
+ u8 psTdmaDuAdjType;
+ bool bAutoTdmaAdjust;
+ bool bPrePsTdmaOn;
+ bool bCurPsTdmaOn;
+ bool bPreBtAutoReport;
+ bool bCurBtAutoReport;
+ u8 preLps;
+ u8 curLps;
+ u8 preRpwm;
+ u8 curRpwm;
+
+ /* sw mechanism */
+ bool bPreLowPenaltyRa;
+ bool bCurLowPenaltyRa;
+ u32 preVal0x6c0;
+ u32 curVal0x6c0;
+ u32 preVal0x6c4;
+ u32 curVal0x6c4;
+ u32 preVal0x6c8;
+ u32 curVal0x6c8;
+ u8 preVal0x6cc;
+ u8 curVal0x6cc;
+ bool bLimitedDig;
+
+ u32 backupArfrCnt1; /* Auto Rate Fallback Retry cnt */
+ u32 backupArfrCnt2; /* Auto Rate Fallback Retry cnt */
+ u16 backupRetryLimit;
+ u8 backupAmpduMaxTime;
+
+ /* algorithm related */
+ u8 preAlgorithm;
+ u8 curAlgorithm;
+ u8 btStatus;
+ u8 wifiChnlInfo[3];
+
+ u32 preRaMask;
+ u32 curRaMask;
+ u8 preArfrType;
+ u8 curArfrType;
+ u8 preRetryLimitType;
+ u8 curRetryLimitType;
+ u8 preAmpduTimeType;
+ u8 curAmpduTimeType;
+ u32 nArpCnt;
+
+ u8 errorCondition;
+} COEX_DM_8723B_1ANT, *PCOEX_DM_8723B_1ANT;
+
+typedef struct _COEX_STA_8723B_1ANT {
+ bool bBtLinkExist;
+ bool bScoExist;
+ bool bA2dpExist;
+ bool bHidExist;
+ bool bPanExist;
+
+ bool bUnderLps;
+ bool bUnderIps;
+ u32 specialPktPeriodCnt;
+ u32 highPriorityTx;
+ u32 highPriorityRx;
+ u32 lowPriorityTx;
+ u32 lowPriorityRx;
+ s8 btRssi;
+ bool bBtTxRxMask;
+ u8 preBtRssiState;
+ u8 preWifiRssiState[4];
+ bool bC2hBtInfoReqSent;
+ u8 btInfoC2h[BT_INFO_SRC_8723B_1ANT_MAX][10];
+ u32 btInfoC2hCnt[BT_INFO_SRC_8723B_1ANT_MAX];
+ bool bC2hBtInquiryPage;
+ bool bC2hBtPage; /* Add for win8.1 page out issue */
+ bool bWiFiIsHighPriTask; /* Add for win8.1 page out issue */
+ u8 btRetryCnt;
+ u8 btInfoExt;
+ u32 popEventCnt;
+ u8 nScanAPNum;
+
+ u32 nCRCOK_CCK;
+ u32 nCRCOK_11g;
+ u32 nCRCOK_11n;
+ u32 nCRCOK_11nAgg;
+
+ u32 nCRCErr_CCK;
+ u32 nCRCErr_11g;
+ u32 nCRCErr_11n;
+ u32 nCRCErr_11nAgg;
+
+ bool bCCKLock;
+ bool bPreCCKLock;
+ u8 nCoexTableType;
+
+ bool bForceLpsOn;
+} COEX_STA_8723B_1ANT, *PCOEX_STA_8723B_1ANT;
+
+/* */
+/* The following are the interfaces which notify the coex module. */
+/* */
+void EXhalbtc8723b1ant_PowerOnSetting(PBTC_COEXIST pBtCoexist);
+void EXhalbtc8723b1ant_InitHwConfig(PBTC_COEXIST pBtCoexist, bool bWifiOnly);
+void EXhalbtc8723b1ant_InitCoexDm(PBTC_COEXIST pBtCoexist);
+void EXhalbtc8723b1ant_IpsNotify(PBTC_COEXIST pBtCoexist, u8 type);
+void EXhalbtc8723b1ant_LpsNotify(PBTC_COEXIST pBtCoexist, u8 type);
+void EXhalbtc8723b1ant_ScanNotify(PBTC_COEXIST pBtCoexist, u8 type);
+void EXhalbtc8723b1ant_ConnectNotify(PBTC_COEXIST pBtCoexist, u8 type);
+void EXhalbtc8723b1ant_MediaStatusNotify(PBTC_COEXIST pBtCoexist, u8 type);
+void EXhalbtc8723b1ant_SpecialPacketNotify(PBTC_COEXIST pBtCoexist, u8 type);
+void EXhalbtc8723b1ant_BtInfoNotify(
+ PBTC_COEXIST pBtCoexist, u8 *tmpBuf, u8 length
+);
+void EXhalbtc8723b1ant_HaltNotify(PBTC_COEXIST pBtCoexist);
+void EXhalbtc8723b1ant_PnpNotify(PBTC_COEXIST pBtCoexist, u8 pnpState);
+void EXhalbtc8723b1ant_Periodical(PBTC_COEXIST pBtCoexist);
+void EXhalbtc8723b1ant_DisplayCoexInfo(PBTC_COEXIST pBtCoexist);
diff --git a/drivers/staging/rtl8723bs/hal/HalBtc8723b2Ant.c b/drivers/staging/rtl8723bs/hal/HalBtc8723b2Ant.c
new file mode 100644
index 000000000000..f5bc511d02f2
--- /dev/null
+++ b/drivers/staging/rtl8723bs/hal/HalBtc8723b2Ant.c
@@ -0,0 +1,3729 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+
+#include "Mp_Precomp.h"
+
+/* Global variables (file-scope statics) */
+static COEX_DM_8723B_2ANT GLCoexDm8723b2Ant;
+static PCOEX_DM_8723B_2ANT pCoexDm = &GLCoexDm8723b2Ant;
+static COEX_STA_8723B_2ANT GLCoexSta8723b2Ant;
+static PCOEX_STA_8723B_2ANT pCoexSta = &GLCoexSta8723b2Ant;
+
+static const char *const GLBtInfoSrc8723b2Ant[] = {
+ "BT Info[wifi fw]",
+ "BT Info[bt rsp]",
+ "BT Info[bt auto report]",
+};
+
+static u32 GLCoexVerDate8723b2Ant = 20131211;
+static u32 GLCoexVer8723b2Ant = 0x40;
+
+/* local functions start with halbtc8723b2ant_ */
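+/*
+ * Map the cached BT RSSI onto a 2- or 3-level state (low/medium/high) with a
+ * hysteresis of BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT on the way up.
+ */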
+static u8 halbtc8723b2ant_BtRssiState(
+ u8 levelNum, u8 rssiThresh, u8 rssiThresh1
+)
+{
+ s32 btRssi = 0;
+ u8 btRssiState = pCoexSta->preBtRssiState;
+
+ btRssi = pCoexSta->btRssi;
+
+ if (levelNum == 2) {
+ if (
+ (pCoexSta->preBtRssiState == BTC_RSSI_STATE_LOW) ||
+ (pCoexSta->preBtRssiState == BTC_RSSI_STATE_STAY_LOW)
+ ) {
+ if (btRssi >= (rssiThresh+BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT)) {
+ btRssiState = BTC_RSSI_STATE_HIGH;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state switch to High\n"));
+ } else {
+ btRssiState = BTC_RSSI_STATE_STAY_LOW;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state stay at Low\n"));
+ }
+ } else {
+ if (btRssi < rssiThresh) {
+ btRssiState = BTC_RSSI_STATE_LOW;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state switch to Low\n"));
+ } else {
+ btRssiState = BTC_RSSI_STATE_STAY_HIGH;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state stay at High\n"));
+ }
+ }
+ } else if (levelNum == 3) {
+ if (rssiThresh > rssiThresh1) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi thresh error!!\n"));
+ return pCoexSta->preBtRssiState;
+ }
+
+ if (
+ (pCoexSta->preBtRssiState == BTC_RSSI_STATE_LOW) ||
+ (pCoexSta->preBtRssiState == BTC_RSSI_STATE_STAY_LOW)
+ ) {
+ if (btRssi >= (rssiThresh+BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT)) {
+ btRssiState = BTC_RSSI_STATE_MEDIUM;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state switch to Medium\n"));
+ } else {
+ btRssiState = BTC_RSSI_STATE_STAY_LOW;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state stay at Low\n"));
+ }
+ } else if (
+ (pCoexSta->preBtRssiState == BTC_RSSI_STATE_MEDIUM) ||
+ (pCoexSta->preBtRssiState == BTC_RSSI_STATE_STAY_MEDIUM)
+ ) {
+ if (btRssi >= (rssiThresh1+BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT)) {
+ btRssiState = BTC_RSSI_STATE_HIGH;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state switch to High\n"));
+ } else if (btRssi < rssiThresh) {
+ btRssiState = BTC_RSSI_STATE_LOW;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state switch to Low\n"));
+ } else {
+ btRssiState = BTC_RSSI_STATE_STAY_MEDIUM;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state stay at Medium\n"));
+ }
+ } else {
+ if (btRssi < rssiThresh1) {
+ btRssiState = BTC_RSSI_STATE_MEDIUM;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state switch to Medium\n"));
+ } else {
+ btRssiState = BTC_RSSI_STATE_STAY_HIGH;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state stay at High\n"));
+ }
+ }
+ }
+
+ pCoexSta->preBtRssiState = btRssiState;
+
+ return btRssiState;
+}
+
+static u8 halbtc8723b2ant_WifiRssiState(
+ PBTC_COEXIST pBtCoexist,
+ u8 index,
+ u8 levelNum,
+ u8 rssiThresh,
+ u8 rssiThresh1
+)
+{
+ s32 wifiRssi = 0;
+ u8 wifiRssiState = pCoexSta->preWifiRssiState[index];
+
+ pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_S4_WIFI_RSSI, &wifiRssi);
+
+ if (levelNum == 2) {
+ if (
+ (pCoexSta->preWifiRssiState[index] == BTC_RSSI_STATE_LOW) ||
+ (pCoexSta->preWifiRssiState[index] == BTC_RSSI_STATE_STAY_LOW)
+ ) {
+ if (wifiRssi >= (rssiThresh+BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT)) {
+ wifiRssiState = BTC_RSSI_STATE_HIGH;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state switch to High\n"));
+ } else {
+ wifiRssiState = BTC_RSSI_STATE_STAY_LOW;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state stay at Low\n"));
+ }
+ } else {
+ if (wifiRssi < rssiThresh) {
+ wifiRssiState = BTC_RSSI_STATE_LOW;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state switch to Low\n"));
+ } else {
+ wifiRssiState = BTC_RSSI_STATE_STAY_HIGH;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state stay at High\n"));
+ }
+ }
+ } else if (levelNum == 3) {
+ if (rssiThresh > rssiThresh1) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI thresh error!!\n"));
+ return pCoexSta->preWifiRssiState[index];
+ }
+
+ if (
+ (pCoexSta->preWifiRssiState[index] == BTC_RSSI_STATE_LOW) ||
+ (pCoexSta->preWifiRssiState[index] == BTC_RSSI_STATE_STAY_LOW)
+ ) {
+ if (wifiRssi >= (rssiThresh+BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT)) {
+ wifiRssiState = BTC_RSSI_STATE_MEDIUM;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state switch to Medium\n"));
+ } else {
+ wifiRssiState = BTC_RSSI_STATE_STAY_LOW;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state stay at Low\n"));
+ }
+ } else if (
+ (pCoexSta->preWifiRssiState[index] == BTC_RSSI_STATE_MEDIUM) ||
+ (pCoexSta->preWifiRssiState[index] == BTC_RSSI_STATE_STAY_MEDIUM)
+ ) {
+ if (wifiRssi >= (rssiThresh1+BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT)) {
+ wifiRssiState = BTC_RSSI_STATE_HIGH;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state switch to High\n"));
+ } else if (wifiRssi < rssiThresh) {
+ wifiRssiState = BTC_RSSI_STATE_LOW;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state switch to Low\n"));
+ } else {
+ wifiRssiState = BTC_RSSI_STATE_STAY_MEDIUM;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state stay at Medium\n"));
+ }
+ } else {
+ if (wifiRssi < rssiThresh1) {
+ wifiRssiState = BTC_RSSI_STATE_MEDIUM;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state switch to Medium\n"));
+ } else {
+ wifiRssiState = BTC_RSSI_STATE_STAY_HIGH;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state stay at High\n"));
+ }
+ }
+ }
+
+ pCoexSta->preWifiRssiState[index] = wifiRssiState;
+
+ return wifiRssiState;
+}
+
+static void halbtc8723b2ant_LimitedRx(
+ PBTC_COEXIST pBtCoexist,
+ bool bForceExec,
+ bool bRejApAggPkt,
+ bool bBtCtrlAggBufSize,
+ u8 aggBufSize
+)
+{
+ bool bRejectRxAgg = bRejApAggPkt;
+ bool bBtCtrlRxAggSize = bBtCtrlAggBufSize;
+ u8 rxAggSize = aggBufSize;
+
+ /* */
+ /* Rx Aggregation related setting */
+ /* */
+ pBtCoexist->fBtcSet(pBtCoexist, BTC_SET_BL_TO_REJ_AP_AGG_PKT, &bRejectRxAgg);
+	/* decide whether BT controls the aggregation buf size */
+ pBtCoexist->fBtcSet(pBtCoexist, BTC_SET_BL_BT_CTRL_AGG_SIZE, &bBtCtrlRxAggSize);
+	/* aggregation buf size, only takes effect when BT controls the Rx aggregation size. */
+ pBtCoexist->fBtcSet(pBtCoexist, BTC_SET_U1_AGG_BUF_SIZE, &rxAggSize);
+	/* actually apply the aggregation setting */
+ pBtCoexist->fBtcSet(pBtCoexist, BTC_SET_ACT_AGGREGATE_CTRL, NULL);
+}
+
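+/*
+ * Read the BT high/low priority Tx/Rx counters from 0x770/0x774, cache them
+ * in pCoexSta and reset the hardware counters.
+ */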
+static void halbtc8723b2ant_MonitorBtCtr(PBTC_COEXIST pBtCoexist)
+{
+ u32 regHPTxRx, regLPTxRx, u4Tmp;
+ u32 regHPTx = 0, regHPRx = 0, regLPTx = 0, regLPRx = 0;
+
+ regHPTxRx = 0x770;
+ regLPTxRx = 0x774;
+
+ u4Tmp = pBtCoexist->fBtcRead4Byte(pBtCoexist, regHPTxRx);
+ regHPTx = u4Tmp & bMaskLWord;
+ regHPRx = (u4Tmp & bMaskHWord)>>16;
+
+ u4Tmp = pBtCoexist->fBtcRead4Byte(pBtCoexist, regLPTxRx);
+ regLPTx = u4Tmp & bMaskLWord;
+ regLPRx = (u4Tmp & bMaskHWord)>>16;
+
+ pCoexSta->highPriorityTx = regHPTx;
+ pCoexSta->highPriorityRx = regHPRx;
+ pCoexSta->lowPriorityTx = regLPTx;
+ pCoexSta->lowPriorityRx = regLPRx;
+
+ BTC_PRINT(
+ BTC_MSG_ALGORITHM,
+ ALGO_BT_MONITOR,
+ (
+ "[BTCoex], High Priority Tx/Rx (reg 0x%x) = 0x%x(%d)/0x%x(%d)\n",
+ regHPTxRx,
+ regHPTx,
+ regHPTx,
+ regHPRx,
+ regHPRx
+ )
+ );
+ BTC_PRINT(
+ BTC_MSG_ALGORITHM,
+ ALGO_BT_MONITOR,
+ (
+ "[BTCoex], Low Priority Tx/Rx (reg 0x%x) = 0x%x(%d)/0x%x(%d)\n",
+ regLPTxRx,
+ regLPTx,
+ regLPTx,
+ regLPRx,
+ regLPRx
+ )
+ );
+
+ /* reset counter */
+ pBtCoexist->fBtcWrite1Byte(pBtCoexist, 0x76e, 0xc);
+}
+
+static void halbtc8723b2ant_QueryBtInfo(PBTC_COEXIST pBtCoexist)
+{
+ u8 H2C_Parameter[1] = {0};
+
+ pCoexSta->bC2hBtInfoReqSent = true;
+
+ H2C_Parameter[0] |= BIT0; /* trigger */
+
+ BTC_PRINT(
+ BTC_MSG_ALGORITHM,
+ ALGO_TRACE_FW_EXEC,
+ ("[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n", H2C_Parameter[0])
+ );
+
+ pBtCoexist->fBtcFillH2c(pBtCoexist, 0x61, 1, H2C_Parameter);
+}
+
+static bool halbtc8723b2ant_IsWifiStatusChanged(PBTC_COEXIST pBtCoexist)
+{
+ static bool bPreWifiBusy = false, bPreUnder4way = false, bPreBtHsOn = false;
+ bool bWifiBusy = false, bUnder4way = false, bBtHsOn = false;
+ bool bWifiConnected = false;
+
+ pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_BL_WIFI_CONNECTED, &bWifiConnected);
+ pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_BL_WIFI_BUSY, &bWifiBusy);
+ pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_BL_HS_OPERATION, &bBtHsOn);
+ pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_BL_WIFI_4_WAY_PROGRESS, &bUnder4way);
+
+ if (bWifiConnected) {
+ if (bWifiBusy != bPreWifiBusy) {
+ bPreWifiBusy = bWifiBusy;
+ return true;
+ }
+
+ if (bUnder4way != bPreUnder4way) {
+ bPreUnder4way = bUnder4way;
+ return true;
+ }
+
+ if (bBtHsOn != bPreBtHsOn) {
+ bPreBtHsOn = bBtHsOn;
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static void halbtc8723b2ant_UpdateBtLinkInfo(PBTC_COEXIST pBtCoexist)
+{
+ PBTC_BT_LINK_INFO pBtLinkInfo = &pBtCoexist->btLinkInfo;
+ bool bBtHsOn = false;
+
+ pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_BL_HS_OPERATION, &bBtHsOn);
+
+ pBtLinkInfo->bBtLinkExist = pCoexSta->bBtLinkExist;
+ pBtLinkInfo->bScoExist = pCoexSta->bScoExist;
+ pBtLinkInfo->bA2dpExist = pCoexSta->bA2dpExist;
+ pBtLinkInfo->bPanExist = pCoexSta->bPanExist;
+ pBtLinkInfo->bHidExist = pCoexSta->bHidExist;
+
+	/* workaround for HS mode. */
+ if (bBtHsOn) {
+ pBtLinkInfo->bPanExist = true;
+ pBtLinkInfo->bBtLinkExist = true;
+ }
+
+ /* check if Sco only */
+ if (
+ pBtLinkInfo->bScoExist &&
+ !pBtLinkInfo->bA2dpExist &&
+ !pBtLinkInfo->bPanExist &&
+ !pBtLinkInfo->bHidExist
+ )
+ pBtLinkInfo->bScoOnly = true;
+ else
+ pBtLinkInfo->bScoOnly = false;
+
+ /* check if A2dp only */
+ if (
+ !pBtLinkInfo->bScoExist &&
+ pBtLinkInfo->bA2dpExist &&
+ !pBtLinkInfo->bPanExist &&
+ !pBtLinkInfo->bHidExist
+ )
+ pBtLinkInfo->bA2dpOnly = true;
+ else
+ pBtLinkInfo->bA2dpOnly = false;
+
+ /* check if Pan only */
+ if (
+ !pBtLinkInfo->bScoExist &&
+ !pBtLinkInfo->bA2dpExist &&
+ pBtLinkInfo->bPanExist &&
+ !pBtLinkInfo->bHidExist
+ )
+ pBtLinkInfo->bPanOnly = true;
+ else
+ pBtLinkInfo->bPanOnly = false;
+
+ /* check if Hid only */
+ if (
+ !pBtLinkInfo->bScoExist &&
+ !pBtLinkInfo->bA2dpExist &&
+ !pBtLinkInfo->bPanExist &&
+ pBtLinkInfo->bHidExist
+ )
+ pBtLinkInfo->bHidOnly = true;
+ else
+ pBtLinkInfo->bHidOnly = false;
+}
+
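+/*
+ * Derive the 2-antenna coex algorithm from the set of active BT profiles
+ * (SCO/HID/A2DP/PAN) and whether BT HS operation is on.
+ */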
+static u8 halbtc8723b2ant_ActionAlgorithm(PBTC_COEXIST pBtCoexist)
+{
+ PBTC_BT_LINK_INFO pBtLinkInfo = &pBtCoexist->btLinkInfo;
+ bool bBtHsOn = false;
+ u8 algorithm = BT_8723B_2ANT_COEX_ALGO_UNDEFINED;
+ u8 numOfDiffProfile = 0;
+
+ pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_BL_HS_OPERATION, &bBtHsOn);
+
+ if (!pBtLinkInfo->bBtLinkExist) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], No BT link exists!!!\n"));
+ return algorithm;
+ }
+
+ if (pBtLinkInfo->bScoExist)
+ numOfDiffProfile++;
+
+ if (pBtLinkInfo->bHidExist)
+ numOfDiffProfile++;
+
+ if (pBtLinkInfo->bPanExist)
+ numOfDiffProfile++;
+
+ if (pBtLinkInfo->bA2dpExist)
+ numOfDiffProfile++;
+
+ if (numOfDiffProfile == 1) {
+ if (pBtLinkInfo->bScoExist) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO only\n"));
+ algorithm = BT_8723B_2ANT_COEX_ALGO_SCO;
+ } else {
+ if (pBtLinkInfo->bHidExist) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], HID only\n"));
+ algorithm = BT_8723B_2ANT_COEX_ALGO_HID;
+ } else if (pBtLinkInfo->bA2dpExist) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], A2DP only\n"));
+ algorithm = BT_8723B_2ANT_COEX_ALGO_A2DP;
+ } else if (pBtLinkInfo->bPanExist) {
+ if (bBtHsOn) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], PAN(HS) only\n"));
+ algorithm = BT_8723B_2ANT_COEX_ALGO_PANHS;
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], PAN(EDR) only\n"));
+ algorithm = BT_8723B_2ANT_COEX_ALGO_PANEDR;
+ }
+ }
+ }
+ } else if (numOfDiffProfile == 2) {
+ if (pBtLinkInfo->bScoExist) {
+ if (pBtLinkInfo->bHidExist) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + HID\n"));
+ algorithm = BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
+ } else if (pBtLinkInfo->bA2dpExist) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + A2DP ==> SCO\n"));
+ algorithm = BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
+ } else if (pBtLinkInfo->bPanExist) {
+ if (bBtHsOn) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + PAN(HS)\n"));
+ algorithm = BT_8723B_2ANT_COEX_ALGO_SCO;
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + PAN(EDR)\n"));
+ algorithm = BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
+ }
+ }
+ } else {
+ if (
+ pBtLinkInfo->bHidExist &&
+ pBtLinkInfo->bA2dpExist
+ ) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], HID + A2DP\n"));
+ algorithm = BT_8723B_2ANT_COEX_ALGO_HID_A2DP;
+ } else if (
+ pBtLinkInfo->bHidExist &&
+ pBtLinkInfo->bPanExist
+ ) {
+ if (bBtHsOn) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], HID + PAN(HS)\n"));
+ algorithm = BT_8723B_2ANT_COEX_ALGO_HID;
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], HID + PAN(EDR)\n"));
+ algorithm = BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
+ }
+ } else if (
+ pBtLinkInfo->bPanExist &&
+ pBtLinkInfo->bA2dpExist
+ ) {
+ if (bBtHsOn) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], A2DP + PAN(HS)\n"));
+ algorithm = BT_8723B_2ANT_COEX_ALGO_A2DP_PANHS;
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], A2DP + PAN(EDR)\n"));
+ algorithm = BT_8723B_2ANT_COEX_ALGO_PANEDR_A2DP;
+ }
+ }
+ }
+ } else if (numOfDiffProfile == 3) {
+ if (pBtLinkInfo->bScoExist) {
+ if (
+ pBtLinkInfo->bHidExist &&
+ pBtLinkInfo->bA2dpExist
+ ) {
+ BTC_PRINT(
+ BTC_MSG_ALGORITHM,
+ ALGO_TRACE,
+ ("[BTCoex], SCO + HID + A2DP ==> HID\n")
+ );
+ algorithm = BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
+ } else if (
+ pBtLinkInfo->bHidExist &&
+ pBtLinkInfo->bPanExist
+ ) {
+ if (bBtHsOn) {
+ BTC_PRINT(
+ BTC_MSG_ALGORITHM,
+ ALGO_TRACE,
+ ("[BTCoex], SCO + HID + PAN(HS)\n")
+ );
+ algorithm = BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
+ } else {
+ BTC_PRINT(
+ BTC_MSG_ALGORITHM,
+ ALGO_TRACE,
+ ("[BTCoex], SCO + HID + PAN(EDR)\n")
+ );
+ algorithm = BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
+ }
+ } else if (
+ pBtLinkInfo->bPanExist &&
+ pBtLinkInfo->bA2dpExist
+ ) {
+ if (bBtHsOn) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + A2DP + PAN(HS)\n"));
+ algorithm = BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + A2DP + PAN(EDR) ==> HID\n"));
+ algorithm = BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
+ }
+ }
+ } else {
+ if (
+ pBtLinkInfo->bHidExist &&
+ pBtLinkInfo->bPanExist &&
+ pBtLinkInfo->bA2dpExist
+ ) {
+ if (bBtHsOn) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], HID + A2DP + PAN(HS)\n"));
+ algorithm = BT_8723B_2ANT_COEX_ALGO_HID_A2DP;
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], HID + A2DP + PAN(EDR)\n"));
+ algorithm = BT_8723B_2ANT_COEX_ALGO_HID_A2DP_PANEDR;
+ }
+ }
+ }
+ } else if (numOfDiffProfile >= 3) {
+ if (pBtLinkInfo->bScoExist) {
+ if (
+ pBtLinkInfo->bHidExist &&
+ pBtLinkInfo->bPanExist &&
+ pBtLinkInfo->bA2dpExist
+ ) {
+ if (bBtHsOn) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Error!!! SCO + HID + A2DP + PAN(HS)\n"));
+
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + HID + A2DP + PAN(EDR) ==>PAN(EDR)+HID\n"));
+ algorithm = BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
+ }
+ }
+ }
+ }
+
+ return algorithm;
+}
+
+static void halbtc8723b2ant_SetFwDacSwingLevel(
+ PBTC_COEXIST pBtCoexist, u8 dacSwingLvl
+)
+{
+ u8 H2C_Parameter[1] = {0};
+
+	/* There are several types of dac swing level: */
+ /* 0x18/ 0x10/ 0xc/ 0x8/ 0x4/ 0x6 */
+ H2C_Parameter[0] = dacSwingLvl;
+
+ BTC_PRINT(
+ BTC_MSG_ALGORITHM,
+ ALGO_TRACE_FW_EXEC,
+ ("[BTCoex], Set Dac Swing Level = 0x%x\n", dacSwingLvl)
+ );
+ BTC_PRINT(
+ BTC_MSG_ALGORITHM,
+ ALGO_TRACE_FW_EXEC,
+ ("[BTCoex], FW write 0x64 = 0x%x\n", H2C_Parameter[0])
+ );
+
+ pBtCoexist->fBtcFillH2c(pBtCoexist, 0x64, 1, H2C_Parameter);
+}
+
+static void halbtc8723b2ant_SetFwDecBtPwr(
+ PBTC_COEXIST pBtCoexist, u8 decBtPwrLvl
+)
+{
+ u8 H2C_Parameter[1] = {0};
+
+ H2C_Parameter[0] = decBtPwrLvl;
+
+ BTC_PRINT(
+ BTC_MSG_ALGORITHM,
+ ALGO_TRACE_FW_EXEC,
+ (
+ "[BTCoex], decrease Bt Power level = %d, FW write 0x62 = 0x%x\n",
+ decBtPwrLvl,
+ H2C_Parameter[0]
+ )
+ );
+
+ pBtCoexist->fBtcFillH2c(pBtCoexist, 0x62, 1, H2C_Parameter);
+}
+
+static void halbtc8723b2ant_DecBtPwr(
+ PBTC_COEXIST pBtCoexist, bool bForceExec, u8 decBtPwrLvl
+)
+{
+ BTC_PRINT(
+ BTC_MSG_ALGORITHM,
+ ALGO_TRACE_FW,
+ (
+ "[BTCoex], %s Dec BT power level = %d\n",
+ (bForceExec ? "force to" : ""),
+ decBtPwrLvl
+ )
+ );
+ pCoexDm->curBtDecPwrLvl = decBtPwrLvl;
+
+ if (!bForceExec) {
+ BTC_PRINT(
+ BTC_MSG_ALGORITHM,
+ ALGO_TRACE_FW_DETAIL,
+ (
+ "[BTCoex], preBtDecPwrLvl =%d, curBtDecPwrLvl =%d\n",
+ pCoexDm->preBtDecPwrLvl,
+ pCoexDm->curBtDecPwrLvl
+ )
+ );
+
+ if (pCoexDm->preBtDecPwrLvl == pCoexDm->curBtDecPwrLvl)
+ return;
+ }
+ halbtc8723b2ant_SetFwDecBtPwr(pBtCoexist, pCoexDm->curBtDecPwrLvl);
+
+ pCoexDm->preBtDecPwrLvl = pCoexDm->curBtDecPwrLvl;
+}
+
+static void halbtc8723b2ant_FwDacSwingLvl(
+ PBTC_COEXIST pBtCoexist, bool bForceExec, u8 fwDacSwingLvl
+)
+{
+ BTC_PRINT(
+ BTC_MSG_ALGORITHM,
+ ALGO_TRACE_FW,
+ (
+ "[BTCoex], %s set FW Dac Swing level = %d\n",
+ (bForceExec ? "force to" : ""),
+ fwDacSwingLvl
+ )
+ );
+ pCoexDm->curFwDacSwingLvl = fwDacSwingLvl;
+
+ if (!bForceExec) {
+ BTC_PRINT(
+ BTC_MSG_ALGORITHM,
+ ALGO_TRACE_FW_DETAIL,
+ (
+ "[BTCoex], preFwDacSwingLvl =%d, curFwDacSwingLvl =%d\n",
+ pCoexDm->preFwDacSwingLvl,
+ pCoexDm->curFwDacSwingLvl
+ )
+ );
+
+ if (pCoexDm->preFwDacSwingLvl == pCoexDm->curFwDacSwingLvl)
+ return;
+ }
+
+ halbtc8723b2ant_SetFwDacSwingLevel(pBtCoexist, pCoexDm->curFwDacSwingLvl);
+
+ pCoexDm->preFwDacSwingLvl = pCoexDm->curFwDacSwingLvl;
+}
+
+static void halbtc8723b2ant_SetSwRfRxLpfCorner(
+ PBTC_COEXIST pBtCoexist,
+ bool bRxRfShrinkOn
+)
+{
+ if (bRxRfShrinkOn) {
+ /* Shrink RF Rx LPF corner */
+ BTC_PRINT(
+ BTC_MSG_ALGORITHM,
+ ALGO_TRACE_SW_EXEC,
+ ("[BTCoex], Shrink RF Rx LPF corner!!\n")
+ );
+ pBtCoexist->fBtcSetRfReg(pBtCoexist, BTC_RF_A, 0x1e, 0xfffff, 0xffffc);
+ } else {
+ /* Resume RF Rx LPF corner */
+		/* After initialization, we can use pCoexDm->btRf0x1eBackup */
+ if (pBtCoexist->bInitilized) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], Resume RF Rx LPF corner!!\n"));
+ pBtCoexist->fBtcSetRfReg(pBtCoexist, BTC_RF_A, 0x1e, 0xfffff, pCoexDm->btRf0x1eBackup);
+ }
+ }
+}
+
+static void halbtc8723b2ant_RfShrink(
+ PBTC_COEXIST pBtCoexist, bool bForceExec, bool bRxRfShrinkOn
+)
+{
+ BTC_PRINT(
+ BTC_MSG_ALGORITHM,
+ ALGO_TRACE_SW,
+ (
+ "[BTCoex], %s turn Rx RF Shrink = %s\n",
+ (bForceExec ? "force to" : ""),
+ (bRxRfShrinkOn ? "ON" : "OFF")
+ )
+ );
+ pCoexDm->bCurRfRxLpfShrink = bRxRfShrinkOn;
+
+ if (!bForceExec) {
+ BTC_PRINT(
+ BTC_MSG_ALGORITHM,
+ ALGO_TRACE_SW_DETAIL,
+ (
+ "[BTCoex], bPreRfRxLpfShrink =%d, bCurRfRxLpfShrink =%d\n",
+ pCoexDm->bPreRfRxLpfShrink,
+ pCoexDm->bCurRfRxLpfShrink
+ )
+ );
+
+ if (pCoexDm->bPreRfRxLpfShrink == pCoexDm->bCurRfRxLpfShrink)
+ return;
+ }
+ halbtc8723b2ant_SetSwRfRxLpfCorner(pBtCoexist, pCoexDm->bCurRfRxLpfShrink);
+
+ pCoexDm->bPreRfRxLpfShrink = pCoexDm->bCurRfRxLpfShrink;
+}
+
+static void halbtc8723b2ant_SetSwPenaltyTxRateAdaptive(
+ PBTC_COEXIST pBtCoexist, bool bLowPenaltyRa
+)
+{
+ u8 H2C_Parameter[6] = {0};
+
+ H2C_Parameter[0] = 0x6; /* opCode, 0x6 = Retry_Penalty */
+
+ if (bLowPenaltyRa) {
+ H2C_Parameter[1] |= BIT0;
+ H2C_Parameter[2] = 0x00; /* normal rate except MCS7/6/5, OFDM54/48/36 */
+ H2C_Parameter[3] = 0xf7; /* MCS7 or OFDM54 */
+ H2C_Parameter[4] = 0xf8; /* MCS6 or OFDM48 */
+ H2C_Parameter[5] = 0xf9; /* MCS5 or OFDM36 */
+ }
+
+ BTC_PRINT(
+ BTC_MSG_ALGORITHM,
+ ALGO_TRACE_FW_EXEC,
+ (
+ "[BTCoex], set WiFi Low-Penalty Retry: %s",
+ (bLowPenaltyRa ? "ON!!" : "OFF!!")
+ )
+ );
+
+ pBtCoexist->fBtcFillH2c(pBtCoexist, 0x69, 6, H2C_Parameter);
+}
+
+static void halbtc8723b2ant_LowPenaltyRa(
+ PBTC_COEXIST pBtCoexist, bool bForceExec, bool bLowPenaltyRa
+)
+{
+ /* return; */
+ BTC_PRINT(
+ BTC_MSG_ALGORITHM,
+ ALGO_TRACE_SW,
+ (
+ "[BTCoex], %s turn LowPenaltyRA = %s\n",
+ (bForceExec ? "force to" : ""),
+ (bLowPenaltyRa ? "ON" : "OFF")
+ )
+ );
+ pCoexDm->bCurLowPenaltyRa = bLowPenaltyRa;
+
+ if (!bForceExec) {
+ BTC_PRINT(
+ BTC_MSG_ALGORITHM,
+ ALGO_TRACE_SW_DETAIL,
+ (
+ "[BTCoex], bPreLowPenaltyRa =%d, bCurLowPenaltyRa =%d\n",
+ pCoexDm->bPreLowPenaltyRa,
+ pCoexDm->bCurLowPenaltyRa
+ )
+ );
+
+ if (pCoexDm->bPreLowPenaltyRa == pCoexDm->bCurLowPenaltyRa)
+ return;
+ }
+ halbtc8723b2ant_SetSwPenaltyTxRateAdaptive(pBtCoexist, pCoexDm->bCurLowPenaltyRa);
+
+ pCoexDm->bPreLowPenaltyRa = pCoexDm->bCurLowPenaltyRa;
+}
+
+static void halbtc8723b2ant_SetDacSwingReg(PBTC_COEXIST pBtCoexist, u32 level)
+{
+ u8 val = (u8)level;
+
+ BTC_PRINT(
+ BTC_MSG_ALGORITHM,
+ ALGO_TRACE_SW_EXEC,
+ ("[BTCoex], Write SwDacSwing = 0x%x\n", level)
+ );
+ pBtCoexist->fBtcWrite1ByteBitMask(pBtCoexist, 0x883, 0x3e, val);
+}
+
+static void halbtc8723b2ant_SetSwFullTimeDacSwing(
+ PBTC_COEXIST pBtCoexist, bool bSwDacSwingOn, u32 swDacSwingLvl
+)
+{
+ if (bSwDacSwingOn)
+ halbtc8723b2ant_SetDacSwingReg(pBtCoexist, swDacSwingLvl);
+ else
+ halbtc8723b2ant_SetDacSwingReg(pBtCoexist, 0x18);
+}
+
+
+static void halbtc8723b2ant_DacSwing(
+ PBTC_COEXIST pBtCoexist,
+ bool bForceExec,
+ bool bDacSwingOn,
+ u32 dacSwingLvl
+)
+{
+ BTC_PRINT(
+ BTC_MSG_ALGORITHM,
+ ALGO_TRACE_SW,
+ (
+ "[BTCoex], %s turn DacSwing =%s, dacSwingLvl = 0x%x\n",
+ (bForceExec ? "force to" : ""),
+ (bDacSwingOn ? "ON" : "OFF"),
+ dacSwingLvl
+ )
+ );
+ pCoexDm->bCurDacSwingOn = bDacSwingOn;
+ pCoexDm->curDacSwingLvl = dacSwingLvl;
+
+ if (!bForceExec) {
+ BTC_PRINT(
+ BTC_MSG_ALGORITHM,
+ ALGO_TRACE_SW_DETAIL,
+ (
+ "[BTCoex], bPreDacSwingOn =%d, preDacSwingLvl = 0x%x, bCurDacSwingOn =%d, curDacSwingLvl = 0x%x\n",
+ pCoexDm->bPreDacSwingOn,
+ pCoexDm->preDacSwingLvl,
+ pCoexDm->bCurDacSwingOn,
+ pCoexDm->curDacSwingLvl
+ )
+ );
+
+ if ((pCoexDm->bPreDacSwingOn == pCoexDm->bCurDacSwingOn) &&
+ (pCoexDm->preDacSwingLvl == pCoexDm->curDacSwingLvl))
+ return;
+ }
+ mdelay(30);
+ halbtc8723b2ant_SetSwFullTimeDacSwing(pBtCoexist, bDacSwingOn, dacSwingLvl);
+
+ pCoexDm->bPreDacSwingOn = pCoexDm->bCurDacSwingOn;
+ pCoexDm->preDacSwingLvl = pCoexDm->curDacSwingLvl;
+}
+
+static void halbtc8723b2ant_SetAgcTable(
+ PBTC_COEXIST pBtCoexist, bool bAgcTableEn
+)
+{
+ u8 rssiAdjustVal = 0;
+
+ /* BB AGC Gain Table */
+ if (bAgcTableEn) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], BB Agc Table On!\n"));
+ pBtCoexist->fBtcWrite4Byte(pBtCoexist, 0xc78, 0x6e1A0001);
+ pBtCoexist->fBtcWrite4Byte(pBtCoexist, 0xc78, 0x6d1B0001);
+ pBtCoexist->fBtcWrite4Byte(pBtCoexist, 0xc78, 0x6c1C0001);
+ pBtCoexist->fBtcWrite4Byte(pBtCoexist, 0xc78, 0x6b1D0001);
+ pBtCoexist->fBtcWrite4Byte(pBtCoexist, 0xc78, 0x6a1E0001);
+ pBtCoexist->fBtcWrite4Byte(pBtCoexist, 0xc78, 0x691F0001);
+ pBtCoexist->fBtcWrite4Byte(pBtCoexist, 0xc78, 0x68200001);
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], BB Agc Table Off!\n"));
+ pBtCoexist->fBtcWrite4Byte(pBtCoexist, 0xc78, 0xaa1A0001);
+ pBtCoexist->fBtcWrite4Byte(pBtCoexist, 0xc78, 0xa91B0001);
+ pBtCoexist->fBtcWrite4Byte(pBtCoexist, 0xc78, 0xa81C0001);
+ pBtCoexist->fBtcWrite4Byte(pBtCoexist, 0xc78, 0xa71D0001);
+ pBtCoexist->fBtcWrite4Byte(pBtCoexist, 0xc78, 0xa61E0001);
+ pBtCoexist->fBtcWrite4Byte(pBtCoexist, 0xc78, 0xa51F0001);
+ pBtCoexist->fBtcWrite4Byte(pBtCoexist, 0xc78, 0xa4200001);
+ }
+
+
+ /* RF Gain */
+ pBtCoexist->fBtcSetRfReg(pBtCoexist, BTC_RF_A, 0xef, 0xfffff, 0x02000);
+ if (bAgcTableEn) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], Agc Table On!\n"));
+ pBtCoexist->fBtcSetRfReg(pBtCoexist, BTC_RF_A, 0x3b, 0xfffff, 0x38fff);
+ pBtCoexist->fBtcSetRfReg(pBtCoexist, BTC_RF_A, 0x3b, 0xfffff, 0x38ffe);
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], Agc Table Off!\n"));
+ pBtCoexist->fBtcSetRfReg(pBtCoexist, BTC_RF_A, 0x3b, 0xfffff, 0x380c3);
+ pBtCoexist->fBtcSetRfReg(pBtCoexist, BTC_RF_A, 0x3b, 0xfffff, 0x28ce6);
+ }
+ pBtCoexist->fBtcSetRfReg(pBtCoexist, BTC_RF_A, 0xef, 0xfffff, 0x0);
+
+ pBtCoexist->fBtcSetRfReg(pBtCoexist, BTC_RF_A, 0xed, 0xfffff, 0x1);
+ if (bAgcTableEn) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], Agc Table On!\n"));
+ pBtCoexist->fBtcSetRfReg(pBtCoexist, BTC_RF_A, 0x40, 0xfffff, 0x38fff);
+ pBtCoexist->fBtcSetRfReg(pBtCoexist, BTC_RF_A, 0x40, 0xfffff, 0x38ffe);
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], Agc Table Off!\n"));
+ pBtCoexist->fBtcSetRfReg(pBtCoexist, BTC_RF_A, 0x40, 0xfffff, 0x380c3);
+ pBtCoexist->fBtcSetRfReg(pBtCoexist, BTC_RF_A, 0x40, 0xfffff, 0x28ce6);
+ }
+ pBtCoexist->fBtcSetRfReg(pBtCoexist, BTC_RF_A, 0xed, 0xfffff, 0x0);
+
+ /* set rssiAdjustVal for wifi module. */
+ if (bAgcTableEn)
+ rssiAdjustVal = 8;
+
+ pBtCoexist->fBtcSet(pBtCoexist, BTC_SET_U1_RSSI_ADJ_VAL_FOR_AGC_TABLE_ON, &rssiAdjustVal);
+}
+
+static void halbtc8723b2ant_AgcTable(
+ PBTC_COEXIST pBtCoexist, bool bForceExec, bool bAgcTableEn
+)
+{
+ BTC_PRINT(
+ BTC_MSG_ALGORITHM,
+ ALGO_TRACE_SW,
+ (
+ "[BTCoex], %s %s Agc Table\n",
+ (bForceExec ? "force to" : ""),
+ (bAgcTableEn ? "Enable" : "Disable")
+ )
+ );
+ pCoexDm->bCurAgcTableEn = bAgcTableEn;
+
+ if (!bForceExec) {
+ BTC_PRINT(
+ BTC_MSG_ALGORITHM,
+ ALGO_TRACE_SW_DETAIL,
+ (
+ "[BTCoex], bPreAgcTableEn =%d, bCurAgcTableEn =%d\n",
+ pCoexDm->bPreAgcTableEn,
+ pCoexDm->bCurAgcTableEn
+ )
+ );
+
+ if (pCoexDm->bPreAgcTableEn == pCoexDm->bCurAgcTableEn)
+ return;
+ }
+ halbtc8723b2ant_SetAgcTable(pBtCoexist, bAgcTableEn);
+
+ pCoexDm->bPreAgcTableEn = pCoexDm->bCurAgcTableEn;
+}
+
+static void halbtc8723b2ant_SetCoexTable(
+ PBTC_COEXIST pBtCoexist,
+ u32 val0x6c0,
+ u32 val0x6c4,
+ u32 val0x6c8,
+ u8 val0x6cc
+)
+{
+ BTC_PRINT(
+ BTC_MSG_ALGORITHM,
+ ALGO_TRACE_SW_EXEC,
+ ("[BTCoex], set coex table, set 0x6c0 = 0x%x\n", val0x6c0)
+ );
+ pBtCoexist->fBtcWrite4Byte(pBtCoexist, 0x6c0, val0x6c0);
+
+ BTC_PRINT(
+ BTC_MSG_ALGORITHM,
+ ALGO_TRACE_SW_EXEC,
+ ("[BTCoex], set coex table, set 0x6c4 = 0x%x\n", val0x6c4)
+ );
+ pBtCoexist->fBtcWrite4Byte(pBtCoexist, 0x6c4, val0x6c4);
+
+ BTC_PRINT(
+ BTC_MSG_ALGORITHM,
+ ALGO_TRACE_SW_EXEC,
+ ("[BTCoex], set coex table, set 0x6c8 = 0x%x\n", val0x6c8)
+ );
+ pBtCoexist->fBtcWrite4Byte(pBtCoexist, 0x6c8, val0x6c8);
+
+ BTC_PRINT(
+ BTC_MSG_ALGORITHM,
+ ALGO_TRACE_SW_EXEC,
+ ("[BTCoex], set coex table, set 0x6cc = 0x%x\n", val0x6cc)
+ );
+ pBtCoexist->fBtcWrite1Byte(pBtCoexist, 0x6cc, val0x6cc);
+}
+
+static void halbtc8723b2ant_CoexTable(
+ PBTC_COEXIST pBtCoexist,
+ bool bForceExec,
+ u32 val0x6c0,
+ u32 val0x6c4,
+ u32 val0x6c8,
+ u8 val0x6cc
+)
+{
+ BTC_PRINT(
+ BTC_MSG_ALGORITHM,
+ ALGO_TRACE_SW,
+ (
+ "[BTCoex], %s write Coex Table 0x6c0 = 0x%x, 0x6c4 = 0x%x, 0x6c8 = 0x%x, 0x6cc = 0x%x\n",
+ (bForceExec ? "force to" : ""),
+ val0x6c0,
+ val0x6c4,
+ val0x6c8,
+ val0x6cc
+ )
+ );
+ pCoexDm->curVal0x6c0 = val0x6c0;
+ pCoexDm->curVal0x6c4 = val0x6c4;
+ pCoexDm->curVal0x6c8 = val0x6c8;
+ pCoexDm->curVal0x6cc = val0x6cc;
+
+ if (!bForceExec) {
+ BTC_PRINT(
+ BTC_MSG_ALGORITHM,
+ ALGO_TRACE_SW_DETAIL,
+ (
+ "[BTCoex], preVal0x6c0 = 0x%x, preVal0x6c4 = 0x%x, preVal0x6c8 = 0x%x, preVal0x6cc = 0x%x !!\n",
+ pCoexDm->preVal0x6c0,
+ pCoexDm->preVal0x6c4,
+ pCoexDm->preVal0x6c8,
+ pCoexDm->preVal0x6cc
+ )
+ );
+ BTC_PRINT(
+ BTC_MSG_ALGORITHM,
+ ALGO_TRACE_SW_DETAIL,
+ (
+ "[BTCoex], curVal0x6c0 = 0x%x, curVal0x6c4 = 0x%x, curVal0x6c8 = 0x%x, curVal0x6cc = 0x%x !!\n",
+ pCoexDm->curVal0x6c0,
+ pCoexDm->curVal0x6c4,
+ pCoexDm->curVal0x6c8,
+ pCoexDm->curVal0x6cc
+ )
+ );
+
+ if (
+ (pCoexDm->preVal0x6c0 == pCoexDm->curVal0x6c0) &&
+ (pCoexDm->preVal0x6c4 == pCoexDm->curVal0x6c4) &&
+ (pCoexDm->preVal0x6c8 == pCoexDm->curVal0x6c8) &&
+ (pCoexDm->preVal0x6cc == pCoexDm->curVal0x6cc)
+ )
+ return;
+ }
+ halbtc8723b2ant_SetCoexTable(pBtCoexist, val0x6c0, val0x6c4, val0x6c8, val0x6cc);
+
+ pCoexDm->preVal0x6c0 = pCoexDm->curVal0x6c0;
+ pCoexDm->preVal0x6c4 = pCoexDm->curVal0x6c4;
+ pCoexDm->preVal0x6c8 = pCoexDm->curVal0x6c8;
+ pCoexDm->preVal0x6cc = pCoexDm->curVal0x6cc;
+}
+
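+/*
+ * Select one of the predefined coexistence priority tables; the four values
+ * are written to registers 0x6c0/0x6c4/0x6c8/0x6cc by
+ * halbtc8723b2ant_CoexTable().
+ */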
+static void halbtc8723b2ant_CoexTableWithType(
+ PBTC_COEXIST pBtCoexist, bool bForceExec, u8 type
+)
+{
+ switch (type) {
+ case 0:
+ halbtc8723b2ant_CoexTable(pBtCoexist, bForceExec, 0x55555555, 0x55555555, 0xffff, 0x3);
+ break;
+ case 1:
+ halbtc8723b2ant_CoexTable(pBtCoexist, bForceExec, 0x55555555, 0x5afa5afa, 0xffff, 0x3);
+ break;
+ case 2:
+ halbtc8723b2ant_CoexTable(pBtCoexist, bForceExec, 0x5a5a5a5a, 0x5a5a5a5a, 0xffff, 0x3);
+ break;
+ case 3:
+ halbtc8723b2ant_CoexTable(pBtCoexist, bForceExec, 0xaaaaaaaa, 0xaaaaaaaa, 0xffff, 0x3);
+ break;
+ case 4:
+ halbtc8723b2ant_CoexTable(pBtCoexist, bForceExec, 0xffffffff, 0xffffffff, 0xffff, 0x3);
+ break;
+ case 5:
+ halbtc8723b2ant_CoexTable(pBtCoexist, bForceExec, 0x5fff5fff, 0x5fff5fff, 0xffff, 0x3);
+ break;
+ case 6:
+ halbtc8723b2ant_CoexTable(pBtCoexist, bForceExec, 0x55ff55ff, 0x5a5a5a5a, 0xffff, 0x3);
+ break;
+ case 7:
+ halbtc8723b2ant_CoexTable(pBtCoexist, bForceExec, 0x55ff55ff, 0xfafafafa, 0xffff, 0x3);
+ break;
+ case 8:
+ halbtc8723b2ant_CoexTable(pBtCoexist, bForceExec, 0x5aea5aea, 0x5aea5aea, 0xffff, 0x3);
+ break;
+ case 9:
+ halbtc8723b2ant_CoexTable(pBtCoexist, bForceExec, 0x55ff55ff, 0x5aea5aea, 0xffff, 0x3);
+ break;
+ case 10:
+ halbtc8723b2ant_CoexTable(pBtCoexist, bForceExec, 0x55ff55ff, 0x5aff5aff, 0xffff, 0x3);
+ break;
+ case 11:
+ halbtc8723b2ant_CoexTable(pBtCoexist, bForceExec, 0x55ff55ff, 0x5a5f5a5f, 0xffff, 0x3);
+ break;
+ case 12:
+ halbtc8723b2ant_CoexTable(pBtCoexist, bForceExec, 0x55ff55ff, 0x5f5f5f5f, 0xffff, 0x3);
+ break;
+ default:
+ break;
+ }
+}
+
+static void halbtc8723b2ant_SetFwIgnoreWlanAct(
+ PBTC_COEXIST pBtCoexist, bool bEnable
+)
+{
+ u8 H2C_Parameter[1] = {0};
+
+ if (bEnable)
+ H2C_Parameter[0] |= BIT0; /* function enable */
+
+ BTC_PRINT(
+ BTC_MSG_ALGORITHM,
+ ALGO_TRACE_FW_EXEC,
+ (
+ "[BTCoex], set FW for BT Ignore Wlan_Act, FW write 0x63 = 0x%x\n",
+ H2C_Parameter[0]
+ )
+ );
+
+ pBtCoexist->fBtcFillH2c(pBtCoexist, 0x63, 1, H2C_Parameter);
+}
+
+static void halbtc8723b2ant_IgnoreWlanAct(
+ PBTC_COEXIST pBtCoexist, bool bForceExec, bool bEnable
+)
+{
+ BTC_PRINT(
+ BTC_MSG_ALGORITHM,
+ ALGO_TRACE_FW,
+ (
+ "[BTCoex], %s turn Ignore WlanAct %s\n",
+ (bForceExec ? "force to" : ""),
+ (bEnable ? "ON" : "OFF")
+ )
+ );
+
+ pCoexDm->bCurIgnoreWlanAct = bEnable;
+
+ if (!bForceExec) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], bPreIgnoreWlanAct = %d, bCurIgnoreWlanAct = %d!!\n",
+ pCoexDm->bPreIgnoreWlanAct, pCoexDm->bCurIgnoreWlanAct));
+
+ if (pCoexDm->bPreIgnoreWlanAct == pCoexDm->bCurIgnoreWlanAct)
+ return;
+ }
+ halbtc8723b2ant_SetFwIgnoreWlanAct(pBtCoexist, bEnable);
+
+ pCoexDm->bPreIgnoreWlanAct = pCoexDm->bCurIgnoreWlanAct;
+}
+
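+/*
+ * Send the 5-byte PS-TDMA pattern via H2C command 0x60 and cache it in
+ * psTdmaPara[] for later reference.
+ */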
+static void halbtc8723b2ant_SetFwPstdma(
+ PBTC_COEXIST pBtCoexist,
+ u8 byte1,
+ u8 byte2,
+ u8 byte3,
+ u8 byte4,
+ u8 byte5
+)
+{
+ u8 H2C_Parameter[5] = {0};
+
+ H2C_Parameter[0] = byte1;
+ H2C_Parameter[1] = byte2;
+ H2C_Parameter[2] = byte3;
+ H2C_Parameter[3] = byte4;
+ H2C_Parameter[4] = byte5;
+
+ pCoexDm->psTdmaPara[0] = byte1;
+ pCoexDm->psTdmaPara[1] = byte2;
+ pCoexDm->psTdmaPara[2] = byte3;
+ pCoexDm->psTdmaPara[3] = byte4;
+ pCoexDm->psTdmaPara[4] = byte5;
+
+ BTC_PRINT(
+ BTC_MSG_ALGORITHM,
+ ALGO_TRACE_FW_EXEC,
+ (
+ "[BTCoex], FW write 0x60(5bytes) = 0x%x%08x\n",
+ H2C_Parameter[0],
+ H2C_Parameter[1]<<24|
+ H2C_Parameter[2]<<16|
+ H2C_Parameter[3]<<8|
+ H2C_Parameter[4]
+ )
+ );
+
+ pBtCoexist->fBtcFillH2c(pBtCoexist, 0x60, 5, H2C_Parameter);
+}
+
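+/*
+ * Software coex mechanism, part 1: RF Rx LPF shrink and low-penalty rate
+ * adaptive retry. bLimitedDIG and bBTLNAConstrain are accepted but unused.
+ */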
+static void halbtc8723b2ant_SwMechanism1(
+ PBTC_COEXIST pBtCoexist,
+ bool bShrinkRxLPF,
+ bool bLowPenaltyRA,
+ bool bLimitedDIG,
+ bool bBTLNAConstrain
+)
+{
+ halbtc8723b2ant_RfShrink(pBtCoexist, NORMAL_EXEC, bShrinkRxLPF);
+ halbtc8723b2ant_LowPenaltyRa(pBtCoexist, NORMAL_EXEC, bLowPenaltyRA);
+}
+
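+/*
+ * Software coex mechanism, part 2: AGC table shift and SW DAC swing.
+ * bADCBackOff is accepted but currently unused.
+ */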
+static void halbtc8723b2ant_SwMechanism2(
+ PBTC_COEXIST pBtCoexist,
+ bool bAGCTableShift,
+ bool bADCBackOff,
+ bool bSWDACSwing,
+ u32 dacSwingLvl
+)
+{
+ halbtc8723b2ant_AgcTable(pBtCoexist, NORMAL_EXEC, bAGCTableShift);
+ halbtc8723b2ant_DacSwing(pBtCoexist, NORMAL_EXEC, bSWDACSwing, dacSwingLvl);
+}
+
+static void halbtc8723b2ant_SetAntPath(
+ PBTC_COEXIST pBtCoexist, u8 antPosType, bool bInitHwCfg, bool bWifiOff
+)
+{
+ PBTC_BOARD_INFO pBoardInfo = &pBtCoexist->boardInfo;
+ u32 fwVer = 0, u4Tmp = 0;
+ bool bPgExtSwitch = false;
+ bool bUseExtSwitch = false;
+ u8 H2C_Parameter[2] = {0};
+
+ pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_BL_EXT_SWITCH, &bPgExtSwitch);
+ pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_U4_WIFI_FW_VER, &fwVer); /* [31:16]=fw ver, [15:0]=fw sub ver */
+
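+	/*
+	 * Use the external antenna switch for non-zero firmware versions below
+	 * 0xc0000, or when the PG info indicates an external switch is present.
+	 */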
+ if ((fwVer > 0 && fwVer < 0xc0000) || bPgExtSwitch)
+ bUseExtSwitch = true;
+
+ if (bInitHwCfg) {
+ pBtCoexist->fBtcWrite1ByteBitMask(pBtCoexist, 0x39, 0x8, 0x1);
+ pBtCoexist->fBtcWrite1Byte(pBtCoexist, 0x974, 0xff);
+ pBtCoexist->fBtcWrite1ByteBitMask(pBtCoexist, 0x944, 0x3, 0x3);
+ pBtCoexist->fBtcWrite1Byte(pBtCoexist, 0x930, 0x77);
+ pBtCoexist->fBtcWrite1ByteBitMask(pBtCoexist, 0x67, 0x20, 0x1);
+
+ if (fwVer >= 0x180000) {
+ /* Use H2C to set GNT_BT to LOW */
+ H2C_Parameter[0] = 0;
+ pBtCoexist->fBtcFillH2c(pBtCoexist, 0x6E, 1, H2C_Parameter);
+ } else {
+ pBtCoexist->fBtcWrite1Byte(pBtCoexist, 0x765, 0x0);
+ }
+
+ pBtCoexist->fBtcWrite4Byte(pBtCoexist, 0x948, 0x0);
+
+ pBtCoexist->fBtcSetRfReg(pBtCoexist, BTC_RF_A, 0x1, 0xfffff, 0x0); /* WiFi TRx Mask off */
+ pBtCoexist->fBtcSetBtReg(pBtCoexist, BTC_BT_REG_RF, 0x3c, 0x01); /* BT TRx Mask off */
+
+ if (pBoardInfo->btdmAntPos == BTC_ANTENNA_AT_MAIN_PORT) {
+ /* tell firmware "no antenna inverse" */
+ H2C_Parameter[0] = 0;
+ } else {
+ /* tell firmware "antenna inverse" */
+ H2C_Parameter[0] = 1;
+ }
+
+ if (bUseExtSwitch) {
+ /* ext switch type */
+ H2C_Parameter[1] = 1;
+ } else {
+ /* int switch type */
+ H2C_Parameter[1] = 0;
+ }
+ pBtCoexist->fBtcFillH2c(pBtCoexist, 0x65, 2, H2C_Parameter);
+ }
+
+ /* ext switch setting */
+ if (bUseExtSwitch) {
+ if (bInitHwCfg) {
+ /* 0x4c[23]= 0, 0x4c[24]= 1 Antenna control by WL/BT */
+ u4Tmp = pBtCoexist->fBtcRead4Byte(pBtCoexist, 0x4c);
+ u4Tmp &= ~BIT23;
+ u4Tmp |= BIT24;
+ pBtCoexist->fBtcWrite4Byte(pBtCoexist, 0x4c, u4Tmp);
+ }
+
+ pBtCoexist->fBtcWrite4Byte(pBtCoexist, 0x948, 0x0); /* fixed internal switch S1->WiFi, S0->BT */
+ switch (antPosType) {
+ case BTC_ANT_WIFI_AT_MAIN:
+ pBtCoexist->fBtcWrite1ByteBitMask(pBtCoexist, 0x92c, 0x3, 0x1); /* ext switch main at wifi */
+ break;
+ case BTC_ANT_WIFI_AT_AUX:
+ pBtCoexist->fBtcWrite1ByteBitMask(pBtCoexist, 0x92c, 0x3, 0x2); /* ext switch aux at wifi */
+ break;
+ }
+ } else { /* internal switch */
+ if (bInitHwCfg) {
+ /* 0x4c[23]= 0, 0x4c[24]= 1 Antenna control by WL/BT */
+ u4Tmp = pBtCoexist->fBtcRead4Byte(pBtCoexist, 0x4c);
+ u4Tmp |= BIT23;
+ u4Tmp &= ~BIT24;
+ pBtCoexist->fBtcWrite4Byte(pBtCoexist, 0x4c, u4Tmp);
+ }
+
+ pBtCoexist->fBtcWrite1ByteBitMask(pBtCoexist, 0x64, 0x1, 0x0); /* fixed external switch S1->Main, S0->Aux */
+ switch (antPosType) {
+ case BTC_ANT_WIFI_AT_MAIN:
+ pBtCoexist->fBtcWrite4Byte(pBtCoexist, 0x948, 0x0); /* fixed internal switch S1->WiFi, S0->BT */
+ break;
+ case BTC_ANT_WIFI_AT_AUX:
+ pBtCoexist->fBtcWrite4Byte(pBtCoexist, 0x948, 0x280); /* fixed internal switch S0->WiFi, S1->BT */
+ break;
+ }
+ }
+}
+
+static void halbtc8723b2ant_PsTdma(
+ PBTC_COEXIST pBtCoexist, bool bForceExec, bool bTurnOn, u8 type
+)
+{
+ BTC_PRINT(
+ BTC_MSG_ALGORITHM,
+ ALGO_TRACE_FW,
+ (
+ "[BTCoex], %s turn %s PS TDMA, type =%d\n",
+ (bForceExec ? "force to" : ""),
+ (bTurnOn ? "ON" : "OFF"),
+ type
+ )
+ );
+ pCoexDm->bCurPsTdmaOn = bTurnOn;
+ pCoexDm->curPsTdma = type;
+
+ if (!bForceExec) {
+ BTC_PRINT(
+ BTC_MSG_ALGORITHM,
+ ALGO_TRACE_FW_DETAIL,
+ (
+ "[BTCoex], bPrePsTdmaOn = %d, bCurPsTdmaOn = %d!!\n",
+ pCoexDm->bPrePsTdmaOn,
+ pCoexDm->bCurPsTdmaOn
+ )
+ );
+ BTC_PRINT(
+ BTC_MSG_ALGORITHM,
+ ALGO_TRACE_FW_DETAIL,
+ (
+ "[BTCoex], prePsTdma = %d, curPsTdma = %d!!\n",
+ pCoexDm->prePsTdma, pCoexDm->curPsTdma
+ )
+ );
+
+ if (
+ (pCoexDm->bPrePsTdmaOn == pCoexDm->bCurPsTdmaOn) &&
+ (pCoexDm->prePsTdma == pCoexDm->curPsTdma)
+ )
+ return;
+ }
+
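+	/*
+	 * When turning TDMA on, each type selects a predefined 5-byte parameter
+	 * set passed to halbtc8723b2ant_SetFwPstdma(); when turning it off, a
+	 * null pattern is written instead.
+	 */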
+ if (bTurnOn) {
+ switch (type) {
+ case 1:
+ default:
+ halbtc8723b2ant_SetFwPstdma(pBtCoexist, 0xe3, 0x1a, 0x1a, 0xe1, 0x90);
+ break;
+ case 2:
+ halbtc8723b2ant_SetFwPstdma(pBtCoexist, 0xe3, 0x12, 0x12, 0xe1, 0x90);
+ break;
+ case 3:
+ halbtc8723b2ant_SetFwPstdma(pBtCoexist, 0xe3, 0x1c, 0x3, 0xf1, 0x90);
+ break;
+ case 4:
+ halbtc8723b2ant_SetFwPstdma(pBtCoexist, 0xe3, 0x10, 0x03, 0xf1, 0x90);
+ break;
+ case 5:
+ halbtc8723b2ant_SetFwPstdma(pBtCoexist, 0xe3, 0x1a, 0x1a, 0x60, 0x90);
+ break;
+ case 6:
+ halbtc8723b2ant_SetFwPstdma(pBtCoexist, 0xe3, 0x12, 0x12, 0x60, 0x90);
+ break;
+ case 7:
+ halbtc8723b2ant_SetFwPstdma(pBtCoexist, 0xe3, 0x1c, 0x3, 0x70, 0x90);
+ break;
+ case 8:
+ halbtc8723b2ant_SetFwPstdma(pBtCoexist, 0xa3, 0x10, 0x3, 0x70, 0x90);
+ break;
+ case 9:
+ halbtc8723b2ant_SetFwPstdma(pBtCoexist, 0xe3, 0x1a, 0x1a, 0xe1, 0x90);
+ break;
+ case 10:
+ halbtc8723b2ant_SetFwPstdma(pBtCoexist, 0xe3, 0x12, 0x12, 0xe1, 0x90);
+ break;
+ case 11:
+ halbtc8723b2ant_SetFwPstdma(pBtCoexist, 0xe3, 0xa, 0xa, 0xe1, 0x90);
+ break;
+ case 12:
+ halbtc8723b2ant_SetFwPstdma(pBtCoexist, 0xe3, 0x5, 0x5, 0xe1, 0x90);
+ break;
+ case 13:
+ halbtc8723b2ant_SetFwPstdma(pBtCoexist, 0xe3, 0x1a, 0x1a, 0x60, 0x90);
+ break;
+ case 14:
+ halbtc8723b2ant_SetFwPstdma(pBtCoexist, 0xe3, 0x12, 0x12, 0x60, 0x90);
+ break;
+ case 15:
+ halbtc8723b2ant_SetFwPstdma(pBtCoexist, 0xe3, 0xa, 0xa, 0x60, 0x90);
+ break;
+ case 16:
+ halbtc8723b2ant_SetFwPstdma(pBtCoexist, 0xe3, 0x5, 0x5, 0x60, 0x90);
+ break;
+ case 17:
+ halbtc8723b2ant_SetFwPstdma(pBtCoexist, 0xa3, 0x2f, 0x2f, 0x60, 0x90);
+ break;
+ case 18:
+ halbtc8723b2ant_SetFwPstdma(pBtCoexist, 0xe3, 0x5, 0x5, 0xe1, 0x90);
+ break;
+ case 19:
+ halbtc8723b2ant_SetFwPstdma(pBtCoexist, 0xe3, 0x25, 0x25, 0xe1, 0x90);
+ break;
+ case 20:
+ halbtc8723b2ant_SetFwPstdma(pBtCoexist, 0xe3, 0x25, 0x25, 0x60, 0x90);
+ break;
+ case 21:
+ halbtc8723b2ant_SetFwPstdma(pBtCoexist, 0xe3, 0x15, 0x03, 0x70, 0x90);
+ break;
+ case 71:
+ halbtc8723b2ant_SetFwPstdma(pBtCoexist, 0xe3, 0x1a, 0x1a, 0xe1, 0x90);
+ break;
+ }
+ } else {
+ /* disable PS tdma */
+ switch (type) {
+ case 0:
+ halbtc8723b2ant_SetFwPstdma(pBtCoexist, 0x0, 0x0, 0x0, 0x40, 0x0);
+ break;
+ case 1:
+ halbtc8723b2ant_SetFwPstdma(pBtCoexist, 0x0, 0x0, 0x0, 0x48, 0x0);
+ break;
+ default:
+ halbtc8723b2ant_SetFwPstdma(pBtCoexist, 0x0, 0x0, 0x0, 0x40, 0x0);
+ break;
+ }
+ }
+
+ /* update pre state */
+ pCoexDm->bPrePsTdmaOn = pCoexDm->bCurPsTdmaOn;
+ pCoexDm->prePsTdma = pCoexDm->curPsTdma;
+}
+
+static void halbtc8723b2ant_CoexAllOff(PBTC_COEXIST pBtCoexist)
+{
+ /* fw all off */
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, false, 1);
+ halbtc8723b2ant_FwDacSwingLvl(pBtCoexist, NORMAL_EXEC, 6);
+ halbtc8723b2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, 0);
+
+ /* sw all off */
+ halbtc8723b2ant_SwMechanism1(pBtCoexist, false, false, false, false);
+ halbtc8723b2ant_SwMechanism2(pBtCoexist, false, false, false, 0x18);
+
+ /* hw all off */
+ /* pBtCoexist->fBtcSetRfReg(pBtCoexist, BTC_RF_A, 0x1, 0xfffff, 0x0); */
+ halbtc8723b2ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 0);
+}
+
+static void halbtc8723b2ant_InitCoexDm(PBTC_COEXIST pBtCoexist)
+{
+ /* force to reset coex mechanism */
+
+ halbtc8723b2ant_PsTdma(pBtCoexist, FORCE_EXEC, false, 1);
+ halbtc8723b2ant_FwDacSwingLvl(pBtCoexist, FORCE_EXEC, 6);
+ halbtc8723b2ant_DecBtPwr(pBtCoexist, FORCE_EXEC, 0);
+
+ halbtc8723b2ant_SwMechanism1(pBtCoexist, false, false, false, false);
+ halbtc8723b2ant_SwMechanism2(pBtCoexist, false, false, false, 0x18);
+}
+
+static void halbtc8723b2ant_ActionBtInquiry(PBTC_COEXIST pBtCoexist)
+{
+ bool bWifiConnected = false;
+ bool bLowPwrDisable = true;
+
+ pBtCoexist->fBtcSet(pBtCoexist, BTC_SET_ACT_DISABLE_LOW_POWER, &bLowPwrDisable);
+ pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_BL_WIFI_CONNECTED, &bWifiConnected);
+
+ if (bWifiConnected) {
+ halbtc8723b2ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 7);
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 3);
+ } else {
+ halbtc8723b2ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 0);
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, false, 1);
+ }
+
+ halbtc8723b2ant_FwDacSwingLvl(pBtCoexist, FORCE_EXEC, 6);
+ halbtc8723b2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, 0);
+
+ halbtc8723b2ant_SwMechanism1(pBtCoexist, false, false, false, false);
+ halbtc8723b2ant_SwMechanism2(pBtCoexist, false, false, false, 0x18);
+
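+	/*
+	 * Back up register 0x948 and flag it for later recovery, then route
+	 * the antenna to the AUX path for the duration of the inquiry.
+	 */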
+ pCoexDm->bNeedRecover0x948 = true;
+ pCoexDm->backup0x948 = pBtCoexist->fBtcRead4Byte(pBtCoexist, 0x948);
+
+ halbtc8723b2ant_SetAntPath(pBtCoexist, BTC_ANT_WIFI_AT_AUX, false, false);
+}
+
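+/*
+ * Handle the simple "common" cases (WiFi not connected, BT idle, or WiFi
+ * connected but idle) with a fixed configuration; returns true when one of
+ * them was applied here.
+ */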
+static bool halbtc8723b2ant_IsCommonAction(PBTC_COEXIST pBtCoexist)
+{
+ u8 btRssiState = BTC_RSSI_STATE_HIGH;
+ bool bCommon = false, bWifiConnected = false, bWifiBusy = false;
+ bool bBtHsOn = false, bLowPwrDisable = false;
+
+ pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_BL_HS_OPERATION, &bBtHsOn);
+ pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_BL_WIFI_CONNECTED, &bWifiConnected);
+ pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_BL_WIFI_BUSY, &bWifiBusy);
+
+ if (!bWifiConnected) {
+ bLowPwrDisable = false;
+ pBtCoexist->fBtcSet(pBtCoexist, BTC_SET_ACT_DISABLE_LOW_POWER, &bLowPwrDisable);
+ halbtc8723b2ant_LimitedRx(pBtCoexist, NORMAL_EXEC, false, false, 0x8);
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Wifi non-connected idle!!\n"));
+
+ pBtCoexist->fBtcSetRfReg(pBtCoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
+ halbtc8723b2ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 0);
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, false, 1);
+ halbtc8723b2ant_FwDacSwingLvl(pBtCoexist, NORMAL_EXEC, 6);
+ halbtc8723b2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, 0);
+
+ halbtc8723b2ant_SwMechanism1(pBtCoexist, false, false, false, false);
+ halbtc8723b2ant_SwMechanism2(pBtCoexist, false, false, false, 0x18);
+
+ bCommon = true;
+ } else {
+ if (BT_8723B_2ANT_BT_STATUS_NON_CONNECTED_IDLE == pCoexDm->btStatus) {
+ bLowPwrDisable = false;
+ pBtCoexist->fBtcSet(pBtCoexist, BTC_SET_ACT_DISABLE_LOW_POWER, &bLowPwrDisable);
+ halbtc8723b2ant_LimitedRx(pBtCoexist, NORMAL_EXEC, false, false, 0x8);
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Wifi connected + BT non connected-idle!!\n"));
+
+ pBtCoexist->fBtcSetRfReg(pBtCoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
+ halbtc8723b2ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 0);
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, false, 1);
+ halbtc8723b2ant_FwDacSwingLvl(pBtCoexist, NORMAL_EXEC, 0xb);
+ halbtc8723b2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, 0);
+
+ halbtc8723b2ant_SwMechanism1(pBtCoexist, false, false, false, false);
+ halbtc8723b2ant_SwMechanism2(pBtCoexist, false, false, false, 0x18);
+
+ bCommon = true;
+ } else if (BT_8723B_2ANT_BT_STATUS_CONNECTED_IDLE == pCoexDm->btStatus) {
+ bLowPwrDisable = true;
+ pBtCoexist->fBtcSet(pBtCoexist, BTC_SET_ACT_DISABLE_LOW_POWER, &bLowPwrDisable);
+
+ if (bBtHsOn)
+ return false;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Wifi connected + BT connected-idle!!\n"));
+ halbtc8723b2ant_LimitedRx(pBtCoexist, NORMAL_EXEC, false, false, 0x8);
+
+ pBtCoexist->fBtcSetRfReg(pBtCoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
+ halbtc8723b2ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 0);
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, false, 1);
+ halbtc8723b2ant_FwDacSwingLvl(pBtCoexist, NORMAL_EXEC, 0xb);
+ halbtc8723b2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, 0);
+
+ halbtc8723b2ant_SwMechanism1(pBtCoexist, true, false, false, false);
+ halbtc8723b2ant_SwMechanism2(pBtCoexist, false, false, false, 0x18);
+
+ bCommon = true;
+ } else {
+ bLowPwrDisable = true;
+ pBtCoexist->fBtcSet(pBtCoexist, BTC_SET_ACT_DISABLE_LOW_POWER, &bLowPwrDisable);
+
+ if (bWifiBusy) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Wifi Connected-Busy + BT Busy!!\n"));
+ bCommon = false;
+ } else {
+ if (bBtHsOn)
+ return false;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Wifi Connected-Idle + BT Busy!!\n"));
+ btRssiState = halbtc8723b2ant_BtRssiState(2, 29, 0);
+ halbtc8723b2ant_LimitedRx(pBtCoexist, NORMAL_EXEC, false, false, 0x8);
+
+ pBtCoexist->fBtcSetRfReg(pBtCoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
+ halbtc8723b2ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 7);
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 21);
+ halbtc8723b2ant_FwDacSwingLvl(pBtCoexist, NORMAL_EXEC, 0xb);
+
+ if (BTC_RSSI_HIGH(btRssiState))
+ halbtc8723b2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, 2);
+ else
+ halbtc8723b2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, 0);
+
+ halbtc8723b2ant_SwMechanism1(pBtCoexist, false, false, false, false);
+ halbtc8723b2ant_SwMechanism2(pBtCoexist, false, false, false, 0x18);
+ bCommon = true;
+ }
+ }
+ }
+
+ return bCommon;
+}
+
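+/*
+ * Adaptive PS-TDMA tuning: on the first run pick an initial pattern from
+ * bScoHid/bTxPause/maxInterval; afterwards use the BT retry count of the
+ * last 2-second window to widen (result = 1) or narrow (result = -1) the
+ * WiFi slot, using the up/dn counters and an n = 3*m backoff to avoid
+ * oscillating between two patterns.
+ */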
+static void halbtc8723b2ant_TdmaDurationAdjust(
+ PBTC_COEXIST pBtCoexist, bool bScoHid, bool bTxPause, u8 maxInterval
+)
+{
+ static s32 up, dn, m, n, WaitCount;
+ s32 result; /* 0: no change, +1: increase WiFi duration, -1: decrease WiFi duration */
+ u8 retryCount = 0;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], TdmaDurationAdjust()\n"));
+
+ if (!pCoexDm->bAutoTdmaAdjust) {
+ pCoexDm->bAutoTdmaAdjust = true;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], first run TdmaDurationAdjust()!!\n"));
+ {
+ if (bScoHid) {
+ if (bTxPause) {
+ if (maxInterval == 1) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 13);
+ pCoexDm->psTdmaDuAdjType = 13;
+ } else if (maxInterval == 2) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 14);
+ pCoexDm->psTdmaDuAdjType = 14;
+ } else if (maxInterval == 3) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 15);
+ pCoexDm->psTdmaDuAdjType = 15;
+ } else {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 15);
+ pCoexDm->psTdmaDuAdjType = 15;
+ }
+ } else {
+ if (maxInterval == 1) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 9);
+ pCoexDm->psTdmaDuAdjType = 9;
+ } else if (maxInterval == 2) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 10);
+ pCoexDm->psTdmaDuAdjType = 10;
+ } else if (maxInterval == 3) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 11);
+ pCoexDm->psTdmaDuAdjType = 11;
+ } else {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 11);
+ pCoexDm->psTdmaDuAdjType = 11;
+ }
+ }
+ } else {
+ if (bTxPause) {
+ if (maxInterval == 1) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 5);
+ pCoexDm->psTdmaDuAdjType = 5;
+ } else if (maxInterval == 2) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 6);
+ pCoexDm->psTdmaDuAdjType = 6;
+ } else if (maxInterval == 3) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 7);
+ pCoexDm->psTdmaDuAdjType = 7;
+ } else {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 7);
+ pCoexDm->psTdmaDuAdjType = 7;
+ }
+ } else {
+ if (maxInterval == 1) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 1);
+ pCoexDm->psTdmaDuAdjType = 1;
+ } else if (maxInterval == 2) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 2);
+ pCoexDm->psTdmaDuAdjType = 2;
+ } else if (maxInterval == 3) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 3);
+ pCoexDm->psTdmaDuAdjType = 3;
+ } else {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 3);
+ pCoexDm->psTdmaDuAdjType = 3;
+ }
+ }
+ }
+ }
+ /* */
+ up = 0;
+ dn = 0;
+ m = 1;
+ n = 3;
+ result = 0;
+ WaitCount = 0;
+ } else {
+		/* acquire the BT TRx retry count from BT_Info byte2 */
+ retryCount = pCoexSta->btRetryCnt;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], retryCount = %d\n", retryCount));
+ BTC_PRINT(
+ BTC_MSG_ALGORITHM,
+ ALGO_TRACE_FW_DETAIL,
+ (
+ "[BTCoex], up =%d, dn =%d, m =%d, n =%d, WaitCount =%d\n",
+ up, dn, m, n, WaitCount
+ )
+ );
+ result = 0;
+ WaitCount++;
+
+ if (retryCount == 0) { /* no retry in the last 2-second duration */
+ up++;
+ dn--;
+
+ if (dn <= 0)
+ dn = 0;
+
+			if (up >= n) { /* retry count was 0 for n consecutive 2-second periods: widen the WiFi duration */
+ WaitCount = 0;
+ n = 3;
+ up = 0;
+ dn = 0;
+ result = 1;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], Increase wifi duration!!\n"));
+ }
+ } else if (retryCount <= 3) { /* <=3 retry in the last 2-second duration */
+ up--;
+ dn++;
+
+ if (up <= 0)
+ up = 0;
+
+			if (dn == 2) { /* retry count was < 3 for 2 consecutive 2-second periods: narrow the WiFi duration */
+ if (WaitCount <= 2)
+					m++; /* avoid bouncing back and forth between two levels */
+ else
+ m = 1;
+
+				if (m >= 20) /* cap m at 20, i.e. recheck whether to adjust the WiFi duration at most every 120 seconds */
+ m = 20;
+
+ n = 3*m;
+ up = 0;
+ dn = 0;
+ WaitCount = 0;
+ result = -1;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], Decrease wifi duration for retryCounter<3!!\n"));
+ }
+		} else { /* a single 2-second period with retry count > 3 is enough to narrow the WiFi duration */
+ if (WaitCount == 1)
+				m++; /* avoid bouncing back and forth between two levels */
+ else
+ m = 1;
+
+			if (m >= 20) /* cap m at 20, i.e. recheck whether to adjust the WiFi duration at most every 120 seconds */
+ m = 20;
+
+ n = 3*m;
+ up = 0;
+ dn = 0;
+ WaitCount = 0;
+ result = -1;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], Decrease wifi duration for retryCounter>3!!\n"));
+ }
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], max Interval = %d\n", maxInterval));
+ if (maxInterval == 1) {
+ if (bTxPause) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], TxPause = 1\n"));
+
+ if (pCoexDm->curPsTdma == 71) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 5);
+ pCoexDm->psTdmaDuAdjType = 5;
+ } else if (pCoexDm->curPsTdma == 1) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 5);
+ pCoexDm->psTdmaDuAdjType = 5;
+ } else if (pCoexDm->curPsTdma == 2) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 6);
+ pCoexDm->psTdmaDuAdjType = 6;
+ } else if (pCoexDm->curPsTdma == 3) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 7);
+ pCoexDm->psTdmaDuAdjType = 7;
+ } else if (pCoexDm->curPsTdma == 4) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 8);
+ pCoexDm->psTdmaDuAdjType = 8;
+ }
+
+ if (pCoexDm->curPsTdma == 9) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 13);
+ pCoexDm->psTdmaDuAdjType = 13;
+ } else if (pCoexDm->curPsTdma == 10) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 14);
+ pCoexDm->psTdmaDuAdjType = 14;
+ } else if (pCoexDm->curPsTdma == 11) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 15);
+ pCoexDm->psTdmaDuAdjType = 15;
+ } else if (pCoexDm->curPsTdma == 12) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 16);
+ pCoexDm->psTdmaDuAdjType = 16;
+ }
+
+ if (result == -1) {
+ if (pCoexDm->curPsTdma == 5) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 6);
+ pCoexDm->psTdmaDuAdjType = 6;
+ } else if (pCoexDm->curPsTdma == 6) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 7);
+ pCoexDm->psTdmaDuAdjType = 7;
+ } else if (pCoexDm->curPsTdma == 7) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 8);
+ pCoexDm->psTdmaDuAdjType = 8;
+ } else if (pCoexDm->curPsTdma == 13) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 14);
+ pCoexDm->psTdmaDuAdjType = 14;
+ } else if (pCoexDm->curPsTdma == 14) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 15);
+ pCoexDm->psTdmaDuAdjType = 15;
+ } else if (pCoexDm->curPsTdma == 15) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 16);
+ pCoexDm->psTdmaDuAdjType = 16;
+ }
+ } else if (result == 1) {
+ if (pCoexDm->curPsTdma == 8) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 7);
+ pCoexDm->psTdmaDuAdjType = 7;
+ } else if (pCoexDm->curPsTdma == 7) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 6);
+ pCoexDm->psTdmaDuAdjType = 6;
+ } else if (pCoexDm->curPsTdma == 6) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 5);
+ pCoexDm->psTdmaDuAdjType = 5;
+ } else if (pCoexDm->curPsTdma == 16) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 15);
+ pCoexDm->psTdmaDuAdjType = 15;
+ } else if (pCoexDm->curPsTdma == 15) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 14);
+ pCoexDm->psTdmaDuAdjType = 14;
+ } else if (pCoexDm->curPsTdma == 14) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 13);
+ pCoexDm->psTdmaDuAdjType = 13;
+ }
+ }
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], TxPause = 0\n"));
+ if (pCoexDm->curPsTdma == 5) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 71);
+ pCoexDm->psTdmaDuAdjType = 71;
+ } else if (pCoexDm->curPsTdma == 6) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 2);
+ pCoexDm->psTdmaDuAdjType = 2;
+ } else if (pCoexDm->curPsTdma == 7) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 3);
+ pCoexDm->psTdmaDuAdjType = 3;
+ } else if (pCoexDm->curPsTdma == 8) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 4);
+ pCoexDm->psTdmaDuAdjType = 4;
+ }
+
+ if (pCoexDm->curPsTdma == 13) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 9);
+ pCoexDm->psTdmaDuAdjType = 9;
+ } else if (pCoexDm->curPsTdma == 14) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 10);
+ pCoexDm->psTdmaDuAdjType = 10;
+ } else if (pCoexDm->curPsTdma == 15) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 11);
+ pCoexDm->psTdmaDuAdjType = 11;
+ } else if (pCoexDm->curPsTdma == 16) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 12);
+ pCoexDm->psTdmaDuAdjType = 12;
+ }
+
+ if (result == -1) {
+ if (pCoexDm->curPsTdma == 71) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 1);
+ pCoexDm->psTdmaDuAdjType = 1;
+ } else if (pCoexDm->curPsTdma == 1) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 2);
+ pCoexDm->psTdmaDuAdjType = 2;
+ } else if (pCoexDm->curPsTdma == 2) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 3);
+ pCoexDm->psTdmaDuAdjType = 3;
+ } else if (pCoexDm->curPsTdma == 3) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 4);
+ pCoexDm->psTdmaDuAdjType = 4;
+ } else if (pCoexDm->curPsTdma == 9) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 10);
+ pCoexDm->psTdmaDuAdjType = 10;
+ } else if (pCoexDm->curPsTdma == 10) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 11);
+ pCoexDm->psTdmaDuAdjType = 11;
+ } else if (pCoexDm->curPsTdma == 11) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 12);
+ pCoexDm->psTdmaDuAdjType = 12;
+ }
+ } else if (result == 1) {
+ if (pCoexDm->curPsTdma == 4) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 3);
+ pCoexDm->psTdmaDuAdjType = 3;
+ } else if (pCoexDm->curPsTdma == 3) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 2);
+ pCoexDm->psTdmaDuAdjType = 2;
+ } else if (pCoexDm->curPsTdma == 2) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 1);
+ pCoexDm->psTdmaDuAdjType = 1;
+ } else if (pCoexDm->curPsTdma == 1) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 71);
+ pCoexDm->psTdmaDuAdjType = 71;
+ } else if (pCoexDm->curPsTdma == 12) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 11);
+ pCoexDm->psTdmaDuAdjType = 11;
+ } else if (pCoexDm->curPsTdma == 11) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 10);
+ pCoexDm->psTdmaDuAdjType = 10;
+ } else if (pCoexDm->curPsTdma == 10) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 9);
+ pCoexDm->psTdmaDuAdjType = 9;
+ }
+ }
+ }
+ } else if (maxInterval == 2) {
+ if (bTxPause) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], TxPause = 1\n"));
+ if (pCoexDm->curPsTdma == 1) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 6);
+ pCoexDm->psTdmaDuAdjType = 6;
+ } else if (pCoexDm->curPsTdma == 2) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 6);
+ pCoexDm->psTdmaDuAdjType = 6;
+ } else if (pCoexDm->curPsTdma == 3) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 7);
+ pCoexDm->psTdmaDuAdjType = 7;
+ } else if (pCoexDm->curPsTdma == 4) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 8);
+ pCoexDm->psTdmaDuAdjType = 8;
+ }
+
+ if (pCoexDm->curPsTdma == 9) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 14);
+ pCoexDm->psTdmaDuAdjType = 14;
+ } else if (pCoexDm->curPsTdma == 10) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 14);
+ pCoexDm->psTdmaDuAdjType = 14;
+ } else if (pCoexDm->curPsTdma == 11) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 15);
+ pCoexDm->psTdmaDuAdjType = 15;
+ } else if (pCoexDm->curPsTdma == 12) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 16);
+ pCoexDm->psTdmaDuAdjType = 16;
+ }
+
+ if (result == -1) {
+ if (pCoexDm->curPsTdma == 5) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 6);
+ pCoexDm->psTdmaDuAdjType = 6;
+ } else if (pCoexDm->curPsTdma == 6) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 7);
+ pCoexDm->psTdmaDuAdjType = 7;
+ } else if (pCoexDm->curPsTdma == 7) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 8);
+ pCoexDm->psTdmaDuAdjType = 8;
+ } else if (pCoexDm->curPsTdma == 13) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 14);
+ pCoexDm->psTdmaDuAdjType = 14;
+ } else if (pCoexDm->curPsTdma == 14) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 15);
+ pCoexDm->psTdmaDuAdjType = 15;
+ } else if (pCoexDm->curPsTdma == 15) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 16);
+ pCoexDm->psTdmaDuAdjType = 16;
+ }
+ } else if (result == 1) {
+ if (pCoexDm->curPsTdma == 8) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 7);
+ pCoexDm->psTdmaDuAdjType = 7;
+ } else if (pCoexDm->curPsTdma == 7) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 6);
+ pCoexDm->psTdmaDuAdjType = 6;
+ } else if (pCoexDm->curPsTdma == 6) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 6);
+ pCoexDm->psTdmaDuAdjType = 6;
+ } else if (pCoexDm->curPsTdma == 16) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 15);
+ pCoexDm->psTdmaDuAdjType = 15;
+ } else if (pCoexDm->curPsTdma == 15) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 14);
+ pCoexDm->psTdmaDuAdjType = 14;
+ } else if (pCoexDm->curPsTdma == 14) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 14);
+ pCoexDm->psTdmaDuAdjType = 14;
+ }
+ }
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], TxPause = 0\n"));
+ if (pCoexDm->curPsTdma == 5) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 2);
+ pCoexDm->psTdmaDuAdjType = 2;
+ } else if (pCoexDm->curPsTdma == 6) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 2);
+ pCoexDm->psTdmaDuAdjType = 2;
+ } else if (pCoexDm->curPsTdma == 7) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 3);
+ pCoexDm->psTdmaDuAdjType = 3;
+ } else if (pCoexDm->curPsTdma == 8) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 4);
+ pCoexDm->psTdmaDuAdjType = 4;
+ }
+
+ if (pCoexDm->curPsTdma == 13) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 10);
+ pCoexDm->psTdmaDuAdjType = 10;
+ } else if (pCoexDm->curPsTdma == 14) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 10);
+ pCoexDm->psTdmaDuAdjType = 10;
+ } else if (pCoexDm->curPsTdma == 15) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 11);
+ pCoexDm->psTdmaDuAdjType = 11;
+ } else if (pCoexDm->curPsTdma == 16) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 12);
+ pCoexDm->psTdmaDuAdjType = 12;
+ }
+
+ if (result == -1) {
+ if (pCoexDm->curPsTdma == 1) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 2);
+ pCoexDm->psTdmaDuAdjType = 2;
+ } else if (pCoexDm->curPsTdma == 2) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 3);
+ pCoexDm->psTdmaDuAdjType = 3;
+ } else if (pCoexDm->curPsTdma == 3) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 4);
+ pCoexDm->psTdmaDuAdjType = 4;
+ } else if (pCoexDm->curPsTdma == 9) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 10);
+ pCoexDm->psTdmaDuAdjType = 10;
+ } else if (pCoexDm->curPsTdma == 10) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 11);
+ pCoexDm->psTdmaDuAdjType = 11;
+ } else if (pCoexDm->curPsTdma == 11) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 12);
+ pCoexDm->psTdmaDuAdjType = 12;
+ }
+ } else if (result == 1) {
+ if (pCoexDm->curPsTdma == 4) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 3);
+ pCoexDm->psTdmaDuAdjType = 3;
+ } else if (pCoexDm->curPsTdma == 3) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 2);
+ pCoexDm->psTdmaDuAdjType = 2;
+ } else if (pCoexDm->curPsTdma == 2) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 2);
+ pCoexDm->psTdmaDuAdjType = 2;
+ } else if (pCoexDm->curPsTdma == 12) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 11);
+ pCoexDm->psTdmaDuAdjType = 11;
+ } else if (pCoexDm->curPsTdma == 11) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 10);
+ pCoexDm->psTdmaDuAdjType = 10;
+ } else if (pCoexDm->curPsTdma == 10) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 10);
+ pCoexDm->psTdmaDuAdjType = 10;
+ }
+ }
+ }
+ } else if (maxInterval == 3) {
+ if (bTxPause) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], TxPause = 1\n"));
+ if (pCoexDm->curPsTdma == 1) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 7);
+ pCoexDm->psTdmaDuAdjType = 7;
+ } else if (pCoexDm->curPsTdma == 2) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 7);
+ pCoexDm->psTdmaDuAdjType = 7;
+ } else if (pCoexDm->curPsTdma == 3) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 7);
+ pCoexDm->psTdmaDuAdjType = 7;
+ } else if (pCoexDm->curPsTdma == 4) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 8);
+ pCoexDm->psTdmaDuAdjType = 8;
+ }
+
+ if (pCoexDm->curPsTdma == 9) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 15);
+ pCoexDm->psTdmaDuAdjType = 15;
+ } else if (pCoexDm->curPsTdma == 10) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 15);
+ pCoexDm->psTdmaDuAdjType = 15;
+ } else if (pCoexDm->curPsTdma == 11) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 15);
+ pCoexDm->psTdmaDuAdjType = 15;
+ } else if (pCoexDm->curPsTdma == 12) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 16);
+ pCoexDm->psTdmaDuAdjType = 16;
+ }
+
+ if (result == -1) {
+ if (pCoexDm->curPsTdma == 5) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 7);
+ pCoexDm->psTdmaDuAdjType = 7;
+ } else if (pCoexDm->curPsTdma == 6) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 7);
+ pCoexDm->psTdmaDuAdjType = 7;
+ } else if (pCoexDm->curPsTdma == 7) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 8);
+ pCoexDm->psTdmaDuAdjType = 8;
+ } else if (pCoexDm->curPsTdma == 13) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 15);
+ pCoexDm->psTdmaDuAdjType = 15;
+ } else if (pCoexDm->curPsTdma == 14) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 15);
+ pCoexDm->psTdmaDuAdjType = 15;
+ } else if (pCoexDm->curPsTdma == 15) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 16);
+ pCoexDm->psTdmaDuAdjType = 16;
+ }
+ } else if (result == 1) {
+ if (pCoexDm->curPsTdma == 8) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 7);
+ pCoexDm->psTdmaDuAdjType = 7;
+ } else if (pCoexDm->curPsTdma == 7) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 7);
+ pCoexDm->psTdmaDuAdjType = 7;
+ } else if (pCoexDm->curPsTdma == 6) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 7);
+ pCoexDm->psTdmaDuAdjType = 7;
+ } else if (pCoexDm->curPsTdma == 16) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 15);
+ pCoexDm->psTdmaDuAdjType = 15;
+ } else if (pCoexDm->curPsTdma == 15) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 15);
+ pCoexDm->psTdmaDuAdjType = 15;
+ } else if (pCoexDm->curPsTdma == 14) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 15);
+ pCoexDm->psTdmaDuAdjType = 15;
+ }
+ }
+ } else {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], TxPause = 0\n"));
+ if (pCoexDm->curPsTdma == 5) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 3);
+ pCoexDm->psTdmaDuAdjType = 3;
+ } else if (pCoexDm->curPsTdma == 6) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 3);
+ pCoexDm->psTdmaDuAdjType = 3;
+ } else if (pCoexDm->curPsTdma == 7) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 3);
+ pCoexDm->psTdmaDuAdjType = 3;
+ } else if (pCoexDm->curPsTdma == 8) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 4);
+ pCoexDm->psTdmaDuAdjType = 4;
+ }
+
+ if (pCoexDm->curPsTdma == 13) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 11);
+ pCoexDm->psTdmaDuAdjType = 11;
+ } else if (pCoexDm->curPsTdma == 14) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 11);
+ pCoexDm->psTdmaDuAdjType = 11;
+ } else if (pCoexDm->curPsTdma == 15) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 11);
+ pCoexDm->psTdmaDuAdjType = 11;
+ } else if (pCoexDm->curPsTdma == 16) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 12);
+ pCoexDm->psTdmaDuAdjType = 12;
+ }
+
+ if (result == -1) {
+ if (pCoexDm->curPsTdma == 1) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 3);
+ pCoexDm->psTdmaDuAdjType = 3;
+ } else if (pCoexDm->curPsTdma == 2) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 3);
+ pCoexDm->psTdmaDuAdjType = 3;
+ } else if (pCoexDm->curPsTdma == 3) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 4);
+ pCoexDm->psTdmaDuAdjType = 4;
+ } else if (pCoexDm->curPsTdma == 9) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 11);
+ pCoexDm->psTdmaDuAdjType = 11;
+ } else if (pCoexDm->curPsTdma == 10) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 11);
+ pCoexDm->psTdmaDuAdjType = 11;
+ } else if (pCoexDm->curPsTdma == 11) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 12);
+ pCoexDm->psTdmaDuAdjType = 12;
+ }
+ } else if (result == 1) {
+ if (pCoexDm->curPsTdma == 4) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 3);
+ pCoexDm->psTdmaDuAdjType = 3;
+ } else if (pCoexDm->curPsTdma == 3) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 3);
+ pCoexDm->psTdmaDuAdjType = 3;
+ } else if (pCoexDm->curPsTdma == 2) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 3);
+ pCoexDm->psTdmaDuAdjType = 3;
+ } else if (pCoexDm->curPsTdma == 12) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 11);
+ pCoexDm->psTdmaDuAdjType = 11;
+ } else if (pCoexDm->curPsTdma == 11) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 11);
+ pCoexDm->psTdmaDuAdjType = 11;
+ } else if (pCoexDm->curPsTdma == 10) {
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 11);
+ pCoexDm->psTdmaDuAdjType = 11;
+ }
+ }
+ }
+ }
+ }
+
+	/*
+	 * If the current PsTdma does not match the recorded one (e.g. during
+	 * scan or DHCP), adjust it back to the previously recorded one.
+	 */
+ if (pCoexDm->curPsTdma != pCoexDm->psTdmaDuAdjType) {
+ bool bScan = false, bLink = false, bRoam = false;
+ BTC_PRINT(
+ BTC_MSG_ALGORITHM,
+ ALGO_TRACE_FW_DETAIL,
+ (
+				"[BTCoex], PsTdma type mismatch!!!, curPsTdma =%d, recordPsTdma =%d\n",
+ pCoexDm->curPsTdma,
+ pCoexDm->psTdmaDuAdjType
+ )
+ );
+
+ pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_BL_WIFI_SCAN, &bScan);
+ pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_BL_WIFI_LINK, &bLink);
+ pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_BL_WIFI_ROAM, &bRoam);
+
+ if (!bScan && !bLink && !bRoam)
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, pCoexDm->psTdmaDuAdjType);
+ else {
+			BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], roaming/link/scan is in progress, will adjust next time!!!\n"));
+ }
+ }
+}
+
+/* SCO only or SCO+PAN(HS) */
+static void halbtc8723b2ant_ActionSco(PBTC_COEXIST pBtCoexist)
+{
+ u8 wifiRssiState, btRssiState;
+ u32 wifiBw;
+
+ wifiRssiState = halbtc8723b2ant_WifiRssiState(pBtCoexist, 0, 2, 15, 0);
+ btRssiState = halbtc8723b2ant_BtRssiState(2, 29, 0);
+
+ pBtCoexist->fBtcSetRfReg(pBtCoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
+
+ halbtc8723b2ant_LimitedRx(pBtCoexist, NORMAL_EXEC, false, false, 0x8);
+
+ halbtc8723b2ant_FwDacSwingLvl(pBtCoexist, NORMAL_EXEC, 4);
+
+ if (BTC_RSSI_HIGH(btRssiState))
+ halbtc8723b2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, 2);
+ else
+ halbtc8723b2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, 0);
+
+ pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw);
+
+ if (BTC_WIFI_BW_LEGACY == wifiBw) /* for SCO quality at 11b/g mode */
+ halbtc8723b2ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 2);
+ else /* for SCO quality & wifi performance balance at 11n mode */
+ halbtc8723b2ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 8);
+
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, false, 0); /* for voice quality */
+
+ /* sw mechanism */
+ if (BTC_WIFI_BW_HT40 == wifiBw) {
+ if (
+ (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+ (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH)
+ ) {
+ halbtc8723b2ant_SwMechanism1(pBtCoexist, true, true, false, false);
+ halbtc8723b2ant_SwMechanism2(pBtCoexist, true, false, true, 0x4);
+ } else {
+ halbtc8723b2ant_SwMechanism1(pBtCoexist, true, true, false, false);
+ halbtc8723b2ant_SwMechanism2(pBtCoexist, false, false, true, 0x4);
+ }
+ } else {
+ if (
+ (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+ (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH)
+ ) {
+ halbtc8723b2ant_SwMechanism1(pBtCoexist, false, true, false, false);
+ halbtc8723b2ant_SwMechanism2(pBtCoexist, true, false, true, 0x4);
+ } else {
+ halbtc8723b2ant_SwMechanism1(pBtCoexist, false, true, false, false);
+ halbtc8723b2ant_SwMechanism2(pBtCoexist, false, false, true, 0x4);
+ }
+ }
+}
+
+
+static void halbtc8723b2ant_ActionHid(PBTC_COEXIST pBtCoexist)
+{
+ u8 wifiRssiState, btRssiState;
+ u32 wifiBw;
+
+ wifiRssiState = halbtc8723b2ant_WifiRssiState(pBtCoexist, 0, 2, 15, 0);
+ btRssiState = halbtc8723b2ant_BtRssiState(2, 29, 0);
+
+ pBtCoexist->fBtcSetRfReg(pBtCoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
+
+ halbtc8723b2ant_LimitedRx(pBtCoexist, NORMAL_EXEC, false, false, 0x8);
+
+ halbtc8723b2ant_FwDacSwingLvl(pBtCoexist, NORMAL_EXEC, 6);
+
+ if (BTC_RSSI_HIGH(btRssiState))
+ halbtc8723b2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, 2);
+ else
+ halbtc8723b2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, 0);
+
+ pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw);
+
+ if (BTC_WIFI_BW_LEGACY == wifiBw) /* for HID at 11b/g mode */
+ halbtc8723b2ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 7);
+ else /* for HID quality & wifi performance balance at 11n mode */
+ halbtc8723b2ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 9);
+
+ if (
+ (btRssiState == BTC_RSSI_STATE_HIGH) ||
+ (btRssiState == BTC_RSSI_STATE_STAY_HIGH)
+ )
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 9);
+ else
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 13);
+
+ /* sw mechanism */
+ if (BTC_WIFI_BW_HT40 == wifiBw) {
+ if (
+ (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+ (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH)
+ ) {
+ halbtc8723b2ant_SwMechanism1(pBtCoexist, true, true, false, false);
+ halbtc8723b2ant_SwMechanism2(pBtCoexist, true, false, false, 0x18);
+ } else {
+ halbtc8723b2ant_SwMechanism1(pBtCoexist, true, true, false, false);
+ halbtc8723b2ant_SwMechanism2(pBtCoexist, false, false, false, 0x18);
+ }
+ } else {
+ if (
+ (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+ (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH)
+ ) {
+ halbtc8723b2ant_SwMechanism1(pBtCoexist, false, true, false, false);
+ halbtc8723b2ant_SwMechanism2(pBtCoexist, true, false, false, 0x18);
+ } else {
+ halbtc8723b2ant_SwMechanism1(pBtCoexist, false, true, false, false);
+ halbtc8723b2ant_SwMechanism2(pBtCoexist, false, false, false, 0x18);
+ }
+ }
+}
+
+/* A2DP only / PAN(EDR) only/ A2DP+PAN(HS) */
+static void halbtc8723b2ant_ActionA2dp(PBTC_COEXIST pBtCoexist)
+{
+ u8 wifiRssiState, wifiRssiState1, btRssiState;
+ u32 wifiBw;
+ u8 apNum = 0;
+
+ wifiRssiState = halbtc8723b2ant_WifiRssiState(pBtCoexist, 0, 2, 15, 0);
+ wifiRssiState1 = halbtc8723b2ant_WifiRssiState(pBtCoexist, 1, 2, 40, 0);
+ btRssiState = halbtc8723b2ant_BtRssiState(2, 29, 0);
+
+ pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_U1_AP_NUM, &apNum);
+
+ /* define the office environment */
+ if (apNum >= 10 && BTC_RSSI_HIGH(wifiRssiState1)) {
+ /* DbgPrint(" AP#>10(%d)\n", apNum); */
+ pBtCoexist->fBtcSetRfReg(pBtCoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
+ halbtc8723b2ant_LimitedRx(pBtCoexist, NORMAL_EXEC, false, false, 0x8);
+ halbtc8723b2ant_FwDacSwingLvl(pBtCoexist, NORMAL_EXEC, 6);
+ halbtc8723b2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, 0);
+ halbtc8723b2ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 0);
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, false, 1);
+
+ /* sw mechanism */
+ pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw);
+ if (BTC_WIFI_BW_HT40 == wifiBw) {
+ halbtc8723b2ant_SwMechanism1(pBtCoexist, true, false, false, false);
+ halbtc8723b2ant_SwMechanism2(pBtCoexist, true, false, true, 0x18);
+ } else {
+ halbtc8723b2ant_SwMechanism1(pBtCoexist, false, false, false, false);
+ halbtc8723b2ant_SwMechanism2(pBtCoexist, true, false, true, 0x18);
+ }
+ return;
+ }
+
+ pBtCoexist->fBtcSetRfReg(pBtCoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
+ halbtc8723b2ant_LimitedRx(pBtCoexist, NORMAL_EXEC, false, false, 0x8);
+
+ halbtc8723b2ant_FwDacSwingLvl(pBtCoexist, NORMAL_EXEC, 6);
+
+ if (BTC_RSSI_HIGH(btRssiState))
+ halbtc8723b2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, 2);
+ else
+ halbtc8723b2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, 0);
+
+ halbtc8723b2ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 7);
+
+ if (
+ (btRssiState == BTC_RSSI_STATE_HIGH) ||
+ (btRssiState == BTC_RSSI_STATE_STAY_HIGH)
+ )
+ halbtc8723b2ant_TdmaDurationAdjust(pBtCoexist, false, false, 1);
+ else
+ halbtc8723b2ant_TdmaDurationAdjust(pBtCoexist, false, true, 1);
+
+ /* sw mechanism */
+ pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw);
+ if (BTC_WIFI_BW_HT40 == wifiBw) {
+ if (
+ (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+ (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH)
+ ) {
+ halbtc8723b2ant_SwMechanism1(pBtCoexist, true, false, false, false);
+ halbtc8723b2ant_SwMechanism2(pBtCoexist, true, false, false, 0x18);
+ } else {
+ halbtc8723b2ant_SwMechanism1(pBtCoexist, true, false, false, false);
+ halbtc8723b2ant_SwMechanism2(pBtCoexist, false, false, false, 0x18);
+ }
+ } else {
+ if (
+ (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+ (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH)
+ ) {
+ halbtc8723b2ant_SwMechanism1(pBtCoexist, false, false, false, false);
+ halbtc8723b2ant_SwMechanism2(pBtCoexist, true, false, false, 0x18);
+ } else {
+ halbtc8723b2ant_SwMechanism1(pBtCoexist, false, false, false, false);
+ halbtc8723b2ant_SwMechanism2(pBtCoexist, false, false, false, 0x18);
+ }
+ }
+}
+
+static void halbtc8723b2ant_ActionA2dpPanHs(PBTC_COEXIST pBtCoexist)
+{
+ u8 wifiRssiState, btRssiState;
+ u32 wifiBw;
+
+ wifiRssiState = halbtc8723b2ant_WifiRssiState(pBtCoexist, 0, 2, 15, 0);
+ btRssiState = halbtc8723b2ant_BtRssiState(2, 29, 0);
+
+ pBtCoexist->fBtcSetRfReg(pBtCoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
+
+ halbtc8723b2ant_LimitedRx(pBtCoexist, NORMAL_EXEC, false, false, 0x8);
+
+ halbtc8723b2ant_FwDacSwingLvl(pBtCoexist, NORMAL_EXEC, 6);
+
+ if (BTC_RSSI_HIGH(btRssiState))
+ halbtc8723b2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, 2);
+ else
+ halbtc8723b2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, 0);
+
+ halbtc8723b2ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 7);
+
+ halbtc8723b2ant_TdmaDurationAdjust(pBtCoexist, false, true, 2);
+
+ /* sw mechanism */
+ pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw);
+ if (BTC_WIFI_BW_HT40 == wifiBw) {
+ if (
+ (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+ (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH)
+ ) {
+ halbtc8723b2ant_SwMechanism1(pBtCoexist, true, false, false, false);
+ halbtc8723b2ant_SwMechanism2(pBtCoexist, true, false, false, 0x18);
+ } else {
+ halbtc8723b2ant_SwMechanism1(pBtCoexist, true, false, false, false);
+ halbtc8723b2ant_SwMechanism2(pBtCoexist, false, false, false, 0x18);
+ }
+ } else {
+ if (
+ (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+ (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH)
+ ) {
+ halbtc8723b2ant_SwMechanism1(pBtCoexist, false, false, false, false);
+ halbtc8723b2ant_SwMechanism2(pBtCoexist, true, false, false, 0x18);
+ } else {
+ halbtc8723b2ant_SwMechanism1(pBtCoexist, false, false, false, false);
+ halbtc8723b2ant_SwMechanism2(pBtCoexist, false, false, false, 0x18);
+ }
+ }
+}
+
+static void halbtc8723b2ant_ActionPanEdr(PBTC_COEXIST pBtCoexist)
+{
+ u8 wifiRssiState, btRssiState;
+ u32 wifiBw;
+
+ wifiRssiState = halbtc8723b2ant_WifiRssiState(pBtCoexist, 0, 2, 15, 0);
+ btRssiState = halbtc8723b2ant_BtRssiState(2, 29, 0);
+
+ pBtCoexist->fBtcSetRfReg(pBtCoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
+
+ halbtc8723b2ant_LimitedRx(pBtCoexist, NORMAL_EXEC, false, false, 0x8);
+
+ halbtc8723b2ant_FwDacSwingLvl(pBtCoexist, NORMAL_EXEC, 6);
+
+ if (BTC_RSSI_HIGH(btRssiState))
+ halbtc8723b2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, 2);
+ else
+ halbtc8723b2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, 0);
+
+ halbtc8723b2ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 10);
+
+ if (
+ (btRssiState == BTC_RSSI_STATE_HIGH) ||
+ (btRssiState == BTC_RSSI_STATE_STAY_HIGH)
+ )
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 1);
+ else
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 5);
+
+ /* sw mechanism */
+ pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw);
+ if (BTC_WIFI_BW_HT40 == wifiBw) {
+ if (
+ (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+ (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH)
+ ) {
+ halbtc8723b2ant_SwMechanism1(pBtCoexist, true, false, false, false);
+ halbtc8723b2ant_SwMechanism2(pBtCoexist, true, false, false, 0x18);
+ } else {
+ halbtc8723b2ant_SwMechanism1(pBtCoexist, true, false, false, false);
+ halbtc8723b2ant_SwMechanism2(pBtCoexist, false, false, false, 0x18);
+ }
+ } else {
+ if (
+ (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+ (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH)
+ ) {
+ halbtc8723b2ant_SwMechanism1(pBtCoexist, false, false, false, false);
+ halbtc8723b2ant_SwMechanism2(pBtCoexist, true, false, false, 0x18);
+ } else {
+ halbtc8723b2ant_SwMechanism1(pBtCoexist, false, false, false, false);
+ halbtc8723b2ant_SwMechanism2(pBtCoexist, false, false, false, 0x18);
+ }
+ }
+}
+
+
+/* PAN(HS) only */
+static void halbtc8723b2ant_ActionPanHs(PBTC_COEXIST pBtCoexist)
+{
+ u8 wifiRssiState, btRssiState;
+ u32 wifiBw;
+
+ wifiRssiState = halbtc8723b2ant_WifiRssiState(pBtCoexist, 0, 2, 15, 0);
+ btRssiState = halbtc8723b2ant_BtRssiState(2, 29, 0);
+
+ pBtCoexist->fBtcSetRfReg(pBtCoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
+
+ halbtc8723b2ant_LimitedRx(pBtCoexist, NORMAL_EXEC, false, false, 0x8);
+
+ halbtc8723b2ant_FwDacSwingLvl(pBtCoexist, NORMAL_EXEC, 6);
+
+ if (BTC_RSSI_HIGH(btRssiState))
+ halbtc8723b2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, 2);
+ else
+ halbtc8723b2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, 0);
+
+ halbtc8723b2ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 7);
+
+ halbtc8723b2ant_PsTdma(pBtCoexist, NORMAL_EXEC, false, 1);
+
+ pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw);
+ if (BTC_WIFI_BW_HT40 == wifiBw) {
+ if (
+ (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+ (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH)
+ ) {
+ halbtc8723b2ant_SwMechanism1(pBtCoexist, true, false, false, false);
+ halbtc8723b2ant_SwMechanism2(pBtCoexist, true, false, false, 0x18);
+ } else {
+ halbtc8723b2ant_SwMechanism1(pBtCoexist, true, false, false, false);
+ halbtc8723b2ant_SwMechanism2(pBtCoexist, false, false, false, 0x18);
+ }
+ } else {
+ if (
+ (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+ (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH)
+ ) {
+ halbtc8723b2ant_SwMechanism1(pBtCoexist, false, false, false, false);
+ halbtc8723b2ant_SwMechanism2(pBtCoexist, true, false, false, 0x18);
+ } else {
+ halbtc8723b2ant_SwMechanism1(pBtCoexist, false, false, false, false);
+ halbtc8723b2ant_SwMechanism2(pBtCoexist, false, false, false, 0x18);
+ }
+ }
+}
+
+/* PAN(EDR)+A2DP */
+static void halbtc8723b2ant_ActionPanEdrA2dp(PBTC_COEXIST pBtCoexist)
+{
+ u8 wifiRssiState, btRssiState;
+ u32 wifiBw;
+
+ wifiRssiState = halbtc8723b2ant_WifiRssiState(pBtCoexist, 0, 2, 15, 0);
+ btRssiState = halbtc8723b2ant_BtRssiState(2, 29, 0);
+
+ pBtCoexist->fBtcSetRfReg(pBtCoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
+
+ halbtc8723b2ant_LimitedRx(pBtCoexist, NORMAL_EXEC, false, false, 0x8);
+
+ halbtc8723b2ant_FwDacSwingLvl(pBtCoexist, NORMAL_EXEC, 6);
+
+ if (BTC_RSSI_HIGH(btRssiState))
+ halbtc8723b2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, 2);
+ else
+ halbtc8723b2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, 0);
+
+ pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw);
+
+ if (
+ (btRssiState == BTC_RSSI_STATE_HIGH) ||
+ (btRssiState == BTC_RSSI_STATE_STAY_HIGH)
+ ) {
+ halbtc8723b2ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 12);
+ if (BTC_WIFI_BW_HT40 == wifiBw)
+ halbtc8723b2ant_TdmaDurationAdjust(pBtCoexist, false, true, 3);
+ else
+ halbtc8723b2ant_TdmaDurationAdjust(pBtCoexist, false, false, 3);
+ } else {
+ halbtc8723b2ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 7);
+ halbtc8723b2ant_TdmaDurationAdjust(pBtCoexist, false, true, 3);
+ }
+
+ /* sw mechanism */
+ if (BTC_WIFI_BW_HT40 == wifiBw) {
+ if (
+ (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+ (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH)
+ ) {
+ halbtc8723b2ant_SwMechanism1(pBtCoexist, true, false, false, false);
+ halbtc8723b2ant_SwMechanism2(pBtCoexist, true, false, false, 0x18);
+ } else {
+ halbtc8723b2ant_SwMechanism1(pBtCoexist, true, false, false, false);
+ halbtc8723b2ant_SwMechanism2(pBtCoexist, false, false, false, 0x18);
+ }
+ } else {
+ if (
+ (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+ (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH)
+ ) {
+ halbtc8723b2ant_SwMechanism1(pBtCoexist, false, false, false, false);
+ halbtc8723b2ant_SwMechanism2(pBtCoexist, true, false, false, 0x18);
+ } else {
+ halbtc8723b2ant_SwMechanism1(pBtCoexist, false, false, false, false);
+ halbtc8723b2ant_SwMechanism2(pBtCoexist, false, false, false, 0x18);
+ }
+ }
+}
+
+static void halbtc8723b2ant_ActionPanEdrHid(PBTC_COEXIST pBtCoexist)
+{
+ u8 wifiRssiState, btRssiState;
+ u32 wifiBw;
+
+ wifiRssiState = halbtc8723b2ant_WifiRssiState(pBtCoexist, 0, 2, 15, 0);
+ btRssiState = halbtc8723b2ant_BtRssiState(2, 29, 0);
+ pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw);
+
+ halbtc8723b2ant_LimitedRx(pBtCoexist, NORMAL_EXEC, false, false, 0x8);
+
+ if (BTC_RSSI_HIGH(btRssiState))
+ halbtc8723b2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, 2);
+ else
+ halbtc8723b2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, 0);
+
+ if (
+ (btRssiState == BTC_RSSI_STATE_HIGH) ||
+ (btRssiState == BTC_RSSI_STATE_STAY_HIGH)
+ ) {
+ if (BTC_WIFI_BW_HT40 == wifiBw) {
+ halbtc8723b2ant_FwDacSwingLvl(pBtCoexist, NORMAL_EXEC, 3);
+ halbtc8723b2ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 11);
+ pBtCoexist->fBtcSetRfReg(pBtCoexist, BTC_RF_A, 0x1, 0xfffff, 0x780);
+ } else {
+ halbtc8723b2ant_FwDacSwingLvl(pBtCoexist, NORMAL_EXEC, 6);
+ halbtc8723b2ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 7);
+ pBtCoexist->fBtcSetRfReg(pBtCoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
+ }
+ halbtc8723b2ant_TdmaDurationAdjust(pBtCoexist, true, false, 2);
+ } else {
+ halbtc8723b2ant_FwDacSwingLvl(pBtCoexist, NORMAL_EXEC, 6);
+ halbtc8723b2ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 11);
+ pBtCoexist->fBtcSetRfReg(pBtCoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
+ halbtc8723b2ant_TdmaDurationAdjust(pBtCoexist, true, true, 2);
+ }
+
+ /* sw mechanism */
+ if (BTC_WIFI_BW_HT40 == wifiBw) {
+ if (
+ (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+ (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH)
+ ) {
+ halbtc8723b2ant_SwMechanism1(pBtCoexist, true, true, false, false);
+ halbtc8723b2ant_SwMechanism2(pBtCoexist, true, false, false, 0x18);
+ } else {
+ halbtc8723b2ant_SwMechanism1(pBtCoexist, true, true, false, false);
+ halbtc8723b2ant_SwMechanism2(pBtCoexist, false, false, false, 0x18);
+ }
+ } else {
+ if (
+ (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+ (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH)
+ ) {
+ halbtc8723b2ant_SwMechanism1(pBtCoexist, false, true, false, false);
+ halbtc8723b2ant_SwMechanism2(pBtCoexist, true, false, false, 0x18);
+ } else {
+ halbtc8723b2ant_SwMechanism1(pBtCoexist, false, true, false, false);
+ halbtc8723b2ant_SwMechanism2(pBtCoexist, false, false, false, 0x18);
+ }
+ }
+}
+
+/* HID+A2DP+PAN(EDR) */
+static void halbtc8723b2ant_ActionHidA2dpPanEdr(PBTC_COEXIST pBtCoexist)
+{
+ u8 wifiRssiState, btRssiState;
+ u32 wifiBw;
+
+ wifiRssiState = halbtc8723b2ant_WifiRssiState(pBtCoexist, 0, 2, 15, 0);
+ btRssiState = halbtc8723b2ant_BtRssiState(2, 29, 0);
+
+ pBtCoexist->fBtcSetRfReg(pBtCoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
+
+ halbtc8723b2ant_LimitedRx(pBtCoexist, NORMAL_EXEC, false, false, 0x8);
+
+ halbtc8723b2ant_FwDacSwingLvl(pBtCoexist, NORMAL_EXEC, 6);
+
+ if (BTC_RSSI_HIGH(btRssiState))
+ halbtc8723b2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, 2);
+ else
+ halbtc8723b2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, 0);
+
+ pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw);
+
+ halbtc8723b2ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 7);
+
+ if (
+ (btRssiState == BTC_RSSI_STATE_HIGH) ||
+ (btRssiState == BTC_RSSI_STATE_STAY_HIGH)
+ ) {
+ if (BTC_WIFI_BW_HT40 == wifiBw)
+ halbtc8723b2ant_TdmaDurationAdjust(pBtCoexist, true, true, 2);
+ else
+ halbtc8723b2ant_TdmaDurationAdjust(pBtCoexist, true, false, 3);
+ } else
+ halbtc8723b2ant_TdmaDurationAdjust(pBtCoexist, true, true, 3);
+
+ /* sw mechanism */
+ if (BTC_WIFI_BW_HT40 == wifiBw) {
+ if (
+ (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+ (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH)
+ ) {
+ halbtc8723b2ant_SwMechanism1(pBtCoexist, true, true, false, false);
+ halbtc8723b2ant_SwMechanism2(pBtCoexist, true, false, false, 0x18);
+ } else {
+ halbtc8723b2ant_SwMechanism1(pBtCoexist, true, true, false, false);
+ halbtc8723b2ant_SwMechanism2(pBtCoexist, false, false, false, 0x18);
+ }
+ } else {
+ if (
+ (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+ (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH)
+ ) {
+ halbtc8723b2ant_SwMechanism1(pBtCoexist, false, true, false, false);
+ halbtc8723b2ant_SwMechanism2(pBtCoexist, true, false, false, 0x18);
+ } else {
+ halbtc8723b2ant_SwMechanism1(pBtCoexist, false, true, false, false);
+ halbtc8723b2ant_SwMechanism2(pBtCoexist, false, false, false, 0x18);
+ }
+ }
+}
+
+static void halbtc8723b2ant_ActionHidA2dp(PBTC_COEXIST pBtCoexist)
+{
+ u8 wifiRssiState, btRssiState;
+ u32 wifiBw;
+ u8 apNum = 0;
+
+ wifiRssiState = halbtc8723b2ant_WifiRssiState(pBtCoexist, 0, 2, 15, 0);
+ /* btRssiState = halbtc8723b2ant_BtRssiState(2, 29, 0); */
+ btRssiState = halbtc8723b2ant_BtRssiState(3, 29, 37);
+
+ pBtCoexist->fBtcSetRfReg(pBtCoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
+
+ halbtc8723b2ant_LimitedRx(pBtCoexist, NORMAL_EXEC, false, true, 0x5);
+
+ halbtc8723b2ant_FwDacSwingLvl(pBtCoexist, NORMAL_EXEC, 6);
+
+ pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw);
+ if (BTC_WIFI_BW_LEGACY == wifiBw) {
+ if (BTC_RSSI_HIGH(btRssiState))
+ halbtc8723b2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, 2);
+ else if (BTC_RSSI_MEDIUM(btRssiState))
+ halbtc8723b2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, 2);
+ else
+ halbtc8723b2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, 0);
+ } else {
+ /* only in 802.11n mode do we have to decrease BT power by 4 levels */
+ if (BTC_RSSI_HIGH(btRssiState)) {
+ pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_U1_AP_NUM, &apNum);
+ /* need to check the AP number first */
+ if (apNum < 10)
+ halbtc8723b2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, 4);
+ else
+ halbtc8723b2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, 2);
+ } else if (BTC_RSSI_MEDIUM(btRssiState))
+ halbtc8723b2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, 2);
+ else
+ halbtc8723b2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, 0);
+ }
+
+ halbtc8723b2ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 7);
+
+ if (
+ (btRssiState == BTC_RSSI_STATE_HIGH) ||
+ (btRssiState == BTC_RSSI_STATE_STAY_HIGH)
+ )
+ halbtc8723b2ant_TdmaDurationAdjust(pBtCoexist, true, false, 2);
+ else
+ halbtc8723b2ant_TdmaDurationAdjust(pBtCoexist, true, true, 2);
+
+ /* sw mechanism */
+ if (BTC_WIFI_BW_HT40 == wifiBw) {
+ if (
+ (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+ (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH)
+ ) {
+ halbtc8723b2ant_SwMechanism1(pBtCoexist, true, true, false, false);
+ halbtc8723b2ant_SwMechanism2(pBtCoexist, true, false, false, 0x18);
+ } else {
+ halbtc8723b2ant_SwMechanism1(pBtCoexist, true, true, false, false);
+ halbtc8723b2ant_SwMechanism2(pBtCoexist, false, false, false, 0x18);
+ }
+ } else {
+ if (
+ (wifiRssiState == BTC_RSSI_STATE_HIGH) ||
+ (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH)
+ ) {
+ halbtc8723b2ant_SwMechanism1(pBtCoexist, false, true, false, false);
+ halbtc8723b2ant_SwMechanism2(pBtCoexist, true, false, false, 0x18);
+ } else {
+ halbtc8723b2ant_SwMechanism1(pBtCoexist, false, true, false, false);
+ halbtc8723b2ant_SwMechanism2(pBtCoexist, false, false, false, 0x18);
+ }
+ }
+}
+
+static void halbtc8723b2ant_RunCoexistMechanism(PBTC_COEXIST pBtCoexist)
+{
+ u8 algorithm = 0;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], RunCoexistMechanism() ===>\n"));
+
+ if (pBtCoexist->bManualControl) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], RunCoexistMechanism(), return for Manual CTRL <===\n"));
+ return;
+ }
+
+ if (pCoexSta->bUnderIps) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], wifi is under IPS !!!\n"));
+ return;
+ }
+
+ algorithm = halbtc8723b2ant_ActionAlgorithm(pBtCoexist);
+ if (pCoexSta->bC2hBtInquiryPage && (BT_8723B_2ANT_COEX_ALGO_PANHS != algorithm)) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BT is under inquiry/page scan !!\n"));
+ halbtc8723b2ant_ActionBtInquiry(pBtCoexist);
+ return;
+ } else {
+ if (pCoexDm->bNeedRecover0x948) {
+ pCoexDm->bNeedRecover0x948 = false;
+ pBtCoexist->fBtcWrite4Byte(pBtCoexist, 0x948, pCoexDm->backup0x948);
+ }
+ }
+
+ pCoexDm->curAlgorithm = algorithm;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Algorithm = %d\n", pCoexDm->curAlgorithm));
+
+ if (halbtc8723b2ant_IsCommonAction(pBtCoexist)) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action 2-Ant common.\n"));
+ pCoexDm->bAutoTdmaAdjust = false;
+ } else {
+ if (pCoexDm->curAlgorithm != pCoexDm->preAlgorithm) {
+ BTC_PRINT(
+ BTC_MSG_ALGORITHM,
+ ALGO_TRACE,
+ (
+ "[BTCoex], preAlgorithm =%d, curAlgorithm =%d\n",
+ pCoexDm->preAlgorithm,
+ pCoexDm->curAlgorithm
+ )
+ );
+ pCoexDm->bAutoTdmaAdjust = false;
+ }
+
+
+ switch (pCoexDm->curAlgorithm) {
+ case BT_8723B_2ANT_COEX_ALGO_SCO:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action 2-Ant, algorithm = SCO.\n"));
+ halbtc8723b2ant_ActionSco(pBtCoexist);
+ break;
+ case BT_8723B_2ANT_COEX_ALGO_HID:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action 2-Ant, algorithm = HID.\n"));
+ halbtc8723b2ant_ActionHid(pBtCoexist);
+ break;
+ case BT_8723B_2ANT_COEX_ALGO_A2DP:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action 2-Ant, algorithm = A2DP.\n"));
+ halbtc8723b2ant_ActionA2dp(pBtCoexist);
+ break;
+ case BT_8723B_2ANT_COEX_ALGO_A2DP_PANHS:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action 2-Ant, algorithm = A2DP+PAN(HS).\n"));
+ halbtc8723b2ant_ActionA2dpPanHs(pBtCoexist);
+ break;
+ case BT_8723B_2ANT_COEX_ALGO_PANEDR:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action 2-Ant, algorithm = PAN(EDR).\n"));
+ halbtc8723b2ant_ActionPanEdr(pBtCoexist);
+ break;
+ case BT_8723B_2ANT_COEX_ALGO_PANHS:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action 2-Ant, algorithm = HS mode.\n"));
+ halbtc8723b2ant_ActionPanHs(pBtCoexist);
+ break;
+ case BT_8723B_2ANT_COEX_ALGO_PANEDR_A2DP:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action 2-Ant, algorithm = PAN+A2DP.\n"));
+ halbtc8723b2ant_ActionPanEdrA2dp(pBtCoexist);
+ break;
+ case BT_8723B_2ANT_COEX_ALGO_PANEDR_HID:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action 2-Ant, algorithm = PAN(EDR)+HID.\n"));
+ halbtc8723b2ant_ActionPanEdrHid(pBtCoexist);
+ break;
+ case BT_8723B_2ANT_COEX_ALGO_HID_A2DP_PANEDR:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action 2-Ant, algorithm = HID+A2DP+PAN.\n"));
+ halbtc8723b2ant_ActionHidA2dpPanEdr(pBtCoexist);
+ break;
+ case BT_8723B_2ANT_COEX_ALGO_HID_A2DP:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action 2-Ant, algorithm = HID+A2DP.\n"));
+ halbtc8723b2ant_ActionHidA2dp(pBtCoexist);
+ break;
+ default:
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action 2-Ant, algorithm = coexist All Off!!\n"));
+ halbtc8723b2ant_CoexAllOff(pBtCoexist);
+ break;
+ }
+ pCoexDm->preAlgorithm = pCoexDm->curAlgorithm;
+ }
+}
+
+static void halbtc8723b2ant_WifiOffHwCfg(PBTC_COEXIST pBtCoexist)
+{
+ bool bIsInMpMode = false;
+ u8 H2C_Parameter[2] = {0};
+ u32 fwVer = 0;
+
+ /* set wlan_act to low */
+ pBtCoexist->fBtcWrite1Byte(pBtCoexist, 0x76e, 0x4);
+
+ pBtCoexist->fBtcSetRfReg(pBtCoexist, BTC_RF_A, 0x1, 0xfffff, 0x780); /* WiFi goes to standby while GNT_BT 0-->1 */
+ pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_U4_WIFI_FW_VER, &fwVer);
+ if (fwVer >= 0x180000) {
+ /* Use H2C to set GNT_BT to HIGH */
+ H2C_Parameter[0] = 1;
+ pBtCoexist->fBtcFillH2c(pBtCoexist, 0x6E, 1, H2C_Parameter);
+ } else
+ pBtCoexist->fBtcWrite1Byte(pBtCoexist, 0x765, 0x18);
+
+ pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_BL_WIFI_IS_IN_MP_MODE, &bIsInMpMode);
+ if (!bIsInMpMode)
+ pBtCoexist->fBtcWrite1ByteBitMask(pBtCoexist, 0x67, 0x20, 0x0); /* s0/s1 selection is controlled by BT */
+ else
+ pBtCoexist->fBtcWrite1ByteBitMask(pBtCoexist, 0x67, 0x20, 0x1); /* s0/s1 selection is controlled by WiFi */
+}
+
+static void halbtc8723b2ant_InitHwConfig(PBTC_COEXIST pBtCoexist, bool bBackUp)
+{
+ u8 u1Tmp = 0;
+
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, ("[BTCoex], 2Ant Init HW Config!!\n"));
+
+ /* backup rf 0x1e value */
+ pCoexDm->btRf0x1eBackup =
+ pBtCoexist->fBtcGetRfReg(pBtCoexist, BTC_RF_A, 0x1e, 0xfffff);
+
+ /* 0x790[5:0]= 0x5 */
+ u1Tmp = pBtCoexist->fBtcRead1Byte(pBtCoexist, 0x790);
+ u1Tmp &= 0xc0;
+ u1Tmp |= 0x5;
+ pBtCoexist->fBtcWrite1Byte(pBtCoexist, 0x790, u1Tmp);
+
+ /* Antenna config */
+ halbtc8723b2ant_SetAntPath(pBtCoexist, BTC_ANT_WIFI_AT_MAIN, true, false);
+
+ /* PTA parameter */
+ halbtc8723b2ant_CoexTableWithType(pBtCoexist, FORCE_EXEC, 0);
+
+ /* Enable counter statistics */
+ pBtCoexist->fBtcWrite1Byte(pBtCoexist, 0x76e, 0xc); /* 0x76e[3] = 1, WLAN_Act controlled by PTA */
+ pBtCoexist->fBtcWrite1Byte(pBtCoexist, 0x778, 0x3);
+ pBtCoexist->fBtcWrite1ByteBitMask(pBtCoexist, 0x40, 0x20, 0x1);
+}
+
+/* */
+/* workaround functions start with wa_halbtc8723b2ant_ */
+/* */
+/* */
+/* extern functions start with EXhalbtc8723b2ant_ */
+/* */
+void EXhalbtc8723b2ant_PowerOnSetting(PBTC_COEXIST pBtCoexist)
+{
+ PBTC_BOARD_INFO pBoardInfo = &pBtCoexist->boardInfo;
+ u8 u1Tmp = 0x4; /* Set BIT2 by default since it's 2ant case */
+ u16 u2Tmp = 0x0;
+
+ pBtCoexist->fBtcWrite1Byte(pBtCoexist, 0x67, 0x20);
+
+ /* enable the BB block via REG_SYS_FUNC_EN so that we can write 0x948 correctly. */
+ u2Tmp = pBtCoexist->fBtcRead2Byte(pBtCoexist, 0x2);
+ pBtCoexist->fBtcWrite2Byte(pBtCoexist, 0x2, u2Tmp|BIT0|BIT1);
+
+ /* set GNT_BT = 1 */
+ pBtCoexist->fBtcWrite1Byte(pBtCoexist, 0x765, 0x18);
+ /* set WLAN_ACT = 0 */
+ pBtCoexist->fBtcWrite1Byte(pBtCoexist, 0x76e, 0x4);
+
+ /* */
+ /* S0 or S1 setting and local register setting (from this setting the FW can get the antenna number, S0/S1, ... info) */
+ /* Local setting bit define */
+ /* BIT0: "0" for no antenna inverse; "1" for antenna inverse */
+ /* BIT1: "0" for internal switch; "1" for external switch */
+ /* BIT2: "0" for one antenna; "1" for two antenna */
+ /* NOTE: the default here is internal switch and one antenna ==> BIT1 = 0 and BIT2 = 0 */
+ if (pBtCoexist->chipInterface == BTC_INTF_USB) {
+ /* fixed at S0 for USB interface */
+ pBtCoexist->fBtcWrite4Byte(pBtCoexist, 0x948, 0x0);
+
+ u1Tmp |= 0x1; /* antenna inverse */
+ pBtCoexist->fBtcWriteLocalReg1Byte(pBtCoexist, 0xfe08, u1Tmp);
+
+ pBoardInfo->btdmAntPos = BTC_ANTENNA_AT_AUX_PORT;
+ } else {
+ /* for PCIE and SDIO interface, we check efuse 0xc3[6] */
+ if (pBoardInfo->singleAntPath == 0) {
+ /* set to S1 */
+ pBtCoexist->fBtcWrite4Byte(pBtCoexist, 0x948, 0x280);
+ pBoardInfo->btdmAntPos = BTC_ANTENNA_AT_MAIN_PORT;
+ } else if (pBoardInfo->singleAntPath == 1) {
+ /* set to S0 */
+ pBtCoexist->fBtcWrite4Byte(pBtCoexist, 0x948, 0x0);
+ u1Tmp |= 0x1; /* antenna inverse */
+ pBoardInfo->btdmAntPos = BTC_ANTENNA_AT_AUX_PORT;
+ }
+
+ if (pBtCoexist->chipInterface == BTC_INTF_PCI)
+ pBtCoexist->fBtcWriteLocalReg1Byte(pBtCoexist, 0x384, u1Tmp);
+ else if (pBtCoexist->chipInterface == BTC_INTF_SDIO)
+ pBtCoexist->fBtcWriteLocalReg1Byte(pBtCoexist, 0x60, u1Tmp);
+ }
+}
+
+void EXhalbtc8723b2ant_InitHwConfig(PBTC_COEXIST pBtCoexist, bool bWifiOnly)
+{
+ halbtc8723b2ant_InitHwConfig(pBtCoexist, true);
+}
+
+void EXhalbtc8723b2ant_InitCoexDm(PBTC_COEXIST pBtCoexist)
+{
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, ("[BTCoex], Coex Mechanism Init!!\n"));
+
+ halbtc8723b2ant_InitCoexDm(pBtCoexist);
+}
+
+void EXhalbtc8723b2ant_DisplayCoexInfo(PBTC_COEXIST pBtCoexist)
+{
+ PBTC_BOARD_INFO pBoardInfo = &pBtCoexist->boardInfo;
+ PBTC_STACK_INFO pStackInfo = &pBtCoexist->stackInfo;
+ PBTC_BT_LINK_INFO pBtLinkInfo = &pBtCoexist->btLinkInfo;
+ u8 *cliBuf = pBtCoexist->cliBuf;
+ u8 u1Tmp[4], i, btInfoExt, psTdmaCase = 0;
+ u32 u4Tmp[4];
+ bool bRoam = false, bScan = false, bLink = false, bWifiUnder5G = false;
+ bool bBtHsOn = false, bWifiBusy = false;
+ s32 wifiRssi = 0, btHsRssi = 0;
+ u32 wifiBw, wifiTrafficDir, faOfdm, faCck;
+ u8 wifiDot11Chnl, wifiHsChnl;
+ u32 fwVer = 0, btPatchVer = 0;
+ u8 apNum = 0;
+
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n ============[BT Coexist info]============");
+ CL_PRINTF(cliBuf);
+
+ if (pBtCoexist->bManualControl) {
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n ============[Under Manual Control]============");
+ CL_PRINTF(cliBuf);
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n ==========================================");
+ CL_PRINTF(cliBuf);
+ }
+
+ CL_SPRINTF(
+ cliBuf,
+ BT_TMP_BUF_SIZE,
+ "\r\n %-35s = %d/ %d ", "Ant PG number/ Ant mechanism:", \
+ pBoardInfo->pgAntNum,
+ pBoardInfo->btdmAntNum
+ );
+ CL_PRINTF(cliBuf);
+
+ CL_SPRINTF(
+ cliBuf,
+ BT_TMP_BUF_SIZE,
+ "\r\n %-35s = %s / %d", "BT stack/ hci ext ver", \
+ (pStackInfo->bProfileNotified ? "Yes" : "No"),
+ pStackInfo->hciVersion
+ );
+ CL_PRINTF(cliBuf);
+
+ pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_U4_BT_PATCH_VER, &btPatchVer);
+ pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_U4_WIFI_FW_VER, &fwVer);
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d_%x/ 0x%x/ 0x%x(%d)", "CoexVer/ FwVer/ PatchVer", \
+ GLCoexVerDate8723b2Ant, GLCoexVer8723b2Ant, fwVer, btPatchVer, btPatchVer);
+ CL_PRINTF(cliBuf);
+
+ pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_BL_HS_OPERATION, &bBtHsOn);
+ pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_U1_WIFI_DOT11_CHNL, &wifiDot11Chnl);
+ pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_U1_WIFI_HS_CHNL, &wifiHsChnl);
+ CL_SPRINTF(
+ cliBuf,
+ BT_TMP_BUF_SIZE,
+ "\r\n %-35s = %d / %d(%d)", "Dot11 channel / HsChnl(HsMode)", \
+ wifiDot11Chnl,
+ wifiHsChnl,
+ bBtHsOn
+ );
+ CL_PRINTF(cliBuf);
+
+ CL_SPRINTF(
+ cliBuf,
+ BT_TMP_BUF_SIZE,
+ "\r\n %-35s = %02x %02x %02x ", "H2C Wifi inform bt chnl Info", \
+ pCoexDm->wifiChnlInfo[0],
+ pCoexDm->wifiChnlInfo[1],
+ pCoexDm->wifiChnlInfo[2]
+ );
+ CL_PRINTF(cliBuf);
+
+ pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_S4_WIFI_RSSI, &wifiRssi);
+ pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_S4_HS_RSSI, &btHsRssi);
+ pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_U1_AP_NUM, &apNum);
+ CL_SPRINTF(
+ cliBuf,
+ BT_TMP_BUF_SIZE,
+ "\r\n %-35s = %d/ %d/ %d", "Wifi rssi/ HS rssi/ AP#", \
+ wifiRssi,
+ btHsRssi,
+ apNum
+ );
+ CL_PRINTF(cliBuf);
+
+ pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_BL_WIFI_SCAN, &bScan);
+ pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_BL_WIFI_LINK, &bLink);
+ pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_BL_WIFI_ROAM, &bRoam);
+ CL_SPRINTF(
+ cliBuf,
+ BT_TMP_BUF_SIZE,
+ "\r\n %-35s = %d/ %d/ %d ", "Wifi bLink/ bRoam/ bScan", \
+ bLink,
+ bRoam,
+ bScan
+ );
+ CL_PRINTF(cliBuf);
+
+ pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_BL_WIFI_UNDER_5G, &bWifiUnder5G);
+ pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw);
+ pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_BL_WIFI_BUSY, &bWifiBusy);
+ pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_U4_WIFI_TRAFFIC_DIRECTION, &wifiTrafficDir);
+ CL_SPRINTF(
+ cliBuf,
+ BT_TMP_BUF_SIZE,
+ "\r\n %-35s = %s / %s/ %s ", "Wifi status", \
+ (bWifiUnder5G ? "5G" : "2.4G"),
+ ((BTC_WIFI_BW_LEGACY == wifiBw) ? "Legacy" : (((BTC_WIFI_BW_HT40 == wifiBw) ? "HT40" : "HT20"))),
+ ((!bWifiBusy) ? "idle" : ((BTC_WIFI_TRAFFIC_TX == wifiTrafficDir) ? "uplink" : "downlink"))
+ );
+ CL_PRINTF(cliBuf);
+
+ CL_SPRINTF(
+ cliBuf,
+ BT_TMP_BUF_SIZE,
+ "\r\n %-35s = [%s/ %d/ %d] ", "BT [status/ rssi/ retryCnt]", \
+ ((pBtCoexist->btInfo.bBtDisabled) ? ("disabled") : ((pCoexSta->bC2hBtInquiryPage) ? ("inquiry/page scan") : ((BT_8723B_2ANT_BT_STATUS_NON_CONNECTED_IDLE == pCoexDm->btStatus) ? "non-connected idle" :
+ ((BT_8723B_2ANT_BT_STATUS_CONNECTED_IDLE == pCoexDm->btStatus) ? "connected-idle" : "busy")))),
+ pCoexSta->btRssi,
+ pCoexSta->btRetryCnt
+ );
+ CL_PRINTF(cliBuf);
+
+ CL_SPRINTF(
+ cliBuf,
+ BT_TMP_BUF_SIZE,
+ "\r\n %-35s = %d / %d / %d / %d", "SCO/HID/PAN/A2DP", \
+ pBtLinkInfo->bScoExist,
+ pBtLinkInfo->bHidExist,
+ pBtLinkInfo->bPanExist,
+ pBtLinkInfo->bA2dpExist
+ );
+ CL_PRINTF(cliBuf);
+ pBtCoexist->fBtcDispDbgMsg(pBtCoexist, BTC_DBG_DISP_BT_LINK_INFO);
+
+ btInfoExt = pCoexSta->btInfoExt;
+ CL_SPRINTF(
+ cliBuf,
+ BT_TMP_BUF_SIZE,
+ "\r\n %-35s = %s", "BT Info A2DP rate", \
+ (btInfoExt&BIT0) ? "Basic rate" : "EDR rate"
+ );
+ CL_PRINTF(cliBuf);
+
+ for (i = 0; i < BT_INFO_SRC_8723B_2ANT_MAX; i++) {
+ if (pCoexSta->btInfoC2hCnt[i]) {
+ CL_SPRINTF(
+ cliBuf,
+ BT_TMP_BUF_SIZE,
+ "\r\n %-35s = %02x %02x %02x %02x %02x %02x %02x(%d)", GLBtInfoSrc8723b2Ant[i], \
+ pCoexSta->btInfoC2h[i][0],
+ pCoexSta->btInfoC2h[i][1],
+ pCoexSta->btInfoC2h[i][2],
+ pCoexSta->btInfoC2h[i][3],
+ pCoexSta->btInfoC2h[i][4],
+ pCoexSta->btInfoC2h[i][5],
+ pCoexSta->btInfoC2h[i][6],
+ pCoexSta->btInfoC2hCnt[i]
+ );
+ CL_PRINTF(cliBuf);
+ }
+ }
+
+ CL_SPRINTF(
+ cliBuf,
+ BT_TMP_BUF_SIZE,
+ "\r\n %-35s = %s/%s", "PS state, IPS/LPS", \
+ ((pCoexSta->bUnderIps ? "IPS ON" : "IPS OFF")),
+ ((pCoexSta->bUnderLps ? "LPS ON" : "LPS OFF"))
+ );
+ CL_PRINTF(cliBuf);
+ pBtCoexist->fBtcDispDbgMsg(pBtCoexist, BTC_DBG_DISP_FW_PWR_MODE_CMD);
+
+ /* Sw mechanism */
+ CL_SPRINTF(
+ cliBuf,
+ BT_TMP_BUF_SIZE,
+ "\r\n %-35s", "============[Sw mechanism]============"
+ );
+ CL_PRINTF(cliBuf);
+ CL_SPRINTF(
+ cliBuf,
+ BT_TMP_BUF_SIZE,
+ "\r\n %-35s = %d/ %d/ %d ", "SM1[ShRf/ LpRA/ LimDig]", \
+ pCoexDm->bCurRfRxLpfShrink,
+ pCoexDm->bCurLowPenaltyRa,
+ pCoexDm->bLimitedDig
+ );
+ CL_PRINTF(cliBuf);
+ CL_SPRINTF(
+ cliBuf,
+ BT_TMP_BUF_SIZE,
+ "\r\n %-35s = %d/ %d/ %d(0x%x) ",
+ "SM2[AgcT/ AdcB/ SwDacSwing(lvl)]", \
+ pCoexDm->bCurAgcTableEn,
+ pCoexDm->bCurAdcBackOff,
+ pCoexDm->bCurDacSwingOn,
+ pCoexDm->curDacSwingLvl
+ );
+ CL_PRINTF(cliBuf);
+
+ /* Fw mechanism */
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s", "============[Fw mechanism]============");
+ CL_PRINTF(cliBuf);
+
+ psTdmaCase = pCoexDm->curPsTdma;
+ CL_SPRINTF(
+ cliBuf,
+ BT_TMP_BUF_SIZE,
+ "\r\n %-35s = %02x %02x %02x %02x %02x case-%d (auto:%d)", "PS TDMA", \
+ pCoexDm->psTdmaPara[0],
+ pCoexDm->psTdmaPara[1],
+ pCoexDm->psTdmaPara[2],
+ pCoexDm->psTdmaPara[3],
+ pCoexDm->psTdmaPara[4],
+ psTdmaCase, pCoexDm->bAutoTdmaAdjust
+ );
+ CL_PRINTF(cliBuf);
+
+ CL_SPRINTF(
+ cliBuf,
+ BT_TMP_BUF_SIZE,
+ "\r\n %-35s = %d/ %d ", "DecBtPwr/ IgnWlanAct", \
+ pCoexDm->curBtDecPwrLvl,
+ pCoexDm->bCurIgnoreWlanAct
+ );
+ CL_PRINTF(cliBuf);
+
+ /* Hw setting */
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s", "============[Hw setting]============");
+ CL_PRINTF(cliBuf);
+
+ CL_SPRINTF(
+ cliBuf,
+ BT_TMP_BUF_SIZE,
+ "\r\n %-35s = 0x%x", "RF-A, 0x1e initVal", \
+ pCoexDm->btRf0x1eBackup
+ );
+ CL_PRINTF(cliBuf);
+
+ u1Tmp[0] = pBtCoexist->fBtcRead1Byte(pBtCoexist, 0x778);
+ u4Tmp[0] = pBtCoexist->fBtcRead4Byte(pBtCoexist, 0x880);
+ CL_SPRINTF(
+ cliBuf,
+ BT_TMP_BUF_SIZE,
+ "\r\n %-35s = 0x%x/ 0x%x", "0x778/0x880[29:25]", \
+ u1Tmp[0],
+ (u4Tmp[0]&0x3e000000) >> 25
+ );
+ CL_PRINTF(cliBuf);
+
+
+ u4Tmp[0] = pBtCoexist->fBtcRead4Byte(pBtCoexist, 0x948);
+ u1Tmp[0] = pBtCoexist->fBtcRead1Byte(pBtCoexist, 0x67);
+ u1Tmp[1] = pBtCoexist->fBtcRead1Byte(pBtCoexist, 0x765);
+ CL_SPRINTF(
+ cliBuf,
+ BT_TMP_BUF_SIZE,
+ "\r\n %-35s = 0x%x/ 0x%x/ 0x%x", "0x948/ 0x67[5] / 0x765", \
+ u4Tmp[0],
+ ((u1Tmp[0]&0x20)>>5),
+ u1Tmp[1]
+ );
+ CL_PRINTF(cliBuf);
+
+ u4Tmp[0] = pBtCoexist->fBtcRead4Byte(pBtCoexist, 0x92c);
+ u4Tmp[1] = pBtCoexist->fBtcRead4Byte(pBtCoexist, 0x930);
+ u4Tmp[2] = pBtCoexist->fBtcRead4Byte(pBtCoexist, 0x944);
+ CL_SPRINTF(
+ cliBuf,
+ BT_TMP_BUF_SIZE,
+ "\r\n %-35s = 0x%x/ 0x%x/ 0x%x", "0x92c[1:0]/ 0x930[7:0]/0x944[1:0]", \
+ u4Tmp[0]&0x3,
+ u4Tmp[1]&0xff,
+ u4Tmp[2]&0x3
+ );
+ CL_PRINTF(cliBuf);
+
+
+ u1Tmp[0] = pBtCoexist->fBtcRead1Byte(pBtCoexist, 0x39);
+ u1Tmp[1] = pBtCoexist->fBtcRead1Byte(pBtCoexist, 0x40);
+ u4Tmp[0] = pBtCoexist->fBtcRead4Byte(pBtCoexist, 0x4c);
+ u1Tmp[2] = pBtCoexist->fBtcRead1Byte(pBtCoexist, 0x64);
+ CL_SPRINTF(
+ cliBuf,
+ BT_TMP_BUF_SIZE,
+ "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x", "0x38[11]/0x40/0x4c[24:23]/0x64[0]", \
+ ((u1Tmp[0] & 0x8)>>3),
+ u1Tmp[1],
+ ((u4Tmp[0]&0x01800000)>>23),
+ u1Tmp[2]&0x1
+ );
+ CL_PRINTF(cliBuf);
+
+ u4Tmp[0] = pBtCoexist->fBtcRead4Byte(pBtCoexist, 0x550);
+ u1Tmp[0] = pBtCoexist->fBtcRead1Byte(pBtCoexist, 0x522);
+ CL_SPRINTF(
+ cliBuf,
+ BT_TMP_BUF_SIZE,
+ "\r\n %-35s = 0x%x/ 0x%x", "0x550(bcn ctrl)/0x522", \
+ u4Tmp[0],
+ u1Tmp[0]
+ );
+ CL_PRINTF(cliBuf);
+
+ u4Tmp[0] = pBtCoexist->fBtcRead4Byte(pBtCoexist, 0xc50);
+ u1Tmp[0] = pBtCoexist->fBtcRead1Byte(pBtCoexist, 0x49c);
+ CL_SPRINTF(
+ cliBuf,
+ BT_TMP_BUF_SIZE,
+ "\r\n %-35s = 0x%x/ 0x%x", "0xc50(dig)/0x49c(null-drop)", \
+ u4Tmp[0]&0xff,
+ u1Tmp[0]
+ );
+ CL_PRINTF(cliBuf);
+
+ u4Tmp[0] = pBtCoexist->fBtcRead4Byte(pBtCoexist, 0xda0);
+ u4Tmp[1] = pBtCoexist->fBtcRead4Byte(pBtCoexist, 0xda4);
+ u4Tmp[2] = pBtCoexist->fBtcRead4Byte(pBtCoexist, 0xda8);
+ u4Tmp[3] = pBtCoexist->fBtcRead4Byte(pBtCoexist, 0xcf0);
+
+ u1Tmp[0] = pBtCoexist->fBtcRead1Byte(pBtCoexist, 0xa5b);
+ u1Tmp[1] = pBtCoexist->fBtcRead1Byte(pBtCoexist, 0xa5c);
+
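+ /* sum the OFDM false-alarm counter fields read above and combine the two CCK counter bytes; these feed the OFDM-CCA/OFDM-FA/CCK-FA line below */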
+ faOfdm =
+ ((u4Tmp[0]&0xffff0000) >> 16) +
+ ((u4Tmp[1]&0xffff0000) >> 16) +
+ (u4Tmp[1] & 0xffff) + (u4Tmp[2] & 0xffff) + \
+ ((u4Tmp[3]&0xffff0000) >> 16) +
+ (u4Tmp[3] & 0xffff);
+
+ faCck = (u1Tmp[0] << 8) + u1Tmp[1];
+
+ CL_SPRINTF(
+ cliBuf,
+ BT_TMP_BUF_SIZE,
+ "\r\n %-35s = 0x%x/ 0x%x/ 0x%x", "OFDM-CCA/OFDM-FA/CCK-FA", \
+ u4Tmp[0]&0xffff,
+ faOfdm,
+ faCck
+ );
+ CL_PRINTF(cliBuf);
+
+ u4Tmp[0] = pBtCoexist->fBtcRead4Byte(pBtCoexist, 0x6c0);
+ u4Tmp[1] = pBtCoexist->fBtcRead4Byte(pBtCoexist, 0x6c4);
+ u4Tmp[2] = pBtCoexist->fBtcRead4Byte(pBtCoexist, 0x6c8);
+ u1Tmp[0] = pBtCoexist->fBtcRead1Byte(pBtCoexist, 0x6cc);
+ CL_SPRINTF(
+ cliBuf,
+ BT_TMP_BUF_SIZE,
+ "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x", "0x6c0/0x6c4/0x6c8/0x6cc(coexTable)", \
+ u4Tmp[0],
+ u4Tmp[1],
+ u4Tmp[2],
+ u1Tmp[0]
+ );
+ CL_PRINTF(cliBuf);
+
+ CL_SPRINTF(
+ cliBuf,
+ BT_TMP_BUF_SIZE,
+ "\r\n %-35s = %d/ %d", "0x770(high-pri rx/tx)", \
+ pCoexSta->highPriorityRx,
+ pCoexSta->highPriorityTx
+ );
+ CL_PRINTF(cliBuf);
+ CL_SPRINTF(
+ cliBuf,
+ BT_TMP_BUF_SIZE,
+ "\r\n %-35s = %d/ %d", "0x774(low-pri rx/tx)", \
+ pCoexSta->lowPriorityRx,
+ pCoexSta->lowPriorityTx
+ );
+ CL_PRINTF(cliBuf);
+
+ halbtc8723b2ant_MonitorBtCtr(pBtCoexist);
+ pBtCoexist->fBtcDispDbgMsg(pBtCoexist, BTC_DBG_DISP_COEX_STATISTICS);
+}
+
+
+void EXhalbtc8723b2ant_IpsNotify(PBTC_COEXIST pBtCoexist, u8 type)
+{
+ if (BTC_IPS_ENTER == type) {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], IPS ENTER notify\n"));
+ pCoexSta->bUnderIps = true;
+ halbtc8723b2ant_WifiOffHwCfg(pBtCoexist);
+ halbtc8723b2ant_IgnoreWlanAct(pBtCoexist, FORCE_EXEC, true);
+ halbtc8723b2ant_CoexAllOff(pBtCoexist);
+ } else if (BTC_IPS_LEAVE == type) {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], IPS LEAVE notify\n"));
+ pCoexSta->bUnderIps = false;
+ halbtc8723b2ant_InitHwConfig(pBtCoexist, false);
+ halbtc8723b2ant_InitCoexDm(pBtCoexist);
+ halbtc8723b2ant_QueryBtInfo(pBtCoexist);
+ }
+}
+
+void EXhalbtc8723b2ant_LpsNotify(PBTC_COEXIST pBtCoexist, u8 type)
+{
+ if (BTC_LPS_ENABLE == type) {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], LPS ENABLE notify\n"));
+ pCoexSta->bUnderLps = true;
+ } else if (BTC_LPS_DISABLE == type) {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], LPS DISABLE notify\n"));
+ pCoexSta->bUnderLps = false;
+ }
+}
+
+void EXhalbtc8723b2ant_ScanNotify(PBTC_COEXIST pBtCoexist, u8 type)
+{
+ if (BTC_SCAN_START == type) {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], SCAN START notify\n"));
+ } else if (BTC_SCAN_FINISH == type) {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], SCAN FINISH notify\n"));
+ }
+}
+
+void EXhalbtc8723b2ant_ConnectNotify(PBTC_COEXIST pBtCoexist, u8 type)
+{
+ if (BTC_ASSOCIATE_START == type) {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], CONNECT START notify\n"));
+ } else if (BTC_ASSOCIATE_FINISH == type) {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], CONNECT FINISH notify\n"));
+ }
+}
+
+void EXhalbtc8723b2ant_MediaStatusNotify(PBTC_COEXIST pBtCoexist, u8 type)
+{
+ u8 H2C_Parameter[3] = {0};
+ u32 wifiBw;
+ u8 wifiCentralChnl;
+ u8 apNum = 0;
+
+ if (BTC_MEDIA_CONNECT == type) {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], MEDIA connect notify\n"));
+ } else {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], MEDIA disconnect notify\n"));
+ }
+
+ /* only on 2.4G do we need to inform BT of the channel mask */
+ pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_U1_WIFI_CENTRAL_CHNL, &wifiCentralChnl);
+ if ((BTC_MEDIA_CONNECT == type) && (wifiCentralChnl <= 14)) {
+ H2C_Parameter[0] = 0x1;
+ H2C_Parameter[1] = wifiCentralChnl;
+ pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw);
+ if (BTC_WIFI_BW_HT40 == wifiBw)
+ H2C_Parameter[2] = 0x30;
+ else {
+ pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_U1_AP_NUM, &apNum);
+ if (apNum < 10)
+ H2C_Parameter[2] = 0x30;
+ else
+ H2C_Parameter[2] = 0x20;
+ }
+ }
+
+ pCoexDm->wifiChnlInfo[0] = H2C_Parameter[0];
+ pCoexDm->wifiChnlInfo[1] = H2C_Parameter[1];
+ pCoexDm->wifiChnlInfo[2] = H2C_Parameter[2];
+
+ BTC_PRINT(
+ BTC_MSG_ALGORITHM,
+ ALGO_TRACE_FW_EXEC,
+ (
+ "[BTCoex], FW write 0x66 = 0x%x\n",
+ H2C_Parameter[0]<<16|H2C_Parameter[1]<<8|H2C_Parameter[2]
+ )
+ );
+
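+ /* H2C 0x66 payload: byte0 = 2.4G connected flag, byte1 = central channel, byte2 = 0x30 (HT40 or fewer than 10 APs) or 0x20 */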
+ pBtCoexist->fBtcFillH2c(pBtCoexist, 0x66, 3, H2C_Parameter);
+}
+
+void EXhalbtc8723b2ant_SpecialPacketNotify(PBTC_COEXIST pBtCoexist, u8 type)
+{
+ if (type == BTC_PACKET_DHCP) {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], DHCP Packet notify\n"));
+ }
+}
+
+void EXhalbtc8723b2ant_BtInfoNotify(
+ PBTC_COEXIST pBtCoexist, u8 *tmpBuf, u8 length
+)
+{
+ u8 btInfo = 0;
+ u8 i, rspSource = 0;
+ bool bBtBusy = false, bLimitedDig = false;
+ bool bWifiConnected = false;
+
+ pCoexSta->bC2hBtInfoReqSent = false;
+
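+ /* the low nibble of the first C2H byte identifies the BT info source; unknown sources are treated as WIFI_FW */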
+ rspSource = tmpBuf[0]&0xf;
+ if (rspSource >= BT_INFO_SRC_8723B_2ANT_MAX)
+ rspSource = BT_INFO_SRC_8723B_2ANT_WIFI_FW;
+
+ pCoexSta->btInfoC2hCnt[rspSource]++;
+
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], Bt info[%d], length =%d, hex data =[", rspSource, length));
+ for (i = 0; i < length; i++) {
+ pCoexSta->btInfoC2h[rspSource][i] = tmpBuf[i];
+ if (i == 1)
+ btInfo = tmpBuf[i];
+
+ if (i == length-1) {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("0x%02x]\n", tmpBuf[i]));
+ } else {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("0x%02x, ", tmpBuf[i]));
+ }
+ }
+
+ if (pBtCoexist->bManualControl) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BtInfoNotify(), return for Manual CTRL<===\n"));
+ return;
+ }
+
+ if (BT_INFO_SRC_8723B_2ANT_WIFI_FW != rspSource) {
+ pCoexSta->btRetryCnt = pCoexSta->btInfoC2h[rspSource][2]&0xf; /* [3:0] */
+
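+ /* byte 3 carries the raw BT RSSI report; convert it to the driver's internal RSSI scale */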
+ pCoexSta->btRssi = pCoexSta->btInfoC2h[rspSource][3]*2+10;
+
+ pCoexSta->btInfoExt = pCoexSta->btInfoC2h[rspSource][4];
+
+ pCoexSta->bBtTxRxMask = (pCoexSta->btInfoC2h[rspSource][2]&0x40);
+ pBtCoexist->fBtcSet(pBtCoexist, BTC_SET_BL_BT_TX_RX_MASK, &pCoexSta->bBtTxRxMask);
+ if (pCoexSta->bBtTxRxMask) {
+ /* BT info is reported by the BT FW and BT RF REG 0x3C != 0x01 => need to switch the BT TRx mask */
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Switch BT TRx Mask since BT RF REG 0x3C != 0x01\n"));
+ pBtCoexist->fBtcSetBtReg(pBtCoexist, BTC_BT_REG_RF, 0x3c, 0x01);
+ }
+
+ /* Here we need to resend some wifi info to BT */
+ /* because BT was reset and lost the info. */
+ if ((pCoexSta->btInfoExt & BIT1)) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BT ext info bit1 check, send wifi BW&Chnl to BT!!\n"));
+ pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_BL_WIFI_CONNECTED, &bWifiConnected);
+
+ if (bWifiConnected)
+ EXhalbtc8723b2ant_MediaStatusNotify(pBtCoexist, BTC_MEDIA_CONNECT);
+ else
+ EXhalbtc8723b2ant_MediaStatusNotify(pBtCoexist, BTC_MEDIA_DISCONNECT);
+ }
+
+ if ((pCoexSta->btInfoExt & BIT3)) {
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BT ext info bit3 check, set BT NOT to ignore Wlan active!!\n"));
+ halbtc8723b2ant_IgnoreWlanAct(pBtCoexist, FORCE_EXEC, false);
+ } else {
+ /* BT already does NOT ignore WLAN active, do nothing here. */
+ }
+ }
+
+ /* check BIT2 first ==> check if bt is under inquiry or page scan */
+ if (btInfo & BT_INFO_8723B_2ANT_B_INQ_PAGE)
+ pCoexSta->bC2hBtInquiryPage = true;
+ else
+ pCoexSta->bC2hBtInquiryPage = false;
+
+ /* set link exist status */
+ if (!(btInfo&BT_INFO_8723B_2ANT_B_CONNECTION)) {
+ pCoexSta->bBtLinkExist = false;
+ pCoexSta->bPanExist = false;
+ pCoexSta->bA2dpExist = false;
+ pCoexSta->bHidExist = false;
+ pCoexSta->bScoExist = false;
+ } else { /* connection exists */
+ pCoexSta->bBtLinkExist = true;
+ if (btInfo & BT_INFO_8723B_2ANT_B_FTP)
+ pCoexSta->bPanExist = true;
+ else
+ pCoexSta->bPanExist = false;
+ if (btInfo & BT_INFO_8723B_2ANT_B_A2DP)
+ pCoexSta->bA2dpExist = true;
+ else
+ pCoexSta->bA2dpExist = false;
+ if (btInfo & BT_INFO_8723B_2ANT_B_HID)
+ pCoexSta->bHidExist = true;
+ else
+ pCoexSta->bHidExist = false;
+ if (btInfo & BT_INFO_8723B_2ANT_B_SCO_ESCO)
+ pCoexSta->bScoExist = true;
+ else
+ pCoexSta->bScoExist = false;
+ }
+
+ halbtc8723b2ant_UpdateBtLinkInfo(pBtCoexist);
+
+ if (!(btInfo&BT_INFO_8723B_2ANT_B_CONNECTION)) {
+ pCoexDm->btStatus = BT_8723B_2ANT_BT_STATUS_NON_CONNECTED_IDLE;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BtInfoNotify(), BT Non-Connected idle!!!\n"));
+ } else if (btInfo == BT_INFO_8723B_2ANT_B_CONNECTION) { /* connection exists but not busy */
+ pCoexDm->btStatus = BT_8723B_2ANT_BT_STATUS_CONNECTED_IDLE;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BtInfoNotify(), BT Connected-idle!!!\n"));
+ } else if (
+ (btInfo&BT_INFO_8723B_2ANT_B_SCO_ESCO) ||
+ (btInfo&BT_INFO_8723B_2ANT_B_SCO_BUSY)
+ ) {
+ pCoexDm->btStatus = BT_8723B_2ANT_BT_STATUS_SCO_BUSY;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BtInfoNotify(), BT SCO busy!!!\n"));
+ } else if (btInfo&BT_INFO_8723B_2ANT_B_ACL_BUSY) {
+ pCoexDm->btStatus = BT_8723B_2ANT_BT_STATUS_ACL_BUSY;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BtInfoNotify(), BT ACL busy!!!\n"));
+ } else {
+ pCoexDm->btStatus = BT_8723B_2ANT_BT_STATUS_MAX;
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BtInfoNotify(), BT Non-Defined state!!!\n"));
+ }
+
+ if (
+ (BT_8723B_2ANT_BT_STATUS_ACL_BUSY == pCoexDm->btStatus) ||
+ (BT_8723B_2ANT_BT_STATUS_SCO_BUSY == pCoexDm->btStatus) ||
+ (BT_8723B_2ANT_BT_STATUS_ACL_SCO_BUSY == pCoexDm->btStatus)
+ ) {
+ bBtBusy = true;
+ bLimitedDig = true;
+ } else {
+ bBtBusy = false;
+ bLimitedDig = false;
+ }
+
+ pBtCoexist->fBtcSet(pBtCoexist, BTC_SET_BL_BT_TRAFFIC_BUSY, &bBtBusy);
+
+ pCoexDm->bLimitedDig = bLimitedDig;
+ pBtCoexist->fBtcSet(pBtCoexist, BTC_SET_BL_BT_LIMITED_DIG, &bLimitedDig);
+
+ halbtc8723b2ant_RunCoexistMechanism(pBtCoexist);
+}
+
+void EXhalbtc8723b2ant_HaltNotify(PBTC_COEXIST pBtCoexist)
+{
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], Halt notify\n"));
+
+ halbtc8723b2ant_WifiOffHwCfg(pBtCoexist);
+ pBtCoexist->fBtcSetBtReg(pBtCoexist, BTC_BT_REG_RF, 0x3c, 0x15); /* BT goes to standby while GNT_BT 1-->0 */
+ halbtc8723b2ant_IgnoreWlanAct(pBtCoexist, FORCE_EXEC, true);
+
+ EXhalbtc8723b2ant_MediaStatusNotify(pBtCoexist, BTC_MEDIA_DISCONNECT);
+}
+
+void EXhalbtc8723b2ant_PnpNotify(PBTC_COEXIST pBtCoexist, u8 pnpState)
+{
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], Pnp notify\n"));
+
+ if (BTC_WIFI_PNP_SLEEP == pnpState) {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], Pnp notify to SLEEP\n"));
+ } else if (BTC_WIFI_PNP_WAKE_UP == pnpState) {
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], Pnp notify to WAKE UP\n"));
+ halbtc8723b2ant_InitHwConfig(pBtCoexist, false);
+ halbtc8723b2ant_InitCoexDm(pBtCoexist);
+ halbtc8723b2ant_QueryBtInfo(pBtCoexist);
+ }
+}
+
+void EXhalbtc8723b2ant_Periodical(PBTC_COEXIST pBtCoexist)
+{
+ static u8 disVerInfoCnt = 0;
+ u32 fwVer = 0, btPatchVer = 0;
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], ==========================Periodical ===========================\n"));
+
+ if (disVerInfoCnt <= 5) {
+ disVerInfoCnt += 1;
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, ("[BTCoex], ****************************************************************\n"));
+ pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_U4_BT_PATCH_VER, &btPatchVer);
+ pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_U4_WIFI_FW_VER, &fwVer);
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, ("[BTCoex], CoexVer/ FwVer/ PatchVer = %d_%x/ 0x%x/ 0x%x(%d)\n", \
+ GLCoexVerDate8723b2Ant, GLCoexVer8723b2Ant, fwVer, btPatchVer, btPatchVer));
+ BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, ("[BTCoex], ****************************************************************\n"));
+ }
+
+ if (
+ halbtc8723b2ant_IsWifiStatusChanged(pBtCoexist) ||
+ pCoexDm->bAutoTdmaAdjust
+ )
+ halbtc8723b2ant_RunCoexistMechanism(pBtCoexist);
+}
diff --git a/drivers/staging/rtl8723bs/hal/HalBtc8723b2Ant.h b/drivers/staging/rtl8723bs/hal/HalBtc8723b2Ant.h
new file mode 100644
index 000000000000..5a0fed69eda7
--- /dev/null
+++ b/drivers/staging/rtl8723bs/hal/HalBtc8723b2Ant.h
@@ -0,0 +1,155 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+/* The following is for 8723B 2Ant BT Co-exist definition */
+#define BT_INFO_8723B_2ANT_B_FTP BIT7
+#define BT_INFO_8723B_2ANT_B_A2DP BIT6
+#define BT_INFO_8723B_2ANT_B_HID BIT5
+#define BT_INFO_8723B_2ANT_B_SCO_BUSY BIT4
+#define BT_INFO_8723B_2ANT_B_ACL_BUSY BIT3
+#define BT_INFO_8723B_2ANT_B_INQ_PAGE BIT2
+#define BT_INFO_8723B_2ANT_B_SCO_ESCO BIT1
+#define BT_INFO_8723B_2ANT_B_CONNECTION BIT0
+
+#define BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT 2
+
+typedef enum _BT_INFO_SRC_8723B_2ANT {
+ BT_INFO_SRC_8723B_2ANT_WIFI_FW = 0x0,
+ BT_INFO_SRC_8723B_2ANT_BT_RSP = 0x1,
+ BT_INFO_SRC_8723B_2ANT_BT_ACTIVE_SEND = 0x2,
+ BT_INFO_SRC_8723B_2ANT_MAX
+} BT_INFO_SRC_8723B_2ANT, *PBT_INFO_SRC_8723B_2ANT;
+
+typedef enum _BT_8723B_2ANT_BT_STATUS {
+ BT_8723B_2ANT_BT_STATUS_NON_CONNECTED_IDLE = 0x0,
+ BT_8723B_2ANT_BT_STATUS_CONNECTED_IDLE = 0x1,
+ BT_8723B_2ANT_BT_STATUS_INQ_PAGE = 0x2,
+ BT_8723B_2ANT_BT_STATUS_ACL_BUSY = 0x3,
+ BT_8723B_2ANT_BT_STATUS_SCO_BUSY = 0x4,
+ BT_8723B_2ANT_BT_STATUS_ACL_SCO_BUSY = 0x5,
+ BT_8723B_2ANT_BT_STATUS_MAX
+} BT_8723B_2ANT_BT_STATUS, *PBT_8723B_2ANT_BT_STATUS;
+
+typedef enum _BT_8723B_2ANT_COEX_ALGO {
+ BT_8723B_2ANT_COEX_ALGO_UNDEFINED = 0x0,
+ BT_8723B_2ANT_COEX_ALGO_SCO = 0x1,
+ BT_8723B_2ANT_COEX_ALGO_HID = 0x2,
+ BT_8723B_2ANT_COEX_ALGO_A2DP = 0x3,
+ BT_8723B_2ANT_COEX_ALGO_A2DP_PANHS = 0x4,
+ BT_8723B_2ANT_COEX_ALGO_PANEDR = 0x5,
+ BT_8723B_2ANT_COEX_ALGO_PANHS = 0x6,
+ BT_8723B_2ANT_COEX_ALGO_PANEDR_A2DP = 0x7,
+ BT_8723B_2ANT_COEX_ALGO_PANEDR_HID = 0x8,
+ BT_8723B_2ANT_COEX_ALGO_HID_A2DP_PANEDR = 0x9,
+ BT_8723B_2ANT_COEX_ALGO_HID_A2DP = 0xa,
+ BT_8723B_2ANT_COEX_ALGO_MAX = 0xb,
+} BT_8723B_2ANT_COEX_ALGO, *PBT_8723B_2ANT_COEX_ALGO;
+
+typedef struct _COEX_DM_8723B_2ANT {
+ /* fw mechanism */
+ u8 preBtDecPwrLvl;
+ u8 curBtDecPwrLvl;
+ u8 preFwDacSwingLvl;
+ u8 curFwDacSwingLvl;
+ bool bCurIgnoreWlanAct;
+ bool bPreIgnoreWlanAct;
+ u8 prePsTdma;
+ u8 curPsTdma;
+ u8 psTdmaPara[5];
+ u8 psTdmaDuAdjType;
+ bool bResetTdmaAdjust;
+ bool bAutoTdmaAdjust;
+ bool bPrePsTdmaOn;
+ bool bCurPsTdmaOn;
+ bool bPreBtAutoReport;
+ bool bCurBtAutoReport;
+
+ /* sw mechanism */
+ bool bPreRfRxLpfShrink;
+ bool bCurRfRxLpfShrink;
+ u32 btRf0x1eBackup;
+ bool bPreLowPenaltyRa;
+ bool bCurLowPenaltyRa;
+ bool bPreDacSwingOn;
+ u32 preDacSwingLvl;
+ bool bCurDacSwingOn;
+ u32 curDacSwingLvl;
+ bool bPreAdcBackOff;
+ bool bCurAdcBackOff;
+ bool bPreAgcTableEn;
+ bool bCurAgcTableEn;
+ u32 preVal0x6c0;
+ u32 curVal0x6c0;
+ u32 preVal0x6c4;
+ u32 curVal0x6c4;
+ u32 preVal0x6c8;
+ u32 curVal0x6c8;
+ u8 preVal0x6cc;
+ u8 curVal0x6cc;
+ bool bLimitedDig;
+
+ /* algorithm related */
+ u8 preAlgorithm;
+ u8 curAlgorithm;
+ u8 btStatus;
+ u8 wifiChnlInfo[3];
+
+ bool bNeedRecover0x948;
+ u32 backup0x948;
+} COEX_DM_8723B_2ANT, *PCOEX_DM_8723B_2ANT;
+
+typedef struct _COEX_STA_8723B_2ANT {
+ bool bBtLinkExist;
+ bool bScoExist;
+ bool bA2dpExist;
+ bool bHidExist;
+ bool bPanExist;
+
+ bool bUnderLps;
+ bool bUnderIps;
+ u32 highPriorityTx;
+ u32 highPriorityRx;
+ u32 lowPriorityTx;
+ u32 lowPriorityRx;
+ u8 btRssi;
+ bool bBtTxRxMask;
+ u8 preBtRssiState;
+ u8 preWifiRssiState[4];
+ bool bC2hBtInfoReqSent;
+ u8 btInfoC2h[BT_INFO_SRC_8723B_2ANT_MAX][10];
+ u32 btInfoC2hCnt[BT_INFO_SRC_8723B_2ANT_MAX];
+ bool bC2hBtInquiryPage;
+ u8 btRetryCnt;
+ u8 btInfoExt;
+} COEX_STA_8723B_2ANT, *PCOEX_STA_8723B_2ANT;
+
+/* */
+/* The following are the interfaces that notify the coex module. */
+/* */
+void EXhalbtc8723b2ant_PowerOnSetting(PBTC_COEXIST pBtCoexist);
+void EXhalbtc8723b2ant_InitHwConfig(PBTC_COEXIST pBtCoexist, bool bWifiOnly);
+void EXhalbtc8723b2ant_InitCoexDm(PBTC_COEXIST pBtCoexist);
+void EXhalbtc8723b2ant_IpsNotify(PBTC_COEXIST pBtCoexist, u8 type);
+void EXhalbtc8723b2ant_LpsNotify(PBTC_COEXIST pBtCoexist, u8 type);
+void EXhalbtc8723b2ant_ScanNotify(PBTC_COEXIST pBtCoexist, u8 type);
+void EXhalbtc8723b2ant_ConnectNotify(PBTC_COEXIST pBtCoexist, u8 type);
+void EXhalbtc8723b2ant_MediaStatusNotify(PBTC_COEXIST pBtCoexist, u8 type);
+void EXhalbtc8723b2ant_SpecialPacketNotify(PBTC_COEXIST pBtCoexist, u8 type);
+void EXhalbtc8723b2ant_BtInfoNotify(
+ PBTC_COEXIST pBtCoexist, u8 *tmpBuf, u8 length
+);
+void EXhalbtc8723b2ant_HaltNotify(PBTC_COEXIST pBtCoexist);
+void EXhalbtc8723b2ant_PnpNotify(PBTC_COEXIST pBtCoexist, u8 pnpState);
+void EXhalbtc8723b2ant_Periodical(PBTC_COEXIST pBtCoexist);
+void EXhalbtc8723b2ant_DisplayCoexInfo(PBTC_COEXIST pBtCoexist);
diff --git a/drivers/staging/rtl8723bs/hal/HalBtcOutSrc.h b/drivers/staging/rtl8723bs/hal/HalBtcOutSrc.h
new file mode 100644
index 000000000000..38414dd8adbd
--- /dev/null
+++ b/drivers/staging/rtl8723bs/hal/HalBtcOutSrc.h
@@ -0,0 +1,566 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#ifndef __HALBTC_OUT_SRC_H__
+#define __HALBTC_OUT_SRC_H__
+
+#define NORMAL_EXEC false
+#define FORCE_EXEC true
+
+#define BTC_RF_OFF 0x0
+#define BTC_RF_ON 0x1
+
+#define BTC_RF_A 0x0
+#define BTC_RF_B 0x1
+#define BTC_RF_C 0x2
+#define BTC_RF_D 0x3
+
+#define BTC_SMSP SINGLEMAC_SINGLEPHY
+#define BTC_DMDP DUALMAC_DUALPHY
+#define BTC_DMSP DUALMAC_SINGLEPHY
+#define BTC_MP_UNKNOWN 0xff
+
+#define BT_COEX_ANT_TYPE_PG 0
+#define BT_COEX_ANT_TYPE_ANTDIV 1
+#define BT_COEX_ANT_TYPE_DETECTED 2
+
+#define BTC_MIMO_PS_STATIC 0 /* 1ss */
+#define BTC_MIMO_PS_DYNAMIC 1 /* 2ss */
+
+#define BTC_RATE_DISABLE 0
+#define BTC_RATE_ENABLE 1
+
+/* single Antenna definition */
+#define BTC_ANT_PATH_WIFI 0
+#define BTC_ANT_PATH_BT 1
+#define BTC_ANT_PATH_PTA 2
+/* dual Antenna definition */
+#define BTC_ANT_WIFI_AT_MAIN 0
+#define BTC_ANT_WIFI_AT_AUX 1
+/* coupler Antenna definition */
+#define BTC_ANT_WIFI_AT_CPL_MAIN 0
+#define BTC_ANT_WIFI_AT_CPL_AUX 1
+
+typedef enum _BTC_POWERSAVE_TYPE{
+ BTC_PS_WIFI_NATIVE = 0, /* wifi original power save behavior */
+ BTC_PS_LPS_ON = 1,
+ BTC_PS_LPS_OFF = 2,
+ BTC_PS_MAX
+} BTC_POWERSAVE_TYPE, *PBTC_POWERSAVE_TYPE;
+
+typedef enum _BTC_BT_REG_TYPE{
+ BTC_BT_REG_RF = 0,
+ BTC_BT_REG_MODEM = 1,
+ BTC_BT_REG_BLUEWIZE = 2,
+ BTC_BT_REG_VENDOR = 3,
+ BTC_BT_REG_LE = 4,
+ BTC_BT_REG_MAX
+} BTC_BT_REG_TYPE, *PBTC_BT_REG_TYPE;
+
+typedef enum _BTC_CHIP_INTERFACE{
+ BTC_INTF_UNKNOWN = 0,
+ BTC_INTF_PCI = 1,
+ BTC_INTF_USB = 2,
+ BTC_INTF_SDIO = 3,
+ BTC_INTF_MAX
+} BTC_CHIP_INTERFACE, *PBTC_CHIP_INTERFACE;
+
+typedef enum _BTC_CHIP_TYPE {
+ BTC_CHIP_UNDEF = 0,
+ BTC_CHIP_CSR_BC4 = 1,
+ BTC_CHIP_CSR_BC8 = 2,
+ BTC_CHIP_RTL8723A = 3,
+ BTC_CHIP_RTL8821 = 4,
+ BTC_CHIP_RTL8723B = 5,
+ BTC_CHIP_MAX
+} BTC_CHIP_TYPE, *PBTC_CHIP_TYPE;
+
+typedef enum _BTC_MSG_TYPE {
+ BTC_MSG_INTERFACE = 0x0,
+ BTC_MSG_ALGORITHM = 0x1,
+ BTC_MSG_MAX
+} BTC_MSG_TYPE;
+extern u32 GLBtcDbgType[];
+
+/* following is for BTC_MSG_INTERFACE */
+#define INTF_INIT BIT0
+#define INTF_NOTIFY BIT2
+
+/* following is for BTC_MSG_ALGORITHM */
+#define ALGO_BT_RSSI_STATE BIT0
+#define ALGO_WIFI_RSSI_STATE BIT1
+#define ALGO_BT_MONITOR BIT2
+#define ALGO_TRACE BIT3
+#define ALGO_TRACE_FW BIT4
+#define ALGO_TRACE_FW_DETAIL BIT5
+#define ALGO_TRACE_FW_EXEC BIT6
+#define ALGO_TRACE_SW BIT7
+#define ALGO_TRACE_SW_DETAIL BIT8
+#define ALGO_TRACE_SW_EXEC BIT9
+
+/* following is for wifi link status */
+#define WIFI_STA_CONNECTED BIT0
+#define WIFI_AP_CONNECTED BIT1
+#define WIFI_HS_CONNECTED BIT2
+#define WIFI_P2P_GO_CONNECTED BIT3
+#define WIFI_P2P_GC_CONNECTED BIT4
+
+/* following is for command line utility */
+#define CL_SPRINTF snprintf
+#define CL_PRINTF DCMD_Printf
+
+/* The following is for dbgview print */
+#if DBG
+#define BTC_PRINT(dbgtype, dbgflag, printstr)\
+{\
+ if (GLBtcDbgType[dbgtype] & dbgflag)\
+ DbgPrint printstr;\
+}
+
+#define BTC_PRINT_F(dbgtype, dbgflag, printstr)\
+{\
+ if (GLBtcDbgType[dbgtype] & dbgflag) {\
+ DbgPrint("%s(): ", __func__);\
+ DbgPrint printstr;\
+ } \
+}
+
+#define BTC_PRINT_ADDR(dbgtype, dbgflag, printstr, _Ptr)\
+{\
+ if (GLBtcDbgType[dbgtype] & dbgflag) {\
+ int __i;\
+ u8 *ptr = (u8 *)_Ptr;\
+ DbgPrint printstr;\
+ DbgPrint(" ");\
+ for (__i = 0; __i < 6; __i++)\
+ DbgPrint("%02X%s", ptr[__i], (__i == 5) ? "" : "-");\
+ DbgPrint("\n");\
+ } \
+}
+
+#define BTC_PRINT_DATA(dbgtype, dbgflag, _TitleString, _HexData, _HexDataLen)\
+{\
+ if (GLBtcDbgType[dbgtype] & dbgflag) {\
+ int __i;\
+ u8 *ptr = (u8 *)_HexData;\
+ DbgPrint(_TitleString);\
+ for (__i = 0; __i < (int)_HexDataLen; __i++) {\
+ DbgPrint("%02X%s", ptr[__i], (((__i + 1) % 4) == 0) ? " " : " ");\
+ if (((__i + 1) % 16) == 0)\
+ DbgPrint("\n");\
+ } \
+ DbgPrint("\n");\
+ } \
+}
+
+#else
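+/* with DBG disabled, route the arguments through no_printk so format strings are still type-checked but nothing is printed */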
+#define BTC_PRINT(dbgtype, dbgflag, printstr) no_printk printstr
+#define BTC_PRINT_F(dbgtype, dbgflag, printstr) no_printk printstr
+#define BTC_PRINT_ADDR(dbgtype, dbgflag, printstr, _Ptr) no_printk printstr
+#define BTC_PRINT_DATA(dbgtype, dbgflag, _TitleString, _HexData, _HexDataLen) \
+ no_printk("%s %p %zu", _TitleString, _HexData, _HexDataLen)
+#endif
+
+typedef struct _BTC_BOARD_INFO {
+ /* The following is some board information */
+ u8 btChipType;
+ u8 pgAntNum; /* pg ant number */
+ u8 btdmAntNum; /* ant number for btdm */
+ u8 btdmAntPos; /* Bryant Add to indicate Antenna Position for (pgAntNum = 2) && (btdmAntNum = 1) (DPDT+1Ant case) */
+ u8 singleAntPath; /* currently used for 8723b only, 1 => s0, 0 => s1 */
+ /* bool bBtExist; */
+} BTC_BOARD_INFO, *PBTC_BOARD_INFO;
+
+typedef enum _BTC_DBG_OPCODE {
+ BTC_DBG_SET_COEX_NORMAL = 0x0,
+ BTC_DBG_SET_COEX_WIFI_ONLY = 0x1,
+ BTC_DBG_SET_COEX_BT_ONLY = 0x2,
+ BTC_DBG_SET_COEX_DEC_BT_PWR = 0x3,
+ BTC_DBG_SET_COEX_BT_AFH_MAP = 0x4,
+ BTC_DBG_SET_COEX_BT_IGNORE_WLAN_ACT = 0x5,
+ BTC_DBG_MAX
+} BTC_DBG_OPCODE, *PBTC_DBG_OPCODE;
+
+typedef enum _BTC_RSSI_STATE {
+ BTC_RSSI_STATE_HIGH = 0x0,
+ BTC_RSSI_STATE_MEDIUM = 0x1,
+ BTC_RSSI_STATE_LOW = 0x2,
+ BTC_RSSI_STATE_STAY_HIGH = 0x3,
+ BTC_RSSI_STATE_STAY_MEDIUM = 0x4,
+ BTC_RSSI_STATE_STAY_LOW = 0x5,
+ BTC_RSSI_MAX
+} BTC_RSSI_STATE, *PBTC_RSSI_STATE;
+#define BTC_RSSI_HIGH(_rssi_) ((_rssi_ == BTC_RSSI_STATE_HIGH || _rssi_ == BTC_RSSI_STATE_STAY_HIGH) ? true : false)
+#define BTC_RSSI_MEDIUM(_rssi_) ((_rssi_ == BTC_RSSI_STATE_MEDIUM || _rssi_ == BTC_RSSI_STATE_STAY_MEDIUM) ? true : false)
+#define BTC_RSSI_LOW(_rssi_) ((_rssi_ == BTC_RSSI_STATE_LOW || _rssi_ == BTC_RSSI_STATE_STAY_LOW) ? true : false)
+
+typedef enum _BTC_WIFI_ROLE {
+ BTC_ROLE_STATION = 0x0,
+ BTC_ROLE_AP = 0x1,
+ BTC_ROLE_IBSS = 0x2,
+ BTC_ROLE_HS_MODE = 0x3,
+ BTC_ROLE_MAX
+} BTC_WIFI_ROLE, *PBTC_WIFI_ROLE;
+
+typedef enum _BTC_WIFI_BW_MODE {
+ BTC_WIFI_BW_LEGACY = 0x0,
+ BTC_WIFI_BW_HT20 = 0x1,
+ BTC_WIFI_BW_HT40 = 0x2,
+ BTC_WIFI_BW_MAX
+} BTC_WIFI_BW_MODE, *PBTC_WIFI_BW_MODE;
+
+typedef enum _BTC_WIFI_TRAFFIC_DIR {
+ BTC_WIFI_TRAFFIC_TX = 0x0,
+ BTC_WIFI_TRAFFIC_RX = 0x1,
+ BTC_WIFI_TRAFFIC_MAX
+} BTC_WIFI_TRAFFIC_DIR, *PBTC_WIFI_TRAFFIC_DIR;
+
+typedef enum _BTC_WIFI_PNP {
+ BTC_WIFI_PNP_WAKE_UP = 0x0,
+ BTC_WIFI_PNP_SLEEP = 0x1,
+ BTC_WIFI_PNP_MAX
+} BTC_WIFI_PNP, *PBTC_WIFI_PNP;
+
+/* for 8723b-d cut large current issue */
+typedef enum _BT_WIFI_COEX_STATE {
+ BTC_WIFI_STAT_INIT,
+ BTC_WIFI_STAT_IQK,
+ BTC_WIFI_STAT_NORMAL_OFF,
+ BTC_WIFI_STAT_MP_OFF,
+ BTC_WIFI_STAT_NORMAL,
+ BTC_WIFI_STAT_ANT_DIV,
+ BTC_WIFI_STAT_MAX
+} BT_WIFI_COEX_STATE, *PBT_WIFI_COEX_STATE;
+
+/* defined for BFP_BTC_GET */
+typedef enum _BTC_GET_TYPE {
+ /* type bool */
+ BTC_GET_BL_HS_OPERATION,
+ BTC_GET_BL_HS_CONNECTING,
+ BTC_GET_BL_WIFI_CONNECTED,
+ BTC_GET_BL_WIFI_BUSY,
+ BTC_GET_BL_WIFI_SCAN,
+ BTC_GET_BL_WIFI_LINK,
+ BTC_GET_BL_WIFI_ROAM,
+ BTC_GET_BL_WIFI_4_WAY_PROGRESS,
+ BTC_GET_BL_WIFI_UNDER_5G,
+ BTC_GET_BL_WIFI_AP_MODE_ENABLE,
+ BTC_GET_BL_WIFI_ENABLE_ENCRYPTION,
+ BTC_GET_BL_WIFI_UNDER_B_MODE,
+ BTC_GET_BL_EXT_SWITCH,
+ BTC_GET_BL_WIFI_IS_IN_MP_MODE,
+
+ /* type s32 */
+ BTC_GET_S4_WIFI_RSSI,
+ BTC_GET_S4_HS_RSSI,
+
+ /* type u32 */
+ BTC_GET_U4_WIFI_BW,
+ BTC_GET_U4_WIFI_TRAFFIC_DIRECTION,
+ BTC_GET_U4_WIFI_FW_VER,
+ BTC_GET_U4_WIFI_LINK_STATUS,
+ BTC_GET_U4_BT_PATCH_VER,
+
+ /* type u8 */
+ BTC_GET_U1_WIFI_DOT11_CHNL,
+ BTC_GET_U1_WIFI_CENTRAL_CHNL,
+ BTC_GET_U1_WIFI_HS_CHNL,
+ BTC_GET_U1_MAC_PHY_MODE,
+ BTC_GET_U1_AP_NUM,
+
+ /* for 1Ant ====== */
+ BTC_GET_U1_LPS_MODE,
+
+ BTC_GET_MAX
+} BTC_GET_TYPE, *PBTC_GET_TYPE;
+
+/* defined for BFP_BTC_SET */
+typedef enum _BTC_SET_TYPE {
+ /* type bool */
+ BTC_SET_BL_BT_DISABLE,
+ BTC_SET_BL_BT_TRAFFIC_BUSY,
+ BTC_SET_BL_BT_LIMITED_DIG,
+ BTC_SET_BL_FORCE_TO_ROAM,
+ BTC_SET_BL_TO_REJ_AP_AGG_PKT,
+ BTC_SET_BL_BT_CTRL_AGG_SIZE,
+ BTC_SET_BL_INC_SCAN_DEV_NUM,
+ BTC_SET_BL_BT_TX_RX_MASK,
+
+ /* type u8 */
+ BTC_SET_U1_RSSI_ADJ_VAL_FOR_AGC_TABLE_ON,
+ BTC_SET_U1_AGG_BUF_SIZE,
+
+ /* type trigger some action */
+ BTC_SET_ACT_GET_BT_RSSI,
+ BTC_SET_ACT_AGGREGATE_CTRL,
+ /* for 1Ant ====== */
+ /* type bool */
+
+ /* type u8 */
+ BTC_SET_U1_RSSI_ADJ_VAL_FOR_1ANT_COEX_TYPE,
+ BTC_SET_U1_LPS_VAL,
+ BTC_SET_U1_RPWM_VAL,
+ /* type trigger some action */
+ BTC_SET_ACT_LEAVE_LPS,
+ BTC_SET_ACT_ENTER_LPS,
+ BTC_SET_ACT_NORMAL_LPS,
+ BTC_SET_ACT_DISABLE_LOW_POWER,
+ BTC_SET_ACT_UPDATE_RAMASK,
+ BTC_SET_ACT_SEND_MIMO_PS,
+ /* BT Coex related */
+ BTC_SET_ACT_CTRL_BT_INFO,
+ BTC_SET_ACT_CTRL_BT_COEX,
+ BTC_SET_ACT_CTRL_8723B_ANT,
+ /* */
+ BTC_SET_MAX
+} BTC_SET_TYPE, *PBTC_SET_TYPE;
+
+typedef enum _BTC_DBG_DISP_TYPE {
+ BTC_DBG_DISP_COEX_STATISTICS = 0x0,
+ BTC_DBG_DISP_BT_LINK_INFO = 0x1,
+ BTC_DBG_DISP_FW_PWR_MODE_CMD = 0x2,
+ BTC_DBG_DISP_MAX
+} BTC_DBG_DISP_TYPE, *PBTC_DBG_DISP_TYPE;
+
+typedef enum _BTC_NOTIFY_TYPE_IPS {
+ BTC_IPS_LEAVE = 0x0,
+ BTC_IPS_ENTER = 0x1,
+ BTC_IPS_MAX
+} BTC_NOTIFY_TYPE_IPS, *PBTC_NOTIFY_TYPE_IPS;
+
+typedef enum _BTC_NOTIFY_TYPE_LPS {
+ BTC_LPS_DISABLE = 0x0,
+ BTC_LPS_ENABLE = 0x1,
+ BTC_LPS_MAX
+} BTC_NOTIFY_TYPE_LPS, *PBTC_NOTIFY_TYPE_LPS;
+
+typedef enum _BTC_NOTIFY_TYPE_SCAN {
+ BTC_SCAN_FINISH = 0x0,
+ BTC_SCAN_START = 0x1,
+ BTC_SCAN_MAX
+} BTC_NOTIFY_TYPE_SCAN, *PBTC_NOTIFY_TYPE_SCAN;
+
+typedef enum _BTC_NOTIFY_TYPE_ASSOCIATE {
+ BTC_ASSOCIATE_FINISH = 0x0,
+ BTC_ASSOCIATE_START = 0x1,
+ BTC_ASSOCIATE_MAX
+} BTC_NOTIFY_TYPE_ASSOCIATE, *PBTC_NOTIFY_TYPE_ASSOCIATE;
+
+typedef enum _BTC_NOTIFY_TYPE_MEDIA_STATUS {
+ BTC_MEDIA_DISCONNECT = 0x0,
+ BTC_MEDIA_CONNECT = 0x1,
+ BTC_MEDIA_MAX
+} BTC_NOTIFY_TYPE_MEDIA_STATUS, *PBTC_NOTIFY_TYPE_MEDIA_STATUS;
+
+typedef enum _BTC_NOTIFY_TYPE_SPECIAL_PACKET {
+ BTC_PACKET_UNKNOWN = 0x0,
+ BTC_PACKET_DHCP = 0x1,
+ BTC_PACKET_ARP = 0x2,
+ BTC_PACKET_EAPOL = 0x3,
+ BTC_PACKET_MAX
+} BTC_NOTIFY_TYPE_SPECIAL_PACKET, *PBTC_NOTIFY_TYPE_SPECIAL_PACKET;
+
+typedef enum _BTC_NOTIFY_TYPE_STACK_OPERATION {
+ BTC_STACK_OP_NONE = 0x0,
+ BTC_STACK_OP_INQ_PAGE_PAIR_START = 0x1,
+ BTC_STACK_OP_INQ_PAGE_PAIR_FINISH = 0x2,
+ BTC_STACK_OP_MAX
+} BTC_NOTIFY_TYPE_STACK_OPERATION, *PBTC_NOTIFY_TYPE_STACK_OPERATION;
+
+/* Bryant Add */
+typedef enum _BTC_ANTENNA_POS {
+ BTC_ANTENNA_AT_MAIN_PORT = 0x1,
+ BTC_ANTENNA_AT_AUX_PORT = 0x2,
+} BTC_ANTENNA_POS, *PBTC_ANTENNA_POS;
+
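+/* function-pointer types for the register and query accessors stored in BTC_COEXIST; the coex algorithm reaches the hardware only through these callbacks */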
+typedef u8 (*BFP_BTC_R1)(void *pBtcContext, u32 RegAddr);
+typedef u16(*BFP_BTC_R2)(void *pBtcContext, u32 RegAddr);
+typedef u32 (*BFP_BTC_R4)(void *pBtcContext, u32 RegAddr);
+typedef void (*BFP_BTC_W1)(void *pBtcContext, u32 RegAddr, u8 Data);
+typedef void(*BFP_BTC_W1_BIT_MASK)(
+ void *pBtcContext, u32 regAddr, u8 bitMask, u8 data1b
+);
+typedef void (*BFP_BTC_W2)(void *pBtcContext, u32 RegAddr, u16 Data);
+typedef void (*BFP_BTC_W4)(void *pBtcContext, u32 RegAddr, u32 Data);
+typedef void (*BFP_BTC_LOCAL_REG_W1)(void *pBtcContext, u32 RegAddr, u8 Data);
+typedef void (*BFP_BTC_SET_BB_REG)(
+ void *pBtcContext, u32 RegAddr, u32 BitMask, u32 Data
+);
+typedef u32 (*BFP_BTC_GET_BB_REG)(void *pBtcContext, u32 RegAddr, u32 BitMask);
+typedef void (*BFP_BTC_SET_RF_REG)(
+ void *pBtcContext, u8 eRFPath, u32 RegAddr, u32 BitMask, u32 Data
+);
+typedef u32 (*BFP_BTC_GET_RF_REG)(
+ void *pBtcContext, u8 eRFPath, u32 RegAddr, u32 BitMask
+);
+typedef void (*BFP_BTC_FILL_H2C)(
+ void *pBtcContext, u8 elementId, u32 cmdLen, u8 *pCmdBuffer
+);
+
+typedef u8 (*BFP_BTC_GET)(void *pBtCoexist, u8 getType, void *pOutBuf);
+
+typedef u8 (*BFP_BTC_SET)(void *pBtCoexist, u8 setType, void *pInBuf);
+typedef void (*BFP_BTC_SET_BT_REG)(
+ void *pBtcContext, u8 regType, u32 offset, u32 value
+);
+typedef u32 (*BFP_BTC_GET_BT_REG)(void *pBtcContext, u8 regType, u32 offset);
+typedef void (*BFP_BTC_DISP_DBG_MSG)(void *pBtCoexist, u8 dispType);
+
+typedef struct _BTC_BT_INFO {
+ bool bBtDisabled;
+ u8 rssiAdjustForAgcTableOn;
+ u8 rssiAdjustFor1AntCoexType;
+ bool bPreBtCtrlAggBufSize;
+ bool bBtCtrlAggBufSize;
+ bool bRejectAggPkt;
+ bool bIncreaseScanDevNum;
+ bool bBtTxRxMask;
+ u8 preAggBufSize;
+ u8 aggBufSize;
+ bool bBtBusy;
+ bool bLimitedDig;
+ u16 btHciVer;
+ u16 btRealFwVer;
+ u8 btFwVer;
+ u32 getBtFwVerCnt;
+
+ bool bBtDisableLowPwr;
+
+ bool bBtCtrlLps;
+ bool bBtLpsOn;
+ bool bForceToRoam; /* for 1Ant solution */
+ u8 lpsVal;
+ u8 rpwmVal;
+ u32 raMask;
+} BTC_BT_INFO, *PBTC_BT_INFO;
+
+typedef struct _BTC_STACK_INFO {
+ bool bProfileNotified;
+ u16 hciVersion; /* stack hci version */
+ u8 numOfLink;
+ bool bBtLinkExist;
+ bool bScoExist;
+ bool bAclExist;
+ bool bA2dpExist;
+ bool bHidExist;
+ u8 numOfHid;
+ bool bPanExist;
+ bool bUnknownAclExist;
+ s8 minBtRssi;
+} BTC_STACK_INFO, *PBTC_STACK_INFO;
+
+typedef struct _BTC_BT_LINK_INFO {
+ bool bBtLinkExist;
+ bool bScoExist;
+ bool bScoOnly;
+ bool bA2dpExist;
+ bool bA2dpOnly;
+ bool bHidExist;
+ bool bHidOnly;
+ bool bPanExist;
+ bool bPanOnly;
+ bool bSlaveRole;
+} BTC_BT_LINK_INFO, *PBTC_BT_LINK_INFO;
+
+typedef struct _BTC_STATISTICS {
+ u32 cntBind;
+ u32 cntPowerOn;
+ u32 cntInitHwConfig;
+ u32 cntInitCoexDm;
+ u32 cntIpsNotify;
+ u32 cntLpsNotify;
+ u32 cntScanNotify;
+ u32 cntConnectNotify;
+ u32 cntMediaStatusNotify;
+ u32 cntSpecialPacketNotify;
+ u32 cntBtInfoNotify;
+ u32 cntRfStatusNotify;
+ u32 cntPeriodical;
+ u32 cntCoexDmSwitch;
+ u32 cntStackOperationNotify;
+ u32 cntDbgCtrl;
+} BTC_STATISTICS, *PBTC_STATISTICS;
+
+typedef struct _BTC_COEXIST {
+ bool bBinded; /* make sure only one adapter can bind the data context */
+ void *Adapter; /* default adapter */
+ BTC_BOARD_INFO boardInfo;
+ BTC_BT_INFO btInfo; /* some bt info referenced by non-bt module */
+ BTC_STACK_INFO stackInfo;
+ BTC_BT_LINK_INFO btLinkInfo;
+ BTC_CHIP_INTERFACE chipInterface;
+
+ bool bInitilized;
+ bool bStopCoexDm;
+ bool bManualControl;
+ u8 *cliBuf;
+ BTC_STATISTICS statistics;
+ u8 pwrModeVal[10];
+
+ /* function pointers */
+ /* io related */
+ BFP_BTC_R1 fBtcRead1Byte;
+ BFP_BTC_W1 fBtcWrite1Byte;
+ BFP_BTC_W1_BIT_MASK fBtcWrite1ByteBitMask;
+ BFP_BTC_R2 fBtcRead2Byte;
+ BFP_BTC_W2 fBtcWrite2Byte;
+ BFP_BTC_R4 fBtcRead4Byte;
+ BFP_BTC_W4 fBtcWrite4Byte;
+ BFP_BTC_LOCAL_REG_W1 fBtcWriteLocalReg1Byte;
+ /* read/write bb related */
+ BFP_BTC_SET_BB_REG fBtcSetBbReg;
+ BFP_BTC_GET_BB_REG fBtcGetBbReg;
+
+ /* read/write rf related */
+ BFP_BTC_SET_RF_REG fBtcSetRfReg;
+ BFP_BTC_GET_RF_REG fBtcGetRfReg;
+
+ /* fill h2c related */
+ BFP_BTC_FILL_H2C fBtcFillH2c;
+ /* other */
+ BFP_BTC_DISP_DBG_MSG fBtcDispDbgMsg;
+ /* normal get/set related */
+ BFP_BTC_GET fBtcGet;
+ BFP_BTC_SET fBtcSet;
+
+ BFP_BTC_GET_BT_REG fBtcGetBtReg;
+ BFP_BTC_SET_BT_REG fBtcSetBtReg;
+} BTC_COEXIST, *PBTC_COEXIST;
+
+extern BTC_COEXIST GLBtCoexist;
+
+u8 EXhalbtcoutsrc_InitlizeVariables(void *Adapter);
+void EXhalbtcoutsrc_PowerOnSetting(PBTC_COEXIST pBtCoexist);
+void EXhalbtcoutsrc_InitHwConfig(PBTC_COEXIST pBtCoexist, u8 bWifiOnly);
+void EXhalbtcoutsrc_InitCoexDm(PBTC_COEXIST pBtCoexist);
+void EXhalbtcoutsrc_IpsNotify(PBTC_COEXIST pBtCoexist, u8 type);
+void EXhalbtcoutsrc_LpsNotify(PBTC_COEXIST pBtCoexist, u8 type);
+void EXhalbtcoutsrc_ScanNotify(PBTC_COEXIST pBtCoexist, u8 type);
+void EXhalbtcoutsrc_ConnectNotify(PBTC_COEXIST pBtCoexist, u8 action);
+void EXhalbtcoutsrc_MediaStatusNotify(
+ PBTC_COEXIST pBtCoexist, RT_MEDIA_STATUS mediaStatus
+);
+void EXhalbtcoutsrc_SpecialPacketNotify(PBTC_COEXIST pBtCoexist, u8 pktType);
+void EXhalbtcoutsrc_BtInfoNotify(
+ PBTC_COEXIST pBtCoexist, u8 *tmpBuf, u8 length
+);
+void EXhalbtcoutsrc_HaltNotify(PBTC_COEXIST pBtCoexist);
+void EXhalbtcoutsrc_PnpNotify(PBTC_COEXIST pBtCoexist, u8 pnpState);
+void EXhalbtcoutsrc_Periodical(PBTC_COEXIST pBtCoexist);
+void EXhalbtcoutsrc_SetChipType(u8 chipType);
+void EXhalbtcoutsrc_SetAntNum(u8 type, u8 antNum);
+void EXhalbtcoutsrc_SetSingleAntPath(u8 singleAntPath);
+void EXhalbtcoutsrc_DisplayBtCoexInfo(PBTC_COEXIST pBtCoexist);
+
+#endif
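
The header above exposes the coexistence engine purely through the BFP_BTC_* callback slots in BTC_COEXIST, so the algorithm code never touches the bus directly. The sketch below is illustrative only: my_sdio_read8() and my_fill_h2c() are hypothetical stand-ins for an adapter's real accessors and are not part of this patch; it merely shows how a HAL layer could populate two of the slots declared above.

/* Hypothetical example (not driver code): wiring adapter-specific accessors
 * into the BTC_COEXIST callback table defined in the header above. */
static u8 my_sdio_read8(void *pBtcContext, u32 RegAddr)
{
	/* pBtcContext would identify the bound adapter; read via the bus here. */
	(void)pBtcContext;
	(void)RegAddr;
	return 0;
}

static void my_fill_h2c(void *pBtcContext, u8 elementId, u32 cmdLen, u8 *pCmdBuffer)
{
	/* Hand the H2C command buffer to the firmware interface here. */
	(void)pBtcContext;
	(void)elementId;
	(void)cmdLen;
	(void)pCmdBuffer;
}

static void wire_up_btcoex_callbacks(PBTC_COEXIST pBtCoexist)
{
	/* These assignments match the BFP_BTC_R1 and BFP_BTC_FILL_H2C typedefs. */
	pBtCoexist->fBtcRead1Byte = my_sdio_read8;
	pBtCoexist->fBtcFillH2c = my_fill_h2c;
	/* ...the remaining BFP_BTC_* slots would be filled the same way... */
}

Because every hardware access goes through these pointers, the same coexistence algorithm source can be reused across bus variants; only the callback table changes.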
diff --git a/drivers/staging/rtl8723bs/hal/HalHWImg8723B_BB.c b/drivers/staging/rtl8723bs/hal/HalHWImg8723B_BB.c
new file mode 100644
index 000000000000..51d4219177d3
--- /dev/null
+++ b/drivers/staging/rtl8723bs/hal/HalHWImg8723B_BB.c
@@ -0,0 +1,643 @@
+/******************************************************************************
+*
+* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+*
+* This program is free software; you can redistribute it and/or modify it
+* under the terms of version 2 of the GNU General Public License as
+* published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful, but WITHOUT
+* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+* more details.
+*
+******************************************************************************/
+
+
+#include "odm_precomp.h"
+
+static bool CheckPositive(
+ PDM_ODM_T pDM_Odm, const u32 Condition1, const u32 Condition2
+)
+{
+ u8 _BoardType =
+ ((pDM_Odm->BoardType & BIT4) >> 4) << 0 | /* _GLNA */
+ ((pDM_Odm->BoardType & BIT3) >> 3) << 1 | /* _GPA */
+ ((pDM_Odm->BoardType & BIT7) >> 7) << 2 | /* _ALNA */
+ ((pDM_Odm->BoardType & BIT6) >> 6) << 3 | /* _APA */
+ ((pDM_Odm->BoardType & BIT2) >> 2) << 4; /* _BT */
+
+ u32 cond1 = Condition1, cond2 = Condition2;
+ u32 driver1 =
+ pDM_Odm->CutVersion << 24 |
+ pDM_Odm->SupportPlatform << 16 |
+ pDM_Odm->PackageType << 12 |
+ pDM_Odm->SupportInterface << 8 |
+ _BoardType;
+
+ u32 driver2 =
+ pDM_Odm->TypeGLNA << 0 |
+ pDM_Odm->TypeGPA << 8 |
+ pDM_Odm->TypeALNA << 16 |
+ pDM_Odm->TypeAPA << 24;
+
+ ODM_RT_TRACE(
+ pDM_Odm,
+ ODM_COMP_INIT,
+ ODM_DBG_TRACE,
+ (
+ "===> [8812A] CheckPositive (cond1, cond2) = (0x%X 0x%X)\n",
+ cond1,
+ cond2
+ )
+ );
+ ODM_RT_TRACE(
+ pDM_Odm,
+ ODM_COMP_INIT,
+ ODM_DBG_TRACE,
+ (
+ "===> [8812A] CheckPositive (driver1, driver2) = (0x%X 0x%X)\n",
+ driver1,
+ driver2
+ )
+ );
+
+ ODM_RT_TRACE(
+ pDM_Odm,
+ ODM_COMP_INIT,
+ ODM_DBG_TRACE,
+ (" (Platform, Interface) = (0x%X, 0x%X)\n",
+ pDM_Odm->SupportPlatform,
+ pDM_Odm->SupportInterface
+ )
+ );
+ ODM_RT_TRACE(
+ pDM_Odm,
+ ODM_COMP_INIT,
+ ODM_DBG_TRACE,
+ (
+ " (Board, Package) = (0x%X, 0x%X)\n",
+ pDM_Odm->BoardType,
+ pDM_Odm->PackageType
+ )
+ );
+
+
+ /* Value Defined Check =============== */
+ /* QFN Type [15:12] and Cut Version [27:24] require a value check */
+
+ if (((cond1 & 0x0000F000) != 0) && ((cond1 & 0x0000F000) != (driver1 & 0x0000F000)))
+ return false;
+ if (((cond1 & 0x0F000000) != 0) && ((cond1 & 0x0F000000) != (driver1 & 0x0F000000)))
+ return false;
+
+ /* Bit Defined Check ================ */
+ /* Bits [31:28] and [23:20] are don't-care */
+ /* */
+ cond1 &= 0x000F0FFF;
+ driver1 &= 0x000F0FFF;
+
+ if ((cond1 & driver1) == cond1) {
+ u32 bitMask = 0;
+
+ if ((cond1 & 0x0F) == 0) /* BoardType is DONTCARE */
+ return true;
+
+ if ((cond1 & BIT0) != 0) /* GLNA */
+ bitMask |= 0x000000FF;
+ if ((cond1 & BIT1) != 0) /* GPA */
+ bitMask |= 0x0000FF00;
+ if ((cond1 & BIT2) != 0) /* ALNA */
+ bitMask |= 0x00FF0000;
+ if ((cond1 & BIT3) != 0) /* APA */
+ bitMask |= 0xFF000000;
+
+ /* BoardType of each RF path is matched */
+ if ((cond2 & bitMask) == (driver2 & bitMask))
+ return true;
+ }
+ return false;
+}
+
+static bool CheckNegative(
+ PDM_ODM_T pDM_Odm, const u32 Condition1, const u32 Condition2
+)
+{
+ return true;
+}
+
+/******************************************************************************
+* AGC_TAB.TXT
+******************************************************************************/
+
+static u32 Array_MP_8723B_AGC_TAB[] = {
+ 0xC78, 0xFD000001,
+ 0xC78, 0xFC010001,
+ 0xC78, 0xFB020001,
+ 0xC78, 0xFA030001,
+ 0xC78, 0xF9040001,
+ 0xC78, 0xF8050001,
+ 0xC78, 0xF7060001,
+ 0xC78, 0xF6070001,
+ 0xC78, 0xF5080001,
+ 0xC78, 0xF4090001,
+ 0xC78, 0xF30A0001,
+ 0xC78, 0xF20B0001,
+ 0xC78, 0xF10C0001,
+ 0xC78, 0xF00D0001,
+ 0xC78, 0xEF0E0001,
+ 0xC78, 0xEE0F0001,
+ 0xC78, 0xED100001,
+ 0xC78, 0xEC110001,
+ 0xC78, 0xEB120001,
+ 0xC78, 0xEA130001,
+ 0xC78, 0xE9140001,
+ 0xC78, 0xE8150001,
+ 0xC78, 0xE7160001,
+ 0xC78, 0xE6170001,
+ 0xC78, 0xE5180001,
+ 0xC78, 0xE4190001,
+ 0xC78, 0xE31A0001,
+ 0xC78, 0xA51B0001,
+ 0xC78, 0xA41C0001,
+ 0xC78, 0xA31D0001,
+ 0xC78, 0x671E0001,
+ 0xC78, 0x661F0001,
+ 0xC78, 0x65200001,
+ 0xC78, 0x64210001,
+ 0xC78, 0x63220001,
+ 0xC78, 0x4A230001,
+ 0xC78, 0x49240001,
+ 0xC78, 0x48250001,
+ 0xC78, 0x47260001,
+ 0xC78, 0x46270001,
+ 0xC78, 0x45280001,
+ 0xC78, 0x44290001,
+ 0xC78, 0x432A0001,
+ 0xC78, 0x422B0001,
+ 0xC78, 0x292C0001,
+ 0xC78, 0x282D0001,
+ 0xC78, 0x272E0001,
+ 0xC78, 0x262F0001,
+ 0xC78, 0x0A300001,
+ 0xC78, 0x09310001,
+ 0xC78, 0x08320001,
+ 0xC78, 0x07330001,
+ 0xC78, 0x06340001,
+ 0xC78, 0x05350001,
+ 0xC78, 0x04360001,
+ 0xC78, 0x03370001,
+ 0xC78, 0x02380001,
+ 0xC78, 0x01390001,
+ 0xC78, 0x013A0001,
+ 0xC78, 0x013B0001,
+ 0xC78, 0x013C0001,
+ 0xC78, 0x013D0001,
+ 0xC78, 0x013E0001,
+ 0xC78, 0x013F0001,
+ 0xC78, 0xFC400001,
+ 0xC78, 0xFB410001,
+ 0xC78, 0xFA420001,
+ 0xC78, 0xF9430001,
+ 0xC78, 0xF8440001,
+ 0xC78, 0xF7450001,
+ 0xC78, 0xF6460001,
+ 0xC78, 0xF5470001,
+ 0xC78, 0xF4480001,
+ 0xC78, 0xF3490001,
+ 0xC78, 0xF24A0001,
+ 0xC78, 0xF14B0001,
+ 0xC78, 0xF04C0001,
+ 0xC78, 0xEF4D0001,
+ 0xC78, 0xEE4E0001,
+ 0xC78, 0xED4F0001,
+ 0xC78, 0xEC500001,
+ 0xC78, 0xEB510001,
+ 0xC78, 0xEA520001,
+ 0xC78, 0xE9530001,
+ 0xC78, 0xE8540001,
+ 0xC78, 0xE7550001,
+ 0xC78, 0xE6560001,
+ 0xC78, 0xE5570001,
+ 0xC78, 0xE4580001,
+ 0xC78, 0xE3590001,
+ 0xC78, 0xA65A0001,
+ 0xC78, 0xA55B0001,
+ 0xC78, 0xA45C0001,
+ 0xC78, 0xA35D0001,
+ 0xC78, 0x675E0001,
+ 0xC78, 0x665F0001,
+ 0xC78, 0x65600001,
+ 0xC78, 0x64610001,
+ 0xC78, 0x63620001,
+ 0xC78, 0x62630001,
+ 0xC78, 0x61640001,
+ 0xC78, 0x48650001,
+ 0xC78, 0x47660001,
+ 0xC78, 0x46670001,
+ 0xC78, 0x45680001,
+ 0xC78, 0x44690001,
+ 0xC78, 0x436A0001,
+ 0xC78, 0x426B0001,
+ 0xC78, 0x286C0001,
+ 0xC78, 0x276D0001,
+ 0xC78, 0x266E0001,
+ 0xC78, 0x256F0001,
+ 0xC78, 0x24700001,
+ 0xC78, 0x09710001,
+ 0xC78, 0x08720001,
+ 0xC78, 0x07730001,
+ 0xC78, 0x06740001,
+ 0xC78, 0x05750001,
+ 0xC78, 0x04760001,
+ 0xC78, 0x03770001,
+ 0xC78, 0x02780001,
+ 0xC78, 0x01790001,
+ 0xC78, 0x017A0001,
+ 0xC78, 0x017B0001,
+ 0xC78, 0x017C0001,
+ 0xC78, 0x017D0001,
+ 0xC78, 0x017E0001,
+ 0xC78, 0x017F0001,
+ 0xC50, 0x69553422,
+ 0xC50, 0x69553420,
+ 0x824, 0x00390204,
+
+};
+
+void ODM_ReadAndConfig_MP_8723B_AGC_TAB(PDM_ODM_T pDM_Odm)
+{
+ u32 i = 0;
+ u32 ArrayLen = sizeof(Array_MP_8723B_AGC_TAB)/sizeof(u32);
+ u32 *Array = Array_MP_8723B_AGC_TAB;
+
+ ODM_RT_TRACE(
+ pDM_Odm,
+ ODM_COMP_INIT,
+ ODM_DBG_LOUD,
+ ("===> ODM_ReadAndConfig_MP_8723B_AGC_TAB\n")
+ );
+
+ for (i = 0; i < ArrayLen; i += 2) {
+ u32 v1 = Array[i];
+ u32 v2 = Array[i+1];
+
+ /* This (offset, data) pair is applied unconditionally. */
+ if (v1 < 0x40000000) {
+ odm_ConfigBB_AGC_8723B(pDM_Odm, v1, bMaskDWord, v2);
+ continue;
+ } else {
+ /* This line marks the beginning of a conditional branch. */
+ bool bMatched = true;
+ u8 cCond = (u8)((v1 & (BIT29|BIT28)) >> 28);
+
+ if (cCond == COND_ELSE) { /* ELSE, ENDIF */
+ bMatched = true;
+ READ_NEXT_PAIR(v1, v2, i);
+ } else if (!CheckPositive(pDM_Odm, v1, v2)) {
+ bMatched = false;
+ READ_NEXT_PAIR(v1, v2, i);
+ READ_NEXT_PAIR(v1, v2, i);
+ } else {
+ READ_NEXT_PAIR(v1, v2, i);
+ if (!CheckNegative(pDM_Odm, v1, v2))
+ bMatched = false;
+ else
+ bMatched = true;
+ READ_NEXT_PAIR(v1, v2, i);
+ }
+
+ if (bMatched == false) {
+ /* Condition isn't matched.
+ * Discard the following (offset, data) pairs.
+ */
+ while (v1 < 0x40000000 && i < ArrayLen-2)
+ READ_NEXT_PAIR(v1, v2, i);
+
+ i -= 2; /* compensate for the for-loop's += 2 */
+ } else {
+ /* Configure matched pairs and skip to end of if-else. */
+ while (v1 < 0x40000000 && i < ArrayLen-2) {
+ odm_ConfigBB_AGC_8723B(pDM_Odm, v1, bMaskDWord, v2);
+ READ_NEXT_PAIR(v1, v2, i);
+ }
+
+ /* Keep reading until ENDIF. */
+ cCond = (u8)((v1 & (BIT29|BIT28)) >> 28);
+ while (cCond != COND_ENDIF && i < ArrayLen-2) {
+ READ_NEXT_PAIR(v1, v2, i);
+ cCond = (u8)((v1 & (BIT29|BIT28)) >> 28);
+ }
+ }
+ }
+ }
+}
+
+/******************************************************************************
+* PHY_REG.TXT
+******************************************************************************/
+
+static u32 Array_MP_8723B_PHY_REG[] = {
+ 0x800, 0x80040000,
+ 0x804, 0x00000003,
+ 0x808, 0x0000FC00,
+ 0x80C, 0x0000000A,
+ 0x810, 0x10001331,
+ 0x814, 0x020C3D10,
+ 0x818, 0x02200385,
+ 0x81C, 0x00000000,
+ 0x820, 0x01000100,
+ 0x824, 0x00190204,
+ 0x828, 0x00000000,
+ 0x82C, 0x00000000,
+ 0x830, 0x00000000,
+ 0x834, 0x00000000,
+ 0x838, 0x00000000,
+ 0x83C, 0x00000000,
+ 0x840, 0x00010000,
+ 0x844, 0x00000000,
+ 0x848, 0x00000000,
+ 0x84C, 0x00000000,
+ 0x850, 0x00000000,
+ 0x854, 0x00000000,
+ 0x858, 0x569A11A9,
+ 0x85C, 0x01000014,
+ 0x860, 0x66F60110,
+ 0x864, 0x061F0649,
+ 0x868, 0x00000000,
+ 0x86C, 0x27272700,
+ 0x870, 0x07000760,
+ 0x874, 0x25004000,
+ 0x878, 0x00000808,
+ 0x87C, 0x00000000,
+ 0x880, 0xB0000C1C,
+ 0x884, 0x00000001,
+ 0x888, 0x00000000,
+ 0x88C, 0xCCC000C0,
+ 0x890, 0x00000800,
+ 0x894, 0xFFFFFFFE,
+ 0x898, 0x40302010,
+ 0x89C, 0x00706050,
+ 0x900, 0x00000000,
+ 0x904, 0x00000023,
+ 0x908, 0x00000000,
+ 0x90C, 0x81121111,
+ 0x910, 0x00000002,
+ 0x914, 0x00000201,
+ 0xA00, 0x00D047C8,
+ 0xA04, 0x80FF800C,
+ 0xA08, 0x8C838300,
+ 0xA0C, 0x2E7F120F,
+ 0xA10, 0x9500BB78,
+ 0xA14, 0x1114D028,
+ 0xA18, 0x00881117,
+ 0xA1C, 0x89140F00,
+ 0xA20, 0x1A1B0000,
+ 0xA24, 0x090E1317,
+ 0xA28, 0x00000204,
+ 0xA2C, 0x00D30000,
+ 0xA70, 0x101FBF00,
+ 0xA74, 0x00000007,
+ 0xA78, 0x00000900,
+ 0xA7C, 0x225B0606,
+ 0xA80, 0x21806490,
+ 0xB2C, 0x00000000,
+ 0xC00, 0x48071D40,
+ 0xC04, 0x03A05611,
+ 0xC08, 0x000000E4,
+ 0xC0C, 0x6C6C6C6C,
+ 0xC10, 0x08800000,
+ 0xC14, 0x40000100,
+ 0xC18, 0x08800000,
+ 0xC1C, 0x40000100,
+ 0xC20, 0x00000000,
+ 0xC24, 0x00000000,
+ 0xC28, 0x00000000,
+ 0xC2C, 0x00000000,
+ 0xC30, 0x69E9AC44,
+ 0xC34, 0x469652AF,
+ 0xC38, 0x49795994,
+ 0xC3C, 0x0A97971C,
+ 0xC40, 0x1F7C403F,
+ 0xC44, 0x000100B7,
+ 0xC48, 0xEC020107,
+ 0xC4C, 0x007F037F,
+ 0xC50, 0x69553420,
+ 0xC54, 0x43BC0094,
+ 0xC58, 0x00013149,
+ 0xC5C, 0x00250492,
+ 0xC60, 0x00000000,
+ 0xC64, 0x7112848B,
+ 0xC68, 0x47C00BFF,
+ 0xC6C, 0x00000036,
+ 0xC70, 0x2C7F000D,
+ 0xC74, 0x020610DB,
+ 0xC78, 0x0000001F,
+ 0xC7C, 0x00B91612,
+ 0xC80, 0x390000E4,
+ 0xC84, 0x20F60000,
+ 0xC88, 0x40000100,
+ 0xC8C, 0x20200000,
+ 0xC90, 0x00020E1A,
+ 0xC94, 0x00000000,
+ 0xC98, 0x00020E1A,
+ 0xC9C, 0x00007F7F,
+ 0xCA0, 0x00000000,
+ 0xCA4, 0x000300A0,
+ 0xCA8, 0x00000000,
+ 0xCAC, 0x00000000,
+ 0xCB0, 0x00000000,
+ 0xCB4, 0x00000000,
+ 0xCB8, 0x00000000,
+ 0xCBC, 0x28000000,
+ 0xCC0, 0x00000000,
+ 0xCC4, 0x00000000,
+ 0xCC8, 0x00000000,
+ 0xCCC, 0x00000000,
+ 0xCD0, 0x00000000,
+ 0xCD4, 0x00000000,
+ 0xCD8, 0x64B22427,
+ 0xCDC, 0x00766932,
+ 0xCE0, 0x00222222,
+ 0xCE4, 0x00000000,
+ 0xCE8, 0x37644302,
+ 0xCEC, 0x2F97D40C,
+ 0xD00, 0x00000740,
+ 0xD04, 0x40020401,
+ 0xD08, 0x0000907F,
+ 0xD0C, 0x20010201,
+ 0xD10, 0xA0633333,
+ 0xD14, 0x3333BC53,
+ 0xD18, 0x7A8F5B6F,
+ 0xD2C, 0xCC979975,
+ 0xD30, 0x00000000,
+ 0xD34, 0x80608000,
+ 0xD38, 0x00000000,
+ 0xD3C, 0x00127353,
+ 0xD40, 0x00000000,
+ 0xD44, 0x00000000,
+ 0xD48, 0x00000000,
+ 0xD4C, 0x00000000,
+ 0xD50, 0x6437140A,
+ 0xD54, 0x00000000,
+ 0xD58, 0x00000282,
+ 0xD5C, 0x30032064,
+ 0xD60, 0x4653DE68,
+ 0xD64, 0x04518A3C,
+ 0xD68, 0x00002101,
+ 0xD6C, 0x2A201C16,
+ 0xD70, 0x1812362E,
+ 0xD74, 0x322C2220,
+ 0xD78, 0x000E3C24,
+ 0xE00, 0x2D2D2D2D,
+ 0xE04, 0x2D2D2D2D,
+ 0xE08, 0x0390272D,
+ 0xE10, 0x2D2D2D2D,
+ 0xE14, 0x2D2D2D2D,
+ 0xE18, 0x2D2D2D2D,
+ 0xE1C, 0x2D2D2D2D,
+ 0xE28, 0x00000000,
+ 0xE30, 0x1000DC1F,
+ 0xE34, 0x10008C1F,
+ 0xE38, 0x02140102,
+ 0xE3C, 0x681604C2,
+ 0xE40, 0x01007C00,
+ 0xE44, 0x01004800,
+ 0xE48, 0xFB000000,
+ 0xE4C, 0x000028D1,
+ 0xE50, 0x1000DC1F,
+ 0xE54, 0x10008C1F,
+ 0xE58, 0x02140102,
+ 0xE5C, 0x28160D05,
+ 0xE60, 0x00000008,
+ 0xE68, 0x001B2556,
+ 0xE6C, 0x00C00096,
+ 0xE70, 0x00C00096,
+ 0xE74, 0x01000056,
+ 0xE78, 0x01000014,
+ 0xE7C, 0x01000056,
+ 0xE80, 0x01000014,
+ 0xE84, 0x00C00096,
+ 0xE88, 0x01000056,
+ 0xE8C, 0x00C00096,
+ 0xED0, 0x00C00096,
+ 0xED4, 0x00C00096,
+ 0xED8, 0x00C00096,
+ 0xEDC, 0x000000D6,
+ 0xEE0, 0x000000D6,
+ 0xEEC, 0x01C00016,
+ 0xF14, 0x00000003,
+ 0xF4C, 0x00000000,
+ 0xF00, 0x00000300,
+ 0x820, 0x01000100,
+ 0x800, 0x83040000,
+
+};
+
+void ODM_ReadAndConfig_MP_8723B_PHY_REG(PDM_ODM_T pDM_Odm)
+{
+ u32 i = 0;
+ u32 ArrayLen = sizeof(Array_MP_8723B_PHY_REG)/sizeof(u32);
+ u32 *Array = Array_MP_8723B_PHY_REG;
+
+ ODM_RT_TRACE(
+ pDM_Odm,
+ ODM_COMP_INIT,
+ ODM_DBG_LOUD,
+ ("===> ODM_ReadAndConfig_MP_8723B_PHY_REG\n")
+ );
+
+ for (i = 0; i < ArrayLen; i += 2) {
+ u32 v1 = Array[i];
+ u32 v2 = Array[i+1];
+
+ /* This (offset, data) pair is applied unconditionally. */
+ if (v1 < 0x40000000) {
+ odm_ConfigBB_PHY_8723B(pDM_Odm, v1, bMaskDWord, v2);
+ continue;
+ } else {
+ /* This line marks the beginning of a conditional branch. */
+ bool bMatched = true;
+ u8 cCond = (u8)((v1 & (BIT29|BIT28)) >> 28);
+
+ if (cCond == COND_ELSE) { /* ELSE, ENDIF */
+ bMatched = true;
+ READ_NEXT_PAIR(v1, v2, i);
+ } else if (!CheckPositive(pDM_Odm, v1, v2)) {
+ bMatched = false;
+ READ_NEXT_PAIR(v1, v2, i);
+ READ_NEXT_PAIR(v1, v2, i);
+ } else {
+ READ_NEXT_PAIR(v1, v2, i);
+ if (!CheckNegative(pDM_Odm, v1, v2))
+ bMatched = false;
+ else
+ bMatched = true;
+ READ_NEXT_PAIR(v1, v2, i);
+ }
+
+ if (bMatched == false) {
+ /* Condition isn't matched.
+ * Discard the following (offset, data) pairs.
+ */
+ while (v1 < 0x40000000 && i < ArrayLen-2)
+ READ_NEXT_PAIR(v1, v2, i);
+
+ i -= 2; /* compensate for the for-loop's += 2 */
+ } else { /* Configure matched pairs and skip to end of if-else. */
+ while (v1 < 0x40000000 && i < ArrayLen-2) {
+ odm_ConfigBB_PHY_8723B(pDM_Odm, v1, bMaskDWord, v2);
+ READ_NEXT_PAIR(v1, v2, i);
+ }
+
+ /* Keep reading until ENDIF. */
+ cCond = (u8)((v1 & (BIT29|BIT28)) >> 28);
+ while (cCond != COND_ENDIF && i < ArrayLen-2) {
+ READ_NEXT_PAIR(v1, v2, i);
+ cCond = (u8)((v1 & (BIT29|BIT28)) >> 28);
+ }
+ }
+ }
+ }
+}
+
+/******************************************************************************
+* PHY_REG_PG.TXT
+******************************************************************************/
+
+static u32 Array_MP_8723B_PHY_REG_PG[] = {
+ 0, 0, 0, 0x00000e08, 0x0000ff00, 0x00003800,
+ 0, 0, 0, 0x0000086c, 0xffffff00, 0x32343600,
+ 0, 0, 0, 0x00000e00, 0xffffffff, 0x40424444,
+ 0, 0, 0, 0x00000e04, 0xffffffff, 0x28323638,
+ 0, 0, 0, 0x00000e10, 0xffffffff, 0x38404244,
+ 0, 0, 0, 0x00000e14, 0xffffffff, 0x26303436
+};
+
+void ODM_ReadAndConfig_MP_8723B_PHY_REG_PG(PDM_ODM_T pDM_Odm)
+{
+ u32 i = 0;
+ u32 ArrayLen = sizeof(Array_MP_8723B_PHY_REG_PG)/sizeof(u32);
+ u32 *Array = Array_MP_8723B_PHY_REG_PG;
+
+ ODM_RT_TRACE(
+ pDM_Odm,
+ ODM_COMP_INIT,
+ ODM_DBG_LOUD,
+ ("===> ODM_ReadAndConfig_MP_8723B_PHY_REG_PG\n")
+ );
+
+ pDM_Odm->PhyRegPgVersion = 1;
+ pDM_Odm->PhyRegPgValueType = PHY_REG_PG_EXACT_VALUE;
+
+ for (i = 0; i < ArrayLen; i += 6) {
+ u32 v1 = Array[i];
+ u32 v2 = Array[i+1];
+ u32 v3 = Array[i+2];
+ u32 v4 = Array[i+3];
+ u32 v5 = Array[i+4];
+ u32 v6 = Array[i+5];
+
+ odm_ConfigBB_PHY_REG_PG_8723B(pDM_Odm, v1, v2, v3, v4, v5, v6);
+ }
+}
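
All of the ODM_ReadAndConfig_MP_8723B_* loops above consume the same flat u32 array format: a first value below 0x40000000 is a plain (offset, data) write, while a larger value is a condition marker whose bits [29:28] classify the IF/ELSE/ENDIF-style entries and whose remaining bits are matched by CheckPositive() against the packed driver1/driver2 words. The standalone program below is a simplified sketch of that walk, not driver code: the demo table, write_reg() and the marker encoding are invented for illustration, and the real loops additionally consult CheckNegative() (currently a stub that always returns true).

/* Simplified, self-contained illustration of the conditional register-table
 * walk used above.  Marker values and helpers are made up for this sketch. */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

enum marker_kind { MK_NONE, MK_IF, MK_ELSE, MK_ENDIF };

static enum marker_kind kind_of(uint32_t v1)
{
	if (v1 < 0x40000000U)            /* same threshold as the driver loops */
		return MK_NONE;
	switch (v1 >> 28) {              /* demo encoding of the marker kind   */
	case 0xA: return MK_ELSE;
	case 0xB: return MK_ENDIF;
	default:  return MK_IF;
	}
}

static void write_reg(uint32_t offset, uint32_t data)
{
	printf("write 0x%03X = 0x%08X\n", offset, data);
}

/* Demo table: one plain pair, an IF/ELSE/ENDIF block, then another pair. */
static const uint32_t demo_tbl[] = {
	0x800, 0x80040000,
	0x80002000, 0x00000000,          /* IF  <condition words>              */
	0x051, 0x0006B10E,               /*   used when the condition matches  */
	0xA0000000, 0x00000000,          /* ELSE                               */
	0x051, 0x0006B04E,               /*   fallback value                   */
	0xB0000000, 0x00000000,          /* ENDIF                              */
	0x824, 0x00390204,
};

static void walk(const uint32_t *tbl, size_t n, bool cond_matches)
{
	bool taken = false;              /* did the IF side of the block fire? */
	bool skipping = false;

	for (size_t i = 0; i + 1 < n; i += 2) {
		switch (kind_of(tbl[i])) {
		case MK_NONE:
			if (!skipping)
				write_reg(tbl[i], tbl[i + 1]);
			break;
		case MK_IF:
			taken = cond_matches;
			skipping = !cond_matches;
			break;
		case MK_ELSE:
			skipping = taken;    /* ELSE body runs only if IF did not */
			break;
		case MK_ENDIF:
			taken = false;
			skipping = false;
			break;
		}
	}
}

int main(void)
{
	size_t n = sizeof(demo_tbl) / sizeof(demo_tbl[0]);

	puts("condition matches:");
	walk(demo_tbl, n, true);
	puts("condition does not match:");
	walk(demo_tbl, n, false);
	return 0;
}

In the driver itself the per-block decision comes from CheckPositive(), which packs cut version, platform, package type, interface and board type into one comparison word and the RF front-end attachments into the other, so a single table can carry settings for several hardware variants.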
diff --git a/drivers/staging/rtl8723bs/hal/HalHWImg8723B_BB.h b/drivers/staging/rtl8723bs/hal/HalHWImg8723B_BB.h
new file mode 100644
index 000000000000..41fe0ba16914
--- /dev/null
+++ b/drivers/staging/rtl8723bs/hal/HalHWImg8723B_BB.h
@@ -0,0 +1,48 @@
+/******************************************************************************
+*
+* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+*
+* This program is free software; you can redistribute it and/or modify it
+* under the terms of version 2 of the GNU General Public License as
+* published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful, but WITHOUT
+* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+* more details.
+*
+******************************************************************************/
+
+#ifndef __INC_MP_BB_HW_IMG_8723B_H
+#define __INC_MP_BB_HW_IMG_8723B_H
+
+
+/******************************************************************************
+* AGC_TAB.TXT
+******************************************************************************/
+
+void
+ODM_ReadAndConfig_MP_8723B_AGC_TAB(/* TC: Test Chip, MP: MP Chip */
+ PDM_ODM_T pDM_Odm
+);
+
+/******************************************************************************
+* PHY_REG.TXT
+******************************************************************************/
+
+void
+ODM_ReadAndConfig_MP_8723B_PHY_REG(/* TC: Test Chip, MP: MP Chip */
+ PDM_ODM_T pDM_Odm
+);
+
+/******************************************************************************
+* PHY_REG_PG.TXT
+******************************************************************************/
+
+void
+ODM_ReadAndConfig_MP_8723B_PHY_REG_PG(/* TC: Test Chip, MP: MP Chip */
+ PDM_ODM_T pDM_Odm
+);
+u32 ODM_GetVersion_MP_8723B_PHY_REG_PG(void);
+
+#endif
diff --git a/drivers/staging/rtl8723bs/hal/HalHWImg8723B_MAC.c b/drivers/staging/rtl8723bs/hal/HalHWImg8723B_MAC.c
new file mode 100644
index 000000000000..b868e26f20ac
--- /dev/null
+++ b/drivers/staging/rtl8723bs/hal/HalHWImg8723B_MAC.c
@@ -0,0 +1,302 @@
+/******************************************************************************
+*
+* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+*
+* This program is free software; you can redistribute it and/or modify it
+* under the terms of version 2 of the GNU General Public License as
+* published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful, but WITHOUT
+* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+* more details.
+*
+******************************************************************************/
+
+
+#include "odm_precomp.h"
+
+static bool CheckPositive(
+ PDM_ODM_T pDM_Odm, const u32 Condition1, const u32 Condition2
+)
+{
+ u8 _BoardType =
+ ((pDM_Odm->BoardType & BIT4) >> 4) << 0 | /* _GLNA */
+ ((pDM_Odm->BoardType & BIT3) >> 3) << 1 | /* _GPA */
+ ((pDM_Odm->BoardType & BIT7) >> 7) << 2 | /* _ALNA */
+ ((pDM_Odm->BoardType & BIT6) >> 6) << 3 | /* _APA */
+ ((pDM_Odm->BoardType & BIT2) >> 2) << 4; /* _BT */
+
+ u32 cond1 = Condition1, cond2 = Condition2;
+ u32 driver1 =
+ pDM_Odm->CutVersion << 24 |
+ pDM_Odm->SupportPlatform << 16 |
+ pDM_Odm->PackageType << 12 |
+ pDM_Odm->SupportInterface << 8 |
+ _BoardType;
+
+ u32 driver2 =
+ pDM_Odm->TypeGLNA << 0 |
+ pDM_Odm->TypeGPA << 8 |
+ pDM_Odm->TypeALNA << 16 |
+ pDM_Odm->TypeAPA << 24;
+
+ ODM_RT_TRACE(
+ pDM_Odm,
+ ODM_COMP_INIT,
+ ODM_DBG_TRACE,
+ (
+ "===> [8812A] CheckPositive (cond1, cond2) = (0x%X 0x%X)\n",
+ cond1,
+ cond2
+ )
+ );
+ ODM_RT_TRACE(
+ pDM_Odm,
+ ODM_COMP_INIT,
+ ODM_DBG_TRACE,
+ (
+ "===> [8812A] CheckPositive (driver1, driver2) = (0x%X 0x%X)\n",
+ driver1,
+ driver2
+ )
+ );
+
+ ODM_RT_TRACE(
+ pDM_Odm,
+ ODM_COMP_INIT,
+ ODM_DBG_TRACE,
+ (
+ " (Platform, Interface) = (0x%X, 0x%X)\n",
+ pDM_Odm->SupportPlatform,
+ pDM_Odm->SupportInterface
+ )
+ );
+ ODM_RT_TRACE(
+ pDM_Odm,
+ ODM_COMP_INIT,
+ ODM_DBG_TRACE,
+ (
+ " (Board, Package) = (0x%X, 0x%X)\n",
+ pDM_Odm->BoardType,
+ pDM_Odm->PackageType
+ )
+ );
+
+
+ /* Value Defined Check =============== */
+ /* QFN Type [15:12] and Cut Version [27:24] require a value check */
+
+ if (((cond1 & 0x0000F000) != 0) && ((cond1 & 0x0000F000) != (driver1 & 0x0000F000)))
+ return false;
+ if (((cond1 & 0x0F000000) != 0) && ((cond1 & 0x0F000000) != (driver1 & 0x0F000000)))
+ return false;
+
+ /* Bit Defined Check ================ */
+ /* Bits [31:28] and [23:20] are don't-care */
+ /* */
+ cond1 &= 0x000F0FFF;
+ driver1 &= 0x000F0FFF;
+
+ if ((cond1 & driver1) == cond1) {
+ u32 bitMask = 0;
+ if ((cond1 & 0x0F) == 0) /* BoardType is DONTCARE */
+ return true;
+
+ if ((cond1 & BIT0) != 0) /* GLNA */
+ bitMask |= 0x000000FF;
+ if ((cond1 & BIT1) != 0) /* GPA */
+ bitMask |= 0x0000FF00;
+ if ((cond1 & BIT2) != 0) /* ALNA */
+ bitMask |= 0x00FF0000;
+ if ((cond1 & BIT3) != 0) /* APA */
+ bitMask |= 0xFF000000;
+
+ if ((cond2 & bitMask) == (driver2 & bitMask)) /* BoardType of each RF path is matched */
+ return true;
+ }
+ return false;
+}
+
+static bool CheckNegative(
+ PDM_ODM_T pDM_Odm, const u32 Condition1, const u32 Condition2
+)
+{
+ return true;
+}
+
+/******************************************************************************
+* MAC_REG.TXT
+******************************************************************************/
+
+static u32 Array_MP_8723B_MAC_REG[] = {
+ 0x02F, 0x00000030,
+ 0x035, 0x00000000,
+ 0x039, 0x00000008,
+ 0x04E, 0x000000E0,
+ 0x064, 0x00000000,
+ 0x067, 0x00000020,
+ 0x428, 0x0000000A,
+ 0x429, 0x00000010,
+ 0x430, 0x00000000,
+ 0x431, 0x00000000,
+ 0x432, 0x00000000,
+ 0x433, 0x00000001,
+ 0x434, 0x00000004,
+ 0x435, 0x00000005,
+ 0x436, 0x00000007,
+ 0x437, 0x00000008,
+ 0x43C, 0x00000004,
+ 0x43D, 0x00000005,
+ 0x43E, 0x00000007,
+ 0x43F, 0x00000008,
+ 0x440, 0x0000005D,
+ 0x441, 0x00000001,
+ 0x442, 0x00000000,
+ 0x444, 0x00000010,
+ 0x445, 0x00000000,
+ 0x446, 0x00000000,
+ 0x447, 0x00000000,
+ 0x448, 0x00000000,
+ 0x449, 0x000000F0,
+ 0x44A, 0x0000000F,
+ 0x44B, 0x0000003E,
+ 0x44C, 0x00000010,
+ 0x44D, 0x00000000,
+ 0x44E, 0x00000000,
+ 0x44F, 0x00000000,
+ 0x450, 0x00000000,
+ 0x451, 0x000000F0,
+ 0x452, 0x0000000F,
+ 0x453, 0x00000000,
+ 0x456, 0x0000005E,
+ 0x460, 0x00000066,
+ 0x461, 0x00000066,
+ 0x4C8, 0x000000FF,
+ 0x4C9, 0x00000008,
+ 0x4CC, 0x000000FF,
+ 0x4CD, 0x000000FF,
+ 0x4CE, 0x00000001,
+ 0x500, 0x00000026,
+ 0x501, 0x000000A2,
+ 0x502, 0x0000002F,
+ 0x503, 0x00000000,
+ 0x504, 0x00000028,
+ 0x505, 0x000000A3,
+ 0x506, 0x0000005E,
+ 0x507, 0x00000000,
+ 0x508, 0x0000002B,
+ 0x509, 0x000000A4,
+ 0x50A, 0x0000005E,
+ 0x50B, 0x00000000,
+ 0x50C, 0x0000004F,
+ 0x50D, 0x000000A4,
+ 0x50E, 0x00000000,
+ 0x50F, 0x00000000,
+ 0x512, 0x0000001C,
+ 0x514, 0x0000000A,
+ 0x516, 0x0000000A,
+ 0x525, 0x0000004F,
+ 0x550, 0x00000010,
+ 0x551, 0x00000010,
+ 0x559, 0x00000002,
+ 0x55C, 0x00000050,
+ 0x55D, 0x000000FF,
+ 0x605, 0x00000030,
+ 0x608, 0x0000000E,
+ 0x609, 0x0000002A,
+ 0x620, 0x000000FF,
+ 0x621, 0x000000FF,
+ 0x622, 0x000000FF,
+ 0x623, 0x000000FF,
+ 0x624, 0x000000FF,
+ 0x625, 0x000000FF,
+ 0x626, 0x000000FF,
+ 0x627, 0x000000FF,
+ 0x638, 0x00000050,
+ 0x63C, 0x0000000A,
+ 0x63D, 0x0000000A,
+ 0x63E, 0x0000000E,
+ 0x63F, 0x0000000E,
+ 0x640, 0x00000040,
+ 0x642, 0x00000040,
+ 0x643, 0x00000000,
+ 0x652, 0x000000C8,
+ 0x66E, 0x00000005,
+ 0x700, 0x00000021,
+ 0x701, 0x00000043,
+ 0x702, 0x00000065,
+ 0x703, 0x00000087,
+ 0x708, 0x00000021,
+ 0x709, 0x00000043,
+ 0x70A, 0x00000065,
+ 0x70B, 0x00000087,
+ 0x765, 0x00000018,
+ 0x76E, 0x00000004,
+
+};
+
+void ODM_ReadAndConfig_MP_8723B_MAC_REG(PDM_ODM_T pDM_Odm)
+{
+ u32 i = 0;
+ u32 ArrayLen = sizeof(Array_MP_8723B_MAC_REG)/sizeof(u32);
+ u32 *Array = Array_MP_8723B_MAC_REG;
+
+ ODM_RT_TRACE(
+ pDM_Odm,
+ ODM_COMP_INIT,
+ ODM_DBG_LOUD,
+ ("===> ODM_ReadAndConfig_MP_8723B_MAC_REG\n")
+ );
+
+ for (i = 0; i < ArrayLen; i += 2) {
+ u32 v1 = Array[i];
+ u32 v2 = Array[i+1];
+
+ /* This (offset, data) pair is applied unconditionally. */
+ if (v1 < 0x40000000) {
+ odm_ConfigMAC_8723B(pDM_Odm, v1, (u8)v2);
+ continue;
+ } else {
+ /* This line marks the beginning of a conditional branch. */
+ bool bMatched = true;
+ u8 cCond = (u8)((v1 & (BIT29|BIT28)) >> 28);
+
+ if (cCond == COND_ELSE) { /* ELSE, ENDIF */
+ bMatched = true;
+ READ_NEXT_PAIR(v1, v2, i);
+ } else if (!CheckPositive(pDM_Odm, v1, v2)) {
+ bMatched = false;
+ READ_NEXT_PAIR(v1, v2, i);
+ READ_NEXT_PAIR(v1, v2, i);
+ } else {
+ READ_NEXT_PAIR(v1, v2, i);
+ if (!CheckNegative(pDM_Odm, v1, v2))
+ bMatched = false;
+ else
+ bMatched = true;
+ READ_NEXT_PAIR(v1, v2, i);
+ }
+
+ if (bMatched == false) {
+ /* Condition isn't matched. Discard the following (offset, data) pairs. */
+ while (v1 < 0x40000000 && i < ArrayLen-2)
+ READ_NEXT_PAIR(v1, v2, i);
+
+ i -= 2; /* compensate for the for-loop's += 2 */
+ } else { /* Configure matched pairs and skip to end of if-else. */
+ while (v1 < 0x40000000 && i < ArrayLen-2) {
+ odm_ConfigMAC_8723B(pDM_Odm, v1, (u8)v2);
+ READ_NEXT_PAIR(v1, v2, i);
+ }
+
+ /* Keep reading until ENDIF. */
+ cCond = (u8)((v1 & (BIT29|BIT28)) >> 28);
+ while (cCond != COND_ENDIF && i < ArrayLen-2) {
+ READ_NEXT_PAIR(v1, v2, i);
+ cCond = (u8)((v1 & (BIT29|BIT28)) >> 28);
+ }
+ }
+ }
+ }
+}
diff --git a/drivers/staging/rtl8723bs/hal/HalHWImg8723B_MAC.h b/drivers/staging/rtl8723bs/hal/HalHWImg8723B_MAC.h
new file mode 100644
index 000000000000..ae5dd3ccf939
--- /dev/null
+++ b/drivers/staging/rtl8723bs/hal/HalHWImg8723B_MAC.h
@@ -0,0 +1,28 @@
+/******************************************************************************
+*
+* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+*
+* This program is free software; you can redistribute it and/or modify it
+* under the terms of version 2 of the GNU General Public License as
+* published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful, but WITHOUT
+* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+* more details.
+*
+******************************************************************************/
+
+#ifndef __INC_MP_MAC_HW_IMG_8723B_H
+#define __INC_MP_MAC_HW_IMG_8723B_H
+
+
+/******************************************************************************
+* MAC_REG.TXT
+******************************************************************************/
+
+void
+ODM_ReadAndConfig_MP_8723B_MAC_REG(/* TC: Test Chip, MP: MP Chip */
+ PDM_ODM_T pDM_Odm
+);
+#endif
diff --git a/drivers/staging/rtl8723bs/hal/HalHWImg8723B_RF.c b/drivers/staging/rtl8723bs/hal/HalHWImg8723B_RF.c
new file mode 100644
index 000000000000..84a0be7ba697
--- /dev/null
+++ b/drivers/staging/rtl8723bs/hal/HalHWImg8723B_RF.c
@@ -0,0 +1,799 @@
+/******************************************************************************
+*
+* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+*
+* This program is free software; you can redistribute it and/or modify it
+* under the terms of version 2 of the GNU General Public License as
+* published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful, but WITHOUT
+* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+* more details.
+*
+******************************************************************************/
+
+
+#include "odm_precomp.h"
+
+static bool CheckPositive(
+ PDM_ODM_T pDM_Odm, const u32 Condition1, const u32 Condition2
+)
+{
+ u8 _BoardType =
+ ((pDM_Odm->BoardType & BIT4) >> 4) << 0 | /* _GLNA */
+ ((pDM_Odm->BoardType & BIT3) >> 3) << 1 | /* _GPA */
+ ((pDM_Odm->BoardType & BIT7) >> 7) << 2 | /* _ALNA */
+ ((pDM_Odm->BoardType & BIT6) >> 6) << 3 | /* _APA */
+ ((pDM_Odm->BoardType & BIT2) >> 2) << 4; /* _BT */
+
+ u32 cond1 = Condition1, cond2 = Condition2;
+ u32 driver1 =
+ pDM_Odm->CutVersion << 24 |
+ pDM_Odm->SupportPlatform << 16 |
+ pDM_Odm->PackageType << 12 |
+ pDM_Odm->SupportInterface << 8 |
+ _BoardType;
+
+ u32 driver2 =
+ pDM_Odm->TypeGLNA << 0 |
+ pDM_Odm->TypeGPA << 8 |
+ pDM_Odm->TypeALNA << 16 |
+ pDM_Odm->TypeAPA << 24;
+
+ ODM_RT_TRACE(
+ pDM_Odm,
+ ODM_COMP_INIT,
+ ODM_DBG_TRACE,
+ (
+ "===> [8812A] CheckPositive (cond1, cond2) = (0x%X 0x%X)\n",
+ cond1,
+ cond2
+ )
+ );
+ ODM_RT_TRACE(
+ pDM_Odm,
+ ODM_COMP_INIT,
+ ODM_DBG_TRACE,
+ (
+ "===> [8812A] CheckPositive (driver1, driver2) = (0x%X 0x%X)\n",
+ driver1,
+ driver2
+ )
+ );
+
+ ODM_RT_TRACE(
+ pDM_Odm,
+ ODM_COMP_INIT,
+ ODM_DBG_TRACE,
+ (
+ " (Platform, Interface) = (0x%X, 0x%X)\n",
+ pDM_Odm->SupportPlatform,
+ pDM_Odm->SupportInterface
+ )
+ );
+ ODM_RT_TRACE(
+ pDM_Odm,
+ ODM_COMP_INIT,
+ ODM_DBG_TRACE,
+ (
+ " (Board, Package) = (0x%X, 0x%X)\n",
+ pDM_Odm->BoardType,
+ pDM_Odm->PackageType
+ )
+ );
+
+ /* Value Defined Check =============== */
+ /* QFN Type [15:12] and Cut Version [27:24] require a value check */
+
+ if (
+ ((cond1 & 0x0000F000) != 0) &&
+ ((cond1 & 0x0000F000) != (driver1 & 0x0000F000))
+ )
+ return false;
+
+ if (
+ ((cond1 & 0x0F000000) != 0) &&
+ ((cond1 & 0x0F000000) != (driver1 & 0x0F000000))
+ )
+ return false;
+
+ /* Bit Defined Check ================ */
+ /* Bits [31:28] and [23:20] are don't-care */
+ cond1 &= 0x000F0FFF;
+ driver1 &= 0x000F0FFF;
+
+ if ((cond1 & driver1) == cond1) {
+ u32 bitMask = 0;
+
+ if ((cond1 & 0x0F) == 0) /* BoardType is DONTCARE */
+ return true;
+
+ if ((cond1 & BIT0) != 0) /* GLNA */
+ bitMask |= 0x000000FF;
+ if ((cond1 & BIT1) != 0) /* GPA */
+ bitMask |= 0x0000FF00;
+ if ((cond1 & BIT2) != 0) /* ALNA */
+ bitMask |= 0x00FF0000;
+ if ((cond1 & BIT3) != 0) /* APA */
+ bitMask |= 0xFF000000;
+
+ /* BoardType of each RF path is matched */
+ if ((cond2 & bitMask) == (driver2 & bitMask))
+ return true;
+
+ return false;
+ }
+
+ return false;
+}
+
+static bool CheckNegative(
+ PDM_ODM_T pDM_Odm, const u32 Condition1, const u32 Condition2
+)
+{
+ return true;
+}
+
+/******************************************************************************
+* RadioA.TXT
+******************************************************************************/
+
+static u32 Array_MP_8723B_RadioA[] = {
+ 0x000, 0x00010000,
+ 0x0B0, 0x000DFFE0,
+ 0x0FE, 0x00000000,
+ 0x0FE, 0x00000000,
+ 0x0FE, 0x00000000,
+ 0x0B1, 0x00000018,
+ 0x0FE, 0x00000000,
+ 0x0FE, 0x00000000,
+ 0x0FE, 0x00000000,
+ 0x0B2, 0x00084C00,
+ 0x0B5, 0x0000D2CC,
+ 0x0B6, 0x000925AA,
+ 0x0B7, 0x00000010,
+ 0x0B8, 0x0000907F,
+ 0x05C, 0x00000002,
+ 0x07C, 0x00000002,
+ 0x07E, 0x00000005,
+ 0x08B, 0x0006FC00,
+ 0x0B0, 0x000FF9F0,
+ 0x01C, 0x000739D2,
+ 0x01E, 0x00000000,
+ 0x0DF, 0x00000780,
+ 0x050, 0x00067435,
+ 0x80002000, 0x00000000, 0x40000000, 0x00000000,
+ 0x051, 0x0006B10E,
+ 0x90003000, 0x00000000, 0x40000000, 0x00000000,
+ 0x051, 0x0006B10E,
+ 0x90004000, 0x00000000, 0x40000000, 0x00000000,
+ 0x051, 0x0006B10E,
+ 0xA0000000, 0x00000000,
+ 0x051, 0x0006B04E,
+ 0xB0000000, 0x00000000,
+ 0x052, 0x000007D2,
+ 0x053, 0x00000000,
+ 0x054, 0x00050400,
+ 0x055, 0x0004026E,
+ 0x0DD, 0x0000004C,
+ 0x070, 0x00067435,
+ 0x80002000, 0x00000000, 0x40000000, 0x00000000,
+ 0x071, 0x0006B10E,
+ 0x90003000, 0x00000000, 0x40000000, 0x00000000,
+ 0x071, 0x0006B10E,
+ 0x90004000, 0x00000000, 0x40000000, 0x00000000,
+ 0x071, 0x0006B10E,
+ 0xA0000000, 0x00000000,
+ 0x071, 0x0006B04E,
+ 0xB0000000, 0x00000000,
+ 0x072, 0x000007D2,
+ 0x073, 0x00000000,
+ 0x074, 0x00050400,
+ 0x075, 0x0004026E,
+ 0x0EF, 0x00000100,
+ 0x034, 0x0000ADD7,
+ 0x035, 0x00005C00,
+ 0x034, 0x00009DD4,
+ 0x035, 0x00005000,
+ 0x034, 0x00008DD1,
+ 0x035, 0x00004400,
+ 0x034, 0x00007DCE,
+ 0x035, 0x00003800,
+ 0x034, 0x00006CD1,
+ 0x035, 0x00004400,
+ 0x034, 0x00005CCE,
+ 0x035, 0x00003800,
+ 0x034, 0x000048CE,
+ 0x035, 0x00004400,
+ 0x034, 0x000034CE,
+ 0x035, 0x00003800,
+ 0x034, 0x00002451,
+ 0x035, 0x00004400,
+ 0x034, 0x0000144E,
+ 0x035, 0x00003800,
+ 0x034, 0x00000051,
+ 0x035, 0x00004400,
+ 0x0EF, 0x00000000,
+ 0x0EF, 0x00000100,
+ 0x0ED, 0x00000010,
+ 0x044, 0x0000ADD7,
+ 0x044, 0x00009DD4,
+ 0x044, 0x00008DD1,
+ 0x044, 0x00007DCE,
+ 0x044, 0x00006CC1,
+ 0x044, 0x00005CCE,
+ 0x044, 0x000044D1,
+ 0x044, 0x000034CE,
+ 0x044, 0x00002451,
+ 0x044, 0x0000144E,
+ 0x044, 0x00000051,
+ 0x0EF, 0x00000000,
+ 0x0ED, 0x00000000,
+ 0x07F, 0x00020080,
+ 0x0EF, 0x00002000,
+ 0x03B, 0x000380EF,
+ 0x03B, 0x000302FE,
+ 0x03B, 0x00028CE6,
+ 0x03B, 0x000200BC,
+ 0x03B, 0x000188A5,
+ 0x03B, 0x00010FBC,
+ 0x03B, 0x00008F71,
+ 0x03B, 0x00000900,
+ 0x0EF, 0x00000000,
+ 0x0ED, 0x00000001,
+ 0x040, 0x000380EF,
+ 0x040, 0x000302FE,
+ 0x040, 0x00028CE6,
+ 0x040, 0x000200BC,
+ 0x040, 0x000188A5,
+ 0x040, 0x00010FBC,
+ 0x040, 0x00008F71,
+ 0x040, 0x00000900,
+ 0x0ED, 0x00000000,
+ 0x082, 0x00080000,
+ 0x083, 0x00008000,
+ 0x084, 0x00048D80,
+ 0x085, 0x00068000,
+ 0x0A2, 0x00080000,
+ 0x0A3, 0x00008000,
+ 0x0A4, 0x00048D80,
+ 0x0A5, 0x00068000,
+ 0x0ED, 0x00000002,
+ 0x0EF, 0x00000002,
+ 0x056, 0x00000032,
+ 0x076, 0x00000032,
+ 0x001, 0x00000780,
+
+};
+
+void ODM_ReadAndConfig_MP_8723B_RadioA(PDM_ODM_T pDM_Odm)
+{
+ u32 i = 0;
+ u32 ArrayLen = sizeof(Array_MP_8723B_RadioA)/sizeof(u32);
+ u32 *Array = Array_MP_8723B_RadioA;
+
+ ODM_RT_TRACE(
+ pDM_Odm,
+ ODM_COMP_INIT,
+ ODM_DBG_LOUD,
+ ("===> ODM_ReadAndConfig_MP_8723B_RadioA\n")
+ );
+
+ for (i = 0; i < ArrayLen; i += 2) {
+ u32 v1 = Array[i];
+ u32 v2 = Array[i+1];
+
+ /* This (offset, data) pair is applied unconditionally. */
+ if (v1 < 0x40000000) {
+ odm_ConfigRF_RadioA_8723B(pDM_Odm, v1, v2);
+ continue;
+ } else {
+ /* This line marks the beginning of a conditional branch. */
+ bool bMatched = true;
+ u8 cCond = (u8)((v1 & (BIT29|BIT28)) >> 28);
+
+ if (cCond == COND_ELSE) { /* ELSE, ENDIF */
+ bMatched = true;
+ READ_NEXT_PAIR(v1, v2, i);
+ } else if (!CheckPositive(pDM_Odm, v1, v2)) {
+ bMatched = false;
+ READ_NEXT_PAIR(v1, v2, i);
+ READ_NEXT_PAIR(v1, v2, i);
+ } else {
+ READ_NEXT_PAIR(v1, v2, i);
+ if (!CheckNegative(pDM_Odm, v1, v2))
+ bMatched = false;
+ else
+ bMatched = true;
+ READ_NEXT_PAIR(v1, v2, i);
+ }
+
+ if (bMatched == false) {
+ /* Condition isn't matched.
+ * Discard the following (offset, data) pairs.
+ */
+ while (v1 < 0x40000000 && i < ArrayLen-2)
+ READ_NEXT_PAIR(v1, v2, i);
+
+ i -= 2; /* compensate for the for-loop's += 2 */
+ } else {
+ /* Configure matched pairs and skip to end of if-else. */
+ while (v1 < 0x40000000 && i < ArrayLen-2) {
+ odm_ConfigRF_RadioA_8723B(pDM_Odm, v1, v2);
+ READ_NEXT_PAIR(v1, v2, i);
+ }
+
+ /* Keep reading until ENDIF. */
+ cCond = (u8)((v1 & (BIT29|BIT28)) >> 28);
+ while (cCond != COND_ENDIF && i < ArrayLen-2) {
+ READ_NEXT_PAIR(v1, v2, i);
+ cCond = (u8)((v1 & (BIT29|BIT28)) >> 28);
+ }
+ }
+ }
+ }
+}
+
+/******************************************************************************
+* TxPowerTrack_SDIO.TXT
+******************************************************************************/
+
+static u8 gDeltaSwingTableIdx_MP_5GB_N_TxPowerTrack_SDIO_8723B[][DELTA_SWINGIDX_SIZE] = {
+ {
+ 0, 1, 1, 2, 2, 3, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9,
+ 9, 10, 11, 12, 12, 13, 13, 14, 14, 14, 14, 14, 14, 14
+ },
+ {
+ 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10,
+ 10, 11, 11, 12, 12, 13, 13, 14, 14, 14, 14, 14, 14, 14
+ },
+ {
+ 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10,
+ 10, 11, 11, 12, 12, 13, 13, 14, 14, 14, 14, 14, 14, 14
+ },
+};
+static u8 gDeltaSwingTableIdx_MP_5GB_P_TxPowerTrack_SDIO_8723B[][DELTA_SWINGIDX_SIZE] = {
+ {
+ 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11, 12,
+ 12, 13, 14, 15, 15, 16, 16, 17, 17, 18, 19, 20, 20, 20
+ },
+ {
+ 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11, 12,
+ 12, 13, 14, 15, 15, 16, 17, 18, 18, 19, 19, 20, 20, 20
+ },
+ {
+ 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11, 12,
+ 12, 13, 14, 15, 15, 16, 17, 18, 18, 19, 20, 21, 21, 21
+ },
+};
+static u8 gDeltaSwingTableIdx_MP_5GA_N_TxPowerTrack_SDIO_8723B[][DELTA_SWINGIDX_SIZE] = {
+ {
+ 0, 1, 2, 3, 3, 4, 4, 5, 5, 6, 7, 8, 8, 9, 9, 10,
+ 10, 11, 11, 12, 12, 13, 13, 14, 14, 14, 14, 14, 14, 14
+ },
+ {
+ 0, 1, 2, 3, 3, 4, 5, 6, 6, 6, 7, 7, 8, 8, 9, 10,
+ 11, 11, 12, 13, 13, 14, 15, 16, 16, 16, 16, 16, 16, 16
+ },
+ {
+ 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 10, 11,
+ 11, 12, 13, 14, 14, 15, 15, 16, 16, 16, 16, 16, 16, 16
+ },
+};
+static u8 gDeltaSwingTableIdx_MP_5GA_P_TxPowerTrack_SDIO_8723B[][DELTA_SWINGIDX_SIZE] = {
+ {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ },
+ {
+ 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11, 12,
+ 12, 13, 14, 15, 15, 16, 17, 18, 18, 19, 20, 21, 21, 21
+ },
+ {
+ 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11, 12,
+ 12, 13, 14, 15, 15, 16, 17, 18, 18, 19, 20, 21, 21, 21
+ },
+};
+static u8 gDeltaSwingTableIdx_MP_2GB_N_TxPowerTrack_SDIO_8723B[] = {
+ 0, 0, 1, 2, 2, 2, 3, 3, 3, 4, 5, 5, 6, 6, 6, 6,
+ 7, 7, 7, 8, 8, 9, 9, 10, 10, 11, 12, 13, 14, 15
+};
+static u8 gDeltaSwingTableIdx_MP_2GB_P_TxPowerTrack_SDIO_8723B[] = {
+ 0, 0, 1, 2, 2, 3, 3, 4, 5, 5, 6, 6, 7, 7, 8, 8,
+ 9, 9, 10, 10, 10, 11, 11, 12, 12, 13, 13, 14, 15, 15
+};
+static u8 gDeltaSwingTableIdx_MP_2GA_N_TxPowerTrack_SDIO_8723B[] = {
+ 0, 0, 1, 2, 2, 2, 3, 3, 3, 4, 5, 5, 6, 6, 6, 6,
+ 7, 7, 7, 8, 8, 9, 9, 10, 10, 11, 12, 13, 14, 15
+};
+static u8 gDeltaSwingTableIdx_MP_2GA_P_TxPowerTrack_SDIO_8723B[] = {
+ 0, 0, 1, 2, 2, 3, 3, 4, 5, 5, 6, 6, 7, 7, 8, 8,
+ 9, 9, 10, 10, 10, 11, 11, 12, 12, 13, 13, 14, 15, 15
+};
+static u8 gDeltaSwingTableIdx_MP_2GCCKB_N_TxPowerTrack_SDIO_8723B[] = {
+ 0, 0, 1, 2, 2, 3, 3, 4, 4, 5, 6, 6, 7, 7, 7, 8,
+ 8, 8, 9, 9, 9, 10, 10, 11, 11, 12, 12, 13, 14, 15
+};
+static u8 gDeltaSwingTableIdx_MP_2GCCKB_P_TxPowerTrack_SDIO_8723B[] = {
+ 0, 0, 1, 2, 2, 2, 3, 3, 3, 4, 5, 5, 6, 6, 7, 7,
+ 8, 8, 9, 9, 9, 10, 10, 11, 11, 12, 12, 13, 14, 15
+};
+static u8 gDeltaSwingTableIdx_MP_2GCCKA_N_TxPowerTrack_SDIO_8723B[] = {
+ 0, 0, 1, 2, 2, 3, 3, 4, 4, 5, 6, 6, 7, 7, 7, 8,
+ 8, 8, 9, 9, 9, 10, 10, 11, 11, 12, 12, 13, 14, 15
+};
+static u8 gDeltaSwingTableIdx_MP_2GCCKA_P_TxPowerTrack_SDIO_8723B[] = {
+ 0, 0, 1, 2, 2, 2, 3, 3, 3, 4, 5, 5, 6, 6, 7, 7,
+ 8, 8, 9, 9, 9, 10, 10, 11, 11, 12, 12, 13, 14, 15
+};
+
+void ODM_ReadAndConfig_MP_8723B_TxPowerTrack_SDIO(PDM_ODM_T pDM_Odm)
+{
+ PODM_RF_CAL_T pRFCalibrateInfo = &(pDM_Odm->RFCalibrateInfo);
+
+ ODM_RT_TRACE(
+ pDM_Odm,
+ ODM_COMP_INIT,
+ ODM_DBG_LOUD,
+ ("===> ODM_ReadAndConfig_MP_MP_8723B\n")
+ );
+
+
+ memcpy(
+ pRFCalibrateInfo->DeltaSwingTableIdx_2GA_P,
+ gDeltaSwingTableIdx_MP_2GA_P_TxPowerTrack_SDIO_8723B,
+ DELTA_SWINGIDX_SIZE
+ );
+ memcpy(
+ pRFCalibrateInfo->DeltaSwingTableIdx_2GA_N,
+ gDeltaSwingTableIdx_MP_2GA_N_TxPowerTrack_SDIO_8723B,
+ DELTA_SWINGIDX_SIZE
+ );
+ memcpy(
+ pRFCalibrateInfo->DeltaSwingTableIdx_2GB_P,
+ gDeltaSwingTableIdx_MP_2GB_P_TxPowerTrack_SDIO_8723B,
+ DELTA_SWINGIDX_SIZE
+ );
+ memcpy(
+ pRFCalibrateInfo->DeltaSwingTableIdx_2GB_N,
+ gDeltaSwingTableIdx_MP_2GB_N_TxPowerTrack_SDIO_8723B,
+ DELTA_SWINGIDX_SIZE
+ );
+
+ memcpy(
+ pRFCalibrateInfo->DeltaSwingTableIdx_2GCCKA_P,
+ gDeltaSwingTableIdx_MP_2GCCKA_P_TxPowerTrack_SDIO_8723B,
+ DELTA_SWINGIDX_SIZE
+ );
+ memcpy(
+ pRFCalibrateInfo->DeltaSwingTableIdx_2GCCKA_N,
+ gDeltaSwingTableIdx_MP_2GCCKA_N_TxPowerTrack_SDIO_8723B,
+ DELTA_SWINGIDX_SIZE
+ );
+ memcpy(
+ pRFCalibrateInfo->DeltaSwingTableIdx_2GCCKB_P,
+ gDeltaSwingTableIdx_MP_2GCCKB_P_TxPowerTrack_SDIO_8723B,
+ DELTA_SWINGIDX_SIZE
+ );
+ memcpy(
+ pRFCalibrateInfo->DeltaSwingTableIdx_2GCCKB_N,
+ gDeltaSwingTableIdx_MP_2GCCKB_N_TxPowerTrack_SDIO_8723B,
+ DELTA_SWINGIDX_SIZE
+ );
+
+ memcpy(
+ pRFCalibrateInfo->DeltaSwingTableIdx_5GA_P,
+ gDeltaSwingTableIdx_MP_5GA_P_TxPowerTrack_SDIO_8723B,
+ DELTA_SWINGIDX_SIZE*3
+ );
+ memcpy(
+ pRFCalibrateInfo->DeltaSwingTableIdx_5GA_N,
+ gDeltaSwingTableIdx_MP_5GA_N_TxPowerTrack_SDIO_8723B,
+ DELTA_SWINGIDX_SIZE*3
+ );
+ memcpy(
+ pRFCalibrateInfo->DeltaSwingTableIdx_5GB_P,
+ gDeltaSwingTableIdx_MP_5GB_P_TxPowerTrack_SDIO_8723B,
+ DELTA_SWINGIDX_SIZE*3
+ );
+ memcpy(
+ pRFCalibrateInfo->DeltaSwingTableIdx_5GB_N,
+ gDeltaSwingTableIdx_MP_5GB_N_TxPowerTrack_SDIO_8723B,
+ DELTA_SWINGIDX_SIZE*3
+ );
+}
+
+/******************************************************************************
+* TXPWR_LMT.TXT
+******************************************************************************/
+
+static u8 *Array_MP_8723B_TXPWR_LMT[] = {
+ "FCC", "2.4G", "20M", "CCK", "1T", "01", "32",
+ "ETSI", "2.4G", "20M", "CCK", "1T", "01", "32",
+ "MKK", "2.4G", "20M", "CCK", "1T", "01", "32",
+ "FCC", "2.4G", "20M", "CCK", "1T", "02", "32",
+ "ETSI", "2.4G", "20M", "CCK", "1T", "02", "32",
+ "MKK", "2.4G", "20M", "CCK", "1T", "02", "32",
+ "FCC", "2.4G", "20M", "CCK", "1T", "03", "32",
+ "ETSI", "2.4G", "20M", "CCK", "1T", "03", "32",
+ "MKK", "2.4G", "20M", "CCK", "1T", "03", "32",
+ "FCC", "2.4G", "20M", "CCK", "1T", "04", "32",
+ "ETSI", "2.4G", "20M", "CCK", "1T", "04", "32",
+ "MKK", "2.4G", "20M", "CCK", "1T", "04", "32",
+ "FCC", "2.4G", "20M", "CCK", "1T", "05", "32",
+ "ETSI", "2.4G", "20M", "CCK", "1T", "05", "32",
+ "MKK", "2.4G", "20M", "CCK", "1T", "05", "32",
+ "FCC", "2.4G", "20M", "CCK", "1T", "06", "32",
+ "ETSI", "2.4G", "20M", "CCK", "1T", "06", "32",
+ "MKK", "2.4G", "20M", "CCK", "1T", "06", "32",
+ "FCC", "2.4G", "20M", "CCK", "1T", "07", "32",
+ "ETSI", "2.4G", "20M", "CCK", "1T", "07", "32",
+ "MKK", "2.4G", "20M", "CCK", "1T", "07", "32",
+ "FCC", "2.4G", "20M", "CCK", "1T", "08", "32",
+ "ETSI", "2.4G", "20M", "CCK", "1T", "08", "32",
+ "MKK", "2.4G", "20M", "CCK", "1T", "08", "32",
+ "FCC", "2.4G", "20M", "CCK", "1T", "09", "32",
+ "ETSI", "2.4G", "20M", "CCK", "1T", "09", "32",
+ "MKK", "2.4G", "20M", "CCK", "1T", "09", "32",
+ "FCC", "2.4G", "20M", "CCK", "1T", "10", "32",
+ "ETSI", "2.4G", "20M", "CCK", "1T", "10", "32",
+ "MKK", "2.4G", "20M", "CCK", "1T", "10", "32",
+ "FCC", "2.4G", "20M", "CCK", "1T", "11", "32",
+ "ETSI", "2.4G", "20M", "CCK", "1T", "11", "32",
+ "MKK", "2.4G", "20M", "CCK", "1T", "11", "32",
+ "FCC", "2.4G", "20M", "CCK", "1T", "12", "63",
+ "ETSI", "2.4G", "20M", "CCK", "1T", "12", "32",
+ "MKK", "2.4G", "20M", "CCK", "1T", "12", "32",
+ "FCC", "2.4G", "20M", "CCK", "1T", "13", "63",
+ "ETSI", "2.4G", "20M", "CCK", "1T", "13", "32",
+ "MKK", "2.4G", "20M", "CCK", "1T", "13", "32",
+ "FCC", "2.4G", "20M", "CCK", "1T", "14", "63",
+ "ETSI", "2.4G", "20M", "CCK", "1T", "14", "63",
+ "MKK", "2.4G", "20M", "CCK", "1T", "14", "32",
+ "FCC", "2.4G", "20M", "OFDM", "1T", "01", "28",
+ "ETSI", "2.4G", "20M", "OFDM", "1T", "01", "32",
+ "MKK", "2.4G", "20M", "OFDM", "1T", "01", "32",
+ "FCC", "2.4G", "20M", "OFDM", "1T", "02", "28",
+ "ETSI", "2.4G", "20M", "OFDM", "1T", "02", "32",
+ "MKK", "2.4G", "20M", "OFDM", "1T", "02", "32",
+ "FCC", "2.4G", "20M", "OFDM", "1T", "03", "32",
+ "ETSI", "2.4G", "20M", "OFDM", "1T", "03", "32",
+ "MKK", "2.4G", "20M", "OFDM", "1T", "03", "32",
+ "FCC", "2.4G", "20M", "OFDM", "1T", "04", "32",
+ "ETSI", "2.4G", "20M", "OFDM", "1T", "04", "32",
+ "MKK", "2.4G", "20M", "OFDM", "1T", "04", "32",
+ "FCC", "2.4G", "20M", "OFDM", "1T", "05", "32",
+ "ETSI", "2.4G", "20M", "OFDM", "1T", "05", "32",
+ "MKK", "2.4G", "20M", "OFDM", "1T", "05", "32",
+ "FCC", "2.4G", "20M", "OFDM", "1T", "06", "32",
+ "ETSI", "2.4G", "20M", "OFDM", "1T", "06", "32",
+ "MKK", "2.4G", "20M", "OFDM", "1T", "06", "32",
+ "FCC", "2.4G", "20M", "OFDM", "1T", "07", "32",
+ "ETSI", "2.4G", "20M", "OFDM", "1T", "07", "32",
+ "MKK", "2.4G", "20M", "OFDM", "1T", "07", "32",
+ "FCC", "2.4G", "20M", "OFDM", "1T", "08", "32",
+ "ETSI", "2.4G", "20M", "OFDM", "1T", "08", "32",
+ "MKK", "2.4G", "20M", "OFDM", "1T", "08", "32",
+ "FCC", "2.4G", "20M", "OFDM", "1T", "09", "32",
+ "ETSI", "2.4G", "20M", "OFDM", "1T", "09", "32",
+ "MKK", "2.4G", "20M", "OFDM", "1T", "09", "32",
+ "FCC", "2.4G", "20M", "OFDM", "1T", "10", "28",
+ "ETSI", "2.4G", "20M", "OFDM", "1T", "10", "32",
+ "MKK", "2.4G", "20M", "OFDM", "1T", "10", "32",
+ "FCC", "2.4G", "20M", "OFDM", "1T", "11", "28",
+ "ETSI", "2.4G", "20M", "OFDM", "1T", "11", "32",
+ "MKK", "2.4G", "20M", "OFDM", "1T", "11", "32",
+ "FCC", "2.4G", "20M", "OFDM", "1T", "12", "63",
+ "ETSI", "2.4G", "20M", "OFDM", "1T", "12", "32",
+ "MKK", "2.4G", "20M", "OFDM", "1T", "12", "32",
+ "FCC", "2.4G", "20M", "OFDM", "1T", "13", "63",
+ "ETSI", "2.4G", "20M", "OFDM", "1T", "13", "32",
+ "MKK", "2.4G", "20M", "OFDM", "1T", "13", "32",
+ "FCC", "2.4G", "20M", "OFDM", "1T", "14", "63",
+ "ETSI", "2.4G", "20M", "OFDM", "1T", "14", "63",
+ "MKK", "2.4G", "20M", "OFDM", "1T", "14", "63",
+ "FCC", "2.4G", "20M", "HT", "1T", "01", "26",
+ "ETSI", "2.4G", "20M", "HT", "1T", "01", "32",
+ "MKK", "2.4G", "20M", "HT", "1T", "01", "32",
+ "FCC", "2.4G", "20M", "HT", "1T", "02", "26",
+ "ETSI", "2.4G", "20M", "HT", "1T", "02", "32",
+ "MKK", "2.4G", "20M", "HT", "1T", "02", "32",
+ "FCC", "2.4G", "20M", "HT", "1T", "03", "32",
+ "ETSI", "2.4G", "20M", "HT", "1T", "03", "32",
+ "MKK", "2.4G", "20M", "HT", "1T", "03", "32",
+ "FCC", "2.4G", "20M", "HT", "1T", "04", "32",
+ "ETSI", "2.4G", "20M", "HT", "1T", "04", "32",
+ "MKK", "2.4G", "20M", "HT", "1T", "04", "32",
+ "FCC", "2.4G", "20M", "HT", "1T", "05", "32",
+ "ETSI", "2.4G", "20M", "HT", "1T", "05", "32",
+ "MKK", "2.4G", "20M", "HT", "1T", "05", "32",
+ "FCC", "2.4G", "20M", "HT", "1T", "06", "32",
+ "ETSI", "2.4G", "20M", "HT", "1T", "06", "32",
+ "MKK", "2.4G", "20M", "HT", "1T", "06", "32",
+ "FCC", "2.4G", "20M", "HT", "1T", "07", "32",
+ "ETSI", "2.4G", "20M", "HT", "1T", "07", "32",
+ "MKK", "2.4G", "20M", "HT", "1T", "07", "32",
+ "FCC", "2.4G", "20M", "HT", "1T", "08", "32",
+ "ETSI", "2.4G", "20M", "HT", "1T", "08", "32",
+ "MKK", "2.4G", "20M", "HT", "1T", "08", "32",
+ "FCC", "2.4G", "20M", "HT", "1T", "09", "32",
+ "ETSI", "2.4G", "20M", "HT", "1T", "09", "32",
+ "MKK", "2.4G", "20M", "HT", "1T", "09", "32",
+ "FCC", "2.4G", "20M", "HT", "1T", "10", "26",
+ "ETSI", "2.4G", "20M", "HT", "1T", "10", "32",
+ "MKK", "2.4G", "20M", "HT", "1T", "10", "32",
+ "FCC", "2.4G", "20M", "HT", "1T", "11", "26",
+ "ETSI", "2.4G", "20M", "HT", "1T", "11", "32",
+ "MKK", "2.4G", "20M", "HT", "1T", "11", "32",
+ "FCC", "2.4G", "20M", "HT", "1T", "12", "63",
+ "ETSI", "2.4G", "20M", "HT", "1T", "12", "32",
+ "MKK", "2.4G", "20M", "HT", "1T", "12", "32",
+ "FCC", "2.4G", "20M", "HT", "1T", "13", "63",
+ "ETSI", "2.4G", "20M", "HT", "1T", "13", "32",
+ "MKK", "2.4G", "20M", "HT", "1T", "13", "32",
+ "FCC", "2.4G", "20M", "HT", "1T", "14", "63",
+ "ETSI", "2.4G", "20M", "HT", "1T", "14", "63",
+ "MKK", "2.4G", "20M", "HT", "1T", "14", "63",
+ "FCC", "2.4G", "20M", "HT", "2T", "01", "30",
+ "ETSI", "2.4G", "20M", "HT", "2T", "01", "32",
+ "MKK", "2.4G", "20M", "HT", "2T", "01", "32",
+ "FCC", "2.4G", "20M", "HT", "2T", "02", "32",
+ "ETSI", "2.4G", "20M", "HT", "2T", "02", "32",
+ "MKK", "2.4G", "20M", "HT", "2T", "02", "32",
+ "FCC", "2.4G", "20M", "HT", "2T", "03", "32",
+ "ETSI", "2.4G", "20M", "HT", "2T", "03", "32",
+ "MKK", "2.4G", "20M", "HT", "2T", "03", "32",
+ "FCC", "2.4G", "20M", "HT", "2T", "04", "32",
+ "ETSI", "2.4G", "20M", "HT", "2T", "04", "32",
+ "MKK", "2.4G", "20M", "HT", "2T", "04", "32",
+ "FCC", "2.4G", "20M", "HT", "2T", "05", "32",
+ "ETSI", "2.4G", "20M", "HT", "2T", "05", "32",
+ "MKK", "2.4G", "20M", "HT", "2T", "05", "32",
+ "FCC", "2.4G", "20M", "HT", "2T", "06", "32",
+ "ETSI", "2.4G", "20M", "HT", "2T", "06", "32",
+ "MKK", "2.4G", "20M", "HT", "2T", "06", "32",
+ "FCC", "2.4G", "20M", "HT", "2T", "07", "32",
+ "ETSI", "2.4G", "20M", "HT", "2T", "07", "32",
+ "MKK", "2.4G", "20M", "HT", "2T", "07", "32",
+ "FCC", "2.4G", "20M", "HT", "2T", "08", "32",
+ "ETSI", "2.4G", "20M", "HT", "2T", "08", "32",
+ "MKK", "2.4G", "20M", "HT", "2T", "08", "32",
+ "FCC", "2.4G", "20M", "HT", "2T", "09", "32",
+ "ETSI", "2.4G", "20M", "HT", "2T", "09", "32",
+ "MKK", "2.4G", "20M", "HT", "2T", "09", "32",
+ "FCC", "2.4G", "20M", "HT", "2T", "10", "32",
+ "ETSI", "2.4G", "20M", "HT", "2T", "10", "32",
+ "MKK", "2.4G", "20M", "HT", "2T", "10", "32",
+ "FCC", "2.4G", "20M", "HT", "2T", "11", "30",
+ "ETSI", "2.4G", "20M", "HT", "2T", "11", "32",
+ "MKK", "2.4G", "20M", "HT", "2T", "11", "32",
+ "FCC", "2.4G", "20M", "HT", "2T", "12", "63",
+ "ETSI", "2.4G", "20M", "HT", "2T", "12", "32",
+ "MKK", "2.4G", "20M", "HT", "2T", "12", "32",
+ "FCC", "2.4G", "20M", "HT", "2T", "13", "63",
+ "ETSI", "2.4G", "20M", "HT", "2T", "13", "32",
+ "MKK", "2.4G", "20M", "HT", "2T", "13", "32",
+ "FCC", "2.4G", "20M", "HT", "2T", "14", "63",
+ "ETSI", "2.4G", "20M", "HT", "2T", "14", "63",
+ "MKK", "2.4G", "20M", "HT", "2T", "14", "63",
+ "FCC", "2.4G", "40M", "HT", "1T", "01", "63",
+ "ETSI", "2.4G", "40M", "HT", "1T", "01", "63",
+ "MKK", "2.4G", "40M", "HT", "1T", "01", "63",
+ "FCC", "2.4G", "40M", "HT", "1T", "02", "63",
+ "ETSI", "2.4G", "40M", "HT", "1T", "02", "63",
+ "MKK", "2.4G", "40M", "HT", "1T", "02", "63",
+ "FCC", "2.4G", "40M", "HT", "1T", "03", "26",
+ "ETSI", "2.4G", "40M", "HT", "1T", "03", "32",
+ "MKK", "2.4G", "40M", "HT", "1T", "03", "32",
+ "FCC", "2.4G", "40M", "HT", "1T", "04", "26",
+ "ETSI", "2.4G", "40M", "HT", "1T", "04", "32",
+ "MKK", "2.4G", "40M", "HT", "1T", "04", "32",
+ "FCC", "2.4G", "40M", "HT", "1T", "05", "32",
+ "ETSI", "2.4G", "40M", "HT", "1T", "05", "32",
+ "MKK", "2.4G", "40M", "HT", "1T", "05", "32",
+ "FCC", "2.4G", "40M", "HT", "1T", "06", "32",
+ "ETSI", "2.4G", "40M", "HT", "1T", "06", "32",
+ "MKK", "2.4G", "40M", "HT", "1T", "06", "32",
+ "FCC", "2.4G", "40M", "HT", "1T", "07", "32",
+ "ETSI", "2.4G", "40M", "HT", "1T", "07", "32",
+ "MKK", "2.4G", "40M", "HT", "1T", "07", "32",
+ "FCC", "2.4G", "40M", "HT", "1T", "08", "26",
+ "ETSI", "2.4G", "40M", "HT", "1T", "08", "32",
+ "MKK", "2.4G", "40M", "HT", "1T", "08", "32",
+ "FCC", "2.4G", "40M", "HT", "1T", "09", "26",
+ "ETSI", "2.4G", "40M", "HT", "1T", "09", "32",
+ "MKK", "2.4G", "40M", "HT", "1T", "09", "32",
+ "FCC", "2.4G", "40M", "HT", "1T", "10", "26",
+ "ETSI", "2.4G", "40M", "HT", "1T", "10", "32",
+ "MKK", "2.4G", "40M", "HT", "1T", "10", "32",
+ "FCC", "2.4G", "40M", "HT", "1T", "11", "26",
+ "ETSI", "2.4G", "40M", "HT", "1T", "11", "32",
+ "MKK", "2.4G", "40M", "HT", "1T", "11", "32",
+ "FCC", "2.4G", "40M", "HT", "1T", "12", "63",
+ "ETSI", "2.4G", "40M", "HT", "1T", "12", "32",
+ "MKK", "2.4G", "40M", "HT", "1T", "12", "32",
+ "FCC", "2.4G", "40M", "HT", "1T", "13", "63",
+ "ETSI", "2.4G", "40M", "HT", "1T", "13", "32",
+ "MKK", "2.4G", "40M", "HT", "1T", "13", "32",
+ "FCC", "2.4G", "40M", "HT", "1T", "14", "63",
+ "ETSI", "2.4G", "40M", "HT", "1T", "14", "63",
+ "MKK", "2.4G", "40M", "HT", "1T", "14", "63",
+ "FCC", "2.4G", "40M", "HT", "2T", "01", "63",
+ "ETSI", "2.4G", "40M", "HT", "2T", "01", "63",
+ "MKK", "2.4G", "40M", "HT", "2T", "01", "63",
+ "FCC", "2.4G", "40M", "HT", "2T", "02", "63",
+ "ETSI", "2.4G", "40M", "HT", "2T", "02", "63",
+ "MKK", "2.4G", "40M", "HT", "2T", "02", "63",
+ "FCC", "2.4G", "40M", "HT", "2T", "03", "30",
+ "ETSI", "2.4G", "40M", "HT", "2T", "03", "30",
+ "MKK", "2.4G", "40M", "HT", "2T", "03", "30",
+ "FCC", "2.4G", "40M", "HT", "2T", "04", "32",
+ "ETSI", "2.4G", "40M", "HT", "2T", "04", "30",
+ "MKK", "2.4G", "40M", "HT", "2T", "04", "30",
+ "FCC", "2.4G", "40M", "HT", "2T", "05", "32",
+ "ETSI", "2.4G", "40M", "HT", "2T", "05", "30",
+ "MKK", "2.4G", "40M", "HT", "2T", "05", "30",
+ "FCC", "2.4G", "40M", "HT", "2T", "06", "32",
+ "ETSI", "2.4G", "40M", "HT", "2T", "06", "30",
+ "MKK", "2.4G", "40M", "HT", "2T", "06", "30",
+ "FCC", "2.4G", "40M", "HT", "2T", "07", "32",
+ "ETSI", "2.4G", "40M", "HT", "2T", "07", "30",
+ "MKK", "2.4G", "40M", "HT", "2T", "07", "30",
+ "FCC", "2.4G", "40M", "HT", "2T", "08", "32",
+ "ETSI", "2.4G", "40M", "HT", "2T", "08", "30",
+ "MKK", "2.4G", "40M", "HT", "2T", "08", "30",
+ "FCC", "2.4G", "40M", "HT", "2T", "09", "32",
+ "ETSI", "2.4G", "40M", "HT", "2T", "09", "30",
+ "MKK", "2.4G", "40M", "HT", "2T", "09", "30",
+ "FCC", "2.4G", "40M", "HT", "2T", "10", "32",
+ "ETSI", "2.4G", "40M", "HT", "2T", "10", "30",
+ "MKK", "2.4G", "40M", "HT", "2T", "10", "30",
+ "FCC", "2.4G", "40M", "HT", "2T", "11", "30",
+ "ETSI", "2.4G", "40M", "HT", "2T", "11", "30",
+ "MKK", "2.4G", "40M", "HT", "2T", "11", "30",
+ "FCC", "2.4G", "40M", "HT", "2T", "12", "63",
+ "ETSI", "2.4G", "40M", "HT", "2T", "12", "32",
+ "MKK", "2.4G", "40M", "HT", "2T", "12", "32",
+ "FCC", "2.4G", "40M", "HT", "2T", "13", "63",
+ "ETSI", "2.4G", "40M", "HT", "2T", "13", "32",
+ "MKK", "2.4G", "40M", "HT", "2T", "13", "32",
+ "FCC", "2.4G", "40M", "HT", "2T", "14", "63",
+ "ETSI", "2.4G", "40M", "HT", "2T", "14", "63",
+ "MKK", "2.4G", "40M", "HT", "2T", "14", "63"
+};
+
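+/* The table above is read in groups of seven strings (regulation domain,
+ * band, bandwidth, rate section, RF path, channel and power-limit value),
+ * which are passed straight through to odm_ConfigBB_TXPWR_LMT_8723B().
+ */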
+void ODM_ReadAndConfig_MP_8723B_TXPWR_LMT(PDM_ODM_T pDM_Odm)
+{
+ u32 i = 0;
+ u32 ArrayLen = sizeof(Array_MP_8723B_TXPWR_LMT)/sizeof(u8 *);
+ u8 **Array = Array_MP_8723B_TXPWR_LMT;
+
+ ODM_RT_TRACE(
+ pDM_Odm,
+ ODM_COMP_INIT,
+ ODM_DBG_LOUD,
+ ("===> ODM_ReadAndConfig_MP_8723B_TXPWR_LMT\n")
+ );
+
+ for (i = 0; i < ArrayLen; i += 7) {
+ u8 *regulation = Array[i];
+ u8 *band = Array[i+1];
+ u8 *bandwidth = Array[i+2];
+ u8 *rate = Array[i+3];
+ u8 *rfPath = Array[i+4];
+ u8 *chnl = Array[i+5];
+ u8 *val = Array[i+6];
+
+ odm_ConfigBB_TXPWR_LMT_8723B(
+ pDM_Odm,
+ regulation,
+ band,
+ bandwidth,
+ rate,
+ rfPath,
+ chnl,
+ val
+ );
+ }
+}
diff --git a/drivers/staging/rtl8723bs/hal/HalHWImg8723B_RF.h b/drivers/staging/rtl8723bs/hal/HalHWImg8723B_RF.h
new file mode 100644
index 000000000000..98aa2ba97fa9
--- /dev/null
+++ b/drivers/staging/rtl8723bs/hal/HalHWImg8723B_RF.h
@@ -0,0 +1,49 @@
+/******************************************************************************
+*
+* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+*
+* This program is free software; you can redistribute it and/or modify it
+* under the terms of version 2 of the GNU General Public License as
+* published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful, but WITHOUT
+* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+* more details.
+*
+******************************************************************************/
+
+#ifndef __INC_MP_RF_HW_IMG_8723B_H
+#define __INC_MP_RF_HW_IMG_8723B_H
+
+
+/******************************************************************************
+* RadioA.TXT
+******************************************************************************/
+
+void
+ODM_ReadAndConfig_MP_8723B_RadioA(/* TC: Test Chip, MP: MP Chip */
+ PDM_ODM_T pDM_Odm
+);
+
+/******************************************************************************
+* TxPowerTrack_SDIO.TXT
+******************************************************************************/
+
+void
+ODM_ReadAndConfig_MP_8723B_TxPowerTrack_SDIO(/* TC: Test Chip, MP: MP Chip */
+ PDM_ODM_T pDM_Odm
+);
+u32 ODM_GetVersion_MP_8723B_TxPowerTrack_SDIO(void);
+
+/******************************************************************************
+* TXPWR_LMT.TXT
+******************************************************************************/
+
+void
+ODM_ReadAndConfig_MP_8723B_TXPWR_LMT(/* TC: Test Chip, MP: MP Chip */
+ PDM_ODM_T pDM_Odm
+);
+u32 ODM_GetVersion_MP_8723B_TXPWR_LMT(void);
+
+#endif
diff --git a/drivers/staging/rtl8723bs/hal/HalPhyRf.c b/drivers/staging/rtl8723bs/hal/HalPhyRf.c
new file mode 100644
index 000000000000..9adcc3098463
--- /dev/null
+++ b/drivers/staging/rtl8723bs/hal/HalPhyRf.c
@@ -0,0 +1,662 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+
+/* include "Mp_Precomp.h" */
+#include "odm_precomp.h"
+
+
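+/* Pick the swing-table row for a thermal delta: scan the caller-provided
+ * thermalThreshold[_direction][] array for the first entry larger than
+ * _deltaThermal, step back one row, and clamp to the last row when no
+ * entry matches.
+ */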
+#define CALCULATE_SWINGTALBE_OFFSET(_offset, _direction, _size, _deltaThermal) \
+ do {\
+ for (_offset = 0; _offset < _size; _offset++) {\
+ if (_deltaThermal < thermalThreshold[_direction][_offset]) {\
+ if (_offset != 0)\
+ _offset--;\
+ break;\
+ } \
+ } \
+ if (_offset >= _size)\
+ _offset = _size-1;\
+ } while (0)
+
+
+void ConfigureTxpowerTrack(PDM_ODM_T pDM_Odm, PTXPWRTRACK_CFG pConfig)
+{
+ ConfigureTxpowerTrack_8723B(pConfig);
+}
+
+/* */
+/* <20121113, Kordan> This function should be called when TxAGC changed. */
+/* Otherwise the previous compensation is gone, because we record the */
+/* delta of temperature between two TxPowerTracking watch dogs. */
+/* */
+/* NOTE: If the Tx BB swing or Tx scaling is changed at run-time, this */
+/* function still needs to be called. */
+/* */
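+/* Resets the CCK/OFDM BB swing indices back to the default indices and */
+/* re-bases the recorded thermal values on EEPROMThermalMeter. */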
+void ODM_ClearTxPowerTrackingState(PDM_ODM_T pDM_Odm)
+{
+ struct hal_com_data *pHalData = GET_HAL_DATA(pDM_Odm->Adapter);
+ u8 p = 0;
+
+ pDM_Odm->BbSwingIdxCckBase = pDM_Odm->DefaultCckIndex;
+ pDM_Odm->BbSwingIdxCck = pDM_Odm->DefaultCckIndex;
+ pDM_Odm->RFCalibrateInfo.CCK_index = 0;
+
+ for (p = ODM_RF_PATH_A; p < MAX_RF_PATH; ++p) {
+ pDM_Odm->BbSwingIdxOfdmBase[p] = pDM_Odm->DefaultOfdmIndex;
+ pDM_Odm->BbSwingIdxOfdm[p] = pDM_Odm->DefaultOfdmIndex;
+ pDM_Odm->RFCalibrateInfo.OFDM_index[p] = pDM_Odm->DefaultOfdmIndex;
+
+ pDM_Odm->RFCalibrateInfo.PowerIndexOffset[p] = 0;
+ pDM_Odm->RFCalibrateInfo.DeltaPowerIndex[p] = 0;
+ pDM_Odm->RFCalibrateInfo.DeltaPowerIndexLast[p] = 0;
+ pDM_Odm->RFCalibrateInfo.PowerIndexOffset[p] = 0;
+
+		/* Initialize mix mode power tracking */
+ pDM_Odm->Absolute_OFDMSwingIdx[p] = 0;
+ pDM_Odm->Remnant_OFDMSwingIdx[p] = 0;
+ }
+
+	/* Initialize for the modify Tx scaling mode */
+ pDM_Odm->Modify_TxAGC_Flag_PathA = false;
+	/* Initialize for the modify Tx scaling mode */
+ pDM_Odm->Modify_TxAGC_Flag_PathB = false;
+ pDM_Odm->Remnant_CCKSwingIdx = 0;
+ pDM_Odm->RFCalibrateInfo.ThermalValue = pHalData->EEPROMThermalMeter;
+ pDM_Odm->RFCalibrateInfo.ThermalValue_IQK = pHalData->EEPROMThermalMeter;
+ pDM_Odm->RFCalibrateInfo.ThermalValue_LCK = pHalData->EEPROMThermalMeter;
+}
+
+void ODM_TXPowerTrackingCallback_ThermalMeter(struct adapter *Adapter)
+{
+
+ struct hal_com_data *pHalData = GET_HAL_DATA(Adapter);
+ PDM_ODM_T pDM_Odm = &pHalData->odmpriv;
+
+ u8 ThermalValue = 0, delta, delta_LCK, delta_IQK, p = 0, i = 0;
+ u8 ThermalValue_AVG_count = 0;
+ u32 ThermalValue_AVG = 0;
+
+ u8 OFDM_min_index = 0; /* OFDM BB Swing should be less than +3.0dB, which is required by Arthur */
+ u8 Indexforchannel = 0; /* GetRightChnlPlaceforIQK(pHalData->CurrentChannel) */
+
+ TXPWRTRACK_CFG c;
+
+
+	/* 4 1. The following delta swing tables (up/down, per RF path) decide the final index of the OFDM/CCK swing table. */
+ u8 *deltaSwingTableIdx_TUP_A;
+ u8 *deltaSwingTableIdx_TDOWN_A;
+ u8 *deltaSwingTableIdx_TUP_B;
+ u8 *deltaSwingTableIdx_TDOWN_B;
+
+	/* 4 2. Initialization (7 steps in total) */
+
+ ConfigureTxpowerTrack(pDM_Odm, &c);
+
+ (*c.GetDeltaSwingTable)(
+ pDM_Odm,
+ (u8 **)&deltaSwingTableIdx_TUP_A,
+ (u8 **)&deltaSwingTableIdx_TDOWN_A,
+ (u8 **)&deltaSwingTableIdx_TUP_B,
+ (u8 **)&deltaSwingTableIdx_TDOWN_B
+ );
+
+ /* cosa add for debug */
+ pDM_Odm->RFCalibrateInfo.TXPowerTrackingCallbackCnt++;
+ pDM_Odm->RFCalibrateInfo.bTXPowerTrackingInit = true;
+
+ ODM_RT_TRACE(
+ pDM_Odm,
+ ODM_COMP_TX_PWR_TRACK,
+ ODM_DBG_LOUD,
+ (
+ "===>ODM_TXPowerTrackingCallback_ThermalMeter,\npDM_Odm->BbSwingIdxCckBase: %d, pDM_Odm->BbSwingIdxOfdmBase[A]: %d, pDM_Odm->DefaultOfdmIndex: %d\n",
+ pDM_Odm->BbSwingIdxCckBase,
+ pDM_Odm->BbSwingIdxOfdmBase[ODM_RF_PATH_A],
+ pDM_Odm->DefaultOfdmIndex
+ )
+ );
+
+ ThermalValue = (u8)PHY_QueryRFReg(pDM_Odm->Adapter, ODM_RF_PATH_A, c.ThermalRegAddr, 0xfc00); /* 0x42: RF Reg[15:10] 88E */
+ if (
+ !pDM_Odm->RFCalibrateInfo.TxPowerTrackControl ||
+ pHalData->EEPROMThermalMeter == 0 ||
+ pHalData->EEPROMThermalMeter == 0xFF
+ )
+ return;
+
+ /* 4 3. Initialize ThermalValues of RFCalibrateInfo */
+
+ if (pDM_Odm->RFCalibrateInfo.bReloadtxpowerindex)
+ ODM_RT_TRACE(
+ pDM_Odm,
+ ODM_COMP_TX_PWR_TRACK, ODM_DBG_LOUD,
+ ("reload ofdm index for band switch\n")
+ );
+
+ /* 4 4. Calculate average thermal meter */
+
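+	/* ThermalValue_AVG[] acts as a ring buffer of the last
+	 * c.AverageThermalNum readings; slots that are still zero (not yet
+	 * filled) are skipped when averaging below.
+	 */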
+ pDM_Odm->RFCalibrateInfo.ThermalValue_AVG[pDM_Odm->RFCalibrateInfo.ThermalValue_AVG_index] = ThermalValue;
+ pDM_Odm->RFCalibrateInfo.ThermalValue_AVG_index++;
+ if (pDM_Odm->RFCalibrateInfo.ThermalValue_AVG_index == c.AverageThermalNum) /* Average times = c.AverageThermalNum */
+ pDM_Odm->RFCalibrateInfo.ThermalValue_AVG_index = 0;
+
+ for (i = 0; i < c.AverageThermalNum; i++) {
+ if (pDM_Odm->RFCalibrateInfo.ThermalValue_AVG[i]) {
+ ThermalValue_AVG += pDM_Odm->RFCalibrateInfo.ThermalValue_AVG[i];
+ ThermalValue_AVG_count++;
+ }
+ }
+
+ /* Calculate Average ThermalValue after average enough times */
+ if (ThermalValue_AVG_count) {
+ ThermalValue = (u8)(ThermalValue_AVG / ThermalValue_AVG_count);
+ ODM_RT_TRACE(
+ pDM_Odm,
+ ODM_COMP_TX_PWR_TRACK,
+ ODM_DBG_LOUD,
+ (
+ "AVG Thermal Meter = 0x%X, EFUSE Thermal Base = 0x%X\n",
+ ThermalValue,
+ pHalData->EEPROMThermalMeter
+ )
+ );
+ }
+
+ /* 4 5. Calculate delta, delta_LCK, delta_IQK. */
+	/* "delta" here is used to determine whether the thermal value has changed or not. */
+ delta =
+ (ThermalValue > pDM_Odm->RFCalibrateInfo.ThermalValue) ?
+ (ThermalValue - pDM_Odm->RFCalibrateInfo.ThermalValue) :
+ (pDM_Odm->RFCalibrateInfo.ThermalValue - ThermalValue);
+ delta_LCK =
+ (ThermalValue > pDM_Odm->RFCalibrateInfo.ThermalValue_LCK) ?
+ (ThermalValue - pDM_Odm->RFCalibrateInfo.ThermalValue_LCK) :
+ (pDM_Odm->RFCalibrateInfo.ThermalValue_LCK - ThermalValue);
+ delta_IQK =
+ (ThermalValue > pDM_Odm->RFCalibrateInfo.ThermalValue_IQK) ?
+ (ThermalValue - pDM_Odm->RFCalibrateInfo.ThermalValue_IQK) :
+ (pDM_Odm->RFCalibrateInfo.ThermalValue_IQK - ThermalValue);
+
+ ODM_RT_TRACE(
+ pDM_Odm,
+ ODM_COMP_TX_PWR_TRACK,
+ ODM_DBG_LOUD,
+ (
+ "(delta, delta_LCK, delta_IQK) = (%d, %d, %d)\n",
+ delta,
+ delta_LCK,
+ delta_IQK
+ )
+ );
+
+ /* 4 6. If necessary, do LCK. */
+ /* Delta temperature is equal to or larger than 20 centigrade. */
+ if (delta_LCK >= c.Threshold_IQK) {
+ ODM_RT_TRACE(
+ pDM_Odm,
+ ODM_COMP_TX_PWR_TRACK,
+ ODM_DBG_LOUD,
+ (
+ "delta_LCK(%d) >= Threshold_IQK(%d)\n",
+ delta_LCK,
+ c.Threshold_IQK
+ )
+ );
+ pDM_Odm->RFCalibrateInfo.ThermalValue_LCK = ThermalValue;
+ if (c.PHY_LCCalibrate)
+ (*c.PHY_LCCalibrate)(pDM_Odm);
+ }
+
+ /* 3 7. If necessary, move the index of swing table to adjust Tx power. */
+ if (delta > 0 && pDM_Odm->RFCalibrateInfo.TxPowerTrackControl) {
+		/* "delta" here is used to record the absolute value of the difference. */
+ delta =
+ ThermalValue > pHalData->EEPROMThermalMeter ?
+ (ThermalValue - pHalData->EEPROMThermalMeter) :
+ (pHalData->EEPROMThermalMeter - ThermalValue);
+
+ if (delta >= TXPWR_TRACK_TABLE_SIZE)
+ delta = TXPWR_TRACK_TABLE_SIZE - 1;
+
+ /* 4 7.1 The Final Power Index = BaseIndex + PowerIndexOffset */
+ if (ThermalValue > pHalData->EEPROMThermalMeter) {
+ ODM_RT_TRACE(
+ pDM_Odm,
+ ODM_COMP_TX_PWR_TRACK,
+ ODM_DBG_LOUD,
+ (
+ "deltaSwingTableIdx_TUP_A[%d] = %d\n",
+ delta,
+ deltaSwingTableIdx_TUP_A[delta]
+ )
+ );
+ pDM_Odm->RFCalibrateInfo.DeltaPowerIndexLast[ODM_RF_PATH_A] =
+ pDM_Odm->RFCalibrateInfo.DeltaPowerIndex[ODM_RF_PATH_A];
+ pDM_Odm->RFCalibrateInfo.DeltaPowerIndex[ODM_RF_PATH_A] =
+ deltaSwingTableIdx_TUP_A[delta];
+
+ /* Record delta swing for mix mode power tracking */
+ pDM_Odm->Absolute_OFDMSwingIdx[ODM_RF_PATH_A] =
+ deltaSwingTableIdx_TUP_A[delta];
+
+ ODM_RT_TRACE(
+ pDM_Odm,
+ ODM_COMP_TX_PWR_TRACK,
+ ODM_DBG_LOUD,
+ (
+ "******Temp is higher and pDM_Odm->Absolute_OFDMSwingIdx[ODM_RF_PATH_A] = %d\n",
+ pDM_Odm->Absolute_OFDMSwingIdx[ODM_RF_PATH_A]
+ )
+ );
+
+ if (c.RfPathCount > 1) {
+ ODM_RT_TRACE(
+ pDM_Odm,
+ ODM_COMP_TX_PWR_TRACK,
+ ODM_DBG_LOUD,
+ (
+ "deltaSwingTableIdx_TUP_B[%d] = %d\n",
+ delta,
+ deltaSwingTableIdx_TUP_B[delta]
+ )
+ );
+ pDM_Odm->RFCalibrateInfo.DeltaPowerIndexLast[ODM_RF_PATH_B] =
+ pDM_Odm->RFCalibrateInfo.DeltaPowerIndex[ODM_RF_PATH_B];
+ pDM_Odm->RFCalibrateInfo.DeltaPowerIndex[ODM_RF_PATH_B] =
+ deltaSwingTableIdx_TUP_B[delta];
+
+ /* Record delta swing for mix mode power tracking */
+ pDM_Odm->Absolute_OFDMSwingIdx[ODM_RF_PATH_B] =
+ deltaSwingTableIdx_TUP_B[delta];
+ ODM_RT_TRACE(
+ pDM_Odm,
+ ODM_COMP_TX_PWR_TRACK,
+ ODM_DBG_LOUD,
+ (
+ "******Temp is higher and pDM_Odm->Absolute_OFDMSwingIdx[ODM_RF_PATH_B] = %d\n",
+ pDM_Odm->Absolute_OFDMSwingIdx[ODM_RF_PATH_B]
+ )
+ );
+ }
+
+ } else {
+ ODM_RT_TRACE(
+ pDM_Odm,
+ ODM_COMP_TX_PWR_TRACK,
+ ODM_DBG_LOUD,
+ (
+ "deltaSwingTableIdx_TDOWN_A[%d] = %d\n",
+ delta,
+ deltaSwingTableIdx_TDOWN_A[delta]
+ )
+ );
+
+ pDM_Odm->RFCalibrateInfo.DeltaPowerIndexLast[ODM_RF_PATH_A] =
+ pDM_Odm->RFCalibrateInfo.DeltaPowerIndex[ODM_RF_PATH_A];
+ pDM_Odm->RFCalibrateInfo.DeltaPowerIndex[ODM_RF_PATH_A] =
+ -1 * deltaSwingTableIdx_TDOWN_A[delta];
+
+ /* Record delta swing for mix mode power tracking */
+ pDM_Odm->Absolute_OFDMSwingIdx[ODM_RF_PATH_A] =
+ -1 * deltaSwingTableIdx_TDOWN_A[delta];
+
+ ODM_RT_TRACE(
+ pDM_Odm,
+ ODM_COMP_TX_PWR_TRACK,
+ ODM_DBG_LOUD,
+ (
+ "******Temp is lower and pDM_Odm->Absolute_OFDMSwingIdx[ODM_RF_PATH_A] = %d\n",
+ pDM_Odm->Absolute_OFDMSwingIdx[ODM_RF_PATH_A]
+ )
+ );
+
+ if (c.RfPathCount > 1) {
+ ODM_RT_TRACE(
+ pDM_Odm,
+ ODM_COMP_TX_PWR_TRACK,
+ ODM_DBG_LOUD,
+ (
+ "deltaSwingTableIdx_TDOWN_B[%d] = %d\n",
+ delta,
+ deltaSwingTableIdx_TDOWN_B[delta]
+ )
+ );
+
+ pDM_Odm->RFCalibrateInfo.DeltaPowerIndexLast[ODM_RF_PATH_B] =
+ pDM_Odm->RFCalibrateInfo.DeltaPowerIndex[ODM_RF_PATH_B];
+ pDM_Odm->RFCalibrateInfo.DeltaPowerIndex[ODM_RF_PATH_B] =
+ -1 * deltaSwingTableIdx_TDOWN_B[delta];
+
+ /* Record delta swing for mix mode power tracking */
+ pDM_Odm->Absolute_OFDMSwingIdx[ODM_RF_PATH_B] =
+ -1 * deltaSwingTableIdx_TDOWN_B[delta];
+
+ ODM_RT_TRACE(
+ pDM_Odm,
+ ODM_COMP_TX_PWR_TRACK,
+ ODM_DBG_LOUD,
+ (
+ "******Temp is lower and pDM_Odm->Absolute_OFDMSwingIdx[ODM_RF_PATH_B] = %d\n",
+ pDM_Odm->Absolute_OFDMSwingIdx[ODM_RF_PATH_B]
+ )
+ );
+ }
+ }
+
+ for (p = ODM_RF_PATH_A; p < c.RfPathCount; p++) {
+ ODM_RT_TRACE(
+ pDM_Odm,
+ ODM_COMP_TX_PWR_TRACK,
+ ODM_DBG_LOUD,
+ (
+ "\n\n ================================ [Path-%c] Calculating PowerIndexOffset ================================\n",
+ (p == ODM_RF_PATH_A ? 'A' : 'B')
+ )
+ );
+
+ if (
+ pDM_Odm->RFCalibrateInfo.DeltaPowerIndex[p] ==
+ pDM_Odm->RFCalibrateInfo.DeltaPowerIndexLast[p]
+ ) /* If Thermal value changes but lookup table value still the same */
+ pDM_Odm->RFCalibrateInfo.PowerIndexOffset[p] = 0;
+ else
+ pDM_Odm->RFCalibrateInfo.PowerIndexOffset[p] = pDM_Odm->RFCalibrateInfo.DeltaPowerIndex[p] - pDM_Odm->RFCalibrateInfo.DeltaPowerIndexLast[p]; /* Power Index Diff between 2 times Power Tracking */
+
+ ODM_RT_TRACE(
+ pDM_Odm,
+ ODM_COMP_TX_PWR_TRACK,
+ ODM_DBG_LOUD,
+ (
+ "[Path-%c] PowerIndexOffset(%d) = DeltaPowerIndex(%d) - DeltaPowerIndexLast(%d)\n",
+ (
+ p == ODM_RF_PATH_A ? 'A' : 'B'),
+ pDM_Odm->RFCalibrateInfo.PowerIndexOffset[p],
+ pDM_Odm->RFCalibrateInfo.DeltaPowerIndex[p],
+ pDM_Odm->RFCalibrateInfo.DeltaPowerIndexLast[p]
+ )
+ );
+
+ pDM_Odm->RFCalibrateInfo.OFDM_index[p] =
+ pDM_Odm->BbSwingIdxOfdmBase[p] +
+ pDM_Odm->RFCalibrateInfo.PowerIndexOffset[p];
+
+ pDM_Odm->RFCalibrateInfo.CCK_index =
+ pDM_Odm->BbSwingIdxCckBase +
+ pDM_Odm->RFCalibrateInfo.PowerIndexOffset[p];
+
+ pDM_Odm->BbSwingIdxCck =
+ pDM_Odm->RFCalibrateInfo.CCK_index;
+
+ pDM_Odm->BbSwingIdxOfdm[p] =
+ pDM_Odm->RFCalibrateInfo.OFDM_index[p];
+
+ /* *************Print BB Swing Base and Index Offset************* */
+ ODM_RT_TRACE(
+ pDM_Odm,
+ ODM_COMP_TX_PWR_TRACK,
+ ODM_DBG_LOUD,
+ (
+ "The 'CCK' final index(%d) = BaseIndex(%d) + PowerIndexOffset(%d)\n",
+ pDM_Odm->BbSwingIdxCck,
+ pDM_Odm->BbSwingIdxCckBase,
+ pDM_Odm->RFCalibrateInfo.PowerIndexOffset[p]
+ )
+ );
+ ODM_RT_TRACE(
+ pDM_Odm,
+ ODM_COMP_TX_PWR_TRACK,
+ ODM_DBG_LOUD,
+ (
+ "The 'OFDM' final index(%d) = BaseIndex[%c](%d) + PowerIndexOffset(%d)\n",
+ pDM_Odm->BbSwingIdxOfdm[p],
+ (p == ODM_RF_PATH_A ? 'A' : 'B'),
+ pDM_Odm->BbSwingIdxOfdmBase[p],
+ pDM_Odm->RFCalibrateInfo.PowerIndexOffset[p]
+ )
+ );
+
+ /* 4 7.1 Handle boundary conditions of index. */
+ if (pDM_Odm->RFCalibrateInfo.OFDM_index[p] > c.SwingTableSize_OFDM-1)
+ pDM_Odm->RFCalibrateInfo.OFDM_index[p] = c.SwingTableSize_OFDM-1;
+ else if (pDM_Odm->RFCalibrateInfo.OFDM_index[p] < OFDM_min_index)
+ pDM_Odm->RFCalibrateInfo.OFDM_index[p] = OFDM_min_index;
+ }
+ ODM_RT_TRACE(
+ pDM_Odm,
+ ODM_COMP_TX_PWR_TRACK,
+ ODM_DBG_LOUD,
+ ("\n\n ========================================================================================================\n")
+ );
+ if (pDM_Odm->RFCalibrateInfo.CCK_index > c.SwingTableSize_CCK-1)
+ pDM_Odm->RFCalibrateInfo.CCK_index = c.SwingTableSize_CCK-1;
+ /* else if (pDM_Odm->RFCalibrateInfo.CCK_index < 0) */
+ /* pDM_Odm->RFCalibrateInfo.CCK_index = 0; */
+ } else {
+ ODM_RT_TRACE(
+ pDM_Odm,
+ ODM_COMP_TX_PWR_TRACK,
+ ODM_DBG_LOUD,
+ (
+ "The thermal meter is unchanged or TxPowerTracking OFF(%d): ThermalValue: %d , pDM_Odm->RFCalibrateInfo.ThermalValue: %d\n",
+ pDM_Odm->RFCalibrateInfo.TxPowerTrackControl,
+ ThermalValue,
+ pDM_Odm->RFCalibrateInfo.ThermalValue
+ )
+ );
+
+ for (p = ODM_RF_PATH_A; p < c.RfPathCount; p++)
+ pDM_Odm->RFCalibrateInfo.PowerIndexOffset[p] = 0;
+ }
+ ODM_RT_TRACE(
+ pDM_Odm,
+ ODM_COMP_TX_PWR_TRACK,
+ ODM_DBG_LOUD,
+ (
+ "TxPowerTracking: [CCK] Swing Current Index: %d, Swing Base Index: %d\n",
+ pDM_Odm->RFCalibrateInfo.CCK_index,
+ pDM_Odm->BbSwingIdxCckBase
+ )
+ );
+
+ /* Print Swing base & current */
+ for (p = ODM_RF_PATH_A; p < c.RfPathCount; p++) {
+ ODM_RT_TRACE(
+ pDM_Odm,
+ ODM_COMP_TX_PWR_TRACK,
+ ODM_DBG_LOUD,
+ (
+ "TxPowerTracking: [OFDM] Swing Current Index: %d, Swing Base Index[%c]: %d\n",
+ pDM_Odm->RFCalibrateInfo.OFDM_index[p],
+ (p == ODM_RF_PATH_A ? 'A' : 'B'),
+ pDM_Odm->BbSwingIdxOfdmBase[p]
+ )
+ );
+ }
+
+ if (
+ (pDM_Odm->RFCalibrateInfo.PowerIndexOffset[ODM_RF_PATH_A] != 0 ||
+ pDM_Odm->RFCalibrateInfo.PowerIndexOffset[ODM_RF_PATH_B] != 0) &&
+ pDM_Odm->RFCalibrateInfo.TxPowerTrackControl
+ ) {
+ /* 4 7.2 Configure the Swing Table to adjust Tx Power. */
+
+ pDM_Odm->RFCalibrateInfo.bTxPowerChanged = true; /* Always true after Tx Power is adjusted by power tracking. */
+ /* */
+		/* 2012/04/23 MH According to Luke's suggestion, we cannot write the BB digital */
+ /* to increase TX power. Otherwise, EVM will be bad. */
+ /* */
+ /* 2012/04/25 MH Add for tx power tracking to set tx power in tx agc for 88E. */
+ if (ThermalValue > pDM_Odm->RFCalibrateInfo.ThermalValue) {
+ ODM_RT_TRACE(
+ pDM_Odm,
+ ODM_COMP_TX_PWR_TRACK,
+ ODM_DBG_LOUD,
+ (
+ "Temperature Increasing(A): delta_pi: %d , delta_t: %d, Now_t: %d, EFUSE_t: %d, Last_t: %d\n",
+ pDM_Odm->RFCalibrateInfo.PowerIndexOffset[ODM_RF_PATH_A],
+ delta,
+ ThermalValue,
+ pHalData->EEPROMThermalMeter,
+ pDM_Odm->RFCalibrateInfo.ThermalValue
+ )
+ );
+
+ if (c.RfPathCount > 1)
+ ODM_RT_TRACE(
+ pDM_Odm,
+ ODM_COMP_TX_PWR_TRACK,
+ ODM_DBG_LOUD,
+ (
+ "Temperature Increasing(B): delta_pi: %d , delta_t: %d, Now_t: %d, EFUSE_t: %d, Last_t: %d\n",
+ pDM_Odm->RFCalibrateInfo.PowerIndexOffset[ODM_RF_PATH_B],
+ delta,
+ ThermalValue,
+ pHalData->EEPROMThermalMeter,
+ pDM_Odm->RFCalibrateInfo.ThermalValue
+ )
+ );
+
+ } else if (ThermalValue < pDM_Odm->RFCalibrateInfo.ThermalValue) { /* Low temperature */
+ ODM_RT_TRACE(
+ pDM_Odm,
+ ODM_COMP_TX_PWR_TRACK,
+ ODM_DBG_LOUD,
+ (
+ "Temperature Decreasing(A): delta_pi: %d , delta_t: %d, Now_t: %d, EFUSE_t: %d, Last_t: %d\n",
+ pDM_Odm->RFCalibrateInfo.PowerIndexOffset[ODM_RF_PATH_A],
+ delta,
+ ThermalValue,
+ pHalData->EEPROMThermalMeter,
+ pDM_Odm->RFCalibrateInfo.ThermalValue
+ )
+ );
+
+ if (c.RfPathCount > 1)
+ ODM_RT_TRACE(
+ pDM_Odm,
+ ODM_COMP_TX_PWR_TRACK,
+ ODM_DBG_LOUD,
+ (
+ "Temperature Decreasing(B): delta_pi: %d , delta_t: %d, Now_t: %d, EFUSE_t: %d, Last_t: %d\n",
+ pDM_Odm->RFCalibrateInfo.PowerIndexOffset[ODM_RF_PATH_B],
+ delta,
+ ThermalValue,
+ pHalData->EEPROMThermalMeter,
+ pDM_Odm->RFCalibrateInfo.ThermalValue
+ )
+ );
+
+ }
+
+ if (ThermalValue > pHalData->EEPROMThermalMeter) {
+ ODM_RT_TRACE(
+ pDM_Odm,
+ ODM_COMP_TX_PWR_TRACK,
+ ODM_DBG_LOUD,
+ (
+ "Temperature(%d) higher than PG value(%d)\n",
+ ThermalValue,
+ pHalData->EEPROMThermalMeter
+ )
+ );
+
+ ODM_RT_TRACE(
+ pDM_Odm,
+ ODM_COMP_TX_PWR_TRACK,
+ ODM_DBG_LOUD,
+ ("**********Enter POWER Tracking MIX_MODE**********\n")
+ );
+ for (p = ODM_RF_PATH_A; p < c.RfPathCount; p++)
+ (*c.ODM_TxPwrTrackSetPwr)(pDM_Odm, MIX_MODE, p, 0);
+ } else {
+ ODM_RT_TRACE(
+ pDM_Odm,
+ ODM_COMP_TX_PWR_TRACK,
+ ODM_DBG_LOUD,
+ (
+ "Temperature(%d) lower than PG value(%d)\n",
+ ThermalValue,
+ pHalData->EEPROMThermalMeter
+ )
+ );
+
+ ODM_RT_TRACE(
+ pDM_Odm,
+ ODM_COMP_TX_PWR_TRACK,
+ ODM_DBG_LOUD,
+ ("**********Enter POWER Tracking MIX_MODE**********\n")
+ );
+ for (p = ODM_RF_PATH_A; p < c.RfPathCount; p++)
+ (*c.ODM_TxPwrTrackSetPwr)(pDM_Odm, MIX_MODE, p, Indexforchannel);
+ }
+
+ /* Record last time Power Tracking result as base. */
+ pDM_Odm->BbSwingIdxCckBase = pDM_Odm->BbSwingIdxCck;
+ for (p = ODM_RF_PATH_A; p < c.RfPathCount; p++)
+ pDM_Odm->BbSwingIdxOfdmBase[p] = pDM_Odm->BbSwingIdxOfdm[p];
+
+ ODM_RT_TRACE(
+ pDM_Odm,
+ ODM_COMP_TX_PWR_TRACK, ODM_DBG_LOUD,
+ (
+ "pDM_Odm->RFCalibrateInfo.ThermalValue = %d ThermalValue = %d\n",
+ pDM_Odm->RFCalibrateInfo.ThermalValue,
+ ThermalValue
+ )
+ );
+
+ /* Record last Power Tracking Thermal Value */
+ pDM_Odm->RFCalibrateInfo.ThermalValue = ThermalValue;
+ }
+
+ ODM_RT_TRACE(
+ pDM_Odm,
+ ODM_COMP_TX_PWR_TRACK,
+ ODM_DBG_LOUD,
+ ("<===ODM_TXPowerTrackingCallback_ThermalMeter\n")
+ );
+
+ pDM_Odm->RFCalibrateInfo.TXPowercount = 0;
+}
+
+
+
+
+/* 3 ============================================================ */
+/* 3 IQ Calibration */
+/* 3 ============================================================ */
+
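+/* Map a channel number to its slot in the IQK result table: 5G channels
+ * return their 1-based position among the 5G entries of channel_all[]
+ * (36 -> 1, 38 -> 2, ...); 2.4G and unknown channels return 0.
+ */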
+u8 ODM_GetRightChnlPlaceforIQK(u8 chnl)
+{
+ u8 channel_all[ODM_TARGET_CHNL_NUM_2G_5G] = {
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
+ 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58,
+ 60, 62, 64, 100, 102, 104, 106, 108, 110, 112,
+ 114, 116, 118, 120, 122, 124, 126, 128, 130, 132,
+ 134, 136, 138, 140, 149, 151, 153, 155, 157, 159,
+ 161, 163, 165
+ };
+ u8 place = chnl;
+
+
+ if (chnl > 14) {
+ for (place = 14; place < sizeof(channel_all); place++) {
+ if (channel_all[place] == chnl)
+ return place-13;
+ }
+ }
+ return 0;
+
+}
diff --git a/drivers/staging/rtl8723bs/hal/HalPhyRf.h b/drivers/staging/rtl8723bs/hal/HalPhyRf.h
new file mode 100644
index 000000000000..bd7462d4ed91
--- /dev/null
+++ b/drivers/staging/rtl8723bs/hal/HalPhyRf.h
@@ -0,0 +1,63 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+
+#ifndef __HAL_PHY_RF_H__
+#define __HAL_PHY_RF_H__
+
+typedef enum _SPUR_CAL_METHOD {
+ PLL_RESET,
+ AFE_PHASE_SEL
+} SPUR_CAL_METHOD;
+
+typedef enum _PWRTRACK_CONTROL_METHOD {
+ BBSWING,
+ TXAGC,
+ MIX_MODE
+} PWRTRACK_METHOD;
+
+typedef void (*FuncSetPwr)(PDM_ODM_T, PWRTRACK_METHOD, u8, u8);
+typedef void (*FuncIQK)(PDM_ODM_T, u8, u8, u8);
+typedef void (*FuncLCK)(PDM_ODM_T);
+typedef void (*FuncSwing)(PDM_ODM_T, u8 **, u8 **, u8 **, u8 **);
+
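+/* Per-chip TX power tracking configuration: swing-table sizes, IQK trigger
+ * threshold, thermal-meter RF register and the chip-specific calibration
+ * callbacks consumed by ODM_TXPowerTrackingCallback_ThermalMeter().
+ */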
+typedef struct _TXPWRTRACK_CFG {
+ u8 SwingTableSize_CCK;
+ u8 SwingTableSize_OFDM;
+ u8 Threshold_IQK;
+ u8 AverageThermalNum;
+ u8 RfPathCount;
+ u32 ThermalRegAddr;
+ FuncSetPwr ODM_TxPwrTrackSetPwr;
+ FuncIQK DoIQK;
+ FuncLCK PHY_LCCalibrate;
+ FuncSwing GetDeltaSwingTable;
+} TXPWRTRACK_CFG, *PTXPWRTRACK_CFG;
+
+void ConfigureTxpowerTrack(PDM_ODM_T pDM_Odm, PTXPWRTRACK_CFG pConfig);
+
+
+void ODM_ClearTxPowerTrackingState(PDM_ODM_T pDM_Odm);
+
+void ODM_TXPowerTrackingCallback_ThermalMeter(struct adapter *Adapter);
+
+
+
+#define ODM_TARGET_CHNL_NUM_2G_5G 59
+
+
+u8 ODM_GetRightChnlPlaceforIQK(u8 chnl);
+
+
+#endif /* #ifndef __HAL_PHY_RF_H__ */
diff --git a/drivers/staging/rtl8723bs/hal/HalPhyRf_8723B.c b/drivers/staging/rtl8723bs/hal/HalPhyRf_8723B.c
new file mode 100644
index 000000000000..c16e147d8adc
--- /dev/null
+++ b/drivers/staging/rtl8723bs/hal/HalPhyRf_8723B.c
@@ -0,0 +1,2091 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+
+#include <drv_types.h>
+#include <rtw_debug.h>
+#include "odm_precomp.h"
+
+
+
+/*---------------------------Define Local Constant---------------------------*/
+/* 2010/04/25 MH Define the max tx power tracking tx agc power. */
+#define ODM_TXPWRTRACK_MAX_IDX8723B 6
+
+/* MACRO definition for pRFCalibrateInfo->TxIQC_8723B[0] */
+#define PATH_S0 1 /* RF_PATH_B */
+#define IDX_0xC94 0
+#define IDX_0xC80 1
+#define IDX_0xC4C 2
+#define IDX_0xC14 0
+#define IDX_0xCA0 1
+#define KEY 0
+#define VAL 1
+
+/* MACRO definition for pRFCalibrateInfo->TxIQC_8723B[1] */
+#define PATH_S1 0 /* RF_PATH_A */
+#define IDX_0xC9C 0
+#define IDX_0xC88 1
+#define IDX_0xC4C 2
+#define IDX_0xC1C 0
+#define IDX_0xC78 1
+
+
+/*---------------------------Define Local Constant---------------------------*/
+
+/* In the case that we fail to read TxPowerTrack.txt, we use the table for
+ * 88E as the default table.
+ */
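+/* Indexed by the absolute thermal delta |ThermalValue - EEPROMThermalMeter|;
+ * each entry is the BB swing index step for that delta (_P when the current
+ * temperature is above the EEPROM calibration value, _N when it is below).
+ */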
+static u8 DeltaSwingTableIdx_2GA_N_8188E[] = {
+ 0, 0, 0, 2, 2, 3, 3, 4, 4, 4, 4, 5, 5, 6, 6,
+ 7, 7, 7, 7, 8, 8, 9, 9, 10, 10, 10, 11, 11, 11, 11
+};
+static u8 DeltaSwingTableIdx_2GA_P_8188E[] = {
+ 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 4, 4, 4,
+ 4, 4, 4, 5, 5, 7, 7, 8, 8, 8, 9, 9, 9, 9, 9
+};
+
+/* 3 ============================================================ */
+/* 3 Tx Power Tracking */
+/* 3 ============================================================ */
+
+
+static void setIqkMatrix_8723B(
+ PDM_ODM_T pDM_Odm,
+ u8 OFDM_index,
+ u8 RFPath,
+ s32 IqkResult_X,
+ s32 IqkResult_Y
+)
+{
+ s32 ele_A = 0, ele_D, ele_C = 0, value32;
+
+ if (OFDM_index >= OFDM_TABLE_SIZE)
+ OFDM_index = OFDM_TABLE_SIZE-1;
+
+ ele_D = (OFDMSwingTable_New[OFDM_index] & 0xFFC00000)>>22;
+
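+	/* IqkResult_X/Y are 10-bit signed values (bit 9 is the sign bit), so
+	 * they are sign-extended to 32 bits before the multiply; elements A
+	 * and C take bits [17:8] of X*D and Y*D, and the next lower bit of
+	 * X*D is written separately into rOFDM0_ECCAThreshold below.
+	 */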
+ /* new element A = element D x X */
+ if ((IqkResult_X != 0) && (*(pDM_Odm->pBandType) == ODM_BAND_2_4G)) {
+ if ((IqkResult_X & 0x00000200) != 0) /* consider minus */
+ IqkResult_X = IqkResult_X | 0xFFFFFC00;
+ ele_A = ((IqkResult_X * ele_D)>>8)&0x000003FF;
+
+ /* new element C = element D x Y */
+ if ((IqkResult_Y & 0x00000200) != 0)
+ IqkResult_Y = IqkResult_Y | 0xFFFFFC00;
+ ele_C = ((IqkResult_Y * ele_D)>>8)&0x000003FF;
+
+ /* if (RFPath == ODM_RF_PATH_A) */
+ switch (RFPath) {
+ case ODM_RF_PATH_A:
+			/* write new elements A, C, D to regC80 and regC94; element B is always 0 */
+ value32 = (ele_D<<22)|((ele_C&0x3F)<<16)|ele_A;
+ PHY_SetBBReg(pDM_Odm->Adapter, rOFDM0_XATxIQImbalance, bMaskDWord, value32);
+
+ value32 = (ele_C&0x000003C0)>>6;
+ PHY_SetBBReg(pDM_Odm->Adapter, rOFDM0_XCTxAFE, bMaskH4Bits, value32);
+
+ value32 = ((IqkResult_X * ele_D)>>7)&0x01;
+ PHY_SetBBReg(pDM_Odm->Adapter, rOFDM0_ECCAThreshold, BIT24, value32);
+ break;
+ case ODM_RF_PATH_B:
+			/* write new elements A, C, D to regC88 and regC9C; element B is always 0 */
+ value32 = (ele_D<<22)|((ele_C&0x3F)<<16)|ele_A;
+ PHY_SetBBReg(pDM_Odm->Adapter, rOFDM0_XBTxIQImbalance, bMaskDWord, value32);
+
+ value32 = (ele_C&0x000003C0)>>6;
+ PHY_SetBBReg(pDM_Odm->Adapter, rOFDM0_XDTxAFE, bMaskH4Bits, value32);
+
+ value32 = ((IqkResult_X * ele_D)>>7)&0x01;
+ PHY_SetBBReg(pDM_Odm->Adapter, rOFDM0_ECCAThreshold, BIT28, value32);
+
+ break;
+ default:
+ break;
+ }
+ } else {
+ switch (RFPath) {
+ case ODM_RF_PATH_A:
+ PHY_SetBBReg(pDM_Odm->Adapter, rOFDM0_XATxIQImbalance, bMaskDWord, OFDMSwingTable_New[OFDM_index]);
+ PHY_SetBBReg(pDM_Odm->Adapter, rOFDM0_XCTxAFE, bMaskH4Bits, 0x00);
+ PHY_SetBBReg(pDM_Odm->Adapter, rOFDM0_ECCAThreshold, BIT24, 0x00);
+ break;
+
+ case ODM_RF_PATH_B:
+ PHY_SetBBReg(pDM_Odm->Adapter, rOFDM0_XBTxIQImbalance, bMaskDWord, OFDMSwingTable_New[OFDM_index]);
+ PHY_SetBBReg(pDM_Odm->Adapter, rOFDM0_XDTxAFE, bMaskH4Bits, 0x00);
+ PHY_SetBBReg(pDM_Odm->Adapter, rOFDM0_ECCAThreshold, BIT28, 0x00);
+ break;
+
+ default:
+ break;
+ }
+ }
+
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_TX_PWR_TRACK, ODM_DBG_LOUD, ("TxPwrTracking path B: X = 0x%x, Y = 0x%x ele_A = 0x%x ele_C = 0x%x ele_D = 0x%x 0xeb4 = 0x%x 0xebc = 0x%x\n",
+ (u32)IqkResult_X, (u32)IqkResult_Y, (u32)ele_A, (u32)ele_C, (u32)ele_D, (u32)IqkResult_X, (u32)IqkResult_Y));
+}
+
+
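+/* Program the eight CCK TX filter coefficients (registers 0xa22-0xa29) for
+ * the requested swing index, using the dedicated channel 14 table when
+ * bCCKinCH14 is set.
+ */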
+static void setCCKFilterCoefficient(PDM_ODM_T pDM_Odm, u8 CCKSwingIndex)
+{
+ if (!pDM_Odm->RFCalibrateInfo.bCCKinCH14) {
+ rtw_write8(pDM_Odm->Adapter, 0xa22, CCKSwingTable_Ch1_Ch13_New[CCKSwingIndex][0]);
+ rtw_write8(pDM_Odm->Adapter, 0xa23, CCKSwingTable_Ch1_Ch13_New[CCKSwingIndex][1]);
+ rtw_write8(pDM_Odm->Adapter, 0xa24, CCKSwingTable_Ch1_Ch13_New[CCKSwingIndex][2]);
+ rtw_write8(pDM_Odm->Adapter, 0xa25, CCKSwingTable_Ch1_Ch13_New[CCKSwingIndex][3]);
+ rtw_write8(pDM_Odm->Adapter, 0xa26, CCKSwingTable_Ch1_Ch13_New[CCKSwingIndex][4]);
+ rtw_write8(pDM_Odm->Adapter, 0xa27, CCKSwingTable_Ch1_Ch13_New[CCKSwingIndex][5]);
+ rtw_write8(pDM_Odm->Adapter, 0xa28, CCKSwingTable_Ch1_Ch13_New[CCKSwingIndex][6]);
+ rtw_write8(pDM_Odm->Adapter, 0xa29, CCKSwingTable_Ch1_Ch13_New[CCKSwingIndex][7]);
+ } else {
+ rtw_write8(pDM_Odm->Adapter, 0xa22, CCKSwingTable_Ch14_New[CCKSwingIndex][0]);
+ rtw_write8(pDM_Odm->Adapter, 0xa23, CCKSwingTable_Ch14_New[CCKSwingIndex][1]);
+ rtw_write8(pDM_Odm->Adapter, 0xa24, CCKSwingTable_Ch14_New[CCKSwingIndex][2]);
+ rtw_write8(pDM_Odm->Adapter, 0xa25, CCKSwingTable_Ch14_New[CCKSwingIndex][3]);
+ rtw_write8(pDM_Odm->Adapter, 0xa26, CCKSwingTable_Ch14_New[CCKSwingIndex][4]);
+ rtw_write8(pDM_Odm->Adapter, 0xa27, CCKSwingTable_Ch14_New[CCKSwingIndex][5]);
+ rtw_write8(pDM_Odm->Adapter, 0xa28, CCKSwingTable_Ch14_New[CCKSwingIndex][6]);
+ rtw_write8(pDM_Odm->Adapter, 0xa29, CCKSwingTable_Ch14_New[CCKSwingIndex][7]);
+ }
+}
+
+void DoIQK_8723B(
+ PDM_ODM_T pDM_Odm,
+ u8 DeltaThermalIndex,
+ u8 ThermalValue,
+ u8 Threshold
+)
+{
+}
+
+/*-----------------------------------------------------------------------------
+ * Function: ODM_TxPwrTrackSetPwr_8723B()
+ *
+ * Overview: Change the tx power of all channels according to the flag.
+ * OFDM and CCK are handled differently.
+ *
+ * Input: NONE
+ *
+ * Output: NONE
+ *
+ * Return: NONE
+ *
+ * Revised History:
+ *When Who Remark
+ *04/23/2012 MHC Create Version 0.
+ *
+ *---------------------------------------------------------------------------*/
+void ODM_TxPwrTrackSetPwr_8723B(
+ PDM_ODM_T pDM_Odm,
+ PWRTRACK_METHOD Method,
+ u8 RFPath,
+ u8 ChannelMappedIndex
+)
+{
+ struct adapter *Adapter = pDM_Odm->Adapter;
+ struct hal_com_data *pHalData = GET_HAL_DATA(Adapter);
+ u8 PwrTrackingLimit_OFDM = 34; /* 0dB */
+ u8 PwrTrackingLimit_CCK = 28; /* 2dB */
+ u8 TxRate = 0xFF;
+ u8 Final_OFDM_Swing_Index = 0;
+ u8 Final_CCK_Swing_Index = 0;
+
+ {
+ u16 rate = *(pDM_Odm->pForcedDataRate);
+
+ if (!rate) { /* auto rate */
+ if (pDM_Odm->TxRate != 0xFF)
+ TxRate = HwRateToMRate(pDM_Odm->TxRate);
+ } else /* force rate */
+ TxRate = (u8)rate;
+
+ }
+
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_TX_PWR_TRACK, ODM_DBG_LOUD, ("===>ODM_TxPwrTrackSetPwr8723B\n"));
+
+ if (TxRate != 0xFF) {
+ /* 2 CCK */
+ if ((TxRate >= MGN_1M) && (TxRate <= MGN_11M))
+ PwrTrackingLimit_CCK = 28; /* 2dB */
+ /* 2 OFDM */
+ else if ((TxRate >= MGN_6M) && (TxRate <= MGN_48M))
+ PwrTrackingLimit_OFDM = 36; /* 3dB */
+ else if (TxRate == MGN_54M)
+ PwrTrackingLimit_OFDM = 34; /* 2dB */
+
+ /* 2 HT */
+ else if ((TxRate >= MGN_MCS0) && (TxRate <= MGN_MCS2)) /* QPSK/BPSK */
+ PwrTrackingLimit_OFDM = 38; /* 4dB */
+ else if ((TxRate >= MGN_MCS3) && (TxRate <= MGN_MCS4)) /* 16QAM */
+ PwrTrackingLimit_OFDM = 36; /* 3dB */
+ else if ((TxRate >= MGN_MCS5) && (TxRate <= MGN_MCS7)) /* 64QAM */
+ PwrTrackingLimit_OFDM = 34; /* 2dB */
+
+ else
+ PwrTrackingLimit_OFDM = pDM_Odm->DefaultOfdmIndex; /* Default OFDM index = 30 */
+ }
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_TX_PWR_TRACK, ODM_DBG_LOUD, ("TxRate = 0x%x, PwrTrackingLimit =%d\n", TxRate, PwrTrackingLimit_OFDM));
+
+ if (Method == TXAGC) {
+ struct adapter *Adapter = pDM_Odm->Adapter;
+
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_TX_PWR_TRACK, ODM_DBG_LOUD, ("odm_TxPwrTrackSetPwr8723B CH =%d\n", *(pDM_Odm->pChannel)));
+
+ pDM_Odm->Remnant_OFDMSwingIdx[RFPath] = pDM_Odm->Absolute_OFDMSwingIdx[RFPath];
+
+ pDM_Odm->Modify_TxAGC_Flag_PathA = true;
+ pDM_Odm->Modify_TxAGC_Flag_PathA_CCK = true;
+
+ PHY_SetTxPowerIndexByRateSection(Adapter, RFPath, pHalData->CurrentChannel, CCK);
+ PHY_SetTxPowerIndexByRateSection(Adapter, RFPath, pHalData->CurrentChannel, OFDM);
+ PHY_SetTxPowerIndexByRateSection(Adapter, RFPath, pHalData->CurrentChannel, HT_MCS0_MCS7);
+ } else if (Method == BBSWING) {
+ Final_OFDM_Swing_Index = pDM_Odm->DefaultOfdmIndex + pDM_Odm->Absolute_OFDMSwingIdx[RFPath];
+ Final_CCK_Swing_Index = pDM_Odm->DefaultCckIndex + pDM_Odm->Absolute_OFDMSwingIdx[RFPath];
+
+ /* Adjust BB swing by OFDM IQ matrix */
+ if (Final_OFDM_Swing_Index >= PwrTrackingLimit_OFDM)
+ Final_OFDM_Swing_Index = PwrTrackingLimit_OFDM;
+ else if (Final_OFDM_Swing_Index <= 0)
+ Final_OFDM_Swing_Index = 0;
+
+ if (Final_CCK_Swing_Index >= CCK_TABLE_SIZE)
+ Final_CCK_Swing_Index = CCK_TABLE_SIZE-1;
+ else if (pDM_Odm->BbSwingIdxCck <= 0)
+ Final_CCK_Swing_Index = 0;
+
+ setIqkMatrix_8723B(pDM_Odm, Final_OFDM_Swing_Index, RFPath,
+ pDM_Odm->RFCalibrateInfo.IQKMatrixRegSetting[ChannelMappedIndex].Value[0][0],
+ pDM_Odm->RFCalibrateInfo.IQKMatrixRegSetting[ChannelMappedIndex].Value[0][1]);
+
+ setCCKFilterCoefficient(pDM_Odm, Final_CCK_Swing_Index);
+
+ } else if (Method == MIX_MODE) {
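+		/* MIX_MODE: compensate through the BB swing table as long as
+		 * the index stays within [0, PwrTrackingLimit]; any excess or
+		 * shortfall is kept in Remnant_OFDMSwingIdx/Remnant_CCKSwingIdx
+		 * and applied through TxAGC instead.
+		 */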
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_TX_PWR_TRACK, ODM_DBG_LOUD,
+ ("pDM_Odm->DefaultOfdmIndex =%d, pDM_Odm->DefaultCCKIndex =%d, pDM_Odm->Absolute_OFDMSwingIdx[RFPath]=%d, RF_Path = %d\n",
+ pDM_Odm->DefaultOfdmIndex, pDM_Odm->DefaultCckIndex, pDM_Odm->Absolute_OFDMSwingIdx[RFPath], RFPath));
+
+ Final_OFDM_Swing_Index = pDM_Odm->DefaultOfdmIndex + pDM_Odm->Absolute_OFDMSwingIdx[RFPath];
+ Final_CCK_Swing_Index = pDM_Odm->DefaultCckIndex + pDM_Odm->Absolute_OFDMSwingIdx[RFPath];
+
+		if (Final_OFDM_Swing_Index > PwrTrackingLimit_OFDM) { /* BBSwing higher than the limit */
+ pDM_Odm->Remnant_OFDMSwingIdx[RFPath] = Final_OFDM_Swing_Index - PwrTrackingLimit_OFDM;
+
+ setIqkMatrix_8723B(pDM_Odm, PwrTrackingLimit_OFDM, RFPath,
+ pDM_Odm->RFCalibrateInfo.IQKMatrixRegSetting[ChannelMappedIndex].Value[0][0],
+ pDM_Odm->RFCalibrateInfo.IQKMatrixRegSetting[ChannelMappedIndex].Value[0][1]);
+
+ pDM_Odm->Modify_TxAGC_Flag_PathA = true;
+ PHY_SetTxPowerIndexByRateSection(Adapter, RFPath, pHalData->CurrentChannel, OFDM);
+ PHY_SetTxPowerIndexByRateSection(Adapter, RFPath, pHalData->CurrentChannel, HT_MCS0_MCS7);
+
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_TX_PWR_TRACK, ODM_DBG_LOUD,
+ ("******Path_A Over BBSwing Limit , PwrTrackingLimit = %d , Remnant TxAGC Value = %d\n",
+ PwrTrackingLimit_OFDM, pDM_Odm->Remnant_OFDMSwingIdx[RFPath]));
+ } else if (Final_OFDM_Swing_Index <= 0) {
+ pDM_Odm->Remnant_OFDMSwingIdx[RFPath] = Final_OFDM_Swing_Index;
+
+ setIqkMatrix_8723B(pDM_Odm, 0, RFPath,
+ pDM_Odm->RFCalibrateInfo.IQKMatrixRegSetting[ChannelMappedIndex].Value[0][0],
+ pDM_Odm->RFCalibrateInfo.IQKMatrixRegSetting[ChannelMappedIndex].Value[0][1]);
+
+ pDM_Odm->Modify_TxAGC_Flag_PathA = true;
+ PHY_SetTxPowerIndexByRateSection(Adapter, RFPath, pHalData->CurrentChannel, OFDM);
+ PHY_SetTxPowerIndexByRateSection(Adapter, RFPath, pHalData->CurrentChannel, HT_MCS0_MCS7);
+
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_TX_PWR_TRACK, ODM_DBG_LOUD,
+				("******Path_A Lower than BBSwing lower bound 0, Remnant TxAGC Value = %d\n",
+ pDM_Odm->Remnant_OFDMSwingIdx[RFPath]));
+ } else {
+ setIqkMatrix_8723B(pDM_Odm, Final_OFDM_Swing_Index, RFPath,
+ pDM_Odm->RFCalibrateInfo.IQKMatrixRegSetting[ChannelMappedIndex].Value[0][0],
+ pDM_Odm->RFCalibrateInfo.IQKMatrixRegSetting[ChannelMappedIndex].Value[0][1]);
+
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_TX_PWR_TRACK, ODM_DBG_LOUD,
+ ("******Path_A Compensate with BBSwing , Final_OFDM_Swing_Index = %d\n", Final_OFDM_Swing_Index));
+
+ if (pDM_Odm->Modify_TxAGC_Flag_PathA) { /* If TxAGC has changed, reset TxAGC again */
+ pDM_Odm->Remnant_OFDMSwingIdx[RFPath] = 0;
+ PHY_SetTxPowerIndexByRateSection(Adapter, RFPath, pHalData->CurrentChannel, OFDM);
+ PHY_SetTxPowerIndexByRateSection(Adapter, RFPath, pHalData->CurrentChannel, HT_MCS0_MCS7);
+ pDM_Odm->Modify_TxAGC_Flag_PathA = false;
+
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_TX_PWR_TRACK, ODM_DBG_LOUD,
+ ("******Path_A pDM_Odm->Modify_TxAGC_Flag = false\n"));
+ }
+ }
+
+ if (Final_CCK_Swing_Index > PwrTrackingLimit_CCK) {
+ pDM_Odm->Remnant_CCKSwingIdx = Final_CCK_Swing_Index - PwrTrackingLimit_CCK;
+ setCCKFilterCoefficient(pDM_Odm, PwrTrackingLimit_CCK);
+ pDM_Odm->Modify_TxAGC_Flag_PathA_CCK = true;
+ PHY_SetTxPowerIndexByRateSection(Adapter, RFPath, pHalData->CurrentChannel, CCK);
+
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_TX_PWR_TRACK, ODM_DBG_LOUD,
+ ("******Path_A CCK Over Limit , PwrTrackingLimit_CCK = %d , pDM_Odm->Remnant_CCKSwingIdx = %d\n", PwrTrackingLimit_CCK, pDM_Odm->Remnant_CCKSwingIdx));
+ } else if (Final_CCK_Swing_Index <= 0) { /* Lowest CCK Index = 0 */
+ pDM_Odm->Remnant_CCKSwingIdx = Final_CCK_Swing_Index;
+ setCCKFilterCoefficient(pDM_Odm, 0);
+ pDM_Odm->Modify_TxAGC_Flag_PathA_CCK = true;
+ PHY_SetTxPowerIndexByRateSection(Adapter, RFPath, pHalData->CurrentChannel, CCK);
+
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_TX_PWR_TRACK, ODM_DBG_LOUD,
+ ("******Path_A CCK Under Limit , PwrTrackingLimit_CCK = %d , pDM_Odm->Remnant_CCKSwingIdx = %d\n", 0, pDM_Odm->Remnant_CCKSwingIdx));
+ } else {
+ setCCKFilterCoefficient(pDM_Odm, Final_CCK_Swing_Index);
+
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_TX_PWR_TRACK, ODM_DBG_LOUD,
+ ("******Path_A CCK Compensate with BBSwing , Final_CCK_Swing_Index = %d\n", Final_CCK_Swing_Index));
+
+ if (pDM_Odm->Modify_TxAGC_Flag_PathA_CCK) { /* If TxAGC has changed, reset TxAGC again */
+ pDM_Odm->Remnant_CCKSwingIdx = 0;
+ PHY_SetTxPowerIndexByRateSection(Adapter, RFPath, pHalData->CurrentChannel, CCK);
+ pDM_Odm->Modify_TxAGC_Flag_PathA_CCK = false;
+
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_TX_PWR_TRACK, ODM_DBG_LOUD,
+ ("******Path_A pDM_Odm->Modify_TxAGC_Flag_CCK = false\n"));
+ }
+ }
+ } else
+ return; /* This method is not supported. */
+}
+
+static void GetDeltaSwingTable_8723B(
+ PDM_ODM_T pDM_Odm,
+ u8 **TemperatureUP_A,
+ u8 **TemperatureDOWN_A,
+ u8 **TemperatureUP_B,
+ u8 **TemperatureDOWN_B
+)
+{
+ struct adapter *Adapter = pDM_Odm->Adapter;
+ PODM_RF_CAL_T pRFCalibrateInfo = &(pDM_Odm->RFCalibrateInfo);
+ struct hal_com_data *pHalData = GET_HAL_DATA(Adapter);
+ u16 rate = *(pDM_Odm->pForcedDataRate);
+ u8 channel = pHalData->CurrentChannel;
+
+ if (1 <= channel && channel <= 14) {
+ if (IS_CCK_RATE(rate)) {
+ *TemperatureUP_A = pRFCalibrateInfo->DeltaSwingTableIdx_2GCCKA_P;
+ *TemperatureDOWN_A = pRFCalibrateInfo->DeltaSwingTableIdx_2GCCKA_N;
+ *TemperatureUP_B = pRFCalibrateInfo->DeltaSwingTableIdx_2GCCKB_P;
+ *TemperatureDOWN_B = pRFCalibrateInfo->DeltaSwingTableIdx_2GCCKB_N;
+ } else {
+ *TemperatureUP_A = pRFCalibrateInfo->DeltaSwingTableIdx_2GA_P;
+ *TemperatureDOWN_A = pRFCalibrateInfo->DeltaSwingTableIdx_2GA_N;
+ *TemperatureUP_B = pRFCalibrateInfo->DeltaSwingTableIdx_2GB_P;
+ *TemperatureDOWN_B = pRFCalibrateInfo->DeltaSwingTableIdx_2GB_N;
+ }
+ } /*else if (36 <= channel && channel <= 64) {
+ *TemperatureUP_A = pRFCalibrateInfo->DeltaSwingTableIdx_5GA_P[0];
+ *TemperatureDOWN_A = pRFCalibrateInfo->DeltaSwingTableIdx_5GA_N[0];
+ *TemperatureUP_B = pRFCalibrateInfo->DeltaSwingTableIdx_5GB_P[0];
+ *TemperatureDOWN_B = pRFCalibrateInfo->DeltaSwingTableIdx_5GB_N[0];
+ } else if (100 <= channel && channel <= 140) {
+ *TemperatureUP_A = pRFCalibrateInfo->DeltaSwingTableIdx_5GA_P[1];
+ *TemperatureDOWN_A = pRFCalibrateInfo->DeltaSwingTableIdx_5GA_N[1];
+ *TemperatureUP_B = pRFCalibrateInfo->DeltaSwingTableIdx_5GB_P[1];
+ *TemperatureDOWN_B = pRFCalibrateInfo->DeltaSwingTableIdx_5GB_N[1];
+ } else if (149 <= channel && channel <= 173) {
+ *TemperatureUP_A = pRFCalibrateInfo->DeltaSwingTableIdx_5GA_P[2];
+ *TemperatureDOWN_A = pRFCalibrateInfo->DeltaSwingTableIdx_5GA_N[2];
+ *TemperatureUP_B = pRFCalibrateInfo->DeltaSwingTableIdx_5GB_P[2];
+ *TemperatureDOWN_B = pRFCalibrateInfo->DeltaSwingTableIdx_5GB_N[2];
+ }*/else {
+ *TemperatureUP_A = (u8 *)DeltaSwingTableIdx_2GA_P_8188E;
+ *TemperatureDOWN_A = (u8 *)DeltaSwingTableIdx_2GA_N_8188E;
+ *TemperatureUP_B = (u8 *)DeltaSwingTableIdx_2GA_P_8188E;
+ *TemperatureDOWN_B = (u8 *)DeltaSwingTableIdx_2GA_N_8188E;
+ }
+
+ return;
+}
+
+
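+/* Fill the generic TXPWRTRACK_CFG with the 8723B table sizes, thermal-meter
+ * register and calibration callbacks; called via ConfigureTxpowerTrack().
+ */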
+void ConfigureTxpowerTrack_8723B(PTXPWRTRACK_CFG pConfig)
+{
+ pConfig->SwingTableSize_CCK = CCK_TABLE_SIZE;
+ pConfig->SwingTableSize_OFDM = OFDM_TABLE_SIZE;
+ pConfig->Threshold_IQK = IQK_THRESHOLD;
+ pConfig->AverageThermalNum = AVG_THERMAL_NUM_8723B;
+ pConfig->RfPathCount = MAX_PATH_NUM_8723B;
+ pConfig->ThermalRegAddr = RF_T_METER_8723B;
+
+ pConfig->ODM_TxPwrTrackSetPwr = ODM_TxPwrTrackSetPwr_8723B;
+ pConfig->DoIQK = DoIQK_8723B;
+ pConfig->PHY_LCCalibrate = PHY_LCCalibrate_8723B;
+ pConfig->GetDeltaSwingTable = GetDeltaSwingTable_8723B;
+}
+
+/* 1 7. IQK */
+#define MAX_TOLERANCE 5
+#define IQK_DELAY_TIME 1 /* ms */
+
+/* bit0 = 1 => Tx OK, bit1 = 1 => Rx OK */
+static u8 phy_PathA_IQK_8723B(
+ struct adapter *padapter, bool configPathB, u8 RF_Path
+)
+{
+ u32 regEAC, regE94, regE9C, tmp, Path_SEL_BB /*, regEA4*/;
+ u8 result = 0x00;
+
+ struct hal_com_data *pHalData = GET_HAL_DATA(padapter);
+ PDM_ODM_T pDM_Odm = &pHalData->odmpriv;
+
+ /* Save RF Path */
+ Path_SEL_BB = PHY_QueryBBReg(pDM_Odm->Adapter, 0x948, bMaskDWord);
+
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Path A IQK!\n"));
+
+ /* leave IQK mode */
+ PHY_SetBBReg(pDM_Odm->Adapter, rFPGA0_IQK, bMaskH3Bytes, 0x000000);
+
+ /* enable path A PA in TXIQK mode */
+ PHY_SetRFReg(pDM_Odm->Adapter, ODM_RF_PATH_A, RF_WE_LUT, 0x80000, 0x1);
+ PHY_SetRFReg(pDM_Odm->Adapter, ODM_RF_PATH_A, RF_RCK_OS, bRFRegOffsetMask, 0x18000);
+ PHY_SetRFReg(pDM_Odm->Adapter, ODM_RF_PATH_A, RF_TXPA_G1, bRFRegOffsetMask, 0x0003f);
+ PHY_SetRFReg(pDM_Odm->Adapter, ODM_RF_PATH_A, RF_TXPA_G2, bRFRegOffsetMask, 0xc7f87);
+ /* disable path B PA in TXIQK mode */
+/* PHY_SetRFReg(pDM_Odm->Adapter, ODM_RF_PATH_A, 0xed, bRFRegOffsetMask, 0x00020); */
+/* PHY_SetRFReg(pDM_Odm->Adapter, ODM_RF_PATH_A, 0x43, bRFRegOffsetMask, 0x40ec1); */
+
+ /* 1 Tx IQK */
+ /* IQK setting */
+ PHY_SetBBReg(pDM_Odm->Adapter, rTx_IQK, bMaskDWord, 0x01007c00);
+ PHY_SetBBReg(pDM_Odm->Adapter, rRx_IQK, bMaskDWord, 0x01004800);
+ /* path-A IQK setting */
+/* ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Path-A IQK setting!\n")); */
+ PHY_SetBBReg(pDM_Odm->Adapter, rTx_IQK_Tone_A, bMaskDWord, 0x18008c1c);
+ PHY_SetBBReg(pDM_Odm->Adapter, rRx_IQK_Tone_A, bMaskDWord, 0x38008c1c);
+ PHY_SetBBReg(pDM_Odm->Adapter, rTx_IQK_Tone_B, bMaskDWord, 0x38008c1c);
+ PHY_SetBBReg(pDM_Odm->Adapter, rRx_IQK_Tone_B, bMaskDWord, 0x38008c1c);
+/* PHY_SetBBReg(pDM_Odm->Adapter, rTx_IQK_PI_A, bMaskDWord, 0x8214010a); */
+ PHY_SetBBReg(pDM_Odm->Adapter, rTx_IQK_PI_A, bMaskDWord, 0x821303ea);
+ PHY_SetBBReg(pDM_Odm->Adapter, rRx_IQK_PI_A, bMaskDWord, 0x28110000);
+ PHY_SetBBReg(pDM_Odm->Adapter, rTx_IQK_PI_B, bMaskDWord, 0x82110000);
+ PHY_SetBBReg(pDM_Odm->Adapter, rRx_IQK_PI_B, bMaskDWord, 0x28110000);
+
+ /* LO calibration setting */
+/* ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("LO calibration setting!\n")); */
+ PHY_SetBBReg(pDM_Odm->Adapter, rIQK_AGC_Rsp, bMaskDWord, 0x00462911);
+
+ /* enter IQK mode */
+ PHY_SetBBReg(pDM_Odm->Adapter, rFPGA0_IQK, bMaskH3Bytes, 0x808000);
+
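+	/* 0x948 selects the shared antenna path (0x0 routes wifi to S1,
+	 * 0x280 to S0); GNT_BT (0x764) is pulled low for the one shot below,
+	 * presumably to keep BT off the shared RF path while the calibration
+	 * runs, and both registers are restored once the IQK completes.
+	 */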
+ /* Ant switch */
+ if (configPathB || (RF_Path == 0))
+ /* wifi switch to S1 */
+ PHY_SetBBReg(pDM_Odm->Adapter, 0x948, bMaskDWord, 0x00000000);
+ else
+ /* wifi switch to S0 */
+ PHY_SetBBReg(pDM_Odm->Adapter, 0x948, bMaskDWord, 0x00000280);
+
+ /* GNT_BT = 0 */
+ PHY_SetBBReg(pDM_Odm->Adapter, 0x764, bMaskDWord, 0x00000800);
+
+ /* One shot, path A LOK & IQK */
+/* ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("One shot, path A LOK & IQK!\n")); */
+ PHY_SetBBReg(pDM_Odm->Adapter, rIQK_AGC_Pts, bMaskDWord, 0xf9000000);
+ PHY_SetBBReg(pDM_Odm->Adapter, rIQK_AGC_Pts, bMaskDWord, 0xf8000000);
+
+ /* delay x ms */
+/* ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Delay %d ms for One shot, path A LOK & IQK.\n", IQK_DELAY_TIME_8723B)); */
+ /* PlatformStallExecution(IQK_DELAY_TIME_8723B*1000); */
+ mdelay(IQK_DELAY_TIME_8723B);
+
+ /* restore Ant Path */
+ PHY_SetBBReg(pDM_Odm->Adapter, 0x948, bMaskDWord, Path_SEL_BB);
+ /* GNT_BT = 1 */
+ PHY_SetBBReg(pDM_Odm->Adapter, 0x764, bMaskDWord, 0x00001800);
+
+ /* leave IQK mode */
+ PHY_SetBBReg(pDM_Odm->Adapter, rFPGA0_IQK, bMaskH3Bytes, 0x000000);
+
+
+ /* Check failed */
+ regEAC = PHY_QueryBBReg(pDM_Odm->Adapter, rRx_Power_After_IQK_A_2, bMaskDWord);
+ regE94 = PHY_QueryBBReg(pDM_Odm->Adapter, rTx_Power_Before_IQK_A, bMaskDWord);
+ regE9C = PHY_QueryBBReg(pDM_Odm->Adapter, rTx_Power_After_IQK_A, bMaskDWord);
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("0xeac = 0x%x\n", regEAC));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("0xe94 = 0x%x, 0xe9c = 0x%x\n", regE94, regE9C));
+ /* monitor image power before & after IQK */
+	ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("0xe90(before IQK) = 0x%x, 0xe98(after IQK) = 0x%x\n",
+ PHY_QueryBBReg(pDM_Odm->Adapter, 0xe90, bMaskDWord), PHY_QueryBBReg(pDM_Odm->Adapter, 0xe98, bMaskDWord)));
+
+
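+	/* The Tx IQK X result sits in bits [25:16] of 0xe94 and the Y result
+	 * in bits [25:16] of 0xe9c; tmp below is the magnitude of the 10-bit
+	 * signed Y. Tx is declared OK only when the failure bit (BIT28 of
+	 * 0xeac) is clear, X lies in (0xf0, 0x110) and differs from 0x142,
+	 * Y differs from 0x42 and |Y| is below 0xf.
+	 */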
+ /* Allen 20131125 */
+ tmp = (regE9C & 0x03FF0000)>>16;
+ if ((tmp & 0x200) > 0)
+ tmp = 0x400 - tmp;
+
+ if (
+ !(regEAC & BIT28) &&
+ (((regE94 & 0x03FF0000)>>16) != 0x142) &&
+ (((regE9C & 0x03FF0000)>>16) != 0x42) &&
+ (((regE94 & 0x03FF0000)>>16) < 0x110) &&
+ (((regE94 & 0x03FF0000)>>16) > 0xf0) &&
+ (tmp < 0xf)
+ )
+ result |= 0x01;
+ else /* if Tx not OK, ignore Rx */
+ return result;
+
+ return result;
+}
+
+/* bit0 = 1 => Tx OK, bit1 = 1 => Rx OK */
+static u8 phy_PathA_RxIQK8723B(
+ struct adapter *padapter, bool configPathB, u8 RF_Path
+)
+{
+ u32 regEAC, regE94, regE9C, regEA4, u4tmp, tmp, Path_SEL_BB;
+ u8 result = 0x00;
+ struct hal_com_data *pHalData = GET_HAL_DATA(padapter);
+ PDM_ODM_T pDM_Odm = &pHalData->odmpriv;
+
+/* ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Path A Rx IQK!\n")); */
+
+ /* Save RF Path */
+ Path_SEL_BB = PHY_QueryBBReg(pDM_Odm->Adapter, 0x948, bMaskDWord);
+
+ /* leave IQK mode */
+ PHY_SetBBReg(pDM_Odm->Adapter, rFPGA0_IQK, bMaskH3Bytes, 0x000000);
+
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Path A RX IQK:Get TXIMR setting\n"));
+ /* 1 Get TXIMR setting */
+ /* modify RXIQK mode table */
+/* ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Path-A Rx IQK modify RXIQK mode table!\n")); */
+ PHY_SetRFReg(pDM_Odm->Adapter, ODM_RF_PATH_A, RF_WE_LUT, 0x80000, 0x1);
+ PHY_SetRFReg(pDM_Odm->Adapter, ODM_RF_PATH_A, RF_RCK_OS, bRFRegOffsetMask, 0x18000);
+ PHY_SetRFReg(pDM_Odm->Adapter, ODM_RF_PATH_A, RF_TXPA_G1, bRFRegOffsetMask, 0x0001f);
+ /* LNA2 off, PA on for Dcut */
+ PHY_SetRFReg(pDM_Odm->Adapter, ODM_RF_PATH_A, RF_TXPA_G2, bRFRegOffsetMask, 0xf7fb7);
+/* PHY_SetRFReg(pDM_Odm->Adapter, ODM_RF_PATH_A, RF_WE_LUT, 0x80000, 0x0); */
+ PHY_SetBBReg(pDM_Odm->Adapter, rFPGA0_IQK, bMaskH3Bytes, 0x808000);
+
+ /* IQK setting */
+ PHY_SetBBReg(pDM_Odm->Adapter, rTx_IQK, bMaskDWord, 0x01007c00);
+ PHY_SetBBReg(pDM_Odm->Adapter, rRx_IQK, bMaskDWord, 0x01004800);
+
+ /* path-A IQK setting */
+ PHY_SetBBReg(pDM_Odm->Adapter, rTx_IQK_Tone_A, bMaskDWord, 0x18008c1c);
+ PHY_SetBBReg(pDM_Odm->Adapter, rRx_IQK_Tone_A, bMaskDWord, 0x38008c1c);
+ PHY_SetBBReg(pDM_Odm->Adapter, rTx_IQK_Tone_B, bMaskDWord, 0x38008c1c);
+ PHY_SetBBReg(pDM_Odm->Adapter, rRx_IQK_Tone_B, bMaskDWord, 0x38008c1c);
+
+/* PHY_SetBBReg(pDM_Odm->Adapter, rTx_IQK_PI_A, bMaskDWord, 0x82160c1f); */
+ PHY_SetBBReg(pDM_Odm->Adapter, rTx_IQK_PI_A, bMaskDWord, 0x82130ff0);
+ PHY_SetBBReg(pDM_Odm->Adapter, rRx_IQK_PI_A, bMaskDWord, 0x28110000);
+ PHY_SetBBReg(pDM_Odm->Adapter, rTx_IQK_PI_B, bMaskDWord, 0x82110000);
+ PHY_SetBBReg(pDM_Odm->Adapter, rRx_IQK_PI_B, bMaskDWord, 0x28110000);
+
+ /* LO calibration setting */
+/* ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("LO calibration setting!\n")); */
+ PHY_SetBBReg(pDM_Odm->Adapter, rIQK_AGC_Rsp, bMaskDWord, 0x0046a911);
+
+ /* enter IQK mode */
+ PHY_SetBBReg(pDM_Odm->Adapter, rFPGA0_IQK, bMaskH3Bytes, 0x808000);
+
+ /* Ant switch */
+ if (configPathB || (RF_Path == 0))
+ /* wifi switch to S1 */
+ PHY_SetBBReg(pDM_Odm->Adapter, 0x948, bMaskDWord, 0x00000000);
+ else
+ /* wifi switch to S0 */
+ PHY_SetBBReg(pDM_Odm->Adapter, 0x948, bMaskDWord, 0x00000280);
+
+ /* GNT_BT = 0 */
+ PHY_SetBBReg(pDM_Odm->Adapter, 0x764, bMaskDWord, 0x00000800);
+
+ /* One shot, path A LOK & IQK */
+/* ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("One shot, path A LOK & IQK!\n")); */
+ PHY_SetBBReg(pDM_Odm->Adapter, rIQK_AGC_Pts, bMaskDWord, 0xf9000000);
+ PHY_SetBBReg(pDM_Odm->Adapter, rIQK_AGC_Pts, bMaskDWord, 0xf8000000);
+
+ /* delay x ms */
+/* ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Delay %d ms for One shot, path A LOK & IQK.\n", IQK_DELAY_TIME_8723B)); */
+ /* PlatformStallExecution(IQK_DELAY_TIME_8723B*1000); */
+ mdelay(IQK_DELAY_TIME_8723B);
+
+ /* restore Ant Path */
+ PHY_SetBBReg(pDM_Odm->Adapter, 0x948, bMaskDWord, Path_SEL_BB);
+ /* GNT_BT = 1 */
+ PHY_SetBBReg(pDM_Odm->Adapter, 0x764, bMaskDWord, 0x00001800);
+
+ /* leave IQK mode */
+ PHY_SetBBReg(pDM_Odm->Adapter, rFPGA0_IQK, bMaskH3Bytes, 0x000000);
+
+ /* Check failed */
+ regEAC = PHY_QueryBBReg(pDM_Odm->Adapter, rRx_Power_After_IQK_A_2, bMaskDWord);
+ regE94 = PHY_QueryBBReg(pDM_Odm->Adapter, rTx_Power_Before_IQK_A, bMaskDWord);
+ regE9C = PHY_QueryBBReg(pDM_Odm->Adapter, rTx_Power_After_IQK_A, bMaskDWord);
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("0xeac = 0x%x\n", regEAC));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("0xe94 = 0x%x, 0xe9c = 0x%x\n", regE94, regE9C));
+ /* monitor image power before & after IQK */
+	ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("0xe90(before IQK) = 0x%x, 0xe98(after IQK) = 0x%x\n",
+ PHY_QueryBBReg(pDM_Odm->Adapter, 0xe90, bMaskDWord), PHY_QueryBBReg(pDM_Odm->Adapter, 0xe98, bMaskDWord)));
+
+ /* Allen 20131125 */
+ tmp = (regE9C & 0x03FF0000)>>16;
+ if ((tmp & 0x200) > 0)
+ tmp = 0x400 - tmp;
+
+ if (
+ !(regEAC & BIT28) &&
+ (((regE94 & 0x03FF0000)>>16) != 0x142) &&
+ (((regE9C & 0x03FF0000)>>16) != 0x42) &&
+ (((regE94 & 0x03FF0000)>>16) < 0x110) &&
+ (((regE94 & 0x03FF0000)>>16) > 0xf0) &&
+ (tmp < 0xf)
+ )
+ result |= 0x01;
+ else /* if Tx not OK, ignore Rx */
+ return result;
+
+ u4tmp = 0x80007C00 | (regE94&0x3FF0000) | ((regE9C&0x3FF0000) >> 16);
+ PHY_SetBBReg(pDM_Odm->Adapter, rTx_IQK, bMaskDWord, u4tmp);
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("0xe40 = 0x%x u4tmp = 0x%x\n", PHY_QueryBBReg(pDM_Odm->Adapter, rTx_IQK, bMaskDWord), u4tmp));
+
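+	/* The Tx IQK X/Y results obtained above are packed into rTx_IQK
+	 * (0xe40) so that the RX IQK below runs against the TXIMR setting
+	 * just found.
+	 */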
+
+ /* 1 RX IQK */
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Path A RX IQK\n"));
+
+ /* modify RXIQK mode table */
+/* ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Path-A Rx IQK modify RXIQK mode table 2!\n")); */
+ PHY_SetBBReg(pDM_Odm->Adapter, rFPGA0_IQK, bMaskH3Bytes, 0x000000);
+ PHY_SetRFReg(pDM_Odm->Adapter, ODM_RF_PATH_A, RF_WE_LUT, 0x80000, 0x1);
+ PHY_SetRFReg(pDM_Odm->Adapter, ODM_RF_PATH_A, RF_RCK_OS, bRFRegOffsetMask, 0x18000);
+ PHY_SetRFReg(pDM_Odm->Adapter, ODM_RF_PATH_A, RF_TXPA_G1, bRFRegOffsetMask, 0x0001f);
+	/* LNA2 on, PA off for Dcut */
+ PHY_SetRFReg(pDM_Odm->Adapter, ODM_RF_PATH_A, RF_TXPA_G2, bRFRegOffsetMask, 0xf7d77);
+/* PHY_SetRFReg(pDM_Odm->Adapter, ODM_RF_PATH_A, RF_WE_LUT, 0x80000, 0x0); */
+
+ /* PA, PAD setting */
+ PHY_SetRFReg(pDM_Odm->Adapter, ODM_RF_PATH_A, 0xdf, bRFRegOffsetMask, 0xf80);
+ PHY_SetRFReg(pDM_Odm->Adapter, ODM_RF_PATH_A, 0x55, bRFRegOffsetMask, 0x4021f);
+
+
+ /* IQK setting */
+ PHY_SetBBReg(pDM_Odm->Adapter, rRx_IQK, bMaskDWord, 0x01004800);
+
+ /* path-A IQK setting */
+ PHY_SetBBReg(pDM_Odm->Adapter, rTx_IQK_Tone_A, bMaskDWord, 0x38008c1c);
+ PHY_SetBBReg(pDM_Odm->Adapter, rRx_IQK_Tone_A, bMaskDWord, 0x18008c1c);
+ PHY_SetBBReg(pDM_Odm->Adapter, rTx_IQK_Tone_B, bMaskDWord, 0x38008c1c);
+ PHY_SetBBReg(pDM_Odm->Adapter, rRx_IQK_Tone_B, bMaskDWord, 0x38008c1c);
+
+ PHY_SetBBReg(pDM_Odm->Adapter, rTx_IQK_PI_A, bMaskDWord, 0x82110000);
+/* PHY_SetBBReg(pDM_Odm->Adapter, rRx_IQK_PI_A, bMaskDWord, 0x281604c2); */
+ PHY_SetBBReg(pDM_Odm->Adapter, rRx_IQK_PI_A, bMaskDWord, 0x2813001f);
+ PHY_SetBBReg(pDM_Odm->Adapter, rTx_IQK_PI_B, bMaskDWord, 0x82110000);
+ PHY_SetBBReg(pDM_Odm->Adapter, rRx_IQK_PI_B, bMaskDWord, 0x28110000);
+
+ /* LO calibration setting */
+/* ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("LO calibration setting!\n")); */
+ PHY_SetBBReg(pDM_Odm->Adapter, rIQK_AGC_Rsp, bMaskDWord, 0x0046a8d1);
+
+ /* enter IQK mode */
+ PHY_SetBBReg(pDM_Odm->Adapter, rFPGA0_IQK, bMaskH3Bytes, 0x808000);
+
+ /* Ant switch */
+ if (configPathB || (RF_Path == 0))
+ /* wifi switch to S1 */
+ PHY_SetBBReg(pDM_Odm->Adapter, 0x948, bMaskDWord, 0x00000000);
+ else
+ /* wifi switch to S0 */
+ PHY_SetBBReg(pDM_Odm->Adapter, 0x948, bMaskDWord, 0x00000280);
+
+ /* GNT_BT = 0 */
+ PHY_SetBBReg(pDM_Odm->Adapter, 0x764, bMaskDWord, 0x00000800);
+
+ /* One shot, path A LOK & IQK */
+/* ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("One shot, path A LOK & IQK!\n")); */
+ PHY_SetBBReg(pDM_Odm->Adapter, rIQK_AGC_Pts, bMaskDWord, 0xf9000000);
+ PHY_SetBBReg(pDM_Odm->Adapter, rIQK_AGC_Pts, bMaskDWord, 0xf8000000);
+
+ /* delay x ms */
+/* ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Delay %d ms for One shot, path A LOK & IQK.\n", IQK_DELAY_TIME_88E)); */
+ /* PlatformStallExecution(IQK_DELAY_TIME_8723B*1000); */
+ mdelay(IQK_DELAY_TIME_8723B);
+
+ /* restore Ant Path */
+ PHY_SetBBReg(pDM_Odm->Adapter, 0x948, bMaskDWord, Path_SEL_BB);
+ /* GNT_BT = 1 */
+ PHY_SetBBReg(pDM_Odm->Adapter, 0x764, bMaskDWord, 0x00001800);
+
+ /* leave IQK mode */
+ PHY_SetBBReg(pDM_Odm->Adapter, rFPGA0_IQK, bMaskH3Bytes, 0x000000);
+
+ /* Check failed */
+ regEAC = PHY_QueryBBReg(pDM_Odm->Adapter, rRx_Power_After_IQK_A_2, bMaskDWord);
+ regEA4 = PHY_QueryBBReg(pDM_Odm->Adapter, rRx_Power_Before_IQK_A_2, bMaskDWord);
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("0xeac = 0x%x\n", regEAC));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("0xea4 = 0x%x, 0xeac = 0x%x\n", regEA4, regEAC));
+ /* monitor image power before & after IQK */
+	ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("0xea0(before IQK) = 0x%x, 0xea8(after IQK) = 0x%x\n",
+ PHY_QueryBBReg(pDM_Odm->Adapter, 0xea0, bMaskDWord), PHY_QueryBBReg(pDM_Odm->Adapter, 0xea8, bMaskDWord)));
+
+ /* PA/PAD controlled by 0x0 */
+ /* leave IQK mode */
+ PHY_SetBBReg(pDM_Odm->Adapter, rFPGA0_IQK, bMaskH3Bytes, 0x000000);
+ PHY_SetRFReg(pDM_Odm->Adapter, ODM_RF_PATH_A, 0xdf, bRFRegOffsetMask, 0x780);
+
+ /* Allen 20131125 */
+ tmp = (regEAC & 0x03FF0000)>>16;
+ if ((tmp & 0x200) > 0)
+ tmp = 0x400 - tmp;
+
+ if (
+ !(regEAC & BIT27) && /* if Tx is OK, check whether Rx is OK */
+ (((regEA4 & 0x03FF0000)>>16) != 0x132) &&
+ (((regEAC & 0x03FF0000)>>16) != 0x36) &&
+ (((regEA4 & 0x03FF0000)>>16) < 0x110) &&
+ (((regEA4 & 0x03FF0000)>>16) > 0xf0) &&
+ (tmp < 0xf)
+ )
+ result |= 0x02;
+ else /* if Tx not OK, ignore Rx */
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Path A Rx IQK fail!!\n"));
+ return result;
+}
+
+/* bit0 = 1 => Tx OK, bit1 = 1 => Rx OK */
+static u8 phy_PathB_IQK_8723B(struct adapter *padapter)
+{
+ u32 regEAC, regE94, regE9C, tmp, Path_SEL_BB/*, regEC4, regECC, Path_SEL_BB*/;
+ u8 result = 0x00;
+ struct hal_com_data *pHalData = GET_HAL_DATA(padapter);
+ PDM_ODM_T pDM_Odm = &pHalData->odmpriv;
+
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Path B IQK!\n"));
+
+ /* Save RF Path */
+ Path_SEL_BB = PHY_QueryBBReg(pDM_Odm->Adapter, 0x948, bMaskDWord);
+
+ /* leave IQK mode */
+ PHY_SetBBReg(pDM_Odm->Adapter, rFPGA0_IQK, bMaskH3Bytes, 0x000000);
+
+ /* in TXIQK mode */
+/* PHY_SetRFReg(pDM_Odm->Adapter, ODM_RF_PATH_A, RF_WE_LUT, 0x80000, 0x1); */
+/* PHY_SetRFReg(pDM_Odm->Adapter, ODM_RF_PATH_A, RF_RCK_OS, bRFRegOffsetMask, 0x20000); */
+/* PHY_SetRFReg(pDM_Odm->Adapter, ODM_RF_PATH_A, RF_TXPA_G1, bRFRegOffsetMask, 0x0003f); */
+/* PHY_SetRFReg(pDM_Odm->Adapter, ODM_RF_PATH_A, RF_TXPA_G2, bRFRegOffsetMask, 0xc7f87); */
+ /* enable path B PA in TXIQK mode */
+ PHY_SetRFReg(pDM_Odm->Adapter, ODM_RF_PATH_A, 0xed, 0x20, 0x1);
+ PHY_SetRFReg(pDM_Odm->Adapter, ODM_RF_PATH_A, 0x43, bRFRegOffsetMask, 0x30fc1);
+
+
+
+ /* 1 Tx IQK */
+ /* IQK setting */
+ PHY_SetBBReg(pDM_Odm->Adapter, rTx_IQK, bMaskDWord, 0x01007c00);
+ PHY_SetBBReg(pDM_Odm->Adapter, rRx_IQK, bMaskDWord, 0x01004800);
+	/* path-B IQK setting */
+/* ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Path-B IQK setting!\n")); */
+ PHY_SetBBReg(pDM_Odm->Adapter, rTx_IQK_Tone_A, bMaskDWord, 0x18008c1c);
+ PHY_SetBBReg(pDM_Odm->Adapter, rRx_IQK_Tone_A, bMaskDWord, 0x38008c1c);
+ PHY_SetBBReg(pDM_Odm->Adapter, rTx_IQK_Tone_B, bMaskDWord, 0x38008c1c);
+ PHY_SetBBReg(pDM_Odm->Adapter, rRx_IQK_Tone_B, bMaskDWord, 0x38008c1c);
+
+/* PHY_SetBBReg(pDM_Odm->Adapter, rTx_IQK_PI_A, bMaskDWord, 0x82140114); */
+ PHY_SetBBReg(pDM_Odm->Adapter, rTx_IQK_PI_A, bMaskDWord, 0x821303ea);
+ PHY_SetBBReg(pDM_Odm->Adapter, rRx_IQK_PI_A, bMaskDWord, 0x28110000);
+ PHY_SetBBReg(pDM_Odm->Adapter, rTx_IQK_PI_B, bMaskDWord, 0x82110000);
+ PHY_SetBBReg(pDM_Odm->Adapter, rRx_IQK_PI_B, bMaskDWord, 0x28110000);
+
+ /* LO calibration setting */
+/* ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("LO calibration setting!\n")); */
+ PHY_SetBBReg(pDM_Odm->Adapter, rIQK_AGC_Rsp, bMaskDWord, 0x00462911);
+
+ /* enter IQK mode */
+ PHY_SetBBReg(pDM_Odm->Adapter, rFPGA0_IQK, bMaskH3Bytes, 0x808000);
+
+ /* switch to path B */
+ PHY_SetBBReg(pDM_Odm->Adapter, 0x948, bMaskDWord, 0x00000280);
+/* PHY_SetRFReg(pDM_Odm->Adapter, ODM_RF_PATH_A, 0xb0, bRFRegOffsetMask, 0xeffe0); */
+
+ /* GNT_BT = 0 */
+ PHY_SetBBReg(pDM_Odm->Adapter, 0x764, bMaskDWord, 0x00000800);
+
+ /* One shot, path B LOK & IQK */
+/* ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("One shot, path B LOK & IQK!\n")); */
+ PHY_SetBBReg(pDM_Odm->Adapter, rIQK_AGC_Pts, bMaskDWord, 0xf9000000);
+ PHY_SetBBReg(pDM_Odm->Adapter, rIQK_AGC_Pts, bMaskDWord, 0xf8000000);
+
+ /* delay x ms */
+/* ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Delay %d ms for One shot, path B LOK & IQK.\n", IQK_DELAY_TIME_88E)); */
+ /* PlatformStallExecution(IQK_DELAY_TIME_88E*1000); */
+ mdelay(IQK_DELAY_TIME_8723B);
+
+ /* restore Ant Path */
+ PHY_SetBBReg(pDM_Odm->Adapter, 0x948, bMaskDWord, Path_SEL_BB);
+ /* GNT_BT = 1 */
+ PHY_SetBBReg(pDM_Odm->Adapter, 0x764, bMaskDWord, 0x00001800);
+
+ /* leave IQK mode */
+ PHY_SetBBReg(pDM_Odm->Adapter, rFPGA0_IQK, bMaskH3Bytes, 0x000000);
+
+/* ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("0x948 = 0x%x\n", PHY_QueryBBReg(pDM_Odm->Adapter, 0x948, bMaskDWord))); */
+
+
+ /* Check failed */
+ regEAC = PHY_QueryBBReg(pDM_Odm->Adapter, rRx_Power_After_IQK_A_2, bMaskDWord);
+ regE94 = PHY_QueryBBReg(pDM_Odm->Adapter, rTx_Power_Before_IQK_A, bMaskDWord);
+ regE9C = PHY_QueryBBReg(pDM_Odm->Adapter, rTx_Power_After_IQK_A, bMaskDWord);
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("0xeac = 0x%x\n", regEAC));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("0xe94 = 0x%x, 0xe9c = 0x%x\n", regE94, regE9C));
+ /* monitor image power before & after IQK */
+	ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("0xe90(before IQK) = 0x%x, 0xe98(after IQK) = 0x%x\n",
+ PHY_QueryBBReg(pDM_Odm->Adapter, 0xe90, bMaskDWord), PHY_QueryBBReg(pDM_Odm->Adapter, 0xe98, bMaskDWord)));
+
+ /* Allen 20131125 */
+ tmp = (regE9C & 0x03FF0000)>>16;
+ if ((tmp & 0x200) > 0)
+ tmp = 0x400 - tmp;
+
+ if (
+ !(regEAC & BIT28) &&
+ (((regE94 & 0x03FF0000)>>16) != 0x142) &&
+ (((regE9C & 0x03FF0000)>>16) != 0x42) &&
+ (((regE94 & 0x03FF0000)>>16) < 0x110) &&
+ (((regE94 & 0x03FF0000)>>16) > 0xf0) &&
+ (tmp < 0xf)
+ )
+ result |= 0x01;
+
+ return result;
+}
+
+/* bit0 = 1 => Tx OK, bit1 = 1 => Rx OK */
+static u8 phy_PathB_RxIQK8723B(struct adapter *padapter, bool configPathB)
+{
+ u32 regE94, regE9C, regEA4, regEAC, u4tmp, tmp, Path_SEL_BB;
+ u8 result = 0x00;
+ struct hal_com_data *pHalData = GET_HAL_DATA(padapter);
+ PDM_ODM_T pDM_Odm = &pHalData->odmpriv;
+
+/* ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Path B Rx IQK!\n")); */
+
+ /* Save RF Path */
+ Path_SEL_BB = PHY_QueryBBReg(pDM_Odm->Adapter, 0x948, bMaskDWord);
+ /* leave IQK mode */
+ PHY_SetBBReg(pDM_Odm->Adapter, rFPGA0_IQK, bMaskH3Bytes, 0x000000);
+
+ /* switch to path B */
+ PHY_SetBBReg(pDM_Odm->Adapter, 0x948, bMaskDWord, 0x00000280);
+
+ /* 1 Get TXIMR setting */
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Path B RX IQK:Get TXIMR setting!\n"));
+ /* modify RXIQK mode table */
+/* ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Path-A Rx IQK modify RXIQK mode table!\n")); */
+ PHY_SetRFReg(pDM_Odm->Adapter, ODM_RF_PATH_A, RF_WE_LUT, 0x80000, 0x1);
+ PHY_SetRFReg(pDM_Odm->Adapter, ODM_RF_PATH_A, RF_RCK_OS, bRFRegOffsetMask, 0x18000);
+ PHY_SetRFReg(pDM_Odm->Adapter, ODM_RF_PATH_A, RF_TXPA_G1, bRFRegOffsetMask, 0x0001f);
+ PHY_SetRFReg(pDM_Odm->Adapter, ODM_RF_PATH_A, RF_TXPA_G2, bRFRegOffsetMask, 0xf7fb7);
+ /* open PA S1 & SMIXER */
+ PHY_SetRFReg(pDM_Odm->Adapter, ODM_RF_PATH_A, 0xed, 0x20, 0x1);
+ PHY_SetRFReg(pDM_Odm->Adapter, ODM_RF_PATH_A, 0x43, bRFRegOffsetMask, 0x30fcd);
+
+
+ /* IQK setting */
+ PHY_SetBBReg(pDM_Odm->Adapter, rTx_IQK, bMaskDWord, 0x01007c00);
+ PHY_SetBBReg(pDM_Odm->Adapter, rRx_IQK, bMaskDWord, 0x01004800);
+
+
+ /* path-B IQK setting */
+ PHY_SetBBReg(pDM_Odm->Adapter, rTx_IQK_Tone_A, bMaskDWord, 0x18008c1c);
+ PHY_SetBBReg(pDM_Odm->Adapter, rRx_IQK_Tone_A, bMaskDWord, 0x38008c1c);
+ PHY_SetBBReg(pDM_Odm->Adapter, rTx_IQK_Tone_B, bMaskDWord, 0x38008c1c);
+ PHY_SetBBReg(pDM_Odm->Adapter, rRx_IQK_Tone_B, bMaskDWord, 0x38008c1c);
+
+/* PHY_SetBBReg(pDM_Odm->Adapter, rTx_IQK_PI_A, bMaskDWord, 0x82160c1f); */
+ PHY_SetBBReg(pDM_Odm->Adapter, rTx_IQK_PI_A, bMaskDWord, 0x82130ff0);
+ PHY_SetBBReg(pDM_Odm->Adapter, rRx_IQK_PI_A, bMaskDWord, 0x28110000);
+ PHY_SetBBReg(pDM_Odm->Adapter, rTx_IQK_PI_B, bMaskDWord, 0x82110000);
+ PHY_SetBBReg(pDM_Odm->Adapter, rRx_IQK_PI_B, bMaskDWord, 0x28110000);
+
+ /* LO calibration setting */
+/* ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("LO calibration setting!\n")); */
+ PHY_SetBBReg(pDM_Odm->Adapter, rIQK_AGC_Rsp, bMaskDWord, 0x0046a911);
+
+ /* enter IQK mode */
+ PHY_SetBBReg(pDM_Odm->Adapter, rFPGA0_IQK, bMaskH3Bytes, 0x808000);
+
+ /* switch to path B */
+ PHY_SetBBReg(pDM_Odm->Adapter, 0x948, bMaskDWord, 0x00000280);
+/* PHY_SetRFReg(pDM_Odm->Adapter, ODM_RF_PATH_A, 0xb0, bRFRegOffsetMask, 0xeffe0); */
+
+ /* GNT_BT = 0 */
+ PHY_SetBBReg(pDM_Odm->Adapter, 0x764, bMaskDWord, 0x00000800);
+
+ /* One shot, path B TXIQK @ RXIQK */
+/* ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("One shot, path B LOK & IQK!\n")); */
+ PHY_SetBBReg(pDM_Odm->Adapter, rIQK_AGC_Pts, bMaskDWord, 0xf9000000);
+ PHY_SetBBReg(pDM_Odm->Adapter, rIQK_AGC_Pts, bMaskDWord, 0xf8000000);
+
+
+ /* delay x ms */
+/* ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Delay %d ms for One shot, path A LOK & IQK.\n", IQK_DELAY_TIME_88E)); */
+ /* PlatformStallExecution(IQK_DELAY_TIME_88E*1000); */
+ mdelay(IQK_DELAY_TIME_8723B);
+
+ /* restore Ant Path */
+ PHY_SetBBReg(pDM_Odm->Adapter, 0x948, bMaskDWord, Path_SEL_BB);
+ /* GNT_BT = 1 */
+ PHY_SetBBReg(pDM_Odm->Adapter, 0x764, bMaskDWord, 0x00001800);
+
+ /* leave IQK mode */
+ PHY_SetBBReg(pDM_Odm->Adapter, rFPGA0_IQK, bMaskH3Bytes, 0x000000);
+
+ /* Check failed */
+ regEAC = PHY_QueryBBReg(pDM_Odm->Adapter, rRx_Power_After_IQK_A_2, bMaskDWord);
+ regE94 = PHY_QueryBBReg(pDM_Odm->Adapter, rTx_Power_Before_IQK_A, bMaskDWord);
+ regE9C = PHY_QueryBBReg(pDM_Odm->Adapter, rTx_Power_After_IQK_A, bMaskDWord);
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("0xeac = 0x%x\n", regEAC));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("0xe94 = 0x%x, 0xe9c = 0x%x\n", regE94, regE9C));
+ /* monitor image power before & after IQK */
+	ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("0xe90(before IQK) = 0x%x, 0xe98(after IQK) = 0x%x\n",
+ PHY_QueryBBReg(pDM_Odm->Adapter, 0xe90, bMaskDWord), PHY_QueryBBReg(pDM_Odm->Adapter, 0xe98, bMaskDWord)));
+
+ /* Allen 20131125 */
+ tmp = (regE9C & 0x03FF0000)>>16;
+/* ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("tmp1 = 0x%x\n", tmp)); */
+ if ((tmp & 0x200) > 0)
+ tmp = 0x400 - tmp;
+/* ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("tmp2 = 0x%x\n", tmp)); */
+
+ if (
+ !(regEAC & BIT28) &&
+ (((regE94 & 0x03FF0000)>>16) != 0x142) &&
+ (((regE9C & 0x03FF0000)>>16) != 0x42) &&
+ (((regE94 & 0x03FF0000)>>16) < 0x110) &&
+ (((regE94 & 0x03FF0000)>>16) > 0xf0) &&
+ (tmp < 0xf)
+ )
+ result |= 0x01;
+ else /* if Tx not OK, ignore Rx */
+ return result;
+
+ u4tmp = 0x80007C00 | (regE94&0x3FF0000) | ((regE9C&0x3FF0000) >> 16);
+ PHY_SetBBReg(pDM_Odm->Adapter, rTx_IQK, bMaskDWord, u4tmp);
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("0xe40 = 0x%x u4tmp = 0x%x\n", PHY_QueryBBReg(pDM_Odm->Adapter, rTx_IQK, bMaskDWord), u4tmp));
+
+ /* 1 RX IQK */
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Path B RX IQK\n"));
+
+ /* modify RXIQK mode table */
+ /* 20121009, Kordan> RF Mode = 3 */
+ PHY_SetBBReg(pDM_Odm->Adapter, rFPGA0_IQK, bMaskH3Bytes, 0x000000);
+ PHY_SetRFReg(pDM_Odm->Adapter, ODM_RF_PATH_A, RF_WE_LUT, 0x80000, 0x1);
+ PHY_SetRFReg(pDM_Odm->Adapter, ODM_RF_PATH_A, RF_RCK_OS, bRFRegOffsetMask, 0x18000);
+ PHY_SetRFReg(pDM_Odm->Adapter, ODM_RF_PATH_A, RF_TXPA_G1, bRFRegOffsetMask, 0x0001f);
+ PHY_SetRFReg(pDM_Odm->Adapter, ODM_RF_PATH_A, RF_TXPA_G2, bRFRegOffsetMask, 0xf7d77);
+/* PHY_SetRFReg(pDM_Odm->Adapter, ODM_RF_PATH_A, RF_WE_LUT, 0x80000, 0x0); */
+
+ /* open PA S1 & close SMIXER */
+ PHY_SetRFReg(pDM_Odm->Adapter, ODM_RF_PATH_A, 0xed, 0x20, 0x1);
+ PHY_SetRFReg(pDM_Odm->Adapter, ODM_RF_PATH_A, 0x43, bRFRegOffsetMask, 0x30ebd);
+
+ /* PA, PAD setting */
+/* PHY_SetRFReg(pDM_Odm->Adapter, ODM_RF_PATH_A, 0xdf, bRFRegOffsetMask, 0xf80); */
+/* PHY_SetRFReg(pDM_Odm->Adapter, ODM_RF_PATH_A, 0x56, bRFRegOffsetMask, 0x51000); */
+
+ /* IQK setting */
+ PHY_SetBBReg(pDM_Odm->Adapter, rRx_IQK, bMaskDWord, 0x01004800);
+
+ /* path-B IQK setting */
+ PHY_SetBBReg(pDM_Odm->Adapter, rTx_IQK_Tone_A, bMaskDWord, 0x38008c1c);
+ PHY_SetBBReg(pDM_Odm->Adapter, rRx_IQK_Tone_A, bMaskDWord, 0x18008c1c);
+ PHY_SetBBReg(pDM_Odm->Adapter, rTx_IQK_Tone_B, bMaskDWord, 0x38008c1c);
+ PHY_SetBBReg(pDM_Odm->Adapter, rRx_IQK_Tone_B, bMaskDWord, 0x38008c1c);
+
+ PHY_SetBBReg(pDM_Odm->Adapter, rTx_IQK_PI_A, bMaskDWord, 0x82110000);
+/* PHY_SetBBReg(pDM_Odm->Adapter, rRx_IQK_PI_A, bMaskDWord, 0x281604c2); */
+ PHY_SetBBReg(pDM_Odm->Adapter, rRx_IQK_PI_A, bMaskDWord, 0x2813001f);
+ PHY_SetBBReg(pDM_Odm->Adapter, rTx_IQK_PI_B, bMaskDWord, 0x82110000);
+ PHY_SetBBReg(pDM_Odm->Adapter, rRx_IQK_PI_B, bMaskDWord, 0x28110000);
+
+ /* LO calibration setting */
+/* ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("LO calibration setting!\n")); */
+ PHY_SetBBReg(pDM_Odm->Adapter, rIQK_AGC_Rsp, bMaskDWord, 0x0046a8d1);
+
+ /* enter IQK mode */
+ PHY_SetBBReg(pDM_Odm->Adapter, rFPGA0_IQK, bMaskH3Bytes, 0x808000);
+
+ /* switch to path B */
+ PHY_SetBBReg(pDM_Odm->Adapter, 0x948, bMaskDWord, 0x00000280);
+/* PHY_SetRFReg(pDM_Odm->Adapter, ODM_RF_PATH_A, 0xb0, bRFRegOffsetMask, 0xeffe0); */
+
+ /* GNT_BT = 0 */
+ PHY_SetBBReg(pDM_Odm->Adapter, 0x764, bMaskDWord, 0x00000800);
+
+ /* One shot, path B LOK & IQK */
+/* ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("One shot, path B LOK & IQK!\n")); */
+ PHY_SetBBReg(pDM_Odm->Adapter, rIQK_AGC_Pts, bMaskDWord, 0xf9000000);
+ PHY_SetBBReg(pDM_Odm->Adapter, rIQK_AGC_Pts, bMaskDWord, 0xf8000000);
+
+ /* delay x ms */
+/* ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Delay %d ms for One shot, path A LOK & IQK.\n", IQK_DELAY_TIME_88E)); */
+ /* PlatformStallExecution(IQK_DELAY_TIME_88E*1000); */
+ mdelay(IQK_DELAY_TIME_8723B);
+
+ /* restore Ant Path */
+ PHY_SetBBReg(pDM_Odm->Adapter, 0x948, bMaskDWord, Path_SEL_BB);
+ /* GNT_BT = 1 */
+ PHY_SetBBReg(pDM_Odm->Adapter, 0x764, bMaskDWord, 0x00001800);
+
+ /* leave IQK mode */
+ PHY_SetBBReg(pDM_Odm->Adapter, rFPGA0_IQK, bMaskH3Bytes, 0x000000);
+
+ /* Check failed */
+ regEAC = PHY_QueryBBReg(pDM_Odm->Adapter, rRx_Power_After_IQK_A_2, bMaskDWord);
+	regEA4 = PHY_QueryBBReg(pDM_Odm->Adapter, rRx_Power_Before_IQK_A_2, bMaskDWord);
+
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("0xeac = 0x%x\n", regEAC));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("0xea4 = 0x%x, 0xeac = 0x%x\n", regEA4, regEAC));
+ /* monitor image power before & after IQK */
+	ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("0xea0(before IQK) = 0x%x, 0xea8(after IQK) = 0x%x\n",
+ PHY_QueryBBReg(pDM_Odm->Adapter, 0xea0, bMaskDWord), PHY_QueryBBReg(pDM_Odm->Adapter, 0xea8, bMaskDWord)));
+
+ /* PA/PAD controlled by 0x0 */
+ /* leave IQK mode */
+/* PHY_SetBBReg(pDM_Odm->Adapter, rFPGA0_IQK, 0xffffff00, 0x00000000); */
+/* PHY_SetRFReg(pDM_Odm->Adapter, ODM_RF_PATH_B, 0xdf, bRFRegOffsetMask, 0x180); */
+
+
+
+ /* Allen 20131125 */
+ tmp = (regEAC & 0x03FF0000)>>16;
+ if ((tmp & 0x200) > 0)
+ tmp = 0x400 - tmp;
+
+ if (
+ !(regEAC & BIT27) && /* if Tx is OK, check whether Rx is OK */
+ (((regEA4 & 0x03FF0000)>>16) != 0x132) &&
+ (((regEAC & 0x03FF0000)>>16) != 0x36) &&
+ (((regEA4 & 0x03FF0000)>>16) < 0x110) &&
+ (((regEA4 & 0x03FF0000)>>16) > 0xf0) &&
+ (tmp < 0xf)
+ )
+ result |= 0x02;
+ else
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Path B Rx IQK fail!!\n"));
+
+ return result;
+}
+
+static void _PHY_PathAFillIQKMatrix8723B(
+ struct adapter *padapter,
+ bool bIQKOK,
+ s32 result[][8],
+ u8 final_candidate,
+ bool bTxOnly
+)
+{
+ u32 Oldval_0, X, TX0_A, reg;
+ s32 Y, TX0_C;
+ struct hal_com_data *pHalData = GET_HAL_DATA(padapter);
+ PDM_ODM_T pDM_Odm = &pHalData->odmpriv;
+
+ PODM_RF_CAL_T pRFCalibrateInfo = &(pDM_Odm->RFCalibrateInfo);
+
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Path A IQ Calibration %s !\n", (bIQKOK)?"Success":"Failed"));
+
+ if (final_candidate == 0xFF)
+ return;
+
+	if (bIQKOK) {
+ Oldval_0 = (PHY_QueryBBReg(pDM_Odm->Adapter, rOFDM0_XATxIQImbalance, bMaskDWord) >> 22) & 0x3FF;
+
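+		/*
+		 * result[][0..3] hold the path A Tx X/Y and Rx X/Y terms as
+		 * 10-bit two's-complement values; X and Y are sign-extended
+		 * below and scaled against the current 0xc80 coefficient
+		 * (Oldval_0) before being written back.
+		 */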
+ X = result[final_candidate][0];
+ if ((X & 0x00000200) != 0)
+ X = X | 0xFFFFFC00;
+ TX0_A = (X * Oldval_0) >> 8;
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("X = 0x%x, TX0_A = 0x%x, Oldval_0 0x%x\n", X, TX0_A, Oldval_0));
+ PHY_SetBBReg(pDM_Odm->Adapter, rOFDM0_XATxIQImbalance, 0x3FF, TX0_A);
+
+ PHY_SetBBReg(pDM_Odm->Adapter, rOFDM0_ECCAThreshold, BIT(31), ((X*Oldval_0>>7) & 0x1));
+
+ Y = result[final_candidate][1];
+ if ((Y & 0x00000200) != 0)
+ Y = Y | 0xFFFFFC00;
+
+ /* 2 Tx IQC */
+ TX0_C = (Y * Oldval_0) >> 8;
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Y = 0x%x, TX = 0x%x\n", Y, TX0_C));
+ PHY_SetBBReg(pDM_Odm->Adapter, rOFDM0_XCTxAFE, 0xF0000000, ((TX0_C&0x3C0)>>6));
+ pRFCalibrateInfo->TxIQC_8723B[PATH_S1][IDX_0xC94][KEY] = rOFDM0_XCTxAFE;
+ pRFCalibrateInfo->TxIQC_8723B[PATH_S1][IDX_0xC94][VAL] = PHY_QueryBBReg(pDM_Odm->Adapter, rOFDM0_XCTxAFE, bMaskDWord);
+
+ PHY_SetBBReg(pDM_Odm->Adapter, rOFDM0_XATxIQImbalance, 0x003F0000, (TX0_C&0x3F));
+ pRFCalibrateInfo->TxIQC_8723B[PATH_S1][IDX_0xC80][KEY] = rOFDM0_XATxIQImbalance;
+ pRFCalibrateInfo->TxIQC_8723B[PATH_S1][IDX_0xC80][VAL] = PHY_QueryBBReg(pDM_Odm->Adapter, rOFDM0_XATxIQImbalance, bMaskDWord);
+
+ PHY_SetBBReg(pDM_Odm->Adapter, rOFDM0_ECCAThreshold, BIT(29), ((Y*Oldval_0>>7) & 0x1));
+ pRFCalibrateInfo->TxIQC_8723B[PATH_S1][IDX_0xC4C][KEY] = rOFDM0_ECCAThreshold;
+ pRFCalibrateInfo->TxIQC_8723B[PATH_S1][IDX_0xC4C][VAL] = PHY_QueryBBReg(pDM_Odm->Adapter, rOFDM0_ECCAThreshold, bMaskDWord);
+
+ if (bTxOnly) {
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("_PHY_PathAFillIQKMatrix8723B only Tx OK\n"));
+
+ /* <20130226, Kordan> Saving RxIQC, otherwise not initialized. */
+ pRFCalibrateInfo->RxIQC_8723B[PATH_S1][IDX_0xCA0][KEY] = rOFDM0_RxIQExtAnta;
+ pRFCalibrateInfo->RxIQC_8723B[PATH_S1][IDX_0xCA0][VAL] = 0xfffffff & PHY_QueryBBReg(pDM_Odm->Adapter, rOFDM0_RxIQExtAnta, bMaskDWord);
+ pRFCalibrateInfo->RxIQC_8723B[PATH_S1][IDX_0xC14][KEY] = rOFDM0_XARxIQImbalance;
+/* pRFCalibrateInfo->RxIQC_8723B[PATH_S1][IDX_0xC14][VAL] = PHY_QueryBBReg(pDM_Odm->Adapter, rOFDM0_XARxIQImbalance, bMaskDWord); */
+ pRFCalibrateInfo->RxIQC_8723B[PATH_S1][IDX_0xC14][VAL] = 0x40000100;
+ return;
+ }
+
+ reg = result[final_candidate][2];
+
+ /* 2 Rx IQC */
+ PHY_SetBBReg(pDM_Odm->Adapter, rOFDM0_XARxIQImbalance, 0x3FF, reg);
+ reg = result[final_candidate][3] & 0x3F;
+ PHY_SetBBReg(pDM_Odm->Adapter, rOFDM0_XARxIQImbalance, 0xFC00, reg);
+ pRFCalibrateInfo->RxIQC_8723B[PATH_S1][IDX_0xC14][KEY] = rOFDM0_XARxIQImbalance;
+ pRFCalibrateInfo->RxIQC_8723B[PATH_S1][IDX_0xC14][VAL] = PHY_QueryBBReg(pDM_Odm->Adapter, rOFDM0_XARxIQImbalance, bMaskDWord);
+
+ reg = (result[final_candidate][3] >> 6) & 0xF;
+ PHY_SetBBReg(pDM_Odm->Adapter, rOFDM0_RxIQExtAnta, 0xF0000000, reg);
+ pRFCalibrateInfo->RxIQC_8723B[PATH_S1][IDX_0xCA0][KEY] = rOFDM0_RxIQExtAnta;
+ pRFCalibrateInfo->RxIQC_8723B[PATH_S1][IDX_0xCA0][VAL] = PHY_QueryBBReg(pDM_Odm->Adapter, rOFDM0_RxIQExtAnta, bMaskDWord);
+
+ }
+}
+
+static void _PHY_PathBFillIQKMatrix8723B(
+ struct adapter *padapter,
+ bool bIQKOK,
+ s32 result[][8],
+ u8 final_candidate,
+ bool bTxOnly /* do Tx only */
+)
+{
+ u32 Oldval_1, X, TX1_A, reg;
+ s32 Y, TX1_C;
+ struct hal_com_data *pHalData = GET_HAL_DATA(padapter);
+ PDM_ODM_T pDM_Odm = &pHalData->odmpriv;
+
+ PODM_RF_CAL_T pRFCalibrateInfo = &(pDM_Odm->RFCalibrateInfo);
+
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Path B IQ Calibration %s !\n", (bIQKOK)?"Success":"Failed"));
+
+ if (final_candidate == 0xFF)
+ return;
+
+	if (bIQKOK) {
+ Oldval_1 = (PHY_QueryBBReg(pDM_Odm->Adapter, rOFDM0_XBTxIQImbalance, bMaskDWord) >> 22) & 0x3FF;
+
+ X = result[final_candidate][4];
+ if ((X & 0x00000200) != 0)
+ X = X | 0xFFFFFC00;
+ TX1_A = (X * Oldval_1) >> 8;
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("X = 0x%x, TX1_A = 0x%x\n", X, TX1_A));
+
+ PHY_SetBBReg(pDM_Odm->Adapter, rOFDM0_XBTxIQImbalance, 0x3FF, TX1_A);
+
+ PHY_SetBBReg(pDM_Odm->Adapter, rOFDM0_ECCAThreshold, BIT(27), ((X*Oldval_1>>7) & 0x1));
+
+ Y = result[final_candidate][5];
+ if ((Y & 0x00000200) != 0)
+ Y = Y | 0xFFFFFC00;
+
+ TX1_C = (Y * Oldval_1) >> 8;
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Y = 0x%x, TX1_C = 0x%x\n", Y, TX1_C));
+
+ /* 2 Tx IQC */
+ PHY_SetBBReg(pDM_Odm->Adapter, rOFDM0_XDTxAFE, 0xF0000000, ((TX1_C&0x3C0)>>6));
+/* pRFCalibrateInfo->TxIQC_8723B[PATH_S0][IDX_0xC9C][KEY] = rOFDM0_XDTxAFE; */
+/* pRFCalibrateInfo->TxIQC_8723B[PATH_S0][IDX_0xC9C][VAL] = PHY_QueryBBReg(pDM_Odm->Adapter, rOFDM0_XDTxAFE, bMaskDWord); */
+ pRFCalibrateInfo->TxIQC_8723B[PATH_S0][IDX_0xC94][KEY] = rOFDM0_XCTxAFE;
+ pRFCalibrateInfo->TxIQC_8723B[PATH_S0][IDX_0xC94][VAL] = PHY_QueryBBReg(pDM_Odm->Adapter, rOFDM0_XDTxAFE, bMaskDWord);
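+		/*
+		 * Note the cross-mapping: the S0 (path B) results are cached
+		 * against the path A register offsets (0xc94/0xc80/0xc14),
+		 * presumably because ODM_SetIQCbyRFpath() re-applies them to
+		 * the path A BB registers once the antenna switch selects S0.
+		 */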
+
+ PHY_SetBBReg(pDM_Odm->Adapter, rOFDM0_XBTxIQImbalance, 0x003F0000, (TX1_C&0x3F));
+ pRFCalibrateInfo->TxIQC_8723B[PATH_S0][IDX_0xC80][KEY] = rOFDM0_XATxIQImbalance;
+ pRFCalibrateInfo->TxIQC_8723B[PATH_S0][IDX_0xC80][VAL] = PHY_QueryBBReg(pDM_Odm->Adapter, rOFDM0_XBTxIQImbalance, bMaskDWord);
+
+ PHY_SetBBReg(pDM_Odm->Adapter, rOFDM0_ECCAThreshold, BIT(25), ((Y*Oldval_1>>7) & 0x1));
+ pRFCalibrateInfo->TxIQC_8723B[PATH_S0][IDX_0xC4C][KEY] = rOFDM0_ECCAThreshold;
+ pRFCalibrateInfo->TxIQC_8723B[PATH_S0][IDX_0xC4C][VAL] = PHY_QueryBBReg(pDM_Odm->Adapter, rOFDM0_ECCAThreshold, bMaskDWord);
+
+ if (bTxOnly) {
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("_PHY_PathBFillIQKMatrix8723B only Tx OK\n"));
+
+ pRFCalibrateInfo->RxIQC_8723B[PATH_S0][IDX_0xC14][KEY] = rOFDM0_XARxIQImbalance;
+/* pRFCalibrateInfo->RxIQC_8723B[PATH_S0][IDX_0xC14][VAL] = PHY_QueryBBReg(pDM_Odm->Adapter, rOFDM0_XARxIQImbalance, bMaskDWord); */
+ pRFCalibrateInfo->RxIQC_8723B[PATH_S0][IDX_0xC14][VAL] = 0x40000100;
+ pRFCalibrateInfo->RxIQC_8723B[PATH_S0][IDX_0xCA0][KEY] = rOFDM0_RxIQExtAnta;
+ pRFCalibrateInfo->RxIQC_8723B[PATH_S0][IDX_0xCA0][VAL] = 0x0fffffff & PHY_QueryBBReg(pDM_Odm->Adapter, rOFDM0_RxIQExtAnta, bMaskDWord);
+ return;
+ }
+
+ /* 2 Rx IQC */
+ reg = result[final_candidate][6];
+ PHY_SetBBReg(pDM_Odm->Adapter, rOFDM0_XBRxIQImbalance, 0x3FF, reg);
+ reg = result[final_candidate][7] & 0x3F;
+ PHY_SetBBReg(pDM_Odm->Adapter, rOFDM0_XBRxIQImbalance, 0xFC00, reg);
+ pRFCalibrateInfo->RxIQC_8723B[PATH_S0][IDX_0xC14][KEY] = rOFDM0_XARxIQImbalance;
+ pRFCalibrateInfo->RxIQC_8723B[PATH_S0][IDX_0xC14][VAL] = PHY_QueryBBReg(pDM_Odm->Adapter, rOFDM0_XBRxIQImbalance, bMaskDWord);
+
+ reg = (result[final_candidate][7] >> 6) & 0xF;
+/* PHY_SetBBReg(pDM_Odm->Adapter, rOFDM0_AGCRSSITable, 0x0000F000, reg); */
+ pRFCalibrateInfo->RxIQC_8723B[PATH_S0][IDX_0xCA0][KEY] = rOFDM0_RxIQExtAnta;
+ pRFCalibrateInfo->RxIQC_8723B[PATH_S0][IDX_0xCA0][VAL] = (reg << 28)|(PHY_QueryBBReg(pDM_Odm->Adapter, rOFDM0_RxIQExtAnta, bMaskDWord)&0x0fffffff);
+ }
+}
+
+/* */
+/* 2011/07/26 MH Add an API for testing IQK fail case. */
+/* */
+/* MP Already declare in odm.c */
+
+void ODM_SetIQCbyRFpath(PDM_ODM_T pDM_Odm, u32 RFpath)
+{
+
+ PODM_RF_CAL_T pRFCalibrateInfo = &(pDM_Odm->RFCalibrateInfo);
+
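+	/*
+	 * Re-apply cached IQC values only if every S0 and S1 entry has been
+	 * filled by a successful IQK; [KEY] holds the register offset and
+	 * [VAL] the calibrated value.
+	 */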
+ if (
+ (pRFCalibrateInfo->TxIQC_8723B[PATH_S0][IDX_0xC80][VAL] != 0x0) &&
+ (pRFCalibrateInfo->RxIQC_8723B[PATH_S0][IDX_0xC14][VAL] != 0x0) &&
+ (pRFCalibrateInfo->TxIQC_8723B[PATH_S1][IDX_0xC80][VAL] != 0x0) &&
+ (pRFCalibrateInfo->RxIQC_8723B[PATH_S1][IDX_0xC14][VAL] != 0x0)
+ ) {
+ if (RFpath) { /* S1: RFpath = 0, S0:RFpath = 1 */
+ /* S0 TX IQC */
+ PHY_SetBBReg(pDM_Odm->Adapter, pRFCalibrateInfo->TxIQC_8723B[PATH_S0][IDX_0xC94][KEY], bMaskDWord, pRFCalibrateInfo->TxIQC_8723B[PATH_S0][IDX_0xC94][VAL]);
+ PHY_SetBBReg(pDM_Odm->Adapter, pRFCalibrateInfo->TxIQC_8723B[PATH_S0][IDX_0xC80][KEY], bMaskDWord, pRFCalibrateInfo->TxIQC_8723B[PATH_S0][IDX_0xC80][VAL]);
+ PHY_SetBBReg(pDM_Odm->Adapter, pRFCalibrateInfo->TxIQC_8723B[PATH_S0][IDX_0xC4C][KEY], bMaskDWord, pRFCalibrateInfo->TxIQC_8723B[PATH_S0][IDX_0xC4C][VAL]);
+ /* S0 RX IQC */
+ PHY_SetBBReg(pDM_Odm->Adapter, pRFCalibrateInfo->RxIQC_8723B[PATH_S0][IDX_0xC14][KEY], bMaskDWord, pRFCalibrateInfo->RxIQC_8723B[PATH_S0][IDX_0xC14][VAL]);
+ PHY_SetBBReg(pDM_Odm->Adapter, pRFCalibrateInfo->RxIQC_8723B[PATH_S0][IDX_0xCA0][KEY], bMaskDWord, pRFCalibrateInfo->RxIQC_8723B[PATH_S0][IDX_0xCA0][VAL]);
+ } else {
+ /* S1 TX IQC */
+ PHY_SetBBReg(pDM_Odm->Adapter, pRFCalibrateInfo->TxIQC_8723B[PATH_S1][IDX_0xC94][KEY], bMaskDWord, pRFCalibrateInfo->TxIQC_8723B[PATH_S1][IDX_0xC94][VAL]);
+ PHY_SetBBReg(pDM_Odm->Adapter, pRFCalibrateInfo->TxIQC_8723B[PATH_S1][IDX_0xC80][KEY], bMaskDWord, pRFCalibrateInfo->TxIQC_8723B[PATH_S1][IDX_0xC80][VAL]);
+ PHY_SetBBReg(pDM_Odm->Adapter, pRFCalibrateInfo->TxIQC_8723B[PATH_S1][IDX_0xC4C][KEY], bMaskDWord, pRFCalibrateInfo->TxIQC_8723B[PATH_S1][IDX_0xC4C][VAL]);
+ /* S1 RX IQC */
+ PHY_SetBBReg(pDM_Odm->Adapter, pRFCalibrateInfo->RxIQC_8723B[PATH_S1][IDX_0xC14][KEY], bMaskDWord, pRFCalibrateInfo->RxIQC_8723B[PATH_S1][IDX_0xC14][VAL]);
+ PHY_SetBBReg(pDM_Odm->Adapter, pRFCalibrateInfo->RxIQC_8723B[PATH_S1][IDX_0xCA0][KEY], bMaskDWord, pRFCalibrateInfo->RxIQC_8723B[PATH_S1][IDX_0xCA0][VAL]);
+ }
+ }
+}
+
+static bool ODM_CheckPowerStatus(struct adapter *Adapter)
+{
+ return true;
+}
+
+static void _PHY_SaveADDARegisters8723B(
+ struct adapter *padapter,
+ u32 *ADDAReg,
+ u32 *ADDABackup,
+ u32 RegisterNum
+)
+{
+ u32 i;
+ struct hal_com_data *pHalData = GET_HAL_DATA(padapter);
+ PDM_ODM_T pDM_Odm = &pHalData->odmpriv;
+
+ if (ODM_CheckPowerStatus(padapter) == false)
+ return;
+
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Save ADDA parameters.\n"));
+ for (i = 0 ; i < RegisterNum ; i++) {
+ ADDABackup[i] = PHY_QueryBBReg(pDM_Odm->Adapter, ADDAReg[i], bMaskDWord);
+ }
+}
+
+
+static void _PHY_SaveMACRegisters8723B(
+ struct adapter *padapter, u32 *MACReg, u32 *MACBackup
+)
+{
+ u32 i;
+ struct hal_com_data *pHalData = GET_HAL_DATA(padapter);
+ PDM_ODM_T pDM_Odm = &pHalData->odmpriv;
+
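+	/*
+	 * The first IQK_MAC_REG_NUM - 1 registers are byte-wide; the last
+	 * entry is saved as a full 32-bit word, matching the reload helper.
+	 */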
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Save MAC parameters.\n"));
+ for (i = 0 ; i < (IQK_MAC_REG_NUM - 1); i++) {
+ MACBackup[i] = rtw_read8(pDM_Odm->Adapter, MACReg[i]);
+ }
+ MACBackup[i] = rtw_read32(pDM_Odm->Adapter, MACReg[i]);
+
+}
+
+
+static void _PHY_ReloadADDARegisters8723B(
+ struct adapter *padapter,
+ u32 *ADDAReg,
+ u32 *ADDABackup,
+	u32 RegisterNum
+)
+{
+ u32 i;
+ struct hal_com_data *pHalData = GET_HAL_DATA(padapter);
+ PDM_ODM_T pDM_Odm = &pHalData->odmpriv;
+
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Reload ADDA power saving parameters !\n"));
+	for (i = 0 ; i < RegisterNum; i++) {
+ PHY_SetBBReg(pDM_Odm->Adapter, ADDAReg[i], bMaskDWord, ADDABackup[i]);
+ }
+}
+
+static void _PHY_ReloadMACRegisters8723B(
+ struct adapter *padapter, u32 *MACReg, u32 *MACBackup
+)
+{
+ u32 i;
+
+ for (i = 0 ; i < (IQK_MAC_REG_NUM - 1); i++) {
+ rtw_write8(padapter, MACReg[i], (u8)MACBackup[i]);
+ }
+ rtw_write32(padapter, MACReg[i], MACBackup[i]);
+}
+
+
+static void _PHY_PathADDAOn8723B(
+ struct adapter *padapter,
+ u32 *ADDAReg,
+ bool isPathAOn,
+ bool is2T
+)
+{
+ u32 pathOn;
+ u32 i;
+ struct hal_com_data *pHalData = GET_HAL_DATA(padapter);
+ PDM_ODM_T pDM_Odm = &pHalData->odmpriv;
+
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("ADDA ON.\n"));
+
+	/* both paths use the same ADDA value here */
+	pathOn = 0x01c00014;
+	PHY_SetBBReg(pDM_Odm->Adapter, ADDAReg[0], bMaskDWord, pathOn);
+
+ for (i = 1 ; i < IQK_ADDA_REG_NUM ; i++) {
+ PHY_SetBBReg(pDM_Odm->Adapter, ADDAReg[i], bMaskDWord, pathOn);
+ }
+
+}
+
+static void _PHY_MACSettingCalibration8723B(
+ struct adapter *padapter, u32 *MACReg, u32 *MACBackup
+)
+{
+ u32 i = 0;
+ struct hal_com_data *pHalData = GET_HAL_DATA(padapter);
+ PDM_ODM_T pDM_Odm = &pHalData->odmpriv;
+
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("MAC settings for Calibration.\n"));
+
+ rtw_write8(pDM_Odm->Adapter, MACReg[i], 0x3F);
+
+ for (i = 1 ; i < (IQK_MAC_REG_NUM - 1); i++) {
+ rtw_write8(pDM_Odm->Adapter, MACReg[i], (u8)(MACBackup[i]&(~BIT3)));
+ }
+ rtw_write8(pDM_Odm->Adapter, MACReg[i], (u8)(MACBackup[i]&(~BIT5)));
+
+}
+
+static bool phy_SimularityCompare_8723B(
+ struct adapter *padapter,
+ s32 result[][8],
+ u8 c1,
+ u8 c2
+)
+{
+ u32 i, j, diff, SimularityBitMap, bound = 0;
+ u8 final_candidate[2] = {0xFF, 0xFF}; /* for path A and path B */
+ bool bResult = true;
+ bool is2T = true;
+ s32 tmp1 = 0, tmp2 = 0;
+
+ if (is2T)
+ bound = 8;
+ else
+ bound = 4;
+
+ SimularityBitMap = 0;
+
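+	/*
+	 * Compare the two IQK runs element by element.  Odd indices hold
+	 * signed 10-bit Y terms and are sign-extended first.  Any pair that
+	 * differs by more than MAX_TOLERANCE sets the corresponding bit in
+	 * SimularityBitMap, unless one run reported all-zero Rx results, in
+	 * which case the other run is chosen as the candidate for that path.
+	 */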
+ for (i = 0; i < bound; i++) {
+
+ if ((i == 1) || (i == 3) || (i == 5) || (i == 7)) {
+ if ((result[c1][i] & 0x00000200) != 0)
+ tmp1 = result[c1][i] | 0xFFFFFC00;
+ else
+ tmp1 = result[c1][i];
+
+ if ((result[c2][i] & 0x00000200) != 0)
+ tmp2 = result[c2][i] | 0xFFFFFC00;
+ else
+ tmp2 = result[c2][i];
+ } else {
+ tmp1 = result[c1][i];
+ tmp2 = result[c2][i];
+ }
+
+ diff = (tmp1 > tmp2) ? (tmp1 - tmp2) : (tmp2 - tmp1);
+
+ if (diff > MAX_TOLERANCE) {
+ if ((i == 2 || i == 6) && !SimularityBitMap) {
+ if (result[c1][i]+result[c1][i+1] == 0)
+ final_candidate[(i/4)] = c2;
+ else if (result[c2][i]+result[c2][i+1] == 0)
+ final_candidate[(i/4)] = c1;
+ else
+ SimularityBitMap = SimularityBitMap|(1<<i);
+ } else
+ SimularityBitMap = SimularityBitMap|(1<<i);
+ }
+ }
+
+ if (SimularityBitMap == 0) {
+ for (i = 0; i < (bound/4); i++) {
+ if (final_candidate[i] != 0xFF) {
+ for (j = i*4; j < (i+1)*4-2; j++)
+ result[3][j] = result[final_candidate[i]][j];
+ bResult = false;
+ }
+ }
+ return bResult;
+ } else {
+
+ if (!(SimularityBitMap & 0x03)) { /* path A TX OK */
+ for (i = 0; i < 2; i++)
+ result[3][i] = result[c1][i];
+ }
+
+ if (!(SimularityBitMap & 0x0c)) { /* path A RX OK */
+ for (i = 2; i < 4; i++)
+ result[3][i] = result[c1][i];
+ }
+
+ if (!(SimularityBitMap & 0x30)) { /* path B TX OK */
+ for (i = 4; i < 6; i++)
+ result[3][i] = result[c1][i];
+ }
+
+ if (!(SimularityBitMap & 0xc0)) { /* path B RX OK */
+ for (i = 6; i < 8; i++)
+ result[3][i] = result[c1][i];
+ }
+ return false;
+ }
+}
+
+
+
+static void phy_IQCalibrate_8723B(
+ struct adapter *padapter,
+ s32 result[][8],
+ u8 t,
+ bool is2T,
+ u8 RF_Path
+)
+{
+ struct hal_com_data *pHalData = GET_HAL_DATA(padapter);
+ PDM_ODM_T pDM_Odm = &pHalData->odmpriv;
+
+ u32 i;
+ u8 PathAOK, PathBOK;
+ u8 tmp0xc50 = (u8)PHY_QueryBBReg(pDM_Odm->Adapter, 0xC50, bMaskByte0);
+ u8 tmp0xc58 = (u8)PHY_QueryBBReg(pDM_Odm->Adapter, 0xC58, bMaskByte0);
+ u32 ADDA_REG[IQK_ADDA_REG_NUM] = {
+ rFPGA0_XCD_SwitchControl,
+ rBlue_Tooth,
+ rRx_Wait_CCA,
+ rTx_CCK_RFON,
+ rTx_CCK_BBON,
+ rTx_OFDM_RFON,
+ rTx_OFDM_BBON,
+ rTx_To_Rx,
+ rTx_To_Tx,
+ rRx_CCK,
+ rRx_OFDM,
+ rRx_Wait_RIFS,
+ rRx_TO_Rx,
+ rStandby,
+ rSleep,
+ rPMPD_ANAEN
+ };
+ u32 IQK_MAC_REG[IQK_MAC_REG_NUM] = {
+ REG_TXPAUSE,
+ REG_BCN_CTRL,
+ REG_BCN_CTRL_1,
+ REG_GPIO_MUXCFG
+ };
+
+ /* since 92C & 92D have the different define in IQK_BB_REG */
+ u32 IQK_BB_REG_92C[IQK_BB_REG_NUM] = {
+ rOFDM0_TRxPathEnable,
+ rOFDM0_TRMuxPar,
+ rFPGA0_XCD_RFInterfaceSW,
+ rConfig_AntA,
+ rConfig_AntB,
+ rFPGA0_XAB_RFInterfaceSW,
+ rFPGA0_XA_RFInterfaceOE,
+ rFPGA0_XB_RFInterfaceOE,
+ rCCK0_AFESetting
+ };
+ const u32 retryCount = 2;
+
+ /* Note: IQ calibration must be performed after loading */
+ /* PHY_REG.txt , and radio_a, radio_b.txt */
+
+ /* u32 bbvalue; */
+
+ if (t == 0) {
+/* bbvalue = PHY_QueryBBReg(pDM_Odm->Adapter, rFPGA0_RFMOD, bMaskDWord); */
+/* RT_DISP(FINIT, INIT_IQK, ("phy_IQCalibrate_8188E() ==>0x%08x\n", bbvalue)); */
+
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("IQ Calibration for %s for %d times\n", (is2T ? "2T2R" : "1T1R"), t));
+
+ /* Save ADDA parameters, turn Path A ADDA on */
+ _PHY_SaveADDARegisters8723B(padapter, ADDA_REG, pDM_Odm->RFCalibrateInfo.ADDA_backup, IQK_ADDA_REG_NUM);
+ _PHY_SaveMACRegisters8723B(padapter, IQK_MAC_REG, pDM_Odm->RFCalibrateInfo.IQK_MAC_backup);
+ _PHY_SaveADDARegisters8723B(padapter, IQK_BB_REG_92C, pDM_Odm->RFCalibrateInfo.IQK_BB_backup, IQK_BB_REG_NUM);
+ }
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("IQ Calibration for %s for %d times\n", (is2T ? "2T2R" : "1T1R"), t));
+
+ _PHY_PathADDAOn8723B(padapter, ADDA_REG, true, is2T);
+
+/* no serial mode */
+
+ /* save RF path for 8723B */
+/* Path_SEL_BB = PHY_QueryBBReg(pDM_Odm->Adapter, 0x948, bMaskDWord); */
+/* Path_SEL_RF = PHY_QueryRFReg(pDM_Odm->Adapter, ODM_RF_PATH_A, 0xb0, 0xfffff); */
+
+ /* MAC settings */
+ _PHY_MACSettingCalibration8723B(padapter, IQK_MAC_REG, pDM_Odm->RFCalibrateInfo.IQK_MAC_backup);
+
+ /* BB setting */
+ /* PHY_SetBBReg(pDM_Odm->Adapter, rFPGA0_RFMOD, BIT24, 0x00); */
+ PHY_SetBBReg(pDM_Odm->Adapter, rCCK0_AFESetting, 0x0f000000, 0xf);
+ PHY_SetBBReg(pDM_Odm->Adapter, rOFDM0_TRxPathEnable, bMaskDWord, 0x03a05600);
+ PHY_SetBBReg(pDM_Odm->Adapter, rOFDM0_TRMuxPar, bMaskDWord, 0x000800e4);
+ PHY_SetBBReg(pDM_Odm->Adapter, rFPGA0_XCD_RFInterfaceSW, bMaskDWord, 0x22204000);
+
+
+/* PHY_SetBBReg(pDM_Odm->Adapter, rFPGA0_XAB_RFInterfaceSW, BIT10, 0x01); */
+/* PHY_SetBBReg(pDM_Odm->Adapter, rFPGA0_XAB_RFInterfaceSW, BIT26, 0x01); */
+/* PHY_SetBBReg(pDM_Odm->Adapter, rFPGA0_XA_RFInterfaceOE, BIT10, 0x00); */
+/* PHY_SetBBReg(pDM_Odm->Adapter, rFPGA0_XB_RFInterfaceOE, BIT10, 0x00); */
+
+
+/* RX IQ calibration setting for 8723B D cut large current issue when leaving IPS */
+
+ PHY_SetBBReg(pDM_Odm->Adapter, rFPGA0_IQK, bMaskH3Bytes, 0x000000);
+ PHY_SetRFReg(pDM_Odm->Adapter, ODM_RF_PATH_A, RF_WE_LUT, 0x80000, 0x1);
+ PHY_SetRFReg(pDM_Odm->Adapter, ODM_RF_PATH_A, RF_RCK_OS, bRFRegOffsetMask, 0x30000);
+ PHY_SetRFReg(pDM_Odm->Adapter, ODM_RF_PATH_A, RF_TXPA_G1, bRFRegOffsetMask, 0x0001f);
+ PHY_SetRFReg(pDM_Odm->Adapter, ODM_RF_PATH_A, RF_TXPA_G2, bRFRegOffsetMask, 0xf7fb7);
+ PHY_SetRFReg(pDM_Odm->Adapter, ODM_RF_PATH_A, 0xed, 0x20, 0x1);
+ PHY_SetRFReg(pDM_Odm->Adapter, ODM_RF_PATH_A, 0x43, bRFRegOffsetMask, 0x60fbd);
+
+/* path A TX IQK */
+ for (i = 0 ; i < retryCount ; i++) {
+ PathAOK = phy_PathA_IQK_8723B(padapter, is2T, RF_Path);
+/* if (PathAOK == 0x03) { */
+ if (PathAOK == 0x01) {
+ /* Path A Tx IQK Success */
+ PHY_SetBBReg(pDM_Odm->Adapter, rFPGA0_IQK, bMaskH3Bytes, 0x000000);
+ pDM_Odm->RFCalibrateInfo.TxLOK[ODM_RF_PATH_A] = PHY_QueryRFReg(pDM_Odm->Adapter, ODM_RF_PATH_A, 0x8, bRFRegOffsetMask);
+
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Path A Tx IQK Success!!\n"));
+ result[t][0] = (PHY_QueryBBReg(pDM_Odm->Adapter, rTx_Power_Before_IQK_A, bMaskDWord)&0x3FF0000)>>16;
+ result[t][1] = (PHY_QueryBBReg(pDM_Odm->Adapter, rTx_Power_After_IQK_A, bMaskDWord)&0x3FF0000)>>16;
+ break;
+ }
+ }
+
+/* path A RXIQK */
+ for (i = 0 ; i < retryCount ; i++) {
+ PathAOK = phy_PathA_RxIQK8723B(padapter, is2T, RF_Path);
+ if (PathAOK == 0x03) {
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Path A Rx IQK Success!!\n"));
+/* result[t][0] = (PHY_QueryBBReg(pDM_Odm->Adapter, rTx_Power_Before_IQK_A, bMaskDWord)&0x3FF0000)>>16; */
+/* result[t][1] = (PHY_QueryBBReg(pDM_Odm->Adapter, rTx_Power_After_IQK_A, bMaskDWord)&0x3FF0000)>>16; */
+ result[t][2] = (PHY_QueryBBReg(pDM_Odm->Adapter, rRx_Power_Before_IQK_A_2, bMaskDWord)&0x3FF0000)>>16;
+ result[t][3] = (PHY_QueryBBReg(pDM_Odm->Adapter, rRx_Power_After_IQK_A_2, bMaskDWord)&0x3FF0000)>>16;
+ break;
+ } else {
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Path A Rx IQK Fail!!\n"));
+ }
+ }
+
+	if (PathAOK == 0x00) {
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Path A IQK failed!!\n"));
+ }
+
+/* path B IQK */
+ if (is2T) {
+
+ /* path B TX IQK */
+ for (i = 0 ; i < retryCount ; i++) {
+ PathBOK = phy_PathB_IQK_8723B(padapter);
+ if (PathBOK == 0x01) {
+ /* Path B Tx IQK Success */
+ PHY_SetBBReg(pDM_Odm->Adapter, rFPGA0_IQK, bMaskH3Bytes, 0x000000);
+ pDM_Odm->RFCalibrateInfo.TxLOK[ODM_RF_PATH_B] = PHY_QueryRFReg(pDM_Odm->Adapter, ODM_RF_PATH_B, 0x8, bRFRegOffsetMask);
+
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Path B Tx IQK Success!!\n"));
+ result[t][4] = (PHY_QueryBBReg(pDM_Odm->Adapter, rTx_Power_Before_IQK_A, bMaskDWord)&0x3FF0000)>>16;
+ result[t][5] = (PHY_QueryBBReg(pDM_Odm->Adapter, rTx_Power_After_IQK_A, bMaskDWord)&0x3FF0000)>>16;
+ break;
+ }
+ }
+
+/* path B RX IQK */
+ for (i = 0 ; i < retryCount ; i++) {
+ PathBOK = phy_PathB_RxIQK8723B(padapter, is2T);
+ if (PathBOK == 0x03) {
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Path B Rx IQK Success!!\n"));
+/* result[t][0] = (PHY_QueryBBReg(pDM_Odm->Adapter, rTx_Power_Before_IQK_A, bMaskDWord)&0x3FF0000)>>16; */
+/* result[t][1] = (PHY_QueryBBReg(pDM_Odm->Adapter, rTx_Power_After_IQK_A, bMaskDWord)&0x3FF0000)>>16; */
+ result[t][6] = (PHY_QueryBBReg(pDM_Odm->Adapter, rRx_Power_Before_IQK_A_2, bMaskDWord)&0x3FF0000)>>16;
+ result[t][7] = (PHY_QueryBBReg(pDM_Odm->Adapter, rRx_Power_After_IQK_A_2, bMaskDWord)&0x3FF0000)>>16;
+ break;
+ } else {
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Path B Rx IQK Fail!!\n"));
+ }
+ }
+
+/* Allen end */
+		if (PathBOK == 0x00) {
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Path B IQK failed!!\n"));
+ }
+ }
+
+ /* Back to BB mode, load original value */
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("IQK:Back to BB mode, load original value!\n"));
+ PHY_SetBBReg(pDM_Odm->Adapter, rFPGA0_IQK, bMaskH3Bytes, 0);
+
+ if (t != 0) {
+ /* Reload ADDA power saving parameters */
+ _PHY_ReloadADDARegisters8723B(padapter, ADDA_REG, pDM_Odm->RFCalibrateInfo.ADDA_backup, IQK_ADDA_REG_NUM);
+
+ /* Reload MAC parameters */
+ _PHY_ReloadMACRegisters8723B(padapter, IQK_MAC_REG, pDM_Odm->RFCalibrateInfo.IQK_MAC_backup);
+
+ _PHY_ReloadADDARegisters8723B(padapter, IQK_BB_REG_92C, pDM_Odm->RFCalibrateInfo.IQK_BB_backup, IQK_BB_REG_NUM);
+
+ /* Reload RF path */
+/* PHY_SetBBReg(pDM_Odm->Adapter, 0x948, bMaskDWord, Path_SEL_BB); */
+/* PHY_SetRFReg(pDM_Odm->Adapter, ODM_RF_PATH_A, 0xb0, 0xfffff, Path_SEL_RF); */
+
+ /* Allen initial gain 0xc50 */
+ /* Restore RX initial gain */
+ PHY_SetBBReg(pDM_Odm->Adapter, 0xc50, bMaskByte0, 0x50);
+ PHY_SetBBReg(pDM_Odm->Adapter, 0xc50, bMaskByte0, tmp0xc50);
+ if (is2T) {
+ PHY_SetBBReg(pDM_Odm->Adapter, 0xc58, bMaskByte0, 0x50);
+ PHY_SetBBReg(pDM_Odm->Adapter, 0xc58, bMaskByte0, tmp0xc58);
+ }
+
+ /* load 0xe30 IQC default value */
+ PHY_SetBBReg(pDM_Odm->Adapter, rTx_IQK_Tone_A, bMaskDWord, 0x01008c00);
+ PHY_SetBBReg(pDM_Odm->Adapter, rRx_IQK_Tone_A, bMaskDWord, 0x01008c00);
+
+ }
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("phy_IQCalibrate_8723B() <==\n"));
+
+}
+
+
+static void phy_LCCalibrate_8723B(PDM_ODM_T pDM_Odm, bool is2T)
+{
+ u8 tmpReg;
+ u32 RF_Amode = 0, RF_Bmode = 0, LC_Cal;
+ struct adapter *padapter = pDM_Odm->Adapter;
+
+ /* Check continuous TX and Packet TX */
+ tmpReg = rtw_read8(pDM_Odm->Adapter, 0xd03);
+
+	if ((tmpReg&0x70) != 0) /* Deal with continuous TX case */
+ rtw_write8(pDM_Odm->Adapter, 0xd03, tmpReg&0x8F); /* disable all continuous TX */
+ else /* Deal with Packet TX case */
+ rtw_write8(pDM_Odm->Adapter, REG_TXPAUSE, 0xFF); /* block all queues */
+
+ if ((tmpReg&0x70) != 0) {
+ /* 1. Read original RF mode */
+ /* Path-A */
+ RF_Amode = PHY_QueryRFReg(padapter, ODM_RF_PATH_A, RF_AC, bMask12Bits);
+
+ /* Path-B */
+ if (is2T)
+ RF_Bmode = PHY_QueryRFReg(padapter, ODM_RF_PATH_B, RF_AC, bMask12Bits);
+
+ /* 2. Set RF mode = standby mode */
+ /* Path-A */
+ PHY_SetRFReg(pDM_Odm->Adapter, ODM_RF_PATH_A, RF_AC, bMask12Bits, (RF_Amode&0x8FFFF)|0x10000);
+
+ /* Path-B */
+ if (is2T)
+ PHY_SetRFReg(pDM_Odm->Adapter, ODM_RF_PATH_B, RF_AC, bMask12Bits, (RF_Bmode&0x8FFFF)|0x10000);
+ }
+
+ /* 3. Read RF reg18 */
+ LC_Cal = PHY_QueryRFReg(padapter, ODM_RF_PATH_A, RF_CHNLBW, bMask12Bits);
+
+ /* 4. Set LC calibration begin bit15 */
+ PHY_SetRFReg(pDM_Odm->Adapter, ODM_RF_PATH_A, 0xB0, bRFRegOffsetMask, 0xDFBE0); /* LDO ON */
+ PHY_SetRFReg(pDM_Odm->Adapter, ODM_RF_PATH_A, RF_CHNLBW, bMask12Bits, LC_Cal|0x08000);
+
+ mdelay(100);
+
+ PHY_SetRFReg(pDM_Odm->Adapter, ODM_RF_PATH_A, 0xB0, bRFRegOffsetMask, 0xDFFE0); /* LDO OFF */
+
+ /* Channel 10 LC calibration issue for 8723bs with 26M xtal */
+ if (pDM_Odm->SupportInterface == ODM_ITRF_SDIO && pDM_Odm->PackageType >= 0x2) {
+ PHY_SetRFReg(pDM_Odm->Adapter, ODM_RF_PATH_A, RF_CHNLBW, bMask12Bits, LC_Cal);
+ }
+
+ /* Restore original situation */
+	if ((tmpReg&0x70) != 0) { /* Deal with continuous TX case */
+ /* Path-A */
+ rtw_write8(pDM_Odm->Adapter, 0xd03, tmpReg);
+ PHY_SetRFReg(pDM_Odm->Adapter, ODM_RF_PATH_A, RF_AC, bMask12Bits, RF_Amode);
+
+ /* Path-B */
+ if (is2T)
+ PHY_SetRFReg(pDM_Odm->Adapter, ODM_RF_PATH_B, RF_AC, bMask12Bits, RF_Bmode);
+ } else /* Deal with Packet TX case */
+ rtw_write8(pDM_Odm->Adapter, REG_TXPAUSE, 0x00);
+}
+
+/* Analog Pre-distortion calibration */
+#define APK_BB_REG_NUM 8
+#define APK_CURVE_REG_NUM 4
+#define PATH_NUM 2
+
+#define DP_BB_REG_NUM 7
+#define DP_RF_REG_NUM 1
+#define DP_RETRY_LIMIT 10
+#define DP_PATH_NUM 2
+#define DP_DPK_NUM 3
+#define DP_DPK_VALUE_NUM 2
+
+
+
+/* IQK version:V2.5 20140123 */
+/* IQK is controlled by Is2ant, RF path */
+void PHY_IQCalibrate_8723B(
+ struct adapter *padapter,
+ bool bReCovery,
+ bool bRestore,
+ bool Is2ant, /* false:1ant, true:2-ant */
+ u8 RF_Path /* 0:S1, 1:S0 */
+)
+{
+ struct hal_com_data *pHalData = GET_HAL_DATA(padapter);
+
+ PDM_ODM_T pDM_Odm = &pHalData->odmpriv;
+
+ s32 result[4][8]; /* last is final result */
+ u8 i, final_candidate, Indexforchannel;
+ bool bPathAOK, bPathBOK;
+ s32 RegE94, RegE9C, RegEA4, RegEAC, RegEB4, RegEBC, RegEC4, RegECC, RegTmp = 0;
+ bool is12simular, is13simular, is23simular;
+ bool bSingleTone = false, bCarrierSuppression = false;
+ u32 IQK_BB_REG_92C[IQK_BB_REG_NUM] = {
+ rOFDM0_XARxIQImbalance,
+ rOFDM0_XBRxIQImbalance,
+ rOFDM0_ECCAThreshold,
+ rOFDM0_AGCRSSITable,
+ rOFDM0_XATxIQImbalance,
+ rOFDM0_XBTxIQImbalance,
+ rOFDM0_XCTxAFE,
+ rOFDM0_XDTxAFE,
+ rOFDM0_RxIQExtAnta
+ };
+/* u32 Path_SEL_BB = 0; */
+ u32 GNT_BT_default;
+ u32 StartTime;
+ s32 ProgressingTime;
+
+ if (ODM_CheckPowerStatus(padapter) == false)
+ return;
+
+ if (!(pDM_Odm->SupportAbility & ODM_RF_CALIBRATION))
+ return;
+
+ /* 20120213<Kordan> Turn on when continuous Tx to pass lab testing. (required by Edlu) */
+ if (bSingleTone || bCarrierSuppression)
+ return;
+
+#if DISABLE_BB_RF
+ return;
+#endif
+ if (pDM_Odm->RFCalibrateInfo.bIQKInProgress)
+ return;
+
+
+ pDM_Odm->RFCalibrateInfo.bIQKInProgress = true;
+
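+	/*
+	 * bRestore: skip a full calibration and re-program the BB/RF from the
+	 * values cached by the last successful IQK; fall through to a fresh
+	 * IQK only if any cached entry is missing.
+	 */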
+ if (bRestore) {
+ u32 offset, data;
+ u8 path, bResult = SUCCESS;
+ PODM_RF_CAL_T pRFCalibrateInfo = &(pDM_Odm->RFCalibrateInfo);
+
+ path = (PHY_QueryBBReg(pDM_Odm->Adapter, rS0S1_PathSwitch, bMaskByte0) == 0x00) ? ODM_RF_PATH_A : ODM_RF_PATH_B;
+
+ /* Restore TX IQK */
+ for (i = 0; i < 3; ++i) {
+ offset = pRFCalibrateInfo->TxIQC_8723B[path][i][0];
+ data = pRFCalibrateInfo->TxIQC_8723B[path][i][1];
+ if ((offset == 0) || (data == 0)) {
+ DBG_871X(
+ "%s =>path:%s Restore TX IQK result failed\n",
+ __func__,
+ (path == ODM_RF_PATH_A)?"A":"B"
+ );
+ bResult = FAIL;
+ break;
+ }
+ /* RT_TRACE(_module_mp_, _drv_notice_, ("Switch to S1 TxIQC(offset, data) = (0x%X, 0x%X)\n", offset, data)); */
+ PHY_SetBBReg(pDM_Odm->Adapter, offset, bMaskDWord, data);
+ }
+
+ /* Restore RX IQK */
+ for (i = 0; i < 2; ++i) {
+ offset = pRFCalibrateInfo->RxIQC_8723B[path][i][0];
+ data = pRFCalibrateInfo->RxIQC_8723B[path][i][1];
+ if ((offset == 0) || (data == 0)) {
+ DBG_871X(
+ "%s =>path:%s Restore RX IQK result failed\n",
+ __func__,
+ (path == ODM_RF_PATH_A)?"A":"B"
+ );
+ bResult = FAIL;
+ break;
+ }
+ /* RT_TRACE(_module_mp_, _drv_notice_, ("Switch to S1 RxIQC (offset, data) = (0x%X, 0x%X)\n", offset, data)); */
+ PHY_SetBBReg(pDM_Odm->Adapter, offset, bMaskDWord, data);
+ }
+
+ if (pDM_Odm->RFCalibrateInfo.TxLOK[ODM_RF_PATH_A] == 0) {
+ DBG_871X("%s => Restore Path-A TxLOK result failed\n", __func__);
+ bResult = FAIL;
+ } else {
+ PHY_SetRFReg(pDM_Odm->Adapter, ODM_RF_PATH_A, RF_TXM_IDAC, bRFRegOffsetMask, pDM_Odm->RFCalibrateInfo.TxLOK[ODM_RF_PATH_A]);
+ PHY_SetRFReg(pDM_Odm->Adapter, ODM_RF_PATH_B, RF_TXM_IDAC, bRFRegOffsetMask, pDM_Odm->RFCalibrateInfo.TxLOK[ODM_RF_PATH_B]);
+ }
+
+ if (bResult == SUCCESS)
+ return;
+ }
+
+ if (bReCovery) {
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_INIT, ODM_DBG_LOUD, ("PHY_IQCalibrate_8723B: Return due to bReCovery!\n"));
+ _PHY_ReloadADDARegisters8723B(padapter, IQK_BB_REG_92C, pDM_Odm->RFCalibrateInfo.IQK_BB_backup_recover, 9);
+ return;
+ }
+ StartTime = jiffies;
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("IQK:Start!!!\n"));
+
+ /* save default GNT_BT */
+ GNT_BT_default = PHY_QueryBBReg(pDM_Odm->Adapter, 0x764, bMaskDWord);
+ /* Save RF Path */
+/* Path_SEL_BB = PHY_QueryBBReg(pDM_Odm->Adapter, 0x948, bMaskDWord); */
+/* Path_SEL_RF = PHY_QueryRFReg(pDM_Odm->Adapter, ODM_RF_PATH_A, 0xb0, 0xfffff); */
+
+ /* set GNT_BT = 0, pause BT traffic */
+/* PHY_SetBBReg(pDM_Odm->Adapter, 0x764, BIT12, 0x0); */
+/* PHY_SetBBReg(pDM_Odm->Adapter, 0x764, BIT11, 0x1); */
+
+
+ for (i = 0; i < 8; i++) {
+ result[0][i] = 0;
+ result[1][i] = 0;
+ result[2][i] = 0;
+ result[3][i] = 0;
+ }
+
+ final_candidate = 0xff;
+ bPathAOK = false;
+ bPathBOK = false;
+ is12simular = false;
+ is23simular = false;
+ is13simular = false;
+
+
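+	/*
+	 * Run IQK up to three times and use the first pair of runs whose
+	 * results agree within MAX_TOLERANCE.  If no pair agrees, fall back
+	 * to the partially merged result[3] row when it is non-zero,
+	 * otherwise flag the calibration as failed (final_candidate = 0xFF).
+	 */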
+ for (i = 0; i < 3; i++) {
+ phy_IQCalibrate_8723B(padapter, result, i, Is2ant, RF_Path);
+
+ if (i == 1) {
+ is12simular = phy_SimularityCompare_8723B(padapter, result, 0, 1);
+ if (is12simular) {
+ final_candidate = 0;
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("IQK: is12simular final_candidate is %x\n", final_candidate));
+ break;
+ }
+ }
+
+ if (i == 2) {
+ is13simular = phy_SimularityCompare_8723B(padapter, result, 0, 2);
+ if (is13simular) {
+ final_candidate = 0;
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("IQK: is13simular final_candidate is %x\n", final_candidate));
+
+ break;
+ }
+
+ is23simular = phy_SimularityCompare_8723B(padapter, result, 1, 2);
+ if (is23simular) {
+ final_candidate = 1;
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("IQK: is23simular final_candidate is %x\n", final_candidate));
+ } else {
+ for (i = 0; i < 8; i++)
+ RegTmp += result[3][i];
+
+ if (RegTmp != 0)
+ final_candidate = 3;
+ else
+ final_candidate = 0xFF;
+ }
+ }
+ }
+/* RT_TRACE(COMP_INIT, DBG_LOUD, ("Release Mutex in IQCalibrate\n")); */
+
+ for (i = 0; i < 4; i++) {
+ RegE94 = result[i][0];
+ RegE9C = result[i][1];
+ RegEA4 = result[i][2];
+ RegEAC = result[i][3];
+ RegEB4 = result[i][4];
+ RegEBC = result[i][5];
+ RegEC4 = result[i][6];
+ RegECC = result[i][7];
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("IQK: RegE94 =%x RegE9C =%x RegEA4 =%x RegEAC =%x RegEB4 =%x RegEBC =%x RegEC4 =%x RegECC =%x\n ", RegE94, RegE9C, RegEA4, RegEAC, RegEB4, RegEBC, RegEC4, RegECC));
+ }
+
+ if (final_candidate != 0xff) {
+ pDM_Odm->RFCalibrateInfo.RegE94 = RegE94 = result[final_candidate][0];
+ pDM_Odm->RFCalibrateInfo.RegE9C = RegE9C = result[final_candidate][1];
+ RegEA4 = result[final_candidate][2];
+ RegEAC = result[final_candidate][3];
+ pDM_Odm->RFCalibrateInfo.RegEB4 = RegEB4 = result[final_candidate][4];
+ pDM_Odm->RFCalibrateInfo.RegEBC = RegEBC = result[final_candidate][5];
+ RegEC4 = result[final_candidate][6];
+ RegECC = result[final_candidate][7];
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("IQK: final_candidate is %x\n", final_candidate));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("IQK: RegE94 =%x RegE9C =%x RegEA4 =%x RegEAC =%x RegEB4 =%x RegEBC =%x RegEC4 =%x RegECC =%x\n ", RegE94, RegE9C, RegEA4, RegEAC, RegEB4, RegEBC, RegEC4, RegECC));
+ bPathAOK = bPathBOK = true;
+ } else {
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("IQK: FAIL use default value\n"));
+
+ pDM_Odm->RFCalibrateInfo.RegE94 = pDM_Odm->RFCalibrateInfo.RegEB4 = 0x100; /* X default value */
+ pDM_Odm->RFCalibrateInfo.RegE9C = pDM_Odm->RFCalibrateInfo.RegEBC = 0x0; /* Y default value */
+ }
+
+	if (RegE94 != 0)
+		_PHY_PathAFillIQKMatrix8723B(padapter, bPathAOK, result, final_candidate, (RegEA4 == 0));
+
+	if (RegEB4 != 0)
+		_PHY_PathBFillIQKMatrix8723B(padapter, bPathBOK, result, final_candidate, (RegEC4 == 0));
+
+ Indexforchannel = ODM_GetRightChnlPlaceforIQK(pHalData->CurrentChannel);
+
+/* To Fix BSOD when final_candidate is 0xff */
+/* by sherry 20120321 */
+ if (final_candidate < 4) {
+ for (i = 0; i < IQK_Matrix_REG_NUM; i++)
+ pDM_Odm->RFCalibrateInfo.IQKMatrixRegSetting[Indexforchannel].Value[0][i] = result[final_candidate][i];
+ pDM_Odm->RFCalibrateInfo.IQKMatrixRegSetting[Indexforchannel].bIQKDone = true;
+ }
+ /* RT_DISP(FINIT, INIT_IQK, ("\nIQK OK Indexforchannel %d.\n", Indexforchannel)); */
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("\nIQK OK Indexforchannel %d.\n", Indexforchannel));
+
+ _PHY_SaveADDARegisters8723B(padapter, IQK_BB_REG_92C, pDM_Odm->RFCalibrateInfo.IQK_BB_backup_recover, 9);
+
+ /* restore GNT_BT */
+ PHY_SetBBReg(pDM_Odm->Adapter, 0x764, bMaskDWord, GNT_BT_default);
+ /* Restore RF Path */
+/* PHY_SetBBReg(pDM_Odm->Adapter, 0x948, bMaskDWord, Path_SEL_BB); */
+/* PHY_SetRFReg(pDM_Odm->Adapter, ODM_RF_PATH_A, 0xb0, 0xfffff, Path_SEL_RF); */
+
+	/* Restore RX mode table parameters */
+ PHY_SetRFReg(pDM_Odm->Adapter, ODM_RF_PATH_A, RF_WE_LUT, 0x80000, 0x1);
+ PHY_SetRFReg(pDM_Odm->Adapter, ODM_RF_PATH_A, RF_RCK_OS, bRFRegOffsetMask, 0x18000);
+ PHY_SetRFReg(pDM_Odm->Adapter, ODM_RF_PATH_A, RF_TXPA_G1, bRFRegOffsetMask, 0x0001f);
+ PHY_SetRFReg(pDM_Odm->Adapter, ODM_RF_PATH_A, RF_TXPA_G2, bRFRegOffsetMask, 0xe6177);
+ PHY_SetRFReg(pDM_Odm->Adapter, ODM_RF_PATH_A, 0xed, 0x20, 0x1);
+ PHY_SetRFReg(pDM_Odm->Adapter, ODM_RF_PATH_A, 0x43, bRFRegOffsetMask, 0x300bd);
+
+ /* set GNT_BT = HW control */
+/* PHY_SetBBReg(pDM_Odm->Adapter, 0x764, BIT12, 0x0); */
+/* PHY_SetBBReg(pDM_Odm->Adapter, 0x764, BIT11, 0x0); */
+
+ if (Is2ant) {
+ if (RF_Path == 0x0) /* S1 */
+ ODM_SetIQCbyRFpath(pDM_Odm, 0);
+ else /* S0 */
+ ODM_SetIQCbyRFpath(pDM_Odm, 1);
+ }
+
+ pDM_Odm->RFCalibrateInfo.bIQKInProgress = false;
+
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("IQK finished\n"));
+ ProgressingTime = jiffies_to_msecs(jiffies - StartTime);
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("IQK ProgressingTime = %d\n", ProgressingTime));
+
+
+}
+
+
+void PHY_LCCalibrate_8723B(PDM_ODM_T pDM_Odm)
+{
+ bool bSingleTone = false, bCarrierSuppression = false;
+ u32 timeout = 2000, timecount = 0;
+ u32 StartTime;
+ s32 ProgressingTime;
+
+#if DISABLE_BB_RF
+ return;
+#endif
+
+ if (!(pDM_Odm->SupportAbility & ODM_RF_CALIBRATION))
+ return;
+
+ /* 20120213<Kordan> Turn on when continuous Tx to pass lab testing. (required by Edlu) */
+ if (bSingleTone || bCarrierSuppression)
+ return;
+
+ StartTime = jiffies;
+ while (*(pDM_Odm->pbScanInProcess) && timecount < timeout) {
+ mdelay(50);
+ timecount += 50;
+ }
+
+ pDM_Odm->RFCalibrateInfo.bLCKInProgress = true;
+
+
+ phy_LCCalibrate_8723B(pDM_Odm, false);
+
+
+ pDM_Odm->RFCalibrateInfo.bLCKInProgress = false;
+
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("LCK:Finish!!!interface %d\n", pDM_Odm->InterfaceIndex));
+ ProgressingTime = jiffies_to_msecs(jiffies - StartTime);
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("LCK ProgressingTime = %d\n", ProgressingTime));
+}
diff --git a/drivers/staging/rtl8723bs/hal/HalPhyRf_8723B.h b/drivers/staging/rtl8723bs/hal/HalPhyRf_8723B.h
new file mode 100644
index 000000000000..ae4eca144606
--- /dev/null
+++ b/drivers/staging/rtl8723bs/hal/HalPhyRf_8723B.h
@@ -0,0 +1,83 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+
+#ifndef __HAL_PHY_RF_8723B_H__
+#define __HAL_PHY_RF_8723B_H__
+
+/*--------------------------Define Parameters-------------------------------*/
+#define IQK_DELAY_TIME_8723B 20 /* ms */
+#define IQK_DEFERRED_TIME_8723B 4
+#define index_mapping_NUM_8723B 15
+#define AVG_THERMAL_NUM_8723B 4
+#define RF_T_METER_8723B 0x42 /* */
+
+
+void ConfigureTxpowerTrack_8723B(PTXPWRTRACK_CFG pConfig);
+
+void DoIQK_8723B(
+ PDM_ODM_T pDM_Odm,
+ u8 DeltaThermalIndex,
+ u8 ThermalValue,
+ u8 Threshold
+);
+
+void ODM_TxPwrTrackSetPwr_8723B(
+ PDM_ODM_T pDM_Odm,
+ PWRTRACK_METHOD Method,
+ u8 RFPath,
+ u8 ChannelMappedIndex
+);
+
+/* 1 7. IQK */
+void PHY_IQCalibrate_8723B(
+ struct adapter *Adapter,
+ bool bReCovery,
+ bool bRestore,
+ bool Is2ant,
+ u8 RF_Path
+);
+
+void ODM_SetIQCbyRFpath(PDM_ODM_T pDM_Odm, u32 RFpath);
+
+/* */
+/* LC calibrate */
+/* */
+void PHY_LCCalibrate_8723B(PDM_ODM_T pDM_Odm);
+
+/* */
+/* AP calibrate */
+/* */
+void PHY_DigitalPredistortion_8723B(struct adapter *padapter);
+
+
+void _PHY_SaveADDARegisters_8723B(
+ struct adapter *padapter,
+ u32 *ADDAReg,
+ u32 *ADDABackup,
+ u32 RegisterNum
+);
+
+void _PHY_PathADDAOn_8723B(
+ struct adapter *padapter,
+ u32 *ADDAReg,
+ bool isPathAOn,
+ bool is2T
+);
+
+void _PHY_MACSettingCalibration_8723B(
+ struct adapter *padapter, u32 *MACReg, u32 *MACBackup
+);
+
+#endif /* #ifndef __HAL_PHY_RF_8723B_H__ */
diff --git a/drivers/staging/rtl8723bs/hal/HalPwrSeqCmd.c b/drivers/staging/rtl8723bs/hal/HalPwrSeqCmd.c
new file mode 100644
index 000000000000..f4619768a99f
--- /dev/null
+++ b/drivers/staging/rtl8723bs/hal/HalPwrSeqCmd.c
@@ -0,0 +1,205 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+/*++
+Copyright (c) Realtek Semiconductor Corp. All rights reserved.
+
+Module Name:
+ HalPwrSeqCmd.c
+
+Abstract:
+ Implement HW Power sequence configuration CMD handling routine for Realtek devices.
+
+Major Change History:
+ When Who What
+ ---------- --------------- -------------------------------
+ 2011-10-26 Lucas Modify to be compatible with SD4-CE driver.
+ 2011-07-07 Roger Create.
+
+--*/
+#include <drv_types.h>
+#include <rtw_debug.h>
+#include <HalPwrSeqCmd.h>
+
+
+/* */
+/* Description: */
+/* This routine deals with the Power Configuration CMDs parsing for RTL8723/RTL8188E Series IC. */
+/* */
+/* Assumption: */
+/* We should follow specific format which was released from HW SD. */
+/* */
+/* 2011.07.07, added by Roger. */
+/* */
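+/*
+ * Typical (hypothetical) call from a power-on path -- the mask and table
+ * names below are illustrative; the real power sequences are defined
+ * elsewhere in the driver:
+ *
+ *	if (!HalPwrSeqCmdParsing(padapter, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK,
+ *				 PWR_INTF_SDIO_MSK, rtl8723B_card_enable_flow))
+ *		return _FAIL;
+ */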
+u8 HalPwrSeqCmdParsing(
+ struct adapter *padapter,
+ u8 CutVersion,
+ u8 FabVersion,
+ u8 InterfaceType,
+ WLAN_PWR_CFG PwrSeqCmd[]
+)
+{
+ WLAN_PWR_CFG PwrCfgCmd = {0};
+ u8 bPollingBit = false;
+ u32 AryIdx = 0;
+ u8 value = 0;
+ u32 offset = 0;
+ u32 pollingCount = 0; /* polling autoload done. */
+ u32 maxPollingCnt = 5000;
+
+ do {
+ PwrCfgCmd = PwrSeqCmd[AryIdx];
+
+ RT_TRACE(
+ _module_hal_init_c_,
+ _drv_info_,
+ (
+ "HalPwrSeqCmdParsing: offset(%#x) cut_msk(%#x) fab_msk(%#x) interface_msk(%#x) base(%#x) cmd(%#x) msk(%#x) value(%#x)\n",
+ GET_PWR_CFG_OFFSET(PwrCfgCmd),
+ GET_PWR_CFG_CUT_MASK(PwrCfgCmd),
+ GET_PWR_CFG_FAB_MASK(PwrCfgCmd),
+ GET_PWR_CFG_INTF_MASK(PwrCfgCmd),
+ GET_PWR_CFG_BASE(PwrCfgCmd),
+ GET_PWR_CFG_CMD(PwrCfgCmd),
+ GET_PWR_CFG_MASK(PwrCfgCmd),
+ GET_PWR_CFG_VALUE(PwrCfgCmd)
+ )
+ );
+
+ /* 2 Only Handle the command whose FAB, CUT, and Interface are matched */
+ if (
+ (GET_PWR_CFG_FAB_MASK(PwrCfgCmd) & FabVersion) &&
+ (GET_PWR_CFG_CUT_MASK(PwrCfgCmd) & CutVersion) &&
+ (GET_PWR_CFG_INTF_MASK(PwrCfgCmd) & InterfaceType)
+ ) {
+ switch (GET_PWR_CFG_CMD(PwrCfgCmd)) {
+ case PWR_CMD_READ:
+ RT_TRACE(
+ _module_hal_init_c_,
+ _drv_info_,
+ ("HalPwrSeqCmdParsing: PWR_CMD_READ\n")
+ );
+ break;
+
+ case PWR_CMD_WRITE:
+ RT_TRACE(
+ _module_hal_init_c_,
+ _drv_info_,
+ ("HalPwrSeqCmdParsing: PWR_CMD_WRITE\n")
+ );
+ offset = GET_PWR_CFG_OFFSET(PwrCfgCmd);
+
+ /* */
+ /* <Roger_Notes> We should deal with interface specific address mapping for some interfaces, e.g., SDIO interface */
+ /* 2011.07.07. */
+ /* */
+ if (GET_PWR_CFG_BASE(PwrCfgCmd) == PWR_BASEADDR_SDIO) {
+ /* Read Back SDIO Local value */
+ value = SdioLocalCmd52Read1Byte(padapter, offset);
+
+ value &= ~(GET_PWR_CFG_MASK(PwrCfgCmd));
+ value |= (
+ GET_PWR_CFG_VALUE(PwrCfgCmd) &
+ GET_PWR_CFG_MASK(PwrCfgCmd)
+ );
+
+ /* Write Back SDIO Local value */
+ SdioLocalCmd52Write1Byte(padapter, offset, value);
+ } else {
+ /* Read the value from system register */
+ value = rtw_read8(padapter, offset);
+
+ value &= (~(GET_PWR_CFG_MASK(PwrCfgCmd)));
+ value |= (
+ GET_PWR_CFG_VALUE(PwrCfgCmd)
+ &GET_PWR_CFG_MASK(PwrCfgCmd)
+ );
+
+				/* Write the value back to system register */
+ rtw_write8(padapter, offset, value);
+ }
+ break;
+
+ case PWR_CMD_POLLING:
+ RT_TRACE(
+ _module_hal_init_c_,
+ _drv_info_,
+ ("HalPwrSeqCmdParsing: PWR_CMD_POLLING\n")
+ );
+
+ bPollingBit = false;
+ offset = GET_PWR_CFG_OFFSET(PwrCfgCmd);
+ do {
+ if (GET_PWR_CFG_BASE(PwrCfgCmd) == PWR_BASEADDR_SDIO)
+ value = SdioLocalCmd52Read1Byte(padapter, offset);
+ else
+ value = rtw_read8(padapter, offset);
+
+ value = value&GET_PWR_CFG_MASK(PwrCfgCmd);
+ if (
+ value == (GET_PWR_CFG_VALUE(PwrCfgCmd) &
+ GET_PWR_CFG_MASK(PwrCfgCmd))
+ )
+ bPollingBit = true;
+ else
+ udelay(10);
+
+ if (pollingCount++ > maxPollingCnt) {
+ DBG_871X(
+ "Fail to polling Offset[%#x]=%02x\n",
+ offset,
+ value
+ );
+ return false;
+ }
+ } while (!bPollingBit);
+
+ break;
+
+ case PWR_CMD_DELAY:
+ RT_TRACE(
+ _module_hal_init_c_,
+ _drv_info_,
+ ("HalPwrSeqCmdParsing: PWR_CMD_DELAY\n")
+ );
+ if (GET_PWR_CFG_VALUE(PwrCfgCmd) == PWRSEQ_DELAY_US)
+ udelay(GET_PWR_CFG_OFFSET(PwrCfgCmd));
+ else
+ udelay(GET_PWR_CFG_OFFSET(PwrCfgCmd)*1000);
+ break;
+
+ case PWR_CMD_END:
+ /* When this command is parsed, end the process */
+ RT_TRACE(
+ _module_hal_init_c_,
+ _drv_info_,
+ ("HalPwrSeqCmdParsing: PWR_CMD_END\n")
+ );
+ return true;
+
+ default:
+ RT_TRACE(
+ _module_hal_init_c_,
+ _drv_err_,
+ ("HalPwrSeqCmdParsing: Unknown CMD!!\n")
+ );
+ break;
+ }
+ }
+
+ AryIdx++;/* Add Array Index */
+ } while (1);
+
+ return true;
+}
diff --git a/drivers/staging/rtl8723bs/hal/Mp_Precomp.h b/drivers/staging/rtl8723bs/hal/Mp_Precomp.h
new file mode 100644
index 000000000000..248b9fb3886a
--- /dev/null
+++ b/drivers/staging/rtl8723bs/hal/Mp_Precomp.h
@@ -0,0 +1,33 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2013 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#ifndef __MP_PRECOMP_H__
+#define __MP_PRECOMP_H__
+
+#include <drv_types.h>
+#include <hal_data.h>
+
+#define BT_TMP_BUF_SIZE 100
+
+#define DCMD_Printf DBG_BT_INFO
+
+#ifdef bEnable
+#undef bEnable
+#endif
+
+#include "HalBtcOutSrc.h"
+#include "HalBtc8723b1Ant.h"
+#include "HalBtc8723b2Ant.h"
+
+#endif /* __MP_PRECOMP_H__ */
diff --git a/drivers/staging/rtl8723bs/hal/hal_btcoex.c b/drivers/staging/rtl8723bs/hal/hal_btcoex.c
new file mode 100644
index 000000000000..cc7e0903ee9d
--- /dev/null
+++ b/drivers/staging/rtl8723bs/hal/hal_btcoex.c
@@ -0,0 +1,1720 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2013 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#define __HAL_BTCOEX_C__
+
+#include <hal_data.h>
+#include <rtw_debug.h>
+#include <hal_btcoex.h>
+#include <Mp_Precomp.h>
+
+/* Global variables */
+static const char *const BtProfileString[] = {
+ "NONE",
+ "A2DP",
+ "PAN",
+ "HID",
+ "SCO",
+};
+
+static const char *const BtSpecString[] = {
+ "1.0b",
+ "1.1",
+ "1.2",
+ "2.0+EDR",
+ "2.1+EDR",
+ "3.0+HS",
+ "4.0",
+};
+
+static const char *const BtLinkRoleString[] = {
+ "Master",
+ "Slave",
+};
+
+static const char *const h2cStaString[] = {
+ "successful",
+ "h2c busy",
+ "rf off",
+ "fw not read",
+};
+
+static const char *const ioStaString[] = {
+ "success",
+ "can not IO",
+ "rf off",
+ "fw not read",
+ "wait io timeout",
+ "invalid len",
+ "idle Q empty",
+ "insert waitQ fail",
+ "unknown fail",
+ "wrong level",
+ "h2c stopped",
+};
+
+BTC_COEXIST GLBtCoexist;
+static u8 GLBtcWiFiInScanState;
+static u8 GLBtcWiFiInIQKState;
+
+u32 GLBtcDbgType[BTC_MSG_MAX];
+static u8 GLBtcDbgBuf[BT_TMP_BUF_SIZE];
+
+typedef struct _btcoexdbginfo {
+ u8 *info;
+ u32 size; /* buffer total size */
+ u32 len; /* now used length */
+} BTCDBGINFO, *PBTCDBGINFO;
+
+static BTCDBGINFO GLBtcDbgInfo;
+
+#define BT_Operation(Adapter) false
+
+static void DBG_BT_INFO_INIT(PBTCDBGINFO pinfo, u8 *pbuf, u32 size)
+{
+ if (NULL == pinfo)
+ return;
+
+ memset(pinfo, 0, sizeof(BTCDBGINFO));
+
+ if (pbuf && size) {
+ pinfo->info = pbuf;
+ pinfo->size = size;
+ }
+}
+
+void DBG_BT_INFO(u8 *dbgmsg)
+{
+ PBTCDBGINFO pinfo;
+ u32 msglen;
+ u8 *pbuf;
+
+
+ pinfo = &GLBtcDbgInfo;
+
+ if (NULL == pinfo->info)
+ return;
+
+ msglen = strlen(dbgmsg);
+ if (pinfo->len + msglen > pinfo->size)
+ return;
+
+ pbuf = pinfo->info + pinfo->len;
+ memcpy(pbuf, dbgmsg, msglen);
+ pinfo->len += msglen;
+}
+
+/* */
+/* Debug related function */
+/* */
+static u8 halbtcoutsrc_IsBtCoexistAvailable(PBTC_COEXIST pBtCoexist)
+{
+ if (!pBtCoexist->bBinded ||
+ NULL == pBtCoexist->Adapter){
+ return false;
+ }
+ return true;
+}
+
+static void halbtcoutsrc_DbgInit(void)
+{
+ u8 i;
+
+ for (i = 0; i < BTC_MSG_MAX; i++)
+ GLBtcDbgType[i] = 0;
+
+ GLBtcDbgType[BTC_MSG_INTERFACE] = \
+/* INTF_INIT | */
+/* INTF_NOTIFY | */
+ 0;
+
+ GLBtcDbgType[BTC_MSG_ALGORITHM] = \
+/* ALGO_BT_RSSI_STATE | */
+/* ALGO_WIFI_RSSI_STATE | */
+/* ALGO_BT_MONITOR | */
+/* ALGO_TRACE | */
+/* ALGO_TRACE_FW | */
+/* ALGO_TRACE_FW_DETAIL | */
+/* ALGO_TRACE_FW_EXEC | */
+/* ALGO_TRACE_SW | */
+/* ALGO_TRACE_SW_DETAIL | */
+/* ALGO_TRACE_SW_EXEC | */
+ 0;
+}
+
+static void halbtcoutsrc_LeaveLps(PBTC_COEXIST pBtCoexist)
+{
+ struct adapter *padapter;
+
+
+ padapter = pBtCoexist->Adapter;
+
+ pBtCoexist->btInfo.bBtCtrlLps = true;
+ pBtCoexist->btInfo.bBtLpsOn = false;
+
+ rtw_btcoex_LPS_Leave(padapter);
+}
+
+static void halbtcoutsrc_EnterLps(PBTC_COEXIST pBtCoexist)
+{
+ struct adapter *padapter;
+
+
+ padapter = pBtCoexist->Adapter;
+
+ pBtCoexist->btInfo.bBtCtrlLps = true;
+ pBtCoexist->btInfo.bBtLpsOn = true;
+
+ rtw_btcoex_LPS_Enter(padapter);
+}
+
+static void halbtcoutsrc_NormalLps(PBTC_COEXIST pBtCoexist)
+{
+ struct adapter *padapter;
+
+
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Normal LPS behavior!!!\n"));
+
+ padapter = pBtCoexist->Adapter;
+
+ if (pBtCoexist->btInfo.bBtCtrlLps) {
+ pBtCoexist->btInfo.bBtLpsOn = false;
+ rtw_btcoex_LPS_Leave(padapter);
+ pBtCoexist->btInfo.bBtCtrlLps = false;
+
+ /* recover the LPS state to the original */
+ }
+}
+
+/*
+ * Constraint:
+ * 1. this function will request pwrctrl->lock
+ */
+static void halbtcoutsrc_LeaveLowPower(PBTC_COEXIST pBtCoexist)
+{
+ struct adapter *padapter;
+ struct hal_com_data *pHalData;
+ s32 ready;
+ unsigned long stime;
+ unsigned long utime;
+ u32 timeout; /* unit: ms */
+
+
+ padapter = pBtCoexist->Adapter;
+ pHalData = GET_HAL_DATA(padapter);
+ ready = _FAIL;
+#ifdef LPS_RPWM_WAIT_MS
+ timeout = LPS_RPWM_WAIT_MS;
+#else /* !LPS_RPWM_WAIT_MS */
+ timeout = 30;
+#endif /* !LPS_RPWM_WAIT_MS */
+
+ stime = jiffies;
+ do {
+ ready = rtw_register_task_alive(padapter, BTCOEX_ALIVE);
+ if (_SUCCESS == ready)
+ break;
+
+ utime = jiffies_to_msecs(jiffies - stime);
+ if (utime > timeout)
+ break;
+
+ msleep(1);
+ } while (1);
+}
+
+/*
+ * Constraint:
+ * 1. this function will request pwrctrl->lock
+ */
+static void halbtcoutsrc_NormalLowPower(PBTC_COEXIST pBtCoexist)
+{
+ struct adapter *padapter;
+
+
+ padapter = pBtCoexist->Adapter;
+ rtw_unregister_task_alive(padapter, BTCOEX_ALIVE);
+}
+
+static void halbtcoutsrc_DisableLowPower(PBTC_COEXIST pBtCoexist, u8 bLowPwrDisable)
+{
+ pBtCoexist->btInfo.bBtDisableLowPwr = bLowPwrDisable;
+ if (bLowPwrDisable)
+ halbtcoutsrc_LeaveLowPower(pBtCoexist); /* leave 32k low power. */
+ else
+ halbtcoutsrc_NormalLowPower(pBtCoexist); /* original 32k low power behavior. */
+}
+
+static void halbtcoutsrc_AggregationCheck(PBTC_COEXIST pBtCoexist)
+{
+ struct adapter *padapter;
+ bool bNeedToAct;
+
+
+ padapter = pBtCoexist->Adapter;
+ bNeedToAct = false;
+
+ if (pBtCoexist->btInfo.bRejectAggPkt)
+ rtw_btcoex_RejectApAggregatedPacket(padapter, true);
+	else {
+
+ if (pBtCoexist->btInfo.bPreBtCtrlAggBufSize !=
+ pBtCoexist->btInfo.bBtCtrlAggBufSize){
+
+ bNeedToAct = true;
+ pBtCoexist->btInfo.bPreBtCtrlAggBufSize = pBtCoexist->btInfo.bBtCtrlAggBufSize;
+ }
+
+ if (pBtCoexist->btInfo.bBtCtrlAggBufSize) {
+ if (pBtCoexist->btInfo.preAggBufSize !=
+ pBtCoexist->btInfo.aggBufSize){
+ bNeedToAct = true;
+ }
+ pBtCoexist->btInfo.preAggBufSize = pBtCoexist->btInfo.aggBufSize;
+ }
+
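+		/* Toggling reject on and then off below is presumably meant to */
+		/* force the AP aggregation size to be renegotiated whenever BT */
+		/* changes its control of the aggregation buffer size. */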
+ if (bNeedToAct) {
+ rtw_btcoex_RejectApAggregatedPacket(padapter, true);
+ rtw_btcoex_RejectApAggregatedPacket(padapter, false);
+ }
+ }
+}
+
+static u8 halbtcoutsrc_IsWifiBusy(struct adapter *padapter)
+{
+ struct mlme_priv *pmlmepriv;
+
+
+ pmlmepriv = &padapter->mlmepriv;
+
+ if (check_fwstate(pmlmepriv, WIFI_ASOC_STATE) == true) {
+ if (check_fwstate(pmlmepriv, WIFI_AP_STATE) == true)
+ return true;
+ if (true == pmlmepriv->LinkDetectInfo.bBusyTraffic)
+ return true;
+ }
+
+ return false;
+}
+
+static u32 _halbtcoutsrc_GetWifiLinkStatus(struct adapter *padapter)
+{
+ struct mlme_priv *pmlmepriv;
+ u8 bp2p;
+ u32 portConnectedStatus;
+
+
+ pmlmepriv = &padapter->mlmepriv;
+ bp2p = false;
+ portConnectedStatus = 0;
+
+ if (check_fwstate(pmlmepriv, WIFI_ASOC_STATE) == true) {
+ if (check_fwstate(pmlmepriv, WIFI_AP_STATE) == true) {
+ if (true == bp2p)
+ portConnectedStatus |= WIFI_P2P_GO_CONNECTED;
+ else
+ portConnectedStatus |= WIFI_AP_CONNECTED;
+ } else {
+ if (true == bp2p)
+ portConnectedStatus |= WIFI_P2P_GC_CONNECTED;
+ else
+ portConnectedStatus |= WIFI_STA_CONNECTED;
+ }
+ }
+
+ return portConnectedStatus;
+}
+
+static u32 halbtcoutsrc_GetWifiLinkStatus(PBTC_COEXIST pBtCoexist)
+{
+ /* */
+ /* return value: */
+ /* [31:16]=> connected port number */
+ /* [15:0]=> port connected bit define */
+ /* */
+
+ struct adapter *padapter;
+ u32 retVal;
+ u32 portConnectedStatus, numOfConnectedPort;
+
+
+ padapter = pBtCoexist->Adapter;
+ portConnectedStatus = 0;
+ numOfConnectedPort = 0;
+
+ retVal = _halbtcoutsrc_GetWifiLinkStatus(padapter);
+ if (retVal) {
+ portConnectedStatus |= retVal;
+ numOfConnectedPort++;
+ }
+
+ retVal = (numOfConnectedPort << 16) | portConnectedStatus;
+
+ return retVal;
+}
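+/* */
+/* Illustrative only: a caller can unpack the combined value as */
+/* */
+/*	u32 status    = halbtcoutsrc_GetWifiLinkStatus(pBtCoexist); */
+/*	u32 num_ports = status >> 16;     (connected port number) */
+/*	u32 port_bits = status & 0xFFFF;  (WIFI_*_CONNECTED bit define) */
+/* */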
+
+static u32 halbtcoutsrc_GetBtPatchVer(PBTC_COEXIST pBtCoexist)
+{
+ return pBtCoexist->btInfo.btRealFwVer;
+}
+
+static s32 halbtcoutsrc_GetWifiRssi(struct adapter *padapter)
+{
+ struct hal_com_data *pHalData;
+ s32 UndecoratedSmoothedPWDB = 0;
+
+
+ pHalData = GET_HAL_DATA(padapter);
+
+ UndecoratedSmoothedPWDB = pHalData->dmpriv.EntryMinUndecoratedSmoothedPWDB;
+
+ return UndecoratedSmoothedPWDB;
+}
+
+static u8 halbtcoutsrc_GetWifiScanAPNum(struct adapter *padapter)
+{
+ struct mlme_ext_priv *pmlmeext;
+ static u8 scan_AP_num = 0;
+
+ pmlmeext = &padapter->mlmeextpriv;
+
+ if (GLBtcWiFiInScanState == false) {
+ if (pmlmeext->sitesurvey_res.bss_cnt > 0xFF)
+ scan_AP_num = 0xFF;
+ else
+ scan_AP_num = (u8)pmlmeext->sitesurvey_res.bss_cnt;
+ }
+
+ return scan_AP_num;
+}
+
+static u8 halbtcoutsrc_Get(void *pBtcContext, u8 getType, void *pOutBuf)
+{
+ PBTC_COEXIST pBtCoexist;
+ struct adapter *padapter;
+ struct hal_com_data *pHalData;
+ struct mlme_ext_priv *mlmeext;
+ u8 *pu8;
+ s32 *pS4Tmp;
+ u32 *pU4Tmp;
+ u8 *pU1Tmp;
+ u8 ret;
+
+
+ pBtCoexist = (PBTC_COEXIST)pBtcContext;
+ if (!halbtcoutsrc_IsBtCoexistAvailable(pBtCoexist))
+ return false;
+
+ padapter = pBtCoexist->Adapter;
+ pHalData = GET_HAL_DATA(padapter);
+ mlmeext = &padapter->mlmeextpriv;
+ pu8 = (u8 *)pOutBuf;
+ pS4Tmp = (s32 *)pOutBuf;
+ pU4Tmp = (u32 *)pOutBuf;
+ pU1Tmp = (u8 *)pOutBuf;
+ ret = true;
+
+ switch (getType) {
+ case BTC_GET_BL_HS_OPERATION:
+ *pu8 = false;
+ ret = false;
+ break;
+
+ case BTC_GET_BL_HS_CONNECTING:
+ *pu8 = false;
+ ret = false;
+ break;
+
+ case BTC_GET_BL_WIFI_CONNECTED:
+ *pu8 = check_fwstate(&padapter->mlmepriv, WIFI_ASOC_STATE);
+ break;
+
+ case BTC_GET_BL_WIFI_BUSY:
+ *pu8 = halbtcoutsrc_IsWifiBusy(padapter);
+ break;
+
+ case BTC_GET_BL_WIFI_SCAN:
+		/*
+		 * Use the value of the new variable GLBtcWiFiInScanState to judge
+		 * whether WiFi is in scan state or not, since the originally used
+		 * flag WIFI_SITE_MONITOR in fwstate may not be cleared in time.
+		 */
+ *pu8 = GLBtcWiFiInScanState;
+ break;
+
+ case BTC_GET_BL_WIFI_LINK:
+ *pu8 = check_fwstate(&padapter->mlmepriv, WIFI_UNDER_LINKING);
+ break;
+
+ case BTC_GET_BL_WIFI_ROAM:
+ *pu8 = check_fwstate(&padapter->mlmepriv, WIFI_UNDER_LINKING);
+ break;
+
+ case BTC_GET_BL_WIFI_4_WAY_PROGRESS:
+ *pu8 = false;
+ break;
+
+ case BTC_GET_BL_WIFI_UNDER_5G:
+ *pu8 = (pHalData->CurrentBandType == 1) ? true : false;
+ break;
+
+ case BTC_GET_BL_WIFI_AP_MODE_ENABLE:
+ *pu8 = check_fwstate(&padapter->mlmepriv, WIFI_AP_STATE);
+ break;
+
+ case BTC_GET_BL_WIFI_ENABLE_ENCRYPTION:
+ *pu8 = padapter->securitypriv.dot11PrivacyAlgrthm == 0 ? false : true;
+ break;
+
+ case BTC_GET_BL_WIFI_UNDER_B_MODE:
+ if (mlmeext->cur_wireless_mode == WIRELESS_11B)
+ *pu8 = true;
+ else
+ *pu8 = false;
+ break;
+
+ case BTC_GET_BL_WIFI_IS_IN_MP_MODE:
+ *pu8 = false;
+ break;
+
+ case BTC_GET_BL_EXT_SWITCH:
+ *pu8 = false;
+ break;
+
+ case BTC_GET_S4_WIFI_RSSI:
+ *pS4Tmp = halbtcoutsrc_GetWifiRssi(padapter);
+ break;
+
+ case BTC_GET_S4_HS_RSSI:
+ *pS4Tmp = 0;
+ ret = false;
+ break;
+
+ case BTC_GET_U4_WIFI_BW:
+ if (IsLegacyOnly(mlmeext->cur_wireless_mode))
+ *pU4Tmp = BTC_WIFI_BW_LEGACY;
+ else if (pHalData->CurrentChannelBW == CHANNEL_WIDTH_20)
+ *pU4Tmp = BTC_WIFI_BW_HT20;
+ else if (pHalData->CurrentChannelBW == CHANNEL_WIDTH_40)
+ *pU4Tmp = BTC_WIFI_BW_HT40;
+ else
+ *pU4Tmp = BTC_WIFI_BW_HT40; /* todo */
+ break;
+
+ case BTC_GET_U4_WIFI_TRAFFIC_DIRECTION:
+ {
+ PRT_LINK_DETECT_T plinkinfo;
+ plinkinfo = &padapter->mlmepriv.LinkDetectInfo;
+
+ if (plinkinfo->NumTxOkInPeriod > plinkinfo->NumRxOkInPeriod)
+ *pU4Tmp = BTC_WIFI_TRAFFIC_TX;
+ else
+ *pU4Tmp = BTC_WIFI_TRAFFIC_RX;
+ }
+ break;
+
+ case BTC_GET_U4_WIFI_FW_VER:
+ *pU4Tmp = pHalData->FirmwareVersion << 16;
+ *pU4Tmp |= pHalData->FirmwareSubVersion;
+ break;
+
+ case BTC_GET_U4_WIFI_LINK_STATUS:
+ *pU4Tmp = halbtcoutsrc_GetWifiLinkStatus(pBtCoexist);
+ break;
+
+ case BTC_GET_U4_BT_PATCH_VER:
+ *pU4Tmp = halbtcoutsrc_GetBtPatchVer(pBtCoexist);
+ break;
+
+ case BTC_GET_U1_WIFI_DOT11_CHNL:
+ *pU1Tmp = padapter->mlmeextpriv.cur_channel;
+ break;
+
+ case BTC_GET_U1_WIFI_CENTRAL_CHNL:
+ *pU1Tmp = pHalData->CurrentChannel;
+ break;
+
+ case BTC_GET_U1_WIFI_HS_CHNL:
+ *pU1Tmp = 0;
+ ret = false;
+ break;
+
+ case BTC_GET_U1_MAC_PHY_MODE:
+ *pU1Tmp = BTC_SMSP;
+/* *pU1Tmp = BTC_DMSP; */
+/* *pU1Tmp = BTC_DMDP; */
+/* *pU1Tmp = BTC_MP_UNKNOWN; */
+ break;
+
+ case BTC_GET_U1_AP_NUM:
+ *pU1Tmp = halbtcoutsrc_GetWifiScanAPNum(padapter);
+ break;
+
+ /* 1Ant =========== */
+ case BTC_GET_U1_LPS_MODE:
+ *pU1Tmp = padapter->dvobj->pwrctl_priv.pwr_mode;
+ break;
+
+ default:
+ ret = false;
+ break;
+ }
+
+ return ret;
+}
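+/* */
+/* Illustrative only: the coex algorithm code reaches this through the */
+/* fBtcGet callback wired up in EXhalbtcoutsrc_InitlizeVariables(), e.g. */
+/* */
+/*	u8 bWifiBusy = false; */
+/*	pBtCoexist->fBtcGet(pBtCoexist, BTC_GET_BL_WIFI_BUSY, &bWifiBusy); */
+/* */
+/* The output buffer type has to match the getType prefix (BL/S4/U4/U1). */
+/* */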
+
+static u8 halbtcoutsrc_Set(void *pBtcContext, u8 setType, void *pInBuf)
+{
+ PBTC_COEXIST pBtCoexist;
+ struct adapter *padapter;
+ struct hal_com_data *pHalData;
+ u8 *pu8;
+ u8 *pU1Tmp;
+ u32 *pU4Tmp;
+ u8 ret;
+
+
+ pBtCoexist = (PBTC_COEXIST)pBtcContext;
+ padapter = pBtCoexist->Adapter;
+ pHalData = GET_HAL_DATA(padapter);
+ pu8 = (u8 *)pInBuf;
+ pU1Tmp = (u8 *)pInBuf;
+ pU4Tmp = (u32 *)pInBuf;
+ ret = true;
+
+ if (!halbtcoutsrc_IsBtCoexistAvailable(pBtCoexist))
+ return false;
+
+ switch (setType) {
+ /* set some u8 type variables. */
+ case BTC_SET_BL_BT_DISABLE:
+ pBtCoexist->btInfo.bBtDisabled = *pu8;
+ break;
+
+ case BTC_SET_BL_BT_TRAFFIC_BUSY:
+ pBtCoexist->btInfo.bBtBusy = *pu8;
+ break;
+
+ case BTC_SET_BL_BT_LIMITED_DIG:
+ pBtCoexist->btInfo.bLimitedDig = *pu8;
+ break;
+
+ case BTC_SET_BL_FORCE_TO_ROAM:
+ pBtCoexist->btInfo.bForceToRoam = *pu8;
+ break;
+
+ case BTC_SET_BL_TO_REJ_AP_AGG_PKT:
+ pBtCoexist->btInfo.bRejectAggPkt = *pu8;
+ break;
+
+ case BTC_SET_BL_BT_CTRL_AGG_SIZE:
+ pBtCoexist->btInfo.bBtCtrlAggBufSize = *pu8;
+ break;
+
+ case BTC_SET_BL_INC_SCAN_DEV_NUM:
+ pBtCoexist->btInfo.bIncreaseScanDevNum = *pu8;
+ break;
+
+ case BTC_SET_BL_BT_TX_RX_MASK:
+ pBtCoexist->btInfo.bBtTxRxMask = *pu8;
+ break;
+
+ /* set some u8 type variables. */
+ case BTC_SET_U1_RSSI_ADJ_VAL_FOR_AGC_TABLE_ON:
+ pBtCoexist->btInfo.rssiAdjustForAgcTableOn = *pU1Tmp;
+ break;
+
+ case BTC_SET_U1_AGG_BUF_SIZE:
+ pBtCoexist->btInfo.aggBufSize = *pU1Tmp;
+ break;
+
+ /* the following are some action which will be triggered */
+ case BTC_SET_ACT_GET_BT_RSSI:
+ ret = false;
+ break;
+
+ case BTC_SET_ACT_AGGREGATE_CTRL:
+ halbtcoutsrc_AggregationCheck(pBtCoexist);
+ break;
+
+ /* 1Ant =========== */
+ /* set some u8 type variables. */
+ case BTC_SET_U1_RSSI_ADJ_VAL_FOR_1ANT_COEX_TYPE:
+ pBtCoexist->btInfo.rssiAdjustFor1AntCoexType = *pU1Tmp;
+ break;
+
+ case BTC_SET_U1_LPS_VAL:
+ pBtCoexist->btInfo.lpsVal = *pU1Tmp;
+ break;
+
+ case BTC_SET_U1_RPWM_VAL:
+ pBtCoexist->btInfo.rpwmVal = *pU1Tmp;
+ break;
+
+ /* the following are some action which will be triggered */
+ case BTC_SET_ACT_LEAVE_LPS:
+ halbtcoutsrc_LeaveLps(pBtCoexist);
+ break;
+
+ case BTC_SET_ACT_ENTER_LPS:
+ halbtcoutsrc_EnterLps(pBtCoexist);
+ break;
+
+ case BTC_SET_ACT_NORMAL_LPS:
+ halbtcoutsrc_NormalLps(pBtCoexist);
+ break;
+
+ case BTC_SET_ACT_DISABLE_LOW_POWER:
+ halbtcoutsrc_DisableLowPower(pBtCoexist, *pu8);
+ break;
+
+ case BTC_SET_ACT_UPDATE_RAMASK:
+ pBtCoexist->btInfo.raMask = *pU4Tmp;
+
+ if (check_fwstate(&padapter->mlmepriv, WIFI_ASOC_STATE) == true) {
+ struct sta_info *psta;
+ struct wlan_bssid_ex *cur_network;
+
+ cur_network = &padapter->mlmeextpriv.mlmext_info.network;
+ psta = rtw_get_stainfo(&padapter->stapriv, cur_network->MacAddress);
+ rtw_hal_update_ra_mask(psta, 0);
+ }
+ break;
+
+ case BTC_SET_ACT_SEND_MIMO_PS:
+ ret = false;
+ break;
+
+ case BTC_SET_ACT_CTRL_BT_INFO:
+ ret = false;
+ break;
+
+ case BTC_SET_ACT_CTRL_BT_COEX:
+ ret = false;
+ break;
+ case BTC_SET_ACT_CTRL_8723B_ANT:
+ ret = false;
+ break;
+ /* */
+ default:
+ ret = false;
+ break;
+ }
+
+ return ret;
+}
+
+static void halbtcoutsrc_DisplayFwPwrModeCmd(PBTC_COEXIST pBtCoexist)
+{
+ u8 *cliBuf = pBtCoexist->cliBuf;
+
+ CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %02x %02x %02x %02x %02x %02x ", "Power mode cmd ", \
+ pBtCoexist->pwrModeVal[0], pBtCoexist->pwrModeVal[1],
+ pBtCoexist->pwrModeVal[2], pBtCoexist->pwrModeVal[3],
+ pBtCoexist->pwrModeVal[4], pBtCoexist->pwrModeVal[5]);
+ CL_PRINTF(cliBuf);
+}
+
+/* */
+/* IO related function */
+/* */
+static u8 halbtcoutsrc_Read1Byte(void *pBtcContext, u32 RegAddr)
+{
+ PBTC_COEXIST pBtCoexist;
+ struct adapter *padapter;
+
+
+ pBtCoexist = (PBTC_COEXIST)pBtcContext;
+ padapter = pBtCoexist->Adapter;
+
+ return rtw_read8(padapter, RegAddr);
+}
+
+static u16 halbtcoutsrc_Read2Byte(void *pBtcContext, u32 RegAddr)
+{
+ PBTC_COEXIST pBtCoexist;
+ struct adapter *padapter;
+
+
+ pBtCoexist = (PBTC_COEXIST)pBtcContext;
+ padapter = pBtCoexist->Adapter;
+
+ return rtw_read16(padapter, RegAddr);
+}
+
+static u32 halbtcoutsrc_Read4Byte(void *pBtcContext, u32 RegAddr)
+{
+ PBTC_COEXIST pBtCoexist;
+ struct adapter *padapter;
+
+
+ pBtCoexist = (PBTC_COEXIST)pBtcContext;
+ padapter = pBtCoexist->Adapter;
+
+ return rtw_read32(padapter, RegAddr);
+}
+
+static void halbtcoutsrc_Write1Byte(void *pBtcContext, u32 RegAddr, u8 Data)
+{
+ PBTC_COEXIST pBtCoexist;
+ struct adapter *padapter;
+
+
+ pBtCoexist = (PBTC_COEXIST)pBtcContext;
+ padapter = pBtCoexist->Adapter;
+
+ rtw_write8(padapter, RegAddr, Data);
+}
+
+static void halbtcoutsrc_BitMaskWrite1Byte(void *pBtcContext, u32 regAddr, u8 bitMask, u8 data1b)
+{
+ PBTC_COEXIST pBtCoexist;
+ struct adapter *padapter;
+ u8 originalValue, bitShift;
+ u8 i;
+
+
+ pBtCoexist = (PBTC_COEXIST)pBtcContext;
+ padapter = pBtCoexist->Adapter;
+ originalValue = 0;
+ bitShift = 0;
+
+ if (bitMask != 0xFF) {
+ originalValue = rtw_read8(padapter, regAddr);
+
+ for (i = 0; i <= 7; i++) {
+ if ((bitMask>>i)&0x1)
+ break;
+ }
+ bitShift = i;
+
+ data1b = (originalValue & ~bitMask) | ((data1b << bitShift) & bitMask);
+ }
+
+ rtw_write8(padapter, regAddr, data1b);
+}
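+/* */
+/* Worked example (illustrative only): bitMask = 0x30 and data1b = 0x2 write */
+/* binary 10 into bits [5:4]: bitShift becomes 4 and the register ends up as */
+/* (originalValue & ~0x30) | ((0x2 << 4) & 0x30). */
+/* */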
+
+static void halbtcoutsrc_Write2Byte(void *pBtcContext, u32 RegAddr, u16 Data)
+{
+ PBTC_COEXIST pBtCoexist;
+ struct adapter *padapter;
+
+
+ pBtCoexist = (PBTC_COEXIST)pBtcContext;
+ padapter = pBtCoexist->Adapter;
+
+ rtw_write16(padapter, RegAddr, Data);
+}
+
+static void halbtcoutsrc_Write4Byte(void *pBtcContext, u32 RegAddr, u32 Data)
+{
+ PBTC_COEXIST pBtCoexist;
+ struct adapter *padapter;
+
+
+ pBtCoexist = (PBTC_COEXIST)pBtcContext;
+ padapter = pBtCoexist->Adapter;
+
+ rtw_write32(padapter, RegAddr, Data);
+}
+
+static void halbtcoutsrc_WriteLocalReg1Byte(void *pBtcContext, u32 RegAddr, u8 Data)
+{
+ PBTC_COEXIST pBtCoexist = (PBTC_COEXIST)pBtcContext;
+ struct adapter *Adapter = pBtCoexist->Adapter;
+
+ if (BTC_INTF_SDIO == pBtCoexist->chipInterface) {
+ rtw_write8(Adapter, SDIO_LOCAL_BASE | RegAddr, Data);
+ } else {
+ rtw_write8(Adapter, RegAddr, Data);
+ }
+}
+
+static void halbtcoutsrc_SetBbReg(void *pBtcContext, u32 RegAddr, u32 BitMask, u32 Data)
+{
+ PBTC_COEXIST pBtCoexist;
+ struct adapter *padapter;
+
+
+ pBtCoexist = (PBTC_COEXIST)pBtcContext;
+ padapter = pBtCoexist->Adapter;
+
+ PHY_SetBBReg(padapter, RegAddr, BitMask, Data);
+}
+
+
+static u32 halbtcoutsrc_GetBbReg(void *pBtcContext, u32 RegAddr, u32 BitMask)
+{
+ PBTC_COEXIST pBtCoexist;
+ struct adapter *padapter;
+
+
+ pBtCoexist = (PBTC_COEXIST)pBtcContext;
+ padapter = pBtCoexist->Adapter;
+
+ return PHY_QueryBBReg(padapter, RegAddr, BitMask);
+}
+
+static void halbtcoutsrc_SetRfReg(void *pBtcContext, u8 eRFPath, u32 RegAddr, u32 BitMask, u32 Data)
+{
+ PBTC_COEXIST pBtCoexist;
+ struct adapter *padapter;
+
+
+ pBtCoexist = (PBTC_COEXIST)pBtcContext;
+ padapter = pBtCoexist->Adapter;
+
+ PHY_SetRFReg(padapter, eRFPath, RegAddr, BitMask, Data);
+}
+
+static u32 halbtcoutsrc_GetRfReg(void *pBtcContext, u8 eRFPath, u32 RegAddr, u32 BitMask)
+{
+ PBTC_COEXIST pBtCoexist;
+ struct adapter *padapter;
+
+
+ pBtCoexist = (PBTC_COEXIST)pBtcContext;
+ padapter = pBtCoexist->Adapter;
+
+ return PHY_QueryRFReg(padapter, eRFPath, RegAddr, BitMask);
+}
+
+static void halbtcoutsrc_SetBtReg(void *pBtcContext, u8 RegType, u32 RegAddr, u32 Data)
+{
+ PBTC_COEXIST pBtCoexist;
+ struct adapter *padapter;
+ u8 CmdBuffer1[4] = {0};
+ u8 CmdBuffer2[4] = {0};
+ u8 *AddrToSet = (u8 *)&RegAddr;
+ u8 *ValueToSet = (u8 *)&Data;
+ u8 OperVer = 0;
+ u8 ReqNum = 0;
+
+ pBtCoexist = (PBTC_COEXIST)pBtcContext;
+ padapter = pBtCoexist->Adapter;
+
+ CmdBuffer1[0] |= (OperVer & 0x0f); /* Set OperVer */
+ CmdBuffer1[0] |= ((ReqNum << 4) & 0xf0); /* Set ReqNum */
+ CmdBuffer1[1] = 0x0d; /* Set OpCode to BT_LO_OP_WRITE_REG_VALUE */
+ CmdBuffer1[2] = ValueToSet[0]; /* Set WriteRegValue */
+ rtw_hal_fill_h2c_cmd(padapter, 0x67, 4, &(CmdBuffer1[0]));
+
+ msleep(200);
+ ReqNum++;
+
+ CmdBuffer2[0] |= (OperVer & 0x0f); /* Set OperVer */
+ CmdBuffer2[0] |= ((ReqNum << 4) & 0xf0); /* Set ReqNum */
+ CmdBuffer2[1] = 0x0c; /* Set OpCode of BT_LO_OP_WRITE_REG_ADDR */
+ CmdBuffer2[3] = AddrToSet[0]; /* Set WriteRegAddr */
+ rtw_hal_fill_h2c_cmd(padapter, 0x67, 4, &(CmdBuffer2[0]));
+}
+
+static u32 halbtcoutsrc_GetBtReg(void *pBtcContext, u8 RegType, u32 RegAddr)
+{
+ /* To be implemented. Always return 0 temporarily */
+ return 0;
+}
+
+static void halbtcoutsrc_FillH2cCmd(void *pBtcContext, u8 elementId, u32 cmdLen, u8 *pCmdBuffer)
+{
+ PBTC_COEXIST pBtCoexist;
+ struct adapter *padapter;
+
+
+ pBtCoexist = (PBTC_COEXIST)pBtcContext;
+ padapter = pBtCoexist->Adapter;
+
+ rtw_hal_fill_h2c_cmd(padapter, elementId, cmdLen, pCmdBuffer);
+}
+
+static void halbtcoutsrc_DisplayDbgMsg(void *pBtcContext, u8 dispType)
+{
+ PBTC_COEXIST pBtCoexist;
+
+
+ pBtCoexist = (PBTC_COEXIST)pBtcContext;
+ switch (dispType) {
+ case BTC_DBG_DISP_COEX_STATISTICS:
+ break;
+ case BTC_DBG_DISP_BT_LINK_INFO:
+ break;
+ case BTC_DBG_DISP_FW_PWR_MODE_CMD:
+ halbtcoutsrc_DisplayFwPwrModeCmd(pBtCoexist);
+ break;
+ default:
+ break;
+ }
+}
+
+/* */
+/* Extern functions called by other module */
+/* */
+static u8 EXhalbtcoutsrc_BindBtCoexWithAdapter(void *padapter)
+{
+ PBTC_COEXIST pBtCoexist = &GLBtCoexist;
+
+ if (pBtCoexist->bBinded)
+ return false;
+ else
+ pBtCoexist->bBinded = true;
+
+ pBtCoexist->statistics.cntBind++;
+
+ pBtCoexist->Adapter = padapter;
+
+ pBtCoexist->stackInfo.bProfileNotified = false;
+
+ pBtCoexist->btInfo.bBtCtrlAggBufSize = false;
+ pBtCoexist->btInfo.aggBufSize = 5;
+
+ pBtCoexist->btInfo.bIncreaseScanDevNum = false;
+
+ /* set default antenna position to main port */
+ pBtCoexist->boardInfo.btdmAntPos = BTC_ANTENNA_AT_MAIN_PORT;
+
+ return true;
+}
+
+u8 EXhalbtcoutsrc_InitlizeVariables(void *padapter)
+{
+ PBTC_COEXIST pBtCoexist = &GLBtCoexist;
+
+ /* pBtCoexist->statistics.cntBind++; */
+
+ halbtcoutsrc_DbgInit();
+
+ pBtCoexist->chipInterface = BTC_INTF_SDIO;
+
+ EXhalbtcoutsrc_BindBtCoexWithAdapter(padapter);
+
+ pBtCoexist->fBtcRead1Byte = halbtcoutsrc_Read1Byte;
+ pBtCoexist->fBtcWrite1Byte = halbtcoutsrc_Write1Byte;
+ pBtCoexist->fBtcWrite1ByteBitMask = halbtcoutsrc_BitMaskWrite1Byte;
+ pBtCoexist->fBtcRead2Byte = halbtcoutsrc_Read2Byte;
+ pBtCoexist->fBtcWrite2Byte = halbtcoutsrc_Write2Byte;
+ pBtCoexist->fBtcRead4Byte = halbtcoutsrc_Read4Byte;
+ pBtCoexist->fBtcWrite4Byte = halbtcoutsrc_Write4Byte;
+ pBtCoexist->fBtcWriteLocalReg1Byte = halbtcoutsrc_WriteLocalReg1Byte;
+
+ pBtCoexist->fBtcSetBbReg = halbtcoutsrc_SetBbReg;
+ pBtCoexist->fBtcGetBbReg = halbtcoutsrc_GetBbReg;
+
+ pBtCoexist->fBtcSetRfReg = halbtcoutsrc_SetRfReg;
+ pBtCoexist->fBtcGetRfReg = halbtcoutsrc_GetRfReg;
+
+ pBtCoexist->fBtcFillH2c = halbtcoutsrc_FillH2cCmd;
+ pBtCoexist->fBtcDispDbgMsg = halbtcoutsrc_DisplayDbgMsg;
+
+ pBtCoexist->fBtcGet = halbtcoutsrc_Get;
+ pBtCoexist->fBtcSet = halbtcoutsrc_Set;
+ pBtCoexist->fBtcGetBtReg = halbtcoutsrc_GetBtReg;
+ pBtCoexist->fBtcSetBtReg = halbtcoutsrc_SetBtReg;
+
+ pBtCoexist->cliBuf = &GLBtcDbgBuf[0];
+
+ pBtCoexist->boardInfo.singleAntPath = 0;
+
+ GLBtcWiFiInScanState = false;
+
+ GLBtcWiFiInIQKState = false;
+
+ return true;
+}
+
+void EXhalbtcoutsrc_PowerOnSetting(PBTC_COEXIST pBtCoexist)
+{
+ if (!halbtcoutsrc_IsBtCoexistAvailable(pBtCoexist))
+ return;
+
+ /* Power on setting function is only added in 8723B currently */
+ if (pBtCoexist->boardInfo.btdmAntNum == 2)
+ EXhalbtc8723b2ant_PowerOnSetting(pBtCoexist);
+ else if (pBtCoexist->boardInfo.btdmAntNum == 1)
+ EXhalbtc8723b1ant_PowerOnSetting(pBtCoexist);
+}
+
+void EXhalbtcoutsrc_InitHwConfig(PBTC_COEXIST pBtCoexist, u8 bWifiOnly)
+{
+ if (!halbtcoutsrc_IsBtCoexistAvailable(pBtCoexist))
+ return;
+
+ pBtCoexist->statistics.cntInitHwConfig++;
+
+ if (pBtCoexist->boardInfo.btdmAntNum == 2)
+ EXhalbtc8723b2ant_InitHwConfig(pBtCoexist, bWifiOnly);
+ else if (pBtCoexist->boardInfo.btdmAntNum == 1)
+ EXhalbtc8723b1ant_InitHwConfig(pBtCoexist, bWifiOnly);
+}
+
+void EXhalbtcoutsrc_InitCoexDm(PBTC_COEXIST pBtCoexist)
+{
+ if (!halbtcoutsrc_IsBtCoexistAvailable(pBtCoexist))
+ return;
+
+ pBtCoexist->statistics.cntInitCoexDm++;
+
+ if (pBtCoexist->boardInfo.btdmAntNum == 2)
+ EXhalbtc8723b2ant_InitCoexDm(pBtCoexist);
+ else if (pBtCoexist->boardInfo.btdmAntNum == 1)
+ EXhalbtc8723b1ant_InitCoexDm(pBtCoexist);
+
+ pBtCoexist->bInitilized = true;
+}
+
+void EXhalbtcoutsrc_IpsNotify(PBTC_COEXIST pBtCoexist, u8 type)
+{
+ u8 ipsType;
+
+ if (!halbtcoutsrc_IsBtCoexistAvailable(pBtCoexist))
+ return;
+
+ pBtCoexist->statistics.cntIpsNotify++;
+ if (pBtCoexist->bManualControl)
+ return;
+
+ if (IPS_NONE == type)
+ ipsType = BTC_IPS_LEAVE;
+ else
+ ipsType = BTC_IPS_ENTER;
+
+	/* All notify routines are called in the cmd thread, so there is no need to leave low power again */
+/* halbtcoutsrc_LeaveLowPower(pBtCoexist); */
+
+ if (pBtCoexist->boardInfo.btdmAntNum == 2)
+ EXhalbtc8723b2ant_IpsNotify(pBtCoexist, ipsType);
+ else if (pBtCoexist->boardInfo.btdmAntNum == 1)
+ EXhalbtc8723b1ant_IpsNotify(pBtCoexist, ipsType);
+
+/* halbtcoutsrc_NormalLowPower(pBtCoexist); */
+}
+
+void EXhalbtcoutsrc_LpsNotify(PBTC_COEXIST pBtCoexist, u8 type)
+{
+ u8 lpsType;
+
+
+ if (!halbtcoutsrc_IsBtCoexistAvailable(pBtCoexist))
+ return;
+
+ pBtCoexist->statistics.cntLpsNotify++;
+ if (pBtCoexist->bManualControl)
+ return;
+
+ if (PS_MODE_ACTIVE == type)
+ lpsType = BTC_LPS_DISABLE;
+ else
+ lpsType = BTC_LPS_ENABLE;
+
+ if (pBtCoexist->boardInfo.btdmAntNum == 2)
+ EXhalbtc8723b2ant_LpsNotify(pBtCoexist, lpsType);
+ else if (pBtCoexist->boardInfo.btdmAntNum == 1)
+ EXhalbtc8723b1ant_LpsNotify(pBtCoexist, lpsType);
+}
+
+void EXhalbtcoutsrc_ScanNotify(PBTC_COEXIST pBtCoexist, u8 type)
+{
+ u8 scanType;
+
+ if (!halbtcoutsrc_IsBtCoexistAvailable(pBtCoexist))
+ return;
+ pBtCoexist->statistics.cntScanNotify++;
+ if (pBtCoexist->bManualControl)
+ return;
+
+ if (type) {
+ scanType = BTC_SCAN_START;
+ GLBtcWiFiInScanState = true;
+ } else {
+ scanType = BTC_SCAN_FINISH;
+ GLBtcWiFiInScanState = false;
+ }
+
+	/* All notify routines are called in the cmd thread, so there is no need to leave low power again */
+/* halbtcoutsrc_LeaveLowPower(pBtCoexist); */
+
+ if (pBtCoexist->boardInfo.btdmAntNum == 2)
+ EXhalbtc8723b2ant_ScanNotify(pBtCoexist, scanType);
+ else if (pBtCoexist->boardInfo.btdmAntNum == 1)
+ EXhalbtc8723b1ant_ScanNotify(pBtCoexist, scanType);
+
+/* halbtcoutsrc_NormalLowPower(pBtCoexist); */
+}
+
+void EXhalbtcoutsrc_ConnectNotify(PBTC_COEXIST pBtCoexist, u8 action)
+{
+ u8 assoType;
+
+ if (!halbtcoutsrc_IsBtCoexistAvailable(pBtCoexist))
+ return;
+ pBtCoexist->statistics.cntConnectNotify++;
+ if (pBtCoexist->bManualControl)
+ return;
+
+ if (action)
+ assoType = BTC_ASSOCIATE_START;
+ else
+ assoType = BTC_ASSOCIATE_FINISH;
+
+	/* All notify routines are called in the cmd thread, so there is no need to leave low power again */
+/* halbtcoutsrc_LeaveLowPower(pBtCoexist); */
+
+ if (pBtCoexist->boardInfo.btdmAntNum == 2)
+ EXhalbtc8723b2ant_ConnectNotify(pBtCoexist, assoType);
+ else if (pBtCoexist->boardInfo.btdmAntNum == 1)
+ EXhalbtc8723b1ant_ConnectNotify(pBtCoexist, assoType);
+
+/* halbtcoutsrc_NormalLowPower(pBtCoexist); */
+}
+
+void EXhalbtcoutsrc_MediaStatusNotify(PBTC_COEXIST pBtCoexist, RT_MEDIA_STATUS mediaStatus)
+{
+ u8 mStatus;
+
+ if (!halbtcoutsrc_IsBtCoexistAvailable(pBtCoexist))
+ return;
+
+ pBtCoexist->statistics.cntMediaStatusNotify++;
+ if (pBtCoexist->bManualControl)
+ return;
+
+ if (RT_MEDIA_CONNECT == mediaStatus)
+ mStatus = BTC_MEDIA_CONNECT;
+ else
+ mStatus = BTC_MEDIA_DISCONNECT;
+
+	/* All notify routines are called in the cmd thread, so there is no need to leave low power again */
+/* halbtcoutsrc_LeaveLowPower(pBtCoexist); */
+
+ if (pBtCoexist->boardInfo.btdmAntNum == 2)
+ EXhalbtc8723b2ant_MediaStatusNotify(pBtCoexist, mStatus);
+ else if (pBtCoexist->boardInfo.btdmAntNum == 1)
+ EXhalbtc8723b1ant_MediaStatusNotify(pBtCoexist, mStatus);
+
+/* halbtcoutsrc_NormalLowPower(pBtCoexist); */
+}
+
+void EXhalbtcoutsrc_SpecialPacketNotify(PBTC_COEXIST pBtCoexist, u8 pktType)
+{
+ u8 packetType;
+
+ if (!halbtcoutsrc_IsBtCoexistAvailable(pBtCoexist))
+ return;
+ pBtCoexist->statistics.cntSpecialPacketNotify++;
+ if (pBtCoexist->bManualControl)
+ return;
+
+ if (PACKET_DHCP == pktType)
+ packetType = BTC_PACKET_DHCP;
+ else if (PACKET_EAPOL == pktType)
+ packetType = BTC_PACKET_EAPOL;
+ else if (PACKET_ARP == pktType)
+ packetType = BTC_PACKET_ARP;
+	else {
+ packetType = BTC_PACKET_UNKNOWN;
+ return;
+ }
+
+	/* All notify routines are called in the cmd thread, so there is no need to leave low power again */
+/* halbtcoutsrc_LeaveLowPower(pBtCoexist); */
+
+ if (pBtCoexist->boardInfo.btdmAntNum == 2)
+ EXhalbtc8723b2ant_SpecialPacketNotify(pBtCoexist, packetType);
+ else if (pBtCoexist->boardInfo.btdmAntNum == 1)
+ EXhalbtc8723b1ant_SpecialPacketNotify(pBtCoexist, packetType);
+
+/* halbtcoutsrc_NormalLowPower(pBtCoexist); */
+}
+
+void EXhalbtcoutsrc_BtInfoNotify(PBTC_COEXIST pBtCoexist, u8 *tmpBuf, u8 length)
+{
+ if (!halbtcoutsrc_IsBtCoexistAvailable(pBtCoexist))
+ return;
+
+ pBtCoexist->statistics.cntBtInfoNotify++;
+
+	/* All notify routines are called in the cmd thread, so there is no need to leave low power again */
+/* halbtcoutsrc_LeaveLowPower(pBtCoexist); */
+
+ if (pBtCoexist->boardInfo.btdmAntNum == 2)
+ EXhalbtc8723b2ant_BtInfoNotify(pBtCoexist, tmpBuf, length);
+ else if (pBtCoexist->boardInfo.btdmAntNum == 1)
+ EXhalbtc8723b1ant_BtInfoNotify(pBtCoexist, tmpBuf, length);
+
+/* halbtcoutsrc_NormalLowPower(pBtCoexist); */
+}
+
+void EXhalbtcoutsrc_HaltNotify(PBTC_COEXIST pBtCoexist)
+{
+ if (!halbtcoutsrc_IsBtCoexistAvailable(pBtCoexist))
+ return;
+
+ if (pBtCoexist->boardInfo.btdmAntNum == 2)
+ EXhalbtc8723b2ant_HaltNotify(pBtCoexist);
+ else if (pBtCoexist->boardInfo.btdmAntNum == 1)
+ EXhalbtc8723b1ant_HaltNotify(pBtCoexist);
+
+ pBtCoexist->bBinded = false;
+}
+
+void EXhalbtcoutsrc_PnpNotify(PBTC_COEXIST pBtCoexist, u8 pnpState)
+{
+ if (!halbtcoutsrc_IsBtCoexistAvailable(pBtCoexist))
+ return;
+
+ /* */
+	/* Currently only the 1-antenna configuration needs this notification: */
+	/* once PnP notifies the sleep state, we have to leave LPS so that we can sleep normally. */
+ /* */
+
+ if (pBtCoexist->boardInfo.btdmAntNum == 1)
+ EXhalbtc8723b1ant_PnpNotify(pBtCoexist, pnpState);
+ else if (pBtCoexist->boardInfo.btdmAntNum == 2)
+ EXhalbtc8723b2ant_PnpNotify(pBtCoexist, pnpState);
+}
+
+void EXhalbtcoutsrc_Periodical(PBTC_COEXIST pBtCoexist)
+{
+ if (!halbtcoutsrc_IsBtCoexistAvailable(pBtCoexist))
+ return;
+ pBtCoexist->statistics.cntPeriodical++;
+
+	/* Periodical should be called in the cmd thread, */
+	/* so there is no need to leave low power again */
+/* halbtcoutsrc_LeaveLowPower(pBtCoexist); */
+
+ if (pBtCoexist->boardInfo.btdmAntNum == 2)
+ EXhalbtc8723b2ant_Periodical(pBtCoexist);
+ else if (pBtCoexist->boardInfo.btdmAntNum == 1)
+ EXhalbtc8723b1ant_Periodical(pBtCoexist);
+
+/* halbtcoutsrc_NormalLowPower(pBtCoexist); */
+}
+
+void EXhalbtcoutsrc_SetChipType(u8 chipType)
+{
+ GLBtCoexist.boardInfo.btChipType = BTC_CHIP_RTL8723B;
+}
+
+void EXhalbtcoutsrc_SetAntNum(u8 type, u8 antNum)
+{
+ if (BT_COEX_ANT_TYPE_PG == type) {
+ GLBtCoexist.boardInfo.pgAntNum = antNum;
+ GLBtCoexist.boardInfo.btdmAntNum = antNum;
+ } else if (BT_COEX_ANT_TYPE_ANTDIV == type) {
+ GLBtCoexist.boardInfo.btdmAntNum = antNum;
+ /* GLBtCoexist.boardInfo.btdmAntPos = BTC_ANTENNA_AT_MAIN_PORT; */
+ } else if (BT_COEX_ANT_TYPE_DETECTED == type) {
+ GLBtCoexist.boardInfo.btdmAntNum = antNum;
+ /* GLBtCoexist.boardInfo.btdmAntPos = BTC_ANTENNA_AT_MAIN_PORT; */
+ }
+}
+
+/* */
+/* Currently used by 8723b only, S0 or S1 */
+/* */
+void EXhalbtcoutsrc_SetSingleAntPath(u8 singleAntPath)
+{
+ GLBtCoexist.boardInfo.singleAntPath = singleAntPath;
+}
+
+void EXhalbtcoutsrc_DisplayBtCoexInfo(PBTC_COEXIST pBtCoexist)
+{
+ if (!halbtcoutsrc_IsBtCoexistAvailable(pBtCoexist))
+ return;
+
+ halbtcoutsrc_LeaveLowPower(pBtCoexist);
+
+ if (pBtCoexist->boardInfo.btdmAntNum == 2)
+ EXhalbtc8723b2ant_DisplayCoexInfo(pBtCoexist);
+ else if (pBtCoexist->boardInfo.btdmAntNum == 1)
+ EXhalbtc8723b1ant_DisplayCoexInfo(pBtCoexist);
+
+ halbtcoutsrc_NormalLowPower(pBtCoexist);
+}
+
+/*
+ * Description:
+ * Run the BT-Coexist mechanism or not
+ *
+ */
+void hal_btcoex_SetBTCoexist(struct adapter *padapter, u8 bBtExist)
+{
+ struct hal_com_data *pHalData;
+
+
+ pHalData = GET_HAL_DATA(padapter);
+ pHalData->bt_coexist.bBtExist = bBtExist;
+}
+
+/*
+ * Description:
+ * Check whether the BT co-exist mechanism is enabled or not
+ *
+ * Return:
+ * true	BT co-exist mechanism enabled
+ * false	BT co-exist mechanism disabled
+ */
+u8 hal_btcoex_IsBtExist(struct adapter *padapter)
+{
+ struct hal_com_data *pHalData;
+
+
+ pHalData = GET_HAL_DATA(padapter);
+ return pHalData->bt_coexist.bBtExist;
+}
+
+u8 hal_btcoex_IsBtDisabled(struct adapter *padapter)
+{
+ if (!hal_btcoex_IsBtExist(padapter))
+ return true;
+
+ if (GLBtCoexist.btInfo.bBtDisabled)
+ return true;
+ else
+ return false;
+}
+
+void hal_btcoex_SetChipType(struct adapter *padapter, u8 chipType)
+{
+ struct hal_com_data *pHalData;
+
+
+ pHalData = GET_HAL_DATA(padapter);
+ pHalData->bt_coexist.btChipType = chipType;
+
+ EXhalbtcoutsrc_SetChipType(chipType);
+}
+
+void hal_btcoex_SetPgAntNum(struct adapter *padapter, u8 antNum)
+{
+ struct hal_com_data *pHalData;
+
+
+ pHalData = GET_HAL_DATA(padapter);
+
+ pHalData->bt_coexist.btTotalAntNum = antNum;
+ EXhalbtcoutsrc_SetAntNum(BT_COEX_ANT_TYPE_PG, antNum);
+}
+
+void hal_btcoex_SetSingleAntPath(struct adapter *padapter, u8 singleAntPath)
+{
+ EXhalbtcoutsrc_SetSingleAntPath(singleAntPath);
+}
+
+u8 hal_btcoex_Initialize(struct adapter *padapter)
+{
+ u8 ret1;
+ u8 ret2;
+
+
+ memset(&GLBtCoexist, 0, sizeof(GLBtCoexist));
+ ret1 = EXhalbtcoutsrc_InitlizeVariables((void *)padapter);
+ ret2 = (ret1 == true) ? true : false;
+
+ return ret2;
+}
+
+void hal_btcoex_PowerOnSetting(struct adapter *padapter)
+{
+ EXhalbtcoutsrc_PowerOnSetting(&GLBtCoexist);
+}
+
+void hal_btcoex_InitHwConfig(struct adapter *padapter, u8 bWifiOnly)
+{
+ if (!hal_btcoex_IsBtExist(padapter))
+ return;
+
+ EXhalbtcoutsrc_InitHwConfig(&GLBtCoexist, bWifiOnly);
+ EXhalbtcoutsrc_InitCoexDm(&GLBtCoexist);
+}
+
+void hal_btcoex_IpsNotify(struct adapter *padapter, u8 type)
+{
+ EXhalbtcoutsrc_IpsNotify(&GLBtCoexist, type);
+}
+
+void hal_btcoex_LpsNotify(struct adapter *padapter, u8 type)
+{
+ EXhalbtcoutsrc_LpsNotify(&GLBtCoexist, type);
+}
+
+void hal_btcoex_ScanNotify(struct adapter *padapter, u8 type)
+{
+ EXhalbtcoutsrc_ScanNotify(&GLBtCoexist, type);
+}
+
+void hal_btcoex_ConnectNotify(struct adapter *padapter, u8 action)
+{
+ EXhalbtcoutsrc_ConnectNotify(&GLBtCoexist, action);
+}
+
+void hal_btcoex_MediaStatusNotify(struct adapter *padapter, u8 mediaStatus)
+{
+ EXhalbtcoutsrc_MediaStatusNotify(&GLBtCoexist, mediaStatus);
+}
+
+void hal_btcoex_SpecialPacketNotify(struct adapter *padapter, u8 pktType)
+{
+ EXhalbtcoutsrc_SpecialPacketNotify(&GLBtCoexist, pktType);
+}
+
+void hal_btcoex_IQKNotify(struct adapter *padapter, u8 state)
+{
+ GLBtcWiFiInIQKState = state;
+}
+
+void hal_btcoex_BtInfoNotify(struct adapter *padapter, u8 length, u8 *tmpBuf)
+{
+ if (GLBtcWiFiInIQKState == true)
+ return;
+
+ EXhalbtcoutsrc_BtInfoNotify(&GLBtCoexist, tmpBuf, length);
+}
+
+void hal_btcoex_SuspendNotify(struct adapter *padapter, u8 state)
+{
+ if (state == 1)
+ state = BTC_WIFI_PNP_SLEEP;
+ else
+ state = BTC_WIFI_PNP_WAKE_UP;
+
+ EXhalbtcoutsrc_PnpNotify(&GLBtCoexist, state);
+}
+
+void hal_btcoex_HaltNotify(struct adapter *padapter)
+{
+ EXhalbtcoutsrc_HaltNotify(&GLBtCoexist);
+}
+
+void hal_btcoex_Hanlder(struct adapter *padapter)
+{
+ EXhalbtcoutsrc_Periodical(&GLBtCoexist);
+}
+
+s32 hal_btcoex_IsBTCoexCtrlAMPDUSize(struct adapter *padapter)
+{
+ return (s32)GLBtCoexist.btInfo.bBtCtrlAggBufSize;
+}
+
+void hal_btcoex_SetManualControl(struct adapter *padapter, u8 bmanual)
+{
+ GLBtCoexist.bManualControl = bmanual;
+}
+
+u8 hal_btcoex_IsBtControlLps(struct adapter *padapter)
+{
+ if (hal_btcoex_IsBtExist(padapter) == false)
+ return false;
+
+ if (GLBtCoexist.btInfo.bBtDisabled)
+ return false;
+
+ if (GLBtCoexist.btInfo.bBtCtrlLps)
+ return true;
+
+ return false;
+}
+
+u8 hal_btcoex_IsLpsOn(struct adapter *padapter)
+{
+ if (hal_btcoex_IsBtExist(padapter) == false)
+ return false;
+
+ if (GLBtCoexist.btInfo.bBtDisabled)
+ return false;
+
+ if (GLBtCoexist.btInfo.bBtLpsOn)
+ return true;
+
+ return false;
+}
+
+u8 hal_btcoex_RpwmVal(struct adapter *padapter)
+{
+ return GLBtCoexist.btInfo.rpwmVal;
+}
+
+u8 hal_btcoex_LpsVal(struct adapter *padapter)
+{
+ return GLBtCoexist.btInfo.lpsVal;
+}
+
+u32 hal_btcoex_GetRaMask(struct adapter *padapter)
+{
+ if (!hal_btcoex_IsBtExist(padapter))
+ return 0;
+
+ if (GLBtCoexist.btInfo.bBtDisabled)
+ return 0;
+
+ if (GLBtCoexist.boardInfo.btdmAntNum != 1)
+ return 0;
+
+ return GLBtCoexist.btInfo.raMask;
+}
+
+void hal_btcoex_RecordPwrMode(struct adapter *padapter, u8 *pCmdBuf, u8 cmdLen)
+{
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, ("[BTCoex], FW write pwrModeCmd = 0x%04x%08x\n",
+ pCmdBuf[0]<<8|pCmdBuf[1],
+ pCmdBuf[2]<<24|pCmdBuf[3]<<16|pCmdBuf[4]<<8|pCmdBuf[5]));
+
+ memcpy(GLBtCoexist.pwrModeVal, pCmdBuf, cmdLen);
+}
+
+void hal_btcoex_DisplayBtCoexInfo(struct adapter *padapter, u8 *pbuf, u32 bufsize)
+{
+ PBTCDBGINFO pinfo;
+
+
+ pinfo = &GLBtcDbgInfo;
+ DBG_BT_INFO_INIT(pinfo, pbuf, bufsize);
+ EXhalbtcoutsrc_DisplayBtCoexInfo(&GLBtCoexist);
+ DBG_BT_INFO_INIT(pinfo, NULL, 0);
+}
+
+void hal_btcoex_SetDBG(struct adapter *padapter, u32 *pDbgModule)
+{
+ u32 i;
+
+
+ if (NULL == pDbgModule)
+ return;
+
+ for (i = 0; i < BTC_MSG_MAX; i++)
+ GLBtcDbgType[i] = pDbgModule[i];
+}
+
+u32 hal_btcoex_GetDBG(struct adapter *padapter, u8 *pStrBuf, u32 bufSize)
+{
+ s32 count;
+ u8 *pstr;
+ u32 leftSize;
+
+
+ if ((NULL == pStrBuf) || (0 == bufSize))
+ return 0;
+
+ pstr = pStrBuf;
+ leftSize = bufSize;
+/* DBG_871X(FUNC_ADPT_FMT ": bufsize =%d\n", FUNC_ADPT_ARG(padapter), bufSize); */
+
+ count = rtw_sprintf(pstr, leftSize, "#define DBG\t%d\n", DBG);
+ if ((count < 0) || (count >= leftSize))
+ goto exit;
+ pstr += count;
+ leftSize -= count;
+
+ count = rtw_sprintf(pstr, leftSize, "BTCOEX Debug Setting:\n");
+ if ((count < 0) || (count >= leftSize))
+ goto exit;
+ pstr += count;
+ leftSize -= count;
+
+ count = rtw_sprintf(pstr, leftSize,
+ "INTERFACE / ALGORITHM: 0x%08X / 0x%08X\n\n",
+ GLBtcDbgType[BTC_MSG_INTERFACE],
+ GLBtcDbgType[BTC_MSG_ALGORITHM]);
+ if ((count < 0) || (count >= leftSize))
+ goto exit;
+ pstr += count;
+ leftSize -= count;
+
+ count = rtw_sprintf(pstr, leftSize, "INTERFACE Debug Setting Definition:\n");
+ if ((count < 0) || (count >= leftSize))
+ goto exit;
+ pstr += count;
+ leftSize -= count;
+ count = rtw_sprintf(pstr, leftSize, "\tbit[0]=%d for INTF_INIT\n",
+ (GLBtcDbgType[BTC_MSG_INTERFACE]&INTF_INIT)?1:0);
+ if ((count < 0) || (count >= leftSize))
+ goto exit;
+ pstr += count;
+ leftSize -= count;
+ count = rtw_sprintf(pstr, leftSize, "\tbit[2]=%d for INTF_NOTIFY\n\n",
+ (GLBtcDbgType[BTC_MSG_INTERFACE]&INTF_NOTIFY)?1:0);
+ if ((count < 0) || (count >= leftSize))
+ goto exit;
+ pstr += count;
+ leftSize -= count;
+
+ count = rtw_sprintf(pstr, leftSize, "ALGORITHM Debug Setting Definition:\n");
+ if ((count < 0) || (count >= leftSize))
+ goto exit;
+ pstr += count;
+ leftSize -= count;
+ count = rtw_sprintf(pstr, leftSize, "\tbit[0]=%d for BT_RSSI_STATE\n",
+ (GLBtcDbgType[BTC_MSG_ALGORITHM]&ALGO_BT_RSSI_STATE)?1:0);
+ if ((count < 0) || (count >= leftSize))
+ goto exit;
+ pstr += count;
+ leftSize -= count;
+ count = rtw_sprintf(pstr, leftSize, "\tbit[1]=%d for WIFI_RSSI_STATE\n",
+ (GLBtcDbgType[BTC_MSG_ALGORITHM]&ALGO_WIFI_RSSI_STATE)?1:0);
+ if ((count < 0) || (count >= leftSize))
+ goto exit;
+ pstr += count;
+ leftSize -= count;
+ count = rtw_sprintf(pstr, leftSize, "\tbit[2]=%d for BT_MONITOR\n",
+ (GLBtcDbgType[BTC_MSG_ALGORITHM]&ALGO_BT_MONITOR)?1:0);
+ if ((count < 0) || (count >= leftSize))
+ goto exit;
+ pstr += count;
+ leftSize -= count;
+ count = rtw_sprintf(pstr, leftSize, "\tbit[3]=%d for TRACE\n",
+ (GLBtcDbgType[BTC_MSG_ALGORITHM]&ALGO_TRACE)?1:0);
+ if ((count < 0) || (count >= leftSize))
+ goto exit;
+ pstr += count;
+ leftSize -= count;
+ count = rtw_sprintf(pstr, leftSize, "\tbit[4]=%d for TRACE_FW\n",
+ (GLBtcDbgType[BTC_MSG_ALGORITHM]&ALGO_TRACE_FW)?1:0);
+ if ((count < 0) || (count >= leftSize))
+ goto exit;
+ pstr += count;
+ leftSize -= count;
+ count = rtw_sprintf(pstr, leftSize, "\tbit[5]=%d for TRACE_FW_DETAIL\n",
+ (GLBtcDbgType[BTC_MSG_ALGORITHM]&ALGO_TRACE_FW_DETAIL)?1:0);
+ if ((count < 0) || (count >= leftSize))
+ goto exit;
+ pstr += count;
+ leftSize -= count;
+ count = rtw_sprintf(pstr, leftSize, "\tbit[6]=%d for TRACE_FW_EXEC\n",
+ (GLBtcDbgType[BTC_MSG_ALGORITHM]&ALGO_TRACE_FW_EXEC)?1:0);
+ if ((count < 0) || (count >= leftSize))
+ goto exit;
+ pstr += count;
+ leftSize -= count;
+ count = rtw_sprintf(pstr, leftSize, "\tbit[7]=%d for TRACE_SW\n",
+ (GLBtcDbgType[BTC_MSG_ALGORITHM]&ALGO_TRACE_SW)?1:0);
+ if ((count < 0) || (count >= leftSize))
+ goto exit;
+ pstr += count;
+ leftSize -= count;
+ count = rtw_sprintf(pstr, leftSize, "\tbit[8]=%d for TRACE_SW_DETAIL\n",
+ (GLBtcDbgType[BTC_MSG_ALGORITHM]&ALGO_TRACE_SW_DETAIL)?1:0);
+ if ((count < 0) || (count >= leftSize))
+ goto exit;
+ pstr += count;
+ leftSize -= count;
+ count = rtw_sprintf(pstr, leftSize, "\tbit[9]=%d for TRACE_SW_EXEC\n",
+ (GLBtcDbgType[BTC_MSG_ALGORITHM]&ALGO_TRACE_SW_EXEC)?1:0);
+ if ((count < 0) || (count >= leftSize))
+ goto exit;
+ pstr += count;
+ leftSize -= count;
+
+exit:
+ count = pstr - pStrBuf;
+/* DBG_871X(FUNC_ADPT_FMT ": usedsize =%d\n", FUNC_ADPT_ARG(padapter), count); */
+
+ return count;
+}
diff --git a/drivers/staging/rtl8723bs/hal/hal_com.c b/drivers/staging/rtl8723bs/hal/hal_com.c
new file mode 100644
index 000000000000..1880d4140bee
--- /dev/null
+++ b/drivers/staging/rtl8723bs/hal/hal_com.c
@@ -0,0 +1,1751 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#define _HAL_COM_C_
+
+#include <drv_types.h>
+#include <rtw_debug.h>
+#include "hal_com_h2c.h"
+
+#include "odm_precomp.h"
+
+u8 rtw_hal_data_init(struct adapter *padapter)
+{
+ if (is_primary_adapter(padapter)) { /* if (padapter->isprimary) */
+ padapter->hal_data_sz = sizeof(struct hal_com_data);
+ padapter->HalData = vzalloc(padapter->hal_data_sz);
+ if (padapter->HalData == NULL) {
+ DBG_8192C("cant not alloc memory for HAL DATA\n");
+ return _FAIL;
+ }
+ }
+ return _SUCCESS;
+}
+
+void rtw_hal_data_deinit(struct adapter *padapter)
+{
+ if (is_primary_adapter(padapter)) { /* if (padapter->isprimary) */
+ if (padapter->HalData) {
+ phy_free_filebuf(padapter);
+ vfree(padapter->HalData);
+ padapter->HalData = NULL;
+ padapter->hal_data_sz = 0;
+ }
+ }
+}
+
+
+void dump_chip_info(HAL_VERSION ChipVersion)
+{
+ int cnt = 0;
+ u8 buf[128];
+
+ cnt += sprintf((buf+cnt), "Chip Version Info: CHIP_8723B_");
+ cnt += sprintf((buf+cnt), "%s_", IS_NORMAL_CHIP(ChipVersion) ? "Normal_Chip" : "Test_Chip");
+ if (IS_CHIP_VENDOR_TSMC(ChipVersion))
+ cnt += sprintf((buf+cnt), "%s_", "TSMC");
+ else if (IS_CHIP_VENDOR_UMC(ChipVersion))
+ cnt += sprintf((buf+cnt), "%s_", "UMC");
+ else if (IS_CHIP_VENDOR_SMIC(ChipVersion))
+ cnt += sprintf((buf+cnt), "%s_", "SMIC");
+
+ if (IS_A_CUT(ChipVersion))
+ cnt += sprintf((buf+cnt), "A_CUT_");
+ else if (IS_B_CUT(ChipVersion))
+ cnt += sprintf((buf+cnt), "B_CUT_");
+ else if (IS_C_CUT(ChipVersion))
+ cnt += sprintf((buf+cnt), "C_CUT_");
+ else if (IS_D_CUT(ChipVersion))
+ cnt += sprintf((buf+cnt), "D_CUT_");
+ else if (IS_E_CUT(ChipVersion))
+ cnt += sprintf((buf+cnt), "E_CUT_");
+ else if (IS_I_CUT(ChipVersion))
+ cnt += sprintf((buf+cnt), "I_CUT_");
+ else if (IS_J_CUT(ChipVersion))
+ cnt += sprintf((buf+cnt), "J_CUT_");
+ else if (IS_K_CUT(ChipVersion))
+ cnt += sprintf((buf+cnt), "K_CUT_");
+ else
+ cnt += sprintf((buf+cnt), "UNKNOWN_CUT(%d)_", ChipVersion.CUTVersion);
+
+ if (IS_1T1R(ChipVersion))
+ cnt += sprintf((buf+cnt), "1T1R_");
+ else if (IS_1T2R(ChipVersion))
+ cnt += sprintf((buf+cnt), "1T2R_");
+ else if (IS_2T2R(ChipVersion))
+ cnt += sprintf((buf+cnt), "2T2R_");
+ else
+ cnt += sprintf((buf+cnt), "UNKNOWN_RFTYPE(%d)_", ChipVersion.RFType);
+
+ cnt += sprintf((buf+cnt), "RomVer(%d)\n", ChipVersion.ROMVer);
+
+ DBG_871X("%s", buf);
+}
+
+
+#define EEPROM_CHANNEL_PLAN_BY_HW_MASK 0x80
+
+/*
+ * Description:
+ * Use the hardware (efuse), driver parameter (registry) and default channel
+ * plan to decide which one should be used.
+ *
+ * Parameters:
+ * padapter		pointer to the adapter
+ * hw_channel_plan	channel plan from HW (efuse/eeprom)
+ *			BIT[7] software configure mode; 0: enable, 1: disable
+ *			BIT[6:0] channel plan
+ * sw_channel_plan	channel plan from SW (registry/module param)
+ * def_channel_plan	channel plan used when both HW and SW plans are invalid
+ * AutoLoadFail		efuse autoload failed or not
+ *
+ * Return:
+ * Final channel plan decision
+ *
+ */
+u8 hal_com_config_channel_plan(
+ struct adapter *padapter,
+ u8 hw_channel_plan,
+ u8 sw_channel_plan,
+ u8 def_channel_plan,
+ bool AutoLoadFail
+)
+{
+ struct hal_com_data *pHalData;
+ u8 chnlPlan;
+
+ pHalData = GET_HAL_DATA(padapter);
+ pHalData->bDisableSWChannelPlan = false;
+ chnlPlan = def_channel_plan;
+
+ if (0xFF == hw_channel_plan)
+ AutoLoadFail = true;
+
+ if (false == AutoLoadFail) {
+ u8 hw_chnlPlan;
+
+ hw_chnlPlan = hw_channel_plan & (~EEPROM_CHANNEL_PLAN_BY_HW_MASK);
+ if (rtw_is_channel_plan_valid(hw_chnlPlan)) {
+#ifndef CONFIG_SW_CHANNEL_PLAN
+ if (hw_channel_plan & EEPROM_CHANNEL_PLAN_BY_HW_MASK)
+ pHalData->bDisableSWChannelPlan = true;
+#endif /* !CONFIG_SW_CHANNEL_PLAN */
+
+ chnlPlan = hw_chnlPlan;
+ }
+ }
+
+ if (
+ (false == pHalData->bDisableSWChannelPlan) &&
+ rtw_is_channel_plan_valid(sw_channel_plan)
+ )
+ chnlPlan = sw_channel_plan;
+
+ return chnlPlan;
+}
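+
+/*
+ * Worked example (illustrative only): with CONFIG_SW_CHANNEL_PLAN undefined,
+ * AutoLoadFail == false, hw_channel_plan = 0xA0 (BIT7 set, plan 0x20),
+ * sw_channel_plan = 0x10 and def_channel_plan = 0x02, the function sets
+ * bDisableSWChannelPlan and returns the hardware plan 0x20, so the SW plan
+ * 0x10 is ignored; this assumes rtw_is_channel_plan_valid(0x20) holds for
+ * this driver.
+ */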
+
+bool HAL_IsLegalChannel(struct adapter *Adapter, u32 Channel)
+{
+ bool bLegalChannel = true;
+
+ if (Channel > 14) {
+ bLegalChannel = false;
+ DBG_871X("Channel > 14 but wireless_mode do not support 5G\n");
+ } else if ((Channel <= 14) && (Channel >= 1)) {
+ if (IsSupported24G(Adapter->registrypriv.wireless_mode) == false) {
+ bLegalChannel = false;
+ DBG_871X("(Channel <= 14) && (Channel >= 1) but wireless_mode do not support 2.4G\n");
+ }
+ } else {
+ bLegalChannel = false;
+ DBG_871X("Channel is Invalid !!!\n");
+ }
+
+ return bLegalChannel;
+}
+
+u8 MRateToHwRate(u8 rate)
+{
+ u8 ret = DESC_RATE1M;
+
+ switch (rate) {
+ case MGN_1M:
+ ret = DESC_RATE1M;
+ break;
+ case MGN_2M:
+ ret = DESC_RATE2M;
+ break;
+ case MGN_5_5M:
+ ret = DESC_RATE5_5M;
+ break;
+ case MGN_11M:
+ ret = DESC_RATE11M;
+ break;
+ case MGN_6M:
+ ret = DESC_RATE6M;
+ break;
+ case MGN_9M:
+ ret = DESC_RATE9M;
+ break;
+ case MGN_12M:
+ ret = DESC_RATE12M;
+ break;
+ case MGN_18M:
+ ret = DESC_RATE18M;
+ break;
+ case MGN_24M:
+ ret = DESC_RATE24M;
+ break;
+ case MGN_36M:
+ ret = DESC_RATE36M;
+ break;
+ case MGN_48M:
+ ret = DESC_RATE48M;
+ break;
+ case MGN_54M:
+ ret = DESC_RATE54M;
+ break;
+ case MGN_MCS0:
+ ret = DESC_RATEMCS0;
+ break;
+ case MGN_MCS1:
+ ret = DESC_RATEMCS1;
+ break;
+ case MGN_MCS2:
+ ret = DESC_RATEMCS2;
+ break;
+ case MGN_MCS3:
+ ret = DESC_RATEMCS3;
+ break;
+ case MGN_MCS4:
+ ret = DESC_RATEMCS4;
+ break;
+ case MGN_MCS5:
+ ret = DESC_RATEMCS5;
+ break;
+ case MGN_MCS6:
+ ret = DESC_RATEMCS6;
+ break;
+ case MGN_MCS7:
+ ret = DESC_RATEMCS7;
+ break;
+ case MGN_MCS8:
+ ret = DESC_RATEMCS8;
+ break;
+ case MGN_MCS9:
+ ret = DESC_RATEMCS9;
+ break;
+ case MGN_MCS10:
+ ret = DESC_RATEMCS10;
+ break;
+ case MGN_MCS11:
+ ret = DESC_RATEMCS11;
+ break;
+ case MGN_MCS12:
+ ret = DESC_RATEMCS12;
+ break;
+ case MGN_MCS13:
+ ret = DESC_RATEMCS13;
+ break;
+ case MGN_MCS14:
+ ret = DESC_RATEMCS14;
+ break;
+ case MGN_MCS15:
+ ret = DESC_RATEMCS15;
+ break;
+ case MGN_MCS16:
+ ret = DESC_RATEMCS16;
+ break;
+ case MGN_MCS17:
+ ret = DESC_RATEMCS17;
+ break;
+ case MGN_MCS18:
+ ret = DESC_RATEMCS18;
+ break;
+ case MGN_MCS19:
+ ret = DESC_RATEMCS19;
+ break;
+ case MGN_MCS20:
+ ret = DESC_RATEMCS20;
+ break;
+ case MGN_MCS21:
+ ret = DESC_RATEMCS21;
+ break;
+ case MGN_MCS22:
+ ret = DESC_RATEMCS22;
+ break;
+ case MGN_MCS23:
+ ret = DESC_RATEMCS23;
+ break;
+ case MGN_MCS24:
+ ret = DESC_RATEMCS24;
+ break;
+ case MGN_MCS25:
+ ret = DESC_RATEMCS25;
+ break;
+ case MGN_MCS26:
+ ret = DESC_RATEMCS26;
+ break;
+ case MGN_MCS27:
+ ret = DESC_RATEMCS27;
+ break;
+ case MGN_MCS28:
+ ret = DESC_RATEMCS28;
+ break;
+ case MGN_MCS29:
+ ret = DESC_RATEMCS29;
+ break;
+ case MGN_MCS30:
+ ret = DESC_RATEMCS30;
+ break;
+ case MGN_MCS31:
+ ret = DESC_RATEMCS31;
+ break;
+ case MGN_VHT1SS_MCS0:
+ ret = DESC_RATEVHTSS1MCS0;
+ break;
+ case MGN_VHT1SS_MCS1:
+ ret = DESC_RATEVHTSS1MCS1;
+ break;
+ case MGN_VHT1SS_MCS2:
+ ret = DESC_RATEVHTSS1MCS2;
+ break;
+ case MGN_VHT1SS_MCS3:
+ ret = DESC_RATEVHTSS1MCS3;
+ break;
+ case MGN_VHT1SS_MCS4:
+ ret = DESC_RATEVHTSS1MCS4;
+ break;
+ case MGN_VHT1SS_MCS5:
+ ret = DESC_RATEVHTSS1MCS5;
+ break;
+ case MGN_VHT1SS_MCS6:
+ ret = DESC_RATEVHTSS1MCS6;
+ break;
+ case MGN_VHT1SS_MCS7:
+ ret = DESC_RATEVHTSS1MCS7;
+ break;
+ case MGN_VHT1SS_MCS8:
+ ret = DESC_RATEVHTSS1MCS8;
+ break;
+ case MGN_VHT1SS_MCS9:
+ ret = DESC_RATEVHTSS1MCS9;
+ break;
+ case MGN_VHT2SS_MCS0:
+ ret = DESC_RATEVHTSS2MCS0;
+ break;
+ case MGN_VHT2SS_MCS1:
+ ret = DESC_RATEVHTSS2MCS1;
+ break;
+ case MGN_VHT2SS_MCS2:
+ ret = DESC_RATEVHTSS2MCS2;
+ break;
+ case MGN_VHT2SS_MCS3:
+ ret = DESC_RATEVHTSS2MCS3;
+ break;
+ case MGN_VHT2SS_MCS4:
+ ret = DESC_RATEVHTSS2MCS4;
+ break;
+ case MGN_VHT2SS_MCS5:
+ ret = DESC_RATEVHTSS2MCS5;
+ break;
+ case MGN_VHT2SS_MCS6:
+ ret = DESC_RATEVHTSS2MCS6;
+ break;
+ case MGN_VHT2SS_MCS7:
+ ret = DESC_RATEVHTSS2MCS7;
+ break;
+ case MGN_VHT2SS_MCS8:
+ ret = DESC_RATEVHTSS2MCS8;
+ break;
+ case MGN_VHT2SS_MCS9:
+ ret = DESC_RATEVHTSS2MCS9;
+ break;
+ case MGN_VHT3SS_MCS0:
+ ret = DESC_RATEVHTSS3MCS0;
+ break;
+ case MGN_VHT3SS_MCS1:
+ ret = DESC_RATEVHTSS3MCS1;
+ break;
+ case MGN_VHT3SS_MCS2:
+ ret = DESC_RATEVHTSS3MCS2;
+ break;
+ case MGN_VHT3SS_MCS3:
+ ret = DESC_RATEVHTSS3MCS3;
+ break;
+ case MGN_VHT3SS_MCS4:
+ ret = DESC_RATEVHTSS3MCS4;
+ break;
+ case MGN_VHT3SS_MCS5:
+ ret = DESC_RATEVHTSS3MCS5;
+ break;
+ case MGN_VHT3SS_MCS6:
+ ret = DESC_RATEVHTSS3MCS6;
+ break;
+ case MGN_VHT3SS_MCS7:
+ ret = DESC_RATEVHTSS3MCS7;
+ break;
+ case MGN_VHT3SS_MCS8:
+ ret = DESC_RATEVHTSS3MCS8;
+ break;
+ case MGN_VHT3SS_MCS9:
+ ret = DESC_RATEVHTSS3MCS9;
+ break;
+ case MGN_VHT4SS_MCS0:
+ ret = DESC_RATEVHTSS4MCS0;
+ break;
+ case MGN_VHT4SS_MCS1:
+ ret = DESC_RATEVHTSS4MCS1;
+ break;
+ case MGN_VHT4SS_MCS2:
+ ret = DESC_RATEVHTSS4MCS2;
+ break;
+ case MGN_VHT4SS_MCS3:
+ ret = DESC_RATEVHTSS4MCS3;
+ break;
+ case MGN_VHT4SS_MCS4:
+ ret = DESC_RATEVHTSS4MCS4;
+ break;
+ case MGN_VHT4SS_MCS5:
+ ret = DESC_RATEVHTSS4MCS5;
+ break;
+ case MGN_VHT4SS_MCS6:
+ ret = DESC_RATEVHTSS4MCS6;
+ break;
+ case MGN_VHT4SS_MCS7:
+ ret = DESC_RATEVHTSS4MCS7;
+ break;
+ case MGN_VHT4SS_MCS8:
+ ret = DESC_RATEVHTSS4MCS8;
+ break;
+ case MGN_VHT4SS_MCS9:
+ ret = DESC_RATEVHTSS4MCS9;
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+u8 HwRateToMRate(u8 rate)
+{
+ u8 ret_rate = MGN_1M;
+
+ switch (rate) {
+ case DESC_RATE1M:
+ ret_rate = MGN_1M;
+ break;
+ case DESC_RATE2M:
+ ret_rate = MGN_2M;
+ break;
+ case DESC_RATE5_5M:
+ ret_rate = MGN_5_5M;
+ break;
+ case DESC_RATE11M:
+ ret_rate = MGN_11M;
+ break;
+ case DESC_RATE6M:
+ ret_rate = MGN_6M;
+ break;
+ case DESC_RATE9M:
+ ret_rate = MGN_9M;
+ break;
+ case DESC_RATE12M:
+ ret_rate = MGN_12M;
+ break;
+ case DESC_RATE18M:
+ ret_rate = MGN_18M;
+ break;
+ case DESC_RATE24M:
+ ret_rate = MGN_24M;
+ break;
+ case DESC_RATE36M:
+ ret_rate = MGN_36M;
+ break;
+ case DESC_RATE48M:
+ ret_rate = MGN_48M;
+ break;
+ case DESC_RATE54M:
+ ret_rate = MGN_54M;
+ break;
+ case DESC_RATEMCS0:
+ ret_rate = MGN_MCS0;
+ break;
+ case DESC_RATEMCS1:
+ ret_rate = MGN_MCS1;
+ break;
+ case DESC_RATEMCS2:
+ ret_rate = MGN_MCS2;
+ break;
+ case DESC_RATEMCS3:
+ ret_rate = MGN_MCS3;
+ break;
+ case DESC_RATEMCS4:
+ ret_rate = MGN_MCS4;
+ break;
+ case DESC_RATEMCS5:
+ ret_rate = MGN_MCS5;
+ break;
+ case DESC_RATEMCS6:
+ ret_rate = MGN_MCS6;
+ break;
+ case DESC_RATEMCS7:
+ ret_rate = MGN_MCS7;
+ break;
+ case DESC_RATEMCS8:
+ ret_rate = MGN_MCS8;
+ break;
+ case DESC_RATEMCS9:
+ ret_rate = MGN_MCS9;
+ break;
+ case DESC_RATEMCS10:
+ ret_rate = MGN_MCS10;
+ break;
+ case DESC_RATEMCS11:
+ ret_rate = MGN_MCS11;
+ break;
+ case DESC_RATEMCS12:
+ ret_rate = MGN_MCS12;
+ break;
+ case DESC_RATEMCS13:
+ ret_rate = MGN_MCS13;
+ break;
+ case DESC_RATEMCS14:
+ ret_rate = MGN_MCS14;
+ break;
+ case DESC_RATEMCS15:
+ ret_rate = MGN_MCS15;
+ break;
+ case DESC_RATEMCS16:
+ ret_rate = MGN_MCS16;
+ break;
+ case DESC_RATEMCS17:
+ ret_rate = MGN_MCS17;
+ break;
+ case DESC_RATEMCS18:
+ ret_rate = MGN_MCS18;
+ break;
+ case DESC_RATEMCS19:
+ ret_rate = MGN_MCS19;
+ break;
+ case DESC_RATEMCS20:
+ ret_rate = MGN_MCS20;
+ break;
+ case DESC_RATEMCS21:
+ ret_rate = MGN_MCS21;
+ break;
+ case DESC_RATEMCS22:
+ ret_rate = MGN_MCS22;
+ break;
+ case DESC_RATEMCS23:
+ ret_rate = MGN_MCS23;
+ break;
+ case DESC_RATEMCS24:
+ ret_rate = MGN_MCS24;
+ break;
+ case DESC_RATEMCS25:
+ ret_rate = MGN_MCS25;
+ break;
+ case DESC_RATEMCS26:
+ ret_rate = MGN_MCS26;
+ break;
+ case DESC_RATEMCS27:
+ ret_rate = MGN_MCS27;
+ break;
+ case DESC_RATEMCS28:
+ ret_rate = MGN_MCS28;
+ break;
+ case DESC_RATEMCS29:
+ ret_rate = MGN_MCS29;
+ break;
+ case DESC_RATEMCS30:
+ ret_rate = MGN_MCS30;
+ break;
+ case DESC_RATEMCS31:
+ ret_rate = MGN_MCS31;
+ break;
+ case DESC_RATEVHTSS1MCS0:
+ ret_rate = MGN_VHT1SS_MCS0;
+ break;
+ case DESC_RATEVHTSS1MCS1:
+ ret_rate = MGN_VHT1SS_MCS1;
+ break;
+ case DESC_RATEVHTSS1MCS2:
+ ret_rate = MGN_VHT1SS_MCS2;
+ break;
+ case DESC_RATEVHTSS1MCS3:
+ ret_rate = MGN_VHT1SS_MCS3;
+ break;
+ case DESC_RATEVHTSS1MCS4:
+ ret_rate = MGN_VHT1SS_MCS4;
+ break;
+ case DESC_RATEVHTSS1MCS5:
+ ret_rate = MGN_VHT1SS_MCS5;
+ break;
+ case DESC_RATEVHTSS1MCS6:
+ ret_rate = MGN_VHT1SS_MCS6;
+ break;
+ case DESC_RATEVHTSS1MCS7:
+ ret_rate = MGN_VHT1SS_MCS7;
+ break;
+ case DESC_RATEVHTSS1MCS8:
+ ret_rate = MGN_VHT1SS_MCS8;
+ break;
+ case DESC_RATEVHTSS1MCS9:
+ ret_rate = MGN_VHT1SS_MCS9;
+ break;
+ case DESC_RATEVHTSS2MCS0:
+ ret_rate = MGN_VHT2SS_MCS0;
+ break;
+ case DESC_RATEVHTSS2MCS1:
+ ret_rate = MGN_VHT2SS_MCS1;
+ break;
+ case DESC_RATEVHTSS2MCS2:
+ ret_rate = MGN_VHT2SS_MCS2;
+ break;
+ case DESC_RATEVHTSS2MCS3:
+ ret_rate = MGN_VHT2SS_MCS3;
+ break;
+ case DESC_RATEVHTSS2MCS4:
+ ret_rate = MGN_VHT2SS_MCS4;
+ break;
+ case DESC_RATEVHTSS2MCS5:
+ ret_rate = MGN_VHT2SS_MCS5;
+ break;
+ case DESC_RATEVHTSS2MCS6:
+ ret_rate = MGN_VHT2SS_MCS6;
+ break;
+ case DESC_RATEVHTSS2MCS7:
+ ret_rate = MGN_VHT2SS_MCS7;
+ break;
+ case DESC_RATEVHTSS2MCS8:
+ ret_rate = MGN_VHT2SS_MCS8;
+ break;
+ case DESC_RATEVHTSS2MCS9:
+ ret_rate = MGN_VHT2SS_MCS9;
+ break;
+ case DESC_RATEVHTSS3MCS0:
+ ret_rate = MGN_VHT3SS_MCS0;
+ break;
+ case DESC_RATEVHTSS3MCS1:
+ ret_rate = MGN_VHT3SS_MCS1;
+ break;
+ case DESC_RATEVHTSS3MCS2:
+ ret_rate = MGN_VHT3SS_MCS2;
+ break;
+ case DESC_RATEVHTSS3MCS3:
+ ret_rate = MGN_VHT3SS_MCS3;
+ break;
+ case DESC_RATEVHTSS3MCS4:
+ ret_rate = MGN_VHT3SS_MCS4;
+ break;
+ case DESC_RATEVHTSS3MCS5:
+ ret_rate = MGN_VHT3SS_MCS5;
+ break;
+ case DESC_RATEVHTSS3MCS6:
+ ret_rate = MGN_VHT3SS_MCS6;
+ break;
+ case DESC_RATEVHTSS3MCS7:
+ ret_rate = MGN_VHT3SS_MCS7;
+ break;
+ case DESC_RATEVHTSS3MCS8:
+ ret_rate = MGN_VHT3SS_MCS8;
+ break;
+ case DESC_RATEVHTSS3MCS9:
+ ret_rate = MGN_VHT3SS_MCS9;
+ break;
+ case DESC_RATEVHTSS4MCS0:
+ ret_rate = MGN_VHT4SS_MCS0;
+ break;
+ case DESC_RATEVHTSS4MCS1:
+ ret_rate = MGN_VHT4SS_MCS1;
+ break;
+ case DESC_RATEVHTSS4MCS2:
+ ret_rate = MGN_VHT4SS_MCS2;
+ break;
+ case DESC_RATEVHTSS4MCS3:
+ ret_rate = MGN_VHT4SS_MCS3;
+ break;
+ case DESC_RATEVHTSS4MCS4:
+ ret_rate = MGN_VHT4SS_MCS4;
+ break;
+ case DESC_RATEVHTSS4MCS5:
+ ret_rate = MGN_VHT4SS_MCS5;
+ break;
+ case DESC_RATEVHTSS4MCS6:
+ ret_rate = MGN_VHT4SS_MCS6;
+ break;
+ case DESC_RATEVHTSS4MCS7:
+ ret_rate = MGN_VHT4SS_MCS7;
+ break;
+ case DESC_RATEVHTSS4MCS8:
+ ret_rate = MGN_VHT4SS_MCS8;
+ break;
+ case DESC_RATEVHTSS4MCS9:
+ ret_rate = MGN_VHT4SS_MCS9;
+ break;
+
+ default:
+ DBG_871X("HwRateToMRate(): Non supported Rate [%x]!!!\n", rate);
+ break;
+ }
+
+ return ret_rate;
+}
+
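+/*
+ * Translate the basic-rate set handed down by the OS (IEEE rate values
+ * with the basic-rate bit set) into the hardware RATE_* bitmap written
+ * to *pBrateCfg.
+ */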
+void HalSetBrateCfg(struct adapter *Adapter, u8 *mBratesOS, u16 *pBrateCfg)
+{
+ u8 i, is_brate, brate;
+
+ for (i = 0; i < NDIS_802_11_LENGTH_RATES_EX; i++) {
+
+ is_brate = mBratesOS[i] & IEEE80211_BASIC_RATE_MASK;
+ brate = mBratesOS[i] & 0x7f;
+
+ if (is_brate) {
+ switch (brate) {
+ case IEEE80211_CCK_RATE_1MB:
+ *pBrateCfg |= RATE_1M;
+ break;
+ case IEEE80211_CCK_RATE_2MB:
+ *pBrateCfg |= RATE_2M;
+ break;
+ case IEEE80211_CCK_RATE_5MB:
+ *pBrateCfg |= RATE_5_5M;
+ break;
+ case IEEE80211_CCK_RATE_11MB:
+ *pBrateCfg |= RATE_11M;
+ break;
+ case IEEE80211_OFDM_RATE_6MB:
+ *pBrateCfg |= RATE_6M;
+ break;
+ case IEEE80211_OFDM_RATE_9MB:
+ *pBrateCfg |= RATE_9M;
+ break;
+ case IEEE80211_OFDM_RATE_12MB:
+ *pBrateCfg |= RATE_12M;
+ break;
+ case IEEE80211_OFDM_RATE_18MB:
+ *pBrateCfg |= RATE_18M;
+ break;
+ case IEEE80211_OFDM_RATE_24MB:
+ *pBrateCfg |= RATE_24M;
+ break;
+ case IEEE80211_OFDM_RATE_36MB:
+ *pBrateCfg |= RATE_36M;
+ break;
+ case IEEE80211_OFDM_RATE_48MB:
+ *pBrateCfg |= RATE_48M;
+ break;
+ case IEEE80211_OFDM_RATE_54MB:
+ *pBrateCfg |= RATE_54M;
+ break;
+ }
+ }
+ }
+}
+
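+/*
+ * Queue2Pipe[] maps the eight TX queues (0:VO, 1:VI, 2:BE, 3:BK, 4:BCN,
+ * 5:MGT, 6:HIGH, 7:TXCMD) onto the available out pipes in RtOutPipe[].
+ * The helpers below cover the one-, two- and three-endpoint layouts;
+ * the wifi_spec setting (the "WMM" case in the comments) selects an
+ * alternative spreading of the AC queues across the pipes.
+ */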
+static void _OneOutPipeMapping(struct adapter *padapter)
+{
+ struct dvobj_priv *pdvobjpriv = adapter_to_dvobj(padapter);
+
+ pdvobjpriv->Queue2Pipe[0] = pdvobjpriv->RtOutPipe[0];/* VO */
+ pdvobjpriv->Queue2Pipe[1] = pdvobjpriv->RtOutPipe[0];/* VI */
+ pdvobjpriv->Queue2Pipe[2] = pdvobjpriv->RtOutPipe[0];/* BE */
+ pdvobjpriv->Queue2Pipe[3] = pdvobjpriv->RtOutPipe[0];/* BK */
+
+ pdvobjpriv->Queue2Pipe[4] = pdvobjpriv->RtOutPipe[0];/* BCN */
+ pdvobjpriv->Queue2Pipe[5] = pdvobjpriv->RtOutPipe[0];/* MGT */
+ pdvobjpriv->Queue2Pipe[6] = pdvobjpriv->RtOutPipe[0];/* HIGH */
+ pdvobjpriv->Queue2Pipe[7] = pdvobjpriv->RtOutPipe[0];/* TXCMD */
+}
+
+static void _TwoOutPipeMapping(struct adapter *padapter, bool bWIFICfg)
+{
+ struct dvobj_priv *pdvobjpriv = adapter_to_dvobj(padapter);
+
+ if (bWIFICfg) { /* WMM */
+
+ /* BK, BE, VI, VO, BCN, CMD, MGT, HIGH, HCCA */
+ /* 0, 1, 0, 1, 0, 0, 0, 0, 0 }; */
+ /* 0:ep_0 num, 1:ep_1 num */
+
+ pdvobjpriv->Queue2Pipe[0] = pdvobjpriv->RtOutPipe[1];/* VO */
+ pdvobjpriv->Queue2Pipe[1] = pdvobjpriv->RtOutPipe[0];/* VI */
+ pdvobjpriv->Queue2Pipe[2] = pdvobjpriv->RtOutPipe[1];/* BE */
+ pdvobjpriv->Queue2Pipe[3] = pdvobjpriv->RtOutPipe[0];/* BK */
+
+ pdvobjpriv->Queue2Pipe[4] = pdvobjpriv->RtOutPipe[0];/* BCN */
+ pdvobjpriv->Queue2Pipe[5] = pdvobjpriv->RtOutPipe[0];/* MGT */
+ pdvobjpriv->Queue2Pipe[6] = pdvobjpriv->RtOutPipe[0];/* HIGH */
+ pdvobjpriv->Queue2Pipe[7] = pdvobjpriv->RtOutPipe[0];/* TXCMD */
+
+ } else { /* typical setting */
+
+
+ /* BK, BE, VI, VO, BCN, CMD, MGT, HIGH, HCCA */
+ /* 1, 1, 0, 0, 0, 0, 0, 0, 0 }; */
+ /* 0:ep_0 num, 1:ep_1 num */
+
+ pdvobjpriv->Queue2Pipe[0] = pdvobjpriv->RtOutPipe[0];/* VO */
+ pdvobjpriv->Queue2Pipe[1] = pdvobjpriv->RtOutPipe[0];/* VI */
+ pdvobjpriv->Queue2Pipe[2] = pdvobjpriv->RtOutPipe[1];/* BE */
+ pdvobjpriv->Queue2Pipe[3] = pdvobjpriv->RtOutPipe[1];/* BK */
+
+ pdvobjpriv->Queue2Pipe[4] = pdvobjpriv->RtOutPipe[0];/* BCN */
+ pdvobjpriv->Queue2Pipe[5] = pdvobjpriv->RtOutPipe[0];/* MGT */
+ pdvobjpriv->Queue2Pipe[6] = pdvobjpriv->RtOutPipe[0];/* HIGH */
+ pdvobjpriv->Queue2Pipe[7] = pdvobjpriv->RtOutPipe[0];/* TXCMD */
+
+ }
+
+}
+
+static void _ThreeOutPipeMapping(struct adapter *padapter, bool bWIFICfg)
+{
+ struct dvobj_priv *pdvobjpriv = adapter_to_dvobj(padapter);
+
+ if (bWIFICfg) { /* for WMM */
+
+ /* BK, BE, VI, VO, BCN, CMD, MGT, HIGH, HCCA */
+ /* 1, 2, 1, 0, 0, 0, 0, 0, 0 }; */
+ /* 0:H, 1:N, 2:L */
+
+ pdvobjpriv->Queue2Pipe[0] = pdvobjpriv->RtOutPipe[0];/* VO */
+ pdvobjpriv->Queue2Pipe[1] = pdvobjpriv->RtOutPipe[1];/* VI */
+ pdvobjpriv->Queue2Pipe[2] = pdvobjpriv->RtOutPipe[2];/* BE */
+ pdvobjpriv->Queue2Pipe[3] = pdvobjpriv->RtOutPipe[1];/* BK */
+
+ pdvobjpriv->Queue2Pipe[4] = pdvobjpriv->RtOutPipe[0];/* BCN */
+ pdvobjpriv->Queue2Pipe[5] = pdvobjpriv->RtOutPipe[0];/* MGT */
+ pdvobjpriv->Queue2Pipe[6] = pdvobjpriv->RtOutPipe[0];/* HIGH */
+ pdvobjpriv->Queue2Pipe[7] = pdvobjpriv->RtOutPipe[0];/* TXCMD */
+
+ } else { /* typical setting */
+
+
+ /* BK, BE, VI, VO, BCN, CMD, MGT, HIGH, HCCA */
+ /* 2, 2, 1, 0, 0, 0, 0, 0, 0 }; */
+ /* 0:H, 1:N, 2:L */
+
+ pdvobjpriv->Queue2Pipe[0] = pdvobjpriv->RtOutPipe[0];/* VO */
+ pdvobjpriv->Queue2Pipe[1] = pdvobjpriv->RtOutPipe[1];/* VI */
+ pdvobjpriv->Queue2Pipe[2] = pdvobjpriv->RtOutPipe[2];/* BE */
+ pdvobjpriv->Queue2Pipe[3] = pdvobjpriv->RtOutPipe[2];/* BK */
+
+ pdvobjpriv->Queue2Pipe[4] = pdvobjpriv->RtOutPipe[0];/* BCN */
+ pdvobjpriv->Queue2Pipe[5] = pdvobjpriv->RtOutPipe[0];/* MGT */
+ pdvobjpriv->Queue2Pipe[6] = pdvobjpriv->RtOutPipe[0];/* HIGH */
+ pdvobjpriv->Queue2Pipe[7] = pdvobjpriv->RtOutPipe[0];/* TXCMD */
+ }
+
+}
+
+bool Hal_MappingOutPipe(struct adapter *padapter, u8 NumOutPipe)
+{
+ struct registry_priv *pregistrypriv = &padapter->registrypriv;
+
+ bool bWIFICfg = (pregistrypriv->wifi_spec) ? true : false;
+
+ bool result = true;
+
+ switch (NumOutPipe) {
+ case 2:
+ _TwoOutPipeMapping(padapter, bWIFICfg);
+ break;
+ case 3:
+ case 4:
+ _ThreeOutPipeMapping(padapter, bWIFICfg);
+ break;
+ case 1:
+ _OneOutPipeMapping(padapter);
+ break;
+ default:
+ result = false;
+ break;
+ }
+
+ return result;
+
+}
+
+void hal_init_macaddr(struct adapter *adapter)
+{
+ rtw_hal_set_hwreg(adapter, HW_VAR_MAC_ADDR, adapter->eeprompriv.mac_addr);
+}
+
+void rtw_init_hal_com_default_value(struct adapter *Adapter)
+{
+ struct hal_com_data *pHalData = GET_HAL_DATA(Adapter);
+
+ pHalData->AntDetection = 1;
+}
+
+/*
+* C2H event format:
+* Field TRIGGER CONTENT CMD_SEQ CMD_LEN CMD_ID
+* BITS [127:120] [119:16] [15:8] [7:4] [3:0]
+*/
+
+void c2h_evt_clear(struct adapter *adapter)
+{
+ rtw_write8(adapter, REG_C2HEVT_CLEAR, C2H_EVT_HOST_CLOSE);
+}
+
+/*
+* C2H event format:
+* Field TRIGGER CMD_LEN CONTENT CMD_SEQ CMD_ID
+* BITS [127:120] [119:112] [111:16] [15:8] [7:0]
+*/
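+/*
+ * Read flow: the trigger byte at REG_C2HEVT_CLEAR tells whether the
+ * firmware has posted a new event (C2H_EVT_FW_CLOSE) or the mailbox is
+ * still owned by the host (C2H_EVT_HOST_CLOSE).  The header fields and
+ * payload are read from the REG_C2HEVT_MSG_NORMAL area, and
+ * c2h_evt_clear() then hands the mailbox back to the firmware so the
+ * next event can be delivered.
+ */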
+s32 c2h_evt_read_88xx(struct adapter *adapter, u8 *buf)
+{
+ s32 ret = _FAIL;
+ struct c2h_evt_hdr_88xx *c2h_evt;
+ int i;
+ u8 trigger;
+
+ if (buf == NULL)
+ goto exit;
+
+ trigger = rtw_read8(adapter, REG_C2HEVT_CLEAR);
+
+ if (trigger == C2H_EVT_HOST_CLOSE)
+ goto exit; /* Not ready */
+ else if (trigger != C2H_EVT_FW_CLOSE)
+ goto clear_evt; /* Not a valid value */
+
+ c2h_evt = (struct c2h_evt_hdr_88xx *)buf;
+
+ memset(c2h_evt, 0, 16);
+
+ c2h_evt->id = rtw_read8(adapter, REG_C2HEVT_MSG_NORMAL);
+ c2h_evt->seq = rtw_read8(adapter, REG_C2HEVT_CMD_SEQ_88XX);
+ c2h_evt->plen = rtw_read8(adapter, REG_C2HEVT_CMD_LEN_88XX);
+
+ RT_PRINT_DATA(
+ _module_hal_init_c_,
+ _drv_info_,
+ "c2h_evt_read(): ",
+ &c2h_evt,
+ sizeof(c2h_evt)
+ );
+
+ DBG_871X(
+ "%s id:%u, len:%u, seq:%u, trigger:0x%02x\n",
+ __func__,
+ c2h_evt->id,
+ c2h_evt->plen,
+ c2h_evt->seq,
+ trigger
+ );
+
+ /* Read the content */
+ for (i = 0; i < c2h_evt->plen; i++)
+ c2h_evt->payload[i] = rtw_read8(adapter, REG_C2HEVT_MSG_NORMAL + 2 + i);
+
+ RT_PRINT_DATA(_module_hal_init_c_, _drv_info_, "c2h_evt_read(): Command Content:\n",
+ c2h_evt->payload, c2h_evt->plen);
+
+ ret = _SUCCESS;
+
+clear_evt:
+ /*
+ * Clear event to notify FW we have read the command.
+ * If this field isn't clear, the FW won't update the next command message.
+ */
+ c2h_evt_clear(adapter);
+exit:
+ return ret;
+}
+
+
+u8 rtw_hal_networktype_to_raid(struct adapter *adapter, struct sta_info *psta)
+{
+ return networktype_to_raid_ex(adapter, psta);
+}
+
+u8 rtw_get_mgntframe_raid(struct adapter *adapter, unsigned char network_type)
+{
+
+ u8 raid;
+ raid = (network_type & WIRELESS_11B) ? RATEID_IDX_B : RATEID_IDX_G;
+ return raid;
+}
+
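+/*
+ * Build the rate bitmap used for rate adaptation: the low bits come from
+ * the station's CCK/OFDM bssrateset, and bits 12 and up are taken from
+ * the HT supported MCS set (8 MCS for 1R, 16 MCS for 2R).  init_rate is
+ * the index of the highest bit set in the resulting mask.
+ */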
+void rtw_hal_update_sta_rate_mask(struct adapter *padapter, struct sta_info *psta)
+{
+ u8 i, rf_type, limit;
+ u32 tx_ra_bitmap;
+
+ if (psta == NULL)
+ return;
+
+ tx_ra_bitmap = 0;
+
+ /* b/g mode ra_bitmap */
+ for (i = 0; i < sizeof(psta->bssrateset); i++) {
+ if (psta->bssrateset[i])
+ tx_ra_bitmap |= rtw_get_bit_value_from_ieee_value(psta->bssrateset[i]&0x7f);
+ }
+
+ /* n mode ra_bitmap */
+ if (psta->htpriv.ht_option) {
+ rtw_hal_get_hwreg(padapter, HW_VAR_RF_TYPE, (u8 *)(&rf_type));
+ if (rf_type == RF_2T2R)
+ limit = 16; /* 2R */
+ else
+ limit = 8; /* 1R */
+
+ for (i = 0; i < limit; i++) {
+ if (psta->htpriv.ht_cap.supp_mcs_set[i/8] & BIT(i%8))
+ tx_ra_bitmap |= BIT(i+12);
+ }
+ }
+
+ psta->ra_mask = tx_ra_bitmap;
+ psta->init_rate = get_highest_rate_idx(tx_ra_bitmap)&0x3f;
+}
+
+void hw_var_port_switch(struct adapter *adapter)
+{
+}
+
+void SetHwReg(struct adapter *adapter, u8 variable, u8 *val)
+{
+ struct hal_com_data *hal_data = GET_HAL_DATA(adapter);
+ DM_ODM_T *odm = &(hal_data->odmpriv);
+
+ switch (variable) {
+ case HW_VAR_PORT_SWITCH:
+ hw_var_port_switch(adapter);
+ break;
+ case HW_VAR_INIT_RTS_RATE:
+ rtw_warn_on(1);
+ break;
+ case HW_VAR_SEC_CFG:
+ {
+ u16 reg_scr;
+
+ reg_scr = rtw_read16(adapter, REG_SECCFG);
+ rtw_write16(adapter, REG_SECCFG, reg_scr|SCR_CHK_KEYID|SCR_RxDecEnable|SCR_TxEncEnable);
+ }
+ break;
+ case HW_VAR_SEC_DK_CFG:
+ {
+ struct security_priv *sec = &adapter->securitypriv;
+ u8 reg_scr = rtw_read8(adapter, REG_SECCFG);
+
+ if (val) { /* Enable default key related setting */
+ reg_scr |= SCR_TXBCUSEDK;
+ if (sec->dot11AuthAlgrthm != dot11AuthAlgrthm_8021X)
+ reg_scr |= (SCR_RxUseDK|SCR_TxUseDK);
+ } else /* Disable default key related setting */
+ reg_scr &= ~(SCR_RXBCUSEDK|SCR_TXBCUSEDK|SCR_RxUseDK|SCR_TxUseDK);
+
+ rtw_write8(adapter, REG_SECCFG, reg_scr);
+ }
+ break;
+ case HW_VAR_DM_FLAG:
+ odm->SupportAbility = *((u32 *)val);
+ break;
+ case HW_VAR_DM_FUNC_OP:
+ if (*((u8 *)val) == true) {
+ /* save dm flag */
+ odm->BK_SupportAbility = odm->SupportAbility;
+ } else {
+ /* restore dm flag */
+ odm->SupportAbility = odm->BK_SupportAbility;
+ }
+ break;
+ case HW_VAR_DM_FUNC_SET:
+ if (*((u32 *)val) == DYNAMIC_ALL_FUNC_ENABLE) {
+ struct dm_priv *dm = &hal_data->dmpriv;
+ dm->DMFlag = dm->InitDMFlag;
+ odm->SupportAbility = dm->InitODMFlag;
+ } else {
+ odm->SupportAbility |= *((u32 *)val);
+ }
+ break;
+ case HW_VAR_DM_FUNC_CLR:
+ /*
+ * input is already a mask to clear function
+ * don't invert it again! George, Lucas@20130513
+ */
+ odm->SupportAbility &= *((u32 *)val);
+ break;
+ case HW_VAR_AMPDU_MIN_SPACE:
+ /* TODO - Is something needed here? */
+ break;
+ case HW_VAR_WIRELESS_MODE:
+ /* TODO - Is something needed here? */
+ break;
+ default:
+ DBG_871X_LEVEL(
+ _drv_always_,
+ FUNC_ADPT_FMT" variable(%d) not defined!\n",
+ FUNC_ADPT_ARG(adapter),
+ variable
+ );
+ break;
+ }
+}
+
+void GetHwReg(struct adapter *adapter, u8 variable, u8 *val)
+{
+ struct hal_com_data *hal_data = GET_HAL_DATA(adapter);
+ DM_ODM_T *odm = &(hal_data->odmpriv);
+
+ switch (variable) {
+ case HW_VAR_BASIC_RATE:
+ *((u16 *)val) = hal_data->BasicRateSet;
+ break;
+ case HW_VAR_DM_FLAG:
+ *((u32 *)val) = odm->SupportAbility;
+ break;
+ case HW_VAR_RF_TYPE:
+ *((u8 *)val) = hal_data->rf_type;
+ break;
+ default:
+ DBG_871X_LEVEL(
+ _drv_always_,
+ FUNC_ADPT_FMT" variable(%d) not defined!\n",
+ FUNC_ADPT_ARG(adapter),
+ variable
+ );
+ break;
+ }
+}
+
+
+
+
+u8 SetHalDefVar(
+ struct adapter *adapter, enum HAL_DEF_VARIABLE variable, void *value
+)
+{
+ struct hal_com_data *hal_data = GET_HAL_DATA(adapter);
+ DM_ODM_T *odm = &(hal_data->odmpriv);
+ u8 bResult = _SUCCESS;
+
+ switch (variable) {
+ case HW_DEF_FA_CNT_DUMP:
+ /* ODM_COMP_COMMON */
+ if (*((u8 *)value))
+ odm->DebugComponents |= (ODM_COMP_DIG | ODM_COMP_FA_CNT);
+ else
+ odm->DebugComponents &= ~(ODM_COMP_DIG | ODM_COMP_FA_CNT);
+ break;
+ case HAL_DEF_DBG_RX_INFO_DUMP:
+ DBG_871X("============ Rx Info dump ===================\n");
+ DBG_871X("bLinked = %d, RSSI_Min = %d(%%)\n",
+ odm->bLinked, odm->RSSI_Min);
+
+ if (odm->bLinked) {
+ DBG_871X("RxRate = %s, RSSI_A = %d(%%), RSSI_B = %d(%%)\n",
+ HDATA_RATE(odm->RxRate), odm->RSSI_A, odm->RSSI_B);
+
+ #ifdef DBG_RX_SIGNAL_DISPLAY_RAW_DATA
+ rtw_dump_raw_rssi_info(adapter);
+ #endif
+ }
+ break;
+ case HW_DEF_ODM_DBG_FLAG:
+ ODM_CmnInfoUpdate(odm, ODM_CMNINFO_DBG_COMP, *((u64 *)value));
+ break;
+ case HW_DEF_ODM_DBG_LEVEL:
+ ODM_CmnInfoUpdate(odm, ODM_CMNINFO_DBG_LEVEL, *((u32 *)value));
+ break;
+ case HAL_DEF_DBG_DM_FUNC:
+ {
+ u8 dm_func = *((u8 *)value);
+ struct dm_priv *dm = &hal_data->dmpriv;
+
+ if (dm_func == 0) { /* disable all dynamic func */
+ odm->SupportAbility = DYNAMIC_FUNC_DISABLE;
+ DBG_8192C("==> Disable all dynamic function...\n");
+ } else if (dm_func == 1) {/* disable DIG */
+ odm->SupportAbility &= (~DYNAMIC_BB_DIG);
+ DBG_8192C("==> Disable DIG...\n");
+ } else if (dm_func == 2) {/* disable High power */
+ odm->SupportAbility &= (~DYNAMIC_BB_DYNAMIC_TXPWR);
+ } else if (dm_func == 3) {/* disable tx power tracking */
+ odm->SupportAbility &= (~DYNAMIC_RF_CALIBRATION);
+ DBG_8192C("==> Disable tx power tracking...\n");
+ } else if (dm_func == 4) {/* disable BT coexistence */
+ dm->DMFlag &= (~DYNAMIC_FUNC_BT);
+ } else if (dm_func == 5) {/* disable antenna diversity */
+ odm->SupportAbility &= (~DYNAMIC_BB_ANT_DIV);
+ } else if (dm_func == 6) {/* turn on all dynamic func */
+ if (!(odm->SupportAbility & DYNAMIC_BB_DIG)) {
+ DIG_T *pDigTable = &odm->DM_DigTable;
+ pDigTable->CurIGValue = rtw_read8(adapter, 0xc50);
+ }
+ dm->DMFlag |= DYNAMIC_FUNC_BT;
+ odm->SupportAbility = DYNAMIC_ALL_FUNC_ENABLE;
+ DBG_8192C("==> Turn on all dynamic function...\n");
+ }
+ }
+ break;
+ case HAL_DEF_DBG_DUMP_RXPKT:
+ hal_data->bDumpRxPkt = *((u8 *)value);
+ break;
+ case HAL_DEF_DBG_DUMP_TXPKT:
+ hal_data->bDumpTxPkt = *((u8 *)value);
+ break;
+ case HAL_DEF_ANT_DETECT:
+ hal_data->AntDetection = *((u8 *)value);
+ break;
+ default:
+ DBG_871X_LEVEL(_drv_always_, "%s: [WARNING] HAL_DEF_VARIABLE(%d) not defined!\n", __func__, variable);
+ bResult = _FAIL;
+ break;
+ }
+
+ return bResult;
+}
+
+u8 GetHalDefVar(
+ struct adapter *adapter, enum HAL_DEF_VARIABLE variable, void *value
+)
+{
+ struct hal_com_data *hal_data = GET_HAL_DATA(adapter);
+ DM_ODM_T *odm = &(hal_data->odmpriv);
+ u8 bResult = _SUCCESS;
+
+ switch (variable) {
+ case HAL_DEF_UNDERCORATEDSMOOTHEDPWDB:
+ {
+ struct mlme_priv *pmlmepriv;
+ struct sta_priv *pstapriv;
+ struct sta_info *psta;
+
+ pmlmepriv = &adapter->mlmepriv;
+ pstapriv = &adapter->stapriv;
+ psta = rtw_get_stainfo(pstapriv, pmlmepriv->cur_network.network.MacAddress);
+ if (psta)
+ *((int *)value) = psta->rssi_stat.UndecoratedSmoothedPWDB;
+ }
+ break;
+ case HW_DEF_ODM_DBG_FLAG:
+ *((u64 *)value) = odm->DebugComponents;
+ break;
+ case HW_DEF_ODM_DBG_LEVEL:
+ *((u32 *)value) = odm->DebugLevel;
+ break;
+ case HAL_DEF_DBG_DM_FUNC:
+ *((u32 *)value) = hal_data->odmpriv.SupportAbility;
+ break;
+ case HAL_DEF_DBG_DUMP_RXPKT:
+ *((u8 *)value) = hal_data->bDumpRxPkt;
+ break;
+ case HAL_DEF_DBG_DUMP_TXPKT:
+ *((u8 *)value) = hal_data->bDumpTxPkt;
+ break;
+ case HAL_DEF_ANT_DETECT:
+ *((u8 *)value) = hal_data->AntDetection;
+ break;
+ case HAL_DEF_MACID_SLEEP:
+ *(u8 *)value = false;
+ break;
+ case HAL_DEF_TX_PAGE_SIZE:
+ *((u32 *)value) = PAGE_SIZE_128;
+ break;
+ default:
+ DBG_871X_LEVEL(_drv_always_, "%s: [WARNING] HAL_DEF_VARIABLE(%d) not defined!\n", __func__, variable);
+ bResult = _FAIL;
+ break;
+ }
+
+ return bResult;
+}
+
+void GetHalODMVar(
+ struct adapter *Adapter,
+ enum HAL_ODM_VARIABLE eVariable,
+ void *pValue1,
+ void *pValue2
+)
+{
+ switch (eVariable) {
+#if defined(CONFIG_SIGNAL_DISPLAY_DBM) && defined(CONFIG_BACKGROUND_NOISE_MONITOR)
+ case HAL_ODM_NOISE_MONITOR:
+ {
+ struct hal_com_data *pHalData = GET_HAL_DATA(Adapter);
+ u8 chan = *(u8 *)pValue1;
+ *(s16 *)pValue2 = pHalData->noise[chan];
+ #ifdef DBG_NOISE_MONITOR
+ DBG_8192C("### Noise monitor chan(%d)-noise:%d (dBm) ###\n",
+ chan, pHalData->noise[chan]);
+ #endif
+
+ }
+ break;
+#endif/* ifdef CONFIG_BACKGROUND_NOISE_MONITOR */
+ default:
+ break;
+ }
+}
+
+void SetHalODMVar(
+ struct adapter *Adapter,
+ enum HAL_ODM_VARIABLE eVariable,
+ void *pValue1,
+ bool bSet
+)
+{
+ struct hal_com_data *pHalData = GET_HAL_DATA(Adapter);
+ PDM_ODM_T podmpriv = &pHalData->odmpriv;
+ /* _irqL irqL; */
+ switch (eVariable) {
+ case HAL_ODM_STA_INFO:
+ {
+ struct sta_info *psta = (struct sta_info *)pValue1;
+ if (bSet) {
+ DBG_8192C("### Set STA_(%d) info ###\n", psta->mac_id);
+ ODM_CmnInfoPtrArrayHook(podmpriv, ODM_CMNINFO_STA_STATUS, psta->mac_id, psta);
+ } else {
+ DBG_8192C("### Clean STA_(%d) info ###\n", psta->mac_id);
+ /* spin_lock_bh(&pHalData->odm_stainfo_lock); */
+ ODM_CmnInfoPtrArrayHook(podmpriv, ODM_CMNINFO_STA_STATUS, psta->mac_id, NULL);
+
+ /* spin_unlock_bh(&pHalData->odm_stainfo_lock); */
+ }
+ }
+ break;
+ case HAL_ODM_P2P_STATE:
+ ODM_CmnInfoUpdate(podmpriv, ODM_CMNINFO_WIFI_DIRECT, bSet);
+ break;
+ case HAL_ODM_WIFI_DISPLAY_STATE:
+ ODM_CmnInfoUpdate(podmpriv, ODM_CMNINFO_WIFI_DISPLAY, bSet);
+ break;
+ #if defined(CONFIG_SIGNAL_DISPLAY_DBM) && defined(CONFIG_BACKGROUND_NOISE_MONITOR)
+ case HAL_ODM_NOISE_MONITOR:
+ {
+ struct noise_info *pinfo = (struct noise_info *)pValue1;
+
+ #ifdef DBG_NOISE_MONITOR
+ DBG_8192C("### Noise monitor chan(%d)-bPauseDIG:%d, IGIValue:0x%02x, max_time:%d (ms) ###\n",
+ pinfo->chan, pinfo->bPauseDIG, pinfo->IGIValue, pinfo->max_time);
+ #endif
+
+ pHalData->noise[pinfo->chan] = ODM_InbandNoise_Monitor(podmpriv, pinfo->bPauseDIG, pinfo->IGIValue, pinfo->max_time);
+ DBG_871X("chan_%d, noise = %d (dBm)\n", pinfo->chan, pHalData->noise[pinfo->chan]);
+ #ifdef DBG_NOISE_MONITOR
+ DBG_871X("noise_a = %d, noise_b = %d noise_all:%d\n",
+ podmpriv->noise_level.noise[ODM_RF_PATH_A],
+ podmpriv->noise_level.noise[ODM_RF_PATH_B],
+ podmpriv->noise_level.noise_all);
+ #endif
+ }
+ break;
+ #endif/* ifdef CONFIG_BACKGROUND_NOISE_MONITOR */
+
+ default:
+ break;
+ }
+}
+
+
+bool eqNByte(u8 *str1, u8 *str2, u32 num)
+{
+ if (num == 0)
+ return false;
+ while (num > 0) {
+ num--;
+ if (str1[num] != str2[num])
+ return false;
+ }
+ return true;
+}
+
+/* */
+/* Description: */
+/* Return true if chTmp is represent for hex digit and */
+/* false otherwise. */
+/* */
+/* */
+bool IsHexDigit(char chTmp)
+{
+ if (
+ (chTmp >= '0' && chTmp <= '9') ||
+ (chTmp >= 'a' && chTmp <= 'f') ||
+ (chTmp >= 'A' && chTmp <= 'F')
+ )
+ return true;
+ else
+ return false;
+}
+
+
+/* */
+/* Description: */
+/* Translate a character to hex digit. */
+/* */
+u32 MapCharToHexDigit(char chTmp)
+{
+ if (chTmp >= '0' && chTmp <= '9')
+ return (chTmp - '0');
+ else if (chTmp >= 'a' && chTmp <= 'f')
+ return (10 + (chTmp - 'a'));
+ else if (chTmp >= 'A' && chTmp <= 'F')
+ return (10 + (chTmp - 'A'));
+ else
+ return 0;
+}
+
+
+
+/* Description: */
+/* Parse hex number from the string szStr. */
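+/* Leading blanks and an optional "0x"/"0X" prefix are skipped, and */
+/* *pu4bMove reports how many characters were consumed.  For example, */
+/* parsing "0x1A" yields *pu4bVal = 0x1a and *pu4bMove = 4. */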
+bool GetHexValueFromString(char *szStr, u32 *pu4bVal, u32 *pu4bMove)
+{
+ char *szScan = szStr;
+
+ /* Check input parameter. */
+ if (szStr == NULL || pu4bVal == NULL || pu4bMove == NULL) {
+ DBG_871X("GetHexValueFromString(): Invalid inpur argumetns! szStr: %p, pu4bVal: %p, pu4bMove: %p\n", szStr, pu4bVal, pu4bMove);
+ return false;
+ }
+
+ /* Initialize output. */
+ *pu4bMove = 0;
+ *pu4bVal = 0;
+
+ /* Skip leading space. */
+ while (*szScan != '\0' && (*szScan == ' ' || *szScan == '\t')) {
+ szScan++;
+ (*pu4bMove)++;
+ }
+
+ /* Skip leading '0x' or '0X'. */
+ if (*szScan == '0' && (*(szScan+1) == 'x' || *(szScan+1) == 'X')) {
+ szScan += 2;
+ (*pu4bMove) += 2;
+ }
+
+ /* Check if szScan is now pointer to a character for hex digit, */
+ /* if not, it means this is not a valid hex number. */
+ if (!IsHexDigit(*szScan))
+ return false;
+
+ /* Parse each digit. */
+ do {
+ (*pu4bVal) <<= 4;
+ *pu4bVal += MapCharToHexDigit(*szScan);
+
+ szScan++;
+ (*pu4bMove)++;
+ } while (IsHexDigit(*szScan));
+
+ return true;
+}
+
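+/*
+ * Parse a decimal value with an optional single fractional digit:
+ * "2.5" gives *pInteger = 2 and *pFraction = 5, while "3" gives
+ * *pInteger = 3 and *pFraction = 0.  *pu4bMove reports the number of
+ * characters consumed.
+ */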
+bool GetFractionValueFromString(
+ char *szStr, u8 *pInteger, u8 *pFraction, u32 *pu4bMove
+)
+{
+ char *szScan = szStr;
+
+ /* Initialize output. */
+ *pu4bMove = 0;
+ *pInteger = 0;
+ *pFraction = 0;
+
+ /* Skip leading space. */
+ while (*szScan != '\0' && (*szScan == ' ' || *szScan == '\t')) {
+ ++szScan;
+ ++(*pu4bMove);
+ }
+
+ /* Parse each digit. */
+ do {
+ (*pInteger) *= 10;
+ *pInteger += (*szScan - '0');
+
+ ++szScan;
+ ++(*pu4bMove);
+
+ if (*szScan == '.') {
+ ++szScan;
+ ++(*pu4bMove);
+
+ if (*szScan < '0' || *szScan > '9')
+ return false;
+ else {
+ *pFraction = *szScan - '0';
+ ++szScan;
+ ++(*pu4bMove);
+ return true;
+ }
+ }
+ } while (*szScan >= '0' && *szScan <= '9');
+
+ return true;
+}
+
+/* */
+/* Description: */
+/* Return true if szStr is comment out with leading "//". */
+/* */
+bool IsCommentString(char *szStr)
+{
+ if (*szStr == '/' && *(szStr+1) == '/')
+ return true;
+ else
+ return false;
+}
+
+bool GetU1ByteIntegerFromStringInDecimal(char *Str, u8 *pInt)
+{
+ u16 i = 0;
+ *pInt = 0;
+
+ while (Str[i] != '\0') {
+ if (Str[i] >= '0' && Str[i] <= '9') {
+ *pInt *= 10;
+ *pInt += (Str[i] - '0');
+ } else
+ return false;
+
+ ++i;
+ }
+
+ return true;
+}
+
+/* <20121004, Kordan> For example,
+ * ParseQualifiedString(inString, 0, outString, '[', ']') gets "Kordan" from
+ * a string "Hello [Kordan]".
+ * If RightQualifier does not exist, it will hang in the while loop
+ */
+bool ParseQualifiedString(
+ char *In, u32 *Start, char *Out, char LeftQualifier, char RightQualifier
+)
+{
+ u32 i = 0, j = 0;
+ char c = In[(*Start)++];
+
+ if (c != LeftQualifier)
+ return false;
+
+ i = (*Start);
+ while ((c = In[(*Start)++]) != RightQualifier)
+ ; /* find ']' */
+ j = (*Start) - 2;
+ strncpy((char *)Out, (const char *)(In+i), j-i+1);
+
+ return true;
+}
+
+bool isAllSpaceOrTab(u8 *data, u8 size)
+{
+ u8 cnt = 0, NumOfSpaceAndTab = 0;
+
+ while (size > cnt) {
+ if (data[cnt] == ' ' || data[cnt] == '\t' || data[cnt] == '\0')
+ ++NumOfSpaceAndTab;
+
+ ++cnt;
+ }
+
+ return size == NumOfSpaceAndTab;
+}
+
+
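+/*
+ * Switch the REG_RXERR_RPT counter to report RX FIFO overflows and keep
+ * the previous/current readings in debug_priv, so the per-call
+ * difference (dbg_rx_fifo_diff_overflow) is available for debugging.
+ */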
+void rtw_hal_check_rxfifo_full(struct adapter *adapter)
+{
+ struct dvobj_priv *psdpriv = adapter->dvobj;
+ struct debug_priv *pdbgpriv = &psdpriv->drv_dbg;
+ int save_cnt = false;
+
+ /* switch counter to RX fifo */
+ /* printk("8723b or 8192e , MAC_667 set 0xf0\n"); */
+ rtw_write8(adapter, REG_RXERR_RPT+3, rtw_read8(adapter, REG_RXERR_RPT+3)|0xf0);
+ save_cnt = true;
+ /* todo: other chips */
+
+ if (save_cnt) {
+ /* rtw_write8(adapter, REG_RXERR_RPT+3, rtw_read8(adapter, REG_RXERR_RPT+3)|0xa0); */
+ pdbgpriv->dbg_rx_fifo_last_overflow = pdbgpriv->dbg_rx_fifo_curr_overflow;
+ pdbgpriv->dbg_rx_fifo_curr_overflow = rtw_read16(adapter, REG_RXERR_RPT);
+ pdbgpriv->dbg_rx_fifo_diff_overflow = pdbgpriv->dbg_rx_fifo_curr_overflow-pdbgpriv->dbg_rx_fifo_last_overflow;
+ }
+}
+
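+/*
+ * While link-info dumping is enabled, LPS and IPS power saving are
+ * forced off (the original modes are saved first) and are restored
+ * again when dumping is disabled.
+ */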
+void linked_info_dump(struct adapter *padapter, u8 benable)
+{
+ struct pwrctrl_priv *pwrctrlpriv = adapter_to_pwrctl(padapter);
+
+ if (padapter->bLinkInfoDump == benable)
+ return;
+
+ DBG_871X("%s %s\n", __func__, (benable) ? "enable" : "disable");
+
+ if (benable) {
+ pwrctrlpriv->org_power_mgnt = pwrctrlpriv->power_mgnt;/* keep org value */
+ rtw_pm_set_lps(padapter, PS_MODE_ACTIVE);
+
+ pwrctrlpriv->ips_org_mode = pwrctrlpriv->ips_mode;/* keep org value */
+ rtw_pm_set_ips(padapter, IPS_NONE);
+ } else {
+ rtw_pm_set_ips(padapter, pwrctrlpriv->ips_org_mode);
+
+ rtw_pm_set_lps(padapter, pwrctrlpriv->org_power_mgnt);/* restore org value */
+ }
+ padapter->bLinkInfoDump = benable;
+}
+
+#ifdef DBG_RX_SIGNAL_DISPLAY_RAW_DATA
+void rtw_get_raw_rssi_info(void *sel, struct adapter *padapter)
+{
+ u8 isCCKrate, rf_path;
+ struct hal_com_data *pHalData = GET_HAL_DATA(padapter);
+ struct rx_raw_rssi *psample_pkt_rssi = &padapter->recvpriv.raw_rssi_info;
+
+ DBG_871X_SEL_NL(
+ sel,
+ "RxRate = %s, PWDBALL = %d(%%), rx_pwr_all = %d(dBm)\n",
+ HDATA_RATE(psample_pkt_rssi->data_rate),
+ psample_pkt_rssi->pwdball, psample_pkt_rssi->pwr_all
+ );
+
+ isCCKrate = (psample_pkt_rssi->data_rate <= DESC_RATE11M) ? true : false;
+
+ if (isCCKrate)
+ psample_pkt_rssi->mimo_singal_strength[0] = psample_pkt_rssi->pwdball;
+
+ for (rf_path = 0; rf_path < pHalData->NumTotalRFPath; rf_path++) {
+ DBG_871X_SEL_NL(
+ sel,
+ "RF_PATH_%d =>singal_strength:%d(%%), singal_quality:%d(%%)\n",
+ rf_path, psample_pkt_rssi->mimo_singal_strength[rf_path],
+ psample_pkt_rssi->mimo_singal_quality[rf_path]
+ );
+
+ if (!isCCKrate) {
+ DBG_871X_SEL_NL(
+ sel,
+ "\trx_ofdm_pwr:%d(dBm), rx_ofdm_snr:%d(dB)\n",
+ psample_pkt_rssi->ofdm_pwr[rf_path],
+ psample_pkt_rssi->ofdm_snr[rf_path]
+ );
+ }
+ }
+}
+
+void rtw_dump_raw_rssi_info(struct adapter *padapter)
+{
+ u8 isCCKrate, rf_path;
+ struct hal_com_data *pHalData = GET_HAL_DATA(padapter);
+ struct rx_raw_rssi *psample_pkt_rssi = &padapter->recvpriv.raw_rssi_info;
+ DBG_871X("============ RAW Rx Info dump ===================\n");
+ DBG_871X("RxRate = %s, PWDBALL = %d(%%), rx_pwr_all = %d(dBm)\n",
+ HDATA_RATE(psample_pkt_rssi->data_rate), psample_pkt_rssi->pwdball, psample_pkt_rssi->pwr_all);
+
+ isCCKrate = (psample_pkt_rssi->data_rate <= DESC_RATE11M) ? true : false;
+
+ if (isCCKrate)
+ psample_pkt_rssi->mimo_singal_strength[0] = psample_pkt_rssi->pwdball;
+
+ for (rf_path = 0; rf_path < pHalData->NumTotalRFPath; rf_path++) {
+ DBG_871X("RF_PATH_%d =>singal_strength:%d(%%), singal_quality:%d(%%)"
+ , rf_path, psample_pkt_rssi->mimo_singal_strength[rf_path], psample_pkt_rssi->mimo_singal_quality[rf_path]);
+
+ if (!isCCKrate) {
+ printk(", rx_ofdm_pwr:%d(dBm), rx_ofdm_snr:%d(dB)\n",
+ psample_pkt_rssi->ofdm_pwr[rf_path], psample_pkt_rssi->ofdm_snr[rf_path]);
+ } else {
+ printk("\n");
+ }
+ }
+}
+
+void rtw_store_phy_info(struct adapter *padapter, union recv_frame *prframe)
+{
+ u8 isCCKrate, rf_path;
+ struct hal_com_data *pHalData = GET_HAL_DATA(padapter);
+ struct rx_pkt_attrib *pattrib = &prframe->u.hdr.attrib;
+
+ PODM_PHY_INFO_T pPhyInfo = (PODM_PHY_INFO_T)(&pattrib->phy_info);
+ struct rx_raw_rssi *psample_pkt_rssi = &padapter->recvpriv.raw_rssi_info;
+
+ psample_pkt_rssi->data_rate = pattrib->data_rate;
+ isCCKrate = (pattrib->data_rate <= DESC_RATE11M) ? true : false;
+
+ psample_pkt_rssi->pwdball = pPhyInfo->RxPWDBAll;
+ psample_pkt_rssi->pwr_all = pPhyInfo->RecvSignalPower;
+
+ for (rf_path = 0; rf_path < pHalData->NumTotalRFPath; rf_path++) {
+ psample_pkt_rssi->mimo_singal_strength[rf_path] = pPhyInfo->RxMIMOSignalStrength[rf_path];
+ psample_pkt_rssi->mimo_singal_quality[rf_path] = pPhyInfo->RxMIMOSignalQuality[rf_path];
+ if (!isCCKrate) {
+ psample_pkt_rssi->ofdm_pwr[rf_path] = pPhyInfo->RxPwr[rf_path];
+ psample_pkt_rssi->ofdm_snr[rf_path] = pPhyInfo->RxSNR[rf_path];
+ }
+ }
+}
+#endif
+
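+/*
+ * Kfree (RF gain offset) lookup table: pairs of
+ * { EEPROMRFGainVal, register value }.  The matching value is written
+ * into the BIT15-BIT18 field of REG_RF_BB_GAIN_OFFSET by
+ * rtw_bb_rf_gain_offset() below.
+ */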
+static u32 Array_kfreemap[] = {
+ 0xf8, 0xe,
+ 0xf6, 0xc,
+ 0xf4, 0xa,
+ 0xf2, 0x8,
+ 0xf0, 0x6,
+ 0xf3, 0x4,
+ 0xf5, 0x2,
+ 0xf7, 0x0,
+ 0xf9, 0x0,
+ 0xfc, 0x0,
+};
+
+void rtw_bb_rf_gain_offset(struct adapter *padapter)
+{
+ u8 value = padapter->eeprompriv.EEPROMRFGainOffset;
+ u32 res, i = 0;
+ u32 ArrayLen = sizeof(Array_kfreemap)/sizeof(u32);
+ u32 *Array = Array_kfreemap;
+ u32 v1 = 0, v2 = 0, target = 0;
+ /* DBG_871X("+%s value: 0x%02x+\n", __func__, value); */
+
+ if (value & BIT4) {
+ DBG_871X("Offset RF Gain.\n");
+ DBG_871X("Offset RF Gain. padapter->eeprompriv.EEPROMRFGainVal = 0x%x\n", padapter->eeprompriv.EEPROMRFGainVal);
+ if (padapter->eeprompriv.EEPROMRFGainVal != 0xff) {
+ res = rtw_hal_read_rfreg(padapter, RF_PATH_A, 0x7f, 0xffffffff);
+ res &= 0xfff87fff;
+ DBG_871X("Offset RF Gain. before reg 0x7f = 0x%08x\n", res);
+ /* res &= 0xfff87fff; */
+ for (i = 0; i < ArrayLen; i += 2) {
+ v1 = Array[i];
+ v2 = Array[i+1];
+ if (v1 == padapter->eeprompriv.EEPROMRFGainVal) {
+ DBG_871X("Offset RF Gain. got v1 = 0x%x , v2 = 0x%x\n", v1, v2);
+ target = v2;
+ break;
+ }
+ }
+ DBG_871X("padapter->eeprompriv.EEPROMRFGainVal = 0x%x , Gain offset Target Value = 0x%x\n", padapter->eeprompriv.EEPROMRFGainVal, target);
+ PHY_SetRFReg(padapter, RF_PATH_A, REG_RF_BB_GAIN_OFFSET, BIT18|BIT17|BIT16|BIT15, target);
+
+ /* res |= (padapter->eeprompriv.EEPROMRFGainVal & 0x0f)<< 15; */
+ /* rtw_hal_write_rfreg(padapter, RF_PATH_A, REG_RF_BB_GAIN_OFFSET, RF_GAIN_OFFSET_MASK, res); */
+ res = rtw_hal_read_rfreg(padapter, RF_PATH_A, 0x7f, 0xffffffff);
+ DBG_871X("Offset RF Gain. After reg 0x7f = 0x%08x\n", res);
+ } else
+ DBG_871X("Offset RF Gain. padapter->eeprompriv.EEPROMRFGainVal = 0x%x != 0xff, didn't run Kfree\n", padapter->eeprompriv.EEPROMRFGainVal);
+ } else
+ DBG_871X("Using the default RF gain.\n");
+}
diff --git a/drivers/staging/rtl8723bs/hal/hal_com_phycfg.c b/drivers/staging/rtl8723bs/hal/hal_com_phycfg.c
new file mode 100644
index 000000000000..566b6f0997da
--- /dev/null
+++ b/drivers/staging/rtl8723bs/hal/hal_com_phycfg.c
@@ -0,0 +1,3286 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#define _HAL_COM_PHYCFG_C_
+
+#include <drv_types.h>
+#include <rtw_debug.h>
+#include <hal_data.h>
+
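+/*
+ * TxPwrByRateBase2_4G/5G hold one reference power index per RF path, TX
+ * number and rate section.  For 2.4G the last index runs CCK, OFDM,
+ * HT MCS0-7/8-15/16-23/24-31 and VHT 1SS-4SS (0..9); the 5G table uses
+ * the same layout without the CCK entry.
+ */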
+u8 PHY_GetTxPowerByRateBase(struct adapter *Adapter, u8 Band, u8 RfPath,
+ u8 TxNum, enum RATE_SECTION RateSection)
+{
+ struct hal_com_data *pHalData = GET_HAL_DATA(Adapter);
+ u8 value = 0;
+
+ if (RfPath > ODM_RF_PATH_D) {
+ DBG_871X("Invalid Rf Path %d in PHY_GetTxPowerByRateBase()\n", RfPath);
+ return 0;
+ }
+
+ if (Band == BAND_ON_2_4G) {
+ switch (RateSection) {
+ case CCK:
+ value = pHalData->TxPwrByRateBase2_4G[RfPath][TxNum][0];
+ break;
+ case OFDM:
+ value = pHalData->TxPwrByRateBase2_4G[RfPath][TxNum][1];
+ break;
+ case HT_MCS0_MCS7:
+ value = pHalData->TxPwrByRateBase2_4G[RfPath][TxNum][2];
+ break;
+ case HT_MCS8_MCS15:
+ value = pHalData->TxPwrByRateBase2_4G[RfPath][TxNum][3];
+ break;
+ case HT_MCS16_MCS23:
+ value = pHalData->TxPwrByRateBase2_4G[RfPath][TxNum][4];
+ break;
+ case HT_MCS24_MCS31:
+ value = pHalData->TxPwrByRateBase2_4G[RfPath][TxNum][5];
+ break;
+ case VHT_1SSMCS0_1SSMCS9:
+ value = pHalData->TxPwrByRateBase2_4G[RfPath][TxNum][6];
+ break;
+ case VHT_2SSMCS0_2SSMCS9:
+ value = pHalData->TxPwrByRateBase2_4G[RfPath][TxNum][7];
+ break;
+ case VHT_3SSMCS0_3SSMCS9:
+ value = pHalData->TxPwrByRateBase2_4G[RfPath][TxNum][8];
+ break;
+ case VHT_4SSMCS0_4SSMCS9:
+ value = pHalData->TxPwrByRateBase2_4G[RfPath][TxNum][9];
+ break;
+ default:
+ DBG_871X("Invalid RateSection %d in Band 2.4G, Rf Path %d, %dTx in PHY_GetTxPowerByRateBase()\n",
+ RateSection, RfPath, TxNum);
+ break;
+
+ };
+ } else if (Band == BAND_ON_5G) {
+ switch (RateSection) {
+ case OFDM:
+ value = pHalData->TxPwrByRateBase5G[RfPath][TxNum][0];
+ break;
+ case HT_MCS0_MCS7:
+ value = pHalData->TxPwrByRateBase5G[RfPath][TxNum][1];
+ break;
+ case HT_MCS8_MCS15:
+ value = pHalData->TxPwrByRateBase5G[RfPath][TxNum][2];
+ break;
+ case HT_MCS16_MCS23:
+ value = pHalData->TxPwrByRateBase5G[RfPath][TxNum][3];
+ break;
+ case HT_MCS24_MCS31:
+ value = pHalData->TxPwrByRateBase5G[RfPath][TxNum][4];
+ break;
+ case VHT_1SSMCS0_1SSMCS9:
+ value = pHalData->TxPwrByRateBase5G[RfPath][TxNum][5];
+ break;
+ case VHT_2SSMCS0_2SSMCS9:
+ value = pHalData->TxPwrByRateBase5G[RfPath][TxNum][6];
+ break;
+ case VHT_3SSMCS0_3SSMCS9:
+ value = pHalData->TxPwrByRateBase5G[RfPath][TxNum][7];
+ break;
+ case VHT_4SSMCS0_4SSMCS9:
+ value = pHalData->TxPwrByRateBase5G[RfPath][TxNum][8];
+ break;
+ default:
+ DBG_871X("Invalid RateSection %d in Band 5G, Rf Path %d, %dTx in PHY_GetTxPowerByRateBase()\n",
+ RateSection, RfPath, TxNum);
+ break;
+ };
+ } else
+ DBG_871X("Invalid Band %d in PHY_GetTxPowerByRateBase()\n", Band);
+
+ return value;
+}
+
+static void
+phy_SetTxPowerByRateBase(
+ struct adapter *Adapter,
+ u8 Band,
+ u8 RfPath,
+ enum RATE_SECTION RateSection,
+ u8 TxNum,
+ u8 Value
+)
+{
+ struct hal_com_data *pHalData = GET_HAL_DATA(Adapter);
+
+ if (RfPath > ODM_RF_PATH_D) {
+ DBG_871X("Invalid Rf Path %d in phy_SetTxPowerByRatBase()\n", RfPath);
+ return;
+ }
+
+ if (Band == BAND_ON_2_4G) {
+ switch (RateSection) {
+ case CCK:
+ pHalData->TxPwrByRateBase2_4G[RfPath][TxNum][0] = Value;
+ break;
+ case OFDM:
+ pHalData->TxPwrByRateBase2_4G[RfPath][TxNum][1] = Value;
+ break;
+ case HT_MCS0_MCS7:
+ pHalData->TxPwrByRateBase2_4G[RfPath][TxNum][2] = Value;
+ break;
+ case HT_MCS8_MCS15:
+ pHalData->TxPwrByRateBase2_4G[RfPath][TxNum][3] = Value;
+ break;
+ case HT_MCS16_MCS23:
+ pHalData->TxPwrByRateBase2_4G[RfPath][TxNum][4] = Value;
+ break;
+ case HT_MCS24_MCS31:
+ pHalData->TxPwrByRateBase2_4G[RfPath][TxNum][5] = Value;
+ break;
+ case VHT_1SSMCS0_1SSMCS9:
+ pHalData->TxPwrByRateBase2_4G[RfPath][TxNum][6] = Value;
+ break;
+ case VHT_2SSMCS0_2SSMCS9:
+ pHalData->TxPwrByRateBase2_4G[RfPath][TxNum][7] = Value;
+ break;
+ case VHT_3SSMCS0_3SSMCS9:
+ pHalData->TxPwrByRateBase2_4G[RfPath][TxNum][8] = Value;
+ break;
+ case VHT_4SSMCS0_4SSMCS9:
+ pHalData->TxPwrByRateBase2_4G[RfPath][TxNum][9] = Value;
+ break;
+ default:
+ DBG_871X("Invalid RateSection %d in Band 2.4G, Rf Path %d, %dTx in phy_SetTxPowerByRateBase()\n",
+ RateSection, RfPath, TxNum);
+ break;
+ };
+ } else if (Band == BAND_ON_5G) {
+ switch (RateSection) {
+ case OFDM:
+ pHalData->TxPwrByRateBase5G[RfPath][TxNum][0] = Value;
+ break;
+ case HT_MCS0_MCS7:
+ pHalData->TxPwrByRateBase5G[RfPath][TxNum][1] = Value;
+ break;
+ case HT_MCS8_MCS15:
+ pHalData->TxPwrByRateBase5G[RfPath][TxNum][2] = Value;
+ break;
+ case HT_MCS16_MCS23:
+ pHalData->TxPwrByRateBase5G[RfPath][TxNum][3] = Value;
+ break;
+ case HT_MCS24_MCS31:
+ pHalData->TxPwrByRateBase5G[RfPath][TxNum][4] = Value;
+ break;
+ case VHT_1SSMCS0_1SSMCS9:
+ pHalData->TxPwrByRateBase5G[RfPath][TxNum][5] = Value;
+ break;
+ case VHT_2SSMCS0_2SSMCS9:
+ pHalData->TxPwrByRateBase5G[RfPath][TxNum][6] = Value;
+ break;
+ case VHT_3SSMCS0_3SSMCS9:
+ pHalData->TxPwrByRateBase5G[RfPath][TxNum][7] = Value;
+ break;
+ case VHT_4SSMCS0_4SSMCS9:
+ pHalData->TxPwrByRateBase5G[RfPath][TxNum][8] = Value;
+ break;
+ default:
+ DBG_871X("Invalid RateSection %d in Band 5G, Rf Path %d, %dTx in phy_SetTxPowerByRateBase()\n",
+ RateSection, RfPath, TxNum);
+ break;
+ };
+ } else
+ DBG_871X("Invalid Band %d in phy_SetTxPowerByRateBase()\n", Band);
+}
+
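+/*
+ * Record the per-section reference powers: for each path, the power of
+ * the highest rate of every section (11M for CCK, 54M for OFDM,
+ * MCS7/15/23 for HT, MCS7 of each VHT stream count) is stored as that
+ * section's base value via phy_SetTxPowerByRateBase().
+ */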
+static void
+phy_StoreTxPowerByRateBase(
+struct adapter *padapter
+ )
+{
+ u8 path, base;
+
+ /* DBG_871X("===>%s\n", __func__); */
+
+ for (path = ODM_RF_PATH_A; path <= ODM_RF_PATH_B; ++path) {
+ base = PHY_GetTxPowerByRate(padapter, BAND_ON_2_4G, path, RF_1TX, MGN_11M);
+ phy_SetTxPowerByRateBase(padapter, BAND_ON_2_4G, path, CCK, RF_1TX, base);
+ /* DBG_871X("Power index base of 2.4G path %d 1Tx CCK = > 0x%x\n", path, base); */
+
+ base = PHY_GetTxPowerByRate(padapter, BAND_ON_2_4G, path, RF_1TX, MGN_54M);
+ phy_SetTxPowerByRateBase(padapter, BAND_ON_2_4G, path, OFDM, RF_1TX, base);
+ /* DBG_871X("Power index base of 2.4G path %d 1Tx OFDM = > 0x%x\n", path, base); */
+
+ base = PHY_GetTxPowerByRate(padapter, BAND_ON_2_4G, path, RF_1TX, MGN_MCS7);
+ phy_SetTxPowerByRateBase(padapter, BAND_ON_2_4G, path, HT_MCS0_MCS7, RF_1TX, base);
+ /* DBG_871X("Power index base of 2.4G path %d 1Tx MCS0-7 = > 0x%x\n", path, base); */
+
+ base = PHY_GetTxPowerByRate(padapter, BAND_ON_2_4G, path, RF_2TX, MGN_MCS15);
+ phy_SetTxPowerByRateBase(padapter, BAND_ON_2_4G, path, HT_MCS8_MCS15, RF_2TX, base);
+ /* DBG_871X("Power index base of 2.4G path %d 2Tx MCS8-15 = > 0x%x\n", path, base); */
+
+ base = PHY_GetTxPowerByRate(padapter, BAND_ON_2_4G, path, RF_3TX, MGN_MCS23);
+ phy_SetTxPowerByRateBase(padapter, BAND_ON_2_4G, path, HT_MCS16_MCS23, RF_3TX, base);
+ /* DBG_871X("Power index base of 2.4G path %d 3Tx MCS16-23 = > 0x%x\n", path, base); */
+
+ base = PHY_GetTxPowerByRate(padapter, BAND_ON_2_4G, path, RF_1TX, MGN_VHT1SS_MCS7);
+ phy_SetTxPowerByRateBase(padapter, BAND_ON_2_4G, path, VHT_1SSMCS0_1SSMCS9, RF_1TX, base);
+ /* DBG_871X("Power index base of 2.4G path %d 1Tx VHT1SS = > 0x%x\n", path, base); */
+
+ base = PHY_GetTxPowerByRate(padapter, BAND_ON_2_4G, path, RF_2TX, MGN_VHT2SS_MCS7);
+ phy_SetTxPowerByRateBase(padapter, BAND_ON_2_4G, path, VHT_2SSMCS0_2SSMCS9, RF_2TX, base);
+ /* DBG_871X("Power index base of 2.4G path %d 2Tx VHT2SS = > 0x%x\n", path, base); */
+
+ base = PHY_GetTxPowerByRate(padapter, BAND_ON_2_4G, path, RF_3TX, MGN_VHT3SS_MCS7);
+ phy_SetTxPowerByRateBase(padapter, BAND_ON_2_4G, path, VHT_3SSMCS0_3SSMCS9, RF_3TX, base);
+ /* DBG_871X("Power index base of 2.4G path %d 3Tx VHT3SS = > 0x%x\n", path, base); */
+
+ base = PHY_GetTxPowerByRate(padapter, BAND_ON_5G, path, RF_1TX, MGN_54M);
+ phy_SetTxPowerByRateBase(padapter, BAND_ON_5G, path, OFDM, RF_1TX, base);
+ /* DBG_871X("Power index base of 5G path %d 1Tx OFDM = > 0x%x\n", path, base); */
+
+ base = PHY_GetTxPowerByRate(padapter, BAND_ON_5G, path, RF_1TX, MGN_MCS7);
+ phy_SetTxPowerByRateBase(padapter, BAND_ON_5G, path, HT_MCS0_MCS7, RF_1TX, base);
+ /* DBG_871X("Power index base of 5G path %d 1Tx MCS0~7 = > 0x%x\n", path, base); */
+
+ base = PHY_GetTxPowerByRate(padapter, BAND_ON_5G, path, RF_2TX, MGN_MCS15);
+ phy_SetTxPowerByRateBase(padapter, BAND_ON_5G, path, HT_MCS8_MCS15, RF_2TX, base);
+ /* DBG_871X("Power index base of 5G path %d 2Tx MCS8~15 = > 0x%x\n", path, base); */
+
+ base = PHY_GetTxPowerByRate(padapter, BAND_ON_5G, path, RF_3TX, MGN_MCS23);
+ phy_SetTxPowerByRateBase(padapter, BAND_ON_5G, path, HT_MCS16_MCS23, RF_3TX, base);
+ /* DBG_871X("Power index base of 5G path %d 3Tx MCS16~23 = > 0x%x\n", path, base); */
+
+ base = PHY_GetTxPowerByRate(padapter, BAND_ON_5G, path, RF_1TX, MGN_VHT1SS_MCS7);
+ phy_SetTxPowerByRateBase(padapter, BAND_ON_5G, path, VHT_1SSMCS0_1SSMCS9, RF_1TX, base);
+ /* DBG_871X("Power index base of 5G path %d 1Tx VHT1SS = > 0x%x\n", path, base); */
+
+ base = PHY_GetTxPowerByRate(padapter, BAND_ON_5G, path, RF_2TX, MGN_VHT2SS_MCS7);
+ phy_SetTxPowerByRateBase(padapter, BAND_ON_5G, path, VHT_2SSMCS0_2SSMCS9, RF_2TX, base);
+ /* DBG_871X("Power index base of 5G path %d 2Tx VHT2SS = > 0x%x\n", path, base); */
+
+ base = PHY_GetTxPowerByRate(padapter, BAND_ON_5G, path, RF_3TX, MGN_VHT3SS_MCS7);
+ phy_SetTxPowerByRateBase(padapter, BAND_ON_5G, path, VHT_3SSMCS0_3SSMCS9, RF_3TX, base);
+ /* DBG_871X("Power index base of 5G path %d 3Tx VHT3SS = > 0x%x\n", path, base); */
+ }
+
+ /* DBG_871X("<===%s\n", __func__); */
+}
+
+u8 PHY_GetRateSectionIndexOfTxPowerByRate(
+ struct adapter *padapter, u32 RegAddr, u32 BitMask
+)
+{
+ struct hal_com_data *pHalData = GET_HAL_DATA(padapter);
+ PDM_ODM_T pDM_Odm = &pHalData->odmpriv;
+ u8 index = 0;
+
+ if (pDM_Odm->PhyRegPgVersion == 0) {
+ switch (RegAddr) {
+ case rTxAGC_A_Rate18_06:
+ index = 0;
+ break;
+ case rTxAGC_A_Rate54_24:
+ index = 1;
+ break;
+ case rTxAGC_A_CCK1_Mcs32:
+ index = 6;
+ break;
+ case rTxAGC_B_CCK11_A_CCK2_11:
+ if (BitMask == bMaskH3Bytes)
+ index = 7;
+ else if (BitMask == 0x000000ff)
+ index = 15;
+ break;
+
+ case rTxAGC_A_Mcs03_Mcs00:
+ index = 2;
+ break;
+ case rTxAGC_A_Mcs07_Mcs04:
+ index = 3;
+ break;
+ case rTxAGC_A_Mcs11_Mcs08:
+ index = 4;
+ break;
+ case rTxAGC_A_Mcs15_Mcs12:
+ index = 5;
+ break;
+ case rTxAGC_B_Rate18_06:
+ index = 8;
+ break;
+ case rTxAGC_B_Rate54_24:
+ index = 9;
+ break;
+ case rTxAGC_B_CCK1_55_Mcs32:
+ index = 14;
+ break;
+ case rTxAGC_B_Mcs03_Mcs00:
+ index = 10;
+ break;
+ case rTxAGC_B_Mcs07_Mcs04:
+ index = 11;
+ break;
+ case rTxAGC_B_Mcs11_Mcs08:
+ index = 12;
+ break;
+ case rTxAGC_B_Mcs15_Mcs12:
+ index = 13;
+ break;
+ default:
+ DBG_871X("Invalid RegAddr 0x3%x in PHY_GetRateSectionIndexOfTxPowerByRate()", RegAddr);
+ break;
+ };
+ }
+
+ return index;
+}
+
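+/*
+ * Decode a PHY_REG_PG power-by-rate register: each byte of Value carries
+ * one rate's power index with the tens digit in the high nibble and the
+ * units digit in the low nibble, i.e. byte 0xNM decodes to N * 10 + M.
+ * RateIndex[]/PwrByRateVal[] return the affected rates and their decoded
+ * values, and *RateNum the number of entries filled in.
+ */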
+void
+PHY_GetRateValuesOfTxPowerByRate(
+ struct adapter *padapter,
+ u32 RegAddr,
+ u32 BitMask,
+ u32 Value,
+ u8 *RateIndex,
+ s8 *PwrByRateVal,
+ u8 *RateNum
+)
+{
+ u8 i = 0;
+
+ switch (RegAddr) {
+ case rTxAGC_A_Rate18_06:
+ case rTxAGC_B_Rate18_06:
+ RateIndex[0] = PHY_GetRateIndexOfTxPowerByRate(MGN_6M);
+ RateIndex[1] = PHY_GetRateIndexOfTxPowerByRate(MGN_9M);
+ RateIndex[2] = PHY_GetRateIndexOfTxPowerByRate(MGN_12M);
+ RateIndex[3] = PHY_GetRateIndexOfTxPowerByRate(MGN_18M);
+ for (i = 0; i < 4; ++i) {
+ PwrByRateVal[i] = (s8) ((((Value >> (i * 8 + 4)) & 0xF)) * 10 +
+ ((Value >> (i * 8)) & 0xF));
+ }
+ *RateNum = 4;
+ break;
+
+ case rTxAGC_A_Rate54_24:
+ case rTxAGC_B_Rate54_24:
+ RateIndex[0] = PHY_GetRateIndexOfTxPowerByRate(MGN_24M);
+ RateIndex[1] = PHY_GetRateIndexOfTxPowerByRate(MGN_36M);
+ RateIndex[2] = PHY_GetRateIndexOfTxPowerByRate(MGN_48M);
+ RateIndex[3] = PHY_GetRateIndexOfTxPowerByRate(MGN_54M);
+ for (i = 0; i < 4; ++i) {
+ PwrByRateVal[i] = (s8) ((((Value >> (i * 8 + 4)) & 0xF)) * 10 +
+ ((Value >> (i * 8)) & 0xF));
+ }
+ *RateNum = 4;
+ break;
+
+ case rTxAGC_A_CCK1_Mcs32:
+ RateIndex[0] = PHY_GetRateIndexOfTxPowerByRate(MGN_1M);
+ PwrByRateVal[0] = (s8) ((((Value >> (8 + 4)) & 0xF)) * 10 +
+ ((Value >> 8) & 0xF));
+ *RateNum = 1;
+ break;
+
+ case rTxAGC_B_CCK11_A_CCK2_11:
+ if (BitMask == 0xffffff00) {
+ RateIndex[0] = PHY_GetRateIndexOfTxPowerByRate(MGN_2M);
+ RateIndex[1] = PHY_GetRateIndexOfTxPowerByRate(MGN_5_5M);
+ RateIndex[2] = PHY_GetRateIndexOfTxPowerByRate(MGN_11M);
+ for (i = 1; i < 4; ++i) {
+ PwrByRateVal[i - 1] = (s8) ((((Value >> (i * 8 + 4)) & 0xF)) * 10 +
+ ((Value >> (i * 8)) & 0xF));
+ }
+ *RateNum = 3;
+ } else if (BitMask == 0x000000ff) {
+ RateIndex[0] = PHY_GetRateIndexOfTxPowerByRate(MGN_11M);
+ PwrByRateVal[0] = (s8) ((((Value >> 4) & 0xF)) * 10 + (Value & 0xF));
+ *RateNum = 1;
+ }
+ break;
+
+ case rTxAGC_A_Mcs03_Mcs00:
+ case rTxAGC_B_Mcs03_Mcs00:
+ RateIndex[0] = PHY_GetRateIndexOfTxPowerByRate(MGN_MCS0);
+ RateIndex[1] = PHY_GetRateIndexOfTxPowerByRate(MGN_MCS1);
+ RateIndex[2] = PHY_GetRateIndexOfTxPowerByRate(MGN_MCS2);
+ RateIndex[3] = PHY_GetRateIndexOfTxPowerByRate(MGN_MCS3);
+ for (i = 0; i < 4; ++i) {
+ PwrByRateVal[i] = (s8) ((((Value >> (i * 8 + 4)) & 0xF)) * 10 +
+ ((Value >> (i * 8)) & 0xF));
+ }
+ *RateNum = 4;
+ break;
+
+ case rTxAGC_A_Mcs07_Mcs04:
+ case rTxAGC_B_Mcs07_Mcs04:
+ RateIndex[0] = PHY_GetRateIndexOfTxPowerByRate(MGN_MCS4);
+ RateIndex[1] = PHY_GetRateIndexOfTxPowerByRate(MGN_MCS5);
+ RateIndex[2] = PHY_GetRateIndexOfTxPowerByRate(MGN_MCS6);
+ RateIndex[3] = PHY_GetRateIndexOfTxPowerByRate(MGN_MCS7);
+ for (i = 0; i < 4; ++i) {
+ PwrByRateVal[i] = (s8) ((((Value >> (i * 8 + 4)) & 0xF)) * 10 +
+ ((Value >> (i * 8)) & 0xF));
+ }
+ *RateNum = 4;
+ break;
+
+ case rTxAGC_A_Mcs11_Mcs08:
+ case rTxAGC_B_Mcs11_Mcs08:
+ RateIndex[0] = PHY_GetRateIndexOfTxPowerByRate(MGN_MCS8);
+ RateIndex[1] = PHY_GetRateIndexOfTxPowerByRate(MGN_MCS9);
+ RateIndex[2] = PHY_GetRateIndexOfTxPowerByRate(MGN_MCS10);
+ RateIndex[3] = PHY_GetRateIndexOfTxPowerByRate(MGN_MCS11);
+ for (i = 0; i < 4; ++i) {
+ PwrByRateVal[i] = (s8) ((((Value >> (i * 8 + 4)) & 0xF)) * 10 +
+ ((Value >> (i * 8)) & 0xF));
+ }
+ *RateNum = 4;
+ break;
+
+ case rTxAGC_A_Mcs15_Mcs12:
+ case rTxAGC_B_Mcs15_Mcs12:
+ RateIndex[0] = PHY_GetRateIndexOfTxPowerByRate(MGN_MCS12);
+ RateIndex[1] = PHY_GetRateIndexOfTxPowerByRate(MGN_MCS13);
+ RateIndex[2] = PHY_GetRateIndexOfTxPowerByRate(MGN_MCS14);
+ RateIndex[3] = PHY_GetRateIndexOfTxPowerByRate(MGN_MCS15);
+ for (i = 0; i < 4; ++i) {
+ PwrByRateVal[i] = (s8) ((((Value >> (i * 8 + 4)) & 0xF)) * 10 +
+ ((Value >> (i * 8)) & 0xF));
+ }
+ *RateNum = 4;
+
+ break;
+
+ case rTxAGC_B_CCK1_55_Mcs32:
+ RateIndex[0] = PHY_GetRateIndexOfTxPowerByRate(MGN_1M);
+ RateIndex[1] = PHY_GetRateIndexOfTxPowerByRate(MGN_2M);
+ RateIndex[2] = PHY_GetRateIndexOfTxPowerByRate(MGN_5_5M);
+ for (i = 1; i < 4; ++i) {
+ PwrByRateVal[i - 1] = (s8) ((((Value >> (i * 8 + 4)) & 0xF)) * 10 +
+ ((Value >> (i * 8)) & 0xF));
+ }
+ *RateNum = 3;
+ break;
+
+ case 0xC20:
+ case 0xE20:
+ case 0x1820:
+ case 0x1a20:
+ RateIndex[0] = PHY_GetRateIndexOfTxPowerByRate(MGN_1M);
+ RateIndex[1] = PHY_GetRateIndexOfTxPowerByRate(MGN_2M);
+ RateIndex[2] = PHY_GetRateIndexOfTxPowerByRate(MGN_5_5M);
+ RateIndex[3] = PHY_GetRateIndexOfTxPowerByRate(MGN_11M);
+ for (i = 0; i < 4; ++i) {
+ PwrByRateVal[i] = (s8) ((((Value >> (i * 8 + 4)) & 0xF)) * 10 +
+ ((Value >> (i * 8)) & 0xF));
+ }
+ *RateNum = 4;
+ break;
+
+ case 0xC24:
+ case 0xE24:
+ case 0x1824:
+ case 0x1a24:
+ RateIndex[0] = PHY_GetRateIndexOfTxPowerByRate(MGN_6M);
+ RateIndex[1] = PHY_GetRateIndexOfTxPowerByRate(MGN_9M);
+ RateIndex[2] = PHY_GetRateIndexOfTxPowerByRate(MGN_12M);
+ RateIndex[3] = PHY_GetRateIndexOfTxPowerByRate(MGN_18M);
+ for (i = 0; i < 4; ++i) {
+ PwrByRateVal[i] = (s8) ((((Value >> (i * 8 + 4)) & 0xF)) * 10 +
+ ((Value >> (i * 8)) & 0xF));
+ }
+ *RateNum = 4;
+ break;
+
+ case 0xC28:
+ case 0xE28:
+ case 0x1828:
+ case 0x1a28:
+ RateIndex[0] = PHY_GetRateIndexOfTxPowerByRate(MGN_24M);
+ RateIndex[1] = PHY_GetRateIndexOfTxPowerByRate(MGN_36M);
+ RateIndex[2] = PHY_GetRateIndexOfTxPowerByRate(MGN_48M);
+ RateIndex[3] = PHY_GetRateIndexOfTxPowerByRate(MGN_54M);
+ for (i = 0; i < 4; ++i) {
+ PwrByRateVal[i] = (s8) ((((Value >> (i * 8 + 4)) & 0xF)) * 10 +
+ ((Value >> (i * 8)) & 0xF));
+ }
+ *RateNum = 4;
+ break;
+
+ case 0xC2C:
+ case 0xE2C:
+ case 0x182C:
+ case 0x1a2C:
+ RateIndex[0] = PHY_GetRateIndexOfTxPowerByRate(MGN_MCS0);
+ RateIndex[1] = PHY_GetRateIndexOfTxPowerByRate(MGN_MCS1);
+ RateIndex[2] = PHY_GetRateIndexOfTxPowerByRate(MGN_MCS2);
+ RateIndex[3] = PHY_GetRateIndexOfTxPowerByRate(MGN_MCS3);
+ for (i = 0; i < 4; ++i) {
+ PwrByRateVal[i] = (s8) ((((Value >> (i * 8 + 4)) & 0xF)) * 10 +
+ ((Value >> (i * 8)) & 0xF));
+ }
+ *RateNum = 4;
+ break;
+
+ case 0xC30:
+ case 0xE30:
+ case 0x1830:
+ case 0x1a30:
+ RateIndex[0] = PHY_GetRateIndexOfTxPowerByRate(MGN_MCS4);
+ RateIndex[1] = PHY_GetRateIndexOfTxPowerByRate(MGN_MCS5);
+ RateIndex[2] = PHY_GetRateIndexOfTxPowerByRate(MGN_MCS6);
+ RateIndex[3] = PHY_GetRateIndexOfTxPowerByRate(MGN_MCS7);
+ for (i = 0; i < 4; ++i) {
+ PwrByRateVal[i] = (s8) ((((Value >> (i * 8 + 4)) & 0xF)) * 10 +
+ ((Value >> (i * 8)) & 0xF));
+ }
+ *RateNum = 4;
+ break;
+
+ case 0xC34:
+ case 0xE34:
+ case 0x1834:
+ case 0x1a34:
+ RateIndex[0] = PHY_GetRateIndexOfTxPowerByRate(MGN_MCS8);
+ RateIndex[1] = PHY_GetRateIndexOfTxPowerByRate(MGN_MCS9);
+ RateIndex[2] = PHY_GetRateIndexOfTxPowerByRate(MGN_MCS10);
+ RateIndex[3] = PHY_GetRateIndexOfTxPowerByRate(MGN_MCS11);
+ for (i = 0; i < 4; ++i) {
+ PwrByRateVal[i] = (s8) ((((Value >> (i * 8 + 4)) & 0xF)) * 10 +
+ ((Value >> (i * 8)) & 0xF));
+ }
+ *RateNum = 4;
+ break;
+
+ case 0xC38:
+ case 0xE38:
+ case 0x1838:
+ case 0x1a38:
+ RateIndex[0] = PHY_GetRateIndexOfTxPowerByRate(MGN_MCS12);
+ RateIndex[1] = PHY_GetRateIndexOfTxPowerByRate(MGN_MCS13);
+ RateIndex[2] = PHY_GetRateIndexOfTxPowerByRate(MGN_MCS14);
+ RateIndex[3] = PHY_GetRateIndexOfTxPowerByRate(MGN_MCS15);
+ for (i = 0; i < 4; ++i) {
+ PwrByRateVal[i] = (s8) ((((Value >> (i * 8 + 4)) & 0xF)) * 10 +
+ ((Value >> (i * 8)) & 0xF));
+ }
+ *RateNum = 4;
+ break;
+
+ case 0xC3C:
+ case 0xE3C:
+ case 0x183C:
+ case 0x1a3C:
+ RateIndex[0] = PHY_GetRateIndexOfTxPowerByRate(MGN_VHT1SS_MCS0);
+ RateIndex[1] = PHY_GetRateIndexOfTxPowerByRate(MGN_VHT1SS_MCS1);
+ RateIndex[2] = PHY_GetRateIndexOfTxPowerByRate(MGN_VHT1SS_MCS2);
+ RateIndex[3] = PHY_GetRateIndexOfTxPowerByRate(MGN_VHT1SS_MCS3);
+ for (i = 0; i < 4; ++i) {
+ PwrByRateVal[i] = (s8) ((((Value >> (i * 8 + 4)) & 0xF)) * 10 +
+ ((Value >> (i * 8)) & 0xF));
+ }
+ *RateNum = 4;
+ break;
+
+ case 0xC40:
+ case 0xE40:
+ case 0x1840:
+ case 0x1a40:
+ RateIndex[0] = PHY_GetRateIndexOfTxPowerByRate(MGN_VHT1SS_MCS4);
+ RateIndex[1] = PHY_GetRateIndexOfTxPowerByRate(MGN_VHT1SS_MCS5);
+ RateIndex[2] = PHY_GetRateIndexOfTxPowerByRate(MGN_VHT1SS_MCS6);
+ RateIndex[3] = PHY_GetRateIndexOfTxPowerByRate(MGN_VHT1SS_MCS7);
+ for (i = 0; i < 4; ++i) {
+ PwrByRateVal[i] = (s8) ((((Value >> (i * 8 + 4)) & 0xF)) * 10 +
+ ((Value >> (i * 8)) & 0xF));
+ }
+ *RateNum = 4;
+ break;
+
+ case 0xC44:
+ case 0xE44:
+ case 0x1844:
+ case 0x1a44:
+ RateIndex[0] = PHY_GetRateIndexOfTxPowerByRate(MGN_VHT1SS_MCS8);
+ RateIndex[1] = PHY_GetRateIndexOfTxPowerByRate(MGN_VHT1SS_MCS9);
+ RateIndex[2] = PHY_GetRateIndexOfTxPowerByRate(MGN_VHT2SS_MCS0);
+ RateIndex[3] = PHY_GetRateIndexOfTxPowerByRate(MGN_VHT2SS_MCS1);
+ for (i = 0; i < 4; ++i) {
+ PwrByRateVal[i] = (s8) ((((Value >> (i * 8 + 4)) & 0xF)) * 10 +
+ ((Value >> (i * 8)) & 0xF));
+ }
+ *RateNum = 4;
+ break;
+
+ case 0xC48:
+ case 0xE48:
+ case 0x1848:
+ case 0x1a48:
+ RateIndex[0] = PHY_GetRateIndexOfTxPowerByRate(MGN_VHT2SS_MCS2);
+ RateIndex[1] = PHY_GetRateIndexOfTxPowerByRate(MGN_VHT2SS_MCS3);
+ RateIndex[2] = PHY_GetRateIndexOfTxPowerByRate(MGN_VHT2SS_MCS4);
+ RateIndex[3] = PHY_GetRateIndexOfTxPowerByRate(MGN_VHT2SS_MCS5);
+ for (i = 0; i < 4; ++i) {
+ PwrByRateVal[i] = (s8) ((((Value >> (i * 8 + 4)) & 0xF)) * 10 +
+ ((Value >> (i * 8)) & 0xF));
+ }
+ *RateNum = 4;
+ break;
+
+ case 0xC4C:
+ case 0xE4C:
+ case 0x184C:
+ case 0x1a4C:
+ RateIndex[0] = PHY_GetRateIndexOfTxPowerByRate(MGN_VHT2SS_MCS6);
+ RateIndex[1] = PHY_GetRateIndexOfTxPowerByRate(MGN_VHT2SS_MCS7);
+ RateIndex[2] = PHY_GetRateIndexOfTxPowerByRate(MGN_VHT2SS_MCS8);
+ RateIndex[3] = PHY_GetRateIndexOfTxPowerByRate(MGN_VHT2SS_MCS9);
+ for (i = 0; i < 4; ++i) {
+ PwrByRateVal[i] = (s8) ((((Value >> (i * 8 + 4)) & 0xF)) * 10 +
+ ((Value >> (i * 8)) & 0xF));
+ }
+ *RateNum = 4;
+ break;
+
+ case 0xCD8:
+ case 0xED8:
+ case 0x18D8:
+ case 0x1aD8:
+ RateIndex[0] = PHY_GetRateIndexOfTxPowerByRate(MGN_MCS16);
+ RateIndex[1] = PHY_GetRateIndexOfTxPowerByRate(MGN_MCS17);
+ RateIndex[2] = PHY_GetRateIndexOfTxPowerByRate(MGN_MCS18);
+ RateIndex[3] = PHY_GetRateIndexOfTxPowerByRate(MGN_MCS19);
+ for (i = 0; i < 4; ++i) {
+ PwrByRateVal[i] = (s8) ((((Value >> (i * 8 + 4)) & 0xF)) * 10 +
+ ((Value >> (i * 8)) & 0xF));
+ }
+ *RateNum = 4;
+ break;
+
+ case 0xCDC:
+ case 0xEDC:
+ case 0x18DC:
+ case 0x1aDC:
+ RateIndex[0] = PHY_GetRateIndexOfTxPowerByRate(MGN_MCS20);
+ RateIndex[1] = PHY_GetRateIndexOfTxPowerByRate(MGN_MCS21);
+ RateIndex[2] = PHY_GetRateIndexOfTxPowerByRate(MGN_MCS22);
+ RateIndex[3] = PHY_GetRateIndexOfTxPowerByRate(MGN_MCS23);
+ for (i = 0; i < 4; ++i) {
+ PwrByRateVal[i] = (s8) ((((Value >> (i * 8 + 4)) & 0xF)) * 10 +
+ ((Value >> (i * 8)) & 0xF));
+ }
+ *RateNum = 4;
+ break;
+
+ case 0xCE0:
+ case 0xEE0:
+ case 0x18E0:
+ case 0x1aE0:
+ RateIndex[0] = PHY_GetRateIndexOfTxPowerByRate(MGN_VHT3SS_MCS0);
+ RateIndex[1] = PHY_GetRateIndexOfTxPowerByRate(MGN_VHT3SS_MCS1);
+ RateIndex[2] = PHY_GetRateIndexOfTxPowerByRate(MGN_VHT3SS_MCS2);
+ RateIndex[3] = PHY_GetRateIndexOfTxPowerByRate(MGN_VHT3SS_MCS3);
+ for (i = 0; i < 4; ++i) {
+ PwrByRateVal[i] = (s8) ((((Value >> (i * 8 + 4)) & 0xF)) * 10 +
+ ((Value >> (i * 8)) & 0xF));
+ }
+ *RateNum = 4;
+ break;
+
+ case 0xCE4:
+ case 0xEE4:
+ case 0x18E4:
+ case 0x1aE4:
+ RateIndex[0] = PHY_GetRateIndexOfTxPowerByRate(MGN_VHT3SS_MCS4);
+ RateIndex[1] = PHY_GetRateIndexOfTxPowerByRate(MGN_VHT3SS_MCS5);
+ RateIndex[2] = PHY_GetRateIndexOfTxPowerByRate(MGN_VHT3SS_MCS6);
+ RateIndex[3] = PHY_GetRateIndexOfTxPowerByRate(MGN_VHT3SS_MCS7);
+ for (i = 0; i < 4; ++i) {
+ PwrByRateVal[i] = (s8) ((((Value >> (i * 8 + 4)) & 0xF)) * 10 +
+ ((Value >> (i * 8)) & 0xF));
+ }
+ *RateNum = 4;
+ break;
+
+ case 0xCE8:
+ case 0xEE8:
+ case 0x18E8:
+ case 0x1aE8:
+ RateIndex[0] = PHY_GetRateIndexOfTxPowerByRate(MGN_VHT3SS_MCS8);
+ RateIndex[1] = PHY_GetRateIndexOfTxPowerByRate(MGN_VHT3SS_MCS9);
+ for (i = 0; i < 2; ++i) {
+ PwrByRateVal[i] = (s8) ((((Value >> (i * 8 + 4)) & 0xF)) * 10 +
+ ((Value >> (i * 8)) & 0xF));
+ }
+ *RateNum = 2; /* only VHT3SS MCS8/9 are carried in this register */
+ break;
+
+ default:
+ DBG_871X("Invalid RegAddr 0x%x in %s()\n", RegAddr, __func__);
+ break;
+ };
+}
+
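+/*
+ * New-format (PhyRegPgVersion > 0) PHY_REG_PG handling: decode the rates
+ * carried by this register and store each value in
+ * TxPwrByRateOffset[Band][RfPath][TxNum][rate].  VHT 2SS MCS0/1 share a
+ * register with 1SS MCS8/9, so their TxNum is forced to RF_2TX.
+ */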
+static void PHY_StoreTxPowerByRateNew(
+ struct adapter *padapter,
+ u32 Band,
+ u32 RfPath,
+ u32 TxNum,
+ u32 RegAddr,
+ u32 BitMask,
+ u32 Data
+)
+{
+ struct hal_com_data *pHalData = GET_HAL_DATA(padapter);
+ u8 i = 0, rateIndex[4] = {0}, rateNum = 0;
+ s8 PwrByRateVal[4] = {0};
+
+ PHY_GetRateValuesOfTxPowerByRate(padapter, RegAddr, BitMask, Data, rateIndex, PwrByRateVal, &rateNum);
+
+ if (Band != BAND_ON_2_4G && Band != BAND_ON_5G) {
+ DBG_871X("Invalid Band %d\n", Band);
+ return;
+ }
+
+ if (RfPath > ODM_RF_PATH_D) {
+ DBG_871X("Invalid RfPath %d\n", RfPath);
+ return;
+ }
+
+ if (TxNum > ODM_RF_PATH_D) {
+ DBG_871X("Invalid TxNum %d\n", TxNum);
+ return;
+ }
+
+ for (i = 0; i < rateNum; ++i) {
+ if (rateIndex[i] == PHY_GetRateIndexOfTxPowerByRate(MGN_VHT2SS_MCS0) ||
+ rateIndex[i] == PHY_GetRateIndexOfTxPowerByRate(MGN_VHT2SS_MCS1))
+ TxNum = RF_2TX;
+
+ pHalData->TxPwrByRateOffset[Band][RfPath][TxNum][rateIndex[i]] = PwrByRateVal[i];
+ }
+}
+
+static void PHY_StoreTxPowerByRateOld(
+ struct adapter *padapter, u32 RegAddr, u32 BitMask, u32 Data
+)
+{
+ struct hal_com_data *pHalData = GET_HAL_DATA(padapter);
+ u8 index = PHY_GetRateSectionIndexOfTxPowerByRate(padapter, RegAddr, BitMask);
+
+ pHalData->MCSTxPowerLevelOriginalOffset[pHalData->pwrGroupCnt][index] = Data;
+ /* DBG_871X("MCSTxPowerLevelOriginalOffset[%d][0] = 0x%x\n", pHalData->pwrGroupCnt, */
+ /* pHalData->MCSTxPowerLevelOriginalOffset[pHalData->pwrGroupCnt][0]); */
+}
+
+void PHY_InitTxPowerByRate(struct adapter *padapter)
+{
+ struct hal_com_data *pHalData = GET_HAL_DATA(padapter);
+ u8 band, rfPath, TxNum, rate;
+
+ for (band = BAND_ON_2_4G; band <= BAND_ON_5G; ++band)
+ for (rfPath = 0; rfPath < TX_PWR_BY_RATE_NUM_RF; ++rfPath)
+ for (TxNum = 0; TxNum < TX_PWR_BY_RATE_NUM_RF; ++TxNum)
+ for (rate = 0; rate < TX_PWR_BY_RATE_NUM_RATE; ++rate)
+ pHalData->TxPwrByRateOffset[band][rfPath][TxNum][rate] = 0;
+}
+
+void PHY_StoreTxPowerByRate(
+ struct adapter *padapter,
+ u32 Band,
+ u32 RfPath,
+ u32 TxNum,
+ u32 RegAddr,
+ u32 BitMask,
+ u32 Data
+)
+{
+ struct hal_com_data *pHalData = GET_HAL_DATA(padapter);
+ PDM_ODM_T pDM_Odm = &pHalData->odmpriv;
+
+ if (pDM_Odm->PhyRegPgVersion > 0)
+ PHY_StoreTxPowerByRateNew(padapter, Band, RfPath, TxNum, RegAddr, BitMask, Data);
+ else if (pDM_Odm->PhyRegPgVersion == 0) {
+ PHY_StoreTxPowerByRateOld(padapter, RegAddr, BitMask, Data);
+
+ if (RegAddr == rTxAGC_A_Mcs15_Mcs12 && pHalData->rf_type == RF_1T1R)
+ pHalData->pwrGroupCnt++;
+ else if (RegAddr == rTxAGC_B_Mcs15_Mcs12 && pHalData->rf_type != RF_1T1R)
+ pHalData->pwrGroupCnt++;
+ } else
+ DBG_871X("Invalid PHY_REG_PG.txt version %d\n", pDM_Odm->PhyRegPgVersion);
+}
+
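+/*
+ * Convert the TxPwrByRateOffset entries from absolute dBm values into
+ * offsets relative to a reference rate of each section: 11M for CCK, 54M
+ * for OFDM, the highest MCS of each HT group and MCS7 of each VHT spatial
+ * stream.
+ */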
+static void phy_ConvertTxPowerByRateInDbmToRelativeValues(struct adapter *padapter)
+{
+ u8 base = 0, i = 0, value = 0, band = 0, path = 0, txNum = 0;
+ u8 cckRates[4] = {
+ MGN_1M, MGN_2M, MGN_5_5M, MGN_11M
+ };
+ u8 ofdmRates[8] = {
+ MGN_6M, MGN_9M, MGN_12M, MGN_18M, MGN_24M, MGN_36M, MGN_48M, MGN_54M
+ };
+ u8 mcs0_7Rates[8] = {
+ MGN_MCS0, MGN_MCS1, MGN_MCS2, MGN_MCS3, MGN_MCS4, MGN_MCS5, MGN_MCS6, MGN_MCS7
+ };
+ u8 mcs8_15Rates[8] = {
+ MGN_MCS8, MGN_MCS9, MGN_MCS10, MGN_MCS11, MGN_MCS12, MGN_MCS13, MGN_MCS14, MGN_MCS15
+ };
+ u8 mcs16_23Rates[8] = {
+ MGN_MCS16, MGN_MCS17, MGN_MCS18, MGN_MCS19, MGN_MCS20, MGN_MCS21, MGN_MCS22, MGN_MCS23
+ };
+ u8 vht1ssRates[10] = {
+ MGN_VHT1SS_MCS0, MGN_VHT1SS_MCS1, MGN_VHT1SS_MCS2, MGN_VHT1SS_MCS3, MGN_VHT1SS_MCS4,
+ MGN_VHT1SS_MCS5, MGN_VHT1SS_MCS6, MGN_VHT1SS_MCS7, MGN_VHT1SS_MCS8, MGN_VHT1SS_MCS9
+ };
+ u8 vht2ssRates[10] = {
+ MGN_VHT2SS_MCS0, MGN_VHT2SS_MCS1, MGN_VHT2SS_MCS2, MGN_VHT2SS_MCS3, MGN_VHT2SS_MCS4,
+ MGN_VHT2SS_MCS5, MGN_VHT2SS_MCS6, MGN_VHT2SS_MCS7, MGN_VHT2SS_MCS8, MGN_VHT2SS_MCS9
+ };
+ u8 vht3ssRates[10] = {
+ MGN_VHT3SS_MCS0, MGN_VHT3SS_MCS1, MGN_VHT3SS_MCS2, MGN_VHT3SS_MCS3, MGN_VHT3SS_MCS4,
+ MGN_VHT3SS_MCS5, MGN_VHT3SS_MCS6, MGN_VHT3SS_MCS7, MGN_VHT3SS_MCS8, MGN_VHT3SS_MCS9
+ };
+
+ /* DBG_871X("===>PHY_ConvertTxPowerByRateInDbmToRelativeValues()\n"); */
+
+ for (band = BAND_ON_2_4G; band <= BAND_ON_5G; ++band) {
+ for (path = ODM_RF_PATH_A; path <= ODM_RF_PATH_D; ++path) {
+ for (txNum = RF_1TX; txNum < RF_MAX_TX_NUM; ++txNum) {
+ /* CCK */
+ base = PHY_GetTxPowerByRate(padapter, band, path, txNum, MGN_11M);
+ for (i = 0; i < sizeof(cckRates); ++i) {
+ value = PHY_GetTxPowerByRate(padapter, band, path, txNum, cckRates[i]);
+ PHY_SetTxPowerByRate(padapter, band, path, txNum, cckRates[i], value - base);
+ }
+
+ /* OFDM */
+ base = PHY_GetTxPowerByRate(padapter, band, path, txNum, MGN_54M);
+ for (i = 0; i < sizeof(ofdmRates); ++i) {
+ value = PHY_GetTxPowerByRate(padapter, band, path, txNum, ofdmRates[i]);
+ PHY_SetTxPowerByRate(padapter, band, path, txNum, ofdmRates[i], value - base);
+ }
+
+ /* HT MCS0~7 */
+ base = PHY_GetTxPowerByRate(padapter, band, path, txNum, MGN_MCS7);
+ for (i = 0; i < sizeof(mcs0_7Rates); ++i) {
+ value = PHY_GetTxPowerByRate(padapter, band, path, txNum, mcs0_7Rates[i]);
+ PHY_SetTxPowerByRate(padapter, band, path, txNum, mcs0_7Rates[i], value - base);
+ }
+
+ /* HT MCS8~15 */
+ base = PHY_GetTxPowerByRate(padapter, band, path, txNum, MGN_MCS15);
+ for (i = 0; i < sizeof(mcs8_15Rates); ++i) {
+ value = PHY_GetTxPowerByRate(padapter, band, path, txNum, mcs8_15Rates[i]);
+ PHY_SetTxPowerByRate(padapter, band, path, txNum, mcs8_15Rates[i], value - base);
+ }
+
+ /* HT MCS16~23 */
+ base = PHY_GetTxPowerByRate(padapter, band, path, txNum, MGN_MCS23);
+ for (i = 0; i < sizeof(mcs16_23Rates); ++i) {
+ value = PHY_GetTxPowerByRate(padapter, band, path, txNum, mcs16_23Rates[i]);
+ PHY_SetTxPowerByRate(padapter, band, path, txNum, mcs16_23Rates[i], value - base);
+ }
+
+ /* VHT 1SS */
+ base = PHY_GetTxPowerByRate(padapter, band, path, txNum, MGN_VHT1SS_MCS7);
+ for (i = 0; i < sizeof(vht1ssRates); ++i) {
+ value = PHY_GetTxPowerByRate(padapter, band, path, txNum, vht1ssRates[i]);
+ PHY_SetTxPowerByRate(padapter, band, path, txNum, vht1ssRates[i], value - base);
+ }
+
+ /* VHT 2SS */
+ base = PHY_GetTxPowerByRate(padapter, band, path, txNum, MGN_VHT2SS_MCS7);
+ for (i = 0; i < sizeof(vht2ssRates); ++i) {
+ value = PHY_GetTxPowerByRate(padapter, band, path, txNum, vht2ssRates[i]);
+ PHY_SetTxPowerByRate(padapter, band, path, txNum, vht2ssRates[i], value - base);
+ }
+
+ /* VHT 3SS */
+ base = PHY_GetTxPowerByRate(padapter, band, path, txNum, MGN_VHT3SS_MCS7);
+ for (i = 0; i < sizeof(vht3ssRates); ++i) {
+ value = PHY_GetTxPowerByRate(padapter, band, path, txNum, vht3ssRates[i]);
+ PHY_SetTxPowerByRate(padapter, band, path, txNum, vht3ssRates[i], value - base);
+ }
+ }
+ }
+ }
+
+ /* DBG_871X("<===PHY_ConvertTxPowerByRateInDbmToRelativeValues()\n"); */
+}
+
+/*
+ * This function must be called when the values in PHY_REG_PG.txt (or the
+ * built-in header file) are exact dBm values.
+ */
+void PHY_TxPowerByRateConfiguration(struct adapter *padapter)
+{
+ phy_StoreTxPowerByRateBase(padapter);
+ phy_ConvertTxPowerByRateInDbmToRelativeValues(padapter);
+}
+
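+/*
+ * Program the TX power index of every rate belonging to the given rate
+ * section (CCK, OFDM, HT or VHT group) on one RF path and channel.
+ */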
+void PHY_SetTxPowerIndexByRateSection(
+ struct adapter *padapter, u8 RFPath, u8 Channel, u8 RateSection
+)
+{
+ struct hal_com_data *pHalData = GET_HAL_DATA(padapter);
+
+ if (RateSection == CCK) {
+ u8 cckRates[] = {MGN_1M, MGN_2M, MGN_5_5M, MGN_11M};
+ if (pHalData->CurrentBandType == BAND_ON_2_4G)
+ PHY_SetTxPowerIndexByRateArray(padapter, RFPath, pHalData->CurrentChannelBW, Channel,
+ cckRates, sizeof(cckRates)/sizeof(u8));
+
+ } else if (RateSection == OFDM) {
+ u8 ofdmRates[] = {MGN_6M, MGN_9M, MGN_12M, MGN_18M, MGN_24M, MGN_36M, MGN_48M, MGN_54M};
+ PHY_SetTxPowerIndexByRateArray(padapter, RFPath, pHalData->CurrentChannelBW, Channel,
+ ofdmRates, sizeof(ofdmRates)/sizeof(u8));
+
+ } else if (RateSection == HT_MCS0_MCS7) {
+ u8 htRates1T[] = {MGN_MCS0, MGN_MCS1, MGN_MCS2, MGN_MCS3, MGN_MCS4, MGN_MCS5, MGN_MCS6, MGN_MCS7};
+ PHY_SetTxPowerIndexByRateArray(padapter, RFPath, pHalData->CurrentChannelBW, Channel,
+ htRates1T, sizeof(htRates1T)/sizeof(u8));
+
+ } else if (RateSection == HT_MCS8_MCS15) {
+ u8 htRates2T[] = {MGN_MCS8, MGN_MCS9, MGN_MCS10, MGN_MCS11, MGN_MCS12, MGN_MCS13, MGN_MCS14, MGN_MCS15};
+ PHY_SetTxPowerIndexByRateArray(padapter, RFPath, pHalData->CurrentChannelBW, Channel,
+ htRates2T, sizeof(htRates2T)/sizeof(u8));
+
+ } else if (RateSection == HT_MCS16_MCS23) {
+ u8 htRates3T[] = {MGN_MCS16, MGN_MCS17, MGN_MCS18, MGN_MCS19, MGN_MCS20, MGN_MCS21, MGN_MCS22, MGN_MCS23};
+ PHY_SetTxPowerIndexByRateArray(padapter, RFPath, pHalData->CurrentChannelBW, Channel,
+ htRates3T, sizeof(htRates3T)/sizeof(u8));
+
+ } else if (RateSection == HT_MCS24_MCS31) {
+ u8 htRates4T[] = {MGN_MCS24, MGN_MCS25, MGN_MCS26, MGN_MCS27, MGN_MCS28, MGN_MCS29, MGN_MCS30, MGN_MCS31};
+ PHY_SetTxPowerIndexByRateArray(padapter, RFPath, pHalData->CurrentChannelBW, Channel,
+ htRates4T, sizeof(htRates4T)/sizeof(u8));
+
+ } else if (RateSection == VHT_1SSMCS0_1SSMCS9) {
+ u8 vhtRates1T[] = {MGN_VHT1SS_MCS0, MGN_VHT1SS_MCS1, MGN_VHT1SS_MCS2, MGN_VHT1SS_MCS3, MGN_VHT1SS_MCS4,
+ MGN_VHT1SS_MCS5, MGN_VHT1SS_MCS6, MGN_VHT1SS_MCS7, MGN_VHT1SS_MCS8, MGN_VHT1SS_MCS9};
+ PHY_SetTxPowerIndexByRateArray(padapter, RFPath, pHalData->CurrentChannelBW, Channel,
+ vhtRates1T, sizeof(vhtRates1T)/sizeof(u8));
+
+ } else if (RateSection == VHT_2SSMCS0_2SSMCS9) {
+ u8 vhtRates2T[] = {MGN_VHT2SS_MCS0, MGN_VHT2SS_MCS1, MGN_VHT2SS_MCS2, MGN_VHT2SS_MCS3, MGN_VHT2SS_MCS4,
+ MGN_VHT2SS_MCS5, MGN_VHT2SS_MCS6, MGN_VHT2SS_MCS7, MGN_VHT2SS_MCS8, MGN_VHT2SS_MCS9};
+
+ PHY_SetTxPowerIndexByRateArray(padapter, RFPath, pHalData->CurrentChannelBW, Channel,
+ vhtRates2T, sizeof(vhtRates2T)/sizeof(u8));
+ } else if (RateSection == VHT_3SSMCS0_3SSMCS9) {
+ u8 vhtRates3T[] = {MGN_VHT3SS_MCS0, MGN_VHT3SS_MCS1, MGN_VHT3SS_MCS2, MGN_VHT3SS_MCS3, MGN_VHT3SS_MCS4,
+ MGN_VHT3SS_MCS5, MGN_VHT3SS_MCS6, MGN_VHT3SS_MCS7, MGN_VHT3SS_MCS8, MGN_VHT3SS_MCS9};
+
+ PHY_SetTxPowerIndexByRateArray(padapter, RFPath, pHalData->CurrentChannelBW, Channel,
+ vhtRates3T, sizeof(vhtRates3T)/sizeof(u8));
+ } else if (RateSection == VHT_4SSMCS0_4SSMCS9) {
+ u8 vhtRates4T[] = {MGN_VHT4SS_MCS0, MGN_VHT4SS_MCS1, MGN_VHT4SS_MCS2, MGN_VHT4SS_MCS3, MGN_VHT4SS_MCS4,
+ MGN_VHT4SS_MCS5, MGN_VHT4SS_MCS6, MGN_VHT4SS_MCS7, MGN_VHT4SS_MCS8, MGN_VHT4SS_MCS9};
+
+ PHY_SetTxPowerIndexByRateArray(padapter, RFPath, pHalData->CurrentChannelBW, Channel,
+ vhtRates4T, sizeof(vhtRates4T)/sizeof(u8));
+ } else
+		DBG_871X("Invalid RateSection %d in %s\n", RateSection, __func__);
+}
+
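+/*
+ * Map a channel number to its index in the per-band channel tables.
+ * Returns true for 2.4GHz channels (index = channel - 1) and false for
+ * 5GHz channels, which are looked up in channel5G[].
+ */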
+static bool phy_GetChnlIndex(u8 Channel, u8 *ChannelIdx)
+{
+ u8 channel5G[CHANNEL_MAX_NUMBER_5G] = {
+ 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58, 60, 62, 64, 100, 102,
+ 104, 106, 108, 110, 112, 114, 116, 118, 120, 122, 124, 126, 128, 130,
+ 132, 134, 136, 138, 140, 142, 144, 149, 151, 153, 155, 157, 159, 161,
+ 163, 165, 167, 168, 169, 171, 173, 175, 177
+ };
+ u8 i = 0;
+ bool bIn24G = true;
+
+ if (Channel <= 14) {
+ bIn24G = true;
+ *ChannelIdx = Channel-1;
+ } else {
+ bIn24G = false;
+
+ for (i = 0; i < sizeof(channel5G)/sizeof(u8); ++i) {
+ if (channel5G[i] == Channel) {
+ *ChannelIdx = i;
+ return bIn24G;
+ }
+ }
+ }
+
+ return bIn24G;
+}
+
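+/*
+ * Look up the base TX power index of a rate from the calibration tables
+ * (per-channel CCK or BW40 base), then add the bandwidth and spatial
+ * stream diff values. On 2.4GHz, BW80 reuses the BW40 diffs; on 5GHz,
+ * BW80 has its own base table indexed by the 80MHz center channel.
+ */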
+u8 PHY_GetTxPowerIndexBase(
+ struct adapter *padapter,
+ u8 RFPath,
+ u8 Rate,
+ enum CHANNEL_WIDTH BandWidth,
+ u8 Channel,
+ bool *bIn24G
+)
+{
+ struct hal_com_data *pHalData = GET_HAL_DATA(padapter);
+ u8 i = 0; /* default set to 1S */
+ u8 txPower = 0;
+ u8 chnlIdx = (Channel-1);
+
+ if (HAL_IsLegalChannel(padapter, Channel) == false) {
+ chnlIdx = 0;
+ DBG_871X("Illegal channel!!\n");
+ }
+
+ *bIn24G = phy_GetChnlIndex(Channel, &chnlIdx);
+
+ /* DBG_871X("[%s] Channel Index: %d\n", (*bIn24G?"2.4G":"5G"), chnlIdx); */
+
+ if (*bIn24G) { /* 3 ============================== 2.4 G ============================== */
+ if (IS_CCK_RATE(Rate))
+ txPower = pHalData->Index24G_CCK_Base[RFPath][chnlIdx];
+ else if (MGN_6M <= Rate)
+ txPower = pHalData->Index24G_BW40_Base[RFPath][chnlIdx];
+ else
+ DBG_871X("PHY_GetTxPowerIndexBase: INVALID Rate.\n");
+
+ /* DBG_871X("Base Tx power(RF-%c, Rate #%d, Channel Index %d) = 0x%X\n", */
+ /* ((RFPath == 0)?'A':'B'), Rate, chnlIdx, txPower); */
+
+ /* OFDM-1T */
+ if ((MGN_6M <= Rate && Rate <= MGN_54M) && !IS_CCK_RATE(Rate)) {
+ txPower += pHalData->OFDM_24G_Diff[RFPath][TX_1S];
+ /* DBG_871X("+PowerDiff 2.4G (RF-%c): (OFDM-1T) = (%d)\n", ((RFPath == 0)?'A':'B'), pHalData->OFDM_24G_Diff[RFPath][TX_1S]); */
+ }
+ if (BandWidth == CHANNEL_WIDTH_20) { /* BW20-1S, BW20-2S */
+ if ((MGN_MCS0 <= Rate && Rate <= MGN_MCS31) || (MGN_VHT1SS_MCS0 <= Rate && Rate <= MGN_VHT4SS_MCS9))
+ txPower += pHalData->BW20_24G_Diff[RFPath][TX_1S];
+ if ((MGN_MCS8 <= Rate && Rate <= MGN_MCS31) || (MGN_VHT2SS_MCS0 <= Rate && Rate <= MGN_VHT4SS_MCS9))
+ txPower += pHalData->BW20_24G_Diff[RFPath][TX_2S];
+ if ((MGN_MCS16 <= Rate && Rate <= MGN_MCS31) || (MGN_VHT3SS_MCS0 <= Rate && Rate <= MGN_VHT4SS_MCS9))
+ txPower += pHalData->BW20_24G_Diff[RFPath][TX_3S];
+ if ((MGN_MCS24 <= Rate && Rate <= MGN_MCS31) || (MGN_VHT4SS_MCS0 <= Rate && Rate <= MGN_VHT4SS_MCS9))
+ txPower += pHalData->BW20_24G_Diff[RFPath][TX_4S];
+
+ /* DBG_871X("+PowerDiff 2.4G (RF-%c): (BW20-1S, BW20-2S, BW20-3S, BW20-4S) = (%d, %d, %d, %d)\n", ((RFPath == 0)?'A':(RFPath == 1)?'B':(RFPath ==2)?'C':'D'), */
+ /* pHalData->BW20_24G_Diff[RFPath][TX_1S], pHalData->BW20_24G_Diff[RFPath][TX_2S], */
+ /* pHalData->BW20_24G_Diff[RFPath][TX_3S], pHalData->BW20_24G_Diff[RFPath][TX_4S]); */
+ } else if (BandWidth == CHANNEL_WIDTH_40) { /* BW40-1S, BW40-2S */
+ if ((MGN_MCS0 <= Rate && Rate <= MGN_MCS31) || (MGN_VHT1SS_MCS0 <= Rate && Rate <= MGN_VHT4SS_MCS9))
+ txPower += pHalData->BW40_24G_Diff[RFPath][TX_1S];
+ if ((MGN_MCS8 <= Rate && Rate <= MGN_MCS31) || (MGN_VHT2SS_MCS0 <= Rate && Rate <= MGN_VHT4SS_MCS9))
+ txPower += pHalData->BW40_24G_Diff[RFPath][TX_2S];
+ if ((MGN_MCS16 <= Rate && Rate <= MGN_MCS31) || (MGN_VHT3SS_MCS0 <= Rate && Rate <= MGN_VHT4SS_MCS9))
+ txPower += pHalData->BW40_24G_Diff[RFPath][TX_3S];
+ if ((MGN_MCS24 <= Rate && Rate <= MGN_MCS31) || (MGN_VHT4SS_MCS0 <= Rate && Rate <= MGN_VHT4SS_MCS9))
+ txPower += pHalData->BW40_24G_Diff[RFPath][TX_4S];
+
+ /* DBG_871X("+PowerDiff 2.4G (RF-%c): (BW40-1S, BW40-2S, BW40-3S, BW40-4S) = (%d, %d, %d, %d)\n", ((RFPath == 0)?'A':(RFPath == 1)?'B':(RFPath ==2)?'C':'D'), */
+ /* pHalData->BW40_24G_Diff[RFPath][TX_1S], pHalData->BW40_24G_Diff[RFPath][TX_2S], */
+ /* pHalData->BW40_24G_Diff[RFPath][TX_3S], pHalData->BW40_24G_Diff[RFPath][TX_4S]); */
+ }
+		/* Willis suggested adopting the BW 40M power index while in BW 80 mode */
+ else if (BandWidth == CHANNEL_WIDTH_80) {
+ if ((MGN_MCS0 <= Rate && Rate <= MGN_MCS31) || (MGN_VHT1SS_MCS0 <= Rate && Rate <= MGN_VHT4SS_MCS9))
+ txPower += pHalData->BW40_24G_Diff[RFPath][TX_1S];
+ if ((MGN_MCS8 <= Rate && Rate <= MGN_MCS31) || (MGN_VHT2SS_MCS0 <= Rate && Rate <= MGN_VHT4SS_MCS9))
+ txPower += pHalData->BW40_24G_Diff[RFPath][TX_2S];
+ if ((MGN_MCS16 <= Rate && Rate <= MGN_MCS31) || (MGN_VHT3SS_MCS0 <= Rate && Rate <= MGN_VHT4SS_MCS9))
+ txPower += pHalData->BW40_24G_Diff[RFPath][TX_3S];
+ if ((MGN_MCS24 <= Rate && Rate <= MGN_MCS31) || (MGN_VHT4SS_MCS0 <= Rate && Rate <= MGN_VHT4SS_MCS9))
+ txPower += pHalData->BW40_24G_Diff[RFPath][TX_4S];
+
+ /* DBG_871X("+PowerDiff 2.4G (RF-%c): (BW40-1S, BW40-2S, BW40-3S, BW40-4T) = (%d, %d, %d, %d) P.S. Current is in BW 80MHz\n", ((RFPath == 0)?'A':(RFPath == 1)?'B':(RFPath ==2)?'C':'D'), */
+ /* pHalData->BW40_24G_Diff[RFPath][TX_1S], pHalData->BW40_24G_Diff[RFPath][TX_2S], */
+ /* pHalData->BW40_24G_Diff[RFPath][TX_3S], pHalData->BW40_24G_Diff[RFPath][TX_4S]); */
+ }
+ } else {/* 3 ============================== 5 G ============================== */
+ if (MGN_6M <= Rate)
+ txPower = pHalData->Index5G_BW40_Base[RFPath][chnlIdx];
+ else
+ DBG_871X("===> mpt_ProQueryCalTxPower_Jaguar: INVALID Rate.\n");
+
+ /* DBG_871X("Base Tx power(RF-%c, Rate #%d, Channel Index %d) = 0x%X\n", */
+ /* ((RFPath == 0)?'A':'B'), Rate, chnlIdx, txPower); */
+
+ /* OFDM-1T */
+ if ((MGN_6M <= Rate && Rate <= MGN_54M) && !IS_CCK_RATE(Rate)) {
+ txPower += pHalData->OFDM_5G_Diff[RFPath][TX_1S];
+ /* DBG_871X("+PowerDiff 5G (RF-%c): (OFDM-1T) = (%d)\n", ((RFPath == 0)?'A':'B'), pHalData->OFDM_5G_Diff[RFPath][TX_1S]); */
+ }
+
+ /* BW20-1S, BW20-2S */
+ if (BandWidth == CHANNEL_WIDTH_20) {
+ if ((MGN_MCS0 <= Rate && Rate <= MGN_MCS31) || (MGN_VHT1SS_MCS0 <= Rate && Rate <= MGN_VHT4SS_MCS9))
+ txPower += pHalData->BW20_5G_Diff[RFPath][TX_1S];
+ if ((MGN_MCS8 <= Rate && Rate <= MGN_MCS31) || (MGN_VHT2SS_MCS0 <= Rate && Rate <= MGN_VHT4SS_MCS9))
+ txPower += pHalData->BW20_5G_Diff[RFPath][TX_2S];
+ if ((MGN_MCS16 <= Rate && Rate <= MGN_MCS31) || (MGN_VHT3SS_MCS0 <= Rate && Rate <= MGN_VHT4SS_MCS9))
+ txPower += pHalData->BW20_5G_Diff[RFPath][TX_3S];
+ if ((MGN_MCS24 <= Rate && Rate <= MGN_MCS31) || (MGN_VHT4SS_MCS0 <= Rate && Rate <= MGN_VHT4SS_MCS9))
+ txPower += pHalData->BW20_5G_Diff[RFPath][TX_4S];
+
+ /* DBG_871X("+PowerDiff 5G (RF-%c): (BW20-1S, BW20-2S, BW20-3S, BW20-4S) = (%d, %d, %d, %d)\n", ((RFPath == 0)?'A':(RFPath == 1)?'B':(RFPath ==2)?'C':'D'), */
+ /* pHalData->BW20_5G_Diff[RFPath][TX_1S], pHalData->BW20_5G_Diff[RFPath][TX_2S], */
+ /* pHalData->BW20_5G_Diff[RFPath][TX_3S], pHalData->BW20_5G_Diff[RFPath][TX_4S]); */
+ } else if (BandWidth == CHANNEL_WIDTH_40) { /* BW40-1S, BW40-2S */
+ if ((MGN_MCS0 <= Rate && Rate <= MGN_MCS31) || (MGN_VHT1SS_MCS0 <= Rate && Rate <= MGN_VHT4SS_MCS9))
+ txPower += pHalData->BW40_5G_Diff[RFPath][TX_1S];
+ if ((MGN_MCS8 <= Rate && Rate <= MGN_MCS31) || (MGN_VHT2SS_MCS0 <= Rate && Rate <= MGN_VHT4SS_MCS9))
+ txPower += pHalData->BW40_5G_Diff[RFPath][TX_2S];
+ if ((MGN_MCS16 <= Rate && Rate <= MGN_MCS31) || (MGN_VHT3SS_MCS0 <= Rate && Rate <= MGN_VHT4SS_MCS9))
+ txPower += pHalData->BW40_5G_Diff[RFPath][TX_3S];
+ if ((MGN_MCS24 <= Rate && Rate <= MGN_MCS31) || (MGN_VHT4SS_MCS0 <= Rate && Rate <= MGN_VHT4SS_MCS9))
+ txPower += pHalData->BW40_5G_Diff[RFPath][TX_4S];
+
+ /* DBG_871X("+PowerDiff 5G(RF-%c): (BW40-1S, BW40-2S) = (%d, %d, %d, %d)\n", ((RFPath == 0)?'A':(RFPath == 1)?'B':(RFPath ==2)?'C':'D'), */
+ /* pHalData->BW40_5G_Diff[RFPath][TX_1S], pHalData->BW40_5G_Diff[RFPath][TX_2S], */
+ /* pHalData->BW40_5G_Diff[RFPath][TX_3S], pHalData->BW40_5G_Diff[RFPath][TX_4S]); */
+ } else if (BandWidth == CHANNEL_WIDTH_80) { /* BW80-1S, BW80-2S */
+ /* <20121220, Kordan> Get the index of array "Index5G_BW80_Base". */
+ u8 channel5G_80M[CHANNEL_MAX_NUMBER_5G_80M] = {42, 58, 106, 122, 138, 155, 171};
+ for (i = 0; i < sizeof(channel5G_80M)/sizeof(u8); ++i)
+ if (channel5G_80M[i] == Channel)
+ chnlIdx = i;
+
+ txPower = pHalData->Index5G_BW80_Base[RFPath][chnlIdx];
+
+ if ((MGN_MCS0 <= Rate && Rate <= MGN_MCS31) || (MGN_VHT1SS_MCS0 <= Rate && Rate <= MGN_VHT4SS_MCS9))
+				txPower += pHalData->BW80_5G_Diff[RFPath][TX_1S];
+ if ((MGN_MCS8 <= Rate && Rate <= MGN_MCS31) || (MGN_VHT2SS_MCS0 <= Rate && Rate <= MGN_VHT4SS_MCS9))
+ txPower += pHalData->BW80_5G_Diff[RFPath][TX_2S];
+ if ((MGN_MCS16 <= Rate && Rate <= MGN_MCS31) || (MGN_VHT3SS_MCS0 <= Rate && Rate <= MGN_VHT4SS_MCS9))
+ txPower += pHalData->BW80_5G_Diff[RFPath][TX_3S];
+			if ((MGN_MCS24 <= Rate && Rate <= MGN_MCS31) || (MGN_VHT4SS_MCS0 <= Rate && Rate <= MGN_VHT4SS_MCS9))
+ txPower += pHalData->BW80_5G_Diff[RFPath][TX_4S];
+
+ /* DBG_871X("+PowerDiff 5G(RF-%c): (BW80-1S, BW80-2S, BW80-3S, BW80-4S) = (%d, %d, %d, %d)\n", ((RFPath == 0)?'A':(RFPath == 1)?'B':(RFPath ==2)?'C':'D'), */
+ /* pHalData->BW80_5G_Diff[RFPath][TX_1S], pHalData->BW80_5G_Diff[RFPath][TX_2S], */
+ /* pHalData->BW80_5G_Diff[RFPath][TX_3S], pHalData->BW80_5G_Diff[RFPath][TX_4S]); */
+ }
+ }
+
+ return txPower;
+}
+
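+/*
+ * Return the TX power tracking offset: CCK rates use the CCK remnant swing
+ * index, all other rates the per-path OFDM index. Returns 0 when power
+ * tracking is disabled.
+ */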
+s8 PHY_GetTxPowerTrackingOffset(struct adapter *padapter, u8 RFPath, u8 Rate)
+{
+ struct hal_com_data *pHalData = GET_HAL_DATA(padapter);
+ PDM_ODM_T pDM_Odm = &pHalData->odmpriv;
+ s8 offset = 0;
+
+ if (pDM_Odm->RFCalibrateInfo.TxPowerTrackControl == false)
+ return offset;
+
+ if ((Rate == MGN_1M) || (Rate == MGN_2M) || (Rate == MGN_5_5M) || (Rate == MGN_11M)) {
+ offset = pDM_Odm->Remnant_CCKSwingIdx;
+ /* DBG_871X("+Remnant_CCKSwingIdx = 0x%x\n", RFPath, Rate, pDM_Odm->Remnant_CCKSwingIdx); */
+ } else {
+ offset = pDM_Odm->Remnant_OFDMSwingIdx[RFPath];
+ /* DBG_871X("+Remanant_OFDMSwingIdx[RFPath %u][Rate 0x%x] = 0x%x\n", RFPath, Rate, pDM_Odm->Remnant_OFDMSwingIdx[RFPath]); */
+
+ }
+
+ return offset;
+}
+
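+/*
+ * Map an MGN_* rate code to its slot (0..83) in the power-by-rate tables:
+ * CCK, OFDM, HT MCS0-31, then VHT 1SS-4SS MCS0-9.
+ */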
+u8 PHY_GetRateIndexOfTxPowerByRate(u8 Rate)
+{
+ u8 index = 0;
+ switch (Rate) {
+ case MGN_1M:
+ index = 0;
+ break;
+ case MGN_2M:
+ index = 1;
+ break;
+ case MGN_5_5M:
+ index = 2;
+ break;
+ case MGN_11M:
+ index = 3;
+ break;
+ case MGN_6M:
+ index = 4;
+ break;
+ case MGN_9M:
+ index = 5;
+ break;
+ case MGN_12M:
+ index = 6;
+ break;
+ case MGN_18M:
+ index = 7;
+ break;
+ case MGN_24M:
+ index = 8;
+ break;
+ case MGN_36M:
+ index = 9;
+ break;
+ case MGN_48M:
+ index = 10;
+ break;
+ case MGN_54M:
+ index = 11;
+ break;
+ case MGN_MCS0:
+ index = 12;
+ break;
+ case MGN_MCS1:
+ index = 13;
+ break;
+ case MGN_MCS2:
+ index = 14;
+ break;
+ case MGN_MCS3:
+ index = 15;
+ break;
+ case MGN_MCS4:
+ index = 16;
+ break;
+ case MGN_MCS5:
+ index = 17;
+ break;
+ case MGN_MCS6:
+ index = 18;
+ break;
+ case MGN_MCS7:
+ index = 19;
+ break;
+ case MGN_MCS8:
+ index = 20;
+ break;
+ case MGN_MCS9:
+ index = 21;
+ break;
+ case MGN_MCS10:
+ index = 22;
+ break;
+ case MGN_MCS11:
+ index = 23;
+ break;
+ case MGN_MCS12:
+ index = 24;
+ break;
+ case MGN_MCS13:
+ index = 25;
+ break;
+ case MGN_MCS14:
+ index = 26;
+ break;
+ case MGN_MCS15:
+ index = 27;
+ break;
+ case MGN_MCS16:
+ index = 28;
+ break;
+ case MGN_MCS17:
+ index = 29;
+ break;
+ case MGN_MCS18:
+ index = 30;
+ break;
+ case MGN_MCS19:
+ index = 31;
+ break;
+ case MGN_MCS20:
+ index = 32;
+ break;
+ case MGN_MCS21:
+ index = 33;
+ break;
+ case MGN_MCS22:
+ index = 34;
+ break;
+ case MGN_MCS23:
+ index = 35;
+ break;
+ case MGN_MCS24:
+ index = 36;
+ break;
+ case MGN_MCS25:
+ index = 37;
+ break;
+ case MGN_MCS26:
+ index = 38;
+ break;
+ case MGN_MCS27:
+ index = 39;
+ break;
+ case MGN_MCS28:
+ index = 40;
+ break;
+ case MGN_MCS29:
+ index = 41;
+ break;
+ case MGN_MCS30:
+ index = 42;
+ break;
+ case MGN_MCS31:
+ index = 43;
+ break;
+ case MGN_VHT1SS_MCS0:
+ index = 44;
+ break;
+ case MGN_VHT1SS_MCS1:
+ index = 45;
+ break;
+ case MGN_VHT1SS_MCS2:
+ index = 46;
+ break;
+ case MGN_VHT1SS_MCS3:
+ index = 47;
+ break;
+ case MGN_VHT1SS_MCS4:
+ index = 48;
+ break;
+ case MGN_VHT1SS_MCS5:
+ index = 49;
+ break;
+ case MGN_VHT1SS_MCS6:
+ index = 50;
+ break;
+ case MGN_VHT1SS_MCS7:
+ index = 51;
+ break;
+ case MGN_VHT1SS_MCS8:
+ index = 52;
+ break;
+ case MGN_VHT1SS_MCS9:
+ index = 53;
+ break;
+ case MGN_VHT2SS_MCS0:
+ index = 54;
+ break;
+ case MGN_VHT2SS_MCS1:
+ index = 55;
+ break;
+ case MGN_VHT2SS_MCS2:
+ index = 56;
+ break;
+ case MGN_VHT2SS_MCS3:
+ index = 57;
+ break;
+ case MGN_VHT2SS_MCS4:
+ index = 58;
+ break;
+ case MGN_VHT2SS_MCS5:
+ index = 59;
+ break;
+ case MGN_VHT2SS_MCS6:
+ index = 60;
+ break;
+ case MGN_VHT2SS_MCS7:
+ index = 61;
+ break;
+ case MGN_VHT2SS_MCS8:
+ index = 62;
+ break;
+ case MGN_VHT2SS_MCS9:
+ index = 63;
+ break;
+ case MGN_VHT3SS_MCS0:
+ index = 64;
+ break;
+ case MGN_VHT3SS_MCS1:
+ index = 65;
+ break;
+ case MGN_VHT3SS_MCS2:
+ index = 66;
+ break;
+ case MGN_VHT3SS_MCS3:
+ index = 67;
+ break;
+ case MGN_VHT3SS_MCS4:
+ index = 68;
+ break;
+ case MGN_VHT3SS_MCS5:
+ index = 69;
+ break;
+ case MGN_VHT3SS_MCS6:
+ index = 70;
+ break;
+ case MGN_VHT3SS_MCS7:
+ index = 71;
+ break;
+ case MGN_VHT3SS_MCS8:
+ index = 72;
+ break;
+ case MGN_VHT3SS_MCS9:
+ index = 73;
+ break;
+ case MGN_VHT4SS_MCS0:
+ index = 74;
+ break;
+ case MGN_VHT4SS_MCS1:
+ index = 75;
+ break;
+ case MGN_VHT4SS_MCS2:
+ index = 76;
+ break;
+ case MGN_VHT4SS_MCS3:
+ index = 77;
+ break;
+ case MGN_VHT4SS_MCS4:
+ index = 78;
+ break;
+ case MGN_VHT4SS_MCS5:
+ index = 79;
+ break;
+ case MGN_VHT4SS_MCS6:
+ index = 80;
+ break;
+ case MGN_VHT4SS_MCS7:
+ index = 81;
+ break;
+ case MGN_VHT4SS_MCS8:
+ index = 82;
+ break;
+ case MGN_VHT4SS_MCS9:
+ index = 83;
+ break;
+ default:
+ DBG_871X("Invalid rate 0x%x in %s\n", Rate, __func__);
+ break;
+	}
+
+ return index;
+}
+
+s8 PHY_GetTxPowerByRate(
+ struct adapter *padapter, u8 Band, u8 RFPath, u8 TxNum, u8 Rate
+)
+{
+ struct hal_com_data *pHalData = GET_HAL_DATA(padapter);
+ s8 value = 0;
+ u8 rateIndex = PHY_GetRateIndexOfTxPowerByRate(Rate);
+
+ if ((padapter->registrypriv.RegEnableTxPowerByRate == 2 && pHalData->EEPROMRegulatory == 2) ||
+ padapter->registrypriv.RegEnableTxPowerByRate == 0)
+ return 0;
+
+ if (Band != BAND_ON_2_4G && Band != BAND_ON_5G) {
+ DBG_871X("Invalid band %d in %s\n", Band, __func__);
+ return value;
+ }
+ if (RFPath > ODM_RF_PATH_D) {
+ DBG_871X("Invalid RfPath %d in %s\n", RFPath, __func__);
+ return value;
+ }
+ if (TxNum >= RF_MAX_TX_NUM) {
+ DBG_871X("Invalid TxNum %d in %s\n", TxNum, __func__);
+ return value;
+ }
+ if (rateIndex >= TX_PWR_BY_RATE_NUM_RATE) {
+ DBG_871X("Invalid RateIndex %d in %s\n", rateIndex, __func__);
+ return value;
+ }
+
+ value = pHalData->TxPwrByRateOffset[Band][RFPath][TxNum][rateIndex];
+
+ return value;
+}
+
+void PHY_SetTxPowerByRate(
+ struct adapter *padapter,
+ u8 Band,
+ u8 RFPath,
+ u8 TxNum,
+ u8 Rate,
+ s8 Value
+)
+{
+ struct hal_com_data *pHalData = GET_HAL_DATA(padapter);
+ u8 rateIndex = PHY_GetRateIndexOfTxPowerByRate(Rate);
+
+ if (Band != BAND_ON_2_4G && Band != BAND_ON_5G) {
+ DBG_871X("Invalid band %d in %s\n", Band, __func__);
+ return;
+ }
+ if (RFPath > ODM_RF_PATH_D) {
+ DBG_871X("Invalid RfPath %d in %s\n", RFPath, __func__);
+ return;
+ }
+ if (TxNum >= RF_MAX_TX_NUM) {
+ DBG_871X("Invalid TxNum %d in %s\n", TxNum, __func__);
+ return;
+ }
+ if (rateIndex >= TX_PWR_BY_RATE_NUM_RATE) {
+ DBG_871X("Invalid RateIndex %d in %s\n", rateIndex, __func__);
+ return;
+ }
+
+ pHalData->TxPwrByRateOffset[Band][RFPath][TxNum][rateIndex] = Value;
+}
+
+void PHY_SetTxPowerLevelByPath(struct adapter *Adapter, u8 channel, u8 path)
+{
+ struct hal_com_data *pHalData = GET_HAL_DATA(Adapter);
+ bool bIsIn24G = (pHalData->CurrentBandType == BAND_ON_2_4G);
+
+ /* if (pMgntInfo->RegNByteAccess == 0) */
+ {
+ if (bIsIn24G)
+ PHY_SetTxPowerIndexByRateSection(Adapter, path, channel, CCK);
+
+ PHY_SetTxPowerIndexByRateSection(Adapter, path, channel, OFDM);
+ PHY_SetTxPowerIndexByRateSection(Adapter, path, channel, HT_MCS0_MCS7);
+
+ if (pHalData->NumTotalRFPath >= 2)
+ PHY_SetTxPowerIndexByRateSection(Adapter, path, channel, HT_MCS8_MCS15);
+
+ }
+}
+
+void PHY_SetTxPowerIndexByRateArray(
+ struct adapter *padapter,
+ u8 RFPath,
+ enum CHANNEL_WIDTH BandWidth,
+ u8 Channel,
+ u8 *Rates,
+ u8 RateArraySize
+)
+{
+ u32 powerIndex = 0;
+ int i = 0;
+
+ for (i = 0; i < RateArraySize; ++i) {
+ powerIndex = PHY_GetTxPowerIndex(padapter, RFPath, Rates[i], BandWidth, Channel);
+ PHY_SetTxPowerIndex(padapter, powerIndex, RFPath, Rates[i]);
+ }
+}
+
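+/* The world-wide limit is the strictest (minimum) limit over all regulations. */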
+static s8 phy_GetWorldWideLimit(s8 *LimitTable)
+{
+ s8 min = LimitTable[0];
+ u8 i = 0;
+
+ for (i = 0; i < MAX_REGULATION_NUM; ++i) {
+ if (LimitTable[i] < min)
+ min = LimitTable[i];
+ }
+
+ return min;
+}
+
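+/*
+ * Translate a channel number into the row index used by the TX power limit
+ * tables; returns -1 if the channel is not part of the band.
+ */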
+static s8 phy_GetChannelIndexOfTxPowerLimit(u8 Band, u8 Channel)
+{
+ s8 channelIndex = -1;
+ u8 channel5G[CHANNEL_MAX_NUMBER_5G] = {
+ 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58, 60, 62, 64, 100, 102,
+ 104, 106, 108, 110, 112, 114, 116, 118, 120, 122, 124, 126, 128, 130,
+ 132, 134, 136, 138, 140, 142, 144, 149, 151, 153, 155, 157, 159, 161,
+ 163, 165, 167, 168, 169, 171, 173, 175, 177
+ };
+ u8 i = 0;
+ if (Band == BAND_ON_2_4G)
+ channelIndex = Channel - 1;
+ else if (Band == BAND_ON_5G) {
+ for (i = 0; i < sizeof(channel5G)/sizeof(u8); ++i) {
+ if (channel5G[i] == Channel)
+ channelIndex = i;
+ }
+ } else
+		DBG_871X("Invalid Band %d in %s\n", Band, __func__);
+
+ if (channelIndex == -1)
+		DBG_871X("Invalid Channel %d of Band %d in %s\n", Channel, Band, __func__);
+
+ return channelIndex;
+}
+
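+/*
+ * Look up the regulatory TX power limit for the given band, bandwidth, rate
+ * and channel. Returns MAX_POWER_INDEX (i.e. no limit) when the feature is
+ * disabled or the table indexes cannot be resolved.
+ */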
+s8 PHY_GetTxPowerLimit(
+ struct adapter *Adapter,
+ u32 RegPwrTblSel,
+ enum BAND_TYPE Band,
+ enum CHANNEL_WIDTH Bandwidth,
+ u8 RfPath,
+ u8 DataRate,
+ u8 Channel
+)
+{
+ struct hal_com_data *pHalData = GET_HAL_DATA(Adapter);
+ s16 band = -1, regulation = -1, bandwidth = -1, rateSection = -1, channel = -1;
+ s8 powerLimit = MAX_POWER_INDEX;
+
+ if ((Adapter->registrypriv.RegEnableTxPowerLimit == 2 && pHalData->EEPROMRegulatory != 1) ||
+ Adapter->registrypriv.RegEnableTxPowerLimit == 0)
+ return MAX_POWER_INDEX;
+
+ switch (Adapter->registrypriv.RegPwrTblSel) {
+ case 1:
+ regulation = TXPWR_LMT_ETSI;
+ break;
+ case 2:
+ regulation = TXPWR_LMT_MKK;
+ break;
+ case 3:
+ regulation = TXPWR_LMT_FCC;
+ break;
+
+ case 4:
+ regulation = TXPWR_LMT_WW;
+ break;
+
+ default:
+ regulation = (Band == BAND_ON_2_4G) ? pHalData->Regulation2_4G : pHalData->Regulation5G;
+ break;
+ }
+
+ /* DBG_871X("pMgntInfo->RegPwrTblSel %d, final regulation %d\n", Adapter->registrypriv.RegPwrTblSel, regulation); */
+
+ if (Band == BAND_ON_2_4G)
+ band = 0;
+ else if (Band == BAND_ON_5G)
+ band = 1;
+
+ if (Bandwidth == CHANNEL_WIDTH_20)
+ bandwidth = 0;
+ else if (Bandwidth == CHANNEL_WIDTH_40)
+ bandwidth = 1;
+ else if (Bandwidth == CHANNEL_WIDTH_80)
+ bandwidth = 2;
+ else if (Bandwidth == CHANNEL_WIDTH_160)
+ bandwidth = 3;
+
+ switch (DataRate) {
+ case MGN_1M: case MGN_2M: case MGN_5_5M: case MGN_11M:
+ rateSection = 0;
+ break;
+
+ case MGN_6M: case MGN_9M: case MGN_12M: case MGN_18M:
+ case MGN_24M: case MGN_36M: case MGN_48M: case MGN_54M:
+ rateSection = 1;
+ break;
+
+ case MGN_MCS0: case MGN_MCS1: case MGN_MCS2: case MGN_MCS3:
+ case MGN_MCS4: case MGN_MCS5: case MGN_MCS6: case MGN_MCS7:
+ rateSection = 2;
+ break;
+
+ case MGN_MCS8: case MGN_MCS9: case MGN_MCS10: case MGN_MCS11:
+ case MGN_MCS12: case MGN_MCS13: case MGN_MCS14: case MGN_MCS15:
+ rateSection = 3;
+ break;
+
+ case MGN_MCS16: case MGN_MCS17: case MGN_MCS18: case MGN_MCS19:
+ case MGN_MCS20: case MGN_MCS21: case MGN_MCS22: case MGN_MCS23:
+ rateSection = 4;
+ break;
+
+ case MGN_MCS24: case MGN_MCS25: case MGN_MCS26: case MGN_MCS27:
+ case MGN_MCS28: case MGN_MCS29: case MGN_MCS30: case MGN_MCS31:
+ rateSection = 5;
+ break;
+
+ case MGN_VHT1SS_MCS0: case MGN_VHT1SS_MCS1: case MGN_VHT1SS_MCS2:
+ case MGN_VHT1SS_MCS3: case MGN_VHT1SS_MCS4: case MGN_VHT1SS_MCS5:
+ case MGN_VHT1SS_MCS6: case MGN_VHT1SS_MCS7: case MGN_VHT1SS_MCS8:
+ case MGN_VHT1SS_MCS9:
+ rateSection = 6;
+ break;
+
+ case MGN_VHT2SS_MCS0: case MGN_VHT2SS_MCS1: case MGN_VHT2SS_MCS2:
+ case MGN_VHT2SS_MCS3: case MGN_VHT2SS_MCS4: case MGN_VHT2SS_MCS5:
+ case MGN_VHT2SS_MCS6: case MGN_VHT2SS_MCS7: case MGN_VHT2SS_MCS8:
+ case MGN_VHT2SS_MCS9:
+ rateSection = 7;
+ break;
+
+ case MGN_VHT3SS_MCS0: case MGN_VHT3SS_MCS1: case MGN_VHT3SS_MCS2:
+ case MGN_VHT3SS_MCS3: case MGN_VHT3SS_MCS4: case MGN_VHT3SS_MCS5:
+ case MGN_VHT3SS_MCS6: case MGN_VHT3SS_MCS7: case MGN_VHT3SS_MCS8:
+ case MGN_VHT3SS_MCS9:
+ rateSection = 8;
+ break;
+
+ case MGN_VHT4SS_MCS0: case MGN_VHT4SS_MCS1: case MGN_VHT4SS_MCS2:
+ case MGN_VHT4SS_MCS3: case MGN_VHT4SS_MCS4: case MGN_VHT4SS_MCS5:
+ case MGN_VHT4SS_MCS6: case MGN_VHT4SS_MCS7: case MGN_VHT4SS_MCS8:
+ case MGN_VHT4SS_MCS9:
+ rateSection = 9;
+ break;
+
+ default:
+ DBG_871X("Wrong rate 0x%x\n", DataRate);
+ break;
+ }
+
+ if (Band == BAND_ON_5G && rateSection == 0)
+ DBG_871X("Wrong rate 0x%x: No CCK in 5G Band\n", DataRate);
+
+ /* workaround for wrong index combination to obtain tx power limit, */
+ /* OFDM only exists in BW 20M */
+ if (rateSection == 1)
+ bandwidth = 0;
+
+ /* workaround for wrong index combination to obtain tx power limit, */
+ /* CCK table will only be given in BW 20M */
+ if (rateSection == 0)
+ bandwidth = 0;
+
+	/* workaround for wrong index combination to obtain tx power limit, */
+ /* HT on 80M will reference to HT on 40M */
+ if ((rateSection == 2 || rateSection == 3) && Band == BAND_ON_5G && bandwidth == 2) {
+ bandwidth = 1;
+ }
+
+ if (Band == BAND_ON_2_4G)
+ channel = phy_GetChannelIndexOfTxPowerLimit(BAND_ON_2_4G, Channel);
+ else if (Band == BAND_ON_5G)
+ channel = phy_GetChannelIndexOfTxPowerLimit(BAND_ON_5G, Channel);
+ else if (Band == BAND_ON_BOTH) {
+		/* BAND_ON_BOTH is not handled for now */
+ }
+
+ if (band == -1 || regulation == -1 || bandwidth == -1 ||
+ rateSection == -1 || channel == -1) {
+ /* DBG_871X("Wrong index value to access power limit table [band %d][regulation %d][bandwidth %d][rf_path %d][rate_section %d][chnlGroup %d]\n", */
+ /* band, regulation, bandwidth, RfPath, rateSection, channelGroup); */
+
+ return MAX_POWER_INDEX;
+ }
+
+ if (Band == BAND_ON_2_4G) {
+		s8 limits[10] = {0};
+		u8 i = 0;
+
+ for (i = 0; i < MAX_REGULATION_NUM; i++)
+ limits[i] = pHalData->TxPwrLimit_2_4G[i][bandwidth][rateSection][channel][RfPath];
+
+ powerLimit = (regulation == TXPWR_LMT_WW) ? phy_GetWorldWideLimit(limits) :
+ pHalData->TxPwrLimit_2_4G[regulation][bandwidth][rateSection][channel][RfPath];
+
+ } else if (Band == BAND_ON_5G) {
+		s8 limits[10] = {0};
+		u8 i = 0;
+
+ for (i = 0; i < MAX_REGULATION_NUM; ++i)
+ limits[i] = pHalData->TxPwrLimit_5G[i][bandwidth][rateSection][channel][RfPath];
+
+ powerLimit = (regulation == TXPWR_LMT_WW) ? phy_GetWorldWideLimit(limits) :
+ pHalData->TxPwrLimit_5G[regulation][bandwidth][rateSection][channel][RfPath];
+ } else
+ DBG_871X("No power limit table of the specified band\n");
+
+ /* combine 5G VHT & HT rate */
+ /* 5G 20M and 40M HT and VHT can cross reference */
+ /*
+ if (Band == BAND_ON_5G && powerLimit == MAX_POWER_INDEX) {
+ if (bandwidth == 0 || bandwidth == 1) {
+ RT_TRACE(COMP_INIT, DBG_LOUD, ("No power limit table of the specified band %d, bandwidth %d, ratesection %d, rf path %d\n",
+ band, bandwidth, rateSection, RfPath));
+ if (rateSection == 2)
+ powerLimit = pHalData->TxPwrLimit_5G[regulation]
+ [bandwidth][4][channelGroup][RfPath];
+ else if (rateSection == 4)
+ powerLimit = pHalData->TxPwrLimit_5G[regulation]
+ [bandwidth][2][channelGroup][RfPath];
+ else if (rateSection == 3)
+ powerLimit = pHalData->TxPwrLimit_5G[regulation]
+ [bandwidth][5][channelGroup][RfPath];
+ else if (rateSection == 5)
+ powerLimit = pHalData->TxPwrLimit_5G[regulation]
+ [bandwidth][3][channelGroup][RfPath];
+ }
+ }
+ */
+ /* DBG_871X("TxPwrLmt[Regulation %d][Band %d][BW %d][RFPath %d][Rate 0x%x][Chnl %d] = %d\n", */
+ /* regulation, pHalData->CurrentBandType, Bandwidth, RfPath, DataRate, Channel, powerLimit); */
+ return powerLimit;
+}
+
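+/*
+ * For 5GHz 20/40MHz entries without an explicit limit, copy the limit
+ * between the matching HT and VHT rate sections (HT 1T..4T <-> VHT
+ * 1SS..4SS) so that both tables end up populated.
+ */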
+static void phy_CrossReferenceHTAndVHTTxPowerLimit(struct adapter *padapter)
+{
+ struct hal_com_data *pHalData = GET_HAL_DATA(padapter);
+ u8 regulation, bw, channel, rateSection;
+ s8 tempPwrLmt = 0;
+
+ for (regulation = 0; regulation < MAX_REGULATION_NUM; ++regulation) {
+ for (bw = 0; bw < MAX_5G_BANDWITH_NUM; ++bw) {
+ for (channel = 0; channel < CHANNEL_MAX_NUMBER_5G; ++channel) {
+ for (rateSection = 0; rateSection < MAX_RATE_SECTION_NUM; ++rateSection) {
+ tempPwrLmt = pHalData->TxPwrLimit_5G[regulation][bw][rateSection][channel][ODM_RF_PATH_A];
+ if (tempPwrLmt == MAX_POWER_INDEX) {
+ u8 baseSection = 2, refSection = 6;
+ if (bw == 0 || bw == 1) { /* 5G 20M 40M VHT and HT can cross reference */
+ /* DBG_871X("No power limit table of the specified band %d, bandwidth %d, ratesection %d, channel %d, rf path %d\n", */
+ /* 1, bw, rateSection, channel, ODM_RF_PATH_A); */
+ if (rateSection >= 2 && rateSection <= 9) {
+ if (rateSection == 2) {
+ baseSection = 2;
+ refSection = 6;
+ } else if (rateSection == 3) {
+ baseSection = 3;
+ refSection = 7;
+ } else if (rateSection == 4) {
+ baseSection = 4;
+ refSection = 8;
+ } else if (rateSection == 5) {
+ baseSection = 5;
+ refSection = 9;
+ } else if (rateSection == 6) {
+ baseSection = 6;
+ refSection = 2;
+ } else if (rateSection == 7) {
+ baseSection = 7;
+ refSection = 3;
+ } else if (rateSection == 8) {
+ baseSection = 8;
+ refSection = 4;
+ } else if (rateSection == 9) {
+ baseSection = 9;
+ refSection = 5;
+ }
+ pHalData->TxPwrLimit_5G[regulation][bw][baseSection][channel][ODM_RF_PATH_A] =
+ pHalData->TxPwrLimit_5G[regulation][bw][refSection][channel][ODM_RF_PATH_A];
+ }
+
+ /* DBG_871X("use other value %d", tempPwrLmt); */
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
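+/*
+ * Convert the 2.4GHz TX power limits from dBm-based values to power index
+ * offsets by subtracting the 40MHz power base of the matching rate section.
+ */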
+void PHY_ConvertTxPowerLimitToPowerIndex(struct adapter *Adapter)
+{
+ struct hal_com_data *pHalData = GET_HAL_DATA(Adapter);
+ u8 BW40PwrBasedBm2_4G = 0x2E;
+ u8 regulation, bw, channel, rateSection;
+ s8 tempValue = 0, tempPwrLmt = 0;
+ u8 rfPath = 0;
+
+ /* DBG_871X("=====> PHY_ConvertTxPowerLimitToPowerIndex()\n"); */
+
+ phy_CrossReferenceHTAndVHTTxPowerLimit(Adapter);
+
+ for (regulation = 0; regulation < MAX_REGULATION_NUM; ++regulation) {
+ for (bw = 0; bw < MAX_2_4G_BANDWITH_NUM; ++bw) {
+ for (channel = 0; channel < CHANNEL_MAX_NUMBER_2G; ++channel) {
+ for (rateSection = 0; rateSection < MAX_RATE_SECTION_NUM; ++rateSection) {
+ tempPwrLmt = pHalData->TxPwrLimit_2_4G[regulation][bw][rateSection][channel][ODM_RF_PATH_A];
+
+ for (rfPath = ODM_RF_PATH_A; rfPath < MAX_RF_PATH_NUM; ++rfPath) {
+ if (pHalData->odmpriv.PhyRegPgValueType == PHY_REG_PG_EXACT_VALUE) {
+ if (rateSection == 5) /* HT 4T */
+ BW40PwrBasedBm2_4G = PHY_GetTxPowerByRateBase(Adapter, BAND_ON_2_4G, rfPath, RF_4TX, HT_MCS24_MCS31);
+ else if (rateSection == 4) /* HT 3T */
+ BW40PwrBasedBm2_4G = PHY_GetTxPowerByRateBase(Adapter, BAND_ON_2_4G, rfPath, RF_3TX, HT_MCS16_MCS23);
+ else if (rateSection == 3) /* HT 2T */
+ BW40PwrBasedBm2_4G = PHY_GetTxPowerByRateBase(Adapter, BAND_ON_2_4G, rfPath, RF_2TX, HT_MCS8_MCS15);
+ else if (rateSection == 2) /* HT 1T */
+ BW40PwrBasedBm2_4G = PHY_GetTxPowerByRateBase(Adapter, BAND_ON_2_4G, rfPath, RF_1TX, HT_MCS0_MCS7);
+ else if (rateSection == 1) /* OFDM */
+ BW40PwrBasedBm2_4G = PHY_GetTxPowerByRateBase(Adapter, BAND_ON_2_4G, rfPath, RF_1TX, OFDM);
+ else if (rateSection == 0) /* CCK */
+ BW40PwrBasedBm2_4G = PHY_GetTxPowerByRateBase(Adapter, BAND_ON_2_4G, rfPath, RF_1TX, CCK);
+ } else
+ BW40PwrBasedBm2_4G = Adapter->registrypriv.RegPowerBase * 2;
+
+ if (tempPwrLmt != MAX_POWER_INDEX) {
+ tempValue = tempPwrLmt - BW40PwrBasedBm2_4G;
+ pHalData->TxPwrLimit_2_4G[regulation][bw][rateSection][channel][rfPath] = tempValue;
+ }
+ }
+ }
+ }
+ }
+ }
+
+ /* DBG_871X("<===== PHY_ConvertTxPowerLimitToPowerIndex()\n"); */
+}
+
+void PHY_InitTxPowerLimit(struct adapter *Adapter)
+{
+ struct hal_com_data *pHalData = GET_HAL_DATA(Adapter);
+ u8 i, j, k, l, m;
+
+ /* DBG_871X("=====> PHY_InitTxPowerLimit()!\n"); */
+
+ for (i = 0; i < MAX_REGULATION_NUM; ++i) {
+ for (j = 0; j < MAX_2_4G_BANDWITH_NUM; ++j)
+ for (k = 0; k < MAX_RATE_SECTION_NUM; ++k)
+ for (m = 0; m < CHANNEL_MAX_NUMBER_2G; ++m)
+ for (l = 0; l < MAX_RF_PATH_NUM; ++l)
+ pHalData->TxPwrLimit_2_4G[i][j][k][m][l] = MAX_POWER_INDEX;
+ }
+
+ for (i = 0; i < MAX_REGULATION_NUM; ++i) {
+ for (j = 0; j < MAX_5G_BANDWITH_NUM; ++j)
+ for (k = 0; k < MAX_RATE_SECTION_NUM; ++k)
+ for (m = 0; m < CHANNEL_MAX_NUMBER_5G; ++m)
+ for (l = 0; l < MAX_RF_PATH_NUM; ++l)
+ pHalData->TxPwrLimit_5G[i][j][k][m][l] = MAX_POWER_INDEX;
+ }
+
+ /* DBG_871X("<===== PHY_InitTxPowerLimit()!\n"); */
+}
+
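+/*
+ * Parse one line of a TX power limit table (regulation, band, bandwidth,
+ * rate section, RF path, channel and limit given as strings) and record the
+ * limit if it is lower than the value already stored.
+ */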
+void PHY_SetTxPowerLimit(
+ struct adapter *Adapter,
+ u8 *Regulation,
+ u8 *Band,
+ u8 *Bandwidth,
+ u8 *RateSection,
+ u8 *RfPath,
+ u8 *Channel,
+ u8 *PowerLimit
+)
+{
+ struct hal_com_data *pHalData = GET_HAL_DATA(Adapter);
+ u8 regulation = 0, bandwidth = 0, rateSection = 0, channel;
+ s8 powerLimit = 0, prevPowerLimit, channelIndex;
+
+ /* DBG_871X("Index of power limit table [band %s][regulation %s][bw %s][rate section %s][rf path %s][chnl %s][val %s]\n", */
+ /* Band, Regulation, Bandwidth, RateSection, RfPath, Channel, PowerLimit); */
+
+ if (!GetU1ByteIntegerFromStringInDecimal((s8 *)Channel, &channel) ||
+ !GetU1ByteIntegerFromStringInDecimal((s8 *)PowerLimit, &powerLimit))
+ DBG_871X("Illegal index of power limit table [chnl %s][val %s]\n", Channel, PowerLimit);
+
+ powerLimit = powerLimit > MAX_POWER_INDEX ? MAX_POWER_INDEX : powerLimit;
+
+ if (eqNByte(Regulation, (u8 *)("FCC"), 3))
+ regulation = 0;
+ else if (eqNByte(Regulation, (u8 *)("MKK"), 3))
+ regulation = 1;
+ else if (eqNByte(Regulation, (u8 *)("ETSI"), 4))
+ regulation = 2;
+ else if (eqNByte(Regulation, (u8 *)("WW13"), 4))
+ regulation = 3;
+
+ if (eqNByte(RateSection, (u8 *)("CCK"), 3) && eqNByte(RfPath, (u8 *)("1T"), 2))
+ rateSection = 0;
+ else if (eqNByte(RateSection, (u8 *)("OFDM"), 4) && eqNByte(RfPath, (u8 *)("1T"), 2))
+ rateSection = 1;
+ else if (eqNByte(RateSection, (u8 *)("HT"), 2) && eqNByte(RfPath, (u8 *)("1T"), 2))
+ rateSection = 2;
+ else if (eqNByte(RateSection, (u8 *)("HT"), 2) && eqNByte(RfPath, (u8 *)("2T"), 2))
+ rateSection = 3;
+ else if (eqNByte(RateSection, (u8 *)("HT"), 2) && eqNByte(RfPath, (u8 *)("3T"), 2))
+ rateSection = 4;
+ else if (eqNByte(RateSection, (u8 *)("HT"), 2) && eqNByte(RfPath, (u8 *)("4T"), 2))
+ rateSection = 5;
+ else if (eqNByte(RateSection, (u8 *)("VHT"), 3) && eqNByte(RfPath, (u8 *)("1T"), 2))
+ rateSection = 6;
+ else if (eqNByte(RateSection, (u8 *)("VHT"), 3) && eqNByte(RfPath, (u8 *)("2T"), 2))
+ rateSection = 7;
+ else if (eqNByte(RateSection, (u8 *)("VHT"), 3) && eqNByte(RfPath, (u8 *)("3T"), 2))
+ rateSection = 8;
+ else if (eqNByte(RateSection, (u8 *)("VHT"), 3) && eqNByte(RfPath, (u8 *)("4T"), 2))
+ rateSection = 9;
+ else {
+ DBG_871X("Wrong rate section!\n");
+ return;
+ }
+
+ if (eqNByte(Bandwidth, (u8 *)("20M"), 3))
+ bandwidth = 0;
+ else if (eqNByte(Bandwidth, (u8 *)("40M"), 3))
+ bandwidth = 1;
+ else if (eqNByte(Bandwidth, (u8 *)("80M"), 3))
+ bandwidth = 2;
+ else if (eqNByte(Bandwidth, (u8 *)("160M"), 4))
+ bandwidth = 3;
+
+ if (eqNByte(Band, (u8 *)("2.4G"), 4)) {
+ channelIndex = phy_GetChannelIndexOfTxPowerLimit(BAND_ON_2_4G, channel);
+
+ if (channelIndex == -1)
+ return;
+
+ prevPowerLimit = pHalData->TxPwrLimit_2_4G[regulation][bandwidth][rateSection][channelIndex][ODM_RF_PATH_A];
+
+ if (powerLimit < prevPowerLimit)
+ pHalData->TxPwrLimit_2_4G[regulation][bandwidth][rateSection][channelIndex][ODM_RF_PATH_A] = powerLimit;
+
+ /* DBG_871X("2.4G Band value : [regulation %d][bw %d][rate_section %d][chnl %d][val %d]\n", */
+ /* regulation, bandwidth, rateSection, channelIndex, pHalData->TxPwrLimit_2_4G[regulation][bandwidth][rateSection][channelIndex][ODM_RF_PATH_A]); */
+ } else if (eqNByte(Band, (u8 *)("5G"), 2)) {
+ channelIndex = phy_GetChannelIndexOfTxPowerLimit(BAND_ON_5G, channel);
+
+ if (channelIndex == -1)
+ return;
+
+ prevPowerLimit = pHalData->TxPwrLimit_5G[regulation][bandwidth][rateSection][channelIndex][ODM_RF_PATH_A];
+
+ if (powerLimit < prevPowerLimit)
+ pHalData->TxPwrLimit_5G[regulation][bandwidth][rateSection][channelIndex][ODM_RF_PATH_A] = powerLimit;
+
+ /* DBG_871X("5G Band value : [regulation %d][bw %d][rate_section %d][chnl %d][val %d]\n", */
+ /* regulation, bandwidth, rateSection, channel, pHalData->TxPwrLimit_5G[regulation][bandwidth][rateSection][channelIndex][ODM_RF_PATH_A]); */
+ } else {
+ DBG_871X("Cannot recognize the band info in %s\n", Band);
+ return;
+ }
+}
+
+u8 PHY_GetTxPowerIndex(
+ struct adapter *padapter,
+ u8 RFPath,
+ u8 Rate,
+ enum CHANNEL_WIDTH BandWidth,
+ u8 Channel
+)
+{
+ return PHY_GetTxPowerIndex_8723B(padapter, RFPath, Rate, BandWidth, Channel);
+}
+
+void PHY_SetTxPowerIndex(
+ struct adapter *padapter, u32 PowerIndex, u8 RFPath, u8 Rate
+)
+{
+ PHY_SetTxPowerIndex_8723B(padapter, PowerIndex, RFPath, Rate);
+}
+
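+/*
+ * Map a Realtek channel plan ID to the 2.4GHz and 5GHz regulation domains
+ * used when indexing the TX power limit tables.
+ */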
+void Hal_ChannelPlanToRegulation(struct adapter *Adapter, u16 ChannelPlan)
+{
+	struct hal_com_data *pHalData = GET_HAL_DATA(Adapter);
+
+	pHalData->Regulation2_4G = TXPWR_LMT_WW;
+ pHalData->Regulation5G = TXPWR_LMT_WW;
+
+ switch (ChannelPlan) {
+ case RT_CHANNEL_DOMAIN_WORLD_NULL:
+ pHalData->Regulation2_4G = TXPWR_LMT_WW;
+ break;
+ case RT_CHANNEL_DOMAIN_ETSI1_NULL:
+ pHalData->Regulation2_4G = TXPWR_LMT_ETSI;
+ break;
+ case RT_CHANNEL_DOMAIN_FCC1_NULL:
+ pHalData->Regulation2_4G = TXPWR_LMT_FCC;
+ break;
+ case RT_CHANNEL_DOMAIN_MKK1_NULL:
+ pHalData->Regulation2_4G = TXPWR_LMT_MKK;
+ break;
+ case RT_CHANNEL_DOMAIN_ETSI2_NULL:
+ pHalData->Regulation2_4G = TXPWR_LMT_ETSI;
+ break;
+ case RT_CHANNEL_DOMAIN_FCC1_FCC1:
+ pHalData->Regulation2_4G = TXPWR_LMT_FCC;
+ pHalData->Regulation5G = TXPWR_LMT_FCC;
+ break;
+ case RT_CHANNEL_DOMAIN_WORLD_ETSI1:
+ pHalData->Regulation2_4G = TXPWR_LMT_FCC;
+ pHalData->Regulation5G = TXPWR_LMT_ETSI;
+ break;
+ case RT_CHANNEL_DOMAIN_MKK1_MKK1:
+ pHalData->Regulation2_4G = TXPWR_LMT_MKK;
+ pHalData->Regulation5G = TXPWR_LMT_MKK;
+ break;
+ case RT_CHANNEL_DOMAIN_WORLD_KCC1:
+ pHalData->Regulation2_4G = TXPWR_LMT_FCC;
+ pHalData->Regulation5G = TXPWR_LMT_MKK;
+ break;
+ case RT_CHANNEL_DOMAIN_WORLD_FCC2:
+ pHalData->Regulation2_4G = TXPWR_LMT_FCC;
+ pHalData->Regulation5G = TXPWR_LMT_FCC;
+ break;
+ case RT_CHANNEL_DOMAIN_WORLD_FCC3:
+ pHalData->Regulation2_4G = TXPWR_LMT_FCC;
+ pHalData->Regulation5G = TXPWR_LMT_FCC;
+ break;
+ case RT_CHANNEL_DOMAIN_WORLD_FCC4:
+ pHalData->Regulation2_4G = TXPWR_LMT_FCC;
+ pHalData->Regulation5G = TXPWR_LMT_FCC;
+ break;
+ case RT_CHANNEL_DOMAIN_WORLD_FCC5:
+ pHalData->Regulation2_4G = TXPWR_LMT_FCC;
+ pHalData->Regulation5G = TXPWR_LMT_FCC;
+ break;
+ case RT_CHANNEL_DOMAIN_WORLD_FCC6:
+ pHalData->Regulation2_4G = TXPWR_LMT_FCC;
+ pHalData->Regulation5G = TXPWR_LMT_FCC;
+ break;
+ case RT_CHANNEL_DOMAIN_FCC1_FCC7:
+ pHalData->Regulation2_4G = TXPWR_LMT_FCC;
+ pHalData->Regulation5G = TXPWR_LMT_FCC;
+ break;
+ case RT_CHANNEL_DOMAIN_WORLD_ETSI2:
+ pHalData->Regulation2_4G = TXPWR_LMT_FCC;
+ pHalData->Regulation5G = TXPWR_LMT_FCC;
+ break;
+ case RT_CHANNEL_DOMAIN_WORLD_ETSI3:
+ pHalData->Regulation2_4G = TXPWR_LMT_FCC;
+ pHalData->Regulation5G = TXPWR_LMT_FCC;
+ break;
+ case RT_CHANNEL_DOMAIN_MKK1_MKK2:
+ pHalData->Regulation2_4G = TXPWR_LMT_MKK;
+ pHalData->Regulation5G = TXPWR_LMT_FCC;
+ break;
+ case RT_CHANNEL_DOMAIN_MKK1_MKK3:
+ pHalData->Regulation2_4G = TXPWR_LMT_MKK;
+ pHalData->Regulation5G = TXPWR_LMT_FCC;
+ break;
+ case RT_CHANNEL_DOMAIN_FCC1_NCC1:
+ pHalData->Regulation2_4G = TXPWR_LMT_FCC;
+ pHalData->Regulation5G = TXPWR_LMT_FCC;
+ break;
+ case RT_CHANNEL_DOMAIN_FCC1_NCC2:
+ pHalData->Regulation2_4G = TXPWR_LMT_FCC;
+ pHalData->Regulation5G = TXPWR_LMT_FCC;
+ break;
+ case RT_CHANNEL_DOMAIN_GLOBAL_NULL:
+ pHalData->Regulation2_4G = TXPWR_LMT_WW;
+ pHalData->Regulation5G = TXPWR_LMT_WW;
+ break;
+ case RT_CHANNEL_DOMAIN_ETSI1_ETSI4:
+ pHalData->Regulation2_4G = TXPWR_LMT_ETSI;
+ pHalData->Regulation5G = TXPWR_LMT_ETSI;
+ break;
+ case RT_CHANNEL_DOMAIN_FCC1_FCC2:
+ pHalData->Regulation2_4G = TXPWR_LMT_FCC;
+ pHalData->Regulation5G = TXPWR_LMT_FCC;
+ break;
+ case RT_CHANNEL_DOMAIN_FCC1_NCC3:
+ pHalData->Regulation2_4G = TXPWR_LMT_FCC;
+ pHalData->Regulation5G = TXPWR_LMT_FCC;
+ break;
+ case RT_CHANNEL_DOMAIN_WORLD_ETSI5:
+ pHalData->Regulation2_4G = TXPWR_LMT_ETSI;
+ pHalData->Regulation5G = TXPWR_LMT_ETSI;
+ break;
+ case RT_CHANNEL_DOMAIN_FCC1_FCC8:
+ pHalData->Regulation2_4G = TXPWR_LMT_FCC;
+ pHalData->Regulation5G = TXPWR_LMT_FCC;
+ break;
+ case RT_CHANNEL_DOMAIN_WORLD_ETSI6:
+ pHalData->Regulation2_4G = TXPWR_LMT_ETSI;
+ pHalData->Regulation5G = TXPWR_LMT_ETSI;
+ break;
+ case RT_CHANNEL_DOMAIN_WORLD_ETSI7:
+ pHalData->Regulation2_4G = TXPWR_LMT_ETSI;
+ pHalData->Regulation5G = TXPWR_LMT_ETSI;
+ break;
+ case RT_CHANNEL_DOMAIN_WORLD_ETSI8:
+ pHalData->Regulation2_4G = TXPWR_LMT_ETSI;
+ pHalData->Regulation5G = TXPWR_LMT_ETSI;
+ break;
+ case RT_CHANNEL_DOMAIN_WORLD_ETSI9:
+ pHalData->Regulation2_4G = TXPWR_LMT_ETSI;
+ pHalData->Regulation5G = TXPWR_LMT_ETSI;
+ break;
+ case RT_CHANNEL_DOMAIN_WORLD_ETSI10:
+ pHalData->Regulation2_4G = TXPWR_LMT_ETSI;
+ pHalData->Regulation5G = TXPWR_LMT_ETSI;
+ break;
+ case RT_CHANNEL_DOMAIN_WORLD_ETSI11:
+ pHalData->Regulation2_4G = TXPWR_LMT_ETSI;
+ pHalData->Regulation5G = TXPWR_LMT_ETSI;
+ break;
+ case RT_CHANNEL_DOMAIN_FCC1_NCC4:
+ pHalData->Regulation2_4G = TXPWR_LMT_FCC;
+ pHalData->Regulation5G = TXPWR_LMT_FCC;
+ break;
+ case RT_CHANNEL_DOMAIN_WORLD_ETSI12:
+ pHalData->Regulation2_4G = TXPWR_LMT_ETSI;
+ pHalData->Regulation5G = TXPWR_LMT_ETSI;
+ break;
+ case RT_CHANNEL_DOMAIN_FCC1_FCC9:
+ pHalData->Regulation2_4G = TXPWR_LMT_FCC;
+ pHalData->Regulation5G = TXPWR_LMT_FCC;
+ break;
+ case RT_CHANNEL_DOMAIN_WORLD_ETSI13:
+ pHalData->Regulation2_4G = TXPWR_LMT_ETSI;
+ pHalData->Regulation5G = TXPWR_LMT_ETSI;
+ break;
+ case RT_CHANNEL_DOMAIN_FCC1_FCC10:
+ pHalData->Regulation2_4G = TXPWR_LMT_FCC;
+ pHalData->Regulation5G = TXPWR_LMT_FCC;
+ break;
+ case RT_CHANNEL_DOMAIN_REALTEK_DEFINE: /* Realtek Reserve */
+ pHalData->Regulation2_4G = TXPWR_LMT_WW;
+ pHalData->Regulation5G = TXPWR_LMT_WW;
+ break;
+ default:
+ break;
+ }
+}
+
+
+static char file_path_bs[PATH_MAX];
+
+#define GetLineFromBuffer(buffer) strsep(&buffer, "\n")
+
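+/*
+ * Load MAC register settings from an external parameter file (cached in
+ * mac_reg after the first successful read) and write each offset/value pair.
+ */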
+int phy_ConfigMACWithParaFile(struct adapter *Adapter, char *pFileName)
+{
+ struct hal_com_data *pHalData = GET_HAL_DATA(Adapter);
+ int rlen = 0, rtStatus = _FAIL;
+ char *szLine, *ptmp;
+ u32 u4bRegOffset, u4bRegValue, u4bMove;
+
+ if (!(Adapter->registrypriv.load_phy_file & LOAD_MAC_PARA_FILE))
+ return rtStatus;
+
+ memset(pHalData->para_file_buf, 0, MAX_PARA_FILE_BUF_LEN);
+
+ if ((pHalData->mac_reg_len == 0) && (pHalData->mac_reg == NULL)) {
+ rtw_merge_string(file_path_bs, PATH_MAX, rtw_phy_file_path, pFileName);
+
+ if (rtw_is_file_readable(file_path_bs) == true) {
+ rlen = rtw_retrive_from_file(file_path_bs, pHalData->para_file_buf, MAX_PARA_FILE_BUF_LEN);
+ if (rlen > 0) {
+ rtStatus = _SUCCESS;
+ pHalData->mac_reg = vzalloc(rlen);
+ if (pHalData->mac_reg) {
+ memcpy(pHalData->mac_reg, pHalData->para_file_buf, rlen);
+ pHalData->mac_reg_len = rlen;
+ } else
+ DBG_871X("%s mac_reg alloc fail !\n", __func__);
+ }
+ }
+ } else {
+ if ((pHalData->mac_reg_len != 0) && (pHalData->mac_reg != NULL)) {
+ memcpy(pHalData->para_file_buf, pHalData->mac_reg, pHalData->mac_reg_len);
+ rtStatus = _SUCCESS;
+ } else
+ DBG_871X("%s(): Critical Error !!!\n", __func__);
+ }
+
+ if (rtStatus == _SUCCESS) {
+ ptmp = pHalData->para_file_buf;
+ for (szLine = GetLineFromBuffer(ptmp); szLine != NULL; szLine = GetLineFromBuffer(ptmp)) {
+ if (!IsCommentString(szLine)) {
+ /* Get 1st hex value as register offset */
+ if (GetHexValueFromString(szLine, &u4bRegOffset, &u4bMove)) {
+ if (u4bRegOffset == 0xffff) /* Ending. */
+ break;
+
+ /* Get 2nd hex value as register value. */
+ szLine += u4bMove;
+ if (GetHexValueFromString(szLine, &u4bRegValue, &u4bMove))
+ rtw_write8(Adapter, u4bRegOffset, (u8)u4bRegValue);
+ }
+ }
+ }
+ } else
+ DBG_871X("%s(): No File %s, Load from HWImg Array!\n", __func__, pFileName);
+
+ return rtStatus;
+}
+
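+/*
+ * Load PHY_REG or AGC_TAB baseband settings from an external parameter file.
+ * The file contents are cached per ConfigType; the magic offsets (0xf9-0xfe,
+ * 0xffe) encode delays to insert between register writes.
+ */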
+int phy_ConfigBBWithParaFile(
+ struct adapter *Adapter, char *pFileName, u32 ConfigType
+)
+{
+ struct hal_com_data *pHalData = GET_HAL_DATA(Adapter);
+ int rlen = 0, rtStatus = _FAIL;
+ char *szLine, *ptmp;
+ u32 u4bRegOffset, u4bRegValue, u4bMove;
+ char *pBuf = NULL;
+ u32 *pBufLen = NULL;
+
+ if (!(Adapter->registrypriv.load_phy_file & LOAD_BB_PARA_FILE))
+ return rtStatus;
+
+ switch (ConfigType) {
+ case CONFIG_BB_PHY_REG:
+ pBuf = pHalData->bb_phy_reg;
+ pBufLen = &pHalData->bb_phy_reg_len;
+ break;
+ case CONFIG_BB_AGC_TAB:
+ pBuf = pHalData->bb_agc_tab;
+ pBufLen = &pHalData->bb_agc_tab_len;
+ break;
+ default:
+ DBG_871X("Unknown ConfigType!! %d\r\n", ConfigType);
+ break;
+ }
+
+ memset(pHalData->para_file_buf, 0, MAX_PARA_FILE_BUF_LEN);
+
+ if ((pBufLen != NULL) && (*pBufLen == 0) && (pBuf == NULL)) {
+ rtw_merge_string(file_path_bs, PATH_MAX, rtw_phy_file_path, pFileName);
+
+ if (rtw_is_file_readable(file_path_bs) == true) {
+ rlen = rtw_retrive_from_file(file_path_bs, pHalData->para_file_buf, MAX_PARA_FILE_BUF_LEN);
+ if (rlen > 0) {
+ rtStatus = _SUCCESS;
+ pBuf = vzalloc(rlen);
+ if (pBuf) {
+ memcpy(pBuf, pHalData->para_file_buf, rlen);
+ *pBufLen = rlen;
+
+ switch (ConfigType) {
+ case CONFIG_BB_PHY_REG:
+ pHalData->bb_phy_reg = pBuf;
+ break;
+ case CONFIG_BB_AGC_TAB:
+ pHalData->bb_agc_tab = pBuf;
+ break;
+ }
+ } else
+ DBG_871X("%s(): ConfigType %d alloc fail !\n", __func__, ConfigType);
+ }
+ }
+ } else {
+		if ((pBufLen != NULL) && (*pBufLen != 0) && (pBuf != NULL)) {
+ memcpy(pHalData->para_file_buf, pBuf, *pBufLen);
+ rtStatus = _SUCCESS;
+ } else
+ DBG_871X("%s(): Critical Error !!!\n", __func__);
+ }
+
+ if (rtStatus == _SUCCESS) {
+ ptmp = pHalData->para_file_buf;
+ for (szLine = GetLineFromBuffer(ptmp); szLine != NULL; szLine = GetLineFromBuffer(ptmp)) {
+ if (!IsCommentString(szLine)) {
+ /* Get 1st hex value as register offset. */
+ if (GetHexValueFromString(szLine, &u4bRegOffset, &u4bMove)) {
+ if (u4bRegOffset == 0xffff) /* Ending. */
+ break;
+ else if (u4bRegOffset == 0xfe || u4bRegOffset == 0xffe)
+ msleep(50);
+ else if (u4bRegOffset == 0xfd)
+ mdelay(5);
+ else if (u4bRegOffset == 0xfc)
+ mdelay(1);
+ else if (u4bRegOffset == 0xfb)
+ udelay(50);
+ else if (u4bRegOffset == 0xfa)
+ udelay(5);
+ else if (u4bRegOffset == 0xf9)
+ udelay(1);
+
+ /* Get 2nd hex value as register value. */
+ szLine += u4bMove;
+ if (GetHexValueFromString(szLine, &u4bRegValue, &u4bMove)) {
+ /* DBG_871X("[BB-ADDR]%03lX =%08lX\n", u4bRegOffset, u4bRegValue); */
+ PHY_SetBBReg(Adapter, u4bRegOffset, bMaskDWord, u4bRegValue);
+
+ if (u4bRegOffset == 0xa24)
+ pHalData->odmpriv.RFCalibrateInfo.RegA24 = u4bRegValue;
+
+ /* Add 1us delay between BB/RF register setting. */
+ udelay(1);
+ }
+ }
+ }
+ }
+ } else
+ DBG_871X("%s(): No File %s, Load from HWImg Array!\n", __func__, pFileName);
+
+ return rtStatus;
+}
+
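+/*
+ * Decrypt an obfuscated PHY_REG_PG parameter file in place: each printable
+ * character is shifted by a position-dependent amount and then mirrored
+ * within the ASCII range 32..126.
+ */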
+static void phy_DecryptBBPgParaFile(struct adapter *Adapter, char *buffer)
+{
+ u32 i = 0, j = 0;
+ u8 map[95] = {0};
+ u8 currentChar;
+ char *BufOfLines, *ptmp;
+
+ /* DBG_871X("=====>phy_DecryptBBPgParaFile()\n"); */
+	/* 32 is the ASCII code of the first visible character, 126 the last one */
+ for (i = 0; i < 95; ++i)
+ map[i] = (u8) (94 - i);
+
+ ptmp = buffer;
+ i = 0;
+ for (BufOfLines = GetLineFromBuffer(ptmp); BufOfLines != NULL; BufOfLines = GetLineFromBuffer(ptmp)) {
+ /* DBG_871X("Encrypted Line: %s\n", BufOfLines); */
+
+ for (j = 0; j < strlen(BufOfLines); ++j) {
+ currentChar = BufOfLines[j];
+
+ if (currentChar == '\0')
+ break;
+
+ currentChar -= (u8) ((((i + j) * 3) % 128));
+
+ BufOfLines[j] = map[currentChar - 32] + 32;
+ }
+ /* DBG_871X("Decrypted Line: %s\n", BufOfLines); */
+ if (strlen(BufOfLines) != 0)
+ i++;
+ BufOfLines[strlen(BufOfLines)] = '\n';
+ }
+}
+
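+/*
+ * Parse PHY_REG_PG.txt: the first non-comment line selects the format
+ * version (v0/v1) and value type (exact dBm or relative), after which each
+ * data line is decoded and stored via PHY_StoreTxPowerByRate().
+ */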
+static int phy_ParseBBPgParaFile(struct adapter *Adapter, char *buffer)
+{
+ int rtStatus = _SUCCESS;
+ struct hal_com_data *pHalData = GET_HAL_DATA(Adapter);
+ char *szLine, *ptmp;
+ u32 u4bRegOffset, u4bRegMask, u4bRegValue;
+ u32 u4bMove;
+ bool firstLine = true;
+ u8 tx_num = 0;
+ u8 band = 0, rf_path = 0;
+
+ /* DBG_871X("=====>phy_ParseBBPgParaFile()\n"); */
+
+ if (Adapter->registrypriv.RegDecryptCustomFile == 1)
+ phy_DecryptBBPgParaFile(Adapter, buffer);
+
+ ptmp = buffer;
+ for (szLine = GetLineFromBuffer(ptmp); szLine != NULL; szLine = GetLineFromBuffer(ptmp)) {
+ if (!IsCommentString(szLine)) {
+ if (isAllSpaceOrTab(szLine, sizeof(*szLine)))
+ continue;
+
+ /* Get header info (relative value or exact value) */
+ if (firstLine) {
+ if (eqNByte(szLine, (u8 *)("#[v1]"), 5)) {
+
+ pHalData->odmpriv.PhyRegPgVersion = szLine[3] - '0';
+ /* DBG_871X("This is a new format PHY_REG_PG.txt\n"); */
+ } else if (eqNByte(szLine, (u8 *)("#[v0]"), 5)) {
+ pHalData->odmpriv.PhyRegPgVersion = szLine[3] - '0';
+ /* DBG_871X("This is a old format PHY_REG_PG.txt ok\n"); */
+ } else {
+ DBG_871X("The format in PHY_REG_PG are invalid %s\n", szLine);
+ return _FAIL;
+ }
+
+ if (eqNByte(szLine + 5, (u8 *)("[Exact]#"), 8)) {
+ pHalData->odmpriv.PhyRegPgValueType = PHY_REG_PG_EXACT_VALUE;
+ /* DBG_871X("The values in PHY_REG_PG are exact values ok\n"); */
+ firstLine = false;
+ continue;
+ } else if (eqNByte(szLine + 5, (u8 *)("[Relative]#"), 11)) {
+ pHalData->odmpriv.PhyRegPgValueType = PHY_REG_PG_RELATIVE_VALUE;
+ /* DBG_871X("The values in PHY_REG_PG are relative values ok\n"); */
+ firstLine = false;
+ continue;
+ } else {
+ DBG_871X("The values in PHY_REG_PG are invalid %s\n", szLine);
+ return _FAIL;
+ }
+ }
+
+ if (pHalData->odmpriv.PhyRegPgVersion == 0) {
+ /* Get 1st hex value as register offset. */
+ if (GetHexValueFromString(szLine, &u4bRegOffset, &u4bMove)) {
+ szLine += u4bMove;
+ if (u4bRegOffset == 0xffff) /* Ending. */
+ break;
+
+ /* Get 2nd hex value as register mask. */
+ if (GetHexValueFromString(szLine, &u4bRegMask, &u4bMove))
+ szLine += u4bMove;
+ else
+ return _FAIL;
+
+ if (pHalData->odmpriv.PhyRegPgValueType == PHY_REG_PG_RELATIVE_VALUE) {
+ /* Get 3rd hex value as register value. */
+ if (GetHexValueFromString(szLine, &u4bRegValue, &u4bMove)) {
+ PHY_StoreTxPowerByRate(Adapter, 0, 0, 1, u4bRegOffset, u4bRegMask, u4bRegValue);
+ /* DBG_871X("[ADDR] %03X =%08X Mask =%08x\n", u4bRegOffset, u4bRegValue, u4bRegMask); */
+ } else
+ return _FAIL;
+ } else if (pHalData->odmpriv.PhyRegPgValueType == PHY_REG_PG_EXACT_VALUE) {
+ u32 combineValue = 0;
+ u8 integer = 0, fraction = 0;
+
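+						/*
+						 * Pack four dBm values into one register word: each value
+						 * is doubled (0.5dB steps) and stored as two BCD nibbles
+						 * per byte.
+						 */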
+ if (GetFractionValueFromString(szLine, &integer, &fraction, &u4bMove))
+ szLine += u4bMove;
+ else
+ return _FAIL;
+
+ integer *= 2;
+ if (fraction == 5)
+ integer += 1;
+ combineValue |= (((integer / 10) << 4) + (integer % 10));
+ /* DBG_871X(" %d", integer); */
+
+ if (GetFractionValueFromString(szLine, &integer, &fraction, &u4bMove))
+ szLine += u4bMove;
+ else
+ return _FAIL;
+
+ integer *= 2;
+ if (fraction == 5)
+ integer += 1;
+ combineValue <<= 8;
+ combineValue |= (((integer / 10) << 4) + (integer % 10));
+ /* DBG_871X(" %d", integer); */
+
+ if (GetFractionValueFromString(szLine, &integer, &fraction, &u4bMove))
+ szLine += u4bMove;
+ else
+ return _FAIL;
+
+ integer *= 2;
+ if (fraction == 5)
+ integer += 1;
+ combineValue <<= 8;
+ combineValue |= (((integer / 10) << 4) + (integer % 10));
+ /* DBG_871X(" %d", integer); */
+
+ if (GetFractionValueFromString(szLine, &integer, &fraction, &u4bMove))
+ szLine += u4bMove;
+ else
+ return _FAIL;
+
+ integer *= 2;
+ if (fraction == 5)
+ integer += 1;
+ combineValue <<= 8;
+ combineValue |= (((integer / 10) << 4) + (integer % 10));
+ /* DBG_871X(" %d", integer); */
+ PHY_StoreTxPowerByRate(Adapter, 0, 0, 1, u4bRegOffset, u4bRegMask, combineValue);
+
+ /* DBG_871X("[ADDR] 0x%3x = 0x%4x\n", u4bRegOffset, combineValue); */
+ }
+ }
+ } else if (pHalData->odmpriv.PhyRegPgVersion > 0) {
+ u32 index = 0;
+
+ if (eqNByte(szLine, "0xffff", 6))
+ break;
+
+ if (!eqNByte("#[END]#", szLine, 7)) {
+ /* load the table label info */
+ if (szLine[0] == '#') {
+ index = 0;
+ if (eqNByte(szLine, "#[2.4G]", 7)) {
+ band = BAND_ON_2_4G;
+ index += 8;
+ } else if (eqNByte(szLine, "#[5G]", 5)) {
+ band = BAND_ON_5G;
+ index += 6;
+ } else {
+ DBG_871X("Invalid band %s in PHY_REG_PG.txt\n", szLine);
+ return _FAIL;
+ }
+
+ rf_path = szLine[index] - 'A';
+ /* DBG_871X(" Table label Band %d, RfPath %d\n", band, rf_path); */
+ } else { /* load rows of tables */
+ if (szLine[1] == '1')
+ tx_num = RF_1TX;
+ else if (szLine[1] == '2')
+ tx_num = RF_2TX;
+ else if (szLine[1] == '3')
+ tx_num = RF_3TX;
+ else if (szLine[1] == '4')
+ tx_num = RF_4TX;
+ else {
+ DBG_871X("Invalid row in PHY_REG_PG.txt %c\n", szLine[1]);
+ return _FAIL;
+ }
+
+ while (szLine[index] != ']')
+ ++index;
+ ++index;/* skip ] */
+
+					/* Get 1st hex value on this line as register offset. */
+ szLine += index;
+ if (GetHexValueFromString(szLine, &u4bRegOffset, &u4bMove))
+ szLine += u4bMove;
+ else
+ return _FAIL;
+
+ /* Get 2nd hex value as register mask. */
+ if (GetHexValueFromString(szLine, &u4bRegMask, &u4bMove))
+ szLine += u4bMove;
+ else
+ return _FAIL;
+
+ if (pHalData->odmpriv.PhyRegPgValueType == PHY_REG_PG_RELATIVE_VALUE) {
+ /* Get 3rd hex value as register value. */
+ if (GetHexValueFromString(szLine, &u4bRegValue, &u4bMove)) {
+ PHY_StoreTxPowerByRate(Adapter, band, rf_path, tx_num, u4bRegOffset, u4bRegMask, u4bRegValue);
+ /* DBG_871X("[ADDR] %03X (tx_num %d) =%08X Mask =%08x\n", u4bRegOffset, tx_num, u4bRegValue, u4bRegMask); */
+ } else
+ return _FAIL;
+ } else if (pHalData->odmpriv.PhyRegPgValueType == PHY_REG_PG_EXACT_VALUE) {
+ u32 combineValue = 0;
+ u8 integer = 0, fraction = 0;
+
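+						/* Same conversion as in the version-0 branch above:
+						 * four values are doubled (with ".5" handling) and
+						 * BCD-packed into combineValue byte by byte. */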
+ if (GetFractionValueFromString(szLine, &integer, &fraction, &u4bMove))
+ szLine += u4bMove;
+ else
+ return _FAIL;
+
+ integer *= 2;
+ if (fraction == 5)
+ integer += 1;
+ combineValue |= (((integer / 10) << 4) + (integer % 10));
+ /* DBG_871X(" %d", integer); */
+
+ if (GetFractionValueFromString(szLine, &integer, &fraction, &u4bMove))
+ szLine += u4bMove;
+ else
+ return _FAIL;
+
+ integer *= 2;
+ if (fraction == 5)
+ integer += 1;
+ combineValue <<= 8;
+ combineValue |= (((integer / 10) << 4) + (integer % 10));
+ /* DBG_871X(" %d", integer); */
+
+ if (GetFractionValueFromString(szLine, &integer, &fraction, &u4bMove))
+ szLine += u4bMove;
+ else
+ return _FAIL;
+
+ integer *= 2;
+ if (fraction == 5)
+ integer += 1;
+ combineValue <<= 8;
+ combineValue |= (((integer / 10) << 4) + (integer % 10));
+ /* DBG_871X(" %d", integer); */
+
+ if (GetFractionValueFromString(szLine, &integer, &fraction, &u4bMove))
+ szLine += u4bMove;
+ else
+ return _FAIL;
+
+ integer *= 2;
+ if (fraction == 5)
+ integer += 1;
+ combineValue <<= 8;
+ combineValue |= (((integer / 10) << 4) + (integer % 10));
+ /* DBG_871X(" %d", integer); */
+ PHY_StoreTxPowerByRate(Adapter, band, rf_path, tx_num, u4bRegOffset, u4bRegMask, combineValue);
+
+ /* DBG_871X("[ADDR] 0x%3x (tx_num %d) = 0x%4x\n", u4bRegOffset, tx_num, combineValue); */
+ }
+ }
+ }
+ }
+ }
+ }
+ /* DBG_871X("<=====phy_ParseBBPgParaFile()\n"); */
+ return rtStatus;
+}
+
+int phy_ConfigBBWithPgParaFile(struct adapter *Adapter, char *pFileName)
+{
+ struct hal_com_data *pHalData = GET_HAL_DATA(Adapter);
+ int rlen = 0, rtStatus = _FAIL;
+
+ if (!(Adapter->registrypriv.load_phy_file & LOAD_BB_PG_PARA_FILE))
+ return rtStatus;
+
+ memset(pHalData->para_file_buf, 0, MAX_PARA_FILE_BUF_LEN);
+
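+	/*
+	 * On the first call the file is read and cached in bb_phy_reg_pg;
+	 * later calls reuse the cached copy instead of re-reading the file.
+	 */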
+ if ((pHalData->bb_phy_reg_pg_len == 0) && (pHalData->bb_phy_reg_pg == NULL)) {
+ rtw_merge_string(file_path_bs, PATH_MAX, rtw_phy_file_path, pFileName);
+
+ if (rtw_is_file_readable(file_path_bs) == true) {
+ rlen = rtw_retrive_from_file(file_path_bs, pHalData->para_file_buf, MAX_PARA_FILE_BUF_LEN);
+ if (rlen > 0) {
+ rtStatus = _SUCCESS;
+ pHalData->bb_phy_reg_pg = vzalloc(rlen);
+ if (pHalData->bb_phy_reg_pg) {
+ memcpy(pHalData->bb_phy_reg_pg, pHalData->para_file_buf, rlen);
+ pHalData->bb_phy_reg_pg_len = rlen;
+ } else
+ DBG_871X("%s bb_phy_reg_pg alloc fail !\n", __func__);
+ }
+ }
+ } else {
+ if ((pHalData->bb_phy_reg_pg_len != 0) && (pHalData->bb_phy_reg_pg != NULL)) {
+ memcpy(pHalData->para_file_buf, pHalData->bb_phy_reg_pg, pHalData->bb_phy_reg_pg_len);
+ rtStatus = _SUCCESS;
+ } else
+ DBG_871X("%s(): Critical Error !!!\n", __func__);
+ }
+
+ if (rtStatus == _SUCCESS) {
+ /* DBG_871X("phy_ConfigBBWithPgParaFile(): read %s ok\n", pFileName); */
+ phy_ParseBBPgParaFile(Adapter, pHalData->para_file_buf);
+ } else
+ DBG_871X("%s(): No File %s, Load from HWImg Array!\n", __func__, pFileName);
+
+ return rtStatus;
+}
+
+int PHY_ConfigRFWithParaFile(
+ struct adapter *Adapter, char *pFileName, u8 eRFPath
+)
+{
+ struct hal_com_data *pHalData = GET_HAL_DATA(Adapter);
+ int rlen = 0, rtStatus = _FAIL;
+ char *szLine, *ptmp;
+ u32 u4bRegOffset, u4bRegValue, u4bMove;
+ u16 i;
+ char *pBuf = NULL;
+ u32 *pBufLen = NULL;
+
+ if (!(Adapter->registrypriv.load_phy_file & LOAD_RF_PARA_FILE))
+ return rtStatus;
+
+ switch (eRFPath) {
+ case ODM_RF_PATH_A:
+ pBuf = pHalData->rf_radio_a;
+ pBufLen = &pHalData->rf_radio_a_len;
+ break;
+ case ODM_RF_PATH_B:
+ pBuf = pHalData->rf_radio_b;
+ pBufLen = &pHalData->rf_radio_b_len;
+ break;
+ default:
+ DBG_871X("Unknown RF path!! %d\r\n", eRFPath);
+ break;
+ }
+
+ memset(pHalData->para_file_buf, 0, MAX_PARA_FILE_BUF_LEN);
+
+ if ((pBufLen != NULL) && (*pBufLen == 0) && (pBuf == NULL)) {
+ rtw_merge_string(file_path_bs, PATH_MAX, rtw_phy_file_path, pFileName);
+
+ if (rtw_is_file_readable(file_path_bs) == true) {
+ rlen = rtw_retrive_from_file(file_path_bs, pHalData->para_file_buf, MAX_PARA_FILE_BUF_LEN);
+ if (rlen > 0) {
+ rtStatus = _SUCCESS;
+ pBuf = vzalloc(rlen);
+ if (pBuf) {
+ memcpy(pBuf, pHalData->para_file_buf, rlen);
+ *pBufLen = rlen;
+
+ switch (eRFPath) {
+ case ODM_RF_PATH_A:
+ pHalData->rf_radio_a = pBuf;
+ break;
+ case ODM_RF_PATH_B:
+ pHalData->rf_radio_b = pBuf;
+ break;
+ }
+ } else
+ DBG_871X("%s(): eRFPath =%d alloc fail !\n", __func__, eRFPath);
+ }
+ }
+ } else {
+ if ((pBufLen != NULL) && (*pBufLen == 0) && (pBuf == NULL)) {
+ memcpy(pHalData->para_file_buf, pBuf, *pBufLen);
+ rtStatus = _SUCCESS;
+ } else
+ DBG_871X("%s(): Critical Error !!!\n", __func__);
+ }
+
+ if (rtStatus == _SUCCESS) {
+ /* DBG_871X("%s(): read %s successfully\n", __func__, pFileName); */
+
+ ptmp = pHalData->para_file_buf;
+ for (szLine = GetLineFromBuffer(ptmp); szLine != NULL; szLine = GetLineFromBuffer(ptmp)) {
+ if (!IsCommentString(szLine)) {
+ /* Get 1st hex value as register offset. */
+ if (GetHexValueFromString(szLine, &u4bRegOffset, &u4bMove)) {
+					if (u4bRegOffset == 0xfe || u4bRegOffset == 0xffe) /* Delay specific ms. Only RF configuration requires a delay. */
+ msleep(50);
+ else if (u4bRegOffset == 0xfd) {
+ /* mdelay(5); */
+ for (i = 0; i < 100; i++)
+ udelay(MAX_STALL_TIME);
+ } else if (u4bRegOffset == 0xfc) {
+ /* mdelay(1); */
+ for (i = 0; i < 20; i++)
+ udelay(MAX_STALL_TIME);
+ } else if (u4bRegOffset == 0xfb)
+ udelay(50);
+ else if (u4bRegOffset == 0xfa)
+ udelay(5);
+ else if (u4bRegOffset == 0xf9)
+ udelay(1);
+ else if (u4bRegOffset == 0xffff)
+ break;
+
+ /* Get 2nd hex value as register value. */
+ szLine += u4bMove;
+ if (GetHexValueFromString(szLine, &u4bRegValue, &u4bMove)) {
+ PHY_SetRFReg(Adapter, eRFPath, u4bRegOffset, bRFRegOffsetMask, u4bRegValue);
+
+						/* Temporary addition for frequency lock: without this delay the */
+						/* frequency may shift, e.g. 2412MHz => 2417MHz. */
+						/* If the frequency shifts, the following settings may help: */
+ /* Fractional-N table in radio_a.txt */
+ /* 0x2a 0x00001 channel 1 */
+ /* 0x2b 0x00808 frequency divider. */
+ /* 0x2b 0x53333 */
+ /* 0x2c 0x0000c */
+ udelay(1);
+ }
+ }
+ }
+ }
+ } else
+ DBG_871X("%s(): No File %s, Load from HWImg Array!\n", __func__, pFileName);
+
+ return rtStatus;
+}
+
+static void initDeltaSwingIndexTables(
+ struct adapter *Adapter,
+ char *Band,
+ char *Path,
+ char *Sign,
+ char *Channel,
+ char *Rate,
+ char *Data
+)
+{
+ #define STR_EQUAL_5G(_band, _path, _sign, _rate, _chnl) \
+ ((strcmp(Band, _band) == 0) && (strcmp(Path, _path) == 0) && (strcmp(Sign, _sign) == 0) &&\
+ (strcmp(Rate, _rate) == 0) && (strcmp(Channel, _chnl) == 0)\
+ )
+ #define STR_EQUAL_2G(_band, _path, _sign, _rate) \
+ ((strcmp(Band, _band) == 0) && (strcmp(Path, _path) == 0) && (strcmp(Sign, _sign) == 0) &&\
+ (strcmp(Rate, _rate) == 0)\
+ )
+
+ #define STORE_SWING_TABLE(_array, _iteratedIdx) \
+ for (token = strsep(&Data, delim); token != NULL; token = strsep(&Data, delim)) {\
+ sscanf(token, "%d", &idx);\
+ _array[_iteratedIdx++] = (u8)idx;\
+ } \
+
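+	/*
+	 * STORE_SWING_TABLE splits the comma-separated Data string and stores
+	 * each decimal entry into the selected delta-swing table, starting at
+	 * the given index.
+	 */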
+ struct hal_com_data *pHalData = GET_HAL_DATA(Adapter);
+ PDM_ODM_T pDM_Odm = &pHalData->odmpriv;
+ PODM_RF_CAL_T pRFCalibrateInfo = &(pDM_Odm->RFCalibrateInfo);
+ u32 j = 0;
+ char *token;
+ char delim[] = ",";
+ u32 idx = 0;
+
+ /* DBG_871X("===>initDeltaSwingIndexTables(): Band: %s;\nPath: %s;\nSign: %s;\nChannel: %s;\nRate: %s;\n, Data: %s;\n", */
+ /* Band, Path, Sign, Channel, Rate, Data); */
+
+ if (STR_EQUAL_2G("2G", "A", "+", "CCK")) {
+ STORE_SWING_TABLE(pRFCalibrateInfo->DeltaSwingTableIdx_2GCCKA_P, j);
+ } else if (STR_EQUAL_2G("2G", "A", "-", "CCK")) {
+ STORE_SWING_TABLE(pRFCalibrateInfo->DeltaSwingTableIdx_2GCCKA_N, j);
+ } else if (STR_EQUAL_2G("2G", "B", "+", "CCK")) {
+ STORE_SWING_TABLE(pRFCalibrateInfo->DeltaSwingTableIdx_2GCCKB_P, j);
+ } else if (STR_EQUAL_2G("2G", "B", "-", "CCK")) {
+ STORE_SWING_TABLE(pRFCalibrateInfo->DeltaSwingTableIdx_2GCCKB_N, j);
+ } else if (STR_EQUAL_2G("2G", "A", "+", "ALL")) {
+ STORE_SWING_TABLE(pRFCalibrateInfo->DeltaSwingTableIdx_2GA_P, j);
+ } else if (STR_EQUAL_2G("2G", "A", "-", "ALL")) {
+ STORE_SWING_TABLE(pRFCalibrateInfo->DeltaSwingTableIdx_2GA_N, j);
+ } else if (STR_EQUAL_2G("2G", "B", "+", "ALL")) {
+ STORE_SWING_TABLE(pRFCalibrateInfo->DeltaSwingTableIdx_2GB_P, j);
+ } else if (STR_EQUAL_2G("2G", "B", "-", "ALL")) {
+ STORE_SWING_TABLE(pRFCalibrateInfo->DeltaSwingTableIdx_2GB_N, j);
+ } else if (STR_EQUAL_5G("5G", "A", "+", "ALL", "0")) {
+ STORE_SWING_TABLE(pRFCalibrateInfo->DeltaSwingTableIdx_5GA_P[0], j);
+ } else if (STR_EQUAL_5G("5G", "A", "-", "ALL", "0")) {
+ STORE_SWING_TABLE(pRFCalibrateInfo->DeltaSwingTableIdx_5GA_N[0], j);
+ } else if (STR_EQUAL_5G("5G", "B", "+", "ALL", "0")) {
+ STORE_SWING_TABLE(pRFCalibrateInfo->DeltaSwingTableIdx_5GB_P[0], j);
+ } else if (STR_EQUAL_5G("5G", "B", "-", "ALL", "0")) {
+ STORE_SWING_TABLE(pRFCalibrateInfo->DeltaSwingTableIdx_5GB_N[0], j);
+ } else if (STR_EQUAL_5G("5G", "A", "+", "ALL", "1")) {
+ STORE_SWING_TABLE(pRFCalibrateInfo->DeltaSwingTableIdx_5GA_P[1], j);
+ } else if (STR_EQUAL_5G("5G", "A", "-", "ALL", "1")) {
+ STORE_SWING_TABLE(pRFCalibrateInfo->DeltaSwingTableIdx_5GA_N[1], j);
+ } else if (STR_EQUAL_5G("5G", "B", "+", "ALL", "1")) {
+ STORE_SWING_TABLE(pRFCalibrateInfo->DeltaSwingTableIdx_5GB_P[1], j);
+ } else if (STR_EQUAL_5G("5G", "B", "-", "ALL", "1")) {
+ STORE_SWING_TABLE(pRFCalibrateInfo->DeltaSwingTableIdx_5GB_N[1], j);
+ } else if (STR_EQUAL_5G("5G", "A", "+", "ALL", "2")) {
+ STORE_SWING_TABLE(pRFCalibrateInfo->DeltaSwingTableIdx_5GA_P[2], j);
+ } else if (STR_EQUAL_5G("5G", "A", "-", "ALL", "2")) {
+ STORE_SWING_TABLE(pRFCalibrateInfo->DeltaSwingTableIdx_5GA_N[2], j);
+ } else if (STR_EQUAL_5G("5G", "B", "+", "ALL", "2")) {
+ STORE_SWING_TABLE(pRFCalibrateInfo->DeltaSwingTableIdx_5GB_P[2], j);
+ } else if (STR_EQUAL_5G("5G", "B", "-", "ALL", "2")) {
+ STORE_SWING_TABLE(pRFCalibrateInfo->DeltaSwingTableIdx_5GB_N[2], j);
+ } else if (STR_EQUAL_5G("5G", "A", "+", "ALL", "3")) {
+ STORE_SWING_TABLE(pRFCalibrateInfo->DeltaSwingTableIdx_5GA_P[3], j);
+ } else if (STR_EQUAL_5G("5G", "A", "-", "ALL", "3")) {
+ STORE_SWING_TABLE(pRFCalibrateInfo->DeltaSwingTableIdx_5GA_N[3], j);
+ } else if (STR_EQUAL_5G("5G", "B", "+", "ALL", "3")) {
+ STORE_SWING_TABLE(pRFCalibrateInfo->DeltaSwingTableIdx_5GB_P[3], j);
+ } else if (STR_EQUAL_5G("5G", "B", "-", "ALL", "3")) {
+ STORE_SWING_TABLE(pRFCalibrateInfo->DeltaSwingTableIdx_5GB_N[3], j);
+ } else
+ DBG_871X("===>initDeltaSwingIndexTables(): The input is invalid!!\n");
+}
+
+int PHY_ConfigRFWithTxPwrTrackParaFile(struct adapter *Adapter, char *pFileName)
+{
+ struct hal_com_data *pHalData = GET_HAL_DATA(Adapter);
+ int rlen = 0, rtStatus = _FAIL;
+ char *szLine, *ptmp;
+ u32 i = 0;
+
+ if (!(Adapter->registrypriv.load_phy_file & LOAD_RF_TXPWR_TRACK_PARA_FILE))
+ return rtStatus;
+
+ memset(pHalData->para_file_buf, 0, MAX_PARA_FILE_BUF_LEN);
+
+ if ((pHalData->rf_tx_pwr_track_len == 0) && (pHalData->rf_tx_pwr_track == NULL)) {
+ rtw_merge_string(file_path_bs, PATH_MAX, rtw_phy_file_path, pFileName);
+
+ if (rtw_is_file_readable(file_path_bs) == true) {
+ rlen = rtw_retrive_from_file(file_path_bs, pHalData->para_file_buf, MAX_PARA_FILE_BUF_LEN);
+ if (rlen > 0) {
+ rtStatus = _SUCCESS;
+ pHalData->rf_tx_pwr_track = vzalloc(rlen);
+ if (pHalData->rf_tx_pwr_track) {
+ memcpy(pHalData->rf_tx_pwr_track, pHalData->para_file_buf, rlen);
+ pHalData->rf_tx_pwr_track_len = rlen;
+ } else
+ DBG_871X("%s rf_tx_pwr_track alloc fail !\n", __func__);
+ }
+ }
+ } else {
+ if ((pHalData->rf_tx_pwr_track_len != 0) && (pHalData->rf_tx_pwr_track != NULL)) {
+ memcpy(pHalData->para_file_buf, pHalData->rf_tx_pwr_track, pHalData->rf_tx_pwr_track_len);
+ rtStatus = _SUCCESS;
+ } else
+ DBG_871X("%s(): Critical Error !!!\n", __func__);
+ }
+
+ if (rtStatus == _SUCCESS) {
+ /* DBG_871X("%s(): read %s successfully\n", __func__, pFileName); */
+
+ ptmp = pHalData->para_file_buf;
+ for (szLine = GetLineFromBuffer(ptmp); szLine != NULL; szLine = GetLineFromBuffer(ptmp)) {
+ if (!IsCommentString(szLine)) {
+ char band[5] = "", path[5] = "", sign[5] = "";
+ char chnl[5] = "", rate[10] = "";
+ char data[300] = ""; /* 100 is too small */
+
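+				/*
+				 * Each non-comment line appears to be of the form
+				 * "[2G][A][+][CCK][0]{d0, d1, ...}": band, RF path, sign,
+				 * rate group, channel group and a brace-enclosed list of
+				 * delta-swing values (the channel group is only used to
+				 * select the 5G sub-table).
+				 */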
+ if (strlen(szLine) < 10 || szLine[0] != '[')
+ continue;
+
+ strncpy(band, szLine+1, 2);
+ strncpy(path, szLine+5, 1);
+ strncpy(sign, szLine+8, 1);
+
+ i = 10; /* szLine+10 */
+ if (!ParseQualifiedString(szLine, &i, rate, '[', ']')) {
+ /* DBG_871X("Fail to parse rate!\n"); */
+ }
+ if (!ParseQualifiedString(szLine, &i, chnl, '[', ']')) {
+ /* DBG_871X("Fail to parse channel group!\n"); */
+ }
+ while (szLine[i] != '{' && i < strlen(szLine))
+ i++;
+ if (!ParseQualifiedString(szLine, &i, data, '{', '}')) {
+ /* DBG_871X("Fail to parse data!\n"); */
+ }
+
+ initDeltaSwingIndexTables(Adapter, band, path, sign, chnl, rate, data);
+ }
+ }
+ } else
+ DBG_871X("%s(): No File %s, Load from HWImg Array!\n", __func__, pFileName);
+
+ return rtStatus;
+}
+
+static int phy_ParsePowerLimitTableFile(struct adapter *Adapter, char *buffer)
+{
+ u32 i = 0, forCnt = 0;
+ u8 loadingStage = 0, limitValue = 0, fraction = 0;
+ char *szLine, *ptmp;
+ int rtStatus = _SUCCESS;
+ char band[10], bandwidth[10], rateSection[10],
+ regulation[TXPWR_LMT_MAX_REGULATION_NUM][10], rfPath[10], colNumBuf[10];
+ u8 colNum = 0;
+
+ DBG_871X("===>phy_ParsePowerLimitTableFile()\n");
+
+ if (Adapter->registrypriv.RegDecryptCustomFile == 1)
+ phy_DecryptBBPgParaFile(Adapter, buffer);
+
+ ptmp = buffer;
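+	/*
+	 * The file is parsed as a small state machine:
+	 *   stage 0 - "## <band>, <bandwidth>, <rf path>, <rate section>" table header
+	 *   stage 1 - the "## START" marker
+	 *   stage 2 - a "## #<n>#" column count followed by <n> regulation names
+	 *   stage 3 - "ch<nn> <limit> ..." rows until the "## END" marker
+	 */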
+ for (szLine = GetLineFromBuffer(ptmp); szLine != NULL; szLine = GetLineFromBuffer(ptmp)) {
+ /* skip comment */
+ if (IsCommentString(szLine)) {
+ continue;
+ }
+
+ if (loadingStage == 0) {
+ for (forCnt = 0; forCnt < TXPWR_LMT_MAX_REGULATION_NUM; ++forCnt)
+ memset((void *) regulation[forCnt], 0, 10);
+
+ memset((void *) band, 0, 10);
+ memset((void *) bandwidth, 0, 10);
+ memset((void *) rateSection, 0, 10);
+ memset((void *) rfPath, 0, 10);
+ memset((void *) colNumBuf, 0, 10);
+
+ if (szLine[0] != '#' || szLine[1] != '#')
+ continue;
+
+ /* skip the space */
+ i = 2;
+ while (szLine[i] == ' ' || szLine[i] == '\t')
+ ++i;
+
+			szLine[--i] = ' '; /* put a space back in front of the band info so ' ' can serve as the left delimiter below */
+
+ /* Parse the label of the table */
+ if (!ParseQualifiedString(szLine, &i, band, ' ', ',')) {
+ DBG_871X("Fail to parse band!\n");
+ return _FAIL;
+ }
+ if (!ParseQualifiedString(szLine, &i, bandwidth, ' ', ',')) {
+ DBG_871X("Fail to parse bandwidth!\n");
+ return _FAIL;
+ }
+ if (!ParseQualifiedString(szLine, &i, rfPath, ' ', ',')) {
+ DBG_871X("Fail to parse rf path!\n");
+ return _FAIL;
+ }
+ if (!ParseQualifiedString(szLine, &i, rateSection, ' ', ',')) {
+ DBG_871X("Fail to parse rate!\n");
+ return _FAIL;
+ }
+
+ loadingStage = 1;
+ } else if (loadingStage == 1) {
+ if (szLine[0] != '#' || szLine[1] != '#')
+ continue;
+
+ /* skip the space */
+ i = 2;
+ while (szLine[i] == ' ' || szLine[i] == '\t')
+ ++i;
+
+ if (!eqNByte((u8 *)(szLine + i), (u8 *)("START"), 5)) {
+ DBG_871X("Lost \"## START\" label\n");
+ return _FAIL;
+ }
+
+ loadingStage = 2;
+ } else if (loadingStage == 2) {
+ if (szLine[0] != '#' || szLine[1] != '#')
+ continue;
+
+ /* skip the space */
+ i = 2;
+ while (szLine[i] == ' ' || szLine[i] == '\t')
+ ++i;
+
+ if (!ParseQualifiedString(szLine, &i, colNumBuf, '#', '#')) {
+ DBG_871X("Fail to parse column number!\n");
+ return _FAIL;
+ }
+
+ if (!GetU1ByteIntegerFromStringInDecimal(colNumBuf, &colNum))
+ return _FAIL;
+
+ if (colNum > TXPWR_LMT_MAX_REGULATION_NUM) {
+ DBG_871X(
+ "unvalid col number %d (greater than max %d)\n",
+ colNum, TXPWR_LMT_MAX_REGULATION_NUM
+ );
+ return _FAIL;
+ }
+
+ for (forCnt = 0; forCnt < colNum; ++forCnt) {
+ u8 regulation_name_cnt = 0;
+
+ /* skip the space */
+ while (szLine[i] == ' ' || szLine[i] == '\t')
+ ++i;
+
+ while (szLine[i] != ' ' && szLine[i] != '\t' && szLine[i] != '\0')
+ regulation[forCnt][regulation_name_cnt++] = szLine[i++];
+ /* DBG_871X("regulation %s!\n", regulation[forCnt]); */
+
+ if (regulation_name_cnt == 0) {
+ DBG_871X("unvalid number of regulation!\n");
+ return _FAIL;
+ }
+ }
+
+ loadingStage = 3;
+ } else if (loadingStage == 3) {
+ char channel[10] = {0}, powerLimit[10] = {0};
+ u8 cnt = 0;
+
+ /* the table ends */
+ if (szLine[0] == '#' && szLine[1] == '#') {
+ i = 2;
+ while (szLine[i] == ' ' || szLine[i] == '\t')
+ ++i;
+
+ if (eqNByte((u8 *)(szLine + i), (u8 *)("END"), 3)) {
+ loadingStage = 0;
+ continue;
+ } else {
+ DBG_871X("Wrong format\n");
+ DBG_871X("<===== phy_ParsePowerLimitTableFile()\n");
+ return _FAIL;
+ }
+ }
+
+ if ((szLine[0] != 'c' && szLine[0] != 'C') ||
+ (szLine[1] != 'h' && szLine[1] != 'H')) {
+ DBG_871X("Meet wrong channel => power limt pair\n");
+ continue;
+ }
+ i = 2;/* move to the location behind 'h' */
+
+ /* load the channel number */
+ cnt = 0;
+ while (szLine[i] >= '0' && szLine[i] <= '9') {
+ channel[cnt] = szLine[i];
+ ++cnt;
+ ++i;
+ }
+ /* DBG_871X("chnl %s!\n", channel); */
+
+ for (forCnt = 0; forCnt < colNum; ++forCnt) {
+ /* skip the space between channel number and the power limit value */
+ while (szLine[i] == ' ' || szLine[i] == '\t')
+ ++i;
+
+ /* load the power limit value */
+ cnt = 0;
+ fraction = 0;
+ memset((void *) powerLimit, 0, 10);
+ while ((szLine[i] >= '0' && szLine[i] <= '9') || szLine[i] == '.') {
+ if (szLine[i] == '.') {
+ if ((szLine[i+1] >= '0' && szLine[i+1] <= '9')) {
+ fraction = szLine[i+1];
+ i += 2;
+ } else {
+ DBG_871X("Wrong fraction in TXPWR_LMT.txt\n");
+ return _FAIL;
+ }
+
+ break;
+ }
+
+ powerLimit[cnt] = szLine[i];
+ ++cnt;
+ ++i;
+ }
+
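+				/*
+				 * An empty field is stored as "63" (presumably "no limit");
+				 * otherwise the value is doubled (+1 for a ".5" fraction)
+				 * into half-units and written back into powerLimit as a
+				 * decimal string.
+				 */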
+ if (powerLimit[0] == '\0') {
+ powerLimit[0] = '6';
+ powerLimit[1] = '3';
+ i += 2;
+ } else {
+ if (!GetU1ByteIntegerFromStringInDecimal(powerLimit, &limitValue))
+ return _FAIL;
+
+ limitValue *= 2;
+ cnt = 0;
+ if (fraction == '5')
+ ++limitValue;
+
+					/* the value is greater than or equal to 100 */
+ if (limitValue >= 100) {
+ powerLimit[cnt++] = limitValue/100 + '0';
+ limitValue %= 100;
+
+ if (limitValue >= 10) {
+ powerLimit[cnt++] = limitValue/10 + '0';
+ limitValue %= 10;
+ } else
+ powerLimit[cnt++] = '0';
+
+ powerLimit[cnt++] = limitValue + '0';
+				} else if (limitValue >= 10) { /* the value is greater than or equal to 10 */
+ powerLimit[cnt++] = limitValue/10 + '0';
+ limitValue %= 10;
+ powerLimit[cnt++] = limitValue + '0';
+ }
+ /* the value is less than 10 */
+ else
+ powerLimit[cnt++] = limitValue + '0';
+
+ powerLimit[cnt] = '\0';
+ }
+
+ /* DBG_871X("ch%s => %s\n", channel, powerLimit); */
+
+ /* store the power limit value */
+ PHY_SetTxPowerLimit(Adapter, (u8 *)regulation[forCnt], (u8 *)band,
+ (u8 *)bandwidth, (u8 *)rateSection, (u8 *)rfPath, (u8 *)channel, (u8 *)powerLimit);
+
+ }
+ } else {
+ DBG_871X("Abnormal loading stage in phy_ParsePowerLimitTableFile()!\n");
+ rtStatus = _FAIL;
+ break;
+ }
+ }
+
+ DBG_871X("<===phy_ParsePowerLimitTableFile()\n");
+ return rtStatus;
+}
+
+int PHY_ConfigRFWithPowerLimitTableParaFile(
+ struct adapter *Adapter, char *pFileName
+)
+{
+ struct hal_com_data *pHalData = GET_HAL_DATA(Adapter);
+ int rlen = 0, rtStatus = _FAIL;
+
+ if (!(Adapter->registrypriv.load_phy_file & LOAD_RF_TXPWR_LMT_PARA_FILE))
+ return rtStatus;
+
+ memset(pHalData->para_file_buf, 0, MAX_PARA_FILE_BUF_LEN);
+
+ if ((pHalData->rf_tx_pwr_lmt_len == 0) && (pHalData->rf_tx_pwr_lmt == NULL)) {
+ rtw_merge_string(file_path_bs, PATH_MAX, rtw_phy_file_path, pFileName);
+
+ if (rtw_is_file_readable(file_path_bs) == true) {
+ rlen = rtw_retrive_from_file(file_path_bs, pHalData->para_file_buf, MAX_PARA_FILE_BUF_LEN);
+ if (rlen > 0) {
+ rtStatus = _SUCCESS;
+ pHalData->rf_tx_pwr_lmt = vzalloc(rlen);
+ if (pHalData->rf_tx_pwr_lmt) {
+ memcpy(pHalData->rf_tx_pwr_lmt, pHalData->para_file_buf, rlen);
+ pHalData->rf_tx_pwr_lmt_len = rlen;
+ } else
+ DBG_871X("%s rf_tx_pwr_lmt alloc fail !\n", __func__);
+ }
+ }
+ } else {
+ if ((pHalData->rf_tx_pwr_lmt_len != 0) && (pHalData->rf_tx_pwr_lmt != NULL)) {
+ memcpy(pHalData->para_file_buf, pHalData->rf_tx_pwr_lmt, pHalData->rf_tx_pwr_lmt_len);
+ rtStatus = _SUCCESS;
+ } else
+ DBG_871X("%s(): Critical Error !!!\n", __func__);
+ }
+
+ if (rtStatus == _SUCCESS) {
+ /* DBG_871X("%s(): read %s ok\n", __func__, pFileName); */
+ rtStatus = phy_ParsePowerLimitTableFile(Adapter, pHalData->para_file_buf);
+ } else
+ DBG_871X("%s(): No File %s, Load from HWImg Array!\n", __func__, pFileName);
+
+ return rtStatus;
+}
+
+void phy_free_filebuf(struct adapter *padapter)
+{
+ struct hal_com_data *pHalData = GET_HAL_DATA(padapter);
+
+ if (pHalData->mac_reg)
+ vfree(pHalData->mac_reg);
+ if (pHalData->bb_phy_reg)
+ vfree(pHalData->bb_phy_reg);
+ if (pHalData->bb_agc_tab)
+ vfree(pHalData->bb_agc_tab);
+ if (pHalData->bb_phy_reg_pg)
+ vfree(pHalData->bb_phy_reg_pg);
+ if (pHalData->bb_phy_reg_mp)
+ vfree(pHalData->bb_phy_reg_mp);
+ if (pHalData->rf_radio_a)
+ vfree(pHalData->rf_radio_a);
+ if (pHalData->rf_radio_b)
+ vfree(pHalData->rf_radio_b);
+ if (pHalData->rf_tx_pwr_track)
+ vfree(pHalData->rf_tx_pwr_track);
+ if (pHalData->rf_tx_pwr_lmt)
+ vfree(pHalData->rf_tx_pwr_lmt);
+
+}
+
diff --git a/drivers/staging/rtl8723bs/hal/hal_intf.c b/drivers/staging/rtl8723bs/hal/hal_intf.c
new file mode 100644
index 000000000000..3463f40d6a31
--- /dev/null
+++ b/drivers/staging/rtl8723bs/hal/hal_intf.c
@@ -0,0 +1,474 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+
+#define _HAL_INTF_C_
+
+#include <drv_types.h>
+#include <rtw_debug.h>
+#include <hal_data.h>
+
+void rtw_hal_chip_configure(struct adapter *padapter)
+{
+ if (padapter->HalFunc.intf_chip_configure)
+ padapter->HalFunc.intf_chip_configure(padapter);
+}
+
+void rtw_hal_read_chip_info(struct adapter *padapter)
+{
+ if (padapter->HalFunc.read_adapter_info)
+ padapter->HalFunc.read_adapter_info(padapter);
+}
+
+void rtw_hal_read_chip_version(struct adapter *padapter)
+{
+ if (padapter->HalFunc.read_chip_version)
+ padapter->HalFunc.read_chip_version(padapter);
+}
+
+void rtw_hal_def_value_init(struct adapter *padapter)
+{
+ if (is_primary_adapter(padapter))
+ if (padapter->HalFunc.init_default_value)
+ padapter->HalFunc.init_default_value(padapter);
+}
+
+void rtw_hal_free_data(struct adapter *padapter)
+{
+ /* free HAL Data */
+ rtw_hal_data_deinit(padapter);
+
+ if (is_primary_adapter(padapter))
+ if (padapter->HalFunc.free_hal_data)
+ padapter->HalFunc.free_hal_data(padapter);
+}
+
+void rtw_hal_dm_init(struct adapter *padapter)
+{
+ if (is_primary_adapter(padapter))
+ if (padapter->HalFunc.dm_init)
+ padapter->HalFunc.dm_init(padapter);
+}
+
+void rtw_hal_dm_deinit(struct adapter *padapter)
+{
+ /* cancel dm timer */
+ if (is_primary_adapter(padapter))
+ if (padapter->HalFunc.dm_deinit)
+ padapter->HalFunc.dm_deinit(padapter);
+}
+
+static void rtw_hal_init_opmode(struct adapter *padapter)
+{
+ enum NDIS_802_11_NETWORK_INFRASTRUCTURE networkType = Ndis802_11InfrastructureMax;
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ sint fw_state;
+
+ fw_state = get_fwstate(pmlmepriv);
+
+ if (fw_state & WIFI_ADHOC_STATE)
+ networkType = Ndis802_11IBSS;
+ else if (fw_state & WIFI_STATION_STATE)
+ networkType = Ndis802_11Infrastructure;
+ else if (fw_state & WIFI_AP_STATE)
+ networkType = Ndis802_11APMode;
+ else
+ return;
+
+ rtw_setopmode_cmd(padapter, networkType, false);
+}
+
+uint rtw_hal_init(struct adapter *padapter)
+{
+ uint status;
+ struct dvobj_priv *dvobj = adapter_to_dvobj(padapter);
+
+ status = padapter->HalFunc.hal_init(padapter);
+
+ if (status == _SUCCESS) {
+ rtw_hal_init_opmode(padapter);
+
+ dvobj->padapters->hw_init_completed = true;
+
+ if (padapter->registrypriv.notch_filter == 1)
+ rtw_hal_notch_filter(padapter, 1);
+
+ rtw_hal_reset_security_engine(padapter);
+
+ rtw_sec_restore_wep_key(dvobj->padapters);
+
+ init_hw_mlme_ext(padapter);
+
+ rtw_bb_rf_gain_offset(padapter);
+ } else {
+ dvobj->padapters->hw_init_completed = false;
+ DBG_871X("rtw_hal_init: hal__init fail\n");
+ }
+
+ RT_TRACE(_module_hal_init_c_, _drv_err_, ("-rtl871x_hal_init:status = 0x%x\n", status));
+
+ return status;
+
+}
+
+uint rtw_hal_deinit(struct adapter *padapter)
+{
+ uint status = _SUCCESS;
+ struct dvobj_priv *dvobj = adapter_to_dvobj(padapter);
+
+ status = padapter->HalFunc.hal_deinit(padapter);
+
+ if (status == _SUCCESS) {
+ padapter = dvobj->padapters;
+ padapter->hw_init_completed = false;
+ } else {
+ DBG_871X("\n rtw_hal_deinit: hal_init fail\n");
+ }
+ return status;
+}
+
+void rtw_hal_set_hwreg(struct adapter *padapter, u8 variable, u8 *val)
+{
+ if (padapter->HalFunc.SetHwRegHandler)
+ padapter->HalFunc.SetHwRegHandler(padapter, variable, val);
+}
+
+void rtw_hal_get_hwreg(struct adapter *padapter, u8 variable, u8 *val)
+{
+ if (padapter->HalFunc.GetHwRegHandler)
+ padapter->HalFunc.GetHwRegHandler(padapter, variable, val);
+}
+
+void rtw_hal_set_hwreg_with_buf(struct adapter *padapter, u8 variable, u8 *pbuf, int len)
+{
+ if (padapter->HalFunc.SetHwRegHandlerWithBuf)
+ padapter->HalFunc.SetHwRegHandlerWithBuf(padapter, variable, pbuf, len);
+}
+
+u8 rtw_hal_set_def_var(struct adapter *padapter, enum HAL_DEF_VARIABLE eVariable, void *pValue)
+{
+ if (padapter->HalFunc.SetHalDefVarHandler)
+ return padapter->HalFunc.SetHalDefVarHandler(padapter, eVariable, pValue);
+ return _FAIL;
+}
+
+u8 rtw_hal_get_def_var(struct adapter *padapter, enum HAL_DEF_VARIABLE eVariable, void *pValue)
+{
+ if (padapter->HalFunc.GetHalDefVarHandler)
+ return padapter->HalFunc.GetHalDefVarHandler(padapter, eVariable, pValue);
+ return _FAIL;
+}
+
+void rtw_hal_set_odm_var(struct adapter *padapter, enum HAL_ODM_VARIABLE eVariable, void *pValue1, bool bSet)
+{
+ if (padapter->HalFunc.SetHalODMVarHandler)
+ padapter->HalFunc.SetHalODMVarHandler(padapter, eVariable, pValue1, bSet);
+}
+
+void rtw_hal_get_odm_var(struct adapter *padapter, enum HAL_ODM_VARIABLE eVariable, void *pValue1, void *pValue2)
+{
+ if (padapter->HalFunc.GetHalODMVarHandler)
+ padapter->HalFunc.GetHalODMVarHandler(padapter, eVariable, pValue1, pValue2);
+}
+
+void rtw_hal_enable_interrupt(struct adapter *padapter)
+{
+ if (padapter->HalFunc.enable_interrupt)
+ padapter->HalFunc.enable_interrupt(padapter);
+ else
+ DBG_871X("%s: HalFunc.enable_interrupt is NULL!\n", __func__);
+
+}
+
+void rtw_hal_disable_interrupt(struct adapter *padapter)
+{
+ if (padapter->HalFunc.disable_interrupt)
+ padapter->HalFunc.disable_interrupt(padapter);
+ else
+ DBG_871X("%s: HalFunc.disable_interrupt is NULL!\n", __func__);
+
+}
+
+u8 rtw_hal_check_ips_status(struct adapter *padapter)
+{
+ u8 val = false;
+ if (padapter->HalFunc.check_ips_status)
+ val = padapter->HalFunc.check_ips_status(padapter);
+ else
+ DBG_871X("%s: HalFunc.check_ips_status is NULL!\n", __func__);
+
+ return val;
+}
+
+s32 rtw_hal_xmitframe_enqueue(struct adapter *padapter, struct xmit_frame *pxmitframe)
+{
+ if (padapter->HalFunc.hal_xmitframe_enqueue)
+ return padapter->HalFunc.hal_xmitframe_enqueue(padapter, pxmitframe);
+
+ return false;
+}
+
+s32 rtw_hal_xmit(struct adapter *padapter, struct xmit_frame *pxmitframe)
+{
+ if (padapter->HalFunc.hal_xmit)
+ return padapter->HalFunc.hal_xmit(padapter, pxmitframe);
+
+ return false;
+}
+
+/*
+ * [IMPORTANT] This function may be called in interrupt context.
+ */
+s32 rtw_hal_mgnt_xmit(struct adapter *padapter, struct xmit_frame *pmgntframe)
+{
+ s32 ret = _FAIL;
+ update_mgntframe_attrib_addr(padapter, pmgntframe);
+ /* pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET; */
+ /* pwlanhdr = (struct rtw_ieee80211_hdr *)pframe; */
+ /* memcpy(pmgntframe->attrib.ra, pwlanhdr->addr1, ETH_ALEN); */
+
+ if (padapter->securitypriv.binstallBIPkey == true) {
+ if (IS_MCAST(pmgntframe->attrib.ra)) {
+ pmgntframe->attrib.encrypt = _BIP_;
+ /* pmgntframe->attrib.bswenc = true; */
+ } else {
+ pmgntframe->attrib.encrypt = _AES_;
+ pmgntframe->attrib.bswenc = true;
+ }
+ rtw_mgmt_xmitframe_coalesce(padapter, pmgntframe->pkt, pmgntframe);
+ }
+
+ if (padapter->HalFunc.mgnt_xmit)
+ ret = padapter->HalFunc.mgnt_xmit(padapter, pmgntframe);
+ return ret;
+}
+
+s32 rtw_hal_init_xmit_priv(struct adapter *padapter)
+{
+ if (padapter->HalFunc.init_xmit_priv != NULL)
+ return padapter->HalFunc.init_xmit_priv(padapter);
+ return _FAIL;
+}
+
+void rtw_hal_free_xmit_priv(struct adapter *padapter)
+{
+ if (padapter->HalFunc.free_xmit_priv != NULL)
+ padapter->HalFunc.free_xmit_priv(padapter);
+}
+
+s32 rtw_hal_init_recv_priv(struct adapter *padapter)
+{
+ if (padapter->HalFunc.init_recv_priv)
+ return padapter->HalFunc.init_recv_priv(padapter);
+
+ return _FAIL;
+}
+
+void rtw_hal_free_recv_priv(struct adapter *padapter)
+{
+
+ if (padapter->HalFunc.free_recv_priv)
+ padapter->HalFunc.free_recv_priv(padapter);
+}
+
+void rtw_hal_update_ra_mask(struct sta_info *psta, u8 rssi_level)
+{
+ struct adapter *padapter;
+ struct mlme_priv *pmlmepriv;
+
+ if (!psta)
+ return;
+
+ padapter = psta->padapter;
+
+ pmlmepriv = &(padapter->mlmepriv);
+
+ if (check_fwstate(pmlmepriv, WIFI_AP_STATE) == true)
+ add_RATid(padapter, psta, rssi_level);
+ else {
+ if (padapter->HalFunc.UpdateRAMaskHandler)
+ padapter->HalFunc.UpdateRAMaskHandler(padapter, psta->mac_id, rssi_level);
+ }
+}
+
+void rtw_hal_add_ra_tid(struct adapter *padapter, u32 bitmap, u8 *arg, u8 rssi_level)
+{
+ if (padapter->HalFunc.Add_RateATid)
+ padapter->HalFunc.Add_RateATid(padapter, bitmap, arg, rssi_level);
+}
+
+/* Start the interface-specific thread */
+void rtw_hal_start_thread(struct adapter *padapter)
+{
+ if (padapter->HalFunc.run_thread)
+ padapter->HalFunc.run_thread(padapter);
+}
+/* Stop the interface-specific thread */
+void rtw_hal_stop_thread(struct adapter *padapter)
+{
+ if (padapter->HalFunc.cancel_thread)
+ padapter->HalFunc.cancel_thread(padapter);
+}
+
+u32 rtw_hal_read_bbreg(struct adapter *padapter, u32 RegAddr, u32 BitMask)
+{
+ u32 data = 0;
+ if (padapter->HalFunc.read_bbreg)
+ data = padapter->HalFunc.read_bbreg(padapter, RegAddr, BitMask);
+ return data;
+}
+void rtw_hal_write_bbreg(struct adapter *padapter, u32 RegAddr, u32 BitMask, u32 Data)
+{
+ if (padapter->HalFunc.write_bbreg)
+ padapter->HalFunc.write_bbreg(padapter, RegAddr, BitMask, Data);
+}
+
+u32 rtw_hal_read_rfreg(struct adapter *padapter, u32 eRFPath, u32 RegAddr, u32 BitMask)
+{
+ u32 data = 0;
+ if (padapter->HalFunc.read_rfreg)
+ data = padapter->HalFunc.read_rfreg(padapter, eRFPath, RegAddr, BitMask);
+ return data;
+}
+void rtw_hal_write_rfreg(struct adapter *padapter, u32 eRFPath, u32 RegAddr, u32 BitMask, u32 Data)
+{
+ if (padapter->HalFunc.write_rfreg)
+ padapter->HalFunc.write_rfreg(padapter, eRFPath, RegAddr, BitMask, Data);
+}
+
+void rtw_hal_set_chan(struct adapter *padapter, u8 channel)
+{
+ if (padapter->HalFunc.set_channel_handler)
+ padapter->HalFunc.set_channel_handler(padapter, channel);
+}
+
+void rtw_hal_set_chnl_bw(struct adapter *padapter, u8 channel,
+ enum CHANNEL_WIDTH Bandwidth, u8 Offset40, u8 Offset80)
+{
+ if (padapter->HalFunc.set_chnl_bw_handler)
+ padapter->HalFunc.set_chnl_bw_handler(padapter, channel,
+ Bandwidth, Offset40,
+ Offset80);
+}
+
+void rtw_hal_dm_watchdog(struct adapter *padapter)
+{
+ if (padapter->HalFunc.hal_dm_watchdog)
+ padapter->HalFunc.hal_dm_watchdog(padapter);
+
+}
+
+void rtw_hal_dm_watchdog_in_lps(struct adapter *padapter)
+{
+ if (adapter_to_pwrctl(padapter)->bFwCurrentInPSMode == true) {
+ if (padapter->HalFunc.hal_dm_watchdog_in_lps)
+			padapter->HalFunc.hal_dm_watchdog_in_lps(padapter); /* this function's caller is in interrupt context */
+ }
+}
+
+void rtw_hal_bcn_related_reg_setting(struct adapter *padapter)
+{
+ if (padapter->HalFunc.SetBeaconRelatedRegistersHandler)
+ padapter->HalFunc.SetBeaconRelatedRegistersHandler(padapter);
+}
+
+
+s32 rtw_hal_xmit_thread_handler(struct adapter *padapter)
+{
+ if (padapter->HalFunc.xmit_thread_handler)
+ return padapter->HalFunc.xmit_thread_handler(padapter);
+ return _FAIL;
+}
+
+void rtw_hal_notch_filter(struct adapter *adapter, bool enable)
+{
+ if (adapter->HalFunc.hal_notch_filter)
+ adapter->HalFunc.hal_notch_filter(adapter, enable);
+}
+
+void rtw_hal_reset_security_engine(struct adapter *adapter)
+{
+ if (adapter->HalFunc.hal_reset_security_engine)
+ adapter->HalFunc.hal_reset_security_engine(adapter);
+}
+
+bool rtw_hal_c2h_valid(struct adapter *adapter, u8 *buf)
+{
+ return c2h_evt_valid((struct c2h_evt_hdr_88xx *)buf);
+}
+
+s32 rtw_hal_c2h_evt_read(struct adapter *adapter, u8 *buf)
+{
+ return c2h_evt_read_88xx(adapter, buf);
+}
+
+s32 rtw_hal_c2h_handler(struct adapter *adapter, u8 *c2h_evt)
+{
+ s32 ret = _FAIL;
+ if (adapter->HalFunc.c2h_handler)
+ ret = adapter->HalFunc.c2h_handler(adapter, c2h_evt);
+ return ret;
+}
+
+c2h_id_filter rtw_hal_c2h_id_filter_ccx(struct adapter *adapter)
+{
+ return adapter->HalFunc.c2h_id_filter_ccx;
+}
+
+s32 rtw_hal_is_disable_sw_channel_plan(struct adapter *padapter)
+{
+ return GET_HAL_DATA(padapter)->bDisableSWChannelPlan;
+}
+
+s32 rtw_hal_macid_sleep(struct adapter *padapter, u32 macid)
+{
+ u8 support;
+
+
+ support = false;
+ rtw_hal_get_def_var(padapter, HAL_DEF_MACID_SLEEP, &support);
+ if (false == support)
+ return _FAIL;
+
+ rtw_hal_set_hwreg(padapter, HW_VAR_MACID_SLEEP, (u8 *)&macid);
+
+ return _SUCCESS;
+}
+
+s32 rtw_hal_macid_wakeup(struct adapter *padapter, u32 macid)
+{
+ u8 support;
+
+
+ support = false;
+ rtw_hal_get_def_var(padapter, HAL_DEF_MACID_SLEEP, &support);
+ if (false == support)
+ return _FAIL;
+
+ rtw_hal_set_hwreg(padapter, HW_VAR_MACID_WAKEUP, (u8 *)&macid);
+
+ return _SUCCESS;
+}
+
+s32 rtw_hal_fill_h2c_cmd(struct adapter *padapter, u8 ElementID, u32 CmdLen, u8 *pCmdBuffer)
+{
+ s32 ret = _FAIL;
+
+ if (padapter->HalFunc.fill_h2c_cmd)
+ ret = padapter->HalFunc.fill_h2c_cmd(padapter, ElementID, CmdLen, pCmdBuffer);
+ else
+ DBG_871X("%s: func[fill_h2c_cmd] not defined!\n", __func__);
+
+ return ret;
+}
diff --git a/drivers/staging/rtl8723bs/hal/hal_phy.c b/drivers/staging/rtl8723bs/hal/hal_phy.c
new file mode 100644
index 000000000000..c0a899df7aaf
--- /dev/null
+++ b/drivers/staging/rtl8723bs/hal/hal_phy.c
@@ -0,0 +1,224 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#define _HAL_PHY_C_
+
+#include <drv_types.h>
+
+/**
+* Function: PHY_CalculateBitShift
+*
+* Overview: Get the shifted position of the lowest set bit in BitMask
+*
+* Input:
+* u32 BitMask,
+*
+* Output: none
+* Return: u32 The bit position of the lowest set bit in the mask
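+* Example: BitMask = 0x00F0 returns 4.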
+*/
+u32 PHY_CalculateBitShift(u32 BitMask)
+{
+ u32 i;
+
+ for (i = 0; i <= 31; i++) {
+ if (((BitMask>>i) & 0x1) == 1)
+ break;
+ }
+
+ return i;
+}
+
+
+/* */
+/* ==> RF shadow Operation API Code Section!!! */
+/* */
+/*-----------------------------------------------------------------------------
+ * Function: PHY_RFShadowRead
+ * PHY_RFShadowWrite
+ * PHY_RFShadowCompare
+ * PHY_RFShadowRecorver
+ * PHY_RFShadowCompareAll
+ * PHY_RFShadowRecorverAll
+ * PHY_RFShadowCompareFlagSet
+ * PHY_RFShadowRecorverFlagSet
+ *
+ * Overview: When we set an RF register, we must write the shadow copy first.
+ *           While running, we compare the shadow with the real register and
+ *           locate any mismatched address, then decide whether to recover it.
+ *
+ * Input: NONE
+ *
+ * Output: NONE
+ *
+ * Return: NONE
+ *
+ * Revised History:
+ * When Who Remark
+ * 11/20/2008 MHC Create Version 0.
+ *
+ *---------------------------------------------------------------------------*/
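+/*
+ * Typical usage (a sketch, not mandated by this file): call
+ * PHY_RFShadowRefresh() once at init, mirror every driver RF write with
+ * PHY_RFShadowWrite(), then periodically run PHY_RFShadowCompareAll()
+ * followed by PHY_RFShadowRecorverAll() to restore any register whose
+ * hardware value no longer matches its shadow.
+ */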
+u32 PHY_RFShadowRead(IN PADAPTER Adapter, IN u8 eRFPath, IN u32 Offset)
+{
+ return RF_Shadow[eRFPath][Offset].Value;
+
+} /* PHY_RFShadowRead */
+
+
+void PHY_RFShadowWrite(
+ IN PADAPTER Adapter, IN u8 eRFPath, IN u32 Offset, IN u32 Data
+)
+{
+ RF_Shadow[eRFPath][Offset].Value = (Data & bRFRegOffsetMask);
+ RF_Shadow[eRFPath][Offset].Driver_Write = true;
+
+} /* PHY_RFShadowWrite */
+
+
+bool PHY_RFShadowCompare(IN PADAPTER Adapter, IN u8 eRFPath, IN u32 Offset)
+{
+ u32 reg;
+ /* Check if we need to check the register */
+ if (RF_Shadow[eRFPath][Offset].Compare == true) {
+ reg = rtw_hal_read_rfreg(Adapter, eRFPath, Offset, bRFRegOffsetMask);
+ /* Compare shadow and real rf register for 20bits!! */
+ if (RF_Shadow[eRFPath][Offset].Value != reg) {
+ /* Locate error position. */
+ RF_Shadow[eRFPath][Offset].ErrorOrNot = true;
+ /* RT_TRACE(COMP_INIT, DBG_LOUD, */
+ /* PHY_RFShadowCompare RF-%d Addr%02lx Err = %05lx\n", */
+ /* eRFPath, Offset, reg)); */
+ }
+ return RF_Shadow[eRFPath][Offset].ErrorOrNot;
+ }
+ return false;
+} /* PHY_RFShadowCompare */
+
+
+void PHY_RFShadowRecorver(IN PADAPTER Adapter, IN u8 eRFPath, IN u32 Offset)
+{
+	/* Check if the address is in error */
+ if (RF_Shadow[eRFPath][Offset].ErrorOrNot == true) {
+		/* Check if we need to recover the register. */
+ if (RF_Shadow[eRFPath][Offset].Recorver == true) {
+ rtw_hal_write_rfreg(Adapter, eRFPath, Offset, bRFRegOffsetMask,
+ RF_Shadow[eRFPath][Offset].Value);
+ /* RT_TRACE(COMP_INIT, DBG_LOUD, */
+ /* PHY_RFShadowRecorver RF-%d Addr%02lx=%05lx", */
+ /* eRFPath, Offset, RF_Shadow[eRFPath][Offset].Value)); */
+ }
+ }
+
+} /* PHY_RFShadowRecorver */
+
+
+void PHY_RFShadowCompareAll(IN PADAPTER Adapter)
+{
+ u8 eRFPath = 0;
+ u32 Offset = 0, maxReg = GET_RF6052_REAL_MAX_REG(Adapter);
+
+ for (eRFPath = 0; eRFPath < RF6052_MAX_PATH; eRFPath++) {
+ for (Offset = 0; Offset < maxReg; Offset++) {
+ PHY_RFShadowCompare(Adapter, eRFPath, Offset);
+ }
+ }
+
+} /* PHY_RFShadowCompareAll */
+
+
+void PHY_RFShadowRecorverAll(IN PADAPTER Adapter)
+{
+ u8 eRFPath = 0;
+ u32 Offset = 0, maxReg = GET_RF6052_REAL_MAX_REG(Adapter);
+
+ for (eRFPath = 0; eRFPath < RF6052_MAX_PATH; eRFPath++) {
+ for (Offset = 0; Offset < maxReg; Offset++) {
+ PHY_RFShadowRecorver(Adapter, eRFPath, Offset);
+ }
+ }
+
+} /* PHY_RFShadowRecorverAll */
+
+
+void PHY_RFShadowCompareFlagSet(
+ IN PADAPTER Adapter, IN u8 eRFPath, IN u32 Offset, IN u8 Type
+)
+{
+ /* Set True or False!!! */
+ RF_Shadow[eRFPath][Offset].Compare = Type;
+
+} /* PHY_RFShadowCompareFlagSet */
+
+
+void PHY_RFShadowRecorverFlagSet(
+ IN PADAPTER Adapter, IN u8 eRFPath, IN u32 Offset, IN u8 Type
+)
+{
+ /* Set True or False!!! */
+ RF_Shadow[eRFPath][Offset].Recorver = Type;
+
+} /* PHY_RFShadowRecorverFlagSet */
+
+
+void PHY_RFShadowCompareFlagSetAll(IN PADAPTER Adapter)
+{
+ u8 eRFPath = 0;
+ u32 Offset = 0, maxReg = GET_RF6052_REAL_MAX_REG(Adapter);
+
+ for (eRFPath = 0; eRFPath < RF6052_MAX_PATH; eRFPath++) {
+ for (Offset = 0; Offset < maxReg; Offset++) {
+ /* 2008/11/20 MH For S3S4 test, we only check reg 26/27 now!!!! */
+ if (Offset != 0x26 && Offset != 0x27)
+ PHY_RFShadowCompareFlagSet(Adapter, eRFPath, Offset, false);
+ else
+ PHY_RFShadowCompareFlagSet(Adapter, eRFPath, Offset, true);
+ }
+ }
+
+} /* PHY_RFShadowCompareFlagSetAll */
+
+
+void PHY_RFShadowRecorverFlagSetAll(IN PADAPTER Adapter)
+{
+ u8 eRFPath = 0;
+ u32 Offset = 0, maxReg = GET_RF6052_REAL_MAX_REG(Adapter);
+
+ for (eRFPath = 0; eRFPath < RF6052_MAX_PATH; eRFPath++) {
+ for (Offset = 0; Offset < maxReg; Offset++) {
+ /* 2008/11/20 MH For S3S4 test, we only check reg 26/27 now!!!! */
+ if (Offset != 0x26 && Offset != 0x27)
+ PHY_RFShadowRecorverFlagSet(Adapter, eRFPath, Offset, false);
+ else
+ PHY_RFShadowRecorverFlagSet(Adapter, eRFPath, Offset, true);
+ }
+ }
+
+} /* PHY_RFShadowRecorverFlagSetAll */
+
+void PHY_RFShadowRefresh(IN PADAPTER Adapter)
+{
+ u8 eRFPath = 0;
+ u32 Offset = 0, maxReg = GET_RF6052_REAL_MAX_REG(Adapter);
+
+ for (eRFPath = 0; eRFPath < RF6052_MAX_PATH; eRFPath++) {
+ for (Offset = 0; Offset < maxReg; Offset++) {
+ RF_Shadow[eRFPath][Offset].Value = 0;
+ RF_Shadow[eRFPath][Offset].Compare = false;
+ RF_Shadow[eRFPath][Offset].Recorver = false;
+ RF_Shadow[eRFPath][Offset].ErrorOrNot = false;
+ RF_Shadow[eRFPath][Offset].Driver_Write = false;
+ }
+ }
+
+} /* PHY_RFShadowRefresh */
diff --git a/drivers/staging/rtl8723bs/hal/hal_sdio.c b/drivers/staging/rtl8723bs/hal/hal_sdio.c
new file mode 100644
index 000000000000..e147c69e7222
--- /dev/null
+++ b/drivers/staging/rtl8723bs/hal/hal_sdio.c
@@ -0,0 +1,115 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#define _HAL_SDIO_C_
+
+#include <drv_types.h>
+#include <rtw_debug.h>
+#include <hal_data.h>
+
+u8 rtw_hal_sdio_max_txoqt_free_space(struct adapter *padapter)
+{
+ struct hal_com_data *pHalData = GET_HAL_DATA(padapter);
+
+ if (pHalData->SdioTxOQTMaxFreeSpace < 8)
+ pHalData->SdioTxOQTMaxFreeSpace = 8;
+
+ return pHalData->SdioTxOQTMaxFreeSpace;
+}
+
+u8 rtw_hal_sdio_query_tx_freepage(
+ struct adapter *padapter, u8 PageIdx, u8 RequiredPageNum
+)
+{
+ struct hal_com_data *pHalData = GET_HAL_DATA(padapter);
+
+ if ((pHalData->SdioTxFIFOFreePage[PageIdx]+pHalData->SdioTxFIFOFreePage[PUBLIC_QUEUE_IDX]) >= (RequiredPageNum))
+ return true;
+ else
+ return false;
+}
+
+void rtw_hal_sdio_update_tx_freepage(
+ struct adapter *padapter, u8 PageIdx, u8 RequiredPageNum
+)
+{
+ struct hal_com_data *pHalData = GET_HAL_DATA(padapter);
+ u8 DedicatedPgNum = 0;
+ u8 RequiredPublicFreePgNum = 0;
+ /* _irqL irql; */
+
+ /* spin_lock_bh(&pHalData->SdioTxFIFOFreePageLock); */
+
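+	/* Consume the queue's dedicated free pages first; any remainder is
+	 * taken from the public page pool. */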
+ DedicatedPgNum = pHalData->SdioTxFIFOFreePage[PageIdx];
+ if (RequiredPageNum <= DedicatedPgNum) {
+ pHalData->SdioTxFIFOFreePage[PageIdx] -= RequiredPageNum;
+ } else {
+ pHalData->SdioTxFIFOFreePage[PageIdx] = 0;
+ RequiredPublicFreePgNum = RequiredPageNum - DedicatedPgNum;
+ pHalData->SdioTxFIFOFreePage[PUBLIC_QUEUE_IDX] -= RequiredPublicFreePgNum;
+ }
+
+ /* spin_unlock_bh(&pHalData->SdioTxFIFOFreePageLock); */
+}
+
+void rtw_hal_set_sdio_tx_max_length(
+ struct adapter *padapter, u8 numHQ, u8 numNQ, u8 numLQ, u8 numPubQ
+)
+{
+ struct hal_com_data *pHalData = GET_HAL_DATA(padapter);
+ u32 page_size;
+ u32 lenHQ, lenNQ, lenLQ;
+
+ rtw_hal_get_def_var(padapter, HAL_DEF_TX_PAGE_SIZE, &page_size);
+
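+	/*
+	 * Each queue's limit is half of its dedicated plus public page budget,
+	 * converted to bytes and capped at MAX_XMITBUF_SZ below.
+	 */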
+ lenHQ = ((numHQ + numPubQ) >> 1) * page_size;
+ lenNQ = ((numNQ + numPubQ) >> 1) * page_size;
+ lenLQ = ((numLQ + numPubQ) >> 1) * page_size;
+
+ pHalData->sdio_tx_max_len[HI_QUEUE_IDX] =
+ (lenHQ > MAX_XMITBUF_SZ) ? MAX_XMITBUF_SZ : lenHQ;
+ pHalData->sdio_tx_max_len[MID_QUEUE_IDX] =
+ (lenNQ > MAX_XMITBUF_SZ) ? MAX_XMITBUF_SZ : lenNQ;
+ pHalData->sdio_tx_max_len[LOW_QUEUE_IDX] =
+ (lenLQ > MAX_XMITBUF_SZ) ? MAX_XMITBUF_SZ : lenLQ;
+}
+
+u32 rtw_hal_get_sdio_tx_max_length(struct adapter *padapter, u8 queue_idx)
+{
+ struct dvobj_priv *pdvobjpriv = adapter_to_dvobj(padapter);
+ struct hal_com_data *pHalData = GET_HAL_DATA(padapter);
+ u32 deviceId, max_len;
+
+
+ deviceId = ffaddr2deviceId(pdvobjpriv, queue_idx);
+ switch (deviceId) {
+ case WLAN_TX_HIQ_DEVICE_ID:
+ max_len = pHalData->sdio_tx_max_len[HI_QUEUE_IDX];
+ break;
+
+ case WLAN_TX_MIQ_DEVICE_ID:
+ max_len = pHalData->sdio_tx_max_len[MID_QUEUE_IDX];
+ break;
+
+ case WLAN_TX_LOQ_DEVICE_ID:
+ max_len = pHalData->sdio_tx_max_len[LOW_QUEUE_IDX];
+ break;
+
+ default:
+ max_len = pHalData->sdio_tx_max_len[MID_QUEUE_IDX];
+ break;
+ }
+
+ return max_len;
+}
diff --git a/drivers/staging/rtl8723bs/hal/odm.c b/drivers/staging/rtl8723bs/hal/odm.c
new file mode 100644
index 000000000000..2dbf19971e04
--- /dev/null
+++ b/drivers/staging/rtl8723bs/hal/odm.c
@@ -0,0 +1,1446 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+
+#include "odm_precomp.h"
+
+static const u16 dB_Invert_Table[8][12] = {
+ {1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 4, 4},
+ {4, 5, 6, 6, 7, 8, 9, 10, 11, 13, 14, 16},
+ {18, 20, 22, 25, 28, 32, 35, 40, 45, 50, 56, 63},
+ {71, 79, 89, 100, 112, 126, 141, 158, 178, 200, 224, 251},
+ {282, 316, 355, 398, 447, 501, 562, 631, 708, 794, 891, 1000},
+ {1122, 1259, 1413, 1585, 1778, 1995, 2239, 2512, 2818, 3162, 3548, 3981},
+ {4467, 5012, 5623, 6310, 7079, 7943, 8913, 10000, 11220, 12589, 14125,
+ 15849},
+ {17783, 19953, 22387, 25119, 28184, 31623, 35481, 39811, 44668, 50119,
+ 56234, 65535}
+ };
+
+/* Global var */
+
+u32 OFDMSwingTable[OFDM_TABLE_SIZE] = {
+ 0x7f8001fe, /* 0, +6.0dB */
+ 0x788001e2, /* 1, +5.5dB */
+ 0x71c001c7, /* 2, +5.0dB */
+ 0x6b8001ae, /* 3, +4.5dB */
+ 0x65400195, /* 4, +4.0dB */
+ 0x5fc0017f, /* 5, +3.5dB */
+ 0x5a400169, /* 6, +3.0dB */
+ 0x55400155, /* 7, +2.5dB */
+ 0x50800142, /* 8, +2.0dB */
+ 0x4c000130, /* 9, +1.5dB */
+ 0x47c0011f, /* 10, +1.0dB */
+ 0x43c0010f, /* 11, +0.5dB */
+ 0x40000100, /* 12, +0dB */
+ 0x3c8000f2, /* 13, -0.5dB */
+ 0x390000e4, /* 14, -1.0dB */
+ 0x35c000d7, /* 15, -1.5dB */
+ 0x32c000cb, /* 16, -2.0dB */
+ 0x300000c0, /* 17, -2.5dB */
+ 0x2d4000b5, /* 18, -3.0dB */
+ 0x2ac000ab, /* 19, -3.5dB */
+ 0x288000a2, /* 20, -4.0dB */
+ 0x26000098, /* 21, -4.5dB */
+ 0x24000090, /* 22, -5.0dB */
+ 0x22000088, /* 23, -5.5dB */
+ 0x20000080, /* 24, -6.0dB */
+ 0x1e400079, /* 25, -6.5dB */
+ 0x1c800072, /* 26, -7.0dB */
+ 0x1b00006c, /* 27. -7.5dB */
+ 0x19800066, /* 28, -8.0dB */
+ 0x18000060, /* 29, -8.5dB */
+ 0x16c0005b, /* 30, -9.0dB */
+ 0x15800056, /* 31, -9.5dB */
+ 0x14400051, /* 32, -10.0dB */
+ 0x1300004c, /* 33, -10.5dB */
+ 0x12000048, /* 34, -11.0dB */
+ 0x11000044, /* 35, -11.5dB */
+ 0x10000040, /* 36, -12.0dB */
+};
+
+u8 CCKSwingTable_Ch1_Ch13[CCK_TABLE_SIZE][8] = {
+ {0x36, 0x35, 0x2e, 0x25, 0x1c, 0x12, 0x09, 0x04}, /* 0, +0dB */
+ {0x33, 0x32, 0x2b, 0x23, 0x1a, 0x11, 0x08, 0x04}, /* 1, -0.5dB */
+ {0x30, 0x2f, 0x29, 0x21, 0x19, 0x10, 0x08, 0x03}, /* 2, -1.0dB */
+ {0x2d, 0x2d, 0x27, 0x1f, 0x18, 0x0f, 0x08, 0x03}, /* 3, -1.5dB */
+ {0x2b, 0x2a, 0x25, 0x1e, 0x16, 0x0e, 0x07, 0x03}, /* 4, -2.0dB */
+ {0x28, 0x28, 0x22, 0x1c, 0x15, 0x0d, 0x07, 0x03}, /* 5, -2.5dB */
+ {0x26, 0x25, 0x21, 0x1b, 0x14, 0x0d, 0x06, 0x03}, /* 6, -3.0dB */
+ {0x24, 0x23, 0x1f, 0x19, 0x13, 0x0c, 0x06, 0x03}, /* 7, -3.5dB */
+ {0x22, 0x21, 0x1d, 0x18, 0x11, 0x0b, 0x06, 0x02}, /* 8, -4.0dB */
+ {0x20, 0x20, 0x1b, 0x16, 0x11, 0x08, 0x05, 0x02}, /* 9, -4.5dB */
+ {0x1f, 0x1e, 0x1a, 0x15, 0x10, 0x0a, 0x05, 0x02}, /* 10, -5.0dB */
+ {0x1d, 0x1c, 0x18, 0x14, 0x0f, 0x0a, 0x05, 0x02}, /* 11, -5.5dB */
+ {0x1b, 0x1a, 0x17, 0x13, 0x0e, 0x09, 0x04, 0x02}, /* 12, -6.0dB <== default */
+ {0x1a, 0x19, 0x16, 0x12, 0x0d, 0x09, 0x04, 0x02}, /* 13, -6.5dB */
+ {0x18, 0x17, 0x15, 0x11, 0x0c, 0x08, 0x04, 0x02}, /* 14, -7.0dB */
+ {0x17, 0x16, 0x13, 0x10, 0x0c, 0x08, 0x04, 0x02}, /* 15, -7.5dB */
+ {0x16, 0x15, 0x12, 0x0f, 0x0b, 0x07, 0x04, 0x01}, /* 16, -8.0dB */
+ {0x14, 0x14, 0x11, 0x0e, 0x0b, 0x07, 0x03, 0x02}, /* 17, -8.5dB */
+ {0x13, 0x13, 0x10, 0x0d, 0x0a, 0x06, 0x03, 0x01}, /* 18, -9.0dB */
+ {0x12, 0x12, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01}, /* 19, -9.5dB */
+ {0x11, 0x11, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01}, /* 20, -10.0dB */
+ {0x10, 0x10, 0x0e, 0x0b, 0x08, 0x05, 0x03, 0x01}, /* 21, -10.5dB */
+ {0x0f, 0x0f, 0x0d, 0x0b, 0x08, 0x05, 0x03, 0x01}, /* 22, -11.0dB */
+ {0x0e, 0x0e, 0x0c, 0x0a, 0x08, 0x05, 0x02, 0x01}, /* 23, -11.5dB */
+ {0x0d, 0x0d, 0x0c, 0x0a, 0x07, 0x05, 0x02, 0x01}, /* 24, -12.0dB */
+ {0x0d, 0x0c, 0x0b, 0x09, 0x07, 0x04, 0x02, 0x01}, /* 25, -12.5dB */
+ {0x0c, 0x0c, 0x0a, 0x09, 0x06, 0x04, 0x02, 0x01}, /* 26, -13.0dB */
+ {0x0b, 0x0b, 0x0a, 0x08, 0x06, 0x04, 0x02, 0x01}, /* 27, -13.5dB */
+ {0x0b, 0x0a, 0x09, 0x08, 0x06, 0x04, 0x02, 0x01}, /* 28, -14.0dB */
+ {0x0a, 0x0a, 0x09, 0x07, 0x05, 0x03, 0x02, 0x01}, /* 29, -14.5dB */
+ {0x0a, 0x09, 0x08, 0x07, 0x05, 0x03, 0x02, 0x01}, /* 30, -15.0dB */
+ {0x09, 0x09, 0x08, 0x06, 0x05, 0x03, 0x01, 0x01}, /* 31, -15.5dB */
+ {0x09, 0x08, 0x07, 0x06, 0x04, 0x03, 0x01, 0x01} /* 32, -16.0dB */
+};
+
+u8 CCKSwingTable_Ch14[CCK_TABLE_SIZE][8] = {
+ {0x36, 0x35, 0x2e, 0x1b, 0x00, 0x00, 0x00, 0x00}, /* 0, +0dB */
+ {0x33, 0x32, 0x2b, 0x19, 0x00, 0x00, 0x00, 0x00}, /* 1, -0.5dB */
+ {0x30, 0x2f, 0x29, 0x18, 0x00, 0x00, 0x00, 0x00}, /* 2, -1.0dB */
+ {0x2d, 0x2d, 0x17, 0x17, 0x00, 0x00, 0x00, 0x00}, /* 3, -1.5dB */
+ {0x2b, 0x2a, 0x25, 0x15, 0x00, 0x00, 0x00, 0x00}, /* 4, -2.0dB */
+ {0x28, 0x28, 0x24, 0x14, 0x00, 0x00, 0x00, 0x00}, /* 5, -2.5dB */
+ {0x26, 0x25, 0x21, 0x13, 0x00, 0x00, 0x00, 0x00}, /* 6, -3.0dB */
+ {0x24, 0x23, 0x1f, 0x12, 0x00, 0x00, 0x00, 0x00}, /* 7, -3.5dB */
+ {0x22, 0x21, 0x1d, 0x11, 0x00, 0x00, 0x00, 0x00}, /* 8, -4.0dB */
+ {0x20, 0x20, 0x1b, 0x10, 0x00, 0x00, 0x00, 0x00}, /* 9, -4.5dB */
+ {0x1f, 0x1e, 0x1a, 0x0f, 0x00, 0x00, 0x00, 0x00}, /* 10, -5.0dB */
+ {0x1d, 0x1c, 0x18, 0x0e, 0x00, 0x00, 0x00, 0x00}, /* 11, -5.5dB */
+ {0x1b, 0x1a, 0x17, 0x0e, 0x00, 0x00, 0x00, 0x00}, /* 12, -6.0dB <== default */
+ {0x1a, 0x19, 0x16, 0x0d, 0x00, 0x00, 0x00, 0x00}, /* 13, -6.5dB */
+ {0x18, 0x17, 0x15, 0x0c, 0x00, 0x00, 0x00, 0x00}, /* 14, -7.0dB */
+ {0x17, 0x16, 0x13, 0x0b, 0x00, 0x00, 0x00, 0x00}, /* 15, -7.5dB */
+ {0x16, 0x15, 0x12, 0x0b, 0x00, 0x00, 0x00, 0x00}, /* 16, -8.0dB */
+ {0x14, 0x14, 0x11, 0x0a, 0x00, 0x00, 0x00, 0x00}, /* 17, -8.5dB */
+ {0x13, 0x13, 0x10, 0x0a, 0x00, 0x00, 0x00, 0x00}, /* 18, -9.0dB */
+ {0x12, 0x12, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00}, /* 19, -9.5dB */
+ {0x11, 0x11, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00}, /* 20, -10.0dB */
+ {0x10, 0x10, 0x0e, 0x08, 0x00, 0x00, 0x00, 0x00}, /* 21, -10.5dB */
+ {0x0f, 0x0f, 0x0d, 0x08, 0x00, 0x00, 0x00, 0x00}, /* 22, -11.0dB */
+ {0x0e, 0x0e, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00}, /* 23, -11.5dB */
+ {0x0d, 0x0d, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00}, /* 24, -12.0dB */
+ {0x0d, 0x0c, 0x0b, 0x06, 0x00, 0x00, 0x00, 0x00}, /* 25, -12.5dB */
+ {0x0c, 0x0c, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00}, /* 26, -13.0dB */
+ {0x0b, 0x0b, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00}, /* 27, -13.5dB */
+ {0x0b, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 28, -14.0dB */
+ {0x0a, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 29, -14.5dB */
+ {0x0a, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 30, -15.0dB */
+ {0x09, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 31, -15.5dB */
+ {0x09, 0x08, 0x07, 0x04, 0x00, 0x00, 0x00, 0x00} /* 32, -16.0dB */
+};
+
+u32 OFDMSwingTable_New[OFDM_TABLE_SIZE] = {
+ 0x0b40002d, /* 0, -15.0dB */
+ 0x0c000030, /* 1, -14.5dB */
+ 0x0cc00033, /* 2, -14.0dB */
+ 0x0d800036, /* 3, -13.5dB */
+ 0x0e400039, /* 4, -13.0dB */
+ 0x0f00003c, /* 5, -12.5dB */
+ 0x10000040, /* 6, -12.0dB */
+ 0x11000044, /* 7, -11.5dB */
+ 0x12000048, /* 8, -11.0dB */
+ 0x1300004c, /* 9, -10.5dB */
+ 0x14400051, /* 10, -10.0dB */
+ 0x15800056, /* 11, -9.5dB */
+ 0x16c0005b, /* 12, -9.0dB */
+ 0x18000060, /* 13, -8.5dB */
+ 0x19800066, /* 14, -8.0dB */
+ 0x1b00006c, /* 15, -7.5dB */
+ 0x1c800072, /* 16, -7.0dB */
+ 0x1e400079, /* 17, -6.5dB */
+ 0x20000080, /* 18, -6.0dB */
+ 0x22000088, /* 19, -5.5dB */
+ 0x24000090, /* 20, -5.0dB */
+ 0x26000098, /* 21, -4.5dB */
+ 0x288000a2, /* 22, -4.0dB */
+ 0x2ac000ab, /* 23, -3.5dB */
+ 0x2d4000b5, /* 24, -3.0dB */
+ 0x300000c0, /* 25, -2.5dB */
+ 0x32c000cb, /* 26, -2.0dB */
+ 0x35c000d7, /* 27, -1.5dB */
+ 0x390000e4, /* 28, -1.0dB */
+ 0x3c8000f2, /* 29, -0.5dB */
+ 0x40000100, /* 30, +0dB */
+ 0x43c0010f, /* 31, +0.5dB */
+ 0x47c0011f, /* 32, +1.0dB */
+ 0x4c000130, /* 33, +1.5dB */
+ 0x50800142, /* 34, +2.0dB */
+ 0x55400155, /* 35, +2.5dB */
+ 0x5a400169, /* 36, +3.0dB */
+ 0x5fc0017f, /* 37, +3.5dB */
+ 0x65400195, /* 38, +4.0dB */
+ 0x6b8001ae, /* 39, +4.5dB */
+ 0x71c001c7, /* 40, +5.0dB */
+ 0x788001e2, /* 41, +5.5dB */
+ 0x7f8001fe /* 42, +6.0dB */
+};
+
+u8 CCKSwingTable_Ch1_Ch13_New[CCK_TABLE_SIZE][8] = {
+ {0x09, 0x08, 0x07, 0x06, 0x04, 0x03, 0x01, 0x01}, /* 0, -16.0dB */
+ {0x09, 0x09, 0x08, 0x06, 0x05, 0x03, 0x01, 0x01}, /* 1, -15.5dB */
+ {0x0a, 0x09, 0x08, 0x07, 0x05, 0x03, 0x02, 0x01}, /* 2, -15.0dB */
+ {0x0a, 0x0a, 0x09, 0x07, 0x05, 0x03, 0x02, 0x01}, /* 3, -14.5dB */
+ {0x0b, 0x0a, 0x09, 0x08, 0x06, 0x04, 0x02, 0x01}, /* 4, -14.0dB */
+ {0x0b, 0x0b, 0x0a, 0x08, 0x06, 0x04, 0x02, 0x01}, /* 5, -13.5dB */
+ {0x0c, 0x0c, 0x0a, 0x09, 0x06, 0x04, 0x02, 0x01}, /* 6, -13.0dB */
+ {0x0d, 0x0c, 0x0b, 0x09, 0x07, 0x04, 0x02, 0x01}, /* 7, -12.5dB */
+ {0x0d, 0x0d, 0x0c, 0x0a, 0x07, 0x05, 0x02, 0x01}, /* 8, -12.0dB */
+ {0x0e, 0x0e, 0x0c, 0x0a, 0x08, 0x05, 0x02, 0x01}, /* 9, -11.5dB */
+ {0x0f, 0x0f, 0x0d, 0x0b, 0x08, 0x05, 0x03, 0x01}, /* 10, -11.0dB */
+ {0x10, 0x10, 0x0e, 0x0b, 0x08, 0x05, 0x03, 0x01}, /* 11, -10.5dB */
+ {0x11, 0x11, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01}, /* 12, -10.0dB */
+ {0x12, 0x12, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01}, /* 13, -9.5dB */
+ {0x13, 0x13, 0x10, 0x0d, 0x0a, 0x06, 0x03, 0x01}, /* 14, -9.0dB */
+ {0x14, 0x14, 0x11, 0x0e, 0x0b, 0x07, 0x03, 0x02}, /* 15, -8.5dB */
+ {0x16, 0x15, 0x12, 0x0f, 0x0b, 0x07, 0x04, 0x01}, /* 16, -8.0dB */
+ {0x17, 0x16, 0x13, 0x10, 0x0c, 0x08, 0x04, 0x02}, /* 17, -7.5dB */
+ {0x18, 0x17, 0x15, 0x11, 0x0c, 0x08, 0x04, 0x02}, /* 18, -7.0dB */
+ {0x1a, 0x19, 0x16, 0x12, 0x0d, 0x09, 0x04, 0x02}, /* 19, -6.5dB */
+ {0x1b, 0x1a, 0x17, 0x13, 0x0e, 0x09, 0x04, 0x02}, /* 20, -6.0dB */
+ {0x1d, 0x1c, 0x18, 0x14, 0x0f, 0x0a, 0x05, 0x02}, /* 21, -5.5dB */
+ {0x1f, 0x1e, 0x1a, 0x15, 0x10, 0x0a, 0x05, 0x02}, /* 22, -5.0dB */
+ {0x20, 0x20, 0x1b, 0x16, 0x11, 0x08, 0x05, 0x02}, /* 23, -4.5dB */
+ {0x22, 0x21, 0x1d, 0x18, 0x11, 0x0b, 0x06, 0x02}, /* 24, -4.0dB */
+ {0x24, 0x23, 0x1f, 0x19, 0x13, 0x0c, 0x06, 0x03}, /* 25, -3.5dB */
+ {0x26, 0x25, 0x21, 0x1b, 0x14, 0x0d, 0x06, 0x03}, /* 26, -3.0dB */
+ {0x28, 0x28, 0x22, 0x1c, 0x15, 0x0d, 0x07, 0x03}, /* 27, -2.5dB */
+ {0x2b, 0x2a, 0x25, 0x1e, 0x16, 0x0e, 0x07, 0x03}, /* 28, -2.0dB */
+ {0x2d, 0x2d, 0x27, 0x1f, 0x18, 0x0f, 0x08, 0x03}, /* 29, -1.5dB */
+ {0x30, 0x2f, 0x29, 0x21, 0x19, 0x10, 0x08, 0x03}, /* 30, -1.0dB */
+ {0x33, 0x32, 0x2b, 0x23, 0x1a, 0x11, 0x08, 0x04}, /* 31, -0.5dB */
+ {0x36, 0x35, 0x2e, 0x25, 0x1c, 0x12, 0x09, 0x04} /* 32, +0dB */
+};
+
+u8 CCKSwingTable_Ch14_New[CCK_TABLE_SIZE][8] = {
+ {0x09, 0x08, 0x07, 0x04, 0x00, 0x00, 0x00, 0x00}, /* 0, -16.0dB */
+ {0x09, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 1, -15.5dB */
+ {0x0a, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 2, -15.0dB */
+ {0x0a, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 3, -14.5dB */
+ {0x0b, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 4, -14.0dB */
+ {0x0b, 0x0b, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00}, /* 5, -13.5dB */
+ {0x0c, 0x0c, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00}, /* 6, -13.0dB */
+ {0x0d, 0x0c, 0x0b, 0x06, 0x00, 0x00, 0x00, 0x00}, /* 7, -12.5dB */
+ {0x0d, 0x0d, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00}, /* 8, -12.0dB */
+ {0x0e, 0x0e, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00}, /* 9, -11.5dB */
+ {0x0f, 0x0f, 0x0d, 0x08, 0x00, 0x00, 0x00, 0x00}, /* 10, -11.0dB */
+ {0x10, 0x10, 0x0e, 0x08, 0x00, 0x00, 0x00, 0x00}, /* 11, -10.5dB */
+ {0x11, 0x11, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00}, /* 12, -10.0dB */
+ {0x12, 0x12, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00}, /* 13, -9.5dB */
+ {0x13, 0x13, 0x10, 0x0a, 0x00, 0x00, 0x00, 0x00}, /* 14, -9.0dB */
+ {0x14, 0x14, 0x11, 0x0a, 0x00, 0x00, 0x00, 0x00}, /* 15, -8.5dB */
+ {0x16, 0x15, 0x12, 0x0b, 0x00, 0x00, 0x00, 0x00}, /* 16, -8.0dB */
+ {0x17, 0x16, 0x13, 0x0b, 0x00, 0x00, 0x00, 0x00}, /* 17, -7.5dB */
+ {0x18, 0x17, 0x15, 0x0c, 0x00, 0x00, 0x00, 0x00}, /* 18, -7.0dB */
+ {0x1a, 0x19, 0x16, 0x0d, 0x00, 0x00, 0x00, 0x00}, /* 19, -6.5dB */
+ {0x1b, 0x1a, 0x17, 0x0e, 0x00, 0x00, 0x00, 0x00}, /* 20, -6.0dB */
+ {0x1d, 0x1c, 0x18, 0x0e, 0x00, 0x00, 0x00, 0x00}, /* 21, -5.5dB */
+ {0x1f, 0x1e, 0x1a, 0x0f, 0x00, 0x00, 0x00, 0x00}, /* 22, -5.0dB */
+ {0x20, 0x20, 0x1b, 0x10, 0x00, 0x00, 0x00, 0x00}, /* 23, -4.5dB */
+ {0x22, 0x21, 0x1d, 0x11, 0x00, 0x00, 0x00, 0x00}, /* 24, -4.0dB */
+ {0x24, 0x23, 0x1f, 0x12, 0x00, 0x00, 0x00, 0x00}, /* 25, -3.5dB */
+ {0x26, 0x25, 0x21, 0x13, 0x00, 0x00, 0x00, 0x00}, /* 26, -3.0dB */
+ {0x28, 0x28, 0x24, 0x14, 0x00, 0x00, 0x00, 0x00}, /* 27, -2.5dB */
+ {0x2b, 0x2a, 0x25, 0x15, 0x00, 0x00, 0x00, 0x00}, /* 28, -2.0dB */
+ {0x2d, 0x2d, 0x17, 0x17, 0x00, 0x00, 0x00, 0x00}, /* 29, -1.5dB */
+ {0x30, 0x2f, 0x29, 0x18, 0x00, 0x00, 0x00, 0x00}, /* 30, -1.0dB */
+ {0x33, 0x32, 0x2b, 0x19, 0x00, 0x00, 0x00, 0x00}, /* 31, -0.5dB */
+ {0x36, 0x35, 0x2e, 0x1b, 0x00, 0x00, 0x00, 0x00} /* 32, +0dB */
+};
+
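+/* In the scaling table below, 0x200 (index 24) is the +0 dB reference and each step */
+/* scales by about 10^(0.5/20) ~= 1.06, e.g. 0x200 * 1.06 ~= 0x21E (+0.5 dB), while */
+/* the -6 dB / +6 dB ends are roughly half/double the reference (0x101 and 0x3FE). */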
+u32 TxScalingTable_Jaguar[TXSCALE_TABLE_SIZE] = {
+ 0x081, /* 0, -12.0dB */
+ 0x088, /* 1, -11.5dB */
+ 0x090, /* 2, -11.0dB */
+ 0x099, /* 3, -10.5dB */
+ 0x0A2, /* 4, -10.0dB */
+ 0x0AC, /* 5, -9.5dB */
+ 0x0B6, /* 6, -9.0dB */
+ 0x0C0, /* 7, -8.5dB */
+ 0x0CC, /* 8, -8.0dB */
+ 0x0D8, /* 9, -7.5dB */
+ 0x0E5, /* 10, -7.0dB */
+ 0x0F2, /* 11, -6.5dB */
+ 0x101, /* 12, -6.0dB */
+ 0x110, /* 13, -5.5dB */
+ 0x120, /* 14, -5.0dB */
+ 0x131, /* 15, -4.5dB */
+ 0x143, /* 16, -4.0dB */
+ 0x156, /* 17, -3.5dB */
+ 0x16A, /* 18, -3.0dB */
+ 0x180, /* 19, -2.5dB */
+ 0x197, /* 20, -2.0dB */
+ 0x1AF, /* 21, -1.5dB */
+ 0x1C8, /* 22, -1.0dB */
+ 0x1E3, /* 23, -0.5dB */
+ 0x200, /* 24, +0 dB */
+ 0x21E, /* 25, +0.5dB */
+ 0x23E, /* 26, +1.0dB */
+ 0x261, /* 27, +1.5dB */
+ 0x285, /* 28, +2.0dB */
+ 0x2AB, /* 29, +2.5dB */
+ 0x2D3, /* 30, +3.0dB */
+ 0x2FE, /* 31, +3.5dB */
+ 0x32B, /* 32, +4.0dB */
+ 0x35C, /* 33, +4.5dB */
+ 0x38E, /* 34, +5.0dB */
+ 0x3C4, /* 35, +5.5dB */
+ 0x3FE /* 36, +6.0dB */
+};
+
+/* Local function forward declarations. */
+
+/* START------------COMMON INFO RELATED--------------- */
+void odm_CommonInfoSelfInit(PDM_ODM_T pDM_Odm);
+
+void odm_CommonInfoSelfUpdate(PDM_ODM_T pDM_Odm);
+
+void odm_CmnInfoInit_Debug(PDM_ODM_T pDM_Odm);
+
+void odm_BasicDbgMessage(PDM_ODM_T pDM_Odm);
+
+/* END------------COMMON INFO RELATED--------------- */
+
+/* START---------------DIG--------------------------- */
+
+/* Remove by Yuchen */
+
+/* END---------------DIG--------------------------- */
+
+/* START-------BB POWER SAVE----------------------- */
+/* Remove BB power Saving by YuChen */
+/* END---------BB POWER SAVE----------------------- */
+
+void odm_RefreshRateAdaptiveMaskCE(PDM_ODM_T pDM_Odm);
+
+/* Remove by YuChen */
+
+void odm_RSSIMonitorInit(PDM_ODM_T pDM_Odm);
+
+void odm_RSSIMonitorCheckCE(PDM_ODM_T pDM_Odm);
+
+void odm_RSSIMonitorCheck(PDM_ODM_T pDM_Odm);
+
+void odm_SwAntDetectInit(PDM_ODM_T pDM_Odm);
+
+void odm_SwAntDivChkAntSwitchCallback(void *FunctionContext);
+
+
+
+void odm_GlobalAdapterCheck(void);
+
+void odm_RefreshRateAdaptiveMask(PDM_ODM_T pDM_Odm);
+
+void ODM_TXPowerTrackingCheck(PDM_ODM_T pDM_Odm);
+
+void odm_RateAdaptiveMaskInit(PDM_ODM_T pDM_Odm);
+
+void odm_TXPowerTrackingThermalMeterInit(PDM_ODM_T pDM_Odm);
+
+
+void odm_TXPowerTrackingInit(PDM_ODM_T pDM_Odm);
+
+void odm_TXPowerTrackingCheckCE(PDM_ODM_T pDM_Odm);
+
+/* Remove Edca by Yu Chen */
+
+
+#define RxDefaultAnt1 0x65a9
+#define RxDefaultAnt2 0x569a
+
+void odm_InitHybridAntDiv(PDM_ODM_T pDM_Odm);
+
+bool odm_StaDefAntSel(
+ PDM_ODM_T pDM_Odm,
+ u32 OFDM_Ant1_Cnt,
+ u32 OFDM_Ant2_Cnt,
+ u32 CCK_Ant1_Cnt,
+ u32 CCK_Ant2_Cnt,
+ u8 *pDefAnt
+);
+
+void odm_SetRxIdleAnt(PDM_ODM_T pDM_Odm, u8 Ant, bool bDualPath);
+
+
+
+void odm_HwAntDiv(PDM_ODM_T pDM_Odm);
+
+
+/* */
+/* 3 Export Interface */
+/* */
+
+/* */
+/* 2011/09/21 MH Added to describe the resources that different teams need to allocate. */
+/* */
+void ODM_DMInit(PDM_ODM_T pDM_Odm)
+{
+
+ odm_CommonInfoSelfInit(pDM_Odm);
+ odm_CmnInfoInit_Debug(pDM_Odm);
+ odm_DIGInit(pDM_Odm);
+ odm_NHMCounterStatisticsInit(pDM_Odm);
+ odm_AdaptivityInit(pDM_Odm);
+ odm_RateAdaptiveMaskInit(pDM_Odm);
+ ODM_CfoTrackingInit(pDM_Odm);
+ ODM_EdcaTurboInit(pDM_Odm);
+ odm_RSSIMonitorInit(pDM_Odm);
+ odm_TXPowerTrackingInit(pDM_Odm);
+
+ ODM_ClearTxPowerTrackingState(pDM_Odm);
+
+ if (*(pDM_Odm->mp_mode) != 1)
+ odm_PathDiversityInit(pDM_Odm);
+
+ odm_DynamicBBPowerSavingInit(pDM_Odm);
+ odm_DynamicTxPowerInit(pDM_Odm);
+
+ odm_SwAntDetectInit(pDM_Odm);
+}
+
+/* */
+/* 2011/09/20 MH This is the entry point for all teams to execute HW outsourced DM. */
+/* Do not add any dummy function here; be careful, you can only use the DM structure */
+/* to perform any new ODM_DM. */
+/* */
+void ODM_DMWatchdog(PDM_ODM_T pDM_Odm)
+{
+ odm_CommonInfoSelfUpdate(pDM_Odm);
+ odm_BasicDbgMessage(pDM_Odm);
+ odm_FalseAlarmCounterStatistics(pDM_Odm);
+ odm_NHMCounterStatistics(pDM_Odm);
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_DIG(): RSSI = 0x%x\n", pDM_Odm->RSSI_Min));
+
+ odm_RSSIMonitorCheck(pDM_Odm);
+
+ /* For CE Platform(SPRD or Tablet) */
+ /* 8723A or 8189ES platform */
+ /* NeilChen--2012--08--24-- */
+ /* Fix Leave LPS issue */
+ if ((adapter_to_pwrctl(pDM_Odm->Adapter)->pwr_mode != PS_MODE_ACTIVE) /* in LPS mode */
+ /* */
+ /* (pDM_Odm->SupportICType & (ODM_RTL8723A))|| */
+ /* (pDM_Odm->SupportICType & (ODM_RTL8188E) &&(&&(((pDM_Odm->SupportInterface == ODM_ITRF_SDIO))) */
+ /* */
+ ) {
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("----Step1: odm_DIG is in LPS mode\n"));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("---Step2: 8723AS is in LPS mode\n"));
+ odm_DIGbyRSSI_LPS(pDM_Odm);
+ } else
+ odm_DIG(pDM_Odm);
+
+ {
+ pDIG_T pDM_DigTable = &pDM_Odm->DM_DigTable;
+
+ odm_Adaptivity(pDM_Odm, pDM_DigTable->CurIGValue);
+ }
+ odm_CCKPacketDetectionThresh(pDM_Odm);
+
+ if (*(pDM_Odm->pbPowerSaving) == true)
+ return;
+
+
+ odm_RefreshRateAdaptiveMask(pDM_Odm);
+ odm_EdcaTurboCheck(pDM_Odm);
+ odm_PathDiversity(pDM_Odm);
+ ODM_CfoTracking(pDM_Odm);
+
+ ODM_TXPowerTrackingCheck(pDM_Odm);
+
+ /* odm_EdcaTurboCheck(pDM_Odm); */
+
+	/* 2010.05.30 LukeLee: For CE platform, files in IC subfolders may not be included in the build, */
+	/* so compile flags must be left here to prevent compile errors */
+ pDM_Odm->PhyDbgInfo.NumQryBeaconPkt = 0;
+}
+
+
+/* */
+/* Init fixed HW values. Used only at init time. */
+/* */
+void ODM_CmnInfoInit(PDM_ODM_T pDM_Odm, ODM_CMNINFO_E CmnInfo, u32 Value)
+{
+ /* */
+ /* This section is used for init value */
+ /* */
+ switch (CmnInfo) {
+ /* */
+ /* Fixed ODM value. */
+ /* */
+ case ODM_CMNINFO_ABILITY:
+ pDM_Odm->SupportAbility = (u32)Value;
+ break;
+
+ case ODM_CMNINFO_RF_TYPE:
+ pDM_Odm->RFType = (u8)Value;
+ break;
+
+ case ODM_CMNINFO_PLATFORM:
+ pDM_Odm->SupportPlatform = (u8)Value;
+ break;
+
+ case ODM_CMNINFO_INTERFACE:
+ pDM_Odm->SupportInterface = (u8)Value;
+ break;
+
+ case ODM_CMNINFO_MP_TEST_CHIP:
+ pDM_Odm->bIsMPChip = (u8)Value;
+ break;
+
+ case ODM_CMNINFO_IC_TYPE:
+ pDM_Odm->SupportICType = Value;
+ break;
+
+ case ODM_CMNINFO_CUT_VER:
+ pDM_Odm->CutVersion = (u8)Value;
+ break;
+
+ case ODM_CMNINFO_FAB_VER:
+ pDM_Odm->FabVersion = (u8)Value;
+ break;
+
+ case ODM_CMNINFO_RFE_TYPE:
+ pDM_Odm->RFEType = (u8)Value;
+ break;
+
+ case ODM_CMNINFO_RF_ANTENNA_TYPE:
+ pDM_Odm->AntDivType = (u8)Value;
+ break;
+
+ case ODM_CMNINFO_BOARD_TYPE:
+ pDM_Odm->BoardType = (u8)Value;
+ break;
+
+ case ODM_CMNINFO_PACKAGE_TYPE:
+ pDM_Odm->PackageType = (u8)Value;
+ break;
+
+ case ODM_CMNINFO_EXT_LNA:
+ pDM_Odm->ExtLNA = (u8)Value;
+ break;
+
+ case ODM_CMNINFO_5G_EXT_LNA:
+ pDM_Odm->ExtLNA5G = (u8)Value;
+ break;
+
+ case ODM_CMNINFO_EXT_PA:
+ pDM_Odm->ExtPA = (u8)Value;
+ break;
+
+ case ODM_CMNINFO_5G_EXT_PA:
+ pDM_Odm->ExtPA5G = (u8)Value;
+ break;
+
+ case ODM_CMNINFO_GPA:
+ pDM_Odm->TypeGPA = (ODM_TYPE_GPA_E)Value;
+ break;
+ case ODM_CMNINFO_APA:
+ pDM_Odm->TypeAPA = (ODM_TYPE_APA_E)Value;
+ break;
+ case ODM_CMNINFO_GLNA:
+ pDM_Odm->TypeGLNA = (ODM_TYPE_GLNA_E)Value;
+ break;
+ case ODM_CMNINFO_ALNA:
+ pDM_Odm->TypeALNA = (ODM_TYPE_ALNA_E)Value;
+ break;
+
+ case ODM_CMNINFO_EXT_TRSW:
+ pDM_Odm->ExtTRSW = (u8)Value;
+ break;
+ case ODM_CMNINFO_PATCH_ID:
+ pDM_Odm->PatchID = (u8)Value;
+ break;
+ case ODM_CMNINFO_BINHCT_TEST:
+ pDM_Odm->bInHctTest = (bool)Value;
+ break;
+ case ODM_CMNINFO_BWIFI_TEST:
+ pDM_Odm->bWIFITest = (bool)Value;
+ break;
+
+ case ODM_CMNINFO_SMART_CONCURRENT:
+ pDM_Odm->bDualMacSmartConcurrent = (bool)Value;
+ break;
+
+	/* To remove the compiler warning, we must add an empty default statement to handle the other values. */
+ default:
+ /* do nothing */
+ break;
+ }
+
+}
+
+
+void ODM_CmnInfoHook(PDM_ODM_T pDM_Odm, ODM_CMNINFO_E CmnInfo, void *pValue)
+{
+ /* */
+ /* Hook call by reference pointer. */
+ /* */
+ switch (CmnInfo) {
+ /* */
+ /* Dynamic call by reference pointer. */
+ /* */
+ case ODM_CMNINFO_MAC_PHY_MODE:
+ pDM_Odm->pMacPhyMode = (u8 *)pValue;
+ break;
+
+ case ODM_CMNINFO_TX_UNI:
+ pDM_Odm->pNumTxBytesUnicast = (u64 *)pValue;
+ break;
+
+ case ODM_CMNINFO_RX_UNI:
+ pDM_Odm->pNumRxBytesUnicast = (u64 *)pValue;
+ break;
+
+ case ODM_CMNINFO_WM_MODE:
+ pDM_Odm->pwirelessmode = (u8 *)pValue;
+ break;
+
+ case ODM_CMNINFO_BAND:
+ pDM_Odm->pBandType = (u8 *)pValue;
+ break;
+
+ case ODM_CMNINFO_SEC_CHNL_OFFSET:
+ pDM_Odm->pSecChOffset = (u8 *)pValue;
+ break;
+
+ case ODM_CMNINFO_SEC_MODE:
+ pDM_Odm->pSecurity = (u8 *)pValue;
+ break;
+
+ case ODM_CMNINFO_BW:
+ pDM_Odm->pBandWidth = (u8 *)pValue;
+ break;
+
+ case ODM_CMNINFO_CHNL:
+ pDM_Odm->pChannel = (u8 *)pValue;
+ break;
+
+ case ODM_CMNINFO_DMSP_GET_VALUE:
+ pDM_Odm->pbGetValueFromOtherMac = (bool *)pValue;
+ break;
+
+ case ODM_CMNINFO_BUDDY_ADAPTOR:
+ pDM_Odm->pBuddyAdapter = (struct adapter **)pValue;
+ break;
+
+ case ODM_CMNINFO_DMSP_IS_MASTER:
+ pDM_Odm->pbMasterOfDMSP = (bool *)pValue;
+ break;
+
+ case ODM_CMNINFO_SCAN:
+ pDM_Odm->pbScanInProcess = (bool *)pValue;
+ break;
+
+ case ODM_CMNINFO_POWER_SAVING:
+ pDM_Odm->pbPowerSaving = (bool *)pValue;
+ break;
+
+ case ODM_CMNINFO_ONE_PATH_CCA:
+ pDM_Odm->pOnePathCCA = (u8 *)pValue;
+ break;
+
+ case ODM_CMNINFO_DRV_STOP:
+ pDM_Odm->pbDriverStopped = (bool *)pValue;
+ break;
+
+ case ODM_CMNINFO_PNP_IN:
+ pDM_Odm->pbDriverIsGoingToPnpSetPowerSleep = (bool *)pValue;
+ break;
+
+ case ODM_CMNINFO_INIT_ON:
+ pDM_Odm->pinit_adpt_in_progress = (bool *)pValue;
+ break;
+
+ case ODM_CMNINFO_ANT_TEST:
+ pDM_Odm->pAntennaTest = (u8 *)pValue;
+ break;
+
+ case ODM_CMNINFO_NET_CLOSED:
+ pDM_Odm->pbNet_closed = (bool *)pValue;
+ break;
+
+ case ODM_CMNINFO_FORCED_RATE:
+ pDM_Odm->pForcedDataRate = (u16 *)pValue;
+ break;
+
+ case ODM_CMNINFO_FORCED_IGI_LB:
+ pDM_Odm->pu1ForcedIgiLb = (u8 *)pValue;
+ break;
+
+ case ODM_CMNINFO_MP_MODE:
+ pDM_Odm->mp_mode = (u8 *)pValue;
+ break;
+
+ /* case ODM_CMNINFO_RTSTA_AID: */
+ /* pDM_Odm->pAidMap = (u8 *)pValue; */
+ /* break; */
+
+ /* case ODM_CMNINFO_BT_COEXIST: */
+ /* pDM_Odm->BTCoexist = (bool *)pValue; */
+
+ /* case ODM_CMNINFO_STA_STATUS: */
+ /* pDM_Odm->pODM_StaInfo[] = (PSTA_INFO_T)pValue; */
+ /* break; */
+
+ /* case ODM_CMNINFO_PHY_STATUS: */
+ /* pDM_Odm->pPhyInfo = (ODM_PHY_INFO *)pValue; */
+ /* break; */
+
+ /* case ODM_CMNINFO_MAC_STATUS: */
+ /* pDM_Odm->pMacInfo = (ODM_MAC_INFO *)pValue; */
+ /* break; */
+	/* To remove the compiler warning, we must add an empty default statement to handle the other values. */
+ default:
+ /* do nothing */
+ break;
+ }
+
+}
+
+
+void ODM_CmnInfoPtrArrayHook(
+ PDM_ODM_T pDM_Odm,
+ ODM_CMNINFO_E CmnInfo,
+ u16 Index,
+ void *pValue
+)
+{
+ /* */
+ /* Hook call by reference pointer. */
+ /* */
+ switch (CmnInfo) {
+ /* */
+ /* Dynamic call by reference pointer. */
+ /* */
+ case ODM_CMNINFO_STA_STATUS:
+ pDM_Odm->pODM_StaInfo[Index] = (PSTA_INFO_T)pValue;
+ break;
+	/* To remove the compiler warning, we must add an empty default statement to handle the other values. */
+ default:
+ /* do nothing */
+ break;
+ }
+
+}
+
+
+/* */
+/* Update Band/Channel/etc. The values are dynamic but not per-packet. */
+/* */
+void ODM_CmnInfoUpdate(PDM_ODM_T pDM_Odm, u32 CmnInfo, u64 Value)
+{
+ /* */
+	/* These init variables may be changed at run time. */
+ /* */
+ switch (CmnInfo) {
+ case ODM_CMNINFO_LINK_IN_PROGRESS:
+ pDM_Odm->bLinkInProcess = (bool)Value;
+ break;
+
+ case ODM_CMNINFO_ABILITY:
+ pDM_Odm->SupportAbility = (u32)Value;
+ break;
+
+ case ODM_CMNINFO_RF_TYPE:
+ pDM_Odm->RFType = (u8)Value;
+ break;
+
+ case ODM_CMNINFO_WIFI_DIRECT:
+ pDM_Odm->bWIFI_Direct = (bool)Value;
+ break;
+
+ case ODM_CMNINFO_WIFI_DISPLAY:
+ pDM_Odm->bWIFI_Display = (bool)Value;
+ break;
+
+ case ODM_CMNINFO_LINK:
+ pDM_Odm->bLinked = (bool)Value;
+ break;
+
+ case ODM_CMNINFO_STATION_STATE:
+ pDM_Odm->bsta_state = (bool)Value;
+ break;
+
+ case ODM_CMNINFO_RSSI_MIN:
+ pDM_Odm->RSSI_Min = (u8)Value;
+ break;
+
+ case ODM_CMNINFO_DBG_COMP:
+ pDM_Odm->DebugComponents = Value;
+ break;
+
+ case ODM_CMNINFO_DBG_LEVEL:
+ pDM_Odm->DebugLevel = (u32)Value;
+ break;
+ case ODM_CMNINFO_RA_THRESHOLD_HIGH:
+ pDM_Odm->RateAdaptive.HighRSSIThresh = (u8)Value;
+ break;
+
+ case ODM_CMNINFO_RA_THRESHOLD_LOW:
+ pDM_Odm->RateAdaptive.LowRSSIThresh = (u8)Value;
+ break;
+ /* The following is for BT HS mode and BT coexist mechanism. */
+ case ODM_CMNINFO_BT_ENABLED:
+ pDM_Odm->bBtEnabled = (bool)Value;
+ break;
+
+ case ODM_CMNINFO_BT_HS_CONNECT_PROCESS:
+ pDM_Odm->bBtConnectProcess = (bool)Value;
+ break;
+
+ case ODM_CMNINFO_BT_HS_RSSI:
+ pDM_Odm->btHsRssi = (u8)Value;
+ break;
+
+ case ODM_CMNINFO_BT_OPERATION:
+ pDM_Odm->bBtHsOperation = (bool)Value;
+ break;
+
+ case ODM_CMNINFO_BT_LIMITED_DIG:
+ pDM_Odm->bBtLimitedDig = (bool)Value;
+ break;
+
+ case ODM_CMNINFO_BT_DISABLE_EDCA:
+ pDM_Odm->bBtDisableEdcaTurbo = (bool)Value;
+ break;
+
+/*
+ case ODM_CMNINFO_OP_MODE:
+ pDM_Odm->OPMode = (u8)Value;
+ break;
+
+ case ODM_CMNINFO_WM_MODE:
+ pDM_Odm->WirelessMode = (u8)Value;
+ break;
+
+ case ODM_CMNINFO_BAND:
+ pDM_Odm->BandType = (u8)Value;
+ break;
+
+ case ODM_CMNINFO_SEC_CHNL_OFFSET:
+ pDM_Odm->SecChOffset = (u8)Value;
+ break;
+
+ case ODM_CMNINFO_SEC_MODE:
+ pDM_Odm->Security = (u8)Value;
+ break;
+
+ case ODM_CMNINFO_BW:
+ pDM_Odm->BandWidth = (u8)Value;
+ break;
+
+ case ODM_CMNINFO_CHNL:
+ pDM_Odm->Channel = (u8)Value;
+ break;
+*/
+ default:
+ /* do nothing */
+ break;
+ }
+
+
+}
+
+void odm_CommonInfoSelfInit(PDM_ODM_T pDM_Odm)
+{
+ pDM_Odm->bCckHighPower = (bool) PHY_QueryBBReg(pDM_Odm->Adapter, ODM_REG(CCK_RPT_FORMAT, pDM_Odm), ODM_BIT(CCK_RPT_FORMAT, pDM_Odm));
+ pDM_Odm->RFPathRxEnable = (u8) PHY_QueryBBReg(pDM_Odm->Adapter, ODM_REG(BB_RX_PATH, pDM_Odm), ODM_BIT(BB_RX_PATH, pDM_Odm));
+
+ ODM_InitDebugSetting(pDM_Odm);
+
+ pDM_Odm->TxRate = 0xFF;
+}
+
+void odm_CommonInfoSelfUpdate(PDM_ODM_T pDM_Odm)
+{
+ u8 EntryCnt = 0;
+ u8 i;
+ PSTA_INFO_T pEntry;
+
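+	/* In 40MHz operation *pChannel is treated as the centre channel and the control */
+	/* (primary) channel sits two channel numbers (10MHz) to either side, selected by */
+	/* the secondary-channel offset, e.g. centre channel 6 with offset 2 gives 8. */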
+ if (*(pDM_Odm->pBandWidth) == ODM_BW40M) {
+ if (*(pDM_Odm->pSecChOffset) == 1)
+ pDM_Odm->ControlChannel = *(pDM_Odm->pChannel)-2;
+ else if (*(pDM_Odm->pSecChOffset) == 2)
+ pDM_Odm->ControlChannel = *(pDM_Odm->pChannel)+2;
+ } else
+ pDM_Odm->ControlChannel = *(pDM_Odm->pChannel);
+
+ for (i = 0; i < ODM_ASSOCIATE_ENTRY_NUM; i++) {
+ pEntry = pDM_Odm->pODM_StaInfo[i];
+ if (IS_STA_VALID(pEntry))
+ EntryCnt++;
+ }
+
+ if (EntryCnt == 1)
+ pDM_Odm->bOneEntryOnly = true;
+ else
+ pDM_Odm->bOneEntryOnly = false;
+}
+
+void odm_CmnInfoInit_Debug(PDM_ODM_T pDM_Odm)
+{
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("odm_CmnInfoInit_Debug ==>\n"));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("SupportPlatform =%d\n", pDM_Odm->SupportPlatform));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("SupportAbility = 0x%x\n", pDM_Odm->SupportAbility));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("SupportInterface =%d\n", pDM_Odm->SupportInterface));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("SupportICType = 0x%x\n", pDM_Odm->SupportICType));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("CutVersion =%d\n", pDM_Odm->CutVersion));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("FabVersion =%d\n", pDM_Odm->FabVersion));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("RFType =%d\n", pDM_Odm->RFType));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("BoardType =%d\n", pDM_Odm->BoardType));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("ExtLNA =%d\n", pDM_Odm->ExtLNA));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("ExtPA =%d\n", pDM_Odm->ExtPA));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("ExtTRSW =%d\n", pDM_Odm->ExtTRSW));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("PatchID =%d\n", pDM_Odm->PatchID));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("bInHctTest =%d\n", pDM_Odm->bInHctTest));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("bWIFITest =%d\n", pDM_Odm->bWIFITest));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("bDualMacSmartConcurrent =%d\n", pDM_Odm->bDualMacSmartConcurrent));
+
+}
+
+void odm_BasicDbgMessage(PDM_ODM_T pDM_Odm)
+{
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("odm_BasicDbgMsg ==>\n"));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("bLinked = %d, RSSI_Min = %d,\n",
+ pDM_Odm->bLinked, pDM_Odm->RSSI_Min));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("RxRate = 0x%x, RSSI_A = %d, RSSI_B = %d\n",
+ pDM_Odm->RxRate, pDM_Odm->RSSI_A, pDM_Odm->RSSI_B));
+}
+
+/* 3 ============================================================ */
+/* 3 DIG */
+/* 3 ============================================================ */
+/*-----------------------------------------------------------------------------
+ * Function: odm_DIGInit()
+ *
+ * Overview: Set DIG scheme init value.
+ *
+ * Input: NONE
+ *
+ * Output: NONE
+ *
+ * Return: NONE
+ *
+ * Revised History:
+ *When Who Remark
+ *
+ *---------------------------------------------------------------------------
+ */
+
+/* Remove DIG by yuchen */
+
+/* Remove DIG and FA check by Yu Chen */
+
+
+/* 3 ============================================================ */
+/* 3 BB Power Save */
+/* 3 ============================================================ */
+
+/* Remove BB power saving by Yuchen */
+
+/* 3 ============================================================ */
+/* 3 RATR MASK */
+/* 3 ============================================================ */
+/* 3 ============================================================ */
+/* 3 Rate Adaptive */
+/* 3 ============================================================ */
+
+void odm_RateAdaptiveMaskInit(PDM_ODM_T pDM_Odm)
+{
+ PODM_RATE_ADAPTIVE pOdmRA = &pDM_Odm->RateAdaptive;
+
+ pOdmRA->Type = DM_Type_ByDriver;
+ if (pOdmRA->Type == DM_Type_ByDriver)
+ pDM_Odm->bUseRAMask = true;
+ else
+ pDM_Odm->bUseRAMask = false;
+
+ pOdmRA->RATRState = DM_RATR_STA_INIT;
+ pOdmRA->LdpcThres = 35;
+ pOdmRA->bUseLdpc = false;
+ pOdmRA->HighRSSIThresh = 50;
+ pOdmRA->LowRSSIThresh = 20;
+}
+
+u32 ODM_Get_Rate_Bitmap(
+ PDM_ODM_T pDM_Odm,
+ u32 macid,
+ u32 ra_mask,
+ u8 rssi_level
+)
+{
+ PSTA_INFO_T pEntry;
+ u32 rate_bitmap = 0;
+ u8 WirelessMode;
+
+ pEntry = pDM_Odm->pODM_StaInfo[macid];
+ if (!IS_STA_VALID(pEntry))
+ return ra_mask;
+
+ WirelessMode = pEntry->wireless_mode;
+
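+	/* The masks below appear to follow the usual Realtek rate-bitmap layout: */
+	/* bits 0-3 CCK 1/2/5.5/11M, bits 4-11 OFDM 6-54M, and bits 12 and up HT MCS0-15 */
+	/* (re-used as VHT 1SS/2SS MCS0-9 in the ODM_WM_AC cases). */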
+ switch (WirelessMode) {
+ case ODM_WM_B:
+ if (ra_mask & 0x0000000c) /* 11M or 5.5M enable */
+ rate_bitmap = 0x0000000d;
+ else
+ rate_bitmap = 0x0000000f;
+ break;
+
+ case (ODM_WM_G):
+ case (ODM_WM_A):
+ if (rssi_level == DM_RATR_STA_HIGH)
+ rate_bitmap = 0x00000f00;
+ else
+ rate_bitmap = 0x00000ff0;
+ break;
+
+ case (ODM_WM_B|ODM_WM_G):
+ if (rssi_level == DM_RATR_STA_HIGH)
+ rate_bitmap = 0x00000f00;
+ else if (rssi_level == DM_RATR_STA_MIDDLE)
+ rate_bitmap = 0x00000ff0;
+ else
+ rate_bitmap = 0x00000ff5;
+ break;
+
+ case (ODM_WM_B|ODM_WM_G|ODM_WM_N24G):
+ case (ODM_WM_B|ODM_WM_N24G):
+ case (ODM_WM_G|ODM_WM_N24G):
+ case (ODM_WM_A|ODM_WM_N5G):
+ if (pDM_Odm->RFType == ODM_1T2R || pDM_Odm->RFType == ODM_1T1R) {
+ if (rssi_level == DM_RATR_STA_HIGH)
+ rate_bitmap = 0x000f0000;
+ else if (rssi_level == DM_RATR_STA_MIDDLE)
+ rate_bitmap = 0x000ff000;
+ else {
+ if (*(pDM_Odm->pBandWidth) == ODM_BW40M)
+ rate_bitmap = 0x000ff015;
+ else
+ rate_bitmap = 0x000ff005;
+ }
+ } else {
+ if (rssi_level == DM_RATR_STA_HIGH)
+ rate_bitmap = 0x0f8f0000;
+ else if (rssi_level == DM_RATR_STA_MIDDLE)
+ rate_bitmap = 0x0f8ff000;
+ else {
+ if (*(pDM_Odm->pBandWidth) == ODM_BW40M)
+ rate_bitmap = 0x0f8ff015;
+ else
+ rate_bitmap = 0x0f8ff005;
+ }
+ }
+ break;
+
+ case (ODM_WM_AC|ODM_WM_G):
+ if (rssi_level == 1)
+ rate_bitmap = 0xfc3f0000;
+ else if (rssi_level == 2)
+ rate_bitmap = 0xfffff000;
+ else
+ rate_bitmap = 0xffffffff;
+ break;
+
+ case (ODM_WM_AC|ODM_WM_A):
+
+ if (pDM_Odm->RFType == RF_1T1R) {
+ if (rssi_level == 1) /* add by Gary for ac-series */
+ rate_bitmap = 0x003f8000;
+ else if (rssi_level == 2)
+ rate_bitmap = 0x003ff000;
+ else
+ rate_bitmap = 0x003ff010;
+ } else {
+ if (rssi_level == 1) /* add by Gary for ac-series */
+ rate_bitmap = 0xfe3f8000; /* VHT 2SS MCS3~9 */
+ else if (rssi_level == 2)
+ rate_bitmap = 0xfffff000; /* VHT 2SS MCS0~9 */
+ else
+ rate_bitmap = 0xfffff010; /* All */
+ }
+ break;
+
+ default:
+ if (pDM_Odm->RFType == RF_1T2R)
+ rate_bitmap = 0x000fffff;
+ else
+ rate_bitmap = 0x0fffffff;
+ break;
+ }
+
+ /* printk("%s ==> rssi_level:0x%02x, WirelessMode:0x%02x, rate_bitmap:0x%08x\n", __func__, rssi_level, WirelessMode, rate_bitmap); */
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_RA_MASK, ODM_DBG_LOUD, (" ==> rssi_level:0x%02x, WirelessMode:0x%02x, rate_bitmap:0x%08x\n", rssi_level, WirelessMode, rate_bitmap));
+
+ return (ra_mask&rate_bitmap);
+
+}
+
+/*-----------------------------------------------------------------------------
+* Function: odm_RefreshRateAdaptiveMask()
+*
+* Overview: Update rate table mask according to rssi
+*
+* Input: NONE
+*
+* Output: NONE
+*
+* Return: NONE
+*
+* Revised History:
+*When Who Remark
+*05/27/2009 hpfan Create Version 0.
+*
+* --------------------------------------------------------------------------
+*/
+void odm_RefreshRateAdaptiveMask(PDM_ODM_T pDM_Odm)
+{
+
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_RA_MASK, ODM_DBG_TRACE, ("odm_RefreshRateAdaptiveMask()---------->\n"));
+ if (!(pDM_Odm->SupportAbility & ODM_BB_RA_MASK)) {
+		ODM_RT_TRACE(pDM_Odm, ODM_COMP_RA_MASK, ODM_DBG_TRACE, ("odm_RefreshRateAdaptiveMask(): Return because not supported\n"));
+ return;
+ }
+ odm_RefreshRateAdaptiveMaskCE(pDM_Odm);
+}
+
+void odm_RefreshRateAdaptiveMaskCE(PDM_ODM_T pDM_Odm)
+{
+ u8 i;
+ struct adapter *padapter = pDM_Odm->Adapter;
+
+ if (padapter->bDriverStopped) {
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_RA_MASK, ODM_DBG_TRACE, ("<---- odm_RefreshRateAdaptiveMask(): driver is going to unload\n"));
+ return;
+ }
+
+ if (!pDM_Odm->bUseRAMask) {
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_RA_MASK, ODM_DBG_LOUD, ("<---- odm_RefreshRateAdaptiveMask(): driver does not control rate adaptive mask\n"));
+ return;
+ }
+
+ /* printk("==> %s\n", __func__); */
+
+ for (i = 0; i < ODM_ASSOCIATE_ENTRY_NUM; i++) {
+ PSTA_INFO_T pstat = pDM_Odm->pODM_StaInfo[i];
+
+ if (IS_STA_VALID(pstat)) {
+			if (IS_MCAST(pstat->hwaddr)) /* if (psta->mac_id == 1) */
+				continue;
+
+			if (ODM_RAStateCheck(pDM_Odm, pstat->rssi_stat.UndecoratedSmoothedPWDB, false, &pstat->rssi_level)) {
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_RA_MASK, ODM_DBG_LOUD, ("RSSI:%d, RSSI_LEVEL:%d\n", pstat->rssi_stat.UndecoratedSmoothedPWDB, pstat->rssi_level));
+ /* printk("RSSI:%d, RSSI_LEVEL:%d\n", pstat->rssi_stat.UndecoratedSmoothedPWDB, pstat->rssi_level); */
+ rtw_hal_update_ra_mask(pstat, pstat->rssi_level);
+ }
+
+ }
+ }
+}
+
+/* Return Value: bool */
+/* - true: RATRState is changed. */
+bool ODM_RAStateCheck(
+ PDM_ODM_T pDM_Odm,
+ s32 RSSI,
+ bool bForceUpdate,
+ u8 *pRATRState
+)
+{
+ PODM_RATE_ADAPTIVE pRA = &pDM_Odm->RateAdaptive;
+ const u8 GoUpGap = 5;
+ u8 HighRSSIThreshForRA = pRA->HighRSSIThresh;
+ u8 LowRSSIThreshForRA = pRA->LowRSSIThresh;
+ u8 RATRState;
+
+ /* Threshold Adjustment: */
+ /* when RSSI state trends to go up one or two levels, make sure RSSI is high enough. */
+ /* Here GoUpGap is added to solve the boundary's level alternation issue. */
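+	/* e.g. with the init defaults (High = 50, Low = 20), a station in DM_RATR_STA_LOW */
+	/* is promoted to MIDDLE only above 25 and to HIGH only above 55, while a station */
+	/* already in HIGH is demoted only once RSSI falls to 50 or below. */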
+ switch (*pRATRState) {
+ case DM_RATR_STA_INIT:
+ case DM_RATR_STA_HIGH:
+ break;
+
+ case DM_RATR_STA_MIDDLE:
+ HighRSSIThreshForRA += GoUpGap;
+ break;
+
+ case DM_RATR_STA_LOW:
+ HighRSSIThreshForRA += GoUpGap;
+ LowRSSIThreshForRA += GoUpGap;
+ break;
+
+ default:
+ ODM_RT_ASSERT(pDM_Odm, false, ("wrong rssi level setting %d !", *pRATRState));
+ break;
+ }
+
+ /* Decide RATRState by RSSI. */
+ if (RSSI > HighRSSIThreshForRA)
+ RATRState = DM_RATR_STA_HIGH;
+ else if (RSSI > LowRSSIThreshForRA)
+ RATRState = DM_RATR_STA_MIDDLE;
+ else
+ RATRState = DM_RATR_STA_LOW;
+ /* printk("==>%s, RATRState:0x%02x , RSSI:%d\n", __func__, RATRState, RSSI); */
+
+ if (*pRATRState != RATRState || bForceUpdate) {
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_RA_MASK, ODM_DBG_LOUD, ("RSSI Level %d -> %d\n", *pRATRState, RATRState));
+ *pRATRState = RATRState;
+ return true;
+ }
+
+ return false;
+}
+
+
+/* */
+
+/* 3 ============================================================ */
+/* 3 Dynamic Tx Power */
+/* 3 ============================================================ */
+
+/* Remove BY YuChen */
+
+/* 3 ============================================================ */
+/* 3 RSSI Monitor */
+/* 3 ============================================================ */
+
+void odm_RSSIMonitorInit(PDM_ODM_T pDM_Odm)
+{
+ pRA_T pRA_Table = &pDM_Odm->DM_RA_Table;
+
+ pRA_Table->firstconnect = false;
+
+}
+
+void odm_RSSIMonitorCheck(PDM_ODM_T pDM_Odm)
+{
+ if (!(pDM_Odm->SupportAbility & ODM_BB_RSSI_MONITOR))
+ return;
+
+ odm_RSSIMonitorCheckCE(pDM_Odm);
+
+} /* odm_RSSIMonitorCheck */
+
+static void FindMinimumRSSI(struct adapter *padapter)
+{
+ struct hal_com_data *pHalData = GET_HAL_DATA(padapter);
+ struct dm_priv *pdmpriv = &pHalData->dmpriv;
+ PDM_ODM_T pDM_Odm = &(pHalData->odmpriv);
+
+ /* 1 1.Determine the minimum RSSI */
+
+ if (
+ (pDM_Odm->bLinked != true) &&
+ (pdmpriv->EntryMinUndecoratedSmoothedPWDB == 0)
+ ) {
+ pdmpriv->MinUndecoratedPWDBForDM = 0;
+ /* ODM_RT_TRACE(pDM_Odm, COMP_BB_POWERSAVING, DBG_LOUD, ("Not connected to any\n")); */
+ } else
+ pdmpriv->MinUndecoratedPWDBForDM = pdmpriv->EntryMinUndecoratedSmoothedPWDB;
+
+ /* DBG_8192C("%s =>MinUndecoratedPWDBForDM(%d)\n", __func__, pdmpriv->MinUndecoratedPWDBForDM); */
+ /* ODM_RT_TRACE(pDM_Odm, COMP_DIG, DBG_LOUD, ("MinUndecoratedPWDBForDM =%d\n", pHalData->MinUndecoratedPWDBForDM)); */
+}
+
+void odm_RSSIMonitorCheckCE(PDM_ODM_T pDM_Odm)
+{
+ struct adapter *Adapter = pDM_Odm->Adapter;
+ struct hal_com_data *pHalData = GET_HAL_DATA(Adapter);
+ struct dm_priv *pdmpriv = &pHalData->dmpriv;
+ int i;
+ int tmpEntryMaxPWDB = 0, tmpEntryMinPWDB = 0xff;
+ u8 sta_cnt = 0;
+	u32 PWDB_rssi[NUM_STA] = {0}; /* [0~15]:MACID, [16~31]:PWDB_rssi */
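+	/* e.g. mac_id 2 with PWDB 60 is stored as (60 << 16) | 2 = 0x003C0002 */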
+ bool FirstConnect = false;
+ pRA_T pRA_Table = &pDM_Odm->DM_RA_Table;
+
+ if (pDM_Odm->bLinked != true)
+ return;
+
+ FirstConnect = (pDM_Odm->bLinked) && (pRA_Table->firstconnect == false);
+ pRA_Table->firstconnect = pDM_Odm->bLinked;
+
+ /* if (check_fwstate(&Adapter->mlmepriv, WIFI_AP_STATE|WIFI_ADHOC_STATE|WIFI_ADHOC_MASTER_STATE) == true) */
+ {
+ struct sta_info *psta;
+
+ for (i = 0; i < ODM_ASSOCIATE_ENTRY_NUM; i++) {
+ psta = pDM_Odm->pODM_StaInfo[i];
+ if (IS_STA_VALID(psta)) {
+ if (IS_MCAST(psta->hwaddr)) /* if (psta->mac_id == 1) */
+ continue;
+
+ if (psta->rssi_stat.UndecoratedSmoothedPWDB == (-1))
+ continue;
+
+ if (psta->rssi_stat.UndecoratedSmoothedPWDB < tmpEntryMinPWDB)
+ tmpEntryMinPWDB = psta->rssi_stat.UndecoratedSmoothedPWDB;
+
+ if (psta->rssi_stat.UndecoratedSmoothedPWDB > tmpEntryMaxPWDB)
+ tmpEntryMaxPWDB = psta->rssi_stat.UndecoratedSmoothedPWDB;
+
+ if (psta->rssi_stat.UndecoratedSmoothedPWDB != (-1))
+ PWDB_rssi[sta_cnt++] = (psta->mac_id | (psta->rssi_stat.UndecoratedSmoothedPWDB<<16));
+ }
+ }
+
+ /* printk("%s ==> sta_cnt(%d)\n", __func__, sta_cnt); */
+
+ for (i = 0; i < sta_cnt; i++) {
+ if (PWDB_rssi[i] != (0)) {
+ if (pHalData->fw_ractrl == true)/* Report every sta's RSSI to FW */
+ rtl8723b_set_rssi_cmd(Adapter, (u8 *)(&PWDB_rssi[i]));
+ }
+ }
+ }
+
+
+
+ if (tmpEntryMaxPWDB != 0) /* If associated entry is found */
+ pdmpriv->EntryMaxUndecoratedSmoothedPWDB = tmpEntryMaxPWDB;
+ else
+ pdmpriv->EntryMaxUndecoratedSmoothedPWDB = 0;
+
+ if (tmpEntryMinPWDB != 0xff) /* If associated entry is found */
+ pdmpriv->EntryMinUndecoratedSmoothedPWDB = tmpEntryMinPWDB;
+ else
+ pdmpriv->EntryMinUndecoratedSmoothedPWDB = 0;
+
+ FindMinimumRSSI(Adapter);/* get pdmpriv->MinUndecoratedPWDBForDM */
+
+ pDM_Odm->RSSI_Min = pdmpriv->MinUndecoratedPWDBForDM;
+ /* ODM_CmnInfoUpdate(&pHalData->odmpriv , ODM_CMNINFO_RSSI_MIN, pdmpriv->MinUndecoratedPWDBForDM); */
+}
+
+/* 3 ============================================================ */
+/* 3 Tx Power Tracking */
+/* 3 ============================================================ */
+
+void odm_TXPowerTrackingInit(PDM_ODM_T pDM_Odm)
+{
+ odm_TXPowerTrackingThermalMeterInit(pDM_Odm);
+}
+
+static u8 getSwingIndex(PDM_ODM_T pDM_Odm)
+{
+ struct adapter *Adapter = pDM_Odm->Adapter;
+ u8 i = 0;
+ u32 bbSwing;
+ u32 swingTableSize;
+ u32 *pSwingTable;
+
+ bbSwing = PHY_QueryBBReg(Adapter, rOFDM0_XATxIQImbalance, 0xFFC00000);
+
+ pSwingTable = OFDMSwingTable_New;
+ swingTableSize = OFDM_TABLE_SIZE;
+
+ for (i = 0; i < swingTableSize; ++i) {
+ u32 tableValue = pSwingTable[i];
+
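+		/* Table entries keep the swing value in bits 31:22, matching the 0xFFC00000 */
+		/* field read above (PHY_QueryBBReg presumably returns the masked field shifted */
+		/* down to bit 0), e.g. 0x40000100 >> 22 = 0x100. */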
+ if (tableValue >= 0x100000)
+ tableValue >>= 22;
+ if (bbSwing == tableValue)
+ break;
+ }
+ return i;
+}
+
+void odm_TXPowerTrackingThermalMeterInit(PDM_ODM_T pDM_Odm)
+{
+ u8 defaultSwingIndex = getSwingIndex(pDM_Odm);
+ u8 p = 0;
+ struct adapter *Adapter = pDM_Odm->Adapter;
+ struct hal_com_data *pHalData = GET_HAL_DATA(Adapter);
+
+
+ struct dm_priv *pdmpriv = &pHalData->dmpriv;
+
+ pdmpriv->bTXPowerTracking = true;
+ pdmpriv->TXPowercount = 0;
+ pdmpriv->bTXPowerTrackingInit = false;
+
+ if (*(pDM_Odm->mp_mode) != 1)
+ pdmpriv->TxPowerTrackControl = true;
+ else
+ pdmpriv->TxPowerTrackControl = false;
+
+
+ /* MSG_8192C("pdmpriv->TxPowerTrackControl = %d\n", pdmpriv->TxPowerTrackControl); */
+
+ /* pDM_Odm->RFCalibrateInfo.TxPowerTrackControl = true; */
+ pDM_Odm->RFCalibrateInfo.ThermalValue = pHalData->EEPROMThermalMeter;
+ pDM_Odm->RFCalibrateInfo.ThermalValue_IQK = pHalData->EEPROMThermalMeter;
+ pDM_Odm->RFCalibrateInfo.ThermalValue_LCK = pHalData->EEPROMThermalMeter;
+
+ /* The index of "0 dB" in SwingTable. */
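+	/* getSwingIndex() returns OFDM_TABLE_SIZE when the current BB swing is not found */
+	/* in the table, so fall back to index 30, the +0 dB entry of OFDMSwingTable_New. */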
+ pDM_Odm->DefaultOfdmIndex = (defaultSwingIndex >= OFDM_TABLE_SIZE) ? 30 : defaultSwingIndex;
+ pDM_Odm->DefaultCckIndex = 20;
+
+ pDM_Odm->BbSwingIdxCckBase = pDM_Odm->DefaultCckIndex;
+ pDM_Odm->RFCalibrateInfo.CCK_index = pDM_Odm->DefaultCckIndex;
+
+ for (p = ODM_RF_PATH_A; p < MAX_RF_PATH; ++p) {
+ pDM_Odm->BbSwingIdxOfdmBase[p] = pDM_Odm->DefaultOfdmIndex;
+ pDM_Odm->RFCalibrateInfo.OFDM_index[p] = pDM_Odm->DefaultOfdmIndex;
+ pDM_Odm->RFCalibrateInfo.DeltaPowerIndex[p] = 0;
+ pDM_Odm->RFCalibrateInfo.DeltaPowerIndexLast[p] = 0;
+ pDM_Odm->RFCalibrateInfo.PowerIndexOffset[p] = 0;
+ }
+
+}
+
+
+void ODM_TXPowerTrackingCheck(PDM_ODM_T pDM_Odm)
+{
+ odm_TXPowerTrackingCheckCE(pDM_Odm);
+}
+
+void odm_TXPowerTrackingCheckCE(PDM_ODM_T pDM_Odm)
+{
+ struct adapter *Adapter = pDM_Odm->Adapter;
+
+ if (!(pDM_Odm->SupportAbility & ODM_RF_TX_PWR_TRACK))
+ return;
+
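+	/* TM_Trigger alternates between 0 and 1: one watchdog pass arms the thermal */
+	/* meter, the next pass runs the tracking callback, so the measurement gets at */
+	/* least one watchdog period to complete. */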
+ if (!pDM_Odm->RFCalibrateInfo.TM_Trigger) { /* at least delay 1 sec */
+ PHY_SetRFReg(pDM_Odm->Adapter, ODM_RF_PATH_A, RF_T_METER_NEW, (BIT17 | BIT16), 0x03);
+
+ /* DBG_871X("Trigger Thermal Meter!!\n"); */
+
+ pDM_Odm->RFCalibrateInfo.TM_Trigger = 1;
+ return;
+ } else {
+ /* DBG_871X("Schedule TxPowerTracking direct call!!\n"); */
+ ODM_TXPowerTrackingCallback_ThermalMeter(Adapter);
+ pDM_Odm->RFCalibrateInfo.TM_Trigger = 0;
+ }
+}
+
+/* 3 ============================================================ */
+/* 3 SW Antenna Diversity */
+/* 3 ============================================================ */
+void odm_SwAntDetectInit(PDM_ODM_T pDM_Odm)
+{
+ pSWAT_T pDM_SWAT_Table = &pDM_Odm->DM_SWAT_Table;
+
+ pDM_SWAT_Table->SWAS_NoLink_BK_Reg92c = rtw_read32(pDM_Odm->Adapter, rDPDT_control);
+ pDM_SWAT_Table->PreAntenna = MAIN_ANT;
+ pDM_SWAT_Table->CurAntenna = MAIN_ANT;
+ pDM_SWAT_Table->SWAS_NoLink_State = 0;
+}
diff --git a/drivers/staging/rtl8723bs/hal/odm.h b/drivers/staging/rtl8723bs/hal/odm.h
new file mode 100644
index 000000000000..0b3541a91548
--- /dev/null
+++ b/drivers/staging/rtl8723bs/hal/odm.h
@@ -0,0 +1,1465 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+
+
+#ifndef __HALDMOUTSRC_H__
+#define __HALDMOUTSRC_H__
+
+
+#include "odm_EdcaTurboCheck.h"
+#include "odm_DIG.h"
+#include "odm_PathDiv.h"
+#include "odm_DynamicBBPowerSaving.h"
+#include "odm_DynamicTxPower.h"
+#include "odm_CfoTracking.h"
+#include "odm_NoiseMonitor.h"
+
+#define TP_MODE 0
+#define RSSI_MODE 1
+#define TRAFFIC_LOW 0
+#define TRAFFIC_HIGH 1
+#define NONE 0
+
+
+/* 3 Tx Power Tracking */
+/* 3 ============================================================ */
+#define DPK_DELTA_MAPPING_NUM 13
+#define index_mapping_HP_NUM 15
+#define OFDM_TABLE_SIZE 43
+#define CCK_TABLE_SIZE 33
+#define TXSCALE_TABLE_SIZE 37
+#define TXPWR_TRACK_TABLE_SIZE 30
+#define DELTA_SWINGIDX_SIZE 30
+#define BAND_NUM 4
+
+/* 3 PSD Handler */
+/* 3 ============================================================ */
+
+#define AFH_PSD 1 /* 0:normal PSD scan, 1: only do 20 pts PSD */
+#define MODE_40M 0 /* 0:20M, 1:40M */
+#define PSD_TH2 3
+#define PSD_CHMIN 20 /* Minimum channel number for BT AFH */
+#define SIR_STEP_SIZE 3
+#define Smooth_Size_1 5
+#define Smooth_TH_1 3
+#define Smooth_Size_2 10
+#define Smooth_TH_2 4
+#define Smooth_Size_3 20
+#define Smooth_TH_3 4
+#define Smooth_Step_Size 5
+#define Adaptive_SIR 1
+#define PSD_RESCAN 4
+#define PSD_SCAN_INTERVAL 700 /* ms */
+
+/* 8723A High Power IGI Setting */
+#define DM_DIG_HIGH_PWR_IGI_LOWER_BOUND 0x22
+#define DM_DIG_Gmode_HIGH_PWR_IGI_LOWER_BOUND 0x28
+#define DM_DIG_HIGH_PWR_THRESHOLD 0x3a
+#define DM_DIG_LOW_PWR_THRESHOLD 0x14
+
+/* ANT Test */
+#define ANTTESTALL	0x00 /* Ant A or B will be tested */
+#define ANTTESTA	0x01 /* Ant A will be tested */
+#define ANTTESTB	0x02 /* Ant B will be tested */
+
+#define PS_MODE_ACTIVE 0x01
+
+/* 8723A antenna definition -- 2012-06-07: different ICs may use different antenna defines */
+#define MAIN_ANT 1 /* Ant A or Ant Main */
+#define AUX_ANT 2 /* AntB or Ant Aux */
+#define MAX_ANT 3 /* 3 for AP using */
+
+
+/* Antenna Diversity Type */
+#define SW_ANTDIV 0
+#define HW_ANTDIV 1
+/* structure and define */
+
+/* Remove DIG by Yuchen */
+
+/* Remove BB power saving by Yuchen */
+
+/* Remove DIG by yuchen */
+
+typedef struct _Dynamic_Primary_CCA {
+ u8 PriCCA_flag;
+ u8 intf_flag;
+ u8 intf_type;
+ u8 DupRTS_flag;
+ u8 Monitor_flag;
+ u8 CH_offset;
+ u8 MF_state;
+} Pri_CCA_T, *pPri_CCA_T;
+
+typedef struct _Rate_Adaptive_Table_ {
+ u8 firstconnect;
+} RA_T, *pRA_T;
+
+typedef struct _RX_High_Power_ {
+ u8 RXHP_flag;
+ u8 PSD_func_trigger;
+ u8 PSD_bitmap_RXHP[80];
+ u8 Pre_IGI;
+ u8 Cur_IGI;
+ u8 Pre_pw_th;
+ u8 Cur_pw_th;
+ bool First_time_enter;
+ bool RXHP_enable;
+ u8 TP_Mode;
+ RT_TIMER PSDTimer;
+} RXHP_T, *pRXHP_T;
+
+#define ASSOCIATE_ENTRY_NUM 32 /* Max size of AsocEntry[]. */
+#define ODM_ASSOCIATE_ENTRY_NUM ASSOCIATE_ENTRY_NUM
+
+/* This indicates two different steps. */
+/* In SWAW_STEP_PEAK, the driver needs to switch the antenna and listen to the signal on the air. */
+/* In SWAW_STEP_DETERMINE, the driver just compares the signal captured in SWAW_STEP_PEAK */
+/* with the original RSSI to determine whether it is necessary to switch the antenna. */
+#define SWAW_STEP_PEAK 0
+#define SWAW_STEP_DETERMINE 1
+
+#define TP_MODE 0
+#define RSSI_MODE 1
+#define TRAFFIC_LOW 0
+#define TRAFFIC_HIGH 1
+#define TRAFFIC_UltraLOW 2
+
+typedef struct _SW_Antenna_Switch_ {
+ u8 Double_chk_flag;
+ u8 try_flag;
+ s32 PreRSSI;
+ u8 CurAntenna;
+ u8 PreAntenna;
+ u8 RSSI_Trying;
+ u8 TestMode;
+ u8 bTriggerAntennaSwitch;
+ u8 SelectAntennaMap;
+ u8 RSSI_target;
+ u8 reset_idx;
+ u16 Single_Ant_Counter;
+ u16 Dual_Ant_Counter;
+ u16 Aux_FailDetec_Counter;
+ u16 Retry_Counter;
+
+ /* Before link Antenna Switch check */
+ u8 SWAS_NoLink_State;
+ u32 SWAS_NoLink_BK_Reg860;
+ u32 SWAS_NoLink_BK_Reg92c;
+ u32 SWAS_NoLink_BK_Reg948;
+	bool ANTA_ON; /* To indicate whether Ant A is on or not */
+ bool ANTB_ON; /* To indicate Ant B is on or not */
+ bool Pre_Aux_FailDetec;
+ bool RSSI_AntDect_bResult;
+ u8 Ant5G;
+ u8 Ant2G;
+
+ s32 RSSI_sum_A;
+ s32 RSSI_sum_B;
+ s32 RSSI_cnt_A;
+ s32 RSSI_cnt_B;
+
+ u64 lastTxOkCnt;
+ u64 lastRxOkCnt;
+ u64 TXByteCnt_A;
+ u64 TXByteCnt_B;
+ u64 RXByteCnt_A;
+ u64 RXByteCnt_B;
+ u8 TrafficLoad;
+ u8 Train_time;
+ u8 Train_time_flag;
+ RT_TIMER SwAntennaSwitchTimer;
+ RT_TIMER SwAntennaSwitchTimer_8723B;
+ u32 PktCnt_SWAntDivByCtrlFrame;
+ bool bSWAntDivByCtrlFrame;
+} SWAT_T, *pSWAT_T;
+
+/* Remove Edca by YuChen */
+
+
+typedef struct _ODM_RATE_ADAPTIVE {
+ u8 Type; /* DM_Type_ByFW/DM_Type_ByDriver */
+	u8 LdpcThres; /* if RSSI > LdpcThres => switch from LDPC to BCC */
+ bool bUseLdpc;
+ bool bLowerRtsRate;
+ u8 HighRSSIThresh; /* if RSSI > HighRSSIThresh => RATRState is DM_RATR_STA_HIGH */
+ u8 LowRSSIThresh; /* if RSSI <= LowRSSIThresh => RATRState is DM_RATR_STA_LOW */
+ u8 RATRState; /* Current RSSI level, DM_RATR_STA_HIGH/DM_RATR_STA_MIDDLE/DM_RATR_STA_LOW */
+
+} ODM_RATE_ADAPTIVE, *PODM_RATE_ADAPTIVE;
+
+
+#define IQK_MAC_REG_NUM 4
+#define IQK_ADDA_REG_NUM 16
+#define IQK_BB_REG_NUM_MAX 10
+#define IQK_BB_REG_NUM 9
+#define HP_THERMAL_NUM 8
+
+#define AVG_THERMAL_NUM 8
+#define IQK_Matrix_REG_NUM 8
+#define IQK_Matrix_Settings_NUM (14 + 24 + 21) /* Channels_2_4G_NUM + Channels_5G_20M_NUM + Channels_5G */
+
+#define DM_Type_ByFW 0
+#define DM_Type_ByDriver 1
+
+/* */
+/* Declare for common info */
+/* */
+#define MAX_PATH_NUM_92CS 2
+#define MAX_PATH_NUM_8188E 1
+#define MAX_PATH_NUM_8192E 2
+#define MAX_PATH_NUM_8723B 1
+#define MAX_PATH_NUM_8812A 2
+#define MAX_PATH_NUM_8821A 1
+#define MAX_PATH_NUM_8814A 4
+#define MAX_PATH_NUM_8822B 2
+
+
+#define IQK_THRESHOLD 8
+#define DPK_THRESHOLD 4
+
+typedef struct _ODM_Phy_Status_Info_ {
+ /* */
+	/* Be careful: if you want to add any element, please insert it between */
+	/* RxPWDBAll & SignalStrength. */
+ /* */
+ u8 RxPWDBAll;
+
+ u8 SignalQuality; /* in 0-100 index. */
+ s8 RxMIMOSignalQuality[4]; /* per-path's EVM */
+ u8 RxMIMOEVMdbm[4]; /* per-path's EVM dbm */
+
+ u8 RxMIMOSignalStrength[4];/* in 0~100 index */
+
+ u16 Cfo_short[4]; /* per-path's Cfo_short */
+ u16 Cfo_tail[4]; /* per-path's Cfo_tail */
+
+ s8 RxPower; /* in dBm Translate from PWdB */
+ s8 RecvSignalPower; /* Real power in dBm for this packet, no beautification and aggregation. Keep this raw info to be used for the other procedures. */
+ u8 BTRxRSSIPercentage;
+ u8 SignalStrength; /* in 0-100 index. */
+
+ s8 RxPwr[4]; /* per-path's pwdb */
+
+ u8 RxSNR[4]; /* per-path's SNR */
+ u8 BandWidth;
+ u8 btCoexPwrAdjust;
+} ODM_PHY_INFO_T, *PODM_PHY_INFO_T;
+
+
+typedef struct _ODM_Per_Pkt_Info_ {
+ /* u8 Rate; */
+ u8 DataRate;
+ u8 StationID;
+ bool bPacketMatchBSSID;
+ bool bPacketToSelf;
+ bool bPacketBeacon;
+ bool bToSelf;
+} ODM_PACKET_INFO_T, *PODM_PACKET_INFO_T;
+
+
+typedef struct _ODM_Phy_Dbg_Info_ {
+ /* ODM Write, debug info */
+ s8 RxSNRdB[4];
+ u32 NumQryPhyStatus;
+ u32 NumQryPhyStatusCCK;
+ u32 NumQryPhyStatusOFDM;
+ u8 NumQryBeaconPkt;
+ /* Others */
+ s32 RxEVM[4];
+
+} ODM_PHY_DBG_INFO_T;
+
+
+typedef struct _ODM_Mac_Status_Info_ {
+ u8 test;
+} ODM_MAC_INFO;
+
+
+typedef enum tag_Dynamic_ODM_Support_Ability_Type {
+ /* BB Team */
+ ODM_DIG = 0x00000001,
+ ODM_HIGH_POWER = 0x00000002,
+ ODM_CCK_CCA_TH = 0x00000004,
+ ODM_FA_STATISTICS = 0x00000008,
+ ODM_RAMASK = 0x00000010,
+ ODM_RSSI_MONITOR = 0x00000020,
+ ODM_SW_ANTDIV = 0x00000040,
+ ODM_HW_ANTDIV = 0x00000080,
+ ODM_BB_PWRSV = 0x00000100,
+ ODM_2TPATHDIV = 0x00000200,
+ ODM_1TPATHDIV = 0x00000400,
+ ODM_PSD2AFH = 0x00000800
+} ODM_Ability_E;
+
+/* */
+/* 2011/10/20 MH For MP driver RT_WLAN_STA = STA_INFO_T */
+/* Please declare below ODM relative info in your STA info structure. */
+/* */
+typedef struct _ODM_STA_INFO {
+ /* Driver Write */
+	bool bUsed; /* record whether the sta is linked or not */
+ /* u8 WirelessMode; */
+ u8 IOTPeer; /* Enum value. HT_IOT_PEER_E */
+
+ /* ODM Write */
+ /* 1 PHY_STATUS_INFO */
+ u8 RSSI_Path[4]; /* */
+ u8 RSSI_Ave;
+ u8 RXEVM[4];
+ u8 RXSNR[4];
+
+ /* ODM Write */
+ /* 1 TX_INFO (may changed by IC) */
+ /* TX_INFO_T pTxInfo; Define in IC folder. Move lower layer. */
+
+ /* */
+	/* Please use a compile flag to disable the structure for ICs other than 88E. */
+ /* Move To lower layer. */
+ /* */
+ /* ODM Write Wilson will handle this part(said by Luke.Lee) */
+ /* TX_RPT_T pTxRpt; Define in IC folder. Move lower layer. */
+} ODM_STA_INFO_T, *PODM_STA_INFO_T;
+
+/* */
+/* 2011/10/20 MH Define Common info enum for all team. */
+/* */
+typedef enum _ODM_Common_Info_Definition {
+ /* Fixed value: */
+
+ /* HOOK BEFORE REG INIT----------- */
+ ODM_CMNINFO_PLATFORM = 0,
+ ODM_CMNINFO_ABILITY, /* ODM_ABILITY_E */
+ ODM_CMNINFO_INTERFACE, /* ODM_INTERFACE_E */
+ ODM_CMNINFO_MP_TEST_CHIP,
+ ODM_CMNINFO_IC_TYPE, /* ODM_IC_TYPE_E */
+ ODM_CMNINFO_CUT_VER, /* ODM_CUT_VERSION_E */
+ ODM_CMNINFO_FAB_VER, /* ODM_FAB_E */
+ ODM_CMNINFO_RF_TYPE, /* ODM_RF_PATH_E or ODM_RF_TYPE_E? */
+ ODM_CMNINFO_RFE_TYPE,
+ ODM_CMNINFO_BOARD_TYPE, /* ODM_BOARD_TYPE_E */
+ ODM_CMNINFO_PACKAGE_TYPE,
+ ODM_CMNINFO_EXT_LNA, /* true */
+ ODM_CMNINFO_5G_EXT_LNA,
+ ODM_CMNINFO_EXT_PA,
+ ODM_CMNINFO_5G_EXT_PA,
+ ODM_CMNINFO_GPA,
+ ODM_CMNINFO_APA,
+ ODM_CMNINFO_GLNA,
+ ODM_CMNINFO_ALNA,
+ ODM_CMNINFO_EXT_TRSW,
+ ODM_CMNINFO_PATCH_ID, /* CUSTOMER ID */
+ ODM_CMNINFO_BINHCT_TEST,
+ ODM_CMNINFO_BWIFI_TEST,
+ ODM_CMNINFO_SMART_CONCURRENT,
+ /* HOOK BEFORE REG INIT----------- */
+
+
+ /* Dynamic value: */
+/* POINTER REFERENCE----------- */
+ ODM_CMNINFO_MAC_PHY_MODE, /* ODM_MAC_PHY_MODE_E */
+ ODM_CMNINFO_TX_UNI,
+ ODM_CMNINFO_RX_UNI,
+ ODM_CMNINFO_WM_MODE, /* ODM_WIRELESS_MODE_E */
+ ODM_CMNINFO_BAND, /* ODM_BAND_TYPE_E */
+ ODM_CMNINFO_SEC_CHNL_OFFSET, /* ODM_SEC_CHNL_OFFSET_E */
+ ODM_CMNINFO_SEC_MODE, /* ODM_SECURITY_E */
+ ODM_CMNINFO_BW, /* ODM_BW_E */
+ ODM_CMNINFO_CHNL,
+ ODM_CMNINFO_FORCED_RATE,
+
+ ODM_CMNINFO_DMSP_GET_VALUE,
+ ODM_CMNINFO_BUDDY_ADAPTOR,
+ ODM_CMNINFO_DMSP_IS_MASTER,
+ ODM_CMNINFO_SCAN,
+ ODM_CMNINFO_POWER_SAVING,
+ ODM_CMNINFO_ONE_PATH_CCA, /* ODM_CCA_PATH_E */
+ ODM_CMNINFO_DRV_STOP,
+ ODM_CMNINFO_PNP_IN,
+ ODM_CMNINFO_INIT_ON,
+ ODM_CMNINFO_ANT_TEST,
+ ODM_CMNINFO_NET_CLOSED,
+ ODM_CMNINFO_MP_MODE,
+ /* ODM_CMNINFO_RTSTA_AID, For win driver only? */
+ ODM_CMNINFO_FORCED_IGI_LB,
+ ODM_CMNINFO_IS1ANTENNA,
+ ODM_CMNINFO_RFDEFAULTPATH,
+/* POINTER REFERENCE----------- */
+
+/* CALL BY VALUE------------- */
+ ODM_CMNINFO_WIFI_DIRECT,
+ ODM_CMNINFO_WIFI_DISPLAY,
+ ODM_CMNINFO_LINK_IN_PROGRESS,
+ ODM_CMNINFO_LINK,
+ ODM_CMNINFO_STATION_STATE,
+ ODM_CMNINFO_RSSI_MIN,
+ ODM_CMNINFO_DBG_COMP, /* u64 */
+ ODM_CMNINFO_DBG_LEVEL, /* u32 */
+ ODM_CMNINFO_RA_THRESHOLD_HIGH, /* u8 */
+ ODM_CMNINFO_RA_THRESHOLD_LOW, /* u8 */
+ ODM_CMNINFO_RF_ANTENNA_TYPE, /* u8 */
+ ODM_CMNINFO_BT_ENABLED,
+ ODM_CMNINFO_BT_HS_CONNECT_PROCESS,
+ ODM_CMNINFO_BT_HS_RSSI,
+ ODM_CMNINFO_BT_OPERATION,
+ ODM_CMNINFO_BT_LIMITED_DIG, /* Need to Limited Dig or not */
+ ODM_CMNINFO_BT_DISABLE_EDCA,
+/* CALL BY VALUE------------- */
+
+ /* Dynamic ptr array hook itms. */
+ ODM_CMNINFO_STA_STATUS,
+ ODM_CMNINFO_PHY_STATUS,
+ ODM_CMNINFO_MAC_STATUS,
+
+ ODM_CMNINFO_MAX,
+
+
+} ODM_CMNINFO_E;
+
+/* 2011/10/20 MH Define ODM support ability. ODM_CMNINFO_ABILITY */
+typedef enum _ODM_Support_Ability_Definition {
+ /* */
+ /* BB ODM section BIT 0-15 */
+ /* */
+ ODM_BB_DIG = BIT0,
+ ODM_BB_RA_MASK = BIT1,
+ ODM_BB_DYNAMIC_TXPWR = BIT2,
+ ODM_BB_FA_CNT = BIT3,
+ ODM_BB_RSSI_MONITOR = BIT4,
+ ODM_BB_CCK_PD = BIT5,
+ ODM_BB_ANT_DIV = BIT6,
+ ODM_BB_PWR_SAVE = BIT7,
+ ODM_BB_PWR_TRAIN = BIT8,
+ ODM_BB_RATE_ADAPTIVE = BIT9,
+ ODM_BB_PATH_DIV = BIT10,
+ ODM_BB_PSD = BIT11,
+ ODM_BB_RXHP = BIT12,
+ ODM_BB_ADAPTIVITY = BIT13,
+ ODM_BB_CFO_TRACKING = BIT14,
+
+ /* MAC DM section BIT 16-23 */
+ ODM_MAC_EDCA_TURBO = BIT16,
+ ODM_MAC_EARLY_MODE = BIT17,
+
+ /* RF ODM section BIT 24-31 */
+ ODM_RF_TX_PWR_TRACK = BIT24,
+ ODM_RF_RX_GAIN_TRACK = BIT25,
+ ODM_RF_CALIBRATION = BIT26,
+} ODM_ABILITY_E;
+
+/* ODM_CMNINFO_INTERFACE */
+typedef enum tag_ODM_Support_Interface_Definition {
+ ODM_ITRF_SDIO = 0x4,
+ ODM_ITRF_ALL = 0x7,
+} ODM_INTERFACE_E;
+
+/* ODM_CMNINFO_IC_TYPE */
+typedef enum tag_ODM_Support_IC_Type_Definition {
+ ODM_RTL8723B = BIT8,
+} ODM_IC_TYPE_E;
+
+/* ODM_CMNINFO_CUT_VER */
+typedef enum tag_ODM_Cut_Version_Definition {
+ ODM_CUT_A = 0,
+ ODM_CUT_B = 1,
+ ODM_CUT_C = 2,
+ ODM_CUT_D = 3,
+ ODM_CUT_E = 4,
+ ODM_CUT_F = 5,
+
+ ODM_CUT_I = 8,
+ ODM_CUT_J = 9,
+ ODM_CUT_K = 10,
+ ODM_CUT_TEST = 15,
+} ODM_CUT_VERSION_E;
+
+/* ODM_CMNINFO_FAB_VER */
+typedef enum tag_ODM_Fab_Version_Definition {
+ ODM_TSMC = 0,
+ ODM_UMC = 1,
+} ODM_FAB_E;
+
+/* ODM_CMNINFO_RF_TYPE */
+/* */
+/* For example 1T2R (A+AB = BIT0|BIT4|BIT5) */
+/* */
+typedef enum tag_ODM_RF_Path_Bit_Definition {
+ ODM_RF_TX_A = BIT0,
+ ODM_RF_TX_B = BIT1,
+ ODM_RF_TX_C = BIT2,
+ ODM_RF_TX_D = BIT3,
+ ODM_RF_RX_A = BIT4,
+ ODM_RF_RX_B = BIT5,
+ ODM_RF_RX_C = BIT6,
+ ODM_RF_RX_D = BIT7,
+} ODM_RF_PATH_E;
+
+
+typedef enum tag_ODM_RF_Type_Definition {
+ ODM_1T1R = 0,
+ ODM_1T2R = 1,
+ ODM_2T2R = 2,
+ ODM_2T3R = 3,
+ ODM_2T4R = 4,
+ ODM_3T3R = 5,
+ ODM_3T4R = 6,
+ ODM_4T4R = 7,
+} ODM_RF_TYPE_E;
+
+
+/* */
+/* ODM Dynamic common info value definition */
+/* */
+
+/* typedef enum _MACPHY_MODE_8192D{ */
+/* SINGLEMAC_SINGLEPHY, */
+/* DUALMAC_DUALPHY, */
+/* DUALMAC_SINGLEPHY, */
+/* MACPHY_MODE_8192D,*PMACPHY_MODE_8192D; */
+/* Above is the original define in MP driver. Please use the same define. THX. */
+typedef enum tag_ODM_MAC_PHY_Mode_Definition {
+ ODM_SMSP = 0,
+ ODM_DMSP = 1,
+ ODM_DMDP = 2,
+} ODM_MAC_PHY_MODE_E;
+
+
+typedef enum tag_BT_Coexist_Definition {
+ ODM_BT_BUSY = 1,
+ ODM_BT_ON = 2,
+ ODM_BT_OFF = 3,
+ ODM_BT_NONE = 4,
+} ODM_BT_COEXIST_E;
+
+/* ODM_CMNINFO_OP_MODE */
+typedef enum tag_Operation_Mode_Definition {
+ ODM_NO_LINK = BIT0,
+ ODM_LINK = BIT1,
+ ODM_SCAN = BIT2,
+ ODM_POWERSAVE = BIT3,
+ ODM_AP_MODE = BIT4,
+ ODM_CLIENT_MODE = BIT5,
+ ODM_AD_HOC = BIT6,
+ ODM_WIFI_DIRECT = BIT7,
+ ODM_WIFI_DISPLAY = BIT8,
+} ODM_OPERATION_MODE_E;
+
+/* ODM_CMNINFO_WM_MODE */
+typedef enum tag_Wireless_Mode_Definition {
+ ODM_WM_UNKNOW = 0x0,
+ ODM_WM_B = BIT0,
+ ODM_WM_G = BIT1,
+ ODM_WM_A = BIT2,
+ ODM_WM_N24G = BIT3,
+ ODM_WM_N5G = BIT4,
+ ODM_WM_AUTO = BIT5,
+ ODM_WM_AC = BIT6,
+} ODM_WIRELESS_MODE_E;
+
+/* ODM_CMNINFO_BAND */
+typedef enum tag_Band_Type_Definition {
+ ODM_BAND_2_4G = 0,
+ ODM_BAND_5G,
+ ODM_BAND_ON_BOTH,
+ ODM_BANDMAX
+} ODM_BAND_TYPE_E;
+
+/* ODM_CMNINFO_SEC_CHNL_OFFSET */
+typedef enum tag_Secondary_Channel_Offset_Definition {
+ ODM_DONT_CARE = 0,
+ ODM_BELOW = 1,
+ ODM_ABOVE = 2
+} ODM_SEC_CHNL_OFFSET_E;
+
+/* ODM_CMNINFO_SEC_MODE */
+typedef enum tag_Security_Definition {
+ ODM_SEC_OPEN = 0,
+ ODM_SEC_WEP40 = 1,
+ ODM_SEC_TKIP = 2,
+ ODM_SEC_RESERVE = 3,
+ ODM_SEC_AESCCMP = 4,
+ ODM_SEC_WEP104 = 5,
+ ODM_WEP_WPA_MIXED = 6, /* WEP + WPA */
+ ODM_SEC_SMS4 = 7,
+} ODM_SECURITY_E;
+
+/* ODM_CMNINFO_BW */
+typedef enum tag_Bandwidth_Definition {
+ ODM_BW20M = 0,
+ ODM_BW40M = 1,
+ ODM_BW80M = 2,
+ ODM_BW160M = 3,
+ ODM_BW10M = 4,
+} ODM_BW_E;
+
+
+/* ODM_CMNINFO_BOARD_TYPE */
+/* For non-AC-series ICs, ODM_BOARD_5G_EXT_PA and ODM_BOARD_5G_EXT_LNA are ignored */
+/* For AC-series ICs, external PA & LNA can be individually added on 2.4G and/or 5G */
+typedef enum tag_Board_Definition {
+ ODM_BOARD_DEFAULT = 0, /* The DEFAULT case. */
+ ODM_BOARD_MINICARD = BIT(0), /* 0 = non-mini card, 1 = mini card. */
+ ODM_BOARD_SLIM = BIT(1), /* 0 = non-slim card, 1 = slim card */
+ ODM_BOARD_BT = BIT(2), /* 0 = without BT card, 1 = with BT */
+ ODM_BOARD_EXT_PA = BIT(3), /* 0 = no 2G ext-PA, 1 = existing 2G ext-PA */
+ ODM_BOARD_EXT_LNA = BIT(4), /* 0 = no 2G ext-LNA, 1 = existing 2G ext-LNA */
+ ODM_BOARD_EXT_TRSW = BIT(5), /* 0 = no ext-TRSW, 1 = existing ext-TRSW */
+ ODM_BOARD_EXT_PA_5G = BIT(6), /* 0 = no 5G ext-PA, 1 = existing 5G ext-PA */
+ ODM_BOARD_EXT_LNA_5G = BIT(7), /* 0 = no 5G ext-LNA, 1 = existing 5G ext-LNA */
+} ODM_BOARD_TYPE_E;
+
+typedef enum tag_ODM_Package_Definition {
+ ODM_PACKAGE_DEFAULT = 0,
+ ODM_PACKAGE_QFN68 = BIT(0),
+ ODM_PACKAGE_TFBGA90 = BIT(1),
+ ODM_PACKAGE_TFBGA79 = BIT(2),
+} ODM_Package_TYPE_E;
+
+typedef enum tag_ODM_TYPE_GPA_Definition {
+ TYPE_GPA0 = 0,
+ TYPE_GPA1 = BIT(1)|BIT(0)
+} ODM_TYPE_GPA_E;
+
+typedef enum tag_ODM_TYPE_APA_Definition {
+ TYPE_APA0 = 0,
+ TYPE_APA1 = BIT(1)|BIT(0)
+} ODM_TYPE_APA_E;
+
+typedef enum tag_ODM_TYPE_GLNA_Definition {
+ TYPE_GLNA0 = 0,
+ TYPE_GLNA1 = BIT(2)|BIT(0),
+ TYPE_GLNA2 = BIT(3)|BIT(1),
+ TYPE_GLNA3 = BIT(3)|BIT(2)|BIT(1)|BIT(0)
+} ODM_TYPE_GLNA_E;
+
+typedef enum tag_ODM_TYPE_ALNA_Definition {
+ TYPE_ALNA0 = 0,
+ TYPE_ALNA1 = BIT(2)|BIT(0),
+ TYPE_ALNA2 = BIT(3)|BIT(1),
+ TYPE_ALNA3 = BIT(3)|BIT(2)|BIT(1)|BIT(0)
+} ODM_TYPE_ALNA_E;
+
+/* ODM_CMNINFO_ONE_PATH_CCA */
+typedef enum tag_CCA_Path {
+ ODM_CCA_2R = 0,
+ ODM_CCA_1R_A = 1,
+ ODM_CCA_1R_B = 2,
+} ODM_CCA_PATH_E;
+
+
+typedef struct _ODM_RA_Info_ {
+ u8 RateID;
+ u32 RateMask;
+ u32 RAUseRate;
+ u8 RateSGI;
+ u8 RssiStaRA;
+ u8 PreRssiStaRA;
+ u8 SGIEnable;
+ u8 DecisionRate;
+ u8 PreRate;
+ u8 HighestRate;
+ u8 LowestRate;
+ u32 NscUp;
+ u32 NscDown;
+ u16 RTY[5];
+ u32 TOTAL;
+ u16 DROP;
+ u8 Active;
+ u16 RptTime;
+ u8 RAWaitingCounter;
+ u8 RAPendingCounter;
+ u8 PTActive; /* on or off */
+ u8 PTTryState; /* 0 trying state, 1 for decision state */
+ u8 PTStage; /* 0~6 */
+ u8 PTStopCount; /* Stop PT counter */
+ u8 PTPreRate; /* if rate change do PT */
+ u8 PTPreRssi; /* if RSSI change 5% do PT */
+	u8 PTModeSS; /* decide which rate should do PT */
+ u8 RAstage; /* StageRA, decide how many times RA will be done between PT */
+ u8 PTSmoothFactor;
+} ODM_RA_INFO_T, *PODM_RA_INFO_T;
+
+typedef struct _IQK_MATRIX_REGS_SETTING {
+ bool bIQKDone;
+ s32 Value[3][IQK_Matrix_REG_NUM];
+ bool bBWIqkResultSaved[3];
+} IQK_MATRIX_REGS_SETTING, *PIQK_MATRIX_REGS_SETTING;
+
+
+/* Remove PATHDIV_PARA struct to odm_PathDiv.h */
+
+typedef struct ODM_RF_Calibration_Structure {
+ /* for tx power tracking */
+
+ u32 RegA24; /* for TempCCK */
+ s32 RegE94;
+ s32 RegE9C;
+ s32 RegEB4;
+ s32 RegEBC;
+
+ u8 TXPowercount;
+ bool bTXPowerTrackingInit;
+ bool bTXPowerTracking;
+	u8 TxPowerTrackControl; /* for mp mode, turn off txpwrtracking by default */
+ u8 TM_Trigger;
+ u8 InternalPA5G[2]; /* pathA / pathB */
+
+ u8 ThermalMeter[2]; /* ThermalMeter, index 0 for RFIC0, and 1 for RFIC1 */
+ u8 ThermalValue;
+ u8 ThermalValue_LCK;
+ u8 ThermalValue_IQK;
+ u8 ThermalValue_DPK;
+ u8 ThermalValue_AVG[AVG_THERMAL_NUM];
+ u8 ThermalValue_AVG_index;
+ u8 ThermalValue_RxGain;
+ u8 ThermalValue_Crystal;
+ u8 ThermalValue_DPKstore;
+ u8 ThermalValue_DPKtrack;
+ bool TxPowerTrackingInProgress;
+
+ bool bReloadtxpowerindex;
+ u8 bRfPiEnable;
+ u32 TXPowerTrackingCallbackCnt; /* cosa add for debug */
+
+
+ /* Tx power Tracking ------------------------- */
+ u8 bCCKinCH14;
+ u8 CCK_index;
+ u8 OFDM_index[MAX_RF_PATH];
+ s8 PowerIndexOffset[MAX_RF_PATH];
+ s8 DeltaPowerIndex[MAX_RF_PATH];
+ s8 DeltaPowerIndexLast[MAX_RF_PATH];
+ bool bTxPowerChanged;
+
+ u8 ThermalValue_HP[HP_THERMAL_NUM];
+ u8 ThermalValue_HP_index;
+ IQK_MATRIX_REGS_SETTING IQKMatrixRegSetting[IQK_Matrix_Settings_NUM];
+ bool bNeedIQK;
+ bool bIQKInProgress;
+ u8 Delta_IQK;
+ u8 Delta_LCK;
+ s8 BBSwingDiff2G, BBSwingDiff5G; /* Unit: dB */
+ u8 DeltaSwingTableIdx_2GCCKA_P[DELTA_SWINGIDX_SIZE];
+ u8 DeltaSwingTableIdx_2GCCKA_N[DELTA_SWINGIDX_SIZE];
+ u8 DeltaSwingTableIdx_2GCCKB_P[DELTA_SWINGIDX_SIZE];
+ u8 DeltaSwingTableIdx_2GCCKB_N[DELTA_SWINGIDX_SIZE];
+ u8 DeltaSwingTableIdx_2GA_P[DELTA_SWINGIDX_SIZE];
+ u8 DeltaSwingTableIdx_2GA_N[DELTA_SWINGIDX_SIZE];
+ u8 DeltaSwingTableIdx_2GB_P[DELTA_SWINGIDX_SIZE];
+ u8 DeltaSwingTableIdx_2GB_N[DELTA_SWINGIDX_SIZE];
+ u8 DeltaSwingTableIdx_5GA_P[BAND_NUM][DELTA_SWINGIDX_SIZE];
+ u8 DeltaSwingTableIdx_5GA_N[BAND_NUM][DELTA_SWINGIDX_SIZE];
+ u8 DeltaSwingTableIdx_5GB_P[BAND_NUM][DELTA_SWINGIDX_SIZE];
+ u8 DeltaSwingTableIdx_5GB_N[BAND_NUM][DELTA_SWINGIDX_SIZE];
+ u8 DeltaSwingTableIdx_2GA_P_8188E[DELTA_SWINGIDX_SIZE];
+ u8 DeltaSwingTableIdx_2GA_N_8188E[DELTA_SWINGIDX_SIZE];
+
+ /* */
+
+ /* for IQK */
+ u32 RegC04;
+ u32 Reg874;
+ u32 RegC08;
+ u32 RegB68;
+ u32 RegB6C;
+ u32 Reg870;
+ u32 Reg860;
+ u32 Reg864;
+
+ bool bIQKInitialized;
+ bool bLCKInProgress;
+ bool bAntennaDetected;
+ u32 ADDA_backup[IQK_ADDA_REG_NUM];
+ u32 IQK_MAC_backup[IQK_MAC_REG_NUM];
+ u32 IQK_BB_backup_recover[9];
+ u32 IQK_BB_backup[IQK_BB_REG_NUM];
+ u32 TxIQC_8723B[2][3][2]; /* { {S1: 0xc94, 0xc80, 0xc4c} , {S0: 0xc9c, 0xc88, 0xc4c}} */
+ u32 RxIQC_8723B[2][2][2]; /* { {S1: 0xc14, 0xca0} , {S0: 0xc14, 0xca0}} */
+
+
+ /* for APK */
+ u32 APKoutput[2][2]; /* path A/B; output1_1a/output1_2a */
+ u8 bAPKdone;
+ u8 bAPKThermalMeterIgnore;
+
+ /* DPK */
+ bool bDPKFail;
+ u8 bDPdone;
+ u8 bDPPathAOK;
+ u8 bDPPathBOK;
+
+ u32 TxLOK[2];
+
+} ODM_RF_CAL_T, *PODM_RF_CAL_T;
+/* */
+/* ODM Dynamic common info value definition */
+/* */
+
+typedef struct _FAST_ANTENNA_TRAINNING_ {
+ u8 Bssid[6];
+ u8 antsel_rx_keep_0;
+ u8 antsel_rx_keep_1;
+ u8 antsel_rx_keep_2;
+ u8 antsel_rx_keep_3;
+ u32 antSumRSSI[7];
+ u32 antRSSIcnt[7];
+ u32 antAveRSSI[7];
+ u8 FAT_State;
+ u32 TrainIdx;
+ u8 antsel_a[ODM_ASSOCIATE_ENTRY_NUM];
+ u8 antsel_b[ODM_ASSOCIATE_ENTRY_NUM];
+ u8 antsel_c[ODM_ASSOCIATE_ENTRY_NUM];
+ u32 MainAnt_Sum[ODM_ASSOCIATE_ENTRY_NUM];
+ u32 AuxAnt_Sum[ODM_ASSOCIATE_ENTRY_NUM];
+ u32 MainAnt_Cnt[ODM_ASSOCIATE_ENTRY_NUM];
+ u32 AuxAnt_Cnt[ODM_ASSOCIATE_ENTRY_NUM];
+ u8 RxIdleAnt;
+ bool bBecomeLinked;
+ u32 MinMaxRSSI;
+ u8 idx_AntDiv_counter_2G;
+ u8 idx_AntDiv_counter_5G;
+ u32 AntDiv_2G_5G;
+ u32 CCK_counter_main;
+ u32 CCK_counter_aux;
+ u32 OFDM_counter_main;
+ u32 OFDM_counter_aux;
+
+
+ u32 CCK_CtrlFrame_Cnt_main;
+ u32 CCK_CtrlFrame_Cnt_aux;
+ u32 OFDM_CtrlFrame_Cnt_main;
+ u32 OFDM_CtrlFrame_Cnt_aux;
+ u32 MainAnt_CtrlFrame_Sum;
+ u32 AuxAnt_CtrlFrame_Sum;
+ u32 MainAnt_CtrlFrame_Cnt;
+ u32 AuxAnt_CtrlFrame_Cnt;
+
+} FAT_T, *pFAT_T;
+
+typedef enum _FAT_STATE {
+ FAT_NORMAL_STATE = 0,
+ FAT_TRAINING_STATE = 1,
+} FAT_STATE_E, *PFAT_STATE_E;
+
+typedef enum _ANT_DIV_TYPE {
+ NO_ANTDIV = 0xFF,
+ CG_TRX_HW_ANTDIV = 0x01,
+ CGCS_RX_HW_ANTDIV = 0x02,
+ FIXED_HW_ANTDIV = 0x03,
+ CG_TRX_SMART_ANTDIV = 0x04,
+ CGCS_RX_SW_ANTDIV = 0x05,
+	S0S1_SW_ANTDIV = 0x06 /* 8723B internal switch S0 S1 */
+} ANT_DIV_TYPE_E, *PANT_DIV_TYPE_E;
+
+typedef struct _ODM_PATH_DIVERSITY_ {
+ u8 RespTxPath;
+ u8 PathSel[ODM_ASSOCIATE_ENTRY_NUM];
+ u32 PathA_Sum[ODM_ASSOCIATE_ENTRY_NUM];
+ u32 PathB_Sum[ODM_ASSOCIATE_ENTRY_NUM];
+ u32 PathA_Cnt[ODM_ASSOCIATE_ENTRY_NUM];
+ u32 PathB_Cnt[ODM_ASSOCIATE_ENTRY_NUM];
+} PATHDIV_T, *pPATHDIV_T;
+
+
+typedef enum _BASEBAND_CONFIG_PHY_REG_PG_VALUE_TYPE{
+ PHY_REG_PG_RELATIVE_VALUE = 0,
+ PHY_REG_PG_EXACT_VALUE = 1
+} PHY_REG_PG_TYPE;
+
+
+/* */
+/* Antenna detection information from single tone mechanism, added by Roger, 2012.11.27. */
+/* */
+typedef struct _ANT_DETECTED_INFO {
+ bool bAntDetected;
+ u32 dBForAntA;
+ u32 dBForAntB;
+ u32 dBForAntO;
+} ANT_DETECTED_INFO, *PANT_DETECTED_INFO;
+
+/* */
+/* 2011/09/22 MH Copy from SD4 defined structure. We use it to support PHY DM integration. */
+/* */
+typedef struct DM_Out_Source_Dynamic_Mechanism_Structure {
+ /* RT_TIMER FastAntTrainingTimer; */
+ /* */
+ /* Add for different team use temporarily */
+ /* */
+ struct adapter *Adapter; /* For CE/NIC team */
+	/* When you use the Adapter or priv pointer, you must make sure the pointer is ready. */
+ bool odm_ready;
+
+ PHY_REG_PG_TYPE PhyRegPgValueType;
+ u8 PhyRegPgVersion;
+
+ u64 DebugComponents;
+ u32 DebugLevel;
+
+ u32 NumQryPhyStatusAll; /* CCK + OFDM */
+ u32 LastNumQryPhyStatusAll;
+ u32 RxPWDBAve;
+ bool MPDIG_2G; /* off MPDIG */
+ u8 Times_2G;
+
+/* ODM HANDLE, DRIVER DOES NOT NEED TO HOOK------ */
+ bool bCckHighPower;
+ u8 RFPathRxEnable; /* ODM_CMNINFO_RFPATH_ENABLE */
+ u8 ControlChannel;
+/* ODM HANDLE, DRIVER DOES NOT NEED TO HOOK------ */
+
+/* REMOVED COMMON INFO---------- */
+ /* u8 PseudoMacPhyMode; */
+ /* bool *BTCoexist; */
+ /* bool PseudoBtCoexist; */
+ /* u8 OPMode; */
+ /* bool bAPMode; */
+ /* bool bClientMode; */
+ /* bool bAdHocMode; */
+ /* bool bSlaveOfDMSP; */
+/* REMOVED COMMON INFO---------- */
+
+
+/* 1 COMMON INFORMATION */
+
+ /* */
+ /* Init Value */
+ /* */
+/* HOOK BEFORE REG INIT----------- */
+ /* ODM Platform info AP/ADSL/CE/MP = 1/2/3/4 */
+ u8 SupportPlatform;
+	/* ODM Support Ability DIG/RATR/TX_PWR_TRACK/... = 1/2/3/... */
+ u32 SupportAbility;
+ /* ODM PCIE/USB/SDIO = 1/2/3 */
+ u8 SupportInterface;
+ /* ODM composite or independent. Bit oriented/ 92C+92D+ .... or any other type = 1/2/3/... */
+ u32 SupportICType;
+ /* Cut Version TestChip/A-cut/B-cut... = 0/1/2/3/... */
+ u8 CutVersion;
+ /* Fab Version TSMC/UMC = 0/1 */
+ u8 FabVersion;
+ /* RF Type 4T4R/3T3R/2T2R/1T2R/1T1R/... */
+ u8 RFType;
+ u8 RFEType;
+ /* Board Type Normal/HighPower/MiniCard/SLIM/Combo/... = 0/1/2/3/4/... */
+ u8 BoardType;
+ u8 PackageType;
+ u8 TypeGLNA;
+ u8 TypeGPA;
+ u8 TypeALNA;
+ u8 TypeAPA;
+ /* with external LNA NO/Yes = 0/1 */
+ u8 ExtLNA;
+ u8 ExtLNA5G;
+ /* with external PA NO/Yes = 0/1 */
+ u8 ExtPA;
+ u8 ExtPA5G;
+ /* with external TRSW NO/Yes = 0/1 */
+ u8 ExtTRSW;
+ u8 PatchID; /* Customer ID */
+ bool bInHctTest;
+ bool bWIFITest;
+
+ bool bDualMacSmartConcurrent;
+ u32 BK_SupportAbility;
+ u8 AntDivType;
+/* HOOK BEFORE REG INIT----------- */
+
+ /* */
+ /* Dynamic Value */
+ /* */
+/* POINTER REFERENCE----------- */
+
+ u8 u8_temp;
+ bool bool_temp;
+ struct adapter *adapter_temp;
+
+ /* MAC PHY Mode SMSP/DMSP/DMDP = 0/1/2 */
+ u8 *pMacPhyMode;
+ /* TX Unicast byte count */
+ u64 *pNumTxBytesUnicast;
+ /* RX Unicast byte count */
+ u64 *pNumRxBytesUnicast;
+ /* Wireless mode B/G/A/N = BIT0/BIT1/BIT2/BIT3 */
+ u8 *pwirelessmode; /* ODM_WIRELESS_MODE_E */
+	/* Frequency band 2.4G/5G = 0/1 */
+ u8 *pBandType;
+ /* Secondary channel offset don't_care/below/above = 0/1/2 */
+ u8 *pSecChOffset;
+ /* Security mode Open/WEP/AES/TKIP = 0/1/2/3 */
+ u8 *pSecurity;
+ /* BW info 20M/40M/80M = 0/1/2 */
+ u8 *pBandWidth;
+ /* Central channel location Ch1/Ch2/.... */
+ u8 *pChannel; /* central channel number */
+ bool DPK_Done;
+ /* Common info for 92D DMSP */
+
+ bool *pbGetValueFromOtherMac;
+ struct adapter **pBuddyAdapter;
+ bool *pbMasterOfDMSP; /* MAC0: master, MAC1: slave */
+ /* Common info for Status */
+ bool *pbScanInProcess;
+ bool *pbPowerSaving;
+ /* CCA Path 2-path/path-A/path-B = 0/1/2; using ODM_CCA_PATH_E. */
+ u8 *pOnePathCCA;
+ /* pMgntInfo->AntennaTest */
+ u8 *pAntennaTest;
+ bool *pbNet_closed;
+ u8 *mp_mode;
+ /* u8 *pAidMap; */
+ u8 *pu1ForcedIgiLb;
+/* For 8723B IQK----------- */
+ bool *pIs1Antenna;
+ u8 *pRFDefaultPath;
+ /* 0:S1, 1:S0 */
+
+/* POINTER REFERENCE----------- */
+ u16 *pForcedDataRate;
+/* CALL BY VALUE------------- */
+ bool bLinkInProcess;
+ bool bWIFI_Direct;
+ bool bWIFI_Display;
+ bool bLinked;
+
+ bool bsta_state;
+ u8 RSSI_Min;
+ u8 InterfaceIndex; /* Add for 92D dual MAC: 0--Mac0 1--Mac1 */
+ bool bIsMPChip;
+ bool bOneEntryOnly;
+ /* Common info for BTDM */
+	bool bBtEnabled; /* whether BT is enabled */
+	bool bBtConnectProcess; /* BT HS connection is in progress. */
+	u8 btHsRssi; /* wifi RSSI value in BT HS mode. */
+	bool bBtHsOperation; /* BT HS mode is in progress */
+ bool bBtDisableEdcaTurbo; /* Under some condition, don't enable the EDCA Turbo */
+ bool bBtLimitedDig; /* BT is busy. */
+/* CALL BY VALUE------------- */
+ u8 RSSI_A;
+ u8 RSSI_B;
+ u64 RSSI_TRSW;
+ u64 RSSI_TRSW_H;
+ u64 RSSI_TRSW_L;
+ u64 RSSI_TRSW_iso;
+
+ u8 RxRate;
+ bool bNoisyState;
+ u8 TxRate;
+ u8 LinkedInterval;
+ u8 preChannel;
+ u32 TxagcOffsetValueA;
+ bool IsTxagcOffsetPositiveA;
+ u32 TxagcOffsetValueB;
+ bool IsTxagcOffsetPositiveB;
+ u64 lastTxOkCnt;
+ u64 lastRxOkCnt;
+ u32 BbSwingOffsetA;
+ bool IsBbSwingOffsetPositiveA;
+ u32 BbSwingOffsetB;
+ bool IsBbSwingOffsetPositiveB;
+ s8 TH_L2H_ini;
+ s8 TH_EDCCA_HL_diff;
+ s8 IGI_Base;
+ u8 IGI_target;
+ bool ForceEDCCA;
+ u8 AdapEn_RSSI;
+ s8 Force_TH_H;
+ s8 Force_TH_L;
+ u8 IGI_LowerBound;
+ u8 antdiv_rssi;
+ u8 AntType;
+ u8 pre_AntType;
+ u8 antdiv_period;
+ u8 antdiv_select;
+ u8 NdpaPeriod;
+ bool H2C_RARpt_connect;
+
+	/* added by Yu Chen for adaptivity */
+ bool adaptivity_flag;
+ bool NHM_disable;
+ bool TxHangFlg;
+ bool Carrier_Sense_enable;
+ u8 tolerance_cnt;
+ u64 NHMCurTxOkcnt;
+ u64 NHMCurRxOkcnt;
+ u64 NHMLastTxOkcnt;
+ u64 NHMLastRxOkcnt;
+ u8 txEdcca1;
+ u8 txEdcca0;
+ s8 H2L_lb;
+ s8 L2H_lb;
+ u8 Adaptivity_IGI_upper;
+ u8 NHM_cnt_0;
+
+
+ ODM_NOISE_MONITOR noise_level;/* ODM_MAX_CHANNEL_NUM]; */
+ /* */
+ /* 2 Define STA info. */
+ /* _ODM_STA_INFO */
+ /* 2012/01/12 MH For MP, we need to reduce one array pointer for default port.?? */
+ PSTA_INFO_T pODM_StaInfo[ODM_ASSOCIATE_ENTRY_NUM];
+
+ /* */
+ /* 2012/02/14 MH Add to share 88E ra with other SW team. */
+	/* We need to collect all supported abilities into a proper area. */
+ /* */
+ bool RaSupport88E;
+
+ /* Define ........... */
+
+ /* Latest packet phy info (ODM write) */
+ ODM_PHY_DBG_INFO_T PhyDbgInfo;
+ /* PHY_INFO_88E PhyInfo; */
+
+ /* Latest packet phy info (ODM write) */
+ ODM_MAC_INFO *pMacInfo;
+ /* MAC_INFO_88E MacInfo; */
+
+	/* Different team independent structure?? */
+
+ /* */
+ /* TX_RTP_CMN TX_retrpo; */
+ /* TX_RTP_88E TX_retrpo; */
+ /* TX_RTP_8195 TX_retrpo; */
+
+ /* */
+ /* ODM Structure */
+ /* */
+ FAT_T DM_FatTable;
+ DIG_T DM_DigTable;
+ PS_T DM_PSTable;
+ Pri_CCA_T DM_PriCCA;
+ RXHP_T DM_RXHP_Table;
+ RA_T DM_RA_Table;
+ false_ALARM_STATISTICS FalseAlmCnt;
+ false_ALARM_STATISTICS FlaseAlmCntBuddyAdapter;
+ SWAT_T DM_SWAT_Table;
+ bool RSSI_test;
+ CFO_TRACKING DM_CfoTrack;
+
+ EDCA_T DM_EDCA_Table;
+ u32 WMMEDCA_BE;
+ PATHDIV_T DM_PathDiv;
+ /* Copy from SD4 structure */
+ /* */
+ /* ================================================== */
+ /* */
+
+ /* common */
+ /* u8 DM_Type; */
+ /* u8 PSD_Report_RXHP[80]; Add By Gary */
+ /* u8 PSD_func_flag; Add By Gary */
+ /* for DIG */
+ /* u8 bDMInitialGainEnable; */
+ /* u8 binitialized; for dm_initial_gain_Multi_STA use. */
+ /* for Antenna diversity */
+ /* u8 AntDivCfg; 0:OFF , 1:ON, 2:by efuse */
+ /* PSTA_INFO_T RSSI_target; */
+
+ bool *pbDriverStopped;
+ bool *pbDriverIsGoingToPnpSetPowerSleep;
+ bool *pinit_adpt_in_progress;
+
+ /* PSD */
+ bool bUserAssignLevel;
+ RT_TIMER PSDTimer;
+ u8 RSSI_BT; /* come from BT */
+ bool bPSDinProcess;
+ bool bPSDactive;
+ bool bDMInitialGainEnable;
+
+ /* MPT DIG */
+ RT_TIMER MPT_DIGTimer;
+
+ /* for rate adaptive, in fact, 88c/92c fw will handle this */
+ u8 bUseRAMask;
+
+ ODM_RATE_ADAPTIVE RateAdaptive;
+
+ ANT_DETECTED_INFO AntDetectedInfo; /* Antenna detected information for RSSI tool */
+
+ ODM_RF_CAL_T RFCalibrateInfo;
+
+ /* */
+ /* TX power tracking */
+ /* */
+ u8 BbSwingIdxOfdm[MAX_RF_PATH];
+ u8 BbSwingIdxOfdmCurrent;
+ u8 BbSwingIdxOfdmBase[MAX_RF_PATH];
+ bool BbSwingFlagOfdm;
+ u8 BbSwingIdxCck;
+ u8 BbSwingIdxCckCurrent;
+ u8 BbSwingIdxCckBase;
+ u8 DefaultOfdmIndex;
+ u8 DefaultCckIndex;
+ bool BbSwingFlagCck;
+
+ s8 Absolute_OFDMSwingIdx[MAX_RF_PATH];
+ s8 Remnant_OFDMSwingIdx[MAX_RF_PATH];
+ s8 Remnant_CCKSwingIdx;
+	s8 Modify_TxAGC_Value; /* Remnant compensation value at TxAGC */
+ bool Modify_TxAGC_Flag_PathA;
+ bool Modify_TxAGC_Flag_PathB;
+ bool Modify_TxAGC_Flag_PathC;
+ bool Modify_TxAGC_Flag_PathD;
+ bool Modify_TxAGC_Flag_PathA_CCK;
+
+ s8 KfreeOffset[MAX_RF_PATH];
+ /* */
+ /* ODM system resource. */
+ /* */
+
+ /* ODM relative time. */
+ RT_TIMER PathDivSwitchTimer;
+ /* 2011.09.27 add for Path Diversity */
+ RT_TIMER CCKPathDiversityTimer;
+ RT_TIMER FastAntTrainingTimer;
+
+ /* ODM relative workitem. */
+
+ #if (BEAMFORMING_SUPPORT == 1)
+ RT_BEAMFORMING_INFO BeamformingInfo;
+ #endif
+} DM_ODM_T, *PDM_ODM_T; /* DM_Dynamic_Mechanism_Structure */
+
+#define ODM_RF_PATH_MAX 2
+
+typedef enum _ODM_RF_RADIO_PATH {
+ ODM_RF_PATH_A = 0, /* Radio Path A */
+ ODM_RF_PATH_B = 1, /* Radio Path B */
+ ODM_RF_PATH_C = 2, /* Radio Path C */
+ ODM_RF_PATH_D = 3, /* Radio Path D */
+ ODM_RF_PATH_AB,
+ ODM_RF_PATH_AC,
+ ODM_RF_PATH_AD,
+ ODM_RF_PATH_BC,
+ ODM_RF_PATH_BD,
+ ODM_RF_PATH_CD,
+ ODM_RF_PATH_ABC,
+ ODM_RF_PATH_ACD,
+ ODM_RF_PATH_BCD,
+ ODM_RF_PATH_ABCD,
+ /* ODM_RF_PATH_MAX, Max RF number 90 support */
+} ODM_RF_RADIO_PATH_E, *PODM_RF_RADIO_PATH_E;
+
+typedef enum _ODM_RF_CONTENT {
+ odm_radioa_txt = 0x1000,
+ odm_radiob_txt = 0x1001,
+ odm_radioc_txt = 0x1002,
+ odm_radiod_txt = 0x1003
+} ODM_RF_CONTENT;
+
+typedef enum _ODM_BB_Config_Type {
+ CONFIG_BB_PHY_REG,
+ CONFIG_BB_AGC_TAB,
+ CONFIG_BB_AGC_TAB_2G,
+ CONFIG_BB_AGC_TAB_5G,
+ CONFIG_BB_PHY_REG_PG,
+ CONFIG_BB_PHY_REG_MP,
+ CONFIG_BB_AGC_TAB_DIFF,
+} ODM_BB_Config_Type, *PODM_BB_Config_Type;
+
+typedef enum _ODM_RF_Config_Type {
+ CONFIG_RF_RADIO,
+ CONFIG_RF_TXPWR_LMT,
+} ODM_RF_Config_Type, *PODM_RF_Config_Type;
+
+typedef enum _ODM_FW_Config_Type {
+ CONFIG_FW_NIC,
+ CONFIG_FW_NIC_2,
+ CONFIG_FW_AP,
+ CONFIG_FW_WoWLAN,
+ CONFIG_FW_WoWLAN_2,
+ CONFIG_FW_AP_WoWLAN,
+ CONFIG_FW_BT,
+} ODM_FW_Config_Type;
+
+/* Status code */
+typedef enum _RT_STATUS {
+ RT_STATUS_SUCCESS,
+ RT_STATUS_FAILURE,
+ RT_STATUS_PENDING,
+ RT_STATUS_RESOURCE,
+ RT_STATUS_INVALID_CONTEXT,
+ RT_STATUS_INVALID_PARAMETER,
+ RT_STATUS_NOT_SUPPORT,
+ RT_STATUS_OS_API_FAILED,
+} RT_STATUS, *PRT_STATUS;
+
+#ifdef REMOVE_PACK
+#pragma pack()
+#endif
+
+/* include "odm_function.h" */
+
+/* 3 =========================================================== */
+/* 3 DIG */
+/* 3 =========================================================== */
+
+/* Remove DIG by Yuchen */
+
+/* 3 =========================================================== */
+/* 3 AGC RX High Power Mode */
+/* 3 =========================================================== */
+#define LNA_Low_Gain_1 0x64
+#define LNA_Low_Gain_2 0x5A
+#define LNA_Low_Gain_3 0x58
+
+#define FA_RXHP_TH1 5000
+#define FA_RXHP_TH2 1500
+#define FA_RXHP_TH3 800
+#define FA_RXHP_TH4 600
+#define FA_RXHP_TH5 500
+
+/* 3 =========================================================== */
+/* 3 EDCA */
+/* 3 =========================================================== */
+
+/* 3 =========================================================== */
+/* 3 Dynamic Tx Power */
+/* 3 =========================================================== */
+/* Dynamic Tx Power Control Threshold */
+
+/* 3 =========================================================== */
+/* 3 Rate Adaptive */
+/* 3 =========================================================== */
+#define DM_RATR_STA_INIT 0
+#define DM_RATR_STA_HIGH 1
+#define DM_RATR_STA_MIDDLE 2
+#define DM_RATR_STA_LOW 3
+
+/* 3 =========================================================== */
+/* 3 BB Power Save */
+/* 3 =========================================================== */
+
+typedef enum tag_1R_CCA_Type_Definition {
+ CCA_1R = 0,
+ CCA_2R = 1,
+ CCA_MAX = 2,
+} DM_1R_CCA_E;
+
+typedef enum tag_RF_Type_Definition {
+ RF_Save = 0,
+ RF_Normal = 1,
+ RF_MAX = 2,
+} DM_RF_E;
+
+/* 3 =========================================================== */
+/* 3 Antenna Diversity */
+/* 3 =========================================================== */
+typedef enum tag_SW_Antenna_Switch_Definition {
+ Antenna_A = 1,
+ Antenna_B = 2,
+ Antenna_MAX = 3,
+} DM_SWAS_E;
+
+
+/* Maximum number of antenna detection attempts to perform, added by Roger, 2011.12.28. */
+#define MAX_ANTENNA_DETECTION_CNT 10
+
+/* */
+/* Extern Global Variables. */
+/* */
+extern u32 OFDMSwingTable[OFDM_TABLE_SIZE];
+extern u8 CCKSwingTable_Ch1_Ch13[CCK_TABLE_SIZE][8];
+extern u8 CCKSwingTable_Ch14[CCK_TABLE_SIZE][8];
+
+extern u32 OFDMSwingTable_New[OFDM_TABLE_SIZE];
+extern u8 CCKSwingTable_Ch1_Ch13_New[CCK_TABLE_SIZE][8];
+extern u8 CCKSwingTable_Ch14_New[CCK_TABLE_SIZE][8];
+
+extern u32 TxScalingTable_Jaguar[TXSCALE_TABLE_SIZE];
+
+/* */
+/* check Sta pointer valid or not */
+/* */
+#define IS_STA_VALID(pSta) (pSta)
+/* 20100514 Joseph: Add definition for antenna switching test after link. */
+/* This indicates two different steps. */
+/* In SWAW_STEP_PEAK, the driver switches the antenna and listens to the signal on the air. */
+/* In SWAW_STEP_DETERMINE, the driver just compares the signal captured in SWAW_STEP_PEAK */
+/* with the original RSSI to determine whether it is necessary to switch the antenna. */
+#define SWAW_STEP_PEAK 0
+#define SWAW_STEP_DETERMINE 1
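+/* Informational sketch (not mandated by this header): a per-packet RSSI hook such as */
+/* ODM_SwAntDivChkPerPktRssi() below would presumably gather samples during SWAW_STEP_PEAK */
+/* and then, in SWAW_STEP_DETERMINE, compare them against the original RSSI before deciding */
+/* whether to keep the newly switched antenna. */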
+
+/* Remove DIG by yuchen */
+
+void ODM_SetAntenna(PDM_ODM_T pDM_Odm, u8 Antenna);
+
+
+/* Remove BB power saving by Yuchen */
+
+#define dm_CheckTXPowerTracking ODM_TXPowerTrackingCheck
+void ODM_TXPowerTrackingCheck(PDM_ODM_T pDM_Odm);
+
+bool ODM_RAStateCheck(
+ PDM_ODM_T pDM_Odm,
+ s32 RSSI,
+ bool bForceUpdate,
+ u8 *pRATRState
+);
+
+#define dm_SWAW_RSSI_Check ODM_SwAntDivChkPerPktRssi
+void ODM_SwAntDivChkPerPktRssi(
+ PDM_ODM_T pDM_Odm,
+ u8 StationID,
+ PODM_PHY_INFO_T pPhyInfo
+);
+
+u32 ODM_Get_Rate_Bitmap(
+ PDM_ODM_T pDM_Odm,
+ u32 macid,
+ u32 ra_mask,
+ u8 rssi_level
+);
+
+#if (BEAMFORMING_SUPPORT == 1)
+BEAMFORMING_CAP Beamforming_GetEntryBeamCapByMacId(PMGNT_INFO pMgntInfo, u8 MacId);
+#endif
+
+void odm_TXPowerTrackingInit(PDM_ODM_T pDM_Odm);
+
+void ODM_DMInit(PDM_ODM_T pDM_Odm);
+
+void ODM_DMWatchdog(PDM_ODM_T pDM_Odm); /* For common use in the future */
+
+void ODM_CmnInfoInit(PDM_ODM_T pDM_Odm, ODM_CMNINFO_E CmnInfo, u32 Value);
+
+void ODM_CmnInfoHook(PDM_ODM_T pDM_Odm, ODM_CMNINFO_E CmnInfo, void *pValue);
+
+void ODM_CmnInfoPtrArrayHook(
+ PDM_ODM_T pDM_Odm,
+ ODM_CMNINFO_E CmnInfo,
+ u16 Index,
+ void *pValue
+);
+
+void ODM_CmnInfoUpdate(PDM_ODM_T pDM_Odm, u32 CmnInfo, u64 Value);
+
+void ODM_InitAllTimers(PDM_ODM_T pDM_Odm);
+
+void ODM_CancelAllTimers(PDM_ODM_T pDM_Odm);
+
+void ODM_ReleaseAllTimers(PDM_ODM_T pDM_Odm);
+
+void ODM_AntselStatistics_88C(
+ PDM_ODM_T pDM_Odm,
+ u8 MacId,
+ u32 PWDBAll,
+ bool isCCKrate
+);
+
+void ODM_DynamicARFBSelect(PDM_ODM_T pDM_Odm, u8 rate, bool Collision_State);
+
+#endif
diff --git a/drivers/staging/rtl8723bs/hal/odm_AntDiv.c b/drivers/staging/rtl8723bs/hal/odm_AntDiv.c
new file mode 100644
index 000000000000..e0b20562ccd6
--- /dev/null
+++ b/drivers/staging/rtl8723bs/hal/odm_AntDiv.c
@@ -0,0 +1,70 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+
+/* ============================================================ */
+/* include files */
+/* ============================================================ */
+
+#include "odm_precomp.h"
+
+/* ====================================================== */
+/* When the antenna test utility is on, or some test needs to disable */
+/* antenna diversity, call this function to disable all ODM-related */
+/* mechanisms that switch the antenna. */
+/* ====================================================== */
+void ODM_StopAntennaSwitchDm(PDM_ODM_T pDM_Odm)
+{
+	/* disable ODM antenna diversity */
+ pDM_Odm->SupportAbility &= ~ODM_BB_ANT_DIV;
+ ODM_RT_TRACE(
+ pDM_Odm,
+ ODM_COMP_ANT_DIV,
+ ODM_DBG_LOUD,
+ ("STOP Antenna Diversity\n")
+ );
+}
+
+void ODM_SetAntConfig(PDM_ODM_T pDM_Odm, u8 antSetting) /* 0 = A, 1 = B, 2 = C, ... */
+{
+	if (antSetting == 0) /* ant A */
+ PHY_SetBBReg(pDM_Odm->Adapter, 0x948, bMaskDWord, 0x00000000);
+ else if (antSetting == 1)
+ PHY_SetBBReg(pDM_Odm->Adapter, 0x948, bMaskDWord, 0x00000280);
+}
+
+/* ====================================================== */
+
+
+void ODM_SwAntDivRestAfterLink(PDM_ODM_T pDM_Odm)
+{
+ pSWAT_T pDM_SWAT_Table = &pDM_Odm->DM_SWAT_Table;
+ pFAT_T pDM_FatTable = &pDM_Odm->DM_FatTable;
+ u32 i;
+
+ pDM_Odm->RSSI_test = false;
+ pDM_SWAT_Table->try_flag = 0xff;
+ pDM_SWAT_Table->RSSI_Trying = 0;
+ pDM_SWAT_Table->Double_chk_flag = 0;
+
+ pDM_FatTable->RxIdleAnt = MAIN_ANT;
+
+ for (i = 0; i < ODM_ASSOCIATE_ENTRY_NUM; i++) {
+ pDM_FatTable->MainAnt_Sum[i] = 0;
+ pDM_FatTable->AuxAnt_Sum[i] = 0;
+ pDM_FatTable->MainAnt_Cnt[i] = 0;
+ pDM_FatTable->AuxAnt_Cnt[i] = 0;
+ }
+}
diff --git a/drivers/staging/rtl8723bs/hal/odm_AntDiv.h b/drivers/staging/rtl8723bs/hal/odm_AntDiv.h
new file mode 100644
index 000000000000..92cdad55b7a4
--- /dev/null
+++ b/drivers/staging/rtl8723bs/hal/odm_AntDiv.h
@@ -0,0 +1,38 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+
+#ifndef __ODMANTDIV_H__
+#define __ODMANTDIV_H__
+
+
+
+#define ANT1_2G 0 /* = ANT2_5G */
+#define ANT2_2G 1 /* = ANT1_5G */
+
+/* Antenna Diversity Control Type */
+#define ODM_AUTO_ANT 0
+#define ODM_FIX_MAIN_ANT 1
+#define ODM_FIX_AUX_ANT 2
+
+#define TX_BY_REG 0
+
+#define ANTDIV_ON 1
+#define ANTDIV_OFF 0
+
+#define INIT_ANTDIV_TIMMER 0
+#define CANCEL_ANTDIV_TIMMER 1
+#define RELEASE_ANTDIV_TIMMER 2
+
+#endif /* ifndef __ODMANTDIV_H__ */
diff --git a/drivers/staging/rtl8723bs/hal/odm_CfoTracking.c b/drivers/staging/rtl8723bs/hal/odm_CfoTracking.c
new file mode 100644
index 000000000000..9cde6c66235b
--- /dev/null
+++ b/drivers/staging/rtl8723bs/hal/odm_CfoTracking.c
@@ -0,0 +1,338 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+
+#include "odm_precomp.h"
+
+static void odm_SetCrystalCap(void *pDM_VOID, u8 CrystalCap)
+{
+ PDM_ODM_T pDM_Odm = (PDM_ODM_T)pDM_VOID;
+ PCFO_TRACKING pCfoTrack = &pDM_Odm->DM_CfoTrack;
+ bool bEEPROMCheck;
+ struct adapter *Adapter = pDM_Odm->Adapter;
+ struct hal_com_data *pHalData = GET_HAL_DATA(Adapter);
+
+ bEEPROMCheck = (pHalData->EEPROMVersion >= 0x01) ? true : false;
+
+ if (pCfoTrack->CrystalCap == CrystalCap)
+ return;
+
+ pCfoTrack->CrystalCap = CrystalCap;
+
+ /* 0x2C[23:18] = 0x2C[17:12] = CrystalCap */
+ CrystalCap = CrystalCap & 0x3F;
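+	/* The 6-bit cap code is written into both halves of the 12-bit field */
+	/* selected by mask 0x00FFF000 (bits [17:12] and [23:18]), so both */
+	/* XTAL load-capacitor settings presumably receive the same value. */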
+ PHY_SetBBReg(
+ pDM_Odm->Adapter,
+ REG_MAC_PHY_CTRL,
+ 0x00FFF000,
+ (CrystalCap | (CrystalCap << 6))
+ );
+
+ ODM_RT_TRACE(
+ pDM_Odm,
+ ODM_COMP_CFO_TRACKING,
+ ODM_DBG_LOUD,
+ (
+ "odm_SetCrystalCap(): CrystalCap = 0x%x\n",
+ CrystalCap
+ )
+ );
+}
+
+static u8 odm_GetDefaultCrytaltalCap(void *pDM_VOID)
+{
+ PDM_ODM_T pDM_Odm = (PDM_ODM_T)pDM_VOID;
+ u8 CrystalCap = 0x20;
+
+ struct adapter *Adapter = pDM_Odm->Adapter;
+ struct hal_com_data *pHalData = GET_HAL_DATA(Adapter);
+
+ CrystalCap = pHalData->CrystalCap;
+
+ CrystalCap = CrystalCap & 0x3f;
+
+ return CrystalCap;
+}
+
+static void odm_SetATCStatus(void *pDM_VOID, bool ATCStatus)
+{
+ PDM_ODM_T pDM_Odm = (PDM_ODM_T)pDM_VOID;
+ PCFO_TRACKING pCfoTrack = &pDM_Odm->DM_CfoTrack;
+
+ if (pCfoTrack->bATCStatus == ATCStatus)
+ return;
+
+ PHY_SetBBReg(
+ pDM_Odm->Adapter,
+ ODM_REG(BB_ATC, pDM_Odm),
+ ODM_BIT(BB_ATC, pDM_Odm),
+ ATCStatus
+ );
+ pCfoTrack->bATCStatus = ATCStatus;
+}
+
+static bool odm_GetATCStatus(void *pDM_VOID)
+{
+ bool ATCStatus;
+ PDM_ODM_T pDM_Odm = (PDM_ODM_T)pDM_VOID;
+
+ ATCStatus = (bool)PHY_QueryBBReg(
+ pDM_Odm->Adapter,
+ ODM_REG(BB_ATC, pDM_Odm),
+ ODM_BIT(BB_ATC, pDM_Odm)
+ );
+ return ATCStatus;
+}
+
+void ODM_CfoTrackingReset(void *pDM_VOID)
+{
+ PDM_ODM_T pDM_Odm = (PDM_ODM_T)pDM_VOID;
+ PCFO_TRACKING pCfoTrack = &pDM_Odm->DM_CfoTrack;
+
+ pCfoTrack->DefXCap = odm_GetDefaultCrytaltalCap(pDM_Odm);
+ pCfoTrack->bAdjust = true;
+
+ odm_SetCrystalCap(pDM_Odm, pCfoTrack->DefXCap);
+ odm_SetATCStatus(pDM_Odm, true);
+}
+
+void ODM_CfoTrackingInit(void *pDM_VOID)
+{
+ PDM_ODM_T pDM_Odm = (PDM_ODM_T)pDM_VOID;
+ PCFO_TRACKING pCfoTrack = &pDM_Odm->DM_CfoTrack;
+
+ pCfoTrack->DefXCap =
+ pCfoTrack->CrystalCap = odm_GetDefaultCrytaltalCap(pDM_Odm);
+ pCfoTrack->bATCStatus = odm_GetATCStatus(pDM_Odm);
+ pCfoTrack->bAdjust = true;
+ ODM_RT_TRACE(
+ pDM_Odm,
+ ODM_COMP_CFO_TRACKING,
+ ODM_DBG_LOUD,
+ ("ODM_CfoTracking_init() =========>\n")
+ );
+ ODM_RT_TRACE(
+ pDM_Odm,
+ ODM_COMP_CFO_TRACKING,
+ ODM_DBG_LOUD,
+ (
+ "ODM_CfoTracking_init(): bATCStatus = %d, CrystalCap = 0x%x\n",
+ pCfoTrack->bATCStatus,
+ pCfoTrack->DefXCap
+ )
+ );
+}
+
+void ODM_CfoTracking(void *pDM_VOID)
+{
+ PDM_ODM_T pDM_Odm = (PDM_ODM_T)pDM_VOID;
+ PCFO_TRACKING pCfoTrack = &pDM_Odm->DM_CfoTrack;
+ int CFO_kHz_A, CFO_kHz_B, CFO_ave = 0;
+ int CFO_ave_diff;
+ int CrystalCap = (int)pCfoTrack->CrystalCap;
+ u8 Adjust_Xtal = 1;
+
+ /* 4 Support ability */
+ if (!(pDM_Odm->SupportAbility & ODM_BB_CFO_TRACKING)) {
+ ODM_RT_TRACE(
+ pDM_Odm,
+ ODM_COMP_CFO_TRACKING,
+ ODM_DBG_LOUD,
+ ("ODM_CfoTracking(): Return: SupportAbility ODM_BB_CFO_TRACKING is disabled\n")
+ );
+ return;
+ }
+
+ ODM_RT_TRACE(
+ pDM_Odm,
+ ODM_COMP_CFO_TRACKING,
+ ODM_DBG_LOUD,
+ ("ODM_CfoTracking() =========>\n")
+ );
+
+ if (!pDM_Odm->bLinked || !pDM_Odm->bOneEntryOnly) {
+ /* 4 No link or more than one entry */
+ ODM_CfoTrackingReset(pDM_Odm);
+ ODM_RT_TRACE(
+ pDM_Odm,
+ ODM_COMP_CFO_TRACKING,
+ ODM_DBG_LOUD,
+ (
+ "ODM_CfoTracking(): Reset: bLinked = %d, bOneEntryOnly = %d\n",
+ pDM_Odm->bLinked,
+ pDM_Odm->bOneEntryOnly
+ )
+ );
+ } else {
+ /* 3 1. CFO Tracking */
+ /* 4 1.1 No new packet */
+ if (pCfoTrack->packetCount == pCfoTrack->packetCount_pre) {
+ ODM_RT_TRACE(
+ pDM_Odm,
+ ODM_COMP_CFO_TRACKING,
+ ODM_DBG_LOUD,
+ (
+ "ODM_CfoTracking(): packet counter doesn't change\n"
+ )
+ );
+ return;
+ }
+ pCfoTrack->packetCount_pre = pCfoTrack->packetCount;
+
+ /* 4 1.2 Calculate CFO */
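+		/* The raw CFO tail is scaled to kHz here; 3125/1280 ~= 2.44, so each */
+		/* LSB of CFO_tail corresponds to roughly 2.44 kHz of frequency offset. */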
+ CFO_kHz_A = (int)(pCfoTrack->CFO_tail[0] * 3125) / 1280;
+ CFO_kHz_B = (int)(pCfoTrack->CFO_tail[1] * 3125) / 1280;
+
+ if (pDM_Odm->RFType < ODM_2T2R)
+ CFO_ave = CFO_kHz_A;
+ else
+ CFO_ave = (int)(CFO_kHz_A + CFO_kHz_B) >> 1;
+ ODM_RT_TRACE(
+ pDM_Odm,
+ ODM_COMP_CFO_TRACKING,
+ ODM_DBG_LOUD,
+ (
+ "ODM_CfoTracking(): CFO_kHz_A = %dkHz, CFO_kHz_B = %dkHz, CFO_ave = %dkHz\n",
+ CFO_kHz_A,
+ CFO_kHz_B,
+ CFO_ave
+ )
+ );
+
+ /* 4 1.3 Avoid abnormal large CFO */
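+		/* While tracking is idle (bAdjust false), a single jump of more than */
+		/* 20 kHz against the previous average is skipped once (largeCFOHit); */
+		/* only an offset that persists into the next round is acted on. */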
+ CFO_ave_diff =
+ (pCfoTrack->CFO_ave_pre >= CFO_ave) ?
+ (pCfoTrack->CFO_ave_pre-CFO_ave) :
+ (CFO_ave-pCfoTrack->CFO_ave_pre);
+
+ if (
+ CFO_ave_diff > 20 &&
+ pCfoTrack->largeCFOHit == 0 &&
+ !pCfoTrack->bAdjust
+ ) {
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_CFO_TRACKING, ODM_DBG_LOUD, ("ODM_CfoTracking(): first large CFO hit\n"));
+ pCfoTrack->largeCFOHit = 1;
+ return;
+ } else
+ pCfoTrack->largeCFOHit = 0;
+ pCfoTrack->CFO_ave_pre = CFO_ave;
+
+ /* 4 1.4 Dynamic Xtal threshold */
+ if (pCfoTrack->bAdjust == false) {
+ if (CFO_ave > CFO_TH_XTAL_HIGH || CFO_ave < (-CFO_TH_XTAL_HIGH))
+ pCfoTrack->bAdjust = true;
+ } else {
+ if (CFO_ave < CFO_TH_XTAL_LOW && CFO_ave > (-CFO_TH_XTAL_LOW))
+ pCfoTrack->bAdjust = false;
+ }
+
+ /* 4 1.5 BT case: Disable CFO tracking */
+ if (pDM_Odm->bBtEnabled) {
+ pCfoTrack->bAdjust = false;
+ odm_SetCrystalCap(pDM_Odm, pCfoTrack->DefXCap);
+ ODM_RT_TRACE(
+ pDM_Odm,
+ ODM_COMP_CFO_TRACKING,
+ ODM_DBG_LOUD,
+ ("ODM_CfoTracking(): Disable CFO tracking for BT!!\n")
+ );
+ }
+
+ /* 4 1.6 Big jump */
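+		/* The step size grows with the excess over CFO_TH_XTAL_LOW: */
+		/* Adjust_Xtal = 1 + (|CFO_ave| - CFO_TH_XTAL_LOW) / 4 capacitor codes. */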
+ if (pCfoTrack->bAdjust) {
+ if (CFO_ave > CFO_TH_XTAL_LOW)
+ Adjust_Xtal = Adjust_Xtal+((CFO_ave-CFO_TH_XTAL_LOW)>>2);
+ else if (CFO_ave < (-CFO_TH_XTAL_LOW))
+ Adjust_Xtal = Adjust_Xtal+((CFO_TH_XTAL_LOW-CFO_ave)>>2);
+
+ ODM_RT_TRACE(
+ pDM_Odm,
+ ODM_COMP_CFO_TRACKING,
+ ODM_DBG_LOUD,
+ (
+ "ODM_CfoTracking(): Crystal cap offset = %d\n",
+ Adjust_Xtal
+ )
+ );
+ }
+
+ /* 4 1.7 Adjust Crystal Cap. */
+ if (pCfoTrack->bAdjust) {
+ if (CFO_ave > CFO_TH_XTAL_LOW)
+ CrystalCap = CrystalCap + Adjust_Xtal;
+ else if (CFO_ave < (-CFO_TH_XTAL_LOW))
+ CrystalCap = CrystalCap - Adjust_Xtal;
+
+ if (CrystalCap > 0x3f)
+ CrystalCap = 0x3f;
+ else if (CrystalCap < 0)
+ CrystalCap = 0;
+
+ odm_SetCrystalCap(pDM_Odm, (u8)CrystalCap);
+ }
+ ODM_RT_TRACE(
+ pDM_Odm,
+ ODM_COMP_CFO_TRACKING,
+ ODM_DBG_LOUD,
+ (
+ "ODM_CfoTracking(): Crystal cap = 0x%x, Default Crystal cap = 0x%x\n",
+ pCfoTrack->CrystalCap,
+ pCfoTrack->DefXCap
+ )
+ );
+
+ /* 3 2. Dynamic ATC switch */
+ if (CFO_ave < CFO_TH_ATC && CFO_ave > -CFO_TH_ATC) {
+ odm_SetATCStatus(pDM_Odm, false);
+ ODM_RT_TRACE(
+ pDM_Odm,
+ ODM_COMP_CFO_TRACKING,
+ ODM_DBG_LOUD,
+ ("ODM_CfoTracking(): Disable ATC!!\n")
+ );
+ } else {
+ odm_SetATCStatus(pDM_Odm, true);
+ ODM_RT_TRACE(
+ pDM_Odm,
+ ODM_COMP_CFO_TRACKING,
+ ODM_DBG_LOUD,
+ ("ODM_CfoTracking(): Enable ATC!!\n")
+ );
+ }
+ }
+}
+
+void ODM_ParsingCFO(void *pDM_VOID, void *pPktinfo_VOID, s8 *pcfotail)
+{
+ PDM_ODM_T pDM_Odm = (PDM_ODM_T)pDM_VOID;
+ PODM_PACKET_INFO_T pPktinfo = (PODM_PACKET_INFO_T)pPktinfo_VOID;
+ PCFO_TRACKING pCfoTrack = &pDM_Odm->DM_CfoTrack;
+ u8 i;
+
+ if (!(pDM_Odm->SupportAbility & ODM_BB_CFO_TRACKING))
+ return;
+
+ if (pPktinfo->StationID != 0) {
+ /* 3 Update CFO report for path-A & path-B */
+		/* Only path-A and path-B have CFO tail and short CFO */
+ for (i = ODM_RF_PATH_A; i <= ODM_RF_PATH_B; i++)
+ pCfoTrack->CFO_tail[i] = (int)pcfotail[i];
+
+ /* 3 Update packet counter */
+ if (pCfoTrack->packetCount == 0xffffffff)
+ pCfoTrack->packetCount = 0;
+ else
+ pCfoTrack->packetCount++;
+ }
+}
diff --git a/drivers/staging/rtl8723bs/hal/odm_CfoTracking.h b/drivers/staging/rtl8723bs/hal/odm_CfoTracking.h
new file mode 100644
index 000000000000..0c92899d9967
--- /dev/null
+++ b/drivers/staging/rtl8723bs/hal/odm_CfoTracking.h
@@ -0,0 +1,47 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+
+#ifndef __ODMCFOTRACK_H__
+#define __ODMCFOTRACK_H__
+
+#define CFO_TH_XTAL_HIGH 20 /* kHz */
+#define CFO_TH_XTAL_LOW 10 /* kHz */
+#define CFO_TH_ATC 80 /* kHz */
+
+typedef struct _CFO_TRACKING_ {
+ bool bATCStatus;
+ bool largeCFOHit;
+ bool bAdjust;
+ u8 CrystalCap;
+ u8 DefXCap;
+ int CFO_tail[2];
+ int CFO_ave_pre;
+ u32 packetCount;
+ u32 packetCount_pre;
+
+ bool bForceXtalCap;
+ bool bReset;
+} CFO_TRACKING, *PCFO_TRACKING;
+
+void ODM_CfoTrackingReset(void *pDM_VOID);
+
+void ODM_CfoTrackingInit(void *pDM_VOID);
+
+void ODM_CfoTracking(void *pDM_VOID);
+
+void ODM_ParsingCFO(void *pDM_VOID, void *pPktinfo_VOID, s8 *pcfotail);
+
+#endif
diff --git a/drivers/staging/rtl8723bs/hal/odm_DIG.c b/drivers/staging/rtl8723bs/hal/odm_DIG.c
new file mode 100644
index 000000000000..ba8e8eb534ef
--- /dev/null
+++ b/drivers/staging/rtl8723bs/hal/odm_DIG.c
@@ -0,0 +1,1221 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+
+#include "odm_precomp.h"
+
+#define ADAPTIVITY_VERSION "5.0"
+
+void odm_NHMCounterStatisticsInit(void *pDM_VOID)
+{
+ PDM_ODM_T pDM_Odm = (PDM_ODM_T)pDM_VOID;
+
+ /* PHY parameters initialize for n series */
+ rtw_write16(pDM_Odm->Adapter, ODM_REG_NHM_TIMER_11N+2, 0x2710); /* 0x894[31:16]= 0x2710 Time duration for NHM unit: 4us, 0x2710 =40ms */
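+	/* 0x2710 = 10000 decimal; 10000 * 4 us per unit = 40 ms measurement window */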
+ /* rtw_write16(pDM_Odm->Adapter, ODM_REG_NHM_TIMER_11N+2, 0x4e20); 0x894[31:16]= 0x4e20 Time duration for NHM unit: 4us, 0x4e20 =80ms */
+ rtw_write16(pDM_Odm->Adapter, ODM_REG_NHM_TH9_TH10_11N+2, 0xffff); /* 0x890[31:16]= 0xffff th_9, th_10 */
+ /* rtw_write32(pDM_Odm->Adapter, ODM_REG_NHM_TH3_TO_TH0_11N, 0xffffff5c); 0x898 = 0xffffff5c th_3, th_2, th_1, th_0 */
+ rtw_write32(pDM_Odm->Adapter, ODM_REG_NHM_TH3_TO_TH0_11N, 0xffffff52); /* 0x898 = 0xffffff52 th_3, th_2, th_1, th_0 */
+ rtw_write32(pDM_Odm->Adapter, ODM_REG_NHM_TH7_TO_TH4_11N, 0xffffffff); /* 0x89c = 0xffffffff th_7, th_6, th_5, th_4 */
+ PHY_SetBBReg(pDM_Odm->Adapter, ODM_REG_FPGA0_IQK_11N, bMaskByte0, 0xff); /* 0xe28[7:0]= 0xff th_8 */
+ PHY_SetBBReg(pDM_Odm->Adapter, ODM_REG_NHM_TH9_TH10_11N, BIT10|BIT9|BIT8, 0x7); /* 0x890[9:8]=3 enable CCX */
+ PHY_SetBBReg(pDM_Odm->Adapter, ODM_REG_OFDM_FA_RSTC_11N, BIT7, 0x1); /* 0xc0c[7]= 1 max power among all RX ants */
+}
+
+void odm_NHMCounterStatistics(void *pDM_VOID)
+{
+ PDM_ODM_T pDM_Odm = (PDM_ODM_T)pDM_VOID;
+
+ /* Get NHM report */
+ odm_GetNHMCounterStatistics(pDM_Odm);
+
+ /* Reset NHM counter */
+ odm_NHMCounterStatisticsReset(pDM_Odm);
+}
+
+void odm_GetNHMCounterStatistics(void *pDM_VOID)
+{
+ PDM_ODM_T pDM_Odm = (PDM_ODM_T)pDM_VOID;
+ u32 value32 = 0;
+
+ value32 = PHY_QueryBBReg(pDM_Odm->Adapter, ODM_REG_NHM_CNT_11N, bMaskDWord);
+
+ pDM_Odm->NHM_cnt_0 = (u8)(value32 & bMaskByte0);
+}
+
+void odm_NHMCounterStatisticsReset(void *pDM_VOID)
+{
+ PDM_ODM_T pDM_Odm = (PDM_ODM_T)pDM_VOID;
+
+ PHY_SetBBReg(pDM_Odm->Adapter, ODM_REG_NHM_TH9_TH10_11N, BIT1, 0);
+ PHY_SetBBReg(pDM_Odm->Adapter, ODM_REG_NHM_TH9_TH10_11N, BIT1, 1);
+}
+
+void odm_NHMBBInit(void *pDM_VOID)
+{
+ PDM_ODM_T pDM_Odm = (PDM_ODM_T)pDM_VOID;
+
+ pDM_Odm->adaptivity_flag = 0;
+ pDM_Odm->tolerance_cnt = 3;
+ pDM_Odm->NHMLastTxOkcnt = 0;
+ pDM_Odm->NHMLastRxOkcnt = 0;
+ pDM_Odm->NHMCurTxOkcnt = 0;
+ pDM_Odm->NHMCurRxOkcnt = 0;
+}
+
+/* */
+void odm_NHMBB(void *pDM_VOID)
+{
+ PDM_ODM_T pDM_Odm = (PDM_ODM_T)pDM_VOID;
+ /* u8 test_status; */
+ /* Pfalse_ALARM_STATISTICS pFalseAlmCnt = &(pDM_Odm->FalseAlmCnt); */
+
+ pDM_Odm->NHMCurTxOkcnt =
+ *(pDM_Odm->pNumTxBytesUnicast)-pDM_Odm->NHMLastTxOkcnt;
+ pDM_Odm->NHMCurRxOkcnt =
+ *(pDM_Odm->pNumRxBytesUnicast)-pDM_Odm->NHMLastRxOkcnt;
+ pDM_Odm->NHMLastTxOkcnt =
+ *(pDM_Odm->pNumTxBytesUnicast);
+ pDM_Odm->NHMLastRxOkcnt =
+ *(pDM_Odm->pNumRxBytesUnicast);
+ ODM_RT_TRACE(
+ pDM_Odm,
+ ODM_COMP_DIG,
+ ODM_DBG_LOUD,
+ (
+ "NHM_cnt_0 =%d, NHMCurTxOkcnt = %llu, NHMCurRxOkcnt = %llu\n",
+ pDM_Odm->NHM_cnt_0,
+ pDM_Odm->NHMCurTxOkcnt,
+ pDM_Odm->NHMCurRxOkcnt
+ )
+ );
+
+
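+	/* Heuristic: traffic is treated as TX-dominant when TX bytes > 4 * RX bytes; */
+	/* combined with NHM_cnt_0 >= 190 (presumably a mostly idle channel report), */
+	/* this raises adaptivity_flag. Roughly four rounds without the condition */
+	/* (tolerance_cnt) clear the flag again. */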
+ if ((pDM_Odm->NHMCurTxOkcnt) + 1 > (u64)(pDM_Odm->NHMCurRxOkcnt<<2) + 1) { /* Tx > 4*Rx possible for adaptivity test */
+ if (pDM_Odm->NHM_cnt_0 >= 190 || pDM_Odm->adaptivity_flag == true) {
+			/* Enable EDCCA since an Adaptivity test may be running */
+ /* test_status = 1; */
+ pDM_Odm->adaptivity_flag = true;
+ pDM_Odm->tolerance_cnt = 0;
+ } else {
+ if (pDM_Odm->tolerance_cnt < 3)
+ pDM_Odm->tolerance_cnt = pDM_Odm->tolerance_cnt + 1;
+ else
+ pDM_Odm->tolerance_cnt = 4;
+ /* test_status = 5; */
+ if (pDM_Odm->tolerance_cnt > 3) {
+ /* test_status = 3; */
+ pDM_Odm->adaptivity_flag = false;
+ }
+ }
+ } else { /* TX<RX */
+ if (pDM_Odm->adaptivity_flag == true && pDM_Odm->NHM_cnt_0 <= 200) {
+ /* test_status = 2; */
+ pDM_Odm->tolerance_cnt = 0;
+ } else {
+ if (pDM_Odm->tolerance_cnt < 3)
+ pDM_Odm->tolerance_cnt = pDM_Odm->tolerance_cnt + 1;
+ else
+ pDM_Odm->tolerance_cnt = 4;
+ /* test_status = 5; */
+ if (pDM_Odm->tolerance_cnt > 3) {
+ /* test_status = 4; */
+ pDM_Odm->adaptivity_flag = false;
+ }
+ }
+ }
+
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("adaptivity_flag = %d\n ", pDM_Odm->adaptivity_flag));
+}
+
+void odm_SearchPwdBLowerBound(void *pDM_VOID, u8 IGI_target)
+{
+ PDM_ODM_T pDM_Odm = (PDM_ODM_T)pDM_VOID;
+ u32 value32 = 0;
+ u8 cnt, IGI;
+ bool bAdjust = true;
+ s8 TH_L2H_dmc, TH_H2L_dmc;
+ s8 Diff;
+
+ IGI = 0x50; /* find H2L, L2H lower bound */
+ ODM_Write_DIG(pDM_Odm, IGI);
+
+
+ Diff = IGI_target-(s8)IGI;
+ TH_L2H_dmc = pDM_Odm->TH_L2H_ini + Diff;
+ if (TH_L2H_dmc > 10)
+ TH_L2H_dmc = 10;
+ TH_H2L_dmc = TH_L2H_dmc - pDM_Odm->TH_EDCCA_HL_diff;
+ PHY_SetBBReg(pDM_Odm->Adapter, rOFDM0_ECCAThreshold, bMaskByte0, (u8)TH_L2H_dmc);
+ PHY_SetBBReg(pDM_Odm->Adapter, rOFDM0_ECCAThreshold, bMaskByte2, (u8)TH_H2L_dmc);
+
+ mdelay(5);
+
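+	/* Search loop: take 20 samples of the debug report per pass; if more than 5 */
+	/* show bit 30 or bit 29 set (counted as txEdcca1, presumably TX blocked by */
+	/* EDCCA), raise TH_L2H by 1 and drop IGI by 1, repeating until TX gets */
+	/* through or TH_L2H reaches 10; the bounds and IGI upper limit are recorded. */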
+ while (bAdjust) {
+ for (cnt = 0; cnt < 20; cnt++) {
+ value32 = PHY_QueryBBReg(pDM_Odm->Adapter, ODM_REG_RPT_11N, bMaskDWord);
+
+ if (value32 & BIT30)
+ pDM_Odm->txEdcca1 = pDM_Odm->txEdcca1 + 1;
+ else if (value32 & BIT29)
+ pDM_Odm->txEdcca1 = pDM_Odm->txEdcca1 + 1;
+ else
+ pDM_Odm->txEdcca0 = pDM_Odm->txEdcca0 + 1;
+ }
+ /* DbgPrint("txEdcca1 = %d, txEdcca0 = %d\n", pDM_Odm->txEdcca1, pDM_Odm->txEdcca0); */
+
+ if (pDM_Odm->txEdcca1 > 5) {
+ IGI = IGI-1;
+ TH_L2H_dmc = TH_L2H_dmc + 1;
+ if (TH_L2H_dmc > 10)
+ TH_L2H_dmc = 10;
+ TH_H2L_dmc = TH_L2H_dmc - pDM_Odm->TH_EDCCA_HL_diff;
+ PHY_SetBBReg(pDM_Odm->Adapter, rOFDM0_ECCAThreshold, bMaskByte0, (u8)TH_L2H_dmc);
+ PHY_SetBBReg(pDM_Odm->Adapter, rOFDM0_ECCAThreshold, bMaskByte2, (u8)TH_H2L_dmc);
+
+ pDM_Odm->TxHangFlg = true;
+ pDM_Odm->txEdcca1 = 0;
+ pDM_Odm->txEdcca0 = 0;
+
+ if (TH_L2H_dmc == 10) {
+ bAdjust = false;
+ pDM_Odm->TxHangFlg = false;
+ pDM_Odm->txEdcca1 = 0;
+ pDM_Odm->txEdcca0 = 0;
+ pDM_Odm->H2L_lb = TH_H2L_dmc;
+ pDM_Odm->L2H_lb = TH_L2H_dmc;
+ pDM_Odm->Adaptivity_IGI_upper = IGI;
+ }
+ } else {
+ bAdjust = false;
+ pDM_Odm->TxHangFlg = false;
+ pDM_Odm->txEdcca1 = 0;
+ pDM_Odm->txEdcca0 = 0;
+ pDM_Odm->H2L_lb = TH_H2L_dmc;
+ pDM_Odm->L2H_lb = TH_L2H_dmc;
+ pDM_Odm->Adaptivity_IGI_upper = IGI;
+ }
+ }
+
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("IGI = 0x%x, H2L_lb = 0x%x, L2H_lb = 0x%x\n", IGI, pDM_Odm->H2L_lb, pDM_Odm->L2H_lb));
+}
+
+void odm_AdaptivityInit(void *pDM_VOID)
+{
+ PDM_ODM_T pDM_Odm = (PDM_ODM_T)pDM_VOID;
+
+ if (pDM_Odm->Carrier_Sense_enable == false)
+ pDM_Odm->TH_L2H_ini = 0xf7; /* -7 */
+ else
+ pDM_Odm->TH_L2H_ini = 0xa;
+
+ pDM_Odm->AdapEn_RSSI = 20;
+ pDM_Odm->TH_EDCCA_HL_diff = 7;
+
+ pDM_Odm->IGI_Base = 0x32;
+ pDM_Odm->IGI_target = 0x1c;
+ pDM_Odm->ForceEDCCA = 0;
+ pDM_Odm->NHM_disable = false;
+ pDM_Odm->TxHangFlg = true;
+ pDM_Odm->txEdcca0 = 0;
+ pDM_Odm->txEdcca1 = 0;
+ pDM_Odm->H2L_lb = 0;
+ pDM_Odm->L2H_lb = 0;
+ pDM_Odm->Adaptivity_IGI_upper = 0;
+ odm_NHMBBInit(pDM_Odm);
+
+ PHY_SetBBReg(pDM_Odm->Adapter, REG_RD_CTRL, BIT11, 1); /* stop counting if EDCCA is asserted */
+}
+
+
+void odm_Adaptivity(void *pDM_VOID, u8 IGI)
+{
+ PDM_ODM_T pDM_Odm = (PDM_ODM_T)pDM_VOID;
+ s8 TH_L2H_dmc, TH_H2L_dmc;
+ s8 Diff, IGI_target;
+ bool EDCCA_State = false;
+
+ if (!(pDM_Odm->SupportAbility & ODM_BB_ADAPTIVITY)) {
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("Go to odm_DynamicEDCCA()\n"));
+ return;
+ }
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_Adaptivity() =====>\n"));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("ForceEDCCA =%d, IGI_Base = 0x%x, TH_L2H_ini = %d, TH_EDCCA_HL_diff = %d, AdapEn_RSSI = %d\n",
+ pDM_Odm->ForceEDCCA, pDM_Odm->IGI_Base, pDM_Odm->TH_L2H_ini, pDM_Odm->TH_EDCCA_HL_diff, pDM_Odm->AdapEn_RSSI));
+
+ if (*pDM_Odm->pBandWidth == ODM_BW20M) /* CHANNEL_WIDTH_20 */
+ IGI_target = pDM_Odm->IGI_Base;
+ else if (*pDM_Odm->pBandWidth == ODM_BW40M)
+ IGI_target = pDM_Odm->IGI_Base + 2;
+ else if (*pDM_Odm->pBandWidth == ODM_BW80M)
+ IGI_target = pDM_Odm->IGI_Base + 2;
+ else
+ IGI_target = pDM_Odm->IGI_Base;
+ pDM_Odm->IGI_target = (u8) IGI_target;
+
+ /* Search pwdB lower bound */
+ if (pDM_Odm->TxHangFlg == true) {
+ PHY_SetBBReg(pDM_Odm->Adapter, ODM_REG_DBG_RPT_11N, bMaskDWord, 0x208);
+ odm_SearchPwdBLowerBound(pDM_Odm, pDM_Odm->IGI_target);
+ }
+
+ if ((!pDM_Odm->bLinked) || (*pDM_Odm->pChannel > 149)) { /* Band4 doesn't need adaptivity */
+ PHY_SetBBReg(pDM_Odm->Adapter, rOFDM0_ECCAThreshold, bMaskByte0, 0x7f);
+ PHY_SetBBReg(pDM_Odm->Adapter, rOFDM0_ECCAThreshold, bMaskByte2, 0x7f);
+ return;
+ }
+
+ if (!pDM_Odm->ForceEDCCA) {
+ if (pDM_Odm->RSSI_Min > pDM_Odm->AdapEn_RSSI)
+ EDCCA_State = 1;
+ else if (pDM_Odm->RSSI_Min < (pDM_Odm->AdapEn_RSSI - 5))
+ EDCCA_State = 0;
+ } else
+ EDCCA_State = 1;
+
+ if (
+ pDM_Odm->bLinked &&
+ pDM_Odm->Carrier_Sense_enable == false &&
+ pDM_Odm->NHM_disable == false &&
+ pDM_Odm->TxHangFlg == false
+ )
+ odm_NHMBB(pDM_Odm);
+
+ ODM_RT_TRACE(
+ pDM_Odm,
+ ODM_COMP_DIG,
+ ODM_DBG_LOUD,
+ (
+ "BandWidth =%s, IGI_target = 0x%x, EDCCA_State =%d\n",
+ (*pDM_Odm->pBandWidth == ODM_BW80M) ? "80M" :
+ ((*pDM_Odm->pBandWidth == ODM_BW40M) ? "40M" : "20M"),
+ IGI_target,
+ EDCCA_State
+ )
+ );
+
+ if (EDCCA_State == 1) {
+ Diff = IGI_target-(s8)IGI;
+ TH_L2H_dmc = pDM_Odm->TH_L2H_ini + Diff;
+ if (TH_L2H_dmc > 10)
+ TH_L2H_dmc = 10;
+
+ TH_H2L_dmc = TH_L2H_dmc - pDM_Odm->TH_EDCCA_HL_diff;
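+		/* Example (20 MHz, so IGI_target = IGI_Base = 0x32): with a current IGI */
+		/* of 0x2a the offset is 0x32 - 0x2a = 8, so TH_L2H = TH_L2H_ini + 8 */
+		/* (capped at 10) and TH_H2L sits TH_EDCCA_HL_diff below it. */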
+
+		/* clamp to the searched lower bounds so EDCCA is not asserted all the time */
+ if (TH_H2L_dmc < pDM_Odm->H2L_lb)
+ TH_H2L_dmc = pDM_Odm->H2L_lb;
+ if (TH_L2H_dmc < pDM_Odm->L2H_lb)
+ TH_L2H_dmc = pDM_Odm->L2H_lb;
+ } else {
+ TH_L2H_dmc = 0x7f;
+ TH_H2L_dmc = 0x7f;
+ }
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("IGI = 0x%x, TH_L2H_dmc = %d, TH_H2L_dmc = %d\n",
+ IGI, TH_L2H_dmc, TH_H2L_dmc));
+ PHY_SetBBReg(pDM_Odm->Adapter, rOFDM0_ECCAThreshold, bMaskByte0, (u8)TH_L2H_dmc);
+ PHY_SetBBReg(pDM_Odm->Adapter, rOFDM0_ECCAThreshold, bMaskByte2, (u8)TH_H2L_dmc);
+}
+
+void ODM_Write_DIG(void *pDM_VOID, u8 CurrentIGI)
+{
+ PDM_ODM_T pDM_Odm = (PDM_ODM_T)pDM_VOID;
+ pDIG_T pDM_DigTable = &pDM_Odm->DM_DigTable;
+
+ if (pDM_DigTable->bStopDIG) {
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("Stop Writing IGI\n"));
+ return;
+ }
+
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_TRACE, ("ODM_REG(IGI_A, pDM_Odm) = 0x%x, ODM_BIT(IGI, pDM_Odm) = 0x%x\n",
+ ODM_REG(IGI_A, pDM_Odm), ODM_BIT(IGI, pDM_Odm)));
+
+ if (pDM_DigTable->CurIGValue != CurrentIGI) {
+ /* 1 Check initial gain by upper bound */
+ if (!pDM_DigTable->bPSDInProgress) {
+ if (CurrentIGI > pDM_DigTable->rx_gain_range_max) {
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_TRACE, ("CurrentIGI(0x%02x) is larger than upper bound !!\n", pDM_DigTable->rx_gain_range_max));
+ CurrentIGI = pDM_DigTable->rx_gain_range_max;
+ }
+
+ }
+
+ /* 1 Set IGI value */
+ PHY_SetBBReg(pDM_Odm->Adapter, ODM_REG(IGI_A, pDM_Odm), ODM_BIT(IGI, pDM_Odm), CurrentIGI);
+
+ if (pDM_Odm->RFType > ODM_1T1R)
+ PHY_SetBBReg(pDM_Odm->Adapter, ODM_REG(IGI_B, pDM_Odm), ODM_BIT(IGI, pDM_Odm), CurrentIGI);
+
+ pDM_DigTable->CurIGValue = CurrentIGI;
+ }
+
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_TRACE, ("CurrentIGI(0x%02x).\n", CurrentIGI));
+
+}
+
+void odm_PauseDIG(
+ void *pDM_VOID,
+ ODM_Pause_DIG_TYPE PauseType,
+ u8 IGIValue
+)
+{
+ PDM_ODM_T pDM_Odm = (PDM_ODM_T)pDM_VOID;
+ pDIG_T pDM_DigTable = &pDM_Odm->DM_DigTable;
+ static bool bPaused = false;
+
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_PauseDIG() =========>\n"));
+
+ if (
+ (pDM_Odm->SupportAbility & ODM_BB_ADAPTIVITY) &&
+ pDM_Odm->TxHangFlg == true
+ ) {
+ ODM_RT_TRACE(
+ pDM_Odm,
+ ODM_COMP_DIG,
+ ODM_DBG_LOUD,
+ ("odm_PauseDIG(): Dynamic adjust threshold in progress !!\n")
+ );
+ return;
+ }
+
+ if (
+ !bPaused && (!(pDM_Odm->SupportAbility & ODM_BB_DIG) ||
+ !(pDM_Odm->SupportAbility & ODM_BB_FA_CNT))
+ ){
+ ODM_RT_TRACE(
+ pDM_Odm,
+ ODM_COMP_DIG,
+ ODM_DBG_LOUD,
+ ("odm_PauseDIG(): Return: SupportAbility ODM_BB_DIG or ODM_BB_FA_CNT is disabled\n")
+ );
+ return;
+ }
+
+ switch (PauseType) {
+ /* 1 Pause DIG */
+ case ODM_PAUSE_DIG:
+ /* 2 Disable DIG */
+ ODM_CmnInfoUpdate(pDM_Odm, ODM_CMNINFO_ABILITY, pDM_Odm->SupportAbility & (~ODM_BB_DIG));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_PauseDIG(): Pause DIG !!\n"));
+
+ /* 2 Backup IGI value */
+ if (!bPaused) {
+ pDM_DigTable->IGIBackup = pDM_DigTable->CurIGValue;
+ bPaused = true;
+ }
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_PauseDIG(): Backup IGI = 0x%x\n", pDM_DigTable->IGIBackup));
+
+ /* 2 Write new IGI value */
+ ODM_Write_DIG(pDM_Odm, IGIValue);
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_PauseDIG(): Write new IGI = 0x%x\n", IGIValue));
+ break;
+
+ /* 1 Resume DIG */
+ case ODM_RESUME_DIG:
+ if (bPaused) {
+ /* 2 Write backup IGI value */
+ ODM_Write_DIG(pDM_Odm, pDM_DigTable->IGIBackup);
+ bPaused = false;
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_PauseDIG(): Write original IGI = 0x%x\n", pDM_DigTable->IGIBackup));
+
+ /* 2 Enable DIG */
+ ODM_CmnInfoUpdate(pDM_Odm, ODM_CMNINFO_ABILITY, pDM_Odm->SupportAbility | ODM_BB_DIG);
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_PauseDIG(): Resume DIG !!\n"));
+ }
+ break;
+
+ default:
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_PauseDIG(): Wrong type !!\n"));
+ break;
+ }
+}
+
+bool odm_DigAbort(void *pDM_VOID)
+{
+ PDM_ODM_T pDM_Odm = (PDM_ODM_T)pDM_VOID;
+
+ /* SupportAbility */
+ if (!(pDM_Odm->SupportAbility & ODM_BB_FA_CNT)) {
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_DIG(): Return: SupportAbility ODM_BB_FA_CNT is disabled\n"));
+ return true;
+ }
+
+ /* SupportAbility */
+ if (!(pDM_Odm->SupportAbility & ODM_BB_DIG)) {
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_DIG(): Return: SupportAbility ODM_BB_DIG is disabled\n"));
+ return true;
+ }
+
+ /* ScanInProcess */
+ if (*(pDM_Odm->pbScanInProcess)) {
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_DIG(): Return: In Scan Progress\n"));
+ return true;
+ }
+
+	/* added by Neil Chen to avoid running while PSD is processing */
+ if (pDM_Odm->bDMInitialGainEnable == false) {
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_DIG(): Return: PSD is Processing\n"));
+ return true;
+ }
+
+ return false;
+}
+
+void odm_DIGInit(void *pDM_VOID)
+{
+ PDM_ODM_T pDM_Odm = (PDM_ODM_T)pDM_VOID;
+ pDIG_T pDM_DigTable = &pDM_Odm->DM_DigTable;
+
+ pDM_DigTable->bStopDIG = false;
+ pDM_DigTable->bPSDInProgress = false;
+ pDM_DigTable->CurIGValue = (u8) PHY_QueryBBReg(pDM_Odm->Adapter, ODM_REG(IGI_A, pDM_Odm), ODM_BIT(IGI, pDM_Odm));
+ pDM_DigTable->RssiLowThresh = DM_DIG_THRESH_LOW;
+ pDM_DigTable->RssiHighThresh = DM_DIG_THRESH_HIGH;
+ pDM_DigTable->FALowThresh = DMfalseALARM_THRESH_LOW;
+ pDM_DigTable->FAHighThresh = DMfalseALARM_THRESH_HIGH;
+ pDM_DigTable->BackoffVal = DM_DIG_BACKOFF_DEFAULT;
+ pDM_DigTable->BackoffVal_range_max = DM_DIG_BACKOFF_MAX;
+ pDM_DigTable->BackoffVal_range_min = DM_DIG_BACKOFF_MIN;
+ pDM_DigTable->PreCCK_CCAThres = 0xFF;
+ pDM_DigTable->CurCCK_CCAThres = 0x83;
+ pDM_DigTable->ForbiddenIGI = DM_DIG_MIN_NIC;
+ pDM_DigTable->LargeFAHit = 0;
+ pDM_DigTable->Recover_cnt = 0;
+ pDM_DigTable->bMediaConnect_0 = false;
+ pDM_DigTable->bMediaConnect_1 = false;
+
+	/* Initialize pDM_Odm->bDMInitialGainEnable to true here to avoid a DIG error */
+ pDM_Odm->bDMInitialGainEnable = true;
+
+ pDM_DigTable->DIG_Dynamic_MIN_0 = DM_DIG_MIN_NIC;
+ pDM_DigTable->DIG_Dynamic_MIN_1 = DM_DIG_MIN_NIC;
+
+	/* Initialize BT 3.0 IGI */
+ pDM_DigTable->BT30_CurIGI = 0x32;
+
+ if (pDM_Odm->BoardType & (ODM_BOARD_EXT_PA|ODM_BOARD_EXT_LNA)) {
+ pDM_DigTable->rx_gain_range_max = DM_DIG_MAX_NIC;
+ pDM_DigTable->rx_gain_range_min = DM_DIG_MIN_NIC;
+ } else {
+ pDM_DigTable->rx_gain_range_max = DM_DIG_MAX_NIC;
+ pDM_DigTable->rx_gain_range_min = DM_DIG_MIN_NIC;
+ }
+
+}
+
+
+void odm_DIG(void *pDM_VOID)
+{
+ PDM_ODM_T pDM_Odm = (PDM_ODM_T)pDM_VOID;
+
+ /* Common parameters */
+ pDIG_T pDM_DigTable = &pDM_Odm->DM_DigTable;
+ Pfalse_ALARM_STATISTICS pFalseAlmCnt = &pDM_Odm->FalseAlmCnt;
+ bool FirstConnect, FirstDisConnect;
+ u8 DIG_MaxOfMin, DIG_Dynamic_MIN;
+ u8 dm_dig_max, dm_dig_min;
+ u8 CurrentIGI = pDM_DigTable->CurIGValue;
+ u8 offset;
+ u32 dm_FA_thres[3];
+ u8 Adap_IGI_Upper = 0;
+ u32 TxTp = 0, RxTp = 0;
+ bool bDFSBand = false;
+ bool bPerformance = true, bFirstTpTarget = false, bFirstCoverage = false;
+
+ if (odm_DigAbort(pDM_Odm) == true)
+ return;
+
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_DIG() ===========================>\n\n"));
+
+ if (pDM_Odm->adaptivity_flag == true)
+ Adap_IGI_Upper = pDM_Odm->Adaptivity_IGI_upper;
+
+
+ /* 1 Update status */
+ DIG_Dynamic_MIN = pDM_DigTable->DIG_Dynamic_MIN_0;
+ FirstConnect = (pDM_Odm->bLinked) && (pDM_DigTable->bMediaConnect_0 == false);
+ FirstDisConnect = (!pDM_Odm->bLinked) && (pDM_DigTable->bMediaConnect_0 == true);
+
+ /* 1 Boundary Decision */
+ /* 2 For WIN\CE */
+ dm_dig_max = 0x5A;
+ dm_dig_min = DM_DIG_MIN_NIC;
+ DIG_MaxOfMin = DM_DIG_MAX_AP;
+
+	ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_DIG(): Absolute upper bound = 0x%x, lower bound = 0x%x\n", dm_dig_max, dm_dig_min));
+
+ /* 1 Adjust boundary by RSSI */
+ if (pDM_Odm->bLinked && bPerformance) {
+ /* 2 Modify DIG upper bound */
+ /* 4 Modify DIG upper bound for 92E, 8723A\B, 8821 & 8812 BT */
+ if (pDM_Odm->bBtLimitedDig == 1) {
+ offset = 10;
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_DIG(): Coex. case: Force upper bound to RSSI + %d !!!!!!\n", offset));
+ } else
+ offset = 15;
+
+ if ((pDM_Odm->RSSI_Min + offset) > dm_dig_max)
+ pDM_DigTable->rx_gain_range_max = dm_dig_max;
+ else if ((pDM_Odm->RSSI_Min + offset) < dm_dig_min)
+ pDM_DigTable->rx_gain_range_max = dm_dig_min;
+ else
+ pDM_DigTable->rx_gain_range_max = pDM_Odm->RSSI_Min + offset;
+
+ /* 2 Modify DIG lower bound */
+ /* if (pDM_Odm->bOneEntryOnly) */
+ {
+ if (pDM_Odm->RSSI_Min < dm_dig_min)
+ DIG_Dynamic_MIN = dm_dig_min;
+ else if (pDM_Odm->RSSI_Min > DIG_MaxOfMin)
+ DIG_Dynamic_MIN = DIG_MaxOfMin;
+ else
+ DIG_Dynamic_MIN = pDM_Odm->RSSI_Min;
+ }
+ } else {
+ pDM_DigTable->rx_gain_range_max = dm_dig_max;
+ DIG_Dynamic_MIN = dm_dig_min;
+ }
+
+ /* 1 Force Lower Bound for AntDiv */
+ if (pDM_Odm->bLinked && !pDM_Odm->bOneEntryOnly) {
+ if (pDM_Odm->SupportAbility & ODM_BB_ANT_DIV) {
+ if (
+ pDM_Odm->AntDivType == CG_TRX_HW_ANTDIV ||
+ pDM_Odm->AntDivType == CG_TRX_SMART_ANTDIV ||
+ pDM_Odm->AntDivType == S0S1_SW_ANTDIV
+ ) {
+ if (pDM_DigTable->AntDiv_RSSI_max > DIG_MaxOfMin)
+ DIG_Dynamic_MIN = DIG_MaxOfMin;
+ else
+ DIG_Dynamic_MIN = (u8) pDM_DigTable->AntDiv_RSSI_max;
+ ODM_RT_TRACE(
+ pDM_Odm,
+ ODM_COMP_ANT_DIV,
+ ODM_DBG_LOUD,
+ (
+ "odm_DIG(): Antenna diversity case: Force lower bound to 0x%x !!!!!!\n",
+ DIG_Dynamic_MIN
+ )
+ );
+ ODM_RT_TRACE(
+ pDM_Odm,
+ ODM_COMP_ANT_DIV,
+ ODM_DBG_LOUD,
+ (
+ "odm_DIG(): Antenna diversity case: RSSI_max = 0x%x !!!!!!\n",
+ pDM_DigTable->AntDiv_RSSI_max
+ )
+ );
+ }
+ }
+ }
+ ODM_RT_TRACE(
+ pDM_Odm,
+ ODM_COMP_DIG,
+ ODM_DBG_LOUD,
+ (
+ "odm_DIG(): Adjust boundary by RSSI Upper bound = 0x%x, Lower bound = 0x%x\n",
+ pDM_DigTable->rx_gain_range_max,
+ DIG_Dynamic_MIN
+ )
+ );
+ ODM_RT_TRACE(
+ pDM_Odm,
+ ODM_COMP_DIG,
+ ODM_DBG_LOUD,
+ (
+ "odm_DIG(): Link status: bLinked = %d, RSSI = %d, bFirstConnect = %d, bFirsrDisConnect = %d\n\n",
+ pDM_Odm->bLinked,
+ pDM_Odm->RSSI_Min,
+ FirstConnect,
+ FirstDisConnect
+ )
+ );
+
+ /* 1 Modify DIG lower bound, deal with abnormal case */
+ /* 2 Abnormal false alarm case */
+ if (FirstDisConnect) {
+ pDM_DigTable->rx_gain_range_min = DIG_Dynamic_MIN;
+ pDM_DigTable->ForbiddenIGI = DIG_Dynamic_MIN;
+ } else
+ pDM_DigTable->rx_gain_range_min =
+ odm_ForbiddenIGICheck(pDM_Odm, DIG_Dynamic_MIN, CurrentIGI);
+
+ if (pDM_Odm->bLinked && !FirstConnect) {
+ if (
+ (pDM_Odm->PhyDbgInfo.NumQryBeaconPkt < 5) &&
+ pDM_Odm->bsta_state
+ ) {
+ pDM_DigTable->rx_gain_range_min = dm_dig_min;
+ ODM_RT_TRACE(
+ pDM_Odm,
+ ODM_COMP_DIG,
+ ODM_DBG_LOUD,
+ (
+ "odm_DIG(): Abnrormal #beacon (%d) case in STA mode: Force lower bound to 0x%x !!!!!!\n\n",
+ pDM_Odm->PhyDbgInfo.NumQryBeaconPkt,
+ pDM_DigTable->rx_gain_range_min
+ )
+ );
+ }
+ }
+
+ /* 2 Abnormal lower bound case */
+ if (pDM_DigTable->rx_gain_range_min > pDM_DigTable->rx_gain_range_max) {
+ pDM_DigTable->rx_gain_range_min = pDM_DigTable->rx_gain_range_max;
+ ODM_RT_TRACE(
+ pDM_Odm,
+ ODM_COMP_DIG,
+ ODM_DBG_LOUD,
+ (
+ "odm_DIG(): Abnrormal lower bound case: Force lower bound to 0x%x !!!!!!\n\n",
+ pDM_DigTable->rx_gain_range_min
+ )
+ );
+ }
+
+
+ /* 1 False alarm threshold decision */
+ odm_FAThresholdCheck(pDM_Odm, bDFSBand, bPerformance, RxTp, TxTp, dm_FA_thres);
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_DIG(): False alarm threshold = %d, %d, %d\n\n", dm_FA_thres[0], dm_FA_thres[1], dm_FA_thres[2]));
+
+ /* 1 Adjust initial gain by false alarm */
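+	/* IGI is raised by 4 or 2 when the total false-alarm count exceeds the high */
+	/* or middle threshold, and lowered by 2 when it drops below the low one. */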
+ if (pDM_Odm->bLinked && bPerformance) {
+ /* 2 After link */
+ ODM_RT_TRACE(
+ pDM_Odm,
+ ODM_COMP_DIG,
+ ODM_DBG_LOUD,
+ ("odm_DIG(): Adjust IGI after link\n")
+ );
+
+ if (bFirstTpTarget || (FirstConnect && bPerformance)) {
+ pDM_DigTable->LargeFAHit = 0;
+
+ if (pDM_Odm->RSSI_Min < DIG_MaxOfMin) {
+ if (CurrentIGI < pDM_Odm->RSSI_Min)
+ CurrentIGI = pDM_Odm->RSSI_Min;
+ } else {
+ if (CurrentIGI < DIG_MaxOfMin)
+ CurrentIGI = DIG_MaxOfMin;
+ }
+
+ ODM_RT_TRACE(
+ pDM_Odm,
+ ODM_COMP_DIG,
+ ODM_DBG_LOUD,
+ (
+ "odm_DIG(): First connect case: IGI does on-shot to 0x%x\n",
+ CurrentIGI
+ )
+ );
+
+ } else {
+ if (pFalseAlmCnt->Cnt_all > dm_FA_thres[2])
+ CurrentIGI = CurrentIGI + 4;
+ else if (pFalseAlmCnt->Cnt_all > dm_FA_thres[1])
+ CurrentIGI = CurrentIGI + 2;
+ else if (pFalseAlmCnt->Cnt_all < dm_FA_thres[0])
+ CurrentIGI = CurrentIGI - 2;
+
+ if (
+ (pDM_Odm->PhyDbgInfo.NumQryBeaconPkt < 5) &&
+ (pFalseAlmCnt->Cnt_all < DM_DIG_FA_TH1) &&
+ (pDM_Odm->bsta_state)
+ ) {
+ CurrentIGI = pDM_DigTable->rx_gain_range_min;
+ ODM_RT_TRACE(
+ pDM_Odm,
+ ODM_COMP_DIG,
+ ODM_DBG_LOUD,
+ (
+ "odm_DIG(): Abnormal #beacon (%d) case: IGI does one-shot to 0x%x\n",
+ pDM_Odm->PhyDbgInfo.NumQryBeaconPkt,
+ CurrentIGI
+ )
+ );
+ }
+ }
+ } else {
+ /* 2 Before link */
+ ODM_RT_TRACE(
+ pDM_Odm,
+ ODM_COMP_DIG,
+ ODM_DBG_LOUD,
+ ("odm_DIG(): Adjust IGI before link\n")
+ );
+
+ if (FirstDisConnect || bFirstCoverage) {
+ CurrentIGI = dm_dig_min;
+ ODM_RT_TRACE(
+ pDM_Odm,
+ ODM_COMP_DIG,
+ ODM_DBG_LOUD,
+ ("odm_DIG(): First disconnect case: IGI does on-shot to lower bound\n")
+ );
+ } else {
+ if (pFalseAlmCnt->Cnt_all > dm_FA_thres[2])
+ CurrentIGI = CurrentIGI + 4;
+ else if (pFalseAlmCnt->Cnt_all > dm_FA_thres[1])
+ CurrentIGI = CurrentIGI + 2;
+ else if (pFalseAlmCnt->Cnt_all < dm_FA_thres[0])
+ CurrentIGI = CurrentIGI - 2;
+ }
+ }
+
+ /* 1 Check initial gain by upper/lower bound */
+ if (CurrentIGI < pDM_DigTable->rx_gain_range_min)
+ CurrentIGI = pDM_DigTable->rx_gain_range_min;
+
+ if (CurrentIGI > pDM_DigTable->rx_gain_range_max)
+ CurrentIGI = pDM_DigTable->rx_gain_range_max;
+
+ ODM_RT_TRACE(
+ pDM_Odm,
+ ODM_COMP_DIG,
+ ODM_DBG_LOUD,
+ (
+ "odm_DIG(): CurIGValue = 0x%x, TotalFA = %d\n\n",
+ CurrentIGI,
+ pFalseAlmCnt->Cnt_all
+ )
+ );
+
+ /* 1 Force upper bound and lower bound for adaptivity */
+ if (
+ pDM_Odm->SupportAbility & ODM_BB_ADAPTIVITY &&
+ pDM_Odm->adaptivity_flag == true
+ ) {
+ if (CurrentIGI > Adap_IGI_Upper)
+ CurrentIGI = Adap_IGI_Upper;
+
+ if (pDM_Odm->IGI_LowerBound != 0) {
+ if (CurrentIGI < pDM_Odm->IGI_LowerBound)
+ CurrentIGI = pDM_Odm->IGI_LowerBound;
+ }
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_DIG(): Adaptivity case: Force upper bound to 0x%x !!!!!!\n", Adap_IGI_Upper));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_DIG(): Adaptivity case: Force lower bound to 0x%x !!!!!!\n\n", pDM_Odm->IGI_LowerBound));
+ }
+
+
+ /* 1 Update status */
+ if (pDM_Odm->bBtHsOperation) {
+ if (pDM_Odm->bLinked) {
+ if (pDM_DigTable->BT30_CurIGI > (CurrentIGI))
+ ODM_Write_DIG(pDM_Odm, CurrentIGI);
+ else
+ ODM_Write_DIG(pDM_Odm, pDM_DigTable->BT30_CurIGI);
+
+ pDM_DigTable->bMediaConnect_0 = pDM_Odm->bLinked;
+ pDM_DigTable->DIG_Dynamic_MIN_0 = DIG_Dynamic_MIN;
+ } else {
+ if (pDM_Odm->bLinkInProcess)
+ ODM_Write_DIG(pDM_Odm, 0x1c);
+ else if (pDM_Odm->bBtConnectProcess)
+ ODM_Write_DIG(pDM_Odm, 0x28);
+ else
+ ODM_Write_DIG(pDM_Odm, pDM_DigTable->BT30_CurIGI);/* ODM_Write_DIG(pDM_Odm, pDM_DigTable->CurIGValue); */
+ }
+	} else { /* BT is not in use */
+ ODM_Write_DIG(pDM_Odm, CurrentIGI);/* ODM_Write_DIG(pDM_Odm, pDM_DigTable->CurIGValue); */
+ pDM_DigTable->bMediaConnect_0 = pDM_Odm->bLinked;
+ pDM_DigTable->DIG_Dynamic_MIN_0 = DIG_Dynamic_MIN;
+ }
+}
+
+void odm_DIGbyRSSI_LPS(void *pDM_VOID)
+{
+ PDM_ODM_T pDM_Odm = (PDM_ODM_T)pDM_VOID;
+ Pfalse_ALARM_STATISTICS pFalseAlmCnt = &pDM_Odm->FalseAlmCnt;
+
+ u8 RSSI_Lower = DM_DIG_MIN_NIC; /* 0x1E or 0x1C */
+ u8 CurrentIGI = pDM_Odm->RSSI_Min;
+
+ CurrentIGI = CurrentIGI+RSSI_OFFSET_DIG;
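+	/* In LPS the IGI starts from RSSI_Min plus a fixed offset, is nudged by the */
+	/* LPS false-alarm thresholds below, and is finally clamped between */
+	/* max(RSSI_Min - 10, DM_DIG_MIN_NIC) and DM_DIG_MAX_NIC. */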
+
+ ODM_RT_TRACE(
+ pDM_Odm,
+ ODM_COMP_DIG,
+ ODM_DBG_LOUD,
+ ("odm_DIGbyRSSI_LPS() ==>\n")
+ );
+
+	/* Use FW PS mode to decide IGI */
+ /* Adjust by FA in LPS MODE */
+ if (pFalseAlmCnt->Cnt_all > DM_DIG_FA_TH2_LPS)
+ CurrentIGI = CurrentIGI+4;
+ else if (pFalseAlmCnt->Cnt_all > DM_DIG_FA_TH1_LPS)
+ CurrentIGI = CurrentIGI+2;
+ else if (pFalseAlmCnt->Cnt_all < DM_DIG_FA_TH0_LPS)
+ CurrentIGI = CurrentIGI-2;
+
+
+ /* Lower bound checking */
+
+ /* RSSI Lower bound check */
+ if ((pDM_Odm->RSSI_Min-10) > DM_DIG_MIN_NIC)
+ RSSI_Lower = pDM_Odm->RSSI_Min-10;
+ else
+ RSSI_Lower = DM_DIG_MIN_NIC;
+
+ /* Upper and Lower Bound checking */
+ if (CurrentIGI > DM_DIG_MAX_NIC)
+ CurrentIGI = DM_DIG_MAX_NIC;
+ else if (CurrentIGI < RSSI_Lower)
+ CurrentIGI = RSSI_Lower;
+
+
+ ODM_RT_TRACE(
+ pDM_Odm,
+ ODM_COMP_DIG,
+ ODM_DBG_LOUD,
+ ("odm_DIGbyRSSI_LPS(): pFalseAlmCnt->Cnt_all = %d\n", pFalseAlmCnt->Cnt_all)
+ );
+ ODM_RT_TRACE(
+ pDM_Odm,
+ ODM_COMP_DIG,
+ ODM_DBG_LOUD,
+ ("odm_DIGbyRSSI_LPS(): pDM_Odm->RSSI_Min = %d\n", pDM_Odm->RSSI_Min)
+ );
+ ODM_RT_TRACE(
+ pDM_Odm,
+ ODM_COMP_DIG,
+ ODM_DBG_LOUD,
+ ("odm_DIGbyRSSI_LPS(): CurrentIGI = 0x%x\n", CurrentIGI)
+ );
+
+ ODM_Write_DIG(pDM_Odm, CurrentIGI);
+ /* ODM_Write_DIG(pDM_Odm, pDM_DigTable->CurIGValue); */
+}
+
+/* 3 ============================================================ */
+/* 3 FALSE ALARM CHECK */
+/* 3 ============================================================ */
+
+void odm_FalseAlarmCounterStatistics(void *pDM_VOID)
+{
+ PDM_ODM_T pDM_Odm = (PDM_ODM_T)pDM_VOID;
+ Pfalse_ALARM_STATISTICS FalseAlmCnt = &(pDM_Odm->FalseAlmCnt);
+ u32 ret_value;
+
+ if (!(pDM_Odm->SupportAbility & ODM_BB_FA_CNT))
+ return;
+
+ /* hold ofdm counter */
+ /* hold page C counter */
+ PHY_SetBBReg(pDM_Odm->Adapter, ODM_REG_OFDM_FA_HOLDC_11N, BIT31, 1);
+ /* hold page D counter */
+ PHY_SetBBReg(pDM_Odm->Adapter, ODM_REG_OFDM_FA_RSTD_11N, BIT31, 1);
+
+ ret_value = PHY_QueryBBReg(
+ pDM_Odm->Adapter, ODM_REG_OFDM_FA_TYPE1_11N, bMaskDWord
+ );
+ FalseAlmCnt->Cnt_Fast_Fsync = (ret_value&0xffff);
+ FalseAlmCnt->Cnt_SB_Search_fail = ((ret_value&0xffff0000)>>16);
+
+ ret_value = PHY_QueryBBReg(
+ pDM_Odm->Adapter, ODM_REG_OFDM_FA_TYPE2_11N, bMaskDWord
+ );
+ FalseAlmCnt->Cnt_OFDM_CCA = (ret_value&0xffff);
+ FalseAlmCnt->Cnt_Parity_Fail = ((ret_value&0xffff0000)>>16);
+
+ ret_value = PHY_QueryBBReg(
+ pDM_Odm->Adapter, ODM_REG_OFDM_FA_TYPE3_11N, bMaskDWord
+ );
+ FalseAlmCnt->Cnt_Rate_Illegal = (ret_value&0xffff);
+ FalseAlmCnt->Cnt_Crc8_fail = ((ret_value&0xffff0000)>>16);
+
+ ret_value = PHY_QueryBBReg(
+ pDM_Odm->Adapter, ODM_REG_OFDM_FA_TYPE4_11N, bMaskDWord
+ );
+ FalseAlmCnt->Cnt_Mcs_fail = (ret_value&0xffff);
+
+ FalseAlmCnt->Cnt_Ofdm_fail =
+ FalseAlmCnt->Cnt_Parity_Fail +
+ FalseAlmCnt->Cnt_Rate_Illegal +
+ FalseAlmCnt->Cnt_Crc8_fail +
+ FalseAlmCnt->Cnt_Mcs_fail +
+ FalseAlmCnt->Cnt_Fast_Fsync +
+ FalseAlmCnt->Cnt_SB_Search_fail;
+
+ {
+ /* hold cck counter */
+ PHY_SetBBReg(pDM_Odm->Adapter, ODM_REG_CCK_FA_RST_11N, BIT12, 1);
+ PHY_SetBBReg(pDM_Odm->Adapter, ODM_REG_CCK_FA_RST_11N, BIT14, 1);
+
+ ret_value = PHY_QueryBBReg(
+ pDM_Odm->Adapter, ODM_REG_CCK_FA_LSB_11N, bMaskByte0
+ );
+ FalseAlmCnt->Cnt_Cck_fail = ret_value;
+
+ ret_value = PHY_QueryBBReg(
+ pDM_Odm->Adapter, ODM_REG_CCK_FA_MSB_11N, bMaskByte3
+ );
+ FalseAlmCnt->Cnt_Cck_fail += (ret_value&0xff)<<8;
+
+ ret_value = PHY_QueryBBReg(
+ pDM_Odm->Adapter, ODM_REG_CCK_CCA_CNT_11N, bMaskDWord
+ );
+ FalseAlmCnt->Cnt_CCK_CCA =
+ ((ret_value&0xFF)<<8) | ((ret_value&0xFF00)>>8);
+ }
+
+ FalseAlmCnt->Cnt_all = (
+ FalseAlmCnt->Cnt_Fast_Fsync +
+ FalseAlmCnt->Cnt_SB_Search_fail +
+ FalseAlmCnt->Cnt_Parity_Fail +
+ FalseAlmCnt->Cnt_Rate_Illegal +
+ FalseAlmCnt->Cnt_Crc8_fail +
+ FalseAlmCnt->Cnt_Mcs_fail +
+ FalseAlmCnt->Cnt_Cck_fail
+ );
+
+ FalseAlmCnt->Cnt_CCA_all =
+ FalseAlmCnt->Cnt_OFDM_CCA + FalseAlmCnt->Cnt_CCK_CCA;
+
+ ODM_RT_TRACE(
+ pDM_Odm,
+ ODM_COMP_FA_CNT,
+ ODM_DBG_LOUD,
+ ("Enter odm_FalseAlarmCounterStatistics\n")
+ );
+ ODM_RT_TRACE(
+ pDM_Odm,
+ ODM_COMP_FA_CNT,
+ ODM_DBG_LOUD,
+ (
+ "Cnt_Fast_Fsync =%d, Cnt_SB_Search_fail =%d\n",
+ FalseAlmCnt->Cnt_Fast_Fsync,
+ FalseAlmCnt->Cnt_SB_Search_fail
+ )
+ );
+ ODM_RT_TRACE(
+ pDM_Odm,
+ ODM_COMP_FA_CNT,
+ ODM_DBG_LOUD,
+ (
+ "Cnt_Parity_Fail =%d, Cnt_Rate_Illegal =%d\n",
+ FalseAlmCnt->Cnt_Parity_Fail,
+ FalseAlmCnt->Cnt_Rate_Illegal
+ )
+ );
+ ODM_RT_TRACE(
+ pDM_Odm,
+ ODM_COMP_FA_CNT,
+ ODM_DBG_LOUD,
+ (
+ "Cnt_Crc8_fail =%d, Cnt_Mcs_fail =%d\n",
+ FalseAlmCnt->Cnt_Crc8_fail,
+ FalseAlmCnt->Cnt_Mcs_fail
+ )
+ );
+
+ ODM_RT_TRACE(
+ pDM_Odm,
+ ODM_COMP_FA_CNT,
+ ODM_DBG_LOUD,
+ ("Cnt_OFDM_CCA =%d\n", FalseAlmCnt->Cnt_OFDM_CCA)
+ );
+ ODM_RT_TRACE(
+ pDM_Odm,
+ ODM_COMP_FA_CNT,
+ ODM_DBG_LOUD,
+ ("Cnt_CCK_CCA =%d\n", FalseAlmCnt->Cnt_CCK_CCA)
+ );
+ ODM_RT_TRACE(
+ pDM_Odm,
+ ODM_COMP_FA_CNT,
+ ODM_DBG_LOUD,
+ ("Cnt_CCA_all =%d\n", FalseAlmCnt->Cnt_CCA_all)
+ );
+ ODM_RT_TRACE(
+ pDM_Odm,
+ ODM_COMP_FA_CNT,
+ ODM_DBG_LOUD,
+ ("Cnt_Ofdm_fail =%d\n", FalseAlmCnt->Cnt_Ofdm_fail)
+ );
+ ODM_RT_TRACE(
+ pDM_Odm,
+ ODM_COMP_FA_CNT,
+ ODM_DBG_LOUD,
+ ("Cnt_Cck_fail =%d\n", FalseAlmCnt->Cnt_Cck_fail)
+ );
+ ODM_RT_TRACE(
+ pDM_Odm,
+ ODM_COMP_FA_CNT,
+ ODM_DBG_LOUD,
+ ("Total False Alarm =%d\n", FalseAlmCnt->Cnt_all)
+ );
+}
+
+
+void odm_FAThresholdCheck(
+ void *pDM_VOID,
+ bool bDFSBand,
+ bool bPerformance,
+ u32 RxTp,
+ u32 TxTp,
+ u32 *dm_FA_thres
+)
+{
+ PDM_ODM_T pDM_Odm = (PDM_ODM_T)pDM_VOID;
+
+ if (pDM_Odm->bLinked && (bPerformance || bDFSBand)) {
+ /* For NIC */
+ dm_FA_thres[0] = DM_DIG_FA_TH0;
+ dm_FA_thres[1] = DM_DIG_FA_TH1;
+ dm_FA_thres[2] = DM_DIG_FA_TH2;
+ } else {
+ dm_FA_thres[0] = 2000;
+ dm_FA_thres[1] = 4000;
+ dm_FA_thres[2] = 5000;
+ }
+ return;
+}
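+
+/*
+ * For reference, the linked/performance thresholds above expand to
+ * DM_DIG_FA_TH0/1/2 = 0x200/0x300/0x400 (512/768/1024 false alarms),
+ * versus 2000/4000/5000 when not linked. A minimal, hypothetical caller
+ * (fa_thres is a caller-side name chosen only for illustration) might
+ * look like:
+ *
+ *   u32 fa_thres[3];
+ *
+ *   odm_FAThresholdCheck(pDM_Odm, bDFSBand, bPerformance, RxTp, TxTp, fa_thres);
+ *   if (pFalseAlmCnt->Cnt_all > fa_thres[2])
+ *           CurrentIGI += 4;
+ */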
+
+u8 odm_ForbiddenIGICheck(void *pDM_VOID, u8 DIG_Dynamic_MIN, u8 CurrentIGI)
+{
+ PDM_ODM_T pDM_Odm = (PDM_ODM_T)pDM_VOID;
+ pDIG_T pDM_DigTable = &pDM_Odm->DM_DigTable;
+ Pfalse_ALARM_STATISTICS pFalseAlmCnt = &(pDM_Odm->FalseAlmCnt);
+ u8 rx_gain_range_min = pDM_DigTable->rx_gain_range_min;
+
+ if (pFalseAlmCnt->Cnt_all > 10000) {
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_DIG(): Abnormally false alarm case.\n"));
+
+ if (pDM_DigTable->LargeFAHit != 3)
+ pDM_DigTable->LargeFAHit++;
+
+ /* if (pDM_DigTable->ForbiddenIGI < pDM_DigTable->CurIGValue) */
+ if (pDM_DigTable->ForbiddenIGI < CurrentIGI) {
+ pDM_DigTable->ForbiddenIGI = CurrentIGI;
+ /* pDM_DigTable->ForbiddenIGI = pDM_DigTable->CurIGValue; */
+ pDM_DigTable->LargeFAHit = 1;
+ }
+
+ if (pDM_DigTable->LargeFAHit >= 3) {
+ if ((pDM_DigTable->ForbiddenIGI + 2) > pDM_DigTable->rx_gain_range_max)
+ rx_gain_range_min = pDM_DigTable->rx_gain_range_max;
+ else
+ rx_gain_range_min = (pDM_DigTable->ForbiddenIGI + 2);
+ pDM_DigTable->Recover_cnt = 1800;
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_DIG(): Abnormally false alarm case: Recover_cnt = %d\n", pDM_DigTable->Recover_cnt));
+ }
+ } else {
+ if (pDM_DigTable->Recover_cnt != 0) {
+ pDM_DigTable->Recover_cnt--;
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_DIG(): Normal Case: Recover_cnt = %d\n", pDM_DigTable->Recover_cnt));
+ } else {
+ if (pDM_DigTable->LargeFAHit < 3) {
+ if ((pDM_DigTable->ForbiddenIGI - 2) < DIG_Dynamic_MIN) { /* DM_DIG_MIN) */
+ pDM_DigTable->ForbiddenIGI = DIG_Dynamic_MIN; /* DM_DIG_MIN; */
+ rx_gain_range_min = DIG_Dynamic_MIN; /* DM_DIG_MIN; */
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_DIG(): Normal Case: At Lower Bound\n"));
+ } else {
+ pDM_DigTable->ForbiddenIGI -= 2;
+ rx_gain_range_min = (pDM_DigTable->ForbiddenIGI + 2);
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_DIG(): Normal Case: Approach Lower Bound\n"));
+ }
+ } else
+ pDM_DigTable->LargeFAHit = 0;
+ }
+ }
+
+ return rx_gain_range_min;
+
+}
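+
+/*
+ * Illustrative recovery sequence for odm_ForbiddenIGICheck() (the call counts
+ * are assumed): three consecutive calls with Cnt_all > 10000 push LargeFAHit
+ * to 3, latch ForbiddenIGI at the IGI then in use and raise rx_gain_range_min
+ * to ForbiddenIGI + 2 for the next Recover_cnt = 1800 calls. Once the
+ * false-alarm count drops, Recover_cnt counts down; after it reaches zero,
+ * LargeFAHit is cleared and ForbiddenIGI is then stepped down by 2 per call
+ * until it meets DIG_Dynamic_MIN again.
+ */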
+
+/* 3 ============================================================ */
+/* 3 CCK Packet Detect Threshold */
+/* 3 ============================================================ */
+
+void odm_CCKPacketDetectionThresh(void *pDM_VOID)
+{
+ PDM_ODM_T pDM_Odm = (PDM_ODM_T)pDM_VOID;
+ Pfalse_ALARM_STATISTICS FalseAlmCnt = &(pDM_Odm->FalseAlmCnt);
+ u8 CurCCK_CCAThres;
+
+
+ if (
+ !(pDM_Odm->SupportAbility & ODM_BB_CCK_PD) ||
+ !(pDM_Odm->SupportAbility & ODM_BB_FA_CNT)
+ ) {
+ ODM_RT_TRACE(
+ pDM_Odm,
+ ODM_COMP_CCK_PD,
+ ODM_DBG_LOUD,
+ ("odm_CCKPacketDetectionThresh() return ==========\n")
+ );
+ return;
+ }
+
+ if (pDM_Odm->ExtLNA)
+ return;
+
+ ODM_RT_TRACE(
+ pDM_Odm,
+ ODM_COMP_CCK_PD,
+ ODM_DBG_LOUD,
+ ("odm_CCKPacketDetectionThresh() ==========>\n")
+ );
+
+ if (pDM_Odm->bLinked) {
+ if (pDM_Odm->RSSI_Min > 25)
+ CurCCK_CCAThres = 0xcd;
+ else if ((pDM_Odm->RSSI_Min <= 25) && (pDM_Odm->RSSI_Min > 10))
+ CurCCK_CCAThres = 0x83;
+ else {
+ if (FalseAlmCnt->Cnt_Cck_fail > 1000)
+ CurCCK_CCAThres = 0x83;
+ else
+ CurCCK_CCAThres = 0x40;
+ }
+ } else {
+ if (FalseAlmCnt->Cnt_Cck_fail > 1000)
+ CurCCK_CCAThres = 0x83;
+ else
+ CurCCK_CCAThres = 0x40;
+ }
+
+ ODM_Write_CCK_CCA_Thres(pDM_Odm, CurCCK_CCAThres);
+
+ ODM_RT_TRACE(
+ pDM_Odm,
+ ODM_COMP_CCK_PD,
+ ODM_DBG_LOUD,
+ (
+ "odm_CCKPacketDetectionThresh() CurCCK_CCAThres = 0x%x\n",
+ CurCCK_CCAThres
+ )
+ );
+}
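+
+/*
+ * Summary of the CCK CCA threshold selection above (the RSSI figures are an
+ * assumed scenario): when linked with RSSI_Min = 40 the threshold is 0xcd; at
+ * RSSI_Min = 20 it drops to 0x83; at RSSI_Min <= 10, or when not linked, it is
+ * 0x83 only while Cnt_Cck_fail > 1000 and 0x40 otherwise.
+ */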
+
+void ODM_Write_CCK_CCA_Thres(void *pDM_VOID, u8 CurCCK_CCAThres)
+{
+ PDM_ODM_T pDM_Odm = (PDM_ODM_T)pDM_VOID;
+ pDIG_T pDM_DigTable = &pDM_Odm->DM_DigTable;
+
+ /* modify by Guo.Mingzhi 2012-01-03 */
+ if (pDM_DigTable->CurCCK_CCAThres != CurCCK_CCAThres)
+ rtw_write8(pDM_Odm->Adapter, ODM_REG(CCK_CCA, pDM_Odm), CurCCK_CCAThres);
+
+ pDM_DigTable->PreCCK_CCAThres = pDM_DigTable->CurCCK_CCAThres;
+ pDM_DigTable->CurCCK_CCAThres = CurCCK_CCAThres;
+}
diff --git a/drivers/staging/rtl8723bs/hal/odm_DIG.h b/drivers/staging/rtl8723bs/hal/odm_DIG.h
new file mode 100644
index 000000000000..e27a69126086
--- /dev/null
+++ b/drivers/staging/rtl8723bs/hal/odm_DIG.h
@@ -0,0 +1,195 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+
+#ifndef __ODMDIG_H__
+#define __ODMDIG_H__
+
+typedef struct _Dynamic_Initial_Gain_Threshold_ {
+ bool bStopDIG;
+ bool bPSDInProgress;
+
+ u8 Dig_Enable_Flag;
+ u8 Dig_Ext_Port_Stage;
+
+ int RssiLowThresh;
+ int RssiHighThresh;
+
+ u32 FALowThresh;
+ u32 FAHighThresh;
+
+ u8 CurSTAConnectState;
+ u8 PreSTAConnectState;
+ u8 CurMultiSTAConnectState;
+
+ u8 PreIGValue;
+ u8 CurIGValue;
+ u8 BackupIGValue; /* MP DIG */
+ u8 BT30_CurIGI;
+ u8 IGIBackup;
+
+ s8 BackoffVal;
+ s8 BackoffVal_range_max;
+ s8 BackoffVal_range_min;
+ u8 rx_gain_range_max;
+ u8 rx_gain_range_min;
+ u8 Rssi_val_min;
+
+ u8 PreCCK_CCAThres;
+ u8 CurCCK_CCAThres;
+ u8 PreCCKPDState;
+ u8 CurCCKPDState;
+ u8 CCKPDBackup;
+
+ u8 LargeFAHit;
+ u8 ForbiddenIGI;
+ u32 Recover_cnt;
+
+ u8 DIG_Dynamic_MIN_0;
+ u8 DIG_Dynamic_MIN_1;
+ bool bMediaConnect_0;
+ bool bMediaConnect_1;
+
+ u32 AntDiv_RSSI_max;
+ u32 RSSI_max;
+
+ u8 *pbP2pLinkInProgress;
+} DIG_T, *pDIG_T;
+
+typedef struct false_ALARM_STATISTICS {
+ u32 Cnt_Parity_Fail;
+ u32 Cnt_Rate_Illegal;
+ u32 Cnt_Crc8_fail;
+ u32 Cnt_Mcs_fail;
+ u32 Cnt_Ofdm_fail;
+ u32 Cnt_Ofdm_fail_pre; /* For RTL8881A */
+ u32 Cnt_Cck_fail;
+ u32 Cnt_all;
+ u32 Cnt_Fast_Fsync;
+ u32 Cnt_SB_Search_fail;
+ u32 Cnt_OFDM_CCA;
+ u32 Cnt_CCK_CCA;
+ u32 Cnt_CCA_all;
+ u32 Cnt_BW_USC; /* Gary */
+ u32 Cnt_BW_LSC; /* Gary */
+} false_ALARM_STATISTICS, *Pfalse_ALARM_STATISTICS;
+
+typedef enum tag_Dynamic_Init_Gain_Operation_Type_Definition {
+ DIG_TYPE_THRESH_HIGH = 0,
+ DIG_TYPE_THRESH_LOW = 1,
+ DIG_TYPE_BACKOFF = 2,
+ DIG_TYPE_RX_GAIN_MIN = 3,
+ DIG_TYPE_RX_GAIN_MAX = 4,
+ DIG_TYPE_ENABLE = 5,
+ DIG_TYPE_DISABLE = 6,
+ DIG_OP_TYPE_MAX
+} DM_DIG_OP_E;
+
+typedef enum tag_ODM_PauseDIG_Type {
+ ODM_PAUSE_DIG = BIT0,
+ ODM_RESUME_DIG = BIT1
+} ODM_Pause_DIG_TYPE;
+
+typedef enum tag_ODM_PauseCCKPD_Type {
+ ODM_PAUSE_CCKPD = BIT0,
+ ODM_RESUME_CCKPD = BIT1
+} ODM_Pause_CCKPD_TYPE;
+
+#define DM_DIG_THRESH_HIGH 40
+#define DM_DIG_THRESH_LOW 35
+
+#define DMfalseALARM_THRESH_LOW 400
+#define DMfalseALARM_THRESH_HIGH 1000
+
+#define DM_DIG_MAX_NIC 0x3e
+#define DM_DIG_MIN_NIC 0x1e /* 0x22//0x1c */
+#define DM_DIG_MAX_OF_MIN_NIC 0x3e
+
+#define DM_DIG_MAX_AP 0x3e
+#define DM_DIG_MIN_AP 0x1c
+#define DM_DIG_MAX_OF_MIN 0x2A /* 0x32 */
+#define DM_DIG_MIN_AP_DFS 0x20
+
+#define DM_DIG_MAX_NIC_HP 0x46
+#define DM_DIG_MIN_NIC_HP 0x2e
+
+#define DM_DIG_MAX_AP_HP 0x42
+#define DM_DIG_MIN_AP_HP 0x30
+
+#define DM_DIG_FA_TH0 0x200/* 0x20 */
+
+#define DM_DIG_FA_TH1 0x300
+#define DM_DIG_FA_TH2 0x400
+/* this is for 92d */
+#define DM_DIG_FA_TH0_92D 0x100
+#define DM_DIG_FA_TH1_92D 0x400
+#define DM_DIG_FA_TH2_92D 0x600
+
+#define DM_DIG_BACKOFF_MAX 12
+#define DM_DIG_BACKOFF_MIN -4
+#define DM_DIG_BACKOFF_DEFAULT 10
+
+#define DM_DIG_FA_TH0_LPS 4 /* 4 in lps */
+#define DM_DIG_FA_TH1_LPS 15 /* 15 lps */
+#define DM_DIG_FA_TH2_LPS 30 /* 30 lps */
+#define RSSI_OFFSET_DIG 0x05
+
+void odm_NHMCounterStatisticsInit(void *pDM_VOID);
+
+void odm_NHMCounterStatistics(void *pDM_VOID);
+
+void odm_NHMBBInit(void *pDM_VOID);
+
+void odm_NHMBB(void *pDM_VOID);
+
+void odm_NHMCounterStatisticsReset(void *pDM_VOID);
+
+void odm_GetNHMCounterStatistics(void *pDM_VOID);
+
+void odm_SearchPwdBLowerBound(void *pDM_VOID, u8 IGI_target);
+
+void odm_AdaptivityInit(void *pDM_VOID);
+
+void odm_Adaptivity(void *pDM_VOID, u8 IGI);
+
+void ODM_Write_DIG(void *pDM_VOID, u8 CurrentIGI);
+
+void odm_PauseDIG(void *pDM_VOID, ODM_Pause_DIG_TYPE PauseType, u8 IGIValue);
+
+void odm_DIGInit(void *pDM_VOID);
+
+void odm_DIG(void *pDM_VOID);
+
+void odm_DIGbyRSSI_LPS(void *pDM_VOID);
+
+void odm_FalseAlarmCounterStatistics(void *pDM_VOID);
+
+void odm_FAThresholdCheck(
+ void *pDM_VOID,
+ bool bDFSBand,
+ bool bPerformance,
+ u32 RxTp,
+ u32 TxTp,
+ u32 *dm_FA_thres
+);
+
+u8 odm_ForbiddenIGICheck(void *pDM_VOID, u8 DIG_Dynamic_MIN, u8 CurrentIGI);
+
+bool odm_DigAbort(void *pDM_VOID);
+
+void odm_CCKPacketDetectionThresh(void *pDM_VOID);
+
+void ODM_Write_CCK_CCA_Thres(void *pDM_VOID, u8 CurCCK_CCAThres);
+
+#endif
diff --git a/drivers/staging/rtl8723bs/hal/odm_DynamicBBPowerSaving.c b/drivers/staging/rtl8723bs/hal/odm_DynamicBBPowerSaving.c
new file mode 100644
index 000000000000..6af017562539
--- /dev/null
+++ b/drivers/staging/rtl8723bs/hal/odm_DynamicBBPowerSaving.c
@@ -0,0 +1,89 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+
+#include "odm_precomp.h"
+
+void odm_DynamicBBPowerSavingInit(void *pDM_VOID)
+{
+ PDM_ODM_T pDM_Odm = (PDM_ODM_T)pDM_VOID;
+ pPS_T pDM_PSTable = &pDM_Odm->DM_PSTable;
+
+ pDM_PSTable->PreCCAState = CCA_MAX;
+ pDM_PSTable->CurCCAState = CCA_MAX;
+ pDM_PSTable->PreRFState = RF_MAX;
+ pDM_PSTable->CurRFState = RF_MAX;
+ pDM_PSTable->Rssi_val_min = 0;
+ pDM_PSTable->initialize = 0;
+}
+
+void ODM_RF_Saving(void *pDM_VOID, u8 bForceInNormal)
+{
+ PDM_ODM_T pDM_Odm = (PDM_ODM_T)pDM_VOID;
+ pPS_T pDM_PSTable = &pDM_Odm->DM_PSTable;
+ u8 Rssi_Up_bound = 30;
+ u8 Rssi_Low_bound = 25;
+
+ if (pDM_Odm->PatchID == 40) { /* RT_CID_819x_FUNAI_TV */
+ Rssi_Up_bound = 50;
+ Rssi_Low_bound = 45;
+ }
+
+ if (pDM_PSTable->initialize == 0) {
+
+ pDM_PSTable->Reg874 = (PHY_QueryBBReg(pDM_Odm->Adapter, 0x874, bMaskDWord)&0x1CC000)>>14;
+ pDM_PSTable->RegC70 = (PHY_QueryBBReg(pDM_Odm->Adapter, 0xc70, bMaskDWord)&BIT3)>>3;
+ pDM_PSTable->Reg85C = (PHY_QueryBBReg(pDM_Odm->Adapter, 0x85c, bMaskDWord)&0xFF000000)>>24;
+ pDM_PSTable->RegA74 = (PHY_QueryBBReg(pDM_Odm->Adapter, 0xa74, bMaskDWord)&0xF000)>>12;
+ /* Reg818 = PHY_QueryBBReg(padapter, 0x818, bMaskDWord); */
+ pDM_PSTable->initialize = 1;
+ }
+
+ if (!bForceInNormal) {
+ if (pDM_Odm->RSSI_Min != 0xFF) {
+ if (pDM_PSTable->PreRFState == RF_Normal) {
+ if (pDM_Odm->RSSI_Min >= Rssi_Up_bound)
+ pDM_PSTable->CurRFState = RF_Save;
+ else
+ pDM_PSTable->CurRFState = RF_Normal;
+ } else {
+ if (pDM_Odm->RSSI_Min <= Rssi_Low_bound)
+ pDM_PSTable->CurRFState = RF_Normal;
+ else
+ pDM_PSTable->CurRFState = RF_Save;
+ }
+ } else
+ pDM_PSTable->CurRFState = RF_MAX;
+ } else
+ pDM_PSTable->CurRFState = RF_Normal;
+
+ if (pDM_PSTable->PreRFState != pDM_PSTable->CurRFState) {
+ if (pDM_PSTable->CurRFState == RF_Save) {
+ PHY_SetBBReg(pDM_Odm->Adapter, 0x874, 0x1C0000, 0x2); /* Reg874[20:18]=3'b010 */
+ PHY_SetBBReg(pDM_Odm->Adapter, 0xc70, BIT3, 0); /* RegC70[3]= 1'b0 */
+ PHY_SetBBReg(pDM_Odm->Adapter, 0x85c, 0xFF000000, 0x63); /* Reg85C[31:24]= 0x63 */
+ PHY_SetBBReg(pDM_Odm->Adapter, 0x874, 0xC000, 0x2); /* Reg874[15:14]=2'b10 */
+ PHY_SetBBReg(pDM_Odm->Adapter, 0xa74, 0xF000, 0x3); /* RegA75[7:4]= 0x3 */
+ PHY_SetBBReg(pDM_Odm->Adapter, 0x818, BIT28, 0x0); /* Reg818[28]= 1'b0 */
+ PHY_SetBBReg(pDM_Odm->Adapter, 0x818, BIT28, 0x1); /* Reg818[28]= 1'b1 */
+ } else {
+ PHY_SetBBReg(pDM_Odm->Adapter, 0x874, 0x1CC000, pDM_PSTable->Reg874);
+ PHY_SetBBReg(pDM_Odm->Adapter, 0xc70, BIT3, pDM_PSTable->RegC70);
+ PHY_SetBBReg(pDM_Odm->Adapter, 0x85c, 0xFF000000, pDM_PSTable->Reg85C);
+ PHY_SetBBReg(pDM_Odm->Adapter, 0xa74, 0xF000, pDM_PSTable->RegA74);
+ PHY_SetBBReg(pDM_Odm->Adapter, 0x818, BIT28, 0x0);
+ }
+ pDM_PSTable->PreRFState = pDM_PSTable->CurRFState;
+ }
+}
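+
+/*
+ * Illustrative hysteresis for ODM_RF_Saving() with the default bounds (the
+ * RSSI trace is assumed): starting in RF_Normal, RSSI_Min reaching 30 switches
+ * to RF_Save, and the state only returns to RF_Normal once RSSI_Min falls to
+ * 25 or below, so values between 26 and 29 keep whichever state was entered
+ * last. For PatchID 40 (FUNAI TV) the same behaviour applies at 50/45.
+ */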
diff --git a/drivers/staging/rtl8723bs/hal/odm_DynamicBBPowerSaving.h b/drivers/staging/rtl8723bs/hal/odm_DynamicBBPowerSaving.h
new file mode 100644
index 000000000000..b435cae4ab63
--- /dev/null
+++ b/drivers/staging/rtl8723bs/hal/odm_DynamicBBPowerSaving.h
@@ -0,0 +1,39 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+
+#ifndef __ODMDYNAMICBBPOWERSAVING_H__
+#define __ODMDYNAMICBBPOWERSAVING_H__
+
+typedef struct _Dynamic_Power_Saving_ {
+ u8 PreCCAState;
+ u8 CurCCAState;
+
+ u8 PreRFState;
+ u8 CurRFState;
+
+ int Rssi_val_min;
+
+ u8 initialize;
+ u32 Reg874, RegC70, Reg85C, RegA74;
+
+} PS_T, *pPS_T;
+
+#define dm_RF_Saving ODM_RF_Saving
+
+void ODM_RF_Saving(void *pDM_VOID, u8 bForceInNormal);
+
+void odm_DynamicBBPowerSavingInit(void *pDM_VOID);
+
+#endif
diff --git a/drivers/staging/rtl8723bs/hal/odm_DynamicTxPower.c b/drivers/staging/rtl8723bs/hal/odm_DynamicTxPower.c
new file mode 100644
index 000000000000..d24e5f712c20
--- /dev/null
+++ b/drivers/staging/rtl8723bs/hal/odm_DynamicTxPower.c
@@ -0,0 +1,30 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+
+#include "odm_precomp.h"
+
+void odm_DynamicTxPowerInit(void *pDM_VOID)
+{
+ PDM_ODM_T pDM_Odm = (PDM_ODM_T)pDM_VOID;
+
+ struct adapter *Adapter = pDM_Odm->Adapter;
+ struct hal_com_data *pHalData = GET_HAL_DATA(Adapter);
+ struct dm_priv *pdmpriv = &pHalData->dmpriv;
+
+ pdmpriv->bDynamicTxPowerEnable = false;
+
+ pdmpriv->LastDTPLvl = TxHighPwrLevel_Normal;
+ pdmpriv->DynamicTxHighPowerLvl = TxHighPwrLevel_Normal;
+}
diff --git a/drivers/staging/rtl8723bs/hal/odm_DynamicTxPower.h b/drivers/staging/rtl8723bs/hal/odm_DynamicTxPower.h
new file mode 100644
index 000000000000..35cdbab6d965
--- /dev/null
+++ b/drivers/staging/rtl8723bs/hal/odm_DynamicTxPower.h
@@ -0,0 +1,37 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+
+#ifndef __ODMDYNAMICTXPOWER_H__
+#define __ODMDYNAMICTXPOWER_H__
+
+#define TX_POWER_NEAR_FIELD_THRESH_LVL2 74
+#define TX_POWER_NEAR_FIELD_THRESH_LVL1 67
+#define TX_POWER_NEAR_FIELD_THRESH_AP 0x3F
+#define TX_POWER_NEAR_FIELD_THRESH_8812 60
+
+#define TxHighPwrLevel_Normal 0
+#define TxHighPwrLevel_Level1 1
+#define TxHighPwrLevel_Level2 2
+#define TxHighPwrLevel_BT1 3
+#define TxHighPwrLevel_BT2 4
+#define TxHighPwrLevel_15 5
+#define TxHighPwrLevel_35 6
+#define TxHighPwrLevel_50 7
+#define TxHighPwrLevel_70 8
+#define TxHighPwrLevel_100 9
+
+void odm_DynamicTxPowerInit(void *pDM_VOID);
+
+#endif
diff --git a/drivers/staging/rtl8723bs/hal/odm_EdcaTurboCheck.c b/drivers/staging/rtl8723bs/hal/odm_EdcaTurboCheck.c
new file mode 100644
index 000000000000..8d53cb73cea5
--- /dev/null
+++ b/drivers/staging/rtl8723bs/hal/odm_EdcaTurboCheck.c
@@ -0,0 +1,186 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+
+#include "odm_precomp.h"
+
+static u32 edca_setting_DL_GMode[HT_IOT_PEER_MAX] = {
+/*UNKNOWN, REALTEK_90, REALTEK_92SE, BROADCOM, RALINK, ATHEROS,
+ *CISCO, MERU, MARVELL, 92U_AP, SELF_AP
+ */
+ 0x4322, 0xa44f, 0x5e4322, 0xa42b, 0x5e4322, 0x4322,
+ 0xa42b, 0x5ea42b, 0xa44f, 0x5e4322, 0x5ea42b
+};
+
+static u32 edca_setting_UL[HT_IOT_PEER_MAX] = {
+/*UNKNOWN, REALTEK_90, REALTEK_92SE, BROADCOM, RALINK, ATHEROS,
+ *CISCO, MERU, MARVELL, 92U_AP, SELF_AP(UpLink/Tx)
+ */
+ 0x5e4322, 0xa44f, 0x5e4322, 0x5ea32b, 0x5ea422, 0x5ea322,
+ 0x3ea430, 0x5ea42b, 0x5ea44f, 0x5e4322, 0x5e4322};
+
+static u32 edca_setting_DL[HT_IOT_PEER_MAX] = {
+/*UNKNOWN, REALTEK_90, REALTEK_92SE, BROADCOM, RALINK, ATHEROS,
+ *CISCO, MERU, MARVELL, 92U_AP, SELF_AP(DownLink/Rx)
+ */
+ 0xa44f, 0x5ea44f, 0x5e4322, 0x5ea42b, 0xa44f, 0xa630,
+ 0x5ea630, 0x5ea42b, 0xa44f, 0xa42b, 0xa42b};
+
+void ODM_EdcaTurboInit(void *pDM_VOID)
+{
+ PDM_ODM_T pDM_Odm = (PDM_ODM_T)pDM_VOID;
+ struct adapter *Adapter = pDM_Odm->Adapter;
+
+ pDM_Odm->DM_EDCA_Table.bCurrentTurboEDCA = false;
+ pDM_Odm->DM_EDCA_Table.bIsCurRDLState = false;
+ Adapter->recvpriv.bIsAnyNonBEPkts = false;
+
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_EDCA_TURBO, ODM_DBG_LOUD,
+ ("Original VO PARAM: 0x%x\n",
+ rtw_read32(pDM_Odm->Adapter, ODM_EDCA_VO_PARAM)));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_EDCA_TURBO, ODM_DBG_LOUD,
+ ("Original VI PARAM: 0x%x\n",
+ rtw_read32(pDM_Odm->Adapter, ODM_EDCA_VI_PARAM)));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_EDCA_TURBO, ODM_DBG_LOUD,
+ ("Original BE PARAM: 0x%x\n",
+ rtw_read32(pDM_Odm->Adapter, ODM_EDCA_BE_PARAM)));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_EDCA_TURBO, ODM_DBG_LOUD,
+ ("Original BK PARAM: 0x%x\n",
+ rtw_read32(pDM_Odm->Adapter, ODM_EDCA_BK_PARAM)));
+} /* ODM_InitEdcaTurbo */
+
+void odm_EdcaTurboCheck(void *pDM_VOID)
+{
+ /* In HW integration first stage, we provide 4 different handles to
+ * operate at the same time. In stage 2/3, we need to provide a universal
+ * interface and merge all HW dynamic mechanisms.
+ */
+ PDM_ODM_T pDM_Odm = (PDM_ODM_T)pDM_VOID;
+
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_EDCA_TURBO, ODM_DBG_LOUD,
+ ("odm_EdcaTurboCheck ========================>\n"));
+
+ if (!(pDM_Odm->SupportAbility & ODM_MAC_EDCA_TURBO))
+ return;
+
+ odm_EdcaTurboCheckCE(pDM_Odm);
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_EDCA_TURBO, ODM_DBG_LOUD,
+ ("<========================odm_EdcaTurboCheck\n"));
+} /* odm_CheckEdcaTurbo */
+
+void odm_EdcaTurboCheckCE(void *pDM_VOID)
+{
+ PDM_ODM_T pDM_Odm = (PDM_ODM_T)pDM_VOID;
+ struct adapter *Adapter = pDM_Odm->Adapter;
+ struct dvobj_priv *pdvobjpriv = adapter_to_dvobj(Adapter);
+ struct recv_priv *precvpriv = &(Adapter->recvpriv);
+ struct registry_priv *pregpriv = &Adapter->registrypriv;
+ struct mlme_ext_priv *pmlmeext = &(Adapter->mlmeextpriv);
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ u32 EDCA_BE_UL = 0x5ea42b;
+ u32 EDCA_BE_DL = 0x5ea42b;
+ u32 iot_peer = 0;
+ u8 wirelessmode = 0xFF; /* invalid value */
+ u32 trafficIndex;
+ u32 edca_param;
+ u64 cur_tx_bytes = 0;
+ u64 cur_rx_bytes = 0;
+ u8 bbtchange = false;
+ u8 biasonrx = false;
+ struct hal_com_data *pHalData = GET_HAL_DATA(Adapter);
+
+ if (!pDM_Odm->bLinked) {
+ precvpriv->bIsAnyNonBEPkts = false;
+ return;
+ }
+
+ if (pregpriv->wifi_spec == 1) {
+ precvpriv->bIsAnyNonBEPkts = false;
+ return;
+ }
+
+ if (pDM_Odm->pwirelessmode)
+ wirelessmode = *(pDM_Odm->pwirelessmode);
+
+ iot_peer = pmlmeinfo->assoc_AP_vendor;
+
+ if (iot_peer >= HT_IOT_PEER_MAX) {
+ precvpriv->bIsAnyNonBEPkts = false;
+ return;
+ }
+
+ /* Check if the status needs to be changed. */
+ if ((bbtchange) || (!precvpriv->bIsAnyNonBEPkts)) {
+ cur_tx_bytes = pdvobjpriv->traffic_stat.cur_tx_bytes;
+ cur_rx_bytes = pdvobjpriv->traffic_stat.cur_rx_bytes;
+
+ /* traffic, TX or RX */
+ if (biasonrx) {
+ if (cur_tx_bytes > (cur_rx_bytes << 2)) {
+ /* Uplink TP is present. */
+ trafficIndex = UP_LINK;
+ } else { /* Balance TP is present. */
+ trafficIndex = DOWN_LINK;
+ }
+ } else {
+ if (cur_rx_bytes > (cur_tx_bytes << 2)) {
+ /* Downlink TP is present. */
+ trafficIndex = DOWN_LINK;
+ } else { /* Balance TP is present. */
+ trafficIndex = UP_LINK;
+ }
+ }
+
+ /* 92D txop can't be set to 0x3e for cisco1250 */
+ if ((iot_peer == HT_IOT_PEER_CISCO) &&
+ (wirelessmode == ODM_WM_N24G)) {
+ EDCA_BE_DL = edca_setting_DL[iot_peer];
+ EDCA_BE_UL = edca_setting_UL[iot_peer];
+ } else if ((iot_peer == HT_IOT_PEER_CISCO) &&
+ ((wirelessmode == ODM_WM_G) ||
+ (wirelessmode == (ODM_WM_B | ODM_WM_G)) ||
+ (wirelessmode == ODM_WM_A) ||
+ (wirelessmode == ODM_WM_B))) {
+ EDCA_BE_DL = edca_setting_DL_GMode[iot_peer];
+ } else if ((iot_peer == HT_IOT_PEER_AIRGO) &&
+ ((wirelessmode == ODM_WM_G) ||
+ (wirelessmode == ODM_WM_A))) {
+ EDCA_BE_DL = 0xa630;
+ } else if (iot_peer == HT_IOT_PEER_MARVELL) {
+ EDCA_BE_DL = edca_setting_DL[iot_peer];
+ EDCA_BE_UL = edca_setting_UL[iot_peer];
+ } else if (iot_peer == HT_IOT_PEER_ATHEROS) {
+ /* Set DL EDCA for Atheros peer to 0x3ea42b. */
+ EDCA_BE_DL = edca_setting_DL[iot_peer];
+ }
+
+ if (trafficIndex == DOWN_LINK)
+ edca_param = EDCA_BE_DL;
+ else
+ edca_param = EDCA_BE_UL;
+
+ rtw_write32(Adapter, REG_EDCA_BE_PARAM, edca_param);
+
+ pDM_Odm->DM_EDCA_Table.prv_traffic_idx = trafficIndex;
+
+ pDM_Odm->DM_EDCA_Table.bCurrentTurboEDCA = true;
+ } else {
+ /* Turn Off EDCA turbo here. */
+ /* Restore original EDCA according to the declaration of AP. */
+ if (pDM_Odm->DM_EDCA_Table.bCurrentTurboEDCA) {
+ rtw_write32(Adapter, REG_EDCA_BE_PARAM, pHalData->AcParam_BE);
+ pDM_Odm->DM_EDCA_Table.bCurrentTurboEDCA = false;
+ }
+ }
+}
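+
+/*
+ * Worked example for the traffic-direction test above (the byte counts are
+ * assumed): with biasonrx == false, cur_tx_bytes = 1 MB and
+ * cur_rx_bytes = 5 MB, cur_rx_bytes > (cur_tx_bytes << 2) holds, so
+ * trafficIndex = DOWN_LINK and EDCA_BE_DL (0x5ea42b by default, or the
+ * per-vendor value from edca_setting_DL[]) is written to REG_EDCA_BE_PARAM.
+ */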
diff --git a/drivers/staging/rtl8723bs/hal/odm_EdcaTurboCheck.h b/drivers/staging/rtl8723bs/hal/odm_EdcaTurboCheck.h
new file mode 100644
index 000000000000..b0590abe87f0
--- /dev/null
+++ b/drivers/staging/rtl8723bs/hal/odm_EdcaTurboCheck.h
@@ -0,0 +1,31 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+
+#ifndef __ODMEDCATURBOCHECK_H__
+#define __ODMEDCATURBOCHECK_H__
+
+typedef struct _EDCA_TURBO_ {
+ bool bCurrentTurboEDCA;
+ bool bIsCurRDLState;
+
+ u32 prv_traffic_idx; /* edca turbo */
+} EDCA_T, *pEDCA_T;
+
+void odm_EdcaTurboCheck(void *pDM_VOID);
+void ODM_EdcaTurboInit(void *pDM_VOID);
+
+void odm_EdcaTurboCheckCE(void *pDM_VOID);
+
+#endif
diff --git a/drivers/staging/rtl8723bs/hal/odm_HWConfig.c b/drivers/staging/rtl8723bs/hal/odm_HWConfig.c
new file mode 100644
index 000000000000..ba2700135b60
--- /dev/null
+++ b/drivers/staging/rtl8723bs/hal/odm_HWConfig.c
@@ -0,0 +1,535 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+
+#include "odm_precomp.h"
+
+#define READ_AND_CONFIG_MP(ic, txt) (ODM_ReadAndConfig_MP_##ic##txt(pDM_Odm))
+#define READ_AND_CONFIG READ_AND_CONFIG_MP
+#define GET_VERSION_MP(ic, txt) (ODM_GetVersion_MP_##ic##txt())
+#define GET_VERSION(ic, txt) (pDM_Odm->bIsMPChip?GET_VERSION_MP(ic, txt):GET_VERSION_TC(ic, txt))
+
+static u8 odm_QueryRxPwrPercentage(s8 AntPower)
+{
+ if ((AntPower <= -100) || (AntPower >= 20))
+ return 0;
+ else if (AntPower >= 0)
+ return 100;
+ else
+ return (100+AntPower);
+
+}
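+
+/*
+ * Example mapping for odm_QueryRxPwrPercentage() (the input is illustrative):
+ * AntPower = -60 dBm returns 100 + (-60) = 40 percent, while anything at or
+ * below -100 dBm, or at or above 20 dBm, returns 0.
+ */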
+
+static s32 odm_SignalScaleMapping_92CSeries(PDM_ODM_T pDM_Odm, s32 CurrSig)
+{
+ s32 RetSig = 0;
+
+ if (pDM_Odm->SupportInterface == ODM_ITRF_SDIO) {
+ if (CurrSig >= 51 && CurrSig <= 100)
+ RetSig = 100;
+ else if (CurrSig >= 41 && CurrSig <= 50)
+ RetSig = 80 + ((CurrSig - 40)*2);
+ else if (CurrSig >= 31 && CurrSig <= 40)
+ RetSig = 66 + (CurrSig - 30);
+ else if (CurrSig >= 21 && CurrSig <= 30)
+ RetSig = 54 + (CurrSig - 20);
+ else if (CurrSig >= 10 && CurrSig <= 20)
+ RetSig = 42 + (((CurrSig - 10) * 2) / 3);
+ else if (CurrSig >= 5 && CurrSig <= 9)
+ RetSig = 22 + (((CurrSig - 5) * 3) / 2);
+ else if (CurrSig >= 1 && CurrSig <= 4)
+ RetSig = 6 + (((CurrSig - 1) * 3) / 2);
+ else
+ RetSig = CurrSig;
+ }
+
+ return RetSig;
+}
+
+s32 odm_SignalScaleMapping(PDM_ODM_T pDM_Odm, s32 CurrSig)
+{
+ return odm_SignalScaleMapping_92CSeries(pDM_Odm, CurrSig);
+}
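+
+/*
+ * Example for the SDIO scale mapping above (CurrSig is an assumed value):
+ * CurrSig = 35 falls in the 31..40 band, so RetSig = 66 + (35 - 30) = 71.
+ * For any interface other than ODM_ITRF_SDIO the function returns the
+ * initial RetSig of 0.
+ */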
+
+static u8 odm_EVMdbToPercentage(s8 Value)
+{
+ /* */
+ /* -33dB~0dB to 0%~99% */
+ /* */
+ s8 ret_val;
+
+ ret_val = Value;
+ ret_val /= 2;
+
+ /* DbgPrint("Value =%d\n", Value); */
+ /* ODM_RT_DISP(FRX, RX_PHY_SQ, ("EVMdbToPercentage92C Value =%d / %x\n", ret_val, ret_val)); */
+
+ if (ret_val >= 0)
+ ret_val = 0;
+ if (ret_val <= -33)
+ ret_val = -33;
+
+ ret_val = 0 - ret_val;
+ ret_val *= 3;
+
+ if (ret_val == 99)
+ ret_val = 100;
+
+ return ret_val;
+}
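+
+/*
+ * Example conversion for odm_EVMdbToPercentage() (the input is illustrative):
+ * Value = -20 becomes -10 after the divide by 2, stays within [-33, 0], and
+ * maps to (0 - (-10)) * 3 = 30 percent; an input of -66 saturates at -33 and
+ * yields 99, which is then rounded up to 100.
+ */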
+
+static void odm_RxPhyStatus92CSeries_Parsing(
+ PDM_ODM_T pDM_Odm,
+ PODM_PHY_INFO_T pPhyInfo,
+ u8 *pPhyStatus,
+ PODM_PACKET_INFO_T pPktinfo
+)
+{
+ u8 i, Max_spatial_stream;
+ s8 rx_pwr[4], rx_pwr_all = 0;
+ u8 EVM, PWDB_ALL = 0, PWDB_ALL_BT;
+ u8 RSSI, total_rssi = 0;
+ bool isCCKrate = false;
+ u8 rf_rx_num = 0;
+ u8 cck_highpwr = 0;
+ u8 LNA_idx, VGA_idx;
+ PPHY_STATUS_RPT_8192CD_T pPhyStaRpt = (PPHY_STATUS_RPT_8192CD_T)pPhyStatus;
+
+ isCCKrate = (pPktinfo->DataRate <= DESC_RATE11M) ? true : false;
+ pPhyInfo->RxMIMOSignalQuality[ODM_RF_PATH_A] = -1;
+ pPhyInfo->RxMIMOSignalQuality[ODM_RF_PATH_B] = -1;
+
+
+ if (isCCKrate) {
+ u8 cck_agc_rpt;
+
+ pDM_Odm->PhyDbgInfo.NumQryPhyStatusCCK++;
+ /* */
+ /* (1)Hardware does not provide RSSI for CCK */
+ /* (2)PWDB, Average PWDB calculated by hardware (for rate adaptive) */
+ /* */
+
+ /* if (pHalData->eRFPowerState == eRfOn) */
+ cck_highpwr = pDM_Odm->bCckHighPower;
+ /* else */
+ /* cck_highpwr = false; */
+
+ cck_agc_rpt = pPhyStaRpt->cck_agc_rpt_ofdm_cfosho_a;
+
+ /* 2011.11.28 LukeLee: 88E use different LNA & VGA gain table */
+ /* The RSSI formula should be modified according to the gain table */
+ /* In 88E, cck_highpwr is always set to 1 */
+ LNA_idx = ((cck_agc_rpt & 0xE0)>>5);
+ VGA_idx = (cck_agc_rpt & 0x1F);
+ rx_pwr_all = odm_CCKRSSI_8723B(LNA_idx, VGA_idx);
+ PWDB_ALL = odm_QueryRxPwrPercentage(rx_pwr_all);
+ if (PWDB_ALL > 100)
+ PWDB_ALL = 100;
+
+ pPhyInfo->RxPWDBAll = PWDB_ALL;
+ pPhyInfo->BTRxRSSIPercentage = PWDB_ALL;
+ pPhyInfo->RecvSignalPower = rx_pwr_all;
+ /* */
+ /* (3) Get Signal Quality (EVM) */
+ /* */
+ /* if (pPktinfo->bPacketMatchBSSID) */
+ {
+ u8 SQ, SQ_rpt;
+
+ if (pPhyInfo->RxPWDBAll > 40 && !pDM_Odm->bInHctTest)
+ SQ = 100;
+ else {
+ SQ_rpt = pPhyStaRpt->cck_sig_qual_ofdm_pwdb_all;
+
+ if (SQ_rpt > 64)
+ SQ = 0;
+ else if (SQ_rpt < 20)
+ SQ = 100;
+ else
+ SQ = ((64-SQ_rpt) * 100) / 44;
+
+ }
+
+ /* DbgPrint("cck SQ = %d\n", SQ); */
+ pPhyInfo->SignalQuality = SQ;
+ pPhyInfo->RxMIMOSignalQuality[ODM_RF_PATH_A] = SQ;
+ pPhyInfo->RxMIMOSignalQuality[ODM_RF_PATH_B] = -1;
+ }
+ } else { /* is OFDM rate */
+ pDM_Odm->PhyDbgInfo.NumQryPhyStatusOFDM++;
+
+ /* */
+ /* (1)Get RSSI for HT rate */
+ /* */
+
+ for (i = ODM_RF_PATH_A; i < ODM_RF_PATH_MAX; i++) {
+ /* 2008/01/30 MH we will judge RF RX path now. */
+ if (pDM_Odm->RFPathRxEnable & BIT(i))
+ rf_rx_num++;
+ /* else */
+ /* continue; */
+
+ rx_pwr[i] = ((pPhyStaRpt->path_agc[i].gain&0x3F)*2) - 110;
+
+
+ pPhyInfo->RxPwr[i] = rx_pwr[i];
+
+ /* Translate DBM to percentage. */
+ RSSI = odm_QueryRxPwrPercentage(rx_pwr[i]);
+ total_rssi += RSSI;
+ /* RT_DISP(FRX, RX_PHY_SS, ("RF-%d RXPWR =%x RSSI =%d\n", i, rx_pwr[i], RSSI)); */
+
+ pPhyInfo->RxMIMOSignalStrength[i] = (u8) RSSI;
+
+ /* Get Rx snr value in DB */
+ pPhyInfo->RxSNR[i] = pDM_Odm->PhyDbgInfo.RxSNRdB[i] = (s32)(pPhyStaRpt->path_rxsnr[i]/2);
+ }
+
+
+ /* */
+ /* (2)PWDB, Average PWDB calculated by hardware (for rate adaptive) */
+ /* */
+ rx_pwr_all = (((pPhyStaRpt->cck_sig_qual_ofdm_pwdb_all) >> 1)&0x7f)-110;
+
+ PWDB_ALL_BT = PWDB_ALL = odm_QueryRxPwrPercentage(rx_pwr_all);
+ /* RT_DISP(FRX, RX_PHY_SS, ("PWDB_ALL =%d\n", PWDB_ALL)); */
+
+ pPhyInfo->RxPWDBAll = PWDB_ALL;
+ /* ODM_RT_TRACE(pDM_Odm, ODM_COMP_RSSI_MONITOR, ODM_DBG_LOUD, ("ODM OFDM RSSI =%d\n", pPhyInfo->RxPWDBAll)); */
+ pPhyInfo->BTRxRSSIPercentage = PWDB_ALL_BT;
+ pPhyInfo->RxPower = rx_pwr_all;
+ pPhyInfo->RecvSignalPower = rx_pwr_all;
+
+ {/* pMgntInfo->CustomerID != RT_CID_819x_Lenovo */
+ /* */
+ /* (3)EVM of HT rate */
+ /* */
+ if (pPktinfo->DataRate >= DESC_RATEMCS8 && pPktinfo->DataRate <= DESC_RATEMCS15)
+ Max_spatial_stream = 2; /* both spatial stream make sense */
+ else
+ Max_spatial_stream = 1; /* only spatial stream 1 makes sense */
+
+ for (i = 0; i < Max_spatial_stream; i++) {
+ /* Do not use a shift operation like "rx_evmX >>= 1", because the compiler of the free build environment */
+ /* fills the most significant bit with "zero" when shifting, which may change a negative */
+ /* value to a positive one; the dBm value (which is supposed to be negative) would then no longer be correct. */
+ EVM = odm_EVMdbToPercentage((pPhyStaRpt->stream_rxevm[i])); /* dbm */
+
+ /* RT_DISP(FRX, RX_PHY_SQ, ("RXRATE =%x RXEVM =%x EVM =%s%d\n", */
+ /* GET_RX_STATUS_DESC_RX_MCS(pDesc), pDrvInfo->rxevm[i], "%", EVM)); */
+
+ /* if (pPktinfo->bPacketMatchBSSID) */
+ {
+ if (i == ODM_RF_PATH_A) /* Fill value in RFD, Get the first spatial stream only */
+ pPhyInfo->SignalQuality = (u8)(EVM & 0xff);
+
+ pPhyInfo->RxMIMOSignalQuality[i] = (u8)(EVM & 0xff);
+ }
+ }
+ }
+
+ ODM_ParsingCFO(pDM_Odm, pPktinfo, pPhyStaRpt->path_cfotail);
+
+ }
+
+ /* UI BSS List signal strength(in percentage), make it good looking, from 0~100. */
+ /* It is assigned to the BSS List in GetValueFromBeaconOrProbeRsp(). */
+ if (isCCKrate) {
+#ifdef CONFIG_SKIP_SIGNAL_SCALE_MAPPING
+ pPhyInfo->SignalStrength = (u8)PWDB_ALL;
+#else
+ pPhyInfo->SignalStrength = (u8)(odm_SignalScaleMapping(pDM_Odm, PWDB_ALL));/* PWDB_ALL; */
+#endif
+ } else {
+ if (rf_rx_num != 0) {
+#ifdef CONFIG_SKIP_SIGNAL_SCALE_MAPPING
+ total_rssi /= rf_rx_num;
+ pPhyInfo->SignalStrength = (u8)total_rssi;
+#else
+ pPhyInfo->SignalStrength = (u8)(odm_SignalScaleMapping(pDM_Odm, total_rssi /= rf_rx_num));
+#endif
+ }
+ }
+
+ /* DbgPrint("isCCKrate = %d, pPhyInfo->RxPWDBAll = %d, pPhyStaRpt->cck_agc_rpt_ofdm_cfosho_a = 0x%x\n", */
+ /* isCCKrate, pPhyInfo->RxPWDBAll, pPhyStaRpt->cck_agc_rpt_ofdm_cfosho_a); */
+}
+
+static void odm_Process_RSSIForDM(
+ PDM_ODM_T pDM_Odm, PODM_PHY_INFO_T pPhyInfo, PODM_PACKET_INFO_T pPktinfo
+)
+{
+
+ s32 UndecoratedSmoothedPWDB, UndecoratedSmoothedCCK, UndecoratedSmoothedOFDM, RSSI_Ave;
+ u8 isCCKrate = 0;
+ u8 RSSI_max, RSSI_min, i;
+ u32 OFDM_pkt = 0;
+ u32 Weighting = 0;
+ PSTA_INFO_T pEntry;
+
+
+ if (pPktinfo->StationID == 0xFF)
+ return;
+
+ pEntry = pDM_Odm->pODM_StaInfo[pPktinfo->StationID];
+
+ if (!IS_STA_VALID(pEntry))
+ return;
+
+ if ((!pPktinfo->bPacketMatchBSSID))
+ return;
+
+ if (pPktinfo->bPacketBeacon)
+ pDM_Odm->PhyDbgInfo.NumQryBeaconPkt++;
+
+ isCCKrate = ((pPktinfo->DataRate <= DESC_RATE11M)) ? true : false;
+ pDM_Odm->RxRate = pPktinfo->DataRate;
+
+ /* Statistic for antenna/path diversity------------------ */
+ if (pDM_Odm->SupportAbility & ODM_BB_ANT_DIV) {
+
+ }
+
+ /* Smart Antenna Debug Message------------------ */
+
+ UndecoratedSmoothedCCK = pEntry->rssi_stat.UndecoratedSmoothedCCK;
+ UndecoratedSmoothedOFDM = pEntry->rssi_stat.UndecoratedSmoothedOFDM;
+ UndecoratedSmoothedPWDB = pEntry->rssi_stat.UndecoratedSmoothedPWDB;
+
+ if (pPktinfo->bPacketToSelf || pPktinfo->bPacketBeacon) {
+
+ if (!isCCKrate) { /* ofdm rate */
+ if (pPhyInfo->RxMIMOSignalStrength[ODM_RF_PATH_B] == 0) {
+ RSSI_Ave = pPhyInfo->RxMIMOSignalStrength[ODM_RF_PATH_A];
+ pDM_Odm->RSSI_A = pPhyInfo->RxMIMOSignalStrength[ODM_RF_PATH_A];
+ pDM_Odm->RSSI_B = 0;
+ } else {
+ /* DbgPrint("pRfd->Status.RxMIMOSignalStrength[0] = %d, pRfd->Status.RxMIMOSignalStrength[1] = %d\n", */
+ /* pRfd->Status.RxMIMOSignalStrength[0], pRfd->Status.RxMIMOSignalStrength[1]); */
+ pDM_Odm->RSSI_A = pPhyInfo->RxMIMOSignalStrength[ODM_RF_PATH_A];
+ pDM_Odm->RSSI_B = pPhyInfo->RxMIMOSignalStrength[ODM_RF_PATH_B];
+
+ if (
+ pPhyInfo->RxMIMOSignalStrength[ODM_RF_PATH_A] >
+ pPhyInfo->RxMIMOSignalStrength[ODM_RF_PATH_B]
+ ) {
+ RSSI_max = pPhyInfo->RxMIMOSignalStrength[ODM_RF_PATH_A];
+ RSSI_min = pPhyInfo->RxMIMOSignalStrength[ODM_RF_PATH_B];
+ } else {
+ RSSI_max = pPhyInfo->RxMIMOSignalStrength[ODM_RF_PATH_B];
+ RSSI_min = pPhyInfo->RxMIMOSignalStrength[ODM_RF_PATH_A];
+ }
+
+ if ((RSSI_max-RSSI_min) < 3)
+ RSSI_Ave = RSSI_max;
+ else if ((RSSI_max-RSSI_min) < 6)
+ RSSI_Ave = RSSI_max - 1;
+ else if ((RSSI_max-RSSI_min) < 10)
+ RSSI_Ave = RSSI_max - 2;
+ else
+ RSSI_Ave = RSSI_max - 3;
+ }
+
+ /* 1 Process OFDM RSSI */
+ if (UndecoratedSmoothedOFDM <= 0) /* initialize */
+ UndecoratedSmoothedOFDM = pPhyInfo->RxPWDBAll;
+ else {
+ if (pPhyInfo->RxPWDBAll > (u32)UndecoratedSmoothedOFDM) {
+ UndecoratedSmoothedOFDM =
+ ((UndecoratedSmoothedOFDM*(Rx_Smooth_Factor-1)) +
+ RSSI_Ave)/Rx_Smooth_Factor;
+ UndecoratedSmoothedOFDM = UndecoratedSmoothedOFDM + 1;
+ } else {
+ UndecoratedSmoothedOFDM =
+ ((UndecoratedSmoothedOFDM*(Rx_Smooth_Factor-1)) +
+ RSSI_Ave)/Rx_Smooth_Factor;
+ }
+ }
+
+ pEntry->rssi_stat.PacketMap = (pEntry->rssi_stat.PacketMap<<1) | BIT0;
+
+ } else {
+ RSSI_Ave = pPhyInfo->RxPWDBAll;
+ pDM_Odm->RSSI_A = (u8) pPhyInfo->RxPWDBAll;
+ pDM_Odm->RSSI_B = 0;
+
+ /* 1 Process CCK RSSI */
+ if (UndecoratedSmoothedCCK <= 0) /* initialize */
+ UndecoratedSmoothedCCK = pPhyInfo->RxPWDBAll;
+ else {
+ if (pPhyInfo->RxPWDBAll > (u32)UndecoratedSmoothedCCK) {
+ UndecoratedSmoothedCCK =
+ ((UndecoratedSmoothedCCK*(Rx_Smooth_Factor-1)) +
+ pPhyInfo->RxPWDBAll)/Rx_Smooth_Factor;
+ UndecoratedSmoothedCCK = UndecoratedSmoothedCCK + 1;
+ } else {
+ UndecoratedSmoothedCCK =
+ ((UndecoratedSmoothedCCK*(Rx_Smooth_Factor-1)) +
+ pPhyInfo->RxPWDBAll)/Rx_Smooth_Factor;
+ }
+ }
+ pEntry->rssi_stat.PacketMap = pEntry->rssi_stat.PacketMap<<1;
+ }
+
+ /* if (pEntry) */
+ {
+ /* 2011.07.28 LukeLee: modified to prevent unstable CCK RSSI */
+ if (pEntry->rssi_stat.ValidBit >= 64)
+ pEntry->rssi_stat.ValidBit = 64;
+ else
+ pEntry->rssi_stat.ValidBit++;
+
+ for (i = 0; i < pEntry->rssi_stat.ValidBit; i++)
+ OFDM_pkt += (u8)(pEntry->rssi_stat.PacketMap>>i)&BIT0;
+
+ if (pEntry->rssi_stat.ValidBit == 64) {
+ Weighting = ((OFDM_pkt<<4) > 64)?64:(OFDM_pkt<<4);
+ UndecoratedSmoothedPWDB = (Weighting*UndecoratedSmoothedOFDM+(64-Weighting)*UndecoratedSmoothedCCK)>>6;
+ } else {
+ if (pEntry->rssi_stat.ValidBit != 0)
+ UndecoratedSmoothedPWDB = (OFDM_pkt*UndecoratedSmoothedOFDM+(pEntry->rssi_stat.ValidBit-OFDM_pkt)*UndecoratedSmoothedCCK)/pEntry->rssi_stat.ValidBit;
+ else
+ UndecoratedSmoothedPWDB = 0;
+ }
+
+ pEntry->rssi_stat.UndecoratedSmoothedCCK = UndecoratedSmoothedCCK;
+ pEntry->rssi_stat.UndecoratedSmoothedOFDM = UndecoratedSmoothedOFDM;
+ pEntry->rssi_stat.UndecoratedSmoothedPWDB = UndecoratedSmoothedPWDB;
+
+ /* DbgPrint("OFDM_pkt =%d, Weighting =%d\n", OFDM_pkt, Weighting); */
+ /* DbgPrint("UndecoratedSmoothedOFDM =%d, UndecoratedSmoothedPWDB =%d, UndecoratedSmoothedCCK =%d\n", */
+ /* UndecoratedSmoothedOFDM, UndecoratedSmoothedPWDB, UndecoratedSmoothedCCK); */
+
+ }
+
+ }
+}
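+
+/*
+ * Illustrative weighting for the OFDM/CCK blend above (the packet history is
+ * assumed): with a full 64-entry PacketMap containing OFDM_pkt = 2 OFDM
+ * frames, Weighting = 2 << 4 = 32 and UndecoratedSmoothedPWDB becomes
+ * (32 * OFDM + 32 * CCK) >> 6, i.e. the midpoint of the two smoothed values;
+ * with four or more OFDM frames the weighting saturates at 64 and the OFDM
+ * average is used alone.
+ */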
+
+
+/* */
+/* Endianness before calling this API */
+/* */
+static void ODM_PhyStatusQuery_92CSeries(
+ PDM_ODM_T pDM_Odm,
+ PODM_PHY_INFO_T pPhyInfo,
+ u8 *pPhyStatus,
+ PODM_PACKET_INFO_T pPktinfo
+)
+{
+
+ odm_RxPhyStatus92CSeries_Parsing(pDM_Odm, pPhyInfo, pPhyStatus, pPktinfo);
+
+ if (!pDM_Odm->RSSI_test)
+ odm_Process_RSSIForDM(pDM_Odm, pPhyInfo, pPktinfo);
+}
+
+void ODM_PhyStatusQuery(
+ PDM_ODM_T pDM_Odm,
+ PODM_PHY_INFO_T pPhyInfo,
+ u8 *pPhyStatus,
+ PODM_PACKET_INFO_T pPktinfo
+)
+{
+
+ ODM_PhyStatusQuery_92CSeries(pDM_Odm, pPhyInfo, pPhyStatus, pPktinfo);
+}
+
+/* */
+/* If you want to add a new IC, please follow the template below and generate a new one. */
+/* */
+/* */
+
+HAL_STATUS ODM_ConfigRFWithHeaderFile(
+ PDM_ODM_T pDM_Odm,
+ ODM_RF_Config_Type ConfigType,
+ ODM_RF_RADIO_PATH_E eRFPath
+)
+{
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_INIT, ODM_DBG_LOUD,
+ ("===>ODM_ConfigRFWithHeaderFile (%s)\n", (pDM_Odm->bIsMPChip) ? "MPChip" : "TestChip"));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_INIT, ODM_DBG_LOUD,
+ ("pDM_Odm->SupportPlatform: 0x%X, pDM_Odm->SupportInterface: 0x%X, pDM_Odm->BoardType: 0x%X\n",
+ pDM_Odm->SupportPlatform, pDM_Odm->SupportInterface, pDM_Odm->BoardType));
+
+ if (ConfigType == CONFIG_RF_RADIO)
+ READ_AND_CONFIG(8723B, _RadioA);
+ else if (ConfigType == CONFIG_RF_TXPWR_LMT)
+ READ_AND_CONFIG(8723B, _TXPWR_LMT);
+
+ return HAL_STATUS_SUCCESS;
+}
+
+HAL_STATUS ODM_ConfigRFWithTxPwrTrackHeaderFile(PDM_ODM_T pDM_Odm)
+{
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_INIT, ODM_DBG_LOUD,
+ ("===>ODM_ConfigRFWithTxPwrTrackHeaderFile (%s)\n", (pDM_Odm->bIsMPChip) ? "MPChip" : "TestChip"));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_INIT, ODM_DBG_LOUD,
+ ("pDM_Odm->SupportPlatform: 0x%X, pDM_Odm->SupportInterface: 0x%X, pDM_Odm->BoardType: 0x%X\n",
+ pDM_Odm->SupportPlatform, pDM_Odm->SupportInterface, pDM_Odm->BoardType));
+
+ if (pDM_Odm->SupportInterface == ODM_ITRF_SDIO)
+ READ_AND_CONFIG(8723B, _TxPowerTrack_SDIO);
+
+ return HAL_STATUS_SUCCESS;
+}
+
+HAL_STATUS ODM_ConfigBBWithHeaderFile(
+ PDM_ODM_T pDM_Odm, ODM_BB_Config_Type ConfigType
+)
+{
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_INIT, ODM_DBG_LOUD,
+ ("===>ODM_ConfigBBWithHeaderFile (%s)\n", (pDM_Odm->bIsMPChip) ? "MPChip" : "TestChip"));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_INIT, ODM_DBG_LOUD,
+ ("pDM_Odm->SupportPlatform: 0x%X, pDM_Odm->SupportInterface: 0x%X, pDM_Odm->BoardType: 0x%X\n",
+ pDM_Odm->SupportPlatform, pDM_Odm->SupportInterface, pDM_Odm->BoardType));
+
+ if (ConfigType == CONFIG_BB_PHY_REG)
+ READ_AND_CONFIG(8723B, _PHY_REG);
+ else if (ConfigType == CONFIG_BB_AGC_TAB)
+ READ_AND_CONFIG(8723B, _AGC_TAB);
+ else if (ConfigType == CONFIG_BB_PHY_REG_PG)
+ READ_AND_CONFIG(8723B, _PHY_REG_PG);
+
+ return HAL_STATUS_SUCCESS;
+}
+
+HAL_STATUS ODM_ConfigMACWithHeaderFile(PDM_ODM_T pDM_Odm)
+{
+ u8 result = HAL_STATUS_SUCCESS;
+
+ ODM_RT_TRACE(
+ pDM_Odm,
+ ODM_COMP_INIT,
+ ODM_DBG_LOUD,
+ (
+ "===>ODM_ConfigMACWithHeaderFile (%s)\n",
+ (pDM_Odm->bIsMPChip) ? "MPChip" : "TestChip"
+ )
+ );
+ ODM_RT_TRACE(
+ pDM_Odm,
+ ODM_COMP_INIT,
+ ODM_DBG_LOUD,
+ (
+ "pDM_Odm->SupportPlatform: 0x%X, pDM_Odm->SupportInterface: 0x%X, pDM_Odm->BoardType: 0x%X\n",
+ pDM_Odm->SupportPlatform,
+ pDM_Odm->SupportInterface,
+ pDM_Odm->BoardType
+ )
+ );
+
+ READ_AND_CONFIG(8723B, _MAC_REG);
+
+ return result;
+}
diff --git a/drivers/staging/rtl8723bs/hal/odm_HWConfig.h b/drivers/staging/rtl8723bs/hal/odm_HWConfig.h
new file mode 100644
index 000000000000..f029922d12f0
--- /dev/null
+++ b/drivers/staging/rtl8723bs/hal/odm_HWConfig.h
@@ -0,0 +1,162 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+
+
+#ifndef __HALHWOUTSRC_H__
+#define __HALHWOUTSRC_H__
+
+
+/*--------------------------Define -------------------------------------------*/
+/* define READ_NEXT_PAIR(v1, v2, i) do { i += 2; v1 = Array[i]; v2 = Array[i+1]; } while (0) */
+#define AGC_DIFF_CONFIG_MP(ic, band) (ODM_ReadAndConfig_MP_##ic##_AGC_TAB_DIFF(pDM_Odm, Array_MP_##ic##_AGC_TAB_DIFF_##band, \
+ sizeof(Array_MP_##ic##_AGC_TAB_DIFF_##band)/sizeof(u32)))
+#define AGC_DIFF_CONFIG_TC(ic, band) (ODM_ReadAndConfig_TC_##ic##_AGC_TAB_DIFF(pDM_Odm, Array_TC_##ic##_AGC_TAB_DIFF_##band, \
+ sizeof(Array_TC_##ic##_AGC_TAB_DIFF_##band)/sizeof(u32)))
+
+#define AGC_DIFF_CONFIG(ic, band)\
+ do {\
+ if (pDM_Odm->bIsMPChip)\
+ AGC_DIFF_CONFIG_MP(ic, band);\
+ else\
+ AGC_DIFF_CONFIG_TC(ic, band);\
+ } while (0)
+
+
+/* */
+/* structure and define */
+/* */
+
+typedef struct _Phy_Rx_AGC_Info {
+ #if (ODM_ENDIAN_TYPE == ODM_ENDIAN_LITTLE)
+ u8 gain:7, trsw:1;
+ #else
+ u8 trsw:1, gain:7;
+ #endif
+} PHY_RX_AGC_INFO_T, *pPHY_RX_AGC_INFO_T;
+
+typedef struct _Phy_Status_Rpt_8192cd {
+ PHY_RX_AGC_INFO_T path_agc[2];
+ u8 ch_corr[2];
+ u8 cck_sig_qual_ofdm_pwdb_all;
+ u8 cck_agc_rpt_ofdm_cfosho_a;
+ u8 cck_rpt_b_ofdm_cfosho_b;
+ u8 rsvd_1;/* ch_corr_msb; */
+ u8 noise_power_db_msb;
+ s8 path_cfotail[2];
+ u8 pcts_mask[2];
+ s8 stream_rxevm[2];
+ u8 path_rxsnr[2];
+ u8 noise_power_db_lsb;
+ u8 rsvd_2[3];
+ u8 stream_csi[2];
+ u8 stream_target_csi[2];
+ s8 sig_evm;
+ u8 rsvd_3;
+
+#if (ODM_ENDIAN_TYPE == ODM_ENDIAN_LITTLE)
+ u8 antsel_rx_keep_2:1; /* ex_intf_flg:1; */
+ u8 sgi_en:1;
+ u8 rxsc:2;
+ u8 idle_long:1;
+ u8 r_ant_train_en:1;
+ u8 ant_sel_b:1;
+ u8 ant_sel:1;
+#else /* _BIG_ENDIAN_ */
+ u8 ant_sel:1;
+ u8 ant_sel_b:1;
+ u8 r_ant_train_en:1;
+ u8 idle_long:1;
+ u8 rxsc:2;
+ u8 sgi_en:1;
+ u8 antsel_rx_keep_2:1; /* ex_intf_flg:1; */
+#endif
+} PHY_STATUS_RPT_8192CD_T, *PPHY_STATUS_RPT_8192CD_T;
+
+
+typedef struct _Phy_Status_Rpt_8812 {
+ /* 2012.05.24 LukeLee: This structure should take big/little endian into consideration later. */
+
+ /* DWORD 0 */
+ u8 gain_trsw[2];
+#if (ODM_ENDIAN_TYPE == ODM_ENDIAN_LITTLE)
+ u16 chl_num:10;
+ u16 sub_chnl:4;
+ u16 r_RFMOD:2;
+#else /* _BIG_ENDIAN_ */
+ u16 r_RFMOD:2;
+ u16 sub_chnl:4;
+ u16 chl_num:10;
+#endif
+
+ /* DWORD 1 */
+ u8 pwdb_all;
+ u8 cfosho[4]; /* DW 1 byte 1 DW 2 byte 0 */
+
+ /* DWORD 2 */
+ s8 cfotail[4]; /* DW 2 byte 1 DW 3 byte 0 */
+
+ /* DWORD 3 */
+ s8 rxevm[2]; /* DW 3 byte 1 DW 3 byte 2 */
+ s8 rxsnr[2]; /* DW 3 byte 3 DW 4 byte 0 */
+
+ /* DWORD 4 */
+ u8 PCTS_MSK_RPT[2];
+ u8 pdsnr[2]; /* DW 4 byte 3 DW 5 Byte 0 */
+
+ /* DWORD 5 */
+ u8 csi_current[2];
+ u8 rx_gain_c;
+
+ /* DWORD 6 */
+ u8 rx_gain_d;
+ s8 sigevm;
+ u8 resvd_0;
+ u8 antidx_anta:3;
+ u8 antidx_antb:3;
+ u8 resvd_1:2;
+} PHY_STATUS_RPT_8812_T, *PPHY_STATUS_RPT_8812_T;
+
+
+void ODM_PhyStatusQuery(
+ PDM_ODM_T pDM_Odm,
+ PODM_PHY_INFO_T pPhyInfo,
+ u8 *pPhyStatus,
+ PODM_PACKET_INFO_T pPktinfo
+);
+
+HAL_STATUS ODM_ConfigRFWithTxPwrTrackHeaderFile(PDM_ODM_T pDM_Odm);
+
+HAL_STATUS ODM_ConfigRFWithHeaderFile(
+ PDM_ODM_T pDM_Odm,
+ ODM_RF_Config_Type ConfigType,
+ ODM_RF_RADIO_PATH_E eRFPath
+);
+
+HAL_STATUS ODM_ConfigBBWithHeaderFile(
+ PDM_ODM_T pDM_Odm, ODM_BB_Config_Type ConfigType
+);
+
+HAL_STATUS ODM_ConfigMACWithHeaderFile(PDM_ODM_T pDM_Odm);
+
+HAL_STATUS ODM_ConfigFWWithHeaderFile(
+ PDM_ODM_T pDM_Odm,
+ ODM_FW_Config_Type ConfigType,
+ u8 *pFirmware,
+ u32 *pSize
+);
+
+s32 odm_SignalScaleMapping(PDM_ODM_T pDM_Odm, s32 CurrSig);
+
+#endif
diff --git a/drivers/staging/rtl8723bs/hal/odm_NoiseMonitor.c b/drivers/staging/rtl8723bs/hal/odm_NoiseMonitor.c
new file mode 100644
index 000000000000..af7b44e29092
--- /dev/null
+++ b/drivers/staging/rtl8723bs/hal/odm_NoiseMonitor.c
@@ -0,0 +1,175 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+
+#include "odm_precomp.h"
+
+/* This function is for inband noise test utility only */
+/* To obtain the inband noise level (dBm), do the following: */
+/* 1. disable DIG and Power Saving */
+/* 2. Set initial gain = 0x1a */
+/* 3. Stop updating idle time power report (for driver read) */
+/* - 0x80c[25] */
+
+#define Valid_Min -35
+#define Valid_Max 10
+#define ValidCnt 5
+
+static s16 odm_InbandNoise_Monitor_NSeries(
+ PDM_ODM_T pDM_Odm,
+ u8 bPauseDIG,
+ u8 IGIValue,
+ u32 max_time
+)
+{
+ u32 tmp4b;
+ u8 max_rf_path = 0, rf_path;
+ u8 reg_c50, reg_c58, valid_done = 0;
+ struct noise_level noise_data;
+ u32 start = 0, func_start = 0, func_end = 0;
+
+ func_start = jiffies;
+ pDM_Odm->noise_level.noise_all = 0;
+
+ if ((pDM_Odm->RFType == ODM_1T2R) || (pDM_Odm->RFType == ODM_2T2R))
+ max_rf_path = 2;
+ else
+ max_rf_path = 1;
+
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("odm_DebugControlInbandNoise_Nseries() ==>\n"));
+
+ memset(&noise_data, 0, sizeof(struct noise_level));
+
+ /* */
+ /* Step 1. Disable DIG && Set initial gain. */
+ /* */
+
+ if (bPauseDIG)
+ odm_PauseDIG(pDM_Odm, ODM_PAUSE_DIG, IGIValue);
+ /* */
+ /* Step 2. Disable all power save for read registers */
+ /* */
+ /* dcmd_DebugControlPowerSave(padapter, PSDisable); */
+
+ /* */
+ /* Step 3. Get noise power level */
+ /* */
+ start = jiffies;
+ while (1) {
+
+ /* Stop updating idle time power report (for driver read) */
+ PHY_SetBBReg(pDM_Odm->Adapter, rFPGA0_TxGainStage, BIT25, 1);
+
+ /* Read Noise Floor Report */
+ tmp4b = PHY_QueryBBReg(pDM_Odm->Adapter, 0x8f8, bMaskDWord);
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("Noise Floor Report (0x8f8) = 0x%08x\n", tmp4b));
+
+ /* PHY_SetBBReg(pDM_Odm->Adapter, rOFDM0_XAAGCCore1, bMaskByte0, TestInitialGain); */
+ /* if (max_rf_path == 2) */
+ /* PHY_SetBBReg(pDM_Odm->Adapter, rOFDM0_XBAGCCore1, bMaskByte0, TestInitialGain); */
+
+ /* update idle time power report every 5 us */
+ PHY_SetBBReg(pDM_Odm->Adapter, rFPGA0_TxGainStage, BIT25, 0);
+
+ noise_data.value[ODM_RF_PATH_A] = (u8)(tmp4b&0xff);
+ noise_data.value[ODM_RF_PATH_B] = (u8)((tmp4b&0xff00)>>8);
+
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("value_a = 0x%x(%d), value_b = 0x%x(%d)\n",
+ noise_data.value[ODM_RF_PATH_A], noise_data.value[ODM_RF_PATH_A], noise_data.value[ODM_RF_PATH_B], noise_data.value[ODM_RF_PATH_B]));
+
+ for (rf_path = ODM_RF_PATH_A; rf_path < max_rf_path; rf_path++) {
+ noise_data.sval[rf_path] = (s8)noise_data.value[rf_path];
+ noise_data.sval[rf_path] /= 2;
+ }
+
+
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("sval_a = %d, sval_b = %d\n",
+ noise_data.sval[ODM_RF_PATH_A], noise_data.sval[ODM_RF_PATH_B]));
+ /* mdelay(10); */
+ /* msleep(10); */
+
+ for (rf_path = ODM_RF_PATH_A; rf_path < max_rf_path; rf_path++) {
+ if ((noise_data.valid_cnt[rf_path] < ValidCnt) && (noise_data.sval[rf_path] < Valid_Max && noise_data.sval[rf_path] >= Valid_Min)) {
+ noise_data.valid_cnt[rf_path]++;
+ noise_data.sum[rf_path] += noise_data.sval[rf_path];
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("RF_Path:%d Valid sval = %d\n", rf_path, noise_data.sval[rf_path]));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("Sum of sval = %d,\n", noise_data.sum[rf_path]));
+ if (noise_data.valid_cnt[rf_path] == ValidCnt) {
+ valid_done++;
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("After divided, RF_Path:%d , sum = %d\n", rf_path, noise_data.sum[rf_path]));
+ }
+
+ }
+
+ }
+
+ /* printk("####### valid_done:%d #############\n", valid_done); */
+ if ((valid_done == max_rf_path) || (jiffies_to_msecs(jiffies - start) > max_time)) {
+ for (rf_path = ODM_RF_PATH_A; rf_path < max_rf_path; rf_path++) {
+ /* printk("%s PATH_%d - sum = %d, valid_cnt = %d\n", __func__, rf_path, noise_data.sum[rf_path], noise_data.valid_cnt[rf_path]); */
+ if (noise_data.valid_cnt[rf_path])
+ noise_data.sum[rf_path] /= noise_data.valid_cnt[rf_path];
+ else
+ noise_data.sum[rf_path] = 0;
+ }
+ break;
+ }
+ }
+ reg_c50 = (s32)PHY_QueryBBReg(pDM_Odm->Adapter, rOFDM0_XAAGCCore1, bMaskByte0);
+ reg_c50 &= ~BIT7;
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("0x%x = 0x%02x(%d)\n", rOFDM0_XAAGCCore1, reg_c50, reg_c50));
+ pDM_Odm->noise_level.noise[ODM_RF_PATH_A] = -110 + reg_c50 + noise_data.sum[ODM_RF_PATH_A];
+ pDM_Odm->noise_level.noise_all += pDM_Odm->noise_level.noise[ODM_RF_PATH_A];
+
+ if (max_rf_path == 2) {
+ reg_c58 = (s32)PHY_QueryBBReg(pDM_Odm->Adapter, rOFDM0_XBAGCCore1, bMaskByte0);
+ reg_c58 &= ~BIT7;
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("0x%x = 0x%02x(%d)\n", rOFDM0_XBAGCCore1, reg_c58, reg_c58));
+ pDM_Odm->noise_level.noise[ODM_RF_PATH_B] = -110 + reg_c58 + noise_data.sum[ODM_RF_PATH_B];
+ pDM_Odm->noise_level.noise_all += pDM_Odm->noise_level.noise[ODM_RF_PATH_B];
+ }
+ pDM_Odm->noise_level.noise_all /= max_rf_path;
+
+ ODM_RT_TRACE(
+ pDM_Odm,
+ ODM_COMP_COMMON,
+ ODM_DBG_LOUD,
+ (
+ "noise_a = %d, noise_b = %d\n",
+ pDM_Odm->noise_level.noise[ODM_RF_PATH_A],
+ pDM_Odm->noise_level.noise[ODM_RF_PATH_B]
+ )
+ );
+
+ /* */
+ /* Step 4. Recover the Dig */
+ /* */
+ if (bPauseDIG)
+ odm_PauseDIG(pDM_Odm, ODM_RESUME_DIG, IGIValue);
+
+ func_end = jiffies_to_msecs(jiffies - func_start);
+ /* printk("%s noise_a = %d, noise_b = %d noise_all:%d (%d ms)\n", __func__, */
+ /* pDM_Odm->noise_level.noise[ODM_RF_PATH_A], */
+ /* pDM_Odm->noise_level.noise[ODM_RF_PATH_B], */
+ /* pDM_Odm->noise_level.noise_all, func_end); */
+
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("odm_DebugControlInbandNoise_Nseries() <==\n"));
+ return pDM_Odm->noise_level.noise_all;
+
+}
+
+s16 ODM_InbandNoise_Monitor(void *pDM_VOID, u8 bPauseDIG, u8 IGIValue, u32 max_time)
+{
+ return odm_InbandNoise_Monitor_NSeries(pDM_VOID, bPauseDIG, IGIValue, max_time);
+}
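+
+/*
+ * Example of the noise computation above (the register values are assumed):
+ * with reg_c50 = 0x32 (an IGI of 50 after BIT7 is cleared) and an averaged
+ * path-A report of -20 (the half-dB samples are already divided by 2), the
+ * path-A noise floor is -110 + 50 + (-20) = -80 dBm.
+ */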
diff --git a/drivers/staging/rtl8723bs/hal/odm_NoiseMonitor.h b/drivers/staging/rtl8723bs/hal/odm_NoiseMonitor.h
new file mode 100644
index 000000000000..3f650912f4ab
--- /dev/null
+++ b/drivers/staging/rtl8723bs/hal/odm_NoiseMonitor.h
@@ -0,0 +1,47 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ *****************************************************************************/
+#ifndef __ODMNOISEMONITOR_H__
+#define __ODMNOISEMONITOR_H__
+
+#define ODM_MAX_CHANNEL_NUM	38 /* 14+24 */
+struct noise_level {
+ /* u8 value_a, value_b; */
+ u8 value[MAX_RF_PATH];
+ /* s8 sval_a, sval_b; */
+ s8 sval[MAX_RF_PATH];
+
+ /* s32 noise_a = 0, noise_b = 0, sum_a = 0, sum_b = 0; */
+ /* s32 noise[ODM_RF_PATH_MAX]; */
+ s32 sum[MAX_RF_PATH];
+ /* u8 valid_cnt_a = 0, valid_cnt_b = 0, */
+ u8 valid[MAX_RF_PATH];
+ u8 valid_cnt[MAX_RF_PATH];
+
+};
+
+
+typedef struct _ODM_NOISE_MONITOR_ {
+ s8 noise[MAX_RF_PATH];
+ s16 noise_all;
+} ODM_NOISE_MONITOR;
+
+s16 ODM_InbandNoise_Monitor(
+ void *pDM_VOID,
+ u8 bPauseDIG,
+ u8 IGIValue,
+ u32 max_time
+);
+
+#endif
diff --git a/drivers/staging/rtl8723bs/hal/odm_PathDiv.c b/drivers/staging/rtl8723bs/hal/odm_PathDiv.c
new file mode 100644
index 000000000000..2735ebfb6be3
--- /dev/null
+++ b/drivers/staging/rtl8723bs/hal/odm_PathDiv.c
@@ -0,0 +1,42 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+
+#include "odm_precomp.h"
+
+void odm_PathDiversityInit(void *pDM_VOID)
+{
+ PDM_ODM_T pDM_Odm = (PDM_ODM_T)pDM_VOID;
+
+ if (!(pDM_Odm->SupportAbility & ODM_BB_PATH_DIV))
+ ODM_RT_TRACE(
+ pDM_Odm,
+ ODM_COMP_PATH_DIV,
+ ODM_DBG_LOUD,
+ ("Return: Not Support PathDiv\n")
+ );
+}
+
+void odm_PathDiversity(void *pDM_VOID)
+{
+ PDM_ODM_T pDM_Odm = (PDM_ODM_T)pDM_VOID;
+
+ if (!(pDM_Odm->SupportAbility & ODM_BB_PATH_DIV))
+ ODM_RT_TRACE(
+ pDM_Odm,
+ ODM_COMP_PATH_DIV,
+ ODM_DBG_LOUD,
+ ("Return: Not Support PathDiv\n")
+ );
+}
diff --git a/drivers/staging/rtl8723bs/hal/odm_PathDiv.h b/drivers/staging/rtl8723bs/hal/odm_PathDiv.h
new file mode 100644
index 000000000000..becde2e2a9ee
--- /dev/null
+++ b/drivers/staging/rtl8723bs/hal/odm_PathDiv.h
@@ -0,0 +1,29 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+
+#ifndef __ODMPATHDIV_H__
+#define __ODMPATHDIV_H__
+
+void
+odm_PathDiversityInit(
+ void *pDM_VOID
+ );
+
+void
+odm_PathDiversity(
+ void *pDM_VOID
+ );
+
+#endif /* __ODMPATHDIV_H__ */
diff --git a/drivers/staging/rtl8723bs/hal/odm_RTL8723B.c b/drivers/staging/rtl8723bs/hal/odm_RTL8723B.c
new file mode 100644
index 000000000000..0e4ce2741737
--- /dev/null
+++ b/drivers/staging/rtl8723bs/hal/odm_RTL8723B.c
@@ -0,0 +1,45 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+
+#include "odm_precomp.h"
+
+s8 odm_CCKRSSI_8723B(u8 LNA_idx, u8 VGA_idx)
+{
+ s8 rx_pwr_all = 0x00;
+
+ switch (LNA_idx) {
+ /* 46 53 73 95 201301231630 */
+ /* 46 53 77 99 201301241630 */
+
+ case 6:
+ rx_pwr_all = -34 - (2 * VGA_idx);
+ break;
+ case 4:
+ rx_pwr_all = -14 - (2 * VGA_idx);
+ break;
+ case 1:
+ rx_pwr_all = 6 - (2 * VGA_idx);
+ break;
+ case 0:
+ rx_pwr_all = 16 - (2 * VGA_idx);
+ break;
+ default:
+ /* rx_pwr_all = -53+(2*(31-VGA_idx)); */
+ /* DbgPrint("wrong LNA index\n"); */
+ break;
+
+ }
+ return rx_pwr_all;
+}
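+
+/*
+ * Worked example (illustrative): with LNA_idx = 4 and VGA_idx = 10 the table
+ * above gives rx_pwr_all = -14 - (2 * 10) = -34 (dBm); an LNA index outside
+ * the listed cases leaves the default value 0.
+ */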
diff --git a/drivers/staging/rtl8723bs/hal/odm_RTL8723B.h b/drivers/staging/rtl8723bs/hal/odm_RTL8723B.h
new file mode 100644
index 000000000000..0700351a2b74
--- /dev/null
+++ b/drivers/staging/rtl8723bs/hal/odm_RTL8723B.h
@@ -0,0 +1,22 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#ifndef __ODM_RTL8723B_H__
+#define __ODM_RTL8723B_H__
+
+#define DM_DIG_MIN_NIC_8723 0x1C
+
+s8 odm_CCKRSSI_8723B(u8 LNA_idx, u8 VGA_idx);
+
+#endif
diff --git a/drivers/staging/rtl8723bs/hal/odm_RegConfig8723B.c b/drivers/staging/rtl8723bs/hal/odm_RegConfig8723B.c
new file mode 100644
index 000000000000..cdc9f38438d1
--- /dev/null
+++ b/drivers/staging/rtl8723bs/hal/odm_RegConfig8723B.c
@@ -0,0 +1,257 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+
+#include "odm_precomp.h"
+
+void odm_ConfigRFReg_8723B(
+ PDM_ODM_T pDM_Odm,
+ u32 Addr,
+ u32 Data,
+ ODM_RF_RADIO_PATH_E RF_PATH,
+ u32 RegAddr
+)
+{
+ if (Addr == 0xfe || Addr == 0xffe)
+ msleep(50);
+ else {
+ PHY_SetRFReg(pDM_Odm->Adapter, RF_PATH, RegAddr, bRFRegOffsetMask, Data);
+ /* Add 1us delay between BB/RF register setting. */
+ udelay(1);
+
+		/* In the disable/enable test at high temperature, the value written to 0xB6 may fail to take effect. Suggested by BB Stanley, 2013.06.25. */
+ if (Addr == 0xb6) {
+ u32 getvalue = 0;
+ u8 count = 0;
+
+ getvalue = PHY_QueryRFReg(
+ pDM_Odm->Adapter, RF_PATH, Addr, bMaskDWord
+ );
+
+ udelay(1);
+
+ while ((getvalue>>8) != (Data>>8)) {
+ count++;
+ PHY_SetRFReg(pDM_Odm->Adapter, RF_PATH, RegAddr, bRFRegOffsetMask, Data);
+ udelay(1);
+ getvalue = PHY_QueryRFReg(pDM_Odm->Adapter, RF_PATH, Addr, bMaskDWord);
+ ODM_RT_TRACE(
+ pDM_Odm,
+ ODM_COMP_INIT,
+ ODM_DBG_TRACE,
+ (
+ "===> ODM_ConfigRFWithHeaderFile: [B6] getvalue 0x%x, Data 0x%x, count %d\n",
+ getvalue,
+ Data,
+ count
+ )
+ );
+ if (count > 5)
+ break;
+ }
+ }
+
+ if (Addr == 0xb2) {
+ u32 getvalue = 0;
+ u8 count = 0;
+
+ getvalue = PHY_QueryRFReg(
+ pDM_Odm->Adapter, RF_PATH, Addr, bMaskDWord
+ );
+
+ udelay(1);
+
+ while (getvalue != Data) {
+ count++;
+ PHY_SetRFReg(
+ pDM_Odm->Adapter,
+ RF_PATH,
+ RegAddr,
+ bRFRegOffsetMask,
+ Data
+ );
+ udelay(1);
+				/* Do LCK again */
+ PHY_SetRFReg(
+ pDM_Odm->Adapter,
+ RF_PATH,
+ 0x18,
+ bRFRegOffsetMask,
+ 0x0fc07
+ );
+ udelay(1);
+ getvalue = PHY_QueryRFReg(
+ pDM_Odm->Adapter, RF_PATH, Addr, bMaskDWord
+ );
+ ODM_RT_TRACE(
+ pDM_Odm,
+ ODM_COMP_INIT,
+ ODM_DBG_TRACE,
+ (
+ "===> ODM_ConfigRFWithHeaderFile: [B2] getvalue 0x%x, Data 0x%x, count %d\n",
+ getvalue,
+ Data,
+ count
+ )
+ );
+
+ if (count > 5)
+ break;
+ }
+ }
+ }
+}
+
+
+void odm_ConfigRF_RadioA_8723B(PDM_ODM_T pDM_Odm, u32 Addr, u32 Data)
+{
+ u32 content = 0x1000; /* RF_Content: radioa_txt */
+ u32 maskforPhySet = (u32)(content&0xE000);
+
+ odm_ConfigRFReg_8723B(
+ pDM_Odm,
+ Addr,
+ Data,
+ ODM_RF_PATH_A,
+ Addr|maskforPhySet
+ );
+
+ ODM_RT_TRACE(
+ pDM_Odm,
+ ODM_COMP_INIT,
+ ODM_DBG_TRACE,
+ (
+ "===> ODM_ConfigRFWithHeaderFile: [RadioA] %08X %08X\n",
+ Addr,
+ Data
+ )
+ );
+}
+
+void odm_ConfigMAC_8723B(PDM_ODM_T pDM_Odm, u32 Addr, u8 Data)
+{
+ rtw_write8(pDM_Odm->Adapter, Addr, Data);
+ ODM_RT_TRACE(
+ pDM_Odm,
+ ODM_COMP_INIT,
+ ODM_DBG_TRACE,
+ (
+ "===> ODM_ConfigMACWithHeaderFile: [MAC_REG] %08X %08X\n",
+ Addr,
+ Data
+ )
+ );
+}
+
+void odm_ConfigBB_AGC_8723B(
+ PDM_ODM_T pDM_Odm,
+ u32 Addr,
+ u32 Bitmask,
+ u32 Data
+)
+{
+ PHY_SetBBReg(pDM_Odm->Adapter, Addr, Bitmask, Data);
+ /* Add 1us delay between BB/RF register setting. */
+ udelay(1);
+
+ ODM_RT_TRACE(
+ pDM_Odm,
+ ODM_COMP_INIT,
+ ODM_DBG_TRACE,
+ (
+ "===> ODM_ConfigBBWithHeaderFile: [AGC_TAB] %08X %08X\n",
+ Addr,
+ Data
+ )
+ );
+}
+
+void odm_ConfigBB_PHY_REG_PG_8723B(
+ PDM_ODM_T pDM_Odm,
+ u32 Band,
+ u32 RfPath,
+ u32 TxNum,
+ u32 Addr,
+ u32 Bitmask,
+ u32 Data
+)
+{
+ if (Addr == 0xfe || Addr == 0xffe)
+ msleep(50);
+ else {
+ PHY_StoreTxPowerByRate(pDM_Odm->Adapter, Band, RfPath, TxNum, Addr, Bitmask, Data);
+ }
+ ODM_RT_TRACE(
+ pDM_Odm,
+ ODM_COMP_INIT,
+ ODM_DBG_LOUD,
+ (
+ "===> ODM_ConfigBBWithHeaderFile: [PHY_REG] %08X %08X %08X\n",
+ Addr,
+ Bitmask,
+ Data
+ )
+ );
+}
+
+void odm_ConfigBB_PHY_8723B(
+ PDM_ODM_T pDM_Odm,
+ u32 Addr,
+ u32 Bitmask,
+ u32 Data
+)
+{
+ if (Addr == 0xfe)
+ msleep(50);
+ else if (Addr == 0xfd)
+ mdelay(5);
+ else if (Addr == 0xfc)
+ mdelay(1);
+ else if (Addr == 0xfb)
+ udelay(50);
+ else if (Addr == 0xfa)
+ udelay(5);
+ else if (Addr == 0xf9)
+ udelay(1);
+ else {
+ PHY_SetBBReg(pDM_Odm->Adapter, Addr, Bitmask, Data);
+ }
+
+ /* Add 1us delay between BB/RF register setting. */
+ udelay(1);
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_INIT, ODM_DBG_TRACE, ("===> ODM_ConfigBBWithHeaderFile: [PHY_REG] %08X %08X\n", Addr, Data));
+}
+
+void odm_ConfigBB_TXPWR_LMT_8723B(
+ PDM_ODM_T pDM_Odm,
+ u8 *Regulation,
+ u8 *Band,
+ u8 *Bandwidth,
+ u8 *RateSection,
+ u8 *RfPath,
+ u8 *Channel,
+ u8 *PowerLimit
+)
+{
+ PHY_SetTxPowerLimit(
+ pDM_Odm->Adapter,
+ Regulation,
+ Band,
+ Bandwidth,
+ RateSection,
+ RfPath,
+ Channel,
+ PowerLimit
+ );
+}
diff --git a/drivers/staging/rtl8723bs/hal/odm_RegConfig8723B.h b/drivers/staging/rtl8723bs/hal/odm_RegConfig8723B.h
new file mode 100644
index 000000000000..a6b3d21e6463
--- /dev/null
+++ b/drivers/staging/rtl8723bs/hal/odm_RegConfig8723B.h
@@ -0,0 +1,65 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#ifndef __INC_ODM_REGCONFIG_H_8723B
+#define __INC_ODM_REGCONFIG_H_8723B
+
+void odm_ConfigRFReg_8723B(
+ PDM_ODM_T pDM_Odm,
+ u32 Addr,
+ u32 Data,
+ ODM_RF_RADIO_PATH_E RF_PATH,
+ u32 RegAddr
+);
+
+void odm_ConfigRF_RadioA_8723B(PDM_ODM_T pDM_Odm, u32 Addr, u32 Data);
+
+void odm_ConfigMAC_8723B(PDM_ODM_T pDM_Odm, u32 Addr, u8 Data);
+
+void odm_ConfigBB_AGC_8723B(
+ PDM_ODM_T pDM_Odm,
+ u32 Addr,
+ u32 Bitmask,
+ u32 Data
+);
+
+void odm_ConfigBB_PHY_REG_PG_8723B(
+ PDM_ODM_T pDM_Odm,
+ u32 Band,
+ u32 RfPath,
+ u32 TxNum,
+ u32 Addr,
+ u32 Bitmask,
+ u32 Data
+);
+
+void odm_ConfigBB_PHY_8723B(
+ PDM_ODM_T pDM_Odm,
+ u32 Addr,
+ u32 Bitmask,
+ u32 Data
+);
+
+void odm_ConfigBB_TXPWR_LMT_8723B(
+ PDM_ODM_T pDM_Odm,
+ u8 *Regulation,
+ u8 *Band,
+ u8 *Bandwidth,
+ u8 *RateSection,
+ u8 *RfPath,
+ u8 *Channel,
+ u8 *PowerLimit
+);
+
+#endif
diff --git a/drivers/staging/rtl8723bs/hal/odm_RegDefine11N.h b/drivers/staging/rtl8723bs/hal/odm_RegDefine11N.h
new file mode 100644
index 000000000000..dc20e6165911
--- /dev/null
+++ b/drivers/staging/rtl8723bs/hal/odm_RegDefine11N.h
@@ -0,0 +1,172 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+
+#ifndef __ODM_REGDEFINE11N_H__
+#define __ODM_REGDEFINE11N_H__
+
+
+/* 2 RF REG LIST */
+#define ODM_REG_RF_MODE_11N 0x00
+#define ODM_REG_RF_0B_11N 0x0B
+#define ODM_REG_CHNBW_11N 0x18
+#define ODM_REG_T_METER_11N 0x24
+#define ODM_REG_RF_25_11N 0x25
+#define ODM_REG_RF_26_11N 0x26
+#define ODM_REG_RF_27_11N 0x27
+#define ODM_REG_RF_2B_11N 0x2B
+#define ODM_REG_RF_2C_11N 0x2C
+#define ODM_REG_RXRF_A3_11N 0x3C
+#define ODM_REG_T_METER_92D_11N 0x42
+#define ODM_REG_T_METER_88E_11N 0x42
+
+/* 2 BB REG LIST */
+/* PAGE 8 */
+#define ODM_REG_BB_CTRL_11N 0x800
+#define ODM_REG_RF_PIN_11N 0x804
+#define ODM_REG_PSD_CTRL_11N 0x808
+#define ODM_REG_TX_ANT_CTRL_11N 0x80C
+#define ODM_REG_BB_PWR_SAV5_11N 0x818
+#define ODM_REG_CCK_RPT_FORMAT_11N 0x824
+#define ODM_REG_RX_DEFUALT_A_11N 0x858
+#define ODM_REG_RX_DEFUALT_B_11N 0x85A
+#define ODM_REG_BB_PWR_SAV3_11N 0x85C
+#define ODM_REG_ANTSEL_CTRL_11N 0x860
+#define ODM_REG_RX_ANT_CTRL_11N 0x864
+#define ODM_REG_PIN_CTRL_11N 0x870
+#define ODM_REG_BB_PWR_SAV1_11N 0x874
+#define ODM_REG_ANTSEL_PATH_11N 0x878
+#define ODM_REG_BB_3WIRE_11N 0x88C
+#define ODM_REG_SC_CNT_11N 0x8C4
+#define ODM_REG_PSD_DATA_11N 0x8B4
+#define ODM_REG_NHM_TIMER_11N 0x894
+#define ODM_REG_NHM_TH9_TH10_11N 0x890
+#define ODM_REG_NHM_TH3_TO_TH0_11N 0x898
+#define ODM_REG_NHM_TH7_TO_TH4_11N 0x89c
+#define ODM_REG_NHM_CNT_11N 0x8d8
+/* PAGE 9 */
+#define ODM_REG_DBG_RPT_11N 0x908
+#define ODM_REG_ANT_MAPPING1_11N 0x914
+#define ODM_REG_ANT_MAPPING2_11N 0x918
+/* PAGE A */
+#define ODM_REG_CCK_ANTDIV_PARA1_11N 0xA00
+#define ODM_REG_CCK_CCA_11N 0xA0A
+#define ODM_REG_CCK_ANTDIV_PARA2_11N 0xA0C
+#define ODM_REG_CCK_ANTDIV_PARA3_11N 0xA10
+#define ODM_REG_CCK_ANTDIV_PARA4_11N 0xA14
+#define ODM_REG_CCK_FILTER_PARA1_11N 0xA22
+#define ODM_REG_CCK_FILTER_PARA2_11N 0xA23
+#define ODM_REG_CCK_FILTER_PARA3_11N 0xA24
+#define ODM_REG_CCK_FILTER_PARA4_11N 0xA25
+#define ODM_REG_CCK_FILTER_PARA5_11N 0xA26
+#define ODM_REG_CCK_FILTER_PARA6_11N 0xA27
+#define ODM_REG_CCK_FILTER_PARA7_11N 0xA28
+#define ODM_REG_CCK_FILTER_PARA8_11N 0xA29
+#define ODM_REG_CCK_FA_RST_11N 0xA2C
+#define ODM_REG_CCK_FA_MSB_11N 0xA58
+#define ODM_REG_CCK_FA_LSB_11N 0xA5C
+#define ODM_REG_CCK_CCA_CNT_11N 0xA60
+#define ODM_REG_BB_PWR_SAV4_11N 0xA74
+/* PAGE B */
+#define ODM_REG_LNA_SWITCH_11N 0xB2C
+#define ODM_REG_PATH_SWITCH_11N 0xB30
+#define ODM_REG_RSSI_CTRL_11N 0xB38
+#define ODM_REG_CONFIG_ANTA_11N 0xB68
+#define ODM_REG_RSSI_BT_11N 0xB9C
+/* PAGE C */
+#define ODM_REG_OFDM_FA_HOLDC_11N 0xC00
+#define ODM_REG_BB_RX_PATH_11N 0xC04
+#define ODM_REG_TRMUX_11N 0xC08
+#define ODM_REG_OFDM_FA_RSTC_11N 0xC0C
+#define ODM_REG_RXIQI_MATRIX_11N 0xC14
+#define ODM_REG_TXIQK_MATRIX_LSB1_11N 0xC4C
+#define ODM_REG_IGI_A_11N 0xC50
+#define ODM_REG_ANTDIV_PARA2_11N 0xC54
+#define ODM_REG_IGI_B_11N 0xC58
+#define ODM_REG_ANTDIV_PARA3_11N 0xC5C
+#define ODM_REG_L1SBD_PD_CH_11N 0XC6C
+#define ODM_REG_BB_PWR_SAV2_11N 0xC70
+#define ODM_REG_RX_OFF_11N 0xC7C
+#define ODM_REG_TXIQK_MATRIXA_11N 0xC80
+#define ODM_REG_TXIQK_MATRIXB_11N 0xC88
+#define ODM_REG_TXIQK_MATRIXA_LSB2_11N 0xC94
+#define ODM_REG_TXIQK_MATRIXB_LSB2_11N 0xC9C
+#define ODM_REG_RXIQK_MATRIX_LSB_11N 0xCA0
+#define ODM_REG_ANTDIV_PARA1_11N 0xCA4
+#define ODM_REG_OFDM_FA_TYPE1_11N 0xCF0
+/* PAGE D */
+#define ODM_REG_OFDM_FA_RSTD_11N 0xD00
+#define ODM_REG_BB_ATC_11N 0xD2C
+#define ODM_REG_OFDM_FA_TYPE2_11N 0xDA0
+#define ODM_REG_OFDM_FA_TYPE3_11N 0xDA4
+#define ODM_REG_OFDM_FA_TYPE4_11N 0xDA8
+#define ODM_REG_RPT_11N 0xDF4
+/* PAGE E */
+#define ODM_REG_TXAGC_A_6_18_11N 0xE00
+#define ODM_REG_TXAGC_A_24_54_11N 0xE04
+#define ODM_REG_TXAGC_A_1_MCS32_11N 0xE08
+#define ODM_REG_TXAGC_A_MCS0_3_11N 0xE10
+#define ODM_REG_TXAGC_A_MCS4_7_11N 0xE14
+#define ODM_REG_TXAGC_A_MCS8_11_11N 0xE18
+#define ODM_REG_TXAGC_A_MCS12_15_11N 0xE1C
+#define ODM_REG_FPGA0_IQK_11N 0xE28
+#define ODM_REG_TXIQK_TONE_A_11N 0xE30
+#define ODM_REG_RXIQK_TONE_A_11N 0xE34
+#define ODM_REG_TXIQK_PI_A_11N 0xE38
+#define ODM_REG_RXIQK_PI_A_11N 0xE3C
+#define ODM_REG_TXIQK_11N 0xE40
+#define ODM_REG_RXIQK_11N 0xE44
+#define ODM_REG_IQK_AGC_PTS_11N 0xE48
+#define ODM_REG_IQK_AGC_RSP_11N 0xE4C
+#define ODM_REG_BLUETOOTH_11N 0xE6C
+#define ODM_REG_RX_WAIT_CCA_11N 0xE70
+#define ODM_REG_TX_CCK_RFON_11N 0xE74
+#define ODM_REG_TX_CCK_BBON_11N 0xE78
+#define ODM_REG_OFDM_RFON_11N 0xE7C
+#define ODM_REG_OFDM_BBON_11N 0xE80
+#define ODM_REG_TX2RX_11N 0xE84
+#define ODM_REG_TX2TX_11N 0xE88
+#define ODM_REG_RX_CCK_11N 0xE8C
+#define ODM_REG_RX_OFDM_11N 0xED0
+#define ODM_REG_RX_WAIT_RIFS_11N 0xED4
+#define ODM_REG_RX2RX_11N 0xED8
+#define ODM_REG_STANDBY_11N 0xEDC
+#define ODM_REG_SLEEP_11N 0xEE0
+#define ODM_REG_PMPD_ANAEN_11N 0xEEC
+#define ODM_REG_IGI_C_11N 0xF84
+#define ODM_REG_IGI_D_11N 0xF88
+
+/* 2 MAC REG LIST */
+#define ODM_REG_BB_RST_11N 0x02
+#define ODM_REG_ANTSEL_PIN_11N 0x4C
+#define ODM_REG_EARLY_MODE_11N 0x4D0
+#define ODM_REG_RSSI_MONITOR_11N 0x4FE
+#define ODM_REG_EDCA_VO_11N 0x500
+#define ODM_REG_EDCA_VI_11N 0x504
+#define ODM_REG_EDCA_BE_11N 0x508
+#define ODM_REG_EDCA_BK_11N 0x50C
+#define ODM_REG_TXPAUSE_11N 0x522
+#define ODM_REG_RESP_TX_11N 0x6D8
+#define ODM_REG_ANT_TRAIN_PARA1_11N 0x7b0
+#define ODM_REG_ANT_TRAIN_PARA2_11N 0x7b4
+
+
+/* DIG Related */
+#define ODM_BIT_IGI_11N 0x0000007F
+#define ODM_BIT_CCK_RPT_FORMAT_11N BIT9
+#define ODM_BIT_BB_RX_PATH_11N 0xF
+#define ODM_BIT_BB_ATC_11N BIT11
+
+#endif
diff --git a/drivers/staging/rtl8723bs/hal/odm_debug.c b/drivers/staging/rtl8723bs/hal/odm_debug.c
new file mode 100644
index 000000000000..28cf0a66ff93
--- /dev/null
+++ b/drivers/staging/rtl8723bs/hal/odm_debug.c
@@ -0,0 +1,52 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+
+#include "odm_precomp.h"
+
+void ODM_InitDebugSetting(PDM_ODM_T pDM_Odm)
+{
+ pDM_Odm->DebugLevel = ODM_DBG_LOUD;
+
+ pDM_Odm->DebugComponents =
+/* BB Functions */
+/* ODM_COMP_DIG | */
+/* ODM_COMP_RA_MASK | */
+/* ODM_COMP_DYNAMIC_TXPWR | */
+/* ODM_COMP_FA_CNT | */
+/* ODM_COMP_RSSI_MONITOR | */
+/* ODM_COMP_CCK_PD | */
+/* ODM_COMP_ANT_DIV | */
+/* ODM_COMP_PWR_SAVE | */
+/* ODM_COMP_PWR_TRAIN | */
+/* ODM_COMP_RATE_ADAPTIVE | */
+/* ODM_COMP_PATH_DIV | */
+/* ODM_COMP_DYNAMIC_PRICCA | */
+/* ODM_COMP_RXHP | */
+/* ODM_COMP_MP | */
+/* ODM_COMP_CFO_TRACKING | */
+
+/* MAC Functions */
+/* ODM_COMP_EDCA_TURBO | */
+/* ODM_COMP_EARLY_MODE | */
+/* RF Functions */
+/* ODM_COMP_TX_PWR_TRACK | */
+/* ODM_COMP_RX_GAIN_TRACK | */
+/* ODM_COMP_CALIBRATION | */
+/* Common */
+/* ODM_COMP_COMMON | */
+/* ODM_COMP_INIT | */
+/* ODM_COMP_PSD | */
+0;
+}
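+
+/*
+ * Example (illustrative): to enable tracing for selected blocks at runtime,
+ * OR together component bits from odm_debug.h instead of leaving the mask
+ * at 0, e.g.
+ *
+ *	pDM_Odm->DebugComponents = ODM_COMP_DIG | ODM_COMP_FA_CNT;
+ */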
diff --git a/drivers/staging/rtl8723bs/hal/odm_debug.h b/drivers/staging/rtl8723bs/hal/odm_debug.h
new file mode 100644
index 000000000000..2ec4baf57464
--- /dev/null
+++ b/drivers/staging/rtl8723bs/hal/odm_debug.h
@@ -0,0 +1,166 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+
+#ifndef __ODM_DBG_H__
+#define __ODM_DBG_H__
+
+
+/* */
+/* Define the debug levels */
+/* */
+/* 1. DBG_TRACE and DBG_LOUD are used for normal cases. */
+/* They help the SW engineer to develop or trace state changes, */
+/* and also help the HW engineer to trace every operation to and from HW, */
+/* e.g. IO, Tx, Rx. */
+/* */
+/* 2. DBG_WARNING and DBG_SERIOUS are used for unusual or error cases, */
+/* which help us to debug SW or HW. */
+/* */
+/* */
+/* */
+/* Never used in a call to ODM_RT_TRACE()! */
+/* */
+#define ODM_DBG_OFF 1
+
+/* */
+/* Fatal bug. */
+/* For example, Tx/Rx/IO locked up, OS hangs, memory access violation, */
+/* resource allocation failed, unexpected HW behavior, HW BUG and so on. */
+/* */
+#define ODM_DBG_SERIOUS 2
+
+/* */
+/* Abnormal, rare, or unexpected cases. */
+/* For example, */
+/* IRP/Packet/OID canceled, */
+/* device surprise removed and so on. */
+/* */
+#define ODM_DBG_WARNING 3
+
+/* */
+/* Normal case with useful information about current SW or HW state. */
+/* For example, Tx/Rx descriptor to fill, Tx/Rx descriptor completed status, */
+/* SW protocol state change, dynamic mechanism state change and so on. */
+/* */
+#define ODM_DBG_LOUD 4
+
+/* */
+/* Normal case with detail execution flow or information. */
+/* */
+#define ODM_DBG_TRACE 5
+
+/* */
+/* Define the tracing components */
+/* */
+/* */
+/* BB Functions */
+#define ODM_COMP_DIG BIT0
+#define ODM_COMP_RA_MASK BIT1
+#define ODM_COMP_DYNAMIC_TXPWR BIT2
+#define ODM_COMP_FA_CNT BIT3
+#define ODM_COMP_RSSI_MONITOR BIT4
+#define ODM_COMP_CCK_PD BIT5
+#define ODM_COMP_ANT_DIV BIT6
+#define ODM_COMP_PWR_SAVE BIT7
+#define ODM_COMP_PWR_TRAIN BIT8
+#define ODM_COMP_RATE_ADAPTIVE BIT9
+#define ODM_COMP_PATH_DIV BIT10
+#define ODM_COMP_PSD BIT11
+#define ODM_COMP_DYNAMIC_PRICCA BIT12
+#define ODM_COMP_RXHP BIT13
+#define ODM_COMP_MP BIT14
+#define ODM_COMP_CFO_TRACKING BIT15
+/* MAC Functions */
+#define ODM_COMP_EDCA_TURBO BIT16
+#define ODM_COMP_EARLY_MODE BIT17
+/* RF Functions */
+#define ODM_COMP_TX_PWR_TRACK BIT24
+#define ODM_COMP_RX_GAIN_TRACK BIT25
+#define ODM_COMP_CALIBRATION BIT26
+/* Common Functions */
+#define ODM_COMP_COMMON BIT30
+#define ODM_COMP_INIT BIT31
+
+/*------------------------Export Macro Definition---------------------------*/
+ #define DbgPrint printk
+ #define RT_PRINTK(fmt, args...)\
+ DbgPrint("%s(): " fmt, __func__, ## args)
+ #define RT_DISP(dbgtype, dbgflag, printstr)
+
+#ifndef ASSERT
+ #define ASSERT(expr)
+#endif
+
+#if DBG
+#define ODM_RT_TRACE(pDM_Odm, comp, level, fmt)\
+ if (\
+ (comp & pDM_Odm->DebugComponents) &&\
+ (level <= pDM_Odm->DebugLevel || level == ODM_DBG_SERIOUS)\
+ ) {\
+ RT_PRINTK fmt;\
+ }
+
+#define ODM_RT_TRACE_F(pDM_Odm, comp, level, fmt)\
+ if (\
+ (comp & pDM_Odm->DebugComponents) &&\
+ (level <= pDM_Odm->DebugLevel)\
+ ) {\
+ RT_PRINTK fmt;\
+ }
+
+#define ODM_RT_ASSERT(pDM_Odm, expr, fmt)\
+	if (!(expr)) {\
+ DbgPrint("Assertion failed! %s at ......\n", #expr);\
+ DbgPrint(\
+ " ......%s,%s, line =%d\n",\
+ __FILE__,\
+ __func__,\
+ __LINE__\
+ );\
+ RT_PRINTK fmt;\
+ ASSERT(false);\
+ }
+#define ODM_dbg_enter() { DbgPrint("==> %s\n", __func__); }
+#define ODM_dbg_exit() { DbgPrint("<== %s\n", __func__); }
+#define ODM_dbg_trace(str) { DbgPrint("%s:%s\n", __func__, str); }
+
+#define ODM_PRINT_ADDR(pDM_Odm, comp, level, title_str, ptr)\
+ if (\
+ (comp & pDM_Odm->DebugComponents) &&\
+ (level <= pDM_Odm->DebugLevel)\
+ ) {\
+ int __i;\
+ u8 *__ptr = (u8 *)ptr;\
+ DbgPrint("[ODM] ");\
+ DbgPrint(title_str);\
+ DbgPrint(" ");\
+ for (__i = 0; __i < 6; __i++)\
+ DbgPrint("%02X%s", __ptr[__i], (__i == 5) ? "" : "-");\
+ DbgPrint("\n");\
+ }
+#else
+#define ODM_RT_TRACE(pDM_Odm, comp, level, fmt) no_printk fmt
+#define ODM_RT_TRACE_F(pDM_Odm, comp, level, fmt) no_printk fmt
+#define ODM_RT_ASSERT(pDM_Odm, expr, fmt) no_printk fmt
+#define ODM_dbg_enter() do {} while (0)
+#define ODM_dbg_exit() do {} while (0)
+#define ODM_dbg_trace(str) no_printk("%s", str)
+#define ODM_PRINT_ADDR(pDM_Odm, comp, level, title_str, ptr) \
+ no_printk("%s %p", title_str, ptr)
+#endif
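+
+/*
+ * Usage sketch (illustrative): the fmt argument must be a fully
+ * parenthesised printk-style argument list, because the macros above expand
+ * it as "RT_PRINTK fmt" / "no_printk fmt", e.g.
+ *
+ *	ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD,
+ *		     ("IGI = 0x%x\n", igi));
+ *
+ * where igi is a hypothetical local variable.
+ */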
+
+void ODM_InitDebugSetting(PDM_ODM_T pDM_Odm);
+
+#endif /* __ODM_DBG_H__ */
diff --git a/drivers/staging/rtl8723bs/hal/odm_interface.h b/drivers/staging/rtl8723bs/hal/odm_interface.h
new file mode 100644
index 000000000000..8ad0a0afca57
--- /dev/null
+++ b/drivers/staging/rtl8723bs/hal/odm_interface.h
@@ -0,0 +1,59 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+
+
+#ifndef __ODM_INTERFACE_H__
+#define __ODM_INTERFACE_H__
+
+
+
+/* =========== Constant/Structure/Enum/... Define */
+
+/* =========== Macro Define */
+
+#define _reg_all(_name) ODM_##_name
+#define _reg_ic(_name, _ic) ODM_##_name##_ic
+#define _bit_all(_name) BIT_##_name
+#define _bit_ic(_name, _ic) BIT_##_name##_ic
+
+/*===================================
+
+#define ODM_REG_DIG_11N 0xC50
+#define ODM_REG_DIG_11AC 0xDDD
+
+ODM_REG(DIG, _pDM_Odm)
+=====================================*/
+
+#define _reg_11N(_name) ODM_REG_##_name##_11N
+#define _bit_11N(_name) ODM_BIT_##_name##_11N
+
+#define _cat(_name, _ic_type, _func) _func##_11N(_name)
+
+/* _name: name of register or bit. */
+/* Example: "ODM_REG(R_A_AGC_CORE1, pDM_Odm)" */
+/* gets "ODM_R_A_AGC_CORE1" or "ODM_R_A_AGC_CORE1_8192C", depending on SupportICType. */
+#define ODM_REG(_name, _pDM_Odm) _cat(_name, _pDM_Odm->SupportICType, _reg)
+#define ODM_BIT(_name, _pDM_Odm) _cat(_name, _pDM_Odm->SupportICType, _bit)
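+
+/*
+ * Expansion example (illustrative): with the 11N-only _cat() above,
+ *
+ *	ODM_REG(IGI_A, pDM_Odm) -> _reg_11N(IGI_A) -> ODM_REG_IGI_A_11N
+ *
+ * which odm_RegDefine11N.h maps to BB register 0xC50; ODM_BIT(_name, ...)
+ * expands to the corresponding ODM_BIT_<name>_11N mask in the same way.
+ */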
+
+typedef enum _ODM_H2C_CMD {
+ ODM_H2C_RSSI_REPORT = 0,
+ ODM_H2C_PSD_RESULT = 1,
+ ODM_H2C_PathDiv = 2,
+ ODM_H2C_WIFI_CALIBRATION = 3,
+ ODM_MAX_H2CCMD
+} ODM_H2C_CMD;
+
+
+#endif /* __ODM_INTERFACE_H__ */
diff --git a/drivers/staging/rtl8723bs/hal/odm_precomp.h b/drivers/staging/rtl8723bs/hal/odm_precomp.h
new file mode 100644
index 000000000000..f543bdb31a89
--- /dev/null
+++ b/drivers/staging/rtl8723bs/hal/odm_precomp.h
@@ -0,0 +1,60 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+
+#ifndef __ODM_PRECOMP_H__
+#define __ODM_PRECOMP_H__
+
+#include "odm_types.h"
+
+#define TEST_FALG___ 1
+
+/* 2 Config Flags and Structs - defined by each ODM Type */
+
+ /* include <basic_types.h> */
+ /* include <osdep_service.h> */
+ /* include <drv_types.h> */
+ /* include <rtw_byteorder.h> */
+ /* include <hal_intf.h> */
+#define BEAMFORMING_SUPPORT 0
+
+/* 2 Hardware Parameter Files */
+
+/* 2 OutSrc Header Files */
+
+#include "odm.h"
+#include "odm_HWConfig.h"
+#include "odm_debug.h"
+#include "odm_RegDefine11N.h"
+#include "odm_AntDiv.h"
+#include "odm_EdcaTurboCheck.h"
+#include "odm_DIG.h"
+#include "odm_PathDiv.h"
+#include "odm_DynamicBBPowerSaving.h"
+#include "odm_DynamicTxPower.h"
+#include "odm_CfoTracking.h"
+#include "odm_NoiseMonitor.h"
+#include "HalPhyRf.h"
+#include "HalPhyRf_8723B.h"/* for IQK, LCK, Power-tracking */
+#include "rtl8723b_hal.h"
+#include "odm_interface.h"
+#include "odm_reg.h"
+#include "HalHWImg8723B_MAC.h"
+#include "HalHWImg8723B_RF.h"
+#include "HalHWImg8723B_BB.h"
+#include "Hal8723BReg.h"
+#include "odm_RTL8723B.h"
+#include "odm_RegConfig8723B.h"
+
+#endif /* __ODM_PRECOMP_H__ */
diff --git a/drivers/staging/rtl8723bs/hal/odm_reg.h b/drivers/staging/rtl8723bs/hal/odm_reg.h
new file mode 100644
index 000000000000..2496dcef7a9f
--- /dev/null
+++ b/drivers/staging/rtl8723bs/hal/odm_reg.h
@@ -0,0 +1,103 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+/* File Name: odm_reg.h */
+/* Description: */
+/* This file is for general register definition. */
+#ifndef __HAL_ODM_REG_H__
+#define __HAL_ODM_REG_H__
+
+/* Register Definition */
+
+/* MAC REG */
+#define ODM_BB_RESET 0x002
+#define ODM_DUMMY 0x4fe
+#define RF_T_METER_OLD 0x24
+#define RF_T_METER_NEW 0x42
+
+#define ODM_EDCA_VO_PARAM 0x500
+#define ODM_EDCA_VI_PARAM 0x504
+#define ODM_EDCA_BE_PARAM 0x508
+#define ODM_EDCA_BK_PARAM 0x50C
+#define ODM_TXPAUSE 0x522
+
+/* BB REG */
+#define ODM_FPGA_PHY0_PAGE8 0x800
+#define ODM_PSD_SETTING 0x808
+#define ODM_AFE_SETTING 0x818
+#define ODM_TXAGC_B_24_54 0x834
+#define ODM_TXAGC_B_MCS32_5 0x838
+#define ODM_TXAGC_B_MCS0_MCS3 0x83c
+#define ODM_TXAGC_B_MCS4_MCS7 0x848
+#define ODM_TXAGC_B_MCS8_MCS11 0x84c
+#define ODM_ANALOG_REGISTER 0x85c
+#define ODM_RF_INTERFACE_OUTPUT 0x860
+#define ODM_TXAGC_B_MCS12_MCS15 0x868
+#define ODM_TXAGC_B_11_A_2_11 0x86c
+#define ODM_AD_DA_LSB_MASK 0x874
+#define ODM_ENABLE_3_WIRE 0x88c
+#define ODM_PSD_REPORT 0x8b4
+#define ODM_R_ANT_SELECT 0x90c
+#define ODM_CCK_ANT_SELECT 0xa07
+#define ODM_CCK_PD_THRESH 0xa0a
+#define ODM_CCK_RF_REG1 0xa11
+#define ODM_CCK_MATCH_FILTER 0xa20
+#define ODM_CCK_RAKE_MAC 0xa2e
+#define ODM_CCK_CNT_RESET 0xa2d
+#define ODM_CCK_TX_DIVERSITY 0xa2f
+#define ODM_CCK_FA_CNT_MSB 0xa5b
+#define ODM_CCK_FA_CNT_LSB 0xa5c
+#define ODM_CCK_NEW_FUNCTION 0xa75
+#define ODM_OFDM_PHY0_PAGE_C 0xc00
+#define ODM_OFDM_RX_ANT 0xc04
+#define ODM_R_A_RXIQI 0xc14
+#define ODM_R_A_AGC_CORE1 0xc50
+#define ODM_R_A_AGC_CORE2 0xc54
+#define ODM_R_B_AGC_CORE1 0xc58
+#define ODM_R_AGC_PAR 0xc70
+#define ODM_R_HTSTF_AGC_PAR 0xc7c
+#define ODM_TX_PWR_TRAINING_A 0xc90
+#define ODM_TX_PWR_TRAINING_B 0xc98
+#define ODM_OFDM_FA_CNT1 0xcf0
+#define ODM_OFDM_PHY0_PAGE_D 0xd00
+#define ODM_OFDM_FA_CNT2 0xda0
+#define ODM_OFDM_FA_CNT3 0xda4
+#define ODM_OFDM_FA_CNT4 0xda8
+#define ODM_TXAGC_A_6_18 0xe00
+#define ODM_TXAGC_A_24_54 0xe04
+#define ODM_TXAGC_A_1_MCS32 0xe08
+#define ODM_TXAGC_A_MCS0_MCS3 0xe10
+#define ODM_TXAGC_A_MCS4_MCS7 0xe14
+#define ODM_TXAGC_A_MCS8_MCS11 0xe18
+#define ODM_TXAGC_A_MCS12_MCS15 0xe1c
+
+/* RF REG */
+#define ODM_GAIN_SETTING 0x00
+#define ODM_CHANNEL 0x18
+
+/* Ant Detect Reg */
+#define ODM_DPDT 0x300
+
+/* PSD Init */
+#define ODM_PSDREG 0x808
+
+/* 92D Path Div */
+#define PATHDIV_REG 0xB30
+#define PATHDIV_TRI 0xBA0
+
+/* Bitmap Definition */
+
+#define BIT_FA_RESET BIT0
+
+#endif
diff --git a/drivers/staging/rtl8723bs/hal/odm_types.h b/drivers/staging/rtl8723bs/hal/odm_types.h
new file mode 100644
index 000000000000..9e3d072b03db
--- /dev/null
+++ b/drivers/staging/rtl8723bs/hal/odm_types.h
@@ -0,0 +1,102 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#ifndef __ODM_TYPES_H__
+#define __ODM_TYPES_H__
+
+#include <drv_types.h>
+
+/* Define HW endian support */
+#define ODM_ENDIAN_BIG 0
+#define ODM_ENDIAN_LITTLE 1
+
+#define GET_ODM(__padapter) ((PDM_ODM_T)(&((GET_HAL_DATA(__padapter))->odmpriv)))
+
+typedef enum _HAL_STATUS {
+ HAL_STATUS_SUCCESS,
+ HAL_STATUS_FAILURE,
+ /*RT_STATUS_PENDING,
+ RT_STATUS_RESOURCE,
+ RT_STATUS_INVALID_CONTEXT,
+ RT_STATUS_INVALID_PARAMETER,
+ RT_STATUS_NOT_SUPPORT,
+ RT_STATUS_OS_API_FAILED,*/
+} HAL_STATUS, *PHAL_STATUS;
+
+
+/* */
+/* Declare ODM spin lock definitions temporarily so that compilation passes. */
+/* */
+typedef enum _RT_SPINLOCK_TYPE {
+ RT_TX_SPINLOCK = 1,
+ RT_RX_SPINLOCK = 2,
+ RT_RM_SPINLOCK = 3,
+ RT_CAM_SPINLOCK = 4,
+ RT_SCAN_SPINLOCK = 5,
+ RT_LOG_SPINLOCK = 7,
+ RT_BW_SPINLOCK = 8,
+ RT_CHNLOP_SPINLOCK = 9,
+ RT_RF_OPERATE_SPINLOCK = 10,
+ RT_INITIAL_SPINLOCK = 11,
+ RT_RF_STATE_SPINLOCK = 12, /* For RF state. Added by Bruce, 2007-10-30. */
+ /* Shall we define Ndis 6.2 SpinLock Here ? */
+ RT_PORT_SPINLOCK = 16,
+ RT_H2C_SPINLOCK = 20, /* For H2C cmd. Added by tynli. 2009.11.09. */
+
+ RT_BTData_SPINLOCK = 25,
+
+ RT_WAPI_OPTION_SPINLOCK = 26,
+ RT_WAPI_RX_SPINLOCK = 27,
+
+ /* add for 92D CCK control issue */
+ RT_CCK_PAGEA_SPINLOCK = 28,
+ RT_BUFFER_SPINLOCK = 29,
+ RT_CHANNEL_AND_BANDWIDTH_SPINLOCK = 30,
+ RT_GEN_TEMP_BUF_SPINLOCK = 31,
+ RT_AWB_SPINLOCK = 32,
+ RT_FW_PS_SPINLOCK = 33,
+ RT_HW_TIMER_SPIN_LOCK = 34,
+ RT_MPT_WI_SPINLOCK = 35,
+ RT_P2P_SPIN_LOCK = 36, /* Protect P2P context */
+ RT_DBG_SPIN_LOCK = 37,
+ RT_IQK_SPINLOCK = 38,
+ RT_PENDED_OID_SPINLOCK = 39,
+ RT_CHNLLIST_SPINLOCK = 40,
+ RT_INDIC_SPINLOCK = 41, /* protect indication */
+} RT_SPINLOCK_TYPE;
+
+ #if defined(__LITTLE_ENDIAN)
+ #define ODM_ENDIAN_TYPE ODM_ENDIAN_LITTLE
+ #else
+ #define ODM_ENDIAN_TYPE ODM_ENDIAN_BIG
+ #endif
+
+ typedef struct timer_list RT_TIMER, *PRT_TIMER;
+ typedef void *RT_TIMER_CALL_BACK;
+ #define STA_INFO_T struct sta_info
+ #define PSTA_INFO_T struct sta_info *
+
+ #define SET_TX_DESC_ANTSEL_A_88E(__pTxDesc, __Value) SET_BITS_TO_LE_4BYTE(__pTxDesc+8, 24, 1, __Value)
+ #define SET_TX_DESC_ANTSEL_B_88E(__pTxDesc, __Value) SET_BITS_TO_LE_4BYTE(__pTxDesc+8, 25, 1, __Value)
+ #define SET_TX_DESC_ANTSEL_C_88E(__pTxDesc, __Value) SET_BITS_TO_LE_4BYTE(__pTxDesc+28, 29, 1, __Value)
+
+	/* define unused flags to avoid compile warnings */
+ #define USE_WORKITEM 0
+ #define FPGA_TWO_MAC_VERIFICATION 0
+
+#define READ_NEXT_PAIR(v1, v2, i) do { if (i+2 >= ArrayLen) break; i += 2; v1 = Array[i]; v2 = Array[i+1]; } while (0)
+#define COND_ELSE 2
+#define COND_ENDIF 3
+
+#endif /* __ODM_TYPES_H__ */
diff --git a/drivers/staging/rtl8723bs/hal/rtl8723b_cmd.c b/drivers/staging/rtl8723bs/hal/rtl8723b_cmd.c
new file mode 100644
index 000000000000..69eed62b7c94
--- /dev/null
+++ b/drivers/staging/rtl8723bs/hal/rtl8723b_cmd.c
@@ -0,0 +1,2358 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#define _RTL8723B_CMD_C_
+
+#include <drv_types.h>
+#include <rtw_debug.h>
+#include <rtl8723b_hal.h>
+#include "hal_com_h2c.h"
+
+#define MAX_H2C_BOX_NUMS 4
+#define MESSAGE_BOX_SIZE 4
+
+#define RTL8723B_MAX_CMD_LEN 7
+#define RTL8723B_EX_MESSAGE_BOX_SIZE 4
+
+static u8 _is_fw_read_cmd_down(struct adapter *padapter, u8 msgbox_num)
+{
+ u8 read_down = false;
+ int retry_cnts = 100;
+
+ u8 valid;
+
+ /* DBG_8192C(" _is_fw_read_cmd_down , reg_1cc(%x), msg_box(%d)...\n", rtw_read8(padapter, REG_HMETFR), msgbox_num); */
+
+ do {
+ valid = rtw_read8(padapter, REG_HMETFR) & BIT(msgbox_num);
+ if (0 == valid) {
+ read_down = true;
+ }
+#ifdef CONFIG_WOWLAN
+ else
+ msleep(1);
+#endif
+ } while ((!read_down) && (retry_cnts--));
+
+ return read_down;
+
+}
+
+
+/*****************************************
+* H2C Msg format :
+*| 31 - 8 |7-5 | 4 - 0 |
+*| h2c_msg |Class |CMD_ID |
+*| 31-0 |
+*| Ext msg |
+*
+******************************************/
+s32 FillH2CCmd8723B(struct adapter *padapter, u8 ElementID, u32 CmdLen, u8 *pCmdBuffer)
+{
+ u8 h2c_box_num;
+ u32 msgbox_addr;
+ u32 msgbox_ex_addr = 0;
+ struct hal_com_data *pHalData;
+ u32 h2c_cmd = 0;
+ u32 h2c_cmd_ex = 0;
+ s32 ret = _FAIL;
+
+ padapter = GET_PRIMARY_ADAPTER(padapter);
+ pHalData = GET_HAL_DATA(padapter);
+ if (mutex_lock_interruptible(&(adapter_to_dvobj(padapter)->h2c_fwcmd_mutex)))
+ return ret;
+
+ if (!pCmdBuffer) {
+ goto exit;
+ }
+
+ if (CmdLen > RTL8723B_MAX_CMD_LEN) {
+ goto exit;
+ }
+
+ if (padapter->bSurpriseRemoved == true)
+ goto exit;
+
+ /* pay attention to if race condition happened in H2C cmd setting. */
+ do {
+ h2c_box_num = pHalData->LastHMEBoxNum;
+
+ if (!_is_fw_read_cmd_down(padapter, h2c_box_num)) {
+ DBG_8192C(" fw read cmd failed...\n");
+ /* DBG_8192C(" 0x1c0: 0x%8x\n", rtw_read32(padapter, 0x1c0)); */
+ /* DBG_8192C(" 0x1c4: 0x%8x\n", rtw_read32(padapter, 0x1c4)); */
+ goto exit;
+ }
+
+ if (CmdLen <= 3)
+ memcpy((u8 *)(&h2c_cmd)+1, pCmdBuffer, CmdLen);
+ else {
+ memcpy((u8 *)(&h2c_cmd)+1, pCmdBuffer, 3);
+ memcpy((u8 *)(&h2c_cmd_ex), pCmdBuffer+3, CmdLen-3);
+/* *(u8 *)(&h2c_cmd) |= BIT(7); */
+ }
+
+ *(u8 *)(&h2c_cmd) |= ElementID;
+
+ if (CmdLen > 3) {
+ msgbox_ex_addr = REG_HMEBOX_EXT0_8723B + (h2c_box_num*RTL8723B_EX_MESSAGE_BOX_SIZE);
+ rtw_write32(padapter, msgbox_ex_addr, h2c_cmd_ex);
+ }
+ msgbox_addr = REG_HMEBOX_0 + (h2c_box_num*MESSAGE_BOX_SIZE);
+ rtw_write32(padapter, msgbox_addr, h2c_cmd);
+
+ /* DBG_8192C("MSG_BOX:%d, CmdLen(%d), CmdID(0x%x), reg:0x%x =>h2c_cmd:0x%.8x, reg:0x%x =>h2c_cmd_ex:0x%.8x\n" */
+ /* , pHalData->LastHMEBoxNum , CmdLen, ElementID, msgbox_addr, h2c_cmd, msgbox_ex_addr, h2c_cmd_ex); */
+
+ pHalData->LastHMEBoxNum = (h2c_box_num+1) % MAX_H2C_BOX_NUMS;
+
+ } while (0);
+
+ ret = _SUCCESS;
+
+exit:
+
+ mutex_unlock(&(adapter_to_dvobj(padapter)->h2c_fwcmd_mutex));
+ return ret;
+}
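+
+/*
+ * Caller sketch (illustrative; the element id and payload bytes are
+ * hypothetical):
+ *
+ *	u8 parm[3] = { 0x01, 0x00, 0x00 };
+ *
+ *	FillH2CCmd8723B(padapter, H2C_SOME_CMD_ID, sizeof(parm), parm);
+ *
+ * returns _SUCCESS once the firmware has consumed the previous box and the
+ * new command has been written, or _FAIL otherwise.
+ */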
+
+static void ConstructBeacon(struct adapter *padapter, u8 *pframe, u32 *pLength)
+{
+ struct ieee80211_hdr *pwlanhdr;
+ __le16 *fctrl;
+ u32 rate_len, pktlen;
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct wlan_bssid_ex *cur_network = &(pmlmeinfo->network);
+ u8 bc_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+
+
+ /* DBG_871X("%s\n", __func__); */
+
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
+
+ fctrl = &(pwlanhdr->frame_control);
+ *(fctrl) = 0;
+
+ memcpy(pwlanhdr->addr1, bc_addr, ETH_ALEN);
+ memcpy(pwlanhdr->addr2, myid(&(padapter->eeprompriv)), ETH_ALEN);
+ memcpy(pwlanhdr->addr3, get_my_bssid(cur_network), ETH_ALEN);
+
+ SetSeqNum(pwlanhdr, 0/*pmlmeext->mgnt_seq*/);
+ /* pmlmeext->mgnt_seq++; */
+ SetFrameSubType(pframe, WIFI_BEACON);
+
+ pframe += sizeof(struct ieee80211_hdr_3addr);
+	pktlen = sizeof(struct ieee80211_hdr_3addr);
+
+ /* timestamp will be inserted by hardware */
+ pframe += 8;
+ pktlen += 8;
+
+ /* beacon interval: 2 bytes */
+ memcpy(pframe, (unsigned char *)(rtw_get_beacon_interval_from_ie(cur_network->IEs)), 2);
+
+ pframe += 2;
+ pktlen += 2;
+
+ /* capability info: 2 bytes */
+ memcpy(pframe, (unsigned char *)(rtw_get_capability_from_ie(cur_network->IEs)), 2);
+
+ pframe += 2;
+ pktlen += 2;
+
+ if ((pmlmeinfo->state&0x03) == WIFI_FW_AP_STATE) {
+ /* DBG_871X("ie len =%d\n", cur_network->IELength); */
+ pktlen += cur_network->IELength - sizeof(struct ndis_802_11_fix_ie);
+ memcpy(pframe, cur_network->IEs+sizeof(struct ndis_802_11_fix_ie), pktlen);
+
+ goto _ConstructBeacon;
+ }
+
+ /* below for ad-hoc mode */
+
+ /* SSID */
+ pframe = rtw_set_ie(pframe, _SSID_IE_, cur_network->Ssid.SsidLength, cur_network->Ssid.Ssid, &pktlen);
+
+ /* supported rates... */
+ rate_len = rtw_get_rateset_len(cur_network->SupportedRates);
+ pframe = rtw_set_ie(pframe, _SUPPORTEDRATES_IE_, ((rate_len > 8) ? 8 : rate_len), cur_network->SupportedRates, &pktlen);
+
+ /* DS parameter set */
+ pframe = rtw_set_ie(pframe, _DSSET_IE_, 1, (unsigned char *)&(cur_network->Configuration.DSConfig), &pktlen);
+
+ if ((pmlmeinfo->state&0x03) == WIFI_FW_ADHOC_STATE) {
+ u32 ATIMWindow;
+ /* IBSS Parameter Set... */
+ /* ATIMWindow = cur->Configuration.ATIMWindow; */
+ ATIMWindow = 0;
+ pframe = rtw_set_ie(pframe, _IBSS_PARA_IE_, 2, (unsigned char *)(&ATIMWindow), &pktlen);
+ }
+
+
+ /* todo: ERP IE */
+
+
+ /* EXTERNDED SUPPORTED RATE */
+ if (rate_len > 8)
+ pframe = rtw_set_ie(pframe, _EXT_SUPPORTEDRATES_IE_, (rate_len - 8), (cur_network->SupportedRates + 8), &pktlen);
+
+
+ /* todo:HT for adhoc */
+
+_ConstructBeacon:
+
+ if ((pktlen + TXDESC_SIZE) > 512) {
+ DBG_871X("beacon frame too large\n");
+ return;
+ }
+
+ *pLength = pktlen;
+
+ /* DBG_871X("%s bcn_sz =%d\n", __func__, pktlen); */
+
+}
+
+static void ConstructPSPoll(struct adapter *padapter, u8 *pframe, u32 *pLength)
+{
+ struct ieee80211_hdr *pwlanhdr;
+ __le16 *fctrl;
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+
+ /* DBG_871X("%s\n", __func__); */
+
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
+
+ /* Frame control. */
+ fctrl = &(pwlanhdr->frame_control);
+ *(fctrl) = 0;
+ SetPwrMgt(fctrl);
+ SetFrameSubType(pframe, WIFI_PSPOLL);
+
+ /* AID. */
+ SetDuration(pframe, (pmlmeinfo->aid | 0xc000));
+
+ /* BSSID. */
+ memcpy(pwlanhdr->addr1, get_my_bssid(&(pmlmeinfo->network)), ETH_ALEN);
+
+ /* TA. */
+ memcpy(pwlanhdr->addr2, myid(&(padapter->eeprompriv)), ETH_ALEN);
+
+ *pLength = 16;
+}
+
+static void ConstructNullFunctionData(
+ struct adapter *padapter,
+ u8 *pframe,
+ u32 *pLength,
+ u8 *StaAddr,
+ u8 bQoS,
+ u8 AC,
+ u8 bEosp,
+ u8 bForcePowerSave
+)
+{
+ struct ieee80211_hdr *pwlanhdr;
+ __le16 *fctrl;
+ u32 pktlen;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct wlan_network *cur_network = &pmlmepriv->cur_network;
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+
+
+ /* DBG_871X("%s:%d\n", __func__, bForcePowerSave); */
+
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
+
+ fctrl = &pwlanhdr->frame_control;
+ *(fctrl) = 0;
+ if (bForcePowerSave)
+ SetPwrMgt(fctrl);
+
+ switch (cur_network->network.InfrastructureMode) {
+ case Ndis802_11Infrastructure:
+ SetToDs(fctrl);
+ memcpy(pwlanhdr->addr1, get_my_bssid(&(pmlmeinfo->network)), ETH_ALEN);
+ memcpy(pwlanhdr->addr2, myid(&(padapter->eeprompriv)), ETH_ALEN);
+ memcpy(pwlanhdr->addr3, StaAddr, ETH_ALEN);
+ break;
+ case Ndis802_11APMode:
+ SetFrDs(fctrl);
+ memcpy(pwlanhdr->addr1, StaAddr, ETH_ALEN);
+ memcpy(pwlanhdr->addr2, get_my_bssid(&(pmlmeinfo->network)), ETH_ALEN);
+ memcpy(pwlanhdr->addr3, myid(&(padapter->eeprompriv)), ETH_ALEN);
+ break;
+ case Ndis802_11IBSS:
+ default:
+ memcpy(pwlanhdr->addr1, StaAddr, ETH_ALEN);
+ memcpy(pwlanhdr->addr2, myid(&(padapter->eeprompriv)), ETH_ALEN);
+ memcpy(pwlanhdr->addr3, get_my_bssid(&(pmlmeinfo->network)), ETH_ALEN);
+ break;
+ }
+
+ SetSeqNum(pwlanhdr, 0);
+
+ if (bQoS == true) {
+ struct ieee80211_qos_hdr *pwlanqoshdr;
+
+ SetFrameSubType(pframe, WIFI_QOS_DATA_NULL);
+
+ pwlanqoshdr = (struct ieee80211_qos_hdr *)pframe;
+ SetPriority(&pwlanqoshdr->qos_ctrl, AC);
+ SetEOSP(&pwlanqoshdr->qos_ctrl, bEosp);
+
+ pktlen = sizeof(struct ieee80211_qos_hdr);
+ } else {
+ SetFrameSubType(pframe, WIFI_DATA_NULL);
+
+ pktlen = sizeof(struct ieee80211_hdr_3addr);
+ }
+
+ *pLength = pktlen;
+}
+
+
+#ifdef CONFIG_WOWLAN
+/* */
+/* Description: */
+/* Construct the ARP response packet to support ARP offload. */
+/* */
+static void ConstructARPResponse(
+ struct adapter *padapter,
+ u8 *pframe,
+ u32 *pLength,
+ u8 *pIPAddress
+)
+{
+ struct ieee80211_hdr *pwlanhdr;
+ __le16 *fctrl;
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
+ static u8 ARPLLCHeader[8] = {0xAA, 0xAA, 0x03, 0x00, 0x00, 0x00, 0x08, 0x06};
+ u8 *pARPRspPkt = pframe;
+ /* for TKIP Cal MIC */
+ u8 *payload = pframe;
+ u8 EncryptionHeadOverhead = 0;
+ /* DBG_871X("%s:%d\n", __func__, bForcePowerSave); */
+
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
+
+ fctrl = &pwlanhdr->frame_control;
+ *(fctrl) = 0;
+
+ /* */
+ /* MAC Header. */
+ /* */
+ SetFrameType(fctrl, WIFI_DATA);
+ /* SetFrameSubType(fctrl, 0); */
+ SetToDs(fctrl);
+ memcpy(pwlanhdr->addr1, get_my_bssid(&(pmlmeinfo->network)), ETH_ALEN);
+ memcpy(pwlanhdr->addr2, myid(&(padapter->eeprompriv)), ETH_ALEN);
+ memcpy(pwlanhdr->addr3, get_my_bssid(&(pmlmeinfo->network)), ETH_ALEN);
+
+ SetSeqNum(pwlanhdr, 0);
+ SetDuration(pwlanhdr, 0);
+ /* SET_80211_HDR_FRAME_CONTROL(pARPRspPkt, 0); */
+ /* SET_80211_HDR_TYPE_AND_SUBTYPE(pARPRspPkt, Type_Data); */
+ /* SET_80211_HDR_TO_DS(pARPRspPkt, 1); */
+ /* SET_80211_HDR_ADDRESS1(pARPRspPkt, pMgntInfo->Bssid); */
+ /* SET_80211_HDR_ADDRESS2(pARPRspPkt, Adapter->CurrentAddress); */
+ /* SET_80211_HDR_ADDRESS3(pARPRspPkt, pMgntInfo->Bssid); */
+
+ /* SET_80211_HDR_DURATION(pARPRspPkt, 0); */
+ /* SET_80211_HDR_FRAGMENT_SEQUENCE(pARPRspPkt, 0); */
+ *pLength = 24;
+
+ /* */
+ /* Security Header: leave space for it if necessary. */
+ /* */
+
+ switch (psecuritypriv->dot11PrivacyAlgrthm) {
+ case _WEP40_:
+ case _WEP104_:
+ EncryptionHeadOverhead = 4;
+ break;
+ case _TKIP_:
+ EncryptionHeadOverhead = 8;
+ break;
+ case _AES_:
+ EncryptionHeadOverhead = 8;
+ break;
+ default:
+ EncryptionHeadOverhead = 0;
+ }
+
+ if (EncryptionHeadOverhead > 0) {
+ memset(&(pframe[*pLength]), 0, EncryptionHeadOverhead);
+ *pLength += EncryptionHeadOverhead;
+ SetPrivacy(fctrl);
+ }
+
+ /* */
+ /* Frame Body. */
+ /* */
+ pARPRspPkt = (u8 *)(pframe + *pLength);
+ payload = pARPRspPkt; /* Get Payload pointer */
+ /* LLC header */
+ memcpy(pARPRspPkt, ARPLLCHeader, 8);
+ *pLength += 8;
+
+ /* ARP element */
+ pARPRspPkt += 8;
+ SET_ARP_PKT_HW(pARPRspPkt, 0x0100);
+ SET_ARP_PKT_PROTOCOL(pARPRspPkt, 0x0008); /* IP protocol */
+ SET_ARP_PKT_HW_ADDR_LEN(pARPRspPkt, 6);
+ SET_ARP_PKT_PROTOCOL_ADDR_LEN(pARPRspPkt, 4);
+ SET_ARP_PKT_OPERATION(pARPRspPkt, 0x0200); /* ARP response */
+ SET_ARP_PKT_SENDER_MAC_ADDR(pARPRspPkt, myid(&(padapter->eeprompriv)));
+ SET_ARP_PKT_SENDER_IP_ADDR(pARPRspPkt, pIPAddress);
+ {
+ SET_ARP_PKT_TARGET_MAC_ADDR(pARPRspPkt, get_my_bssid(&(pmlmeinfo->network)));
+ SET_ARP_PKT_TARGET_IP_ADDR(pARPRspPkt, pIPAddress);
+ DBG_871X("%s Target Mac Addr:" MAC_FMT "\n", __func__, MAC_ARG(get_my_bssid(&(pmlmeinfo->network))));
+ DBG_871X("%s Target IP Addr" IP_FMT "\n", __func__, IP_ARG(pIPAddress));
+ }
+
+ *pLength += 28;
+
+ if (psecuritypriv->dot11PrivacyAlgrthm == _TKIP_) {
+ u8 mic[8];
+ struct mic_data micdata;
+ struct sta_info *psta = NULL;
+ u8 priority[4] = {
+ 0x0, 0x0, 0x0, 0x0
+ };
+ u8 null_key[16] = {
+ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0
+ };
+
+ DBG_871X("%s(): Add MIC\n", __func__);
+
+ psta = rtw_get_stainfo(&padapter->stapriv, get_my_bssid(&(pmlmeinfo->network)));
+ if (psta != NULL) {
+ if (!memcmp(&psta->dot11tkiptxmickey.skey[0], null_key, 16)) {
+ DBG_871X("%s(): STA dot11tkiptxmickey == 0\n", __func__);
+ }
+ /* start to calculate the mic code */
+ rtw_secmicsetkey(&micdata, &psta->dot11tkiptxmickey.skey[0]);
+ }
+
+ rtw_secmicappend(&micdata, pwlanhdr->addr3, 6); /* DA */
+
+ rtw_secmicappend(&micdata, pwlanhdr->addr2, 6); /* SA */
+
+ priority[0] = 0;
+ rtw_secmicappend(&micdata, &priority[0], 4);
+
+ rtw_secmicappend(&micdata, payload, 36); /* payload length = 8 + 28 */
+
+ rtw_secgetmic(&micdata, &(mic[0]));
+
+ pARPRspPkt += 28;
+ memcpy(pARPRspPkt, &(mic[0]), 8);
+
+ *pLength += 8;
+ }
+}
+
+#ifdef CONFIG_PNO_SUPPORT
+static void ConstructPnoInfo(
+ struct adapter *padapter, u8 *pframe, u32 *pLength
+)
+{
+
+ struct pwrctrl_priv *pwrctl = adapter_to_pwrctl(padapter);
+
+ u8 *pPnoInfoPkt = pframe;
+ pPnoInfoPkt = (u8 *)(pframe + *pLength);
+ memcpy(pPnoInfoPkt, &pwrctl->pnlo_info->ssid_num, 4);
+
+ *pLength += 4;
+ pPnoInfoPkt += 4;
+ memcpy(pPnoInfoPkt, &pwrctl->pnlo_info->fast_scan_period, 4);
+
+ *pLength += 4;
+ pPnoInfoPkt += 4;
+ memcpy(pPnoInfoPkt, &pwrctl->pnlo_info->fast_scan_iterations, 4);
+
+ *pLength += 4;
+ pPnoInfoPkt += 4;
+ memcpy(pPnoInfoPkt, &pwrctl->pnlo_info->slow_scan_period, 4);
+
+ *pLength += 4;
+ pPnoInfoPkt += 4;
+ memcpy(pPnoInfoPkt, &pwrctl->pnlo_info->ssid_length,
+ MAX_PNO_LIST_COUNT);
+
+ *pLength += MAX_PNO_LIST_COUNT;
+ pPnoInfoPkt += MAX_PNO_LIST_COUNT;
+ memcpy(pPnoInfoPkt, &pwrctl->pnlo_info->ssid_cipher_info,
+ MAX_PNO_LIST_COUNT);
+
+ *pLength += MAX_PNO_LIST_COUNT;
+ pPnoInfoPkt += MAX_PNO_LIST_COUNT;
+ memcpy(pPnoInfoPkt, &pwrctl->pnlo_info->ssid_channel_info,
+ MAX_PNO_LIST_COUNT);
+
+ *pLength += MAX_PNO_LIST_COUNT;
+ pPnoInfoPkt += MAX_PNO_LIST_COUNT;
+}
+
+static void ConstructSSIDList(
+ struct adapter *padapter, u8 *pframe, u32 *pLength
+)
+{
+ int i = 0;
+ u8 *pSSIDListPkt = pframe;
+ struct pwrctrl_priv *pwrctl = adapter_to_pwrctl(padapter);
+
+ pSSIDListPkt = (u8 *)(pframe + *pLength);
+
+ for (i = 0; i < pwrctl->pnlo_info->ssid_num ; i++) {
+ memcpy(pSSIDListPkt, &pwrctl->pno_ssid_list->node[i].SSID,
+ pwrctl->pnlo_info->ssid_length[i]);
+
+ *pLength += WLAN_SSID_MAXLEN;
+ pSSIDListPkt += WLAN_SSID_MAXLEN;
+ }
+}
+
+static void ConstructScanInfo(
+ struct adapter *padapter, u8 *pframe, u32 *pLength
+)
+{
+ int i = 0;
+ u8 *pScanInfoPkt = pframe;
+ struct pwrctrl_priv *pwrctl = adapter_to_pwrctl(padapter);
+
+ pScanInfoPkt = (u8 *)(pframe + *pLength);
+
+ memcpy(pScanInfoPkt, &pwrctl->pscan_info->channel_num, 1);
+
+ *pLength += 1;
+ pScanInfoPkt += 1;
+ memcpy(pScanInfoPkt, &pwrctl->pscan_info->orig_ch, 1);
+
+
+ *pLength += 1;
+ pScanInfoPkt += 1;
+ memcpy(pScanInfoPkt, &pwrctl->pscan_info->orig_bw, 1);
+
+
+ *pLength += 1;
+ pScanInfoPkt += 1;
+ memcpy(pScanInfoPkt, &pwrctl->pscan_info->orig_40_offset, 1);
+
+ *pLength += 1;
+ pScanInfoPkt += 1;
+ memcpy(pScanInfoPkt, &pwrctl->pscan_info->orig_80_offset, 1);
+
+ *pLength += 1;
+ pScanInfoPkt += 1;
+ memcpy(pScanInfoPkt, &pwrctl->pscan_info->periodScan, 1);
+
+ *pLength += 1;
+ pScanInfoPkt += 1;
+ memcpy(pScanInfoPkt, &pwrctl->pscan_info->period_scan_time, 1);
+
+ *pLength += 1;
+ pScanInfoPkt += 1;
+ memcpy(pScanInfoPkt, &pwrctl->pscan_info->enableRFE, 1);
+
+ *pLength += 1;
+ pScanInfoPkt += 1;
+ memcpy(pScanInfoPkt, &pwrctl->pscan_info->rfe_type, 8);
+
+ *pLength += 8;
+ pScanInfoPkt += 8;
+
+ for (i = 0; i < MAX_SCAN_LIST_COUNT; i++) {
+ memcpy(pScanInfoPkt, &pwrctl->pscan_info->ssid_channel_info[i], 4);
+ *pLength += 4;
+ pScanInfoPkt += 4;
+ }
+}
+#endif
+
+#ifdef CONFIG_GTK_OL
+static void ConstructGTKResponse(
+ struct adapter *padapter, u8 *pframe, u32 *pLength
+)
+{
+ struct ieee80211_hdr *pwlanhdr;
+ u16 *fctrl;
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
+ static u8 LLCHeader[8] = {0xAA, 0xAA, 0x03, 0x00, 0x00, 0x00, 0x88, 0x8E};
+ static u8 GTKbody_a[11] = {0x01, 0x03, 0x00, 0x5F, 0x02, 0x03, 0x12, 0x00, 0x10, 0x42, 0x0B};
+ u8 *pGTKRspPkt = pframe;
+ u8 EncryptionHeadOverhead = 0;
+ /* DBG_871X("%s:%d\n", __func__, bForcePowerSave); */
+
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
+
+ fctrl = &pwlanhdr->frame_control;
+ *(fctrl) = 0;
+
+ /* */
+ /* MAC Header. */
+ /* */
+ SetFrameType(fctrl, WIFI_DATA);
+ /* SetFrameSubType(fctrl, 0); */
+ SetToDs(fctrl);
+ memcpy(pwlanhdr->addr1, get_my_bssid(&(pmlmeinfo->network)), ETH_ALEN);
+ memcpy(pwlanhdr->addr2, myid(&(padapter->eeprompriv)), ETH_ALEN);
+ memcpy(pwlanhdr->addr3, get_my_bssid(&(pmlmeinfo->network)), ETH_ALEN);
+
+ SetSeqNum(pwlanhdr, 0);
+ SetDuration(pwlanhdr, 0);
+
+ *pLength = 24;
+
+ /* */
+ /* Security Header: leave space for it if necessary. */
+ /* */
+
+ switch (psecuritypriv->dot11PrivacyAlgrthm) {
+ case _WEP40_:
+ case _WEP104_:
+ EncryptionHeadOverhead = 4;
+ break;
+ case _TKIP_:
+ EncryptionHeadOverhead = 8;
+ break;
+ case _AES_:
+ EncryptionHeadOverhead = 8;
+ break;
+ default:
+ EncryptionHeadOverhead = 0;
+ }
+
+ if (EncryptionHeadOverhead > 0) {
+ memset(&(pframe[*pLength]), 0, EncryptionHeadOverhead);
+ *pLength += EncryptionHeadOverhead;
+ /* GTK's privacy bit is done by FW */
+ /* SetPrivacy(fctrl); */
+ }
+
+ /* */
+ /* Frame Body. */
+ /* */
+ pGTKRspPkt = (u8 *)(pframe + *pLength);
+ /* LLC header */
+ memcpy(pGTKRspPkt, LLCHeader, 8);
+ *pLength += 8;
+
+ /* GTK element */
+ pGTKRspPkt += 8;
+
+ /* GTK frame body after LLC, part 1 */
+ memcpy(pGTKRspPkt, GTKbody_a, 11);
+ *pLength += 11;
+ pGTKRspPkt += 11;
+ /* GTK frame body after LLC, part 2 */
+ memset(&(pframe[*pLength]), 0, 88);
+ *pLength += 88;
+ pGTKRspPkt += 88;
+
+}
+#endif /* CONFIG_GTK_OL */
+
+#ifdef CONFIG_PNO_SUPPORT
+static void ConstructProbeReq(struct adapter *padapter, u8 *pframe, u32 *pLength)
+{
+ struct ieee80211_hdr *pwlanhdr;
+ u16 *fctrl;
+ u32 pktlen;
+ unsigned char *mac;
+ unsigned char bssrate[NumRates];
+ struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ int bssrate_len = 0;
+ u8 bc_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
+ mac = myid(&(padapter->eeprompriv));
+
+ fctrl = &(pwlanhdr->frame_control);
+ *(fctrl) = 0;
+
+ /* broadcast probe request frame */
+ memcpy(pwlanhdr->addr1, bc_addr, ETH_ALEN);
+ memcpy(pwlanhdr->addr3, bc_addr, ETH_ALEN);
+
+ memcpy(pwlanhdr->addr2, mac, ETH_ALEN);
+
+ SetSeqNum(pwlanhdr, 0);
+ SetFrameSubType(pframe, WIFI_PROBEREQ);
+
+ pktlen = sizeof(struct ieee80211_hdr_3addr);
+ pframe += pktlen;
+
+ pframe = rtw_set_ie(pframe, _SSID_IE_, 0, NULL, &pktlen);
+
+ get_rate_set(padapter, bssrate, &bssrate_len);
+
+ if (bssrate_len > 8) {
+ pframe = rtw_set_ie(pframe, _SUPPORTEDRATES_IE_, 8, bssrate, &pktlen);
+ pframe = rtw_set_ie(pframe, _EXT_SUPPORTEDRATES_IE_, (bssrate_len - 8), (bssrate + 8), &pktlen);
+ } else
+ pframe = rtw_set_ie(pframe, _SUPPORTEDRATES_IE_, bssrate_len, bssrate, &pktlen);
+
+ *pLength = pktlen;
+}
+#endif /* CONFIG_PNO_SUPPORT */
+#endif /* CONFIG_WOWLAN */
+
+#ifdef CONFIG_AP_WOWLAN
+static void ConstructProbeRsp(struct adapter *padapter, u8 *pframe, u32 *pLength, u8 *StaAddr, bool bHideSSID)
+{
+ struct ieee80211_hdr *pwlanhdr;
+ u16 *fctrl;
+ u8 *mac, *bssid;
+ u32 pktlen;
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct wlan_bssid_ex *cur_network = &(pmlmeinfo->network);
+ u8 *pwps_ie;
+ uint wps_ielen;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+
+ /* DBG_871X("%s\n", __func__); */
+
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
+
+ mac = myid(&(padapter->eeprompriv));
+ bssid = cur_network->MacAddress;
+
+ fctrl = &(pwlanhdr->frame_control);
+ *(fctrl) = 0;
+ memcpy(pwlanhdr->addr1, StaAddr, ETH_ALEN);
+ memcpy(pwlanhdr->addr2, mac, ETH_ALEN);
+ memcpy(pwlanhdr->addr3, bssid, ETH_ALEN);
+
+ DBG_871X("%s FW Mac Addr:" MAC_FMT "\n", __func__, MAC_ARG(mac));
+ DBG_871X("%s FW IP Addr" IP_FMT "\n", __func__, IP_ARG(StaAddr));
+
+ SetSeqNum(pwlanhdr, 0);
+ SetFrameSubType(fctrl, WIFI_PROBERSP);
+
+ pktlen = sizeof(struct ieee80211_hdr_3addr);
+ pframe += pktlen;
+
+ if (cur_network->IELength > MAX_IE_SZ)
+ return;
+
+ pwps_ie = rtw_get_wps_ie(cur_network->IEs+_FIXED_IE_LENGTH_,
+ cur_network->IELength-_FIXED_IE_LENGTH_, NULL, &wps_ielen);
+
+ /* insert & update wps_probe_resp_ie */
+ if ((pmlmepriv->wps_probe_resp_ie != NULL) && pwps_ie && (wps_ielen > 0)) {
+ uint wps_offset, remainder_ielen;
+ u8 *premainder_ie;
+
+ wps_offset = (uint)(pwps_ie - cur_network->IEs);
+
+ premainder_ie = pwps_ie + wps_ielen;
+
+ remainder_ielen = cur_network->IELength - wps_offset - wps_ielen;
+
+ memcpy(pframe, cur_network->IEs, wps_offset);
+ pframe += wps_offset;
+ pktlen += wps_offset;
+
+ wps_ielen = (uint)pmlmepriv->wps_probe_resp_ie[1];/* to get ie data len */
+ if ((wps_offset+wps_ielen+2) <= MAX_IE_SZ) {
+ memcpy(pframe, pmlmepriv->wps_probe_resp_ie, wps_ielen+2);
+ pframe += wps_ielen+2;
+ pktlen += wps_ielen+2;
+ }
+
+ if ((wps_offset+wps_ielen+2+remainder_ielen) <= MAX_IE_SZ) {
+ memcpy(pframe, premainder_ie, remainder_ielen);
+ pframe += remainder_ielen;
+ pktlen += remainder_ielen;
+ }
+ } else {
+ memcpy(pframe, cur_network->IEs, cur_network->IELength);
+ pframe += cur_network->IELength;
+ pktlen += cur_network->IELength;
+ }
+
+ /* retrieve SSID IE from cur_network->Ssid */
+ {
+ u8 *ssid_ie;
+ sint ssid_ielen;
+ sint ssid_ielen_diff;
+ u8 buf[MAX_IE_SZ];
+ u8 *ies = pframe + sizeof(struct ieee80211_hdr_3addr);
+
+ ssid_ie = rtw_get_ie(ies+_FIXED_IE_LENGTH_, _SSID_IE_, &ssid_ielen,
+ (pframe-ies)-_FIXED_IE_LENGTH_);
+
+ ssid_ielen_diff = cur_network->Ssid.SsidLength - ssid_ielen;
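+ /* If the stored SSID length differs from the SSID IE already copied above, the */
+ /* IEs following it are shifted by ssid_ielen_diff (via buf) before rewriting it. */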
+
+ if (ssid_ie && cur_network->Ssid.SsidLength) {
+ uint remainder_ielen;
+ u8 *remainder_ie;
+ remainder_ie = ssid_ie+2;
+ remainder_ielen = (pframe-remainder_ie);
+
+ if (remainder_ielen > MAX_IE_SZ) {
+ DBG_871X_LEVEL(_drv_warning_, FUNC_ADPT_FMT" remainder_ielen > MAX_IE_SZ\n", FUNC_ADPT_ARG(padapter));
+ remainder_ielen = MAX_IE_SZ;
+ }
+
+ memcpy(buf, remainder_ie, remainder_ielen);
+ memcpy(remainder_ie+ssid_ielen_diff, buf, remainder_ielen);
+ *(ssid_ie+1) = cur_network->Ssid.SsidLength;
+ memcpy(ssid_ie+2, cur_network->Ssid.Ssid, cur_network->Ssid.SsidLength);
+ pframe += ssid_ielen_diff;
+ pktlen += ssid_ielen_diff;
+ }
+ }
+
+ *pLength = pktlen;
+
+}
+#endif /* CONFIG_AP_WOWLAN */
+
+/* To check if the reserved page content is destroyed because the beacon is too large. */
+/* 2010.06.23. Added by tynli. */
+void CheckFwRsvdPageContent(struct adapter *Adapter)
+{
+}
+
+static void rtl8723b_set_FwRsvdPage_cmd(struct adapter *padapter, PRSVDPAGE_LOC rsvdpageloc)
+{
+ u8 u1H2CRsvdPageParm[H2C_RSVDPAGE_LOC_LEN] = {0};
+
+ DBG_871X("8723BRsvdPageLoc: ProbeRsp =%d PsPoll =%d Null =%d QoSNull =%d BTNull =%d\n",
+ rsvdpageloc->LocProbeRsp, rsvdpageloc->LocPsPoll,
+ rsvdpageloc->LocNullData, rsvdpageloc->LocQosNull,
+ rsvdpageloc->LocBTQosNull);
+
+ SET_8723B_H2CCMD_RSVDPAGE_LOC_PROBE_RSP(u1H2CRsvdPageParm, rsvdpageloc->LocProbeRsp);
+ SET_8723B_H2CCMD_RSVDPAGE_LOC_PSPOLL(u1H2CRsvdPageParm, rsvdpageloc->LocPsPoll);
+ SET_8723B_H2CCMD_RSVDPAGE_LOC_NULL_DATA(u1H2CRsvdPageParm, rsvdpageloc->LocNullData);
+ SET_8723B_H2CCMD_RSVDPAGE_LOC_QOS_NULL_DATA(u1H2CRsvdPageParm, rsvdpageloc->LocQosNull);
+ SET_8723B_H2CCMD_RSVDPAGE_LOC_BT_QOS_NULL_DATA(u1H2CRsvdPageParm, rsvdpageloc->LocBTQosNull);
+
+ RT_PRINT_DATA(_module_hal_init_c_, _drv_always_, "u1H2CRsvdPageParm:", u1H2CRsvdPageParm, H2C_RSVDPAGE_LOC_LEN);
+ FillH2CCmd8723B(padapter, H2C_8723B_RSVD_PAGE, H2C_RSVDPAGE_LOC_LEN, u1H2CRsvdPageParm);
+}
+
+static void rtl8723b_set_FwAoacRsvdPage_cmd(struct adapter *padapter, PRSVDPAGE_LOC rsvdpageloc)
+{
+#ifdef CONFIG_WOWLAN
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ u8 u1H2CAoacRsvdPageParm[H2C_AOAC_RSVDPAGE_LOC_LEN] = {0};
+
+ DBG_871X("8723BAOACRsvdPageLoc: RWC =%d ArpRsp =%d NbrAdv =%d GtkRsp =%d GtkInfo =%d ProbeReq =%d NetworkList =%d\n",
+ rsvdpageloc->LocRemoteCtrlInfo, rsvdpageloc->LocArpRsp,
+ rsvdpageloc->LocNbrAdv, rsvdpageloc->LocGTKRsp,
+ rsvdpageloc->LocGTKInfo, rsvdpageloc->LocProbeReq,
+ rsvdpageloc->LocNetList);
+
+ if (check_fwstate(pmlmepriv, _FW_LINKED)) {
+ SET_H2CCMD_AOAC_RSVDPAGE_LOC_REMOTE_WAKE_CTRL_INFO(u1H2CAoacRsvdPageParm, rsvdpageloc->LocRemoteCtrlInfo);
+ SET_H2CCMD_AOAC_RSVDPAGE_LOC_ARP_RSP(u1H2CAoacRsvdPageParm, rsvdpageloc->LocArpRsp);
+ /* SET_H2CCMD_AOAC_RSVDPAGE_LOC_NEIGHBOR_ADV(u1H2CAoacRsvdPageParm, rsvdpageloc->LocNbrAdv); */
+ SET_H2CCMD_AOAC_RSVDPAGE_LOC_GTK_RSP(u1H2CAoacRsvdPageParm, rsvdpageloc->LocGTKRsp);
+ SET_H2CCMD_AOAC_RSVDPAGE_LOC_GTK_INFO(u1H2CAoacRsvdPageParm, rsvdpageloc->LocGTKInfo);
+#ifdef CONFIG_GTK_OL
+ SET_H2CCMD_AOAC_RSVDPAGE_LOC_GTK_EXT_MEM(u1H2CAoacRsvdPageParm, rsvdpageloc->LocGTKEXTMEM);
+#endif /* CONFIG_GTK_OL */
+ RT_PRINT_DATA(_module_hal_init_c_, _drv_always_, "u1H2CAoacRsvdPageParm:", u1H2CAoacRsvdPageParm, H2C_AOAC_RSVDPAGE_LOC_LEN);
+ FillH2CCmd8723B(padapter, H2C_8723B_AOAC_RSVD_PAGE, H2C_AOAC_RSVDPAGE_LOC_LEN, u1H2CAoacRsvdPageParm);
+ } else {
+#ifdef CONFIG_PNO_SUPPORT
+ if (!pwrpriv->pno_in_resume) {
+ DBG_871X("NLO_INFO =%d\n", rsvdpageloc->LocPNOInfo);
+ memset(&u1H2CAoacRsvdPageParm, 0, sizeof(u1H2CAoacRsvdPageParm));
+ SET_H2CCMD_AOAC_RSVDPAGE_LOC_NLO_INFO(u1H2CAoacRsvdPageParm, rsvdpageloc->LocPNOInfo);
+ FillH2CCmd8723B(padapter, H2C_AOAC_RSVDPAGE3, H2C_AOAC_RSVDPAGE_LOC_LEN, u1H2CAoacRsvdPageParm);
+ msleep(10);
+ }
+#endif
+ }
+
+#endif /* CONFIG_WOWLAN */
+}
+
+#ifdef CONFIG_AP_WOWLAN
+static void rtl8723b_set_ap_wow_rsvdpage_cmd(
+ struct adapter *padapter, PRSVDPAGE_LOC rsvdpageloc
+)
+{
+ u8 header;
+ u8 rsvdparm[H2C_AOAC_RSVDPAGE_LOC_LEN] = {0};
+
+ header = rtw_read8(padapter, REG_BCNQ_BDNY);
+
+ DBG_871X("%s: beacon: %d, probeRsp: %d, header:0x%02x\n", __func__,
+ rsvdpageloc->LocApOffloadBCN,
+ rsvdpageloc->LocProbeRsp,
+ header);
+
+ SET_H2CCMD_AP_WOWLAN_RSVDPAGE_LOC_BCN(rsvdparm,
+ rsvdpageloc->LocApOffloadBCN + header);
+
+ FillH2CCmd8723B(padapter, H2C_8723B_BCN_RSVDPAGE,
+ H2C_BCN_RSVDPAGE_LEN, rsvdparm);
+
+ msleep(10);
+
+ memset(&rsvdparm, 0, sizeof(rsvdparm));
+
+ SET_H2CCMD_AP_WOWLAN_RSVDPAGE_LOC_ProbeRsp(
+ rsvdparm,
+ rsvdpageloc->LocProbeRsp + header);
+
+ FillH2CCmd8723B(padapter, H2C_8723B_PROBERSP_RSVDPAGE,
+ H2C_PROBERSP_RSVDPAGE_LEN, rsvdparm);
+
+ msleep(10);
+}
+#endif /* CONFIG_AP_WOWLAN */
+
+void rtl8723b_set_FwMediaStatusRpt_cmd(struct adapter *padapter, u8 mstatus, u8 macid)
+{
+ u8 u1H2CMediaStatusRptParm[H2C_MEDIA_STATUS_RPT_LEN] = {0};
+ u8 macid_end = 0;
+
+ DBG_871X("%s(): mstatus = %d macid =%d\n", __func__, mstatus, macid);
+
+ SET_8723B_H2CCMD_MSRRPT_PARM_OPMODE(u1H2CMediaStatusRptParm, mstatus);
+ SET_8723B_H2CCMD_MSRRPT_PARM_MACID_IND(u1H2CMediaStatusRptParm, 0);
+ SET_8723B_H2CCMD_MSRRPT_PARM_MACID(u1H2CMediaStatusRptParm, macid);
+ SET_8723B_H2CCMD_MSRRPT_PARM_MACID_END(u1H2CMediaStatusRptParm, macid_end);
+
+ RT_PRINT_DATA(_module_hal_init_c_, _drv_always_, "u1H2CMediaStatusRptParm:", u1H2CMediaStatusRptParm, H2C_MEDIA_STATUS_RPT_LEN);
+ FillH2CCmd8723B(padapter, H2C_8723B_MEDIA_STATUS_RPT, H2C_MEDIA_STATUS_RPT_LEN, u1H2CMediaStatusRptParm);
+}
+
+#ifdef CONFIG_WOWLAN
+static void rtl8723b_set_FwKeepAlive_cmd(struct adapter *padapter, u8 benable, u8 pkt_type)
+{
+ u8 u1H2CKeepAliveParm[H2C_KEEP_ALIVE_CTRL_LEN] = {0};
+ u8 adopt = 1, check_period = 5;
+
+ DBG_871X("%s(): benable = %d\n", __func__, benable);
+ SET_8723B_H2CCMD_KEEPALIVE_PARM_ENABLE(u1H2CKeepAliveParm, benable);
+ SET_8723B_H2CCMD_KEEPALIVE_PARM_ADOPT(u1H2CKeepAliveParm, adopt);
+ SET_8723B_H2CCMD_KEEPALIVE_PARM_PKT_TYPE(u1H2CKeepAliveParm, pkt_type);
+ SET_8723B_H2CCMD_KEEPALIVE_PARM_CHECK_PERIOD(u1H2CKeepAliveParm, check_period);
+
+ RT_PRINT_DATA(_module_hal_init_c_, _drv_always_, "u1H2CKeepAliveParm:", u1H2CKeepAliveParm, H2C_KEEP_ALIVE_CTRL_LEN);
+
+ FillH2CCmd8723B(padapter, H2C_8723B_KEEP_ALIVE, H2C_KEEP_ALIVE_CTRL_LEN, u1H2CKeepAliveParm);
+}
+
+static void rtl8723b_set_FwDisconDecision_cmd(struct adapter *padapter, u8 benable)
+{
+ u8 u1H2CDisconDecisionParm[H2C_DISCON_DECISION_LEN] = {0};
+ u8 adopt = 1, check_period = 10, trypkt_num = 0;
+
+ DBG_871X("%s(): benable = %d\n", __func__, benable);
+ SET_8723B_H2CCMD_DISCONDECISION_PARM_ENABLE(u1H2CDisconDecisionParm, benable);
+ SET_8723B_H2CCMD_DISCONDECISION_PARM_ADOPT(u1H2CDisconDecisionParm, adopt);
+ SET_8723B_H2CCMD_DISCONDECISION_PARM_CHECK_PERIOD(u1H2CDisconDecisionParm, check_period);
+ SET_8723B_H2CCMD_DISCONDECISION_PARM_TRY_PKT_NUM(u1H2CDisconDecisionParm, trypkt_num);
+
+ RT_PRINT_DATA(_module_hal_init_c_, _drv_always_, "u1H2CDisconDecisionParm:", u1H2CDisconDecisionParm, H2C_DISCON_DECISION_LEN);
+
+ FillH2CCmd8723B(padapter, H2C_8723B_DISCON_DECISION, H2C_DISCON_DECISION_LEN, u1H2CDisconDecisionParm);
+}
+#endif /* CONFIG_WOWLAN */
+
+void rtl8723b_set_FwMacIdConfig_cmd(struct adapter *padapter, u8 mac_id, u8 raid, u8 bw, u8 sgi, u32 mask)
+{
+ u8 u1H2CMacIdConfigParm[H2C_MACID_CFG_LEN] = {0};
+
+ DBG_871X("%s(): mac_id =%d raid = 0x%x bw =%d mask = 0x%x\n", __func__, mac_id, raid, bw, mask);
+
+ SET_8723B_H2CCMD_MACID_CFG_MACID(u1H2CMacIdConfigParm, mac_id);
+ SET_8723B_H2CCMD_MACID_CFG_RAID(u1H2CMacIdConfigParm, raid);
+ SET_8723B_H2CCMD_MACID_CFG_SGI_EN(u1H2CMacIdConfigParm, sgi ? 1 : 0);
+ SET_8723B_H2CCMD_MACID_CFG_BW(u1H2CMacIdConfigParm, bw);
+ SET_8723B_H2CCMD_MACID_CFG_RATE_MASK0(u1H2CMacIdConfigParm, (u8)(mask & 0x000000ff));
+ SET_8723B_H2CCMD_MACID_CFG_RATE_MASK1(u1H2CMacIdConfigParm, (u8)((mask & 0x0000ff00) >> 8));
+ SET_8723B_H2CCMD_MACID_CFG_RATE_MASK2(u1H2CMacIdConfigParm, (u8)((mask & 0x00ff0000) >> 16));
+ SET_8723B_H2CCMD_MACID_CFG_RATE_MASK3(u1H2CMacIdConfigParm, (u8)((mask & 0xff000000) >> 24));
+
+ RT_PRINT_DATA(_module_hal_init_c_, _drv_always_, "u1H2CMacIdConfigParm:", u1H2CMacIdConfigParm, H2C_MACID_CFG_LEN);
+ FillH2CCmd8723B(padapter, H2C_8723B_MACID_CFG, H2C_MACID_CFG_LEN, u1H2CMacIdConfigParm);
+}
+
+static void rtl8723b_set_FwRssiSetting_cmd(struct adapter *padapter, u8 *param)
+{
+ u8 u1H2CRssiSettingParm[H2C_RSSI_SETTING_LEN] = {0};
+ u8 mac_id = *param;
+ u8 rssi = *(param+2);
+ u8 uldl_state = 0;
+
+ /* DBG_871X("%s(): param =%.2x-%.2x-%.2x\n", __func__, *param, *(param+1), *(param+2)); */
+ /* DBG_871X("%s(): mac_id =%d rssi =%d\n", __func__, mac_id, rssi); */
+
+ SET_8723B_H2CCMD_RSSI_SETTING_MACID(u1H2CRssiSettingParm, mac_id);
+ SET_8723B_H2CCMD_RSSI_SETTING_RSSI(u1H2CRssiSettingParm, rssi);
+ SET_8723B_H2CCMD_RSSI_SETTING_ULDL_STATE(u1H2CRssiSettingParm, uldl_state);
+
+ RT_PRINT_DATA(_module_hal_init_c_, _drv_notice_, "u1H2CRssiSettingParm:", u1H2CRssiSettingParm, H2C_RSSI_SETTING_LEN);
+ FillH2CCmd8723B(padapter, H2C_8723B_RSSI_SETTING, H2C_RSSI_SETTING_LEN, u1H2CRssiSettingParm);
+}
+
+void rtl8723b_set_FwPwrMode_cmd(struct adapter *padapter, u8 psmode)
+{
+ int i;
+ struct pwrctrl_priv *pwrpriv = adapter_to_pwrctl(padapter);
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ u8 u1H2CPwrModeParm[H2C_PWRMODE_LEN] = {0};
+ u8 PowerState = 0, awake_intvl = 1, byte5 = 0, rlbm = 0;
+
+ if (pwrpriv->dtim > 0)
+ DBG_871X("%s(): FW LPS mode = %d, SmartPS =%d, dtim =%d\n", __func__, psmode, pwrpriv->smart_ps, pwrpriv->dtim);
+ else
+ DBG_871X("%s(): FW LPS mode = %d, SmartPS =%d\n", __func__, psmode, pwrpriv->smart_ps);
+
+#ifdef CONFIG_WOWLAN
+ if (psmode == PS_MODE_DTIM) { /* For WOWLAN LPS, DTIM = (awake_intvl - 1) */
+ awake_intvl = 3;/* DTIM =2 */
+ rlbm = 2;
+ } else
+#endif /* CONFIG_WOWLAN */
+ {
+ if (pwrpriv->dtim > 0 && pwrpriv->dtim < 16)
+ awake_intvl = pwrpriv->dtim+1;/* DTIM = (awake_intvl - 1) */
+ else
+ awake_intvl = 3;/* DTIM =2 */
+
+ rlbm = 2;
+ }
+
+
+ if (padapter->registrypriv.wifi_spec == 1) {
+ awake_intvl = 2;
+ rlbm = 2;
+ }
+
+ if (psmode > 0) {
+ if (rtw_btcoex_IsBtControlLps(padapter) == true) {
+ PowerState = rtw_btcoex_RpwmVal(padapter);
+ byte5 = rtw_btcoex_LpsVal(padapter);
+
+ if ((rlbm == 2) && (byte5 & BIT(4))) {
+ /* Keep DTIM = 1 (awake_intvl = 2) to avoid */
+ /* degrading coex performance */
+ awake_intvl = 2;
+ rlbm = 2;
+ }
+ } else {
+ PowerState = 0x00;/* AllON(0x0C), RFON(0x04), RFOFF(0x00) */
+ byte5 = 0x40;
+ }
+ } else {
+ PowerState = 0x0C;/* AllON(0x0C), RFON(0x04), RFOFF(0x00) */
+ byte5 = 0x40;
+ }
+
+ SET_8723B_H2CCMD_PWRMODE_PARM_MODE(u1H2CPwrModeParm, (psmode > 0) ? 1 : 0);
+ SET_8723B_H2CCMD_PWRMODE_PARM_SMART_PS(u1H2CPwrModeParm, pwrpriv->smart_ps);
+ SET_8723B_H2CCMD_PWRMODE_PARM_RLBM(u1H2CPwrModeParm, rlbm);
+ SET_8723B_H2CCMD_PWRMODE_PARM_BCN_PASS_TIME(u1H2CPwrModeParm, awake_intvl);
+ SET_8723B_H2CCMD_PWRMODE_PARM_ALL_QUEUE_UAPSD(u1H2CPwrModeParm, padapter->registrypriv.uapsd_enable);
+ SET_8723B_H2CCMD_PWRMODE_PARM_PWR_STATE(u1H2CPwrModeParm, PowerState);
+ SET_8723B_H2CCMD_PWRMODE_PARM_BYTE5(u1H2CPwrModeParm, byte5);
+ if (psmode != PS_MODE_ACTIVE) {
+ if (pmlmeext->adaptive_tsf_done == false && pmlmeext->bcn_cnt > 0) {
+ u8 ratio_20_delay, ratio_80_delay;
+
+ /* byte 6 for adaptive_early_32k */
+ /* [0:3] = DrvBcnEarly (ms), [4:7] = DrvBcnTimeOut (ms) */
+ /* 20% for DrvBcnEarly, 80% for DrvBcnTimeOut */
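+ /* Example encoding (per the commented-out formula at the end of this block): */
+ /* DrvBcnEarly = 2, DrvBcnTimeOut = 7 => BIT(0) | (2 << 1) | (7 << 4) = 0x75 */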
+ ratio_20_delay = 0;
+ ratio_80_delay = 0;
+ pmlmeext->DrvBcnEarly = 0xff;
+ pmlmeext->DrvBcnTimeOut = 0xff;
+
+ DBG_871X("%s(): bcn_cnt = %d\n", __func__, pmlmeext->bcn_cnt);
+
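+ /* Walk the per-ms beacon-delay histogram; the first bins whose cumulative */
+ /* ratio crosses 20% and 80% become DrvBcnEarly and DrvBcnTimeOut. */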
+ for (i = 0; i < 9; i++) {
+ pmlmeext->bcn_delay_ratio[i] = (pmlmeext->bcn_delay_cnt[i]*100)/pmlmeext->bcn_cnt;
+
+ DBG_871X(
+ "%s(): bcn_delay_cnt[%d]=%d, bcn_delay_ratio[%d] = %d\n",
+ __func__,
+ i,
+ pmlmeext->bcn_delay_cnt[i],
+ i,
+ pmlmeext->bcn_delay_ratio[i]
+ );
+
+ ratio_20_delay += pmlmeext->bcn_delay_ratio[i];
+ ratio_80_delay += pmlmeext->bcn_delay_ratio[i];
+
+ if (ratio_20_delay > 20 && pmlmeext->DrvBcnEarly == 0xff) {
+ pmlmeext->DrvBcnEarly = i;
+ DBG_871X("%s(): DrvBcnEarly = %d\n", __func__, pmlmeext->DrvBcnEarly);
+ }
+
+ if (ratio_80_delay > 80 && pmlmeext->DrvBcnTimeOut == 0xff) {
+ pmlmeext->DrvBcnTimeOut = i;
+ DBG_871X("%s(): DrvBcnTimeOut = %d\n", __func__, pmlmeext->DrvBcnTimeOut);
+ }
+
+ /* reset adaptive_early_32k cnt */
+ pmlmeext->bcn_delay_cnt[i] = 0;
+ pmlmeext->bcn_delay_ratio[i] = 0;
+
+ }
+
+ pmlmeext->bcn_cnt = 0;
+ pmlmeext->adaptive_tsf_done = true;
+
+ } else {
+ DBG_871X("%s(): DrvBcnEarly = %d\n", __func__, pmlmeext->DrvBcnEarly);
+ DBG_871X("%s(): DrvBcnTimeOut = %d\n", __func__, pmlmeext->DrvBcnTimeOut);
+ }
+
+/* offload to FW if fw version > v15.10
+ pmlmeext->DrvBcnEarly = 0;
+ pmlmeext->DrvBcnTimeOut =7;
+
+ if ((pmlmeext->DrvBcnEarly!= 0Xff) && (pmlmeext->DrvBcnTimeOut!= 0xff))
+ u1H2CPwrModeParm[H2C_PWRMODE_LEN-1] = BIT(0) | ((pmlmeext->DrvBcnEarly<<1)&0x0E) |((pmlmeext->DrvBcnTimeOut<<4)&0xf0) ;
+*/
+
+ }
+
+ rtw_btcoex_RecordPwrMode(padapter, u1H2CPwrModeParm, H2C_PWRMODE_LEN);
+
+ RT_PRINT_DATA(_module_hal_init_c_, _drv_always_, "u1H2CPwrModeParm:", u1H2CPwrModeParm, H2C_PWRMODE_LEN);
+
+ FillH2CCmd8723B(padapter, H2C_8723B_SET_PWR_MODE, H2C_PWRMODE_LEN, u1H2CPwrModeParm);
+}
+
+void rtl8723b_set_FwPsTuneParam_cmd(struct adapter *padapter)
+{
+ u8 u1H2CPsTuneParm[H2C_PSTUNEPARAM_LEN] = {0};
+ u8 bcn_to_limit = 10; /* 10 * 100 * awakeinterval (ms) */
+ u8 dtim_timeout = 5; /* ms wait broadcast data timer */
+ u8 ps_timeout = 20; /* ms Keep awake when tx */
+ u8 dtim_period = 3;
+
+ /* DBG_871X("%s(): FW LPS mode = %d\n", __func__, psmode); */
+
+ SET_8723B_H2CCMD_PSTUNE_PARM_BCN_TO_LIMIT(u1H2CPsTuneParm, bcn_to_limit);
+ SET_8723B_H2CCMD_PSTUNE_PARM_DTIM_TIMEOUT(u1H2CPsTuneParm, dtim_timeout);
+ SET_8723B_H2CCMD_PSTUNE_PARM_PS_TIMEOUT(u1H2CPsTuneParm, ps_timeout);
+ SET_8723B_H2CCMD_PSTUNE_PARM_ADOPT(u1H2CPsTuneParm, 1);
+ SET_8723B_H2CCMD_PSTUNE_PARM_DTIM_PERIOD(u1H2CPsTuneParm, dtim_period);
+
+ RT_PRINT_DATA(_module_hal_init_c_, _drv_always_, "u1H2CPsTuneParm:", u1H2CPsTuneParm, H2C_PSTUNEPARAM_LEN);
+
+ FillH2CCmd8723B(padapter, H2C_8723B_PS_TUNING_PARA, H2C_PSTUNEPARAM_LEN, u1H2CPsTuneParm);
+}
+
+void rtl8723b_set_FwPwrModeInIPS_cmd(struct adapter *padapter, u8 cmd_param)
+{
+ /* BIT0:enable, BIT1:NoConnect32k */
+
+ DBG_871X("%s()\n", __func__);
+
+ FillH2CCmd8723B(padapter, H2C_8723B_FWLPS_IN_IPS_, 1, &cmd_param);
+}
+
+#ifdef CONFIG_WOWLAN
+static void rtl8723b_set_FwWoWlanCtrl_Cmd(struct adapter *padapter, u8 bFuncEn)
+{
+ struct security_priv *psecpriv = &padapter->securitypriv;
+ u8 u1H2CWoWlanCtrlParm[H2C_WOWLAN_LEN] = {0};
+ u8 discont_wake = 1, gpionum = 0, gpio_dur = 0, hw_unicast = 0;
+ u8 sdio_wakeup_enable = 1;
+ u8 gpio_high_active = 0; /* 0: low active, 1: high active */
+ u8 magic_pkt = 0;
+
+#ifdef CONFIG_GPIO_WAKEUP
+ gpionum = WAKEUP_GPIO_IDX;
+ sdio_wakeup_enable = 0;
+#endif
+
+#ifdef CONFIG_PNO_SUPPORT
+ if (!ppwrpriv->wowlan_pno_enable)
+ magic_pkt = 1;
+#endif
+
+ if (psecpriv->dot11PrivacyAlgrthm == _WEP40_ || psecpriv->dot11PrivacyAlgrthm == _WEP104_)
+ hw_unicast = 1;
+
+ DBG_871X("%s(): bFuncEn =%d\n", __func__, bFuncEn);
+
+ SET_H2CCMD_WOWLAN_FUNC_ENABLE(u1H2CWoWlanCtrlParm, bFuncEn);
+ SET_H2CCMD_WOWLAN_PATTERN_MATCH_ENABLE(u1H2CWoWlanCtrlParm, 0);
+ SET_H2CCMD_WOWLAN_MAGIC_PKT_ENABLE(u1H2CWoWlanCtrlParm, magic_pkt);
+ SET_H2CCMD_WOWLAN_UNICAST_PKT_ENABLE(u1H2CWoWlanCtrlParm, hw_unicast);
+ SET_H2CCMD_WOWLAN_ALL_PKT_DROP(u1H2CWoWlanCtrlParm, 0);
+ SET_H2CCMD_WOWLAN_GPIO_ACTIVE(u1H2CWoWlanCtrlParm, gpio_high_active);
+ SET_H2CCMD_WOWLAN_DISCONNECT_WAKE_UP(u1H2CWoWlanCtrlParm, discont_wake);
+ SET_H2CCMD_WOWLAN_GPIONUM(u1H2CWoWlanCtrlParm, gpionum);
+ SET_H2CCMD_WOWLAN_DATAPIN_WAKE_UP(u1H2CWoWlanCtrlParm, sdio_wakeup_enable);
+ SET_H2CCMD_WOWLAN_GPIO_DURATION(u1H2CWoWlanCtrlParm, gpio_dur);
+ /* SET_H2CCMD_WOWLAN_GPIO_PULSE_EN(u1H2CWoWlanCtrlParm, 1); */
+ SET_H2CCMD_WOWLAN_GPIO_PULSE_COUNT(u1H2CWoWlanCtrlParm, 0x09);
+
+ RT_PRINT_DATA(_module_hal_init_c_, _drv_always_, "u1H2CWoWlanCtrlParm:", u1H2CWoWlanCtrlParm, H2C_WOWLAN_LEN);
+
+ FillH2CCmd8723B(padapter, H2C_8723B_WOWLAN, H2C_WOWLAN_LEN, u1H2CWoWlanCtrlParm);
+}
+
+static void rtl8723b_set_FwRemoteWakeCtrl_Cmd(struct adapter *padapter, u8 benable)
+{
+ u8 u1H2CRemoteWakeCtrlParm[H2C_REMOTE_WAKE_CTRL_LEN] = {0};
+ struct security_priv *psecuritypriv = &(padapter->securitypriv);
+ struct pwrctrl_priv *ppwrpriv = adapter_to_pwrctl(padapter);
+
+ DBG_871X("%s(): Enable =%d\n", __func__, benable);
+
+ if (!ppwrpriv->wowlan_pno_enable) {
+ SET_H2CCMD_REMOTE_WAKECTRL_ENABLE(u1H2CRemoteWakeCtrlParm, benable);
+ SET_H2CCMD_REMOTE_WAKE_CTRL_ARP_OFFLOAD_EN(u1H2CRemoteWakeCtrlParm, 1);
+#ifdef CONFIG_GTK_OL
+ if (psecuritypriv->binstallKCK_KEK &&
+ psecuritypriv->dot11PrivacyAlgrthm == _AES_) {
+ SET_H2CCMD_REMOTE_WAKE_CTRL_GTK_OFFLOAD_EN(u1H2CRemoteWakeCtrlParm, 1);
+ } else {
+ DBG_871X("no kck or security is not AES\n");
+ SET_H2CCMD_REMOTE_WAKE_CTRL_GTK_OFFLOAD_EN(u1H2CRemoteWakeCtrlParm, 0);
+ }
+#endif /* CONFIG_GTK_OL */
+
+ SET_H2CCMD_REMOTE_WAKE_CTRL_FW_UNICAST_EN(u1H2CRemoteWakeCtrlParm, 1);
+
+ if ((psecuritypriv->dot11PrivacyAlgrthm == _AES_) ||
+ (psecuritypriv->dot11PrivacyAlgrthm == _NO_PRIVACY_))
+ SET_H2CCMD_REMOTE_WAKE_CTRL_ARP_ACTION(u1H2CRemoteWakeCtrlParm, 0);
+ else
+ SET_H2CCMD_REMOTE_WAKE_CTRL_ARP_ACTION(u1H2CRemoteWakeCtrlParm, 1);
+ }
+#ifdef CONFIG_PNO_SUPPORT
+ else {
+ SET_H2CCMD_REMOTE_WAKECTRL_ENABLE(u1H2CRemoteWakeCtrlParm, benable);
+ SET_H2CCMD_REMOTE_WAKE_CTRL_NLO_OFFLOAD_EN(u1H2CRemoteWakeCtrlParm, benable);
+ }
+#endif
+ RT_PRINT_DATA(_module_hal_init_c_, _drv_always_, "u1H2CRemoteWakeCtrlParm:", u1H2CRemoteWakeCtrlParm, H2C_REMOTE_WAKE_CTRL_LEN);
+ FillH2CCmd8723B(padapter, H2C_8723B_REMOTE_WAKE_CTRL,
+ H2C_REMOTE_WAKE_CTRL_LEN, u1H2CRemoteWakeCtrlParm);
+#ifdef CONFIG_PNO_SUPPORT
+ if (ppwrpriv->wowlan_pno_enable && ppwrpriv->pno_in_resume == false) {
+ res = rtw_read8(padapter, REG_PNO_STATUS);
+ DBG_871X("cmd: 0x81 REG_PNO_STATUS: 0x%02x\n", res);
+ while (!(res&BIT(7)) && count < 25) {
+ DBG_871X("[%d] cmd: 0x81 REG_PNO_STATUS: 0x%02x\n", count, res);
+ res = rtw_read8(padapter, REG_PNO_STATUS);
+ count++;
+ msleep(2);
+ }
+ DBG_871X("cmd: 0x81 REG_PNO_STATUS: 0x%02x\n", res);
+ }
+#endif /* CONFIG_PNO_SUPPORT */
+}
+
+static void rtl8723b_set_FwAOACGlobalInfo_Cmd(struct adapter *padapter, u8 group_alg, u8 pairwise_alg)
+{
+ u8 u1H2CAOACGlobalInfoParm[H2C_AOAC_GLOBAL_INFO_LEN] = {0};
+
+ DBG_871X("%s(): group_alg =%d pairwise_alg =%d\n", __func__, group_alg, pairwise_alg);
+
+ SET_H2CCMD_AOAC_GLOBAL_INFO_PAIRWISE_ENC_ALG(u1H2CAOACGlobalInfoParm, pairwise_alg);
+ SET_H2CCMD_AOAC_GLOBAL_INFO_GROUP_ENC_ALG(u1H2CAOACGlobalInfoParm, group_alg);
+
+ RT_PRINT_DATA(_module_hal_init_c_, _drv_always_, "u1H2CAOACGlobalInfoParm:", u1H2CAOACGlobalInfoParm, H2C_AOAC_GLOBAL_INFO_LEN);
+
+ FillH2CCmd8723B(padapter, H2C_8723B_AOAC_GLOBAL_INFO, H2C_AOAC_GLOBAL_INFO_LEN, u1H2CAOACGlobalInfoParm);
+}
+
+#ifdef CONFIG_PNO_SUPPORT
+static void rtl8723b_set_FwScanOffloadInfo_cmd(struct adapter *padapter, PRSVDPAGE_LOC rsvdpageloc, u8 enable)
+{
+ u8 u1H2CScanOffloadInfoParm[H2C_SCAN_OFFLOAD_CTRL_LEN] = {0};
+ u8 res = 0, count = 0;
+ struct pwrctrl_priv *pwrpriv = adapter_to_pwrctl(padapter);
+
+ DBG_871X("%s: loc_probe_packet:%d, loc_scan_info: %d loc_ssid_info:%d\n",
+ __func__, rsvdpageloc->LocProbePacket, rsvdpageloc->LocScanInfo, rsvdpageloc->LocSSIDInfo);
+
+ SET_H2CCMD_AOAC_NLO_FUN_EN(u1H2CScanOffloadInfoParm, enable);
+ SET_H2CCMD_AOAC_RSVDPAGE_LOC_SCAN_INFO(u1H2CScanOffloadInfoParm, rsvdpageloc->LocScanInfo);
+ SET_H2CCMD_AOAC_RSVDPAGE_LOC_PROBE_PACKET(u1H2CScanOffloadInfoParm, rsvdpageloc->LocProbePacket);
+ SET_H2CCMD_AOAC_RSVDPAGE_LOC_SSID_INFO(u1H2CScanOffloadInfoParm, rsvdpageloc->LocSSIDInfo);
+
+ RT_PRINT_DATA(_module_hal_init_c_, _drv_always_, "u1H2CScanOffloadInfoParm:", u1H2CScanOffloadInfoParm, H2C_SCAN_OFFLOAD_CTRL_LEN);
+ FillH2CCmd8723B(padapter, H2C_8723B_D0_SCAN_OFFLOAD_INFO, H2C_SCAN_OFFLOAD_CTRL_LEN, u1H2CScanOffloadInfoParm);
+
+ msleep(20);
+}
+#endif /* CONFIG_PNO_SUPPORT */
+
+static void rtl8723b_set_FwWoWlanRelated_cmd(struct adapter *padapter, u8 enable)
+{
+ struct security_priv *psecpriv = &padapter->securitypriv;
+ struct pwrctrl_priv *ppwrpriv = adapter_to_pwrctl(padapter);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct sta_info *psta = NULL;
+ u8 pkt_type = 0;
+
+ DBG_871X_LEVEL(_drv_always_, "+%s()+: enable =%d\n", __func__, enable);
+ if (enable) {
+ rtl8723b_set_FwAOACGlobalInfo_Cmd(padapter, psecpriv->dot118021XGrpPrivacy, psecpriv->dot11PrivacyAlgrthm);
+
+ rtl8723b_set_FwJoinBssRpt_cmd(padapter, RT_MEDIA_CONNECT); /* using RT_MEDIA_CONNECT here may be confusing in the future */
+
+ if (!(ppwrpriv->wowlan_pno_enable)) {
+ psta = rtw_get_stainfo(&padapter->stapriv, get_bssid(pmlmepriv));
+ if (psta != NULL)
+ rtl8723b_set_FwMediaStatusRpt_cmd(padapter, RT_MEDIA_CONNECT, psta->mac_id);
+ } else
+ DBG_871X("%s(): Disconnected, no FwMediaStatusRpt CONNECT\n", __func__);
+
+ msleep(2);
+
+ if (!(ppwrpriv->wowlan_pno_enable)) {
+ rtl8723b_set_FwDisconDecision_cmd(padapter, enable);
+ msleep(2);
+
+ if ((psecpriv->dot11PrivacyAlgrthm != _WEP40_) && (psecpriv->dot11PrivacyAlgrthm != _WEP104_))
+ pkt_type = 1;
+
+ rtl8723b_set_FwKeepAlive_cmd(padapter, enable, pkt_type);
+ msleep(2);
+ }
+
+ rtl8723b_set_FwWoWlanCtrl_Cmd(padapter, enable);
+ msleep(2);
+
+ rtl8723b_set_FwRemoteWakeCtrl_Cmd(padapter, enable);
+ } else {
+ rtl8723b_set_FwRemoteWakeCtrl_Cmd(padapter, enable);
+ msleep(2);
+ rtl8723b_set_FwWoWlanCtrl_Cmd(padapter, enable);
+ }
+
+ DBG_871X_LEVEL(_drv_always_, "-%s()-\n", __func__);
+}
+
+void rtl8723b_set_wowlan_cmd(struct adapter *padapter, u8 enable)
+{
+ rtl8723b_set_FwWoWlanRelated_cmd(padapter, enable);
+}
+#endif /* CONFIG_WOWLAN */
+
+#ifdef CONFIG_AP_WOWLAN
+static void rtl8723b_set_FwAPWoWlanCtrl_Cmd(struct adapter *padapter, u8 bFuncEn)
+{
+ u8 u1H2CAPWoWlanCtrlParm[H2C_WOWLAN_LEN] = {0};
+ u8 gpionum = 0, gpio_dur = 0;
+ u8 gpio_high_active = 1; /* 0: low active, 1: high active */
+ u8 gpio_pulse = bFuncEn;
+#ifdef CONFIG_GPIO_WAKEUP
+ gpionum = WAKEUP_GPIO_IDX;
+#endif
+
+ DBG_871X("%s(): bFuncEn =%d\n", __func__, bFuncEn);
+
+ if (bFuncEn)
+ gpio_dur = 16;
+ else
+ gpio_dur = 0;
+
+ SET_H2CCMD_AP_WOW_GPIO_CTRL_INDEX(u1H2CAPWoWlanCtrlParm,
+ gpionum);
+ SET_H2CCMD_AP_WOW_GPIO_CTRL_PLUS(u1H2CAPWoWlanCtrlParm,
+ gpio_pulse);
+ SET_H2CCMD_AP_WOW_GPIO_CTRL_HIGH_ACTIVE(u1H2CAPWoWlanCtrlParm,
+ gpio_high_active);
+ SET_H2CCMD_AP_WOW_GPIO_CTRL_EN(u1H2CAPWoWlanCtrlParm,
+ bFuncEn);
+ SET_H2CCMD_AP_WOW_GPIO_CTRL_DURATION(u1H2CAPWoWlanCtrlParm,
+ gpio_dur);
+
+ FillH2CCmd8723B(padapter, H2C_8723B_AP_WOW_GPIO_CTRL,
+ H2C_AP_WOW_GPIO_CTRL_LEN, u1H2CAPWoWlanCtrlParm);
+}
+
+static void rtl8723b_set_Fw_AP_Offload_Cmd(struct adapter *padapter, u8 bFuncEn)
+{
+ u8 u1H2CAPOffloadCtrlParm[H2C_WOWLAN_LEN] = {0};
+
+ DBG_871X("%s(): bFuncEn =%d\n", __func__, bFuncEn);
+
+ SET_H2CCMD_AP_WOWLAN_EN(u1H2CAPOffloadCtrlParm, bFuncEn);
+
+ FillH2CCmd8723B(padapter, H2C_8723B_AP_OFFLOAD,
+ H2C_AP_OFFLOAD_LEN, u1H2CAPOffloadCtrlParm);
+}
+
+static void rtl8723b_set_AP_FwWoWlan_cmd(struct adapter *padapter, u8 enable)
+{
+ DBG_871X_LEVEL(_drv_always_, "+%s()+: enable =%d\n", __func__, enable);
+ if (enable) {
+ rtl8723b_set_FwJoinBssRpt_cmd(padapter, RT_MEDIA_CONNECT);
+ issue_beacon(padapter, 0);
+ }
+
+ rtl8723b_set_FwAPWoWlanCtrl_Cmd(padapter, enable);
+ msleep(10);
+ rtl8723b_set_Fw_AP_Offload_Cmd(padapter, enable);
+ msleep(10);
+ DBG_871X_LEVEL(_drv_always_, "-%s()-\n", __func__);
+ return;
+}
+
+void rtl8723b_set_ap_wowlan_cmd(struct adapter *padapter, u8 enable)
+{
+ rtl8723b_set_AP_FwWoWlan_cmd(padapter, enable);
+}
+#endif /* CONFIG_AP_WOWLAN */
+
+/* */
+/* Description: Fill the reserved packets that the FW will use into the RSVD page. */
+/* Currently we send 4 types of packets to the rsvd page: */
+/* (1) Beacon, (2) PS-Poll, (3) Null data, (4) ProbeRsp. */
+/* Input: */
+/* bDLFinished - false: The first time, we send all the packets to Hw as one large packet, */
+/* so the packet length must be set to the total length. */
+/* true: The second time, we send the first packet (default: beacon) */
+/* to Hw again and set the length in the descriptor to the real beacon length. */
+/* 2009.10.15 by tynli. */
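+/* Reserved page layout built below (station / WOWLAN case): */
+/*   beacon | ps-poll | null | qos-null | BT qos-null | [ARP rsp | sec IV | KCK+KEK | GTK rsp | GTK ext mem] */
+/* Each entry starts on a 128-byte page boundary and its page index is reported to the FW via RSVDPAGE_LOC. */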
+static void rtl8723b_set_FwRsvdPagePkt(
+ struct adapter *padapter, bool bDLFinished
+)
+{
+ struct hal_com_data *pHalData;
+ struct xmit_frame *pcmdframe;
+ struct pkt_attrib *pattrib;
+ struct xmit_priv *pxmitpriv;
+ struct mlme_ext_priv *pmlmeext;
+ struct mlme_ext_info *pmlmeinfo;
+ struct pwrctrl_priv *pwrctl;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ u32 BeaconLength = 0, PSPollLength = 0;
+ u32 NullDataLength = 0, QosNullLength = 0, BTQosNullLength = 0;
+ u8 *ReservedPagePacket;
+ u8 TxDescLen = TXDESC_SIZE, TxDescOffset = TXDESC_OFFSET;
+ u8 TotalPageNum = 0, CurtPktPageNum = 0, RsvdPageNum = 0;
+ u16 BufIndex, PageSize = 128;
+ u32 TotalPacketLen, MaxRsvdPageBufSize = 0;
+ RSVDPAGE_LOC RsvdPageLoc;
+#ifdef CONFIG_WOWLAN
+ u32 ARPLegnth = 0, GTKLegnth = 0;
+ u8 currentip[4];
+ u8 cur_dot11txpn[8];
+#ifdef CONFIG_GTK_OL
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct sta_info *psta;
+ u8 kek[RTW_KEK_LEN];
+ u8 kck[RTW_KCK_LEN];
+#endif
+#endif
+
+ /* DBG_871X("%s---->\n", __func__); */
+
+ pHalData = GET_HAL_DATA(padapter);
+ pxmitpriv = &padapter->xmitpriv;
+ pmlmeext = &padapter->mlmeextpriv;
+ pmlmeinfo = &pmlmeext->mlmext_info;
+ pwrctl = adapter_to_pwrctl(padapter);
+
+ RsvdPageNum = BCNQ_PAGE_NUM_8723B + WOWLAN_PAGE_NUM_8723B;
+ MaxRsvdPageBufSize = RsvdPageNum*PageSize;
+
+ pcmdframe = rtw_alloc_cmdxmitframe(pxmitpriv);
+ if (pcmdframe == NULL) {
+ DBG_871X("%s: alloc ReservedPagePacket fail!\n", __func__);
+ return;
+ }
+
+ ReservedPagePacket = pcmdframe->buf_addr;
+ memset(&RsvdPageLoc, 0, sizeof(RSVDPAGE_LOC));
+
+ /* 3 (1) beacon */
+ BufIndex = TxDescOffset;
+ ConstructBeacon(padapter, &ReservedPagePacket[BufIndex], &BeaconLength);
+
+ /* When counting the first page size, we need to reserve room for the TX descriptor of the RSVD */
+ /* packet; it will be filled in front of the packet in TXPKTBUF. */
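+ /* PageNum_128() rounds a byte count up to whole 128-byte TX pages; */
+ /* e.g. a 290-byte descriptor-plus-beacon occupies 3 pages. */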
+ CurtPktPageNum = (u8)PageNum_128(TxDescLen + BeaconLength);
+ /* If we don't add 1 more page, the WOWLAN function has a problem; Baron thinks it is a firmware bug. */
+ if (CurtPktPageNum == 1)
+ CurtPktPageNum += 1;
+
+ TotalPageNum += CurtPktPageNum;
+
+ BufIndex += (CurtPktPageNum*PageSize);
+
+ /* 3 (2) ps-poll */
+ RsvdPageLoc.LocPsPoll = TotalPageNum;
+ ConstructPSPoll(padapter, &ReservedPagePacket[BufIndex], &PSPollLength);
+ rtl8723b_fill_fake_txdesc(padapter, &ReservedPagePacket[BufIndex-TxDescLen], PSPollLength, true, false, false);
+
+ /* DBG_871X("%s(): HW_VAR_SET_TX_CMD: PS-POLL %p %d\n", */
+ /* __func__, &ReservedPagePacket[BufIndex-TxDescLen], (PSPollLength+TxDescLen)); */
+
+ CurtPktPageNum = (u8)PageNum_128(TxDescLen + PSPollLength);
+
+ TotalPageNum += CurtPktPageNum;
+
+ BufIndex += (CurtPktPageNum*PageSize);
+
+ /* 3 (3) null data */
+ RsvdPageLoc.LocNullData = TotalPageNum;
+ ConstructNullFunctionData(
+ padapter,
+ &ReservedPagePacket[BufIndex],
+ &NullDataLength,
+ get_my_bssid(&pmlmeinfo->network),
+ false, 0, 0, false
+ );
+ rtl8723b_fill_fake_txdesc(padapter, &ReservedPagePacket[BufIndex-TxDescLen], NullDataLength, false, false, false);
+
+ /* DBG_871X("%s(): HW_VAR_SET_TX_CMD: NULL DATA %p %d\n", */
+ /* __func__, &ReservedPagePacket[BufIndex-TxDescLen], (NullDataLength+TxDescLen)); */
+
+ CurtPktPageNum = (u8)PageNum_128(TxDescLen + NullDataLength);
+
+ TotalPageNum += CurtPktPageNum;
+
+ BufIndex += (CurtPktPageNum*PageSize);
+
+ /* 3 (5) Qos null data */
+ RsvdPageLoc.LocQosNull = TotalPageNum;
+ ConstructNullFunctionData(
+ padapter,
+ &ReservedPagePacket[BufIndex],
+ &QosNullLength,
+ get_my_bssid(&pmlmeinfo->network),
+ true, 0, 0, false
+ );
+ rtl8723b_fill_fake_txdesc(padapter, &ReservedPagePacket[BufIndex-TxDescLen], QosNullLength, false, false, false);
+
+ /* DBG_871X("%s(): HW_VAR_SET_TX_CMD: QOS NULL DATA %p %d\n", */
+ /* __func__, &ReservedPagePacket[BufIndex-TxDescLen], (QosNullLength+TxDescLen)); */
+
+ CurtPktPageNum = (u8)PageNum_128(TxDescLen + QosNullLength);
+
+ TotalPageNum += CurtPktPageNum;
+
+ BufIndex += (CurtPktPageNum*PageSize);
+
+ /* 3 (6) BT Qos null data */
+ RsvdPageLoc.LocBTQosNull = TotalPageNum;
+ ConstructNullFunctionData(
+ padapter,
+ &ReservedPagePacket[BufIndex],
+ &BTQosNullLength,
+ get_my_bssid(&pmlmeinfo->network),
+ true, 0, 0, false
+ );
+ rtl8723b_fill_fake_txdesc(padapter, &ReservedPagePacket[BufIndex-TxDescLen], BTQosNullLength, false, true, false);
+
+ /* DBG_871X("%s(): HW_VAR_SET_TX_CMD: BT QOS NULL DATA %p %d\n", */
+ /* __func__, &ReservedPagePacket[BufIndex-TxDescLen], (BTQosNullLength+TxDescLen)); */
+
+ CurtPktPageNum = (u8)PageNum_128(TxDescLen + BTQosNullLength);
+
+ TotalPageNum += CurtPktPageNum;
+
+ BufIndex += (CurtPktPageNum*PageSize);
+
+#ifdef CONFIG_WOWLAN
+ if (check_fwstate(pmlmepriv, _FW_LINKED)) {
+ /* if (pwrctl->wowlan_mode == true) { */
+ /* BufIndex += (CurtPktPageNum*PageSize); */
+
+ /* 3(7) ARP RSP */
+ rtw_get_current_ip_address(padapter, currentip);
+ RsvdPageLoc.LocArpRsp = TotalPageNum;
+ {
+ ConstructARPResponse(
+ padapter,
+ &ReservedPagePacket[BufIndex],
+ &ARPLegnth,
+ currentip
+ );
+ rtl8723b_fill_fake_txdesc(padapter, &ReservedPagePacket[BufIndex-TxDescLen], ARPLegnth, false, false, true);
+
+ /* DBG_871X("%s(): HW_VAR_SET_TX_CMD: ARP RSP %p %d\n", */
+ /* __func__, &ReservedPagePacket[BufIndex-TxDescLen], (ARPLegnth+TxDescLen)); */
+
+ CurtPktPageNum = (u8)PageNum_128(TxDescLen + ARPLegnth);
+ }
+ TotalPageNum += CurtPktPageNum;
+
+ BufIndex += (CurtPktPageNum*PageSize);
+
+ /* 3(8) SEC IV */
+ rtw_get_sec_iv(padapter, cur_dot11txpn, get_my_bssid(&pmlmeinfo->network));
+ RsvdPageLoc.LocRemoteCtrlInfo = TotalPageNum;
+ memcpy(ReservedPagePacket+BufIndex-TxDescLen, cur_dot11txpn, _AES_IV_LEN_);
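+ /* The remote-control info page holds raw IV bytes rather than an 802.11 frame, */
+ /* so no fake TX descriptor is filled for it. */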
+
+ /* DBG_871X("%s(): HW_VAR_SET_TX_CMD: SEC IV %p %d\n", */
+ /* __func__, &ReservedPagePacket[BufIndex-TxDescLen], _AES_IV_LEN_); */
+
+ CurtPktPageNum = (u8)PageNum_128(_AES_IV_LEN_);
+
+ TotalPageNum += CurtPktPageNum;
+
+#ifdef CONFIG_GTK_OL
+ BufIndex += (CurtPktPageNum*PageSize);
+
+ /* if the AP station info exists, get the KEK and KCK from it */
+ psta = rtw_get_stainfo(pstapriv, get_bssid(pmlmepriv));
+ if (psta == NULL) {
+ memset(kek, 0, RTW_KEK_LEN);
+ memset(kck, 0, RTW_KCK_LEN);
+ DBG_8192C("%s, KEK, KCK download rsvd page all zero\n", __func__);
+ } else {
+ memcpy(kek, psta->kek, RTW_KEK_LEN);
+ memcpy(kck, psta->kck, RTW_KCK_LEN);
+ }
+
+ /* 3(9) KEK, KCK */
+ RsvdPageLoc.LocGTKInfo = TotalPageNum;
+ memcpy(ReservedPagePacket+BufIndex-TxDescLen, kck, RTW_KCK_LEN);
+ memcpy(ReservedPagePacket+BufIndex-TxDescLen+RTW_KCK_LEN, kek, RTW_KEK_LEN);
+
+ /* DBG_871X("%s(): HW_VAR_SET_TX_CMD: KEK KCK %p %d\n", */
+ /* __func__, &ReservedPagePacket[BufIndex-TxDescLen], (TxDescLen + RTW_KCK_LEN + RTW_KEK_LEN)); */
+
+ CurtPktPageNum = (u8)PageNum_128(TxDescLen + RTW_KCK_LEN + RTW_KEK_LEN);
+
+ TotalPageNum += CurtPktPageNum;
+
+ BufIndex += (CurtPktPageNum*PageSize);
+
+ /* 3(10) GTK Response */
+ RsvdPageLoc.LocGTKRsp = TotalPageNum;
+ ConstructGTKResponse(
+ padapter,
+ &ReservedPagePacket[BufIndex],
+ &GTKLegnth
+ );
+
+ rtl8723b_fill_fake_txdesc(padapter, &ReservedPagePacket[BufIndex-TxDescLen], GTKLegnth, false, false, true);
+ /* DBG_871X("%s(): HW_VAR_SET_TX_CMD: GTK RSP %p %d\n", */
+ /* __func__, &ReservedPagePacket[BufIndex-TxDescLen], (TxDescLen + GTKLegnth)); */
+
+ CurtPktPageNum = (u8)PageNum_128(TxDescLen + GTKLegnth);
+
+ TotalPageNum += CurtPktPageNum;
+
+ BufIndex += (CurtPktPageNum*PageSize);
+
+ /* the pages below are left empty as GTK extension memory */
+ /* 3(11) GTK EXT MEM */
+ RsvdPageLoc.LocGTKEXTMEM = TotalPageNum;
+
+ CurtPktPageNum = 2;
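+ /* 2 pages * 128 bytes = the 256 bytes of FW extension memory added to TotalPacketLen below. */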
+
+ TotalPageNum += CurtPktPageNum;
+
+ TotalPacketLen = BufIndex-TxDescLen + 256; /* extension memory for FW */
+#else
+ TotalPacketLen = BufIndex-TxDescLen + sizeof (union pn48); /* IV len */
+#endif /* CONFIG_GTK_OL */
+ } else
+#endif /* CONFIG_WOWLAN */
+ {
+#ifdef CONFIG_PNO_SUPPORT
+ if (pwrctl->pno_in_resume == false && pwrctl->pno_inited == true) {
+ /* Probe Request */
+ RsvdPageLoc.LocProbePacket = TotalPageNum;
+ ConstructProbeReq(
+ padapter,
+ &ReservedPagePacket[BufIndex],
+ &ProbeReqLength);
+
+ rtl8723b_fill_fake_txdesc(padapter,
+ &ReservedPagePacket[BufIndex-TxDescLen],
+ ProbeReqLength, false, false, false);
+#ifdef CONFIG_PNO_SET_DEBUG
+ {
+ int gj;
+ printk("probe req pkt =>\n");
+ for (gj = 0; gj < ProbeReqLength+TxDescLen; gj++) {
+ printk(" %02x ", ReservedPagePacket[BufIndex-TxDescLen+gj]);
+ if ((gj+1)%8 == 0)
+ printk("\n");
+ }
+ printk(" <=end\n");
+ }
+#endif
+ CurtPktPageNum =
+ (u8)PageNum_128(TxDescLen + ProbeReqLength);
+
+ TotalPageNum += CurtPktPageNum;
+
+ BufIndex += (CurtPktPageNum*PageSize);
+
+ /* PNO INFO Page */
+ RsvdPageLoc.LocPNOInfo = TotalPageNum;
+ ConstructPnoInfo(padapter, &ReservedPagePacket[BufIndex-TxDescLen], &PNOLength);
+#ifdef CONFIG_PNO_SET_DEBUG
+ {
+ int gj;
+ printk("PNO pkt =>\n");
+ for (gj = 0; gj < PNOLength; gj++) {
+ printk(" %02x ", ReservedPagePacket[BufIndex-TxDescLen+gj]);
+ if ((gj + 1)%8 == 0)
+ printk("\n");
+ }
+ printk(" <=end\n");
+ }
+#endif
+
+ CurtPktPageNum = (u8)PageNum_128(PNOLength);
+ TotalPageNum += CurtPktPageNum;
+ BufIndex += (CurtPktPageNum*PageSize);
+
+ /* SSID List Page */
+ RsvdPageLoc.LocSSIDInfo = TotalPageNum;
+ ConstructSSIDList(padapter, &ReservedPagePacket[BufIndex-TxDescLen], &SSIDLegnth);
+#ifdef CONFIG_PNO_SET_DEBUG
+ {
+ int gj;
+ printk("SSID list pkt =>\n");
+ for (gj = 0; gj < SSIDLegnth; gj++) {
+ printk(" %02x ", ReservedPagePacket[BufIndex-TxDescLen+gj]);
+ if ((gj + 1)%8 == 0)
+ printk("\n");
+ }
+ printk(" <=end\n");
+ }
+#endif
+ CurtPktPageNum = (u8)PageNum_128(SSIDLegnth);
+ TotalPageNum += CurtPktPageNum;
+ BufIndex += (CurtPktPageNum*PageSize);
+
+ /* Scan Info Page */
+ RsvdPageLoc.LocScanInfo = TotalPageNum;
+ ConstructScanInfo(padapter, &ReservedPagePacket[BufIndex-TxDescLen], &ScanInfoLength);
+#ifdef CONFIG_PNO_SET_DEBUG
+ {
+ int gj;
+ printk("Scan info pkt =>\n");
+ for (gj = 0; gj < ScanInfoLength; gj++) {
+ printk(" %02x ", ReservedPagePacket[BufIndex-TxDescLen+gj]);
+ if ((gj + 1)%8 == 0)
+ printk("\n");
+ }
+ printk(" <=end\n");
+ }
+#endif
+ CurtPktPageNum = (u8)PageNum_128(ScanInfoLength);
+ TotalPageNum += CurtPktPageNum;
+ BufIndex += (CurtPktPageNum*PageSize);
+
+ TotalPacketLen = BufIndex + ScanInfoLength;
+ } else {
+ TotalPacketLen = BufIndex + BTQosNullLength;
+ }
+#else /* CONFIG_PNO_SUPPORT */
+ TotalPacketLen = BufIndex + BTQosNullLength;
+#endif
+ }
+
+ if (TotalPacketLen > MaxRsvdPageBufSize) {
+ DBG_871X("%s(): ERROR: The rsvd page size is not enough!!TotalPacketLen %d, MaxRsvdPageBufSize %d\n", __func__,
+ TotalPacketLen, MaxRsvdPageBufSize);
+ goto error;
+ } else {
+ /* update attribute */
+ pattrib = &pcmdframe->attrib;
+ update_mgntframe_attrib(padapter, pattrib);
+ pattrib->qsel = 0x10;
+ pattrib->pktlen = pattrib->last_txcmdsz = TotalPacketLen - TxDescOffset;
+ dump_mgntframe_and_wait(padapter, pcmdframe, 100);
+ }
+
+ DBG_871X("%s: Set RSVD page location to Fw , TotalPacketLen(%d), TotalPageNum(%d)\n", __func__, TotalPacketLen, TotalPageNum);
+ if (check_fwstate(pmlmepriv, _FW_LINKED)) {
+ rtl8723b_set_FwRsvdPage_cmd(padapter, &RsvdPageLoc);
+ rtl8723b_set_FwAoacRsvdPage_cmd(padapter, &RsvdPageLoc);
+ } else {
+ rtl8723b_set_FwAoacRsvdPage_cmd(padapter, &RsvdPageLoc);
+#ifdef CONFIG_PNO_SUPPORT
+ if (pwrctl->pno_in_resume)
+ rtl8723b_set_FwScanOffloadInfo_cmd(padapter,
+ &RsvdPageLoc, 0);
+ else
+ rtl8723b_set_FwScanOffloadInfo_cmd(padapter,
+ &RsvdPageLoc, 1);
+#endif
+ }
+ return;
+
+error:
+
+ rtw_free_xmitframe(pxmitpriv, pcmdframe);
+}
+
+#ifdef CONFIG_AP_WOWLAN
+/* */
+/* Description: Fill the reserved packets that the FW will use into the RSVD page. */
+/* Currently we send 2 types of packets to the rsvd page: (1) Beacon, (2) ProbeRsp. */
+/* */
+/* Input: bDLFinished */
+/* */
+/* false: The first time, we send all the packets to Hw as one large packet, */
+/* so the packet length must be set to the total length. */
+/* */
+/* true: The second time, we send the first packet (default: beacon) */
+/* to Hw again and set the length in the descriptor to the real beacon length. */
+/* 2009.10.15 by tynli. */
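+/* AP / WOWLAN reserved page layout built below: beacon | probe response, each starting on a */
+/* 128-byte page boundary and reported to the FW via rtl8723b_set_ap_wow_rsvdpage_cmd(). */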
+static void rtl8723b_set_AP_FwRsvdPagePkt(
+ struct adapter *padapter, bool bDLFinished
+)
+{
+ struct hal_com_data *pHalData;
+ struct xmit_frame *pcmdframe;
+ struct pkt_attrib *pattrib;
+ struct xmit_priv *pxmitpriv;
+ struct mlme_ext_priv *pmlmeext;
+ struct mlme_ext_info *pmlmeinfo;
+ struct pwrctrl_priv *pwrctl;
+ u32 BeaconLength = 0, ProbeRspLength = 0;
+ u8 *ReservedPagePacket;
+ u8 TxDescLen = TXDESC_SIZE, TxDescOffset = TXDESC_OFFSET;
+ u8 TotalPageNum = 0, CurtPktPageNum = 0, RsvdPageNum = 0;
+ u8 currentip[4];
+ u16 BufIndex, PageSize = 128;
+ u32 TotalPacketLen = 0, MaxRsvdPageBufSize = 0;
+ RSVDPAGE_LOC RsvdPageLoc;
+
+ /* DBG_871X("%s---->\n", __func__); */
+ DBG_8192C("+" FUNC_ADPT_FMT ": iface_type =%d\n",
+ FUNC_ADPT_ARG(padapter), get_iface_type(padapter));
+
+ pHalData = GET_HAL_DATA(padapter);
+ pxmitpriv = &padapter->xmitpriv;
+ pmlmeext = &padapter->mlmeextpriv;
+ pmlmeinfo = &pmlmeext->mlmext_info;
+ pwrctl = adapter_to_pwrctl(padapter);
+
+ RsvdPageNum = BCNQ_PAGE_NUM_8723B + AP_WOWLAN_PAGE_NUM_8723B;
+ MaxRsvdPageBufSize = RsvdPageNum*PageSize;
+
+ pcmdframe = rtw_alloc_cmdxmitframe(pxmitpriv);
+ if (pcmdframe == NULL) {
+ DBG_871X("%s: alloc ReservedPagePacket fail!\n", __func__);
+ return;
+ }
+
+ ReservedPagePacket = pcmdframe->buf_addr;
+ memset(&RsvdPageLoc, 0, sizeof(RSVDPAGE_LOC));
+
+ /* 3 (1) beacon */
+ BufIndex = TxDescOffset;
+ ConstructBeacon(padapter, &ReservedPagePacket[BufIndex], &BeaconLength);
+
+ /* When counting the first page size, we need to reserve room for the TX descriptor of the RSVD */
+ /* packet; it will be filled in front of the packet in TXPKTBUF. */
+ CurtPktPageNum = (u8)PageNum_128(TxDescLen + BeaconLength);
+ /* If we don't add 1 more page, the WOWLAN function has a problem; Baron thinks it is a firmware bug. */
+ if (CurtPktPageNum == 1)
+ CurtPktPageNum += 1;
+ TotalPageNum += CurtPktPageNum;
+
+ BufIndex += (CurtPktPageNum*PageSize);
+
+ /* 2 (4) probe response */
+ RsvdPageLoc.LocProbeRsp = TotalPageNum;
+
+ rtw_get_current_ip_address(padapter, currentip);
+
+ ConstructProbeRsp(
+ padapter,
+ &ReservedPagePacket[BufIndex],
+ &ProbeRspLength,
+ currentip,
+ false);
+ rtl8723b_fill_fake_txdesc(padapter,
+ &ReservedPagePacket[BufIndex-TxDescLen],
+ ProbeRspLength,
+ false, false, false);
+
+ DBG_871X("%s(): HW_VAR_SET_TX_CMD: PROBE RSP %p %d\n",
+ __func__, &ReservedPagePacket[BufIndex-TxDescLen],
+ (ProbeRspLength+TxDescLen));
+
+ CurtPktPageNum = (u8)PageNum_128(TxDescLen + ProbeRspLength);
+
+ TotalPageNum += CurtPktPageNum;
+
+ BufIndex += (CurtPktPageNum*PageSize);
+
+ TotalPacketLen = BufIndex + ProbeRspLength;
+
+ if (TotalPacketLen > MaxRsvdPageBufSize) {
+ DBG_871X("%s(): ERROR: The rsvd page size is not enough \
+ !!TotalPacketLen %d, MaxRsvdPageBufSize %d\n",
+ __func__, TotalPacketLen, MaxRsvdPageBufSize);
+ goto error;
+ } else {
+ /* update attribute */
+ pattrib = &pcmdframe->attrib;
+ update_mgntframe_attrib(padapter, pattrib);
+ pattrib->qsel = 0x10;
+ pattrib->pktlen = TotalPacketLen - TxDescOffset;
+ pattrib->last_txcmdsz = TotalPacketLen - TxDescOffset;
+ dump_mgntframe_and_wait(padapter, pcmdframe, 100);
+ }
+
+ DBG_871X("%s: Set RSVD page location to Fw , TotalPacketLen(%d), TotalPageNum(%d)\n", __func__, TotalPacketLen, TotalPageNum);
+ rtl8723b_set_ap_wow_rsvdpage_cmd(padapter, &RsvdPageLoc);
+
+ return;
+error:
+ rtw_free_xmitframe(pxmitpriv, pcmdframe);
+}
+#endif /* CONFIG_AP_WOWLAN */
+
+void rtl8723b_download_rsvd_page(struct adapter *padapter, u8 mstatus)
+{
+ struct hal_com_data *pHalData = GET_HAL_DATA(padapter);
+#ifdef CONFIG_AP_WOWLAN
+ struct pwrctrl_priv *pwrpriv = adapter_to_pwrctl(padapter);
+#endif
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ bool bcn_valid = false;
+ u8 DLBcnCount = 0;
+ u32 poll = 0;
+ u8 val8;
+
+ DBG_8192C("+" FUNC_ADPT_FMT ": iface_type =%d mstatus(%x)\n",
+ FUNC_ADPT_ARG(padapter), get_iface_type(padapter), mstatus);
+
+ if (mstatus == RT_MEDIA_CONNECT) {
+ bool bRecover = false;
+ u8 v8;
+
+ /* We should set AID, correct TSF, HW seq enable before set JoinBssReport to Fw in 88/92C. */
+ /* Suggested by filen. Added by tynli. */
+ rtw_write16(padapter, REG_BCN_PSR_RPT, (0xC000|pmlmeinfo->aid));
+
+ /* set REG_CR bit 8 */
+ v8 = rtw_read8(padapter, REG_CR+1);
+ v8 |= BIT(0); /* ENSWBCN */
+ rtw_write8(padapter, REG_CR+1, v8);
+
+ /* Disable Hw protection during the window reserved for Hw beacon transmission. */
+ /* This fixes reserved page download failures caused by access collisions with the protection time. */
+ /* 2010.05.11. Added by tynli. */
+ val8 = rtw_read8(padapter, REG_BCN_CTRL);
+ val8 &= ~EN_BCN_FUNCTION;
+ val8 |= DIS_TSF_UDT;
+ rtw_write8(padapter, REG_BCN_CTRL, val8);
+
+ /* Set FWHW_TXQ_CTRL 0x422[6]= 0 to tell Hw the packet is not a real beacon frame. */
+ if (pHalData->RegFwHwTxQCtrl & BIT(6))
+ bRecover = true;
+
+ /* To tell Hw the packet is not a real beacon frame. */
+ rtw_write8(padapter, REG_FWHW_TXQ_CTRL+2, pHalData->RegFwHwTxQCtrl & ~BIT(6));
+ pHalData->RegFwHwTxQCtrl &= ~BIT(6);
+
+ /* Clear beacon valid check bit. */
+ rtw_hal_set_hwreg(padapter, HW_VAR_BCN_VALID, NULL);
+ rtw_hal_set_hwreg(padapter, HW_VAR_DL_BCN_SEL, NULL);
+
+ DLBcnCount = 0;
+ poll = 0;
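+ /* Retry the download up to 100 times; each attempt polls the beacon-valid bit */
+ /* in bursts of up to 10 yields before re-sending the reserved page. */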
+ do {
+#ifdef CONFIG_AP_WOWLAN
+ if (pwrpriv->wowlan_ap_mode)
+ rtl8723b_set_AP_FwRsvdPagePkt(padapter, 0);
+ else
+ rtl8723b_set_FwRsvdPagePkt(padapter, 0);
+#else
+ /* download rsvd page. */
+ rtl8723b_set_FwRsvdPagePkt(padapter, 0);
+#endif
+ DLBcnCount++;
+ do {
+ yield();
+ /* mdelay(10); */
+ /* check rsvd page download OK. */
+ rtw_hal_get_hwreg(padapter, HW_VAR_BCN_VALID, (u8 *)(&bcn_valid));
+ poll++;
+ } while (!bcn_valid && (poll%10) != 0 && !padapter->bSurpriseRemoved && !padapter->bDriverStopped);
+
+ } while (!bcn_valid && DLBcnCount <= 100 && !padapter->bSurpriseRemoved && !padapter->bDriverStopped);
+
+ if (padapter->bSurpriseRemoved || padapter->bDriverStopped) {
+ } else if (!bcn_valid)
+ DBG_871X(ADPT_FMT": 1 DL RSVD page failed! DLBcnCount:%u, poll:%u\n",
+ ADPT_ARG(padapter), DLBcnCount, poll);
+ else {
+ struct pwrctrl_priv *pwrctl = adapter_to_pwrctl(padapter);
+ pwrctl->fw_psmode_iface_id = padapter->iface_id;
+ DBG_871X(ADPT_FMT": 1 DL RSVD page success! DLBcnCount:%u, poll:%u\n",
+ ADPT_ARG(padapter), DLBcnCount, poll);
+ }
+
+ /* 2010.05.11. Added by tynli. */
+ val8 = rtw_read8(padapter, REG_BCN_CTRL);
+ val8 |= EN_BCN_FUNCTION;
+ val8 &= ~DIS_TSF_UDT;
+ rtw_write8(padapter, REG_BCN_CTRL, val8);
+
+ /* Check whether another adapter still needs to send beacons. If so, the original */
+ /* value of 0x422[6] will be 1, and we must not leave 0x422[6] cleared after */
+ /* downloading the reserved page, or the HW will no longer be able to send the */
+ /* beacon. */
+ /* 2010.06.23. Added by tynli. */
+ if (bRecover) {
+ rtw_write8(padapter, REG_FWHW_TXQ_CTRL+2, pHalData->RegFwHwTxQCtrl | BIT(6));
+ pHalData->RegFwHwTxQCtrl |= BIT(6);
+ }
+
+ /* Clear CR[8] or the beacon packet will not be sent to TxBuf anymore. */
+ v8 = rtw_read8(padapter, REG_CR+1);
+ v8 &= ~BIT(0); /* ~ENSWBCN */
+ rtw_write8(padapter, REG_CR+1, v8);
+ }
+}
+
+void rtl8723b_set_rssi_cmd(struct adapter *padapter, u8 *param)
+{
+ rtl8723b_set_FwRssiSetting_cmd(padapter, param);
+}
+
+void rtl8723b_set_FwJoinBssRpt_cmd(struct adapter *padapter, u8 mstatus)
+{
+ if (mstatus == 1)
+ rtl8723b_download_rsvd_page(padapter, RT_MEDIA_CONNECT);
+}
+
+/* arg[0] = macid */
+/* arg[1] = raid */
+/* arg[2] = shortGIrate */
+/* arg[3] = init_rate */
+void rtl8723b_Add_RateATid(
+ struct adapter *padapter,
+ u32 bitmap,
+ u8 *arg,
+ u8 rssi_level
+)
+{
+ struct hal_com_data *pHalData = GET_HAL_DATA(padapter);
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct sta_info *psta;
+ u8 mac_id = arg[0];
+ u8 raid = arg[1];
+ u8 shortGI = arg[2];
+ u8 bw;
+ u32 mask = bitmap&0x0FFFFFFF;
+
+ psta = pmlmeinfo->FW_sta_info[mac_id].psta;
+ if (psta == NULL)
+ return;
+
+ bw = psta->bw_mode;
+
+ if (rssi_level != DM_RATR_STA_INIT)
+ mask = ODM_Get_Rate_Bitmap(&pHalData->odmpriv, mac_id, mask, rssi_level);
+
+ DBG_871X("%s(): mac_id =%d raid = 0x%x bw =%d mask = 0x%x\n", __func__, mac_id, raid, bw, mask);
+ rtl8723b_set_FwMacIdConfig_cmd(padapter, mac_id, raid, bw, shortGI, mask);
+}
+
+static void ConstructBtNullFunctionData(
+ struct adapter *padapter,
+ u8 *pframe,
+ u32 *pLength,
+ u8 *StaAddr,
+ u8 bQoS,
+ u8 AC,
+ u8 bEosp,
+ u8 bForcePowerSave
+)
+{
+ struct ieee80211_hdr *pwlanhdr;
+ __le16 *fctrl;
+ u32 pktlen;
+ struct mlme_ext_priv *pmlmeext;
+ struct mlme_ext_info *pmlmeinfo;
+ u8 bssid[ETH_ALEN];
+
+
+ DBG_871X("+" FUNC_ADPT_FMT ": qos =%d eosp =%d ps =%d\n",
+ FUNC_ADPT_ARG(padapter), bQoS, bEosp, bForcePowerSave);
+
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
+ pmlmeext = &padapter->mlmeextpriv;
+ pmlmeinfo = &pmlmeext->mlmext_info;
+
+ if (NULL == StaAddr) {
+ memcpy(bssid, myid(&padapter->eeprompriv), ETH_ALEN);
+ StaAddr = bssid;
+ }
+
+ fctrl = &pwlanhdr->frame_control;
+ *fctrl = 0;
+ if (bForcePowerSave)
+ SetPwrMgt(fctrl);
+
+ SetFrDs(fctrl);
+ memcpy(pwlanhdr->addr1, StaAddr, ETH_ALEN);
+ memcpy(pwlanhdr->addr2, myid(&padapter->eeprompriv), ETH_ALEN);
+ memcpy(pwlanhdr->addr3, myid(&padapter->eeprompriv), ETH_ALEN);
+
+ SetDuration(pwlanhdr, 0);
+ SetSeqNum(pwlanhdr, 0);
+
+ if (bQoS == true) {
+ struct ieee80211_qos_hdr *pwlanqoshdr;
+
+ SetFrameSubType(pframe, WIFI_QOS_DATA_NULL);
+
+ pwlanqoshdr = (struct ieee80211_qos_hdr *)pframe;
+ SetPriority(&pwlanqoshdr->qos_ctrl, AC);
+ SetEOSP(&pwlanqoshdr->qos_ctrl, bEosp);
+
+ pktlen = sizeof(struct ieee80211_qos_hdr);
+ } else {
+ SetFrameSubType(pframe, WIFI_DATA_NULL);
+
+ pktlen = sizeof(struct ieee80211_hdr_3addr);
+ }
+
+ *pLength = pktlen;
+}
+
+static void SetFwRsvdPagePkt_BTCoex(struct adapter *padapter)
+{
+ struct hal_com_data *pHalData;
+ struct xmit_frame *pcmdframe;
+ struct pkt_attrib *pattrib;
+ struct xmit_priv *pxmitpriv;
+ struct mlme_ext_priv *pmlmeext;
+ struct mlme_ext_info *pmlmeinfo;
+ u32 BeaconLength = 0;
+ u32 BTQosNullLength = 0;
+ u8 *ReservedPagePacket;
+ u8 TxDescLen, TxDescOffset;
+ u8 TotalPageNum = 0, CurtPktPageNum = 0, RsvdPageNum = 0;
+ u16 BufIndex, PageSize;
+ u32 TotalPacketLen, MaxRsvdPageBufSize = 0;
+ RSVDPAGE_LOC RsvdPageLoc;
+
+
+/* DBG_8192C("+" FUNC_ADPT_FMT "\n", FUNC_ADPT_ARG(padapter)); */
+
+ pHalData = GET_HAL_DATA(padapter);
+ pxmitpriv = &padapter->xmitpriv;
+ pmlmeext = &padapter->mlmeextpriv;
+ pmlmeinfo = &pmlmeext->mlmext_info;
+ TxDescLen = TXDESC_SIZE;
+ TxDescOffset = TXDESC_OFFSET;
+ PageSize = PAGE_SIZE_TX_8723B;
+
+ RsvdPageNum = BCNQ_PAGE_NUM_8723B;
+ MaxRsvdPageBufSize = RsvdPageNum*PageSize;
+
+ pcmdframe = rtw_alloc_cmdxmitframe(pxmitpriv);
+ if (pcmdframe == NULL) {
+ DBG_8192C("%s: alloc ReservedPagePacket fail!\n", __func__);
+ return;
+ }
+
+ ReservedPagePacket = pcmdframe->buf_addr;
+ memset(&RsvdPageLoc, 0, sizeof(RSVDPAGE_LOC));
+
+ /* 3 (1) beacon */
+ BufIndex = TxDescOffset;
+ ConstructBeacon(padapter, &ReservedPagePacket[BufIndex], &BeaconLength);
+
+ /* When counting the first page size, we need to reserve room for the TX descriptor of the RSVD */
+ /* packet; it will be filled in front of the packet in TXPKTBUF. */
+ CurtPktPageNum = (u8)PageNum_128(TxDescLen + BeaconLength);
+ /* If we don't add 1 more page, the WOWLAN function has a problem; Baron thinks it is a firmware bug. */
+ if (CurtPktPageNum == 1)
+ CurtPktPageNum += 1;
+ TotalPageNum += CurtPktPageNum;
+
+ BufIndex += (CurtPktPageNum*PageSize);
+
+ /* Jump to the last page */
+ if (BufIndex < (MaxRsvdPageBufSize - PageSize)) {
+ BufIndex = TxDescOffset + (MaxRsvdPageBufSize - PageSize);
+ TotalPageNum = BCNQ_PAGE_NUM_8723B - 1;
+ }
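+ /* The BT QoS-Null frame is placed in the last beacon-queue page, presumably to keep */
+ /* it clear of the beacon content that occupies the earlier pages. */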
+
+ /* 3 (6) BT Qos null data */
+ RsvdPageLoc.LocBTQosNull = TotalPageNum;
+ ConstructBtNullFunctionData(
+ padapter,
+ &ReservedPagePacket[BufIndex],
+ &BTQosNullLength,
+ NULL,
+ true, 0, 0, false
+ );
+ rtl8723b_fill_fake_txdesc(padapter, &ReservedPagePacket[BufIndex-TxDescLen], BTQosNullLength, false, true, false);
+
+ CurtPktPageNum = (u8)PageNum_128(TxDescLen + BTQosNullLength);
+
+ TotalPageNum += CurtPktPageNum;
+
+ TotalPacketLen = BufIndex + BTQosNullLength;
+ if (TotalPacketLen > MaxRsvdPageBufSize) {
+ DBG_8192C(FUNC_ADPT_FMT ": ERROR: The rsvd page size is not enough!!TotalPacketLen %d, MaxRsvdPageBufSize %d\n",
+ FUNC_ADPT_ARG(padapter), TotalPacketLen, MaxRsvdPageBufSize);
+ goto error;
+ }
+
+ /* update attribute */
+ pattrib = &pcmdframe->attrib;
+ update_mgntframe_attrib(padapter, pattrib);
+ pattrib->qsel = 0x10;
+ pattrib->pktlen = pattrib->last_txcmdsz = TotalPacketLen - TxDescOffset;
+ dump_mgntframe_and_wait(padapter, pcmdframe, 100);
+
+/* DBG_8192C(FUNC_ADPT_FMT ": Set RSVD page location to Fw, TotalPacketLen(%d), TotalPageNum(%d)\n", */
+/* FUNC_ADPT_ARG(padapter), TotalPacketLen, TotalPageNum); */
+ rtl8723b_set_FwRsvdPage_cmd(padapter, &RsvdPageLoc);
+ rtl8723b_set_FwAoacRsvdPage_cmd(padapter, &RsvdPageLoc);
+
+ return;
+
+error:
+ rtw_free_xmitframe(pxmitpriv, pcmdframe);
+}
+
+void rtl8723b_download_BTCoex_AP_mode_rsvd_page(struct adapter *padapter)
+{
+ struct hal_com_data *pHalData;
+ struct mlme_ext_priv *pmlmeext;
+ struct mlme_ext_info *pmlmeinfo;
+ u8 bRecover = false;
+ u8 bcn_valid = false;
+ u8 DLBcnCount = 0;
+ u32 poll = 0;
+ u8 val8;
+
+
+ DBG_8192C("+" FUNC_ADPT_FMT ": iface_type =%d fw_state = 0x%08X\n",
+ FUNC_ADPT_ARG(padapter), get_iface_type(padapter), get_fwstate(&padapter->mlmepriv));
+
+#ifdef DEBUG
+ if (check_fwstate(&padapter->mlmepriv, WIFI_AP_STATE) == false) {
+ DBG_8192C(FUNC_ADPT_FMT ": [WARNING] not in AP mode!!\n",
+ FUNC_ADPT_ARG(padapter));
+ }
+#endif /* DEBUG */
+
+ pHalData = GET_HAL_DATA(padapter);
+ pmlmeext = &padapter->mlmeextpriv;
+ pmlmeinfo = &pmlmeext->mlmext_info;
+
+ /* We should set AID, correct TSF, HW seq enable before set JoinBssReport to Fw in 88/92C. */
+ /* Suggested by filen. Added by tynli. */
+ rtw_write16(padapter, REG_BCN_PSR_RPT, (0xC000|pmlmeinfo->aid));
+
+ /* set REG_CR bit 8 */
+ val8 = rtw_read8(padapter, REG_CR+1);
+ val8 |= BIT(0); /* ENSWBCN */
+ rtw_write8(padapter, REG_CR+1, val8);
+
+ /* Disable Hw protection during the window reserved for Hw beacon transmission. */
+ /* This fixes reserved page download failures caused by access collisions with the protection time. */
+ /* 2010.05.11. Added by tynli. */
+ val8 = rtw_read8(padapter, REG_BCN_CTRL);
+ val8 &= ~EN_BCN_FUNCTION;
+ val8 |= DIS_TSF_UDT;
+ rtw_write8(padapter, REG_BCN_CTRL, val8);
+
+ /* Set FWHW_TXQ_CTRL 0x422[6]= 0 to tell Hw the packet is not a real beacon frame. */
+ if (pHalData->RegFwHwTxQCtrl & BIT(6))
+ bRecover = true;
+
+ /* To tell Hw the packet is not a real beacon frame. */
+ pHalData->RegFwHwTxQCtrl &= ~BIT(6);
+ rtw_write8(padapter, REG_FWHW_TXQ_CTRL+2, pHalData->RegFwHwTxQCtrl);
+
+ /* Clear beacon valid check bit. */
+ rtw_hal_set_hwreg(padapter, HW_VAR_BCN_VALID, NULL);
+ rtw_hal_set_hwreg(padapter, HW_VAR_DL_BCN_SEL, NULL);
+
+ DLBcnCount = 0;
+ poll = 0;
+ do {
+ SetFwRsvdPagePkt_BTCoex(padapter);
+ DLBcnCount++;
+ do {
+ yield();
+/* mdelay(10); */
+ /* check rsvd page download OK. */
+ rtw_hal_get_hwreg(padapter, HW_VAR_BCN_VALID, &bcn_valid);
+ poll++;
+ } while (!bcn_valid && (poll%10) != 0 && !padapter->bSurpriseRemoved && !padapter->bDriverStopped);
+ } while (!bcn_valid && (DLBcnCount <= 100) && !padapter->bSurpriseRemoved && !padapter->bDriverStopped);
+
+ if (true == bcn_valid) {
+ struct pwrctrl_priv *pwrctl = adapter_to_pwrctl(padapter);
+ pwrctl->fw_psmode_iface_id = padapter->iface_id;
+ DBG_8192C(ADPT_FMT": DL RSVD page success! DLBcnCount:%d, poll:%d\n",
+ ADPT_ARG(padapter), DLBcnCount, poll);
+ } else {
+ DBG_8192C(ADPT_FMT": DL RSVD page fail! DLBcnCount:%d, poll:%d\n",
+ ADPT_ARG(padapter), DLBcnCount, poll);
+ DBG_8192C(ADPT_FMT": DL RSVD page fail! bSurpriseRemoved =%d\n",
+ ADPT_ARG(padapter), padapter->bSurpriseRemoved);
+ DBG_8192C(ADPT_FMT": DL RSVD page fail! bDriverStopped =%d\n",
+ ADPT_ARG(padapter), padapter->bDriverStopped);
+ }
+
+ /* 2010.05.11. Added by tynli. */
+ val8 = rtw_read8(padapter, REG_BCN_CTRL);
+ val8 |= EN_BCN_FUNCTION;
+ val8 &= ~DIS_TSF_UDT;
+ rtw_write8(padapter, REG_BCN_CTRL, val8);
+
+ /* Check whether another adapter still needs to send beacons. If so, the original */
+ /* value of 0x422[6] will be 1, and we must not leave 0x422[6] cleared after */
+ /* downloading the reserved page, or the HW will no longer be able to send the */
+ /* beacon. */
+ /* 2010.06.23. Added by tynli. */
+ if (bRecover) {
+ pHalData->RegFwHwTxQCtrl |= BIT(6);
+ rtw_write8(padapter, REG_FWHW_TXQ_CTRL+2, pHalData->RegFwHwTxQCtrl);
+ }
+
+ /* Clear CR[8] or the beacon packet will not be sent to TxBuf anymore. */
+ val8 = rtw_read8(padapter, REG_CR+1);
+ val8 &= ~BIT(0); /* ~ENSWBCN */
+ rtw_write8(padapter, REG_CR+1, val8);
+}
diff --git a/drivers/staging/rtl8723bs/hal/rtl8723b_dm.c b/drivers/staging/rtl8723bs/hal/rtl8723b_dm.c
new file mode 100644
index 000000000000..b16255984f4e
--- /dev/null
+++ b/drivers/staging/rtl8723bs/hal/rtl8723b_dm.c
@@ -0,0 +1,300 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+/* Description: */
+/* This file is for the RTL8723B dynamic mechanism only */
+
+#define _RTL8723B_DM_C_
+
+#include <drv_types.h>
+#include <rtw_debug.h>
+#include <rtl8723b_hal.h>
+
+/* Global var */
+
+static void dm_CheckStatistics(struct adapter *Adapter)
+{
+}
+/* */
+/* functions */
+/* */
+static void Init_ODM_ComInfo_8723b(struct adapter *Adapter)
+{
+
+ struct hal_com_data *pHalData = GET_HAL_DATA(Adapter);
+ PDM_ODM_T pDM_Odm = &(pHalData->odmpriv);
+ struct dm_priv *pdmpriv = &pHalData->dmpriv;
+ u8 cut_ver, fab_ver;
+
+ /* */
+ /* Init Value */
+ /* */
+ memset(pDM_Odm, 0, sizeof(*pDM_Odm));
+
+ pDM_Odm->Adapter = Adapter;
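+ /* Platform identifier handed to the common ODM code; defined locally here */
+ /* because no header in this driver exports the CE platform value. */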
+#define ODM_CE 0x04
+ ODM_CmnInfoInit(pDM_Odm, ODM_CMNINFO_PLATFORM, ODM_CE);
+ ODM_CmnInfoInit(pDM_Odm, ODM_CMNINFO_INTERFACE, RTW_SDIO);
+ ODM_CmnInfoInit(pDM_Odm, ODM_CMNINFO_PACKAGE_TYPE, pHalData->PackageType);
+ ODM_CmnInfoInit(pDM_Odm, ODM_CMNINFO_IC_TYPE, ODM_RTL8723B);
+
+ fab_ver = ODM_TSMC;
+ cut_ver = ODM_CUT_A;
+
+ DBG_871X("%s(): fab_ver =%d cut_ver =%d\n", __func__, fab_ver, cut_ver);
+ ODM_CmnInfoInit(pDM_Odm, ODM_CMNINFO_FAB_VER, fab_ver);
+ ODM_CmnInfoInit(pDM_Odm, ODM_CMNINFO_CUT_VER, cut_ver);
+ ODM_CmnInfoInit(pDM_Odm, ODM_CMNINFO_MP_TEST_CHIP, IS_NORMAL_CHIP(pHalData->VersionID));
+
+ ODM_CmnInfoInit(pDM_Odm, ODM_CMNINFO_PATCH_ID, pHalData->CustomerID);
+ /* ODM_CMNINFO_BINHCT_TEST only for MP Team */
+ ODM_CmnInfoInit(pDM_Odm, ODM_CMNINFO_BWIFI_TEST, Adapter->registrypriv.wifi_spec);
+
+
+ if (pHalData->rf_type == RF_1T1R) {
+ ODM_CmnInfoUpdate(pDM_Odm, ODM_CMNINFO_RF_TYPE, ODM_1T1R);
+ } else if (pHalData->rf_type == RF_2T2R) {
+ ODM_CmnInfoUpdate(pDM_Odm, ODM_CMNINFO_RF_TYPE, ODM_2T2R);
+ } else if (pHalData->rf_type == RF_1T2R) {
+ ODM_CmnInfoUpdate(pDM_Odm, ODM_CMNINFO_RF_TYPE, ODM_1T2R);
+ }
+
+ pdmpriv->InitODMFlag = ODM_RF_CALIBRATION|ODM_RF_TX_PWR_TRACK;
+
+ ODM_CmnInfoUpdate(pDM_Odm, ODM_CMNINFO_ABILITY, pdmpriv->InitODMFlag);
+}
+
+static void Update_ODM_ComInfo_8723b(struct adapter *Adapter)
+{
+ struct mlme_ext_priv *pmlmeext = &Adapter->mlmeextpriv;
+ struct mlme_priv *pmlmepriv = &Adapter->mlmepriv;
+ struct dvobj_priv *dvobj = adapter_to_dvobj(Adapter);
+ struct pwrctrl_priv *pwrctrlpriv = adapter_to_pwrctl(Adapter);
+ struct hal_com_data *pHalData = GET_HAL_DATA(Adapter);
+ PDM_ODM_T pDM_Odm = &(pHalData->odmpriv);
+ struct dm_priv *pdmpriv = &pHalData->dmpriv;
+ int i;
+ u8 zero = 0;
+
+ pdmpriv->InitODMFlag = 0
+ | ODM_BB_DIG
+ | ODM_BB_RA_MASK
+ | ODM_BB_DYNAMIC_TXPWR
+ | ODM_BB_FA_CNT
+ | ODM_BB_RSSI_MONITOR
+ | ODM_BB_CCK_PD
+ | ODM_BB_PWR_SAVE
+ | ODM_BB_CFO_TRACKING
+ | ODM_MAC_EDCA_TURBO
+ | ODM_RF_TX_PWR_TRACK
+ | ODM_RF_CALIBRATION
+#ifdef CONFIG_ODM_ADAPTIVITY
+ | ODM_BB_ADAPTIVITY
+#endif
+ ;
+
+ /* */
+ /* Pointer reference */
+ /* */
+ /* ODM_CMNINFO_MAC_PHY_MODE pHalData->MacPhyMode92D */
+ /* ODM_CmnInfoHook(pDM_Odm, ODM_CMNINFO_MAC_PHY_MODE,&(pDM_Odm->u8_temp)); */
+
+ ODM_CmnInfoUpdate(pDM_Odm, ODM_CMNINFO_ABILITY, pdmpriv->InitODMFlag);
+
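+ /* Hook pointers so the common ODM code always reads the driver's current */
+ /* state (traffic counters, wireless mode, channel, bandwidth, security, ...). */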
+ ODM_CmnInfoHook(pDM_Odm, ODM_CMNINFO_TX_UNI, &(dvobj->traffic_stat.tx_bytes));
+ ODM_CmnInfoHook(pDM_Odm, ODM_CMNINFO_RX_UNI, &(dvobj->traffic_stat.rx_bytes));
+ ODM_CmnInfoHook(pDM_Odm, ODM_CMNINFO_WM_MODE, &(pmlmeext->cur_wireless_mode));
+ ODM_CmnInfoHook(pDM_Odm, ODM_CMNINFO_SEC_CHNL_OFFSET, &(pHalData->nCur40MhzPrimeSC));
+ ODM_CmnInfoHook(pDM_Odm, ODM_CMNINFO_SEC_MODE, &(Adapter->securitypriv.dot11PrivacyAlgrthm));
+ ODM_CmnInfoHook(pDM_Odm, ODM_CMNINFO_BW, &(pHalData->CurrentChannelBW));
+ ODM_CmnInfoHook(pDM_Odm, ODM_CMNINFO_CHNL, &(pHalData->CurrentChannel));
+ ODM_CmnInfoHook(pDM_Odm, ODM_CMNINFO_NET_CLOSED, &(Adapter->net_closed));
+ ODM_CmnInfoHook(pDM_Odm, ODM_CMNINFO_MP_MODE, &zero);
+ ODM_CmnInfoHook(pDM_Odm, ODM_CMNINFO_BAND, &(pHalData->CurrentBandType));
+ ODM_CmnInfoHook(pDM_Odm, ODM_CMNINFO_FORCED_IGI_LB, &(pHalData->u1ForcedIgiLb));
+ ODM_CmnInfoHook(pDM_Odm, ODM_CMNINFO_FORCED_RATE, &(pHalData->ForcedDataRate));
+
+ ODM_CmnInfoHook(pDM_Odm, ODM_CMNINFO_SCAN, &(pmlmepriv->bScanInProcess));
+ ODM_CmnInfoHook(pDM_Odm, ODM_CMNINFO_POWER_SAVING, &(pwrctrlpriv->bpower_saving));
+
+
+ for (i = 0; i < NUM_STA; i++)
+ ODM_CmnInfoPtrArrayHook(pDM_Odm, ODM_CMNINFO_STA_STATUS, i, NULL);
+}
+
+void rtl8723b_InitHalDm(struct adapter *Adapter)
+{
+ struct hal_com_data *pHalData = GET_HAL_DATA(Adapter);
+ struct dm_priv *pdmpriv = &pHalData->dmpriv;
+ PDM_ODM_T pDM_Odm = &(pHalData->odmpriv);
+
+ pdmpriv->DM_Type = DM_Type_ByDriver;
+ pdmpriv->DMFlag = DYNAMIC_FUNC_DISABLE;
+
+ pdmpriv->DMFlag |= DYNAMIC_FUNC_BT;
+
+ pdmpriv->InitDMFlag = pdmpriv->DMFlag;
+
+ Update_ODM_ComInfo_8723b(Adapter);
+
+ ODM_DMInit(pDM_Odm);
+}
+
+void rtl8723b_HalDmWatchDog(struct adapter *Adapter)
+{
+ bool bFwCurrentInPSMode = false;
+ bool bFwPSAwake = true;
+ u8 hw_init_completed = false;
+ struct hal_com_data *pHalData = GET_HAL_DATA(Adapter);
+
+ hw_init_completed = Adapter->hw_init_completed;
+
+ if (hw_init_completed == false)
+ goto skip_dm;
+
+ bFwCurrentInPSMode = adapter_to_pwrctl(Adapter)->bFwCurrentInPSMode;
+ rtw_hal_get_hwreg(Adapter, HW_VAR_FWLPS_RF_ON, (u8 *)(&bFwPSAwake));
+
+ if (
+ (hw_init_completed == true) &&
+ ((!bFwCurrentInPSMode) && bFwPSAwake)
+ ) {
+ /* */
+ /* Calculate Tx/Rx statistics. */
+ /* */
+ dm_CheckStatistics(Adapter);
+ rtw_hal_check_rxfifo_full(Adapter);
+ }
+
+ /* ODM */
+ if (hw_init_completed == true) {
+ u8 bLinked = false;
+ u8 bsta_state = false;
+ u8 bBtDisabled = true;
+
+ if (rtw_linked_check(Adapter)) {
+ bLinked = true;
+ if (check_fwstate(&Adapter->mlmepriv, WIFI_STATION_STATE))
+ bsta_state = true;
+ }
+
+ ODM_CmnInfoUpdate(&pHalData->odmpriv, ODM_CMNINFO_LINK, bLinked);
+ ODM_CmnInfoUpdate(&pHalData->odmpriv, ODM_CMNINFO_STATION_STATE, bsta_state);
+
+ /* ODM_CmnInfoUpdate(&pHalData->odmpriv , ODM_CMNINFO_RSSI_MIN, pdmpriv->MinUndecoratedPWDBForDM); */
+
+ bBtDisabled = rtw_btcoex_IsBtDisabled(Adapter);
+
+ ODM_CmnInfoUpdate(&pHalData->odmpriv, ODM_CMNINFO_BT_ENABLED, ((bBtDisabled == true)?false:true));
+
+ ODM_DMWatchdog(&pHalData->odmpriv);
+ }
+
+skip_dm:
+ return;
+}
+
+void rtl8723b_hal_dm_in_lps(struct adapter *padapter)
+{
+ u32 PWDB_rssi = 0;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct hal_com_data *pHalData = GET_HAL_DATA(padapter);
+ PDM_ODM_T pDM_Odm = &pHalData->odmpriv;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct sta_info *psta = NULL;
+
+ DBG_871X("%s, RSSI_Min =%d\n", __func__, pDM_Odm->RSSI_Min);
+
+ /* update IGI */
+ ODM_Write_DIG(pDM_Odm, pDM_Odm->RSSI_Min);
+
+
+ /* set rssi to fw */
+ psta = rtw_get_stainfo(pstapriv, get_bssid(pmlmepriv));
+ if (psta && (psta->rssi_stat.UndecoratedSmoothedPWDB > 0)) {
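+ /* Pack mac_id into the low bits and the smoothed PWDB (RSSI) into bits 16+ */
+ /* as the parameter for the RSSI H2C command below. */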
+ PWDB_rssi = (psta->mac_id | (psta->rssi_stat.UndecoratedSmoothedPWDB<<16));
+
+ rtl8723b_set_rssi_cmd(padapter, (u8 *)&PWDB_rssi);
+ }
+
+}
+
+void rtl8723b_HalDmWatchDog_in_LPS(struct adapter *Adapter)
+{
+ u8 bLinked = false;
+ struct hal_com_data *pHalData = GET_HAL_DATA(Adapter);
+ struct mlme_priv *pmlmepriv = &Adapter->mlmepriv;
+ struct dm_priv *pdmpriv = &pHalData->dmpriv;
+ PDM_ODM_T pDM_Odm = &pHalData->odmpriv;
+ pDIG_T pDM_DigTable = &pDM_Odm->DM_DigTable;
+ struct sta_priv *pstapriv = &Adapter->stapriv;
+ struct sta_info *psta = NULL;
+
+ if (Adapter->hw_init_completed == false)
+ goto skip_lps_dm;
+
+
+ if (rtw_linked_check(Adapter))
+ bLinked = true;
+
+ ODM_CmnInfoUpdate(&pHalData->odmpriv, ODM_CMNINFO_LINK, bLinked);
+
+ if (bLinked == false)
+ goto skip_lps_dm;
+
+ if (!(pDM_Odm->SupportAbility & ODM_BB_RSSI_MONITOR))
+ goto skip_lps_dm;
+
+
+ /* ODM_DMWatchdog(&pHalData->odmpriv); */
+ /* Do DIG by RSSI In LPS-32K */
+
+ /* 1 Find MIN-RSSI */
+ psta = rtw_get_stainfo(pstapriv, get_bssid(pmlmepriv));
+ if (psta == NULL)
+ goto skip_lps_dm;
+
+ pdmpriv->EntryMinUndecoratedSmoothedPWDB = psta->rssi_stat.UndecoratedSmoothedPWDB;
+
+ DBG_871X("CurIGValue =%d, EntryMinUndecoratedSmoothedPWDB = %d\n", pDM_DigTable->CurIGValue, pdmpriv->EntryMinUndecoratedSmoothedPWDB);
+
+ if (pdmpriv->EntryMinUndecoratedSmoothedPWDB <= 0)
+ goto skip_lps_dm;
+
+ pdmpriv->MinUndecoratedPWDBForDM = pdmpriv->EntryMinUndecoratedSmoothedPWDB;
+
+ pDM_Odm->RSSI_Min = pdmpriv->MinUndecoratedPWDBForDM;
+
+ /* if (pDM_DigTable->CurIGValue != pDM_Odm->RSSI_Min) */
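+ /* Only issue the in-LPS DM command if the current IGI differs from RSSI_Min by more than 5. */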
+ if (
+ (pDM_DigTable->CurIGValue > pDM_Odm->RSSI_Min + 5) ||
+ (pDM_DigTable->CurIGValue < pDM_Odm->RSSI_Min - 5)
+ )
+ rtw_dm_in_lps_wk_cmd(Adapter);
+
+
+skip_lps_dm:
+
+ return;
+
+}
+
+void rtl8723b_init_dm_priv(struct adapter *Adapter)
+{
+ struct hal_com_data *pHalData = GET_HAL_DATA(Adapter);
+ struct dm_priv *pdmpriv = &pHalData->dmpriv;
+
+ memset(pdmpriv, 0, sizeof(struct dm_priv));
+ Init_ODM_ComInfo_8723b(Adapter);
+}
diff --git a/drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c b/drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c
new file mode 100644
index 000000000000..163537faefd9
--- /dev/null
+++ b/drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c
@@ -0,0 +1,4549 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2013 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#define _HAL_INIT_C_
+
+#include <linux/firmware.h>
+#include <linux/slab.h>
+#include <drv_types.h>
+#include <rtw_debug.h>
+#include <rtl8723b_hal.h>
+#include "hal_com_h2c.h"
+
+static void _FWDownloadEnable(struct adapter *padapter, bool enable)
+{
+ u8 tmp, count = 0;
+
+ if (enable) {
+ /* 8051 enable */
+ tmp = rtw_read8(padapter, REG_SYS_FUNC_EN+1);
+ rtw_write8(padapter, REG_SYS_FUNC_EN+1, tmp|0x04);
+
+ tmp = rtw_read8(padapter, REG_MCUFWDL);
+ rtw_write8(padapter, REG_MCUFWDL, tmp|0x01);
+
+ do {
+ tmp = rtw_read8(padapter, REG_MCUFWDL);
+ if (tmp & 0x01)
+ break;
+ rtw_write8(padapter, REG_MCUFWDL, tmp|0x01);
+ msleep(1);
+ } while (count++ < 100);
+
+ if (count > 0)
+ DBG_871X("%s: !!!!!!!!Write 0x80 Fail!: count = %d\n", __func__, count);
+
+ /* 8051 reset */
+ tmp = rtw_read8(padapter, REG_MCUFWDL+2);
+ rtw_write8(padapter, REG_MCUFWDL+2, tmp&0xf7);
+ } else {
+ /* MCU firmware download disable. */
+ tmp = rtw_read8(padapter, REG_MCUFWDL);
+ rtw_write8(padapter, REG_MCUFWDL, tmp&0xfe);
+ }
+}
+
+static int _BlockWrite(struct adapter *padapter, void *buffer, u32 buffSize)
+{
+ int ret = _SUCCESS;
+
+ u32 blockSize_p1 = 4; /* (Default) Phase #1 : PCI must use 4-byte writes to download FW */
+ u32 blockSize_p2 = 8; /* Phase #2 : Use 8-byte writes if Phase #1 used a bigger block size. */
+ u32 blockSize_p3 = 1; /* Phase #3 : Use 1-byte writes for the remainder of the FW image. */
+ u32 blockCount_p1 = 0, blockCount_p2 = 0, blockCount_p3 = 0;
+ u32 remainSize_p1 = 0, remainSize_p2 = 0;
+ u8 *bufferPtr = (u8 *)buffer;
+ u32 i = 0, offset = 0;
+
+/* printk("====>%s %d\n", __func__, __LINE__); */
+
+ /* 3 Phase #1 */
+ blockCount_p1 = buffSize / blockSize_p1;
+ remainSize_p1 = buffSize % blockSize_p1;
+
+ if (blockCount_p1) {
+ RT_TRACE(
+ _module_hal_init_c_,
+ _drv_notice_,
+ (
+ "_BlockWrite: [P1] buffSize(%d) blockSize_p1(%d) blockCount_p1(%d) remainSize_p1(%d)\n",
+ buffSize,
+ blockSize_p1,
+ blockCount_p1,
+ remainSize_p1
+ )
+ );
+ }
+
+ for (i = 0; i < blockCount_p1; i++) {
+ ret = rtw_write32(padapter, (FW_8723B_START_ADDRESS + i * blockSize_p1), *((u32 *)(bufferPtr + i * blockSize_p1)));
+ if (ret == _FAIL) {
+ printk("====>%s %d i:%d\n", __func__, __LINE__, i);
+ goto exit;
+ }
+ }
+
+ /* 3 Phase #2 */
+ if (remainSize_p1) {
+ offset = blockCount_p1 * blockSize_p1;
+
+ blockCount_p2 = remainSize_p1/blockSize_p2;
+ remainSize_p2 = remainSize_p1%blockSize_p2;
+
+ if (blockCount_p2) {
+ RT_TRACE(
+ _module_hal_init_c_,
+ _drv_notice_,
+ (
+ "_BlockWrite: [P2] buffSize_p2(%d) blockSize_p2(%d) blockCount_p2(%d) remainSize_p2(%d)\n",
+ (buffSize-offset),
+ blockSize_p2,
+ blockCount_p2,
+ remainSize_p2
+ )
+ );
+ }
+
+ }
+
+ /* 3 Phase #3 */
+ if (remainSize_p2) {
+ offset = (blockCount_p1 * blockSize_p1) + (blockCount_p2 * blockSize_p2);
+
+ blockCount_p3 = remainSize_p2 / blockSize_p3;
+
+ RT_TRACE(_module_hal_init_c_, _drv_notice_,
+ ("_BlockWrite: [P3] buffSize_p3(%d) blockSize_p3(%d) blockCount_p3(%d)\n",
+ (buffSize-offset), blockSize_p3, blockCount_p3));
+
+ for (i = 0; i < blockCount_p3; i++) {
+ ret = rtw_write8(padapter, (FW_8723B_START_ADDRESS + offset + i), *(bufferPtr + offset + i));
+
+ if (ret == _FAIL) {
+ printk("====>%s %d i:%d\n", __func__, __LINE__, i);
+ goto exit;
+ }
+ }
+ }
+exit:
+ return ret;
+}
+
+static int _PageWrite(
+ struct adapter *padapter,
+ u32 page,
+ void *buffer,
+ u32 size
+)
+{
+ u8 value8;
+ u8 u8Page = (u8) (page & 0x07);
+
+ value8 = (rtw_read8(padapter, REG_MCUFWDL+2) & 0xF8) | u8Page;
+ rtw_write8(padapter, REG_MCUFWDL+2, value8);
+
+ return _BlockWrite(padapter, buffer, size);
+}
+
+static int _WriteFW(struct adapter *padapter, void *buffer, u32 size)
+{
+ /* Since we need to dynamically decide the firmware download method, we call this function to get the chip version. */
+ /* We can remove _ReadChipVersion from ReadpadapterInfo8192C later. */
+ int ret = _SUCCESS;
+ u32 pageNums, remainSize;
+ u32 page, offset;
+ u8 *bufferPtr = (u8 *)buffer;
+
+ pageNums = size / MAX_DLFW_PAGE_SIZE;
+ /* RT_ASSERT((pageNums <= 4), ("Page numbers should not greater then 4\n")); */
+ remainSize = size % MAX_DLFW_PAGE_SIZE;
+
+ for (page = 0; page < pageNums; page++) {
+ offset = page * MAX_DLFW_PAGE_SIZE;
+ ret = _PageWrite(padapter, page, bufferPtr+offset, MAX_DLFW_PAGE_SIZE);
+
+ if (ret == _FAIL) {
+ printk("====>%s %d\n", __func__, __LINE__);
+ goto exit;
+ }
+ }
+
+ if (remainSize) {
+ offset = pageNums * MAX_DLFW_PAGE_SIZE;
+ page = pageNums;
+ ret = _PageWrite(padapter, page, bufferPtr+offset, remainSize);
+
+ if (ret == _FAIL) {
+ printk("====>%s %d\n", __func__, __LINE__);
+ goto exit;
+ }
+ }
+ RT_TRACE(_module_hal_init_c_, _drv_info_, ("_WriteFW Done- for Normal chip.\n"));
+
+exit:
+ return ret;
+}
+
+void _8051Reset8723(struct adapter *padapter)
+{
+ u8 cpu_rst;
+ u8 io_rst;
+
+
+ /* Reset 8051(WLMCU) IO wrapper */
+ /* 0x1c[8] = 0 */
+ /* Suggested by Isaac@SD1 and Gimmy@SD1, coding by Lucas@20130624 */
+ io_rst = rtw_read8(padapter, REG_RSV_CTRL+1);
+ io_rst &= ~BIT(0);
+ rtw_write8(padapter, REG_RSV_CTRL+1, io_rst);
+
+ cpu_rst = rtw_read8(padapter, REG_SYS_FUNC_EN+1);
+ cpu_rst &= ~BIT(2);
+ rtw_write8(padapter, REG_SYS_FUNC_EN+1, cpu_rst);
+
+ /* Enable 8051 IO wrapper */
+ /* 0x1c[8] = 1 */
+ io_rst = rtw_read8(padapter, REG_RSV_CTRL+1);
+ io_rst |= BIT(0);
+ rtw_write8(padapter, REG_RSV_CTRL+1, io_rst);
+
+ cpu_rst = rtw_read8(padapter, REG_SYS_FUNC_EN+1);
+ cpu_rst |= BIT(2);
+ rtw_write8(padapter, REG_SYS_FUNC_EN+1, cpu_rst);
+
+ DBG_8192C("%s: Finish\n", __func__);
+}
+
+u8 g_fwdl_chksum_fail = 0;
+
+static s32 polling_fwdl_chksum(
+ struct adapter *adapter, u32 min_cnt, u32 timeout_ms
+)
+{
+ s32 ret = _FAIL;
+ u32 value32;
+ unsigned long start = jiffies;
+ u32 cnt = 0;
+
+ /* polling CheckSum report */
+ do {
+ cnt++;
+ value32 = rtw_read32(adapter, REG_MCUFWDL);
+ if (value32 & FWDL_ChkSum_rpt || adapter->bSurpriseRemoved || adapter->bDriverStopped)
+ break;
+ yield();
+ } while (jiffies_to_msecs(jiffies-start) < timeout_ms || cnt < min_cnt);
+
+ if (!(value32 & FWDL_ChkSum_rpt)) {
+ goto exit;
+ }
+
+ if (g_fwdl_chksum_fail) {
+ DBG_871X("%s: fwdl test case: fwdl_chksum_fail\n", __func__);
+ g_fwdl_chksum_fail--;
+ goto exit;
+ }
+
+ ret = _SUCCESS;
+
+exit:
+ DBG_871X(
+ "%s: Checksum report %s! (%u, %dms), REG_MCUFWDL:0x%08x\n",
+ __func__,
+ (ret == _SUCCESS) ? "OK" : "Fail",
+ cnt,
+ jiffies_to_msecs(jiffies-start),
+ value32
+ );
+
+ return ret;
+}
+
+u8 g_fwdl_wintint_rdy_fail = 0;
+
+static s32 _FWFreeToGo(struct adapter *adapter, u32 min_cnt, u32 timeout_ms)
+{
+ s32 ret = _FAIL;
+ u32 value32;
+ unsigned long start = jiffies;
+ u32 cnt = 0;
+
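+ /* Mark the download as complete (MCUFWDL_RDY) and clear WINTINI_RDY, then */
+ /* reset the 8051 and poll until the firmware sets WINTINI_RDY again. */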
+ value32 = rtw_read32(adapter, REG_MCUFWDL);
+ value32 |= MCUFWDL_RDY;
+ value32 &= ~WINTINI_RDY;
+ rtw_write32(adapter, REG_MCUFWDL, value32);
+
+ _8051Reset8723(adapter);
+
+ /* polling for FW ready */
+ do {
+ cnt++;
+ value32 = rtw_read32(adapter, REG_MCUFWDL);
+ if (value32 & WINTINI_RDY || adapter->bSurpriseRemoved || adapter->bDriverStopped)
+ break;
+ yield();
+ } while (jiffies_to_msecs(jiffies - start) < timeout_ms || cnt < min_cnt);
+
+ if (!(value32 & WINTINI_RDY)) {
+ goto exit;
+ }
+
+ if (g_fwdl_wintint_rdy_fail) {
+ DBG_871X("%s: fwdl test case: wintint_rdy_fail\n", __func__);
+ g_fwdl_wintint_rdy_fail--;
+ goto exit;
+ }
+
+ ret = _SUCCESS;
+
+exit:
+ DBG_871X(
+ "%s: Polling FW ready %s! (%u, %dms), REG_MCUFWDL:0x%08x\n",
+ __func__,
+ (ret == _SUCCESS) ? "OK" : "Fail",
+ cnt,
+ jiffies_to_msecs(jiffies-start),
+ value32
+ );
+
+ return ret;
+}
+
+#define IS_FW_81xxC(padapter) (((GET_HAL_DATA(padapter))->FirmwareSignature & 0xFFF0) == 0x88C0)
+
+void rtl8723b_FirmwareSelfReset(struct adapter *padapter)
+{
+ struct hal_com_data *pHalData = GET_HAL_DATA(padapter);
+ u8 u1bTmp;
+ u8 Delay = 100;
+
+ if (
+ !(IS_FW_81xxC(padapter) && ((pHalData->FirmwareVersion < 0x21) || (pHalData->FirmwareVersion == 0x21 && pHalData->FirmwareSubVersion < 0x01)))
+ ) { /* after 88C Fw v33.1 */
+ /* 0x1cf = 0x20. Inform 8051 to reset. 2009.12.25. tynli_test */
+ rtw_write8(padapter, REG_HMETFR+3, 0x20);
+
+ u1bTmp = rtw_read8(padapter, REG_SYS_FUNC_EN+1);
+ while (u1bTmp & BIT2) {
+ Delay--;
+ if (Delay == 0)
+ break;
+ udelay(50);
+ u1bTmp = rtw_read8(padapter, REG_SYS_FUNC_EN+1);
+ }
+ RT_TRACE(_module_hal_init_c_, _drv_notice_, ("-%s: 8051 reset success (%d)\n", __func__, Delay));
+
+ if (Delay == 0) {
+ RT_TRACE(_module_hal_init_c_, _drv_notice_, ("%s: Force 8051 reset!!!\n", __func__));
+ /* force firmware reset */
+ u1bTmp = rtw_read8(padapter, REG_SYS_FUNC_EN+1);
+ rtw_write8(padapter, REG_SYS_FUNC_EN+1, u1bTmp&(~BIT2));
+ }
+ }
+}
+
+/* */
+/* Description: */
+/* Download 8192C firmware code. */
+/* */
+/* */
+s32 rtl8723b_FirmwareDownload(struct adapter *padapter, bool bUsedWoWLANFw)
+{
+ s32 rtStatus = _SUCCESS;
+ u8 write_fw = 0;
+ unsigned long fwdl_start_time;
+ struct hal_com_data *pHalData = GET_HAL_DATA(padapter);
+ struct rt_firmware *pFirmware;
+ struct rt_firmware *pBTFirmware;
+ struct rt_firmware_hdr *pFwHdr = NULL;
+ u8 *pFirmwareBuf;
+ u32 FirmwareLen;
+ const struct firmware *fw;
+ struct device *device = dvobj_to_dev(padapter->dvobj);
+ u8 *fwfilepath;
+ struct dvobj_priv *psdpriv = padapter->dvobj;
+ struct debug_priv *pdbgpriv = &psdpriv->drv_dbg;
+ u8 tmp_ps;
+
+ RT_TRACE(_module_hal_init_c_, _drv_info_, ("+%s\n", __func__));
+#ifdef CONFIG_WOWLAN
+ RT_TRACE(_module_hal_init_c_, _drv_notice_, ("+%s, bUsedWoWLANFw:%d\n", __func__, bUsedWoWLANFw));
+#endif
+ pFirmware = kzalloc(sizeof(struct rt_firmware), GFP_KERNEL);
+ if (!pFirmware)
+ return _FAIL;
+ pBTFirmware = kzalloc(sizeof(struct rt_firmware), GFP_KERNEL);
+ if (!pBTFirmware) {
+ kfree(pFirmware);
+ return _FAIL;
+ }
+ tmp_ps = rtw_read8(padapter, 0xa3);
+ tmp_ps &= 0xf8;
+ tmp_ps |= 0x02;
+ /* 1. write 0xA3[2:0] = 3'b010 */
+ rtw_write8(padapter, 0xa3, tmp_ps);
+ /* 2. read power_state = 0xA0[1:0] */
+ tmp_ps = rtw_read8(padapter, 0xa0);
+ tmp_ps &= 0x03;
+ if (tmp_ps != 0x01) {
+ DBG_871X(FUNC_ADPT_FMT" tmp_ps =%x\n", FUNC_ADPT_ARG(padapter), tmp_ps);
+ pdbgpriv->dbg_downloadfw_pwr_state_cnt++;
+ }
+
+#ifdef CONFIG_WOWLAN
+ if (bUsedWoWLANFw)
+ fwfilepath = "rtlwifi/rtl8723bs_wowlan.bin";
+ else
+#endif /* CONFIG_WOWLAN */
+ fwfilepath = "rtlwifi/rtl8723bs_nic.bin";
+
+ pr_info("rtl8723bs: acquire FW from file:%s\n", fwfilepath);
+
+ rtStatus = request_firmware(&fw, fwfilepath, device);
+ if (rtStatus) {
+ pr_err("Request firmware failed with error 0x%x\n", rtStatus);
+ rtStatus = _FAIL;
+ goto exit;
+ }
+
+ if (!fw) {
+ pr_err("Firmware %s not available\n", fwfilepath);
+ rtStatus = _FAIL;
+ goto exit;
+ }
+
+ if (fw->size > FW_8723B_SIZE) {
+ rtStatus = _FAIL;
+ RT_TRACE(
+ _module_hal_init_c_,
+ _drv_err_,
+ ("Firmware size exceed 0x%X. Check it.\n", FW_8188E_SIZE)
+ );
+ goto exit;
+ }
+
+ pFirmware->szFwBuffer = kzalloc(fw->size, GFP_KERNEL);
+ if (!pFirmware->szFwBuffer) {
+ rtStatus = _FAIL;
+ goto exit;
+ }
+
+ memcpy(pFirmware->szFwBuffer, fw->data, fw->size);
+ pFirmware->ulFwLength = fw->size;
+ release_firmware(fw);
+ if (pFirmware->ulFwLength > FW_8723B_SIZE) {
+ rtStatus = _FAIL;
+ DBG_871X_LEVEL(_drv_emerg_, "Firmware size:%u exceed %u\n", pFirmware->ulFwLength, FW_8723B_SIZE);
+ goto release_fw1;
+ }
+
+ pFirmwareBuf = pFirmware->szFwBuffer;
+ FirmwareLen = pFirmware->ulFwLength;
+
+ /* To Check Fw header. Added by tynli. 2009.12.04. */
+ pFwHdr = (struct rt_firmware_hdr *)pFirmwareBuf;
+
+ pHalData->FirmwareVersion = le16_to_cpu(pFwHdr->Version);
+ pHalData->FirmwareSubVersion = le16_to_cpu(pFwHdr->Subversion);
+ pHalData->FirmwareSignature = le16_to_cpu(pFwHdr->Signature);
+
+ DBG_871X(
+ "%s: fw_ver =%x fw_subver =%04x sig = 0x%x, Month =%02x, Date =%02x, Hour =%02x, Minute =%02x\n",
+ __func__,
+ pHalData->FirmwareVersion,
+ pHalData->FirmwareSubVersion,
+ pHalData->FirmwareSignature,
+ pFwHdr->Month,
+ pFwHdr->Date,
+ pFwHdr->Hour,
+ pFwHdr->Minute
+ );
+
+ if (IS_FW_HEADER_EXIST_8723B(pFwHdr)) {
+ DBG_871X("%s(): Shift for fw header!\n", __func__);
+ /* Shift 32 bytes for FW header */
+ pFirmwareBuf = pFirmwareBuf + 32;
+ FirmwareLen = FirmwareLen - 32;
+ }
+
+ /* Suggested by Filen. If the 8051 is running RAM code, the driver should tell the Fw to reset itself, */
+ /* or the Fw download will fail. 2010.02.01. by tynli. */
+ if (rtw_read8(padapter, REG_MCUFWDL) & RAM_DL_SEL) { /* 8051 RAM code */
+ rtw_write8(padapter, REG_MCUFWDL, 0x00);
+ rtl8723b_FirmwareSelfReset(padapter);
+ }
+
+ _FWDownloadEnable(padapter, true);
+ fwdl_start_time = jiffies;
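+ /* Write the whole firmware image, retrying while fewer than three attempts */
+ /* have been made or less than 500 ms have elapsed, and verify the download */
+ /* checksum after each pass. */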
+ while (
+ !padapter->bDriverStopped &&
+ !padapter->bSurpriseRemoved &&
+ (write_fw++ < 3 || jiffies_to_msecs(jiffies - fwdl_start_time) < 500)
+ ) {
+ /* reset FWDL chksum */
+ rtw_write8(padapter, REG_MCUFWDL, rtw_read8(padapter, REG_MCUFWDL)|FWDL_ChkSum_rpt);
+
+ rtStatus = _WriteFW(padapter, pFirmwareBuf, FirmwareLen);
+ if (rtStatus != _SUCCESS)
+ continue;
+
+ rtStatus = polling_fwdl_chksum(padapter, 5, 50);
+ if (rtStatus == _SUCCESS)
+ break;
+ }
+ _FWDownloadEnable(padapter, false);
+ if (_SUCCESS != rtStatus)
+ goto fwdl_stat;
+
+ rtStatus = _FWFreeToGo(padapter, 10, 200);
+ if (_SUCCESS != rtStatus)
+ goto fwdl_stat;
+
+fwdl_stat:
+ DBG_871X(
+ "FWDL %s. write_fw:%u, %dms\n",
+ (rtStatus == _SUCCESS)?"success":"fail",
+ write_fw,
+ jiffies_to_msecs(jiffies - fwdl_start_time)
+ );
+
+exit:
+ kfree(pFirmware->szFwBuffer);
+ kfree(pFirmware);
+release_fw1:
+ kfree(pBTFirmware);
+ DBG_871X(" <=== rtl8723b_FirmwareDownload()\n");
+ return rtStatus;
+}
+
+void rtl8723b_InitializeFirmwareVars(struct adapter *padapter)
+{
+ struct hal_com_data *pHalData = GET_HAL_DATA(padapter);
+
+ /* Init Fw LPS related. */
+ adapter_to_pwrctl(padapter)->bFwCurrentInPSMode = false;
+
+ /* Init H2C cmd. */
+ rtw_write8(padapter, REG_HMETFR, 0x0f);
+
+ /* Init H2C counter. by tynli. 2009.12.09. */
+ pHalData->LastHMEBoxNum = 0;
+/* pHalData->H2CQueueHead = 0; */
+/* pHalData->H2CQueueTail = 0; */
+/* pHalData->H2CStopInsertQueue = false; */
+}
+
+#if defined(CONFIG_WOWLAN) || defined(CONFIG_AP_WOWLAN)
+/* */
+
+/* */
+/* Description: Prepare some information to Fw for WoWLAN. */
+/* (1) Download wowlan Fw. */
+/* (2) Download RSVD page packets. */
+/* (3) Enable AP offload if needed. */
+/* */
+/* 2011.04.12 by tynli. */
+/* */
+void SetFwRelatedForWoWLAN8723b(
+ struct adapter *padapter, u8 bHostIsGoingtoSleep
+)
+{
+ int status = _FAIL;
+ /* */
+ /* 1. Before WoWLAN we need to re-download WoWLAN Fw. */
+ /* */
+ status = rtl8723b_FirmwareDownload(padapter, bHostIsGoingtoSleep);
+ if (status != _SUCCESS) {
+ DBG_871X("SetFwRelatedForWoWLAN8723b(): Re-Download Firmware failed!!\n");
+ return;
+ } else {
+ DBG_871X("SetFwRelatedForWoWLAN8723b(): Re-Download Firmware Success !!\n");
+ }
+ /* */
+ /* 2. Re-Init the variables about Fw related setting. */
+ /* */
+ rtl8723b_InitializeFirmwareVars(padapter);
+}
+#endif /* CONFIG_WOWLAN || CONFIG_AP_WOWLAN */
+
+static void rtl8723b_free_hal_data(struct adapter *padapter)
+{
+}
+
+/* */
+/* Efuse related code */
+/* */
+static u8 hal_EfuseSwitchToBank(
+ struct adapter *padapter, u8 bank, bool bPseudoTest
+)
+{
+ u8 bRet = false;
+ u32 value32 = 0;
+#ifdef HAL_EFUSE_MEMORY
+ struct hal_com_data *pHalData = GET_HAL_DATA(padapter);
+ PEFUSE_HAL pEfuseHal = &pHalData->EfuseHal;
+#endif
+
+
+ DBG_8192C("%s: Efuse switch bank to %d\n", __func__, bank);
+ if (bPseudoTest) {
+#ifdef HAL_EFUSE_MEMORY
+ pEfuseHal->fakeEfuseBank = bank;
+#else
+ fakeEfuseBank = bank;
+#endif
+ bRet = true;
+ } else {
+ value32 = rtw_read32(padapter, EFUSE_TEST);
+ bRet = true;
+ switch (bank) {
+ case 0:
+ value32 = (value32 & ~EFUSE_SEL_MASK) | EFUSE_SEL(EFUSE_WIFI_SEL_0);
+ break;
+ case 1:
+ value32 = (value32 & ~EFUSE_SEL_MASK) | EFUSE_SEL(EFUSE_BT_SEL_0);
+ break;
+ case 2:
+ value32 = (value32 & ~EFUSE_SEL_MASK) | EFUSE_SEL(EFUSE_BT_SEL_1);
+ break;
+ case 3:
+ value32 = (value32 & ~EFUSE_SEL_MASK) | EFUSE_SEL(EFUSE_BT_SEL_2);
+ break;
+ default:
+ value32 = (value32 & ~EFUSE_SEL_MASK) | EFUSE_SEL(EFUSE_WIFI_SEL_0);
+ bRet = false;
+ break;
+ }
+ rtw_write32(padapter, EFUSE_TEST, value32);
+ }
+
+ return bRet;
+}
+
+static void Hal_GetEfuseDefinition(
+ struct adapter *padapter,
+ u8 efuseType,
+ u8 type,
+ void *pOut,
+ bool bPseudoTest
+)
+{
+ switch (type) {
+ case TYPE_EFUSE_MAX_SECTION:
+ {
+ u8 *pMax_section;
+ pMax_section = (u8 *)pOut;
+
+ if (efuseType == EFUSE_WIFI)
+ *pMax_section = EFUSE_MAX_SECTION_8723B;
+ else
+ *pMax_section = EFUSE_BT_MAX_SECTION;
+ }
+ break;
+
+ case TYPE_EFUSE_REAL_CONTENT_LEN:
+ {
+ u16 *pu2Tmp;
+ pu2Tmp = (u16 *)pOut;
+
+ if (efuseType == EFUSE_WIFI)
+ *pu2Tmp = EFUSE_REAL_CONTENT_LEN_8723B;
+ else
+ *pu2Tmp = EFUSE_BT_REAL_CONTENT_LEN;
+ }
+ break;
+
+ case TYPE_AVAILABLE_EFUSE_BYTES_BANK:
+ {
+ u16 *pu2Tmp;
+ pu2Tmp = (u16 *)pOut;
+
+ if (efuseType == EFUSE_WIFI)
+ *pu2Tmp = (EFUSE_REAL_CONTENT_LEN_8723B-EFUSE_OOB_PROTECT_BYTES);
+ else
+ *pu2Tmp = (EFUSE_BT_REAL_BANK_CONTENT_LEN-EFUSE_PROTECT_BYTES_BANK);
+ }
+ break;
+
+ case TYPE_AVAILABLE_EFUSE_BYTES_TOTAL:
+ {
+ u16 *pu2Tmp;
+ pu2Tmp = (u16 *)pOut;
+
+ if (efuseType == EFUSE_WIFI)
+ *pu2Tmp = (EFUSE_REAL_CONTENT_LEN_8723B-EFUSE_OOB_PROTECT_BYTES);
+ else
+ *pu2Tmp = (EFUSE_BT_REAL_CONTENT_LEN-(EFUSE_PROTECT_BYTES_BANK*3));
+ }
+ break;
+
+ case TYPE_EFUSE_MAP_LEN:
+ {
+ u16 *pu2Tmp;
+ pu2Tmp = (u16 *)pOut;
+
+ if (efuseType == EFUSE_WIFI)
+ *pu2Tmp = EFUSE_MAX_MAP_LEN;
+ else
+ *pu2Tmp = EFUSE_BT_MAP_LEN;
+ }
+ break;
+
+ case TYPE_EFUSE_PROTECT_BYTES_BANK:
+ {
+ u8 *pu1Tmp;
+ pu1Tmp = (u8 *)pOut;
+
+ if (efuseType == EFUSE_WIFI)
+ *pu1Tmp = EFUSE_OOB_PROTECT_BYTES;
+ else
+ *pu1Tmp = EFUSE_PROTECT_BYTES_BANK;
+ }
+ break;
+
+ case TYPE_EFUSE_CONTENT_LEN_BANK:
+ {
+ u16 *pu2Tmp;
+ pu2Tmp = (u16 *)pOut;
+
+ if (efuseType == EFUSE_WIFI)
+ *pu2Tmp = EFUSE_REAL_CONTENT_LEN_8723B;
+ else
+ *pu2Tmp = EFUSE_BT_REAL_BANK_CONTENT_LEN;
+ }
+ break;
+
+ default:
+ {
+ u8 *pu1Tmp;
+ pu1Tmp = (u8 *)pOut;
+ *pu1Tmp = 0;
+ }
+ break;
+ }
+}
+
+#define VOLTAGE_V25 0x03
+#define LDOE25_SHIFT 28
+
+/* */
+/* The following is for compile ok */
+/* That should be merged with the original in the future */
+/* */
+#define EFUSE_ACCESS_ON_8723 0x69 /* For RTL8723 only. */
+#define EFUSE_ACCESS_OFF_8723 0x00 /* For RTL8723 only. */
+#define REG_EFUSE_ACCESS_8723 0x00CF /* Efuse access protection for RTL8723 */
+
+/* */
+static void Hal_BT_EfusePowerSwitch(
+ struct adapter *padapter, u8 bWrite, u8 PwrState
+)
+{
+ u8 tempval;
+ if (PwrState == true) {
+ /* enable BT power cut */
+ /* 0x6A[14] = 1 */
+ tempval = rtw_read8(padapter, 0x6B);
+ tempval |= BIT(6);
+ rtw_write8(padapter, 0x6B, tempval);
+
+ /* Attention!! The settings of 0x6A[14] and 0x6A[15] need a 100us delay between them, */
+ /* so don't write 0x6A[14]= 1 and 0x6A[15]= 0 together! */
+ msleep(1);
+ /* disable BT output isolation */
+ /* 0x6A[15] = 0 */
+ tempval = rtw_read8(padapter, 0x6B);
+ tempval &= ~BIT(7);
+ rtw_write8(padapter, 0x6B, tempval);
+ } else {
+ /* enable BT output isolation */
+ /* 0x6A[15] = 1 */
+ tempval = rtw_read8(padapter, 0x6B);
+ tempval |= BIT(7);
+ rtw_write8(padapter, 0x6B, tempval);
+
+ /* Attention!! The settings of 0x6A[14] and 0x6A[15] need a 100us delay between them, */
+ /* so don't write 0x6A[14]= 1 and 0x6A[15]= 0 together! */
+
+ /* disable BT power cut */
+ /* 0x6A[14] = 0 */
+ tempval = rtw_read8(padapter, 0x6B);
+ tempval &= ~BIT(6);
+ rtw_write8(padapter, 0x6B, tempval);
+ }
+
+}
+static void Hal_EfusePowerSwitch(
+ struct adapter *padapter, u8 bWrite, u8 PwrState
+)
+{
+ u8 tempval;
+ u16 tmpV16;
+
+
+ if (PwrState == true) {
+ /* To avoid being unable to access efuse registers after several disable/enable cycles during DTM test. */
+ /* Suggested by SD1 IsaacHsu. 2013.07.08, added by tynli. */
+ tempval = rtw_read8(padapter, SDIO_LOCAL_BASE|SDIO_REG_HSUS_CTRL);
+ if (tempval & BIT(0)) { /* SDIO local register is suspend */
+ u8 count = 0;
+
+
+ tempval &= ~BIT(0);
+ rtw_write8(padapter, SDIO_LOCAL_BASE|SDIO_REG_HSUS_CTRL, tempval);
+
+ /* check 0x86[1:0] == 2'b10, wait for the power state to leave suspend */
+ do {
+ tempval = rtw_read8(padapter, SDIO_LOCAL_BASE|SDIO_REG_HSUS_CTRL);
+ tempval &= 0x3;
+ if (tempval == 0x02)
+ break;
+
+ count++;
+ if (count >= 100)
+ break;
+
+ mdelay(10);
+ } while (1);
+
+ if (count >= 100) {
+ DBG_8192C(FUNC_ADPT_FMT ": Leave SDIO local register suspend fail! Local 0x86 =%#X\n",
+ FUNC_ADPT_ARG(padapter), tempval);
+ } else {
+ DBG_8192C(FUNC_ADPT_FMT ": Leave SDIO local register suspend OK! Local 0x86 =%#X\n",
+ FUNC_ADPT_ARG(padapter), tempval);
+ }
+ }
+
+ rtw_write8(padapter, REG_EFUSE_ACCESS_8723, EFUSE_ACCESS_ON_8723);
+
+ /* Reset: 0x0000h[28], default valid */
+ tmpV16 = rtw_read16(padapter, REG_SYS_FUNC_EN);
+ if (!(tmpV16 & FEN_ELDR)) {
+ tmpV16 |= FEN_ELDR;
+ rtw_write16(padapter, REG_SYS_FUNC_EN, tmpV16);
+ }
+
+ /* Clock: Gated(0x0008h[5]) 8M(0x0008h[1]) clock from ANA, default valid */
+ tmpV16 = rtw_read16(padapter, REG_SYS_CLKR);
+ if ((!(tmpV16 & LOADER_CLK_EN)) || (!(tmpV16 & ANA8M))) {
+ tmpV16 |= (LOADER_CLK_EN | ANA8M);
+ rtw_write16(padapter, REG_SYS_CLKR, tmpV16);
+ }
+
+ if (bWrite == true) {
+ /* Enable LDO 2.5V before read/write action */
+ tempval = rtw_read8(padapter, EFUSE_TEST+3);
+ tempval &= 0x0F;
+ tempval |= (VOLTAGE_V25 << 4);
+ rtw_write8(padapter, EFUSE_TEST+3, (tempval | 0x80));
+
+ /* rtw_write8(padapter, REG_EFUSE_ACCESS, EFUSE_ACCESS_ON); */
+ }
+ } else {
+ rtw_write8(padapter, REG_EFUSE_ACCESS, EFUSE_ACCESS_OFF);
+
+ if (bWrite == true) {
+ /* Disable LDO 2.5V after read/write action */
+ tempval = rtw_read8(padapter, EFUSE_TEST+3);
+ rtw_write8(padapter, EFUSE_TEST+3, (tempval & 0x7F));
+ }
+
+ }
+}
+
+static void hal_ReadEFuse_WiFi(
+ struct adapter *padapter,
+ u16 _offset,
+ u16 _size_byte,
+ u8 *pbuf,
+ bool bPseudoTest
+)
+{
+#ifdef HAL_EFUSE_MEMORY
+ struct hal_com_data *pHalData = GET_HAL_DATA(padapter);
+ PEFUSE_HAL pEfuseHal = &pHalData->EfuseHal;
+#endif
+ u8 *efuseTbl = NULL;
+ u16 eFuse_Addr = 0;
+ u8 offset, wden;
+ u8 efuseHeader, efuseExtHdr, efuseData;
+ u16 i, total, used;
+ u8 efuse_usage = 0;
+
+ /* DBG_871X("YJ: ====>%s():_offset =%d _size_byte =%d bPseudoTest =%d\n", __func__, _offset, _size_byte, bPseudoTest); */
+ /* */
+ /* Do NOT exceed the total size of the EFuse table. Added by Roger, 2008.11.10. */
+ /* */
+ if ((_offset+_size_byte) > EFUSE_MAX_MAP_LEN) {
+ DBG_8192C("%s: Invalid offset(%#x) with read bytes(%#x)!!\n", __func__, _offset, _size_byte);
+ return;
+ }
+
+ efuseTbl = (u8 *)rtw_malloc(EFUSE_MAX_MAP_LEN);
+ if (efuseTbl == NULL) {
+ DBG_8192C("%s: alloc efuseTbl fail!\n", __func__);
+ return;
+ }
+ /* 0xff will be efuse default value instead of 0x00. */
+ memset(efuseTbl, 0xFF, EFUSE_MAX_MAP_LEN);
+
+
+#ifdef DEBUG
+if (0) {
+ for (i = 0; i < 256; i++)
+ efuse_OneByteRead(padapter, i, &efuseTbl[i], false);
+ DBG_871X("Efuse Content:\n");
+ for (i = 0; i < 256; i++) {
+ if (i % 16 == 0)
+ printk("\n");
+ printk("%02X ", efuseTbl[i]);
+ }
+ printk("\n");
+}
+#endif
+
+
+ /* switch bank back to bank 0 for later BT and wifi use. */
+ hal_EfuseSwitchToBank(padapter, 0, bPseudoTest);
+
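+ /* Walk the physical efuse: each PG packet is a 1- or 2-byte header carrying */
+ /* the section offset and word-enable mask, followed by the enabled 2-byte */
+ /* words, which are scattered into the logical map (efuseTbl). */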
+ while (AVAILABLE_EFUSE_ADDR(eFuse_Addr)) {
+ efuse_OneByteRead(padapter, eFuse_Addr++, &efuseHeader, bPseudoTest);
+ if (efuseHeader == 0xFF) {
+ DBG_8192C("%s: data end at address =%#x\n", __func__, eFuse_Addr-1);
+ break;
+ }
+ /* DBG_8192C("%s: efuse[0x%X]= 0x%02X\n", __func__, eFuse_Addr-1, efuseHeader); */
+
+ /* Check PG header for section num. */
+ if (EXT_HEADER(efuseHeader)) { /* extended header */
+ offset = GET_HDR_OFFSET_2_0(efuseHeader);
+ /* DBG_8192C("%s: extended header offset = 0x%X\n", __func__, offset); */
+
+ efuse_OneByteRead(padapter, eFuse_Addr++, &efuseExtHdr, bPseudoTest);
+ /* DBG_8192C("%s: efuse[0x%X]= 0x%02X\n", __func__, eFuse_Addr-1, efuseExtHdr); */
+ if (ALL_WORDS_DISABLED(efuseExtHdr))
+ continue;
+
+ offset |= ((efuseExtHdr & 0xF0) >> 1);
+ wden = (efuseExtHdr & 0x0F);
+ } else {
+ offset = ((efuseHeader >> 4) & 0x0f);
+ wden = (efuseHeader & 0x0f);
+ }
+ /* DBG_8192C("%s: Offset =%d Worden = 0x%X\n", __func__, offset, wden); */
+
+ if (offset < EFUSE_MAX_SECTION_8723B) {
+ u16 addr;
+ /* Get word enable value from PG header */
+/* DBG_8192C("%s: Offset =%d Worden = 0x%X\n", __func__, offset, wden); */
+
+ addr = offset * PGPKT_DATA_SIZE;
+ for (i = 0; i < EFUSE_MAX_WORD_UNIT; i++) {
+ /* Check word enable condition in the section */
+ if (!(wden & (0x01<<i))) {
+ efuse_OneByteRead(padapter, eFuse_Addr++, &efuseData, bPseudoTest);
+/* DBG_8192C("%s: efuse[%#X]= 0x%02X\n", __func__, eFuse_Addr-1, efuseData); */
+ efuseTbl[addr] = efuseData;
+
+ efuse_OneByteRead(padapter, eFuse_Addr++, &efuseData, bPseudoTest);
+/* DBG_8192C("%s: efuse[%#X]= 0x%02X\n", __func__, eFuse_Addr-1, efuseData); */
+ efuseTbl[addr+1] = efuseData;
+ }
+ addr += 2;
+ }
+ } else {
+ DBG_8192C(KERN_ERR "%s: offset(%d) is illegal!!\n", __func__, offset);
+ eFuse_Addr += Efuse_CalculateWordCnts(wden)*2;
+ }
+ }
+
+ /* Copy from Efuse map to output pointer memory!!! */
+ for (i = 0; i < _size_byte; i++)
+ pbuf[i] = efuseTbl[_offset+i];
+
+#ifdef DEBUG
+if (1) {
+ DBG_871X("Efuse Realmap:\n");
+ for (i = 0; i < _size_byte; i++) {
+ if (i % 16 == 0)
+ printk("\n");
+ printk("%02X ", pbuf[i]);
+ }
+ printk("\n");
+}
+#endif
+ /* Calculate Efuse utilization */
+ EFUSE_GetEfuseDefinition(padapter, EFUSE_WIFI, TYPE_AVAILABLE_EFUSE_BYTES_TOTAL, &total, bPseudoTest);
+ used = eFuse_Addr - 1;
+ efuse_usage = (u8)((used*100)/total);
+ if (bPseudoTest) {
+#ifdef HAL_EFUSE_MEMORY
+ pEfuseHal->fakeEfuseUsedBytes = used;
+#else
+ fakeEfuseUsedBytes = used;
+#endif
+ } else {
+ rtw_hal_set_hwreg(padapter, HW_VAR_EFUSE_BYTES, (u8 *)&used);
+ rtw_hal_set_hwreg(padapter, HW_VAR_EFUSE_USAGE, (u8 *)&efuse_usage);
+ }
+
+ kfree(efuseTbl);
+}
+
+static void hal_ReadEFuse_BT(
+ struct adapter *padapter,
+ u16 _offset,
+ u16 _size_byte,
+ u8 *pbuf,
+ bool bPseudoTest
+)
+{
+#ifdef HAL_EFUSE_MEMORY
+ struct hal_com_data *pHalData = GET_HAL_DATA(padapter);
+ PEFUSE_HAL pEfuseHal = &pHalData->EfuseHal;
+#endif
+ u8 *efuseTbl;
+ u8 bank;
+ u16 eFuse_Addr;
+ u8 efuseHeader, efuseExtHdr, efuseData;
+ u8 offset, wden;
+ u16 i, total, used;
+ u8 efuse_usage;
+
+
+ /* */
+ /* Do NOT exceed the total size of the EFuse table. Added by Roger, 2008.11.10. */
+ /* */
+ if ((_offset+_size_byte) > EFUSE_BT_MAP_LEN) {
+ DBG_8192C("%s: Invalid offset(%#x) with read bytes(%#x)!!\n", __func__, _offset, _size_byte);
+ return;
+ }
+
+ efuseTbl = rtw_malloc(EFUSE_BT_MAP_LEN);
+ if (efuseTbl == NULL) {
+ DBG_8192C("%s: efuseTbl malloc fail!\n", __func__);
+ return;
+ }
+ /* 0xff will be efuse default value instead of 0x00. */
+ memset(efuseTbl, 0xFF, EFUSE_BT_MAP_LEN);
+
+ EFUSE_GetEfuseDefinition(padapter, EFUSE_BT, TYPE_AVAILABLE_EFUSE_BYTES_BANK, &total, bPseudoTest);
+
+ for (bank = 1; bank < 3; bank++) { /* 8723B max bank 0~2 */
+ if (hal_EfuseSwitchToBank(padapter, bank, bPseudoTest) == false) {
+ DBG_8192C("%s: hal_EfuseSwitchToBank Fail!!\n", __func__);
+ goto exit;
+ }
+
+ eFuse_Addr = 0;
+
+ while (AVAILABLE_EFUSE_ADDR(eFuse_Addr)) {
+ efuse_OneByteRead(padapter, eFuse_Addr++, &efuseHeader, bPseudoTest);
+ if (efuseHeader == 0xFF)
+ break;
+ DBG_8192C("%s: efuse[%#X]= 0x%02x (header)\n", __func__, (((bank-1)*EFUSE_REAL_CONTENT_LEN_8723B)+eFuse_Addr-1), efuseHeader);
+
+ /* Check PG header for section num. */
+ if (EXT_HEADER(efuseHeader)) { /* extended header */
+ offset = GET_HDR_OFFSET_2_0(efuseHeader);
+ DBG_8192C("%s: extended header offset_2_0 = 0x%X\n", __func__, offset);
+
+ efuse_OneByteRead(padapter, eFuse_Addr++, &efuseExtHdr, bPseudoTest);
+ DBG_8192C("%s: efuse[%#X]= 0x%02x (ext header)\n", __func__, (((bank-1)*EFUSE_REAL_CONTENT_LEN_8723B)+eFuse_Addr-1), efuseExtHdr);
+ if (ALL_WORDS_DISABLED(efuseExtHdr))
+ continue;
+
+
+ offset |= ((efuseExtHdr & 0xF0) >> 1);
+ wden = (efuseExtHdr & 0x0F);
+ } else {
+ offset = ((efuseHeader >> 4) & 0x0f);
+ wden = (efuseHeader & 0x0f);
+ }
+
+ if (offset < EFUSE_BT_MAX_SECTION) {
+ u16 addr;
+
+ /* Get word enable value from PG header */
+ DBG_8192C("%s: Offset =%d Worden =%#X\n", __func__, offset, wden);
+
+ addr = offset * PGPKT_DATA_SIZE;
+ for (i = 0; i < EFUSE_MAX_WORD_UNIT; i++) {
+ /* Check word enable condition in the section */
+ if (!(wden & (0x01<<i))) {
+ efuse_OneByteRead(padapter, eFuse_Addr++, &efuseData, bPseudoTest);
+ DBG_8192C("%s: efuse[%#X]= 0x%02X\n", __func__, eFuse_Addr-1, efuseData);
+ efuseTbl[addr] = efuseData;
+
+ efuse_OneByteRead(padapter, eFuse_Addr++, &efuseData, bPseudoTest);
+ DBG_8192C("%s: efuse[%#X]= 0x%02X\n", __func__, eFuse_Addr-1, efuseData);
+ efuseTbl[addr+1] = efuseData;
+ }
+ addr += 2;
+ }
+ } else {
+ DBG_8192C("%s: offset(%d) is illegal!!\n", __func__, offset);
+ eFuse_Addr += Efuse_CalculateWordCnts(wden)*2;
+ }
+ }
+
+ if ((eFuse_Addr-1) < total) {
+ DBG_8192C("%s: bank(%d) data end at %#x\n", __func__, bank, eFuse_Addr-1);
+ break;
+ }
+ }
+
+ /* switch bank back to bank 0 for later BT and wifi use. */
+ hal_EfuseSwitchToBank(padapter, 0, bPseudoTest);
+
+ /* Copy from Efuse map to output pointer memory!!! */
+ for (i = 0; i < _size_byte; i++)
+ pbuf[i] = efuseTbl[_offset+i];
+
+ /* */
+ /* Calculate Efuse utilization. */
+ /* */
+ EFUSE_GetEfuseDefinition(padapter, EFUSE_BT, TYPE_AVAILABLE_EFUSE_BYTES_TOTAL, &total, bPseudoTest);
+ used = (EFUSE_BT_REAL_BANK_CONTENT_LEN*(bank-1)) + eFuse_Addr - 1;
+ DBG_8192C("%s: bank(%d) data end at %#x , used =%d\n", __func__, bank, eFuse_Addr-1, used);
+ efuse_usage = (u8)((used*100)/total);
+ if (bPseudoTest) {
+#ifdef HAL_EFUSE_MEMORY
+ pEfuseHal->fakeBTEfuseUsedBytes = used;
+#else
+ fakeBTEfuseUsedBytes = used;
+#endif
+ } else {
+ rtw_hal_set_hwreg(padapter, HW_VAR_EFUSE_BT_BYTES, (u8 *)&used);
+ rtw_hal_set_hwreg(padapter, HW_VAR_EFUSE_BT_USAGE, (u8 *)&efuse_usage);
+ }
+
+exit:
+ kfree(efuseTbl);
+}
+
+static void Hal_ReadEFuse(
+ struct adapter *padapter,
+ u8 efuseType,
+ u16 _offset,
+ u16 _size_byte,
+ u8 *pbuf,
+ bool bPseudoTest
+)
+{
+ if (efuseType == EFUSE_WIFI)
+ hal_ReadEFuse_WiFi(padapter, _offset, _size_byte, pbuf, bPseudoTest);
+ else
+ hal_ReadEFuse_BT(padapter, _offset, _size_byte, pbuf, bPseudoTest);
+}
+
+static u16 hal_EfuseGetCurrentSize_WiFi(
+ struct adapter *padapter, bool bPseudoTest
+)
+{
+#ifdef HAL_EFUSE_MEMORY
+ struct hal_com_data *pHalData = GET_HAL_DATA(padapter);
+ PEFUSE_HAL pEfuseHal = &pHalData->EfuseHal;
+#endif
+ u16 efuse_addr = 0;
+ u16 start_addr = 0; /* for debug */
+ u8 hoffset = 0, hworden = 0;
+ u8 efuse_data, word_cnts = 0;
+ u32 count = 0; /* for debug */
+
+
+ if (bPseudoTest) {
+#ifdef HAL_EFUSE_MEMORY
+ efuse_addr = (u16)pEfuseHal->fakeEfuseUsedBytes;
+#else
+ efuse_addr = (u16)fakeEfuseUsedBytes;
+#endif
+ } else
+ rtw_hal_get_hwreg(padapter, HW_VAR_EFUSE_BYTES, (u8 *)&efuse_addr);
+
+ start_addr = efuse_addr;
+ DBG_8192C("%s: start_efuse_addr = 0x%X\n", __func__, efuse_addr);
+
+ /* switch bank back to bank 0 for later BT and wifi use. */
+ hal_EfuseSwitchToBank(padapter, 0, bPseudoTest);
+
+ count = 0;
+ while (AVAILABLE_EFUSE_ADDR(efuse_addr)) {
+ if (efuse_OneByteRead(padapter, efuse_addr, &efuse_data, bPseudoTest) == false) {
+ DBG_8192C(KERN_ERR "%s: efuse_OneByteRead Fail! addr = 0x%X !!\n", __func__, efuse_addr);
+ goto error;
+ }
+
+ if (efuse_data == 0xFF)
+ break;
+
+ if ((start_addr != 0) && (efuse_addr == start_addr)) {
+ count++;
+ DBG_8192C(FUNC_ADPT_FMT ": [WARNING] efuse raw 0x%X = 0x%02X not 0xFF!!(%d times)\n",
+ FUNC_ADPT_ARG(padapter), efuse_addr, efuse_data, count);
+
+ efuse_data = 0xFF;
+ if (count < 4) {
+ /* try again! */
+
+ if (count > 2) {
+ /* try again from address 0 */
+ efuse_addr = 0;
+ start_addr = 0;
+ }
+
+ continue;
+ }
+
+ goto error;
+ }
+
+ if (EXT_HEADER(efuse_data)) {
+ hoffset = GET_HDR_OFFSET_2_0(efuse_data);
+ efuse_addr++;
+ efuse_OneByteRead(padapter, efuse_addr, &efuse_data, bPseudoTest);
+ if (ALL_WORDS_DISABLED(efuse_data))
+ continue;
+
+ hoffset |= ((efuse_data & 0xF0) >> 1);
+ hworden = efuse_data & 0x0F;
+ } else {
+ hoffset = (efuse_data>>4) & 0x0F;
+ hworden = efuse_data & 0x0F;
+ }
+
+ word_cnts = Efuse_CalculateWordCnts(hworden);
+ efuse_addr += (word_cnts*2)+1;
+ }
+
+ if (bPseudoTest) {
+#ifdef HAL_EFUSE_MEMORY
+ pEfuseHal->fakeEfuseUsedBytes = efuse_addr;
+#else
+ fakeEfuseUsedBytes = efuse_addr;
+#endif
+ } else
+ rtw_hal_set_hwreg(padapter, HW_VAR_EFUSE_BYTES, (u8 *)&efuse_addr);
+
+ goto exit;
+
+error:
+ /* report max size to prevent further efuse writes */
+ EFUSE_GetEfuseDefinition(padapter, EFUSE_WIFI, TYPE_AVAILABLE_EFUSE_BYTES_TOTAL, &efuse_addr, bPseudoTest);
+
+exit:
+ DBG_8192C("%s: CurrentSize =%d\n", __func__, efuse_addr);
+
+ return efuse_addr;
+}
+
+static u16 hal_EfuseGetCurrentSize_BT(struct adapter *padapter, u8 bPseudoTest)
+{
+#ifdef HAL_EFUSE_MEMORY
+ struct hal_com_data *pHalData = GET_HAL_DATA(padapter);
+ PEFUSE_HAL pEfuseHal = &pHalData->EfuseHal;
+#endif
+ u16 btusedbytes;
+ u16 efuse_addr;
+ u8 bank, startBank;
+ u8 hoffset = 0, hworden = 0;
+ u8 efuse_data, word_cnts = 0;
+ u16 retU2 = 0;
+
+ if (bPseudoTest) {
+#ifdef HAL_EFUSE_MEMORY
+ btusedbytes = pEfuseHal->fakeBTEfuseUsedBytes;
+#else
+ btusedbytes = fakeBTEfuseUsedBytes;
+#endif
+ } else
+ rtw_hal_get_hwreg(padapter, HW_VAR_EFUSE_BT_BYTES, (u8 *)&btusedbytes);
+
+ efuse_addr = (u16)((btusedbytes%EFUSE_BT_REAL_BANK_CONTENT_LEN));
+ startBank = (u8)(1+(btusedbytes/EFUSE_BT_REAL_BANK_CONTENT_LEN));
+
+ DBG_8192C("%s: start from bank =%d addr = 0x%X\n", __func__, startBank, efuse_addr);
+
+ EFUSE_GetEfuseDefinition(padapter, EFUSE_BT, TYPE_AVAILABLE_EFUSE_BYTES_BANK, &retU2, bPseudoTest);
+
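+ /* Scan the BT efuse banks starting from the last known position; within each */
+ /* bank, walk the PG headers until a blank (0xFF) byte is reached. */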
+ for (bank = startBank; bank < 3; bank++) {
+ if (hal_EfuseSwitchToBank(padapter, bank, bPseudoTest) == false) {
+ DBG_8192C(KERN_ERR "%s: switch bank(%d) Fail!!\n", __func__, bank);
+ /* bank = EFUSE_MAX_BANK; */
+ break;
+ }
+
+ /* only when the bank is switched do we have to reset efuse_addr. */
+ if (bank != startBank)
+ efuse_addr = 0;
+#if 1
+
+ while (AVAILABLE_EFUSE_ADDR(efuse_addr)) {
+ if (efuse_OneByteRead(padapter, efuse_addr, &efuse_data, bPseudoTest) == false) {
+ DBG_8192C(KERN_ERR "%s: efuse_OneByteRead Fail! addr = 0x%X !!\n", __func__, efuse_addr);
+ /* bank = EFUSE_MAX_BANK; */
+ break;
+ }
+ DBG_8192C("%s: efuse_OneByteRead ! addr = 0x%X !efuse_data = 0x%X! bank =%d\n", __func__, efuse_addr, efuse_data, bank);
+
+ if (efuse_data == 0xFF)
+ break;
+
+ if (EXT_HEADER(efuse_data)) {
+ hoffset = GET_HDR_OFFSET_2_0(efuse_data);
+ efuse_addr++;
+ efuse_OneByteRead(padapter, efuse_addr, &efuse_data, bPseudoTest);
+ DBG_8192C("%s: efuse_OneByteRead EXT_HEADER ! addr = 0x%X !efuse_data = 0x%X! bank =%d\n", __func__, efuse_addr, efuse_data, bank);
+
+ if (ALL_WORDS_DISABLED(efuse_data)) {
+ efuse_addr++;
+ continue;
+ }
+
+/* hoffset = ((hoffset & 0xE0) >> 5) | ((efuse_data & 0xF0) >> 1); */
+ hoffset |= ((efuse_data & 0xF0) >> 1);
+ hworden = efuse_data & 0x0F;
+ } else {
+ hoffset = (efuse_data>>4) & 0x0F;
+ hworden = efuse_data & 0x0F;
+ }
+
+ DBG_8192C(FUNC_ADPT_FMT": Offset =%d Worden =%#X\n",
+ FUNC_ADPT_ARG(padapter), hoffset, hworden);
+
+ word_cnts = Efuse_CalculateWordCnts(hworden);
+ /* read next header */
+ efuse_addr += (word_cnts*2)+1;
+ }
+#else
+ while (
+ bContinual &&
+ efuse_OneByteRead(padapter, efuse_addr, &efuse_data, bPseudoTest) &&
+ AVAILABLE_EFUSE_ADDR(efuse_addr)
+ ) {
+ if (efuse_data != 0xFF) {
+ if ((efuse_data&0x1F) == 0x0F) { /* extended header */
+ hoffset = efuse_data;
+ efuse_addr++;
+ efuse_OneByteRead(padapter, efuse_addr, &efuse_data, bPseudoTest);
+ if ((efuse_data & 0x0F) == 0x0F) {
+ efuse_addr++;
+ continue;
+ } else {
+ hoffset = ((hoffset & 0xE0) >> 5) | ((efuse_data & 0xF0) >> 1);
+ hworden = efuse_data & 0x0F;
+ }
+ } else {
+ hoffset = (efuse_data>>4) & 0x0F;
+ hworden = efuse_data & 0x0F;
+ }
+ word_cnts = Efuse_CalculateWordCnts(hworden);
+ /* read next header */
+ efuse_addr = efuse_addr + (word_cnts*2)+1;
+ } else
+ bContinual = false;
+ }
+#endif
+
+
+ /* Check if we need to check next bank efuse */
+ if (efuse_addr < retU2)
+ break; /* don't need to check next bank. */
+ }
+
+ retU2 = ((bank-1)*EFUSE_BT_REAL_BANK_CONTENT_LEN)+efuse_addr;
+ if (bPseudoTest) {
+ pEfuseHal->fakeBTEfuseUsedBytes = retU2;
+ /* RT_DISP(FEEPROM, EFUSE_PG, ("Hal_EfuseGetCurrentSize_BT92C(), already use %u bytes\n", pEfuseHal->fakeBTEfuseUsedBytes)); */
+ } else {
+ pEfuseHal->BTEfuseUsedBytes = retU2;
+ /* RT_DISP(FEEPROM, EFUSE_PG, ("Hal_EfuseGetCurrentSize_BT92C(), already use %u bytes\n", pEfuseHal->BTEfuseUsedBytes)); */
+ }
+
+ DBG_8192C("%s: CurrentSize =%d\n", __func__, retU2);
+ return retU2;
+}
+
+static u16 Hal_EfuseGetCurrentSize(
+ struct adapter *padapter, u8 efuseType, bool bPseudoTest
+)
+{
+ u16 ret = 0;
+
+ if (efuseType == EFUSE_WIFI)
+ ret = hal_EfuseGetCurrentSize_WiFi(padapter, bPseudoTest);
+ else
+ ret = hal_EfuseGetCurrentSize_BT(padapter, bPseudoTest);
+
+ return ret;
+}
+
+static u8 Hal_EfuseWordEnableDataWrite(
+ struct adapter *padapter,
+ u16 efuse_addr,
+ u8 word_en,
+ u8 *data,
+ bool bPseudoTest
+)
+{
+ u16 tmpaddr = 0;
+ u16 start_addr = efuse_addr;
+ u8 badworden = 0x0F;
+ u8 tmpdata[PGPKT_DATA_SIZE];
+
+
+/* DBG_8192C("%s: efuse_addr =%#x word_en =%#x\n", __func__, efuse_addr, word_en); */
+ memset(tmpdata, 0xFF, PGPKT_DATA_SIZE);
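+ /* badworden starts with all four word-enable bits set; a bit is cleared */
+ /* below whenever the read-back of that word does not match what was written. */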
+
+ if (!(word_en & BIT(0))) {
+ tmpaddr = start_addr;
+ efuse_OneByteWrite(padapter, start_addr++, data[0], bPseudoTest);
+ efuse_OneByteWrite(padapter, start_addr++, data[1], bPseudoTest);
+
+ efuse_OneByteRead(padapter, tmpaddr, &tmpdata[0], bPseudoTest);
+ efuse_OneByteRead(padapter, tmpaddr+1, &tmpdata[1], bPseudoTest);
+ if ((data[0] != tmpdata[0]) || (data[1] != tmpdata[1])) {
+ badworden &= (~BIT(0));
+ }
+ }
+ if (!(word_en & BIT(1))) {
+ tmpaddr = start_addr;
+ efuse_OneByteWrite(padapter, start_addr++, data[2], bPseudoTest);
+ efuse_OneByteWrite(padapter, start_addr++, data[3], bPseudoTest);
+
+ efuse_OneByteRead(padapter, tmpaddr, &tmpdata[2], bPseudoTest);
+ efuse_OneByteRead(padapter, tmpaddr+1, &tmpdata[3], bPseudoTest);
+ if ((data[2] != tmpdata[2]) || (data[3] != tmpdata[3])) {
+ badworden &= (~BIT(1));
+ }
+ }
+
+ if (!(word_en & BIT(2))) {
+ tmpaddr = start_addr;
+ efuse_OneByteWrite(padapter, start_addr++, data[4], bPseudoTest);
+ efuse_OneByteWrite(padapter, start_addr++, data[5], bPseudoTest);
+
+ efuse_OneByteRead(padapter, tmpaddr, &tmpdata[4], bPseudoTest);
+ efuse_OneByteRead(padapter, tmpaddr+1, &tmpdata[5], bPseudoTest);
+ if ((data[4] != tmpdata[4]) || (data[5] != tmpdata[5])) {
+ badworden &= (~BIT(2));
+ }
+ }
+
+ if (!(word_en & BIT(3))) {
+ tmpaddr = start_addr;
+ efuse_OneByteWrite(padapter, start_addr++, data[6], bPseudoTest);
+ efuse_OneByteWrite(padapter, start_addr++, data[7], bPseudoTest);
+
+ efuse_OneByteRead(padapter, tmpaddr, &tmpdata[6], bPseudoTest);
+ efuse_OneByteRead(padapter, tmpaddr+1, &tmpdata[7], bPseudoTest);
+ if ((data[6] != tmpdata[6]) || (data[7] != tmpdata[7])) {
+ badworden &= (~BIT(3));
+ }
+ }
+
+ return badworden;
+}
+
+static s32 Hal_EfusePgPacketRead(
+ struct adapter *padapter,
+ u8 offset,
+ u8 *data,
+ bool bPseudoTest
+)
+{
+ u8 efuse_data, word_cnts = 0;
+ u16 efuse_addr = 0;
+ u8 hoffset = 0, hworden = 0;
+ u8 i;
+ u8 max_section = 0;
+ s32 ret;
+
+
+ if (data == NULL)
+ return false;
+
+ EFUSE_GetEfuseDefinition(padapter, EFUSE_WIFI, TYPE_EFUSE_MAX_SECTION, &max_section, bPseudoTest);
+ if (offset > max_section) {
+ DBG_8192C("%s: Packet offset(%d) is illegal(>%d)!\n", __func__, offset, max_section);
+ return false;
+ }
+
+ memset(data, 0xFF, PGPKT_DATA_SIZE);
+ ret = true;
+
+ /* */
+ /* <Roger_TODO> The Efuse has 5 dummy bytes pre-programmed at the end by CP. */
+ /* Skip the dummy parts to prevent unexpected data being read from the Efuse. */
+ /* Bypassed right now. 2009.02.19. */
+ /* */
+ while (AVAILABLE_EFUSE_ADDR(efuse_addr)) {
+ if (efuse_OneByteRead(padapter, efuse_addr++, &efuse_data, bPseudoTest) == false) {
+ ret = false;
+ break;
+ }
+
+ if (efuse_data == 0xFF)
+ break;
+
+ if (EXT_HEADER(efuse_data)) {
+ hoffset = GET_HDR_OFFSET_2_0(efuse_data);
+ efuse_OneByteRead(padapter, efuse_addr++, &efuse_data, bPseudoTest);
+ if (ALL_WORDS_DISABLED(efuse_data)) {
+ DBG_8192C("%s: Error!! All words disabled!\n", __func__);
+ continue;
+ }
+
+ hoffset |= ((efuse_data & 0xF0) >> 1);
+ hworden = efuse_data & 0x0F;
+ } else {
+ hoffset = (efuse_data>>4) & 0x0F;
+ hworden = efuse_data & 0x0F;
+ }
+
+ if (hoffset == offset) {
+ for (i = 0; i < EFUSE_MAX_WORD_UNIT; i++) {
+ /* Check word enable condition in the section */
+ if (!(hworden & (0x01<<i))) {
+ efuse_OneByteRead(padapter, efuse_addr++, &efuse_data, bPseudoTest);
+/* DBG_8192C("%s: efuse[%#X]= 0x%02X\n", __func__, efuse_addr+tmpidx, efuse_data); */
+ data[i*2] = efuse_data;
+
+ efuse_OneByteRead(padapter, efuse_addr++, &efuse_data, bPseudoTest);
+/* DBG_8192C("%s: efuse[%#X]= 0x%02X\n", __func__, efuse_addr+tmpidx, efuse_data); */
+ data[(i*2)+1] = efuse_data;
+ }
+ }
+ } else {
+ word_cnts = Efuse_CalculateWordCnts(hworden);
+ efuse_addr += word_cnts*2;
+ }
+ }
+
+ return ret;
+}
+
+static u8 hal_EfusePgCheckAvailableAddr(
+ struct adapter *padapter, u8 efuseType, u8 bPseudoTest
+)
+{
+ u16 max_available = 0;
+ u16 current_size;
+
+
+ EFUSE_GetEfuseDefinition(padapter, efuseType, TYPE_AVAILABLE_EFUSE_BYTES_TOTAL, &max_available, bPseudoTest);
+/* DBG_8192C("%s: max_available =%d\n", __func__, max_available); */
+
+ current_size = Efuse_GetCurrentSize(padapter, efuseType, bPseudoTest);
+ if (current_size >= max_available) {
+ DBG_8192C("%s: Error!! current_size(%d)>max_available(%d)\n", __func__, current_size, max_available);
+ return false;
+ }
+ return true;
+}
+
+static void hal_EfuseConstructPGPkt(
+ u8 offset,
+ u8 word_en,
+ u8 *pData,
+ PPGPKT_STRUCT pTargetPkt
+)
+{
+ memset(pTargetPkt->data, 0xFF, PGPKT_DATA_SIZE);
+ pTargetPkt->offset = offset;
+ pTargetPkt->word_en = word_en;
+ efuse_WordEnableDataRead(word_en, pData, pTargetPkt->data);
+ pTargetPkt->word_cnts = Efuse_CalculateWordCnts(pTargetPkt->word_en);
+}
+
+static u8 hal_EfusePartialWriteCheck(
+ struct adapter *padapter,
+ u8 efuseType,
+ u16 *pAddr,
+ PPGPKT_STRUCT pTargetPkt,
+ u8 bPseudoTest
+)
+{
+ struct hal_com_data *pHalData = GET_HAL_DATA(padapter);
+ PEFUSE_HAL pEfuseHal = &pHalData->EfuseHal;
+ u8 bRet = false;
+ u16 startAddr = 0, efuse_max_available_len = 0, efuse_max = 0;
+ u8 efuse_data = 0;
+
+ EFUSE_GetEfuseDefinition(padapter, efuseType, TYPE_AVAILABLE_EFUSE_BYTES_TOTAL, &efuse_max_available_len, bPseudoTest);
+ EFUSE_GetEfuseDefinition(padapter, efuseType, TYPE_EFUSE_CONTENT_LEN_BANK, &efuse_max, bPseudoTest);
+
+ if (efuseType == EFUSE_WIFI) {
+ if (bPseudoTest) {
+#ifdef HAL_EFUSE_MEMORY
+ startAddr = (u16)pEfuseHal->fakeEfuseUsedBytes;
+#else
+ startAddr = (u16)fakeEfuseUsedBytes;
+#endif
+ } else
+ rtw_hal_get_hwreg(padapter, HW_VAR_EFUSE_BYTES, (u8 *)&startAddr);
+ } else {
+ if (bPseudoTest) {
+#ifdef HAL_EFUSE_MEMORY
+ startAddr = (u16)pEfuseHal->fakeBTEfuseUsedBytes;
+#else
+ startAddr = (u16)fakeBTEfuseUsedBytes;
+#endif
+ } else
+ rtw_hal_get_hwreg(padapter, HW_VAR_EFUSE_BT_BYTES, (u8 *)&startAddr);
+ }
+ startAddr %= efuse_max;
+ DBG_8192C("%s: startAddr =%#X\n", __func__, startAddr);
+
+ while (1) {
+ if (startAddr >= efuse_max_available_len) {
+ bRet = false;
+ DBG_8192C("%s: startAddr(%d) >= efuse_max_available_len(%d)\n", __func__, startAddr, efuse_max_available_len);
+ break;
+ }
+
+ if (efuse_OneByteRead(padapter, startAddr, &efuse_data, bPseudoTest) && (efuse_data != 0xFF)) {
+#if 1
+ bRet = false;
+ DBG_8192C("%s: Something Wrong! last bytes(%#X = 0x%02X) is not 0xFF\n",
+ __func__, startAddr, efuse_data);
+ break;
+#else
+ if (EXT_HEADER(efuse_data)) {
+ cur_header = efuse_data;
+ startAddr++;
+ efuse_OneByteRead(padapter, startAddr, &efuse_data, bPseudoTest);
+ if (ALL_WORDS_DISABLED(efuse_data)) {
+ DBG_8192C("%s: Error condition, all words disabled!", __func__);
+ bRet = false;
+ break;
+ } else {
+ curPkt.offset = ((cur_header & 0xE0) >> 5) | ((efuse_data & 0xF0) >> 1);
+ curPkt.word_en = efuse_data & 0x0F;
+ }
+ } else {
+ cur_header = efuse_data;
+ curPkt.offset = (cur_header>>4) & 0x0F;
+ curPkt.word_en = cur_header & 0x0F;
+ }
+
+ curPkt.word_cnts = Efuse_CalculateWordCnts(curPkt.word_en);
+ /* if same header is found but no data followed */
+ /* write some part of data followed by the header. */
+ if (
+ (curPkt.offset == pTargetPkt->offset) &&
+ (hal_EfuseCheckIfDatafollowed(padapter, curPkt.word_cnts, startAddr+1, bPseudoTest) == false) &&
+ wordEnMatched(pTargetPkt, &curPkt, &matched_wden) == true
+ ) {
+ DBG_8192C("%s: Need to partial write data by the previous wrote header\n", __func__);
+ /* Here to write partial data */
+ badworden = Efuse_WordEnableDataWrite(padapter, startAddr+1, matched_wden, pTargetPkt->data, bPseudoTest);
+ if (badworden != 0x0F) {
+ u32 PgWriteSuccess = 0;
+ /* if write fail on some words, write these bad words again */
+ if (efuseType == EFUSE_WIFI)
+ PgWriteSuccess = Efuse_PgPacketWrite(padapter, pTargetPkt->offset, badworden, pTargetPkt->data, bPseudoTest);
+ else
+ PgWriteSuccess = Efuse_PgPacketWrite_BT(padapter, pTargetPkt->offset, badworden, pTargetPkt->data, bPseudoTest);
+
+ if (!PgWriteSuccess) {
+ bRet = false; /* write fail, return */
+ break;
+ }
+ }
+ /* partial write ok, update the target packet for later use */
+ for (i = 0; i < 4; i++) {
+ if ((matched_wden & (0x1<<i)) == 0) { /* this word has been written */
+ pTargetPkt->word_en |= (0x1<<i); /* disable the word */
+ }
+ }
+ pTargetPkt->word_cnts = Efuse_CalculateWordCnts(pTargetPkt->word_en);
+ }
+ /* read from next header */
+ startAddr = startAddr + (curPkt.word_cnts*2) + 1;
+#endif
+ } else {
+ /* not used header, 0xff */
+ *pAddr = startAddr;
+/* DBG_8192C("%s: Started from unused header offset =%d\n", __func__, startAddr)); */
+ bRet = true;
+ break;
+ }
+ }
+
+ return bRet;
+}
+
+static u8 hal_EfusePgPacketWrite1ByteHeader(
+ struct adapter *padapter,
+ u8 efuseType,
+ u16 *pAddr,
+ PPGPKT_STRUCT pTargetPkt,
+ u8 bPseudoTest
+)
+{
+ u8 pg_header = 0, tmp_header = 0;
+ u16 efuse_addr = *pAddr;
+ u8 repeatcnt = 0;
+
+
+/* DBG_8192C("%s\n", __func__); */
+ pg_header = ((pTargetPkt->offset << 4) & 0xf0) | pTargetPkt->word_en;
+
+ do {
+ efuse_OneByteWrite(padapter, efuse_addr, pg_header, bPseudoTest);
+ efuse_OneByteRead(padapter, efuse_addr, &tmp_header, bPseudoTest);
+ if (tmp_header != 0xFF)
+ break;
+ if (repeatcnt++ > EFUSE_REPEAT_THRESHOLD_) {
+ DBG_8192C("%s: Repeat over limit for pg_header!!\n", __func__);
+ return false;
+ }
+ } while (1);
+
+ if (tmp_header != pg_header) {
+ DBG_8192C(KERN_ERR "%s: PG Header Fail!!(pg = 0x%02X read = 0x%02X)\n", __func__, pg_header, tmp_header);
+ return false;
+ }
+
+ *pAddr = efuse_addr;
+
+ return true;
+}
+
+static u8 hal_EfusePgPacketWrite2ByteHeader(
+ struct adapter *padapter,
+ u8 efuseType,
+ u16 *pAddr,
+ PPGPKT_STRUCT pTargetPkt,
+ u8 bPseudoTest
+)
+{
+ u16 efuse_addr, efuse_max_available_len = 0;
+ u8 pg_header = 0, tmp_header = 0;
+ u8 repeatcnt = 0;
+
+
+/* DBG_8192C("%s\n", __func__); */
+ EFUSE_GetEfuseDefinition(padapter, efuseType, TYPE_AVAILABLE_EFUSE_BYTES_BANK, &efuse_max_available_len, bPseudoTest);
+
+ efuse_addr = *pAddr;
+ if (efuse_addr >= efuse_max_available_len) {
+ DBG_8192C("%s: addr(%d) over avaliable(%d)!!\n", __func__, efuse_addr, efuse_max_available_len);
+ return false;
+ }
+
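+ /* Extended header, first byte: offset bits [2:0] in bits [7:5], low nibble 0x0F */
+ /* to flag a 2-byte header. The second byte, written below, carries offset bits */
+ /* [6:3] in its high nibble and word_en in its low nibble. */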
+ pg_header = ((pTargetPkt->offset & 0x07) << 5) | 0x0F;
+/* DBG_8192C("%s: pg_header = 0x%x\n", __func__, pg_header); */
+
+ do {
+ efuse_OneByteWrite(padapter, efuse_addr, pg_header, bPseudoTest);
+ efuse_OneByteRead(padapter, efuse_addr, &tmp_header, bPseudoTest);
+ if (tmp_header != 0xFF)
+ break;
+ if (repeatcnt++ > EFUSE_REPEAT_THRESHOLD_) {
+ DBG_8192C("%s: Repeat over limit for pg_header!!\n", __func__);
+ return false;
+ }
+ } while (1);
+
+ if (tmp_header != pg_header) {
+ DBG_8192C(KERN_ERR "%s: PG Header Fail!!(pg = 0x%02X read = 0x%02X)\n", __func__, pg_header, tmp_header);
+ return false;
+ }
+
+ /* to write ext_header */
+ efuse_addr++;
+ pg_header = ((pTargetPkt->offset & 0x78) << 1) | pTargetPkt->word_en;
+
+ do {
+ efuse_OneByteWrite(padapter, efuse_addr, pg_header, bPseudoTest);
+ efuse_OneByteRead(padapter, efuse_addr, &tmp_header, bPseudoTest);
+ if (tmp_header != 0xFF)
+ break;
+ if (repeatcnt++ > EFUSE_REPEAT_THRESHOLD_) {
+ DBG_8192C("%s: Repeat over limit for ext_header!!\n", __func__);
+ return false;
+ }
+ } while (1);
+
+ if (tmp_header != pg_header) { /* offset PG fail */
+ DBG_8192C(KERN_ERR "%s: PG EXT Header Fail!!(pg = 0x%02X read = 0x%02X)\n", __func__, pg_header, tmp_header);
+ return false;
+ }
+
+ *pAddr = efuse_addr;
+
+ return true;
+}
+
+static u8 hal_EfusePgPacketWriteHeader(
+ struct adapter *padapter,
+ u8 efuseType,
+ u16 *pAddr,
+ PPGPKT_STRUCT pTargetPkt,
+ u8 bPseudoTest
+)
+{
+ u8 bRet = false;
+
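+ /* Sections at or above EFUSE_MAX_SECTION_BASE use the 2-byte (extended) header; */
+ /* smaller section offsets fit the compact 1-byte header. */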
+ if (pTargetPkt->offset >= EFUSE_MAX_SECTION_BASE)
+ bRet = hal_EfusePgPacketWrite2ByteHeader(padapter, efuseType, pAddr, pTargetPkt, bPseudoTest);
+ else
+ bRet = hal_EfusePgPacketWrite1ByteHeader(padapter, efuseType, pAddr, pTargetPkt, bPseudoTest);
+
+ return bRet;
+}
+
+static u8 hal_EfusePgPacketWriteData(
+ struct adapter *padapter,
+ u8 efuseType,
+ u16 *pAddr,
+ PPGPKT_STRUCT pTargetPkt,
+ u8 bPseudoTest
+)
+{
+ u16 efuse_addr;
+ u8 badworden;
+
+
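+ /* The header was already programmed at *pAddr; write the enabled data words */
+ /* right behind it. Efuse_WordEnableDataWrite() returns a word_en-style bitmap */
+ /* in which a cleared bit marks a word that failed to verify, so 0x0F means */
+ /* every enabled word was written cleanly. */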
+ efuse_addr = *pAddr;
+ badworden = Efuse_WordEnableDataWrite(padapter, efuse_addr+1, pTargetPkt->word_en, pTargetPkt->data, bPseudoTest);
+ if (badworden != 0x0F) {
+ DBG_8192C("%s: Fail!!\n", __func__);
+ return false;
+ }
+
+/* DBG_8192C("%s: ok\n", __func__); */
+ return true;
+}
+
+static s32 Hal_EfusePgPacketWrite(
+ struct adapter *padapter,
+ u8 offset,
+ u8 word_en,
+ u8 *pData,
+ bool bPseudoTest
+)
+{
+ PGPKT_STRUCT targetPkt;
+ u16 startAddr = 0;
+ u8 efuseType = EFUSE_WIFI;
+
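+ /* Programming sequence: check that free efuse space remains, build the PG */
+ /* packet, locate the next free write position via hal_EfusePartialWriteCheck(), */
+ /* then program the header followed by the enabled data words. */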
+ if (!hal_EfusePgCheckAvailableAddr(padapter, efuseType, bPseudoTest))
+ return false;
+
+ hal_EfuseConstructPGPkt(offset, word_en, pData, &targetPkt);
+
+ if (!hal_EfusePartialWriteCheck(padapter, efuseType, &startAddr, &targetPkt, bPseudoTest))
+ return false;
+
+ if (!hal_EfusePgPacketWriteHeader(padapter, efuseType, &startAddr, &targetPkt, bPseudoTest))
+ return false;
+
+ if (!hal_EfusePgPacketWriteData(padapter, efuseType, &startAddr, &targetPkt, bPseudoTest))
+ return false;
+
+ return true;
+}
+
+static bool Hal_EfusePgPacketWrite_BT(
+ struct adapter *padapter,
+ u8 offset,
+ u8 word_en,
+ u8 *pData,
+ bool bPseudoTest
+)
+{
+ PGPKT_STRUCT targetPkt;
+ u16 startAddr = 0;
+ u8 efuseType = EFUSE_BT;
+
+ if (!hal_EfusePgCheckAvailableAddr(padapter, efuseType, bPseudoTest))
+ return false;
+
+ hal_EfuseConstructPGPkt(offset, word_en, pData, &targetPkt);
+
+ if (!hal_EfusePartialWriteCheck(padapter, efuseType, &startAddr, &targetPkt, bPseudoTest))
+ return false;
+
+ if (!hal_EfusePgPacketWriteHeader(padapter, efuseType, &startAddr, &targetPkt, bPseudoTest))
+ return false;
+
+ if (!hal_EfusePgPacketWriteData(padapter, efuseType, &startAddr, &targetPkt, bPseudoTest))
+ return false;
+
+ return true;
+}
+
+static HAL_VERSION ReadChipVersion8723B(struct adapter *padapter)
+{
+ u32 value32;
+ HAL_VERSION ChipVersion;
+ struct hal_com_data *pHalData;
+
+/* YJ, TODO, move read chip type here */
+ pHalData = GET_HAL_DATA(padapter);
+
+ value32 = rtw_read32(padapter, REG_SYS_CFG);
+ ChipVersion.ICType = CHIP_8723B;
+ ChipVersion.ChipType = ((value32 & RTL_ID) ? TEST_CHIP : NORMAL_CHIP);
+ ChipVersion.RFType = RF_TYPE_1T1R;
+ ChipVersion.VendorType = ((value32 & VENDOR_ID) ? CHIP_VENDOR_UMC : CHIP_VENDOR_TSMC);
+ ChipVersion.CUTVersion = (value32 & CHIP_VER_RTL_MASK)>>CHIP_VER_RTL_SHIFT; /* IC version (CUT) */
+
+ /* For regulator mode. by tynli. 2011.01.14 */
+ pHalData->RegulatorMode = ((value32 & SPS_SEL) ? RT_LDO_REGULATOR : RT_SWITCHING_REGULATOR);
+
+ value32 = rtw_read32(padapter, REG_GPIO_OUTSTS);
+ ChipVersion.ROMVer = ((value32 & RF_RL_ID) >> 20); /* ROM code version. */
+
+ /* For multi-function consideration. Added by Roger, 2010.10.06. */
+ pHalData->MultiFunc = RT_MULTI_FUNC_NONE;
+ value32 = rtw_read32(padapter, REG_MULTI_FUNC_CTRL);
+ pHalData->MultiFunc |= ((value32 & WL_FUNC_EN) ? RT_MULTI_FUNC_WIFI : 0);
+ pHalData->MultiFunc |= ((value32 & BT_FUNC_EN) ? RT_MULTI_FUNC_BT : 0);
+ pHalData->MultiFunc |= ((value32 & GPS_FUNC_EN) ? RT_MULTI_FUNC_GPS : 0);
+ pHalData->PolarityCtl = ((value32 & WL_HWPDN_SL) ? RT_POLARITY_HIGH_ACT : RT_POLARITY_LOW_ACT);
+#if 1
+ dump_chip_info(ChipVersion);
+#endif
+ pHalData->VersionID = ChipVersion;
+ if (IS_1T2R(ChipVersion))
+ pHalData->rf_type = RF_1T2R;
+ else if (IS_2T2R(ChipVersion))
+ pHalData->rf_type = RF_2T2R;
+ else
+ pHalData->rf_type = RF_1T1R;
+
+ MSG_8192C("RF_Type is %x!!\n", pHalData->rf_type);
+
+ return ChipVersion;
+}
+
+static void rtl8723b_read_chip_version(struct adapter *padapter)
+{
+ ReadChipVersion8723B(padapter);
+}
+
+void rtl8723b_InitBeaconParameters(struct adapter *padapter)
+{
+ struct hal_com_data *pHalData = GET_HAL_DATA(padapter);
+ u16 val16;
+ u8 val8;
+
+
+ val8 = DIS_TSF_UDT;
+ val16 = val8 | (val8 << 8); /* port0 and port1 */
+
+ /* Enable port0 beacon function for PSTDMA */
+ val16 |= EN_BCN_FUNCTION;
+
+ rtw_write16(padapter, REG_BCN_CTRL, val16);
+
+ /* TODO: Remove these magic number */
+ rtw_write16(padapter, REG_TBTT_PROHIBIT, 0x6404);/* ms */
+ /* Firmware will control REG_DRVERLYINT when power saving is enabled, */
+ /* so don't set this register in STA mode. */
+ if (check_fwstate(&padapter->mlmepriv, WIFI_STATION_STATE) == false)
+ rtw_write8(padapter, REG_DRVERLYINT, DRIVER_EARLY_INT_TIME_8723B); /* 5ms */
+ rtw_write8(padapter, REG_BCNDMATIM, BCN_DMA_ATIME_INT_TIME_8723B); /* 2ms */
+
+ /* Suggested by designer timchen. Change beacon AIFS to the largest number */
+ /* because the test chip does not contend before sending a beacon. by tynli. 2009.11.03 */
+ rtw_write16(padapter, REG_BCNTCFG, 0x660F);
+
+ pHalData->RegBcnCtrlVal = rtw_read8(padapter, REG_BCN_CTRL);
+ pHalData->RegTxPause = rtw_read8(padapter, REG_TXPAUSE);
+ pHalData->RegFwHwTxQCtrl = rtw_read8(padapter, REG_FWHW_TXQ_CTRL+2);
+ pHalData->RegReg542 = rtw_read8(padapter, REG_TBTT_PROHIBIT+2);
+ pHalData->RegCR_1 = rtw_read8(padapter, REG_CR+1);
+}
+
+void _InitBurstPktLen_8723BS(struct adapter *Adapter)
+{
+ struct hal_com_data *pHalData = GET_HAL_DATA(Adapter);
+
+ rtw_write8(Adapter, 0x4c7, rtw_read8(Adapter, 0x4c7)|BIT(7)); /* enable single pkt ampdu */
+ rtw_write8(Adapter, REG_RX_PKT_LIMIT_8723B, 0x18); /* for VHT packet length 11K */
+ rtw_write8(Adapter, REG_MAX_AGGR_NUM_8723B, 0x1F);
+ rtw_write8(Adapter, REG_PIFS_8723B, 0x00);
+ rtw_write8(Adapter, REG_FWHW_TXQ_CTRL_8723B, rtw_read8(Adapter, REG_FWHW_TXQ_CTRL)&(~BIT(7)));
+ if (pHalData->AMPDUBurstMode)
+ rtw_write8(Adapter, REG_AMPDU_BURST_MODE_8723B, 0x5F);
+ rtw_write8(Adapter, REG_AMPDU_MAX_TIME_8723B, 0x70);
+
+ /* ARFB table 9 for 11ac 5G 2SS */
+ rtw_write32(Adapter, REG_ARFR0_8723B, 0x00000010);
+ if (IS_NORMAL_CHIP(pHalData->VersionID))
+ rtw_write32(Adapter, REG_ARFR0_8723B+4, 0xfffff000);
+ else
+ rtw_write32(Adapter, REG_ARFR0_8723B+4, 0x3e0ff000);
+
+ /* ARFB table 10 for 11ac 5G 1SS */
+ rtw_write32(Adapter, REG_ARFR1_8723B, 0x00000010);
+ rtw_write32(Adapter, REG_ARFR1_8723B+4, 0x003ff000);
+}
+
+static void ResumeTxBeacon(struct adapter *padapter)
+{
+ struct hal_com_data *pHalData = GET_HAL_DATA(padapter);
+
+
+ /* 2010.03.01. Marked by tynli. No need to call a workitem because we record the value, */
+ /* which should be read from the register, in a global variable. */
+
+ RT_TRACE(_module_hci_hal_init_c_, _drv_info_, ("+ResumeTxBeacon\n"));
+
+ pHalData->RegFwHwTxQCtrl |= BIT(6);
+ rtw_write8(padapter, REG_FWHW_TXQ_CTRL+2, pHalData->RegFwHwTxQCtrl);
+ rtw_write8(padapter, REG_TBTT_PROHIBIT+1, 0xff);
+ pHalData->RegReg542 |= BIT(0);
+ rtw_write8(padapter, REG_TBTT_PROHIBIT+2, pHalData->RegReg542);
+}
+
+static void StopTxBeacon(struct adapter *padapter)
+{
+ struct hal_com_data *pHalData = GET_HAL_DATA(padapter);
+
+
+ /* 2010.03.01. Marked by tynli. No need to call a workitem because we record the value, */
+ /* which should be read from the register, in a global variable. */
+
+ RT_TRACE(_module_hci_hal_init_c_, _drv_info_, ("+StopTxBeacon\n"));
+
+ pHalData->RegFwHwTxQCtrl &= ~BIT(6);
+ rtw_write8(padapter, REG_FWHW_TXQ_CTRL+2, pHalData->RegFwHwTxQCtrl);
+ rtw_write8(padapter, REG_TBTT_PROHIBIT+1, 0x64);
+ pHalData->RegReg542 &= ~BIT(0);
+ rtw_write8(padapter, REG_TBTT_PROHIBIT+2, pHalData->RegReg542);
+
+ CheckFwRsvdPageContent(padapter); /* 2010.06.23. Added by tynli. */
+}
+
+static void _BeaconFunctionEnable(struct adapter *padapter, u8 Enable, u8 Linked)
+{
+ rtw_write8(padapter, REG_BCN_CTRL, DIS_TSF_UDT | EN_BCN_FUNCTION | DIS_BCNQ_SUB);
+ rtw_write8(padapter, REG_RD_CTRL+1, 0x6F);
+}
+
+static void rtl8723b_SetBeaconRelatedRegisters(struct adapter *padapter)
+{
+ u8 val8;
+ u32 value32;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
+ u32 bcn_ctrl_reg;
+
+ /* reset TSF, enable update TSF, correcting TSF On Beacon */
+
+ /* REG_BCN_INTERVAL */
+ /* REG_BCNDMATIM */
+ /* REG_ATIMWND */
+ /* REG_TBTT_PROHIBIT */
+ /* REG_DRVERLYINT */
+ /* REG_BCN_MAX_ERR */
+ /* REG_BCNTCFG (0x510) */
+ /* REG_DUAL_TSF_RST */
+ /* REG_BCN_CTRL (0x550) */
+
+
+ bcn_ctrl_reg = REG_BCN_CTRL;
+
+ /* */
+ /* ATIM window */
+ /* */
+ rtw_write16(padapter, REG_ATIMWND, 2);
+
+ /* */
+ /* Beacon interval (in unit of TU). */
+ /* */
+ rtw_write16(padapter, REG_BCN_INTERVAL, pmlmeinfo->bcn_interval);
+
+ rtl8723b_InitBeaconParameters(padapter);
+
+ rtw_write8(padapter, REG_SLOT, 0x09);
+
+ /* */
+ /* Reset TSF Timer to zero, added by Roger. 2008.06.24 */
+ /* */
+ value32 = rtw_read32(padapter, REG_TCR);
+ value32 &= ~TSFRST;
+ rtw_write32(padapter, REG_TCR, value32);
+
+ value32 |= TSFRST;
+ rtw_write32(padapter, REG_TCR, value32);
+
+ /* NOTE: Fix test chip's bug (about contention window's randomness) */
+ if (check_fwstate(&padapter->mlmepriv, WIFI_ADHOC_STATE|WIFI_ADHOC_MASTER_STATE|WIFI_AP_STATE) == true) {
+ rtw_write8(padapter, REG_RXTSF_OFFSET_CCK, 0x50);
+ rtw_write8(padapter, REG_RXTSF_OFFSET_OFDM, 0x50);
+ }
+
+ _BeaconFunctionEnable(padapter, true, true);
+
+ ResumeTxBeacon(padapter);
+ val8 = rtw_read8(padapter, bcn_ctrl_reg);
+ val8 |= DIS_BCNQ_SUB;
+ rtw_write8(padapter, bcn_ctrl_reg, val8);
+}
+
+static void rtl8723b_GetHalODMVar(
+ struct adapter *Adapter,
+ enum HAL_ODM_VARIABLE eVariable,
+ void *pValue1,
+ void *pValue2
+)
+{
+ GetHalODMVar(Adapter, eVariable, pValue1, pValue2);
+}
+
+static void rtl8723b_SetHalODMVar(
+ struct adapter *Adapter,
+ enum HAL_ODM_VARIABLE eVariable,
+ void *pValue1,
+ bool bSet
+)
+{
+ SetHalODMVar(Adapter, eVariable, pValue1, bSet);
+}
+
+static void hal_notch_filter_8723b(struct adapter *adapter, bool enable)
+{
+ if (enable) {
+ DBG_871X("Enable notch filter\n");
+ rtw_write8(adapter, rOFDM0_RxDSP+1, rtw_read8(adapter, rOFDM0_RxDSP+1) | BIT1);
+ } else {
+ DBG_871X("Disable notch filter\n");
+ rtw_write8(adapter, rOFDM0_RxDSP+1, rtw_read8(adapter, rOFDM0_RxDSP+1) & ~BIT1);
+ }
+}
+
+static void UpdateHalRAMask8723B(struct adapter *padapter, u32 mac_id, u8 rssi_level)
+{
+ u32 mask, rate_bitmap;
+ u8 shortGIrate = false;
+ struct sta_info *psta;
+ struct hal_com_data *pHalData = GET_HAL_DATA(padapter);
+ struct dm_priv *pdmpriv = &pHalData->dmpriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+
+ DBG_871X("%s(): mac_id =%d rssi_level =%d\n", __func__, mac_id, rssi_level);
+
+ if (mac_id >= NUM_STA) /* CAM_SIZE */
+ return;
+
+ psta = pmlmeinfo->FW_sta_info[mac_id].psta;
+ if (psta == NULL)
+ return;
+
+ shortGIrate = query_ra_short_GI(psta);
+
+ mask = psta->ra_mask;
+
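+ /* Trim the station's rate mask: first by the bitmap ODM considers usable at */
+ /* the current RSSI level, then by stripping the rates reserved for BT coex. */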
+ rate_bitmap = 0xffffffff;
+ rate_bitmap = ODM_Get_Rate_Bitmap(&pHalData->odmpriv, mac_id, mask, rssi_level);
+ DBG_871X("%s => mac_id:%d, networkType:0x%02x, mask:0x%08x\n\t ==> rssi_level:%d, rate_bitmap:0x%08x\n",
+ __func__, mac_id, psta->wireless_mode, mask, rssi_level, rate_bitmap);
+
+ mask &= rate_bitmap;
+
+ rate_bitmap = rtw_btcoex_GetRaMask(padapter);
+ mask &= ~rate_bitmap;
+
+#ifdef CONFIG_CMCC_TEST
+ if (pmlmeext->cur_wireless_mode & WIRELESS_11G) {
+ if (mac_id == 0) {
+ DBG_871X("CMCC_BT update raid entry, mask = 0x%x\n", mask);
+ mask &= 0xffffff00; /* disable CCK & <24M OFDM rate for 11G mode for CMCC */
+ DBG_871X("CMCC_BT update raid entry, mask = 0x%x\n", mask);
+ }
+ }
+#endif
+
+ if (pHalData->fw_ractrl == true) {
+ rtl8723b_set_FwMacIdConfig_cmd(padapter, mac_id, psta->raid, psta->bw_mode, shortGIrate, mask);
+ }
+
+ /* set correct initial data rate for each mac_id */
+ pdmpriv->INIDATA_RATE[mac_id] = psta->init_rate;
+ DBG_871X("%s(): mac_id =%d raid = 0x%x bw =%d mask = 0x%x init_rate = 0x%x\n", __func__, mac_id, psta->raid, psta->bw_mode, mask, psta->init_rate);
+}
+
+
+void rtl8723b_set_hal_ops(struct hal_ops *pHalFunc)
+{
+ pHalFunc->free_hal_data = &rtl8723b_free_hal_data;
+
+ pHalFunc->dm_init = &rtl8723b_init_dm_priv;
+
+ pHalFunc->read_chip_version = &rtl8723b_read_chip_version;
+
+ pHalFunc->UpdateRAMaskHandler = &UpdateHalRAMask8723B;
+
+ pHalFunc->set_bwmode_handler = &PHY_SetBWMode8723B;
+ pHalFunc->set_channel_handler = &PHY_SwChnl8723B;
+ pHalFunc->set_chnl_bw_handler = &PHY_SetSwChnlBWMode8723B;
+
+ pHalFunc->set_tx_power_level_handler = &PHY_SetTxPowerLevel8723B;
+ pHalFunc->get_tx_power_level_handler = &PHY_GetTxPowerLevel8723B;
+
+ pHalFunc->hal_dm_watchdog = &rtl8723b_HalDmWatchDog;
+ pHalFunc->hal_dm_watchdog_in_lps = &rtl8723b_HalDmWatchDog_in_LPS;
+
+
+ pHalFunc->SetBeaconRelatedRegistersHandler = &rtl8723b_SetBeaconRelatedRegisters;
+
+ pHalFunc->Add_RateATid = &rtl8723b_Add_RateATid;
+
+ pHalFunc->run_thread = &rtl8723b_start_thread;
+ pHalFunc->cancel_thread = &rtl8723b_stop_thread;
+
+ pHalFunc->read_bbreg = &PHY_QueryBBReg_8723B;
+ pHalFunc->write_bbreg = &PHY_SetBBReg_8723B;
+ pHalFunc->read_rfreg = &PHY_QueryRFReg_8723B;
+ pHalFunc->write_rfreg = &PHY_SetRFReg_8723B;
+
+ /* Efuse related function */
+ pHalFunc->BTEfusePowerSwitch = &Hal_BT_EfusePowerSwitch;
+ pHalFunc->EfusePowerSwitch = &Hal_EfusePowerSwitch;
+ pHalFunc->ReadEFuse = &Hal_ReadEFuse;
+ pHalFunc->EFUSEGetEfuseDefinition = &Hal_GetEfuseDefinition;
+ pHalFunc->EfuseGetCurrentSize = &Hal_EfuseGetCurrentSize;
+ pHalFunc->Efuse_PgPacketRead = &Hal_EfusePgPacketRead;
+ pHalFunc->Efuse_PgPacketWrite = &Hal_EfusePgPacketWrite;
+ pHalFunc->Efuse_WordEnableDataWrite = &Hal_EfuseWordEnableDataWrite;
+ pHalFunc->Efuse_PgPacketWrite_BT = &Hal_EfusePgPacketWrite_BT;
+
+ pHalFunc->GetHalODMVarHandler = &rtl8723b_GetHalODMVar;
+ pHalFunc->SetHalODMVarHandler = &rtl8723b_SetHalODMVar;
+
+ pHalFunc->xmit_thread_handler = &hal_xmit_handler;
+ pHalFunc->hal_notch_filter = &hal_notch_filter_8723b;
+
+ pHalFunc->c2h_handler = c2h_handler_8723b;
+ pHalFunc->c2h_id_filter_ccx = c2h_id_filter_ccx_8723b;
+
+ pHalFunc->fill_h2c_cmd = &FillH2CCmd8723B;
+}
+
+void rtl8723b_InitAntenna_Selection(struct adapter *padapter)
+{
+ struct hal_com_data *pHalData;
+ u8 val;
+
+
+ pHalData = GET_HAL_DATA(padapter);
+
+ val = rtw_read8(padapter, REG_LEDCFG2);
+ /* Let the 8051 take control of the antenna setting */
+ val |= BIT(7); /* DPDT_SEL_EN, 0x4C[23] */
+ rtw_write8(padapter, REG_LEDCFG2, val);
+}
+
+void rtl8723b_init_default_value(struct adapter *padapter)
+{
+ struct hal_com_data *pHalData;
+ struct dm_priv *pdmpriv;
+ u8 i;
+
+
+ pHalData = GET_HAL_DATA(padapter);
+ pdmpriv = &pHalData->dmpriv;
+
+ padapter->registrypriv.wireless_mode = WIRELESS_11BG_24N;
+
+ /* init default value */
+ pHalData->fw_ractrl = false;
+ pHalData->bIQKInitialized = false;
+ if (!adapter_to_pwrctl(padapter)->bkeepfwalive)
+ pHalData->LastHMEBoxNum = 0;
+
+ pHalData->bIQKInitialized = false;
+
+ /* init dm default value */
+ pdmpriv->TM_Trigger = 0;/* for IQK */
+/* pdmpriv->binitialized = false; */
+/* pdmpriv->prv_traffic_idx = 3; */
+/* pdmpriv->initialize = 0; */
+
+ pdmpriv->ThermalValue_HP_index = 0;
+ for (i = 0; i < HP_THERMAL_NUM; i++)
+ pdmpriv->ThermalValue_HP[i] = 0;
+
+ /* init Efuse variables */
+ pHalData->EfuseUsedBytes = 0;
+ pHalData->EfuseUsedPercentage = 0;
+#ifdef HAL_EFUSE_MEMORY
+ pHalData->EfuseHal.fakeEfuseBank = 0;
+ pHalData->EfuseHal.fakeEfuseUsedBytes = 0;
+ memset(pHalData->EfuseHal.fakeEfuseContent, 0xFF, EFUSE_MAX_HW_SIZE);
+ memset(pHalData->EfuseHal.fakeEfuseInitMap, 0xFF, EFUSE_MAX_MAP_LEN);
+ memset(pHalData->EfuseHal.fakeEfuseModifiedMap, 0xFF, EFUSE_MAX_MAP_LEN);
+ pHalData->EfuseHal.BTEfuseUsedBytes = 0;
+ pHalData->EfuseHal.BTEfuseUsedPercentage = 0;
+ memset(pHalData->EfuseHal.BTEfuseContent, 0xFF, EFUSE_MAX_BT_BANK*EFUSE_MAX_HW_SIZE);
+ memset(pHalData->EfuseHal.BTEfuseInitMap, 0xFF, EFUSE_BT_MAX_MAP_LEN);
+ memset(pHalData->EfuseHal.BTEfuseModifiedMap, 0xFF, EFUSE_BT_MAX_MAP_LEN);
+ pHalData->EfuseHal.fakeBTEfuseUsedBytes = 0;
+ memset(pHalData->EfuseHal.fakeBTEfuseContent, 0xFF, EFUSE_MAX_BT_BANK*EFUSE_MAX_HW_SIZE);
+ memset(pHalData->EfuseHal.fakeBTEfuseInitMap, 0xFF, EFUSE_BT_MAX_MAP_LEN);
+ memset(pHalData->EfuseHal.fakeBTEfuseModifiedMap, 0xFF, EFUSE_BT_MAX_MAP_LEN);
+#endif
+}
+
+u8 GetEEPROMSize8723B(struct adapter *padapter)
+{
+ u8 size = 0;
+ u32 cr;
+
+ cr = rtw_read16(padapter, REG_9346CR);
+ /* 6: EEPROM used is 93C46, 4: boot from E-Fuse. */
+ size = (cr & BOOT_FROM_EEPROM) ? 6 : 4;
+
+ MSG_8192C("EEPROM type is %s\n", size == 4 ? "E-FUSE" : "93C46");
+
+ return size;
+}
+
+/* */
+/* */
+/* LLT R/W/Init function */
+/* */
+/* */
+s32 rtl8723b_InitLLTTable(struct adapter *padapter)
+{
+ unsigned long start, passing_time;
+ u32 val32;
+ s32 ret;
+
+
+ ret = _FAIL;
+
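+ /* Kick the hardware auto-init of the LLT (link list table) and poll until */
+ /* BIT_AUTO_INIT_LLT clears itself, giving up after roughly one second. */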
+ val32 = rtw_read32(padapter, REG_AUTO_LLT);
+ val32 |= BIT_AUTO_INIT_LLT;
+ rtw_write32(padapter, REG_AUTO_LLT, val32);
+
+ start = jiffies;
+
+ do {
+ val32 = rtw_read32(padapter, REG_AUTO_LLT);
+ if (!(val32 & BIT_AUTO_INIT_LLT)) {
+ ret = _SUCCESS;
+ break;
+ }
+
+ passing_time = jiffies_to_msecs(jiffies - start);
+ if (passing_time > 1000) {
+ DBG_8192C(
+ "%s: FAIL!! REG_AUTO_LLT(0x%X) =%08x\n",
+ __func__,
+ REG_AUTO_LLT,
+ val32
+ );
+ break;
+ }
+
+ msleep(1);
+ } while (1);
+
+ return ret;
+}
+
+static bool Hal_GetChnlGroup8723B(u8 Channel, u8 *pGroup)
+{
+ bool bIn24G = true;
+
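+ /* Map a channel number to its TX power calibration group: 2.4GHz channels fall */
+ /* into groups 0-4, 5GHz channels into groups 0-13. Returns true for 2.4GHz. */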
+ if (Channel <= 14) {
+ bIn24G = true;
+
+ if (1 <= Channel && Channel <= 2)
+ *pGroup = 0;
+ else if (3 <= Channel && Channel <= 5)
+ *pGroup = 1;
+ else if (6 <= Channel && Channel <= 8)
+ *pGroup = 2;
+ else if (9 <= Channel && Channel <= 11)
+ *pGroup = 3;
+ else if (12 <= Channel && Channel <= 14)
+ *pGroup = 4;
+ else {
+ RT_TRACE(_module_hci_hal_init_c_, _drv_notice_, ("==>Hal_GetChnlGroup8723B in 2.4 G, but Channel %d in Group not found\n", Channel));
+ }
+ } else {
+ bIn24G = false;
+
+ if (36 <= Channel && Channel <= 42)
+ *pGroup = 0;
+ else if (44 <= Channel && Channel <= 48)
+ *pGroup = 1;
+ else if (50 <= Channel && Channel <= 58)
+ *pGroup = 2;
+ else if (60 <= Channel && Channel <= 64)
+ *pGroup = 3;
+ else if (100 <= Channel && Channel <= 106)
+ *pGroup = 4;
+ else if (108 <= Channel && Channel <= 114)
+ *pGroup = 5;
+ else if (116 <= Channel && Channel <= 122)
+ *pGroup = 6;
+ else if (124 <= Channel && Channel <= 130)
+ *pGroup = 7;
+ else if (132 <= Channel && Channel <= 138)
+ *pGroup = 8;
+ else if (140 <= Channel && Channel <= 144)
+ *pGroup = 9;
+ else if (149 <= Channel && Channel <= 155)
+ *pGroup = 10;
+ else if (157 <= Channel && Channel <= 161)
+ *pGroup = 11;
+ else if (165 <= Channel && Channel <= 171)
+ *pGroup = 12;
+ else if (173 <= Channel && Channel <= 177)
+ *pGroup = 13;
+ else {
+ RT_TRACE(_module_hci_hal_init_c_, _drv_notice_, ("==>Hal_GetChnlGroup8723B in 5G, but Channel %d in Group not found\n", Channel));
+ }
+
+ }
+ RT_TRACE(
+ _module_hci_hal_init_c_,
+ _drv_info_,
+ (
+ "<==Hal_GetChnlGroup8723B, (%s) Channel = %d, Group =%d,\n",
+ bIn24G ? "2.4G" : "5G",
+ Channel,
+ *pGroup
+ )
+ );
+ return bIn24G;
+}
+
+void Hal_InitPGData(struct adapter *padapter, u8 *PROMContent)
+{
+ struct eeprom_priv *pEEPROM = GET_EEPROM_EFUSE_PRIV(padapter);
+
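+ /* Copy the efuse/EEPROM shadow map into PROMContent; when the board boots from */
+ /* the efuse (EepromOrEfuse == false), the shadow map is refreshed from the real */
+ /* efuse first. */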
+ if (false == pEEPROM->bautoload_fail_flag) { /* autoload OK. */
+ if (!pEEPROM->EepromOrEfuse) {
+ /* Read EFUSE real map to shadow. */
+ EFUSE_ShadowMapUpdate(padapter, EFUSE_WIFI, false);
+ memcpy((void *)PROMContent, (void *)pEEPROM->efuse_eeprom_data, HWSET_MAX_SIZE_8723B);
+ }
+ } else {/* autoload fail */
+ RT_TRACE(_module_hci_hal_init_c_, _drv_notice_, ("AutoLoad Fail reported from CR9346!!\n"));
+ if (false == pEEPROM->EepromOrEfuse)
+ EFUSE_ShadowMapUpdate(padapter, EFUSE_WIFI, false);
+ memcpy((void *)PROMContent, (void *)pEEPROM->efuse_eeprom_data, HWSET_MAX_SIZE_8723B);
+ }
+}
+
+void Hal_EfuseParseIDCode(struct adapter *padapter, u8 *hwinfo)
+{
+ struct eeprom_priv *pEEPROM = GET_EEPROM_EFUSE_PRIV(padapter);
+/* struct hal_com_data *pHalData = GET_HAL_DATA(padapter); */
+ u16 EEPROMId;
+
+
+ /* Check 0x8129 again to make sure of the autoload status!! */
+ EEPROMId = le16_to_cpu(*((__le16 *)hwinfo));
+ if (EEPROMId != RTL_EEPROM_ID) {
+ DBG_8192C("EEPROM ID(%#x) is invalid!!\n", EEPROMId);
+ pEEPROM->bautoload_fail_flag = true;
+ } else
+ pEEPROM->bautoload_fail_flag = false;
+
+ RT_TRACE(_module_hal_init_c_, _drv_notice_, ("EEPROM ID = 0x%04x\n", EEPROMId));
+}
+
+static void Hal_ReadPowerValueFromPROM_8723B(
+ struct adapter *Adapter,
+ struct TxPowerInfo24G *pwrInfo24G,
+ u8 *PROMContent,
+ bool AutoLoadFail
+)
+{
+ struct hal_com_data *pHalData = GET_HAL_DATA(Adapter);
+ u32 rfPath, eeAddr = EEPROM_TX_PWR_INX_8723B, group, TxCount = 0;
+
+ memset(pwrInfo24G, 0, sizeof(struct TxPowerInfo24G));
+
+ if (0xFF == PROMContent[eeAddr+1])
+ AutoLoadFail = true;
+
+ if (AutoLoadFail) {
+ DBG_871X("%s(): Use Default value!\n", __func__);
+ for (rfPath = 0; rfPath < MAX_RF_PATH; rfPath++) {
+ /* 2.4G default value */
+ for (group = 0; group < MAX_CHNL_GROUP_24G; group++) {
+ pwrInfo24G->IndexCCK_Base[rfPath][group] = EEPROM_DEFAULT_24G_INDEX;
+ pwrInfo24G->IndexBW40_Base[rfPath][group] = EEPROM_DEFAULT_24G_INDEX;
+ }
+
+ for (TxCount = 0; TxCount < MAX_TX_COUNT; TxCount++) {
+ if (TxCount == 0) {
+ pwrInfo24G->BW20_Diff[rfPath][0] = EEPROM_DEFAULT_24G_HT20_DIFF;
+ pwrInfo24G->OFDM_Diff[rfPath][0] = EEPROM_DEFAULT_24G_OFDM_DIFF;
+ } else {
+ pwrInfo24G->BW20_Diff[rfPath][TxCount] = EEPROM_DEFAULT_DIFF;
+ pwrInfo24G->BW40_Diff[rfPath][TxCount] = EEPROM_DEFAULT_DIFF;
+ pwrInfo24G->CCK_Diff[rfPath][TxCount] = EEPROM_DEFAULT_DIFF;
+ pwrInfo24G->OFDM_Diff[rfPath][TxCount] = EEPROM_DEFAULT_DIFF;
+ }
+ }
+ }
+
+ return;
+ }
+
+ pHalData->bTXPowerDataReadFromEEPORM = true; /* YJ, move, 120316 */
+
+ for (rfPath = 0; rfPath < MAX_RF_PATH; rfPath++) {
+ /* 2 2.4G default value */
+ for (group = 0; group < MAX_CHNL_GROUP_24G; group++) {
+ pwrInfo24G->IndexCCK_Base[rfPath][group] = PROMContent[eeAddr++];
+ if (pwrInfo24G->IndexCCK_Base[rfPath][group] == 0xFF)
+ pwrInfo24G->IndexCCK_Base[rfPath][group] = EEPROM_DEFAULT_24G_INDEX;
+ }
+
+ for (group = 0; group < MAX_CHNL_GROUP_24G-1; group++) {
+ pwrInfo24G->IndexBW40_Base[rfPath][group] = PROMContent[eeAddr++];
+ if (pwrInfo24G->IndexBW40_Base[rfPath][group] == 0xFF)
+ pwrInfo24G->IndexBW40_Base[rfPath][group] = EEPROM_DEFAULT_24G_INDEX;
+ }
+
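+ /* The per-rate diff fields are 4-bit two's-complement values packed two per */
+ /* byte; when bit 3 is set, OR in 0xF0 to sign-extend the value to 8 bits. */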
+ for (TxCount = 0; TxCount < MAX_TX_COUNT; TxCount++) {
+ if (TxCount == 0) {
+ pwrInfo24G->BW40_Diff[rfPath][TxCount] = 0;
+ if (PROMContent[eeAddr] == 0xFF)
+ pwrInfo24G->BW20_Diff[rfPath][TxCount] = EEPROM_DEFAULT_24G_HT20_DIFF;
+ else {
+ pwrInfo24G->BW20_Diff[rfPath][TxCount] = (PROMContent[eeAddr]&0xf0)>>4;
+ if (pwrInfo24G->BW20_Diff[rfPath][TxCount] & BIT3) /* 4bit sign number to 8 bit sign number */
+ pwrInfo24G->BW20_Diff[rfPath][TxCount] |= 0xF0;
+ }
+
+ if (PROMContent[eeAddr] == 0xFF)
+ pwrInfo24G->OFDM_Diff[rfPath][TxCount] = EEPROM_DEFAULT_24G_OFDM_DIFF;
+ else {
+ pwrInfo24G->OFDM_Diff[rfPath][TxCount] = (PROMContent[eeAddr]&0x0f);
+ if (pwrInfo24G->OFDM_Diff[rfPath][TxCount] & BIT3) /* 4bit sign number to 8 bit sign number */
+ pwrInfo24G->OFDM_Diff[rfPath][TxCount] |= 0xF0;
+ }
+ pwrInfo24G->CCK_Diff[rfPath][TxCount] = 0;
+ eeAddr++;
+ } else {
+ if (PROMContent[eeAddr] == 0xFF)
+ pwrInfo24G->BW40_Diff[rfPath][TxCount] = EEPROM_DEFAULT_DIFF;
+ else {
+ pwrInfo24G->BW40_Diff[rfPath][TxCount] = (PROMContent[eeAddr]&0xf0)>>4;
+ if (pwrInfo24G->BW40_Diff[rfPath][TxCount] & BIT3) /* 4bit sign number to 8 bit sign number */
+ pwrInfo24G->BW40_Diff[rfPath][TxCount] |= 0xF0;
+ }
+
+ if (PROMContent[eeAddr] == 0xFF)
+ pwrInfo24G->BW20_Diff[rfPath][TxCount] = EEPROM_DEFAULT_DIFF;
+ else {
+ pwrInfo24G->BW20_Diff[rfPath][TxCount] = (PROMContent[eeAddr]&0x0f);
+ if (pwrInfo24G->BW20_Diff[rfPath][TxCount] & BIT3) /* 4bit sign number to 8 bit sign number */
+ pwrInfo24G->BW20_Diff[rfPath][TxCount] |= 0xF0;
+ }
+ eeAddr++;
+
+ if (PROMContent[eeAddr] == 0xFF)
+ pwrInfo24G->OFDM_Diff[rfPath][TxCount] = EEPROM_DEFAULT_DIFF;
+ else {
+ pwrInfo24G->OFDM_Diff[rfPath][TxCount] = (PROMContent[eeAddr]&0xf0)>>4;
+ if (pwrInfo24G->OFDM_Diff[rfPath][TxCount] & BIT3) /* 4bit sign number to 8 bit sign number */
+ pwrInfo24G->OFDM_Diff[rfPath][TxCount] |= 0xF0;
+ }
+
+ if (PROMContent[eeAddr] == 0xFF)
+ pwrInfo24G->CCK_Diff[rfPath][TxCount] = EEPROM_DEFAULT_DIFF;
+ else {
+ pwrInfo24G->CCK_Diff[rfPath][TxCount] = (PROMContent[eeAddr]&0x0f);
+ if (pwrInfo24G->CCK_Diff[rfPath][TxCount] & BIT3) /* 4bit sign number to 8 bit sign number */
+ pwrInfo24G->CCK_Diff[rfPath][TxCount] |= 0xF0;
+ }
+ eeAddr++;
+ }
+ }
+ }
+}
+
+
+void Hal_EfuseParseTxPowerInfo_8723B(
+ struct adapter *padapter, u8 *PROMContent, bool AutoLoadFail
+)
+{
+ struct hal_com_data *pHalData = GET_HAL_DATA(padapter);
+ struct TxPowerInfo24G pwrInfo24G;
+ u8 rfPath, ch, TxCount = 1;
+
+ Hal_ReadPowerValueFromPROM_8723B(padapter, &pwrInfo24G, PROMContent, AutoLoadFail);
+ for (rfPath = 0 ; rfPath < MAX_RF_PATH ; rfPath++) {
+ for (ch = 0 ; ch < CHANNEL_MAX_NUMBER; ch++) {
+ u8 group = 0;
+
+ Hal_GetChnlGroup8723B(ch+1, &group);
+
+ if (ch == 14-1) {
+ pHalData->Index24G_CCK_Base[rfPath][ch] = pwrInfo24G.IndexCCK_Base[rfPath][5];
+ pHalData->Index24G_BW40_Base[rfPath][ch] = pwrInfo24G.IndexBW40_Base[rfPath][group];
+ } else {
+ pHalData->Index24G_CCK_Base[rfPath][ch] = pwrInfo24G.IndexCCK_Base[rfPath][group];
+ pHalData->Index24G_BW40_Base[rfPath][ch] = pwrInfo24G.IndexBW40_Base[rfPath][group];
+ }
+#ifdef DEBUG
+ RT_TRACE(_module_hci_hal_init_c_, _drv_info_, ("======= Path %d, ChannelIndex %d, Group %d =======\n", rfPath, ch, group));
+ RT_TRACE(_module_hci_hal_init_c_, _drv_info_, ("Index24G_CCK_Base[%d][%d] = 0x%x\n", rfPath, ch, pHalData->Index24G_CCK_Base[rfPath][ch]));
+ RT_TRACE(_module_hci_hal_init_c_, _drv_info_, ("Index24G_BW40_Base[%d][%d] = 0x%x\n", rfPath, ch, pHalData->Index24G_BW40_Base[rfPath][ch]));
+#endif
+ }
+
+ for (TxCount = 0; TxCount < MAX_TX_COUNT; TxCount++) {
+ pHalData->CCK_24G_Diff[rfPath][TxCount] = pwrInfo24G.CCK_Diff[rfPath][TxCount];
+ pHalData->OFDM_24G_Diff[rfPath][TxCount] = pwrInfo24G.OFDM_Diff[rfPath][TxCount];
+ pHalData->BW20_24G_Diff[rfPath][TxCount] = pwrInfo24G.BW20_Diff[rfPath][TxCount];
+ pHalData->BW40_24G_Diff[rfPath][TxCount] = pwrInfo24G.BW40_Diff[rfPath][TxCount];
+
+#ifdef DEBUG
+ RT_TRACE(_module_hci_hal_init_c_, _drv_info_, ("--------------------------------------- 2.4G ---------------------------------------\n"));
+ RT_TRACE(_module_hci_hal_init_c_, _drv_info_, ("CCK_24G_Diff[%d][%d]= %d\n", rfPath, TxCount, pHalData->CCK_24G_Diff[rfPath][TxCount]));
+ RT_TRACE(_module_hci_hal_init_c_, _drv_info_, ("OFDM_24G_Diff[%d][%d]= %d\n", rfPath, TxCount, pHalData->OFDM_24G_Diff[rfPath][TxCount]));
+ RT_TRACE(_module_hci_hal_init_c_, _drv_info_, ("BW20_24G_Diff[%d][%d]= %d\n", rfPath, TxCount, pHalData->BW20_24G_Diff[rfPath][TxCount]));
+ RT_TRACE(_module_hci_hal_init_c_, _drv_info_, ("BW40_24G_Diff[%d][%d]= %d\n", rfPath, TxCount, pHalData->BW40_24G_Diff[rfPath][TxCount]));
+#endif
+ }
+ }
+
+ /* 2010/10/19 MH Add Regulator recognize for CU. */
+ if (!AutoLoadFail) {
+ pHalData->EEPROMRegulatory = (PROMContent[EEPROM_RF_BOARD_OPTION_8723B]&0x7); /* bit0~2 */
+ if (PROMContent[EEPROM_RF_BOARD_OPTION_8723B] == 0xFF)
+ pHalData->EEPROMRegulatory = (EEPROM_DEFAULT_BOARD_OPTION&0x7); /* bit0~2 */
+ } else
+ pHalData->EEPROMRegulatory = 0;
+
+ RT_TRACE(_module_hci_hal_init_c_, _drv_notice_, ("EEPROMRegulatory = 0x%x\n", pHalData->EEPROMRegulatory));
+}
+
+void Hal_EfuseParseBTCoexistInfo_8723B(
+ struct adapter *padapter, u8 *hwinfo, bool AutoLoadFail
+)
+{
+ struct hal_com_data *pHalData = GET_HAL_DATA(padapter);
+ u8 tempval;
+ u32 tmpu4;
+
+ if (!AutoLoadFail) {
+ tmpu4 = rtw_read32(padapter, REG_MULTI_FUNC_CTRL);
+ if (tmpu4 & BT_FUNC_EN)
+ pHalData->EEPROMBluetoothCoexist = true;
+ else
+ pHalData->EEPROMBluetoothCoexist = false;
+
+ pHalData->EEPROMBluetoothType = BT_RTL8723B;
+
+ tempval = hwinfo[EEPROM_RF_BT_SETTING_8723B];
+ if (tempval != 0xFF) {
+ pHalData->EEPROMBluetoothAntNum = tempval & BIT(0);
+ /* EFUSE_0xC3[6] == 0, S1(Main)-ODM_RF_PATH_A; */
+ /* EFUSE_0xC3[6] == 1, S0(Aux)-ODM_RF_PATH_B */
+ pHalData->ant_path = (tempval & BIT(6))?ODM_RF_PATH_B:ODM_RF_PATH_A;
+ } else {
+ pHalData->EEPROMBluetoothAntNum = Ant_x1;
+ if (pHalData->PackageType == PACKAGE_QFN68)
+ pHalData->ant_path = ODM_RF_PATH_B;
+ else
+ pHalData->ant_path = ODM_RF_PATH_A;
+ }
+ } else {
+ pHalData->EEPROMBluetoothCoexist = false;
+ pHalData->EEPROMBluetoothType = BT_RTL8723B;
+ pHalData->EEPROMBluetoothAntNum = Ant_x1;
+ pHalData->ant_path = ODM_RF_PATH_A;
+ }
+
+ if (padapter->registrypriv.ant_num > 0) {
+ DBG_8192C(
+ "%s: Apply driver defined antenna number(%d) to replace origin(%d)\n",
+ __func__,
+ padapter->registrypriv.ant_num,
+ pHalData->EEPROMBluetoothAntNum == Ant_x2 ? 2 : 1
+ );
+
+ switch (padapter->registrypriv.ant_num) {
+ case 1:
+ pHalData->EEPROMBluetoothAntNum = Ant_x1;
+ break;
+ case 2:
+ pHalData->EEPROMBluetoothAntNum = Ant_x2;
+ break;
+ default:
+ DBG_8192C(
+ "%s: Discard invalid driver defined antenna number(%d)!\n",
+ __func__,
+ padapter->registrypriv.ant_num
+ );
+ break;
+ }
+ }
+
+ rtw_btcoex_SetBTCoexist(padapter, pHalData->EEPROMBluetoothCoexist);
+ rtw_btcoex_SetChipType(padapter, pHalData->EEPROMBluetoothType);
+ rtw_btcoex_SetPGAntNum(padapter, pHalData->EEPROMBluetoothAntNum == Ant_x2 ? 2 : 1);
+ if (pHalData->EEPROMBluetoothAntNum == Ant_x1)
+ rtw_btcoex_SetSingleAntPath(padapter, pHalData->ant_path);
+
+ DBG_8192C(
+ "%s: %s BT-coex, ant_num =%d\n",
+ __func__,
+ pHalData->EEPROMBluetoothCoexist == true ? "Enable" : "Disable",
+ pHalData->EEPROMBluetoothAntNum == Ant_x2 ? 2 : 1
+ );
+}
+
+void Hal_EfuseParseEEPROMVer_8723B(
+ struct adapter *padapter, u8 *hwinfo, bool AutoLoadFail
+)
+{
+ struct hal_com_data *pHalData = GET_HAL_DATA(padapter);
+
+/* RT_TRACE(_module_hci_hal_init_c_, _drv_notice_, ("%s(): AutoLoadFail = %d\n", __func__, AutoLoadFail)); */
+ if (!AutoLoadFail)
+ pHalData->EEPROMVersion = hwinfo[EEPROM_VERSION_8723B];
+ else
+ pHalData->EEPROMVersion = 1;
+ RT_TRACE(_module_hci_hal_init_c_, _drv_notice_, ("Hal_EfuseParseEEPROMVer(), EEVer = %d\n",
+ pHalData->EEPROMVersion));
+}
+
+
+
+void Hal_EfuseParsePackageType_8723B(
+ struct adapter *padapter, u8 *hwinfo, bool AutoLoadFail
+)
+{
+ struct hal_com_data *pHalData = GET_HAL_DATA(padapter);
+ u8 package;
+ u8 efuseContent;
+
+ Efuse_PowerSwitch(padapter, false, true);
+ efuse_OneByteRead(padapter, 0x1FB, &efuseContent, false);
+ DBG_871X("%s phy efuse read 0x1FB =%x\n", __func__, efuseContent);
+ Efuse_PowerSwitch(padapter, false, false);
+
+ package = efuseContent & 0x7;
+ switch (package) {
+ case 0x4:
+ pHalData->PackageType = PACKAGE_TFBGA79;
+ break;
+ case 0x5:
+ pHalData->PackageType = PACKAGE_TFBGA90;
+ break;
+ case 0x6:
+ pHalData->PackageType = PACKAGE_QFN68;
+ break;
+ case 0x7:
+ pHalData->PackageType = PACKAGE_TFBGA80;
+ break;
+
+ default:
+ pHalData->PackageType = PACKAGE_DEFAULT;
+ break;
+ }
+
+ DBG_871X("PackageType = 0x%X\n", pHalData->PackageType);
+}
+
+
+void Hal_EfuseParseVoltage_8723B(
+ struct adapter *padapter, u8 *hwinfo, bool AutoLoadFail
+)
+{
+ struct eeprom_priv *pEEPROM = GET_EEPROM_EFUSE_PRIV(padapter);
+
+ /* memcpy(pEEPROM->adjuseVoltageVal, &hwinfo[EEPROM_Voltage_ADDR_8723B], 1); */
+ DBG_871X("%s hwinfo[EEPROM_Voltage_ADDR_8723B] =%02x\n", __func__, hwinfo[EEPROM_Voltage_ADDR_8723B]);
+ pEEPROM->adjuseVoltageVal = (hwinfo[EEPROM_Voltage_ADDR_8723B] & 0xf0) >> 4;
+ DBG_871X("%s pEEPROM->adjuseVoltageVal =%x\n", __func__, pEEPROM->adjuseVoltageVal);
+}
+
+void Hal_EfuseParseChnlPlan_8723B(
+ struct adapter *padapter, u8 *hwinfo, bool AutoLoadFail
+)
+{
+ padapter->mlmepriv.ChannelPlan = hal_com_config_channel_plan(
+ padapter,
+ hwinfo ? hwinfo[EEPROM_ChannelPlan_8723B] : 0xFF,
+ padapter->registrypriv.channel_plan,
+ RT_CHANNEL_DOMAIN_WORLD_NULL,
+ AutoLoadFail
+ );
+
+ Hal_ChannelPlanToRegulation(padapter, padapter->mlmepriv.ChannelPlan);
+
+ RT_TRACE(_module_hci_hal_init_c_, _drv_notice_, ("EEPROM ChannelPlan = 0x%02x\n", padapter->mlmepriv.ChannelPlan));
+}
+
+void Hal_EfuseParseCustomerID_8723B(
+ struct adapter *padapter, u8 *hwinfo, bool AutoLoadFail
+)
+{
+ struct hal_com_data *pHalData = GET_HAL_DATA(padapter);
+
+/* RT_TRACE(_module_hci_hal_init_c_, _drv_notice_, ("%s(): AutoLoadFail = %d\n", __func__, AutoLoadFail)); */
+ if (!AutoLoadFail)
+ pHalData->EEPROMCustomerID = hwinfo[EEPROM_CustomID_8723B];
+ else
+ pHalData->EEPROMCustomerID = 0;
+
+ RT_TRACE(_module_hci_hal_init_c_, _drv_notice_, ("EEPROM Customer ID: 0x%2x\n", pHalData->EEPROMCustomerID));
+}
+
+void Hal_EfuseParseAntennaDiversity_8723B(
+ struct adapter *padapter,
+ u8 *hwinfo,
+ bool AutoLoadFail
+)
+{
+}
+
+void Hal_EfuseParseXtal_8723B(
+ struct adapter *padapter, u8 *hwinfo, bool AutoLoadFail
+)
+{
+ struct hal_com_data *pHalData = GET_HAL_DATA(padapter);
+
+/* RT_TRACE(_module_hci_hal_init_c_, _drv_notice_, ("%s(): AutoLoadFail = %d\n", __func__, AutoLoadFail)); */
+ if (!AutoLoadFail) {
+ pHalData->CrystalCap = hwinfo[EEPROM_XTAL_8723B];
+ if (pHalData->CrystalCap == 0xFF)
+ pHalData->CrystalCap = EEPROM_Default_CrystalCap_8723B; /* what value should 8812 set? */
+ } else
+ pHalData->CrystalCap = EEPROM_Default_CrystalCap_8723B;
+
+ RT_TRACE(_module_hci_hal_init_c_, _drv_notice_, ("EEPROM CrystalCap: 0x%2x\n", pHalData->CrystalCap));
+}
+
+
+void Hal_EfuseParseThermalMeter_8723B(
+ struct adapter *padapter, u8 *PROMContent, u8 AutoLoadFail
+)
+{
+ struct hal_com_data *pHalData = GET_HAL_DATA(padapter);
+
+/* RT_TRACE(_module_hci_hal_init_c_, _drv_notice_, ("%s(): AutoLoadFail = %d\n", __func__, AutoLoadFail)); */
+ /* */
+ /* ThermalMeter from EEPROM */
+ /* */
+ if (false == AutoLoadFail)
+ pHalData->EEPROMThermalMeter = PROMContent[EEPROM_THERMAL_METER_8723B];
+ else
+ pHalData->EEPROMThermalMeter = EEPROM_Default_ThermalMeter_8723B;
+
+ if ((pHalData->EEPROMThermalMeter == 0xff) || (true == AutoLoadFail)) {
+ pHalData->bAPKThermalMeterIgnore = true;
+ pHalData->EEPROMThermalMeter = EEPROM_Default_ThermalMeter_8723B;
+ }
+
+ RT_TRACE(_module_hci_hal_init_c_, _drv_notice_, ("EEPROM ThermalMeter = 0x%x\n", pHalData->EEPROMThermalMeter));
+}
+
+
+void Hal_ReadRFGainOffset(
+ struct adapter *Adapter, u8 *PROMContent, bool AutoloadFail
+)
+{
+ /* */
+ /* BB_RF Gain Offset from EEPROM */
+ /* */
+
+ if (!AutoloadFail) {
+ Adapter->eeprompriv.EEPROMRFGainOffset = PROMContent[EEPROM_RF_GAIN_OFFSET];
+ DBG_871X("AutoloadFail =%x,\n", AutoloadFail);
+ Adapter->eeprompriv.EEPROMRFGainVal = EFUSE_Read1Byte(Adapter, EEPROM_RF_GAIN_VAL);
+ DBG_871X("Adapter->eeprompriv.EEPROMRFGainVal =%x\n", Adapter->eeprompriv.EEPROMRFGainVal);
+ } else {
+ Adapter->eeprompriv.EEPROMRFGainOffset = 0;
+ Adapter->eeprompriv.EEPROMRFGainVal = 0xFF;
+ DBG_871X("else AutoloadFail =%x,\n", AutoloadFail);
+ }
+ DBG_871X("EEPRORFGainOffset = 0x%02x\n", Adapter->eeprompriv.EEPROMRFGainOffset);
+}
+
+u8 BWMapping_8723B(struct adapter *Adapter, struct pkt_attrib *pattrib)
+{
+ u8 BWSettingOfDesc = 0;
+ struct hal_com_data *pHalData = GET_HAL_DATA(Adapter);
+
+ /* DBG_871X("BWMapping pHalData->CurrentChannelBW %d, pattrib->bwmode %d\n", pHalData->CurrentChannelBW, pattrib->bwmode); */
+
+ if (pHalData->CurrentChannelBW == CHANNEL_WIDTH_80) {
+ if (pattrib->bwmode == CHANNEL_WIDTH_80)
+ BWSettingOfDesc = 2;
+ else if (pattrib->bwmode == CHANNEL_WIDTH_40)
+ BWSettingOfDesc = 1;
+ else
+ BWSettingOfDesc = 0;
+ } else if (pHalData->CurrentChannelBW == CHANNEL_WIDTH_40) {
+ if ((pattrib->bwmode == CHANNEL_WIDTH_40) || (pattrib->bwmode == CHANNEL_WIDTH_80))
+ BWSettingOfDesc = 1;
+ else
+ BWSettingOfDesc = 0;
+ } else
+ BWSettingOfDesc = 0;
+
+ /* if (pTcb->bBTTxPacket) */
+ /* BWSettingOfDesc = 0; */
+
+ return BWSettingOfDesc;
+}
+
+u8 SCMapping_8723B(struct adapter *Adapter, struct pkt_attrib *pattrib)
+{
+ u8 SCSettingOfDesc = 0;
+ struct hal_com_data *pHalData = GET_HAL_DATA(Adapter);
+
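+ /* Derive the TX descriptor's subcarrier/duplicate setting from the relation */
+ /* between the current channel bandwidth and the frame's bandwidth: equal widths */
+ /* map to DONOT_CARE, narrower frames are steered onto the proper 20/40MHz */
+ /* segment of the 40/80MHz channel using the primary-channel offsets. */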
+ /* DBG_871X("SCMapping: pHalData->CurrentChannelBW %d, pHalData->nCur80MhzPrimeSC %d, pHalData->nCur40MhzPrimeSC %d\n", pHalData->CurrentChannelBW, pHalData->nCur80MhzPrimeSC, pHalData->nCur40MhzPrimeSC); */
+
+ if (pHalData->CurrentChannelBW == CHANNEL_WIDTH_80) {
+ if (pattrib->bwmode == CHANNEL_WIDTH_80) {
+ SCSettingOfDesc = VHT_DATA_SC_DONOT_CARE;
+ } else if (pattrib->bwmode == CHANNEL_WIDTH_40) {
+ if (pHalData->nCur80MhzPrimeSC == HAL_PRIME_CHNL_OFFSET_LOWER)
+ SCSettingOfDesc = VHT_DATA_SC_40_LOWER_OF_80MHZ;
+ else if (pHalData->nCur80MhzPrimeSC == HAL_PRIME_CHNL_OFFSET_UPPER)
+ SCSettingOfDesc = VHT_DATA_SC_40_UPPER_OF_80MHZ;
+ else
+ DBG_871X("SCMapping: Not Correct Primary40MHz Setting\n");
+ } else {
+ if ((pHalData->nCur40MhzPrimeSC == HAL_PRIME_CHNL_OFFSET_LOWER) && (pHalData->nCur80MhzPrimeSC == HAL_PRIME_CHNL_OFFSET_LOWER))
+ SCSettingOfDesc = VHT_DATA_SC_20_LOWEST_OF_80MHZ;
+ else if ((pHalData->nCur40MhzPrimeSC == HAL_PRIME_CHNL_OFFSET_UPPER) && (pHalData->nCur80MhzPrimeSC == HAL_PRIME_CHNL_OFFSET_LOWER))
+ SCSettingOfDesc = VHT_DATA_SC_20_LOWER_OF_80MHZ;
+ else if ((pHalData->nCur40MhzPrimeSC == HAL_PRIME_CHNL_OFFSET_LOWER) && (pHalData->nCur80MhzPrimeSC == HAL_PRIME_CHNL_OFFSET_UPPER))
+ SCSettingOfDesc = VHT_DATA_SC_20_UPPER_OF_80MHZ;
+ else if ((pHalData->nCur40MhzPrimeSC == HAL_PRIME_CHNL_OFFSET_UPPER) && (pHalData->nCur80MhzPrimeSC == HAL_PRIME_CHNL_OFFSET_UPPER))
+ SCSettingOfDesc = VHT_DATA_SC_20_UPPERST_OF_80MHZ;
+ else
+ DBG_871X("SCMapping: Not Correct Primary40MHz Setting\n");
+ }
+ } else if (pHalData->CurrentChannelBW == CHANNEL_WIDTH_40) {
+ /* DBG_871X("SCMapping: HT Case: pHalData->CurrentChannelBW %d, pHalData->nCur40MhzPrimeSC %d\n", pHalData->CurrentChannelBW, pHalData->nCur40MhzPrimeSC); */
+
+ if (pattrib->bwmode == CHANNEL_WIDTH_40) {
+ SCSettingOfDesc = VHT_DATA_SC_DONOT_CARE;
+ } else if (pattrib->bwmode == CHANNEL_WIDTH_20) {
+ if (pHalData->nCur40MhzPrimeSC == HAL_PRIME_CHNL_OFFSET_UPPER) {
+ SCSettingOfDesc = VHT_DATA_SC_20_UPPER_OF_80MHZ;
+ } else if (pHalData->nCur40MhzPrimeSC == HAL_PRIME_CHNL_OFFSET_LOWER) {
+ SCSettingOfDesc = VHT_DATA_SC_20_LOWER_OF_80MHZ;
+ } else {
+ SCSettingOfDesc = VHT_DATA_SC_DONOT_CARE;
+ }
+ }
+ } else {
+ SCSettingOfDesc = VHT_DATA_SC_DONOT_CARE;
+ }
+
+ return SCSettingOfDesc;
+}
+
+static void rtl8723b_cal_txdesc_chksum(struct tx_desc *ptxdesc)
+{
+ u16 *usPtr = (u16 *)ptxdesc;
+ u32 count;
+ u32 index;
+ u16 checksum = 0;
+
+
+ /* Clear first */
+ ptxdesc->txdw7 &= cpu_to_le32(0xffff0000);
+
+ /* The checksum is always calculated over the first 32 bytes (16 16-bit words) */
+ /* and doesn't depend on the TX DESC length. */
+ /* Thomas, Lucas@SD4, 20130515 */
+ count = 16;
+
+ for (index = 0; index < count; index++)
+ checksum ^= le16_to_cpu(*(__le16 *)(usPtr + index));
+
+ ptxdesc->txdw7 |= cpu_to_le32(checksum & 0x0000ffff);
+}
+
+static u8 fill_txdesc_sectype(struct pkt_attrib *pattrib)
+{
+ u8 sectype = 0;
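+
+ /* Map the cipher suite to the TX descriptor SEC_TYPE value: 1 for WEP/TKIP, */
+ /* 3 for AES; anything else (including SW-encrypted frames) stays 0 so that */
+ /* hardware encryption is left off. */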
+ if ((pattrib->encrypt > 0) && !pattrib->bswenc) {
+ switch (pattrib->encrypt) {
+ /* SEC_TYPE */
+ case _WEP40_:
+ case _WEP104_:
+ case _TKIP_:
+ case _TKIP_WTMIC_:
+ sectype = 1;
+ break;
+
+ case _AES_:
+ sectype = 3;
+ break;
+
+ case _NO_PRIVACY_:
+ default:
+ break;
+ }
+ }
+ return sectype;
+}
+
+static void fill_txdesc_vcs_8723b(struct adapter *padapter, struct pkt_attrib *pattrib, PTXDESC_8723B ptxdesc)
+{
+ /* DBG_8192C("cvs_mode =%d\n", pattrib->vcs_mode); */
+
+ if (pattrib->vcs_mode) {
+ switch (pattrib->vcs_mode) {
+ case RTS_CTS:
+ ptxdesc->rtsen = 1;
+ /* ENABLE HW RTS */
+ ptxdesc->hw_rts_en = 1;
+ break;
+
+ case CTS_TO_SELF:
+ ptxdesc->cts2self = 1;
+ break;
+
+ case NONE_VCS:
+ default:
+ break;
+ }
+
+ ptxdesc->rtsrate = 8; /* RTS Rate =24M */
+ ptxdesc->rts_ratefb_lmt = 0xF;
+
+ if (padapter->mlmeextpriv.mlmext_info.preamble_mode == PREAMBLE_SHORT)
+ ptxdesc->rts_short = 1;
+
+ /* Set RTS BW */
+ if (pattrib->ht_en)
+ ptxdesc->rts_sc = SCMapping_8723B(padapter, pattrib);
+ }
+}
+
+static void fill_txdesc_phy_8723b(struct adapter *padapter, struct pkt_attrib *pattrib, PTXDESC_8723B ptxdesc)
+{
+ /* DBG_8192C("bwmode =%d, ch_off =%d\n", pattrib->bwmode, pattrib->ch_offset); */
+
+ if (pattrib->ht_en) {
+ ptxdesc->data_bw = BWMapping_8723B(padapter, pattrib);
+
+ ptxdesc->data_sc = SCMapping_8723B(padapter, pattrib);
+ }
+}
+
+static void rtl8723b_fill_default_txdesc(
+ struct xmit_frame *pxmitframe, u8 *pbuf
+)
+{
+ struct adapter *padapter;
+ struct hal_com_data *pHalData;
+ struct dm_priv *pdmpriv;
+ struct mlme_ext_priv *pmlmeext;
+ struct mlme_ext_info *pmlmeinfo;
+ struct pkt_attrib *pattrib;
+ PTXDESC_8723B ptxdesc;
+ s32 bmcst;
+
+ memset(pbuf, 0, TXDESC_SIZE);
+
+ padapter = pxmitframe->padapter;
+ pHalData = GET_HAL_DATA(padapter);
+ pdmpriv = &pHalData->dmpriv;
+ pmlmeext = &padapter->mlmeextpriv;
+ pmlmeinfo = &(pmlmeext->mlmext_info);
+
+ pattrib = &pxmitframe->attrib;
+ bmcst = IS_MCAST(pattrib->ra);
+
+ ptxdesc = (PTXDESC_8723B)pbuf;
+
+ if (pxmitframe->frame_tag == DATA_FRAMETAG) {
+ u8 drv_userate = 0;
+
+ ptxdesc->macid = pattrib->mac_id; /* CAM_ID(MAC_ID) */
+ ptxdesc->rate_id = pattrib->raid;
+ ptxdesc->qsel = pattrib->qsel;
+ ptxdesc->seq = pattrib->seqnum;
+
+ ptxdesc->sectype = fill_txdesc_sectype(pattrib);
+ fill_txdesc_vcs_8723b(padapter, pattrib, ptxdesc);
+
+ if (pattrib->icmp_pkt == 1 && padapter->registrypriv.wifi_spec == 1)
+ drv_userate = 1;
+
+ if (
+ (pattrib->ether_type != 0x888e) &&
+ (pattrib->ether_type != 0x0806) &&
+ (pattrib->ether_type != 0x88B4) &&
+ (pattrib->dhcp_pkt != 1) &&
+ (drv_userate != 1)
+#ifdef CONFIG_AUTO_AP_MODE
+ && (pattrib->pctrl != true)
+#endif
+ ) {
+ /* Non EAP & ARP & DHCP type data packet */
+
+ if (pattrib->ampdu_en == true) {
+ ptxdesc->agg_en = 1; /* AGG EN */
+ ptxdesc->max_agg_num = 0x1f;
+ ptxdesc->ampdu_density = pattrib->ampdu_spacing;
+ } else
+ ptxdesc->bk = 1; /* AGG BK */
+
+ fill_txdesc_phy_8723b(padapter, pattrib, ptxdesc);
+
+ ptxdesc->data_ratefb_lmt = 0x1F;
+
+ if (pHalData->fw_ractrl == false) {
+ ptxdesc->userate = 1;
+
+ if (pHalData->dmpriv.INIDATA_RATE[pattrib->mac_id] & BIT(7))
+ ptxdesc->data_short = 1;
+
+ ptxdesc->datarate = pHalData->dmpriv.INIDATA_RATE[pattrib->mac_id] & 0x7F;
+ }
+
+ if (padapter->fix_rate != 0xFF) { /* modify data rate by iwpriv */
+ ptxdesc->userate = 1;
+ if (padapter->fix_rate & BIT(7))
+ ptxdesc->data_short = 1;
+
+ ptxdesc->datarate = (padapter->fix_rate & 0x7F);
+ ptxdesc->disdatafb = 1;
+ }
+
+ if (pattrib->ldpc)
+ ptxdesc->data_ldpc = 1;
+ if (pattrib->stbc)
+ ptxdesc->data_stbc = 1;
+
+#ifdef CONFIG_CMCC_TEST
+ ptxdesc->data_short = 1; /* use cck short preamble */
+#endif
+ } else {
+ /* EAP data packet and ARP packet. */
+ /* Use the 1M data rate to send the EAP/ARP packet. */
+ /* This may make the handshake smoother. */
+
+ ptxdesc->bk = 1; /* AGG BK */
+ ptxdesc->userate = 1; /* driver uses rate */
+ if (pmlmeinfo->preamble_mode == PREAMBLE_SHORT)
+ ptxdesc->data_short = 1;/* DATA_SHORT */
+ ptxdesc->datarate = MRateToHwRate(pmlmeext->tx_rate);
+ DBG_871X("YJ: %s(): ARP Data: userate =%d, datarate = 0x%x\n", __func__, ptxdesc->userate, ptxdesc->datarate);
+ }
+
+ ptxdesc->usb_txagg_num = pxmitframe->agg_num;
+ } else if (pxmitframe->frame_tag == MGNT_FRAMETAG) {
+/* RT_TRACE(_module_hal_xmit_c_, _drv_notice_, ("%s: MGNT_FRAMETAG\n", __func__)); */
+
+ ptxdesc->macid = pattrib->mac_id; /* CAM_ID(MAC_ID) */
+ ptxdesc->qsel = pattrib->qsel;
+ ptxdesc->rate_id = pattrib->raid; /* Rate ID */
+ ptxdesc->seq = pattrib->seqnum;
+ ptxdesc->userate = 1; /* driver uses rate, 1M */
+
+ ptxdesc->mbssid = pattrib->mbssid & 0xF;
+
+ ptxdesc->rty_lmt_en = 1; /* retry limit enable */
+ if (pattrib->retry_ctrl == true) {
+ ptxdesc->data_rt_lmt = 6;
+ } else {
+ ptxdesc->data_rt_lmt = 12;
+ }
+
+ ptxdesc->datarate = MRateToHwRate(pmlmeext->tx_rate);
+
+ /* CCX-TXRPT ack for xmit mgmt frames. */
+ if (pxmitframe->ack_report) {
+ #ifdef DBG_CCX
+ DBG_8192C("%s set spe_rpt\n", __func__);
+ #endif
+ ptxdesc->spe_rpt = 1;
+ ptxdesc->sw_define = (u8)(GET_PRIMARY_ADAPTER(padapter)->xmitpriv.seq_no);
+ }
+ } else if (pxmitframe->frame_tag == TXAGG_FRAMETAG) {
+ RT_TRACE(_module_hal_xmit_c_, _drv_warning_, ("%s: TXAGG_FRAMETAG\n", __func__));
+ } else {
+ RT_TRACE(_module_hal_xmit_c_, _drv_warning_, ("%s: frame_tag = 0x%x\n", __func__, pxmitframe->frame_tag));
+
+ ptxdesc->macid = pattrib->mac_id; /* CAM_ID(MAC_ID) */
+ ptxdesc->rate_id = pattrib->raid; /* Rate ID */
+ ptxdesc->qsel = pattrib->qsel;
+ ptxdesc->seq = pattrib->seqnum;
+ ptxdesc->userate = 1; /* driver uses rate */
+ ptxdesc->datarate = MRateToHwRate(pmlmeext->tx_rate);
+ }
+
+ ptxdesc->pktlen = pattrib->last_txcmdsz;
+ ptxdesc->offset = TXDESC_SIZE + OFFSET_SZ;
+
+ if (bmcst)
+ ptxdesc->bmc = 1;
+
+ /* 2009.11.05. tynli_test. Suggested by SD4 Filen for FW LPS. */
+ /* (1) The sequence number of each non-QoS frame / broadcast / multicast / */
+ /* mgnt frame should be controlled by Hw because Fw will also send null data, */
+ /* which we cannot control, when Fw LPS is enabled. */
+ /* --> default enable non-QoS data sequence number. 2010.06.23. by tynli. */
+ /* (2) Enable HW SEQ control for beacon packets, because we use Hw beacon. */
+ /* (3) Use HW QoS SEQ to control the seq num of Ext port non-QoS packets. */
+ /* 2010.06.23. Added by tynli. */
+ if (!pattrib->qos_en) /* Hw set sequence number */
+ ptxdesc->en_hwseq = 1; /* HWSEQ_EN */
+}
+
+/*
+ *Description:
+ * Fill the default TX descriptor for the given xmit frame and append the
+ * descriptor checksum.
+ *
+ *Parameters:
+ * pxmitframe xmitframe
+ * pbuf where to fill tx desc
+ */
+void rtl8723b_update_txdesc(struct xmit_frame *pxmitframe, u8 *pbuf)
+{
+ struct tx_desc *pdesc;
+
+ rtl8723b_fill_default_txdesc(pxmitframe, pbuf);
+
+ pdesc = (struct tx_desc *)pbuf;
+
+ rtl8723b_cal_txdesc_chksum(pdesc);
+}
+
+/* */
+/* Description: In normal chip, we should send some packets to Hw which will be used by Fw */
+/* in FW LPS mode. This function fills the Tx descriptors of these packets, then */
+/* Fw can tell Hw to send these packets directly. */
+/* Added by tynli. 2009.10.15. */
+/* */
+/* type1:pspoll, type2:null */
+void rtl8723b_fill_fake_txdesc(
+ struct adapter *padapter,
+ u8 *pDesc,
+ u32 BufferLen,
+ u8 IsPsPoll,
+ u8 IsBTQosNull,
+ u8 bDataFrame
+)
+{
+ /* Clear all status */
+ memset(pDesc, 0, TXDESC_SIZE);
+
+ SET_TX_DESC_FIRST_SEG_8723B(pDesc, 1); /* bFirstSeg; */
+ SET_TX_DESC_LAST_SEG_8723B(pDesc, 1); /* bLastSeg; */
+
+ SET_TX_DESC_OFFSET_8723B(pDesc, 0x28); /* Offset = 32 */
+
+ SET_TX_DESC_PKT_SIZE_8723B(pDesc, BufferLen); /* Buffer size + command header */
+ SET_TX_DESC_QUEUE_SEL_8723B(pDesc, QSLT_MGNT); /* Fixed queue of Mgnt queue */
+
+ /* Set NAVUSEHDR to prevent the PS-Poll AID field from being changed to an erroneous value by Hw. */
+ if (true == IsPsPoll) {
+ SET_TX_DESC_NAV_USE_HDR_8723B(pDesc, 1);
+ } else {
+ SET_TX_DESC_HWSEQ_EN_8723B(pDesc, 1); /* Hw set sequence number */
+ SET_TX_DESC_HWSEQ_SEL_8723B(pDesc, 0);
+ }
+
+ if (true == IsBTQosNull) {
+ SET_TX_DESC_BT_INT_8723B(pDesc, 1);
+ }
+
+ SET_TX_DESC_USE_RATE_8723B(pDesc, 1); /* use data rate which is set by Sw */
+ SET_TX_DESC_OWN_8723B((u8 *)pDesc, 1);
+
+ SET_TX_DESC_TX_RATE_8723B(pDesc, DESC8723B_RATE1M);
+
+ /* */
+ /* Encrypt the data frame if under security mode, except null data. Suggested by CCW. */
+ /* */
+ if (true == bDataFrame) {
+ u32 EncAlg;
+
+ EncAlg = padapter->securitypriv.dot11PrivacyAlgrthm;
+ switch (EncAlg) {
+ case _NO_PRIVACY_:
+ SET_TX_DESC_SEC_TYPE_8723B(pDesc, 0x0);
+ break;
+ case _WEP40_:
+ case _WEP104_:
+ case _TKIP_:
+ SET_TX_DESC_SEC_TYPE_8723B(pDesc, 0x1);
+ break;
+ case _SMS4_:
+ SET_TX_DESC_SEC_TYPE_8723B(pDesc, 0x2);
+ break;
+ case _AES_:
+ SET_TX_DESC_SEC_TYPE_8723B(pDesc, 0x3);
+ break;
+ default:
+ SET_TX_DESC_SEC_TYPE_8723B(pDesc, 0x0);
+ break;
+ }
+ }
+
+ /* The USB interface drops the packet if the descriptor checksum isn't correct. */
+ /* Using this checksum lets hardware recover from bulk-out errors (e.g. cancelled URB, bulk out error). */
+ rtl8723b_cal_txdesc_chksum((struct tx_desc *)pDesc);
+}
+
+static void hw_var_set_opmode(struct adapter *padapter, u8 variable, u8 *val)
+{
+ u8 val8;
+ u8 mode = *((u8 *)val);
+
+ {
+ /* disable Port0 TSF update */
+ val8 = rtw_read8(padapter, REG_BCN_CTRL);
+ val8 |= DIS_TSF_UDT;
+ rtw_write8(padapter, REG_BCN_CTRL, val8);
+
+ /* set net_type */
+ Set_MSR(padapter, mode);
+ DBG_871X("#### %s() -%d iface_type(0) mode = %d ####\n", __func__, __LINE__, mode);
+
+ if ((mode == _HW_STATE_STATION_) || (mode == _HW_STATE_NOLINK_)) {
+ {
+ StopTxBeacon(padapter);
+#ifdef CONFIG_INTERRUPT_BASED_TXBCN
+#ifdef CONFIG_INTERRUPT_BASED_TXBCN_EARLY_INT
+ rtw_write8(padapter, REG_DRVERLYINT, 0x05); /* restore early int time to 5ms */
+ UpdateInterruptMask8812AU(padapter, true, 0, IMR_BCNDMAINT0_8723B);
+#endif /* CONFIG_INTERRUPT_BASED_TXBCN_EARLY_INT */
+
+#ifdef CONFIG_INTERRUPT_BASED_TXBCN_BCN_OK_ERR
+ UpdateInterruptMask8812AU(padapter, true, 0, (IMR_TXBCN0ERR_8723B|IMR_TXBCN0OK_8723B));
+#endif /* CONFIG_INTERRUPT_BASED_TXBCN_BCN_OK_ERR */
+
+#endif /* CONFIG_INTERRUPT_BASED_TXBCN */
+ }
+
+ /* disable atim wnd */
+ rtw_write8(padapter, REG_BCN_CTRL, DIS_TSF_UDT|EN_BCN_FUNCTION|DIS_ATIM);
+ /* rtw_write8(padapter, REG_BCN_CTRL, 0x18); */
+ } else if ((mode == _HW_STATE_ADHOC_) /*|| (mode == _HW_STATE_AP_)*/) {
+ ResumeTxBeacon(padapter);
+ rtw_write8(padapter, REG_BCN_CTRL, DIS_TSF_UDT|EN_BCN_FUNCTION|DIS_BCNQ_SUB);
+ } else if (mode == _HW_STATE_AP_) {
+#ifdef CONFIG_INTERRUPT_BASED_TXBCN
+#ifdef CONFIG_INTERRUPT_BASED_TXBCN_EARLY_INT
+ UpdateInterruptMask8723BU(padapter, true, IMR_BCNDMAINT0_8723B, 0);
+#endif /* CONFIG_INTERRUPT_BASED_TXBCN_EARLY_INT */
+
+#ifdef CONFIG_INTERRUPT_BASED_TXBCN_BCN_OK_ERR
+ UpdateInterruptMask8723BU(padapter, true, (IMR_TXBCN0ERR_8723B|IMR_TXBCN0OK_8723B), 0);
+#endif /* CONFIG_INTERRUPT_BASED_TXBCN_BCN_OK_ERR */
+
+#endif /* CONFIG_INTERRUPT_BASED_TXBCN */
+
+ ResumeTxBeacon(padapter);
+
+ rtw_write8(padapter, REG_BCN_CTRL, DIS_TSF_UDT|DIS_BCNQ_SUB);
+
+ /* Set RCR */
+ rtw_write32(padapter, REG_RCR, 0x7000208e);/* CBSSID_DATA must set to 0, reject ICV_ERR packet */
+ /* enable to rx data frame */
+ rtw_write16(padapter, REG_RXFLTMAP2, 0xFFFF);
+ /* enable to rx ps-poll */
+ rtw_write16(padapter, REG_RXFLTMAP1, 0x0400);
+
+ /* Beacon Control related register for first time */
+ rtw_write8(padapter, REG_BCNDMATIM, 0x02); /* 2ms */
+
+ /* rtw_write8(padapter, REG_BCN_MAX_ERR, 0xFF); */
+ rtw_write8(padapter, REG_ATIMWND, 0x0a); /* 10ms */
+ rtw_write16(padapter, REG_BCNTCFG, 0x00);
+ rtw_write16(padapter, REG_TBTT_PROHIBIT, 0xff04);
+ rtw_write16(padapter, REG_TSFTR_SYN_OFFSET, 0x7fff);/* +32767 (~32ms) */
+
+ /* reset TSF */
+ rtw_write8(padapter, REG_DUAL_TSF_RST, BIT(0));
+
+ /* enable BCN0 Function for if1 */
+ /* don't enable update TSF0 for if1 (due to TSF update when beacon/probe rsp are received) */
+ rtw_write8(padapter, REG_BCN_CTRL, (DIS_TSF_UDT|EN_BCN_FUNCTION|EN_TXBCN_RPT|DIS_BCNQ_SUB));
+
+ /* SW_BCN_SEL - Port0 */
+ /* rtw_write8(Adapter, REG_DWBCN1_CTRL_8192E+2, rtw_read8(Adapter, REG_DWBCN1_CTRL_8192E+2) & ~BIT4); */
+ rtw_hal_set_hwreg(padapter, HW_VAR_DL_BCN_SEL, NULL);
+
+ /* select BCN on port 0 */
+ rtw_write8(
+ padapter,
+ REG_CCK_CHECK_8723B,
+ (rtw_read8(padapter, REG_CCK_CHECK_8723B)&~BIT_BCN_PORT_SEL)
+ );
+
+ /* dis BCN1 ATIM WND if if2 is station */
+ val8 = rtw_read8(padapter, REG_BCN_CTRL_1);
+ val8 |= DIS_ATIM;
+ rtw_write8(padapter, REG_BCN_CTRL_1, val8);
+ }
+ }
+}
+
+static void hw_var_set_macaddr(struct adapter *padapter, u8 variable, u8 *val)
+{
+ u8 idx = 0;
+ u32 reg_macid;
+
+ reg_macid = REG_MACID;
+
+ for (idx = 0 ; idx < 6; idx++)
+ rtw_write8(GET_PRIMARY_ADAPTER(padapter), (reg_macid+idx), val[idx]);
+}
+
+static void hw_var_set_bssid(struct adapter *padapter, u8 variable, u8 *val)
+{
+ u8 idx = 0;
+ u32 reg_bssid;
+
+ reg_bssid = REG_BSSID;
+
+ for (idx = 0 ; idx < 6; idx++)
+ rtw_write8(padapter, (reg_bssid+idx), val[idx]);
+}
+
+static void hw_var_set_bcn_func(struct adapter *padapter, u8 variable, u8 *val)
+{
+ u32 bcn_ctrl_reg;
+
+ bcn_ctrl_reg = REG_BCN_CTRL;
+
+ if (*(u8 *)val)
+ rtw_write8(padapter, bcn_ctrl_reg, (EN_BCN_FUNCTION | EN_TXBCN_RPT));
+ else {
+ u8 val8;
+ val8 = rtw_read8(padapter, bcn_ctrl_reg);
+ val8 &= ~(EN_BCN_FUNCTION | EN_TXBCN_RPT);
+
+ /* Always enable port0 beacon function for PSTDMA */
+ if (REG_BCN_CTRL == bcn_ctrl_reg)
+ val8 |= EN_BCN_FUNCTION;
+
+ rtw_write8(padapter, bcn_ctrl_reg, val8);
+ }
+}
+
+static void hw_var_set_correct_tsf(struct adapter *padapter, u8 variable, u8 *val)
+{
+ u8 val8;
+ u64 tsf;
+ struct mlme_ext_priv *pmlmeext;
+ struct mlme_ext_info *pmlmeinfo;
+
+
+ pmlmeext = &padapter->mlmeextpriv;
+ pmlmeinfo = &pmlmeext->mlmext_info;
+
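+ /* Rewind the TSF to one TU (1024us) before the most recent beacon boundary */
+ /* (bcn_interval is in TUs) before loading it into the hardware counter. */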
+ tsf = pmlmeext->TSFValue-rtw_modular64(pmlmeext->TSFValue, (pmlmeinfo->bcn_interval*1024))-1024; /* us */
+
+ if (
+ ((pmlmeinfo->state&0x03) == WIFI_FW_ADHOC_STATE) ||
+ ((pmlmeinfo->state&0x03) == WIFI_FW_AP_STATE)
+ )
+ StopTxBeacon(padapter);
+
+ {
+ /* disable related TSF function */
+ val8 = rtw_read8(padapter, REG_BCN_CTRL);
+ val8 &= ~EN_BCN_FUNCTION;
+ rtw_write8(padapter, REG_BCN_CTRL, val8);
+
+ rtw_write32(padapter, REG_TSFTR, tsf);
+ rtw_write32(padapter, REG_TSFTR+4, tsf>>32);
+
+ /* enable related TSF function */
+ val8 = rtw_read8(padapter, REG_BCN_CTRL);
+ val8 |= EN_BCN_FUNCTION;
+ rtw_write8(padapter, REG_BCN_CTRL, val8);
+ }
+
+ if (
+ ((pmlmeinfo->state&0x03) == WIFI_FW_ADHOC_STATE) ||
+ ((pmlmeinfo->state&0x03) == WIFI_FW_AP_STATE)
+ )
+ ResumeTxBeacon(padapter);
+}
+
+static void hw_var_set_mlme_disconnect(struct adapter *padapter, u8 variable, u8 *val)
+{
+ u8 val8;
+
+ /* Set RCR not to receive data frames in the NO LINK state */
+ /* rtw_write32(padapter, REG_RCR, rtw_read32(padapter, REG_RCR) & ~RCR_ADF); */
+ /* reject all data frames */
+ rtw_write16(padapter, REG_RXFLTMAP2, 0);
+
+ /* reset TSF */
+ rtw_write8(padapter, REG_DUAL_TSF_RST, BIT(0));
+
+ /* disable update TSF */
+ val8 = rtw_read8(padapter, REG_BCN_CTRL);
+ val8 |= DIS_TSF_UDT;
+ rtw_write8(padapter, REG_BCN_CTRL, val8);
+}
+
+static void hw_var_set_mlme_sitesurvey(struct adapter *padapter, u8 variable, u8 *val)
+{
+ u32 value_rcr, rcr_clear_bit, reg_bcn_ctl;
+ u16 value_rxfltmap2;
+ u8 val8;
+ struct hal_com_data *pHalData;
+ struct mlme_priv *pmlmepriv;
+
+
+ pHalData = GET_HAL_DATA(padapter);
+ pmlmepriv = &padapter->mlmepriv;
+
+ reg_bcn_ctl = REG_BCN_CTRL;
+
+ rcr_clear_bit = RCR_CBSSID_BCN;
+
+ /* config RCR to receive different BSSID & not to receive data frame */
+ value_rxfltmap2 = 0;
+
+ if ((check_fwstate(pmlmepriv, WIFI_AP_STATE) == true))
+ rcr_clear_bit = RCR_CBSSID_BCN;
+
+ value_rcr = rtw_read32(padapter, REG_RCR);
+
+ if (*((u8 *)val)) {
+ /* under sitesurvey */
+ value_rcr &= ~(rcr_clear_bit);
+ rtw_write32(padapter, REG_RCR, value_rcr);
+
+ rtw_write16(padapter, REG_RXFLTMAP2, value_rxfltmap2);
+
+ if (check_fwstate(pmlmepriv, WIFI_STATION_STATE | WIFI_ADHOC_STATE | WIFI_ADHOC_MASTER_STATE)) {
+ /* disable update TSF */
+ val8 = rtw_read8(padapter, reg_bcn_ctl);
+ val8 |= DIS_TSF_UDT;
+ rtw_write8(padapter, reg_bcn_ctl, val8);
+ }
+
+ /* Save original RRSR setting. */
+ pHalData->RegRRSR = rtw_read16(padapter, REG_RRSR);
+ } else {
+ /* sitesurvey done */
+ if (check_fwstate(pmlmepriv, (_FW_LINKED|WIFI_AP_STATE)))
+ /* enable to rx data frame */
+ rtw_write16(padapter, REG_RXFLTMAP2, 0xFFFF);
+
+ if (check_fwstate(pmlmepriv, WIFI_STATION_STATE | WIFI_ADHOC_STATE | WIFI_ADHOC_MASTER_STATE)) {
+ /* enable update TSF */
+ val8 = rtw_read8(padapter, reg_bcn_ctl);
+ val8 &= ~DIS_TSF_UDT;
+ rtw_write8(padapter, reg_bcn_ctl, val8);
+ }
+
+ value_rcr |= rcr_clear_bit;
+ rtw_write32(padapter, REG_RCR, value_rcr);
+
+ /* Restore original RRSR setting. */
+ rtw_write16(padapter, REG_RRSR, pHalData->RegRRSR);
+ }
+}
+
+static void hw_var_set_mlme_join(struct adapter *padapter, u8 variable, u8 *val)
+{
+ u8 val8;
+ u16 val16;
+ u32 val32;
+ u8 RetryLimit;
+ u8 type;
+ struct hal_com_data *pHalData;
+ struct mlme_priv *pmlmepriv;
+ struct eeprom_priv *pEEPROM;
+
+
+ RetryLimit = 0x30;
+ type = *(u8 *)val;
+ pHalData = GET_HAL_DATA(padapter);
+ pmlmepriv = &padapter->mlmepriv;
+ pEEPROM = GET_EEPROM_EFUSE_PRIV(padapter);
+
+ if (type == 0) { /* prepare to join */
+ /* enable rx of data frames. Accept all data frames */
+ /* rtw_write32(padapter, REG_RCR, rtw_read32(padapter, REG_RCR)|RCR_ADF); */
+ rtw_write16(padapter, REG_RXFLTMAP2, 0xFFFF);
+
+ val32 = rtw_read32(padapter, REG_RCR);
+ if (padapter->in_cta_test)
+ val32 &= ~(RCR_CBSSID_DATA | RCR_CBSSID_BCN);/* RCR_ADF */
+ else
+ val32 |= RCR_CBSSID_DATA|RCR_CBSSID_BCN;
+ rtw_write32(padapter, REG_RCR, val32);
+
+ if (check_fwstate(pmlmepriv, WIFI_STATION_STATE) == true)
+ RetryLimit = (pEEPROM->CustomerID == RT_CID_CCX) ? 7 : 48;
+ else /* Ad-hoc Mode */
+ RetryLimit = 0x7;
+ } else if (type == 1) /* joinbss_event call back when join res < 0 */
+ rtw_write16(padapter, REG_RXFLTMAP2, 0x00);
+ else if (type == 2) { /* sta add event call back */
+ /* enable update TSF */
+ val8 = rtw_read8(padapter, REG_BCN_CTRL);
+ val8 &= ~DIS_TSF_UDT;
+ rtw_write8(padapter, REG_BCN_CTRL, val8);
+
+ if (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE|WIFI_ADHOC_MASTER_STATE))
+ RetryLimit = 0x7;
+ }
+
+ val16 = (RetryLimit << RETRY_LIMIT_SHORT_SHIFT) | (RetryLimit << RETRY_LIMIT_LONG_SHIFT);
+ rtw_write16(padapter, REG_RL, val16);
+}
+
+void CCX_FwC2HTxRpt_8723b(struct adapter *padapter, u8 *pdata, u8 len)
+{
+ u8 seq_no;
+
+#define GET_8723B_C2H_TX_RPT_LIFE_TIME_OVER(_Header) LE_BITS_TO_1BYTE((_Header + 0), 6, 1)
+#define GET_8723B_C2H_TX_RPT_RETRY_OVER(_Header) LE_BITS_TO_1BYTE((_Header + 0), 7, 1)
+
+ /* DBG_871X("%s, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x\n", __func__, */
+ /* *pdata, *(pdata+1), *(pdata+2), *(pdata+3), *(pdata+4), *(pdata+5), *(pdata+6), *(pdata+7)); */
+
+ seq_no = *(pdata+6);
+
+ if (GET_8723B_C2H_TX_RPT_RETRY_OVER(pdata) || GET_8723B_C2H_TX_RPT_LIFE_TIME_OVER(pdata)) {
+ rtw_ack_tx_done(&padapter->xmitpriv, RTW_SCTX_DONE_CCX_PKT_FAIL);
+ }
+/*
+ else if (seq_no != padapter->xmitpriv.seq_no) {
+ DBG_871X("tx_seq_no =%d, rpt_seq_no =%d\n", padapter->xmitpriv.seq_no, seq_no);
+ rtw_ack_tx_done(&padapter->xmitpriv, RTW_SCTX_DONE_CCX_PKT_FAIL);
+ }
+*/
+ else
+ rtw_ack_tx_done(&padapter->xmitpriv, RTW_SCTX_DONE_SUCCESS);
+}
+
+s32 c2h_id_filter_ccx_8723b(u8 *buf)
+{
+ struct c2h_evt_hdr_88xx *c2h_evt = (struct c2h_evt_hdr_88xx *)buf;
+ s32 ret = false;
+ if (c2h_evt->id == C2H_CCX_TX_RPT)
+ ret = true;
+
+ return ret;
+}
+
+
+s32 c2h_handler_8723b(struct adapter *padapter, u8 *buf)
+{
+ struct c2h_evt_hdr_88xx *pC2hEvent = (struct c2h_evt_hdr_88xx *)buf;
+ s32 ret = _SUCCESS;
+ u8 index = 0;
+
+ if (pC2hEvent == NULL) {
+ DBG_8192C("%s(): pC2hEventis NULL\n", __func__);
+ ret = _FAIL;
+ goto exit;
+ }
+
+ switch (pC2hEvent->id) {
+ case C2H_AP_RPT_RSP:
+ break;
+ case C2H_DBG:
+ {
+ RT_TRACE(_module_hal_init_c_, _drv_info_, ("c2h_handler_8723b: %s\n", pC2hEvent->payload));
+ }
+ break;
+
+ case C2H_CCX_TX_RPT:
+/* CCX_FwC2HTxRpt(padapter, QueueID, pC2hEvent->payload); */
+ break;
+
+ case C2H_EXT_RA_RPT:
+/* C2HExtRaRptHandler(padapter, pC2hEvent->payload, C2hEvent.CmdLen); */
+ break;
+
+ case C2H_HW_INFO_EXCH:
+ RT_TRACE(_module_hal_init_c_, _drv_info_, ("[BT], C2H_HW_INFO_EXCH\n"));
+ for (index = 0; index < pC2hEvent->plen; index++) {
+ RT_TRACE(_module_hal_init_c_, _drv_info_, ("[BT], tmpBuf[%d]= 0x%x\n", index, pC2hEvent->payload[index]));
+ }
+ break;
+
+ case C2H_8723B_BT_INFO:
+ rtw_btcoex_BtInfoNotify(padapter, pC2hEvent->plen, pC2hEvent->payload);
+ break;
+
+ default:
+ break;
+ }
+
+ /* Clear event to notify FW we have read the command. */
+ /* Note: */
+ /* If this field isn't clear, the FW won't update the next command message. */
+/* rtw_write8(padapter, REG_C2HEVT_CLEAR, C2H_EVT_HOST_CLOSE); */
+exit:
+ return ret;
+}
+
+static void process_c2h_event(struct adapter *padapter, PC2H_EVT_HDR pC2hEvent, u8 *c2hBuf)
+{
+ u8 index = 0;
+
+ if (c2hBuf == NULL) {
+ DBG_8192C("%s c2hbuff is NULL\n", __func__);
+ return;
+ }
+
+ switch (pC2hEvent->CmdID) {
+ case C2H_AP_RPT_RSP:
+ break;
+ case C2H_DBG:
+ {
+ RT_TRACE(_module_hal_init_c_, _drv_info_, ("C2HCommandHandler: %s\n", c2hBuf));
+ }
+ break;
+
+ case C2H_CCX_TX_RPT:
+/* CCX_FwC2HTxRpt(padapter, QueueID, tmpBuf); */
+ break;
+
+ case C2H_EXT_RA_RPT:
+/* C2HExtRaRptHandler(padapter, tmpBuf, C2hEvent.CmdLen); */
+ break;
+
+ case C2H_HW_INFO_EXCH:
+ RT_TRACE(_module_hal_init_c_, _drv_info_, ("[BT], C2H_HW_INFO_EXCH\n"));
+ for (index = 0; index < pC2hEvent->CmdLen; index++) {
+ RT_TRACE(_module_hal_init_c_, _drv_info_, ("[BT], tmpBuf[%d]= 0x%x\n", index, c2hBuf[index]));
+ }
+ break;
+
+ case C2H_8723B_BT_INFO:
+ rtw_btcoex_BtInfoNotify(padapter, pC2hEvent->CmdLen, c2hBuf);
+ break;
+
+ default:
+ break;
+ }
+}
+
+void C2HPacketHandler_8723B(struct adapter *padapter, u8 *pbuffer, u16 length)
+{
+ C2H_EVT_HDR C2hEvent;
+ u8 *tmpBuf = NULL;
+#ifdef CONFIG_WOWLAN
+ struct pwrctrl_priv *pwrpriv = adapter_to_pwrctl(padapter);
+
+ if (pwrpriv->wowlan_mode == true) {
+ DBG_871X("%s(): return because wowolan_mode ==true! CMDID =%d\n", __func__, pbuffer[0]);
+ return;
+ }
+#endif
+ C2hEvent.CmdID = pbuffer[0];
+ C2hEvent.CmdSeq = pbuffer[1];
+ C2hEvent.CmdLen = length-2;
+ tmpBuf = pbuffer+2;
+
+ /* DBG_871X("%s C2hEvent.CmdID:%x C2hEvent.CmdLen:%x C2hEvent.CmdSeq:%x\n", */
+ /* __func__, C2hEvent.CmdID, C2hEvent.CmdLen, C2hEvent.CmdSeq); */
+ RT_PRINT_DATA(_module_hal_init_c_, _drv_notice_, "C2HPacketHandler_8723B(): Command Content:\n", tmpBuf, C2hEvent.CmdLen);
+
+ process_c2h_event(padapter, &C2hEvent, tmpBuf);
+ /* c2h_handler_8723b(padapter,&C2hEvent); */
+ return;
+}
+
+void SetHwReg8723B(struct adapter *padapter, u8 variable, u8 *val)
+{
+ struct hal_com_data *pHalData = GET_HAL_DATA(padapter);
+ u8 val8;
+ u32 val32;
+
+ switch (variable) {
+ case HW_VAR_MEDIA_STATUS:
+ val8 = rtw_read8(padapter, MSR) & 0x0c;
+ val8 |= *val;
+ rtw_write8(padapter, MSR, val8);
+ break;
+
+ case HW_VAR_MEDIA_STATUS1:
+ val8 = rtw_read8(padapter, MSR) & 0x03;
+ val8 |= *val << 2;
+ rtw_write8(padapter, MSR, val8);
+ break;
+
+ case HW_VAR_SET_OPMODE:
+ hw_var_set_opmode(padapter, variable, val);
+ break;
+
+ case HW_VAR_MAC_ADDR:
+ hw_var_set_macaddr(padapter, variable, val);
+ break;
+
+ case HW_VAR_BSSID:
+ hw_var_set_bssid(padapter, variable, val);
+ break;
+
+ case HW_VAR_BASIC_RATE:
+ {
+ struct mlme_ext_info *mlmext_info = &padapter->mlmeextpriv.mlmext_info;
+ u16 input_b = 0, masked = 0, ioted = 0, BrateCfg = 0;
+ u16 rrsr_2g_force_mask = (RRSR_11M|RRSR_5_5M|RRSR_1M);
+ u16 rrsr_2g_allow_mask = (RRSR_24M|RRSR_12M|RRSR_6M|RRSR_CCK_RATES);
+
+ HalSetBrateCfg(padapter, val, &BrateCfg);
+ input_b = BrateCfg;
+
+ /* apply force and allow mask */
+ BrateCfg |= rrsr_2g_force_mask;
+ BrateCfg &= rrsr_2g_allow_mask;
+ masked = BrateCfg;
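+
+ /*
+ * Example: 9M and 18M fall out of the 2.4G allow mask above (only
+ * 6/12/24M and the CCK rates survive), while the force mask
+ * guarantees 1M/5.5M/11M are always present.
+ */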
+
+ #ifdef CONFIG_CMCC_TEST
+ BrateCfg |= (RRSR_11M|RRSR_5_5M|RRSR_1M); /* use 11M to send ACK */
+ BrateCfg |= (RRSR_24M|RRSR_18M|RRSR_12M); /* CMCC_OFDM_ACK 12/18/24M */
+ #endif
+
+ /* IOT consideration */
+ if (mlmext_info->assoc_AP_vendor == HT_IOT_PEER_CISCO) {
+ /* if peer is cisco and didn't use ofdm rate, we enable 6M ack */
+ if ((BrateCfg & (RRSR_24M|RRSR_12M|RRSR_6M)) == 0)
+ BrateCfg |= RRSR_6M;
+ }
+ ioted = BrateCfg;
+
+ pHalData->BasicRateSet = BrateCfg;
+
+ DBG_8192C("HW_VAR_BASIC_RATE: %#x -> %#x -> %#x\n", input_b, masked, ioted);
+
+ /* Set RRSR rate table. */
+ rtw_write16(padapter, REG_RRSR, BrateCfg);
+ rtw_write8(padapter, REG_RRSR+2, rtw_read8(padapter, REG_RRSR+2)&0xf0);
+ }
+ break;
+
+ case HW_VAR_TXPAUSE:
+ rtw_write8(padapter, REG_TXPAUSE, *val);
+ break;
+
+ case HW_VAR_BCN_FUNC:
+ hw_var_set_bcn_func(padapter, variable, val);
+ break;
+
+ case HW_VAR_CORRECT_TSF:
+ hw_var_set_correct_tsf(padapter, variable, val);
+ break;
+
+ case HW_VAR_CHECK_BSSID:
+ {
+ u32 val32;
+ val32 = rtw_read32(padapter, REG_RCR);
+ if (*val)
+ val32 |= RCR_CBSSID_DATA|RCR_CBSSID_BCN;
+ else
+ val32 &= ~(RCR_CBSSID_DATA|RCR_CBSSID_BCN);
+ rtw_write32(padapter, REG_RCR, val32);
+ }
+ break;
+
+ case HW_VAR_MLME_DISCONNECT:
+ hw_var_set_mlme_disconnect(padapter, variable, val);
+ break;
+
+ case HW_VAR_MLME_SITESURVEY:
+ hw_var_set_mlme_sitesurvey(padapter, variable, val);
+
+ rtw_btcoex_ScanNotify(padapter, *val?true:false);
+ break;
+
+ case HW_VAR_MLME_JOIN:
+ hw_var_set_mlme_join(padapter, variable, val);
+
+ switch (*val) {
+ case 0:
+ /* prepare to join */
+ rtw_btcoex_ConnectNotify(padapter, true);
+ break;
+ case 1:
+ /* joinbss_event callback when join res < 0 */
+ rtw_btcoex_ConnectNotify(padapter, false);
+ break;
+ case 2:
+ /* sta add event callback */
+/* rtw_btcoex_MediaStatusNotify(padapter, RT_MEDIA_CONNECT); */
+ break;
+ }
+ break;
+
+ case HW_VAR_ON_RCR_AM:
+ val32 = rtw_read32(padapter, REG_RCR);
+ val32 |= RCR_AM;
+ rtw_write32(padapter, REG_RCR, val32);
+ DBG_8192C("%s, %d, RCR = %x\n", __func__, __LINE__, rtw_read32(padapter, REG_RCR));
+ break;
+
+ case HW_VAR_OFF_RCR_AM:
+ val32 = rtw_read32(padapter, REG_RCR);
+ val32 &= ~RCR_AM;
+ rtw_write32(padapter, REG_RCR, val32);
+ DBG_8192C("%s, %d, RCR = %x\n", __func__, __LINE__, rtw_read32(padapter, REG_RCR));
+ break;
+
+ case HW_VAR_BEACON_INTERVAL:
+ rtw_write16(padapter, REG_BCN_INTERVAL, *((u16 *)val));
+ break;
+
+ case HW_VAR_SLOT_TIME:
+ rtw_write8(padapter, REG_SLOT, *val);
+ break;
+
+ case HW_VAR_RESP_SIFS:
+ /* SIFS_Timer = 0x0a0a0808; */
+ /* RESP_SIFS for CCK */
+ rtw_write8(padapter, REG_RESP_SIFS_CCK, val[0]); /* SIFS_T2T_CCK (0x08) */
+ rtw_write8(padapter, REG_RESP_SIFS_CCK+1, val[1]); /* SIFS_R2T_CCK(0x08) */
+ /* RESP_SIFS for OFDM */
+ rtw_write8(padapter, REG_RESP_SIFS_OFDM, val[2]); /* SIFS_T2T_OFDM (0x0a) */
+ rtw_write8(padapter, REG_RESP_SIFS_OFDM+1, val[3]); /* SIFS_R2T_OFDM(0x0a) */
+ break;
+
+ case HW_VAR_ACK_PREAMBLE:
+ {
+ u8 regTmp;
+ u8 bShortPreamble = *val;
+
+ /* Joseph marked out for Netgear 3500 TKIP channel 7 issue. (Temporarily) */
+ /* regTmp = (pHalData->nCur40MhzPrimeSC)<<5; */
+ regTmp = 0;
+ if (bShortPreamble)
+ regTmp |= 0x80;
+ rtw_write8(padapter, REG_RRSR+2, regTmp);
+ }
+ break;
+
+ case HW_VAR_CAM_EMPTY_ENTRY:
+ {
+ u8 ucIndex = *val;
+ u8 i;
+ u32 ulCommand = 0;
+ u32 ulContent = 0;
+ u32 ulEncAlgo = CAM_AES;
+
+ for (i = 0; i < CAM_CONTENT_COUNT; i++) {
+ /* fill the id in the CAM config 2-byte field */
+ if (i == 0) {
+ ulContent |= (ucIndex & 0x03) | ((u16)(ulEncAlgo)<<2);
+ /* ulContent |= CAM_VALID; */
+ } else
+ ulContent = 0;
+
+ /* polling bit, and No Write enable, and address */
+ ulCommand = CAM_CONTENT_COUNT*ucIndex+i;
+ ulCommand = ulCommand | CAM_POLLINIG | CAM_WRITE;
+ /* writing content 0 is equal to marking the entry invalid */
+ rtw_write32(padapter, WCAMI, ulContent); /* mdelay(40); */
+ /* RT_TRACE(COMP_SEC, DBG_LOUD, ("CAM_empty_entry(): WRITE A4: %lx\n", ulContent)); */
+ rtw_write32(padapter, RWCAM, ulCommand); /* mdelay(40); */
+ /* RT_TRACE(COMP_SEC, DBG_LOUD, ("CAM_empty_entry(): WRITE A0: %lx\n", ulCommand)); */
+ }
+ }
+ break;
+
+ case HW_VAR_CAM_INVALID_ALL:
+ rtw_write32(padapter, RWCAM, BIT(31)|BIT(30));
+ break;
+
+ case HW_VAR_CAM_WRITE:
+ {
+ u32 cmd;
+ u32 *cam_val = (u32 *)val;
+
+ rtw_write32(padapter, WCAMI, cam_val[0]);
+
+ cmd = CAM_POLLINIG | CAM_WRITE | cam_val[1];
+ rtw_write32(padapter, RWCAM, cmd);
+ }
+ break;
+
+ case HW_VAR_AC_PARAM_VO:
+ rtw_write32(padapter, REG_EDCA_VO_PARAM, *((u32 *)val));
+ break;
+
+ case HW_VAR_AC_PARAM_VI:
+ rtw_write32(padapter, REG_EDCA_VI_PARAM, *((u32 *)val));
+ break;
+
+ case HW_VAR_AC_PARAM_BE:
+ pHalData->AcParam_BE = ((u32 *)(val))[0];
+ rtw_write32(padapter, REG_EDCA_BE_PARAM, *((u32 *)val));
+ break;
+
+ case HW_VAR_AC_PARAM_BK:
+ rtw_write32(padapter, REG_EDCA_BK_PARAM, *((u32 *)val));
+ break;
+
+ case HW_VAR_ACM_CTRL:
+ {
+ u8 ctrl = *((u8 *)val);
+ u8 hwctrl = 0;
+
+ if (ctrl != 0) {
+ hwctrl |= AcmHw_HwEn;
+
+ if (ctrl & BIT(1)) /* BE */
+ hwctrl |= AcmHw_BeqEn;
+
+ if (ctrl & BIT(2)) /* VI */
+ hwctrl |= AcmHw_ViqEn;
+
+ if (ctrl & BIT(3)) /* VO */
+ hwctrl |= AcmHw_VoqEn;
+ }
+
+ DBG_8192C("[HW_VAR_ACM_CTRL] Write 0x%02X\n", hwctrl);
+ rtw_write8(padapter, REG_ACMHWCTRL, hwctrl);
+ }
+ break;
+
+ case HW_VAR_AMPDU_FACTOR:
+ {
+ u32 AMPDULen = (*((u8 *)val));
+
+ if (AMPDULen < HT_AGG_SIZE_32K)
+ AMPDULen = (0x2000 << (*((u8 *)val)))-1;
+ else
+ AMPDULen = 0x7fff;
+
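+ /*
+ * Example: a factor of 1 gives (0x2000 << 1) - 1 = 0x3fff (16K - 1),
+ * while anything at or above HT_AGG_SIZE_32K falls back to the
+ * 0x7fff (32K - 1) cap.
+ */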
+ rtw_write32(padapter, REG_AMPDU_MAX_LENGTH_8723B, AMPDULen);
+ }
+ break;
+
+ case HW_VAR_H2C_FW_PWRMODE:
+ {
+ u8 psmode = *val;
+
+ /* Force leaving RF low power mode for 1T1R to prevent conflicting settings in Fw power */
+ /* saving sequence. 2010.06.07. Added by tynli. Suggested by SD3 yschang. */
+ if (psmode != PS_MODE_ACTIVE) {
+ ODM_RF_Saving(&pHalData->odmpriv, true);
+ }
+
+ /* if (psmode != PS_MODE_ACTIVE) { */
+ /* rtl8723b_set_lowpwr_lps_cmd(padapter, true); */
+ /* else { */
+ /* rtl8723b_set_lowpwr_lps_cmd(padapter, false); */
+ /* */
+ rtl8723b_set_FwPwrMode_cmd(padapter, psmode);
+ }
+ break;
+ case HW_VAR_H2C_PS_TUNE_PARAM:
+ rtl8723b_set_FwPsTuneParam_cmd(padapter);
+ break;
+
+ case HW_VAR_H2C_FW_JOINBSSRPT:
+ rtl8723b_set_FwJoinBssRpt_cmd(padapter, *val);
+ break;
+
+ case HW_VAR_INITIAL_GAIN:
+ {
+ DIG_T *pDigTable = &pHalData->odmpriv.DM_DigTable;
+ u32 rx_gain = *(u32 *)val;
+
+ if (rx_gain == 0xff) {/* restore rx gain */
+ ODM_Write_DIG(&pHalData->odmpriv, pDigTable->BackupIGValue);
+ } else {
+ pDigTable->BackupIGValue = pDigTable->CurIGValue;
+ ODM_Write_DIG(&pHalData->odmpriv, rx_gain);
+ }
+ }
+ break;
+
+ case HW_VAR_EFUSE_USAGE:
+ pHalData->EfuseUsedPercentage = *val;
+ break;
+
+ case HW_VAR_EFUSE_BYTES:
+ pHalData->EfuseUsedBytes = *((u16 *)val);
+ break;
+
+ case HW_VAR_EFUSE_BT_USAGE:
+#ifdef HAL_EFUSE_MEMORY
+ pHalData->EfuseHal.BTEfuseUsedPercentage = *val;
+#endif
+ break;
+
+ case HW_VAR_EFUSE_BT_BYTES:
+#ifdef HAL_EFUSE_MEMORY
+ pHalData->EfuseHal.BTEfuseUsedBytes = *((u16 *)val);
+#else
+ BTEfuseUsedBytes = *((u16 *)val);
+#endif
+ break;
+
+ case HW_VAR_FIFO_CLEARN_UP:
+ {
+ #define RW_RELEASE_EN BIT(18)
+ #define RXDMA_IDLE BIT(17)
+
+ struct pwrctrl_priv *pwrpriv = adapter_to_pwrctl(padapter);
+ u8 trycnt = 100;
+
+ /* pause tx */
+ rtw_write8(padapter, REG_TXPAUSE, 0xff);
+
+ /* keep sn */
+ padapter->xmitpriv.nqos_ssn = rtw_read16(padapter, REG_NQOS_SEQ);
+
+ if (pwrpriv->bkeepfwalive != true) {
+ /* RX DMA stop */
+ val32 = rtw_read32(padapter, REG_RXPKT_NUM);
+ val32 |= RW_RELEASE_EN;
+ rtw_write32(padapter, REG_RXPKT_NUM, val32);
+ do {
+ val32 = rtw_read32(padapter, REG_RXPKT_NUM);
+ val32 &= RXDMA_IDLE;
+ if (val32)
+ break;
+
+ DBG_871X("%s: [HW_VAR_FIFO_CLEARN_UP] val =%x times:%d\n", __func__, val32, trycnt);
+ } while (--trycnt);
+
+ if (trycnt == 0) {
+ DBG_8192C("[HW_VAR_FIFO_CLEARN_UP] Stop RX DMA failed......\n");
+ }
+
+ /* RQPN Load 0 */
+ rtw_write16(padapter, REG_RQPN_NPQ, 0);
+ rtw_write32(padapter, REG_RQPN, 0x80000000);
+ mdelay(2);
+ }
+ }
+ break;
+
+ case HW_VAR_APFM_ON_MAC:
+ pHalData->bMacPwrCtrlOn = *val;
+ DBG_8192C("%s: bMacPwrCtrlOn =%d\n", __func__, pHalData->bMacPwrCtrlOn);
+ break;
+
+ case HW_VAR_NAV_UPPER:
+ {
+ u32 usNavUpper = *((u32 *)val);
+
+ if (usNavUpper > HAL_NAV_UPPER_UNIT_8723B * 0xFF) {
+ RT_TRACE(_module_hal_init_c_, _drv_notice_, ("The setting value (0x%08X us) of NAV_UPPER is larger than (%d * 0xFF)!!!\n", usNavUpper, HAL_NAV_UPPER_UNIT_8723B));
+ break;
+ }
+
+ /* The value of ((usNavUpper + HAL_NAV_UPPER_UNIT_8723B - 1) / HAL_NAV_UPPER_UNIT_8723B) */
+ /* is getting the upper integer. */
+ usNavUpper = (usNavUpper + HAL_NAV_UPPER_UNIT_8723B - 1) / HAL_NAV_UPPER_UNIT_8723B;
+ rtw_write8(padapter, REG_NAV_UPPER, (u8)usNavUpper);
+ }
+ break;
+
+ case HW_VAR_H2C_MEDIA_STATUS_RPT:
+ {
+ u16 mstatus_rpt = (*(u16 *)val);
+ u8 mstatus, macId;
+
+ mstatus = (u8) (mstatus_rpt & 0xFF);
+ macId = (u8)(mstatus_rpt >> 8);
+ rtl8723b_set_FwMediaStatusRpt_cmd(padapter, mstatus, macId);
+ }
+ break;
+ case HW_VAR_BCN_VALID:
+ {
+ /* BCN_VALID, BIT16 of REG_TDECTRL = BIT0 of REG_TDECTRL+2, write 1 to clear, Clear by sw */
+ val8 = rtw_read8(padapter, REG_TDECTRL+2);
+ val8 |= BIT(0);
+ rtw_write8(padapter, REG_TDECTRL+2, val8);
+ }
+ break;
+
+ case HW_VAR_DL_BCN_SEL:
+ {
+ /* SW_BCN_SEL - Port0 */
+ val8 = rtw_read8(padapter, REG_DWBCN1_CTRL_8723B+2);
+ val8 &= ~BIT(4);
+ rtw_write8(padapter, REG_DWBCN1_CTRL_8723B+2, val8);
+ }
+ break;
+
+ case HW_VAR_DO_IQK:
+ pHalData->bNeedIQK = true;
+ break;
+
+ case HW_VAR_DL_RSVD_PAGE:
+ if (check_fwstate(&padapter->mlmepriv, WIFI_AP_STATE) == true)
+ rtl8723b_download_BTCoex_AP_mode_rsvd_page(padapter);
+ else
+ rtl8723b_download_rsvd_page(padapter, RT_MEDIA_CONNECT);
+ break;
+
+ case HW_VAR_MACID_SLEEP:
+ /* Input is MACID */
+ val32 = *(u32 *)val;
+ if (val32 > 31) {
+ DBG_8192C(FUNC_ADPT_FMT ": [HW_VAR_MACID_SLEEP] Invalid macid(%d)\n",
+ FUNC_ADPT_ARG(padapter), val32);
+ break;
+ }
+ val8 = (u8)val32; /* macid is between 0~31 */
+
+ val32 = rtw_read32(padapter, REG_MACID_SLEEP);
+ DBG_8192C(FUNC_ADPT_FMT ": [HW_VAR_MACID_SLEEP] macid =%d, org MACID_SLEEP = 0x%08X\n",
+ FUNC_ADPT_ARG(padapter), val8, val32);
+ if (val32 & BIT(val8))
+ break;
+ val32 |= BIT(val8);
+ rtw_write32(padapter, REG_MACID_SLEEP, val32);
+ break;
+
+ case HW_VAR_MACID_WAKEUP:
+ /* Input is MACID */
+ val32 = *(u32 *)val;
+ if (val32 > 31) {
+ DBG_8192C(FUNC_ADPT_FMT ": [HW_VAR_MACID_WAKEUP] Invalid macid(%d)\n",
+ FUNC_ADPT_ARG(padapter), val32);
+ break;
+ }
+ val8 = (u8)val32; /* macid is between 0~31 */
+
+ val32 = rtw_read32(padapter, REG_MACID_SLEEP);
+ DBG_8192C(FUNC_ADPT_FMT ": [HW_VAR_MACID_WAKEUP] macid =%d, org MACID_SLEEP = 0x%08X\n",
+ FUNC_ADPT_ARG(padapter), val8, val32);
+ if (!(val32 & BIT(val8)))
+ break;
+ val32 &= ~BIT(val8);
+ rtw_write32(padapter, REG_MACID_SLEEP, val32);
+ break;
+
+ default:
+ SetHwReg(padapter, variable, val);
+ break;
+ }
+}
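+
+/*
+ * Usage sketch (illustrative values only): callers in the common code reach
+ * this handler indirectly through rtw_hal_set_hwreg(), e.g.
+ *
+ * u8 join_type = 0;
+ * rtw_hal_set_hwreg(padapter, HW_VAR_MLME_JOIN, &join_type);
+ *
+ * which lands in the HW_VAR_MLME_JOIN case above with *val == 0
+ * ("prepare to join").
+ */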
+
+void GetHwReg8723B(struct adapter *padapter, u8 variable, u8 *val)
+{
+ struct hal_com_data *pHalData = GET_HAL_DATA(padapter);
+ u8 val8;
+ u16 val16;
+
+ switch (variable) {
+ case HW_VAR_TXPAUSE:
+ *val = rtw_read8(padapter, REG_TXPAUSE);
+ break;
+
+ case HW_VAR_BCN_VALID:
+ {
+ /* BCN_VALID, BIT16 of REG_TDECTRL = BIT0 of REG_TDECTRL+2 */
+ val8 = rtw_read8(padapter, REG_TDECTRL+2);
+ *val = (BIT(0) & val8) ? true : false;
+ }
+ break;
+
+ case HW_VAR_FWLPS_RF_ON:
+ {
+ /* When we halt the NIC, we should check if FW LPS has been left. */
+ u32 valRCR;
+
+ if (
+ (padapter->bSurpriseRemoved == true) ||
+ (adapter_to_pwrctl(padapter)->rf_pwrstate == rf_off)
+ ) {
+ /* If it is in HW/SW Radio OFF or IPS state, we do not check Fw LPS Leave, */
+ /* because the Fw is unloaded. */
+ *val = true;
+ } else {
+ valRCR = rtw_read32(padapter, REG_RCR);
+ valRCR &= 0x00070000;
+ if (valRCR)
+ *val = false;
+ else
+ *val = true;
+ }
+ }
+ break;
+
+ case HW_VAR_EFUSE_USAGE:
+ *val = pHalData->EfuseUsedPercentage;
+ break;
+
+ case HW_VAR_EFUSE_BYTES:
+ *((u16 *)val) = pHalData->EfuseUsedBytes;
+ break;
+
+ case HW_VAR_EFUSE_BT_USAGE:
+#ifdef HAL_EFUSE_MEMORY
+ *val = pHalData->EfuseHal.BTEfuseUsedPercentage;
+#endif
+ break;
+
+ case HW_VAR_EFUSE_BT_BYTES:
+#ifdef HAL_EFUSE_MEMORY
+ *((u16 *)val) = pHalData->EfuseHal.BTEfuseUsedBytes;
+#else
+ *((u16 *)val) = BTEfuseUsedBytes;
+#endif
+ break;
+
+ case HW_VAR_APFM_ON_MAC:
+ *val = pHalData->bMacPwrCtrlOn;
+ break;
+ case HW_VAR_CHK_HI_QUEUE_EMPTY:
+ val16 = rtw_read16(padapter, REG_TXPKT_EMPTY);
+ *val = (val16 & BIT(10)) ? true:false;
+ break;
+#ifdef CONFIG_WOWLAN
+ case HW_VAR_RPWM_TOG:
+ *val = rtw_read8(padapter, SDIO_LOCAL_BASE|SDIO_REG_HRPWM1) & BIT7;
+ break;
+ case HW_VAR_WAKEUP_REASON:
+ *val = rtw_read8(padapter, REG_WOWLAN_WAKE_REASON);
+ if (*val == 0xEA)
+ *val = 0;
+ break;
+ case HW_VAR_SYS_CLKR:
+ *val = rtw_read8(padapter, REG_SYS_CLKR);
+ break;
+#endif
+ default:
+ GetHwReg(padapter, variable, val);
+ break;
+ }
+}
+
+/*
+ *Description:
+ * Change default setting of specified variable.
+ */
+u8 SetHalDefVar8723B(struct adapter *padapter, enum HAL_DEF_VARIABLE variable, void *pval)
+{
+ struct hal_com_data *pHalData;
+ u8 bResult;
+
+
+ pHalData = GET_HAL_DATA(padapter);
+ bResult = _SUCCESS;
+
+ switch (variable) {
+ default:
+ bResult = SetHalDefVar(padapter, variable, pval);
+ break;
+ }
+
+ return bResult;
+}
+
+/*
+ *Description:
+ * Query setting of specified variable.
+ */
+u8 GetHalDefVar8723B(struct adapter *padapter, enum HAL_DEF_VARIABLE variable, void *pval)
+{
+ struct hal_com_data *pHalData;
+ u8 bResult;
+
+
+ pHalData = GET_HAL_DATA(padapter);
+ bResult = _SUCCESS;
+
+ switch (variable) {
+ case HAL_DEF_MAX_RECVBUF_SZ:
+ *((u32 *)pval) = MAX_RECVBUF_SZ;
+ break;
+
+ case HAL_DEF_RX_PACKET_OFFSET:
+ *((u32 *)pval) = RXDESC_SIZE + DRVINFO_SZ*8;
+ break;
+
+ case HW_VAR_MAX_RX_AMPDU_FACTOR:
+ /* Stanley@BB.SD3 suggests 16K can get stable performance */
+ /* The experiment was done on SDIO interface */
+ /* coding by Lucas@20130730 */
+ *(u32 *)pval = MAX_AMPDU_FACTOR_16K;
+ break;
+ case HAL_DEF_TX_LDPC:
+ case HAL_DEF_RX_LDPC:
+ *((u8 *)pval) = false;
+ break;
+ case HAL_DEF_TX_STBC:
+ *((u8 *)pval) = 0;
+ break;
+ case HAL_DEF_RX_STBC:
+ *((u8 *)pval) = 1;
+ break;
+ case HAL_DEF_EXPLICIT_BEAMFORMER:
+ case HAL_DEF_EXPLICIT_BEAMFORMEE:
+ *((u8 *)pval) = false;
+ break;
+
+ case HW_DEF_RA_INFO_DUMP:
+ {
+ u8 mac_id = *(u8 *)pval;
+ u32 cmd;
+ u32 ra_info1, ra_info2;
+ u32 rate_mask1, rate_mask2;
+ u8 curr_tx_rate, curr_tx_sgi, hight_rate, lowest_rate;
+
+ DBG_8192C("============ RA status check Mac_id:%d ===================\n", mac_id);
+
+ cmd = 0x40000100 | mac_id;
+ rtw_write32(padapter, REG_HMEBOX_DBG_2_8723B, cmd);
+ msleep(10);
+ ra_info1 = rtw_read32(padapter, 0x2F0);
+ curr_tx_rate = ra_info1&0x7F;
+ curr_tx_sgi = (ra_info1>>7)&0x01;
+ DBG_8192C("[ ra_info1:0x%08x ] =>cur_tx_rate = %s, cur_sgi:%d, PWRSTS = 0x%02x \n",
+ ra_info1,
+ HDATA_RATE(curr_tx_rate),
+ curr_tx_sgi,
+ (ra_info1>>8) & 0x07);
+
+ cmd = 0x40000400 | mac_id;
+ rtw_write32(padapter, REG_HMEBOX_DBG_2_8723B, cmd);
+ msleep(10);
+ ra_info1 = rtw_read32(padapter, 0x2F0);
+ ra_info2 = rtw_read32(padapter, 0x2F4);
+ rate_mask1 = rtw_read32(padapter, 0x2F8);
+ rate_mask2 = rtw_read32(padapter, 0x2FC);
+ hight_rate = ra_info2&0xFF;
+ lowest_rate = (ra_info2>>8) & 0xFF;
+
+ DBG_8192C("[ ra_info1:0x%08x ] =>RSSI =%d, BW_setting = 0x%02x, DISRA = 0x%02x, VHT_EN = 0x%02x\n",
+ ra_info1,
+ ra_info1&0xFF,
+ (ra_info1>>8) & 0xFF,
+ (ra_info1>>16) & 0xFF,
+ (ra_info1>>24) & 0xFF);
+
+ DBG_8192C("[ ra_info2:0x%08x ] =>hight_rate =%s, lowest_rate =%s, SGI = 0x%02x, RateID =%d\n",
+ ra_info2,
+ HDATA_RATE(hight_rate),
+ HDATA_RATE(lowest_rate),
+ (ra_info2>>16) & 0xFF,
+ (ra_info2>>24) & 0xFF);
+
+ DBG_8192C("rate_mask2 = 0x%08x, rate_mask1 = 0x%08x\n", rate_mask2, rate_mask1);
+
+ }
+ break;
+
+ case HAL_DEF_TX_PAGE_BOUNDARY:
+ if (!padapter->registrypriv.wifi_spec) {
+ *(u8 *)pval = TX_PAGE_BOUNDARY_8723B;
+ } else {
+ *(u8 *)pval = WMM_NORMAL_TX_PAGE_BOUNDARY_8723B;
+ }
+ break;
+
+ case HAL_DEF_MACID_SLEEP:
+ *(u8 *)pval = true; /* support macid sleep */
+ break;
+
+ default:
+ bResult = GetHalDefVar(padapter, variable, pval);
+ break;
+ }
+
+ return bResult;
+}
+
+#ifdef CONFIG_WOWLAN
+void Hal_DetectWoWMode(struct adapter *padapter)
+{
+ adapter_to_pwrctl(padapter)->bSupportRemoteWakeup = true;
+ DBG_871X("%s\n", __func__);
+}
+#endif /* CONFIG_WOWLAN */
+
+void rtl8723b_start_thread(struct adapter *padapter)
+{
+#ifndef CONFIG_SDIO_TX_TASKLET
+ struct xmit_priv *xmitpriv = &padapter->xmitpriv;
+
+ xmitpriv->SdioXmitThread = kthread_run(rtl8723bs_xmit_thread, padapter, "RTWHALXT");
+ if (IS_ERR(xmitpriv->SdioXmitThread)) {
+ RT_TRACE(_module_hal_xmit_c_, _drv_err_, ("%s: start rtl8723bs_xmit_thread FAIL!!\n", __func__));
+ }
+#endif
+}
+
+void rtl8723b_stop_thread(struct adapter *padapter)
+{
+#ifndef CONFIG_SDIO_TX_TASKLET
+ struct xmit_priv *xmitpriv = &padapter->xmitpriv;
+
+ /* stop xmit_buf_thread */
+ if (xmitpriv->SdioXmitThread) {
+ up(&xmitpriv->SdioXmitSema);
+ down(&xmitpriv->SdioXmitTerminateSema);
+ xmitpriv->SdioXmitThread = NULL;
+ }
+#endif
+}
+
+#if defined(CONFIG_CHECK_BT_HANG)
+extern void check_bt_status_work(void *data);
+void rtl8723bs_init_checkbthang_workqueue(struct adapter *adapter)
+{
+ adapter->priv_checkbt_wq = alloc_workqueue("sdio_wq", 0, 0);
+ INIT_DELAYED_WORK(&adapter->checkbt_work, (void *)check_bt_status_work);
+}
+
+void rtl8723bs_free_checkbthang_workqueue(struct adapter *adapter)
+{
+ if (adapter->priv_checkbt_wq) {
+ cancel_delayed_work_sync(&adapter->checkbt_work);
+ flush_workqueue(adapter->priv_checkbt_wq);
+ destroy_workqueue(adapter->priv_checkbt_wq);
+ adapter->priv_checkbt_wq = NULL;
+ }
+}
+
+void rtl8723bs_cancle_checkbthang_workqueue(struct adapter *adapter)
+{
+ if (adapter->priv_checkbt_wq)
+ cancel_delayed_work_sync(&adapter->checkbt_work);
+}
+
+void rtl8723bs_hal_check_bt_hang(struct adapter *adapter)
+{
+ if (adapter->priv_checkbt_wq)
+ queue_delayed_work(adapter->priv_checkbt_wq, &(adapter->checkbt_work), 0);
+}
+#endif
diff --git a/drivers/staging/rtl8723bs/hal/rtl8723b_phycfg.c b/drivers/staging/rtl8723bs/hal/rtl8723b_phycfg.c
new file mode 100644
index 000000000000..28d1a229c3a6
--- /dev/null
+++ b/drivers/staging/rtl8723bs/hal/rtl8723b_phycfg.c
@@ -0,0 +1,1050 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#define _RTL8723B_PHYCFG_C_
+
+#include <drv_types.h>
+#include <rtw_debug.h>
+#include <rtl8723b_hal.h>
+
+
+/*---------------------------Define Local Constant---------------------------*/
+/* Channel switch:The size of command tables for switch channel*/
+#define MAX_PRECMD_CNT 16
+#define MAX_RFDEPENDCMD_CNT 16
+#define MAX_POSTCMD_CNT 16
+
+#define MAX_DOZE_WAITING_TIMES_9x 64
+
+/**
+* Function: phy_CalculateBitShift
+*
+* OverView: Get shifted position of the BitMask
+*
+* Input:
+* u32 BitMask,
+*
+* Output: none
+* Return: u32 The shifted bit position of the mask
+*/
+static u32 phy_CalculateBitShift(u32 BitMask)
+{
+ u32 i;
+
+ for (i = 0; i <= 31; i++) {
+ if (((BitMask>>i) & 0x1) == 1)
+ break;
+ }
+ return i;
+}
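+
+/*
+ * Example: phy_CalculateBitShift(0x00C00000) returns 22 (the index of the
+ * lowest set bit), so (reg & 0x00C00000) >> 22 extracts the two-bit field
+ * at bits [23:22].
+ */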
+
+
+/**
+* Function: PHY_QueryBBReg
+*
+* OverView: Read "sepcific bits" from BB register
+*
+* Input:
+* struct adapter * Adapter,
+* u32 RegAddr, The target address to be read back
+* u32 BitMask The target bit position in the target address
+* to be read back
+* Output: None
+* Return: u32 Data The readback register value
+* Note: This function is equal to "GetRegSetting" in PHY programming guide
+*/
+u32 PHY_QueryBBReg_8723B(struct adapter *Adapter, u32 RegAddr, u32 BitMask)
+{
+ u32 ReturnValue = 0, OriginalValue, BitShift;
+
+#if (DISABLE_BB_RF == 1)
+ return 0;
+#endif
+
+ /* RT_TRACE(COMP_RF, DBG_TRACE, ("--->PHY_QueryBBReg(): RegAddr(%#lx), BitMask(%#lx)\n", RegAddr, BitMask)); */
+
+ OriginalValue = rtw_read32(Adapter, RegAddr);
+ BitShift = phy_CalculateBitShift(BitMask);
+ ReturnValue = (OriginalValue & BitMask) >> BitShift;
+
+ return ReturnValue;
+
+}
+
+
+/**
+* Function: PHY_SetBBReg
+*
+* OverView: Write "Specific bits" to BB register (page 8~)
+*
+* Input:
+* struct adapter * Adapter,
+* u32 RegAddr, The target address to be modified
+* u32 BitMask The target bit position in the target address
+* to be modified
+* u32 Data The new register value in the target bit position
+* of the target address
+*
+* Output: None
+* Return: None
+* Note: This function is equal to "PutRegSetting" in PHY programming guide
+*/
+
+void PHY_SetBBReg_8723B(
+ struct adapter *Adapter,
+ u32 RegAddr,
+ u32 BitMask,
+ u32 Data
+)
+{
+ /* u16 BBWaitCounter = 0; */
+ u32 OriginalValue, BitShift;
+
+#if (DISABLE_BB_RF == 1)
+ return;
+#endif
+
+ /* RT_TRACE(COMP_RF, DBG_TRACE, ("--->PHY_SetBBReg(): RegAddr(%#lx), BitMask(%#lx), Data(%#lx)\n", RegAddr, BitMask, Data)); */
+
+ if (BitMask != bMaskDWord) { /* if not "double word" write */
+ OriginalValue = rtw_read32(Adapter, RegAddr);
+ BitShift = phy_CalculateBitShift(BitMask);
+ Data = ((OriginalValue & (~BitMask)) | ((Data << BitShift) & BitMask));
+ }
+
+ rtw_write32(Adapter, RegAddr, Data);
+
+}
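+
+/*
+ * Read-modify-write sketch (illustrative mask/value): writing 0x5 with
+ * BitMask 0x0000F000 reads the register, clears bits [15:12] and ORs in
+ * (0x5 << 12); passing bMaskDWord skips the read and overwrites the whole
+ * register, as the check above shows.
+ */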
+
+
+/* */
+/* 2. RF register R/W API */
+/* */
+
+static u32 phy_RFSerialRead_8723B(
+ struct adapter *Adapter, enum RF_PATH eRFPath, u32 Offset
+)
+{
+ u32 retValue = 0;
+ struct hal_com_data *pHalData = GET_HAL_DATA(Adapter);
+ struct bb_register_def *pPhyReg = &pHalData->PHYRegDef[eRFPath];
+ u32 NewOffset;
+ u32 tmplong2;
+ u8 RfPiEnable = 0;
+ u32 MaskforPhySet = 0;
+ int i = 0;
+
+ /* */
+ /* Make sure RF register offset is correct */
+ /* */
+ Offset &= 0xff;
+
+ NewOffset = Offset;
+
+ if (eRFPath == RF_PATH_A) {
+ tmplong2 = PHY_QueryBBReg(Adapter, rFPGA0_XA_HSSIParameter2|MaskforPhySet, bMaskDWord);
+ tmplong2 = (tmplong2 & (~bLSSIReadAddress)) | (NewOffset<<23) | bLSSIReadEdge; /* T65 RF */
+ PHY_SetBBReg(Adapter, rFPGA0_XA_HSSIParameter2|MaskforPhySet, bMaskDWord, tmplong2&(~bLSSIReadEdge));
+ } else {
+ tmplong2 = PHY_QueryBBReg(Adapter, rFPGA0_XB_HSSIParameter2|MaskforPhySet, bMaskDWord);
+ tmplong2 = (tmplong2 & (~bLSSIReadAddress)) | (NewOffset<<23) | bLSSIReadEdge; /* T65 RF */
+ PHY_SetBBReg(Adapter, rFPGA0_XB_HSSIParameter2|MaskforPhySet, bMaskDWord, tmplong2&(~bLSSIReadEdge));
+ }
+
+ tmplong2 = PHY_QueryBBReg(Adapter, rFPGA0_XA_HSSIParameter2|MaskforPhySet, bMaskDWord);
+ PHY_SetBBReg(Adapter, rFPGA0_XA_HSSIParameter2|MaskforPhySet, bMaskDWord, tmplong2 & (~bLSSIReadEdge));
+ PHY_SetBBReg(Adapter, rFPGA0_XA_HSSIParameter2|MaskforPhySet, bMaskDWord, tmplong2 | bLSSIReadEdge);
+
+ udelay(10);
+
+ for (i = 0; i < 2; i++)
+ udelay(MAX_STALL_TIME);
+ udelay(10);
+
+ if (eRFPath == RF_PATH_A)
+ RfPiEnable = (u8)PHY_QueryBBReg(Adapter, rFPGA0_XA_HSSIParameter1|MaskforPhySet, BIT8);
+ else if (eRFPath == RF_PATH_B)
+ RfPiEnable = (u8)PHY_QueryBBReg(Adapter, rFPGA0_XB_HSSIParameter1|MaskforPhySet, BIT8);
+
+ if (RfPiEnable) {
+ /* Read from BBreg8b8, 12 bits for 8190, 20bits for T65 RF */
+ retValue = PHY_QueryBBReg(Adapter, pPhyReg->rfLSSIReadBackPi|MaskforPhySet, bLSSIReadBackData);
+
+ /* RT_DISP(FINIT, INIT_RF, ("Readback from RF-PI : 0x%x\n", retValue)); */
+ } else {
+ /* Read from BBreg8a0, 12 bits for 8190, 20 bits for T65 RF */
+ retValue = PHY_QueryBBReg(Adapter, pPhyReg->rfLSSIReadBack|MaskforPhySet, bLSSIReadBackData);
+
+ /* RT_DISP(FINIT, INIT_RF, ("Readback from RF-SI : 0x%x\n", retValue)); */
+ }
+ return retValue;
+
+}
+
+/**
+* Function: phy_RFSerialWrite_8723B
+*
+* OverView: Write data to RF register (page 8~)
+*
+* Input:
+* struct adapter * Adapter,
+* RF_PATH eRFPath, Radio path of A/B/C/D
+* u32 Offset, The target address to be written
+* u32 Data The new register Data in the target bit position
+* of the target to be written
+*
+* Output: None
+* Return: None
+* Note: There are three types of serial operations:
+* 1. Software serial write
+* 2. Hardware LSSI - Low Speed Serial Interface
+* 3. Hardware HSSI - High Speed Serial Interface write
+* The driver needs to implement (1) and (2).
+* This function is equal to the combination of RF_ReadReg() and RFLSSIRead()
+ *
+ * Note: For RF8256 only
+ * The total count of RTL8256 (Zebra4) registers is around 36, but it only employs a
+ * 4-bit RF address. RTL8256 uses "register mode control bits" (Reg00[12], Reg00[10])
+ * to access register addresses bigger than 0xf. See "Appendix-4 in PHY Configuration
+ * programming guide" for more details.
+ * Thus, we define a sub-function for RTL8256 register address conversion
+ * ===========================================================
+ * Register Mode RegCTL[1] RegCTL[0] Note
+ * (Reg00[12]) (Reg00[10])
+ * ===========================================================
+ * Reg_Mode0 0 x Reg 0 ~15(0x0 ~ 0xf)
+ * ------------------------------------------------------------------
+ * Reg_Mode1 1 0 Reg 16 ~30(0x1 ~ 0xf)
+ * ------------------------------------------------------------------
+ * Reg_Mode2 1 1 Reg 31 ~ 45(0x1 ~ 0xf)
+ * ------------------------------------------------------------------
+ *
+ *2008/09/02 MH Add 92S RF definition
+ *
+ *
+ *
+*/
+static void phy_RFSerialWrite_8723B(
+ struct adapter *Adapter,
+ enum RF_PATH eRFPath,
+ u32 Offset,
+ u32 Data
+)
+{
+ u32 DataAndAddr = 0;
+ struct hal_com_data *pHalData = GET_HAL_DATA(Adapter);
+ struct bb_register_def *pPhyReg = &pHalData->PHYRegDef[eRFPath];
+ u32 NewOffset;
+
+ Offset &= 0xff;
+
+ /* */
+ /* Switch page for 8256 RF IC */
+ /* */
+ NewOffset = Offset;
+
+ /* */
+ /* Put write addr in [5:0] and write data in [31:16] */
+ /* */
+ /* DataAndAddr = (Data<<16) | (NewOffset&0x3f); */
+ DataAndAddr = ((NewOffset<<20) | (Data&0x000fffff)) & 0x0fffffff; /* T65 RF */
+
+ /* */
+ /* Write Operation */
+ /* */
+ PHY_SetBBReg(Adapter, pPhyReg->rf3wireOffset, bMaskDWord, DataAndAddr);
+ /* RTPRINT(FPHY, PHY_RFW, ("RFW-%d Addr[0x%lx]= 0x%lx\n", eRFPath, pPhyReg->rf3wireOffset, DataAndAddr)); */
+
+}
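+
+/*
+ * Example of the 3-wire word built above (illustrative values): Data
+ * 0x12345 written to RF Offset 0x1B packs to ((0x1B << 20) | 0x12345) &
+ * 0x0fffffff = 0x01b12345 before it is handed to rf3wireOffset.
+ */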
+
+
+/**
+* Function: PHY_QueryRFReg
+*
+* OverView: Query "Specific bits" to RF register (page 8~)
+*
+* Input:
+* struct adapter * Adapter,
+* RF_PATH eRFPath, Radio path of A/B/C/D
+* u32 RegAddr, The target address to be read
+* u32 BitMask The target bit position in the target address
+* to be read
+*
+* Output: None
+* Return: u32 Readback value
+* Note: This function is equal to "GetRFRegSetting" in PHY programming guide
+*/
+u32 PHY_QueryRFReg_8723B(
+ struct adapter *Adapter,
+ u8 eRFPath,
+ u32 RegAddr,
+ u32 BitMask
+)
+{
+ u32 Original_Value, Readback_Value, BitShift;
+
+#if (DISABLE_BB_RF == 1)
+ return 0;
+#endif
+
+ Original_Value = phy_RFSerialRead_8723B(Adapter, eRFPath, RegAddr);
+
+ BitShift = phy_CalculateBitShift(BitMask);
+ Readback_Value = (Original_Value & BitMask) >> BitShift;
+
+ return Readback_Value;
+}
+
+/**
+* Function: PHY_SetRFReg
+*
+* OverView: Write "Specific bits" to RF register (page 8~)
+*
+* Input:
+* struct adapter * Adapter,
+* RF_PATH eRFPath, Radio path of A/B/C/D
+* u32 RegAddr, The target address to be modified
+* u32 BitMask The target bit position in the target address
+* to be modified
+* u32 Data The new register Data in the target bit position
+* of the target address
+*
+* Output: None
+* Return: None
+* Note: This function is equal to "PutRFRegSetting" in PHY programming guide
+*/
+void PHY_SetRFReg_8723B(
+ struct adapter *Adapter,
+ u8 eRFPath,
+ u32 RegAddr,
+ u32 BitMask,
+ u32 Data
+)
+{
+ u32 Original_Value, BitShift;
+
+#if (DISABLE_BB_RF == 1)
+ return;
+#endif
+
+ /* RF data is 12 bits only */
+ if (BitMask != bRFRegOffsetMask) {
+ Original_Value = phy_RFSerialRead_8723B(Adapter, eRFPath, RegAddr);
+ BitShift = phy_CalculateBitShift(BitMask);
+ Data = ((Original_Value & (~BitMask)) | (Data<<BitShift));
+ }
+
+ phy_RFSerialWrite_8723B(Adapter, eRFPath, RegAddr, Data);
+}
+
+
+/* */
+/* 3. Initial MAC/BB/RF config by reading MAC/BB/RF txt. */
+/* */
+
+
+/*-----------------------------------------------------------------------------
+ * Function: PHY_MACConfig8192C
+ *
+ * Overview: Config MAC by header file or parameter file.
+ *
+ * Input: NONE
+ *
+ * Output: NONE
+ *
+ * Return: NONE
+ *
+ * Revised History:
+ * When Who Remark
+ * 08/12/2008 MHC Create Version 0.
+ *
+ *---------------------------------------------------------------------------
+ */
+s32 PHY_MACConfig8723B(struct adapter *Adapter)
+{
+ int rtStatus = _SUCCESS;
+ struct hal_com_data *pHalData = GET_HAL_DATA(Adapter);
+ s8 *pszMACRegFile;
+ s8 sz8723MACRegFile[] = RTL8723B_PHY_MACREG;
+
+
+ pszMACRegFile = sz8723MACRegFile;
+
+ /* */
+ /* Config MAC */
+ /* */
+ rtStatus = phy_ConfigMACWithParaFile(Adapter, pszMACRegFile);
+ if (rtStatus == _FAIL) {
+ ODM_ConfigMACWithHeaderFile(&pHalData->odmpriv);
+ rtStatus = _SUCCESS;
+ }
+
+ return rtStatus;
+}
+
+/**
+* Function: phy_InitBBRFRegisterDefinition
+*
+* OverView: Initialize Register definition offset for Radio Path A/B/C/D
+*
+* Input:
+* struct adapter * Adapter,
+*
+* Output: None
+* Return: None
+* Note: The initialization value is constant and it should never be changed
+*/
+static void phy_InitBBRFRegisterDefinition(struct adapter *Adapter)
+{
+ struct hal_com_data *pHalData = GET_HAL_DATA(Adapter);
+
+ /* RF Interface Software Control */
+ pHalData->PHYRegDef[ODM_RF_PATH_A].rfintfs = rFPGA0_XAB_RFInterfaceSW; /* 16 LSBs if read 32-bit from 0x870 */
+ pHalData->PHYRegDef[ODM_RF_PATH_B].rfintfs = rFPGA0_XAB_RFInterfaceSW; /* 16 MSBs if read 32-bit from 0x870 (16-bit for 0x872) */
+
+ /* RF Interface Output (and Enable) */
+ pHalData->PHYRegDef[ODM_RF_PATH_A].rfintfo = rFPGA0_XA_RFInterfaceOE; /* 16 LSBs if read 32-bit from 0x860 */
+ pHalData->PHYRegDef[ODM_RF_PATH_B].rfintfo = rFPGA0_XB_RFInterfaceOE; /* 16 LSBs if read 32-bit from 0x864 */
+
+ /* RF Interface (Output and) Enable */
+ pHalData->PHYRegDef[ODM_RF_PATH_A].rfintfe = rFPGA0_XA_RFInterfaceOE; /* 16 MSBs if read 32-bit from 0x860 (16-bit for 0x862) */
+ pHalData->PHYRegDef[ODM_RF_PATH_B].rfintfe = rFPGA0_XB_RFInterfaceOE; /* 16 MSBs if read 32-bit from 0x864 (16-bit for 0x866) */
+
+ pHalData->PHYRegDef[ODM_RF_PATH_A].rf3wireOffset = rFPGA0_XA_LSSIParameter; /* LSSI Parameter */
+ pHalData->PHYRegDef[ODM_RF_PATH_B].rf3wireOffset = rFPGA0_XB_LSSIParameter;
+
+ pHalData->PHYRegDef[ODM_RF_PATH_A].rfHSSIPara2 = rFPGA0_XA_HSSIParameter2; /* wire control parameter2 */
+ pHalData->PHYRegDef[ODM_RF_PATH_B].rfHSSIPara2 = rFPGA0_XB_HSSIParameter2; /* wire control parameter2 */
+
+ /* Transceiver Readback LSSI/HSPI mode */
+ pHalData->PHYRegDef[ODM_RF_PATH_A].rfLSSIReadBack = rFPGA0_XA_LSSIReadBack;
+ pHalData->PHYRegDef[ODM_RF_PATH_B].rfLSSIReadBack = rFPGA0_XB_LSSIReadBack;
+ pHalData->PHYRegDef[ODM_RF_PATH_A].rfLSSIReadBackPi = TransceiverA_HSPI_Readback;
+ pHalData->PHYRegDef[ODM_RF_PATH_B].rfLSSIReadBackPi = TransceiverB_HSPI_Readback;
+
+}
+
+static int phy_BB8723b_Config_ParaFile(struct adapter *Adapter)
+{
+ struct hal_com_data *pHalData = GET_HAL_DATA(Adapter);
+ int rtStatus = _SUCCESS;
+ u8 sz8723BBRegFile[] = RTL8723B_PHY_REG;
+ u8 sz8723AGCTableFile[] = RTL8723B_AGC_TAB;
+ u8 sz8723BBBRegPgFile[] = RTL8723B_PHY_REG_PG;
+ u8 sz8723BBRegMpFile[] = RTL8723B_PHY_REG_MP;
+ u8 sz8723BRFTxPwrLmtFile[] = RTL8723B_TXPWR_LMT;
+ u8 *pszBBRegFile = NULL, *pszAGCTableFile = NULL, *pszBBRegPgFile = NULL, *pszBBRegMpFile = NULL, *pszRFTxPwrLmtFile = NULL;
+
+ pszBBRegFile = sz8723BBRegFile;
+ pszAGCTableFile = sz8723AGCTableFile;
+ pszBBRegPgFile = sz8723BBBRegPgFile;
+ pszBBRegMpFile = sz8723BBRegMpFile;
+ pszRFTxPwrLmtFile = sz8723BRFTxPwrLmtFile;
+
+ /* Read Tx Power Limit File */
+ PHY_InitTxPowerLimit(Adapter);
+ if (
+ Adapter->registrypriv.RegEnableTxPowerLimit == 1 ||
+ (Adapter->registrypriv.RegEnableTxPowerLimit == 2 && pHalData->EEPROMRegulatory == 1)
+ ) {
+ if (PHY_ConfigRFWithPowerLimitTableParaFile(Adapter, pszRFTxPwrLmtFile) == _FAIL) {
+ if (HAL_STATUS_SUCCESS != ODM_ConfigRFWithHeaderFile(&pHalData->odmpriv, CONFIG_RF_TXPWR_LMT, (ODM_RF_RADIO_PATH_E)0))
+ rtStatus = _FAIL;
+ }
+
+ if (rtStatus != _SUCCESS) {
+ DBG_871X("%s():Read Tx power limit fail\n", __func__);
+ goto phy_BB8190_Config_ParaFile_Fail;
+ }
+ }
+
+ /* */
+ /* 1. Read PHY_REG.TXT BB INIT!! */
+ /* */
+ if (phy_ConfigBBWithParaFile(Adapter, pszBBRegFile, CONFIG_BB_PHY_REG) == _FAIL) {
+ if (HAL_STATUS_SUCCESS != ODM_ConfigBBWithHeaderFile(&pHalData->odmpriv, CONFIG_BB_PHY_REG))
+ rtStatus = _FAIL;
+ }
+
+ if (rtStatus != _SUCCESS) {
+ DBG_8192C("%s():Write BB Reg Fail!!", __func__);
+ goto phy_BB8190_Config_ParaFile_Fail;
+ }
+
+ /* If EEPROM or EFUSE autoload OK, We must config by PHY_REG_PG.txt */
+ PHY_InitTxPowerByRate(Adapter);
+ if (
+ Adapter->registrypriv.RegEnableTxPowerByRate == 1 ||
+ (Adapter->registrypriv.RegEnableTxPowerByRate == 2 && pHalData->EEPROMRegulatory != 2)
+ ) {
+ if (phy_ConfigBBWithPgParaFile(Adapter, pszBBRegPgFile) == _FAIL) {
+ if (HAL_STATUS_SUCCESS != ODM_ConfigBBWithHeaderFile(&pHalData->odmpriv, CONFIG_BB_PHY_REG_PG))
+ rtStatus = _FAIL;
+ }
+
+ if (pHalData->odmpriv.PhyRegPgValueType == PHY_REG_PG_EXACT_VALUE)
+ PHY_TxPowerByRateConfiguration(Adapter);
+
+ if (
+ Adapter->registrypriv.RegEnableTxPowerLimit == 1 ||
+ (Adapter->registrypriv.RegEnableTxPowerLimit == 2 && pHalData->EEPROMRegulatory == 1)
+ )
+ PHY_ConvertTxPowerLimitToPowerIndex(Adapter);
+
+ if (rtStatus != _SUCCESS) {
+ DBG_8192C("%s():BB_PG Reg Fail!!\n", __func__);
+ }
+ }
+
+ /* */
+ /* 2. Read BB AGC table Initialization */
+ /* */
+ if (phy_ConfigBBWithParaFile(Adapter, pszAGCTableFile, CONFIG_BB_AGC_TAB) == _FAIL) {
+ if (HAL_STATUS_SUCCESS != ODM_ConfigBBWithHeaderFile(&pHalData->odmpriv, CONFIG_BB_AGC_TAB))
+ rtStatus = _FAIL;
+ }
+
+ if (rtStatus != _SUCCESS) {
+ DBG_8192C("%s():AGC Table Fail\n", __func__);
+ goto phy_BB8190_Config_ParaFile_Fail;
+ }
+
+phy_BB8190_Config_ParaFile_Fail:
+
+ return rtStatus;
+}
+
+
+int PHY_BBConfig8723B(struct adapter *Adapter)
+{
+ int rtStatus = _SUCCESS;
+ struct hal_com_data *pHalData = GET_HAL_DATA(Adapter);
+ u32 RegVal;
+ u8 CrystalCap;
+
+ phy_InitBBRFRegisterDefinition(Adapter);
+
+ /* Enable BB and RF */
+ RegVal = rtw_read16(Adapter, REG_SYS_FUNC_EN);
+ rtw_write16(Adapter, REG_SYS_FUNC_EN, (u16)(RegVal|BIT13|BIT0|BIT1));
+
+ rtw_write32(Adapter, 0x948, 0x280); /* Others use Antenna S1 */
+
+ rtw_write8(Adapter, REG_RF_CTRL, RF_EN|RF_RSTB|RF_SDMRSTB);
+
+ msleep(1);
+
+ PHY_SetRFReg(Adapter, ODM_RF_PATH_A, 0x1, 0xfffff, 0x780);
+
+ rtw_write8(Adapter, REG_SYS_FUNC_EN, FEN_PPLL|FEN_PCIEA|FEN_DIO_PCIE|FEN_BB_GLB_RSTn|FEN_BBRSTB);
+
+ rtw_write8(Adapter, REG_AFE_XTAL_CTRL+1, 0x80);
+
+ /* */
+ /* Config BB and AGC */
+ /* */
+ rtStatus = phy_BB8723b_Config_ParaFile(Adapter);
+
+ /* 0x2C[23:18] = 0x2C[17:12] = CrystalCap */
+ CrystalCap = pHalData->CrystalCap & 0x3F;
+ PHY_SetBBReg(Adapter, REG_MAC_PHY_CTRL, 0xFFF000, (CrystalCap | (CrystalCap << 6)));
+
+ return rtStatus;
+}
+
+static void phy_LCK_8723B(struct adapter *Adapter)
+{
+ PHY_SetRFReg(Adapter, RF_PATH_A, 0xB0, bRFRegOffsetMask, 0xDFBE0);
+ PHY_SetRFReg(Adapter, RF_PATH_A, RF_CHNLBW, bRFRegOffsetMask, 0x8C01);
+ mdelay(200);
+ PHY_SetRFReg(Adapter, RF_PATH_A, 0xB0, bRFRegOffsetMask, 0xDFFE0);
+}
+
+int PHY_RFConfig8723B(struct adapter *Adapter)
+{
+ int rtStatus = _SUCCESS;
+
+ /* */
+ /* RF config */
+ /* */
+ rtStatus = PHY_RF6052_Config8723B(Adapter);
+
+ phy_LCK_8723B(Adapter);
+ /* PHY_BB8723B_Config_1T(Adapter); */
+
+ return rtStatus;
+}
+
+/**************************************************************************************************************
+ * Description:
+ * The low-level interface to set TxAGC, called by both the MP and normal driver.
+ *
+ * <20120830, Kordan>
+ **************************************************************************************************************/
+
+void PHY_SetTxPowerIndex_8723B(
+ struct adapter *Adapter,
+ u32 PowerIndex,
+ u8 RFPath,
+ u8 Rate
+)
+{
+ if (RFPath == ODM_RF_PATH_A || RFPath == ODM_RF_PATH_B) {
+ switch (Rate) {
+ case MGN_1M:
+ PHY_SetBBReg(Adapter, rTxAGC_A_CCK1_Mcs32, bMaskByte1, PowerIndex);
+ break;
+ case MGN_2M:
+ PHY_SetBBReg(Adapter, rTxAGC_B_CCK11_A_CCK2_11, bMaskByte1, PowerIndex);
+ break;
+ case MGN_5_5M:
+ PHY_SetBBReg(Adapter, rTxAGC_B_CCK11_A_CCK2_11, bMaskByte2, PowerIndex);
+ break;
+ case MGN_11M:
+ PHY_SetBBReg(Adapter, rTxAGC_B_CCK11_A_CCK2_11, bMaskByte3, PowerIndex);
+ break;
+
+ case MGN_6M:
+ PHY_SetBBReg(Adapter, rTxAGC_A_Rate18_06, bMaskByte0, PowerIndex);
+ break;
+ case MGN_9M:
+ PHY_SetBBReg(Adapter, rTxAGC_A_Rate18_06, bMaskByte1, PowerIndex);
+ break;
+ case MGN_12M:
+ PHY_SetBBReg(Adapter, rTxAGC_A_Rate18_06, bMaskByte2, PowerIndex);
+ break;
+ case MGN_18M:
+ PHY_SetBBReg(Adapter, rTxAGC_A_Rate18_06, bMaskByte3, PowerIndex);
+ break;
+
+ case MGN_24M:
+ PHY_SetBBReg(Adapter, rTxAGC_A_Rate54_24, bMaskByte0, PowerIndex);
+ break;
+ case MGN_36M:
+ PHY_SetBBReg(Adapter, rTxAGC_A_Rate54_24, bMaskByte1, PowerIndex);
+ break;
+ case MGN_48M:
+ PHY_SetBBReg(Adapter, rTxAGC_A_Rate54_24, bMaskByte2, PowerIndex);
+ break;
+ case MGN_54M:
+ PHY_SetBBReg(Adapter, rTxAGC_A_Rate54_24, bMaskByte3, PowerIndex);
+ break;
+
+ case MGN_MCS0:
+ PHY_SetBBReg(Adapter, rTxAGC_A_Mcs03_Mcs00, bMaskByte0, PowerIndex);
+ break;
+ case MGN_MCS1:
+ PHY_SetBBReg(Adapter, rTxAGC_A_Mcs03_Mcs00, bMaskByte1, PowerIndex);
+ break;
+ case MGN_MCS2:
+ PHY_SetBBReg(Adapter, rTxAGC_A_Mcs03_Mcs00, bMaskByte2, PowerIndex);
+ break;
+ case MGN_MCS3:
+ PHY_SetBBReg(Adapter, rTxAGC_A_Mcs03_Mcs00, bMaskByte3, PowerIndex);
+ break;
+
+ case MGN_MCS4:
+ PHY_SetBBReg(Adapter, rTxAGC_A_Mcs07_Mcs04, bMaskByte0, PowerIndex);
+ break;
+ case MGN_MCS5:
+ PHY_SetBBReg(Adapter, rTxAGC_A_Mcs07_Mcs04, bMaskByte1, PowerIndex);
+ break;
+ case MGN_MCS6:
+ PHY_SetBBReg(Adapter, rTxAGC_A_Mcs07_Mcs04, bMaskByte2, PowerIndex);
+ break;
+ case MGN_MCS7:
+ PHY_SetBBReg(Adapter, rTxAGC_A_Mcs07_Mcs04, bMaskByte3, PowerIndex);
+ break;
+
+ default:
+ DBG_871X("Invalid Rate!!\n");
+ break;
+ }
+ } else {
+ RT_TRACE(_module_hal_init_c_, _drv_err_, ("Invalid RFPath!!\n"));
+ }
+}
+
+u8 PHY_GetTxPowerIndex_8723B(
+ struct adapter *padapter,
+ u8 RFPath,
+ u8 Rate,
+ enum CHANNEL_WIDTH BandWidth,
+ u8 Channel
+)
+{
+ struct hal_com_data *pHalData = GET_HAL_DATA(padapter);
+ s8 txPower = 0, powerDiffByRate = 0, limit = 0;
+ bool bIn24G = false;
+
+ /* DBG_871X("===>%s\n", __func__); */
+
+ txPower = (s8) PHY_GetTxPowerIndexBase(padapter, RFPath, Rate, BandWidth, Channel, &bIn24G);
+ powerDiffByRate = PHY_GetTxPowerByRate(padapter, BAND_ON_2_4G, ODM_RF_PATH_A, RF_1TX, Rate);
+
+ limit = PHY_GetTxPowerLimit(
+ padapter,
+ padapter->registrypriv.RegPwrTblSel,
+ (u8)(!bIn24G),
+ pHalData->CurrentChannelBW,
+ RFPath,
+ Rate,
+ pHalData->CurrentChannel
+ );
+
+ powerDiffByRate = powerDiffByRate > limit ? limit : powerDiffByRate;
+ txPower += powerDiffByRate;
+
+ txPower += PHY_GetTxPowerTrackingOffset(padapter, RFPath, Rate);
+
+ if (txPower > MAX_POWER_INDEX)
+ txPower = MAX_POWER_INDEX;
+
+ /* DBG_871X("Final Tx Power(RF-%c, Channel: %d) = %d(0x%X)\n", ((RFPath == 0)?'A':'B'), Channel, txPower, txPower)); */
+ return (u8) txPower;
+}
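+
+/*
+ * Illustrative composition for PHY_GetTxPowerIndex_8723B() above (made-up
+ * numbers): a base index of 0x2E with a by-rate offset of 4 capped to a
+ * limit of 2, plus a tracking offset of 1, yields 0x2E + 2 + 1 = 0x31,
+ * clamped to MAX_POWER_INDEX if it overflows.
+ */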
+
+void PHY_SetTxPowerLevel8723B(struct adapter *Adapter, u8 Channel)
+{
+ struct hal_com_data *pHalData = GET_HAL_DATA(Adapter);
+ PDM_ODM_T pDM_Odm = &pHalData->odmpriv;
+ pFAT_T pDM_FatTable = &pDM_Odm->DM_FatTable;
+ u8 RFPath = ODM_RF_PATH_A;
+
+ if (pHalData->AntDivCfg) {/* antenna diversity Enable */
+ RFPath = ((pDM_FatTable->RxIdleAnt == MAIN_ANT) ? ODM_RF_PATH_A : ODM_RF_PATH_B);
+ } else { /* antenna diversity disable */
+ RFPath = pHalData->ant_path;
+ }
+
+ RT_TRACE(_module_hal_init_c_, _drv_info_, ("==>PHY_SetTxPowerLevel8723B()\n"));
+
+ PHY_SetTxPowerLevelByPath(Adapter, Channel, RFPath);
+
+ RT_TRACE(_module_hal_init_c_, _drv_info_, ("<==PHY_SetTxPowerLevel8723B()\n"));
+}
+
+void PHY_GetTxPowerLevel8723B(struct adapter *Adapter, s32 *powerlevel)
+{
+}
+
+static void phy_SetRegBW_8723B(
+ struct adapter *Adapter, enum CHANNEL_WIDTH CurrentBW
+)
+{
+ u16 RegRfMod_BW, u2tmp = 0;
+ RegRfMod_BW = rtw_read16(Adapter, REG_TRXPTCL_CTL_8723B);
+
+ switch (CurrentBW) {
+ case CHANNEL_WIDTH_20:
+ rtw_write16(Adapter, REG_TRXPTCL_CTL_8723B, (RegRfMod_BW & 0xFE7F)); /* BIT 7 = 0, BIT 8 = 0 */
+ break;
+
+ case CHANNEL_WIDTH_40:
+ u2tmp = RegRfMod_BW | BIT7;
+ rtw_write16(Adapter, REG_TRXPTCL_CTL_8723B, (u2tmp & 0xFEFF)); /* BIT 7 = 1, BIT 8 = 0 */
+ break;
+
+ case CHANNEL_WIDTH_80:
+ u2tmp = RegRfMod_BW | BIT8;
+ rtw_write16(Adapter, REG_TRXPTCL_CTL_8723B, (u2tmp & 0xFF7F)); /* BIT 7 = 0, BIT 8 = 1 */
+ break;
+
+ default:
+ DBG_871X("phy_PostSetBWMode8723B(): unknown Bandwidth: %#X\n", CurrentBW);
+ break;
+ }
+}
+
+static u8 phy_GetSecondaryChnl_8723B(struct adapter *Adapter)
+{
+ u8 SCSettingOf40 = 0, SCSettingOf20 = 0;
+ struct hal_com_data *pHalData = GET_HAL_DATA(Adapter);
+
+ RT_TRACE(
+ _module_hal_init_c_,
+ _drv_info_,
+ (
+ "SCMapping: VHT Case: pHalData->CurrentChannelBW %d, pHalData->nCur80MhzPrimeSC %d, pHalData->nCur40MhzPrimeSC %d\n",
+ pHalData->CurrentChannelBW,
+ pHalData->nCur80MhzPrimeSC,
+ pHalData->nCur40MhzPrimeSC
+ )
+ );
+ if (pHalData->CurrentChannelBW == CHANNEL_WIDTH_80) {
+ if (pHalData->nCur80MhzPrimeSC == HAL_PRIME_CHNL_OFFSET_LOWER)
+ SCSettingOf40 = VHT_DATA_SC_40_LOWER_OF_80MHZ;
+ else if (pHalData->nCur80MhzPrimeSC == HAL_PRIME_CHNL_OFFSET_UPPER)
+ SCSettingOf40 = VHT_DATA_SC_40_UPPER_OF_80MHZ;
+ else
+ RT_TRACE(_module_hal_init_c_, _drv_err_, ("SCMapping: Not Correct Primary40MHz Setting\n"));
+
+ if (
+ (pHalData->nCur40MhzPrimeSC == HAL_PRIME_CHNL_OFFSET_LOWER) &&
+ (pHalData->nCur80MhzPrimeSC == HAL_PRIME_CHNL_OFFSET_LOWER)
+ )
+ SCSettingOf20 = VHT_DATA_SC_20_LOWEST_OF_80MHZ;
+ else if (
+ (pHalData->nCur40MhzPrimeSC == HAL_PRIME_CHNL_OFFSET_UPPER) &&
+ (pHalData->nCur80MhzPrimeSC == HAL_PRIME_CHNL_OFFSET_LOWER)
+ )
+ SCSettingOf20 = VHT_DATA_SC_20_LOWER_OF_80MHZ;
+ else if (
+ (pHalData->nCur40MhzPrimeSC == HAL_PRIME_CHNL_OFFSET_LOWER) &&
+ (pHalData->nCur80MhzPrimeSC == HAL_PRIME_CHNL_OFFSET_UPPER)
+ )
+ SCSettingOf20 = VHT_DATA_SC_20_UPPER_OF_80MHZ;
+ else if (
+ (pHalData->nCur40MhzPrimeSC == HAL_PRIME_CHNL_OFFSET_UPPER) &&
+ (pHalData->nCur80MhzPrimeSC == HAL_PRIME_CHNL_OFFSET_UPPER)
+ )
+ SCSettingOf20 = VHT_DATA_SC_20_UPPERST_OF_80MHZ;
+ else
+ RT_TRACE(_module_hal_init_c_, _drv_err_, ("SCMapping: Not Correct Primary40MHz Setting\n"));
+ } else if (pHalData->CurrentChannelBW == CHANNEL_WIDTH_40) {
+ RT_TRACE(
+ _module_hal_init_c_,
+ _drv_info_,
+ (
+ "SCMapping: VHT Case: pHalData->CurrentChannelBW %d, pHalData->nCur40MhzPrimeSC %d\n",
+ pHalData->CurrentChannelBW,
+ pHalData->nCur40MhzPrimeSC
+ )
+ );
+
+ if (pHalData->nCur40MhzPrimeSC == HAL_PRIME_CHNL_OFFSET_UPPER)
+ SCSettingOf20 = VHT_DATA_SC_20_UPPER_OF_80MHZ;
+ else if (pHalData->nCur40MhzPrimeSC == HAL_PRIME_CHNL_OFFSET_LOWER)
+ SCSettingOf20 = VHT_DATA_SC_20_LOWER_OF_80MHZ;
+ else
+ RT_TRACE(_module_hal_init_c_, _drv_err_, ("SCMapping: Not Correct Primary40MHz Setting\n"));
+ }
+
+ RT_TRACE(_module_hal_init_c_, _drv_info_, ("SCMapping: SC Value %x\n", ((SCSettingOf40 << 4) | SCSettingOf20)));
+ return ((SCSettingOf40 << 4) | SCSettingOf20);
+}
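+
+/*
+ * The returned byte packs the 40MHz sub-channel setting in the high nibble
+ * and the 20MHz setting in the low nibble; phy_PostSetBwMode8723B() below
+ * writes it to REG_DATA_SC_8723B.
+ */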
+
+static void phy_PostSetBwMode8723B(struct adapter *Adapter)
+{
+ u8 SubChnlNum = 0;
+ struct hal_com_data *pHalData = GET_HAL_DATA(Adapter);
+
+
+ /* 3 Set Reg668 Reg440 BW */
+ phy_SetRegBW_8723B(Adapter, pHalData->CurrentChannelBW);
+
+ /* 3 Set Reg483 */
+ SubChnlNum = phy_GetSecondaryChnl_8723B(Adapter);
+ rtw_write8(Adapter, REG_DATA_SC_8723B, SubChnlNum);
+
+ /* 3 */
+ /* 3<2>Set PHY related register */
+ /* 3 */
+ switch (pHalData->CurrentChannelBW) {
+ /* 20 MHz channel*/
+ case CHANNEL_WIDTH_20:
+ PHY_SetBBReg(Adapter, rFPGA0_RFMOD, bRFMOD, 0x0);
+
+ PHY_SetBBReg(Adapter, rFPGA1_RFMOD, bRFMOD, 0x0);
+
+/* PHY_SetBBReg(Adapter, rFPGA0_AnalogParameter2, BIT10, 1); */
+
+ PHY_SetBBReg(Adapter, rOFDM0_TxPseudoNoiseWgt, (BIT31|BIT30), 0x0);
+ break;
+
+ /* 40 MHz channel*/
+ case CHANNEL_WIDTH_40:
+ PHY_SetBBReg(Adapter, rFPGA0_RFMOD, bRFMOD, 0x1);
+
+ PHY_SetBBReg(Adapter, rFPGA1_RFMOD, bRFMOD, 0x1);
+
+ /* Set Control channel to upper or lower. These settings are required only for 40MHz */
+ PHY_SetBBReg(Adapter, rCCK0_System, bCCKSideBand, (pHalData->nCur40MhzPrimeSC>>1));
+
+ PHY_SetBBReg(Adapter, rOFDM1_LSTF, 0xC00, pHalData->nCur40MhzPrimeSC);
+
+/* PHY_SetBBReg(Adapter, rFPGA0_AnalogParameter2, BIT10, 0); */
+
+ PHY_SetBBReg(Adapter, 0x818, (BIT26|BIT27), (pHalData->nCur40MhzPrimeSC == HAL_PRIME_CHNL_OFFSET_LOWER) ? 2 : 1);
+
+ break;
+
+ default:
+ /*RT_TRACE(COMP_DBG, DBG_LOUD, ("phy_SetBWMode8723B(): unknown Bandwidth: %#X\n"\
+ , pHalData->CurrentChannelBW));*/
+ break;
+ }
+
+ /* 3<3>Set RF related register */
+ PHY_RF6052SetBandwidth8723B(Adapter, pHalData->CurrentChannelBW);
+}
+
+static void phy_SwChnl8723B(struct adapter *padapter)
+{
+ struct hal_com_data *pHalData = GET_HAL_DATA(padapter);
+ u8 channelToSW = pHalData->CurrentChannel;
+
+ if (pHalData->rf_chip == RF_PSEUDO_11N) {
+ /* RT_TRACE(COMP_MLME, DBG_LOUD, ("phy_SwChnl8723B: return for PSEUDO\n")); */
+ return;
+ }
+ pHalData->RfRegChnlVal[0] = ((pHalData->RfRegChnlVal[0] & 0xfffff00) | channelToSW);
+ PHY_SetRFReg(padapter, ODM_RF_PATH_A, RF_CHNLBW, 0x3FF, pHalData->RfRegChnlVal[0]);
+ PHY_SetRFReg(padapter, ODM_RF_PATH_B, RF_CHNLBW, 0x3FF, pHalData->RfRegChnlVal[0]);
+
+ DBG_8192C("===>phy_SwChnl8723B: Channel = %d\n", channelToSW);
+}
+
+static void phy_SwChnlAndSetBwMode8723B(struct adapter *Adapter)
+{
+ struct hal_com_data *pHalData = GET_HAL_DATA(Adapter);
+
+ /* RT_TRACE(COMP_SCAN, DBG_LOUD, ("phy_SwChnlAndSetBwMode8723B(): bSwChnl %d, bSetChnlBW %d\n", pHalData->bSwChnl, pHalData->bSetChnlBW)); */
+ if (Adapter->bNotifyChannelChange) {
+ DBG_871X("[%s] bSwChnl =%d, ch =%d, bSetChnlBW =%d, bw =%d\n",
+ __func__,
+ pHalData->bSwChnl,
+ pHalData->CurrentChannel,
+ pHalData->bSetChnlBW,
+ pHalData->CurrentChannelBW);
+ }
+
+ if (Adapter->bDriverStopped || Adapter->bSurpriseRemoved)
+ return;
+
+ if (pHalData->bSwChnl) {
+ phy_SwChnl8723B(Adapter);
+ pHalData->bSwChnl = false;
+ }
+
+ if (pHalData->bSetChnlBW) {
+ phy_PostSetBwMode8723B(Adapter);
+ pHalData->bSetChnlBW = false;
+ }
+
+ PHY_SetTxPowerLevel8723B(Adapter, pHalData->CurrentChannel);
+}
+
+static void PHY_HandleSwChnlAndSetBW8723B(
+ struct adapter *Adapter,
+ bool bSwitchChannel,
+ bool bSetBandWidth,
+ u8 ChannelNum,
+ enum CHANNEL_WIDTH ChnlWidth,
+ enum EXTCHNL_OFFSET ExtChnlOffsetOf40MHz,
+ enum EXTCHNL_OFFSET ExtChnlOffsetOf80MHz,
+ u8 CenterFrequencyIndex1
+)
+{
+ /* static bool bInitialzed = false; */
+ struct hal_com_data *pHalData = GET_HAL_DATA(Adapter);
+ u8 tmpChannel = pHalData->CurrentChannel;
+ enum CHANNEL_WIDTH tmpBW = pHalData->CurrentChannelBW;
+ u8 tmpnCur40MhzPrimeSC = pHalData->nCur40MhzPrimeSC;
+ u8 tmpnCur80MhzPrimeSC = pHalData->nCur80MhzPrimeSC;
+ u8 tmpCenterFrequencyIndex1 = pHalData->CurrentCenterFrequencyIndex1;
+
+ /* DBG_871X("=> PHY_HandleSwChnlAndSetBW8812: bSwitchChannel %d, bSetBandWidth %d\n", bSwitchChannel, bSetBandWidth); */
+
+ /* check is swchnl or setbw */
+ if (!bSwitchChannel && !bSetBandWidth) {
+ DBG_871X("PHY_HandleSwChnlAndSetBW8812: not switch channel and not set bandwidth\n");
+ return;
+ }
+
+ /* skip change for channel or bandwidth is the same */
+ if (bSwitchChannel) {
+ /* if (pHalData->CurrentChannel != ChannelNum) */
+ {
+ if (HAL_IsLegalChannel(Adapter, ChannelNum))
+ pHalData->bSwChnl = true;
+ }
+ }
+
+ if (bSetBandWidth)
+ pHalData->bSetChnlBW = true;
+
+ if (!pHalData->bSetChnlBW && !pHalData->bSwChnl) {
+ /* DBG_871X("<= PHY_HandleSwChnlAndSetBW8812: bSwChnl %d, bSetChnlBW %d\n", pHalData->bSwChnl, pHalData->bSetChnlBW); */
+ return;
+ }
+
+
+ if (pHalData->bSwChnl) {
+ pHalData->CurrentChannel = ChannelNum;
+ pHalData->CurrentCenterFrequencyIndex1 = ChannelNum;
+ }
+
+
+ if (pHalData->bSetChnlBW) {
+ pHalData->CurrentChannelBW = ChnlWidth;
+ pHalData->nCur40MhzPrimeSC = ExtChnlOffsetOf40MHz;
+ pHalData->nCur80MhzPrimeSC = ExtChnlOffsetOf80MHz;
+ pHalData->CurrentCenterFrequencyIndex1 = CenterFrequencyIndex1;
+ }
+
+ /* Switch workitem or set timer to do switch channel or setbandwidth operation */
+ if ((!Adapter->bDriverStopped) && (!Adapter->bSurpriseRemoved)) {
+ phy_SwChnlAndSetBwMode8723B(Adapter);
+ } else {
+ if (pHalData->bSwChnl) {
+ pHalData->CurrentChannel = tmpChannel;
+ pHalData->CurrentCenterFrequencyIndex1 = tmpChannel;
+ }
+
+ if (pHalData->bSetChnlBW) {
+ pHalData->CurrentChannelBW = tmpBW;
+ pHalData->nCur40MhzPrimeSC = tmpnCur40MhzPrimeSC;
+ pHalData->nCur80MhzPrimeSC = tmpnCur80MhzPrimeSC;
+ pHalData->CurrentCenterFrequencyIndex1 = tmpCenterFrequencyIndex1;
+ }
+ }
+}
+
+void PHY_SetBWMode8723B(
+ struct adapter *Adapter,
+ enum CHANNEL_WIDTH Bandwidth, /* 20M or 40M */
+ unsigned char Offset /* Upper, Lower, or Don't care */
+)
+{
+ struct hal_com_data *pHalData = GET_HAL_DATA(Adapter);
+
+ PHY_HandleSwChnlAndSetBW8723B(Adapter, false, true, pHalData->CurrentChannel, Bandwidth, Offset, Offset, pHalData->CurrentChannel);
+}
+
+/* Call after initialization */
+void PHY_SwChnl8723B(struct adapter *Adapter, u8 channel)
+{
+ PHY_HandleSwChnlAndSetBW8723B(Adapter, true, false, channel, 0, 0, 0, channel);
+}
+
+void PHY_SetSwChnlBWMode8723B(
+ struct adapter *Adapter,
+ u8 channel,
+ enum CHANNEL_WIDTH Bandwidth,
+ u8 Offset40,
+ u8 Offset80
+)
+{
+ /* DBG_871X("%s() ===>\n", __func__); */
+
+ PHY_HandleSwChnlAndSetBW8723B(Adapter, true, true, channel, Bandwidth, Offset40, Offset80, channel);
+
+ /* DBG_871X("<==%s()\n", __func__); */
+}
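The three exported helpers above are thin wrappers around PHY_HandleSwChnlAndSetBW8723B(). A minimal usage sketch, assuming the caller already holds a valid struct adapter and that the HAL_PRIME_CHNL_OFFSET_* values from the driver's hal_com headers are the intended extension-channel offsets (names outside this hunk, so treated as assumptions):

static void example_switch_to_ch6_ht40(struct adapter *adapter)
{
        /* Switch to channel 6 at 40 MHz with the primary 20 MHz channel on
         * the lower half; the 80 MHz offset is unused by this 2.4 GHz-only
         * chip, so "don't care" is passed.
         */
        PHY_SetSwChnlBWMode8723B(adapter, 6, CHANNEL_WIDTH_40,
                                 HAL_PRIME_CHNL_OFFSET_LOWER,
                                 HAL_PRIME_CHNL_OFFSET_DONT_CARE);
}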
diff --git a/drivers/staging/rtl8723bs/hal/rtl8723b_rf6052.c b/drivers/staging/rtl8723bs/hal/rtl8723b_rf6052.c
new file mode 100644
index 000000000000..3a85d0cddfda
--- /dev/null
+++ b/drivers/staging/rtl8723bs/hal/rtl8723b_rf6052.c
@@ -0,0 +1,224 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+/******************************************************************************
+ *
+ *
+ * Module: rtl8192c_rf6052.c (Source C File)
+ *
+ * Note: Provide RF 6052 series related APIs.
+ *
+ * Function:
+ *
+ * Export:
+ *
+ * Abbrev:
+ *
+ * History:
+ * Data Who Remark
+ *
+ * 09/25/2008 MHC Create initial version.
+ * 11/05/2008 MHC Add API for tx power setting.
+ *
+ *
+******************************************************************************/
+
+#include <rtl8723b_hal.h>
+
+/*---------------------------Define Local Constant---------------------------*/
+/*---------------------------Define Local Constant---------------------------*/
+
+
+/*------------------------Define global variable-----------------------------*/
+/*------------------------Define global variable-----------------------------*/
+
+
+/*------------------------Define local variable------------------------------*/
+/* 2008/11/20 MH For Debug only, RF */
+/*------------------------Define local variable------------------------------*/
+
+/*-----------------------------------------------------------------------------
+ * Function: PHY_RF6052SetBandwidth()
+ *
+ * Overview: This function is called by SetBWModeCallback8190Pci() only
+ *
+ * Input: struct adapter * Adapter
+ * WIRELESS_BANDWIDTH_E Bandwidth 20M or 40M
+ *
+ * Output: NONE
+ *
+ * Return: NONE
+ *
+ * Note: For RF type 0222D
+ *---------------------------------------------------------------------------*/
+void PHY_RF6052SetBandwidth8723B(
+ struct adapter *Adapter, enum CHANNEL_WIDTH Bandwidth
+) /* 20M or 40M */
+{
+ struct hal_com_data *pHalData = GET_HAL_DATA(Adapter);
+
+ switch (Bandwidth) {
+ case CHANNEL_WIDTH_20:
+ pHalData->RfRegChnlVal[0] = ((pHalData->RfRegChnlVal[0] & 0xfffff3ff) | BIT10 | BIT11);
+ PHY_SetRFReg(Adapter, ODM_RF_PATH_A, RF_CHNLBW, bRFRegOffsetMask, pHalData->RfRegChnlVal[0]);
+ PHY_SetRFReg(Adapter, ODM_RF_PATH_B, RF_CHNLBW, bRFRegOffsetMask, pHalData->RfRegChnlVal[0]);
+ break;
+
+ case CHANNEL_WIDTH_40:
+ pHalData->RfRegChnlVal[0] = ((pHalData->RfRegChnlVal[0] & 0xfffff3ff) | BIT10);
+ PHY_SetRFReg(Adapter, ODM_RF_PATH_A, RF_CHNLBW, bRFRegOffsetMask, pHalData->RfRegChnlVal[0]);
+ PHY_SetRFReg(Adapter, ODM_RF_PATH_B, RF_CHNLBW, bRFRegOffsetMask, pHalData->RfRegChnlVal[0]);
+ break;
+
+ default:
+ /* RT_TRACE(COMP_DBG, DBG_LOUD, ("PHY_SetRF8225Bandwidth(): unknown Bandwidth: %#X\n", Bandwidth)); */
+ break;
+ }
+
+}
+
+static int phy_RF6052_Config_ParaFile(struct adapter *Adapter)
+{
+ u32 u4RegValue = 0;
+ u8 eRFPath;
+ struct bb_register_def *pPhyReg;
+
+ int rtStatus = _SUCCESS;
+ struct hal_com_data *pHalData = GET_HAL_DATA(Adapter);
+
+ static char sz8723RadioAFile[] = RTL8723B_PHY_RADIO_A;
+ static char sz8723RadioBFile[] = RTL8723B_PHY_RADIO_B;
+ static s8 sz8723BTxPwrTrackFile[] = RTL8723B_TXPWR_TRACK;
+ char *pszRadioAFile, *pszRadioBFile, *pszTxPwrTrackFile;
+
+ pszRadioAFile = sz8723RadioAFile;
+ pszRadioBFile = sz8723RadioBFile;
+ pszTxPwrTrackFile = sz8723BTxPwrTrackFile;
+
+ /* 3----------------------------------------------------------------- */
+ /* 3 <2> Initialize RF */
+ /* 3----------------------------------------------------------------- */
+ /* for (eRFPath = RF_PATH_A; eRFPath <pHalData->NumTotalRFPath; eRFPath++) */
+ for (eRFPath = 0; eRFPath < pHalData->NumTotalRFPath; eRFPath++) {
+
+ pPhyReg = &pHalData->PHYRegDef[eRFPath];
+
+ /*----Store original RFENV control type----*/
+ switch (eRFPath) {
+ case RF_PATH_A:
+ case RF_PATH_C:
+ u4RegValue = PHY_QueryBBReg(Adapter, pPhyReg->rfintfs, bRFSI_RFENV);
+ break;
+ case RF_PATH_B:
+ case RF_PATH_D:
+ u4RegValue = PHY_QueryBBReg(Adapter, pPhyReg->rfintfs, bRFSI_RFENV<<16);
+ break;
+ }
+
+ /*----Set RF_ENV enable----*/
+ PHY_SetBBReg(Adapter, pPhyReg->rfintfe, bRFSI_RFENV<<16, 0x1);
+ udelay(1);/* PlatformStallExecution(1); */
+
+ /*----Set RF_ENV output high----*/
+ PHY_SetBBReg(Adapter, pPhyReg->rfintfo, bRFSI_RFENV, 0x1);
+ udelay(1);/* PlatformStallExecution(1); */
+
+ /* Set bit number of Address and Data for RF register */
+ PHY_SetBBReg(Adapter, pPhyReg->rfHSSIPara2, b3WireAddressLength, 0x0); /* Set 1 to 4 bits for 8255 */
+ udelay(1);/* PlatformStallExecution(1); */
+
+ PHY_SetBBReg(Adapter, pPhyReg->rfHSSIPara2, b3WireDataLength, 0x0); /* Set 0 to 12 bits for 8255 */
+ udelay(1);/* PlatformStallExecution(1); */
+
+ /*----Initialize RF from configuration file----*/
+ switch (eRFPath) {
+ case RF_PATH_A:
+ if (PHY_ConfigRFWithParaFile(Adapter, pszRadioAFile, eRFPath) == _FAIL)
+ {
+ if (HAL_STATUS_FAILURE == ODM_ConfigRFWithHeaderFile(&pHalData->odmpriv, CONFIG_RF_RADIO, (ODM_RF_RADIO_PATH_E)eRFPath))
+ rtStatus = _FAIL;
+ }
+ break;
+ case RF_PATH_B:
+ if (PHY_ConfigRFWithParaFile(Adapter, pszRadioBFile, eRFPath) == _FAIL)
+ {
+ if (HAL_STATUS_FAILURE == ODM_ConfigRFWithHeaderFile(&pHalData->odmpriv, CONFIG_RF_RADIO, (ODM_RF_RADIO_PATH_E)eRFPath))
+ rtStatus = _FAIL;
+ }
+ break;
+ case RF_PATH_C:
+ break;
+ case RF_PATH_D:
+ break;
+ }
+
+ /*----Restore RFENV control type----*/
+ switch (eRFPath) {
+ case RF_PATH_A:
+ case RF_PATH_C:
+ PHY_SetBBReg(Adapter, pPhyReg->rfintfs, bRFSI_RFENV, u4RegValue);
+ break;
+ case RF_PATH_B:
+ case RF_PATH_D:
+ PHY_SetBBReg(Adapter, pPhyReg->rfintfs, bRFSI_RFENV<<16, u4RegValue);
+ break;
+ }
+
+ if (rtStatus != _SUCCESS) {
+ /* RT_TRACE(COMP_FPGA, DBG_LOUD, ("phy_RF6052_Config_ParaFile():Radio[%d] Fail!!", eRFPath)); */
+ goto phy_RF6052_Config_ParaFile_Fail;
+ }
+
+ }
+
+ /* 3 ----------------------------------------------------------------- */
+ /* 3 Configuration of Tx Power Tracking */
+ /* 3 ----------------------------------------------------------------- */
+
+ if (PHY_ConfigRFWithTxPwrTrackParaFile(Adapter, pszTxPwrTrackFile) == _FAIL)
+ {
+ ODM_ConfigRFWithTxPwrTrackHeaderFile(&pHalData->odmpriv);
+ }
+
+ /* RT_TRACE(COMP_INIT, DBG_LOUD, ("<---phy_RF6052_Config_ParaFile()\n")); */
+ return rtStatus;
+
+phy_RF6052_Config_ParaFile_Fail:
+ return rtStatus;
+}
+
+
+int PHY_RF6052_Config8723B(struct adapter *Adapter)
+{
+ struct hal_com_data *pHalData = GET_HAL_DATA(Adapter);
+ int rtStatus = _SUCCESS;
+
+ /* */
+ /* Initialize general global value */
+ /* */
+ /* TODO: Extend RF_PATH_C and RF_PATH_D in the future */
+ if (pHalData->rf_type == RF_1T1R)
+ pHalData->NumTotalRFPath = 1;
+ else
+ pHalData->NumTotalRFPath = 2;
+
+ /* */
+ /* Config BB and RF */
+ /* */
+ rtStatus = phy_RF6052_Config_ParaFile(Adapter);
+ return rtStatus;
+
+}
+
+/* End of HalRf6052.c */
diff --git a/drivers/staging/rtl8723bs/hal/rtl8723b_rxdesc.c b/drivers/staging/rtl8723bs/hal/rtl8723b_rxdesc.c
new file mode 100644
index 000000000000..92e5a0e7aa59
--- /dev/null
+++ b/drivers/staging/rtl8723bs/hal/rtl8723b_rxdesc.c
@@ -0,0 +1,86 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#define _RTL8723B_REDESC_C_
+
+#include <rtl8723b_hal.h>
+
+static void process_rssi(struct adapter *padapter, union recv_frame *prframe)
+{
+ struct rx_pkt_attrib *pattrib = &prframe->u.hdr.attrib;
+ struct signal_stat *signal_stat = &padapter->recvpriv.signal_strength_data;
+
+ /* DBG_8192C("process_rssi => pattrib->rssil(%d) signal_strength(%d)\n ", pattrib->RecvSignalPower, pattrib->signal_strength); */
+ /* if (pRfd->Status.bPacketToSelf || pRfd->Status.bPacketBeacon) */
+ {
+ if (signal_stat->update_req) {
+ signal_stat->total_num = 0;
+ signal_stat->total_val = 0;
+ signal_stat->update_req = 0;
+ }
+
+ signal_stat->total_num++;
+ signal_stat->total_val += pattrib->phy_info.SignalStrength;
+ signal_stat->avg_val = signal_stat->total_val / signal_stat->total_num;
+ }
+
+} /* Process_UI_RSSI_8192C */
+
+static void process_link_qual(struct adapter *padapter, union recv_frame *prframe)
+{
+ struct rx_pkt_attrib *pattrib;
+ struct signal_stat *signal_stat;
+
+ if (prframe == NULL || padapter == NULL)
+ return;
+
+ pattrib = &prframe->u.hdr.attrib;
+ signal_stat = &padapter->recvpriv.signal_qual_data;
+
+ /* DBG_8192C("process_link_qual => pattrib->signal_qual(%d)\n ", pattrib->signal_qual); */
+
+ if (signal_stat->update_req) {
+ signal_stat->total_num = 0;
+ signal_stat->total_val = 0;
+ signal_stat->update_req = 0;
+ }
+
+ signal_stat->total_num++;
+ signal_stat->total_val += pattrib->phy_info.SignalQuality;
+ signal_stat->avg_val = signal_stat->total_val / signal_stat->total_num;
+} /* Process_UiLinkQuality8192S */
+
+
+void rtl8723b_process_phy_info(struct adapter *padapter, void *prframe)
+{
+ union recv_frame *precvframe = (union recv_frame *)prframe;
+ /* */
+ /* Check RSSI */
+ /* */
+ process_rssi(padapter, precvframe);
+ /* */
+ /* Check PWDB. */
+ /* */
+ /* process_PWDB(padapter, precvframe); */
+
+ /* UpdateRxSignalStatistics8192C(Adapter, pRfd); */
+ /* */
+ /* Check EVM */
+ /* */
+ process_link_qual(padapter, precvframe);
+ #ifdef DBG_RX_SIGNAL_DISPLAY_RAW_DATA
+ rtw_store_phy_info(padapter, prframe);
+ #endif
+
+}
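process_rssi() and process_link_qual() above both maintain a running average that is reset whenever the consumer raises update_req. A minimal standalone sketch of that accumulate-and-average pattern, using a hypothetical plain struct in place of the driver's signal_stat:

struct sig_avg {
        unsigned int total_num;
        unsigned int total_val;
        unsigned int avg_val;
        unsigned int update_req;        /* consumer sets this to restart the window */
};

static void sig_avg_add(struct sig_avg *s, unsigned int sample)
{
        if (s->update_req) {            /* restart the averaging window on request */
                s->total_num = 0;
                s->total_val = 0;
                s->update_req = 0;
        }

        s->total_num++;
        s->total_val += sample;
        s->avg_val = s->total_val / s->total_num;
}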
diff --git a/drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c b/drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c
new file mode 100644
index 000000000000..b002eb446b2c
--- /dev/null
+++ b/drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c
@@ -0,0 +1,490 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#define _RTL8723BS_RECV_C_
+
+#include <drv_types.h>
+#include <rtw_debug.h>
+#include <rtl8723b_hal.h>
+
+
+static s32 initrecvbuf(struct recv_buf *precvbuf, struct adapter *padapter)
+{
+ INIT_LIST_HEAD(&precvbuf->list);
+ spin_lock_init(&precvbuf->recvbuf_lock);
+
+ precvbuf->adapter = padapter;
+
+ return _SUCCESS;
+}
+
+static void update_recvframe_attrib(
+ struct adapter *padapter, union recv_frame *precvframe, struct recv_stat *prxstat
+)
+{
+ struct rx_pkt_attrib *pattrib;
+ struct recv_stat report;
+ PRXREPORT prxreport = (PRXREPORT)&report;
+
+ report.rxdw0 = prxstat->rxdw0;
+ report.rxdw1 = prxstat->rxdw1;
+ report.rxdw2 = prxstat->rxdw2;
+ report.rxdw3 = prxstat->rxdw3;
+ report.rxdw4 = prxstat->rxdw4;
+ report.rxdw5 = prxstat->rxdw5;
+
+ pattrib = &precvframe->u.hdr.attrib;
+ memset(pattrib, 0, sizeof(struct rx_pkt_attrib));
+
+ /* update rx report to recv_frame attribute */
+ pattrib->pkt_rpt_type = prxreport->c2h_ind ? C2H_PACKET : NORMAL_RX;
+/* DBG_871X("%s: pkt_rpt_type =%d\n", __func__, pattrib->pkt_rpt_type); */
+
+ if (pattrib->pkt_rpt_type == NORMAL_RX) {
+ /* Normal rx packet */
+ /* update rx report to recv_frame attribute */
+ pattrib->pkt_len = (u16)prxreport->pktlen;
+ pattrib->drvinfo_sz = (u8)(prxreport->drvinfosize << 3);
+ pattrib->physt = (u8)prxreport->physt;
+
+ pattrib->crc_err = (u8)prxreport->crc32;
+ pattrib->icv_err = (u8)prxreport->icverr;
+
+ pattrib->bdecrypted = (u8)(prxreport->swdec ? 0 : 1);
+ pattrib->encrypt = (u8)prxreport->security;
+
+ pattrib->qos = (u8)prxreport->qos;
+ pattrib->priority = (u8)prxreport->tid;
+
+ pattrib->amsdu = (u8)prxreport->amsdu;
+
+ pattrib->seq_num = (u16)prxreport->seq;
+ pattrib->frag_num = (u8)prxreport->frag;
+ pattrib->mfrag = (u8)prxreport->mf;
+ pattrib->mdata = (u8)prxreport->md;
+
+ pattrib->data_rate = (u8)prxreport->rx_rate;
+ } else
+ pattrib->pkt_len = (u16)prxreport->pktlen;
+}
+
+/*
+ * Notice:
+ * Before calling this function,
+ * precvframe->u.hdr.rx_data should be ready!
+ */
+static void update_recvframe_phyinfo(
+ union recv_frame *precvframe, struct phy_stat *pphy_status
+)
+{
+ struct adapter *padapter = precvframe->u.hdr.adapter;
+ struct rx_pkt_attrib *pattrib = &precvframe->u.hdr.attrib;
+ struct hal_com_data *pHalData = GET_HAL_DATA(padapter);
+ PODM_PHY_INFO_T pPHYInfo = (PODM_PHY_INFO_T)(&pattrib->phy_info);
+
+ u8 *wlanhdr;
+ ODM_PACKET_INFO_T pkt_info;
+ u8 *sa = NULL;
+ /* _irqL irqL; */
+ struct sta_priv *pstapriv;
+ struct sta_info *psta;
+
+ pkt_info.bPacketMatchBSSID = false;
+ pkt_info.bPacketToSelf = false;
+ pkt_info.bPacketBeacon = false;
+
+
+ wlanhdr = get_recvframe_data(precvframe);
+
+ pkt_info.bPacketMatchBSSID = ((!IsFrameTypeCtrl(wlanhdr)) &&
+ !pattrib->icv_err && !pattrib->crc_err &&
+ !memcmp(get_hdr_bssid(wlanhdr), get_bssid(&padapter->mlmepriv), ETH_ALEN));
+
+ pkt_info.bPacketToSelf = pkt_info.bPacketMatchBSSID && (!memcmp(get_ra(wlanhdr), myid(&padapter->eeprompriv), ETH_ALEN));
+
+ pkt_info.bPacketBeacon = pkt_info.bPacketMatchBSSID && (GetFrameSubType(wlanhdr) == WIFI_BEACON);
+
+ sa = get_ta(wlanhdr);
+
+ pkt_info.StationID = 0xFF;
+
+ pstapriv = &padapter->stapriv;
+ psta = rtw_get_stainfo(pstapriv, sa);
+ if (psta) {
+ pkt_info.StationID = psta->mac_id;
+ /* DBG_8192C("%s ==> StationID(%d)\n", __func__, pkt_info.StationID); */
+ }
+ pkt_info.DataRate = pattrib->data_rate;
+
+ /* rtl8723b_query_rx_phy_status(precvframe, pphy_status); */
+ /* spin_lock_bh(&pHalData->odm_stainfo_lock); */
+ ODM_PhyStatusQuery(&pHalData->odmpriv, pPHYInfo, (u8 *)pphy_status, &(pkt_info));
+ if (psta)
+ psta->rssi = pattrib->phy_info.RecvSignalPower;
+ /* spin_unlock_bh(&pHalData->odm_stainfo_lock); */
+ precvframe->u.hdr.psta = NULL;
+ if (
+ pkt_info.bPacketMatchBSSID &&
+ (check_fwstate(&padapter->mlmepriv, WIFI_AP_STATE) == true)
+ ) {
+ if (psta) {
+ precvframe->u.hdr.psta = psta;
+ rtl8723b_process_phy_info(padapter, precvframe);
+ }
+ } else if (pkt_info.bPacketToSelf || pkt_info.bPacketBeacon) {
+ if (check_fwstate(&padapter->mlmepriv, WIFI_ADHOC_STATE|WIFI_ADHOC_MASTER_STATE) == true)
+ if (psta)
+ precvframe->u.hdr.psta = psta;
+ rtl8723b_process_phy_info(padapter, precvframe);
+ }
+}
+
+static void rtl8723bs_c2h_packet_handler(struct adapter *padapter, u8 *pbuf, u16 length)
+{
+ u8 *tmpBuf = NULL;
+ u8 res = false;
+
+ if (length == 0)
+ return;
+
+ /* DBG_871X("+%s() length =%d\n", __func__, length); */
+
+ tmpBuf = rtw_zmalloc(length);
+ if (tmpBuf == NULL)
+ return;
+
+ memcpy(tmpBuf, pbuf, length);
+
+ res = rtw_c2h_packet_wk_cmd(padapter, tmpBuf, length);
+
+ if (res == false)
+ kfree(tmpBuf);
+
+ /* DBG_871X("-%s res(%d)\n", __func__, res); */
+
+ return;
+}
+
+static void rtl8723bs_recv_tasklet(void *priv)
+{
+ struct adapter *padapter;
+ struct hal_com_data *pHalData;
+ struct recv_priv *precvpriv;
+ struct recv_buf *precvbuf;
+ union recv_frame *precvframe;
+ struct rx_pkt_attrib *pattrib;
+ u8 *ptr;
+ u32 pkt_offset, skb_len, alloc_sz;
+ _pkt *pkt_copy = NULL;
+ u8 shift_sz = 0, rx_report_sz = 0;
+
+
+ padapter = (struct adapter *)priv;
+ pHalData = GET_HAL_DATA(padapter);
+ precvpriv = &padapter->recvpriv;
+
+ do {
+ precvbuf = rtw_dequeue_recvbuf(&precvpriv->recv_buf_pending_queue);
+ if (NULL == precvbuf)
+ break;
+
+ ptr = precvbuf->pdata;
+
+ while (ptr < precvbuf->ptail) {
+ precvframe = rtw_alloc_recvframe(&precvpriv->free_recv_queue);
+ if (precvframe == NULL) {
+ DBG_8192C("%s: no enough recv frame!\n", __func__);
+ rtw_enqueue_recvbuf_to_head(precvbuf, &precvpriv->recv_buf_pending_queue);
+
+ /* Failing to allocate a recvframe should be temporary, so */
+ /* schedule again and hope a recvframe is available next time. */
+ tasklet_schedule(&precvpriv->recv_tasklet);
+ return;
+ }
+
+ /* rx desc parsing */
+ update_recvframe_attrib(padapter, precvframe, (struct recv_stat *)ptr);
+
+ pattrib = &precvframe->u.hdr.attrib;
+
+ /* fix Hardware RX data error, drop whole recv_buffer */
+ if ((!(pHalData->ReceiveConfig & RCR_ACRC32)) && pattrib->crc_err) {
+ DBG_8192C("%s()-%d: RX Warning! rx CRC ERROR !!\n", __func__, __LINE__);
+ rtw_free_recvframe(precvframe, &precvpriv->free_recv_queue);
+ break;
+ }
+
+ rx_report_sz = RXDESC_SIZE + pattrib->drvinfo_sz;
+ pkt_offset = rx_report_sz + pattrib->shift_sz + pattrib->pkt_len;
+
+ if ((ptr + pkt_offset) > precvbuf->ptail) {
+ DBG_8192C("%s()-%d: : next pkt len(%p,%d) exceed ptail(%p)!\n", __func__, __LINE__, ptr, pkt_offset, precvbuf->ptail);
+ rtw_free_recvframe(precvframe, &precvpriv->free_recv_queue);
+ break;
+ }
+
+ if ((pattrib->crc_err) || (pattrib->icv_err)) {
+ {
+ DBG_8192C("%s: crc_err =%d icv_err =%d, skip!\n", __func__, pattrib->crc_err, pattrib->icv_err);
+ }
+ rtw_free_recvframe(precvframe, &precvpriv->free_recv_queue);
+ } else {
+ /* Modified by Albert 20101213 */
+ /* For 8-byte IP header alignment. */
+ if (pattrib->qos) /* QoS data: wireless LAN header length is 26 */
+ shift_sz = 6;
+ else
+ shift_sz = 0;
+
+ skb_len = pattrib->pkt_len;
+
+ /* For the first fragment packet, the driver needs to allocate 1536+drvinfo_sz+RXDESC_SIZE to defragment the packet. */
+ /* modify alloc_sz for received CRC error packets, by thomas 2011-06-02 */
+ if ((pattrib->mfrag == 1) && (pattrib->frag_num == 0)) {
+ if (skb_len <= 1650)
+ alloc_sz = 1664;
+ else
+ alloc_sz = skb_len + 14;
+ } else {
+ alloc_sz = skb_len;
+ /* 6 is for IP header 8 bytes alignment in QoS packet case. */
+ /* 8 is for skb->data 4 bytes alignment. */
+ alloc_sz += 14;
+ }
+
+ pkt_copy = rtw_skb_alloc(alloc_sz);
+
+ if (pkt_copy) {
+ pkt_copy->dev = padapter->pnetdev;
+ precvframe->u.hdr.pkt = pkt_copy;
+ skb_reserve(pkt_copy, 8 - ((SIZE_PTR)(pkt_copy->data) & 7));/* force pkt_copy->data at 8-byte alignment address */
+ skb_reserve(pkt_copy, shift_sz);/* force ip_hdr at 8-byte alignment address according to shift_sz. */
+ memcpy(pkt_copy->data, (ptr + rx_report_sz + pattrib->shift_sz), skb_len);
+ precvframe->u.hdr.rx_head = pkt_copy->head;
+ precvframe->u.hdr.rx_data = precvframe->u.hdr.rx_tail = pkt_copy->data;
+ precvframe->u.hdr.rx_end = skb_end_pointer(pkt_copy);
+ } else {
+ if ((pattrib->mfrag == 1) && (pattrib->frag_num == 0)) {
+ DBG_8192C("%s: alloc_skb fail, drop frag frame\n", __func__);
+ rtw_free_recvframe(precvframe, &precvpriv->free_recv_queue);
+ break;
+ }
+
+ precvframe->u.hdr.pkt = rtw_skb_clone(precvbuf->pskb);
+ if (precvframe->u.hdr.pkt) {
+ _pkt *pkt_clone = precvframe->u.hdr.pkt;
+
+ pkt_clone->data = ptr + rx_report_sz + pattrib->shift_sz;
+ skb_reset_tail_pointer(pkt_clone);
+ precvframe->u.hdr.rx_head = precvframe->u.hdr.rx_data = precvframe->u.hdr.rx_tail
+ = pkt_clone->data;
+ precvframe->u.hdr.rx_end = pkt_clone->data + skb_len;
+ } else {
+ DBG_8192C("%s: rtw_skb_clone fail\n", __func__);
+ rtw_free_recvframe(precvframe, &precvpriv->free_recv_queue);
+ break;
+ }
+ }
+
+ recvframe_put(precvframe, skb_len);
+ /* recvframe_pull(precvframe, drvinfo_sz + RXDESC_SIZE); */
+
+ if (pHalData->ReceiveConfig & RCR_APPFCS)
+ recvframe_pull_tail(precvframe, IEEE80211_FCS_LEN);
+
+ /* move to drv info position */
+ ptr += RXDESC_SIZE;
+
+ /* update drv info */
+ if (pHalData->ReceiveConfig & RCR_APP_BA_SSN) {
+ /* rtl8723s_update_bassn(padapter, pdrvinfo); */
+ ptr += 4;
+ }
+
+ if (pattrib->pkt_rpt_type == NORMAL_RX) { /* Normal rx packet */
+ if (pattrib->physt)
+ update_recvframe_phyinfo(precvframe, (struct phy_stat *)ptr);
+
+ if (rtw_recv_entry(precvframe) != _SUCCESS) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_dump_, ("%s: rtw_recv_entry(precvframe) != _SUCCESS\n", __func__));
+ }
+ } else if (pattrib->pkt_rpt_type == C2H_PACKET) {
+ C2H_EVT_HDR C2hEvent;
+
+ u16 len_c2h = pattrib->pkt_len;
+ u8 *pbuf_c2h = precvframe->u.hdr.rx_data;
+ u8 *pdata_c2h;
+
+ C2hEvent.CmdID = pbuf_c2h[0];
+ C2hEvent.CmdSeq = pbuf_c2h[1];
+ C2hEvent.CmdLen = (len_c2h-2);
+ pdata_c2h = pbuf_c2h+2;
+
+ if (C2hEvent.CmdID == C2H_CCX_TX_RPT)
+ CCX_FwC2HTxRpt_8723b(padapter, pdata_c2h, C2hEvent.CmdLen);
+ else
+ rtl8723bs_c2h_packet_handler(padapter, precvframe->u.hdr.rx_data, pattrib->pkt_len);
+
+ rtw_free_recvframe(precvframe, &precvpriv->free_recv_queue);
+
+ }
+ }
+
+ pkt_offset = _RND8(pkt_offset);
+ precvbuf->pdata += pkt_offset;
+ ptr = precvbuf->pdata;
+ precvframe = NULL;
+ pkt_copy = NULL;
+ }
+
+ rtw_enqueue_recvbuf(precvbuf, &precvpriv->free_recv_buf_queue);
+ } while (1);
+
+}
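The skb_reserve() calls in the tasklet above first push pkt_copy->data up to the next 8-byte boundary and then add the 6-byte QoS shift so the IP header behind a 26-byte QoS WLAN header lands 8-byte aligned (26 + 6 = 32). A hypothetical sketch of just that pointer arithmetic:

/* Advance addr to the next 8-byte boundary (reserving a full 8 bytes when it
 * is already aligned, exactly like 8 - (addr & 7) above), then add the 6-byte
 * shift used for QoS data frames.
 */
static unsigned long example_align_rx_data(unsigned long addr, int qos)
{
        unsigned long aligned = addr + (8 - (addr & 7));

        return qos ? aligned + 6 : aligned;
}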
+
+/*
+ * Initialize hardware-dependent recv private variables:
+ * 1. recv buf
+ * 2. recv tasklet
+ *
+ */
+s32 rtl8723bs_init_recv_priv(struct adapter *padapter)
+{
+ s32 res;
+ u32 i, n;
+ struct recv_priv *precvpriv;
+ struct recv_buf *precvbuf;
+
+
+ res = _SUCCESS;
+ precvpriv = &padapter->recvpriv;
+
+ /* 3 1. init recv buffer */
+ _rtw_init_queue(&precvpriv->free_recv_buf_queue);
+ _rtw_init_queue(&precvpriv->recv_buf_pending_queue);
+
+ n = NR_RECVBUFF * sizeof(struct recv_buf) + 4;
+ precvpriv->pallocated_recv_buf = rtw_zmalloc(n);
+ if (precvpriv->pallocated_recv_buf == NULL) {
+ res = _FAIL;
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("alloc recv_buf fail!\n"));
+ goto exit;
+ }
+
+ precvpriv->precv_buf = (u8 *)N_BYTE_ALIGMENT((SIZE_PTR)(precvpriv->pallocated_recv_buf), 4);
+
+ /* init each recv buffer */
+ precvbuf = (struct recv_buf *)precvpriv->precv_buf;
+ for (i = 0; i < NR_RECVBUFF; i++) {
+ res = initrecvbuf(precvbuf, padapter);
+ if (res == _FAIL)
+ break;
+
+ if (precvbuf->pskb == NULL) {
+ SIZE_PTR tmpaddr = 0;
+ SIZE_PTR alignment = 0;
+
+ precvbuf->pskb = rtw_skb_alloc(MAX_RECVBUF_SZ + RECVBUFF_ALIGN_SZ);
+
+ if (precvbuf->pskb) {
+ precvbuf->pskb->dev = padapter->pnetdev;
+
+ tmpaddr = (SIZE_PTR)precvbuf->pskb->data;
+ alignment = tmpaddr & (RECVBUFF_ALIGN_SZ-1);
+ skb_reserve(precvbuf->pskb, (RECVBUFF_ALIGN_SZ - alignment));
+ }
+
+ if (precvbuf->pskb == NULL) {
+ DBG_871X("%s: alloc_skb fail!\n", __func__);
+ }
+ }
+
+ list_add_tail(&precvbuf->list, &precvpriv->free_recv_buf_queue.queue);
+
+ precvbuf++;
+ }
+ precvpriv->free_recv_buf_queue_cnt = i;
+
+ if (res == _FAIL)
+ goto initbuferror;
+
+ /* 3 2. init tasklet */
+ tasklet_init(
+ &precvpriv->recv_tasklet,
+ (void(*)(unsigned long))rtl8723bs_recv_tasklet,
+ (unsigned long)padapter
+ );
+
+ goto exit;
+
+initbuferror:
+ precvbuf = (struct recv_buf *)precvpriv->precv_buf;
+ if (precvbuf) {
+ n = precvpriv->free_recv_buf_queue_cnt;
+ precvpriv->free_recv_buf_queue_cnt = 0;
+ for (i = 0; i < n ; i++) {
+ list_del_init(&precvbuf->list);
+ rtw_os_recvbuf_resource_free(padapter, precvbuf);
+ precvbuf++;
+ }
+ precvpriv->precv_buf = NULL;
+ }
+
+ if (precvpriv->pallocated_recv_buf) {
+ n = NR_RECVBUFF * sizeof(struct recv_buf) + 4;
+ kfree(precvpriv->pallocated_recv_buf);
+ precvpriv->pallocated_recv_buf = NULL;
+ }
+
+exit:
+ return res;
+}
+
+/*
+ * Free hardware-dependent recv private variables:
+ * 1. recv buf
+ * 2. recv tasklet
+ *
+ */
+void rtl8723bs_free_recv_priv(struct adapter *padapter)
+{
+ u32 i, n;
+ struct recv_priv *precvpriv;
+ struct recv_buf *precvbuf;
+
+
+ precvpriv = &padapter->recvpriv;
+
+ /* 3 1. kill tasklet */
+ tasklet_kill(&precvpriv->recv_tasklet);
+
+ /* 3 2. free all recv buffers */
+ precvbuf = (struct recv_buf *)precvpriv->precv_buf;
+ if (precvbuf) {
+ n = NR_RECVBUFF;
+ precvpriv->free_recv_buf_queue_cnt = 0;
+ for (i = 0; i < n ; i++) {
+ list_del_init(&precvbuf->list);
+ rtw_os_recvbuf_resource_free(padapter, precvbuf);
+ precvbuf++;
+ }
+ precvpriv->precv_buf = NULL;
+ }
+
+ if (precvpriv->pallocated_recv_buf) {
+ n = NR_RECVBUFF * sizeof(struct recv_buf) + 4;
+ kfree(precvpriv->pallocated_recv_buf);
+ precvpriv->pallocated_recv_buf = NULL;
+ }
+}
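rtl8723bs_init_recv_priv() above over-allocates the recv_buf array by 4 bytes and then rounds the pointer up with N_BYTE_ALIGMENT(), which is assumed to round up to the requested alignment. A hypothetical equivalent of that 4-byte round-up:

static void *example_align_up_4(void *p)
{
        /* Round a pointer up to the next multiple of 4; allocating
         * size + 4 guarantees the aligned pointer still has room.
         */
        return (void *)(((unsigned long)p + 3) & ~0x3UL);
}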
diff --git a/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c b/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c
new file mode 100644
index 000000000000..ca6ad9659b09
--- /dev/null
+++ b/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c
@@ -0,0 +1,686 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#define _RTL8723BS_XMIT_C_
+
+#include <drv_types.h>
+#include <rtw_debug.h>
+#include <rtl8723b_hal.h>
+
+static u8 rtw_sdio_wait_enough_TxOQT_space(struct adapter *padapter, u8 agg_num)
+{
+ u32 n = 0;
+ struct hal_com_data *pHalData = GET_HAL_DATA(padapter);
+
+ while (pHalData->SdioTxOQTFreeSpace < agg_num)
+ {
+ if (
+ (padapter->bSurpriseRemoved == true) ||
+ (padapter->bDriverStopped == true)
+ ) {
+ DBG_871X("%s: bSurpriseRemoved or bDriverStopped (wait TxOQT)\n", __func__);
+ return false;
+ }
+
+ HalQueryTxOQTBufferStatus8723BSdio(padapter);
+
+ if ((++n % 60) == 0) {
+ if ((n % 300) == 0) {
+ DBG_871X("%s(%d): QOT free space(%d), agg_num: %d\n",
+ __func__, n, pHalData->SdioTxOQTFreeSpace, agg_num);
+ }
+ msleep(1);
+ /* yield(); */
+ }
+ }
+
+ pHalData->SdioTxOQTFreeSpace -= agg_num;
+
+ /* if (n > 1) */
+ /* ++priv->pshare->nr_out_of_txoqt_space; */
+
+ return true;
+}
+
+static s32 rtl8723_dequeue_writeport(struct adapter *padapter)
+{
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+ struct dvobj_priv *pdvobjpriv = adapter_to_dvobj(padapter);
+ struct xmit_buf *pxmitbuf;
+ struct adapter * pri_padapter = padapter;
+ s32 ret = 0;
+ u8 PageIdx = 0;
+ u32 deviceId;
+ u8 bUpdatePageNum = false;
+
+ ret = ret || check_fwstate(pmlmepriv, _FW_UNDER_SURVEY);
+
+ if (true == ret)
+ pxmitbuf = dequeue_pending_xmitbuf_under_survey(pxmitpriv);
+ else
+ pxmitbuf = dequeue_pending_xmitbuf(pxmitpriv);
+
+ if (pxmitbuf == NULL)
+ return true;
+
+ deviceId = ffaddr2deviceId(pdvobjpriv, pxmitbuf->ff_hwaddr);
+
+ /* translate fifo addr to queue index */
+ switch (deviceId) {
+ case WLAN_TX_HIQ_DEVICE_ID:
+ PageIdx = HI_QUEUE_IDX;
+ break;
+
+ case WLAN_TX_MIQ_DEVICE_ID:
+ PageIdx = MID_QUEUE_IDX;
+ break;
+
+ case WLAN_TX_LOQ_DEVICE_ID:
+ PageIdx = LOW_QUEUE_IDX;
+ break;
+ }
+
+query_free_page:
+ /* check if hardware tx fifo page is enough */
+ if (false == rtw_hal_sdio_query_tx_freepage(pri_padapter, PageIdx, pxmitbuf->pg_num)) {
+ if (!bUpdatePageNum) {
+ /* Total number of page is NOT available, so update current FIFO status */
+ HalQueryTxBufferStatus8723BSdio(padapter);
+ bUpdatePageNum = true;
+ goto query_free_page;
+ } else {
+ bUpdatePageNum = false;
+ enqueue_pending_xmitbuf_to_head(pxmitpriv, pxmitbuf);
+ return true;
+ }
+ }
+
+ if (
+ (padapter->bSurpriseRemoved == true) ||
+ (padapter->bDriverStopped == true)
+ ) {
+ RT_TRACE(
+ _module_hal_xmit_c_,
+ _drv_notice_,
+ ("%s: bSurpriseRemoved(wirte port)\n", __func__)
+ );
+ goto free_xmitbuf;
+ }
+
+ if (rtw_sdio_wait_enough_TxOQT_space(padapter, pxmitbuf->agg_num) == false)
+ goto free_xmitbuf;
+
+ traffic_check_for_leave_lps(padapter, true, pxmitbuf->agg_num);
+
+ rtw_write_port(padapter, deviceId, pxmitbuf->len, (u8 *)pxmitbuf);
+
+ rtw_hal_sdio_update_tx_freepage(pri_padapter, PageIdx, pxmitbuf->pg_num);
+
+free_xmitbuf:
+ /* rtw_free_xmitframe(pxmitpriv, pframe); */
+ /* pxmitbuf->priv_data = NULL; */
+ rtw_free_xmitbuf(pxmitpriv, pxmitbuf);
+
+#ifdef CONFIG_SDIO_TX_TASKLET
+ tasklet_hi_schedule(&pxmitpriv->xmit_tasklet);
+#endif
+
+ return _FAIL;
+}
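The query_free_page path above refreshes the cached free-page count from hardware at most once before giving up and requeueing the xmitbuf. A hypothetical condensed sketch of that "query, refresh once, retry" flow, assuming rtw_hal_sdio_query_tx_freepage() returns a bool as its use above suggests:

static bool example_try_reserve_pages(struct adapter *adapter, u8 queue, u8 pages)
{
        if (rtw_hal_sdio_query_tx_freepage(adapter, queue, pages))
                return true;

        /* cached count was stale: refresh it from hardware and retry once */
        HalQueryTxBufferStatus8723BSdio(adapter);

        return rtw_hal_sdio_query_tx_freepage(adapter, queue, pages);
}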
+
+/*
+ * Description
+ * Transmit xmitbuf to the hardware TX FIFO
+ *
+ * Return
+ * _SUCCESS ok
+ * _FAIL something went wrong
+ */
+s32 rtl8723bs_xmit_buf_handler(struct adapter *padapter)
+{
+ struct xmit_priv *pxmitpriv;
+ u8 queue_empty, queue_pending;
+ s32 ret;
+
+
+ pxmitpriv = &padapter->xmitpriv;
+
+ if (down_interruptible(&pxmitpriv->xmit_sema)) {
+ DBG_871X_LEVEL(_drv_emerg_, "%s: down SdioXmitBufSema fail!\n", __func__);
+ return _FAIL;
+ }
+
+ ret = (padapter->bDriverStopped == true) || (padapter->bSurpriseRemoved == true);
+ if (ret) {
+ RT_TRACE(
+ _module_hal_xmit_c_,
+ _drv_err_,
+ (
+ "%s: bDriverStopped(%d) bSurpriseRemoved(%d)!\n",
+ __func__,
+ padapter->bDriverStopped,
+ padapter->bSurpriseRemoved
+ )
+ );
+ return _FAIL;
+ }
+
+ queue_pending = check_pending_xmitbuf(pxmitpriv);
+
+ if (queue_pending == false)
+ return _SUCCESS;
+
+ ret = rtw_register_tx_alive(padapter);
+ if (ret != _SUCCESS) {
+ return _SUCCESS;
+ }
+
+ do {
+ queue_empty = rtl8723_dequeue_writeport(padapter);
+/* dump secondary adapter xmitbuf */
+ } while (!queue_empty);
+
+ rtw_unregister_tx_alive(padapter);
+
+ return _SUCCESS;
+}
+
+/*
+ * Description:
+ * Aggregate packets and send them to hardware
+ *
+ * Return:
+ * 0 Success
+ * -1 Hardware resource (TX FIFO) not ready
+ * -2 Software resource (xmitbuf) not ready
+ */
+static s32 xmit_xmitframes(struct adapter *padapter, struct xmit_priv *pxmitpriv)
+{
+ s32 err, ret;
+ u32 k = 0;
+ struct hw_xmit *hwxmits, *phwxmit;
+ u8 no_res, idx, hwentry;
+ struct tx_servq *ptxservq;
+ struct list_head *sta_plist, *sta_phead, *frame_plist, *frame_phead;
+ struct xmit_frame *pxmitframe;
+ struct __queue *pframe_queue;
+ struct xmit_buf *pxmitbuf;
+ u32 txlen, max_xmit_len;
+ u8 txdesc_size = TXDESC_SIZE;
+ int inx[4];
+
+ err = 0;
+ no_res = false;
+ hwxmits = pxmitpriv->hwxmits;
+ hwentry = pxmitpriv->hwxmit_entry;
+ ptxservq = NULL;
+ pxmitframe = NULL;
+ pframe_queue = NULL;
+ pxmitbuf = NULL;
+
+ if (padapter->registrypriv.wifi_spec == 1) {
+ for (idx = 0; idx<4; idx++)
+ inx[idx] = pxmitpriv->wmm_para_seq[idx];
+ } else {
+ inx[0] = 0;
+ inx[1] = 1;
+ inx[2] = 2;
+ inx[3] = 3;
+ }
+
+ /* 0(VO), 1(VI), 2(BE), 3(BK) */
+ for (idx = 0; idx < hwentry; idx++) {
+ phwxmit = hwxmits + inx[idx];
+
+ if (
+ (check_pending_xmitbuf(pxmitpriv) == true) &&
+ (padapter->mlmepriv.LinkDetectInfo.bHigherBusyTxTraffic == true)
+ ) {
+ if ((phwxmit->accnt > 0) && (phwxmit->accnt < 5)) {
+ err = -2;
+ break;
+ }
+ }
+
+ max_xmit_len = rtw_hal_get_sdio_tx_max_length(padapter, inx[idx]);
+
+ spin_lock_bh(&pxmitpriv->lock);
+
+ sta_phead = get_list_head(phwxmit->sta_queue);
+ sta_plist = get_next(sta_phead);
+ /* because stop_sta_xmit may delete sta_plist at any time, */
+ /* we must hold the lock here, otherwise the while loop cannot exit */
+ while (sta_phead != sta_plist) {
+ ptxservq = LIST_CONTAINOR(sta_plist, struct tx_servq, tx_pending);
+ sta_plist = get_next(sta_plist);
+
+#ifdef DBG_XMIT_BUF
+ DBG_871X(
+ "%s idx:%d hwxmit_pkt_num:%d ptxservq_pkt_num:%d\n",
+ __func__,
+ idx,
+ phwxmit->accnt,
+ ptxservq->qcnt
+ );
+ DBG_871X(
+ "%s free_xmit_extbuf_cnt =%d free_xmitbuf_cnt =%d free_xmitframe_cnt =%d\n",
+ __func__,
+ pxmitpriv->free_xmit_extbuf_cnt,
+ pxmitpriv->free_xmitbuf_cnt,
+ pxmitpriv->free_xmitframe_cnt
+ );
+#endif
+ pframe_queue = &ptxservq->sta_pending;
+
+ frame_phead = get_list_head(pframe_queue);
+
+ while (list_empty(frame_phead) == false) {
+ frame_plist = get_next(frame_phead);
+ pxmitframe = LIST_CONTAINOR(frame_plist, struct xmit_frame, list);
+
+ /* check whether the xmit_buf size is enough */
+ txlen = txdesc_size + rtw_wlan_pkt_size(pxmitframe);
+ if (
+ (NULL == pxmitbuf) ||
+ ((_RND(pxmitbuf->len, 8) + txlen) > max_xmit_len) ||
+ (k >= (rtw_hal_sdio_max_txoqt_free_space(padapter)-1))
+ ) {
+ if (pxmitbuf) {
+ /* pxmitbuf->priv_data will be NULL, and will crash here */
+ if (pxmitbuf->len > 0 && pxmitbuf->priv_data) {
+ struct xmit_frame *pframe;
+ pframe = (struct xmit_frame*)pxmitbuf->priv_data;
+ pframe->agg_num = k;
+ pxmitbuf->agg_num = k;
+ rtl8723b_update_txdesc(pframe, pframe->buf_addr);
+ rtw_free_xmitframe(pxmitpriv, pframe);
+ pxmitbuf->priv_data = NULL;
+ enqueue_pending_xmitbuf(pxmitpriv, pxmitbuf);
+ /* can not yield under lock */
+ /* yield(); */
+ } else
+ rtw_free_xmitbuf(pxmitpriv, pxmitbuf);
+ }
+
+ pxmitbuf = rtw_alloc_xmitbuf(pxmitpriv);
+ if (pxmitbuf == NULL) {
+#ifdef DBG_XMIT_BUF
+ DBG_871X_LEVEL(_drv_err_, "%s: xmit_buf is not enough!\n", __func__);
+#endif
+ err = -2;
+ up(&(pxmitpriv->xmit_sema));
+ break;
+ }
+ k = 0;
+ }
+
+ /* ok to send, remove frame from queue */
+ if (check_fwstate(&padapter->mlmepriv, WIFI_AP_STATE) == true) {
+ if (
+ (pxmitframe->attrib.psta->state & WIFI_SLEEP_STATE) &&
+ (pxmitframe->attrib.triggered == 0)
+ ) {
+ DBG_871X(
+ "%s: one not triggered pkt in queue when this STA sleep,"
+ " break and goto next sta\n",
+ __func__
+ );
+ break;
+ }
+ }
+
+ list_del_init(&pxmitframe->list);
+ ptxservq->qcnt--;
+ phwxmit->accnt--;
+
+ if (k == 0) {
+ pxmitbuf->ff_hwaddr = rtw_get_ff_hwaddr(pxmitframe);
+ pxmitbuf->priv_data = (u8 *)pxmitframe;
+ }
+
+ /* coalesce the xmitframe to xmitbuf */
+ pxmitframe->pxmitbuf = pxmitbuf;
+ pxmitframe->buf_addr = pxmitbuf->ptail;
+
+ ret = rtw_xmitframe_coalesce(padapter, pxmitframe->pkt, pxmitframe);
+ if (ret == _FAIL) {
+ DBG_871X_LEVEL(_drv_err_, "%s: coalesce FAIL!", __func__);
+ /* Todo: error handler */
+ } else {
+ k++;
+ if (k != 1)
+ rtl8723b_update_txdesc(pxmitframe, pxmitframe->buf_addr);
+ rtw_count_tx_stats(padapter, pxmitframe, pxmitframe->attrib.last_txcmdsz);
+
+ txlen = txdesc_size + pxmitframe->attrib.last_txcmdsz;
+ pxmitframe->pg_num = (txlen + 127)/128;
+ pxmitbuf->pg_num += (txlen + 127)/128;
+ /* if (k != 1) */
+ /* ((struct xmit_frame*)pxmitbuf->priv_data)->pg_num += pxmitframe->pg_num; */
+ pxmitbuf->ptail += _RND(txlen, 8); /* round to 8 bytes alignment */
+ pxmitbuf->len = _RND(pxmitbuf->len, 8) + txlen;
+ }
+
+ if (k != 1)
+ rtw_free_xmitframe(pxmitpriv, pxmitframe);
+ pxmitframe = NULL;
+ }
+
+ if (list_empty(&pframe_queue->queue))
+ list_del_init(&ptxservq->tx_pending);
+
+ if (err)
+ break;
+ }
+ spin_unlock_bh(&pxmitpriv->lock);
+
+ /* dump xmit_buf to hw tx fifo */
+ if (pxmitbuf) {
+ RT_TRACE(_module_hal_xmit_c_, _drv_info_, ("pxmitbuf->len =%d enqueue\n", pxmitbuf->len));
+
+ if (pxmitbuf->len > 0) {
+ struct xmit_frame *pframe;
+ pframe = (struct xmit_frame*)pxmitbuf->priv_data;
+ pframe->agg_num = k;
+ pxmitbuf->agg_num = k;
+ rtl8723b_update_txdesc(pframe, pframe->buf_addr);
+ rtw_free_xmitframe(pxmitpriv, pframe);
+ pxmitbuf->priv_data = NULL;
+ enqueue_pending_xmitbuf(pxmitpriv, pxmitbuf);
+ yield();
+ }
+ else
+ rtw_free_xmitbuf(pxmitpriv, pxmitbuf);
+ pxmitbuf = NULL;
+ }
+
+ if (err)
+ break;
+ }
+
+ return err;
+}
+
+/*
+ * Description
+ * Transmit xmitframes from the queue
+ *
+ * Return
+ * _SUCCESS ok
+ * _FAIL something went wrong
+ */
+static s32 rtl8723bs_xmit_handler(struct adapter *padapter)
+{
+ struct xmit_priv *pxmitpriv;
+ s32 ret;
+
+
+ pxmitpriv = &padapter->xmitpriv;
+
+ if (down_interruptible(&pxmitpriv->SdioXmitSema)) {
+ DBG_871X_LEVEL(_drv_emerg_, "%s: down sema fail!\n", __func__);
+ return _FAIL;
+ }
+
+next:
+ if (
+ (padapter->bDriverStopped == true) ||
+ (padapter->bSurpriseRemoved == true)
+ ) {
+ RT_TRACE(
+ _module_hal_xmit_c_,
+ _drv_notice_,
+ (
+ "%s: bDriverStopped(%d) bSurpriseRemoved(%d)\n",
+ __func__,
+ padapter->bDriverStopped,
+ padapter->bSurpriseRemoved
+ )
+ );
+ return _FAIL;
+ }
+
+ spin_lock_bh(&pxmitpriv->lock);
+ ret = rtw_txframes_pending(padapter);
+ spin_unlock_bh(&pxmitpriv->lock);
+ if (ret == 0) {
+ return _SUCCESS;
+ }
+
+ /* dequeue frame and write to hardware */
+
+ ret = xmit_xmitframes(padapter, pxmitpriv);
+ if (ret == -2) {
+ /* sleeping 1 ms here would cause a big TX throughput loss */
+ /* (from 50+ down to 40+) */
+ if (padapter->registrypriv.wifi_spec)
+ msleep(1);
+ else
+ yield();
+ goto next;
+ }
+
+ spin_lock_bh(&pxmitpriv->lock);
+ ret = rtw_txframes_pending(padapter);
+ spin_unlock_bh(&pxmitpriv->lock);
+ if (ret == 1) {
+ goto next;
+ }
+
+ return _SUCCESS;
+}
+
+int rtl8723bs_xmit_thread(void *context)
+{
+ s32 ret;
+ struct adapter *padapter;
+ struct xmit_priv *pxmitpriv;
+ u8 thread_name[20] = "RTWHALXT";
+
+
+ ret = _SUCCESS;
+ padapter = (struct adapter *)context;
+ pxmitpriv = &padapter->xmitpriv;
+
+ rtw_sprintf(thread_name, 20, "%s-"ADPT_FMT, thread_name, ADPT_ARG(padapter));
+ thread_enter(thread_name);
+
+ DBG_871X("start "FUNC_ADPT_FMT"\n", FUNC_ADPT_ARG(padapter));
+
+ /* For now, no one downs this sema to check whether the thread is running, */
+ /* so keep this commented out temporarily. Lucas@20130820 */
+/* up(&pxmitpriv->SdioXmitTerminateSema); */
+
+ do {
+ ret = rtl8723bs_xmit_handler(padapter);
+ if (signal_pending(current)) {
+ flush_signals(current);
+ }
+ } while (_SUCCESS == ret);
+
+ up(&pxmitpriv->SdioXmitTerminateSema);
+
+ RT_TRACE(_module_hal_xmit_c_, _drv_notice_, ("-%s\n", __func__));
+
+ thread_exit();
+}
+
+s32 rtl8723bs_mgnt_xmit(
+ struct adapter *padapter, struct xmit_frame *pmgntframe
+)
+{
+ s32 ret = _SUCCESS;
+ struct pkt_attrib *pattrib;
+ struct xmit_buf *pxmitbuf;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+ struct dvobj_priv *pdvobjpriv = adapter_to_dvobj(padapter);
+ u8 *pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
+ u8 txdesc_size = TXDESC_SIZE;
+
+ RT_TRACE(_module_hal_xmit_c_, _drv_info_, ("+%s\n", __func__));
+
+ pattrib = &pmgntframe->attrib;
+ pxmitbuf = pmgntframe->pxmitbuf;
+
+ rtl8723b_update_txdesc(pmgntframe, pmgntframe->buf_addr);
+
+ pxmitbuf->len = txdesc_size + pattrib->last_txcmdsz;
+ pxmitbuf->pg_num = (pxmitbuf->len + 127)/128; /* 128 is tx page size */
+ pxmitbuf->ptail = pmgntframe->buf_addr + pxmitbuf->len;
+ pxmitbuf->ff_hwaddr = rtw_get_ff_hwaddr(pmgntframe);
+
+ rtw_count_tx_stats(padapter, pmgntframe, pattrib->last_txcmdsz);
+
+ rtw_free_xmitframe(pxmitpriv, pmgntframe);
+
+ pxmitbuf->priv_data = NULL;
+
+ if (GetFrameSubType(pframe) == WIFI_BEACON) { /* dump beacon directly */
+ ret = rtw_write_port(padapter, pdvobjpriv->Queue2Pipe[pxmitbuf->ff_hwaddr], pxmitbuf->len, (u8 *)pxmitbuf);
+ if (ret != _SUCCESS)
+ rtw_sctx_done_err(&pxmitbuf->sctx, RTW_SCTX_DONE_WRITE_PORT_ERR);
+
+ rtw_free_xmitbuf(pxmitpriv, pxmitbuf);
+ } else
+ enqueue_pending_xmitbuf(pxmitpriv, pxmitbuf);
+
+ return ret;
+}
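The page accounting in rtl8723bs_mgnt_xmit() (and in xmit_xmitframes() above) charges each frame ceil(len / 128) pages, since the TX page size is 128 bytes. A hypothetical helper showing the same arithmetic:

static u32 example_tx_page_count(u32 len)
{
        /* e.g. len = 300 -> (300 + 127) / 128 = 3 pages */
        return (len + 127) / 128;
}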
+
+/*
+ * Description:
+ * Handle an xmitframe (packet) coming from rtw_xmit()
+ *
+ * Return:
+ * true packet dumped directly, OK
+ * false enqueued; temporarily can't transmit packets to hardware
+ */
+s32 rtl8723bs_hal_xmit(
+ struct adapter *padapter, struct xmit_frame *pxmitframe
+)
+{
+ struct xmit_priv *pxmitpriv;
+ s32 err;
+
+
+ pxmitframe->attrib.qsel = pxmitframe->attrib.priority;
+ pxmitpriv = &padapter->xmitpriv;
+
+ if (
+ (pxmitframe->frame_tag == DATA_FRAMETAG) &&
+ (pxmitframe->attrib.ether_type != 0x0806) &&
+ (pxmitframe->attrib.ether_type != 0x888e) &&
+ (pxmitframe->attrib.dhcp_pkt != 1)
+ ) {
+ if (padapter->mlmepriv.LinkDetectInfo.bBusyTraffic == true)
+ rtw_issue_addbareq_cmd(padapter, pxmitframe);
+ }
+
+ spin_lock_bh(&pxmitpriv->lock);
+ err = rtw_xmitframe_enqueue(padapter, pxmitframe);
+ spin_unlock_bh(&pxmitpriv->lock);
+ if (err != _SUCCESS) {
+ RT_TRACE(_module_hal_xmit_c_, _drv_err_, ("rtl8723bs_hal_xmit: enqueue xmitframe fail\n"));
+ rtw_free_xmitframe(pxmitpriv, pxmitframe);
+
+ pxmitpriv->tx_drop++;
+ return true;
+ }
+
+ up(&pxmitpriv->SdioXmitSema);
+
+ return false;
+}
+
+s32 rtl8723bs_hal_xmitframe_enqueue(
+ struct adapter *padapter, struct xmit_frame *pxmitframe
+)
+{
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+ s32 err;
+
+ err = rtw_xmitframe_enqueue(padapter, pxmitframe);
+ if (err != _SUCCESS) {
+ rtw_free_xmitframe(pxmitpriv, pxmitframe);
+
+ pxmitpriv->tx_drop++;
+ } else {
+#ifdef CONFIG_SDIO_TX_TASKLET
+ tasklet_hi_schedule(&pxmitpriv->xmit_tasklet);
+#else
+ up(&pxmitpriv->SdioXmitSema);
+#endif
+ }
+
+ return err;
+
+}
+
+/*
+ * Return
+ * _SUCCESS start thread ok
+ * _FAIL start thread fail
+ *
+ */
+s32 rtl8723bs_init_xmit_priv(struct adapter *padapter)
+{
+ struct xmit_priv *xmitpriv = &padapter->xmitpriv;
+ struct hal_com_data *phal;
+
+
+ phal = GET_HAL_DATA(padapter);
+
+ spin_lock_init(&phal->SdioTxFIFOFreePageLock);
+ sema_init(&xmitpriv->SdioXmitSema, 0);
+ sema_init(&xmitpriv->SdioXmitTerminateSema, 0);
+
+ return _SUCCESS;
+}
+
+void rtl8723bs_free_xmit_priv(struct adapter *padapter)
+{
+ struct hal_com_data *phal;
+ struct xmit_priv *pxmitpriv;
+ struct xmit_buf *pxmitbuf;
+ struct __queue *pqueue;
+ struct list_head *plist, *phead;
+ struct list_head tmplist;
+
+
+ phal = GET_HAL_DATA(padapter);
+ pxmitpriv = &padapter->xmitpriv;
+ pqueue = &pxmitpriv->pending_xmitbuf_queue;
+ phead = get_list_head(pqueue);
+ INIT_LIST_HEAD(&tmplist);
+
+ spin_lock_bh(&pqueue->lock);
+ if (!list_empty(&pqueue->queue)) {
+ /* Insert tmplist to end of queue, and delete phead */
+ /* then tmplist become head of queue. */
+ list_add_tail(&tmplist, phead);
+ list_del_init(phead);
+ }
+ spin_unlock_bh(&pqueue->lock);
+
+ phead = &tmplist;
+ while (list_empty(phead) == false) {
+ plist = get_next(phead);
+ list_del_init(plist);
+
+ pxmitbuf = LIST_CONTAINOR(plist, struct xmit_buf, list);
+ rtw_free_xmitframe(pxmitpriv, (struct xmit_frame *)pxmitbuf->priv_data);
+ pxmitbuf->priv_data = NULL;
+ rtw_free_xmitbuf(pxmitpriv, pxmitbuf);
+ }
+}
diff --git a/drivers/staging/rtl8723bs/hal/sdio_halinit.c b/drivers/staging/rtl8723bs/hal/sdio_halinit.c
new file mode 100644
index 000000000000..6dfb06a49d41
--- /dev/null
+++ b/drivers/staging/rtl8723bs/hal/sdio_halinit.c
@@ -0,0 +1,1928 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#define _SDIO_HALINIT_C_
+
+#include <drv_types.h>
+#include <rtw_debug.h>
+#include <rtl8723b_hal.h>
+
+#include "hal_com_h2c.h"
+/*
+ * Description:
+ * Call the power-on sequence to enable the card
+ *
+ * Return:
+ * _SUCCESS enable succeeded
+ * _FAIL enable failed
+ */
+static u8 CardEnable(struct adapter *padapter)
+{
+ u8 bMacPwrCtrlOn;
+ u8 ret = _FAIL;
+
+
+ rtw_hal_get_hwreg(padapter, HW_VAR_APFM_ON_MAC, &bMacPwrCtrlOn);
+ if (bMacPwrCtrlOn == false) {
+ /* RSV_CTRL 0x1C[7:0] = 0x00 */
+ /* unlock ISO/CLK/Power control register */
+ rtw_write8(padapter, REG_RSV_CTRL, 0x0);
+
+ ret = HalPwrSeqCmdParsing(padapter, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, rtl8723B_card_enable_flow);
+ if (ret == _SUCCESS) {
+ u8 bMacPwrCtrlOn = true;
+ rtw_hal_set_hwreg(padapter, HW_VAR_APFM_ON_MAC, &bMacPwrCtrlOn);
+ }
+ } else
+ ret = _SUCCESS;
+
+ return ret;
+}
+
+#ifdef CONFIG_GPIO_WAKEUP
+/* we set it high during init and the FW will */
+/* give us a low pulse when the host wakes up */
+void HostWakeUpGpioClear(struct adapter *Adapter)
+{
+ u32 value32;
+
+ value32 = rtw_read32(Adapter, REG_GPIO_PIN_CTRL_2);
+
+ /* set GPIO 12 to 1 */
+ value32 |= BIT(12);/* 4+8 */
+ /* configure GPIO 12 as output */
+ value32 |= BIT(20);/* 4+16 */
+
+ rtw_write32(Adapter, REG_GPIO_PIN_CTRL_2, value32);
+} /* HostWakeUpGpioClear */
+
+void HalSetOutPutGPIO(struct adapter *padapter, u8 index, u8 OutPutValue)
+{
+ if (index <= 7) {
+ /* config GPIO mode */
+ rtw_write8(padapter, REG_GPIO_PIN_CTRL + 3, rtw_read8(padapter, REG_GPIO_PIN_CTRL + 3) & ~BIT(index));
+
+ /* config GPIO Sel */
+ /* 0: input */
+ /* 1: output */
+ rtw_write8(padapter, REG_GPIO_PIN_CTRL + 2, rtw_read8(padapter, REG_GPIO_PIN_CTRL + 2) | BIT(index));
+
+ /* set output value */
+ if (OutPutValue)
+ rtw_write8(padapter, REG_GPIO_PIN_CTRL + 1, rtw_read8(padapter, REG_GPIO_PIN_CTRL + 1) | BIT(index));
+ else
+ rtw_write8(padapter, REG_GPIO_PIN_CTRL + 1, rtw_read8(padapter, REG_GPIO_PIN_CTRL + 1) & ~BIT(index));
+ } else {
+ /* 88C Series: */
+ /* index: 11~8 transform to 3~0 */
+ /* 8723 Series: */
+ /* index: 12~8 transform to 4~0 */
+ index -= 8;
+
+ /* config GPIO mode */
+ rtw_write8(padapter, REG_GPIO_PIN_CTRL_2 + 3, rtw_read8(padapter, REG_GPIO_PIN_CTRL_2 + 3) & ~BIT(index));
+
+ /* config GPIO Sel */
+ /* 0: input */
+ /* 1: output */
+ rtw_write8(padapter, REG_GPIO_PIN_CTRL_2 + 2, rtw_read8(padapter, REG_GPIO_PIN_CTRL_2 + 2) | BIT(index));
+
+ /* set output value */
+ if (OutPutValue)
+ rtw_write8(padapter, REG_GPIO_PIN_CTRL_2 + 1, rtw_read8(padapter, REG_GPIO_PIN_CTRL_2 + 1) | BIT(index));
+ else
+ rtw_write8(padapter, REG_GPIO_PIN_CTRL_2 + 1, rtw_read8(padapter, REG_GPIO_PIN_CTRL_2 + 1) & ~BIT(index));
+ }
+}
+#endif
+
+static
+u8 _InitPowerOn_8723BS(struct adapter *padapter)
+{
+ u8 value8;
+ u16 value16;
+ u32 value32;
+ u8 ret;
+/* u8 bMacPwrCtrlOn; */
+
+
+ /* all of these MUST be configured before power on */
+#ifdef CONFIG_EXT_CLK
+ /* Use external crystal(XTAL) */
+ value8 = rtw_read8(padapter, REG_PAD_CTRL1_8723B+2);
+ value8 |= BIT(7);
+ rtw_write8(padapter, REG_PAD_CTRL1_8723B+2, value8);
+
+ /* CLK_REQ High active or Low Active */
+ /* Request GPIO polarity: */
+ /* 0: low active */
+ /* 1: high active */
+ value8 = rtw_read8(padapter, REG_MULTI_FUNC_CTRL+1);
+ value8 |= BIT(5);
+ rtw_write8(padapter, REG_MULTI_FUNC_CTRL+1, value8);
+#endif /* CONFIG_EXT_CLK */
+
+ /* only cmd52 can be used before power on(card enable) */
+ ret = CardEnable(padapter);
+ if (ret == false) {
+ RT_TRACE(
+ _module_hci_hal_init_c_,
+ _drv_emerg_,
+ ("%s: run power on flow fail\n", __func__)
+ );
+ return _FAIL;
+ }
+
+ /* Radio-Off Pin Trigger */
+ value8 = rtw_read8(padapter, REG_GPIO_INTM+1);
+ value8 |= BIT(1); /* Enable falling edge triggering interrupt */
+ rtw_write8(padapter, REG_GPIO_INTM+1, value8);
+ value8 = rtw_read8(padapter, REG_GPIO_IO_SEL_2+1);
+ value8 |= BIT(1);
+ rtw_write8(padapter, REG_GPIO_IO_SEL_2+1, value8);
+
+ /* Enable power down and GPIO interrupt */
+ value16 = rtw_read16(padapter, REG_APS_FSMCO);
+ value16 |= EnPDN; /* Enable HW power down and RF on */
+ rtw_write16(padapter, REG_APS_FSMCO, value16);
+
+ /* Enable CMD53 R/W Operation */
+/* bMacPwrCtrlOn = true; */
+/* rtw_hal_set_hwreg(padapter, HW_VAR_APFM_ON_MAC, &bMacPwrCtrlOn); */
+
+ rtw_write8(padapter, REG_CR, 0x00);
+ /* Enable MAC DMA/WMAC/SCHEDULE/SEC block */
+ value16 = rtw_read16(padapter, REG_CR);
+ value16 |= (
+ HCI_TXDMA_EN |
+ HCI_RXDMA_EN |
+ TXDMA_EN |
+ RXDMA_EN |
+ PROTOCOL_EN |
+ SCHEDULE_EN |
+ ENSEC |
+ CALTMR_EN
+ );
+ rtw_write16(padapter, REG_CR, value16);
+
+ rtw_btcoex_PowerOnSetting(padapter);
+
+ /* external switch to S1 */
+ /* 0x38[11] = 0x1 */
+ /* 0x4c[23] = 0x1 */
+ /* 0x64[0] = 0 */
+ value16 = rtw_read16(padapter, REG_PWR_DATA);
+ /* Switch the control of EESK, EECS to RFC for DPDT or Antenna switch */
+ value16 |= BIT(11); /* BIT_EEPRPAD_RFE_CTRL_EN */
+ rtw_write16(padapter, REG_PWR_DATA, value16);
+/* DBG_8192C("%s: REG_PWR_DATA(0x%x) = 0x%04X\n", __func__, REG_PWR_DATA, rtw_read16(padapter, REG_PWR_DATA)); */
+
+ value32 = rtw_read32(padapter, REG_LEDCFG0);
+ value32 |= BIT(23); /* DPDT_SEL_EN, 1 for SW control */
+ rtw_write32(padapter, REG_LEDCFG0, value32);
+/* DBG_8192C("%s: REG_LEDCFG0(0x%x) = 0x%08X\n", __func__, REG_LEDCFG0, rtw_read32(padapter, REG_LEDCFG0)); */
+
+ value8 = rtw_read8(padapter, REG_PAD_CTRL1_8723B);
+ value8 &= ~BIT(0); /* BIT_SW_DPDT_SEL_DATA, DPDT_SEL default configuration */
+ rtw_write8(padapter, REG_PAD_CTRL1_8723B, value8);
+/* DBG_8192C("%s: REG_PAD_CTRL1(0x%x) = 0x%02X\n", __func__, REG_PAD_CTRL1_8723B, rtw_read8(padapter, REG_PAD_CTRL1_8723B)); */
+
+#ifdef CONFIG_GPIO_WAKEUP
+ HostWakeUpGpioClear(padapter);
+#endif
+
+ return _SUCCESS;
+}
+
+/* Tx Page FIFO threshold */
+static void _init_available_page_threshold(struct adapter *padapter, u8 numHQ, u8 numNQ, u8 numLQ, u8 numPubQ)
+{
+ u16 HQ_threshold, NQ_threshold, LQ_threshold;
+
+ HQ_threshold = (numPubQ + numHQ + 1) >> 1;
+ HQ_threshold |= (HQ_threshold<<8);
+
+ NQ_threshold = (numPubQ + numNQ + 1) >> 1;
+ NQ_threshold |= (NQ_threshold<<8);
+
+ LQ_threshold = (numPubQ + numLQ + 1) >> 1;
+ LQ_threshold |= (LQ_threshold<<8);
+
+ rtw_write16(padapter, 0x218, HQ_threshold);
+ rtw_write16(padapter, 0x21A, NQ_threshold);
+ rtw_write16(padapter, 0x21C, LQ_threshold);
+ DBG_8192C("%s(): Enable Tx FIFO Page Threshold H:0x%x, N:0x%x, L:0x%x\n", __func__, HQ_threshold, NQ_threshold, LQ_threshold);
+}
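_init_available_page_threshold() above encodes each threshold as half of (public pages + queue pages), rounded up, replicated into both bytes of the 16-bit register value: for example numPubQ = 0xA0 and numHQ = 0x20 give (0xA0 + 0x20 + 1) >> 1 = 0x60 and a register value of 0x6060. A hypothetical helper showing that encoding:

static u16 example_page_threshold(u8 num_pub, u8 num_q)
{
        u16 th = (num_pub + num_q + 1) >> 1;    /* half the page budget, rounded up */

        return th | (th << 8);                  /* same value in low and high byte */
}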
+
+static void _InitQueueReservedPage(struct adapter *padapter)
+{
+ struct hal_com_data *pHalData = GET_HAL_DATA(padapter);
+ struct registry_priv *pregistrypriv = &padapter->registrypriv;
+ u32 numHQ = 0;
+ u32 numLQ = 0;
+ u32 numNQ = 0;
+ u32 numPubQ;
+ u32 value32;
+ u8 value8;
+ bool bWiFiConfig = pregistrypriv->wifi_spec;
+
+ if (pHalData->OutEpQueueSel & TX_SELE_HQ)
+ numHQ = bWiFiConfig ? WMM_NORMAL_PAGE_NUM_HPQ_8723B : NORMAL_PAGE_NUM_HPQ_8723B;
+
+ if (pHalData->OutEpQueueSel & TX_SELE_LQ)
+ numLQ = bWiFiConfig ? WMM_NORMAL_PAGE_NUM_LPQ_8723B : NORMAL_PAGE_NUM_LPQ_8723B;
+
+ /* NOTE: This step shall be done before writing REG_RQPN. */
+ if (pHalData->OutEpQueueSel & TX_SELE_NQ)
+ numNQ = bWiFiConfig ? WMM_NORMAL_PAGE_NUM_NPQ_8723B : NORMAL_PAGE_NUM_NPQ_8723B;
+
+ numPubQ = TX_TOTAL_PAGE_NUMBER_8723B - numHQ - numLQ - numNQ;
+
+ value8 = (u8)_NPQ(numNQ);
+ rtw_write8(padapter, REG_RQPN_NPQ, value8);
+
+ /* TX DMA */
+ value32 = _HPQ(numHQ) | _LPQ(numLQ) | _PUBQ(numPubQ) | LD_RQPN;
+ rtw_write32(padapter, REG_RQPN, value32);
+
+ rtw_hal_set_sdio_tx_max_length(padapter, numHQ, numNQ, numLQ, numPubQ);
+
+ _init_available_page_threshold(padapter, numHQ, numNQ, numLQ, numPubQ);
+}
+
+static void _InitTxBufferBoundary(struct adapter *padapter)
+{
+ struct registry_priv *pregistrypriv = &padapter->registrypriv;
+
+ /* u16 txdmactrl; */
+ u8 txpktbuf_bndy;
+
+ if (!pregistrypriv->wifi_spec) {
+ txpktbuf_bndy = TX_PAGE_BOUNDARY_8723B;
+ } else {
+ /* for WMM */
+ txpktbuf_bndy = WMM_NORMAL_TX_PAGE_BOUNDARY_8723B;
+ }
+
+ rtw_write8(padapter, REG_TXPKTBUF_BCNQ_BDNY_8723B, txpktbuf_bndy);
+ rtw_write8(padapter, REG_TXPKTBUF_MGQ_BDNY_8723B, txpktbuf_bndy);
+ rtw_write8(padapter, REG_TXPKTBUF_WMAC_LBK_BF_HD_8723B, txpktbuf_bndy);
+ rtw_write8(padapter, REG_TRXFF_BNDY, txpktbuf_bndy);
+ rtw_write8(padapter, REG_TDECTRL+1, txpktbuf_bndy);
+}
+
+static void _InitNormalChipRegPriority(
+ struct adapter *Adapter,
+ u16 beQ,
+ u16 bkQ,
+ u16 viQ,
+ u16 voQ,
+ u16 mgtQ,
+ u16 hiQ
+)
+{
+ u16 value16 = (rtw_read16(Adapter, REG_TRXDMA_CTRL) & 0x7);
+
+ value16 |=
+ _TXDMA_BEQ_MAP(beQ) |
+ _TXDMA_BKQ_MAP(bkQ) |
+ _TXDMA_VIQ_MAP(viQ) |
+ _TXDMA_VOQ_MAP(voQ) |
+ _TXDMA_MGQ_MAP(mgtQ) |
+ _TXDMA_HIQ_MAP(hiQ);
+
+ rtw_write16(Adapter, REG_TRXDMA_CTRL, value16);
+}
+
+static void _InitNormalChipOneOutEpPriority(struct adapter *Adapter)
+{
+ struct hal_com_data *pHalData = GET_HAL_DATA(Adapter);
+
+ u16 value = 0;
+ switch (pHalData->OutEpQueueSel) {
+ case TX_SELE_HQ:
+ value = QUEUE_HIGH;
+ break;
+ case TX_SELE_LQ:
+ value = QUEUE_LOW;
+ break;
+ case TX_SELE_NQ:
+ value = QUEUE_NORMAL;
+ break;
+ default:
+ /* RT_ASSERT(false, ("Shall not reach here!\n")); */
+ break;
+ }
+
+ _InitNormalChipRegPriority(
+ Adapter, value, value, value, value, value, value
+ );
+
+}
+
+static void _InitNormalChipTwoOutEpPriority(struct adapter *Adapter)
+{
+ struct hal_com_data *pHalData = GET_HAL_DATA(Adapter);
+ struct registry_priv *pregistrypriv = &Adapter->registrypriv;
+ u16 beQ, bkQ, viQ, voQ, mgtQ, hiQ;
+
+
+ u16 valueHi = 0;
+ u16 valueLow = 0;
+
+ switch (pHalData->OutEpQueueSel) {
+ case (TX_SELE_HQ | TX_SELE_LQ):
+ valueHi = QUEUE_HIGH;
+ valueLow = QUEUE_LOW;
+ break;
+ case (TX_SELE_NQ | TX_SELE_LQ):
+ valueHi = QUEUE_NORMAL;
+ valueLow = QUEUE_LOW;
+ break;
+ case (TX_SELE_HQ | TX_SELE_NQ):
+ valueHi = QUEUE_HIGH;
+ valueLow = QUEUE_NORMAL;
+ break;
+ default:
+ /* RT_ASSERT(false, ("Shall not reach here!\n")); */
+ break;
+ }
+
+ if (!pregistrypriv->wifi_spec) {
+ beQ = valueLow;
+ bkQ = valueLow;
+ viQ = valueHi;
+ voQ = valueHi;
+ mgtQ = valueHi;
+ hiQ = valueHi;
+ } else {
+ /* for WMM , CONFIG_OUT_EP_WIFI_MODE */
+ beQ = valueLow;
+ bkQ = valueHi;
+ viQ = valueHi;
+ voQ = valueLow;
+ mgtQ = valueHi;
+ hiQ = valueHi;
+ }
+
+ _InitNormalChipRegPriority(Adapter, beQ, bkQ, viQ, voQ, mgtQ, hiQ);
+
+}
+
+static void _InitNormalChipThreeOutEpPriority(struct adapter *padapter)
+{
+ struct registry_priv *pregistrypriv = &padapter->registrypriv;
+ u16 beQ, bkQ, viQ, voQ, mgtQ, hiQ;
+
+ if (!pregistrypriv->wifi_spec) {
+ /* typical setting */
+ beQ = QUEUE_LOW;
+ bkQ = QUEUE_LOW;
+ viQ = QUEUE_NORMAL;
+ voQ = QUEUE_HIGH;
+ mgtQ = QUEUE_HIGH;
+ hiQ = QUEUE_HIGH;
+ } else {
+ /* for WMM */
+ beQ = QUEUE_LOW;
+ bkQ = QUEUE_NORMAL;
+ viQ = QUEUE_NORMAL;
+ voQ = QUEUE_HIGH;
+ mgtQ = QUEUE_HIGH;
+ hiQ = QUEUE_HIGH;
+ }
+ _InitNormalChipRegPriority(padapter, beQ, bkQ, viQ, voQ, mgtQ, hiQ);
+}
+
+static void _InitNormalChipQueuePriority(struct adapter *Adapter)
+{
+ struct hal_com_data *pHalData = GET_HAL_DATA(Adapter);
+
+ switch (pHalData->OutEpNumber) {
+ case 1:
+ _InitNormalChipOneOutEpPriority(Adapter);
+ break;
+ case 2:
+ _InitNormalChipTwoOutEpPriority(Adapter);
+ break;
+ case 3:
+ _InitNormalChipThreeOutEpPriority(Adapter);
+ break;
+ default:
+ /* RT_ASSERT(false, ("Shall not reach here!\n")); */
+ break;
+ }
+
+
+}
+
+static void _InitQueuePriority(struct adapter *padapter)
+{
+ _InitNormalChipQueuePriority(padapter);
+}
+
+static void _InitPageBoundary(struct adapter *padapter)
+{
+ /* RX Page Boundary */
+ u16 rxff_bndy = RX_DMA_BOUNDARY_8723B;
+
+ rtw_write16(padapter, (REG_TRXFF_BNDY + 2), rxff_bndy);
+}
+
+static void _InitTransferPageSize(struct adapter *padapter)
+{
+ /* Tx page size is always 128. */
+
+ u8 value8;
+ value8 = _PSRX(PBP_128) | _PSTX(PBP_128);
+ rtw_write8(padapter, REG_PBP, value8);
+}
+
+static void _InitDriverInfoSize(struct adapter *padapter, u8 drvInfoSize)
+{
+ rtw_write8(padapter, REG_RX_DRVINFO_SZ, drvInfoSize);
+}
+
+static void _InitNetworkType(struct adapter *padapter)
+{
+ u32 value32;
+
+ value32 = rtw_read32(padapter, REG_CR);
+
+ /* TODO: use the other function to set network type */
+/* value32 = (value32 & ~MASK_NETTYPE) | _NETTYPE(NT_LINK_AD_HOC); */
+ value32 = (value32 & ~MASK_NETTYPE) | _NETTYPE(NT_LINK_AP);
+
+ rtw_write32(padapter, REG_CR, value32);
+}
+
+static void _InitWMACSetting(struct adapter *padapter)
+{
+ struct hal_com_data *pHalData;
+ u16 value16;
+
+
+ pHalData = GET_HAL_DATA(padapter);
+
+ pHalData->ReceiveConfig = 0;
+ pHalData->ReceiveConfig |= RCR_APM | RCR_AM | RCR_AB;
+ pHalData->ReceiveConfig |= RCR_CBSSID_DATA | RCR_CBSSID_BCN | RCR_AMF;
+ pHalData->ReceiveConfig |= RCR_HTC_LOC_CTRL;
+ pHalData->ReceiveConfig |= RCR_APP_PHYST_RXFF | RCR_APP_ICV | RCR_APP_MIC;
+ rtw_write32(padapter, REG_RCR, pHalData->ReceiveConfig);
+
+ /* Accept all multicast address */
+ rtw_write32(padapter, REG_MAR, 0xFFFFFFFF);
+ rtw_write32(padapter, REG_MAR + 4, 0xFFFFFFFF);
+
+ /* Accept all data frames */
+ value16 = 0xFFFF;
+ rtw_write16(padapter, REG_RXFLTMAP2, value16);
+
+ /* 2010.09.08 hpfan */
+ /* Since ADF is removed from RCR, ps-poll will not be indicated to the driver, */
+ /* so RxFilterMap should mask ps-poll to guarantee AP mode can rx ps-poll. */
+ value16 = 0x400;
+ rtw_write16(padapter, REG_RXFLTMAP1, value16);
+
+ /* Accept all management frames */
+ value16 = 0xFFFF;
+ rtw_write16(padapter, REG_RXFLTMAP0, value16);
+}
+
+static void _InitAdaptiveCtrl(struct adapter *padapter)
+{
+ u16 value16;
+ u32 value32;
+
+ /* Response Rate Set */
+ value32 = rtw_read32(padapter, REG_RRSR);
+ value32 &= ~RATE_BITMAP_ALL;
+ value32 |= RATE_RRSR_CCK_ONLY_1M;
+ rtw_write32(padapter, REG_RRSR, value32);
+
+ /* CF-END Threshold */
+ /* m_spIoBase->rtw_write8(REG_CFEND_TH, 0x1); */
+
+ /* SIFS (used in NAV) */
+ value16 = _SPEC_SIFS_CCK(0x10) | _SPEC_SIFS_OFDM(0x10);
+ rtw_write16(padapter, REG_SPEC_SIFS, value16);
+
+ /* Retry Limit */
+ value16 = _LRL(0x30) | _SRL(0x30);
+ rtw_write16(padapter, REG_RL, value16);
+}
+
+static void _InitEDCA(struct adapter *padapter)
+{
+ /* Set Spec SIFS (used in NAV) */
+ rtw_write16(padapter, REG_SPEC_SIFS, 0x100a);
+ rtw_write16(padapter, REG_MAC_SPEC_SIFS, 0x100a);
+
+ /* Set SIFS for CCK */
+ rtw_write16(padapter, REG_SIFS_CTX, 0x100a);
+
+ /* Set SIFS for OFDM */
+ rtw_write16(padapter, REG_SIFS_TRX, 0x100a);
+
+ /* TXOP */
+ rtw_write32(padapter, REG_EDCA_BE_PARAM, 0x005EA42B);
+ rtw_write32(padapter, REG_EDCA_BK_PARAM, 0x0000A44F);
+ rtw_write32(padapter, REG_EDCA_VI_PARAM, 0x005EA324);
+ rtw_write32(padapter, REG_EDCA_VO_PARAM, 0x002FA226);
+}
+
+static void _InitRetryFunction(struct adapter *padapter)
+{
+ u8 value8;
+
+ value8 = rtw_read8(padapter, REG_FWHW_TXQ_CTRL);
+ value8 |= EN_AMPDU_RTY_NEW;
+ rtw_write8(padapter, REG_FWHW_TXQ_CTRL, value8);
+
+ /* Set ACK timeout */
+ rtw_write8(padapter, REG_ACKTO, 0x40);
+}
+
+static void HalRxAggr8723BSdio(struct adapter *padapter)
+{
+ struct registry_priv *pregistrypriv;
+ u8 valueDMATimeout;
+ u8 valueDMAPageCount;
+
+
+ pregistrypriv = &padapter->registrypriv;
+
+ if (pregistrypriv->wifi_spec) {
+ /* 2010.04.27 hpfan */
+ /* Adjust RxAggrTimeout close to zero to disable RxAggr, as suggested by the designer */
+ /* Timeout value is calculated by 34 / (2^n) */
+ valueDMATimeout = 0x06;
+ valueDMAPageCount = 0x06;
+ } else {
+ /* 20130530, Isaac@SD1 suggest 3 kinds of parameter */
+ /* TX/RX Balance */
+ valueDMATimeout = 0x06;
+ valueDMAPageCount = 0x06;
+ }
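+ /* Note: both branches currently program the same timeout/page-count values. */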
+
+ rtw_write8(padapter, REG_RXDMA_AGG_PG_TH+1, valueDMATimeout);
+ rtw_write8(padapter, REG_RXDMA_AGG_PG_TH, valueDMAPageCount);
+}
+
+static void sdio_AggSettingRxUpdate(struct adapter *padapter)
+{
+ struct hal_com_data *pHalData;
+ u8 valueDMA;
+ u8 valueRxAggCtrl = 0;
+ u8 aggBurstNum = 3; /* 0:1, 1:2, 2:3, 3:4 */
+ u8 aggBurstSize = 0; /* 0:1K, 1:512Byte, 2:256Byte... */
+
+ pHalData = GET_HAL_DATA(padapter);
+
+ valueDMA = rtw_read8(padapter, REG_TRXDMA_CTRL);
+ valueDMA |= RXDMA_AGG_EN;
+ rtw_write8(padapter, REG_TRXDMA_CTRL, valueDMA);
+
+ valueRxAggCtrl |= RXDMA_AGG_MODE_EN;
+ valueRxAggCtrl |= ((aggBurstNum<<2) & 0x0C);
+ valueRxAggCtrl |= ((aggBurstSize<<4) & 0x30);
+ rtw_write8(padapter, REG_RXDMA_MODE_CTRL_8723B, valueRxAggCtrl);/* RxAggLowThresh = 4*1K */
+}
+
+static void _initSdioAggregationSetting(struct adapter *padapter)
+{
+ struct hal_com_data *pHalData = GET_HAL_DATA(padapter);
+
+ /* Tx aggregation setting */
+/* sdio_AggSettingTxUpdate(padapter); */
+
+ /* Rx aggregation setting */
+ HalRxAggr8723BSdio(padapter);
+
+ sdio_AggSettingRxUpdate(padapter);
+
+ /* 201/12/10 MH Add for USB agg mode dynamic switch. */
+ pHalData->UsbRxHighSpeedMode = false;
+}
+
+static void _InitOperationMode(struct adapter *padapter)
+{
+ struct hal_com_data *pHalData;
+ struct mlme_ext_priv *pmlmeext;
+ u8 regBwOpMode = 0;
+ u32 regRATR = 0, regRRSR = 0;
+
+ pHalData = GET_HAL_DATA(padapter);
+ pmlmeext = &padapter->mlmeextpriv;
+
+ /* 1 This part need to modified according to the rate set we filtered!! */
+ /* */
+ /* Set RRSR, RATR, and REG_BWOPMODE registers */
+ /* */
+ switch (pmlmeext->cur_wireless_mode) {
+ case WIRELESS_MODE_B:
+ regBwOpMode = BW_OPMODE_20MHZ;
+ regRATR = RATE_ALL_CCK;
+ regRRSR = RATE_ALL_CCK;
+ break;
+ case WIRELESS_MODE_A:
+/* RT_ASSERT(false, ("Error wireless a mode\n")); */
+ break;
+ case WIRELESS_MODE_G:
+ regBwOpMode = BW_OPMODE_20MHZ;
+ regRATR = RATE_ALL_CCK | RATE_ALL_OFDM_AG;
+ regRRSR = RATE_ALL_CCK | RATE_ALL_OFDM_AG;
+ break;
+ case WIRELESS_MODE_AUTO:
+ regBwOpMode = BW_OPMODE_20MHZ;
+ regRATR = RATE_ALL_CCK | RATE_ALL_OFDM_AG | RATE_ALL_OFDM_1SS | RATE_ALL_OFDM_2SS;
+ regRRSR = RATE_ALL_CCK | RATE_ALL_OFDM_AG;
+ break;
+ case WIRELESS_MODE_N_24G:
+ /* It supports CCK rates by default. */
+ /* CCK rates will be filtered out only when the associated AP does not support them. */
+ regBwOpMode = BW_OPMODE_20MHZ;
+ regRATR = RATE_ALL_CCK | RATE_ALL_OFDM_AG | RATE_ALL_OFDM_1SS | RATE_ALL_OFDM_2SS;
+ regRRSR = RATE_ALL_CCK | RATE_ALL_OFDM_AG;
+ break;
+ case WIRELESS_MODE_N_5G:
+/* RT_ASSERT(false, ("Error wireless mode")); */
+ regBwOpMode = BW_OPMODE_5G;
+ regRATR = RATE_ALL_OFDM_AG | RATE_ALL_OFDM_1SS | RATE_ALL_OFDM_2SS;
+ regRRSR = RATE_ALL_OFDM_AG;
+ break;
+
+ default: /* for MacOSX compiler warning. */
+ break;
+ }
+
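+ /* Only the BW opmode register is programmed here; regRATR/regRRSR are computed above but not written in this function. */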
+ rtw_write8(padapter, REG_BWOPMODE, regBwOpMode);
+
+}
+
+static void _InitInterrupt(struct adapter *padapter)
+{
+ /* HISR - turn all off */
+ rtw_write32(padapter, REG_HISR, 0);
+
+ /* HIMR - turn all off */
+ rtw_write32(padapter, REG_HIMR, 0);
+
+ /* */
+ /* Initialize and enable SDIO Host Interrupt. */
+ /* */
+ InitInterrupt8723BSdio(padapter);
+
+ /* */
+ /* Initialize system Host Interrupt. */
+ /* */
+ InitSysInterrupt8723BSdio(padapter);
+}
+
+static void _InitRFType(struct adapter *padapter)
+{
+ struct hal_com_data *pHalData = GET_HAL_DATA(padapter);
+
+#if DISABLE_BB_RF
+ pHalData->rf_chip = RF_PSEUDO_11N;
+ return;
+#endif
+
+ pHalData->rf_chip = RF_6052;
+
+ pHalData->rf_type = RF_1T1R;
+ DBG_8192C("Set RF Chip ID to RF_6052 and RF type to 1T1R.\n");
+}
+
+static void _RfPowerSave(struct adapter *padapter)
+{
+/* YJ, TODO */
+}
+
+/* */
+/* 2010/08/09 MH Add for power down check. */
+/* */
+static bool HalDetectPwrDownMode(struct adapter *Adapter)
+{
+ u8 tmpvalue;
+ struct hal_com_data *pHalData = GET_HAL_DATA(Adapter);
+ struct pwrctrl_priv *pwrctrlpriv = adapter_to_pwrctl(Adapter);
+
+
+ EFUSE_ShadowRead(Adapter, 1, 0x7B/*EEPROM_RF_OPT3_92C*/, (u32 *)&tmpvalue);
+
+ /* 2010/08/25 MH INF priority > PDN Efuse value. */
+ if (tmpvalue & BIT4 && pwrctrlpriv->reg_pdnmode)
+ pHalData->pwrdown = true;
+ else
+ pHalData->pwrdown = false;
+
+ DBG_8192C("HalDetectPwrDownMode(): PDN =%d\n", pHalData->pwrdown);
+
+ return pHalData->pwrdown;
+} /* HalDetectPwrDownMode */
+
+static u32 rtl8723bs_hal_init(struct adapter *padapter)
+{
+ s32 ret;
+ struct hal_com_data *pHalData;
+ struct pwrctrl_priv *pwrctrlpriv;
+ struct registry_priv *pregistrypriv;
+ u32 NavUpper = WiFiNavUpperUs;
+ u8 u1bTmp;
+
+ pHalData = GET_HAL_DATA(padapter);
+ pwrctrlpriv = adapter_to_pwrctl(padapter);
+ pregistrypriv = &padapter->registrypriv;
+
+ if (
+ adapter_to_pwrctl(padapter)->bips_processing == true &&
+ adapter_to_pwrctl(padapter)->pre_ips_type == 0
+ ) {
+ unsigned long start_time;
+ u8 cpwm_orig, cpwm_now;
+ u8 val8, bMacPwrCtrlOn = true;
+
+ DBG_871X("%s: Leaving IPS in FWLPS state\n", __func__);
+
+ /* for polling cpwm */
+ cpwm_orig = 0;
+ rtw_hal_get_hwreg(padapter, HW_VAR_CPWM, &cpwm_orig);
+
+ /* set rpwm */
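+ /* keep only the toggle bit (BIT7), flip it, and set BIT6 to request a cpwm report (see HW_VAR_SET_RPWM) */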
+ val8 = rtw_read8(padapter, SDIO_LOCAL_BASE|SDIO_REG_HRPWM1);
+ val8 &= 0x80;
+ val8 += 0x80;
+ val8 |= BIT(6);
+ rtw_write8(padapter, SDIO_LOCAL_BASE|SDIO_REG_HRPWM1, val8);
+ DBG_871X("%s: write rpwm =%02x\n", __func__, val8);
+ adapter_to_pwrctl(padapter)->tog = (val8 + 0x80) & 0x80;
+
+ /* do polling cpwm */
+ start_time = jiffies;
+ do {
+
+ mdelay(1);
+
+ rtw_hal_get_hwreg(padapter, HW_VAR_CPWM, &cpwm_now);
+ if ((cpwm_orig ^ cpwm_now) & 0x80)
+ break;
+
+ if (jiffies_to_msecs(jiffies - start_time) > 100) {
+ DBG_871X("%s: polling cpwm timeout when leaving IPS in FWLPS state\n", __func__);
+ break;
+ }
+ } while (1);
+
+ rtl8723b_set_FwPwrModeInIPS_cmd(padapter, 0);
+
+ rtw_hal_set_hwreg(padapter, HW_VAR_APFM_ON_MAC, &bMacPwrCtrlOn);
+
+ rtw_btcoex_HAL_Initialize(padapter, false);
+
+ return _SUCCESS;
+ }
+
+#ifdef CONFIG_WOWLAN
+ if (rtw_read8(padapter, REG_MCUFWDL)&BIT7) {
+ u8 reg_val = 0;
+ DBG_871X("+Reset Entry+\n");
+ rtw_write8(padapter, REG_MCUFWDL, 0x00);
+ _8051Reset8723(padapter);
+ /* reset BB */
+ reg_val = rtw_read8(padapter, REG_SYS_FUNC_EN);
+ reg_val &= ~(BIT(0) | BIT(1));
+ rtw_write8(padapter, REG_SYS_FUNC_EN, reg_val);
+ /* reset RF */
+ rtw_write8(padapter, REG_RF_CTRL, 0);
+ /* reset TRX path */
+ rtw_write16(padapter, REG_CR, 0);
+ /* reset MAC, Digital Core */
+ reg_val = rtw_read8(padapter, REG_SYS_FUNC_EN+1);
+ reg_val &= ~(BIT(4) | BIT(7));
+ rtw_write8(padapter, REG_SYS_FUNC_EN+1, reg_val);
+ reg_val = rtw_read8(padapter, REG_SYS_FUNC_EN+1);
+ reg_val |= BIT(4) | BIT(7);
+ rtw_write8(padapter, REG_SYS_FUNC_EN+1, reg_val);
+ DBG_871X("-Reset Entry-\n");
+ }
+#endif /* CONFIG_WOWLAN */
+ /* Disable Interrupt first. */
+/* rtw_hal_disable_interrupt(padapter); */
+
+ ret = _InitPowerOn_8723BS(padapter);
+ if (_FAIL == ret) {
+ RT_TRACE(_module_hci_hal_init_c_, _drv_err_, ("Failed to init Power On!\n"));
+ return _FAIL;
+ }
+
+ rtw_write8(padapter, REG_EARLY_MODE_CONTROL, 0);
+
+ ret = rtl8723b_FirmwareDownload(padapter, false);
+ if (ret != _SUCCESS) {
+ RT_TRACE(_module_hci_hal_init_c_, _drv_err_, ("%s: Download Firmware failed!!\n", __func__));
+ padapter->bFWReady = false;
+ pHalData->fw_ractrl = false;
+ return ret;
+ } else {
+ RT_TRACE(_module_hci_hal_init_c_, _drv_notice_, ("rtl8723bs_hal_init(): Download Firmware Success!!\n"));
+ padapter->bFWReady = true;
+ pHalData->fw_ractrl = true;
+ }
+
+ rtl8723b_InitializeFirmwareVars(padapter);
+
+/* SIC_Init(padapter); */
+
+ if (pwrctrlpriv->reg_rfoff == true)
+ pwrctrlpriv->rf_pwrstate = rf_off;
+
+ /* 2010/08/09 MH We need to check if we need to turnon or off RF after detecting */
+ /* HW GPIO pin. Before PHY_RFConfig8192C. */
+ HalDetectPwrDownMode(padapter);
+
+ /* Set RF type for BB/RF configuration */
+ _InitRFType(padapter);
+
+ /* Save target channel */
+ /* <Roger_Notes> Current Channel will be updated again later. */
+ pHalData->CurrentChannel = 6;
+
+#if (HAL_MAC_ENABLE == 1)
+ ret = PHY_MACConfig8723B(padapter);
+ if (ret != _SUCCESS) {
+ RT_TRACE(_module_hci_hal_init_c_, _drv_info_, ("Initializepadapter8192CSdio(): Fail to configure MAC!!\n"));
+ return ret;
+ }
+#endif
+ /* */
+ /* d. Initialize BB related configurations. */
+ /* */
+#if (HAL_BB_ENABLE == 1)
+ ret = PHY_BBConfig8723B(padapter);
+ if (ret != _SUCCESS) {
+ RT_TRACE(_module_hci_hal_init_c_, _drv_info_, ("Initializepadapter8192CSdio(): Fail to configure BB!!\n"));
+ return ret;
+ }
+#endif
+
+ /* If RF is on, we need to init RF. Otherwise, skip the procedure. */
+ /* We need to follow SU method to change the RF cfg.txt. Default disable RF TX/RX mode. */
+ /* if (pHalData->eRFPowerState == eRfOn) */
+ {
+#if (HAL_RF_ENABLE == 1)
+ ret = PHY_RFConfig8723B(padapter);
+ if (ret != _SUCCESS) {
+ RT_TRACE(_module_hci_hal_init_c_, _drv_info_, ("Initializepadapter8192CSdio(): Fail to configure RF!!\n"));
+ return ret;
+ }
+#endif
+ }
+
+ /* */
+ /* Joseph Note: Keep RfRegChnlVal for later use. */
+ /* */
+ pHalData->RfRegChnlVal[0] =
+ PHY_QueryRFReg(padapter, (enum RF_PATH)0, RF_CHNLBW, bRFRegOffsetMask);
+ pHalData->RfRegChnlVal[1] =
+ PHY_QueryRFReg(padapter, (enum RF_PATH)1, RF_CHNLBW, bRFRegOffsetMask);
+
+
+ /* if (!pHalData->bMACFuncEnable) { */
+ _InitQueueReservedPage(padapter);
+ _InitTxBufferBoundary(padapter);
+
+ /* init LLT after tx buffer boundary is defined */
+ ret = rtl8723b_InitLLTTable(padapter);
+ if (_SUCCESS != ret) {
+ DBG_8192C("%s: Failed to init LLT Table!\n", __func__);
+ return _FAIL;
+ }
+ /* */
+ _InitQueuePriority(padapter);
+ _InitPageBoundary(padapter);
+ _InitTransferPageSize(padapter);
+
+ /* Get Rx PHY status in order to report RSSI and others. */
+ _InitDriverInfoSize(padapter, DRVINFO_SZ);
+ hal_init_macaddr(padapter);
+ _InitNetworkType(padapter);
+ _InitWMACSetting(padapter);
+ _InitAdaptiveCtrl(padapter);
+ _InitEDCA(padapter);
+ _InitRetryFunction(padapter);
+ _initSdioAggregationSetting(padapter);
+ _InitOperationMode(padapter);
+ rtl8723b_InitBeaconParameters(padapter);
+ _InitInterrupt(padapter);
+ _InitBurstPktLen_8723BS(padapter);
+
+ /* YJ, TODO */
+ rtw_write8(padapter, REG_SECONDARY_CCA_CTRL_8723B, 0x3); /* CCA */
+ rtw_write8(padapter, 0x976, 0); /* hpfan_todo: 2nd CCA related */
+
+ rtw_write16(padapter, REG_PKT_VO_VI_LIFE_TIME, 0x0400); /* unit: 256us. 256ms */
+ rtw_write16(padapter, REG_PKT_BE_BK_LIFE_TIME, 0x0400); /* unit: 256us. 256ms */
+
+ invalidate_cam_all(padapter);
+
+ rtw_hal_set_chnl_bw(padapter, padapter->registrypriv.channel,
+ CHANNEL_WIDTH_20, HAL_PRIME_CHNL_OFFSET_DONT_CARE, HAL_PRIME_CHNL_OFFSET_DONT_CARE);
+
+ /* Record original value for template. This is rough data; we can only use the data */
+ /* for power adjustment. The value cannot be adjusted according to different power!!! */
+/* pHalData->OriginalCckTxPwrIdx = pHalData->CurrentCckTxPwrIdx; */
+/* pHalData->OriginalOfdm24GTxPwrIdx = pHalData->CurrentOfdm24GTxPwrIdx; */
+
+ rtl8723b_InitAntenna_Selection(padapter);
+
+ /* */
+ /* Disable BAR, suggested by Scott */
+ /* 2010.04.09 add by hpfan */
+ /* */
+ rtw_write32(padapter, REG_BAR_MODE_CTRL, 0x0201ffff);
+
+ /* HW SEQ CTRL */
+ /* set 0x0 to 0xFF by tynli. Default enable HW SEQ NUM. */
+ rtw_write8(padapter, REG_HWSEQ_CTRL, 0xFF);
+
+
+ /* */
+ /* Configure SDIO TxRx Control to enable Rx DMA timer masking. */
+ /* 2010.02.24. */
+ /* */
+ rtw_write32(padapter, SDIO_LOCAL_BASE|SDIO_REG_TX_CTRL, 0);
+
+ _RfPowerSave(padapter);
+
+
+ rtl8723b_InitHalDm(padapter);
+
+ /* DbgPrint("pHalData->DefaultTxPwrDbm = %d\n", pHalData->DefaultTxPwrDbm); */
+
+ /* */
+ /* Update current Tx FIFO page status. */
+ /* */
+ HalQueryTxBufferStatus8723BSdio(padapter);
+ HalQueryTxOQTBufferStatus8723BSdio(padapter);
+ pHalData->SdioTxOQTMaxFreeSpace = pHalData->SdioTxOQTFreeSpace;
+
+ /* Enable MACTXEN/MACRXEN block */
+ u1bTmp = rtw_read8(padapter, REG_CR);
+ u1bTmp |= (MACTXEN | MACRXEN);
+ rtw_write8(padapter, REG_CR, u1bTmp);
+
+ rtw_hal_set_hwreg(padapter, HW_VAR_NAV_UPPER, (u8 *)&NavUpper);
+
+ /* ack for xmit mgmt frames. */
+ rtw_write32(padapter, REG_FWHW_TXQ_CTRL, rtw_read32(padapter, REG_FWHW_TXQ_CTRL)|BIT(12));
+
+/* pHalData->PreRpwmVal = SdioLocalCmd52Read1Byte(padapter, SDIO_REG_HRPWM1) & 0x80; */
+
+ {
+ pwrctrlpriv->rf_pwrstate = rf_on;
+
+ if (pwrctrlpriv->rf_pwrstate == rf_on) {
+ struct pwrctrl_priv *pwrpriv;
+ unsigned long start_time;
+ u8 restore_iqk_rst;
+ u8 b2Ant;
+ u8 h2cCmdBuf;
+
+ pwrpriv = adapter_to_pwrctl(padapter);
+
+ PHY_LCCalibrate_8723B(&pHalData->odmpriv);
+
+ /* Inform WiFi FW that it is the beginning of IQK */
+ h2cCmdBuf = 1;
+ FillH2CCmd8723B(padapter, H2C_8723B_BT_WLAN_CALIBRATION, 1, &h2cCmdBuf);
+
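+ /* wait up to ~400 ms for bit0 of 0x1e7 to be set, which presumably signals that the FW has accepted the calibration request */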
+ start_time = jiffies;
+ do {
+ if (rtw_read8(padapter, 0x1e7) & 0x01)
+ break;
+
+ msleep(50);
+ } while (jiffies_to_msecs(jiffies - start_time) <= 400);
+
+ rtw_btcoex_IQKNotify(padapter, true);
+
+ restore_iqk_rst = (pwrpriv->bips_processing == true) ? true : false;
+ b2Ant = pHalData->EEPROMBluetoothAntNum == Ant_x2 ? true : false;
+ PHY_IQCalibrate_8723B(padapter, false, restore_iqk_rst, b2Ant, pHalData->ant_path);
+ pHalData->odmpriv.RFCalibrateInfo.bIQKInitialized = true;
+
+ rtw_btcoex_IQKNotify(padapter, false);
+
+ /* Inform WiFi FW that it is the finish of IQK */
+ h2cCmdBuf = 0;
+ FillH2CCmd8723B(padapter, H2C_8723B_BT_WLAN_CALIBRATION, 1, &h2cCmdBuf);
+
+ ODM_TXPowerTrackingCheck(&pHalData->odmpriv);
+ }
+ }
+
+ /* Init BT hw config. */
+ rtw_btcoex_HAL_Initialize(padapter, false);
+
+ RT_TRACE(_module_hci_hal_init_c_, _drv_info_, ("-%s\n", __func__));
+
+ return _SUCCESS;
+}
+
+/* */
+/* Description: */
+/* RTL8723e card disable power sequence v003, as suggested by Scott. */
+/* */
+/* First created by tynli. 2011.01.28. */
+/* */
+static void CardDisableRTL8723BSdio(struct adapter *padapter)
+{
+ u8 u1bTmp;
+ u8 bMacPwrCtrlOn;
+ u8 ret = _FAIL;
+
+ /* Run LPS WL RFOFF flow */
+ ret = HalPwrSeqCmdParsing(padapter, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, rtl8723B_enter_lps_flow);
+ if (ret == _FAIL) {
+ DBG_8192C(KERN_ERR "%s: run RF OFF flow fail!\n", __func__);
+ }
+
+ /* ==== Reset digital sequence ====== */
+
+ u1bTmp = rtw_read8(padapter, REG_MCUFWDL);
+ if ((u1bTmp & RAM_DL_SEL) && padapter->bFWReady) /* 8051 RAM code */
+ rtl8723b_FirmwareSelfReset(padapter);
+
+ /* Reset MCU 0x2[10]= 0. Suggested by Filen. 2011.01.26. by tynli. */
+ u1bTmp = rtw_read8(padapter, REG_SYS_FUNC_EN+1);
+ u1bTmp &= ~BIT(2); /* 0x2[10], FEN_CPUEN */
+ rtw_write8(padapter, REG_SYS_FUNC_EN+1, u1bTmp);
+
+ /* MCUFWDL 0x80[1:0]= 0 */
+ /* reset MCU ready status */
+ rtw_write8(padapter, REG_MCUFWDL, 0);
+
+ /* Reset MCU IO Wrapper, added by Roger, 2011.08.30 */
+ u1bTmp = rtw_read8(padapter, REG_RSV_CTRL+1);
+ u1bTmp &= ~BIT(0);
+ rtw_write8(padapter, REG_RSV_CTRL+1, u1bTmp);
+ u1bTmp = rtw_read8(padapter, REG_RSV_CTRL+1);
+ u1bTmp |= BIT(0);
+ rtw_write8(padapter, REG_RSV_CTRL+1, u1bTmp);
+
+ /* ==== Reset digital sequence end ====== */
+
+ bMacPwrCtrlOn = false; /* Disable CMD53 R/W */
+ ret = false;
+ rtw_hal_set_hwreg(padapter, HW_VAR_APFM_ON_MAC, &bMacPwrCtrlOn);
+ ret = HalPwrSeqCmdParsing(padapter, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, rtl8723B_card_disable_flow);
+ if (ret == false) {
+ DBG_8192C(KERN_ERR "%s: run CARD DISABLE flow fail!\n", __func__);
+ }
+}
+
+static u32 rtl8723bs_hal_deinit(struct adapter *padapter)
+{
+ struct dvobj_priv *psdpriv = padapter->dvobj;
+ struct debug_priv *pdbgpriv = &psdpriv->drv_dbg;
+
+ if (padapter->hw_init_completed == true) {
+ if (adapter_to_pwrctl(padapter)->bips_processing == true) {
+ if (padapter->netif_up == true) {
+ int cnt = 0;
+ u8 val8 = 0;
+
+ DBG_871X("%s: issue H2C to FW when entering IPS\n", __func__);
+
+ rtl8723b_set_FwPwrModeInIPS_cmd(padapter, 0x3);
+ /* poll 0x1cc to make sure the H2C command has already been handled by FW; MAC_0x1cc = 0 means H2C done by FW. */
+ do {
+ val8 = rtw_read8(padapter, REG_HMETFR);
+ cnt++;
+ DBG_871X("%s polling REG_HMETFR = 0x%x, cnt =%d\n", __func__, val8, cnt);
+ mdelay(10);
+ } while (cnt < 100 && (val8 != 0));
+ /* H2C done, enter 32k */
+ if (val8 == 0) {
+ /* set rpwm to enter 32k */
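+ /* flip the toggle bit (BIT7) and set BIT0 to request the 32k clock (see HW_VAR_SET_RPWM) */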
+ val8 = rtw_read8(padapter, SDIO_LOCAL_BASE|SDIO_REG_HRPWM1);
+ val8 += 0x80;
+ val8 |= BIT(0);
+ rtw_write8(padapter, SDIO_LOCAL_BASE|SDIO_REG_HRPWM1, val8);
+ DBG_871X("%s: write rpwm =%02x\n", __func__, val8);
+ adapter_to_pwrctl(padapter)->tog = (val8 + 0x80) & 0x80;
+ cnt = val8 = 0;
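+ /* REG_CR is expected to read back 0xEA once register access is gated off, i.e. the chip has entered 32k */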
+ do {
+ val8 = rtw_read8(padapter, REG_CR);
+ cnt++;
+ DBG_871X("%s polling 0x100 = 0x%x, cnt =%d\n", __func__, val8, cnt);
+ mdelay(10);
+ } while (cnt < 100 && (val8 != 0xEA));
+ } else {
+ DBG_871X(
+ "MAC_1C0 =%08x, MAC_1C4 =%08x, MAC_1C8 =%08x, MAC_1CC =%08x\n",
+ rtw_read32(padapter, 0x1c0),
+ rtw_read32(padapter, 0x1c4),
+ rtw_read32(padapter, 0x1c8),
+ rtw_read32(padapter, 0x1cc)
+ );
+ }
+
+ DBG_871X(
+ "polling done when entering IPS, check result : 0x100 = 0x%x, cnt =%d, MAC_1cc = 0x%02x\n",
+ rtw_read8(padapter, REG_CR),
+ cnt,
+ rtw_read8(padapter, REG_HMETFR)
+ );
+
+ adapter_to_pwrctl(padapter)->pre_ips_type = 0;
+
+ } else {
+ pdbgpriv->dbg_carddisable_cnt++;
+ CardDisableRTL8723BSdio(padapter);
+
+ adapter_to_pwrctl(padapter)->pre_ips_type = 1;
+ }
+
+ } else {
+ pdbgpriv->dbg_carddisable_cnt++;
+ CardDisableRTL8723BSdio(padapter);
+ }
+ } else
+ pdbgpriv->dbg_deinit_fail_cnt++;
+
+ return _SUCCESS;
+}
+
+static u32 rtl8723bs_inirp_init(struct adapter *padapter)
+{
+ return _SUCCESS;
+}
+
+static u32 rtl8723bs_inirp_deinit(struct adapter *padapter)
+{
+ RT_TRACE(_module_hci_hal_init_c_, _drv_info_, ("+rtl8723bs_inirp_deinit\n"));
+
+ RT_TRACE(_module_hci_hal_init_c_, _drv_info_, ("-rtl8723bs_inirp_deinit\n"));
+
+ return _SUCCESS;
+}
+
+static void rtl8723bs_init_default_value(struct adapter *padapter)
+{
+ struct hal_com_data *pHalData;
+
+
+ pHalData = GET_HAL_DATA(padapter);
+
+ rtl8723b_init_default_value(padapter);
+
+ /* interface related variable */
+ pHalData->SdioRxFIFOCnt = 0;
+}
+
+static void rtl8723bs_interface_configure(struct adapter *padapter)
+{
+ struct hal_com_data *pHalData = GET_HAL_DATA(padapter);
+ struct dvobj_priv *pdvobjpriv = adapter_to_dvobj(padapter);
+ struct registry_priv *pregistrypriv = &padapter->registrypriv;
+ bool bWiFiConfig = pregistrypriv->wifi_spec;
+
+
+ pdvobjpriv->RtOutPipe[0] = WLAN_TX_HIQ_DEVICE_ID;
+ pdvobjpriv->RtOutPipe[1] = WLAN_TX_MIQ_DEVICE_ID;
+ pdvobjpriv->RtOutPipe[2] = WLAN_TX_LOQ_DEVICE_ID;
+
+ if (bWiFiConfig)
+ pHalData->OutEpNumber = 2;
+ else
+ pHalData->OutEpNumber = SDIO_MAX_TX_QUEUE;
+
+ switch (pHalData->OutEpNumber) {
+ case 3:
+ pHalData->OutEpQueueSel = TX_SELE_HQ | TX_SELE_LQ|TX_SELE_NQ;
+ break;
+ case 2:
+ pHalData->OutEpQueueSel = TX_SELE_HQ | TX_SELE_NQ;
+ break;
+ case 1:
+ pHalData->OutEpQueueSel = TX_SELE_HQ;
+ break;
+ default:
+ break;
+ }
+
+ Hal_MappingOutPipe(padapter, pHalData->OutEpNumber);
+}
+
+/* */
+/* Description: */
+/* We should set Efuse cell selection to WiFi cell in default. */
+/* */
+/* Assumption: */
+/* PASSIVE_LEVEL */
+/* */
+/* Added by Roger, 2010.11.23. */
+/* */
+static void _EfuseCellSel(struct adapter *padapter)
+{
+ u32 value32;
+
+ value32 = rtw_read32(padapter, EFUSE_TEST);
+ value32 = (value32 & ~EFUSE_SEL_MASK) | EFUSE_SEL(EFUSE_WIFI_SEL_0);
+ rtw_write32(padapter, EFUSE_TEST, value32);
+}
+
+static void _ReadRFType(struct adapter *Adapter)
+{
+ struct hal_com_data *pHalData = GET_HAL_DATA(Adapter);
+
+#if DISABLE_BB_RF
+ pHalData->rf_chip = RF_PSEUDO_11N;
+#else
+ pHalData->rf_chip = RF_6052;
+#endif
+}
+
+
+static void Hal_EfuseParseMACAddr_8723BS(
+ struct adapter *padapter, u8 *hwinfo, bool AutoLoadFail
+)
+{
+ u16 i;
+ u8 sMacAddr[6] = {0x00, 0xE0, 0x4C, 0xb7, 0x23, 0x00};
+ struct eeprom_priv *pEEPROM = GET_EEPROM_EFUSE_PRIV(padapter);
+
+ if (AutoLoadFail) {
+/* sMacAddr[5] = (u8)GetRandomNumber(1, 254); */
+ for (i = 0; i < 6; i++)
+ pEEPROM->mac_addr[i] = sMacAddr[i];
+ } else {
+ /* Read Permanent MAC address */
+ memcpy(pEEPROM->mac_addr, &hwinfo[EEPROM_MAC_ADDR_8723BS], ETH_ALEN);
+ }
+/* NicIFSetMacAddress(padapter, padapter->PermanentAddress); */
+
+ RT_TRACE(
+ _module_hci_hal_init_c_,
+ _drv_notice_,
+ (
+ "Hal_EfuseParseMACAddr_8723BS: Permanent Address = %02x-%02x-%02x-%02x-%02x-%02x\n",
+ pEEPROM->mac_addr[0],
+ pEEPROM->mac_addr[1],
+ pEEPROM->mac_addr[2],
+ pEEPROM->mac_addr[3],
+ pEEPROM->mac_addr[4],
+ pEEPROM->mac_addr[5]
+ )
+ );
+}
+
+static void Hal_EfuseParseBoardType_8723BS(
+ struct adapter *padapter, u8 *hwinfo, bool AutoLoadFail
+)
+{
+ struct hal_com_data *pHalData = GET_HAL_DATA(padapter);
+
+ if (!AutoLoadFail) {
+ pHalData->BoardType = (hwinfo[EEPROM_RF_BOARD_OPTION_8723B] & 0xE0) >> 5;
+ if (pHalData->BoardType == 0xFF)
+ pHalData->BoardType = (EEPROM_DEFAULT_BOARD_OPTION&0xE0)>>5;
+ } else
+ pHalData->BoardType = 0;
+ RT_TRACE(_module_hci_hal_init_c_, _drv_info_, ("Board Type: 0x%2x\n", pHalData->BoardType));
+}
+
+static void _ReadEfuseInfo8723BS(struct adapter *padapter)
+{
+ struct eeprom_priv *pEEPROM = GET_EEPROM_EFUSE_PRIV(padapter);
+ u8 *hwinfo = NULL;
+
+ RT_TRACE(_module_hci_hal_init_c_, _drv_info_, ("====>_ReadEfuseInfo8723BS()\n"));
+
+ /* */
+ /* This part read and parse the eeprom/efuse content */
+ /* */
+
+ if (sizeof(pEEPROM->efuse_eeprom_data) < HWSET_MAX_SIZE_8723B)
+ DBG_871X("[WARNING] size of efuse_eeprom_data is less than HWSET_MAX_SIZE_8723B!\n");
+
+ hwinfo = pEEPROM->efuse_eeprom_data;
+
+ Hal_InitPGData(padapter, hwinfo);
+
+ Hal_EfuseParseIDCode(padapter, hwinfo);
+ Hal_EfuseParseEEPROMVer_8723B(padapter, hwinfo, pEEPROM->bautoload_fail_flag);
+
+ Hal_EfuseParseMACAddr_8723BS(padapter, hwinfo, pEEPROM->bautoload_fail_flag);
+
+ Hal_EfuseParseTxPowerInfo_8723B(padapter, hwinfo, pEEPROM->bautoload_fail_flag);
+ Hal_EfuseParseBoardType_8723BS(padapter, hwinfo, pEEPROM->bautoload_fail_flag);
+
+ /* */
+ /* Read Bluetooth co-exist and initialize */
+ /* */
+ Hal_EfuseParsePackageType_8723B(padapter, hwinfo, pEEPROM->bautoload_fail_flag);
+ Hal_EfuseParseBTCoexistInfo_8723B(padapter, hwinfo, pEEPROM->bautoload_fail_flag);
+ Hal_EfuseParseChnlPlan_8723B(padapter, hwinfo, pEEPROM->bautoload_fail_flag);
+ Hal_EfuseParseXtal_8723B(padapter, hwinfo, pEEPROM->bautoload_fail_flag);
+ Hal_EfuseParseThermalMeter_8723B(padapter, hwinfo, pEEPROM->bautoload_fail_flag);
+ Hal_EfuseParseAntennaDiversity_8723B(padapter, hwinfo, pEEPROM->bautoload_fail_flag);
+ Hal_EfuseParseCustomerID_8723B(padapter, hwinfo, pEEPROM->bautoload_fail_flag);
+
+ Hal_EfuseParseVoltage_8723B(padapter, hwinfo, pEEPROM->bautoload_fail_flag);
+
+#ifdef CONFIG_WOWLAN
+ Hal_DetectWoWMode(padapter);
+#endif
+
+ Hal_ReadRFGainOffset(padapter, hwinfo, pEEPROM->bautoload_fail_flag);
+
+ RT_TRACE(_module_hci_hal_init_c_, _drv_info_, ("<==== _ReadEfuseInfo8723BS()\n"));
+}
+
+static void _ReadPROMContent(struct adapter *padapter)
+{
+ struct eeprom_priv *pEEPROM = GET_EEPROM_EFUSE_PRIV(padapter);
+ u8 eeValue;
+
+ eeValue = rtw_read8(padapter, REG_9346CR);
+ /* To check system boot selection. */
+ pEEPROM->EepromOrEfuse = (eeValue & BOOT_FROM_EEPROM) ? true : false;
+ pEEPROM->bautoload_fail_flag = (eeValue & EEPROM_EN) ? false : true;
+
+ RT_TRACE(_module_hci_hal_init_c_, _drv_info_,
+ ("%s: 9346CR = 0x%02X, Boot from %s, Autoload %s\n",
+ __func__, eeValue,
+ (pEEPROM->EepromOrEfuse ? "EEPROM" : "EFUSE"),
+ (pEEPROM->bautoload_fail_flag ? "Fail" : "OK")));
+
+/* pHalData->EEType = IS_BOOT_FROM_EEPROM(Adapter) ? EEPROM_93C46 : EEPROM_BOOT_EFUSE; */
+
+ _ReadEfuseInfo8723BS(padapter);
+}
+
+static void _InitOtherVariable(struct adapter *Adapter)
+{
+}
+
+/* */
+/* Description: */
+/* Read HW adapter information from E-Fuse or EEPROM according to what CR9346 reports. */
+/* */
+/* Assumption: */
+/* PASSIVE_LEVEL (SDIO interface) */
+/* */
+/* */
+static s32 _ReadAdapterInfo8723BS(struct adapter *padapter)
+{
+ u8 val8;
+ unsigned long start;
+
+ RT_TRACE(_module_hci_hal_init_c_, _drv_info_, ("+_ReadAdapterInfo8723BS\n"));
+
+ /* before access eFuse, make sure card enable has been called */
+ if (padapter->hw_init_completed == false)
+ _InitPowerOn_8723BS(padapter);
+
+
+ val8 = rtw_read8(padapter, 0x4e);
+ MSG_8192C("%s, 0x4e = 0x%x\n", __func__, val8);
+ val8 |= BIT(6);
+ rtw_write8(padapter, 0x4e, val8);
+
+
+ start = jiffies;
+
+ _EfuseCellSel(padapter);
+ _ReadRFType(padapter);
+ _ReadPROMContent(padapter);
+ _InitOtherVariable(padapter);
+
+ if (padapter->hw_init_completed == false) {
+ rtw_write8(padapter, 0x67, 0x00); /* for BT, Switch Ant control to BT */
+ CardDisableRTL8723BSdio(padapter);/* for the power consumption issue: the wifi ko module is loaded during booting, but the wifi GUI is off */
+ }
+
+
+ MSG_8192C("<==== _ReadAdapterInfo8723BS in %d ms\n", jiffies_to_msecs(jiffies - start));
+
+ return _SUCCESS;
+}
+
+static void ReadAdapterInfo8723BS(struct adapter *padapter)
+{
+ /* Read EEPROM size before call any EEPROM function */
+ padapter->EepromAddressSize = GetEEPROMSize8723B(padapter);
+
+ _ReadAdapterInfo8723BS(padapter);
+}
+
+/*
+ * If variable not handled here,
+ * some variables will be processed in SetHwReg8723B()
+ */
+static void SetHwReg8723BS(struct adapter *padapter, u8 variable, u8 *val)
+{
+ struct hal_com_data *pHalData;
+ u8 val8;
+
+#if defined(CONFIG_WOWLAN) || defined(CONFIG_AP_WOWLAN)
+ struct wowlan_ioctl_param *poidparam;
+ struct pwrctrl_priv *pwrctl = adapter_to_pwrctl(padapter);
+ int res;
+ u32 tmp;
+ u16 len = 0;
+ u8 trycnt = 100;
+ u32 himr = 0;
+#if defined(CONFIG_WOWLAN)
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct sta_info *psta = NULL;
+ u64 iv_low = 0, iv_high = 0;
+ u8 mstatus = (*(u8 *)val);
+#endif
+#endif
+
+ pHalData = GET_HAL_DATA(padapter);
+
+ switch (variable) {
+ case HW_VAR_SET_RPWM:
+ /* rpwm value only uses BIT0 (clock bit), BIT6 (Ack bit), and BIT7 (Toggle bit) */
+ /* BIT0 value - 1: 32k, 0: 40MHz. */
+ /* BIT6 value - 1: report cpwm value after a successful set, 0: do not report. */
+ /* BIT7 value - Toggle bit change. */
+ {
+ val8 = *val;
+ val8 &= 0xC1;
+ rtw_write8(padapter, SDIO_LOCAL_BASE|SDIO_REG_HRPWM1, val8);
+ }
+ break;
+ case HW_VAR_SET_REQ_FW_PS:
+ {
+ u8 req_fw_ps = 0;
+ req_fw_ps = rtw_read8(padapter, 0x8f);
+ req_fw_ps |= 0x10;
+ rtw_write8(padapter, 0x8f, req_fw_ps);
+ }
+ break;
+ case HW_VAR_RXDMA_AGG_PG_TH:
+ val8 = *val;
+ break;
+
+#ifdef CONFIG_WOWLAN
+ case HW_VAR_WOWLAN:
+ {
+ poidparam = (struct wowlan_ioctl_param *)val;
+ switch (poidparam->subcode) {
+ case WOWLAN_ENABLE:
+ DBG_871X_LEVEL(_drv_always_, "WOWLAN_ENABLE\n");
+
+ /* backup data rate to register 0x8b for wowlan FW */
+ rtw_write8(padapter, 0x8d, 1);
+ rtw_write8(padapter, 0x8c, 0);
+ rtw_write8(padapter, 0x8f, 0x40);
+ rtw_write8(padapter, 0x8b,
+ rtw_read8(padapter, 0x2f0));
+
+ /* 1. Download WOWLAN FW */
+ DBG_871X_LEVEL(_drv_always_, "Re-download WoWlan FW!\n");
+ SetFwRelatedForWoWLAN8723b(padapter, true);
+
+ /* 2. RX DMA stop */
+ DBG_871X_LEVEL(_drv_always_, "Pause DMA\n");
+ rtw_write32(padapter, REG_RXPKT_NUM, (rtw_read32(padapter, REG_RXPKT_NUM)|RW_RELEASE_EN));
+ do {
+ if ((rtw_read32(padapter, REG_RXPKT_NUM)&RXDMA_IDLE)) {
+ DBG_871X_LEVEL(_drv_always_, "RX_DMA_IDLE is true\n");
+ break;
+ } else {
+ /* If RX_DMA is not idle, receive one pkt from DMA */
+ res = sdio_local_read(padapter, SDIO_REG_RX0_REQ_LEN, 4, (u8 *)&tmp);
+ len = le16_to_cpu(tmp);
+ DBG_871X_LEVEL(_drv_always_, "RX len:%d\n", len);
+ if (len > 0)
+ res = RecvOnePkt(padapter, len);
+ else
+ DBG_871X_LEVEL(_drv_always_, "read length fail %d\n", len);
+
+ DBG_871X_LEVEL(_drv_always_, "RecvOnePkt Result: %d\n", res);
+ }
+ } while (trycnt--);
+ if (trycnt == 0)
+ DBG_871X_LEVEL(_drv_always_, "Stop RX DMA failed......\n");
+
+ /* 3. Clear IMR and ISR */
+ DBG_871X_LEVEL(_drv_always_, "Clear IMR and ISR\n");
+ tmp = 0;
+ sdio_local_write(padapter, SDIO_REG_HIMR_ON, 4, (u8 *)&tmp);
+ sdio_local_write(padapter, SDIO_REG_HIMR, 4, (u8 *)&tmp);
+ sdio_local_read(padapter, SDIO_REG_HISR, 4, (u8 *)&tmp);
+ sdio_local_write(padapter, SDIO_REG_HISR, 4, (u8 *)&tmp);
+
+ /* 4. Enable CPWM2 only */
+ DBG_871X_LEVEL(_drv_always_, "Enable only CPWM2\n");
+ sdio_local_read(padapter, SDIO_REG_HIMR, 4, (u8 *)&tmp);
+ DBG_871X("DisableInterruptButCpwm28723BSdio(): Read SDIO_REG_HIMR: 0x%08x\n", tmp);
+
+ himr = cpu_to_le32(SDIO_HIMR_DISABLED)|SDIO_HIMR_CPWM2_MSK;
+ sdio_local_write(padapter, SDIO_REG_HIMR, 4, (u8 *)&himr);
+
+ sdio_local_read(padapter, SDIO_REG_HIMR, 4, (u8 *)&tmp);
+ DBG_871X("DisableInterruptButCpwm28723BSdio(): Read again SDIO_REG_HIMR: 0x%08x\n", tmp);
+
+ /* 5. Set Enable WOWLAN H2C command. */
+ DBG_871X_LEVEL(_drv_always_, "Set Enable WOWLan cmd\n");
+ rtl8723b_set_wowlan_cmd(padapter, 1);
+
+ /* 6. Check EnableWoWlan CMD is ready */
+ if (!pwrctl->wowlan_pno_enable) {
+ DBG_871X_LEVEL(_drv_always_, "Check EnableWoWlan CMD is ready\n");
+ mstatus = rtw_read8(padapter, REG_WOW_CTRL);
+ trycnt = 10;
+ while (!(mstatus&BIT1) && trycnt > 1) {
+ mstatus = rtw_read8(padapter, REG_WOW_CTRL);
+ DBG_871X("Loop index: %d :0x%02x\n", trycnt, mstatus);
+ trycnt--;
+ msleep(2);
+ }
+ }
+ break;
+
+ case WOWLAN_DISABLE:
+ DBG_871X_LEVEL(_drv_always_, "WOWLAN_DISABLE\n");
+
+ psta = rtw_get_stainfo(&padapter->stapriv, get_bssid(pmlmepriv));
+ if (psta != NULL)
+ rtl8723b_set_FwMediaStatusRpt_cmd(padapter, RT_MEDIA_DISCONNECT, psta->mac_id);
+ else
+ DBG_871X("psta is null\n");
+
+ /* 1. Read wakeup reason */
+ pwrctl->wowlan_wake_reason = rtw_read8(padapter, REG_WOWLAN_WAKE_REASON);
+ DBG_871X_LEVEL(
+ _drv_always_,
+ "wakeup_reason: 0x%02x, mac_630 = 0x%08x, mac_634 = 0x%08x, mac_1c0 = 0x%08x, mac_1c4 = 0x%08x"
+ ", mac_494 = 0x%08x, mac_498 = 0x%08x, mac_49c = 0x%08x, mac_608 = 0x%08x, mac_4a0 = 0x%08x, mac_4a4 = 0x%08x\n"
+ ", mac_1cc = 0x%08x, mac_2f0 = 0x%08x, mac_2f4 = 0x%08x, mac_2f8 = 0x%08x, mac_2fc = 0x%08x, mac_8c = 0x%08x",
+ pwrctl->wowlan_wake_reason,
+ rtw_read32(padapter, REG_WOWLAN_GTK_DBG1),
+ rtw_read32(padapter, REG_WOWLAN_GTK_DBG2),
+ rtw_read32(padapter, 0x1c0),
+ rtw_read32(padapter, 0x1c4),
+ rtw_read32(padapter, 0x494),
+ rtw_read32(padapter, 0x498),
+ rtw_read32(padapter, 0x49c),
+ rtw_read32(padapter, 0x608),
+ rtw_read32(padapter, 0x4a0),
+ rtw_read32(padapter, 0x4a4),
+ rtw_read32(padapter, 0x1cc),
+ rtw_read32(padapter, 0x2f0),
+ rtw_read32(padapter, 0x2f4),
+ rtw_read32(padapter, 0x2f8),
+ rtw_read32(padapter, 0x2fc),
+ rtw_read32(padapter, 0x8c)
+ );
+#ifdef CONFIG_PNO_SET_DEBUG
+ DBG_871X("0x1b9: 0x%02x, 0x632: 0x%02x\n", rtw_read8(padapter, 0x1b9), rtw_read8(padapter, 0x632));
+ DBG_871X("0x4fc: 0x%02x, 0x4fd: 0x%02x\n", rtw_read8(padapter, 0x4fc), rtw_read8(padapter, 0x4fd));
+ DBG_871X("TXDMA STATUS: 0x%08x\n", rtw_read32(padapter, REG_TXDMA_STATUS));
+#endif
+
+ {
+ /* 2. Set Disable WOWLAN H2C command. */
+ DBG_871X_LEVEL(_drv_always_, "Set Disable WOWLan cmd\n");
+ rtl8723b_set_wowlan_cmd(padapter, 0);
+
+ /* 3. Check Disable WoWlan CMD ready. */
+ DBG_871X_LEVEL(_drv_always_, "Check DisableWoWlan CMD is ready\n");
+ mstatus = rtw_read8(padapter, REG_WOW_CTRL);
+ trycnt = 50;
+ while (mstatus&BIT1 && trycnt > 1) {
+ mstatus = rtw_read8(padapter, REG_WOW_CTRL);
+ DBG_871X_LEVEL(_drv_always_, "Loop index: %d :0x%02x\n", trycnt, mstatus);
+ trycnt--;
+ msleep(10);
+ }
+
+ if (mstatus & BIT1) {
+ DBG_871X_LEVEL(_drv_always_, "Disable WOW mode fail!!\n");
+ DBG_871X("Set 0x690 = 0x00\n");
+ rtw_write8(padapter, REG_WOW_CTRL, (rtw_read8(padapter, REG_WOW_CTRL)&0xf0));
+ DBG_871X_LEVEL(_drv_always_, "Release RXDMA\n");
+ rtw_write32(padapter, REG_RXPKT_NUM, (rtw_read32(padapter, REG_RXPKT_NUM)&(~RW_RELEASE_EN)));
+ }
+
+ /* 3.1 read fw iv */
+ iv_low = rtw_read32(padapter, REG_TXPKTBUF_IV_LOW);
+ /* only the low two bytes are the PN, check the AES_IV macro for details */
+ iv_low &= 0xffff;
+ iv_high = rtw_read32(padapter, REG_TXPKTBUF_IV_HIGH);
+ /* get the real packet number */
+ pwrctl->wowlan_fw_iv = iv_high << 16 | iv_low;
+ DBG_871X_LEVEL(_drv_always_, "fw_iv: 0x%016llx\n", pwrctl->wowlan_fw_iv);
+ /* Update TX iv data. */
+ rtw_set_sec_pn(padapter);
+
+ /* 3.2 read GTK index and key */
+ if (
+ psecuritypriv->binstallKCK_KEK == true &&
+ psecuritypriv->dot11PrivacyAlgrthm == _AES_
+ ) {
+ u8 gtk_keyindex = 0;
+ u8 get_key[16];
+ /* read gtk key index */
+ gtk_keyindex = rtw_read8(padapter, 0x48c);
+
+ if (gtk_keyindex < 4) {
+ psecuritypriv->dot118021XGrpKeyid = gtk_keyindex;
+ read_cam(padapter, gtk_keyindex, get_key);
+ memcpy(psecuritypriv->dot118021XGrpKey[psecuritypriv->dot118021XGrpKeyid].skey, get_key, 16);
+ DBG_871X_LEVEL(
+ _drv_always_,
+ "GTK (%d) = 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
+ gtk_keyindex,
+ psecuritypriv->dot118021XGrpKey[psecuritypriv->dot118021XGrpKeyid].lkey[0],
+ psecuritypriv->dot118021XGrpKey[psecuritypriv->dot118021XGrpKeyid].lkey[1],
+ psecuritypriv->dot118021XGrpKey[psecuritypriv->dot118021XGrpKeyid].lkey[2],
+ psecuritypriv->dot118021XGrpKey[psecuritypriv->dot118021XGrpKeyid].lkey[3]
+ );
+ } else
+ DBG_871X_LEVEL(_drv_always_, "GTK index =%d\n", gtk_keyindex);
+ }
+
+ /* 4. Re-download Normal FW. */
+ DBG_871X_LEVEL(_drv_always_, "Re-download Normal FW!\n");
+ SetFwRelatedForWoWLAN8723b(padapter, false);
+ }
+#ifdef CONFIG_GPIO_WAKEUP
+ DBG_871X_LEVEL(_drv_always_, "Set Wake GPIO to high for default.\n");
+ HalSetOutPutGPIO(padapter, WAKEUP_GPIO_IDX, 1);
+#endif
+
+ /* 5. Download reserved pages and report media status if needed. */
+ if (
+ (pwrctl->wowlan_wake_reason != FWDecisionDisconnect) &&
+ (pwrctl->wowlan_wake_reason != Rx_Pairwisekey) &&
+ (pwrctl->wowlan_wake_reason != Rx_DisAssoc) &&
+ (pwrctl->wowlan_wake_reason != Rx_DeAuth)
+ ) {
+ rtl8723b_set_FwJoinBssRpt_cmd(padapter, RT_MEDIA_CONNECT);
+ if (psta != NULL)
+ rtl8723b_set_FwMediaStatusRpt_cmd(padapter, RT_MEDIA_CONNECT, psta->mac_id);
+ }
+#ifdef CONFIG_PNO_SUPPORT
+ rtw_write8(padapter, 0x1b8, 0);
+ DBG_871X("reset 0x1b8: %d\n", rtw_read8(padapter, 0x1b8));
+ rtw_write8(padapter, 0x1b9, 0);
+ DBG_871X("reset 0x1b9: %d\n", rtw_read8(padapter, 0x1b9));
+ rtw_write8(padapter, REG_PNO_STATUS, 0);
+ DBG_871X("reset REG_PNO_STATUS: %d\n", rtw_read8(padapter, REG_PNO_STATUS));
+#endif
+ break;
+
+ default:
+ break;
+ }
+ }
+ break;
+#endif /* CONFIG_WOWLAN */
+#ifdef CONFIG_AP_WOWLAN
+ case HW_VAR_AP_WOWLAN:
+ {
+ poidparam = (struct wowlan_ioctl_param *)val;
+ switch (poidparam->subcode) {
+ case WOWLAN_AP_ENABLE:
+ DBG_871X("%s, WOWLAN_AP_ENABLE\n", __func__);
+ /* 1. Download WOWLAN FW */
+ DBG_871X_LEVEL(_drv_always_, "Re-download WoWlan FW!\n");
+ SetFwRelatedForWoWLAN8723b(padapter, true);
+
+ /* 2. RX DMA stop */
+ DBG_871X_LEVEL(_drv_always_, "Pause DMA\n");
+ rtw_write32(padapter, REG_RXPKT_NUM,
+ (rtw_read32(padapter, REG_RXPKT_NUM)|RW_RELEASE_EN));
+ do {
+ if ((rtw_read32(padapter, REG_RXPKT_NUM)&RXDMA_IDLE)) {
+ DBG_871X_LEVEL(_drv_always_, "RX_DMA_IDLE is true\n");
+ break;
+ } else {
+ /* If RX_DMA is not idle, receive one pkt from DMA */
+ res = sdio_local_read(padapter, SDIO_REG_RX0_REQ_LEN, 4, (u8 *)&tmp);
+ len = le16_to_cpu(tmp);
+
+ DBG_871X_LEVEL(_drv_always_, "RX len:%d\n", len);
+ if (len > 0)
+ res = RecvOnePkt(padapter, len);
+ else
+ DBG_871X_LEVEL(_drv_always_, "read length fail %d\n", len);
+
+ DBG_871X_LEVEL(_drv_always_, "RecvOnePkt Result: %d\n", res);
+ }
+ } while (trycnt--);
+
+ if (trycnt == 0)
+ DBG_871X_LEVEL(_drv_always_, "Stop RX DMA failed......\n");
+
+ /* 3. Clear IMR and ISR */
+ DBG_871X_LEVEL(_drv_always_, "Clear IMR and ISR\n");
+ tmp = 0;
+ sdio_local_write(padapter, SDIO_REG_HIMR_ON, 4, (u8 *)&tmp);
+ sdio_local_write(padapter, SDIO_REG_HIMR, 4, (u8 *)&tmp);
+ sdio_local_read(padapter, SDIO_REG_HISR, 4, (u8 *)&tmp);
+ sdio_local_write(padapter, SDIO_REG_HISR, 4, (u8 *)&tmp);
+
+ /* 4. Enable CPWM2 only */
+ DBG_871X_LEVEL(_drv_always_, "Enable only CPWM2\n");
+ sdio_local_read(padapter, SDIO_REG_HIMR, 4, (u8 *)&tmp);
+ DBG_871X("DisableInterruptButCpwm28723BSdio(): Read SDIO_REG_HIMR: 0x%08x\n", tmp);
+
+ himr = cpu_to_le32(SDIO_HIMR_DISABLED)|SDIO_HIMR_CPWM2_MSK;
+ sdio_local_write(padapter, SDIO_REG_HIMR, 4, (u8 *)&himr);
+
+ sdio_local_read(padapter, SDIO_REG_HIMR, 4, (u8 *)&tmp);
+ DBG_871X("DisableInterruptButCpwm28723BSdio(): Read again SDIO_REG_HIMR: 0x%08x\n", tmp);
+
+ /* 5. Set Enable WOWLAN H2C command. */
+ DBG_871X_LEVEL(_drv_always_, "Set Enable AP WOWLan cmd\n");
+ rtl8723b_set_ap_wowlan_cmd(padapter, 1);
+ /* 6. add some delay for H2C cmd ready */
+ msleep(10);
+
+ rtw_write8(padapter, REG_WOWLAN_WAKE_REASON, 0);
+ break;
+ case WOWLAN_AP_DISABLE:
+ DBG_871X("%s, WOWLAN_AP_DISABLE\n", __func__);
+ /* 1. Read wakeup reason */
+ pwrctl->wowlan_wake_reason =
+ rtw_read8(padapter, REG_WOWLAN_WAKE_REASON);
+
+ DBG_871X_LEVEL(_drv_always_, "wakeup_reason: 0x%02x\n",
+ pwrctl->wowlan_wake_reason);
+
+ /* 2. Set Disable WOWLAN H2C command. */
+ DBG_871X_LEVEL(_drv_always_, "Set Disable WOWLan cmd\n");
+ rtl8723b_set_ap_wowlan_cmd(padapter, 0);
+ /* 3. add some delay for H2C cmd ready */
+ msleep(2);
+
+ DBG_871X_LEVEL(_drv_always_, "Release RXDMA\n");
+
+ rtw_write32(padapter, REG_RXPKT_NUM,
+ (rtw_read32(padapter, REG_RXPKT_NUM) & (~RW_RELEASE_EN)));
+
+ SetFwRelatedForWoWLAN8723b(padapter, false);
+
+#ifdef CONFIG_GPIO_WAKEUP
+ DBG_871X_LEVEL(_drv_always_, "Set Wake GPIO to high for default.\n");
+ HalSetOutPutGPIO(padapter, WAKEUP_GPIO_IDX, 1);
+#endif /* CONFIG_GPIO_WAKEUP */
+ rtl8723b_set_FwJoinBssRpt_cmd(padapter, RT_MEDIA_CONNECT);
+ issue_beacon(padapter, 0);
+ break;
+ default:
+ break;
+ }
+}
+ break;
+#endif /* CONFIG_AP_WOWLAN */
+ case HW_VAR_DM_IN_LPS:
+ rtl8723b_hal_dm_in_lps(padapter);
+ break;
+ default:
+ SetHwReg8723B(padapter, variable, val);
+ break;
+ }
+}
+
+/*
+ * If variable not handled here,
+ * some variables will be processed in GetHwReg8723B()
+ */
+static void GetHwReg8723BS(struct adapter *padapter, u8 variable, u8 *val)
+{
+ switch (variable) {
+ case HW_VAR_CPWM:
+ *val = rtw_read8(padapter, SDIO_LOCAL_BASE|SDIO_REG_HCPWM1_8723B);
+ break;
+
+ case HW_VAR_FW_PS_STATE:
+ {
+ /* read register 0x88 to get the FW power-save state */
+ *((u16 *)val) = rtw_read16(padapter, 0x88);
+ }
+ break;
+ default:
+ GetHwReg8723B(padapter, variable, val);
+ break;
+ }
+}
+
+static void SetHwRegWithBuf8723B(struct adapter *padapter, u8 variable, u8 *pbuf, int len)
+{
+ switch (variable) {
+ case HW_VAR_C2H_HANDLE:
+ /* DBG_8192C("%s len =%d\n", __func__, len); */
+ C2HPacketHandler_8723B(padapter, pbuf, len);
+ break;
+ default:
+ break;
+ }
+}
+
+/* */
+/* Description: */
+/* Query setting of specified variable. */
+/* */
+static u8 GetHalDefVar8723BSDIO(
+ struct adapter *Adapter, enum HAL_DEF_VARIABLE eVariable, void *pValue
+)
+{
+ u8 bResult = _SUCCESS;
+
+ switch (eVariable) {
+ case HAL_DEF_IS_SUPPORT_ANT_DIV:
+ break;
+ case HAL_DEF_CURRENT_ANTENNA:
+ break;
+ case HW_VAR_MAX_RX_AMPDU_FACTOR:
+ /* Stanley@BB.SD3 suggests 16K can get stable performance */
+ /* coding by Lucas@20130730 */
+ *(u32 *)pValue = MAX_AMPDU_FACTOR_16K;
+ break;
+ default:
+ bResult = GetHalDefVar8723B(Adapter, eVariable, pValue);
+ break;
+ }
+
+ return bResult;
+}
+
+/* */
+/* Description: */
+/* Change default setting of specified variable. */
+/* */
+static u8 SetHalDefVar8723BSDIO(struct adapter *Adapter,
+ enum HAL_DEF_VARIABLE eVariable, void *pValue)
+{
+ return SetHalDefVar8723B(Adapter, eVariable, pValue);
+}
+
+void rtl8723bs_set_hal_ops(struct adapter *padapter)
+{
+ struct hal_ops *pHalFunc = &padapter->HalFunc;
+
+ rtl8723b_set_hal_ops(pHalFunc);
+
+ pHalFunc->hal_init = &rtl8723bs_hal_init;
+ pHalFunc->hal_deinit = &rtl8723bs_hal_deinit;
+
+ pHalFunc->inirp_init = &rtl8723bs_inirp_init;
+ pHalFunc->inirp_deinit = &rtl8723bs_inirp_deinit;
+
+ pHalFunc->init_xmit_priv = &rtl8723bs_init_xmit_priv;
+ pHalFunc->free_xmit_priv = &rtl8723bs_free_xmit_priv;
+
+ pHalFunc->init_recv_priv = &rtl8723bs_init_recv_priv;
+ pHalFunc->free_recv_priv = &rtl8723bs_free_recv_priv;
+
+ pHalFunc->init_default_value = &rtl8723bs_init_default_value;
+ pHalFunc->intf_chip_configure = &rtl8723bs_interface_configure;
+ pHalFunc->read_adapter_info = &ReadAdapterInfo8723BS;
+
+ pHalFunc->enable_interrupt = &EnableInterrupt8723BSdio;
+ pHalFunc->disable_interrupt = &DisableInterrupt8723BSdio;
+ pHalFunc->check_ips_status = &CheckIPSStatus;
+#ifdef CONFIG_WOWLAN
+ pHalFunc->clear_interrupt = &ClearInterrupt8723BSdio;
+#endif
+ pHalFunc->SetHwRegHandler = &SetHwReg8723BS;
+ pHalFunc->GetHwRegHandler = &GetHwReg8723BS;
+ pHalFunc->SetHwRegHandlerWithBuf = &SetHwRegWithBuf8723B;
+ pHalFunc->GetHalDefVarHandler = &GetHalDefVar8723BSDIO;
+ pHalFunc->SetHalDefVarHandler = &SetHalDefVar8723BSDIO;
+
+ pHalFunc->hal_xmit = &rtl8723bs_hal_xmit;
+ pHalFunc->mgnt_xmit = &rtl8723bs_mgnt_xmit;
+ pHalFunc->hal_xmitframe_enqueue = &rtl8723bs_hal_xmitframe_enqueue;
+
+#if defined(CONFIG_CHECK_BT_HANG)
+ pHalFunc->hal_init_checkbthang_workqueue = &rtl8723bs_init_checkbthang_workqueue;
+ pHalFunc->hal_free_checkbthang_workqueue = &rtl8723bs_free_checkbthang_workqueue;
+ pHalFunc->hal_cancle_checkbthang_workqueue = &rtl8723bs_cancle_checkbthang_workqueue;
+ pHalFunc->hal_checke_bt_hang = &rtl8723bs_hal_check_bt_hang;
+#endif
+}
diff --git a/drivers/staging/rtl8723bs/hal/sdio_ops.c b/drivers/staging/rtl8723bs/hal/sdio_ops.c
new file mode 100644
index 000000000000..6285b72faa9a
--- /dev/null
+++ b/drivers/staging/rtl8723bs/hal/sdio_ops.c
@@ -0,0 +1,1294 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ *******************************************************************************/
+#define _SDIO_OPS_C_
+
+#include <drv_types.h>
+#include <rtw_debug.h>
+#include <rtl8723b_hal.h>
+
+/* define SDIO_DEBUG_IO 1 */
+
+
+/* */
+/* Description: */
+/* The following mapping is for SDIO host local register space. */
+/* */
+/* Created by Roger, 2011.01.31. */
+/* */
+static void HalSdioGetCmdAddr8723BSdio(
+ struct adapter *padapter,
+ u8 DeviceID,
+ u32 Addr,
+ u32 *pCmdAddr
+)
+{
+ switch (DeviceID) {
+ case SDIO_LOCAL_DEVICE_ID:
+ *pCmdAddr = ((SDIO_LOCAL_DEVICE_ID << 13) | (Addr & SDIO_LOCAL_MSK));
+ break;
+
+ case WLAN_IOREG_DEVICE_ID:
+ *pCmdAddr = ((WLAN_IOREG_DEVICE_ID << 13) | (Addr & WLAN_IOREG_MSK));
+ break;
+
+ case WLAN_TX_HIQ_DEVICE_ID:
+ *pCmdAddr = ((WLAN_TX_HIQ_DEVICE_ID << 13) | (Addr & WLAN_FIFO_MSK));
+ break;
+
+ case WLAN_TX_MIQ_DEVICE_ID:
+ *pCmdAddr = ((WLAN_TX_MIQ_DEVICE_ID << 13) | (Addr & WLAN_FIFO_MSK));
+ break;
+
+ case WLAN_TX_LOQ_DEVICE_ID:
+ *pCmdAddr = ((WLAN_TX_LOQ_DEVICE_ID << 13) | (Addr & WLAN_FIFO_MSK));
+ break;
+
+ case WLAN_RX0FF_DEVICE_ID:
+ *pCmdAddr = ((WLAN_RX0FF_DEVICE_ID << 13) | (Addr & WLAN_RX0FF_MSK));
+ break;
+
+ default:
+ break;
+ }
+}
+
+static u8 get_deviceid(u32 addr)
+{
+ u8 deviceId;
+ u16 pseudoId;
+
+
+ pseudoId = (u16)(addr >> 16);
+ switch (pseudoId) {
+ case 0x1025:
+ deviceId = SDIO_LOCAL_DEVICE_ID;
+ break;
+
+ case 0x1026:
+ deviceId = WLAN_IOREG_DEVICE_ID;
+ break;
+
+/* case 0x1027: */
+/* deviceId = SDIO_FIRMWARE_FIFO; */
+/* break; */
+
+ case 0x1031:
+ deviceId = WLAN_TX_HIQ_DEVICE_ID;
+ break;
+
+ case 0x1032:
+ deviceId = WLAN_TX_MIQ_DEVICE_ID;
+ break;
+
+ case 0x1033:
+ deviceId = WLAN_TX_LOQ_DEVICE_ID;
+ break;
+
+ case 0x1034:
+ deviceId = WLAN_RX0FF_DEVICE_ID;
+ break;
+
+ default:
+/* deviceId = (u8)((addr >> 13) & 0xF); */
+ deviceId = WLAN_IOREG_DEVICE_ID;
+ break;
+ }
+
+ return deviceId;
+}
+
+/*
+ * Ref:
+ *HalSdioGetCmdAddr8723BSdio()
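+ *
+ * The upper 16 bits of the pseudo address select the target device
+ * (see get_deviceid()); the low bits are masked to that device's offset
+ * and combined as (deviceId << 13) | offset.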
+ */
+static u32 _cvrt2ftaddr(const u32 addr, u8 *pdeviceId, u16 *poffset)
+{
+ u8 deviceId;
+ u16 offset;
+ u32 ftaddr;
+
+
+ deviceId = get_deviceid(addr);
+ offset = 0;
+
+ switch (deviceId) {
+ case SDIO_LOCAL_DEVICE_ID:
+ offset = addr & SDIO_LOCAL_MSK;
+ break;
+
+ case WLAN_TX_HIQ_DEVICE_ID:
+ case WLAN_TX_MIQ_DEVICE_ID:
+ case WLAN_TX_LOQ_DEVICE_ID:
+ offset = addr & WLAN_FIFO_MSK;
+ break;
+
+ case WLAN_RX0FF_DEVICE_ID:
+ offset = addr & WLAN_RX0FF_MSK;
+ break;
+
+ case WLAN_IOREG_DEVICE_ID:
+ default:
+ deviceId = WLAN_IOREG_DEVICE_ID;
+ offset = addr & WLAN_IOREG_MSK;
+ break;
+ }
+ ftaddr = (deviceId << 13) | offset;
+
+ if (pdeviceId)
+ *pdeviceId = deviceId;
+ if (poffset)
+ *poffset = offset;
+
+ return ftaddr;
+}
+
+static u8 sdio_read8(struct intf_hdl *pintfhdl, u32 addr)
+{
+ u32 ftaddr;
+ u8 val;
+
+ ftaddr = _cvrt2ftaddr(addr, NULL, NULL);
+ val = sd_read8(pintfhdl, ftaddr, NULL);
+ return val;
+}
+
+static u16 sdio_read16(struct intf_hdl *pintfhdl, u32 addr)
+{
+ u32 ftaddr;
+ u16 val;
+ __le16 le_tmp;
+
+ ftaddr = _cvrt2ftaddr(addr, NULL, NULL);
+ sd_cmd52_read(pintfhdl, ftaddr, 2, (u8 *)&le_tmp);
+ val = le16_to_cpu(le_tmp);
+ return val;
+}
+
+static u32 sdio_read32(struct intf_hdl *pintfhdl, u32 addr)
+{
+ struct adapter *padapter;
+ u8 bMacPwrCtrlOn;
+ u8 deviceId;
+ u16 offset;
+ u32 ftaddr;
+ u8 shift;
+ u32 val;
+ s32 err;
+ __le32 le_tmp;
+
+ padapter = pintfhdl->padapter;
+ ftaddr = _cvrt2ftaddr(addr, &deviceId, &offset);
+
+ rtw_hal_get_hwreg(padapter, HW_VAR_APFM_ON_MAC, &bMacPwrCtrlOn);
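+ /* For low MAC registers (offset < 0x100), or while MAC power is off or the FW is in power-save mode, fall back to byte-wise CMD52 access. */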
+ if (
+ ((deviceId == WLAN_IOREG_DEVICE_ID) && (offset < 0x100)) ||
+ (false == bMacPwrCtrlOn) ||
+ (true == adapter_to_pwrctl(padapter)->bFwCurrentInPSMode)
+ ) {
+ err = sd_cmd52_read(pintfhdl, ftaddr, 4, (u8 *)&le_tmp);
+#ifdef SDIO_DEBUG_IO
+ if (!err) {
+#endif
+ val = le32_to_cpu(le_tmp);
+ return val;
+#ifdef SDIO_DEBUG_IO
+ }
+
+ DBG_8192C(KERN_ERR "%s: Mac Power off, Read FAIL(%d)! addr = 0x%x\n", __func__, err, addr);
+ return SDIO_ERR_VAL32;
+#endif
+ }
+
+ /* 4 bytes alignment */
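+ /* For an unaligned address, read 8 bytes from the rounded-down address and pick out the 4 bytes at the original offset. */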
+ shift = ftaddr & 0x3;
+ if (shift == 0) {
+ val = sd_read32(pintfhdl, ftaddr, NULL);
+ } else {
+ u8 *ptmpbuf;
+
+ ptmpbuf = (u8 *)rtw_malloc(8);
+ if (NULL == ptmpbuf) {
+ DBG_8192C(KERN_ERR "%s: Allocate memory FAIL!(size =8) addr = 0x%x\n", __func__, addr);
+ return SDIO_ERR_VAL32;
+ }
+
+ ftaddr &= ~(u16)0x3;
+ sd_read(pintfhdl, ftaddr, 8, ptmpbuf);
+ memcpy(&le_tmp, ptmpbuf+shift, 4);
+ val = le32_to_cpu(le_tmp);
+
+ kfree(ptmpbuf);
+ }
+ return val;
+}
+
+static s32 sdio_readN(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *pbuf)
+{
+ struct adapter *padapter;
+ u8 bMacPwrCtrlOn;
+ u8 deviceId;
+ u16 offset;
+ u32 ftaddr;
+ u8 shift;
+ s32 err;
+
+ padapter = pintfhdl->padapter;
+ err = 0;
+
+ ftaddr = _cvrt2ftaddr(addr, &deviceId, &offset);
+
+ rtw_hal_get_hwreg(padapter, HW_VAR_APFM_ON_MAC, &bMacPwrCtrlOn);
+ if (
+ ((deviceId == WLAN_IOREG_DEVICE_ID) && (offset < 0x100)) ||
+ (false == bMacPwrCtrlOn) ||
+ (true == adapter_to_pwrctl(padapter)->bFwCurrentInPSMode)
+ ) {
+ err = sd_cmd52_read(pintfhdl, ftaddr, cnt, pbuf);
+ return err;
+ }
+
+ /* 4 bytes alignment */
+ shift = ftaddr & 0x3;
+ if (shift == 0) {
+ err = sd_read(pintfhdl, ftaddr, cnt, pbuf);
+ } else {
+ u8 *ptmpbuf;
+ u32 n;
+
+ ftaddr &= ~(u16)0x3;
+ n = cnt + shift;
+ ptmpbuf = rtw_malloc(n);
+ if (NULL == ptmpbuf)
+ return -1;
+
+ err = sd_read(pintfhdl, ftaddr, n, ptmpbuf);
+ if (!err)
+ memcpy(pbuf, ptmpbuf+shift, cnt);
+ kfree(ptmpbuf);
+ }
+ return err;
+}
+
+static s32 sdio_write8(struct intf_hdl *pintfhdl, u32 addr, u8 val)
+{
+ u32 ftaddr;
+ s32 err;
+
+ ftaddr = _cvrt2ftaddr(addr, NULL, NULL);
+ sd_write8(pintfhdl, ftaddr, val, &err);
+
+ return err;
+}
+
+static s32 sdio_write16(struct intf_hdl *pintfhdl, u32 addr, u16 val)
+{
+ u32 ftaddr;
+ s32 err;
+ __le16 le_tmp;
+
+ ftaddr = _cvrt2ftaddr(addr, NULL, NULL);
+ le_tmp = cpu_to_le16(val);
+ err = sd_cmd52_write(pintfhdl, ftaddr, 2, (u8 *)&le_tmp);
+
+ return err;
+}
+
+static s32 sdio_write32(struct intf_hdl *pintfhdl, u32 addr, u32 val)
+{
+ struct adapter *padapter;
+ u8 bMacPwrCtrlOn;
+ u8 deviceId;
+ u16 offset;
+ u32 ftaddr;
+ u8 shift;
+ s32 err;
+ __le32 le_tmp;
+
+ padapter = pintfhdl->padapter;
+ err = 0;
+
+ ftaddr = _cvrt2ftaddr(addr, &deviceId, &offset);
+
+ rtw_hal_get_hwreg(padapter, HW_VAR_APFM_ON_MAC, &bMacPwrCtrlOn);
+ if (
+ ((deviceId == WLAN_IOREG_DEVICE_ID) && (offset < 0x100)) ||
+ (!bMacPwrCtrlOn) ||
+ (adapter_to_pwrctl(padapter)->bFwCurrentInPSMode)
+ ) {
+ le_tmp = cpu_to_le32(val);
+ err = sd_cmd52_write(pintfhdl, ftaddr, 4, (u8 *)&le_tmp);
+ return err;
+ }
+
+ /* 4 bytes alignment */
+ shift = ftaddr & 0x3;
+ if (shift == 0) {
+ sd_write32(pintfhdl, ftaddr, val, &err);
+ } else {
+ le_tmp = cpu_to_le32(val);
+ err = sd_cmd52_write(pintfhdl, ftaddr, 4, (u8 *)&le_tmp);
+ }
+ return err;
+}
+
+static s32 sdio_writeN(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *pbuf)
+{
+ struct adapter *padapter;
+ u8 bMacPwrCtrlOn;
+ u8 deviceId;
+ u16 offset;
+ u32 ftaddr;
+ u8 shift;
+ s32 err;
+
+ padapter = pintfhdl->padapter;
+ err = 0;
+
+ ftaddr = _cvrt2ftaddr(addr, &deviceId, &offset);
+
+ rtw_hal_get_hwreg(padapter, HW_VAR_APFM_ON_MAC, &bMacPwrCtrlOn);
+ if (
+ ((deviceId == WLAN_IOREG_DEVICE_ID) && (offset < 0x100)) ||
+ (false == bMacPwrCtrlOn) ||
+ (true == adapter_to_pwrctl(padapter)->bFwCurrentInPSMode)
+ ) {
+ err = sd_cmd52_write(pintfhdl, ftaddr, cnt, pbuf);
+ return err;
+ }
+
+ shift = ftaddr & 0x3;
+ if (shift == 0) {
+ err = sd_write(pintfhdl, ftaddr, cnt, pbuf);
+ } else {
+ u8 *ptmpbuf;
+ u32 n;
+
+ ftaddr &= ~(u16)0x3;
+ n = cnt + shift;
+ ptmpbuf = rtw_malloc(n);
+ if (NULL == ptmpbuf)
+ return -1;
+ err = sd_read(pintfhdl, ftaddr, 4, ptmpbuf);
+ if (err) {
+ kfree(ptmpbuf);
+ return err;
+ }
+ memcpy(ptmpbuf+shift, pbuf, cnt);
+ err = sd_write(pintfhdl, ftaddr, n, ptmpbuf);
+ kfree(ptmpbuf);
+ }
+ return err;
+}
+
+static u8 sdio_f0_read8(struct intf_hdl *pintfhdl, u32 addr)
+{
+ return sd_f0_read8(pintfhdl, addr, NULL);
+}
+
+static void sdio_read_mem(
+ struct intf_hdl *pintfhdl,
+ u32 addr,
+ u32 cnt,
+ u8 *rmem
+)
+{
+ s32 err;
+
+ err = sdio_readN(pintfhdl, addr, cnt, rmem);
+ /* TODO: Report error if err is not zero */
+}
+
+static void sdio_write_mem(
+ struct intf_hdl *pintfhdl,
+ u32 addr,
+ u32 cnt,
+ u8 *wmem
+)
+{
+ sdio_writeN(pintfhdl, addr, cnt, wmem);
+}
+
+/*
+ * Description:
+ *Read from RX FIFO
+ *Round read size to block size,
+ *and make sure data transfer will be done in one command.
+ *
+ * Parameters:
+ *pintfhdl a pointer of intf_hdl
+ *addr port ID
+ *cnt size to read
+ *rmem address to put data
+ *
+ * Return:
+ *_SUCCESS(1) Success
+ *_FAIL(0) Fail
+ */
+static u32 sdio_read_port(
+ struct intf_hdl *pintfhdl,
+ u32 addr,
+ u32 cnt,
+ u8 *mem
+)
+{
+ struct adapter *padapter;
+ PSDIO_DATA psdio;
+ struct hal_com_data *phal;
+ u32 oldcnt;
+#ifdef SDIO_DYNAMIC_ALLOC_MEM
+ u8 *oldmem;
+#endif
+ s32 err;
+
+
+ padapter = pintfhdl->padapter;
+ psdio = &adapter_to_dvobj(padapter)->intf_data;
+ phal = GET_HAL_DATA(padapter);
+
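+ /* addr passed in is the RX FIFO device ID; the running SdioRxFIFOCnt becomes the offset part of the command address. */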
+ HalSdioGetCmdAddr8723BSdio(padapter, addr, phal->SdioRxFIFOCnt++, &addr);
+
+ oldcnt = cnt;
+ if (cnt > psdio->block_transfer_len)
+ cnt = _RND(cnt, psdio->block_transfer_len);
+/* cnt = sdio_align_size(cnt); */
+
+ if (oldcnt != cnt) {
+#ifdef SDIO_DYNAMIC_ALLOC_MEM
+ oldmem = mem;
+ mem = rtw_malloc(cnt);
+ if (mem == NULL) {
+ DBG_8192C(KERN_WARNING "%s: allocate memory %d bytes fail!\n", __func__, cnt);
+ mem = oldmem;
+ oldmem = NULL;
+ }
+#else
+ /* in this case, the caller should guarantee the buffer is big enough */
+ /* to receive data after alignment */
+#endif
+ }
+
+ err = _sd_read(pintfhdl, addr, cnt, mem);
+
+#ifdef SDIO_DYNAMIC_ALLOC_MEM
+ if ((oldcnt != cnt) && (oldmem)) {
+ memcpy(oldmem, mem, oldcnt);
+ kfree(mem);
+ }
+#endif
+
+ if (err)
+ return _FAIL;
+ return _SUCCESS;
+}
+
+/*
+ * Description:
+ *Write to TX FIFO
+ *Align write size to block size,
+ *and make sure data could be written in one command.
+ *
+ * Parameters:
+ *pintfhdl a pointer of intf_hdl
+ *addr port ID
+ *cnt size to write
+ *wmem data pointer to write
+ *
+ * Return:
+ *_SUCCESS(1) Success
+ *_FAIL(0) Fail
+ */
+static u32 sdio_write_port(
+ struct intf_hdl *pintfhdl,
+ u32 addr,
+ u32 cnt,
+ u8 *mem
+)
+{
+ struct adapter *padapter;
+ PSDIO_DATA psdio;
+ s32 err;
+ struct xmit_buf *xmitbuf = (struct xmit_buf *)mem;
+
+ padapter = pintfhdl->padapter;
+ psdio = &adapter_to_dvobj(padapter)->intf_data;
+
+ if (padapter->hw_init_completed == false) {
+ DBG_871X("%s [addr = 0x%x cnt =%d] padapter->hw_init_completed == false\n", __func__, addr, cnt);
+ return _FAIL;
+ }
+
+ cnt = _RND4(cnt);
+ HalSdioGetCmdAddr8723BSdio(padapter, addr, cnt >> 2, &addr);
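+ /* The dword-aligned transfer length (cnt >> 2) is encoded into the TX command address. */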
+
+ if (cnt > psdio->block_transfer_len)
+ cnt = _RND(cnt, psdio->block_transfer_len);
+/* cnt = sdio_align_size(cnt); */
+
+ err = sd_write(pintfhdl, addr, cnt, xmitbuf->pdata);
+
+ rtw_sctx_done_err(
+ &xmitbuf->sctx,
+ err ? RTW_SCTX_DONE_WRITE_PORT_ERR : RTW_SCTX_DONE_SUCCESS
+ );
+
+ if (err)
+ return _FAIL;
+ return _SUCCESS;
+}
+
+void sdio_set_intf_ops(struct adapter *padapter, struct _io_ops *pops)
+{
+ pops->_read8 = &sdio_read8;
+ pops->_read16 = &sdio_read16;
+ pops->_read32 = &sdio_read32;
+ pops->_read_mem = &sdio_read_mem;
+ pops->_read_port = &sdio_read_port;
+
+ pops->_write8 = &sdio_write8;
+ pops->_write16 = &sdio_write16;
+ pops->_write32 = &sdio_write32;
+ pops->_writeN = &sdio_writeN;
+ pops->_write_mem = &sdio_write_mem;
+ pops->_write_port = &sdio_write_port;
+
+ pops->_sd_f0_read8 = sdio_f0_read8;
+}
+
+/*
+ * Todo: align address to 4 bytes.
+ */
+static s32 _sdio_local_read(
+ struct adapter *padapter,
+ u32 addr,
+ u32 cnt,
+ u8 *pbuf
+)
+{
+ struct intf_hdl *pintfhdl;
+ u8 bMacPwrCtrlOn;
+ s32 err;
+ u8 *ptmpbuf;
+ u32 n;
+
+
+ pintfhdl = &padapter->iopriv.intf;
+
+ HalSdioGetCmdAddr8723BSdio(padapter, SDIO_LOCAL_DEVICE_ID, addr, &addr);
+
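+ /* When the MAC power is off, fall back to byte-wise CMD52 access instead of a CMD53 block read. */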
+ rtw_hal_get_hwreg(padapter, HW_VAR_APFM_ON_MAC, &bMacPwrCtrlOn);
+ if (false == bMacPwrCtrlOn) {
+ err = _sd_cmd52_read(pintfhdl, addr, cnt, pbuf);
+ return err;
+ }
+
+ n = RND4(cnt);
+ ptmpbuf = (u8 *)rtw_malloc(n);
+ if (!ptmpbuf)
+ return (-1);
+
+ err = _sd_read(pintfhdl, addr, n, ptmpbuf);
+ if (!err)
+ memcpy(pbuf, ptmpbuf, cnt);
+
+ kfree(ptmpbuf);
+
+ return err;
+}
+
+/*
+ * Todo: align address to 4 bytes.
+ */
+s32 sdio_local_read(
+ struct adapter *padapter,
+ u32 addr,
+ u32 cnt,
+ u8 *pbuf
+)
+{
+ struct intf_hdl *pintfhdl;
+ u8 bMacPwrCtrlOn;
+ s32 err;
+ u8 *ptmpbuf;
+ u32 n;
+
+ pintfhdl = &padapter->iopriv.intf;
+
+ HalSdioGetCmdAddr8723BSdio(padapter, SDIO_LOCAL_DEVICE_ID, addr, &addr);
+
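+ /* Fall back to CMD52 access when the MAC power is off or the FW is in power-save mode. */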
+ rtw_hal_get_hwreg(padapter, HW_VAR_APFM_ON_MAC, &bMacPwrCtrlOn);
+ if (
+ (false == bMacPwrCtrlOn) ||
+ (true == adapter_to_pwrctl(padapter)->bFwCurrentInPSMode)
+ ) {
+ err = sd_cmd52_read(pintfhdl, addr, cnt, pbuf);
+ return err;
+ }
+
+ n = RND4(cnt);
+ ptmpbuf = (u8 *)rtw_malloc(n);
+ if (!ptmpbuf)
+ return (-1);
+
+ err = sd_read(pintfhdl, addr, n, ptmpbuf);
+ if (!err)
+ memcpy(pbuf, ptmpbuf, cnt);
+
+ kfree(ptmpbuf);
+
+ return err;
+}
+
+/*
+ * Todo: align address to 4 bytes.
+ */
+s32 sdio_local_write(
+ struct adapter *padapter,
+ u32 addr,
+ u32 cnt,
+ u8 *pbuf
+)
+{
+ struct intf_hdl *pintfhdl;
+ u8 bMacPwrCtrlOn;
+ s32 err;
+ u8 *ptmpbuf;
+
+ if (addr & 0x3)
+ DBG_8192C("%s, address must be 4 bytes alignment\n", __func__);
+
+ if (cnt & 0x3)
+ DBG_8192C("%s, size must be the multiple of 4\n", __func__);
+
+ pintfhdl = &padapter->iopriv.intf;
+
+ HalSdioGetCmdAddr8723BSdio(padapter, SDIO_LOCAL_DEVICE_ID, addr, &addr);
+
+ rtw_hal_get_hwreg(padapter, HW_VAR_APFM_ON_MAC, &bMacPwrCtrlOn);
+ if (
+ (false == bMacPwrCtrlOn) ||
+ (true == adapter_to_pwrctl(padapter)->bFwCurrentInPSMode)
+ ) {
+ err = sd_cmd52_write(pintfhdl, addr, cnt, pbuf);
+ return err;
+ }
+
+ ptmpbuf = (u8 *)rtw_malloc(cnt);
+ if (!ptmpbuf)
+ return (-1);
+
+ memcpy(ptmpbuf, pbuf, cnt);
+
+ err = sd_write(pintfhdl, addr, cnt, ptmpbuf);
+
+ kfree(ptmpbuf);
+
+ return err;
+}
+
+u8 SdioLocalCmd52Read1Byte(struct adapter *padapter, u32 addr)
+{
+ u8 val = 0;
+ struct intf_hdl *pintfhdl = &padapter->iopriv.intf;
+
+ HalSdioGetCmdAddr8723BSdio(padapter, SDIO_LOCAL_DEVICE_ID, addr, &addr);
+ sd_cmd52_read(pintfhdl, addr, 1, &val);
+
+ return val;
+}
+
+static u16 SdioLocalCmd52Read2Byte(struct adapter *padapter, u32 addr)
+{
+ __le16 val = 0;
+ struct intf_hdl *pintfhdl = &padapter->iopriv.intf;
+
+ HalSdioGetCmdAddr8723BSdio(padapter, SDIO_LOCAL_DEVICE_ID, addr, &addr);
+ sd_cmd52_read(pintfhdl, addr, 2, (u8 *)&val);
+
+ return le16_to_cpu(val);
+}
+
+static u32 SdioLocalCmd53Read4Byte(struct adapter *padapter, u32 addr)
+{
+
+ u8 bMacPwrCtrlOn;
+ u32 val = 0;
+ struct intf_hdl *pintfhdl = &padapter->iopriv.intf;
+ __le32 le_tmp;
+
+ HalSdioGetCmdAddr8723BSdio(padapter, SDIO_LOCAL_DEVICE_ID, addr, &addr);
+ rtw_hal_get_hwreg(padapter, HW_VAR_APFM_ON_MAC, &bMacPwrCtrlOn);
+ if (!bMacPwrCtrlOn || adapter_to_pwrctl(padapter)->bFwCurrentInPSMode) {
+ sd_cmd52_read(pintfhdl, addr, 4, (u8 *)&le_tmp);
+ val = le32_to_cpu(le_tmp);
+ } else {
+ val = sd_read32(pintfhdl, addr, NULL);
+ }
+ return val;
+}
+
+void SdioLocalCmd52Write1Byte(struct adapter *padapter, u32 addr, u8 v)
+{
+ struct intf_hdl *pintfhdl = &padapter->iopriv.intf;
+
+ HalSdioGetCmdAddr8723BSdio(padapter, SDIO_LOCAL_DEVICE_ID, addr, &addr);
+ sd_cmd52_write(pintfhdl, addr, 1, &v);
+}
+
+static void SdioLocalCmd52Write4Byte(struct adapter *padapter, u32 addr, u32 v)
+{
+ struct intf_hdl *pintfhdl = &padapter->iopriv.intf;
+ __le32 le_tmp;
+
+ HalSdioGetCmdAddr8723BSdio(padapter, SDIO_LOCAL_DEVICE_ID, addr, &addr);
+ le_tmp = cpu_to_le32(v);
+ sd_cmd52_write(pintfhdl, addr, 4, (u8 *)&le_tmp);
+}
+
+static s32 ReadInterrupt8723BSdio(struct adapter *padapter, u32 *phisr)
+{
+ u32 hisr, himr;
+ u8 val8, hisr_len;
+
+
+ if (phisr == NULL)
+ return false;
+
+ himr = GET_HAL_DATA(padapter)->sdio_himr;
+
+ /* decide how many bytes need to be read */
+ hisr_len = 0;
+ while (himr) {
+ hisr_len++;
+ himr >>= 8;
+ }
+
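+ /* Assemble the HISR value byte by byte, from the highest interesting byte down to byte 0. */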
+ hisr = 0;
+ while (hisr_len != 0) {
+ hisr_len--;
+ val8 = SdioLocalCmd52Read1Byte(padapter, SDIO_REG_HISR+hisr_len);
+ hisr |= (val8 << (8*hisr_len));
+ }
+
+ *phisr = hisr;
+
+ return true;
+}
+
+/* */
+/* Description: */
+/* Initialize SDIO Host Interrupt Mask configuration variables for future use. */
+/* */
+/* Assumption: */
+/* Using SDIO Local register ONLY for configuration. */
+/* */
+/* Created by Roger, 2011.02.11. */
+/* */
+void InitInterrupt8723BSdio(struct adapter *padapter)
+{
+ struct hal_com_data *pHalData;
+
+
+ pHalData = GET_HAL_DATA(padapter);
+ pHalData->sdio_himr = (u32)( \
+ SDIO_HIMR_RX_REQUEST_MSK |
+ SDIO_HIMR_AVAL_MSK |
+/* SDIO_HIMR_TXERR_MSK | */
+/* SDIO_HIMR_RXERR_MSK | */
+/* SDIO_HIMR_TXFOVW_MSK | */
+/* SDIO_HIMR_RXFOVW_MSK | */
+/* SDIO_HIMR_TXBCNOK_MSK | */
+/* SDIO_HIMR_TXBCNERR_MSK | */
+/* SDIO_HIMR_BCNERLY_INT_MSK | */
+/* SDIO_HIMR_C2HCMD_MSK | */
+/* SDIO_HIMR_HSISR_IND_MSK | */
+/* SDIO_HIMR_GTINT3_IND_MSK | */
+/* SDIO_HIMR_GTINT4_IND_MSK | */
+/* SDIO_HIMR_PSTIMEOUT_MSK | */
+/* SDIO_HIMR_OCPINT_MSK | */
+/* SDIO_HIMR_ATIMEND_MSK | */
+/* SDIO_HIMR_ATIMEND_E_MSK | */
+/* SDIO_HIMR_CTWEND_MSK | */
+ 0);
+}
+
+/* */
+/* Description: */
+/* Initialize System Host Interrupt Mask configuration variables for future use. */
+/* */
+/* Created by Roger, 2011.08.03. */
+/* */
+void InitSysInterrupt8723BSdio(struct adapter *padapter)
+{
+ struct hal_com_data *pHalData;
+
+
+ pHalData = GET_HAL_DATA(padapter);
+
+ pHalData->SysIntrMask = ( \
+/* HSIMR_GPIO12_0_INT_EN | */
+/* HSIMR_SPS_OCP_INT_EN | */
+/* HSIMR_RON_INT_EN | */
+/* HSIMR_PDNINT_EN | */
+/* HSIMR_GPIO9_INT_EN | */
+ 0);
+}
+
+#ifdef CONFIG_WOWLAN
+/* */
+/* Description: */
+/* Clear corresponding SDIO Host ISR interrupt service. */
+/* */
+/* Assumption: */
+/* Using SDIO Local register ONLY for configuration. */
+/* */
+/* Created by Roger, 2011.02.11. */
+/* */
+void ClearInterrupt8723BSdio(struct adapter *padapter)
+{
+ struct hal_com_data *pHalData;
+ u8 *clear;
+
+
+ if (true == padapter->bSurpriseRemoved)
+ return;
+
+ pHalData = GET_HAL_DATA(padapter);
+ clear = rtw_zmalloc(4);
+ if (!clear)
+ return;
+
+ /* Clear corresponding HISR Content if needed */
+ *(__le32 *)clear = cpu_to_le32(pHalData->sdio_hisr & MASK_SDIO_HISR_CLEAR);
+ if (*(__le32 *)clear) {
+ /* Perform write one clear operation */
+ sdio_local_write(padapter, SDIO_REG_HISR, 4, clear);
+ }
+
+ kfree(clear);
+}
+#endif
+
+/* */
+/* Description: */
+/* Enable SDIO Host Interrupt Mask configuration on the SDIO local domain. */
+/* */
+/* Assumption: */
+/* 1. Using SDIO Local register ONLY for configuration. */
+/* 2. PASSIVE LEVEL */
+/* */
+/* Created by Roger, 2011.02.11. */
+/* */
+void EnableInterrupt8723BSdio(struct adapter *padapter)
+{
+ struct hal_com_data *pHalData;
+ __le32 himr;
+ u32 tmp;
+
+ pHalData = GET_HAL_DATA(padapter);
+
+ himr = cpu_to_le32(pHalData->sdio_himr);
+ sdio_local_write(padapter, SDIO_REG_HIMR, 4, (u8 *)&himr);
+
+ RT_TRACE(
+ _module_hci_ops_c_,
+ _drv_notice_,
+ (
+ "%s: enable SDIO HIMR = 0x%08X\n",
+ __func__,
+ pHalData->sdio_himr
+ )
+ );
+
+ /* Update current system IMR settings */
+ tmp = rtw_read32(padapter, REG_HSIMR);
+ rtw_write32(padapter, REG_HSIMR, tmp | pHalData->SysIntrMask);
+
+ RT_TRACE(
+ _module_hci_ops_c_,
+ _drv_notice_,
+ (
+ "%s: enable HSIMR = 0x%08X\n",
+ __func__,
+ pHalData->SysIntrMask
+ )
+ );
+
+ /* */
+ /* <Roger_Notes> Some C2H commands (e.g. C2H, CPWM) may have been sent before the system interrupt is enabled. */
+ /* Clear all C2H events that the FW has notified, otherwise the FW won't schedule any more commands. */
+ /* 2011.10.19. */
+ /* */
+ rtw_write8(padapter, REG_C2HEVT_CLEAR, C2H_EVT_HOST_CLOSE);
+}
+
+/* */
+/* Description: */
+/* Disable SDIO Host IMR configuration to mask unnecessary interrupt service. */
+/* */
+/* Assumption: */
+/* Using SDIO Local register ONLY for configuration. */
+/* */
+/* Created by Roger, 2011.02.11. */
+/* */
+void DisableInterrupt8723BSdio(struct adapter *padapter)
+{
+ __le32 himr;
+
+ himr = cpu_to_le32(SDIO_HIMR_DISABLED);
+ sdio_local_write(padapter, SDIO_REG_HIMR, 4, (u8 *)&himr);
+}
+
+/* */
+/* Description: */
+/* Using 0x100 to check the power status of FW. */
+/* */
+/* Assumption: */
+/* Using SDIO Local register ONLY for configuration. */
+/* */
+/* Created by Isaac, 2013.09.10. */
+/* */
+u8 CheckIPSStatus(struct adapter *padapter)
+{
+ DBG_871X(
+ "%s(): Read 0x100 = 0x%02x 0x86 = 0x%02x\n",
+ __func__,
+ rtw_read8(padapter, 0x100),
+ rtw_read8(padapter, 0x86)
+ );
+
+ return rtw_read8(padapter, 0x100) == 0xEA;
+}
+
+static struct recv_buf *sd_recv_rxfifo(struct adapter *padapter, u32 size)
+{
+ u32 readsize, ret;
+ u8 *preadbuf;
+ struct recv_priv *precvpriv;
+ struct recv_buf *precvbuf;
+
+
+ /* Patch for the 4-byte access issue of some SDIO hosts, */
+ /* e.g. RK3188 */
+ readsize = RND4(size);
+
+ /* 3 1. alloc recvbuf */
+ precvpriv = &padapter->recvpriv;
+ precvbuf = rtw_dequeue_recvbuf(&precvpriv->free_recv_buf_queue);
+ if (precvbuf == NULL) {
+ DBG_871X_LEVEL(_drv_err_, "%s: alloc recvbuf FAIL!\n", __func__);
+ return NULL;
+ }
+
+ /* 3 2. alloc skb */
+ if (precvbuf->pskb == NULL) {
+ SIZE_PTR tmpaddr = 0;
+ SIZE_PTR alignment = 0;
+
+ precvbuf->pskb = rtw_skb_alloc(MAX_RECVBUF_SZ + RECVBUFF_ALIGN_SZ);
+
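+ /* Over-allocate, then skb_reserve() so that the data pointer ends up RECVBUFF_ALIGN_SZ aligned. */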
+ if (precvbuf->pskb) {
+ precvbuf->pskb->dev = padapter->pnetdev;
+
+ tmpaddr = (SIZE_PTR)precvbuf->pskb->data;
+ alignment = tmpaddr & (RECVBUFF_ALIGN_SZ-1);
+ skb_reserve(precvbuf->pskb, (RECVBUFF_ALIGN_SZ - alignment));
+ }
+
+ if (precvbuf->pskb == NULL) {
+ DBG_871X("%s: alloc_skb fail! read =%d\n", __func__, readsize);
+ return NULL;
+ }
+ }
+
+ /* 3 3. read data from rxfifo */
+ preadbuf = precvbuf->pskb->data;
+ ret = sdio_read_port(&padapter->iopriv.intf, WLAN_RX0FF_DEVICE_ID, readsize, preadbuf);
+ if (ret == _FAIL) {
+ RT_TRACE(_module_hci_ops_os_c_, _drv_err_, ("%s: read port FAIL!\n", __func__));
+ return NULL;
+ }
+
+
+ /* 3 4. init recvbuf */
+ precvbuf->len = size;
+ precvbuf->phead = precvbuf->pskb->head;
+ precvbuf->pdata = precvbuf->pskb->data;
+ skb_set_tail_pointer(precvbuf->pskb, size);
+ precvbuf->ptail = skb_tail_pointer(precvbuf->pskb);
+ precvbuf->pend = skb_end_pointer(precvbuf->pskb);
+
+ return precvbuf;
+}
+
+static void sd_rxhandler(struct adapter *padapter, struct recv_buf *precvbuf)
+{
+ struct recv_priv *precvpriv;
+ struct __queue *ppending_queue;
+
+ precvpriv = &padapter->recvpriv;
+ ppending_queue = &precvpriv->recv_buf_pending_queue;
+
+ /* 3 1. enqueue recvbuf */
+ rtw_enqueue_recvbuf(precvbuf, ppending_queue);
+
+ /* 3 2. schedule tasklet */
+ tasklet_schedule(&precvpriv->recv_tasklet);
+}
+
+void sd_int_dpc(struct adapter *padapter)
+{
+ struct hal_com_data *phal;
+ struct dvobj_priv *dvobj;
+ struct intf_hdl *pintfhdl = &padapter->iopriv.intf;
+ struct pwrctrl_priv *pwrctl;
+
+
+ phal = GET_HAL_DATA(padapter);
+ dvobj = adapter_to_dvobj(padapter);
+ pwrctl = dvobj_to_pwrctl(dvobj);
+
+ if (phal->sdio_hisr & SDIO_HISR_AVAL) {
+ u8 freepage[4];
+
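+ /* TX buffer space is available again: read the free page count and release the xmit semaphore. */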
+ _sdio_local_read(padapter, SDIO_REG_FREE_TXPG, 4, freepage);
+ up(&(padapter->xmitpriv.xmit_sema));
+ }
+
+ if (phal->sdio_hisr & SDIO_HISR_CPWM1) {
+ struct reportpwrstate_parm report;
+
+ u8 bcancelled;
+ _cancel_timer(&(pwrctl->pwr_rpwm_timer), &bcancelled);
+
+ report.state = SdioLocalCmd52Read1Byte(padapter, SDIO_REG_HCPWM1_8723B);
+
+ /* cpwm_int_hdl(padapter, &report); */
+ _set_workitem(&(pwrctl->cpwm_event));
+ }
+
+ if (phal->sdio_hisr & SDIO_HISR_TXERR) {
+ u8 *status;
+ u32 addr;
+
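+ /* Read the TXDMA status, write the same value back (apparently to clear it), then log it. */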
+ status = rtw_malloc(4);
+ if (status) {
+ addr = REG_TXDMA_STATUS;
+ HalSdioGetCmdAddr8723BSdio(padapter, WLAN_IOREG_DEVICE_ID, addr, &addr);
+ _sd_read(pintfhdl, addr, 4, status);
+ _sd_write(pintfhdl, addr, 4, status);
+ DBG_8192C("%s: SDIO_HISR_TXERR (0x%08x)\n", __func__, le32_to_cpu(*(u32 *)status));
+ kfree(status);
+ } else {
+ DBG_8192C("%s: SDIO_HISR_TXERR, but can't allocate memory to read status!\n", __func__);
+ }
+ }
+
+ if (phal->sdio_hisr & SDIO_HISR_TXBCNOK) {
+ DBG_8192C("%s: SDIO_HISR_TXBCNOK\n", __func__);
+ }
+
+ if (phal->sdio_hisr & SDIO_HISR_TXBCNERR) {
+ DBG_8192C("%s: SDIO_HISR_TXBCNERR\n", __func__);
+ }
+#ifndef CONFIG_C2H_PACKET_EN
+ if (phal->sdio_hisr & SDIO_HISR_C2HCMD) {
+ struct c2h_evt_hdr_88xx *c2h_evt;
+
+ DBG_8192C("%s: C2H Command\n", __func__);
+ c2h_evt = (struct c2h_evt_hdr_88xx *)rtw_zmalloc(16);
+ if (c2h_evt != NULL) {
+ if (rtw_hal_c2h_evt_read(padapter, (u8 *)c2h_evt) == _SUCCESS) {
+ if (c2h_id_filter_ccx_8723b((u8 *)c2h_evt)) {
+ /* Handle CCX report here */
+ rtw_hal_c2h_handler(padapter, (u8 *)c2h_evt);
+ kfree((u8 *)c2h_evt);
+ } else {
+ rtw_c2h_wk_cmd(padapter, (u8 *)c2h_evt);
+ }
+ }
+ } else {
+ /* Error handling for malloc fail */
+ if (rtw_cbuf_push(padapter->evtpriv.c2h_queue, (void *)NULL) != _SUCCESS)
+ DBG_871X("%s rtw_cbuf_push fail\n", __func__);
+ _set_workitem(&padapter->evtpriv.c2h_wk);
+ }
+ }
+#endif
+
+ if (phal->sdio_hisr & SDIO_HISR_RXFOVW) {
+ DBG_8192C("%s: Rx Overflow\n", __func__);
+ }
+
+ if (phal->sdio_hisr & SDIO_HISR_RXERR) {
+ DBG_8192C("%s: Rx Error\n", __func__);
+ }
+
+ if (phal->sdio_hisr & SDIO_HISR_RX_REQUEST) {
+ struct recv_buf *precvbuf;
+ int alloc_fail_time = 0;
+ u32 hisr;
+
+/* DBG_8192C("%s: RX Request, size =%d\n", __func__, phal->SdioRxFIFOSize); */
+ phal->sdio_hisr ^= SDIO_HISR_RX_REQUEST;
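+ /* Keep draining the RX FIFO until the RX length register reads 0 and no further RX request is pending. */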
+ do {
+ phal->SdioRxFIFOSize = SdioLocalCmd52Read2Byte(padapter, SDIO_REG_RX0_REQ_LEN);
+ if (phal->SdioRxFIFOSize != 0) {
+ precvbuf = sd_recv_rxfifo(padapter, phal->SdioRxFIFOSize);
+ if (precvbuf)
+ sd_rxhandler(padapter, precvbuf);
+ else {
+ alloc_fail_time++;
+ DBG_871X("precvbuf is Null for %d times because alloc memory failed\n", alloc_fail_time);
+ if (alloc_fail_time >= 10)
+ break;
+ }
+ phal->SdioRxFIFOSize = 0;
+ } else
+ break;
+
+ hisr = 0;
+ ReadInterrupt8723BSdio(padapter, &hisr);
+ hisr &= SDIO_HISR_RX_REQUEST;
+ if (!hisr)
+ break;
+ } while (1);
+
+ if (alloc_fail_time == 10)
+ DBG_871X("exit because alloc memory failed more than 10 times\n");
+
+ }
+}
+
+void sd_int_hdl(struct adapter *padapter)
+{
+ struct hal_com_data *phal;
+
+
+ if (
+ (padapter->bDriverStopped == true) ||
+ (padapter->bSurpriseRemoved == true)
+ )
+ return;
+
+ phal = GET_HAL_DATA(padapter);
+
+ phal->sdio_hisr = 0;
+ ReadInterrupt8723BSdio(padapter, &phal->sdio_hisr);
+
+ if (phal->sdio_hisr & phal->sdio_himr) {
+ u32 v32;
+
+ phal->sdio_hisr &= phal->sdio_himr;
+
+ /* clear HISR */
+ v32 = phal->sdio_hisr & MASK_SDIO_HISR_CLEAR;
+ if (v32) {
+ SdioLocalCmd52Write4Byte(padapter, SDIO_REG_HISR, v32);
+ }
+
+ sd_int_dpc(padapter);
+ } else {
+ RT_TRACE(_module_hci_ops_c_, _drv_err_,
+ ("%s: HISR(0x%08x) and HIMR(0x%08x) not match!\n",
+ __func__, phal->sdio_hisr, phal->sdio_himr));
+ }
+}
+
+/* */
+/* Description: */
+/* Query the SDIO Local register to get the current number of free TxPacketBuffer pages. */
+/* */
+/* Assumption: */
+/* 1. Running at PASSIVE_LEVEL */
+/* 2. RT_TX_SPINLOCK is NOT acquired. */
+/* */
+/* Created by Roger, 2011.01.28. */
+/* */
+u8 HalQueryTxBufferStatus8723BSdio(struct adapter *padapter)
+{
+ struct hal_com_data *phal;
+ u32 NumOfFreePage;
+ /* _irqL irql; */
+
+
+ phal = GET_HAL_DATA(padapter);
+
+ NumOfFreePage = SdioLocalCmd53Read4Byte(padapter, SDIO_REG_FREE_TXPG);
+
+ /* spin_lock_bh(&phal->SdioTxFIFOFreePageLock); */
+ memcpy(phal->SdioTxFIFOFreePage, &NumOfFreePage, 4);
+ RT_TRACE(_module_hci_ops_c_, _drv_notice_,
+ ("%s: Free page for HIQ(%#x), MIDQ(%#x), LOWQ(%#x), PUBQ(%#x)\n",
+ __func__,
+ phal->SdioTxFIFOFreePage[HI_QUEUE_IDX],
+ phal->SdioTxFIFOFreePage[MID_QUEUE_IDX],
+ phal->SdioTxFIFOFreePage[LOW_QUEUE_IDX],
+ phal->SdioTxFIFOFreePage[PUBLIC_QUEUE_IDX]));
+ /* spin_unlock_bh(&phal->SdioTxFIFOFreePageLock); */
+
+ return true;
+}
+
+/* */
+/* Description: */
+/* Query SDIO Local register to get the current number of TX OQT Free Space. */
+/* */
+u8 HalQueryTxOQTBufferStatus8723BSdio(struct adapter *padapter)
+{
+ struct hal_com_data *pHalData = GET_HAL_DATA(padapter);
+
+ pHalData->SdioTxOQTFreeSpace = SdioLocalCmd52Read1Byte(padapter, SDIO_REG_OQT_FREE_PG);
+ return true;
+}
+
+#if defined(CONFIG_WOWLAN) || defined(CONFIG_AP_WOWLAN)
+u8 RecvOnePkt(struct adapter *padapter, u32 size)
+{
+ struct recv_buf *precvbuf;
+ struct dvobj_priv *psddev;
+ PSDIO_DATA psdio_data;
+ struct sdio_func *func;
+
+ u8 res = false;
+
+ DBG_871X("+%s: size: %d+\n", __func__, size);
+
+ if (padapter == NULL) {
+ DBG_871X(KERN_ERR "%s: padapter is NULL!\n", __func__);
+ return false;
+ }
+
+ psddev = adapter_to_dvobj(padapter);
+ psdio_data = &psddev->intf_data;
+ func = psdio_data->func;
+
+ if (size) {
+ sdio_claim_host(func);
+ precvbuf = sd_recv_rxfifo(padapter, size);
+
+ if (precvbuf) {
+ /* printk("Completed Recv One Pkt.\n"); */
+ sd_rxhandler(padapter, precvbuf);
+ res = true;
+ } else {
+ res = false;
+ }
+ sdio_release_host(func);
+ }
+ DBG_871X("-%s-\n", __func__);
+ return res;
+}
+#endif /* CONFIG_WOWLAN || CONFIG_AP_WOWLAN */
diff --git a/drivers/staging/rtl8723bs/include/Hal8192CPhyReg.h b/drivers/staging/rtl8723bs/include/Hal8192CPhyReg.h
new file mode 100644
index 000000000000..fbb83db4fb60
--- /dev/null
+++ b/drivers/staging/rtl8723bs/include/Hal8192CPhyReg.h
@@ -0,0 +1,1126 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+/*****************************************************************************
+ *
+ * Module: __INC_HAL8192CPHYREG_H
+ *
+ *
+ * Note: 1. Define PMAC/BB register map
+ * 2. Define RF register map
+ * 3. PMAC/BB register bit mask.
+ * 4. RF reg bit mask.
+ * 5. Other BB/RF relative definition.
+ *
+ *
+ * Export: Constants, macro, functions(API), global variables(None).
+ *
+ * Abbrev:
+ *
+ * History:
+ * Date Who Remark
+ * 08/07/2007 MHC 1. Porting from 9x series PHYCFG.h.
+ * 2. Reorganize code architecture.
+ * 09/25/2008 MH 1. Add RL6052 register definition
+ *
+ *****************************************************************************/
+#ifndef __INC_HAL8192CPHYREG_H
+#define __INC_HAL8192CPHYREG_H
+
+
+/*--------------------------Define Parameters-------------------------------*/
+
+/* */
+/* 8192S Register offset definition */
+/* */
+
+/* */
+/* BB-PHY register PMAC 0x100 PHY 0x800 - 0xEFF */
+/* 1. PMAC duplicate register due to connection: RF_Mode, TRxRN, NumOf L-STF */
+/* 2. 0x800/0x900/0xA00/0xC00/0xD00/0xE00 */
+/* 3. RF register 0x00-2E */
+/* 4. Bit Mask for BB/RF register */
+/* 5. Other definitions for BB/RF R/W */
+/* */
+
+
+/* */
+/* 1. PMAC duplicate register due to connection: RF_Mode, TRxRN, NumOf L-STF */
+/* 1. Page1(0x100) */
+/* */
+#define rPMAC_Reset 0x100
+#define rPMAC_TxStart 0x104
+#define rPMAC_TxLegacySIG 0x108
+#define rPMAC_TxHTSIG1 0x10c
+#define rPMAC_TxHTSIG2 0x110
+#define rPMAC_PHYDebug 0x114
+#define rPMAC_TxPacketNum 0x118
+#define rPMAC_TxIdle 0x11c
+#define rPMAC_TxMACHeader0 0x120
+#define rPMAC_TxMACHeader1 0x124
+#define rPMAC_TxMACHeader2 0x128
+#define rPMAC_TxMACHeader3 0x12c
+#define rPMAC_TxMACHeader4 0x130
+#define rPMAC_TxMACHeader5 0x134
+#define rPMAC_TxDataType 0x138
+#define rPMAC_TxRandomSeed 0x13c
+#define rPMAC_CCKPLCPPreamble 0x140
+#define rPMAC_CCKPLCPHeader 0x144
+#define rPMAC_CCKCRC16 0x148
+#define rPMAC_OFDMRxCRC32OK 0x170
+#define rPMAC_OFDMRxCRC32Er 0x174
+#define rPMAC_OFDMRxParityEr 0x178
+#define rPMAC_OFDMRxCRC8Er 0x17c
+#define rPMAC_CCKCRxRC16Er 0x180
+#define rPMAC_CCKCRxRC32Er 0x184
+#define rPMAC_CCKCRxRC32OK 0x188
+#define rPMAC_TxStatus 0x18c
+
+/* */
+/* 2. Page2(0x200) */
+/* */
+/* The following two definitions are only used for the USB interface. */
+#define RF_BB_CMD_ADDR 0x02c0 /* RF/BB read/write command address. */
+#define RF_BB_CMD_DATA 0x02c4 /* RF/BB read/write command data. */
+
+/* */
+/* 3. Page8(0x800) */
+/* */
+#define rFPGA0_RFMOD 0x800 /* RF mode & CCK TxSC RF BW Setting?? */
+
+#define rFPGA0_TxInfo 0x804 /* Status report?? */
+#define rFPGA0_PSDFunction 0x808
+
+#define rFPGA0_TxGainStage 0x80c /* Set TX PWR init gain? */
+
+#define rFPGA0_RFTiming1 0x810 /* Useless now */
+#define rFPGA0_RFTiming2 0x814
+
+#define rFPGA0_XA_HSSIParameter1 0x820 /* RF 3 wire register */
+#define rFPGA0_XA_HSSIParameter2 0x824
+#define rFPGA0_XB_HSSIParameter1 0x828
+#define rFPGA0_XB_HSSIParameter2 0x82c
+#define rTxAGC_B_Rate18_06 0x830
+#define rTxAGC_B_Rate54_24 0x834
+#define rTxAGC_B_CCK1_55_Mcs32 0x838
+#define rTxAGC_B_Mcs03_Mcs00 0x83c
+
+#define rTxAGC_B_Mcs07_Mcs04 0x848
+#define rTxAGC_B_Mcs11_Mcs08 0x84c
+
+#define rFPGA0_XA_LSSIParameter 0x840
+#define rFPGA0_XB_LSSIParameter 0x844
+
+#define rFPGA0_RFWakeUpParameter 0x850 /* Useless now */
+#define rFPGA0_RFSleepUpParameter 0x854
+
+#define rFPGA0_XAB_SwitchControl 0x858 /* RF Channel switch */
+#define rFPGA0_XCD_SwitchControl 0x85c
+
+#define rFPGA0_XA_RFInterfaceOE 0x860 /* RF Channel switch */
+#define rFPGA0_XB_RFInterfaceOE 0x864
+
+#define rTxAGC_B_Mcs15_Mcs12 0x868
+#define rTxAGC_B_CCK11_A_CCK2_11 0x86c
+
+#define rFPGA0_XAB_RFInterfaceSW 0x870 /* RF Interface Software Control */
+#define rFPGA0_XCD_RFInterfaceSW 0x874
+
+#define rFPGA0_XAB_RFParameter 0x878 /* RF Parameter */
+#define rFPGA0_XCD_RFParameter 0x87c
+
+#define rFPGA0_AnalogParameter1 0x880 /* Crystal cap setting RF-R/W protection for parameter4?? */
+#define rFPGA0_AnalogParameter2 0x884
+#define rFPGA0_AnalogParameter3 0x888 /* Useless now */
+#define rFPGA0_AnalogParameter4 0x88c
+
+#define rFPGA0_XA_LSSIReadBack 0x8a0 /* Transceiver LSSI Readback */
+#define rFPGA0_XB_LSSIReadBack 0x8a4
+#define rFPGA0_XC_LSSIReadBack 0x8a8
+#define rFPGA0_XD_LSSIReadBack 0x8ac
+
+#define rFPGA0_PSDReport 0x8b4 /* Useless now */
+#define TransceiverA_HSPI_Readback 0x8b8 /* Transceiver A HSPI Readback */
+#define TransceiverB_HSPI_Readback 0x8bc /* Transceiver B HSPI Readback */
+#define rFPGA0_XAB_RFInterfaceRB 0x8e0 /* Useless now RF Interface Readback Value */
+#define rFPGA0_XCD_RFInterfaceRB 0x8e4 /* Useless now */
+
+/* */
+/* 4. Page9(0x900) */
+/* */
+#define rFPGA1_RFMOD 0x900 /* RF mode & OFDM TxSC RF BW Setting?? */
+
+#define rFPGA1_TxBlock 0x904 /* Useless now */
+#define rFPGA1_DebugSelect 0x908 /* Useless now */
+#define rFPGA1_TxInfo 0x90c /* Useless now Status report?? */
+#define rS0S1_PathSwitch 0x948
+
+/* */
+/* 5. PageA(0xA00) */
+/* */
+/* Set Control channel to upper or lower. These settings are required only for 40MHz */
+#define rCCK0_System 0xa00
+
+#define rCCK0_AFESetting 0xa04 /* Disable init gain now Select RX path by RSSI */
+#define rCCK0_CCA 0xa08 /* Disable init gain now Init gain */
+
+#define rCCK0_RxAGC1 0xa0c /* AGC default value, saturation level Antenna Diversity, RX AGC, LNA Threshold, RX LNA Threshold useless now. Not the same as 90 series */
+#define rCCK0_RxAGC2 0xa10 /* AGC & DAGC */
+
+#define rCCK0_RxHP 0xa14
+
+#define rCCK0_DSPParameter1 0xa18 /* Timing recovery & Channel estimation threshold */
+#define rCCK0_DSPParameter2 0xa1c /* SQ threshold */
+
+#define rCCK0_TxFilter1 0xa20
+#define rCCK0_TxFilter2 0xa24
+#define rCCK0_DebugPort 0xa28 /* debug port and Tx filter3 */
+#define rCCK0_FalseAlarmReport 0xa2c /* 0xa2d useless now 0xa30-a4f channel report */
+#define rCCK0_TRSSIReport 0xa50
+#define rCCK0_RxReport 0xa54 /* 0xa57 */
+#define rCCK0_FACounterLower 0xa5c /* 0xa5b */
+#define rCCK0_FACounterUpper 0xa58 /* 0xa5c */
+/* */
+/* PageB(0xB00) */
+/* */
+#define rPdp_AntA 0xb00
+#define rPdp_AntA_4 0xb04
+#define rConfig_Pmpd_AntA 0xb28
+#define rConfig_AntA 0xb68
+#define rConfig_AntB 0xb6c
+#define rPdp_AntB 0xb70
+#define rPdp_AntB_4 0xb74
+#define rConfig_Pmpd_AntB 0xb98
+#define rAPK 0xbd8
+
+/* */
+/* 6. PageC(0xC00) */
+/* */
+#define rOFDM0_LSTF 0xc00
+
+#define rOFDM0_TRxPathEnable 0xc04
+#define rOFDM0_TRMuxPar 0xc08
+#define rOFDM0_TRSWIsolation 0xc0c
+
+#define rOFDM0_XARxAFE 0xc10 /* RxIQ DC offset, Rx digital filter, DC notch filter */
+#define rOFDM0_XARxIQImbalance 0xc14 /* RxIQ imbalance matrix */
+#define rOFDM0_XBRxAFE 0xc18
+#define rOFDM0_XBRxIQImbalance 0xc1c
+#define rOFDM0_XCRxAFE 0xc20
+#define rOFDM0_XCRxIQImbalance 0xc24
+#define rOFDM0_XDRxAFE 0xc28
+#define rOFDM0_XDRxIQImbalance 0xc2c
+
+#define rOFDM0_RxDetector1 0xc30 /* PD, BW & SBD DM tune init gain */
+#define rOFDM0_RxDetector2 0xc34 /* SBD & Frame Sync. */
+#define rOFDM0_RxDetector3 0xc38 /* Frame Sync. */
+#define rOFDM0_RxDetector4 0xc3c /* PD, SBD, Frame Sync & Short-GI */
+
+#define rOFDM0_RxDSP 0xc40 /* Rx Sync Path */
+#define rOFDM0_CFOandDAGC 0xc44 /* CFO & DAGC */
+#define rOFDM0_CCADropThreshold 0xc48 /* CCA Drop threshold */
+#define rOFDM0_ECCAThreshold 0xc4c /* energy CCA */
+
+#define rOFDM0_XAAGCCore1 0xc50 /* DIG */
+#define rOFDM0_XAAGCCore2 0xc54
+#define rOFDM0_XBAGCCore1 0xc58
+#define rOFDM0_XBAGCCore2 0xc5c
+#define rOFDM0_XCAGCCore1 0xc60
+#define rOFDM0_XCAGCCore2 0xc64
+#define rOFDM0_XDAGCCore1 0xc68
+#define rOFDM0_XDAGCCore2 0xc6c
+
+#define rOFDM0_AGCParameter1 0xc70
+#define rOFDM0_AGCParameter2 0xc74
+#define rOFDM0_AGCRSSITable 0xc78
+#define rOFDM0_HTSTFAGC 0xc7c
+
+#define rOFDM0_XATxIQImbalance 0xc80 /* TX PWR TRACK and DIG */
+#define rOFDM0_XATxAFE 0xc84
+#define rOFDM0_XBTxIQImbalance 0xc88
+#define rOFDM0_XBTxAFE 0xc8c
+#define rOFDM0_XCTxIQImbalance 0xc90
+#define rOFDM0_XCTxAFE 0xc94
+#define rOFDM0_XDTxIQImbalance 0xc98
+#define rOFDM0_XDTxAFE 0xc9c
+
+#define rOFDM0_RxIQExtAnta 0xca0
+#define rOFDM0_TxCoeff1 0xca4
+#define rOFDM0_TxCoeff2 0xca8
+#define rOFDM0_TxCoeff3 0xcac
+#define rOFDM0_TxCoeff4 0xcb0
+#define rOFDM0_TxCoeff5 0xcb4
+#define rOFDM0_TxCoeff6 0xcb8
+#define rOFDM0_RxHPParameter 0xce0
+#define rOFDM0_TxPseudoNoiseWgt 0xce4
+#define rOFDM0_FrameSync 0xcf0
+#define rOFDM0_DFSReport 0xcf4
+
+/* */
+/* 7. PageD(0xD00) */
+/* */
+#define rOFDM1_LSTF 0xd00
+#define rOFDM1_TRxPathEnable 0xd04
+
+#define rOFDM1_CFO 0xd08 /* No setting now */
+#define rOFDM1_CSI1 0xd10
+#define rOFDM1_SBD 0xd14
+#define rOFDM1_CSI2 0xd18
+#define rOFDM1_CFOTracking 0xd2c
+#define rOFDM1_TRxMesaure1 0xd34
+#define rOFDM1_IntfDet 0xd3c
+#define rOFDM1_PseudoNoiseStateAB 0xd50
+#define rOFDM1_PseudoNoiseStateCD 0xd54
+#define rOFDM1_RxPseudoNoiseWgt 0xd58
+
+#define rOFDM_PHYCounter1 0xda0 /* cca, parity fail */
+#define rOFDM_PHYCounter2 0xda4 /* rate illegal, crc8 fail */
+#define rOFDM_PHYCounter3 0xda8 /* MCS not support */
+
+#define rOFDM_ShortCFOAB 0xdac /* No setting now */
+#define rOFDM_ShortCFOCD 0xdb0
+#define rOFDM_LongCFOAB 0xdb4
+#define rOFDM_LongCFOCD 0xdb8
+#define rOFDM_TailCFOAB 0xdbc
+#define rOFDM_TailCFOCD 0xdc0
+#define rOFDM_PWMeasure1 0xdc4
+#define rOFDM_PWMeasure2 0xdc8
+#define rOFDM_BWReport 0xdcc
+#define rOFDM_AGCReport 0xdd0
+#define rOFDM_RxSNR 0xdd4
+#define rOFDM_RxEVMCSI 0xdd8
+#define rOFDM_SIGReport 0xddc
+
+
+/* */
+/* 8. PageE(0xE00) */
+/* */
+#define rTxAGC_A_Rate18_06 0xe00
+#define rTxAGC_A_Rate54_24 0xe04
+#define rTxAGC_A_CCK1_Mcs32 0xe08
+#define rTxAGC_A_Mcs03_Mcs00 0xe10
+#define rTxAGC_A_Mcs07_Mcs04 0xe14
+#define rTxAGC_A_Mcs11_Mcs08 0xe18
+#define rTxAGC_A_Mcs15_Mcs12 0xe1c
+
+#define rFPGA0_IQK 0xe28
+#define rTx_IQK_Tone_A 0xe30
+#define rRx_IQK_Tone_A 0xe34
+#define rTx_IQK_PI_A 0xe38
+#define rRx_IQK_PI_A 0xe3c
+
+#define rTx_IQK 0xe40
+#define rRx_IQK 0xe44
+#define rIQK_AGC_Pts 0xe48
+#define rIQK_AGC_Rsp 0xe4c
+#define rTx_IQK_Tone_B 0xe50
+#define rRx_IQK_Tone_B 0xe54
+#define rTx_IQK_PI_B 0xe58
+#define rRx_IQK_PI_B 0xe5c
+#define rIQK_AGC_Cont 0xe60
+
+#define rBlue_Tooth 0xe6c
+#define rRx_Wait_CCA 0xe70
+#define rTx_CCK_RFON 0xe74
+#define rTx_CCK_BBON 0xe78
+#define rTx_OFDM_RFON 0xe7c
+#define rTx_OFDM_BBON 0xe80
+#define rTx_To_Rx 0xe84
+#define rTx_To_Tx 0xe88
+#define rRx_CCK 0xe8c
+
+#define rTx_Power_Before_IQK_A 0xe94
+#define rTx_Power_After_IQK_A 0xe9c
+
+#define rRx_Power_Before_IQK_A 0xea0
+#define rRx_Power_Before_IQK_A_2 0xea4
+#define rRx_Power_After_IQK_A 0xea8
+#define rRx_Power_After_IQK_A_2 0xeac
+
+#define rTx_Power_Before_IQK_B 0xeb4
+#define rTx_Power_After_IQK_B 0xebc
+
+#define rRx_Power_Before_IQK_B 0xec0
+#define rRx_Power_Before_IQK_B_2 0xec4
+#define rRx_Power_After_IQK_B 0xec8
+#define rRx_Power_After_IQK_B_2 0xecc
+
+#define rRx_OFDM 0xed0
+#define rRx_Wait_RIFS 0xed4
+#define rRx_TO_Rx 0xed8
+#define rStandby 0xedc
+#define rSleep 0xee0
+#define rPMPD_ANAEN 0xeec
+
+/* */
+/* 7. RF Register 0x00-0x2E (RF 8256) */
+/* RF-0222D 0x00-3F */
+/* */
+/* Zebra1 */
+#define rZebra1_HSSIEnable 0x0 /* Useless now */
+#define rZebra1_TRxEnable1 0x1
+#define rZebra1_TRxEnable2 0x2
+#define rZebra1_AGC 0x4
+#define rZebra1_ChargePump 0x5
+#define rZebra1_Channel 0x7 /* RF channel switch */
+
+/* endif */
+#define rZebra1_TxGain 0x8 /* Useless now */
+#define rZebra1_TxLPF 0x9
+#define rZebra1_RxLPF 0xb
+#define rZebra1_RxHPFCorner 0xc
+
+/* Zebra4 */
+#define rGlobalCtrl 0 /* Useless now */
+#define rRTL8256_TxLPF 19
+#define rRTL8256_RxLPF 11
+
+/* RTL8258 */
+#define rRTL8258_TxLPF 0x11 /* Useless now */
+#define rRTL8258_RxLPF 0x13
+#define rRTL8258_RSSILPF 0xa
+
+/* */
+/* RL6052 Register definition */
+/* */
+#define RF_AC 0x00 /* */
+
+#define RF_IQADJ_G1 0x01 /* */
+#define RF_IQADJ_G2 0x02 /* */
+#define RF_BS_PA_APSET_G1_G4 0x03
+#define RF_BS_PA_APSET_G5_G8 0x04
+#define RF_POW_TRSW 0x05 /* */
+
+#define RF_GAIN_RX 0x06 /* */
+#define RF_GAIN_TX 0x07 /* */
+
+#define RF_TXM_IDAC 0x08 /* */
+#define RF_IPA_G 0x09 /* */
+#define RF_TXBIAS_G 0x0A
+#define RF_TXPA_AG 0x0B
+#define RF_IPA_A 0x0C /* */
+#define RF_TXBIAS_A 0x0D
+#define RF_BS_PA_APSET_G9_G11 0x0E
+#define RF_BS_IQGEN 0x0F /* */
+
+#define RF_MODE1 0x10 /* */
+#define RF_MODE2 0x11 /* */
+
+#define RF_RX_AGC_HP 0x12 /* */
+#define RF_TX_AGC 0x13 /* */
+#define RF_BIAS 0x14 /* */
+#define RF_IPA 0x15 /* */
+#define RF_TXBIAS 0x16 /* */
+#define RF_POW_ABILITY 0x17 /* */
+#define RF_MODE_AG 0x18 /* */
+#define rRfChannel 0x18 /* RF channel and BW switch */
+#define RF_CHNLBW 0x18 /* RF channel and BW switch */
+#define RF_TOP 0x19 /* */
+
+#define RF_RX_G1 0x1A /* */
+#define RF_RX_G2 0x1B /* */
+
+#define RF_RX_BB2 0x1C /* */
+#define RF_RX_BB1 0x1D /* */
+
+#define RF_RCK1 0x1E /* */
+#define RF_RCK2 0x1F /* */
+
+#define RF_TX_G1 0x20 /* */
+#define RF_TX_G2 0x21 /* */
+#define RF_TX_G3 0x22 /* */
+
+#define RF_TX_BB1 0x23 /* */
+
+#define RF_T_METER 0x24 /* */
+
+#define RF_SYN_G1 0x25 /* RF TX Power control */
+#define RF_SYN_G2 0x26 /* RF TX Power control */
+#define RF_SYN_G3 0x27 /* RF TX Power control */
+#define RF_SYN_G4 0x28 /* RF TX Power control */
+#define RF_SYN_G5 0x29 /* RF TX Power control */
+#define RF_SYN_G6 0x2A /* RF TX Power control */
+#define RF_SYN_G7 0x2B /* RF TX Power control */
+#define RF_SYN_G8 0x2C /* RF TX Power control */
+
+#define RF_RCK_OS 0x30 /* RF TX PA control */
+
+#define RF_TXPA_G1 0x31 /* RF TX PA control */
+#define RF_TXPA_G2 0x32 /* RF TX PA control */
+#define RF_TXPA_G3 0x33 /* RF TX PA control */
+#define RF_TX_BIAS_A 0x35
+#define RF_TX_BIAS_D 0x36
+#define RF_LOBF_9 0x38
+#define RF_RXRF_A3 0x3C /* */
+#define RF_TRSW 0x3F
+
+#define RF_TXRF_A2 0x41
+#define RF_TXPA_G4 0x46
+#define RF_TXPA_A4 0x4B
+#define RF_0x52 0x52
+#define RF_WE_LUT 0xEF
+#define RF_S0S1 0xB0
+
+/* */
+/* Bit Mask */
+/* */
+/* 1. Page1(0x100) */
+#define bBBResetB 0x100 /* Useless now? */
+#define bGlobalResetB 0x200
+#define bOFDMTxStart 0x4
+#define bCCKTxStart 0x8
+#define bCRC32Debug 0x100
+#define bPMACLoopback 0x10
+#define bTxLSIG 0xffffff
+#define bOFDMTxRate 0xf
+#define bOFDMTxReserved 0x10
+#define bOFDMTxLength 0x1ffe0
+#define bOFDMTxParity 0x20000
+#define bTxHTSIG1 0xffffff
+#define bTxHTMCSRate 0x7f
+#define bTxHTBW 0x80
+#define bTxHTLength 0xffff00
+#define bTxHTSIG2 0xffffff
+#define bTxHTSmoothing 0x1
+#define bTxHTSounding 0x2
+#define bTxHTReserved 0x4
+#define bTxHTAggreation 0x8
+#define bTxHTSTBC 0x30
+#define bTxHTAdvanceCoding 0x40
+#define bTxHTShortGI 0x80
+#define bTxHTNumberHT_LTF 0x300
+#define bTxHTCRC8 0x3fc00
+#define bCounterReset 0x10000
+#define bNumOfOFDMTx 0xffff
+#define bNumOfCCKTx 0xffff0000
+#define bTxIdleInterval 0xffff
+#define bOFDMService 0xffff0000
+#define bTxMACHeader 0xffffffff
+#define bTxDataInit 0xff
+#define bTxHTMode 0x100
+#define bTxDataType 0x30000
+#define bTxRandomSeed 0xffffffff
+#define bCCKTxPreamble 0x1
+#define bCCKTxSFD 0xffff0000
+#define bCCKTxSIG 0xff
+#define bCCKTxService 0xff00
+#define bCCKLengthExt 0x8000
+#define bCCKTxLength 0xffff0000
+#define bCCKTxCRC16 0xffff
+#define bCCKTxStatus 0x1
+#define bOFDMTxStatus 0x2
+
+#define IS_BB_REG_OFFSET_92S(_Offset) ((_Offset >= 0x800) && (_Offset <= 0xfff))
+
+/* 2. Page8(0x800) */
+#define bRFMOD 0x1 /* Reg 0x800 rFPGA0_RFMOD */
+#define bJapanMode 0x2
+#define bCCKTxSC 0x30
+#define bCCKEn 0x1000000
+#define bOFDMEn 0x2000000
+
+#define bOFDMRxADCPhase 0x10000 /* Useless now */
+#define bOFDMTxDACPhase 0x40000
+#define bXATxAGC 0x3f
+
+#define bAntennaSelect 0x0300
+
+#define bXBTxAGC 0xf00 /* Reg 80c rFPGA0_TxGainStage */
+#define bXCTxAGC 0xf000
+#define bXDTxAGC 0xf0000
+
+#define bPAStart 0xf0000000 /* Useless now */
+#define bTRStart 0x00f00000
+#define bRFStart 0x0000f000
+#define bBBStart 0x000000f0
+#define bBBCCKStart 0x0000000f
+#define bPAEnd 0xf /* Reg0x814 */
+#define bTREnd 0x0f000000
+#define bRFEnd 0x000f0000
+#define bCCAMask 0x000000f0 /* T2R */
+#define bR2RCCAMask 0x00000f00
+#define bHSSI_R2TDelay 0xf8000000
+#define bHSSI_T2RDelay 0xf80000
+#define bContTxHSSI 0x400 /* change gain at continuous Tx */
+#define bIGFromCCK 0x200
+#define bAGCAddress 0x3f
+#define bRxHPTx 0x7000
+#define bRxHPT2R 0x38000
+#define bRxHPCCKIni 0xc0000
+#define bAGCTxCode 0xc00000
+#define bAGCRxCode 0x300000
+
+#define b3WireDataLength 0x800 /* Reg 0x820~84f rFPGA0_XA_HSSIParameter1 */
+#define b3WireAddressLength 0x400
+
+#define b3WireRFPowerDown 0x1 /* Useless now */
+/* define bHWSISelect 0x8 */
+#define b5GPAPEPolarity 0x40000000
+#define b2GPAPEPolarity 0x80000000
+#define bRFSW_TxDefaultAnt 0x3
+#define bRFSW_TxOptionAnt 0x30
+#define bRFSW_RxDefaultAnt 0x300
+#define bRFSW_RxOptionAnt 0x3000
+#define bRFSI_3WireData 0x1
+#define bRFSI_3WireClock 0x2
+#define bRFSI_3WireLoad 0x4
+#define bRFSI_3WireRW 0x8
+#define bRFSI_3Wire 0xf
+
+#define bRFSI_RFENV 0x10 /* Reg 0x870 rFPGA0_XAB_RFInterfaceSW */
+
+#define bRFSI_TRSW 0x20 /* Useless now */
+#define bRFSI_TRSWB 0x40
+#define bRFSI_ANTSW 0x100
+#define bRFSI_ANTSWB 0x200
+#define bRFSI_PAPE 0x400
+#define bRFSI_PAPE5G 0x800
+#define bBandSelect 0x1
+#define bHTSIG2_GI 0x80
+#define bHTSIG2_Smoothing 0x01
+#define bHTSIG2_Sounding 0x02
+#define bHTSIG2_Aggreaton 0x08
+#define bHTSIG2_STBC 0x30
+#define bHTSIG2_AdvCoding 0x40
+#define bHTSIG2_NumOfHTLTF 0x300
+#define bHTSIG2_CRC8 0x3fc
+#define bHTSIG1_MCS 0x7f
+#define bHTSIG1_BandWidth 0x80
+#define bHTSIG1_HTLength 0xffff
+#define bLSIG_Rate 0xf
+#define bLSIG_Reserved 0x10
+#define bLSIG_Length 0x1fffe
+#define bLSIG_Parity 0x20
+#define bCCKRxPhase 0x4
+
+#define bLSSIReadAddress 0x7f800000 /* T65 RF */
+
+#define bLSSIReadEdge 0x80000000 /* LSSI "Read" edge signal */
+
+#define bLSSIReadBackData 0xfffff /* T65 RF */
+
+#define bLSSIReadOKFlag 0x1000 /* Useless now */
+#define bCCKSampleRate 0x8 /* 0: 44MHz, 1:88MHz */
+#define bRegulator0Standby 0x1
+#define bRegulatorPLLStandby 0x2
+#define bRegulator1Standby 0x4
+#define bPLLPowerUp 0x8
+#define bDPLLPowerUp 0x10
+#define bDA10PowerUp 0x20
+#define bAD7PowerUp 0x200
+#define bDA6PowerUp 0x2000
+#define bXtalPowerUp 0x4000
+#define b40MDClkPowerUP 0x8000
+#define bDA6DebugMode 0x20000
+#define bDA6Swing 0x380000
+
+#define bADClkPhase 0x4000000 /* Reg 0x880 rFPGA0_AnalogParameter1 20/40 CCK support switch 40/80 BB MHZ */
+
+#define b80MClkDelay 0x18000000 /* Useless */
+#define bAFEWatchDogEnable 0x20000000
+
+#define bXtalCap01 0xc0000000 /* Reg 0x884 rFPGA0_AnalogParameter2 Crystal cap */
+#define bXtalCap23 0x3
+#define bXtalCap92x 0x0f000000
+#define bXtalCap 0x0f000000
+
+#define bIntDifClkEnable 0x400 /* Useless */
+#define bExtSigClkEnable 0x800
+#define bBandgapMbiasPowerUp 0x10000
+#define bAD11SHGain 0xc0000
+#define bAD11InputRange 0x700000
+#define bAD11OPCurrent 0x3800000
+#define bIPathLoopback 0x4000000
+#define bQPathLoopback 0x8000000
+#define bAFELoopback 0x10000000
+#define bDA10Swing 0x7e0
+#define bDA10Reverse 0x800
+#define bDAClkSource 0x1000
+#define bAD7InputRange 0x6000
+#define bAD7Gain 0x38000
+#define bAD7OutputCMMode 0x40000
+#define bAD7InputCMMode 0x380000
+#define bAD7Current 0xc00000
+#define bRegulatorAdjust 0x7000000
+#define bAD11PowerUpAtTx 0x1
+#define bDA10PSAtTx 0x10
+#define bAD11PowerUpAtRx 0x100
+#define bDA10PSAtRx 0x1000
+#define bCCKRxAGCFormat 0x200
+#define bPSDFFTSamplepPoint 0xc000
+#define bPSDAverageNum 0x3000
+#define bIQPathControl 0xc00
+#define bPSDFreq 0x3ff
+#define bPSDAntennaPath 0x30
+#define bPSDIQSwitch 0x40
+#define bPSDRxTrigger 0x400000
+#define bPSDTxTrigger 0x80000000
+#define bPSDSineToneScale 0x7f000000
+#define bPSDReport 0xffff
+
+/* 3. Page9(0x900) */
+#define bOFDMTxSC 0x30000000 /* Useless */
+#define bCCKTxOn 0x1
+#define bOFDMTxOn 0x2
+#define bDebugPage 0xfff /* reset debug page and also HWord, LWord */
+#define bDebugItem 0xff /* reset debug page and LWord */
+#define bAntL 0x10
+#define bAntNonHT 0x100
+#define bAntHT1 0x1000
+#define bAntHT2 0x10000
+#define bAntHT1S1 0x100000
+#define bAntNonHTS1 0x1000000
+
+/* 4. PageA(0xA00) */
+#define bCCKBBMode 0x3 /* Useless */
+#define bCCKTxPowerSaving 0x80
+#define bCCKRxPowerSaving 0x40
+
+#define bCCKSideBand 0x10 /* Reg 0xa00 rCCK0_System 20/40 switch */
+
+#define bCCKScramble 0x8 /* Useless */
+#define bCCKAntDiversity 0x8000
+#define bCCKCarrierRecovery 0x4000
+#define bCCKTxRate 0x3000
+#define bCCKDCCancel 0x0800
+#define bCCKISICancel 0x0400
+#define bCCKMatchFilter 0x0200
+#define bCCKEqualizer 0x0100
+#define bCCKPreambleDetect 0x800000
+#define bCCKFastFalseCCA 0x400000
+#define bCCKChEstStart 0x300000
+#define bCCKCCACount 0x080000
+#define bCCKcs_lim 0x070000
+#define bCCKBistMode 0x80000000
+#define bCCKCCAMask 0x40000000
+#define bCCKTxDACPhase 0x4
+#define bCCKRxADCPhase 0x20000000 /* r_rx_clk */
+#define bCCKr_cp_mode0 0x0100
+#define bCCKTxDCOffset 0xf0
+#define bCCKRxDCOffset 0xf
+#define bCCKCCAMode 0xc000
+#define bCCKFalseCS_lim 0x3f00
+#define bCCKCS_ratio 0xc00000
+#define bCCKCorgBit_sel 0x300000
+#define bCCKPD_lim 0x0f0000
+#define bCCKNewCCA 0x80000000
+#define bCCKRxHPofIG 0x8000
+#define bCCKRxIG 0x7f00
+#define bCCKLNAPolarity 0x800000
+#define bCCKRx1stGain 0x7f0000
+#define bCCKRFExtend 0x20000000 /* CCK Rx initial gain polarity */
+#define bCCKRxAGCSatLevel 0x1f000000
+#define bCCKRxAGCSatCount 0xe0
+#define bCCKRxRFSettle 0x1f /* AGCsamp_dly */
+#define bCCKFixedRxAGC 0x8000
+#define bCCKAntennaPolarity 0x2000
+#define bCCKTxFilterType 0x0c00
+#define bCCKRxAGCReportType 0x0300
+#define bCCKRxDAGCEn 0x80000000
+#define bCCKRxDAGCPeriod 0x20000000
+#define bCCKRxDAGCSatLevel 0x1f000000
+#define bCCKTimingRecovery 0x800000
+#define bCCKTxC0 0x3f0000
+#define bCCKTxC1 0x3f000000
+#define bCCKTxC2 0x3f
+#define bCCKTxC3 0x3f00
+#define bCCKTxC4 0x3f0000
+#define bCCKTxC5 0x3f000000
+#define bCCKTxC6 0x3f
+#define bCCKTxC7 0x3f00
+#define bCCKDebugPort 0xff0000
+#define bCCKDACDebug 0x0f000000
+#define bCCKFalseAlarmEnable 0x8000
+#define bCCKFalseAlarmRead 0x4000
+#define bCCKTRSSI 0x7f
+#define bCCKRxAGCReport 0xfe
+#define bCCKRxReport_AntSel 0x80000000
+#define bCCKRxReport_MFOff 0x40000000
+#define bCCKRxRxReport_SQLoss 0x20000000
+#define bCCKRxReport_Pktloss 0x10000000
+#define bCCKRxReport_Lockedbit 0x08000000
+#define bCCKRxReport_RateError 0x04000000
+#define bCCKRxReport_RxRate 0x03000000
+#define bCCKRxFACounterLower 0xff
+#define bCCKRxFACounterUpper 0xff000000
+#define bCCKRxHPAGCStart 0xe000
+#define bCCKRxHPAGCFinal 0x1c00
+#define bCCKRxFalseAlarmEnable 0x8000
+#define bCCKFACounterFreeze 0x4000
+#define bCCKTxPathSel 0x10000000
+#define bCCKDefaultRxPath 0xc000000
+#define bCCKOptionRxPath 0x3000000
+
+/* 5. PageC(0xC00) */
+#define bNumOfSTF 0x3 /* Useless */
+#define bShift_L 0xc0
+#define bGI_TH 0xc
+#define bRxPathA 0x1
+#define bRxPathB 0x2
+#define bRxPathC 0x4
+#define bRxPathD 0x8
+#define bTxPathA 0x1
+#define bTxPathB 0x2
+#define bTxPathC 0x4
+#define bTxPathD 0x8
+#define bTRSSIFreq 0x200
+#define bADCBackoff 0x3000
+#define bDFIRBackoff 0xc000
+#define bTRSSILatchPhase 0x10000
+#define bRxIDCOffset 0xff
+#define bRxQDCOffset 0xff00
+#define bRxDFIRMode 0x1800000
+#define bRxDCNFType 0xe000000
+#define bRXIQImb_A 0x3ff
+#define bRXIQImb_B 0xfc00
+#define bRXIQImb_C 0x3f0000
+#define bRXIQImb_D 0xffc00000
+#define bDC_dc_Notch 0x60000
+#define bRxNBINotch 0x1f000000
+#define bPD_TH 0xf
+#define bPD_TH_Opt2 0xc000
+#define bPWED_TH 0x700
+#define bIfMF_Win_L 0x800
+#define bPD_Option 0x1000
+#define bMF_Win_L 0xe000
+#define bBW_Search_L 0x30000
+#define bwin_enh_L 0xc0000
+#define bBW_TH 0x700000
+#define bED_TH2 0x3800000
+#define bBW_option 0x4000000
+#define bRatio_TH 0x18000000
+#define bWindow_L 0xe0000000
+#define bSBD_Option 0x1
+#define bFrame_TH 0x1c
+#define bFS_Option 0x60
+#define bDC_Slope_check 0x80
+#define bFGuard_Counter_DC_L 0xe00
+#define bFrame_Weight_Short 0x7000
+#define bSub_Tune 0xe00000
+#define bFrame_DC_Length 0xe000000
+#define bSBD_start_offset 0x30000000
+#define bFrame_TH_2 0x7
+#define bFrame_GI2_TH 0x38
+#define bGI2_Sync_en 0x40
+#define bSarch_Short_Early 0x300
+#define bSarch_Short_Late 0xc00
+#define bSarch_GI2_Late 0x70000
+#define bCFOAntSum 0x1
+#define bCFOAcc 0x2
+#define bCFOStartOffset 0xc
+#define bCFOLookBack 0x70
+#define bCFOSumWeight 0x80
+#define bDAGCEnable 0x10000
+#define bTXIQImb_A 0x3ff
+#define bTXIQImb_B 0xfc00
+#define bTXIQImb_C 0x3f0000
+#define bTXIQImb_D 0xffc00000
+#define bTxIDCOffset 0xff
+#define bTxQDCOffset 0xff00
+#define bTxDFIRMode 0x10000
+#define bTxPesudoNoiseOn 0x4000000
+#define bTxPesudoNoise_A 0xff
+#define bTxPesudoNoise_B 0xff00
+#define bTxPesudoNoise_C 0xff0000
+#define bTxPesudoNoise_D 0xff000000
+#define bCCADropOption 0x20000
+#define bCCADropThres 0xfff00000
+#define bEDCCA_H 0xf
+#define bEDCCA_L 0xf0
+#define bLambda_ED 0x300
+#define bRxInitialGain 0x7f
+#define bRxAntDivEn 0x80
+#define bRxAGCAddressForLNA 0x7f00
+#define bRxHighPowerFlow 0x8000
+#define bRxAGCFreezeThres 0xc0000
+#define bRxFreezeStep_AGC1 0x300000
+#define bRxFreezeStep_AGC2 0xc00000
+#define bRxFreezeStep_AGC3 0x3000000
+#define bRxFreezeStep_AGC0 0xc000000
+#define bRxRssi_Cmp_En 0x10000000
+#define bRxQuickAGCEn 0x20000000
+#define bRxAGCFreezeThresMode 0x40000000
+#define bRxOverFlowCheckType 0x80000000
+#define bRxAGCShift 0x7f
+#define bTRSW_Tri_Only 0x80
+#define bPowerThres 0x300
+#define bRxAGCEn 0x1
+#define bRxAGCTogetherEn 0x2
+#define bRxAGCMin 0x4
+#define bRxHP_Ini 0x7
+#define bRxHP_TRLNA 0x70
+#define bRxHP_RSSI 0x700
+#define bRxHP_BBP1 0x7000
+#define bRxHP_BBP2 0x70000
+#define bRxHP_BBP3 0x700000
+#define bRSSI_H 0x7f0000 /* the threshold for high power */
+#define bRSSI_Gen 0x7f000000 /* the threshold for ant diversity */
+#define bRxSettle_TRSW 0x7
+#define bRxSettle_LNA 0x38
+#define bRxSettle_RSSI 0x1c0
+#define bRxSettle_BBP 0xe00
+#define bRxSettle_RxHP 0x7000
+#define bRxSettle_AntSW_RSSI 0x38000
+#define bRxSettle_AntSW 0xc0000
+#define bRxProcessTime_DAGC 0x300000
+#define bRxSettle_HSSI 0x400000
+#define bRxProcessTime_BBPPW 0x800000
+#define bRxAntennaPowerShift 0x3000000
+#define bRSSITableSelect 0xc000000
+#define bRxHP_Final 0x7000000
+#define bRxHTSettle_BBP 0x7
+#define bRxHTSettle_HSSI 0x8
+#define bRxHTSettle_RxHP 0x70
+#define bRxHTSettle_BBPPW 0x80
+#define bRxHTSettle_Idle 0x300
+#define bRxHTSettle_Reserved 0x1c00
+#define bRxHTRxHPEn 0x8000
+#define bRxHTAGCFreezeThres 0x30000
+#define bRxHTAGCTogetherEn 0x40000
+#define bRxHTAGCMin 0x80000
+#define bRxHTAGCEn 0x100000
+#define bRxHTDAGCEn 0x200000
+#define bRxHTRxHP_BBP 0x1c00000
+#define bRxHTRxHP_Final 0xe0000000
+#define bRxPWRatioTH 0x3
+#define bRxPWRatioEn 0x4
+#define bRxMFHold 0x3800
+#define bRxPD_Delay_TH1 0x38
+#define bRxPD_Delay_TH2 0x1c0
+#define bRxPD_DC_COUNT_MAX 0x600
+/* define bRxMF_Hold 0x3800 */
+#define bRxPD_Delay_TH 0x8000
+#define bRxProcess_Delay 0xf0000
+#define bRxSearchrange_GI2_Early 0x700000
+#define bRxFrame_Guard_Counter_L 0x3800000
+#define bRxSGI_Guard_L 0xc000000
+#define bRxSGI_Search_L 0x30000000
+#define bRxSGI_TH 0xc0000000
+#define bDFSCnt0 0xff
+#define bDFSCnt1 0xff00
+#define bDFSFlag 0xf0000
+#define bMFWeightSum 0x300000
+#define bMinIdxTH 0x7f000000
+#define bDAFormat 0x40000
+#define bTxChEmuEnable 0x01000000
+#define bTRSWIsolation_A 0x7f
+#define bTRSWIsolation_B 0x7f00
+#define bTRSWIsolation_C 0x7f0000
+#define bTRSWIsolation_D 0x7f000000
+#define bExtLNAGain 0x7c00
+
+/* 6. PageE(0xE00) */
+#define bSTBCEn 0x4 /* Useless */
+#define bAntennaMapping 0x10
+#define bNss 0x20
+#define bCFOAntSumD 0x200
+#define bPHYCounterReset 0x8000000
+#define bCFOReportGet 0x4000000
+#define bOFDMContinueTx 0x10000000
+#define bOFDMSingleCarrier 0x20000000
+#define bOFDMSingleTone 0x40000000
+/* define bRxPath1 0x01 */
+/* define bRxPath2 0x02 */
+/* define bRxPath3 0x04 */
+/* define bRxPath4 0x08 */
+/* define bTxPath1 0x10 */
+/* define bTxPath2 0x20 */
+#define bHTDetect 0x100
+#define bCFOEn 0x10000
+#define bCFOValue 0xfff00000
+#define bSigTone_Re 0x3f
+#define bSigTone_Im 0x7f00
+#define bCounter_CCA 0xffff
+#define bCounter_ParityFail 0xffff0000
+#define bCounter_RateIllegal 0xffff
+#define bCounter_CRC8Fail 0xffff0000
+#define bCounter_MCSNoSupport 0xffff
+#define bCounter_FastSync 0xffff
+#define bShortCFO 0xfff
+#define bShortCFOTLength 12 /* total */
+#define bShortCFOFLength 11 /* fraction */
+#define bLongCFO 0x7ff
+#define bLongCFOTLength 11
+#define bLongCFOFLength 11
+#define bTailCFO 0x1fff
+#define bTailCFOTLength 13
+#define bTailCFOFLength 12
+#define bmax_en_pwdB 0xffff
+#define bCC_power_dB 0xffff0000
+#define bnoise_pwdB 0xffff
+#define bPowerMeasTLength 10
+#define bPowerMeasFLength 3
+#define bRx_HT_BW 0x1
+#define bRxSC 0x6
+#define bRx_HT 0x8
+#define bNB_intf_det_on 0x1
+#define bIntf_win_len_cfg 0x30
+#define bNB_Intf_TH_cfg 0x1c0
+#define bRFGain 0x3f
+#define bTableSel 0x40
+#define bTRSW 0x80
+#define bRxSNR_A 0xff
+#define bRxSNR_B 0xff00
+#define bRxSNR_C 0xff0000
+#define bRxSNR_D 0xff000000
+#define bSNREVMTLength 8
+#define bSNREVMFLength 1
+#define bCSI1st 0xff
+#define bCSI2nd 0xff00
+#define bRxEVM1st 0xff0000
+#define bRxEVM2nd 0xff000000
+#define bSIGEVM 0xff
+#define bPWDB 0xff00
+#define bSGIEN 0x10000
+
+#define bSFactorQAM1 0xf /* Useless */
+#define bSFactorQAM2 0xf0
+#define bSFactorQAM3 0xf00
+#define bSFactorQAM4 0xf000
+#define bSFactorQAM5 0xf0000
+#define bSFactorQAM6 0xf0000
+#define bSFactorQAM7 0xf00000
+#define bSFactorQAM8 0xf000000
+#define bSFactorQAM9 0xf0000000
+#define bCSIScheme 0x100000
+
+#define bNoiseLvlTopSet 0x3 /* Useless */
+#define bChSmooth 0x4
+#define bChSmoothCfg1 0x38
+#define bChSmoothCfg2 0x1c0
+#define bChSmoothCfg3 0xe00
+#define bChSmoothCfg4 0x7000
+#define bMRCMode 0x800000
+#define bTHEVMCfg 0x7000000
+
+#define bLoopFitType 0x1 /* Useless */
+#define bUpdCFO 0x40
+#define bUpdCFOOffData 0x80
+#define bAdvUpdCFO 0x100
+#define bAdvTimeCtrl 0x800
+#define bUpdClko 0x1000
+#define bFC 0x6000
+#define bTrackingMode 0x8000
+#define bPhCmpEnable 0x10000
+#define bUpdClkoLTF 0x20000
+#define bComChCFO 0x40000
+#define bCSIEstiMode 0x80000
+#define bAdvUpdEqz 0x100000
+#define bUChCfg 0x7000000
+#define bUpdEqz 0x8000000
+
+/* Rx Pseudo noise */
+#define bRxPesudoNoiseOn 0x20000000 /* Useless */
+#define bRxPesudoNoise_A 0xff
+#define bRxPesudoNoise_B 0xff00
+#define bRxPesudoNoise_C 0xff0000
+#define bRxPesudoNoise_D 0xff000000
+#define bPesudoNoiseState_A 0xffff
+#define bPesudoNoiseState_B 0xffff0000
+#define bPesudoNoiseState_C 0xffff
+#define bPesudoNoiseState_D 0xffff0000
+
+/* 7. RF Register */
+/* Zebra1 */
+#define bZebra1_HSSIEnable 0x8 /* Useless */
+#define bZebra1_TRxControl 0xc00
+#define bZebra1_TRxGainSetting 0x07f
+#define bZebra1_RxCorner 0xc00
+#define bZebra1_TxChargePump 0x38
+#define bZebra1_RxChargePump 0x7
+#define bZebra1_ChannelNum 0xf80
+#define bZebra1_TxLPFBW 0x400
+#define bZebra1_RxLPFBW 0x600
+
+/* Zebra4 */
+#define bRTL8256RegModeCtrl1 0x100 /* Useless */
+#define bRTL8256RegModeCtrl0 0x40
+#define bRTL8256_TxLPFBW 0x18
+#define bRTL8256_RxLPFBW 0x600
+
+/* RTL8258 */
+#define bRTL8258_TxLPFBW 0xc /* Useless */
+#define bRTL8258_RxLPFBW 0xc00
+#define bRTL8258_RSSILPFBW 0xc0
+
+
+/* */
+/* Other Definition */
+/* */
+
+/* byte enable for sb_write */
+#define bByte0 0x1 /* Useless */
+#define bByte1 0x2
+#define bByte2 0x4
+#define bByte3 0x8
+#define bWord0 0x3
+#define bWord1 0xc
+#define bDWord 0xf
+
+/* for PutRegSetting & GetRegSetting BitMask */
+#define bMaskByte0 0xff /* Reg 0xc50 rOFDM0_XAAGCCore~0xC6f */
+#define bMaskByte1 0xff00
+#define bMaskByte2 0xff0000
+#define bMaskByte3 0xff000000
+#define bMaskHWord 0xffff0000
+#define bMaskLWord 0x0000ffff
+#define bMaskDWord 0xffffffff
+#define bMaskH3Bytes 0xffffff00
+#define bMask12Bits 0xfff
+#define bMaskH4Bits 0xf0000000
+#define bMaskOFDM_D 0xffc00000
+#define bMaskCCK 0x3f3f3f3f
+
+
+#define bEnable 0x1 /* Useless */
+#define bDisable 0x0
+
+#define LeftAntenna 0x0 /* Useless */
+#define RightAntenna 0x1
+
+#define tCheckTxStatus 500 /* 500ms Useless */
+#define tUpdateRxCounter 100 /* 100ms */
+
+#define rateCCK 0 /* Useless */
+#define rateOFDM 1
+#define rateHT 2
+
+/* define Register-End */
+#define bPMAC_End 0x1ff /* Useless */
+#define bFPGAPHY0_End 0x8ff
+#define bFPGAPHY1_End 0x9ff
+#define bCCKPHY0_End 0xaff
+#define bOFDMPHY0_End 0xcff
+#define bOFDMPHY1_End 0xdff
+
+/* define max debug item in each debug page */
+/* define bMaxItem_FPGA_PHY0 0x9 */
+/* define bMaxItem_FPGA_PHY1 0x3 */
+/* define bMaxItem_PHY_11B 0x16 */
+/* define bMaxItem_OFDM_PHY0 0x29 */
+/* define bMaxItem_OFDM_PHY1 0x0 */
+
+#define bPMACControl 0x0 /* Useless */
+#define bWMACControl 0x1
+#define bWNICControl 0x2
+
+#define PathA 0x0 /* Useless */
+#define PathB 0x1
+#define PathC 0x2
+#define PathD 0x3
+
+/*--------------------------Define Parameters-------------------------------*/
+
+
+#endif /* __INC_HAL8192CPHYREG_H */
diff --git a/drivers/staging/rtl8723bs/include/Hal8723BPhyCfg.h b/drivers/staging/rtl8723bs/include/Hal8723BPhyCfg.h
new file mode 100644
index 000000000000..22250411ed6c
--- /dev/null
+++ b/drivers/staging/rtl8723bs/include/Hal8723BPhyCfg.h
@@ -0,0 +1,128 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#ifndef __INC_HAL8723BPHYCFG_H__
+#define __INC_HAL8723BPHYCFG_H__
+
+/*--------------------------Define Parameters-------------------------------*/
+#define LOOP_LIMIT 5
+#define MAX_STALL_TIME 50 /* us */
+#define AntennaDiversityValue 0x80 /* (Adapter->bSoftwareAntennaDiversity ? 0x00 : 0x80) */
+#define MAX_TXPWR_IDX_NMODE_92S 63
+#define Reset_Cnt_Limit 3
+
+#define MAX_AGGR_NUM 0x07
+
+
+/*--------------------------Define Parameters End-------------------------------*/
+
+
+/*------------------------------Define structure----------------------------*/
+
+/*------------------------------Define structure End----------------------------*/
+
+/*--------------------------Exported Function prototype---------------------*/
+u32
+PHY_QueryBBReg_8723B(
+struct adapter *Adapter,
+u32 RegAddr,
+u32 BitMask
+ );
+
+void
+PHY_SetBBReg_8723B(
+struct adapter *Adapter,
+u32 RegAddr,
+u32 BitMask,
+u32 Data
+ );
+
+u32
+PHY_QueryRFReg_8723B(
+struct adapter *Adapter,
+u8 eRFPath,
+u32 RegAddr,
+u32 BitMask
+ );
+
+void
+PHY_SetRFReg_8723B(
+struct adapter *Adapter,
+u8 eRFPath,
+u32 RegAddr,
+u32 BitMask,
+u32 Data
+ );
+
+/* MAC/BB/RF HAL config */
+int PHY_BBConfig8723B(struct adapter *Adapter);
+
+int PHY_RFConfig8723B(struct adapter *Adapter);
+
+s32 PHY_MACConfig8723B(struct adapter *padapter);
+
+void
+PHY_SetTxPowerIndex_8723B(
+struct adapter *Adapter,
+u32 PowerIndex,
+u8 RFPath,
+u8 Rate
+ );
+
+u8
+PHY_GetTxPowerIndex_8723B(
+struct adapter *padapter,
+u8 RFPath,
+u8 Rate,
+enum CHANNEL_WIDTH BandWidth,
+u8 Channel
+ );
+
+void
+PHY_GetTxPowerLevel8723B(
+struct adapter *Adapter,
+s32 *powerlevel
+ );
+
+void
+PHY_SetTxPowerLevel8723B(
+struct adapter *Adapter,
+u8 channel
+ );
+
+void
+PHY_SetBWMode8723B(
+struct adapter *Adapter,
+enum CHANNEL_WIDTH Bandwidth, /* 20M or 40M */
+unsigned char Offset /* Upper, Lower, or Don't care */
+);
+
+void
+PHY_SwChnl8723B(/* Call after initialization */
+struct adapter *Adapter,
+u8 channel
+ );
+
+void
+PHY_SetSwChnlBWMode8723B(
+struct adapter *Adapter,
+u8 channel,
+enum CHANNEL_WIDTH Bandwidth,
+u8 Offset40,
+u8 Offset80
+);
+
+/*--------------------------Exported Function prototype End---------------------*/
+
+#endif
diff --git a/drivers/staging/rtl8723bs/include/Hal8723BPhyReg.h b/drivers/staging/rtl8723bs/include/Hal8723BPhyReg.h
new file mode 100644
index 000000000000..84a08e0092dd
--- /dev/null
+++ b/drivers/staging/rtl8723bs/include/Hal8723BPhyReg.h
@@ -0,0 +1,77 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#ifndef __INC_HAL8723BPHYREG_H__
+#define __INC_HAL8723BPHYREG_H__
+
+#include <Hal8192CPhyReg.h>
+
+/* BB Register Definition */
+/* */
+/* 4. Page9(0x900) */
+/* */
+#define rDPDT_control 0x92c
+#define rfe_ctrl_anta_src 0x930
+#define rS0S1_PathSwitch 0x948
+#define AGC_table_select 0xb2c
+
+/* */
+/* PageB(0xB00) */
+/* */
+#define rPdp_AntA 0xb00
+#define rPdp_AntA_4 0xb04
+#define rPdp_AntA_8 0xb08
+#define rPdp_AntA_C 0xb0c
+#define rPdp_AntA_10 0xb10
+#define rPdp_AntA_14 0xb14
+#define rPdp_AntA_18 0xb18
+#define rPdp_AntA_1C 0xb1c
+#define rPdp_AntA_20 0xb20
+#define rPdp_AntA_24 0xb24
+
+#define rConfig_Pmpd_AntA 0xb28
+#define rConfig_ram64x16 0xb2c
+
+#define rBndA 0xb30
+#define rHssiPar 0xb34
+
+#define rConfig_AntA 0xb68
+#define rConfig_AntB 0xb6c
+
+#define rPdp_AntB 0xb70
+#define rPdp_AntB_4 0xb74
+#define rPdp_AntB_8 0xb78
+#define rPdp_AntB_C 0xb7c
+#define rPdp_AntB_10 0xb80
+#define rPdp_AntB_14 0xb84
+#define rPdp_AntB_18 0xb88
+#define rPdp_AntB_1C 0xb8c
+#define rPdp_AntB_20 0xb90
+#define rPdp_AntB_24 0xb94
+
+#define rConfig_Pmpd_AntB 0xb98
+
+#define rBndB 0xba0
+
+#define rAPK 0xbd8
+#define rPm_Rx0_AntA 0xbdc
+#define rPm_Rx1_AntA 0xbe0
+#define rPm_Rx2_AntA 0xbe4
+#define rPm_Rx3_AntA 0xbe8
+#define rPm_Rx0_AntB 0xbec
+#define rPm_Rx1_AntB 0xbf0
+#define rPm_Rx2_AntB 0xbf4
+#define rPm_Rx3_AntB 0xbf8
+
+#endif
diff --git a/drivers/staging/rtl8723bs/include/Hal8723BPwrSeq.h b/drivers/staging/rtl8723bs/include/Hal8723BPwrSeq.h
new file mode 100644
index 000000000000..796449c3f430
--- /dev/null
+++ b/drivers/staging/rtl8723bs/include/Hal8723BPwrSeq.h
@@ -0,0 +1,232 @@
+#ifndef REALTEK_POWER_SEQUENCE_8723B
+#define REALTEK_POWER_SEQUENCE_8723B
+
+#include "HalPwrSeqCmd.h"
+
+/*
+ Check document WM-20130815-JackieLau-RTL8723B_Power_Architecture v08.vsd
+ There are 6 HW Power States:
+ 0: POFF--Power Off
+ 1: PDN--Power Down
+ 2: CARDEMU--Card Emulation
+ 3: ACT--Active Mode
+ 4: LPS--Low Power State
+ 5: SUS--Suspend
+
+	The transitions between the different states are defined below
+ TRANS_CARDEMU_TO_ACT
+ TRANS_ACT_TO_CARDEMU
+ TRANS_CARDEMU_TO_SUS
+ TRANS_SUS_TO_CARDEMU
+ TRANS_CARDEMU_TO_PDN
+ TRANS_ACT_TO_LPS
+ TRANS_LPS_TO_ACT
+
+ TRANS_END
+*/
+#define RTL8723B_TRANS_CARDEMU_TO_ACT_STEPS 26
+#define RTL8723B_TRANS_ACT_TO_CARDEMU_STEPS 15
+#define RTL8723B_TRANS_CARDEMU_TO_SUS_STEPS 15
+#define RTL8723B_TRANS_SUS_TO_CARDEMU_STEPS 15
+#define RTL8723B_TRANS_CARDEMU_TO_PDN_STEPS 15
+#define RTL8723B_TRANS_PDN_TO_CARDEMU_STEPS 15
+#define RTL8723B_TRANS_ACT_TO_LPS_STEPS 15
+#define RTL8723B_TRANS_LPS_TO_ACT_STEPS 15
+#define RTL8723B_TRANS_ACT_TO_SWLPS_STEPS 22
+#define RTL8723B_TRANS_SWLPS_TO_ACT_STEPS 15
+#define RTL8723B_TRANS_END_STEPS 1
+
+
+#define RTL8723B_TRANS_CARDEMU_TO_ACT \
+ /* format */ \
+ /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, comments here*/ \
+ {0x0020, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT0, BIT0}, /*0x20[0] = 1b'1 enable LDOA12 MACRO block for all interface*/ \
+ {0x0067, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT4, 0}, /*0x67[0] = 0 to disable BT_GPS_SEL pins*/ \
+ {0x0001, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_DELAY, 1, PWRSEQ_DELAY_MS},/*Delay 1ms*/ \
+ {0x0000, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT5, 0}, /*0x00[5] = 1b'0 release analog Ips to digital , 1:isolation*/ \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, (BIT4|BIT3|BIT2), 0},/* disable SW LPS 0x04[10]= 0 and WLSUS_EN 0x04[11]= 0*/ \
+ {0x0075, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT0 , BIT0},/* Disable USB suspend */ \
+ {0x0006, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_POLLING, BIT1, BIT1},/* wait till 0x04[17] = 1 power ready*/ \
+ {0x0075, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT0 , 0},/* Enable USB suspend */ \
+ {0x0006, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT0, BIT0},/* release WLON reset 0x04[16]= 1*/ \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT7, 0},/* disable HWPDN 0x04[15]= 0*/ \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, (BIT4|BIT3), 0},/* disable WL suspend*/ \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT0, BIT0},/* polling until return 0*/ \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_POLLING, BIT0, 0},/**/ \
+ {0x0010, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT6, BIT6},/* Enable WL control XTAL setting*/ \
+ {0x0049, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT1, BIT1},/*Enable falling edge triggering interrupt*/\
+ {0x0063, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT1, BIT1},/*Enable GPIO9 interrupt mode*/\
+ {0x0062, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT1, 0},/*Enable GPIO9 input mode*/\
+ {0x0058, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT0, BIT0},/*Enable HSISR GPIO[C:0] interrupt*/\
+ {0x005A, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT1, BIT1},/*Enable HSISR GPIO9 interrupt*/\
+ {0x0068, PWR_CUT_TESTCHIP_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT3, BIT3},/*For GPIO9 internal pull high setting by test chip*/\
+ {0x0069, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT6, BIT6},/*For GPIO9 internal pull high setting*/\
+
+
+#define RTL8723B_TRANS_ACT_TO_CARDEMU \
+ /* format */ \
+ /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, comments here*/ \
+ {0x001F, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0},/*0x1F[7:0] = 0 turn off RF*/ \
+ {0x0049, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT1, 0},/*Enable rising edge triggering interrupt*/ \
+ {0x0006, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT0, BIT0},/* release WLON reset 0x04[16]= 1*/ \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT1, BIT1}, /*0x04[9] = 1 turn off MAC by HW state machine*/ \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_POLLING, BIT1, 0}, /*wait till 0x04[9] = 0 polling until return 0 to disable*/ \
+ {0x0010, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT6, 0},/* Enable BT control XTAL setting*/\
+ {0x0000, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT5, BIT5}, /*0x00[5] = 1b'1 analog Ips to digital , 1:isolation*/ \
+ {0x0020, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT0, 0}, /*0x20[0] = 1b'0 disable LDOA12 MACRO block*/\
+
+
+#define RTL8723B_TRANS_CARDEMU_TO_SUS \
+ /* format */ \
+ /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, comments here*/ \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT4|BIT3, (BIT4|BIT3)}, /*0x04[12:11] = 2b'11 enable WL suspend for PCIe*/ \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT3|BIT4, BIT3}, /*0x04[12:11] = 2b'01 enable WL suspend*/ \
+ {0x0023, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT4, BIT4}, /*0x23[4] = 1b'1 12H LDO enter sleep mode*/ \
+ {0x0007, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x20}, /*0x07[7:0] = 0x20 SDIO SOP option to disable BG/MB/ACK/SWR*/ \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT3|BIT4, BIT3|BIT4}, /*0x04[12:11] = 2b'11 enable WL suspend for PCIe*/ \
+ {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_SDIO, PWR_CMD_WRITE, BIT0, BIT0}, /*Set SDIO suspend local register*/ \
+ {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_SDIO, PWR_CMD_POLLING, BIT1, 0}, /*wait power state to suspend*/
+
+#define RTL8723B_TRANS_SUS_TO_CARDEMU \
+ /* format */ \
+ /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, comments here*/ \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT3 | BIT7, 0}, /*clear suspend enable and power down enable*/ \
+ {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_SDIO, PWR_CMD_WRITE, BIT0, 0}, /*Set SDIO suspend local register*/ \
+ {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_SDIO, PWR_CMD_POLLING, BIT1, BIT1}, /*wait power state to suspend*/\
+ {0x0023, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT4, 0}, /*0x23[4] = 1b'0 12H LDO enter normal mode*/ \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT3|BIT4, 0}, /*0x04[12:11] = 2b'01enable WL suspend*/
+
+#define RTL8723B_TRANS_CARDEMU_TO_CARDDIS \
+ /* format */ \
+	/* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, comments here*/ \
+ {0x0007, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x20}, /*0x07 = 0x20 , SOP option to disable BG/MB*/ \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT3|BIT4, BIT3}, /*0x04[12:11] = 2b'01 enable WL suspend*/ \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT2, BIT2}, /*0x04[10] = 1, enable SW LPS*/ \
+ {0x004A, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT0, 1}, /*0x48[16] = 1 to enable GPIO9 as EXT WAKEUP*/ \
+ {0x0023, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT4, BIT4}, /*0x23[4] = 1b'1 12H LDO enter sleep mode*/ \
+ {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_SDIO, PWR_CMD_WRITE, BIT0, BIT0}, /*Set SDIO suspend local register*/ \
+ {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_SDIO, PWR_CMD_POLLING, BIT1, 0}, /*wait power state to suspend*/
+
+#define RTL8723B_TRANS_CARDDIS_TO_CARDEMU \
+ /* format */ \
+ /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, comments here*/ \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT3 | BIT7, 0}, /*clear suspend enable and power down enable*/ \
+ {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_SDIO, PWR_CMD_WRITE, BIT0, 0}, /*Set SDIO suspend local register*/ \
+ {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_SDIO, PWR_CMD_POLLING, BIT1, BIT1}, /*wait power state to suspend*/\
+ {0x004A, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT0, 0}, /*0x48[16] = 0 to disable GPIO9 as EXT WAKEUP*/ \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT3|BIT4, 0}, /*0x04[12:11] = 2b'01enable WL suspend*/\
+ {0x0023, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT4, 0}, /*0x23[4] = 1b'0 12H LDO enter normal mode*/ \
+ {0x0301, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0},/*PCIe DMA start*/
+
+
+#define RTL8723B_TRANS_CARDEMU_TO_PDN \
+ /* format */ \
+ /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, comments here*/ \
+ {0x0023, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT4, BIT4}, /*0x23[4] = 1b'1 12H LDO enter sleep mode*/ \
+ {0x0007, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK|PWR_INTF_USB_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x20}, /*0x07[7:0] = 0x20 SOP option to disable BG/MB/ACK/SWR*/ \
+ {0x0006, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT0, 0},/* 0x04[16] = 0*/\
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT7, BIT7},/* 0x04[15] = 1*/
+
+#define RTL8723B_TRANS_PDN_TO_CARDEMU \
+ /* format */ \
+ /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, comments here*/ \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT7, 0},/* 0x04[15] = 0*/
+
+#define RTL8723B_TRANS_ACT_TO_LPS \
+ /* format */ \
+ /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, comments here*/ \
+ {0x0301, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0xFF},/*PCIe DMA stop*/ \
+ {0x0522, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0xFF},/*Tx Pause*/ \
+ {0x05F8, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_POLLING, 0xFF, 0},/*Should be zero if no packet is transmitting*/ \
+ {0x05F9, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_POLLING, 0xFF, 0},/*Should be zero if no packet is transmitting*/ \
+ {0x05FA, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_POLLING, 0xFF, 0},/*Should be zero if no packet is transmitting*/ \
+ {0x05FB, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_POLLING, 0xFF, 0},/*Should be zero if no packet is transmitting*/ \
+ {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT0, 0},/*CCK and OFDM are disabled, and clock are gated*/ \
+ {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_DELAY, 0, PWRSEQ_DELAY_US},/*Delay 1us*/ \
+ {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT1, 0},/*Whole BB is reset*/ \
+ {0x0100, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x03},/*Reset MAC TRX*/ \
+ {0x0101, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT1, 0},/*check if removed later*/ \
+ {0x0093, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x00},/*When driver enter Sus/ Disable, enable LOP for BT*/ \
+ {0x0553, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT5, BIT5},/*Respond TxOK to scheduler*/ \
+
+
+#define RTL8723B_TRANS_LPS_TO_ACT \
+ /* format */ \
+ /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, comments here*/ \
+ {0x0080, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_SDIO, PWR_CMD_WRITE, 0xFF, 0x84}, /*SDIO RPWM*/\
+ {0xFE58, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x84}, /*USB RPWM*/\
+ {0x0361, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x84}, /*PCIe RPWM*/\
+ {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_DELAY, 0, PWRSEQ_DELAY_MS}, /*Delay*/\
+ {0x0008, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT4, 0}, /*. 0x08[4] = 0 switch TSF to 40M*/\
+ {0x0109, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_POLLING, BIT7, 0}, /*Polling 0x109[7]= 0 TSF in 40M*/\
+ {0x0029, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT6|BIT7, 0}, /*. 0x29[7:6] = 2b'00 enable BB clock*/\
+ {0x0101, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT1, BIT1}, /*. 0x101[1] = 1*/\
+ {0x0100, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0xFF}, /*. 0x100[7:0] = 0xFF enable WMAC TRX*/\
+ {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT1|BIT0, BIT1|BIT0}, /*. 0x02[1:0] = 2b'11 enable BB macro*/\
+ {0x0522, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0}, /*. 0x522 = 0*/
+
+
+ #define RTL8723B_TRANS_ACT_TO_SWLPS \
+ /* format */ \
+ /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, comments here*/ \
+ {0x0194, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT0, BIT0},/*enable 32 K source*/ \
+ {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT0, 0},/*CCK and OFDM are disabled, and clock are gated*/ \
+	{0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT0, 1},/*CCK and OFDM are enabled*/ \
+ {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT0, 0},/*CCK and OFDM are disabled, and clock are gated*/ \
+	{0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT0, 1},/*CCK and OFDM are enabled*/ \
+ {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT0, 0},/*CCK and OFDM are disabled, and clock are gated*/ \
+ {0x0100, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x3F},/*Reset MAC TRX*/ \
+ {0x0101, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT1, 0},/*disable security engine*/ \
+ {0x0093, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x40},/*When driver enter Sus/ Disable, enable LOP for BT*/ \
+ {0x0553, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT5, BIT5},/*reset dual TSF*/ \
+ {0x0003, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT2, 0},/*Reset CPU*/ \
+ {0x0080, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0},/*Reset MCUFWDL register*/ \
+ {0x001D, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT0, 0},/*Reset CPU IO Wrapper*/ \
+ {0x001D, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT0, 1},/*Reset CPU IO Wrapper*/ \
+ {0x0287, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_POLLING, 0xFF, 0},/*polling RXFF packet number = 0 */ \
+ {0x0286, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_POLLING, BIT1, BIT1},/*polling RXDMA idle */ \
+ {0x013D, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT0, BIT0},/*Clear FW RPWM interrupt */\
+ {0x0139, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT0, BIT0},/*Set FW RPWM interrupt source*/\
+ {0x0008, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT4, BIT4},/*switch TSF to 32K*/\
+ {0x0109, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT7, BIT7},/*polling TSF stable*/\
+ {0x0090, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT0, BIT0},/*Set FW LPS*/ \
+ {0x0090, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_POLLING, BIT0, 0},/*polling FW LPS ready */
+
+
+#define RTL8723B_TRANS_SWLPS_TO_ACT \
+ /* format */ \
+ /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, comments here*/ \
+ {0x0008, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT4, 0},/*switch TSF to 32K*/\
+ {0x0109, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT7, 0},/*polling TSF stable*/\
+ {0x0101, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT1, BIT1}, /*. 0x101[1] = 1, enable security engine*/\
+ {0x0100, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0xFF}, /*. 0x100[7:0] = 0xFF enable WMAC TRX*/\
+ {0x06B7, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x09}, /*. reset MAC rx state machine*/\
+ {0x06B4, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x86}, /*. reset MAC rx state machine*/\
+ {0x0080, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT1, BIT1},/* set CPU RAM code ready*/ \
+ {0x001D, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT0, 0},/*Reset CPU IO Wrapper*/ \
+ {0x0003, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT2, 0},/* Enable CPU*/ \
+ {0x001D, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT0, BIT0},/*enable CPU IO Wrapper*/ \
+ {0x0003, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT2, BIT2},/* Enable CPU*/ \
+ {0x0080, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_POLLING, BIT7, BIT7},/*polling FW init ready */ \
+ {0x0080, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_POLLING, BIT6, BIT6},/*polling FW init ready */ \
+ {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT0, BIT0}, /*. 0x02[1:0] = 2b'11 enable BB macro*/\
+ {0x0522, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0}, /*. 0x522 = 0*/
+
+#define RTL8723B_TRANS_END \
+ /* format */ \
+ /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, comments here*/ \
+ {0xFFFF, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, 0, PWR_CMD_END, 0, 0},
+
+
+extern WLAN_PWR_CFG rtl8723B_power_on_flow[RTL8723B_TRANS_CARDEMU_TO_ACT_STEPS+RTL8723B_TRANS_END_STEPS];
+extern WLAN_PWR_CFG rtl8723B_radio_off_flow[RTL8723B_TRANS_ACT_TO_CARDEMU_STEPS+RTL8723B_TRANS_END_STEPS];
+extern WLAN_PWR_CFG rtl8723B_card_disable_flow[RTL8723B_TRANS_ACT_TO_CARDEMU_STEPS+RTL8723B_TRANS_CARDEMU_TO_PDN_STEPS+RTL8723B_TRANS_END_STEPS];
+extern WLAN_PWR_CFG rtl8723B_card_enable_flow[RTL8723B_TRANS_ACT_TO_CARDEMU_STEPS+RTL8723B_TRANS_CARDEMU_TO_PDN_STEPS+RTL8723B_TRANS_END_STEPS];
+extern WLAN_PWR_CFG rtl8723B_suspend_flow[RTL8723B_TRANS_ACT_TO_CARDEMU_STEPS+RTL8723B_TRANS_CARDEMU_TO_SUS_STEPS+RTL8723B_TRANS_END_STEPS];
+extern WLAN_PWR_CFG rtl8723B_resume_flow[RTL8723B_TRANS_ACT_TO_CARDEMU_STEPS+RTL8723B_TRANS_CARDEMU_TO_SUS_STEPS+RTL8723B_TRANS_END_STEPS];
+extern WLAN_PWR_CFG rtl8723B_hwpdn_flow[RTL8723B_TRANS_ACT_TO_CARDEMU_STEPS+RTL8723B_TRANS_CARDEMU_TO_PDN_STEPS+RTL8723B_TRANS_END_STEPS];
+extern WLAN_PWR_CFG rtl8723B_enter_lps_flow[RTL8723B_TRANS_ACT_TO_LPS_STEPS+RTL8723B_TRANS_END_STEPS];
+extern WLAN_PWR_CFG rtl8723B_leave_lps_flow[RTL8723B_TRANS_LPS_TO_ACT_STEPS+RTL8723B_TRANS_END_STEPS];
+extern WLAN_PWR_CFG rtl8723B_enter_swlps_flow[RTL8723B_TRANS_ACT_TO_SWLPS_STEPS+RTL8723B_TRANS_END_STEPS];
+extern WLAN_PWR_CFG rtl8723B_leave_swlps_flow[RTL8723B_TRANS_SWLPS_TO_ACT_STEPS+RTL8723B_TRANS_END_STEPS];
+#endif
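
Each RTL8723B_TRANS_* macro above expands to a comma-terminated run of WLAN_PWR_CFG initializers, so the exported flow arrays can be built by concatenating the relevant transitions and closing with RTL8723B_TRANS_END. A sketch of how rtl8723B_power_on_flow could be populated on that pattern (the matching .c file is not part of this hunk, so treat this as illustrative):

    /* CARDEMU -> ACT steps followed by the END marker; the macro expansion
     * supplies the braces and trailing commas for every entry. */
    WLAN_PWR_CFG rtl8723B_power_on_flow[RTL8723B_TRANS_CARDEMU_TO_ACT_STEPS +
                                        RTL8723B_TRANS_END_STEPS] = {
            RTL8723B_TRANS_CARDEMU_TO_ACT
            RTL8723B_TRANS_END
    };
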
diff --git a/drivers/staging/rtl8723bs/include/HalPwrSeqCmd.h b/drivers/staging/rtl8723bs/include/HalPwrSeqCmd.h
new file mode 100644
index 000000000000..5bb9f5a04734
--- /dev/null
+++ b/drivers/staging/rtl8723bs/include/HalPwrSeqCmd.h
@@ -0,0 +1,132 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#ifndef __HALPWRSEQCMD_H__
+#define __HALPWRSEQCMD_H__
+
+#include <drv_types.h>
+
+/*---------------------------------------------*/
+/* 3 The value of cmd: 4 bits */
+/*---------------------------------------------*/
+#define PWR_CMD_READ 0x00
+ /* offset: the read register offset */
+ /* msk: the mask of the read value */
+ /* value: N/A, left by 0 */
+	 /* note: driver shall implement this function by read & msk */
+
+#define PWR_CMD_WRITE 0x01
+	 /* offset: the register offset to write */
+	 /* msk: the mask of the write bits */
+	 /* value: the value to write */
+	 /* note: driver shall implement this cmd as read, apply msk, then write back */
+
+#define PWR_CMD_POLLING 0x02
+ /* offset: the read register offset */
+ /* msk: the mask of the polled value */
+	 /* value: the value to be polled, masked by the msk field. */
+ /* note: driver shall implement this cmd by */
+ /* do{ */
+ /* if ((Read(offset) & msk) == (value & msk)) */
+ /* break; */
+ /* } while (not timeout); */
+
+#define PWR_CMD_DELAY 0x03
+ /* offset: the value to delay */
+ /* msk: N/A */
+ /* value: the unit of delay, 0: us, 1: ms */
+
+#define PWR_CMD_END 0x04
+ /* offset: N/A */
+ /* msk: N/A */
+ /* value: N/A */
+
+/*---------------------------------------------*/
+/* 3 The value of base: 4 bits */
+/*---------------------------------------------*/
+ /* define the base address of each block */
+#define PWR_BASEADDR_MAC 0x00
+#define PWR_BASEADDR_USB 0x01
+#define PWR_BASEADDR_PCIE 0x02
+#define PWR_BASEADDR_SDIO 0x03
+
+/*---------------------------------------------*/
+/* 3 The value of interface_msk: 4 bits */
+/*---------------------------------------------*/
+#define PWR_INTF_SDIO_MSK BIT(0)
+#define PWR_INTF_USB_MSK BIT(1)
+#define PWR_INTF_PCI_MSK BIT(2)
+#define PWR_INTF_ALL_MSK (BIT(0)|BIT(1)|BIT(2)|BIT(3))
+
+/*---------------------------------------------*/
+/* 3 The value of fab_msk: 4 bits */
+/*---------------------------------------------*/
+#define PWR_FAB_TSMC_MSK BIT(0)
+#define PWR_FAB_UMC_MSK BIT(1)
+#define PWR_FAB_ALL_MSK (BIT(0)|BIT(1)|BIT(2)|BIT(3))
+
+/*---------------------------------------------*/
+/* 3 The value of cut_msk: 8 bits */
+/*---------------------------------------------*/
+#define PWR_CUT_TESTCHIP_MSK BIT(0)
+#define PWR_CUT_A_MSK BIT(1)
+#define PWR_CUT_B_MSK BIT(2)
+#define PWR_CUT_C_MSK BIT(3)
+#define PWR_CUT_D_MSK BIT(4)
+#define PWR_CUT_E_MSK BIT(5)
+#define PWR_CUT_F_MSK BIT(6)
+#define PWR_CUT_G_MSK BIT(7)
+#define PWR_CUT_ALL_MSK 0xFF
+
+
+typedef enum _PWRSEQ_CMD_DELAY_UNIT_
+{
+ PWRSEQ_DELAY_US,
+ PWRSEQ_DELAY_MS,
+} PWRSEQ_DELAY_UNIT;
+
+typedef struct _WL_PWR_CFG_
+{
+ u16 offset;
+ u8 cut_msk;
+ u8 fab_msk:4;
+ u8 interface_msk:4;
+ u8 base:4;
+ u8 cmd:4;
+ u8 msk;
+ u8 value;
+} WLAN_PWR_CFG, *PWLAN_PWR_CFG;
+
+
+#define GET_PWR_CFG_OFFSET(__PWR_CMD) __PWR_CMD.offset
+#define GET_PWR_CFG_CUT_MASK(__PWR_CMD) __PWR_CMD.cut_msk
+#define GET_PWR_CFG_FAB_MASK(__PWR_CMD) __PWR_CMD.fab_msk
+#define GET_PWR_CFG_INTF_MASK(__PWR_CMD) __PWR_CMD.interface_msk
+#define GET_PWR_CFG_BASE(__PWR_CMD) __PWR_CMD.base
+#define GET_PWR_CFG_CMD(__PWR_CMD) __PWR_CMD.cmd
+#define GET_PWR_CFG_MASK(__PWR_CMD) __PWR_CMD.msk
+#define GET_PWR_CFG_VALUE(__PWR_CMD) __PWR_CMD.value
+
+
+/* */
+/* Prototype of protected function. */
+/* */
+u8 HalPwrSeqCmdParsing(
+ struct adapter * padapter,
+ u8 CutVersion,
+ u8 FabVersion,
+ u8 InterfaceType,
+ WLAN_PWR_CFG PwrCfgCmd[]);
+
+#endif
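
The command values, the WLAN_PWR_CFG layout, and the GET_PWR_CFG_* accessors above together describe a small table-driven interpreter. The condensed sketch below walks a command array according to the documented semantics; it is not the driver's HalPwrSeqCmdParsing (which additionally filters on the cut/fab/interface masks and bounds the polling loop with a timeout), and rtw_read8()/rtw_write8()/udelay()/mdelay() are assumed to be available as the usual register-access and delay helpers:

    /* Minimal sketch of the documented command semantics, not the driver's
     * actual implementation. */
    static u8 example_pwr_seq_walk(struct adapter *padapter, WLAN_PWR_CFG cfg[])
    {
            u32 i = 0;
            u8 v;

            do {
                    WLAN_PWR_CFG cmd = cfg[i++];

                    switch (GET_PWR_CFG_CMD(cmd)) {
                    case PWR_CMD_WRITE:     /* read, clear masked bits, merge value, write back */
                            v = rtw_read8(padapter, GET_PWR_CFG_OFFSET(cmd));
                            v &= ~GET_PWR_CFG_MASK(cmd);
                            v |= GET_PWR_CFG_VALUE(cmd) & GET_PWR_CFG_MASK(cmd);
                            rtw_write8(padapter, GET_PWR_CFG_OFFSET(cmd), v);
                            break;
                    case PWR_CMD_POLLING:   /* spin until the masked value matches */
                            do {
                                    v = rtw_read8(padapter, GET_PWR_CFG_OFFSET(cmd));
                            } while ((v & GET_PWR_CFG_MASK(cmd)) !=
                                     (GET_PWR_CFG_VALUE(cmd) & GET_PWR_CFG_MASK(cmd)));
                            break;
                    case PWR_CMD_DELAY:     /* offset holds the amount, value selects us/ms */
                            if (GET_PWR_CFG_VALUE(cmd) == PWRSEQ_DELAY_US)
                                    udelay(GET_PWR_CFG_OFFSET(cmd));
                            else
                                    mdelay(GET_PWR_CFG_OFFSET(cmd));
                            break;
                    case PWR_CMD_END:
                            return true;
                    }
            } while (1);
    }
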
diff --git a/drivers/staging/rtl8723bs/include/HalVerDef.h b/drivers/staging/rtl8723bs/include/HalVerDef.h
new file mode 100644
index 000000000000..a9e8609303b9
--- /dev/null
+++ b/drivers/staging/rtl8723bs/include/HalVerDef.h
@@ -0,0 +1,127 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#ifndef __HAL_VERSION_DEF_H__
+#define __HAL_VERSION_DEF_H__
+
+/* HAL_IC_TYPE_E */
+typedef enum tag_HAL_IC_Type_Definition
+{
+ CHIP_8192S = 0,
+ CHIP_8188C = 1,
+ CHIP_8192C = 2,
+ CHIP_8192D = 3,
+ CHIP_8723A = 4,
+ CHIP_8188E = 5,
+ CHIP_8812 = 6,
+ CHIP_8821 = 7,
+ CHIP_8723B = 8,
+ CHIP_8192E = 9,
+}HAL_IC_TYPE_E;
+
+/* HAL_CHIP_TYPE_E */
+typedef enum tag_HAL_CHIP_Type_Definition
+{
+ TEST_CHIP = 0,
+ NORMAL_CHIP = 1,
+ FPGA = 2,
+}HAL_CHIP_TYPE_E;
+
+/* HAL_CUT_VERSION_E */
+typedef enum tag_HAL_Cut_Version_Definition
+{
+ A_CUT_VERSION = 0,
+ B_CUT_VERSION = 1,
+ C_CUT_VERSION = 2,
+ D_CUT_VERSION = 3,
+ E_CUT_VERSION = 4,
+ F_CUT_VERSION = 5,
+ G_CUT_VERSION = 6,
+ H_CUT_VERSION = 7,
+ I_CUT_VERSION = 8,
+ J_CUT_VERSION = 9,
+ K_CUT_VERSION = 10,
+}HAL_CUT_VERSION_E;
+
+/* HAL_Manufacturer */
+typedef enum tag_HAL_Manufacturer_Version_Definition
+{
+ CHIP_VENDOR_TSMC = 0,
+ CHIP_VENDOR_UMC = 1,
+ CHIP_VENDOR_SMIC = 2,
+}HAL_VENDOR_E;
+
+typedef enum tag_HAL_RF_Type_Definition
+{
+ RF_TYPE_1T1R = 0,
+ RF_TYPE_1T2R = 1,
+ RF_TYPE_2T2R = 2,
+ RF_TYPE_2T3R = 3,
+ RF_TYPE_2T4R = 4,
+ RF_TYPE_3T3R = 5,
+ RF_TYPE_3T4R = 6,
+ RF_TYPE_4T4R = 7,
+}HAL_RF_TYPE_E;
+
+typedef struct tag_HAL_VERSION
+{
+ HAL_IC_TYPE_E ICType;
+ HAL_CHIP_TYPE_E ChipType;
+ HAL_CUT_VERSION_E CUTVersion;
+ HAL_VENDOR_E VendorType;
+ HAL_RF_TYPE_E RFType;
+ u8 ROMVer;
+}HAL_VERSION,*PHAL_VERSION;
+
+/* VERSION_8192C VersionID; */
+/* HAL_VERSION VersionID; */
+
+/* Get element */
+#define GET_CVID_IC_TYPE(version) ((HAL_IC_TYPE_E)((version).ICType) )
+#define GET_CVID_CHIP_TYPE(version) ((HAL_CHIP_TYPE_E)((version).ChipType) )
+#define GET_CVID_RF_TYPE(version) ((HAL_RF_TYPE_E)((version).RFType))
+#define GET_CVID_MANUFACTUER(version) ((HAL_VENDOR_E)((version).VendorType))
+#define GET_CVID_CUT_VERSION(version) ((HAL_CUT_VERSION_E)((version).CUTVersion))
+#define GET_CVID_ROM_VERSION(version) (((version).ROMVer) & ROM_VERSION_MASK)
+
+/* */
+/* Common Macro. -- */
+/* */
+/* HAL_VERSION VersionID */
+
+/* HAL_CHIP_TYPE_E */
+#define IS_TEST_CHIP(version) ((GET_CVID_CHIP_TYPE(version) ==TEST_CHIP)? true: false)
+#define IS_NORMAL_CHIP(version) ((GET_CVID_CHIP_TYPE(version) ==NORMAL_CHIP)? true: false)
+
+/* HAL_CUT_VERSION_E */
+#define IS_A_CUT(version) ((GET_CVID_CUT_VERSION(version) == A_CUT_VERSION) ? true : false)
+#define IS_B_CUT(version) ((GET_CVID_CUT_VERSION(version) == B_CUT_VERSION) ? true : false)
+#define IS_C_CUT(version) ((GET_CVID_CUT_VERSION(version) == C_CUT_VERSION) ? true : false)
+#define IS_D_CUT(version) ((GET_CVID_CUT_VERSION(version) == D_CUT_VERSION) ? true : false)
+#define IS_E_CUT(version) ((GET_CVID_CUT_VERSION(version) == E_CUT_VERSION) ? true : false)
+#define IS_I_CUT(version) ((GET_CVID_CUT_VERSION(version) == I_CUT_VERSION) ? true : false)
+#define IS_J_CUT(version) ((GET_CVID_CUT_VERSION(version) == J_CUT_VERSION) ? true : false)
+#define IS_K_CUT(version) ((GET_CVID_CUT_VERSION(version) == K_CUT_VERSION) ? true : false)
+
+/* HAL_VENDOR_E */
+#define IS_CHIP_VENDOR_TSMC(version) ((GET_CVID_MANUFACTUER(version) == CHIP_VENDOR_TSMC)? true: false)
+#define IS_CHIP_VENDOR_UMC(version) ((GET_CVID_MANUFACTUER(version) == CHIP_VENDOR_UMC)? true: false)
+#define IS_CHIP_VENDOR_SMIC(version) ((GET_CVID_MANUFACTUER(version) == CHIP_VENDOR_SMIC)? true: false)
+
+/* HAL_RF_TYPE_E */
+#define IS_1T1R(version) ((GET_CVID_RF_TYPE(version) == RF_TYPE_1T1R)? true : false)
+#define IS_1T2R(version) ((GET_CVID_RF_TYPE(version) == RF_TYPE_1T2R)? true : false)
+#define IS_2T2R(version) ((GET_CVID_RF_TYPE(version) == RF_TYPE_2T2R)? true : false)
+
+#endif
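
The GET_CVID_* accessors and IS_* predicates above all take a HAL_VERSION by value. A small illustrative use, with arbitrary example field values (not read from real hardware):

    /* Returns true for a normal (non-test) B-cut part fabbed at TSMC. */
    static bool example_is_b_cut_tsmc(void)
    {
            HAL_VERSION ver = {
                    .ICType     = CHIP_8723B,
                    .ChipType   = NORMAL_CHIP,
                    .CUTVersion = B_CUT_VERSION,
                    .VendorType = CHIP_VENDOR_TSMC,
                    .RFType     = RF_TYPE_1T1R,
            };

            return IS_NORMAL_CHIP(ver) && IS_B_CUT(ver) && IS_CHIP_VENDOR_TSMC(ver);
    }
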
diff --git a/drivers/staging/rtl8723bs/include/autoconf.h b/drivers/staging/rtl8723bs/include/autoconf.h
new file mode 100644
index 000000000000..09ed29f4efbd
--- /dev/null
+++ b/drivers/staging/rtl8723bs/include/autoconf.h
@@ -0,0 +1,73 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+
+
+/*
+ * Automatically generated C config: don't edit
+ */
+
+/*
+ * Functions Config
+ */
+/* define DEBUG_CFG80211 */
+
+#ifndef CONFIG_WIRELESS_EXT
+#error CONFIG_WIRELESS_EXT needs to be enabled for this driver to work
+#endif
+
+/*
+ * Auto Config Section
+ */
+#define LPS_RPWM_WAIT_MS 300
+#ifndef DISABLE_BB_RF
+#define DISABLE_BB_RF 0
+#endif
+
+#if DISABLE_BB_RF
+ #define HAL_MAC_ENABLE 0
+ #define HAL_BB_ENABLE 0
+ #define HAL_RF_ENABLE 0
+#else
+ #define HAL_MAC_ENABLE 1
+ #define HAL_BB_ENABLE 1
+ #define HAL_RF_ENABLE 1
+#endif
+
+/*
+ * Platform dependent
+ */
+#define WAKEUP_GPIO_IDX 12 /* WIFI Chip Side */
+#ifdef CONFIG_WOWLAN
+#define CONFIG_GTK_OL
+#endif /* CONFIG_WOWLAN */
+
+/*
+ * Debug Related Config
+ */
+#undef DEBUG
+
+#ifdef DEBUG
+#define DBG 1 /* for ODM & BTCOEX debug */
+/*#define DEBUG_RTL871X */
+#else /* !DEBUG */
+#define DBG 0 /* for ODM & BTCOEX debug */
+#endif /* !DEBUG */
+
+#ifdef CONFIG_PROC_FS
+#define PROC_DEBUG
+#endif
+
+/* define DBG_XMIT_BUF */
+/* define DBG_XMIT_BUF_EXT */
diff --git a/drivers/staging/rtl8723bs/include/basic_types.h b/drivers/staging/rtl8723bs/include/basic_types.h
new file mode 100644
index 000000000000..abadea07466d
--- /dev/null
+++ b/drivers/staging/rtl8723bs/include/basic_types.h
@@ -0,0 +1,209 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#ifndef __BASIC_TYPES_H__
+#define __BASIC_TYPES_H__
+
+
+#define SUCCESS 0
+#define FAIL (-1)
+
+#include <linux/types.h>
+
+typedef signed int sint;
+
+#define FIELD_OFFSET(s, field) ((__kernel_ssize_t)&((s*)(0))->field)
+
+#define SIZE_PTR __kernel_size_t
+#define SSIZE_PTR __kernel_ssize_t
+
+/* port from fw by thomas */
+/* TODO: The following were synced from the SD7 driver. Their correctness still needs to be checked. */
+
+/*
+ * Call the endian-free functions when:
+ * 1. Reading/writing packet content.
+ * 2. Before writing an integer to IO.
+ * 3. After reading an integer from IO.
+ */
+
+/* */
+/* Byte Swapping routine. */
+/* */
+#define EF1Byte (u8)
+#define EF2Byte le16_to_cpu
+#define EF4Byte le32_to_cpu
+
+/* Convert little data endian to host ordering */
+#define EF1BYTE(_val) \
+ ((u8)(_val))
+#define EF2BYTE(_val) \
+ (le16_to_cpu(_val))
+#define EF4BYTE(_val) \
+ (le32_to_cpu(_val))
+
+/* Read data from memory */
+#define READEF1BYTE(_ptr) \
+ EF1BYTE(*((u8 *)(_ptr)))
+/* Read le16 data from memory and convert to host ordering */
+#define READEF2BYTE(_ptr) \
+ EF2BYTE(*(_ptr))
+#define READEF4BYTE(_ptr) \
+ EF4BYTE(*(_ptr))
+
+/* Write data to memory */
+#define WRITEEF1BYTE(_ptr, _val) \
+ do { \
+ (*((u8 *)(_ptr))) = EF1BYTE(_val); \
+ } while (0)
+/* Write le data to memory in host ordering */
+#define WRITEEF2BYTE(_ptr, _val) \
+ do { \
+ (*((u16 *)(_ptr))) = EF2BYTE(_val); \
+ } while (0)
+
+#define WRITEEF4BYTE(_ptr, _val) \
+ do { \
+		(*((u32 *)(_ptr))) = EF4BYTE(_val); \
+ } while (0)
+
+/* Create a bit mask
+ * Examples:
+ * BIT_LEN_MASK_32(0) => 0x00000000
+ * BIT_LEN_MASK_32(1) => 0x00000001
+ * BIT_LEN_MASK_32(2) => 0x00000003
+ * BIT_LEN_MASK_32(32) => 0xFFFFFFFF
+ */
+#define BIT_LEN_MASK_32(__bitlen) \
+ (0xFFFFFFFF >> (32 - (__bitlen)))
+#define BIT_LEN_MASK_16(__bitlen) \
+ (0xFFFF >> (16 - (__bitlen)))
+#define BIT_LEN_MASK_8(__bitlen) \
+ (0xFF >> (8 - (__bitlen)))
+
+/* Create an offset bit mask
+ * Examples:
+ * BIT_OFFSET_LEN_MASK_32(0, 2) => 0x00000003
+ * BIT_OFFSET_LEN_MASK_32(16, 2) => 0x00030000
+ */
+#define BIT_OFFSET_LEN_MASK_32(__bitoffset, __bitlen) \
+ (BIT_LEN_MASK_32(__bitlen) << (__bitoffset))
+#define BIT_OFFSET_LEN_MASK_16(__bitoffset, __bitlen) \
+ (BIT_LEN_MASK_16(__bitlen) << (__bitoffset))
+#define BIT_OFFSET_LEN_MASK_8(__bitoffset, __bitlen) \
+ (BIT_LEN_MASK_8(__bitlen) << (__bitoffset))
+
+/*Description:
+ * Return 4-byte value in host byte ordering from
+ * 4-byte pointer in little-endian system.
+ */
+#define LE_P4BYTE_TO_HOST_4BYTE(__pstart) \
+ (EF4BYTE(*((__le32 *)(__pstart))))
+#define LE_P2BYTE_TO_HOST_2BYTE(__pstart) \
+ (EF2BYTE(*((__le16 *)(__pstart))))
+#define LE_P1BYTE_TO_HOST_1BYTE(__pstart) \
+ (EF1BYTE(*((u8 *)(__pstart))))
+
+/* */
+/* Description: */
+/* Translate subfield (continuous bits in little-endian) of 4-byte value in little-endian byte ordering to */
+/* 4-byte value in host byte ordering. */
+/* */
+#define LE_BITS_TO_4BYTE(__pstart, __bitoffset, __bitlen) \
+ (\
+ (LE_P4BYTE_TO_HOST_4BYTE(__pstart) >> (__bitoffset)) & \
+ BIT_LEN_MASK_32(__bitlen) \
+ )
+#define LE_BITS_TO_2BYTE(__pstart, __bitoffset, __bitlen) \
+ (\
+ (LE_P2BYTE_TO_HOST_2BYTE(__pstart) >> (__bitoffset)) & \
+ BIT_LEN_MASK_16(__bitlen) \
+ )
+#define LE_BITS_TO_1BYTE(__pstart, __bitoffset, __bitlen) \
+ (\
+ (LE_P1BYTE_TO_HOST_1BYTE(__pstart) >> (__bitoffset)) & \
+ BIT_LEN_MASK_8(__bitlen) \
+ )
+
+/* */
+/* Description: */
+/* Mask subfield (continuous bits in little-endian) of 4-byte value in little-endian byte ordering */
+/* and return the result in 4-byte value in host byte ordering. */
+/* */
+#define LE_BITS_CLEARED_TO_4BYTE(__pstart, __bitoffset, __bitlen) \
+ (\
+ LE_P4BYTE_TO_HOST_4BYTE(__pstart) & \
+ (~BIT_OFFSET_LEN_MASK_32(__bitoffset, __bitlen)) \
+ )
+#define LE_BITS_CLEARED_TO_2BYTE(__pstart, __bitoffset, __bitlen) \
+ (\
+ LE_P2BYTE_TO_HOST_2BYTE(__pstart) & \
+ (~BIT_OFFSET_LEN_MASK_16(__bitoffset, __bitlen)) \
+ )
+#define LE_BITS_CLEARED_TO_1BYTE(__pstart, __bitoffset, __bitlen) \
+ (\
+ LE_P1BYTE_TO_HOST_1BYTE(__pstart) & \
+ (~BIT_OFFSET_LEN_MASK_8(__bitoffset, __bitlen)) \
+ )
+
+/* */
+/* Description: */
+/* Set subfield of little-endian 4-byte value to specified value. */
+/* */
+#define SET_BITS_TO_LE_4BYTE(__pstart, __bitoffset, __bitlen, __val) \
+ *((u32 *)(__pstart)) = \
+ ( \
+ LE_BITS_CLEARED_TO_4BYTE(__pstart, __bitoffset, __bitlen) | \
+ ((((u32)__val) & BIT_LEN_MASK_32(__bitlen)) << (__bitoffset)) \
+ )
+
+#define SET_BITS_TO_LE_2BYTE(__pstart, __bitoffset, __bitlen, __val) \
+ *((u16 *)(__pstart)) = \
+ ( \
+ LE_BITS_CLEARED_TO_2BYTE(__pstart, __bitoffset, __bitlen) | \
+ ((((u16)__val) & BIT_LEN_MASK_16(__bitlen)) << (__bitoffset)) \
+	)
+
+#define SET_BITS_TO_LE_1BYTE(__pstart, __bitoffset, __bitlen, __val) \
+ *((u8 *)(__pstart)) = EF1BYTE \
+ ( \
+ LE_BITS_CLEARED_TO_1BYTE(__pstart, __bitoffset, __bitlen) | \
+ ((((u8)__val) & BIT_LEN_MASK_8(__bitlen)) << (__bitoffset)) \
+ )
+
+#define LE_BITS_CLEARED_TO_1BYTE_8BIT(__pStart, __BitOffset, __BitLen) \
+ (\
+ LE_P1BYTE_TO_HOST_1BYTE(__pStart) \
+ )
+
+#define SET_BITS_TO_LE_1BYTE_8BIT(__pStart, __BitOffset, __BitLen, __Value) \
+{ \
+ *((u8 *)(__pStart)) = \
+ EF1Byte(\
+ LE_BITS_CLEARED_TO_1BYTE_8BIT(__pStart, __BitOffset, __BitLen) \
+ | \
+ ((u8)__Value) \
+ ); \
+}
+
+/* Round the given length up to a multiple of the N-byte alignment */
+#define N_BYTE_ALIGMENT(__Value, __Aligment) ((__Aligment == 1) ? (__Value) : (((__Value + __Aligment - 1) / __Aligment) * __Aligment))
+
+#define TEST_FLAG(__Flag, __testFlag) (((__Flag) & (__testFlag)) != 0)
+#define SET_FLAG(__Flag, __setFlag) ((__Flag) |= __setFlag)
+#define CLEAR_FLAG(__Flag, __clearFlag) ((__Flag) &= ~(__clearFlag))
+#define CLEAR_FLAGS(__Flag) ((__Flag) = 0)
+#define TEST_FLAGS(__Flag, __testFlags) (((__Flag) & (__testFlags)) == (__testFlags))
+
+#endif /* __BASIC_TYPES_H__ */
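
The LE_BITS_TO_* and SET_BITS_TO_LE_* macros above read and modify bit fields inside little-endian descriptors. A short worked example, assuming a little-endian host (SET_BITS_TO_LE_4BYTE stores the merged word with a plain u32 assignment, so the resulting byte order is whatever the CPU produces):

    /* Worked example on a little-endian host. */
    static void example_le_bitfield(void)
    {
            u8 desc[4] = { 0x78, 0x56, 0x34, 0x12 };        /* LE 0x12345678 */

            /* Extract bits [23:8]: (0x12345678 >> 8) & 0xFFFF == 0x3456. */
            u32 field = LE_BITS_TO_4BYTE(desc, 8, 16);

            /* Replace the same field with 0xABCD; the buffer becomes
             * 78 CD AB 12, i.e. little-endian 0x12ABCD78. */
            SET_BITS_TO_LE_4BYTE(desc, 8, 16, 0xABCD);

            (void)field;
    }
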
diff --git a/drivers/staging/rtl8723bs/include/cmd_osdep.h b/drivers/staging/rtl8723bs/include/cmd_osdep.h
new file mode 100644
index 000000000000..3ad3ace86fd4
--- /dev/null
+++ b/drivers/staging/rtl8723bs/include/cmd_osdep.h
@@ -0,0 +1,26 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#ifndef __CMD_OSDEP_H_
+#define __CMD_OSDEP_H_
+
+
+extern sint _rtw_init_cmd_priv (struct cmd_priv *pcmdpriv);
+extern sint _rtw_init_evt_priv(struct evt_priv *pevtpriv);
+extern void _rtw_free_evt_priv (struct evt_priv *pevtpriv);
+extern void _rtw_free_cmd_priv (struct cmd_priv *pcmdpriv);
+extern sint _rtw_enqueue_cmd(struct __queue *queue, struct cmd_obj *obj);
+extern struct cmd_obj *_rtw_dequeue_cmd(struct __queue *queue);
+
+#endif
diff --git a/drivers/staging/rtl8723bs/include/drv_conf.h b/drivers/staging/rtl8723bs/include/drv_conf.h
new file mode 100644
index 000000000000..3e1ed0717ed8
--- /dev/null
+++ b/drivers/staging/rtl8723bs/include/drv_conf.h
@@ -0,0 +1,37 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#ifndef __DRV_CONF_H__
+#define __DRV_CONF_H__
+#include "autoconf.h"
+
+//About USB VENDOR REQ
+#if defined(CONFIG_USB_VENDOR_REQ_BUFFER_PREALLOC) && !defined(CONFIG_USB_VENDOR_REQ_MUTEX)
+ #warning "define CONFIG_USB_VENDOR_REQ_MUTEX for CONFIG_USB_VENDOR_REQ_BUFFER_PREALLOC automatically"
+ #define CONFIG_USB_VENDOR_REQ_MUTEX
+#endif
+#if defined(CONFIG_VENDOR_REQ_RETRY) && !defined(CONFIG_USB_VENDOR_REQ_MUTEX)
+ #warning "define CONFIG_USB_VENDOR_REQ_MUTEX for CONFIG_VENDOR_REQ_RETRY automatically"
+ #define CONFIG_USB_VENDOR_REQ_MUTEX
+#endif
+
+#define DYNAMIC_CAMID_ALLOC
+
+#ifndef CONFIG_RTW_HIQ_FILTER
+ #define CONFIG_RTW_HIQ_FILTER 1
+#endif
+
+//#include <rtl871x_byteorder.h>
+
+#endif // __DRV_CONF_H__
diff --git a/drivers/staging/rtl8723bs/include/drv_types.h b/drivers/staging/rtl8723bs/include/drv_types.h
new file mode 100644
index 000000000000..4d14fbc5a1fe
--- /dev/null
+++ b/drivers/staging/rtl8723bs/include/drv_types.h
@@ -0,0 +1,720 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+/*-------------------------------------------------------------------------------
+
+ For type defines and data structure defines
+
+--------------------------------------------------------------------------------*/
+
+
+#ifndef __DRV_TYPES_H__
+#define __DRV_TYPES_H__
+
+#include <linux/version.h>
+#include <linux/sched/signal.h>
+#include <autoconf.h>
+#include <basic_types.h>
+#include <osdep_service.h>
+#include <rtw_byteorder.h>
+#include <wlan_bssdef.h>
+#include <wifi.h>
+#include <ieee80211.h>
+
+enum _NIC_VERSION {
+
+ RTL8711_NIC,
+ RTL8712_NIC,
+ RTL8713_NIC,
+ RTL8716_NIC
+
+};
+
+#include <rtw_rf.h>
+
+#include <rtw_ht.h>
+
+#ifdef CONFIG_INTEL_WIDI
+#include <rtw_intel_widi.h>
+#endif
+
+#include <rtw_cmd.h>
+#include <cmd_osdep.h>
+#include <rtw_security.h>
+#include <rtw_xmit.h>
+#include <xmit_osdep.h>
+#include <rtw_recv.h>
+
+#include <recv_osdep.h>
+#include <rtw_efuse.h>
+#include <hal_intf.h>
+#include <hal_com.h>
+#include <rtw_qos.h>
+#include <rtw_pwrctrl.h>
+#include <rtw_mlme.h>
+#include <mlme_osdep.h>
+#include <rtw_io.h>
+#include <rtw_ioctl.h>
+#include <rtw_ioctl_set.h>
+#include <osdep_intf.h>
+#include <rtw_eeprom.h>
+#include <sta_info.h>
+#include <rtw_event.h>
+#include <rtw_mlme_ext.h>
+#include <rtw_ap.h>
+#include <rtw_efuse.h>
+#include <rtw_version.h>
+#include <rtw_odm.h>
+
+#include "ioctl_cfg80211.h"
+
+#include <linux/ip.h>
+#include <linux/if_ether.h>
+#include <ethernet.h>
+
+#define SPEC_DEV_ID_NONE BIT(0)
+#define SPEC_DEV_ID_DISABLE_HT BIT(1)
+#define SPEC_DEV_ID_ENABLE_PS BIT(2)
+#define SPEC_DEV_ID_RF_CONFIG_1T1R BIT(3)
+#define SPEC_DEV_ID_RF_CONFIG_2T2R BIT(4)
+#define SPEC_DEV_ID_ASSIGN_IFNAME BIT(5)
+
+struct specific_device_id{
+
+ u32 flags;
+
+ u16 idVendor;
+ u16 idProduct;
+
+};
+
+struct registry_priv
+{
+ u8 chip_version;
+ u8 rfintfs;
+ u8 lbkmode;
+ u8 hci;
+ struct ndis_802_11_ssid ssid;
+ u8 network_mode; /* infra, ad-hoc, auto */
+ u8 channel;/* ad-hoc support requirement */
+ u8 wireless_mode;/* A, B, G, auto */
+ u8 scan_mode;/* active, passive */
+ u8 radio_enable;
+ u8 preamble;/* long, short, auto */
+ u8 vrtl_carrier_sense;/* Enable, Disable, Auto */
+ u8 vcs_type;/* RTS/CTS, CTS-to-self */
+ u16 rts_thresh;
+ u16 frag_thresh;
+ u8 adhoc_tx_pwr;
+ u8 soft_ap;
+ u8 power_mgnt;
+ u8 ips_mode;
+ u8 smart_ps;
+ u8 usb_rxagg_mode;
+ u8 long_retry_lmt;
+ u8 short_retry_lmt;
+ u16 busy_thresh;
+ u8 ack_policy;
+ u8 mp_dm;
+ u8 software_encrypt;
+ u8 software_decrypt;
+ u8 acm_method;
+ /* UAPSD */
+ u8 wmm_enable;
+ u8 uapsd_enable;
+ u8 uapsd_max_sp;
+ u8 uapsd_acbk_en;
+ u8 uapsd_acbe_en;
+ u8 uapsd_acvi_en;
+ u8 uapsd_acvo_en;
+
+ struct wlan_bssid_ex dev_network;
+
+ u8 ht_enable;
+ /* 0: 20 MHz, 1: 40 MHz, 2: 80 MHz, 3: 160MHz */
+	/* 2.4G uses bits 0 ~ 3, 5G uses bits 4 ~ 7 */
+ /* 0x21 means enable 2.4G 40MHz & 5G 80MHz */
+ u8 bw_mode;
+ u8 ampdu_enable;/* for tx */
+ u8 rx_stbc;
+	u8 ampdu_amsdu;/* A-MSDU within A-MPDU is permitted */
+ /* Short GI support Bit Map */
+ /* BIT0 - 20MHz, 1: support, 0: non-support */
+ /* BIT1 - 40MHz, 1: support, 0: non-support */
+ /* BIT2 - 80MHz, 1: support, 0: non-support */
+ /* BIT3 - 160MHz, 1: support, 0: non-support */
+ u8 short_gi;
+ /* BIT0: Enable VHT LDPC Rx, BIT1: Enable VHT LDPC Tx, BIT4: Enable HT LDPC Rx, BIT5: Enable HT LDPC Tx */
+ u8 ldpc_cap;
+ /* BIT0: Enable VHT STBC Rx, BIT1: Enable VHT STBC Tx, BIT4: Enable HT STBC Rx, BIT5: Enable HT STBC Tx */
+ u8 stbc_cap;
+ /* BIT0: Enable VHT Beamformer, BIT1: Enable VHT Beamformee, BIT4: Enable HT Beamformer, BIT5: Enable HT Beamformee */
+ u8 beamform_cap;
+
+ u8 lowrate_two_xmit;
+
+ u8 rf_config ;
+ u8 low_power ;
+
+ u8 wifi_spec;/* !turbo_mode */
+
+ u8 channel_plan;
+
+ u8 btcoex;
+ u8 bt_iso;
+ u8 bt_sco;
+ u8 bt_ampdu;
+ s8 ant_num;
+
+ bool bAcceptAddbaReq;
+
+ u8 antdiv_cfg;
+ u8 antdiv_type;
+
+ u8 usbss_enable;/* 0:disable, 1:enable */
+ u8 hwpdn_mode;/* 0:disable, 1:enable, 2:decide by EFUSE config */
+ u8 hwpwrp_detect;/* 0:disable, 1:enable */
+
+ u8 hw_wps_pbc;/* 0:disable, 1:enable */
+
+	u8 max_roaming_times; /* the max number of times the driver will try to roam */
+
+ u8 enable80211d;
+
+ u8 ifname[16];
+
+ u8 notch_filter;
+
+ /* define for tx power adjust */
+ u8 RegEnableTxPowerLimit;
+ u8 RegEnableTxPowerByRate;
+ u8 RegPowerBase;
+ u8 RegPwrTblSel;
+ s8 TxBBSwing_2G;
+ s8 TxBBSwing_5G;
+ u8 AmplifierType_2G;
+ u8 AmplifierType_5G;
+ u8 bEn_RFE;
+ u8 RFE_Type;
+ u8 check_fw_ps;
+
+ u8 load_phy_file;
+ u8 RegDecryptCustomFile;
+
+#ifdef CONFIG_MULTI_VIR_IFACES
+ u8 ext_iface_num;/* primary/secondary iface is excluded */
+#endif
+ u8 qos_opt_enable;
+
+ u8 hiq_filter;
+};
+
+
+/* For registry parameters */
+#define RGTRY_OFT(field) ((u32)FIELD_OFFSET(struct registry_priv, field))
+#define RGTRY_SZ(field) sizeof(((struct registry_priv*) 0)->field)
+#define BSSID_OFT(field) ((u32)FIELD_OFFSET(struct wlan_bssid_ex, field))
+#define BSSID_SZ(field) sizeof(((struct wlan_bssid_ex *) 0)->field)
+
+#include <drv_types_sdio.h>
+#define INTF_DATA SDIO_DATA
+
+#define is_primary_adapter(adapter) (1)
+#define get_iface_type(adapter) (IFACE_PORT0)
+#define GET_PRIMARY_ADAPTER(padapter) (((struct adapter *)padapter)->dvobj->if1)
+#define GET_IFACE_NUMS(padapter) (((struct adapter *)padapter)->dvobj->iface_nums)
+#define GET_ADAPTER(padapter, iface_id) (((struct adapter *)padapter)->dvobj->padapters[iface_id])
+
+#ifdef CONFIG_DBG_COUNTER
+
+struct rx_logs {
+ u32 intf_rx;
+ u32 intf_rx_err_recvframe;
+ u32 intf_rx_err_skb;
+ u32 intf_rx_report;
+ u32 core_rx;
+ u32 core_rx_pre;
+ u32 core_rx_pre_ver_err;
+ u32 core_rx_pre_mgmt;
+ u32 core_rx_pre_mgmt_err_80211w;
+ u32 core_rx_pre_mgmt_err;
+ u32 core_rx_pre_ctrl;
+ u32 core_rx_pre_ctrl_err;
+ u32 core_rx_pre_data;
+ u32 core_rx_pre_data_wapi_seq_err;
+ u32 core_rx_pre_data_wapi_key_err;
+ u32 core_rx_pre_data_handled;
+ u32 core_rx_pre_data_err;
+ u32 core_rx_pre_data_unknown;
+ u32 core_rx_pre_unknown;
+ u32 core_rx_enqueue;
+ u32 core_rx_dequeue;
+ u32 core_rx_post;
+ u32 core_rx_post_decrypt;
+ u32 core_rx_post_decrypt_wep;
+ u32 core_rx_post_decrypt_tkip;
+ u32 core_rx_post_decrypt_aes;
+ u32 core_rx_post_decrypt_wapi;
+ u32 core_rx_post_decrypt_hw;
+ u32 core_rx_post_decrypt_unknown;
+ u32 core_rx_post_decrypt_err;
+ u32 core_rx_post_defrag_err;
+ u32 core_rx_post_portctrl_err;
+ u32 core_rx_post_indicate;
+ u32 core_rx_post_indicate_in_oder;
+ u32 core_rx_post_indicate_reoder;
+ u32 core_rx_post_indicate_err;
+ u32 os_indicate;
+ u32 os_indicate_ap_mcast;
+ u32 os_indicate_ap_forward;
+ u32 os_indicate_ap_self;
+ u32 os_indicate_err;
+ u32 os_netif_ok;
+ u32 os_netif_err;
+};
+
+struct tx_logs {
+ u32 os_tx;
+ u32 os_tx_err_up;
+ u32 os_tx_err_xmit;
+ u32 os_tx_m2u;
+ u32 os_tx_m2u_ignore_fw_linked;
+ u32 os_tx_m2u_ignore_self;
+ u32 os_tx_m2u_entry;
+ u32 os_tx_m2u_entry_err_xmit;
+ u32 os_tx_m2u_entry_err_skb;
+ u32 os_tx_m2u_stop;
+ u32 core_tx;
+ u32 core_tx_err_pxmitframe;
+ u32 core_tx_err_brtx;
+ u32 core_tx_upd_attrib;
+ u32 core_tx_upd_attrib_adhoc;
+ u32 core_tx_upd_attrib_sta;
+ u32 core_tx_upd_attrib_ap;
+ u32 core_tx_upd_attrib_unknown;
+ u32 core_tx_upd_attrib_dhcp;
+ u32 core_tx_upd_attrib_icmp;
+ u32 core_tx_upd_attrib_active;
+ u32 core_tx_upd_attrib_err_ucast_sta;
+ u32 core_tx_upd_attrib_err_ucast_ap_link;
+ u32 core_tx_upd_attrib_err_sta;
+ u32 core_tx_upd_attrib_err_link;
+ u32 core_tx_upd_attrib_err_sec;
+ u32 core_tx_ap_enqueue_warn_fwstate;
+ u32 core_tx_ap_enqueue_warn_sta;
+ u32 core_tx_ap_enqueue_warn_nosta;
+ u32 core_tx_ap_enqueue_warn_link;
+ u32 core_tx_ap_enqueue_warn_trigger;
+ u32 core_tx_ap_enqueue_mcast;
+ u32 core_tx_ap_enqueue_ucast;
+ u32 core_tx_ap_enqueue;
+ u32 intf_tx;
+ u32 intf_tx_pending_ac;
+ u32 intf_tx_pending_fw_under_survey;
+ u32 intf_tx_pending_fw_under_linking;
+ u32 intf_tx_pending_xmitbuf;
+ u32 intf_tx_enqueue;
+ u32 core_tx_enqueue;
+ u32 core_tx_enqueue_class;
+ u32 core_tx_enqueue_class_err_sta;
+ u32 core_tx_enqueue_class_err_nosta;
+ u32 core_tx_enqueue_class_err_fwlink;
+ u32 intf_tx_direct;
+ u32 intf_tx_direct_err_coalesce;
+ u32 intf_tx_dequeue;
+ u32 intf_tx_dequeue_err_coalesce;
+ u32 intf_tx_dump_xframe;
+ u32 intf_tx_dump_xframe_err_txdesc;
+ u32 intf_tx_dump_xframe_err_port;
+};
+
+struct int_logs {
+ u32 all;
+ u32 err;
+ u32 tbdok;
+ u32 tbder;
+ u32 bcnderr;
+ u32 bcndma;
+ u32 bcndma_e;
+ u32 rx;
+ u32 rx_rdu;
+ u32 rx_fovw;
+ u32 txfovw;
+ u32 mgntok;
+ u32 highdok;
+ u32 bkdok;
+ u32 bedok;
+ u32 vidok;
+ u32 vodok;
+};
+
+#endif /* CONFIG_DBG_COUNTER */
+
+struct debug_priv {
+ u32 dbg_sdio_free_irq_error_cnt;
+ u32 dbg_sdio_alloc_irq_error_cnt;
+ u32 dbg_sdio_free_irq_cnt;
+ u32 dbg_sdio_alloc_irq_cnt;
+ u32 dbg_sdio_deinit_error_cnt;
+ u32 dbg_sdio_init_error_cnt;
+ u32 dbg_suspend_error_cnt;
+ u32 dbg_suspend_cnt;
+ u32 dbg_resume_cnt;
+ u32 dbg_resume_error_cnt;
+ u32 dbg_deinit_fail_cnt;
+ u32 dbg_carddisable_cnt;
+ u32 dbg_carddisable_error_cnt;
+ u32 dbg_ps_insuspend_cnt;
+ u32 dbg_dev_unload_inIPS_cnt;
+ u32 dbg_wow_leave_ps_fail_cnt;
+ u32 dbg_scan_pwr_state_cnt;
+ u32 dbg_downloadfw_pwr_state_cnt;
+ u32 dbg_fw_read_ps_state_fail_cnt;
+ u32 dbg_leave_ips_fail_cnt;
+ u32 dbg_leave_lps_fail_cnt;
+ u32 dbg_h2c_leave32k_fail_cnt;
+ u32 dbg_diswow_dload_fw_fail_cnt;
+ u32 dbg_enwow_dload_fw_fail_cnt;
+ u32 dbg_ips_drvopen_fail_cnt;
+ u32 dbg_poll_fail_cnt;
+ u32 dbg_rpwm_toogle_cnt;
+ u32 dbg_rpwm_timeout_fail_cnt;
+ u64 dbg_rx_fifo_last_overflow;
+ u64 dbg_rx_fifo_curr_overflow;
+ u64 dbg_rx_fifo_diff_overflow;
+ u64 dbg_rx_ampdu_drop_count;
+ u64 dbg_rx_ampdu_forced_indicate_count;
+ u64 dbg_rx_ampdu_loss_count;
+ u64 dbg_rx_dup_mgt_frame_drop_count;
+ u64 dbg_rx_ampdu_window_shift_cnt;
+};
+
+struct rtw_traffic_statistics {
+ /* tx statistics */
+ u64 tx_bytes;
+ u64 tx_pkts;
+ u64 tx_drop;
+ u64 cur_tx_bytes;
+ u64 last_tx_bytes;
+ u32 cur_tx_tp; /* Tx throughput in MBps. */
+
+ /* rx statistics */
+ u64 rx_bytes;
+ u64 rx_pkts;
+ u64 rx_drop;
+ u64 cur_rx_bytes;
+ u64 last_rx_bytes;
+ u32 cur_rx_tp; /* Rx throughput in MBps. */
+};
+
+struct cam_ctl_t {
+ _lock lock;
+ u64 bitmap;
+};
+
+struct cam_entry_cache {
+ u16 ctrl;
+ u8 mac[ETH_ALEN];
+ u8 key[16];
+};
+
+#define KEY_FMT "%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x"
+#define KEY_ARG(x) ((u8 *)(x))[0], ((u8 *)(x))[1], ((u8 *)(x))[2], ((u8 *)(x))[3], ((u8 *)(x))[4], ((u8 *)(x))[5], \
+ ((u8 *)(x))[6], ((u8 *)(x))[7], ((u8 *)(x))[8], ((u8 *)(x))[9], ((u8 *)(x))[10], ((u8 *)(x))[11], \
+ ((u8 *)(x))[12], ((u8 *)(x))[13], ((u8 *)(x))[14], ((u8 *)(x))[15]
+
+struct dvobj_priv
+{
+ /*-------- below is common data --------*/
+ struct adapter *if1; /* PRIMARY_ADAPTER */
+ struct adapter *if2; /* SECONDARY_ADAPTER */
+
+ s32 processing_dev_remove;
+
+ struct debug_priv drv_dbg;
+
+ /* for local/global synchronization */
+ /* */
+ _lock lock;
+ int macid[NUM_STA];
+
+ _mutex hw_init_mutex;
+ _mutex h2c_fwcmd_mutex;
+ _mutex setch_mutex;
+ _mutex setbw_mutex;
+
+ unsigned char oper_channel; /* saved channel info when call set_channel_bw */
+ unsigned char oper_bwmode;
+ unsigned char oper_ch_offset;/* PRIME_CHNL_OFFSET */
+ unsigned long on_oper_ch_time;
+
+ struct adapter *padapters;
+
+ struct cam_ctl_t cam_ctl;
+ struct cam_entry_cache cam_cache[TOTAL_CAM_ENTRY];
+
+	/* For 92D, DMDP has 2 interfaces. */
+ u8 InterfaceNumber;
+ u8 NumInterfaces;
+
+ /* In /Out Pipe information */
+ int RtInPipe[2];
+ int RtOutPipe[4];
+ u8 Queue2Pipe[HW_QUEUE_ENTRY];/* for out pipe mapping */
+
+ u8 irq_alloc;
+ atomic_t continual_io_error;
+
+ atomic_t disable_func;
+
+ struct pwrctrl_priv pwrctl_priv;
+
+ struct rtw_traffic_statistics traffic_stat;
+
+/*-------- below is for SDIO INTERFACE --------*/
+
+#ifdef INTF_DATA
+ INTF_DATA intf_data;
+#endif
+};
+
+#define dvobj_to_pwrctl(dvobj) (&(dvobj->pwrctl_priv))
+#define pwrctl_to_dvobj(pwrctl) container_of(pwrctl, struct dvobj_priv, pwrctl_priv)
+
+__inline static struct device *dvobj_to_dev(struct dvobj_priv *dvobj)
+{
+ /* todo: get interface type from dvobj and the return the dev accordingly */
+#ifdef RTW_DVOBJ_CHIP_HW_TYPE
+#endif
+
+ return &dvobj->intf_data.func->dev;
+}
+
+struct adapter *dvobj_get_port0_adapter(struct dvobj_priv *dvobj);
+
+enum _IFACE_TYPE {
+ IFACE_PORT0, /* mapping to port0 for C/D series chips */
+ IFACE_PORT1, /* mapping to port1 for C/D series chip */
+ MAX_IFACE_PORT,
+};
+
+enum ADAPTER_TYPE {
+ PRIMARY_ADAPTER,
+ SECONDARY_ADAPTER,
+ MAX_ADAPTER = 0xFF,
+};
+
+typedef enum _DRIVER_STATE{
+ DRIVER_NORMAL = 0,
+ DRIVER_DISAPPEAR = 1,
+ DRIVER_REPLACE_DONGLE = 2,
+}DRIVER_STATE;
+
+struct adapter {
+ int DriverState;/* for disable driver using module, use dongle to replace module. */
+ int pid[3];/* process id from UI, 0:wps, 1:hostapd, 2:dhcpcd */
+	int bDongle;/* built-in module or external dongle */
+
+ struct dvobj_priv *dvobj;
+ struct mlme_priv mlmepriv;
+ struct mlme_ext_priv mlmeextpriv;
+ struct cmd_priv cmdpriv;
+ struct evt_priv evtpriv;
+ /* struct io_queue *pio_queue; */
+ struct io_priv iopriv;
+ struct xmit_priv xmitpriv;
+ struct recv_priv recvpriv;
+ struct sta_priv stapriv;
+ struct security_priv securitypriv;
+ _lock security_key_mutex; /* add for CONFIG_IEEE80211W, none 11w also can use */
+ struct registry_priv registrypriv;
+ struct eeprom_priv eeprompriv;
+
+ struct hostapd_priv *phostapdpriv;
+
+ u32 setband;
+
+ void * HalData;
+ u32 hal_data_sz;
+ struct hal_ops HalFunc;
+
+ s32 bDriverStopped;
+ s32 bSurpriseRemoved;
+ s32 bCardDisableWOHSM;
+
+ u32 IsrContent;
+ u32 ImrContent;
+
+ u8 EepromAddressSize;
+ u8 hw_init_completed;
+ u8 bDriverIsGoingToUnload;
+ u8 init_adpt_in_progress;
+ u8 bHaltInProgress;
+
+ void *cmdThread;
+ void *evtThread;
+ void *xmitThread;
+ void *recvThread;
+
+ u32 (*intf_init)(struct dvobj_priv *dvobj);
+ void (*intf_deinit)(struct dvobj_priv *dvobj);
+ int (*intf_alloc_irq)(struct dvobj_priv *dvobj);
+ void (*intf_free_irq)(struct dvobj_priv *dvobj);
+
+
+ void (*intf_start)(struct adapter * adapter);
+ void (*intf_stop)(struct adapter * adapter);
+
+ _nic_hdl pnetdev;
+ char old_ifname[IFNAMSIZ];
+
+ /* used by the rtw_rereg_nd_name related functions */
+ struct rereg_nd_name_data {
+ _nic_hdl old_pnetdev;
+ char old_ifname[IFNAMSIZ];
+ u8 old_ips_mode;
+ u8 old_bRegUseLed;
+ } rereg_nd_name_priv;
+
+ int bup;
+ struct net_device_stats stats;
+ struct iw_statistics iwstats;
+ struct proc_dir_entry *dir_dev;/* for proc directory */
+ struct proc_dir_entry *dir_odm;
+
+ struct wireless_dev *rtw_wdev;
+ struct rtw_wdev_priv wdev_data;
+
+ int net_closed;
+
+ u8 netif_up;
+
+ u8 bFWReady;
+ u8 bBTFWReady;
+ u8 bLinkInfoDump;
+ u8 bRxRSSIDisplay;
+ /* Added by Albert 2012/10/26 */
+ /* The driver will show the desired channel number when this flag is 1. */
+ u8 bNotifyChannelChange;
+
+ /* pbuddy_adapter is used only in the two-interface case (iface_nums == 2 in struct dvobj_priv). */
+ /* PRIMARY_ADAPTER's buddy is SECONDARY_ADAPTER. */
+ /* SECONDARY_ADAPTER's buddy is PRIMARY_ADAPTER. */
+ /* For iface_id > SECONDARY_ADAPTER (IFACE_ID1), refer to padapters[iface_id] in struct dvobj_priv; */
+ /* their pbuddy_adapter is PRIMARY_ADAPTER. */
+ /* PRIMARY_ADAPTER (IFACE_ID0) can be referred to directly via if1 in struct dvobj_priv. */
+ struct adapter *pbuddy_adapter;
+
+ /* extended to support multiple interfaces */
+ /* IFACE_ID0 is equal to PRIMARY_ADAPTER */
+ /* IFACE_ID1 is equal to SECONDARY_ADAPTER */
+ u8 iface_id;
+
+ /* for debug purpose */
+ u8 fix_rate;
+ u8 driver_vcs_en; /* 1: enable, 0: disable driver control of virtual carrier sense for tx */
+ u8 driver_vcs_type;/* force 0:disable VCS, 1:RTS-CTS, 2:CTS-to-self when vcs_en = 1. */
+ u8 driver_ampdu_spacing;/* driver control AMPDU Density for peer sta's rx */
+ u8 driver_rx_ampdu_factor;/* 0xff: disable drv ctrl, 0:8k, 1:16k, 2:32k, 3:64k; */
+
+ unsigned char in_cta_test;
+
+#ifdef CONFIG_DBG_COUNTER
+ struct rx_logs rx_logs;
+ struct tx_logs tx_logs;
+ struct int_logs int_logs;
+#endif
+};
+
+#define adapter_to_dvobj(adapter) (adapter->dvobj)
+#define adapter_to_pwrctl(adapter) (dvobj_to_pwrctl(adapter->dvobj))
+#define adapter_wdev_data(adapter) (&((adapter)->wdev_data))
+
+/* */
+/* Function disabled. */
+/* */
+#define DF_TX_BIT BIT0
+#define DF_RX_BIT BIT1
+#define DF_IO_BIT BIT2
+
+/* define RTW_DISABLE_FUNC(padapter, func) (atomic_add(&adapter_to_dvobj(padapter)->disable_func, (func))) */
+/* define RTW_ENABLE_FUNC(padapter, func) (atomic_sub(&adapter_to_dvobj(padapter)->disable_func, (func))) */
+__inline static void RTW_DISABLE_FUNC(struct adapter *padapter, int func_bit)
+{
+ int df = atomic_read(&adapter_to_dvobj(padapter)->disable_func);
+ df |= func_bit;
+ atomic_set(&adapter_to_dvobj(padapter)->disable_func, df);
+}
+
+__inline static void RTW_ENABLE_FUNC(struct adapter *padapter, int func_bit)
+{
+ int df = atomic_read(&adapter_to_dvobj(padapter)->disable_func);
+ df &= ~(func_bit);
+ atomic_set(&adapter_to_dvobj(padapter)->disable_func, df);
+}
+
+#define RTW_IS_FUNC_DISABLED(padapter, func_bit) (atomic_read(&adapter_to_dvobj(padapter)->disable_func) & (func_bit))
+
+#define RTW_CANNOT_IO(padapter) \
+ ((padapter)->bSurpriseRemoved || \
+ RTW_IS_FUNC_DISABLED((padapter), DF_IO_BIT))
+
+#define RTW_CANNOT_RX(padapter) \
+ ((padapter)->bDriverStopped || \
+ (padapter)->bSurpriseRemoved || \
+ RTW_IS_FUNC_DISABLED((padapter), DF_RX_BIT))
+
+#define RTW_CANNOT_TX(padapter) \
+ ((padapter)->bDriverStopped || \
+ (padapter)->bSurpriseRemoved || \
+ RTW_IS_FUNC_DISABLED((padapter), DF_TX_BIT))
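+/*
+ * Guard sketch (illustrative): I/O paths are expected to bail out early
+ * when the corresponding function bit is disabled or the device is
+ * stopped/removed, e.g.:
+ *
+ *	if (RTW_CANNOT_TX(padapter))
+ *		return _FAIL;	// _FAIL assumed from the driver's osdep defs
+ */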
+
+#ifdef CONFIG_GPIO_API
+int rtw_get_gpio(struct net_device *netdev, int gpio_num);
+int rtw_set_gpio_output_value(struct net_device *netdev, int gpio_num, bool isHigh);
+int rtw_config_gpio(struct net_device *netdev, int gpio_num, bool isOutput);
+#endif
+
+#ifdef CONFIG_WOWLAN
+int rtw_suspend_wow(struct adapter *padapter);
+int rtw_resume_process_wow(struct adapter *padapter);
+#endif
+
+__inline static u8 *myid(struct eeprom_priv *peepriv)
+{
+ return (peepriv->mac_addr);
+}
+
+/* HCI Related header file */
+#include <sdio_osintf.h>
+#include <sdio_ops.h>
+#include <sdio_hal.h>
+
+#include <rtw_btcoex.h>
+
+void rtw_indicate_wx_disassoc_event(struct adapter *padapter);
+void rtw_indicate_wx_assoc_event(struct adapter *padapter);
+void indicate_wx_scan_complete_event(struct adapter *padapter);
+int rtw_change_ifname(struct adapter *padapter, const char *ifname);
+
+extern char *rtw_phy_file_path;
+extern char *rtw_initmac;
+extern int rtw_mc2u_disable;
+extern int rtw_ht_enable;
+extern u32 g_wait_hiq_empty;
+extern u8 g_fwdl_wintint_rdy_fail;
+extern u8 g_fwdl_chksum_fail;
+
+#endif /* __DRV_TYPES_H__ */
diff --git a/drivers/staging/rtl8723bs/include/drv_types_sdio.h b/drivers/staging/rtl8723bs/include/drv_types_sdio.h
new file mode 100644
index 000000000000..aef9bf71ab25
--- /dev/null
+++ b/drivers/staging/rtl8723bs/include/drv_types_sdio.h
@@ -0,0 +1,39 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#ifndef __DRV_TYPES_SDIO_H__
+#define __DRV_TYPES_SDIO_H__
+
+/* SDIO Header Files */
+ #include <linux/mmc/sdio_func.h>
+ #include <linux/mmc/sdio_ids.h>
+
+#if defined(CONFIG_WOWLAN) || defined(CONFIG_AP_WOWLAN)
+ #include <linux/mmc/host.h>
+ #include <linux/mmc/card.h>
+#endif
+
+typedef struct sdio_data
+{
+ u8 func_number;
+
+ u8 tx_block_mode;
+ u8 rx_block_mode;
+ u32 block_transfer_len;
+
+ struct sdio_func *func;
+ void *sys_sdio_irq_thd;
+} SDIO_DATA, *PSDIO_DATA;
+
+#endif
diff --git a/drivers/staging/rtl8723bs/include/ethernet.h b/drivers/staging/rtl8723bs/include/ethernet.h
new file mode 100644
index 000000000000..bd7099497b95
--- /dev/null
+++ b/drivers/staging/rtl8723bs/include/ethernet.h
@@ -0,0 +1,22 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+/*! \file */
+#ifndef __INC_ETHERNET_H
+#define __INC_ETHERNET_H
+
+#define ETHERNET_HEADER_SIZE 14 /* Ethernet Header Length */
+#define LLC_HEADER_SIZE 6 /* LLC Header Length */
+
+#endif /* #ifndef __INC_ETHERNET_H */
diff --git a/drivers/staging/rtl8723bs/include/hal_btcoex.h b/drivers/staging/rtl8723bs/include/hal_btcoex.h
new file mode 100644
index 000000000000..7ee59c07fbf9
--- /dev/null
+++ b/drivers/staging/rtl8723bs/include/hal_btcoex.h
@@ -0,0 +1,68 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2013 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#ifndef __HAL_BTCOEX_H__
+#define __HAL_BTCOEX_H__
+
+#include <drv_types.h>
+
+/* Some variables can't be obtained from the outsourced BT-Coex code, */
+/* so we need to save them here. */
+typedef struct _BT_COEXIST
+{
+ u8 bBtExist;
+ u8 btTotalAntNum;
+ u8 btChipType;
+ u8 bInitlized;
+} BT_COEXIST, *PBT_COEXIST;
+
+void DBG_BT_INFO(u8 *dbgmsg);
+
+void hal_btcoex_SetBTCoexist(struct adapter *padapter, u8 bBtExist);
+u8 hal_btcoex_IsBtExist(struct adapter *padapter);
+u8 hal_btcoex_IsBtDisabled(struct adapter *);
+void hal_btcoex_SetChipType(struct adapter *padapter, u8 chipType);
+void hal_btcoex_SetPgAntNum(struct adapter *padapter, u8 antNum);
+void hal_btcoex_SetSingleAntPath(struct adapter *padapter, u8 singleAntPath);
+
+u8 hal_btcoex_Initialize(struct adapter *padapter);
+void hal_btcoex_PowerOnSetting(struct adapter *padapter);
+void hal_btcoex_InitHwConfig(struct adapter *padapter, u8 bWifiOnly);
+
+void hal_btcoex_IpsNotify(struct adapter *padapter, u8 type);
+void hal_btcoex_LpsNotify(struct adapter *padapter, u8 type);
+void hal_btcoex_ScanNotify(struct adapter *padapter, u8 type);
+void hal_btcoex_ConnectNotify(struct adapter *padapter, u8 action);
+void hal_btcoex_MediaStatusNotify(struct adapter *padapter, u8 mediaStatus);
+void hal_btcoex_SpecialPacketNotify(struct adapter *padapter, u8 pktType);
+void hal_btcoex_IQKNotify(struct adapter *padapter, u8 state);
+void hal_btcoex_BtInfoNotify(struct adapter *padapter, u8 length, u8 *tmpBuf);
+void hal_btcoex_SuspendNotify(struct adapter *padapter, u8 state);
+void hal_btcoex_HaltNotify(struct adapter *padapter);
+
+void hal_btcoex_Hanlder(struct adapter *padapter);
+
+s32 hal_btcoex_IsBTCoexCtrlAMPDUSize(struct adapter *padapter);
+void hal_btcoex_SetManualControl(struct adapter *padapter, u8 bmanual);
+u8 hal_btcoex_IsBtControlLps(struct adapter *);
+u8 hal_btcoex_IsLpsOn(struct adapter *);
+u8 hal_btcoex_RpwmVal(struct adapter *);
+u8 hal_btcoex_LpsVal(struct adapter *);
+u32 hal_btcoex_GetRaMask(struct adapter *);
+void hal_btcoex_RecordPwrMode(struct adapter *padapter, u8 *pCmdBuf, u8 cmdLen);
+void hal_btcoex_DisplayBtCoexInfo(struct adapter *, u8 *pbuf, u32 bufsize);
+void hal_btcoex_SetDBG(struct adapter *, u32 *pDbgModule);
+u32 hal_btcoex_GetDBG(struct adapter *, u8 *pStrBuf, u32 bufSize);
+
+#endif /* !__HAL_BTCOEX_H__ */
diff --git a/drivers/staging/rtl8723bs/include/hal_com.h b/drivers/staging/rtl8723bs/include/hal_com.h
new file mode 100644
index 000000000000..3e9ed3b66632
--- /dev/null
+++ b/drivers/staging/rtl8723bs/include/hal_com.h
@@ -0,0 +1,309 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#ifndef __HAL_COMMON_H__
+#define __HAL_COMMON_H__
+
+#include "HalVerDef.h"
+#include "hal_pg.h"
+#include "hal_phy.h"
+#include "hal_phy_reg.h"
+#include "hal_com_reg.h"
+#include "hal_com_phycfg.h"
+
+/*------------------------------ Tx Desc definition Macro ------------------------*/
+/* pragma mark -- Tx Desc related definition. -- */
+/* */
+/* */
+/* Rate */
+/* */
+/* CCK Rates, TxHT = 0 */
+#define DESC_RATE1M 0x00
+#define DESC_RATE2M 0x01
+#define DESC_RATE5_5M 0x02
+#define DESC_RATE11M 0x03
+
+/* OFDM Rates, TxHT = 0 */
+#define DESC_RATE6M 0x04
+#define DESC_RATE9M 0x05
+#define DESC_RATE12M 0x06
+#define DESC_RATE18M 0x07
+#define DESC_RATE24M 0x08
+#define DESC_RATE36M 0x09
+#define DESC_RATE48M 0x0a
+#define DESC_RATE54M 0x0b
+
+/* MCS Rates, TxHT = 1 */
+#define DESC_RATEMCS0 0x0c
+#define DESC_RATEMCS1 0x0d
+#define DESC_RATEMCS2 0x0e
+#define DESC_RATEMCS3 0x0f
+#define DESC_RATEMCS4 0x10
+#define DESC_RATEMCS5 0x11
+#define DESC_RATEMCS6 0x12
+#define DESC_RATEMCS7 0x13
+#define DESC_RATEMCS8 0x14
+#define DESC_RATEMCS9 0x15
+#define DESC_RATEMCS10 0x16
+#define DESC_RATEMCS11 0x17
+#define DESC_RATEMCS12 0x18
+#define DESC_RATEMCS13 0x19
+#define DESC_RATEMCS14 0x1a
+#define DESC_RATEMCS15 0x1b
+#define DESC_RATEMCS16 0x1C
+#define DESC_RATEMCS17 0x1D
+#define DESC_RATEMCS18 0x1E
+#define DESC_RATEMCS19 0x1F
+#define DESC_RATEMCS20 0x20
+#define DESC_RATEMCS21 0x21
+#define DESC_RATEMCS22 0x22
+#define DESC_RATEMCS23 0x23
+#define DESC_RATEMCS24 0x24
+#define DESC_RATEMCS25 0x25
+#define DESC_RATEMCS26 0x26
+#define DESC_RATEMCS27 0x27
+#define DESC_RATEMCS28 0x28
+#define DESC_RATEMCS29 0x29
+#define DESC_RATEMCS30 0x2A
+#define DESC_RATEMCS31 0x2B
+#define DESC_RATEVHTSS1MCS0 0x2C
+#define DESC_RATEVHTSS1MCS1 0x2D
+#define DESC_RATEVHTSS1MCS2 0x2E
+#define DESC_RATEVHTSS1MCS3 0x2F
+#define DESC_RATEVHTSS1MCS4 0x30
+#define DESC_RATEVHTSS1MCS5 0x31
+#define DESC_RATEVHTSS1MCS6 0x32
+#define DESC_RATEVHTSS1MCS7 0x33
+#define DESC_RATEVHTSS1MCS8 0x34
+#define DESC_RATEVHTSS1MCS9 0x35
+#define DESC_RATEVHTSS2MCS0 0x36
+#define DESC_RATEVHTSS2MCS1 0x37
+#define DESC_RATEVHTSS2MCS2 0x38
+#define DESC_RATEVHTSS2MCS3 0x39
+#define DESC_RATEVHTSS2MCS4 0x3A
+#define DESC_RATEVHTSS2MCS5 0x3B
+#define DESC_RATEVHTSS2MCS6 0x3C
+#define DESC_RATEVHTSS2MCS7 0x3D
+#define DESC_RATEVHTSS2MCS8 0x3E
+#define DESC_RATEVHTSS2MCS9 0x3F
+#define DESC_RATEVHTSS3MCS0 0x40
+#define DESC_RATEVHTSS3MCS1 0x41
+#define DESC_RATEVHTSS3MCS2 0x42
+#define DESC_RATEVHTSS3MCS3 0x43
+#define DESC_RATEVHTSS3MCS4 0x44
+#define DESC_RATEVHTSS3MCS5 0x45
+#define DESC_RATEVHTSS3MCS6 0x46
+#define DESC_RATEVHTSS3MCS7 0x47
+#define DESC_RATEVHTSS3MCS8 0x48
+#define DESC_RATEVHTSS3MCS9 0x49
+#define DESC_RATEVHTSS4MCS0 0x4A
+#define DESC_RATEVHTSS4MCS1 0x4B
+#define DESC_RATEVHTSS4MCS2 0x4C
+#define DESC_RATEVHTSS4MCS3 0x4D
+#define DESC_RATEVHTSS4MCS4 0x4E
+#define DESC_RATEVHTSS4MCS5 0x4F
+#define DESC_RATEVHTSS4MCS6 0x50
+#define DESC_RATEVHTSS4MCS7 0x51
+#define DESC_RATEVHTSS4MCS8 0x52
+#define DESC_RATEVHTSS4MCS9 0x53
+
+#define HDATA_RATE(rate)\
+(rate ==DESC_RATE1M)?"CCK_1M":\
+(rate ==DESC_RATE2M)?"CCK_2M":\
+(rate ==DESC_RATE5_5M)?"CCK5_5M":\
+(rate ==DESC_RATE11M)?"CCK_11M":\
+(rate ==DESC_RATE6M)?"OFDM_6M":\
+(rate ==DESC_RATE9M)?"OFDM_9M":\
+(rate ==DESC_RATE12M)?"OFDM_12M":\
+(rate ==DESC_RATE18M)?"OFDM_18M":\
+(rate ==DESC_RATE24M)?"OFDM_24M":\
+(rate ==DESC_RATE36M)?"OFDM_36M":\
+(rate ==DESC_RATE48M)?"OFDM_48M":\
+(rate ==DESC_RATE54M)?"OFDM_54M":\
+(rate ==DESC_RATEMCS0)?"MCS0":\
+(rate ==DESC_RATEMCS1)?"MCS1":\
+(rate ==DESC_RATEMCS2)?"MCS2":\
+(rate ==DESC_RATEMCS3)?"MCS3":\
+(rate ==DESC_RATEMCS4)?"MCS4":\
+(rate ==DESC_RATEMCS5)?"MCS5":\
+(rate ==DESC_RATEMCS6)?"MCS6":\
+(rate ==DESC_RATEMCS7)?"MCS7":\
+(rate ==DESC_RATEMCS8)?"MCS8":\
+(rate ==DESC_RATEMCS9)?"MCS9":\
+(rate ==DESC_RATEMCS10)?"MCS10":\
+(rate ==DESC_RATEMCS11)?"MCS11":\
+(rate ==DESC_RATEMCS12)?"MCS12":\
+(rate ==DESC_RATEMCS13)?"MCS13":\
+(rate ==DESC_RATEMCS14)?"MCS14":\
+(rate ==DESC_RATEMCS15)?"MCS15":\
+(rate ==DESC_RATEVHTSS1MCS0)?"VHTSS1MCS0":\
+(rate ==DESC_RATEVHTSS1MCS1)?"VHTSS1MCS1":\
+(rate ==DESC_RATEVHTSS1MCS2)?"VHTSS1MCS2":\
+(rate ==DESC_RATEVHTSS1MCS3)?"VHTSS1MCS3":\
+(rate ==DESC_RATEVHTSS1MCS4)?"VHTSS1MCS4":\
+(rate ==DESC_RATEVHTSS1MCS5)?"VHTSS1MCS5":\
+(rate ==DESC_RATEVHTSS1MCS6)?"VHTSS1MCS6":\
+(rate ==DESC_RATEVHTSS1MCS7)?"VHTSS1MCS7":\
+(rate ==DESC_RATEVHTSS1MCS8)?"VHTSS1MCS8":\
+(rate ==DESC_RATEVHTSS1MCS9)?"VHTSS1MCS9":\
+(rate ==DESC_RATEVHTSS2MCS0)?"VHTSS2MCS0":\
+(rate ==DESC_RATEVHTSS2MCS1)?"VHTSS2MCS1":\
+(rate ==DESC_RATEVHTSS2MCS2)?"VHTSS2MCS2":\
+(rate ==DESC_RATEVHTSS2MCS3)?"VHTSS2MCS3":\
+(rate ==DESC_RATEVHTSS2MCS4)?"VHTSS2MCS4":\
+(rate ==DESC_RATEVHTSS2MCS5)?"VHTSS2MCS5":\
+(rate ==DESC_RATEVHTSS2MCS6)?"VHTSS2MCS6":\
+(rate ==DESC_RATEVHTSS2MCS7)?"VHTSS2MCS7":\
+(rate ==DESC_RATEVHTSS2MCS8)?"VHTSS2MCS8":\
+(rate ==DESC_RATEVHTSS2MCS9)?"VHTSS2MCS9":"UNKNOW"
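+/*
+ * Usage sketch (illustrative): HDATA_RATE() maps a DESC_RATE* index to a
+ * printable name, e.g.:
+ *
+ *	pr_info("tx rate: %s\n", HDATA_RATE(DESC_RATE54M));
+ *
+ * Note that MCS16-MCS31 and the 3SS/4SS VHT rates are not covered by the
+ * chain above and fall through to the final "UNKNOW" string.
+ */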
+
+
+enum{
+ UP_LINK,
+ DOWN_LINK,
+};
+typedef enum _RT_MEDIA_STATUS {
+ RT_MEDIA_DISCONNECT = 0,
+ RT_MEDIA_CONNECT = 1
+} RT_MEDIA_STATUS;
+
+#define MAX_DLFW_PAGE_SIZE 4096 /* @ page : 4k bytes */
+enum FIRMWARE_SOURCE {
+ FW_SOURCE_IMG_FILE = 0,
+ FW_SOURCE_HEADER_FILE = 1, /* from header file */
+};
+
+/* BK, BE, VI, VO, HCCA, MANAGEMENT, COMMAND, HIGH, BEACON. */
+/* define MAX_TX_QUEUE 9 */
+
+#define TX_SELE_HQ BIT(0) /* High Queue */
+#define TX_SELE_LQ BIT(1) /* Low Queue */
+#define TX_SELE_NQ BIT(2) /* Normal Queue */
+#define TX_SELE_EQ BIT(3) /* Extern Queue */
+
+#define PageNum_128(_Len) (u32)(((_Len)>>7) + ((_Len)&0x7F ? 1:0))
+#define PageNum_256(_Len) (u32)(((_Len)>>8) + ((_Len)&0xFF ? 1:0))
+#define PageNum_512(_Len) (u32)(((_Len)>>9) + ((_Len)&0x1FF ? 1:0))
+#define PageNum(_Len, _Size) (u32)(((_Len)/(_Size)) + ((_Len)&((_Size) - 1) ? 1:0))
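+/*
+ * Worked example (illustrative): PageNum_128() rounds a byte length up to
+ * 128-byte pages, e.g. PageNum_128(200) == 2 and PageNum_128(256) == 2.
+ * The generic PageNum(_Len, _Size) assumes _Size is a power of two, since
+ * it masks the remainder with (_Size - 1).
+ */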
+
+
+u8 rtw_hal_data_init(struct adapter *padapter);
+void rtw_hal_data_deinit(struct adapter *padapter);
+
+void dump_chip_info(HAL_VERSION ChipVersion);
+
+u8 /* return the final channel plan decision */
+hal_com_config_channel_plan(
+struct adapter *padapter,
+u8 hw_channel_plan, /* channel plan from HW (efuse/eeprom) */
+u8 sw_channel_plan, /* channel plan from SW (registry/module param) */
+u8 def_channel_plan, /* channel plan used when the former two are invalid */
+bool AutoLoadFail
+ );
+
+bool
+HAL_IsLegalChannel(
+struct adapter *Adapter,
+u32 Channel
+ );
+
+u8 MRateToHwRate(u8 rate);
+
+u8 HwRateToMRate(u8 rate);
+
+void HalSetBrateCfg(
+ struct adapter * Adapter,
+ u8 *mBratesOS,
+ u16 *pBrateCfg);
+
+bool
+Hal_MappingOutPipe(
+struct adapter *padapter,
+u8 NumOutPipe
+ );
+
+void hal_init_macaddr(struct adapter *adapter);
+
+void rtw_init_hal_com_default_value(struct adapter * Adapter);
+
+void c2h_evt_clear(struct adapter *adapter);
+s32 c2h_evt_read_88xx(struct adapter *adapter, u8 *buf);
+
+u8 rtw_hal_networktype_to_raid(struct adapter *adapter, struct sta_info *psta);
+u8 rtw_get_mgntframe_raid(struct adapter *adapter, unsigned char network_type);
+void rtw_hal_update_sta_rate_mask(struct adapter *padapter, struct sta_info *psta);
+
+void hw_var_port_switch (struct adapter *adapter);
+
+void SetHwReg(struct adapter *padapter, u8 variable, u8 *val);
+void GetHwReg(struct adapter *padapter, u8 variable, u8 *val);
+void rtw_hal_check_rxfifo_full(struct adapter *adapter);
+
+u8 SetHalDefVar(struct adapter *adapter, enum HAL_DEF_VARIABLE variable,
+ void *value);
+u8 GetHalDefVar(struct adapter *adapter, enum HAL_DEF_VARIABLE variable,
+ void *value);
+
+bool eqNByte(u8 *str1, u8 *str2, u32 num);
+
+bool IsHexDigit(char chTmp);
+
+u32 MapCharToHexDigit(char chTmp);
+
+bool GetHexValueFromString(char *szStr, u32 *pu4bVal, u32 *pu4bMove);
+
+bool GetFractionValueFromString(char *szStr, u8 *pInteger, u8 *pFraction,
+ u32 *pu4bMove);
+
+bool IsCommentString(char *szStr);
+
+bool ParseQualifiedString(char *In, u32 *Start, char *Out, char LeftQualifier,
+ char RightQualifier);
+
+bool GetU1ByteIntegerFromStringInDecimal(char *str, u8 *in);
+
+bool isAllSpaceOrTab(u8 *data, u8 size);
+
+void linked_info_dump(struct adapter *padapter, u8 benable);
+#ifdef DBG_RX_SIGNAL_DISPLAY_RAW_DATA
+void rtw_get_raw_rssi_info(void *sel, struct adapter *padapter);
+void rtw_store_phy_info(struct adapter *padapter, union recv_frame *prframe);
+void rtw_dump_raw_rssi_info(struct adapter *padapter);
+#endif
+
+#define HWSET_MAX_SIZE 512
+
+void rtw_bb_rf_gain_offset(struct adapter *padapter);
+
+void GetHalODMVar(struct adapter *Adapter,
+ enum HAL_ODM_VARIABLE eVariable,
+ void * pValue1,
+ void * pValue2);
+void SetHalODMVar(
+ struct adapter * Adapter,
+ enum HAL_ODM_VARIABLE eVariable,
+ void * pValue1,
+ bool bSet);
+
+#ifdef CONFIG_BACKGROUND_NOISE_MONITOR
+struct noise_info
+{
+ u8 bPauseDIG;
+ u8 IGIValue;
+ u32 max_time;/* ms */
+ u8 chan;
+};
+#endif
+
+#endif /* __HAL_COMMON_H__ */
diff --git a/drivers/staging/rtl8723bs/include/hal_com_h2c.h b/drivers/staging/rtl8723bs/include/hal_com_h2c.h
new file mode 100644
index 000000000000..86b0c42295c2
--- /dev/null
+++ b/drivers/staging/rtl8723bs/include/hal_com_h2c.h
@@ -0,0 +1,293 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#ifndef __COMMON_H2C_H__
+#define __COMMON_H2C_H__
+
+/* */
+/* H2C CMD DEFINITION ------------------------------------------------ */
+/* */
+/* 88e, 8723b, 8812, 8821, 92e use the same FW code base */
+enum h2c_cmd{
+ /* Common Class: 000 */
+ H2C_RSVD_PAGE = 0x00,
+ H2C_MEDIA_STATUS_RPT = 0x01,
+ H2C_SCAN_ENABLE = 0x02,
+ H2C_KEEP_ALIVE = 0x03,
+ H2C_DISCON_DECISION = 0x04,
+ H2C_PSD_OFFLOAD = 0x05,
+ H2C_AP_OFFLOAD = 0x08,
+ H2C_BCN_RSVDPAGE = 0x09,
+ H2C_PROBERSP_RSVDPAGE = 0x0A,
+ H2C_FCS_RSVDPAGE = 0x10,
+ H2C_FCS_INFO = 0x11,
+ H2C_AP_WOW_GPIO_CTRL = 0x13,
+
+ /* PowerSave Class: 001 */
+ H2C_SET_PWR_MODE = 0x20,
+ H2C_PS_TUNING_PARA = 0x21,
+ H2C_PS_TUNING_PARA2 = 0x22,
+ H2C_P2P_LPS_PARAM = 0x23,
+ H2C_P2P_PS_OFFLOAD = 0x24,
+ H2C_PS_SCAN_ENABLE = 0x25,
+ H2C_SAP_PS_ = 0x26,
+ H2C_INACTIVE_PS_ = 0x27, /* Inactive_PS */
+ H2C_FWLPS_IN_IPS_ = 0x28,
+
+ /* Dynamic Mechanism Class: 010 */
+ H2C_MACID_CFG = 0x40,
+ H2C_TXBF = 0x41,
+ H2C_RSSI_SETTING = 0x42,
+ H2C_AP_REQ_TXRPT = 0x43,
+ H2C_INIT_RATE_COLLECT = 0x44,
+
+ /* BT Class: 011 */
+ H2C_B_TYPE_TDMA = 0x60,
+ H2C_BT_INFO = 0x61,
+ H2C_FORCE_BT_TXPWR = 0x62,
+ H2C_BT_IGNORE_WLANACT = 0x63,
+ H2C_DAC_SWING_VALUE = 0x64,
+ H2C_ANT_SEL_RSV = 0x65,
+ H2C_WL_OPMODE = 0x66,
+ H2C_BT_MP_OPER = 0x67,
+ H2C_BT_CONTROL = 0x68,
+ H2C_BT_WIFI_CTRL = 0x69,
+ H2C_BT_FW_PATCH = 0x6A,
+
+ /* WOWLAN Class: 100 */
+ H2C_WOWLAN = 0x80,
+ H2C_REMOTE_WAKE_CTRL = 0x81,
+ H2C_AOAC_GLOBAL_INFO = 0x82,
+ H2C_AOAC_RSVD_PAGE = 0x83,
+ H2C_AOAC_RSVD_PAGE2 = 0x84,
+ H2C_D0_SCAN_OFFLOAD_CTRL = 0x85,
+ H2C_D0_SCAN_OFFLOAD_INFO = 0x86,
+ H2C_CHNL_SWITCH_OFFLOAD = 0x87,
+ H2C_AOAC_RSVDPAGE3 = 0x88,
+
+ H2C_RESET_TSF = 0xC0,
+ H2C_MAXID,
+};
+
+#define H2C_RSVDPAGE_LOC_LEN 5
+#define H2C_MEDIA_STATUS_RPT_LEN 3
+#define H2C_KEEP_ALIVE_CTRL_LEN 2
+#define H2C_DISCON_DECISION_LEN 3
+#define H2C_AP_OFFLOAD_LEN 3
+#define H2C_AP_WOW_GPIO_CTRL_LEN 4
+#define H2C_AP_PS_LEN 2
+#define H2C_PWRMODE_LEN 7
+#define H2C_PSTUNEPARAM_LEN 4
+#define H2C_MACID_CFG_LEN 7
+#define H2C_BTMP_OPER_LEN 4
+#define H2C_WOWLAN_LEN 4
+#define H2C_REMOTE_WAKE_CTRL_LEN 3
+#define H2C_AOAC_GLOBAL_INFO_LEN 2
+#define H2C_AOAC_RSVDPAGE_LOC_LEN 7
+#define H2C_SCAN_OFFLOAD_CTRL_LEN 4
+#define H2C_BT_FW_PATCH_LEN 6
+#define H2C_RSSI_SETTING_LEN 4
+#define H2C_AP_REQ_TXRPT_LEN 2
+#define H2C_FORCE_BT_TXPWR_LEN 3
+#define H2C_BCN_RSVDPAGE_LEN 5
+#define H2C_PROBERSP_RSVDPAGE_LEN 5
+
+#ifdef CONFIG_WOWLAN
+#define eqMacAddr(a, b) (((a)[0]==(b)[0] && (a)[1]==(b)[1] && (a)[2]==(b)[2] && (a)[3]==(b)[3] && (a)[4]==(b)[4] && (a)[5]==(b)[5]) ? 1:0)
+#define cpMacAddr(des, src) ((des)[0]=(src)[0], (des)[1]=(src)[1], (des)[2]=(src)[2], (des)[3]=(src)[3], (des)[4]=(src)[4], (des)[5]=(src)[5])
+#define cpIpAddr(des, src) ((des)[0]=(src)[0], (des)[1]=(src)[1], (des)[2]=(src)[2], (des)[3]=(src)[3])
+
+/* */
+/* ARP packet */
+/* */
+/* LLC Header */
+#define GET_ARP_PKT_LLC_TYPE(__pHeader) ReadEF2Byte(((u8 *)(__pHeader)) + 6)
+
+/* ARP element */
+#define GET_ARP_PKT_OPERATION(__pHeader) ReadEF2Byte(((u8 *)(__pHeader)) + 6)
+#define GET_ARP_PKT_SENDER_MAC_ADDR(__pHeader, _val) cpMacAddr((u8 *)(_val), ((u8 *)(__pHeader))+8)
+#define GET_ARP_PKT_SENDER_IP_ADDR(__pHeader, _val) cpIpAddr((u8 *)(_val), ((u8 *)(__pHeader))+14)
+#define GET_ARP_PKT_TARGET_MAC_ADDR(__pHeader, _val) cpMacAddr((u8 *)(_val), ((u8 *)(__pHeader))+18)
+#define GET_ARP_PKT_TARGET_IP_ADDR(__pHeader, _val) cpIpAddr((u8 *)(_val), ((u8 *)(__pHeader))+24)
+
+#define SET_ARP_PKT_HW(__pHeader, __Value) WRITEEF2BYTE(((u8 *)(__pHeader)) + 0, __Value)
+#define SET_ARP_PKT_PROTOCOL(__pHeader, __Value) WRITEEF2BYTE(((u8 *)(__pHeader)) + 2, __Value)
+#define SET_ARP_PKT_HW_ADDR_LEN(__pHeader, __Value) WRITEEF1BYTE(((u8 *)(__pHeader)) + 4, __Value)
+#define SET_ARP_PKT_PROTOCOL_ADDR_LEN(__pHeader, __Value) WRITEEF1BYTE(((u8 *)(__pHeader)) + 5, __Value)
+#define SET_ARP_PKT_OPERATION(__pHeader, __Value) WRITEEF2BYTE(((u8 *)(__pHeader)) + 6, __Value)
+#define SET_ARP_PKT_SENDER_MAC_ADDR(__pHeader, _val) cpMacAddr(((u8 *)(__pHeader))+8, (u8 *)(_val))
+#define SET_ARP_PKT_SENDER_IP_ADDR(__pHeader, _val) cpIpAddr(((u8 *)(__pHeader))+14, (u8 *)(_val))
+#define SET_ARP_PKT_TARGET_MAC_ADDR(__pHeader, _val) cpMacAddr(((u8 *)(__pHeader))+18, (u8 *)(_val))
+#define SET_ARP_PKT_TARGET_IP_ADDR(__pHeader, _val) cpIpAddr(((u8 *)(__pHeader))+24, (u8 *)(_val))
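+/*
+ * Sketch (illustrative): the fixed part of an ARP reply kept in a reserved
+ * page could be filled with the setters above. The constants below assume
+ * the WRITEEF*BYTE helpers store values little-endian, so the network-order
+ * fields (hardware type 0x0001, protocol 0x0800, opcode 0x0002) appear
+ * byte-swapped:
+ *
+ *	SET_ARP_PKT_HW(parp, 0x0100);
+ *	SET_ARP_PKT_PROTOCOL(parp, 0x0008);
+ *	SET_ARP_PKT_HW_ADDR_LEN(parp, 6);
+ *	SET_ARP_PKT_PROTOCOL_ADDR_LEN(parp, 4);
+ *	SET_ARP_PKT_OPERATION(parp, 0x0200);
+ */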
+
+#define FW_WOWLAN_FUN_EN BIT(0)
+#define FW_WOWLAN_PATTERN_MATCH BIT(1)
+#define FW_WOWLAN_MAGIC_PKT BIT(2)
+#define FW_WOWLAN_UNICAST BIT(3)
+#define FW_WOWLAN_ALL_PKT_DROP BIT(4)
+#define FW_WOWLAN_GPIO_ACTIVE BIT(5)
+#define FW_WOWLAN_REKEY_WAKEUP BIT(6)
+#define FW_WOWLAN_DEAUTH_WAKEUP BIT(7)
+
+#define FW_WOWLAN_GPIO_WAKEUP_EN BIT(0)
+#define FW_FW_PARSE_MAGIC_PKT BIT(1)
+
+#define FW_REMOTE_WAKE_CTRL_EN BIT(0)
+#define FW_REALWOWLAN_EN BIT(5)
+
+#define FW_WOWLAN_KEEP_ALIVE_EN BIT(0)
+#define FW_ADOPT_USER BIT(1)
+#define FW_WOWLAN_KEEP_ALIVE_PKT_TYPE BIT(2)
+
+#define FW_REMOTE_WAKE_CTRL_EN BIT(0)
+#define FW_ARP_EN BIT(1)
+#define FW_REALWOWLAN_EN BIT(5)
+#define FW_WOW_FW_UNICAST_EN BIT(7)
+
+#endif /* CONFIG_WOWLAN */
+
+/* _RSVDPAGE_LOC_CMD_0x00 */
+#define SET_H2CCMD_RSVDPAGE_LOC_PROBE_RSP(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE_8BIT(__pH2CCmd, 0, 8, __Value)
+#define SET_H2CCMD_RSVDPAGE_LOC_PSPOLL(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE_8BIT((__pH2CCmd)+1, 0, 8, __Value)
+#define SET_H2CCMD_RSVDPAGE_LOC_NULL_DATA(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE_8BIT((__pH2CCmd)+2, 0, 8, __Value)
+#define SET_H2CCMD_RSVDPAGE_LOC_QOS_NULL_DATA(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE_8BIT((__pH2CCmd)+3, 0, 8, __Value)
+#define SET_H2CCMD_RSVDPAGE_LOC_BT_QOS_NULL_DATA(__pH2CCmd, __Value)SET_BITS_TO_LE_1BYTE_8BIT((__pH2CCmd)+4, 0, 8, __Value)
+
+/* _MEDIA_STATUS_RPT_PARM_CMD_0x01 */
+#define SET_H2CCMD_MSRRPT_PARM_OPMODE(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE(__pH2CCmd, 0, 1, __Value)
+#define SET_H2CCMD_MSRRPT_PARM_MACID_IND(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE(__pH2CCmd, 1, 1, __Value)
+#define SET_H2CCMD_MSRRPT_PARM_MACID(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE_8BIT(__pH2CCmd+1, 0, 8, __Value)
+#define SET_H2CCMD_MSRRPT_PARM_MACID_END(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE_8BIT(__pH2CCmd+2, 0, 8, __Value)
+
+/* _KEEP_ALIVE_CMD_0x03 */
+#define SET_H2CCMD_KEEPALIVE_PARM_ENABLE(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE(__pH2CCmd, 0, 1, __Value)
+#define SET_H2CCMD_KEEPALIVE_PARM_ADOPT(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE(__pH2CCmd, 1, 1, __Value)
+#define SET_H2CCMD_KEEPALIVE_PARM_PKT_TYPE(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE(__pH2CCmd, 2, 1, __Value)
+#define SET_H2CCMD_KEEPALIVE_PARM_CHECK_PERIOD(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE_8BIT(__pH2CCmd+1, 0, 8, __Value)
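+/*
+ * Sketch (illustrative): a keep-alive H2C payload is a small byte buffer
+ * filled with the setters above before being handed to the firmware, e.g.:
+ *
+ *	u8 cmd[H2C_KEEP_ALIVE_CTRL_LEN] = {0};
+ *
+ *	SET_H2CCMD_KEEPALIVE_PARM_ENABLE(cmd, 1);
+ *	SET_H2CCMD_KEEPALIVE_PARM_ADOPT(cmd, 1);
+ *	SET_H2CCMD_KEEPALIVE_PARM_CHECK_PERIOD(cmd, 5); // period unit defined by FW
+ */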
+
+/* _DISCONNECT_DECISION_CMD_0x04 */
+#define SET_H2CCMD_DISCONDECISION_PARM_ENABLE(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE(__pH2CCmd, 0, 1, __Value)
+#define SET_H2CCMD_DISCONDECISION_PARM_ADOPT(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE(__pH2CCmd, 1, 1, __Value)
+#define SET_H2CCMD_DISCONDECISION_PARM_CHECK_PERIOD(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE_8BIT(__pH2CCmd+1, 0, 8, __Value)
+#define SET_H2CCMD_DISCONDECISION_PARM_TRY_PKT_NUM(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE_8BIT(__pH2CCmd+2, 0, 8, __Value)
+
+#ifdef CONFIG_AP_WOWLAN
+/* _AP_Offload 0x08 */
+#define SET_H2CCMD_AP_WOWLAN_EN(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE_8BIT(__pH2CCmd, 0, 8, __Value)
+/* _BCN_RsvdPage 0x09 */
+#define SET_H2CCMD_AP_WOWLAN_RSVDPAGE_LOC_BCN(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE_8BIT(__pH2CCmd, 0, 8, __Value)
+/* _Probersp_RsvdPage 0x0a */
+#define SET_H2CCMD_AP_WOWLAN_RSVDPAGE_LOC_ProbeRsp(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE_8BIT(__pH2CCmd, 0, 8, __Value)
+/* _Probersp_RsvdPage 0x13 */
+#define SET_H2CCMD_AP_WOW_GPIO_CTRL_INDEX(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE(__pH2CCmd, 0, 4, __Value)
+#define SET_H2CCMD_AP_WOW_GPIO_CTRL_C2H_EN(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE(__pH2CCmd, 4, 1, __Value)
+#define SET_H2CCMD_AP_WOW_GPIO_CTRL_PLUS(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE(__pH2CCmd, 5, 1, __Value)
+#define SET_H2CCMD_AP_WOW_GPIO_CTRL_HIGH_ACTIVE(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE(__pH2CCmd, 6, 1, __Value)
+#define SET_H2CCMD_AP_WOW_GPIO_CTRL_EN(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE(__pH2CCmd, 7, 1, __Value)
+#define SET_H2CCMD_AP_WOW_GPIO_CTRL_DURATION(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE_8BIT((__pH2CCmd)+1, 0, 8, __Value)
+#define SET_H2CCMD_AP_WOW_GPIO_CTRL_C2H_DURATION(__pH2CCmd, __Value)SET_BITS_TO_LE_1BYTE_8BIT((__pH2CCmd)+2, 0, 8, __Value)
+/* _AP_PS 0x26 */
+#define SET_H2CCMD_AP_WOW_PS_EN(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE(__pH2CCmd, 0, 1, __Value)
+#define SET_H2CCMD_AP_WOW_PS_32K_EN(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE(__pH2CCmd, 1, 1, __Value)
+#define SET_H2CCMD_AP_WOW_PS_RF(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE(__pH2CCmd, 2, 1, __Value)
+#define SET_H2CCMD_AP_WOW_PS_DURATION(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE_8BIT((__pH2CCmd)+1, 0, 8, __Value)
+#endif
+
+/* _WoWLAN PARAM_CMD_0x80 */
+#define SET_H2CCMD_WOWLAN_FUNC_ENABLE(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE(__pH2CCmd, 0, 1, __Value)
+#define SET_H2CCMD_WOWLAN_PATTERN_MATCH_ENABLE(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE(__pH2CCmd, 1, 1, __Value)
+#define SET_H2CCMD_WOWLAN_MAGIC_PKT_ENABLE(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE(__pH2CCmd, 2, 1, __Value)
+#define SET_H2CCMD_WOWLAN_UNICAST_PKT_ENABLE(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE(__pH2CCmd, 3, 1, __Value)
+#define SET_H2CCMD_WOWLAN_ALL_PKT_DROP(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE(__pH2CCmd, 4, 1, __Value)
+#define SET_H2CCMD_WOWLAN_GPIO_ACTIVE(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE(__pH2CCmd, 5, 1, __Value)
+#define SET_H2CCMD_WOWLAN_REKEY_WAKE_UP(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE(__pH2CCmd, 6, 1, __Value)
+#define SET_H2CCMD_WOWLAN_DISCONNECT_WAKE_UP(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE(__pH2CCmd, 7, 1, __Value)
+#define SET_H2CCMD_WOWLAN_GPIONUM(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE((__pH2CCmd)+1, 0, 7, __Value)
+#define SET_H2CCMD_WOWLAN_DATAPIN_WAKE_UP(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE((__pH2CCmd)+1, 7, 1, __Value)
+#define SET_H2CCMD_WOWLAN_GPIO_DURATION(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE_8BIT((__pH2CCmd)+2, 0, 8, __Value)
+/* define SET_H2CCMD_WOWLAN_GPIO_PULSE_EN(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE((__pH2CCmd)+3, 0, 1, __Value) */
+#define SET_H2CCMD_WOWLAN_GPIO_PULSE_COUNT(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE_8BIT((__pH2CCmd)+3, 0, 8, __Value)
+
+/* _REMOTE_WAKEUP_CMD_0x81 */
+#define SET_H2CCMD_REMOTE_WAKECTRL_ENABLE(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE(__pH2CCmd, 0, 1, __Value)
+#define SET_H2CCMD_REMOTE_WAKE_CTRL_ARP_OFFLOAD_EN(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE(__pH2CCmd, 1, 1, __Value)
+#define SET_H2CCMD_REMOTE_WAKE_CTRL_NDP_OFFLOAD_EN(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE(__pH2CCmd, 2, 1, __Value)
+#define SET_H2CCMD_REMOTE_WAKE_CTRL_GTK_OFFLOAD_EN(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE(__pH2CCmd, 3, 1, __Value)
+#define SET_H2CCMD_REMOTE_WAKE_CTRL_NLO_OFFLOAD_EN(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE(__pH2CCmd, 4, 1, __Value)
+#define SET_H2CCMD_REMOTE_WAKE_CTRL_FW_UNICAST_EN(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE(__pH2CCmd, 7, 1, __Value)
+#define SET_H2CCMD_REMOTE_WAKE_CTRL_ARP_ACTION(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE((__pH2CCmd)+2, 0, 1, __Value)
+
+/* AOAC_GLOBAL_INFO_0x82 */
+#define SET_H2CCMD_AOAC_GLOBAL_INFO_PAIRWISE_ENC_ALG(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE_8BIT(__pH2CCmd, 0, 8, __Value)
+#define SET_H2CCMD_AOAC_GLOBAL_INFO_GROUP_ENC_ALG(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE_8BIT((__pH2CCmd)+1, 0, 8, __Value)
+
+/* AOAC_RSVDPAGE_LOC_0x83 */
+#define SET_H2CCMD_AOAC_RSVDPAGE_LOC_REMOTE_WAKE_CTRL_INFO(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE_8BIT((__pH2CCmd), 0, 8, __Value)
+#define SET_H2CCMD_AOAC_RSVDPAGE_LOC_ARP_RSP(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE_8BIT((__pH2CCmd)+1, 0, 8, __Value)
+#define SET_H2CCMD_AOAC_RSVDPAGE_LOC_NEIGHBOR_ADV(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE_8BIT((__pH2CCmd)+2, 0, 8, __Value)
+#define SET_H2CCMD_AOAC_RSVDPAGE_LOC_GTK_RSP(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE_8BIT((__pH2CCmd)+3, 0, 8, __Value)
+#define SET_H2CCMD_AOAC_RSVDPAGE_LOC_GTK_INFO(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE_8BIT((__pH2CCmd)+4, 0, 8, __Value)
+#ifdef CONFIG_GTK_OL
+#define SET_H2CCMD_AOAC_RSVDPAGE_LOC_GTK_EXT_MEM(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE_8BIT((__pH2CCmd)+5, 0, 8, __Value)
+#endif /* CONFIG_GTK_OL */
+#ifdef CONFIG_PNO_SUPPORT
+#define SET_H2CCMD_AOAC_RSVDPAGE_LOC_NLO_INFO(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE_8BIT((__pH2CCmd), 0, 8, __Value)
+#endif
+
+#ifdef CONFIG_PNO_SUPPORT
+/* D0_Scan_Offload_Info_0x86 */
+#define SET_H2CCMD_AOAC_NLO_FUN_EN(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE((__pH2CCmd), 3, 1, __Value)
+#define SET_H2CCMD_AOAC_RSVDPAGE_LOC_PROBE_PACKET(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE_8BIT((__pH2CCmd)+1, 0, 8, __Value)
+#define SET_H2CCMD_AOAC_RSVDPAGE_LOC_SCAN_INFO(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE_8BIT((__pH2CCmd)+2, 0, 8, __Value)
+#define SET_H2CCMD_AOAC_RSVDPAGE_LOC_SSID_INFO(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE_8BIT((__pH2CCmd)+3, 0, 8, __Value)
+#endif /* CONFIG_PNO_SUPPORT */
+
+/* */
+/* Structure -------------------------------------------------- */
+/* */
+typedef struct _RSVDPAGE_LOC {
+ u8 LocProbeRsp;
+ u8 LocPsPoll;
+ u8 LocNullData;
+ u8 LocQosNull;
+ u8 LocBTQosNull;
+#ifdef CONFIG_WOWLAN
+ u8 LocRemoteCtrlInfo;
+ u8 LocArpRsp;
+ u8 LocNbrAdv;
+ u8 LocGTKRsp;
+ u8 LocGTKInfo;
+ u8 LocProbeReq;
+ u8 LocNetList;
+#ifdef CONFIG_GTK_OL
+ u8 LocGTKEXTMEM;
+#endif /* CONFIG_GTK_OL */
+#ifdef CONFIG_PNO_SUPPORT
+ u8 LocPNOInfo;
+ u8 LocScanInfo;
+ u8 LocSSIDInfo;
+ u8 LocProbePacket;
+#endif /* CONFIG_PNO_SUPPORT */
+#endif /* CONFIG_WOWLAN */
+#ifdef CONFIG_AP_WOWLAN
+ u8 LocApOffloadBCN;
+#endif /* CONFIG_AP_WOWLAN */
+} RSVDPAGE_LOC, *PRSVDPAGE_LOC;
+
+#endif
+#if defined(CONFIG_WOWLAN) || defined(CONFIG_AP_WOWLAN)
+void rtw_get_current_ip_address(struct adapter *padapter, u8 *pcurrentip);
+void rtw_get_sec_iv(struct adapter *padapter, u8*pcur_dot11txpn, u8 *StaAddr);
+void rtw_set_sec_pn(struct adapter *padapter);
+#endif
diff --git a/drivers/staging/rtl8723bs/include/hal_com_phycfg.h b/drivers/staging/rtl8723bs/include/hal_com_phycfg.h
new file mode 100644
index 000000000000..bcd81f50581f
--- /dev/null
+++ b/drivers/staging/rtl8723bs/include/hal_com_phycfg.h
@@ -0,0 +1,273 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#ifndef __HAL_COM_PHYCFG_H__
+#define __HAL_COM_PHYCFG_H__
+
+#define PathA 0x0 /* Useless */
+#define PathB 0x1
+#define PathC 0x2
+#define PathD 0x3
+
+enum RATE_SECTION {
+ CCK = 0,
+ OFDM,
+ HT_MCS0_MCS7,
+ HT_MCS8_MCS15,
+ HT_MCS16_MCS23,
+ HT_MCS24_MCS31,
+ VHT_1SSMCS0_1SSMCS9,
+ VHT_2SSMCS0_2SSMCS9,
+ VHT_3SSMCS0_3SSMCS9,
+ VHT_4SSMCS0_4SSMCS9,
+};
+
+enum RF_TX_NUM {
+ RF_1TX = 0,
+ RF_2TX,
+ RF_3TX,
+ RF_4TX,
+ RF_MAX_TX_NUM,
+ RF_TX_NUM_NONIMPLEMENT,
+};
+
+#define MAX_POWER_INDEX 0x3F
+
+enum _REGULATION_TXPWR_LMT {
+ TXPWR_LMT_FCC = 0,
+ TXPWR_LMT_MKK,
+ TXPWR_LMT_ETSI,
+ TXPWR_LMT_WW,
+ TXPWR_LMT_MAX_REGULATION_NUM,
+};
+
+/*------------------------------Define structure----------------------------*/
+struct bb_register_def {
+ u32 rfintfs; /* set software control: */
+ /* 0x870~0x877[8 bytes] */
+
+ u32 rfintfo; /* output data: */
+ /* 0x860~0x86f [16 bytes] */
+
+ u32 rfintfe; /* output enable: */
+ /* 0x860~0x86f [16 bytes] */
+
+ u32 rf3wireOffset; /* LSSI data: */
+ /* 0x840~0x84f [16 bytes] */
+
+ u32 rfHSSIPara2; /* wire parameter control2 : */
+ /* 0x824~0x827, 0x82c~0x82f,
+ * 0x834~0x837, 0x83c~0x83f
+ */
+ u32 rfLSSIReadBack; /* LSSI RF readback data SI mode */
+ /* 0x8a0~0x8af [16 bytes] */
+
+ u32 rfLSSIReadBackPi; /* LSSI RF readback data PI mode
+ * 0x8b8-8bc for Path A and B */
+
+};
+
+u8
+PHY_GetTxPowerByRateBase(
+struct adapter * Adapter,
+u8 Band,
+u8 RfPath,
+u8 TxNum,
+enum RATE_SECTION RateSection
+ );
+
+u8
+PHY_GetRateSectionIndexOfTxPowerByRate(
+struct adapter *padapter,
+u32 RegAddr,
+u32 BitMask
+ );
+
+void
+PHY_GetRateValuesOfTxPowerByRate(
+struct adapter *padapter,
+u32 RegAddr,
+u32 BitMask,
+u32 Value,
+ u8* RateIndex,
+ s8* PwrByRateVal,
+ u8* RateNum
+ );
+
+u8
+PHY_GetRateIndexOfTxPowerByRate(
+u8 Rate
+ );
+
+void
+PHY_SetTxPowerIndexByRateSection(
+struct adapter * padapter,
+u8 RFPath,
+u8 Channel,
+u8 RateSection
+ );
+
+s8
+PHY_GetTxPowerByRate(
+struct adapter *padapter,
+u8 Band,
+u8 RFPath,
+u8 TxNum,
+u8 RateIndex
+ );
+
+void
+PHY_SetTxPowerByRate(
+struct adapter *padapter,
+u8 Band,
+u8 RFPath,
+u8 TxNum,
+u8 Rate,
+s8 Value
+ );
+
+void
+PHY_SetTxPowerLevelByPath(
+struct adapter *Adapter,
+u8 channel,
+u8 path
+ );
+
+void
+PHY_SetTxPowerIndexByRateArray(
+struct adapter * padapter,
+u8 RFPath,
+enum CHANNEL_WIDTH BandWidth,
+u8 Channel,
+u8* Rates,
+u8 RateArraySize
+ );
+
+void
+PHY_InitTxPowerByRate(
+struct adapter *padapter
+ );
+
+void
+PHY_StoreTxPowerByRate(
+struct adapter *padapter,
+u32 Band,
+u32 RfPath,
+u32 TxNum,
+u32 RegAddr,
+u32 BitMask,
+u32 Data
+ );
+
+void
+PHY_TxPowerByRateConfiguration(
+ struct adapter * padapter
+ );
+
+u8
+PHY_GetTxPowerIndexBase(
+struct adapter * padapter,
+u8 RFPath,
+u8 Rate,
+enum CHANNEL_WIDTH BandWidth,
+u8 Channel,
+ bool *bIn24G
+ );
+
+s8 PHY_GetTxPowerLimit (struct adapter *adapter, u32 RegPwrTblSel,
+ enum BAND_TYPE Band, enum CHANNEL_WIDTH Bandwidth,
+u8 RfPath,
+u8 DataRate,
+u8 Channel
+ );
+
+void
+PHY_SetTxPowerLimit(
+struct adapter * Adapter,
+u8 *Regulation,
+u8 *Band,
+u8 *Bandwidth,
+u8 *RateSection,
+u8 *RfPath,
+u8 *Channel,
+u8 *PowerLimit
+ );
+
+void
+PHY_ConvertTxPowerLimitToPowerIndex(
+struct adapter * Adapter
+ );
+
+void
+PHY_InitTxPowerLimit(
+struct adapter * Adapter
+ );
+
+s8
+PHY_GetTxPowerTrackingOffset(
+ struct adapter *padapter,
+ u8 Rate,
+ u8 RFPath
+ );
+
+u8
+PHY_GetTxPowerIndex(
+struct adapter * padapter,
+u8 RFPath,
+u8 Rate,
+enum CHANNEL_WIDTH BandWidth,
+u8 Channel
+ );
+
+void
+PHY_SetTxPowerIndex(
+struct adapter * padapter,
+u32 PowerIndex,
+u8 RFPath,
+u8 Rate
+ );
+
+void
+Hal_ChannelPlanToRegulation(
+struct adapter * Adapter,
+u16 ChannelPlan
+ );
+
+#define MAX_PARA_FILE_BUF_LEN 25600
+
+#define LOAD_MAC_PARA_FILE BIT0
+#define LOAD_BB_PARA_FILE BIT1
+#define LOAD_BB_PG_PARA_FILE BIT2
+#define LOAD_BB_MP_PARA_FILE BIT3
+#define LOAD_RF_PARA_FILE BIT4
+#define LOAD_RF_TXPWR_TRACK_PARA_FILE BIT5
+#define LOAD_RF_TXPWR_LMT_PARA_FILE BIT6
+
+int phy_ConfigMACWithParaFile(struct adapter *Adapter, char*pFileName);
+
+int phy_ConfigBBWithParaFile(struct adapter *Adapter, char*pFileName, u32 ConfigType);
+
+int phy_ConfigBBWithPgParaFile(struct adapter *Adapter, char*pFileName);
+
+int phy_ConfigBBWithMpParaFile(struct adapter *Adapter, char*pFileName);
+
+int PHY_ConfigRFWithParaFile(struct adapter *Adapter, char*pFileName, u8 eRFPath);
+
+int PHY_ConfigRFWithTxPwrTrackParaFile(struct adapter *Adapter, char*pFileName);
+
+int PHY_ConfigRFWithPowerLimitTableParaFile(struct adapter *Adapter, char*pFileName);
+
+void phy_free_filebuf(struct adapter *padapter);
+
+#endif /* __HAL_COMMON_H__ */
diff --git a/drivers/staging/rtl8723bs/include/hal_com_reg.h b/drivers/staging/rtl8723bs/include/hal_com_reg.h
new file mode 100644
index 000000000000..fbf33dba1757
--- /dev/null
+++ b/drivers/staging/rtl8723bs/include/hal_com_reg.h
@@ -0,0 +1,1725 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#ifndef __HAL_COMMON_REG_H__
+#define __HAL_COMMON_REG_H__
+
+
+#define MAC_ADDR_LEN 6
+
+#define HAL_NAV_UPPER_UNIT 128 /* micro-second */
+
+/* 8188E PKT_BUFF_ACCESS_CTRL value */
+#define TXPKT_BUF_SELECT 0x69
+#define RXPKT_BUF_SELECT 0xA5
+#define DISABLE_TRXPKT_BUF_ACCESS 0x0
+
+/* */
+/* */
+/* */
+
+/* */
+/* */
+/* 0x0000h ~ 0x00FFh System Configuration */
+/* */
+/* */
+#define REG_SYS_ISO_CTRL 0x0000
+#define REG_SYS_FUNC_EN 0x0002
+#define REG_APS_FSMCO 0x0004
+#define REG_SYS_CLKR 0x0008
+#define REG_9346CR 0x000A
+#define REG_SYS_EEPROM_CTRL 0x000A
+#define REG_EE_VPD 0x000C
+#define REG_AFE_MISC 0x0010
+#define REG_SPS0_CTRL 0x0011
+#define REG_SPS0_CTRL_6 0x0016
+#define REG_POWER_OFF_IN_PROCESS 0x0017
+#define REG_SPS_OCP_CFG 0x0018
+#define REG_RSV_CTRL 0x001C
+#define REG_RF_CTRL 0x001F
+#define REG_LDOA15_CTRL 0x0020
+#define REG_LDOV12D_CTRL 0x0021
+#define REG_LDOHCI12_CTRL 0x0022
+#define REG_LPLDO_CTRL 0x0023
+#define REG_AFE_XTAL_CTRL 0x0024
+#define REG_AFE_LDO_CTRL 0x0027 /* 1.5v for 8188EE test chip, 1.4v for MP chip */
+#define REG_AFE_PLL_CTRL 0x0028
+#define REG_MAC_PHY_CTRL 0x002c /* for 92d, DMDP, SMSP, DMSP control */
+#define REG_APE_PLL_CTRL_EXT 0x002c
+#define REG_EFUSE_CTRL 0x0030
+#define REG_EFUSE_TEST 0x0034
+#define REG_PWR_DATA 0x0038
+#define REG_CAL_TIMER 0x003C
+#define REG_ACLK_MON 0x003E
+#define REG_GPIO_MUXCFG 0x0040
+#define REG_GPIO_IO_SEL 0x0042
+#define REG_MAC_PINMUX_CFG 0x0043
+#define REG_GPIO_PIN_CTRL 0x0044
+#define REG_GPIO_INTM 0x0048
+#define REG_LEDCFG0 0x004C
+#define REG_LEDCFG1 0x004D
+#define REG_LEDCFG2 0x004E
+#define REG_LEDCFG3 0x004F
+#define REG_FSIMR 0x0050
+#define REG_FSISR 0x0054
+#define REG_HSIMR 0x0058
+#define REG_HSISR 0x005c
+#define REG_GPIO_PIN_CTRL_2 0x0060 /* RTL8723 WIFI/BT/GPS Multi-Function GPIO Pin Control. */
+#define REG_GPIO_IO_SEL_2 0x0062 /* RTL8723 WIFI/BT/GPS Multi-Function GPIO Select. */
+#define REG_MULTI_FUNC_CTRL 0x0068 /* RTL8723 WIFI/BT/GPS Multi-Function control source. */
+#define REG_GSSR 0x006c
+#define REG_AFE_XTAL_CTRL_EXT 0x0078 /* RTL8188E */
+#define REG_XCK_OUT_CTRL 0x007c /* RTL8188E */
+#define REG_MCUFWDL 0x0080
+#define REG_WOL_EVENT 0x0081 /* RTL8188E */
+#define REG_MCUTSTCFG 0x0084
+#define REG_FDHM0 0x0088
+#define REG_HOST_SUSP_CNT 0x00BC /* RTL8192C Host suspend counter on FPGA platform */
+#define REG_SYSTEM_ON_CTRL 0x00CC /* For 8723AE Reset after S3 */
+#define REG_EFUSE_ACCESS 0x00CF /* Efuse access protection for RTL8723 */
+#define REG_BIST_SCAN 0x00D0
+#define REG_BIST_RPT 0x00D4
+#define REG_BIST_ROM_RPT 0x00D8
+#define REG_USB_SIE_INTF 0x00E0
+#define REG_PCIE_MIO_INTF 0x00E4
+#define REG_PCIE_MIO_INTD 0x00E8
+#define REG_HPON_FSM 0x00EC
+#define REG_SYS_CFG 0x00F0
+#define REG_GPIO_OUTSTS 0x00F4 /* For RTL8723 only. */
+#define REG_TYPE_ID 0x00FC
+
+/* */
+/* 2010/12/29 MH Add for 92D */
+/* */
+#define REG_MAC_PHY_CTRL_NORMAL 0x00f8
+
+
+/* */
+/* */
+/* 0x0100h ~ 0x01FFh MACTOP General Configuration */
+/* */
+/* */
+#define REG_CR 0x0100
+#define REG_PBP 0x0104
+#define REG_PKT_BUFF_ACCESS_CTRL 0x0106
+#define REG_TRXDMA_CTRL 0x010C
+#define REG_TRXFF_BNDY 0x0114
+#define REG_TRXFF_STATUS 0x0118
+#define REG_RXFF_PTR 0x011C
+#define REG_HIMR 0x0120
+#define REG_HISR 0x0124
+#define REG_HIMRE 0x0128
+#define REG_HISRE 0x012C
+#define REG_CPWM 0x012F
+#define REG_FWIMR 0x0130
+#define REG_FWISR 0x0134
+#define REG_FTIMR 0x0138
+#define REG_FTISR 0x013C /* RTL8192C */
+#define REG_PKTBUF_DBG_CTRL 0x0140
+#define REG_RXPKTBUF_CTRL (REG_PKTBUF_DBG_CTRL+2)
+#define REG_PKTBUF_DBG_DATA_L 0x0144
+#define REG_PKTBUF_DBG_DATA_H 0x0148
+
+#define REG_TC0_CTRL 0x0150
+#define REG_TC1_CTRL 0x0154
+#define REG_TC2_CTRL 0x0158
+#define REG_TC3_CTRL 0x015C
+#define REG_TC4_CTRL 0x0160
+#define REG_TCUNIT_BASE 0x0164
+#define REG_MBIST_START 0x0174
+#define REG_MBIST_DONE 0x0178
+#define REG_MBIST_FAIL 0x017C
+#define REG_32K_CTRL 0x0194 /* RTL8188E */
+#define REG_C2HEVT_MSG_NORMAL 0x01A0
+#define REG_C2HEVT_CLEAR 0x01AF
+#define REG_MCUTST_1 0x01c0
+#define REG_MCUTST_WOWLAN 0x01C7 /* Defined after 8188E series. */
+#define REG_FMETHR 0x01C8
+#define REG_HMETFR 0x01CC
+#define REG_HMEBOX_0 0x01D0
+#define REG_HMEBOX_1 0x01D4
+#define REG_HMEBOX_2 0x01D8
+#define REG_HMEBOX_3 0x01DC
+#define REG_LLT_INIT 0x01E0
+
+
+/* */
+/* */
+/* 0x0200h ~ 0x027Fh TXDMA Configuration */
+/* */
+/* */
+#define REG_RQPN 0x0200
+#define REG_FIFOPAGE 0x0204
+#define REG_TDECTRL 0x0208
+#define REG_TXDMA_OFFSET_CHK 0x020C
+#define REG_TXDMA_STATUS 0x0210
+#define REG_RQPN_NPQ 0x0214
+#define REG_AUTO_LLT 0x0224
+
+
+/* */
+/* */
+/* 0x0280h ~ 0x02FFh RXDMA Configuration */
+/* */
+/* */
+#define REG_RXDMA_AGG_PG_TH 0x0280
+#define REG_RXPKT_NUM 0x0284
+#define REG_RXDMA_STATUS 0x0288
+
+/* */
+/* */
+/* 0x0300h ~ 0x03FFh PCIe */
+/* */
+/* */
+#define REG_PCIE_CTRL_REG 0x0300
+#define REG_INT_MIG 0x0304 /* Interrupt Migration */
+#define REG_BCNQ_DESA 0x0308 /* TX Beacon Descriptor Address */
+#define REG_HQ_DESA 0x0310 /* TX High Queue Descriptor Address */
+#define REG_MGQ_DESA 0x0318 /* TX Manage Queue Descriptor Address */
+#define REG_VOQ_DESA 0x0320 /* TX VO Queue Descriptor Address */
+#define REG_VIQ_DESA 0x0328 /* TX VI Queue Descriptor Address */
+#define REG_BEQ_DESA 0x0330 /* TX BE Queue Descriptor Address */
+#define REG_BKQ_DESA 0x0338 /* TX BK Queue Descriptor Address */
+#define REG_RX_DESA 0x0340 /* RX Queue Descriptor Address */
+/* sherry added for DBI Read/Write 20091126 */
+#define REG_DBI_WDATA 0x0348 /* Backdoor REG for Access Configuration */
+#define REG_DBI_RDATA 0x034C /* Backdoor REG for Access Configuration */
+#define REG_DBI_CTRL 0x0350 /* Backdoor REG for Access Configuration */
+#define REG_DBI_FLAG 0x0352 /* Backdoor REG for Access Configuration */
+#define REG_MDIO 0x0354 /* MDIO for Access PCIE PHY */
+#define REG_DBG_SEL 0x0360 /* Debug Selection Register */
+#define REG_PCIE_HRPWM 0x0361 /* PCIe RPWM */
+#define REG_PCIE_HCPWM 0x0363 /* PCIe CPWM */
+#define REG_WATCH_DOG 0x0368
+
+/* RTL8723 series ------------------------------- */
+#define REG_PCIE_HISR_EN 0x0394 /* PCIE Local Interrupt Enable Register */
+#define REG_PCIE_HISR 0x03A0
+#define REG_PCIE_HISRE 0x03A4
+#define REG_PCIE_HIMR 0x03A8
+#define REG_PCIE_HIMRE 0x03AC
+
+#define REG_USB_HIMR 0xFE38
+#define REG_USB_HIMRE 0xFE3C
+#define REG_USB_HISR 0xFE78
+#define REG_USB_HISRE 0xFE7C
+
+
+/* */
+/* */
+/* 0x0400h ~ 0x047Fh Protocol Configuration */
+/* */
+/* */
+#define REG_VOQ_INFORMATION 0x0400
+#define REG_VIQ_INFORMATION 0x0404
+#define REG_BEQ_INFORMATION 0x0408
+#define REG_BKQ_INFORMATION 0x040C
+#define REG_MGQ_INFORMATION 0x0410
+#define REG_HGQ_INFORMATION 0x0414
+#define REG_BCNQ_INFORMATION 0x0418
+#define REG_TXPKT_EMPTY 0x041A
+#define REG_CPU_MGQ_INFORMATION 0x041C
+#define REG_FWHW_TXQ_CTRL 0x0420
+#define REG_HWSEQ_CTRL 0x0423
+#define REG_BCNQ_BDNY 0x0424
+#define REG_MGQ_BDNY 0x0425
+#define REG_LIFETIME_CTRL 0x0426
+#define REG_MULTI_BCNQ_OFFSET 0x0427
+#define REG_SPEC_SIFS 0x0428
+#define REG_RL 0x042A
+#define REG_DARFRC 0x0430
+#define REG_RARFRC 0x0438
+#define REG_RRSR 0x0440
+#define REG_ARFR0 0x0444
+#define REG_ARFR1 0x0448
+#define REG_ARFR2 0x044C
+#define REG_ARFR3 0x0450
+#define REG_BCNQ1_BDNY 0x0457
+
+#define REG_AGGLEN_LMT 0x0458
+#define REG_AMPDU_MIN_SPACE 0x045C
+#define REG_WMAC_LBK_BF_HD 0x045D
+#define REG_FAST_EDCA_CTRL 0x0460
+#define REG_RD_RESP_PKT_TH 0x0463
+
+#define REG_INIRTS_RATE_SEL 0x0480
+#define REG_INIDATA_RATE_SEL 0x0484
+
+#define REG_POWER_STAGE1 0x04B4
+#define REG_POWER_STAGE2 0x04B8
+#define REG_PKT_VO_VI_LIFE_TIME 0x04C0
+#define REG_PKT_BE_BK_LIFE_TIME 0x04C2
+#define REG_STBC_SETTING 0x04C4
+#define REG_QUEUE_CTRL 0x04C6
+#define REG_SINGLE_AMPDU_CTRL 0x04c7
+#define REG_PROT_MODE_CTRL 0x04C8
+#define REG_MAX_AGGR_NUM 0x04CA
+#define REG_RTS_MAX_AGGR_NUM 0x04CB
+#define REG_BAR_MODE_CTRL 0x04CC
+#define REG_RA_TRY_RATE_AGG_LMT 0x04CF
+#define REG_EARLY_MODE_CONTROL 0x04D0
+#define REG_MACID_SLEEP 0x04D4
+#define REG_NQOS_SEQ 0x04DC
+#define REG_QOS_SEQ 0x04DE
+#define REG_NEED_CPU_HANDLE 0x04E0
+#define REG_PKT_LOSE_RPT 0x04E1
+#define REG_PTCL_ERR_STATUS 0x04E2
+#define REG_TX_RPT_CTRL 0x04EC
+#define REG_TX_RPT_TIME 0x04F0 /* 2 byte */
+#define REG_DUMMY 0x04FC
+
+/* */
+/* */
+/* 0x0500h ~ 0x05FFh EDCA Configuration */
+/* */
+/* */
+#define REG_EDCA_VO_PARAM 0x0500
+#define REG_EDCA_VI_PARAM 0x0504
+#define REG_EDCA_BE_PARAM 0x0508
+#define REG_EDCA_BK_PARAM 0x050C
+#define REG_BCNTCFG 0x0510
+#define REG_PIFS 0x0512
+#define REG_RDG_PIFS 0x0513
+#define REG_SIFS_CTX 0x0514
+#define REG_SIFS_TRX 0x0516
+#define REG_TSFTR_SYN_OFFSET 0x0518
+#define REG_AGGR_BREAK_TIME 0x051A
+#define REG_SLOT 0x051B
+#define REG_TX_PTCL_CTRL 0x0520
+#define REG_TXPAUSE 0x0522
+#define REG_DIS_TXREQ_CLR 0x0523
+#define REG_RD_CTRL 0x0524
+/* */
+/* Format for offset 540h-542h: */
+/* [3:0]: TBTT prohibit setup in unit of 32us. The time for HW getting beacon content before TBTT. */
+/* [7:4]: Reserved. */
+/* [19:8]: TBTT prohibit hold in unit of 32us. The time for HW holding to send the beacon packet. */
+/* [23:20]: Reserved */
+/* Description: */
+/* | */
+/* |<--Setup--|--Hold------------>| */
+/* --------------|---------------------- */
+/* | */
+/* TBTT */
+/* Note: We cannot update beacon content to HW or send any AC packets during the time between Setup and Hold. */
+/* Described by Designer Tim and Bruce, 2011-01-14. */
+/* */
+#define REG_TBTT_PROHIBIT 0x0540
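+/*
+ * Sketch (illustrative): with the layout described above ([3:0] setup,
+ * [19:8] hold, both in 32 us units), a raw TBTT prohibit value could be
+ * composed as follows; a read-modify-write that preserves the reserved
+ * bits would be safer in practice:
+ *
+ *	u32 val = (setup_32us & 0xF) | ((hold_32us & 0xFFF) << 8);
+ *	rtw_write32(padapter, REG_TBTT_PROHIBIT, val); // rtw_write32 assumed
+ *						       // from the driver's IO layer
+ */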
+#define REG_RD_NAV_NXT 0x0544
+#define REG_NAV_PROT_LEN 0x0546
+#define REG_BCN_CTRL 0x0550
+#define REG_BCN_CTRL_1 0x0551
+#define REG_MBID_NUM 0x0552
+#define REG_DUAL_TSF_RST 0x0553
+#define REG_BCN_INTERVAL 0x0554 /* The same as REG_MBSSID_BCN_SPACE */
+#define REG_DRVERLYINT 0x0558
+#define REG_BCNDMATIM 0x0559
+#define REG_ATIMWND 0x055A
+#define REG_USTIME_TSF 0x055C
+#define REG_BCN_MAX_ERR 0x055D
+#define REG_RXTSF_OFFSET_CCK 0x055E
+#define REG_RXTSF_OFFSET_OFDM 0x055F
+#define REG_TSFTR 0x0560
+#define REG_TSFTR1 0x0568 /* HW Port 1 TSF Register */
+#define REG_ATIMWND_1 0x0570
+#define REG_P2P_CTWIN 0x0572 /* 1 Byte long (in unit of TU) */
+#define REG_PSTIMER 0x0580
+#define REG_TIMER0 0x0584
+#define REG_TIMER1 0x0588
+#define REG_ACMHWCTRL 0x05C0
+#define REG_NOA_DESC_SEL 0x05CF
+#define REG_NOA_DESC_DURATION 0x05E0
+#define REG_NOA_DESC_INTERVAL 0x05E4
+#define REG_NOA_DESC_START 0x05E8
+#define REG_NOA_DESC_COUNT 0x05EC
+
+#define REG_DMC 0x05F0 /* Dual MAC Co-Existence Register */
+#define REG_SCH_TX_CMD 0x05F8
+
+#define REG_FW_RESET_TSF_CNT_1 0x05FC
+#define REG_FW_RESET_TSF_CNT_0 0x05FD
+#define REG_FW_BCN_DIS_CNT 0x05FE
+
+/* */
+/* */
+/* 0x0600h ~ 0x07FFh WMAC Configuration */
+/* */
+/* */
+#define REG_APSD_CTRL 0x0600
+#define REG_BWOPMODE 0x0603
+#define REG_TCR 0x0604
+#define REG_RCR 0x0608
+#define REG_RX_PKT_LIMIT 0x060C
+#define REG_RX_DLK_TIME 0x060D
+#define REG_RX_DRVINFO_SZ 0x060F
+
+#define REG_MACID 0x0610
+#define REG_BSSID 0x0618
+#define REG_MAR 0x0620
+#define REG_MBIDCAMCFG 0x0628
+
+#define REG_PNO_STATUS 0x0631
+#define REG_USTIME_EDCA 0x0638
+#define REG_MAC_SPEC_SIFS 0x063A
+/* 20100719 Joseph: Hardware register definition change. (HW datasheet v54) */
+#define REG_RESP_SIFS_CCK 0x063C /* [15:8]SIFS_R2T_OFDM, [7:0]SIFS_R2T_CCK */
+#define REG_RESP_SIFS_OFDM 0x063E /* [15:8]SIFS_T2T_OFDM, [7:0]SIFS_T2T_CCK */
+
+#define REG_ACKTO 0x0640
+#define REG_CTS2TO 0x0641
+#define REG_EIFS 0x0642
+
+
+/* RXERR_RPT */
+#define RXERR_TYPE_OFDM_PPDU 0
+#define RXERR_TYPE_OFDMfalse_ALARM 1
+#define RXERR_TYPE_OFDM_MPDU_OK 2
+#define RXERR_TYPE_OFDM_MPDU_FAIL 3
+#define RXERR_TYPE_CCK_PPDU 4
+#define RXERR_TYPE_CCKfalse_ALARM 5
+#define RXERR_TYPE_CCK_MPDU_OK 6
+#define RXERR_TYPE_CCK_MPDU_FAIL 7
+#define RXERR_TYPE_HT_PPDU 8
+#define RXERR_TYPE_HTfalse_ALARM 9
+#define RXERR_TYPE_HT_MPDU_TOTAL 10
+#define RXERR_TYPE_HT_MPDU_OK 11
+#define RXERR_TYPE_HT_MPDU_FAIL 12
+#define RXERR_TYPE_RX_FULL_DROP 15
+
+#define RXERR_COUNTER_MASK 0xFFFFF
+#define RXERR_RPT_RST BIT(27)
+#define _RXERR_RPT_SEL(type) ((type) << 28)
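+/*
+ * Sketch (illustrative): a counter is selected by writing its type to bits
+ * [31:28] of REG_RXERR_RPT (defined below), optionally together with the
+ * reset bit, e.g.:
+ *
+ *	u32 v = _RXERR_RPT_SEL(RXERR_TYPE_CCK_MPDU_FAIL) | RXERR_RPT_RST;
+ *	rtw_write32(padapter, REG_RXERR_RPT, v);	// rtw_write32 assumed
+ *
+ * and the 20-bit counter is then read back through RXERR_COUNTER_MASK.
+ */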
+
+/* */
+/* Note: */
+/* The NAV upper value is very important to WiFi 11n 5.2.3 NAV test. The default value is */
+/* always too small, but the WiFi TestPlan tests with 25,000 microseconds of NAV by sending */
+/* CTS in the air. We must update this value to more than 25,000 microseconds to pass the item. */
+/* The offset of NAV_UPPER in 8192C Spec is incorrect, and the offset should be 0x0652. Commented */
+/* by SD1 Scott. */
+/* By Bruce, 2011-07-18. */
+/* */
+#define REG_NAV_UPPER 0x0652 /* unit of 128 */
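+/*
+ * Example (illustrative): the register holds the NAV upper bound in
+ * HAL_NAV_UPPER_UNIT (128 us) steps, so covering a 25,000 us NAV needs
+ * roughly (25000 + 128 - 1) / 128 = 196 written to REG_NAV_UPPER.
+ */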
+
+/* WMA, BA, CCX */
+#define REG_NAV_CTRL 0x0650
+#define REG_BACAMCMD 0x0654
+#define REG_BACAMCONTENT 0x0658
+#define REG_LBDLY 0x0660
+#define REG_FWDLY 0x0661
+#define REG_RXERR_RPT 0x0664
+#define REG_WMAC_TRXPTCL_CTL 0x0668
+
+/* Security */
+#define REG_CAMCMD 0x0670
+#define REG_CAMWRITE 0x0674
+#define REG_CAMREAD 0x0678
+#define REG_CAMDBG 0x067C
+#define REG_SECCFG 0x0680
+
+/* Power */
+#define REG_WOW_CTRL 0x0690
+#define REG_PS_RX_INFO 0x0692
+#define REG_UAPSD_TID 0x0693
+#define REG_WKFMCAM_CMD 0x0698
+#define REG_WKFMCAM_NUM REG_WKFMCAM_CMD
+#define REG_WKFMCAM_RWD 0x069C
+#define REG_RXFLTMAP0 0x06A0
+#define REG_RXFLTMAP1 0x06A2
+#define REG_RXFLTMAP2 0x06A4
+#define REG_BCN_PSR_RPT 0x06A8
+#define REG_BT_COEX_TABLE 0x06C0
+
+/* Hardware Port 2 */
+#define REG_MACID1 0x0700
+#define REG_BSSID1 0x0708
+
+
+/* */
+/* */
+/* 0xFE00h ~ 0xFE55h USB Configuration */
+/* */
+/* */
+#define REG_USB_INFO 0xFE17
+#define REG_USB_SPECIAL_OPTION 0xFE55
+#define REG_USB_DMA_AGG_TO 0xFE5B
+#define REG_USB_AGG_TO 0xFE5C
+#define REG_USB_AGG_TH 0xFE5D
+
+#define REG_USB_HRPWM 0xFE58
+#define REG_USB_HCPWM 0xFE57
+
+/* for 92DU high_Queue low_Queue Normal_Queue select */
+#define REG_USB_High_NORMAL_Queue_Select_MAC0 0xFE44
+/* define REG_USB_LOW_Queue_Select_MAC0 0xFE45 */
+#define REG_USB_High_NORMAL_Queue_Select_MAC1 0xFE47
+/* define REG_USB_LOW_Queue_Select_MAC1 0xFE48 */
+
+/* For test chip */
+#define REG_TEST_USB_TXQS 0xFE48
+#define REG_TEST_SIE_VID 0xFE60 /* 0xFE60~0xFE61 */
+#define REG_TEST_SIE_PID 0xFE62 /* 0xFE62~0xFE63 */
+#define REG_TEST_SIE_OPTIONAL 0xFE64
+#define REG_TEST_SIE_CHIRP_K 0xFE65
+#define REG_TEST_SIE_PHY 0xFE66 /* 0xFE66~0xFE6B */
+#define REG_TEST_SIE_MAC_ADDR 0xFE70 /* 0xFE70~0xFE75 */
+#define REG_TEST_SIE_STRING 0xFE80 /* 0xFE80~0xFEB9 */
+
+
+/* For normal chip */
+#define REG_NORMAL_SIE_VID 0xFE60 /* 0xFE60~0xFE61 */
+#define REG_NORMAL_SIE_PID 0xFE62 /* 0xFE62~0xFE63 */
+#define REG_NORMAL_SIE_OPTIONAL 0xFE64
+#define REG_NORMAL_SIE_EP 0xFE65 /* 0xFE65~0xFE67 */
+#define REG_NORMAL_SIE_PHY 0xFE68 /* 0xFE68~0xFE6B */
+#define REG_NORMAL_SIE_OPTIONAL2 0xFE6C
+#define REG_NORMAL_SIE_GPS_EP 0xFE6D /* 0xFE6D, for RTL8723 only. */
+#define REG_NORMAL_SIE_MAC_ADDR 0xFE70 /* 0xFE70~0xFE75 */
+#define REG_NORMAL_SIE_STRING 0xFE80 /* 0xFE80~0xFEDF */
+
+
+/* */
+/* */
+/* Redefine 8192C register definitions for compatibility */
+/* */
+/* */
+
+/* TODO: use these definitions when switching to the REG_xxx naming rule. */
+/* NOTE: DO NOT remove these definitions. They will be used later. */
+
+#define EFUSE_CTRL REG_EFUSE_CTRL /* E-Fuse Control. */
+#define EFUSE_TEST REG_EFUSE_TEST /* E-Fuse Test. */
+#define MSR (REG_CR + 2) /* Media Status register */
+/* define ISR REG_HISR */
+
+#define TSFR REG_TSFTR /* Timing Sync Function Timer Register. */
+#define TSFR1 REG_TSFTR1 /* HW Port 1 TSF Register */
+
+#define PBP REG_PBP
+
+/* Redefine the MACID register to be compatible with prior ICs. */
+#define IDR0 REG_MACID /* MAC ID Register, Offset 0x0050-0x0053 */
+#define IDR4 (REG_MACID + 4) /* MAC ID Register, Offset 0x0054-0x0055 */
+
+
+/* */
+/* 9. Security Control Registers (Offset:) */
+/* */
+#define RWCAM REG_CAMCMD /* In the 8190 data sheet this is called CAMcmd */
+#define WCAMI REG_CAMWRITE /* Software write CAM input content */
+#define RCAMO REG_CAMREAD /* Software read/write CAM config */
+#define CAMDBG REG_CAMDBG
+#define SECR REG_SECCFG /* Security Configuration Register */
+
+/* Unused register */
+#define UnusedRegister 0x1BF
+#define DCAM UnusedRegister
+#define PSR UnusedRegister
+#define BBAddr UnusedRegister
+#define PhyDataR UnusedRegister
+
+/* Min Spacing related settings. */
+#define MAX_MSS_DENSITY_2T 0x13
+#define MAX_MSS_DENSITY_1T 0x0A
+
+/* */
+/* 8192C Cmd9346CR bits (Offset 0xA, 16bit) */
+/* */
+#define CmdEEPROM_En BIT5 /* EEPROM enable when set 1 */
+#define CmdEERPOMSEL BIT4 /* System EEPROM select, 0: boot from E-FUSE, 1: The EEPROM used is 9346 */
+#define Cmd9346CR_9356SEL BIT4
+
+/* */
+/* 8192C GPIO MUX Configuration Register (offset 0x40, 4 byte) */
+/* */
+#define GPIOSEL_GPIO 0
+#define GPIOSEL_ENBT BIT5
+
+/* */
+/* 8192C GPIO PIN Control Register (offset 0x44, 4 byte) */
+/* */
+#define GPIO_IN REG_GPIO_PIN_CTRL /* GPIO pins input value */
+#define GPIO_OUT (REG_GPIO_PIN_CTRL+1) /* GPIO pins output value */
+#define GPIO_IO_SEL (REG_GPIO_PIN_CTRL+2) /* GPIO pins output enable when a bit is set to "1"; otherwise, input is configured. */
+#define GPIO_MOD (REG_GPIO_PIN_CTRL+3)
+
+/* */
+/* 8811A GPIO PIN Control Register (offset 0x60, 4 byte) */
+/* */
+#define GPIO_IN_8811A REG_GPIO_PIN_CTRL_2 /* GPIO pins input value */
+#define GPIO_OUT_8811A (REG_GPIO_PIN_CTRL_2+1) /* GPIO pins output value */
+#define GPIO_IO_SEL_8811A (REG_GPIO_PIN_CTRL_2+2) /* GPIO pins output enable when a bit is set to "1"; otherwise, input is configured. */
+#define GPIO_MOD_8811A (REG_GPIO_PIN_CTRL_2+3)
+
+/* */
+/* 8723/8188E Host System Interrupt Mask Register (offset 0x58, 32 byte) */
+/* */
+#define HSIMR_GPIO12_0_INT_EN BIT0
+#define HSIMR_SPS_OCP_INT_EN BIT5
+#define HSIMR_RON_INT_EN BIT6
+#define HSIMR_PDN_INT_EN BIT7
+#define HSIMR_GPIO9_INT_EN BIT25
+
+/* */
+/* 8723/8188E Host System Interrupt Status Register (offset 0x5C, 32 byte) */
+/* */
+#define HSISR_GPIO12_0_INT BIT0
+#define HSISR_SPS_OCP_INT BIT5
+#define HSISR_RON_INT BIT6
+#define HSISR_PDNINT BIT7
+#define HSISR_GPIO9_INT BIT25
+
+/* */
+/* 8192C (MSR) Media Status Register (Offset 0x4C, 8 bits) */
+/* */
+/*
+Network Type
+00: No link
+01: Link in ad hoc network
+10: Link in infrastructure network
+11: AP mode
+Default: 00b.
+*/
+#define MSR_NOLINK 0x00
+#define MSR_ADHOC 0x01
+#define MSR_INFRA 0x02
+#define MSR_AP 0x03
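+
+/*
+ * Illustrative sketch (not part of the original header): updating the 2-bit
+ * network-type field in MSR (REG_CR + 2, aliased above), e.g. to MSR_INFRA,
+ * while preserving the other bits.  rtw_read8()/rtw_write8() are the driver's
+ * usual accessors; the helper name is hypothetical.
+ *
+ *	static inline void rtl_set_media_status(struct adapter *padapter, u8 mode)
+ *	{
+ *		u8 val = rtw_read8(padapter, MSR) & ~0x03;
+ *
+ *		rtw_write8(padapter, MSR, val | (mode & 0x03));
+ *	}
+ */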
+
+/* */
+/* USB INTR CONTENT */
+/* */
+#define USB_C2H_CMDID_OFFSET 0
+#define USB_C2H_SEQ_OFFSET 1
+#define USB_C2H_EVENT_OFFSET 2
+#define USB_INTR_CPWM_OFFSET 16
+#define USB_INTR_CONTENT_C2H_OFFSET 0
+#define USB_INTR_CONTENT_CPWM1_OFFSET 16
+#define USB_INTR_CONTENT_CPWM2_OFFSET 20
+#define USB_INTR_CONTENT_HISR_OFFSET 48
+#define USB_INTR_CONTENT_HISRE_OFFSET 52
+#define USB_INTR_CONTENT_LENGTH 56
+
+/* */
+/* Response Rate Set Register (offset 0x440, 24bits) */
+/* */
+#define RRSR_1M BIT0
+#define RRSR_2M BIT1
+#define RRSR_5_5M BIT2
+#define RRSR_11M BIT3
+#define RRSR_6M BIT4
+#define RRSR_9M BIT5
+#define RRSR_12M BIT6
+#define RRSR_18M BIT7
+#define RRSR_24M BIT8
+#define RRSR_36M BIT9
+#define RRSR_48M BIT10
+#define RRSR_54M BIT11
+#define RRSR_MCS0 BIT12
+#define RRSR_MCS1 BIT13
+#define RRSR_MCS2 BIT14
+#define RRSR_MCS3 BIT15
+#define RRSR_MCS4 BIT16
+#define RRSR_MCS5 BIT17
+#define RRSR_MCS6 BIT18
+#define RRSR_MCS7 BIT19
+
+#define RRSR_CCK_RATES (RRSR_11M|RRSR_5_5M|RRSR_2M|RRSR_1M)
+#define RRSR_OFDM_RATES (RRSR_54M|RRSR_48M|RRSR_36M|RRSR_24M|RRSR_18M|RRSR_12M|RRSR_9M|RRSR_6M)
+
+/* WOL bit information */
+#define HAL92C_WOL_PTK_UPDATE_EVENT BIT0
+#define HAL92C_WOL_GTK_UPDATE_EVENT BIT1
+#define HAL92C_WOL_DISASSOC_EVENT BIT2
+#define HAL92C_WOL_DEAUTH_EVENT BIT3
+#define HAL92C_WOL_FW_DISCONNECT_EVENT BIT4
+
+/* */
+/* Rate Definition */
+/* */
+/* CCK */
+#define RATR_1M 0x00000001
+#define RATR_2M 0x00000002
+#define RATR_55M 0x00000004
+#define RATR_11M 0x00000008
+/* OFDM */
+#define RATR_6M 0x00000010
+#define RATR_9M 0x00000020
+#define RATR_12M 0x00000040
+#define RATR_18M 0x00000080
+#define RATR_24M 0x00000100
+#define RATR_36M 0x00000200
+#define RATR_48M 0x00000400
+#define RATR_54M 0x00000800
+/* MCS 1 Spatial Stream */
+#define RATR_MCS0 0x00001000
+#define RATR_MCS1 0x00002000
+#define RATR_MCS2 0x00004000
+#define RATR_MCS3 0x00008000
+#define RATR_MCS4 0x00010000
+#define RATR_MCS5 0x00020000
+#define RATR_MCS6 0x00040000
+#define RATR_MCS7 0x00080000
+/* MCS 2 Spatial Stream */
+#define RATR_MCS8 0x00100000
+#define RATR_MCS9 0x00200000
+#define RATR_MCS10 0x00400000
+#define RATR_MCS11 0x00800000
+#define RATR_MCS12 0x01000000
+#define RATR_MCS13 0x02000000
+#define RATR_MCS14 0x04000000
+#define RATR_MCS15 0x08000000
+
+/* CCK */
+#define RATE_1M BIT(0)
+#define RATE_2M BIT(1)
+#define RATE_5_5M BIT(2)
+#define RATE_11M BIT(3)
+/* OFDM */
+#define RATE_6M BIT(4)
+#define RATE_9M BIT(5)
+#define RATE_12M BIT(6)
+#define RATE_18M BIT(7)
+#define RATE_24M BIT(8)
+#define RATE_36M BIT(9)
+#define RATE_48M BIT(10)
+#define RATE_54M BIT(11)
+/* MCS 1 Spatial Stream */
+#define RATE_MCS0 BIT(12)
+#define RATE_MCS1 BIT(13)
+#define RATE_MCS2 BIT(14)
+#define RATE_MCS3 BIT(15)
+#define RATE_MCS4 BIT(16)
+#define RATE_MCS5 BIT(17)
+#define RATE_MCS6 BIT(18)
+#define RATE_MCS7 BIT(19)
+/* MCS 2 Spatial Stream */
+#define RATE_MCS8 BIT(20)
+#define RATE_MCS9 BIT(21)
+#define RATE_MCS10 BIT(22)
+#define RATE_MCS11 BIT(23)
+#define RATE_MCS12 BIT(24)
+#define RATE_MCS13 BIT(25)
+#define RATE_MCS14 BIT(26)
+#define RATE_MCS15 BIT(27)
+
+
+/* ALL CCK Rate */
+#define RATE_ALL_CCK (RATR_1M|RATR_2M|RATR_55M|RATR_11M)
+#define RATE_ALL_OFDM_AG (RATR_6M|RATR_9M|RATR_12M|RATR_18M|RATR_24M|\
+ RATR_36M|RATR_48M|RATR_54M)
+#define RATE_ALL_OFDM_1SS (RATR_MCS0|RATR_MCS1|RATR_MCS2|RATR_MCS3|\
+ RATR_MCS4|RATR_MCS5|RATR_MCS6|RATR_MCS7)
+#define RATE_ALL_OFDM_2SS (RATR_MCS8|RATR_MCS9|RATR_MCS10|RATR_MCS11|\
+ RATR_MCS12|RATR_MCS13|RATR_MCS14|RATR_MCS15)
+
+#define RATE_BITMAP_ALL 0xFFFFF
+
+/* Only use CCK 1M rate for ACK */
+#define RATE_RRSR_CCK_ONLY_1M 0xFFFF1
+#define RATE_RRSR_WITHOUT_CCK 0xFFFF0
+
+/* */
+/* BW_OPMODE bits (Offset 0x603, 8bit) */
+/* */
+#define BW_OPMODE_20MHZ BIT2
+#define BW_OPMODE_5G BIT1
+
+/* */
+/* CAM Config Setting (offset 0x680, 1 byte) */
+/* */
+#define CAM_VALID BIT15
+#define CAM_NOTVALID 0x0000
+#define CAM_USEDK BIT5
+
+#define CAM_CONTENT_COUNT 8
+
+#define CAM_NONE 0x0
+#define CAM_WEP40 0x01
+#define CAM_TKIP 0x02
+#define CAM_AES 0x04
+#define CAM_WEP104 0x05
+#define CAM_SMS4 0x6
+
+#define TOTAL_CAM_ENTRY 32
+#define HALF_CAM_ENTRY 16
+
+#define CAM_CONFIG_USEDK true
+#define CAM_CONFIG_NO_USEDK false
+
+#define CAM_WRITE BIT16
+#define CAM_READ 0x00000000
+#define CAM_POLLINIG BIT31
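+
+/*
+ * Illustrative sketch (not part of the original header): writing one dword of
+ * a CAM entry and polling for completion with the REG_CAMWRITE/REG_CAMCMD
+ * registers and the CAM_* bits defined above.  The accessor names follow the
+ * driver's rtw_*() style; the helper itself is hypothetical.
+ *
+ *	static inline void rtl_cam_write(struct adapter *padapter, u32 addr, u32 data)
+ *	{
+ *		rtw_write32(padapter, REG_CAMWRITE, data);
+ *		rtw_write32(padapter, REG_CAMCMD, CAM_POLLINIG | CAM_WRITE | addr);
+ *		while (rtw_read32(padapter, REG_CAMCMD) & CAM_POLLINIG)
+ *			cpu_relax();
+ *	}
+ */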
+
+/* */
+/* 10. Power Save Control Registers */
+/* */
+#define WOW_PMEN BIT0 /* Power management Enable. */
+#define WOW_WOMEN BIT1 /* WoW function on or off. */
+#define WOW_MAGIC BIT2 /* Magic packet */
+#define WOW_UWF BIT3 /* Unicast Wakeup frame. */
+
+/* */
+/* 12. Host Interrupt Status Registers */
+/* */
+/* */
+/* 8190 IMR/ISR bits */
+/* */
+#define IMR8190_DISABLED 0x0
+#define IMR_DISABLED 0x0
+/* IMR DW0 Bit 0-31 */
+#define IMR_BCNDMAINT6 BIT31 /* Beacon DMA Interrupt 6 */
+#define IMR_BCNDMAINT5 BIT30 /* Beacon DMA Interrupt 5 */
+#define IMR_BCNDMAINT4 BIT29 /* Beacon DMA Interrupt 4 */
+#define IMR_BCNDMAINT3 BIT28 /* Beacon DMA Interrupt 3 */
+#define IMR_BCNDMAINT2 BIT27 /* Beacon DMA Interrupt 2 */
+#define IMR_BCNDMAINT1 BIT26 /* Beacon DMA Interrupt 1 */
+#define IMR_BCNDOK8 BIT25 /* Beacon Queue DMA OK Interrupt 8 */
+#define IMR_BCNDOK7 BIT24 /* Beacon Queue DMA OK Interrupt 7 */
+#define IMR_BCNDOK6 BIT23 /* Beacon Queue DMA OK Interrupt 6 */
+#define IMR_BCNDOK5 BIT22 /* Beacon Queue DMA OK Interrupt 5 */
+#define IMR_BCNDOK4 BIT21 /* Beacon Queue DMA OK Interrupt 4 */
+#define IMR_BCNDOK3 BIT20 /* Beacon Queue DMA OK Interrupt 3 */
+#define IMR_BCNDOK2 BIT19 /* Beacon Queue DMA OK Interrupt 2 */
+#define IMR_BCNDOK1 BIT18 /* Beacon Queue DMA OK Interrupt 1 */
+#define IMR_TIMEOUT2 BIT17 /* Timeout interrupt 2 */
+#define IMR_TIMEOUT1 BIT16 /* Timeout interrupt 1 */
+#define IMR_TXFOVW BIT15 /* Transmit FIFO Overflow */
+#define IMR_PSTIMEOUT BIT14 /* Power save time out interrupt */
+#define IMR_BcnInt BIT13 /* Beacon DMA Interrupt 0 */
+#define IMR_RXFOVW BIT12 /* Receive FIFO Overflow */
+#define IMR_RDU BIT11 /* Receive Descriptor Unavailable */
+#define IMR_ATIMEND BIT10 /* For 92C, ATIM Window End Interrupt. For 8723 and later ICs, it also means P2P CTWin End interrupt. */
+#define IMR_BDOK BIT9 /* Beacon Queue DMA OK Interrupt */
+#define IMR_HIGHDOK BIT8 /* High Queue DMA OK Interrupt */
+#define IMR_TBDOK BIT7 /* Transmit Beacon OK interrupt */
+#define IMR_MGNTDOK BIT6 /* Management Queue DMA OK Interrupt */
+#define IMR_TBDER BIT5 /* For 92C, Transmit Beacon Error Interrupt */
+#define IMR_BKDOK BIT4 /* AC_BK DMA OK Interrupt */
+#define IMR_BEDOK BIT3 /* AC_BE DMA OK Interrupt */
+#define IMR_VIDOK BIT2 /* AC_VI DMA OK Interrupt */
+#define IMR_VODOK BIT1 /* AC_VO DMA Interrupt */
+#define IMR_ROK BIT0 /* Receive DMA OK Interrupt */
+
+/* 13. Host Interrupt Status Extension Register (Offset: 0x012C-012Eh) */
+#define IMR_TSF_BIT32_TOGGLE BIT15
+#define IMR_BcnInt_E BIT12
+#define IMR_TXERR BIT11
+#define IMR_RXERR BIT10
+#define IMR_C2HCMD BIT9
+#define IMR_CPWM BIT8
+/* RSVD [2-7] */
+#define IMR_OCPINT BIT1
+#define IMR_WLANOFF BIT0
+
+/* */
+/* 8723E series PCIE Host IMR/ISR bit */
+/* */
+/* IMR DW0 Bit 0-31 */
+#define PHIMR_TIMEOUT2 BIT31
+#define PHIMR_TIMEOUT1 BIT30
+#define PHIMR_PSTIMEOUT BIT29
+#define PHIMR_GTINT4 BIT28
+#define PHIMR_GTINT3 BIT27
+#define PHIMR_TXBCNERR BIT26
+#define PHIMR_TXBCNOK BIT25
+#define PHIMR_TSF_BIT32_TOGGLE BIT24
+#define PHIMR_BCNDMAINT3 BIT23
+#define PHIMR_BCNDMAINT2 BIT22
+#define PHIMR_BCNDMAINT1 BIT21
+#define PHIMR_BCNDMAINT0 BIT20
+#define PHIMR_BCNDOK3 BIT19
+#define PHIMR_BCNDOK2 BIT18
+#define PHIMR_BCNDOK1 BIT17
+#define PHIMR_BCNDOK0 BIT16
+#define PHIMR_HSISR_IND_ON BIT15
+#define PHIMR_BCNDMAINT_E BIT14
+#define PHIMR_ATIMEND_E BIT13
+#define PHIMR_ATIM_CTW_END BIT12
+#define PHIMR_HISRE_IND BIT11 /* RO. HISRE Indicator (set to 1 when HISRE & HIMRE is true) */
+#define PHIMR_C2HCMD BIT10
+#define PHIMR_CPWM2 BIT9
+#define PHIMR_CPWM BIT8
+#define PHIMR_HIGHDOK BIT7 /* High Queue DMA OK Interrupt */
+#define PHIMR_MGNTDOK BIT6 /* Management Queue DMA OK Interrupt */
+#define PHIMR_BKDOK BIT5 /* AC_BK DMA OK Interrupt */
+#define PHIMR_BEDOK BIT4 /* AC_BE DMA OK Interrupt */
+#define PHIMR_VIDOK BIT3 /* AC_VI DMA OK Interrupt */
+#define PHIMR_VODOK BIT2 /* AC_VO DMA Interrupt */
+#define PHIMR_RDU BIT1 /* Receive Descriptor Unavailable */
+#define PHIMR_ROK BIT0 /* Receive DMA OK Interrupt */
+
+/* PCIE Host Interrupt Status Extension bit */
+#define PHIMR_BCNDMAINT7 BIT23
+#define PHIMR_BCNDMAINT6 BIT22
+#define PHIMR_BCNDMAINT5 BIT21
+#define PHIMR_BCNDMAINT4 BIT20
+#define PHIMR_BCNDOK7 BIT19
+#define PHIMR_BCNDOK6 BIT18
+#define PHIMR_BCNDOK5 BIT17
+#define PHIMR_BCNDOK4 BIT16
+/* bit12 15: RSVD */
+#define PHIMR_TXERR BIT11
+#define PHIMR_RXERR BIT10
+#define PHIMR_TXFOVW BIT9
+#define PHIMR_RXFOVW BIT8
+/* bit2-7: RSVD */
+#define PHIMR_OCPINT BIT1
+/* bit0: RSVD */
+
+#define UHIMR_TIMEOUT2 BIT31
+#define UHIMR_TIMEOUT1 BIT30
+#define UHIMR_PSTIMEOUT BIT29
+#define UHIMR_GTINT4 BIT28
+#define UHIMR_GTINT3 BIT27
+#define UHIMR_TXBCNERR BIT26
+#define UHIMR_TXBCNOK BIT25
+#define UHIMR_TSF_BIT32_TOGGLE BIT24
+#define UHIMR_BCNDMAINT3 BIT23
+#define UHIMR_BCNDMAINT2 BIT22
+#define UHIMR_BCNDMAINT1 BIT21
+#define UHIMR_BCNDMAINT0 BIT20
+#define UHIMR_BCNDOK3 BIT19
+#define UHIMR_BCNDOK2 BIT18
+#define UHIMR_BCNDOK1 BIT17
+#define UHIMR_BCNDOK0 BIT16
+#define UHIMR_HSISR_IND BIT15
+#define UHIMR_BCNDMAINT_E BIT14
+/* RSVD BIT13 */
+#define UHIMR_CTW_END BIT12
+/* RSVD BIT11 */
+#define UHIMR_C2HCMD BIT10
+#define UHIMR_CPWM2 BIT9
+#define UHIMR_CPWM BIT8
+#define UHIMR_HIGHDOK BIT7 /* High Queue DMA OK Interrupt */
+#define UHIMR_MGNTDOK BIT6 /* Management Queue DMA OK Interrupt */
+#define UHIMR_BKDOK BIT5 /* AC_BK DMA OK Interrupt */
+#define UHIMR_BEDOK BIT4 /* AC_BE DMA OK Interrupt */
+#define UHIMR_VIDOK BIT3 /* AC_VI DMA OK Interrupt */
+#define UHIMR_VODOK BIT2 /* AC_VO DMA Interrupt */
+#define UHIMR_RDU BIT1 /* Receive Descriptor Unavailable */
+#define UHIMR_ROK BIT0 /* Receive DMA OK Interrupt */
+
+/* USB Host Interrupt Status Extension bit */
+#define UHIMR_BCNDMAINT7 BIT23
+#define UHIMR_BCNDMAINT6 BIT22
+#define UHIMR_BCNDMAINT5 BIT21
+#define UHIMR_BCNDMAINT4 BIT20
+#define UHIMR_BCNDOK7 BIT19
+#define UHIMR_BCNDOK6 BIT18
+#define UHIMR_BCNDOK5 BIT17
+#define UHIMR_BCNDOK4 BIT16
+/* bit14-15: RSVD */
+#define UHIMR_ATIMEND_E BIT13
+#define UHIMR_ATIMEND BIT12
+#define UHIMR_TXERR BIT11
+#define UHIMR_RXERR BIT10
+#define UHIMR_TXFOVW BIT9
+#define UHIMR_RXFOVW BIT8
+/* bit2-7: RSVD */
+#define UHIMR_OCPINT BIT1
+/* bit0: RSVD */
+
+
+#define HAL_NIC_UNPLUG_ISR 0xFFFFFFFF /* The value when the NIC is unplugged for PCI. */
+#define HAL_NIC_UNPLUG_PCI_ISR 0xEAEAEAEA /* The value when the NIC is unplugged for PCI in PCI interrupt (page 3). */
+
+/* */
+/* 8188 IMR/ISR bits */
+/* */
+#define IMR_DISABLED_88E 0x0
+/* IMR DW0(0x0060-0063) Bit 0-31 */
+#define IMR_TXCCK_88E BIT30 /* TXRPT interrupt when CCX bit of the packet is set */
+#define IMR_PSTIMEOUT_88E BIT29 /* Power Save Time Out Interrupt */
+#define IMR_GTINT4_88E BIT28 /* When GTIMER4 expires, this bit is set to 1 */
+#define IMR_GTINT3_88E BIT27 /* When GTIMER3 expires, this bit is set to 1 */
+#define IMR_TBDER_88E BIT26 /* Transmit Beacon0 Error */
+#define IMR_TBDOK_88E BIT25 /* Transmit Beacon0 OK */
+#define IMR_TSF_BIT32_TOGGLE_88E BIT24 /* TSF Timer BIT32 toggle indication interrupt */
+#define IMR_BCNDMAINT0_88E BIT20 /* Beacon DMA Interrupt 0 */
+#define IMR_BCNDERR0_88E BIT16 /* Beacon Queue DMA Error 0 */
+#define IMR_HSISR_IND_ON_INT_88E BIT15 /* HSISR Indicator (set to 1 when HSIMR & HSISR is true) */
+#define IMR_BCNDMAINT_E_88E BIT14 /* Beacon DMA Interrupt Extension for Win7 */
+#define IMR_ATIMEND_88E BIT12 /* CTWindow End or ATIM Window End */
+#define IMR_HISR1_IND_INT_88E BIT11 /* HISR1 Indicator (set to 1 when HISR1 & HIMR1 is true) */
+#define IMR_C2HCMD_88E BIT10 /* CPU to Host Command INT Status, Write 1 clear */
+#define IMR_CPWM2_88E BIT9 /* CPU power Mode exchange INT Status, Write 1 clear */
+#define IMR_CPWM_88E BIT8 /* CPU power Mode exchange INT Status, Write 1 clear */
+#define IMR_HIGHDOK_88E BIT7 /* High Queue DMA OK */
+#define IMR_MGNTDOK_88E BIT6 /* Management Queue DMA OK */
+#define IMR_BKDOK_88E BIT5 /* AC_BK DMA OK */
+#define IMR_BEDOK_88E BIT4 /* AC_BE DMA OK */
+#define IMR_VIDOK_88E BIT3 /* AC_VI DMA OK */
+#define IMR_VODOK_88E BIT2 /* AC_VO DMA OK */
+#define IMR_RDU_88E BIT1 /* Rx Descriptor Unavailable */
+#define IMR_ROK_88E BIT0 /* Receive DMA OK */
+
+/* IMR DW1(0x00B4-00B7) Bit 0-31 */
+#define IMR_BCNDMAINT7_88E BIT27 /* Beacon DMA Interrupt 7 */
+#define IMR_BCNDMAINT6_88E BIT26 /* Beacon DMA Interrupt 6 */
+#define IMR_BCNDMAINT5_88E BIT25 /* Beacon DMA Interrupt 5 */
+#define IMR_BCNDMAINT4_88E BIT24 /* Beacon DMA Interrupt 4 */
+#define IMR_BCNDMAINT3_88E BIT23 /* Beacon DMA Interrupt 3 */
+#define IMR_BCNDMAINT2_88E BIT22 /* Beacon DMA Interrupt 2 */
+#define IMR_BCNDMAINT1_88E BIT21 /* Beacon DMA Interrupt 1 */
+#define IMR_BCNDOK7_88E BIT20 /* Beacon Queue DMA OK Interrupt 7 */
+#define IMR_BCNDOK6_88E BIT19 /* Beacon Queue DMA OK Interrupt 6 */
+#define IMR_BCNDOK5_88E BIT18 /* Beacon Queue DMA OK Interrupt 5 */
+#define IMR_BCNDOK4_88E BIT17 /* Beacon Queue DMA OK Interrupt 4 */
+#define IMR_BCNDOK3_88E BIT16 /* Beacon Queue DMA OK Interrupt 3 */
+#define IMR_BCNDOK2_88E BIT15 /* Beacon Queue DMA OK Interrupt 2 */
+#define IMR_BCNDOK1_88E BIT14 /* Beacon Queue DMA OK Interrupt 1 */
+#define IMR_ATIMEND_E_88E BIT13 /* ATIM Window End Extension for Win7 */
+#define IMR_TXERR_88E BIT11 /* Tx Error Flag Interrupt Status, write 1 clear. */
+#define IMR_RXERR_88E BIT10 /* Rx Error Flag INT Status, Write 1 clear */
+#define IMR_TXFOVW_88E BIT9 /* Transmit FIFO Overflow */
+#define IMR_RXFOVW_88E BIT8 /* Receive FIFO Overflow */
+
+/*===================================================================
+=====================================================================
+The register defines below are for the 92C. When a define is the same as on the 92C,
+we reuse the 92C define for consistency.
+So the following defines for the 92C are not exhaustive.
+=====================================================================
+=====================================================================*/
+/*
+Based on Datasheet V33---090401
+Register Summary
+Current IOREG MAP
+0x0000h ~ 0x00FFh System Configuration (256 Bytes)
+0x0100h ~ 0x01FFh MACTOP General Configuration (256 Bytes)
+0x0200h ~ 0x027Fh TXDMA Configuration (128 Bytes)
+0x0280h ~ 0x02FFh RXDMA Configuration (128 Bytes)
+0x0300h ~ 0x03FFh PCIE EMAC Reserved Region (256 Bytes)
+0x0400h ~ 0x04FFh Protocol Configuration (256 Bytes)
+0x0500h ~ 0x05FFh EDCA Configuration (256 Bytes)
+0x0600h ~ 0x07FFh WMAC Configuration (512 Bytes)
+0x2000h ~ 0x3FFFh 8051 FW Download Region (8192 Bytes)
+*/
+ /* */
+ /* 8192C (TXPAUSE) transmission pause (Offset 0x522, 8 bits) */
+ /* */
+/* Note: */
+/* The bits for stopping the AC (VO/VI/BE/BK) queues in the RTL8192S/RTL8192C datasheets are wrong; */
+/* the correct arrangement is VO - Bit0, VI - Bit1, BE - Bit2, and BK - Bit3. */
+/* 8723 and 88E may not be correct either in the earlier version. Confirmed with DD Tim. */
+/* By Bruce, 2011-09-22. */
+#define StopBecon BIT6
+#define StopHigh BIT5
+#define StopMgt BIT4
+#define StopBK BIT3
+#define StopBE BIT2
+#define StopVI BIT1
+#define StopVO BIT0
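+
+/*
+ * Illustrative sketch (not part of the original header): pausing and resuming
+ * the BE/BK data queues through the TXPAUSE register (offset 0x0522, defined
+ * elsewhere in this header) using the Stop* bits above.
+ *
+ *	rtw_write8(padapter, REG_TXPAUSE, StopBE | StopBK);
+ *	...
+ *	rtw_write8(padapter, REG_TXPAUSE, 0x00);
+ */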
+
+/* */
+/* 8192C (RCR) Receive Configuration Register (Offset 0x608, 32 bits) */
+/* */
+#define RCR_APPFCS BIT31 /* WMAC appends FCS after the payload */
+#define RCR_APP_MIC BIT30 /* MACRX will retain the MIC at the bottom of the packet. */
+#define RCR_APP_ICV BIT29 /* MACRX will retain the ICV at the bottom of the packet. */
+#define RCR_APP_PHYST_RXFF BIT28 /* PHY Status is appended before RX packet in RXFF */
+#define RCR_APP_BA_SSN BIT27 /* SSN of the previous TXBA is appended after the original RXDESC as the 4th DW of the RXDESC. */
+#define RCR_NONQOS_VHT BIT26 /* Reserved */
+#define RCR_RSVD_BIT25 BIT25 /* Reserved */
+#define RCR_ENMBID BIT24 /* Enable Multiple BSSID. Only respond with ACK to packets whose DID (A1) matches an address in the MBSSID CAM entries. */
+#define RCR_LSIGEN BIT23 /* Enable LSIG TXOP Protection function. Search KEYCAM for each rx packet to check if the LSIGEN bit is set. */
+#define RCR_MFBEN BIT22 /* Enable immediate MCS Feedback function. When an Rx packet has MRQ = 1'b1, search KEYCAM for the sender's MCS Feedback setting and send a response. */
+#define RCR_RSVD_BIT21 BIT21 /* Reserved */
+#define RCR_RSVD_BIT20 BIT20 /* Reserved */
+#define RCR_RSVD_BIT19 BIT19 /* Reserved */
+#define RCR_TIM_PARSER_EN BIT18 /* RX Beacon TIM Parser. */
+#define RCR_BM_DATA_EN BIT17 /* Broadcast data packet interrupt enable. */
+#define RCR_UC_DATA_EN BIT16 /* Unicast data packet interrupt enable. */
+#define RCR_RSVD_BIT15 BIT15 /* Reserved */
+#define RCR_HTC_LOC_CTRL BIT14 /* MFC<--HTC = 1 MFC-->HTC = 0 */
+#define RCR_AMF BIT13 /* Accept management type frame */
+#define RCR_ACF BIT12 /* Accept control type frame. Control frames BA, BAR, and PS-Poll (when in AP mode) are not controlled by this bit. They are controlled by ADF. */
+#define RCR_ADF BIT11 /* Accept data type frame. This bit also regulates BA, BAR, and PS-Poll (AP mode only). */
+#define RCR_RSVD_BIT10 BIT10 /* Reserved */
+#define RCR_AICV BIT9 /* Accept ICV error packet */
+#define RCR_ACRC32 BIT8 /* Accept CRC32 error packet */
+#define RCR_CBSSID_BCN BIT7 /* Accept BSSID match packet (Rx beacon, probe rsp) */
+#define RCR_CBSSID_DATA BIT6 /* Accept BSSID match packet (Data) */
+#define RCR_CBSSID RCR_CBSSID_DATA /* Accept BSSID match packet */
+#define RCR_APWRMGT BIT5 /* Accept power management packet */
+#define RCR_ADD3 BIT4 /* Accept address 3 match packet */
+#define RCR_AB BIT3 /* Accept broadcast packet */
+#define RCR_AM BIT2 /* Accept multicast packet */
+#define RCR_APM BIT1 /* Accept physical match packet */
+#define RCR_AAP BIT0 /* Accept all unicast packet */
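+
+/*
+ * Illustrative sketch (not part of the original header): a typical station
+ * mode receive filter built from the RCR_* bits above and written to REG_RCR
+ * (0x0608).  Which bits to set is driver policy; this combination is only an
+ * example.
+ *
+ *	u32 rcr = RCR_APM | RCR_AM | RCR_AB | RCR_CBSSID_DATA | RCR_CBSSID_BCN |
+ *		  RCR_AMF | RCR_HTC_LOC_CTRL | RCR_APP_PHYST_RXFF |
+ *		  RCR_APP_ICV | RCR_APP_MIC;
+ *	rtw_write32(padapter, REG_RCR, rcr);
+ */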
+
+
+/* */
+/* */
+/* 0x0000h ~ 0x00FFh System Configuration */
+/* */
+/* */
+
+/* 2 SYS_ISO_CTRL */
+#define ISO_MD2PP BIT(0)
+#define ISO_UA2USB BIT(1)
+#define ISO_UD2CORE BIT(2)
+#define ISO_PA2PCIE BIT(3)
+#define ISO_PD2CORE BIT(4)
+#define ISO_IP2MAC BIT(5)
+#define ISO_DIOP BIT(6)
+#define ISO_DIOE BIT(7)
+#define ISO_EB2CORE BIT(8)
+#define ISO_DIOR BIT(9)
+#define PWC_EV12V BIT(15)
+
+
+/* 2 SYS_FUNC_EN */
+#define FEN_BBRSTB BIT(0)
+#define FEN_BB_GLB_RSTn BIT(1)
+#define FEN_USBA BIT(2)
+#define FEN_UPLL BIT(3)
+#define FEN_USBD BIT(4)
+#define FEN_DIO_PCIE BIT(5)
+#define FEN_PCIEA BIT(6)
+#define FEN_PPLL BIT(7)
+#define FEN_PCIED BIT(8)
+#define FEN_DIOE BIT(9)
+#define FEN_CPUEN BIT(10)
+#define FEN_DCORE BIT(11)
+#define FEN_ELDR BIT(12)
+#define FEN_EN_25_1 BIT(13)
+#define FEN_HWPDN BIT(14)
+#define FEN_MREGEN BIT(15)
+
+/* 2 APS_FSMCO */
+#define PFM_LDALL BIT(0)
+#define PFM_ALDN BIT(1)
+#define PFM_LDKP BIT(2)
+#define PFM_WOWL BIT(3)
+#define EnPDN BIT(4)
+#define PDN_PL BIT(5)
+#define APFM_ONMAC BIT(8)
+#define APFM_OFF BIT(9)
+#define APFM_RSM BIT(10)
+#define AFSM_HSUS BIT(11)
+#define AFSM_PCIE BIT(12)
+#define APDM_MAC BIT(13)
+#define APDM_HOST BIT(14)
+#define APDM_HPDN BIT(15)
+#define RDY_MACON BIT(16)
+#define SUS_HOST BIT(17)
+#define ROP_ALD BIT(20)
+#define ROP_PWR BIT(21)
+#define ROP_SPS BIT(22)
+#define SOP_MRST BIT(25)
+#define SOP_FUSE BIT(26)
+#define SOP_ABG BIT(27)
+#define SOP_AMB BIT(28)
+#define SOP_RCK BIT(29)
+#define SOP_A8M BIT(30)
+#define XOP_BTCK BIT(31)
+
+/* 2 SYS_CLKR */
+#define ANAD16V_EN BIT(0)
+#define ANA8M BIT(1)
+#define MACSLP BIT(4)
+#define LOADER_CLK_EN BIT(5)
+
+
+/* 2 9346CR /REG_SYS_EEPROM_CTRL */
+#define BOOT_FROM_EEPROM BIT(4)
+#define EEPROMSEL BIT(4)
+#define EEPROM_EN BIT(5)
+
+
+/* 2 RF_CTRL */
+#define RF_EN BIT(0)
+#define RF_RSTB BIT(1)
+#define RF_SDMRSTB BIT(2)
+
+
+/* 2 LDOV12D_CTRL */
+#define LDV12_EN BIT(0)
+#define LDV12_SDBY BIT(1)
+#define LPLDO_HSM BIT(2)
+#define LPLDO_LSM_DIS BIT(3)
+#define _LDV12_VADJ(x) (((x) & 0xF) << 4)
+
+
+
+/* 2 EFUSE_TEST (For RTL8723 partially) */
+#define EF_TRPT BIT(7)
+#define EF_CELL_SEL (BIT(8)|BIT(9)) /* 00: Wifi Efuse, 01: BT Efuse0, 10: BT Efuse1, 11: BT Efuse2 */
+#define LDOE25_EN BIT(31)
+#define EFUSE_SEL(x) (((x) & 0x3) << 8)
+#define EFUSE_SEL_MASK 0x300
+#define EFUSE_WIFI_SEL_0 0x0
+#define EFUSE_BT_SEL_0 0x1
+#define EFUSE_BT_SEL_1 0x2
+#define EFUSE_BT_SEL_2 0x3
+
+
+/* 2 8051FWDL */
+/* 2 MCUFWDL */
+#define MCUFWDL_EN BIT(0)
+#define MCUFWDL_RDY BIT(1)
+#define FWDL_ChkSum_rpt BIT(2)
+#define MACINI_RDY BIT(3)
+#define BBINI_RDY BIT(4)
+#define RFINI_RDY BIT(5)
+#define WINTINI_RDY BIT(6)
+#define RAM_DL_SEL BIT(7)
+#define ROM_DLEN BIT(19)
+#define CPRST BIT(23)
+
+
+/* 2 REG_SYS_CFG */
+#define XCLK_VLD BIT(0)
+#define ACLK_VLD BIT(1)
+#define UCLK_VLD BIT(2)
+#define PCLK_VLD BIT(3)
+#define PCIRSTB BIT(4)
+#define V15_VLD BIT(5)
+#define SW_OFFLOAD_EN BIT(7)
+#define SIC_IDLE BIT(8)
+#define BD_MAC2 BIT(9)
+#define BD_MAC1 BIT(10)
+#define IC_MACPHY_MODE BIT(11)
+#define CHIP_VER (BIT(12)|BIT(13)|BIT(14)|BIT(15))
+#define BT_FUNC BIT(16)
+#define VENDOR_ID BIT(19)
+#define EXT_VENDOR_ID (BIT(18)|BIT(19)) /* Currently only for RTL8723B */
+#define PAD_HWPD_IDN BIT(22)
+#define TRP_VAUX_EN BIT(23) /* RTL ID */
+#define TRP_BT_EN BIT(24)
+#define BD_PKG_SEL BIT(25)
+#define BD_HCI_SEL BIT(26)
+#define TYPE_ID BIT(27)
+#define RF_TYPE_ID BIT(27)
+
+#define RTL_ID BIT(23) /* TestChip ID, 1:Test(RLE); 0:MP(RL) */
+#define SPS_SEL BIT(24) /* 1:LDO regulator mode; 0:Switching regulator mode */
+
+
+#define CHIP_VER_RTL_MASK 0xF000 /* Bit 12 ~ 15 */
+#define CHIP_VER_RTL_SHIFT 12
+#define EXT_VENDOR_ID_SHIFT 18
+
+/* 2 REG_GPIO_OUTSTS (For RTL8723 only) */
+#define EFS_HCI_SEL (BIT(0)|BIT(1))
+#define PAD_HCI_SEL (BIT(2)|BIT(3))
+#define HCI_SEL (BIT(4)|BIT(5))
+#define PKG_SEL_HCI BIT(6)
+#define FEN_GPS BIT(7)
+#define FEN_BT BIT(8)
+#define FEN_WL BIT(9)
+#define FEN_PCI BIT(10)
+#define FEN_USB BIT(11)
+#define BTRF_HWPDN_N BIT(12)
+#define WLRF_HWPDN_N BIT(13)
+#define PDN_BT_N BIT(14)
+#define PDN_GPS_N BIT(15)
+#define BT_CTL_HWPDN BIT(16)
+#define GPS_CTL_HWPDN BIT(17)
+#define PPHY_SUSB BIT(20)
+#define UPHY_SUSB BIT(21)
+#define PCI_SUSEN BIT(22)
+#define USB_SUSEN BIT(23)
+#define RF_RL_ID (BIT(31)|BIT(30)|BIT(29)|BIT(28))
+
+
+/* */
+/* */
+/* 0x0100h ~ 0x01FFh MACTOP General Configuration */
+/* */
+/* */
+
+/* 2 Function Enable Registers */
+/* 2 CR */
+#define HCI_TXDMA_EN BIT(0)
+#define HCI_RXDMA_EN BIT(1)
+#define TXDMA_EN BIT(2)
+#define RXDMA_EN BIT(3)
+#define PROTOCOL_EN BIT(4)
+#define SCHEDULE_EN BIT(5)
+#define MACTXEN BIT(6)
+#define MACRXEN BIT(7)
+#define ENSWBCN BIT(8)
+#define ENSEC BIT(9)
+#define CALTMR_EN BIT(10) /* 32k CAL TMR enable */
+
+/* Network type */
+#define _NETTYPE(x) (((x) & 0x3) << 16)
+#define MASK_NETTYPE 0x30000
+#define NT_NO_LINK 0x0
+#define NT_LINK_AD_HOC 0x1
+#define NT_LINK_AP 0x2
+#define NT_AS_AP 0x3
+
+/* 2 PBP - Page Size Register */
+#define GET_RX_PAGE_SIZE(value) ((value) & 0xF)
+#define GET_TX_PAGE_SIZE(value) (((value) & 0xF0) >> 4)
+#define _PSRX_MASK 0xF
+#define _PSTX_MASK 0xF0
+#define _PSRX(x) (x)
+#define _PSTX(x) ((x) << 4)
+
+#define PBP_64 0x0
+#define PBP_128 0x1
+#define PBP_256 0x2
+#define PBP_512 0x3
+#define PBP_1024 0x4
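+
+/*
+ * Illustrative sketch (not part of the original header): programming the page
+ * size register (REG_PBP, aliased as PBP earlier in this header) with a
+ * 128-byte RX page and a 256-byte TX page, then reading the RX page code back.
+ *
+ *	rtw_write8(padapter, REG_PBP, _PSRX(PBP_128) | _PSTX(PBP_256));
+ *	u8 rx_page_code = GET_RX_PAGE_SIZE(rtw_read8(padapter, REG_PBP));
+ */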
+
+
+/* 2 TX/RXDMA */
+#define RXDMA_ARBBW_EN BIT(0)
+#define RXSHFT_EN BIT(1)
+#define RXDMA_AGG_EN BIT(2)
+#define QS_VO_QUEUE BIT(8)
+#define QS_VI_QUEUE BIT(9)
+#define QS_BE_QUEUE BIT(10)
+#define QS_BK_QUEUE BIT(11)
+#define QS_MANAGER_QUEUE BIT(12)
+#define QS_HIGH_QUEUE BIT(13)
+
+#define HQSEL_VOQ BIT(0)
+#define HQSEL_VIQ BIT(1)
+#define HQSEL_BEQ BIT(2)
+#define HQSEL_BKQ BIT(3)
+#define HQSEL_MGTQ BIT(4)
+#define HQSEL_HIQ BIT(5)
+
+/* For normal driver, 0x10C */
+#define _TXDMA_CMQ_MAP(x) (((x)&0x3) << 16)
+#define _TXDMA_HIQ_MAP(x) (((x)&0x3) << 14)
+#define _TXDMA_MGQ_MAP(x) (((x)&0x3) << 12)
+#define _TXDMA_BKQ_MAP(x) (((x)&0x3) << 10)
+#define _TXDMA_BEQ_MAP(x) (((x)&0x3) << 8)
+#define _TXDMA_VIQ_MAP(x) (((x)&0x3) << 6)
+#define _TXDMA_VOQ_MAP(x) (((x)&0x3) << 4)
+
+#define QUEUE_EXTRA 0
+#define QUEUE_LOW 1
+#define QUEUE_NORMAL 2
+#define QUEUE_HIGH 3
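+
+/*
+ * Illustrative sketch (not part of the original header): mapping the TX
+ * queues onto DMA queues with the _TXDMA_*_MAP() helpers and QUEUE_* values
+ * above (normal driver register 0x10C, the TRXDMA control register in this
+ * driver family).  The particular mapping shown is only an example.
+ *
+ *	u32 map = _TXDMA_VOQ_MAP(QUEUE_HIGH) | _TXDMA_VIQ_MAP(QUEUE_HIGH) |
+ *		  _TXDMA_BEQ_MAP(QUEUE_NORMAL) | _TXDMA_BKQ_MAP(QUEUE_LOW) |
+ *		  _TXDMA_MGQ_MAP(QUEUE_HIGH) | _TXDMA_HIQ_MAP(QUEUE_HIGH);
+ *	rtw_write32(padapter, REG_TRXDMA_CTRL, map);
+ */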
+
+
+/* 2 TRXFF_BNDY */
+
+
+/* 2 LLT_INIT */
+#define _LLT_NO_ACTIVE 0x0
+#define _LLT_WRITE_ACCESS 0x1
+#define _LLT_READ_ACCESS 0x2
+
+#define _LLT_INIT_DATA(x) ((x) & 0xFF)
+#define _LLT_INIT_ADDR(x) (((x) & 0xFF) << 8)
+#define _LLT_OP(x) (((x) & 0x3) << 30)
+#define _LLT_OP_VALUE(x) (((x) >> 30) & 0x3)
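+
+/*
+ * Illustrative sketch (not part of the original header): writing one link
+ * list table entry through the LLT_INIT register (defined elsewhere in this
+ * header) and polling until the hardware clears the op field.  Here addr and
+ * data stand for the entry index and its value.
+ *
+ *	rtw_write32(padapter, REG_LLT_INIT, _LLT_OP(_LLT_WRITE_ACCESS) |
+ *		    _LLT_INIT_ADDR(addr) | _LLT_INIT_DATA(data));
+ *	do {
+ *		val = rtw_read32(padapter, REG_LLT_INIT);
+ *	} while (_LLT_OP_VALUE(val) != _LLT_NO_ACTIVE);
+ */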
+
+
+/* */
+/* */
+/* 0x0200h ~ 0x027Fh TXDMA Configuration */
+/* */
+/* */
+/* 2 RQPN */
+#define _HPQ(x) ((x) & 0xFF)
+#define _LPQ(x) (((x) & 0xFF) << 8)
+#define _PUBQ(x) (((x) & 0xFF) << 16)
+#define _NPQ(x) ((x) & 0xFF) /* NOTE: in RQPN_NPQ register */
+#define _EPQ(x) (((x) & 0xFF) << 16) /* NOTE: in RQPN_EPQ register */
+
+
+#define HPQ_PUBLIC_DIS BIT(24)
+#define LPQ_PUBLIC_DIS BIT(25)
+#define LD_RQPN BIT(31)
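+
+/*
+ * Illustrative sketch (not part of the original header): loading reserved
+ * page numbers for the high/low/public queues and latching them with LD_RQPN
+ * via the RQPN register (defined elsewhere in this header).  The page counts
+ * shown are only an example.
+ *
+ *	rtw_write32(padapter, REG_RQPN,
+ *		    LD_RQPN | _PUBQ(0xA3) | _LPQ(0x1C) | _HPQ(0x29));
+ */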
+
+
+/* 2 TDECTL */
+#define BLK_DESC_NUM_SHIFT 4
+#define BLK_DESC_NUM_MASK 0xF
+
+
+/* 2 TXDMA_OFFSET_CHK */
+#define DROP_DATA_EN BIT(9)
+
+/* 2 AUTO_LLT */
+#define BIT_SHIFT_TXPKTNUM 24
+#define BIT_MASK_TXPKTNUM 0xff
+#define BIT_TXPKTNUM(x) (((x) & BIT_MASK_TXPKTNUM) << BIT_SHIFT_TXPKTNUM)
+
+#define BIT_TDE_DBG_SEL BIT(23)
+#define BIT_AUTO_INIT_LLT BIT(16)
+
+#define BIT_SHIFT_Tx_OQT_free_space 8
+#define BIT_MASK_Tx_OQT_free_space 0xff
+#define BIT_Tx_OQT_free_space(x) (((x) & BIT_MASK_Tx_OQT_free_space) << BIT_SHIFT_Tx_OQT_free_space)
+
+
+/* */
+/* */
+/* 0x0280h ~ 0x028Bh RX DMA Configuration */
+/* */
+/* */
+
+/* 2 REG_RXDMA_CONTROL, 0x0286h */
+/* Write only. When this bit is set, RXDMA will decrease RX PKT counter by one. Before */
+/* this bit is polled, FW shall update RXFF_RD_PTR first. This register is write pulse and auto clear. */
+/* define RXPKT_RELEASE_POLL BIT(0) */
+/* Read only. When RXDMA finishes the on-going DMA operation, RXDMA will report idle state in */
+/* this bit. FW can start releasing packets after RXDMA enters idle mode. */
+/* define RXDMA_IDLE BIT(1) */
+/* When this bit is set, RXDMA will enter this mode once the on-going RXDMA packet to the host */
+/* has completed, and stop DMA of packets to the host. Default: 0; */
+/* define RW_RELEASE_EN BIT(2) */
+
+/* 2 REG_RXPKT_NUM, 0x0284 */
+#define RXPKT_RELEASE_POLL BIT(16)
+#define RXDMA_IDLE BIT(17)
+#define RW_RELEASE_EN BIT(18)
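+
+/*
+ * Illustrative sketch (not part of the original header) of the release
+ * handshake described in the note above: wait for RXDMA to report idle in
+ * REG_RXPKT_NUM (0x0284), then pulse RXPKT_RELEASE_POLL to release one RX
+ * packet.  This is an interpretation of the comment, not a mandated sequence.
+ *
+ *	while (!(rtw_read32(padapter, REG_RXPKT_NUM) & RXDMA_IDLE))
+ *		cpu_relax();
+ *	rtw_write32(padapter, REG_RXPKT_NUM,
+ *		    rtw_read32(padapter, REG_RXPKT_NUM) | RXPKT_RELEASE_POLL);
+ */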
+
+/* */
+/* */
+/* 0x0400h ~ 0x047Fh Protocol Configuration */
+/* */
+/* */
+/* 2 FWHW_TXQ_CTRL */
+#define EN_AMPDU_RTY_NEW BIT(7)
+
+
+/* 2 SPEC SIFS */
+#define _SPEC_SIFS_CCK(x) ((x) & 0xFF)
+#define _SPEC_SIFS_OFDM(x) (((x) & 0xFF) << 8)
+
+/* 2 RL */
+#define RETRY_LIMIT_SHORT_SHIFT 8
+#define RETRY_LIMIT_LONG_SHIFT 0
+
+/* */
+/* */
+/* 0x0500h ~ 0x05FFh EDCA Configuration */
+/* */
+/* */
+
+/* 2 EDCA setting */
+#define AC_PARAM_TXOP_LIMIT_OFFSET 16
+#define AC_PARAM_ECW_MAX_OFFSET 12
+#define AC_PARAM_ECW_MIN_OFFSET 8
+#define AC_PARAM_AIFS_OFFSET 0
+
+
+#define _LRL(x) ((x) & 0x3F)
+#define _SRL(x) (((x) & 0x3F) << 8)
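+
+/*
+ * Illustrative sketch (not part of the original header): programming the long
+ * and short retry limits through the retry limit register (defined elsewhere
+ * in this header family) with the helpers above.  The value 0x30 is only an
+ * example.
+ *
+ *	rtw_write16(padapter, REG_RL, _LRL(0x30) | _SRL(0x30));
+ */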
+
+
+/* 2 BCN_CTRL */
+#define EN_TXBCN_RPT BIT(2)
+#define EN_BCN_FUNCTION BIT(3)
+#define STOP_BCNQ BIT(6)
+#define DIS_RX_BSSID_FIT BIT(6)
+
+#define DIS_ATIM BIT(0)
+#define DIS_BCNQ_SUB BIT(1)
+#define DIS_TSF_UDT BIT(4)
+
+/* The same function but different bit field. */
+#define DIS_TSF_UDT0_NORMAL_CHIP BIT(4)
+#define DIS_TSF_UDT0_TEST_CHIP BIT(5)
+
+
+/* 2 ACMHWCTRL */
+#define AcmHw_HwEn BIT(0)
+#define AcmHw_BeqEn BIT(1)
+#define AcmHw_ViqEn BIT(2)
+#define AcmHw_VoqEn BIT(3)
+#define AcmHw_BeqStatus BIT(4)
+#define AcmHw_ViqStatus BIT(5)
+#define AcmHw_VoqStatus BIT(6)
+
+/* 2 REG_DUAL_TSF_RST (0x553) */
+#define DUAL_TSF_RST_P2P BIT(4)
+
+/* 2 REG_NOA_DESC_SEL (0x5CF) */
+#define NOA_DESC_SEL_0 0
+#define NOA_DESC_SEL_1 BIT(4)
+
+/* */
+/* */
+/* 0x0600h ~ 0x07FFh WMAC Configuration */
+/* */
+/* */
+
+/* 2 APSD_CTRL */
+#define APSDOFF BIT(6)
+
+/* 2 TCR */
+#define TSFRST BIT(0)
+#define DIS_GCLK BIT(1)
+#define PAD_SEL BIT(2)
+#define PWR_ST BIT(6)
+#define PWRBIT_OW_EN BIT(7)
+#define ACRC BIT(8)
+#define CFENDFORM BIT(9)
+#define ICV BIT(10)
+
+
+/* 2 RCR */
+#define AAP BIT(0)
+#define APM BIT(1)
+#define AM BIT(2)
+#define AB BIT(3)
+#define ADD3 BIT(4)
+#define APWRMGT BIT(5)
+#define CBSSID BIT(6)
+#define CBSSID_DATA BIT(6)
+#define CBSSID_BCN BIT(7)
+#define ACRC32 BIT(8)
+#define AICV BIT(9)
+#define ADF BIT(11)
+#define ACF BIT(12)
+#define AMF BIT(13)
+#define HTC_LOC_CTRL BIT(14)
+#define UC_DATA_EN BIT(16)
+#define BM_DATA_EN BIT(17)
+#define MFBEN BIT(22)
+#define LSIGEN BIT(23)
+#define EnMBID BIT(24)
+#define FORCEACK BIT(26)
+#define APP_BASSN BIT(27)
+#define APP_PHYSTS BIT(28)
+#define APP_ICV BIT(29)
+#define APP_MIC BIT(30)
+#define APP_FCS BIT(31)
+
+
+/* 2 SECCFG */
+#define SCR_TxUseDK BIT(0) /* Force Tx Use Default Key */
+#define SCR_RxUseDK BIT(1) /* Force Rx Use Default Key */
+#define SCR_TxEncEnable BIT(2) /* Enable Tx Encryption */
+#define SCR_RxDecEnable BIT(3) /* Enable Rx Decryption */
+#define SCR_SKByA2 BIT(4) /* Search kEY BY A2 */
+#define SCR_NoSKMC BIT(5) /* No Key Search Multicast */
+#define SCR_TXBCUSEDK BIT(6) /* Force Tx Broadcast packets Use Default Key */
+#define SCR_RXBCUSEDK BIT(7) /* Force Rx Broadcast packets Use Default Key */
+#define SCR_CHK_KEYID BIT(8)
+
+/* */
+/* */
+/* SDIO Bus Specification */
+/* */
+/* */
+
+/* I/O bus domain address mapping */
+#define SDIO_LOCAL_BASE 0x10250000
+#define WLAN_IOREG_BASE 0x10260000
+#define FIRMWARE_FIFO_BASE 0x10270000
+#define TX_HIQ_BASE 0x10310000
+#define TX_MIQ_BASE 0x10320000
+#define TX_LOQ_BASE 0x10330000
+#define TX_EPQ_BASE 0x10350000
+#define RX_RX0FF_BASE 0x10340000
+
+/* SDIO host local register space mapping. */
+#define SDIO_LOCAL_MSK 0x0FFF
+#define WLAN_IOREG_MSK 0x7FFF
+#define WLAN_FIFO_MSK 0x1FFF /* Aggregation Length[12:0] */
+#define WLAN_RX0FF_MSK 0x0003
+
+#define SDIO_WITHOUT_REF_DEVICE_ID 0 /* Without reference to the SDIO Device ID */
+#define SDIO_LOCAL_DEVICE_ID 0 /* 0b[16], 000b[15:13] */
+#define WLAN_TX_HIQ_DEVICE_ID 4 /* 0b[16], 100b[15:13] */
+#define WLAN_TX_MIQ_DEVICE_ID 5 /* 0b[16], 101b[15:13] */
+#define WLAN_TX_LOQ_DEVICE_ID 6 /* 0b[16], 110b[15:13] */
+#define WLAN_TX_EXQ_DEVICE_ID 3 /* 0b[16], 011b[15:13] */
+#define WLAN_RX0FF_DEVICE_ID 7 /* 0b[16], 111b[15:13] */
+#define WLAN_IOREG_DEVICE_ID 8 /* 1b[16] */
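+
+/*
+ * Illustrative sketch (not part of the original header): composing an SDIO
+ * command address from a *_DEVICE_ID value (placed in bits [16:13], per the
+ * comments above) and a register offset masked with the matching *_MSK value.
+ * The helper name is hypothetical.
+ *
+ *	static inline u32 sdio_cmd_addr(u32 device_id, u32 offset, u32 mask)
+ *	{
+ *		return (device_id << 13) | (offset & mask);
+ *	}
+ */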
+
+/* SDIO Tx Free Page Index */
+#define HI_QUEUE_IDX 0
+#define MID_QUEUE_IDX 1
+#define LOW_QUEUE_IDX 2
+#define PUBLIC_QUEUE_IDX 3
+
+#define SDIO_MAX_TX_QUEUE 3 /* HIQ, MIQ and LOQ */
+#define SDIO_MAX_RX_QUEUE 1
+
+#define SDIO_REG_TX_CTRL 0x0000 /* SDIO Tx Control */
+#define SDIO_REG_HIMR 0x0014 /* SDIO Host Interrupt Mask */
+#define SDIO_REG_HISR 0x0018 /* SDIO Host Interrupt Service Routine */
+#define SDIO_REG_HCPWM 0x0019 /* HCI Current Power Mode */
+#define SDIO_REG_RX0_REQ_LEN 0x001C /* RXDMA Request Length */
+#define SDIO_REG_OQT_FREE_PG 0x001E /* OQT Free Page */
+#define SDIO_REG_FREE_TXPG 0x0020 /* Free Tx Buffer Page */
+#define SDIO_REG_HCPWM1 0x0024 /* HCI Current Power Mode 1 */
+#define SDIO_REG_HCPWM2 0x0026 /* HCI Current Power Mode 2 */
+#define SDIO_REG_FREE_TXPG_SEQ 0x0028 /* Free Tx Page Sequence */
+#define SDIO_REG_HTSFR_INFO 0x0030 /* HTSF Information */
+#define SDIO_REG_HRPWM1 0x0080 /* HCI Request Power Mode 1 */
+#define SDIO_REG_HRPWM2 0x0082 /* HCI Request Power Mode 2 */
+#define SDIO_REG_HPS_CLKR 0x0084 /* HCI Power Save Clock */
+#define SDIO_REG_HSUS_CTRL 0x0086 /* SDIO HCI Suspend Control */
+#define SDIO_REG_HIMR_ON 0x0090 /* SDIO Host Extension Interrupt Mask Always */
+#define SDIO_REG_HISR_ON 0x0091 /* SDIO Host Extension Interrupt Status Always */
+
+#define SDIO_HIMR_DISABLED 0
+
+/* RTL8723/RTL8188E SDIO Host Interrupt Mask Register */
+#define SDIO_HIMR_RX_REQUEST_MSK BIT0
+#define SDIO_HIMR_AVAL_MSK BIT1
+#define SDIO_HIMR_TXERR_MSK BIT2
+#define SDIO_HIMR_RXERR_MSK BIT3
+#define SDIO_HIMR_TXFOVW_MSK BIT4
+#define SDIO_HIMR_RXFOVW_MSK BIT5
+#define SDIO_HIMR_TXBCNOK_MSK BIT6
+#define SDIO_HIMR_TXBCNERR_MSK BIT7
+#define SDIO_HIMR_BCNERLY_INT_MSK BIT16
+#define SDIO_HIMR_C2HCMD_MSK BIT17
+#define SDIO_HIMR_CPWM1_MSK BIT18
+#define SDIO_HIMR_CPWM2_MSK BIT19
+#define SDIO_HIMR_HSISR_IND_MSK BIT20
+#define SDIO_HIMR_GTINT3_IND_MSK BIT21
+#define SDIO_HIMR_GTINT4_IND_MSK BIT22
+#define SDIO_HIMR_PSTIMEOUT_MSK BIT23
+#define SDIO_HIMR_OCPINT_MSK BIT24
+#define SDIO_HIMR_ATIMEND_MSK BIT25
+#define SDIO_HIMR_ATIMEND_E_MSK BIT26
+#define SDIO_HIMR_CTWEND_MSK BIT27
+
+/* RTL8188E SDIO Specific */
+#define SDIO_HIMR_MCU_ERR_MSK BIT28
+#define SDIO_HIMR_TSF_BIT32_TOGGLE_MSK BIT29
+
+/* SDIO Host Interrupt Service Routine */
+#define SDIO_HISR_RX_REQUEST BIT0
+#define SDIO_HISR_AVAL BIT1
+#define SDIO_HISR_TXERR BIT2
+#define SDIO_HISR_RXERR BIT3
+#define SDIO_HISR_TXFOVW BIT4
+#define SDIO_HISR_RXFOVW BIT5
+#define SDIO_HISR_TXBCNOK BIT6
+#define SDIO_HISR_TXBCNERR BIT7
+#define SDIO_HISR_BCNERLY_INT BIT16
+#define SDIO_HISR_C2HCMD BIT17
+#define SDIO_HISR_CPWM1 BIT18
+#define SDIO_HISR_CPWM2 BIT19
+#define SDIO_HISR_HSISR_IND BIT20
+#define SDIO_HISR_GTINT3_IND BIT21
+#define SDIO_HISR_GTINT4_IND BIT22
+#define SDIO_HISR_PSTIMEOUT BIT23
+#define SDIO_HISR_OCPINT BIT24
+#define SDIO_HISR_ATIMEND BIT25
+#define SDIO_HISR_ATIMEND_E BIT26
+#define SDIO_HISR_CTWEND BIT27
+
+/* RTL8188E SDIO Specific */
+#define SDIO_HISR_MCU_ERR BIT28
+#define SDIO_HISR_TSF_BIT32_TOGGLE BIT29
+
+#define MASK_SDIO_HISR_CLEAR (SDIO_HISR_TXERR |\
+ SDIO_HISR_RXERR |\
+ SDIO_HISR_TXFOVW |\
+ SDIO_HISR_RXFOVW |\
+ SDIO_HISR_TXBCNOK |\
+ SDIO_HISR_TXBCNERR |\
+ SDIO_HISR_C2HCMD |\
+ SDIO_HISR_CPWM1 |\
+ SDIO_HISR_CPWM2 |\
+ SDIO_HISR_HSISR_IND |\
+ SDIO_HISR_GTINT3_IND |\
+ SDIO_HISR_GTINT4_IND |\
+ SDIO_HISR_PSTIMEOUT |\
+ SDIO_HISR_OCPINT)
+
+/* SDIO HCI Suspend Control Register */
+#define HCI_RESUME_PWR_RDY BIT1
+#define HCI_SUS_CTRL BIT0
+
+/* SDIO Tx FIFO related */
+#define SDIO_TX_FREE_PG_QUEUE 4 /* The number of Tx FIFO free page */
+#define SDIO_TX_FIFO_PAGE_SZ 128
+
+#define MAX_TX_AGG_PACKET_NUMBER 0x8
+
+/* */
+/* */
+/* 0xFE00h ~ 0xFE55h USB Configuration */
+/* */
+/* */
+
+/* 2 USB Information (0xFE17) */
+#define USB_IS_HIGH_SPEED 0
+#define USB_IS_FULL_SPEED 1
+#define USB_SPEED_MASK BIT(5)
+
+#define USB_NORMAL_SIE_EP_MASK 0xF
+#define USB_NORMAL_SIE_EP_SHIFT 4
+
+/* 2 Special Option */
+#define USB_AGG_EN BIT(3)
+
+/* 0; Use interrupt endpoint to upload interrupt pkt */
+/* 1; Use bulk endpoint to upload interrupt pkt, */
+#define INT_BULK_SEL BIT(4)
+
+/* 2REG_C2HEVT_CLEAR */
+#define C2H_EVT_HOST_CLOSE 0x00 /* Set by driver and notify FW that the driver has read the C2H command message */
+#define C2H_EVT_FW_CLOSE 0xFF /* Set by FW indicating that FW had set the C2H command message and it's not yet read by driver. */
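+
+/*
+ * Illustrative sketch (not part of the original header) of the C2H mailbox
+ * handshake implied by the two values above: once the driver has consumed a
+ * C2H message it writes C2H_EVT_HOST_CLOSE to the C2H event clear register
+ * (defined elsewhere in this header) so the firmware may post the next event.
+ * handle_c2h_event() stands in for the driver's C2H handler.
+ *
+ *	if (rtw_read8(padapter, REG_C2HEVT_CLEAR) == C2H_EVT_FW_CLOSE) {
+ *		handle_c2h_event(padapter);
+ *		rtw_write8(padapter, REG_C2HEVT_CLEAR, C2H_EVT_HOST_CLOSE);
+ *	}
+ */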
+
+
+/* 2REG_MULTI_FUNC_CTRL(For RTL8723 Only) */
+#define WL_HWPDN_EN BIT0 /* Enable GPIO[9] as WiFi HW PDn source */
+#define WL_HWPDN_SL BIT1 /* WiFi HW PDn polarity control */
+#define WL_FUNC_EN BIT2 /* WiFi function enable */
+#define WL_HWROF_EN BIT3 /* Enable GPIO[9] as WiFi RF HW PDn source */
+#define BT_HWPDN_EN BIT16 /* Enable GPIO[11] as BT HW PDn source */
+#define BT_HWPDN_SL BIT17 /* BT HW PDn polarity control */
+#define BT_FUNC_EN BIT18 /* BT function enable */
+#define BT_HWROF_EN BIT19 /* Enable GPIO[11] as BT/GPS RF HW PDn source */
+#define GPS_HWPDN_EN BIT20 /* Enable GPIO[10] as GPS HW PDn source */
+#define GPS_HWPDN_SL BIT21 /* GPS HW PDn polarity control */
+#define GPS_FUNC_EN BIT22 /* GPS function enable */
+
+/* 3 REG_LIFECTRL_CTRL */
+#define HAL92C_EN_PKT_LIFE_TIME_BK BIT3
+#define HAL92C_EN_PKT_LIFE_TIME_BE BIT2
+#define HAL92C_EN_PKT_LIFE_TIME_VI BIT1
+#define HAL92C_EN_PKT_LIFE_TIME_VO BIT0
+
+#define HAL92C_MSDU_LIFE_TIME_UNIT 128 /* in us, said by Tim. */
+
+/* 2 8192D PartNo. */
+#define PARTNO_92D_NIC (BIT7|BIT6)
+#define PARTNO_92D_NIC_REMARK (BIT5|BIT4)
+#define PARTNO_SINGLE_BAND_VS BIT3
+#define PARTNO_SINGLE_BAND_VS_REMARK BIT1
+#define PARTNO_CONCURRENT_BAND_VC (BIT3|BIT2)
+#define PARTNO_CONCURRENT_BAND_VC_REMARK (BIT1|BIT0)
+
+/* */
+/* General definitions */
+/* */
+
+#define LAST_ENTRY_OF_TX_PKT_BUFFER_8188E 176
+#define LAST_ENTRY_OF_TX_PKT_BUFFER_8812 255
+#define LAST_ENTRY_OF_TX_PKT_BUFFER_8723B 255
+#define LAST_ENTRY_OF_TX_PKT_BUFFER_8192C 255
+#define LAST_ENTRY_OF_TX_PKT_BUFFER_DUAL_MAC 127
+
+#define POLLING_LLT_THRESHOLD 20
+#define POLLING_READY_TIMEOUT_COUNT 1000
+
+
+/* GPIO BIT */
+#define HAL_8192C_HW_GPIO_WPS_BIT BIT2
+#define HAL_8192EU_HW_GPIO_WPS_BIT BIT7
+#define HAL_8188E_HW_GPIO_WPS_BIT BIT7
+
+#endif /* __HAL_COMMON_H__ */
diff --git a/drivers/staging/rtl8723bs/include/hal_data.h b/drivers/staging/rtl8723bs/include/hal_data.h
new file mode 100644
index 000000000000..74a1db1eac38
--- /dev/null
+++ b/drivers/staging/rtl8723bs/include/hal_data.h
@@ -0,0 +1,483 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#ifndef __HAL_DATA_H__
+#define __HAL_DATA_H__
+
+#include "odm_precomp.h"
+#include <hal_btcoex.h>
+
+#include <hal_sdio.h>
+
+/* */
+/* <Roger_Notes> For RTL8723 WiFi/BT/GPS multi-function configuration. 2010.10.06. */
+/* */
+enum RT_MULTI_FUNC {
+ RT_MULTI_FUNC_NONE = 0x00,
+ RT_MULTI_FUNC_WIFI = 0x01,
+ RT_MULTI_FUNC_BT = 0x02,
+ RT_MULTI_FUNC_GPS = 0x04,
+};
+/* */
+/* <Roger_Notes> For RTL8723 WiFi PDn/GPIO polarity control configuration. 2010.10.08. */
+/* */
+enum RT_POLARITY_CTL {
+ RT_POLARITY_LOW_ACT = 0,
+ RT_POLARITY_HIGH_ACT = 1,
+};
+
+/* For RTL8723 regulator mode. by tynli. 2011.01.14. */
+enum RT_REGULATOR_MODE {
+ RT_SWITCHING_REGULATOR = 0,
+ RT_LDO_REGULATOR = 1,
+};
+
+enum RT_AMPDU_BURST {
+ RT_AMPDU_BURST_NONE = 0,
+ RT_AMPDU_BURST_92D = 1,
+ RT_AMPDU_BURST_88E = 2,
+ RT_AMPDU_BURST_8812_4 = 3,
+ RT_AMPDU_BURST_8812_8 = 4,
+ RT_AMPDU_BURST_8812_12 = 5,
+ RT_AMPDU_BURST_8812_15 = 6,
+ RT_AMPDU_BURST_8723B = 7,
+};
+
+#define CHANNEL_MAX_NUMBER (14 + 24 + 21) /* 14 is the max channel number */
+#define CHANNEL_MAX_NUMBER_2G 14
+#define CHANNEL_MAX_NUMBER_5G 54 /* Please refer to "phy_GetChnlGroup8812A" and "Hal_ReadTxPowerInfo8812A" */
+#define CHANNEL_MAX_NUMBER_5G_80M 7
+#define CHANNEL_GROUP_MAX (3 + 9) /* ch1~3, ch4~9, ch10~14 total three groups */
+#define MAX_PG_GROUP 13
+
+/* Tx Power Limit Table Size */
+#define MAX_REGULATION_NUM 4
+#define MAX_RF_PATH_NUM_IN_POWER_LIMIT_TABLE 4
+#define MAX_2_4G_BANDWITH_NUM 4
+#define MAX_RATE_SECTION_NUM 10
+#define MAX_5G_BANDWITH_NUM 4
+
+#define MAX_BASE_NUM_IN_PHY_REG_PG_2_4G 10 /* CCK:1, OFDM:1, HT:4, VHT:4 */
+#define MAX_BASE_NUM_IN_PHY_REG_PG_5G 9 /* OFDM:1, HT:4, VHT:4 */
+
+
+/* duplicate code, will move to ODM ######### */
+/* define IQK_MAC_REG_NUM 4 */
+/* define IQK_ADDA_REG_NUM 16 */
+
+/* define IQK_BB_REG_NUM 10 */
+#define IQK_BB_REG_NUM_92C 9
+#define IQK_BB_REG_NUM_92D 10
+#define IQK_BB_REG_NUM_test 6
+
+#define IQK_Matrix_Settings_NUM_92D (1 + 24 + 21)
+
+/* define HP_THERMAL_NUM 8 */
+/* duplicate code, will move to ODM ######### */
+
+enum {
+ SINGLEMAC_SINGLEPHY, /* SMSP */
+ DUALMAC_DUALPHY, /* DMDP */
+ DUALMAC_SINGLEPHY, /* DMSP */
+};
+
+#define PAGE_SIZE_128 128
+#define PAGE_SIZE_256 256
+#define PAGE_SIZE_512 512
+
+struct dm_priv {
+ u8 DM_Type;
+
+#define DYNAMIC_FUNC_BT BIT0
+
+ u8 DMFlag;
+ u8 InitDMFlag;
+ /* u8 RSVD_1; */
+
+ u32 InitODMFlag;
+ /* Upper and Lower Signal threshold for Rate Adaptive */
+ int UndecoratedSmoothedPWDB;
+ int UndecoratedSmoothedCCK;
+ int EntryMinUndecoratedSmoothedPWDB;
+ int EntryMaxUndecoratedSmoothedPWDB;
+ int MinUndecoratedPWDBForDM;
+ int LastMinUndecoratedPWDBForDM;
+
+ s32 UndecoratedSmoothedBeacon;
+
+/* duplicate code, will move to ODM ######### */
+ /* for High Power */
+ u8 bDynamicTxPowerEnable;
+ u8 LastDTPLvl;
+ u8 DynamicTxHighPowerLvl;/* Add by Jacken Tx Power Control for Near/Far Range 2008/03/06 */
+
+ /* for tx power tracking */
+ u8 bTXPowerTracking;
+ u8 TXPowercount;
+ u8 bTXPowerTrackingInit;
+ u8 TxPowerTrackControl; /* for mp mode, turn off txpwrtracking as default */
+ u8 TM_Trigger;
+
+ u8 ThermalMeter[2]; /* ThermalMeter, index 0 for RFIC0, and 1 for RFIC1 */
+ u8 ThermalValue;
+ u8 ThermalValue_LCK;
+ u8 ThermalValue_IQK;
+ u8 ThermalValue_DPK;
+ u8 bRfPiEnable;
+ /* u8 RSVD_2; */
+
+ /* for APK */
+ u32 APKoutput[2][2]; /* path A/B; output1_1a/output1_2a */
+ u8 bAPKdone;
+ u8 bAPKThermalMeterIgnore;
+ u8 bDPdone;
+ u8 bDPPathAOK;
+ u8 bDPPathBOK;
+ /* u8 RSVD_3; */
+ /* u8 RSVD_4; */
+ /* u8 RSVD_5; */
+
+ /* for IQK */
+ u32 ADDA_backup[IQK_ADDA_REG_NUM];
+ u32 IQK_MAC_backup[IQK_MAC_REG_NUM];
+ u32 IQK_BB_backup_recover[9];
+ u32 IQK_BB_backup[IQK_BB_REG_NUM];
+
+ u8 PowerIndex_backup[6];
+ u8 OFDM_index[2];
+
+ u8 bCCKinCH14;
+ u8 CCK_index;
+ u8 bDoneTxpower;
+ u8 CCK_index_HP;
+
+ u8 OFDM_index_HP[2];
+ u8 ThermalValue_HP[HP_THERMAL_NUM];
+ u8 ThermalValue_HP_index;
+ /* u8 RSVD_6; */
+
+ /* for TxPwrTracking2 */
+ s32 RegE94;
+ s32 RegE9C;
+ s32 RegEB4;
+ s32 RegEBC;
+
+ u32 TXPowerTrackingCallbackCnt; /* cosa add for debug */
+
+ u32 prv_traffic_idx; /* edca turbo */
+/* duplicate code, will move to ODM ######### */
+
+ /* Added for reading the Initial Data Rate SEL Register 0x484 during the watchdog. Used to fill the tx desc. 2011.3.21 by Thomas */
+ u8 INIDATA_RATE[32];
+};
+
+
+struct hal_com_data {
+ HAL_VERSION VersionID;
+ enum RT_MULTI_FUNC MultiFunc; /* For multi-function consideration. */
+ enum RT_POLARITY_CTL PolarityCtl; /* For Wifi PDn Polarity control. */
+ enum RT_REGULATOR_MODE RegulatorMode; /* switching regulator or LDO */
+
+ u16 FirmwareVersion;
+ u16 FirmwareVersionRev;
+ u16 FirmwareSubVersion;
+ u16 FirmwareSignature;
+
+ /* current WIFI_PHY values */
+ enum WIRELESS_MODE CurrentWirelessMode;
+ enum CHANNEL_WIDTH CurrentChannelBW;
+ enum BAND_TYPE CurrentBandType; /* 0:2.4G, 1:5G */
+ enum BAND_TYPE BandSet;
+ u8 CurrentChannel;
+ u8 CurrentCenterFrequencyIndex1;
+ u8 nCur40MhzPrimeSC;/* Control channel sub-carrier */
+ u8 nCur80MhzPrimeSC; /* used for primary 40MHz of 80MHz mode */
+
+ u16 CustomerID;
+ u16 BasicRateSet;
+ u16 ForcedDataRate;/* Force Data Rate. 0: Auto, 0x02: 1M ~ 0x6C: 54M. */
+ u32 ReceiveConfig;
+
+ /* rf_ctrl */
+ u8 rf_chip;
+ u8 rf_type;
+ u8 PackageType;
+ u8 NumTotalRFPath;
+
+ u8 InterfaceSel;
+ u8 framesync;
+ u32 framesyncC34;
+ u8 framesyncMonitor;
+ u8 DefaultInitialGain[4];
+ /* EEPROM setting. */
+ u16 EEPROMVID;
+ u16 EEPROMSVID;
+
+ u8 EEPROMCustomerID;
+ u8 EEPROMSubCustomerID;
+ u8 EEPROMVersion;
+ u8 EEPROMRegulatory;
+ u8 EEPROMThermalMeter;
+ u8 EEPROMBluetoothCoexist;
+ u8 EEPROMBluetoothType;
+ u8 EEPROMBluetoothAntNum;
+ u8 EEPROMBluetoothAntIsolation;
+ u8 EEPROMBluetoothRadioShared;
+ u8 bTXPowerDataReadFromEEPORM;
+ u8 bAPKThermalMeterIgnore;
+ u8 bDisableSWChannelPlan; /* flag of disable software change channel plan */
+
+ bool EepromOrEfuse;
+ u8 EfuseUsedPercentage;
+ u16 EfuseUsedBytes;
+ EFUSE_HAL EfuseHal;
+
+ /* 3 [2.4G] */
+ u8 Index24G_CCK_Base[MAX_RF_PATH][CHANNEL_MAX_NUMBER];
+ u8 Index24G_BW40_Base[MAX_RF_PATH][CHANNEL_MAX_NUMBER];
+ /* If only one tx, only BW20 and OFDM are used. */
+ s8 CCK_24G_Diff[MAX_RF_PATH][MAX_TX_COUNT];
+ s8 OFDM_24G_Diff[MAX_RF_PATH][MAX_TX_COUNT];
+ s8 BW20_24G_Diff[MAX_RF_PATH][MAX_TX_COUNT];
+ s8 BW40_24G_Diff[MAX_RF_PATH][MAX_TX_COUNT];
+ /* 3 [5G] */
+ u8 Index5G_BW40_Base[MAX_RF_PATH][CHANNEL_MAX_NUMBER];
+ u8 Index5G_BW80_Base[MAX_RF_PATH][CHANNEL_MAX_NUMBER_5G_80M];
+ s8 OFDM_5G_Diff[MAX_RF_PATH][MAX_TX_COUNT];
+ s8 BW20_5G_Diff[MAX_RF_PATH][MAX_TX_COUNT];
+ s8 BW40_5G_Diff[MAX_RF_PATH][MAX_TX_COUNT];
+ s8 BW80_5G_Diff[MAX_RF_PATH][MAX_TX_COUNT];
+
+ u8 Regulation2_4G;
+ u8 Regulation5G;
+
+ u8 TxPwrInPercentage;
+
+ u8 TxPwrCalibrateRate;
+ /* TX power by rate table, at most 4 RF paths. */
+ /* The register is the VHT TX power by rate offset array: */
+ /* Band: 2.4G/5G = 0/1 */
+ /* RF: at most 4x4, ABCD = 0/1/2/3 */
+ /* Rate section: CCK = 0, OFDM = 1/2, HT MCS0-15 = 3/4/5/6, VHT = 7/8/9/10/11 */
+ u8 TxPwrByRateTable;
+ u8 TxPwrByRateBand;
+ s8 TxPwrByRateOffset[TX_PWR_BY_RATE_NUM_BAND]
+ [TX_PWR_BY_RATE_NUM_RF]
+ [TX_PWR_BY_RATE_NUM_RF]
+ [TX_PWR_BY_RATE_NUM_RATE];
+ /* */
+
+ /* 2 Power Limit Table */
+ u8 TxPwrLevelCck[RF_PATH_MAX_92C_88E][CHANNEL_MAX_NUMBER];
+ u8 TxPwrLevelHT40_1S[RF_PATH_MAX_92C_88E][CHANNEL_MAX_NUMBER]; /* For HT 40MHZ pwr */
+ u8 TxPwrLevelHT40_2S[RF_PATH_MAX_92C_88E][CHANNEL_MAX_NUMBER]; /* For HT 40MHZ pwr */
+ s8 TxPwrHt20Diff[RF_PATH_MAX_92C_88E][CHANNEL_MAX_NUMBER];/* HT 20<->40 Pwr diff */
+ u8 TxPwrLegacyHtDiff[RF_PATH_MAX_92C_88E][CHANNEL_MAX_NUMBER];/* For HT<->legacy pwr diff */
+
+ /* Power Limit Table for 2.4G */
+ s8 TxPwrLimit_2_4G[MAX_REGULATION_NUM]
+ [MAX_2_4G_BANDWITH_NUM]
+ [MAX_RATE_SECTION_NUM]
+ [CHANNEL_MAX_NUMBER_2G]
+ [MAX_RF_PATH_NUM];
+
+ /* Power Limit Table for 5G */
+ s8 TxPwrLimit_5G[MAX_REGULATION_NUM]
+ [MAX_5G_BANDWITH_NUM]
+ [MAX_RATE_SECTION_NUM]
+ [CHANNEL_MAX_NUMBER_5G]
+ [MAX_RF_PATH_NUM];
+
+
+ /* Store the original power by rate value of the base of each rate section of rf path A & B */
+ u8 TxPwrByRateBase2_4G[TX_PWR_BY_RATE_NUM_RF]
+ [TX_PWR_BY_RATE_NUM_RF]
+ [MAX_BASE_NUM_IN_PHY_REG_PG_2_4G];
+ u8 TxPwrByRateBase5G[TX_PWR_BY_RATE_NUM_RF]
+ [TX_PWR_BY_RATE_NUM_RF]
+ [MAX_BASE_NUM_IN_PHY_REG_PG_5G];
+
+ /* For power group */
+ u8 PwrGroupHT20[RF_PATH_MAX_92C_88E][CHANNEL_MAX_NUMBER];
+ u8 PwrGroupHT40[RF_PATH_MAX_92C_88E][CHANNEL_MAX_NUMBER];
+
+
+
+
+ u8 PGMaxGroup;
+ u8 LegacyHTTxPowerDiff;/* Legacy to HT rate power diff */
+ /* The current Tx Power Level */
+ u8 CurrentCckTxPwrIdx;
+ u8 CurrentOfdm24GTxPwrIdx;
+ u8 CurrentBW2024GTxPwrIdx;
+ u8 CurrentBW4024GTxPwrIdx;
+
+ /* Read/write is allowed for the following hardware information variables */
+ u8 pwrGroupCnt;
+ u32 MCSTxPowerLevelOriginalOffset[MAX_PG_GROUP][16];
+ u32 CCKTxPowerLevelOriginalOffset;
+
+ u8 CrystalCap;
+ u32 AntennaTxPath; /* Antenna path Tx */
+ u32 AntennaRxPath; /* Antenna path Rx */
+
+ u8 PAType_2G;
+ u8 PAType_5G;
+ u8 LNAType_2G;
+ u8 LNAType_5G;
+ u8 ExternalPA_2G;
+ u8 ExternalLNA_2G;
+ u8 ExternalPA_5G;
+ u8 ExternalLNA_5G;
+ u8 TypeGLNA;
+ u8 TypeGPA;
+ u8 TypeALNA;
+ u8 TypeAPA;
+ u8 RFEType;
+ u8 BoardType;
+ u8 ExternalPA;
+ u8 bIQKInitialized;
+ bool bLCKInProgress;
+
+ bool bSwChnl;
+ bool bSetChnlBW;
+ bool bChnlBWInitialized;
+ bool bNeedIQK;
+
+ u8 bLedOpenDrain; /* Support Open-drain arrangement for controlling the LED. Added by Roger, 2009.10.16. */
+ u8 TxPowerTrackControl; /* for mp mode, turn off txpwrtracking as default */
+ u8 b1x1RecvCombine; /* for 1T1R receive combining */
+
+ u32 AcParam_BE; /* Original parameter for BE, used for EDCA turbo. */
+
+ struct bb_register_def PHYRegDef[4]; /* Radio A/B/C/D */
+
+ u32 RfRegChnlVal[2];
+
+ /* RDG enable */
+ bool bRDGEnable;
+
+ /* for host message to fw */
+ u8 LastHMEBoxNum;
+
+ u8 fw_ractrl;
+ u8 RegTxPause;
+ /* Beacon function related global variable. */
+ u8 RegBcnCtrlVal;
+ u8 RegFwHwTxQCtrl;
+ u8 RegReg542;
+ u8 RegCR_1;
+ u8 Reg837;
+ u8 RegRFPathS1;
+ u16 RegRRSR;
+
+ u8 CurAntenna;
+ u8 AntDivCfg;
+ u8 AntDetection;
+ u8 TRxAntDivType;
+ u8 ant_path; /* for 8723B s0/s1 selection */
+
+ u8 u1ForcedIgiLb; /* forced IGI lower bound */
+
+ u8 bDumpRxPkt;/* for debug */
+ u8 bDumpTxPkt;/* for debug */
+ u8 FwRsvdPageStartOffset; /* 2010.06.23. Added by tynli. Reserve page start offset except beacon in TxQ. */
+
+ /* 2010/08/09 MH Add CU power down mode. */
+ bool pwrdown;
+
+ /* Add for dual MAC 0--Mac0 1--Mac1 */
+ u32 interfaceIndex;
+
+ u8 OutEpQueueSel;
+ u8 OutEpNumber;
+
+ /* 2010/12/10 MH Add for USB aggregation mode dynamic scheme. */
+ bool UsbRxHighSpeedMode;
+
+ /* 2010/11/22 MH Add for slim combo debug mode selection. */
+ /* This is used to work around a drawback of the CU TSMC-A/UMC-A cut (HW auto suspend ability; close BT clock). */
+ bool SlimComboDbg;
+
+ /* u8 AMPDUDensity; */
+
+ /* Auto FSM to Turn On, include clock, isolation, power control for MAC only */
+ u8 bMacPwrCtrlOn;
+
+ u8 RegIQKFWOffload;
+ struct submit_ctx iqk_sctx;
+
+ enum RT_AMPDU_BURST AMPDUBurstMode; /* 92C may not use this, but keep it so the code compiles */
+
+ u32 sdio_himr;
+ u32 sdio_hisr;
+
+ /* SDIO Tx FIFO related. */
+ /* HIQ, MID, LOW, PUB free pages; padapter->xmitpriv.free_txpg */
+ u8 SdioTxFIFOFreePage[SDIO_TX_FREE_PG_QUEUE];
+ _lock SdioTxFIFOFreePageLock;
+ u8 SdioTxOQTMaxFreeSpace;
+ u8 SdioTxOQTFreeSpace;
+
+
+ /* SDIO Rx FIFO related. */
+ u8 SdioRxFIFOCnt;
+ u16 SdioRxFIFOSize;
+
+ u32 sdio_tx_max_len[SDIO_MAX_TX_QUEUE];/* H, N, L, used for sdio tx aggregation max length per queue */
+
+ struct dm_priv dmpriv;
+ DM_ODM_T odmpriv;
+
+ /* For Bluetooth coexistence */
+ BT_COEXIST bt_coexist;
+
+ /* Interrupt related register information. */
+ u32 SysIntrStatus;
+ u32 SysIntrMask;
+
+
+ char para_file_buf[MAX_PARA_FILE_BUF_LEN];
+ char *mac_reg;
+ u32 mac_reg_len;
+ char *bb_phy_reg;
+ u32 bb_phy_reg_len;
+ char *bb_agc_tab;
+ u32 bb_agc_tab_len;
+ char *bb_phy_reg_pg;
+ u32 bb_phy_reg_pg_len;
+ char *bb_phy_reg_mp;
+ u32 bb_phy_reg_mp_len;
+ char *rf_radio_a;
+ u32 rf_radio_a_len;
+ char *rf_radio_b;
+ u32 rf_radio_b_len;
+ char *rf_tx_pwr_track;
+ u32 rf_tx_pwr_track_len;
+ char *rf_tx_pwr_lmt;
+ u32 rf_tx_pwr_lmt_len;
+
+#ifdef CONFIG_BACKGROUND_NOISE_MONITOR
+ s16 noise[ODM_MAX_CHANNEL_NUM];
+#endif
+
+};
+
+#define GET_HAL_DATA(__padapter) ((struct hal_com_data *)((__padapter)->HalData))
+#define GET_HAL_RFPATH_NUM(__padapter) (((struct hal_com_data *)((__padapter)->HalData))->NumTotalRFPath)
+#define RT_GetInterfaceSelection(_Adapter) (GET_HAL_DATA(_Adapter)->InterfaceSel)
+#define GET_RF_TYPE(__padapter) (GET_HAL_DATA(__padapter)->rf_type)
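+
+/*
+ * Illustrative sketch (not part of the original header): typical use of the
+ * accessor macros above from driver code (RF_1T1R is the driver's usual
+ * 1T1R rf_type value).
+ *
+ *	struct hal_com_data *hal = GET_HAL_DATA(padapter);
+ *
+ *	if (GET_RF_TYPE(padapter) == RF_1T1R)
+ *		hal->NumTotalRFPath = 1;
+ */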
+
+#endif /* __HAL_DATA_H__ */
diff --git a/drivers/staging/rtl8723bs/include/hal_intf.h b/drivers/staging/rtl8723bs/include/hal_intf.h
new file mode 100644
index 000000000000..276089ad4864
--- /dev/null
+++ b/drivers/staging/rtl8723bs/include/hal_intf.h
@@ -0,0 +1,410 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#ifndef __HAL_INTF_H__
+#define __HAL_INTF_H__
+
+
+enum RTL871X_HCI_TYPE {
+ RTW_PCIE = BIT0,
+ RTW_USB = BIT1,
+ RTW_SDIO = BIT2,
+ RTW_GSPI = BIT3,
+};
+
+enum HW_VARIABLES {
+ HW_VAR_MEDIA_STATUS,
+ HW_VAR_MEDIA_STATUS1,
+ HW_VAR_SET_OPMODE,
+ HW_VAR_MAC_ADDR,
+ HW_VAR_BSSID,
+ HW_VAR_INIT_RTS_RATE,
+ HW_VAR_BASIC_RATE,
+ HW_VAR_TXPAUSE,
+ HW_VAR_BCN_FUNC,
+ HW_VAR_CORRECT_TSF,
+ HW_VAR_CHECK_BSSID,
+ HW_VAR_MLME_DISCONNECT,
+ HW_VAR_MLME_SITESURVEY,
+ HW_VAR_MLME_JOIN,
+ HW_VAR_ON_RCR_AM,
+ HW_VAR_OFF_RCR_AM,
+ HW_VAR_BEACON_INTERVAL,
+ HW_VAR_SLOT_TIME,
+ HW_VAR_RESP_SIFS,
+ HW_VAR_ACK_PREAMBLE,
+ HW_VAR_SEC_CFG,
+ HW_VAR_SEC_DK_CFG,
+ HW_VAR_BCN_VALID,
+ HW_VAR_RF_TYPE,
+ HW_VAR_DM_FLAG,
+ HW_VAR_DM_FUNC_OP,
+ HW_VAR_DM_FUNC_SET,
+ HW_VAR_DM_FUNC_CLR,
+ HW_VAR_CAM_EMPTY_ENTRY,
+ HW_VAR_CAM_INVALID_ALL,
+ HW_VAR_CAM_WRITE,
+ HW_VAR_CAM_READ,
+ HW_VAR_AC_PARAM_VO,
+ HW_VAR_AC_PARAM_VI,
+ HW_VAR_AC_PARAM_BE,
+ HW_VAR_AC_PARAM_BK,
+ HW_VAR_ACM_CTRL,
+ HW_VAR_AMPDU_MIN_SPACE,
+ HW_VAR_AMPDU_FACTOR,
+ HW_VAR_RXDMA_AGG_PG_TH,
+ HW_VAR_SET_RPWM,
+ HW_VAR_CPWM,
+ HW_VAR_H2C_FW_PWRMODE,
+ HW_VAR_H2C_PS_TUNE_PARAM,
+ HW_VAR_H2C_FW_JOINBSSRPT,
+ HW_VAR_FWLPS_RF_ON,
+ HW_VAR_H2C_FW_P2P_PS_OFFLOAD,
+ HW_VAR_TDLS_WRCR,
+ HW_VAR_TDLS_INIT_CH_SEN,
+ HW_VAR_TDLS_RS_RCR,
+ HW_VAR_TDLS_DONE_CH_SEN,
+ HW_VAR_INITIAL_GAIN,
+ HW_VAR_TRIGGER_GPIO_0,
+ HW_VAR_BT_SET_COEXIST,
+ HW_VAR_BT_ISSUE_DELBA,
+ HW_VAR_CURRENT_ANTENNA,
+ HW_VAR_ANTENNA_DIVERSITY_LINK,
+ HW_VAR_ANTENNA_DIVERSITY_SELECT,
+ HW_VAR_SWITCH_EPHY_WoWLAN,
+ HW_VAR_EFUSE_USAGE,
+ HW_VAR_EFUSE_BYTES,
+ HW_VAR_EFUSE_BT_USAGE,
+ HW_VAR_EFUSE_BT_BYTES,
+ HW_VAR_FIFO_CLEARN_UP,
+ HW_VAR_CHECK_TXBUF,
+ HW_VAR_PCIE_STOP_TX_DMA,
+ HW_VAR_APFM_ON_MAC, /* Auto FSM to Turn On, include clock, isolation, power control for MAC only */
+#ifdef CONFIG_WOWLAN
+ HW_VAR_WOWLAN,
+ HW_VAR_WAKEUP_REASON,
+ HW_VAR_RPWM_TOG,
+#endif
+#ifdef CONFIG_AP_WOWLAN
+ HW_VAR_AP_WOWLAN,
+#endif
+ HW_VAR_SYS_CLKR,
+ /* The valid upper NAV range for the HW to update; if the actual value is larger than the upper range, the HW won't update it. */
+ /* Unit: microseconds. 0 means this function is disabled. */
+ HW_VAR_NAV_UPPER,
+ HW_VAR_C2H_HANDLE,
+ HW_VAR_RPT_TIMER_SETTING,
+ HW_VAR_TX_RPT_MAX_MACID,
+ HW_VAR_H2C_MEDIA_STATUS_RPT,
+ HW_VAR_CHK_HI_QUEUE_EMPTY,
+ HW_VAR_DL_BCN_SEL,
+ HW_VAR_AMPDU_MAX_TIME,
+ HW_VAR_WIRELESS_MODE,
+ HW_VAR_USB_MODE,
+ HW_VAR_PORT_SWITCH,
+ HW_VAR_DO_IQK,
+ HW_VAR_DM_IN_LPS,
+ HW_VAR_SET_REQ_FW_PS,
+ HW_VAR_FW_PS_STATE,
+ HW_VAR_SOUNDING_ENTER,
+ HW_VAR_SOUNDING_LEAVE,
+ HW_VAR_SOUNDING_RATE,
+ HW_VAR_SOUNDING_STATUS,
+ HW_VAR_SOUNDING_FW_NDPA,
+ HW_VAR_SOUNDING_CLK,
+ HW_VAR_DL_RSVD_PAGE,
+ HW_VAR_MACID_SLEEP,
+ HW_VAR_MACID_WAKEUP,
+};
+
+enum HAL_DEF_VARIABLE {
+ HAL_DEF_UNDERCORATEDSMOOTHEDPWDB,
+ HAL_DEF_IS_SUPPORT_ANT_DIV,
+ HAL_DEF_CURRENT_ANTENNA,
+ HAL_DEF_DRVINFO_SZ,
+ HAL_DEF_MAX_RECVBUF_SZ,
+ HAL_DEF_RX_PACKET_OFFSET,
+ HAL_DEF_DBG_DUMP_RXPKT,/* for dbg */
+ HAL_DEF_DBG_DM_FUNC,/* for dbg */
+ HAL_DEF_RA_DECISION_RATE,
+ HAL_DEF_RA_SGI,
+ HAL_DEF_PT_PWR_STATUS,
+ HAL_DEF_TX_LDPC, /* LDPC support */
+ HAL_DEF_RX_LDPC, /* LDPC support */
+ HAL_DEF_TX_STBC, /* TX STBC support */
+ HAL_DEF_RX_STBC, /* RX STBC support */
+ HAL_DEF_EXPLICIT_BEAMFORMER,/* Explicit Compressed Steering Capable */
+ HAL_DEF_EXPLICIT_BEAMFORMEE,/* Explicit Compressed Beamforming Feedback Capable */
+ HW_VAR_MAX_RX_AMPDU_FACTOR,
+ HW_DEF_RA_INFO_DUMP,
+ HAL_DEF_DBG_DUMP_TXPKT,
+ HW_DEF_FA_CNT_DUMP,
+ HW_DEF_ODM_DBG_FLAG,
+ HW_DEF_ODM_DBG_LEVEL,
+ HAL_DEF_TX_PAGE_SIZE,
+ HAL_DEF_TX_PAGE_BOUNDARY,
+ HAL_DEF_TX_PAGE_BOUNDARY_WOWLAN,
+ HAL_DEF_ANT_DETECT,/* to do for 8723a */
+ HAL_DEF_PCI_SUUPORT_L1_BACKDOOR, /* Determine if the L1 Backdoor setting is turned on. */
+ HAL_DEF_PCI_AMD_L1_SUPPORT,
+ HAL_DEF_PCI_ASPM_OSC, /* Support for ASPM OSC, added by Roger, 2013.03.27. */
+ HAL_DEF_MACID_SLEEP, /* Support for MACID sleep */
+ HAL_DEF_DBG_RX_INFO_DUMP,
+};
+
+enum HAL_ODM_VARIABLE {
+ HAL_ODM_STA_INFO,
+ HAL_ODM_P2P_STATE,
+ HAL_ODM_WIFI_DISPLAY_STATE,
+ HAL_ODM_NOISE_MONITOR,
+};
+
+enum HAL_INTF_PS_FUNC {
+ HAL_USB_SELECT_SUSPEND,
+ HAL_MAX_ID,
+};
+
+typedef s32 (*c2h_id_filter)(u8 *c2h_evt);
+
+struct hal_ops {
+ u32 (*hal_power_on)(struct adapter *padapter);
+ void (*hal_power_off)(struct adapter *padapter);
+ u32 (*hal_init)(struct adapter *padapter);
+ u32 (*hal_deinit)(struct adapter *padapter);
+
+ void (*free_hal_data)(struct adapter *padapter);
+
+ u32 (*inirp_init)(struct adapter *padapter);
+ u32 (*inirp_deinit)(struct adapter *padapter);
+ void (*irp_reset)(struct adapter *padapter);
+
+ s32 (*init_xmit_priv)(struct adapter *padapter);
+ void (*free_xmit_priv)(struct adapter *padapter);
+
+ s32 (*init_recv_priv)(struct adapter *padapter);
+ void (*free_recv_priv)(struct adapter *padapter);
+
+ void (*dm_init)(struct adapter *padapter);
+ void (*dm_deinit)(struct adapter *padapter);
+ void (*read_chip_version)(struct adapter *padapter);
+
+ void (*init_default_value)(struct adapter *padapter);
+
+ void (*intf_chip_configure)(struct adapter *padapter);
+
+ void (*read_adapter_info)(struct adapter *padapter);
+
+ void (*enable_interrupt)(struct adapter *padapter);
+ void (*disable_interrupt)(struct adapter *padapter);
+ u8 (*check_ips_status)(struct adapter *padapter);
+ s32 (*interrupt_handler)(struct adapter *padapter);
+ void (*clear_interrupt)(struct adapter *padapter);
+ void (*set_bwmode_handler)(struct adapter *padapter, enum CHANNEL_WIDTH Bandwidth, u8 Offset);
+ void (*set_channel_handler)(struct adapter *padapter, u8 channel);
+ void (*set_chnl_bw_handler)(struct adapter *padapter, u8 channel, enum CHANNEL_WIDTH Bandwidth, u8 Offset40, u8 Offset80);
+
+ void (*set_tx_power_level_handler)(struct adapter *padapter, u8 channel);
+ void (*get_tx_power_level_handler)(struct adapter *padapter, s32 *powerlevel);
+
+ void (*hal_dm_watchdog)(struct adapter *padapter);
+ void (*hal_dm_watchdog_in_lps)(struct adapter *padapter);
+
+
+ void (*SetHwRegHandler)(struct adapter *padapter, u8 variable, u8 *val);
+ void (*GetHwRegHandler)(struct adapter *padapter, u8 variable, u8 *val);
+
+ void (*SetHwRegHandlerWithBuf)(struct adapter *padapter, u8 variable, u8 *pbuf, int len);
+
+ u8 (*GetHalDefVarHandler)(struct adapter *padapter, enum HAL_DEF_VARIABLE eVariable, void *pValue);
+ u8 (*SetHalDefVarHandler)(struct adapter *padapter, enum HAL_DEF_VARIABLE eVariable, void *pValue);
+
+ void (*GetHalODMVarHandler)(struct adapter *padapter, enum HAL_ODM_VARIABLE eVariable, void *pValue1, void *pValue2);
+ void (*SetHalODMVarHandler)(struct adapter *padapter, enum HAL_ODM_VARIABLE eVariable, void *pValue1, bool bSet);
+
+ void (*UpdateRAMaskHandler)(struct adapter *padapter, u32 mac_id, u8 rssi_level);
+ void (*SetBeaconRelatedRegistersHandler)(struct adapter *padapter);
+
+ void (*Add_RateATid)(struct adapter *padapter, u32 bitmap, u8 *arg, u8 rssi_level);
+
+ void (*run_thread)(struct adapter *padapter);
+ void (*cancel_thread)(struct adapter *padapter);
+
+ u8 (*interface_ps_func)(struct adapter *padapter, enum HAL_INTF_PS_FUNC efunc_id, u8 *val);
+
+ s32 (*hal_xmit)(struct adapter *padapter, struct xmit_frame *pxmitframe);
+ /*
+ * mgnt_xmit should be implemented to run in interrupt context
+ */
+ s32 (*mgnt_xmit)(struct adapter *padapter, struct xmit_frame *pmgntframe);
+ s32 (*hal_xmitframe_enqueue)(struct adapter *padapter, struct xmit_frame *pxmitframe);
+
+ u32 (*read_bbreg)(struct adapter *padapter, u32 RegAddr, u32 BitMask);
+ void (*write_bbreg)(struct adapter *padapter, u32 RegAddr, u32 BitMask, u32 Data);
+ u32 (*read_rfreg)(struct adapter *padapter, u8 eRFPath, u32 RegAddr, u32 BitMask);
+ void (*write_rfreg)(struct adapter *padapter, u8 eRFPath, u32 RegAddr, u32 BitMask, u32 Data);
+
+ void (*EfusePowerSwitch)(struct adapter *padapter, u8 bWrite, u8 PwrState);
+ void (*BTEfusePowerSwitch)(struct adapter *padapter, u8 bWrite, u8 PwrState);
+ void (*ReadEFuse)(struct adapter *padapter, u8 efuseType, u16 _offset, u16 _size_byte, u8 *pbuf, bool bPseudoTest);
+ void (*EFUSEGetEfuseDefinition)(struct adapter *padapter, u8 efuseType, u8 type, void *pOut, bool bPseudoTest);
+ u16 (*EfuseGetCurrentSize)(struct adapter *padapter, u8 efuseType, bool bPseudoTest);
+ int (*Efuse_PgPacketRead)(struct adapter *padapter, u8 offset, u8 *data, bool bPseudoTest);
+ int (*Efuse_PgPacketWrite)(struct adapter *padapter, u8 offset, u8 word_en, u8 *data, bool bPseudoTest);
+ u8 (*Efuse_WordEnableDataWrite)(struct adapter *padapter, u16 efuse_addr, u8 word_en, u8 *data, bool bPseudoTest);
+ bool (*Efuse_PgPacketWrite_BT)(struct adapter *padapter, u8 offset, u8 word_en, u8 *data, bool bPseudoTest);
+
+ s32 (*xmit_thread_handler)(struct adapter *padapter);
+ void (*hal_notch_filter)(struct adapter *adapter, bool enable);
+ void (*hal_reset_security_engine)(struct adapter *adapter);
+ s32 (*c2h_handler)(struct adapter *padapter, u8 *c2h_evt);
+ c2h_id_filter c2h_id_filter_ccx;
+
+ s32 (*fill_h2c_cmd)(struct adapter *, u8 ElementID, u32 CmdLen, u8 *pCmdBuffer);
+};
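+
+/*
+ * Note: the rtw_hal_*() wrappers declared below are expected to dispatch
+ * through this ops table attached to the adapter (e.g. rtw_hal_init()
+ * invoking the hal_init callback); the exact adapter field holding the
+ * table is not defined in this header.
+ */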
+
+enum RT_EEPROM_TYPE {
+ EEPROM_93C46,
+ EEPROM_93C56,
+ EEPROM_BOOT_EFUSE,
+};
+
+#define RF_CHANGE_BY_INIT 0
+#define RF_CHANGE_BY_IPS BIT28
+#define RF_CHANGE_BY_PS BIT29
+#define RF_CHANGE_BY_HW BIT30
+#define RF_CHANGE_BY_SW BIT31
+
+#define GET_EEPROM_EFUSE_PRIV(adapter) (&adapter->eeprompriv)
+#define is_boot_from_eeprom(adapter) (adapter->eeprompriv.EepromOrEfuse)
+
+enum wowlan_subcode {
+ WOWLAN_PATTERN_MATCH = 1,
+ WOWLAN_MAGIC_PACKET = 2,
+ WOWLAN_UNICAST = 3,
+ WOWLAN_SET_PATTERN = 4,
+ WOWLAN_DUMP_REG = 5,
+ WOWLAN_ENABLE = 6,
+ WOWLAN_DISABLE = 7,
+ WOWLAN_STATUS = 8,
+ WOWLAN_DEBUG_RELOAD_FW = 9,
+ WOWLAN_DEBUG_1 = 10,
+ WOWLAN_DEBUG_2 = 11,
+ WOWLAN_AP_ENABLE = 12,
+ WOWLAN_AP_DISABLE = 13
+};
+
+struct wowlan_ioctl_param{
+ unsigned int subcode;
+ unsigned int subcode_value;
+ unsigned int wakeup_reason;
+ unsigned int len;
+ unsigned char pattern[0];
+};
+
+#define Rx_Pairwisekey 0x01
+#define Rx_GTK 0x02
+#define Rx_DisAssoc 0x04
+#define Rx_DeAuth 0x08
+#define Rx_ARPReq 0x09
+#define FWDecisionDisconnect 0x10
+#define Rx_MagicPkt 0x21
+#define Rx_UnicastPkt 0x22
+#define Rx_PatternPkt 0x23
+#define RX_PNOWakeUp 0x55
+#define AP_WakeUp 0x66
+
+void rtw_hal_def_value_init(struct adapter *padapter);
+
+void rtw_hal_free_data(struct adapter *padapter);
+
+void rtw_hal_dm_init(struct adapter *padapter);
+void rtw_hal_dm_deinit(struct adapter *padapter);
+
+uint rtw_hal_init(struct adapter *padapter);
+uint rtw_hal_deinit(struct adapter *padapter);
+void rtw_hal_stop(struct adapter *padapter);
+void rtw_hal_set_hwreg(struct adapter *padapter, u8 variable, u8 *val);
+void rtw_hal_get_hwreg(struct adapter *padapter, u8 variable, u8 *val);
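+
+/*
+ * Usage sketch (illustrative): the HW_VARIABLES IDs above are passed as the
+ * "variable" argument and the value is marshalled through the u8 pointer,
+ * e.g. (with "padapter" and "mac" as placeholder names):
+ *
+ *	u8 mac[ETH_ALEN];
+ *	... fill mac ...
+ *	rtw_hal_set_hwreg(padapter, HW_VAR_MAC_ADDR, mac);
+ */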
+
+void rtw_hal_set_hwreg_with_buf(struct adapter *padapter, u8 variable, u8 *pbuf, int len);
+
+void rtw_hal_chip_configure(struct adapter *padapter);
+void rtw_hal_read_chip_info(struct adapter *padapter);
+void rtw_hal_read_chip_version(struct adapter *padapter);
+
+u8 rtw_hal_set_def_var(struct adapter *padapter, enum HAL_DEF_VARIABLE eVariable, void *pValue);
+u8 rtw_hal_get_def_var(struct adapter *padapter, enum HAL_DEF_VARIABLE eVariable, void *pValue);
+
+void rtw_hal_set_odm_var(struct adapter *padapter, enum HAL_ODM_VARIABLE eVariable, void *pValue1, bool bSet);
+void rtw_hal_get_odm_var(struct adapter *padapter, enum HAL_ODM_VARIABLE eVariable, void *pValue1, void *pValue2);
+
+void rtw_hal_enable_interrupt(struct adapter *padapter);
+void rtw_hal_disable_interrupt(struct adapter *padapter);
+
+u8 rtw_hal_check_ips_status(struct adapter *padapter);
+
+s32 rtw_hal_xmitframe_enqueue(struct adapter *padapter, struct xmit_frame *pxmitframe);
+s32 rtw_hal_xmit(struct adapter *padapter, struct xmit_frame *pxmitframe);
+s32 rtw_hal_mgnt_xmit(struct adapter *padapter, struct xmit_frame *pmgntframe);
+
+s32 rtw_hal_init_xmit_priv(struct adapter *padapter);
+void rtw_hal_free_xmit_priv(struct adapter *padapter);
+
+s32 rtw_hal_init_recv_priv(struct adapter *padapter);
+void rtw_hal_free_recv_priv(struct adapter *padapter);
+
+void rtw_hal_update_ra_mask(struct sta_info *psta, u8 rssi_level);
+void rtw_hal_add_ra_tid(struct adapter *padapter, u32 bitmap, u8 *arg, u8 rssi_level);
+
+void rtw_hal_start_thread(struct adapter *padapter);
+void rtw_hal_stop_thread(struct adapter *padapter);
+
+void rtw_hal_bcn_related_reg_setting(struct adapter *padapter);
+
+u32 rtw_hal_read_bbreg(struct adapter *padapter, u32 RegAddr, u32 BitMask);
+void rtw_hal_write_bbreg(struct adapter *padapter, u32 RegAddr, u32 BitMask, u32 Data);
+u32 rtw_hal_read_rfreg(struct adapter *padapter, u32 eRFPath, u32 RegAddr, u32 BitMask);
+void rtw_hal_write_rfreg(struct adapter *padapter, u32 eRFPath, u32 RegAddr, u32 BitMask, u32 Data);
+
+#define PHY_QueryBBReg(Adapter, RegAddr, BitMask) rtw_hal_read_bbreg((Adapter), (RegAddr), (BitMask))
+#define PHY_SetBBReg(Adapter, RegAddr, BitMask, Data) rtw_hal_write_bbreg((Adapter), (RegAddr), (BitMask), (Data))
+#define PHY_QueryRFReg(Adapter, eRFPath, RegAddr, BitMask) rtw_hal_read_rfreg((Adapter), (eRFPath), (RegAddr), (BitMask))
+#define PHY_SetRFReg(Adapter, eRFPath, RegAddr, BitMask, Data) rtw_hal_write_rfreg((Adapter), (eRFPath), (RegAddr), (BitMask), (Data))
+
+#define PHY_SetMacReg PHY_SetBBReg
+#define PHY_QueryMacReg PHY_QueryBBReg
+
+void rtw_hal_set_chan(struct adapter *padapter, u8 channel);
+void rtw_hal_set_chnl_bw(struct adapter *padapter, u8 channel, enum CHANNEL_WIDTH Bandwidth, u8 Offset40, u8 Offset80);
+void rtw_hal_dm_watchdog(struct adapter *padapter);
+void rtw_hal_dm_watchdog_in_lps(struct adapter *padapter);
+
+s32 rtw_hal_xmit_thread_handler(struct adapter *padapter);
+
+void rtw_hal_notch_filter(struct adapter *adapter, bool enable);
+void rtw_hal_reset_security_engine(struct adapter *adapter);
+
+bool rtw_hal_c2h_valid(struct adapter *adapter, u8 *buf);
+s32 rtw_hal_c2h_evt_read(struct adapter *adapter, u8 *buf);
+s32 rtw_hal_c2h_handler(struct adapter *adapter, u8 *c2h_evt);
+c2h_id_filter rtw_hal_c2h_id_filter_ccx(struct adapter *adapter);
+
+s32 rtw_hal_is_disable_sw_channel_plan(struct adapter *padapter);
+
+s32 rtw_hal_macid_sleep(struct adapter *padapter, u32 macid);
+s32 rtw_hal_macid_wakeup(struct adapter *padapter, u32 macid);
+
+s32 rtw_hal_fill_h2c_cmd(struct adapter *, u8 ElementID, u32 CmdLen, u8 *pCmdBuffer);
+
+#endif /* __HAL_INTF_H__ */
diff --git a/drivers/staging/rtl8723bs/include/hal_pg.h b/drivers/staging/rtl8723bs/include/hal_pg.h
new file mode 100644
index 000000000000..ba2a0b0c5b2f
--- /dev/null
+++ b/drivers/staging/rtl8723bs/include/hal_pg.h
@@ -0,0 +1,81 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+
+#ifndef __HAL_PG_H__
+#define __HAL_PG_H__
+
+#define MAX_RF_PATH 4
+/* MAX_TX_COUNT must always be set to 4, otherwise the read efuse table
+ * sequence will be wrong.
+ */
+#define MAX_TX_COUNT 4
+
+/* For the VHT series TX power by rate table. */
+/* The TX power by rate offset array is indexed as: */
+/* Band: 2.4G/5G = 0/1 */
+/* RF path: at most 4 (A/B/C/D = 0/1/2/3) */
+/* Rate section: CCK = 0, OFDM = 1/2, HT MCS0-15 = 3/4/5/6, VHT = 7/8/9/10/11 */
+#define TX_PWR_BY_RATE_NUM_BAND 2
+#define TX_PWR_BY_RATE_NUM_RF 4
+#define TX_PWR_BY_RATE_NUM_RATE 84
+#define MAX_RF_PATH_NUM 2
+#define MAX_CHNL_GROUP_24G 6
+#define EEPROM_DEFAULT_BOARD_OPTION 0x00
+
+/* EEPROM/Efuse PG Offset for 8723BE/8723BU/8723BS */
+/* 0x10 ~ 0x63 = TX power area. */
+#define EEPROM_TX_PWR_INX_8723B 0x10
+/* New EFUSE default value */
+#define EEPROM_DEFAULT_24G_INDEX 0x2D
+#define EEPROM_DEFAULT_24G_HT20_DIFF 0X02
+#define EEPROM_DEFAULT_24G_OFDM_DIFF 0X04
+#define EEPROM_Default_ThermalMeter_8723B 0x18
+#define EEPROM_Default_CrystalCap_8723B 0x20
+
+#define EEPROM_ChannelPlan_8723B 0xB8
+#define EEPROM_XTAL_8723B 0xB9
+#define EEPROM_THERMAL_METER_8723B 0xBA
+
+#define EEPROM_RF_BOARD_OPTION_8723B 0xC1
+#define EEPROM_RF_BT_SETTING_8723B 0xC3
+#define EEPROM_VERSION_8723B 0xC4
+#define EEPROM_CustomID_8723B 0xC5
+#define EEPROM_DEFAULT_DIFF 0XFE
+
+/* RTL8723BS */
+#define EEPROM_MAC_ADDR_8723BS 0x11A
+#define EEPROM_Voltage_ADDR_8723B 0x8
+#define RTL_EEPROM_ID 0x8129
+
+struct TxPowerInfo24G {
+ u8 IndexCCK_Base[MAX_RF_PATH][MAX_CHNL_GROUP_24G];
+ u8 IndexBW40_Base[MAX_RF_PATH][MAX_CHNL_GROUP_24G];
+ /* If only one tx, only BW20 and OFDM are used. */
+ s8 CCK_Diff[MAX_RF_PATH][MAX_TX_COUNT];
+ s8 OFDM_Diff[MAX_RF_PATH][MAX_TX_COUNT];
+ s8 BW20_Diff[MAX_RF_PATH][MAX_TX_COUNT];
+ s8 BW40_Diff[MAX_RF_PATH][MAX_TX_COUNT];
+};
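+
+/*
+ * Reading sketch (illustrative; "pwr_info" is a hypothetical instance filled
+ * from the efuse/EEPROM map):
+ *
+ *	u8 cck_base = pwr_info.IndexCCK_Base[0][0]; /* path A, first channel group */
+ *	s8 ht20_diff = pwr_info.BW20_Diff[0][0];    /* path A, 1 spatial stream */
+ *
+ * How channels map onto the MAX_CHNL_GROUP_24G groups is chip specific.
+ */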
+
+enum {
+ Ant_x2 = 0,
+ Ant_x1 = 1
+};
+
+enum {
+ BT_RTL8723B = 8,
+};
+
+#endif
diff --git a/drivers/staging/rtl8723bs/include/hal_phy.h b/drivers/staging/rtl8723bs/include/hal_phy.h
new file mode 100644
index 000000000000..15f192697957
--- /dev/null
+++ b/drivers/staging/rtl8723bs/include/hal_phy.h
@@ -0,0 +1,183 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#ifndef __HAL_PHY_H__
+#define __HAL_PHY_H__
+
+
+#if DISABLE_BB_RF
+#define HAL_FW_ENABLE 0
+#define HAL_MAC_ENABLE 0
+#define HAL_BB_ENABLE 0
+#define HAL_RF_ENABLE 0
+#else /* FPGA_PHY and ASIC */
+#define HAL_FW_ENABLE 1
+#define HAL_MAC_ENABLE 1
+#define HAL_BB_ENABLE 1
+#define HAL_RF_ENABLE 1
+#endif
+
+#define RF6052_MAX_TX_PWR 0x3F
+#define RF6052_MAX_REG_88E 0xFF
+#define RF6052_MAX_REG_92C 0x7F
+
+#define RF6052_MAX_REG \
+ ((RF6052_MAX_REG_88E > RF6052_MAX_REG_92C) ? RF6052_MAX_REG_88E : RF6052_MAX_REG_92C)
+
+#define GET_RF6052_REAL_MAX_REG(_Adapter) RF6052_MAX_REG_92C
+
+#define RF6052_MAX_PATH 2
+
+/* Antenna detection method, i.e. using single-tone detection or the RSSI reported from each detected antenna. */
+/* Added by Roger, 2013.05.22. */
+#define ANT_DETECT_BY_SINGLE_TONE BIT0
+#define ANT_DETECT_BY_RSSI BIT1
+#define IS_ANT_DETECT_SUPPORT_SINGLE_TONE(__Adapter) ((GET_HAL_DATA(__Adapter)->AntDetection) & ANT_DETECT_BY_SINGLE_TONE)
+#define IS_ANT_DETECT_SUPPORT_RSSI(__Adapter) ((GET_HAL_DATA(__Adapter)->AntDetection) & ANT_DETECT_BY_RSSI)
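+
+/*
+ * Usage sketch (illustrative): AntDetection is a bitmap, so callers typically
+ * branch on the supported method, e.g.:
+ *
+ *	if (IS_ANT_DETECT_SUPPORT_RSSI(padapter))
+ *		... use per-antenna RSSI reports ...
+ *	else if (IS_ANT_DETECT_SUPPORT_SINGLE_TONE(padapter))
+ *		... use single-tone detection ...
+ */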
+
+
+/*--------------------------Define Parameters-------------------------------*/
+enum BAND_TYPE {
+ BAND_ON_2_4G = 0,
+ BAND_ON_5G,
+ BAND_ON_BOTH,
+ BANDMAX
+};
+
+enum RF_TYPE {
+ RF_TYPE_MIN = 0, /* 0 */
+ RF_8225 = 1, /* 1 11b/g RF for verification only */
+ RF_8256 = 2, /* 2 11b/g/n */
+ RF_8258 = 3, /* 3 11a/b/g/n RF */
+ RF_6052 = 4, /* 4 11b/g/n RF */
+ RF_PSEUDO_11N = 5, /* 5, it is a temporary RF. */
+ RF_TYPE_MAX
+};
+
+enum RF_PATH {
+ RF_PATH_A = 0,
+ RF_PATH_B,
+ RF_PATH_C,
+ RF_PATH_D
+};
+
+#define TX_1S 0
+#define TX_2S 1
+#define TX_3S 2
+#define TX_4S 3
+
+#define RF_PATH_MAX_92C_88E 2
+#define RF_PATH_MAX_90_8812 4 /* Max RF number 90 support */
+
+enum ANTENNA_PATH {
+ ANTENNA_NONE = 0,
+ ANTENNA_D = 1,
+ ANTENNA_C = 2,
+ ANTENNA_CD = 3,
+ ANTENNA_B = 4,
+ ANTENNA_BD = 5,
+ ANTENNA_BC = 6,
+ ANTENNA_BCD = 7,
+ ANTENNA_A = 8,
+ ANTENNA_AD = 9,
+ ANTENNA_AC = 10,
+ ANTENNA_ACD = 11,
+ ANTENNA_AB = 12,
+ ANTENNA_ABD = 13,
+ ANTENNA_ABC = 14,
+ ANTENNA_ABCD = 15
+};
+
+enum RF_CONTENT {
+ radioa_txt = 0x1000,
+ radiob_txt = 0x1001,
+ radioc_txt = 0x1002,
+ radiod_txt = 0x1003
+};
+
+enum BaseBand_Config_Type {
+ BaseBand_Config_PHY_REG = 0,
+ BaseBand_Config_AGC_TAB = 1,
+ BaseBand_Config_AGC_TAB_2G = 2,
+ BaseBand_Config_AGC_TAB_5G = 3,
+ BaseBand_Config_PHY_REG_PG
+};
+
+enum HW_BLOCK {
+ HW_BLOCK_MAC = 0,
+ HW_BLOCK_PHY0 = 1,
+ HW_BLOCK_PHY1 = 2,
+ HW_BLOCK_RF = 3,
+ HW_BLOCK_MAXIMUM = 4, /* Never use this */
+};
+
+enum WIRELESS_MODE {
+ WIRELESS_MODE_UNKNOWN = 0x00,
+ WIRELESS_MODE_A = 0x01,
+ WIRELESS_MODE_B = 0x02,
+ WIRELESS_MODE_G = 0x04,
+ WIRELESS_MODE_AUTO = 0x08,
+ WIRELESS_MODE_N_24G = 0x10,
+ WIRELESS_MODE_N_5G = 0x20,
+ WIRELESS_MODE_AC_5G = 0x40,
+ WIRELESS_MODE_AC_24G = 0x80,
+ WIRELESS_MODE_AC_ONLY = 0x100,
+};
+
+enum SwChnlCmdID {
+ CmdID_End,
+ CmdID_SetTxPowerLevel,
+ CmdID_BBRegWrite10,
+ CmdID_WritePortUlong,
+ CmdID_WritePortUshort,
+ CmdID_WritePortUchar,
+ CmdID_RF_WriteReg,
+};
+
+struct SwChnlCmd {
+ enum SwChnlCmdID CmdID;
+ u32 Para1;
+ u32 Para2;
+ u32 msDelay;
+};
+
+struct R_ANTENNA_SELECT_OFDM {
+#ifdef __LITTLE_ENDIAN
+ u32 r_tx_antenna:4;
+ u32 r_ant_l:4;
+ u32 r_ant_non_ht:4;
+ u32 r_ant_ht1:4;
+ u32 r_ant_ht2:4;
+ u32 r_ant_ht_s1:4;
+ u32 r_ant_non_ht_s1:4;
+ u32 OFDM_TXSC:2;
+ u32 Reserved:2;
+#else
+ u32 Reserved:2;
+ u32 OFDM_TXSC:2;
+ u32 r_ant_non_ht_s1:4;
+ u32 r_ant_ht_s1:4;
+ u32 r_ant_ht2:4;
+ u32 r_ant_ht1:4;
+ u32 r_ant_non_ht:4;
+ u32 r_ant_l:4;
+ u32 r_tx_antenna:4;
+#endif
+};
+
+/*--------------------------Exported Function prototype---------------------*/
+
+#endif /* __HAL_PHY_H__ */
diff --git a/drivers/staging/rtl8723bs/include/hal_phy_reg.h b/drivers/staging/rtl8723bs/include/hal_phy_reg.h
new file mode 100644
index 000000000000..518095269497
--- /dev/null
+++ b/drivers/staging/rtl8723bs/include/hal_phy_reg.h
@@ -0,0 +1,25 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#ifndef __HAL_PHY_REG_H__
+#define __HAL_PHY_REG_H__
+
+/* for PutRFRegsetting & GetRFRegSetting BitMask */
+/* if (RTL92SE_FPGA_VERIFY == 1) */
+/* define bRFRegOffsetMask 0xfff */
+/* else */
+#define bRFRegOffsetMask 0xfffff
+/* endif */
+
+#endif /* __HAL_PHY_REG_H__ */
diff --git a/drivers/staging/rtl8723bs/include/hal_sdio.h b/drivers/staging/rtl8723bs/include/hal_sdio.h
new file mode 100644
index 000000000000..691a02e980a3
--- /dev/null
+++ b/drivers/staging/rtl8723bs/include/hal_sdio.h
@@ -0,0 +1,26 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#ifndef __HAL_SDIO_H_
+#define __HAL_SDIO_H_
+
+#define ffaddr2deviceId(pdvobj, addr) (pdvobj->Queue2Pipe[addr])
+
+u8 rtw_hal_sdio_max_txoqt_free_space(struct adapter *padapter);
+u8 rtw_hal_sdio_query_tx_freepage(struct adapter *padapter, u8 PageIdx, u8 RequiredPageNum);
+void rtw_hal_sdio_update_tx_freepage(struct adapter *padapter, u8 PageIdx, u8 RequiredPageNum);
+void rtw_hal_set_sdio_tx_max_length(struct adapter *padapter, u8 numHQ, u8 numNQ, u8 numLQ, u8 numPubQ);
+u32 rtw_hal_get_sdio_tx_max_length(struct adapter *padapter, u8 queue_idx);
+
+#endif /* __HAL_SDIO_H_ */
diff --git a/drivers/staging/rtl8723bs/include/ieee80211.h b/drivers/staging/rtl8723bs/include/ieee80211.h
new file mode 100644
index 000000000000..6dc6dc73d72f
--- /dev/null
+++ b/drivers/staging/rtl8723bs/include/ieee80211.h
@@ -0,0 +1,1345 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#ifndef __IEEE80211_H
+#define __IEEE80211_H
+
+#include <linux/ieee80211.h>
+
+#define MGMT_QUEUE_NUM 5
+
+#define ETH_ALEN 6
+#define ETH_TYPE_LEN 2
+#define PAYLOAD_TYPE_LEN 1
+
+#define RTL_IOCTL_HOSTAPD (SIOCIWFIRSTPRIV + 28)
+
+/* RTL871X_IOCTL_HOSTAPD ioctl() cmd: */
+enum {
+ RTL871X_HOSTAPD_FLUSH = 1,
+ RTL871X_HOSTAPD_ADD_STA = 2,
+ RTL871X_HOSTAPD_REMOVE_STA = 3,
+ RTL871X_HOSTAPD_GET_INFO_STA = 4,
+ /* REMOVED: PRISM2_HOSTAPD_RESET_TXEXC_STA = 5, */
+ RTL871X_HOSTAPD_GET_WPAIE_STA = 5,
+ RTL871X_SET_ENCRYPTION = 6,
+ RTL871X_GET_ENCRYPTION = 7,
+ RTL871X_HOSTAPD_SET_FLAGS_STA = 8,
+ RTL871X_HOSTAPD_GET_RID = 9,
+ RTL871X_HOSTAPD_SET_RID = 10,
+ RTL871X_HOSTAPD_SET_ASSOC_AP_ADDR = 11,
+ RTL871X_HOSTAPD_SET_GENERIC_ELEMENT = 12,
+ RTL871X_HOSTAPD_MLME = 13,
+ RTL871X_HOSTAPD_SCAN_REQ = 14,
+ RTL871X_HOSTAPD_STA_CLEAR_STATS = 15,
+ RTL871X_HOSTAPD_SET_BEACON = 16,
+ RTL871X_HOSTAPD_SET_WPS_BEACON = 17,
+ RTL871X_HOSTAPD_SET_WPS_PROBE_RESP = 18,
+ RTL871X_HOSTAPD_SET_WPS_ASSOC_RESP = 19,
+ RTL871X_HOSTAPD_SET_HIDDEN_SSID = 20,
+ RTL871X_HOSTAPD_SET_MACADDR_ACL = 21,
+ RTL871X_HOSTAPD_ACL_ADD_STA = 22,
+ RTL871X_HOSTAPD_ACL_REMOVE_STA = 23,
+};
+
+/* STA flags */
+#define WLAN_STA_AUTH BIT(0)
+#define WLAN_STA_ASSOC BIT(1)
+#define WLAN_STA_PS BIT(2)
+#define WLAN_STA_TIM BIT(3)
+#define WLAN_STA_PERM BIT(4)
+#define WLAN_STA_AUTHORIZED BIT(5)
+#define WLAN_STA_PENDING_POLL BIT(6) /* pending activity poll not ACKed */
+#define WLAN_STA_SHORT_PREAMBLE BIT(7)
+#define WLAN_STA_PREAUTH BIT(8)
+#define WLAN_STA_WME BIT(9)
+#define WLAN_STA_MFP BIT(10)
+#define WLAN_STA_HT BIT(11)
+#define WLAN_STA_WPS BIT(12)
+#define WLAN_STA_MAYBE_WPS BIT(13)
+#define WLAN_STA_VHT BIT(14)
+#define WLAN_STA_NONERP BIT(31)
+
+#define IEEE_CMD_SET_WPA_PARAM 1
+#define IEEE_CMD_SET_WPA_IE 2
+#define IEEE_CMD_SET_ENCRYPTION 3
+#define IEEE_CMD_MLME 4
+
+#define IEEE_PARAM_WPA_ENABLED 1
+#define IEEE_PARAM_TKIP_COUNTERMEASURES 2
+#define IEEE_PARAM_DROP_UNENCRYPTED 3
+#define IEEE_PARAM_PRIVACY_INVOKED 4
+#define IEEE_PARAM_AUTH_ALGS 5
+#define IEEE_PARAM_IEEE_802_1X 6
+#define IEEE_PARAM_WPAX_SELECT 7
+
+#define AUTH_ALG_OPEN_SYSTEM 0x1
+#define AUTH_ALG_SHARED_KEY 0x2
+#define AUTH_ALG_LEAP 0x00000004
+
+#define IEEE_MLME_STA_DEAUTH 1
+#define IEEE_MLME_STA_DISASSOC 2
+
+#define IEEE_CRYPT_ERR_UNKNOWN_ALG 2
+#define IEEE_CRYPT_ERR_UNKNOWN_ADDR 3
+#define IEEE_CRYPT_ERR_CRYPT_INIT_FAILED 4
+#define IEEE_CRYPT_ERR_KEY_SET_FAILED 5
+#define IEEE_CRYPT_ERR_TX_KEY_SET_FAILED 6
+#define IEEE_CRYPT_ERR_CARD_CONF_FAILED 7
+
+
+#define IEEE_CRYPT_ALG_NAME_LEN 16
+
+#define WPA_CIPHER_NONE BIT(0)
+#define WPA_CIPHER_WEP40 BIT(1)
+#define WPA_CIPHER_WEP104 BIT(2)
+#define WPA_CIPHER_TKIP BIT(3)
+#define WPA_CIPHER_CCMP BIT(4)
+
+
+
+#define WPA_SELECTOR_LEN 4
+extern u8 RTW_WPA_OUI_TYPE[];
+extern u16 RTW_WPA_VERSION;
+extern u8 WPA_AUTH_KEY_MGMT_NONE[];
+extern u8 WPA_AUTH_KEY_MGMT_UNSPEC_802_1X[];
+extern u8 WPA_AUTH_KEY_MGMT_PSK_OVER_802_1X[];
+extern u8 WPA_CIPHER_SUITE_NONE[];
+extern u8 WPA_CIPHER_SUITE_WEP40[];
+extern u8 WPA_CIPHER_SUITE_TKIP[];
+extern u8 WPA_CIPHER_SUITE_WRAP[];
+extern u8 WPA_CIPHER_SUITE_CCMP[];
+extern u8 WPA_CIPHER_SUITE_WEP104[];
+
+
+#define RSN_HEADER_LEN 4
+#define RSN_SELECTOR_LEN 4
+
+extern u16 RSN_VERSION_BSD;
+extern u8 RSN_AUTH_KEY_MGMT_UNSPEC_802_1X[];
+extern u8 RSN_AUTH_KEY_MGMT_PSK_OVER_802_1X[];
+extern u8 RSN_CIPHER_SUITE_NONE[];
+extern u8 RSN_CIPHER_SUITE_WEP40[];
+extern u8 RSN_CIPHER_SUITE_TKIP[];
+extern u8 RSN_CIPHER_SUITE_WRAP[];
+extern u8 RSN_CIPHER_SUITE_CCMP[];
+extern u8 RSN_CIPHER_SUITE_WEP104[];
+
+
+typedef enum _RATEID_IDX_ {
+ RATEID_IDX_BGN_40M_2SS = 0,
+ RATEID_IDX_BGN_40M_1SS = 1,
+ RATEID_IDX_BGN_20M_2SS_BN = 2,
+ RATEID_IDX_BGN_20M_1SS_BN = 3,
+ RATEID_IDX_GN_N2SS = 4,
+ RATEID_IDX_GN_N1SS = 5,
+ RATEID_IDX_BG = 6,
+ RATEID_IDX_G = 7,
+ RATEID_IDX_B = 8,
+ RATEID_IDX_VHT_2SS = 9,
+ RATEID_IDX_VHT_1SS = 10,
+} RATEID_IDX, *PRATEID_IDX;
+
+typedef enum _RATR_TABLE_MODE{
+ RATR_INX_WIRELESS_NGB = 0, /* BGN 40 Mhz 2SS 1SS */
+ RATR_INX_WIRELESS_NG = 1, /* GN or N */
+ RATR_INX_WIRELESS_NB = 2, /* BGN 20 Mhz 2SS 1SS or BN */
+ RATR_INX_WIRELESS_N = 3,
+ RATR_INX_WIRELESS_GB = 4,
+ RATR_INX_WIRELESS_G = 5,
+ RATR_INX_WIRELESS_B = 6,
+ RATR_INX_WIRELESS_MC = 7,
+ RATR_INX_WIRELESS_AC_N = 8,
+}RATR_TABLE_MODE, *PRATR_TABLE_MODE;
+
+
+enum NETWORK_TYPE
+{
+ WIRELESS_INVALID = 0,
+ /* Sub-Element */
+ WIRELESS_11B = BIT(0), /* tx: cck only , rx: cck only, hw: cck */
+ WIRELESS_11G = BIT(1), /* tx: ofdm only, rx: ofdm & cck, hw: cck & ofdm */
+ WIRELESS_11A = BIT(2), /* tx: ofdm only, rx: ofdm only, hw: ofdm only */
+ WIRELESS_11_24N = BIT(3), /* tx: MCS only, rx: MCS & cck, hw: MCS & cck */
+ WIRELESS_11_5N = BIT(4), /* tx: MCS only, rx: MCS & ofdm, hw: ofdm only */
+ WIRELESS_AUTO = BIT(5),
+ WIRELESS_11AC = BIT(6),
+
+ /* Combination */
+ /* Type for current wireless mode */
+ WIRELESS_11BG = (WIRELESS_11B|WIRELESS_11G), /* tx: cck & ofdm, rx: cck & ofdm & MCS, hw: cck & ofdm */
+ WIRELESS_11G_24N = (WIRELESS_11G|WIRELESS_11_24N), /* tx: ofdm & MCS, rx: ofdm & cck & MCS, hw: cck & ofdm */
+ WIRELESS_11A_5N = (WIRELESS_11A|WIRELESS_11_5N), /* tx: ofdm & MCS, rx: ofdm & MCS, hw: ofdm only */
+ WIRELESS_11B_24N = (WIRELESS_11B|WIRELESS_11_24N), /* tx: ofdm & cck & MCS, rx: ofdm & cck & MCS, hw: ofdm & cck */
+ WIRELESS_11BG_24N = (WIRELESS_11B|WIRELESS_11G|WIRELESS_11_24N), /* tx: ofdm & cck & MCS, rx: ofdm & cck & MCS, hw: ofdm & cck */
+ WIRELESS_11_24AC = (WIRELESS_11G|WIRELESS_11AC),
+ WIRELESS_11_5AC = (WIRELESS_11A|WIRELESS_11AC),
+
+
+ /* Type for registry default wireless mode */
+ WIRELESS_11AGN = (WIRELESS_11A|WIRELESS_11G|WIRELESS_11_24N|WIRELESS_11_5N), /* tx: ofdm & MCS, rx: ofdm & MCS, hw: ofdm only */
+ WIRELESS_11ABGN = (WIRELESS_11A|WIRELESS_11B|WIRELESS_11G|WIRELESS_11_24N|WIRELESS_11_5N),
+ WIRELESS_MODE_24G = (WIRELESS_11B|WIRELESS_11G|WIRELESS_11_24N|WIRELESS_11AC),
+ WIRELESS_MODE_MAX = (WIRELESS_11A|WIRELESS_11B|WIRELESS_11G|WIRELESS_11_24N|WIRELESS_11_5N|WIRELESS_11AC),
+};
+
+#define SUPPORTED_24G_NETTYPE_MSK (WIRELESS_11B | WIRELESS_11G | WIRELESS_11_24N)
+
+#define IsLegacyOnly(NetType) ((NetType) == ((NetType) & (WIRELESS_11BG|WIRELESS_11A)))
+
+#define IsSupported24G(NetType) ((NetType) & SUPPORTED_24G_NETTYPE_MSK ? true : false)
+
+#define IsEnableHWCCK(NetType) IsSupported24G(NetType)
+#define IsEnableHWOFDM(NetType) (((NetType) & (WIRELESS_11G|WIRELESS_11_24N)) ? true : false)
+
+#define IsSupportedRxCCK(NetType) IsEnableHWCCK(NetType)
+#define IsSupportedRxOFDM(NetType) IsEnableHWOFDM(NetType)
+#define IsSupportedRxHT(NetType) IsEnableHWOFDM(NetType)
+
+#define IsSupportedTxCCK(NetType) (((NetType) & (WIRELESS_11B)) ? true : false)
+#define IsSupportedTxOFDM(NetType) (((NetType) & (WIRELESS_11G|WIRELESS_11A)) ? true : false)
+#define IsSupportedHT(NetType) (((NetType) & (WIRELESS_11_24N|WIRELESS_11_5N)) ? true : false)
+
+#define IsSupportedVHT(NetType) (((NetType) & (WIRELESS_11AC)) ? true : false)
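+
+/*
+ * Worked example (illustrative): for NetType == WIRELESS_11BG_24N
+ * (BIT(0)|BIT(1)|BIT(3)), IsSupportedTxCCK(), IsSupportedTxOFDM() and
+ * IsSupportedHT() all evaluate to true, while IsSupportedVHT() is false
+ * because WIRELESS_11AC (BIT(6)) is not set.
+ */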
+
+
+typedef struct ieee_param {
+ u32 cmd;
+ u8 sta_addr[ETH_ALEN];
+ union {
+ struct {
+ u8 name;
+ u32 value;
+ } wpa_param;
+ struct {
+ u32 len;
+ u8 reserved[32];
+ u8 data[0];
+ } wpa_ie;
+ struct{
+ int command;
+ int reason_code;
+ } mlme;
+ struct {
+ u8 alg[IEEE_CRYPT_ALG_NAME_LEN];
+ u8 set_tx;
+ u32 err;
+ u8 idx;
+ u8 seq[8]; /* sequence counter (set: RX, get: TX) */
+ u16 key_len;
+ u8 key[0];
+ } crypt;
+ struct {
+ u16 aid;
+ u16 capability;
+ int flags;
+ u8 tx_supp_rates[16];
+ struct rtw_ieee80211_ht_cap ht_cap;
+ } add_sta;
+ struct {
+ u8 reserved[2];/* for set max_num_sta */
+ u8 buf[0];
+ } bcn_ie;
+ } u;
+}ieee_param;
+
+typedef struct ieee_param_ex {
+ u32 cmd;
+ u8 sta_addr[ETH_ALEN];
+ u8 data[0];
+}ieee_param_ex;
+
+struct sta_data{
+ u16 aid;
+ u16 capability;
+ int flags;
+ u32 sta_set;
+ u8 tx_supp_rates[16];
+ u32 tx_supp_rates_len;
+ struct rtw_ieee80211_ht_cap ht_cap;
+ u64 rx_pkts;
+ u64 rx_bytes;
+ u64 rx_drops;
+ u64 tx_pkts;
+ u64 tx_bytes;
+ u64 tx_drops;
+};
+
+#define IEEE80211_DATA_LEN 2304
+/* Maximum size for the MA-UNITDATA primitive, 802.11 standard section
+ 6.2.1.1.2.
+
+ The figure in section 7.1.2 suggests a body size of up to 2312
+ bytes is allowed, which is a bit confusing, I suspect this
+ represents the 2304 bytes of real data, plus a possible 8 bytes of
+ WEP IV and ICV. (this interpretation suggested by Ramiro Barreiro) */
+
+
+#define IEEE80211_HLEN 30
+#define IEEE80211_FRAME_LEN (IEEE80211_DATA_LEN + IEEE80211_HLEN)
+
+
+/* this is stolen from ipw2200 driver */
+#define IEEE_IBSS_MAC_HASH_SIZE 31
+
+struct ieee_ibss_seq {
+ u8 mac[ETH_ALEN];
+ u16 seq_num;
+ u16 frag_num;
+ unsigned long packet_time;
+ struct list_head list;
+};
+
+struct eapol {
+ u8 snap[6];
+ u16 ethertype;
+ u8 version;
+ u8 type;
+ u16 length;
+} __attribute__ ((packed));
+
+enum eap_type {
+ EAP_PACKET = 0,
+ EAPOL_START,
+ EAPOL_LOGOFF,
+ EAPOL_KEY,
+ EAPOL_ENCAP_ASF_ALERT
+};
+
+#define IEEE80211_3ADDR_LEN 24
+#define IEEE80211_4ADDR_LEN 30
+#define IEEE80211_FCS_LEN 4
+
+#define MIN_FRAG_THRESHOLD 256U
+#define MAX_FRAG_THRESHOLD 2346U
+
+/* Frame control field constants */
+#define RTW_IEEE80211_FCTL_VERS 0x0003
+#define RTW_IEEE80211_FCTL_FTYPE 0x000c
+#define RTW_IEEE80211_FCTL_STYPE 0x00f0
+#define RTW_IEEE80211_FCTL_TODS 0x0100
+#define RTW_IEEE80211_FCTL_FROMDS 0x0200
+#define RTW_IEEE80211_FCTL_MOREFRAGS 0x0400
+#define RTW_IEEE80211_FCTL_RETRY 0x0800
+#define RTW_IEEE80211_FCTL_PM 0x1000
+#define RTW_IEEE80211_FCTL_MOREDATA 0x2000
+#define RTW_IEEE80211_FCTL_PROTECTED 0x4000
+#define RTW_IEEE80211_FCTL_ORDER 0x8000
+#define RTW_IEEE80211_FCTL_CTL_EXT 0x0f00
+
+#define RTW_IEEE80211_FTYPE_MGMT 0x0000
+#define RTW_IEEE80211_FTYPE_CTL 0x0004
+#define RTW_IEEE80211_FTYPE_DATA 0x0008
+#define RTW_IEEE80211_FTYPE_EXT 0x000c
+
+/* management */
+#define RTW_IEEE80211_STYPE_ASSOC_REQ 0x0000
+#define RTW_IEEE80211_STYPE_ASSOC_RESP 0x0010
+#define RTW_IEEE80211_STYPE_REASSOC_REQ 0x0020
+#define RTW_IEEE80211_STYPE_REASSOC_RESP 0x0030
+#define RTW_IEEE80211_STYPE_PROBE_REQ 0x0040
+#define RTW_IEEE80211_STYPE_PROBE_RESP 0x0050
+#define RTW_IEEE80211_STYPE_BEACON 0x0080
+#define RTW_IEEE80211_STYPE_ATIM 0x0090
+#define RTW_IEEE80211_STYPE_DISASSOC 0x00A0
+#define RTW_IEEE80211_STYPE_AUTH 0x00B0
+#define RTW_IEEE80211_STYPE_DEAUTH 0x00C0
+#define RTW_IEEE80211_STYPE_ACTION 0x00D0
+
+/* control */
+#define RTW_IEEE80211_STYPE_CTL_EXT 0x0060
+#define RTW_IEEE80211_STYPE_BACK_REQ 0x0080
+#define RTW_IEEE80211_STYPE_BACK 0x0090
+#define RTW_IEEE80211_STYPE_PSPOLL 0x00A0
+#define RTW_IEEE80211_STYPE_RTS 0x00B0
+#define RTW_IEEE80211_STYPE_CTS 0x00C0
+#define RTW_IEEE80211_STYPE_ACK 0x00D0
+#define RTW_IEEE80211_STYPE_CFEND 0x00E0
+#define RTW_IEEE80211_STYPE_CFENDACK 0x00F0
+
+/* data */
+#define RTW_IEEE80211_STYPE_DATA 0x0000
+#define RTW_IEEE80211_STYPE_DATA_CFACK 0x0010
+#define RTW_IEEE80211_STYPE_DATA_CFPOLL 0x0020
+#define RTW_IEEE80211_STYPE_DATA_CFACKPOLL 0x0030
+#define RTW_IEEE80211_STYPE_NULLFUNC 0x0040
+#define RTW_IEEE80211_STYPE_CFACK 0x0050
+#define RTW_IEEE80211_STYPE_CFPOLL 0x0060
+#define RTW_IEEE80211_STYPE_CFACKPOLL 0x0070
+#define RTW_IEEE80211_STYPE_QOS_DATA 0x0080
+#define RTW_IEEE80211_STYPE_QOS_DATA_CFACK 0x0090
+#define RTW_IEEE80211_STYPE_QOS_DATA_CFPOLL 0x00A0
+#define RTW_IEEE80211_STYPE_QOS_DATA_CFACKPOLL 0x00B0
+#define RTW_IEEE80211_STYPE_QOS_NULLFUNC 0x00C0
+#define RTW_IEEE80211_STYPE_QOS_CFACK 0x00D0
+#define RTW_IEEE80211_STYPE_QOS_CFPOLL 0x00E0
+#define RTW_IEEE80211_STYPE_QOS_CFACKPOLL 0x00F0
+
+/* sequence control field */
+#define RTW_IEEE80211_SCTL_FRAG 0x000F
+#define RTW_IEEE80211_SCTL_SEQ 0xFFF0
+
+
+#define RTW_ERP_INFO_NON_ERP_PRESENT BIT(0)
+#define RTW_ERP_INFO_USE_PROTECTION BIT(1)
+#define RTW_ERP_INFO_BARKER_PREAMBLE_MODE BIT(2)
+
+/* QoS, QOS */
+#define NORMAL_ACK 0
+#define NO_ACK 1
+#define NON_EXPLICIT_ACK 2
+#define BLOCK_ACK 3
+
+#ifndef ETH_P_PAE
+#define ETH_P_PAE 0x888E /* Port Access Entity (IEEE 802.1X) */
+#endif /* ETH_P_PAE */
+
+#define ETH_P_PREAUTH 0x88C7 /* IEEE 802.11i pre-authentication */
+
+#define ETH_P_ECONET 0x0018
+
+#ifndef ETH_P_80211_RAW
+#define ETH_P_80211_RAW (ETH_P_ECONET + 1)
+#endif
+
+/* IEEE 802.11 defines */
+
+#define P80211_OUI_LEN 3
+
+struct ieee80211_snap_hdr {
+ u8 dsap; /* always 0xAA */
+ u8 ssap; /* always 0xAA */
+ u8 ctrl; /* always 0x03 */
+ u8 oui[P80211_OUI_LEN]; /* organizational universal id */
+} __attribute__ ((packed));
+
+#define SNAP_SIZE sizeof(struct ieee80211_snap_hdr)
+
+#define WLAN_FC_GET_TYPE(fc) ((fc) & RTW_IEEE80211_FCTL_FTYPE)
+#define WLAN_FC_GET_STYPE(fc) ((fc) & RTW_IEEE80211_FCTL_STYPE)
+
+#define WLAN_QC_GET_TID(qc) ((qc) & 0x0f)
+
+#define WLAN_GET_SEQ_FRAG(seq) ((seq) & RTW_IEEE80211_SCTL_FRAG)
+#define WLAN_GET_SEQ_SEQ(seq) ((seq) & RTW_IEEE80211_SCTL_SEQ)
+
+/* Authentication algorithms */
+#define WLAN_AUTH_OPEN 0
+#define WLAN_AUTH_SHARED_KEY 1
+
+#define WLAN_AUTH_CHALLENGE_LEN 128
+
+#define WLAN_CAPABILITY_BSS (1<<0)
+#define WLAN_CAPABILITY_IBSS (1<<1)
+#define WLAN_CAPABILITY_CF_POLLABLE (1<<2)
+#define WLAN_CAPABILITY_CF_POLL_REQUEST (1<<3)
+#define WLAN_CAPABILITY_PRIVACY (1<<4)
+#define WLAN_CAPABILITY_SHORT_PREAMBLE (1<<5)
+#define WLAN_CAPABILITY_PBCC (1<<6)
+#define WLAN_CAPABILITY_CHANNEL_AGILITY (1<<7)
+#define WLAN_CAPABILITY_SHORT_SLOT (1<<10)
+
+/* Status codes */
+#define WLAN_STATUS_SUCCESS 0
+#define WLAN_STATUS_UNSPECIFIED_FAILURE 1
+#define WLAN_STATUS_CAPS_UNSUPPORTED 10
+#define WLAN_STATUS_REASSOC_NO_ASSOC 11
+#define WLAN_STATUS_ASSOC_DENIED_UNSPEC 12
+#define WLAN_STATUS_NOT_SUPPORTED_AUTH_ALG 13
+#define WLAN_STATUS_UNKNOWN_AUTH_TRANSACTION 14
+#define WLAN_STATUS_CHALLENGE_FAIL 15
+#define WLAN_STATUS_AUTH_TIMEOUT 16
+#define WLAN_STATUS_AP_UNABLE_TO_HANDLE_NEW_STA 17
+#define WLAN_STATUS_ASSOC_DENIED_RATES 18
+/* 802.11b */
+#define WLAN_STATUS_ASSOC_DENIED_NOSHORT 19
+#define WLAN_STATUS_ASSOC_DENIED_NOPBCC 20
+#define WLAN_STATUS_ASSOC_DENIED_NOAGILITY 21
+
+/* Reason codes */
+#define WLAN_REASON_UNSPECIFIED 1
+#define WLAN_REASON_PREV_AUTH_NOT_VALID 2
+#define WLAN_REASON_DEAUTH_LEAVING 3
+#define WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY 4
+#define WLAN_REASON_DISASSOC_AP_BUSY 5
+#define WLAN_REASON_CLASS2_FRAME_FROM_NONAUTH_STA 6
+#define WLAN_REASON_CLASS3_FRAME_FROM_NONASSOC_STA 7
+#define WLAN_REASON_DISASSOC_STA_HAS_LEFT 8
+#define WLAN_REASON_STA_REQ_ASSOC_WITHOUT_AUTH 9
+#define WLAN_REASON_ACTIVE_ROAM 65533
+#define WLAN_REASON_JOIN_WRONG_CHANNEL 65534
+#define WLAN_REASON_EXPIRATION_CHK 65535
+
+/* Information Element IDs */
+#define WLAN_EID_SSID 0
+#define WLAN_EID_SUPP_RATES 1
+#define WLAN_EID_FH_PARAMS 2
+#define WLAN_EID_DS_PARAMS 3
+#define WLAN_EID_CF_PARAMS 4
+#define WLAN_EID_TIM 5
+#define WLAN_EID_IBSS_PARAMS 6
+#define WLAN_EID_CHALLENGE 16
+/* EIDs defined by IEEE 802.11h - START */
+#define WLAN_EID_PWR_CONSTRAINT 32
+#define WLAN_EID_PWR_CAPABILITY 33
+#define WLAN_EID_TPC_REQUEST 34
+#define WLAN_EID_TPC_REPORT 35
+#define WLAN_EID_SUPPORTED_CHANNELS 36
+#define WLAN_EID_CHANNEL_SWITCH 37
+#define WLAN_EID_MEASURE_REQUEST 38
+#define WLAN_EID_MEASURE_REPORT 39
+#define WLAN_EID_QUITE 40
+#define WLAN_EID_IBSS_DFS 41
+/* EIDs defined by IEEE 802.11h - END */
+#define WLAN_EID_ERP_INFO 42
+#define WLAN_EID_HT_CAP 45
+#define WLAN_EID_RSN 48
+#define WLAN_EID_EXT_SUPP_RATES 50
+#define WLAN_EID_MOBILITY_DOMAIN 54
+#define WLAN_EID_FAST_BSS_TRANSITION 55
+#define WLAN_EID_TIMEOUT_INTERVAL 56
+#define WLAN_EID_RIC_DATA 57
+#define WLAN_EID_HT_OPERATION 61
+#define WLAN_EID_SECONDARY_CHANNEL_OFFSET 62
+#define WLAN_EID_20_40_BSS_COEXISTENCE 72
+#define WLAN_EID_20_40_BSS_INTOLERANT 73
+#define WLAN_EID_OVERLAPPING_BSS_SCAN_PARAMS 74
+#define WLAN_EID_MMIE 76
+#define WLAN_EID_VENDOR_SPECIFIC 221
+#define WLAN_EID_GENERIC (WLAN_EID_VENDOR_SPECIFIC)
+#define WLAN_EID_VHT_CAPABILITY 191
+#define WLAN_EID_VHT_OPERATION 192
+#define WLAN_EID_VHT_OP_MODE_NOTIFY 199
+
+#define IEEE80211_MGMT_HDR_LEN 24
+#define IEEE80211_DATA_HDR3_LEN 24
+#define IEEE80211_DATA_HDR4_LEN 30
+
+
+#define IEEE80211_STATMASK_SIGNAL (1<<0)
+#define IEEE80211_STATMASK_RSSI (1<<1)
+#define IEEE80211_STATMASK_NOISE (1<<2)
+#define IEEE80211_STATMASK_RATE (1<<3)
+#define IEEE80211_STATMASK_WEMASK 0x7
+
+
+#define IEEE80211_CCK_MODULATION (1<<0)
+#define IEEE80211_OFDM_MODULATION (1<<1)
+
+#define IEEE80211_24GHZ_BAND (1<<0)
+#define IEEE80211_52GHZ_BAND (1<<1)
+
+#define IEEE80211_CCK_RATE_LEN 4
+#define IEEE80211_NUM_OFDM_RATESLEN 8
+
+
+#define IEEE80211_CCK_RATE_1MB 0x02
+#define IEEE80211_CCK_RATE_2MB 0x04
+#define IEEE80211_CCK_RATE_5MB 0x0B
+#define IEEE80211_CCK_RATE_11MB 0x16
+#define IEEE80211_OFDM_RATE_LEN 8
+#define IEEE80211_OFDM_RATE_6MB 0x0C
+#define IEEE80211_OFDM_RATE_9MB 0x12
+#define IEEE80211_OFDM_RATE_12MB 0x18
+#define IEEE80211_OFDM_RATE_18MB 0x24
+#define IEEE80211_OFDM_RATE_24MB 0x30
+#define IEEE80211_OFDM_RATE_36MB 0x48
+#define IEEE80211_OFDM_RATE_48MB 0x60
+#define IEEE80211_OFDM_RATE_54MB 0x6C
+#define IEEE80211_BASIC_RATE_MASK 0x80
+
+#define IEEE80211_CCK_RATE_1MB_MASK (1<<0)
+#define IEEE80211_CCK_RATE_2MB_MASK (1<<1)
+#define IEEE80211_CCK_RATE_5MB_MASK (1<<2)
+#define IEEE80211_CCK_RATE_11MB_MASK (1<<3)
+#define IEEE80211_OFDM_RATE_6MB_MASK (1<<4)
+#define IEEE80211_OFDM_RATE_9MB_MASK (1<<5)
+#define IEEE80211_OFDM_RATE_12MB_MASK (1<<6)
+#define IEEE80211_OFDM_RATE_18MB_MASK (1<<7)
+#define IEEE80211_OFDM_RATE_24MB_MASK (1<<8)
+#define IEEE80211_OFDM_RATE_36MB_MASK (1<<9)
+#define IEEE80211_OFDM_RATE_48MB_MASK (1<<10)
+#define IEEE80211_OFDM_RATE_54MB_MASK (1<<11)
+
+#define IEEE80211_CCK_RATES_MASK 0x0000000F
+#define IEEE80211_CCK_BASIC_RATES_MASK (IEEE80211_CCK_RATE_1MB_MASK | \
+ IEEE80211_CCK_RATE_2MB_MASK)
+#define IEEE80211_CCK_DEFAULT_RATES_MASK (IEEE80211_CCK_BASIC_RATES_MASK | \
+ IEEE80211_CCK_RATE_5MB_MASK | \
+ IEEE80211_CCK_RATE_11MB_MASK)
+
+#define IEEE80211_OFDM_RATES_MASK 0x00000FF0
+#define IEEE80211_OFDM_BASIC_RATES_MASK (IEEE80211_OFDM_RATE_6MB_MASK | \
+ IEEE80211_OFDM_RATE_12MB_MASK | \
+ IEEE80211_OFDM_RATE_24MB_MASK)
+#define IEEE80211_OFDM_DEFAULT_RATES_MASK (IEEE80211_OFDM_BASIC_RATES_MASK | \
+ IEEE80211_OFDM_RATE_9MB_MASK | \
+ IEEE80211_OFDM_RATE_18MB_MASK | \
+ IEEE80211_OFDM_RATE_36MB_MASK | \
+ IEEE80211_OFDM_RATE_48MB_MASK | \
+ IEEE80211_OFDM_RATE_54MB_MASK)
+#define IEEE80211_DEFAULT_RATES_MASK (IEEE80211_OFDM_DEFAULT_RATES_MASK | \
+ IEEE80211_CCK_DEFAULT_RATES_MASK)
+
+#define IEEE80211_NUM_OFDM_RATES 8
+#define IEEE80211_NUM_CCK_RATES 4
+#define IEEE80211_OFDM_SHIFT_MASK_A 4
+
+
+enum MGN_RATE{
+ MGN_1M = 0x02,
+ MGN_2M = 0x04,
+ MGN_5_5M = 0x0B,
+ MGN_6M = 0x0C,
+ MGN_9M = 0x12,
+ MGN_11M = 0x16,
+ MGN_12M = 0x18,
+ MGN_18M = 0x24,
+ MGN_24M = 0x30,
+ MGN_36M = 0x48,
+ MGN_48M = 0x60,
+ MGN_54M = 0x6C,
+ MGN_MCS32 = 0x7F,
+ MGN_MCS0,
+ MGN_MCS1,
+ MGN_MCS2,
+ MGN_MCS3,
+ MGN_MCS4,
+ MGN_MCS5,
+ MGN_MCS6,
+ MGN_MCS7,
+ MGN_MCS8,
+ MGN_MCS9,
+ MGN_MCS10,
+ MGN_MCS11,
+ MGN_MCS12,
+ MGN_MCS13,
+ MGN_MCS14,
+ MGN_MCS15,
+ MGN_MCS16,
+ MGN_MCS17,
+ MGN_MCS18,
+ MGN_MCS19,
+ MGN_MCS20,
+ MGN_MCS21,
+ MGN_MCS22,
+ MGN_MCS23,
+ MGN_MCS24,
+ MGN_MCS25,
+ MGN_MCS26,
+ MGN_MCS27,
+ MGN_MCS28,
+ MGN_MCS29,
+ MGN_MCS30,
+ MGN_MCS31,
+ MGN_VHT1SS_MCS0,
+ MGN_VHT1SS_MCS1,
+ MGN_VHT1SS_MCS2,
+ MGN_VHT1SS_MCS3,
+ MGN_VHT1SS_MCS4,
+ MGN_VHT1SS_MCS5,
+ MGN_VHT1SS_MCS6,
+ MGN_VHT1SS_MCS7,
+ MGN_VHT1SS_MCS8,
+ MGN_VHT1SS_MCS9,
+ MGN_VHT2SS_MCS0,
+ MGN_VHT2SS_MCS1,
+ MGN_VHT2SS_MCS2,
+ MGN_VHT2SS_MCS3,
+ MGN_VHT2SS_MCS4,
+ MGN_VHT2SS_MCS5,
+ MGN_VHT2SS_MCS6,
+ MGN_VHT2SS_MCS7,
+ MGN_VHT2SS_MCS8,
+ MGN_VHT2SS_MCS9,
+ MGN_VHT3SS_MCS0,
+ MGN_VHT3SS_MCS1,
+ MGN_VHT3SS_MCS2,
+ MGN_VHT3SS_MCS3,
+ MGN_VHT3SS_MCS4,
+ MGN_VHT3SS_MCS5,
+ MGN_VHT3SS_MCS6,
+ MGN_VHT3SS_MCS7,
+ MGN_VHT3SS_MCS8,
+ MGN_VHT3SS_MCS9,
+ MGN_VHT4SS_MCS0,
+ MGN_VHT4SS_MCS1,
+ MGN_VHT4SS_MCS2,
+ MGN_VHT4SS_MCS3,
+ MGN_VHT4SS_MCS4,
+ MGN_VHT4SS_MCS5,
+ MGN_VHT4SS_MCS6,
+ MGN_VHT4SS_MCS7,
+ MGN_VHT4SS_MCS8,
+ MGN_VHT4SS_MCS9,
+ MGN_UNKNOWN
+};
+
+#define IS_HT_RATE(_rate) ((_rate) >= MGN_MCS0 && (_rate) <= MGN_MCS31)
+#define IS_VHT_RATE(_rate) ((_rate) >= MGN_VHT1SS_MCS0 && (_rate) <= MGN_VHT4SS_MCS9)
+#define IS_CCK_RATE(_rate) ((_rate) == MGN_1M || (_rate) == MGN_2M || (_rate) == MGN_5_5M || (_rate) == MGN_11M)
+#define IS_OFDM_RATE(_rate) (MGN_6M <= (_rate) && (_rate) <= MGN_54M && (_rate) != MGN_11M)
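+
+/*
+ * Example (illustrative): MGN_24M (0x30) satisfies IS_OFDM_RATE() but not
+ * IS_CCK_RATE(); MGN_MCS7 lies in the HT range, so IS_HT_RATE() is true.
+ */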
+
+
+/* NOTE: This data is for statistical purposes; not all hardware provides this
+ * information for frames received. Not setting these will not cause
+ * any adverse effects. */
+struct ieee80211_rx_stats {
+ s8 rssi;
+ u8 signal;
+ u8 noise;
+ u8 received_channel;
+ u16 rate; /* in 100 kbps */
+ u8 mask;
+ u8 freq;
+ u16 len;
+};
+
+/* IEEE 802.11 requires that STA supports concurrent reception of at least
+ * three fragmented frames. This define can be increased to support more
+ * concurrent frames, but it should be noted that each entry can consume about
+ * 2 kB of RAM and increasing cache size will slow down frame reassembly. */
+#define IEEE80211_FRAG_CACHE_LEN 4
+
+struct ieee80211_frag_entry {
+ u32 first_frag_time;
+ uint seq;
+ uint last_frag;
+ uint qos; /* jackson */
+ uint tid; /* jackson */
+ struct sk_buff *skb;
+ u8 src_addr[ETH_ALEN];
+ u8 dst_addr[ETH_ALEN];
+};
+
+struct ieee80211_stats {
+ uint tx_unicast_frames;
+ uint tx_multicast_frames;
+ uint tx_fragments;
+ uint tx_unicast_octets;
+ uint tx_multicast_octets;
+ uint tx_deferred_transmissions;
+ uint tx_single_retry_frames;
+ uint tx_multiple_retry_frames;
+ uint tx_retry_limit_exceeded;
+ uint tx_discards;
+ uint rx_unicast_frames;
+ uint rx_multicast_frames;
+ uint rx_fragments;
+ uint rx_unicast_octets;
+ uint rx_multicast_octets;
+ uint rx_fcs_errors;
+ uint rx_discards_no_buffer;
+ uint tx_discards_wrong_sa;
+ uint rx_discards_undecryptable;
+ uint rx_message_in_msg_fragments;
+ uint rx_message_in_bad_msg_fragments;
+};
+
+struct ieee80211_softmac_stats {
+ uint rx_ass_ok;
+ uint rx_ass_err;
+ uint rx_probe_rq;
+ uint tx_probe_rs;
+ uint tx_beacons;
+ uint rx_auth_rq;
+ uint rx_auth_rs_ok;
+ uint rx_auth_rs_err;
+ uint tx_auth_rq;
+ uint no_auth_rs;
+ uint no_ass_rs;
+ uint tx_ass_rq;
+ uint rx_ass_rq;
+ uint tx_probe_rq;
+ uint reassoc;
+ uint swtxstop;
+ uint swtxawake;
+};
+
+#define SEC_KEY_1 (1<<0)
+#define SEC_KEY_2 (1<<1)
+#define SEC_KEY_3 (1<<2)
+#define SEC_KEY_4 (1<<3)
+#define SEC_ACTIVE_KEY (1<<4)
+#define SEC_AUTH_MODE (1<<5)
+#define SEC_UNICAST_GROUP (1<<6)
+#define SEC_LEVEL (1<<7)
+#define SEC_ENABLED (1<<8)
+
+#define SEC_LEVEL_0 0 /* None */
+#define SEC_LEVEL_1 1 /* WEP 40 and 104 bit */
+#define SEC_LEVEL_2 2 /* Level 1 + TKIP */
+#define SEC_LEVEL_2_CKIP 3 /* Level 1 + CKIP */
+#define SEC_LEVEL_3 4 /* Level 2 + CCMP */
+
+#define WEP_KEYS 4
+#define WEP_KEY_LEN 13
+
+#define BIP_MAX_KEYID 5
+#define BIP_AAD_SIZE 20
+
+struct ieee80211_security {
+ u16 active_key:2,
+ enabled:1,
+ auth_mode:2,
+ auth_algo:4,
+ unicast_uses_group:1;
+ u8 key_sizes[WEP_KEYS];
+ u8 keys[WEP_KEYS][WEP_KEY_LEN];
+ u8 level;
+ u16 flags;
+} __attribute__ ((packed));
+
+/*
+
+ 802.11 data frame from AP
+
+ ,-------------------------------------------------------------------.
+Bytes | 2 | 2 | 6 | 6 | 6 | 2 | 0..2312 | 4 |
+ |------|------|---------|---------|---------|------|---------|------|
+Desc. | ctrl | dura | DA/RA | TA | SA | Sequ | frame | fcs |
+ | | tion | (BSSID) | | | ence | data | |
+ `-------------------------------------------------------------------'
+
+Total: 28-2340 bytes
+
+*/
+
+struct ieee80211_header_data {
+ u16 frame_ctl;
+ u16 duration_id;
+ u8 addr1[6];
+ u8 addr2[6];
+ u8 addr3[6];
+ u16 seq_ctrl;
+};
+
+#define BEACON_PROBE_SSID_ID_POSITION 12
+
+/* Management Frame Information Element Types */
+#define MFIE_TYPE_SSID 0
+#define MFIE_TYPE_RATES 1
+#define MFIE_TYPE_FH_SET 2
+#define MFIE_TYPE_DS_SET 3
+#define MFIE_TYPE_CF_SET 4
+#define MFIE_TYPE_TIM 5
+#define MFIE_TYPE_IBSS_SET 6
+#define MFIE_TYPE_CHALLENGE 16
+#define MFIE_TYPE_ERP 42
+#define MFIE_TYPE_RSN 48
+#define MFIE_TYPE_RATES_EX 50
+#define MFIE_TYPE_GENERIC 221
+
+struct ieee80211_info_element_hdr {
+ u8 id;
+ u8 len;
+} __attribute__ ((packed));
+
+struct ieee80211_info_element {
+ u8 id;
+ u8 len;
+ u8 data[0];
+} __attribute__ ((packed));
+
+/*
+ * These are the data types that can make up management packets
+ *
+ u16 auth_algorithm;
+ u16 auth_sequence;
+ u16 beacon_interval;
+ u16 capability;
+ u8 current_ap[ETH_ALEN];
+ u16 listen_interval;
+ struct {
+ u16 association_id:14, reserved:2;
+ } __attribute__ ((packed));
+ u32 time_stamp[2];
+ u16 reason;
+ u16 status;
+*/
+
+#define IEEE80211_DEFAULT_TX_ESSID "Penguin"
+#define IEEE80211_DEFAULT_BASIC_RATE 10
+
+
+struct ieee80211_authentication {
+ struct ieee80211_header_data header;
+ u16 algorithm;
+ u16 transaction;
+ u16 status;
+ /* struct ieee80211_info_element_hdr info_element; */
+} __attribute__ ((packed));
+
+
+struct ieee80211_probe_response {
+ struct ieee80211_header_data header;
+ u32 time_stamp[2];
+ u16 beacon_interval;
+ u16 capability;
+ struct ieee80211_info_element info_element;
+} __attribute__ ((packed));
+
+struct ieee80211_probe_request {
+ struct ieee80211_header_data header;
+ /*struct ieee80211_info_element info_element;*/
+} __attribute__ ((packed));
+
+struct ieee80211_assoc_request_frame {
+ struct ieee80211_hdr_3addr header;
+ u16 capability;
+ u16 listen_interval;
+ /* u8 current_ap[ETH_ALEN]; */
+ struct ieee80211_info_element_hdr info_element;
+} __attribute__ ((packed));
+
+struct ieee80211_assoc_response_frame {
+ struct ieee80211_hdr_3addr header;
+ u16 capability;
+ u16 status;
+ u16 aid;
+} __attribute__ ((packed));
+
+struct ieee80211_txb {
+ u8 nr_frags;
+ u8 encrypted;
+ u16 reserved;
+ u16 frag_size;
+ u16 payload_size;
+ struct sk_buff *fragments[0];
+};
+
+
+/* SWEEP TABLE ENTRIES NUMBER*/
+#define MAX_SWEEP_TAB_ENTRIES 42
+#define MAX_SWEEP_TAB_ENTRIES_PER_PACKET 7
+/* MAX_RATES_LENGTH needs to be 12. The spec says 8, and many APs
+ * only use 8, and then use extended rates for the remaining supported
+ * rates. Other APs, however, stick all of their supported rates on the
+ * main rates information element... */
+#define MAX_RATES_LENGTH ((u8)12)
+#define MAX_RATES_EX_LENGTH ((u8)16)
+#define MAX_NETWORK_COUNT 128
+#define MAX_CHANNEL_NUMBER 161
+#define IEEE80211_SOFTMAC_SCAN_TIME 400
+/* (HZ / 2) */
+#define IEEE80211_SOFTMAC_ASSOC_RETRY_TIME (HZ * 2)
+
+#define CRC_LENGTH 4U
+
+#define MAX_WPA_IE_LEN (256)
+#define MAX_WPS_IE_LEN (512)
+#define MAX_P2P_IE_LEN (256)
+#define MAX_WFD_IE_LEN (128)
+
+#define NETWORK_EMPTY_ESSID (1<<0)
+#define NETWORK_HAS_OFDM (1<<1)
+#define NETWORK_HAS_CCK (1<<2)
+
+#define IEEE80211_DTIM_MBCAST 4
+#define IEEE80211_DTIM_UCAST 2
+#define IEEE80211_DTIM_VALID 1
+#define IEEE80211_DTIM_INVALID 0
+
+#define IEEE80211_PS_DISABLED 0
+#define IEEE80211_PS_UNICAST IEEE80211_DTIM_UCAST
+#define IEEE80211_PS_MBCAST IEEE80211_DTIM_MBCAST
+#define IW_ESSID_MAX_SIZE 32
+/*
+join_res:
+-1: authentication fail
+-2: association fail
+> 0: TID
+*/
+
+enum ieee80211_state {
+
+ /* the card is not linked at all */
+ IEEE80211_NOLINK = 0,
+
+ /* IEEE80211_ASSOCIATING* are for BSS client mode;
+ * the driver shall not perform RX filtering unless
+ * the state is LINKED.
+ * The driver shall just check for the state LINKED and
+ * default to NOLINK for ALL the other states (including
+ * LINKED_SCANNING).
+ */
+
+ /* the association procedure will start (wq scheduling)*/
+ IEEE80211_ASSOCIATING,
+ IEEE80211_ASSOCIATING_RETRY,
+
+ /* the association procedure is sending AUTH request*/
+ IEEE80211_ASSOCIATING_AUTHENTICATING,
+
+ /* the association procedure has successfully authenticated
+ * and is sending association request
+ */
+ IEEE80211_ASSOCIATING_AUTHENTICATED,
+
+ /* the link is ok. the card associated to a BSS or linked
+ * to a ibss cell or acting as an AP and creating the bss
+ */
+ IEEE80211_LINKED,
+
+ /* same as LINKED, but the driver shall apply RX filter
+ * rules as if we were in NOLINK mode. The card is still
+ * logically linked, but it is doing a synchronous site survey;
+ * it will then go back to the LINKED state.
+ */
+ IEEE80211_LINKED_SCANNING,
+
+};
+
+#define DEFAULT_MAX_SCAN_AGE (15 * HZ)
+#define DEFAULT_FTS 2346
+#define MAC_FMT "%02x:%02x:%02x:%02x:%02x:%02x"
+#define MAC_ARG(x) ((u8 *)(x))[0], ((u8 *)(x))[1], ((u8 *)(x))[2], ((u8 *)(x))[3], ((u8 *)(x))[4], ((u8 *)(x))[5]
+#define IP_FMT "%d.%d.%d.%d"
+#define IP_ARG(x) ((u8 *)(x))[0], ((u8 *)(x))[1], ((u8 *)(x))[2], ((u8 *)(x))[3]
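+
+/*
+ * Usage sketch (illustrative): these pair with printf-style logging, e.g.
+ *
+ *	pr_debug("bssid = " MAC_FMT "\n", MAC_ARG(bssid));
+ *
+ * where "bssid" is any u8[ETH_ALEN] buffer.
+ */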
+
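+/*
+ * Note: unlike is_multicast_ether_addr() from <linux/etherdevice.h>, this
+ * helper deliberately excludes the broadcast address: it tests the group bit
+ * (addr[0] & 0x01) only when addr[0] != 0xff.
+ */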
+static inline int is_multicast_mac_addr(const u8 *addr)
+{
+ return ((addr[0] != 0xff) && (0x01 & addr[0]));
+}
+
+static inline int is_broadcast_mac_addr(const u8 *addr)
+{
+ return ((addr[0] == 0xff) && (addr[1] == 0xff) && (addr[2] == 0xff) && \
+ (addr[3] == 0xff) && (addr[4] == 0xff) && (addr[5] == 0xff));
+}
+
+static inline int is_zero_mac_addr(const u8 *addr)
+{
+ return ((addr[0] == 0x00) && (addr[1] == 0x00) && (addr[2] == 0x00) && \
+ (addr[3] == 0x00) && (addr[4] == 0x00) && (addr[5] == 0x00));
+}
+
+#define CFG_IEEE80211_RESERVE_FCS (1<<0)
+#define CFG_IEEE80211_COMPUTE_FCS (1<<1)
+
+typedef struct tx_pending_t{
+ int frag;
+ struct ieee80211_txb *txb;
+}tx_pending_t;
+
+
+
+#define MAXTID 16
+
+#define IEEE_A (1<<0)
+#define IEEE_B (1<<1)
+#define IEEE_G (1<<2)
+#define IEEE_MODE_MASK (IEEE_A|IEEE_B|IEEE_G)
+
+/* Action category code */
+enum rtw_ieee80211_category {
+ RTW_WLAN_CATEGORY_SPECTRUM_MGMT = 0,
+ RTW_WLAN_CATEGORY_QOS = 1,
+ RTW_WLAN_CATEGORY_DLS = 2,
+ RTW_WLAN_CATEGORY_BACK = 3,
+ RTW_WLAN_CATEGORY_PUBLIC = 4, /* IEEE 802.11 public action frames */
+ RTW_WLAN_CATEGORY_RADIO_MEASUREMENT = 5,
+ RTW_WLAN_CATEGORY_FT = 6,
+ RTW_WLAN_CATEGORY_HT = 7,
+ RTW_WLAN_CATEGORY_SA_QUERY = 8,
+ RTW_WLAN_CATEGORY_UNPROTECTED_WNM = 11, /* added for CONFIG_IEEE80211W; non-11w code can also use it */
+ RTW_WLAN_CATEGORY_TDLS = 12,
+ RTW_WLAN_CATEGORY_SELF_PROTECTED = 15, /* added for CONFIG_IEEE80211W; non-11w code can also use it */
+ RTW_WLAN_CATEGORY_WMM = 17,
+ RTW_WLAN_CATEGORY_VHT = 21,
+ RTW_WLAN_CATEGORY_P2P = 0x7f,/* P2P action frames */
+};
+
+/* SPECTRUM_MGMT action code */
+enum rtw_ieee80211_spectrum_mgmt_actioncode {
+ RTW_WLAN_ACTION_SPCT_MSR_REQ = 0,
+ RTW_WLAN_ACTION_SPCT_MSR_RPRT = 1,
+ RTW_WLAN_ACTION_SPCT_TPC_REQ = 2,
+ RTW_WLAN_ACTION_SPCT_TPC_RPRT = 3,
+ RTW_WLAN_ACTION_SPCT_CHL_SWITCH = 4,
+ RTW_WLAN_ACTION_SPCT_EXT_CHL_SWITCH = 5,
+};
+
+enum _PUBLIC_ACTION{
+ ACT_PUBLIC_BSSCOEXIST = 0, /* 20/40 BSS Coexistence */
+ ACT_PUBLIC_DSE_ENABLE = 1,
+ ACT_PUBLIC_DSE_DEENABLE = 2,
+ ACT_PUBLIC_DSE_REG_LOCATION = 3,
+ ACT_PUBLIC_EXT_CHL_SWITCH = 4,
+ ACT_PUBLIC_DSE_MSR_REQ = 5,
+ ACT_PUBLIC_DSE_MSR_RPRT = 6,
+ ACT_PUBLIC_MP = 7, /* Measurement Pilot */
+ ACT_PUBLIC_DSE_PWR_CONSTRAINT = 8,
+ ACT_PUBLIC_VENDOR = 9, /* for WIFI_DIRECT */
+ ACT_PUBLIC_GAS_INITIAL_REQ = 10,
+ ACT_PUBLIC_GAS_INITIAL_RSP = 11,
+ ACT_PUBLIC_GAS_COMEBACK_REQ = 12,
+ ACT_PUBLIC_GAS_COMEBACK_RSP = 13,
+ ACT_PUBLIC_TDLS_DISCOVERY_RSP = 14,
+ ACT_PUBLIC_LOCATION_TRACK = 15,
+ ACT_PUBLIC_MAX
+};
+
+/* BACK action code */
+enum rtw_ieee80211_back_actioncode {
+ RTW_WLAN_ACTION_ADDBA_REQ = 0,
+ RTW_WLAN_ACTION_ADDBA_RESP = 1,
+ RTW_WLAN_ACTION_DELBA = 2,
+};
+
+/* HT features action code */
+enum rtw_ieee80211_ht_actioncode {
+ RTW_WLAN_ACTION_HT_NOTI_CHNL_WIDTH = 0,
+ RTW_WLAN_ACTION_HT_SM_PS = 1,
+ RTW_WLAN_ACTION_HT_PSMP = 2,
+ RTW_WLAN_ACTION_HT_SET_PCO_PHASE = 3,
+ RTW_WLAN_ACTION_HT_CSI = 4,
+ RTW_WLAN_ACTION_HT_NON_COMPRESS_BEAMFORMING = 5,
+ RTW_WLAN_ACTION_HT_COMPRESS_BEAMFORMING = 6,
+ RTW_WLAN_ACTION_HT_ASEL_FEEDBACK = 7,
+};
+
+/* BACK (block-ack) parties */
+enum rtw_ieee80211_back_parties {
+ RTW_WLAN_BACK_RECIPIENT = 0,
+ RTW_WLAN_BACK_INITIATOR = 1,
+ RTW_WLAN_BACK_TIMER = 2,
+};
+
+/* VHT features action code */
+enum rtw_ieee80211_vht_actioncode{
+ RTW_WLAN_ACTION_VHT_COMPRESSED_BEAMFORMING = 0,
+ RTW_WLAN_ACTION_VHT_GROUPID_MANAGEMENT = 1,
+ RTW_WLAN_ACTION_VHT_OPMODE_NOTIFICATION = 2,
+};
+
+
+#define OUI_MICROSOFT 0x0050f2 /* Microsoft (also used in Wi-Fi specs)
+ * 00:50:F2 */
+#define WME_OUI_TYPE 2
+#define WME_OUI_SUBTYPE_INFORMATION_ELEMENT 0
+#define WME_OUI_SUBTYPE_PARAMETER_ELEMENT 1
+#define WME_OUI_SUBTYPE_TSPEC_ELEMENT 2
+#define WME_VERSION 1
+
+#define WME_ACTION_CODE_SETUP_REQUEST 0
+#define WME_ACTION_CODE_SETUP_RESPONSE 1
+#define WME_ACTION_CODE_TEARDOWN 2
+
+#define WME_SETUP_RESPONSE_STATUS_ADMISSION_ACCEPTED 0
+#define WME_SETUP_RESPONSE_STATUS_INVALID_PARAMETERS 1
+#define WME_SETUP_RESPONSE_STATUS_REFUSED 3
+
+#define WME_TSPEC_DIRECTION_UPLINK 0
+#define WME_TSPEC_DIRECTION_DOWNLINK 1
+#define WME_TSPEC_DIRECTION_BI_DIRECTIONAL 3
+
+
+#define OUI_BROADCOM 0x00904c /* Broadcom (Epigram) */
+
+#define VENDOR_HT_CAPAB_OUI_TYPE 0x33 /* 00-90-4c:0x33 */
+
+/**
+ * enum rtw_ieee80211_channel_flags - channel flags
+ *
+ * Channel flags set by the regulatory control code.
+ *
+ * @RTW_IEEE80211_CHAN_DISABLED: This channel is disabled.
+ * @RTW_IEEE80211_CHAN_PASSIVE_SCAN: Only passive scanning is permitted
+ * on this channel.
+ * @RTW_IEEE80211_CHAN_NO_IBSS: IBSS is not allowed on this channel.
+ * @RTW_IEEE80211_CHAN_RADAR: Radar detection is required on this channel.
+ * @RTW_IEEE80211_CHAN_NO_HT40PLUS: extension channel above this channel
+ * is not permitted.
+ * @RTW_IEEE80211_CHAN_NO_HT40MINUS: extension channel below this channel
+ * is not permitted.
+ */
+ enum rtw_ieee80211_channel_flags {
+ RTW_IEEE80211_CHAN_DISABLED = 1<<0,
+ RTW_IEEE80211_CHAN_PASSIVE_SCAN = 1<<1,
+ RTW_IEEE80211_CHAN_NO_IBSS = 1<<2,
+ RTW_IEEE80211_CHAN_RADAR = 1<<3,
+ RTW_IEEE80211_CHAN_NO_HT40PLUS = 1<<4,
+ RTW_IEEE80211_CHAN_NO_HT40MINUS = 1<<5,
+ };
+
+ #define RTW_IEEE80211_CHAN_NO_HT40 \
+ (RTW_IEEE80211_CHAN_NO_HT40PLUS | RTW_IEEE80211_CHAN_NO_HT40MINUS)
+
+/* Represents channel details; a subset of struct ieee80211_channel */
+struct rtw_ieee80211_channel {
+ /* enum nl80211_band band; */
+ /* u16 center_freq; */
+ u16 hw_value;
+ u32 flags;
+ /* int max_antenna_gain; */
+ /* int max_power; */
+ /* int max_reg_power; */
+ /* bool beacon_found; */
+ /* u32 orig_flags; */
+ /* int orig_mag; */
+ /* int orig_mpwr; */
+};
+
+#define CHAN_FMT \
+ /*"band:%d, "*/ \
+ /*"center_freq:%u, "*/ \
+ "hw_value:%u, " \
+ "flags:0x%08x" \
+ /*"max_antenna_gain:%d\n"*/ \
+ /*"max_power:%d\n"*/ \
+ /*"max_reg_power:%d\n"*/ \
+ /*"beacon_found:%u\n"*/ \
+ /*"orig_flags:0x%08x\n"*/ \
+ /*"orig_mag:%d\n"*/ \
+ /*"orig_mpwr:%d\n"*/
+
+#define CHAN_ARG(channel) \
+ /*(channel)->band*/ \
+ /*, (channel)->center_freq*/ \
+ (channel)->hw_value \
+ , (channel)->flags \
+ /*, (channel)->max_antenna_gain*/ \
+ /*, (channel)->max_power*/ \
+ /*, (channel)->max_reg_power*/ \
+ /*, (channel)->beacon_found*/ \
+ /*, (channel)->orig_flags*/ \
+ /*, (channel)->orig_mag*/ \
+ /*, (channel)->orig_mpwr*/
+
+/* Parsed Information Elements */
+struct rtw_ieee802_11_elems {
+ u8 *ssid;
+ u8 ssid_len;
+ u8 *supp_rates;
+ u8 supp_rates_len;
+ u8 *fh_params;
+ u8 fh_params_len;
+ u8 *ds_params;
+ u8 ds_params_len;
+ u8 *cf_params;
+ u8 cf_params_len;
+ u8 *tim;
+ u8 tim_len;
+ u8 *ibss_params;
+ u8 ibss_params_len;
+ u8 *challenge;
+ u8 challenge_len;
+ u8 *erp_info;
+ u8 erp_info_len;
+ u8 *ext_supp_rates;
+ u8 ext_supp_rates_len;
+ u8 *wpa_ie;
+ u8 wpa_ie_len;
+ u8 *rsn_ie;
+ u8 rsn_ie_len;
+ u8 *wme;
+ u8 wme_len;
+ u8 *wme_tspec;
+ u8 wme_tspec_len;
+ u8 *wps_ie;
+ u8 wps_ie_len;
+ u8 *power_cap;
+ u8 power_cap_len;
+ u8 *supp_channels;
+ u8 supp_channels_len;
+ u8 *mdie;
+ u8 mdie_len;
+ u8 *ftie;
+ u8 ftie_len;
+ u8 *timeout_int;
+ u8 timeout_int_len;
+ u8 *ht_capabilities;
+ u8 ht_capabilities_len;
+ u8 *ht_operation;
+ u8 ht_operation_len;
+ u8 *vendor_ht_cap;
+ u8 vendor_ht_cap_len;
+ u8 *vht_capabilities;
+ u8 vht_capabilities_len;
+ u8 *vht_operation;
+ u8 vht_operation_len;
+ u8 *vht_op_mode_notify;
+ u8 vht_op_mode_notify_len;
+};
+
+typedef enum { ParseOK = 0, ParseUnknown = 1, ParseFailed = -1 } ParseRes;
+
+ParseRes rtw_ieee802_11_parse_elems(u8 *start, uint len,
+ struct rtw_ieee802_11_elems *elems,
+ int show_errors);
+
+u8 *rtw_set_fixed_ie(unsigned char *pbuf, unsigned int len, unsigned char *source, unsigned int *frlen);
+u8 *rtw_set_ie(u8 *pbuf, sint index, uint len, u8 *source, uint *frlen);
+
+enum secondary_ch_offset {
+ SCN = 0, /* no secondary channel */
+ SCA = 1, /* secondary channel above */
+ SCB = 3, /* secondary channel below */
+};
+
+u8 *rtw_get_ie(u8 *pbuf, sint index, sint *len, sint limit);
+u8 *rtw_get_ie_ex(u8 *in_ie, uint in_len, u8 eid, u8 *oui, u8 oui_len, u8 *ie, uint *ielen);
+int rtw_ies_remove_ie(u8 *ies, uint *ies_len, uint offset, u8 eid, u8 *oui, u8 oui_len);
+
+void rtw_set_supported_rate(u8 *SupportedRates, uint mode);
+
+unsigned char *rtw_get_wpa_ie(unsigned char *pie, int *wpa_ie_len, int limit);
+unsigned char *rtw_get_wpa2_ie(unsigned char *pie, int *rsn_ie_len, int limit);
+int rtw_get_wpa_cipher_suite(u8 *s);
+int rtw_get_wpa2_cipher_suite(u8 *s);
+int rtw_get_wapi_ie(u8 *in_ie, uint in_len, u8 *wapi_ie, u16 *wapi_len);
+int rtw_parse_wpa_ie(u8 *wpa_ie, int wpa_ie_len, int *group_cipher, int *pairwise_cipher, int *is_8021x);
+int rtw_parse_wpa2_ie(u8 *wpa_ie, int wpa_ie_len, int *group_cipher, int *pairwise_cipher, int *is_8021x);
+
+int rtw_get_sec_ie(u8 *in_ie, uint in_len, u8 *rsn_ie, u16 *rsn_len, u8 *wpa_ie, u16 *wpa_len);
+
+u8 rtw_is_wps_ie(u8 *ie_ptr, uint *wps_ielen);
+u8 *rtw_get_wps_ie(u8 *in_ie, uint in_len, u8 *wps_ie, uint *wps_ielen);
+u8 *rtw_get_wps_attr(u8 *wps_ie, uint wps_ielen, u16 target_attr_id, u8 *buf_attr, u32 *len_attr);
+u8 *rtw_get_wps_attr_content(u8 *wps_ie, uint wps_ielen, u16 target_attr_id, u8 *buf_content, uint *len_content);
+
+/**
+ * for_each_ie - iterate over contiguous IEs
+ * @ie: iterator; points at the current information element
+ * @buf: buffer holding the concatenated IEs
+ * @buf_len: total length of @buf in bytes
+ */
+#define for_each_ie(ie, buf, buf_len) \
+ for (ie = (void*)buf; (((u8 *)ie) - ((u8 *)buf) + 1) < buf_len; ie = (void*)(((u8 *)ie) + *(((u8 *)ie)+1) + 2))
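+
+/*
+ * Illustrative sketch only (ies_buf/ies_len are hypothetical variables):
+ * walking a buffer of TLV-encoded information elements with for_each_ie().
+ *
+ *	u8 *ie;
+ *
+ *	for_each_ie(ie, ies_buf, ies_len) {
+ *		u8 eid = ie[0];
+ *		u8 elen = ie[1];
+ *		// ie + 2 points at elen bytes of element payload
+ *	}
+ */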
+
+uint rtw_get_rateset_len(u8 *rateset);
+
+struct registry_priv;
+int rtw_generate_ie(struct registry_priv *pregistrypriv);
+
+
+int rtw_get_bit_value_from_ieee_value(u8 val);
+
+uint rtw_is_cckrates_included(u8 *rate);
+
+uint rtw_is_cckratesonly_included(u8 *rate);
+
+int rtw_check_network_type(unsigned char *rate, int ratelen, int channel);
+
+void rtw_get_bcn_info(struct wlan_network *pnetwork);
+
+void rtw_macaddr_cfg(struct device *dev, u8 *mac_addr);
+
+u16 rtw_mcs_rate(u8 rf_type, u8 bw_40MHz, u8 short_GI, unsigned char *MCS_rate);
+
+int rtw_action_frame_parse(const u8 *frame, u32 frame_len, u8 *category, u8 *action);
+const char *action_public_str(u8 action);
+
+#endif /* IEEE80211_H */
diff --git a/drivers/staging/rtl8723bs/include/ioctl_cfg80211.h b/drivers/staging/rtl8723bs/include/ioctl_cfg80211.h
new file mode 100644
index 000000000000..2d42e0c2deff
--- /dev/null
+++ b/drivers/staging/rtl8723bs/include/ioctl_cfg80211.h
@@ -0,0 +1,128 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#ifndef __IOCTL_CFG80211_H__
+#define __IOCTL_CFG80211_H__
+
+#include <linux/version.h>
+
+struct rtw_wdev_invit_info {
+ u8 state; /* 0: req, 1:rep */
+ u8 peer_mac[ETH_ALEN];
+ u8 active;
+ u8 token;
+ u8 flags;
+ u8 status;
+ u8 req_op_ch;
+ u8 rsp_op_ch;
+};
+
+#define rtw_wdev_invit_info_init(invit_info) \
+ do { \
+ (invit_info)->state = 0xff; \
+ memset((invit_info)->peer_mac, 0, ETH_ALEN); \
+ (invit_info)->active = 0xff; \
+ (invit_info)->token = 0; \
+ (invit_info)->flags = 0x00; \
+ (invit_info)->status = 0xff; \
+ (invit_info)->req_op_ch = 0; \
+ (invit_info)->rsp_op_ch = 0; \
+ } while (0)
+
+struct rtw_wdev_nego_info {
+ u8 state; /* 0: req, 1:rep, 2:conf */
+ u8 peer_mac[ETH_ALEN];
+ u8 active;
+ u8 token;
+ u8 status;
+ u8 req_intent;
+ u8 req_op_ch;
+ u8 req_listen_ch;
+ u8 rsp_intent;
+ u8 rsp_op_ch;
+ u8 conf_op_ch;
+};
+
+#define rtw_wdev_nego_info_init(nego_info) \
+ do { \
+ (nego_info)->state = 0xff; \
+ memset((nego_info)->peer_mac, 0, ETH_ALEN); \
+ (nego_info)->active = 0xff; \
+ (nego_info)->token = 0; \
+ (nego_info)->status = 0xff; \
+ (nego_info)->req_intent = 0xff; \
+ (nego_info)->req_op_ch = 0; \
+ (nego_info)->req_listen_ch = 0; \
+ (nego_info)->rsp_intent = 0xff; \
+ (nego_info)->rsp_op_ch = 0; \
+ (nego_info)->conf_op_ch = 0; \
+ } while (0)
+
+struct rtw_wdev_priv {
+ struct wireless_dev *rtw_wdev;
+
+ struct adapter *padapter;
+
+ struct cfg80211_scan_request *scan_request;
+ _lock scan_req_lock;
+
+ struct net_device *pmon_ndev;/* for monitor interface */
+ char ifname_mon[IFNAMSIZ + 1]; /* interface name for monitor interface */
+
+ u8 p2p_enabled;
+
+ u8 provdisc_req_issued;
+
+ struct rtw_wdev_invit_info invit_info;
+ struct rtw_wdev_nego_info nego_info;
+
+ u8 bandroid_scan;
+ bool block;
+ bool power_mgmt;
+};
+
+#define wiphy_to_adapter(x) (*((struct adapter **)wiphy_priv(x)))
+
+#define wdev_to_ndev(w) ((w)->netdev)
+
+int rtw_wdev_alloc(struct adapter *padapter, struct device *dev);
+void rtw_wdev_free(struct wireless_dev *wdev);
+void rtw_wdev_unregister(struct wireless_dev *wdev);
+
+void rtw_cfg80211_init_wiphy(struct adapter *padapter);
+
+void rtw_cfg80211_unlink_bss(struct adapter *padapter, struct wlan_network *pnetwork);
+void rtw_cfg80211_surveydone_event_callback(struct adapter *padapter);
+struct cfg80211_bss *rtw_cfg80211_inform_bss(struct adapter *padapter, struct wlan_network *pnetwork);
+int rtw_cfg80211_check_bss(struct adapter *padapter);
+void rtw_cfg80211_ibss_indicate_connect(struct adapter *padapter);
+void rtw_cfg80211_indicate_connect(struct adapter *padapter);
+void rtw_cfg80211_indicate_disconnect(struct adapter *padapter);
+void rtw_cfg80211_indicate_scan_done(struct adapter *adapter, bool aborted);
+
+void rtw_cfg80211_indicate_sta_assoc(struct adapter *padapter, u8 *pmgmt_frame, uint frame_len);
+void rtw_cfg80211_indicate_sta_disassoc(struct adapter *padapter, unsigned char *da, unsigned short reason);
+
+void rtw_cfg80211_rx_action(struct adapter *adapter, u8 *frame, uint frame_len, const char *msg);
+
+bool rtw_cfg80211_pwr_mgmt(struct adapter *adapter);
+
+#define rtw_cfg80211_rx_mgmt(adapter, freq, sig_dbm, buf, len, gfp) cfg80211_rx_mgmt((adapter)->rtw_wdev, freq, sig_dbm, buf, len, 0)
+#define rtw_cfg80211_send_rx_assoc(adapter, bss, buf, len) cfg80211_send_rx_assoc((adapter)->pnetdev, bss, buf, len)
+#define rtw_cfg80211_mgmt_tx_status(adapter, cookie, buf, len, ack, gfp) cfg80211_mgmt_tx_status((adapter)->rtw_wdev, cookie, buf, len, ack, gfp)
+#define rtw_cfg80211_ready_on_channel(adapter, cookie, chan, channel_type, duration, gfp) cfg80211_ready_on_channel((adapter)->rtw_wdev, cookie, chan, duration, gfp)
+#define rtw_cfg80211_remain_on_channel_expired(adapter, cookie, chan, chan_type, gfp) cfg80211_remain_on_channel_expired((adapter)->rtw_wdev, cookie, chan, gfp)
+
+#endif /* __IOCTL_CFG80211_H__ */
diff --git a/drivers/staging/rtl8723bs/include/mlme_osdep.h b/drivers/staging/rtl8723bs/include/mlme_osdep.h
new file mode 100644
index 000000000000..69fd5543a3b1
--- /dev/null
+++ b/drivers/staging/rtl8723bs/include/mlme_osdep.h
@@ -0,0 +1,27 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#ifndef __MLME_OSDEP_H_
+#define __MLME_OSDEP_H_
+
+
+extern void rtw_init_mlme_timer(struct adapter *padapter);
+extern void rtw_os_indicate_disconnect(struct adapter *adapter);
+extern void rtw_os_indicate_connect(struct adapter *adapter);
+void rtw_os_indicate_scan_done(struct adapter *padapter, bool aborted);
+extern void rtw_report_sec_ie(struct adapter *adapter, u8 authmode, u8 *sec_ie);
+
+void rtw_reset_securitypriv(struct adapter *adapter);
+
+#endif /* __MLME_OSDEP_H_ */
diff --git a/drivers/staging/rtl8723bs/include/osdep_intf.h b/drivers/staging/rtl8723bs/include/osdep_intf.h
new file mode 100644
index 000000000000..cd738da3bec9
--- /dev/null
+++ b/drivers/staging/rtl8723bs/include/osdep_intf.h
@@ -0,0 +1,88 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+
+#ifndef __OSDEP_INTF_H_
+#define __OSDEP_INTF_H_
+
+
+struct intf_priv {
+
+ u8 *intf_dev;
+ u32 max_iosz; /* USB2.0: 128, USB1.1: 64, SDIO:64 */
+ u32 max_xmitsz; /* USB2.0: unlimited, SDIO:512 */
+ u32 max_recvsz; /* USB2.0: unlimited, SDIO:512 */
+
+ volatile u8 *io_rwmem;
+ volatile u8 *allocated_io_rwmem;
+ u32 io_wsz; /* unit: 4bytes */
+ u32 io_rsz;/* unit: 4bytes */
+ u8 intf_status;
+
+ void (*_bus_io)(u8 *priv);
+
+/*
+ * Under synchronous IRP (SDIO/USB), a protection mechanism is needed for
+ * io_rwmem (the read/write protocol).
+ *
+ * Under asynchronous IRP (SDIO/USB), protection is provided by the pending
+ * queue.
+ */
+
+ _mutex ioctl_mutex;
+};
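+
+/*
+ * Illustrative sketch only: a typical synchronous access pattern under the
+ * scheme described above would serialize io_rwmem with ioctl_mutex, e.g.
+ *
+ *	mutex_lock(&pintfpriv->ioctl_mutex);
+ *	// ... read/write pintfpriv->io_rwmem ...
+ *	mutex_unlock(&pintfpriv->ioctl_mutex);
+ *
+ * (pintfpriv is a hypothetical struct intf_priv pointer.)
+ */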
+
+
+#ifdef CONFIG_R871X_TEST
+int rtw_start_pseudo_adhoc(struct adapter *padapter);
+int rtw_stop_pseudo_adhoc(struct adapter *padapter);
+#endif
+
+struct dvobj_priv *devobj_init(void);
+void devobj_deinit(struct dvobj_priv *pdvobj);
+
+u8 rtw_init_drv_sw(struct adapter *padapter);
+u8 rtw_free_drv_sw(struct adapter *padapter);
+u8 rtw_reset_drv_sw(struct adapter *padapter);
+void rtw_dev_unload(struct adapter *padapter);
+
+u32 rtw_start_drv_threads(struct adapter *padapter);
+void rtw_stop_drv_threads(struct adapter *padapter);
+void rtw_cancel_all_timer(struct adapter *padapter);
+
+int rtw_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+
+int rtw_init_netdev_name(struct net_device *pnetdev, const char *ifname);
+struct net_device *rtw_init_netdev(struct adapter *padapter);
+void rtw_unregister_netdevs(struct dvobj_priv *dvobj);
+
+u16 rtw_recv_select_queue(struct sk_buff *skb);
+
+int rtw_ndev_notifier_register(void);
+void rtw_ndev_notifier_unregister(void);
+
+#include "../os_dep/rtw_proc.h"
+
+void rtw_ips_dev_unload(struct adapter *padapter);
+
+int rtw_ips_pwr_up(struct adapter *padapter);
+void rtw_ips_pwr_down(struct adapter *padapter);
+
+int rtw_drv_register_netdev(struct adapter *padapter);
+void rtw_ndev_destructor(_nic_hdl ndev);
+
+int rtw_suspend_common(struct adapter *padapter);
+int rtw_resume_common(struct adapter *padapter);
+
+#endif /* __OSDEP_INTF_H_ */
diff --git a/drivers/staging/rtl8723bs/include/osdep_service.h b/drivers/staging/rtl8723bs/include/osdep_service.h
new file mode 100644
index 000000000000..fdeabc1daeca
--- /dev/null
+++ b/drivers/staging/rtl8723bs/include/osdep_service.h
@@ -0,0 +1,281 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2013 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#ifndef __OSDEP_SERVICE_H_
+#define __OSDEP_SERVICE_H_
+
+
+#define _FAIL 0
+#define _SUCCESS 1
+#define RTW_RX_HANDLED 2
+
+#include <osdep_service_linux.h>
+
+#ifndef BIT
+ #define BIT(x) (1 << (x))
+#endif
+
+#define BIT0 0x00000001
+#define BIT1 0x00000002
+#define BIT2 0x00000004
+#define BIT3 0x00000008
+#define BIT4 0x00000010
+#define BIT5 0x00000020
+#define BIT6 0x00000040
+#define BIT7 0x00000080
+#define BIT8 0x00000100
+#define BIT9 0x00000200
+#define BIT10 0x00000400
+#define BIT11 0x00000800
+#define BIT12 0x00001000
+#define BIT13 0x00002000
+#define BIT14 0x00004000
+#define BIT15 0x00008000
+#define BIT16 0x00010000
+#define BIT17 0x00020000
+#define BIT18 0x00040000
+#define BIT19 0x00080000
+#define BIT20 0x00100000
+#define BIT21 0x00200000
+#define BIT22 0x00400000
+#define BIT23 0x00800000
+#define BIT24 0x01000000
+#define BIT25 0x02000000
+#define BIT26 0x04000000
+#define BIT27 0x08000000
+#define BIT28 0x10000000
+#define BIT29 0x20000000
+#define BIT30 0x40000000
+#define BIT31 0x80000000
+#define BIT32 0x0100000000
+#define BIT33 0x0200000000
+#define BIT34 0x0400000000
+#define BIT35 0x0800000000
+#define BIT36 0x1000000000
+
+extern int RTW_STATUS_CODE(int error_code);
+
+/* flags used for rtw_mstat_update() */
+enum mstat_f {
+ /* type: 0x00ff */
+ MSTAT_TYPE_VIR = 0x00,
+ MSTAT_TYPE_PHY = 0x01,
+ MSTAT_TYPE_SKB = 0x02,
+ MSTAT_TYPE_USB = 0x03,
+ MSTAT_TYPE_MAX = 0x04,
+
+ /* func: 0xff00 */
+ MSTAT_FUNC_UNSPECIFIED = 0x00<<8,
+ MSTAT_FUNC_IO = 0x01<<8,
+ MSTAT_FUNC_TX_IO = 0x02<<8,
+ MSTAT_FUNC_RX_IO = 0x03<<8,
+ MSTAT_FUNC_TX = 0x04<<8,
+ MSTAT_FUNC_RX = 0x05<<8,
+ MSTAT_FUNC_MAX = 0x06<<8,
+};
+
+#define mstat_tf_idx(flags) ((flags)&0xff)
+#define mstat_ff_idx(flags) (((flags)&0xff00) >> 8)
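+/*
+ * For example (illustrative only): with flags == (MSTAT_TYPE_SKB | MSTAT_FUNC_RX),
+ * mstat_tf_idx(flags) == MSTAT_TYPE_SKB and mstat_ff_idx(flags) == (MSTAT_FUNC_RX >> 8).
+ */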
+
+typedef enum mstat_status{
+ MSTAT_ALLOC_SUCCESS = 0,
+ MSTAT_ALLOC_FAIL,
+ MSTAT_FREE
+} MSTAT_STATUS;
+
+#define rtw_mstat_update(flag, status, sz) do {} while (0)
+#define rtw_mstat_dump(sel) do {} while (0)
+u8 *_rtw_zmalloc(u32 sz);
+u8 *_rtw_malloc(u32 sz);
+void _kfree(u8 *pbuf, u32 sz);
+
+struct sk_buff *_rtw_skb_alloc(u32 sz);
+struct sk_buff *_rtw_skb_copy(const struct sk_buff *skb);
+struct sk_buff *_rtw_skb_clone(struct sk_buff *skb);
+int _rtw_netif_rx(_nic_hdl ndev, struct sk_buff *skb);
+
+#define rtw_malloc(sz) _rtw_malloc((sz))
+#define rtw_zmalloc(sz) _rtw_zmalloc((sz))
+
+#define rtw_skb_alloc(size) _rtw_skb_alloc((size))
+#define rtw_skb_alloc_f(size, mstat_f) _rtw_skb_alloc((size))
+#define rtw_skb_copy(skb) _rtw_skb_copy((skb))
+#define rtw_skb_clone(skb) _rtw_skb_clone((skb))
+#define rtw_skb_copy_f(skb, mstat_f) _rtw_skb_copy((skb))
+#define rtw_skb_clone_f(skb, mstat_f) _rtw_skb_clone((skb))
+#define rtw_netif_rx(ndev, skb) _rtw_netif_rx(ndev, skb)
+
+extern void _rtw_init_queue(struct __queue *pqueue);
+
+extern void rtw_init_timer(_timer *ptimer, void *padapter, void *pfunc);
+
+static __inline void thread_enter(char *name)
+{
+ allow_signal(SIGTERM);
+}
+
+static __inline void flush_signals_thread(void)
+{
+ if (signal_pending(current))
+ flush_signals(current);
+}
+
+#define rtw_warn_on(condition) WARN_ON(condition)
+
+__inline static int rtw_bug_check(void *parg1, void *parg2, void *parg3, void *parg4)
+{
+ return true;
+}
+
+#define _RND(sz, r) ((((sz)+((r)-1))/(r))*(r))
+#define RND4(x) ((((x) >> 2) + ((((x) & 3) == 0) ? 0 : 1)) << 2)
+
+__inline static u32 _RND4(u32 sz)
+{
+ return ((sz >> 2) + ((sz & 3) ? 1 : 0)) << 2;
+}
+
+__inline static u32 _RND8(u32 sz)
+{
+ return ((sz >> 3) + ((sz & 7) ? 1 : 0)) << 3;
+}
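+
+/* Illustrative examples: _RND4(5) == 8, _RND4(8) == 8, _RND8(9) == 16 */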
+
+#ifndef MAC_FMT
+#define MAC_FMT "%02x:%02x:%02x:%02x:%02x:%02x"
+#endif
+#ifndef MAC_ARG
+#define MAC_ARG(x) ((u8 *)(x))[0], ((u8 *)(x))[1], ((u8 *)(x))[2], ((u8 *)(x))[3], ((u8 *)(x))[4], ((u8 *)(x))[5]
+#endif
+
+
+#ifdef CONFIG_AP_WOWLAN
+extern void rtw_softap_lock_suspend(void);
+extern void rtw_softap_unlock_suspend(void);
+#endif
+
+/* File operation APIs; Linux-only for now */
+extern int rtw_is_file_readable(char *path);
+extern int rtw_retrive_from_file(char *path, u8 *buf, u32 sz);
+
+extern void rtw_free_netdev(struct net_device *netdev);
+
+
+extern u64 rtw_modular64(u64 x, u64 y);
+
+/* Macros for handling unaligned memory accesses */
+
+#define RTW_GET_BE16(a) ((u16) (((a)[0] << 8) | (a)[1]))
+#define RTW_PUT_BE16(a, val) \
+ do { \
+ (a)[0] = ((u16) (val)) >> 8; \
+ (a)[1] = ((u16) (val)) & 0xff; \
+ } while (0)
+
+#define RTW_GET_LE16(a) ((u16) (((a)[1] << 8) | (a)[0]))
+#define RTW_PUT_LE16(a, val) \
+ do { \
+ (a)[1] = ((u16) (val)) >> 8; \
+ (a)[0] = ((u16) (val)) & 0xff; \
+ } while (0)
+
+#define RTW_GET_BE24(a) ((((u32) (a)[0]) << 16) | (((u32) (a)[1]) << 8) | \
+ ((u32) (a)[2]))
+#define RTW_PUT_BE24(a, val) \
+ do { \
+ (a)[0] = (u8) ((((u32) (val)) >> 16) & 0xff); \
+ (a)[1] = (u8) ((((u32) (val)) >> 8) & 0xff); \
+ (a)[2] = (u8) (((u32) (val)) & 0xff); \
+ } while (0)
+
+#define RTW_GET_BE32(a) ((((u32) (a)[0]) << 24) | (((u32) (a)[1]) << 16) | \
+ (((u32) (a)[2]) << 8) | ((u32) (a)[3]))
+#define RTW_PUT_BE32(a, val) \
+ do { \
+ (a)[0] = (u8) ((((u32) (val)) >> 24) & 0xff); \
+ (a)[1] = (u8) ((((u32) (val)) >> 16) & 0xff); \
+ (a)[2] = (u8) ((((u32) (val)) >> 8) & 0xff); \
+ (a)[3] = (u8) (((u32) (val)) & 0xff); \
+ } while (0)
+
+#define RTW_GET_LE32(a) ((((u32) (a)[3]) << 24) | (((u32) (a)[2]) << 16) | \
+ (((u32) (a)[1]) << 8) | ((u32) (a)[0]))
+#define RTW_PUT_LE32(a, val) \
+ do { \
+ (a)[3] = (u8) ((((u32) (val)) >> 24) & 0xff); \
+ (a)[2] = (u8) ((((u32) (val)) >> 16) & 0xff); \
+ (a)[1] = (u8) ((((u32) (val)) >> 8) & 0xff); \
+ (a)[0] = (u8) (((u32) (val)) & 0xff); \
+ } while (0)
+
+#define RTW_GET_BE64(a) ((((u64) (a)[0]) << 56) | (((u64) (a)[1]) << 48) | \
+ (((u64) (a)[2]) << 40) | (((u64) (a)[3]) << 32) | \
+ (((u64) (a)[4]) << 24) | (((u64) (a)[5]) << 16) | \
+ (((u64) (a)[6]) << 8) | ((u64) (a)[7]))
+#define RTW_PUT_BE64(a, val) \
+ do { \
+ (a)[0] = (u8) (((u64) (val)) >> 56); \
+ (a)[1] = (u8) (((u64) (val)) >> 48); \
+ (a)[2] = (u8) (((u64) (val)) >> 40); \
+ (a)[3] = (u8) (((u64) (val)) >> 32); \
+ (a)[4] = (u8) (((u64) (val)) >> 24); \
+ (a)[5] = (u8) (((u64) (val)) >> 16); \
+ (a)[6] = (u8) (((u64) (val)) >> 8); \
+ (a)[7] = (u8) (((u64) (val)) & 0xff); \
+ } while (0)
+
+#define RTW_GET_LE64(a) ((((u64) (a)[7]) << 56) | (((u64) (a)[6]) << 48) | \
+ (((u64) (a)[5]) << 40) | (((u64) (a)[4]) << 32) | \
+ (((u64) (a)[3]) << 24) | (((u64) (a)[2]) << 16) | \
+ (((u64) (a)[1]) << 8) | ((u64) (a)[0]))
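+
+/*
+ * Illustrative sketch only (ie_len/timestamp/len/tsf are hypothetical
+ * variables): packing a 16-bit value little-endian and reading a 32-bit
+ * big-endian field back with the helpers above.
+ *
+ *	u8 hdr[6];
+ *
+ *	RTW_PUT_LE16(hdr, ie_len);        // hdr[0] = low byte, hdr[1] = high byte
+ *	RTW_PUT_BE32(hdr + 2, timestamp); // hdr[2..5] = MSB first
+ *	len = RTW_GET_LE16(hdr);
+ *	tsf = RTW_GET_BE32(hdr + 2);
+ */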
+
+void rtw_buf_free(u8 **buf, u32 *buf_len);
+void rtw_buf_update(u8 **buf, u32 *buf_len, u8 *src, u32 src_len);
+
+struct rtw_cbuf {
+ u32 write;
+ u32 read;
+ u32 size;
+ void *bufs[];
+};
+
+bool rtw_cbuf_full(struct rtw_cbuf *cbuf);
+bool rtw_cbuf_empty(struct rtw_cbuf *cbuf);
+bool rtw_cbuf_push(struct rtw_cbuf *cbuf, void *buf);
+void *rtw_cbuf_pop(struct rtw_cbuf *cbuf);
+struct rtw_cbuf *rtw_cbuf_alloc(u32 size);
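+
+/*
+ * Illustrative usage sketch only (item/handle() are hypothetical):
+ *
+ *	struct rtw_cbuf *q = rtw_cbuf_alloc(16);
+ *
+ *	if (q && !rtw_cbuf_full(q))
+ *		rtw_cbuf_push(q, item);
+ *
+ *	while (!rtw_cbuf_empty(q))
+ *		handle(rtw_cbuf_pop(q));
+ */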
+
+/* String handler */
+/*
+ * Write formatted output to sized buffer
+ */
+#define rtw_sprintf(buf, size, format, arg...) snprintf(buf, size, format, ##arg)
+
+#endif
diff --git a/drivers/staging/rtl8723bs/include/osdep_service_linux.h b/drivers/staging/rtl8723bs/include/osdep_service_linux.h
new file mode 100644
index 000000000000..486e8184b0b2
--- /dev/null
+++ b/drivers/staging/rtl8723bs/include/osdep_service_linux.h
@@ -0,0 +1,178 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2013 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#ifndef __OSDEP_LINUX_SERVICE_H_
+#define __OSDEP_LINUX_SERVICE_H_
+
+ #include <linux/spinlock.h>
+ #include <linux/compiler.h>
+ #include <linux/kernel.h>
+ #include <linux/errno.h>
+ #include <linux/init.h>
+ #include <linux/slab.h>
+ #include <linux/module.h>
+ #include <linux/kref.h>
+ /* include <linux/smp_lock.h> */
+ #include <linux/netdevice.h>
+ #include <linux/skbuff.h>
+ #include <asm/uaccess.h>
+ #include <asm/byteorder.h>
+ #include <asm/atomic.h>
+ #include <asm/io.h>
+ #include <linux/semaphore.h>
+ #include <linux/sem.h>
+ #include <linux/sched.h>
+ #include <linux/etherdevice.h>
+ #include <linux/wireless.h>
+ #include <net/iw_handler.h>
+ #include <linux/if_arp.h>
+ #include <linux/rtnetlink.h>
+ #include <linux/delay.h>
+ #include <linux/interrupt.h> /* for struct tasklet_struct */
+ #include <linux/ip.h>
+ #include <linux/kthread.h>
+ #include <linux/list.h>
+ #include <linux/vmalloc.h>
+
+/* #include <linux/ieee80211.h> */
+ #include <net/ieee80211_radiotap.h>
+ #include <net/cfg80211.h>
+
+ typedef struct semaphore _sema;
+ typedef spinlock_t _lock;
+ typedef struct mutex _mutex;
+ typedef struct timer_list _timer;
+
+ struct __queue {
+ struct list_head queue;
+ _lock lock;
+ };
+
+ typedef struct sk_buff _pkt;
+ typedef unsigned char _buffer;
+
+ typedef int _OS_STATUS;
+ /* typedef u32 _irqL; */
+ typedef unsigned long _irqL;
+ typedef struct net_device * _nic_hdl;
+
+ #define thread_exit() complete_and_exit(NULL, 0)
+
+ typedef void timer_hdl_return;
+ typedef void* timer_hdl_context;
+
+ typedef struct work_struct _workitem;
+
+__inline static struct list_head *get_next(struct list_head *list)
+{
+ return list->next;
+}
+
+__inline static struct list_head *get_list_head(struct __queue *queue)
+{
+ return (&(queue->queue));
+}
+
+
+#define LIST_CONTAINOR(ptr, type, member) \
+ ((type *)((char *)(ptr)-(__kernel_size_t)(&((type *)0)->member)))
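+
+/*
+ * LIST_CONTAINOR() mirrors the kernel's container_of() (minus the type
+ * check). Illustrative only, with a hypothetical struct:
+ *
+ *	struct foo { int x; struct list_head list; };
+ *	struct foo *f = LIST_CONTAINOR(plist, struct foo, list);
+ */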
+
+#define RTW_TIMER_HDL_ARGS void *FunctionContext
+
+__inline static void _init_timer(_timer *ptimer, _nic_hdl nic_hdl, void *pfunc, void* cntx)
+{
+ /* setup_timer(ptimer, pfunc, (u32)cntx); */
+ ptimer->function = pfunc;
+ ptimer->data = (unsigned long)cntx;
+ init_timer(ptimer);
+}
+
+__inline static void _set_timer(_timer *ptimer, u32 delay_time)
+{
+ mod_timer(ptimer , (jiffies+(delay_time*HZ/1000)));
+}
+
+__inline static void _cancel_timer(_timer *ptimer, u8 *bcancelled)
+{
+ del_timer_sync(ptimer);
+ *bcancelled = true;/* true == 1; false == 0 */
+}
+
+
+__inline static void _init_workitem(_workitem *pwork, void *pfunc, void *cntx)
+{
+ INIT_WORK(pwork, pfunc);
+}
+
+__inline static void _set_workitem(_workitem *pwork)
+{
+ schedule_work(pwork);
+}
+
+__inline static void _cancel_workitem_sync(_workitem *pwork)
+{
+ cancel_work_sync(pwork);
+}
+
+static inline int rtw_netif_queue_stopped(struct net_device *pnetdev)
+{
+ return (netif_tx_queue_stopped(netdev_get_tx_queue(pnetdev, 0)) &&
+ netif_tx_queue_stopped(netdev_get_tx_queue(pnetdev, 1)) &&
+ netif_tx_queue_stopped(netdev_get_tx_queue(pnetdev, 2)) &&
+ netif_tx_queue_stopped(netdev_get_tx_queue(pnetdev, 3)));
+}
+
+static inline void rtw_netif_wake_queue(struct net_device *pnetdev)
+{
+ netif_tx_wake_all_queues(pnetdev);
+}
+
+static inline void rtw_netif_start_queue(struct net_device *pnetdev)
+{
+ netif_tx_start_all_queues(pnetdev);
+}
+
+static inline void rtw_netif_stop_queue(struct net_device *pnetdev)
+{
+ netif_tx_stop_all_queues(pnetdev);
+}
+
+static inline void rtw_merge_string(char *dst, int dst_len, char *src1, char *src2)
+{
+ int len = 0;
+ len += snprintf(dst+len, dst_len - len, "%s", src1);
+ len += snprintf(dst+len, dst_len - len, "%s", src2);
+}
+
+#define rtw_signal_process(pid, sig) kill_pid(find_vpid((pid)), (sig), 1)
+
+#define rtw_netdev_priv(netdev) (((struct rtw_netdev_priv_indicator *)netdev_priv(netdev))->priv)
+
+#define NDEV_FMT "%s"
+#define NDEV_ARG(ndev) ndev->name
+#define ADPT_FMT "%s"
+#define ADPT_ARG(adapter) adapter->pnetdev->name
+#define FUNC_NDEV_FMT "%s(%s)"
+#define FUNC_NDEV_ARG(ndev) __func__, ndev->name
+#define FUNC_ADPT_FMT "%s(%s)"
+#define FUNC_ADPT_ARG(adapter) __func__, adapter->pnetdev->name
+
+struct rtw_netdev_priv_indicator {
+ void *priv;
+ u32 sizeof_priv;
+};
+struct net_device *rtw_alloc_etherdev_with_old_priv(int sizeof_priv, void *old_priv);
+extern struct net_device *rtw_alloc_etherdev(int sizeof_priv);
+
+#endif
diff --git a/drivers/staging/rtl8723bs/include/recv_osdep.h b/drivers/staging/rtl8723bs/include/recv_osdep.h
new file mode 100644
index 000000000000..a480874a2f2a
--- /dev/null
+++ b/drivers/staging/rtl8723bs/include/recv_osdep.h
@@ -0,0 +1,48 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#ifndef __RECV_OSDEP_H_
+#define __RECV_OSDEP_H_
+
+
+extern sint _rtw_init_recv_priv(struct recv_priv *precvpriv, struct adapter *padapter);
+extern void _rtw_free_recv_priv(struct recv_priv *precvpriv);
+
+
+extern s32 rtw_recv_entry(union recv_frame *precv_frame);
+extern int rtw_recv_indicatepkt(struct adapter *adapter, union recv_frame *precv_frame);
+extern void rtw_recv_returnpacket(_nic_hdl cnxt, _pkt *preturnedpkt);
+
+extern void rtw_handle_tkip_mic_err(struct adapter *padapter, u8 bgroup);
+
+int rtw_init_recv_priv(struct recv_priv *precvpriv, struct adapter *padapter);
+void rtw_free_recv_priv(struct recv_priv *precvpriv);
+
+
+int rtw_os_recv_resource_alloc(struct adapter *padapter, union recv_frame *precvframe);
+void rtw_os_recv_resource_free(struct recv_priv *precvpriv);
+
+
+void rtw_os_free_recvframe(union recv_frame *precvframe);
+
+
+int rtw_os_recvbuf_resource_free(struct adapter *padapter, struct recv_buf *precvbuf);
+
+_pkt *rtw_os_alloc_msdu_pkt(union recv_frame *prframe, u16 nSubframe_Length, u8 *pdata);
+void rtw_os_recv_indicate_pkt(struct adapter *padapter, _pkt *pkt, struct rx_pkt_attrib *pattrib);
+
+void rtw_init_recv_timer(struct recv_reorder_ctrl *preorder_ctrl);
+
+
+#endif /* __RECV_OSDEP_H_ */
diff --git a/drivers/staging/rtl8723bs/include/rtl8192c_recv.h b/drivers/staging/rtl8723bs/include/rtl8192c_recv.h
new file mode 100644
index 000000000000..3e1be0092df4
--- /dev/null
+++ b/drivers/staging/rtl8723bs/include/rtl8192c_recv.h
@@ -0,0 +1,50 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#ifndef _RTL8192C_RECV_H_
+#define _RTL8192C_RECV_H_
+
+#define RECV_BLK_SZ 512
+#define RECV_BLK_CNT 16
+#define RECV_BLK_TH RECV_BLK_CNT
+
+#define MAX_RECVBUF_SZ (10240)
+
+struct phy_stat {
+ unsigned int phydw0;
+ unsigned int phydw1;
+ unsigned int phydw2;
+ unsigned int phydw3;
+ unsigned int phydw4;
+ unsigned int phydw5;
+ unsigned int phydw6;
+ unsigned int phydw7;
+};
+
+/* Rx smooth factor */
+#define Rx_Smooth_Factor (20)
+
+
+void rtl8192c_translate_rx_signal_stuff(union recv_frame *precvframe, struct phy_stat *pphy_status);
+void rtl8192c_query_rx_desc_status(union recv_frame *precvframe, struct recv_stat *pdesc);
+
+#endif
diff --git a/drivers/staging/rtl8723bs/include/rtl8192c_rf.h b/drivers/staging/rtl8723bs/include/rtl8192c_rf.h
new file mode 100644
index 000000000000..0dbee562d19b
--- /dev/null
+++ b/drivers/staging/rtl8723bs/include/rtl8192c_rf.h
@@ -0,0 +1,39 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#ifndef _RTL8192C_RF_H_
+#define _RTL8192C_RF_H_
+
+
+/* */
+/* RF RL6052 Series API */
+/* */
+void rtl8192c_RF_ChangeTxPath(struct adapter *Adapter, u16 DataRate);
+void rtl8192c_PHY_RF6052SetBandwidth(struct adapter *Adapter, enum CHANNEL_WIDTH Bandwidth);
+void rtl8192c_PHY_RF6052SetCckTxPower(struct adapter *Adapter, u8 *pPowerlevel);
+void rtl8192c_PHY_RF6052SetOFDMTxPower(struct adapter *Adapter, u8 *pPowerLevel, u8 Channel);
+int PHY_RF6052_Config8192C(struct adapter *Adapter);
+
+/*--------------------------Exported Function prototype---------------------*/
+
+
+#endif /* _RTL8192C_RF_H_ */
diff --git a/drivers/staging/rtl8723bs/include/rtl8723b_cmd.h b/drivers/staging/rtl8723bs/include/rtl8723b_cmd.h
new file mode 100644
index 000000000000..8d610646ad17
--- /dev/null
+++ b/drivers/staging/rtl8723bs/include/rtl8723b_cmd.h
@@ -0,0 +1,199 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#ifndef __RTL8723B_CMD_H__
+#define __RTL8723B_CMD_H__
+
+/* */
+/* H2C CMD DEFINITION ------------------------------------------------ */
+/* */
+
+enum h2c_cmd_8723B {
+ /* Common Class: 000 */
+ H2C_8723B_RSVD_PAGE = 0x00,
+ H2C_8723B_MEDIA_STATUS_RPT = 0x01,
+ H2C_8723B_SCAN_ENABLE = 0x02,
+ H2C_8723B_KEEP_ALIVE = 0x03,
+ H2C_8723B_DISCON_DECISION = 0x04,
+ H2C_8723B_PSD_OFFLOAD = 0x05,
+ H2C_8723B_AP_OFFLOAD = 0x08,
+ H2C_8723B_BCN_RSVDPAGE = 0x09,
+ H2C_8723B_PROBERSP_RSVDPAGE = 0x0A,
+ H2C_8723B_FCS_RSVDPAGE = 0x10,
+ H2C_8723B_FCS_INFO = 0x11,
+ H2C_8723B_AP_WOW_GPIO_CTRL = 0x13,
+
+ /* PowerSave Class: 001 */
+ H2C_8723B_SET_PWR_MODE = 0x20,
+ H2C_8723B_PS_TUNING_PARA = 0x21,
+ H2C_8723B_PS_TUNING_PARA2 = 0x22,
+ H2C_8723B_P2P_LPS_PARAM = 0x23,
+ H2C_8723B_P2P_PS_OFFLOAD = 0x24,
+ H2C_8723B_PS_SCAN_ENABLE = 0x25,
+ H2C_8723B_SAP_PS_ = 0x26,
+ H2C_8723B_INACTIVE_PS_ = 0x27, /* Inactive_PS */
+ H2C_8723B_FWLPS_IN_IPS_ = 0x28,
+
+ /* Dynamic Mechanism Class: 010 */
+ H2C_8723B_MACID_CFG = 0x40,
+ H2C_8723B_TXBF = 0x41,
+ H2C_8723B_RSSI_SETTING = 0x42,
+ H2C_8723B_AP_REQ_TXRPT = 0x43,
+ H2C_8723B_INIT_RATE_COLLECT = 0x44,
+
+ /* BT Class: 011 */
+ H2C_8723B_B_TYPE_TDMA = 0x60,
+ H2C_8723B_BT_INFO = 0x61,
+ H2C_8723B_FORCE_BT_TXPWR = 0x62,
+ H2C_8723B_BT_IGNORE_WLANACT = 0x63,
+ H2C_8723B_DAC_SWING_VALUE = 0x64,
+ H2C_8723B_ANT_SEL_RSV = 0x65,
+ H2C_8723B_WL_OPMODE = 0x66,
+ H2C_8723B_BT_MP_OPER = 0x67,
+ H2C_8723B_BT_CONTROL = 0x68,
+ H2C_8723B_BT_WIFI_CTRL = 0x69,
+ H2C_8723B_BT_FW_PATCH = 0x6A,
+ H2C_8723B_BT_WLAN_CALIBRATION = 0x6D,
+
+ /* WOWLAN Class: 100 */
+ H2C_8723B_WOWLAN = 0x80,
+ H2C_8723B_REMOTE_WAKE_CTRL = 0x81,
+ H2C_8723B_AOAC_GLOBAL_INFO = 0x82,
+ H2C_8723B_AOAC_RSVD_PAGE = 0x83,
+ H2C_8723B_AOAC_RSVD_PAGE2 = 0x84,
+ H2C_8723B_D0_SCAN_OFFLOAD_CTRL = 0x85,
+ H2C_8723B_D0_SCAN_OFFLOAD_INFO = 0x86,
+ H2C_8723B_CHNL_SWITCH_OFFLOAD = 0x87,
+
+ H2C_8723B_RESET_TSF = 0xC0,
+ H2C_8723B_MAXID,
+};
+/* */
+/* H2C CMD CONTENT -------------------------------------------------- */
+/* */
+/* _RSVDPAGE_LOC_CMD_0x00 */
+#define SET_8723B_H2CCMD_RSVDPAGE_LOC_PROBE_RSP(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE_8BIT(__pH2CCmd, 0, 8, __Value)
+#define SET_8723B_H2CCMD_RSVDPAGE_LOC_PSPOLL(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE_8BIT((__pH2CCmd)+1, 0, 8, __Value)
+#define SET_8723B_H2CCMD_RSVDPAGE_LOC_NULL_DATA(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE_8BIT((__pH2CCmd)+2, 0, 8, __Value)
+#define SET_8723B_H2CCMD_RSVDPAGE_LOC_QOS_NULL_DATA(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE_8BIT((__pH2CCmd)+3, 0, 8, __Value)
+#define SET_8723B_H2CCMD_RSVDPAGE_LOC_BT_QOS_NULL_DATA(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE_8BIT((__pH2CCmd)+4, 0, 8, __Value)
+
+/* _MEDIA_STATUS_RPT_PARM_CMD_0x01 */
+#define SET_8723B_H2CCMD_MSRRPT_PARM_OPMODE(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE(__pH2CCmd, 0, 1, __Value)
+#define SET_8723B_H2CCMD_MSRRPT_PARM_MACID_IND(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE(__pH2CCmd, 1, 1, __Value)
+#define SET_8723B_H2CCMD_MSRRPT_PARM_MACID(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE_8BIT(__pH2CCmd+1, 0, 8, __Value)
+#define SET_8723B_H2CCMD_MSRRPT_PARM_MACID_END(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE_8BIT(__pH2CCmd+2, 0, 8, __Value)
+
+/* _KEEP_ALIVE_CMD_0x03 */
+#define SET_8723B_H2CCMD_KEEPALIVE_PARM_ENABLE(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE(__pH2CCmd, 0, 1, __Value)
+#define SET_8723B_H2CCMD_KEEPALIVE_PARM_ADOPT(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE(__pH2CCmd, 1, 1, __Value)
+#define SET_8723B_H2CCMD_KEEPALIVE_PARM_PKT_TYPE(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE(__pH2CCmd, 2, 1, __Value)
+#define SET_8723B_H2CCMD_KEEPALIVE_PARM_CHECK_PERIOD(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE_8BIT(__pH2CCmd+1, 0, 8, __Value)
+
+/* _DISCONNECT_DECISION_CMD_0x04 */
+#define SET_8723B_H2CCMD_DISCONDECISION_PARM_ENABLE(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE(__pH2CCmd, 0, 1, __Value)
+#define SET_8723B_H2CCMD_DISCONDECISION_PARM_ADOPT(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE(__pH2CCmd, 1, 1, __Value)
+#define SET_8723B_H2CCMD_DISCONDECISION_PARM_CHECK_PERIOD(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE_8BIT(__pH2CCmd+1, 0, 8, __Value)
+#define SET_8723B_H2CCMD_DISCONDECISION_PARM_TRY_PKT_NUM(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE_8BIT(__pH2CCmd+2, 0, 8, __Value)
+
+/* _PWR_MOD_CMD_0x20 */
+#define SET_8723B_H2CCMD_PWRMODE_PARM_MODE(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE_8BIT(__pH2CCmd, 0, 8, __Value)
+#define SET_8723B_H2CCMD_PWRMODE_PARM_RLBM(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE((__pH2CCmd)+1, 0, 4, __Value)
+#define SET_8723B_H2CCMD_PWRMODE_PARM_SMART_PS(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE((__pH2CCmd)+1, 4, 4, __Value)
+#define SET_8723B_H2CCMD_PWRMODE_PARM_BCN_PASS_TIME(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE_8BIT((__pH2CCmd)+2, 0, 8, __Value)
+#define SET_8723B_H2CCMD_PWRMODE_PARM_ALL_QUEUE_UAPSD(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE_8BIT((__pH2CCmd)+3, 0, 8, __Value)
+#define SET_8723B_H2CCMD_PWRMODE_PARM_PWR_STATE(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE_8BIT((__pH2CCmd)+4, 0, 8, __Value)
+#define SET_8723B_H2CCMD_PWRMODE_PARM_BYTE5(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE_8BIT((__pH2CCmd)+5, 0, 8, __Value)
+
+#define GET_8723B_H2CCMD_PWRMODE_PARM_MODE(__pH2CCmd) LE_BITS_TO_1BYTE(__pH2CCmd, 0, 8)
+
+/* _PS_TUNE_PARAM_CMD_0x21 */
+#define SET_8723B_H2CCMD_PSTUNE_PARM_BCN_TO_LIMIT(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE(__pH2CCmd, 0, 8, __Value)
+#define SET_8723B_H2CCMD_PSTUNE_PARM_DTIM_TIMEOUT(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE(__pH2CCmd+1, 0, 8, __Value)
+#define SET_8723B_H2CCMD_PSTUNE_PARM_ADOPT(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE(__pH2CCmd+2, 0, 1, __Value)
+#define SET_8723B_H2CCMD_PSTUNE_PARM_PS_TIMEOUT(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE(__pH2CCmd+2, 1, 7, __Value)
+#define SET_8723B_H2CCMD_PSTUNE_PARM_DTIM_PERIOD(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE(__pH2CCmd+3, 0, 8, __Value)
+
+/* _MACID_CFG_CMD_0x40 */
+#define SET_8723B_H2CCMD_MACID_CFG_MACID(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE(__pH2CCmd, 0, 8, __Value)
+#define SET_8723B_H2CCMD_MACID_CFG_RAID(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE(__pH2CCmd+1, 0, 5, __Value)
+#define SET_8723B_H2CCMD_MACID_CFG_SGI_EN(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE(__pH2CCmd+1, 7, 1, __Value)
+#define SET_8723B_H2CCMD_MACID_CFG_BW(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE(__pH2CCmd+2, 0, 2, __Value)
+#define SET_8723B_H2CCMD_MACID_CFG_NO_UPDATE(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE(__pH2CCmd+2, 3, 1, __Value)
+#define SET_8723B_H2CCMD_MACID_CFG_VHT_EN(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE(__pH2CCmd+2, 4, 2, __Value)
+#define SET_8723B_H2CCMD_MACID_CFG_DISPT(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE(__pH2CCmd+2, 6, 1, __Value)
+#define SET_8723B_H2CCMD_MACID_CFG_DISRA(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE(__pH2CCmd+2, 7, 1, __Value)
+#define SET_8723B_H2CCMD_MACID_CFG_RATE_MASK0(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE(__pH2CCmd+3, 0, 8, __Value)
+#define SET_8723B_H2CCMD_MACID_CFG_RATE_MASK1(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE(__pH2CCmd+4, 0, 8, __Value)
+#define SET_8723B_H2CCMD_MACID_CFG_RATE_MASK2(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE(__pH2CCmd+5, 0, 8, __Value)
+#define SET_8723B_H2CCMD_MACID_CFG_RATE_MASK3(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE(__pH2CCmd+6, 0, 8, __Value)
+
+/* _RSSI_SETTING_CMD_0x42 */
+#define SET_8723B_H2CCMD_RSSI_SETTING_MACID(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE(__pH2CCmd, 0, 8, __Value)
+#define SET_8723B_H2CCMD_RSSI_SETTING_RSSI(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE(__pH2CCmd+2, 0, 7, __Value)
+#define SET_8723B_H2CCMD_RSSI_SETTING_ULDL_STATE(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE(__pH2CCmd+3, 0, 8, __Value)
+
+/* _AP_REQ_TXRPT_CMD_0x43 */
+#define SET_8723B_H2CCMD_APREQRPT_PARM_MACID1(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE(__pH2CCmd, 0, 8, __Value)
+#define SET_8723B_H2CCMD_APREQRPT_PARM_MACID2(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE(__pH2CCmd+1, 0, 8, __Value)
+
+/* _FORCE_BT_TXPWR_CMD_0x62 */
+#define SET_8723B_H2CCMD_BT_PWR_IDX(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE_8BIT(__pH2CCmd, 0, 8, __Value)
+
+/* _FORCE_BT_MP_OPER_CMD_0x67 */
+#define SET_8723B_H2CCMD_BT_MPOPER_VER(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE(__pH2CCmd, 0, 4, __Value)
+#define SET_8723B_H2CCMD_BT_MPOPER_REQNUM(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE(__pH2CCmd, 4, 4, __Value)
+#define SET_8723B_H2CCMD_BT_MPOPER_IDX(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE(__pH2CCmd+1, 0, 8, __Value)
+#define SET_8723B_H2CCMD_BT_MPOPER_PARAM1(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE(__pH2CCmd+2, 0, 8, __Value)
+#define SET_8723B_H2CCMD_BT_MPOPER_PARAM2(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE(__pH2CCmd+3, 0, 8, __Value)
+#define SET_8723B_H2CCMD_BT_MPOPER_PARAM3(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE(__pH2CCmd+4, 0, 8, __Value)
+
+/* _BT_FW_PATCH_0x6A */
+#define SET_8723B_H2CCMD_BT_FW_PATCH_SIZE(__pH2CCmd, __Value) SET_BITS_TO_LE_2BYTE((u8 *)(__pH2CCmd), 0, 16, __Value)
+#define SET_8723B_H2CCMD_BT_FW_PATCH_ADDR0(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE((__pH2CCmd)+2, 0, 8, __Value)
+#define SET_8723B_H2CCMD_BT_FW_PATCH_ADDR1(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE((__pH2CCmd)+3, 0, 8, __Value)
+#define SET_8723B_H2CCMD_BT_FW_PATCH_ADDR2(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE((__pH2CCmd)+4, 0, 8, __Value)
+#define SET_8723B_H2CCMD_BT_FW_PATCH_ADDR3(__pH2CCmd, __Value) SET_BITS_TO_LE_1BYTE((__pH2CCmd)+5, 0, 8, __Value)
+
+/* */
+/* Function Statement -------------------------------------------------- */
+/* */
+
+/* host message to firmware cmd */
+void rtl8723b_set_FwPwrMode_cmd(struct adapter *padapter, u8 Mode);
+void rtl8723b_set_FwJoinBssRpt_cmd(struct adapter *padapter, u8 mstatus);
+void rtl8723b_set_rssi_cmd(struct adapter *padapter, u8 *param);
+void rtl8723b_Add_RateATid(struct adapter *padapter, u32 bitmap, u8 *arg, u8 rssi_level);
+void rtl8723b_fw_try_ap_cmd(struct adapter *padapter, u32 need_ack);
+/* s32 rtl8723b_set_lowpwr_lps_cmd(struct adapter *padapter, u8 enable); */
+void rtl8723b_set_FwPsTuneParam_cmd(struct adapter *padapter);
+void rtl8723b_set_FwMacIdConfig_cmd(struct adapter *padapter, u8 mac_id, u8 raid, u8 bw, u8 sgi, u32 mask);
+void rtl8723b_set_FwMediaStatusRpt_cmd(struct adapter *padapter, u8 mstatus, u8 macid);
+void rtl8723b_download_rsvd_page(struct adapter *padapter, u8 mstatus);
+void rtl8723b_download_BTCoex_AP_mode_rsvd_page(struct adapter *padapter);
+
+void CheckFwRsvdPageContent(struct adapter *padapter);
+
+#if defined(CONFIG_WOWLAN) || defined(CONFIG_AP_WOWLAN)
+void rtl8723b_set_wowlan_cmd(struct adapter *padapter, u8 enable);
+void rtl8723b_set_ap_wowlan_cmd(struct adapter *padapter, u8 enable);
+void SetFwRelatedForWoWLAN8723b(struct adapter *padapter, u8 bHostIsGoingtoSleep);
+#endif /* CONFIG_WOWLAN || CONFIG_AP_WOWLAN */
+
+void rtl8723b_set_FwPwrModeInIPS_cmd(struct adapter *padapter, u8 cmd_param);
+
+s32 FillH2CCmd8723B(struct adapter *padapter, u8 ElementID, u32 CmdLen, u8 *pCmdBuffer);
+
+#define FillH2CCmd FillH2CCmd8723B
+#endif
diff --git a/drivers/staging/rtl8723bs/include/rtl8723b_dm.h b/drivers/staging/rtl8723bs/include/rtl8723b_dm.h
new file mode 100644
index 000000000000..cc64b38ead2a
--- /dev/null
+++ b/drivers/staging/rtl8723bs/include/rtl8723b_dm.h
@@ -0,0 +1,41 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#ifndef __RTL8723B_DM_H__
+#define __RTL8723B_DM_H__
+/* */
+/* Description: */
+/* */
+/* This file is for 8723B dynamic mechanism only */
+/* */
+/* */
+/* */
+
+/* */
+/* structure and define */
+/* */
+
+/* */
+/* function prototype */
+/* */
+
+void rtl8723b_init_dm_priv(struct adapter *padapter);
+
+void rtl8723b_InitHalDm(struct adapter *padapter);
+void rtl8723b_HalDmWatchDog(struct adapter *padapter);
+void rtl8723b_HalDmWatchDog_in_LPS(struct adapter *padapter);
+void rtl8723b_hal_dm_in_lps(struct adapter *padapter);
+
+
+#endif
diff --git a/drivers/staging/rtl8723bs/include/rtl8723b_hal.h b/drivers/staging/rtl8723bs/include/rtl8723b_hal.h
new file mode 100644
index 000000000000..adaeea115e4d
--- /dev/null
+++ b/drivers/staging/rtl8723bs/include/rtl8723b_hal.h
@@ -0,0 +1,279 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#ifndef __RTL8723B_HAL_H__
+#define __RTL8723B_HAL_H__
+
+#include "hal_data.h"
+
+#include "rtl8723b_spec.h"
+#include "rtl8723b_rf.h"
+#include "rtl8723b_dm.h"
+#include "rtl8723b_recv.h"
+#include "rtl8723b_xmit.h"
+#include "rtl8723b_cmd.h"
+#include "rtw_mp.h"
+#include "Hal8723BPwrSeq.h"
+#include "Hal8723BPhyReg.h"
+#include "Hal8723BPhyCfg.h"
+
+/* */
+/* RTL8723B From file */
+/* */
+ #define RTL8723B_FW_IMG "rtl8723b/FW_NIC.bin"
+ #define RTL8723B_FW_WW_IMG "rtl8723b/FW_WoWLAN.bin"
+ #define RTL8723B_PHY_REG "rtl8723b/PHY_REG.txt"
+ #define RTL8723B_PHY_RADIO_A "rtl8723b/RadioA.txt"
+ #define RTL8723B_PHY_RADIO_B "rtl8723b/RadioB.txt"
+ #define RTL8723B_TXPWR_TRACK "rtl8723b/TxPowerTrack.txt"
+ #define RTL8723B_AGC_TAB "rtl8723b/AGC_TAB.txt"
+ #define RTL8723B_PHY_MACREG "rtl8723b/MAC_REG.txt"
+ #define RTL8723B_PHY_REG_PG "rtl8723b/PHY_REG_PG.txt"
+ #define RTL8723B_PHY_REG_MP "rtl8723b/PHY_REG_MP.txt"
+ #define RTL8723B_TXPWR_LMT "rtl8723b/TXPWR_LMT.txt"
+
+/* */
+/* RTL8723B From header */
+/* */
+
+#define FW_8723B_SIZE 0x8000
+#define FW_8723B_START_ADDRESS 0x1000
+#define FW_8723B_END_ADDRESS 0x1FFF /* 0x5FFF */
+
+#define IS_FW_HEADER_EXIST_8723B(_pFwHdr) ((le16_to_cpu((_pFwHdr)->Signature) & 0xFFF0) == 0x5300)
+
+struct rt_firmware {
+ u32 ulFwLength;
+ u8 *szFwBuffer;
+};
+
+/* Byte ordering must be taken into account for this structure */
+struct rt_firmware_hdr {
+ /* 8-byte alignment required */
+
+ /* LONG WORD 0 ---- */
+ __le16 Signature; /* 92C0: test chip; 92C, 88C0: test chip; 88C1: MP A-cut; 92C1: MP A-cut */
+ u8 Category; /* AP/NIC and USB/PCI */
+ u8 Function; /* Reserved for different FW function indication, for further use when the driver needs to download different FW under different conditions */
+ __le16 Version; /* FW Version */
+ __le16 Subversion; /* FW Subversion, default 0x00 */
+
+ /* LONG WORD 1 ---- */
+ u8 Month; /* Release time Month field */
+ u8 Date; /* Release time Date field */
+ u8 Hour; /* Release time Hour field */
+ u8 Minute; /* Release time Minute field */
+ __le16 RamCodeSize; /* The size of RAM code */
+ __le16 Rsvd2;
+
+ /* LONG WORD 2 ---- */
+ __le32 SvnIdx; /* The SVN entry index */
+ __le32 Rsvd3;
+
+ /* LONG WORD 3 ---- */
+ __le32 Rsvd4;
+ __le32 Rsvd5;
+};
+
+#define DRIVER_EARLY_INT_TIME_8723B 0x05
+#define BCN_DMA_ATIME_INT_TIME_8723B 0x02
+
+/* for 8723B */
+/* TX 32K, RX 16K, Page size 128B for TX, 8B for RX */
+#define PAGE_SIZE_TX_8723B 128
+#define PAGE_SIZE_RX_8723B 8
+
+#define RX_DMA_SIZE_8723B 0x4000 /* 16K */
+#define RX_DMA_RESERVED_SIZE_8723B 0x80 /* 128B, reserved for tx report */
+#define RX_DMA_BOUNDARY_8723B (RX_DMA_SIZE_8723B - RX_DMA_RESERVED_SIZE_8723B - 1)
+
+
+/* Note: the number of pages is divided equally among each queue other than the public queue! */
+
+/* General reserved page number (the beacon queue uses reserved pages) */
+/* Beacon:2, PS-Poll:1, Null Data:1, Qos Null Data:1, BT Qos Null Data:1 */
+#define BCNQ_PAGE_NUM_8723B 0x08
+#define BCNQ1_PAGE_NUM_8723B 0x00
+
+#ifdef CONFIG_PNO_SUPPORT
+#undef BCNQ1_PAGE_NUM_8723B
+#define BCNQ1_PAGE_NUM_8723B 0x00 /* 0x04 */
+#endif
+#define MAX_RX_DMA_BUFFER_SIZE_8723B 0x2800 /* RX 10K */
+
+/* For WoWLAN, more reserved pages */
+/* ARP Rsp:1, RWC:1, GTK Info:1, GTK RSP:2, GTK EXT MEM:2, PNO: 6 */
+#ifdef CONFIG_WOWLAN
+#define WOWLAN_PAGE_NUM_8723B 0x07
+#else
+#define WOWLAN_PAGE_NUM_8723B 0x00
+#endif
+
+#ifdef CONFIG_PNO_SUPPORT
+#undef WOWLAN_PAGE_NUM_8723B
+#define WOWLAN_PAGE_NUM_8723B 0x0d
+#endif
+
+#ifdef CONFIG_AP_WOWLAN
+#define AP_WOWLAN_PAGE_NUM_8723B 0x02
+#endif
+
+#define TX_TOTAL_PAGE_NUMBER_8723B (0xFF - BCNQ_PAGE_NUM_8723B - BCNQ1_PAGE_NUM_8723B - WOWLAN_PAGE_NUM_8723B)
+#define TX_PAGE_BOUNDARY_8723B (TX_TOTAL_PAGE_NUMBER_8723B + 1)
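+/*
+ * Illustrative arithmetic: with BCNQ = 8, BCNQ1 = 0 and no WoWLAN pages,
+ * TX_TOTAL_PAGE_NUMBER_8723B = 0xFF - 8 = 0xF7 and TX_PAGE_BOUNDARY_8723B = 0xF8.
+ */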
+
+#define WMM_NORMAL_TX_TOTAL_PAGE_NUMBER_8723B TX_TOTAL_PAGE_NUMBER_8723B
+#define WMM_NORMAL_TX_PAGE_BOUNDARY_8723B (WMM_NORMAL_TX_TOTAL_PAGE_NUMBER_8723B + 1)
+
+/* For Normal Chip Setting */
+/* (HPQ + LPQ + NPQ + PUBQ) shall be TX_TOTAL_PAGE_NUMBER_8723B */
+#define NORMAL_PAGE_NUM_HPQ_8723B 0x0C
+#define NORMAL_PAGE_NUM_LPQ_8723B 0x02
+#define NORMAL_PAGE_NUM_NPQ_8723B 0x02
+
+/* Note: For Normal Chip Setting, modify later */
+#define WMM_NORMAL_PAGE_NUM_HPQ_8723B 0x30
+#define WMM_NORMAL_PAGE_NUM_LPQ_8723B 0x20
+#define WMM_NORMAL_PAGE_NUM_NPQ_8723B 0x20
+
+
+#include "HalVerDef.h"
+#include "hal_com.h"
+
+#define EFUSE_OOB_PROTECT_BYTES 15
+
+#define HAL_EFUSE_MEMORY
+
+#define HWSET_MAX_SIZE_8723B 512
+#define EFUSE_REAL_CONTENT_LEN_8723B 512
+#define EFUSE_MAP_LEN_8723B 512
+#define EFUSE_MAX_SECTION_8723B 64
+
+#define EFUSE_IC_ID_OFFSET 506 /* For some inferior ICs. Added by Roger, 2009.09.02. */
+#define AVAILABLE_EFUSE_ADDR(addr) ((addr) < EFUSE_REAL_CONTENT_LEN_8723B)
+
+#define EFUSE_ACCESS_ON 0x69 /* For RTL8723 only. */
+#define EFUSE_ACCESS_OFF 0x00 /* For RTL8723 only. */
+
+/* */
+/* EFUSE for BT definition */
+/* */
+#define EFUSE_BT_REAL_BANK_CONTENT_LEN 512
+#define EFUSE_BT_REAL_CONTENT_LEN 1536 /* 512*3 */
+#define EFUSE_BT_MAP_LEN 1024 /* 1k bytes */
+#define EFUSE_BT_MAX_SECTION 128 /* 1024/8 */
+
+#define EFUSE_PROTECT_BYTES_BANK 16
+
+/* Description: C2H event types that must stay consistent between the driver and the FW. */
+/* First constructed by tynli. 2009.10.09. */
+typedef enum _C2H_EVT
+{
+ C2H_DBG = 0,
+ C2H_TSF = 1,
+ C2H_AP_RPT_RSP = 2,
+ C2H_CCX_TX_RPT = 3, /* The FW notify the report of the specific tx packet. */
+ C2H_BT_RSSI = 4,
+ C2H_BT_OP_MODE = 5,
+ C2H_EXT_RA_RPT = 6,
+ C2H_8723B_BT_INFO = 9,
+ C2H_HW_INFO_EXCH = 10,
+ C2H_8723B_BT_MP_INFO = 11,
+ MAX_C2HEVENT
+} C2H_EVT;
+
+typedef struct _C2H_EVT_HDR
+{
+ u8 CmdID;
+ u8 CmdLen;
+ u8 CmdSeq;
+} __attribute__((__packed__)) C2H_EVT_HDR, *PC2H_EVT_HDR;
+
+typedef enum tag_Package_Definition
+{
+ PACKAGE_DEFAULT,
+ PACKAGE_QFN68,
+ PACKAGE_TFBGA90,
+ PACKAGE_TFBGA80,
+ PACKAGE_TFBGA79
+} PACKAGE_TYPE_E;
+
+#define INCLUDE_MULTI_FUNC_BT(_Adapter) (GET_HAL_DATA(_Adapter)->MultiFunc & RT_MULTI_FUNC_BT)
+#define INCLUDE_MULTI_FUNC_GPS(_Adapter) (GET_HAL_DATA(_Adapter)->MultiFunc & RT_MULTI_FUNC_GPS)
+
+/* rtl8723a_hal_init.c */
+s32 rtl8723b_FirmwareDownload(struct adapter *padapter, bool bUsedWoWLANFw);
+void rtl8723b_FirmwareSelfReset(struct adapter *padapter);
+void rtl8723b_InitializeFirmwareVars(struct adapter *padapter);
+
+void rtl8723b_InitAntenna_Selection(struct adapter *padapter);
+void rtl8723b_init_default_value(struct adapter *padapter);
+
+s32 rtl8723b_InitLLTTable(struct adapter *padapter);
+
+/* EFuse */
+u8 GetEEPROMSize8723B(struct adapter *padapter);
+void Hal_InitPGData(struct adapter *padapter, u8 *PROMContent);
+void Hal_EfuseParseIDCode(struct adapter *padapter, u8 *hwinfo);
+void Hal_EfuseParseTxPowerInfo_8723B(struct adapter *padapter, u8 *PROMContent, bool AutoLoadFail);
+void Hal_EfuseParseBTCoexistInfo_8723B(struct adapter *padapter, u8 *hwinfo, bool AutoLoadFail);
+void Hal_EfuseParseEEPROMVer_8723B(struct adapter *padapter, u8 *hwinfo, bool AutoLoadFail);
+void Hal_EfuseParseChnlPlan_8723B(struct adapter *padapter, u8 *hwinfo, bool AutoLoadFail);
+void Hal_EfuseParseCustomerID_8723B(struct adapter *padapter, u8 *hwinfo, bool AutoLoadFail);
+void Hal_EfuseParseAntennaDiversity_8723B(struct adapter *padapter, u8 *hwinfo, bool AutoLoadFail);
+void Hal_EfuseParseXtal_8723B(struct adapter *padapter, u8 *hwinfo, bool AutoLoadFail);
+void Hal_EfuseParseThermalMeter_8723B(struct adapter *padapter, u8 *hwinfo, u8 AutoLoadFail);
+void Hal_EfuseParsePackageType_8723B(struct adapter *padapter, u8 *hwinfo, bool AutoLoadFail);
+void Hal_EfuseParseVoltage_8723B(struct adapter *padapter, u8 *hwinfo, bool AutoLoadFail);
+
+void C2HPacketHandler_8723B(struct adapter *padapter, u8 *pbuffer, u16 length);
+
+void rtl8723b_set_hal_ops(struct hal_ops *pHalFunc);
+void SetHwReg8723B(struct adapter *padapter, u8 variable, u8 *val);
+void GetHwReg8723B(struct adapter *padapter, u8 variable, u8 *val);
+u8 SetHalDefVar8723B(struct adapter *padapter, enum HAL_DEF_VARIABLE variable, void *pval);
+u8 GetHalDefVar8723B(struct adapter *padapter, enum HAL_DEF_VARIABLE variable, void *pval);
+
+/* register */
+void rtl8723b_InitBeaconParameters(struct adapter *padapter);
+void _InitBurstPktLen_8723BS(struct adapter *Adapter);
+void _8051Reset8723(struct adapter *padapter);
+#ifdef CONFIG_WOWLAN
+void Hal_DetectWoWMode(struct adapter *padapter);
+#endif /* CONFIG_WOWLAN */
+
+void rtl8723b_start_thread(struct adapter *padapter);
+void rtl8723b_stop_thread(struct adapter *padapter);
+
+#if defined(CONFIG_CHECK_BT_HANG)
+void rtl8723bs_init_checkbthang_workqueue(struct adapter *adapter);
+void rtl8723bs_free_checkbthang_workqueue(struct adapter *adapter);
+void rtl8723bs_cancle_checkbthang_workqueue(struct adapter *adapter);
+void rtl8723bs_hal_check_bt_hang(struct adapter *adapter);
+#endif
+
+#ifdef CONFIG_GPIO_WAKEUP
+void HalSetOutPutGPIO(struct adapter *padapter, u8 index, u8 OutPutValue);
+#endif
+
+int FirmwareDownloadBT(struct adapter *Adapter, struct rt_firmware *firmware);
+
+void CCX_FwC2HTxRpt_8723b(struct adapter *padapter, u8 *pdata, u8 len);
+s32 c2h_id_filter_ccx_8723b(u8 *buf);
+s32 c2h_handler_8723b(struct adapter *padapter, u8 *pC2hEvent);
+u8 MRateToHwRate8723B(u8 rate);
+u8 HwRateToMRate8723B(u8 rate);
+
+void Hal_ReadRFGainOffset(struct adapter *padapter, u8 *hwinfo, bool AutoLoadFail);
+
+#endif
diff --git a/drivers/staging/rtl8723bs/include/rtl8723b_recv.h b/drivers/staging/rtl8723bs/include/rtl8723b_recv.h
new file mode 100644
index 000000000000..7218424dae99
--- /dev/null
+++ b/drivers/staging/rtl8723bs/include/rtl8723b_recv.h
@@ -0,0 +1,144 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#ifndef __RTL8723B_RECV_H__
+#define __RTL8723B_RECV_H__
+
+#include <rtl8192c_recv.h>
+
+typedef struct rxreport_8723b
+{
+ /* DWORD 0 */
+ u32 pktlen:14;
+ u32 crc32:1;
+ u32 icverr:1;
+ u32 drvinfosize:4;
+ u32 security:3;
+ u32 qos:1;
+ u32 shift:2;
+ u32 physt:1;
+ u32 swdec:1;
+ u32 rsvd0028:2;
+ u32 eor:1;
+ u32 rsvd0031:1;
+
+ /* DWORD 1 */
+ u32 macid:7;
+ u32 rsvd0407:1;
+ u32 tid:4;
+ u32 macid_vld:1;
+ u32 amsdu:1;
+ u32 rxid_match:1;
+ u32 paggr:1;
+ u32 a1fit:4;
+ u32 chkerr:1; /* 20 */
+ u32 rx_ipv:1;
+ u32 rx_is_tcp_udp:1;
+ u32 chk_vld:1; /* 23 */
+ u32 pam:1;
+ u32 pwr:1;
+ u32 md:1;
+ u32 mf:1;
+ u32 type:2;
+ u32 mc:1;
+ u32 bc:1;
+
+ /* DWORD 2 */
+ u32 seq:12;
+ u32 frag:4;
+ u32 rx_is_qos:1;
+ u32 rsvd0817:1;
+ u32 wlanhd_iv_len:6;
+ u32 hwrsvd0824:4;
+ u32 c2h_ind:1;
+ u32 rsvd0829:2;
+ u32 fcs_ok:1;
+
+ /* DWORD 3 */
+ u32 rx_rate:7;
+ u32 rsvd1207:3;
+ u32 htc:1;
+ u32 esop:1;
+ u32 bssid_fit:2;
+ u32 rsvd1214:2;
+ u32 dma_agg_num:8;
+ u32 rsvd1224:5;
+ u32 patternmatch:1;
+ u32 unicastwake:1;
+ u32 magicwake:1;
+
+ /* DWORD 4 */
+ u32 splcp:1; /* Ofdm sgi or cck_splcp */
+ u32 ldpc:1;
+ u32 stbc:1;
+ u32 not_sounding:1;
+ u32 bw:2;
+ u32 rsvd1606:26;
+
+ /* DWORD 5 */
+ u32 tsfl;
+} RXREPORT, *PRXREPORT;
+
+typedef struct phystatus_8723b
+{
+ u32 rxgain_a:7;
+ u32 trsw_a:1;
+ u32 rxgain_b:7;
+ u32 trsw_b:1;
+ u32 chcorr_l:16;
+
+ u32 sigqualcck:8;
+ u32 cfo_a:8;
+ u32 cfo_b:8;
+ u32 chcorr_h:8;
+
+ u32 noisepwrdb_h:8;
+ u32 cfo_tail_a:8;
+ u32 cfo_tail_b:8;
+ u32 rsvd0824:8;
+
+ u32 rsvd1200:8;
+ u32 rxevm_a:8;
+ u32 rxevm_b:8;
+ u32 rxsnr_a:8;
+
+ u32 rxsnr_b:8;
+ u32 noisepwrdb_l:8;
+ u32 rsvd1616:8;
+ u32 postsnr_a:8;
+
+ u32 postsnr_b:8;
+ u32 csi_a:8;
+ u32 csi_b:8;
+ u32 targetcsi_a:8;
+
+ u32 targetcsi_b:8;
+ u32 sigevm:8;
+ u32 maxexpwr:8;
+ u32 exintflag:1;
+ u32 sgien:1;
+ u32 rxsc:2;
+ u32 idlelong:1;
+ u32 anttrainen:1;
+ u32 antselb:1;
+ u32 antsel:1;
+} PHYSTATUS, *PPHYSTATUS;
+
+s32 rtl8723bs_init_recv_priv(struct adapter *padapter);
+void rtl8723bs_free_recv_priv(struct adapter *padapter);
+
+void rtl8723b_query_rx_phy_status(union recv_frame *prframe, struct phy_stat *pphy_stat);
+void rtl8723b_process_phy_info(struct adapter *padapter, void *prframe);
+
+#endif
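
struct rxreport_8723b above shadows the 24-byte RX status descriptor with C bitfields, whose layout is compiler and endian dependent; the GET_RX_STATUS_DESC_* macros added later in this series extract the same fields with explicit little-endian arithmetic instead. A minimal sketch of that extraction follows, assuming the driver's u8/u32 typedefs; rx_desc_get_bits() is an illustrative helper, not a driver API.

/* Sketch: read DWORD 0 of a raw little-endian RX descriptor and pull out the
 * fields the rxreport_8723b bitfields name (pktlen bits 0..13, crc32 bit 14,
 * icverr bit 15). */
static u32 rx_desc_get_bits(const u8 *p, unsigned int bit_off, unsigned int bit_len)
{
	u32 dword = p[0] | (p[1] << 8) | (p[2] << 16) | ((u32)p[3] << 24);
	u32 mask = (bit_len >= 32) ? 0xffffffff : ((1u << bit_len) - 1);

	return (dword >> bit_off) & mask;
}

static void parse_rx_dword0(const u8 *rxdesc, u32 *pktlen, u32 *crc_err)
{
	*pktlen = rx_desc_get_bits(rxdesc, 0, 14);
	*crc_err = rx_desc_get_bits(rxdesc, 14, 1);
}
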
diff --git a/drivers/staging/rtl8723bs/include/rtl8723b_rf.h b/drivers/staging/rtl8723bs/include/rtl8723b_rf.h
new file mode 100644
index 000000000000..f5aa1b09a608
--- /dev/null
+++ b/drivers/staging/rtl8723bs/include/rtl8723b_rf.h
@@ -0,0 +1,26 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#ifndef __RTL8723B_RF_H__
+#define __RTL8723B_RF_H__
+
+#include "rtl8192c_rf.h"
+
+int PHY_RF6052_Config8723B(struct adapter *Adapter);
+
+void
+PHY_RF6052SetBandwidth8723B(struct adapter *Adapter,
+ enum CHANNEL_WIDTH Bandwidth);
+
+#endif
diff --git a/drivers/staging/rtl8723bs/include/rtl8723b_spec.h b/drivers/staging/rtl8723bs/include/rtl8723b_spec.h
new file mode 100644
index 000000000000..8d78f4ef5438
--- /dev/null
+++ b/drivers/staging/rtl8723bs/include/rtl8723b_spec.h
@@ -0,0 +1,262 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ *******************************************************************************/
+#ifndef __RTL8723B_SPEC_H__
+#define __RTL8723B_SPEC_H__
+
+#include <autoconf.h>
+
+
+#define HAL_NAV_UPPER_UNIT_8723B	128	/* microseconds */
+
+/* */
+/* */
+/* 0x0000h ~ 0x00FFh System Configuration */
+/* */
+/* */
+#define REG_RSV_CTRL_8723B 0x001C /* 3 Byte */
+#define REG_BT_WIFI_ANTENNA_SWITCH_8723B 0x0038
+#define REG_HSISR_8723B 0x005c
+#define REG_PAD_CTRL1_8723B 0x0064
+#define REG_AFE_CTRL_4_8723B 0x0078
+#define REG_HMEBOX_DBG_0_8723B 0x0088
+#define REG_HMEBOX_DBG_1_8723B 0x008A
+#define REG_HMEBOX_DBG_2_8723B 0x008C
+#define REG_HMEBOX_DBG_3_8723B 0x008E
+#define REG_HIMR0_8723B 0x00B0
+#define REG_HISR0_8723B 0x00B4
+#define REG_HIMR1_8723B 0x00B8
+#define REG_HISR1_8723B 0x00BC
+#define REG_PMC_DBG_CTRL2_8723B 0x00CC
+
+/* */
+/* */
+/* 0x0100h ~ 0x01FFh MACTOP General Configuration */
+/* */
+/* */
+#define REG_C2HEVT_CMD_ID_8723B 0x01A0
+#define REG_C2HEVT_CMD_LEN_8723B 0x01AE
+#define REG_WOWLAN_WAKE_REASON 0x01C7
+#define REG_WOWLAN_GTK_DBG1 0x630
+#define REG_WOWLAN_GTK_DBG2 0x634
+
+#define REG_HMEBOX_EXT0_8723B 0x01F0
+#define REG_HMEBOX_EXT1_8723B 0x01F4
+#define REG_HMEBOX_EXT2_8723B 0x01F8
+#define REG_HMEBOX_EXT3_8723B 0x01FC
+
+/* */
+/* */
+/* 0x0200h ~ 0x027Fh TXDMA Configuration */
+/* */
+/* */
+
+/* */
+/* */
+/* 0x0280h ~ 0x02FFh RXDMA Configuration */
+/* */
+/* */
+#define REG_RXDMA_CONTROL_8723B 0x0286 /* Control the RX DMA. */
+#define REG_RXDMA_MODE_CTRL_8723B 0x0290
+
+/* */
+/* */
+/* 0x0300h ~ 0x03FFh PCIe */
+/* */
+/* */
+#define REG_PCIE_CTRL_REG_8723B 0x0300
+#define REG_INT_MIG_8723B 0x0304 /* Interrupt Migration */
+#define REG_BCNQ_DESA_8723B 0x0308 /* TX Beacon Descriptor Address */
+#define REG_HQ_DESA_8723B 0x0310 /* TX High Queue Descriptor Address */
+#define REG_MGQ_DESA_8723B 0x0318 /* TX Manage Queue Descriptor Address */
+#define REG_VOQ_DESA_8723B 0x0320 /* TX VO Queue Descriptor Address */
+#define REG_VIQ_DESA_8723B 0x0328 /* TX VI Queue Descriptor Address */
+#define REG_BEQ_DESA_8723B 0x0330 /* TX BE Queue Descriptor Address */
+#define REG_BKQ_DESA_8723B 0x0338 /* TX BK Queue Descriptor Address */
+#define REG_RX_DESA_8723B 0x0340 /* RX Queue Descriptor Address */
+#define REG_DBI_WDATA_8723B 0x0348 /* DBI Write Data */
+#define REG_DBI_RDATA_8723B 0x034C /* DBI Read Data */
+#define REG_DBI_ADDR_8723B 0x0350 /* DBI Address */
+#define REG_DBI_FLAG_8723B 0x0352 /* DBI Read/Write Flag */
+#define REG_MDIO_WDATA_8723B 0x0354 /* MDIO for Write PCIE PHY */
+#define REG_MDIO_RDATA_8723B	0x0356	/* MDIO for Read PCIE PHY */
+#define REG_MDIO_CTL_8723B 0x0358 /* MDIO for Control */
+#define REG_DBG_SEL_8723B 0x0360 /* Debug Selection Register */
+#define REG_PCIE_HRPWM_8723B 0x0361 /* PCIe RPWM */
+#define REG_PCIE_HCPWM_8723B 0x0363 /* PCIe CPWM */
+#define REG_PCIE_MULTIFET_CTRL_8723B	0x036A	/* PCIE Multi-Fetch Control */
+
+/* */
+/* */
+/* 0x0400h ~ 0x047Fh Protocol Configuration */
+/* */
+/* */
+#define REG_TXPKTBUF_BCNQ_BDNY_8723B 0x0424
+#define REG_TXPKTBUF_MGQ_BDNY_8723B 0x0425
+#define REG_TXPKTBUF_WMAC_LBK_BF_HD_8723B 0x045D
+#ifdef CONFIG_WOWLAN
+#define REG_TXPKTBUF_IV_LOW 0x0484
+#define REG_TXPKTBUF_IV_HIGH 0x0488
+#endif
+#define REG_AMPDU_BURST_MODE_8723B 0x04BC
+
+/* */
+/* */
+/* 0x0500h ~ 0x05FFh EDCA Configuration */
+/* */
+/* */
+#define REG_SECONDARY_CCA_CTRL_8723B 0x0577
+
+/* */
+/* */
+/* 0x0600h ~ 0x07FFh WMAC Configuration */
+/* */
+/* */
+
+
+/* */
+/* SDIO Bus Specification */
+/* */
+
+/* */
+/* SDIO CMD Address Mapping */
+/* */
+
+/* */
+/* I/O bus domain (Host) */
+/* */
+
+/* */
+/* SDIO register */
+/* */
+#define SDIO_REG_HCPWM1_8723B 0x025 /* HCI Current Power Mode 1 */
+
+
+/* */
+/* 8723 Register Bit and Content definition */
+/* */
+
+/* 2 HSISR */
+/* interrupt mask which needs to clear */
+#define MASK_HSISR_CLEAR (HSISR_GPIO12_0_INT |\
+ HSISR_SPS_OCP_INT |\
+ HSISR_RON_INT |\
+ HSISR_PDNINT |\
+ HSISR_GPIO9_INT)
+
+/* */
+/* */
+/* 0x0100h ~ 0x01FFh MACTOP General Configuration */
+/* */
+/* */
+
+
+/* */
+/* */
+/* 0x0200h ~ 0x027Fh TXDMA Configuration */
+/* */
+/* */
+
+/* */
+/* */
+/* 0x0280h ~ 0x02FFh RXDMA Configuration */
+/* */
+/* */
+#define BIT_USB_RXDMA_AGG_EN BIT(31)
+#define RXDMA_AGG_MODE_EN BIT(1)
+
+#ifdef CONFIG_WOWLAN
+#define RXPKT_RELEASE_POLL BIT(16)
+#define RXDMA_IDLE BIT(17)
+#define RW_RELEASE_EN BIT(18)
+#endif
+
+/* */
+/* */
+/* 0x0400h ~ 0x047Fh Protocol Configuration */
+/* */
+/* */
+
+/* */
+/* 8723B REG_CCK_CHECK (offset 0x454) */
+/* */
+#define BIT_BCN_PORT_SEL BIT5
+
+/* */
+/* */
+/* 0x0500h ~ 0x05FFh EDCA Configuration */
+/* */
+/* */
+
+/* */
+/* */
+/* 0x0600h ~ 0x07FFh WMAC Configuration */
+/* */
+/* */
+#define EEPROM_RF_GAIN_OFFSET 0xC1
+#define EEPROM_RF_GAIN_VAL 0x1F6
+
+
+/* */
+/* 8195 IMR/ISR bits (offset 0xB0, 8bits) */
+/* */
+#define IMR_DISABLED_8723B 0
+/* IMR DW0(0x00B0-00B3) Bit 0-31 */
+#define IMR_TIMER2_8723B BIT31 /* Timeout interrupt 2 */
+#define IMR_TIMER1_8723B BIT30 /* Timeout interrupt 1 */
+#define IMR_PSTIMEOUT_8723B BIT29 /* Power Save Time Out Interrupt */
+#define IMR_GTINT4_8723B BIT28 /* When GTIMER4 expires, this bit is set to 1 */
+#define IMR_GTINT3_8723B BIT27 /* When GTIMER3 expires, this bit is set to 1 */
+#define IMR_TXBCN0ERR_8723B BIT26 /* Transmit Beacon0 Error */
+#define IMR_TXBCN0OK_8723B BIT25 /* Transmit Beacon0 OK */
+#define IMR_TSF_BIT32_TOGGLE_8723B BIT24 /* TSF Timer BIT32 toggle indication interrupt */
+#define IMR_BCNDMAINT0_8723B BIT20 /* Beacon DMA Interrupt 0 */
+#define IMR_BCNDERR0_8723B	BIT16	/* Beacon Queue DMA Error 0 */
+#define IMR_HSISR_IND_ON_INT_8723B	BIT15	/* HSISR Indicator (when HSIMR & HSISR is true, this bit is set to 1) */
+#define IMR_BCNDMAINT_E_8723B BIT14 /* Beacon DMA Interrupt Extension for Win7 */
+#define IMR_ATIMEND_8723B	BIT12	/* CTWindow End or ATIM Window End */
+#define IMR_C2HCMD_8723B BIT10 /* CPU to Host Command INT Status, Write 1 clear */
+#define IMR_CPWM2_8723B BIT9 /* CPU power Mode exchange INT Status, Write 1 clear */
+#define IMR_CPWM_8723B BIT8 /* CPU power Mode exchange INT Status, Write 1 clear */
+#define IMR_HIGHDOK_8723B BIT7 /* High Queue DMA OK */
+#define IMR_MGNTDOK_8723B BIT6 /* Management Queue DMA OK */
+#define IMR_BKDOK_8723B BIT5 /* AC_BK DMA OK */
+#define IMR_BEDOK_8723B BIT4 /* AC_BE DMA OK */
+#define IMR_VIDOK_8723B BIT3 /* AC_VI DMA OK */
+#define IMR_VODOK_8723B BIT2 /* AC_VO DMA OK */
+#define IMR_RDU_8723B BIT1 /* Rx Descriptor Unavailable */
+#define IMR_ROK_8723B BIT0 /* Receive DMA OK */
+
+/* IMR DW1(0x00B4-00B7) Bit 0-31 */
+#define IMR_BCNDMAINT7_8723B BIT27 /* Beacon DMA Interrupt 7 */
+#define IMR_BCNDMAINT6_8723B BIT26 /* Beacon DMA Interrupt 6 */
+#define IMR_BCNDMAINT5_8723B BIT25 /* Beacon DMA Interrupt 5 */
+#define IMR_BCNDMAINT4_8723B BIT24 /* Beacon DMA Interrupt 4 */
+#define IMR_BCNDMAINT3_8723B BIT23 /* Beacon DMA Interrupt 3 */
+#define IMR_BCNDMAINT2_8723B BIT22 /* Beacon DMA Interrupt 2 */
+#define IMR_BCNDMAINT1_8723B BIT21 /* Beacon DMA Interrupt 1 */
+#define IMR_BCNDOK7_8723B	BIT20	/* Beacon Queue DMA OK Interrupt 7 */
+#define IMR_BCNDOK6_8723B	BIT19	/* Beacon Queue DMA OK Interrupt 6 */
+#define IMR_BCNDOK5_8723B	BIT18	/* Beacon Queue DMA OK Interrupt 5 */
+#define IMR_BCNDOK4_8723B	BIT17	/* Beacon Queue DMA OK Interrupt 4 */
+#define IMR_BCNDOK3_8723B	BIT16	/* Beacon Queue DMA OK Interrupt 3 */
+#define IMR_BCNDOK2_8723B	BIT15	/* Beacon Queue DMA OK Interrupt 2 */
+#define IMR_BCNDOK1_8723B	BIT14	/* Beacon Queue DMA OK Interrupt 1 */
+#define IMR_ATIMEND_E_8723B BIT13 /* ATIM Window End Extension for Win7 */
+#define IMR_TXERR_8723B BIT11 /* Tx Error Flag Interrupt Status, write 1 clear. */
+#define IMR_RXERR_8723B BIT10 /* Rx Error Flag INT Status, Write 1 clear */
+#define IMR_TXFOVW_8723B BIT9 /* Transmit FIFO Overflow */
+#define IMR_RXFOVW_8723B BIT8 /* Receive FIFO Overflow */
+
+#endif
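
The IMR_*_8723B values above are single bits of the 32-bit interrupt mask registers at REG_HIMR0_8723B/REG_HIMR1_8723B, so an interrupt mask is built by OR-ing the wanted bits and writing the result to the register. A hedged sketch of that pattern, assuming BITn expands to (1 << n) and that rtw_write32(adapter, addr, val) is the driver's 32-bit register write accessor (both come from other headers in this series).

/* Sketch: enable RX-OK, RX-descriptor-unavailable and C2H interrupts. */
static void rtl8723b_enable_basic_rx_int(struct adapter *padapter)
{
	u32 himr0 = IMR_ROK_8723B |	/* receive DMA OK */
		    IMR_RDU_8723B |	/* RX descriptor unavailable */
		    IMR_C2HCMD_8723B;	/* firmware-to-host command */

	rtw_write32(padapter, REG_HIMR0_8723B, himr0);
}
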
diff --git a/drivers/staging/rtl8723bs/include/rtl8723b_xmit.h b/drivers/staging/rtl8723bs/include/rtl8723b_xmit.h
new file mode 100644
index 000000000000..3bea5d5dd82b
--- /dev/null
+++ b/drivers/staging/rtl8723bs/include/rtl8723b_xmit.h
@@ -0,0 +1,458 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#ifndef __RTL8723B_XMIT_H__
+#define __RTL8723B_XMIT_H__
+
+/* */
+/* Queue Select Value in TxDesc */
+/* */
+#define QSLT_BK 0x2/* 0x01 */
+#define QSLT_BE 0x0
+#define QSLT_VI 0x5/* 0x4 */
+#define QSLT_VO 0x7/* 0x6 */
+#define QSLT_BEACON 0x10
+#define QSLT_HIGH 0x11
+#define QSLT_MGNT 0x12
+#define QSLT_CMD 0x13
+
+#define MAX_TID (15)
+
+/* OFFSET 0 */
+#define OFFSET_SZ 0
+#define OFFSET_SHT 16
+#define BMC BIT(24)
+#define LSG BIT(26)
+#define FSG BIT(27)
+#define OWN BIT(31)
+
+
+/* OFFSET 4 */
+#define PKT_OFFSET_SZ 0
+#define BK BIT(6)
+#define QSEL_SHT 8
+#define Rate_ID_SHT 16
+#define NAVUSEHDR BIT(20)
+#define PKT_OFFSET_SHT 26
+#define HWPC BIT(31)
+
+/* OFFSET 8 */
+#define AGG_EN BIT(29)
+
+/* OFFSET 12 */
+#define SEQ_SHT 16
+
+/* OFFSET 16 */
+#define QoS BIT(6)
+#define HW_SEQ_EN BIT(7)
+#define USERATE BIT(8)
+#define DISDATAFB BIT(10)
+#define DATA_SHORT BIT(24)
+#define DATA_BW BIT(25)
+
+/* OFFSET 20 */
+#define SGI BIT(6)
+
+/* */
+/* defined for TX DESC Operation */
+/* */
+typedef struct txdesc_8723b
+{
+ /* Offset 0 */
+ u32 pktlen:16;
+ u32 offset:8;
+ u32 bmc:1;
+ u32 htc:1;
+ u32 rsvd0026:1;
+ u32 rsvd0027:1;
+ u32 linip:1;
+ u32 noacm:1;
+ u32 gf:1;
+ u32 rsvd0031:1;
+
+ /* Offset 4 */
+ u32 macid:7;
+ u32 rsvd0407:1;
+ u32 qsel:5;
+ u32 rdg_nav_ext:1;
+ u32 lsig_txop_en:1;
+ u32 pifs:1;
+ u32 rate_id:5;
+ u32 en_desc_id:1;
+ u32 sectype:2;
+ u32 pkt_offset:5; /* unit: 8 bytes */
+ u32 moredata:1;
+ u32 txop_ps_cap:1;
+ u32 txop_ps_mode:1;
+
+ /* Offset 8 */
+ u32 p_aid:9;
+ u32 rsvd0809:1;
+ u32 cca_rts:2;
+ u32 agg_en:1;
+ u32 rdg_en:1;
+ u32 null_0:1;
+ u32 null_1:1;
+ u32 bk:1;
+ u32 morefrag:1;
+ u32 raw:1;
+ u32 spe_rpt:1;
+ u32 ampdu_density:3;
+ u32 bt_null:1;
+ u32 g_id:6;
+ u32 rsvd0830:2;
+
+ /* Offset 12 */
+ u32 wheader_len:4;
+ u32 chk_en:1;
+ u32 early_rate:1;
+ u32 hw_ssn_sel:2;
+ u32 userate:1;
+ u32 disrtsfb:1;
+ u32 disdatafb:1;
+ u32 cts2self:1;
+ u32 rtsen:1;
+ u32 hw_rts_en:1;
+ u32 port_id:1;
+ u32 navusehdr:1;
+ u32 use_max_len:1;
+ u32 max_agg_num:5;
+ u32 ndpa:2;
+ u32 ampdu_max_time:8;
+
+ /* Offset 16 */
+ u32 datarate:7;
+ u32 try_rate:1;
+ u32 data_ratefb_lmt:5;
+ u32 rts_ratefb_lmt:4;
+ u32 rty_lmt_en:1;
+ u32 data_rt_lmt:6;
+ u32 rtsrate:5;
+ u32 pcts_en:1;
+ u32 pcts_mask_idx:2;
+
+ /* Offset 20 */
+ u32 data_sc:4;
+ u32 data_short:1;
+ u32 data_bw:2;
+ u32 data_ldpc:1;
+ u32 data_stbc:2;
+ u32 vcs_stbc:2;
+ u32 rts_short:1;
+ u32 rts_sc:4;
+ u32 rsvd2016:7;
+ u32 tx_ant:4;
+ u32 txpwr_offset:3;
+ u32 rsvd2031:1;
+
+ /* Offset 24 */
+ u32 sw_define:12;
+ u32 mbssid:4;
+ u32 antsel_A:3;
+ u32 antsel_B:3;
+ u32 antsel_C:3;
+ u32 antsel_D:3;
+ u32 rsvd2428:4;
+
+ /* Offset 28 */
+ u32 checksum:16;
+ u32 rsvd2816:8;
+ u32 usb_txagg_num:8;
+
+ /* Offset 32 */
+ u32 rts_rc:6;
+ u32 bar_rty_th:2;
+ u32 data_rc:6;
+ u32 rsvd3214:1;
+ u32 en_hwseq:1;
+ u32 nextneadpage:8;
+ u32 tailpage:8;
+
+ /* Offset 36 */
+ u32 padding_len:11;
+ u32 txbf_path:1;
+ u32 seq:12;
+ u32 final_data_rate:8;
+}TXDESC_8723B, *PTXDESC_8723B;
+
+#ifndef __INC_HAL8723BDESC_H
+#define __INC_HAL8723BDESC_H
+
+#define RX_STATUS_DESC_SIZE_8723B 24
+#define RX_DRV_INFO_SIZE_UNIT_8723B 8
+
+
+/* DWORD 0 */
+#define SET_RX_STATUS_DESC_PKT_LEN_8723B(__pRxStatusDesc, __Value) SET_BITS_TO_LE_4BYTE(__pRxStatusDesc, 0, 14, __Value)
+#define SET_RX_STATUS_DESC_EOR_8723B(__pRxStatusDesc, __Value) SET_BITS_TO_LE_4BYTE(__pRxStatusDesc, 30, 1, __Value)
+#define SET_RX_STATUS_DESC_OWN_8723B(__pRxStatusDesc, __Value) SET_BITS_TO_LE_4BYTE(__pRxStatusDesc, 31, 1, __Value)
+
+#define GET_RX_STATUS_DESC_PKT_LEN_8723B(__pRxStatusDesc) LE_BITS_TO_4BYTE(__pRxStatusDesc, 0, 14)
+#define GET_RX_STATUS_DESC_CRC32_8723B(__pRxStatusDesc) LE_BITS_TO_4BYTE(__pRxStatusDesc, 14, 1)
+#define GET_RX_STATUS_DESC_ICV_8723B(__pRxStatusDesc) LE_BITS_TO_4BYTE(__pRxStatusDesc, 15, 1)
+#define GET_RX_STATUS_DESC_DRVINFO_SIZE_8723B(__pRxStatusDesc) LE_BITS_TO_4BYTE(__pRxStatusDesc, 16, 4)
+#define GET_RX_STATUS_DESC_SECURITY_8723B(__pRxStatusDesc) LE_BITS_TO_4BYTE(__pRxStatusDesc, 20, 3)
+#define GET_RX_STATUS_DESC_QOS_8723B(__pRxStatusDesc) LE_BITS_TO_4BYTE(__pRxStatusDesc, 23, 1)
+#define GET_RX_STATUS_DESC_SHIFT_8723B(__pRxStatusDesc) LE_BITS_TO_4BYTE(__pRxStatusDesc, 24, 2)
+#define GET_RX_STATUS_DESC_PHY_STATUS_8723B(__pRxStatusDesc) LE_BITS_TO_4BYTE(__pRxStatusDesc, 26, 1)
+#define GET_RX_STATUS_DESC_SWDEC_8723B(__pRxStatusDesc) LE_BITS_TO_4BYTE(__pRxStatusDesc, 27, 1)
+#define GET_RX_STATUS_DESC_LAST_SEG_8723B(__pRxStatusDesc) LE_BITS_TO_4BYTE(__pRxStatusDesc, 28, 1)
+#define GET_RX_STATUS_DESC_FIRST_SEG_8723B(__pRxStatusDesc) LE_BITS_TO_4BYTE(__pRxStatusDesc, 29, 1)
+#define GET_RX_STATUS_DESC_EOR_8723B(__pRxStatusDesc) LE_BITS_TO_4BYTE(__pRxStatusDesc, 30, 1)
+#define GET_RX_STATUS_DESC_OWN_8723B(__pRxStatusDesc) LE_BITS_TO_4BYTE(__pRxStatusDesc, 31, 1)
+
+/* DWORD 1 */
+#define GET_RX_STATUS_DESC_MACID_8723B(__pRxDesc) LE_BITS_TO_4BYTE(__pRxDesc+4, 0, 7)
+#define GET_RX_STATUS_DESC_TID_8723B(__pRxDesc) LE_BITS_TO_4BYTE(__pRxDesc+4, 8, 4)
+#define GET_RX_STATUS_DESC_AMSDU_8723B(__pRxDesc) LE_BITS_TO_4BYTE(__pRxDesc+4, 13, 1)
+#define GET_RX_STATUS_DESC_RXID_MATCH_8723B(__pRxDesc) LE_BITS_TO_4BYTE(__pRxDesc+4, 14, 1)
+#define GET_RX_STATUS_DESC_PAGGR_8723B(__pRxDesc) LE_BITS_TO_4BYTE(__pRxDesc+4, 15, 1)
+#define GET_RX_STATUS_DESC_A1_FIT_8723B(__pRxDesc) LE_BITS_TO_4BYTE(__pRxDesc+4, 16, 4)
+#define GET_RX_STATUS_DESC_CHKERR_8723B(__pRxDesc) LE_BITS_TO_4BYTE(__pRxDesc+4, 20, 1)
+#define GET_RX_STATUS_DESC_IPVER_8723B(__pRxDesc) LE_BITS_TO_4BYTE(__pRxDesc+4, 21, 1)
+#define GET_RX_STATUS_DESC_IS_TCPUDP__8723B(__pRxDesc) LE_BITS_TO_4BYTE(__pRxDesc+4, 22, 1)
+#define GET_RX_STATUS_DESC_CHK_VLD_8723B(__pRxDesc) LE_BITS_TO_4BYTE(__pRxDesc+4, 23, 1)
+#define GET_RX_STATUS_DESC_PAM_8723B(__pRxDesc) LE_BITS_TO_4BYTE(__pRxDesc+4, 24, 1)
+#define GET_RX_STATUS_DESC_PWR_8723B(__pRxDesc) LE_BITS_TO_4BYTE(__pRxDesc+4, 25, 1)
+#define GET_RX_STATUS_DESC_MORE_DATA_8723B(__pRxDesc) LE_BITS_TO_4BYTE(__pRxDesc+4, 26, 1)
+#define GET_RX_STATUS_DESC_MORE_FRAG_8723B(__pRxDesc) LE_BITS_TO_4BYTE(__pRxDesc+4, 27, 1)
+#define GET_RX_STATUS_DESC_TYPE_8723B(__pRxDesc) LE_BITS_TO_4BYTE(__pRxDesc+4, 28, 2)
+#define GET_RX_STATUS_DESC_MC_8723B(__pRxDesc) LE_BITS_TO_4BYTE(__pRxDesc+4, 30, 1)
+#define GET_RX_STATUS_DESC_BC_8723B(__pRxDesc) LE_BITS_TO_4BYTE(__pRxDesc+4, 31, 1)
+
+/* DWORD 2 */
+#define GET_RX_STATUS_DESC_SEQ_8723B(__pRxStatusDesc) LE_BITS_TO_4BYTE(__pRxStatusDesc+8, 0, 12)
+#define GET_RX_STATUS_DESC_FRAG_8723B(__pRxStatusDesc) LE_BITS_TO_4BYTE(__pRxStatusDesc+8, 12, 4)
+#define GET_RX_STATUS_DESC_RX_IS_QOS_8723B(__pRxStatusDesc) LE_BITS_TO_4BYTE(__pRxStatusDesc+8, 16, 1)
+#define GET_RX_STATUS_DESC_WLANHD_IV_LEN_8723B(__pRxStatusDesc) LE_BITS_TO_4BYTE(__pRxStatusDesc+8, 18, 6)
+#define GET_RX_STATUS_DESC_RPT_SEL_8723B(__pRxStatusDesc) LE_BITS_TO_4BYTE(__pRxStatusDesc+8, 28, 1)
+
+/* DWORD 3 */
+#define GET_RX_STATUS_DESC_RX_RATE_8723B(__pRxStatusDesc) LE_BITS_TO_4BYTE(__pRxStatusDesc+12, 0, 7)
+#define GET_RX_STATUS_DESC_HTC_8723B(__pRxStatusDesc) LE_BITS_TO_4BYTE(__pRxStatusDesc+12, 10, 1)
+#define GET_RX_STATUS_DESC_EOSP_8723B(__pRxStatusDesc) LE_BITS_TO_4BYTE(__pRxStatusDesc+12, 11, 1)
+#define GET_RX_STATUS_DESC_BSSID_FIT_8723B(__pRxStatusDesc) LE_BITS_TO_4BYTE(__pRxStatusDesc+12, 12, 2)
+#define GET_RX_STATUS_DESC_PATTERN_MATCH_8723B(__pRxDesc) LE_BITS_TO_4BYTE(__pRxDesc+12, 29, 1)
+#define GET_RX_STATUS_DESC_UNICAST_MATCH_8723B(__pRxDesc) LE_BITS_TO_4BYTE(__pRxDesc+12, 30, 1)
+#define GET_RX_STATUS_DESC_MAGIC_MATCH_8723B(__pRxDesc) LE_BITS_TO_4BYTE(__pRxDesc+12, 31, 1)
+
+/* DWORD 4 */
+#define GET_RX_STATUS_DESC_SPLCP_8723B(__pRxDesc) LE_BITS_TO_4BYTE(__pRxDesc+16, 0, 1)
+#define GET_RX_STATUS_DESC_LDPC_8723B(__pRxDesc) LE_BITS_TO_4BYTE(__pRxDesc+16, 1, 1)
+#define GET_RX_STATUS_DESC_STBC_8723B(__pRxDesc) LE_BITS_TO_4BYTE(__pRxDesc+16, 2, 1)
+#define GET_RX_STATUS_DESC_BW_8723B(__pRxDesc) LE_BITS_TO_4BYTE(__pRxDesc+16, 4, 2)
+
+/* DWORD 5 */
+#define GET_RX_STATUS_DESC_TSFL_8723B(__pRxStatusDesc) LE_BITS_TO_4BYTE(__pRxStatusDesc+20, 0, 32)
+
+#define GET_RX_STATUS_DESC_BUFF_ADDR_8723B(__pRxDesc) LE_BITS_TO_4BYTE(__pRxDesc+24, 0, 32)
+#define GET_RX_STATUS_DESC_BUFF_ADDR64_8723B(__pRxDesc) LE_BITS_TO_4BYTE(__pRxDesc+28, 0, 32)
+
+#define SET_RX_STATUS_DESC_BUFF_ADDR_8723B(__pRxDesc, __Value) SET_BITS_TO_LE_4BYTE(__pRxDesc+24, 0, 32, __Value)
+
+
+/* Dword 0 */
+#define GET_TX_DESC_OWN_8723B(__pTxDesc) LE_BITS_TO_4BYTE(__pTxDesc, 31, 1)
+
+#define SET_TX_DESC_PKT_SIZE_8723B(__pTxDesc, __Value) SET_BITS_TO_LE_4BYTE(__pTxDesc, 0, 16, __Value)
+#define SET_TX_DESC_OFFSET_8723B(__pTxDesc, __Value) SET_BITS_TO_LE_4BYTE(__pTxDesc, 16, 8, __Value)
+#define SET_TX_DESC_BMC_8723B(__pTxDesc, __Value) SET_BITS_TO_LE_4BYTE(__pTxDesc, 24, 1, __Value)
+#define SET_TX_DESC_HTC_8723B(__pTxDesc, __Value) SET_BITS_TO_LE_4BYTE(__pTxDesc, 25, 1, __Value)
+#define SET_TX_DESC_LAST_SEG_8723B(__pTxDesc, __Value) SET_BITS_TO_LE_4BYTE(__pTxDesc, 26, 1, __Value)
+#define SET_TX_DESC_FIRST_SEG_8723B(__pTxDesc, __Value) SET_BITS_TO_LE_4BYTE(__pTxDesc, 27, 1, __Value)
+#define SET_TX_DESC_LINIP_8723B(__pTxDesc, __Value) SET_BITS_TO_LE_4BYTE(__pTxDesc, 28, 1, __Value)
+#define SET_TX_DESC_NO_ACM_8723B(__pTxDesc, __Value) SET_BITS_TO_LE_4BYTE(__pTxDesc, 29, 1, __Value)
+#define SET_TX_DESC_GF_8723B(__pTxDesc, __Value) SET_BITS_TO_LE_4BYTE(__pTxDesc, 30, 1, __Value)
+#define SET_TX_DESC_OWN_8723B(__pTxDesc, __Value) SET_BITS_TO_LE_4BYTE(__pTxDesc, 31, 1, __Value)
+
+/* Dword 1 */
+#define SET_TX_DESC_MACID_8723B(__pTxDesc, __Value) SET_BITS_TO_LE_4BYTE(__pTxDesc+4, 0, 7, __Value)
+#define SET_TX_DESC_QUEUE_SEL_8723B(__pTxDesc, __Value) SET_BITS_TO_LE_4BYTE(__pTxDesc+4, 8, 5, __Value)
+#define SET_TX_DESC_RDG_NAV_EXT_8723B(__pTxDesc, __Value) SET_BITS_TO_LE_4BYTE(__pTxDesc+4, 13, 1, __Value)
+#define SET_TX_DESC_LSIG_TXOP_EN_8723B(__pTxDesc, __Value) SET_BITS_TO_LE_4BYTE(__pTxDesc+4, 14, 1, __Value)
+#define SET_TX_DESC_PIFS_8723B(__pTxDesc, __Value) SET_BITS_TO_LE_4BYTE(__pTxDesc+4, 15, 1, __Value)
+#define SET_TX_DESC_RATE_ID_8723B(__pTxDesc, __Value) SET_BITS_TO_LE_4BYTE(__pTxDesc+4, 16, 5, __Value)
+#define SET_TX_DESC_EN_DESC_ID_8723B(__pTxDesc, __Value) SET_BITS_TO_LE_4BYTE(__pTxDesc+4, 21, 1, __Value)
+#define SET_TX_DESC_SEC_TYPE_8723B(__pTxDesc, __Value) SET_BITS_TO_LE_4BYTE(__pTxDesc+4, 22, 2, __Value)
+#define SET_TX_DESC_PKT_OFFSET_8723B(__pTxDesc, __Value) SET_BITS_TO_LE_4BYTE(__pTxDesc+4, 24, 5, __Value)
+
+
+/* Dword 2 */
+#define SET_TX_DESC_PAID_8723B(__pTxDesc, __Value) SET_BITS_TO_LE_4BYTE(__pTxDesc+8, 0, 9, __Value)
+#define SET_TX_DESC_CCA_RTS_8723B(__pTxDesc, __Value) SET_BITS_TO_LE_4BYTE(__pTxDesc+8, 10, 2, __Value)
+#define SET_TX_DESC_AGG_ENABLE_8723B(__pTxDesc, __Value) SET_BITS_TO_LE_4BYTE(__pTxDesc+8, 12, 1, __Value)
+#define SET_TX_DESC_RDG_ENABLE_8723B(__pTxDesc, __Value) SET_BITS_TO_LE_4BYTE(__pTxDesc+8, 13, 1, __Value)
+#define SET_TX_DESC_AGG_BREAK_8723B(__pTxDesc, __Value) SET_BITS_TO_LE_4BYTE(__pTxDesc+8, 16, 1, __Value)
+#define SET_TX_DESC_MORE_FRAG_8723B(__pTxDesc, __Value) SET_BITS_TO_LE_4BYTE(__pTxDesc+8, 17, 1, __Value)
+#define SET_TX_DESC_RAW_8723B(__pTxDesc, __Value) SET_BITS_TO_LE_4BYTE(__pTxDesc+8, 18, 1, __Value)
+#define SET_TX_DESC_SPE_RPT_8723B(__pTxDesc, __Value) SET_BITS_TO_LE_4BYTE(__pTxDesc+8, 19, 1, __Value)
+#define SET_TX_DESC_AMPDU_DENSITY_8723B(__pTxDesc, __Value) SET_BITS_TO_LE_4BYTE(__pTxDesc+8, 20, 3, __Value)
+#define SET_TX_DESC_BT_INT_8723B(__pTxDesc, __Value) SET_BITS_TO_LE_4BYTE(__pTxDesc+8, 23, 1, __Value)
+#define SET_TX_DESC_GID_8723B(__pTxDesc, __Value) SET_BITS_TO_LE_4BYTE(__pTxDesc+8, 24, 6, __Value)
+
+
+/* Dword 3 */
+#define SET_TX_DESC_WHEADER_LEN_8723B(__pTxDesc, __Value) SET_BITS_TO_LE_4BYTE(__pTxDesc+12, 0, 4, __Value)
+#define SET_TX_DESC_CHK_EN_8723B(__pTxDesc, __Value) SET_BITS_TO_LE_4BYTE(__pTxDesc+12, 4, 1, __Value)
+#define SET_TX_DESC_EARLY_MODE_8723B(__pTxDesc, __Value) SET_BITS_TO_LE_4BYTE(__pTxDesc+12, 5, 1, __Value)
+#define SET_TX_DESC_HWSEQ_SEL_8723B(__pTxDesc, __Value) SET_BITS_TO_LE_4BYTE(__pTxDesc+12, 6, 2, __Value)
+#define SET_TX_DESC_USE_RATE_8723B(__pTxDesc, __Value) SET_BITS_TO_LE_4BYTE(__pTxDesc+12, 8, 1, __Value)
+#define SET_TX_DESC_DISABLE_RTS_FB_8723B(__pTxDesc, __Value) SET_BITS_TO_LE_4BYTE(__pTxDesc+12, 9, 1, __Value)
+#define SET_TX_DESC_DISABLE_FB_8723B(__pTxDesc, __Value) SET_BITS_TO_LE_4BYTE(__pTxDesc+12, 10, 1, __Value)
+#define SET_TX_DESC_CTS2SELF_8723B(__pTxDesc, __Value) SET_BITS_TO_LE_4BYTE(__pTxDesc+12, 11, 1, __Value)
+#define SET_TX_DESC_RTS_ENABLE_8723B(__pTxDesc, __Value) SET_BITS_TO_LE_4BYTE(__pTxDesc+12, 12, 1, __Value)
+#define SET_TX_DESC_HW_RTS_ENABLE_8723B(__pTxDesc, __Value) SET_BITS_TO_LE_4BYTE(__pTxDesc+12, 13, 1, __Value)
+#define SET_TX_DESC_NAV_USE_HDR_8723B(__pTxDesc, __Value) SET_BITS_TO_LE_4BYTE(__pTxDesc+12, 15, 1, __Value)
+#define SET_TX_DESC_USE_MAX_LEN_8723B(__pTxDesc, __Value) SET_BITS_TO_LE_4BYTE(__pTxDesc+12, 16, 1, __Value)
+#define SET_TX_DESC_MAX_AGG_NUM_8723B(__pTxDesc, __Value) SET_BITS_TO_LE_4BYTE(__pTxDesc+12, 17, 5, __Value)
+#define SET_TX_DESC_NDPA_8723B(__pTxDesc, __Value) SET_BITS_TO_LE_4BYTE(__pTxDesc+12, 22, 2, __Value)
+#define SET_TX_DESC_AMPDU_MAX_TIME_8723B(__pTxDesc, __Value) SET_BITS_TO_LE_4BYTE(__pTxDesc+12, 24, 8, __Value)
+
+/* Dword 4 */
+#define SET_TX_DESC_TX_RATE_8723B(__pTxDesc, __Value) SET_BITS_TO_LE_4BYTE(__pTxDesc+16, 0, 7, __Value)
+#define SET_TX_DESC_DATA_RATE_FB_LIMIT_8723B(__pTxDesc, __Value) SET_BITS_TO_LE_4BYTE(__pTxDesc+16, 8, 5, __Value)
+#define SET_TX_DESC_RTS_RATE_FB_LIMIT_8723B(__pTxDesc, __Value) SET_BITS_TO_LE_4BYTE(__pTxDesc+16, 13, 4, __Value)
+#define SET_TX_DESC_RETRY_LIMIT_ENABLE_8723B(__pTxDesc, __Value) SET_BITS_TO_LE_4BYTE(__pTxDesc+16, 17, 1, __Value)
+#define SET_TX_DESC_DATA_RETRY_LIMIT_8723B(__pTxDesc, __Value) SET_BITS_TO_LE_4BYTE(__pTxDesc+16, 18, 6, __Value)
+#define SET_TX_DESC_RTS_RATE_8723B(__pTxDesc, __Value) SET_BITS_TO_LE_4BYTE(__pTxDesc+16, 24, 5, __Value)
+
+
+/* Dword 5 */
+#define SET_TX_DESC_DATA_SC_8723B(__pTxDesc, __Value) SET_BITS_TO_LE_4BYTE(__pTxDesc+20, 0, 4, __Value)
+#define SET_TX_DESC_DATA_SHORT_8723B(__pTxDesc, __Value) SET_BITS_TO_LE_4BYTE(__pTxDesc+20, 4, 1, __Value)
+#define SET_TX_DESC_DATA_BW_8723B(__pTxDesc, __Value) SET_BITS_TO_LE_4BYTE(__pTxDesc+20, 5, 2, __Value)
+#define SET_TX_DESC_DATA_LDPC_8723B(__pTxDesc, __Value) SET_BITS_TO_LE_4BYTE(__pTxDesc+20, 7, 1, __Value)
+#define SET_TX_DESC_DATA_STBC_8723B(__pTxDesc, __Value) SET_BITS_TO_LE_4BYTE(__pTxDesc+20, 8, 2, __Value)
+#define SET_TX_DESC_CTROL_STBC_8723B(__pTxDesc, __Value) SET_BITS_TO_LE_4BYTE(__pTxDesc+20, 10, 2, __Value)
+#define SET_TX_DESC_RTS_SHORT_8723B(__pTxDesc, __Value) SET_BITS_TO_LE_4BYTE(__pTxDesc+20, 12, 1, __Value)
+#define SET_TX_DESC_RTS_SC_8723B(__pTxDesc, __Value) SET_BITS_TO_LE_4BYTE(__pTxDesc+20, 13, 4, __Value)
+
+
+/* Dword 6 */
+#define SET_TX_DESC_SW_DEFINE_8723B(__pTxDesc, __Value) SET_BITS_TO_LE_4BYTE(__pTxDesc+24, 0, 12, __Value)
+#define SET_TX_DESC_ANTSEL_A_8723B(__pTxDesc, __Value) SET_BITS_TO_LE_4BYTE(__pTxDesc+24, 16, 3, __Value)
+#define SET_TX_DESC_ANTSEL_B_8723B(__pTxDesc, __Value) SET_BITS_TO_LE_4BYTE(__pTxDesc+24, 19, 3, __Value)
+#define SET_TX_DESC_ANTSEL_C_8723B(__pTxDesc, __Value) SET_BITS_TO_LE_4BYTE(__pTxDesc+24, 22, 3, __Value)
+#define SET_TX_DESC_ANTSEL_D_8723B(__pTxDesc, __Value) SET_BITS_TO_LE_4BYTE(__pTxDesc+24, 25, 3, __Value)
+
+/* Dword 7 */
+#define SET_TX_DESC_TX_DESC_CHECKSUM_8723B(__pTxDesc, __Value) SET_BITS_TO_LE_4BYTE(__pTxDesc+28, 0, 16, __Value)
+#define SET_TX_DESC_USB_TXAGG_NUM_8723B(__pTxDesc, __Value) SET_BITS_TO_LE_4BYTE(__pTxDesc+28, 24, 8, __Value)
+#define SET_TX_DESC_SDIO_TXSEQ_8723B(__pTxDesc, __Value) SET_BITS_TO_LE_4BYTE(__pTxDesc+28, 16, 8, __Value)
+
+/* Dword 8 */
+#define SET_TX_DESC_HWSEQ_EN_8723B(__pTxDesc, __Value) SET_BITS_TO_LE_4BYTE(__pTxDesc+32, 15, 1, __Value)
+
+/* Dword 9 */
+#define SET_TX_DESC_SEQ_8723B(__pTxDesc, __Value) SET_BITS_TO_LE_4BYTE(__pTxDesc+36, 12, 12, __Value)
+
+/* Dword 10 */
+#define SET_TX_DESC_TX_BUFFER_ADDRESS_8723B(__pTxDesc, __Value) SET_BITS_TO_LE_4BYTE(__pTxDesc+40, 0, 32, __Value)
+#define GET_TX_DESC_TX_BUFFER_ADDRESS_8723B(__pTxDesc) LE_BITS_TO_4BYTE(__pTxDesc+40, 0, 32)
+
+/* Dword 11 */
+#define SET_TX_DESC_NEXT_DESC_ADDRESS_8723B(__pTxDesc, __Value) SET_BITS_TO_LE_4BYTE(__pTxDesc+48, 0, 32, __Value)
+
+
+#define SET_EARLYMODE_PKTNUM_8723B(__pAddr, __Value) SET_BITS_TO_LE_4BYTE(__pAddr, 0, 4, __Value)
+#define SET_EARLYMODE_LEN0_8723B(__pAddr, __Value) SET_BITS_TO_LE_4BYTE(__pAddr, 4, 15, __Value)
+#define SET_EARLYMODE_LEN1_1_8723B(__pAddr, __Value) SET_BITS_TO_LE_4BYTE(__pAddr, 19, 13, __Value)
+#define SET_EARLYMODE_LEN1_2_8723B(__pAddr, __Value) SET_BITS_TO_LE_4BYTE(__pAddr+4, 0, 2, __Value)
+#define SET_EARLYMODE_LEN2_8723B(__pAddr, __Value) SET_BITS_TO_LE_4BYTE(__pAddr+4, 2, 15, __Value)
+#define SET_EARLYMODE_LEN3_8723B(__pAddr, __Value) SET_BITS_TO_LE_4BYTE(__pAddr+4, 17, 15, __Value)
+
+#endif
+/* */
+/* */
+/* Rate */
+/* */
+/* */
+/* CCK Rates, TxHT = 0 */
+#define DESC8723B_RATE1M 0x00
+#define DESC8723B_RATE2M 0x01
+#define DESC8723B_RATE5_5M 0x02
+#define DESC8723B_RATE11M 0x03
+
+/* OFDM Rates, TxHT = 0 */
+#define DESC8723B_RATE6M 0x04
+#define DESC8723B_RATE9M 0x05
+#define DESC8723B_RATE12M 0x06
+#define DESC8723B_RATE18M 0x07
+#define DESC8723B_RATE24M 0x08
+#define DESC8723B_RATE36M 0x09
+#define DESC8723B_RATE48M 0x0a
+#define DESC8723B_RATE54M 0x0b
+
+/* MCS Rates, TxHT = 1 */
+#define DESC8723B_RATEMCS0 0x0c
+#define DESC8723B_RATEMCS1 0x0d
+#define DESC8723B_RATEMCS2 0x0e
+#define DESC8723B_RATEMCS3 0x0f
+#define DESC8723B_RATEMCS4 0x10
+#define DESC8723B_RATEMCS5 0x11
+#define DESC8723B_RATEMCS6 0x12
+#define DESC8723B_RATEMCS7 0x13
+#define DESC8723B_RATEMCS8 0x14
+#define DESC8723B_RATEMCS9 0x15
+#define DESC8723B_RATEMCS10 0x16
+#define DESC8723B_RATEMCS11 0x17
+#define DESC8723B_RATEMCS12 0x18
+#define DESC8723B_RATEMCS13 0x19
+#define DESC8723B_RATEMCS14 0x1a
+#define DESC8723B_RATEMCS15 0x1b
+#define DESC8723B_RATEVHTSS1MCS0 0x2c
+#define DESC8723B_RATEVHTSS1MCS1 0x2d
+#define DESC8723B_RATEVHTSS1MCS2 0x2e
+#define DESC8723B_RATEVHTSS1MCS3 0x2f
+#define DESC8723B_RATEVHTSS1MCS4 0x30
+#define DESC8723B_RATEVHTSS1MCS5 0x31
+#define DESC8723B_RATEVHTSS1MCS6 0x32
+#define DESC8723B_RATEVHTSS1MCS7 0x33
+#define DESC8723B_RATEVHTSS1MCS8 0x34
+#define DESC8723B_RATEVHTSS1MCS9 0x35
+#define DESC8723B_RATEVHTSS2MCS0 0x36
+#define DESC8723B_RATEVHTSS2MCS1 0x37
+#define DESC8723B_RATEVHTSS2MCS2 0x38
+#define DESC8723B_RATEVHTSS2MCS3 0x39
+#define DESC8723B_RATEVHTSS2MCS4 0x3a
+#define DESC8723B_RATEVHTSS2MCS5 0x3b
+#define DESC8723B_RATEVHTSS2MCS6 0x3c
+#define DESC8723B_RATEVHTSS2MCS7 0x3d
+#define DESC8723B_RATEVHTSS2MCS8 0x3e
+#define DESC8723B_RATEVHTSS2MCS9 0x3f
+
+
+#define RX_HAL_IS_CCK_RATE_8723B(pDesc)\
+ (GET_RX_STATUS_DESC_RX_RATE_8723B(pDesc) == DESC8723B_RATE1M ||\
+ GET_RX_STATUS_DESC_RX_RATE_8723B(pDesc) == DESC8723B_RATE2M ||\
+ GET_RX_STATUS_DESC_RX_RATE_8723B(pDesc) == DESC8723B_RATE5_5M ||\
+ GET_RX_STATUS_DESC_RX_RATE_8723B(pDesc) == DESC8723B_RATE11M)
+
+
+void rtl8723b_update_txdesc(struct xmit_frame *pxmitframe, u8 *pmem);
+void rtl8723b_fill_fake_txdesc(struct adapter *padapter, u8 *pDesc, u32 BufferLen, u8 IsPsPoll, u8 IsBTQosNull, u8 bDataFrame);
+
+s32 rtl8723bs_init_xmit_priv(struct adapter *padapter);
+void rtl8723bs_free_xmit_priv(struct adapter *padapter);
+s32 rtl8723bs_hal_xmit(struct adapter *padapter, struct xmit_frame *pxmitframe);
+s32 rtl8723bs_mgnt_xmit(struct adapter *padapter, struct xmit_frame *pmgntframe);
+s32 rtl8723bs_hal_xmitframe_enqueue(struct adapter *padapter, struct xmit_frame *pxmitframe);
+s32 rtl8723bs_xmit_buf_handler(struct adapter *padapter);
+int rtl8723bs_xmit_thread(void *context);
+#define hal_xmit_handler rtl8723bs_xmit_buf_handler
+
+u8 BWMapping_8723B(struct adapter * Adapter, struct pkt_attrib *pattrib);
+u8 SCMapping_8723B(struct adapter * Adapter, struct pkt_attrib *pattrib);
+
+#endif
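
Each SET_TX_DESC_*_8723B macro above is a SET_BITS_TO_LE_4BYTE() read-modify-write of one bit run inside a little-endian dword of the descriptor buffer, so a descriptor is built by a series of such calls on a zeroed buffer. A minimal, hedged sketch of that pattern for a best-effort data frame; the field choices and the 40-byte descriptor size (offsets 0..36 in txdesc_8723b) are illustrative only, not the driver's actual fill routine (rtl8723b_update_txdesc()).

/* Sketch: fill a few TX descriptor fields on a zeroed 40-byte buffer.
 * Assumes SET_BITS_TO_LE_4BYTE() from the driver's basic-types header. */
static void fill_txdesc_sketch(u8 *ptxdesc, u32 pktlen, u8 macid)
{
	SET_TX_DESC_PKT_SIZE_8723B(ptxdesc, pktlen);	/* dword 0, bits 0..15 */
	SET_TX_DESC_OFFSET_8723B(ptxdesc, 40);		/* descriptor length */
	SET_TX_DESC_MACID_8723B(ptxdesc, macid);	/* dword 1, bits 0..6 */
	SET_TX_DESC_QUEUE_SEL_8723B(ptxdesc, QSLT_BE);	/* best-effort queue */
	SET_TX_DESC_USE_RATE_8723B(ptxdesc, 1);		/* honour TX_RATE below */
	SET_TX_DESC_TX_RATE_8723B(ptxdesc, DESC8723B_RATEMCS7);
	SET_TX_DESC_OWN_8723B(ptxdesc, 1);		/* hand over to hardware */
}
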
diff --git a/drivers/staging/rtl8723bs/include/rtw_ap.h b/drivers/staging/rtl8723bs/include/rtw_ap.h
new file mode 100644
index 000000000000..3c2d1e912a9d
--- /dev/null
+++ b/drivers/staging/rtl8723bs/include/rtw_ap.h
@@ -0,0 +1,47 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#ifndef __RTW_AP_H_
+#define __RTW_AP_H_
+
+void init_mlme_ap_info(struct adapter *padapter);
+void free_mlme_ap_info(struct adapter *padapter);
+/* void update_BCNTIM(struct adapter *padapter); */
+void update_beacon(struct adapter *padapter, u8 ie_id, u8 *oui, u8 tx);
+void add_RATid(struct adapter *padapter, struct sta_info *psta, u8 rssi_level);
+void expire_timeout_chk(struct adapter *padapter);
+void update_sta_info_apmode(struct adapter *padapter, struct sta_info *psta);
+void start_bss_network(struct adapter *padapter, u8 *pbuf);
+int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len);
+void rtw_ap_restore_network(struct adapter *padapter);
+void rtw_set_macaddr_acl(struct adapter *padapter, int mode);
+int rtw_acl_add_sta(struct adapter *padapter, u8 *addr);
+int rtw_acl_remove_sta(struct adapter *padapter, u8 *addr);
+
+u8 rtw_ap_set_pairwise_key(struct adapter *padapter, struct sta_info *psta);
+int rtw_ap_set_group_key(struct adapter *padapter, u8 *key, u8 alg, int keyid);
+int rtw_ap_set_wep_key(struct adapter *padapter, u8 *key, u8 keylen, int keyid, u8 set_tx);
+
+void associated_clients_update(struct adapter *padapter, u8 updated);
+void bss_cap_update_on_sta_join(struct adapter *padapter, struct sta_info *psta);
+u8 bss_cap_update_on_sta_leave(struct adapter *padapter, struct sta_info *psta);
+void sta_info_update(struct adapter *padapter, struct sta_info *psta);
+void ap_sta_info_defer_update(struct adapter *padapter, struct sta_info *psta);
+u8 ap_free_sta(struct adapter *padapter, struct sta_info *psta, bool active, u16 reason);
+int rtw_sta_flush(struct adapter *padapter);
+void start_ap_mode(struct adapter *padapter);
+void stop_ap_mode(struct adapter *padapter);
+void update_bmc_sta(struct adapter *padapter);
+
+#endif
diff --git a/drivers/staging/rtl8723bs/include/rtw_beamforming.h b/drivers/staging/rtl8723bs/include/rtw_beamforming.h
new file mode 100644
index 000000000000..69711e41c50b
--- /dev/null
+++ b/drivers/staging/rtl8723bs/include/rtw_beamforming.h
@@ -0,0 +1,135 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#ifndef __RTW_BEAMFORMING_H_
+#define __RTW_BEAMFORMING_H_
+
+#define BEAMFORMING_ENTRY_NUM 2
+#define GET_BEAMFORM_INFO(_pmlmepriv) ((struct beamforming_info *)(&(_pmlmepriv)->beamforming_info))
+
+typedef enum _BEAMFORMING_ENTRY_STATE
+{
+ BEAMFORMING_ENTRY_STATE_UNINITIALIZE,
+ BEAMFORMING_ENTRY_STATE_INITIALIZEING,
+ BEAMFORMING_ENTRY_STATE_INITIALIZED,
+ BEAMFORMING_ENTRY_STATE_PROGRESSING,
+ BEAMFORMING_ENTRY_STATE_PROGRESSED,
+}BEAMFORMING_ENTRY_STATE, *PBEAMFORMING_ENTRY_STATE;
+
+
+typedef enum _BEAMFORMING_STATE
+{
+ BEAMFORMING_STATE_IDLE,
+ BEAMFORMING_STATE_START,
+ BEAMFORMING_STATE_END,
+}BEAMFORMING_STATE, *PBEAMFORMING_STATE;
+
+
+typedef enum _BEAMFORMING_CAP
+{
+ BEAMFORMING_CAP_NONE = 0x0,
+ BEAMFORMER_CAP_HT_EXPLICIT = 0x1,
+ BEAMFORMEE_CAP_HT_EXPLICIT = 0x2,
+	BEAMFORMER_CAP_VHT_SU = 0x4, /* Self has Beamformer cap, because registry er cap is set and peer has ee cap */
+	BEAMFORMEE_CAP_VHT_SU = 0x8, /* Self has Beamformee cap, because registry ee cap is set and peer has er cap */
+ BEAMFORMER_CAP = 0x10,
+ BEAMFORMEE_CAP = 0x20,
+}BEAMFORMING_CAP, *PBEAMFORMING_CAP;
+
+
+typedef enum _SOUNDING_MODE
+{
+ SOUNDING_SW_VHT_TIMER = 0x0,
+ SOUNDING_SW_HT_TIMER = 0x1,
+ SOUNDING_STOP_All_TIMER = 0x2,
+ SOUNDING_HW_VHT_TIMER = 0x3,
+ SOUNDING_HW_HT_TIMER = 0x4,
+ SOUNDING_STOP_OID_TIMER = 0x5,
+ SOUNDING_AUTO_VHT_TIMER = 0x6,
+ SOUNDING_AUTO_HT_TIMER = 0x7,
+ SOUNDING_FW_VHT_TIMER = 0x8,
+ SOUNDING_FW_HT_TIMER = 0x9,
+}SOUNDING_MODE, *PSOUNDING_MODE;
+
+
+enum BEAMFORMING_CTRL_TYPE
+{
+ BEAMFORMING_CTRL_ENTER = 0,
+ BEAMFORMING_CTRL_LEAVE = 1,
+ BEAMFORMING_CTRL_START_PERIOD = 2,
+ BEAMFORMING_CTRL_END_PERIOD = 3,
+	BEAMFORMING_CTRL_SOUNDING_FAIL = 4,
+	BEAMFORMING_CTRL_SOUNDING_CLK = 5,
+};
+
+struct beamforming_entry {
+ bool bUsed;
+ bool bSound;
+ u16 aid; /* Used to construct AID field of NDPA packet. */
+ u16 mac_id; /* Used to Set Reg42C in IBSS mode. */
+ u16 p_aid; /* Used to fill Reg42C & Reg714 to compare with P_AID of Tx DESC. */
+	u8 mac_addr[6];	/* Used to fill Reg6E4 with the MAC address of the CSI report frame. */
+ enum CHANNEL_WIDTH sound_bw; /* Sounding BandWidth */
+ u16 sound_period;
+ BEAMFORMING_CAP beamforming_entry_cap;
+ BEAMFORMING_ENTRY_STATE beamforming_entry_state;
+ u8 LogSeq;
+ u8 LogRetryCnt;
+ u8 LogSuccessCnt;
+ u8 LogStatusFailCnt;
+ u8 PreCsiReport[327];
+ u8 DefaultCsiCnt;
+ bool bDefaultCSI;
+};
+
+struct sounding_info {
+ u8 sound_idx;
+ enum CHANNEL_WIDTH sound_bw;
+ SOUNDING_MODE sound_mode;
+ u16 sound_period;
+};
+
+struct beamforming_info {
+ BEAMFORMING_CAP beamforming_cap;
+ BEAMFORMING_STATE beamforming_state;
+ struct beamforming_entry beamforming_entry[BEAMFORMING_ENTRY_NUM];
+ u8 beamforming_cur_idx;
+ u8 beamforming_in_progress;
+ u8 sounding_sequence;
+ struct sounding_info sounding_info;
+};
+
+struct rtw_ndpa_sta_info {
+ u16 aid:12;
+ u16 feedback_type:1;
+ u16 nc_index:3;
+};
+
+BEAMFORMING_CAP beamforming_get_entry_beam_cap_by_mac_id(void *pmlmepriv , u8 mac_id);
+void beamforming_notify(struct adapter * adapter);
+BEAMFORMING_CAP beamforming_get_beamform_cap(struct beamforming_info *pBeamInfo);
+
+u32 beamforming_get_report_frame(struct adapter * Adapter, union recv_frame *precv_frame);
+
+bool beamforming_send_ht_ndpa_packet(struct adapter * Adapter, u8 *ra, enum CHANNEL_WIDTH bw, u8 qidx);
+bool beamforming_send_vht_ndpa_packet(struct adapter * Adapter, u8 *ra, u16 aid, enum CHANNEL_WIDTH bw, u8 qidx);
+
+void beamforming_check_sounding_success(struct adapter * Adapter, bool status);
+
+void beamforming_watchdog(struct adapter * Adapter);
+
+void beamforming_wk_hdl(struct adapter *padapter, u8 type, u8 *pbuf);
+u8 beamforming_wk_cmd(struct adapter *padapter, s32 type, u8 *pbuf, s32 size, u8 enqueue);
+
+#endif
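
struct rtw_ndpa_sta_info above packs the 16-bit per-STA info field of an NDPA frame: a 12-bit AID, a 1-bit feedback type and a 3-bit Nc index. Because bitfield layout is compiler dependent, the equivalent explicit packing is shown below as a hedged sketch; the helper name is illustrative and cpu_to_le16() is the usual kernel byte-order helper.

/* Sketch: pack the NDPA STA info field with explicit shifts rather than
 * relying on the rtw_ndpa_sta_info bitfield layout. */
static __le16 ndpa_sta_info_pack(u16 aid, u8 feedback_type, u8 nc_index)
{
	u16 v = (aid & 0x0fff) |
		((u16)(feedback_type & 0x1) << 12) |
		((u16)(nc_index & 0x7) << 13);

	return cpu_to_le16(v);	/* the field goes on the air little-endian */
}
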
diff --git a/drivers/staging/rtl8723bs/include/rtw_br_ext.h b/drivers/staging/rtl8723bs/include/rtw_br_ext.h
new file mode 100644
index 000000000000..c942535a7961
--- /dev/null
+++ b/drivers/staging/rtl8723bs/include/rtw_br_ext.h
@@ -0,0 +1,63 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#ifndef _RTW_BR_EXT_H_
+#define _RTW_BR_EXT_H_
+
+#define MACADDRLEN 6
+#define _DEBUG_ERR DBG_8192C
+#define _DEBUG_INFO /* DBG_8192C */
+#define DEBUG_WARN DBG_8192C
+#define DEBUG_INFO /* DBG_8192C */
+#define DEBUG_ERR DBG_8192C
+/* define GET_MY_HWADDR ((GET_MIB(priv))->dot11OperationEntry.hwaddr) */
+#define GET_MY_HWADDR(padapter) ((padapter)->eeprompriv.mac_addr)
+
+#define NAT25_HASH_BITS 4
+#define NAT25_HASH_SIZE (1 << NAT25_HASH_BITS)
+#define NAT25_AGEING_TIME 300
+
+#define MAX_NETWORK_ADDR_LEN 17
+
+struct nat25_network_db_entry
+{
+ struct nat25_network_db_entry *next_hash;
+ struct nat25_network_db_entry **pprev_hash;
+ atomic_t use_count;
+ unsigned char macAddr[6];
+ unsigned long ageing_timer;
+ unsigned char networkAddr[MAX_NETWORK_ADDR_LEN];
+};
+
+enum NAT25_METHOD {
+ NAT25_MIN,
+ NAT25_CHECK,
+ NAT25_INSERT,
+ NAT25_LOOKUP,
+ NAT25_PARSE,
+ NAT25_MAX
+};
+
+struct br_ext_info {
+ unsigned int nat25_disable;
+ unsigned int macclone_enable;
+ unsigned int dhcp_bcst_disable;
+ int addPPPoETag; /* 1: Add PPPoE relay-SID, 0: disable */
+ unsigned char nat25_dmzMac[MACADDRLEN];
+ unsigned int nat25sc_disable;
+};
+
+void nat25_db_cleanup(struct adapter *priv);
+
+#endif /* _RTW_BR_EXT_H_ */
diff --git a/drivers/staging/rtl8723bs/include/rtw_btcoex.h b/drivers/staging/rtl8723bs/include/rtw_btcoex.h
new file mode 100644
index 000000000000..9a5c3f40bddb
--- /dev/null
+++ b/drivers/staging/rtl8723bs/include/rtw_btcoex.h
@@ -0,0 +1,64 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2013 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#ifndef __RTW_BTCOEX_H__
+#define __RTW_BTCOEX_H__
+
+#include <drv_types.h>
+
+
+#define PACKET_NORMAL 0
+#define PACKET_DHCP 1
+#define PACKET_ARP 2
+#define PACKET_EAPOL 3
+
+void rtw_btcoex_Initialize(struct adapter *);
+void rtw_btcoex_PowerOnSetting(struct adapter *padapter);
+void rtw_btcoex_HAL_Initialize(struct adapter *padapter, u8 bWifiOnly);
+void rtw_btcoex_IpsNotify(struct adapter *, u8 type);
+void rtw_btcoex_LpsNotify(struct adapter *, u8 type);
+void rtw_btcoex_ScanNotify(struct adapter *, u8 type);
+void rtw_btcoex_ConnectNotify(struct adapter *, u8 action);
+void rtw_btcoex_MediaStatusNotify(struct adapter *, u8 mediaStatus);
+void rtw_btcoex_SpecialPacketNotify(struct adapter *, u8 pktType);
+void rtw_btcoex_IQKNotify(struct adapter *padapter, u8 state);
+void rtw_btcoex_BtInfoNotify(struct adapter *, u8 length, u8 *tmpBuf);
+void rtw_btcoex_SuspendNotify(struct adapter *, u8 state);
+void rtw_btcoex_HaltNotify(struct adapter *);
+u8 rtw_btcoex_IsBtDisabled(struct adapter *);
+void rtw_btcoex_Handler(struct adapter *);
+s32 rtw_btcoex_IsBTCoexCtrlAMPDUSize(struct adapter *);
+void rtw_btcoex_SetManualControl(struct adapter *, u8 bmanual);
+u8 rtw_btcoex_IsBtControlLps(struct adapter *);
+u8 rtw_btcoex_IsLpsOn(struct adapter *);
+u8 rtw_btcoex_RpwmVal(struct adapter *);
+u8 rtw_btcoex_LpsVal(struct adapter *);
+void rtw_btcoex_SetBTCoexist(struct adapter *, u8 bBtExist);
+void rtw_btcoex_SetChipType(struct adapter *, u8 chipType);
+void rtw_btcoex_SetPGAntNum(struct adapter *, u8 antNum);
+void rtw_btcoex_SetSingleAntPath(struct adapter *padapter, u8 singleAntPath);
+u32 rtw_btcoex_GetRaMask(struct adapter *);
+void rtw_btcoex_RecordPwrMode(struct adapter *, u8 *pCmdBuf, u8 cmdLen);
+void rtw_btcoex_DisplayBtCoexInfo(struct adapter *, u8 *pbuf, u32 bufsize);
+void rtw_btcoex_SetDBG(struct adapter *, u32 *pDbgModule);
+u32 rtw_btcoex_GetDBG(struct adapter *, u8 *pStrBuf, u32 bufSize);
+
+/* ================================================== */
+/* Below Functions are called by BT-Coex */
+/* ================================================== */
+void rtw_btcoex_RejectApAggregatedPacket(struct adapter *, u8 enable);
+void rtw_btcoex_LPS_Enter(struct adapter *);
+void rtw_btcoex_LPS_Leave(struct adapter *);
+
+#endif /* __RTW_BTCOEX_H__ */
diff --git a/drivers/staging/rtl8723bs/include/rtw_byteorder.h b/drivers/staging/rtl8723bs/include/rtw_byteorder.h
new file mode 100644
index 000000000000..ffbbcec77a0d
--- /dev/null
+++ b/drivers/staging/rtl8723bs/include/rtw_byteorder.h
@@ -0,0 +1,24 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#ifndef _RTL871X_BYTEORDER_H_
+#define _RTL871X_BYTEORDER_H_
+
+#if defined(__LITTLE_ENDIAN)
+#include <linux/byteorder/little_endian.h>
+#else
+#include <linux/byteorder/big_endian.h>
+#endif
+
+#endif /* _RTL871X_BYTEORDER_H_ */
diff --git a/drivers/staging/rtl8723bs/include/rtw_cmd.h b/drivers/staging/rtl8723bs/include/rtw_cmd.h
new file mode 100644
index 000000000000..286d329b7a4f
--- /dev/null
+++ b/drivers/staging/rtl8723bs/include/rtw_cmd.h
@@ -0,0 +1,980 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#ifndef __RTW_CMD_H_
+#define __RTW_CMD_H_
+
+
+#define C2H_MEM_SZ (16*1024)
+
+ #define FREE_CMDOBJ_SZ 128
+
+ #define MAX_CMDSZ 1024
+ #define MAX_RSPSZ 512
+ #define MAX_EVTSZ 1024
+
+ #define CMDBUFF_ALIGN_SZ 512
+
+ struct cmd_obj {
+ struct adapter *padapter;
+ u16 cmdcode;
+ u8 res;
+ u8 *parmbuf;
+ u32 cmdsz;
+ u8 *rsp;
+ u32 rspsz;
+ struct submit_ctx *sctx;
+ /* _sema cmd_sem; */
+ struct list_head list;
+ };
+
+ /* cmd flags */
+ enum {
+ RTW_CMDF_DIRECTLY = BIT0,
+ RTW_CMDF_WAIT_ACK = BIT1,
+ };
+
+ struct cmd_priv {
+ _sema cmd_queue_sema;
+ /* _sema cmd_done_sema; */
+ _sema terminate_cmdthread_sema;
+ struct __queue cmd_queue;
+ u8 cmd_seq;
+ u8 *cmd_buf; /* shall be non-paged, and 4 bytes aligned */
+ u8 *cmd_allocated_buf;
+ u8 *rsp_buf; /* shall be non-paged, and 4 bytes aligned */
+ u8 *rsp_allocated_buf;
+ u32 cmd_issued_cnt;
+ u32 cmd_done_cnt;
+ u32 rsp_cnt;
+ atomic_t cmdthd_running;
+ /* u8 cmdthd_running; */
+ u8 stop_req;
+ struct adapter *padapter;
+ _mutex sctx_mutex;
+ };
+
+ struct evt_priv {
+ _workitem c2h_wk;
+ bool c2h_wk_alive;
+ struct rtw_cbuf *c2h_queue;
+ #define C2H_QUEUE_MAX_LEN 10
+
+ atomic_t event_seq;
+ u8 *evt_buf; /* shall be non-paged, and 4 bytes aligned */
+ u8 *evt_allocated_buf;
+ u32 evt_done_cnt;
+ u8 *c2h_mem;
+ u8 *allocated_c2h_mem;
+ };
+
+#define init_h2fwcmd_w_parm_no_rsp(pcmd, pparm, code) \
+do {\
+ INIT_LIST_HEAD(&pcmd->list);\
+ pcmd->cmdcode = code;\
+ pcmd->parmbuf = (u8 *)(pparm);\
+ pcmd->cmdsz = sizeof (*pparm);\
+ pcmd->rsp = NULL;\
+ pcmd->rspsz = 0;\
+} while (0)
+
+#define init_h2fwcmd_w_parm_no_parm_rsp(pcmd, code) \
+do {\
+ INIT_LIST_HEAD(&pcmd->list);\
+ pcmd->cmdcode = code;\
+ pcmd->parmbuf = NULL;\
+ pcmd->cmdsz = 0;\
+ pcmd->rsp = NULL;\
+ pcmd->rspsz = 0;\
+} while (0)
+
+struct c2h_evt_hdr {
+ u8 id:4;
+ u8 plen:4;
+ u8 seq;
+ u8 payload[0];
+};
+
+struct c2h_evt_hdr_88xx {
+ u8 id;
+ u8 seq;
+ u8 payload[12];
+ u8 plen;
+ u8 trigger;
+};
+
+#define c2h_evt_valid(c2h_evt) ((c2h_evt)->id || (c2h_evt)->plen)
+
+struct P2P_PS_Offload_t {
+ u8 Offload_En:1;
+ u8 role:1; /* 1: Owner, 0: Client */
+ u8 CTWindow_En:1;
+ u8 NoA0_En:1;
+ u8 NoA1_En:1;
+ u8 AllStaSleep:1; /* Only valid in Owner */
+ u8 discovery:1;
+ u8 rsvd:1;
+};
+
+struct P2P_PS_CTWPeriod_t {
+ u8 CTWPeriod; /* TU */
+};
+
+extern u32 rtw_enqueue_cmd(struct cmd_priv *pcmdpriv, struct cmd_obj *obj);
+extern struct cmd_obj *rtw_dequeue_cmd(struct cmd_priv *pcmdpriv);
+extern void rtw_free_cmd_obj(struct cmd_obj *pcmd);
+
+void rtw_stop_cmd_thread(struct adapter *adapter);
+int rtw_cmd_thread(void *context);
+
+extern u32 rtw_init_cmd_priv (struct cmd_priv *pcmdpriv);
+extern void rtw_free_cmd_priv (struct cmd_priv *pcmdpriv);
+
+extern u32 rtw_init_evt_priv (struct evt_priv *pevtpriv);
+extern void rtw_free_evt_priv (struct evt_priv *pevtpriv);
+extern void rtw_evt_notify_isr(struct evt_priv *pevtpriv);
+
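cmd_obj, the init_h2fwcmd_w_parm_no_rsp() macro and rtw_enqueue_cmd() together form the asynchronous command path: the caller allocates a cmd_obj plus a parameter struct, binds them with the macro and queues the object for rtw_cmd_thread() to dispatch and free. A hedged sketch of that pattern using struct disconnect_parm (defined further down in this header); the command code spelling GEN_CMD_CODE(_Disconnect) and the padapter->cmdpriv member are assumptions drawn from the rest of this driver series, not guarantees.

/* Sketch: queue a disconnect command.  Error handling kept to the basics;
 * on success the command thread consumes and frees both allocations. */
static u32 issue_disconnect_cmd_sketch(struct adapter *padapter, u32 deauth_timeout_ms)
{
	struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
	struct cmd_obj *pcmd;
	struct disconnect_parm *pparm;

	pcmd = kzalloc(sizeof(*pcmd), GFP_KERNEL);
	pparm = kzalloc(sizeof(*pparm), GFP_KERNEL);
	if (!pcmd || !pparm) {
		kfree(pcmd);
		kfree(pparm);
		return _FAIL;
	}

	pparm->deauth_timeout_ms = deauth_timeout_ms;
	init_h2fwcmd_w_parm_no_rsp(pcmd, pparm, GEN_CMD_CODE(_Disconnect));

	return rtw_enqueue_cmd(pcmdpriv, pcmd);
}
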
+enum rtw_drvextra_cmd_id
+{
+ NONE_WK_CID,
+ DYNAMIC_CHK_WK_CID,
+ DM_CTRL_WK_CID,
+ PBC_POLLING_WK_CID,
+ POWER_SAVING_CTRL_WK_CID,/* IPS, AUTOSuspend */
+ LPS_CTRL_WK_CID,
+ ANT_SELECT_WK_CID,
+ P2P_PS_WK_CID,
+ P2P_PROTO_WK_CID,
+	CHECK_HIQ_WK_CID,/* for softap mode, check if hi queue is empty */
+ INTEl_WIDI_WK_CID,
+ C2H_WK_CID,
+ RTP_TIMER_CFG_WK_CID,
+	RESET_SECURITYPRIV, /* add for CONFIG_IEEE80211W, non-11w can also use it */
+	FREE_ASSOC_RESOURCES, /* add for CONFIG_IEEE80211W, non-11w can also use it */
+	DM_IN_LPS_WK_CID,
+	DM_RA_MSK_WK_CID, /* add for STA to update RAMask when bandwidth changes. */
+ BEAMFORMING_WK_CID,
+ LPS_CHANGE_DTIM_CID,
+ BTINFO_WK_CID,
+ MAX_WK_CID
+};
+
+enum LPS_CTRL_TYPE
+{
+ LPS_CTRL_SCAN = 0,
+ LPS_CTRL_JOINBSS = 1,
+ LPS_CTRL_CONNECT =2,
+ LPS_CTRL_DISCONNECT =3,
+ LPS_CTRL_SPECIAL_PACKET =4,
+ LPS_CTRL_LEAVE =5,
+ LPS_CTRL_TRAFFIC_BUSY = 6,
+};
+
+enum RFINTFS {
+ SWSI,
+ HWSI,
+ HWPI,
+};
+
+/*
+Caller Mode: Infra, Ad-HoC(C)
+
+Notes: To enter USB suspend mode
+
+Command Mode
+
+*/
+struct usb_suspend_parm {
+ u32 action;/* 1: sleep, 0:resume */
+};
+
+/*
+Caller Mode: Infra, Ad-HoC
+
+Notes: To join a known BSS.
+
+Command-Event Mode
+
+*/
+
+/*
+Caller Mode: Infra, Ad-Hoc
+
+Notes: To join the specified bss
+
+Command Event Mode
+
+*/
+struct joinbss_parm {
+ struct wlan_bssid_ex network;
+};
+
+/*
+Caller Mode: Infra, Ad-HoC(C)
+
+Notes: To disconnect the current associated BSS
+
+Command Mode
+
+*/
+struct disconnect_parm {
+ u32 deauth_timeout_ms;
+};
+
+/*
+Caller Mode: AP, Ad-HoC(M)
+
+Notes: To create a BSS
+
+Command Mode
+*/
+struct createbss_parm {
+ struct wlan_bssid_ex network;
+};
+
+/*
+Caller Mode: AP, Ad-HoC, Infra
+
+Notes: To set the NIC mode of RTL8711
+
+Command Mode
+
+The definition of mode:
+
+#define IW_MODE_AUTO	0	Let the driver decide which AP to join
+#define IW_MODE_ADHOC 1 Single cell network (Ad-Hoc Clients)
+#define IW_MODE_INFRA 2 Multi cell network, roaming, ..
+#define IW_MODE_MASTER 3 Synchronisation master or Access Point
+#define IW_MODE_REPEAT 4 Wireless Repeater (forwarder)
+#define IW_MODE_SECOND 5 Secondary master/repeater (backup)
+#define IW_MODE_MONITOR 6 Passive monitor (listen only)
+
+*/
+struct setopmode_parm {
+ u8 mode;
+ u8 rsvd[3];
+};
+
+/*
+Caller Mode: AP, Ad-HoC, Infra
+
+Notes: To ask RTL8711 to perform a site survey
+
+Command-Event Mode
+
+*/
+
+#define RTW_SSID_SCAN_AMOUNT 9 /* for WEXT_CSCAN_AMOUNT 9 */
+#define RTW_CHANNEL_SCAN_AMOUNT (14+37)
+struct sitesurvey_parm {
+ sint scan_mode; /* active: 1, passive: 0 */
+ u8 ssid_num;
+ u8 ch_num;
+ struct ndis_802_11_ssid ssid[RTW_SSID_SCAN_AMOUNT];
+ struct rtw_ieee80211_channel ch[RTW_CHANNEL_SCAN_AMOUNT];
+};
+
+/*
+Caller Mode: Any
+
+Notes: To set the auth type of RTL8711. open/shared/802.1x
+
+Command Mode
+
+*/
+struct setauth_parm {
+ u8 mode; /* 0: legacy open, 1: legacy shared 2: 802.1x */
+ u8 _1x; /* 0: PSK, 1: TLS */
+ u8 rsvd[2];
+};
+
+/*
+Caller Mode: Infra
+
+a. algorithm: wep40, wep104, tkip & aes
+b. keytype: grp key/unicast key
+c. key contents
+
+when shared key ==> keyid is the camid
+when 802.1x ==> keyid [0:1] ==> grp key
+when 802.1x ==> keyid > 2 ==> unicast key
+
+*/
+struct setkey_parm {
+ u8 algorithm; /* encryption algorithm, could be none, wep40, TKIP, CCMP, wep104 */
+ u8 keyid;
+ u8 grpkey; /* 1: this is the grpkey for 802.1x. 0: this is the unicast key for 802.1x */
+ u8 set_tx; /* 1: main tx key for wep. 0: other key. */
+ u8 key[16]; /* this could be 40 or 104 */
+};
+
+/*
+When in AP or Ad-Hoc mode, this is used to
+allocate a sw/hw entry for a newly associated sta.
+
+Command
+
+when shared key ==> algorithm/keyid
+
+*/
+struct set_stakey_parm {
+ u8 addr[ETH_ALEN];
+ u8 algorithm;
+ u8 keyid;
+ u8 key[16];
+};
+
+struct set_stakey_rsp {
+ u8 addr[ETH_ALEN];
+ u8 keyid;
+ u8 rsvd;
+};
+
+/*
+Caller Ad-Hoc/AP
+
+Command -Rsp(AID == CAMID) mode
+
+This is to force fw to add a sta_data entry per driver's request.
+
+FW will write a cam entry associated with it.
+
+*/
+struct set_assocsta_parm {
+ u8 addr[ETH_ALEN];
+};
+
+struct set_assocsta_rsp {
+ u8 cam_id;
+ u8 rsvd[3];
+};
+
+/*
+ Caller Ad-Hoc/AP
+
+ Command mode
+
+ This is to force fw to del an sta_data entry per driver's request
+
+ FW will invalidate the cam entry associated with it.
+
+*/
+struct del_assocsta_parm {
+ u8 addr[ETH_ALEN];
+};
+
+/*
+Caller Mode: AP/Ad-HoC(M)
+
+Notes: To notify fw that given staid has changed its power state
+
+Command Mode
+
+*/
+struct setstapwrstate_parm {
+ u8 staid;
+ u8 status;
+ u8 hwaddr[6];
+};
+
+/*
+Caller Mode: Any
+
+Notes: To setup the basic rate of RTL8711
+
+Command Mode
+
+*/
+struct setbasicrate_parm {
+ u8 basicrates[NumRates];
+};
+
+/*
+Caller Mode: Any
+
+Notes: To read the current basic rate
+
+Command-Rsp Mode
+
+*/
+struct getbasicrate_parm {
+ u32 rsvd;
+};
+
+struct getbasicrate_rsp {
+ u8 basicrates[NumRates];
+};
+
+/*
+Caller Mode: Any
+
+Notes: To setup the data rate of RTL8711
+
+Command Mode
+
+*/
+struct setdatarate_parm {
+ u8 mac_id;
+ u8 datarates[NumRates];
+};
+
+/*
+Caller Mode: Any
+
+Notes: To read the current data rate
+
+Command-Rsp Mode
+
+*/
+struct getdatarate_parm {
+ u32 rsvd;
+
+};
+struct getdatarate_rsp {
+ u8 datarates[NumRates];
+};
+
+
+/*
+Caller Mode: Any
+AP: AP can use the info for the contents of the beacon frame
+Infra: STA can use the info when site-surveying
+Ad-HoC(M): Like AP
+Ad-HoC(C): Like STA
+
+
+Notes: To set the phy capability of the NIC
+
+Command Mode
+
+*/
+
+struct setphyinfo_parm {
+ struct regulatory_class class_sets[NUM_REGULATORYS];
+ u8 status;
+};
+
+struct getphyinfo_parm {
+ u32 rsvd;
+};
+
+struct getphyinfo_rsp {
+ struct regulatory_class class_sets[NUM_REGULATORYS];
+ u8 status;
+};
+
+/*
+Caller Mode: Any
+
+Notes: To set the channel/modem/band
+This command will be used when channel/modem/band is changed.
+
+Command Mode
+
+*/
+struct setphy_parm {
+ u8 rfchannel;
+ u8 modem;
+};
+
+/*
+Caller Mode: Any
+
+Notes: To get the current setting of channel/modem/band
+
+Command-Rsp Mode
+
+*/
+struct getphy_parm {
+ u32 rsvd;
+
+};
+struct getphy_rsp {
+ u8 rfchannel;
+ u8 modem;
+};
+
+struct readBB_parm {
+ u8 offset;
+};
+struct readBB_rsp {
+ u8 value;
+};
+
+struct readTSSI_parm {
+ u8 offset;
+};
+struct readTSSI_rsp {
+ u8 value;
+};
+
+struct writeBB_parm {
+ u8 offset;
+ u8 value;
+};
+
+struct readRF_parm {
+ u8 offset;
+};
+struct readRF_rsp {
+ u32 value;
+};
+
+struct writeRF_parm {
+ u32 offset;
+ u32 value;
+};
+
+struct getrfintfs_parm {
+ u8 rfintfs;
+};
+
+
+struct Tx_Beacon_param
+{
+ struct wlan_bssid_ex network;
+};
+
+/*
+ Notes: This command is used for H2C/C2H loopback testing
+
+ mac[0] == 0
+ ==> CMD mode, return H2C_SUCCESS.
+ The following conditions must be true under CMD mode
+ mac[1] == mac[4], mac[2] == mac[3], mac[0]=mac[5]= 0;
+ s0 == 0x1234, s1 == 0xabcd, w0 == 0x78563412, w1 == 0x5aa5def7;
+ s2 == (b1 << 8 | b0);
+
+ mac[0] == 1
+ ==> CMD_RSP mode, return H2C_SUCCESS_RSP
+
+ The rsp layout shall be:
+ rsp: parm:
+ mac[0] = mac[5];
+ mac[1] = mac[4];
+ mac[2] = mac[3];
+ mac[3] = mac[2];
+ mac[4] = mac[1];
+ mac[5] = mac[0];
+ s0 = s1;
+ s1 = swap16(s0);
+ w0 = swap32(w1);
+ b0 = b1
+ s2 = s0 + s1
+ b1 = b0
+ w1 = w0
+
+ mac[0] == 2
+ ==> CMD_EVENT mode, return H2C_SUCCESS
+ The event layout shall be:
+ event: parm:
+ mac[0] = mac[5];
+ mac[1] = mac[4];
+ mac[2] = event's sequence number, starting from 1 to parm's mac[3]
+ mac[3] = mac[2];
+ mac[4] = mac[1];
+ mac[5] = mac[0];
+ s0 = swap16(s0) - event.mac[2];
+ s1 = s1 + event.mac[2];
+ w0 = swap32(w0);
+ b0 = b1
+ s2 = s0 + event.mac[2]
+ b1 = b0
+ w1 = swap32(w1) - event.mac[2];
+
+ parm->mac[3] is the total event count that the host requested.
+
+
+ The event will be the same as the cmd's parm.
+
+*/
+
+/* CMD param Format for driver extra cmd handler */
+struct drvextra_cmd_parm {
+ int ec_id; /* extra cmd id */
+ int type; /* Can use this field as the type id or command size */
+ int size; /* buffer size */
+ unsigned char *pbuf;
+};
+
+/*------------------- Below are used for RF/BB tuning ---------------------*/
+
+struct setantenna_parm {
+ u8 tx_antset;
+ u8 rx_antset;
+ u8 tx_antenna;
+ u8 rx_antenna;
+};
+
+struct enrateadaptive_parm {
+ u32 en;
+};
+
+struct settxagctbl_parm {
+ u32 txagc[MAX_RATES_LENGTH];
+};
+
+struct gettxagctbl_parm {
+ u32 rsvd;
+};
+struct gettxagctbl_rsp {
+ u32 txagc[MAX_RATES_LENGTH];
+};
+
+struct setagcctrl_parm {
+ u32 agcctrl; /* 0: pure hw, 1: fw */
+};
+
+
+struct setssup_parm {
+ u32 ss_ForceUp[MAX_RATES_LENGTH];
+};
+
+struct getssup_parm {
+ u32 rsvd;
+};
+struct getssup_rsp {
+ u8 ss_ForceUp[MAX_RATES_LENGTH];
+};
+
+
+struct setssdlevel_parm {
+ u8 ss_DLevel[MAX_RATES_LENGTH];
+};
+
+struct getssdlevel_parm {
+ u32 rsvd;
+};
+struct getssdlevel_rsp {
+ u8 ss_DLevel[MAX_RATES_LENGTH];
+};
+
+struct setssulevel_parm {
+ u8 ss_ULevel[MAX_RATES_LENGTH];
+};
+
+struct getssulevel_parm {
+ u32 rsvd;
+};
+struct getssulevel_rsp {
+ u8 ss_ULevel[MAX_RATES_LENGTH];
+};
+
+
+struct setcountjudge_parm {
+ u8 count_judge[MAX_RATES_LENGTH];
+};
+
+struct getcountjudge_parm {
+ u32 rsvd;
+};
+struct getcountjudge_rsp {
+ u8 count_judge[MAX_RATES_LENGTH];
+};
+
+
+struct setratable_parm {
+ u8 ss_ForceUp[NumRates];
+ u8 ss_ULevel[NumRates];
+ u8 ss_DLevel[NumRates];
+ u8 count_judge[NumRates];
+};
+
+struct getratable_parm {
+ uint rsvd;
+};
+struct getratable_rsp {
+ u8 ss_ForceUp[NumRates];
+ u8 ss_ULevel[NumRates];
+ u8 ss_DLevel[NumRates];
+ u8 count_judge[NumRates];
+};
+
+
+/* to get TX, RX retry count */
+struct gettxretrycnt_parm{
+ unsigned int rsvd;
+};
+struct gettxretrycnt_rsp{
+ unsigned long tx_retrycnt;
+};
+
+struct getrxretrycnt_parm{
+ unsigned int rsvd;
+};
+struct getrxretrycnt_rsp{
+ unsigned long rx_retrycnt;
+};
+
+/* to get BCNOK, BCNERR count */
+struct getbcnokcnt_parm{
+ unsigned int rsvd;
+};
+struct getbcnokcnt_rsp{
+ unsigned long bcnokcnt;
+};
+
+struct getbcnerrcnt_parm{
+ unsigned int rsvd;
+};
+struct getbcnerrcnt_rsp{
+ unsigned long bcnerrcnt;
+};
+
+/* to get current TX power level */
+struct getcurtxpwrlevel_parm{
+ unsigned int rsvd;
+};
+struct getcurtxpwrlevel_rsp{
+ unsigned short tx_power;
+};
+
+struct setprobereqextraie_parm {
+ unsigned char e_id;
+ unsigned char ie_len;
+ unsigned char ie[0];
+};
+
+struct setassocreqextraie_parm {
+ unsigned char e_id;
+ unsigned char ie_len;
+ unsigned char ie[0];
+};
+
+struct setproberspextraie_parm {
+ unsigned char e_id;
+ unsigned char ie_len;
+ unsigned char ie[0];
+};
+
+struct setassocrspextraie_parm {
+ unsigned char e_id;
+ unsigned char ie_len;
+ unsigned char ie[0];
+};
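Each of these four structures ends in a zero-length array, so the IE payload is stored inline after the two header bytes and the allocation must cover the struct plus ie_len. A hedged sketch (the helper is hypothetical; kzalloc/GFP_KERNEL are standard kernel API):

/* Sketch only: build a probe-request extra-IE parameter blob. */
static struct setprobereqextraie_parm *example_alloc_extra_ie(u8 e_id, const u8 *ie, u8 ie_len)
{
	struct setprobereqextraie_parm *parm;

	parm = kzalloc(sizeof(*parm) + ie_len, GFP_KERNEL);	/* payload follows the 2-byte header */
	if (!parm)
		return NULL;

	parm->e_id = e_id;
	parm->ie_len = ie_len;
	memcpy(parm->ie, ie, ie_len);
	return parm;
}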
+
+
+struct addBaReq_parm
+{
+ unsigned int tid;
+ u8 addr[ETH_ALEN];
+};
+
+/*H2C Handler index: 46 */
+struct set_ch_parm {
+ u8 ch;
+ u8 bw;
+ u8 ch_offset;
+};
+
+/*H2C Handler index: 59 */
+struct SetChannelPlan_param
+{
+ u8 channel_plan;
+};
+
+/*H2C Handler index: 60 */
+struct LedBlink_param
+{
+ void *pLed;
+};
+
+/*H2C Handler index: 61 */
+struct SetChannelSwitch_param
+{
+ u8 new_ch_no;
+};
+
+/*H2C Handler index: 62 */
+struct TDLSoption_param
+{
+ u8 addr[ETH_ALEN];
+ u8 option;
+};
+
+/*H2C Handler index: 64 */
+struct RunInThread_param
+{
+ void (*func)(void*);
+ void *context;
+};
+
+
+#define GEN_CMD_CODE(cmd) cmd ## _CMD_
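The macro token-pastes the _CMD_ suffix onto its argument, so every enumerator in enum rtw_h2c_cmd below becomes an identifier of the form <name>_CMD_, for example:

/* Preprocessor expansion of GEN_CMD_CODE for two of the enumerators below: */
/*   GEN_CMD_CODE(_JoinBss)    ->  _JoinBss_CMD_    */
/*   GEN_CMD_CODE(_SiteSurvey) ->  _SiteSurvey_CMD_ */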
+
+
+/*
+
+Result:
+0x00: success
+0x01: success, and check Response.
+0x02: cmd ignored due to duplicated sequence number
+0x03: cmd dropped due to invalid cmd code
+0x04: reserved.
+
+*/
+
+#define H2C_RSP_OFFSET 512
+
+#define H2C_SUCCESS 0x00
+#define H2C_SUCCESS_RSP 0x01
+#define H2C_DUPLICATED 0x02
+#define H2C_DROPPED 0x03
+#define H2C_PARAMETERS_ERROR 0x04
+#define H2C_REJECTED 0x05
+#define H2C_CMD_OVERFLOW 0x06
+#define H2C_RESERVED 0x07
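A command handler in the corresponding .c files returns one of these codes; a minimal hedged skeleton (the handler name is hypothetical, the signature mirrors rtw_drvextra_cmd_hdl below):

/* Sketch only: how the H2C result codes are typically used by a handler. */
static u8 example_cmd_hdl(struct adapter *padapter, unsigned char *pbuf)
{
	if (!pbuf)
		return H2C_PARAMETERS_ERROR;	/* 0x04: bad parameters */

	/* ... perform the requested operation ... */

	return H2C_SUCCESS;			/* 0x00: done, no response payload */
}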
+
+u8 rtw_sitesurvey_cmd(struct adapter *padapter, struct ndis_802_11_ssid *ssid, int ssid_num, struct rtw_ieee80211_channel *ch, int ch_num);
+extern u8 rtw_createbss_cmd(struct adapter *padapter);
+u8 rtw_startbss_cmd(struct adapter *padapter, int flags);
+
+struct sta_info;
+extern u8 rtw_setstakey_cmd(struct adapter *padapter, struct sta_info *sta, u8 unicast_key, bool enqueue);
+extern u8 rtw_clearstakey_cmd(struct adapter *padapter, struct sta_info *sta, u8 enqueue);
+
+extern u8 rtw_joinbss_cmd(struct adapter *padapter, struct wlan_network* pnetwork);
+u8 rtw_disassoc_cmd(struct adapter *padapter, u32 deauth_timeout_ms, bool enqueue);
+extern u8 rtw_setopmode_cmd(struct adapter *padapter, enum NDIS_802_11_NETWORK_INFRASTRUCTURE networktype, bool enqueue);
+extern u8 rtw_setdatarate_cmd(struct adapter *padapter, u8 *rateset);
+extern u8 rtw_setrfintfs_cmd(struct adapter *padapter, u8 mode);
+
+extern u8 rtw_gettssi_cmd(struct adapter *padapter, u8 offset, u8 *pval);
+extern u8 rtw_setfwdig_cmd(struct adapter *padapter, u8 type);
+extern u8 rtw_setfwra_cmd(struct adapter *padapter, u8 type);
+
+extern u8 rtw_addbareq_cmd(struct adapter *padapter, u8 tid, u8 *addr);
+/* added for CONFIG_IEEE80211W; non-11w code can also use it */
+extern u8 rtw_reset_securitypriv_cmd(struct adapter *padapter);
+extern u8 rtw_free_assoc_resources_cmd(struct adapter *padapter);
+extern u8 rtw_dynamic_chk_wk_cmd(struct adapter *adapter);
+
+u8 rtw_lps_ctrl_wk_cmd(struct adapter *padapter, u8 lps_ctrl_type, u8 enqueue);
+u8 rtw_dm_in_lps_wk_cmd(struct adapter *padapter);
+
+u8 rtw_dm_ra_mask_wk_cmd(struct adapter *padapter, u8 *psta);
+
+extern u8 rtw_ps_cmd(struct adapter *padapter);
+
+u8 rtw_chk_hi_queue_cmd(struct adapter *padapter);
+
+extern u8 rtw_set_chplan_cmd(struct adapter *padapter, u8 chplan, u8 enqueue, u8 swconfig);
+
+extern u8 rtw_c2h_packet_wk_cmd(struct adapter *padapter, u8 *pbuf, u16 length);
+extern u8 rtw_c2h_wk_cmd(struct adapter *padapter, u8 *c2h_evt);
+
+u8 rtw_drvextra_cmd_hdl(struct adapter *padapter, unsigned char *pbuf);
+
+extern void rtw_survey_cmd_callback(struct adapter *padapter, struct cmd_obj *pcmd);
+extern void rtw_disassoc_cmd_callback(struct adapter *padapter, struct cmd_obj *pcmd);
+extern void rtw_joinbss_cmd_callback(struct adapter *padapter, struct cmd_obj *pcmd);
+extern void rtw_createbss_cmd_callback(struct adapter *padapter, struct cmd_obj *pcmd);
+extern void rtw_getbbrfreg_cmdrsp_callback(struct adapter *padapter, struct cmd_obj *pcmd);
+
+extern void rtw_setstaKey_cmdrsp_callback(struct adapter *padapter, struct cmd_obj *pcmd);
+extern void rtw_setassocsta_cmdrsp_callback(struct adapter *padapter, struct cmd_obj *pcmd);
+extern void rtw_getrttbl_cmdrsp_callback(struct adapter *padapter, struct cmd_obj *pcmd);
+
+
+struct _cmd_callback {
+ u32 cmd_code;
+ void (*callback)(struct adapter *padapter, struct cmd_obj *cmd);
+};
+
+enum rtw_h2c_cmd
+{
+ GEN_CMD_CODE(_Read_MACREG) , /*0*/
+ GEN_CMD_CODE(_Write_MACREG) ,
+ GEN_CMD_CODE(_Read_BBREG) ,
+ GEN_CMD_CODE(_Write_BBREG) ,
+ GEN_CMD_CODE(_Read_RFREG) ,
+ GEN_CMD_CODE(_Write_RFREG) , /*5*/
+ GEN_CMD_CODE(_Read_EEPROM) ,
+ GEN_CMD_CODE(_Write_EEPROM) ,
+ GEN_CMD_CODE(_Read_EFUSE) ,
+ GEN_CMD_CODE(_Write_EFUSE) ,
+
+ GEN_CMD_CODE(_Read_CAM) , /*10*/
+ GEN_CMD_CODE(_Write_CAM) ,
+ GEN_CMD_CODE(_setBCNITV),
+ GEN_CMD_CODE(_setMBIDCFG),
+ GEN_CMD_CODE(_JoinBss), /*14*/
+ GEN_CMD_CODE(_DisConnect) , /*15*/
+ GEN_CMD_CODE(_CreateBss) ,
+ GEN_CMD_CODE(_SetOpMode) ,
+ GEN_CMD_CODE(_SiteSurvey), /*18*/
+ GEN_CMD_CODE(_SetAuth) ,
+
+ GEN_CMD_CODE(_SetKey) , /*20*/
+ GEN_CMD_CODE(_SetStaKey) ,
+ GEN_CMD_CODE(_SetAssocSta) ,
+ GEN_CMD_CODE(_DelAssocSta) ,
+ GEN_CMD_CODE(_SetStaPwrState) ,
+ GEN_CMD_CODE(_SetBasicRate) , /*25*/
+ GEN_CMD_CODE(_GetBasicRate) ,
+ GEN_CMD_CODE(_SetDataRate) ,
+ GEN_CMD_CODE(_GetDataRate) ,
+ GEN_CMD_CODE(_SetPhyInfo) ,
+
+ GEN_CMD_CODE(_GetPhyInfo) , /*30*/
+ GEN_CMD_CODE(_SetPhy) ,
+ GEN_CMD_CODE(_GetPhy) ,
+ GEN_CMD_CODE(_readRssi) ,
+ GEN_CMD_CODE(_readGain) ,
+ GEN_CMD_CODE(_SetAtim) , /*35*/
+ GEN_CMD_CODE(_SetPwrMode) ,
+ GEN_CMD_CODE(_JoinbssRpt),
+ GEN_CMD_CODE(_SetRaTable) ,
+ GEN_CMD_CODE(_GetRaTable) ,
+
+ GEN_CMD_CODE(_GetCCXReport), /*40*/
+ GEN_CMD_CODE(_GetDTMReport),
+ GEN_CMD_CODE(_GetTXRateStatistics),
+ GEN_CMD_CODE(_SetUsbSuspend),
+ GEN_CMD_CODE(_SetH2cLbk),
+ GEN_CMD_CODE(_AddBAReq) , /*45*/
+ GEN_CMD_CODE(_SetChannel), /*46*/
+ GEN_CMD_CODE(_SetTxPower),
+ GEN_CMD_CODE(_SwitchAntenna),
+ GEN_CMD_CODE(_SetCrystalCap),
+ GEN_CMD_CODE(_SetSingleCarrierTx), /*50*/
+
+ GEN_CMD_CODE(_SetSingleToneTx),/*51*/
+ GEN_CMD_CODE(_SetCarrierSuppressionTx),
+ GEN_CMD_CODE(_SetContinuousTx),
+ GEN_CMD_CODE(_SwitchBandwidth), /*54*/
+ GEN_CMD_CODE(_TX_Beacon), /*55*/
+
+ GEN_CMD_CODE(_Set_MLME_EVT), /*56*/
+ GEN_CMD_CODE(_Set_Drv_Extra), /*57*/
+ GEN_CMD_CODE(_Set_H2C_MSG), /*58*/
+
+ GEN_CMD_CODE(_SetChannelPlan), /*59*/
+ GEN_CMD_CODE(_LedBlink), /*60*/
+
+ GEN_CMD_CODE(_SetChannelSwitch), /*61*/
+ GEN_CMD_CODE(_TDLS), /*62*/
+ GEN_CMD_CODE(_ChkBMCSleepq), /*63*/
+
+ GEN_CMD_CODE(_RunInThreadCMD), /*64*/
+
+ MAX_H2CCMD
+};
+
+#define _GetBBReg_CMD_ _Read_BBREG_CMD_
+#define _SetBBReg_CMD_ _Write_BBREG_CMD_
+#define _GetRFReg_CMD_ _Read_RFREG_CMD_
+#define _SetRFReg_CMD_ _Write_RFREG_CMD_
+
+#endif /* _CMD_H_ */
diff --git a/drivers/staging/rtl8723bs/include/rtw_debug.h b/drivers/staging/rtl8723bs/include/rtw_debug.h
new file mode 100644
index 000000000000..625e2a39a861
--- /dev/null
+++ b/drivers/staging/rtl8723bs/include/rtw_debug.h
@@ -0,0 +1,355 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#ifndef __RTW_DEBUG_H__
+#define __RTW_DEBUG_H__
+
+#include <linux/trace_seq.h>
+
+#define _drv_always_ 1
+#define _drv_emerg_ 2
+#define _drv_alert_ 3
+#define _drv_crit_ 4
+#define _drv_err_ 5
+#define _drv_warning_ 6
+#define _drv_notice_ 7
+#define _drv_info_ 8
+#define _drv_dump_ 9
+#define _drv_debug_ 10
+
+
+#define _module_rtl871x_xmit_c_ BIT(0)
+#define _module_xmit_osdep_c_ BIT(1)
+#define _module_rtl871x_recv_c_ BIT(2)
+#define _module_recv_osdep_c_ BIT(3)
+#define _module_rtl871x_mlme_c_ BIT(4)
+#define _module_mlme_osdep_c_ BIT(5)
+#define _module_rtl871x_sta_mgt_c_ BIT(6)
+#define _module_rtl871x_cmd_c_ BIT(7)
+#define _module_cmd_osdep_c_ BIT(8)
+#define _module_rtl871x_io_c_ BIT(9)
+#define _module_io_osdep_c_ BIT(10)
+#define _module_os_intfs_c_ BIT(11)
+#define _module_rtl871x_security_c_ BIT(12)
+#define _module_rtl871x_eeprom_c_ BIT(13)
+#define _module_hal_init_c_ BIT(14)
+#define _module_hci_hal_init_c_ BIT(15)
+#define _module_rtl871x_ioctl_c_ BIT(16)
+#define _module_rtl871x_ioctl_set_c_ BIT(17)
+#define _module_rtl871x_ioctl_query_c_ BIT(18)
+#define _module_rtl871x_pwrctrl_c_ BIT(19)
+#define _module_hci_intfs_c_ BIT(20)
+#define _module_hci_ops_c_ BIT(21)
+#define _module_osdep_service_c_ BIT(22)
+#define _module_mp_ BIT(23)
+#define _module_hci_ops_os_c_ BIT(24)
+#define _module_rtl871x_ioctl_os_c BIT(25)
+#define _module_rtl8712_cmd_c_ BIT(26)
+/* define _module_efuse_ BIT(27) */
+#define _module_rtl8192c_xmit_c_ BIT(28)
+#define _module_hal_xmit_c_ BIT(28)
+#define _module_efuse_ BIT(29)
+#define _module_rtl8712_recv_c_ BIT(30)
+#define _module_rtl8712_led_c_ BIT(31)
+
+#undef _MODULE_DEFINE_
+
+#if defined _RTW_XMIT_C_
+ #define _MODULE_DEFINE_ _module_rtl871x_xmit_c_
+#elif defined _XMIT_OSDEP_C_
+ #define _MODULE_DEFINE_ _module_xmit_osdep_c_
+#elif defined _RTW_RECV_C_
+ #define _MODULE_DEFINE_ _module_rtl871x_recv_c_
+#elif defined _RECV_OSDEP_C_
+ #define _MODULE_DEFINE_ _module_recv_osdep_c_
+#elif defined _RTW_MLME_C_
+ #define _MODULE_DEFINE_ _module_rtl871x_mlme_c_
+#elif defined _MLME_OSDEP_C_
+ #define _MODULE_DEFINE_ _module_mlme_osdep_c_
+#elif defined _RTW_MLME_EXT_C_
+ #define _MODULE_DEFINE_ 1
+#elif defined _RTW_STA_MGT_C_
+ #define _MODULE_DEFINE_ _module_rtl871x_sta_mgt_c_
+#elif defined _RTW_CMD_C_
+ #define _MODULE_DEFINE_ _module_rtl871x_cmd_c_
+#elif defined _CMD_OSDEP_C_
+ #define _MODULE_DEFINE_ _module_cmd_osdep_c_
+#elif defined _RTW_IO_C_
+ #define _MODULE_DEFINE_ _module_rtl871x_io_c_
+#elif defined _IO_OSDEP_C_
+ #define _MODULE_DEFINE_ _module_io_osdep_c_
+#elif defined _OS_INTFS_C_
+ #define _MODULE_DEFINE_ _module_os_intfs_c_
+#elif defined _RTW_SECURITY_C_
+ #define _MODULE_DEFINE_ _module_rtl871x_security_c_
+#elif defined _RTW_EEPROM_C_
+ #define _MODULE_DEFINE_ _module_rtl871x_eeprom_c_
+#elif defined _HAL_INTF_C_
+ #define _MODULE_DEFINE_ _module_hal_init_c_
+#elif (defined _HCI_HAL_INIT_C_) || (defined _SDIO_HALINIT_C_)
+ #define _MODULE_DEFINE_ _module_hci_hal_init_c_
+#elif defined _RTL871X_IOCTL_C_
+ #define _MODULE_DEFINE_ _module_rtl871x_ioctl_c_
+#elif defined _RTL871X_IOCTL_SET_C_
+ #define _MODULE_DEFINE_ _module_rtl871x_ioctl_set_c_
+#elif defined _RTL871X_IOCTL_QUERY_C_
+ #define _MODULE_DEFINE_ _module_rtl871x_ioctl_query_c_
+#elif defined _RTL871X_PWRCTRL_C_
+ #define _MODULE_DEFINE_ _module_rtl871x_pwrctrl_c_
+#elif defined _RTW_PWRCTRL_C_
+ #define _MODULE_DEFINE_ 1
+#elif defined _HCI_INTF_C_
+ #define _MODULE_DEFINE_ _module_hci_intfs_c_
+#elif defined _HCI_OPS_C_
+ #define _MODULE_DEFINE_ _module_hci_ops_c_
+#elif defined _SDIO_OPS_C_
+ #define _MODULE_DEFINE_ 1
+#elif defined _OSDEP_HCI_INTF_C_
+ #define _MODULE_DEFINE_ _module_hci_intfs_c_
+#elif defined _OSDEP_SERVICE_C_
+ #define _MODULE_DEFINE_ _module_osdep_service_c_
+#elif defined _HCI_OPS_OS_C_
+ #define _MODULE_DEFINE_ _module_hci_ops_os_c_
+#elif defined _RTL871X_IOCTL_LINUX_C_
+ #define _MODULE_DEFINE_ _module_rtl871x_ioctl_os_c
+#elif defined _RTL8712_CMD_C_
+ #define _MODULE_DEFINE_ _module_rtl8712_cmd_c_
+#elif defined _RTL8192C_XMIT_C_
+ #define _MODULE_DEFINE_ 1
+#elif defined _RTL8723AS_XMIT_C_
+ #define _MODULE_DEFINE_ 1
+#elif defined _RTL8712_RECV_C_
+ #define _MODULE_DEFINE_ _module_rtl8712_recv_c_
+#elif defined _RTL8192CU_RECV_C_
+ #define _MODULE_DEFINE_ _module_rtl8712_recv_c_
+#elif defined _RTL871X_MLME_EXT_C_
+ #define _MODULE_DEFINE_ _module_mlme_osdep_c_
+#elif defined _RTW_EFUSE_C_
+ #define _MODULE_DEFINE_ _module_efuse_
+#endif
+
+#define RT_TRACE(_Comp, _Level, Fmt) do{}while (0)
+#define RT_PRINT_DATA(_Comp, _Level, _TitleString, _HexData, _HexDataLen) do{}while (0)
+
+#define DBG_871X(x, ...) do {} while (0)
+#define MSG_8192C(x, ...) do {} while (0)
+#define DBG_8192C(x,...) do {} while (0)
+#define DBG_871X_LEVEL(x,...) do {} while (0)
+
+#undef _dbgdump
+
+#ifndef _RTL871X_DEBUG_C_
+ extern u32 GlobalDebugLevel;
+ extern u64 GlobalDebugComponents;
+#endif
+
+#define _dbgdump printk
+
+#define DRIVER_PREFIX "RTL8723BS: "
+
+#if defined(_dbgdump)
+
+/* with driver-defined prefix */
+#undef DBG_871X_LEVEL
+#define DBG_871X_LEVEL(level, fmt, arg...) \
+ do {\
+ if (level <= GlobalDebugLevel) {\
+ if (level <= _drv_err_ && level > _drv_always_) \
+ _dbgdump(DRIVER_PREFIX"ERROR " fmt, ##arg);\
+ else \
+ _dbgdump(DRIVER_PREFIX fmt, ##arg);\
+ }\
+ }while (0)
+
+/* without driver-defined prefix */
+#undef _DBG_871X_LEVEL
+#define _DBG_871X_LEVEL(level, fmt, arg...) \
+ do {\
+ if (level <= GlobalDebugLevel) {\
+ if (level <= _drv_err_ && level > _drv_always_) \
+ _dbgdump("ERROR " fmt, ##arg);\
+ else \
+ _dbgdump(fmt, ##arg);\
+ }\
+ }while (0)
+
+#define RTW_DBGDUMP NULL /* 'stream' for _dbgdump */
+
+/* dump message to selected 'stream' */
+#define DBG_871X_SEL(sel, fmt, arg...) \
+ do { \
+ if (sel == RTW_DBGDUMP) \
+ _DBG_871X_LEVEL(_drv_always_, fmt, ##arg); \
+ else \
+ seq_printf(sel, fmt, ##arg); \
+ } while (0)
+
+/* dump message to selected 'stream' with driver-defined prefix */
+#define DBG_871X_SEL_NL(sel, fmt, arg...) \
+ do { \
+ if (sel == RTW_DBGDUMP) \
+ DBG_871X_LEVEL(_drv_always_, fmt, ##arg); \
+ else \
+ seq_printf(sel, fmt, ##arg); \
+ } while (0)
+
+#endif /* defined(_dbgdump) */
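As a usage sketch, the same format string can be routed either to the kernel log (when the 'stream' is RTW_DBGDUMP) or to a seq_file, e.g. from a proc handler; the function below is hypothetical:

/* Sketch only: dump to the log or to a seq_file depending on 'sel'. */
static void example_dump_info(void *sel)
{
	DBG_871X_SEL_NL(sel, "driver version: %s\n", "x.y.z");	/* DRIVER_PREFIX added when sel == RTW_DBGDUMP */
	DBG_871X_SEL(sel, "build options: %s\n", "default");	/* no prefix */
}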
+
+#ifdef DEBUG
+#if defined(_dbgdump)
+ #undef DBG_871X
+ #define DBG_871X(...) do {\
+ _dbgdump(DRIVER_PREFIX __VA_ARGS__);\
+ }while (0)
+
+ #undef MSG_8192C
+ #define MSG_8192C(...) do {\
+ _dbgdump(DRIVER_PREFIX __VA_ARGS__);\
+ }while (0)
+
+ #undef DBG_8192C
+ #define DBG_8192C(...) do {\
+ _dbgdump(DRIVER_PREFIX __VA_ARGS__);\
+ }while (0)
+#endif /* defined(_dbgdump) */
+#endif /* DEBUG */
+
+#ifdef DEBUG_RTL871X
+
+#if defined(_dbgdump) && defined(_MODULE_DEFINE_)
+
+ #undef RT_TRACE
+ #define RT_TRACE(_Comp, _Level, Fmt)\
+ do {\
+ if ((_Comp & GlobalDebugComponents) && (_Level <= GlobalDebugLevel)) {\
+ _dbgdump("%s [0x%08x,%d]", DRIVER_PREFIX, (unsigned int)_Comp, _Level);\
+ _dbgdump Fmt;\
+ }\
+ }while (0)
+
+#endif /* defined(_dbgdump) && defined(_MODULE_DEFINE_) */
+
+
+#if defined(_dbgdump)
+ #undef RT_PRINT_DATA
+ #define RT_PRINT_DATA(_Comp, _Level, _TitleString, _HexData, _HexDataLen) \
+ if (((_Comp) & GlobalDebugComponents) && (_Level <= GlobalDebugLevel)) \
+ { \
+ int __i; \
+ u8 *ptr = (u8 *)_HexData; \
+ _dbgdump("%s", DRIVER_PREFIX); \
+ _dbgdump(_TitleString); \
+ for (__i = 0; __i<(int)_HexDataLen; __i++) \
+ { \
+ _dbgdump("%02X%s", ptr[__i], (((__i + 1) % 4) == 0)?" ":" "); \
+ if (((__i + 1) % 16) == 0) _dbgdump("\n"); \
+ } \
+ _dbgdump("\n"); \
+ }
+#endif /* defined(_dbgdump) */
+#endif /* DEBUG_RTL871X */
+
+#ifdef CONFIG_DBG_COUNTER
+#define DBG_COUNTER(counter) counter++
+#else
+#define DBG_COUNTER(counter) do {} while (0)
+#endif
+
+void dump_drv_version(void *sel);
+void dump_log_level(void *sel);
+
+void sd_f0_reg_dump(void *sel, struct adapter *adapter);
+
+void mac_reg_dump(void *sel, struct adapter *adapter);
+void bb_reg_dump(void *sel, struct adapter *adapter);
+void rf_reg_dump(void *sel, struct adapter *adapter);
+
+#ifdef PROC_DEBUG
+ssize_t proc_set_write_reg(struct file *file, const char __user *buffer, size_t count, loff_t *pos, void *data);
+int proc_get_read_reg(struct seq_file *m, void *v);
+ssize_t proc_set_read_reg(struct file *file, const char __user *buffer, size_t count, loff_t *pos, void *data);
+
+int proc_get_fwstate(struct seq_file *m, void *v);
+int proc_get_sec_info(struct seq_file *m, void *v);
+int proc_get_mlmext_state(struct seq_file *m, void *v);
+
+int proc_get_roam_flags(struct seq_file *m, void *v);
+ssize_t proc_set_roam_flags(struct file *file, const char __user *buffer, size_t count, loff_t *pos, void *data);
+int proc_get_roam_param(struct seq_file *m, void *v);
+ssize_t proc_set_roam_param(struct file *file, const char __user *buffer, size_t count, loff_t *pos, void *data);
+ssize_t proc_set_roam_tgt_addr(struct file *file, const char __user *buffer, size_t count, loff_t *pos, void *data);
+
+int proc_get_qos_option(struct seq_file *m, void *v);
+int proc_get_ht_option(struct seq_file *m, void *v);
+int proc_get_rf_info(struct seq_file *m, void *v);
+int proc_get_survey_info(struct seq_file *m, void *v);
+int proc_get_ap_info(struct seq_file *m, void *v);
+int proc_get_adapter_state(struct seq_file *m, void *v);
+int proc_get_trx_info(struct seq_file *m, void *v);
+int proc_get_rate_ctl(struct seq_file *m, void *v);
+ssize_t proc_set_rate_ctl(struct file *file, const char __user *buffer, size_t count, loff_t *pos, void *data);
+int proc_get_suspend_resume_info(struct seq_file *m, void *v);
+
+ssize_t proc_set_fwdl_test_case(struct file *file, const char __user *buffer, size_t count, loff_t *pos, void *data);
+ssize_t proc_set_wait_hiq_empty(struct file *file, const char __user *buffer, size_t count, loff_t *pos, void *data);
+
+int proc_get_all_sta_info(struct seq_file *m, void *v);
+
+int proc_get_rx_signal(struct seq_file *m, void *v);
+ssize_t proc_set_rx_signal(struct file *file, const char __user *buffer, size_t count, loff_t *pos, void *data);
+int proc_get_hw_status(struct seq_file *m, void *v);
+
+int proc_get_ht_enable(struct seq_file *m, void *v);
+ssize_t proc_set_ht_enable(struct file *file, const char __user *buffer, size_t count, loff_t *pos, void *data);
+
+int proc_get_bw_mode(struct seq_file *m, void *v);
+ssize_t proc_set_bw_mode(struct file *file, const char __user *buffer, size_t count, loff_t *pos, void *data);
+
+int proc_get_ampdu_enable(struct seq_file *m, void *v);
+ssize_t proc_set_ampdu_enable(struct file *file, const char __user *buffer, size_t count, loff_t *pos, void *data);
+
+int proc_get_rx_ampdu(struct seq_file *m, void *v);
+ssize_t proc_set_rx_ampdu(struct file *file, const char __user *buffer, size_t count, loff_t *pos, void *data);
+
+int proc_get_rx_stbc(struct seq_file *m, void *v);
+ssize_t proc_set_rx_stbc(struct file *file, const char __user *buffer, size_t count, loff_t *pos, void *data);
+
+int proc_get_en_fwps(struct seq_file *m, void *v);
+ssize_t proc_set_en_fwps(struct file *file, const char __user *buffer, size_t count, loff_t *pos, void *data);
+
+/* int proc_get_two_path_rssi(struct seq_file *m, void *v); */
+int proc_get_rssi_disp(struct seq_file *m, void *v);
+ssize_t proc_set_rssi_disp(struct file *file, const char __user *buffer, size_t count, loff_t *pos, void *data);
+
+int proc_get_btcoex_dbg(struct seq_file *m, void *v);
+ssize_t proc_set_btcoex_dbg(struct file *file, const char __user *buffer, size_t count, loff_t *pos, void *data);
+int proc_get_btcoex_info(struct seq_file *m, void *v);
+
+int proc_get_odm_dbg_comp(struct seq_file *m, void *v);
+ssize_t proc_set_odm_dbg_comp(struct file *file, const char __user *buffer, size_t count, loff_t *pos, void *data);
+int proc_get_odm_dbg_level(struct seq_file *m, void *v);
+ssize_t proc_set_odm_dbg_level(struct file *file, const char __user *buffer, size_t count, loff_t *pos, void *data);
+
+int proc_get_odm_adaptivity(struct seq_file *m, void *v);
+ssize_t proc_set_odm_adaptivity(struct file *file, const char __user *buffer, size_t count, loff_t *pos, void *data);
+
+#ifdef CONFIG_DBG_COUNTER
+int proc_get_rx_logs(struct seq_file *m, void *v);
+int proc_get_tx_logs(struct seq_file *m, void *v);
+int proc_get_int_logs(struct seq_file *m, void *v);
+#endif
+
+#endif /* PROC_DEBUG */
+
+#endif /* __RTW_DEBUG_H__ */
diff --git a/drivers/staging/rtl8723bs/include/rtw_eeprom.h b/drivers/staging/rtl8723bs/include/rtw_eeprom.h
new file mode 100644
index 000000000000..2e292bf9cf51
--- /dev/null
+++ b/drivers/staging/rtl8723bs/include/rtw_eeprom.h
@@ -0,0 +1,128 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#ifndef __RTW_EEPROM_H__
+#define __RTW_EEPROM_H__
+
+
+#define RTL8712_EEPROM_ID 0x8712
+/* define EEPROM_MAX_SIZE 256 */
+
+#define HWSET_MAX_SIZE_128 128
+#define HWSET_MAX_SIZE_256 256
+#define HWSET_MAX_SIZE_512 512
+
+#define EEPROM_MAX_SIZE HWSET_MAX_SIZE_512
+
+#define CLOCK_RATE 50 /* 100us */
+
+/* EEPROM opcodes */
+#define EEPROM_READ_OPCODE 06
+#define EEPROM_WRITE_OPCODE 05
+#define EEPROM_ERASE_OPCODE 07
+#define EEPROM_EWEN_OPCODE 19 /* Erase/write enable */
+#define EEPROM_EWDS_OPCODE 16 /* Erase/write disable */
+
+/* Country codes */
+#define USA 0x555320
+#define EUROPE 0x1 /* temp, should be provided later */
+#define JAPAN 0x2 /* temp, should be provided later */
+
+#define eeprom_cis0_sz 17
+#define eeprom_cis1_sz 50
+
+/* */
+/* Customer ID, note that: */
+/* This variable is initialized through EEPROM or registry; */
+/* however, its definition may differ from that in EEPROM for */
+/* EEPROM size consideration, so we have to perform proper translation between them. */
+/* Besides, the CustomerID from the registry takes precedence over that of EEPROM. */
+/* defined below. 060703, by rcnjko. */
+/* */
+typedef enum _RT_CUSTOMER_ID
+{
+ RT_CID_DEFAULT = 0,
+ RT_CID_8187_ALPHA0 = 1,
+ RT_CID_8187_SERCOMM_PS = 2,
+ RT_CID_8187_HW_LED = 3,
+ RT_CID_8187_NETGEAR = 4,
+ RT_CID_WHQL = 5,
+ RT_CID_819x_CAMEO = 6,
+ RT_CID_819x_RUNTOP = 7,
+ RT_CID_819x_Senao = 8,
+ RT_CID_TOSHIBA = 9, /* Merge by Jacken, 2008/01/31. */
+ RT_CID_819x_Netcore = 10,
+ RT_CID_Nettronix = 11,
+ RT_CID_DLINK = 12,
+ RT_CID_PRONET = 13,
+ RT_CID_COREGA = 14,
+ RT_CID_CHINA_MOBILE = 15,
+ RT_CID_819x_ALPHA = 16,
+ RT_CID_819x_Sitecom = 17,
+ RT_CID_CCX = 18, /* It's set under CCX logo test and isn't demanded for CCX functions, but for test behavior like retry limit and tx report. By Bruce, 2009-02-17. */
+ RT_CID_819x_Lenovo = 19,
+ RT_CID_819x_QMI = 20,
+ RT_CID_819x_Edimax_Belkin = 21,
+ RT_CID_819x_Sercomm_Belkin = 22,
+ RT_CID_819x_CAMEO1 = 23,
+ RT_CID_819x_MSI = 24,
+ RT_CID_819x_Acer = 25,
+ RT_CID_819x_AzWave_ASUS = 26,
+ RT_CID_819x_AzWave = 27, /* For AzWave in PCIe; the ID is used by AzWave, not only ASUS */
+ RT_CID_819x_HP = 28,
+ RT_CID_819x_WNC_COREGA = 29,
+ RT_CID_819x_Arcadyan_Belkin = 30,
+ RT_CID_819x_SAMSUNG = 31,
+ RT_CID_819x_CLEVO = 32,
+ RT_CID_819x_DELL = 33,
+ RT_CID_819x_PRONETS = 34,
+ RT_CID_819x_Edimax_ASUS = 35,
+ RT_CID_NETGEAR = 36,
+ RT_CID_PLANEX = 37,
+ RT_CID_CC_C = 38,
+ RT_CID_819x_Xavi = 39,
+ RT_CID_LENOVO_CHINA = 40,
+ RT_CID_INTEL_CHINA = 41,
+ RT_CID_TPLINK_HPWR = 42,
+ RT_CID_819x_Sercomm_Netgear = 43,
+ RT_CID_819x_ALPHA_Dlink = 44,/* add by ylb 20121012 for customer led for alpha */
+ RT_CID_WNC_NEC = 45,/* add by page for NEC */
+ RT_CID_DNI_BUFFALO = 46,/* add by page for NEC */
+}RT_CUSTOMER_ID, *PRT_CUSTOMER_ID;
+
+struct eeprom_priv
+{
+ u8 bautoload_fail_flag;
+ u8 bloadfile_fail_flag;
+ u8 bloadmac_fail_flag;
+ u8 EepromOrEfuse;
+
+ u8 mac_addr[6]; /* PermanentAddress */
+
+ u16 channel_plan;
+ u16 CustomerID;
+
+ u8 efuse_eeprom_data[EEPROM_MAX_SIZE]; /* 92C:256bytes, 88E:512bytes, we use union set (512bytes) */
+ u8 adjuseVoltageVal;
+
+ u8 EEPROMRFGainOffset;
+ u8 EEPROMRFGainVal;
+
+ u8 sdio_setting;
+ u32 ocr;
+ u8 cis0[eeprom_cis0_sz];
+ u8 cis1[eeprom_cis1_sz];
+};
+
+#endif /* __RTL871X_EEPROM_H__ */
diff --git a/drivers/staging/rtl8723bs/include/rtw_efuse.h b/drivers/staging/rtl8723bs/include/rtw_efuse.h
new file mode 100644
index 000000000000..5d3778a3204b
--- /dev/null
+++ b/drivers/staging/rtl8723bs/include/rtw_efuse.h
@@ -0,0 +1,132 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#ifndef __RTW_EFUSE_H__
+#define __RTW_EFUSE_H__
+
+
+#define EFUSE_ERROE_HANDLE 1
+
+#define PG_STATE_HEADER 0x01
+#define PG_STATE_WORD_0 0x02
+#define PG_STATE_WORD_1 0x04
+#define PG_STATE_WORD_2 0x08
+#define PG_STATE_WORD_3 0x10
+#define PG_STATE_DATA 0x20
+
+#define PG_SWBYTE_H 0x01
+#define PG_SWBYTE_L 0x02
+
+#define PGPKT_DATA_SIZE 8
+
+#define EFUSE_WIFI 0
+#define EFUSE_BT 1
+
+enum _EFUSE_DEF_TYPE {
+ TYPE_EFUSE_MAX_SECTION = 0,
+ TYPE_EFUSE_REAL_CONTENT_LEN = 1,
+ TYPE_AVAILABLE_EFUSE_BYTES_BANK = 2,
+ TYPE_AVAILABLE_EFUSE_BYTES_TOTAL = 3,
+ TYPE_EFUSE_MAP_LEN = 4,
+ TYPE_EFUSE_PROTECT_BYTES_BANK = 5,
+ TYPE_EFUSE_CONTENT_LEN_BANK = 6,
+};
+
+#define EFUSE_MAX_MAP_LEN 512
+
+#define EFUSE_MAX_HW_SIZE 512
+#define EFUSE_MAX_SECTION_BASE 16
+
+#define EXT_HEADER(header) ((header & 0x1F) == 0x0F)
+#define ALL_WORDS_DISABLED(wde) ((wde & 0x0F) == 0x0F)
+#define GET_HDR_OFFSET_2_0(header) ((header & 0xE0) >> 5)
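These helpers decode an efuse packet header byte: EXT_HEADER flags a two-byte (extended) header when the low five bits are 0x0F, GET_HDR_OFFSET_2_0 extracts offset bits [2:0] from the top three bits, and ALL_WORDS_DISABLED reports a word-enable nibble of 0x0F. A hedged sketch of how they combine when walking the map (built only from the macros above, not a specific function in this driver):

/* Sketch only: parse a one- or two-byte efuse packet header. */
static int example_parse_efuse_header(u8 header, u8 ext, u8 *offset, u8 *word_en)
{
	if (EXT_HEADER(header)) {
		/* extended: offset = ext[7:4] << 3 | header[7:5], word_en = ext[3:0] */
		*offset = ((ext & 0xF0) >> 1) | GET_HDR_OFFSET_2_0(header);
		*word_en = ext & 0x0F;
	} else {
		/* normal: offset = header[7:4], word_en = header[3:0] */
		*offset = (header >> 4) & 0x0F;
		*word_en = header & 0x0F;
	}
	return ALL_WORDS_DISABLED(*word_en) ? -1 : 0;
}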
+
+#define EFUSE_REPEAT_THRESHOLD_ 3
+
+/* */
+/* The following is for BT Efuse definition */
+/* */
+#define EFUSE_BT_MAX_MAP_LEN 1024
+#define EFUSE_MAX_BANK 4
+#define EFUSE_MAX_BT_BANK (EFUSE_MAX_BANK-1)
+/* */
+/*--------------------------Define Parameters-------------------------------*/
+#define EFUSE_MAX_WORD_UNIT 4
+
+/*------------------------------Define structure----------------------------*/
+typedef struct PG_PKT_STRUCT_A{
+ u8 offset;
+ u8 word_en;
+ u8 data[8];
+ u8 word_cnts;
+}PGPKT_STRUCT,*PPGPKT_STRUCT;
+
+/*------------------------------Define structure----------------------------*/
+typedef struct _EFUSE_HAL{
+ u8 fakeEfuseBank;
+ u32 fakeEfuseUsedBytes;
+ u8 fakeEfuseContent[EFUSE_MAX_HW_SIZE];
+ u8 fakeEfuseInitMap[EFUSE_MAX_MAP_LEN];
+ u8 fakeEfuseModifiedMap[EFUSE_MAX_MAP_LEN];
+
+ u16 BTEfuseUsedBytes;
+ u8 BTEfuseUsedPercentage;
+ u8 BTEfuseContent[EFUSE_MAX_BT_BANK][EFUSE_MAX_HW_SIZE];
+ u8 BTEfuseInitMap[EFUSE_BT_MAX_MAP_LEN];
+ u8 BTEfuseModifiedMap[EFUSE_BT_MAX_MAP_LEN];
+
+ u16 fakeBTEfuseUsedBytes;
+ u8 fakeBTEfuseContent[EFUSE_MAX_BT_BANK][EFUSE_MAX_HW_SIZE];
+ u8 fakeBTEfuseInitMap[EFUSE_BT_MAX_MAP_LEN];
+ u8 fakeBTEfuseModifiedMap[EFUSE_BT_MAX_MAP_LEN];
+}EFUSE_HAL, *PEFUSE_HAL;
+
+
+/*------------------------Export global variable----------------------------*/
+extern u8 fakeEfuseBank;
+extern u32 fakeEfuseUsedBytes;
+extern u8 fakeEfuseContent[];
+extern u8 fakeEfuseInitMap[];
+extern u8 fakeEfuseModifiedMap[];
+
+extern u32 BTEfuseUsedBytes;
+extern u8 BTEfuseContent[EFUSE_MAX_BT_BANK][EFUSE_MAX_HW_SIZE];
+extern u8 BTEfuseInitMap[];
+extern u8 BTEfuseModifiedMap[];
+
+extern u32 fakeBTEfuseUsedBytes;
+extern u8 fakeBTEfuseContent[EFUSE_MAX_BT_BANK][EFUSE_MAX_HW_SIZE];
+extern u8 fakeBTEfuseInitMap[];
+extern u8 fakeBTEfuseModifiedMap[];
+/*------------------------Export global variable----------------------------*/
+
+u16 Efuse_GetCurrentSize(struct adapter *padapter, u8 efuseType, bool bPseudoTest);
+u8 Efuse_CalculateWordCnts(u8 word_en);
+void EFUSE_GetEfuseDefinition(struct adapter *padapter, u8 efuseType, u8 type, void *pOut, bool bPseudoTest);
+u8 efuse_OneByteRead(struct adapter *padapter, u16 addr, u8 *data, bool bPseudoTest);
+u8 efuse_OneByteWrite(struct adapter *padapter, u16 addr, u8 data, bool bPseudoTest);
+
+void Efuse_PowerSwitch(struct adapter *padapter, u8 bWrite, u8 PwrState);
+int Efuse_PgPacketRead(struct adapter *padapter, u8 offset, u8 *data, bool bPseudoTest);
+int Efuse_PgPacketWrite(struct adapter *padapter, u8 offset, u8 word_en, u8 *data, bool bPseudoTest);
+void efuse_WordEnableDataRead(u8 word_en, u8 *sourdata, u8 *targetdata);
+u8 Efuse_WordEnableDataWrite(struct adapter *padapter, u16 efuse_addr, u8 word_en, u8 *data, bool bPseudoTest);
+
+u8 EFUSE_Read1Byte(struct adapter *padapter, u16 Address);
+void EFUSE_ShadowMapUpdate(struct adapter *padapter, u8 efuseType, bool bPseudoTest);
+void EFUSE_ShadowRead(struct adapter *padapter, u8 Type, u16 Offset, u32 *Value);
+void Rtw_Hal_ReadMACAddrFromFile(struct adapter *padapter);
+u32 Rtw_Hal_readPGDataFromConfigFile(struct adapter *padapter);
+
+#endif
diff --git a/drivers/staging/rtl8723bs/include/rtw_event.h b/drivers/staging/rtl8723bs/include/rtw_event.h
new file mode 100644
index 000000000000..2bf23de7e516
--- /dev/null
+++ b/drivers/staging/rtl8723bs/include/rtw_event.h
@@ -0,0 +1,117 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#ifndef _RTW_EVENT_H_
+#define _RTW_EVENT_H_
+
+/*
+Used to report a bss has been scanned
+
+*/
+struct survey_event {
+ struct wlan_bssid_ex bss;
+};
+
+/*
+Used to report that the requested site survey has been done.
+
+bss_cnt indicates the number of BSSs that have been reported.
+
+
+*/
+struct surveydone_event {
+ unsigned int bss_cnt;
+
+};
+
+/*
+Used to report the link result of joining the given bss
+
+
+join_res:
+-1: authentication fail
+-2: association fail
+> 0: TID
+
+*/
+struct joinbss_event {
+ struct wlan_network network;
+};
+
+/*
+Used to report that a given STA has joined the created BSS.
+It is used in AP/Ad-HoC(M) mode.
+
+
+*/
+struct stassoc_event {
+ unsigned char macaddr[6];
+ unsigned char rsvd[2];
+ int cam_id;
+
+};
+
+struct stadel_event {
+ unsigned char macaddr[6];
+ unsigned char rsvd[2]; /* for reason */
+ int mac_id;
+};
+
+struct addba_event
+{
+ unsigned int tid;
+};
+
+struct wmm_event
+{
+ unsigned char wmm;
+};
+
+#define GEN_EVT_CODE(event) event ## _EVT_
+
+
+
+struct fwevent {
+ u32 parmsize;
+ void (*event_callback)(struct adapter *dev, u8 *pbuf);
+};
+
+
+#define C2HEVENT_SZ 32
+
+struct event_node{
+ unsigned char *node;
+ unsigned char evt_code;
+ unsigned short evt_sz;
+ volatile int *caller_ff_tail;
+ int caller_ff_sz;
+};
+
+struct c2hevent_queue {
+ volatile int head;
+ volatile int tail;
+ struct event_node nodes[C2HEVENT_SZ];
+ unsigned char seq;
+};
+
+#define NETWORK_QUEUE_SZ 4
+
+struct network_queue {
+ volatile int head;
+ volatile int tail;
+ struct wlan_bssid_ex networks[NETWORK_QUEUE_SZ];
+};
+
+
+#endif /* _WLANEVENT_H_ */
diff --git a/drivers/staging/rtl8723bs/include/rtw_ht.h b/drivers/staging/rtl8723bs/include/rtw_ht.h
new file mode 100644
index 000000000000..20ca0b7b481c
--- /dev/null
+++ b/drivers/staging/rtl8723bs/include/rtw_ht.h
@@ -0,0 +1,118 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#ifndef _RTW_HT_H_
+#define _RTW_HT_H_
+
+
+struct ht_priv
+{
+ u8 ht_option;
+ u8 ampdu_enable;/* for enable Tx A-MPDU */
+ u8 tx_amsdu_enable;/* for enable Tx A-MSDU */
+ u8 bss_coexist;/* for 20/40 Bss coexist */
+
+ /* u8 baddbareq_issued[16]; */
+ u32 tx_amsdu_maxlen; /* 1: 8k, 0:4k ; default:8k, for tx */
+ u32 rx_ampdu_maxlen; /* for rx reordering ctrl win_sz, updated when join_callback. */
+
+ u8 rx_ampdu_min_spacing;
+
+ u8 ch_offset;/* PRIME_CHNL_OFFSET */
+ u8 sgi_20m;
+ u8 sgi_40m;
+
+ /* for processing Tx A-MPDU */
+ u8 agg_enable_bitmap;
+ /* u8 ADDBA_retry_count; */
+ u8 candidate_tid_bitmap;
+
+ u8 ldpc_cap;
+ u8 stbc_cap;
+ u8 beamform_cap;
+
+ struct rtw_ieee80211_ht_cap ht_cap;
+
+};
+
+typedef enum AGGRE_SIZE{
+ HT_AGG_SIZE_8K = 0,
+ HT_AGG_SIZE_16K = 1,
+ HT_AGG_SIZE_32K = 2,
+ HT_AGG_SIZE_64K = 3,
+ VHT_AGG_SIZE_128K = 4,
+ VHT_AGG_SIZE_256K = 5,
+ VHT_AGG_SIZE_512K = 6,
+ VHT_AGG_SIZE_1024K = 7,
+}AGGRE_SIZE_E, *PAGGRE_SIZE_E;
+
+typedef enum _RT_HT_INF0_CAP{
+ RT_HT_CAP_USE_TURBO_AGGR = 0x01,
+ RT_HT_CAP_USE_LONG_PREAMBLE = 0x02,
+ RT_HT_CAP_USE_AMPDU = 0x04,
+ RT_HT_CAP_USE_WOW = 0x8,
+ RT_HT_CAP_USE_SOFTAP = 0x10,
+ RT_HT_CAP_USE_92SE = 0x20,
+ RT_HT_CAP_USE_88C_92C = 0x40,
+ RT_HT_CAP_USE_AP_CLIENT_MODE = 0x80, /* AP team request to reserve this bit, by Emily */
+}RT_HT_INF0_CAPBILITY, *PRT_HT_INF0_CAPBILITY;
+
+typedef enum _RT_HT_INF1_CAP{
+ RT_HT_CAP_USE_VIDEO_CLIENT = 0x01,
+ RT_HT_CAP_USE_JAGUAR_BCUT = 0x02,
+ RT_HT_CAP_USE_JAGUAR_CCUT = 0x04,
+}RT_HT_INF1_CAPBILITY, *PRT_HT_INF1_CAPBILITY;
+
+#define LDPC_HT_ENABLE_RX BIT0
+#define LDPC_HT_ENABLE_TX BIT1
+#define LDPC_HT_TEST_TX_ENABLE BIT2
+#define LDPC_HT_CAP_TX BIT3
+
+#define STBC_HT_ENABLE_RX BIT0
+#define STBC_HT_ENABLE_TX BIT1
+#define STBC_HT_TEST_TX_ENABLE BIT2
+#define STBC_HT_CAP_TX BIT3
+
+#define BEAMFORMING_HT_BEAMFORMER_ENABLE BIT0 /* Declare our NIC supports beamformer */
+#define BEAMFORMING_HT_BEAMFORMEE_ENABLE BIT1 /* Declare our NIC supports beamformee */
+#define BEAMFORMING_HT_BEAMFORMER_TEST BIT2 /* Transmit beamforming regardless of whether the target supports it */
+
+/* */
+/* The HT Control field */
+/* */
+#define SET_HT_CTRL_CSI_STEERING(_pEleStart, _val) SET_BITS_TO_LE_1BYTE((_pEleStart)+2, 6, 2, _val)
+#define SET_HT_CTRL_NDP_ANNOUNCEMENT(_pEleStart, _val) SET_BITS_TO_LE_1BYTE((_pEleStart)+3, 0, 1, _val)
+#define GET_HT_CTRL_NDP_ANNOUNCEMENT(_pEleStart) LE_BITS_TO_1BYTE((_pEleStart)+3, 0, 1)
+
+/* 20/40 BSS Coexist */
+#define SET_EXT_CAPABILITY_ELE_BSS_COEXIST(_pEleStart, _val) SET_BITS_TO_LE_1BYTE((_pEleStart), 0, 1, _val)
+#define GET_EXT_CAPABILITY_ELE_BSS_COEXIST(_pEleStart) LE_BITS_TO_1BYTE((_pEleStart), 0, 1)
+
+
+#define GET_HT_CAPABILITY_ELE_LDPC_CAP(_pEleStart) LE_BITS_TO_1BYTE(_pEleStart, 0, 1)
+#define GET_HT_CAPABILITY_ELE_TX_STBC(_pEleStart) LE_BITS_TO_1BYTE(_pEleStart, 7, 1)
+
+#define GET_HT_CAPABILITY_ELE_RX_STBC(_pEleStart) LE_BITS_TO_1BYTE((_pEleStart)+1, 0, 2)
+
+/* TXBF Capabilities */
+#define SET_HT_CAP_TXBF_RECEIVE_NDP_CAP(_pEleStart, _val) SET_BITS_TO_LE_4BYTE(((u8 *)(_pEleStart))+21, 3, 1, ((u8)_val))
+#define SET_HT_CAP_TXBF_TRANSMIT_NDP_CAP(_pEleStart, _val) SET_BITS_TO_LE_4BYTE(((u8 *)(_pEleStart))+21, 4, 1, ((u8)_val))
+#define SET_HT_CAP_TXBF_EXPLICIT_COMP_STEERING_CAP(_pEleStart, _val) SET_BITS_TO_LE_4BYTE(((u8 *)(_pEleStart))+21, 10, 1, ((u8)_val))
+#define SET_HT_CAP_TXBF_EXPLICIT_COMP_FEEDBACK_CAP(_pEleStart, _val) SET_BITS_TO_LE_4BYTE(((u8 *)(_pEleStart))+21, 15, 2, ((u8)_val))
+#define SET_HT_CAP_TXBF_COMP_STEERING_NUM_ANTENNAS(_pEleStart, _val) SET_BITS_TO_LE_4BYTE(((u8 *)(_pEleStart))+21, 23, 2, ((u8)_val))
+
+#define GET_HT_CAP_TXBF_EXPLICIT_COMP_STEERING_CAP(_pEleStart) LE_BITS_TO_4BYTE((_pEleStart)+21, 10, 1)
+#define GET_HT_CAP_TXBF_EXPLICIT_COMP_FEEDBACK_CAP(_pEleStart) LE_BITS_TO_4BYTE((_pEleStart)+21, 15, 2)
+
+#endif /* _RTL871X_HT_H_ */
diff --git a/drivers/staging/rtl8723bs/include/rtw_io.h b/drivers/staging/rtl8723bs/include/rtw_io.h
new file mode 100644
index 000000000000..0341d0d35375
--- /dev/null
+++ b/drivers/staging/rtl8723bs/include/rtw_io.h
@@ -0,0 +1,373 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+
+#ifndef _RTW_IO_H_
+#define _RTW_IO_H_
+
+#define NUM_IOREQ 8
+
+#define MAX_PROT_SZ (64-16)
+
+#define _IOREADY 0
+#define _IO_WAIT_COMPLETE 1
+#define _IO_WAIT_RSP 2
+
+/* IO COMMAND TYPE */
+#define _IOSZ_MASK_ (0x7F)
+#define _IO_WRITE_ BIT(7)
+#define _IO_FIXED_ BIT(8)
+#define _IO_BURST_ BIT(9)
+#define _IO_BYTE_ BIT(10)
+#define _IO_HW_ BIT(11)
+#define _IO_WORD_ BIT(12)
+#define _IO_SYNC_ BIT(13)
+#define _IO_CMDMASK_ (0x1F80)
+
+
+/*
+ For prompt-mode access, the caller shall free the io_req;
+ otherwise, the io_handler will free it.
+*/
+
+
+
+/* IO STATUS TYPE */
+#define _IO_ERR_ BIT(2)
+#define _IO_SUCCESS_ BIT(1)
+#define _IO_DONE_ BIT(0)
+
+
+#define IO_RD32 (_IO_SYNC_ | _IO_WORD_)
+#define IO_RD16 (_IO_SYNC_ | _IO_HW_)
+#define IO_RD8 (_IO_SYNC_ | _IO_BYTE_)
+
+#define IO_RD32_ASYNC (_IO_WORD_)
+#define IO_RD16_ASYNC (_IO_HW_)
+#define IO_RD8_ASYNC (_IO_BYTE_)
+
+#define IO_WR32 (_IO_WRITE_ | _IO_SYNC_ | _IO_WORD_)
+#define IO_WR16 (_IO_WRITE_ | _IO_SYNC_ | _IO_HW_)
+#define IO_WR8 (_IO_WRITE_ | _IO_SYNC_ | _IO_BYTE_)
+
+#define IO_WR32_ASYNC (_IO_WRITE_ | _IO_WORD_)
+#define IO_WR16_ASYNC (_IO_WRITE_ | _IO_HW_)
+#define IO_WR8_ASYNC (_IO_WRITE_ | _IO_BYTE_)
+
+/*
+
+ Only synchronous burst access is provided.
+
+*/
+
+#define IO_WR_BURST(x) (_IO_WRITE_ | _IO_SYNC_ | _IO_BURST_ | ((x) & _IOSZ_MASK_))
+#define IO_RD_BURST(x) (_IO_SYNC_ | _IO_BURST_ | ((x) & _IOSZ_MASK_))
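Each IO command word is a bit-or of the flags above with the transfer size in the low 7 bits; a small illustrative pair of helpers (names are hypothetical):

/* Sketch only: compose and decode a burst-write command word. */
static inline u32 example_burst_write_cmd(u32 nbytes)
{
	return IO_WR_BURST(nbytes);	/* _IO_WRITE_ | _IO_SYNC_ | _IO_BURST_ | (nbytes & 0x7F) */
}

static inline u32 example_cmd_size(u32 cmd)
{
	return cmd & _IOSZ_MASK_;	/* the low 7 bits carry the transfer size */
}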
+
+
+
+/* below is for the intf_option bit definition... */
+
+#define _INTF_ASYNC_ BIT(0) /* support async io */
+
+struct intf_priv;
+struct intf_hdl;
+struct io_queue;
+
+struct _io_ops {
+ u8 (*_read8)(struct intf_hdl *pintfhdl, u32 addr);
+ u16 (*_read16)(struct intf_hdl *pintfhdl, u32 addr);
+ u32 (*_read32)(struct intf_hdl *pintfhdl, u32 addr);
+
+ int (*_write8)(struct intf_hdl *pintfhdl, u32 addr, u8 val);
+ int (*_write16)(struct intf_hdl *pintfhdl, u32 addr, u16 val);
+ int (*_write32)(struct intf_hdl *pintfhdl, u32 addr, u32 val);
+ int (*_writeN)(struct intf_hdl *pintfhdl, u32 addr, u32 length, u8 *pdata);
+
+ int (*_write8_async)(struct intf_hdl *pintfhdl, u32 addr, u8 val);
+ int (*_write16_async)(struct intf_hdl *pintfhdl, u32 addr, u16 val);
+ int (*_write32_async)(struct intf_hdl *pintfhdl, u32 addr, u32 val);
+
+ void (*_read_mem)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *pmem);
+ void (*_write_mem)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *pmem);
+
+ void (*_sync_irp_protocol_rw)(struct io_queue *pio_q);
+
+ u32 (*_read_interrupt)(struct intf_hdl *pintfhdl, u32 addr);
+
+ u32 (*_read_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *pmem);
+ u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *pmem);
+
+ u32 (*_write_scsi)(struct intf_hdl *pintfhdl, u32 cnt, u8 *pmem);
+
+ void (*_read_port_cancel)(struct intf_hdl *pintfhdl);
+ void (*_write_port_cancel)(struct intf_hdl *pintfhdl);
+
+ u8 (*_sd_f0_read8)(struct intf_hdl *pintfhdl, u32 addr);
+};
+
+struct io_req {
+ struct list_head list;
+ u32 addr;
+ volatile u32 val;
+ u32 command;
+ u32 status;
+ u8 *pbuf;
+ _sema sema;
+
+ void (*_async_io_callback)(struct adapter *padater, struct io_req *pio_req, u8 *cnxt);
+ u8 *cnxt;
+};
+
+struct intf_hdl {
+ struct adapter *padapter;
+ struct dvobj_priv *pintf_dev;/* pointer to &(padapter->dvobjpriv); */
+
+ struct _io_ops io_ops;
+};
+
+struct reg_protocol_rd {
+
+#ifdef __LITTLE_ENDIAN
+
+ /* DW1 */
+ u32 NumOfTrans:4;
+ u32 Reserved1:4;
+ u32 Reserved2:24;
+ /* DW2 */
+ u32 ByteCount:7;
+ u32 WriteEnable:1; /* 0:read, 1:write */
+ u32 FixOrContinuous:1; /* 0:continuous, 1: Fix */
+ u32 BurstMode:1;
+ u32 Byte1Access:1;
+ u32 Byte2Access:1;
+ u32 Byte4Access:1;
+ u32 Reserved3:3;
+ u32 Reserved4:16;
+ /* DW3 */
+ u32 BusAddress;
+ /* DW4 */
+ /* u32 Value; */
+#else
+
+
+/* DW1 */
+ u32 Reserved1 :4;
+ u32 NumOfTrans :4;
+
+ u32 Reserved2 :24;
+
+ /* DW2 */
+ u32 WriteEnable : 1;
+ u32 ByteCount :7;
+
+
+ u32 Reserved3 : 3;
+ u32 Byte4Access : 1;
+
+ u32 Byte2Access : 1;
+ u32 Byte1Access : 1;
+ u32 BurstMode :1 ;
+ u32 FixOrContinuous : 1;
+
+ u32 Reserved4 : 16;
+
+ /* DW3 */
+ u32 BusAddress;
+
+ /* DW4 */
+ /* u32 Value; */
+
+#endif
+
+};
+
+
+struct reg_protocol_wt {
+
+
+#ifdef __LITTLE_ENDIAN
+
+ /* DW1 */
+ u32 NumOfTrans:4;
+ u32 Reserved1:4;
+ u32 Reserved2:24;
+ /* DW2 */
+ u32 ByteCount:7;
+ u32 WriteEnable:1; /* 0:read, 1:write */
+ u32 FixOrContinuous:1; /* 0:continuous, 1: Fix */
+ u32 BurstMode:1;
+ u32 Byte1Access:1;
+ u32 Byte2Access:1;
+ u32 Byte4Access:1;
+ u32 Reserved3:3;
+ u32 Reserved4:16;
+ /* DW3 */
+ u32 BusAddress;
+ /* DW4 */
+ u32 Value;
+
+#else
+ /* DW1 */
+ u32 Reserved1 :4;
+ u32 NumOfTrans :4;
+
+ u32 Reserved2 :24;
+
+ /* DW2 */
+ u32 WriteEnable : 1;
+ u32 ByteCount :7;
+
+ u32 Reserved3 : 3;
+ u32 Byte4Access : 1;
+
+ u32 Byte2Access : 1;
+ u32 Byte1Access : 1;
+ u32 BurstMode :1 ;
+ u32 FixOrContinuous : 1;
+
+ u32 Reserved4 : 16;
+
+ /* DW3 */
+ u32 BusAddress;
+
+ /* DW4 */
+ u32 Value;
+
+#endif
+
+};
+#define SD_IO_TRY_CNT (8)
+#define MAX_CONTINUAL_IO_ERR SD_IO_TRY_CNT
+
+int rtw_inc_and_chk_continual_io_error(struct dvobj_priv *dvobj);
+void rtw_reset_continual_io_error(struct dvobj_priv *dvobj);
+
+/*
+Below is the data structure used by _io_handler
+
+*/
+
+struct io_queue {
+ _lock lock;
+ struct list_head free_ioreqs;
+ struct list_head pending; /* The io_req list that will be served in the single protocol read/write. */
+ struct list_head processing;
+ u8 *free_ioreqs_buf; /* 4-byte aligned */
+ u8 *pallocated_free_ioreqs_buf;
+ struct intf_hdl intf;
+};
+
+struct io_priv{
+
+ struct adapter *padapter;
+
+ struct intf_hdl intf;
+
+};
+
+extern uint ioreq_flush(struct adapter *adapter, struct io_queue *ioqueue);
+extern void sync_ioreq_enqueue(struct io_req *preq, struct io_queue *ioqueue);
+extern uint sync_ioreq_flush(struct adapter *adapter, struct io_queue *ioqueue);
+
+
+extern uint free_ioreq(struct io_req *preq, struct io_queue *pio_queue);
+extern struct io_req *alloc_ioreq(struct io_queue *pio_q);
+
+extern uint register_intf_hdl(u8 *dev, struct intf_hdl *pintfhdl);
+extern void unregister_intf_hdl(struct intf_hdl *pintfhdl);
+
+extern void _rtw_attrib_read(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem);
+extern void _rtw_attrib_write(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem);
+
+extern u8 _rtw_read8(struct adapter *adapter, u32 addr);
+extern u16 _rtw_read16(struct adapter *adapter, u32 addr);
+extern u32 _rtw_read32(struct adapter *adapter, u32 addr);
+
+extern int _rtw_write8(struct adapter *adapter, u32 addr, u8 val);
+extern int _rtw_write16(struct adapter *adapter, u32 addr, u16 val);
+extern int _rtw_write32(struct adapter *adapter, u32 addr, u32 val);
+
+extern u8 _rtw_sd_f0_read8(struct adapter *adapter, u32 addr);
+
+extern u32 _rtw_write_port(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem);
+
+#define rtw_read8(adapter, addr) _rtw_read8((adapter), (addr))
+#define rtw_read16(adapter, addr) _rtw_read16((adapter), (addr))
+#define rtw_read32(adapter, addr) _rtw_read32((adapter), (addr))
+
+#define rtw_write8(adapter, addr, val) _rtw_write8((adapter), (addr), (val))
+#define rtw_write16(adapter, addr, val) _rtw_write16((adapter), (addr), (val))
+#define rtw_write32(adapter, addr, val) _rtw_write32((adapter), (addr), (val))
+
+#define rtw_write_port(adapter, addr, cnt, mem) _rtw_write_port((adapter), (addr), (cnt), (mem))
+
+#define rtw_sd_f0_read8(adapter, addr) _rtw_sd_f0_read8((adapter), (addr))
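A short hedged usage sketch of the byte accessors wrapped above (the helper and its arguments are hypothetical):

/* Sketch only: read-modify-write an 8-bit register through the wrappers. */
static void example_set_reg_bit(struct adapter *padapter, u32 addr, u8 bit_mask)
{
	u8 val;

	val = rtw_read8(padapter, addr);
	rtw_write8(padapter, addr, val | bit_mask);
}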
+
+extern void rtw_write_scsi(struct adapter *adapter, u32 cnt, u8 *pmem);
+
+/* ioreq */
+extern void ioreq_read8(struct adapter *adapter, u32 addr, u8 *pval);
+extern void ioreq_read16(struct adapter *adapter, u32 addr, u16 *pval);
+extern void ioreq_read32(struct adapter *adapter, u32 addr, u32 *pval);
+extern void ioreq_write8(struct adapter *adapter, u32 addr, u8 val);
+extern void ioreq_write16(struct adapter *adapter, u32 addr, u16 val);
+extern void ioreq_write32(struct adapter *adapter, u32 addr, u32 val);
+
+
+extern uint async_read8(struct adapter *adapter, u32 addr, u8 *pbuff,
+ void (*_async_io_callback)(struct adapter *padater, struct io_req *pio_req, u8 *cnxt), u8 *cnxt);
+extern uint async_read16(struct adapter *adapter, u32 addr, u8 *pbuff,
+ void (*_async_io_callback)(struct adapter *padater, struct io_req *pio_req, u8 *cnxt), u8 *cnxt);
+extern uint async_read32(struct adapter *adapter, u32 addr, u8 *pbuff,
+ void (*_async_io_callback)(struct adapter *padater, struct io_req *pio_req, u8 *cnxt), u8 *cnxt);
+
+extern void async_read_mem(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem);
+extern void async_read_port(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem);
+
+extern void async_write8(struct adapter *adapter, u32 addr, u8 val,
+ void (*_async_io_callback)(struct adapter *padater, struct io_req *pio_req, u8 *cnxt), u8 *cnxt);
+extern void async_write16(struct adapter *adapter, u32 addr, u16 val,
+ void (*_async_io_callback)(struct adapter *padater, struct io_req *pio_req, u8 *cnxt), u8 *cnxt);
+extern void async_write32(struct adapter *adapter, u32 addr, u32 val,
+ void (*_async_io_callback)(struct adapter *padater, struct io_req *pio_req, u8 *cnxt), u8 *cnxt);
+
+extern void async_write_mem(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem);
+extern void async_write_port(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem);
+
+
+int rtw_init_io_priv(struct adapter *padapter, void (*set_intf_ops)(struct adapter *padapter, struct _io_ops *pops));
+
+
+extern uint alloc_io_queue(struct adapter *adapter);
+extern void free_io_queue(struct adapter *adapter);
+extern void async_bus_io(struct io_queue *pio_q);
+extern void bus_sync_io(struct io_queue *pio_q);
+extern u32 _ioreq2rwmem(struct io_queue *pio_q);
+extern void dev_power_down(struct adapter * Adapter, u8 bpwrup);
+
+#define PlatformEFIOWrite1Byte(_a, _b, _c) \
+ rtw_write8(_a, _b, _c)
+#define PlatformEFIOWrite2Byte(_a, _b, _c) \
+ rtw_write16(_a, _b, _c)
+#define PlatformEFIOWrite4Byte(_a, _b, _c) \
+ rtw_write32(_a, _b, _c)
+
+#define PlatformEFIORead1Byte(_a, _b) \
+ rtw_read8(_a, _b)
+#define PlatformEFIORead2Byte(_a, _b) \
+ rtw_read16(_a, _b)
+#define PlatformEFIORead4Byte(_a, _b) \
+ rtw_read32(_a, _b)
+
+#endif /* _RTL8711_IO_H_ */
diff --git a/drivers/staging/rtl8723bs/include/rtw_ioctl.h b/drivers/staging/rtl8723bs/include/rtw_ioctl.h
new file mode 100644
index 000000000000..c19e179fc426
--- /dev/null
+++ b/drivers/staging/rtl8723bs/include/rtw_ioctl.h
@@ -0,0 +1,80 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#ifndef _RTW_IOCTL_H_
+#define _RTW_IOCTL_H_
+
+/* 00 - Success */
+/* 11 - Error */
+#define STATUS_SUCCESS (0x00000000L)
+#define STATUS_PENDING (0x00000103L)
+
+#define STATUS_UNSUCCESSFUL (0xC0000001L)
+#define STATUS_INSUFFICIENT_RESOURCES (0xC000009AL)
+#define STATUS_NOT_SUPPORTED (0xC00000BBL)
+
+#define NDIS_STATUS_SUCCESS ((uint)STATUS_SUCCESS)
+#define NDIS_STATUS_PENDING ((uint)STATUS_PENDING)
+#define NDIS_STATUS_NOT_RECOGNIZED ((uint)0x00010001L)
+#define NDIS_STATUS_NOT_COPIED ((uint)0x00010002L)
+#define NDIS_STATUS_NOT_ACCEPTED ((uint)0x00010003L)
+#define NDIS_STATUS_CALL_ACTIVE ((uint)0x00010007L)
+
+#define NDIS_STATUS_FAILURE ((uint)STATUS_UNSUCCESSFUL)
+#define NDIS_STATUS_RESOURCES ((uint)STATUS_INSUFFICIENT_RESOURCES)
+#define NDIS_STATUS_CLOSING ((uint)0xC0010002L)
+#define NDIS_STATUS_BAD_VERSION ((uint)0xC0010004L)
+#define NDIS_STATUS_BAD_CHARACTERISTICS ((uint)0xC0010005L)
+#define NDIS_STATUS_ADAPTER_NOT_FOUND ((uint)0xC0010006L)
+#define NDIS_STATUS_OPEN_FAILED ((uint)0xC0010007L)
+#define NDIS_STATUS_DEVICE_FAILED ((uint)0xC0010008L)
+#define NDIS_STATUS_MULTICAST_FULL ((uint)0xC0010009L)
+#define NDIS_STATUS_MULTICAST_EXISTS ((uint)0xC001000AL)
+#define NDIS_STATUS_MULTICAST_NOT_FOUND ((uint)0xC001000BL)
+#define NDIS_STATUS_REQUEST_ABORTED ((uint)0xC001000CL)
+#define NDIS_STATUS_RESET_IN_PROGRESS ((uint)0xC001000DL)
+#define NDIS_STATUS_CLOSING_INDICATING ((uint)0xC001000EL)
+#define NDIS_STATUS_NOT_SUPPORTED ((uint)STATUS_NOT_SUPPORTED)
+#define NDIS_STATUS_INVALID_PACKET ((uint)0xC001000FL)
+#define NDIS_STATUS_OPEN_LIST_FULL ((uint)0xC0010010L)
+#define NDIS_STATUS_ADAPTER_NOT_READY ((uint)0xC0010011L)
+#define NDIS_STATUS_ADAPTER_NOT_OPEN ((uint)0xC0010012L)
+#define NDIS_STATUS_NOT_INDICATING ((uint)0xC0010013L)
+#define NDIS_STATUS_INVALID_LENGTH ((uint)0xC0010014L)
+#define NDIS_STATUS_INVALID_DATA ((uint)0xC0010015L)
+#define NDIS_STATUS_BUFFER_TOO_SHORT ((uint)0xC0010016L)
+#define NDIS_STATUS_INVALID_OID ((uint)0xC0010017L)
+#define NDIS_STATUS_ADAPTER_REMOVED ((uint)0xC0010018L)
+#define NDIS_STATUS_UNSUPPORTED_MEDIA ((uint)0xC0010019L)
+#define NDIS_STATUS_GROUP_ADDRESS_IN_USE ((uint)0xC001001AL)
+#define NDIS_STATUS_FILE_NOT_FOUND ((uint)0xC001001BL)
+#define NDIS_STATUS_ERROR_READING_FILE ((uint)0xC001001CL)
+#define NDIS_STATUS_ALREADY_MAPPED ((uint)0xC001001DL)
+#define NDIS_STATUS_RESOURCE_CONFLICT ((uint)0xC001001EL)
+#define NDIS_STATUS_NO_CABLE ((uint)0xC001001FL)
+
+#define NDIS_STATUS_INVALID_SAP ((uint)0xC0010020L)
+#define NDIS_STATUS_SAP_IN_USE ((uint)0xC0010021L)
+#define NDIS_STATUS_INVALID_ADDRESS ((uint)0xC0010022L)
+#define NDIS_STATUS_VC_NOT_ACTIVATED ((uint)0xC0010023L)
+#define NDIS_STATUS_DEST_OUT_OF_ORDER ((uint)0xC0010024L) /* cause 27 */
+#define NDIS_STATUS_VC_NOT_AVAILABLE ((uint)0xC0010025L) /* cause 35, 45 */
+#define NDIS_STATUS_CELLRATE_NOT_AVAILABLE ((uint)0xC0010026L) /* cause 37 */
+#define NDIS_STATUS_INCOMPATABLE_QOS ((uint)0xC0010027L) /* cause 49 */
+#define NDIS_STATUS_AAL_PARAMS_UNSUPPORTED ((uint)0xC0010028L) /* cause 93 */
+#define NDIS_STATUS_NO_ROUTE_TO_DESTINATION ((uint)0xC0010029L) /* cause 3 */
+
+extern struct iw_handler_def rtw_handlers_def;
+
+#endif /* #ifndef _RTW_IOCTL_H_ */
diff --git a/drivers/staging/rtl8723bs/include/rtw_ioctl_set.h b/drivers/staging/rtl8723bs/include/rtw_ioctl_set.h
new file mode 100644
index 000000000000..ebf233559f67
--- /dev/null
+++ b/drivers/staging/rtl8723bs/include/rtw_ioctl_set.h
@@ -0,0 +1,41 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#ifndef __RTW_IOCTL_SET_H_
+#define __RTW_IOCTL_SET_H_
+
+
+typedef u8 NDIS_802_11_PMKID_VALUE[16];
+
+typedef struct _BSSIDInfo {
+ NDIS_802_11_MAC_ADDRESS BSSID;
+ NDIS_802_11_PMKID_VALUE PMKID;
+} BSSIDInfo, *PBSSIDInfo;
+
+
+u8 rtw_set_802_11_authentication_mode(struct adapter *pdapter, enum NDIS_802_11_AUTHENTICATION_MODE authmode);
+u8 rtw_set_802_11_bssid(struct adapter *padapter, u8 *bssid);
+u8 rtw_set_802_11_add_wep(struct adapter *padapter, struct ndis_802_11_wep * wep);
+u8 rtw_set_802_11_disassociate(struct adapter *padapter);
+u8 rtw_set_802_11_bssid_list_scan(struct adapter *padapter, struct ndis_802_11_ssid *pssid, int ssid_max_num);
+u8 rtw_set_802_11_infrastructure_mode(struct adapter *padapter, enum NDIS_802_11_NETWORK_INFRASTRUCTURE networktype);
+u8 rtw_set_802_11_ssid(struct adapter *padapter, struct ndis_802_11_ssid * ssid);
+u8 rtw_set_802_11_connect(struct adapter *padapter, u8 *bssid, struct ndis_802_11_ssid *ssid);
+
+u8 rtw_validate_bssid(u8 *bssid);
+u8 rtw_validate_ssid(struct ndis_802_11_ssid *ssid);
+
+u16 rtw_get_cur_max_rate(struct adapter *adapter);
+
+#endif
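The prototypes above are the ioctl-level setters that the wext/cfg80211 front ends call into. A minimal, hypothetical usage sketch follows (not part of the patch; example_connect_to_ssid is an invented name), assuming only the declarations in this header:

/* Illustrative only: validate the SSID, then request a connection. */
static u8 example_connect_to_ssid(struct adapter *padapter,
				  struct ndis_802_11_ssid *ssid)
{
	if (!rtw_validate_ssid(ssid))
		return false;

	return rtw_set_802_11_ssid(padapter, ssid);
}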
diff --git a/drivers/staging/rtl8723bs/include/rtw_mlme.h b/drivers/staging/rtl8723bs/include/rtw_mlme.h
new file mode 100644
index 000000000000..d88ef67ce8d6
--- /dev/null
+++ b/drivers/staging/rtl8723bs/include/rtw_mlme.h
@@ -0,0 +1,695 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#ifndef __RTW_MLME_H_
+#define __RTW_MLME_H_
+
+
+#define MAX_BSS_CNT 128
+/* define MAX_JOIN_TIMEOUT 2000 */
+/* define MAX_JOIN_TIMEOUT 2500 */
+#define MAX_JOIN_TIMEOUT 6500
+
+/* Commented by Albert 20101105 */
+/* Increase the scanning timeout because of increasing the SURVEY_TO value. */
+
+#define SCANNING_TIMEOUT 8000
+
+#ifdef PALTFORM_OS_WINCE
+#define SCANQUEUE_LIFETIME 12000000 /* unit:us */
+#else
+#define SCANQUEUE_LIFETIME 20000 /* 20sec, unit:msec */
+#endif
+
+#define WIFI_NULL_STATE 0x00000000
+#define WIFI_ASOC_STATE 0x00000001 /* Under Linked state... */
+#define WIFI_REASOC_STATE 0x00000002
+#define WIFI_SLEEP_STATE 0x00000004
+#define WIFI_STATION_STATE 0x00000008
+#define WIFI_AP_STATE 0x00000010
+#define WIFI_ADHOC_STATE 0x00000020
+#define WIFI_ADHOC_MASTER_STATE 0x00000040
+#define WIFI_UNDER_LINKING 0x00000080
+
+#define WIFI_UNDER_WPS 0x00000100
+/* define WIFI_UNDER_CMD 0x00000200 */
+/* define WIFI_UNDER_P2P 0x00000400 */
+#define WIFI_STA_ALIVE_CHK_STATE 0x00000400
+#define WIFI_SITE_MONITOR 0x00000800 /* to indicate the station is under site surveying */
+#ifdef WDS
+#define WIFI_WDS 0x00001000
+#define WIFI_WDS_RX_BEACON 0x00002000 /* already rx WDS AP beacon */
+#endif
+#ifdef AUTO_CONFIG
+#define WIFI_AUTOCONF 0x00004000
+#define WIFI_AUTOCONF_IND 0x00008000
+#endif
+
+/**
+* ========== P2P Section Start ===============
+#define WIFI_P2P_LISTEN_STATE 0x00010000
+#define WIFI_P2P_GROUP_FORMATION_STATE 0x00020000
+ ========== P2P Section End ===============
+*/
+
+/* ifdef UNDER_MPTEST */
+#define WIFI_MP_STATE 0x00010000
+#define WIFI_MP_CTX_BACKGROUND 0x00020000 /* in continuous tx background */
+#define WIFI_MP_CTX_ST 0x00040000 /* in continuous tx with single-tone */
+#define WIFI_MP_CTX_BACKGROUND_PENDING 0x00080000 /* pending in continuous tx background due to out of skb */
+#define WIFI_MP_CTX_CCK_HW 0x00100000 /* in continuous tx */
+#define WIFI_MP_CTX_CCK_CS 0x00200000 /* in continuous tx with carrier suppression */
+#define WIFI_MP_LPBK_STATE 0x00400000
+/* endif */
+
+/* define _FW_UNDER_CMD WIFI_UNDER_CMD */
+#define _FW_UNDER_LINKING WIFI_UNDER_LINKING
+#define _FW_LINKED WIFI_ASOC_STATE
+#define _FW_UNDER_SURVEY WIFI_SITE_MONITOR
+
+
+enum dot11AuthAlgrthmNum {
+ dot11AuthAlgrthm_Open = 0,
+ dot11AuthAlgrthm_Shared,
+ dot11AuthAlgrthm_8021X,
+ dot11AuthAlgrthm_Auto,
+ dot11AuthAlgrthm_WAPI,
+ dot11AuthAlgrthm_MaxNum
+};
+
+/* Scan type including active and passive scan. */
+typedef enum _RT_SCAN_TYPE
+{
+ SCAN_PASSIVE,
+ SCAN_ACTIVE,
+ SCAN_MIX,
+}RT_SCAN_TYPE, *PRT_SCAN_TYPE;
+
+enum _BAND
+{
+ GHZ24_50 = 0,
+ GHZ_50,
+ GHZ_24,
+ GHZ_MAX,
+};
+
+#define rtw_band_valid(band) ((band) >= GHZ24_50 && (band) < GHZ_MAX)
+
+enum DriverInterface {
+ DRIVER_WEXT = 1,
+ DRIVER_CFG80211 = 2
+};
+
+enum SCAN_RESULT_TYPE
+{
+ SCAN_RESULT_P2P_ONLY = 0, /* Will return all the P2P devices. */
+ SCAN_RESULT_ALL = 1, /* Will return all the scanned device, include AP. */
+ SCAN_RESULT_WFD_TYPE = 2 /* Will just return the correct WFD device. */
+ /* If this device is Miracast sink device, it will just return all the Miracast source devices. */
+};
+
+/*
+ * There are several "locks" in mlme_priv, since mlme_priv is a shared
+ * resource between many threads, such as ISR/callback functions, the OID
+ * handlers, and even timer functions.
+ *
+ * Each struct __queue already has its own lock.
+ * Other items are protected by mlme_priv.lock.
+ *
+ * To avoid possible deadlock, any thread trying to modify mlme_priv
+ * SHALL NOT hold more than one of these locks at a time!
+ */
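A minimal sketch of the rule above (illustrative, not part of the patch; example_mark_to_join is an invented name): a field that does not live inside a struct __queue is updated under mlme_priv.lock, and no second mlme lock is held at the same time.

static void example_mark_to_join(struct mlme_priv *pmlmepriv)
{
	/* "other item" data, so mlme_priv.lock protects it */
	spin_lock_bh(&pmlmepriv->lock);
	pmlmepriv->to_join = true;
	spin_unlock_bh(&pmlmepriv->lock);
	/* a queue's own lock would only be taken after dropping this one */
}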
+
+
+#define traffic_threshold 10
+#define traffic_scan_period 500
+
+struct sitesurvey_ctrl {
+ u64 last_tx_pkts;
+ uint last_rx_pkts;
+ sint traffic_busy;
+ _timer sitesurvey_ctrl_timer;
+};
+
+typedef struct _RT_LINK_DETECT_T{
+ u32 NumTxOkInPeriod;
+ u32 NumRxOkInPeriod;
+ u32 NumRxUnicastOkInPeriod;
+ bool bBusyTraffic;
+ bool bTxBusyTraffic;
+ bool bRxBusyTraffic;
+ bool bHigherBusyTraffic; /* For interrupt migration purpose. */
+	bool bHigherBusyRxTraffic; /* We may disable Tx interrupt according to Rx traffic. */
+	bool bHigherBusyTxTraffic; /* We may disable Tx interrupt according to Tx traffic. */
+ /* u8 TrafficBusyState; */
+ u8 TrafficTransitionCount;
+ u32 LowPowerTransitionCount;
+}RT_LINK_DETECT_T, *PRT_LINK_DETECT_T;
+
+struct profile_info {
+ u8 ssidlen;
+ u8 ssid[ WLAN_SSID_MAXLEN ];
+ u8 peermac[ ETH_ALEN ];
+};
+
+struct tx_invite_req_info{
+ u8 token;
+ u8 benable;
+ u8 go_ssid[ WLAN_SSID_MAXLEN ];
+ u8 ssidlen;
+ u8 go_bssid[ ETH_ALEN ];
+ u8 peer_macaddr[ ETH_ALEN ];
+ u8 operating_ch; /* This information will be set by using the p2p_set op_ch =x */
+ u8 peer_ch; /* The listen channel for peer P2P device */
+
+};
+
+struct tx_invite_resp_info{
+ u8 token; /* Used to record the dialog token of p2p invitation request frame. */
+};
+
+struct tx_provdisc_req_info{
+ u16 wps_config_method_request; /* Used when sending the provisioning request frame */
+	u16 peer_channel_num[2]; /* The channel number on which the receiver stands. */
+ struct ndis_802_11_ssid ssid;
+ u8 peerDevAddr[ ETH_ALEN ]; /* Peer device address */
+ u8 peerIFAddr[ ETH_ALEN ]; /* Peer interface address */
+	u8 benable; /* Whether this provision discovery request frame is triggered to be sent or not */
+};
+
+struct rx_provdisc_req_info{ /* When the peer device issues prov_disc_req first, we should store the following information */
+ u8 peerDevAddr[ ETH_ALEN ]; /* Peer device address */
+ u8 strconfig_method_desc_of_prov_disc_req[4]; /* description for the config method located in the provisioning discovery request frame. */
+ /* The UI must know this information to know which config method the remote p2p device is requiring. */
+};
+
+struct tx_nego_req_info{
+	u16 peer_channel_num[2]; /* The channel number on which the receiver stands. */
+ u8 peerDevAddr[ ETH_ALEN ]; /* Peer device address */
+	u8 benable; /* Whether this negotiation request frame is triggered to be sent or not */
+};
+
+struct group_id_info{
+ u8 go_device_addr[ ETH_ALEN ]; /* The GO's device address of this P2P group */
+ u8 ssid[ WLAN_SSID_MAXLEN ]; /* The SSID of this P2P group */
+};
+
+struct scan_limit_info{
+ u8 scan_op_ch_only; /* When this flag is set, the driver should just scan the operation channel */
+ u8 operation_ch[2]; /* Store the operation channel of invitation request frame */
+};
+
+struct cfg80211_wifidirect_info{
+ _timer remain_on_ch_timer;
+ u8 restore_channel;
+ struct ieee80211_channel remain_on_ch_channel;
+ enum nl80211_channel_type remain_on_ch_type;
+ u64 remain_on_ch_cookie;
+ bool is_ro_ch;
+ unsigned long last_ro_ch_time; /* this will be updated at the beginning and end of ro_ch */
+};
+
+struct wifidirect_info{
+ struct adapter * padapter;
+ _timer find_phase_timer;
+ _timer restore_p2p_state_timer;
+
+	/* Used to do the scanning. After confirming the peer is available, the driver transmits the P2P frame to peer. */
+ _timer pre_tx_scan_timer;
+ _timer reset_ch_sitesurvey;
+ _timer reset_ch_sitesurvey2; /* Just for resetting the scan limit function by using p2p nego */
+ struct tx_provdisc_req_info tx_prov_disc_info;
+ struct rx_provdisc_req_info rx_prov_disc_info;
+ struct tx_invite_req_info invitereq_info;
+ struct profile_info profileinfo[ P2P_MAX_PERSISTENT_GROUP_NUM ]; /* Store the profile information of persistent group */
+ struct tx_invite_resp_info inviteresp_info;
+ struct tx_nego_req_info nego_req_info;
+ struct group_id_info groupid_info; /* Store the group id information when doing the group negotiation handshake. */
+ struct scan_limit_info rx_invitereq_info; /* Used for get the limit scan channel from the Invitation procedure */
+ struct scan_limit_info p2p_info; /* Used for get the limit scan channel from the P2P negotiation handshake */
+ enum P2P_ROLE role;
+ enum P2P_STATE pre_p2p_state;
+ enum P2P_STATE p2p_state;
+ u8 device_addr[ETH_ALEN]; /* The device address should be the mac address of this device. */
+ u8 interface_addr[ETH_ALEN];
+ u8 social_chan[4];
+ u8 listen_channel;
+ u8 operating_channel;
+ u8 listen_dwell; /* This value should be between 1 and 3 */
+ u8 support_rate[8];
+ u8 p2p_wildcard_ssid[P2P_WILDCARD_SSID_LEN];
+ u8 intent; /* should only include the intent value. */
+ u8 p2p_peer_interface_addr[ ETH_ALEN ];
+ u8 p2p_peer_device_addr[ ETH_ALEN ];
+ u8 peer_intent; /* Included the intent value and tie breaker value. */
+ u8 device_name[ WPS_MAX_DEVICE_NAME_LEN ]; /* Device name for displaying on searching device screen */
+ u8 device_name_len;
+ u8 profileindex; /* Used to point to the index of profileinfo array */
+ u8 peer_operating_ch;
+ u8 find_phase_state_exchange_cnt;
+	u16 device_password_id_for_nego; /* The device password ID for group negotiation */
+	u8 negotiation_dialog_token;
+	u8 nego_ssid[ WLAN_SSID_MAXLEN ]; /* SSID information for group negotiation */
+ u8 nego_ssidlen;
+ u8 p2p_group_ssid[WLAN_SSID_MAXLEN];
+ u8 p2p_group_ssid_len;
+ u8 persistent_supported; /* Flag to know the persistent function should be supported or not. */
+ /* In the Sigma test, the Sigma will provide this enable from the sta_set_p2p CAPI. */
+ /* 0: disable */
+ /* 1: enable */
+ u8 session_available; /* Flag to set the WFD session available to enable or disable "by Sigma" */
+ /* In the Sigma test, the Sigma will disable the session available by using the sta_preset CAPI. */
+ /* 0: disable */
+ /* 1: enable */
+
+ u8 wfd_tdls_enable; /* Flag to enable or disable the TDLS by WFD Sigma */
+ /* 0: disable */
+ /* 1: enable */
+ u8 wfd_tdls_weaksec; /* Flag to enable or disable the weak security function for TDLS by WFD Sigma */
+ /* 0: disable */
+							/* In this case, the driver can't issue the tdls setup request frame. */
+ /* 1: enable */
+ /* In this case, the driver can issue the tdls setup request frame */
+ /* even the current security is weak security. */
+
+ enum P2P_WPSINFO ui_got_wps_info; /* This field will store the WPS value (PIN value or PBC) that UI had got from the user. */
+ u16 supported_wps_cm; /* This field describes the WPS config method which this driver supported. */
+ /* The value should be the combination of config method defined in page104 of WPS v2.0 spec. */
+ u8 external_uuid; /* UUID flag */
+ u8 uuid[16]; /* UUID */
+	uint channel_list_attr_len; /* This field will contain the length of body of P2P Channel List attribute of group negotiation response frame. */
+	u8 channel_list_attr[100]; /* This field will contain the body of P2P Channel List attribute of group negotiation response frame. */
+	/* We will use the channel_cnt and channel_list fields when constructing the group negotiation confirm frame. */
+ u8 driver_interface; /* Indicate DRIVER_WEXT or DRIVER_CFG80211 */
+};
+
+struct tdls_ss_record{ /* signal strength record */
+ u8 macaddr[ETH_ALEN];
+ u8 RxPWDBAll;
+ u8 is_tdls_sta; /* true: direct link sta, false: else */
+};
+
+struct tdls_info{
+ u8 ap_prohibited;
+ u8 link_established;
+ u8 sta_cnt;
+ u8 sta_maximum; /* 1:tdls sta is equal (NUM_STA-1), reach max direct link number; 0: else; */
+ struct tdls_ss_record ss_record;
+ u8 ch_sensing;
+ u8 cur_channel;
+ u8 candidate_ch;
+ u8 collect_pkt_num[MAX_CHANNEL_NUM];
+ _lock cmd_lock;
+ _lock hdl_lock;
+ u8 watchdog_count;
+ u8 dev_discovered; /* WFD_TDLS: for sigma test */
+ u8 tdls_enable;
+ u8 external_setup; /* true: setup is handled by wpa_supplicant */
+};
+
+struct tdls_txmgmt {
+ u8 peer[ETH_ALEN];
+ u8 action_code;
+ u8 dialog_token;
+ u16 status_code;
+ u8 *buf;
+ size_t len;
+ u8 external_support;
+};
+
+/* used for mlme_priv.roam_flags */
+enum {
+ RTW_ROAM_ON_EXPIRED = BIT0,
+ RTW_ROAM_ON_RESUME = BIT1,
+ RTW_ROAM_ACTIVE = BIT2,
+};
+
+struct mlme_priv {
+
+ _lock lock;
+ sint fw_state; /* shall we protect this variable? maybe not necessarily... */
+ u8 bScanInProcess;
+ u8 to_join; /* flag */
+
+ u8 to_roam; /* roaming trying times */
+ struct wlan_network *roam_network; /* the target of active roam */
+ u8 roam_flags;
+ u8 roam_rssi_diff_th; /* rssi difference threshold for active scan candidate selection */
+ u32 roam_scan_int_ms; /* scan interval for active roam */
+ u32 roam_scanr_exp_ms; /* scan result expire time in ms for roam */
+	u8 roam_tgt_addr[ETH_ALEN]; /* request to roam to specific target without other consideration */
+
+ u8 *nic_hdl;
+
+ u8 not_indic_disco;
+ struct list_head *pscanned;
+ struct __queue free_bss_pool;
+ struct __queue scanned_queue;
+ u8 *free_bss_buf;
+ u32 num_of_scanned;
+
+ struct ndis_802_11_ssid assoc_ssid;
+ u8 assoc_bssid[6];
+
+ struct wlan_network cur_network;
+ struct wlan_network *cur_network_scanned;
+
+	/* uint wireless_mode; not used, remove it */
+
+ u32 auto_scan_int_ms;
+
+ _timer assoc_timer;
+
+ uint assoc_by_bssid;
+ uint assoc_by_rssi;
+
+ _timer scan_to_timer; /* driver itself handles scan_timeout status. */
+ unsigned long scan_start_time; /* used to evaluate the time spent in scanning */
+
+ _timer set_scan_deny_timer;
+ atomic_t set_scan_deny; /* 0: allowed, 1: deny */
+
+ struct qos_priv qospriv;
+
+ /* Number of non-HT AP/stations */
+ int num_sta_no_ht;
+
+ /* Number of HT AP/stations 20 MHz */
+ /* int num_sta_ht_20mhz; */
+
+
+ int num_FortyMHzIntolerant;
+
+ struct ht_priv htpriv;
+
+ RT_LINK_DETECT_T LinkDetectInfo;
+ _timer dynamic_chk_timer; /* dynamic/periodic check timer */
+
+ u8 acm_mask; /* for wmm acm mask */
+ u8 ChannelPlan;
+ RT_SCAN_TYPE scan_mode; /* active: 1, passive: 0 */
+
+ u8 *wps_probe_req_ie;
+ u32 wps_probe_req_ie_len;
+
+ /* Number of associated Non-ERP stations (i.e., stations using 802.11b
+ * in 802.11g BSS) */
+ int num_sta_non_erp;
+
+ /* Number of associated stations that do not support Short Slot Time */
+ int num_sta_no_short_slot_time;
+
+ /* Number of associated stations that do not support Short Preamble */
+ int num_sta_no_short_preamble;
+
+ int olbc; /* Overlapping Legacy BSS Condition */
+
+ /* Number of HT associated stations that do not support greenfield */
+ int num_sta_ht_no_gf;
+
+ /* Number of associated non-HT stations */
+ /* int num_sta_no_ht; */
+
+ /* Number of HT associated stations 20 MHz */
+ int num_sta_ht_20mhz;
+
+ /* Overlapping BSS information */
+ int olbc_ht;
+
+ u16 ht_op_mode;
+
+ u8 *assoc_req;
+ u32 assoc_req_len;
+ u8 *assoc_rsp;
+ u32 assoc_rsp_len;
+
+ u8 *wps_beacon_ie;
+ /* u8 *wps_probe_req_ie; */
+ u8 *wps_probe_resp_ie;
+ u8 *wps_assoc_resp_ie; /* for CONFIG_IOCTL_CFG80211, this IE could include p2p ie / wfd ie */
+
+ u32 wps_beacon_ie_len;
+ /* u32 wps_probe_req_ie_len; */
+ u32 wps_probe_resp_ie_len;
+ u32 wps_assoc_resp_ie_len; /* for CONFIG_IOCTL_CFG80211, this IE len could include p2p ie / wfd ie */
+
+ u8 *p2p_beacon_ie;
+ u8 *p2p_probe_req_ie;
+ u8 *p2p_probe_resp_ie;
+ u8 *p2p_go_probe_resp_ie; /* for GO */
+ u8 *p2p_assoc_req_ie;
+
+ u32 p2p_beacon_ie_len;
+ u32 p2p_probe_req_ie_len;
+ u32 p2p_probe_resp_ie_len;
+ u32 p2p_go_probe_resp_ie_len; /* for GO */
+ u32 p2p_assoc_req_ie_len;
+
+ _lock bcn_update_lock;
+ u8 update_bcn;
+
+#ifdef CONFIG_INTEL_WIDI
+ int widi_state;
+ int listen_state;
+ _timer listen_timer;
+	atomic_t rx_probe_rsp; /* 1:receive probe response from RDS source. */
+ u8 *l2sdTaBuffer;
+ u8 channel_idx;
+ u8 group_cnt; /* In WiDi 3.5, they specified another scan algo. for WFD/RDS co-existed */
+ u8 sa_ext[L2SDTA_SERVICE_VE_LEN];
+
+ u8 widi_enable;
+ /**
+ * For WiDi 4; upper layer would set
+ * p2p_primary_device_type_category_id
+ * p2p_primary_device_type_sub_category_id
+ * p2p_secondary_device_type_category_id
+ * p2p_secondary_device_type_sub_category_id
+ */
+ u16 p2p_pdt_cid;
+ u16 p2p_pdt_scid;
+ u8 num_p2p_sdt;
+ u16 p2p_sdt_cid[MAX_NUM_P2P_SDT];
+ u16 p2p_sdt_scid[MAX_NUM_P2P_SDT];
+ u8 p2p_reject_disable; /* When starting NL80211 wpa_supplicant/hostapd, it will call netdev_close */
+ /* such that it will cause p2p disabled. Use this flag to reject. */
+#endif /* CONFIG_INTEL_WIDI */
+
+ u8 NumOfBcnInfoChkFail;
+ unsigned long timeBcnInfoChkStart;
+};
+
+#define rtw_mlme_set_auto_scan_int(adapter, ms) \
+ do { \
+ adapter->mlmepriv.auto_scan_int_ms = ms; \
+	} while (0)
+
+void rtw_mlme_reset_auto_scan_int(struct adapter *adapter);
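A hypothetical caller sketch for the pair above (example_tune_auto_scan is an invented name, the interval is arbitrary); the do/while wrapper lets the macro be used like an ordinary statement:

static void example_tune_auto_scan(struct adapter *adapter)
{
	rtw_mlme_set_auto_scan_int(adapter, 30 * 1000);	/* e.g. 30 seconds */
	/* ... */
	rtw_mlme_reset_auto_scan_int(adapter);		/* back to the default */
}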
+
+struct hostapd_priv
+{
+ struct adapter *padapter;
+};
+
+extern int hostapd_mode_init(struct adapter *padapter);
+extern void hostapd_mode_unload(struct adapter *padapter);
+
+extern void rtw_joinbss_event_prehandle(struct adapter *adapter, u8 *pbuf);
+extern void rtw_survey_event_callback(struct adapter *adapter, u8 *pbuf);
+extern void rtw_surveydone_event_callback(struct adapter *adapter, u8 *pbuf);
+extern void rtw_joinbss_event_callback(struct adapter *adapter, u8 *pbuf);
+extern void rtw_stassoc_event_callback(struct adapter *adapter, u8 *pbuf);
+extern void rtw_stadel_event_callback(struct adapter *adapter, u8 *pbuf);
+extern void rtw_atimdone_event_callback(struct adapter *adapter, u8 *pbuf);
+extern void rtw_cpwm_event_callback(struct adapter *adapter, u8 *pbuf);
+extern void rtw_wmm_event_callback(struct adapter *padapter, u8 *pbuf);
+
+extern void rtw_join_timeout_handler(RTW_TIMER_HDL_ARGS);
+extern void _rtw_scan_timeout_handler(RTW_TIMER_HDL_ARGS);
+
+int event_thread(void *context);
+
+extern void rtw_free_network_queue(struct adapter *adapter, u8 isfreeall);
+extern int rtw_init_mlme_priv(struct adapter *adapter);/* (struct mlme_priv *pmlmepriv); */
+
+extern void rtw_free_mlme_priv (struct mlme_priv *pmlmepriv);
+
+
+extern sint rtw_select_and_join_from_scanned_queue(struct mlme_priv *pmlmepriv);
+extern sint rtw_set_key(struct adapter *adapter, struct security_priv *psecuritypriv, sint keyid, u8 set_tx, bool enqueue);
+extern sint rtw_set_auth(struct adapter *adapter, struct security_priv *psecuritypriv);
+
+__inline static u8 *get_bssid(struct mlme_priv *pmlmepriv)
+{ /* if sta_mode:pmlmepriv->cur_network.network.MacAddress => bssid */
+ /* if adhoc_mode:pmlmepriv->cur_network.network.MacAddress => ibss mac address */
+ return pmlmepriv->cur_network.network.MacAddress;
+}
+
+__inline static sint check_fwstate(struct mlme_priv *pmlmepriv, sint state)
+{
+ if (pmlmepriv->fw_state & state)
+ return true;
+
+ return false;
+}
+
+__inline static sint get_fwstate(struct mlme_priv *pmlmepriv)
+{
+ return pmlmepriv->fw_state;
+}
+
+/*
+ * No Limit on the calling context,
+ * therefore set it to be the critical section...
+ *
+ * ### NOTE:#### (!!!!)
+ * MUST TAKE CARE THAT BEFORE CALLING THIS FUNC, YOU SHOULD HAVE LOCKED pmlmepriv->lock
+ */
+__inline static void set_fwstate(struct mlme_priv *pmlmepriv, sint state)
+{
+ pmlmepriv->fw_state |= state;
+ /* FOR HW integration */
+ if (_FW_UNDER_SURVEY ==state) {
+ pmlmepriv->bScanInProcess = true;
+ }
+}
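A hypothetical caller sketch honoring the note above (example_enter_survey_state is an invented name): pmlmepriv->lock is taken before set_fwstate()/_clr_fwstate_() is called.

static void example_enter_survey_state(struct mlme_priv *pmlmepriv)
{
	spin_lock_bh(&pmlmepriv->lock);
	set_fwstate(pmlmepriv, _FW_UNDER_SURVEY);
	spin_unlock_bh(&pmlmepriv->lock);
}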
+
+__inline static void _clr_fwstate_(struct mlme_priv *pmlmepriv, sint state)
+{
+ pmlmepriv->fw_state &= ~state;
+ /* FOR HW integration */
+ if (_FW_UNDER_SURVEY ==state) {
+ pmlmepriv->bScanInProcess = false;
+ }
+}
+
+/*
+ * No Limit on the calling context,
+ * therefore set it to be the critical section...
+ */
+__inline static void clr_fwstate(struct mlme_priv *pmlmepriv, sint state)
+{
+ spin_lock_bh(&pmlmepriv->lock);
+ if (check_fwstate(pmlmepriv, state) == true)
+ pmlmepriv->fw_state ^= state;
+ spin_unlock_bh(&pmlmepriv->lock);
+}
+
+__inline static void set_scanned_network_val(struct mlme_priv *pmlmepriv, sint val)
+{
+ spin_lock_bh(&pmlmepriv->lock);
+ pmlmepriv->num_of_scanned = val;
+ spin_unlock_bh(&pmlmepriv->lock);
+}
+
+extern u16 rtw_get_capability(struct wlan_bssid_ex *bss);
+extern void rtw_update_scanned_network(struct adapter *adapter, struct wlan_bssid_ex *target);
+extern void rtw_disconnect_hdl_under_linked(struct adapter * adapter, struct sta_info *psta, u8 free_assoc);
+extern void rtw_generate_random_ibss(u8 *pibss);
+extern struct wlan_network* rtw_find_network(struct __queue *scanned_queue, u8 *addr);
+extern struct wlan_network* rtw_get_oldest_wlan_network(struct __queue *scanned_queue);
+struct wlan_network *_rtw_find_same_network(struct __queue *scanned_queue, struct wlan_network *network);
+
+extern void rtw_free_assoc_resources(struct adapter * adapter, int lock_scanned_queue);
+extern void rtw_indicate_disconnect(struct adapter * adapter);
+extern void rtw_indicate_connect(struct adapter * adapter);
+void rtw_indicate_scan_done(struct adapter *padapter, bool aborted);
+void rtw_scan_abort(struct adapter *adapter);
+
+extern int rtw_restruct_sec_ie(struct adapter *adapter, u8 *in_ie, u8 *out_ie, uint in_len);
+extern int rtw_restruct_wmm_ie(struct adapter *adapter, u8 *in_ie, u8 *out_ie, uint in_len, uint initial_out_len);
+extern void rtw_init_registrypriv_dev_network(struct adapter *adapter);
+
+extern void rtw_update_registrypriv_dev_network(struct adapter *adapter);
+
+extern void rtw_get_encrypt_decrypt_from_registrypriv(struct adapter *adapter);
+
+extern void _rtw_join_timeout_handler(struct adapter *adapter);
+extern void rtw_scan_timeout_handler(struct adapter *adapter);
+
+extern void rtw_dynamic_check_timer_handlder(struct adapter *adapter);
+bool rtw_is_scan_deny(struct adapter *adapter);
+void rtw_clear_scan_deny(struct adapter *adapter);
+void rtw_set_scan_deny_timer_hdl(struct adapter *adapter);
+void rtw_set_scan_deny(struct adapter *adapter, u32 ms);
+
+extern int _rtw_init_mlme_priv(struct adapter *padapter);
+
+void rtw_free_mlme_priv_ie_data(struct mlme_priv *pmlmepriv);
+
+extern void _rtw_free_mlme_priv(struct mlme_priv *pmlmepriv);
+
+/* extern struct wlan_network* _rtw_dequeue_network(struct __queue *queue); */
+
+extern struct wlan_network* _rtw_alloc_network(struct mlme_priv *pmlmepriv);
+
+
+extern void _rtw_free_network(struct mlme_priv *pmlmepriv, struct wlan_network *pnetwork, u8 isfreeall);
+extern void _rtw_free_network_nolock(struct mlme_priv *pmlmepriv, struct wlan_network *pnetwork);
+
+
+extern struct wlan_network* _rtw_find_network(struct __queue *scanned_queue, u8 *addr);
+
+extern void _rtw_free_network_queue(struct adapter *padapter, u8 isfreeall);
+
+extern sint rtw_if_up(struct adapter *padapter);
+
+sint rtw_linked_check(struct adapter *padapter);
+
+u8 *rtw_get_capability_from_ie(u8 *ie);
+u8 *rtw_get_beacon_interval_from_ie(u8 *ie);
+
+
+void rtw_joinbss_reset(struct adapter *padapter);
+
+void rtw_ht_use_default_setting(struct adapter *padapter);
+void rtw_build_wmm_ie_ht(struct adapter *padapter, u8 *out_ie, uint *pout_len);
+unsigned int rtw_restructure_ht_ie(struct adapter *padapter, u8 *in_ie, u8 *out_ie, uint in_len, uint *pout_len, u8 channel);
+void rtw_update_ht_cap(struct adapter *padapter, u8 *pie, uint ie_len, u8 channel);
+void rtw_issue_addbareq_cmd(struct adapter *padapter, struct xmit_frame *pxmitframe);
+void rtw_append_exented_cap(struct adapter *padapter, u8 *out_ie, uint *pout_len);
+
+int rtw_is_same_ibss(struct adapter *adapter, struct wlan_network *pnetwork);
+int is_same_network(struct wlan_bssid_ex *src, struct wlan_bssid_ex *dst, u8 feature);
+
+#define rtw_roam_flags(adapter) ((adapter)->mlmepriv.roam_flags)
+#define rtw_chk_roam_flags(adapter, flags) ((adapter)->mlmepriv.roam_flags & flags)
+#define rtw_clr_roam_flags(adapter, flags) \
+ do { \
+ ((adapter)->mlmepriv.roam_flags &= ~flags); \
+ } while (0)
+
+#define rtw_set_roam_flags(adapter, flags) \
+ do { \
+ ((adapter)->mlmepriv.roam_flags |= flags); \
+ } while (0)
+
+#define rtw_assign_roam_flags(adapter, flags) \
+ do { \
+ ((adapter)->mlmepriv.roam_flags = flags); \
+ } while (0)
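A hypothetical sketch (example_setup_roaming is an invented name, and the retry count is arbitrary) of how the roam-flag helpers above combine with the RTW_ROAM_* bits defined earlier in this header:

/* Illustrative only: enable roaming on resume, or disable roaming entirely. */
static void example_setup_roaming(struct adapter *adapter, bool allow)
{
	if (!allow) {
		rtw_assign_roam_flags(adapter, 0);
		return;
	}

	rtw_set_roam_flags(adapter, RTW_ROAM_ON_RESUME);
	if (rtw_chk_roam_flags(adapter, RTW_ROAM_ON_RESUME))
		rtw_set_to_roam(adapter, 3);	/* arbitrary trying-times value */
}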
+
+void _rtw_roaming(struct adapter *adapter, struct wlan_network *tgt_network);
+void rtw_roaming(struct adapter *adapter, struct wlan_network *tgt_network);
+void rtw_set_to_roam(struct adapter *adapter, u8 to_roam);
+u8 rtw_dec_to_roam(struct adapter *adapter);
+u8 rtw_to_roam(struct adapter *adapter);
+int rtw_select_roaming_candidate(struct mlme_priv *pmlmepriv);
+
+void rtw_sta_media_status_rpt(struct adapter *adapter, struct sta_info *psta, u32 mstatus);
+
+#endif /* __RTW_MLME_H_ */
diff --git a/drivers/staging/rtl8723bs/include/rtw_mlme_ext.h b/drivers/staging/rtl8723bs/include/rtw_mlme_ext.h
new file mode 100644
index 000000000000..f3952463697e
--- /dev/null
+++ b/drivers/staging/rtl8723bs/include/rtw_mlme_ext.h
@@ -0,0 +1,888 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#ifndef __RTW_MLME_EXT_H_
+#define __RTW_MLME_EXT_H_
+
+
+/* Commented by Albert 20101105 */
+/* Increase the SURVEY_TO value from 100 to 150 (100ms to 150ms) */
+/* The Realtek 8188CE SoftAP will spend around 100ms to send the probe response after receiving the probe request. */
+/* So, this driver tried to extend the dwell time for each scanning channel. */
+/* This will increase the chance to receive the probe response from SoftAP. */
+
+#define SURVEY_TO (100)
+#define REAUTH_TO (300) /* 50) */
+#define REASSOC_TO (300) /* 50) */
+/* define DISCONNECT_TO (3000) */
+#define ADDBA_TO (2000)
+
+#define LINKED_TO (1) /* unit:2 sec, 1x2 =2 sec */
+
+#define REAUTH_LIMIT (4)
+#define REASSOC_LIMIT (4)
+#define READDBA_LIMIT (2)
+
+#define ROAMING_LIMIT 8
+/* define IOCMD_REG0 0x10250370 */
+/* define IOCMD_REG1 0x10250374 */
+/* define IOCMD_REG2 0x10250378 */
+
+/* define FW_DYNAMIC_FUN_SWITCH 0x10250364 */
+
+/* define WRITE_BB_CMD 0xF0000001 */
+/* define SET_CHANNEL_CMD 0xF3000000 */
+/* define UPDATE_RA_CMD 0xFD0000A2 */
+
+#define DYNAMIC_FUNC_DISABLE (0x0)
+
+/* ====== ODM_ABILITY_E ======== */
+/* BB ODM section BIT 0-15 */
+#define DYNAMIC_BB_DIG BIT0 /* ODM_BB_DIG */
+#define DYNAMIC_BB_RA_MASK BIT1 /* ODM_BB_RA_MASK */
+#define DYNAMIC_BB_DYNAMIC_TXPWR BIT2 /* ODM_BB_DYNAMIC_TXPWR */
+#define DYNAMIC_BB_BB_FA_CNT BIT3 /* ODM_BB_FA_CNT */
+#define DYNAMIC_BB_RSSI_MONITOR BIT4 /* ODM_BB_RSSI_MONITOR */
+#define DYNAMIC_BB_CCK_PD BIT5 /* ODM_BB_CCK_PD */
+#define DYNAMIC_BB_ANT_DIV BIT6 /* ODM_BB_ANT_DIV */
+#define DYNAMIC_BB_PWR_SAVE BIT7 /* ODM_BB_PWR_SAVE */
+#define DYNAMIC_BB_PWR_TRAIN BIT8 /* ODM_BB_PWR_TRAIN */
+#define DYNAMIC_BB_RATE_ADAPTIVE BIT9 /* ODM_BB_RATE_ADAPTIVE */
+#define DYNAMIC_BB_PATH_DIV BIT10/* ODM_BB_PATH_DIV */
+#define DYNAMIC_BB_PSD BIT11/* ODM_BB_PSD */
+#define DYNAMIC_BB_RXHP BIT12/* ODM_BB_RXHP */
+#define DYNAMIC_BB_ADAPTIVITY BIT13/* ODM_BB_ADAPTIVITY */
+#define DYNAMIC_BB_DYNAMIC_ATC BIT14/* ODM_BB_DYNAMIC_ATC */
+
+/* MAC DM section BIT 16-23 */
+#define DYNAMIC_MAC_EDCA_TURBO BIT16/* ODM_MAC_EDCA_TURBO */
+#define DYNAMIC_MAC_EARLY_MODE BIT17/* ODM_MAC_EARLY_MODE */
+
+/* RF ODM section BIT 24-31 */
+#define DYNAMIC_RF_TX_PWR_TRACK BIT24/* ODM_RF_TX_PWR_TRACK */
+#define DYNAMIC_RF_RX_GAIN_TRACK BIT25/* ODM_RF_RX_GAIN_TRACK */
+#define DYNAMIC_RF_CALIBRATION BIT26/* ODM_RF_CALIBRATION */
+
+#define DYNAMIC_ALL_FUNC_ENABLE 0xFFFFFFF
+
+#define _HW_STATE_NOLINK_ 0x00
+#define _HW_STATE_ADHOC_ 0x01
+#define _HW_STATE_STATION_ 0x02
+#define _HW_STATE_AP_ 0x03
+
+
+#define _1M_RATE_ 0
+#define _2M_RATE_ 1
+#define _5M_RATE_ 2
+#define _11M_RATE_ 3
+#define _6M_RATE_ 4
+#define _9M_RATE_ 5
+#define _12M_RATE_ 6
+#define _18M_RATE_ 7
+#define _24M_RATE_ 8
+#define _36M_RATE_ 9
+#define _48M_RATE_ 10
+#define _54M_RATE_ 11
+
+/********************************************************
+MCS rate definitions
+*********************************************************/
+#define MCS_RATE_1R (0x000000ff)
+#define MCS_RATE_2R (0x0000ffff)
+#define MCS_RATE_3R (0x00ffffff)
+#define MCS_RATE_4R (0xffffffff)
+#define MCS_RATE_2R_13TO15_OFF (0x00001fff)
+
+
+extern unsigned char RTW_WPA_OUI[];
+extern unsigned char WMM_OUI[];
+extern unsigned char WPS_OUI[];
+extern unsigned char WFD_OUI[];
+extern unsigned char P2P_OUI[];
+
+extern unsigned char WMM_INFO_OUI[];
+extern unsigned char WMM_PARA_OUI[];
+
+
+/* */
+/* Channel Plan Type. */
+/* Note: */
+/* We just add a new channel plan when the new channel plan is different from any of the following */
+/* channel plans. */
+/* If you just want to customize the actions (scan period or join actions) of one of the channel plans, */
+/* customize them in RT_CHANNEL_INFO in the RT_CHANNEL_LIST. */
+/* */
+typedef enum _RT_CHANNEL_DOMAIN
+{
+ /* old channel plan mapping ===== */
+ RT_CHANNEL_DOMAIN_FCC = 0x00,
+ RT_CHANNEL_DOMAIN_IC = 0x01,
+ RT_CHANNEL_DOMAIN_ETSI = 0x02,
+ RT_CHANNEL_DOMAIN_SPAIN = 0x03,
+ RT_CHANNEL_DOMAIN_FRANCE = 0x04,
+ RT_CHANNEL_DOMAIN_MKK = 0x05,
+ RT_CHANNEL_DOMAIN_MKK1 = 0x06,
+ RT_CHANNEL_DOMAIN_ISRAEL = 0x07,
+ RT_CHANNEL_DOMAIN_TELEC = 0x08,
+ RT_CHANNEL_DOMAIN_GLOBAL_DOAMIN = 0x09,
+ RT_CHANNEL_DOMAIN_WORLD_WIDE_13 = 0x0A,
+ RT_CHANNEL_DOMAIN_TAIWAN = 0x0B,
+ RT_CHANNEL_DOMAIN_CHINA = 0x0C,
+ RT_CHANNEL_DOMAIN_SINGAPORE_INDIA_MEXICO = 0x0D,
+ RT_CHANNEL_DOMAIN_KOREA = 0x0E,
+ RT_CHANNEL_DOMAIN_TURKEY = 0x0F,
+ RT_CHANNEL_DOMAIN_JAPAN = 0x10,
+ RT_CHANNEL_DOMAIN_FCC_NO_DFS = 0x11,
+ RT_CHANNEL_DOMAIN_JAPAN_NO_DFS = 0x12,
+ RT_CHANNEL_DOMAIN_WORLD_WIDE_5G = 0x13,
+ RT_CHANNEL_DOMAIN_TAIWAN_NO_DFS = 0x14,
+
+ /* new channel plan mapping, (2GDOMAIN_5GDOMAIN) ===== */
+ RT_CHANNEL_DOMAIN_WORLD_NULL = 0x20,
+ RT_CHANNEL_DOMAIN_ETSI1_NULL = 0x21,
+ RT_CHANNEL_DOMAIN_FCC1_NULL = 0x22,
+ RT_CHANNEL_DOMAIN_MKK1_NULL = 0x23,
+ RT_CHANNEL_DOMAIN_ETSI2_NULL = 0x24,
+ RT_CHANNEL_DOMAIN_FCC1_FCC1 = 0x25,
+ RT_CHANNEL_DOMAIN_WORLD_ETSI1 = 0x26,
+ RT_CHANNEL_DOMAIN_MKK1_MKK1 = 0x27,
+ RT_CHANNEL_DOMAIN_WORLD_KCC1 = 0x28,
+ RT_CHANNEL_DOMAIN_WORLD_FCC2 = 0x29,
+ RT_CHANNEL_DOMAIN_WORLD_FCC3 = 0x30,
+ RT_CHANNEL_DOMAIN_WORLD_FCC4 = 0x31,
+ RT_CHANNEL_DOMAIN_WORLD_FCC5 = 0x32,
+ RT_CHANNEL_DOMAIN_WORLD_FCC6 = 0x33,
+ RT_CHANNEL_DOMAIN_FCC1_FCC7 = 0x34,
+ RT_CHANNEL_DOMAIN_WORLD_ETSI2 = 0x35,
+ RT_CHANNEL_DOMAIN_WORLD_ETSI3 = 0x36,
+ RT_CHANNEL_DOMAIN_MKK1_MKK2 = 0x37,
+ RT_CHANNEL_DOMAIN_MKK1_MKK3 = 0x38,
+ RT_CHANNEL_DOMAIN_FCC1_NCC1 = 0x39,
+ RT_CHANNEL_DOMAIN_FCC1_NCC2 = 0x40,
+ RT_CHANNEL_DOMAIN_GLOBAL_NULL = 0x41,
+ RT_CHANNEL_DOMAIN_ETSI1_ETSI4 = 0x42,
+ RT_CHANNEL_DOMAIN_FCC1_FCC2 = 0x43,
+ RT_CHANNEL_DOMAIN_FCC1_NCC3 = 0x44,
+ RT_CHANNEL_DOMAIN_WORLD_ETSI5 = 0x45,
+ RT_CHANNEL_DOMAIN_FCC1_FCC8 = 0x46,
+ RT_CHANNEL_DOMAIN_WORLD_ETSI6 = 0x47,
+ RT_CHANNEL_DOMAIN_WORLD_ETSI7 = 0x48,
+ RT_CHANNEL_DOMAIN_WORLD_ETSI8 = 0x49,
+ RT_CHANNEL_DOMAIN_WORLD_ETSI9 = 0x50,
+ RT_CHANNEL_DOMAIN_WORLD_ETSI10 = 0x51,
+ RT_CHANNEL_DOMAIN_WORLD_ETSI11 = 0x52,
+ RT_CHANNEL_DOMAIN_FCC1_NCC4 = 0x53,
+ RT_CHANNEL_DOMAIN_WORLD_ETSI12 = 0x54,
+ RT_CHANNEL_DOMAIN_FCC1_FCC9 = 0x55,
+ RT_CHANNEL_DOMAIN_WORLD_ETSI13 = 0x56,
+ RT_CHANNEL_DOMAIN_FCC1_FCC10 = 0x57,
+ /* Add new channel plan above this line =============== */
+ RT_CHANNEL_DOMAIN_MAX,
+ RT_CHANNEL_DOMAIN_REALTEK_DEFINE = 0x7F,
+}RT_CHANNEL_DOMAIN, *PRT_CHANNEL_DOMAIN;
+
+typedef enum _RT_CHANNEL_DOMAIN_2G
+{
+	RT_CHANNEL_DOMAIN_2G_WORLD = 0x00, /* Worldwide 13 */
+ RT_CHANNEL_DOMAIN_2G_ETSI1 = 0x01, /* Europe */
+ RT_CHANNEL_DOMAIN_2G_FCC1 = 0x02, /* US */
+ RT_CHANNEL_DOMAIN_2G_MKK1 = 0x03, /* Japan */
+ RT_CHANNEL_DOMAIN_2G_ETSI2 = 0x04, /* France */
+ RT_CHANNEL_DOMAIN_2G_GLOBAL = 0x05, /* Global domain */
+ RT_CHANNEL_DOMAIN_2G_NULL = 0x06,
+ /* Add new channel plan above this line =============== */
+ RT_CHANNEL_DOMAIN_2G_MAX,
+}RT_CHANNEL_DOMAIN_2G, *PRT_CHANNEL_DOMAIN_2G;
+
+typedef enum _RT_CHANNEL_DOMAIN_5G
+{
+ RT_CHANNEL_DOMAIN_5G_NULL = 0x00,
+ RT_CHANNEL_DOMAIN_5G_ETSI1 = 0x01, /* Europe */
+ RT_CHANNEL_DOMAIN_5G_ETSI2 = 0x02, /* Australia, New Zealand */
+ RT_CHANNEL_DOMAIN_5G_ETSI3 = 0x03, /* Russia */
+ RT_CHANNEL_DOMAIN_5G_FCC1 = 0x04, /* US */
+ RT_CHANNEL_DOMAIN_5G_FCC2 = 0x05, /* FCC o/w DFS Channels */
+ RT_CHANNEL_DOMAIN_5G_FCC3 = 0x06, /* India, Mexico */
+ RT_CHANNEL_DOMAIN_5G_FCC4 = 0x07, /* Venezuela */
+ RT_CHANNEL_DOMAIN_5G_FCC5 = 0x08, /* China */
+ RT_CHANNEL_DOMAIN_5G_FCC6 = 0x09, /* Israel */
+ RT_CHANNEL_DOMAIN_5G_FCC7_IC1 = 0x0A, /* US, Canada */
+ RT_CHANNEL_DOMAIN_5G_KCC1 = 0x0B, /* Korea */
+ RT_CHANNEL_DOMAIN_5G_MKK1 = 0x0C, /* Japan */
+ RT_CHANNEL_DOMAIN_5G_MKK2 = 0x0D, /* Japan (W52, W53) */
+ RT_CHANNEL_DOMAIN_5G_MKK3 = 0x0E, /* Japan (W56) */
+ RT_CHANNEL_DOMAIN_5G_NCC1 = 0x0F, /* Taiwan */
+ RT_CHANNEL_DOMAIN_5G_NCC2 = 0x10, /* Taiwan o/w DFS */
+ RT_CHANNEL_DOMAIN_5G_NCC3 = 0x11, /* Taiwan w/o DFS, Band4 only */
+ RT_CHANNEL_DOMAIN_5G_ETSI4 = 0x12, /* Europe w/o DFS, Band1 only */
+ RT_CHANNEL_DOMAIN_5G_ETSI5 = 0x13, /* Australia, New Zealand(w/o Weather radar) */
+ RT_CHANNEL_DOMAIN_5G_FCC8 = 0x14, /* Latin America */
+ RT_CHANNEL_DOMAIN_5G_ETSI6 = 0x15, /* Israel, Bahrain, Egypt, India, China, Malaysia */
+ RT_CHANNEL_DOMAIN_5G_ETSI7 = 0x16, /* China */
+ RT_CHANNEL_DOMAIN_5G_ETSI8 = 0x17, /* Jordan */
+ RT_CHANNEL_DOMAIN_5G_ETSI9 = 0x18, /* Lebanon */
+ RT_CHANNEL_DOMAIN_5G_ETSI10 = 0x19, /* Qatar */
+ RT_CHANNEL_DOMAIN_5G_ETSI11 = 0x1A, /* Russia */
+ RT_CHANNEL_DOMAIN_5G_NCC4 = 0x1B, /* Taiwan, (w/o Weather radar) */
+ RT_CHANNEL_DOMAIN_5G_ETSI12 = 0x1C, /* Indonesia */
+	RT_CHANNEL_DOMAIN_5G_FCC9 = 0x1D, /* (w/o Weather radar) */
+	RT_CHANNEL_DOMAIN_5G_ETSI13 = 0x1E, /* (w/o Weather radar) */
+ RT_CHANNEL_DOMAIN_5G_FCC10 = 0x1F, /* Argentina (w/o Weather radar) */
+ /* Add new channel plan above this line =============== */
+ /* Driver Self Defined ===== */
+ RT_CHANNEL_DOMAIN_5G_FCC = 0x20,
+ RT_CHANNEL_DOMAIN_5G_JAPAN_NO_DFS = 0x21,
+ RT_CHANNEL_DOMAIN_5G_FCC4_NO_DFS = 0x22,
+ RT_CHANNEL_DOMAIN_5G_MAX,
+}RT_CHANNEL_DOMAIN_5G, *PRT_CHANNEL_DOMAIN_5G;
+
+#define rtw_is_channel_plan_valid(chplan) (chplan<RT_CHANNEL_DOMAIN_MAX || chplan == RT_CHANNEL_DOMAIN_REALTEK_DEFINE)
+
+typedef struct _RT_CHANNEL_PLAN
+{
+ unsigned char Channel[MAX_CHANNEL_NUM];
+ unsigned char Len;
+}RT_CHANNEL_PLAN, *PRT_CHANNEL_PLAN;
+
+typedef struct _RT_CHANNEL_PLAN_2G
+{
+ unsigned char Channel[MAX_CHANNEL_NUM_2G];
+ unsigned char Len;
+}RT_CHANNEL_PLAN_2G, *PRT_CHANNEL_PLAN_2G;
+
+typedef struct _RT_CHANNEL_PLAN_5G
+{
+ unsigned char Channel[MAX_CHANNEL_NUM_5G];
+ unsigned char Len;
+}RT_CHANNEL_PLAN_5G, *PRT_CHANNEL_PLAN_5G;
+
+typedef struct _RT_CHANNEL_PLAN_MAP
+{
+ unsigned char Index2G;
+ unsigned char Index5G;
+}RT_CHANNEL_PLAN_MAP, *PRT_CHANNEL_PLAN_MAP;
+
+enum Associated_AP
+{
+ atherosAP = 0,
+ broadcomAP = 1,
+ ciscoAP = 2,
+ marvellAP = 3,
+ ralinkAP = 4,
+ realtekAP = 5,
+ airgocapAP = 6,
+ unknownAP = 7,
+ maxAP,
+};
+
+typedef enum _HT_IOT_PEER
+{
+ HT_IOT_PEER_UNKNOWN = 0,
+ HT_IOT_PEER_REALTEK = 1,
+ HT_IOT_PEER_REALTEK_92SE = 2,
+ HT_IOT_PEER_BROADCOM = 3,
+ HT_IOT_PEER_RALINK = 4,
+ HT_IOT_PEER_ATHEROS = 5,
+ HT_IOT_PEER_CISCO = 6,
+ HT_IOT_PEER_MERU = 7,
+ HT_IOT_PEER_MARVELL = 8,
+ HT_IOT_PEER_REALTEK_SOFTAP = 9,/* peer is RealTek SOFT_AP, by Bohn, 2009.12.17 */
+ HT_IOT_PEER_SELF_SOFTAP = 10, /* Self is SoftAP */
+ HT_IOT_PEER_AIRGO = 11,
+ HT_IOT_PEER_INTEL = 12,
+ HT_IOT_PEER_RTK_APCLIENT = 13,
+ HT_IOT_PEER_REALTEK_81XX = 14,
+ HT_IOT_PEER_REALTEK_WOW = 15,
+ HT_IOT_PEER_REALTEK_JAGUAR_BCUTAP = 16,
+ HT_IOT_PEER_REALTEK_JAGUAR_CCUTAP = 17,
+ HT_IOT_PEER_MAX = 18
+}HT_IOT_PEER_E, *PHTIOT_PEER_E;
+
+
+enum SCAN_STATE
+{
+ SCAN_DISABLE = 0,
+ SCAN_START = 1,
+ SCAN_TXNULL = 2,
+ SCAN_PROCESS = 3,
+ SCAN_COMPLETE = 4,
+ SCAN_STATE_MAX,
+};
+
+struct mlme_handler {
+ unsigned int num;
+ char* str;
+ unsigned int (*func)(struct adapter *padapter, union recv_frame *precv_frame);
+};
+
+struct action_handler {
+ unsigned int num;
+ char* str;
+ unsigned int (*func)(struct adapter *padapter, union recv_frame *precv_frame);
+};
+
+struct ss_res
+{
+ int state;
+ int bss_cnt;
+ int channel_idx;
+ int scan_mode;
+ u8 ssid_num;
+ u8 ch_num;
+ struct ndis_802_11_ssid ssid[RTW_SSID_SCAN_AMOUNT];
+ struct rtw_ieee80211_channel ch[RTW_CHANNEL_SCAN_AMOUNT];
+};
+
+/* define AP_MODE 0x0C */
+/* define STATION_MODE 0x08 */
+/* define AD_HOC_MODE 0x04 */
+/* define NO_LINK_MODE 0x00 */
+
+#define WIFI_FW_NULL_STATE _HW_STATE_NOLINK_
+#define WIFI_FW_STATION_STATE _HW_STATE_STATION_
+#define WIFI_FW_AP_STATE _HW_STATE_AP_
+#define WIFI_FW_ADHOC_STATE _HW_STATE_ADHOC_
+
+#define WIFI_FW_AUTH_NULL 0x00000100
+#define WIFI_FW_AUTH_STATE 0x00000200
+#define WIFI_FW_AUTH_SUCCESS 0x00000400
+
+#define WIFI_FW_ASSOC_STATE 0x00002000
+#define WIFI_FW_ASSOC_SUCCESS 0x00004000
+
+#define WIFI_FW_LINKING_STATE (WIFI_FW_AUTH_NULL | WIFI_FW_AUTH_STATE | WIFI_FW_AUTH_SUCCESS |WIFI_FW_ASSOC_STATE)
+
+struct FW_Sta_Info
+{
+ struct sta_info *psta;
+ u32 status;
+ u32 rx_pkt;
+ u32 retry;
+ NDIS_802_11_RATES_EX SupportedRates;
+};
+
+/*
+ * Usage:
+ * When one iface acts as an AP and the other iface is in STA mode and scanning,
+ * the STA iface should switch back to the AP's operating channel periodically.
+ * Parameter info:
+ * After scanning RTW_SCAN_NUM_OF_CH channels, the driver switches back to the
+ * AP's operating channel for RTW_STAY_AP_CH_MILLISECOND * SURVEY_TO milliseconds.
+ * Example:
+ * For a chip supporting 2.4G + 5GHz, with AP mode operating on channel 1,
+ * RTW_SCAN_NUM_OF_CH is 8, RTW_STAY_AP_CH_MILLISECOND is 3 and SURVEY_TO is 100.
+ * When the STA iface gets a set_scan command, it will
+ * 1. Scan channels 1.2.3.4.5.6.7.8
+ * 2. Go back to channel 1 for 300 milliseconds
+ * 3. Continue the site survey on channels 9.10.11.36.40.44.48.52
+ * 4. Go back to channel 1 for 300 milliseconds
+ * 5. ... and so on, until the survey is done.
+ */
+struct mlme_ext_info
+{
+ u32 state;
+ u32 reauth_count;
+ u32 reassoc_count;
+ u32 link_count;
+ u32 auth_seq;
+ u32 auth_algo; /* 802.11 auth, could be open, shared, auto */
+ u32 authModeToggle;
+ u32 enc_algo;/* encrypt algorithm; */
+	u32 key_index; /* this is only valid for legacy wep, 0~3 for key id. */
+ u32 iv;
+ u8 chg_txt[128];
+ u16 aid;
+ u16 bcn_interval;
+ u16 capability;
+ u8 assoc_AP_vendor;
+ u8 slotTime;
+ u8 preamble_mode;
+ u8 WMM_enable;
+ u8 ERP_enable;
+ u8 ERP_IE;
+ u8 HT_enable;
+ u8 HT_caps_enable;
+ u8 HT_info_enable;
+ u8 HT_protection;
+ u8 turboMode_cts2self;
+ u8 turboMode_rtsen;
+ u8 SM_PS;
+ u8 agg_enable_bitmap;
+ u8 ADDBA_retry_count;
+ u8 candidate_tid_bitmap;
+ u8 dialogToken;
+ /* Accept ADDBA Request */
+ bool bAcceptAddbaReq;
+ u8 bwmode_updated;
+ u8 hidden_ssid_mode;
+ u8 VHT_enable;
+
+ struct ADDBA_request ADDBA_req;
+ struct WMM_para_element WMM_param;
+ struct HT_caps_element HT_caps;
+ struct HT_info_element HT_info;
+	struct wlan_bssid_ex network;/* join network or bss_network, if in ap mode, it is the same as cur_network.network */
+ struct FW_Sta_Info FW_sta_info[NUM_STA];
+};
+
+/* The channel information about this channel including joining, scanning, and power constraints. */
+typedef struct _RT_CHANNEL_INFO
+{
+ u8 ChannelNum; /* The channel number. */
+ RT_SCAN_TYPE ScanType; /* Scan type such as passive or active scan. */
+}RT_CHANNEL_INFO, *PRT_CHANNEL_INFO;
+
+int rtw_ch_set_search_ch(RT_CHANNEL_INFO *ch_set, const u32 ch);
+bool rtw_mlme_band_check(struct adapter *adapter, const u32 ch);
+
+/* P2P_MAX_REG_CLASSES - Maximum number of regulatory classes */
+#define P2P_MAX_REG_CLASSES 10
+
+/* P2P_MAX_REG_CLASS_CHANNELS - Maximum number of channels per regulatory class */
+#define P2P_MAX_REG_CLASS_CHANNELS 20
+
+/* struct p2p_channels - List of supported channels */
+struct p2p_channels {
+ /* struct p2p_reg_class - Supported regulatory class */
+ struct p2p_reg_class {
+ /* reg_class - Regulatory class (IEEE 802.11-2007, Annex J) */
+ u8 reg_class;
+
+ /* channel - Supported channels */
+ u8 channel[P2P_MAX_REG_CLASS_CHANNELS];
+
+ /* channels - Number of channel entries in use */
+ size_t channels;
+ } reg_class[P2P_MAX_REG_CLASSES];
+
+ /* reg_classes - Number of reg_class entries in use */
+ size_t reg_classes;
+};
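A hypothetical lookup sketch over the list structure above (example_p2p_channel_listed is an invented name):

static bool example_p2p_channel_listed(const struct p2p_channels *list,
				       u8 reg_class, u8 chan)
{
	size_t i, j;

	for (i = 0; i < list->reg_classes; i++) {
		const struct p2p_reg_class *rc = &list->reg_class[i];

		if (rc->reg_class != reg_class)
			continue;
		for (j = 0; j < rc->channels; j++)
			if (rc->channel[j] == chan)
				return true;
	}
	return false;
}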
+
+struct p2p_oper_class_map {
+ enum hw_mode {IEEE80211G, IEEE80211A} mode;
+ u8 op_class;
+ u8 min_chan;
+ u8 max_chan;
+ u8 inc;
+ enum { BW20, BW40PLUS, BW40MINUS } bw;
+};
+
+struct mlme_ext_priv
+{
+ struct adapter *padapter;
+ u8 mlmeext_init;
+ atomic_t event_seq;
+ u16 mgnt_seq;
+ u16 sa_query_seq;
+ u64 mgnt_80211w_IPN;
+ u64 mgnt_80211w_IPN_rx;
+ /* struct fw_priv fwpriv; */
+
+ unsigned char cur_channel;
+ unsigned char cur_bwmode;
+ unsigned char cur_ch_offset;/* PRIME_CHNL_OFFSET */
+ unsigned char cur_wireless_mode; /* NETWORK_TYPE */
+
+ unsigned char max_chan_nums;
+ RT_CHANNEL_INFO channel_set[MAX_CHANNEL_NUM];
+ struct p2p_channels channel_list;
+ unsigned char basicrate[NumRates];
+ unsigned char datarate[NumRates];
+ unsigned char default_supported_mcs_set[16];
+
+ struct ss_res sitesurvey_res;
+ struct mlme_ext_info mlmext_info;/* for sta/adhoc mode, including current scanning/connecting/connected related info. */
+ /* for ap mode, network includes ap's cap_info */
+ _timer survey_timer;
+ _timer link_timer;
+ _timer sa_query_timer;
+ /* _timer ADDBA_timer; */
+ u16 chan_scan_time;
+ unsigned long last_scan_time;
+ u8 scan_abort;
+ u8 tx_rate; /* TXRATE when USERATE is set. */
+
+ u32 retry; /* retry for issue probereq */
+
+ u64 TSFValue;
+
+ /* for LPS-32K to adaptive bcn early and timeout */
+ u8 adaptive_tsf_done;
+ u32 bcn_delay_cnt[9];
+ u32 bcn_delay_ratio[9];
+ u32 bcn_cnt;
+ u8 DrvBcnEarly;
+ u8 DrvBcnTimeOut;
+
+ unsigned char bstart_bss;
+
+ u8 update_channel_plan_by_ap_done;
+
+ /* recv_decache check for Action_public frame */
+ u8 action_public_dialog_token;
+ u16 action_public_rxseq;
+
+ u8 active_keep_alive_check;
+#ifdef DBG_FIXED_CHAN
+ u8 fixed_chan;
+#endif
+
+};
+
+void init_mlme_default_rate_set(struct adapter *padapter);
+int init_mlme_ext_priv(struct adapter *padapter);
+int init_hw_mlme_ext(struct adapter *padapter);
+void free_mlme_ext_priv (struct mlme_ext_priv *pmlmeext);
+extern void init_mlme_ext_timer(struct adapter *padapter);
+extern void init_addba_retry_timer(struct adapter *padapter, struct sta_info *psta);
+extern struct xmit_frame *alloc_mgtxmitframe(struct xmit_priv *pxmitpriv);
+
+/* void fill_fwpriv(struct adapter *padapter, struct fw_priv *pfwpriv); */
+
+unsigned char networktype_to_raid_ex(struct adapter *adapter, struct sta_info *psta);
+
+void get_rate_set(struct adapter *padapter, unsigned char *pbssrate, int *bssrate_len);
+void set_mcs_rate_by_mask(u8 *mcs_set, u32 mask);
+void UpdateBrateTbl(struct adapter *padapter, u8 *mBratesOS);
+void UpdateBrateTblForSoftAP(u8 *bssrateset, u32 bssratelen);
+
+void Save_DM_Func_Flag(struct adapter *padapter);
+void Restore_DM_Func_Flag(struct adapter *padapter);
+void Switch_DM_Func(struct adapter *padapter, u32 mode, u8 enable);
+
+void Set_MSR(struct adapter *padapter, u8 type);
+
+u8 rtw_get_oper_ch(struct adapter *adapter);
+void rtw_set_oper_ch(struct adapter *adapter, u8 ch);
+u8 rtw_get_oper_bw(struct adapter *adapter);
+void rtw_set_oper_bw(struct adapter *adapter, u8 bw);
+u8 rtw_get_oper_choffset(struct adapter *adapter);
+void rtw_set_oper_choffset(struct adapter *adapter, u8 offset);
+u8 rtw_get_center_ch(u8 channel, u8 chnl_bw, u8 chnl_offset);
+unsigned long rtw_get_on_cur_ch_time(struct adapter *adapter);
+
+void set_channel_bwmode(struct adapter *padapter, unsigned char channel, unsigned char channel_offset, unsigned short bwmode);
+void SelectChannel(struct adapter *padapter, unsigned char channel);
+
+unsigned int decide_wait_for_beacon_timeout(unsigned int bcn_interval);
+
+void read_cam(struct adapter *padapter , u8 entry, u8 *get_key);
+
+/* modify HW only */
+void _write_cam(struct adapter *padapter, u8 entry, u16 ctrl, u8 *mac, u8 *key);
+void _clear_cam_entry(struct adapter *padapter, u8 entry);
+void write_cam_from_cache(struct adapter *adapter, u8 id);
+
+/* modify both HW and cache */
+void write_cam(struct adapter *padapter, u8 id, u16 ctrl, u8 *mac, u8 *key);
+void clear_cam_entry(struct adapter *padapter, u8 id);
+
+/* modify cache only */
+void write_cam_cache(struct adapter *adapter, u8 id, u16 ctrl, u8 *mac, u8 *key);
+void clear_cam_cache(struct adapter *adapter, u8 id);
+
+void invalidate_cam_all(struct adapter *padapter);
+
+
+int allocate_fw_sta_entry(struct adapter *padapter);
+void flush_all_cam_entry(struct adapter *padapter);
+
+void site_survey(struct adapter *padapter);
+u8 collect_bss_info(struct adapter *padapter, union recv_frame *precv_frame, struct wlan_bssid_ex *bssid);
+void update_network(struct wlan_bssid_ex *dst, struct wlan_bssid_ex *src, struct adapter *padapter, bool update_ie);
+
+u8 *get_my_bssid(struct wlan_bssid_ex *pnetwork);
+u16 get_beacon_interval(struct wlan_bssid_ex *bss);
+
+int is_client_associated_to_ap(struct adapter *padapter);
+int is_client_associated_to_ibss(struct adapter *padapter);
+int is_IBSS_empty(struct adapter *padapter);
+
+unsigned char check_assoc_AP(u8 *pframe, uint len);
+
+int WMM_param_handler(struct adapter *padapter, struct ndis_80211_var_ie * pIE);
+void WMMOnAssocRsp(struct adapter *padapter);
+
+void HT_caps_handler(struct adapter *padapter, struct ndis_80211_var_ie * pIE);
+void HT_info_handler(struct adapter *padapter, struct ndis_80211_var_ie * pIE);
+void HTOnAssocRsp(struct adapter *padapter);
+
+void ERP_IE_handler(struct adapter *padapter, struct ndis_80211_var_ie * pIE);
+void VCS_update(struct adapter *padapter, struct sta_info *psta);
+void update_ldpc_stbc_cap(struct sta_info *psta);
+
+void update_beacon_info(struct adapter *padapter, u8 *pframe, uint len, struct sta_info *psta);
+int rtw_check_bcn_info(struct adapter *Adapter, u8 *pframe, u32 packet_len);
+void update_IOT_info(struct adapter *padapter);
+void update_capinfo(struct adapter * Adapter, u16 updateCap);
+void update_wireless_mode(struct adapter *padapter);
+void update_sta_basic_rate(struct sta_info *psta, u8 wireless_mode);
+int update_sta_support_rate(struct adapter *padapter, u8 *pvar_ie, uint var_ie_len, int cam_idx);
+
+/* for sta/adhoc mode */
+void update_sta_info(struct adapter *padapter, struct sta_info *psta);
+void Update_RA_Entry(struct adapter *padapter, struct sta_info *psta);
+void set_sta_rate(struct adapter *padapter, struct sta_info *psta);
+
+unsigned int receive_disconnect(struct adapter *padapter, unsigned char *MacAddr, unsigned short reason);
+
+unsigned char get_highest_rate_idx(u32 mask);
+int support_short_GI(struct adapter *padapter, struct HT_caps_element *pHT_caps, u8 bwmode);
+unsigned int is_ap_in_tkip(struct adapter *padapter);
+
+s16 rtw_camid_search(struct adapter *adapter, u8 *addr, s16 kid);
+s16 rtw_camid_alloc(struct adapter *adapter, struct sta_info *sta, u8 kid);
+void rtw_camid_free(struct adapter *adapter, u8 cam_id);
+
+extern void rtw_alloc_macid(struct adapter *padapter, struct sta_info *psta);
+extern void rtw_release_macid(struct adapter *padapter, struct sta_info *psta);
+extern u8 rtw_search_max_mac_id(struct adapter *padapter);
+
+void report_join_res(struct adapter *padapter, int res);
+void report_survey_event(struct adapter *padapter, union recv_frame *precv_frame);
+void report_surveydone_event(struct adapter *padapter);
+void report_del_sta_event(struct adapter *padapter, unsigned char* MacAddr, unsigned short reason);
+void report_add_sta_event(struct adapter *padapter, unsigned char* MacAddr, int cam_idx);
+bool rtw_port_switch_chk(struct adapter *adapter);
+void report_wmm_edca_update(struct adapter *padapter);
+
+void beacon_timing_control(struct adapter *padapter);
+u8 chk_bmc_sleepq_cmd(struct adapter *padapter);
+extern u8 set_tx_beacon_cmd(struct adapter *padapter);
+unsigned int setup_beacon_frame(struct adapter *padapter, unsigned char *beacon_frame);
+void update_mgnt_tx_rate(struct adapter *padapter, u8 rate);
+void update_mgntframe_attrib(struct adapter *padapter, struct pkt_attrib *pattrib);
+void update_mgntframe_attrib_addr(struct adapter *padapter, struct xmit_frame *pmgntframe);
+void dump_mgntframe(struct adapter *padapter, struct xmit_frame *pmgntframe);
+s32 dump_mgntframe_and_wait(struct adapter *padapter, struct xmit_frame *pmgntframe, int timeout_ms);
+s32 dump_mgntframe_and_wait_ack(struct adapter *padapter, struct xmit_frame *pmgntframe);
+
+void issue_beacon(struct adapter *padapter, int timeout_ms);
+void issue_probersp(struct adapter *padapter, unsigned char *da, u8 is_valid_p2p_probereq);
+void issue_assocreq(struct adapter *padapter);
+void issue_asocrsp(struct adapter *padapter, unsigned short status, struct sta_info *pstat, int pkt_type);
+void issue_auth(struct adapter *padapter, struct sta_info *psta, unsigned short status);
+void issue_probereq(struct adapter *padapter, struct ndis_802_11_ssid *pssid, u8 *da);
+s32 issue_probereq_ex(struct adapter *padapter, struct ndis_802_11_ssid *pssid, u8 *da, u8 ch, bool append_wps, int try_cnt, int wait_ms);
+int issue_nulldata(struct adapter *padapter, unsigned char *da, unsigned int power_mode, int try_cnt, int wait_ms);
+s32 issue_nulldata_in_interrupt(struct adapter *padapter, u8 *da);
+int issue_qos_nulldata(struct adapter *padapter, unsigned char *da, u16 tid, int try_cnt, int wait_ms);
+int issue_deauth(struct adapter *padapter, unsigned char *da, unsigned short reason);
+int issue_deauth_ex(struct adapter *padapter, u8 *da, unsigned short reason, int try_cnt, int wait_ms);
+void issue_action_BA(struct adapter *padapter, unsigned char *raddr, unsigned char action, unsigned short status);
+void issue_action_SA_Query(struct adapter *padapter, unsigned char *raddr, unsigned char action, unsigned short tid);
+unsigned int send_delba(struct adapter *padapter, u8 initiator, u8 *addr);
+unsigned int send_beacon(struct adapter *padapter);
+
+void start_clnt_assoc(struct adapter *padapter);
+void start_clnt_auth(struct adapter *padapter);
+void start_clnt_join(struct adapter *padapter);
+void start_create_ibss(struct adapter *padapter);
+
+unsigned int OnAssocReq(struct adapter *padapter, union recv_frame *precv_frame);
+unsigned int OnAssocRsp(struct adapter *padapter, union recv_frame *precv_frame);
+unsigned int OnProbeReq(struct adapter *padapter, union recv_frame *precv_frame);
+unsigned int OnProbeRsp(struct adapter *padapter, union recv_frame *precv_frame);
+unsigned int DoReserved(struct adapter *padapter, union recv_frame *precv_frame);
+unsigned int OnBeacon(struct adapter *padapter, union recv_frame *precv_frame);
+unsigned int OnAtim(struct adapter *padapter, union recv_frame *precv_frame);
+unsigned int OnDisassoc(struct adapter *padapter, union recv_frame *precv_frame);
+unsigned int OnAuth(struct adapter *padapter, union recv_frame *precv_frame);
+unsigned int OnAuthClient(struct adapter *padapter, union recv_frame *precv_frame);
+unsigned int OnDeAuth(struct adapter *padapter, union recv_frame *precv_frame);
+unsigned int OnAction(struct adapter *padapter, union recv_frame *precv_frame);
+
+unsigned int on_action_spct(struct adapter *padapter, union recv_frame *precv_frame);
+unsigned int OnAction_back(struct adapter *padapter, union recv_frame *precv_frame);
+unsigned int on_action_public(struct adapter *padapter, union recv_frame *precv_frame);
+unsigned int OnAction_ht(struct adapter *padapter, union recv_frame *precv_frame);
+unsigned int OnAction_sa_query(struct adapter *padapter, union recv_frame *precv_frame);
+
+void mlmeext_joinbss_event_callback(struct adapter *padapter, int join_res);
+void mlmeext_sta_del_event_callback(struct adapter *padapter);
+void mlmeext_sta_add_event_callback(struct adapter *padapter, struct sta_info *psta);
+
+void linked_status_chk(struct adapter *padapter);
+
+void _linked_info_dump(struct adapter *padapter);
+
+void survey_timer_hdl (struct adapter *padapter);
+void link_timer_hdl (struct adapter *padapter);
+void addba_timer_hdl(struct sta_info *psta);
+void sa_query_timer_hdl(struct adapter *padapter);
+/* void reauth_timer_hdl(struct adapter *padapter); */
+/* void reassoc_timer_hdl(struct adapter *padapter); */
+
+#define set_survey_timer(mlmeext, ms) \
+ do { \
+ /*DBG_871X("%s set_survey_timer(%p, %d)\n", __func__, (mlmeext), (ms));*/ \
+ _set_timer(&(mlmeext)->survey_timer, (ms)); \
+ } while (0)
+
+#define set_link_timer(mlmeext, ms) \
+ do { \
+ /*DBG_871X("%s set_link_timer(%p, %d)\n", __func__, (mlmeext), (ms));*/ \
+ _set_timer(&(mlmeext)->link_timer, (ms)); \
+ } while (0)
+#define set_sa_query_timer(mlmeext, ms) \
+ do { \
+ DBG_871X("%s set_sa_query_timer(%p, %d)\n", __func__, (mlmeext), (ms)); \
+ _set_timer(&(mlmeext)->sa_query_timer, (ms)); \
+ } while (0)
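A hypothetical sketch (example_arm_mlme_timers is an invented name) combining the timer helpers above with the SURVEY_TO and REAUTH_TO values defined near the top of this header:

static void example_arm_mlme_timers(struct mlme_ext_priv *pmlmeext)
{
	set_survey_timer(pmlmeext, SURVEY_TO);	/* one per-channel dwell */
	set_link_timer(pmlmeext, REAUTH_TO);	/* illustrative link-establishment timeout */
}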
+extern int cckrates_included(unsigned char *rate, int ratelen);
+extern int cckratesonly_included(unsigned char *rate, int ratelen);
+
+extern void process_addba_req(struct adapter *padapter, u8 *paddba_req, u8 *addr);
+
+extern void update_TSF(struct mlme_ext_priv *pmlmeext, u8 *pframe, uint len);
+extern void correct_TSF(struct adapter *padapter, struct mlme_ext_priv *pmlmeext);
+extern void adaptive_early_32k(struct mlme_ext_priv *pmlmeext, u8 *pframe, uint len);
+extern u8 traffic_status_watchdog(struct adapter *padapter, u8 from_timer);
+
+int rtw_chk_start_clnt_join(struct adapter *padapter, u8 *ch, u8 *bw, u8 *offset);
+int rtw_get_ch_setting_union(struct adapter *adapter, u8 *ch, u8 *bw, u8 *offset);
+
+struct cmd_hdl {
+ uint parmsize;
+ u8 (*h2cfuns)(struct adapter *padapter, u8 *pbuf);
+};
+
+
+u8 read_macreg_hdl(struct adapter *padapter, u8 *pbuf);
+u8 write_macreg_hdl(struct adapter *padapter, u8 *pbuf);
+u8 read_bbreg_hdl(struct adapter *padapter, u8 *pbuf);
+u8 write_bbreg_hdl(struct adapter *padapter, u8 *pbuf);
+u8 read_rfreg_hdl(struct adapter *padapter, u8 *pbuf);
+u8 write_rfreg_hdl(struct adapter *padapter, u8 *pbuf);
+
+
+u8 NULL_hdl(struct adapter *padapter, u8 *pbuf);
+u8 join_cmd_hdl(struct adapter *padapter, u8 *pbuf);
+u8 disconnect_hdl(struct adapter *padapter, u8 *pbuf);
+u8 createbss_hdl(struct adapter *padapter, u8 *pbuf);
+u8 setopmode_hdl(struct adapter *padapter, u8 *pbuf);
+u8 sitesurvey_cmd_hdl(struct adapter *padapter, u8 *pbuf);
+u8 setauth_hdl(struct adapter *padapter, u8 *pbuf);
+u8 setkey_hdl(struct adapter *padapter, u8 *pbuf);
+u8 set_stakey_hdl(struct adapter *padapter, u8 *pbuf);
+u8 set_assocsta_hdl(struct adapter *padapter, u8 *pbuf);
+u8 del_assocsta_hdl(struct adapter *padapter, u8 *pbuf);
+u8 add_ba_hdl(struct adapter *padapter, unsigned char *pbuf);
+
+u8 mlme_evt_hdl(struct adapter *padapter, unsigned char *pbuf);
+u8 h2c_msg_hdl(struct adapter *padapter, unsigned char *pbuf);
+u8 chk_bmc_sleepq_hdl(struct adapter *padapter, unsigned char *pbuf);
+u8 tx_beacon_hdl(struct adapter *padapter, unsigned char *pbuf);
+u8 set_ch_hdl(struct adapter *padapter, u8 *pbuf);
+u8 set_chplan_hdl(struct adapter *padapter, unsigned char *pbuf);
+u8 led_blink_hdl(struct adapter *padapter, unsigned char *pbuf);
+u8 set_csa_hdl(struct adapter *padapter, unsigned char *pbuf); /* Kurt: Handling DFS channel switch announcement ie. */
+u8 tdls_hdl(struct adapter *padapter, unsigned char *pbuf);
+u8 run_in_thread_hdl(struct adapter *padapter, u8 *pbuf);
+
+
+#define GEN_DRV_CMD_HANDLER(size, cmd) {size, &cmd ## _hdl},
+#define GEN_MLME_EXT_HANDLER(size, cmd) {size, cmd},
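
struct cmd_hdl pairs the expected parameter-block size with an H2C handler, and the GEN_* macros above emit initializer entries for such a table. A minimal sketch of how a driver might build and consult one; the table, the placeholder sizes and the dispatch helper are hypothetical, only struct cmd_hdl and the handler declarations come from this header:

/* Hypothetical table and dispatcher; parmsize values are placeholders. */
static struct cmd_hdl example_cmd_table[] = {
	GEN_MLME_EXT_HANDLER(0, NULL_hdl)
	GEN_MLME_EXT_HANDLER(0, join_cmd_hdl)
	GEN_MLME_EXT_HANDLER(0, disconnect_hdl)
};

static u8 example_cmd_dispatch(struct adapter *padapter, unsigned int code, u8 *parmbuf)
{
	if (code >= ARRAY_SIZE(example_cmd_table) || !example_cmd_table[code].h2cfuns)
		return 0;

	/* parmbuf must hold at least example_cmd_table[code].parmsize bytes */
	return example_cmd_table[code].h2cfuns(padapter, parmbuf);
}

Note that GEN_MLME_EXT_HANDLER() already emits the trailing comma, which is why consecutive invocations need no separators.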
+
+struct C2HEvent_Header
+{
+
+#ifdef __LITTLE_ENDIAN
+
+ unsigned int len:16;
+ unsigned int ID:8;
+ unsigned int seq:8;
+#else
+ unsigned int seq:8;
+ unsigned int ID:8;
+ unsigned int len:16;
+#endif
+ unsigned int rsvd;
+};
+
+void rtw_dummy_event_callback(struct adapter *adapter , u8 *pbuf);
+void rtw_fwdbg_event_callback(struct adapter *adapter , u8 *pbuf);
+
+enum rtw_c2h_event
+{
+ GEN_EVT_CODE(_Read_MACREG) = 0, /*0*/
+ GEN_EVT_CODE(_Read_BBREG),
+ GEN_EVT_CODE(_Read_RFREG),
+ GEN_EVT_CODE(_Read_EEPROM),
+ GEN_EVT_CODE(_Read_EFUSE),
+ GEN_EVT_CODE(_Read_CAM), /*5*/
+ GEN_EVT_CODE(_Get_BasicRate),
+ GEN_EVT_CODE(_Get_DataRate),
+ GEN_EVT_CODE(_Survey), /*8*/
+ GEN_EVT_CODE(_SurveyDone), /*9*/
+
+ GEN_EVT_CODE(_JoinBss) , /*10*/
+ GEN_EVT_CODE(_AddSTA),
+ GEN_EVT_CODE(_DelSTA),
+ GEN_EVT_CODE(_AtimDone) ,
+ GEN_EVT_CODE(_TX_Report),
+ GEN_EVT_CODE(_CCX_Report), /*15*/
+ GEN_EVT_CODE(_DTM_Report),
+ GEN_EVT_CODE(_TX_Rate_Statistics),
+ GEN_EVT_CODE(_C2HLBK),
+ GEN_EVT_CODE(_FWDBG),
+ GEN_EVT_CODE(_C2HFEEDBACK), /*20*/
+ GEN_EVT_CODE(_ADDBA),
+ GEN_EVT_CODE(_C2HBCN),
+ GEN_EVT_CODE(_ReportPwrState), /* filen: only for PCIE, USB */
+ GEN_EVT_CODE(_CloseRF), /* filen: only for PCIE, work around ASPM */
+ GEN_EVT_CODE(_WMM), /*25*/
+ MAX_C2HEVT
+};
+
+
+#ifdef _RTW_MLME_EXT_C_
+
+static struct fwevent wlanevents[] =
+{
+ {0, rtw_dummy_event_callback}, /*0*/
+ {0, NULL},
+ {0, NULL},
+ {0, NULL},
+ {0, NULL},
+ {0, NULL},
+ {0, NULL},
+ {0, NULL},
+ {0, &rtw_survey_event_callback}, /*8*/
+ {sizeof (struct surveydone_event), &rtw_surveydone_event_callback}, /*9*/
+
+ {0, &rtw_joinbss_event_callback}, /*10*/
+ {sizeof(struct stassoc_event), &rtw_stassoc_event_callback},
+ {sizeof(struct stadel_event), &rtw_stadel_event_callback},
+ {0, &rtw_atimdone_event_callback},
+ {0, rtw_dummy_event_callback},
+ {0, NULL}, /*15*/
+ {0, NULL},
+ {0, NULL},
+ {0, NULL},
+ {0, rtw_fwdbg_event_callback},
+ {0, NULL}, /*20*/
+ {0, NULL},
+ {0, NULL},
+ {0, &rtw_cpwm_event_callback},
+ {0, NULL},
+ {0, &rtw_wmm_event_callback},
+
+};
+
+#endif /* _RTW_MLME_EXT_C_ */
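
The rtw_c2h_event codes double as indices into wlanevents[], so the enum order and the table order have to stay in sync. A rough sketch of how a C2H event dispatcher could consult the table; the dispatcher is hypothetical, and struct fwevent is assumed to carry a payload size and a callback, as the initializers above suggest:

/* Hypothetical dispatcher; struct fwevent field names are assumed. */
static void example_c2h_dispatch(struct adapter *padapter, u8 evt_code, u8 *pbuf)
{
	const struct fwevent *ev;

	if (evt_code >= MAX_C2HEVT)
		return;

	ev = &wlanevents[evt_code];
	if (!ev->event_callback)
		return;

	/* a real dispatcher would also validate the payload against ev->parmsize */
	ev->event_callback(padapter, pbuf);
}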
+
+#endif
diff --git a/drivers/staging/rtl8723bs/include/rtw_mp.h b/drivers/staging/rtl8723bs/include/rtw_mp.h
new file mode 100644
index 000000000000..88ace11e42e9
--- /dev/null
+++ b/drivers/staging/rtl8723bs/include/rtw_mp.h
@@ -0,0 +1,512 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#ifndef _RTW_MP_H_
+#define _RTW_MP_H_
+
+#define MAX_MP_XMITBUF_SZ 2048
+#define NR_MP_XMITFRAME 8
+
+struct mp_xmit_frame
+{
+ struct list_head list;
+
+ struct pkt_attrib attrib;
+
+ _pkt *pkt;
+
+ int frame_tag;
+
+ struct adapter *padapter;
+
+ uint mem[(MAX_MP_XMITBUF_SZ >> 2)];
+};
+
+struct mp_wiparam
+{
+ u32 bcompleted;
+ u32 act_type;
+ u32 io_offset;
+ u32 io_value;
+};
+
+typedef void(*wi_act_func)(void* padapter);
+
+struct mp_tx
+{
+ u8 stop;
+ u32 count, sended;
+ u8 payload;
+ struct pkt_attrib attrib;
+ /* struct tx_desc desc; */
+ /* u8 resvdtx[7]; */
+ u8 desc[TXDESC_SIZE];
+ u8 *pallocated_buf;
+ u8 *buf;
+ u32 buf_size, write_size;
+ void *PktTxThread;
+};
+
+#define MP_MAX_LINES 1000
+#define MP_MAX_LINES_BYTES 256
+
+typedef void (*MPT_WORK_ITEM_HANDLER)(void *Adapter);
+typedef struct _MPT_CONTEXT
+{
+ /* Indicate if we have started Mass Production Test. */
+ bool bMassProdTest;
+
+ /* Indicate if the driver is unloading or unloaded. */
+ bool bMptDrvUnload;
+
+ _sema MPh2c_Sema;
+ _timer MPh2c_timeout_timer;
+/* Event used to sync H2c for BT control */
+
+ bool MptH2cRspEvent;
+ bool MptBtC2hEvent;
+ bool bMPh2c_timeout;
+
+ /* 8190 PCI does not support NDIS_WORK_ITEM. */
+ /* Work Item for Mass Production Test. */
+ /* NDIS_WORK_ITEM MptWorkItem; */
+/* RT_WORK_ITEM MptWorkItem; */
+ /* Event used to sync the case unloading driver and MptWorkItem is still in progress. */
+/* NDIS_EVENT MptWorkItemEvent; */
+ /* To protect the following variables. */
+/* NDIS_SPIN_LOCK MptWorkItemSpinLock; */
+ /* Indicate a MptWorkItem is scheduled and not yet finished. */
+ bool bMptWorkItemInProgress;
+ /* An instance which implements function and context of MptWorkItem. */
+ MPT_WORK_ITEM_HANDLER CurrMptAct;
+
+ /* 1 =Start, 0 =Stop from UI. */
+ u32 MptTestStart;
+ /* _TEST_MODE, defined in MPT_Req2.h */
+ u32 MptTestItem;
+ /* Variable needed in each implementation of CurrMptAct. */
+ u32 MptActType; /* Type of action performed in CurrMptAct. */
+ /* The offset of the IO operation depends on MptActType. */
+ u32 MptIoOffset;
+ /* The value of the IO operation depends on MptActType. */
+ u32 MptIoValue;
+ /* The RF path of the IO operation depends on MptActType. */
+ u32 MptRfPath;
+
+ enum WIRELESS_MODE MptWirelessModeToSw; /* Wireless mode to switch. */
+ u8 MptChannelToSw; /* Channel to switch. */
+ u8 MptInitGainToSet; /* Initial gain to set. */
+ u32 MptBandWidth; /* bandwidth to switch. */
+ u32 MptRateIndex; /* rate index. */
+ /* Register value kept for Single Carrier Tx test. */
+ u8 btMpCckTxPower;
+ /* Register value kept for Single Carrier Tx test. */
+ u8 btMpOfdmTxPower;
+ /* For MP Tx Power index */
+ u8 TxPwrLevel[2]; /* rf-A, rf-B */
+ u32 RegTxPwrLimit;
+ /* Content of RCR Register for Mass Production Test. */
+ u32 MptRCR;
+ /* true if we only receive packets with specific pattern. */
+ bool bMptFilterPattern;
+ /* Rx OK count, statistics used in Mass Production Test. */
+ u32 MptRxOkCnt;
+ /* Rx CRC32 error count, statistics used in Mass Production Test. */
+ u32 MptRxCrcErrCnt;
+
+ bool bCckContTx; /* true if we are in CCK Continuous Tx test. */
+ bool bOfdmContTx; /* true if we are in OFDM Continuous Tx test. */
+ bool bStartContTx; /* true if we have started the Continuous Tx test. */
+ /* true if we are in Single Carrier Tx test. */
+ bool bSingleCarrier;
+ /* true if we are in Carrier Suppression Tx Test. */
+ bool bCarrierSuppression;
+ /* true if we are in Single Tone Tx test. */
+ bool bSingleTone;
+
+ /* ACK counter asked by K.Y.. */
+ bool bMptEnableAckCounter;
+ u32 MptAckCounter;
+
+ /* SD3 Willis: for 8192S, save the 1T/2T RF table for ACUT only; to be deleted later. */
+ /* s8 BufOfLines[2][MAX_LINES_HWCONFIG_TXT][MAX_BYTES_LINE_HWCONFIG_TXT]; */
+ /* s8 BufOfLines[2][MP_MAX_LINES][MP_MAX_LINES_BYTES]; */
+ /* s32 RfReadLine[2]; */
+
+ u8 APK_bound[2]; /* for APK path A/path B */
+ bool bMptIndexEven;
+
+ u8 backup0xc50;
+ u8 backup0xc58;
+ u8 backup0xc30;
+ u8 backup0x52_RF_A;
+ u8 backup0x52_RF_B;
+
+ u32 backup0x58_RF_A;
+ u32 backup0x58_RF_B;
+
+ u8 h2cReqNum;
+ u8 c2hBuf[32];
+
+ u8 btInBuf[100];
+ u32 mptOutLen;
+ u8 mptOutBuf[100];
+
+}MPT_CONTEXT, *PMPT_CONTEXT;
+/* endif */
+
+/* E-Fuse */
+#define EFUSE_MAP_SIZE 512
+
+#define EFUSE_MAX_SIZE 512
+/* end of E-Fuse */
+
+/* define RTPRIV_IOCTL_MP (SIOCIWFIRSTPRIV + 0x17) */
+enum {
+ WRITE_REG = 1,
+ READ_REG,
+ WRITE_RF,
+ READ_RF,
+ MP_START,
+ MP_STOP,
+ MP_RATE,
+ MP_CHANNEL,
+ MP_BANDWIDTH,
+ MP_TXPOWER,
+ MP_ANT_TX,
+ MP_ANT_RX,
+ MP_CTX,
+ MP_QUERY,
+ MP_ARX,
+ MP_PSD,
+ MP_PWRTRK,
+ MP_THER,
+ MP_IOCTL,
+ EFUSE_GET,
+ EFUSE_SET,
+ MP_RESET_STATS,
+ MP_DUMP,
+ MP_PHYPARA,
+ MP_SetRFPathSwh,
+ MP_QueryDrvStats,
+ MP_SetBT,
+ CTA_TEST,
+ MP_DISABLE_BT_COEXIST,
+ MP_PwrCtlDM,
+#ifdef CONFIG_WOWLAN
+ MP_WOW_ENABLE,
+#endif
+#ifdef CONFIG_AP_WOWLAN
+ MP_AP_WOW_ENABLE,
+#endif
+ MP_NULL,
+ MP_GET_TXPOWER_INX,
+};
+
+struct mp_priv
+{
+ struct adapter *papdater;
+
+ /* Testing Flag */
+ u32 mode;/* 0 for normal type packet, 1 for loopback packet (16bytes TXCMD) */
+
+ u32 prev_fw_state;
+
+ /* OID cmd handler */
+ struct mp_wiparam workparam;
+/* u8 act_in_progress; */
+
+ /* Tx Section */
+ u8 TID;
+ u32 tx_pktcount;
+ u32 pktInterval;
+ struct mp_tx tx;
+
+ /* Rx Section */
+ u32 rx_bssidpktcount;
+ u32 rx_pktcount;
+ u32 rx_pktcount_filter_out;
+ u32 rx_crcerrpktcount;
+ u32 rx_pktloss;
+ bool rx_bindicatePkt;
+ struct recv_stat rxstat;
+
+ /* RF/BB relative */
+ u8 channel;
+ u8 bandwidth;
+ u8 prime_channel_offset;
+ u8 txpoweridx;
+ u8 txpoweridx_b;
+ u8 rateidx;
+ u32 preamble;
+/* u8 modem; */
+ u32 CrystalCap;
+/* u32 curr_crystalcap; */
+
+ u16 antenna_tx;
+ u16 antenna_rx;
+/* u8 curr_rfpath; */
+
+ u8 check_mp_pkt;
+
+ u8 bSetTxPower;
+/* uint ForcedDataRate; */
+ u8 mp_dm;
+ u8 mac_filter[ETH_ALEN];
+ u8 bmac_filter;
+
+ struct wlan_network mp_network;
+ NDIS_802_11_MAC_ADDRESS network_macaddr;
+
+ u8 *pallocated_mp_xmitframe_buf;
+ u8 *pmp_xmtframe_buf;
+ struct __queue free_mp_xmitqueue;
+ u32 free_mp_xmitframe_cnt;
+ bool bSetRxBssid;
+ bool bTxBufCkFail;
+
+ MPT_CONTEXT MptCtx;
+
+ u8 *TXradomBuffer;
+};
+
+typedef struct _IOCMD_STRUCT_ {
+ u8 cmdclass;
+ u16 value;
+ u8 index;
+}IOCMD_STRUCT;
+
+struct rf_reg_param {
+ u32 path;
+ u32 offset;
+ u32 value;
+};
+
+struct bb_reg_param {
+ u32 offset;
+ u32 value;
+};
+
+#define LOWER true
+#define RAISE false
+
+/* Hardware Registers */
+#define BB_REG_BASE_ADDR 0x800
+
+/* MP variables */
+enum MP_MODE {
+ MP_OFF,
+ MP_ON,
+ MP_ERR,
+ MP_CONTINUOUS_TX,
+ MP_SINGLE_CARRIER_TX,
+ MP_CARRIER_SUPPRISSION_TX,
+ MP_SINGLE_TONE_TX,
+ MP_PACKET_TX,
+ MP_PACKET_RX
+};
+
+#define MAX_RF_PATH_NUMS RF_PATH_MAX
+
+extern u8 mpdatarate[NumRates];
+
+/* MP set force data rate base on the definition. */
+enum MPT_RATE_INDEX {
+ /* CCK rate. */
+ MPT_RATE_1M = 0 , /* 0 */
+ MPT_RATE_2M,
+ MPT_RATE_55M,
+ MPT_RATE_11M, /* 3 */
+
+ /* OFDM rate. */
+ MPT_RATE_6M, /* 4 */
+ MPT_RATE_9M,
+ MPT_RATE_12M,
+ MPT_RATE_18M,
+ MPT_RATE_24M,
+ MPT_RATE_36M,
+ MPT_RATE_48M,
+ MPT_RATE_54M, /* 11 */
+
+ /* HT rate. */
+ MPT_RATE_MCS0, /* 12 */
+ MPT_RATE_MCS1,
+ MPT_RATE_MCS2,
+ MPT_RATE_MCS3,
+ MPT_RATE_MCS4,
+ MPT_RATE_MCS5,
+ MPT_RATE_MCS6,
+ MPT_RATE_MCS7, /* 19 */
+ MPT_RATE_MCS8,
+ MPT_RATE_MCS9,
+ MPT_RATE_MCS10,
+ MPT_RATE_MCS11,
+ MPT_RATE_MCS12,
+ MPT_RATE_MCS13,
+ MPT_RATE_MCS14,
+ MPT_RATE_MCS15, /* 27 */
+ /* VHT rate. Total: 20*/
+ MPT_RATE_VHT1SS_MCS0 = 100,/* To reserve MCS16~MCS31, the index starts from #100. */
+ MPT_RATE_VHT1SS_MCS1, /* #101 */
+ MPT_RATE_VHT1SS_MCS2,
+ MPT_RATE_VHT1SS_MCS3,
+ MPT_RATE_VHT1SS_MCS4,
+ MPT_RATE_VHT1SS_MCS5,
+ MPT_RATE_VHT1SS_MCS6, /* #106 */
+ MPT_RATE_VHT1SS_MCS7,
+ MPT_RATE_VHT1SS_MCS8,
+ MPT_RATE_VHT1SS_MCS9,
+ MPT_RATE_VHT2SS_MCS0,
+ MPT_RATE_VHT2SS_MCS1, /* #111 */
+ MPT_RATE_VHT2SS_MCS2,
+ MPT_RATE_VHT2SS_MCS3,
+ MPT_RATE_VHT2SS_MCS4,
+ MPT_RATE_VHT2SS_MCS5,
+ MPT_RATE_VHT2SS_MCS6, /* #116 */
+ MPT_RATE_VHT2SS_MCS7,
+ MPT_RATE_VHT2SS_MCS8,
+ MPT_RATE_VHT2SS_MCS9,
+ MPT_RATE_LAST
+};
+
+#define MAX_TX_PWR_INDEX_N_MODE 64 /* 0x3F */
+
+enum POWER_MODE {
+ POWER_LOW = 0,
+ POWER_NORMAL
+};
+
+/* The following enumeration is used to define the value of Reg0xD00[30:28] or JaguarReg0x914[18:16]. */
+enum OFDM_TX_MODE {
+ OFDM_ALL_OFF = 0,
+ OFDM_ContinuousTx = 1,
+ OFDM_SingleCarrier = 2,
+ OFDM_SingleTone = 4,
+};
+
+#define RX_PKT_BROADCAST 1
+#define RX_PKT_DEST_ADDR 2
+#define RX_PKT_PHY_MATCH 3
+
+#define Mac_OFDM_OK 0x00000000
+#define Mac_OFDM_Fail 0x10000000
+#define Mac_OFDM_FasleAlarm 0x20000000
+#define Mac_CCK_OK 0x30000000
+#define Mac_CCK_Fail 0x40000000
+#define Mac_CCK_FasleAlarm 0x50000000
+#define Mac_HT_OK 0x60000000
+#define Mac_HT_Fail 0x70000000
+#define Mac_HT_FasleAlarm 0x90000000
+#define Mac_DropPacket 0xA0000000
+
+enum ENCRY_CTRL_STATE {
+ HW_CONTROL, /* hw encryption& decryption */
+ SW_CONTROL, /* sw encryption& decryption */
+ HW_ENCRY_SW_DECRY, /* hw encryption & sw decryption */
+ SW_ENCRY_HW_DECRY /* sw encryption & hw decryption */
+};
+
+enum MPT_TXPWR_DEF {
+ MPT_CCK,
+ MPT_OFDM, /* L and HT OFDM */
+ MPT_VHT_OFDM
+};
+
+#define REG_RF_BB_GAIN_OFFSET 0x7f
+#define RF_GAIN_OFFSET_MASK 0xfffff
+
+/* */
+/* struct mp_xmit_frame *alloc_mp_xmitframe(struct mp_priv *pmp_priv); */
+/* int free_mp_xmitframe(struct xmit_priv *pxmitpriv, struct mp_xmit_frame *pmp_xmitframe); */
+
+s32 init_mp_priv(struct adapter *padapter);
+void free_mp_priv(struct mp_priv *pmp_priv);
+s32 MPT_InitializeAdapter(struct adapter *padapter, u8 Channel);
+void MPT_DeInitAdapter(struct adapter *padapter);
+s32 mp_start_test(struct adapter *padapter);
+void mp_stop_test(struct adapter *padapter);
+
+u32 _read_rfreg(struct adapter *padapter, u8 rfpath, u32 addr, u32 bitmask);
+void _write_rfreg(struct adapter *padapter, u8 rfpath, u32 addr, u32 bitmask, u32 val);
+
+u32 read_macreg(struct adapter *padapter, u32 addr, u32 sz);
+void write_macreg(struct adapter *padapter, u32 addr, u32 val, u32 sz);
+u32 read_bbreg(struct adapter *padapter, u32 addr, u32 bitmask);
+void write_bbreg(struct adapter *padapter, u32 addr, u32 bitmask, u32 val);
+u32 read_rfreg(struct adapter *padapter, u8 rfpath, u32 addr);
+void write_rfreg(struct adapter *padapter, u8 rfpath, u32 addr, u32 val);
+
+void SetChannel(struct adapter *padapter);
+void SetBandwidth(struct adapter *padapter);
+int SetTxPower(struct adapter *padapter);
+void SetAntennaPathPower(struct adapter *padapter);
+void SetDataRate(struct adapter *padapter);
+
+void SetAntenna(struct adapter *padapter);
+
+s32 SetThermalMeter(struct adapter *padapter, u8 target_ther);
+void GetThermalMeter(struct adapter *padapter, u8 *value);
+
+void SetContinuousTx(struct adapter *padapter, u8 bStart);
+void SetSingleCarrierTx(struct adapter *padapter, u8 bStart);
+void SetSingleToneTx(struct adapter *padapter, u8 bStart);
+void SetCarrierSuppressionTx(struct adapter *padapter, u8 bStart);
+void PhySetTxPowerLevel(struct adapter *padapter);
+
+void fill_txdesc_for_mp(struct adapter *padapter, u8 *ptxdesc);
+void SetPacketTx(struct adapter *padapter);
+void SetPacketRx(struct adapter *padapter, u8 bStartRx);
+
+void ResetPhyRxPktCount(struct adapter *padapter);
+u32 GetPhyRxPktReceived(struct adapter *padapter);
+u32 GetPhyRxPktCRC32Error(struct adapter *padapter);
+
+s32 SetPowerTracking(struct adapter *padapter, u8 enable);
+void GetPowerTracking(struct adapter *padapter, u8 *enable);
+
+u32 mp_query_psd(struct adapter *padapter, u8 *data);
+
+void Hal_SetAntenna(struct adapter *padapter);
+void Hal_SetBandwidth(struct adapter *padapter);
+
+void Hal_SetTxPower(struct adapter *padapter);
+void Hal_SetCarrierSuppressionTx(struct adapter *padapter, u8 bStart);
+void Hal_SetSingleToneTx (struct adapter *padapter , u8 bStart);
+void Hal_SetSingleCarrierTx (struct adapter *padapter, u8 bStart);
+void Hal_SetContinuousTx (struct adapter *padapter, u8 bStart);
+void Hal_SetBandwidth(struct adapter *padapter);
+
+void Hal_SetDataRate(struct adapter *padapter);
+void Hal_SetChannel(struct adapter *padapter);
+void Hal_SetAntennaPathPower(struct adapter *padapter);
+s32 Hal_SetThermalMeter(struct adapter *padapter, u8 target_ther);
+s32 Hal_SetPowerTracking(struct adapter *padapter, u8 enable);
+void Hal_GetPowerTracking(struct adapter *padapter, u8 * enable);
+void Hal_GetThermalMeter(struct adapter *padapter, u8 *value);
+void Hal_mpt_SwitchRfSetting(struct adapter *padapter);
+void Hal_MPT_CCKTxPowerAdjust(struct adapter * Adapter, bool bInCH14);
+void Hal_MPT_CCKTxPowerAdjustbyIndex(struct adapter *padapter, bool beven);
+void Hal_SetCCKTxPower(struct adapter *padapter, u8 * TxPower);
+void Hal_SetOFDMTxPower(struct adapter *padapter, u8 * TxPower);
+void Hal_TriggerRFThermalMeter(struct adapter *padapter);
+u8 Hal_ReadRFThermalMeter(struct adapter *padapter);
+void Hal_SetCCKContinuousTx(struct adapter *padapter, u8 bStart);
+void Hal_SetOFDMContinuousTx(struct adapter *padapter, u8 bStart);
+void Hal_ProSetCrystalCap (struct adapter *padapter , u32 CrystalCapVal);
+void MP_PHY_SetRFPathSwitch(struct adapter *padapter , bool bMain);
+u32 mpt_ProQueryCalTxPower(struct adapter *padapter, u8 RfPath);
+void MPT_PwrCtlDM(struct adapter *padapter, u32 bstart);
+u8 MptToMgntRate(u32 MptRateIdx);
+
+#endif /* _RTW_MP_H_ */
diff --git a/drivers/staging/rtl8723bs/include/rtw_odm.h b/drivers/staging/rtl8723bs/include/rtw_odm.h
new file mode 100644
index 000000000000..961ae2c218f9
--- /dev/null
+++ b/drivers/staging/rtl8723bs/include/rtw_odm.h
@@ -0,0 +1,36 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2013 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#ifndef __RTW_ODM_H__
+#define __RTW_ODM_H__
+
+#include <drv_types.h>
+
+/*
+* This file provides utilities/wrappers for rtw driver to use ODM
+*/
+
+void rtw_odm_dbg_comp_msg(void *sel, struct adapter *adapter);
+void rtw_odm_dbg_comp_set(struct adapter *adapter, u64 comps);
+void rtw_odm_dbg_level_msg(void *sel, struct adapter *adapter);
+void rtw_odm_dbg_level_set(struct adapter *adapter, u32 level);
+
+void rtw_odm_ability_msg(void *sel, struct adapter *adapter);
+void rtw_odm_ability_set(struct adapter *adapter, u32 ability);
+
+void rtw_odm_adaptivity_parm_msg(void *sel, struct adapter *adapter);
+void rtw_odm_adaptivity_parm_set(struct adapter *adapter, s8 TH_L2H_ini, s8 TH_EDCCA_HL_diff,
+ s8 IGI_Base, bool ForceEDCCA, u8 AdapEn_RSSI, u8 IGI_LowerBound);
+void rtw_odm_get_perpkt_rssi(void *sel, struct adapter *adapter);
+#endif /* __RTW_ODM_H__ */
diff --git a/drivers/staging/rtl8723bs/include/rtw_pwrctrl.h b/drivers/staging/rtl8723bs/include/rtw_pwrctrl.h
new file mode 100644
index 000000000000..cf8e766a27a8
--- /dev/null
+++ b/drivers/staging/rtl8723bs/include/rtw_pwrctrl.h
@@ -0,0 +1,375 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#ifndef __RTW_PWRCTRL_H_
+#define __RTW_PWRCTRL_H_
+
+
+#define FW_PWR0 0
+#define FW_PWR1 1
+#define FW_PWR2 2
+#define FW_PWR3 3
+
+
+#define HW_PWR0 7
+#define HW_PWR1 6
+#define HW_PWR2 2
+#define HW_PWR3 0
+#define HW_PWR4 8
+
+#define FW_PWRMSK 0x7
+
+
+#define XMIT_ALIVE BIT(0)
+#define RECV_ALIVE BIT(1)
+#define CMD_ALIVE BIT(2)
+#define EVT_ALIVE BIT(3)
+#define BTCOEX_ALIVE BIT(4)
+
+
+enum Power_Mgnt
+{
+ PS_MODE_ACTIVE = 0 ,
+ PS_MODE_MIN ,
+ PS_MODE_MAX ,
+ PS_MODE_DTIM , /* PS_MODE_SELF_DEFINED */
+ PS_MODE_VOIP ,
+ PS_MODE_UAPSD_WMM ,
+ PS_MODE_UAPSD ,
+ PS_MODE_IBSS ,
+ PS_MODE_WWLAN ,
+ PM_Radio_Off ,
+ PM_Card_Disable ,
+ PS_MODE_NUM,
+};
+
+#ifdef CONFIG_PNO_SUPPORT
+#define MAX_PNO_LIST_COUNT 16
+#define MAX_SCAN_LIST_COUNT 14 /* 2.4G only */
+#endif
+
+/*
+ BIT[2:0] = HW state
+ BIT[3] = Protocol PS state, 0: register active state, 1: register sleep state
+ BIT[4] = sub-state
+*/
+
+#define PS_DPS BIT(0)
+#define PS_LCLK (PS_DPS)
+#define PS_RF_OFF BIT(1)
+#define PS_ALL_ON BIT(2)
+#define PS_ST_ACTIVE BIT(3)
+
+#define PS_ISR_ENABLE BIT(4)
+#define PS_IMR_ENABLE BIT(5)
+#define PS_ACK BIT(6)
+#define PS_TOGGLE BIT(7)
+
+#define PS_STATE_MASK (0x0F)
+#define PS_STATE_HW_MASK (0x07)
+#define PS_SEQ_MASK (0xc0)
+
+#define PS_STATE(x) (PS_STATE_MASK & (x))
+#define PS_STATE_HW(x) (PS_STATE_HW_MASK & (x))
+#define PS_SEQ(x) (PS_SEQ_MASK & (x))
+
+#define PS_STATE_S0 (PS_DPS)
+#define PS_STATE_S1 (PS_LCLK)
+#define PS_STATE_S2 (PS_RF_OFF)
+#define PS_STATE_S3 (PS_ALL_ON)
+#define PS_STATE_S4 ((PS_ST_ACTIVE) | (PS_ALL_ON))
+
+
+#define PS_IS_RF_ON(x) ((x) & (PS_ALL_ON))
+#define PS_IS_ACTIVE(x) ((x) & (PS_ST_ACTIVE))
+#define CLR_PS_STATE(x) ((x) = ((x) & (0xF0)))
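
The bit layout described in the comment above is what the PS_* masks and helpers encode; a tiny, purely illustrative example of how a state byte decomposes (the value is made up):

/* Illustrative only: made-up value showing how the helpers above are read. */
static inline void example_ps_state_decode(void)
{
	u8 state = PS_STATE_S4 | PS_ACK;	/* 0x4C: active, all on, ACK bit set */

	if (PS_IS_ACTIVE(state) && PS_IS_RF_ON(state)) {
		/* S4: hardware fully on, protocol registers in the active state */
	}

	CLR_PS_STATE(state);	/* drop the state nibble, keep the upper control/sequence bits */
	(void)state;
}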
+
+
+struct reportpwrstate_parm {
+ unsigned char mode;
+ unsigned char state; /* the CPWM value */
+ unsigned short rsvd;
+};
+
+
+typedef _sema _pwrlock;
+
+
+#define LPS_DELAY_TIME 1*HZ /* 1 sec */
+
+#define EXE_PWR_NONE 0x01
+#define EXE_PWR_IPS 0x02
+#define EXE_PWR_LPS 0x04
+
+/* RF state. */
+enum rt_rf_power_state {
+ rf_on, /* RF is on after RFSleep or RFOff */
+ rf_sleep, /* 802.11 Power Save mode */
+ rf_off, /* HW/SW Radio OFF or Inactive Power Save */
+ /* Add the new RF state above this line ===== */
+ rf_max
+};
+
+/* RF Off Level for IPS or HW/SW radio off */
+#define RT_RF_OFF_LEVL_ASPM BIT(0) /* PCI ASPM */
+#define RT_RF_OFF_LEVL_CLK_REQ BIT(1) /* PCI clock request */
+#define RT_RF_OFF_LEVL_PCI_D3 BIT(2) /* PCI D3 mode */
+#define RT_RF_OFF_LEVL_HALT_NIC BIT(3) /* NIC halt, re-initialize hw parameters */
+#define RT_RF_OFF_LEVL_FREE_FW BIT(4) /* FW free, re-download the FW */
+#define RT_RF_OFF_LEVL_FW_32K BIT(5) /* FW in 32k */
+#define RT_RF_PS_LEVEL_ALWAYS_ASPM BIT(6) /* Always enable ASPM and Clock Req in initialization. */
+#define RT_RF_LPS_DISALBE_2R BIT(30) /* When LPS is on, disable 2R if no packet is received or transmitted. */
+#define RT_RF_LPS_LEVEL_ASPM BIT(31) /* LPS with ASPM */
+
+#define RT_IN_PS_LEVEL(ppsc, _PS_FLAG) ((ppsc->cur_ps_level & _PS_FLAG) ? true : false)
+#define RT_CLEAR_PS_LEVEL(ppsc, _PS_FLAG) (ppsc->cur_ps_level &= (~(_PS_FLAG)))
+#define RT_SET_PS_LEVEL(ppsc, _PS_FLAG) (ppsc->cur_ps_level |= _PS_FLAG)
+
+/* ASPM OSC Control bit, added by Roger, 2013.03.29. */
+#define RT_PCI_ASPM_OSC_IGNORE 0 /* PCI ASPM ignore OSC control in default */
+#define RT_PCI_ASPM_OSC_ENABLE BIT0 /* PCI ASPM controlled by OS according to ACPI Spec 5.0 */
+#define RT_PCI_ASPM_OSC_DISABLE BIT1 /* PCI ASPM controlled by driver or BIOS, i.e., force enable ASPM */
+
+
+enum _PS_BBRegBackup_ {
+ PSBBREG_RF0 = 0,
+ PSBBREG_RF1,
+ PSBBREG_RF2,
+ PSBBREG_AFE0,
+ PSBBREG_TOTALCNT
+};
+
+enum { /* for ips_mode */
+ IPS_NONE = 0,
+ IPS_NORMAL,
+ IPS_LEVEL_2,
+ IPS_NUM
+};
+
+/* Design for pwrctrl_priv.ips_deny, 32 bits for 32 reasons at most */
+enum PS_DENY_REASON {
+ PS_DENY_DRV_INITIAL = 0,
+ PS_DENY_SCAN,
+ PS_DENY_JOIN,
+ PS_DENY_DISCONNECT,
+ PS_DENY_SUSPEND,
+ PS_DENY_IOCTL,
+ PS_DENY_MGNT_TX,
+ PS_DENY_DRV_REMOVE = 30,
+ PS_DENY_OTHERS = 31
+};
+
+#ifdef CONFIG_PNO_SUPPORT
+typedef struct pno_nlo_info
+{
+ u32 fast_scan_period; /* Fast scan period */
+ u32 ssid_num; /* number of entry */
+ u32 slow_scan_period; /* slow scan period */
+ u32 fast_scan_iterations; /* Fast scan iterations */
+ u8 ssid_length[MAX_PNO_LIST_COUNT]; /* SSID Length Array */
+ u8 ssid_cipher_info[MAX_PNO_LIST_COUNT]; /* Cipher information for security */
+ u8 ssid_channel_info[MAX_PNO_LIST_COUNT]; /* channel information */
+}pno_nlo_info_t;
+
+typedef struct pno_ssid {
+ u32 SSID_len;
+ u8 SSID[32];
+} pno_ssid_t;
+
+typedef struct pno_ssid_list {
+ pno_ssid_t node[MAX_PNO_LIST_COUNT];
+}pno_ssid_list_t;
+
+typedef struct pno_scan_channel_info
+{
+ u8 channel;
+ u8 tx_power;
+ u8 timeout;
+ u8 active; /* set to 1 for active scan, otherwise passive scan. */
+}pno_scan_channel_info_t;
+
+typedef struct pno_scan_info
+{
+ u8 enableRFE; /* Enable RFE */
+ u8 period_scan_time; /* exclusive with fast_scan_period and slow_scan_period */
+ u8 periodScan; /* exclusive with fast_scan_period and slow_scan_period */
+ u8 orig_80_offset; /* original channel 80 offset */
+ u8 orig_40_offset; /* original channel 40 offset */
+ u8 orig_bw; /* original bandwidth */
+ u8 orig_ch; /* original channel */
+ u8 channel_num; /* number of channel */
+ u64 rfe_type; /* rfe_type && 0x00000000000000ff */
+ pno_scan_channel_info_t ssid_channel_info[MAX_SCAN_LIST_COUNT];
+}pno_scan_info_t;
+#endif /* CONFIG_PNO_SUPPORT */
+
+struct pwrctrl_priv
+{
+ _pwrlock lock;
+ _pwrlock check_32k_lock;
+ volatile u8 rpwm; /* requested power state for fw */
+ volatile u8 cpwm; /* fw current power state. updated when 1. read from HCPWM 2. driver lowers power level */
+ volatile u8 tog; /* toggling */
+ volatile u8 cpwm_tog; /* toggling */
+
+ u8 pwr_mode;
+ u8 smart_ps;
+ u8 bcn_ant_mode;
+ u8 dtim;
+
+ u32 alives;
+ _workitem cpwm_event;
+ u8 brpwmtimeout;
+ _workitem rpwmtimeoutwi;
+ _timer pwr_rpwm_timer;
+ u8 bpower_saving; /* for LPS/IPS */
+
+ u8 b_hw_radio_off;
+ u8 reg_rfoff;
+ u8 reg_pdnmode; /* powerdown mode */
+ u32 rfoff_reason;
+
+ /* RF OFF Level */
+ u32 cur_ps_level;
+ u32 reg_rfps_level;
+
+ uint ips_enter_cnts;
+ uint ips_leave_cnts;
+
+ u8 ips_mode;
+ u8 ips_org_mode;
+ u8 ips_mode_req; /* used to accept the mode setting request, will update to ipsmode later */
+ uint bips_processing;
+ unsigned long ips_deny_time; /* will deny IPS when system time is smaller than this */
+ u8 pre_ips_type;/* 0: default flow, 1: card-disable flow */
+
+ /* ps_deny: if 0, power save is free to go; otherwise deny all kinds of power save. */
+ /* Use PS_DENY_REASON to decide reason. */
+ /* Don't access this variable directly without control function, */
+ /* and this variable should be protected by lock. */
+ u32 ps_deny;
+
+ u8 ps_processing; /* temporarily used to mark whether in rtw_ps_processor */
+
+ u8 fw_psmode_iface_id;
+ u8 bLeisurePs;
+ u8 LpsIdleCount;
+ u8 power_mgnt;
+ u8 org_power_mgnt;
+ u8 bFwCurrentInPSMode;
+ unsigned long DelayLPSLastTimeStamp;
+ s32 pnp_current_pwr_state;
+ u8 pnp_bstop_trx;
+
+
+ u8 bInternalAutoSuspend;
+ u8 bInSuspend;
+
+ u8 bAutoResume;
+ u8 autopm_cnt;
+
+ u8 bSupportRemoteWakeup;
+ u8 wowlan_wake_reason;
+ u8 wowlan_ap_mode;
+ u8 wowlan_mode;
+#ifdef CONFIG_WOWLAN
+ u8 wowlan_pattern;
+ u8 wowlan_magic;
+ u8 wowlan_unicast;
+ u8 wowlan_pattern_idx;
+ u8 wowlan_pno_enable;
+#ifdef CONFIG_PNO_SUPPORT
+ u8 pno_in_resume;
+ u8 pno_inited;
+ pno_nlo_info_t *pnlo_info;
+ pno_scan_info_t *pscan_info;
+ pno_ssid_list_t *pno_ssid_list;
+#endif
+ u32 wowlan_pattern_context[8][5];
+ u64 wowlan_fw_iv;
+#endif /* CONFIG_WOWLAN */
+ _timer pwr_state_check_timer;
+ int pwr_state_check_interval;
+ u8 pwr_state_check_cnts;
+
+ int ps_flag; /* used by autosuspend */
+
+ enum rt_rf_power_state rf_pwrstate;/* cur power state, only for IPS */
+ /* rt_rf_power_state current_rfpwrstate; */
+ enum rt_rf_power_state change_rfpwrstate;
+
+ u8 bHWPowerdown; /* power down mode selection. 0:radio off, 1:power down */
+ u8 bHWPwrPindetect; /* come from registrypriv.hwpwrp_detect. enable power down function. 0:disable, 1:enable */
+ u8 bkeepfwalive;
+ u8 brfoffbyhw;
+ unsigned long PS_BBRegBackup[PSBBREG_TOTALCNT];
+};
+
+#define rtw_get_ips_mode_req(pwrctl) \
+ (pwrctl)->ips_mode_req
+
+#define rtw_ips_mode_req(pwrctl, ips_mode) \
+ (pwrctl)->ips_mode_req = (ips_mode)
+
+#define RTW_PWR_STATE_CHK_INTERVAL 2000
+
+#define _rtw_set_pwr_state_check_timer(pwrctl, ms) \
+ do { \
+ /*DBG_871X("%s _rtw_set_pwr_state_check_timer(%p, %d)\n", __func__, (pwrctl), (ms));*/ \
+ _set_timer(&(pwrctl)->pwr_state_check_timer, (ms)); \
+ } while (0)
+
+#define rtw_set_pwr_state_check_timer(pwrctl) \
+ _rtw_set_pwr_state_check_timer((pwrctl), (pwrctl)->pwr_state_check_interval)
+
+extern void rtw_init_pwrctrl_priv(struct adapter *adapter);
+extern void rtw_free_pwrctrl_priv(struct adapter * adapter);
+
+s32 rtw_register_task_alive(struct adapter *, u32 task);
+void rtw_unregister_task_alive(struct adapter *, u32 task);
+extern s32 rtw_register_tx_alive(struct adapter *padapter);
+extern void rtw_unregister_tx_alive(struct adapter *padapter);
+extern s32 rtw_register_cmd_alive(struct adapter *padapter);
+extern void rtw_unregister_cmd_alive(struct adapter *padapter);
+extern void cpwm_int_hdl(struct adapter *padapter, struct reportpwrstate_parm *preportpwrstate);
+extern void LPS_Leave_check(struct adapter *padapter);
+
+extern void LeaveAllPowerSaveMode(struct adapter * Adapter);
+extern void LeaveAllPowerSaveModeDirect(struct adapter * Adapter);
+void _ips_enter(struct adapter *padapter);
+void ips_enter(struct adapter *padapter);
+int _ips_leave(struct adapter *padapter);
+int ips_leave(struct adapter *padapter);
+
+void rtw_ps_processor(struct adapter *padapter);
+
+s32 LPS_RF_ON_check(struct adapter *padapter, u32 delay_ms);
+void LPS_Enter(struct adapter *padapter, const char *msg);
+void LPS_Leave(struct adapter *padapter, const char *msg);
+void traffic_check_for_leave_lps(struct adapter *padapter, u8 tx, u32 tx_packets);
+void rtw_set_ps_mode(struct adapter *padapter, u8 ps_mode, u8 smart_ps, u8 bcn_ant_mode, const char *msg);
+void rtw_set_rpwm(struct adapter *padapter, u8 val8);
+
+void rtw_set_ips_deny(struct adapter *padapter, u32 ms);
+int _rtw_pwr_wakeup(struct adapter *padapter, u32 ips_deffer_ms, const char *caller);
+#define rtw_pwr_wakeup(adapter) _rtw_pwr_wakeup(adapter, RTW_PWR_STATE_CHK_INTERVAL, __func__)
+#define rtw_pwr_wakeup_ex(adapter, ips_deffer_ms) _rtw_pwr_wakeup(adapter, ips_deffer_ms, __func__)
+int rtw_pm_set_ips(struct adapter *padapter, u8 mode);
+int rtw_pm_set_lps(struct adapter *padapter, u8 mode);
+
+void rtw_ps_deny(struct adapter *padapter, enum PS_DENY_REASON reason);
+void rtw_ps_deny_cancel(struct adapter *padapter, enum PS_DENY_REASON reason);
+u32 rtw_ps_deny_get(struct adapter *padapter);
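
The ps_deny mechanism above is meant to bracket work that must not race with power saving; a hedged sketch of a typical caller, where the helper name and the "non-zero means success" convention of rtw_pwr_wakeup() are assumptions:

/* Illustrative only: keeps the chip awake while some IO is performed. */
static void example_do_io_without_powersave(struct adapter *padapter)
{
	rtw_ps_deny(padapter, PS_DENY_IOCTL);

	if (rtw_pwr_wakeup(padapter)) {	/* non-zero assumed to mean success */
		/* ... issue register/firmware IO that needs the chip awake ... */
	}

	rtw_ps_deny_cancel(padapter, PS_DENY_IOCTL);
}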
+
+#endif /* __RTW_PWRCTRL_H_ */
diff --git a/drivers/staging/rtl8723bs/include/rtw_qos.h b/drivers/staging/rtl8723bs/include/rtw_qos.h
new file mode 100644
index 000000000000..ce6d9142bc5a
--- /dev/null
+++ b/drivers/staging/rtl8723bs/include/rtw_qos.h
@@ -0,0 +1,27 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+
+
+#ifndef _RTW_QOS_H_
+#define _RTW_QOS_H_
+
+
+
+struct qos_priv {
+ unsigned int qos_option; /* bit mask option: u-apsd, s-apsd, ts, block ack... */
+};
+
+
+#endif /* _RTW_QOS_H_ */
diff --git a/drivers/staging/rtl8723bs/include/rtw_recv.h b/drivers/staging/rtl8723bs/include/rtw_recv.h
new file mode 100644
index 000000000000..570a3c333aa0
--- /dev/null
+++ b/drivers/staging/rtl8723bs/include/rtw_recv.h
@@ -0,0 +1,553 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#ifndef _RTW_RECV_H_
+#define _RTW_RECV_H_
+
+ #ifdef CONFIG_SINGLE_RECV_BUF
+ #define NR_RECVBUFF (1)
+ #else
+ #define NR_RECVBUFF (8)
+ #endif /* CONFIG_SINGLE_RECV_BUF */
+
+ #define NR_PREALLOC_RECV_SKB (8)
+
+#define NR_RECVFRAME 256
+
+#define RXFRAME_ALIGN 8
+#define RXFRAME_ALIGN_SZ (1<<RXFRAME_ALIGN)
+
+#define DRVINFO_SZ 4 /* unit is 8bytes */
+
+#define MAX_RXFRAME_CNT 512
+#define MAX_RX_NUMBLKS (32)
+#define RECVFRAME_HDR_ALIGN 128
+
+
+#define PHY_RSSI_SLID_WIN_MAX 100
+#define PHY_LINKQUALITY_SLID_WIN_MAX 20
+
+
+#define SNAP_SIZE sizeof(struct ieee80211_snap_hdr)
+
+#define RX_MPDU_QUEUE 0
+#define RX_CMD_QUEUE 1
+#define RX_MAX_QUEUE 2
+
+#define MAX_SUBFRAME_COUNT 64
+extern u8 rtw_rfc1042_header[];
+extern u8 rtw_bridge_tunnel_header[];
+
+/* for Rx reordering buffer control */
+struct recv_reorder_ctrl
+{
+ struct adapter *padapter;
+ u8 enable;
+ u16 indicate_seq;/* wstart_b, init_value = 0xffff */
+ u16 wend_b;
+ u8 wsize_b;
+ struct __queue pending_recvframe_queue;
+ _timer reordering_ctrl_timer;
+};
+
+struct stainfo_rxcache {
+ u16 tid_rxseq[16];
+/*
+ unsigned short tid0_rxseq;
+ unsigned short tid1_rxseq;
+ unsigned short tid2_rxseq;
+ unsigned short tid3_rxseq;
+ unsigned short tid4_rxseq;
+ unsigned short tid5_rxseq;
+ unsigned short tid6_rxseq;
+ unsigned short tid7_rxseq;
+ unsigned short tid8_rxseq;
+ unsigned short tid9_rxseq;
+ unsigned short tid10_rxseq;
+ unsigned short tid11_rxseq;
+ unsigned short tid12_rxseq;
+ unsigned short tid13_rxseq;
+ unsigned short tid14_rxseq;
+ unsigned short tid15_rxseq;
+*/
+};
+
+
+struct smooth_rssi_data {
+ u32 elements[100]; /* array to store values */
+ u32 index; /* index to current array to store */
+ u32 total_num; /* num of valid elements */
+ u32 total_val; /* sum of valid elements */
+};
+
+struct signal_stat {
+ u8 update_req; /* used to indicate */
+ u8 avg_val; /* avg of valid elements */
+ u32 total_num; /* num of valid elements */
+ u32 total_val; /* sum of valid elements */
+};
+
+struct phy_info {
+ u8 RxPWDBAll;
+
+ u8 SignalQuality; /* in 0-100 index. */
+ s8 RxMIMOSignalQuality[4]; /* per-path's EVM */
+ u8 RxMIMOEVMdbm[4]; /* per-path's EVM dbm */
+
+ u8 RxMIMOSignalStrength[4];/* in 0~100 index */
+
+ u16 Cfo_short[4]; /* per-path's Cfo_short */
+ u16 Cfo_tail[4]; /* per-path's Cfo_tail */
+
+ s8 RxPower; /* in dBm Translate from PWdB */
+ s8 RecvSignalPower;/* Real power in dBm for this packet, no beautification and aggregation. Keep this raw info to be used for the other procedures. */
+ u8 BTRxRSSIPercentage;
+ u8 SignalStrength; /* in 0-100 index. */
+
+ s8 RxPwr[4]; /* per-path's pwdb */
+ u8 RxSNR[4]; /* per-path's SNR */
+ u8 BandWidth;
+ u8 btCoexPwrAdjust;
+};
+
+#ifdef DBG_RX_SIGNAL_DISPLAY_RAW_DATA
+struct rx_raw_rssi
+{
+ u8 data_rate;
+ u8 pwdball;
+ s8 pwr_all;
+
+ u8 mimo_singal_strength[4];/* in 0~100 index */
+ u8 mimo_singal_quality[4];
+
+ s8 ofdm_pwr[4];
+ u8 ofdm_snr[4];
+
+};
+#endif
+
+struct rx_pkt_attrib {
+ u16 pkt_len;
+ u8 physt;
+ u8 drvinfo_sz;
+ u8 shift_sz;
+ u8 hdrlen; /* the WLAN Header Len */
+ u8 to_fr_ds;
+ u8 amsdu;
+ u8 qos;
+ u8 priority;
+ u8 pw_save;
+ u8 mdata;
+ u16 seq_num;
+ u8 frag_num;
+ u8 mfrag;
+ u8 order;
+ u8 privacy; /* in frame_ctrl field */
+ u8 bdecrypted;
+ u8 encrypt; /* 0 means no encryption; non-zero indicates the encryption algorithm */
+ u8 iv_len;
+ u8 icv_len;
+ u8 crc_err;
+ u8 icv_err;
+
+ u16 eth_type;
+
+ u8 dst[ETH_ALEN];
+ u8 src[ETH_ALEN];
+ u8 ta[ETH_ALEN];
+ u8 ra[ETH_ALEN];
+ u8 bssid[ETH_ALEN];
+
+ u8 ack_policy;
+
+/* ifdef CONFIG_TCP_CSUM_OFFLOAD_RX */
+ u8 tcpchk_valid; /* 0: invalid, 1: valid */
+ u8 ip_chkrpt; /* 0: incorrect, 1: correct */
+ u8 tcp_chkrpt; /* 0: incorrect, 1: correct */
+/* endif */
+ u8 key_index;
+
+ u8 data_rate;
+ u8 sgi;
+ u8 pkt_rpt_type;
+ u32 MacIDValidEntry[2]; /* 64 bits present 64 entry. */
+
+/*
+ u8 signal_qual;
+ s8 rx_mimo_signal_qual[2];
+ u8 signal_strength;
+ u32 RxPWDBAll;
+ s32 RecvSignalPower;
+*/
+ struct phy_info phy_info;
+};
+
+
+/* These definitions are used for Rx packet reordering. */
+#define SN_LESS(a, b) (((a-b)&0x800)!= 0)
+#define SN_EQUAL(a, b) (a == b)
+/* define REORDER_WIN_SIZE 128 */
+/* define REORDER_ENTRY_NUM 128 */
+#define REORDER_WAIT_TIME (50) /* (ms) */
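
SN_LESS() is a modular comparison on the 12-bit 802.11 sequence space: (a - b) & 0x800 is non-zero exactly when a lies in the half-window behind b, so the test stays correct across the wrap at 4096. For instance, SN_LESS(4090, 5) is true ((4090 - 5) & 0x800 == 0x800), while SN_LESS(5, 4090) is false, so a frame numbered 5 is treated as newer than one numbered 4090. A small sketch of how the reorder logic might use it; the helper itself is hypothetical:

/* Illustrative only: decide whether an incoming MPDU is older than the
 * window start kept in recv_reorder_ctrl. */
static inline bool example_seq_is_old(struct recv_reorder_ctrl *preorder_ctrl, u16 seq_num)
{
	return SN_LESS(seq_num, preorder_ctrl->indicate_seq);
}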
+
+#define RECVBUFF_ALIGN_SZ 8
+
+#define RXDESC_SIZE 24
+#define RXDESC_OFFSET RXDESC_SIZE
+
+struct recv_stat {
+ __le32 rxdw0;
+ __le32 rxdw1;
+ __le32 rxdw2;
+ __le32 rxdw3;
+#ifndef BUF_DESC_ARCH
+ __le32 rxdw4;
+ __le32 rxdw5;
+#endif /* if BUF_DESC_ARCH is defined, rx_buf_desc occupy 4 double words */
+};
+
+#define EOR BIT(30)
+
+/*
+accessors of recv_priv: rtw_recv_entry (dispatch/passive level); recv_thread (passive);
+returnpkt (dispatch); halt (passive)
+
+protected with enter_critical sections
+*/
+struct recv_priv {
+ _lock lock;
+ struct __queue free_recv_queue;
+ struct __queue recv_pending_queue;
+ struct __queue uc_swdec_pending_queue;
+ u8 *pallocated_frame_buf;
+ u8 *precv_frame_buf;
+ uint free_recvframe_cnt;
+ struct adapter *adapter;
+ u32 bIsAnyNonBEPkts;
+ u64 rx_bytes;
+ u64 rx_pkts;
+ u64 rx_drop;
+ uint rx_icv_err;
+ uint rx_largepacket_crcerr;
+ uint rx_smallpacket_crcerr;
+ uint rx_middlepacket_crcerr;
+
+ struct tasklet_struct irq_prepare_beacon_tasklet;
+ struct tasklet_struct recv_tasklet;
+ struct sk_buff_head free_recv_skb_queue;
+ struct sk_buff_head rx_skb_queue;
+#ifdef CONFIG_RX_INDICATE_QUEUE
+ struct task rx_indicate_tasklet;
+ struct ifqueue rx_indicate_queue;
+#endif /* CONFIG_RX_INDICATE_QUEUE */
+
+ u8 *pallocated_recv_buf;
+ u8 *precv_buf; /* 4 alignment */
+ struct __queue free_recv_buf_queue;
+ u32 free_recv_buf_queue_cnt;
+
+ struct __queue recv_buf_pending_queue;
+
+ /* For displaying the PHY information */
+ u8 is_signal_dbg; /* for debug */
+ u8 signal_strength_dbg; /* for debug */
+
+ u8 signal_strength;
+ u8 signal_qual;
+ s8 rssi; /* translate_percentage_to_dbm(ptarget_wlan->network.PhyInfo.SignalStrength); */
+ #ifdef DBG_RX_SIGNAL_DISPLAY_RAW_DATA
+ struct rx_raw_rssi raw_rssi_info;
+ #endif
+ /* s8 rxpwdb; */
+ s16 noise;
+ /* int RxSNRdB[2]; */
+ /* s8 RxRssi[2]; */
+ /* int FalseAlmCnt_all; */
+
+
+ _timer signal_stat_timer;
+ u32 signal_stat_sampling_interval;
+ /* u32 signal_stat_converging_constant; */
+ struct signal_stat signal_qual_data;
+ struct signal_stat signal_strength_data;
+};
+
+#define rtw_set_signal_stat_timer(recvpriv) _set_timer(&(recvpriv)->signal_stat_timer, (recvpriv)->signal_stat_sampling_interval)
+
+struct sta_recv_priv {
+
+ _lock lock;
+ sint option;
+
+ /* struct __queue blk_strms[MAX_RX_NUMBLKS]; */
+ struct __queue defrag_q; /* keeping the fragment frame until defrag */
+
+ struct stainfo_rxcache rxcache;
+
+ /* uint sta_rx_bytes; */
+ /* uint sta_rx_pkts; */
+ /* uint sta_rx_fail; */
+
+};
+
+
+struct recv_buf
+{
+ struct list_head list;
+
+ _lock recvbuf_lock;
+
+ u32 ref_cnt;
+
+ struct adapter * adapter;
+
+ u8 *pbuf;
+ u8 *pallocated_buf;
+
+ u32 len;
+ u8 *phead;
+ u8 *pdata;
+ u8 *ptail;
+ u8 *pend;
+
+ _pkt *pskb;
+ u8 reuse;
+};
+
+
+/*
+ head ----->
+
+ data ----->
+
+ payload
+
+ tail ----->
+
+
+ end ----->
+
+ len = (unsigned int)(tail - data);
+
+*/
+struct recv_frame_hdr
+{
+ struct list_head list;
+#ifndef CONFIG_BSD_RX_USE_MBUF
+ struct sk_buff *pkt;
+ struct sk_buff *pkt_newalloc;
+#else /* CONFIG_BSD_RX_USE_MBUF */
+ _pkt *pkt;
+ _pkt *pkt_newalloc;
+#endif /* CONFIG_BSD_RX_USE_MBUF */
+
+ struct adapter *adapter;
+
+ u8 fragcnt;
+
+ int frame_tag;
+
+ struct rx_pkt_attrib attrib;
+
+ uint len;
+ u8 *rx_head;
+ u8 *rx_data;
+ u8 *rx_tail;
+ u8 *rx_end;
+
+ void *precvbuf;
+
+
+ /* */
+ struct sta_info *psta;
+
+ /* for A-MPDU Rx reordering buffer control */
+ struct recv_reorder_ctrl *preorder_ctrl;
+};
+
+
+union recv_frame{
+ union{
+ struct list_head list;
+ struct recv_frame_hdr hdr;
+ uint mem[RECVFRAME_HDR_ALIGN>>2];
+ }u;
+
+ /* uint mem[MAX_RXSZ>>2]; */
+
+};
+
+enum RX_PACKET_TYPE {
+ NORMAL_RX,/* Normal rx packet */
+ TX_REPORT1,/* CCX */
+ TX_REPORT2,/* TX RPT */
+ HIS_REPORT,/* USB HISR RPT */
+ C2H_PACKET
+};
+
+extern union recv_frame *_rtw_alloc_recvframe (struct __queue *pfree_recv_queue); /* get a free recv_frame from pfree_recv_queue */
+extern union recv_frame *rtw_alloc_recvframe (struct __queue *pfree_recv_queue); /* get a free recv_frame from pfree_recv_queue */
+extern int rtw_free_recvframe(union recv_frame *precvframe, struct __queue *pfree_recv_queue);
+
+#define rtw_dequeue_recvframe(queue) rtw_alloc_recvframe(queue)
+extern int _rtw_enqueue_recvframe(union recv_frame *precvframe, struct __queue *queue);
+extern int rtw_enqueue_recvframe(union recv_frame *precvframe, struct __queue *queue);
+
+extern void rtw_free_recvframe_queue(struct __queue *pframequeue, struct __queue *pfree_recv_queue);
+u32 rtw_free_uc_swdec_pending_queue(struct adapter *adapter);
+
+sint rtw_enqueue_recvbuf_to_head(struct recv_buf *precvbuf, struct __queue *queue);
+sint rtw_enqueue_recvbuf(struct recv_buf *precvbuf, struct __queue *queue);
+struct recv_buf *rtw_dequeue_recvbuf (struct __queue *queue);
+
+void rtw_reordering_ctrl_timeout_handler(void *pcontext);
+
+__inline static u8 *get_rxmem(union recv_frame *precvframe)
+{
+ /* always return rx_head... */
+ if (precvframe == NULL)
+ return NULL;
+
+ return precvframe->u.hdr.rx_head;
+}
+
+__inline static u8 *get_recvframe_data(union recv_frame *precvframe)
+{
+
+ /* always return rx_data */
+ if (precvframe == NULL)
+ return NULL;
+
+ return precvframe->u.hdr.rx_data;
+
+}
+
+__inline static u8 *recvframe_pull(union recv_frame *precvframe, sint sz)
+{
+ /* rx_data += sz; move rx_data forward by sz bytes */
+
+ /* used to remove sz bytes from the front of rx_data; updates rx_data and returns the updated pointer to the caller */
+
+
+ if (precvframe == NULL)
+ return NULL;
+
+
+ precvframe->u.hdr.rx_data += sz;
+
+ if (precvframe->u.hdr.rx_data > precvframe->u.hdr.rx_tail)
+ {
+ precvframe->u.hdr.rx_data -= sz;
+ return NULL;
+ }
+
+ precvframe->u.hdr.len -=sz;
+
+ return precvframe->u.hdr.rx_data;
+
+}
+
+__inline static u8 *recvframe_put(union recv_frame *precvframe, sint sz)
+{
+ /* rx_tail += sz; move rx_tail forward by sz bytes */
+
+ /* used to append sz bytes at rx_tail; updates rx_tail and returns the updated pointer to the caller */
+ /* after putting, rx_tail must not exceed rx_end. */
+ unsigned char * prev_rx_tail;
+
+ if (precvframe == NULL)
+ return NULL;
+
+ prev_rx_tail = precvframe->u.hdr.rx_tail;
+
+ precvframe->u.hdr.rx_tail += sz;
+
+ if (precvframe->u.hdr.rx_tail > precvframe->u.hdr.rx_end)
+ {
+ precvframe->u.hdr.rx_tail = prev_rx_tail;
+ return NULL;
+ }
+
+ precvframe->u.hdr.len +=sz;
+
+ return precvframe->u.hdr.rx_tail;
+
+}
+
+
+
+__inline static u8 *recvframe_pull_tail(union recv_frame *precvframe, sint sz)
+{
+ /* rmv data from rx_tail (by yitsen) */
+
+ /* used to trim sz bytes from rx_tail; updates rx_tail and returns the updated pointer to the caller */
+ /* after pulling, rx_tail must not fall below rx_data. */
+
+ if (precvframe == NULL)
+ return NULL;
+
+ precvframe->u.hdr.rx_tail -= sz;
+
+ if (precvframe->u.hdr.rx_tail < precvframe->u.hdr.rx_data)
+ {
+ precvframe->u.hdr.rx_tail += sz;
+ return NULL;
+ }
+
+ precvframe->u.hdr.len -=sz;
+
+ return precvframe->u.hdr.rx_tail;
+
+}
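
recvframe_pull() and recvframe_pull_tail() play the same role as skb_pull()/skb_trim() for the driver's recv_frame wrapper, and both return NULL when the move would leave the buffer inconsistent. An illustrative sketch of checking that return; the helper is hypothetical, and hdrlen/icv_len come from rx_pkt_attrib:

/* Illustrative only: drop the WLAN header from the front and the ICV from
 * the tail, checking the NULL return both helpers use on out-of-bounds moves. */
static inline int example_strip_hdr_and_icv(union recv_frame *precvframe)
{
	struct rx_pkt_attrib *pattrib = &precvframe->u.hdr.attrib;

	if (!recvframe_pull(precvframe, pattrib->hdrlen))
		return -1;

	if (pattrib->icv_len && !recvframe_pull_tail(precvframe, pattrib->icv_len))
		return -1;

	return 0;
}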
+
+__inline static union recv_frame *rxmem_to_recvframe(u8 *rxmem)
+{
+ /* because recv_frame buffers are aligned to RXFRAME_ALIGN_SZ bytes, we can recover the union recv_frame */
+ /* from any given member of recv_frame. */
+ /* rxmem may point at any member/address within the recv_frame */
+
+ return (union recv_frame*)(((SIZE_PTR)rxmem >> RXFRAME_ALIGN) << RXFRAME_ALIGN);
+
+}
+
+__inline static sint get_recvframe_len(union recv_frame *precvframe)
+{
+ return precvframe->u.hdr.len;
+}
+
+
+__inline static s32 translate_percentage_to_dbm(u32 SignalStrengthIndex)
+{
+ s32 SignalPower; /* in dBm. */
+
+#ifdef CONFIG_SKIP_SIGNAL_SCALE_MAPPING
+ /* Translate to dBm (x =y-100) */
+ SignalPower = SignalStrengthIndex - 100;
+#else
+ /* Translate to dBm (x = 0.5y-95). */
+ SignalPower = (s32)((SignalStrengthIndex + 1) >> 1);
+ SignalPower -= 95;
+#endif
+
+ return SignalPower;
+}
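
As a worked example of the default mapping above, a signal-strength index of 70 becomes ((70 + 1) >> 1) - 95 = -60 dBm, while with CONFIG_SKIP_SIGNAL_SCALE_MAPPING the same index maps to 70 - 100 = -30 dBm.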
+
+
+struct sta_info;
+
+extern void _rtw_init_sta_recv_priv(struct sta_recv_priv *psta_recvpriv);
+
+extern void mgt_dispatcher(struct adapter *padapter, union recv_frame *precv_frame);
+
+#endif
diff --git a/drivers/staging/rtl8723bs/include/rtw_rf.h b/drivers/staging/rtl8723bs/include/rtw_rf.h
new file mode 100644
index 000000000000..f9becab7f7ad
--- /dev/null
+++ b/drivers/staging/rtl8723bs/include/rtw_rf.h
@@ -0,0 +1,159 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#ifndef __RTW_RF_H_
+#define __RTW_RF_H_
+
+
+#define OFDM_PHY 1
+#define MIXED_PHY 2
+#define CCK_PHY 3
+
+#define NumRates 13
+
+/* slot time for 11g */
+#define SHORT_SLOT_TIME 9
+#define NON_SHORT_SLOT_TIME 20
+
+#define RTL8711_RF_MAX_SENS 6
+#define RTL8711_RF_DEF_SENS 4
+
+/* */
+/* We now define the following channels as the max channels in each channel plan. */
+/* 2G, total 14 chnls */
+/* {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14} */
+/* 5G, total 24 chnls */
+/* {36, 40, 44, 48, 52, 56, 60, 64, 100, 104, 108, 112, 116, 120,
+ * 124, 128, 132, 136, 140, 149, 153, 157, 161, 165} */
+#define MAX_CHANNEL_NUM_2G 14
+#define MAX_CHANNEL_NUM_5G 24
+#define MAX_CHANNEL_NUM 38/* 14+24 */
+
+#define NUM_REGULATORYS 1
+
+/* Country codes */
+#define USA 0x555320
+#define EUROPE 0x1 /* temp, should be provided later */
+#define JAPAN 0x2 /* temp, should be provided later */
+
+struct regulatory_class {
+ u32 starting_freq; /* MHz, */
+ u8 channel_set[MAX_CHANNEL_NUM];
+ u8 channel_cck_power[MAX_CHANNEL_NUM];/* dbm */
+ u8 channel_ofdm_power[MAX_CHANNEL_NUM];/* dbm */
+ u8 txpower_limit; /* dbm */
+ u8 channel_spacing; /* MHz */
+ u8 modem;
+};
+
+enum CAPABILITY {
+ cESS = 0x0001,
+ cIBSS = 0x0002,
+ cPollable = 0x0004,
+ cPollReq = 0x0008,
+ cPrivacy = 0x0010,
+ cShortPreamble = 0x0020,
+ cPBCC = 0x0040,
+ cChannelAgility = 0x0080,
+ cSpectrumMgnt = 0x0100,
+ cQos = 0x0200, /* For HCCA, use with CF-Pollable and CF-PollReq */
+ cShortSlotTime = 0x0400,
+ cAPSD = 0x0800,
+ cRM = 0x1000, /* RRM (Radio Request Measurement) */
+ cDSSS_OFDM = 0x2000,
+ cDelayedBA = 0x4000,
+ cImmediateBA = 0x8000,
+};
+
+enum _REG_PREAMBLE_MODE {
+ PREAMBLE_LONG = 1,
+ PREAMBLE_AUTO = 2,
+ PREAMBLE_SHORT = 3,
+};
+
+enum _RTL8712_RF_MIMO_CONFIG_ {
+ RTL8712_RFCONFIG_1T = 0x10,
+ RTL8712_RFCONFIG_2T = 0x20,
+ RTL8712_RFCONFIG_1R = 0x01,
+ RTL8712_RFCONFIG_2R = 0x02,
+ RTL8712_RFCONFIG_1T1R = 0x11,
+ RTL8712_RFCONFIG_1T2R = 0x12,
+ RTL8712_RFCONFIG_TURBO = 0x92,
+ RTL8712_RFCONFIG_2T2R = 0x22
+};
+
+enum RF90_RADIO_PATH {
+ RF90_PATH_A = 0, /* Radio Path A */
+ RF90_PATH_B = 1, /* Radio Path B */
+ RF90_PATH_C = 2, /* Radio Path C */
+ RF90_PATH_D = 3 /* Radio Path D */
+};
+
+/* Bandwidth Offset */
+#define HAL_PRIME_CHNL_OFFSET_DONT_CARE 0
+#define HAL_PRIME_CHNL_OFFSET_LOWER 1
+#define HAL_PRIME_CHNL_OFFSET_UPPER 2
+
+/* Represent Channel Width in HT Capabilities */
+enum CHANNEL_WIDTH {
+ CHANNEL_WIDTH_20 = 0,
+ CHANNEL_WIDTH_40 = 1,
+ CHANNEL_WIDTH_80 = 2,
+ CHANNEL_WIDTH_160 = 3,
+ CHANNEL_WIDTH_80_80 = 4,
+ CHANNEL_WIDTH_MAX = 5,
+};
+
+/* Represent Extension Channel Offset in HT Capabilities */
+/* This is available only in 40Mhz mode. */
+enum EXTCHNL_OFFSET {
+ EXTCHNL_OFFSET_NO_EXT = 0,
+ EXTCHNL_OFFSET_UPPER = 1,
+ EXTCHNL_OFFSET_NO_DEF = 2,
+ EXTCHNL_OFFSET_LOWER = 3,
+};
+
+enum VHT_DATA_SC {
+ VHT_DATA_SC_DONOT_CARE = 0,
+ VHT_DATA_SC_20_UPPER_OF_80MHZ = 1,
+ VHT_DATA_SC_20_LOWER_OF_80MHZ = 2,
+ VHT_DATA_SC_20_UPPERST_OF_80MHZ = 3,
+ VHT_DATA_SC_20_LOWEST_OF_80MHZ = 4,
+ VHT_DATA_SC_20_RECV1 = 5,
+ VHT_DATA_SC_20_RECV2 = 6,
+ VHT_DATA_SC_20_RECV3 = 7,
+ VHT_DATA_SC_20_RECV4 = 8,
+ VHT_DATA_SC_40_UPPER_OF_80MHZ = 9,
+ VHT_DATA_SC_40_LOWER_OF_80MHZ = 10,
+};
+
+enum PROTECTION_MODE {
+ PROTECTION_MODE_AUTO = 0,
+ PROTECTION_MODE_FORCE_ENABLE = 1,
+ PROTECTION_MODE_FORCE_DISABLE = 2,
+};
+
+/* 2007/11/15 MH Define different RF type. */
+enum RT_RF_TYPE_DEFINITION {
+ RF_1T2R = 0,
+ RF_2T4R = 1,
+ RF_2T2R = 2,
+ RF_1T1R = 3,
+ RF_2T2R_GREEN = 4,
+ RF_MAX_TYPE = 5,
+};
+
+u32 rtw_ch2freq(u32 ch);
+
+#endif /* __RTW_RF_H_ */
diff --git a/drivers/staging/rtl8723bs/include/rtw_security.h b/drivers/staging/rtl8723bs/include/rtw_security.h
new file mode 100644
index 000000000000..d5af72b2335d
--- /dev/null
+++ b/drivers/staging/rtl8723bs/include/rtw_security.h
@@ -0,0 +1,440 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#ifndef __RTW_SECURITY_H_
+#define __RTW_SECURITY_H_
+
+
+#define _NO_PRIVACY_ 0x0
+#define _WEP40_ 0x1
+#define _TKIP_ 0x2
+#define _TKIP_WTMIC_ 0x3
+#define _AES_ 0x4
+#define _WEP104_ 0x5
+#define _WEP_WPA_MIXED_ 0x07 /* WEP + WPA */
+#define _SMS4_ 0x06
+#define _BIP_ 0x8
+#define is_wep_enc(alg) (((alg) == _WEP40_) || ((alg) == _WEP104_))
+
+const char *security_type_str(u8 value);
+
+#define _WPA_IE_ID_ 0xdd
+#define _WPA2_IE_ID_ 0x30
+
+#define SHA256_MAC_LEN 32
+#define AES_BLOCK_SIZE 16
+#define AES_PRIV_SIZE (4 * 44)
+
+#define RTW_KEK_LEN 16
+#define RTW_KCK_LEN 16
+#define RTW_REPLAY_CTR_LEN 8
+
+enum {
+ ENCRYP_PROTOCOL_OPENSYS, /* open system */
+ ENCRYP_PROTOCOL_WEP, /* WEP */
+ ENCRYP_PROTOCOL_WPA, /* WPA */
+ ENCRYP_PROTOCOL_WPA2, /* WPA2 */
+ ENCRYP_PROTOCOL_WAPI, /* WAPI: Not supported in this version */
+ ENCRYP_PROTOCOL_MAX
+};
+
+
+#ifndef Ndis802_11AuthModeWPA2
+#define Ndis802_11AuthModeWPA2 (Ndis802_11AuthModeWPANone + 1)
+#endif
+
+#ifndef Ndis802_11AuthModeWPA2PSK
+#define Ndis802_11AuthModeWPA2PSK (Ndis802_11AuthModeWPANone + 2)
+#endif
+
+union pn48 {
+
+ u64 val;
+
+#ifdef __LITTLE_ENDIAN
+
+struct {
+ u8 TSC0;
+ u8 TSC1;
+ u8 TSC2;
+ u8 TSC3;
+ u8 TSC4;
+ u8 TSC5;
+ u8 TSC6;
+ u8 TSC7;
+} _byte_;
+#else
+struct {
+ u8 TSC7;
+ u8 TSC6;
+ u8 TSC5;
+ u8 TSC4;
+ u8 TSC3;
+ u8 TSC2;
+ u8 TSC1;
+ u8 TSC0;
+} _byte_;
+#endif
+
+};
+
+union Keytype {
+ u8 skey[16];
+ u32 lkey[4];
+};
+
+
+typedef struct _RT_PMKID_LIST
+{
+ u8 bUsed;
+ u8 Bssid[6];
+ u8 PMKID[16];
+ u8 SsidBuf[33];
+ u8* ssid_octet;
+ u16 ssid_length;
+} RT_PMKID_LIST, *PRT_PMKID_LIST;
+
+
+struct security_priv
+{
+ u32 dot11AuthAlgrthm; /* 802.11 auth, could be open, shared, 8021x and authswitch */
+ u32 dot11PrivacyAlgrthm; /* This specifies the privacy algorithm for shared-key auth. */
+
+ /* WEP */
+ u32 dot11PrivacyKeyIndex; /* this is only valid for legacy WEP, 0~3 for key id. (tx key index) */
+ union Keytype dot11DefKey[4]; /* this is only valid for def. key */
+ u32 dot11DefKeylen[4];
+ u8 key_mask; /* use to restore wep key after hal_init */
+
+ u32 dot118021XGrpPrivacy; /* This specifies the privacy algorithm used for the group key */
+ u32 dot118021XGrpKeyid; /* key id used for Grp Key (tx key index) */
+ union Keytype dot118021XGrpKey[BIP_MAX_KEYID]; /* 802.1x Group Key, for inx0 and inx1 */
+ union Keytype dot118021XGrptxmickey[BIP_MAX_KEYID];
+ union Keytype dot118021XGrprxmickey[BIP_MAX_KEYID];
+ union pn48 dot11Grptxpn; /* PN48 used for Grp Key xmit. */
+ union pn48 dot11Grprxpn; /* PN48 used for Grp Key recv. */
+ u32 dot11wBIPKeyid; /* key id used for BIP Key (tx key index) */
+ union Keytype dot11wBIPKey[6]; /* BIP Key, for index4 and index5 */
+ union pn48 dot11wBIPtxpn; /* PN48 used for Grp Key xmit. */
+ union pn48 dot11wBIPrxpn; /* PN48 used for Grp Key recv. */
+
+ /* extend security capabilities for AP_MODE */
+ unsigned int dot8021xalg;/* 0:disable, 1:psk, 2:802.1x */
+ unsigned int wpa_psk;/* 0:disable, bit(0): WPA, bit(1):WPA2 */
+ unsigned int wpa_group_cipher;
+ unsigned int wpa2_group_cipher;
+ unsigned int wpa_pairwise_cipher;
+ unsigned int wpa2_pairwise_cipher;
+
+ u8 wps_ie[MAX_WPS_IE_LEN];/* added in assoc req */
+ int wps_ie_len;
+
+
+ u8 binstallGrpkey;
+#ifdef CONFIG_GTK_OL
+ u8 binstallKCK_KEK;
+#endif /* CONFIG_GTK_OL */
+ u8 binstallBIPkey;
+ u8 busetkipkey;
+ /* _timer tkip_timer; */
+ u8 bcheck_grpkey;
+ u8 bgrpkey_handshake;
+
+ s32 sw_encrypt;/* from registry_priv */
+ s32 sw_decrypt;/* from registry_priv */
+
+ s32 hw_decrypted;/* if hw_decrypted == false for rx packets, the hw is not ready yet. */
+
+
+ /* keeps the auth_type & enc_status from upper layer ioctl(wpa_supplicant or wzc) */
+ u32 ndisauthtype; /* enum NDIS_802_11_AUTHENTICATION_MODE */
+ u32 ndisencryptstatus; /* NDIS_802_11_ENCRYPTION_STATUS */
+
+ struct wlan_bssid_ex sec_bss; /* for joinbss (h2c buffer) usage */
+
+ struct ndis_802_11_wep ndiswep;
+
+ u8 assoc_info[600];
+ u8 szofcapability[256]; /* for wpa2 usage */
+ u8 oidassociation[512]; /* for wpa/wpa2 usage */
+ u8 authenticator_ie[256]; /* store ap security information element */
+ u8 supplicant_ie[256]; /* store sta security information element */
+
+
+ /* for tkip countermeasure */
+ unsigned long last_mic_err_time;
+ u8 btkip_countermeasure;
+ u8 btkip_wait_report;
+ u32 btkip_countermeasure_time;
+
+ /* For WPA2 Pre-Authentication. */
+ RT_PMKID_LIST PMKIDList[NUM_PMKID_CACHE]; /* Renamed from PreAuthKey[NUM_PRE_AUTH_KEY]. Annie, 2006-10-13. */
+ u8 PMKIDIndex;
+
+ u8 bWepDefaultKeyIdxSet;
+
+#define DBG_SW_SEC_CNT
+#ifdef DBG_SW_SEC_CNT
+ u64 wep_sw_enc_cnt_bc;
+ u64 wep_sw_enc_cnt_mc;
+ u64 wep_sw_enc_cnt_uc;
+ u64 wep_sw_dec_cnt_bc;
+ u64 wep_sw_dec_cnt_mc;
+ u64 wep_sw_dec_cnt_uc;
+
+ u64 tkip_sw_enc_cnt_bc;
+ u64 tkip_sw_enc_cnt_mc;
+ u64 tkip_sw_enc_cnt_uc;
+ u64 tkip_sw_dec_cnt_bc;
+ u64 tkip_sw_dec_cnt_mc;
+ u64 tkip_sw_dec_cnt_uc;
+
+ u64 aes_sw_enc_cnt_bc;
+ u64 aes_sw_enc_cnt_mc;
+ u64 aes_sw_enc_cnt_uc;
+ u64 aes_sw_dec_cnt_bc;
+ u64 aes_sw_dec_cnt_mc;
+ u64 aes_sw_dec_cnt_uc;
+#endif /* DBG_SW_SEC_CNT */
+};
+
+struct sha256_state {
+ u64 length;
+ u32 state[8], curlen;
+ u8 buf[64];
+};
+
+#define GET_ENCRY_ALGO(psecuritypriv, psta, encry_algo, bmcst)\
+do{\
+ switch (psecuritypriv->dot11AuthAlgrthm)\
+ {\
+ case dot11AuthAlgrthm_Open:\
+ case dot11AuthAlgrthm_Shared:\
+ case dot11AuthAlgrthm_Auto:\
+ encry_algo = (u8)psecuritypriv->dot11PrivacyAlgrthm;\
+ break;\
+ case dot11AuthAlgrthm_8021X:\
+ if (bmcst)\
+ encry_algo = (u8)psecuritypriv->dot118021XGrpPrivacy;\
+ else\
+ encry_algo =(u8) psta->dot118021XPrivacy;\
+ break;\
+ case dot11AuthAlgrthm_WAPI:\
+ encry_algo = (u8)psecuritypriv->dot11PrivacyAlgrthm;\
+ break;\
+ }\
+}while (0)
+
+#define _AES_IV_LEN_ 8
+
+#define SET_ICE_IV_LEN(iv_len, icv_len, encrypt)\
+do{\
+ switch (encrypt)\
+ {\
+ case _WEP40_:\
+ case _WEP104_:\
+ iv_len = 4;\
+ icv_len = 4;\
+ break;\
+ case _TKIP_:\
+ iv_len = 8;\
+ icv_len = 4;\
+ break;\
+ case _AES_:\
+ iv_len = 8;\
+ icv_len = 8;\
+ break;\
+ case _SMS4_:\
+ iv_len = 18;\
+ icv_len = 16;\
+ break;\
+ default:\
+ iv_len = 0;\
+ icv_len = 0;\
+ break;\
+ }\
+}while (0)
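
One plausible way the two helpers above combine when preparing a frame (a sketch only, not lifted from the driver sources; psecuritypriv, psta, pattrib and bmcst are illustrative names for the security context, station entry, packet attribute and multicast flag):

	u8 encry_algo = 0;

	GET_ENCRY_ALGO(psecuritypriv, psta, encry_algo, bmcst);
	SET_ICE_IV_LEN(pattrib->iv_len, pattrib->icv_len, encry_algo);
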
+
+
+#define GET_TKIP_PN(iv, dot11txpn)\
+do{\
+ dot11txpn._byte_.TSC0 =iv[2];\
+ dot11txpn._byte_.TSC1 =iv[0];\
+ dot11txpn._byte_.TSC2 =iv[4];\
+ dot11txpn._byte_.TSC3 =iv[5];\
+ dot11txpn._byte_.TSC4 =iv[6];\
+ dot11txpn._byte_.TSC5 =iv[7];\
+}while (0)
+
+
+#define ROL32(A, n) (((A) << (n)) | (((A)>>(32-(n))) & ((1UL << (n)) - 1)))
+#define ROR32(A, n) ROL32((A), 32-(n))
+
+struct mic_data
+{
+ u32 K0, K1; /* Key */
+ u32 L, R; /* Current state */
+ u32 M; /* Message accumulator (single word) */
+ u32 nBytesInM; /* # bytes in M */
+};
+
+extern const u32 Te0[256];
+extern const u32 Te1[256];
+extern const u32 Te2[256];
+extern const u32 Te3[256];
+extern const u32 Te4[256];
+extern const u32 Td0[256];
+extern const u32 Td1[256];
+extern const u32 Td2[256];
+extern const u32 Td3[256];
+extern const u32 Td4[256];
+extern const u32 rcon[10];
+extern const u8 Td4s[256];
+extern const u8 rcons[10];
+
+#define RCON(i) (rcons[(i)] << 24)
+
+static inline u32 rotr(u32 val, int bits)
+{
+ return (val >> bits) | (val << (32 - bits));
+}
+
+#define TE0(i) Te0[((i) >> 24) & 0xff]
+#define TE1(i) rotr(Te0[((i) >> 16) & 0xff], 8)
+#define TE2(i) rotr(Te0[((i) >> 8) & 0xff], 16)
+#define TE3(i) rotr(Te0[(i) & 0xff], 24)
+#define TE41(i) ((Te0[((i) >> 24) & 0xff] << 8) & 0xff000000)
+#define TE42(i) (Te0[((i) >> 16) & 0xff] & 0x00ff0000)
+#define TE43(i) (Te0[((i) >> 8) & 0xff] & 0x0000ff00)
+#define TE44(i) ((Te0[(i) & 0xff] >> 8) & 0x000000ff)
+#define TE421(i) ((Te0[((i) >> 16) & 0xff] << 8) & 0xff000000)
+#define TE432(i) (Te0[((i) >> 8) & 0xff] & 0x00ff0000)
+#define TE443(i) (Te0[(i) & 0xff] & 0x0000ff00)
+#define TE414(i) ((Te0[((i) >> 24) & 0xff] >> 8) & 0x000000ff)
+#define TE4(i) ((Te0[(i)] >> 8) & 0x000000ff)
+
+#define TD0(i) Td0[((i) >> 24) & 0xff]
+#define TD1(i) rotr(Td0[((i) >> 16) & 0xff], 8)
+#define TD2(i) rotr(Td0[((i) >> 8) & 0xff], 16)
+#define TD3(i) rotr(Td0[(i) & 0xff], 24)
+#define TD41(i) (Td4s[((i) >> 24) & 0xff] << 24)
+#define TD42(i) (Td4s[((i) >> 16) & 0xff] << 16)
+#define TD43(i) (Td4s[((i) >> 8) & 0xff] << 8)
+#define TD44(i) (Td4s[(i) & 0xff])
+#define TD0_(i) Td0[(i) & 0xff]
+#define TD1_(i) rotr(Td0[(i) & 0xff], 8)
+#define TD2_(i) rotr(Td0[(i) & 0xff], 16)
+#define TD3_(i) rotr(Td0[(i) & 0xff], 24)
+
+#define GETU32(pt) (((u32)(pt)[0] << 24) ^ ((u32)(pt)[1] << 16) ^ \
+ ((u32)(pt)[2] << 8) ^ ((u32)(pt)[3]))
+
+#define PUTU32(ct, st) { \
+(ct)[0] = (u8)((st) >> 24); (ct)[1] = (u8)((st) >> 16); \
+(ct)[2] = (u8)((st) >> 8); (ct)[3] = (u8)(st); }
+
+#define WPA_GET_BE32(a) ((((u32) (a)[0]) << 24) | (((u32) (a)[1]) << 16) | \
+ (((u32) (a)[2]) << 8) | ((u32) (a)[3]))
+
+#define WPA_PUT_LE16(a, val) \
+ do { \
+ (a)[1] = ((u16) (val)) >> 8; \
+ (a)[0] = ((u16) (val)) & 0xff; \
+ } while (0)
+
+#define WPA_PUT_BE32(a, val) \
+ do { \
+ (a)[0] = (u8) ((((u32) (val)) >> 24) & 0xff); \
+ (a)[1] = (u8) ((((u32) (val)) >> 16) & 0xff); \
+ (a)[2] = (u8) ((((u32) (val)) >> 8) & 0xff); \
+ (a)[3] = (u8) (((u32) (val)) & 0xff); \
+ } while (0)
+
+#define WPA_PUT_BE64(a, val) \
+ do { \
+ (a)[0] = (u8) (((u64) (val)) >> 56); \
+ (a)[1] = (u8) (((u64) (val)) >> 48); \
+ (a)[2] = (u8) (((u64) (val)) >> 40); \
+ (a)[3] = (u8) (((u64) (val)) >> 32); \
+ (a)[4] = (u8) (((u64) (val)) >> 24); \
+ (a)[5] = (u8) (((u64) (val)) >> 16); \
+ (a)[6] = (u8) (((u64) (val)) >> 8); \
+ (a)[7] = (u8) (((u64) (val)) & 0xff); \
+ } while (0)
+
+/* ===== start - public domain SHA256 implementation ===== */
+
+/* This is based on the SHA256 implementation in LibTomCrypt that was released
+ * into the public domain by Tom St Denis. */
+
+/* the K array */
+static const unsigned long K[64] = {
+ 0x428a2f98UL, 0x71374491UL, 0xb5c0fbcfUL, 0xe9b5dba5UL, 0x3956c25bUL,
+ 0x59f111f1UL, 0x923f82a4UL, 0xab1c5ed5UL, 0xd807aa98UL, 0x12835b01UL,
+ 0x243185beUL, 0x550c7dc3UL, 0x72be5d74UL, 0x80deb1feUL, 0x9bdc06a7UL,
+ 0xc19bf174UL, 0xe49b69c1UL, 0xefbe4786UL, 0x0fc19dc6UL, 0x240ca1ccUL,
+ 0x2de92c6fUL, 0x4a7484aaUL, 0x5cb0a9dcUL, 0x76f988daUL, 0x983e5152UL,
+ 0xa831c66dUL, 0xb00327c8UL, 0xbf597fc7UL, 0xc6e00bf3UL, 0xd5a79147UL,
+ 0x06ca6351UL, 0x14292967UL, 0x27b70a85UL, 0x2e1b2138UL, 0x4d2c6dfcUL,
+ 0x53380d13UL, 0x650a7354UL, 0x766a0abbUL, 0x81c2c92eUL, 0x92722c85UL,
+ 0xa2bfe8a1UL, 0xa81a664bUL, 0xc24b8b70UL, 0xc76c51a3UL, 0xd192e819UL,
+ 0xd6990624UL, 0xf40e3585UL, 0x106aa070UL, 0x19a4c116UL, 0x1e376c08UL,
+ 0x2748774cUL, 0x34b0bcb5UL, 0x391c0cb3UL, 0x4ed8aa4aUL, 0x5b9cca4fUL,
+ 0x682e6ff3UL, 0x748f82eeUL, 0x78a5636fUL, 0x84c87814UL, 0x8cc70208UL,
+ 0x90befffaUL, 0xa4506cebUL, 0xbef9a3f7UL, 0xc67178f2UL
+};
+
+
+/* Various logical functions */
+#define RORc(x, y) \
+(((((unsigned long) (x) & 0xFFFFFFFFUL) >> (unsigned long) ((y) & 31)) | \
+ ((unsigned long) (x) << (unsigned long) (32 - ((y) & 31)))) & 0xFFFFFFFFUL)
+#define Ch(x, y, z) (z ^ (x & (y ^ z)))
+#define Maj(x, y, z) (((x | y) & z) | (x & y))
+#define S(x, n) RORc((x), (n))
+#define R(x, n) (((x)&0xFFFFFFFFUL)>>(n))
+#define Sigma0(x) (S(x, 2) ^ S(x, 13) ^ S(x, 22))
+#define Sigma1(x) (S(x, 6) ^ S(x, 11) ^ S(x, 25))
+#define Gamma0(x) (S(x, 7) ^ S(x, 18) ^ R(x, 3))
+#define Gamma1(x) (S(x, 17) ^ S(x, 19) ^ R(x, 10))
+#ifndef MIN
+#define MIN(x, y) (((x) < (y)) ? (x) : (y))
+#endif
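
For orientation only, a hedged sketch of how the logical functions above express one SHA-256 compression round in the LibTomCrypt style the comment references; a..h, W[] (the expanded message schedule), i, t0 and t1 are assumed local u32 values, not names from this header:

	t0 = h + Sigma1(e) + Ch(e, f, g) + K[i] + W[i];
	t1 = Sigma0(a) + Maj(a, b, c);
	d += t0;
	h = t0 + t1;
	/* successive rounds pass the working variables in rotated order rather than copying them */
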
+int omac1_aes_128(u8 *key, u8 *data, size_t data_len, u8 *mac);
+void rtw_secmicsetkey(struct mic_data *pmicdata, u8 * key);
+void rtw_secmicappendbyte(struct mic_data *pmicdata, u8 b);
+void rtw_secmicappend(struct mic_data *pmicdata, u8 * src, u32 nBytes);
+void rtw_secgetmic(struct mic_data *pmicdata, u8 * dst);
+
+void rtw_seccalctkipmic(
+ u8 * key,
+ u8 *header,
+ u8 *data,
+ u32 data_len,
+ u8 *Miccode,
+ u8 priority);
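
A hedged usage sketch of the Michael MIC helpers declared above; mic_key, msdu, msdu_len and mic_out are illustrative buffers, and the exact byte stream fed to the MIC (header fields, padding) is defined by the implementation, not here:

	struct mic_data mic;

	rtw_secmicsetkey(&mic, mic_key);        /* 8-byte Michael key */
	rtw_secmicappend(&mic, msdu, msdu_len); /* feed the protected bytes */
	rtw_secgetmic(&mic, mic_out);           /* write out the 8-byte MIC */
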
+
+u32 rtw_aes_encrypt(struct adapter *padapter, u8 *pxmitframe);
+u32 rtw_tkip_encrypt(struct adapter *padapter, u8 *pxmitframe);
+void rtw_wep_encrypt(struct adapter *padapter, u8 *pxmitframe);
+
+u32 rtw_aes_decrypt(struct adapter *padapter, u8 *precvframe);
+u32 rtw_tkip_decrypt(struct adapter *padapter, u8 *precvframe);
+void rtw_wep_decrypt(struct adapter *padapter, u8 *precvframe);
+u32 rtw_BIP_verify(struct adapter *padapter, u8 *precvframe);
+
+void rtw_sec_restore_wep_key(struct adapter *adapter);
+u8 rtw_handle_tkip_countermeasure(struct adapter * adapter, const char *caller);
+
+#endif /* __RTL871X_SECURITY_H_ */
diff --git a/drivers/staging/rtl8723bs/include/rtw_version.h b/drivers/staging/rtl8723bs/include/rtw_version.h
new file mode 100644
index 000000000000..628d987c3e86
--- /dev/null
+++ b/drivers/staging/rtl8723bs/include/rtw_version.h
@@ -0,0 +1,2 @@
+#define DRIVERVERSION "v4.3.5.5_12290.20140916_BTCOEX20140507-4E40"
+#define BTCOEXVERSION "BTCOEX20140507-4E40"
diff --git a/drivers/staging/rtl8723bs/include/rtw_wifi_regd.h b/drivers/staging/rtl8723bs/include/rtw_wifi_regd.h
new file mode 100644
index 000000000000..d97ca1630bd4
--- /dev/null
+++ b/drivers/staging/rtl8723bs/include/rtw_wifi_regd.h
@@ -0,0 +1,28 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ *****************************************************************************/
+
+#ifndef __RTW_WIFI_REGD_H__
+#define __RTW_WIFI_REGD_H__
+
+struct country_code_to_enum_rd {
+ u16 countrycode;
+ const char *iso_name;
+};
+
+enum country_code_type_t {
+ COUNTRY_CODE_USER = 0,
+
+ /*add new channel plan above this line */
+ COUNTRY_CODE_MAX
+};
+
+int rtw_regd_init(struct adapter *padapter,
+ void (*reg_notifier)(struct wiphy *wiphy,
+ struct regulatory_request *request));
+void rtw_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request);
+
+
+#endif
diff --git a/drivers/staging/rtl8723bs/include/rtw_xmit.h b/drivers/staging/rtl8723bs/include/rtw_xmit.h
new file mode 100644
index 000000000000..11571649cd2c
--- /dev/null
+++ b/drivers/staging/rtl8723bs/include/rtw_xmit.h
@@ -0,0 +1,528 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#ifndef _RTW_XMIT_H_
+#define _RTW_XMIT_H_
+
+
+#define MAX_XMITBUF_SZ (20480) /* 20k */
+
+#define NR_XMITBUFF (16)
+
+#define XMITBUF_ALIGN_SZ 512
+
+/* xmit extension buffer definition */
+#define MAX_XMIT_EXTBUF_SZ (1536)
+#define NR_XMIT_EXTBUFF (32)
+
+#define MAX_CMDBUF_SZ (5120) /* 4096 */
+
+#define MAX_NUMBLKS (1)
+
+#define XMIT_VO_QUEUE (0)
+#define XMIT_VI_QUEUE (1)
+#define XMIT_BE_QUEUE (2)
+#define XMIT_BK_QUEUE (3)
+
+#define VO_QUEUE_INX 0
+#define VI_QUEUE_INX 1
+#define BE_QUEUE_INX 2
+#define BK_QUEUE_INX 3
+#define BCN_QUEUE_INX 4
+#define MGT_QUEUE_INX 5
+#define HIGH_QUEUE_INX 6
+#define TXCMD_QUEUE_INX 7
+
+#define HW_QUEUE_ENTRY 8
+
+#define WEP_IV(pattrib_iv, dot11txpn, keyidx)\
+do{\
+ pattrib_iv[0] = dot11txpn._byte_.TSC0;\
+ pattrib_iv[1] = dot11txpn._byte_.TSC1;\
+ pattrib_iv[2] = dot11txpn._byte_.TSC2;\
+ pattrib_iv[3] = ((keyidx & 0x3)<<6);\
+ dot11txpn.val = (dot11txpn.val == 0xffffff) ? 0: (dot11txpn.val+1);\
+}while (0)
+
+
+#define TKIP_IV(pattrib_iv, dot11txpn, keyidx)\
+do{\
+ pattrib_iv[0] = dot11txpn._byte_.TSC1;\
+ pattrib_iv[1] = (dot11txpn._byte_.TSC1 | 0x20) & 0x7f;\
+ pattrib_iv[2] = dot11txpn._byte_.TSC0;\
+ pattrib_iv[3] = BIT(5) | ((keyidx & 0x3)<<6);\
+ pattrib_iv[4] = dot11txpn._byte_.TSC2;\
+ pattrib_iv[5] = dot11txpn._byte_.TSC3;\
+ pattrib_iv[6] = dot11txpn._byte_.TSC4;\
+ pattrib_iv[7] = dot11txpn._byte_.TSC5;\
+ dot11txpn.val = dot11txpn.val == 0xffffffffffffULL ? 0: (dot11txpn.val+1);\
+}while (0)
+
+#define AES_IV(pattrib_iv, dot11txpn, keyidx)\
+do{\
+ pattrib_iv[0] = dot11txpn._byte_.TSC0;\
+ pattrib_iv[1] = dot11txpn._byte_.TSC1;\
+ pattrib_iv[2] = 0;\
+ pattrib_iv[3] = BIT(5) | ((keyidx & 0x3)<<6);\
+ pattrib_iv[4] = dot11txpn._byte_.TSC2;\
+ pattrib_iv[5] = dot11txpn._byte_.TSC3;\
+ pattrib_iv[6] = dot11txpn._byte_.TSC4;\
+ pattrib_iv[7] = dot11txpn._byte_.TSC5;\
+ dot11txpn.val = dot11txpn.val == 0xffffffffffffULL ? 0: (dot11txpn.val+1);\
+}while (0)
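
As a hedged illustration of the IV builders above (the call site and the choice of key index are illustrative; pattrib and psta refer to the pkt_attrib and sta_info structures declared elsewhere in these headers):

	if (pattrib->encrypt == _TKIP_)
		TKIP_IV(pattrib->iv, psta->dot11txpn, pattrib->key_idx);
	else if (pattrib->encrypt == _AES_)
		AES_IV(pattrib->iv, psta->dot11txpn, pattrib->key_idx);
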
+
+
+#define HWXMIT_ENTRY 4
+
+/* For Buffer Descriptor ring architecture */
+#define TXDESC_SIZE 40
+
+#define TXDESC_OFFSET TXDESC_SIZE
+
+enum TXDESC_SC{
+ SC_DONT_CARE = 0x00,
+ SC_UPPER = 0x01,
+ SC_LOWER = 0x02,
+ SC_DUPLICATE = 0x03
+};
+
+#define TXDESC_40_BYTES
+
+struct tx_desc {
+ __le32 txdw0;
+ __le32 txdw1;
+ __le32 txdw2;
+ __le32 txdw3;
+ __le32 txdw4;
+ __le32 txdw5;
+ __le32 txdw6;
+ __le32 txdw7;
+
+#if defined(TXDESC_40_BYTES) || defined(TXDESC_64_BYTES)
+ __le32 txdw8;
+ __le32 txdw9;
+#endif /* TXDESC_40_BYTES */
+
+#ifdef TXDESC_64_BYTES
+ __le32 txdw10;
+ __le32 txdw11;
+
+ /* 2008/05/15 MH: Because of the PCIe HW 4K memory R/W limit and the current 40-byte */
+ /* descriptor size, using more than 102 descriptors (103 * 40 = 4120 > 4096) causes */
+ /* memory R/W CRC errors and all DMA fetches then fail. We must either decrease the */
+ /* descriptor number or enlarge the descriptor size to 64 bytes. */
+ __le32 txdw12;
+ __le32 txdw13;
+ __le32 txdw14;
+ __le32 txdw15;
+#endif
+};
+
+union txdesc {
+ struct tx_desc txdesc;
+ unsigned int value[TXDESC_SIZE>>2];
+};
+
+struct hw_xmit {
+ /* _lock xmit_lock; */
+ /* struct list_head pending; */
+ struct __queue *sta_queue;
+ /* struct hw_txqueue *phwtxqueue; */
+ /* sint txcmdcnt; */
+ int accnt;
+};
+
+/* reduce size */
+struct pkt_attrib
+{
+ u8 type;
+ u8 subtype;
+ u8 bswenc;
+ u8 dhcp_pkt;
+ u16 ether_type;
+ u16 seqnum;
+ u16 pkt_hdrlen; /* the original 802.3 pkt header len */
+ u16 hdrlen; /* the WLAN Header Len */
+ u32 pktlen; /* the original 802.3 pkt raw_data len (does not include the ether_hdr) */
+ u32 last_txcmdsz;
+ u8 nr_frags;
+ u8 encrypt; /* 0: no encryption; non-zero: the encryption algorithm in use */
+ u8 iv_len;
+ u8 icv_len;
+ u8 iv[18];
+ u8 icv[16];
+ u8 priority;
+ u8 ack_policy;
+ u8 mac_id;
+ u8 vcs_mode; /* virtual carrier sense method */
+ u8 dst[ETH_ALEN];
+ u8 src[ETH_ALEN];
+ u8 ta[ETH_ALEN];
+ u8 ra[ETH_ALEN];
+ u8 key_idx;
+ u8 qos_en;
+ u8 ht_en;
+ u8 raid;/* rate adaptive id */
+ u8 bwmode;
+ u8 ch_offset;/* PRIME_CHNL_OFFSET */
+ u8 sgi;/* short GI */
+ u8 ampdu_en;/* tx ampdu enable */
+ u8 ampdu_spacing; /* ampdu_min_spacing for peer sta's rx */
+ u8 mdata;/* more data bit */
+ u8 pctrl;/* per packet txdesc control enable */
+ u8 triggered;/* for ap mode handling Power Saving sta */
+ u8 qsel;
+ u8 order;/* order bit */
+ u8 eosp;
+ u8 rate;
+ u8 intel_proxim;
+ u8 retry_ctrl;
+ u8 mbssid;
+ u8 ldpc;
+ u8 stbc;
+ struct sta_info * psta;
+
+ u8 rtsen;
+ u8 cts2self;
+ union Keytype dot11tkiptxmickey;
+ /* union Keytype dot11tkiprxmickey; */
+ union Keytype dot118021x_UncstKey;
+
+ u8 icmp_pkt;
+
+};
+
+#define WLANHDR_OFFSET 64
+
+#define NULL_FRAMETAG (0x0)
+#define DATA_FRAMETAG 0x01
+#define L2_FRAMETAG 0x02
+#define MGNT_FRAMETAG 0x03
+#define AMSDU_FRAMETAG 0x04
+
+#define EII_FRAMETAG 0x05
+#define IEEE8023_FRAMETAG 0x06
+
+#define MP_FRAMETAG 0x07
+
+#define TXAGG_FRAMETAG 0x08
+
+enum {
+ XMITBUF_DATA = 0,
+ XMITBUF_MGNT = 1,
+ XMITBUF_CMD = 2,
+};
+
+struct submit_ctx{
+ unsigned long submit_time; /* */
+ u32 timeout_ms; /* <0: not synchronous, 0: wait forever, >0: up to ms waiting */
+ int status; /* status for operation */
+ struct completion done;
+};
+
+enum {
+ RTW_SCTX_SUBMITTED = -1,
+ RTW_SCTX_DONE_SUCCESS = 0,
+ RTW_SCTX_DONE_UNKNOWN,
+ RTW_SCTX_DONE_TIMEOUT,
+ RTW_SCTX_DONE_BUF_ALLOC,
+ RTW_SCTX_DONE_BUF_FREE,
+ RTW_SCTX_DONE_WRITE_PORT_ERR,
+ RTW_SCTX_DONE_TX_DESC_NA,
+ RTW_SCTX_DONE_TX_DENY,
+ RTW_SCTX_DONE_CCX_PKT_FAIL,
+ RTW_SCTX_DONE_DRV_STOP,
+ RTW_SCTX_DONE_DEV_REMOVE,
+ RTW_SCTX_DONE_CMD_ERROR,
+};
+
+
+void rtw_sctx_init(struct submit_ctx *sctx, int timeout_ms);
+int rtw_sctx_wait(struct submit_ctx *sctx, const char *msg);
+void rtw_sctx_done_err(struct submit_ctx **sctx, int status);
+void rtw_sctx_done(struct submit_ctx **sctx);
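
A hedged sketch of the intended submit_ctx flow (variable names and the 2000 ms timeout are illustrative; the completing side normally runs in a different context, e.g. a write-port completion path):

	struct submit_ctx sctx;

	rtw_sctx_init(&sctx, 2000);             /* wait up to 2000 ms */
	pxmitbuf->sctx = &sctx;                 /* attach to the buffer being submitted */
	/* ... hand pxmitbuf to the transmit path ... */
	rtw_sctx_wait(&sctx, __func__);         /* blocks until completion or timeout */

	/* completing side: */
	rtw_sctx_done_err(&pxmitbuf->sctx, RTW_SCTX_DONE_SUCCESS);
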
+
+struct xmit_buf
+{
+ struct list_head list;
+
+ struct adapter *padapter;
+
+ u8 *pallocated_buf;
+
+ u8 *pbuf;
+
+ void *priv_data;
+
+ u16 buf_tag; /* 0: Normal xmitbuf, 1: extension xmitbuf, 2:cmd xmitbuf */
+ u16 flags;
+ u32 alloc_sz;
+
+ u32 len;
+
+ struct submit_ctx *sctx;
+
+ u8 *phead;
+ u8 *pdata;
+ u8 *ptail;
+ u8 *pend;
+ u32 ff_hwaddr;
+ u8 pg_num;
+ u8 agg_num;
+
+#if defined(DBG_XMIT_BUF)|| defined(DBG_XMIT_BUF_EXT)
+ u8 no;
+#endif
+
+};
+
+
+struct xmit_frame
+{
+ struct list_head list;
+
+ struct pkt_attrib attrib;
+
+ _pkt *pkt;
+
+ int frame_tag;
+
+ struct adapter *padapter;
+
+ u8 *buf_addr;
+
+ struct xmit_buf *pxmitbuf;
+
+ u8 pg_num;
+ u8 agg_num;
+
+ u8 ack_report;
+
+ u8 *alloc_addr; /* the actual address allocated for this xmitframe */
+ u8 ext_tag; /* 0:data, 1:mgmt */
+
+};
+
+struct tx_servq {
+ struct list_head tx_pending;
+ struct __queue sta_pending;
+ int qcnt;
+};
+
+
+struct sta_xmit_priv
+{
+ _lock lock;
+ sint option;
+ sint apsd_setting; /* When bit mask is on, the associated edca queue supports APSD. */
+
+
+ /* struct tx_servq blk_q[MAX_NUMBLKS]; */
+ struct tx_servq be_q; /* priority == 0, 3 */
+ struct tx_servq bk_q; /* priority == 1, 2 */
+ struct tx_servq vi_q; /* priority == 4, 5 */
+ struct tx_servq vo_q; /* priority == 6, 7 */
+ struct list_head legacy_dz;
+ struct list_head apsd;
+
+ u16 txseq_tid[16];
+
+ /* uint sta_tx_bytes; */
+ /* u64 sta_tx_pkts; */
+ /* uint sta_tx_fail; */
+
+
+};
+
+
+struct hw_txqueue {
+ volatile sint head;
+ volatile sint tail;
+ volatile sint free_sz; /* in units of 64 bytes */
+ volatile sint free_cmdsz;
+ volatile sint txsz[8];
+ uint ff_hwaddr;
+ uint cmd_hwaddr;
+ sint ac_tag;
+};
+
+struct agg_pkt_info{
+ u16 offset;
+ u16 pkt_len;
+};
+
+enum cmdbuf_type {
+ CMDBUF_BEACON = 0x00,
+ CMDBUF_RSVD,
+ CMDBUF_MAX
+};
+
+struct xmit_priv {
+
+ _lock lock;
+
+ _sema xmit_sema;
+ _sema terminate_xmitthread_sema;
+
+ /* struct __queue blk_strms[MAX_NUMBLKS]; */
+ struct __queue be_pending;
+ struct __queue bk_pending;
+ struct __queue vi_pending;
+ struct __queue vo_pending;
+ struct __queue bm_pending;
+
+ /* struct __queue legacy_dz_queue; */
+ /* struct __queue apsd_queue; */
+
+ u8 *pallocated_frame_buf;
+ u8 *pxmit_frame_buf;
+ uint free_xmitframe_cnt;
+ struct __queue free_xmit_queue;
+
+ /* uint mapping_addr; */
+ /* uint pkt_sz; */
+
+ u8 *xframe_ext_alloc_addr;
+ u8 *xframe_ext;
+ uint free_xframe_ext_cnt;
+ struct __queue free_xframe_ext_queue;
+
+ /* struct hw_txqueue be_txqueue; */
+ /* struct hw_txqueue bk_txqueue; */
+ /* struct hw_txqueue vi_txqueue; */
+ /* struct hw_txqueue vo_txqueue; */
+ /* struct hw_txqueue bmc_txqueue; */
+
+ uint frag_len;
+
+ struct adapter *adapter;
+
+ u8 vcs_setting;
+ u8 vcs;
+ u8 vcs_type;
+ /* u16 rts_thresh; */
+
+ u64 tx_bytes;
+ u64 tx_pkts;
+ u64 tx_drop;
+ u64 last_tx_pkts;
+
+ struct hw_xmit *hwxmits;
+ u8 hwxmit_entry;
+
+ u8 wmm_para_seq[4];/* sequence of wmm ac parameters by strength, from large to small; its values are 0->vo, 1->vi, 2->be, 3->bk. */
+
+#ifdef CONFIG_SDIO_TX_TASKLET
+ struct tasklet_struct xmit_tasklet;
+#else
+ void *SdioXmitThread;
+ _sema SdioXmitSema;
+ _sema SdioXmitTerminateSema;
+#endif /* CONFIG_SDIO_TX_TASKLET */
+
+ struct __queue free_xmitbuf_queue;
+ struct __queue pending_xmitbuf_queue;
+ u8 *pallocated_xmitbuf;
+ u8 *pxmitbuf;
+ uint free_xmitbuf_cnt;
+
+ struct __queue free_xmit_extbuf_queue;
+ u8 *pallocated_xmit_extbuf;
+ u8 *pxmit_extbuf;
+ uint free_xmit_extbuf_cnt;
+
+ struct xmit_buf pcmd_xmitbuf[CMDBUF_MAX];
+
+ u16 nqos_ssn;
+
+ int ack_tx;
+ _mutex ack_tx_mutex;
+ struct submit_ctx ack_tx_ops;
+ u8 seq_no;
+ _lock lock_sctx;
+};
+
+extern struct xmit_frame *__rtw_alloc_cmdxmitframe(struct xmit_priv *pxmitpriv,
+ enum cmdbuf_type buf_type);
+#define rtw_alloc_cmdxmitframe(p) __rtw_alloc_cmdxmitframe(p, CMDBUF_RSVD)
+#define rtw_alloc_bcnxmitframe(p) __rtw_alloc_cmdxmitframe(p, CMDBUF_BEACON)
+
+extern struct xmit_buf *rtw_alloc_xmitbuf_ext(struct xmit_priv *pxmitpriv);
+extern s32 rtw_free_xmitbuf_ext(struct xmit_priv *pxmitpriv, struct xmit_buf *pxmitbuf);
+
+extern struct xmit_buf *rtw_alloc_xmitbuf(struct xmit_priv *pxmitpriv);
+extern s32 rtw_free_xmitbuf(struct xmit_priv *pxmitpriv, struct xmit_buf *pxmitbuf);
+
+void rtw_count_tx_stats(struct adapter *padapter, struct xmit_frame *pxmitframe, int sz);
+extern void rtw_update_protection(struct adapter *padapter, u8 *ie, uint ie_len);
+extern s32 rtw_make_wlanhdr(struct adapter *padapter, u8 *hdr, struct pkt_attrib *pattrib);
+extern s32 rtw_put_snap(u8 *data, u16 h_proto);
+
+extern struct xmit_frame *rtw_alloc_xmitframe(struct xmit_priv *pxmitpriv);
+struct xmit_frame *rtw_alloc_xmitframe_ext(struct xmit_priv *pxmitpriv);
+struct xmit_frame *rtw_alloc_xmitframe_once(struct xmit_priv *pxmitpriv);
+extern s32 rtw_free_xmitframe(struct xmit_priv *pxmitpriv, struct xmit_frame *pxmitframe);
+extern void rtw_free_xmitframe_queue(struct xmit_priv *pxmitpriv, struct __queue *pframequeue);
+struct tx_servq *rtw_get_sta_pending(struct adapter *padapter, struct sta_info *psta, sint up, u8 *ac);
+extern s32 rtw_xmitframe_enqueue(struct adapter *padapter, struct xmit_frame *pxmitframe);
+
+extern s32 rtw_xmit_classifier(struct adapter *padapter, struct xmit_frame *pxmitframe);
+extern u32 rtw_calculate_wlan_pkt_size_by_attribue(struct pkt_attrib *pattrib);
+#define rtw_wlan_pkt_size(f) rtw_calculate_wlan_pkt_size_by_attribue(&f->attrib)
+extern s32 rtw_xmitframe_coalesce(struct adapter *padapter, _pkt *pkt, struct xmit_frame *pxmitframe);
+extern s32 rtw_mgmt_xmitframe_coalesce(struct adapter *padapter, _pkt *pkt, struct xmit_frame *pxmitframe);
+s32 _rtw_init_hw_txqueue(struct hw_txqueue* phw_txqueue, u8 ac_tag);
+void _rtw_init_sta_xmit_priv(struct sta_xmit_priv *psta_xmitpriv);
+
+
+s32 rtw_txframes_pending(struct adapter *padapter);
+void rtw_init_hwxmits(struct hw_xmit *phwxmit, sint entry);
+
+
+s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter);
+void _rtw_free_xmit_priv (struct xmit_priv *pxmitpriv);
+
+
+void rtw_alloc_hwxmits(struct adapter *padapter);
+void rtw_free_hwxmits(struct adapter *padapter);
+
+
+s32 rtw_xmit(struct adapter *padapter, _pkt **pkt);
+bool xmitframe_hiq_filter(struct xmit_frame *xmitframe);
+
+sint xmitframe_enqueue_for_sleeping_sta(struct adapter *padapter, struct xmit_frame *pxmitframe);
+void stop_sta_xmit(struct adapter *padapter, struct sta_info *psta);
+void wakeup_sta_to_xmit(struct adapter *padapter, struct sta_info *psta);
+void xmit_delivery_enabled_frames(struct adapter *padapter, struct sta_info *psta);
+
+u8 query_ra_short_GI(struct sta_info *psta);
+
+u8 qos_acm(u8 acm_mask, u8 priority);
+
+void enqueue_pending_xmitbuf(struct xmit_priv *pxmitpriv, struct xmit_buf *pxmitbuf);
+void enqueue_pending_xmitbuf_to_head(struct xmit_priv *pxmitpriv, struct xmit_buf *pxmitbuf);
+struct xmit_buf*dequeue_pending_xmitbuf(struct xmit_priv *pxmitpriv);
+struct xmit_buf*dequeue_pending_xmitbuf_under_survey(struct xmit_priv *pxmitpriv);
+sint check_pending_xmitbuf(struct xmit_priv *pxmitpriv);
+int rtw_xmit_thread(void *context);
+
+u32 rtw_get_ff_hwaddr(struct xmit_frame *pxmitframe);
+
+int rtw_ack_tx_wait(struct xmit_priv *pxmitpriv, u32 timeout_ms);
+void rtw_ack_tx_done(struct xmit_priv *pxmitpriv, int status);
+
+/* include after declaring struct xmit_buf, in order to avoid warning */
+#include <xmit_osdep.h>
+
+#endif /* _RTW_XMIT_H_ */
diff --git a/drivers/staging/rtl8723bs/include/sdio_hal.h b/drivers/staging/rtl8723bs/include/sdio_hal.h
new file mode 100644
index 000000000000..8fd8bbeda2d8
--- /dev/null
+++ b/drivers/staging/rtl8723bs/include/sdio_hal.h
@@ -0,0 +1,28 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#ifndef __SDIO_HAL_H__
+#define __SDIO_HAL_H__
+
+
+extern u8 sd_hal_bus_init(struct adapter *padapter);
+extern u8 sd_hal_bus_deinit(struct adapter *padapter);
+
+u8 sd_int_isr(struct adapter *padapter);
+void sd_int_dpc(struct adapter *padapter);
+void rtw_set_hal_ops(struct adapter *padapter);
+
+void rtl8723bs_set_hal_ops(struct adapter *padapter);
+
+#endif /* __SDIO_HAL_H__ */
diff --git a/drivers/staging/rtl8723bs/include/sdio_ops.h b/drivers/staging/rtl8723bs/include/sdio_ops.h
new file mode 100644
index 000000000000..8fffc652cf0c
--- /dev/null
+++ b/drivers/staging/rtl8723bs/include/sdio_ops.h
@@ -0,0 +1,49 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#ifndef __SDIO_OPS_H__
+#define __SDIO_OPS_H__
+
+
+#include <sdio_ops_linux.h>
+
+extern void sdio_set_intf_ops(struct adapter *padapter, struct _io_ops *pops);
+
+/* extern void sdio_func1cmd52_read(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *rmem); */
+/* extern void sdio_func1cmd52_write(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *wmem); */
+extern u8 SdioLocalCmd52Read1Byte(struct adapter *padapter, u32 addr);
+extern void SdioLocalCmd52Write1Byte(struct adapter *padapter, u32 addr, u8 v);
+extern s32 sdio_local_read(struct adapter *padapter, u32 addr, u32 cnt, u8 *pbuf);
+extern s32 sdio_local_write(struct adapter *padapter, u32 addr, u32 cnt, u8 *pbuf);
+
+u32 _sdio_read32(struct adapter *padapter, u32 addr);
+s32 _sdio_write32(struct adapter *padapter, u32 addr, u32 val);
+
+extern void sd_int_hdl(struct adapter *padapter);
+extern u8 CheckIPSStatus(struct adapter *padapter);
+
+#if defined(CONFIG_WOWLAN) || defined(CONFIG_AP_WOWLAN)
+extern u8 RecvOnePkt(struct adapter *padapter, u32 size);
+#endif /* CONFIG_WOWLAN */
+extern void InitInterrupt8723BSdio(struct adapter *padapter);
+extern void InitSysInterrupt8723BSdio(struct adapter *padapter);
+extern void EnableInterrupt8723BSdio(struct adapter *padapter);
+extern void DisableInterrupt8723BSdio(struct adapter *padapter);
+extern u8 HalQueryTxBufferStatus8723BSdio(struct adapter *padapter);
+extern u8 HalQueryTxOQTBufferStatus8723BSdio(struct adapter *padapter);
+#if defined(CONFIG_WOWLAN) || defined(CONFIG_AP_WOWLAN)
+extern void ClearInterrupt8723BSdio(struct adapter *padapter);
+#endif /* CONFIG_WOWLAN */
+
+#endif /* !__SDIO_OPS_H__ */
diff --git a/drivers/staging/rtl8723bs/include/sdio_ops_linux.h b/drivers/staging/rtl8723bs/include/sdio_ops_linux.h
new file mode 100644
index 000000000000..bd62caec409f
--- /dev/null
+++ b/drivers/staging/rtl8723bs/include/sdio_ops_linux.h
@@ -0,0 +1,40 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#ifndef __SDIO_OPS_LINUX_H__
+#define __SDIO_OPS_LINUX_H__
+
+#define SDIO_ERR_VAL8 0xEA
+#define SDIO_ERR_VAL16 0xEAEA
+#define SDIO_ERR_VAL32 0xEAEAEAEA
+
+u8 sd_f0_read8(struct intf_hdl *pintfhdl, u32 addr, s32 *err);
+
+s32 _sd_cmd52_read(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *pdata);
+s32 _sd_cmd52_write(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *pdata);
+s32 sd_cmd52_read(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *pdata);
+s32 sd_cmd52_write(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *pdata);
+
+u8 sd_read8(struct intf_hdl *pintfhdl, u32 addr, s32 *err);
+u32 sd_read32(struct intf_hdl *pintfhdl, u32 addr, s32 *err);
+s32 _sd_read(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, void *pdata);
+s32 sd_read(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, void *pdata);
+void sd_write8(struct intf_hdl *pintfhdl, u32 addr, u8 v, s32 *err);
+void sd_write32(struct intf_hdl *pintfhdl, u32 addr, u32 v, s32 *err);
+s32 _sd_write(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, void *pdata);
+s32 sd_write(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, void *pdata);
+
+
+void rtw_sdio_set_irq_thd(struct dvobj_priv *dvobj, void *thd_hdl);
+#endif
diff --git a/drivers/staging/rtl8723bs/include/sdio_osintf.h b/drivers/staging/rtl8723bs/include/sdio_osintf.h
new file mode 100644
index 000000000000..86673682fb4f
--- /dev/null
+++ b/drivers/staging/rtl8723bs/include/sdio_osintf.h
@@ -0,0 +1,24 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#ifndef __SDIO_OSINTF_H__
+#define __SDIO_OSINTF_H__
+
+
+
+u8 sd_hal_bus_init(struct adapter *padapter);
+u8 sd_hal_bus_deinit(struct adapter *padapter);
+void sd_c2h_hdl(struct adapter *padapter);
+
+#endif
diff --git a/drivers/staging/rtl8723bs/include/sta_info.h b/drivers/staging/rtl8723bs/include/sta_info.h
new file mode 100644
index 000000000000..84fa116fc5da
--- /dev/null
+++ b/drivers/staging/rtl8723bs/include/sta_info.h
@@ -0,0 +1,392 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#ifndef __STA_INFO_H_
+#define __STA_INFO_H_
+
+
+#define IBSS_START_MAC_ID 2
+#define NUM_STA 32
+#define NUM_ACL 16
+
+
+/* if mode == 0, the sta is allowed once its addr hits the list. */
+/* if mode == 1, the sta is rejected once its addr misses the list. */
+struct rtw_wlan_acl_node {
+ struct list_head list;
+ u8 addr[ETH_ALEN];
+ u8 valid;
+};
+
+/* mode = 0, disable */
+/* mode = 1, accept unless in deny list */
+/* mode = 2, deny unless in accept list */
+struct wlan_acl_pool {
+ int mode;
+ int num;
+ struct rtw_wlan_acl_node aclnode[NUM_ACL];
+ struct __queue acl_node_q;
+};
+
+typedef struct _RSSI_STA{
+ s32 UndecoratedSmoothedPWDB;
+ s32 UndecoratedSmoothedCCK;
+ s32 UndecoratedSmoothedOFDM;
+ u64 PacketMap;
+ u8 ValidBit;
+}RSSI_STA, *PRSSI_STA;
+
+struct stainfo_stats {
+
+ u64 rx_mgnt_pkts;
+ u64 rx_beacon_pkts;
+ u64 rx_probereq_pkts;
+ u64 rx_probersp_pkts;
+ u64 rx_probersp_bm_pkts;
+ u64 rx_probersp_uo_pkts;
+ u64 rx_ctrl_pkts;
+ u64 rx_data_pkts;
+
+ u64 last_rx_mgnt_pkts;
+ u64 last_rx_beacon_pkts;
+ u64 last_rx_probereq_pkts;
+ u64 last_rx_probersp_pkts;
+ u64 last_rx_probersp_bm_pkts;
+ u64 last_rx_probersp_uo_pkts;
+ u64 last_rx_ctrl_pkts;
+ u64 last_rx_data_pkts;
+
+ u64 rx_bytes;
+ u64 rx_drops;
+
+ u64 tx_pkts;
+ u64 tx_bytes;
+ u64 tx_drops;
+};
+
+struct sta_info {
+
+ _lock lock;
+ struct list_head list; /* free_sta_queue */
+ struct list_head hash_list; /* sta_hash */
+ struct adapter *padapter;
+
+ struct sta_xmit_priv sta_xmitpriv;
+ struct sta_recv_priv sta_recvpriv;
+
+ struct __queue sleep_q;
+ unsigned int sleepq_len;
+
+ uint state;
+ uint aid;
+ uint mac_id;
+ uint qos_option;
+ u8 hwaddr[ETH_ALEN];
+
+ uint ieee8021x_blocked; /* 0: allowed, 1:blocked */
+ uint dot118021XPrivacy; /* aes, tkip... */
+ union Keytype dot11tkiptxmickey;
+ union Keytype dot11tkiprxmickey;
+ union Keytype dot118021x_UncstKey;
+ union pn48 dot11txpn; /* PN48 used for Unicast xmit */
+#ifdef CONFIG_GTK_OL
+ u8 kek[RTW_KEK_LEN];
+ u8 kck[RTW_KCK_LEN];
+ u8 replay_ctr[RTW_REPLAY_CTR_LEN];
+#endif /* CONFIG_GTK_OL */
+ union pn48 dot11wtxpn; /* PN48 used for Unicast mgmt xmit. */
+ union pn48 dot11rxpn; /* PN48 used for Unicast recv. */
+
+
+ u8 bssrateset[16];
+ u32 bssratelen;
+ s32 rssi;
+ s32 signal_quality;
+
+ u8 cts2self;
+ u8 rtsen;
+
+ u8 raid;
+ u8 init_rate;
+ u32 ra_mask;
+ u8 wireless_mode; /* NETWORK_TYPE */
+ u8 bw_mode;
+
+ u8 ldpc;
+ u8 stbc;
+
+ struct stainfo_stats sta_stats;
+
+ /* for A-MPDU TX, ADDBA timeout check */
+ _timer addba_retry_timer;
+
+ /* for A-MPDU Rx reordering buffer control */
+ struct recv_reorder_ctrl recvreorder_ctrl[16];
+
+ /* for A-MPDU Tx */
+ /* unsigned char ampdu_txen_bitmap; */
+ u16 BA_starting_seqctrl[16];
+
+
+ struct ht_priv htpriv;
+
+ /* Notes: */
+ /* STA_Mode: */
+ /* curr_network(mlme_priv/security_priv/qos/ht) + sta_info: (STA & AP) CAP/INFO */
+ /* scan_q: AP CAP/INFO */
+
+ /* AP_Mode: */
+ /* curr_network(mlme_priv/security_priv/qos/ht) : AP CAP/INFO */
+ /* sta_info: (AP & STA) CAP/INFO */
+
+ struct list_head asoc_list;
+ struct list_head auth_list;
+
+ unsigned int expire_to;
+ unsigned int auth_seq;
+ unsigned int authalg;
+ unsigned char chg_txt[128];
+
+ u16 capability;
+ int flags;
+
+ int dot8021xalg;/* 0:disable, 1:psk, 2:802.1x */
+ int wpa_psk;/* 0:disable, bit(0): WPA, bit(1):WPA2 */
+ int wpa_group_cipher;
+ int wpa2_group_cipher;
+ int wpa_pairwise_cipher;
+ int wpa2_pairwise_cipher;
+
+ u8 bpairwise_key_installed;
+
+ u8 wpa_ie[32];
+
+ u8 nonerp_set;
+ u8 no_short_slot_time_set;
+ u8 no_short_preamble_set;
+ u8 no_ht_gf_set;
+ u8 no_ht_set;
+ u8 ht_20mhz_set;
+
+ unsigned int tx_ra_bitmap;
+ u8 qos_info;
+
+ u8 max_sp_len;
+ u8 uapsd_bk;/* BIT(0): Delivery enabled, BIT(1): Trigger enabled */
+ u8 uapsd_be;
+ u8 uapsd_vi;
+ u8 uapsd_vo;
+
+ u8 has_legacy_ac;
+ unsigned int sleepq_ac_len;
+
+ u8 under_exist_checking;
+
+ u8 keep_alive_trycnt;
+
+#ifdef CONFIG_AUTO_AP_MODE
+ u8 isrc; /* this device is rc */
+ u16 pid; /* pairing id */
+#endif
+
+ u8 *passoc_req;
+ u32 assoc_req_len;
+
+ /* for DM */
+ RSSI_STA rssi_stat;
+
+ /* ODM_STA_INFO_T */
+ /* ================ODM Relative Info ======================= */
+ /* Please be careful: don't declare too much structure here, it costs memory * the number of supported STAs. */
+ /* */
+ /* */
+ /* 2011/10/20 MH Add for ODM STA info. */
+ /* */
+ /* Driver Write */
+ u8 bValid; /* records whether the sta is linked or not */
+ u8 IOTPeer; /* Enum value. HT_IOT_PEER_E */
+ /* ODM Write */
+ /* 1 PHY_STATUS_INFO */
+ u8 RSSI_Path[4]; /* */
+ u8 RSSI_Ave;
+ u8 RXEVM[4];
+ u8 RXSNR[4];
+
+ u8 rssi_level; /* for Refresh RA mask */
+ /* ODM Write */
+ /* 1 TX_INFO (may changed by IC) */
+ /* TX_INFO_T pTxInfo; Define in IC folder. Move lower layer. */
+ /* */
+ /* ================ODM Relative Info ======================= */
+ /* */
+
+ /* To store the sequence number of received management frame */
+ u16 RxMgmtFrameSeqNum;
+};
+
+#define sta_rx_pkts(sta) \
+ (sta->sta_stats.rx_mgnt_pkts \
+ + sta->sta_stats.rx_ctrl_pkts \
+ + sta->sta_stats.rx_data_pkts)
+
+#define sta_last_rx_pkts(sta) \
+ (sta->sta_stats.last_rx_mgnt_pkts \
+ + sta->sta_stats.last_rx_ctrl_pkts \
+ + sta->sta_stats.last_rx_data_pkts)
+
+#define sta_rx_data_pkts(sta) \
+ (sta->sta_stats.rx_data_pkts)
+
+#define sta_last_rx_data_pkts(sta) \
+ (sta->sta_stats.last_rx_data_pkts)
+
+#define sta_rx_mgnt_pkts(sta) \
+ (sta->sta_stats.rx_mgnt_pkts)
+
+#define sta_last_rx_mgnt_pkts(sta) \
+ (sta->sta_stats.last_rx_mgnt_pkts)
+
+#define sta_rx_beacon_pkts(sta) \
+ (sta->sta_stats.rx_beacon_pkts)
+
+#define sta_last_rx_beacon_pkts(sta) \
+ (sta->sta_stats.last_rx_beacon_pkts)
+
+#define sta_rx_probereq_pkts(sta) \
+ (sta->sta_stats.rx_probereq_pkts)
+
+#define sta_last_rx_probereq_pkts(sta) \
+ (sta->sta_stats.last_rx_probereq_pkts)
+
+#define sta_rx_probersp_pkts(sta) \
+ (sta->sta_stats.rx_probersp_pkts)
+
+#define sta_last_rx_probersp_pkts(sta) \
+ (sta->sta_stats.last_rx_probersp_pkts)
+
+#define sta_rx_probersp_bm_pkts(sta) \
+ (sta->sta_stats.rx_probersp_bm_pkts)
+
+#define sta_last_rx_probersp_bm_pkts(sta) \
+ (sta->sta_stats.last_rx_probersp_bm_pkts)
+
+#define sta_rx_probersp_uo_pkts(sta) \
+ (sta->sta_stats.rx_probersp_uo_pkts)
+
+#define sta_last_rx_probersp_uo_pkts(sta) \
+ (sta->sta_stats.last_rx_probersp_uo_pkts)
+
+#define sta_update_last_rx_pkts(sta) \
+ do { \
+ sta->sta_stats.last_rx_mgnt_pkts = sta->sta_stats.rx_mgnt_pkts; \
+ sta->sta_stats.last_rx_beacon_pkts = sta->sta_stats.rx_beacon_pkts; \
+ sta->sta_stats.last_rx_probereq_pkts = sta->sta_stats.rx_probereq_pkts; \
+ sta->sta_stats.last_rx_probersp_pkts = sta->sta_stats.rx_probersp_pkts; \
+ sta->sta_stats.last_rx_probersp_bm_pkts = sta->sta_stats.rx_probersp_bm_pkts; \
+ sta->sta_stats.last_rx_probersp_uo_pkts = sta->sta_stats.rx_probersp_uo_pkts; \
+ sta->sta_stats.last_rx_ctrl_pkts = sta->sta_stats.rx_ctrl_pkts; \
+ sta->sta_stats.last_rx_data_pkts = sta->sta_stats.rx_data_pkts; \
+ } while (0)
+
+#define STA_RX_PKTS_ARG(sta) \
+ sta->sta_stats.rx_mgnt_pkts \
+ , sta->sta_stats.rx_ctrl_pkts \
+ , sta->sta_stats.rx_data_pkts
+
+#define STA_LAST_RX_PKTS_ARG(sta) \
+ sta->sta_stats.last_rx_mgnt_pkts \
+ , sta->sta_stats.last_rx_ctrl_pkts \
+ , sta->sta_stats.last_rx_data_pkts
+
+#define STA_RX_PKTS_DIFF_ARG(sta) \
+ sta->sta_stats.rx_mgnt_pkts - sta->sta_stats.last_rx_mgnt_pkts \
+ , sta->sta_stats.rx_ctrl_pkts - sta->sta_stats.last_rx_ctrl_pkts \
+ , sta->sta_stats.rx_data_pkts -sta->sta_stats.last_rx_data_pkts
+
+#define STA_PKTS_FMT "(m:%llu, c:%llu, d:%llu)"
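
A hedged example of how the format/argument macro pair above is meant to be used in a debug print (printk is used here only for illustration; the driver's own logging macro is not shown in this header):

	printk("sta rx pkts " STA_PKTS_FMT "\n", STA_RX_PKTS_ARG(psta));
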
+
+struct sta_priv {
+
+ u8 *pallocated_stainfo_buf;
+ u8 *pstainfo_buf;
+ struct __queue free_sta_queue;
+
+ _lock sta_hash_lock;
+ struct list_head sta_hash[NUM_STA];
+ int asoc_sta_count;
+ struct __queue sleep_q;
+ struct __queue wakeup_q;
+
+ struct adapter *padapter;
+
+ struct list_head asoc_list;
+ struct list_head auth_list;
+ _lock asoc_list_lock;
+ _lock auth_list_lock;
+ u8 asoc_list_cnt;
+ u8 auth_list_cnt;
+
+ unsigned int auth_to; /* sec, time to expire while authenticating. */
+ unsigned int assoc_to; /* sec, time to expire before associating. */
+ unsigned int expire_to; /* sec, time to expire after associated. */
+
+ /* pointers to STA info; based on allocated AID or NULL if AID free
+ * AID is in the range 1-2007, so sta_aid[0] corresponds to AID 1
+ * and so on
+ */
+ struct sta_info *sta_aid[NUM_STA];
+
+ u16 sta_dz_bitmap;/* only supports 15 stations, station aid bitmap for sleeping sta. */
+ u16 tim_bitmap;/* only support 15 stations, aid = 0~15 mapping bit0~bit15 */
+
+ u16 max_num_sta;
+
+ struct wlan_acl_pool acl_list;
+};
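
A hedged helper sketch of the sta_aid mapping documented above (the helper name is illustrative and not part of the driver):

static inline struct sta_info *sta_info_by_aid(struct sta_priv *stapriv, u16 aid)
{
	if (aid == 0 || aid > NUM_STA)
		return NULL;                  /* AID 0 is unused; the array holds NUM_STA entries */
	return stapriv->sta_aid[aid - 1];     /* sta_aid[0] corresponds to AID 1 */
}
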
+
+
+__inline static u32 wifi_mac_hash(u8 *mac)
+{
+ u32 x;
+
+ x = mac[0];
+ x = (x << 2) ^ mac[1];
+ x = (x << 2) ^ mac[2];
+ x = (x << 2) ^ mac[3];
+ x = (x << 2) ^ mac[4];
+ x = (x << 2) ^ mac[5];
+
+ x ^= x >> 8;
+ x = x & (NUM_STA - 1);
+
+ return x;
+}
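
A hedged sketch of how the hash above indexes the sta_hash table when inserting a station (locking via sta_hash_lock is omitted for brevity):

	u32 index = wifi_mac_hash(psta->hwaddr);

	list_add_tail(&psta->hash_list, &pstapriv->sta_hash[index]);
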
+
+
+extern u32 _rtw_init_sta_priv(struct sta_priv *pstapriv);
+extern u32 _rtw_free_sta_priv(struct sta_priv *pstapriv);
+
+#define stainfo_offset_valid(offset) (offset < NUM_STA && offset >= 0)
+int rtw_stainfo_offset(struct sta_priv *stapriv, struct sta_info *sta);
+struct sta_info *rtw_get_stainfo_by_offset(struct sta_priv *stapriv, int offset);
+
+extern struct sta_info *rtw_alloc_stainfo(struct sta_priv *pstapriv, u8 *hwaddr);
+extern u32 rtw_free_stainfo(struct adapter *padapter , struct sta_info *psta);
+extern void rtw_free_all_stainfo(struct adapter *padapter);
+extern struct sta_info *rtw_get_stainfo(struct sta_priv *pstapriv, u8 *hwaddr);
+extern u32 rtw_init_bcmc_stainfo(struct adapter *padapter);
+extern struct sta_info* rtw_get_bcmc_stainfo(struct adapter *padapter);
+extern u8 rtw_access_ctrl(struct adapter *padapter, u8 *mac_addr);
+
+#endif /* __STA_INFO_H_ */
diff --git a/drivers/staging/rtl8723bs/include/wifi.h b/drivers/staging/rtl8723bs/include/wifi.h
new file mode 100644
index 000000000000..530d698f50d9
--- /dev/null
+++ b/drivers/staging/rtl8723bs/include/wifi.h
@@ -0,0 +1,1158 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#ifndef _WIFI_H_
+#define _WIFI_H_
+
+
+#ifdef BIT
+/* error "BIT define occurred earlier elsewhere!\n" */
+#undef BIT
+#endif
+#define BIT(x) (1 << (x))
+
+
+#define WLAN_ETHHDR_LEN 14
+#define WLAN_ETHADDR_LEN 6
+#define WLAN_IEEE_OUI_LEN 3
+#define WLAN_ADDR_LEN 6
+#define WLAN_CRC_LEN 4
+#define WLAN_BSSID_LEN 6
+#define WLAN_BSS_TS_LEN 8
+#define WLAN_HDR_A3_LEN 24
+#define WLAN_HDR_A4_LEN 30
+#define WLAN_HDR_A3_QOS_LEN 26
+#define WLAN_HDR_A4_QOS_LEN 32
+#define WLAN_SSID_MAXLEN 32
+#define WLAN_DATA_MAXLEN 2312
+
+#define WLAN_A3_PN_OFFSET 24
+#define WLAN_A4_PN_OFFSET 30
+
+#define WLAN_MIN_ETHFRM_LEN 60
+#define WLAN_MAX_ETHFRM_LEN 1514
+#define WLAN_ETHHDR_LEN 14
+#define WLAN_WMM_LEN 24
+
+#define P80211CAPTURE_VERSION 0x80211001
+
+/* This value is tested by WiFi 11n Test Plan 5.2.3. */
+/* This test verifies that the WLAN NIC can update the NAV by sending a CTS with a large duration. */
+#define WiFiNavUpperUs 30000 /* 30 ms */
+
+enum WIFI_FRAME_TYPE {
+ WIFI_MGT_TYPE = (0),
+ WIFI_CTRL_TYPE = (BIT(2)),
+ WIFI_DATA_TYPE = (BIT(3)),
+ WIFI_QOS_DATA_TYPE = (BIT(7)|BIT(3)), /* QoS Data */
+};
+
+enum WIFI_FRAME_SUBTYPE {
+
+ /* below is for mgt frame */
+ WIFI_ASSOCREQ = (0 | WIFI_MGT_TYPE),
+ WIFI_ASSOCRSP = (BIT(4) | WIFI_MGT_TYPE),
+ WIFI_REASSOCREQ = (BIT(5) | WIFI_MGT_TYPE),
+ WIFI_REASSOCRSP = (BIT(5) | BIT(4) | WIFI_MGT_TYPE),
+ WIFI_PROBEREQ = (BIT(6) | WIFI_MGT_TYPE),
+ WIFI_PROBERSP = (BIT(6) | BIT(4) | WIFI_MGT_TYPE),
+ WIFI_BEACON = (BIT(7) | WIFI_MGT_TYPE),
+ WIFI_ATIM = (BIT(7) | BIT(4) | WIFI_MGT_TYPE),
+ WIFI_DISASSOC = (BIT(7) | BIT(5) | WIFI_MGT_TYPE),
+ WIFI_AUTH = (BIT(7) | BIT(5) | BIT(4) | WIFI_MGT_TYPE),
+ WIFI_DEAUTH = (BIT(7) | BIT(6) | WIFI_MGT_TYPE),
+ WIFI_ACTION = (BIT(7) | BIT(6) | BIT(4) | WIFI_MGT_TYPE),
+ WIFI_ACTION_NOACK = (BIT(7) | BIT(6) | BIT(5) | WIFI_MGT_TYPE),
+
+ /* below is for control frame */
+ WIFI_NDPA = (BIT(6) | BIT(4) | WIFI_CTRL_TYPE),
+ WIFI_PSPOLL = (BIT(7) | BIT(5) | WIFI_CTRL_TYPE),
+ WIFI_RTS = (BIT(7) | BIT(5) | BIT(4) | WIFI_CTRL_TYPE),
+ WIFI_CTS = (BIT(7) | BIT(6) | WIFI_CTRL_TYPE),
+ WIFI_ACK = (BIT(7) | BIT(6) | BIT(4) | WIFI_CTRL_TYPE),
+ WIFI_CFEND = (BIT(7) | BIT(6) | BIT(5) | WIFI_CTRL_TYPE),
+ WIFI_CFEND_CFACK = (BIT(7) | BIT(6) | BIT(5) | BIT(4) | WIFI_CTRL_TYPE),
+
+ /* below is for data frame */
+ WIFI_DATA = (0 | WIFI_DATA_TYPE),
+ WIFI_DATA_CFACK = (BIT(4) | WIFI_DATA_TYPE),
+ WIFI_DATA_CFPOLL = (BIT(5) | WIFI_DATA_TYPE),
+ WIFI_DATA_CFACKPOLL = (BIT(5) | BIT(4) | WIFI_DATA_TYPE),
+ WIFI_DATA_NULL = (BIT(6) | WIFI_DATA_TYPE),
+ WIFI_CF_ACK = (BIT(6) | BIT(4) | WIFI_DATA_TYPE),
+ WIFI_CF_POLL = (BIT(6) | BIT(5) | WIFI_DATA_TYPE),
+ WIFI_CF_ACKPOLL = (BIT(6) | BIT(5) | BIT(4) | WIFI_DATA_TYPE),
+ WIFI_QOS_DATA_NULL = (BIT(6) | WIFI_QOS_DATA_TYPE),
+};
+
+enum WIFI_REASON_CODE {
+ _RSON_RESERVED_ = 0,
+ _RSON_UNSPECIFIED_ = 1,
+ _RSON_AUTH_NO_LONGER_VALID_ = 2,
+ _RSON_DEAUTH_STA_LEAVING_ = 3,
+ _RSON_INACTIVITY_ = 4,
+ _RSON_UNABLE_HANDLE_ = 5,
+ _RSON_CLS2_ = 6,
+ _RSON_CLS3_ = 7,
+ _RSON_DISAOC_STA_LEAVING_ = 8,
+ _RSON_ASOC_NOT_AUTH_ = 9,
+
+ /* WPA reason */
+ _RSON_INVALID_IE_ = 13,
+ _RSON_MIC_FAILURE_ = 14,
+ _RSON_4WAY_HNDSHK_TIMEOUT_ = 15,
+ _RSON_GROUP_KEY_UPDATE_TIMEOUT_ = 16,
+ _RSON_DIFF_IE_ = 17,
+ _RSON_MLTCST_CIPHER_NOT_VALID_ = 18,
+ _RSON_UNICST_CIPHER_NOT_VALID_ = 19,
+ _RSON_AKMP_NOT_VALID_ = 20,
+ _RSON_UNSUPPORT_RSNE_VER_ = 21,
+ _RSON_INVALID_RSNE_CAP_ = 22,
+ _RSON_IEEE_802DOT1X_AUTH_FAIL_ = 23,
+
+ /* below are Realtek definitions */
+ _RSON_PMK_NOT_AVAILABLE_ = 24,
+ _RSON_TDLS_TEAR_TOOFAR_ = 25,
+ _RSON_TDLS_TEAR_UN_RSN_ = 26,
+};
+
+/* Reason codes (IEEE 802.11-2007, 7.3.1.7, Table 7-22) */
+/* IEEE 802.11h */
+#define WLAN_REASON_PWR_CAPABILITY_NOT_VALID 10
+#define WLAN_REASON_SUPPORTED_CHANNEL_NOT_VALID 11
+
+enum WIFI_STATUS_CODE {
+ _STATS_SUCCESSFUL_ = 0,
+ _STATS_FAILURE_ = 1,
+ _STATS_CAP_FAIL_ = 10,
+ _STATS_NO_ASOC_ = 11,
+ _STATS_OTHER_ = 12,
+ _STATS_NO_SUPP_ALG_ = 13,
+ _STATS_OUT_OF_AUTH_SEQ_ = 14,
+ _STATS_CHALLENGE_FAIL_ = 15,
+ _STATS_AUTH_TIMEOUT_ = 16,
+ _STATS_UNABLE_HANDLE_STA_ = 17,
+ _STATS_RATE_FAIL_ = 18,
+};
+
+/* Status codes (IEEE 802.11-2007, 7.3.1.9, Table 7-23) */
+/* extended */
+/* IEEE 802.11b */
+#define WLAN_STATUS_ASSOC_DENIED_NOSHORT 19
+#define WLAN_STATUS_ASSOC_DENIED_NOPBCC 20
+#define WLAN_STATUS_ASSOC_DENIED_NOAGILITY 21
+/* IEEE 802.11h */
+#define WLAN_STATUS_SPEC_MGMT_REQUIRED 22
+#define WLAN_STATUS_PWR_CAPABILITY_NOT_VALID 23
+#define WLAN_STATUS_SUPPORTED_CHANNEL_NOT_VALID 24
+/* IEEE 802.11g */
+#define WLAN_STATUS_ASSOC_DENIED_NO_SHORT_SLOT_TIME 25
+#define WLAN_STATUS_ASSOC_DENIED_NO_ER_PBCC 26
+#define WLAN_STATUS_ASSOC_DENIED_NO_DSSS_OFDM 27
+/* IEEE 802.11w */
+#define WLAN_STATUS_ASSOC_REJECTED_TEMPORARILY 30
+#define WLAN_STATUS_ROBUST_MGMT_FRAME_POLICY_VIOLATION 31
+/* IEEE 802.11i */
+#define WLAN_STATUS_INVALID_IE 40
+#define WLAN_STATUS_GROUP_CIPHER_NOT_VALID 41
+#define WLAN_STATUS_PAIRWISE_CIPHER_NOT_VALID 42
+#define WLAN_STATUS_AKMP_NOT_VALID 43
+#define WLAN_STATUS_UNSUPPORTED_RSN_IE_VERSION 44
+#define WLAN_STATUS_INVALID_RSN_IE_CAPAB 45
+#define WLAN_STATUS_CIPHER_REJECTED_PER_POLICY 46
+#define WLAN_STATUS_TS_NOT_CREATED 47
+#define WLAN_STATUS_DIRECT_LINK_NOT_ALLOWED 48
+#define WLAN_STATUS_DEST_STA_NOT_PRESENT 49
+#define WLAN_STATUS_DEST_STA_NOT_QOS_STA 50
+#define WLAN_STATUS_ASSOC_DENIED_LISTEN_INT_TOO_LARGE 51
+/* IEEE 802.11r */
+#define WLAN_STATUS_INVALID_FT_ACTION_FRAME_COUNT 52
+#define WLAN_STATUS_INVALID_PMKID 53
+#define WLAN_STATUS_INVALID_MDIE 54
+#define WLAN_STATUS_INVALID_FTIE 55
+
+
+enum WIFI_REG_DOMAIN {
+ DOMAIN_FCC = 1,
+ DOMAIN_IC = 2,
+ DOMAIN_ETSI = 3,
+ DOMAIN_SPAIN = 4,
+ DOMAIN_FRANCE = 5,
+ DOMAIN_MKK = 6,
+ DOMAIN_ISRAEL = 7,
+ DOMAIN_MKK1 = 8,
+ DOMAIN_MKK2 = 9,
+ DOMAIN_MKK3 = 10,
+ DOMAIN_MAX
+};
+
+#define _TO_DS_ BIT(8)
+#define _FROM_DS_ BIT(9)
+#define _MORE_FRAG_ BIT(10)
+#define _RETRY_ BIT(11)
+#define _PWRMGT_ BIT(12)
+#define _MORE_DATA_ BIT(13)
+#define _PRIVACY_ BIT(14)
+#define _ORDER_ BIT(15)
+
+#define SetToDs(pbuf) \
+ *(__le16 *)(pbuf) |= cpu_to_le16(_TO_DS_)
+
+#define GetToDs(pbuf) (((*(__le16 *)(pbuf)) & cpu_to_le16(_TO_DS_)) != 0)
+
+#define ClearToDs(pbuf) \
+ *(__le16 *)(pbuf) &= (~cpu_to_le16(_TO_DS_))
+
+#define SetFrDs(pbuf) \
+ *(__le16 *)(pbuf) |= cpu_to_le16(_FROM_DS_)
+
+#define GetFrDs(pbuf) (((*(__le16 *)(pbuf)) & cpu_to_le16(_FROM_DS_)) != 0)
+
+#define ClearFrDs(pbuf) \
+ *(__le16 *)(pbuf) &= (~cpu_to_le16(_FROM_DS_))
+
+#define get_tofr_ds(pframe) ((GetToDs(pframe) << 1) | GetFrDs(pframe))
+
+#define SetMFrag(pbuf) \
+ *(__le16 *)(pbuf) |= cpu_to_le16(_MORE_FRAG_)
+
+#define GetMFrag(pbuf) (((*(__le16 *)(pbuf)) & cpu_to_le16(_MORE_FRAG_)) != 0)
+
+#define ClearMFrag(pbuf) \
+ *(__le16 *)(pbuf) &= (~cpu_to_le16(_MORE_FRAG_))
+
+#define SetRetry(pbuf) \
+ *(__le16 *)(pbuf) |= cpu_to_le16(_RETRY_)
+
+#define GetRetry(pbuf) (((*(__le16 *)(pbuf)) & cpu_to_le16(_RETRY_)) != 0)
+
+#define ClearRetry(pbuf) \
+ *(__le16 *)(pbuf) &= (~cpu_to_le16(_RETRY_))
+
+#define SetPwrMgt(pbuf) \
+ *(__le16 *)(pbuf) |= cpu_to_le16(_PWRMGT_)
+
+#define GetPwrMgt(pbuf) (((*(__le16 *)(pbuf)) & cpu_to_le16(_PWRMGT_)) != 0)
+
+#define ClearPwrMgt(pbuf) \
+ *(__le16 *)(pbuf) &= (~cpu_to_le16(_PWRMGT_))
+
+#define SetMData(pbuf) \
+ *(__le16 *)(pbuf) |= cpu_to_le16(_MORE_DATA_)
+
+#define GetMData(pbuf) (((*(__le16 *)(pbuf)) & cpu_to_le16(_MORE_DATA_)) != 0)
+
+#define ClearMData(pbuf) \
+ *(__le16 *)(pbuf) &= (~cpu_to_le16(_MORE_DATA_))
+
+#define SetPrivacy(pbuf) \
+ *(__le16 *)(pbuf) |= cpu_to_le16(_PRIVACY_)
+
+#define GetPrivacy(pbuf) \
+ (((*(__le16 *)(pbuf)) & cpu_to_le16(_PRIVACY_)) != 0)
+
+#define ClearPrivacy(pbuf) \
+ *(__le16 *)(pbuf) &= (~cpu_to_le16(_PRIVACY_))
+
+
+#define GetOrder(pbuf) \
+ (((*(__le16 *)(pbuf)) & cpu_to_le16(_ORDER_)) != 0)
+
+#define GetFrameType(pbuf) \
+ (le16_to_cpu(*(__le16 *)(pbuf)) & (BIT(3) | BIT(2)))
+
+#define SetFrameType(pbuf, type) \
+ do { \
+ *(unsigned short *)(pbuf) &= __constant_cpu_to_le16(~(BIT(3) | BIT(2))); \
+ *(unsigned short *)(pbuf) |= __constant_cpu_to_le16(type); \
+ } while (0)
+
+#define GetFrameSubType(pbuf) (le16_to_cpu(*(__le16 *)(pbuf)) & (BIT(7) |\
+ BIT(6) | BIT(5) | BIT(4) | BIT(3) | BIT(2)))
+
+#define SetFrameSubType(pbuf, type) \
+ do { \
+ *(__le16 *)(pbuf) &= cpu_to_le16(~(BIT(7) | BIT(6) | \
+ BIT(5) | BIT(4) | BIT(3) | BIT(2))); \
+ *(__le16 *)(pbuf) |= cpu_to_le16(type); \
+ } while (0)
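
A hedged one-line example of the frame-control helpers above (pframe is assumed to point at the start of the 802.11 header):

	SetFrameSubType(pframe, WIFI_BEACON);
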
+
+#define GetSequence(pbuf) \
+ (le16_to_cpu(*(__le16 *)((size_t)(pbuf) + 22)) >> 4)
+
+#define GetFragNum(pbuf) \
+ (le16_to_cpu(*(__le16 *)((size_t)(pbuf) + 22)) & 0x0f)
+
+#define GetTupleCache(pbuf) \
+ (cpu_to_le16(*(unsigned short *)((size_t)(pbuf) + 22)))
+
+#define SetFragNum(pbuf, num) \
+ do { \
+ *(unsigned short *)((size_t)(pbuf) + 22) = \
+ ((*(unsigned short *)((size_t)(pbuf) + 22)) & \
+ le16_to_cpu(~(0x000f))) | \
+ cpu_to_le16(0x0f & (num)); \
+ } while (0)
+
+#define SetSeqNum(pbuf, num) \
+ do { \
+ *(__le16 *)((size_t)(pbuf) + 22) = \
+ ((*(__le16 *)((size_t)(pbuf) + 22)) & cpu_to_le16((unsigned short)0x000f)) | \
+ cpu_to_le16((unsigned short)(0xfff0 & (num << 4))); \
+ } while (0)
+
+#define SetDuration(pbuf, dur) \
+ *(__le16 *)((size_t)(pbuf) + 2) = cpu_to_le16(0xffff & (dur))
+
+
+#define SetPriority(pbuf, tid) \
+ *(__le16 *)(pbuf) |= cpu_to_le16(tid & 0xf)
+
+#define GetPriority(pbuf) ((le16_to_cpu(*(__le16 *)(pbuf))) & 0xf)
+
+#define SetEOSP(pbuf, eosp) \
+ *(__le16 *)(pbuf) |= cpu_to_le16((eosp & 1) << 4)
+
+#define SetAckpolicy(pbuf, ack) \
+ *(__le16 *)(pbuf) |= cpu_to_le16((ack & 3) << 5)
+
+#define GetAckpolicy(pbuf) (((le16_to_cpu(*(__le16 *)pbuf)) >> 5) & 0x3)
+
+#define GetAMsdu(pbuf) (((le16_to_cpu(*(__le16 *)pbuf)) >> 7) & 0x1)
+
+#define SetAMsdu(pbuf, amsdu) \
+ *(__le16 *)(pbuf) |= cpu_to_le16((amsdu & 1) << 7)
+
+#define GetAid(pbuf) (le16_to_cpu(*(__le16 *)((size_t)(pbuf) + 2)) & 0x3fff)
+
+#define GetTid(pbuf) (le16_to_cpu(*(__le16 *)((size_t)(pbuf) + \
+ (((GetToDs(pbuf)<<1) | GetFrDs(pbuf)) == 3 ? \
+ 30 : 24))) & 0x000f)
+
+#define GetAddr1Ptr(pbuf) ((unsigned char *)((size_t)(pbuf) + 4))
+
+#define GetAddr2Ptr(pbuf) ((unsigned char *)((size_t)(pbuf) + 10))
+
+#define GetAddr3Ptr(pbuf) ((unsigned char *)((size_t)(pbuf) + 16))
+
+#define GetAddr4Ptr(pbuf) ((unsigned char *)((size_t)(pbuf) + 24))
+
+#define MacAddr_isBcst(addr) \
+ (\
+ ((addr[0] == 0xff) && (addr[1] == 0xff) && \
+ (addr[2] == 0xff) && (addr[3] == 0xff) && \
+ (addr[4] == 0xff) && (addr[5] == 0xff)) ? true : false \
+)
+
+__inline static int IS_MCAST(unsigned char *da)
+{
+ if ((*da) & 0x01)
+ return true;
+ else
+ return false;
+}
+
+__inline static unsigned char * get_ra(unsigned char *pframe)
+{
+ unsigned char *ra;
+ ra = GetAddr1Ptr(pframe);
+ return ra;
+}
+__inline static unsigned char * get_ta(unsigned char *pframe)
+{
+ unsigned char *ta;
+ ta = GetAddr2Ptr(pframe);
+ return ta;
+}
+
+__inline static unsigned char * get_da(unsigned char *pframe)
+{
+ unsigned char *da;
+ unsigned int to_fr_ds = (GetToDs(pframe) << 1) | GetFrDs(pframe);
+
+ switch (to_fr_ds) {
+ case 0x00: /* ToDs = 0, FromDs = 0 */
+ da = GetAddr1Ptr(pframe);
+ break;
+ case 0x01: /* ToDs = 0, FromDs = 1 */
+ da = GetAddr1Ptr(pframe);
+ break;
+ case 0x02: /* ToDs = 1, FromDs = 0 */
+ da = GetAddr3Ptr(pframe);
+ break;
+ default: /* ToDs = 1, FromDs = 1 */
+ da = GetAddr3Ptr(pframe);
+ break;
+ }
+
+ return da;
+}
+
+
+__inline static unsigned char * get_sa(unsigned char *pframe)
+{
+ unsigned char *sa;
+ unsigned int to_fr_ds = (GetToDs(pframe) << 1) | GetFrDs(pframe);
+
+ switch (to_fr_ds) {
+ case 0x00: /* ToDs = 0, FromDs = 0 */
+ sa = GetAddr2Ptr(pframe);
+ break;
+ case 0x01: /* ToDs = 0, FromDs = 1 */
+ sa = GetAddr3Ptr(pframe);
+ break;
+ case 0x02: /* ToDs = 1, FromDs = 0 */
+ sa = GetAddr2Ptr(pframe);
+ break;
+ default: /* ToDs = 1, FromDs = 1 */
+ sa = GetAddr4Ptr(pframe);
+ break;
+ }
+
+ return sa;
+}
+
+__inline static unsigned char * get_hdr_bssid(unsigned char *pframe)
+{
+ unsigned char *sa = NULL;
+ unsigned int to_fr_ds = (GetToDs(pframe) << 1) | GetFrDs(pframe);
+
+ switch (to_fr_ds) {
+ case 0x00: /* ToDs = 0, FromDs = 0 */
+ sa = GetAddr3Ptr(pframe);
+ break;
+ case 0x01: /* ToDs = 0, FromDs = 1 */
+ sa = GetAddr2Ptr(pframe);
+ break;
+ case 0x02: /* ToDs = 1, FromDs = 0 */
+ sa = GetAddr1Ptr(pframe);
+ break;
+ case 0x03: /* ToDs = 1, FromDs = 1 */
+ sa = GetAddr1Ptr(pframe);
+ break;
+ }
+
+ return sa;
+}
+
+
+__inline static int IsFrameTypeCtrl(unsigned char *pframe)
+{
+ if (WIFI_CTRL_TYPE == GetFrameType(pframe))
+ return true;
+ else
+ return false;
+}
+/*-----------------------------------------------------------------------------
+ Below is for the security related definition
+------------------------------------------------------------------------------*/
+#define _RESERVED_FRAME_TYPE_ 0
+#define _SKB_FRAME_TYPE_ 2
+#define _PRE_ALLOCMEM_ 1
+#define _PRE_ALLOCHDR_ 3
+#define _PRE_ALLOCLLCHDR_ 4
+#define _PRE_ALLOCICVHDR_ 5
+#define _PRE_ALLOCMICHDR_ 6
+
+#define _SIFSTIME_ ((priv->pmib->dot11BssType.net_work_type&WIRELESS_11A)?16:10)
+#define _ACKCTSLNG_ 14 /* 14 bytes long, including crclng */
+#define _CRCLNG_ 4
+
+#define _ASOCREQ_IE_OFFSET_ 4 /* excluding wlan_hdr */
+#define _ASOCRSP_IE_OFFSET_ 6
+#define _REASOCREQ_IE_OFFSET_ 10
+#define _REASOCRSP_IE_OFFSET_ 6
+#define _PROBEREQ_IE_OFFSET_ 0
+#define _PROBERSP_IE_OFFSET_ 12
+#define _AUTH_IE_OFFSET_ 6
+#define _DEAUTH_IE_OFFSET_ 0
+#define _BEACON_IE_OFFSET_ 12
+#define _PUBLIC_ACTION_IE_OFFSET_ 8
+
+#define _FIXED_IE_LENGTH_ _BEACON_IE_OFFSET_
+
+#define _SSID_IE_ 0
+#define _SUPPORTEDRATES_IE_ 1
+#define _DSSET_IE_ 3
+#define _TIM_IE_ 5
+#define _IBSS_PARA_IE_ 6
+#define _COUNTRY_IE_ 7
+#define _CHLGETXT_IE_ 16
+#define _SUPPORTED_CH_IE_ 36
+#define _CH_SWTICH_ANNOUNCE_ 37 /* Secondary Channel Offset */
+#define _RSN_IE_2_ 48
+#define _SSN_IE_1_ 221
+#define _ERPINFO_IE_ 42
+#define _EXT_SUPPORTEDRATES_IE_ 50
+
+#define _HT_CAPABILITY_IE_ 45
+#define _FTIE_ 55
+#define _TIMEOUT_ITVL_IE_ 56
+#define _SRC_IE_ 59
+#define _HT_EXTRA_INFO_IE_ 61
+#define _HT_ADD_INFO_IE_ 61 /* _HT_EXTRA_INFO_IE_ */
+#define _WAPI_IE_ 68
+
+#define _RIC_Descriptor_IE_ 75
+#define _MME_IE_ 76 /* 802.11w Management MIC element */
+#define _LINK_ID_IE_ 101
+#define _CH_SWITCH_TIMING_ 104
+#define _PTI_BUFFER_STATUS_ 106
+#define _EXT_CAP_IE_ 127
+#define _VENDOR_SPECIFIC_IE_ 221
+
+#define _RESERVED47_ 47
+
+enum ELEMENT_ID {
+ EID_SsId = 0, /* service set identifier (0:32) */
+ EID_SupRates = 1, /* supported rates (1:8) */
+ EID_FHParms = 2, /* FH parameter set (5) */
+ EID_DSParms = 3, /* DS parameter set (1) */
+ EID_CFParms = 4, /* CF parameter set (6) */
+ EID_Tim = 5, /* Traffic Information Map (4:254) */
+ EID_IbssParms = 6, /* IBSS parameter set (2) */
+ EID_Country = 7, /* */
+
+ /* From 7.3.2: Information elements in 802.11E/D13.0, page 46. */
+ EID_QBSSLoad = 11,
+ EID_EDCAParms = 12,
+ EID_TSpec = 13,
+ EID_TClass = 14,
+ EID_Schedule = 15,
+ /* */
+
+ EID_Ctext = 16, /* challenge text*/
+ EID_POWER_CONSTRAINT = 32, /* Power Constraint*/
+
+ /* vivi for WIFITest, 802.11h AP, 20100427 */
+ /* 2010/12/26 MH The definition we can declare always!! */
+ EID_PowerCap = 33,
+ EID_SupportedChannels = 36,
+ EID_ChlSwitchAnnounce = 37,
+
+ EID_MeasureRequest = 38, /* Measurement Request */
+ EID_MeasureReport = 39, /* Measurement Report */
+
+ EID_ERPInfo = 42,
+
+ /* From 7.3.2: Information elements in 802.11E/D13.0, page 46. */
+ EID_TSDelay = 43,
+ EID_TCLASProc = 44,
+ EID_HTCapability = 45,
+ EID_QoSCap = 46,
+ /* */
+
+ EID_WPA2 = 48,
+ EID_ExtSupRates = 50,
+
+ EID_FTIE = 55, /* Defined in 802.11r */
+ EID_Timeout = 56, /* Defined in 802.11r */
+
+ EID_SupRegulatory = 59, /* Supported Regulatory Classes 802.11y */
+ EID_HTInfo = 61,
+ EID_SecondaryChnlOffset = 62,
+
+ EID_BSSCoexistence = 72, /* 20/40 BSS Coexistence */
+ EID_BSSIntolerantChlReport = 73,
+ EID_OBSS = 74, /* Overlapping BSS Scan Parameters */
+
+ EID_LinkIdentifier = 101, /* Defined in 802.11z */
+ EID_WakeupSchedule = 102, /* Defined in 802.11z */
+ EID_ChnlSwitchTimeing = 104, /* Defined in 802.11z */
+ EID_PTIControl = 105, /* Defined in 802.11z */
+ EID_PUBufferStatus = 106, /* Defined in 802.11z */
+
+ EID_EXTCapability = 127, /* Extended Capabilities */
+ /* From S19:Aironet IE and S21:AP IP address IE in CCX v1.13, p16 and p18. */
+ EID_Aironet = 133, /* 0x85: Aironet Element for Cisco CCX */
+ EID_CiscoIP = 149, /* 0x95: IP Address IE for Cisco CCX */
+
+ EID_CellPwr = 150, /* 0x96: Cell Power Limit IE. Ref. 0x96. */
+
+ EID_CCKM = 156,
+
+ EID_Vendor = 221, /* 0xDD: Vendor Specific */
+
+ EID_WAPI = 68,
+ EID_VHTCapability = 191, /* Based on 802.11ac D2.0 */
+ EID_VHTOperation = 192, /* Based on 802.11ac D2.0 */
+ EID_OpModeNotification = 199, /* Based on 802.11ac D3.0 */
+};
+
+/* ---------------------------------------------------------------------------
+	Below are the fixed elements...
+-----------------------------------------------------------------------------*/
+#define _AUTH_ALGM_NUM_ 2
+#define _AUTH_SEQ_NUM_ 2
+#define _BEACON_ITERVAL_ 2
+#define _CAPABILITY_ 2
+#define _CURRENT_APADDR_ 6
+#define _LISTEN_INTERVAL_ 2
+#define _RSON_CODE_ 2
+#define _ASOC_ID_ 2
+#define _STATUS_CODE_ 2
+#define _TIMESTAMP_ 8
+
+#define AUTH_ODD_TO 0
+#define AUTH_EVEN_TO 1
+
+#define WLAN_ETHCONV_ENCAP 1
+#define WLAN_ETHCONV_RFC1042 2
+#define WLAN_ETHCONV_8021h 3
+
+#define cap_ESS BIT(0)
+#define cap_IBSS BIT(1)
+#define cap_CFPollable BIT(2)
+#define cap_CFRequest BIT(3)
+#define cap_Privacy BIT(4)
+#define cap_ShortPremble BIT(5)
+#define cap_PBCC BIT(6)
+#define cap_ChAgility BIT(7)
+#define cap_SpecMgmt BIT(8)
+#define cap_QoS BIT(9)
+#define cap_ShortSlot BIT(10)
+
+/*-----------------------------------------------------------------------------
+ Below is the definition for 802.11i / 802.1x
+------------------------------------------------------------------------------*/
+#define _IEEE8021X_MGT_ 1 /* WPA */
+#define _IEEE8021X_PSK_ 2 /* WPA with pre-shared key */
+
+#define _MME_IE_LENGTH_ 18
+/*-----------------------------------------------------------------------------
+ Below is the definition for WMM
+------------------------------------------------------------------------------*/
+#define _WMM_IE_Length_ 7 /* for WMM STA */
+#define _WMM_Para_Element_Length_ 24
+
+
+/*-----------------------------------------------------------------------------
+ Below is the definition for 802.11n
+------------------------------------------------------------------------------*/
+
+#define SetOrderBit(pbuf) \
+ do { \
+ *(unsigned short *)(pbuf) |= cpu_to_le16(_ORDER_); \
+ } while (0)
+
+#define GetOrderBit(pbuf) (((*(unsigned short *)(pbuf)) & cpu_to_le16(_ORDER_)) != 0)
+
+#define ACT_CAT_VENDOR 0x7F/* 127 */
+
+/**
+ * struct rtw_ieee80211_bar - HT Block Ack Request
+ *
+ * This structure refers to "HT BlockAckReq" as
+ * described in 802.11n draft section 7.2.1.7.1
+ */
+struct rtw_ieee80211_bar {
+ __le16 frame_control;
+ __le16 duration;
+ unsigned char ra[6];
+ unsigned char ta[6];
+ __le16 control;
+ __le16 start_seq_num;
+} __attribute__((packed));
+
+/* 802.11 BAR control masks */
+#define IEEE80211_BAR_CTRL_ACK_POLICY_NORMAL 0x0000
+#define IEEE80211_BAR_CTRL_CBMTID_COMPRESSED_BA 0x0004
+
+
+/**
+ * struct rtw_ieee80211_ht_cap - HT capabilities
+ *
+ * This structure refers to "HT capabilities element" as
+ * described in 802.11n draft section 7.3.2.52
+ */
+
+struct rtw_ieee80211_ht_cap {
+ __le16 cap_info;
+ unsigned char ampdu_params_info;
+ unsigned char supp_mcs_set[16];
+ __le16 extended_ht_cap_info;
+ __le16 tx_BF_cap_info;
+ unsigned char antenna_selection_info;
+} __attribute__ ((packed));
+
+/**
+ * struct ieee80211_ht_addt_info - HT additional information
+ *
+ * This structure refers to "HT information element" as
+ * described in 802.11n draft section 7.3.2.53
+ */
+struct ieee80211_ht_addt_info {
+ unsigned char control_chan;
+ unsigned char ht_param;
+ __le16 operation_mode;
+ __le16 stbc_param;
+ unsigned char basic_set[16];
+} __attribute__ ((packed));
+
+
+struct HT_caps_element
+{
+ union
+ {
+ struct
+ {
+ __le16 HT_caps_info;
+ unsigned char AMPDU_para;
+ unsigned char MCS_rate[16];
+ __le16 HT_ext_caps;
+ __le16 Beamforming_caps;
+ unsigned char ASEL_caps;
+ } HT_cap_element;
+ unsigned char HT_cap[26];
+ }u;
+} __attribute__ ((packed));
+
+struct HT_info_element
+{
+ unsigned char primary_channel;
+ unsigned char infos[5];
+ unsigned char MCS_rate[16];
+} __attribute__ ((packed));
+
+struct AC_param
+{
+ unsigned char ACI_AIFSN;
+ unsigned char CW;
+ __le16 TXOP_limit;
+} __attribute__ ((packed));
+
+struct WMM_para_element
+{
+ unsigned char QoS_info;
+ unsigned char reserved;
+ struct AC_param ac_param[4];
+} __attribute__ ((packed));
+
+struct ADDBA_request
+{
+ unsigned char dialog_token;
+ __le16 BA_para_set;
+ __le16 BA_timeout_value;
+ __le16 BA_starting_seqctrl;
+} __attribute__ ((packed));
+
+enum HT_CAP_AMPDU_FACTOR {
+ MAX_AMPDU_FACTOR_8K = 0,
+ MAX_AMPDU_FACTOR_16K = 1,
+ MAX_AMPDU_FACTOR_32K = 2,
+ MAX_AMPDU_FACTOR_64K = 3,
+};
+
+/* 802.11n HT capabilities masks */
+#define IEEE80211_HT_CAP_LDPC_CODING 0x0001
+#define IEEE80211_HT_CAP_SUP_WIDTH 0x0002
+#define IEEE80211_HT_CAP_SM_PS 0x000C
+#define IEEE80211_HT_CAP_GRN_FLD 0x0010
+#define IEEE80211_HT_CAP_SGI_20 0x0020
+#define IEEE80211_HT_CAP_SGI_40 0x0040
+#define IEEE80211_HT_CAP_TX_STBC 0x0080
+#define IEEE80211_HT_CAP_RX_STBC_1R 0x0100
+#define IEEE80211_HT_CAP_RX_STBC_2R 0x0200
+#define IEEE80211_HT_CAP_RX_STBC_3R 0x0300
+#define IEEE80211_HT_CAP_DELAY_BA 0x0400
+#define IEEE80211_HT_CAP_MAX_AMSDU 0x0800
+#define IEEE80211_HT_CAP_DSSSCCK40 0x1000
+/* 802.11n HT capability AMPDU settings */
+#define IEEE80211_HT_CAP_AMPDU_FACTOR 0x03
+#define IEEE80211_HT_CAP_AMPDU_DENSITY 0x1C
+/* 802.11n HT capability MCS set */
+#define IEEE80211_SUPP_MCS_SET_UEQM 4
+#define IEEE80211_HT_CAP_MAX_STREAMS 4
+#define IEEE80211_SUPP_MCS_SET_LEN 10
+/* maximum streams the spec allows */
+#define IEEE80211_HT_CAP_MCS_TX_DEFINED 0x01
+#define IEEE80211_HT_CAP_MCS_TX_RX_DIFF 0x02
+#define IEEE80211_HT_CAP_MCS_TX_STREAMS 0x0C
+#define IEEE80211_HT_CAP_MCS_TX_UEQM 0x10
+/* 802.11n HT capability TXBF capability */
+#define IEEE80211_HT_CAP_TXBF_RX_NDP 0x00000008
+#define IEEE80211_HT_CAP_TXBF_TX_NDP 0x00000010
+#define IEEE80211_HT_CAP_TXBF_EXPLICIT_COMP_STEERING_CAP 0x00000400
+
+/* 802.11n HT IE masks */
+#define IEEE80211_HT_IE_CHA_SEC_OFFSET 0x03
+#define IEEE80211_HT_IE_CHA_SEC_NONE 0x00
+#define IEEE80211_HT_IE_CHA_SEC_ABOVE 0x01
+#define IEEE80211_HT_IE_CHA_SEC_BELOW 0x03
+#define IEEE80211_HT_IE_CHA_WIDTH 0x04
+#define IEEE80211_HT_IE_HT_PROTECTION 0x0003
+#define IEEE80211_HT_IE_NON_GF_STA_PRSNT 0x0004
+#define IEEE80211_HT_IE_NON_HT_STA_PRSNT 0x0010
+
+/* block-ack parameters */
+#define IEEE80211_ADDBA_PARAM_POLICY_MASK 0x0002
+#define IEEE80211_ADDBA_PARAM_TID_MASK 0x003C
+#define RTW_IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK 0xFFC0
+#define IEEE80211_DELBA_PARAM_TID_MASK 0xF000
+#define IEEE80211_DELBA_PARAM_INITIATOR_MASK 0x0800
+
+/*
+ * A-MPDU buffer sizes
+ * According to the IEEE 802.11n spec, size varies from 8K to 64K (in powers of 2)
+ */
+#define IEEE80211_MIN_AMPDU_BUF 0x8
+#define IEEE80211_MAX_AMPDU_BUF 0x40
+
+
+/* Spatial Multiplexing Power Save Modes */
+#define WLAN_HT_CAP_SM_PS_STATIC 0
+#define WLAN_HT_CAP_SM_PS_DYNAMIC 1
+#define WLAN_HT_CAP_SM_PS_INVALID 2
+#define WLAN_HT_CAP_SM_PS_DISABLED 3
+
+
+#define OP_MODE_PURE 0
+#define OP_MODE_MAY_BE_LEGACY_STAS 1
+#define OP_MODE_20MHZ_HT_STA_ASSOCED 2
+#define OP_MODE_MIXED 3
+
+#define HT_INFO_HT_PARAM_SECONDARY_CHNL_OFF_MASK ((u8) BIT(0) | BIT(1))
+#define HT_INFO_HT_PARAM_SECONDARY_CHNL_ABOVE ((u8) BIT(0))
+#define HT_INFO_HT_PARAM_SECONDARY_CHNL_BELOW ((u8) BIT(0) | BIT(1))
+#define HT_INFO_HT_PARAM_REC_TRANS_CHNL_WIDTH ((u8) BIT(2))
+#define HT_INFO_HT_PARAM_RIFS_MODE ((u8) BIT(3))
+#define HT_INFO_HT_PARAM_CTRL_ACCESS_ONLY ((u8) BIT(4))
+#define HT_INFO_HT_PARAM_SRV_INTERVAL_GRANULARITY ((u8) BIT(5))
+
+#define HT_INFO_OPERATION_MODE_OP_MODE_MASK \
+ ((u16) (0x0001 | 0x0002))
+#define HT_INFO_OPERATION_MODE_OP_MODE_OFFSET 0
+#define HT_INFO_OPERATION_MODE_NON_GF_DEVS_PRESENT ((u8) BIT(2))
+#define HT_INFO_OPERATION_MODE_TRANSMIT_BURST_LIMIT ((u8) BIT(3))
+#define HT_INFO_OPERATION_MODE_NON_HT_STA_PRESENT ((u8) BIT(4))
+
+#define HT_INFO_STBC_PARAM_DUAL_BEACON ((u16) BIT(6))
+#define HT_INFO_STBC_PARAM_DUAL_STBC_PROTECT ((u16) BIT(7))
+#define HT_INFO_STBC_PARAM_SECONDARY_BCN ((u16) BIT(8))
+#define HT_INFO_STBC_PARAM_LSIG_TXOP_PROTECT_ALLOWED ((u16) BIT(9))
+#define HT_INFO_STBC_PARAM_PCO_ACTIVE ((u16) BIT(10))
+#define HT_INFO_STBC_PARAM_PCO_PHASE ((u16) BIT(11))
+
+
+
+/* endif */
+
+/* ===============WPS Section =============== */
+/* For WPSv1.0 */
+#define WPSOUI 0x0050f204
+/* WPS attribute ID */
+#define WPS_ATTR_VER1 0x104A
+#define WPS_ATTR_SIMPLE_CONF_STATE 0x1044
+#define WPS_ATTR_RESP_TYPE 0x103B
+#define WPS_ATTR_UUID_E 0x1047
+#define WPS_ATTR_MANUFACTURER 0x1021
+#define WPS_ATTR_MODEL_NAME 0x1023
+#define WPS_ATTR_MODEL_NUMBER 0x1024
+#define WPS_ATTR_SERIAL_NUMBER 0x1042
+#define WPS_ATTR_PRIMARY_DEV_TYPE 0x1054
+#define WPS_ATTR_SEC_DEV_TYPE_LIST 0x1055
+#define WPS_ATTR_DEVICE_NAME 0x1011
+#define WPS_ATTR_CONF_METHOD 0x1008
+#define WPS_ATTR_RF_BANDS 0x103C
+#define WPS_ATTR_DEVICE_PWID 0x1012
+#define WPS_ATTR_REQUEST_TYPE 0x103A
+#define WPS_ATTR_ASSOCIATION_STATE 0x1002
+#define WPS_ATTR_CONFIG_ERROR 0x1009
+#define WPS_ATTR_VENDOR_EXT 0x1049
+#define WPS_ATTR_SELECTED_REGISTRAR 0x1041
+
+/* Value of WPS attribute "WPS_ATTR_DEVICE_NAME" */
+#define WPS_MAX_DEVICE_NAME_LEN 32
+
+/* Value of WPS Request Type Attribute */
+#define WPS_REQ_TYPE_ENROLLEE_INFO_ONLY 0x00
+#define WPS_REQ_TYPE_ENROLLEE_OPEN_8021X 0x01
+#define WPS_REQ_TYPE_REGISTRAR 0x02
+#define WPS_REQ_TYPE_WLAN_MANAGER_REGISTRAR 0x03
+
+/* Value of WPS Response Type Attribute */
+#define WPS_RESPONSE_TYPE_INFO_ONLY 0x00
+#define WPS_RESPONSE_TYPE_8021X 0x01
+#define WPS_RESPONSE_TYPE_REGISTRAR 0x02
+#define WPS_RESPONSE_TYPE_AP 0x03
+
+/* Value of WPS WiFi Simple Configuration State Attribute */
+#define WPS_WSC_STATE_NOT_CONFIG 0x01
+#define WPS_WSC_STATE_CONFIG 0x02
+
+/* Value of WPS Version Attribute */
+#define WPS_VERSION_1 0x10
+
+/* Value of WPS Configuration Method Attribute */
+#define WPS_CONFIG_METHOD_FLASH 0x0001
+#define WPS_CONFIG_METHOD_ETHERNET 0x0002
+#define WPS_CONFIG_METHOD_LABEL 0x0004
+#define WPS_CONFIG_METHOD_DISPLAY 0x0008
+#define WPS_CONFIG_METHOD_E_NFC 0x0010
+#define WPS_CONFIG_METHOD_I_NFC 0x0020
+#define WPS_CONFIG_METHOD_NFC 0x0040
+#define WPS_CONFIG_METHOD_PBC 0x0080
+#define WPS_CONFIG_METHOD_KEYPAD 0x0100
+#define WPS_CONFIG_METHOD_VPBC 0x0280
+#define WPS_CONFIG_METHOD_PPBC 0x0480
+#define WPS_CONFIG_METHOD_VDISPLAY 0x2008
+#define WPS_CONFIG_METHOD_PDISPLAY 0x4008
+
+/* Value of Category ID of WPS Primary Device Type Attribute */
+#define WPS_PDT_CID_DISPLAYS 0x0007
+#define WPS_PDT_CID_MULIT_MEDIA 0x0008
+#define WPS_PDT_CID_RTK_WIDI WPS_PDT_CID_MULIT_MEDIA
+
+/* Value of Sub Category ID of WPS Primary Device Type Attribute */
+#define WPS_PDT_SCID_MEDIA_SERVER 0x0005
+#define WPS_PDT_SCID_RTK_DMP WPS_PDT_SCID_MEDIA_SERVER
+
+/* Value of Device Password ID */
+#define WPS_DPID_PIN 0x0000
+#define WPS_DPID_USER_SPEC 0x0001
+#define WPS_DPID_MACHINE_SPEC 0x0002
+#define WPS_DPID_REKEY 0x0003
+#define WPS_DPID_PBC 0x0004
+#define WPS_DPID_REGISTRAR_SPEC 0x0005
+
+/* Value of WPS RF Bands Attribute */
+#define WPS_RF_BANDS_2_4_GHZ 0x01
+#define WPS_RF_BANDS_5_GHZ 0x02
+
+/* Value of WPS Association State Attribute */
+#define WPS_ASSOC_STATE_NOT_ASSOCIATED 0x00
+#define WPS_ASSOC_STATE_CONNECTION_SUCCESS 0x01
+#define WPS_ASSOC_STATE_CONFIGURATION_FAILURE 0x02
+#define WPS_ASSOC_STATE_ASSOCIATION_FAILURE 0x03
+#define WPS_ASSOC_STATE_IP_FAILURE 0x04
+
+/* =====================P2P Section ===================== */
+/* For P2P */
+#define P2POUI 0x506F9A09
+
+/* P2P Attribute ID */
+#define P2P_ATTR_STATUS 0x00
+#define P2P_ATTR_MINOR_REASON_CODE 0x01
+#define P2P_ATTR_CAPABILITY 0x02
+#define P2P_ATTR_DEVICE_ID 0x03
+#define P2P_ATTR_GO_INTENT 0x04
+#define P2P_ATTR_CONF_TIMEOUT 0x05
+#define P2P_ATTR_LISTEN_CH 0x06
+#define P2P_ATTR_GROUP_BSSID 0x07
+#define P2P_ATTR_EX_LISTEN_TIMING 0x08
+#define P2P_ATTR_INTENTED_IF_ADDR 0x09
+#define P2P_ATTR_MANAGEABILITY 0x0A
+#define P2P_ATTR_CH_LIST 0x0B
+#define P2P_ATTR_NOA 0x0C
+#define P2P_ATTR_DEVICE_INFO 0x0D
+#define P2P_ATTR_GROUP_INFO 0x0E
+#define P2P_ATTR_GROUP_ID 0x0F
+#define P2P_ATTR_INTERFACE 0x10
+#define P2P_ATTR_OPERATING_CH 0x11
+#define P2P_ATTR_INVITATION_FLAGS 0x12
+
+/* Value of Status Attribute */
+#define P2P_STATUS_SUCCESS 0x00
+#define P2P_STATUS_FAIL_INFO_UNAVAILABLE 0x01
+#define P2P_STATUS_FAIL_INCOMPATIBLE_PARAM 0x02
+#define P2P_STATUS_FAIL_LIMIT_REACHED 0x03
+#define P2P_STATUS_FAIL_INVALID_PARAM 0x04
+#define P2P_STATUS_FAIL_REQUEST_UNABLE 0x05
+#define P2P_STATUS_FAIL_PREVOUS_PROTO_ERR 0x06
+#define P2P_STATUS_FAIL_NO_COMMON_CH 0x07
+#define P2P_STATUS_FAIL_UNKNOWN_P2PGROUP 0x08
+#define P2P_STATUS_FAIL_BOTH_GOINTENT_15 0x09
+#define P2P_STATUS_FAIL_INCOMPATIBLE_PROVSION 0x0A
+#define P2P_STATUS_FAIL_USER_REJECT 0x0B
+
+/* Value of Invitation Flags Attribute */
+#define P2P_INVITATION_FLAGS_PERSISTENT BIT(0)
+
+#define DMP_P2P_DEVCAP_SUPPORT (P2P_DEVCAP_SERVICE_DISCOVERY | \
+ P2P_DEVCAP_CLIENT_DISCOVERABILITY | \
+ P2P_DEVCAP_CONCURRENT_OPERATION | \
+ P2P_DEVCAP_INVITATION_PROC)
+
+#define DMP_P2P_GRPCAP_SUPPORT (P2P_GRPCAP_INTRABSS)
+
+/* Value of Device Capability Bitmap */
+#define P2P_DEVCAP_SERVICE_DISCOVERY BIT(0)
+#define P2P_DEVCAP_CLIENT_DISCOVERABILITY BIT(1)
+#define P2P_DEVCAP_CONCURRENT_OPERATION BIT(2)
+#define P2P_DEVCAP_INFRA_MANAGED BIT(3)
+#define P2P_DEVCAP_DEVICE_LIMIT BIT(4)
+#define P2P_DEVCAP_INVITATION_PROC BIT(5)
+
+/* Value of Group Capability Bitmap */
+#define P2P_GRPCAP_GO BIT(0)
+#define P2P_GRPCAP_PERSISTENT_GROUP BIT(1)
+#define P2P_GRPCAP_GROUP_LIMIT BIT(2)
+#define P2P_GRPCAP_INTRABSS BIT(3)
+#define P2P_GRPCAP_CROSS_CONN BIT(4)
+#define P2P_GRPCAP_PERSISTENT_RECONN BIT(5)
+#define P2P_GRPCAP_GROUP_FORMATION BIT(6)
+
+/* P2P Public Action Frame (Management Frame) */
+#define P2P_PUB_ACTION_ACTION 0x09
+
+/* P2P Public Action Frame Type */
+#define P2P_GO_NEGO_REQ 0
+#define P2P_GO_NEGO_RESP 1
+#define P2P_GO_NEGO_CONF 2
+#define P2P_INVIT_REQ 3
+#define P2P_INVIT_RESP 4
+#define P2P_DEVDISC_REQ 5
+#define P2P_DEVDISC_RESP 6
+#define P2P_PROVISION_DISC_REQ 7
+#define P2P_PROVISION_DISC_RESP 8
+
+/* P2P Action Frame Type */
+#define P2P_NOTICE_OF_ABSENCE 0
+#define P2P_PRESENCE_REQUEST 1
+#define P2P_PRESENCE_RESPONSE 2
+#define P2P_GO_DISC_REQUEST 3
+
+
+#define P2P_MAX_PERSISTENT_GROUP_NUM 10
+
+#define P2P_PROVISIONING_SCAN_CNT 3
+
+#define P2P_WILDCARD_SSID_LEN 7
+
+#define P2P_FINDPHASE_EX_NONE		0	/* default value, used when (1) p2p is disabled or (2) p2p is enabled but only one scan phase is done */
+#define P2P_FINDPHASE_EX_FULL		1	/* used when p2p is enabled and one scan phase plus (P2P_FINDPHASE_EX_MAX - 1) find phases are wanted */
+#define P2P_FINDPHASE_EX_SOCIAL_FIRST (P2P_FINDPHASE_EX_FULL+1)
+#define P2P_FINDPHASE_EX_MAX 4
+#define P2P_FINDPHASE_EX_SOCIAL_LAST P2P_FINDPHASE_EX_MAX
+
+#define P2P_PROVISION_TIMEOUT 5000 /* 5 seconds timeout for sending the provision discovery request */
+#define P2P_CONCURRENT_PROVISION_TIMEOUT 3000 /* 3 seconds timeout for sending the provision discovery request under concurrent mode */
+#define P2P_GO_NEGO_TIMEOUT			5000	/* 5 seconds timeout for receiving the group negotiation response */
+#define P2P_CONCURRENT_GO_NEGO_TIMEOUT 3000 /* 3 seconds timeout for sending the negotiation request under concurrent mode */
+#define P2P_TX_PRESCAN_TIMEOUT 100 /* 100ms */
+#define P2P_INVITE_TIMEOUT 5000 /* 5 seconds timeout for sending the invitation request */
+#define P2P_CONCURRENT_INVITE_TIMEOUT 3000 /* 3 seconds timeout for sending the invitation request under concurrent mode */
+#define P2P_RESET_SCAN_CH 25000 /* 25 seconds timeout to reset the scan channel (based on channel plan) */
+#define P2P_MAX_INTENT 15
+
+#define P2P_MAX_NOA_NUM 2
+
+/* WPS Configuration Method */
+#define WPS_CM_NONE 0x0000
+#define WPS_CM_LABEL 0x0004
+#define WPS_CM_DISPLYA 0x0008
+#define WPS_CM_EXTERNAL_NFC_TOKEN 0x0010
+#define WPS_CM_INTEGRATED_NFC_TOKEN 0x0020
+#define WPS_CM_NFC_INTERFACE 0x0040
+#define WPS_CM_PUSH_BUTTON 0x0080
+#define WPS_CM_KEYPAD 0x0100
+#define WPS_CM_SW_PUHS_BUTTON 0x0280
+#define WPS_CM_HW_PUHS_BUTTON 0x0480
+#define WPS_CM_SW_DISPLAY_PIN 0x2008
+#define WPS_CM_LCD_DISPLAY_PIN 0x4008
+
+enum P2P_ROLE {
+ P2P_ROLE_DISABLE = 0,
+ P2P_ROLE_DEVICE = 1,
+ P2P_ROLE_CLIENT = 2,
+ P2P_ROLE_GO = 3
+};
+
+enum P2P_STATE {
+	P2P_STATE_NONE = 0,				/* P2P disabled */
+	P2P_STATE_IDLE = 1,				/* P2P is enabled but doing nothing */
+	P2P_STATE_LISTEN = 2,				/* In pure listen state */
+	P2P_STATE_SCAN = 3,				/* In scan phase */
+	P2P_STATE_FIND_PHASE_LISTEN = 4,		/* In the listen state of find phase */
+	P2P_STATE_FIND_PHASE_SEARCH = 5,		/* In the search state of find phase */
+	P2P_STATE_TX_PROVISION_DIS_REQ = 6,		/* In P2P provisioning discovery */
+	P2P_STATE_RX_PROVISION_DIS_RSP = 7,
+	P2P_STATE_RX_PROVISION_DIS_REQ = 8,
+	P2P_STATE_GONEGO_ING = 9,			/* Doing the group owner negotiation handshake */
+	P2P_STATE_GONEGO_OK = 10,			/* Finished the group negotiation handshake successfully */
+	P2P_STATE_GONEGO_FAIL = 11,			/* Finished the group negotiation handshake with failure */
+	P2P_STATE_RECV_INVITE_REQ_MATCH = 12,		/* Received a P2P Invitation request that matches the profile */
+	P2P_STATE_PROVISIONING_ING = 13,		/* Doing the P2P WPS */
+	P2P_STATE_PROVISIONING_DONE = 14,		/* Finished the P2P WPS */
+	P2P_STATE_TX_INVITE_REQ = 15,			/* Transmitting the P2P Invitation request */
+	P2P_STATE_RX_INVITE_RESP_OK = 16,		/* Received the P2P Invitation response */
+	P2P_STATE_RECV_INVITE_REQ_DISMATCH = 17,	/* Received a P2P Invitation request that does not match the profile */
+	P2P_STATE_RECV_INVITE_REQ_GO = 18,		/* Received a P2P Invitation request and this wifi is GO */
+	P2P_STATE_RECV_INVITE_REQ_JOIN = 19,		/* Received a P2P Invitation request to join an existing P2P Group */
+	P2P_STATE_RX_INVITE_RESP_FAIL = 20,		/* Received the P2P Invitation response with failure */
+	P2P_STATE_RX_INFOR_NOREADY = 21,		/* Received a p2p negotiation response while the information is not available */
+	P2P_STATE_TX_INFOR_NOREADY = 22,		/* Sending a p2p negotiation response while the information is not available */
+};
+
+enum P2P_WPSINFO {
+ P2P_NO_WPSINFO = 0,
+ P2P_GOT_WPSINFO_PEER_DISPLAY_PIN = 1,
+ P2P_GOT_WPSINFO_SELF_DISPLAY_PIN = 2,
+ P2P_GOT_WPSINFO_PBC = 3,
+};
+
+#define P2P_PRIVATE_IOCTL_SET_LEN 64
+
+enum P2P_PROTO_WK_ID
+{
+ P2P_FIND_PHASE_WK = 0,
+ P2P_RESTORE_STATE_WK = 1,
+ P2P_PRE_TX_PROVDISC_PROCESS_WK = 2,
+ P2P_PRE_TX_NEGOREQ_PROCESS_WK = 3,
+ P2P_PRE_TX_INVITEREQ_PROCESS_WK = 4,
+	P2P_AP_P2P_CH_SWITCH_PROCESS_WK = 5,
+ P2P_RO_CH_WK = 6,
+};
+
+/* =====================WFD Section ===================== */
+/* For Wi-Fi Display */
+#define WFD_ATTR_DEVICE_INFO 0x00
+#define WFD_ATTR_ASSOC_BSSID 0x01
+#define WFD_ATTR_COUPLED_SINK_INFO 0x06
+#define WFD_ATTR_LOCAL_IP_ADDR 0x08
+#define WFD_ATTR_SESSION_INFO 0x09
+#define WFD_ATTR_ALTER_MAC 0x0a
+
+/* For WFD Device Information Attribute */
+#define WFD_DEVINFO_SOURCE 0x0000
+#define WFD_DEVINFO_PSINK 0x0001
+#define WFD_DEVINFO_SSINK 0x0002
+#define WFD_DEVINFO_DUAL 0x0003
+
+#define WFD_DEVINFO_SESSION_AVAIL 0x0010
+#define WFD_DEVINFO_WSD 0x0040
+#define WFD_DEVINFO_PC_TDLS 0x0080
+#define WFD_DEVINFO_HDCP_SUPPORT 0x0100
+
+#define IP_MCAST_MAC(mac) ((mac[0]== 0x01) && (mac[1]== 0x00) && (mac[2]== 0x5e))
+#define ICMPV6_MCAST_MAC(mac) ((mac[0]== 0x33) && (mac[1]== 0x33) && (mac[2]!= 0xff))
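
A brief illustrative use of the two multicast-address helpers above; the example addresses are chosen here for clarity and are not taken from the driver:

	/* Illustration only: 01:00:5e:00:00:fb is an IPv4 multicast MAC (mDNS),
	 * 33:33:00:00:00:01 is the IPv6 all-nodes multicast MAC. */
	u8 v4grp[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb };
	u8 v6grp[6] = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x01 };
	/* IP_MCAST_MAC(v4grp) and ICMPV6_MCAST_MAC(v6grp) both evaluate to true */
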
+
+/* Regulatory Domain */
+struct regd_pair_mapping {
+ u16 reg_dmnenum;
+ u16 reg_2ghz_ctl;
+};
+
+struct rtw_regulatory {
+ char alpha2[2];
+ u16 country_code;
+ u16 max_power_level;
+ u32 tp_scale;
+ u16 current_rd;
+ u16 current_rd_ext;
+ int16_t power_limit;
+ struct regd_pair_mapping *regpair;
+};
+
+#endif /* _WIFI_H_ */
diff --git a/drivers/staging/rtl8723bs/include/wlan_bssdef.h b/drivers/staging/rtl8723bs/include/wlan_bssdef.h
new file mode 100644
index 000000000000..af78d97980fa
--- /dev/null
+++ b/drivers/staging/rtl8723bs/include/wlan_bssdef.h
@@ -0,0 +1,278 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#ifndef __WLAN_BSSDEF_H__
+#define __WLAN_BSSDEF_H__
+
+
+#define MAX_IE_SZ 768
+
+
+#define NDIS_802_11_LENGTH_SSID 32
+#define NDIS_802_11_LENGTH_RATES 8
+#define NDIS_802_11_LENGTH_RATES_EX 16
+
+typedef unsigned char NDIS_802_11_MAC_ADDRESS[6];
+typedef unsigned char NDIS_802_11_RATES[NDIS_802_11_LENGTH_RATES]; /* Set of 8 data rates */
+typedef unsigned char NDIS_802_11_RATES_EX[NDIS_802_11_LENGTH_RATES_EX]; /* Set of 16 data rates */
+
+struct ndis_802_11_ssid {
+ u32 SsidLength;
+ u8 Ssid[32];
+};
+
+enum NDIS_802_11_NETWORK_TYPE {
+ Ndis802_11FH,
+ Ndis802_11DS,
+ Ndis802_11OFDM5,
+ Ndis802_11OFDM24,
+ Ndis802_11NetworkTypeMax /* not a real type, defined as an upper bound */
+};
+
+struct ndis_802_11_conf_fh {
+ u32 Length; /* Length of structure */
+ u32 HopPattern; /* As defined by 802.11, MSB set */
+ u32 HopSet; /* to one if non-802.11 */
+ u32 DwellTime; /* units are Kusec */
+};
+
+/*
+ FW will only save the channel number in DSConfig.
+ ODI Handler will convert the channel number to freq. number.
+*/
+struct ndis_802_11_conf {
+ u32 Length; /* Length of structure */
+ u32 BeaconPeriod; /* units are Kusec */
+ u32 ATIMWindow; /* units are Kusec */
+ u32 DSConfig; /* Frequency, units are kHz */
+ struct ndis_802_11_conf_fh FHConfig;
+};
+
+enum NDIS_802_11_NETWORK_INFRASTRUCTURE {
+ Ndis802_11IBSS,
+ Ndis802_11Infrastructure,
+ Ndis802_11AutoUnknown,
+ Ndis802_11InfrastructureMax, /* Not a real value, defined as upper bound */
+ Ndis802_11APMode,
+};
+
+struct ndis_802_11_fix_ie {
+ u8 Timestamp[8];
+ u16 BeaconInterval;
+ u16 Capabilities;
+};
+
+struct ndis_80211_var_ie {
+ u8 ElementID;
+ u8 Length;
+ u8 data[1];
+};
+
+/* Length is the sum below, rounded up to a multiple of 4 bytes:
+ * sizeof (NDIS_802_11_MAC_ADDRESS) + 2 +
+ * sizeof (struct ndis_802_11_ssid) + sizeof (u32) +
+ * sizeof (long) + sizeof (enum NDIS_802_11_NETWORK_TYPE) +
+ * sizeof (struct ndis_802_11_conf) + sizeof (NDIS_802_11_RATES_EX) + IELength
+ *
+ * Except for IELength, all other fields are of fixed length. Therefore, a
+ * macro can be defined to represent the partial sum (see the sketch below).
+ */
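
For reference, a minimal sketch of the partial-sum macro the comment above alludes to; the name NDIS_802_11_FIXED_INFO_SIZE is hypothetical and this macro is not part of the original header:

	/* Hypothetical illustration only: fixed-length portion of the BSS info
	 * described in the comment above (everything except IELength / the IEs). */
	#define NDIS_802_11_FIXED_INFO_SIZE				\
		(sizeof(NDIS_802_11_MAC_ADDRESS) + 2 +			\
		 sizeof(struct ndis_802_11_ssid) + sizeof(u32) +	\
		 sizeof(long) + sizeof(enum NDIS_802_11_NETWORK_TYPE) +	\
		 sizeof(struct ndis_802_11_conf) + sizeof(NDIS_802_11_RATES_EX))
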
+enum NDIS_802_11_AUTHENTICATION_MODE {
+ Ndis802_11AuthModeOpen,
+ Ndis802_11AuthModeShared,
+ Ndis802_11AuthModeAutoSwitch,
+ Ndis802_11AuthModeWPA,
+ Ndis802_11AuthModeWPAPSK,
+ Ndis802_11AuthModeWPANone,
+ Ndis802_11AuthModeWAPI,
+ Ndis802_11AuthModeMax /* Not a real mode, defined as upper bound */
+};
+
+enum NDIS_802_11_WEP_STATUS {
+ Ndis802_11WEPEnabled,
+ Ndis802_11Encryption1Enabled = Ndis802_11WEPEnabled,
+ Ndis802_11WEPDisabled,
+ Ndis802_11EncryptionDisabled = Ndis802_11WEPDisabled,
+ Ndis802_11WEPKeyAbsent,
+ Ndis802_11Encryption1KeyAbsent = Ndis802_11WEPKeyAbsent,
+ Ndis802_11WEPNotSupported,
+ Ndis802_11EncryptionNotSupported = Ndis802_11WEPNotSupported,
+ Ndis802_11Encryption2Enabled,
+ Ndis802_11Encryption2KeyAbsent,
+ Ndis802_11Encryption3Enabled,
+ Ndis802_11Encryption3KeyAbsent,
+ Ndis802_11_EncrypteionWAPI
+};
+
+#define NDIS_802_11_AI_REQFI_CAPABILITIES 1
+#define NDIS_802_11_AI_REQFI_LISTENINTERVAL 2
+#define NDIS_802_11_AI_REQFI_CURRENTAPADDRESS 4
+
+#define NDIS_802_11_AI_RESFI_CAPABILITIES 1
+#define NDIS_802_11_AI_RESFI_STATUSCODE 2
+#define NDIS_802_11_AI_RESFI_ASSOCIATIONID 4
+
+struct ndis_802_11_ai_reqfi {
+ u16 Capabilities;
+ u16 ListenInterval;
+ NDIS_802_11_MAC_ADDRESS CurrentAPAddress;
+};
+
+struct ndis_801_11_ai_resfi {
+ u16 Capabilities;
+ u16 StatusCode;
+ u16 AssociationId;
+};
+
+typedef struct _NDIS_802_11_ASSOCIATION_INFORMATION
+{
+ u32 Length;
+ u16 AvailableRequestFixedIEs;
+ struct ndis_802_11_ai_reqfi RequestFixedIEs;
+ u32 RequestIELength;
+ u32 OffsetRequestIEs;
+ u16 AvailableResponseFixedIEs;
+ struct ndis_801_11_ai_resfi ResponseFixedIEs;
+ u32 ResponseIELength;
+ u32 OffsetResponseIEs;
+} NDIS_802_11_ASSOCIATION_INFORMATION, *PNDIS_802_11_ASSOCIATION_INFORMATION;
+
+enum NDIS_802_11_RELOAD_DEFAULTS {
+ Ndis802_11ReloadWEPKeys
+};
+
+
+/* Key mapping keys require a BSSID */
+typedef struct _NDIS_802_11_KEY
+{
+ u32 Length; /* Length of this structure */
+ u32 KeyIndex;
+ u32 KeyLength; /* length of key in bytes */
+ NDIS_802_11_MAC_ADDRESS BSSID;
+ unsigned long long KeyRSC;
+ u8 KeyMaterial[32]; /* variable length depending on above field */
+} NDIS_802_11_KEY, *PNDIS_802_11_KEY;
+
+typedef struct _NDIS_802_11_REMOVE_KEY
+{
+ u32 Length; /* Length of this structure */
+ u32 KeyIndex;
+ NDIS_802_11_MAC_ADDRESS BSSID;
+} NDIS_802_11_REMOVE_KEY, *PNDIS_802_11_REMOVE_KEY;
+
+struct ndis_802_11_wep {
+ u32 Length; /* Length of this structure */
+ u32 KeyIndex; /* 0 is the per-client key, 1-N are the global keys */
+ u32 KeyLength; /* length of key in bytes */
+ u8 KeyMaterial[16];/* variable length depending on above field */
+};
+
+/* mask for authentication/integrity fields */
+#define NDIS_802_11_AUTH_REQUEST_AUTH_FIELDS 0x0f
+#define NDIS_802_11_AUTH_REQUEST_REAUTH 0x01
+#define NDIS_802_11_AUTH_REQUEST_KEYUPDATE 0x02
+#define NDIS_802_11_AUTH_REQUEST_PAIRWISE_ERROR 0x06
+#define NDIS_802_11_AUTH_REQUEST_GROUP_ERROR 0x0E
+
+/* MIC check time, 60 seconds. */
+#define MIC_CHECK_TIME 60000000
+
+#ifndef Ndis802_11APMode
+#define Ndis802_11APMode (Ndis802_11InfrastructureMax+1)
+#endif
+
+struct wlan_phy_info {
+	u8 SignalStrength; /* in percentage */
+	u8 SignalQuality; /* in percentage */
+ u8 Optimum_antenna; /* for Antenna diversity */
+ u8 Reserved_0;
+};
+
+struct wlan_bcn_info {
+	/* this info is obtained from rtw_get_encrypt_info when
+	 * translating scan results for the UI */
+ u8 encryp_protocol;/* ENCRYP_PROTOCOL_E: OPEN/WEP/WPA/WPA2/WAPI */
+ int group_cipher; /* WPA/WPA2 group cipher */
+ int pairwise_cipher;/* WPA/WPA2/WEP pairwise cipher */
+ int is_8021x;
+
+ /* bwmode 20/40 and ch_offset UP/LOW */
+ unsigned short ht_cap_info;
+ unsigned char ht_info_infos_0;
+};
+
+/* temporarily add #pragma pack for a structure alignment issue of
+ * struct wlan_bssid_ex and get_wlan_bssid_ex_sz()
+ */
+struct wlan_bssid_ex {
+ u32 Length;
+ NDIS_802_11_MAC_ADDRESS MacAddress;
+	u8 Reserved[2]; /* [0]: is beacon frame */
+ struct ndis_802_11_ssid Ssid;
+ u32 Privacy;
+	long Rssi; /* in dBm, raw data, obtained from the PHY */
+ enum NDIS_802_11_NETWORK_TYPE NetworkTypeInUse;
+ struct ndis_802_11_conf Configuration;
+ enum NDIS_802_11_NETWORK_INFRASTRUCTURE InfrastructureMode;
+ NDIS_802_11_RATES_EX SupportedRates;
+ struct wlan_phy_info PhyInfo;
+ u32 IELength;
+	u8 IEs[MAX_IE_SZ]; /* timestamp, beacon interval, and capability information */
+} __packed;
+
+__inline static uint get_wlan_bssid_ex_sz(struct wlan_bssid_ex *bss)
+{
+ return (sizeof(struct wlan_bssid_ex) - MAX_IE_SZ + bss->IELength);
+}
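
A minimal usage sketch of the size helper above, with a hypothetical IE length, showing that only the used portion of IEs[] is counted; this snippet is not part of the driver:

	/* Illustration only: a BSS carrying 128 bytes of IEs */
	struct wlan_bssid_ex bss = { .IELength = 128 };
	uint sz = get_wlan_bssid_ex_sz(&bss);
	/* sz == sizeof(struct wlan_bssid_ex) - MAX_IE_SZ + 128 */
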
+
+struct wlan_network {
+ struct list_head list;
+ int network_type; /* refer to ieee80211.h for WIRELESS_11A/B/G */
+	int fixed; /* set to fixed when the entry should not be removed during site surveying */
+	unsigned long last_scanned; /* timestamp for the network */
+	int aid; /* will only be valid when a BSS is joined. */
+ int join_res;
+ struct wlan_bssid_ex network; /* must be the last item */
+ struct wlan_bcn_info BcnInfo;
+};
+
+enum VRTL_CARRIER_SENSE {
+ DISABLE_VCS,
+ ENABLE_VCS,
+ AUTO_VCS
+};
+
+enum VCS_TYPE {
+ NONE_VCS,
+ RTS_CTS,
+ CTS_TO_SELF
+};
+
+#define PWR_CAM 0
+#define PWR_MINPS 1
+#define PWR_MAXPS 2
+#define PWR_UAPSD 3
+#define PWR_VOIP 4
+
+enum UAPSD_MAX_SP {
+ NO_LIMIT,
+ TWO_MSDU,
+ FOUR_MSDU,
+ SIX_MSDU
+};
+
+#define NUM_PRE_AUTH_KEY 16
+#define NUM_PMKID_CACHE NUM_PRE_AUTH_KEY
+
+#endif /* __WLAN_BSSDEF_H__ */
diff --git a/drivers/staging/rtl8723bs/include/xmit_osdep.h b/drivers/staging/rtl8723bs/include/xmit_osdep.h
new file mode 100644
index 000000000000..46909ff7339c
--- /dev/null
+++ b/drivers/staging/rtl8723bs/include/xmit_osdep.h
@@ -0,0 +1,54 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#ifndef __XMIT_OSDEP_H_
+#define __XMIT_OSDEP_H_
+
+
+struct pkt_file {
+ _pkt *pkt;
+	__kernel_size_t pkt_len; /* the remaining length of the open file */
+ _buffer *cur_buffer;
+ u8 *buf_start;
+ u8 *cur_addr;
+ __kernel_size_t buf_len;
+};
+
+#define NR_XMITFRAME 256
+
+struct xmit_priv;
+struct pkt_attrib;
+struct sta_xmit_priv;
+struct xmit_frame;
+struct xmit_buf;
+
+extern int _rtw_xmit_entry(_pkt *pkt, _nic_hdl pnetdev);
+extern int rtw_xmit_entry(_pkt *pkt, _nic_hdl pnetdev);
+
+void rtw_os_xmit_schedule(struct adapter *padapter);
+
+int rtw_os_xmit_resource_alloc(struct adapter *padapter, struct xmit_buf *pxmitbuf, u32 alloc_sz, u8 flag);
+void rtw_os_xmit_resource_free(struct adapter *padapter, struct xmit_buf *pxmitbuf, u32 free_sz, u8 flag);
+
+extern void rtw_set_tx_chksum_offload(_pkt *pkt, struct pkt_attrib *pattrib);
+
+extern uint rtw_remainder_len(struct pkt_file *pfile);
+extern void _rtw_open_pktfile(_pkt *pkt, struct pkt_file *pfile);
+extern uint _rtw_pktfile_read (struct pkt_file *pfile, u8 *rmem, uint rlen);
+extern sint rtw_endofpktfile (struct pkt_file *pfile);
+
+extern void rtw_os_pkt_complete(struct adapter *padapter, _pkt *pkt);
+extern void rtw_os_xmit_complete(struct adapter *padapter, struct xmit_frame *pxframe);
+
+#endif /* __XMIT_OSDEP_H_ */
diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
new file mode 100644
index 000000000000..5e7a61f24f8d
--- /dev/null
+++ b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
@@ -0,0 +1,3586 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#define _IOCTL_CFG80211_C_
+
+#include <drv_types.h>
+#include <rtw_debug.h>
+#include <linux/jiffies.h>
+
+#include <rtw_wifi_regd.h>
+
+#define RTW_MAX_MGMT_TX_CNT (8)
+
+#define RTW_SCAN_IE_LEN_MAX 2304
+#define RTW_MAX_REMAIN_ON_CHANNEL_DURATION 5000 /* ms */
+#define RTW_MAX_NUM_PMKIDS 4
+
+#define RTW_CH_MAX_2G_CHANNEL 14 /* Max channel in 2G band */
+
+static const u32 rtw_cipher_suites[] = {
+ WLAN_CIPHER_SUITE_WEP40,
+ WLAN_CIPHER_SUITE_WEP104,
+ WLAN_CIPHER_SUITE_TKIP,
+ WLAN_CIPHER_SUITE_CCMP,
+ WLAN_CIPHER_SUITE_AES_CMAC,
+};
+
+#define RATETAB_ENT(_rate, _rateid, _flags) \
+ { \
+ .bitrate = (_rate), \
+ .hw_value = (_rateid), \
+ .flags = (_flags), \
+ }
+
+#define CHAN2G(_channel, _freq, _flags) { \
+ .band = NL80211_BAND_2GHZ, \
+ .center_freq = (_freq), \
+ .hw_value = (_channel), \
+ .flags = (_flags), \
+ .max_antenna_gain = 0, \
+ .max_power = 30, \
+}
+
+/* If wowlan is not supported, the kernel generates a disconnect at each suspend,
+ * cf. net/wireless/sysfs.c, so register a stub wowlan.
+ * Moreover, wowlan has to be enabled via the nl80211_set_wowlan callback
+ * (from user space, e.g. iw phy0 wowlan enable).
+ */
+static const struct wiphy_wowlan_support wowlan_stub = {
+ .flags = WIPHY_WOWLAN_ANY,
+ .n_patterns = 0,
+ .pattern_max_len = 0,
+ .pattern_min_len = 0,
+ .max_pkt_offset = 0,
+};
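
For context, a sketch of how such a stub is typically attached in the wiphy setup path; this fragment is illustrative only, and the driver's actual hookup happens later in this file:

	/* Hypothetical sketch: attach the stub so cfg80211 does not force a
	 * disconnect on every suspend (requires CONFIG_PM). */
	#ifdef CONFIG_PM
		wiphy->wowlan = &wowlan_stub;
	#endif
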
+
+static struct ieee80211_rate rtw_rates[] = {
+ RATETAB_ENT(10, 0x1, 0),
+ RATETAB_ENT(20, 0x2, 0),
+ RATETAB_ENT(55, 0x4, 0),
+ RATETAB_ENT(110, 0x8, 0),
+ RATETAB_ENT(60, 0x10, 0),
+ RATETAB_ENT(90, 0x20, 0),
+ RATETAB_ENT(120, 0x40, 0),
+ RATETAB_ENT(180, 0x80, 0),
+ RATETAB_ENT(240, 0x100, 0),
+ RATETAB_ENT(360, 0x200, 0),
+ RATETAB_ENT(480, 0x400, 0),
+ RATETAB_ENT(540, 0x800, 0),
+};
+
+#define rtw_a_rates (rtw_rates + 4)
+#define RTW_A_RATES_NUM 8
+#define rtw_g_rates (rtw_rates + 0)
+#define RTW_G_RATES_NUM 12
+
+#define RTW_2G_CHANNELS_NUM 14
+#define RTW_5G_CHANNELS_NUM 37
+
+static struct ieee80211_channel rtw_2ghz_channels[] = {
+ CHAN2G(1, 2412, 0),
+ CHAN2G(2, 2417, 0),
+ CHAN2G(3, 2422, 0),
+ CHAN2G(4, 2427, 0),
+ CHAN2G(5, 2432, 0),
+ CHAN2G(6, 2437, 0),
+ CHAN2G(7, 2442, 0),
+ CHAN2G(8, 2447, 0),
+ CHAN2G(9, 2452, 0),
+ CHAN2G(10, 2457, 0),
+ CHAN2G(11, 2462, 0),
+ CHAN2G(12, 2467, 0),
+ CHAN2G(13, 2472, 0),
+ CHAN2G(14, 2484, 0),
+};
+
+static void rtw_2g_channels_init(struct ieee80211_channel *channels)
+{
+ memcpy((void*)channels, (void*)rtw_2ghz_channels,
+ sizeof(struct ieee80211_channel)*RTW_2G_CHANNELS_NUM
+ );
+}
+
+static void rtw_2g_rates_init(struct ieee80211_rate *rates)
+{
+ memcpy(rates, rtw_g_rates,
+ sizeof(struct ieee80211_rate)*RTW_G_RATES_NUM
+ );
+}
+
+static struct ieee80211_supported_band *rtw_spt_band_alloc(
+ enum nl80211_band band
+ )
+{
+ struct ieee80211_supported_band *spt_band = NULL;
+ int n_channels, n_bitrates;
+
+ if (band == NL80211_BAND_2GHZ)
+ {
+ n_channels = RTW_2G_CHANNELS_NUM;
+ n_bitrates = RTW_G_RATES_NUM;
+ }
+ else
+ {
+ goto exit;
+ }
+
+ spt_band = (struct ieee80211_supported_band *)rtw_zmalloc(
+ sizeof(struct ieee80211_supported_band)
+ + sizeof(struct ieee80211_channel)*n_channels
+ + sizeof(struct ieee80211_rate)*n_bitrates
+ );
+ if (!spt_band)
+ goto exit;
+
+ spt_band->channels = (struct ieee80211_channel*)(((u8 *)spt_band)+sizeof(struct ieee80211_supported_band));
+ spt_band->bitrates = (struct ieee80211_rate*)(((u8 *)spt_band->channels)+sizeof(struct ieee80211_channel)*n_channels);
+ spt_band->band = band;
+ spt_band->n_channels = n_channels;
+ spt_band->n_bitrates = n_bitrates;
+
+ if (band == NL80211_BAND_2GHZ)
+ {
+ rtw_2g_channels_init(spt_band->channels);
+ rtw_2g_rates_init(spt_band->bitrates);
+ }
+
+ /* spt_band.ht_cap */
+
+exit:
+
+ return spt_band;
+}
+
+static void rtw_spt_band_free(struct ieee80211_supported_band *spt_band)
+{
+ u32 size = 0;
+
+ if (!spt_band)
+ return;
+
+ if (spt_band->band == NL80211_BAND_2GHZ)
+ {
+ size = sizeof(struct ieee80211_supported_band)
+ + sizeof(struct ieee80211_channel)*RTW_2G_CHANNELS_NUM
+ + sizeof(struct ieee80211_rate)*RTW_G_RATES_NUM;
+ }
+ kfree((u8 *)spt_band);
+}
+
+static const struct ieee80211_txrx_stypes
+rtw_cfg80211_default_mgmt_stypes[NUM_NL80211_IFTYPES] = {
+ [NL80211_IFTYPE_ADHOC] = {
+ .tx = 0xffff,
+ .rx = BIT(IEEE80211_STYPE_ACTION >> 4)
+ },
+ [NL80211_IFTYPE_STATION] = {
+ .tx = 0xffff,
+ .rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
+ },
+ [NL80211_IFTYPE_AP] = {
+ .tx = 0xffff,
+ .rx = BIT(IEEE80211_STYPE_ASSOC_REQ >> 4) |
+ BIT(IEEE80211_STYPE_REASSOC_REQ >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_REQ >> 4) |
+ BIT(IEEE80211_STYPE_DISASSOC >> 4) |
+ BIT(IEEE80211_STYPE_AUTH >> 4) |
+ BIT(IEEE80211_STYPE_DEAUTH >> 4) |
+ BIT(IEEE80211_STYPE_ACTION >> 4)
+ },
+ [NL80211_IFTYPE_AP_VLAN] = {
+ /* copy AP */
+ .tx = 0xffff,
+ .rx = BIT(IEEE80211_STYPE_ASSOC_REQ >> 4) |
+ BIT(IEEE80211_STYPE_REASSOC_REQ >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_REQ >> 4) |
+ BIT(IEEE80211_STYPE_DISASSOC >> 4) |
+ BIT(IEEE80211_STYPE_AUTH >> 4) |
+ BIT(IEEE80211_STYPE_DEAUTH >> 4) |
+ BIT(IEEE80211_STYPE_ACTION >> 4)
+ },
+ [NL80211_IFTYPE_P2P_CLIENT] = {
+ .tx = 0xffff,
+ .rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
+ },
+ [NL80211_IFTYPE_P2P_GO] = {
+ .tx = 0xffff,
+ .rx = BIT(IEEE80211_STYPE_ASSOC_REQ >> 4) |
+ BIT(IEEE80211_STYPE_REASSOC_REQ >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_REQ >> 4) |
+ BIT(IEEE80211_STYPE_DISASSOC >> 4) |
+ BIT(IEEE80211_STYPE_AUTH >> 4) |
+ BIT(IEEE80211_STYPE_DEAUTH >> 4) |
+ BIT(IEEE80211_STYPE_ACTION >> 4)
+ },
+};
+
+static int rtw_ieee80211_channel_to_frequency(int chan, int band)
+{
+ /* see 802.11 17.3.8.3.2 and Annex J
+ * there are overlapping channel numbers in 5GHz and 2GHz bands */
+ if (band == NL80211_BAND_2GHZ) {
+ if (chan == 14)
+ return 2484;
+ else if (chan < 14)
+ return 2407 + chan * 5;
+ }
+
+ return 0; /* not supported */
+}
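
A quick worked check of the channel-to-frequency mapping implemented above (illustrative values, not part of the driver):

	int f_ch6  = rtw_ieee80211_channel_to_frequency(6,  NL80211_BAND_2GHZ); /* 2407 + 6 * 5 = 2437 MHz */
	int f_ch14 = rtw_ieee80211_channel_to_frequency(14, NL80211_BAND_2GHZ); /* special case: 2484 MHz */
	int f_5g   = rtw_ieee80211_channel_to_frequency(36, NL80211_BAND_5GHZ); /* 0: not handled by this helper */
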
+
+static u64 rtw_get_systime_us(void)
+{
+ struct timespec ts;
+ get_monotonic_boottime(&ts);
+ return ((u64)ts.tv_sec*1000000) + ts.tv_nsec / 1000;
+}
+
+#define MAX_BSSINFO_LEN 1000
+struct cfg80211_bss *rtw_cfg80211_inform_bss(struct adapter *padapter, struct wlan_network *pnetwork)
+{
+ struct ieee80211_channel *notify_channel;
+ struct cfg80211_bss *bss = NULL;
+ /* struct ieee80211_supported_band *band; */
+ u16 channel;
+ u32 freq;
+ u64 notify_timestamp;
+ u16 notify_capability;
+ u16 notify_interval;
+ u8 *notify_ie;
+ size_t notify_ielen;
+ s32 notify_signal;
+ u8 *buf = NULL, *pbuf;
+ size_t len, bssinf_len = 0;
+ struct ieee80211_hdr *pwlanhdr;
+ __le16 *fctrl;
+ u8 bc_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+
+ struct wireless_dev *wdev = padapter->rtw_wdev;
+ struct wiphy *wiphy = wdev->wiphy;
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+
+
+ /* DBG_8192C("%s\n", __func__); */
+
+ bssinf_len = pnetwork->network.IELength+sizeof (struct ieee80211_hdr_3addr);
+ if (bssinf_len > MAX_BSSINFO_LEN) {
+ DBG_871X("%s IE Length too long > %d byte\n", __func__, MAX_BSSINFO_LEN);
+ goto exit;
+ }
+
+ {
+ u16 wapi_len = 0;
+
+ if (rtw_get_wapi_ie(pnetwork->network.IEs, pnetwork->network.IELength, NULL, &wapi_len)>0)
+ {
+ if (wapi_len > 0)
+ {
+ DBG_871X("%s, no support wapi!\n", __func__);
+ goto exit;
+ }
+ }
+ }
+
+ /* To reduce PBC Overlap rate */
+ /* spin_lock_bh(&pwdev_priv->scan_req_lock); */
+ if (adapter_wdev_data(padapter)->scan_request != NULL)
+ {
+ u8 *psr = NULL, sr = 0;
+ struct ndis_802_11_ssid *pssid = &pnetwork->network.Ssid;
+ struct cfg80211_scan_request *request = adapter_wdev_data(padapter)->scan_request;
+ struct cfg80211_ssid *ssids = request->ssids;
+ u32 wpsielen = 0;
+ u8 *wpsie = NULL;
+
+ wpsie = rtw_get_wps_ie(pnetwork->network.IEs+_FIXED_IE_LENGTH_, pnetwork->network.IELength-_FIXED_IE_LENGTH_, NULL, &wpsielen);
+
+ if (wpsie && wpsielen>0)
+ psr = rtw_get_wps_attr_content(wpsie, wpsielen, WPS_ATTR_SELECTED_REGISTRAR, (u8 *)(&sr), NULL);
+
+ if (sr != 0)
+ {
+			if (request->n_ssids == 1 && request->n_channels == 1) /* it means WPS is in progress */
+ {
+ DBG_8192C("ssid =%s, len =%d\n", pssid->Ssid, pssid->SsidLength);
+
+ if (ssids[0].ssid_len == 0) {
+ }
+ else if (pssid->SsidLength == ssids[0].ssid_len &&
+ !memcmp(pssid->Ssid, ssids[0].ssid, ssids[0].ssid_len))
+ {
+ DBG_871X("%s, got sr and ssid match!\n", __func__);
+ }
+ else
+ {
+ if (psr != NULL)
+ *psr = 0; /* clear sr */
+ }
+ }
+ }
+ }
+ /* spin_unlock_bh(&pwdev_priv->scan_req_lock); */
+
+
+ channel = pnetwork->network.Configuration.DSConfig;
+ freq = rtw_ieee80211_channel_to_frequency(channel, NL80211_BAND_2GHZ);
+
+ notify_channel = ieee80211_get_channel(wiphy, freq);
+
+ notify_timestamp = rtw_get_systime_us();
+
+ notify_interval = le16_to_cpu(*(__le16 *)rtw_get_beacon_interval_from_ie(pnetwork->network.IEs));
+ notify_capability = le16_to_cpu(*(__le16 *)rtw_get_capability_from_ie(pnetwork->network.IEs));
+
+ notify_ie = pnetwork->network.IEs+_FIXED_IE_LENGTH_;
+ notify_ielen = pnetwork->network.IELength-_FIXED_IE_LENGTH_;
+
+ /* We've set wiphy's signal_type as CFG80211_SIGNAL_TYPE_MBM: signal strength in mBm (100*dBm) */
+ if (check_fwstate(pmlmepriv, _FW_LINKED) == true &&
+ is_same_network(&pmlmepriv->cur_network.network, &pnetwork->network, 0)) {
+ notify_signal = 100*translate_percentage_to_dbm(padapter->recvpriv.signal_strength);/* dbm */
+ } else {
+ notify_signal = 100*translate_percentage_to_dbm(pnetwork->network.PhyInfo.SignalStrength);/* dbm */
+ }
+
+ buf = kzalloc(MAX_BSSINFO_LEN, GFP_ATOMIC);
+ if (!buf)
+ goto exit;
+ pbuf = buf;
+
+ pwlanhdr = (struct ieee80211_hdr *)pbuf;
+ fctrl = &(pwlanhdr->frame_control);
+ *(fctrl) = 0;
+
+ SetSeqNum(pwlanhdr, 0/*pmlmeext->mgnt_seq*/);
+ /* pmlmeext->mgnt_seq++; */
+
+ if (pnetwork->network.Reserved[0] == 1) { /* WIFI_BEACON */
+ memcpy(pwlanhdr->addr1, bc_addr, ETH_ALEN);
+ SetFrameSubType(pbuf, WIFI_BEACON);
+ } else {
+ memcpy(pwlanhdr->addr1, myid(&(padapter->eeprompriv)), ETH_ALEN);
+ SetFrameSubType(pbuf, WIFI_PROBERSP);
+ }
+
+ memcpy(pwlanhdr->addr2, pnetwork->network.MacAddress, ETH_ALEN);
+ memcpy(pwlanhdr->addr3, pnetwork->network.MacAddress, ETH_ALEN);
+
+
+ pbuf += sizeof(struct ieee80211_hdr_3addr);
+ len = sizeof (struct ieee80211_hdr_3addr);
+
+ memcpy(pbuf, pnetwork->network.IEs, pnetwork->network.IELength);
+ len += pnetwork->network.IELength;
+
+ *((__le64*)pbuf) = cpu_to_le64(notify_timestamp);
+
+ bss = cfg80211_inform_bss_frame(wiphy, notify_channel, (struct ieee80211_mgmt *)buf,
+ len, notify_signal, GFP_ATOMIC);
+
+ if (unlikely(!bss)) {
+ DBG_8192C(FUNC_ADPT_FMT" bss NULL\n", FUNC_ADPT_ARG(padapter));
+ goto exit;
+ }
+
+ cfg80211_put_bss(wiphy, bss);
+ kfree(buf);
+
+exit:
+ return bss;
+
+}
+
+/*
+ Check whether the given bss is valid via the kernel API cfg80211_get_bss()
+ @padapter : the given adapter
+
+ return true if the bss is valid, false if it is not found.
+*/
+int rtw_cfg80211_check_bss(struct adapter *padapter)
+{
+ struct wlan_bssid_ex *pnetwork = &(padapter->mlmeextpriv.mlmext_info.network);
+ struct cfg80211_bss *bss = NULL;
+ struct ieee80211_channel *notify_channel = NULL;
+ u32 freq;
+
+ if (!(pnetwork) || !(padapter->rtw_wdev))
+ return false;
+
+ freq = rtw_ieee80211_channel_to_frequency(pnetwork->Configuration.DSConfig, NL80211_BAND_2GHZ);
+
+ notify_channel = ieee80211_get_channel(padapter->rtw_wdev->wiphy, freq);
+ bss = cfg80211_get_bss(padapter->rtw_wdev->wiphy, notify_channel,
+ pnetwork->MacAddress, pnetwork->Ssid.Ssid,
+ pnetwork->Ssid.SsidLength,
+ WLAN_CAPABILITY_ESS, WLAN_CAPABILITY_ESS);
+
+ cfg80211_put_bss(padapter->rtw_wdev->wiphy, bss);
+
+ return (bss!= NULL);
+}
+
+void rtw_cfg80211_ibss_indicate_connect(struct adapter *padapter)
+{
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct wlan_network *cur_network = &(pmlmepriv->cur_network);
+ struct wireless_dev *pwdev = padapter->rtw_wdev;
+ struct wiphy *wiphy = pwdev->wiphy;
+ int freq = (int)cur_network->network.Configuration.DSConfig;
+ struct ieee80211_channel *chan;
+
+ DBG_871X(FUNC_ADPT_FMT"\n", FUNC_ADPT_ARG(padapter));
+ if (pwdev->iftype != NL80211_IFTYPE_ADHOC)
+ {
+ return;
+ }
+
+ if (!rtw_cfg80211_check_bss(padapter)) {
+ struct wlan_bssid_ex *pnetwork = &(padapter->mlmeextpriv.mlmext_info.network);
+ struct wlan_network *scanned = pmlmepriv->cur_network_scanned;
+
+ if (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) ==true)
+ {
+
+ memcpy(&cur_network->network, pnetwork, sizeof(struct wlan_bssid_ex));
+ if (!rtw_cfg80211_inform_bss(padapter, cur_network))
+ DBG_871X(FUNC_ADPT_FMT" inform fail !!\n", FUNC_ADPT_ARG(padapter));
+ else
+ DBG_871X(FUNC_ADPT_FMT" inform success !!\n", FUNC_ADPT_ARG(padapter));
+ }
+ else
+ {
+ if (scanned == NULL) {
+ rtw_warn_on(1);
+ return;
+ }
+ if (!memcmp(&(scanned->network.Ssid), &(pnetwork->Ssid), sizeof(struct ndis_802_11_ssid))
+ && !memcmp(scanned->network.MacAddress, pnetwork->MacAddress, sizeof(NDIS_802_11_MAC_ADDRESS))
+ ) {
+ if (!rtw_cfg80211_inform_bss(padapter, scanned)) {
+ DBG_871X(FUNC_ADPT_FMT" inform fail !!\n", FUNC_ADPT_ARG(padapter));
+ } else {
+ /* DBG_871X(FUNC_ADPT_FMT" inform success !!\n", FUNC_ADPT_ARG(padapter)); */
+ }
+ } else {
+ DBG_871X("scanned & pnetwork compare fail\n");
+ rtw_warn_on(1);
+ }
+ }
+
+ if (!rtw_cfg80211_check_bss(padapter))
+ DBG_871X_LEVEL(_drv_always_, FUNC_ADPT_FMT" BSS not found !!\n", FUNC_ADPT_ARG(padapter));
+ }
+ /* notify cfg80211 that device joined an IBSS */
+ chan = ieee80211_get_channel(wiphy, freq);
+ cfg80211_ibss_joined(padapter->pnetdev, cur_network->network.MacAddress, chan, GFP_ATOMIC);
+}
+
+void rtw_cfg80211_indicate_connect(struct adapter *padapter)
+{
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct wlan_network *cur_network = &(pmlmepriv->cur_network);
+ struct wireless_dev *pwdev = padapter->rtw_wdev;
+
+ DBG_871X(FUNC_ADPT_FMT"\n", FUNC_ADPT_ARG(padapter));
+ if (pwdev->iftype != NL80211_IFTYPE_STATION
+ && pwdev->iftype != NL80211_IFTYPE_P2P_CLIENT
+ ) {
+ return;
+ }
+
+ if (check_fwstate(pmlmepriv, WIFI_AP_STATE) == true)
+ return;
+
+ {
+ struct wlan_bssid_ex *pnetwork = &(padapter->mlmeextpriv.mlmext_info.network);
+ struct wlan_network *scanned = pmlmepriv->cur_network_scanned;
+
+ /* DBG_871X(FUNC_ADPT_FMT" BSS not found\n", FUNC_ADPT_ARG(padapter)); */
+
+ if (scanned == NULL) {
+ rtw_warn_on(1);
+ goto check_bss;
+ }
+
+ if (!memcmp(scanned->network.MacAddress, pnetwork->MacAddress, sizeof(NDIS_802_11_MAC_ADDRESS))
+ && !memcmp(&(scanned->network.Ssid), &(pnetwork->Ssid), sizeof(struct ndis_802_11_ssid))
+ ) {
+ if (!rtw_cfg80211_inform_bss(padapter, scanned)) {
+ DBG_871X(FUNC_ADPT_FMT" inform fail !!\n", FUNC_ADPT_ARG(padapter));
+ } else {
+ /* DBG_871X(FUNC_ADPT_FMT" inform success !!\n", FUNC_ADPT_ARG(padapter)); */
+ }
+ } else {
+ DBG_871X("scanned: %s("MAC_FMT"), cur: %s("MAC_FMT")\n",
+ scanned->network.Ssid.Ssid, MAC_ARG(scanned->network.MacAddress),
+ pnetwork->Ssid.Ssid, MAC_ARG(pnetwork->MacAddress)
+ );
+ rtw_warn_on(1);
+ }
+ }
+
+check_bss:
+ if (!rtw_cfg80211_check_bss(padapter))
+ DBG_871X_LEVEL(_drv_always_, FUNC_ADPT_FMT" BSS not found !!\n", FUNC_ADPT_ARG(padapter));
+
+ if (rtw_to_roam(padapter) > 0) {
+ struct wiphy *wiphy = pwdev->wiphy;
+ struct ieee80211_channel *notify_channel;
+ u32 freq;
+ u16 channel = cur_network->network.Configuration.DSConfig;
+ struct cfg80211_roam_info roam_info = {};
+
+ freq = rtw_ieee80211_channel_to_frequency(channel, NL80211_BAND_2GHZ);
+
+ notify_channel = ieee80211_get_channel(wiphy, freq);
+
+ DBG_871X(FUNC_ADPT_FMT" call cfg80211_roamed\n", FUNC_ADPT_ARG(padapter));
+ roam_info.channel = notify_channel;
+ roam_info.bssid = cur_network->network.MacAddress;
+ roam_info.req_ie =
+ pmlmepriv->assoc_req+sizeof(struct ieee80211_hdr_3addr)+2;
+ roam_info.req_ie_len =
+ pmlmepriv->assoc_req_len-sizeof(struct ieee80211_hdr_3addr)-2;
+ roam_info.resp_ie =
+ pmlmepriv->assoc_rsp+sizeof(struct ieee80211_hdr_3addr)+6;
+ roam_info.resp_ie_len =
+ pmlmepriv->assoc_rsp_len-sizeof(struct ieee80211_hdr_3addr)-6;
+ cfg80211_roamed(padapter->pnetdev, &roam_info, GFP_ATOMIC);
+ }
+ else
+ {
+ cfg80211_connect_result(padapter->pnetdev, cur_network->network.MacAddress
+ , pmlmepriv->assoc_req+sizeof(struct ieee80211_hdr_3addr)+2
+ , pmlmepriv->assoc_req_len-sizeof(struct ieee80211_hdr_3addr)-2
+ , pmlmepriv->assoc_rsp+sizeof(struct ieee80211_hdr_3addr)+6
+ , pmlmepriv->assoc_rsp_len-sizeof(struct ieee80211_hdr_3addr)-6
+ , WLAN_STATUS_SUCCESS, GFP_ATOMIC);
+ }
+}
+
+void rtw_cfg80211_indicate_disconnect(struct adapter *padapter)
+{
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct wireless_dev *pwdev = padapter->rtw_wdev;
+
+ DBG_871X(FUNC_ADPT_FMT"\n", FUNC_ADPT_ARG(padapter));
+
+ if (pwdev->iftype != NL80211_IFTYPE_STATION
+ && pwdev->iftype != NL80211_IFTYPE_P2P_CLIENT
+ ) {
+ return;
+ }
+
+ if (check_fwstate(pmlmepriv, WIFI_AP_STATE) == true)
+ return;
+
+ if (!padapter->mlmepriv.not_indic_disco) {
+ if (check_fwstate(&padapter->mlmepriv, _FW_LINKED)) {
+ cfg80211_disconnected(padapter->pnetdev, 0,
+ NULL, 0, true, GFP_ATOMIC);
+ } else {
+ cfg80211_connect_result(padapter->pnetdev, NULL, NULL, 0, NULL, 0,
+ WLAN_STATUS_UNSPECIFIED_FAILURE, GFP_ATOMIC/*GFP_KERNEL*/);
+ }
+ }
+}
+
+
+static int rtw_cfg80211_ap_set_encryption(struct net_device *dev, struct ieee_param *param, u32 param_len)
+{
+ int ret = 0;
+ u32 wep_key_idx, wep_key_len;
+ struct sta_info *psta = NULL, *pbcmc_sta = NULL;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct security_priv* psecuritypriv =&(padapter->securitypriv);
+ struct sta_priv *pstapriv = &padapter->stapriv;
+
+ DBG_8192C("%s\n", __func__);
+
+ param->u.crypt.err = 0;
+ param->u.crypt.alg[IEEE_CRYPT_ALG_NAME_LEN - 1] = '\0';
+
+ if (param_len != sizeof(struct ieee_param) + param->u.crypt.key_len)
+ {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ if (param->sta_addr[0] == 0xff && param->sta_addr[1] == 0xff &&
+ param->sta_addr[2] == 0xff && param->sta_addr[3] == 0xff &&
+ param->sta_addr[4] == 0xff && param->sta_addr[5] == 0xff)
+ {
+ if (param->u.crypt.idx >= WEP_KEYS)
+ {
+ ret = -EINVAL;
+ goto exit;
+ }
+ }
+ else
+ {
+ psta = rtw_get_stainfo(pstapriv, param->sta_addr);
+ if (!psta)
+ {
+ /* ret = -EINVAL; */
+ DBG_8192C("rtw_set_encryption(), sta has already been removed or never been added\n");
+ goto exit;
+ }
+ }
+
+ if (strcmp(param->u.crypt.alg, "none") == 0 && (psta == NULL))
+ {
+ /* todo:clear default encryption keys */
+
+ DBG_8192C("clear default encryption keys, keyid =%d\n", param->u.crypt.idx);
+
+ goto exit;
+ }
+
+
+ if (strcmp(param->u.crypt.alg, "WEP") == 0 && (psta == NULL))
+ {
+ DBG_8192C("r871x_set_encryption, crypt.alg = WEP\n");
+
+ wep_key_idx = param->u.crypt.idx;
+ wep_key_len = param->u.crypt.key_len;
+
+ DBG_8192C("r871x_set_encryption, wep_key_idx =%d, len =%d\n", wep_key_idx, wep_key_len);
+
+ if ((wep_key_idx >= WEP_KEYS) || (wep_key_len<= 0))
+ {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ if (wep_key_len > 0)
+ {
+ wep_key_len = wep_key_len <= 5 ? 5 : 13;
+ }
+
+ if (psecuritypriv->bWepDefaultKeyIdxSet == 0)
+ {
+ /* wep default key has not been set, so use this key index as default key. */
+
+ psecuritypriv->dot11AuthAlgrthm = dot11AuthAlgrthm_Auto;
+ psecuritypriv->ndisencryptstatus = Ndis802_11Encryption1Enabled;
+ psecuritypriv->dot11PrivacyAlgrthm = _WEP40_;
+ psecuritypriv->dot118021XGrpPrivacy = _WEP40_;
+
+ if (wep_key_len == 13)
+ {
+ psecuritypriv->dot11PrivacyAlgrthm = _WEP104_;
+ psecuritypriv->dot118021XGrpPrivacy = _WEP104_;
+ }
+
+ psecuritypriv->dot11PrivacyKeyIndex = wep_key_idx;
+ }
+
+ memcpy(&(psecuritypriv->dot11DefKey[wep_key_idx].skey[0]), param->u.crypt.key, wep_key_len);
+
+ psecuritypriv->dot11DefKeylen[wep_key_idx] = wep_key_len;
+
+ rtw_ap_set_wep_key(padapter, param->u.crypt.key, wep_key_len, wep_key_idx, 1);
+
+ goto exit;
+
+ }
+
+
+ if (!psta && check_fwstate(pmlmepriv, WIFI_AP_STATE)) /* group key */
+ {
+ if (param->u.crypt.set_tx == 0) /* group key */
+ {
+ if (strcmp(param->u.crypt.alg, "WEP") == 0)
+ {
+ DBG_8192C("%s, set group_key, WEP\n", __func__);
+
+ memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len>16 ?16:param->u.crypt.key_len));
+
+ psecuritypriv->dot118021XGrpPrivacy = _WEP40_;
+ if (param->u.crypt.key_len == 13)
+ {
+ psecuritypriv->dot118021XGrpPrivacy = _WEP104_;
+ }
+
+ }
+ else if (strcmp(param->u.crypt.alg, "TKIP") == 0)
+ {
+ DBG_8192C("%s, set group_key, TKIP\n", __func__);
+
+ psecuritypriv->dot118021XGrpPrivacy = _TKIP_;
+
+ memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len>16 ?16:param->u.crypt.key_len));
+
+ /* DEBUG_ERR("set key length :param->u.crypt.key_len =%d\n", param->u.crypt.key_len); */
+ /* set mic key */
+ memcpy(psecuritypriv->dot118021XGrptxmickey[param->u.crypt.idx].skey, &(param->u.crypt.key[16]), 8);
+ memcpy(psecuritypriv->dot118021XGrprxmickey[param->u.crypt.idx].skey, &(param->u.crypt.key[24]), 8);
+
+ psecuritypriv->busetkipkey = true;
+
+ }
+ else if (strcmp(param->u.crypt.alg, "CCMP") == 0)
+ {
+ DBG_8192C("%s, set group_key, CCMP\n", __func__);
+
+ psecuritypriv->dot118021XGrpPrivacy = _AES_;
+
+ memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len>16 ?16:param->u.crypt.key_len));
+ }
+ else
+ {
+ DBG_8192C("%s, set group_key, none\n", __func__);
+
+ psecuritypriv->dot118021XGrpPrivacy = _NO_PRIVACY_;
+ }
+
+ psecuritypriv->dot118021XGrpKeyid = param->u.crypt.idx;
+
+ psecuritypriv->binstallGrpkey = true;
+
+ psecuritypriv->dot11PrivacyAlgrthm = psecuritypriv->dot118021XGrpPrivacy;/* */
+
+ rtw_ap_set_group_key(padapter, param->u.crypt.key, psecuritypriv->dot118021XGrpPrivacy, param->u.crypt.idx);
+
+ pbcmc_sta =rtw_get_bcmc_stainfo(padapter);
+ if (pbcmc_sta)
+ {
+ pbcmc_sta->ieee8021x_blocked = false;
+ pbcmc_sta->dot118021XPrivacy = psecuritypriv->dot118021XGrpPrivacy;/* rx will use bmc_sta's dot118021XPrivacy */
+ }
+
+ }
+
+ goto exit;
+
+ }
+
+ if (psecuritypriv->dot11AuthAlgrthm == dot11AuthAlgrthm_8021X && psta) /* psk/802_1x */
+ {
+ if (check_fwstate(pmlmepriv, WIFI_AP_STATE))
+ {
+ if (param->u.crypt.set_tx == 1) /* pairwise key */
+ {
+ memcpy(psta->dot118021x_UncstKey.skey, param->u.crypt.key, (param->u.crypt.key_len>16 ?16:param->u.crypt.key_len));
+
+ if (strcmp(param->u.crypt.alg, "WEP") == 0)
+ {
+ DBG_8192C("%s, set pairwise key, WEP\n", __func__);
+
+ psta->dot118021XPrivacy = _WEP40_;
+ if (param->u.crypt.key_len == 13)
+ {
+ psta->dot118021XPrivacy = _WEP104_;
+ }
+ }
+ else if (strcmp(param->u.crypt.alg, "TKIP") == 0)
+ {
+ DBG_8192C("%s, set pairwise key, TKIP\n", __func__);
+
+ psta->dot118021XPrivacy = _TKIP_;
+
+ /* DEBUG_ERR("set key length :param->u.crypt.key_len =%d\n", param->u.crypt.key_len); */
+ /* set mic key */
+ memcpy(psta->dot11tkiptxmickey.skey, &(param->u.crypt.key[16]), 8);
+ memcpy(psta->dot11tkiprxmickey.skey, &(param->u.crypt.key[24]), 8);
+
+ psecuritypriv->busetkipkey = true;
+
+ }
+ else if (strcmp(param->u.crypt.alg, "CCMP") == 0)
+ {
+
+ DBG_8192C("%s, set pairwise key, CCMP\n", __func__);
+
+ psta->dot118021XPrivacy = _AES_;
+ }
+ else
+ {
+ DBG_8192C("%s, set pairwise key, none\n", __func__);
+
+ psta->dot118021XPrivacy = _NO_PRIVACY_;
+ }
+
+ rtw_ap_set_pairwise_key(padapter, psta);
+
+ psta->ieee8021x_blocked = false;
+
+ psta->bpairwise_key_installed = true;
+
+ }
+ else/* group key??? */
+ {
+ if (strcmp(param->u.crypt.alg, "WEP") == 0)
+ {
+ memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len>16 ?16:param->u.crypt.key_len));
+
+ psecuritypriv->dot118021XGrpPrivacy = _WEP40_;
+ if (param->u.crypt.key_len == 13)
+ {
+ psecuritypriv->dot118021XGrpPrivacy = _WEP104_;
+ }
+ }
+ else if (strcmp(param->u.crypt.alg, "TKIP") == 0)
+ {
+ psecuritypriv->dot118021XGrpPrivacy = _TKIP_;
+
+ memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len>16 ?16:param->u.crypt.key_len));
+
+ /* DEBUG_ERR("set key length :param->u.crypt.key_len =%d\n", param->u.crypt.key_len); */
+ /* set mic key */
+ memcpy(psecuritypriv->dot118021XGrptxmickey[param->u.crypt.idx].skey, &(param->u.crypt.key[16]), 8);
+ memcpy(psecuritypriv->dot118021XGrprxmickey[param->u.crypt.idx].skey, &(param->u.crypt.key[24]), 8);
+
+ psecuritypriv->busetkipkey = true;
+
+ }
+ else if (strcmp(param->u.crypt.alg, "CCMP") == 0)
+ {
+ psecuritypriv->dot118021XGrpPrivacy = _AES_;
+
+ memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len>16 ?16:param->u.crypt.key_len));
+ }
+ else
+ {
+ psecuritypriv->dot118021XGrpPrivacy = _NO_PRIVACY_;
+ }
+
+ psecuritypriv->dot118021XGrpKeyid = param->u.crypt.idx;
+
+ psecuritypriv->binstallGrpkey = true;
+
+ psecuritypriv->dot11PrivacyAlgrthm = psecuritypriv->dot118021XGrpPrivacy;/* */
+
+ rtw_ap_set_group_key(padapter, param->u.crypt.key, psecuritypriv->dot118021XGrpPrivacy, param->u.crypt.idx);
+
+ pbcmc_sta = rtw_get_bcmc_stainfo(padapter);
+ if (pbcmc_sta)
+ {
+ pbcmc_sta->ieee8021x_blocked = false;
+ pbcmc_sta->dot118021XPrivacy = psecuritypriv->dot118021XGrpPrivacy;/* rx will use bmc_sta's dot118021XPrivacy */
+ }
+
+ }
+
+ }
+
+ }
+
+exit:
+
+ return ret;
+
+}
+
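+/* STA/ad-hoc flavour of the SET_ENCRYPTION handling: WEP keys are installed as
+ * default keys, 802.1X pairwise keys are pushed with rtw_setstakey_cmd(), and
+ * group/BIP(IGTK) keys are cached in securitypriv before being set.
+ */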
+static int rtw_cfg80211_set_encryption(struct net_device *dev, struct ieee_param *param, u32 param_len)
+{
+ int ret = 0;
+ u32 wep_key_idx, wep_key_len;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
+
+ DBG_8192C("%s\n", __func__);
+
+ param->u.crypt.err = 0;
+ param->u.crypt.alg[IEEE_CRYPT_ALG_NAME_LEN - 1] = '\0';
+
+ if (param_len < (u32) ((u8 *) param->u.crypt.key - (u8 *) param) + param->u.crypt.key_len)
+ {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ if (param->sta_addr[0] == 0xff && param->sta_addr[1] == 0xff &&
+ param->sta_addr[2] == 0xff && param->sta_addr[3] == 0xff &&
+ param->sta_addr[4] == 0xff && param->sta_addr[5] == 0xff)
+ {
+ if (param->u.crypt.idx >= WEP_KEYS
+ || param->u.crypt.idx >= BIP_MAX_KEYID
+ )
+ {
+ ret = -EINVAL;
+ goto exit;
+ }
+ } else {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ if (strcmp(param->u.crypt.alg, "WEP") == 0)
+ {
+ RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_err_, ("wpa_set_encryption, crypt.alg = WEP\n"));
+ DBG_8192C("wpa_set_encryption, crypt.alg = WEP\n");
+
+ wep_key_idx = param->u.crypt.idx;
+ wep_key_len = param->u.crypt.key_len;
+
+ if ((wep_key_idx >= WEP_KEYS) || (wep_key_len <= 0))
+ {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ if (psecuritypriv->bWepDefaultKeyIdxSet == 0)
+ {
+ /* wep default key has not been set, so use this key index as default key. */
+
+ wep_key_len = wep_key_len <= 5 ? 5 : 13;
+
+ psecuritypriv->ndisencryptstatus = Ndis802_11Encryption1Enabled;
+ psecuritypriv->dot11PrivacyAlgrthm = _WEP40_;
+ psecuritypriv->dot118021XGrpPrivacy = _WEP40_;
+
+ if (wep_key_len == 13)
+ {
+ psecuritypriv->dot11PrivacyAlgrthm = _WEP104_;
+ psecuritypriv->dot118021XGrpPrivacy = _WEP104_;
+ }
+
+ psecuritypriv->dot11PrivacyKeyIndex = wep_key_idx;
+ }
+
+ memcpy(&(psecuritypriv->dot11DefKey[wep_key_idx].skey[0]), param->u.crypt.key, wep_key_len);
+
+ psecuritypriv->dot11DefKeylen[wep_key_idx] = wep_key_len;
+
+ rtw_set_key(padapter, psecuritypriv, wep_key_idx, 0, true);
+
+ goto exit;
+ }
+
+ if (padapter->securitypriv.dot11AuthAlgrthm == dot11AuthAlgrthm_8021X) /* 802_1x */
+ {
+ struct sta_info *psta, *pbcmc_sta;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+
+ /* DBG_8192C("%s, : dot11AuthAlgrthm == dot11AuthAlgrthm_8021X\n", __func__); */
+
+ if (check_fwstate(pmlmepriv, WIFI_STATION_STATE | WIFI_MP_STATE) == true) /* sta mode */
+ {
+ psta = rtw_get_stainfo(pstapriv, get_bssid(pmlmepriv));
+ if (psta == NULL) {
+ /* DEBUG_ERR(("Set wpa_set_encryption: Obtain Sta_info fail\n")); */
+ DBG_8192C("%s, : Obtain Sta_info fail\n", __func__);
+ }
+ else
+ {
+ /* Jeff: don't disable ieee8021x_blocked while clearing key */
+ if (strcmp(param->u.crypt.alg, "none") != 0)
+ psta->ieee8021x_blocked = false;
+
+
+ if ((padapter->securitypriv.ndisencryptstatus == Ndis802_11Encryption2Enabled) ||
+ (padapter->securitypriv.ndisencryptstatus == Ndis802_11Encryption3Enabled))
+ {
+ psta->dot118021XPrivacy = padapter->securitypriv.dot11PrivacyAlgrthm;
+ }
+
+ if (param->u.crypt.set_tx == 1)/* pairwise key */
+ {
+
+ DBG_8192C("%s, : param->u.crypt.set_tx == 1\n", __func__);
+
+ memcpy(psta->dot118021x_UncstKey.skey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
+
+ if (strcmp(param->u.crypt.alg, "TKIP") == 0)/* set mic key */
+ {
+ /* DEBUG_ERR(("\nset key length :param->u.crypt.key_len =%d\n", param->u.crypt.key_len)); */
+ memcpy(psta->dot11tkiptxmickey.skey, &(param->u.crypt.key[16]), 8);
+ memcpy(psta->dot11tkiprxmickey.skey, &(param->u.crypt.key[24]), 8);
+
+ padapter->securitypriv.busetkipkey = false;
+ /* _set_timer(&padapter->securitypriv.tkip_timer, 50); */
+ }
+
+ /* DEBUG_ERR((" param->u.crypt.key_len =%d\n", param->u.crypt.key_len)); */
+ DBG_871X(" ~~~~set sta key:unicastkey\n");
+
+ rtw_setstakey_cmd(padapter, psta, true, true);
+ }
+ else/* group key */
+ {
+ if (strcmp(param->u.crypt.alg, "TKIP") == 0 || strcmp(param->u.crypt.alg, "CCMP") == 0)
+ {
+ memcpy(padapter->securitypriv.dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
+ memcpy(padapter->securitypriv.dot118021XGrptxmickey[param->u.crypt.idx].skey, &(param->u.crypt.key[16]), 8);
+ memcpy(padapter->securitypriv.dot118021XGrprxmickey[param->u.crypt.idx].skey, &(param->u.crypt.key[24]), 8);
+ padapter->securitypriv.binstallGrpkey = true;
+ /* DEBUG_ERR((" param->u.crypt.key_len =%d\n", param->u.crypt.key_len)); */
+ DBG_871X(" ~~~~set sta key:groupkey\n");
+
+ padapter->securitypriv.dot118021XGrpKeyid = param->u.crypt.idx;
+ rtw_set_key(padapter, &padapter->securitypriv, param->u.crypt.idx, 1, true);
+ }
+ else if (strcmp(param->u.crypt.alg, "BIP") == 0)
+ {
+ /* DBG_871X("BIP key_len =%d , index =%d @@@@@@@@@@@@@@@@@@\n", param->u.crypt.key_len, param->u.crypt.idx); */
+ /* save the IGTK key, length 16 bytes */
+ memcpy(padapter->securitypriv.dot11wBIPKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
+ /*DBG_871X("IGTK key below:\n");
+ for (no = 0;no<16;no++)
+ printk(" %02x ", padapter->securitypriv.dot11wBIPKey[param->u.crypt.idx].skey[no]);
+ DBG_871X("\n");*/
+ padapter->securitypriv.dot11wBIPKeyid = param->u.crypt.idx;
+ padapter->securitypriv.binstallBIPkey = true;
+ DBG_871X(" ~~~~set sta key:IGKT\n");
+ }
+ }
+ }
+
+ pbcmc_sta = rtw_get_bcmc_stainfo(padapter);
+ if (pbcmc_sta == NULL)
+ {
+ /* DEBUG_ERR(("Set OID_802_11_ADD_KEY: bcmc stainfo is null\n")); */
+ }
+ else
+ {
+ /* Jeff: don't disable ieee8021x_blocked while clearing key */
+ if (strcmp(param->u.crypt.alg, "none") != 0)
+ pbcmc_sta->ieee8021x_blocked = false;
+
+ if ((padapter->securitypriv.ndisencryptstatus == Ndis802_11Encryption2Enabled) ||
+ (padapter->securitypriv.ndisencryptstatus == Ndis802_11Encryption3Enabled))
+ {
+ pbcmc_sta->dot118021XPrivacy = padapter->securitypriv.dot11PrivacyAlgrthm;
+ }
+ }
+ }
+ else if (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE)) /* adhoc mode */
+ {
+ }
+ }
+
+exit:
+
+ DBG_8192C("%s, ret =%d\n", __func__, ret);
+
+ return ret;
+}
+
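+/* cfg80211 .add_key handler: translate the cfg80211 cipher suite into the
+ * driver's algorithm string, pack the key material into a struct ieee_param
+ * and dispatch it to the AP or STA/ad-hoc set_encryption path.
+ */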
+static int cfg80211_rtw_add_key(struct wiphy *wiphy, struct net_device *ndev,
+ u8 key_index, bool pairwise, const u8 *mac_addr,
+ struct key_params *params)
+{
+ char *alg_name;
+ u32 param_len;
+ struct ieee_param *param = NULL;
+ int ret = 0;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(ndev);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+
+ DBG_871X(FUNC_NDEV_FMT" adding key for %pM\n", FUNC_NDEV_ARG(ndev), mac_addr);
+ DBG_871X("cipher = 0x%x\n", params->cipher);
+ DBG_871X("key_len = 0x%x\n", params->key_len);
+ DBG_871X("seq_len = 0x%x\n", params->seq_len);
+ DBG_871X("key_index =%d\n", key_index);
+ DBG_871X("pairwise =%d\n", pairwise);
+
+ param_len = sizeof(struct ieee_param) + params->key_len;
+ param = (struct ieee_param *)rtw_malloc(param_len);
+ if (param == NULL)
+ return -ENOMEM;
+
+ memset(param, 0, param_len);
+
+ param->cmd = IEEE_CMD_SET_ENCRYPTION;
+ memset(param->sta_addr, 0xff, ETH_ALEN);
+
+ switch (params->cipher) {
+ case IW_AUTH_CIPHER_NONE:
+ /* todo: remove key */
+ /* remove = 1; */
+ alg_name = "none";
+ break;
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ alg_name = "WEP";
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ alg_name = "TKIP";
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ alg_name = "CCMP";
+ break;
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ alg_name = "BIP";
+ break;
+ default:
+ ret = -ENOTSUPP;
+ goto addkey_end;
+ }
+
+ strncpy((char *)param->u.crypt.alg, alg_name, IEEE_CRYPT_ALG_NAME_LEN);
+
+
+ if (!mac_addr || is_broadcast_ether_addr(mac_addr))
+ {
+ param->u.crypt.set_tx = 0; /* for wpa/wpa2 group key */
+ } else {
+ param->u.crypt.set_tx = 1; /* for wpa/wpa2 pairwise key */
+ }
+
+ param->u.crypt.idx = key_index;
+
+ if (params->seq_len && params->seq)
+ {
+ memcpy(param->u.crypt.seq, (u8 *)params->seq, params->seq_len);
+ }
+
+ if (params->key_len && params->key)
+ {
+ param->u.crypt.key_len = params->key_len;
+ memcpy(param->u.crypt.key, (u8 *)params->key, params->key_len);
+ }
+
+ if (check_fwstate(pmlmepriv, WIFI_STATION_STATE) == true)
+ {
+ ret = rtw_cfg80211_set_encryption(ndev, param, param_len);
+ }
+ else if (check_fwstate(pmlmepriv, WIFI_AP_STATE) == true)
+ {
+ if (mac_addr)
+ memcpy(param->sta_addr, (void *)mac_addr, ETH_ALEN);
+
+ ret = rtw_cfg80211_ap_set_encryption(ndev, param, param_len);
+ }
+ else if (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) == true
+ || check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) == true)
+ {
+ /* DBG_8192C("@@@@@@@@@@ fw_state = 0x%x, iftype =%d\n", pmlmepriv->fw_state, rtw_wdev->iftype); */
+ ret = rtw_cfg80211_set_encryption(ndev, param, param_len);
+ }
+ else
+ {
+ DBG_8192C("error!\n");
+
+ }
+
+addkey_end:
+ kfree((u8 *)param);
+
+ return ret;
+
+}
+
+static int cfg80211_rtw_get_key(struct wiphy *wiphy, struct net_device *ndev,
+ u8 key_index, bool pairwise, const u8 *mac_addr,
+ void *cookie,
+ void (*callback)(void *cookie,
+ struct key_params*))
+{
+ DBG_871X(FUNC_NDEV_FMT"\n", FUNC_NDEV_ARG(ndev));
+ return 0;
+}
+
+static int cfg80211_rtw_del_key(struct wiphy *wiphy, struct net_device *ndev,
+ u8 key_index, bool pairwise, const u8 *mac_addr)
+{
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(ndev);
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
+
+ DBG_871X(FUNC_NDEV_FMT" key_index =%d\n", FUNC_NDEV_ARG(ndev), key_index);
+
+ if (key_index == psecuritypriv->dot11PrivacyKeyIndex)
+ {
+ /* clear the flag of wep default key set. */
+ psecuritypriv->bWepDefaultKeyIdxSet = 0;
+ }
+
+ return 0;
+}
+
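+/* cfg80211 .set_default_key handler: only relevant for WEP; record the default
+ * key index and switch to WEP104 when the stored key is 13 bytes long.
+ */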
+static int cfg80211_rtw_set_default_key(struct wiphy *wiphy,
+ struct net_device *ndev, u8 key_index
+ , bool unicast, bool multicast
+ )
+{
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(ndev);
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
+
+ DBG_871X(FUNC_NDEV_FMT" key_index =%d, unicast =%d, multicast =%d\n",
+ FUNC_NDEV_ARG(ndev), key_index, unicast, multicast);
+
+ if ((key_index < WEP_KEYS) && ((psecuritypriv->dot11PrivacyAlgrthm == _WEP40_) || (psecuritypriv->dot11PrivacyAlgrthm == _WEP104_))) /* set wep default key */
+ {
+ psecuritypriv->ndisencryptstatus = Ndis802_11Encryption1Enabled;
+
+ psecuritypriv->dot11PrivacyKeyIndex = key_index;
+
+ psecuritypriv->dot11PrivacyAlgrthm = _WEP40_;
+ psecuritypriv->dot118021XGrpPrivacy = _WEP40_;
+ if (psecuritypriv->dot11DefKeylen[key_index] == 13)
+ {
+ psecuritypriv->dot11PrivacyAlgrthm = _WEP104_;
+ psecuritypriv->dot118021XGrpPrivacy = _WEP104_;
+ }
+
+ psecuritypriv->bWepDefaultKeyIdxSet = 1; /* set the flag to represent that wep default key has been set */
+ }
+
+ return 0;
+
+}
+
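+/* cfg80211 .get_station handler: when associated in station mode, report
+ * signal level, current tx rate and rx/tx packet counters; Ad-Hoc/AP mode
+ * reporting is still a TODO.
+ */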
+static int cfg80211_rtw_get_station(struct wiphy *wiphy,
+ struct net_device *ndev,
+ const u8 *mac,
+ struct station_info *sinfo)
+{
+ int ret = 0;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(ndev);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct sta_info *psta = NULL;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+
+ sinfo->filled = 0;
+
+ if (!mac) {
+ DBG_871X(FUNC_NDEV_FMT" mac ==%p\n", FUNC_NDEV_ARG(ndev), mac);
+ ret = -ENOENT;
+ goto exit;
+ }
+
+ psta = rtw_get_stainfo(pstapriv, (u8 *)mac);
+ if (psta == NULL) {
+ DBG_8192C("%s, sta_info is null\n", __func__);
+ ret = -ENOENT;
+ goto exit;
+ }
+
+#ifdef DEBUG_CFG80211
+ DBG_871X(FUNC_NDEV_FMT" mac ="MAC_FMT"\n", FUNC_NDEV_ARG(ndev), MAC_ARG(mac));
+#endif
+
+ /* for infra./P2PClient mode */
+ if (check_fwstate(pmlmepriv, WIFI_STATION_STATE)
+ && check_fwstate(pmlmepriv, _FW_LINKED)
+ )
+ {
+ struct wlan_network *cur_network = &(pmlmepriv->cur_network);
+
+ if (memcmp((u8 *)mac, cur_network->network.MacAddress, ETH_ALEN)) {
+ DBG_871X("%s, mismatch bssid ="MAC_FMT"\n", __func__, MAC_ARG(cur_network->network.MacAddress));
+ ret = -ENOENT;
+ goto exit;
+ }
+
+ sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL);
+ sinfo->signal = translate_percentage_to_dbm(padapter->recvpriv.signal_strength);
+
+ sinfo->filled |= BIT(NL80211_STA_INFO_TX_BITRATE);
+ sinfo->txrate.legacy = rtw_get_cur_max_rate(padapter);
+
+ sinfo->filled |= BIT(NL80211_STA_INFO_RX_PACKETS);
+ sinfo->rx_packets = sta_rx_data_pkts(psta);
+
+ sinfo->filled |= BIT(NL80211_STA_INFO_TX_PACKETS);
+ sinfo->tx_packets = psta->sta_stats.tx_pkts;
+
+ }
+
+ /* for Ad-Hoc/AP mode */
+ if ((check_fwstate(pmlmepriv, WIFI_ADHOC_STATE)
+ ||check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE)
+ ||check_fwstate(pmlmepriv, WIFI_AP_STATE))
+ && check_fwstate(pmlmepriv, _FW_LINKED)
+ )
+ {
+ /* TODO: should acquire station info... */
+ }
+
+exit:
+ return ret;
+}
+
+extern int netdev_open(struct net_device *pnetdev);
+
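+/* cfg80211 .change_virtual_intf handler: bring the netdev up, map the nl80211
+ * iftype to the corresponding NDIS infrastructure mode and program it via
+ * rtw_setopmode_cmd().
+ */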
+static int cfg80211_rtw_change_iface(struct wiphy *wiphy,
+ struct net_device *ndev,
+ enum nl80211_iftype type,
+ struct vif_params *params)
+{
+ enum nl80211_iftype old_type;
+ enum NDIS_802_11_NETWORK_INFRASTRUCTURE networkType;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(ndev);
+ struct wireless_dev *rtw_wdev = padapter->rtw_wdev;
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+ int ret = 0;
+ u8 change = false;
+
+ DBG_871X(FUNC_NDEV_FMT" type =%d\n", FUNC_NDEV_ARG(ndev), type);
+
+ if (adapter_to_dvobj(padapter)->processing_dev_remove == true)
+ {
+ ret = -EPERM;
+ goto exit;
+ }
+
+ DBG_871X(FUNC_NDEV_FMT" call netdev_open\n", FUNC_NDEV_ARG(ndev));
+ if (netdev_open(ndev) != 0) {
+ DBG_871X(FUNC_NDEV_FMT" call netdev_open fail\n", FUNC_NDEV_ARG(ndev));
+ ret = -EPERM;
+ goto exit;
+ }
+
+ if (_FAIL == rtw_pwr_wakeup(padapter)) {
+ DBG_871X(FUNC_NDEV_FMT" call rtw_pwr_wakeup fail\n", FUNC_NDEV_ARG(ndev));
+ ret = -EPERM;
+ goto exit;
+ }
+
+ old_type = rtw_wdev->iftype;
+ DBG_871X(FUNC_NDEV_FMT" old_iftype =%d, new_iftype =%d\n",
+ FUNC_NDEV_ARG(ndev), old_type, type);
+
+ if (old_type != type)
+ {
+ change = true;
+ pmlmeext->action_public_rxseq = 0xffff;
+ pmlmeext->action_public_dialog_token = 0xff;
+ }
+
+ switch (type) {
+ case NL80211_IFTYPE_ADHOC:
+ networkType = Ndis802_11IBSS;
+ break;
+ case NL80211_IFTYPE_STATION:
+ networkType = Ndis802_11Infrastructure;
+ break;
+ case NL80211_IFTYPE_AP:
+ networkType = Ndis802_11APMode;
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ goto exit;
+ }
+
+ rtw_wdev->iftype = type;
+
+ if (rtw_set_802_11_infrastructure_mode(padapter, networkType) == false)
+ {
+ rtw_wdev->iftype = old_type;
+ ret = -EPERM;
+ goto exit;
+ }
+
+ rtw_setopmode_cmd(padapter, networkType, true);
+
+exit:
+
+ DBG_871X(FUNC_NDEV_FMT" ret:%d\n", FUNC_NDEV_ARG(ndev), ret);
+ return ret;
+}
+
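+/* Complete a pending cfg80211 scan request (if any) under scan_req_lock,
+ * honouring the aborted flag.
+ */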
+void rtw_cfg80211_indicate_scan_done(struct adapter *adapter, bool aborted)
+{
+ struct rtw_wdev_priv *pwdev_priv = adapter_wdev_data(adapter);
+ struct cfg80211_scan_info info = {
+ .aborted = aborted
+ };
+
+ spin_lock_bh(&pwdev_priv->scan_req_lock);
+ if (pwdev_priv->scan_request != NULL) {
+ #ifdef DEBUG_CFG80211
+ DBG_871X("%s with scan req\n", __func__);
+ #endif
+
+ /* avoid WARN_ON(request != wiphy_to_dev(request->wiphy)->scan_req); */
+ if (pwdev_priv->scan_request->wiphy != pwdev_priv->rtw_wdev->wiphy)
+ {
+ DBG_8192C("error wiphy compare\n");
+ }
+ else
+ {
+ cfg80211_scan_done(pwdev_priv->scan_request, &info);
+ }
+
+ pwdev_priv->scan_request = NULL;
+ } else {
+ #ifdef DEBUG_CFG80211
+ DBG_871X("%s without scan req\n", __func__);
+ #endif
+ }
+ spin_unlock_bh(&pwdev_priv->scan_req_lock);
+}
+
+void rtw_cfg80211_unlink_bss(struct adapter *padapter, struct wlan_network *pnetwork)
+{
+ struct wireless_dev *pwdev = padapter->rtw_wdev;
+ struct wiphy *wiphy = pwdev->wiphy;
+ struct cfg80211_bss *bss = NULL;
+ struct wlan_bssid_ex select_network = pnetwork->network;
+
+ bss = cfg80211_get_bss(wiphy, NULL/*notify_channel*/,
+ select_network.MacAddress, select_network.Ssid.Ssid,
+ select_network.Ssid.SsidLength, 0/*WLAN_CAPABILITY_ESS*/,
+ 0/*WLAN_CAPABILITY_ESS*/);
+
+ if (bss) {
+ cfg80211_unlink_bss(wiphy, bss);
+ DBG_8192C("%s(): cfg80211_unlink %s!! () ", __func__, select_network.Ssid.Ssid);
+ cfg80211_put_bss(padapter->rtw_wdev->wiphy, bss);
+ }
+ return;
+}
+
+void rtw_cfg80211_surveydone_event_callback(struct adapter *padapter)
+{
+ struct list_head *plist, *phead;
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct __queue *queue = &(pmlmepriv->scanned_queue);
+ struct wlan_network *pnetwork = NULL;
+
+#ifdef DEBUG_CFG80211
+ DBG_8192C("%s\n", __func__);
+#endif
+
+ spin_lock_bh(&(pmlmepriv->scanned_queue.lock));
+
+ phead = get_list_head(queue);
+ plist = get_next(phead);
+
+ while (1)
+ {
+ if (phead == plist)
+ break;
+
+ pnetwork = LIST_CONTAINOR(plist, struct wlan_network, list);
+
+ /* report network only if the current channel set contains the channel to which this network belongs */
+ if (rtw_ch_set_search_ch(padapter->mlmeextpriv.channel_set, pnetwork->network.Configuration.DSConfig) >= 0
+ && rtw_mlme_band_check(padapter, pnetwork->network.Configuration.DSConfig) == true
+ && true == rtw_validate_ssid(&(pnetwork->network.Ssid))
+ )
+ {
+ /* ev =translate_scan(padapter, a, pnetwork, ev, stop); */
+ rtw_cfg80211_inform_bss(padapter, pnetwork);
+ }
+
+ plist = get_next(plist);
+
+ }
+
+ spin_unlock_bh(&(pmlmepriv->scanned_queue.lock));
+}
+
+static int rtw_cfg80211_set_probe_req_wpsp2pie(struct adapter *padapter, char *buf, int len)
+{
+ int ret = 0;
+ uint wps_ielen = 0;
+ u8 *wps_ie;
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+
+#ifdef DEBUG_CFG80211
+ DBG_8192C("%s, ielen =%d\n", __func__, len);
+#endif
+
+ if (len > 0)
+ {
+ wps_ie = rtw_get_wps_ie(buf, len, NULL, &wps_ielen);
+ if (wps_ie)
+ {
+ #ifdef DEBUG_CFG80211
+ DBG_8192C("probe_req_wps_ielen =%d\n", wps_ielen);
+ #endif
+
+ if (pmlmepriv->wps_probe_req_ie)
+ {
+ pmlmepriv->wps_probe_req_ie_len = 0;
+ kfree(pmlmepriv->wps_probe_req_ie);
+ pmlmepriv->wps_probe_req_ie = NULL;
+ }
+
+ pmlmepriv->wps_probe_req_ie = rtw_malloc(wps_ielen);
+ if (pmlmepriv->wps_probe_req_ie == NULL) {
+ DBG_8192C("%s()-%d: rtw_malloc() ERROR!\n", __func__, __LINE__);
+ return -EINVAL;
+
+ }
+ memcpy(pmlmepriv->wps_probe_req_ie, wps_ie, wps_ielen);
+ pmlmepriv->wps_probe_req_ie_len = wps_ielen;
+ }
+ }
+
+ return ret;
+
+}
+
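+/* cfg80211 .scan handler: copy the requested SSIDs and channels into driver
+ * structures and issue a site-survey command; busy/deny conditions fall
+ * through to an immediate scan-done indication instead.
+ */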
+static int cfg80211_rtw_scan(struct wiphy *wiphy
+ , struct cfg80211_scan_request *request)
+{
+ struct net_device *ndev = wdev_to_ndev(request->wdev);
+ int i;
+ u8 _status = false;
+ int ret = 0;
+ struct ndis_802_11_ssid ssid[RTW_SSID_SCAN_AMOUNT];
+ struct rtw_ieee80211_channel ch[RTW_CHANNEL_SCAN_AMOUNT];
+ u8 survey_times = 3;
+ u8 survey_times_for_one_ch = 6;
+ struct cfg80211_ssid *ssids = request->ssids;
+ int j = 0;
+ bool need_indicate_scan_done = false;
+
+ struct adapter *padapter;
+ struct rtw_wdev_priv *pwdev_priv;
+ struct mlme_priv *pmlmepriv;
+
+ if (ndev == NULL) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ padapter = (struct adapter *)rtw_netdev_priv(ndev);
+ pwdev_priv = adapter_wdev_data(padapter);
+ pmlmepriv = &padapter->mlmepriv;
+
+/* ifdef DEBUG_CFG80211 */
+ DBG_871X(FUNC_ADPT_FMT"\n", FUNC_ADPT_ARG(padapter));
+/* endif */
+
+ spin_lock_bh(&pwdev_priv->scan_req_lock);
+ pwdev_priv->scan_request = request;
+ spin_unlock_bh(&pwdev_priv->scan_req_lock);
+
+ if (check_fwstate(pmlmepriv, WIFI_AP_STATE) == true)
+ {
+#ifdef DEBUG_CFG80211
+ DBG_871X("%s under WIFI_AP_STATE\n", __func__);
+#endif
+
+ if (check_fwstate(pmlmepriv, WIFI_UNDER_WPS|_FW_UNDER_SURVEY|_FW_UNDER_LINKING) == true)
+ {
+ DBG_8192C("%s, fwstate = 0x%x\n", __func__, pmlmepriv->fw_state);
+
+ if (check_fwstate(pmlmepriv, WIFI_UNDER_WPS))
+ {
+ DBG_8192C("AP mode process WPS\n");
+ }
+
+ need_indicate_scan_done = true;
+ goto check_need_indicate_scan_done;
+ }
+ }
+
+ rtw_ps_deny(padapter, PS_DENY_SCAN);
+ if (_FAIL == rtw_pwr_wakeup(padapter)) {
+ need_indicate_scan_done = true;
+ goto check_need_indicate_scan_done;
+ }
+
+ if (request->ie && request->ie_len > 0)
+ {
+ rtw_cfg80211_set_probe_req_wpsp2pie(padapter, (u8 *)request->ie, request->ie_len);
+ }
+
+ if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY) == true) {
+ DBG_8192C("%s, fwstate = 0x%x\n", __func__, pmlmepriv->fw_state);
+ need_indicate_scan_done = true;
+ goto check_need_indicate_scan_done;
+ } else if (check_fwstate(pmlmepriv, _FW_UNDER_LINKING) == true) {
+ DBG_8192C("%s, fwstate = 0x%x\n", __func__, pmlmepriv->fw_state);
+ ret = -EBUSY;
+ goto check_need_indicate_scan_done;
+ }
+
+ if (pmlmepriv->LinkDetectInfo.bBusyTraffic == true)
+ {
+ static unsigned long lastscantime = 0;
+ unsigned long passtime;
+
+ passtime = jiffies_to_msecs(jiffies - lastscantime);
+ lastscantime = jiffies;
+ if (passtime > 12000)
+ {
+ DBG_871X("%s: bBusyTraffic == true\n", __func__);
+ need_indicate_scan_done = true;
+ goto check_need_indicate_scan_done;
+ }
+ }
+
+ if (rtw_is_scan_deny(padapter)) {
+ DBG_871X(FUNC_ADPT_FMT ": scan deny\n", FUNC_ADPT_ARG(padapter));
+ need_indicate_scan_done = true;
+ goto check_need_indicate_scan_done;
+ }
+
+ memset(ssid, 0, sizeof(struct ndis_802_11_ssid)*RTW_SSID_SCAN_AMOUNT);
+ /* parsing request ssids, n_ssids */
+ for (i = 0; i < request->n_ssids && i < RTW_SSID_SCAN_AMOUNT; i++) {
+ #ifdef DEBUG_CFG80211
+ DBG_8192C("ssid =%s, len =%d\n", ssids[i].ssid, ssids[i].ssid_len);
+ #endif
+ memcpy(ssid[i].Ssid, ssids[i].ssid, ssids[i].ssid_len);
+ ssid[i].SsidLength = ssids[i].ssid_len;
+ }
+
+ /* parsing channels, n_channels */
+ memset(ch, 0, sizeof(struct rtw_ieee80211_channel)*RTW_CHANNEL_SCAN_AMOUNT);
+ for (i = 0; i < request->n_channels && i < RTW_CHANNEL_SCAN_AMOUNT; i++) {
+ #ifdef DEBUG_CFG80211
+ DBG_871X(FUNC_ADPT_FMT CHAN_FMT"\n", FUNC_ADPT_ARG(padapter), CHAN_ARG(request->channels[i]));
+ #endif
+ ch[i].hw_value = request->channels[i]->hw_value;
+ ch[i].flags = request->channels[i]->flags;
+ }
+
+ spin_lock_bh(&pmlmepriv->lock);
+ if (request->n_channels == 1) {
+ for (i = 1; i < survey_times_for_one_ch; i++)
+ memcpy(&ch[i], &ch[0], sizeof(struct rtw_ieee80211_channel));
+ _status = rtw_sitesurvey_cmd(padapter, ssid, RTW_SSID_SCAN_AMOUNT, ch, survey_times_for_one_ch);
+ } else if (request->n_channels <= 4) {
+ for (j = request->n_channels - 1; j >= 0; j--)
+ for (i = 0; i < survey_times; i++)
+ memcpy(&ch[j * survey_times + i], &ch[j], sizeof(struct rtw_ieee80211_channel));
+ _status = rtw_sitesurvey_cmd(padapter, ssid, RTW_SSID_SCAN_AMOUNT, ch, survey_times * request->n_channels);
+ } else {
+ _status = rtw_sitesurvey_cmd(padapter, ssid, RTW_SSID_SCAN_AMOUNT, NULL, 0);
+ }
+ spin_unlock_bh(&pmlmepriv->lock);
+
+
+ if (_status == false)
+ ret = -1;
+
+check_need_indicate_scan_done:
+ if (need_indicate_scan_done)
+ {
+ rtw_cfg80211_surveydone_event_callback(padapter);
+ rtw_cfg80211_indicate_scan_done(padapter, false);
+ }
+
+ rtw_ps_deny_cancel(padapter, PS_DENY_SCAN);
+
+exit:
+ return ret;
+
+}
+
+static int cfg80211_rtw_set_wiphy_params(struct wiphy *wiphy, u32 changed)
+{
+ DBG_8192C("%s\n", __func__);
+ return 0;
+}
+
+
+
+static int rtw_cfg80211_set_wpa_version(struct security_priv *psecuritypriv, u32 wpa_version)
+{
+ DBG_8192C("%s, wpa_version =%d\n", __func__, wpa_version);
+
+ if (!wpa_version) {
+ psecuritypriv->ndisauthtype = Ndis802_11AuthModeOpen;
+ return 0;
+ }
+
+
+ if (wpa_version & (NL80211_WPA_VERSION_1 | NL80211_WPA_VERSION_2))
+ {
+ psecuritypriv->ndisauthtype = Ndis802_11AuthModeWPAPSK;
+ }
+
+ return 0;
+
+}
+
+static int rtw_cfg80211_set_auth_type(struct security_priv *psecuritypriv,
+ enum nl80211_auth_type sme_auth_type)
+{
+ DBG_8192C("%s, nl80211_auth_type =%d\n", __func__, sme_auth_type);
+
+
+ switch (sme_auth_type) {
+ case NL80211_AUTHTYPE_AUTOMATIC:
+
+ psecuritypriv->dot11AuthAlgrthm = dot11AuthAlgrthm_Auto;
+
+ break;
+ case NL80211_AUTHTYPE_OPEN_SYSTEM:
+
+ psecuritypriv->dot11AuthAlgrthm = dot11AuthAlgrthm_Open;
+
+ if (psecuritypriv->ndisauthtype > Ndis802_11AuthModeWPA)
+ psecuritypriv->dot11AuthAlgrthm = dot11AuthAlgrthm_8021X;
+
+ break;
+ case NL80211_AUTHTYPE_SHARED_KEY:
+
+ psecuritypriv->dot11AuthAlgrthm = dot11AuthAlgrthm_Shared;
+
+ psecuritypriv->ndisencryptstatus = Ndis802_11Encryption1Enabled;
+
+
+ break;
+ default:
+ psecuritypriv->dot11AuthAlgrthm = dot11AuthAlgrthm_Open;
+ /* return -ENOTSUPP; */
+ }
+
+ return 0;
+
+}
+
+static int rtw_cfg80211_set_cipher(struct security_priv *psecuritypriv, u32 cipher, bool ucast)
+{
+ u32 ndisencryptstatus = Ndis802_11EncryptionDisabled;
+
+ u32 *profile_cipher = ucast ? &psecuritypriv->dot11PrivacyAlgrthm :
+ &psecuritypriv->dot118021XGrpPrivacy;
+
+ DBG_8192C("%s, ucast =%d, cipher = 0x%x\n", __func__, ucast, cipher);
+
+
+ if (!cipher) {
+ *profile_cipher = _NO_PRIVACY_;
+ psecuritypriv->ndisencryptstatus = ndisencryptstatus;
+ return 0;
+ }
+
+ switch (cipher) {
+ case IW_AUTH_CIPHER_NONE:
+ *profile_cipher = _NO_PRIVACY_;
+ ndisencryptstatus = Ndis802_11EncryptionDisabled;
+ break;
+ case WLAN_CIPHER_SUITE_WEP40:
+ *profile_cipher = _WEP40_;
+ ndisencryptstatus = Ndis802_11Encryption1Enabled;
+ break;
+ case WLAN_CIPHER_SUITE_WEP104:
+ *profile_cipher = _WEP104_;
+ ndisencryptstatus = Ndis802_11Encryption1Enabled;
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ *profile_cipher = _TKIP_;
+ ndisencryptstatus = Ndis802_11Encryption2Enabled;
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ *profile_cipher = _AES_;
+ ndisencryptstatus = Ndis802_11Encryption3Enabled;
+ break;
+ default:
+ DBG_8192C("Unsupported cipher: 0x%x\n", cipher);
+ return -ENOTSUPP;
+ }
+
+ if (ucast)
+ {
+ psecuritypriv->ndisencryptstatus = ndisencryptstatus;
+
+ /* if (psecuritypriv->dot11PrivacyAlgrthm >= _AES_) */
+ /* psecuritypriv->ndisauthtype = Ndis802_11AuthModeWPA2PSK; */
+ }
+
+ return 0;
+}
+
+static int rtw_cfg80211_set_key_mgt(struct security_priv *psecuritypriv, u32 key_mgt)
+{
+ DBG_8192C("%s, key_mgt = 0x%x\n", __func__, key_mgt);
+
+ if (key_mgt == WLAN_AKM_SUITE_8021X)
+ /* auth_type = UMAC_AUTH_TYPE_8021X; */
+ psecuritypriv->dot11AuthAlgrthm = dot11AuthAlgrthm_8021X;
+ else if (key_mgt == WLAN_AKM_SUITE_PSK) {
+ psecuritypriv->dot11AuthAlgrthm = dot11AuthAlgrthm_8021X;
+ }
+ else {
+ DBG_8192C("Invalid key mgt: 0x%x\n", key_mgt);
+ /* return -EINVAL; */
+ }
+
+ return 0;
+}
+
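+/* Parse the WPA/RSN and WPS IEs supplied with a connect request and program
+ * the auth algorithm, group/pairwise ciphers and WPS state accordingly.
+ */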
+static int rtw_cfg80211_set_wpa_ie(struct adapter *padapter, u8 *pie, size_t ielen)
+{
+ u8 *buf = NULL, *pos = NULL;
+ int group_cipher = 0, pairwise_cipher = 0;
+ int ret = 0;
+ int wpa_ielen = 0;
+ int wpa2_ielen = 0;
+ u8 *pwpa, *pwpa2;
+ u8 null_addr[] = {0, 0, 0, 0, 0, 0};
+
+ if (pie == NULL || !ielen) {
+ /* Treat this as normal case, but need to clear WIFI_UNDER_WPS */
+ _clr_fwstate_(&padapter->mlmepriv, WIFI_UNDER_WPS);
+ goto exit;
+ }
+
+ if (ielen > MAX_WPA_IE_LEN+MAX_WPS_IE_LEN+MAX_P2P_IE_LEN) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ buf = rtw_zmalloc(ielen);
+ if (buf == NULL) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ memcpy(buf, pie, ielen);
+
+ /* dump */
+ {
+ int i;
+ DBG_8192C("set wpa_ie(length:%zu):\n", ielen);
+ for (i = 0; i < ielen; i += 8)
+ DBG_8192C("0x%.2x 0x%.2x 0x%.2x 0x%.2x 0x%.2x 0x%.2x 0x%.2x 0x%.2x\n", buf[i], buf[i+1], buf[i+2], buf[i+3], buf[i+4], buf[i+5], buf[i+6], buf[i+7]);
+ }
+
+ pos = buf;
+ if (ielen < RSN_HEADER_LEN) {
+ RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_err_, ("Ie len too short %d\n", ielen));
+ ret = -1;
+ goto exit;
+ }
+
+ pwpa = rtw_get_wpa_ie(buf, &wpa_ielen, ielen);
+ if (pwpa && wpa_ielen > 0)
+ {
+ if (rtw_parse_wpa_ie(pwpa, wpa_ielen+2, &group_cipher, &pairwise_cipher, NULL) == _SUCCESS)
+ {
+ padapter->securitypriv.dot11AuthAlgrthm = dot11AuthAlgrthm_8021X;
+ padapter->securitypriv.ndisauthtype = Ndis802_11AuthModeWPAPSK;
+ memcpy(padapter->securitypriv.supplicant_ie, &pwpa[0], wpa_ielen+2);
+
+ DBG_8192C("got wpa_ie, wpa_ielen:%u\n", wpa_ielen);
+ }
+ }
+
+ pwpa2 = rtw_get_wpa2_ie(buf, &wpa2_ielen, ielen);
+ if (pwpa2 && wpa2_ielen > 0)
+ {
+ if (rtw_parse_wpa2_ie(pwpa2, wpa2_ielen+2, &group_cipher, &pairwise_cipher, NULL) == _SUCCESS)
+ {
+ padapter->securitypriv.dot11AuthAlgrthm = dot11AuthAlgrthm_8021X;
+ padapter->securitypriv.ndisauthtype = Ndis802_11AuthModeWPA2PSK;
+ memcpy(padapter->securitypriv.supplicant_ie, &pwpa2[0], wpa2_ielen+2);
+
+ DBG_8192C("got wpa2_ie, wpa2_ielen:%u\n", wpa2_ielen);
+ }
+ }
+
+ if (group_cipher == 0)
+ {
+ group_cipher = WPA_CIPHER_NONE;
+ }
+ if (pairwise_cipher == 0)
+ {
+ pairwise_cipher = WPA_CIPHER_NONE;
+ }
+
+ switch (group_cipher)
+ {
+ case WPA_CIPHER_NONE:
+ padapter->securitypriv.dot118021XGrpPrivacy = _NO_PRIVACY_;
+ padapter->securitypriv.ndisencryptstatus = Ndis802_11EncryptionDisabled;
+ break;
+ case WPA_CIPHER_WEP40:
+ padapter->securitypriv.dot118021XGrpPrivacy = _WEP40_;
+ padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption1Enabled;
+ break;
+ case WPA_CIPHER_TKIP:
+ padapter->securitypriv.dot118021XGrpPrivacy = _TKIP_;
+ padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption2Enabled;
+ break;
+ case WPA_CIPHER_CCMP:
+ padapter->securitypriv.dot118021XGrpPrivacy = _AES_;
+ padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption3Enabled;
+ break;
+ case WPA_CIPHER_WEP104:
+ padapter->securitypriv.dot118021XGrpPrivacy = _WEP104_;
+ padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption1Enabled;
+ break;
+ }
+
+ switch (pairwise_cipher)
+ {
+ case WPA_CIPHER_NONE:
+ padapter->securitypriv.dot11PrivacyAlgrthm = _NO_PRIVACY_;
+ padapter->securitypriv.ndisencryptstatus = Ndis802_11EncryptionDisabled;
+ break;
+ case WPA_CIPHER_WEP40:
+ padapter->securitypriv.dot11PrivacyAlgrthm = _WEP40_;
+ padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption1Enabled;
+ break;
+ case WPA_CIPHER_TKIP:
+ padapter->securitypriv.dot11PrivacyAlgrthm = _TKIP_;
+ padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption2Enabled;
+ break;
+ case WPA_CIPHER_CCMP:
+ padapter->securitypriv.dot11PrivacyAlgrthm = _AES_;
+ padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption3Enabled;
+ break;
+ case WPA_CIPHER_WEP104:
+ padapter->securitypriv.dot11PrivacyAlgrthm = _WEP104_;
+ padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption1Enabled;
+ break;
+ }
+
+ {/* handle wps_ie */
+ uint wps_ielen;
+ u8 *wps_ie;
+
+ wps_ie = rtw_get_wps_ie(buf, ielen, NULL, &wps_ielen);
+ if (wps_ie && wps_ielen > 0) {
+ DBG_8192C("got wps_ie, wps_ielen:%u\n", wps_ielen);
+ padapter->securitypriv.wps_ie_len = wps_ielen < MAX_WPS_IE_LEN ? wps_ielen : MAX_WPS_IE_LEN;
+ memcpy(padapter->securitypriv.wps_ie, wps_ie, padapter->securitypriv.wps_ie_len);
+ set_fwstate(&padapter->mlmepriv, WIFI_UNDER_WPS);
+ } else {
+ _clr_fwstate_(&padapter->mlmepriv, WIFI_UNDER_WPS);
+ }
+ }
+
+ /* TKIP and AES disallow multicast packets until installing group key */
+ if (padapter->securitypriv.dot11PrivacyAlgrthm == _TKIP_
+ || padapter->securitypriv.dot11PrivacyAlgrthm == _TKIP_WTMIC_
+ || padapter->securitypriv.dot11PrivacyAlgrthm == _AES_)
+ /* WPS open need to enable multicast */
+ /* check_fwstate(&padapter->mlmepriv, WIFI_UNDER_WPS) == true) */
+ rtw_hal_set_hwreg(padapter, HW_VAR_OFF_RCR_AM, null_addr);
+
+ RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_info_,
+ ("rtw_set_wpa_ie: pairwise_cipher = 0x%08x padapter->securitypriv.ndisencryptstatus =%d padapter->securitypriv.ndisauthtype =%d\n",
+ pairwise_cipher, padapter->securitypriv.ndisencryptstatus, padapter->securitypriv.ndisauthtype));
+
+exit:
+ kfree(buf);
+ if (ret)
+ _clr_fwstate_(&padapter->mlmepriv, WIFI_UNDER_WPS);
+ return ret;
+}
+
+static int cfg80211_rtw_join_ibss(struct wiphy *wiphy, struct net_device *ndev,
+ struct cfg80211_ibss_params *params)
+{
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(ndev);
+ struct ndis_802_11_ssid ndis_ssid;
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ int ret = 0;
+
+ if (_FAIL == rtw_pwr_wakeup(padapter)) {
+ ret = -EPERM;
+ goto exit;
+ }
+
+ if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
+ ret = -EPERM;
+ goto exit;
+ }
+
+ if (!params->ssid || !params->ssid_len)
+ {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ if (params->ssid_len > IW_ESSID_MAX_SIZE) {
+
+ ret = -E2BIG;
+ goto exit;
+ }
+
+ memset(&ndis_ssid, 0, sizeof(struct ndis_802_11_ssid));
+ ndis_ssid.SsidLength = params->ssid_len;
+ memcpy(ndis_ssid.Ssid, (u8 *)params->ssid, params->ssid_len);
+
+ /* DBG_8192C("ssid =%s, len =%zu\n", ndis_ssid.Ssid, params->ssid_len); */
+
+ psecuritypriv->ndisencryptstatus = Ndis802_11EncryptionDisabled;
+ psecuritypriv->dot11PrivacyAlgrthm = _NO_PRIVACY_;
+ psecuritypriv->dot118021XGrpPrivacy = _NO_PRIVACY_;
+ psecuritypriv->dot11AuthAlgrthm = dot11AuthAlgrthm_Open; /* open system */
+ psecuritypriv->ndisauthtype = Ndis802_11AuthModeOpen;
+
+ ret = rtw_cfg80211_set_auth_type(psecuritypriv, NL80211_AUTHTYPE_OPEN_SYSTEM);
+ rtw_set_802_11_authentication_mode(padapter, psecuritypriv->ndisauthtype);
+
+ if (rtw_set_802_11_ssid(padapter, &ndis_ssid) == false)
+ {
+ ret = -1;
+ goto exit;
+ }
+
+exit:
+ return ret;
+}
+
+static int cfg80211_rtw_leave_ibss(struct wiphy *wiphy, struct net_device *ndev)
+{
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(ndev);
+ struct wireless_dev *rtw_wdev = padapter->rtw_wdev;
+ enum nl80211_iftype old_type;
+ int ret = 0;
+
+ DBG_871X(FUNC_NDEV_FMT"\n", FUNC_NDEV_ARG(ndev));
+
+ padapter->mlmepriv.not_indic_disco = true;
+
+ old_type = rtw_wdev->iftype;
+
+ rtw_set_to_roam(padapter, 0);
+
+ if (check_fwstate(&padapter->mlmepriv, _FW_LINKED))
+ {
+ rtw_scan_abort(padapter);
+ LeaveAllPowerSaveMode(padapter);
+
+ rtw_wdev->iftype = NL80211_IFTYPE_STATION;
+
+ if (rtw_set_802_11_infrastructure_mode(padapter, Ndis802_11Infrastructure) == false)
+ {
+ rtw_wdev->iftype = old_type;
+ ret = -EPERM;
+ goto leave_ibss;
+ }
+ rtw_setopmode_cmd(padapter, Ndis802_11Infrastructure, true);
+ }
+
+leave_ibss:
+ padapter->mlmepriv.not_indic_disco = false;
+
+ return 0;
+}
+
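+/* cfg80211 .connect handler: validate the request, derive the security
+ * configuration from the SME parameters (including legacy shared-key WEP) and
+ * start the join through rtw_set_802_11_connect().
+ */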
+static int cfg80211_rtw_connect(struct wiphy *wiphy, struct net_device *ndev,
+ struct cfg80211_connect_params *sme)
+{
+ int ret = 0;
+ enum NDIS_802_11_AUTHENTICATION_MODE authmode;
+ struct ndis_802_11_ssid ndis_ssid;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(ndev);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
+
+ padapter->mlmepriv.not_indic_disco = true;
+
+ DBG_871X("=>"FUNC_NDEV_FMT"\n", FUNC_NDEV_ARG(ndev));
+ DBG_871X("privacy =%d, key =%p, key_len =%d, key_idx =%d\n",
+ sme->privacy, sme->key, sme->key_len, sme->key_idx);
+
+
+ if (adapter_wdev_data(padapter)->block == true)
+ {
+ ret = -EBUSY;
+ DBG_871X("%s wdev_priv.block is set\n", __func__);
+ goto exit;
+ }
+
+ rtw_ps_deny(padapter, PS_DENY_JOIN);
+ if (_FAIL == rtw_pwr_wakeup(padapter)) {
+ ret = -EPERM;
+ goto exit;
+ }
+
+ if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
+ ret = -EPERM;
+ goto exit;
+ }
+
+ if (!sme->ssid || !sme->ssid_len)
+ {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ if (sme->ssid_len > IW_ESSID_MAX_SIZE) {
+
+ ret = -E2BIG;
+ goto exit;
+ }
+
+ memset(&ndis_ssid, 0, sizeof(struct ndis_802_11_ssid));
+ ndis_ssid.SsidLength = sme->ssid_len;
+ memcpy(ndis_ssid.Ssid, (u8 *)sme->ssid, sme->ssid_len);
+
+ DBG_8192C("ssid =%s, len =%zu\n", ndis_ssid.Ssid, sme->ssid_len);
+
+
+ if (sme->bssid)
+ DBG_8192C("bssid ="MAC_FMT"\n", MAC_ARG(sme->bssid));
+
+
+ if (check_fwstate(pmlmepriv, _FW_UNDER_LINKING) == true) {
+ ret = -EBUSY;
+ DBG_8192C("%s, fw_state = 0x%x, goto exit\n", __func__, pmlmepriv->fw_state);
+ goto exit;
+ }
+ if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY) == true) {
+ rtw_scan_abort(padapter);
+ }
+
+ psecuritypriv->ndisencryptstatus = Ndis802_11EncryptionDisabled;
+ psecuritypriv->dot11PrivacyAlgrthm = _NO_PRIVACY_;
+ psecuritypriv->dot118021XGrpPrivacy = _NO_PRIVACY_;
+ psecuritypriv->dot11AuthAlgrthm = dot11AuthAlgrthm_Open; /* open system */
+ psecuritypriv->ndisauthtype = Ndis802_11AuthModeOpen;
+
+ ret = rtw_cfg80211_set_wpa_version(psecuritypriv, sme->crypto.wpa_versions);
+ if (ret < 0)
+ goto exit;
+
+ ret = rtw_cfg80211_set_auth_type(psecuritypriv, sme->auth_type);
+
+ if (ret < 0)
+ goto exit;
+
+ DBG_8192C("%s, ie_len =%zu\n", __func__, sme->ie_len);
+
+ ret = rtw_cfg80211_set_wpa_ie(padapter, (u8 *)sme->ie, sme->ie_len);
+ if (ret < 0)
+ goto exit;
+
+ if (sme->crypto.n_ciphers_pairwise) {
+ ret = rtw_cfg80211_set_cipher(psecuritypriv, sme->crypto.ciphers_pairwise[0], true);
+ if (ret < 0)
+ goto exit;
+ }
+
+ /* For WEP Shared auth */
+ if ((psecuritypriv->dot11AuthAlgrthm == dot11AuthAlgrthm_Shared
+ || psecuritypriv->dot11AuthAlgrthm == dot11AuthAlgrthm_Auto) && sme->key
+ )
+ {
+ u32 wep_key_idx, wep_key_len, wep_total_len;
+ struct ndis_802_11_wep *pwep = NULL;
+ DBG_871X("%s(): Shared/Auto WEP\n", __func__);
+
+ wep_key_idx = sme->key_idx;
+ wep_key_len = sme->key_len;
+
+ if (sme->key_idx > WEP_KEYS) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ if (wep_key_len > 0)
+ {
+ wep_key_len = wep_key_len <= 5 ? 5 : 13;
+ wep_total_len = wep_key_len + FIELD_OFFSET(struct ndis_802_11_wep, KeyMaterial);
+ pwep = (struct ndis_802_11_wep *)rtw_malloc(wep_total_len);
+ if (pwep == NULL) {
+ DBG_871X(" wpa_set_encryption: pwep allocate fail !!!\n");
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ memset(pwep, 0, wep_total_len);
+
+ pwep->KeyLength = wep_key_len;
+ pwep->Length = wep_total_len;
+
+ if (wep_key_len == 13)
+ {
+ padapter->securitypriv.dot11PrivacyAlgrthm = _WEP104_;
+ padapter->securitypriv.dot118021XGrpPrivacy = _WEP104_;
+ }
+ }
+ else {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ pwep->KeyIndex = wep_key_idx;
+ pwep->KeyIndex |= 0x80000000;
+
+ memcpy(pwep->KeyMaterial, (void *)sme->key, pwep->KeyLength);
+
+ if (rtw_set_802_11_add_wep(padapter, pwep) == (u8)_FAIL)
+ {
+ ret = -EOPNOTSUPP;
+ }
+
+ kfree((u8 *)pwep);
+
+ if (ret < 0)
+ goto exit;
+ }
+
+ ret = rtw_cfg80211_set_cipher(psecuritypriv, sme->crypto.cipher_group, false);
+ if (ret < 0)
+ goto exit;
+
+ if (sme->crypto.n_akm_suites) {
+ ret = rtw_cfg80211_set_key_mgt(psecuritypriv, sme->crypto.akm_suites[0]);
+ if (ret < 0)
+ goto exit;
+ }
+
+ authmode = psecuritypriv->ndisauthtype;
+ rtw_set_802_11_authentication_mode(padapter, authmode);
+
+ /* rtw_set_802_11_encryption_mode(padapter, padapter->securitypriv.ndisencryptstatus); */
+
+ if (rtw_set_802_11_connect(padapter, (u8 *)sme->bssid, &ndis_ssid) == false) {
+ ret = -1;
+ goto exit;
+ }
+
+ DBG_8192C("set ssid:dot11AuthAlgrthm =%d, dot11PrivacyAlgrthm =%d, dot118021XGrpPrivacy =%d\n", psecuritypriv->dot11AuthAlgrthm, psecuritypriv->dot11PrivacyAlgrthm, psecuritypriv->dot118021XGrpPrivacy);
+
+exit:
+
+ rtw_ps_deny_cancel(padapter, PS_DENY_JOIN);
+
+ DBG_8192C("<=%s, ret %d\n", __func__, ret);
+
+ padapter->mlmepriv.not_indic_disco = false;
+
+ return ret;
+}
+
+static int cfg80211_rtw_disconnect(struct wiphy *wiphy, struct net_device *ndev,
+ u16 reason_code)
+{
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(ndev);
+
+ DBG_871X(FUNC_NDEV_FMT"\n", FUNC_NDEV_ARG(ndev));
+
+ padapter->mlmepriv.not_indic_disco = true;
+
+ rtw_set_to_roam(padapter, 0);
+
+ rtw_scan_abort(padapter);
+ LeaveAllPowerSaveMode(padapter);
+ rtw_disassoc_cmd(padapter, 500, false);
+
+ DBG_871X("%s...call rtw_indicate_disconnect\n", __func__);
+
+ rtw_indicate_disconnect(padapter);
+
+ rtw_free_assoc_resources(padapter, 1);
+ rtw_pwr_wakeup(padapter);
+
+ padapter->mlmepriv.not_indic_disco = false;
+
+ DBG_871X(FUNC_NDEV_FMT" return 0\n", FUNC_NDEV_ARG(ndev));
+ return 0;
+}
+
+static int cfg80211_rtw_set_txpower(struct wiphy *wiphy,
+ struct wireless_dev *wdev,
+ enum nl80211_tx_power_setting type, int mbm)
+{
+ DBG_8192C("%s\n", __func__);
+ return 0;
+}
+
+static int cfg80211_rtw_get_txpower(struct wiphy *wiphy,
+ struct wireless_dev *wdev,
+ int *dbm)
+{
+ DBG_8192C("%s\n", __func__);
+
+ *dbm = 12;
+
+ return 0;
+}
+
+inline bool rtw_cfg80211_pwr_mgmt(struct adapter *adapter)
+{
+ struct rtw_wdev_priv *rtw_wdev_priv = adapter_wdev_data(adapter);
+ return rtw_wdev_priv->power_mgmt;
+}
+
+static int cfg80211_rtw_set_power_mgmt(struct wiphy *wiphy,
+ struct net_device *ndev,
+ bool enabled, int timeout)
+{
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(ndev);
+ struct rtw_wdev_priv *rtw_wdev_priv = adapter_wdev_data(padapter);
+
+ DBG_871X(FUNC_NDEV_FMT" enabled:%u, timeout:%d\n", FUNC_NDEV_ARG(ndev),
+ enabled, timeout);
+
+ rtw_wdev_priv->power_mgmt = enabled;
+
+ if (!enabled)
+ LPS_Leave(padapter, "CFG80211_PWRMGMT");
+
+ return 0;
+}
+
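+/* PMKSA cache handlers: set_pmksa overwrites an existing entry for the BSSID
+ * or claims the next free slot in the 16-entry ring buffer.
+ */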
+static int cfg80211_rtw_set_pmksa(struct wiphy *wiphy,
+ struct net_device *ndev,
+ struct cfg80211_pmksa *pmksa)
+{
+ u8 index, blInserted = false;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(ndev);
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
+ u8 strZeroMacAddress[ETH_ALEN] = { 0x00 };
+
+ DBG_871X(FUNC_NDEV_FMT"\n", FUNC_NDEV_ARG(ndev));
+
+ if (!memcmp((u8 *)pmksa->bssid, strZeroMacAddress, ETH_ALEN))
+ {
+ return -EINVAL;
+ }
+
+ blInserted = false;
+
+ /* overwrite PMKID */
+ for (index = 0; index < NUM_PMKID_CACHE; index++)
+ {
+ if (!memcmp(psecuritypriv->PMKIDList[index].Bssid, (u8 *)pmksa->bssid, ETH_ALEN))
+ { /* BSSID is matched, the same AP => rewrite with new PMKID. */
+ DBG_871X(FUNC_NDEV_FMT" BSSID exists in the PMKList.\n", FUNC_NDEV_ARG(ndev));
+
+ memcpy(psecuritypriv->PMKIDList[index].PMKID, (u8 *)pmksa->pmkid, WLAN_PMKID_LEN);
+ psecuritypriv->PMKIDList[index].bUsed = true;
+ psecuritypriv->PMKIDIndex = index+1;
+ blInserted = true;
+ break;
+ }
+ }
+
+ if (!blInserted)
+ {
+ /* Find a new entry */
+ DBG_871X(FUNC_NDEV_FMT" Use the new entry index = %d for this PMKID.\n",
+ FUNC_NDEV_ARG(ndev), psecuritypriv->PMKIDIndex);
+
+ memcpy(psecuritypriv->PMKIDList[psecuritypriv->PMKIDIndex].Bssid, (u8 *)pmksa->bssid, ETH_ALEN);
+ memcpy(psecuritypriv->PMKIDList[psecuritypriv->PMKIDIndex].PMKID, (u8 *)pmksa->pmkid, WLAN_PMKID_LEN);
+
+ psecuritypriv->PMKIDList[psecuritypriv->PMKIDIndex].bUsed = true;
+ psecuritypriv->PMKIDIndex++;
+ if (psecuritypriv->PMKIDIndex == 16)
+ {
+ psecuritypriv->PMKIDIndex = 0;
+ }
+ }
+
+ return 0;
+}
+
+static int cfg80211_rtw_del_pmksa(struct wiphy *wiphy,
+ struct net_device *ndev,
+ struct cfg80211_pmksa *pmksa)
+{
+ u8 index, bMatched = false;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(ndev);
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
+
+ DBG_871X(FUNC_NDEV_FMT"\n", FUNC_NDEV_ARG(ndev));
+
+ for (index = 0; index < NUM_PMKID_CACHE; index++)
+ {
+ if (!memcmp(psecuritypriv->PMKIDList[index].Bssid, (u8 *)pmksa->bssid, ETH_ALEN))
+ { /* BSSID is matched, the same AP => Remove this PMKID information and reset it. */
+ memset(psecuritypriv->PMKIDList[index].Bssid, 0x00, ETH_ALEN);
+ memset(psecuritypriv->PMKIDList[index].PMKID, 0x00, WLAN_PMKID_LEN);
+ psecuritypriv->PMKIDList[index].bUsed = false;
+ bMatched = true;
+ break;
+ }
+ }
+
+ if (!bMatched)
+ {
+ DBG_871X(FUNC_NDEV_FMT" do not have matched BSSID\n"
+ , FUNC_NDEV_ARG(ndev));
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int cfg80211_rtw_flush_pmksa(struct wiphy *wiphy,
+ struct net_device *ndev)
+{
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(ndev);
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
+
+ DBG_871X(FUNC_NDEV_FMT"\n", FUNC_NDEV_ARG(ndev));
+
+ memset(&psecuritypriv->PMKIDList[0], 0x00, sizeof(RT_PMKID_LIST) * NUM_PMKID_CACHE);
+ psecuritypriv->PMKIDIndex = 0;
+
+ return 0;
+}
+
+void rtw_cfg80211_indicate_sta_assoc(struct adapter *padapter, u8 *pmgmt_frame, uint frame_len)
+{
+ struct net_device *ndev = padapter->pnetdev;
+
+ DBG_871X(FUNC_ADPT_FMT"\n", FUNC_ADPT_ARG(padapter));
+
+ {
+ struct station_info sinfo;
+ u8 ie_offset;
+ if (GetFrameSubType(pmgmt_frame) == WIFI_ASSOCREQ)
+ ie_offset = _ASOCREQ_IE_OFFSET_;
+ else /* WIFI_REASSOCREQ */
+ ie_offset = _REASOCREQ_IE_OFFSET_;
+
+ sinfo.filled = 0;
+ sinfo.assoc_req_ies = pmgmt_frame + WLAN_HDR_A3_LEN + ie_offset;
+ sinfo.assoc_req_ies_len = frame_len - WLAN_HDR_A3_LEN - ie_offset;
+ cfg80211_new_sta(ndev, GetAddr2Ptr(pmgmt_frame), &sinfo, GFP_ATOMIC);
+ }
+}
+
+void rtw_cfg80211_indicate_sta_disassoc(struct adapter *padapter, unsigned char *da, unsigned short reason)
+{
+ struct net_device *ndev = padapter->pnetdev;
+
+ DBG_871X(FUNC_ADPT_FMT"\n", FUNC_ADPT_ARG(padapter));
+
+ cfg80211_del_sta(ndev, da, GFP_ATOMIC);
+}
+
+static int rtw_cfg80211_monitor_if_open(struct net_device *ndev)
+{
+ int ret = 0;
+
+ DBG_8192C("%s\n", __func__);
+
+ return ret;
+}
+
+static int rtw_cfg80211_monitor_if_close(struct net_device *ndev)
+{
+ int ret = 0;
+
+ DBG_8192C("%s\n", __func__);
+
+ return ret;
+}
+
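+/* Tx entry point of the monitor interface: strip the radiotap header, then
+ * either forward data frames through the real netdev or inject action frames
+ * as raw management frames.
+ */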
+static int rtw_cfg80211_monitor_if_xmit_entry(struct sk_buff *skb, struct net_device *ndev)
+{
+ int ret = 0;
+ int rtap_len;
+ int qos_len = 0;
+ int dot11_hdr_len = 24;
+ int snap_len = 6;
+ unsigned char *pdata;
+ u16 frame_control;
+ unsigned char src_mac_addr[6];
+ unsigned char dst_mac_addr[6];
+ struct ieee80211_hdr *dot11_hdr;
+ struct ieee80211_radiotap_header *rtap_hdr;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(ndev);
+
+ DBG_871X(FUNC_NDEV_FMT"\n", FUNC_NDEV_ARG(ndev));
+
+ if (!skb)
+ goto fail;
+
+ rtw_mstat_update(MSTAT_TYPE_SKB, MSTAT_ALLOC_SUCCESS, skb->truesize);
+
+ if (unlikely(skb->len < sizeof(struct ieee80211_radiotap_header)))
+ goto fail;
+
+ rtap_hdr = (struct ieee80211_radiotap_header *)skb->data;
+ if (unlikely(rtap_hdr->it_version))
+ goto fail;
+
+ rtap_len = ieee80211_get_radiotap_len(skb->data);
+ if (unlikely(skb->len < rtap_len))
+ goto fail;
+
+ if (rtap_len != 14)
+ {
+ DBG_8192C("radiotap len (should be 14): %d\n", rtap_len);
+ goto fail;
+ }
+
+ /* Skip the radiotap header */
+ skb_pull(skb, rtap_len);
+
+ dot11_hdr = (struct ieee80211_hdr *)skb->data;
+ frame_control = le16_to_cpu(dot11_hdr->frame_control);
+ /* Check if the QoS bit is set */
+ if ((frame_control & RTW_IEEE80211_FCTL_FTYPE) == RTW_IEEE80211_FTYPE_DATA) {
+ /* Check if this is a Wireless Distribution System (WDS) frame
+ * which has 4 MAC addresses
+ */
+ if (frame_control & 0x0080)
+ qos_len = 2;
+ if ((frame_control & 0x0300) == 0x0300)
+ dot11_hdr_len += 6;
+
+ memcpy(dst_mac_addr, dot11_hdr->addr1, sizeof(dst_mac_addr));
+ memcpy(src_mac_addr, dot11_hdr->addr2, sizeof(src_mac_addr));
+
+ /* Skip the 802.11 header, QoS (if any) and SNAP, but leave space for
+ * the two MAC addresses
+ */
+ skb_pull(skb, dot11_hdr_len + qos_len + snap_len - sizeof(src_mac_addr) * 2);
+ pdata = (unsigned char *)skb->data;
+ memcpy(pdata, dst_mac_addr, sizeof(dst_mac_addr));
+ memcpy(pdata + sizeof(dst_mac_addr), src_mac_addr, sizeof(src_mac_addr));
+
+ DBG_8192C("should be eapol packet\n");
+
+ /* Use the real net device to transmit the packet */
+ ret = _rtw_xmit_entry(skb, padapter->pnetdev);
+
+ return ret;
+
+ }
+ else if ((frame_control & (RTW_IEEE80211_FCTL_FTYPE|RTW_IEEE80211_FCTL_STYPE))
+ == (RTW_IEEE80211_FTYPE_MGMT|RTW_IEEE80211_STYPE_ACTION)
+ )
+ {
+ /* only for action frames */
+ struct xmit_frame *pmgntframe;
+ struct pkt_attrib *pattrib;
+ unsigned char *pframe;
+ /* u8 category, action, OUI_Subtype, dialogToken = 0; */
+ /* unsigned char *frame_body; */
+ struct ieee80211_hdr *pwlanhdr;
+ struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+ u8 *buf = skb->data;
+ u32 len = skb->len;
+ u8 category, action;
+
+ if (rtw_action_frame_parse(buf, len, &category, &action) == false) {
+ DBG_8192C(FUNC_NDEV_FMT" frame_control:0x%x\n", FUNC_NDEV_ARG(ndev),
+ le16_to_cpu(((struct ieee80211_hdr_3addr *)buf)->frame_control));
+ goto fail;
+ }
+
+ DBG_8192C("RTW_Tx:da ="MAC_FMT" via "FUNC_NDEV_FMT"\n",
+ MAC_ARG(GetAddr1Ptr(buf)), FUNC_NDEV_ARG(ndev));
+ if (category == RTW_WLAN_CATEGORY_PUBLIC)
+ DBG_871X("RTW_Tx:%s\n", action_public_str(action));
+ else
+ DBG_871X("RTW_Tx:category(%u), action(%u)\n", category, action);
+
+ /* allocate a management xmit frame so the action frame can be dumped to hardware */
+ pmgntframe = alloc_mgtxmitframe(pxmitpriv);
+ if (pmgntframe == NULL)
+ goto fail;
+
+ /* update attribute */
+ pattrib = &pmgntframe->attrib;
+ update_mgntframe_attrib(padapter, pattrib);
+ pattrib->retry_ctrl = false;
+
+ memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
+
+ pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
+
+ memcpy(pframe, (void *)buf, len);
+ pattrib->pktlen = len;
+
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
+ /* update seq number */
+ pmlmeext->mgnt_seq = GetSequence(pwlanhdr);
+ pattrib->seqnum = pmlmeext->mgnt_seq;
+ pmlmeext->mgnt_seq++;
+
+
+ pattrib->last_txcmdsz = pattrib->pktlen;
+
+ dump_mgntframe(padapter, pmgntframe);
+
+ }
+ else
+ {
+ DBG_8192C("frame_control = 0x%x\n", frame_control & (RTW_IEEE80211_FCTL_FTYPE|RTW_IEEE80211_FCTL_STYPE));
+ }
+
+
+fail:
+
+ dev_kfree_skb_any(skb);
+
+ return 0;
+
+}
+
+static int rtw_cfg80211_monitor_if_set_mac_address(struct net_device *ndev, void *addr)
+{
+ int ret = 0;
+
+ DBG_8192C("%s\n", __func__);
+
+ return ret;
+}
+
+static const struct net_device_ops rtw_cfg80211_monitor_if_ops = {
+ .ndo_open = rtw_cfg80211_monitor_if_open,
+ .ndo_stop = rtw_cfg80211_monitor_if_close,
+ .ndo_start_xmit = rtw_cfg80211_monitor_if_xmit_entry,
+ .ndo_set_mac_address = rtw_cfg80211_monitor_if_set_mac_address,
+};
+
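+/* Create and register the single allowed monitor net_device/wireless_dev
+ * pair; it is torn down again in cfg80211_rtw_del_virtual_intf().
+ */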
+static int rtw_cfg80211_add_monitor_if(struct adapter *padapter, char *name, struct net_device **ndev)
+{
+ int ret = 0;
+ struct net_device *mon_ndev = NULL;
+ struct wireless_dev *mon_wdev = NULL;
+ struct rtw_netdev_priv_indicator *pnpi;
+ struct rtw_wdev_priv *pwdev_priv = adapter_wdev_data(padapter);
+
+ if (!name) {
+ DBG_871X(FUNC_ADPT_FMT" without specific name\n", FUNC_ADPT_ARG(padapter));
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (pwdev_priv->pmon_ndev) {
+ DBG_871X(FUNC_ADPT_FMT" monitor interface exist: "NDEV_FMT"\n",
+ FUNC_ADPT_ARG(padapter), NDEV_ARG(pwdev_priv->pmon_ndev));
+ ret = -EBUSY;
+ goto out;
+ }
+
+ mon_ndev = alloc_etherdev(sizeof(struct rtw_netdev_priv_indicator));
+ if (!mon_ndev) {
+ DBG_871X(FUNC_ADPT_FMT" allocate ndev fail\n", FUNC_ADPT_ARG(padapter));
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ mon_ndev->type = ARPHRD_IEEE80211_RADIOTAP;
+ strncpy(mon_ndev->name, name, IFNAMSIZ);
+ mon_ndev->name[IFNAMSIZ - 1] = 0;
+ mon_ndev->destructor = rtw_ndev_destructor;
+
+ mon_ndev->netdev_ops = &rtw_cfg80211_monitor_if_ops;
+
+ pnpi = netdev_priv(mon_ndev);
+ pnpi->priv = padapter;
+ pnpi->sizeof_priv = sizeof(struct adapter);
+
+ /* wdev */
+ mon_wdev = (struct wireless_dev *)rtw_zmalloc(sizeof(struct wireless_dev));
+ if (!mon_wdev) {
+ DBG_871X(FUNC_ADPT_FMT" allocate mon_wdev fail\n", FUNC_ADPT_ARG(padapter));
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ mon_wdev->wiphy = padapter->rtw_wdev->wiphy;
+ mon_wdev->netdev = mon_ndev;
+ mon_wdev->iftype = NL80211_IFTYPE_MONITOR;
+ mon_ndev->ieee80211_ptr = mon_wdev;
+
+ ret = register_netdevice(mon_ndev);
+ if (ret) {
+ goto out;
+ }
+
+ *ndev = pwdev_priv->pmon_ndev = mon_ndev;
+ memcpy(pwdev_priv->ifname_mon, name, IFNAMSIZ+1);
+
+out:
+ if (ret && mon_wdev) {
+ kfree((u8 *)mon_wdev);
+ mon_wdev = NULL;
+ }
+
+ if (ret && mon_ndev) {
+ free_netdev(mon_ndev);
+ *ndev = mon_ndev = NULL;
+ }
+
+ return ret;
+}
+
+static struct wireless_dev *cfg80211_rtw_add_virtual_intf(struct wiphy *wiphy,
+ const char *name,
+ unsigned char name_assign_type,
+ enum nl80211_iftype type,
+ struct vif_params *params)
+{
+ int ret = 0;
+ struct net_device *ndev = NULL;
+ struct adapter *padapter = wiphy_to_adapter(wiphy);
+
+ DBG_871X(FUNC_ADPT_FMT " wiphy:%s, name:%s, type:%d\n",
+ FUNC_ADPT_ARG(padapter), wiphy_name(wiphy), name, type);
+
+ switch (type) {
+ case NL80211_IFTYPE_ADHOC:
+ case NL80211_IFTYPE_AP_VLAN:
+ case NL80211_IFTYPE_WDS:
+ case NL80211_IFTYPE_MESH_POINT:
+ ret = -ENODEV;
+ break;
+ case NL80211_IFTYPE_MONITOR:
+ ret = rtw_cfg80211_add_monitor_if(padapter, (char *)name, &ndev);
+ break;
+ case NL80211_IFTYPE_P2P_CLIENT:
+ case NL80211_IFTYPE_STATION:
+ ret = -ENODEV;
+ break;
+ case NL80211_IFTYPE_P2P_GO:
+ case NL80211_IFTYPE_AP:
+ ret = -ENODEV;
+ break;
+ default:
+ ret = -ENODEV;
+ DBG_871X("Unsupported interface type\n");
+ break;
+ }
+
+ DBG_871X(FUNC_ADPT_FMT" ndev:%p, ret:%d\n", FUNC_ADPT_ARG(padapter), ndev, ret);
+
+ return ndev ? ndev->ieee80211_ptr : ERR_PTR(ret);
+}
+
+static int cfg80211_rtw_del_virtual_intf(struct wiphy *wiphy,
+ struct wireless_dev *wdev)
+{
+ struct net_device *ndev = wdev_to_ndev(wdev);
+ int ret = 0;
+ struct adapter *adapter;
+ struct rtw_wdev_priv *pwdev_priv;
+
+ if (!ndev) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ adapter = (struct adapter *)rtw_netdev_priv(ndev);
+ pwdev_priv = adapter_wdev_data(adapter);
+
+ unregister_netdevice(ndev);
+
+ if (ndev == pwdev_priv->pmon_ndev) {
+ pwdev_priv->pmon_ndev = NULL;
+ pwdev_priv->ifname_mon[0] = '\0';
+ DBG_871X(FUNC_NDEV_FMT" remove monitor interface\n", FUNC_NDEV_ARG(ndev));
+ }
+
+exit:
+ return ret;
+}
+
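+/* Rebuild the beacon IEs from the cfg80211 head/tail buffers (dropping the
+ * fixed 24-byte header and any P2P/WFD vendor IEs) and hand the result to
+ * rtw_check_beacon_data().
+ */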
+static int rtw_add_beacon(struct adapter *adapter, const u8 *head, size_t head_len, const u8 *tail, size_t tail_len)
+{
+ int ret = 0;
+ u8 *pbuf = NULL;
+ uint len, wps_ielen = 0;
+ struct mlme_priv *pmlmepriv = &(adapter->mlmepriv);
+
+ DBG_8192C("%s beacon_head_len =%zu, beacon_tail_len =%zu\n", __func__, head_len, tail_len);
+
+ if (check_fwstate(pmlmepriv, WIFI_AP_STATE) != true)
+ return -EINVAL;
+
+ if (head_len < 24)
+ return -EINVAL;
+
+ pbuf = rtw_zmalloc(head_len + tail_len);
+ if (!pbuf)
+ return -ENOMEM;
+
+ memcpy(pbuf, (void *)head + 24, head_len - 24); /* 24 = beacon header length */
+ memcpy(pbuf + head_len - 24, (void *)tail, tail_len);
+
+ len = head_len + tail_len - 24;
+
+ /* check whether a WPS IE is included */
+ if (rtw_get_wps_ie(pbuf + _FIXED_IE_LENGTH_, len - _FIXED_IE_LENGTH_, NULL, &wps_ielen))
+ DBG_8192C("add bcn, wps_ielen =%d\n", wps_ielen);
+
+ /* pbss_network->IEs will not include p2p_ie, wfd ie */
+ rtw_ies_remove_ie(pbuf, &len, _BEACON_IE_OFFSET_, _VENDOR_SPECIFIC_IE_, P2P_OUI, 4);
+ rtw_ies_remove_ie(pbuf, &len, _BEACON_IE_OFFSET_, _VENDOR_SPECIFIC_IE_, WFD_OUI, 4);
+
+ if (rtw_check_beacon_data(adapter, pbuf, len) == _SUCCESS)
+ ret = 0;
+ else
+ ret = -EINVAL;
+
+
+ kfree(pbuf);
+
+ return ret;
+}
+
+static int cfg80211_rtw_start_ap(struct wiphy *wiphy, struct net_device *ndev,
+ struct cfg80211_ap_settings *settings)
+{
+ int ret = 0;
+ struct adapter *adapter = (struct adapter *)rtw_netdev_priv(ndev);
+
+ DBG_871X(FUNC_NDEV_FMT" hidden_ssid:%d, auth_type:%d\n", FUNC_NDEV_ARG(ndev),
+ settings->hidden_ssid, settings->auth_type);
+
+ ret = rtw_add_beacon(adapter, settings->beacon.head, settings->beacon.head_len,
+ settings->beacon.tail, settings->beacon.tail_len);
+
+ adapter->mlmeextpriv.mlmext_info.hidden_ssid_mode = settings->hidden_ssid;
+
+ if (settings->ssid && settings->ssid_len) {
+ struct wlan_bssid_ex *pbss_network = &adapter->mlmepriv.cur_network.network;
+ struct wlan_bssid_ex *pbss_network_ext = &adapter->mlmeextpriv.mlmext_info.network;
+
+ memcpy(pbss_network->Ssid.Ssid, (void *)settings->ssid, settings->ssid_len);
+ pbss_network->Ssid.SsidLength = settings->ssid_len;
+ memcpy(pbss_network_ext->Ssid.Ssid, (void *)settings->ssid, settings->ssid_len);
+ pbss_network_ext->Ssid.SsidLength = settings->ssid_len;
+ }
+
+ return ret;
+}
+
+static int cfg80211_rtw_change_beacon(struct wiphy *wiphy, struct net_device *ndev,
+ struct cfg80211_beacon_data *info)
+{
+ int ret = 0;
+ struct adapter *adapter = (struct adapter *)rtw_netdev_priv(ndev);
+
+ DBG_871X(FUNC_NDEV_FMT"\n", FUNC_NDEV_ARG(ndev));
+
+ ret = rtw_add_beacon(adapter, info->head, info->head_len, info->tail, info->tail_len);
+
+ return ret;
+}
+
+static int cfg80211_rtw_stop_ap(struct wiphy *wiphy, struct net_device *ndev)
+{
+ DBG_871X(FUNC_NDEV_FMT"\n", FUNC_NDEV_ARG(ndev));
+ return 0;
+}
+
+static int cfg80211_rtw_add_station(struct wiphy *wiphy, struct net_device *ndev,
+ const u8 *mac,
+ struct station_parameters *params)
+{
+ DBG_871X(FUNC_NDEV_FMT"\n", FUNC_NDEV_ARG(ndev));
+
+ return 0;
+}
+
+static int cfg80211_rtw_del_station(struct wiphy *wiphy, struct net_device *ndev,
+ struct station_del_parameters *params)
+{
+ int ret = 0;
+ struct list_head *phead, *plist;
+ u8 updated = false;
+ struct sta_info *psta = NULL;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(ndev);
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ const u8 *mac = params->mac;
+
+ DBG_871X("+"FUNC_NDEV_FMT"\n", FUNC_NDEV_ARG(ndev));
+
+ if (check_fwstate(pmlmepriv, (_FW_LINKED|WIFI_AP_STATE)) != true)
+ {
+ DBG_8192C("%s, fw_state != FW_LINKED|WIFI_AP_STATE\n", __func__);
+ return -EINVAL;
+ }
+
+
+ if (!mac)
+ {
+ DBG_8192C("flush all sta, and cam_entry\n");
+
+ flush_all_cam_entry(padapter); /* clear CAM */
+
+ ret = rtw_sta_flush(padapter);
+
+ return ret;
+ }
+
+
+ DBG_8192C("free sta macaddr =" MAC_FMT "\n", MAC_ARG(mac));
+
+ if (is_broadcast_ether_addr(mac))
+ return -EINVAL;
+
+
+ spin_lock_bh(&pstapriv->asoc_list_lock);
+
+ phead = &pstapriv->asoc_list;
+ plist = get_next(phead);
+
+ /* check asoc_queue */
+ while (phead != plist)
+ {
+ psta = LIST_CONTAINOR(plist, struct sta_info, asoc_list);
+
+ plist = get_next(plist);
+
+ if (!memcmp((u8 *)mac, psta->hwaddr, ETH_ALEN))
+ {
+ if (psta->dot8021xalg == 1 && psta->bpairwise_key_installed == false)
+ {
+ DBG_8192C("%s, sta's dot8021xalg = 1 and key_installed = false\n", __func__);
+ }
+ else
+ {
+ DBG_8192C("free psta =%p, aid =%d\n", psta, psta->aid);
+
+ list_del_init(&psta->asoc_list);
+ pstapriv->asoc_list_cnt--;
+
+ updated = ap_free_sta(padapter, psta, true, WLAN_REASON_DEAUTH_LEAVING);
+
+ psta = NULL;
+
+ break;
+ }
+
+ }
+
+ }
+
+ spin_unlock_bh(&pstapriv->asoc_list_lock);
+
+ associated_clients_update(padapter, updated);
+
+ DBG_871X("-"FUNC_NDEV_FMT"\n", FUNC_NDEV_ARG(ndev));
+
+ return ret;
+
+}
+
+static int cfg80211_rtw_change_station(struct wiphy *wiphy, struct net_device *ndev,
+ const u8 *mac, struct station_parameters *params)
+{
+ DBG_871X(FUNC_NDEV_FMT"\n", FUNC_NDEV_ARG(ndev));
+
+ return 0;
+}
+
+static struct sta_info *rtw_sta_info_get_by_idx(const int idx, struct sta_priv *pstapriv)
+{
+ struct list_head *phead, *plist;
+ struct sta_info *psta = NULL;
+ int i = 0;
+
+ phead = &pstapriv->asoc_list;
+ plist = get_next(phead);
+
+ /* check asoc_queue */
+ while (phead != plist)
+ {
+ if (idx == i)
+ psta = LIST_CONTAINOR(plist, struct sta_info, asoc_list);
+ plist = get_next(plist);
+ i++;
+ }
+ return psta;
+}
+
+static int cfg80211_rtw_dump_station(struct wiphy *wiphy, struct net_device *ndev,
+ int idx, u8 *mac, struct station_info *sinfo)
+{
+
+ int ret = 0;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(ndev);
+ struct sta_info *psta = NULL;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ DBG_871X(FUNC_NDEV_FMT"\n", FUNC_NDEV_ARG(ndev));
+
+ spin_lock_bh(&pstapriv->asoc_list_lock);
+ psta = rtw_sta_info_get_by_idx(idx, pstapriv);
+ spin_unlock_bh(&pstapriv->asoc_list_lock);
+ if (NULL == psta)
+ {
+ DBG_871X("Station is not found\n");
+ ret = -ENOENT;
+ goto exit;
+ }
+ memcpy(mac, psta->hwaddr, ETH_ALEN);
+ sinfo->filled = BIT(NL80211_STA_INFO_SIGNAL);
+ sinfo->signal = psta->rssi;
+
+exit:
+ return ret;
+}
+
+static int cfg80211_rtw_change_bss(struct wiphy *wiphy, struct net_device *ndev,
+ struct bss_parameters *params)
+{
+ DBG_871X(FUNC_NDEV_FMT"\n", FUNC_NDEV_ARG(ndev));
+ return 0;
+}
+
+void rtw_cfg80211_rx_action(struct adapter *adapter, u8 *frame, uint frame_len, const char*msg)
+{
+ s32 freq;
+ int channel;
+ u8 category, action;
+
+ channel = rtw_get_oper_ch(adapter);
+
+ rtw_action_frame_parse(frame, frame_len, &category, &action);
+
+ DBG_8192C("RTW_Rx:cur_ch =%d\n", channel);
+ if (msg)
+ DBG_871X("RTW_Rx:%s\n", msg);
+ else
+ DBG_871X("RTW_Rx:category(%u), action(%u)\n", category, action);
+
+ freq = rtw_ieee80211_channel_to_frequency(channel, NL80211_BAND_2GHZ);
+
+ rtw_cfg80211_rx_mgmt(adapter, freq, 0, frame, frame_len, GFP_ATOMIC);
+}
+
+static int _cfg80211_rtw_mgmt_tx(struct adapter *padapter, u8 tx_ch, const u8 *buf, size_t len)
+{
+ struct xmit_frame *pmgntframe;
+ struct pkt_attrib *pattrib;
+ unsigned char *pframe;
+ int ret = _FAIL;
+ bool ack = true;
+ struct ieee80211_hdr *pwlanhdr;
+ struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+
+ rtw_set_scan_deny(padapter, 1000);
+
+ rtw_scan_abort(padapter);
+ if (tx_ch != rtw_get_oper_ch(padapter)) {
+ if (!check_fwstate(&padapter->mlmepriv, _FW_LINKED))
+ pmlmeext->cur_channel = tx_ch;
+ set_channel_bwmode(padapter, tx_ch, HAL_PRIME_CHNL_OFFSET_DONT_CARE, CHANNEL_WIDTH_20);
+ }
+
+ /* allocate a mgmt frame to dump (transmit) */
+ if ((pmgntframe = alloc_mgtxmitframe(pxmitpriv)) == NULL)
+ {
+ /* ret = -ENOMEM; */
+ ret = _FAIL;
+ goto exit;
+ }
+
+ /* update attribute */
+ pattrib = &pmgntframe->attrib;
+ update_mgntframe_attrib(padapter, pattrib);
+ pattrib->retry_ctrl = false;
+
+ memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
+
+ pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
+
+ memcpy(pframe, (void*)buf, len);
+ pattrib->pktlen = len;
+
+ pwlanhdr = (struct ieee80211_hdr *)pframe;
+ /* update seq number */
+ pmlmeext->mgnt_seq = GetSequence(pwlanhdr);
+ pattrib->seqnum = pmlmeext->mgnt_seq;
+ pmlmeext->mgnt_seq++;
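+ /* adopt the sequence number already present in the caller-supplied frame and advance the driver's mgmt sequence counter past it */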
+
+ pattrib->last_txcmdsz = pattrib->pktlen;
+
+ if (dump_mgntframe_and_wait_ack(padapter, pmgntframe) != _SUCCESS)
+ {
+ ack = false;
+ ret = _FAIL;
+
+ #ifdef DEBUG_CFG80211
+ DBG_8192C("%s, ack == _FAIL\n", __func__);
+ #endif
+ }
+ else
+ {
+
+ msleep(50);
+
+ #ifdef DEBUG_CFG80211
+ DBG_8192C("%s, ack =%d, ok!\n", __func__, ack);
+ #endif
+ ret = _SUCCESS;
+ }
+
+exit:
+
+ #ifdef DEBUG_CFG80211
+ DBG_8192C("%s, ret =%d\n", __func__, ret);
+ #endif
+
+ return ret;
+
+}
+
+static int cfg80211_rtw_mgmt_tx(struct wiphy *wiphy,
+ struct wireless_dev *wdev,
+ struct cfg80211_mgmt_tx_params *params,
+ u64 *cookie)
+{
+ struct net_device *ndev = wdev_to_ndev(wdev);
+ struct ieee80211_channel *chan = params->chan;
+ const u8 *buf = params->buf;
+ size_t len = params->len;
+ int ret = 0;
+ int tx_ret;
+ u32 dump_limit = RTW_MAX_MGMT_TX_CNT;
+ u32 dump_cnt = 0;
+ bool ack = true;
+ u8 tx_ch = (u8)ieee80211_frequency_to_channel(chan->center_freq);
+ u8 category, action;
+ int type = (-1);
+ struct adapter *padapter;
+ struct rtw_wdev_priv *pwdev_priv;
+
+ if (ndev == NULL) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ padapter = (struct adapter *)rtw_netdev_priv(ndev);
+ pwdev_priv = adapter_wdev_data(padapter);
+
+ /* cookie generation */
+ *cookie = (unsigned long) buf;
+
+#ifdef DEBUG_CFG80211
+ DBG_871X(FUNC_ADPT_FMT" len =%zu, ch =%d"
+ "\n", FUNC_ADPT_ARG(padapter),
+ len, tx_ch
+ );
+#endif /* DEBUG_CFG80211 */
+
+ /* indicate ack before issuing the frame to avoid racing with the response frame */
+ rtw_cfg80211_mgmt_tx_status(padapter, *cookie, buf, len, ack, GFP_KERNEL);
+
+ if (rtw_action_frame_parse(buf, len, &category, &action) == false) {
+ DBG_8192C(FUNC_ADPT_FMT" frame_control:0x%x\n", FUNC_ADPT_ARG(padapter),
+ le16_to_cpu(((struct ieee80211_hdr_3addr *)buf)->frame_control));
+ goto exit;
+ }
+
+ DBG_8192C("RTW_Tx:tx_ch =%d, da ="MAC_FMT"\n", tx_ch, MAC_ARG(GetAddr1Ptr(buf)));
+ if (category == RTW_WLAN_CATEGORY_PUBLIC)
+ DBG_871X("RTW_Tx:%s\n", action_public_str(action));
+ else
+ DBG_871X("RTW_Tx:category(%u), action(%u)\n", category, action);
+
+ rtw_ps_deny(padapter, PS_DENY_MGNT_TX);
+ if (_FAIL == rtw_pwr_wakeup(padapter)) {
+ ret = -EFAULT;
+ goto cancel_ps_deny;
+ }
+
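+ /* retry the dump (tx + wait for ack) up to RTW_MAX_MGMT_TX_CNT times */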
+ do {
+ dump_cnt++;
+ tx_ret = _cfg80211_rtw_mgmt_tx(padapter, tx_ch, buf, len);
+ } while (dump_cnt < dump_limit && tx_ret != _SUCCESS);
+
+ if (tx_ret != _SUCCESS || dump_cnt > 1) {
+ DBG_871X(FUNC_ADPT_FMT" %s (%d/%d)\n", FUNC_ADPT_ARG(padapter),
+ tx_ret == _SUCCESS?"OK":"FAIL", dump_cnt, dump_limit);
+ }
+
+ switch (type) {
+ case P2P_GO_NEGO_CONF:
+ rtw_clear_scan_deny(padapter);
+ break;
+ case P2P_INVIT_RESP:
+ if (pwdev_priv->invit_info.flags & BIT(0)
+ && pwdev_priv->invit_info.status == 0)
+ {
+ DBG_871X(FUNC_ADPT_FMT" agree with invitation of persistent group\n",
+ FUNC_ADPT_ARG(padapter));
+ rtw_set_scan_deny(padapter, 5000);
+ rtw_pwr_wakeup_ex(padapter, 5000);
+ rtw_clear_scan_deny(padapter);
+ }
+ break;
+ }
+
+cancel_ps_deny:
+ rtw_ps_deny_cancel(padapter, PS_DENY_MGNT_TX);
+exit:
+ return ret;
+}
+
+static void cfg80211_rtw_mgmt_frame_register(struct wiphy *wiphy,
+ struct wireless_dev *wdev,
+ u16 frame_type, bool reg)
+{
+ struct net_device *ndev = wdev_to_ndev(wdev);
+ struct adapter *adapter;
+
+ if (ndev == NULL)
+ goto exit;
+
+ adapter = (struct adapter *)rtw_netdev_priv(ndev);
+
+#ifdef DEBUG_CFG80211
+ DBG_871X(FUNC_ADPT_FMT" frame_type:%x, reg:%d\n", FUNC_ADPT_ARG(adapter),
+ frame_type, reg);
+#endif
+
+ if (frame_type != (IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_PROBE_REQ))
+ return;
+exit:
+ return;
+}
+
+#if defined(CONFIG_PNO_SUPPORT)
+static int cfg80211_rtw_sched_scan_start(struct wiphy *wiphy,
+ struct net_device *dev,
+ struct cfg80211_sched_scan_request *request) {
+
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ u8 ret;
+
+ if (padapter->bup == false) {
+ DBG_871X("%s: net device is down.\n", __func__);
+ return -EIO;
+ }
+
+ if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY) == true ||
+ check_fwstate(pmlmepriv, _FW_LINKED) == true ||
+ check_fwstate(pmlmepriv, _FW_UNDER_LINKING) == true) {
+ DBG_871X("%s: device is busy.\n", __func__);
+ rtw_scan_abort(padapter);
+ }
+
+ if (request == NULL) {
+ DBG_871X("%s: invalid cfg80211_requests parameters.\n", __func__);
+ return -EINVAL;
+ }
+
+ ret = rtw_android_cfg80211_pno_setup(dev, request->ssids,
+ request->n_ssids, request->interval);
+
+ if (ret < 0) {
+ DBG_871X("%s ret: %d\n", __func__, ret);
+ goto exit;
+ }
+
+ ret = rtw_android_pno_enable(dev, true);
+ if (ret < 0) {
+ DBG_871X("%s ret: %d\n", __func__, ret);
+ goto exit;
+ }
+exit:
+ return ret;
+}
+
+static int cfg80211_rtw_sched_scan_stop(struct wiphy *wiphy,
+ struct net_device *dev) {
+ return rtw_android_pno_enable(dev, false);
+}
+#endif /* CONFIG_PNO_SUPPORT */
+
+static void rtw_cfg80211_init_ht_capab(struct ieee80211_sta_ht_cap *ht_cap, enum nl80211_band band, u8 rf_type)
+{
+
+#define MAX_BIT_RATE_40MHZ_MCS15 300 /* Mbps */
+#define MAX_BIT_RATE_40MHZ_MCS7 150 /* Mbps */
+
+ ht_cap->ht_supported = true;
+
+ ht_cap->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
+ IEEE80211_HT_CAP_SGI_40 | IEEE80211_HT_CAP_SGI_20 |
+ IEEE80211_HT_CAP_DSSSCCK40 | IEEE80211_HT_CAP_MAX_AMSDU;
+
+ /*
+ *Maximum length of AMPDU that the STA can receive.
+ *Length = 2 ^ (13 + max_ampdu_length_exp) - 1 (octets)
+ */
+ ht_cap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
+
+ /* Minimum MPDU start spacing */
+ ht_cap->ampdu_density = IEEE80211_HT_MPDU_DENSITY_16;
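+ /*
+ * e.g. IEEE80211_HT_MAX_AMPDU_64K -> 2^(13 + 3) - 1 = 65535 octets,
+ * and IEEE80211_HT_MPDU_DENSITY_16 -> at least 16 us between MPDU starts.
+ */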
+
+ ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
+
+ /*
+ * hw->wiphy->bands[NL80211_BAND_2GHZ]
+ * rx_mask is set based on ant_num (number of RX chains):
+ * if rx_ant == 1, rx_mask[0] = 0xff ==> MCS0-MCS7
+ * if rx_ant == 2, rx_mask[1] = 0xff ==> MCS8-MCS15
+ * if rx_ant >= 3, rx_mask[2] = 0xff
+ * if BW_40, rx_mask[4] = 0x01
+ * rx_highest is the highest supported RX rate
+ */
+ if (rf_type == RF_1T1R)
+ {
+ ht_cap->mcs.rx_mask[0] = 0xFF;
+ ht_cap->mcs.rx_mask[1] = 0x00;
+ ht_cap->mcs.rx_mask[4] = 0x01;
+
+ ht_cap->mcs.rx_highest = cpu_to_le16(MAX_BIT_RATE_40MHZ_MCS7);
+ }
+ else if ((rf_type == RF_1T2R) || (rf_type ==RF_2T2R))
+ {
+ ht_cap->mcs.rx_mask[0] = 0xFF;
+ ht_cap->mcs.rx_mask[1] = 0xFF;
+ ht_cap->mcs.rx_mask[4] = 0x01;
+
+ ht_cap->mcs.rx_highest = cpu_to_le16(MAX_BIT_RATE_40MHZ_MCS15);
+ }
+ else
+ {
+ DBG_8192C("%s, error rf_type =%d\n", __func__, rf_type);
+ }
+
+}
+
+void rtw_cfg80211_init_wiphy(struct adapter *padapter)
+{
+ u8 rf_type;
+ struct ieee80211_supported_band *bands;
+ struct wireless_dev *pwdev = padapter->rtw_wdev;
+ struct wiphy *wiphy = pwdev->wiphy;
+
+ rtw_hal_get_hwreg(padapter, HW_VAR_RF_TYPE, (u8 *)(&rf_type));
+
+ DBG_8192C("%s:rf_type =%d\n", __func__, rf_type);
+
+ {
+ bands = wiphy->bands[NL80211_BAND_2GHZ];
+ if (bands)
+ rtw_cfg80211_init_ht_capab(&bands->ht_cap, NL80211_BAND_2GHZ, rf_type);
+ }
+
+ /* init regulatory domain */
+ rtw_regd_init(padapter, rtw_reg_notifier);
+
+ /* copy mac_addr to wiphy */
+ memcpy(wiphy->perm_addr, padapter->eeprompriv.mac_addr, ETH_ALEN);
+
+}
+
+static void rtw_cfg80211_preinit_wiphy(struct adapter *padapter, struct wiphy *wiphy)
+{
+
+ wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
+
+ wiphy->max_scan_ssids = RTW_SSID_SCAN_AMOUNT;
+ wiphy->max_scan_ie_len = RTW_SCAN_IE_LEN_MAX;
+ wiphy->max_num_pmkids = RTW_MAX_NUM_PMKIDS;
+
+ wiphy->max_remain_on_channel_duration = RTW_MAX_REMAIN_ON_CHANNEL_DURATION;
+
+ wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION)
+ | BIT(NL80211_IFTYPE_ADHOC)
+ | BIT(NL80211_IFTYPE_AP)
+ | BIT(NL80211_IFTYPE_MONITOR)
+ ;
+
+ wiphy->mgmt_stypes = rtw_cfg80211_default_mgmt_stypes;
+
+ wiphy->software_iftypes |= BIT(NL80211_IFTYPE_MONITOR);
+
+ wiphy->cipher_suites = rtw_cipher_suites;
+ wiphy->n_cipher_suites = ARRAY_SIZE(rtw_cipher_suites);
+
+ /* if (padapter->registrypriv.wireless_mode & WIRELESS_11G) */
+ wiphy->bands[NL80211_BAND_2GHZ] = rtw_spt_band_alloc(NL80211_BAND_2GHZ);
+
+ wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
+ wiphy->flags |= WIPHY_FLAG_OFFCHAN_TX | WIPHY_FLAG_HAVE_AP_SME;
+
+#if defined(CONFIG_PM)
+ wiphy->max_sched_scan_reqs = 1;
+#ifdef CONFIG_PNO_SUPPORT
+ wiphy->max_sched_scan_ssids = MAX_PNO_LIST_COUNT;
+#endif
+#endif
+
+#if defined(CONFIG_PM)
+ wiphy->wowlan = &wowlan_stub;
+#endif
+
+ if (padapter->registrypriv.power_mgnt != PS_MODE_ACTIVE)
+ wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT;
+ else
+ wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
+}
+
+static struct cfg80211_ops rtw_cfg80211_ops = {
+ .change_virtual_intf = cfg80211_rtw_change_iface,
+ .add_key = cfg80211_rtw_add_key,
+ .get_key = cfg80211_rtw_get_key,
+ .del_key = cfg80211_rtw_del_key,
+ .set_default_key = cfg80211_rtw_set_default_key,
+ .get_station = cfg80211_rtw_get_station,
+ .scan = cfg80211_rtw_scan,
+ .set_wiphy_params = cfg80211_rtw_set_wiphy_params,
+ .connect = cfg80211_rtw_connect,
+ .disconnect = cfg80211_rtw_disconnect,
+ .join_ibss = cfg80211_rtw_join_ibss,
+ .leave_ibss = cfg80211_rtw_leave_ibss,
+ .set_tx_power = cfg80211_rtw_set_txpower,
+ .get_tx_power = cfg80211_rtw_get_txpower,
+ .set_power_mgmt = cfg80211_rtw_set_power_mgmt,
+ .set_pmksa = cfg80211_rtw_set_pmksa,
+ .del_pmksa = cfg80211_rtw_del_pmksa,
+ .flush_pmksa = cfg80211_rtw_flush_pmksa,
+
+ .add_virtual_intf = cfg80211_rtw_add_virtual_intf,
+ .del_virtual_intf = cfg80211_rtw_del_virtual_intf,
+
+ .start_ap = cfg80211_rtw_start_ap,
+ .change_beacon = cfg80211_rtw_change_beacon,
+ .stop_ap = cfg80211_rtw_stop_ap,
+
+ .add_station = cfg80211_rtw_add_station,
+ .del_station = cfg80211_rtw_del_station,
+ .change_station = cfg80211_rtw_change_station,
+ .dump_station = cfg80211_rtw_dump_station,
+ .change_bss = cfg80211_rtw_change_bss,
+
+ .mgmt_tx = cfg80211_rtw_mgmt_tx,
+ .mgmt_frame_register = cfg80211_rtw_mgmt_frame_register,
+
+#if defined(CONFIG_PNO_SUPPORT)
+ .sched_scan_start = cfg80211_rtw_sched_scan_start,
+ .sched_scan_stop = cfg80211_rtw_sched_scan_stop,
+#endif /* CONFIG_PNO_SUPPORT */
+};
+
+int rtw_wdev_alloc(struct adapter *padapter, struct device *dev)
+{
+ int ret = 0;
+ struct wiphy *wiphy;
+ struct wireless_dev *wdev;
+ struct rtw_wdev_priv *pwdev_priv;
+ struct net_device *pnetdev = padapter->pnetdev;
+
+ DBG_8192C("%s(padapter =%p)\n", __func__, padapter);
+
+ /* wiphy */
+ wiphy = wiphy_new(&rtw_cfg80211_ops, sizeof(struct adapter *));
+ if (!wiphy) {
+ DBG_8192C("Couldn't allocate wiphy device\n");
+ ret = -ENOMEM;
+ goto exit;
+ }
+ set_wiphy_dev(wiphy, dev);
+ *((struct adapter **)wiphy_priv(wiphy)) = padapter;
+ rtw_cfg80211_preinit_wiphy(padapter, wiphy);
+
+ ret = wiphy_register(wiphy);
+ if (ret < 0) {
+ DBG_8192C("Couldn't register wiphy device\n");
+ goto free_wiphy;
+ }
+
+ /* wdev */
+ wdev = (struct wireless_dev *)rtw_zmalloc(sizeof(struct wireless_dev));
+ if (!wdev) {
+ DBG_8192C("Couldn't allocate wireless device\n");
+ ret = -ENOMEM;
+ goto unregister_wiphy;
+ }
+ wdev->wiphy = wiphy;
+ wdev->netdev = pnetdev;
+
+ wdev->iftype = NL80211_IFTYPE_STATION; /* will be init in rtw_hal_init() */
+ /* Must sync with _rtw_init_mlme_priv() */
+ /* pmlmepriv->fw_state = WIFI_STATION_STATE */
+ padapter->rtw_wdev = wdev;
+ pnetdev->ieee80211_ptr = wdev;
+
+ /* init pwdev_priv */
+ pwdev_priv = adapter_wdev_data(padapter);
+ pwdev_priv->rtw_wdev = wdev;
+ pwdev_priv->pmon_ndev = NULL;
+ pwdev_priv->ifname_mon[0] = '\0';
+ pwdev_priv->padapter = padapter;
+ pwdev_priv->scan_request = NULL;
+ spin_lock_init(&pwdev_priv->scan_req_lock);
+
+ pwdev_priv->p2p_enabled = false;
+ pwdev_priv->provdisc_req_issued = false;
+ rtw_wdev_invit_info_init(&pwdev_priv->invit_info);
+ rtw_wdev_nego_info_init(&pwdev_priv->nego_info);
+
+ pwdev_priv->bandroid_scan = false;
+
+ if (padapter->registrypriv.power_mgnt != PS_MODE_ACTIVE)
+ pwdev_priv->power_mgmt = true;
+ else
+ pwdev_priv->power_mgmt = false;
+
+ return ret;
+
+unregister_wiphy:
+ wiphy_unregister(wiphy);
+ free_wiphy:
+ wiphy_free(wiphy);
+exit:
+ return ret;
+
+}
+
+void rtw_wdev_free(struct wireless_dev *wdev)
+{
+ DBG_8192C("%s(wdev =%p)\n", __func__, wdev);
+
+ if (!wdev)
+ return;
+
+ rtw_spt_band_free(wdev->wiphy->bands[NL80211_BAND_2GHZ]);
+
+ wiphy_free(wdev->wiphy);
+
+ kfree((u8 *)wdev);
+}
+
+void rtw_wdev_unregister(struct wireless_dev *wdev)
+{
+ struct net_device *ndev;
+ struct adapter *adapter;
+ struct rtw_wdev_priv *pwdev_priv;
+
+ DBG_8192C("%s(wdev =%p)\n", __func__, wdev);
+
+ if (!wdev)
+ return;
+
+ if (!(ndev = wdev_to_ndev(wdev)))
+ return;
+
+ adapter = (struct adapter *)rtw_netdev_priv(ndev);
+ pwdev_priv = adapter_wdev_data(adapter);
+
+ rtw_cfg80211_indicate_scan_done(adapter, true);
+
+ if (pwdev_priv->pmon_ndev) {
+ DBG_8192C("%s, unregister monitor interface\n", __func__);
+ unregister_netdev(pwdev_priv->pmon_ndev);
+ }
+
+ wiphy_unregister(wdev->wiphy);
+}
diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
new file mode 100644
index 000000000000..916741371bee
--- /dev/null
+++ b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
@@ -0,0 +1,5808 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#define _IOCTL_LINUX_C_
+
+#include <drv_types.h>
+#include <rtw_debug.h>
+#include <rtw_mp.h>
+#include <linux/jiffies.h>
+
+#define RTL_IOCTL_WPA_SUPPLICANT (SIOCIWFIRSTPRIV + 30)
+
+#define SCAN_ITEM_SIZE 768
+#define MAX_CUSTOM_LEN 64
+#define RATE_COUNT 4
+
+/* combo scan */
+#define WEXT_CSCAN_AMOUNT 9
+#define WEXT_CSCAN_BUF_LEN 360
+#define WEXT_CSCAN_HEADER "CSCAN S\x01\x00\x00S\x00"
+#define WEXT_CSCAN_HEADER_SIZE 12
+#define WEXT_CSCAN_SSID_SECTION 'S'
+#define WEXT_CSCAN_CHANNEL_SECTION 'C'
+#define WEXT_CSCAN_NPROBE_SECTION 'N'
+#define WEXT_CSCAN_ACTV_DWELL_SECTION 'A'
+#define WEXT_CSCAN_PASV_DWELL_SECTION 'P'
+#define WEXT_CSCAN_HOME_DWELL_SECTION 'H'
+#define WEXT_CSCAN_TYPE_SECTION 'T'
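+/*
+ * Combo-scan buffer layout (Android wpa_supplicant private command): the
+ * CSCAN header above is followed by single-letter tagged sections, e.g.
+ * 'S' + length + SSID per SSID and 'C' + channel per channel, plus the
+ * dwell-time/type sections; exact field widths follow the Android convention.
+ */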
+
+
+extern u8 key_2char2num(u8 hch, u8 lch);
+
+static u32 rtw_rates[] = {1000000, 2000000, 5500000, 11000000,
+ 6000000, 9000000, 12000000, 18000000, 24000000, 36000000, 48000000, 54000000};
+
+static const char * const iw_operation_mode[] =
+{
+ "Auto", "Ad-Hoc", "Managed", "Master", "Repeater", "Secondary", "Monitor"
+};
+
+static int hex2num_i(char c)
+{
+ if (c >= '0' && c <= '9')
+ return c - '0';
+ if (c >= 'a' && c <= 'f')
+ return c - 'a' + 10;
+ if (c >= 'A' && c <= 'F')
+ return c - 'A' + 10;
+ return -1;
+}
+
+/**
+ * hwaddr_aton - Convert ASCII string to MAC address
+ * @txt: MAC address as a string (e.g., "00:11:22:33:44:55")
+ * @addr: Buffer for the MAC address (ETH_ALEN = 6 bytes)
+ * Returns: 0 on success, -1 on failure (e.g., string not a MAC address)
+ */
+static int hwaddr_aton_i(const char *txt, u8 *addr)
+{
+ int i;
+
+ for (i = 0; i < 6; i++) {
+ int a, b;
+
+ a = hex2num_i(*txt++);
+ if (a < 0)
+ return -1;
+ b = hex2num_i(*txt++);
+ if (b < 0)
+ return -1;
+ *addr++ = (a << 4) | b;
+ if (i < 5 && *txt++ != ':')
+ return -1;
+ }
+
+ return 0;
+}
+
+void indicate_wx_scan_complete_event(struct adapter *padapter)
+{
+ union iwreq_data wrqu;
+
+ memset(&wrqu, 0, sizeof(union iwreq_data));
+
+ /* DBG_871X("+rtw_indicate_wx_scan_complete_event\n"); */
+}
+
+
+void rtw_indicate_wx_assoc_event(struct adapter *padapter)
+{
+ union iwreq_data wrqu;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct wlan_bssid_ex *pnetwork = (struct wlan_bssid_ex*)(&(pmlmeinfo->network));
+
+ memset(&wrqu, 0, sizeof(union iwreq_data));
+
+ wrqu.ap_addr.sa_family = ARPHRD_ETHER;
+
+ if (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) ==true)
+ memcpy(wrqu.ap_addr.sa_data, pnetwork->MacAddress, ETH_ALEN);
+ else
+ memcpy(wrqu.ap_addr.sa_data, pmlmepriv->cur_network.network.MacAddress, ETH_ALEN);
+
+ DBG_871X_LEVEL(_drv_always_, "assoc success\n");
+}
+
+void rtw_indicate_wx_disassoc_event(struct adapter *padapter)
+{
+ union iwreq_data wrqu;
+
+ memset(&wrqu, 0, sizeof(union iwreq_data));
+
+ wrqu.ap_addr.sa_family = ARPHRD_ETHER;
+ memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN);
+}
+
+/*
+uint rtw_is_cckrates_included(u8 *rate)
+{
+ u32 i = 0;
+
+ while (rate[i]!= 0)
+ {
+ if ((((rate[i]) & 0x7f) == 2) || (((rate[i]) & 0x7f) == 4) ||
+ (((rate[i]) & 0x7f) == 11) || (((rate[i]) & 0x7f) == 22))
+ return true;
+ i++;
+ }
+
+ return false;
+}
+
+uint rtw_is_cckratesonly_included(u8 *rate)
+{
+ u32 i = 0;
+
+ while (rate[i]!= 0)
+ {
+ if ((((rate[i]) & 0x7f) != 2) && (((rate[i]) & 0x7f) != 4) &&
+ (((rate[i]) & 0x7f) != 11) && (((rate[i]) & 0x7f) != 22))
+ return false;
+ i++;
+ }
+
+ return true;
+}
+*/
+
+static char *translate_scan(struct adapter *padapter,
+ struct iw_request_info* info, struct wlan_network *pnetwork,
+ char *start, char *stop)
+{
+ struct iw_event iwe;
+ u16 cap;
+ u32 ht_ielen = 0;
+ char *custom = NULL;
+ char *p;
+ u16 max_rate = 0, rate, ht_cap =false, vht_cap = false;
+ u32 i = 0;
+ u8 bw_40MHz = 0, short_GI = 0;
+ u16 mcs_rate = 0, vht_data_rate = 0;
+ u8 ie_offset = (pnetwork->network.Reserved[0] == 2? 0:12);
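+ /*
+ * Reserved[0] == 2 marks a network learned from a probe request, whose IEs
+ * start immediately; beacon/probe response frames carry 12 bytes of fixed
+ * fields (timestamp, beacon interval, capability) before the IEs.
+ */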
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ u8 ss, sq;
+
+ /* AP MAC address */
+ iwe.cmd = SIOCGIWAP;
+ iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
+
+ memcpy(iwe.u.ap_addr.sa_data, pnetwork->network.MacAddress, ETH_ALEN);
+ start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_ADDR_LEN);
+
+ /* Add the ESSID */
+ iwe.cmd = SIOCGIWESSID;
+ iwe.u.data.flags = 1;
+ iwe.u.data.length = min((u16)pnetwork->network.Ssid.SsidLength, (u16)32);
+ start = iwe_stream_add_point(info, start, stop, &iwe, pnetwork->network.Ssid.Ssid);
+
+ /* parsing HT_CAP_IE */
+ if (pnetwork->network.Reserved[0] == 2) /* Probe Request */
+ {
+ p = rtw_get_ie(&pnetwork->network.IEs[0], _HT_CAPABILITY_IE_, &ht_ielen, pnetwork->network.IELength);
+ }
+ else
+ {
+ p = rtw_get_ie(&pnetwork->network.IEs[12], _HT_CAPABILITY_IE_, &ht_ielen, pnetwork->network.IELength-12);
+ }
+ if (p && ht_ielen>0)
+ {
+ struct rtw_ieee80211_ht_cap *pht_capie;
+ ht_cap = true;
+ pht_capie = (struct rtw_ieee80211_ht_cap *)(p+2);
+ memcpy(&mcs_rate , pht_capie->supp_mcs_set, 2);
+ bw_40MHz = (le16_to_cpu(pht_capie->cap_info) & IEEE80211_HT_CAP_SUP_WIDTH) ? 1 : 0;
+ short_GI = (le16_to_cpu(pht_capie->cap_info) & (IEEE80211_HT_CAP_SGI_20 | IEEE80211_HT_CAP_SGI_40)) ? 1 : 0;
+ }
+
+ /* Add the protocol name */
+ iwe.cmd = SIOCGIWNAME;
+ if ((rtw_is_cckratesonly_included((u8 *)&pnetwork->network.SupportedRates)) == true)
+ {
+ if (ht_cap == true)
+ snprintf(iwe.u.name, IFNAMSIZ, "IEEE 802.11bn");
+ else
+ snprintf(iwe.u.name, IFNAMSIZ, "IEEE 802.11b");
+ }
+ else if ((rtw_is_cckrates_included((u8 *)&pnetwork->network.SupportedRates)) == true)
+ {
+ if (ht_cap == true)
+ snprintf(iwe.u.name, IFNAMSIZ, "IEEE 802.11bgn");
+ else
+ snprintf(iwe.u.name, IFNAMSIZ, "IEEE 802.11bg");
+ }
+ else
+ {
+ if (pnetwork->network.Configuration.DSConfig > 14)
+ {
+ if (vht_cap == true)
+ snprintf(iwe.u.name, IFNAMSIZ, "IEEE 802.11AC");
+ else if (ht_cap == true)
+ snprintf(iwe.u.name, IFNAMSIZ, "IEEE 802.11an");
+ else
+ snprintf(iwe.u.name, IFNAMSIZ, "IEEE 802.11a");
+ }
+ else
+ {
+ if (ht_cap == true)
+ snprintf(iwe.u.name, IFNAMSIZ, "IEEE 802.11gn");
+ else
+ snprintf(iwe.u.name, IFNAMSIZ, "IEEE 802.11g");
+ }
+ }
+
+ start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_CHAR_LEN);
+
+ /* Add mode */
+ if (pnetwork->network.Reserved[0] == 2) /* Probe Request */
+ {
+ cap = 0;
+ }
+ else
+ {
+ __le16 le_tmp;
+
+ iwe.cmd = SIOCGIWMODE;
+ memcpy((u8 *)&le_tmp, rtw_get_capability_from_ie(pnetwork->network.IEs), 2);
+ cap = le16_to_cpu(le_tmp);
+ }
+
+ if (cap & (WLAN_CAPABILITY_IBSS |WLAN_CAPABILITY_BSS)) {
+ if (cap & WLAN_CAPABILITY_BSS)
+ iwe.u.mode = IW_MODE_MASTER;
+ else
+ iwe.u.mode = IW_MODE_ADHOC;
+
+ start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_UINT_LEN);
+ }
+
+ if (pnetwork->network.Configuration.DSConfig<1 /*|| pnetwork->network.Configuration.DSConfig>14*/)
+ pnetwork->network.Configuration.DSConfig = 1;
+
+ /* Add frequency/channel */
+ iwe.cmd = SIOCGIWFREQ;
+ iwe.u.freq.m = rtw_ch2freq(pnetwork->network.Configuration.DSConfig) * 100000;
+ iwe.u.freq.e = 1;
+ iwe.u.freq.i = pnetwork->network.Configuration.DSConfig;
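+ /* iw_freq encodes value = m * 10^e; e.g. channel 6: m = 2437 * 100000, e = 1 -> 2.437 GHz */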
+ start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_FREQ_LEN);
+
+ /* Add encryption capability */
+ iwe.cmd = SIOCGIWENCODE;
+ if (cap & WLAN_CAPABILITY_PRIVACY)
+ iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
+ else
+ iwe.u.data.flags = IW_ENCODE_DISABLED;
+ iwe.u.data.length = 0;
+ start = iwe_stream_add_point(info, start, stop, &iwe, pnetwork->network.Ssid.Ssid);
+
+ /*Add basic and extended rates */
+ max_rate = 0;
+ custom = kzalloc(MAX_CUSTOM_LEN, GFP_ATOMIC);
+ if (!custom)
+ return start;
+ p = custom;
+ p += snprintf(p, MAX_CUSTOM_LEN - (p - custom), " Rates (Mb/s): ");
+ while (pnetwork->network.SupportedRates[i]!= 0)
+ {
+ rate = pnetwork->network.SupportedRates[i]&0x7F;
+ if (rate > max_rate)
+ max_rate = rate;
+ p += snprintf(p, MAX_CUSTOM_LEN - (p - custom),
+ "%d%s ", rate >> 1, (rate & 1) ? ".5" : "");
+ i++;
+ }
+
+ if (vht_cap == true) {
+ max_rate = vht_data_rate;
+ }
+ else if (ht_cap == true)
+ {
+ if (mcs_rate&0x8000)/* MCS15 */
+ {
+ max_rate = (bw_40MHz) ? ((short_GI)?300:270):((short_GI)?144:130);
+
+ }
+ else if (mcs_rate&0x0080)/* MCS7 */
+ {
+ max_rate = (bw_40MHz) ? ((short_GI)?150:135):((short_GI)?72:65);
+ }
+ else/* default MCS7 */
+ {
+ /* DBG_871X("wx_get_scan, mcs_rate_bitmap = 0x%x\n", mcs_rate); */
+ max_rate = (bw_40MHz) ? ((short_GI)?150:135):((short_GI)?72:65);
+ }
+
+ max_rate = max_rate*2;/* Mbps/2; */
+ }
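+ /*
+ * SupportedRates entries are in 500 kb/s units, while the HT estimate
+ * above is in Mb/s, so it is doubled to match the common "* 500000"
+ * conversion below.
+ */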
+
+ iwe.cmd = SIOCGIWRATE;
+ iwe.u.bitrate.fixed = iwe.u.bitrate.disabled = 0;
+ iwe.u.bitrate.value = max_rate * 500000;
+ start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_PARAM_LEN);
+
+ /* parsing WPA/WPA2 IE */
+ if (pnetwork->network.Reserved[0] != 2) /* Probe Request */
+ {
+ u8 *buf;
+ u8 wpa_ie[255], rsn_ie[255];
+ u16 wpa_len = 0, rsn_len = 0;
+ u8 *p;
+ sint out_len = 0;
+ out_len =rtw_get_sec_ie(pnetwork->network.IEs , pnetwork->network.IELength, rsn_ie,&rsn_len, wpa_ie,&wpa_len);
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("rtw_wx_get_scan: ssid =%s\n", pnetwork->network.Ssid.Ssid));
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("rtw_wx_get_scan: wpa_len =%d rsn_len =%d\n", wpa_len, rsn_len));
+
+ buf = kzalloc(MAX_WPA_IE_LEN*2, GFP_KERNEL);
+ if (!buf)
+ return start;
+ if (wpa_len > 0) {
+ p =buf;
+ p += sprintf(p, "wpa_ie =");
+ for (i = 0; i < wpa_len; i++) {
+ p += sprintf(p, "%02x", wpa_ie[i]);
+ }
+
+ if (wpa_len > 100) {
+ printk("-----------------Len %d----------------\n", wpa_len);
+ for (i = 0; i < wpa_len; i++) {
+ printk("%02x ", wpa_ie[i]);
+ }
+ printk("\n");
+ printk("-----------------Len %d----------------\n", wpa_len);
+ }
+
+ memset(&iwe, 0, sizeof(iwe));
+ iwe.cmd = IWEVCUSTOM;
+ iwe.u.data.length = strlen(buf);
+ start = iwe_stream_add_point(info, start, stop, &iwe, buf);
+
+ memset(&iwe, 0, sizeof(iwe));
+ iwe.cmd =IWEVGENIE;
+ iwe.u.data.length = wpa_len;
+ start = iwe_stream_add_point(info, start, stop, &iwe, wpa_ie);
+ }
+ if (rsn_len > 0) {
+ p = buf;
+ memset(buf, 0, MAX_WPA_IE_LEN*2);
+ p += sprintf(p, "rsn_ie =");
+ for (i = 0; i < rsn_len; i++)
+ p += sprintf(p, "%02x", rsn_ie[i]);
+ memset(&iwe, 0, sizeof(iwe));
+ iwe.cmd = IWEVCUSTOM;
+ iwe.u.data.length = strlen(buf);
+ start = iwe_stream_add_point(info, start, stop, &iwe, buf);
+
+ memset(&iwe, 0, sizeof(iwe));
+ iwe.cmd =IWEVGENIE;
+ iwe.u.data.length = rsn_len;
+ start = iwe_stream_add_point(info, start, stop, &iwe, rsn_ie);
+ }
+ kfree(buf);
+ }
+
+ { /* parsing WPS IE */
+ uint cnt = 0, total_ielen;
+ u8 *wpsie_ptr = NULL;
+ uint wps_ielen = 0;
+
+ u8 *ie_ptr = pnetwork->network.IEs + ie_offset;
+ total_ielen = pnetwork->network.IELength - ie_offset;
+
+ if (pnetwork->network.Reserved[0] == 2) /* Probe Request */
+ {
+ ie_ptr = pnetwork->network.IEs;
+ total_ielen = pnetwork->network.IELength;
+ }
+ else /* Beacon or Probe Response */
+ {
+ ie_ptr = pnetwork->network.IEs + _FIXED_IE_LENGTH_;
+ total_ielen = pnetwork->network.IELength - _FIXED_IE_LENGTH_;
+ }
+
+ while (cnt < total_ielen)
+ {
+ if (rtw_is_wps_ie(&ie_ptr[cnt], &wps_ielen) && (wps_ielen>2))
+ {
+ wpsie_ptr = &ie_ptr[cnt];
+ iwe.cmd =IWEVGENIE;
+ iwe.u.data.length = (u16)wps_ielen;
+ start = iwe_stream_add_point(info, start, stop, &iwe, wpsie_ptr);
+ }
+ cnt+=ie_ptr[cnt+1]+2; /* goto next */
+ }
+ }
+
+ /* Add quality statistics */
+ iwe.cmd = IWEVQUAL;
+ iwe.u.qual.updated = IW_QUAL_QUAL_UPDATED | IW_QUAL_LEVEL_UPDATED
+ #if defined(CONFIG_SIGNAL_DISPLAY_DBM) && defined(CONFIG_BACKGROUND_NOISE_MONITOR)
+ | IW_QUAL_NOISE_UPDATED
+ #else
+ | IW_QUAL_NOISE_INVALID
+ #endif
+ #ifdef CONFIG_SIGNAL_DISPLAY_DBM
+ | IW_QUAL_DBM
+ #endif
+ ;
+
+ if (check_fwstate(pmlmepriv, _FW_LINKED) == true &&
+ is_same_network(&pmlmepriv->cur_network.network, &pnetwork->network, 0)) {
+ ss = padapter->recvpriv.signal_strength;
+ sq = padapter->recvpriv.signal_qual;
+ } else {
+ ss = pnetwork->network.PhyInfo.SignalStrength;
+ sq = pnetwork->network.PhyInfo.SignalQuality;
+ }
+
+
+ #ifdef CONFIG_SIGNAL_DISPLAY_DBM
+ iwe.u.qual.level = (u8) translate_percentage_to_dbm(ss);/* dbm */
+ #else
+ #ifdef CONFIG_SKIP_SIGNAL_SCALE_MAPPING
+ {
+ /* Do signal scale mapping when using percentage as the unit of signal strength, since the scale mapping is skipped in odm */
+
+ struct hal_com_data *pHal = GET_HAL_DATA(padapter);
+
+ iwe.u.qual.level = (u8)odm_SignalScaleMapping(&pHal->odmpriv, ss);
+ }
+ #else
+ iwe.u.qual.level = (u8)ss;/* */
+ #endif
+ #endif
+
+ iwe.u.qual.qual = (u8)sq; /* signal quality */
+
+ #if defined(CONFIG_SIGNAL_DISPLAY_DBM) && defined(CONFIG_BACKGROUND_NOISE_MONITOR)
+ {
+ s16 tmp_noise = 0;
+ rtw_hal_get_odm_var(padapter, HAL_ODM_NOISE_MONITOR,&(pnetwork->network.Configuration.DSConfig), &(tmp_noise));
+ iwe.u.qual.noise = tmp_noise ;
+ }
+ #else
+ iwe.u.qual.noise = 0; /* noise level */
+ #endif
+
+ /* DBG_871X("iqual =%d, ilevel =%d, inoise =%d, iupdated =%d\n", iwe.u.qual.qual, iwe.u.qual.level , iwe.u.qual.noise, iwe.u.qual.updated); */
+
+ start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_QUAL_LEN);
+
+ {
+ u8 *buf;
+ u8 *p, *pos;
+
+ buf = kzalloc(MAX_WPA_IE_LEN, GFP_KERNEL);
+ if (!buf)
+ goto exit;
+ p = buf;
+ pos = pnetwork->network.Reserved;
+ p += sprintf(p, "fm =%02X%02X", pos[1], pos[0]);
+ memset(&iwe, 0, sizeof(iwe));
+ iwe.cmd = IWEVCUSTOM;
+ iwe.u.data.length = strlen(buf);
+ start = iwe_stream_add_point(info, start, stop, &iwe, buf);
+ kfree(buf);
+ }
+exit:
+ kfree(custom);
+
+ return start;
+}
+
+static int wpa_set_auth_algs(struct net_device *dev, u32 value)
+{
+ struct adapter *padapter = (struct adapter *) rtw_netdev_priv(dev);
+ int ret = 0;
+
+ if ((value & AUTH_ALG_SHARED_KEY) && (value & AUTH_ALG_OPEN_SYSTEM))
+ {
+ DBG_871X("wpa_set_auth_algs, AUTH_ALG_SHARED_KEY and AUTH_ALG_OPEN_SYSTEM [value:0x%x]\n", value);
+ padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption1Enabled;
+ padapter->securitypriv.ndisauthtype = Ndis802_11AuthModeAutoSwitch;
+ padapter->securitypriv.dot11AuthAlgrthm = dot11AuthAlgrthm_Auto;
+ }
+ else if (value & AUTH_ALG_SHARED_KEY)
+ {
+ DBG_871X("wpa_set_auth_algs, AUTH_ALG_SHARED_KEY [value:0x%x]\n", value);
+ padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption1Enabled;
+
+ padapter->securitypriv.ndisauthtype = Ndis802_11AuthModeShared;
+ padapter->securitypriv.dot11AuthAlgrthm = dot11AuthAlgrthm_Shared;
+ }
+ else if (value & AUTH_ALG_OPEN_SYSTEM)
+ {
+ DBG_871X("wpa_set_auth_algs, AUTH_ALG_OPEN_SYSTEM\n");
+ /* padapter->securitypriv.ndisencryptstatus = Ndis802_11EncryptionDisabled; */
+ if (padapter->securitypriv.ndisauthtype < Ndis802_11AuthModeWPAPSK)
+ {
+ padapter->securitypriv.ndisauthtype = Ndis802_11AuthModeOpen;
+ padapter->securitypriv.dot11AuthAlgrthm = dot11AuthAlgrthm_Open;
+ }
+
+ }
+ else if (value & AUTH_ALG_LEAP)
+ {
+ DBG_871X("wpa_set_auth_algs, AUTH_ALG_LEAP\n");
+ }
+ else
+ {
+ DBG_871X("wpa_set_auth_algs, error!\n");
+ ret = -EINVAL;
+ }
+
+ return ret;
+
+}
+
+static int wpa_set_encryption(struct net_device *dev, struct ieee_param *param, u32 param_len)
+{
+ int ret = 0;
+ u32 wep_key_idx, wep_key_len, wep_total_len;
+ struct ndis_802_11_wep *pwep = NULL;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
+
+ param->u.crypt.err = 0;
+ param->u.crypt.alg[IEEE_CRYPT_ALG_NAME_LEN - 1] = '\0';
+
+ if (param_len < (u32) ((u8 *) param->u.crypt.key - (u8 *) param) + param->u.crypt.key_len)
+ {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ if (param->sta_addr[0] == 0xff && param->sta_addr[1] == 0xff &&
+ param->sta_addr[2] == 0xff && param->sta_addr[3] == 0xff &&
+ param->sta_addr[4] == 0xff && param->sta_addr[5] == 0xff)
+ {
+
+ if (param->u.crypt.idx >= WEP_KEYS ||
+ param->u.crypt.idx >= BIP_MAX_KEYID) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ }
+ else
+ {
+ {
+ ret = -EINVAL;
+ goto exit;
+ }
+ }
+
+ if (strcmp(param->u.crypt.alg, "WEP") == 0)
+ {
+ RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_err_, ("wpa_set_encryption, crypt.alg = WEP\n"));
+ DBG_871X("wpa_set_encryption, crypt.alg = WEP\n");
+
+ padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption1Enabled;
+ padapter->securitypriv.dot11PrivacyAlgrthm = _WEP40_;
+ padapter->securitypriv.dot118021XGrpPrivacy = _WEP40_;
+
+ wep_key_idx = param->u.crypt.idx;
+ wep_key_len = param->u.crypt.key_len;
+
+ RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_info_, ("(1)wep_key_idx =%d\n", wep_key_idx));
+ DBG_871X("(1)wep_key_idx =%d\n", wep_key_idx);
+
+ if (wep_key_idx > WEP_KEYS)
+ return -EINVAL;
+
+ RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_info_, ("(2)wep_key_idx =%d\n", wep_key_idx));
+
+ if (wep_key_len > 0)
+ {
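+ /* WEP-40 uses 5-byte keys and WEP-104 uses 13-byte keys; round the requested length up to the nearest of the two */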
+ wep_key_len = wep_key_len <= 5 ? 5 : 13;
+ wep_total_len = wep_key_len + FIELD_OFFSET(struct ndis_802_11_wep, KeyMaterial);
+ pwep =(struct ndis_802_11_wep *) rtw_malloc(wep_total_len);
+ if (pwep == NULL) {
+ RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_err_, (" wpa_set_encryption: pwep allocate fail !!!\n"));
+ goto exit;
+ }
+
+ memset(pwep, 0, wep_total_len);
+
+ pwep->KeyLength = wep_key_len;
+ pwep->Length = wep_total_len;
+
+ if (wep_key_len == 13)
+ {
+ padapter->securitypriv.dot11PrivacyAlgrthm = _WEP104_;
+ padapter->securitypriv.dot118021XGrpPrivacy = _WEP104_;
+ }
+ }
+ else {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ pwep->KeyIndex = wep_key_idx;
+ pwep->KeyIndex |= 0x80000000;
+
+ memcpy(pwep->KeyMaterial, param->u.crypt.key, pwep->KeyLength);
+
+ if (param->u.crypt.set_tx)
+ {
+ DBG_871X("wep, set_tx = 1\n");
+
+ if (rtw_set_802_11_add_wep(padapter, pwep) == (u8)_FAIL)
+ {
+ ret = -EOPNOTSUPP ;
+ }
+ }
+ else
+ {
+ DBG_871X("wep, set_tx = 0\n");
+
+ /* don't update "psecuritypriv->dot11PrivacyAlgrthm" and */
+ /* "psecuritypriv->dot11PrivacyKeyIndex = keyid", but we can still rtw_set_key to fw/cam */
+
+ if (wep_key_idx >= WEP_KEYS) {
+ ret = -EOPNOTSUPP ;
+ goto exit;
+ }
+
+ memcpy(&(psecuritypriv->dot11DefKey[wep_key_idx].skey[0]), pwep->KeyMaterial, pwep->KeyLength);
+ psecuritypriv->dot11DefKeylen[wep_key_idx]=pwep->KeyLength;
+ rtw_set_key(padapter, psecuritypriv, wep_key_idx, 0, true);
+ }
+
+ goto exit;
+ }
+
+ if (padapter->securitypriv.dot11AuthAlgrthm == dot11AuthAlgrthm_8021X) /* 802_1x */
+ {
+ struct sta_info * psta,*pbcmc_sta;
+ struct sta_priv * pstapriv = &padapter->stapriv;
+
+ if (check_fwstate(pmlmepriv, WIFI_STATION_STATE | WIFI_MP_STATE) == true) /* sta mode */
+ {
+ psta = rtw_get_stainfo(pstapriv, get_bssid(pmlmepriv));
+ if (psta == NULL) {
+ /* DEBUG_ERR(("Set wpa_set_encryption: Obtain Sta_info fail\n")); */
+ }
+ else
+ {
+ /* Jeff: don't disable ieee8021x_blocked while clearing key */
+ if (strcmp(param->u.crypt.alg, "none") != 0)
+ psta->ieee8021x_blocked = false;
+
+ if ((padapter->securitypriv.ndisencryptstatus == Ndis802_11Encryption2Enabled)||
+ (padapter->securitypriv.ndisencryptstatus == Ndis802_11Encryption3Enabled))
+ {
+ psta->dot118021XPrivacy = padapter->securitypriv.dot11PrivacyAlgrthm;
+ }
+
+ if (param->u.crypt.set_tx == 1)/* pairwise key */
+ {
+ memcpy(psta->dot118021x_UncstKey.skey, param->u.crypt.key, (param->u.crypt.key_len>16 ?16:param->u.crypt.key_len));
+
+ if (strcmp(param->u.crypt.alg, "TKIP") == 0)/* set mic key */
+ {
+ /* DEBUG_ERR(("\nset key length :param->u.crypt.key_len =%d\n", param->u.crypt.key_len)); */
+ memcpy(psta->dot11tkiptxmickey.skey, &(param->u.crypt.key[16]), 8);
+ memcpy(psta->dot11tkiprxmickey.skey, &(param->u.crypt.key[24]), 8);
+
+ padapter->securitypriv.busetkipkey =false;
+ /* _set_timer(&padapter->securitypriv.tkip_timer, 50); */
+ }
+
+ /* DEBUG_ERR((" param->u.crypt.key_len =%d\n", param->u.crypt.key_len)); */
+ DBG_871X(" ~~~~set sta key:unicastkey\n");
+
+ rtw_setstakey_cmd(padapter, psta, true, true);
+ }
+ else/* group key */
+ {
+ if (strcmp(param->u.crypt.alg, "TKIP") == 0 || strcmp(param->u.crypt.alg, "CCMP") == 0)
+ {
+ memcpy(padapter->securitypriv.dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len>16 ?16:param->u.crypt.key_len));
+ /* only the TKIP group key needs to install this (the TX/RX MIC keys) */
+ if (param->u.crypt.key_len > 16)
+ {
+ memcpy(padapter->securitypriv.dot118021XGrptxmickey[param->u.crypt.idx].skey,&(param->u.crypt.key[16]), 8);
+ memcpy(padapter->securitypriv.dot118021XGrprxmickey[param->u.crypt.idx].skey,&(param->u.crypt.key[24]), 8);
+ }
+ padapter->securitypriv.binstallGrpkey = true;
+ /* DEBUG_ERR((" param->u.crypt.key_len =%d\n", param->u.crypt.key_len)); */
+ DBG_871X(" ~~~~set sta key:groupkey\n");
+
+ padapter->securitypriv.dot118021XGrpKeyid = param->u.crypt.idx;
+
+ rtw_set_key(padapter,&padapter->securitypriv, param->u.crypt.idx, 1, true);
+ }
+ else if (strcmp(param->u.crypt.alg, "BIP") == 0)
+ {
+ /* printk("BIP key_len =%d , index =%d @@@@@@@@@@@@@@@@@@\n", param->u.crypt.key_len, param->u.crypt.idx); */
+ /* save the IGTK key, length 16 bytes */
+ memcpy(padapter->securitypriv.dot11wBIPKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len>16 ?16:param->u.crypt.key_len));
+ /*printk("IGTK key below:\n");
+ for (no = 0;no<16;no++)
+ printk(" %02x ", padapter->securitypriv.dot11wBIPKey[param->u.crypt.idx].skey[no]);
+ printk("\n");*/
+ padapter->securitypriv.dot11wBIPKeyid = param->u.crypt.idx;
+ padapter->securitypriv.binstallBIPkey = true;
+ DBG_871X(" ~~~~set sta key:IGKT\n");
+ }
+ }
+ }
+
+ pbcmc_sta =rtw_get_bcmc_stainfo(padapter);
+ if (pbcmc_sta == NULL)
+ {
+ /* DEBUG_ERR(("Set OID_802_11_ADD_KEY: bcmc stainfo is null\n")); */
+ }
+ else
+ {
+ /* Jeff: don't disable ieee8021x_blocked while clearing key */
+ if (strcmp(param->u.crypt.alg, "none") != 0)
+ pbcmc_sta->ieee8021x_blocked = false;
+
+ if ((padapter->securitypriv.ndisencryptstatus == Ndis802_11Encryption2Enabled)||
+ (padapter->securitypriv.ndisencryptstatus == Ndis802_11Encryption3Enabled))
+ {
+ pbcmc_sta->dot118021XPrivacy = padapter->securitypriv.dot11PrivacyAlgrthm;
+ }
+ }
+ }
+ else if (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE)) /* adhoc mode */
+ {
+ }
+ }
+
+exit:
+
+ kfree((u8 *)pwep);
+ return ret;
+}
+
+static int rtw_set_wpa_ie(struct adapter *padapter, char *pie, unsigned short ielen)
+{
+ u8 *buf = NULL, *pos = NULL;
+ int group_cipher = 0, pairwise_cipher = 0;
+ int ret = 0;
+ u8 null_addr[]= {0, 0, 0, 0, 0, 0};
+
+ if ((ielen > MAX_WPA_IE_LEN) || (pie == NULL)) {
+ _clr_fwstate_(&padapter->mlmepriv, WIFI_UNDER_WPS);
+ if (pie == NULL)
+ return ret;
+ else
+ return -EINVAL;
+ }
+
+ if (ielen)
+ {
+ buf = rtw_zmalloc(ielen);
+ if (buf == NULL) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ memcpy(buf, pie , ielen);
+
+ /* dump */
+ {
+ int i;
+ DBG_871X("\n wpa_ie(length:%d):\n", ielen);
+ for (i = 0;i<ielen;i =i+8)
+ DBG_871X("0x%.2x 0x%.2x 0x%.2x 0x%.2x 0x%.2x 0x%.2x 0x%.2x 0x%.2x\n", buf[i], buf[i+1], buf[i+2], buf[i+3], buf[i+4], buf[i+5], buf[i+6], buf[i+7]);
+ }
+
+ pos = buf;
+ if (ielen < RSN_HEADER_LEN) {
+ RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_err_, ("Ie len too short %d\n", ielen));
+ ret = -1;
+ goto exit;
+ }
+
+ if (rtw_parse_wpa_ie(buf, ielen, &group_cipher, &pairwise_cipher, NULL) == _SUCCESS)
+ {
+ padapter->securitypriv.dot11AuthAlgrthm = dot11AuthAlgrthm_8021X;
+ padapter->securitypriv.ndisauthtype =Ndis802_11AuthModeWPAPSK;
+ memcpy(padapter->securitypriv.supplicant_ie, &buf[0], ielen);
+ }
+
+ if (rtw_parse_wpa2_ie(buf, ielen, &group_cipher, &pairwise_cipher, NULL) == _SUCCESS)
+ {
+ padapter->securitypriv.dot11AuthAlgrthm = dot11AuthAlgrthm_8021X;
+ padapter->securitypriv.ndisauthtype =Ndis802_11AuthModeWPA2PSK;
+ memcpy(padapter->securitypriv.supplicant_ie, &buf[0], ielen);
+ }
+
+ if (group_cipher == 0)
+ {
+ group_cipher = WPA_CIPHER_NONE;
+ }
+ if (pairwise_cipher == 0)
+ {
+ pairwise_cipher = WPA_CIPHER_NONE;
+ }
+
+ switch (group_cipher)
+ {
+ case WPA_CIPHER_NONE:
+ padapter->securitypriv.dot118021XGrpPrivacy = _NO_PRIVACY_;
+ padapter->securitypriv.ndisencryptstatus =Ndis802_11EncryptionDisabled;
+ break;
+ case WPA_CIPHER_WEP40:
+ padapter->securitypriv.dot118021XGrpPrivacy = _WEP40_;
+ padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption1Enabled;
+ break;
+ case WPA_CIPHER_TKIP:
+ padapter->securitypriv.dot118021XGrpPrivacy = _TKIP_;
+ padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption2Enabled;
+ break;
+ case WPA_CIPHER_CCMP:
+ padapter->securitypriv.dot118021XGrpPrivacy = _AES_;
+ padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption3Enabled;
+ break;
+ case WPA_CIPHER_WEP104:
+ padapter->securitypriv.dot118021XGrpPrivacy = _WEP104_;
+ padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption1Enabled;
+ break;
+ }
+
+ switch (pairwise_cipher)
+ {
+ case WPA_CIPHER_NONE:
+ padapter->securitypriv.dot11PrivacyAlgrthm = _NO_PRIVACY_;
+ padapter->securitypriv.ndisencryptstatus =Ndis802_11EncryptionDisabled;
+ break;
+ case WPA_CIPHER_WEP40:
+ padapter->securitypriv.dot11PrivacyAlgrthm = _WEP40_;
+ padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption1Enabled;
+ break;
+ case WPA_CIPHER_TKIP:
+ padapter->securitypriv.dot11PrivacyAlgrthm = _TKIP_;
+ padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption2Enabled;
+ break;
+ case WPA_CIPHER_CCMP:
+ padapter->securitypriv.dot11PrivacyAlgrthm = _AES_;
+ padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption3Enabled;
+ break;
+ case WPA_CIPHER_WEP104:
+ padapter->securitypriv.dot11PrivacyAlgrthm = _WEP104_;
+ padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption1Enabled;
+ break;
+ }
+
+ _clr_fwstate_(&padapter->mlmepriv, WIFI_UNDER_WPS);
+ {/* set wps_ie */
+ u16 cnt = 0;
+ u8 eid, wps_oui[4]={0x0, 0x50, 0xf2, 0x04};
+
+ while (cnt < ielen)
+ {
+ eid = buf[cnt];
+
+ if ((eid == _VENDOR_SPECIFIC_IE_) && (!memcmp(&buf[cnt+2], wps_oui, 4)))
+ {
+ DBG_871X("SET WPS_IE\n");
+
+ padapter->securitypriv.wps_ie_len = ((buf[cnt+1]+2) < MAX_WPS_IE_LEN) ? (buf[cnt+1]+2):MAX_WPS_IE_LEN;
+
+ memcpy(padapter->securitypriv.wps_ie, &buf[cnt], padapter->securitypriv.wps_ie_len);
+
+ set_fwstate(&padapter->mlmepriv, WIFI_UNDER_WPS);
+
+ cnt += buf[cnt+1]+2;
+
+ break;
+ } else {
+ cnt += buf[cnt+1]+2; /* goto next */
+ }
+ }
+ }
+ }
+
+ /* TKIP and AES disallow multicast packets until installing group key */
+ if (padapter->securitypriv.dot11PrivacyAlgrthm == _TKIP_
+ || padapter->securitypriv.dot11PrivacyAlgrthm == _TKIP_WTMIC_
+ || padapter->securitypriv.dot11PrivacyAlgrthm == _AES_)
+ /* WPS open needs multicast enabled */
+ /* check_fwstate(&padapter->mlmepriv, WIFI_UNDER_WPS) == true) */
+ rtw_hal_set_hwreg(padapter, HW_VAR_OFF_RCR_AM, null_addr);
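+ /* HW_VAR_OFF_RCR_AM clears the RCR accept-multicast (AM) bit, so multicast is dropped until the group key is installed */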
+
+ RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_info_,
+ ("rtw_set_wpa_ie: pairwise_cipher = 0x%08x padapter->securitypriv.ndisencryptstatus =%d padapter->securitypriv.ndisauthtype =%d\n",
+ pairwise_cipher, padapter->securitypriv.ndisencryptstatus, padapter->securitypriv.ndisauthtype));
+
+exit:
+
+ kfree(buf);
+
+ return ret;
+}
+
+static int rtw_wx_get_name(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ u32 ht_ielen = 0;
+ char *p;
+ u8 ht_cap =false, vht_cap =false;
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct wlan_bssid_ex *pcur_bss = &pmlmepriv->cur_network.network;
+ NDIS_802_11_RATES_EX* prates = NULL;
+
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("cmd_code =%x\n", info->cmd));
+
+ if (check_fwstate(pmlmepriv, _FW_LINKED|WIFI_ADHOC_MASTER_STATE) == true) {
+ /* parsing HT_CAP_IE */
+ p = rtw_get_ie(&pcur_bss->IEs[12], _HT_CAPABILITY_IE_, &ht_ielen, pcur_bss->IELength-12);
+ if (p && ht_ielen>0)
+ {
+ ht_cap = true;
+ }
+
+ prates = &pcur_bss->SupportedRates;
+
+ if (rtw_is_cckratesonly_included((u8 *)prates) == true)
+ {
+ if (ht_cap == true)
+ snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11bn");
+ else
+ snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11b");
+ }
+ else if ((rtw_is_cckrates_included((u8 *)prates)) == true)
+ {
+ if (ht_cap == true)
+ snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11bgn");
+ else
+ snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11bg");
+ }
+ else
+ {
+ if (pcur_bss->Configuration.DSConfig > 14)
+ {
+ if (vht_cap == true)
+ snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11AC");
+ else if (ht_cap == true)
+ snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11an");
+ else
+ snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11a");
+ }
+ else
+ {
+ if (ht_cap == true)
+ snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11gn");
+ else
+ snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11g");
+ }
+ }
+ }
+ else
+ {
+ /* prates = &padapter->registrypriv.dev_network.SupportedRates; */
+ /* snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11g"); */
+ snprintf(wrqu->name, IFNAMSIZ, "unassociated");
+ }
+ return 0;
+}
+
+static int rtw_wx_set_freq(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_notice_, ("+rtw_wx_set_freq\n"));
+
+ return 0;
+}
+
+static int rtw_wx_get_freq(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct wlan_bssid_ex *pcur_bss = &pmlmepriv->cur_network.network;
+
+ if (check_fwstate(pmlmepriv, _FW_LINKED) == true)
+ {
+ /* wrqu->freq.m = ieee80211_wlan_frequencies[pcur_bss->Configuration.DSConfig-1] * 100000; */
+ wrqu->freq.m = rtw_ch2freq(pcur_bss->Configuration.DSConfig) * 100000;
+ wrqu->freq.e = 1;
+ wrqu->freq.i = pcur_bss->Configuration.DSConfig;
+
+ }
+ else {
+ wrqu->freq.m = rtw_ch2freq(padapter->mlmeextpriv.cur_channel) * 100000;
+ wrqu->freq.e = 1;
+ wrqu->freq.i = padapter->mlmeextpriv.cur_channel;
+ }
+
+ return 0;
+}
+
+static int rtw_wx_set_mode(struct net_device *dev, struct iw_request_info *a,
+ union iwreq_data *wrqu, char *b)
+{
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ enum NDIS_802_11_NETWORK_INFRASTRUCTURE networkType ;
+ int ret = 0;
+
+ if (_FAIL == rtw_pwr_wakeup(padapter)) {
+ ret = -EPERM;
+ goto exit;
+ }
+
+ if (padapter->hw_init_completed ==false) {
+ ret = -EPERM;
+ goto exit;
+ }
+
+ switch (wrqu->mode)
+ {
+ case IW_MODE_AUTO:
+ networkType = Ndis802_11AutoUnknown;
+ DBG_871X("set_mode = IW_MODE_AUTO\n");
+ break;
+ case IW_MODE_ADHOC:
+ networkType = Ndis802_11IBSS;
+ DBG_871X("set_mode = IW_MODE_ADHOC\n");
+ break;
+ case IW_MODE_MASTER:
+ networkType = Ndis802_11APMode;
+ DBG_871X("set_mode = IW_MODE_MASTER\n");
+ /* rtw_setopmode_cmd(padapter, networkType, true); */
+ break;
+ case IW_MODE_INFRA:
+ networkType = Ndis802_11Infrastructure;
+ DBG_871X("set_mode = IW_MODE_INFRA\n");
+ break;
+
+ default:
+ ret = -EINVAL;
+ RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_err_, ("\n Mode: %s is not supported \n", iw_operation_mode[wrqu->mode]));
+ goto exit;
+ }
+
+/*
+ if (Ndis802_11APMode == networkType)
+ {
+ rtw_setopmode_cmd(padapter, networkType, true);
+ }
+ else
+ {
+ rtw_setopmode_cmd(padapter, Ndis802_11AutoUnknown, true);
+ }
+*/
+
+ if (rtw_set_802_11_infrastructure_mode(padapter, networkType) ==false) {
+
+ ret = -EPERM;
+ goto exit;
+
+ }
+
+ rtw_setopmode_cmd(padapter, networkType, true);
+
+exit:
+ return ret;
+}
+
+static int rtw_wx_get_mode(struct net_device *dev, struct iw_request_info *a,
+ union iwreq_data *wrqu, char *b)
+{
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, (" rtw_wx_get_mode\n"));
+
+ if (check_fwstate(pmlmepriv, WIFI_STATION_STATE) == true)
+ {
+ wrqu->mode = IW_MODE_INFRA;
+ }
+ else if ((check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) == true) ||
+ (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) == true))
+
+ {
+ wrqu->mode = IW_MODE_ADHOC;
+ }
+ else if (check_fwstate(pmlmepriv, WIFI_AP_STATE) == true)
+ {
+ wrqu->mode = IW_MODE_MASTER;
+ }
+ else
+ {
+ wrqu->mode = IW_MODE_AUTO;
+ }
+ return 0;
+}
+
+
+static int rtw_wx_set_pmkid(struct net_device *dev,
+ struct iw_request_info *a,
+ union iwreq_data *wrqu, char *extra)
+{
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ u8 j, blInserted = false;
+ int intReturn = false;
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
+ struct iw_pmksa* pPMK = (struct iw_pmksa*) extra;
+ u8 strZeroMacAddress[ ETH_ALEN ] = { 0x00 };
+ u8 strIssueBssid[ ETH_ALEN ] = { 0x00 };
+
+ /*
+ * The BSSID information is in the bssid.sa_data array.
+ * If cmd is IW_PMKSA_FLUSH, the wpa_supplicant wants to clear all PMKID information.
+ * If cmd is IW_PMKSA_ADD, the wpa_supplicant wants to add a PMKID/BSSID to the driver.
+ * If cmd is IW_PMKSA_REMOVE, the wpa_supplicant wants to remove a PMKID/BSSID from the driver.
+ */
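+ /*
+ * The driver keeps a small PMKID cache; IW_PMKSA_ADD overwrites the entry
+ * with a matching BSSID or takes the next free slot, and the index wraps
+ * around once it reaches the cache size (16 entries here).
+ */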
+
+ memcpy(strIssueBssid, pPMK->bssid.sa_data, ETH_ALEN);
+ if (pPMK->cmd == IW_PMKSA_ADD)
+ {
+ DBG_871X("[rtw_wx_set_pmkid] IW_PMKSA_ADD!\n");
+ if (!memcmp(strIssueBssid, strZeroMacAddress, ETH_ALEN))
+ {
+ return(intReturn);
+ }
+ else
+ {
+ intReturn = true;
+ }
+ blInserted = false;
+
+ /* overwrite PMKID */
+ for (j = 0 ; j<NUM_PMKID_CACHE; j++)
+ {
+ if (!memcmp(psecuritypriv->PMKIDList[j].Bssid, strIssueBssid, ETH_ALEN))
+ { /* BSSID is matched, the same AP => rewrite with new PMKID. */
+
+ DBG_871X("[rtw_wx_set_pmkid] BSSID exists in the PMKList.\n");
+
+ memcpy(psecuritypriv->PMKIDList[j].PMKID, pPMK->pmkid, IW_PMKID_LEN);
+ psecuritypriv->PMKIDList[ j ].bUsed = true;
+ psecuritypriv->PMKIDIndex = j+1;
+ blInserted = true;
+ break;
+ }
+ }
+
+ if (!blInserted)
+ {
+ /* Find a new entry */
+ DBG_871X("[rtw_wx_set_pmkid] Use the new entry index = %d for this PMKID.\n",
+ psecuritypriv->PMKIDIndex);
+
+ memcpy(psecuritypriv->PMKIDList[psecuritypriv->PMKIDIndex].Bssid, strIssueBssid, ETH_ALEN);
+ memcpy(psecuritypriv->PMKIDList[psecuritypriv->PMKIDIndex].PMKID, pPMK->pmkid, IW_PMKID_LEN);
+
+ psecuritypriv->PMKIDList[ psecuritypriv->PMKIDIndex ].bUsed = true;
+ psecuritypriv->PMKIDIndex++ ;
+ if (psecuritypriv->PMKIDIndex == 16)
+ {
+ psecuritypriv->PMKIDIndex = 0;
+ }
+ }
+ }
+ else if (pPMK->cmd == IW_PMKSA_REMOVE)
+ {
+ DBG_871X("[rtw_wx_set_pmkid] IW_PMKSA_REMOVE!\n");
+ intReturn = true;
+ for (j = 0 ; j<NUM_PMKID_CACHE; j++)
+ {
+ if (!memcmp(psecuritypriv->PMKIDList[j].Bssid, strIssueBssid, ETH_ALEN))
+ { /* BSSID is matched, the same AP => Remove this PMKID information and reset it. */
+ memset(psecuritypriv->PMKIDList[ j ].Bssid, 0x00, ETH_ALEN);
+ psecuritypriv->PMKIDList[ j ].bUsed = false;
+ break;
+ }
+ }
+ }
+ else if (pPMK->cmd == IW_PMKSA_FLUSH)
+ {
+ DBG_871X("[rtw_wx_set_pmkid] IW_PMKSA_FLUSH!\n");
+ memset(&psecuritypriv->PMKIDList[ 0 ], 0x00, sizeof(RT_PMKID_LIST) * NUM_PMKID_CACHE);
+ psecuritypriv->PMKIDIndex = 0;
+ intReturn = true;
+ }
+ return intReturn;
+}
+
+static int rtw_wx_get_sens(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ {
+ wrqu->sens.value = 0;
+ wrqu->sens.fixed = 0; /* no auto select */
+ wrqu->sens.disabled = 1;
+ }
+ return 0;
+}
+
+static int rtw_wx_get_range(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ struct iw_range *range = (struct iw_range *)extra;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+
+ u16 val;
+ int i;
+
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("rtw_wx_get_range. cmd_code =%x\n", info->cmd));
+
+ wrqu->data.length = sizeof(*range);
+ memset(range, 0, sizeof(*range));
+
+ /* Let's try to keep this struct in the same order as in
+ * linux/include/wireless.h
+ */
+
+ /* TODO: See what values we can set, and remove the ones we can't
+ * set, or fill them with some default data.
+ */
+
+ /* ~5 Mb/s real (802.11b) */
+ range->throughput = 5 * 1000 * 1000;
+
+ /* signal level threshold range */
+
+ /* percent values between 0 and 100. */
+ range->max_qual.qual = 100;
+ range->max_qual.level = 100;
+ range->max_qual.noise = 100;
+ range->max_qual.updated = 7; /* Updated all three */
+
+
+ range->avg_qual.qual = 92; /* > 8% missed beacons is 'bad' */
+ /* TODO: Find real 'good' to 'bad' threshold value for RSSI */
+ range->avg_qual.level = 256 - 78;
+ range->avg_qual.noise = 0;
+ range->avg_qual.updated = 7; /* Updated all three */
+
+ range->num_bitrates = RATE_COUNT;
+
+ for (i = 0; i < RATE_COUNT && i < IW_MAX_BITRATES; i++) {
+ range->bitrate[i] = rtw_rates[i];
+ }
+
+ range->min_frag = MIN_FRAG_THRESHOLD;
+ range->max_frag = MAX_FRAG_THRESHOLD;
+
+ range->pm_capa = 0;
+
+ range->we_version_compiled = WIRELESS_EXT;
+ range->we_version_source = 16;
+
+ for (i = 0, val = 0; i < MAX_CHANNEL_NUM; i++) {
+
+ /* Include only legal frequencies for some countries */
+ if (pmlmeext->channel_set[i].ChannelNum != 0)
+ {
+ range->freq[val].i = pmlmeext->channel_set[i].ChannelNum;
+ range->freq[val].m = rtw_ch2freq(pmlmeext->channel_set[i].ChannelNum) * 100000;
+ range->freq[val].e = 1;
+ val++;
+ }
+
+ if (val == IW_MAX_FREQUENCIES)
+ break;
+ }
+
+ range->num_channels = val;
+ range->num_frequency = val;
+
+/* Commented by Albert 2009/10/13 */
+/* The following code provides the security capability to the network manager. */
+/* If the driver doesn't provide this capability to the network manager, */
+/* the WPA/WPA2 routers can't be chosen in the network manager. */
+
+/*
+#define IW_SCAN_CAPA_NONE 0x00
+#define IW_SCAN_CAPA_ESSID 0x01
+#define IW_SCAN_CAPA_BSSID 0x02
+#define IW_SCAN_CAPA_CHANNEL 0x04
+#define IW_SCAN_CAPA_MODE 0x08
+#define IW_SCAN_CAPA_RATE 0x10
+#define IW_SCAN_CAPA_TYPE 0x20
+#define IW_SCAN_CAPA_TIME 0x40
+*/
+
+ range->enc_capa = IW_ENC_CAPA_WPA|IW_ENC_CAPA_WPA2|
+ IW_ENC_CAPA_CIPHER_TKIP|IW_ENC_CAPA_CIPHER_CCMP;
+
+ range->scan_capa = IW_SCAN_CAPA_ESSID | IW_SCAN_CAPA_TYPE |IW_SCAN_CAPA_BSSID|
+ IW_SCAN_CAPA_CHANNEL|IW_SCAN_CAPA_MODE|IW_SCAN_CAPA_RATE;
+
+ return 0;
+}
+
+/* set bssid flow */
+/* s1. rtw_set_802_11_infrastructure_mode() */
+/* s2. rtw_set_802_11_authentication_mode() */
+/* s3. set_802_11_encryption_mode() */
+/* s4. rtw_set_802_11_bssid() */
+static int rtw_wx_set_wap(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *awrq,
+ char *extra)
+{
+ uint ret = 0;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct sockaddr *temp = (struct sockaddr *)awrq;
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct list_head *phead;
+ u8 *dst_bssid, *src_bssid;
+ struct __queue *queue = &(pmlmepriv->scanned_queue);
+ struct wlan_network *pnetwork = NULL;
+ enum NDIS_802_11_AUTHENTICATION_MODE authmode;
+
+ rtw_ps_deny(padapter, PS_DENY_JOIN);
+ if (_FAIL == rtw_pwr_wakeup(padapter))
+ {
+ ret = -1;
+ goto exit;
+ }
+
+ if (!padapter->bup) {
+ ret = -1;
+ goto exit;
+ }
+
+
+ if (temp->sa_family != ARPHRD_ETHER) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ authmode = padapter->securitypriv.ndisauthtype;
+ spin_lock_bh(&queue->lock);
+ phead = get_list_head(queue);
+ pmlmepriv->pscanned = get_next(phead);
+
+ while (1) {
+ if (phead == pmlmepriv->pscanned)
+ break;
+
+ pnetwork = LIST_CONTAINOR(pmlmepriv->pscanned, struct wlan_network, list);
+
+ pmlmepriv->pscanned = get_next(pmlmepriv->pscanned);
+
+ dst_bssid = pnetwork->network.MacAddress;
+
+ src_bssid = temp->sa_data;
+
+ if ((!memcmp(dst_bssid, src_bssid, ETH_ALEN)))
+ {
+ if (!rtw_set_802_11_infrastructure_mode(padapter, pnetwork->network.InfrastructureMode))
+ {
+ ret = -1;
+ spin_unlock_bh(&queue->lock);
+ goto exit;
+ }
+
+ break;
+ }
+
+ }
+ spin_unlock_bh(&queue->lock);
+
+ rtw_set_802_11_authentication_mode(padapter, authmode);
+ /* set_802_11_encryption_mode(padapter, padapter->securitypriv.ndisencryptstatus); */
+ if (rtw_set_802_11_bssid(padapter, temp->sa_data) == false) {
+ ret = -1;
+ goto exit;
+ }
+
+exit:
+
+ rtw_ps_deny_cancel(padapter, PS_DENY_JOIN);
+
+ return ret;
+}
+
+static int rtw_wx_get_wap(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct wlan_bssid_ex *pcur_bss = &pmlmepriv->cur_network.network;
+
+ wrqu->ap_addr.sa_family = ARPHRD_ETHER;
+
+ memset(wrqu->ap_addr.sa_data, 0, ETH_ALEN);
+
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("rtw_wx_get_wap\n"));
+
+ if (((check_fwstate(pmlmepriv, _FW_LINKED)) == true) ||
+ ((check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE)) == true) ||
+ ((check_fwstate(pmlmepriv, WIFI_AP_STATE)) == true))
+ {
+
+ memcpy(wrqu->ap_addr.sa_data, pcur_bss->MacAddress, ETH_ALEN);
+ }
+ else
+ {
+ memset(wrqu->ap_addr.sa_data, 0, ETH_ALEN);
+ }
+
+ return 0;
+}
+
+static int rtw_wx_set_mlme(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ int ret = 0;
+ u16 reason;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct iw_mlme *mlme = (struct iw_mlme *) extra;
+
+
+ if (mlme == NULL)
+ return -1;
+
+ DBG_871X("%s\n", __func__);
+
+ reason = mlme->reason_code;
+
+ DBG_871X("%s, cmd =%d, reason =%d\n", __func__, mlme->cmd, reason);
+
+ switch (mlme->cmd)
+ {
+ case IW_MLME_DEAUTH:
+ if (!rtw_set_802_11_disassociate(padapter))
+ ret = -1;
+ break;
+ case IW_MLME_DISASSOC:
+ if (!rtw_set_802_11_disassociate(padapter))
+ ret = -1;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return ret;
+}
+
+static int rtw_wx_set_scan(struct net_device *dev, struct iw_request_info *a,
+ union iwreq_data *wrqu, char *extra)
+{
+ u8 _status = false;
+ int ret = 0;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct ndis_802_11_ssid ssid[RTW_SSID_SCAN_AMOUNT];
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("rtw_wx_set_scan\n"));
+
+ #ifdef DBG_IOCTL
+ DBG_871X("DBG_IOCTL %s:%d\n", __func__, __LINE__);
+ #endif
+
+ rtw_ps_deny(padapter, PS_DENY_SCAN);
+ if (_FAIL == rtw_pwr_wakeup(padapter))
+ {
+ ret = -1;
+ goto exit;
+ }
+
+ if (padapter->bDriverStopped) {
+ DBG_871X("bDriverStopped =%d\n", padapter->bDriverStopped);
+ ret = -1;
+ goto exit;
+ }
+
+ if (!padapter->bup) {
+ ret = -1;
+ goto exit;
+ }
+
+ if (padapter->hw_init_completed == false) {
+ ret = -1;
+ goto exit;
+ }
+
+ /* When traffic is busy, the driver does not do a site survey, so return success; */
+ /* wpa_supplicant will not issue the SIOCSIWSCAN cmd again after the scan timeout. */
+ /* modified by thomas 2011-02-22. */
+ if (pmlmepriv->LinkDetectInfo.bBusyTraffic == true)
+ {
+ indicate_wx_scan_complete_event(padapter);
+ goto exit;
+ }
+
+ if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY|_FW_UNDER_LINKING) == true)
+ {
+ indicate_wx_scan_complete_event(padapter);
+ goto exit;
+ }
+
+ memset(ssid, 0, sizeof(struct ndis_802_11_ssid)*RTW_SSID_SCAN_AMOUNT);
+
+ if (wrqu->data.length == sizeof(struct iw_scan_req))
+ {
+ struct iw_scan_req *req = (struct iw_scan_req *)extra;
+
+ if (wrqu->data.flags & IW_SCAN_THIS_ESSID)
+ {
+ int len = min((int)req->essid_len, IW_ESSID_MAX_SIZE);
+
+ memcpy(ssid[0].Ssid, req->essid, len);
+ ssid[0].SsidLength = len;
+
+ DBG_871X("IW_SCAN_THIS_ESSID, ssid =%s, len =%d\n", req->essid, req->essid_len);
+
+ spin_lock_bh(&pmlmepriv->lock);
+
+ _status = rtw_sitesurvey_cmd(padapter, ssid, 1, NULL, 0);
+
+ spin_unlock_bh(&pmlmepriv->lock);
+
+ }
+ else if (req->scan_type == IW_SCAN_TYPE_PASSIVE)
+ {
+ DBG_871X("rtw_wx_set_scan, req->scan_type == IW_SCAN_TYPE_PASSIVE\n");
+ }
+
+ }
+ else if (wrqu->data.length >= WEXT_CSCAN_HEADER_SIZE
+ && !memcmp(extra, WEXT_CSCAN_HEADER, WEXT_CSCAN_HEADER_SIZE)
+ )
+ {
+ int len = wrqu->data.length - WEXT_CSCAN_HEADER_SIZE;
+ char *pos = extra + WEXT_CSCAN_HEADER_SIZE;
+ char section;
+ char sec_len;
+ int ssid_index = 0;
+
+ /* DBG_871X("%s COMBO_SCAN header is recognized\n", __func__); */
+
+ while (len >= 1) {
+ section = *(pos++); len-= 1;
+
+ switch (section) {
+ case WEXT_CSCAN_SSID_SECTION:
+ /* DBG_871X("WEXT_CSCAN_SSID_SECTION\n"); */
+ if (len < 1) {
+ len = 0;
+ break;
+ }
+
+ sec_len = *(pos++); len-= 1;
+
+ if (sec_len>0 && sec_len<=len) {
+ ssid[ssid_index].SsidLength = sec_len;
+ memcpy(ssid[ssid_index].Ssid, pos, ssid[ssid_index].SsidLength);
+ /* DBG_871X("%s COMBO_SCAN with specific ssid:%s, %d\n", __func__ */
+ /* , ssid[ssid_index].Ssid, ssid[ssid_index].SsidLength); */
+ ssid_index++;
+ }
+
+ pos+=sec_len; len-=sec_len;
+ break;
+
+
+ case WEXT_CSCAN_CHANNEL_SECTION:
+ /* DBG_871X("WEXT_CSCAN_CHANNEL_SECTION\n"); */
+ pos+= 1; len-= 1;
+ break;
+ case WEXT_CSCAN_ACTV_DWELL_SECTION:
+ /* DBG_871X("WEXT_CSCAN_ACTV_DWELL_SECTION\n"); */
+ pos+=2; len-=2;
+ break;
+ case WEXT_CSCAN_PASV_DWELL_SECTION:
+ /* DBG_871X("WEXT_CSCAN_PASV_DWELL_SECTION\n"); */
+ pos+=2; len-=2;
+ break;
+ case WEXT_CSCAN_HOME_DWELL_SECTION:
+ /* DBG_871X("WEXT_CSCAN_HOME_DWELL_SECTION\n"); */
+ pos+=2; len-=2;
+ break;
+ case WEXT_CSCAN_TYPE_SECTION:
+ /* DBG_871X("WEXT_CSCAN_TYPE_SECTION\n"); */
+ pos+= 1; len-= 1;
+ break;
+ default:
+ /* DBG_871X("Unknown CSCAN section %c\n", section); */
+ len = 0; /* stop parsing */
+ }
+ /* DBG_871X("len:%d\n", len); */
+
+ }
+
+ /* jeff: there are still some scan parameters to parse, we only do this for now... */
+ _status = rtw_set_802_11_bssid_list_scan(padapter, ssid, RTW_SSID_SCAN_AMOUNT);
+
+ } else
+
+ {
+ _status = rtw_set_802_11_bssid_list_scan(padapter, NULL, 0);
+ }
+
+ if (_status == false)
+ ret = -1;
+
+exit:
+
+ rtw_ps_deny_cancel(padapter, PS_DENY_SCAN);
+
+ #ifdef DBG_IOCTL
+ DBG_871X("DBG_IOCTL %s:%d return %d\n", __func__, __LINE__, ret);
+ #endif
+
+ return ret;
+}
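+
+/*
+ * Sketch of the combo-scan buffer parsed above (derived from the parser;
+ * the exact tag values come from the WEXT_CSCAN_* definitions):
+ *   a WEXT_CSCAN_HEADER prefix of WEXT_CSCAN_HEADER_SIZE bytes,
+ *   followed by repeated sections, each starting with a 1-byte tag:
+ *     SSID section    : tag, 1-byte length, SSID bytes
+ *     CHANNEL section : tag, 1 byte
+ *     ACTV/PASV/HOME dwell sections: tag, 2 bytes each
+ *     TYPE section    : tag, 1 byte
+ *   an unknown tag stops the parse.
+ */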
+
+static int rtw_wx_get_scan(struct net_device *dev, struct iw_request_info *a,
+ union iwreq_data *wrqu, char *extra)
+{
+ struct list_head *plist, *phead;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct __queue *queue = &(pmlmepriv->scanned_queue);
+ struct wlan_network *pnetwork = NULL;
+ char *ev = extra;
+ char *stop = ev + wrqu->data.length;
+ int ret = 0;
+ sint wait_status;
+
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("rtw_wx_get_scan\n"));
+ RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_info_, (" Start of Query SIOCGIWSCAN .\n"));
+
+ #ifdef DBG_IOCTL
+ DBG_871X("DBG_IOCTL %s:%d\n", __func__, __LINE__);
+ #endif
+
+ if (adapter_to_pwrctl(padapter)->brfoffbyhw && padapter->bDriverStopped)
+ {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ wait_status = _FW_UNDER_SURVEY | _FW_UNDER_LINKING;
+
+ if (check_fwstate(pmlmepriv, wait_status))
+ return -EAGAIN;
+
+ spin_lock_bh(&(pmlmepriv->scanned_queue.lock));
+
+ phead = get_list_head(queue);
+ plist = get_next(phead);
+
+ while (1)
+ {
+ if (phead == plist)
+ break;
+
+ if ((stop - ev) < SCAN_ITEM_SIZE) {
+ ret = -E2BIG;
+ break;
+ }
+
+ pnetwork = LIST_CONTAINOR(plist, struct wlan_network, list);
+
+ /* report network only if the current channel set contains the channel to which this network belongs */
+ if (rtw_ch_set_search_ch(padapter->mlmeextpriv.channel_set, pnetwork->network.Configuration.DSConfig) >= 0
+ && rtw_mlme_band_check(padapter, pnetwork->network.Configuration.DSConfig) == true
+ && true == rtw_validate_ssid(&(pnetwork->network.Ssid))
+ )
+ {
+ ev = translate_scan(padapter, a, pnetwork, ev, stop);
+ }
+
+ plist = get_next(plist);
+
+ }
+
+ spin_unlock_bh(&(pmlmepriv->scanned_queue.lock));
+
+ wrqu->data.length = ev - extra;
+ wrqu->data.flags = 0;
+
+exit:
+
+ #ifdef DBG_IOCTL
+ DBG_871X("DBG_IOCTL %s:%d return %d\n", __func__, __LINE__, ret);
+ #endif
+
+ return ret;
+
+}
+
+/* set ssid flow */
+/* s1. rtw_set_802_11_infrastructure_mode() */
+/* s2. set_802_11_authentication_mode() */
+/* s3. set_802_11_encryption_mode() */
+/* s4. rtw_set_802_11_ssid() */
+static int rtw_wx_set_essid(struct net_device *dev,
+ struct iw_request_info *a,
+ union iwreq_data *wrqu, char *extra)
+{
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct __queue *queue = &pmlmepriv->scanned_queue;
+ struct list_head *phead;
+ struct wlan_network *pnetwork = NULL;
+ enum NDIS_802_11_AUTHENTICATION_MODE authmode;
+ struct ndis_802_11_ssid ndis_ssid;
+ u8 *dst_ssid, *src_ssid;
+
+ uint len;
+ int ret = 0;
+
+ #ifdef DBG_IOCTL
+ DBG_871X("DBG_IOCTL %s:%d\n", __func__, __LINE__);
+ #endif
+
+ RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_info_,
+ ("+rtw_wx_set_essid: fw_state = 0x%08x\n", get_fwstate(pmlmepriv)));
+
+ rtw_ps_deny(padapter, PS_DENY_JOIN);
+ if (_FAIL == rtw_pwr_wakeup(padapter))
+ {
+ ret = -1;
+ goto exit;
+ }
+
+ if (!padapter->bup) {
+ ret = -1;
+ goto exit;
+ }
+
+ if (wrqu->essid.length > IW_ESSID_MAX_SIZE) {
+ ret = -E2BIG;
+ goto exit;
+ }
+
+ if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
+ ret = -1;
+ goto exit;
+ }
+
+ authmode = padapter->securitypriv.ndisauthtype;
+ DBG_871X("=>%s\n", __func__);
+ if (wrqu->essid.flags && wrqu->essid.length)
+ {
+ len = (wrqu->essid.length < IW_ESSID_MAX_SIZE) ? wrqu->essid.length : IW_ESSID_MAX_SIZE;
+
+ if (wrqu->essid.length != 33)
+ DBG_871X("ssid =%s, len =%d\n", extra, wrqu->essid.length);
+
+ memset(&ndis_ssid, 0, sizeof(struct ndis_802_11_ssid));
+ ndis_ssid.SsidLength = len;
+ memcpy(ndis_ssid.Ssid, extra, len);
+ src_ssid = ndis_ssid.Ssid;
+
+ RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_info_, ("rtw_wx_set_essid: ssid =[%s]\n", src_ssid));
+ spin_lock_bh(&queue->lock);
+ phead = get_list_head(queue);
+ pmlmepriv->pscanned = get_next(phead);
+
+ while (1) {
+ if (phead == pmlmepriv->pscanned)
+ {
+ RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_warning_,
+ ("rtw_wx_set_essid: scan_q is empty, set ssid to check if scanning again!\n"));
+
+ break;
+ }
+
+ pnetwork = LIST_CONTAINOR(pmlmepriv->pscanned, struct wlan_network, list);
+
+ pmlmepriv->pscanned = get_next(pmlmepriv->pscanned);
+
+ dst_ssid = pnetwork->network.Ssid.Ssid;
+
+ RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_info_,
+ ("rtw_wx_set_essid: dst_ssid =%s\n",
+ pnetwork->network.Ssid.Ssid));
+
+ if ((!memcmp(dst_ssid, src_ssid, ndis_ssid.SsidLength)) &&
+ (pnetwork->network.Ssid.SsidLength == ndis_ssid.SsidLength))
+ {
+ RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_info_,
+ ("rtw_wx_set_essid: find match, set infra mode\n"));
+
+ if (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) == true)
+ {
+ if (pnetwork->network.InfrastructureMode != pmlmepriv->cur_network.network.InfrastructureMode)
+ continue;
+ }
+
+ if (rtw_set_802_11_infrastructure_mode(padapter, pnetwork->network.InfrastructureMode) == false)
+ {
+ ret = -1;
+ spin_unlock_bh(&queue->lock);
+ goto exit;
+ }
+
+ break;
+ }
+ }
+ spin_unlock_bh(&queue->lock);
+ RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_info_,
+ ("set ssid: set_802_11_auth. mode =%d\n", authmode));
+ rtw_set_802_11_authentication_mode(padapter, authmode);
+ /* set_802_11_encryption_mode(padapter, padapter->securitypriv.ndisencryptstatus); */
+ if (rtw_set_802_11_ssid(padapter, &ndis_ssid) == false) {
+ ret = -1;
+ goto exit;
+ }
+ }
+
+exit:
+
+ rtw_ps_deny_cancel(padapter, PS_DENY_JOIN);
+
+ DBG_871X("<=%s, ret %d\n", __func__, ret);
+
+ #ifdef DBG_IOCTL
+ DBG_871X("DBG_IOCTL %s:%d return %d\n", __func__, __LINE__, ret);
+ #endif
+
+ return ret;
+}
+
+static int rtw_wx_get_essid(struct net_device *dev,
+ struct iw_request_info *a,
+ union iwreq_data *wrqu, char *extra)
+{
+ u32 len;
+ int ret = 0;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct wlan_bssid_ex *pcur_bss = &pmlmepriv->cur_network.network;
+
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("rtw_wx_get_essid\n"));
+
+ if ((check_fwstate(pmlmepriv, _FW_LINKED) == true) ||
+ (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) == true))
+ {
+ len = pcur_bss->Ssid.SsidLength;
+
+ wrqu->essid.length = len;
+
+ memcpy(extra, pcur_bss->Ssid.Ssid, len);
+
+ wrqu->essid.flags = 1;
+ }
+ else
+ {
+ ret = -1;
+ goto exit;
+ }
+
+exit:
+ return ret;
+}
+
+static int rtw_wx_set_rate(struct net_device *dev,
+ struct iw_request_info *a,
+ union iwreq_data *wrqu, char *extra)
+{
+ int i, ret = 0;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ u8 datarates[NumRates];
+ u32 target_rate = wrqu->bitrate.value;
+ u32 fixed = wrqu->bitrate.fixed;
+ u32 ratevalue = 0;
+ u8 mpdatarate[NumRates] = {11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 0xff};
+
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, (" rtw_wx_set_rate\n"));
+ RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_info_, ("target_rate = %d, fixed = %d\n", target_rate, fixed));
+
+ if (target_rate == -1) {
+ ratevalue = 11;
+ goto set_rate;
+ }
+ target_rate = target_rate/100000;
+
+ switch (target_rate) {
+ case 10:
+ ratevalue = 0;
+ break;
+ case 20:
+ ratevalue = 1;
+ break;
+ case 55:
+ ratevalue = 2;
+ break;
+ case 60:
+ ratevalue = 3;
+ break;
+ case 90:
+ ratevalue = 4;
+ break;
+ case 110:
+ ratevalue = 5;
+ break;
+ case 120:
+ ratevalue = 6;
+ break;
+ case 180:
+ ratevalue = 7;
+ break;
+ case 240:
+ ratevalue = 8;
+ break;
+ case 360:
+ ratevalue = 9;
+ break;
+ case 480:
+ ratevalue = 10;
+ break;
+ case 540:
+ ratevalue = 11;
+ break;
+ default:
+ ratevalue = 11;
+ break;
+ }
+
+set_rate:
+
+ for (i = 0; i < NumRates; i++)
+ {
+ if (ratevalue == mpdatarate[i])
+ {
+ datarates[i] = mpdatarate[i];
+ if (fixed == 0)
+ break;
+ }
+ else {
+ datarates[i] = 0xff;
+ }
+
+ RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_info_, ("datarate_inx =%d\n", datarates[i]));
+ }
+
+ if (rtw_setdatarate_cmd(padapter, datarates) != _SUCCESS) {
+ RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_err_, ("rtw_wx_set_rate Fail!!!\n"));
+ ret = -1;
+ }
+ return ret;
+}
+
+static int rtw_wx_get_rate(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ u16 max_rate = 0;
+
+ max_rate = rtw_get_cur_max_rate((struct adapter *)rtw_netdev_priv(dev));
+
+ if (max_rate == 0)
+ return -EPERM;
+
+ wrqu->bitrate.fixed = 0; /* no auto select */
+ wrqu->bitrate.value = max_rate * 100000;
+
+ return 0;
+}
+
+static int rtw_wx_set_rts(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+
+ if (wrqu->rts.disabled)
+ padapter->registrypriv.rts_thresh = 2347;
+ else {
+ if (wrqu->rts.value < 0 ||
+ wrqu->rts.value > 2347)
+ return -EINVAL;
+
+ padapter->registrypriv.rts_thresh = wrqu->rts.value;
+ }
+
+ DBG_871X("%s, rts_thresh =%d\n", __func__, padapter->registrypriv.rts_thresh);
+
+ return 0;
+}
+
+static int rtw_wx_get_rts(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+
+ DBG_871X("%s, rts_thresh =%d\n", __func__, padapter->registrypriv.rts_thresh);
+
+ wrqu->rts.value = padapter->registrypriv.rts_thresh;
+ wrqu->rts.fixed = 0; /* no auto select */
+ /* wrqu->rts.disabled = (wrqu->rts.value == DEFAULT_RTS_THRESHOLD); */
+
+ return 0;
+}
+
+static int rtw_wx_set_frag(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+
+ if (wrqu->frag.disabled)
+ padapter->xmitpriv.frag_len = MAX_FRAG_THRESHOLD;
+ else {
+ if (wrqu->frag.value < MIN_FRAG_THRESHOLD ||
+ wrqu->frag.value > MAX_FRAG_THRESHOLD)
+ return -EINVAL;
+
+ padapter->xmitpriv.frag_len = wrqu->frag.value & ~0x1;
+ }
+
+ DBG_871X("%s, frag_len =%d\n", __func__, padapter->xmitpriv.frag_len);
+
+ return 0;
+
+}
+
+static int rtw_wx_get_frag(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+
+ DBG_871X("%s, frag_len =%d\n", __func__, padapter->xmitpriv.frag_len);
+
+ wrqu->frag.value = padapter->xmitpriv.frag_len;
+ wrqu->frag.fixed = 0; /* no auto select */
+ /* wrqu->frag.disabled = (wrqu->frag.value == DEFAULT_FRAG_THRESHOLD); */
+
+ return 0;
+}
+
+static int rtw_wx_get_retry(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ /* struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev); */
+
+
+ wrqu->retry.value = 7;
+ wrqu->retry.fixed = 0; /* no auto select */
+ wrqu->retry.disabled = 1;
+
+ return 0;
+
+}
+
+static int rtw_wx_set_enc(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *keybuf)
+{
+ u32 key;
+ int ret = 0;
+ u32 keyindex_provided;
+ struct ndis_802_11_wep wep;
+ enum NDIS_802_11_AUTHENTICATION_MODE authmode;
+
+ struct iw_point *erq = &(wrqu->encoding);
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct pwrctrl_priv *pwrpriv = adapter_to_pwrctl(padapter);
+ DBG_871X("+rtw_wx_set_enc, flags = 0x%x\n", erq->flags);
+
+ memset(&wep, 0, sizeof(struct ndis_802_11_wep));
+
+ key = erq->flags & IW_ENCODE_INDEX;
+
+ if (erq->flags & IW_ENCODE_DISABLED)
+ {
+ DBG_871X("EncryptionDisabled\n");
+ padapter->securitypriv.ndisencryptstatus = Ndis802_11EncryptionDisabled;
+ padapter->securitypriv.dot11PrivacyAlgrthm = _NO_PRIVACY_;
+ padapter->securitypriv.dot118021XGrpPrivacy = _NO_PRIVACY_;
+ padapter->securitypriv.dot11AuthAlgrthm = dot11AuthAlgrthm_Open; /* open system */
+ authmode = Ndis802_11AuthModeOpen;
+ padapter->securitypriv.ndisauthtype = authmode;
+
+ goto exit;
+ }
+
+ if (key) {
+ if (key > WEP_KEYS)
+ return -EINVAL;
+ key--;
+ keyindex_provided = 1;
+ }
+ else
+ {
+ keyindex_provided = 0;
+ key = padapter->securitypriv.dot11PrivacyKeyIndex;
+ DBG_871X("rtw_wx_set_enc, key =%d\n", key);
+ }
+
+ /* set authentication mode */
+ if (erq->flags & IW_ENCODE_OPEN)
+ {
+ DBG_871X("rtw_wx_set_enc():IW_ENCODE_OPEN\n");
+ padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption1Enabled;/* Ndis802_11EncryptionDisabled; */
+
+ padapter->securitypriv.dot11AuthAlgrthm = dot11AuthAlgrthm_Open;
+
+ padapter->securitypriv.dot11PrivacyAlgrthm = _NO_PRIVACY_;
+ padapter->securitypriv.dot118021XGrpPrivacy = _NO_PRIVACY_;
+ authmode = Ndis802_11AuthModeOpen;
+ padapter->securitypriv.ndisauthtype = authmode;
+ }
+ else if (erq->flags & IW_ENCODE_RESTRICTED)
+ {
+ DBG_871X("rtw_wx_set_enc():IW_ENCODE_RESTRICTED\n");
+ padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption1Enabled;
+
+ padapter->securitypriv.dot11AuthAlgrthm = dot11AuthAlgrthm_Shared;
+
+ padapter->securitypriv.dot11PrivacyAlgrthm = _WEP40_;
+ padapter->securitypriv.dot118021XGrpPrivacy = _WEP40_;
+ authmode = Ndis802_11AuthModeShared;
+ padapter->securitypriv.ndisauthtype = authmode;
+ }
+ else
+ {
+ DBG_871X("rtw_wx_set_enc():erq->flags = 0x%x\n", erq->flags);
+
+ padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption1Enabled;/* Ndis802_11EncryptionDisabled; */
+ padapter->securitypriv.dot11AuthAlgrthm = dot11AuthAlgrthm_Open; /* open system */
+ padapter->securitypriv.dot11PrivacyAlgrthm = _NO_PRIVACY_;
+ padapter->securitypriv.dot118021XGrpPrivacy = _NO_PRIVACY_;
+ authmode = Ndis802_11AuthModeOpen;
+ padapter->securitypriv.ndisauthtype = authmode;
+ }
+
+ wep.KeyIndex = key;
+ if (erq->length > 0)
+ {
+ wep.KeyLength = erq->length <= 5 ? 5 : 13;
+
+ wep.Length = wep.KeyLength + FIELD_OFFSET(struct ndis_802_11_wep, KeyMaterial);
+ }
+ else
+ {
+ wep.KeyLength = 0;
+
+ if (keyindex_provided == 1)/* set key_id only, no given KeyMaterial(erq->length == 0). */
+ {
+ padapter->securitypriv.dot11PrivacyKeyIndex = key;
+
+ DBG_871X("(keyindex_provided == 1), keyid =%d, key_len =%d\n", key, padapter->securitypriv.dot11DefKeylen[key]);
+
+ switch (padapter->securitypriv.dot11DefKeylen[key])
+ {
+ case 5:
+ padapter->securitypriv.dot11PrivacyAlgrthm = _WEP40_;
+ break;
+ case 13:
+ padapter->securitypriv.dot11PrivacyAlgrthm = _WEP104_;
+ break;
+ default:
+ padapter->securitypriv.dot11PrivacyAlgrthm = _NO_PRIVACY_;
+ break;
+ }
+
+ goto exit;
+
+ }
+
+ }
+
+ wep.KeyIndex |= 0x80000000;
+
+ memcpy(wep.KeyMaterial, keybuf, wep.KeyLength);
+
+ if (rtw_set_802_11_add_wep(padapter, &wep) == false) {
+ if (rf_on == pwrpriv->rf_pwrstate)
+ ret = -EOPNOTSUPP;
+ goto exit;
+ }
+
+exit:
+ return ret;
+}
+
+static int rtw_wx_get_enc(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *keybuf)
+{
+ uint key, ret = 0;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct iw_point *erq = &(wrqu->encoding);
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+
+ if (check_fwstate(pmlmepriv, _FW_LINKED) != true)
+ {
+ if (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) != true)
+ {
+ erq->length = 0;
+ erq->flags |= IW_ENCODE_DISABLED;
+ return 0;
+ }
+ }
+
+
+ key = erq->flags & IW_ENCODE_INDEX;
+
+ if (key) {
+ if (key > WEP_KEYS)
+ return -EINVAL;
+ key--;
+ } else
+ {
+ key = padapter->securitypriv.dot11PrivacyKeyIndex;
+ }
+
+ erq->flags = key + 1;
+
+ /* if (padapter->securitypriv.ndisauthtype == Ndis802_11AuthModeOpen) */
+ /* */
+ /* erq->flags |= IW_ENCODE_OPEN; */
+ /* */
+
+ switch (padapter->securitypriv.ndisencryptstatus)
+ {
+ case Ndis802_11EncryptionNotSupported:
+ case Ndis802_11EncryptionDisabled:
+ erq->length = 0;
+ erq->flags |= IW_ENCODE_DISABLED;
+ break;
+ case Ndis802_11Encryption1Enabled:
+ erq->length = padapter->securitypriv.dot11DefKeylen[key];
+
+ if (erq->length)
+ {
+ memcpy(keybuf, padapter->securitypriv.dot11DefKey[key].skey, padapter->securitypriv.dot11DefKeylen[key]);
+
+ erq->flags |= IW_ENCODE_ENABLED;
+
+ if (padapter->securitypriv.ndisauthtype == Ndis802_11AuthModeOpen)
+ {
+ erq->flags |= IW_ENCODE_OPEN;
+ }
+ else if (padapter->securitypriv.ndisauthtype == Ndis802_11AuthModeShared)
+ {
+ erq->flags |= IW_ENCODE_RESTRICTED;
+ }
+ }
+ else
+ {
+ erq->length = 0;
+ erq->flags |= IW_ENCODE_DISABLED;
+ }
+ break;
+ case Ndis802_11Encryption2Enabled:
+ case Ndis802_11Encryption3Enabled:
+ erq->length = 16;
+ erq->flags |= (IW_ENCODE_ENABLED | IW_ENCODE_OPEN | IW_ENCODE_NOKEY);
+ break;
+ default:
+ erq->length = 0;
+ erq->flags |= IW_ENCODE_DISABLED;
+ break;
+ }
+ return ret;
+}
+
+static int rtw_wx_get_power(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ /* struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev); */
+
+ wrqu->power.value = 0;
+ wrqu->power.fixed = 0; /* no auto select */
+ wrqu->power.disabled = 1;
+
+ return 0;
+}
+
+static int rtw_wx_set_gen_ie(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ int ret;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+
+ ret = rtw_set_wpa_ie(padapter, extra, wrqu->data.length);
+
+ return ret;
+}
+
+static int rtw_wx_set_auth(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct iw_param *param = (struct iw_param*)&(wrqu->param);
+ int ret = 0;
+
+ switch (param->flags & IW_AUTH_INDEX) {
+
+ case IW_AUTH_WPA_VERSION:
+ break;
+ case IW_AUTH_CIPHER_PAIRWISE:
+
+ break;
+ case IW_AUTH_CIPHER_GROUP:
+
+ break;
+ case IW_AUTH_KEY_MGMT:
+ /*
+ * ??? does not use these parameters
+ */
+ break;
+
+ case IW_AUTH_TKIP_COUNTERMEASURES:
+ {
+ if (param->value)
+ { /* wpa_supplicant is enabling the tkip countermeasure. */
+ padapter->securitypriv.btkip_countermeasure = true;
+ }
+ else
+ { /* wpa_supplicant is disabling the tkip countermeasure. */
+ padapter->securitypriv.btkip_countermeasure = false;
+ }
+ break;
+ }
+ case IW_AUTH_DROP_UNENCRYPTED:
+ {
+ /* HACK:
+ *
+ * wpa_supplicant calls set_wpa_enabled when the driver
+ * is loaded and unloaded, regardless of whether WPA is
+ * being used. No other calls are made that can be used to
+ * determine whether encryption will be used before
+ * association is expected. If encryption is not being
+ * used, drop_unencrypted is set to false, else true -- we
+ * can use this to determine if the CAP_PRIVACY_ON bit should
+ * be set.
+ */
+
+ if (padapter->securitypriv.ndisencryptstatus == Ndis802_11Encryption1Enabled)
+ {
+ /* initial value, or WEP in use (ndisencryptstatus == */
+ /* Ndis802_11Encryption1Enabled); no need to reset it. */
+ break;
+ }
+
+ if (param->value) {
+ padapter->securitypriv.ndisencryptstatus = Ndis802_11EncryptionDisabled;
+ padapter->securitypriv.dot11PrivacyAlgrthm = _NO_PRIVACY_;
+ padapter->securitypriv.dot118021XGrpPrivacy = _NO_PRIVACY_;
+ padapter->securitypriv.dot11AuthAlgrthm = dot11AuthAlgrthm_Open; /* open system */
+ padapter->securitypriv.ndisauthtype = Ndis802_11AuthModeOpen;
+ }
+
+ break;
+ }
+
+ case IW_AUTH_80211_AUTH_ALG:
+
+ /*
+ * It's the starting point of a link layer connection using wpa_supplicant
+ */
+ if (check_fwstate(&padapter->mlmepriv, _FW_LINKED)) {
+ LeaveAllPowerSaveMode(padapter);
+ rtw_disassoc_cmd(padapter, 500, false);
+ DBG_871X("%s...call rtw_indicate_disconnect\n ", __func__);
+ rtw_indicate_disconnect(padapter);
+ rtw_free_assoc_resources(padapter, 1);
+ }
+
+
+ ret = wpa_set_auth_algs(dev, (u32)param->value);
+
+ break;
+
+ case IW_AUTH_WPA_ENABLED:
+ break;
+ case IW_AUTH_RX_UNENCRYPTED_EAPOL:
+ break;
+ case IW_AUTH_PRIVACY_INVOKED:
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ return ret;
+}
+
+static int rtw_wx_set_enc_ext(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ char *alg_name;
+ u32 param_len;
+ struct ieee_param *param = NULL;
+ struct iw_point *pencoding = &wrqu->encoding;
+ struct iw_encode_ext *pext = (struct iw_encode_ext *)extra;
+ int ret = 0;
+
+ param_len = sizeof(struct ieee_param) + pext->key_len;
+ param = (struct ieee_param *)rtw_malloc(param_len);
+ if (param == NULL)
+ return -1;
+
+ memset(param, 0, param_len);
+
+ param->cmd = IEEE_CMD_SET_ENCRYPTION;
+ memset(param->sta_addr, 0xff, ETH_ALEN);
+
+
+ switch (pext->alg) {
+ case IW_ENCODE_ALG_NONE:
+ /* todo: remove key */
+ /* remove = 1; */
+ alg_name = "none";
+ break;
+ case IW_ENCODE_ALG_WEP:
+ alg_name = "WEP";
+ break;
+ case IW_ENCODE_ALG_TKIP:
+ alg_name = "TKIP";
+ break;
+ case IW_ENCODE_ALG_CCMP:
+ alg_name = "CCMP";
+ break;
+ case IW_ENCODE_ALG_AES_CMAC:
+ alg_name = "BIP";
+ break;
+ default:
+ ret = -1;
+ goto exit;
+ }
+
+ strncpy((char *)param->u.crypt.alg, alg_name, IEEE_CRYPT_ALG_NAME_LEN);
+
+ if (pext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY)
+ {
+ param->u.crypt.set_tx = 1;
+ }
+
+ /* cliW: WEP does not have a group key,
+ * so skip the GROUP key check here.
+ */
+ if ((pext->alg != IW_ENCODE_ALG_WEP) &&
+ ((pext->ext_flags & IW_ENCODE_EXT_GROUP_KEY)
+ || (pext->ext_flags & IW_ENCODE_ALG_AES_CMAC)
+ ))
+ {
+ param->u.crypt.set_tx = 0;
+ }
+
+ param->u.crypt.idx = (pencoding->flags & 0x00FF) - 1;
+
+ if (pext->ext_flags & IW_ENCODE_EXT_RX_SEQ_VALID)
+ {
+ memcpy(param->u.crypt.seq, pext->rx_seq, 8);
+ }
+
+ if (pext->key_len)
+ {
+ param->u.crypt.key_len = pext->key_len;
+ /* memcpy(param + 1, pext + 1, pext->key_len); */
+ memcpy(param->u.crypt.key, pext + 1, pext->key_len);
+ }
+
+ if (pencoding->flags & IW_ENCODE_DISABLED)
+ {
+ /* todo: remove key */
+ /* remove = 1; */
+ }
+
+ ret = wpa_set_encryption(dev, param, param_len);
+
+exit:
+ kfree((u8 *)param);
+
+ return ret;
+}
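+
+/*
+ * Summary of the repacking done above (taken from the code): the WEXT
+ * iw_encode_ext request is translated into an ieee_param with
+ * cmd = IEEE_CMD_SET_ENCRYPTION, a broadcast sta_addr, the algorithm name
+ * as a string ("none"/"WEP"/"TKIP"/"CCMP"/"BIP"), set_tx taken from the
+ * SET_TX_KEY flag (forced to 0 for non-WEP group/BIP keys), idx from the
+ * low bits of encoding.flags, and the RX sequence and key material copied
+ * verbatim, before being handed to wpa_set_encryption().
+ */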
+
+
+static int rtw_wx_get_nick(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ /* struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev); */
+ /* struct mlme_priv *pmlmepriv = &(padapter->mlmepriv); */
+ /* struct security_priv *psecuritypriv = &padapter->securitypriv; */
+
+ if (extra)
+ {
+ wrqu->data.length = 14;
+ wrqu->data.flags = 1;
+ memcpy(extra, "<WIFI@REALTEK>", 14);
+ }
+ return 0;
+}
+
+static int rtw_wx_read32(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ struct adapter *padapter;
+ struct iw_point *p;
+ u16 len;
+ u32 addr;
+ u32 data32;
+ u32 bytes;
+ u8 *ptmp;
+ int ret;
+
+
+ ret = 0;
+ padapter = (struct adapter *)rtw_netdev_priv(dev);
+ p = &wrqu->data;
+ len = p->length;
+ if (0 == len)
+ return -EINVAL;
+
+ ptmp = (u8 *)rtw_malloc(len);
+ if (NULL == ptmp)
+ return -ENOMEM;
+
+ if (copy_from_user(ptmp, p->pointer, len)) {
+ ret = -EFAULT;
+ goto exit;
+ }
+
+ bytes = 0;
+ addr = 0;
+ sscanf(ptmp, "%d,%x", &bytes, &addr);
+
+ switch (bytes) {
+ case 1:
+ data32 = rtw_read8(padapter, addr);
+ sprintf(extra, "0x%02X", data32);
+ break;
+ case 2:
+ data32 = rtw_read16(padapter, addr);
+ sprintf(extra, "0x%04X", data32);
+ break;
+ case 4:
+ data32 = rtw_read32(padapter, addr);
+ sprintf(extra, "0x%08X", data32);
+ break;
+ default:
+ DBG_871X(KERN_INFO "%s: usage> read [bytes],[address(hex)]\n", __func__);
+ ret = -EINVAL;
+ goto exit;
+ }
+ DBG_871X(KERN_INFO "%s: addr = 0x%08X data =%s\n", __func__, addr, extra);
+
+exit:
+ kfree(ptmp);
+
+ return ret;
+}
+
+static int rtw_wx_write32(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+
+ u32 addr;
+ u32 data32;
+ u32 bytes;
+
+
+ bytes = 0;
+ addr = 0;
+ data32 = 0;
+ sscanf(extra, "%d,%x,%x", &bytes, &addr, &data32);
+
+ switch (bytes) {
+ case 1:
+ rtw_write8(padapter, addr, (u8)data32);
+ DBG_871X(KERN_INFO "%s: addr = 0x%08X data = 0x%02X\n", __func__, addr, (u8)data32);
+ break;
+ case 2:
+ rtw_write16(padapter, addr, (u16)data32);
+ DBG_871X(KERN_INFO "%s: addr = 0x%08X data = 0x%04X\n", __func__, addr, (u16)data32);
+ break;
+ case 4:
+ rtw_write32(padapter, addr, data32);
+ DBG_871X(KERN_INFO "%s: addr = 0x%08X data = 0x%08X\n", __func__, addr, data32);
+ break;
+ default:
+ DBG_871X(KERN_INFO "%s: usage> write [bytes],[address(hex)],[data(hex)]\n", __func__);
+ return -EINVAL;
+ }
+
+ return 0;
+}
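+
+/*
+ * Both handlers above parse a decimal byte count followed by hex fields,
+ * per their sscanf() format strings, e.g. "4,0x00f0" for a 4-byte read and
+ * "1,0x00f0,0x5a" for a 1-byte write.  The private-ioctl names they are
+ * registered under are defined elsewhere in the driver.
+ */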
+
+static int rtw_wx_read_rf(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ u32 path, addr, data32;
+
+
+ path = *(u32*)extra;
+ addr = *((u32*)extra + 1);
+ data32 = rtw_hal_read_rfreg(padapter, path, addr, 0xFFFFF);
+ /*
+ * IMPORTANT!!
+ * "extra" is copied back to user space only when the
+ * wireless private ioctl ordinal is odd.
+ */
+ sprintf(extra, "0x%05x", data32);
+
+ return 0;
+}
+
+static int rtw_wx_write_rf(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ u32 path, addr, data32;
+
+
+ path = *(u32*)extra;
+ addr = *((u32*)extra + 1);
+ data32 = *((u32*)extra + 2);
+/* DBG_871X("%s: path =%d addr = 0x%02x data = 0x%05x\n", __func__, path, addr, data32); */
+ rtw_hal_write_rfreg(padapter, path, addr, 0xFFFFF, data32);
+
+ return 0;
+}
+
+static int rtw_wx_priv_null(struct net_device *dev, struct iw_request_info *a,
+ union iwreq_data *wrqu, char *b)
+{
+ return -1;
+}
+
+static int dummy(struct net_device *dev, struct iw_request_info *a,
+ union iwreq_data *wrqu, char *b)
+{
+ /* struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev); */
+ /* struct mlme_priv *pmlmepriv = &(padapter->mlmepriv); */
+
+ /* DBG_871X("cmd_code =%x, fwstate = 0x%x\n", a->cmd, get_fwstate(pmlmepriv)); */
+
+ return -1;
+
+}
+
+static int rtw_wx_set_channel_plan(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ u8 channel_plan_req = (u8) (*((int *)wrqu));
+
+ if (_SUCCESS == rtw_set_chplan_cmd(padapter, channel_plan_req, 1, 1)) {
+ DBG_871X("%s set channel_plan = 0x%02X\n", __func__, channel_plan_req);
+ } else
+ return -EPERM;
+
+ return 0;
+}
+
+static int rtw_wx_set_mtk_wps_probe_ie(struct net_device *dev,
+ struct iw_request_info *a,
+ union iwreq_data *wrqu, char *b)
+{
+ return 0;
+}
+
+static int rtw_wx_get_sensitivity(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *buf)
+{
+ return 0;
+}
+
+static int rtw_wx_set_mtk_wps_ie(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ return 0;
+}
+
+/*
+typedef int (*iw_handler)(struct net_device *dev, struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra);
+*/
+/*
+ * For all data larger than 16 octets, we need to use a
+ * pointer to memory allocated in user space.
+ */
+static int rtw_drvext_hdl(struct net_device *dev, struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ return 0;
+}
+
+static int rtw_mp_ioctl_hdl(struct net_device *dev, struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ int ret = 0;
+ return ret;
+}
+
+static int rtw_get_ap_info(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ int ret = 0;
+ u32 cnt = 0, wpa_ielen;
+ struct list_head *plist, *phead;
+ unsigned char *pbuf;
+ u8 bssid[ETH_ALEN];
+ char data[32];
+ struct wlan_network *pnetwork = NULL;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct __queue *queue = &(pmlmepriv->scanned_queue);
+ struct iw_point *pdata = &wrqu->data;
+
+ DBG_871X("+rtw_get_aplist_info\n");
+
+ if ((padapter->bDriverStopped) || (pdata == NULL))
+ {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ while ((check_fwstate(pmlmepriv, (_FW_UNDER_SURVEY|_FW_UNDER_LINKING))) == true)
+ {
+ msleep(30);
+ cnt++;
+ if (cnt > 100)
+ break;
+ }
+
+
+ /* pdata->length = 0;? */
+ pdata->flags = 0;
+ if (pdata->length >= 32)
+ {
+ if (copy_from_user(data, pdata->pointer, 32))
+ {
+ ret = -EINVAL;
+ goto exit;
+ }
+ }
+ else
+ {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ spin_lock_bh(&(pmlmepriv->scanned_queue.lock));
+
+ phead = get_list_head(queue);
+ plist = get_next(phead);
+
+ while (1)
+ {
+ if (phead == plist)
+ break;
+
+
+ pnetwork = LIST_CONTAINOR(plist, struct wlan_network, list);
+
+ /* if (hwaddr_aton_i(pdata->pointer, bssid)) */
+ if (hwaddr_aton_i(data, bssid))
+ {
+ DBG_871X("Invalid BSSID '%s'.\n", (u8 *)data);
+ spin_unlock_bh(&(pmlmepriv->scanned_queue.lock));
+ return -EINVAL;
+ }
+
+
+ if (!memcmp(bssid, pnetwork->network.MacAddress, ETH_ALEN))/* BSSID match, then check if supporting wpa/wpa2 */
+ {
+ DBG_871X("BSSID:" MAC_FMT "\n", MAC_ARG(bssid));
+
+ pbuf = rtw_get_wpa_ie(&pnetwork->network.IEs[12], &wpa_ielen, pnetwork->network.IELength-12);
+ if (pbuf && (wpa_ielen > 0))
+ {
+ pdata->flags = 1;
+ break;
+ }
+
+ pbuf = rtw_get_wpa2_ie(&pnetwork->network.IEs[12], &wpa_ielen, pnetwork->network.IELength-12);
+ if (pbuf && (wpa_ielen > 0))
+ {
+ pdata->flags = 2;
+ break;
+ }
+
+ }
+
+ plist = get_next(plist);
+
+ }
+
+ spin_unlock_bh(&(pmlmepriv->scanned_queue.lock));
+
+ if (pdata->length >= 34)
+ {
+ if (copy_to_user((u8 __force __user *)pdata->pointer+32, (u8 *)&pdata->flags, 1))
+ {
+ ret = -EINVAL;
+ goto exit;
+ }
+ }
+
+exit:
+
+ return ret;
+
+}
+
+static int rtw_set_pid(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+
+ int ret = 0;
+ struct adapter *padapter = rtw_netdev_priv(dev);
+ int *pdata = (int *)wrqu;
+ int selector;
+
+ if ((padapter->bDriverStopped) || (pdata == NULL))
+ {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ selector = *pdata;
+ if (selector < 3 && selector >= 0) {
+ padapter->pid[selector] = *(pdata+1);
+ DBG_871X("%s set pid[%d]=%d\n", __func__, selector , padapter->pid[selector]);
+ }
+ else
+ DBG_871X("%s selector %d error\n", __func__, selector);
+
+exit:
+
+ return ret;
+
+}
+
+static int rtw_wps_start(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+
+ int ret = 0;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct iw_point *pdata = &wrqu->data;
+ u32 u32wps_start = 0;
+ unsigned int uintRet = 0;
+
+ if ((true == padapter->bDriverStopped) || (true == padapter->bSurpriseRemoved) || (NULL == pdata))
+ {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ uintRet = copy_from_user((void*) &u32wps_start, pdata->pointer, 4);
+ if (u32wps_start == 0)
+ {
+ u32wps_start = *extra;
+ }
+
+ DBG_871X("[%s] wps_start = %d\n", __func__, u32wps_start);
+
+#ifdef CONFIG_INTEL_WIDI
+ process_intel_widi_wps_status(padapter, u32wps_start);
+#endif /* CONFIG_INTEL_WIDI */
+
+exit:
+
+ return ret;
+
+}
+
+static int rtw_p2p_set(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+
+ int ret = 0;
+
+ return ret;
+
+}
+
+static int rtw_p2p_get(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+
+ int ret = 0;
+
+ return ret;
+
+}
+
+static int rtw_p2p_get2(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+
+ int ret = 0;
+
+ return ret;
+
+}
+
+static int rtw_rereg_nd_name(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ int ret = 0;
+ struct adapter *padapter = rtw_netdev_priv(dev);
+ struct rereg_nd_name_data *rereg_priv = &padapter->rereg_nd_name_priv;
+ char new_ifname[IFNAMSIZ];
+
+ if (rereg_priv->old_ifname[0] == 0) {
+ char *reg_ifname;
+ reg_ifname = padapter->registrypriv.ifname;
+
+ strncpy(rereg_priv->old_ifname, reg_ifname, IFNAMSIZ);
+ rereg_priv->old_ifname[IFNAMSIZ-1] = 0;
+ }
+
+ /* DBG_871X("%s wrqu->data.length:%d\n", __func__, wrqu->data.length); */
+ if (wrqu->data.length > IFNAMSIZ)
+ return -EFAULT;
+
+ if (copy_from_user(new_ifname, wrqu->data.pointer, IFNAMSIZ)) {
+ return -EFAULT;
+ }
+
+ if (0 == strcmp(rereg_priv->old_ifname, new_ifname)) {
+ return ret;
+ }
+
+ DBG_871X("%s new_ifname:%s\n", __func__, new_ifname);
+ if (0 != (ret = rtw_change_ifname(padapter, new_ifname))) {
+ goto exit;
+ }
+
+ strncpy(rereg_priv->old_ifname, new_ifname, IFNAMSIZ);
+ rereg_priv->old_ifname[IFNAMSIZ-1] = 0;
+
+ if (!memcmp(new_ifname, "disable%d", 9)) {
+
+ DBG_871X("%s disable\n", __func__);
+ /* free the network queue for Android's timing issue */
+ rtw_free_network_queue(padapter, true);
+
+ /* the interface is being "disabled", we can do deeper IPS */
+ /* rereg_priv->old_ips_mode = rtw_get_ips_mode_req(&padapter->pwrctrlpriv); */
+ /* rtw_ips_mode_req(&padapter->pwrctrlpriv, IPS_NORMAL); */
+ }
+exit:
+ return ret;
+
+}
+
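+/*
+ * rtw_dbg_port command word layout, as decoded below:
+ *   bits 31..24 = major_cmd, bits 23..16 = minor_cmd, bits 15..0 = arg;
+ *   a second 32-bit word supplies extra_arg.
+ * e.g. the value 0x79010005 selects major_cmd 0x79, minor_cmd 0x01, arg 5.
+ */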
+static int rtw_dbg_port(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ int ret = 0;
+ u8 major_cmd, minor_cmd;
+ u16 arg;
+ u32 extra_arg, *pdata, val32;
+ struct sta_info *psta;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct wlan_network *cur_network = &(pmlmepriv->cur_network);
+ struct sta_priv *pstapriv = &padapter->stapriv;
+
+
+ pdata = (u32*)&wrqu->data;
+
+ val32 = *pdata;
+ arg = (u16)(val32&0x0000ffff);
+ major_cmd = (u8)(val32>>24);
+ minor_cmd = (u8)((val32>>16)&0x00ff);
+
+ extra_arg = *(pdata+1);
+
+ switch (major_cmd)
+ {
+ case 0x70:/* read_reg */
+ switch (minor_cmd)
+ {
+ case 1:
+ DBG_871X("rtw_read8(0x%x) = 0x%02x\n", arg, rtw_read8(padapter, arg));
+ break;
+ case 2:
+ DBG_871X("rtw_read16(0x%x) = 0x%04x\n", arg, rtw_read16(padapter, arg));
+ break;
+ case 4:
+ DBG_871X("rtw_read32(0x%x) = 0x%08x\n", arg, rtw_read32(padapter, arg));
+ break;
+ }
+ break;
+ case 0x71:/* write_reg */
+ switch (minor_cmd)
+ {
+ case 1:
+ rtw_write8(padapter, arg, extra_arg);
+ DBG_871X("rtw_write8(0x%x) = 0x%02x\n", arg, rtw_read8(padapter, arg));
+ break;
+ case 2:
+ rtw_write16(padapter, arg, extra_arg);
+ DBG_871X("rtw_write16(0x%x) = 0x%04x\n", arg, rtw_read16(padapter, arg));
+ break;
+ case 4:
+ rtw_write32(padapter, arg, extra_arg);
+ DBG_871X("rtw_write32(0x%x) = 0x%08x\n", arg, rtw_read32(padapter, arg));
+ break;
+ }
+ break;
+ case 0x72:/* read_bb */
+ DBG_871X("read_bbreg(0x%x) = 0x%x\n", arg, rtw_hal_read_bbreg(padapter, arg, 0xffffffff));
+ break;
+ case 0x73:/* write_bb */
+ rtw_hal_write_bbreg(padapter, arg, 0xffffffff, extra_arg);
+ DBG_871X("write_bbreg(0x%x) = 0x%x\n", arg, rtw_hal_read_bbreg(padapter, arg, 0xffffffff));
+ break;
+ case 0x74:/* read_rf */
+ DBG_871X("read RF_reg path(0x%02x), offset(0x%x), value(0x%08x)\n", minor_cmd, arg, rtw_hal_read_rfreg(padapter, minor_cmd, arg, 0xffffffff));
+ break;
+ case 0x75:/* write_rf */
+ rtw_hal_write_rfreg(padapter, minor_cmd, arg, 0xffffffff, extra_arg);
+ DBG_871X("write RF_reg path(0x%02x), offset(0x%x), value(0x%08x)\n", minor_cmd, arg, rtw_hal_read_rfreg(padapter, minor_cmd, arg, 0xffffffff));
+ break;
+
+ case 0x76:
+ switch (minor_cmd)
+ {
+ case 0x00: /* normal mode, */
+ padapter->recvpriv.is_signal_dbg = 0;
+ break;
+ case 0x01: /* dbg mode */
+ padapter->recvpriv.is_signal_dbg = 1;
+ extra_arg = extra_arg > 100 ? 100 : extra_arg;
+ padapter->recvpriv.signal_strength_dbg = extra_arg;
+ break;
+ }
+ break;
+ case 0x78: /* IOL test */
+ break;
+ case 0x79:
+ {
+ /*
+ * dbg 0x79000000 [value], set RESP_TXAGC to + value, value:0~15
+ * dbg 0x79010000 [value], set RESP_TXAGC to - value, value:0~15
+ */
+ u8 value = extra_arg & 0x0f;
+ u8 sign = minor_cmd;
+ u16 write_value = 0;
+
+ DBG_871X("%s set RESP_TXAGC to %s %u\n", __func__, sign?"minus":"plus", value);
+
+ if (sign)
+ value = value | 0x10;
+
+ write_value = value | (value << 5);
+ rtw_write16(padapter, 0x6d9, write_value);
+ }
+ break;
+ case 0x7a:
+ receive_disconnect(padapter, pmlmeinfo->network.MacAddress
+ , WLAN_REASON_EXPIRATION_CHK);
+ break;
+ case 0x7F:
+ switch (minor_cmd)
+ {
+ case 0x0:
+ DBG_871X("fwstate = 0x%x\n", get_fwstate(pmlmepriv));
+ break;
+ case 0x01:
+ DBG_871X("minor_cmd 0x%x\n", minor_cmd);
+ break;
+ case 0x02:
+ DBG_871X("pmlmeinfo->state = 0x%x\n", pmlmeinfo->state);
+ DBG_871X("DrvBcnEarly =%d\n", pmlmeext->DrvBcnEarly);
+ DBG_871X("DrvBcnTimeOut =%d\n", pmlmeext->DrvBcnTimeOut);
+ break;
+ case 0x03:
+ DBG_871X("qos_option =%d\n", pmlmepriv->qospriv.qos_option);
+ DBG_871X("ht_option =%d\n", pmlmepriv->htpriv.ht_option);
+ break;
+ case 0x04:
+ DBG_871X("cur_ch =%d\n", pmlmeext->cur_channel);
+ DBG_871X("cur_bw =%d\n", pmlmeext->cur_bwmode);
+ DBG_871X("cur_ch_off =%d\n", pmlmeext->cur_ch_offset);
+
+ DBG_871X("oper_ch =%d\n", rtw_get_oper_ch(padapter));
+ DBG_871X("oper_bw =%d\n", rtw_get_oper_bw(padapter));
+ DBG_871X("oper_ch_offet =%d\n", rtw_get_oper_choffset(padapter));
+
+ break;
+ case 0x05:
+ psta = rtw_get_stainfo(pstapriv, cur_network->network.MacAddress);
+ if (psta)
+ {
+ int i;
+ struct recv_reorder_ctrl *preorder_ctrl;
+
+ DBG_871X("SSID =%s\n", cur_network->network.Ssid.Ssid);
+ DBG_871X("sta's macaddr:" MAC_FMT "\n", MAC_ARG(psta->hwaddr));
+ DBG_871X("cur_channel =%d, cur_bwmode =%d, cur_ch_offset =%d\n", pmlmeext->cur_channel, pmlmeext->cur_bwmode, pmlmeext->cur_ch_offset);
+ DBG_871X("rtsen =%d, cts2slef =%d\n", psta->rtsen, psta->cts2self);
+ DBG_871X("state = 0x%x, aid =%d, macid =%d, raid =%d\n", psta->state, psta->aid, psta->mac_id, psta->raid);
+ DBG_871X("qos_en =%d, ht_en =%d, init_rate =%d\n", psta->qos_option, psta->htpriv.ht_option, psta->init_rate);
+ DBG_871X("bwmode =%d, ch_offset =%d, sgi_20m =%d, sgi_40m =%d\n", psta->bw_mode, psta->htpriv.ch_offset, psta->htpriv.sgi_20m, psta->htpriv.sgi_40m);
+ DBG_871X("ampdu_enable = %d\n", psta->htpriv.ampdu_enable);
+ DBG_871X("agg_enable_bitmap =%x, candidate_tid_bitmap =%x\n", psta->htpriv.agg_enable_bitmap, psta->htpriv.candidate_tid_bitmap);
+
+ for (i = 0;i<16;i++)
+ {
+ preorder_ctrl = &psta->recvreorder_ctrl[i];
+ if (preorder_ctrl->enable)
+ {
+ DBG_871X("tid =%d, indicate_seq =%d\n", i, preorder_ctrl->indicate_seq);
+ }
+ }
+
+ }
+ else
+ {
+ DBG_871X("can't get sta's macaddr, cur_network's macaddr:" MAC_FMT "\n", MAC_ARG(cur_network->network.MacAddress));
+ }
+ break;
+ case 0x06:
+ {
+ u32 ODMFlag;
+ rtw_hal_get_hwreg(padapter, HW_VAR_DM_FLAG, (u8 *)(&ODMFlag));
+ DBG_871X("(B)DMFlag = 0x%x, arg = 0x%x\n", ODMFlag, arg);
+ ODMFlag = (u32)(0x0f&arg);
+ DBG_871X("(A)DMFlag = 0x%x\n", ODMFlag);
+ rtw_hal_set_hwreg(padapter, HW_VAR_DM_FLAG, (u8 *)(&ODMFlag));
+ }
+ break;
+ case 0x07:
+ DBG_871X("bSurpriseRemoved =%d, bDriverStopped =%d\n",
+ padapter->bSurpriseRemoved, padapter->bDriverStopped);
+ break;
+ case 0x08:
+ {
+ DBG_871X("minor_cmd 0x%x\n", minor_cmd);
+ }
+ break;
+ case 0x09:
+ {
+ int i, j;
+ struct list_head *plist, *phead;
+ struct recv_reorder_ctrl *preorder_ctrl;
+
+ DBG_871X("sta_dz_bitmap = 0x%x, tim_bitmap = 0x%x\n", pstapriv->sta_dz_bitmap, pstapriv->tim_bitmap);
+
+ spin_lock_bh(&pstapriv->sta_hash_lock);
+
+ for (i = 0; i< NUM_STA; i++)
+ {
+ phead = &(pstapriv->sta_hash[i]);
+ plist = get_next(phead);
+
+ while (phead != plist)
+ {
+ psta = LIST_CONTAINOR(plist, struct sta_info, hash_list);
+
+ plist = get_next(plist);
+
+ if (extra_arg == psta->aid)
+ {
+ DBG_871X("sta's macaddr:" MAC_FMT "\n", MAC_ARG(psta->hwaddr));
+ DBG_871X("rtsen =%d, cts2slef =%d\n", psta->rtsen, psta->cts2self);
+ DBG_871X("state = 0x%x, aid =%d, macid =%d, raid =%d\n", psta->state, psta->aid, psta->mac_id, psta->raid);
+ DBG_871X("qos_en =%d, ht_en =%d, init_rate =%d\n", psta->qos_option, psta->htpriv.ht_option, psta->init_rate);
+ DBG_871X("bwmode =%d, ch_offset =%d, sgi_20m =%d, sgi_40m =%d\n", psta->bw_mode, psta->htpriv.ch_offset, psta->htpriv.sgi_20m, psta->htpriv.sgi_40m);
+ DBG_871X("ampdu_enable = %d\n", psta->htpriv.ampdu_enable);
+ DBG_871X("agg_enable_bitmap =%x, candidate_tid_bitmap =%x\n", psta->htpriv.agg_enable_bitmap, psta->htpriv.candidate_tid_bitmap);
+ DBG_871X("capability = 0x%x\n", psta->capability);
+ DBG_871X("flags = 0x%x\n", psta->flags);
+ DBG_871X("wpa_psk = 0x%x\n", psta->wpa_psk);
+ DBG_871X("wpa2_group_cipher = 0x%x\n", psta->wpa2_group_cipher);
+ DBG_871X("wpa2_pairwise_cipher = 0x%x\n", psta->wpa2_pairwise_cipher);
+ DBG_871X("qos_info = 0x%x\n", psta->qos_info);
+ DBG_871X("dot118021XPrivacy = 0x%x\n", psta->dot118021XPrivacy);
+
+
+
+ for (j = 0;j<16;j++)
+ {
+ preorder_ctrl = &psta->recvreorder_ctrl[j];
+ if (preorder_ctrl->enable)
+ {
+ DBG_871X("tid =%d, indicate_seq =%d\n", j, preorder_ctrl->indicate_seq);
+ }
+ }
+
+ }
+
+ }
+ }
+
+ spin_unlock_bh(&pstapriv->sta_hash_lock);
+
+ }
+ break;
+ case 0x0a:
+ {
+ int max_mac_id = 0;
+ max_mac_id = rtw_search_max_mac_id(padapter);
+ printk("%s ==> max_mac_id = %d\n", __func__, max_mac_id);
+ }
+ break;
+ case 0x0b: /* Enable (1) / disable (0) driver control of virtual carrier sense. */
+ if (arg == 0) {
+ DBG_871X("disable driver ctrl vcs\n");
+ padapter->driver_vcs_en = 0;
+ }
+ else if (arg == 1) {
+ DBG_871X("enable driver ctrl vcs = %d\n", extra_arg);
+ padapter->driver_vcs_en = 1;
+
+ if (extra_arg>2)
+ padapter->driver_vcs_type = 1;
+ else
+ padapter->driver_vcs_type = extra_arg;
+ }
+ break;
+ case 0x0c:/* dump rx/tx packet */
+ {
+ if (arg == 0) {
+ DBG_871X("dump rx packet (%d)\n", extra_arg);
+ /* pHalData->bDumpRxPkt =extra_arg; */
+ rtw_hal_set_def_var(padapter, HAL_DEF_DBG_DUMP_RXPKT, &(extra_arg));
+ }
+ else if (arg == 1) {
+ DBG_871X("dump tx packet (%d)\n", extra_arg);
+ rtw_hal_set_def_var(padapter, HAL_DEF_DBG_DUMP_TXPKT, &(extra_arg));
+ }
+ }
+ break;
+ case 0x0e:
+ {
+ if (arg == 0) {
+ DBG_871X("disable driver ctrl rx_ampdu_factor\n");
+ padapter->driver_rx_ampdu_factor = 0xFF;
+ }
+ else if (arg == 1) {
+
+ DBG_871X("enable driver ctrl rx_ampdu_factor = %d\n", extra_arg);
+
+ if ((extra_arg & 0x03) > 0x03)
+ padapter->driver_rx_ampdu_factor = 0xFF;
+ else
+ padapter->driver_rx_ampdu_factor = extra_arg;
+ }
+ }
+ break;
+
+ case 0x10:/* driver version display */
+ dump_drv_version(RTW_DBGDUMP);
+ break;
+ case 0x11:/* dump linked status */
+ {
+ linked_info_dump(padapter, extra_arg);
+ }
+ break;
+ case 0x12: /* set rx_stbc */
+ {
+ struct registry_priv *pregpriv = &padapter->registrypriv;
+ /* 0: disable, bit(0): enable 2.4G, bit(1): enable 5G, 0x3: enable both 2.4G and 5G */
+ /* the default enables 2.4GHz only, due to an IOT issue with Buffalo's AP at 5GHz */
+ if (pregpriv && (extra_arg == 0 || extra_arg == 1|| extra_arg == 2 || extra_arg == 3))
+ {
+ pregpriv->rx_stbc = extra_arg;
+ DBG_871X("set rx_stbc =%d\n", pregpriv->rx_stbc);
+ }
+ else
+ DBG_871X("get rx_stbc =%d\n", pregpriv->rx_stbc);
+
+ }
+ break;
+ case 0x13: /* set ampdu_enable */
+ {
+ struct registry_priv *pregpriv = &padapter->registrypriv;
+ /* 0: disable, 0x1:enable (but wifi_spec should be 0), 0x2: force enable (don't care wifi_spec) */
+ if (pregpriv && extra_arg < 3)
+ {
+ pregpriv->ampdu_enable = extra_arg;
+ DBG_871X("set ampdu_enable =%d\n", pregpriv->ampdu_enable);
+ }
+ else
+ DBG_871X("get ampdu_enable =%d\n", pregpriv->ampdu_enable);
+
+ }
+ break;
+ case 0x14:
+ {
+ DBG_871X("minor_cmd 0x%x\n", minor_cmd);
+ }
+ break;
+ case 0x16:
+ {
+ if (arg == 0xff) {
+ rtw_odm_dbg_comp_msg(RTW_DBGDUMP, padapter);
+ }
+ else {
+ u64 dbg_comp = (u64)extra_arg;
+ rtw_odm_dbg_comp_set(padapter, dbg_comp);
+ }
+ }
+ break;
+#ifdef DBG_FIXED_CHAN
+ case 0x17:
+ {
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+ printk("===> Fixed channel to %d\n", extra_arg);
+ pmlmeext->fixed_chan = extra_arg;
+
+ }
+ break;
+#endif
+ case 0x18:
+ {
+ printk("===> Switch USB Mode %d\n", extra_arg);
+ rtw_hal_set_hwreg(padapter, HW_VAR_USB_MODE, (u8 *)&extra_arg);
+ }
+ break;
+ case 0x19:
+ {
+ struct registry_priv *pregistrypriv = &padapter->registrypriv;
+ /* extra_arg : */
+ /* BIT0: Enable VHT LDPC Rx, BIT1: Enable VHT LDPC Tx, */
+ /* BIT4: Enable HT LDPC Rx, BIT5: Enable HT LDPC Tx */
+ if (arg == 0) {
+ DBG_871X("driver disable LDPC\n");
+ pregistrypriv->ldpc_cap = 0x00;
+ }
+ else if (arg == 1) {
+ DBG_871X("driver set LDPC cap = 0x%x\n", extra_arg);
+ pregistrypriv->ldpc_cap = (u8)(extra_arg&0x33);
+ }
+ }
+ break;
+ case 0x1a:
+ {
+ struct registry_priv *pregistrypriv = &padapter->registrypriv;
+ /* extra_arg : */
+ /* BIT0: Enable VHT STBC Rx, BIT1: Enable VHT STBC Tx, */
+ /* BIT4: Enable HT STBC Rx, BIT5: Enable HT STBC Tx */
+ if (arg == 0) {
+ DBG_871X("driver disable STBC\n");
+ pregistrypriv->stbc_cap = 0x00;
+ }
+ else if (arg == 1) {
+ DBG_871X("driver set STBC cap = 0x%x\n", extra_arg);
+ pregistrypriv->stbc_cap = (u8)(extra_arg&0x33);
+ }
+ }
+ break;
+ case 0x1b:
+ {
+ struct registry_priv *pregistrypriv = &padapter->registrypriv;
+
+ if (arg == 0) {
+ DBG_871X("disable driver ctrl max_rx_rate, reset to default_rate_set\n");
+ init_mlme_default_rate_set(padapter);
+ pregistrypriv->ht_enable = (u8)rtw_ht_enable;
+ }
+ else if (arg == 1) {
+
+ int i;
+ u8 max_rx_rate;
+
+ DBG_871X("enable driver ctrl max_rx_rate = 0x%x\n", extra_arg);
+
+ max_rx_rate = (u8)extra_arg;
+
+ if (max_rx_rate < 0xc) /* max_rx_rate < MCS0 -> B or G -> disable HT */
+ {
+ pregistrypriv->ht_enable = 0;
+ for (i = 0; i<NumRates; i++)
+ {
+ if (pmlmeext->datarate[i] > max_rx_rate)
+ pmlmeext->datarate[i] = 0xff;
+ }
+
+ }
+ else if (max_rx_rate < 0x1c) /* mcs0~mcs15 */
+ {
+ u32 mcs_bitmap = 0x0;
+
+ for (i = 0; i<((max_rx_rate+1)-0xc); i++)
+ mcs_bitmap |= BIT(i);
+
+ set_mcs_rate_by_mask(pmlmeext->default_supported_mcs_set, mcs_bitmap);
+ }
+ }
+ }
+ break;
+ case 0x1c: /* enable/disable driver control AMPDU Density for peer sta's rx */
+ {
+ if (arg == 0) {
+ DBG_871X("disable driver ctrl ampdu density\n");
+ padapter->driver_ampdu_spacing = 0xFF;
+ }
+ else if (arg == 1) {
+
+ DBG_871X("enable driver ctrl ampdu density = %d\n", extra_arg);
+
+ if ((extra_arg & 0x07) > 0x07)
+ padapter->driver_ampdu_spacing = 0xFF;
+ else
+ padapter->driver_ampdu_spacing = extra_arg;
+ }
+ }
+ break;
+#ifdef CONFIG_BACKGROUND_NOISE_MONITOR
+ case 0x1e:
+ {
+ struct hal_com_data *pHalData = GET_HAL_DATA(padapter);
+ PDM_ODM_T pDM_Odm = &pHalData->odmpriv;
+ u8 chan = rtw_get_oper_ch(padapter);
+ DBG_871X("===========================================\n");
+ ODM_InbandNoise_Monitor(pDM_Odm, true, 0x1e, 100);
+ DBG_871X("channel(%d), noise_a = %d, noise_b = %d , noise_all:%d\n",
+ chan, pDM_Odm->noise_level.noise[ODM_RF_PATH_A],
+ pDM_Odm->noise_level.noise[ODM_RF_PATH_B],
+ pDM_Odm->noise_level.noise_all);
+ DBG_871X("===========================================\n");
+
+ }
+ break;
+#endif
+ case 0x23:
+ {
+ DBG_871X("turn %s the bNotifyChannelChange Variable\n", (extra_arg == 1)?"on":"off");
+ padapter->bNotifyChannelChange = extra_arg;
+ break;
+ }
+ case 0x24:
+ {
+ break;
+ }
+#ifdef CONFIG_GPIO_API
+ case 0x25: /* Get GPIO register */
+ {
+ /*
+ * dbg 0x7f250000 [gpio_num], Get gpio value, gpio_num:0~7
+ */
+
+ int value;
+ DBG_871X("Read GPIO Value extra_arg = %d\n", extra_arg);
+ value = rtw_get_gpio(dev, extra_arg);
+ DBG_871X("Read GPIO Value = %d\n", value);
+ break;
+ }
+ case 0x26: /* Set GPIO direction */
+ {
+
+ /* dbg 0x7f26000x [y], Set gpio direction,
+ * x: gpio_num, 4~7 y: indicate direction, 0~1
+ */
+
+ int value;
+ DBG_871X("Set GPIO Direction! arg = %d , extra_arg =%d\n", arg , extra_arg);
+ value = rtw_config_gpio(dev, arg, extra_arg);
+ DBG_871X("Set GPIO Direction %s\n", (value ==-1)?"Fail!!!":"Success");
+ break;
+ }
+ case 0x27: /* Set GPIO output direction value */
+ {
+ /*
+ * dbg 0x7f27000x [y], Set gpio output direction value,
+ * x: gpio_num, 4~7 y: indicate direction, 0~1
+ */
+
+ int value;
+ DBG_871X("Set GPIO Value! arg = %d , extra_arg =%d\n", arg , extra_arg);
+ value = rtw_set_gpio_output_value(dev, arg, extra_arg);
+ DBG_871X("Set GPIO Value %s\n", (value ==-1)?"Fail!!!":"Success");
+ break;
+ }
+#endif
+ case 0xaa:
+ {
+ if ((extra_arg & 0x7F) > 0x3F)
+ extra_arg = 0xFF;
+ DBG_871X("chang data rate to :0x%02x\n", extra_arg);
+ padapter->fix_rate = extra_arg;
+ }
+ break;
+ case 0xdd:/* registers dump , 0 for mac reg, 1 for bb reg, 2 for rf reg */
+ {
+ if (extra_arg == 0) {
+ mac_reg_dump(RTW_DBGDUMP, padapter);
+ }
+ else if (extra_arg == 1) {
+ bb_reg_dump(RTW_DBGDUMP, padapter);
+ }
+ else if (extra_arg == 2) {
+ rf_reg_dump(RTW_DBGDUMP, padapter);
+ }
+ }
+ break;
+
+ case 0xee:/* turn on/off dynamic funcs */
+ {
+ u32 odm_flag;
+
+ if (0xf == extra_arg) {
+ rtw_hal_get_def_var(padapter, HAL_DEF_DBG_DM_FUNC, &odm_flag);
+ DBG_871X(" === DMFlag(0x%08x) ===\n", odm_flag);
+ DBG_871X("extra_arg = 0 - disable all dynamic func\n");
+ DBG_871X("extra_arg = 1 - disable DIG- BIT(0)\n");
+ DBG_871X("extra_arg = 2 - disable High power - BIT(1)\n");
+ DBG_871X("extra_arg = 3 - disable tx power tracking - BIT(2)\n");
+ DBG_871X("extra_arg = 4 - disable BT coexistence - BIT(3)\n");
+ DBG_871X("extra_arg = 5 - disable antenna diversity - BIT(4)\n");
+ DBG_871X("extra_arg = 6 - enable all dynamic func\n");
+ }
+ else {
+ /* extra_arg = 0 - disable all dynamic func
+ * extra_arg = 1 - disable DIG
+ * extra_arg = 2 - disable tx power tracking
+ * extra_arg = 3 - turn on all dynamic func
+ */
+ rtw_hal_set_def_var(padapter, HAL_DEF_DBG_DM_FUNC, &(extra_arg));
+ rtw_hal_get_def_var(padapter, HAL_DEF_DBG_DM_FUNC, &odm_flag);
+ DBG_871X(" === DMFlag(0x%08x) ===\n", odm_flag);
+ }
+ }
+ break;
+
+ case 0xfd:
+ rtw_write8(padapter, 0xc50, arg);
+ DBG_871X("wr(0xc50) = 0x%x\n", rtw_read8(padapter, 0xc50));
+ rtw_write8(padapter, 0xc58, arg);
+ DBG_871X("wr(0xc58) = 0x%x\n", rtw_read8(padapter, 0xc58));
+ break;
+ case 0xfe:
+ DBG_871X("rd(0xc50) = 0x%x\n", rtw_read8(padapter, 0xc50));
+ DBG_871X("rd(0xc58) = 0x%x\n", rtw_read8(padapter, 0xc58));
+ break;
+ case 0xff:
+ {
+ DBG_871X("dbg(0x210) = 0x%x\n", rtw_read32(padapter, 0x210));
+ DBG_871X("dbg(0x608) = 0x%x\n", rtw_read32(padapter, 0x608));
+ DBG_871X("dbg(0x280) = 0x%x\n", rtw_read32(padapter, 0x280));
+ DBG_871X("dbg(0x284) = 0x%x\n", rtw_read32(padapter, 0x284));
+ DBG_871X("dbg(0x288) = 0x%x\n", rtw_read32(padapter, 0x288));
+
+ DBG_871X("dbg(0x664) = 0x%x\n", rtw_read32(padapter, 0x664));
+
+
+ DBG_871X("\n");
+
+ DBG_871X("dbg(0x430) = 0x%x\n", rtw_read32(padapter, 0x430));
+ DBG_871X("dbg(0x438) = 0x%x\n", rtw_read32(padapter, 0x438));
+
+ DBG_871X("dbg(0x440) = 0x%x\n", rtw_read32(padapter, 0x440));
+
+ DBG_871X("dbg(0x458) = 0x%x\n", rtw_read32(padapter, 0x458));
+
+ DBG_871X("dbg(0x484) = 0x%x\n", rtw_read32(padapter, 0x484));
+ DBG_871X("dbg(0x488) = 0x%x\n", rtw_read32(padapter, 0x488));
+
+ DBG_871X("dbg(0x444) = 0x%x\n", rtw_read32(padapter, 0x444));
+ DBG_871X("dbg(0x448) = 0x%x\n", rtw_read32(padapter, 0x448));
+ DBG_871X("dbg(0x44c) = 0x%x\n", rtw_read32(padapter, 0x44c));
+ DBG_871X("dbg(0x450) = 0x%x\n", rtw_read32(padapter, 0x450));
+ }
+ break;
+ }
+ break;
+ default:
+ DBG_871X("error dbg cmd!\n");
+ break;
+ }
+
+
+ return ret;
+
+}
+
+static int wpa_set_param(struct net_device *dev, u8 name, u32 value)
+{
+ uint ret = 0;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+
+ switch (name) {
+ case IEEE_PARAM_WPA_ENABLED:
+
+ padapter->securitypriv.dot11AuthAlgrthm = dot11AuthAlgrthm_8021X; /* 802.1x */
+
+ /* ret = ieee80211_wpa_enable(ieee, value); */
+
+ switch ((value)&0xff)
+ {
+ case 1 : /* WPA */
+ padapter->securitypriv.ndisauthtype = Ndis802_11AuthModeWPAPSK; /* WPA_PSK */
+ padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption2Enabled;
+ break;
+ case 2: /* WPA2 */
+ padapter->securitypriv.ndisauthtype = Ndis802_11AuthModeWPA2PSK; /* WPA2_PSK */
+ padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption3Enabled;
+ break;
+ }
+
+ RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_info_, ("wpa_set_param:padapter->securitypriv.ndisauthtype =%d\n", padapter->securitypriv.ndisauthtype));
+
+ break;
+
+ case IEEE_PARAM_TKIP_COUNTERMEASURES:
+ /* ieee->tkip_countermeasures =value; */
+ break;
+
+ case IEEE_PARAM_DROP_UNENCRYPTED:
+ {
+ /* HACK:
+ *
+ * wpa_supplicant calls set_wpa_enabled when the driver
+ * is loaded and unloaded, regardless of whether WPA is
+ * being used. No other calls are made that could be used
+ * to determine whether encryption will be used before
+ * association is expected. If encryption is not being
+ * used, drop_unencrypted is set to false, else true -- we
+ * can use this to determine if the CAP_PRIVACY_ON bit should
+ * be set.
+ */
+ break;
+
+ }
+ case IEEE_PARAM_PRIVACY_INVOKED:
+
+ /* ieee->privacy_invoked =value; */
+
+ break;
+
+ case IEEE_PARAM_AUTH_ALGS:
+
+ ret = wpa_set_auth_algs(dev, value);
+
+ break;
+
+ case IEEE_PARAM_IEEE_802_1X:
+
+ /* ieee->ieee802_1x =value; */
+
+ break;
+
+ case IEEE_PARAM_WPAX_SELECT:
+
+ /* added for WPA2 mixed mode */
+ /* DBG_871X(KERN_WARNING "------------------------>wpax value = %x\n", value); */
+ /*
+ spin_lock_irqsave(&ieee->wpax_suitlist_lock, flags);
+ ieee->wpax_type_set = 1;
+ ieee->wpax_type_notify = value;
+ spin_unlock_irqrestore(&ieee->wpax_suitlist_lock, flags);
+ */
+
+ break;
+
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+
+ }
+
+ return ret;
+
+}
+
+static int wpa_mlme(struct net_device *dev, u32 command, u32 reason)
+{
+ int ret = 0;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+
+ switch (command)
+ {
+ case IEEE_MLME_STA_DEAUTH:
+
+ if (!rtw_set_802_11_disassociate(padapter))
+ ret = -1;
+
+ break;
+
+ case IEEE_MLME_STA_DISASSOC:
+
+ if (!rtw_set_802_11_disassociate(padapter))
+ ret = -1;
+
+ break;
+
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ return ret;
+
+}
+
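+/*
+ * Dispatcher for RTL_IOCTL_WPA_SUPPLICANT requests (typically issued by a
+ * wpa_supplicant build that uses the Realtek/wext private driver interface).
+ * The user-space struct ieee_param is copied in, dispatched on param->cmd,
+ * and copied back on success so query results reach user space again.
+ */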
+static int wpa_supplicant_ioctl(struct net_device *dev, struct iw_point *p)
+{
+ struct ieee_param *param;
+ uint ret = 0;
+
+ /* down(&ieee->wx_sem); */
+
+ if (p->length < sizeof(struct ieee_param) || !p->pointer) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ param = (struct ieee_param *)rtw_malloc(p->length);
+ if (param == NULL)
+ {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ if (copy_from_user(param, p->pointer, p->length))
+ {
+ kfree((u8 *)param);
+ ret = -EFAULT;
+ goto out;
+ }
+
+ switch (param->cmd) {
+
+ case IEEE_CMD_SET_WPA_PARAM:
+ ret = wpa_set_param(dev, param->u.wpa_param.name, param->u.wpa_param.value);
+ break;
+
+ case IEEE_CMD_SET_WPA_IE:
+ /* ret = wpa_set_wpa_ie(dev, param, p->length); */
+ ret = rtw_set_wpa_ie((struct adapter *)rtw_netdev_priv(dev), (char*)param->u.wpa_ie.data, (u16)param->u.wpa_ie.len);
+ break;
+
+ case IEEE_CMD_SET_ENCRYPTION:
+ ret = wpa_set_encryption(dev, param, p->length);
+ break;
+
+ case IEEE_CMD_MLME:
+ ret = wpa_mlme(dev, param->u.mlme.command, param->u.mlme.reason_code);
+ break;
+
+ default:
+ DBG_871X("Unknown WPA supplicant request: %d\n", param->cmd);
+ ret = -EOPNOTSUPP;
+ break;
+
+ }
+
+ if (ret == 0 && copy_to_user(p->pointer, param, p->length))
+ ret = -EFAULT;
+
+ kfree((u8 *)param);
+
+out:
+
+ /* up(&ieee->wx_sem); */
+
+ return ret;
+
+}
+
+static int rtw_set_encryption(struct net_device *dev, struct ieee_param *param, u32 param_len)
+{
+ int ret = 0;
+ u32 wep_key_idx, wep_key_len, wep_total_len;
+ struct ndis_802_11_wep *pwep = NULL;
+ struct sta_info *psta = NULL, *pbcmc_sta = NULL;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct security_priv *psecuritypriv = &(padapter->securitypriv);
+ struct sta_priv *pstapriv = &padapter->stapriv;
+
+ DBG_871X("%s\n", __func__);
+
+ param->u.crypt.err = 0;
+ param->u.crypt.alg[IEEE_CRYPT_ALG_NAME_LEN - 1] = '\0';
+
+ /* sizeof(struct ieee_param) = 64 bytes; */
+ /* if (param_len != (u32) ((u8 *) param->u.crypt.key - (u8 *) param) + param->u.crypt.key_len) */
+ if (param_len != sizeof(struct ieee_param) + param->u.crypt.key_len)
+ {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ if (param->sta_addr[0] == 0xff && param->sta_addr[1] == 0xff &&
+ param->sta_addr[2] == 0xff && param->sta_addr[3] == 0xff &&
+ param->sta_addr[4] == 0xff && param->sta_addr[5] == 0xff)
+ {
+ if (param->u.crypt.idx >= WEP_KEYS)
+ {
+ ret = -EINVAL;
+ goto exit;
+ }
+ }
+ else
+ {
+ psta = rtw_get_stainfo(pstapriv, param->sta_addr);
+ if (!psta)
+ {
+ /* ret = -EINVAL; */
+ DBG_871X("rtw_set_encryption(), sta has already been removed or never been added\n");
+ goto exit;
+ }
+ }
+
+ if (strcmp(param->u.crypt.alg, "none") == 0 && (psta == NULL))
+ {
+ /* todo:clear default encryption keys */
+
+ psecuritypriv->dot11AuthAlgrthm = dot11AuthAlgrthm_Open;
+ psecuritypriv->ndisencryptstatus = Ndis802_11EncryptionDisabled;
+ psecuritypriv->dot11PrivacyAlgrthm = _NO_PRIVACY_;
+ psecuritypriv->dot118021XGrpPrivacy = _NO_PRIVACY_;
+
+ DBG_871X("clear default encryption keys, keyid =%d\n", param->u.crypt.idx);
+
+ goto exit;
+ }
+
+
+ if (strcmp(param->u.crypt.alg, "WEP") == 0 && (psta == NULL))
+ {
+ DBG_871X("r871x_set_encryption, crypt.alg = WEP\n");
+
+ wep_key_idx = param->u.crypt.idx;
+ wep_key_len = param->u.crypt.key_len;
+
+ DBG_871X("r871x_set_encryption, wep_key_idx =%d, len =%d\n", wep_key_idx, wep_key_len);
+
+ if ((wep_key_idx >= WEP_KEYS) || (wep_key_len <= 0))
+ {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+
+ if (wep_key_len > 0)
+ {
+ wep_key_len = wep_key_len <= 5 ? 5 : 13;
+ wep_total_len = wep_key_len + FIELD_OFFSET(struct ndis_802_11_wep, KeyMaterial);
+ pwep = (struct ndis_802_11_wep *)rtw_malloc(wep_total_len);
+ if (pwep == NULL) {
+ DBG_871X(" r871x_set_encryption: pwep allocate fail !!!\n");
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ memset(pwep, 0, wep_total_len);
+
+ pwep->KeyLength = wep_key_len;
+ pwep->Length = wep_total_len;
+
+ }
+
+ pwep->KeyIndex = wep_key_idx;
+
+ memcpy(pwep->KeyMaterial, param->u.crypt.key, pwep->KeyLength);
+
+ if (param->u.crypt.set_tx)
+ {
+ DBG_871X("wep, set_tx = 1\n");
+
+ psecuritypriv->dot11AuthAlgrthm = dot11AuthAlgrthm_Auto;
+ psecuritypriv->ndisencryptstatus = Ndis802_11Encryption1Enabled;
+ psecuritypriv->dot11PrivacyAlgrthm = _WEP40_;
+ psecuritypriv->dot118021XGrpPrivacy = _WEP40_;
+
+ if (pwep->KeyLength == 13)
+ {
+ psecuritypriv->dot11PrivacyAlgrthm = _WEP104_;
+ psecuritypriv->dot118021XGrpPrivacy = _WEP104_;
+ }
+
+
+ psecuritypriv->dot11PrivacyKeyIndex = wep_key_idx;
+
+ memcpy(&(psecuritypriv->dot11DefKey[wep_key_idx].skey[0]), pwep->KeyMaterial, pwep->KeyLength);
+
+ psecuritypriv->dot11DefKeylen[wep_key_idx] = pwep->KeyLength;
+
+ rtw_ap_set_wep_key(padapter, pwep->KeyMaterial, pwep->KeyLength, wep_key_idx, 1);
+ }
+ else
+ {
+ DBG_871X("wep, set_tx = 0\n");
+
+ /* don't update "psecuritypriv->dot11PrivacyAlgrthm" and */
+ /* psecuritypriv->dot11PrivacyKeyIndex =keyid", but can rtw_set_key to cam */
+
+ memcpy(&(psecuritypriv->dot11DefKey[wep_key_idx].skey[0]), pwep->KeyMaterial, pwep->KeyLength);
+
+ psecuritypriv->dot11DefKeylen[wep_key_idx] = pwep->KeyLength;
+
+ rtw_ap_set_wep_key(padapter, pwep->KeyMaterial, pwep->KeyLength, wep_key_idx, 0);
+ }
+
+ goto exit;
+
+ }
+
+
+ if (!psta && check_fwstate(pmlmepriv, WIFI_AP_STATE)) /* group key */
+ {
+ if (param->u.crypt.set_tx == 1)
+ {
+ if (strcmp(param->u.crypt.alg, "WEP") == 0)
+ {
+ DBG_871X("%s, set group_key, WEP\n", __func__);
+
+ memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len>16 ?16:param->u.crypt.key_len));
+
+ psecuritypriv->dot118021XGrpPrivacy = _WEP40_;
+ if (param->u.crypt.key_len == 13)
+ {
+ psecuritypriv->dot118021XGrpPrivacy = _WEP104_;
+ }
+
+ }
+ else if (strcmp(param->u.crypt.alg, "TKIP") == 0)
+ {
+ DBG_871X("%s, set group_key, TKIP\n", __func__);
+
+ psecuritypriv->dot118021XGrpPrivacy = _TKIP_;
+
+ memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len>16 ?16:param->u.crypt.key_len));
+
+ /* DEBUG_ERR("set key length :param->u.crypt.key_len =%d\n", param->u.crypt.key_len); */
+ /* set mic key */
+ memcpy(psecuritypriv->dot118021XGrptxmickey[param->u.crypt.idx].skey, &(param->u.crypt.key[16]), 8);
+ memcpy(psecuritypriv->dot118021XGrprxmickey[param->u.crypt.idx].skey, &(param->u.crypt.key[24]), 8);
+
+ psecuritypriv->busetkipkey = true;
+
+ }
+ else if (strcmp(param->u.crypt.alg, "CCMP") == 0)
+ {
+ DBG_871X("%s, set group_key, CCMP\n", __func__);
+
+ psecuritypriv->dot118021XGrpPrivacy = _AES_;
+
+ memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len>16 ?16:param->u.crypt.key_len));
+ }
+ else
+ {
+ DBG_871X("%s, set group_key, none\n", __func__);
+
+ psecuritypriv->dot118021XGrpPrivacy = _NO_PRIVACY_;
+ }
+
+ psecuritypriv->dot118021XGrpKeyid = param->u.crypt.idx;
+
+ psecuritypriv->binstallGrpkey = true;
+
+ psecuritypriv->dot11PrivacyAlgrthm = psecuritypriv->dot118021XGrpPrivacy;/* */
+
+ rtw_ap_set_group_key(padapter, param->u.crypt.key, psecuritypriv->dot118021XGrpPrivacy, param->u.crypt.idx);
+
+ pbcmc_sta =rtw_get_bcmc_stainfo(padapter);
+ if (pbcmc_sta)
+ {
+ pbcmc_sta->ieee8021x_blocked = false;
+ pbcmc_sta->dot118021XPrivacy = psecuritypriv->dot118021XGrpPrivacy;/* rx will use bmc_sta's dot118021XPrivacy */
+ }
+
+ }
+
+ goto exit;
+
+ }
+
+ if (psecuritypriv->dot11AuthAlgrthm == dot11AuthAlgrthm_8021X && psta) /* psk/802_1x */
+ {
+ if (check_fwstate(pmlmepriv, WIFI_AP_STATE))
+ {
+ if (param->u.crypt.set_tx == 1)
+ {
+ memcpy(psta->dot118021x_UncstKey.skey, param->u.crypt.key, (param->u.crypt.key_len>16 ?16:param->u.crypt.key_len));
+
+ if (strcmp(param->u.crypt.alg, "WEP") == 0)
+ {
+ DBG_871X("%s, set pairwise key, WEP\n", __func__);
+
+ psta->dot118021XPrivacy = _WEP40_;
+ if (param->u.crypt.key_len == 13)
+ {
+ psta->dot118021XPrivacy = _WEP104_;
+ }
+ }
+ else if (strcmp(param->u.crypt.alg, "TKIP") == 0)
+ {
+ DBG_871X("%s, set pairwise key, TKIP\n", __func__);
+
+ psta->dot118021XPrivacy = _TKIP_;
+
+ /* DEBUG_ERR("set key length :param->u.crypt.key_len =%d\n", param->u.crypt.key_len); */
+ /* set mic key */
+ memcpy(psta->dot11tkiptxmickey.skey, &(param->u.crypt.key[16]), 8);
+ memcpy(psta->dot11tkiprxmickey.skey, &(param->u.crypt.key[24]), 8);
+
+ psecuritypriv->busetkipkey = true;
+
+ }
+ else if (strcmp(param->u.crypt.alg, "CCMP") == 0)
+ {
+
+ DBG_871X("%s, set pairwise key, CCMP\n", __func__);
+
+ psta->dot118021XPrivacy = _AES_;
+ }
+ else
+ {
+ DBG_871X("%s, set pairwise key, none\n", __func__);
+
+ psta->dot118021XPrivacy = _NO_PRIVACY_;
+ }
+
+ rtw_ap_set_pairwise_key(padapter, psta);
+
+ psta->ieee8021x_blocked = false;
+
+ }
+ else/* group key??? */
+ {
+ if (strcmp(param->u.crypt.alg, "WEP") == 0)
+ {
+ memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len>16 ?16:param->u.crypt.key_len));
+
+ psecuritypriv->dot118021XGrpPrivacy = _WEP40_;
+ if (param->u.crypt.key_len == 13)
+ {
+ psecuritypriv->dot118021XGrpPrivacy = _WEP104_;
+ }
+ }
+ else if (strcmp(param->u.crypt.alg, "TKIP") == 0)
+ {
+ psecuritypriv->dot118021XGrpPrivacy = _TKIP_;
+
+ memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len>16 ?16:param->u.crypt.key_len));
+
+ /* DEBUG_ERR("set key length :param->u.crypt.key_len =%d\n", param->u.crypt.key_len); */
+ /* set mic key */
+ memcpy(psecuritypriv->dot118021XGrptxmickey[param->u.crypt.idx].skey, &(param->u.crypt.key[16]), 8);
+ memcpy(psecuritypriv->dot118021XGrprxmickey[param->u.crypt.idx].skey, &(param->u.crypt.key[24]), 8);
+
+ psecuritypriv->busetkipkey = true;
+
+ }
+ else if (strcmp(param->u.crypt.alg, "CCMP") == 0)
+ {
+ psecuritypriv->dot118021XGrpPrivacy = _AES_;
+
+ memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len>16 ?16:param->u.crypt.key_len));
+ }
+ else
+ {
+ psecuritypriv->dot118021XGrpPrivacy = _NO_PRIVACY_;
+ }
+
+ psecuritypriv->dot118021XGrpKeyid = param->u.crypt.idx;
+
+ psecuritypriv->binstallGrpkey = true;
+
+ psecuritypriv->dot11PrivacyAlgrthm = psecuritypriv->dot118021XGrpPrivacy;/* */
+
+ rtw_ap_set_group_key(padapter, param->u.crypt.key, psecuritypriv->dot118021XGrpPrivacy, param->u.crypt.idx);
+
+ pbcmc_sta =rtw_get_bcmc_stainfo(padapter);
+ if (pbcmc_sta)
+ {
+ pbcmc_sta->ieee8021x_blocked = false;
+ pbcmc_sta->dot118021XPrivacy = psecuritypriv->dot118021XGrpPrivacy;/* rx will use bmc_sta's dot118021XPrivacy */
+ }
+
+ }
+
+ }
+
+ }
+
+exit:
+ kfree((u8 *)pwep);
+
+ return ret;
+
+}
+
+static int rtw_set_beacon(struct net_device *dev, struct ieee_param *param, int len)
+{
+ int ret = 0;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ unsigned char *pbuf = param->u.bcn_ie.buf;
+
+
+ DBG_871X("%s, len =%d\n", __func__, len);
+
+ if (check_fwstate(pmlmepriv, WIFI_AP_STATE) != true)
+ return -EINVAL;
+
+ memcpy(&pstapriv->max_num_sta, param->u.bcn_ie.reserved, 2);
+
+ if ((pstapriv->max_num_sta > NUM_STA) || (pstapriv->max_num_sta <= 0))
+ pstapriv->max_num_sta = NUM_STA;
+
+
+ if (rtw_check_beacon_data(padapter, pbuf, (len-12-2)) == _SUCCESS)/* 12 = param header, 2:no packed */
+ ret = 0;
+ else
+ ret = -EINVAL;
+
+
+ return ret;
+
+}
+
+static int rtw_hostapd_sta_flush(struct net_device *dev)
+{
+ /* _irqL irqL; */
+ /* struct list_head *phead, *plist; */
+ int ret = 0;
+ /* struct sta_info *psta = NULL; */
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ /* struct sta_priv *pstapriv = &padapter->stapriv; */
+
+ DBG_871X("%s\n", __func__);
+
+ flush_all_cam_entry(padapter); /* clear CAM */
+
+ ret = rtw_sta_flush(padapter);
+
+ return ret;
+
+}
+
+static int rtw_add_sta(struct net_device *dev, struct ieee_param *param)
+{
+ int ret = 0;
+ struct sta_info *psta = NULL;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct sta_priv *pstapriv = &padapter->stapriv;
+
+ DBG_871X("rtw_add_sta(aid =%d) =" MAC_FMT "\n", param->u.add_sta.aid, MAC_ARG(param->sta_addr));
+
+ if (check_fwstate(pmlmepriv, (_FW_LINKED|WIFI_AP_STATE)) != true)
+ {
+ return -EINVAL;
+ }
+
+ if (param->sta_addr[0] == 0xff && param->sta_addr[1] == 0xff &&
+ param->sta_addr[2] == 0xff && param->sta_addr[3] == 0xff &&
+ param->sta_addr[4] == 0xff && param->sta_addr[5] == 0xff)
+ {
+ return -EINVAL;
+ }
+
+/*
+ psta = rtw_get_stainfo(pstapriv, param->sta_addr);
+ if (psta)
+ {
+ DBG_871X("rtw_add_sta(), free has been added psta =%p\n", psta);
+ spin_lock_bh(&(pstapriv->sta_hash_lock));
+ rtw_free_stainfo(padapter, psta);
+ spin_unlock_bh(&(pstapriv->sta_hash_lock));
+
+ psta = NULL;
+ }
+*/
+ /* psta = rtw_alloc_stainfo(pstapriv, param->sta_addr); */
+ psta = rtw_get_stainfo(pstapriv, param->sta_addr);
+ if (psta)
+ {
+ int flags = param->u.add_sta.flags;
+
+ /* DBG_871X("rtw_add_sta(), init sta's variables, psta =%p\n", psta); */
+
+ psta->aid = param->u.add_sta.aid;/* aid = 1~2007 */
+
+ memcpy(psta->bssrateset, param->u.add_sta.tx_supp_rates, 16);
+
+
+ /* check wmm cap. */
+ if (WLAN_STA_WME&flags)
+ psta->qos_option = 1;
+ else
+ psta->qos_option = 0;
+
+ if (pmlmepriv->qospriv.qos_option == 0)
+ psta->qos_option = 0;
+
+ /* check 802.11n HT cap. */
+ if (WLAN_STA_HT&flags)
+ {
+ psta->htpriv.ht_option = true;
+ psta->qos_option = 1;
+ memcpy((void*)&psta->htpriv.ht_cap, (void*)&param->u.add_sta.ht_cap, sizeof(struct rtw_ieee80211_ht_cap));
+ }
+ else
+ {
+ psta->htpriv.ht_option = false;
+ }
+
+ if (pmlmepriv->htpriv.ht_option == false)
+ psta->htpriv.ht_option = false;
+
+ update_sta_info_apmode(padapter, psta);
+
+
+ }
+ else
+ {
+ ret = -ENOMEM;
+ }
+
+ return ret;
+
+}
+
+static int rtw_del_sta(struct net_device *dev, struct ieee_param *param)
+{
+ int ret = 0;
+ struct sta_info *psta = NULL;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct sta_priv *pstapriv = &padapter->stapriv;
+
+ DBG_871X("rtw_del_sta =" MAC_FMT "\n", MAC_ARG(param->sta_addr));
+
+ if (check_fwstate(pmlmepriv, (_FW_LINKED|WIFI_AP_STATE)) != true)
+ {
+ return -EINVAL;
+ }
+
+ if (param->sta_addr[0] == 0xff && param->sta_addr[1] == 0xff &&
+ param->sta_addr[2] == 0xff && param->sta_addr[3] == 0xff &&
+ param->sta_addr[4] == 0xff && param->sta_addr[5] == 0xff)
+ {
+ return -EINVAL;
+ }
+
+ psta = rtw_get_stainfo(pstapriv, param->sta_addr);
+ if (psta)
+ {
+ u8 updated = false;
+
+ /* DBG_871X("free psta =%p, aid =%d\n", psta, psta->aid); */
+
+ spin_lock_bh(&pstapriv->asoc_list_lock);
+ if (list_empty(&psta->asoc_list) == false)
+ {
+ list_del_init(&psta->asoc_list);
+ pstapriv->asoc_list_cnt--;
+ updated = ap_free_sta(padapter, psta, true, WLAN_REASON_DEAUTH_LEAVING);
+
+ }
+ spin_unlock_bh(&pstapriv->asoc_list_lock);
+
+ associated_clients_update(padapter, updated);
+
+ psta = NULL;
+
+ }
+ else
+ {
+ DBG_871X("rtw_del_sta(), sta has already been removed or never been added\n");
+
+ /* ret = -1; */
+ }
+
+
+ return ret;
+
+}
+
+static int rtw_ioctl_get_sta_data(struct net_device *dev, struct ieee_param *param, int len)
+{
+ int ret = 0;
+ struct sta_info *psta = NULL;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct ieee_param_ex *param_ex = (struct ieee_param_ex *)param;
+ struct sta_data *psta_data = (struct sta_data *)param_ex->data;
+
+ DBG_871X("rtw_ioctl_get_sta_info, sta_addr: " MAC_FMT "\n", MAC_ARG(param_ex->sta_addr));
+
+ if (check_fwstate(pmlmepriv, (_FW_LINKED|WIFI_AP_STATE)) != true)
+ {
+ return -EINVAL;
+ }
+
+ if (param_ex->sta_addr[0] == 0xff && param_ex->sta_addr[1] == 0xff &&
+ param_ex->sta_addr[2] == 0xff && param_ex->sta_addr[3] == 0xff &&
+ param_ex->sta_addr[4] == 0xff && param_ex->sta_addr[5] == 0xff)
+ {
+ return -EINVAL;
+ }
+
+ psta = rtw_get_stainfo(pstapriv, param_ex->sta_addr);
+ if (psta)
+ {
+ psta_data->aid = (u16)psta->aid;
+ psta_data->capability = psta->capability;
+ psta_data->flags = psta->flags;
+
+/*
+ nonerp_set : BIT(0)
+ no_short_slot_time_set : BIT(1)
+ no_short_preamble_set : BIT(2)
+ no_ht_gf_set : BIT(3)
+ no_ht_set : BIT(4)
+ ht_20mhz_set : BIT(5)
+*/
+
+ psta_data->sta_set = ((psta->nonerp_set) |
+ (psta->no_short_slot_time_set << 1) |
+ (psta->no_short_preamble_set << 2) |
+ (psta->no_ht_gf_set << 3) |
+ (psta->no_ht_set << 4) |
+ (psta->ht_20mhz_set << 5));
+
+ psta_data->tx_supp_rates_len = psta->bssratelen;
+ memcpy(psta_data->tx_supp_rates, psta->bssrateset, psta->bssratelen);
+ memcpy(&psta_data->ht_cap, &psta->htpriv.ht_cap, sizeof(struct rtw_ieee80211_ht_cap));
+ psta_data->rx_pkts = psta->sta_stats.rx_data_pkts;
+ psta_data->rx_bytes = psta->sta_stats.rx_bytes;
+ psta_data->rx_drops = psta->sta_stats.rx_drops;
+
+ psta_data->tx_pkts = psta->sta_stats.tx_pkts;
+ psta_data->tx_bytes = psta->sta_stats.tx_bytes;
+ psta_data->tx_drops = psta->sta_stats.tx_drops;
+
+
+ }
+ else
+ {
+ ret = -1;
+ }
+
+ return ret;
+
+}
+
+static int rtw_get_sta_wpaie(struct net_device *dev, struct ieee_param *param)
+{
+ int ret = 0;
+ struct sta_info *psta = NULL;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct sta_priv *pstapriv = &padapter->stapriv;
+
+ DBG_871X("rtw_get_sta_wpaie, sta_addr: " MAC_FMT "\n", MAC_ARG(param->sta_addr));
+
+ if (check_fwstate(pmlmepriv, (_FW_LINKED|WIFI_AP_STATE)) != true)
+ {
+ return -EINVAL;
+ }
+
+ if (param->sta_addr[0] == 0xff && param->sta_addr[1] == 0xff &&
+ param->sta_addr[2] == 0xff && param->sta_addr[3] == 0xff &&
+ param->sta_addr[4] == 0xff && param->sta_addr[5] == 0xff)
+ {
+ return -EINVAL;
+ }
+
+ psta = rtw_get_stainfo(pstapriv, param->sta_addr);
+ if (psta)
+ {
+ if ((psta->wpa_ie[0] == WLAN_EID_RSN) || (psta->wpa_ie[0] == WLAN_EID_GENERIC))
+ {
+ int wpa_ie_len;
+ int copy_len;
+
+ wpa_ie_len = psta->wpa_ie[1];
+
+ copy_len = ((wpa_ie_len+2) > sizeof(psta->wpa_ie)) ? (sizeof(psta->wpa_ie)):(wpa_ie_len+2);
+
+ param->u.wpa_ie.len = copy_len;
+
+ memcpy(param->u.wpa_ie.reserved, psta->wpa_ie, copy_len);
+ }
+ else
+ {
+ /* ret = -1; */
+ DBG_871X("sta's wpa_ie is NONE\n");
+ }
+ }
+ else
+ {
+ ret = -1;
+ }
+
+ return ret;
+
+}
+
+static int rtw_set_wps_beacon(struct net_device *dev, struct ieee_param *param, int len)
+{
+ int ret = 0;
+ unsigned char wps_oui[4]={0x0, 0x50, 0xf2, 0x04};
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+ int ie_len;
+
+ DBG_871X("%s, len =%d\n", __func__, len);
+
+ if (check_fwstate(pmlmepriv, WIFI_AP_STATE) != true)
+ return -EINVAL;
+
+ ie_len = len-12-2;/* 12 = param header, 2:no packed */
+
+
+ if (pmlmepriv->wps_beacon_ie)
+ {
+ kfree(pmlmepriv->wps_beacon_ie);
+ pmlmepriv->wps_beacon_ie = NULL;
+ }
+
+ if (ie_len>0)
+ {
+ pmlmepriv->wps_beacon_ie = rtw_malloc(ie_len);
+ if (pmlmepriv->wps_beacon_ie == NULL) {
+ DBG_871X("%s()-%d: rtw_malloc() ERROR!\n", __func__, __LINE__);
+ return -EINVAL;
+ }
+ pmlmepriv->wps_beacon_ie_len = ie_len;
+
+ memcpy(pmlmepriv->wps_beacon_ie, param->u.bcn_ie.buf, ie_len);
+
+ update_beacon(padapter, _VENDOR_SPECIFIC_IE_, wps_oui, true);
+
+ pmlmeext->bstart_bss = true;
+
+ }
+
+
+ return ret;
+
+}
+
+static int rtw_set_wps_probe_resp(struct net_device *dev, struct ieee_param *param, int len)
+{
+ int ret = 0;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ int ie_len;
+
+ DBG_871X("%s, len =%d\n", __func__, len);
+
+ if (check_fwstate(pmlmepriv, WIFI_AP_STATE) != true)
+ return -EINVAL;
+
+ ie_len = len-12-2;/* 12 = param header, 2:no packed */
+
+
+ if (pmlmepriv->wps_probe_resp_ie)
+ {
+ kfree(pmlmepriv->wps_probe_resp_ie);
+ pmlmepriv->wps_probe_resp_ie = NULL;
+ }
+
+ if (ie_len>0)
+ {
+ pmlmepriv->wps_probe_resp_ie = rtw_malloc(ie_len);
+ if (pmlmepriv->wps_probe_resp_ie == NULL) {
+ DBG_871X("%s()-%d: rtw_malloc() ERROR!\n", __func__, __LINE__);
+ return -EINVAL;
+ }
+ pmlmepriv->wps_probe_resp_ie_len = ie_len;
+ memcpy(pmlmepriv->wps_probe_resp_ie, param->u.bcn_ie.buf, ie_len);
+ }
+
+
+ return ret;
+
+}
+
+static int rtw_set_wps_assoc_resp(struct net_device *dev, struct ieee_param *param, int len)
+{
+ int ret = 0;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ int ie_len;
+
+ DBG_871X("%s, len =%d\n", __func__, len);
+
+ if (check_fwstate(pmlmepriv, WIFI_AP_STATE) != true)
+ return -EINVAL;
+
+ ie_len = len-12-2;/* 12 = param header, 2:no packed */
+
+
+ if (pmlmepriv->wps_assoc_resp_ie)
+ {
+ kfree(pmlmepriv->wps_assoc_resp_ie);
+ pmlmepriv->wps_assoc_resp_ie = NULL;
+ }
+
+ if (ie_len>0)
+ {
+ pmlmepriv->wps_assoc_resp_ie = rtw_malloc(ie_len);
+ if (pmlmepriv->wps_assoc_resp_ie == NULL) {
+ DBG_871X("%s()-%d: rtw_malloc() ERROR!\n", __func__, __LINE__);
+ return -EINVAL;
+ }
+ pmlmepriv->wps_assoc_resp_ie_len = ie_len;
+
+ memcpy(pmlmepriv->wps_assoc_resp_ie, param->u.bcn_ie.buf, ie_len);
+ }
+
+
+ return ret;
+
+}
+
+static int rtw_set_hidden_ssid(struct net_device *dev, struct ieee_param *param, int len)
+{
+ int ret = 0;
+ struct adapter *adapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct mlme_priv *mlmepriv = &(adapter->mlmepriv);
+ struct mlme_ext_priv *mlmeext = &(adapter->mlmeextpriv);
+ struct mlme_ext_info *mlmeinfo = &(mlmeext->mlmext_info);
+ int ie_len;
+ u8 *ssid_ie;
+ char ssid[NDIS_802_11_LENGTH_SSID + 1] = {0};
+ sint ssid_len = 0;
+ u8 ignore_broadcast_ssid;
+
+ if (check_fwstate(mlmepriv, WIFI_AP_STATE) != true)
+ return -EPERM;
+
+ if (param->u.bcn_ie.reserved[0] != 0xea)
+ return -EINVAL;
+
+ mlmeinfo->hidden_ssid_mode = ignore_broadcast_ssid = param->u.bcn_ie.reserved[1];
+
+ ie_len = len-12-2;/* 12 = param header, 2:no packed */
+ ssid_ie = rtw_get_ie(param->u.bcn_ie.buf, WLAN_EID_SSID, &ssid_len, ie_len);
+
+ if (ssid_ie && ssid_len > 0 && ssid_len <= NDIS_802_11_LENGTH_SSID) {
+ struct wlan_bssid_ex *pbss_network = &mlmepriv->cur_network.network;
+ struct wlan_bssid_ex *pbss_network_ext = &mlmeinfo->network;
+
+ memcpy(ssid, ssid_ie+2, ssid_len);
+ ssid[ssid_len] = 0x0;
+
+ if (0)
+ DBG_871X(FUNC_ADPT_FMT" ssid:(%s,%d), from ie:(%s,%d), (%s,%d)\n", FUNC_ADPT_ARG(adapter),
+ ssid, ssid_len,
+ pbss_network->Ssid.Ssid, pbss_network->Ssid.SsidLength,
+ pbss_network_ext->Ssid.Ssid, pbss_network_ext->Ssid.SsidLength);
+
+ memcpy(pbss_network->Ssid.Ssid, (void *)ssid, ssid_len);
+ pbss_network->Ssid.SsidLength = ssid_len;
+ memcpy(pbss_network_ext->Ssid.Ssid, (void *)ssid, ssid_len);
+ pbss_network_ext->Ssid.SsidLength = ssid_len;
+
+ if (0)
+ DBG_871X(FUNC_ADPT_FMT" after ssid:(%s,%d), (%s,%d)\n", FUNC_ADPT_ARG(adapter),
+ pbss_network->Ssid.Ssid, pbss_network->Ssid.SsidLength,
+ pbss_network_ext->Ssid.Ssid, pbss_network_ext->Ssid.SsidLength);
+ }
+
+ DBG_871X(FUNC_ADPT_FMT" ignore_broadcast_ssid:%d, %s,%d\n", FUNC_ADPT_ARG(adapter),
+ ignore_broadcast_ssid, ssid, ssid_len);
+
+ return ret;
+}
+
+static int rtw_ioctl_acl_remove_sta(struct net_device *dev, struct ieee_param *param, int len)
+{
+ int ret = 0;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+
+ if (check_fwstate(pmlmepriv, WIFI_AP_STATE) != true)
+ return -EINVAL;
+
+ if (param->sta_addr[0] == 0xff && param->sta_addr[1] == 0xff &&
+ param->sta_addr[2] == 0xff && param->sta_addr[3] == 0xff &&
+ param->sta_addr[4] == 0xff && param->sta_addr[5] == 0xff)
+ {
+ return -EINVAL;
+ }
+
+ ret = rtw_acl_remove_sta(padapter, param->sta_addr);
+
+ return ret;
+
+}
+
+static int rtw_ioctl_acl_add_sta(struct net_device *dev, struct ieee_param *param, int len)
+{
+ int ret = 0;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+
+ if (check_fwstate(pmlmepriv, WIFI_AP_STATE) != true)
+ return -EINVAL;
+
+ if (param->sta_addr[0] == 0xff && param->sta_addr[1] == 0xff &&
+ param->sta_addr[2] == 0xff && param->sta_addr[3] == 0xff &&
+ param->sta_addr[4] == 0xff && param->sta_addr[5] == 0xff)
+ {
+ return -EINVAL;
+ }
+
+ ret = rtw_acl_add_sta(padapter, param->sta_addr);
+
+ return ret;
+
+}
+
+static int rtw_ioctl_set_macaddr_acl(struct net_device *dev, struct ieee_param *param, int len)
+{
+ int ret = 0;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+
+ if (check_fwstate(pmlmepriv, WIFI_AP_STATE) != true)
+ return -EINVAL;
+
+ rtw_set_macaddr_acl(padapter, param->u.mlme.command);
+
+ return ret;
+}
+
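+/*
+ * Dispatcher for RTL_IOCTL_HOSTAPD requests, normally issued by a hostapd
+ * build with the Realtek driver interface while the interface runs in AP
+ * (master) mode. Each RTL871X_HOSTAPD_* sub-command maps onto one of the
+ * helpers defined above (rtw_add_sta(), rtw_set_beacon(), ...).
+ */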
+static int rtw_hostapd_ioctl(struct net_device *dev, struct iw_point *p)
+{
+ struct ieee_param *param;
+ int ret = 0;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+
+ /* DBG_871X("%s\n", __func__); */
+
+ /*
+ * This function is expected to be called in master mode, which allows no
+ * power saving, so we just check hw_init_completed.
+ */
+
+ if (padapter->hw_init_completed == false) {
+ ret = -EPERM;
+ goto out;
+ }
+
+
+ /* if (p->length < sizeof(struct ieee_param) || !p->pointer) { */
+ if (!p->pointer) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ param = (struct ieee_param *)rtw_malloc(p->length);
+ if (param == NULL)
+ {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ if (copy_from_user(param, p->pointer, p->length))
+ {
+ kfree((u8 *)param);
+ ret = -EFAULT;
+ goto out;
+ }
+
+ /* DBG_871X("%s, cmd =%d\n", __func__, param->cmd); */
+
+ switch (param->cmd)
+ {
+ case RTL871X_HOSTAPD_FLUSH:
+
+ ret = rtw_hostapd_sta_flush(dev);
+
+ break;
+
+ case RTL871X_HOSTAPD_ADD_STA:
+
+ ret = rtw_add_sta(dev, param);
+
+ break;
+
+ case RTL871X_HOSTAPD_REMOVE_STA:
+
+ ret = rtw_del_sta(dev, param);
+
+ break;
+
+ case RTL871X_HOSTAPD_SET_BEACON:
+
+ ret = rtw_set_beacon(dev, param, p->length);
+
+ break;
+
+ case RTL871X_SET_ENCRYPTION:
+
+ ret = rtw_set_encryption(dev, param, p->length);
+
+ break;
+
+ case RTL871X_HOSTAPD_GET_WPAIE_STA:
+
+ ret = rtw_get_sta_wpaie(dev, param);
+
+ break;
+
+ case RTL871X_HOSTAPD_SET_WPS_BEACON:
+
+ ret = rtw_set_wps_beacon(dev, param, p->length);
+
+ break;
+
+ case RTL871X_HOSTAPD_SET_WPS_PROBE_RESP:
+
+ ret = rtw_set_wps_probe_resp(dev, param, p->length);
+
+ break;
+
+ case RTL871X_HOSTAPD_SET_WPS_ASSOC_RESP:
+
+ ret = rtw_set_wps_assoc_resp(dev, param, p->length);
+
+ break;
+
+ case RTL871X_HOSTAPD_SET_HIDDEN_SSID:
+
+ ret = rtw_set_hidden_ssid(dev, param, p->length);
+
+ break;
+
+ case RTL871X_HOSTAPD_GET_INFO_STA:
+
+ ret = rtw_ioctl_get_sta_data(dev, param, p->length);
+
+ break;
+
+ case RTL871X_HOSTAPD_SET_MACADDR_ACL:
+
+ ret = rtw_ioctl_set_macaddr_acl(dev, param, p->length);
+
+ break;
+
+ case RTL871X_HOSTAPD_ACL_ADD_STA:
+
+ ret = rtw_ioctl_acl_add_sta(dev, param, p->length);
+
+ break;
+
+ case RTL871X_HOSTAPD_ACL_REMOVE_STA:
+
+ ret = rtw_ioctl_acl_remove_sta(dev, param, p->length);
+
+ break;
+
+ default:
+ DBG_871X("Unknown hostapd request: %d\n", param->cmd);
+ ret = -EOPNOTSUPP;
+ break;
+
+ }
+
+ if (ret == 0 && copy_to_user(p->pointer, param, p->length))
+ ret = -EFAULT;
+
+
+ kfree((u8 *)param);
+
+out:
+
+ return ret;
+
+}
+
+static int rtw_wx_set_priv(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *awrq,
+ char *extra)
+{
+
+#ifdef DEBUG_RTW_WX_SET_PRIV
+ char *ext_dbg;
+#endif
+
+ int ret = 0;
+ int len = 0;
+ char *ext;
+
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct iw_point *dwrq = (struct iw_point*)awrq;
+
+ /* RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_notice_, ("+rtw_wx_set_priv\n")); */
+ if (dwrq->length == 0)
+ return -EFAULT;
+
+ len = dwrq->length;
+ if (!(ext = vmalloc(len)))
+ return -ENOMEM;
+
+ if (copy_from_user(ext, dwrq->pointer, len)) {
+ vfree(ext);
+ return -EFAULT;
+ }
+
+
+ /* RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_notice_, */
+ /* ("rtw_wx_set_priv: %s req =%s\n", */
+ /* dev->name, ext)); */
+
+ #ifdef DEBUG_RTW_WX_SET_PRIV
+ if (!(ext_dbg = vmalloc(len)))
+ {
+ vfree(ext);
+ return -ENOMEM;
+ }
+
+ memcpy(ext_dbg, ext, len);
+ #endif
+
+ /* added for wps2.0 @20110524 */
+ if (dwrq->flags == 0x8766 && len > 8)
+ {
+ u32 cp_sz;
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ u8 *probereq_wpsie = ext;
+ int probereq_wpsie_len = len;
+ u8 wps_oui[4]={0x0, 0x50, 0xf2, 0x04};
+
+ if ((_VENDOR_SPECIFIC_IE_ == probereq_wpsie[0]) &&
+ (!memcmp(&probereq_wpsie[2], wps_oui, 4)))
+ {
+ cp_sz = probereq_wpsie_len>MAX_WPS_IE_LEN ? MAX_WPS_IE_LEN:probereq_wpsie_len;
+
+ if (pmlmepriv->wps_probe_req_ie)
+ {
+ pmlmepriv->wps_probe_req_ie_len = 0;
+ kfree(pmlmepriv->wps_probe_req_ie);
+ pmlmepriv->wps_probe_req_ie = NULL;
+ }
+
+ pmlmepriv->wps_probe_req_ie = rtw_malloc(cp_sz);
+ if (pmlmepriv->wps_probe_req_ie == NULL) {
+ printk("%s()-%d: rtw_malloc() ERROR!\n", __func__, __LINE__);
+ ret = -EINVAL;
+ goto FREE_EXT;
+
+ }
+
+ memcpy(pmlmepriv->wps_probe_req_ie, probereq_wpsie, cp_sz);
+ pmlmepriv->wps_probe_req_ie_len = cp_sz;
+
+ }
+
+ goto FREE_EXT;
+
+ }
+
+ if (len >= WEXT_CSCAN_HEADER_SIZE
+ && !memcmp(ext, WEXT_CSCAN_HEADER, WEXT_CSCAN_HEADER_SIZE)
+ ) {
+ ret = rtw_wx_set_scan(dev, info, awrq, ext);
+ goto FREE_EXT;
+ }
+
+FREE_EXT:
+
+ vfree(ext);
+ #ifdef DEBUG_RTW_WX_SET_PRIV
+ vfree(ext_dbg);
+ #endif
+
+ /* DBG_871X("rtw_wx_set_priv: (SIOCSIWPRIV) %s ret =%d\n", */
+ /* dev->name, ret); */
+
+ return ret;
+
+}
+
+static int rtw_pm_set(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ int ret = 0;
+ unsigned mode = 0;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+
+ DBG_871X("[%s] extra = %s\n", __func__, extra);
+
+ if (!memcmp(extra, "lps =", 4))
+ {
+ sscanf(extra+4, "%u", &mode);
+ ret = rtw_pm_set_lps(padapter, mode);
+ }
+ else if (!memcmp(extra, "ips =", 4))
+ {
+ sscanf(extra+4, "%u", &mode);
+ ret = rtw_pm_set_ips(padapter, mode);
+ }
+ else {
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static int rtw_mp_efuse_get(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wdata, char *extra)
+{
+ int err = 0;
+ return err;
+}
+
+static int rtw_mp_efuse_set(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wdata, char *extra)
+{
+ int err = 0;
+ return err;
+}
+
+static int rtw_tdls(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ int ret = 0;
+ return ret;
+}
+
+
+static int rtw_tdls_get(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ int ret = 0;
+ return ret;
+}
+
+
+
+
+
+#ifdef CONFIG_INTEL_WIDI
+static int rtw_widi_set(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ int ret = 0;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+
+ process_intel_widi_cmd(padapter, extra);
+
+ return ret;
+}
+
+static int rtw_widi_set_probe_request(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ int ret = 0;
+ u8 *pbuf = NULL;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+
+ pbuf = rtw_malloc(sizeof(l2_msg_t));
+ if (pbuf)
+ {
+ if (copy_from_user(pbuf, wrqu->data.pointer, wrqu->data.length))
+ ret = -EFAULT;
+ /* memcpy(pbuf, wrqu->data.pointer, wrqu->data.length); */
+
+ if (wrqu->data.flags == 0)
+ intel_widi_wk_cmd(padapter, INTEL_WIDI_ISSUE_PROB_WK, pbuf, sizeof(l2_msg_t));
+ else if (wrqu->data.flags == 1)
+ rtw_set_wfd_rds_sink_info(padapter, (l2_msg_t *)pbuf);
+ }
+ return ret;
+}
+#endif /* CONFIG_INTEL_WIDI */
+
+static int rtw_test(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ u32 len;
+ u8 *pbuf, *pch;
+ char *ptmp;
+ u8 *delim = ",";
+ struct adapter *padapter = rtw_netdev_priv(dev);
+
+
+ DBG_871X("+%s\n", __func__);
+ len = wrqu->data.length;
+
+ pbuf = (u8 *)rtw_zmalloc(len);
+ if (pbuf == NULL) {
+ DBG_871X("%s: no memory!\n", __func__);
+ return -ENOMEM;
+ }
+
+ if (copy_from_user(pbuf, wrqu->data.pointer, len)) {
+ kfree(pbuf);
+ DBG_871X("%s: copy from user fail!\n", __func__);
+ return -EFAULT;
+ }
+ DBG_871X("%s: string =\"%s\"\n", __func__, pbuf);
+
+ ptmp = (char*)pbuf;
+ pch = strsep(&ptmp, delim);
+ if ((pch == NULL) || (strlen(pch) == 0)) {
+ kfree(pbuf);
+ DBG_871X("%s: parameter error(level 1)!\n", __func__);
+ return -EFAULT;
+ }
+
+ if (strcmp(pch, "bton") == 0)
+ {
+ rtw_btcoex_SetManualControl(padapter, false);
+ }
+
+ if (strcmp(pch, "btoff") == 0)
+ {
+ rtw_btcoex_SetManualControl(padapter, true);
+ }
+
+ if (strcmp(pch, "h2c") == 0)
+ {
+ u8 param[8];
+ u8 count = 0;
+ u32 tmp;
+ u8 i;
+ u32 pos;
+ s32 ret;
+
+
+ do {
+ pch = strsep(&ptmp, delim);
+ if ((pch == NULL) || (strlen(pch) == 0))
+ break;
+
+ sscanf(pch, "%x", &tmp);
+ param[count++] = (u8)tmp;
+ } while (count < 8);
+
+ if (count == 0) {
+ kfree(pbuf);
+ DBG_871X("%s: parameter error(level 2)!\n", __func__);
+ return -EFAULT;
+ }
+
+ ret = rtw_hal_fill_h2c_cmd(padapter, param[0], count-1, &param[1]);
+
+ pos = sprintf(extra, "H2C ID = 0x%02x content =", param[0]);
+ for (i = 1; i<count; i++) {
+ pos += sprintf(extra+pos, "%02x,", param[i]);
+ }
+ extra[pos] = 0;
+ pos--;
+ pos += sprintf(extra+pos, " %s", ret == _FAIL?"FAIL":"OK");
+
+ wrqu->data.length = strlen(extra) + 1;
+ }
+
+ kfree(pbuf);
+ return 0;
+}
+
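+/*
+ * Standard WEXT handler table. The wext core looks handlers up by
+ * (cmd - SIOCIWFIRST), so the slot order here must follow the SIOCxIW*
+ * numbering; NULL marks an unimplemented slot and the dummy entries appear
+ * to be no-op placeholders that only keep the indexing aligned.
+ */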
+static iw_handler rtw_handlers[] =
+{
+ NULL, /* SIOCSIWCOMMIT */
+ rtw_wx_get_name, /* SIOCGIWNAME */
+ dummy, /* SIOCSIWNWID */
+ dummy, /* SIOCGIWNWID */
+ rtw_wx_set_freq, /* SIOCSIWFREQ */
+ rtw_wx_get_freq, /* SIOCGIWFREQ */
+ rtw_wx_set_mode, /* SIOCSIWMODE */
+ rtw_wx_get_mode, /* SIOCGIWMODE */
+ dummy, /* SIOCSIWSENS */
+ rtw_wx_get_sens, /* SIOCGIWSENS */
+ NULL, /* SIOCSIWRANGE */
+ rtw_wx_get_range, /* SIOCGIWRANGE */
+ rtw_wx_set_priv, /* SIOCSIWPRIV */
+ NULL, /* SIOCGIWPRIV */
+ NULL, /* SIOCSIWSTATS */
+ NULL, /* SIOCGIWSTATS */
+ dummy, /* SIOCSIWSPY */
+ dummy, /* SIOCGIWSPY */
+ NULL, /* SIOCSIWTHRSPY */
+ NULL, /* SIOCGIWTHRSPY */
+ rtw_wx_set_wap, /* SIOCSIWAP */
+ rtw_wx_get_wap, /* SIOCGIWAP */
+ rtw_wx_set_mlme, /* request MLME operation; uses struct iw_mlme */
+ dummy, /* SIOCGIWAPLIST -- deprecated */
+ rtw_wx_set_scan, /* SIOCSIWSCAN */
+ rtw_wx_get_scan, /* SIOCGIWSCAN */
+ rtw_wx_set_essid, /* SIOCSIWESSID */
+ rtw_wx_get_essid, /* SIOCGIWESSID */
+ dummy, /* SIOCSIWNICKN */
+ rtw_wx_get_nick, /* SIOCGIWNICKN */
+ NULL, /* -- hole -- */
+ NULL, /* -- hole -- */
+ rtw_wx_set_rate, /* SIOCSIWRATE */
+ rtw_wx_get_rate, /* SIOCGIWRATE */
+ rtw_wx_set_rts, /* SIOCSIWRTS */
+ rtw_wx_get_rts, /* SIOCGIWRTS */
+ rtw_wx_set_frag, /* SIOCSIWFRAG */
+ rtw_wx_get_frag, /* SIOCGIWFRAG */
+ dummy, /* SIOCSIWTXPOW */
+ dummy, /* SIOCGIWTXPOW */
+ dummy, /* SIOCSIWRETRY */
+ rtw_wx_get_retry, /* SIOCGIWRETRY */
+ rtw_wx_set_enc, /* SIOCSIWENCODE */
+ rtw_wx_get_enc, /* SIOCGIWENCODE */
+ dummy, /* SIOCSIWPOWER */
+ rtw_wx_get_power, /* SIOCGIWPOWER */
+ NULL, /*---hole---*/
+ NULL, /*---hole---*/
+ rtw_wx_set_gen_ie, /* SIOCSIWGENIE */
+ NULL, /* SIOCGIWGENIE */
+ rtw_wx_set_auth, /* SIOCSIWAUTH */
+ NULL, /* SIOCGIWAUTH */
+ rtw_wx_set_enc_ext, /* SIOCSIWENCODEEXT */
+ NULL, /* SIOCGIWENCODEEXT */
+ rtw_wx_set_pmkid, /* SIOCSIWPMKSA */
+ NULL, /*---hole---*/
+};
+
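+/*
+ * Private ioctl descriptions exported to user space (see iwpriv(8)).
+ * Each entry is { ioctl number, set-args descriptor, get-args descriptor,
+ * name }; a descriptor combines an IW_PRIV_TYPE_* type, optionally
+ * IW_PRIV_SIZE_FIXED, and an element count in the low IW_PRIV_SIZE_MASK
+ * bits. For example the "dbg" entry below takes exactly two integers, so a
+ * purely illustrative invocation would look like:
+ *
+ *	iwpriv wlan0 dbg 0x7f250000 1
+ *
+ * where the interface name and argument values are only an assumed example.
+ */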
+static const struct iw_priv_args rtw_private_args[] = {
+ {
+ SIOCIWFIRSTPRIV + 0x0,
+ IW_PRIV_TYPE_CHAR | 0x7FF, 0, "write"
+ },
+ {
+ SIOCIWFIRSTPRIV + 0x1,
+ IW_PRIV_TYPE_CHAR | 0x7FF,
+ IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | IFNAMSIZ, "read"
+ },
+ {
+ SIOCIWFIRSTPRIV + 0x2, 0, 0, "driver_ext"
+ },
+ {
+ SIOCIWFIRSTPRIV + 0x3, 0, 0, "mp_ioctl"
+ },
+ {
+ SIOCIWFIRSTPRIV + 0x4,
+ IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "apinfo"
+ },
+ {
+ SIOCIWFIRSTPRIV + 0x5,
+ IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2, 0, "setpid"
+ },
+ {
+ SIOCIWFIRSTPRIV + 0x6,
+ IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "wps_start"
+ },
+/* for PLATFORM_MT53XX */
+ {
+ SIOCIWFIRSTPRIV + 0x7,
+ IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "get_sensitivity"
+ },
+ {
+ SIOCIWFIRSTPRIV + 0x8,
+ IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "wps_prob_req_ie"
+ },
+ {
+ SIOCIWFIRSTPRIV + 0x9,
+ IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "wps_assoc_req_ie"
+ },
+
+/* for RTK_DMP_PLATFORM */
+ {
+ SIOCIWFIRSTPRIV + 0xA,
+ IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "channel_plan"
+ },
+
+ {
+ SIOCIWFIRSTPRIV + 0xB,
+ IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2, 0, "dbg"
+ },
+ {
+ SIOCIWFIRSTPRIV + 0xC,
+ IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 3, 0, "rfw"
+ },
+ {
+ SIOCIWFIRSTPRIV + 0xD,
+ IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | IFNAMSIZ, "rfr"
+ },
+ {
+ SIOCIWFIRSTPRIV + 0x10,
+ IW_PRIV_TYPE_CHAR | 1024, 0, "p2p_set"
+ },
+ {
+ SIOCIWFIRSTPRIV + 0x11,
+ IW_PRIV_TYPE_CHAR | 1024, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_MASK , "p2p_get"
+ },
+ {
+ SIOCIWFIRSTPRIV + 0x12, 0, 0, "NULL"
+ },
+ {
+ SIOCIWFIRSTPRIV + 0x13,
+ IW_PRIV_TYPE_CHAR | 64, IW_PRIV_TYPE_CHAR | 64 , "p2p_get2"
+ },
+ {
+ SIOCIWFIRSTPRIV + 0x14,
+ IW_PRIV_TYPE_CHAR | 64, 0, "tdls"
+ },
+ {
+ SIOCIWFIRSTPRIV + 0x15,
+ IW_PRIV_TYPE_CHAR | 1024, IW_PRIV_TYPE_CHAR | 1024 , "tdls_get"
+ },
+ {
+ SIOCIWFIRSTPRIV + 0x16,
+ IW_PRIV_TYPE_CHAR | 64, 0, "pm_set"
+ },
+
+ {SIOCIWFIRSTPRIV + 0x18, IW_PRIV_TYPE_CHAR | IFNAMSIZ , 0 , "rereg_nd_name"},
+ {SIOCIWFIRSTPRIV + 0x1A, IW_PRIV_TYPE_CHAR | 1024, 0, "efuse_set"},
+ {SIOCIWFIRSTPRIV + 0x1B, IW_PRIV_TYPE_CHAR | 128, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_MASK, "efuse_get"},
+ {
+ SIOCIWFIRSTPRIV + 0x1D,
+ IW_PRIV_TYPE_CHAR | 40, IW_PRIV_TYPE_CHAR | 0x7FF, "test"
+ },
+
+#ifdef CONFIG_INTEL_WIDI
+ {
+ SIOCIWFIRSTPRIV + 0x1E,
+ IW_PRIV_TYPE_CHAR | 1024, 0, "widi_set"
+ },
+ {
+ SIOCIWFIRSTPRIV + 0x1F,
+ IW_PRIV_TYPE_CHAR | 128, 0, "widi_prob_req"
+ },
+#endif /* CONFIG_INTEL_WIDI */
+
+#ifdef CONFIG_WOWLAN
+ { MP_WOW_ENABLE , IW_PRIV_TYPE_CHAR | 1024, 0, "wow_mode" }, /* set */
+#endif
+#ifdef CONFIG_AP_WOWLAN
+ { MP_AP_WOW_ENABLE , IW_PRIV_TYPE_CHAR | 1024, 0, "ap_wow_mode" }, /* set */
+#endif
+};
+
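+/*
+ * Private handler table, indexed by (cmd - SIOCIWFIRSTPRIV). The position
+ * of each handler therefore has to match the ioctl numbers declared in
+ * rtw_private_args above, which is why unused offsets (0x12, 0x1C) keep
+ * NULL placeholders.
+ */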
+static iw_handler rtw_private_handler[] =
+{
+ rtw_wx_write32, /* 0x00 */
+ rtw_wx_read32, /* 0x01 */
+ rtw_drvext_hdl, /* 0x02 */
+ rtw_mp_ioctl_hdl, /* 0x03 */
+
+/* for MM DTV platform */
+ rtw_get_ap_info, /* 0x04 */
+
+ rtw_set_pid, /* 0x05 */
+ rtw_wps_start, /* 0x06 */
+
+/* for PLATFORM_MT53XX */
+ rtw_wx_get_sensitivity, /* 0x07 */
+ rtw_wx_set_mtk_wps_probe_ie, /* 0x08 */
+ rtw_wx_set_mtk_wps_ie, /* 0x09 */
+
+/* for RTK_DMP_PLATFORM */
+/* Set Channel depend on the country code */
+ rtw_wx_set_channel_plan, /* 0x0A */
+
+ rtw_dbg_port, /* 0x0B */
+ rtw_wx_write_rf, /* 0x0C */
+ rtw_wx_read_rf, /* 0x0D */
+ rtw_wx_priv_null, /* 0x0E */
+ rtw_wx_priv_null, /* 0x0F */
+ rtw_p2p_set, /* 0x10 */
+ rtw_p2p_get, /* 0x11 */
+ NULL, /* 0x12 */
+ rtw_p2p_get2, /* 0x13 */
+
+ rtw_tdls, /* 0x14 */
+ rtw_tdls_get, /* 0x15 */
+
+ rtw_pm_set, /* 0x16 */
+ rtw_wx_priv_null, /* 0x17 */
+ rtw_rereg_nd_name, /* 0x18 */
+ rtw_wx_priv_null, /* 0x19 */
+ rtw_mp_efuse_set, /* 0x1A */
+ rtw_mp_efuse_get, /* 0x1B */
+ NULL, /* 0x1C is reserved for hostapd */
+ rtw_test, /* 0x1D */
+#ifdef CONFIG_INTEL_WIDI
+ rtw_widi_set, /* 0x1E */
+ rtw_widi_set_probe_request, /* 0x1F */
+#endif /* CONFIG_INTEL_WIDI */
+};
+
+static struct iw_statistics *rtw_get_wireless_stats(struct net_device *dev)
+{
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct iw_statistics *piwstats =&padapter->iwstats;
+ int tmp_level = 0;
+ int tmp_qual = 0;
+ int tmp_noise = 0;
+
+ if (check_fwstate(&padapter->mlmepriv, _FW_LINKED) != true)
+ {
+ piwstats->qual.qual = 0;
+ piwstats->qual.level = 0;
+ piwstats->qual.noise = 0;
+ /* DBG_871X("No link level:%d, qual:%d, noise:%d\n", tmp_level, tmp_qual, tmp_noise); */
+ }
+ else {
+ #ifdef CONFIG_SIGNAL_DISPLAY_DBM
+ tmp_level = translate_percentage_to_dbm(padapter->recvpriv.signal_strength);
+ #else
+ #ifdef CONFIG_SKIP_SIGNAL_SCALE_MAPPING
+ {
+ /* Do signal scale mapping when using percentage as the unit of signal strength, since the scale mapping is skipped in odm */
+
+ struct hal_com_data *pHal = GET_HAL_DATA(padapter);
+
+ tmp_level = (u8)odm_SignalScaleMapping(&pHal->odmpriv, padapter->recvpriv.signal_strength);
+ }
+ #else
+ tmp_level = padapter->recvpriv.signal_strength;
+ #endif
+ #endif
+
+ tmp_qual = padapter->recvpriv.signal_qual;
+#if defined(CONFIG_SIGNAL_DISPLAY_DBM) && defined(CONFIG_BACKGROUND_NOISE_MONITOR)
+ if (rtw_linked_check(padapter)) {
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct noise_info info;
+ info.bPauseDIG = true;
+ info.IGIValue = 0x1e;
+ info.max_time = 100;/* ms */
+ info.chan = pmlmeext->cur_channel; /* rtw_get_oper_ch(padapter); */
+ rtw_ps_deny(padapter, PS_DENY_IOCTL);
+ LeaveAllPowerSaveModeDirect(padapter);
+
+ rtw_hal_set_odm_var(padapter, HAL_ODM_NOISE_MONITOR,&info, false);
+ /* ODM_InbandNoise_Monitor(podmpriv, true, 0x20, 100); */
+ rtw_ps_deny_cancel(padapter, PS_DENY_IOCTL);
+ rtw_hal_get_odm_var(padapter, HAL_ODM_NOISE_MONITOR,&(info.chan), &(padapter->recvpriv.noise));
+ DBG_871X("chan:%d, noise_level:%d\n", info.chan, padapter->recvpriv.noise);
+ }
+#endif
+ tmp_noise = padapter->recvpriv.noise;
+ DBG_871X("level:%d, qual:%d, noise:%d, rssi (%d)\n", tmp_level, tmp_qual, tmp_noise, padapter->recvpriv.rssi);
+
+ piwstats->qual.level = tmp_level;
+ piwstats->qual.qual = tmp_qual;
+ piwstats->qual.noise = tmp_noise;
+ }
+ piwstats->qual.updated = IW_QUAL_ALL_UPDATED; /* IW_QUAL_DBM; */
+
+ #ifdef CONFIG_SIGNAL_DISPLAY_DBM
+ piwstats->qual.updated = piwstats->qual.updated | IW_QUAL_DBM;
+ #endif
+
+ return &padapter->iwstats;
+}
+
+struct iw_handler_def rtw_handlers_def =
+{
+ .standard = rtw_handlers,
+ .num_standard = ARRAY_SIZE(rtw_handlers),
+#if defined(CONFIG_WEXT_PRIV)
+ .private = rtw_private_handler,
+ .private_args = (struct iw_priv_args *)rtw_private_args,
+ .num_private = ARRAY_SIZE(rtw_private_handler),
+ .num_private_args = ARRAY_SIZE(rtw_private_args),
+#endif
+ .get_wireless_stats = rtw_get_wireless_stats,
+};
+
+/* copy from net/wireless/wext.c start */
+/* ---------------------------------------------------------------- */
+/*
+ * Calculate size of private arguments
+ */
+static const char iw_priv_type_size[] = {
+ 0, /* IW_PRIV_TYPE_NONE */
+ 1, /* IW_PRIV_TYPE_BYTE */
+ 1, /* IW_PRIV_TYPE_CHAR */
+ 0, /* Not defined */
+ sizeof(__u32), /* IW_PRIV_TYPE_INT */
+ sizeof(struct iw_freq), /* IW_PRIV_TYPE_FLOAT */
+ sizeof(struct sockaddr), /* IW_PRIV_TYPE_ADDR */
+ 0, /* Not defined */
+};
+
+static int get_priv_size(__u16 args)
+{
+ int num = args & IW_PRIV_SIZE_MASK;
+ int type = (args & IW_PRIV_TYPE_MASK) >> 12;
+
+ return num * iw_priv_type_size[type];
+}
+/* copy from net/wireless/wext.c end */
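+/*
+ * Worked example (for illustration only): the "dbg" private command above is
+ * declared as IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2, so get_priv_size()
+ * returns 2 * sizeof(__u32) = 8 bytes. Since 8 plus any sub-ioctl offset fits
+ * within IFNAMSIZ, rtw_ioctl_wext_private() below passes those arguments
+ * inline in the iwreq instead of going through a separate user-space buffer.
+ */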
+
+static int rtw_ioctl_wext_private(struct net_device *dev, union iwreq_data *wrq_data)
+{
+ int err = 0;
+ u8 *input = NULL;
+ u32 input_len = 0;
+ const char delim[] = " ";
+ u8 *output = NULL;
+ u32 output_len = 0;
+ u32 count = 0;
+ u8 *buffer = NULL;
+ u32 buffer_len = 0;
+ char *ptr = NULL;
+ u8 cmdname[17] = {0}; /* IFNAMSIZ+1 */
+ u32 cmdlen;
+ s32 len;
+ u8 *extra = NULL;
+ u32 extra_size = 0;
+
+ s32 k;
+ const iw_handler *priv; /* Private ioctl */
+ const struct iw_priv_args *priv_args; /* Private ioctl description */
+ u32 num_priv; /* Number of ioctl */
+ u32 num_priv_args; /* Number of descriptions */
+ iw_handler handler;
+ int temp;
+ int subcmd = 0; /* sub-ioctl index */
+ int offset = 0; /* Space for sub-ioctl index */
+
+ union iwreq_data wdata;
+
+
+ memcpy(&wdata, wrq_data, sizeof(wdata));
+
+ input_len = 2048;
+ input = rtw_zmalloc(input_len);
+ if (NULL == input)
+ return -ENOMEM;
+ if (copy_from_user(input, wdata.data.pointer, input_len)) {
+ err = -EFAULT;
+ goto exit;
+ }
+ ptr = input;
+ len = strlen(input);
+
+ sscanf(ptr, "%16s", cmdname);
+ cmdlen = strlen(cmdname);
+ DBG_8192C("%s: cmd =%s\n", __func__, cmdname);
+
+ /* skip command string */
+ if (cmdlen > 0)
+ cmdlen += 1; /* skip one space */
+ ptr += cmdlen;
+ len -= cmdlen;
+ DBG_8192C("%s: parameters =%s\n", __func__, ptr);
+
+ priv = rtw_private_handler;
+ priv_args = rtw_private_args;
+ num_priv = ARRAY_SIZE(rtw_private_handler);
+ num_priv_args = ARRAY_SIZE(rtw_private_args);
+
+ if (num_priv_args == 0) {
+ err = -EOPNOTSUPP;
+ goto exit;
+ }
+
+ /* Search the correct ioctl */
+ k = -1;
+ while ((++k < num_priv_args) && strcmp(priv_args[k].name, cmdname));
+
+ /* If not found... */
+ if (k == num_priv_args) {
+ err = -EOPNOTSUPP;
+ goto exit;
+ }
+
+ /* Watch out for sub-ioctls ! */
+ if (priv_args[k].cmd < SIOCDEVPRIVATE)
+ {
+ int j = -1;
+
+ /* Find the matching *real* ioctl */
+ while ((++j < num_priv_args) && ((priv_args[j].name[0] != '\0') ||
+ (priv_args[j].set_args != priv_args[k].set_args) ||
+ (priv_args[j].get_args != priv_args[k].get_args)));
+
+ /* If not found... */
+ if (j == num_priv_args) {
+ err = -EINVAL;
+ goto exit;
+ }
+
+ /* Save sub-ioctl number */
+ subcmd = priv_args[k].cmd;
+ /* Reserve one int (simplify alignment issues) */
+ offset = sizeof(__u32);
+ /* Use real ioctl definition from now on */
+ k = j;
+ }
+
+ buffer = rtw_zmalloc(4096);
+ if (NULL == buffer) {
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ /* If we have to set some data */
+ if ((priv_args[k].set_args & IW_PRIV_TYPE_MASK) &&
+ (priv_args[k].set_args & IW_PRIV_SIZE_MASK))
+ {
+ u8 *str;
+
+ switch (priv_args[k].set_args & IW_PRIV_TYPE_MASK)
+ {
+ case IW_PRIV_TYPE_BYTE:
+ /* Fetch args */
+ count = 0;
+ do {
+ str = strsep(&ptr, delim);
+ if (NULL == str) break;
+ sscanf(str, "%i", &temp);
+ buffer[count++] = (u8)temp;
+ } while (1);
+ buffer_len = count;
+
+ /* Number of args to fetch */
+ wdata.data.length = count;
+ if (wdata.data.length > (priv_args[k].set_args & IW_PRIV_SIZE_MASK))
+ wdata.data.length = priv_args[k].set_args & IW_PRIV_SIZE_MASK;
+
+ break;
+
+ case IW_PRIV_TYPE_INT:
+ /* Fetch args */
+ count = 0;
+ do {
+ str = strsep(&ptr, delim);
+ if (NULL == str) break;
+ sscanf(str, "%i", &temp);
+ ((s32*)buffer)[count++] = (s32)temp;
+ } while (1);
+ buffer_len = count * sizeof(s32);
+
+ /* Number of args to fetch */
+ wdata.data.length = count;
+ if (wdata.data.length > (priv_args[k].set_args & IW_PRIV_SIZE_MASK))
+ wdata.data.length = priv_args[k].set_args & IW_PRIV_SIZE_MASK;
+
+ break;
+
+ case IW_PRIV_TYPE_CHAR:
+ if (len > 0)
+ {
+ /* Size of the string to fetch */
+ wdata.data.length = len;
+ if (wdata.data.length > (priv_args[k].set_args & IW_PRIV_SIZE_MASK))
+ wdata.data.length = priv_args[k].set_args & IW_PRIV_SIZE_MASK;
+
+ /* Fetch string */
+ memcpy(buffer, ptr, wdata.data.length);
+ }
+ else
+ {
+ wdata.data.length = 1;
+ buffer[0] = '\0';
+ }
+ buffer_len = wdata.data.length;
+ break;
+
+ default:
+ DBG_8192C("%s: Not yet implemented...\n", __func__);
+ err = -1;
+ goto exit;
+ }
+
+ if ((priv_args[k].set_args & IW_PRIV_SIZE_FIXED) &&
+ (wdata.data.length != (priv_args[k].set_args & IW_PRIV_SIZE_MASK)))
+ {
+ DBG_8192C("%s: The command %s needs exactly %d argument(s)...\n",
+ __func__, cmdname, priv_args[k].set_args & IW_PRIV_SIZE_MASK);
+ err = -EINVAL;
+ goto exit;
+ }
+ } /* if args to set */
+ else
+ {
+ wdata.data.length = 0L;
+ }
+
+ /* Those two tests are important. They define how the driver
+ * will have to handle the data */
+ if ((priv_args[k].set_args & IW_PRIV_SIZE_FIXED) &&
+ ((get_priv_size(priv_args[k].set_args) + offset) <= IFNAMSIZ))
+ {
+ /* First case : all SET args fit within wrq */
+ if (offset)
+ wdata.mode = subcmd;
+ memcpy(wdata.name + offset, buffer, IFNAMSIZ - offset);
+ }
+ else
+ {
+ if ((priv_args[k].set_args == 0) &&
+ (priv_args[k].get_args & IW_PRIV_SIZE_FIXED) &&
+ (get_priv_size(priv_args[k].get_args) <= IFNAMSIZ))
+ {
+ /* Second case : no SET args, GET args fit within wrq */
+ if (offset)
+ wdata.mode = subcmd;
+ }
+ else
+ {
+ /* Third case : args won't fit in wrq, or variable number of args */
+ if (copy_to_user(wdata.data.pointer, buffer, buffer_len)) {
+ err = -EFAULT;
+ goto exit;
+ }
+ wdata.data.flags = subcmd;
+ }
+ }
+
+ kfree(input);
+ input = NULL;
+
+ extra_size = 0;
+ if (IW_IS_SET(priv_args[k].cmd))
+ {
+ /* Size of set arguments */
+ extra_size = get_priv_size(priv_args[k].set_args);
+
+ /* Does it fit in iwr? */
+ if ((priv_args[k].set_args & IW_PRIV_SIZE_FIXED) &&
+ ((extra_size + offset) <= IFNAMSIZ))
+ extra_size = 0;
+ } else {
+ /* Size of get arguments */
+ extra_size = get_priv_size(priv_args[k].get_args);
+
+ /* Does it fit in iwr? */
+ if ((priv_args[k].get_args & IW_PRIV_SIZE_FIXED) &&
+ (extra_size <= IFNAMSIZ))
+ extra_size = 0;
+ }
+
+ if (extra_size == 0) {
+ extra = (u8 *)&wdata;
+ kfree(buffer);
+ buffer = NULL;
+ } else
+ extra = buffer;
+
+ handler = priv[priv_args[k].cmd - SIOCIWFIRSTPRIV];
+ err = handler(dev, NULL, &wdata, extra);
+
+ /* If we have to get some data */
+ if ((priv_args[k].get_args & IW_PRIV_TYPE_MASK) &&
+ (priv_args[k].get_args & IW_PRIV_SIZE_MASK))
+ {
+ int j;
+ int n = 0; /* number of args */
+ char str[20] = {0};
+
+ /* Check where is the returned data */
+ if ((priv_args[k].get_args & IW_PRIV_SIZE_FIXED) &&
+ (get_priv_size(priv_args[k].get_args) <= IFNAMSIZ))
+ n = priv_args[k].get_args & IW_PRIV_SIZE_MASK;
+ else
+ n = wdata.data.length;
+
+ output = rtw_zmalloc(4096);
+ if (NULL == output) {
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ switch (priv_args[k].get_args & IW_PRIV_TYPE_MASK)
+ {
+ case IW_PRIV_TYPE_BYTE:
+ /* Display args */
+ for (j = 0; j < n; j++)
+ {
+ sprintf(str, "%d ", extra[j]);
+ len = strlen(str);
+ output_len = strlen(output);
+ if ((output_len + len + 1) > 4096) {
+ err = -E2BIG;
+ goto exit;
+ }
+ memcpy(output+output_len, str, len);
+ }
+ break;
+
+ case IW_PRIV_TYPE_INT:
+ /* Display args */
+ for (j = 0; j < n; j++)
+ {
+ sprintf(str, "%d ", ((__s32*)extra)[j]);
+ len = strlen(str);
+ output_len = strlen(output);
+ if ((output_len + len + 1) > 4096) {
+ err = -E2BIG;
+ goto exit;
+ }
+ memcpy(output+output_len, str, len);
+ }
+ break;
+
+ case IW_PRIV_TYPE_CHAR:
+ /* Display args */
+ memcpy(output, extra, n);
+ break;
+
+ default:
+ DBG_8192C("%s: Not yet implemented...\n", __func__);
+ err = -1;
+ goto exit;
+ }
+
+ output_len = strlen(output) + 1;
+ wrq_data->data.length = output_len;
+ if (copy_to_user(wrq_data->data.pointer, output, output_len)) {
+ err = -EFAULT;
+ goto exit;
+ }
+ } /* if args to set */
+ else
+ {
+ wrq_data->data.length = 0;
+ }
+
+exit:
+ kfree(input);
+ kfree(buffer);
+ kfree(output);
+
+ return err;
+}
+
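+/*
+ * Top-level ioctl entry point for the interface; it is expected to be hooked
+ * up as the net_device's ndo_do_ioctl handler elsewhere in the driver. It
+ * only multiplexes between the wpa_supplicant, hostapd and wext private
+ * command paths implemented above.
+ */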
+int rtw_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct iwreq *wrq = (struct iwreq *)rq;
+ int ret = 0;
+
+ switch (cmd)
+ {
+ case RTL_IOCTL_WPA_SUPPLICANT:
+ ret = wpa_supplicant_ioctl(dev, &wrq->u.data);
+ break;
+ case RTL_IOCTL_HOSTAPD:
+ ret = rtw_hostapd_ioctl(dev, &wrq->u.data);
+ break;
+ case SIOCDEVPRIVATE:
+ ret = rtw_ioctl_wext_private(dev, &wrq->u);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ return ret;
+}
diff --git a/drivers/staging/rtl8723bs/os_dep/mlme_linux.c b/drivers/staging/rtl8723bs/os_dep/mlme_linux.c
new file mode 100644
index 000000000000..46315d1a82f7
--- /dev/null
+++ b/drivers/staging/rtl8723bs/os_dep/mlme_linux.c
@@ -0,0 +1,206 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+
+
+#define _MLME_OSDEP_C_
+
+#include <drv_types.h>
+#include <rtw_debug.h>
+
+static void _dynamic_check_timer_handlder(void *FunctionContext)
+{
+ struct adapter *adapter = (struct adapter *)FunctionContext;
+
+ rtw_dynamic_check_timer_handlder(adapter);
+
+ _set_timer(&adapter->mlmepriv.dynamic_chk_timer, 2000);
+}
+
+static void _rtw_set_scan_deny_timer_hdl(void *FunctionContext)
+{
+ struct adapter *adapter = (struct adapter *)FunctionContext;
+ rtw_set_scan_deny_timer_hdl(adapter);
+}
+
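+/*
+ * Set up the mlme_priv timers. _init_timer()/_set_timer() are driver-side
+ * wrappers around the kernel timer API (defined in the osdep service layer,
+ * not shown here); note that the dynamic check timer re-arms itself every
+ * 2000 ms from its own handler above.
+ */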
+void rtw_init_mlme_timer(struct adapter *padapter)
+{
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+
+ _init_timer(&(pmlmepriv->assoc_timer), padapter->pnetdev, _rtw_join_timeout_handler, padapter);
+ /* _init_timer(&(pmlmepriv->sitesurveyctrl.sitesurvey_ctrl_timer), padapter->pnetdev, sitesurvey_ctrl_handler, padapter); */
+ _init_timer(&(pmlmepriv->scan_to_timer), padapter->pnetdev, rtw_scan_timeout_handler, padapter);
+
+ _init_timer(&(pmlmepriv->dynamic_chk_timer), padapter->pnetdev, _dynamic_check_timer_handlder, padapter);
+
+ _init_timer(&(pmlmepriv->set_scan_deny_timer), padapter->pnetdev, _rtw_set_scan_deny_timer_hdl, padapter);
+}
+
+void rtw_os_indicate_connect(struct adapter *adapter)
+{
+ struct mlme_priv *pmlmepriv = &(adapter->mlmepriv);
+
+ if ((check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) == true) ||
+ (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) == true))
+ {
+ rtw_cfg80211_ibss_indicate_connect(adapter);
+ }
+ else
+ rtw_cfg80211_indicate_connect(adapter);
+
+ rtw_indicate_wx_assoc_event(adapter);
+ netif_carrier_on(adapter->pnetdev);
+
+ if (adapter->pid[2] != 0)
+ rtw_signal_process(adapter->pid[2], SIGALRM);
+}
+
+void rtw_os_indicate_scan_done(struct adapter *padapter, bool aborted)
+{
+ rtw_cfg80211_indicate_scan_done(padapter, aborted);
+ indicate_wx_scan_complete_event(padapter);
+}
+
+static RT_PMKID_LIST backupPMKIDList[NUM_PMKID_CACHE];
+void rtw_reset_securitypriv(struct adapter *adapter)
+{
+ u8 backupPMKIDIndex = 0;
+ u8 backupTKIPCountermeasure = 0x00;
+ u32 backupTKIPcountermeasure_time = 0;
+	/* added for CONFIG_IEEE80211W; non-11w builds can also use this */
+ struct mlme_ext_priv *pmlmeext = &adapter->mlmeextpriv;
+
+ spin_lock_bh(&adapter->security_key_mutex);
+
+ if (adapter->securitypriv.dot11AuthAlgrthm == dot11AuthAlgrthm_8021X)/* 802.1x */
+ {
+		/* Added by Albert 2009/02/18 */
+		/* We have to back up the PMK information for the WiFi PMK Caching test item. */
+		/*  */
+		/* Back up the btkip_countermeasure information. */
+		/* When the countermeasure is triggered, the driver has to disconnect from the AP for 60 seconds. */
+
+		memset(&backupPMKIDList[0], 0x00, sizeof(RT_PMKID_LIST) * NUM_PMKID_CACHE);
+
+		memcpy(&backupPMKIDList[0], &adapter->securitypriv.PMKIDList[0], sizeof(RT_PMKID_LIST) * NUM_PMKID_CACHE);
+ backupPMKIDIndex = adapter->securitypriv.PMKIDIndex;
+ backupTKIPCountermeasure = adapter->securitypriv.btkip_countermeasure;
+ backupTKIPcountermeasure_time = adapter->securitypriv.btkip_countermeasure_time;
+
+ /* reset RX BIP packet number */
+ pmlmeext->mgnt_80211w_IPN_rx = 0;
+
+		memset((unsigned char *)&adapter->securitypriv, 0, sizeof(struct security_priv));
+
+		/* Added by Albert 2009/02/18 */
+		/* Restore the PMK information to securitypriv structure for the following connection. */
+		memcpy(&adapter->securitypriv.PMKIDList[0], &backupPMKIDList[0], sizeof(RT_PMKID_LIST) * NUM_PMKID_CACHE);
+ adapter->securitypriv.PMKIDIndex = backupPMKIDIndex;
+ adapter->securitypriv.btkip_countermeasure = backupTKIPCountermeasure;
+ adapter->securitypriv.btkip_countermeasure_time = backupTKIPcountermeasure_time;
+
+ adapter->securitypriv.ndisauthtype = Ndis802_11AuthModeOpen;
+ adapter->securitypriv.ndisencryptstatus = Ndis802_11WEPDisabled;
+
+ }
+ else /* reset values in securitypriv */
+ {
+ /* if (adapter->mlmepriv.fw_state & WIFI_STATION_STATE) */
+ /* */
+		struct security_priv *psec_priv = &adapter->securitypriv;
+
+		psec_priv->dot11AuthAlgrthm = dot11AuthAlgrthm_Open; /* open system */
+ psec_priv->dot11PrivacyAlgrthm = _NO_PRIVACY_;
+ psec_priv->dot11PrivacyKeyIndex = 0;
+
+ psec_priv->dot118021XGrpPrivacy = _NO_PRIVACY_;
+ psec_priv->dot118021XGrpKeyid = 1;
+
+ psec_priv->ndisauthtype = Ndis802_11AuthModeOpen;
+ psec_priv->ndisencryptstatus = Ndis802_11WEPDisabled;
+ /* */
+ }
+	/* added for CONFIG_IEEE80211W; non-11w builds can also use this */
+ spin_unlock_bh(&adapter->security_key_mutex);
+}
+
+void rtw_os_indicate_disconnect(struct adapter *adapter)
+{
+ /* RT_PMKID_LIST backupPMKIDList[ NUM_PMKID_CACHE ]; */
+
+	netif_carrier_off(adapter->pnetdev); /* Do this first to avoid the issue of tx broadcast packets after disconnection! */
+
+ rtw_cfg80211_indicate_disconnect(adapter);
+
+ rtw_indicate_wx_disassoc_event(adapter);
+
+ /* modify for CONFIG_IEEE80211W, none 11w also can use the same command */
+ rtw_reset_securitypriv_cmd(adapter);
+}
+
+void rtw_report_sec_ie(struct adapter *adapter, u8 authmode, u8 *sec_ie)
+{
+ uint len;
+	u8 *buff, *p, i;
+ union iwreq_data wrqu;
+
+ RT_TRACE(_module_mlme_osdep_c_, _drv_info_, ("+rtw_report_sec_ie, authmode =%d\n", authmode));
+
+ buff = NULL;
+ if (authmode == _WPA_IE_ID_)
+ {
+ RT_TRACE(_module_mlme_osdep_c_, _drv_info_, ("rtw_report_sec_ie, authmode =%d\n", authmode));
+
+ buff = rtw_zmalloc(IW_CUSTOM_MAX);
+ if (NULL == buff) {
+ DBG_871X(FUNC_ADPT_FMT ": alloc memory FAIL!!\n",
+ FUNC_ADPT_ARG(adapter));
+ return;
+ }
+ p = buff;
+
+		p += sprintf(p, "ASSOCINFO(ReqIEs =");
+
+		len = sec_ie[1] + 2;
+		len = (len < IW_CUSTOM_MAX) ? len : IW_CUSTOM_MAX;
+
+		for (i = 0; i < len; i++)
+			p += sprintf(p, "%02x", sec_ie[i]);
+
+		p += sprintf(p, ")");
+
+		memset(&wrqu, 0, sizeof(wrqu));
+
+		wrqu.data.length = p - buff;
+
+		wrqu.data.length = (wrqu.data.length < IW_CUSTOM_MAX) ? wrqu.data.length : IW_CUSTOM_MAX;
+
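+		/*
+		 * Note: only the ASSOCINFO string and the wrqu length are
+		 * prepared here; no wireless event is delivered before the
+		 * buffer is freed below.
+		 */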
+ kfree(buff);
+ }
+}
+
+void init_addba_retry_timer(struct adapter *padapter, struct sta_info *psta)
+{
+ _init_timer(&psta->addba_retry_timer, padapter->pnetdev, addba_timer_hdl, psta);
+}
+
+void init_mlme_ext_timer(struct adapter *padapter)
+{
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+
+ _init_timer(&pmlmeext->survey_timer, padapter->pnetdev, survey_timer_hdl, padapter);
+ _init_timer(&pmlmeext->link_timer, padapter->pnetdev, link_timer_hdl, padapter);
+ _init_timer(&pmlmeext->sa_query_timer, padapter->pnetdev, sa_query_timer_hdl, padapter);
+}
diff --git a/drivers/staging/rtl8723bs/os_dep/os_intfs.c b/drivers/staging/rtl8723bs/os_dep/os_intfs.c
new file mode 100644
index 000000000000..f83cfc76505c
--- /dev/null
+++ b/drivers/staging/rtl8723bs/os_dep/os_intfs.c
@@ -0,0 +1,1916 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#define _OS_INTFS_C_
+
+#include <drv_types.h>
+#include <rtw_debug.h>
+#include <hal_data.h>
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Realtek Wireless Lan Driver");
+MODULE_AUTHOR("Realtek Semiconductor Corp.");
+MODULE_VERSION(DRIVERVERSION);
+
+/* module param defaults */
+static int rtw_chip_version = 0x00;
+static int rtw_rfintfs = HWPI;
+static int rtw_lbkmode = 0;/* RTL8712_AIR_TRX; */
+
+
+static int rtw_network_mode = Ndis802_11IBSS;/* Ndis802_11Infrastructure;infra, ad-hoc, auto */
+/* struct ndis_802_11_ssid ssid; */
+static int rtw_channel = 1;/* ad-hoc support requirement */
+static int rtw_wireless_mode = WIRELESS_MODE_MAX;
+static int rtw_vrtl_carrier_sense = AUTO_VCS;
+static int rtw_vcs_type = RTS_CTS;/* */
+static int rtw_rts_thresh = 2347;/* */
+static int rtw_frag_thresh = 2346;/* */
+static int rtw_preamble = PREAMBLE_LONG;/* long, short, auto */
+static int rtw_scan_mode = 1;/* active, passive */
+static int rtw_adhoc_tx_pwr = 1;
+static int rtw_soft_ap = 0;
+/* int smart_ps = 1; */
+static int rtw_power_mgnt = 1;
+static int rtw_ips_mode = IPS_NORMAL;
+module_param(rtw_ips_mode, int, 0644);
+MODULE_PARM_DESC(rtw_ips_mode,"The default IPS mode");
+
+static int rtw_smart_ps = 2;
+
+static int rtw_check_fw_ps = 1;
+
+static int rtw_usb_rxagg_mode = 2;/* USB_RX_AGG_DMA = 1, USB_RX_AGG_USB =2 */
+module_param(rtw_usb_rxagg_mode, int, 0644);
+
+static int rtw_radio_enable = 1;
+static int rtw_long_retry_lmt = 7;
+static int rtw_short_retry_lmt = 7;
+static int rtw_busy_thresh = 40;
+/* int qos_enable = 0; */
+static int rtw_ack_policy = NORMAL_ACK;
+
+static int rtw_software_encrypt = 0;
+static int rtw_software_decrypt = 0;
+
+static int rtw_acm_method = 0;/* 0:By SW 1:By HW. */
+
+static int rtw_wmm_enable = 1;/* default is set to enable the wmm. */
+static int rtw_uapsd_enable = 0;
+static int rtw_uapsd_max_sp = NO_LIMIT;
+static int rtw_uapsd_acbk_en = 0;
+static int rtw_uapsd_acbe_en = 0;
+static int rtw_uapsd_acvi_en = 0;
+static int rtw_uapsd_acvo_en = 0;
+
+int rtw_ht_enable = 1;
+/* 0: 20 MHz, 1: 40 MHz, 2: 80 MHz, 3: 160MHz, 4: 80+80MHz */
+/* 2.4G use bit 0 ~ 3, 5G use bit 4 ~ 7 */
+/* 0x21 means enable 2.4G 40MHz & 5G 80MHz */
+static int rtw_bw_mode = 0x21;
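+/*
+ * Worked example of the encoding above: with the default rtw_bw_mode = 0x21,
+ * the 2.4G nibble (bits 0-3) is 0x1, selecting 40MHz, and the 5G nibble
+ * (bits 4-7) is 0x2, selecting 80MHz.
+ */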
+static int rtw_ampdu_enable = 1;/* for enabling tx A-MPDU; 0: disable, 0x1: enable (only when wifi_spec is 0), 0x2: force enable (ignore wifi_spec) */
+static int rtw_rx_stbc = 1;/* 0: disable, 1: enable for 2.4G */
+static int rtw_ampdu_amsdu = 0;/* 0: disabled, 1: enabled, 2: auto. There is an IOT issue with the D-Link DIR-629 when this flag is turned on */
+/* Short GI support Bit Map */
+/* BIT0 - 20MHz, 0: non-support, 1: support */
+/* BIT1 - 40MHz, 0: non-support, 1: support */
+/* BIT2 - 80MHz, 0: non-support, 1: support */
+/* BIT3 - 160MHz, 0: non-support, 1: support */
+static int rtw_short_gi = 0xf;
+/* BIT0: Enable VHT LDPC Rx, BIT1: Enable VHT LDPC Tx, BIT4: Enable HT LDPC Rx, BIT5: Enable HT LDPC Tx */
+static int rtw_ldpc_cap = 0x33;
+/* BIT0: Enable VHT STBC Rx, BIT1: Enable VHT STBC Tx, BIT4: Enable HT STBC Rx, BIT5: Enable HT STBC Tx */
+static int rtw_stbc_cap = 0x13;
+/* BIT0: Enable VHT Beamformer, BIT1: Enable VHT Beamformee, BIT4: Enable HT Beamformer, BIT5: Enable HT Beamformee */
+static int rtw_beamform_cap = 0x2;
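+/*
+ * Worked example of the capability bit maps above: the default
+ * rtw_short_gi = 0xf sets BIT0-BIT3, i.e. short GI is advertised for all
+ * four bandwidths; rtw_ldpc_cap = 0x33 sets BIT0|BIT1|BIT4|BIT5, i.e. LDPC
+ * Rx and Tx for both VHT and HT; rtw_stbc_cap = 0x13 sets BIT0|BIT1|BIT4,
+ * i.e. VHT STBC Rx/Tx plus HT STBC Rx only; rtw_beamform_cap = 0x2 sets
+ * only BIT1, i.e. VHT beamformee.
+ */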
+
+static int rtw_lowrate_two_xmit = 1;/* Use 2 path Tx to transmit MCS0~7 and legacy mode */
+
+/* int rf_config = RF_1T2R; 1T2R */
+static int rtw_rf_config = RF_MAX_TYPE; /* auto */
+static int rtw_low_power = 0;
+static int rtw_wifi_spec = 0;
+static int rtw_channel_plan = RT_CHANNEL_DOMAIN_MAX;
+
+static int rtw_btcoex_enable = 1;
+module_param(rtw_btcoex_enable, int, 0644);
+MODULE_PARM_DESC(rtw_btcoex_enable, "Enable BT co-existence mechanism");
+static int rtw_bt_iso = 2;/* 0:Low, 1:High, 2:From Efuse */
+static int rtw_bt_sco = 3;/* 0:Idle, 1:Non-SCO, 2:SCO, 3:From Counter, 4:Busy, 5:OtherBusy */
+static int rtw_bt_ampdu = 1;/* 0:Disable BT control A-MPDU, 1:Enable BT control A-MPDU. */
+static int rtw_ant_num = -1; /* <0: undefined, >0: Antenna number */
+module_param(rtw_ant_num, int, 0644);
+MODULE_PARM_DESC(rtw_ant_num, "Antenna number setting");
+
+static int rtw_AcceptAddbaReq = true;/* 0:Reject AP's Add BA req, 1:Accept AP's Add BA req. */
+
+static int rtw_antdiv_cfg = 1; /* 0:OFF , 1:ON, 2:decide by Efuse config */
+static int rtw_antdiv_type = 0; /* 0: decide by efuse; 1: for 88EE, 1Tx and 1RxCG are diversity (2 Ant with SPDT); 2: for 88EE, 1Tx and 2Rx are diversity (2 Ant, Tx and RxCG both on aux port, RxCS on main port); 3: for 88EE, 1Tx and 1RxCG are fixed (1 Ant, Tx and RxCG both on aux port) */
+
+
+static int rtw_enusbss = 0;/* 0:disable, 1:enable */
+
+static int rtw_hwpdn_mode = 2;/* 0:disable, 1:enable, 2: by EFUSE config */
+
+#ifdef CONFIG_HW_PWRP_DETECTION
+static int rtw_hwpwrp_detect = 1;
+#else
+static int rtw_hwpwrp_detect = 0; /* HW power pin detect 0:disable, 1:enable */
+#endif
+
+static int rtw_hw_wps_pbc = 0;
+
+int rtw_mc2u_disable = 0;
+
+static int rtw_80211d = 0;
+
+#ifdef CONFIG_QOS_OPTIMIZATION
+static int rtw_qos_opt_enable = 1;/* 0: disable, 1:enable */
+#else
+static int rtw_qos_opt_enable = 0;/* 0: disable, 1:enable */
+#endif
+module_param(rtw_qos_opt_enable, int, 0644);
+
+static char *ifname = "wlan%d";
+module_param(ifname, charp, 0644);
+MODULE_PARM_DESC(ifname, "The default name to allocate for the first interface");
+
+char *rtw_initmac = NULL; /* temporary MAC address for users to use instead of the MAC address in Efuse */
+
+module_param(rtw_initmac, charp, 0644);
+module_param(rtw_channel_plan, int, 0644);
+module_param(rtw_chip_version, int, 0644);
+module_param(rtw_rfintfs, int, 0644);
+module_param(rtw_lbkmode, int, 0644);
+module_param(rtw_network_mode, int, 0644);
+module_param(rtw_channel, int, 0644);
+module_param(rtw_wmm_enable, int, 0644);
+module_param(rtw_vrtl_carrier_sense, int, 0644);
+module_param(rtw_vcs_type, int, 0644);
+module_param(rtw_busy_thresh, int, 0644);
+
+module_param(rtw_ht_enable, int, 0644);
+module_param(rtw_bw_mode, int, 0644);
+module_param(rtw_ampdu_enable, int, 0644);
+module_param(rtw_rx_stbc, int, 0644);
+module_param(rtw_ampdu_amsdu, int, 0644);
+
+module_param(rtw_lowrate_two_xmit, int, 0644);
+
+module_param(rtw_rf_config, int, 0644);
+module_param(rtw_power_mgnt, int, 0644);
+module_param(rtw_smart_ps, int, 0644);
+module_param(rtw_low_power, int, 0644);
+module_param(rtw_wifi_spec, int, 0644);
+
+module_param(rtw_antdiv_cfg, int, 0644);
+module_param(rtw_antdiv_type, int, 0644);
+
+module_param(rtw_enusbss, int, 0644);
+module_param(rtw_hwpdn_mode, int, 0644);
+module_param(rtw_hwpwrp_detect, int, 0644);
+
+module_param(rtw_hw_wps_pbc, int, 0644);
+
+static uint rtw_max_roaming_times = 2;
+module_param(rtw_max_roaming_times, uint, 0644);
+MODULE_PARM_DESC(rtw_max_roaming_times, "The maximum number of roaming attempts to try");
+
+module_param(rtw_mc2u_disable, int, 0644);
+
+module_param(rtw_80211d, int, 0644);
+MODULE_PARM_DESC(rtw_80211d, "Enable 802.11d mechanism");
+
+static uint rtw_notch_filter = 0;
+module_param(rtw_notch_filter, uint, 0644);
+MODULE_PARM_DESC(rtw_notch_filter, "0:Disable, 1:Enable, 2:Enable only for P2P");
+
+#define CONFIG_RTW_HIQ_FILTER 1
+
+static uint rtw_hiq_filter = CONFIG_RTW_HIQ_FILTER;
+module_param(rtw_hiq_filter, uint, 0644);
+MODULE_PARM_DESC(rtw_hiq_filter, "0:allow all, 1:allow special, 2:deny all");
+
+static int rtw_tx_pwr_lmt_enable = 0;
+static int rtw_tx_pwr_by_rate = 0;
+
+module_param(rtw_tx_pwr_lmt_enable, int, 0644);
+MODULE_PARM_DESC(rtw_tx_pwr_lmt_enable,"0:Disable, 1:Enable, 2: Depend on efuse");
+
+module_param(rtw_tx_pwr_by_rate, int, 0644);
+MODULE_PARM_DESC(rtw_tx_pwr_by_rate,"0:Disable, 1:Enable, 2: Depend on efuse");
+
+char *rtw_phy_file_path = "";
+module_param(rtw_phy_file_path, charp, 0644);
+MODULE_PARM_DESC(rtw_phy_file_path, "The path of phy parameter");
+/* PHY FILE Bit Map */
+/* BIT0 - MAC, 0: non-support, 1: support */
+/* BIT1 - BB, 0: non-support, 1: support */
+/* BIT2 - BB_PG, 0: non-support, 1: support */
+/* BIT3 - BB_MP, 0: non-support, 1: support */
+/* BIT4 - RF, 0: non-support, 1: support */
+/* BIT5 - RF_TXPWR_TRACK, 0: non-support, 1: support */
+/* BIT6 - RF_TXPWR_LMT, 0: non-support, 1: support */
+static int rtw_load_phy_file = (BIT2|BIT6);
+module_param(rtw_load_phy_file, int, 0644);
+MODULE_PARM_DESC(rtw_load_phy_file,"PHY File Bit Map");
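+/*
+ * Worked example of the bit map above: the default rtw_load_phy_file =
+ * (BIT2|BIT6) = 0x44, so only the BB_PG (BIT2) and RF_TXPWR_LMT (BIT6)
+ * parameter files are loaded from rtw_phy_file_path; the MAC, BB, BB_MP,
+ * RF and RF_TXPWR_TRACK files are not.
+ */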
+static int rtw_decrypt_phy_file = 0;
+module_param(rtw_decrypt_phy_file, int, 0644);
+MODULE_PARM_DESC(rtw_decrypt_phy_file,"Enable Decrypt PHY File");
+
+int _netdev_open(struct net_device *pnetdev);
+int netdev_open (struct net_device *pnetdev);
+static int netdev_close (struct net_device *pnetdev);
+
+static uint loadparam(struct adapter *padapter, _nic_hdl pnetdev)
+{
+ uint status = _SUCCESS;
+ struct registry_priv *registry_par = &padapter->registrypriv;
+
+ registry_par->chip_version = (u8)rtw_chip_version;
+ registry_par->rfintfs = (u8)rtw_rfintfs;
+ registry_par->lbkmode = (u8)rtw_lbkmode;
+ /* registry_par->hci = (u8)hci; */
+ registry_par->network_mode = (u8)rtw_network_mode;
+
+ memcpy(registry_par->ssid.Ssid, "ANY", 3);
+ registry_par->ssid.SsidLength = 3;
+
+ registry_par->channel = (u8)rtw_channel;
+ registry_par->wireless_mode = (u8)rtw_wireless_mode;
+
+ if (registry_par->channel > 14)
+ registry_par->channel = 1;
+
+	registry_par->vrtl_carrier_sense = (u8)rtw_vrtl_carrier_sense;
+	registry_par->vcs_type = (u8)rtw_vcs_type;
+	registry_par->rts_thresh = (u16)rtw_rts_thresh;
+	registry_par->frag_thresh = (u16)rtw_frag_thresh;
+ registry_par->preamble = (u8)rtw_preamble;
+ registry_par->scan_mode = (u8)rtw_scan_mode;
+ registry_par->adhoc_tx_pwr = (u8)rtw_adhoc_tx_pwr;
+ registry_par->soft_ap = (u8)rtw_soft_ap;
+ registry_par->smart_ps = (u8)rtw_smart_ps;
+ registry_par->check_fw_ps = (u8)rtw_check_fw_ps;
+ registry_par->power_mgnt = (u8)rtw_power_mgnt;
+ registry_par->ips_mode = (u8)rtw_ips_mode;
+ registry_par->radio_enable = (u8)rtw_radio_enable;
+ registry_par->long_retry_lmt = (u8)rtw_long_retry_lmt;
+ registry_par->short_retry_lmt = (u8)rtw_short_retry_lmt;
+ registry_par->busy_thresh = (u16)rtw_busy_thresh;
+ /* registry_par->qos_enable = (u8)rtw_qos_enable; */
+ registry_par->ack_policy = (u8)rtw_ack_policy;
+ registry_par->software_encrypt = (u8)rtw_software_encrypt;
+ registry_par->software_decrypt = (u8)rtw_software_decrypt;
+
+ registry_par->acm_method = (u8)rtw_acm_method;
+ registry_par->usb_rxagg_mode = (u8)rtw_usb_rxagg_mode;
+
+ /* UAPSD */
+ registry_par->wmm_enable = (u8)rtw_wmm_enable;
+ registry_par->uapsd_enable = (u8)rtw_uapsd_enable;
+ registry_par->uapsd_max_sp = (u8)rtw_uapsd_max_sp;
+ registry_par->uapsd_acbk_en = (u8)rtw_uapsd_acbk_en;
+ registry_par->uapsd_acbe_en = (u8)rtw_uapsd_acbe_en;
+ registry_par->uapsd_acvi_en = (u8)rtw_uapsd_acvi_en;
+ registry_par->uapsd_acvo_en = (u8)rtw_uapsd_acvo_en;
+
+ registry_par->ht_enable = (u8)rtw_ht_enable;
+ registry_par->bw_mode = (u8)rtw_bw_mode;
+ registry_par->ampdu_enable = (u8)rtw_ampdu_enable;
+ registry_par->rx_stbc = (u8)rtw_rx_stbc;
+ registry_par->ampdu_amsdu = (u8)rtw_ampdu_amsdu;
+ registry_par->short_gi = (u8)rtw_short_gi;
+ registry_par->ldpc_cap = (u8)rtw_ldpc_cap;
+ registry_par->stbc_cap = (u8)rtw_stbc_cap;
+ registry_par->beamform_cap = (u8)rtw_beamform_cap;
+
+ registry_par->lowrate_two_xmit = (u8)rtw_lowrate_two_xmit;
+ registry_par->rf_config = (u8)rtw_rf_config;
+ registry_par->low_power = (u8)rtw_low_power;
+
+
+ registry_par->wifi_spec = (u8)rtw_wifi_spec;
+
+ registry_par->channel_plan = (u8)rtw_channel_plan;
+
+ registry_par->btcoex = (u8)rtw_btcoex_enable;
+ registry_par->bt_iso = (u8)rtw_bt_iso;
+ registry_par->bt_sco = (u8)rtw_bt_sco;
+ registry_par->bt_ampdu = (u8)rtw_bt_ampdu;
+ registry_par->ant_num = (s8)rtw_ant_num;
+
+ registry_par->bAcceptAddbaReq = (u8)rtw_AcceptAddbaReq;
+
+ registry_par->antdiv_cfg = (u8)rtw_antdiv_cfg;
+ registry_par->antdiv_type = (u8)rtw_antdiv_type;
+
+ registry_par->hw_wps_pbc = (u8)rtw_hw_wps_pbc;
+
+ registry_par->max_roaming_times = (u8)rtw_max_roaming_times;
+#ifdef CONFIG_INTEL_WIDI
+ registry_par->max_roaming_times = (u8)rtw_max_roaming_times + 2;
+#endif /* CONFIG_INTEL_WIDI */
+
+ registry_par->enable80211d = (u8)rtw_80211d;
+
+ snprintf(registry_par->ifname, 16, "%s", ifname);
+
+ registry_par->notch_filter = (u8)rtw_notch_filter;
+
+ registry_par->RegEnableTxPowerLimit = (u8)rtw_tx_pwr_lmt_enable;
+ registry_par->RegEnableTxPowerByRate = (u8)rtw_tx_pwr_by_rate;
+
+ registry_par->RegPowerBase = 14;
+ registry_par->TxBBSwing_2G = 0xFF;
+ registry_par->TxBBSwing_5G = 0xFF;
+ registry_par->bEn_RFE = 1;
+ registry_par->RFE_Type = 64;
+
+ registry_par->load_phy_file = (u8)rtw_load_phy_file;
+ registry_par->RegDecryptCustomFile = (u8)rtw_decrypt_phy_file;
+ registry_par->qos_opt_enable = (u8)rtw_qos_opt_enable;
+
+ registry_par->hiq_filter = (u8)rtw_hiq_filter;
+ return status;
+}
+
+static int rtw_net_set_mac_address(struct net_device *pnetdev, void *p)
+{
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(pnetdev);
+ struct sockaddr *addr = p;
+
+ if (padapter->bup == false)
+ {
+ /* DBG_871X("r8711_net_set_mac_address(), MAC =%x:%x:%x:%x:%x:%x\n", addr->sa_data[0], addr->sa_data[1], addr->sa_data[2], addr->sa_data[3], */
+ /* addr->sa_data[4], addr->sa_data[5]); */
+ memcpy(padapter->eeprompriv.mac_addr, addr->sa_data, ETH_ALEN);
+ /* memcpy(pnetdev->dev_addr, addr->sa_data, ETH_ALEN); */
+ /* padapter->bset_hwaddr = true; */
+ }
+
+ return 0;
+}
+
+static struct net_device_stats *rtw_net_get_stats(struct net_device *pnetdev)
+{
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(pnetdev);
+ struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
+ struct recv_priv *precvpriv = &(padapter->recvpriv);
+
+ padapter->stats.tx_packets = pxmitpriv->tx_pkts;/* pxmitpriv->tx_pkts++; */
+ padapter->stats.rx_packets = precvpriv->rx_pkts;/* precvpriv->rx_pkts++; */
+ padapter->stats.tx_dropped = pxmitpriv->tx_drop;
+ padapter->stats.rx_dropped = precvpriv->rx_drop;
+ padapter->stats.tx_bytes = pxmitpriv->tx_bytes;
+ padapter->stats.rx_bytes = precvpriv->rx_bytes;
+
+ return &padapter->stats;
+}
+
+/*
+ * AC to queue mapping
+ *
+ * AC_VO -> queue 0
+ * AC_VI -> queue 1
+ * AC_BE -> queue 2
+ * AC_BK -> queue 3
+ */
+static const u16 rtw_1d_to_queue[8] = { 2, 3, 3, 2, 1, 1, 0, 0 };
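+/*
+ * Worked example of the mapping above: rtw_1d_to_queue[] is indexed by the
+ * 802.1d priority, so priorities 6-7 (voice) map to queue 0 (AC_VO),
+ * priorities 4-5 (video) to queue 1 (AC_VI), priorities 0 and 3 to queue 2
+ * (AC_BE), and priorities 1-2 to queue 3 (AC_BK).
+ */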
+
+/* Given a data frame, determine the 802.1p/1d tag to use. */
+static unsigned int rtw_classify8021d(struct sk_buff *skb)
+{
+ unsigned int dscp;
+
+ /* skb->priority values from 256->263 are magic values to
+ * directly indicate a specific 802.1d priority. This is used
+ * to allow 802.1d priority to be passed directly in from VLAN
+ * tags, etc.
+ */
+ if (skb->priority >= 256 && skb->priority <= 263)
+ return skb->priority - 256;
+
+ switch (skb->protocol) {
+ case htons(ETH_P_IP):
+ dscp = ip_hdr(skb)->tos & 0xfc;
+ break;
+ default:
+ return 0;
+ }
+
+ return dscp >> 5;
+}
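+/*
+ * Worked example (illustrative values, assuming no ACM downgrade in
+ * rtw_select_queue() below): an IPv4 packet with TOS 0xb8 (DSCP 46, EF)
+ * gives dscp = 0xb8 & 0xfc = 0xb8, so this function returns 0xb8 >> 5 = 5,
+ * which rtw_1d_to_queue[] maps to hardware queue 1 (AC_VI). A frame whose
+ * skb->priority is already 262 is treated as 802.1d priority 262 - 256 = 6
+ * and ends up on queue 0 (AC_VO).
+ */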
+
+
+static u16 rtw_select_queue(struct net_device *dev, struct sk_buff *skb
+ , void *accel_priv
+ , select_queue_fallback_t fallback
+)
+{
+ struct adapter *padapter = rtw_netdev_priv(dev);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+
+ skb->priority = rtw_classify8021d(skb);
+
+ if (pmlmepriv->acm_mask != 0)
+ {
+ skb->priority = qos_acm(pmlmepriv->acm_mask, skb->priority);
+ }
+
+ return rtw_1d_to_queue[skb->priority];
+}
+
+u16 rtw_recv_select_queue(struct sk_buff *skb)
+{
+ struct iphdr *piphdr;
+ unsigned int dscp;
+ __be16 eth_type;
+ u32 priority;
+ u8 *pdata = skb->data;
+
+ memcpy(&eth_type, pdata+(ETH_ALEN<<1), 2);
+
+ switch (be16_to_cpu(eth_type)) {
+ case ETH_P_IP:
+
+ piphdr = (struct iphdr *)(pdata+ETH_HLEN);
+
+ dscp = piphdr->tos & 0xfc;
+
+ priority = dscp >> 5;
+
+ break;
+ default:
+ priority = 0;
+ }
+
+ return rtw_1d_to_queue[priority];
+
+}
+
+static int rtw_ndev_notifier_call(struct notifier_block * nb, unsigned long state, void *ptr)
+{
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+
+ if (dev->netdev_ops->ndo_do_ioctl != rtw_ioctl)
+ return NOTIFY_DONE;
+
+ DBG_871X_LEVEL(_drv_info_, FUNC_NDEV_FMT" state:%lu\n", FUNC_NDEV_ARG(dev), state);
+
+ switch (state) {
+ case NETDEV_CHANGENAME:
+ rtw_adapter_proc_replace(dev);
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block rtw_ndev_notifier = {
+ .notifier_call = rtw_ndev_notifier_call,
+};
+
+int rtw_ndev_notifier_register(void)
+{
+ return register_netdevice_notifier(&rtw_ndev_notifier);
+}
+
+void rtw_ndev_notifier_unregister(void)
+{
+ unregister_netdevice_notifier(&rtw_ndev_notifier);
+}
+
+
+static int rtw_ndev_init(struct net_device *dev)
+{
+ struct adapter *adapter = rtw_netdev_priv(dev);
+
+ DBG_871X_LEVEL(_drv_always_, FUNC_ADPT_FMT"\n", FUNC_ADPT_ARG(adapter));
+ strncpy(adapter->old_ifname, dev->name, IFNAMSIZ);
+ rtw_adapter_proc_init(dev);
+
+ return 0;
+}
+
+static void rtw_ndev_uninit(struct net_device *dev)
+{
+ struct adapter *adapter = rtw_netdev_priv(dev);
+
+ DBG_871X_LEVEL(_drv_always_, FUNC_ADPT_FMT"\n", FUNC_ADPT_ARG(adapter));
+ rtw_adapter_proc_deinit(dev);
+}
+
+static const struct net_device_ops rtw_netdev_ops = {
+ .ndo_init = rtw_ndev_init,
+ .ndo_uninit = rtw_ndev_uninit,
+ .ndo_open = netdev_open,
+ .ndo_stop = netdev_close,
+ .ndo_start_xmit = rtw_xmit_entry,
+ .ndo_select_queue = rtw_select_queue,
+ .ndo_set_mac_address = rtw_net_set_mac_address,
+ .ndo_get_stats = rtw_net_get_stats,
+ .ndo_do_ioctl = rtw_ioctl,
+};
+
+int rtw_init_netdev_name(struct net_device *pnetdev, const char *ifname)
+{
+ if (dev_alloc_name(pnetdev, ifname) < 0) {
+ pr_err("dev_alloc_name, fail for %s\n", ifname);
+ return 1;
+ }
+ netif_carrier_off(pnetdev);
+ /* rtw_netif_stop_queue(pnetdev); */
+
+ return 0;
+}
+
+struct net_device *rtw_init_netdev(struct adapter *old_padapter)
+{
+ struct adapter *padapter;
+ struct net_device *pnetdev;
+
+ RT_TRACE(_module_os_intfs_c_, _drv_info_, ("+init_net_dev\n"));
+
+ if (old_padapter != NULL)
+ pnetdev = rtw_alloc_etherdev_with_old_priv(sizeof(struct adapter), (void *)old_padapter);
+ else
+ pnetdev = rtw_alloc_etherdev(sizeof(struct adapter));
+
+ pr_info("pnetdev = %p\n", pnetdev);
+ if (!pnetdev)
+ return NULL;
+
+ padapter = rtw_netdev_priv(pnetdev);
+ padapter->pnetdev = pnetdev;
+
+ /* pnetdev->init = NULL; */
+
+ DBG_871X("register rtw_netdev_ops to netdev_ops\n");
+ pnetdev->netdev_ops = &rtw_netdev_ops;
+
+ /* pnetdev->tx_timeout = NULL; */
+ pnetdev->watchdog_timeo = HZ*3; /* 3 second timeout */
+ pnetdev->wireless_handlers = (struct iw_handler_def *)&rtw_handlers_def;
+
+ /* step 2. */
+ loadparam(padapter, pnetdev);
+
+ return pnetdev;
+}
+
+void rtw_unregister_netdevs(struct dvobj_priv *dvobj)
+{
+ struct adapter *padapter = NULL;
+ struct net_device *pnetdev = NULL;
+
+ padapter = dvobj->padapters;
+
+ if (padapter == NULL)
+ return;
+
+ pnetdev = padapter->pnetdev;
+
+ if ((padapter->DriverState != DRIVER_DISAPPEAR) && pnetdev)
+ unregister_netdev(pnetdev); /* will call netdev_close() */
+ rtw_wdev_unregister(padapter->rtw_wdev);
+}
+
+u32 rtw_start_drv_threads(struct adapter *padapter)
+{
+ u32 _status = _SUCCESS;
+
+ RT_TRACE(_module_os_intfs_c_, _drv_info_, ("+rtw_start_drv_threads\n"));
+ padapter->xmitThread = kthread_run(rtw_xmit_thread, padapter, "RTW_XMIT_THREAD");
+ if (IS_ERR(padapter->xmitThread))
+ _status = _FAIL;
+
+ padapter->cmdThread = kthread_run(rtw_cmd_thread, padapter, "RTW_CMD_THREAD");
+ if (IS_ERR(padapter->cmdThread))
+ _status = _FAIL;
+ else
+ down(&padapter->cmdpriv.terminate_cmdthread_sema); /* wait for cmd_thread to run */
+
+ rtw_hal_start_thread(padapter);
+ return _status;
+}
+
+void rtw_stop_drv_threads (struct adapter *padapter)
+{
+ RT_TRACE(_module_os_intfs_c_, _drv_info_, ("+rtw_stop_drv_threads\n"));
+
+ rtw_stop_cmd_thread(padapter);
+
+	/* Below is to terminate tx_thread... */
+ up(&padapter->xmitpriv.xmit_sema);
+ down(&padapter->xmitpriv.terminate_xmitthread_sema);
+ RT_TRACE(_module_os_intfs_c_, _drv_info_, ("\n drv_halt: rtw_xmit_thread can be terminated !\n"));
+
+ rtw_hal_stop_thread(padapter);
+}
+
+static u8 rtw_init_default_value(struct adapter *padapter)
+{
+ u8 ret = _SUCCESS;
+ struct registry_priv* pregistrypriv = &padapter->registrypriv;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
+
+ /* xmit_priv */
+ pxmitpriv->vcs_setting = pregistrypriv->vrtl_carrier_sense;
+ pxmitpriv->vcs = pregistrypriv->vcs_type;
+ pxmitpriv->vcs_type = pregistrypriv->vcs_type;
+ /* pxmitpriv->rts_thresh = pregistrypriv->rts_thresh; */
+ pxmitpriv->frag_len = pregistrypriv->frag_thresh;
+
+ /* recv_priv */
+
+ /* mlme_priv */
+ pmlmepriv->scan_mode = SCAN_ACTIVE;
+
+ /* qos_priv */
+ /* pmlmepriv->qospriv.qos_option = pregistrypriv->wmm_enable; */
+
+ /* ht_priv */
+ pmlmepriv->htpriv.ampdu_enable = false;/* set to disabled */
+
+ /* security_priv */
+ /* rtw_get_encrypt_decrypt_from_registrypriv(padapter); */
+ psecuritypriv->binstallGrpkey = _FAIL;
+#ifdef CONFIG_GTK_OL
+ psecuritypriv->binstallKCK_KEK = _FAIL;
+#endif /* CONFIG_GTK_OL */
+	psecuritypriv->sw_encrypt = pregistrypriv->software_encrypt;
+	psecuritypriv->sw_decrypt = pregistrypriv->software_decrypt;
+
+ psecuritypriv->dot11AuthAlgrthm = dot11AuthAlgrthm_Open; /* open system */
+ psecuritypriv->dot11PrivacyAlgrthm = _NO_PRIVACY_;
+
+ psecuritypriv->dot11PrivacyKeyIndex = 0;
+
+ psecuritypriv->dot118021XGrpPrivacy = _NO_PRIVACY_;
+ psecuritypriv->dot118021XGrpKeyid = 1;
+
+ psecuritypriv->ndisauthtype = Ndis802_11AuthModeOpen;
+ psecuritypriv->ndisencryptstatus = Ndis802_11WEPDisabled;
+
+ /* registry_priv */
+ rtw_init_registrypriv_dev_network(padapter);
+ rtw_update_registrypriv_dev_network(padapter);
+
+ /* hal_priv */
+ rtw_hal_def_value_init(padapter);
+
+ /* misc. */
+ RTW_ENABLE_FUNC(padapter, DF_RX_BIT);
+ RTW_ENABLE_FUNC(padapter, DF_TX_BIT);
+ padapter->bLinkInfoDump = 0;
+ padapter->bNotifyChannelChange = 0;
+
+ /* for debug purpose */
+ padapter->fix_rate = 0xFF;
+ padapter->driver_ampdu_spacing = 0xFF;
+ padapter->driver_rx_ampdu_factor = 0xFF;
+
+ return ret;
+}
+
+struct dvobj_priv *devobj_init(void)
+{
+ struct dvobj_priv *pdvobj = NULL;
+
+	pdvobj = (struct dvobj_priv *)rtw_zmalloc(sizeof(*pdvobj));
+	if (pdvobj == NULL)
+		return NULL;
+
+ mutex_init(&pdvobj->hw_init_mutex);
+ mutex_init(&pdvobj->h2c_fwcmd_mutex);
+ mutex_init(&pdvobj->setch_mutex);
+ mutex_init(&pdvobj->setbw_mutex);
+
+ spin_lock_init(&pdvobj->lock);
+
+ pdvobj->macid[1] = true; /* macid = 1 for bc/mc stainfo */
+
+ pdvobj->processing_dev_remove = false;
+
+ atomic_set(&pdvobj->disable_func, 0);
+
+ spin_lock_init(&pdvobj->cam_ctl.lock);
+
+ return pdvobj;
+}
+
+void devobj_deinit(struct dvobj_priv *pdvobj)
+{
+ if (!pdvobj)
+ return;
+
+ mutex_destroy(&pdvobj->hw_init_mutex);
+ mutex_destroy(&pdvobj->h2c_fwcmd_mutex);
+ mutex_destroy(&pdvobj->setch_mutex);
+ mutex_destroy(&pdvobj->setbw_mutex);
+
+ kfree((u8 *)pdvobj);
+}
+
+u8 rtw_reset_drv_sw(struct adapter *padapter)
+{
+ u8 ret8 = _SUCCESS;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct pwrctrl_priv *pwrctrlpriv = adapter_to_pwrctl(padapter);
+
+ /* hal_priv */
+ if (is_primary_adapter(padapter))
+ rtw_hal_def_value_init(padapter);
+
+ RTW_ENABLE_FUNC(padapter, DF_RX_BIT);
+ RTW_ENABLE_FUNC(padapter, DF_TX_BIT);
+ padapter->bLinkInfoDump = 0;
+
+ padapter->xmitpriv.tx_pkts = 0;
+ padapter->recvpriv.rx_pkts = 0;
+
+ pmlmepriv->LinkDetectInfo.bBusyTraffic = false;
+
+ /* pmlmepriv->LinkDetectInfo.TrafficBusyState = false; */
+ pmlmepriv->LinkDetectInfo.TrafficTransitionCount = 0;
+ pmlmepriv->LinkDetectInfo.LowPowerTransitionCount = 0;
+
+ _clr_fwstate_(pmlmepriv, _FW_UNDER_SURVEY |_FW_UNDER_LINKING);
+
+ pwrctrlpriv->pwr_state_check_cnts = 0;
+
+ /* mlmeextpriv */
+ padapter->mlmeextpriv.sitesurvey_res.state = SCAN_DISABLE;
+
+ rtw_set_signal_stat_timer(&padapter->recvpriv);
+
+ return ret8;
+}
+
+
+u8 rtw_init_drv_sw(struct adapter *padapter)
+{
+ u8 ret8 = _SUCCESS;
+
+ RT_TRACE(_module_os_intfs_c_, _drv_info_, ("+rtw_init_drv_sw\n"));
+
+ ret8 = rtw_init_default_value(padapter);
+
+ rtw_init_hal_com_default_value(padapter);
+
+ if ((rtw_init_cmd_priv(&padapter->cmdpriv)) == _FAIL) {
+ RT_TRACE(_module_os_intfs_c_, _drv_err_, ("\n Can't init cmd_priv\n"));
+ ret8 = _FAIL;
+ goto exit;
+ }
+
+	padapter->cmdpriv.padapter = padapter;
+
+ if ((rtw_init_evt_priv(&padapter->evtpriv)) == _FAIL) {
+ RT_TRACE(_module_os_intfs_c_, _drv_err_, ("\n Can't init evt_priv\n"));
+ ret8 = _FAIL;
+ goto exit;
+ }
+
+
+ if (rtw_init_mlme_priv(padapter) == _FAIL) {
+ RT_TRACE(_module_os_intfs_c_, _drv_err_, ("\n Can't init mlme_priv\n"));
+ ret8 = _FAIL;
+ goto exit;
+ }
+
+ if (init_mlme_ext_priv(padapter) == _FAIL) {
+ RT_TRACE(_module_os_intfs_c_, _drv_err_, ("\n Can't init mlme_ext_priv\n"));
+ ret8 = _FAIL;
+ goto exit;
+ }
+
+ if (_rtw_init_xmit_priv(&padapter->xmitpriv, padapter) == _FAIL) {
+ DBG_871X("Can't _rtw_init_xmit_priv\n");
+ ret8 = _FAIL;
+ goto exit;
+ }
+
+ if (_rtw_init_recv_priv(&padapter->recvpriv, padapter) == _FAIL) {
+ DBG_871X("Can't _rtw_init_recv_priv\n");
+ ret8 = _FAIL;
+ goto exit;
+ }
+	/* added for CONFIG_IEEE80211W; non-11w builds can also use this */
+ spin_lock_init(&padapter->security_key_mutex);
+
+ /* We don't need to memset padapter->XXX to zero, because adapter is allocated by vzalloc(). */
+ /* memset((unsigned char *)&padapter->securitypriv, 0, sizeof (struct security_priv)); */
+
+ if (_rtw_init_sta_priv(&padapter->stapriv) == _FAIL) {
+ DBG_871X("Can't _rtw_init_sta_priv\n");
+ ret8 = _FAIL;
+ goto exit;
+ }
+
+ padapter->stapriv.padapter = padapter;
+ padapter->setband = GHZ24_50;
+ padapter->fix_rate = 0xFF;
+ rtw_init_bcmc_stainfo(padapter);
+
+ rtw_init_pwrctrl_priv(padapter);
+
+ rtw_hal_dm_init(padapter);
+
+#ifdef CONFIG_INTEL_WIDI
+ if (rtw_init_intel_widi(padapter) == _FAIL) {
+ DBG_871X("Can't rtw_init_intel_widi\n");
+ ret8 = _FAIL;
+ goto exit;
+ }
+#endif /* CONFIG_INTEL_WIDI */
+
+exit:
+
+ RT_TRACE(_module_os_intfs_c_, _drv_info_, ("-rtw_init_drv_sw\n"));
+
+ return ret8;
+}
+
+void rtw_cancel_all_timer(struct adapter *padapter)
+{
+ RT_TRACE(_module_os_intfs_c_, _drv_info_, ("+rtw_cancel_all_timer\n"));
+
+ del_timer_sync(&padapter->mlmepriv.assoc_timer);
+ RT_TRACE(_module_os_intfs_c_, _drv_info_, ("rtw_cancel_all_timer:cancel association timer complete!\n"));
+
+ del_timer_sync(&padapter->mlmepriv.scan_to_timer);
+ RT_TRACE(_module_os_intfs_c_, _drv_info_, ("rtw_cancel_all_timer:cancel scan_to_timer!\n"));
+
+ del_timer_sync(&padapter->mlmepriv.dynamic_chk_timer);
+ RT_TRACE(_module_os_intfs_c_, _drv_info_, ("rtw_cancel_all_timer:cancel dynamic_chk_timer!\n"));
+
+ del_timer_sync(&(adapter_to_pwrctl(padapter)->pwr_state_check_timer));
+
+ del_timer_sync(&padapter->mlmepriv.set_scan_deny_timer);
+ rtw_clear_scan_deny(padapter);
+ RT_TRACE(_module_os_intfs_c_, _drv_info_, ("rtw_cancel_all_timer:cancel set_scan_deny_timer!\n"));
+
+ del_timer_sync(&padapter->recvpriv.signal_stat_timer);
+
+ /* cancel dm timer */
+ rtw_hal_dm_deinit(padapter);
+}
+
+u8 rtw_free_drv_sw(struct adapter *padapter)
+{
+ RT_TRACE(_module_os_intfs_c_, _drv_info_, ("==>rtw_free_drv_sw"));
+
+#ifdef CONFIG_INTEL_WIDI
+ rtw_free_intel_widi(padapter);
+#endif /* CONFIG_INTEL_WIDI */
+
+ free_mlme_ext_priv(&padapter->mlmeextpriv);
+
+ rtw_free_cmd_priv(&padapter->cmdpriv);
+
+ rtw_free_evt_priv(&padapter->evtpriv);
+
+ rtw_free_mlme_priv(&padapter->mlmepriv);
+
+ /* free_io_queue(padapter); */
+
+ _rtw_free_xmit_priv(&padapter->xmitpriv);
+
+ _rtw_free_sta_priv(&padapter->stapriv); /* will free bcmc_stainfo here */
+
+ _rtw_free_recv_priv(&padapter->recvpriv);
+
+ rtw_free_pwrctrl_priv(padapter);
+
+ /* kfree((void *)padapter); */
+
+ rtw_hal_free_data(padapter);
+
+ RT_TRACE(_module_os_intfs_c_, _drv_info_, ("<==rtw_free_drv_sw\n"));
+
+ /* free the old_pnetdev */
+ if (padapter->rereg_nd_name_priv.old_pnetdev) {
+ free_netdev(padapter->rereg_nd_name_priv.old_pnetdev);
+ padapter->rereg_nd_name_priv.old_pnetdev = NULL;
+ }
+
+ /* clear pbuddystruct adapter to avoid access wrong pointer. */
+ if (padapter->pbuddy_adapter != NULL)
+ padapter->pbuddy_adapter->pbuddy_adapter = NULL;
+
+ RT_TRACE(_module_os_intfs_c_, _drv_info_, ("-rtw_free_drv_sw\n"));
+
+ return _SUCCESS;
+}
+
+static int _rtw_drv_register_netdev(struct adapter *padapter, char *name)
+{
+ int ret = _SUCCESS;
+ struct net_device *pnetdev = padapter->pnetdev;
+
+ /* alloc netdev name */
+ if (rtw_init_netdev_name(pnetdev, name))
+ return _FAIL;
+
+ memcpy(pnetdev->dev_addr, padapter->eeprompriv.mac_addr, ETH_ALEN);
+
+ /* Tell the network stack we exist */
+ if (register_netdev(pnetdev) != 0) {
+ DBG_871X(FUNC_NDEV_FMT "Failed!\n", FUNC_NDEV_ARG(pnetdev));
+ ret = _FAIL;
+ goto error_register_netdev;
+	/* modified for CONFIG_IEEE80211W; non-11w builds can also use the same command */
+
+ DBG_871X("%s, MAC Address (if%d) = " MAC_FMT "\n", __func__, (padapter->iface_id+1), MAC_ARG(pnetdev->dev_addr));
+
+ return ret;
+
+error_register_netdev:
+
+ rtw_free_drv_sw(padapter);
+
+ rtw_free_netdev(pnetdev);
+
+ return ret;
+}
+
+int rtw_drv_register_netdev(struct adapter *if1)
+{
+ struct dvobj_priv *dvobj = if1->dvobj;
+ struct adapter *padapter = dvobj->padapters;
+ char *name = if1->registrypriv.ifname;
+
+ return _rtw_drv_register_netdev(padapter, name);
+}
+
+int _netdev_open(struct net_device *pnetdev)
+{
+ uint status;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(pnetdev);
+ struct pwrctrl_priv *pwrctrlpriv = adapter_to_pwrctl(padapter);
+
+ RT_TRACE(_module_os_intfs_c_, _drv_info_, ("+871x_drv - dev_open\n"));
+ DBG_871X("+871x_drv - drv_open, bup =%d\n", padapter->bup);
+
+ padapter->netif_up = true;
+
+ if (pwrctrlpriv->ps_flag == true) {
+ padapter->net_closed = false;
+ goto netdev_open_normal_process;
+ }
+
+ if (padapter->bup == false) {
+ padapter->bDriverStopped = false;
+ padapter->bSurpriseRemoved = false;
+ padapter->bCardDisableWOHSM = false;
+
+ status = rtw_hal_init(padapter);
+ if (status == _FAIL) {
+ RT_TRACE(_module_os_intfs_c_, _drv_err_, ("rtl871x_hal_init(): Can't init h/w!\n"));
+ goto netdev_open_error;
+ }
+
+ DBG_871X("MAC Address = "MAC_FMT"\n", MAC_ARG(pnetdev->dev_addr));
+
+		status = rtw_start_drv_threads(padapter);
+ if (status == _FAIL) {
+ DBG_871X("Initialize driver software resource Failed!\n");
+ goto netdev_open_error;
+ }
+
+ if (padapter->intf_start)
+ padapter->intf_start(padapter);
+
+ rtw_cfg80211_init_wiphy(padapter);
+
+ padapter->bup = true;
+ pwrctrlpriv->bips_processing = false;
+ }
+ padapter->net_closed = false;
+
+ _set_timer(&padapter->mlmepriv.dynamic_chk_timer, 2000);
+
+ if (!rtw_netif_queue_stopped(pnetdev))
+ rtw_netif_start_queue(pnetdev);
+ else
+ rtw_netif_wake_queue(pnetdev);
+
+netdev_open_normal_process:
+
+ RT_TRACE(_module_os_intfs_c_, _drv_info_, ("-871x_drv - dev_open\n"));
+ DBG_871X("-871x_drv - drv_open, bup =%d\n", padapter->bup);
+
+ return 0;
+
+netdev_open_error:
+
+ padapter->bup = false;
+
+ netif_carrier_off(pnetdev);
+ rtw_netif_stop_queue(pnetdev);
+
+ RT_TRACE(_module_os_intfs_c_, _drv_err_, ("-871x_drv - dev_open, fail!\n"));
+ DBG_871X("-871x_drv - drv_open fail, bup =%d\n", padapter->bup);
+
+ return (-1);
+
+}
+
+int netdev_open(struct net_device *pnetdev)
+{
+ int ret;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(pnetdev);
+ struct pwrctrl_priv *pwrctrlpriv = adapter_to_pwrctl(padapter);
+
+ if (pwrctrlpriv->bInSuspend == true)
+ {
+ DBG_871X("+871x_drv - drv_open, bInSuspend =%d\n", pwrctrlpriv->bInSuspend);
+ return 0;
+ }
+
+ if (mutex_lock_interruptible(&(adapter_to_dvobj(padapter)->hw_init_mutex)))
+ return -1;
+
+ ret = _netdev_open(pnetdev);
+ mutex_unlock(&(adapter_to_dvobj(padapter)->hw_init_mutex));
+
+ return ret;
+}
+
+static int ips_netdrv_open(struct adapter *padapter)
+{
+ int status = _SUCCESS;
+ /* struct pwrctrl_priv *pwrpriv = adapter_to_pwrctl(padapter); */
+
+ padapter->net_closed = false;
+
+ DBG_871X("===> %s.........\n", __func__);
+
+
+ padapter->bDriverStopped = false;
+ padapter->bCardDisableWOHSM = false;
+ /* padapter->bup = true; */
+
+ status = rtw_hal_init(padapter);
+ if (status == _FAIL)
+ {
+ RT_TRACE(_module_os_intfs_c_, _drv_err_, ("ips_netdrv_open(): Can't init h/w!\n"));
+ goto netdev_open_error;
+ }
+
+ if (padapter->intf_start)
+ {
+ padapter->intf_start(padapter);
+ }
+
+ _set_timer(&padapter->mlmepriv.dynamic_chk_timer, 2000);
+
+ return _SUCCESS;
+
+netdev_open_error:
+ /* padapter->bup = false; */
+ DBG_871X("-ips_netdrv_open - drv_open failure, bup =%d\n", padapter->bup);
+
+ return _FAIL;
+}
+
+
+int rtw_ips_pwr_up(struct adapter *padapter)
+{
+ int result;
+ DBG_871X("===> rtw_ips_pwr_up..............\n");
+
+ result = ips_netdrv_open(padapter);
+
+ DBG_871X("<=== rtw_ips_pwr_up..............\n");
+ return result;
+
+}
+
+void rtw_ips_pwr_down(struct adapter *padapter)
+{
+ DBG_871X("===> rtw_ips_pwr_down...................\n");
+
+ padapter->bCardDisableWOHSM = true;
+ padapter->net_closed = true;
+
+ rtw_ips_dev_unload(padapter);
+ padapter->bCardDisableWOHSM = false;
+ DBG_871X("<=== rtw_ips_pwr_down.....................\n");
+}
+
+void rtw_ips_dev_unload(struct adapter *padapter)
+{
+ DBG_871X("====> %s...\n", __func__);
+
+
+ if (padapter->bSurpriseRemoved == false)
+ {
+ rtw_hal_deinit(padapter);
+ }
+
+}
+
+
+static int pm_netdev_open(struct net_device *pnetdev, u8 bnormal)
+{
+ int status = -1;
+
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(pnetdev);
+
+ if (true == bnormal)
+ {
+ if (mutex_lock_interruptible(&(adapter_to_dvobj(padapter)->hw_init_mutex)) == 0) {
+ status = _netdev_open(pnetdev);
+ mutex_unlock(&(adapter_to_dvobj(padapter)->hw_init_mutex));
+ }
+ }
+ else
+ status = (_SUCCESS == ips_netdrv_open(padapter))?(0):(-1);
+
+ return status;
+}
+
+static int netdev_close(struct net_device *pnetdev)
+{
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(pnetdev);
+ struct pwrctrl_priv *pwrctl = adapter_to_pwrctl(padapter);
+
+ RT_TRACE(_module_os_intfs_c_, _drv_info_, ("+871x_drv - drv_close\n"));
+
+ if (pwrctl->bInternalAutoSuspend == true)
+ {
+ /* rtw_pwr_wakeup(padapter); */
+ if (pwrctl->rf_pwrstate == rf_off)
+ pwrctl->ps_flag = true;
+ }
+ padapter->net_closed = true;
+ padapter->netif_up = false;
+
+/*if (!padapter->hw_init_completed)
+ {
+ DBG_871X("(1)871x_drv - drv_close, bup =%d, hw_init_completed =%d\n", padapter->bup, padapter->hw_init_completed);
+
+ padapter->bDriverStopped = true;
+
+ rtw_dev_unload(padapter);
+ }
+ else*/
+ if (pwrctl->rf_pwrstate == rf_on) {
+ DBG_871X("(2)871x_drv - drv_close, bup =%d, hw_init_completed =%d\n", padapter->bup, padapter->hw_init_completed);
+
+ /* s1. */
+ if (pnetdev)
+ {
+ if (!rtw_netif_queue_stopped(pnetdev))
+ rtw_netif_stop_queue(pnetdev);
+ }
+
+ /* s2. */
+ LeaveAllPowerSaveMode(padapter);
+ rtw_disassoc_cmd(padapter, 500, false);
+ /* s2-2. indicate disconnect to os */
+ rtw_indicate_disconnect(padapter);
+ /* s2-3. */
+ rtw_free_assoc_resources(padapter, 1);
+ /* s2-4. */
+ rtw_free_network_queue(padapter, true);
+ }
+
+ rtw_scan_abort(padapter);
+ adapter_wdev_data(padapter)->bandroid_scan = false;
+
+ RT_TRACE(_module_os_intfs_c_, _drv_info_, ("-871x_drv - drv_close\n"));
+ DBG_871X("-871x_drv - drv_close, bup =%d\n", padapter->bup);
+
+ return 0;
+
+}
+
+void rtw_ndev_destructor(struct net_device *ndev)
+{
+ DBG_871X(FUNC_NDEV_FMT"\n", FUNC_NDEV_ARG(ndev));
+
+ if (ndev->ieee80211_ptr)
+ kfree((u8 *)ndev->ieee80211_ptr);
+
+ free_netdev(ndev);
+}
+
+void rtw_dev_unload(struct adapter *padapter)
+{
+ struct pwrctrl_priv *pwrctl = adapter_to_pwrctl(padapter);
+ struct dvobj_priv *pobjpriv = padapter->dvobj;
+ struct debug_priv *pdbgpriv = &pobjpriv->drv_dbg;
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+ u8 cnt = 0;
+
+ RT_TRACE(_module_hci_intfs_c_, _drv_notice_, ("+%s\n", __func__));
+
+ if (padapter->bup == true)
+ {
+ DBG_871X("===> %s\n", __func__);
+
+ padapter->bDriverStopped = true;
+ if (padapter->xmitpriv.ack_tx)
+ rtw_ack_tx_done(&padapter->xmitpriv, RTW_SCTX_DONE_DRV_STOP);
+
+ if (padapter->intf_stop)
+ padapter->intf_stop(padapter);
+
+ RT_TRACE(_module_hci_intfs_c_, _drv_notice_, ("@ rtw_dev_unload: stop intf complete!\n"));
+
+ if (!pwrctl->bInternalAutoSuspend)
+ rtw_stop_drv_threads(padapter);
+
+ while (atomic_read(&(pcmdpriv->cmdthd_running)) == true) {
+ if (cnt > 5) {
+ DBG_871X("stop cmdthd timeout\n");
+ break;
+ } else {
+				cnt++;
+ DBG_871X("cmdthd is running(%d)\n", cnt);
+ msleep(10);
+ }
+ }
+
+ RT_TRACE(_module_hci_intfs_c_, _drv_notice_, ("@ %s: stop thread complete!\n", __func__));
+
+ /* check the status of IPS */
+ if (rtw_hal_check_ips_status(padapter) == true || pwrctl->rf_pwrstate == rf_off) { /* check HW status and SW state */
+ DBG_871X_LEVEL(_drv_always_, "%s: driver in IPS-FWLPS\n", __func__);
+ pdbgpriv->dbg_dev_unload_inIPS_cnt++;
+ LeaveAllPowerSaveMode(padapter);
+ } else {
+ DBG_871X_LEVEL(_drv_always_, "%s: driver not in IPS\n", __func__);
+ }
+
+ if (padapter->bSurpriseRemoved == false)
+ {
+ rtw_btcoex_IpsNotify(padapter, pwrctl->ips_mode_req);
+#ifdef CONFIG_WOWLAN
+			if (pwrctl->bSupportRemoteWakeup == true &&
+			    pwrctl->wowlan_mode == true) {
+ DBG_871X_LEVEL(_drv_always_, "%s bSupportRemoteWakeup ==true do not run rtw_hal_deinit()\n", __func__);
+ }
+ else
+#endif
+ {
+				/* amy modified 20120221: the power sequence differs between driver open and IPS */
+ rtw_hal_deinit(padapter);
+ }
+ padapter->bSurpriseRemoved = true;
+ }
+ RT_TRACE(_module_hci_intfs_c_, _drv_notice_,
+ ("@ %s: deinit hal complete!\n", __func__));
+
+ padapter->bup = false;
+
+ DBG_871X("<=== %s\n", __func__);
+ }
+ else {
+ RT_TRACE(_module_hci_intfs_c_, _drv_notice_, ("%s: bup ==false\n", __func__));
+ DBG_871X("%s: bup ==false\n", __func__);
+ }
+
+ RT_TRACE(_module_hci_intfs_c_, _drv_notice_, ("-%s\n", __func__));
+}
+
+static int rtw_suspend_free_assoc_resource(struct adapter *padapter)
+{
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+
+ DBG_871X("==> "FUNC_ADPT_FMT" entry....\n", FUNC_ADPT_ARG(padapter));
+
+ if (rtw_chk_roam_flags(padapter, RTW_ROAM_ON_RESUME)) {
+ if (check_fwstate(pmlmepriv, WIFI_STATION_STATE)
+ && check_fwstate(pmlmepriv, _FW_LINKED))
+ {
+ DBG_871X("%s %s(" MAC_FMT "), length:%d assoc_ssid.length:%d\n", __func__,
+ pmlmepriv->cur_network.network.Ssid.Ssid,
+ MAC_ARG(pmlmepriv->cur_network.network.MacAddress),
+ pmlmepriv->cur_network.network.Ssid.SsidLength,
+ pmlmepriv->assoc_ssid.SsidLength);
+ rtw_set_to_roam(padapter, 1);
+ }
+ }
+
+ if (check_fwstate(pmlmepriv, WIFI_STATION_STATE) && check_fwstate(pmlmepriv, _FW_LINKED))
+ {
+ rtw_disassoc_cmd(padapter, 0, false);
+ /* s2-2. indicate disconnect to os */
+ rtw_indicate_disconnect(padapter);
+ }
+ else if (check_fwstate(pmlmepriv, WIFI_AP_STATE))
+ {
+ rtw_sta_flush(padapter);
+ }
+
+ /* s2-3. */
+ rtw_free_assoc_resources(padapter, 1);
+
+ /* s2-4. */
+ rtw_free_network_queue(padapter, true);
+
+ if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY))
+ rtw_indicate_scan_done(padapter, 1);
+
+ if (check_fwstate(pmlmepriv, _FW_UNDER_LINKING) == true)
+ {
+ DBG_871X_LEVEL(_drv_always_, "%s: fw_under_linking\n", __func__);
+ rtw_indicate_disconnect(padapter);
+ }
+
+ DBG_871X("<== "FUNC_ADPT_FMT" exit....\n", FUNC_ADPT_ARG(padapter));
+ return _SUCCESS;
+}
+
+#ifdef CONFIG_WOWLAN
+int rtw_suspend_wow(struct adapter *padapter)
+{
+ u8 ch, bw, offset;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct net_device *pnetdev = padapter->pnetdev;
+ struct pwrctrl_priv *pwrpriv = adapter_to_pwrctl(padapter);
+ struct wowlan_ioctl_param poidparam;
+ int ret = _SUCCESS;
+
+ DBG_871X("==> "FUNC_ADPT_FMT" entry....\n", FUNC_ADPT_ARG(padapter));
+
+
+ DBG_871X("wowlan_mode: %d\n", pwrpriv->wowlan_mode);
+ DBG_871X("wowlan_pno_enable: %d\n", pwrpriv->wowlan_pno_enable);
+
+ if (pwrpriv->wowlan_mode == true) {
+ if (pnetdev)
+ rtw_netif_stop_queue(pnetdev);
+ /* 1. stop thread */
+ padapter->bDriverStopped = true; /* for stop thread */
+ rtw_stop_drv_threads(padapter);
+ padapter->bDriverStopped = false; /* for 32k command */
+
+ /* 2. disable interrupt */
+ if (padapter->intf_stop) {
+ padapter->intf_stop(padapter);
+ }
+
+		/* 2.1 clean interrupt */
+ if (padapter->HalFunc.clear_interrupt)
+ padapter->HalFunc.clear_interrupt(padapter);
+
+ /* 2.2 free irq */
+ /* sdio_free_irq(adapter_to_dvobj(padapter)); */
+ if (padapter->intf_free_irq)
+ padapter->intf_free_irq(adapter_to_dvobj(padapter));
+
+ poidparam.subcode = WOWLAN_ENABLE;
+ padapter->HalFunc.SetHwRegHandler(padapter, HW_VAR_WOWLAN, (u8 *)&poidparam);
+ if (rtw_chk_roam_flags(padapter, RTW_ROAM_ON_RESUME)) {
+ if (check_fwstate(pmlmepriv, WIFI_STATION_STATE)
+ && check_fwstate(pmlmepriv, _FW_LINKED))
+ {
+ DBG_871X("%s %s(" MAC_FMT "), length:%d assoc_ssid.length:%d\n", __func__,
+ pmlmepriv->cur_network.network.Ssid.Ssid,
+ MAC_ARG(pmlmepriv->cur_network.network.MacAddress),
+ pmlmepriv->cur_network.network.Ssid.SsidLength,
+ pmlmepriv->assoc_ssid.SsidLength);
+
+ rtw_set_to_roam(padapter, 0);
+ }
+ }
+
+ DBG_871X_LEVEL(_drv_always_, "%s: wowmode suspending\n", __func__);
+
+ if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY) == true)
+ {
+ DBG_871X_LEVEL(_drv_always_, "%s: fw_under_survey\n", __func__);
+ rtw_indicate_scan_done(padapter, 1);
+ clr_fwstate(pmlmepriv, _FW_UNDER_SURVEY);
+ }
+
+ if (rtw_get_ch_setting_union(padapter, &ch, &bw, &offset) != 0) {
+ DBG_871X(FUNC_ADPT_FMT" back to linked/linking union - ch:%u, bw:%u, offset:%u\n",
+ FUNC_ADPT_ARG(padapter), ch, bw, offset);
+ set_channel_bwmode(padapter, ch, offset, bw);
+ }
+
+ if (pwrpriv->wowlan_pno_enable)
+ DBG_871X_LEVEL(_drv_always_, "%s: pno: %d\n", __func__, pwrpriv->wowlan_pno_enable);
+ else
+ rtw_set_ps_mode(padapter, PS_MODE_DTIM, 0, 0, "WOWLAN");
+
+ }
+ else
+ {
+ DBG_871X_LEVEL(_drv_always_, "%s: ### ERROR ### wowlan_mode =%d\n", __func__, pwrpriv->wowlan_mode);
+ }
+ DBG_871X("<== "FUNC_ADPT_FMT" exit....\n", FUNC_ADPT_ARG(padapter));
+ return ret;
+}
+#endif /* ifdef CONFIG_WOWLAN */
+
+#ifdef CONFIG_AP_WOWLAN
+int rtw_suspend_ap_wow(struct adapter *padapter)
+{
+ u8 ch, bw, offset;
+ struct net_device *pnetdev = padapter->pnetdev;
+ struct pwrctrl_priv *pwrpriv = adapter_to_pwrctl(padapter);
+ struct wowlan_ioctl_param poidparam;
+ int ret = _SUCCESS;
+
+ DBG_871X("==> "FUNC_ADPT_FMT" entry....\n", FUNC_ADPT_ARG(padapter));
+
+ pwrpriv->wowlan_ap_mode = true;
+
+ DBG_871X("wowlan_ap_mode: %d\n", pwrpriv->wowlan_ap_mode);
+
+ if (pnetdev)
+ rtw_netif_stop_queue(pnetdev);
+ /* 1. stop thread */
+ padapter->bDriverStopped = true; /* for stop thread */
+ rtw_stop_drv_threads(padapter);
+ padapter->bDriverStopped = false; /* for 32k command */
+
+ /* 2. disable interrupt */
+	rtw_hal_disable_interrupt(padapter); /* It needs to wait for leaving 32K. */
+
+	/* 2.1 clean interrupt */
+ if (padapter->HalFunc.clear_interrupt)
+ padapter->HalFunc.clear_interrupt(padapter);
+
+ /* 2.2 free irq */
+ /* sdio_free_irq(adapter_to_dvobj(padapter)); */
+ if (padapter->intf_free_irq)
+ padapter->intf_free_irq(adapter_to_dvobj(padapter));
+
+ poidparam.subcode = WOWLAN_AP_ENABLE;
+ padapter->HalFunc.SetHwRegHandler(padapter,
+ HW_VAR_AP_WOWLAN, (u8 *)&poidparam);
+
+ DBG_871X_LEVEL(_drv_always_, "%s: wowmode suspending\n", __func__);
+
+ if (rtw_get_ch_setting_union(padapter, &ch, &bw, &offset) != 0) {
+ DBG_871X(FUNC_ADPT_FMT" back to linked/linking union - ch:%u, bw:%u, offset:%u\n",
+ FUNC_ADPT_ARG(padapter), ch, bw, offset);
+ set_channel_bwmode(padapter, ch, offset, bw);
+ }
+
+ rtw_set_ps_mode(padapter, PS_MODE_MIN, 0, 0, "AP-WOWLAN");
+
+ DBG_871X("<== "FUNC_ADPT_FMT" exit....\n", FUNC_ADPT_ARG(padapter));
+ return ret;
+}
+#endif /* ifdef CONFIG_AP_WOWLAN */
+
+
+static int rtw_suspend_normal(struct adapter *padapter)
+{
+ struct net_device *pnetdev = padapter->pnetdev;
+ int ret = _SUCCESS;
+
+ DBG_871X("==> "FUNC_ADPT_FMT" entry....\n", FUNC_ADPT_ARG(padapter));
+ if (pnetdev) {
+ netif_carrier_off(pnetdev);
+ rtw_netif_stop_queue(pnetdev);
+ }
+
+ rtw_suspend_free_assoc_resource(padapter);
+
+ if ((rtw_hal_check_ips_status(padapter) == true)
+ || (adapter_to_pwrctl(padapter)->rf_pwrstate == rf_off))
+ {
+ DBG_871X_LEVEL(_drv_always_, "%s: ### ERROR #### driver in IPS ####ERROR###!!!\n", __func__);
+
+ }
+
+ rtw_dev_unload(padapter);
+
+ /* sdio_deinit(adapter_to_dvobj(padapter)); */
+ if (padapter->intf_deinit)
+ padapter->intf_deinit(adapter_to_dvobj(padapter));
+
+ DBG_871X("<== "FUNC_ADPT_FMT" exit....\n", FUNC_ADPT_ARG(padapter));
+ return ret;
+}
+
+int rtw_suspend_common(struct adapter *padapter)
+{
+ struct dvobj_priv *psdpriv = padapter->dvobj;
+ struct debug_priv *pdbgpriv = &psdpriv->drv_dbg;
+ struct pwrctrl_priv *pwrpriv = dvobj_to_pwrctl(psdpriv);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+
+ int ret = 0;
+ unsigned long start_time = jiffies;
+
+ DBG_871X_LEVEL(_drv_always_, " suspend start\n");
+ DBG_871X("==> %s (%s:%d)\n", __func__, current->comm, current->pid);
+ pdbgpriv->dbg_suspend_cnt++;
+
+ pwrpriv->bInSuspend = true;
+
+ while (pwrpriv->bips_processing == true)
+ msleep(1);
+
+ if ((!padapter->bup) || (padapter->bDriverStopped)||(padapter->bSurpriseRemoved))
+ {
+ DBG_871X("%s bup =%d bDriverStopped =%d bSurpriseRemoved = %d\n", __func__
+ , padapter->bup, padapter->bDriverStopped, padapter->bSurpriseRemoved);
+ pdbgpriv->dbg_suspend_error_cnt++;
+ goto exit;
+ }
+ rtw_ps_deny(padapter, PS_DENY_SUSPEND);
+
+ rtw_cancel_all_timer(padapter);
+
+ LeaveAllPowerSaveModeDirect(padapter);
+
+ rtw_stop_cmd_thread(padapter);
+
+ /* wait for the latest FW to remove this condition. */
+ if (check_fwstate(pmlmepriv, WIFI_AP_STATE) == true) {
+ rtw_btcoex_SuspendNotify(padapter, 0);
+ DBG_871X("WIFI_AP_STATE\n");
+ } else if (check_fwstate(pmlmepriv, WIFI_STATION_STATE) == true) {
+ rtw_btcoex_SuspendNotify(padapter, 1);
+ DBG_871X("STATION\n");
+ }
+
+ rtw_ps_deny_cancel(padapter, PS_DENY_SUSPEND);
+
+ if (check_fwstate(pmlmepriv, WIFI_STATION_STATE) == true) {
+ #ifdef CONFIG_WOWLAN
+ if (check_fwstate(pmlmepriv, _FW_LINKED)) {
+ pwrpriv->wowlan_mode = true;
+ } else if (pwrpriv->wowlan_pno_enable == true) {
+ pwrpriv->wowlan_mode |= pwrpriv->wowlan_pno_enable;
+ }
+
+ if (pwrpriv->wowlan_mode == true)
+ rtw_suspend_wow(padapter);
+ else
+ rtw_suspend_normal(padapter);
+
+ #else /* CONFIG_WOWLAN */
+ rtw_suspend_normal(padapter);
+ #endif /* CONFIG_WOWLAN */
+ } else if (check_fwstate(pmlmepriv, WIFI_AP_STATE) == true) {
+ #ifdef CONFIG_AP_WOWLAN
+ rtw_suspend_ap_wow(padapter);
+ #else
+ rtw_suspend_normal(padapter);
+ #endif /* CONFIG_AP_WOWLAN */
+ } else {
+ rtw_suspend_normal(padapter);
+ }
+
+ DBG_871X_LEVEL(_drv_always_, "rtw suspend success in %d ms\n",
+ jiffies_to_msecs(jiffies - start_time));
+
+exit:
+ DBG_871X("<=== %s return %d.............. in %dms\n", __func__
+ , ret, jiffies_to_msecs(jiffies - start_time));
+
+ return ret;
+}
+
+#ifdef CONFIG_WOWLAN
+int rtw_resume_process_wow(struct adapter *padapter)
+{
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct net_device *pnetdev = padapter->pnetdev;
+ struct pwrctrl_priv *pwrpriv = adapter_to_pwrctl(padapter);
+ struct dvobj_priv *psdpriv = padapter->dvobj;
+ struct debug_priv *pdbgpriv = &psdpriv->drv_dbg;
+ struct wowlan_ioctl_param poidparam;
+ struct sta_info *psta = NULL;
+ int ret = _SUCCESS;
+
+ DBG_871X("==> "FUNC_ADPT_FMT" entry....\n", FUNC_ADPT_ARG(padapter));
+
+ if (padapter) {
+ pnetdev = padapter->pnetdev;
+ pwrpriv = adapter_to_pwrctl(padapter);
+ } else {
+ pdbgpriv->dbg_resume_error_cnt++;
+ ret = -1;
+ goto exit;
+ }
+
+ if (padapter->bDriverStopped || padapter->bSurpriseRemoved) {
+ DBG_871X("%s pdapter %p bDriverStopped %d bSurpriseRemoved %d\n",
+ __func__, padapter, padapter->bDriverStopped,
+ padapter->bSurpriseRemoved);
+ goto exit;
+ }
+
+#ifdef CONFIG_PNO_SUPPORT
+ pwrpriv->pno_in_resume = true;
+#endif
+
+ if (pwrpriv->wowlan_mode == true) {
+ rtw_set_ps_mode(padapter, PS_MODE_ACTIVE, 0, 0, "WOWLAN");
+
+ pwrpriv->bFwCurrentInPSMode = false;
+
+ if (padapter->intf_stop) {
+ padapter->intf_stop(padapter);
+ }
+
+ if (padapter->HalFunc.clear_interrupt)
+ padapter->HalFunc.clear_interrupt(padapter);
+
+ /* if (sdio_alloc_irq(adapter_to_dvobj(padapter)) != _SUCCESS) { */
+ if ((padapter->intf_alloc_irq) && (padapter->intf_alloc_irq(adapter_to_dvobj(padapter)) != _SUCCESS)) {
+ ret = -1;
+ RT_TRACE(_module_hci_intfs_c_, _drv_err_, ("%s: sdio_alloc_irq Failed!!\n", __func__));
+ goto exit;
+ }
+
+ /* Disable WOW, set H2C command */
+		poidparam.subcode = WOWLAN_DISABLE;
+ padapter->HalFunc.SetHwRegHandler(padapter, HW_VAR_WOWLAN, (u8 *)&poidparam);
+
+ psta = rtw_get_stainfo(&padapter->stapriv, get_bssid(&padapter->mlmepriv));
+ if (psta) {
+ set_sta_rate(padapter, psta);
+ }
+
+
+ padapter->bDriverStopped = false;
+ DBG_871X("%s: wowmode resuming, DriverStopped:%d\n", __func__, padapter->bDriverStopped);
+ rtw_start_drv_threads(padapter);
+
+ if (padapter->intf_start) {
+ padapter->intf_start(padapter);
+ }
+
+ /* start netif queue */
+ if (pnetdev) {
+ if (!rtw_netif_queue_stopped(pnetdev))
+ rtw_netif_start_queue(pnetdev);
+ else
+ rtw_netif_wake_queue(pnetdev);
+ }
+ }
+ else {
+
+ DBG_871X_LEVEL(_drv_always_, "%s: ### ERROR ### wowlan_mode =%d\n", __func__, pwrpriv->wowlan_mode);
+ }
+
+ if (padapter->pid[1]!= 0) {
+ DBG_871X("pid[1]:%d\n", padapter->pid[1]);
+ rtw_signal_process(padapter->pid[1], SIGUSR2);
+ }
+
+ if (rtw_chk_roam_flags(padapter, RTW_ROAM_ON_RESUME)) {
+ if (pwrpriv->wowlan_wake_reason == FWDecisionDisconnect ||
+ pwrpriv->wowlan_wake_reason == Rx_DisAssoc ||
+ pwrpriv->wowlan_wake_reason == Rx_DeAuth) {
+
+ DBG_871X("%s: disconnect reason: %02x\n", __func__,
+ pwrpriv->wowlan_wake_reason);
+ rtw_indicate_disconnect(padapter);
+
+ rtw_sta_media_status_rpt(padapter,
+ rtw_get_stainfo(&padapter->stapriv,
+ get_bssid(&padapter->mlmepriv)), 0);
+
+ rtw_free_assoc_resources(padapter, 1);
+ pmlmeinfo->state = WIFI_FW_NULL_STATE;
+
+ } else {
+ DBG_871X("%s: do roaming\n", __func__);
+ rtw_roaming(padapter, NULL);
+ }
+ }
+
+ if (pwrpriv->wowlan_mode == true) {
+ pwrpriv->bips_processing = false;
+ _set_timer(&padapter->mlmepriv.dynamic_chk_timer, 2000);
+ } else {
+ DBG_871X_LEVEL(_drv_always_, "do not reset timer\n");
+ }
+
+ pwrpriv->wowlan_mode =false;
+
+ /* clean driver side wake up reason. */
+ pwrpriv->wowlan_wake_reason = 0;
+exit:
+ DBG_871X("<== "FUNC_ADPT_FMT" exit....\n", FUNC_ADPT_ARG(padapter));
+ return ret;
+}
+#endif /* ifdef CONFIG_WOWLAN */
+
+#ifdef CONFIG_AP_WOWLAN
+int rtw_resume_process_ap_wow(struct adapter *padapter)
+{
+ struct net_device *pnetdev = padapter->pnetdev;
+ struct pwrctrl_priv *pwrpriv = adapter_to_pwrctl(padapter);
+ struct dvobj_priv *psdpriv = padapter->dvobj;
+ struct debug_priv *pdbgpriv = &psdpriv->drv_dbg;
+ struct wowlan_ioctl_param poidparam;
+ int ret = _SUCCESS;
+
+ DBG_871X("==> "FUNC_ADPT_FMT" entry....\n", FUNC_ADPT_ARG(padapter));
+
+ if (padapter) {
+ pnetdev = padapter->pnetdev;
+ pwrpriv = adapter_to_pwrctl(padapter);
+ } else {
+ pdbgpriv->dbg_resume_error_cnt++;
+ ret = -1;
+ goto exit;
+ }
+
+ rtw_set_ps_mode(padapter, PS_MODE_ACTIVE, 0, 0, "AP-WOWLAN");
+
+ pwrpriv->bFwCurrentInPSMode = false;
+
+ rtw_hal_disable_interrupt(padapter);
+
+ if (padapter->HalFunc.clear_interrupt)
+ padapter->HalFunc.clear_interrupt(padapter);
+
+ /* if (sdio_alloc_irq(adapter_to_dvobj(padapter)) != _SUCCESS) { */
+ if ((padapter->intf_alloc_irq) && (padapter->intf_alloc_irq(adapter_to_dvobj(padapter)) != _SUCCESS)) {
+ ret = -1;
+ RT_TRACE(_module_hci_intfs_c_, _drv_err_, ("%s: sdio_alloc_irq Failed!!\n", __func__));
+ goto exit;
+ }
+
+ /* Disable WOW, set H2C command */
+ poidparam.subcode = WOWLAN_AP_DISABLE;
+ padapter->HalFunc.SetHwRegHandler(padapter,
+ HW_VAR_AP_WOWLAN, (u8 *)&poidparam);
+ pwrpriv->wowlan_ap_mode = false;
+
+ padapter->bDriverStopped = false;
+ DBG_871X("%s: wowmode resuming, DriverStopped:%d\n", __func__, padapter->bDriverStopped);
+ rtw_start_drv_threads(padapter);
+
+ if (padapter->intf_start) {
+ padapter->intf_start(padapter);
+ }
+
+ /* start netif queue */
+ if (pnetdev) {
+ if (!rtw_netif_queue_stopped(pnetdev))
+ rtw_netif_start_queue(pnetdev);
+ else
+ rtw_netif_wake_queue(pnetdev);
+ }
+
+	if (padapter->pid[1] != 0) {
+ DBG_871X("pid[1]:%d\n", padapter->pid[1]);
+ rtw_signal_process(padapter->pid[1], SIGUSR2);
+ }
+
+ pwrpriv->bips_processing = false;
+ _set_timer(&padapter->mlmepriv.dynamic_chk_timer, 2000);
+
+ /* clean driver side wake up reason. */
+ pwrpriv->wowlan_wake_reason = 0;
+exit:
+ DBG_871X("<== "FUNC_ADPT_FMT" exit....\n", FUNC_ADPT_ARG(padapter));
+ return ret;
+}
+#endif /* CONFIG_AP_WOWLAN */
+
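+/*
+ * Normal (non-WoWLAN) resume: re-initialize the SDIO interface, re-allocate
+ * the bus IRQ, reset driver software state, reopen the net_device and restore
+ * the previous role (station roaming / AP network restore).
+ */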
+static int rtw_resume_process_normal(struct adapter *padapter)
+{
+ struct net_device *pnetdev;
+ struct pwrctrl_priv *pwrpriv;
+ struct mlme_priv *pmlmepriv;
+ struct dvobj_priv *psdpriv;
+ struct debug_priv *pdbgpriv;
+
+ int ret = _SUCCESS;
+
+ if (!padapter) {
+ ret = -1;
+ goto exit;
+ }
+
+ pnetdev = padapter->pnetdev;
+ pwrpriv = adapter_to_pwrctl(padapter);
+ pmlmepriv = &padapter->mlmepriv;
+ psdpriv = padapter->dvobj;
+ pdbgpriv = &psdpriv->drv_dbg;
+
+ DBG_871X("==> "FUNC_ADPT_FMT" entry....\n", FUNC_ADPT_ARG(padapter));
+ /* interface init */
+ /* if (sdio_init(adapter_to_dvobj(padapter)) != _SUCCESS) */
+	if ((padapter->intf_init) && (padapter->intf_init(adapter_to_dvobj(padapter)) != _SUCCESS)) {
+		ret = -1;
+		RT_TRACE(_module_hci_intfs_c_, _drv_err_, ("%s: initialize SDIO Failed!!\n", __func__));
+		goto exit;
+	}
+ rtw_hal_disable_interrupt(padapter);
+ /* if (sdio_alloc_irq(adapter_to_dvobj(padapter)) != _SUCCESS) */
+	if ((padapter->intf_alloc_irq) && (padapter->intf_alloc_irq(adapter_to_dvobj(padapter)) != _SUCCESS)) {
+		ret = -1;
+		RT_TRACE(_module_hci_intfs_c_, _drv_err_, ("%s: sdio_alloc_irq Failed!!\n", __func__));
+		goto exit;
+	}
+
+ rtw_reset_drv_sw(padapter);
+ pwrpriv->bkeepfwalive = false;
+
+ DBG_871X("bkeepfwalive(%x)\n", pwrpriv->bkeepfwalive);
+ if (pm_netdev_open(pnetdev, true) != 0) {
+ ret = -1;
+ pdbgpriv->dbg_resume_error_cnt++;
+ goto exit;
+ }
+
+ netif_device_attach(pnetdev);
+ netif_carrier_on(pnetdev);
+
+	if (padapter->pid[1] != 0) {
+ DBG_871X("pid[1]:%d\n", padapter->pid[1]);
+ rtw_signal_process(padapter->pid[1], SIGUSR2);
+ }
+
+
+ if (check_fwstate(pmlmepriv, WIFI_STATION_STATE)) {
+ DBG_871X(FUNC_ADPT_FMT" fwstate:0x%08x - WIFI_STATION_STATE\n", FUNC_ADPT_ARG(padapter), get_fwstate(pmlmepriv));
+
+ if (rtw_chk_roam_flags(padapter, RTW_ROAM_ON_RESUME))
+ rtw_roaming(padapter, NULL);
+
+ } else if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
+ DBG_871X(FUNC_ADPT_FMT" fwstate:0x%08x - WIFI_AP_STATE\n", FUNC_ADPT_ARG(padapter), get_fwstate(pmlmepriv));
+ rtw_ap_restore_network(padapter);
+ } else if (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE)) {
+ DBG_871X(FUNC_ADPT_FMT" fwstate:0x%08x - WIFI_ADHOC_STATE\n", FUNC_ADPT_ARG(padapter), get_fwstate(pmlmepriv));
+ } else {
+ DBG_871X(FUNC_ADPT_FMT" fwstate:0x%08x - ???\n", FUNC_ADPT_ARG(padapter), get_fwstate(pmlmepriv));
+ }
+
+ DBG_871X("<== "FUNC_ADPT_FMT" exit....\n", FUNC_ADPT_ARG(padapter));
+
+exit:
+ return ret;
+}
+
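+/*
+ * Common resume entry: dispatch to the WoWLAN, AP-WoWLAN or normal resume
+ * path depending on the current fwstate, then notify btcoex and clear the
+ * suspend flags.
+ */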
+int rtw_resume_common(struct adapter *padapter)
+{
+ int ret = 0;
+ unsigned long start_time = jiffies;
+ struct pwrctrl_priv *pwrpriv = adapter_to_pwrctl(padapter);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+
+ DBG_871X_LEVEL(_drv_always_, "resume start\n");
+ DBG_871X("==> %s (%s:%d)\n", __func__, current->comm, current->pid);
+
+ if (check_fwstate(pmlmepriv, WIFI_STATION_STATE) == true) {
+ #ifdef CONFIG_WOWLAN
+ if (pwrpriv->wowlan_mode == true)
+ rtw_resume_process_wow(padapter);
+ else
+ rtw_resume_process_normal(padapter);
+ #else
+ rtw_resume_process_normal(padapter);
+ #endif
+
+ } else if (check_fwstate(pmlmepriv, WIFI_AP_STATE) == true) {
+ #ifdef CONFIG_AP_WOWLAN
+ rtw_resume_process_ap_wow(padapter);
+ #else
+ rtw_resume_process_normal(padapter);
+ #endif /* CONFIG_AP_WOWLAN */
+ } else {
+ rtw_resume_process_normal(padapter);
+ }
+
+ rtw_btcoex_SuspendNotify(padapter, 0);
+
+ if (pwrpriv) {
+ pwrpriv->bInSuspend = false;
+ #ifdef CONFIG_PNO_SUPPORT
+ pwrpriv->pno_in_resume = false;
+ #endif
+ }
+	DBG_871X_LEVEL(_drv_always_, "%s:%d in %d ms\n", __func__, ret,
+ jiffies_to_msecs(jiffies - start_time));
+
+ return ret;
+}
diff --git a/drivers/staging/rtl8723bs/os_dep/osdep_service.c b/drivers/staging/rtl8723bs/os_dep/osdep_service.c
new file mode 100644
index 000000000000..02db59e8b593
--- /dev/null
+++ b/drivers/staging/rtl8723bs/os_dep/osdep_service.c
@@ -0,0 +1,481 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+
+
+#define _OSDEP_SERVICE_C_
+
+#include <drv_types.h>
+#include <rtw_debug.h>
+
+/*
+* Translate the OS-dependent @param error_code to an OS-independent RTW_STATUS_CODE
+* @return: one of RTW_STATUS_CODE
+*/
+inline int RTW_STATUS_CODE(int error_code)
+{
+ if (error_code >= 0)
+ return _SUCCESS;
+ return _FAIL;
+}
+
+u8 *_rtw_malloc(u32 sz)
+{
+	return kmalloc(sz, in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
+}
+
+u8 *_rtw_zmalloc(u32 sz)
+{
+	u8 *pbuf = _rtw_malloc(sz);
+
+	if (pbuf)
+		memset(pbuf, 0, sz);
+
+	return pbuf;
+}
+
+inline struct sk_buff *_rtw_skb_alloc(u32 sz)
+{
+ return __dev_alloc_skb(sz, in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
+}
+
+inline struct sk_buff *_rtw_skb_copy(const struct sk_buff *skb)
+{
+ return skb_copy(skb, in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
+}
+
+inline struct sk_buff *_rtw_skb_clone(struct sk_buff *skb)
+{
+ return skb_clone(skb, in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
+}
+
+inline int _rtw_netif_rx(_nic_hdl ndev, struct sk_buff *skb)
+{
+ skb->dev = ndev;
+ return netif_rx(skb);
+}
+
+void rtw_init_timer(_timer *ptimer, void *padapter, void *pfunc)
+{
+ struct adapter *adapter = (struct adapter *)padapter;
+
+ _init_timer(ptimer, adapter->pnetdev, pfunc, adapter);
+}
+
+void _rtw_init_queue(struct __queue *pqueue)
+{
+ INIT_LIST_HEAD(&(pqueue->queue));
+
+ spin_lock_init(&(pqueue->lock));
+}
+
+/*
+* Open a file with the specified @param path, @param flag and @param mode
+* @param fpp pointer to a struct file pointer, set on successful open
+* @param path the path of the file to open
+* @param flag file operation flags, see the Linux documentation
+* @param mode file mode, see the Linux documentation
+* @return Linux-specific error code
+*/
+static int openFile(struct file **fpp, char *path, int flag, int mode)
+{
+ struct file *fp;
+
+	fp = filp_open(path, flag, mode);
+	if (IS_ERR(fp)) {
+		*fpp = NULL;
+		return PTR_ERR(fp);
+	} else {
+		*fpp = fp;
+		return 0;
+	}
+}
+
+/*
+* Close the file with the specific @param fp
+* @param fp the pointer of struct file to close
+* @return always 0
+*/
+static int closeFile(struct file *fp)
+{
+ filp_close(fp, NULL);
+ return 0;
+}
+
+static int readFile(struct file *fp, char *buf, int len)
+{
+ int rlen = 0, sum = 0;
+
+ if (!fp->f_op || !fp->f_op->read)
+ return -EPERM;
+
+	while (sum < len) {
+		rlen = fp->f_op->read(fp, (char __force __user *)buf + sum, len - sum, &fp->f_pos);
+		if (rlen > 0)
+			sum += rlen;
+		else if (rlen != 0)
+			return rlen;
+		else
+			break;
+	}
+
+	return sum;
+}
+
+/*
+* Test if the specified @param path is a file and is readable
+* @param path the path of the file to test
+* @return Linux-specific error code
+*/
+static int isFileReadable(char *path)
+{
+ struct file *fp;
+ int ret = 0;
+ mm_segment_t oldfs;
+ char buf;
+
+	fp = filp_open(path, O_RDONLY, 0);
+	if (IS_ERR(fp)) {
+		ret = PTR_ERR(fp);
+	} else {
+		oldfs = get_fs();
+		set_fs(get_ds());
+
+		if (readFile(fp, &buf, 1) != 1)
+			ret = PTR_ERR(fp);
+
+		set_fs(oldfs);
+		filp_close(fp, NULL);
+	}
+ return ret;
+}
+
+/*
+* Open the file at @param path and read its content into the buffer @param buf, at most @param sz bytes
+* @param path the path of the file to open and read
+* @param buf the starting address of the buffer that receives the file content
+* @param sz maximum number of bytes to read
+* @return the number of bytes read, or a Linux-specific error code
+*/
+static int retriveFromFile(char *path, u8 *buf, u32 sz)
+{
+	int ret = -1;
+	mm_segment_t oldfs;
+	struct file *fp;
+
+	if (path && buf) {
+		ret = openFile(&fp, path, O_RDONLY, 0);
+		if (ret == 0) {
+			DBG_871X("%s openFile path:%s fp =%p\n", __func__, path, fp);
+
+			oldfs = get_fs();
+			set_fs(get_ds());
+			ret = readFile(fp, buf, sz);
+			set_fs(oldfs);
+			closeFile(fp);
+
+			DBG_871X("%s readFile, ret:%d\n", __func__, ret);
+		} else {
+			DBG_871X("%s openFile path:%s Fail, ret:%d\n", __func__, path, ret);
+		}
+ } else {
+ DBG_871X("%s NULL pointer\n", __func__);
+ ret = -EINVAL;
+ }
+ return ret;
+}
+
+/*
+* Test if the specified @param path is a file and is readable
+* @param path the path of the file to test
+* @return true or false
+*/
+int rtw_is_file_readable(char *path)
+{
+	return isFileReadable(path) == 0;
+}
+
+/*
+* Open the file at @param path and read its content into the buffer @param buf, at most @param sz bytes
+* @param path the path of the file to open and read
+* @param buf the starting address of the buffer that receives the file content
+* @param sz maximum number of bytes to read
+* @return the number of bytes read
+*/
+int rtw_retrive_from_file(char *path, u8 *buf, u32 sz)
+{
+	int ret = retriveFromFile(path, buf, sz);
+
+	return ret >= 0 ? ret : 0;
+}
+
+struct net_device *rtw_alloc_etherdev_with_old_priv(int sizeof_priv, void *old_priv)
+{
+ struct net_device *pnetdev;
+ struct rtw_netdev_priv_indicator *pnpi;
+
+ pnetdev = alloc_etherdev_mq(sizeof(struct rtw_netdev_priv_indicator), 4);
+ if (!pnetdev)
+ goto RETURN;
+
+ pnpi = netdev_priv(pnetdev);
+	pnpi->priv = old_priv;
+	pnpi->sizeof_priv = sizeof_priv;
+
+RETURN:
+ return pnetdev;
+}
+
+struct net_device *rtw_alloc_etherdev(int sizeof_priv)
+{
+ struct net_device *pnetdev;
+ struct rtw_netdev_priv_indicator *pnpi;
+
+ pnetdev = alloc_etherdev_mq(sizeof(struct rtw_netdev_priv_indicator), 4);
+ if (!pnetdev)
+ goto RETURN;
+
+ pnpi = netdev_priv(pnetdev);
+
+ pnpi->priv = vzalloc(sizeof_priv);
+ if (!pnpi->priv) {
+ free_netdev(pnetdev);
+ pnetdev = NULL;
+ goto RETURN;
+ }
+
+	pnpi->sizeof_priv = sizeof_priv;
+RETURN:
+ return pnetdev;
+}
+
+void rtw_free_netdev(struct net_device *netdev)
+{
+ struct rtw_netdev_priv_indicator *pnpi;
+
+ if (!netdev)
+ goto RETURN;
+
+ pnpi = netdev_priv(netdev);
+
+ if (!pnpi->priv)
+ goto RETURN;
+
+ vfree(pnpi->priv);
+ free_netdev(netdev);
+
+RETURN:
+ return;
+}
+
+int rtw_change_ifname(struct adapter *padapter, const char *ifname)
+{
+ struct net_device *pnetdev;
+ struct net_device *cur_pnetdev;
+ struct rereg_nd_name_data *rereg_priv;
+ int ret;
+
+ if (!padapter)
+ goto error;
+
+ cur_pnetdev = padapter->pnetdev;
+ rereg_priv = &padapter->rereg_nd_name_priv;
+
+ /* free the old_pnetdev */
+ if (rereg_priv->old_pnetdev) {
+ free_netdev(rereg_priv->old_pnetdev);
+ rereg_priv->old_pnetdev = NULL;
+ }
+
+ if (!rtnl_is_locked())
+ unregister_netdev(cur_pnetdev);
+ else
+ unregister_netdevice(cur_pnetdev);
+
+	rereg_priv->old_pnetdev = cur_pnetdev;
+
+ pnetdev = rtw_init_netdev(padapter);
+ if (!pnetdev) {
+ ret = -1;
+ goto error;
+ }
+
+ SET_NETDEV_DEV(pnetdev, dvobj_to_dev(adapter_to_dvobj(padapter)));
+
+ rtw_init_netdev_name(pnetdev, ifname);
+
+ memcpy(pnetdev->dev_addr, padapter->eeprompriv.mac_addr, ETH_ALEN);
+
+ if (!rtnl_is_locked())
+ ret = register_netdev(pnetdev);
+ else
+ ret = register_netdevice(pnetdev);
+
+ if (ret != 0) {
+ RT_TRACE(_module_hci_intfs_c_, _drv_err_, ("register_netdev() failed\n"));
+ goto error;
+ }
+
+ return 0;
+
+error:
+
+ return -1;
+
+}
+
+u64 rtw_modular64(u64 x, u64 y)
+{
+ return do_div(x, y);
+}
+
+void rtw_buf_free(u8 **buf, u32 *buf_len)
+{
+	if (!buf || !buf_len)
+		return;
+
+	if (*buf) {
+		*buf_len = 0;
+		kfree(*buf);
+		*buf = NULL;
+	}
+}
+
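+/*
+ * Replace the buffer at *buf (length *buf_len) with a copy of src; the old
+ * buffer is freed. If src is NULL/empty or the allocation fails, *buf ends
+ * up NULL and *buf_len is 0.
+ */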
+void rtw_buf_update(u8 **buf, u32 *buf_len, u8 *src, u32 src_len)
+{
+ u32 ori_len = 0, dup_len = 0;
+ u8 *ori = NULL;
+ u8 *dup = NULL;
+
+ if (!buf || !buf_len)
+ return;
+
+ if (!src || !src_len)
+ goto keep_ori;
+
+ /* duplicate src */
+ dup = rtw_malloc(src_len);
+ if (dup) {
+ dup_len = src_len;
+ memcpy(dup, src, dup_len);
+ }
+
+keep_ori:
+ ori = *buf;
+ ori_len = *buf_len;
+
+ /* replace buf with dup */
+ *buf_len = 0;
+ *buf = dup;
+ *buf_len = dup_len;
+
+ /* free ori */
+ if (ori && ori_len > 0)
+ kfree(ori);
+}
+
+
+/**
+ * rtw_cbuf_full - test if cbuf is full
+ * @cbuf: pointer of struct rtw_cbuf
+ *
+ * Returns: true if cbuf is full
+ */
+inline bool rtw_cbuf_full(struct rtw_cbuf *cbuf)
+{
+	return cbuf->write == cbuf->read - 1;
+}
+
+/**
+ * rtw_cbuf_empty - test if cbuf is empty
+ * @cbuf: pointer of struct rtw_cbuf
+ *
+ * Returns: true if cbuf is empty
+ */
+inline bool rtw_cbuf_empty(struct rtw_cbuf *cbuf)
+{
+	return cbuf->write == cbuf->read;
+}
+
+/**
+ * rtw_cbuf_push - push a pointer into cbuf
+ * @cbuf: pointer of struct rtw_cbuf
+ * @buf: pointer to push in
+ *
+ * Lock free operation, be careful of the use scheme
+ * Returns: true push success
+ */
+bool rtw_cbuf_push(struct rtw_cbuf *cbuf, void *buf)
+{
+ if (rtw_cbuf_full(cbuf))
+ return _FAIL;
+
+ DBG_871X("%s on %u\n", __func__, cbuf->write);
+ cbuf->bufs[cbuf->write] = buf;
+	cbuf->write = (cbuf->write + 1) % cbuf->size;
+
+ return _SUCCESS;
+}
+
+/**
+ * rtw_cbuf_pop - pop a pointer from cbuf
+ * @cbuf: pointer of struct rtw_cbuf
+ *
+ * Lock free operation, be careful of the use scheme
+ * Returns: pointer popped out
+ */
+void *rtw_cbuf_pop(struct rtw_cbuf *cbuf)
+{
+	void *buf;
+
+	if (rtw_cbuf_empty(cbuf))
+		return NULL;
+
+ DBG_871X("%s on %u\n", __func__, cbuf->read);
+ buf = cbuf->bufs[cbuf->read];
+	cbuf->read = (cbuf->read + 1) % cbuf->size;
+
+ return buf;
+}
+
+/**
+ * rtw_cbuf_alloc - allocate a rtw_cbuf of the given size and initialize it
+ * @size: number of pointer slots
+ *
+ * Returns: pointer to struct rtw_cbuf, NULL on allocation failure
+ */
+struct rtw_cbuf *rtw_cbuf_alloc(u32 size)
+{
+ struct rtw_cbuf *cbuf;
+
+	cbuf = (struct rtw_cbuf *)rtw_malloc(sizeof(*cbuf) + sizeof(void *) * size);
+
+ if (cbuf) {
+ cbuf->write = cbuf->read = 0;
+ cbuf->size = size;
+ }
+
+ return cbuf;
+}
diff --git a/drivers/staging/rtl8723bs/os_dep/recv_linux.c b/drivers/staging/rtl8723bs/os_dep/recv_linux.c
new file mode 100644
index 000000000000..e731ab4e2bd7
--- /dev/null
+++ b/drivers/staging/rtl8723bs/os_dep/recv_linux.c
@@ -0,0 +1,365 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#define _RECV_OSDEP_C_
+
+#include <drv_types.h>
+#include <rtw_debug.h>
+#include <linux/jiffies.h>
+
+void rtw_os_free_recvframe(union recv_frame *precvframe)
+{
+	if (precvframe->u.hdr.pkt) {
+		dev_kfree_skb_any(precvframe->u.hdr.pkt); /* free skb by driver */
+		precvframe->u.hdr.pkt = NULL;
+	}
+}
+
+/* alloc os related resource in union recv_frame */
+int rtw_os_recv_resource_alloc(struct adapter *padapter, union recv_frame *precvframe)
+{
+ int res = _SUCCESS;
+
+ precvframe->u.hdr.pkt_newalloc = precvframe->u.hdr.pkt = NULL;
+
+ return res;
+}
+
+/* free os related resource in union recv_frame */
+void rtw_os_recv_resource_free(struct recv_priv *precvpriv)
+{
+	sint i;
+	union recv_frame *precvframe;
+
+	precvframe = (union recv_frame *)precvpriv->precv_frame_buf;
+
+	for (i = 0; i < NR_RECVFRAME; i++) {
+		if (precvframe->u.hdr.pkt) {
+			dev_kfree_skb_any(precvframe->u.hdr.pkt); /* free skb by driver */
+			precvframe->u.hdr.pkt = NULL;
+		}
+		precvframe++;
+	}
+}
+
+/* free os related resource in struct recv_buf */
+int rtw_os_recvbuf_resource_free(struct adapter *padapter, struct recv_buf *precvbuf)
+{
+ int ret = _SUCCESS;
+
+	if (precvbuf->pskb)
+		dev_kfree_skb_any(precvbuf->pskb);
+
+	return ret;
+}
+
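+/*
+ * Build an skb for one A-MSDU subframe: copy (or clone) the payload and
+ * rebuild the Ethernet header, stripping RFC1042/Bridge-Tunnel SNAP
+ * encapsulation when present.
+ */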
+_pkt *rtw_os_alloc_msdu_pkt(union recv_frame *prframe, u16 nSubframe_Length, u8 *pdata)
+{
+ u16 eth_type;
+ u8 *data_ptr;
+ _pkt *sub_skb;
+ struct rx_pkt_attrib *pattrib;
+
+ pattrib = &prframe->u.hdr.attrib;
+
+	sub_skb = rtw_skb_alloc(nSubframe_Length + 12);
+	if (sub_skb) {
+		skb_reserve(sub_skb, 12);
+		data_ptr = (u8 *)skb_put(sub_skb, nSubframe_Length);
+		memcpy(data_ptr, (pdata + ETH_HLEN), nSubframe_Length);
+	} else {
+		sub_skb = rtw_skb_clone(prframe->u.hdr.pkt);
+		if (sub_skb) {
+			sub_skb->data = pdata + ETH_HLEN;
+			sub_skb->len = nSubframe_Length;
+			skb_set_tail_pointer(sub_skb, nSubframe_Length);
+		} else {
+			DBG_871X("%s(): rtw_skb_clone() Fail!!!\n", __func__);
+			return NULL;
+		}
+	}
+
+ eth_type = RTW_GET_BE16(&sub_skb->data[6]);
+
+ if (sub_skb->len >= 8 &&
+ ((!memcmp(sub_skb->data, rtw_rfc1042_header, SNAP_SIZE) &&
+ eth_type != ETH_P_AARP && eth_type != ETH_P_IPX) ||
+ !memcmp(sub_skb->data, rtw_bridge_tunnel_header, SNAP_SIZE))) {
+ /* remove RFC1042 or Bridge-Tunnel encapsulation and replace EtherType */
+ skb_pull(sub_skb, SNAP_SIZE);
+ memcpy(skb_push(sub_skb, ETH_ALEN), pattrib->src, ETH_ALEN);
+ memcpy(skb_push(sub_skb, ETH_ALEN), pattrib->dst, ETH_ALEN);
+ } else {
+ __be16 len;
+ /* Leave Ethernet header part of hdr and full payload */
+ len = htons(sub_skb->len);
+ memcpy(skb_push(sub_skb, 2), &len, 2);
+ memcpy(skb_push(sub_skb, ETH_ALEN), pattrib->src, ETH_ALEN);
+ memcpy(skb_push(sub_skb, ETH_ALEN), pattrib->dst, ETH_ALEN);
+ }
+
+ return sub_skb;
+}
+
+void rtw_os_recv_indicate_pkt(struct adapter *padapter, _pkt *pkt, struct rx_pkt_attrib *pattrib)
+{
+	struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ int ret;
+
+	/* Indicate the packets to the upper layer */
+ if (pkt) {
+		if (check_fwstate(pmlmepriv, WIFI_AP_STATE) == true) {
+			_pkt *pskb2 = NULL;
+			struct sta_info *psta = NULL;
+			struct sta_priv *pstapriv = &padapter->stapriv;
+			int bmcast = IS_MCAST(pattrib->dst);
+
+			/* DBG_871X("bmcast =%d\n", bmcast); */
+
+			if (memcmp(pattrib->dst, myid(&padapter->eeprompriv), ETH_ALEN)) {
+				/* DBG_871X("not ap psta =%p, addr =%pM\n", psta, pattrib->dst); */
+
+				if (bmcast) {
+					psta = rtw_get_bcmc_stainfo(padapter);
+					pskb2 = rtw_skb_clone(pkt);
+				} else {
+					psta = rtw_get_stainfo(pstapriv, pattrib->dst);
+				}
+
+				if (psta) {
+					struct net_device *pnetdev = (struct net_device *)padapter->pnetdev;
+
+					/* DBG_871X("directly forwarding to the rtw_xmit_entry\n"); */
+
+					/* skb->ip_summed = CHECKSUM_NONE; */
+					pkt->dev = pnetdev;
+					skb_set_queue_mapping(pkt, rtw_recv_select_queue(pkt));
+
+					_rtw_xmit_entry(pkt, pnetdev);
+
+					if (bmcast && (pskb2 != NULL)) {
+						pkt = pskb2;
+						DBG_COUNTER(padapter->rx_logs.os_indicate_ap_mcast);
+					} else {
+						DBG_COUNTER(padapter->rx_logs.os_indicate_ap_forward);
+						return;
+					}
+				}
+			} else {
+				/* to AP itself */
+				/* DBG_871X("to APSelf\n"); */
+				DBG_COUNTER(padapter->rx_logs.os_indicate_ap_self);
+			}
+		}
+
+ pkt->protocol = eth_type_trans(pkt, padapter->pnetdev);
+ pkt->dev = padapter->pnetdev;
+
+#ifdef CONFIG_TCP_CSUM_OFFLOAD_RX
+ if ((pattrib->tcpchk_valid == 1) && (pattrib->tcp_chkrpt == 1)) {
+ pkt->ip_summed = CHECKSUM_UNNECESSARY;
+ } else {
+ pkt->ip_summed = CHECKSUM_NONE;
+ }
+#else /* !CONFIG_TCP_CSUM_OFFLOAD_RX */
+ pkt->ip_summed = CHECKSUM_NONE;
+#endif /* CONFIG_TCP_CSUM_OFFLOAD_RX */
+
+ ret = rtw_netif_rx(padapter->pnetdev, pkt);
+ if (ret == NET_RX_SUCCESS)
+ DBG_COUNTER(padapter->rx_logs.os_netif_ok);
+ else
+ DBG_COUNTER(padapter->rx_logs.os_netif_err);
+ }
+}
+
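+/*
+ * Record a TKIP Michael MIC failure, enable countermeasures when a second
+ * failure occurs within 60 seconds, and report the event (group or pairwise
+ * key) to cfg80211.
+ */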
+void rtw_handle_tkip_mic_err(struct adapter *padapter, u8 bgroup)
+{
+ enum nl80211_key_type key_type = 0;
+ union iwreq_data wrqu;
+ struct iw_michaelmicfailure ev;
+ struct mlme_priv* pmlmepriv = &padapter->mlmepriv;
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
+ unsigned long cur_time = 0;
+
+	if (psecuritypriv->last_mic_err_time == 0) {
+		psecuritypriv->last_mic_err_time = jiffies;
+	} else {
+		cur_time = jiffies;
+
+		if (cur_time - psecuritypriv->last_mic_err_time < 60*HZ) {
+			psecuritypriv->btkip_countermeasure = true;
+			psecuritypriv->last_mic_err_time = 0;
+			psecuritypriv->btkip_countermeasure_time = cur_time;
+		} else {
+			psecuritypriv->last_mic_err_time = jiffies;
+		}
+	}
+
+	if (bgroup)
+		key_type |= NL80211_KEYTYPE_GROUP;
+	else
+		key_type |= NL80211_KEYTYPE_PAIRWISE;
+
+	cfg80211_michael_mic_failure(padapter->pnetdev, (u8 *)&pmlmepriv->assoc_bssid[0],
+		key_type, -1, NULL, GFP_ATOMIC);
+
+ memset(&ev, 0x00, sizeof(ev));
+	if (bgroup)
+		ev.flags |= IW_MICFAILURE_GROUP;
+	else
+		ev.flags |= IW_MICFAILURE_PAIRWISE;
+
+ ev.src_addr.sa_family = ARPHRD_ETHER;
+	memcpy(ev.src_addr.sa_data, &pmlmepriv->assoc_bssid[0], ETH_ALEN);
+
+ memset(&wrqu, 0x00, sizeof(wrqu));
+ wrqu.data.length = sizeof(ev);
+}
+
+#ifdef CONFIG_AUTO_AP_MODE
+static void rtw_os_ksocket_send(struct adapter *padapter, union recv_frame *precv_frame)
+{
+ _pkt *skb = precv_frame->u.hdr.pkt;
+ struct rx_pkt_attrib *pattrib = &precv_frame->u.hdr.attrib;
+ struct sta_info *psta = precv_frame->u.hdr.psta;
+
+ DBG_871X("eth rx: got eth_type = 0x%x\n", pattrib->eth_type);
+
+	if (psta && psta->isrc && psta->pid > 0) {
+		u16 rx_pid;
+
+		rx_pid = *(u16 *)(skb->data + ETH_HLEN);
+
+		DBG_871X("eth rx(pid = 0x%x): sta("MAC_FMT") pid = 0x%x\n",
+			rx_pid, MAC_ARG(psta->hwaddr), psta->pid);
+
+		if (rx_pid == psta->pid) {
+			int i;
+			u16 len = *(u16 *)(skb->data + ETH_HLEN + 2);
+			/* u16 ctrl_type = *(u16 *)(skb->data + ETH_HLEN + 4); */
+
+			/* DBG_871X("eth, RC: len = 0x%x, ctrl_type = 0x%x\n", len, ctrl_type); */
+			DBG_871X("eth, RC: len = 0x%x\n", len);
+
+			for (i = 0; i < len; i++)
+				DBG_871X("0x%x\n", *(skb->data + ETH_HLEN + 4 + i));
+				/* DBG_871X("0x%x\n", *(skb->data + ETH_HLEN + 6 + i)); */
+
+			DBG_871X("eth, RC-end\n");
+		}
+	}
+
+}
+#endif /* CONFIG_AUTO_AP_MODE */
+
+int rtw_recv_indicatepkt(struct adapter *padapter, union recv_frame *precv_frame)
+{
+ struct recv_priv *precvpriv;
+ struct __queue *pfree_recv_queue;
+ _pkt *skb;
+ struct rx_pkt_attrib *pattrib = &precv_frame->u.hdr.attrib;
+
+ DBG_COUNTER(padapter->rx_logs.os_indicate);
+
+ precvpriv = &(padapter->recvpriv);
+ pfree_recv_queue = &(precvpriv->free_recv_queue);
+
+ skb = precv_frame->u.hdr.pkt;
+	if (!skb) {
+		RT_TRACE(_module_recv_osdep_c_, _drv_err_, ("rtw_recv_indicatepkt():skb == NULL something wrong!!!!\n"));
+		goto _recv_indicatepkt_drop;
+	}
+
+ RT_TRACE(_module_recv_osdep_c_, _drv_info_, ("rtw_recv_indicatepkt():skb != NULL !!!\n"));
+ RT_TRACE(_module_recv_osdep_c_, _drv_info_, ("rtw_recv_indicatepkt():precv_frame->u.hdr.rx_head =%p precv_frame->hdr.rx_data =%p\n", precv_frame->u.hdr.rx_head, precv_frame->u.hdr.rx_data));
+ RT_TRACE(_module_recv_osdep_c_, _drv_info_, ("precv_frame->hdr.rx_tail =%p precv_frame->u.hdr.rx_end =%p precv_frame->hdr.len =%d\n", precv_frame->u.hdr.rx_tail, precv_frame->u.hdr.rx_end, precv_frame->u.hdr.len));
+
+ skb->data = precv_frame->u.hdr.rx_data;
+
+ skb_set_tail_pointer(skb, precv_frame->u.hdr.len);
+
+ skb->len = precv_frame->u.hdr.len;
+
+ RT_TRACE(_module_recv_osdep_c_, _drv_info_, ("\n skb->head =%p skb->data =%p skb->tail =%p skb->end =%p skb->len =%d\n", skb->head, skb->data, skb_tail_pointer(skb), skb_end_pointer(skb), skb->len));
+
+#ifdef CONFIG_AUTO_AP_MODE
+	if (pattrib->eth_type == 0x8899) {
+		rtw_os_ksocket_send(padapter, precv_frame);
+
+		/* goto _recv_indicatepkt_drop; */
+	}
+#endif /* CONFIG_AUTO_AP_MODE */
+
+ rtw_os_recv_indicate_pkt(padapter, skb, pattrib);
+
+	precv_frame->u.hdr.pkt = NULL; /* set pointer to NULL before rtw_free_recvframe() */
+
+ rtw_free_recvframe(precv_frame, pfree_recv_queue);
+
+ RT_TRACE(_module_recv_osdep_c_, _drv_info_, ("\n rtw_recv_indicatepkt :after rtw_os_recv_indicate_pkt!!!!\n"));
+
+ return _SUCCESS;
+
+_recv_indicatepkt_drop:
+
+ /* enqueue back to free_recv_queue */
+ rtw_free_recvframe(precv_frame, pfree_recv_queue);
+
+ DBG_COUNTER(padapter->rx_logs.os_indicate_err);
+ return _FAIL;
+}
+
+void rtw_init_recv_timer(struct recv_reorder_ctrl *preorder_ctrl)
+{
+ struct adapter *padapter = preorder_ctrl->padapter;
+
+ _init_timer(&(preorder_ctrl->reordering_ctrl_timer), padapter->pnetdev, rtw_reordering_ctrl_timeout_handler, preorder_ctrl);
+
+}
diff --git a/drivers/staging/rtl8723bs/os_dep/rtw_proc.c b/drivers/staging/rtl8723bs/os_dep/rtw_proc.c
new file mode 100644
index 000000000000..92277457aba4
--- /dev/null
+++ b/drivers/staging/rtl8723bs/os_dep/rtw_proc.c
@@ -0,0 +1,787 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2013 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+
+#include <drv_types.h>
+#include <rtw_debug.h>
+#include "rtw_proc.h"
+
+#ifdef PROC_DEBUG
+
+static struct proc_dir_entry *rtw_proc;
+
+#define RTW_PROC_NAME "rtl8723bs"
+
+#define get_proc_net init_net.proc_net
+
+inline struct proc_dir_entry *rtw_proc_create_dir(const char *name, struct proc_dir_entry *parent, void *data)
+{
+ struct proc_dir_entry *entry;
+
+ entry = proc_mkdir_data(name, S_IRUGO|S_IXUGO, parent, data);
+
+ return entry;
+}
+
+inline struct proc_dir_entry *rtw_proc_create_entry(const char *name, struct proc_dir_entry *parent,
+ const struct file_operations *fops, void *data)
+{
+ struct proc_dir_entry *entry;
+
+ entry = proc_create_data(name, S_IFREG|S_IRUGO, parent, fops, data);
+
+ return entry;
+}
+
+static int proc_get_dummy(struct seq_file *m, void *v)
+{
+ return 0;
+}
+
+static int proc_get_drv_version(struct seq_file *m, void *v)
+{
+ dump_drv_version(m);
+ return 0;
+}
+
+static int proc_get_log_level(struct seq_file *m, void *v)
+{
+ dump_log_level(m);
+ return 0;
+}
+
+static ssize_t proc_set_log_level(struct file *file, const char __user *buffer, size_t count, loff_t *pos, void *data)
+{
+ char tmp[32];
+ int log_level;
+
+ if (count < 1)
+ return -EINVAL;
+
+ if (buffer && !copy_from_user(tmp, buffer, sizeof(tmp))) {
+ sscanf(tmp, "%d ", &log_level);
+ if (log_level >= _drv_always_ && log_level <= _drv_debug_)
+ {
+ GlobalDebugLevel = log_level;
+ printk("%d\n", GlobalDebugLevel);
+ }
+ } else {
+ return -EFAULT;
+ }
+
+ return count;
+}
+
+/*
+* rtw_drv_proc:
+* init/deinit when register/unregister driver
+*/
+static const struct rtw_proc_hdl drv_proc_hdls[] = {
+ {"ver_info", proc_get_drv_version, NULL},
+ {"log_level", proc_get_log_level, proc_set_log_level},
+};
+
+static const int drv_proc_hdls_num = ARRAY_SIZE(drv_proc_hdls);
+
+static int rtw_drv_proc_open(struct inode *inode, struct file *file)
+{
+ /* struct net_device *dev = proc_get_parent_data(inode); */
+ ssize_t index = (ssize_t)PDE_DATA(inode);
+	const struct rtw_proc_hdl *hdl = drv_proc_hdls + index;
+
+ return single_open(file, hdl->show, NULL);
+}
+
+static ssize_t rtw_drv_proc_write(struct file *file, const char __user *buffer, size_t count, loff_t *pos)
+{
+ ssize_t index = (ssize_t)PDE_DATA(file_inode(file));
+	const struct rtw_proc_hdl *hdl = drv_proc_hdls + index;
+ ssize_t (*write)(struct file *, const char __user *, size_t, loff_t *, void *) = hdl->write;
+
+ if (write)
+ return write(file, buffer, count, pos, NULL);
+
+ return -EROFS;
+}
+
+static const struct file_operations rtw_drv_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = rtw_drv_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = rtw_drv_proc_write,
+};
+
+int rtw_drv_proc_init(void)
+{
+ int ret = _FAIL;
+ ssize_t i;
+ struct proc_dir_entry *entry = NULL;
+
+ if (rtw_proc != NULL) {
+ rtw_warn_on(1);
+ goto exit;
+ }
+
+ rtw_proc = rtw_proc_create_dir(RTW_PROC_NAME, get_proc_net, NULL);
+
+ if (rtw_proc == NULL) {
+ rtw_warn_on(1);
+ goto exit;
+ }
+
+	for (i = 0; i < drv_proc_hdls_num; i++) {
+ entry = rtw_proc_create_entry(drv_proc_hdls[i].name, rtw_proc, &rtw_drv_proc_fops, (void *)i);
+ if (!entry) {
+ rtw_warn_on(1);
+ goto exit;
+ }
+ }
+
+ ret = _SUCCESS;
+
+exit:
+ return ret;
+}
+
+void rtw_drv_proc_deinit(void)
+{
+ int i;
+
+ if (rtw_proc == NULL)
+ return;
+
+	for (i = 0; i < drv_proc_hdls_num; i++)
+ remove_proc_entry(drv_proc_hdls[i].name, rtw_proc);
+
+ remove_proc_entry(RTW_PROC_NAME, get_proc_net);
+ rtw_proc = NULL;
+}
+
+static int proc_get_sd_f0_reg_dump(struct seq_file *m, void *v)
+{
+ struct net_device *dev = m->private;
+ struct adapter *adapter = (struct adapter *)rtw_netdev_priv(dev);
+
+ sd_f0_reg_dump(m, adapter);
+
+ return 0;
+}
+
+static int proc_get_mac_reg_dump(struct seq_file *m, void *v)
+{
+ struct net_device *dev = m->private;
+ struct adapter *adapter = (struct adapter *)rtw_netdev_priv(dev);
+
+ mac_reg_dump(m, adapter);
+
+ return 0;
+}
+
+static int proc_get_bb_reg_dump(struct seq_file *m, void *v)
+{
+ struct net_device *dev = m->private;
+ struct adapter *adapter = (struct adapter *)rtw_netdev_priv(dev);
+
+ bb_reg_dump(m, adapter);
+
+ return 0;
+}
+
+static int proc_get_rf_reg_dump(struct seq_file *m, void *v)
+{
+ struct net_device *dev = m->private;
+ struct adapter *adapter = (struct adapter *)rtw_netdev_priv(dev);
+
+ rf_reg_dump(m, adapter);
+
+ return 0;
+}
+
+static int proc_get_linked_info_dump(struct seq_file *m, void *v)
+{
+ struct net_device *dev = m->private;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+
+ if (padapter)
+ DBG_871X_SEL_NL(m, "linked_info_dump :%s\n", (padapter->bLinkInfoDump)?"enable":"disable");
+
+ return 0;
+}
+
+static ssize_t proc_set_linked_info_dump(struct file *file, const char __user *buffer, size_t count, loff_t *pos, void *data)
+{
+ struct net_device *dev = data;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+
+ char tmp[2];
+ int mode = 0;
+
+ if (count < 1)
+ return -EFAULT;
+
+	if (buffer && !copy_from_user(tmp, buffer, sizeof(tmp))) {
+		sscanf(tmp, "%d ", &mode);
+
+		if (padapter) {
+			/* padapter->bLinkInfoDump = mode; */
+			/* DBG_871X("linked_info_dump =%s\n", (padapter->bLinkInfoDump)?"enable":"disable"); */
+			linked_info_dump(padapter, mode);
+		}
+	}
+
+ return count;
+
+}
+
+static int proc_get_rx_info(struct seq_file *m, void *v)
+{
+ struct net_device *dev = m->private;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct dvobj_priv *psdpriv = padapter->dvobj;
+ struct debug_priv *pdbgpriv = &psdpriv->drv_dbg;
+
+	/* Counts of packets whose seq_num is less than preorder_ctrl->indicate_seq, e.g. delayed, retransmitted or redundant packets */
+	DBG_871X_SEL_NL(m, "Counts of Packets Whose Seq_Num Less Than Reorder Control Seq_Num: %llu\n", (unsigned long long)pdbgpriv->dbg_rx_ampdu_drop_count);
+	/* How many times the Rx Reorder Timer has been triggered. */
+	DBG_871X_SEL_NL(m, "Rx Reorder Time-out Trigger Counts: %llu\n", (unsigned long long)pdbgpriv->dbg_rx_ampdu_forced_indicate_count);
+	/* Total counts of lost packets */
+	DBG_871X_SEL_NL(m, "Rx Packet Loss Counts: %llu\n", (unsigned long long)pdbgpriv->dbg_rx_ampdu_loss_count);
+	DBG_871X_SEL_NL(m, "Duplicate Management Frame Drop Count: %llu\n", (unsigned long long)pdbgpriv->dbg_rx_dup_mgt_frame_drop_count);
+	DBG_871X_SEL_NL(m, "AMPDU BA window shift Count: %llu\n", (unsigned long long)pdbgpriv->dbg_rx_ampdu_window_shift_cnt);
+ return 0;
+}
+
+
+static ssize_t proc_reset_rx_info(struct file *file, const char __user *buffer, size_t count, loff_t *pos, void *data)
+{
+ struct net_device *dev = data;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct dvobj_priv *psdpriv = padapter->dvobj;
+ struct debug_priv *pdbgpriv = &psdpriv->drv_dbg;
+	char cmd[32];
+
+	if (buffer && !copy_from_user(cmd, buffer, sizeof(cmd))) {
+		if (cmd[0] == '0') {
+ pdbgpriv->dbg_rx_ampdu_drop_count = 0;
+ pdbgpriv->dbg_rx_ampdu_forced_indicate_count = 0;
+ pdbgpriv->dbg_rx_ampdu_loss_count = 0;
+ pdbgpriv->dbg_rx_dup_mgt_frame_drop_count = 0;
+ pdbgpriv->dbg_rx_ampdu_window_shift_cnt = 0;
+ }
+ }
+
+ return count;
+}
+
+static int proc_get_cam(struct seq_file *m, void *v)
+{
+ return 0;
+}
+
+static ssize_t proc_set_cam(struct file *file, const char __user *buffer, size_t count, loff_t *pos, void *data)
+{
+ struct net_device *dev = data;
+ struct adapter *adapter;
+
+ char tmp[32];
+ char cmd[5];
+ u8 id;
+
+ adapter = (struct adapter *)rtw_netdev_priv(dev);
+ if (!adapter)
+ return -EFAULT;
+
+ if (buffer && !copy_from_user(tmp, buffer, sizeof(tmp))) {
+
+ /* c <id>: clear specific cam entry */
+ /* wfc <id>: write specific cam entry from cam cache */
+
+ int num = sscanf(tmp, "%4s %hhu", cmd, &id);
+
+ if (num < 2)
+ return count;
+
+ if (strcmp("c", cmd) == 0) {
+ _clear_cam_entry(adapter, id);
+ adapter->securitypriv.hw_decrypted = false; /* temporarily set this for TX path to use SW enc */
+ } else if (strcmp("wfc", cmd) == 0) {
+ write_cam_from_cache(adapter, id);
+ }
+ }
+
+ return count;
+}
+
+static int proc_get_cam_cache(struct seq_file *m, void *v)
+{
+ struct net_device *dev = m->private;
+ struct adapter *adapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct dvobj_priv *dvobj = adapter_to_dvobj(adapter);
+ u8 i;
+
+ DBG_871X_SEL_NL(m, "cam bitmap:0x%016llx\n", dvobj->cam_ctl.bitmap);
+
+ DBG_871X_SEL_NL(m, "%-2s %-6s %-17s %-32s %-3s %-7s"
+ /* %-2s %-2s %-4s %-5s" */
+ "\n"
+ , "id", "ctrl", "addr", "key", "kid", "type"
+ /* "MK", "GK", "MFB", "valid" */
+ );
+
+	for (i = 0; i < 32; i++) {
+ if (dvobj->cam_cache[i].ctrl != 0)
+ DBG_871X_SEL_NL(m, "%2u 0x%04x "MAC_FMT" "KEY_FMT" %3u %-7s"
+ /* %2u %2u 0x%02x %5u" */
+ "\n", i
+ , dvobj->cam_cache[i].ctrl
+ , MAC_ARG(dvobj->cam_cache[i].mac)
+ , KEY_ARG(dvobj->cam_cache[i].key)
+ , (dvobj->cam_cache[i].ctrl)&0x03
+ , security_type_str(((dvobj->cam_cache[i].ctrl)>>2)&0x07)
+ /* ((dvobj->cam_cache[i].ctrl)>>5)&0x01 */
+ /* ((dvobj->cam_cache[i].ctrl)>>6)&0x01 */
+ /* ((dvobj->cam_cache[i].ctrl)>>8)&0x7f */
+ /* ((dvobj->cam_cache[i].ctrl)>>15)&0x01 */
+ );
+ }
+
+ return 0;
+}
+
+/*
+* rtw_adapter_proc:
+* init/deinit when register/unregister net_device
+*/
+static const struct rtw_proc_hdl adapter_proc_hdls[] = {
+ {"write_reg", proc_get_dummy, proc_set_write_reg},
+ {"read_reg", proc_get_read_reg, proc_set_read_reg},
+ {"fwstate", proc_get_fwstate, NULL},
+ {"sec_info", proc_get_sec_info, NULL},
+ {"mlmext_state", proc_get_mlmext_state, NULL},
+ {"qos_option", proc_get_qos_option, NULL},
+ {"ht_option", proc_get_ht_option, NULL},
+ {"rf_info", proc_get_rf_info, NULL},
+ {"survey_info", proc_get_survey_info, NULL},
+ {"ap_info", proc_get_ap_info, NULL},
+ {"adapter_state", proc_get_adapter_state, NULL},
+ {"trx_info", proc_get_trx_info, NULL},
+ {"rate_ctl", proc_get_rate_ctl, proc_set_rate_ctl},
+ {"cam", proc_get_cam, proc_set_cam},
+ {"cam_cache", proc_get_cam_cache, NULL},
+ {"suspend_info", proc_get_suspend_resume_info, NULL},
+ {"rx_info", proc_get_rx_info, proc_reset_rx_info},
+
+ {"roam_flags", proc_get_roam_flags, proc_set_roam_flags},
+ {"roam_param", proc_get_roam_param, proc_set_roam_param},
+ {"roam_tgt_addr", proc_get_dummy, proc_set_roam_tgt_addr},
+
+ {"sd_f0_reg_dump", proc_get_sd_f0_reg_dump, NULL},
+
+ {"fwdl_test_case", proc_get_dummy, proc_set_fwdl_test_case},
+ {"wait_hiq_empty", proc_get_dummy, proc_set_wait_hiq_empty},
+
+ {"mac_reg_dump", proc_get_mac_reg_dump, NULL},
+ {"bb_reg_dump", proc_get_bb_reg_dump, NULL},
+ {"rf_reg_dump", proc_get_rf_reg_dump, NULL},
+
+ {"all_sta_info", proc_get_all_sta_info, NULL},
+
+ {"rx_signal", proc_get_rx_signal, proc_set_rx_signal},
+ {"hw_info", proc_get_hw_status, NULL},
+
+ {"ht_enable", proc_get_ht_enable, proc_set_ht_enable},
+ {"bw_mode", proc_get_bw_mode, proc_set_bw_mode},
+ {"ampdu_enable", proc_get_ampdu_enable, proc_set_ampdu_enable},
+ {"rx_stbc", proc_get_rx_stbc, proc_set_rx_stbc},
+ {"rx_ampdu", proc_get_rx_ampdu, proc_set_rx_ampdu},
+
+ {"en_fwps", proc_get_en_fwps, proc_set_en_fwps},
+
+ /* path_rssi", proc_get_two_path_rssi, NULL}, */
+ {"rssi_disp", proc_get_rssi_disp, proc_set_rssi_disp},
+
+ {"btcoex_dbg", proc_get_btcoex_dbg, proc_set_btcoex_dbg},
+ {"btcoex", proc_get_btcoex_info, NULL},
+
+ {"linked_info_dump", proc_get_linked_info_dump, proc_set_linked_info_dump},
+#ifdef CONFIG_DBG_COUNTER
+ {"rx_logs", proc_get_rx_logs, NULL},
+ {"tx_logs", proc_get_tx_logs, NULL},
+ {"int_logs", proc_get_int_logs, NULL},
+#endif
+};
+
+static const int adapter_proc_hdls_num = ARRAY_SIZE(adapter_proc_hdls);
+
+static int rtw_adapter_proc_open(struct inode *inode, struct file *file)
+{
+ ssize_t index = (ssize_t)PDE_DATA(inode);
+	const struct rtw_proc_hdl *hdl = adapter_proc_hdls + index;
+
+ return single_open(file, hdl->show, proc_get_parent_data(inode));
+}
+
+static ssize_t rtw_adapter_proc_write(struct file *file, const char __user *buffer, size_t count, loff_t *pos)
+{
+ ssize_t index = (ssize_t)PDE_DATA(file_inode(file));
+	const struct rtw_proc_hdl *hdl = adapter_proc_hdls + index;
+ ssize_t (*write)(struct file *, const char __user *, size_t, loff_t *, void *) = hdl->write;
+
+ if (write)
+ return write(file, buffer, count, pos, ((struct seq_file *)file->private_data)->private);
+
+ return -EROFS;
+}
+
+static const struct file_operations rtw_adapter_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = rtw_adapter_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = rtw_adapter_proc_write,
+};
+
+int proc_get_odm_dbg_comp(struct seq_file *m, void *v)
+{
+ struct net_device *dev = m->private;
+ struct adapter *adapter = (struct adapter *)rtw_netdev_priv(dev);
+
+ rtw_odm_dbg_comp_msg(m, adapter);
+
+ return 0;
+}
+
+ssize_t proc_set_odm_dbg_comp(struct file *file, const char __user *buffer, size_t count, loff_t *pos, void *data)
+{
+ struct net_device *dev = data;
+ struct adapter *adapter = (struct adapter *)rtw_netdev_priv(dev);
+ char tmp[32];
+
+ u64 dbg_comp;
+
+ if (count < 1)
+ return -EFAULT;
+
+ if (buffer && !copy_from_user(tmp, buffer, sizeof(tmp))) {
+
+ int num = sscanf(tmp, "%llx", &dbg_comp);
+
+ if (num != 1)
+ return count;
+
+ rtw_odm_dbg_comp_set(adapter, dbg_comp);
+ }
+
+ return count;
+}
+
+int proc_get_odm_dbg_level(struct seq_file *m, void *v)
+{
+ struct net_device *dev = m->private;
+ struct adapter *adapter = (struct adapter *)rtw_netdev_priv(dev);
+
+ rtw_odm_dbg_level_msg(m, adapter);
+
+ return 0;
+}
+
+ssize_t proc_set_odm_dbg_level(struct file *file, const char __user *buffer, size_t count, loff_t *pos, void *data)
+{
+ struct net_device *dev = data;
+ struct adapter *adapter = (struct adapter *)rtw_netdev_priv(dev);
+ char tmp[32];
+
+ u32 dbg_level;
+
+ if (count < 1)
+ return -EFAULT;
+
+ if (buffer && !copy_from_user(tmp, buffer, sizeof(tmp))) {
+
+ int num = sscanf(tmp, "%u", &dbg_level);
+
+ if (num != 1)
+ return count;
+
+ rtw_odm_dbg_level_set(adapter, dbg_level);
+ }
+
+ return count;
+}
+
+static int proc_get_odm_ability(struct seq_file *m, void *v)
+{
+ struct net_device *dev = m->private;
+ struct adapter *adapter = (struct adapter *)rtw_netdev_priv(dev);
+
+ rtw_odm_ability_msg(m, adapter);
+
+ return 0;
+}
+
+static ssize_t proc_set_odm_ability(struct file *file, const char __user *buffer, size_t count, loff_t *pos, void *data)
+{
+ struct net_device *dev = data;
+ struct adapter *adapter = (struct adapter *)rtw_netdev_priv(dev);
+ char tmp[32];
+
+ u32 ability;
+
+ if (count < 1)
+ return -EFAULT;
+
+ if (buffer && !copy_from_user(tmp, buffer, sizeof(tmp))) {
+
+ int num = sscanf(tmp, "%x", &ability);
+
+ if (num != 1)
+ return count;
+
+ rtw_odm_ability_set(adapter, ability);
+ }
+
+ return count;
+}
+
+int proc_get_odm_adaptivity(struct seq_file *m, void *v)
+{
+ struct net_device *dev = m->private;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+
+ rtw_odm_adaptivity_parm_msg(m, padapter);
+
+ return 0;
+}
+
+ssize_t proc_set_odm_adaptivity(struct file *file, const char __user *buffer, size_t count, loff_t *pos, void *data)
+{
+ struct net_device *dev = data;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ char tmp[32];
+ u32 TH_L2H_ini;
+ s8 TH_EDCCA_HL_diff;
+ u32 IGI_Base;
+ int ForceEDCCA;
+ u8 AdapEn_RSSI;
+ u8 IGI_LowerBound;
+
+ if (count < 1)
+ return -EFAULT;
+
+ if (buffer && !copy_from_user(tmp, buffer, sizeof(tmp))) {
+
+ int num = sscanf(tmp, "%x %hhd %x %d %hhu %hhu",
+ &TH_L2H_ini, &TH_EDCCA_HL_diff, &IGI_Base, &ForceEDCCA, &AdapEn_RSSI, &IGI_LowerBound);
+
+ if (num != 6)
+ return count;
+
+ rtw_odm_adaptivity_parm_set(padapter, (s8)TH_L2H_ini, TH_EDCCA_HL_diff, (s8)IGI_Base, (bool)ForceEDCCA, AdapEn_RSSI, IGI_LowerBound);
+ }
+
+ return count;
+}
+
+/*
+* rtw_odm_proc:
+* init/deinit when register/unregister net_device, along with rtw_adapter_proc
+*/
+static const struct rtw_proc_hdl odm_proc_hdls[] = {
+ {"dbg_comp", proc_get_odm_dbg_comp, proc_set_odm_dbg_comp},
+ {"dbg_level", proc_get_odm_dbg_level, proc_set_odm_dbg_level},
+ {"ability", proc_get_odm_ability, proc_set_odm_ability},
+ {"adaptivity", proc_get_odm_adaptivity, proc_set_odm_adaptivity},
+};
+
+static const int odm_proc_hdls_num = ARRAY_SIZE(odm_proc_hdls);
+
+static int rtw_odm_proc_open(struct inode *inode, struct file *file)
+{
+ ssize_t index = (ssize_t)PDE_DATA(inode);
+	const struct rtw_proc_hdl *hdl = odm_proc_hdls + index;
+
+ return single_open(file, hdl->show, proc_get_parent_data(inode));
+}
+
+static ssize_t rtw_odm_proc_write(struct file *file, const char __user *buffer, size_t count, loff_t *pos)
+{
+ ssize_t index = (ssize_t)PDE_DATA(file_inode(file));
+	const struct rtw_proc_hdl *hdl = odm_proc_hdls + index;
+ ssize_t (*write)(struct file *, const char __user *, size_t, loff_t *, void *) = hdl->write;
+
+ if (write)
+ return write(file, buffer, count, pos, ((struct seq_file *)file->private_data)->private);
+
+ return -EROFS;
+}
+
+static const struct file_operations rtw_odm_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = rtw_odm_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = rtw_odm_proc_write,
+};
+
+static struct proc_dir_entry *rtw_odm_proc_init(struct net_device *dev)
+{
+ struct proc_dir_entry *dir_odm = NULL;
+ struct proc_dir_entry *entry = NULL;
+ struct adapter *adapter = rtw_netdev_priv(dev);
+ ssize_t i;
+
+ if (adapter->dir_dev == NULL) {
+ rtw_warn_on(1);
+ goto exit;
+ }
+
+ if (adapter->dir_odm != NULL) {
+ rtw_warn_on(1);
+ goto exit;
+ }
+
+ dir_odm = rtw_proc_create_dir("odm", adapter->dir_dev, dev);
+ if (dir_odm == NULL) {
+ rtw_warn_on(1);
+ goto exit;
+ }
+
+ adapter->dir_odm = dir_odm;
+
+	for (i = 0; i < odm_proc_hdls_num; i++) {
+ entry = rtw_proc_create_entry(odm_proc_hdls[i].name, dir_odm, &rtw_odm_proc_fops, (void *)i);
+ if (!entry) {
+ rtw_warn_on(1);
+ goto exit;
+ }
+ }
+
+exit:
+ return dir_odm;
+}
+
+static void rtw_odm_proc_deinit(struct adapter *adapter)
+{
+ struct proc_dir_entry *dir_odm = NULL;
+ int i;
+
+ dir_odm = adapter->dir_odm;
+
+ if (dir_odm == NULL) {
+ rtw_warn_on(1);
+ return;
+ }
+
+	for (i = 0; i < odm_proc_hdls_num; i++)
+ remove_proc_entry(odm_proc_hdls[i].name, dir_odm);
+
+ remove_proc_entry("odm", adapter->dir_dev);
+
+ adapter->dir_odm = NULL;
+}
+
+struct proc_dir_entry *rtw_adapter_proc_init(struct net_device *dev)
+{
+ struct proc_dir_entry *drv_proc = rtw_proc;
+ struct proc_dir_entry *dir_dev = NULL;
+ struct proc_dir_entry *entry = NULL;
+ struct adapter *adapter = rtw_netdev_priv(dev);
+ ssize_t i;
+
+ if (drv_proc == NULL) {
+ rtw_warn_on(1);
+ goto exit;
+ }
+
+ if (adapter->dir_dev != NULL) {
+ rtw_warn_on(1);
+ goto exit;
+ }
+
+ dir_dev = rtw_proc_create_dir(dev->name, drv_proc, dev);
+ if (dir_dev == NULL) {
+ rtw_warn_on(1);
+ goto exit;
+ }
+
+ adapter->dir_dev = dir_dev;
+
+	for (i = 0; i < adapter_proc_hdls_num; i++) {
+ entry = rtw_proc_create_entry(adapter_proc_hdls[i].name, dir_dev, &rtw_adapter_proc_fops, (void *)i);
+ if (!entry) {
+ rtw_warn_on(1);
+ goto exit;
+ }
+ }
+
+ rtw_odm_proc_init(dev);
+
+exit:
+ return dir_dev;
+}
+
+void rtw_adapter_proc_deinit(struct net_device *dev)
+{
+ struct proc_dir_entry *drv_proc = rtw_proc;
+ struct proc_dir_entry *dir_dev = NULL;
+ struct adapter *adapter = rtw_netdev_priv(dev);
+ int i;
+
+ dir_dev = adapter->dir_dev;
+
+ if (dir_dev == NULL) {
+ rtw_warn_on(1);
+ return;
+ }
+
+	for (i = 0; i < adapter_proc_hdls_num; i++)
+ remove_proc_entry(adapter_proc_hdls[i].name, dir_dev);
+
+ rtw_odm_proc_deinit(adapter);
+
+ remove_proc_entry(dev->name, drv_proc);
+
+ adapter->dir_dev = NULL;
+}
+
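+/*
+ * Recreate the per-device proc tree after an interface rename: tear down the
+ * entries registered under the old ifname and register them again under the
+ * current net_device name.
+ */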
+void rtw_adapter_proc_replace(struct net_device *dev)
+{
+ struct proc_dir_entry *drv_proc = rtw_proc;
+ struct proc_dir_entry *dir_dev = NULL;
+ struct adapter *adapter = rtw_netdev_priv(dev);
+ int i;
+
+ dir_dev = adapter->dir_dev;
+
+ if (dir_dev == NULL) {
+ rtw_warn_on(1);
+ return;
+ }
+
+	for (i = 0; i < adapter_proc_hdls_num; i++)
+ remove_proc_entry(adapter_proc_hdls[i].name, dir_dev);
+
+ rtw_odm_proc_deinit(adapter);
+
+ remove_proc_entry(adapter->old_ifname, drv_proc);
+
+ adapter->dir_dev = NULL;
+
+ rtw_adapter_proc_init(dev);
+
+}
+
+#endif /* PROC_DEBUG */
diff --git a/drivers/staging/rtl8723bs/os_dep/rtw_proc.h b/drivers/staging/rtl8723bs/os_dep/rtw_proc.h
new file mode 100644
index 000000000000..f633663fa790
--- /dev/null
+++ b/drivers/staging/rtl8723bs/os_dep/rtw_proc.h
@@ -0,0 +1,45 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2013 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#ifndef __RTW_PROC_H__
+#define __RTW_PROC_H__
+
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+
+struct rtw_proc_hdl {
+ char *name;
+ int (*show)(struct seq_file *, void *);
+ ssize_t (*write)(struct file *file, const char __user *buffer, size_t count, loff_t *pos, void *data);
+};
+
+#ifdef PROC_DEBUG
+
+int rtw_drv_proc_init(void);
+void rtw_drv_proc_deinit(void);
+struct proc_dir_entry *rtw_adapter_proc_init(struct net_device *dev);
+void rtw_adapter_proc_deinit(struct net_device *dev);
+void rtw_adapter_proc_replace(struct net_device *dev);
+
+#else /* !PROC_DEBUG */
+
+static inline int rtw_drv_proc_init(void) { return 0; }
+static inline void rtw_drv_proc_deinit(void) {}
+static inline struct proc_dir_entry *rtw_adapter_proc_init(struct net_device *dev) { return NULL; }
+static inline void rtw_adapter_proc_deinit(struct net_device *dev) {}
+static inline void rtw_adapter_proc_replace(struct net_device *dev) {}
+
+#endif /* !PROC_DEBUG */
+
+#endif /* __RTW_PROC_H__ */
diff --git a/drivers/staging/rtl8723bs/os_dep/sdio_intf.c b/drivers/staging/rtl8723bs/os_dep/sdio_intf.c
new file mode 100644
index 000000000000..d2fb489d2e83
--- /dev/null
+++ b/drivers/staging/rtl8723bs/os_dep/sdio_intf.c
@@ -0,0 +1,695 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#define _HCI_INTF_C_
+
+#include <drv_types.h>
+#include <rtw_debug.h>
+#include <linux/jiffies.h>
+
+#ifndef dev_to_sdio_func
+#define dev_to_sdio_func(d) container_of(d, struct sdio_func, dev)
+#endif
+
+static const struct sdio_device_id sdio_ids[] = {
+ { SDIO_DEVICE(0x024c, 0x0523), },
+ { SDIO_DEVICE(0x024c, 0x0623), },
+ { SDIO_DEVICE(0x024c, 0x0626), },
+ { SDIO_DEVICE(0x024c, 0xb723), },
+ { /* end: all zeroes */ },
+};
+static const struct acpi_device_id acpi_ids[] = {
+ {"OBDA8723", 0x0000},
+ {}
+};
+
+MODULE_DEVICE_TABLE(sdio, sdio_ids);
+MODULE_DEVICE_TABLE(acpi, acpi_ids);
+
+static int rtw_drv_init(struct sdio_func *func, const struct sdio_device_id *id);
+static void rtw_dev_remove(struct sdio_func *func);
+static int rtw_sdio_resume(struct device *dev);
+static int rtw_sdio_suspend(struct device *dev);
+
+static const struct dev_pm_ops rtw_sdio_pm_ops = {
+ .suspend = rtw_sdio_suspend,
+ .resume = rtw_sdio_resume,
+};
+
+struct sdio_drv_priv {
+ struct sdio_driver r871xs_drv;
+ int drv_registered;
+};
+
+static struct sdio_drv_priv sdio_drvpriv = {
+ .r871xs_drv.probe = rtw_drv_init,
+ .r871xs_drv.remove = rtw_dev_remove,
+ .r871xs_drv.name = "rtl8723bs",
+ .r871xs_drv.id_table = sdio_ids,
+ .r871xs_drv.drv = {
+ .pm = &rtw_sdio_pm_ops,
+ }
+};
+
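+/*
+ * SDIO interrupt callback registered via sdio_claim_irq(): record the irq
+ * thread and forward the interrupt to sd_int_hdl() for the primary interface.
+ */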
+static void sd_sync_int_hdl(struct sdio_func *func)
+{
+	struct dvobj_priv *psdpriv;
+
+	psdpriv = sdio_get_drvdata(func);
+
+ if (!psdpriv->if1) {
+ DBG_871X("%s if1 == NULL\n", __func__);
+ return;
+ }
+
+ rtw_sdio_set_irq_thd(psdpriv, current);
+ sd_int_hdl(psdpriv->if1);
+ rtw_sdio_set_irq_thd(psdpriv, NULL);
+}
+
+static int sdio_alloc_irq(struct dvobj_priv *dvobj)
+{
+ PSDIO_DATA psdio_data;
+ struct sdio_func *func;
+ int err;
+
+ psdio_data = &dvobj->intf_data;
+ func = psdio_data->func;
+
+ sdio_claim_host(func);
+
+	err = sdio_claim_irq(func, &sd_sync_int_hdl);
+	if (err) {
+		dvobj->drv_dbg.dbg_sdio_alloc_irq_error_cnt++;
+		printk(KERN_CRIT "%s: sdio_claim_irq FAIL(%d)!\n", __func__, err);
+	} else {
+		dvobj->drv_dbg.dbg_sdio_alloc_irq_cnt++;
+		dvobj->irq_alloc = 1;
+	}
+
+ sdio_release_host(func);
+
+	return err ? _FAIL : _SUCCESS;
+}
+
+static void sdio_free_irq(struct dvobj_priv *dvobj)
+{
+ PSDIO_DATA psdio_data;
+ struct sdio_func *func;
+ int err;
+
+ if (dvobj->irq_alloc) {
+ psdio_data = &dvobj->intf_data;
+ func = psdio_data->func;
+
+ if (func) {
+ sdio_claim_host(func);
+			err = sdio_release_irq(func);
+			if (err) {
+				dvobj->drv_dbg.dbg_sdio_free_irq_error_cnt++;
+				DBG_871X_LEVEL(_drv_err_, "%s: sdio_release_irq FAIL(%d)!\n", __func__, err);
+			} else {
+				dvobj->drv_dbg.dbg_sdio_free_irq_cnt++;
+			}
+ sdio_release_host(func);
+ }
+ dvobj->irq_alloc = 0;
+ }
+}
+
+#ifdef CONFIG_GPIO_WAKEUP
+extern unsigned int oob_irq;
+static irqreturn_t gpio_hostwakeup_irq_thread(int irq, void *data)
+{
+	struct adapter *padapter = (struct adapter *)data;
+
+	DBG_871X_LEVEL(_drv_always_, "gpio_hostwakeup_irq_thread\n");
+ /* Disable interrupt before calling handler */
+ /* disable_irq_nosync(oob_irq); */
+ rtw_lock_suspend_timeout(HZ/2);
+ return IRQ_HANDLED;
+}
+
+static u8 gpio_hostwakeup_alloc_irq(struct adapter *padapter)
+{
+ int err;
+ if (oob_irq == 0) {
+ DBG_871X("oob_irq ZERO!\n");
+ return _FAIL;
+ }
+	/*
+	 * Don't use IRQF_TRIGGER_LOW: for WoWLAN the GPIO level is high after
+	 * suspend, and a level trigger could cause a cannot-sleep issue when
+	 * the wifi GPIO12 pin is not wired to the CPU.
+	 */
+ err = request_threaded_irq(oob_irq, gpio_hostwakeup_irq_thread, NULL,
+ /* IRQF_TRIGGER_LOW | IRQF_ONESHOT, */
+ IRQF_TRIGGER_FALLING,
+ "rtw_wifi_gpio_wakeup", padapter);
+ if (err < 0) {
+ DBG_871X("Oops: can't allocate gpio irq %d err:%d\n", oob_irq, err);
+ return false;
+ } else {
+ DBG_871X("allocate gpio irq %d ok\n", oob_irq);
+ }
+
+ enable_irq_wake(oob_irq);
+ return _SUCCESS;
+}
+
+static void gpio_hostwakeup_free_irq(struct adapter *padapter)
+{
+ if (oob_irq == 0)
+ return;
+
+ disable_irq_wake(oob_irq);
+ free_irq(oob_irq, padapter);
+}
+#endif
+
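+/* Enable the SDIO function and set the 512-byte block transfer size. */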
+static u32 sdio_init(struct dvobj_priv *dvobj)
+{
+ PSDIO_DATA psdio_data;
+ struct sdio_func *func;
+ int err;
+
+ psdio_data = &dvobj->intf_data;
+ func = psdio_data->func;
+
+ /* 3 1. init SDIO bus */
+ sdio_claim_host(func);
+
+ err = sdio_enable_func(func);
+ if (err) {
+ dvobj->drv_dbg.dbg_sdio_init_error_cnt++;
+ DBG_8192C(KERN_CRIT "%s: sdio_enable_func FAIL(%d)!\n", __func__, err);
+ goto release;
+ }
+
+ err = sdio_set_block_size(func, 512);
+ if (err) {
+ dvobj->drv_dbg.dbg_sdio_init_error_cnt++;
+ DBG_8192C(KERN_CRIT "%s: sdio_set_block_size FAIL(%d)!\n", __func__, err);
+ goto release;
+ }
+ psdio_data->block_transfer_len = 512;
+ psdio_data->tx_block_mode = 1;
+ psdio_data->rx_block_mode = 1;
+
+release:
+ sdio_release_host(func);
+
+ if (err)
+ return _FAIL;
+ return _SUCCESS;
+}
+
+static void sdio_deinit(struct dvobj_priv *dvobj)
+{
+ struct sdio_func *func;
+ int err;
+
+
+ RT_TRACE(_module_hci_intfs_c_, _drv_notice_, ("+sdio_deinit\n"));
+
+ func = dvobj->intf_data.func;
+
+ if (func) {
+ sdio_claim_host(func);
+ err = sdio_disable_func(func);
+		if (err) {
+			dvobj->drv_dbg.dbg_sdio_deinit_error_cnt++;
+			DBG_8192C(KERN_ERR "%s: sdio_disable_func(%d)\n", __func__, err);
+		}
+
+		if (dvobj->irq_alloc) {
+			err = sdio_release_irq(func);
+			if (err) {
+				dvobj->drv_dbg.dbg_sdio_free_irq_error_cnt++;
+				DBG_8192C(KERN_ERR "%s: sdio_release_irq(%d)\n", __func__, err);
+			} else {
+				dvobj->drv_dbg.dbg_sdio_free_irq_cnt++;
+			}
+		}
+
+ sdio_release_host(func);
+ }
+}
+
+static struct dvobj_priv *sdio_dvobj_init(struct sdio_func *func)
+{
+ int status = _FAIL;
+ struct dvobj_priv *dvobj = NULL;
+ PSDIO_DATA psdio;
+
+ dvobj = devobj_init();
+	if (dvobj == NULL)
+		goto exit;
+
+ sdio_set_drvdata(func, dvobj);
+
+ psdio = &dvobj->intf_data;
+ psdio->func = func;
+
+ if (sdio_init(dvobj) != _SUCCESS) {
+ RT_TRACE(_module_hci_intfs_c_, _drv_err_, ("%s: initialize SDIO Failed!\n", __func__));
+ goto free_dvobj;
+ }
+ rtw_reset_continual_io_error(dvobj);
+ status = _SUCCESS;
+
+free_dvobj:
+ if (status != _SUCCESS && dvobj) {
+ sdio_set_drvdata(func, NULL);
+
+ devobj_deinit(dvobj);
+
+ dvobj = NULL;
+ }
+exit:
+ return dvobj;
+}
+
+static void sdio_dvobj_deinit(struct sdio_func *func)
+{
+ struct dvobj_priv *dvobj = sdio_get_drvdata(func);
+
+ sdio_set_drvdata(func, NULL);
+ if (dvobj) {
+ sdio_deinit(dvobj);
+ devobj_deinit(dvobj);
+ }
+}
+
+void rtw_set_hal_ops(struct adapter *padapter)
+{
+ /* alloc memory for HAL DATA */
+ rtw_hal_data_init(padapter);
+
+ rtl8723bs_set_hal_ops(padapter);
+}
+
+static void sd_intf_start(struct adapter *padapter)
+{
+ if (padapter == NULL) {
+ DBG_8192C(KERN_ERR "%s: padapter is NULL!\n", __func__);
+ return;
+ }
+
+ /* hal dep */
+ rtw_hal_enable_interrupt(padapter);
+}
+
+static void sd_intf_stop(struct adapter *padapter)
+{
+ if (padapter == NULL) {
+ DBG_8192C(KERN_ERR "%s: padapter is NULL!\n", __func__);
+ return;
+ }
+
+ /* hal dep */
+ rtw_hal_disable_interrupt(padapter);
+}
+
+
+static struct adapter *rtw_sdio_if1_init(struct dvobj_priv *dvobj, const struct sdio_device_id *pdid)
+{
+ int status = _FAIL;
+ struct net_device *pnetdev;
+ struct adapter *padapter = NULL;
+ PSDIO_DATA psdio = &dvobj->intf_data;
+
+ padapter = vzalloc(sizeof(*padapter));
+ if (padapter == NULL) {
+ goto exit;
+ }
+
+ padapter->dvobj = dvobj;
+ dvobj->if1 = padapter;
+
+ padapter->bDriverStopped = true;
+
+ dvobj->padapters = padapter;
+ padapter->iface_id = 0;
+
+ /* 3 1. init network device data */
+ pnetdev = rtw_init_netdev(padapter);
+ if (!pnetdev)
+ goto free_adapter;
+
+ SET_NETDEV_DEV(pnetdev, dvobj_to_dev(dvobj));
+
+ padapter = rtw_netdev_priv(pnetdev);
+
+ rtw_wdev_alloc(padapter, dvobj_to_dev(dvobj));
+
+ /* 3 3. init driver special setting, interface, OS and hardware relative */
+
+ /* 4 3.1 set hardware operation functions */
+ rtw_set_hal_ops(padapter);
+
+
+ /* 3 5. initialize Chip version */
+ padapter->intf_start = &sd_intf_start;
+ padapter->intf_stop = &sd_intf_stop;
+
+ padapter->intf_init = &sdio_init;
+ padapter->intf_deinit = &sdio_deinit;
+ padapter->intf_alloc_irq = &sdio_alloc_irq;
+ padapter->intf_free_irq = &sdio_free_irq;
+
+ if (rtw_init_io_priv(padapter, sdio_set_intf_ops) == _FAIL)
+ {
+ RT_TRACE(_module_hci_intfs_c_, _drv_err_,
+ ("rtw_drv_init: Can't init io_priv\n"));
+ goto free_hal_data;
+ }
+
+ rtw_hal_read_chip_version(padapter);
+
+ rtw_hal_chip_configure(padapter);
+
+ rtw_btcoex_Initialize(padapter);
+
+ /* 3 6. read efuse/eeprom data */
+ rtw_hal_read_chip_info(padapter);
+
+ /* 3 7. init driver common data */
+ if (rtw_init_drv_sw(padapter) == _FAIL) {
+ RT_TRACE(_module_hci_intfs_c_, _drv_err_,
+ ("rtw_drv_init: Initialize driver software resource Failed!\n"));
+ goto free_hal_data;
+ }
+
+ /* 3 8. get WLan MAC address */
+ /* set mac addr */
+ rtw_macaddr_cfg(&psdio->func->dev, padapter->eeprompriv.mac_addr);
+
+ rtw_hal_disable_interrupt(padapter);
+
+ DBG_871X("bDriverStopped:%d, bSurpriseRemoved:%d, bup:%d, hw_init_completed:%d\n"
+ , padapter->bDriverStopped
+ , padapter->bSurpriseRemoved
+ , padapter->bup
+ , padapter->hw_init_completed
+ );
+
+ status = _SUCCESS;
+
+free_hal_data:
+ if (status != _SUCCESS && padapter->HalData)
+ kfree(padapter->HalData);
+
+ if (status != _SUCCESS) {
+ rtw_wdev_unregister(padapter->rtw_wdev);
+ rtw_wdev_free(padapter->rtw_wdev);
+ }
+
+free_adapter:
+ if (status != _SUCCESS) {
+ if (pnetdev)
+ rtw_free_netdev(pnetdev);
+ else
+ vfree(padapter);
+ padapter = NULL;
+ }
+exit:
+ return padapter;
+}
+
+static void rtw_sdio_if1_deinit(struct adapter *if1)
+{
+ struct net_device *pnetdev = if1->pnetdev;
+ struct mlme_priv *pmlmepriv = &if1->mlmepriv;
+
+ if (check_fwstate(pmlmepriv, _FW_LINKED))
+ rtw_disassoc_cmd(if1, 0, false);
+
+ free_mlme_ap_info(if1);
+
+#ifdef CONFIG_GPIO_WAKEUP
+ gpio_hostwakeup_free_irq(if1);
+#endif
+
+ rtw_cancel_all_timer(if1);
+
+#ifdef CONFIG_WOWLAN
+ adapter_to_pwrctl(if1)->wowlan_mode = false;
+ DBG_871X_LEVEL(_drv_always_, "%s wowlan_mode:%d\n", __func__, adapter_to_pwrctl(if1)->wowlan_mode);
+#endif /* CONFIG_WOWLAN */
+
+ rtw_dev_unload(if1);
+ DBG_871X("+r871xu_dev_remove, hw_init_completed =%d\n", if1->hw_init_completed);
+
+ if (if1->rtw_wdev) {
+ rtw_wdev_free(if1->rtw_wdev);
+ }
+
+ rtw_free_drv_sw(if1);
+
+ if (pnetdev)
+ rtw_free_netdev(pnetdev);
+}
+
+/*
+ * rtw_drv_init() - probe a device that is potentially ours
+ *
+ * Note: called when the bus driver has located a card for us to support.
+ * We accept the new device by returning 0.
+ */
+static int rtw_drv_init(
+ struct sdio_func *func,
+ const struct sdio_device_id *id)
+{
+ int status = _FAIL;
+ struct adapter *if1 = NULL;
+ struct dvobj_priv *dvobj;
+
+ dvobj = sdio_dvobj_init(func);
+ if (dvobj == NULL) {
+ RT_TRACE(_module_hci_intfs_c_, _drv_err_, ("initialize device object priv Failed!\n"));
+ goto exit;
+ }
+
+ if1 = rtw_sdio_if1_init(dvobj, id);
+ if (if1 == NULL) {
+ DBG_871X("rtw_init_primarystruct adapter Failed!\n");
+ goto free_dvobj;
+ }
+
+ /* dev_alloc_name && register_netdev */
+ status = rtw_drv_register_netdev(if1);
+ if (status != _SUCCESS) {
+ goto free_if2;
+ }
+
+ if (sdio_alloc_irq(dvobj) != _SUCCESS)
+ goto free_if2;
+
+#ifdef CONFIG_GPIO_WAKEUP
+ gpio_hostwakeup_alloc_irq(if1);
+#endif
+
+ RT_TRACE(_module_hci_intfs_c_, _drv_err_, ("-871x_drv - drv_init, success!\n"));
+
+ rtw_ndev_notifier_register();
+ status = _SUCCESS;
+
+free_if2:
+ if (status != _SUCCESS && if1) {
+ rtw_sdio_if1_deinit(if1);
+ }
+free_dvobj:
+ if (status != _SUCCESS)
+ sdio_dvobj_deinit(func);
+exit:
+ return status == _SUCCESS ? 0 : -ENODEV;
+}
+
+static void rtw_dev_remove(struct sdio_func *func)
+{
+ struct dvobj_priv *dvobj = sdio_get_drvdata(func);
+ struct adapter *padapter = dvobj->if1;
+
+ RT_TRACE(_module_hci_intfs_c_, _drv_notice_, ("+rtw_dev_remove\n"));
+
+ dvobj->processing_dev_remove = true;
+
+ rtw_unregister_netdevs(dvobj);
+
+ if (padapter->bSurpriseRemoved == false) {
+ int err;
+
+ /* test surprise remove */
+ sdio_claim_host(func);
+ sdio_readb(func, 0, &err);
+ sdio_release_host(func);
+ if (err == -ENOMEDIUM) {
+ padapter->bSurpriseRemoved = true;
+ DBG_871X(KERN_NOTICE "%s: device had been removed!\n", __func__);
+ }
+ }
+
+ rtw_ps_deny(padapter, PS_DENY_DRV_REMOVE);
+
+ rtw_pm_set_ips(padapter, IPS_NONE);
+ rtw_pm_set_lps(padapter, PS_MODE_ACTIVE);
+
+ LeaveAllPowerSaveMode(padapter);
+
+ rtw_btcoex_HaltNotify(padapter);
+
+ rtw_sdio_if1_deinit(padapter);
+
+ sdio_dvobj_deinit(func);
+
+ RT_TRACE(_module_hci_intfs_c_, _drv_notice_, ("-rtw_dev_remove\n"));
+}
+
+extern int pm_netdev_open(struct net_device *pnetdev, u8 bnormal);
+extern int pm_netdev_close(struct net_device *pnetdev, u8 bnormal);
+
+static int rtw_sdio_suspend(struct device *dev)
+{
+ struct sdio_func *func = dev_to_sdio_func(dev);
+ struct dvobj_priv *psdpriv = sdio_get_drvdata(func);
+ struct pwrctrl_priv *pwrpriv = dvobj_to_pwrctl(psdpriv);
+ struct adapter *padapter = psdpriv->if1;
+ struct debug_priv *pdbgpriv = &psdpriv->drv_dbg;
+
+ if (padapter->bDriverStopped == true)
+ {
+ DBG_871X("%s bDriverStopped = %d\n", __func__, padapter->bDriverStopped);
+ return 0;
+ }
+
+ if (pwrpriv->bInSuspend == true)
+ {
+ DBG_871X("%s bInSuspend = %d\n", __func__, pwrpriv->bInSuspend);
+ pdbgpriv->dbg_suspend_error_cnt++;
+ return 0;
+ }
+
+ return rtw_suspend_common(padapter);
+}
+
+static int rtw_resume_process(struct adapter *padapter)
+{
+ struct pwrctrl_priv *pwrpriv = adapter_to_pwrctl(padapter);
+ struct dvobj_priv *psdpriv = padapter->dvobj;
+ struct debug_priv *pdbgpriv = &psdpriv->drv_dbg;
+
+ if (pwrpriv->bInSuspend == false)
+ {
+ pdbgpriv->dbg_resume_error_cnt++;
+ DBG_871X("%s bInSuspend = %d\n", __func__, pwrpriv->bInSuspend);
+ return -1;
+ }
+
+ return rtw_resume_common(padapter);
+}
+
+static int rtw_sdio_resume(struct device *dev)
+{
+ struct sdio_func *func = dev_to_sdio_func(dev);
+ struct dvobj_priv *psdpriv = sdio_get_drvdata(func);
+ struct adapter *padapter = psdpriv->if1;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ int ret = 0;
+ struct debug_priv *pdbgpriv = &psdpriv->drv_dbg;
+
+ DBG_871X("==> %s (%s:%d)\n", __func__, current->comm, current->pid);
+
+ pdbgpriv->dbg_resume_cnt++;
+
+ ret = rtw_resume_process(padapter);
+ pmlmeext->last_scan_time = jiffies;
+ DBG_871X("<======== %s return %d\n", __func__, ret);
+ return ret;
+
+}
+
+static int __init rtw_drv_entry(void)
+{
+ int ret = 0;
+
+ DBG_871X_LEVEL(_drv_always_, "module init start\n");
+ dump_drv_version(RTW_DBGDUMP);
+#ifdef BTCOEXVERSION
+ DBG_871X_LEVEL(_drv_always_, "rtl8723bs BT-Coex version = %s\n", BTCOEXVERSION);
+#endif /* BTCOEXVERSION */
+
+ sdio_drvpriv.drv_registered = true;
+ rtw_drv_proc_init();
+
+ ret = sdio_register_driver(&sdio_drvpriv.r871xs_drv);
+ if (ret != 0)
+ {
+ sdio_drvpriv.drv_registered = false;
+ rtw_drv_proc_deinit();
+ rtw_ndev_notifier_unregister();
+ DBG_871X("%s: register driver failed!!(%d)\n", __func__, ret);
+ goto exit;
+ }
+
+ goto exit;
+
+exit:
+ DBG_871X_LEVEL(_drv_always_, "module init ret =%d\n", ret);
+ return ret;
+}
+
+static void __exit rtw_drv_halt(void)
+{
+ DBG_871X_LEVEL(_drv_always_, "module exit start\n");
+
+ sdio_drvpriv.drv_registered = false;
+
+ sdio_unregister_driver(&sdio_drvpriv.r871xs_drv);
+
+ rtw_drv_proc_deinit();
+ rtw_ndev_notifier_unregister();
+
+ DBG_871X_LEVEL(_drv_always_, "module exit success\n");
+
+ rtw_mstat_dump(RTW_DBGDUMP);
+}
+
+
+module_init(rtw_drv_entry);
+module_exit(rtw_drv_halt);
diff --git a/drivers/staging/rtl8723bs/os_dep/sdio_ops_linux.c b/drivers/staging/rtl8723bs/os_dep/sdio_ops_linux.c
new file mode 100644
index 000000000000..33f0f83b002d
--- /dev/null
+++ b/drivers/staging/rtl8723bs/os_dep/sdio_ops_linux.c
@@ -0,0 +1,599 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ *******************************************************************************/
+#define _SDIO_OPS_LINUX_C_
+
+#include <drv_types.h>
+#include <rtw_debug.h>
+
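+/*
+ * The MMC core runs a function's SDIO IRQ handler with the host already
+ * claimed, so claiming it again from that thread would deadlock. Callers
+ * use this helper to skip claim/release when running in the registered
+ * SDIO IRQ thread (sys_sdio_irq_thd).
+ */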
+static bool rtw_sdio_claim_host_needed(struct sdio_func *func)
+{
+ struct dvobj_priv *dvobj = sdio_get_drvdata(func);
+ PSDIO_DATA sdio_data = &dvobj->intf_data;
+
+ if (sdio_data->sys_sdio_irq_thd && sdio_data->sys_sdio_irq_thd == current)
+ return false;
+ return true;
+}
+
+inline void rtw_sdio_set_irq_thd(struct dvobj_priv *dvobj, void *thd_hdl)
+{
+ PSDIO_DATA sdio_data = &dvobj->intf_data;
+
+ sdio_data->sys_sdio_irq_thd = thd_hdl;
+}
+
+u8 sd_f0_read8(struct intf_hdl *pintfhdl, u32 addr, s32 *err)
+{
+ struct adapter *padapter;
+ struct dvobj_priv *psdiodev;
+ PSDIO_DATA psdio;
+
+ u8 v = 0;
+ struct sdio_func *func;
+ bool claim_needed;
+
+ padapter = pintfhdl->padapter;
+ psdiodev = pintfhdl->pintf_dev;
+ psdio = &psdiodev->intf_data;
+
+ if (padapter->bSurpriseRemoved) {
+ /* DBG_871X(" %s (padapter->bSurpriseRemoved ||adapter->pwrctrlpriv.pnp_bstop_trx)!!!\n", __func__); */
+ return v;
+ }
+
+ func = psdio->func;
+ claim_needed = rtw_sdio_claim_host_needed(func);
+
+ if (claim_needed)
+ sdio_claim_host(func);
+ v = sdio_f0_readb(func, addr, err);
+ if (claim_needed)
+ sdio_release_host(func);
+ if (err && *err)
+ DBG_871X(KERN_ERR "%s: FAIL!(%d) addr = 0x%05x\n", __func__, *err, addr);
+ return v;
+}
+
+/*
+ * Return:
+ *	0	Success
+ *	others	Fail
+ */
+s32 _sd_cmd52_read(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *pdata)
+{
+ struct adapter *padapter;
+ struct dvobj_priv *psdiodev;
+ PSDIO_DATA psdio;
+
+ int err = 0, i;
+ struct sdio_func *func;
+
+ padapter = pintfhdl->padapter;
+ psdiodev = pintfhdl->pintf_dev;
+ psdio = &psdiodev->intf_data;
+
+ if (padapter->bSurpriseRemoved) {
+ /* DBG_871X(" %s (padapter->bSurpriseRemoved ||adapter->pwrctrlpriv.pnp_bstop_trx)!!!\n", __func__); */
+ return err;
+ }
+
+ func = psdio->func;
+
+ for (i = 0; i < cnt; i++) {
+ pdata[i] = sdio_readb(func, addr+i, &err);
+ if (err) {
+ DBG_871X(KERN_ERR "%s: FAIL!(%d) addr = 0x%05x\n", __func__, err, addr+i);
+ break;
+ }
+ }
+ return err;
+}
+
+/*
+ * Return:
+ *	0	Success
+ *	others	Fail
+ */
+s32 sd_cmd52_read(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *pdata)
+{
+ struct adapter *padapter;
+ struct dvobj_priv *psdiodev;
+ PSDIO_DATA psdio;
+
+ int err = 0;
+ struct sdio_func *func;
+ bool claim_needed;
+
+ padapter = pintfhdl->padapter;
+ psdiodev = pintfhdl->pintf_dev;
+ psdio = &psdiodev->intf_data;
+
+ if (padapter->bSurpriseRemoved) {
+ /* DBG_871X(" %s (padapter->bSurpriseRemoved ||adapter->pwrctrlpriv.pnp_bstop_trx)!!!\n", __func__); */
+ return err;
+ }
+
+ func = psdio->func;
+ claim_needed = rtw_sdio_claim_host_needed(func);
+
+ if (claim_needed)
+ sdio_claim_host(func);
+ err = _sd_cmd52_read(pintfhdl, addr, cnt, pdata);
+ if (claim_needed)
+ sdio_release_host(func);
+ return err;
+}
+
+/*
+ * Return:
+ *	0	Success
+ *	others	Fail
+ */
+s32 _sd_cmd52_write(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *pdata)
+{
+ struct adapter *padapter;
+ struct dvobj_priv *psdiodev;
+ PSDIO_DATA psdio;
+
+ int err = 0, i;
+ struct sdio_func *func;
+
+ padapter = pintfhdl->padapter;
+ psdiodev = pintfhdl->pintf_dev;
+ psdio = &psdiodev->intf_data;
+
+ if (padapter->bSurpriseRemoved) {
+ /* DBG_871X(" %s (padapter->bSurpriseRemoved ||adapter->pwrctrlpriv.pnp_bstop_trx)!!!\n", __func__); */
+ return err;
+ }
+
+ func = psdio->func;
+
+ for (i = 0; i < cnt; i++) {
+ sdio_writeb(func, pdata[i], addr+i, &err);
+ if (err) {
+ DBG_871X(KERN_ERR "%s: FAIL!(%d) addr = 0x%05x val = 0x%02x\n", __func__, err, addr+i, pdata[i]);
+ break;
+ }
+ }
+ return err;
+}
+
+/*
+ * Return:
+ *	0	Success
+ *	others	Fail
+ */
+s32 sd_cmd52_write(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *pdata)
+{
+ struct adapter *padapter;
+ struct dvobj_priv *psdiodev;
+ PSDIO_DATA psdio;
+
+ int err = 0;
+ struct sdio_func *func;
+ bool claim_needed;
+
+ padapter = pintfhdl->padapter;
+ psdiodev = pintfhdl->pintf_dev;
+ psdio = &psdiodev->intf_data;
+
+ if (padapter->bSurpriseRemoved) {
+ /* DBG_871X(" %s (padapter->bSurpriseRemoved ||adapter->pwrctrlpriv.pnp_bstop_trx)!!!\n", __func__); */
+ return err;
+ }
+
+ func = psdio->func;
+ claim_needed = rtw_sdio_claim_host_needed(func);
+
+ if (claim_needed)
+ sdio_claim_host(func);
+ err = _sd_cmd52_write(pintfhdl, addr, cnt, pdata);
+ if (claim_needed)
+ sdio_release_host(func);
+ return err;
+}
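+
+/*
+ * Usage sketch (editorial illustration, not part of the original patch):
+ * the CMD52 helpers above transfer one byte per bus transaction, so they
+ * suit small register accesses. The 0x0080 register offset used below is
+ * hypothetical.
+ */
+static void __maybe_unused sd_cmd52_example(struct intf_hdl *pintfhdl)
+{
+	u8 val = 0;
+
+	/* read one byte, set bit 0, write it back */
+	if (sd_cmd52_read(pintfhdl, 0x0080, 1, &val) == 0) {
+		val |= BIT(0);
+		sd_cmd52_write(pintfhdl, 0x0080, 1, &val);
+	}
+}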
+
+u8 sd_read8(struct intf_hdl *pintfhdl, u32 addr, s32 *err)
+{
+ struct adapter *padapter;
+ struct dvobj_priv *psdiodev;
+ PSDIO_DATA psdio;
+
+ u8 v = 0;
+ struct sdio_func *func;
+ bool claim_needed;
+
+ padapter = pintfhdl->padapter;
+ psdiodev = pintfhdl->pintf_dev;
+ psdio = &psdiodev->intf_data;
+
+ if (padapter->bSurpriseRemoved) {
+ /* DBG_871X(" %s (padapter->bSurpriseRemoved ||adapter->pwrctrlpriv.pnp_bstop_trx)!!!\n", __func__); */
+ return v;
+ }
+
+ func = psdio->func;
+ claim_needed = rtw_sdio_claim_host_needed(func);
+
+ if (claim_needed)
+ sdio_claim_host(func);
+ v = sdio_readb(func, addr, err);
+ if (claim_needed)
+ sdio_release_host(func);
+ if (err && *err)
+ DBG_871X(KERN_ERR "%s: FAIL!(%d) addr = 0x%05x\n", __func__, *err, addr);
+ return v;
+}
+
+u32 sd_read32(struct intf_hdl *pintfhdl, u32 addr, s32 *err)
+{
+ struct adapter *padapter;
+ struct dvobj_priv *psdiodev;
+ PSDIO_DATA psdio;
+ u32 v = 0;
+ struct sdio_func *func;
+ bool claim_needed;
+
+ padapter = pintfhdl->padapter;
+ psdiodev = pintfhdl->pintf_dev;
+ psdio = &psdiodev->intf_data;
+
+ if (padapter->bSurpriseRemoved) {
+ /* DBG_871X(" %s (padapter->bSurpriseRemoved ||adapter->pwrctrlpriv.pnp_bstop_trx)!!!\n", __func__); */
+ return v;
+ }
+
+ func = psdio->func;
+ claim_needed = rtw_sdio_claim_host_needed(func);
+
+ if (claim_needed)
+ sdio_claim_host(func);
+ v = sdio_readl(func, addr, err);
+ if (claim_needed)
+ sdio_release_host(func);
+
+ if (err && *err)
+ {
+ int i;
+
+ DBG_871X(KERN_ERR "%s: (%d) addr = 0x%05x, val = 0x%x\n", __func__, *err, addr, v);
+
+ *err = 0;
+ for (i = 0; i < SD_IO_TRY_CNT; i++) {
+ if (claim_needed)
+ sdio_claim_host(func);
+ v = sdio_readl(func, addr, err);
+ if (claim_needed)
+ sdio_release_host(func);
+
+ if (*err == 0) {
+ rtw_reset_continual_io_error(psdiodev);
+ break;
+ } else {
+ DBG_871X(KERN_ERR "%s: (%d) addr = 0x%05x, val = 0x%x, try_cnt =%d\n", __func__, *err, addr, v, i);
+ if ((-ESHUTDOWN == *err) || (-ENODEV == *err)) {
+ padapter->bSurpriseRemoved = true;
+ }
+
+ if (rtw_inc_and_chk_continual_io_error(psdiodev) == true) {
+ padapter->bSurpriseRemoved = true;
+ break;
+ }
+ }
+ }
+
+ if (i == SD_IO_TRY_CNT)
+ DBG_871X(KERN_ERR "%s: FAIL!(%d) addr = 0x%05x, val = 0x%x, try_cnt =%d\n", __func__, *err, addr, v, i);
+ else
+ DBG_871X(KERN_ERR "%s: (%d) addr = 0x%05x, val = 0x%x, try_cnt =%d\n", __func__, *err, addr, v, i);
+
+ }
+ return v;
+}
+
+void sd_write8(struct intf_hdl *pintfhdl, u32 addr, u8 v, s32 *err)
+{
+ struct adapter *padapter;
+ struct dvobj_priv *psdiodev;
+ PSDIO_DATA psdio;
+ struct sdio_func *func;
+ bool claim_needed;
+
+ padapter = pintfhdl->padapter;
+ psdiodev = pintfhdl->pintf_dev;
+ psdio = &psdiodev->intf_data;
+
+ if (padapter->bSurpriseRemoved) {
+ /* DBG_871X(" %s (padapter->bSurpriseRemoved ||adapter->pwrctrlpriv.pnp_bstop_trx)!!!\n", __func__); */
+ return;
+ }
+
+ func = psdio->func;
+ claim_needed = rtw_sdio_claim_host_needed(func);
+
+ if (claim_needed)
+ sdio_claim_host(func);
+ sdio_writeb(func, v, addr, err);
+ if (claim_needed)
+ sdio_release_host(func);
+ if (err && *err)
+ DBG_871X(KERN_ERR "%s: FAIL!(%d) addr = 0x%05x val = 0x%02x\n", __func__, *err, addr, v);
+}
+
+void sd_write32(struct intf_hdl *pintfhdl, u32 addr, u32 v, s32 *err)
+{
+ struct adapter *padapter;
+ struct dvobj_priv *psdiodev;
+ PSDIO_DATA psdio;
+ struct sdio_func *func;
+ bool claim_needed;
+
+ padapter = pintfhdl->padapter;
+ psdiodev = pintfhdl->pintf_dev;
+ psdio = &psdiodev->intf_data;
+
+ if (padapter->bSurpriseRemoved) {
+ /* DBG_871X(" %s (padapter->bSurpriseRemoved ||adapter->pwrctrlpriv.pnp_bstop_trx)!!!\n", __func__); */
+ return;
+ }
+
+ func = psdio->func;
+ claim_needed = rtw_sdio_claim_host_needed(func);
+
+ if (claim_needed)
+ sdio_claim_host(func);
+ sdio_writel(func, v, addr, err);
+ if (claim_needed)
+ sdio_release_host(func);
+
+ if (err && *err)
+ {
+ int i;
+
+ DBG_871X(KERN_ERR "%s: (%d) addr = 0x%05x val = 0x%08x\n", __func__, *err, addr, v);
+
+ *err = 0;
+ for (i = 0; i < SD_IO_TRY_CNT; i++) {
+ if (claim_needed)
+ sdio_claim_host(func);
+ sdio_writel(func, v, addr, err);
+ if (claim_needed)
+ sdio_release_host(func);
+ if (*err == 0) {
+ rtw_reset_continual_io_error(psdiodev);
+ break;
+ } else {
+ DBG_871X(KERN_ERR "%s: (%d) addr = 0x%05x, val = 0x%x, try_cnt =%d\n", __func__, *err, addr, v, i);
+ if ((-ESHUTDOWN == *err) || (-ENODEV == *err)) {
+ padapter->bSurpriseRemoved = true;
+ }
+
+ if (rtw_inc_and_chk_continual_io_error(psdiodev) == true) {
+ padapter->bSurpriseRemoved = true;
+ break;
+ }
+ }
+ }
+
+ if (i == SD_IO_TRY_CNT)
+ DBG_871X(KERN_ERR "%s: FAIL!(%d) addr = 0x%05x val = 0x%08x, try_cnt =%d\n", __func__, *err, addr, v, i);
+ else
+ DBG_871X(KERN_ERR "%s: (%d) addr = 0x%05x val = 0x%08x, try_cnt =%d\n", __func__, *err, addr, v, i);
+ }
+}
+
+/*
+ * Use CMD53 to read data from the SDIO device.
+ * This function MUST be called after sdio_claim_host() or from the
+ * SDIO ISR (where the host is already claimed).
+ *
+ * Parameters:
+ *	psdio	pointer to SDIO_DATA
+ *	addr	address to read from
+ *	cnt	amount to read
+ *	pdata	buffer for the data; this must be a DMA-able scratch buffer
+ *
+ * Return:
+ *	0	Success
+ *	others	Fail
+ */
+s32 _sd_read(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, void *pdata)
+{
+ struct adapter *padapter;
+ struct dvobj_priv *psdiodev;
+ PSDIO_DATA psdio;
+
+ int err = -EPERM;
+ struct sdio_func *func;
+
+ padapter = pintfhdl->padapter;
+ psdiodev = pintfhdl->pintf_dev;
+ psdio = &psdiodev->intf_data;
+
+ if (padapter->bSurpriseRemoved) {
+ /* DBG_871X(" %s (padapter->bSurpriseRemoved ||adapter->pwrctrlpriv.pnp_bstop_trx)!!!\n", __func__); */
+ return err;
+ }
+
+ func = psdio->func;
+
+ if (unlikely((cnt == 1) || (cnt == 2)))
+ {
+ int i;
+ u8 *pbuf = (u8 *)pdata;
+
+ for (i = 0; i < cnt; i++)
+ {
+ *(pbuf+i) = sdio_readb(func, addr+i, &err);
+
+ if (err) {
+ DBG_871X(KERN_ERR "%s: FAIL!(%d) addr = 0x%05x\n", __func__, err, addr);
+ break;
+ }
+ }
+ return err;
+ }
+
+ err = sdio_memcpy_fromio(func, pdata, addr, cnt);
+ if (err) {
+ DBG_871X(KERN_ERR "%s: FAIL(%d)! ADDR =%#x Size =%d\n", __func__, err, addr, cnt);
+ }
+ return err;
+}
+
+/*
+ * Use CMD53 to read data from the SDIO device.
+ *
+ * Parameters:
+ *	psdio	pointer to SDIO_DATA
+ *	addr	address to read from
+ *	cnt	amount to read
+ *	pdata	buffer for the data; this must be a DMA-able scratch buffer
+ *
+ * Return:
+ *	0	Success
+ *	others	Fail
+ */
+s32 sd_read(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, void *pdata)
+{
+ struct adapter *padapter;
+ struct dvobj_priv *psdiodev;
+ PSDIO_DATA psdio;
+
+ struct sdio_func *func;
+ bool claim_needed;
+ s32 err = -EPERM;
+
+ padapter = pintfhdl->padapter;
+ psdiodev = pintfhdl->pintf_dev;
+ psdio = &psdiodev->intf_data;
+
+ if (padapter->bSurpriseRemoved) {
+ /* DBG_871X(" %s (padapter->bSurpriseRemoved ||adapter->pwrctrlpriv.pnp_bstop_trx)!!!\n", __func__); */
+ return err;
+ }
+ func = psdio->func;
+ claim_needed = rtw_sdio_claim_host_needed(func);
+
+ if (claim_needed)
+ sdio_claim_host(func);
+ err = _sd_read(pintfhdl, addr, cnt, pdata);
+ if (claim_needed)
+ sdio_release_host(func);
+ return err;
+}
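+
+/*
+ * Usage sketch (editorial illustration, not part of the original patch):
+ * CMD53 transfers need a DMA-able buffer, so a caller allocates a scratch
+ * buffer with kmalloc() instead of reading into stack memory. The 512-byte
+ * size and the register address are hypothetical.
+ */
+static s32 __maybe_unused sd_read_example(struct intf_hdl *pintfhdl, u32 addr)
+{
+	u8 *buf;
+	s32 err;
+
+	buf = kmalloc(512, GFP_KERNEL);	/* DMA-able scratch buffer */
+	if (!buf)
+		return -ENOMEM;
+
+	err = sd_read(pintfhdl, addr, 512, buf);
+	if (err)
+		DBG_871X("%s: CMD53 read failed (%d)\n", __func__, err);
+
+	kfree(buf);
+	return err;
+}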
+
+/*
+ * Use CMD53 to write data to the SDIO device.
+ * This function MUST be called after sdio_claim_host() or from the
+ * SDIO ISR (where the host is already claimed).
+ *
+ * Parameters:
+ *	psdio	pointer to SDIO_DATA
+ *	addr	address to write to
+ *	cnt	amount to write
+ *	pdata	data buffer; this must be a DMA-able scratch buffer
+ *
+ * Return:
+ *	0	Success
+ *	others	Fail
+ */
+s32 _sd_write(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, void *pdata)
+{
+ struct adapter *padapter;
+ struct dvobj_priv *psdiodev;
+ PSDIO_DATA psdio;
+
+ struct sdio_func *func;
+ u32 size;
+ s32 err = -EPERM;
+
+ padapter = pintfhdl->padapter;
+ psdiodev = pintfhdl->pintf_dev;
+ psdio = &psdiodev->intf_data;
+
+ if (padapter->bSurpriseRemoved) {
+ /* DBG_871X(" %s (padapter->bSurpriseRemoved ||adapter->pwrctrlpriv.pnp_bstop_trx)!!!\n", __func__); */
+ return err;
+ }
+
+ func = psdio->func;
+/* size = sdio_align_size(func, cnt); */
+
+ if (unlikely((cnt == 1) || (cnt == 2)))
+ {
+ int i;
+ u8 *pbuf = (u8 *)pdata;
+
+ for (i = 0; i < cnt; i++)
+ {
+ sdio_writeb(func, *(pbuf+i), addr+i, &err);
+ if (err) {
+ DBG_871X(KERN_ERR "%s: FAIL!(%d) addr = 0x%05x val = 0x%02x\n", __func__, err, addr, *(pbuf+i));
+ break;
+ }
+ }
+
+ return err;
+ }
+
+ size = cnt;
+ err = sdio_memcpy_toio(func, addr, pdata, size);
+ if (err) {
+ DBG_871X(KERN_ERR "%s: FAIL(%d)! ADDR =%#x Size =%d(%d)\n", __func__, err, addr, cnt, size);
+ }
+ return err;
+}
+
+/*
+ * Use CMD53 to write data to SDIO device.
+ *
+ * Parameters:
+ * psdio pointer of SDIO_DATA
+ * addr address to write
+ * cnt amount to write
+ * pdata data pointer; this must be a DMA-able scratch buffer
+ *
+ * Return:
+ * 0 Success
+ * others Fail
+ */
+s32 sd_write(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, void *pdata)
+{
+ struct adapter *padapter;
+ struct dvobj_priv *psdiodev;
+ PSDIO_DATA psdio;
+ struct sdio_func *func;
+ bool claim_needed;
+ s32 err = -EPERM;
+
+ padapter = pintfhdl->padapter;
+ psdiodev = pintfhdl->pintf_dev;
+ psdio = &psdiodev->intf_data;
+
+ if (padapter->bSurpriseRemoved) {
+ /* DBG_871X(" %s (padapter->bSurpriseRemoved ||adapter->pwrctrlpriv.pnp_bstop_trx)!!!\n", __func__); */
+ return err;
+ }
+
+ func = psdio->func;
+ claim_needed = rtw_sdio_claim_host_needed(func);
+
+ if (claim_needed)
+ sdio_claim_host(func);
+ err = _sd_write(pintfhdl, addr, cnt, pdata);
+ if (claim_needed)
+ sdio_release_host(func);
+ return err;
+}
diff --git a/drivers/staging/rtl8723bs/os_dep/wifi_regd.c b/drivers/staging/rtl8723bs/os_dep/wifi_regd.c
new file mode 100644
index 000000000000..9c61125f5910
--- /dev/null
+++ b/drivers/staging/rtl8723bs/os_dep/wifi_regd.c
@@ -0,0 +1,164 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ *****************************************************************************/
+
+#include <drv_types.h>
+#include <rtw_debug.h>
+
+#include <rtw_wifi_regd.h>
+
+/*
+ * REG_RULE(freq start, freq end, bandwidth, max gain, eirp, reg_flags)
+ */
+
+/*
+ * Only these channels allow active
+ * scan on all world regulatory domains
+ */
+
+/* 2G chan 01 - chan 11 */
+#define RTW_2GHZ_CH01_11 \
+ REG_RULE(2412-10, 2462+10, 40, 0, 20, 0)
+
+/*
+ * We enable active scan on these on a
+ * case-by-case basis by regulatory domain
+ */
+
+/* 2G chan 12 - chan 13, PASSIVE SCAN */
+#define RTW_2GHZ_CH12_13 \
+ REG_RULE(2467-10, 2472+10, 40, 0, 20, \
+ NL80211_RRF_PASSIVE_SCAN)
+
+/* 2G chan 14, PASSIVE SCAN, NO OFDM (B only) */
+#define RTW_2GHZ_CH14 \
+ REG_RULE(2484-10, 2484+10, 40, 0, 20, \
+ NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_OFDM)
+
+static const struct ieee80211_regdomain rtw_regdom_rd = {
+ .n_reg_rules = 2,
+ .alpha2 = "99",
+ .reg_rules = {
+ RTW_2GHZ_CH01_11,
+ RTW_2GHZ_CH12_13,
+ }
+};
+
+static int rtw_ieee80211_channel_to_frequency(int chan, int band)
+{
+ /* see 802.11 17.3.8.3.2 and Annex J
+ * there are overlapping channel numbers in 5GHz and 2GHz bands */
+
+ /* NL80211_BAND_2GHZ */
+ if (chan == 14)
+ return 2484;
+ else if (chan < 14)
+ return 2407 + chan * 5;
+ else
+ return 0; /* not supported */
+}
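+
+/*
+ * Worked example (editorial note, not part of the original patch): for the
+ * 2.4 GHz band the mapping above gives chan 1 -> 2407 + 1*5 = 2412 MHz,
+ * chan 6 -> 2437 MHz, chan 13 -> 2472 MHz, and the special case
+ * chan 14 -> 2484 MHz.
+ */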
+
+static void _rtw_reg_apply_flags(struct wiphy *wiphy)
+{
+ struct adapter *padapter = wiphy_to_adapter(wiphy);
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ RT_CHANNEL_INFO *channel_set = pmlmeext->channel_set;
+ u8 max_chan_nums = pmlmeext->max_chan_nums;
+
+ struct ieee80211_supported_band *sband;
+ struct ieee80211_channel *ch;
+ unsigned int i, j;
+ u16 channel;
+ u32 freq;
+
+ /* disable all channels first */
+ for (i = 0; i < NUM_NL80211_BANDS; i++) {
+ sband = wiphy->bands[i];
+
+ if (sband) {
+ for (j = 0; j < sband->n_channels; j++) {
+ ch = &sband->channels[j];
+
+ if (ch)
+ ch->flags = IEEE80211_CHAN_DISABLED;
+ }
+ }
+ }
+
+ /* channels apply by channel plans. */
+ for (i = 0; i < max_chan_nums; i++) {
+ channel = channel_set[i].ChannelNum;
+ freq =
+ rtw_ieee80211_channel_to_frequency(channel,
+ NL80211_BAND_2GHZ);
+
+ ch = ieee80211_get_channel(wiphy, freq);
+ if (ch) {
+ if (channel_set[i].ScanType == SCAN_PASSIVE)
+ ch->flags = IEEE80211_CHAN_NO_IR;
+ else
+ ch->flags = 0;
+ }
+ }
+}
+
+static int _rtw_reg_notifier_apply(struct wiphy *wiphy,
+ struct regulatory_request *request,
+ struct rtw_regulatory *reg)
+{
+ /* Hard code flags */
+ _rtw_reg_apply_flags(wiphy);
+ return 0;
+}
+
+static const struct ieee80211_regdomain *
+_rtw_regdomain_select(struct rtw_regulatory *reg)
+{
+ return &rtw_regdom_rd;
+}
+
+static void _rtw_regd_init_wiphy(struct rtw_regulatory *reg,
+ struct wiphy *wiphy,
+ void (*reg_notifier)(struct wiphy *wiphy,
+ struct regulatory_request *request))
+{
+ const struct ieee80211_regdomain *regd;
+
+ wiphy->reg_notifier = reg_notifier;
+
+ wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG;
+ wiphy->regulatory_flags &= ~REGULATORY_STRICT_REG;
+ wiphy->regulatory_flags &= ~REGULATORY_DISABLE_BEACON_HINTS;
+
+ regd = _rtw_regdomain_select(reg);
+ wiphy_apply_custom_regulatory(wiphy, regd);
+
+ /* Hard code flags */
+ _rtw_reg_apply_flags(wiphy);
+}
+
+int rtw_regd_init(struct adapter *padapter,
+ void (*reg_notifier)(struct wiphy *wiphy,
+ struct regulatory_request *request))
+{
+ /* struct registry_priv *registrypriv = &padapter->registrypriv; */
+ struct wiphy *wiphy = padapter->rtw_wdev->wiphy;
+ _rtw_regd_init_wiphy(NULL, wiphy, reg_notifier);
+
+ return 0;
+}
+
+void rtw_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request)
+{
+ struct rtw_regulatory *reg = NULL;
+
+ DBG_8192C("%s\n", __func__);
+
+ _rtw_reg_notifier_apply(wiphy, request, reg);
+}
diff --git a/drivers/staging/rtl8723bs/os_dep/xmit_linux.c b/drivers/staging/rtl8723bs/os_dep/xmit_linux.c
new file mode 100644
index 000000000000..76968161f936
--- /dev/null
+++ b/drivers/staging/rtl8723bs/os_dep/xmit_linux.c
@@ -0,0 +1,296 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ ******************************************************************************/
+#define _XMIT_OSDEP_C_
+
+#include <drv_types.h>
+#include <rtw_debug.h>
+
+
+uint rtw_remainder_len(struct pkt_file *pfile)
+{
+ return (pfile->buf_len - ((SIZE_PTR)(pfile->cur_addr) - (SIZE_PTR)(pfile->buf_start)));
+}
+
+void _rtw_open_pktfile(_pkt *pktptr, struct pkt_file *pfile)
+{
+ pfile->pkt = pktptr;
+ pfile->cur_addr = pfile->buf_start = pktptr->data;
+ pfile->pkt_len = pfile->buf_len = pktptr->len;
+
+ pfile->cur_buffer = pfile->buf_start;
+}
+
+uint _rtw_pktfile_read(struct pkt_file *pfile, u8 *rmem, uint rlen)
+{
+ uint len = 0;
+
+ len = rtw_remainder_len(pfile);
+ len = (rlen > len) ? len : rlen;
+
+ if (rmem)
+ skb_copy_bits(pfile->pkt, pfile->buf_len-pfile->pkt_len, rmem, len);
+
+ pfile->cur_addr += len;
+ pfile->pkt_len -= len;
+ return len;
+}
+
+sint rtw_endofpktfile(struct pkt_file *pfile)
+{
+ if (pfile->pkt_len == 0)
+ return true;
+ return false;
+}
+
+void rtw_set_tx_chksum_offload(_pkt *pkt, struct pkt_attrib *pattrib)
+{
+
+}
+
+int rtw_os_xmit_resource_alloc(struct adapter *padapter, struct xmit_buf *pxmitbuf, u32 alloc_sz, u8 flag)
+{
+ if (alloc_sz > 0) {
+ pxmitbuf->pallocated_buf = rtw_zmalloc(alloc_sz);
+ if (!pxmitbuf->pallocated_buf)
+ return _FAIL;
+
+ pxmitbuf->pbuf = (u8 *)N_BYTE_ALIGMENT((SIZE_PTR)(pxmitbuf->pallocated_buf), XMITBUF_ALIGN_SZ);
+ }
+
+ return _SUCCESS;
+}
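+
+/*
+ * Note (editorial, not part of the original patch): N_BYTE_ALIGMENT()
+ * above rounds the allocated buffer up to the next XMITBUF_ALIGN_SZ
+ * boundary; for a power-of-two alignment this is the usual
+ *	aligned = (addr + (align - 1)) & ~(align - 1)
+ * e.g. addr 0x1003 with align 8 gives 0x1008.
+ */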
+
+void rtw_os_xmit_resource_free(struct adapter *padapter, struct xmit_buf *pxmitbuf, u32 free_sz, u8 flag)
+{
+ if (free_sz > 0)
+ kfree(pxmitbuf->pallocated_buf);
+}
+
+#define WMM_XMIT_THRESHOLD (NR_XMITFRAME*2/5)
+
+void rtw_os_pkt_complete(struct adapter *padapter, _pkt *pkt)
+{
+ u16 queue;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+
+ queue = skb_get_queue_mapping(pkt);
+ if (padapter->registrypriv.wifi_spec) {
+ if (__netif_subqueue_stopped(padapter->pnetdev, queue) &&
+ (pxmitpriv->hwxmits[queue].accnt < WMM_XMIT_THRESHOLD))
+ {
+ netif_wake_subqueue(padapter->pnetdev, queue);
+ }
+ } else {
+ if (__netif_subqueue_stopped(padapter->pnetdev, queue))
+ netif_wake_subqueue(padapter->pnetdev, queue);
+ }
+
+ dev_kfree_skb_any(pkt);
+}
+
+void rtw_os_xmit_complete(struct adapter *padapter, struct xmit_frame *pxframe)
+{
+ if (pxframe->pkt)
+ rtw_os_pkt_complete(padapter, pxframe->pkt);
+
+ pxframe->pkt = NULL;
+}
+
+void rtw_os_xmit_schedule(struct adapter *padapter)
+{
+ struct adapter *pri_adapter = padapter;
+
+ if (!padapter)
+ return;
+
+ if (!list_empty(&padapter->xmitpriv.pending_xmitbuf_queue.queue))
+ up(&pri_adapter->xmitpriv.xmit_sema);
+}
+
+static void rtw_check_xmit_resource(struct adapter *padapter, _pkt *pkt)
+{
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+ u16 queue;
+
+ queue = skb_get_queue_mapping(pkt);
+ if (padapter->registrypriv.wifi_spec) {
+ /* No free space for Tx, tx_worker is too slow */
+ if (pxmitpriv->hwxmits[queue].accnt > WMM_XMIT_THRESHOLD) {
+ /* DBG_871X("%s(): stop netif_subqueue[%d]\n", __func__, queue); */
+ netif_stop_subqueue(padapter->pnetdev, queue);
+ }
+ } else {
+ if (pxmitpriv->free_xmitframe_cnt <= 4) {
+ if (!netif_tx_queue_stopped(netdev_get_tx_queue(padapter->pnetdev, queue)))
+ netif_stop_subqueue(padapter->pnetdev, queue);
+ }
+ }
+}
+
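+/*
+ * Convert a multicast frame into per-station unicast copies: walk the
+ * association list, copy the skb for each active peer, rewrite the
+ * destination MAC to that peer's hwaddr and hand the copy to rtw_xmit().
+ * Returns true when the original skb has been consumed here, false when
+ * the caller should transmit the original frame the normal way.
+ */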
+static int rtw_mlcst2unicst(struct adapter *padapter, struct sk_buff *skb)
+{
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+ struct list_head *phead, *plist;
+ struct sk_buff *newskb;
+ struct sta_info *psta = NULL;
+ u8 chk_alive_num = 0;
+ char chk_alive_list[NUM_STA];
+ u8 bc_addr[6] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+ u8 null_addr[6] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
+
+ int i;
+ s32 res;
+
+ DBG_COUNTER(padapter->tx_logs.os_tx_m2u);
+
+ spin_lock_bh(&pstapriv->asoc_list_lock);
+ phead = &pstapriv->asoc_list;
+ plist = get_next(phead);
+
+ /* free sta asoc_queue */
+ while (phead != plist) {
+ int stainfo_offset;
+ psta = LIST_CONTAINOR(plist, struct sta_info, asoc_list);
+ plist = get_next(plist);
+
+ stainfo_offset = rtw_stainfo_offset(pstapriv, psta);
+ if (stainfo_offset_valid(stainfo_offset)) {
+ chk_alive_list[chk_alive_num++] = stainfo_offset;
+ }
+ }
+ spin_unlock_bh(&pstapriv->asoc_list_lock);
+
+ for (i = 0; i < chk_alive_num; i++) {
+ psta = rtw_get_stainfo_by_offset(pstapriv, chk_alive_list[i]);
+ if (!(psta->state & _FW_LINKED)) {
+ DBG_COUNTER(padapter->tx_logs.os_tx_m2u_ignore_fw_linked);
+ continue;
+ }
+
+ /* do not send the frame back to the station it came from, */
+ /* and skip null/broadcast hwaddr entries */
+ if (!memcmp(psta->hwaddr, &skb->data[6], 6) ||
+ !memcmp(psta->hwaddr, null_addr, 6) ||
+ !memcmp(psta->hwaddr, bc_addr, 6)) {
+ DBG_COUNTER(padapter->tx_logs.os_tx_m2u_ignore_self);
+ continue;
+ }
+
+ DBG_COUNTER(padapter->tx_logs.os_tx_m2u_entry);
+
+ newskb = rtw_skb_copy(skb);
+
+ if (newskb) {
+ memcpy(newskb->data, psta->hwaddr, 6);
+ res = rtw_xmit(padapter, &newskb);
+ if (res < 0) {
+ DBG_COUNTER(padapter->tx_logs.os_tx_m2u_entry_err_xmit);
+ DBG_871X("%s()-%d: rtw_xmit() return error!\n", __func__, __LINE__);
+ pxmitpriv->tx_drop++;
+ dev_kfree_skb_any(newskb);
+ }
+ } else {
+ DBG_COUNTER(padapter->tx_logs.os_tx_m2u_entry_err_skb);
+ DBG_871X("%s-%d: rtw_skb_copy() failed!\n", __func__, __LINE__);
+ pxmitpriv->tx_drop++;
+ /* dev_kfree_skb_any(skb); */
+ return false; /* Caller shall tx this multicast frame via normal way. */
+ }
+ }
+
+ dev_kfree_skb_any(skb);
+ return true;
+}
+
+int _rtw_xmit_entry(_pkt *pkt, _nic_hdl pnetdev)
+{
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(pnetdev);
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ s32 res = 0;
+
+ DBG_COUNTER(padapter->tx_logs.os_tx);
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("+xmit_enry\n"));
+
+ if (rtw_if_up(padapter) == false) {
+ DBG_COUNTER(padapter->tx_logs.os_tx_err_up);
+ RT_TRACE(_module_xmit_osdep_c_, _drv_err_, ("rtw_xmit_entry: rtw_if_up fail\n"));
+ #ifdef DBG_TX_DROP_FRAME
+ DBG_871X("DBG_TX_DROP_FRAME %s if_up fail\n", __func__);
+ #endif
+ goto drop_packet;
+ }
+
+ rtw_check_xmit_resource(padapter, pkt);
+
+ if (!rtw_mc2u_disable
+ && check_fwstate(pmlmepriv, WIFI_AP_STATE) == true
+ && (IP_MCAST_MAC(pkt->data)
+ || ICMPV6_MCAST_MAC(pkt->data)
+ #ifdef CONFIG_TX_BCAST2UNI
+ || is_broadcast_mac_addr(pkt->data)
+ #endif
+ )
+ && (padapter->registrypriv.wifi_spec == 0)
+ )
+ {
+ if (pxmitpriv->free_xmitframe_cnt > (NR_XMITFRAME/4)) {
+ res = rtw_mlcst2unicst(padapter, pkt);
+ if (res == true) {
+ goto exit;
+ }
+ } else {
+ /* DBG_871X("Stop M2U(%d, %d)! ", pxmitpriv->free_xmitframe_cnt, pxmitpriv->free_xmitbuf_cnt); */
+ /* DBG_871X("!m2u); */
+ DBG_COUNTER(padapter->tx_logs.os_tx_m2u_stop);
+ }
+ }
+
+ res = rtw_xmit(padapter, &pkt);
+ if (res < 0) {
+ #ifdef DBG_TX_DROP_FRAME
+ DBG_871X("DBG_TX_DROP_FRAME %s rtw_xmit fail\n", __func__);
+ #endif
+ goto drop_packet;
+ }
+
+ RT_TRACE(_module_xmit_osdep_c_, _drv_info_, ("rtw_xmit_entry: tx_pkts =%d\n", (u32)pxmitpriv->tx_pkts));
+ goto exit;
+
+drop_packet:
+ pxmitpriv->tx_drop++;
+ dev_kfree_skb_any(pkt);
+ RT_TRACE(_module_xmit_osdep_c_, _drv_notice_, ("rtw_xmit_entry: drop, tx_drop =%d\n", (u32)pxmitpriv->tx_drop));
+
+exit:
+ return 0;
+}
+
+int rtw_xmit_entry(_pkt *pkt, _nic_hdl pnetdev)
+{
+ int ret = 0;
+
+ if (pkt) {
+ rtw_mstat_update(MSTAT_TYPE_SKB, MSTAT_ALLOC_SUCCESS, pkt->truesize);
+ ret = _rtw_xmit_entry(pkt, pnetdev);
+ }
+
+ return ret;
+}
diff --git a/drivers/staging/rts5208/ms.c b/drivers/staging/rts5208/ms.c
index 806c12180714..482a29dd06f8 100644
--- a/drivers/staging/rts5208/ms.c
+++ b/drivers/staging/rts5208/ms.c
@@ -2594,7 +2594,7 @@ static int ms_build_l2p_tbl(struct rtsx_chip *chip, int seg_no)
u16 start, end, phy_blk, log_blk, tmp_blk, idx;
u8 extra[MS_EXTRA_SIZE], us1, us2;
- dev_dbg(rtsx_dev(chip), "ms_build_l2p_tbl: %d\n", seg_no);
+ dev_dbg(rtsx_dev(chip), "%s: %d\n", __func__, seg_no);
if (!ms_card->segment) {
retval = ms_init_l2p_tbl(chip);
diff --git a/drivers/staging/rts5208/rtsx_chip.c b/drivers/staging/rts5208/rtsx_chip.c
index 3511157a2c78..7f4107bffe31 100644
--- a/drivers/staging/rts5208/rtsx_chip.c
+++ b/drivers/staging/rts5208/rtsx_chip.c
@@ -1490,7 +1490,7 @@ int rtsx_write_register(struct rtsx_chip *chip, u16 addr, u8 mask, u8 data)
for (i = 0; i < MAX_RW_REG_CNT; i++) {
val = rtsx_readl(chip, RTSX_HAIMR);
- if ((val & (1 << 31)) == 0) {
+ if ((val & BIT(31)) == 0) {
if (data != (u8)val) {
rtsx_trace(chip);
return STATUS_FAIL;
@@ -1518,7 +1518,7 @@ int rtsx_read_register(struct rtsx_chip *chip, u16 addr, u8 *data)
for (i = 0; i < MAX_RW_REG_CNT; i++) {
val = rtsx_readl(chip, RTSX_HAIMR);
- if ((val & (1 << 31)) == 0)
+ if ((val & BIT(31)) == 0)
break;
}
diff --git a/drivers/staging/rts5208/rtsx_transport.c b/drivers/staging/rts5208/rtsx_transport.c
index 23799013c432..8b57e17ee6d3 100644
--- a/drivers/staging/rts5208/rtsx_transport.c
+++ b/drivers/staging/rts5208/rtsx_transport.c
@@ -766,8 +766,7 @@ int rtsx_transfer_data(struct rtsx_chip *chip, u8 card, void *buf, size_t len,
return -EIO;
if (use_sg) {
- err = rtsx_transfer_sglist_adma(chip, card,
- (struct scatterlist *)buf,
+ err = rtsx_transfer_sglist_adma(chip, card, buf,
use_sg, dma_dir, timeout);
} else {
err = rtsx_transfer_buf(chip, card, buf, len, dma_dir, timeout);
diff --git a/drivers/staging/sm750fb/ddk750_chip.c b/drivers/staging/sm750fb/ddk750_chip.c
index 10cf7295dc6c..5e4bfb601cea 100644
--- a/drivers/staging/sm750fb/ddk750_chip.c
+++ b/drivers/staging/sm750fb/ddk750_chip.c
@@ -16,9 +16,9 @@ logical_chip_type_t sm750_get_chip_type(void)
void sm750_set_chip_type(unsigned short devId, u8 revId)
{
- if (devId == 0x718)
+ if (devId == 0x718) {
chip = SM718;
- else if (devId == 0x750) {
+ } else if (devId == 0x750) {
chip = SM750;
/* SM750 and SM750LE are different in their revision ID only. */
if (revId == SM750LE_REVISION_ID) {
@@ -69,11 +69,11 @@ static void set_chip_clock(unsigned int frequency)
pll.clockType = MXCLK_PLL;
/*
- * Call sm750_calc_pll_value() to fill the other fields of the PLL
- * structure. Sometimes, the chip cannot set up the exact
- * clock required by the User.
- * Return value of sm750_calc_pll_value gives the actual possible
- * clock.
+ * Call sm750_calc_pll_value() to fill the other fields
+ * of the PLL structure. Sometimes, the chip cannot set
+ * up the exact clock required by the User.
+ * Return value of sm750_calc_pll_value gives the actual
+ * possible clock.
*/
ulActualMxClk = sm750_calc_pll_value(frequency, &pll);
@@ -352,7 +352,7 @@ unsigned int sm750_calc_pll_value(unsigned int request_orig, struct pll_value *p
RN = N * request;
quo = RN / input;
rem = RN % input;/* rem always small than 14318181 */
- fl_quo = (rem * 10000 / input);
+ fl_quo = rem * 10000 / input;
for (d = max_d; d >= 0; d--) {
X = BIT(d);
diff --git a/drivers/staging/sm750fb/ddk750_chip.h b/drivers/staging/sm750fb/ddk750_chip.h
index fbeb615aa432..2c7a9b9a7c8a 100644
--- a/drivers/staging/sm750fb/ddk750_chip.h
+++ b/drivers/staging/sm750fb/ddk750_chip.h
@@ -98,6 +98,6 @@ void sm750_set_chip_type(unsigned short devId, u8 revId);
unsigned int sm750_calc_pll_value(unsigned int request, struct pll_value *pll);
unsigned int sm750_format_pll_reg(struct pll_value *pPLL);
unsigned int ddk750_get_vm_size(void);
-int ddk750_init_hw(struct initchip_param *);
+int ddk750_init_hw(struct initchip_param *pinit_param);
#endif
diff --git a/drivers/staging/sm750fb/ddk750_display.c b/drivers/staging/sm750fb/ddk750_display.c
index e4724a660d07..9b116ed6ecc7 100644
--- a/drivers/staging/sm750fb/ddk750_display.c
+++ b/drivers/staging/sm750fb/ddk750_display.c
@@ -146,7 +146,8 @@ void ddk750_setLogicalDispOut(disp_output_t output)
if (output & PNL_SEQ_USAGE) {
/* set panel sequence */
- swPanelPowerSequence((output & PNL_SEQ_MASK) >> PNL_SEQ_OFFSET, 4);
+ swPanelPowerSequence((output & PNL_SEQ_MASK) >> PNL_SEQ_OFFSET,
+ 4);
}
if (output & DAC_USAGE)
diff --git a/drivers/staging/sm750fb/ddk750_display.h b/drivers/staging/sm750fb/ddk750_display.h
index 8abca88f089e..609bf742efff 100644
--- a/drivers/staging/sm750fb/ddk750_display.h
+++ b/drivers/staging/sm750fb/ddk750_display.h
@@ -12,7 +12,6 @@
#define PNL_2_PRI ((0 << PNL_2_OFFSET) | PNL_2_USAGE)
#define PNL_2_SEC ((2 << PNL_2_OFFSET) | PNL_2_USAGE)
-
/*
* primary timing & plane enable bit
* 1: 80000[8] & 80000[2] on
@@ -24,7 +23,6 @@
#define PRI_TP_ON ((0x1 << PRI_TP_OFFSET) | PRI_TP_USAGE)
#define PRI_TP_OFF ((0x0 << PRI_TP_OFFSET) | PRI_TP_USAGE)
-
/*
* panel sequency status
* 80000[27:24]
@@ -66,7 +64,6 @@
#define CRT_2_PRI ((0x0 << CRT_2_OFFSET) | CRT_2_USAGE)
#define CRT_2_SEC ((0x2 << CRT_2_OFFSET) | CRT_2_USAGE)
-
/*
* DAC affect both DVI and DSUB
* 4[20]
@@ -87,8 +84,6 @@
#define DPMS_OFF ((3 << DPMS_OFFSET) | DPMS_USAGE)
#define DPMS_ON ((0 << DPMS_OFFSET) | DPMS_USAGE)
-
-
/*
* LCD1 means panel path TFT1 & panel path DVI (so enable DAC)
* CRT means crt path DSUB
@@ -107,6 +102,6 @@ typedef enum _disp_output_t {
}
disp_output_t;
-void ddk750_setLogicalDispOut(disp_output_t);
+void ddk750_setLogicalDispOut(disp_output_t output);
#endif
diff --git a/drivers/staging/sm750fb/ddk750_dvi.c b/drivers/staging/sm750fb/ddk750_dvi.c
index 250c2f478778..171ae063f06f 100644
--- a/drivers/staging/sm750fb/ddk750_dvi.c
+++ b/drivers/staging/sm750fb/ddk750_dvi.c
@@ -5,7 +5,6 @@
#include "ddk750_dvi.h"
#include "ddk750_sii164.h"
-
/*
* This global variable contains all the supported driver and its corresponding
* function API. Please set the function pointer to NULL whenever the function
@@ -30,7 +29,6 @@ static dvi_ctrl_device_t g_dcftSupportedDviController[] = {
#endif
};
-
int dviInit(
unsigned char edgeSelect,
unsigned char busSelect,
@@ -47,7 +45,7 @@ int dviInit(
dvi_ctrl_device_t *pCurrentDviCtrl;
pCurrentDviCtrl = g_dcftSupportedDviController;
- if (pCurrentDviCtrl->pfnInit != NULL) {
+ if (pCurrentDviCtrl->pfnInit) {
return pCurrentDviCtrl->pfnInit(edgeSelect, busSelect, dualEdgeClkSelect, hsyncEnable,
vsyncEnable, deskewEnable, deskewSetting, continuousSyncEnable,
pllFilterEnable, pllFilterValue);
@@ -56,5 +54,3 @@ int dviInit(
}
#endif
-
-
diff --git a/drivers/staging/sm750fb/ddk750_hwi2c.c b/drivers/staging/sm750fb/ddk750_hwi2c.c
index 68716ef7cb06..fe814e4881f9 100644
--- a/drivers/staging/sm750fb/ddk750_hwi2c.c
+++ b/drivers/staging/sm750fb/ddk750_hwi2c.c
@@ -217,7 +217,7 @@ unsigned char sm750_hw_i2c_read_reg(
unsigned char reg
)
{
- unsigned char value = (0xFF);
+ unsigned char value = 0xFF;
if (hw_i2c_write_data(addr, 1, &reg) == 1)
hw_i2c_read_data(addr, 1, &value);
diff --git a/drivers/staging/sm750fb/ddk750_mode.c b/drivers/staging/sm750fb/ddk750_mode.c
index 1df7d57dea6d..bb673e18999b 100644
--- a/drivers/staging/sm750fb/ddk750_mode.c
+++ b/drivers/staging/sm750fb/ddk750_mode.c
@@ -12,7 +12,8 @@
* HW only supports 7 predefined pixel clocks, and clock select is
* in bit 29:27 of Display Control register.
*/
-static unsigned long displayControlAdjust_SM750LE(mode_parameter_t *pModeParam, unsigned long dispControl)
+static unsigned long displayControlAdjust_SM750LE(struct mode_parameter *pModeParam,
+ unsigned long dispControl)
{
unsigned long x, y;
@@ -28,9 +29,9 @@ static unsigned long displayControlAdjust_SM750LE(mode_parameter_t *pModeParam,
poke32(CRT_AUTO_CENTERING_TL, 0);
poke32(CRT_AUTO_CENTERING_BR,
- (((y - 1) << CRT_AUTO_CENTERING_BR_BOTTOM_SHIFT) &
- CRT_AUTO_CENTERING_BR_BOTTOM_MASK) |
- ((x - 1) & CRT_AUTO_CENTERING_BR_RIGHT_MASK));
+ (((y - 1) << CRT_AUTO_CENTERING_BR_BOTTOM_SHIFT) &
+ CRT_AUTO_CENTERING_BR_BOTTOM_MASK) |
+ ((x - 1) & CRT_AUTO_CENTERING_BR_RIGHT_MASK));
/*
* Assume common fields in dispControl have been properly set before
@@ -71,11 +72,9 @@ static unsigned long displayControlAdjust_SM750LE(mode_parameter_t *pModeParam,
return dispControl;
}
-
-
/* only timing related registers will be programed */
-static int programModeRegisters(mode_parameter_t *pModeParam,
- struct pll_value *pll)
+static int programModeRegisters(struct mode_parameter *pModeParam,
+ struct pll_value *pll)
{
int ret = 0;
int cnt = 0;
@@ -84,34 +83,38 @@ static int programModeRegisters(mode_parameter_t *pModeParam,
if (pll->clockType == SECONDARY_PLL) {
/* programe secondary pixel clock */
poke32(CRT_PLL_CTRL, sm750_format_pll_reg(pll));
- poke32(CRT_HORIZONTAL_TOTAL,
- (((pModeParam->horizontal_total - 1) <<
- CRT_HORIZONTAL_TOTAL_TOTAL_SHIFT) &
- CRT_HORIZONTAL_TOTAL_TOTAL_MASK) |
- ((pModeParam->horizontal_display_end - 1) &
- CRT_HORIZONTAL_TOTAL_DISPLAY_END_MASK));
-
- poke32(CRT_HORIZONTAL_SYNC,
- ((pModeParam->horizontal_sync_width <<
- CRT_HORIZONTAL_SYNC_WIDTH_SHIFT) &
- CRT_HORIZONTAL_SYNC_WIDTH_MASK) |
- ((pModeParam->horizontal_sync_start - 1) &
- CRT_HORIZONTAL_SYNC_START_MASK));
-
- poke32(CRT_VERTICAL_TOTAL,
- (((pModeParam->vertical_total - 1) <<
- CRT_VERTICAL_TOTAL_TOTAL_SHIFT) &
- CRT_VERTICAL_TOTAL_TOTAL_MASK) |
- ((pModeParam->vertical_display_end - 1) &
- CRT_VERTICAL_TOTAL_DISPLAY_END_MASK));
-
- poke32(CRT_VERTICAL_SYNC,
- ((pModeParam->vertical_sync_height <<
- CRT_VERTICAL_SYNC_HEIGHT_SHIFT) &
- CRT_VERTICAL_SYNC_HEIGHT_MASK) |
- ((pModeParam->vertical_sync_start - 1) &
- CRT_VERTICAL_SYNC_START_MASK));
+ tmp = ((pModeParam->horizontal_total - 1) <<
+ CRT_HORIZONTAL_TOTAL_TOTAL_SHIFT) &
+ CRT_HORIZONTAL_TOTAL_TOTAL_MASK;
+ tmp |= (pModeParam->horizontal_display_end - 1) &
+ CRT_HORIZONTAL_TOTAL_DISPLAY_END_MASK;
+
+ poke32(CRT_HORIZONTAL_TOTAL, tmp);
+
+ tmp = (pModeParam->horizontal_sync_width <<
+ CRT_HORIZONTAL_SYNC_WIDTH_SHIFT) &
+ CRT_HORIZONTAL_SYNC_WIDTH_MASK;
+ tmp |= (pModeParam->horizontal_sync_start - 1) &
+ CRT_HORIZONTAL_SYNC_START_MASK;
+
+ poke32(CRT_HORIZONTAL_SYNC, tmp);
+
+ tmp = ((pModeParam->vertical_total - 1) <<
+ CRT_VERTICAL_TOTAL_TOTAL_SHIFT) &
+ CRT_VERTICAL_TOTAL_TOTAL_MASK;
+ tmp |= (pModeParam->vertical_display_end - 1) &
+ CRT_VERTICAL_TOTAL_DISPLAY_END_MASK;
+
+ poke32(CRT_VERTICAL_TOTAL, tmp);
+
+ tmp = ((pModeParam->vertical_sync_height <<
+ CRT_VERTICAL_SYNC_HEIGHT_SHIFT)) &
+ CRT_VERTICAL_SYNC_HEIGHT_MASK;
+ tmp |= (pModeParam->vertical_sync_start - 1) &
+ CRT_VERTICAL_SYNC_START_MASK;
+
+ poke32(CRT_VERTICAL_SYNC, tmp);
tmp = DISPLAY_CTRL_TIMING | DISPLAY_CTRL_PLANE;
if (pModeParam->vertical_sync_polarity)
@@ -143,25 +146,25 @@ static int programModeRegisters(mode_parameter_t *pModeParam,
poke32(PANEL_HORIZONTAL_TOTAL, reg);
poke32(PANEL_HORIZONTAL_SYNC,
- ((pModeParam->horizontal_sync_width <<
- PANEL_HORIZONTAL_SYNC_WIDTH_SHIFT) &
- PANEL_HORIZONTAL_SYNC_WIDTH_MASK) |
- ((pModeParam->horizontal_sync_start - 1) &
- PANEL_HORIZONTAL_SYNC_START_MASK));
+ ((pModeParam->horizontal_sync_width <<
+ PANEL_HORIZONTAL_SYNC_WIDTH_SHIFT) &
+ PANEL_HORIZONTAL_SYNC_WIDTH_MASK) |
+ ((pModeParam->horizontal_sync_start - 1) &
+ PANEL_HORIZONTAL_SYNC_START_MASK));
poke32(PANEL_VERTICAL_TOTAL,
- (((pModeParam->vertical_total - 1) <<
- PANEL_VERTICAL_TOTAL_TOTAL_SHIFT) &
- PANEL_VERTICAL_TOTAL_TOTAL_MASK) |
- ((pModeParam->vertical_display_end - 1) &
- PANEL_VERTICAL_TOTAL_DISPLAY_END_MASK));
+ (((pModeParam->vertical_total - 1) <<
+ PANEL_VERTICAL_TOTAL_TOTAL_SHIFT) &
+ PANEL_VERTICAL_TOTAL_TOTAL_MASK) |
+ ((pModeParam->vertical_display_end - 1) &
+ PANEL_VERTICAL_TOTAL_DISPLAY_END_MASK));
poke32(PANEL_VERTICAL_SYNC,
- ((pModeParam->vertical_sync_height <<
- PANEL_VERTICAL_SYNC_HEIGHT_SHIFT) &
- PANEL_VERTICAL_SYNC_HEIGHT_MASK) |
- ((pModeParam->vertical_sync_start - 1) &
- PANEL_VERTICAL_SYNC_START_MASK));
+ ((pModeParam->vertical_sync_height <<
+ PANEL_VERTICAL_SYNC_HEIGHT_SHIFT) &
+ PANEL_VERTICAL_SYNC_HEIGHT_MASK) |
+ ((pModeParam->vertical_sync_start - 1) &
+ PANEL_VERTICAL_SYNC_START_MASK));
tmp = DISPLAY_CTRL_TIMING | DISPLAY_CTRL_PLANE;
if (pModeParam->vertical_sync_polarity)
@@ -202,7 +205,7 @@ static int programModeRegisters(mode_parameter_t *pModeParam,
return ret;
}
-int ddk750_setModeTiming(mode_parameter_t *parm, clock_type_t clock)
+int ddk750_setModeTiming(struct mode_parameter *parm, clock_type_t clock)
{
struct pll_value pll;
unsigned int uiActualPixelClk;
@@ -219,5 +222,3 @@ int ddk750_setModeTiming(mode_parameter_t *parm, clock_type_t clock)
programModeRegisters(parm, &pll);
return 0;
}
-
-
diff --git a/drivers/staging/sm750fb/ddk750_mode.h b/drivers/staging/sm750fb/ddk750_mode.h
index e846dc2c3d5c..d5eae36d85cb 100644
--- a/drivers/staging/sm750fb/ddk750_mode.h
+++ b/drivers/staging/sm750fb/ddk750_mode.h
@@ -3,27 +3,25 @@
#include "ddk750_chip.h"
-typedef enum _spolarity_t {
+enum spolarity {
POS = 0, /* positive */
NEG, /* negative */
-}
-spolarity_t;
+};
-
-typedef struct _mode_parameter_t {
+struct mode_parameter {
/* Horizontal timing. */
unsigned long horizontal_total;
unsigned long horizontal_display_end;
unsigned long horizontal_sync_start;
unsigned long horizontal_sync_width;
- spolarity_t horizontal_sync_polarity;
+ enum spolarity horizontal_sync_polarity;
/* Vertical timing. */
unsigned long vertical_total;
unsigned long vertical_display_end;
unsigned long vertical_sync_start;
unsigned long vertical_sync_height;
- spolarity_t vertical_sync_polarity;
+ enum spolarity vertical_sync_polarity;
/* Refresh timing. */
unsigned long pixel_clock;
@@ -31,11 +29,8 @@ typedef struct _mode_parameter_t {
unsigned long vertical_frequency;
/* Clock Phase. This clock phase only applies to Panel. */
- spolarity_t clock_phase_polarity;
-}
-mode_parameter_t;
-
-int ddk750_setModeTiming(mode_parameter_t *, clock_type_t);
-
+ enum spolarity clock_phase_polarity;
+};
+int ddk750_setModeTiming(struct mode_parameter *parm, clock_type_t clock);
#endif
diff --git a/drivers/staging/sm750fb/ddk750_power.c b/drivers/staging/sm750fb/ddk750_power.c
index 02ff6204ee1e..222ae1a06feb 100644
--- a/drivers/staging/sm750fb/ddk750_power.c
+++ b/drivers/staging/sm750fb/ddk750_power.c
@@ -24,7 +24,6 @@ static unsigned int get_power_mode(void)
return peek32(POWER_MODE_CTRL) & POWER_MODE_CTRL_MODE_MASK;
}
-
/*
* SM50x can operate in one of three modes: 0, 1 or Sleep.
* On hardware reset, power mode 0 is default.
@@ -80,8 +79,6 @@ void sm750_set_current_gate(unsigned int gate)
poke32(MODE0_GATE, gate);
}
-
-
/*
* This function enable/disable the 2D engine.
*/
@@ -145,5 +142,3 @@ void sm750_enable_i2c(unsigned int enable)
sm750_set_current_gate(gate);
}
-
-
diff --git a/drivers/staging/sm750fb/ddk750_power.h b/drivers/staging/sm750fb/ddk750_power.h
index 4274d74d47c1..44c4fc587e96 100644
--- a/drivers/staging/sm750fb/ddk750_power.h
+++ b/drivers/staging/sm750fb/ddk750_power.h
@@ -14,7 +14,7 @@ DPMS_t;
(peek32(MISC_CTRL) & ~MISC_CTRL_DAC_POWER_OFF) | (off)); \
}
-void ddk750_set_dpms(DPMS_t);
+void ddk750_set_dpms(DPMS_t state);
void sm750_set_power_mode(unsigned int powerMode);
void sm750_set_current_gate(unsigned int gate);
@@ -38,5 +38,4 @@ void sm750_enable_gpio(unsigned int enable);
*/
void sm750_enable_i2c(unsigned int enable);
-
#endif
diff --git a/drivers/staging/sm750fb/ddk750_reg.h b/drivers/staging/sm750fb/ddk750_reg.h
index 4ed6d8d7712a..f9b989b7a152 100644
--- a/drivers/staging/sm750fb/ddk750_reg.h
+++ b/drivers/staging/sm750fb/ddk750_reg.h
@@ -532,7 +532,6 @@
#define GPIO_INTERRUPT_STATUS_26 BIT(17)
#define GPIO_INTERRUPT_STATUS_25 BIT(16)
-
#define PANEL_DISPLAY_CTRL 0x080000
#define PANEL_DISPLAY_CTRL_RESERVED_MASK 0xc0f08000
#define PANEL_DISPLAY_CTRL_SELECT_SHIFT 28
@@ -1279,7 +1278,6 @@
#define I2C_DATA14 0x010052
#define I2C_DATA15 0x010053
-
#define ZV0_CAPTURE_CTRL 0x090000
#define ZV0_CAPTURE_CTRL_FIELD_INPUT BIT(27)
#define ZV0_CAPTURE_CTRL_SCAN BIT(26)
@@ -1445,7 +1443,6 @@
#define DEFAULT_I2C_SCL 30
#define DEFAULT_I2C_SDA 31
-
#define GPIO_DATA_SM750LE 0x020018
#define GPIO_DATA_SM750LE_1 BIT(1)
#define GPIO_DATA_SM750LE_0 BIT(0)
@@ -1454,5 +1451,4 @@
#define GPIO_DATA_DIRECTION_SM750LE_1 BIT(1)
#define GPIO_DATA_DIRECTION_SM750LE_0 BIT(0)
-
#endif
diff --git a/drivers/staging/sm750fb/sm750.c b/drivers/staging/sm750fb/sm750.c
index e49f8845f923..386d4adcd91d 100644
--- a/drivers/staging/sm750fb/sm750.c
+++ b/drivers/staging/sm750fb/sm750.c
@@ -419,7 +419,7 @@ static int lynxfb_suspend(struct pci_dev *pdev, pm_message_t mesg)
if (ret) {
dev_err(&pdev->dev,
"error:%d occurred in pci_save_state\n", ret);
- return ret;
+ goto lynxfb_suspend_err;
}
ret = pci_set_power_state(pdev, pci_choose_state(pdev, mesg));
@@ -427,11 +427,13 @@ static int lynxfb_suspend(struct pci_dev *pdev, pm_message_t mesg)
dev_err(&pdev->dev,
"error:%d occurred in pci_set_power_state\n",
ret);
- return ret;
+ goto lynxfb_suspend_err;
}
}
pdev->dev.power.power_state = mesg;
+
+lynxfb_suspend_err:
console_unlock();
return ret;
}
@@ -456,7 +458,7 @@ static int lynxfb_resume(struct pci_dev *pdev)
if (ret) {
dev_err(&pdev->dev,
"error:%d occurred in pci_set_power_state\n", ret);
- return ret;
+ goto lynxfb_resume_err;
}
if (pdev->dev.power.power_state.event != PM_EVENT_FREEZE) {
@@ -466,7 +468,7 @@ static int lynxfb_resume(struct pci_dev *pdev)
dev_err(&pdev->dev,
"error:%d occurred in pci_enable_device\n",
ret);
- return ret;
+ goto lynxfb_resume_err;
}
pci_set_master(pdev);
}
@@ -498,6 +500,8 @@ static int lynxfb_resume(struct pci_dev *pdev)
}
pdev->dev.power.power_state.event = PM_EVENT_RESUME;
+
+lynxfb_resume_err:
console_unlock();
return ret;
}
@@ -806,7 +810,6 @@ static int lynxfb_set_fbinfo(struct fb_info *info, int index)
}
for (i = 0; i < 3; i++) {
-
ret = fb_find_mode(var, info, g_fbmode[index],
pdb[i], cdb[i], NULL, 8);
@@ -834,15 +837,15 @@ static int lynxfb_set_fbinfo(struct fb_info *info, int index)
/* some member of info->var had been set by fb_find_mode */
- pr_info("Member of info->var is :\n\
- xres=%d\n\
- yres=%d\n\
- xres_virtual=%d\n\
- yres_virtual=%d\n\
- xoffset=%d\n\
- yoffset=%d\n\
- bits_per_pixel=%d\n \
- ...\n",
+ pr_info("Member of info->var is :\n"
+ "xres=%d\n"
+ "yres=%d\n"
+ "xres_virtual=%d\n"
+ "yres_virtual=%d\n"
+ "xoffset=%d\n"
+ "yoffset=%d\n"
+ "bits_per_pixel=%d\n"
+ " ...\n",
var->xres,
var->yres,
var->xres_virtual,
@@ -954,23 +957,23 @@ static void sm750fb_setup(struct sm750_dev *sm750_dev, char *src)
dev_info(&sm750_dev->pdev->dev, "opt=%s\n", opt);
dev_info(&sm750_dev->pdev->dev, "src=%s\n", src);
- if (!strncmp(opt, "swap", strlen("swap")))
+ if (!strncmp(opt, "swap", strlen("swap"))) {
swap = 1;
- else if (!strncmp(opt, "nocrt", strlen("nocrt")))
+ } else if (!strncmp(opt, "nocrt", strlen("nocrt"))) {
sm750_dev->nocrt = 1;
- else if (!strncmp(opt, "36bit", strlen("36bit")))
+ } else if (!strncmp(opt, "36bit", strlen("36bit"))) {
sm750_dev->pnltype = sm750_doubleTFT;
- else if (!strncmp(opt, "18bit", strlen("18bit")))
+ } else if (!strncmp(opt, "18bit", strlen("18bit"))) {
sm750_dev->pnltype = sm750_dualTFT;
- else if (!strncmp(opt, "24bit", strlen("24bit")))
+ } else if (!strncmp(opt, "24bit", strlen("24bit"))) {
sm750_dev->pnltype = sm750_24TFT;
- else if (!strncmp(opt, "nohwc0", strlen("nohwc0")))
+ } else if (!strncmp(opt, "nohwc0", strlen("nohwc0"))) {
g_hwcursor &= ~0x1;
- else if (!strncmp(opt, "nohwc1", strlen("nohwc1")))
+ } else if (!strncmp(opt, "nohwc1", strlen("nohwc1"))) {
g_hwcursor &= ~0x2;
- else if (!strncmp(opt, "nohwc", strlen("nohwc")))
+ } else if (!strncmp(opt, "nohwc", strlen("nohwc"))) {
g_hwcursor = 0;
- else {
+ } else {
if (!g_fbmode[0]) {
g_fbmode[0] = opt;
dev_info(&sm750_dev->pdev->dev,
@@ -1168,13 +1171,13 @@ static int __init lynxfb_setup(char *options)
*/
while ((opt = strsep(&options, ":")) != NULL) {
/* options that mean for any lynx chips are configured here */
- if (!strncmp(opt, "noaccel", strlen("noaccel")))
+ if (!strncmp(opt, "noaccel", strlen("noaccel"))) {
g_noaccel = 1;
- else if (!strncmp(opt, "nomtrr", strlen("nomtrr")))
+ } else if (!strncmp(opt, "nomtrr", strlen("nomtrr"))) {
g_nomtrr = 1;
- else if (!strncmp(opt, "dual", strlen("dual")))
+ } else if (!strncmp(opt, "dual", strlen("dual"))) {
g_dualview = 1;
- else {
+ } else {
strcat(tmp, opt);
tmp += strlen(opt);
if (options)
diff --git a/drivers/staging/sm750fb/sm750.h b/drivers/staging/sm750fb/sm750.h
index 28f4b9b4f95f..5b186dafedec 100644
--- a/drivers/staging/sm750fb/sm750.h
+++ b/drivers/staging/sm750fb/sm750.h
@@ -177,15 +177,15 @@ struct lynxfb_par {
static inline unsigned long ps_to_hz(unsigned int psvalue)
{
- unsigned long long numerator = 1000*1000*1000*1000ULL;
+ unsigned long long numerator = 1000 * 1000 * 1000 * 1000ULL;
/* 10^12 / picosecond period gives frequency in Hz */
do_div(numerator, psvalue);
return (unsigned long)numerator;
}
int hw_sm750_map(struct sm750_dev *sm750_dev, struct pci_dev *pdev);
-int hw_sm750_inithw(struct sm750_dev*, struct pci_dev *);
-void hw_sm750_initAccel(struct sm750_dev *);
+int hw_sm750_inithw(struct sm750_dev *sm750_dev, struct pci_dev *pdev);
+void hw_sm750_initAccel(struct sm750_dev *sm750_dev);
int hw_sm750_deWait(void);
int hw_sm750le_deWait(void);
diff --git a/drivers/staging/sm750fb/sm750_accel.c b/drivers/staging/sm750fb/sm750_accel.c
index af0db5789c53..6be86e4963be 100644
--- a/drivers/staging/sm750fb/sm750_accel.c
+++ b/drivers/staging/sm750fb/sm750_accel.c
@@ -144,11 +144,9 @@ unsigned int height, /* width and height of rectangle in pixel value */
unsigned int rop2) /* ROP value */
{
unsigned int nDirection, de_ctrl;
- int opSign;
nDirection = LEFT_TO_RIGHT;
/* Direction of ROP2 operation: 1 = Left to Right, (-1) = Right to Left */
- opSign = 1;
de_ctrl = 0;
/* If source and destination are the same surface, need to check for overlay cases */
@@ -212,7 +210,6 @@ unsigned int rop2) /* ROP value */
sy += height - 1;
dx += width - 1;
dy += height - 1;
- opSign = (-1);
}
/*
@@ -259,8 +256,6 @@ unsigned int rop2) /* ROP value */
if (accel->de_wait() != 0)
return -1;
- {
-
write_dpr(accel, DE_SOURCE,
((sx << DE_SOURCE_X_K1_SHIFT) & DE_SOURCE_X_K1_MASK) |
(sy & DE_SOURCE_Y_K2_MASK)); /* dpr0 */
@@ -276,8 +271,6 @@ unsigned int rop2) /* ROP value */
DE_CONTROL_COMMAND_BITBLT | DE_CONTROL_STATUS;
write_dpr(accel, DE_CONTROL, de_ctrl); /* dpr0c */
- }
-
return 0;
}
@@ -384,7 +377,7 @@ int sm750_hw_imageblit(struct lynx_accel *accel,
/* Write MONO data (line by line) to 2D Engine data port */
for (i = 0; i < height; i++) {
/* For each line, send the data in chunks of 4 bytes */
- for (j = 0; j < (ul4BytesPerScan/4); j++)
+ for (j = 0; j < (ul4BytesPerScan / 4); j++)
write_dpPort(accel, *(unsigned int *)(pSrcbuf + (j * 4)));
if (ulBytesRemain) {
diff --git a/drivers/staging/sm750fb/sm750_cursor.c b/drivers/staging/sm750fb/sm750_cursor.c
index b1651b0d2034..b64dc8a4a8fb 100644
--- a/drivers/staging/sm750fb/sm750_cursor.c
+++ b/drivers/staging/sm750fb/sm750_cursor.c
@@ -54,6 +54,7 @@ void sm750_hw_cursor_enable(struct lynx_cursor *cursor)
reg = (cursor->offset & HWC_ADDRESS_ADDRESS_MASK) | HWC_ADDRESS_ENABLE;
poke32(HWC_ADDRESS, reg);
}
+
void sm750_hw_cursor_disable(struct lynx_cursor *cursor)
{
poke32(HWC_ADDRESS, 0);
@@ -65,15 +66,17 @@ void sm750_hw_cursor_setSize(struct lynx_cursor *cursor,
cursor->w = w;
cursor->h = h;
}
+
void sm750_hw_cursor_setPos(struct lynx_cursor *cursor,
int x, int y)
{
u32 reg;
- reg = (((y << HWC_LOCATION_Y_SHIFT) & HWC_LOCATION_Y_MASK) |
- (x & HWC_LOCATION_X_MASK));
+ reg = ((y << HWC_LOCATION_Y_SHIFT) & HWC_LOCATION_Y_MASK) |
+ (x & HWC_LOCATION_X_MASK);
poke32(HWC_LOCATION, reg);
}
+
void sm750_hw_cursor_setColor(struct lynx_cursor *cursor,
u32 fg, u32 bg)
{
@@ -111,14 +114,14 @@ void sm750_hw_cursor_setData(struct lynx_cursor *cursor,
data = 0;
for (j = 0; j < 8; j++) {
- if (mask & (0x80>>j)) {
+ if (mask & (0x80 >> j)) {
if (rop == ROP_XOR)
opr = mask ^ color;
else
opr = mask & color;
/* 2 stands for forecolor and 1 for backcolor */
- data |= ((opr & (0x80>>j))?2:1)<<(j*2);
+ data |= ((opr & (0x80 >> j)) ? 2 : 1) << (j * 2);
}
}
iowrite16(data, pbuffer);
@@ -131,10 +134,7 @@ void sm750_hw_cursor_setData(struct lynx_cursor *cursor,
} else {
pbuffer += sizeof(u16);
}
-
}
-
-
}
@@ -165,19 +165,18 @@ void sm750_hw_cursor_setData2(struct lynx_cursor *cursor,
data = 0;
for (j = 0; j < 8; j++) {
- if (mask & (1<<j))
- data |= ((color & (1<<j))?1:2)<<(j*2);
+ if (mask & (1 << j))
+ data |= ((color & (1 << j)) ? 1 : 2) << (j * 2);
}
iowrite16(data, pbuffer);
/* assume pitch is 1,2,4,8,...*/
- if (!(i&(pitch-1))) {
+ if (!(i & (pitch - 1))) {
/* need a return */
pstart += offset;
pbuffer = pstart;
} else {
pbuffer += sizeof(u16);
}
-
}
}
diff --git a/drivers/staging/sm750fb/sm750_hw.c b/drivers/staging/sm750fb/sm750_hw.c
index fab3fc9c8330..baf1bbdc92ff 100644
--- a/drivers/staging/sm750fb/sm750_hw.c
+++ b/drivers/staging/sm750fb/sm750_hw.c
@@ -252,7 +252,7 @@ int hw_sm750_crtc_setMode(struct lynxfb_crtc *crtc,
{
int ret, fmt;
u32 reg;
- mode_parameter_t modparm;
+ struct mode_parameter modparm;
clock_type_t clock;
struct sm750_dev *sm750_dev;
struct lynxfb_par *par;
diff --git a/drivers/staging/speakup/buffers.c b/drivers/staging/speakup/buffers.c
index 723d5df44221..f459e4004bfa 100644
--- a/drivers/staging/speakup/buffers.c
+++ b/drivers/staging/speakup/buffers.c
@@ -7,10 +7,10 @@
#define SYNTH_BUF_SIZE 8192 /* currently 8K bytes */
-static u_char synth_buffer[SYNTH_BUF_SIZE]; /* guess what this is for! */
-static u_char *buff_in = synth_buffer;
-static u_char *buff_out = synth_buffer;
-static u_char *buffer_end = synth_buffer + SYNTH_BUF_SIZE - 1;
+static u16 synth_buffer[SYNTH_BUF_SIZE]; /* guess what this is for! */
+static u16 *buff_in = synth_buffer;
+static u16 *buff_out = synth_buffer;
+static u16 *buffer_end = synth_buffer + SYNTH_BUF_SIZE - 1;
/* These try to throttle applications by stopping the TTYs
* Note: we need to make sure that we will restart them eventually, which is
@@ -44,13 +44,13 @@ static void speakup_stop_ttys(void)
static int synth_buffer_free(void)
{
- int bytes_free;
+ int chars_free;
if (buff_in >= buff_out)
- bytes_free = SYNTH_BUF_SIZE - (buff_in - buff_out);
+ chars_free = SYNTH_BUF_SIZE - (buff_in - buff_out);
else
- bytes_free = buff_out - buff_in;
- return bytes_free;
+ chars_free = buff_out - buff_in;
+ return chars_free;
}
int synth_buffer_empty(void)
@@ -59,7 +59,7 @@ int synth_buffer_empty(void)
}
EXPORT_SYMBOL_GPL(synth_buffer_empty);
-void synth_buffer_add(char ch)
+void synth_buffer_add(u16 ch)
{
if (!synth->alive) {
/* This makes sure that we won't stop TTYs if there is no synth
@@ -78,9 +78,9 @@ void synth_buffer_add(char ch)
buff_in = synth_buffer;
}
-char synth_buffer_getc(void)
+u16 synth_buffer_getc(void)
{
- char ch;
+ u16 ch;
if (buff_out == buff_in)
return 0;
@@ -91,7 +91,7 @@ char synth_buffer_getc(void)
}
EXPORT_SYMBOL_GPL(synth_buffer_getc);
-char synth_buffer_peek(void)
+u16 synth_buffer_peek(void)
{
if (buff_out == buff_in)
return 0;
@@ -99,6 +99,18 @@ char synth_buffer_peek(void)
}
EXPORT_SYMBOL_GPL(synth_buffer_peek);
+void synth_buffer_skip_nonlatin1(void)
+{
+ while (buff_out != buff_in) {
+ if (*buff_out < 0x100)
+ return;
+ buff_out++;
+ if (buff_out > buffer_end)
+ buff_out = synth_buffer;
+ }
+}
+EXPORT_SYMBOL_GPL(synth_buffer_skip_nonlatin1);
+
void synth_buffer_clear(void)
{
buff_in = synth_buffer;
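
[Illustrative aside, not part of the patch] The buffers.c hunks widen the synthesizer queue from u_char to u16 so characters above 0xff can be buffered, and add synth_buffer_skip_nonlatin1() to drop a leading run of such characters for synths that cannot speak them. A toy userspace model of the same ring-buffer logic (throttling and overflow handling omitted; names shortened):

#include <stdint.h>
#include <stdio.h>

#define BUF_SIZE 8

static uint16_t synth_buf[BUF_SIZE];
static uint16_t *buf_in  = synth_buf;
static uint16_t *buf_out = synth_buf;
static uint16_t *buf_end = synth_buf + BUF_SIZE - 1;

static void buffer_add(uint16_t ch)
{
	*buf_in++ = ch;
	if (buf_in > buf_end)		/* wrap past the last slot */
		buf_in = synth_buf;
}

static uint16_t buffer_getc(void)
{
	uint16_t ch;

	if (buf_out == buf_in)
		return 0;		/* empty */
	ch = *buf_out++;
	if (buf_out > buf_end)
		buf_out = synth_buf;
	return ch;
}

/* Mirrors synth_buffer_skip_nonlatin1(): discard queued chars >= 0x100
 * until a Latin-1 character (or the end of the queue) is reached. */
static void buffer_skip_nonlatin1(void)
{
	while (buf_out != buf_in) {
		if (*buf_out < 0x100)
			return;
		buf_out++;
		if (buf_out > buf_end)
			buf_out = synth_buf;
	}
}

int main(void)
{
	buffer_add('a');
	buffer_add(0x20ac);			/* not representable in Latin-1 */
	buffer_add('b');
	printf("%c\n", buffer_getc());		/* a */
	buffer_skip_nonlatin1();		/* drops 0x20ac */
	printf("%c\n", buffer_getc());		/* b */
	return 0;
}
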
diff --git a/drivers/staging/speakup/fakekey.c b/drivers/staging/speakup/fakekey.c
index d76da0a1382c..294c74b47224 100644
--- a/drivers/staging/speakup/fakekey.c
+++ b/drivers/staging/speakup/fakekey.c
@@ -56,7 +56,7 @@ int speakup_add_virtual_keyboard(void)
void speakup_remove_virtual_keyboard(void)
{
- if (virt_keyboard != NULL) {
+ if (virt_keyboard) {
input_unregister_device(virt_keyboard);
virt_keyboard = NULL;
}
diff --git a/drivers/staging/speakup/i18n.c b/drivers/staging/speakup/i18n.c
index 2f9b3df7f78d..7809867f5d28 100644
--- a/drivers/staging/speakup/i18n.c
+++ b/drivers/staging/speakup/i18n.c
@@ -407,12 +407,12 @@ static char *next_specifier(char *input)
int found = 0;
char *next_percent = input;
- while ((next_percent != NULL) && !found) {
+ while (next_percent && !found) {
next_percent = strchr(next_percent, '%');
- if (next_percent != NULL) {
+ if (next_percent) {
/* skip over doubled percent signs */
- while ((next_percent[0] == '%')
- && (next_percent[1] == '%'))
+ while (next_percent[0] == '%' &&
+ next_percent[1] == '%')
next_percent += 2;
if (*next_percent == '%')
found = 1;
@@ -476,19 +476,20 @@ static char *find_specifier_end(char *input)
/*
* Function: compare_specifiers
* Compare the format specifiers pointed to by *input1 and *input2.
- * Return 1 if they are the same, 0 otherwise. Advance *input1 and *input2
- * so that they point to the character following the end of the specifier.
+ * Return true if they are the same, false otherwise.
+ * Advance *input1 and *input2 so that they point to the character following
+ * the end of the specifier.
*/
-static int compare_specifiers(char **input1, char **input2)
+static bool compare_specifiers(char **input1, char **input2)
{
- int same = 0;
+ bool same = false;
char *end1 = find_specifier_end(*input1);
char *end2 = find_specifier_end(*input2);
size_t length1 = end1 - *input1;
size_t length2 = end2 - *input2;
if ((length1 == length2) && !memcmp(*input1, *input2, length1))
- same = 1;
+ same = true;
*input1 = end1;
*input2 = end2;
@@ -499,12 +500,12 @@ static int compare_specifiers(char **input1, char **input2)
* Function: fmt_validate
* Check that two format strings contain the same number of format specifiers,
* and that the order of specifiers is the same in both strings.
- * Return 1 if the condition holds, 0 if it doesn't.
+ * Return true if the condition holds, false if it doesn't.
*/
-static int fmt_validate(char *template, char *user)
+static bool fmt_validate(char *template, char *user)
{
- int valid = 1;
- int still_comparing = 1;
+ bool valid = true;
+ bool still_comparing = true;
char *template_ptr = template;
char *user_ptr = user;
@@ -516,10 +517,10 @@ static int fmt_validate(char *template, char *user)
valid = compare_specifiers(&template_ptr, &user_ptr);
} else {
/* No more format specifiers in one or both strings. */
- still_comparing = 0;
+ still_comparing = false;
/* See if one has more specifiers than the other. */
if (template_ptr || user_ptr)
- valid = 0;
+ valid = false;
}
}
return valid;
@@ -540,34 +541,30 @@ static int fmt_validate(char *template, char *user)
*/
ssize_t spk_msg_set(enum msg_index_t index, char *text, size_t length)
{
- int rc = 0;
char *newstr = NULL;
unsigned long flags;
- if ((index >= MSG_FIRST_INDEX) && (index < MSG_LAST_INDEX)) {
- newstr = kmalloc(length + 1, GFP_KERNEL);
- if (newstr) {
- memcpy(newstr, text, length);
- newstr[length] = '\0';
- if ((index >= MSG_FORMATTED_START
- && index <= MSG_FORMATTED_END)
- && !fmt_validate(speakup_default_msgs[index],
- newstr)) {
- kfree(newstr);
- return -EINVAL;
- }
- spin_lock_irqsave(&speakup_info.spinlock, flags);
- if (speakup_msgs[index] != speakup_default_msgs[index])
- kfree(speakup_msgs[index]);
- speakup_msgs[index] = newstr;
- spin_unlock_irqrestore(&speakup_info.spinlock, flags);
- } else {
- rc = -ENOMEM;
- }
- } else {
- rc = -EINVAL;
+ if ((index < MSG_FIRST_INDEX) || (index >= MSG_LAST_INDEX))
+ return -EINVAL;
+
+ newstr = kmalloc(length + 1, GFP_KERNEL);
+ if (!newstr)
+ return -ENOMEM;
+
+ memcpy(newstr, text, length);
+ newstr[length] = '\0';
+ if (index >= MSG_FORMATTED_START &&
+ index <= MSG_FORMATTED_END &&
+ !fmt_validate(speakup_default_msgs[index], newstr)) {
+ kfree(newstr);
+ return -EINVAL;
}
- return rc;
+ spin_lock_irqsave(&speakup_info.spinlock, flags);
+ if (speakup_msgs[index] != speakup_default_msgs[index])
+ kfree(speakup_msgs[index]);
+ speakup_msgs[index] = newstr;
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
+ return 0;
}
/*
@@ -607,7 +604,7 @@ void spk_reset_msg_group(struct msg_group_t *group)
void spk_initialize_msgs(void)
{
memcpy(speakup_msgs, speakup_default_msgs,
- sizeof(speakup_default_msgs));
+ sizeof(speakup_default_msgs));
}
/* Free user-supplied strings when module is unloaded: */
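
[Illustrative aside, not part of the patch] The spk_msg_set() rewrite above flattens the nested if/else blocks and the rc accumulator into guard clauses that return as soon as validation or allocation fails, so the success path reads straight down. A hypothetical userspace version of the same shape (names and the length limit are invented for illustration):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *stored_msg;

static int msg_set(const char *text, size_t length, size_t max_len)
{
	char *newstr;

	if (length > max_len)		/* validate first ... */
		return -EINVAL;

	newstr = malloc(length + 1);	/* ... then allocate ... */
	if (!newstr)
		return -ENOMEM;

	memcpy(newstr, text, length);
	newstr[length] = '\0';

	free(stored_msg);		/* ... and only then commit. */
	stored_msg = newstr;
	return 0;
}

int main(void)
{
	printf("%d\n", msg_set("hello", 5, 64));	/* 0 */
	printf("%s\n", stored_msg);			/* hello */
	printf("%d\n", msg_set("too long", 8, 4));	/* -EINVAL */
	free(stored_msg);
	return 0;
}
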
diff --git a/drivers/staging/speakup/keyhelp.c b/drivers/staging/speakup/keyhelp.c
index ce94cb13e256..4e6e5daba50c 100644
--- a/drivers/staging/speakup/keyhelp.c
+++ b/drivers/staging/speakup/keyhelp.c
@@ -117,7 +117,7 @@ static void say_key(int key)
}
if ((key > 0) && (key <= num_key_names))
synth_printf(" %s\n",
- spk_msg_get(MSG_KEYNAMES_START + (key - 1)));
+ spk_msg_get(MSG_KEYNAMES_START + (key - 1)));
}
static int help_init(void)
@@ -163,17 +163,15 @@ int spk_handle_help(struct vc_data *vc, u_char type, u_char ch, u_short key)
}
cur_item = letter_offsets[ch - 'a'];
} else if (type == KT_CUR) {
- if (ch == 0
- && (MSG_FUNCNAMES_START + cur_item + 1) <=
- MSG_FUNCNAMES_END)
+ if (ch == 0 &&
+ (MSG_FUNCNAMES_START + cur_item + 1) <= MSG_FUNCNAMES_END)
cur_item++;
else if (ch == 3 && cur_item > 0)
cur_item--;
else
return -1;
- } else if (type == KT_SPKUP
- && ch == SPEAKUP_HELP
- && !spk_special_handler) {
+ } else if (type == KT_SPKUP && ch == SPEAKUP_HELP &&
+ !spk_special_handler) {
spk_special_handler = spk_handle_help;
synth_printf("%s\n", spk_msg_get(MSG_HELP_INFO));
build_key_data(); /* rebuild each time in case new mapping */
@@ -182,7 +180,7 @@ int spk_handle_help(struct vc_data *vc, u_char type, u_char ch, u_short key)
name = NULL;
if ((type != KT_SPKUP) && (key > 0) && (key <= num_key_names)) {
synth_printf("%s\n",
- spk_msg_get(MSG_KEYNAMES_START + key - 1));
+ spk_msg_get(MSG_KEYNAMES_START + key - 1));
return 1;
}
for (i = 0; funcvals[i] != 0 && !name; i++) {
diff --git a/drivers/staging/speakup/kobjects.c b/drivers/staging/speakup/kobjects.c
index 4e7ebc306488..ca85476e3ff7 100644
--- a/drivers/staging/speakup/kobjects.c
+++ b/drivers/staging/speakup/kobjects.c
@@ -26,7 +26,7 @@
* This is called when a user reads the characters or chartab sys file.
*/
static ssize_t chars_chartab_show(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf)
+ struct kobj_attribute *attr, char *buf)
{
int i;
int len = 0;
@@ -79,7 +79,7 @@ static ssize_t chars_chartab_show(struct kobject *kobj,
* character descriptions or chartab entries.
*/
static void report_char_chartab_status(int reset, int received, int used,
- int rejected, int do_characters)
+ int rejected, int do_characters)
{
static char const *object_type[] = {
"character class entries",
@@ -92,8 +92,8 @@ static void report_char_chartab_status(int reset, int received, int used,
pr_info("%s reset to defaults\n", object_type[do_characters]);
} else if (received) {
len = snprintf(buf, sizeof(buf),
- " updated %d of %d %s\n",
- used, received, object_type[do_characters]);
+ " updated %d of %d %s\n",
+ used, received, object_type[do_characters]);
if (rejected)
snprintf(buf + (len - 1), sizeof(buf) - (len - 1),
" with %d reject%s\n",
@@ -106,9 +106,10 @@ static void report_char_chartab_status(int reset, int received, int used,
* This is called when a user changes the characters or chartab parameters.
*/
static ssize_t chars_chartab_store(struct kobject *kobj,
- struct kobj_attribute *attr, const char *buf, size_t count)
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
{
- char *cp = (char *) buf;
+ char *cp = (char *)buf;
char *end = cp + count; /* the null at the end of the buffer */
char *linefeed = NULL;
char keyword[MAX_DESC_LEN + 1];
@@ -129,7 +130,6 @@ static ssize_t chars_chartab_store(struct kobject *kobj,
spin_lock_irqsave(&speakup_info.spinlock, flags);
while (cp < end) {
-
while ((cp < end) && (*cp == ' ' || *cp == '\t'))
cp++;
@@ -214,7 +214,7 @@ static ssize_t chars_chartab_store(struct kobject *kobj,
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
report_char_chartab_status(reset, received, used, rejected,
- do_characters);
+ do_characters);
return retval;
}
@@ -222,7 +222,7 @@ static ssize_t chars_chartab_store(struct kobject *kobj,
* This is called when a user reads the keymap parameter.
*/
static ssize_t keymap_show(struct kobject *kobj, struct kobj_attribute *attr,
- char *buf)
+ char *buf)
{
char *cp = buf;
int i;
@@ -258,7 +258,7 @@ static ssize_t keymap_show(struct kobject *kobj, struct kobj_attribute *attr,
* This is called when a user changes the keymap parameter.
*/
static ssize_t keymap_store(struct kobject *kobj, struct kobj_attribute *attr,
- const char *buf, size_t count)
+ const char *buf, size_t count)
{
int i;
ssize_t ret = count;
@@ -292,9 +292,9 @@ static ssize_t keymap_store(struct kobject *kobj, struct kobj_attribute *attr,
i *= (int)cp1[-1] + 1;
i += 2; /* 0 and last map ver */
if (cp1[-3] != KEY_MAP_VER || cp1[-1] > 10 ||
- i+SHIFT_TBL_SIZE+4 >= sizeof(spk_key_buf)) {
+ i + SHIFT_TBL_SIZE + 4 >= sizeof(spk_key_buf)) {
pr_warn("i %d %d %d %d\n", i,
- (int)cp1[-3], (int)cp1[-2], (int)cp1[-1]);
+ (int)cp1[-3], (int)cp1[-2], (int)cp1[-1]);
kfree(in_buff);
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
return -EINVAL;
@@ -308,7 +308,7 @@ static ssize_t keymap_store(struct kobject *kobj, struct kobj_attribute *attr,
if (i != 0 || cp1[-1] != KEY_MAP_VER || cp1[-2] != 0) {
ret = -EINVAL;
pr_warn("end %d %d %d %d\n", i,
- (int)cp1[-3], (int)cp1[-2], (int)cp1[-1]);
+ (int)cp1[-3], (int)cp1[-2], (int)cp1[-1]);
} else {
if (spk_set_key_info(in_buff, spk_key_buf)) {
spk_set_key_info(spk_key_defaults, spk_key_buf);
@@ -325,7 +325,7 @@ static ssize_t keymap_store(struct kobject *kobj, struct kobj_attribute *attr,
* This is called when a user changes the value of the silent parameter.
*/
static ssize_t silent_store(struct kobject *kobj, struct kobj_attribute *attr,
- const char *buf, size_t count)
+ const char *buf, size_t count)
{
int len;
struct vc_data *vc = vc_cons[fg_console].d;
@@ -344,7 +344,7 @@ static ssize_t silent_store(struct kobject *kobj, struct kobj_attribute *attr,
return -EINVAL;
}
spin_lock_irqsave(&speakup_info.spinlock, flags);
- if (ch&2) {
+ if (ch & 2) {
shut = 1;
spk_do_flush();
} else {
@@ -364,7 +364,7 @@ static ssize_t silent_store(struct kobject *kobj, struct kobj_attribute *attr,
* This is called when a user reads the synth setting.
*/
static ssize_t synth_show(struct kobject *kobj, struct kobj_attribute *attr,
- char *buf)
+ char *buf)
{
int rv;
@@ -379,7 +379,7 @@ static ssize_t synth_show(struct kobject *kobj, struct kobj_attribute *attr,
* This is called when a user requests to change synthesizers.
*/
static ssize_t synth_store(struct kobject *kobj, struct kobj_attribute *attr,
- const char *buf, size_t count)
+ const char *buf, size_t count)
{
int len;
char new_synth_name[10];
@@ -392,7 +392,7 @@ static ssize_t synth_store(struct kobject *kobj, struct kobj_attribute *attr,
len--;
new_synth_name[len] = '\0';
spk_strlwr(new_synth_name);
- if ((synth != NULL) && (!strcmp(new_synth_name, synth->name))) {
+ if (synth && !strcmp(new_synth_name, synth->name)) {
pr_warn("%s already in use\n", new_synth_name);
} else if (synth_init(new_synth_name) != 0) {
pr_warn("failed to init synth %s\n", new_synth_name);
@@ -405,7 +405,8 @@ static ssize_t synth_store(struct kobject *kobj, struct kobj_attribute *attr,
* This is called when text is sent to the synth via the synth_direct file.
*/
static ssize_t synth_direct_store(struct kobject *kobj,
- struct kobj_attribute *attr, const char *buf, size_t count)
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
{
u_char tmp[256];
int len;
@@ -435,7 +436,7 @@ static ssize_t synth_direct_store(struct kobject *kobj,
* This function is called when a user reads the version.
*/
static ssize_t version_show(struct kobject *kobj, struct kobj_attribute *attr,
- char *buf)
+ char *buf)
{
char *cp;
@@ -451,7 +452,7 @@ static ssize_t version_show(struct kobject *kobj, struct kobj_attribute *attr,
* This is called when a user reads the punctuation settings.
*/
static ssize_t punc_show(struct kobject *kobj, struct kobj_attribute *attr,
- char *buf)
+ char *buf)
{
int i;
char *cp = buf;
@@ -471,27 +472,27 @@ static ssize_t punc_show(struct kobject *kobj, struct kobj_attribute *attr,
var = spk_get_punc_var(p_header->var_id);
if (!var) {
pr_warn("var is null, p_header->var_id is %i\n",
- p_header->var_id);
+ p_header->var_id);
return -EINVAL;
}
spin_lock_irqsave(&speakup_info.spinlock, flags);
- pb = (struct st_bits_data *) &spk_punc_info[var->value];
+ pb = (struct st_bits_data *)&spk_punc_info[var->value];
mask = pb->mask;
for (i = 33; i < 128; i++) {
- if (!(spk_chartab[i]&mask))
+ if (!(spk_chartab[i] & mask))
continue;
*cp++ = (char)i;
}
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
- return cp-buf;
+ return cp - buf;
}
/*
* This is called when a user changes the punctuation settings.
*/
static ssize_t punc_store(struct kobject *kobj, struct kobj_attribute *attr,
- const char *buf, size_t count)
+ const char *buf, size_t count)
{
int x;
struct st_var_header *p_header;
@@ -513,7 +514,7 @@ static ssize_t punc_store(struct kobject *kobj, struct kobj_attribute *attr,
var = spk_get_punc_var(p_header->var_id);
if (!var) {
pr_warn("var is null, p_header->var_id is %i\n",
- p_header->var_id);
+ p_header->var_id);
return -EINVAL;
}
@@ -538,7 +539,7 @@ static ssize_t punc_store(struct kobject *kobj, struct kobj_attribute *attr,
* This function is called when a user reads one of the variable parameters.
*/
ssize_t spk_var_show(struct kobject *kobj, struct kobj_attribute *attr,
- char *buf)
+ char *buf)
{
int rv = 0;
struct st_var_header *param;
@@ -553,7 +554,7 @@ ssize_t spk_var_show(struct kobject *kobj, struct kobj_attribute *attr,
return -EINVAL;
spin_lock_irqsave(&speakup_info.spinlock, flags);
- var = (struct var_t *) param->data;
+ var = (struct var_t *)param->data;
switch (param->var_type) {
case VAR_NUM:
case VAR_TIME:
@@ -575,14 +576,14 @@ ssize_t spk_var_show(struct kobject *kobj, struct kobj_attribute *attr,
*cp1++ = '"';
*cp1++ = '\n';
*cp1 = '\0';
- rv = cp1-buf;
+ rv = cp1 - buf;
} else {
rv = sprintf(buf, "\"\"\n");
}
break;
default:
rv = sprintf(buf, "Bad parameter %s, type %i\n",
- param->name, param->var_type);
+ param->name, param->var_type);
break;
}
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
@@ -594,7 +595,7 @@ EXPORT_SYMBOL_GPL(spk_var_show);
* Used to reset either default_pitch or default_vol.
*/
static inline void spk_reset_default_value(char *header_name,
- int *synth_default_value, int idx)
+ int *synth_default_value, int idx)
{
struct st_var_header *param;
@@ -614,7 +615,7 @@ static inline void spk_reset_default_value(char *header_name,
* variable parameters.
*/
ssize_t spk_var_store(struct kobject *kobj, struct kobj_attribute *attr,
- const char *buf, size_t count)
+ const char *buf, size_t count)
{
struct st_var_header *param;
int ret;
@@ -663,9 +664,9 @@ ssize_t spk_var_store(struct kobject *kobj, struct kobj_attribute *attr,
var_data = param->data;
value = var_data->u.n.value;
spk_reset_default_value("pitch", synth->default_pitch,
- value);
+ value);
spk_reset_default_value("vol", synth->default_vol,
- value);
+ value);
}
break;
case VAR_STRING:
@@ -680,7 +681,7 @@ ssize_t spk_var_store(struct kobject *kobj, struct kobj_attribute *attr,
ret = spk_set_string_var(cp, param, len);
if (ret == -E2BIG)
pr_warn("value too long for %s\n",
- param->name);
+ param->name);
break;
default:
pr_warn("%s unknown type %d\n",
@@ -700,7 +701,7 @@ EXPORT_SYMBOL_GPL(spk_var_store);
*/
static ssize_t message_show_helper(char *buf, enum msg_index_t first,
- enum msg_index_t last)
+ enum msg_index_t last)
{
size_t bufsize = PAGE_SIZE;
char *buf_pointer = buf;
@@ -713,7 +714,7 @@ static ssize_t message_show_helper(char *buf, enum msg_index_t first,
if (bufsize <= 1)
break;
printed = scnprintf(buf_pointer, bufsize, "%d\t%s\n",
- index, spk_msg_get(cursor));
+ index, spk_msg_get(cursor));
buf_pointer += printed;
bufsize -= printed;
}
@@ -722,7 +723,7 @@ static ssize_t message_show_helper(char *buf, enum msg_index_t first,
}
static void report_msg_status(int reset, int received, int used,
- int rejected, char *groupname)
+ int rejected, char *groupname)
{
int len;
char buf[160];
@@ -743,9 +744,9 @@ static void report_msg_status(int reset, int received, int used,
}
static ssize_t message_store_helper(const char *buf, size_t count,
- struct msg_group_t *group)
+ struct msg_group_t *group)
{
- char *cp = (char *) buf;
+ char *cp = (char *)buf;
char *end = cp + count;
char *linefeed = NULL;
char *temp = NULL;
@@ -762,7 +763,6 @@ static ssize_t message_store_helper(const char *buf, size_t count,
enum msg_index_t curmessage;
while (cp < end) {
-
while ((cp < end) && (*cp == ' ' || *cp == '\t'))
cp++;
@@ -828,13 +828,15 @@ static ssize_t message_store_helper(const char *buf, size_t count,
}
static ssize_t message_show(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf)
+ struct kobj_attribute *attr, char *buf)
{
ssize_t retval = 0;
struct msg_group_t *group = spk_find_msg_group(attr->attr.name);
unsigned long flags;
- BUG_ON(!group);
+ if (WARN_ON(!group))
+ return -EINVAL;
+
spin_lock_irqsave(&speakup_info.spinlock, flags);
retval = message_show_helper(buf, group->start, group->end);
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
@@ -842,11 +844,13 @@ static ssize_t message_show(struct kobject *kobj,
}
static ssize_t message_store(struct kobject *kobj, struct kobj_attribute *attr,
- const char *buf, size_t count)
+ const char *buf, size_t count)
{
struct msg_group_t *group = spk_find_msg_group(attr->attr.name);
- BUG_ON(!group);
+ if (WARN_ON(!group))
+ return -EINVAL;
+
return message_store_helper(buf, count, group);
}
diff --git a/drivers/staging/speakup/main.c b/drivers/staging/speakup/main.c
index c2f70ef5b9b3..d2ad596850f3 100644
--- a/drivers/staging/speakup/main.c
+++ b/drivers/staging/speakup/main.c
@@ -67,7 +67,7 @@ MODULE_PARM_DESC(quiet, "Do not announce when the synthesizer is found.");
special_func spk_special_handler;
short spk_pitch_shift, synth_flags;
-static char buf[256];
+static u16 buf[256];
int spk_attrib_bleep, spk_bleeps, spk_bleep_time = 10;
int spk_no_intr, spk_spell_delay;
int spk_key_echo, spk_say_word_ctl;
@@ -108,11 +108,12 @@ enum {
CT_Window,
CT_Max
};
+
#define read_all_mode CT_Max
static struct tty_struct *tty;
-static void spkup_write(const char *in_buf, int count);
+static void spkup_write(const u16 *in_buf, int count);
static char *phonetic[] = {
"alfa", "bravo", "charlie", "delta", "echo", "foxtrot", "golf", "hotel",
@@ -238,7 +239,8 @@ static u_short default_chartab[256] = {
struct task_struct *speakup_task;
struct bleep spk_unprocessed_sound;
static int spk_keydown;
-static u_char spk_lastkey, spk_close_press, keymap_flags;
+static u16 spk_lastkey;
+static u_char spk_close_press, keymap_flags;
static u_char last_keycode, this_speakup_key;
static u_long last_spk_jiffy;
@@ -299,7 +301,7 @@ static void speakup_shut_up(struct vc_data *vc)
spk_shut_up |= 0x01;
spk_parked &= 0xfe;
speakup_date(vc);
- if (synth != NULL)
+ if (synth)
spk_do_flush();
}
@@ -404,8 +406,9 @@ static void say_attributes(struct vc_data *vc)
if (bg > 7) {
synth_printf(" %s ", spk_msg_get(MSG_ON_BLINKING));
bg -= 8;
- } else
+ } else {
synth_printf(" %s ", spk_msg_get(MSG_ON));
+ }
synth_printf("%s\n", spk_msg_get(MSG_COLORS_START + bg));
}
@@ -426,39 +429,38 @@ static void announce_edge(struct vc_data *vc, int msg_id)
spk_msg_get(MSG_EDGE_MSGS_START + msg_id - 1));
}
-static void speak_char(u_char ch)
+static void speak_char(u16 ch)
{
- char *cp = spk_characters[ch];
+ char *cp;
struct var_t *direct = spk_get_var(DIRECT);
- if (direct && direct->u.n.value) {
- if (IS_CHAR(ch, B_CAP)) {
+ if (ch >= 0x100 || (direct && direct->u.n.value)) {
+ if (ch < 0x100 && IS_CHAR(ch, B_CAP)) {
spk_pitch_shift++;
synth_printf("%s", spk_str_caps_start);
}
- synth_printf("%c", ch);
- if (IS_CHAR(ch, B_CAP))
+ synth_putwc_s(ch);
+ if (ch < 0x100 && IS_CHAR(ch, B_CAP))
synth_printf("%s", spk_str_caps_stop);
return;
}
- if (cp == NULL) {
+
+ cp = spk_characters[ch];
+ if (!cp) {
pr_info("speak_char: cp == NULL!\n");
return;
}
- synth_buffer_add(SPACE);
if (IS_CHAR(ch, B_CAP)) {
spk_pitch_shift++;
- synth_printf("%s", spk_str_caps_start);
- synth_printf("%s", cp);
- synth_printf("%s", spk_str_caps_stop);
+ synth_printf("%s %s %s",
+ spk_str_caps_start, cp, spk_str_caps_stop);
} else {
if (*cp == '^') {
- synth_printf("%s", spk_msg_get(MSG_CTRL));
cp++;
- }
- synth_printf("%s", cp);
+ synth_printf(" %s%s ", spk_msg_get(MSG_CTRL), cp);
+ } else
+ synth_printf(" %s ", cp);
}
- synth_buffer_add(SPACE);
}
static u16 get_char(struct vc_data *vc, u16 *pos, u_char *attribs)
@@ -478,7 +480,7 @@ static u16 get_char(struct vc_data *vc, u16 *pos, u_char *attribs)
c |= 0x100;
}
- ch = inverse_translate(vc, c, 0);
+ ch = inverse_translate(vc, c, 1);
*attribs = (w & 0xff00) >> 8;
}
return ch;
@@ -486,7 +488,7 @@ static u16 get_char(struct vc_data *vc, u16 *pos, u_char *attribs)
static void say_char(struct vc_data *vc)
{
- u_short ch;
+ u16 ch;
spk_old_attr = spk_attr;
ch = get_char(vc, (u_short *)spk_pos, &spk_attr);
@@ -496,20 +498,20 @@ static void say_char(struct vc_data *vc)
if (spk_attrib_bleep & 2)
say_attributes(vc);
}
- speak_char(ch & 0xff);
+ speak_char(ch);
}
static void say_phonetic_char(struct vc_data *vc)
{
- u_short ch;
+ u16 ch;
spk_old_attr = spk_attr;
ch = get_char(vc, (u_short *)spk_pos, &spk_attr);
- if (isascii(ch) && isalpha(ch)) {
+ if (ch <= 0x7f && isalpha(ch)) {
ch &= 0x1f;
synth_printf("%s\n", phonetic[--ch]);
} else {
- if (IS_CHAR(ch, B_NUM))
+ if (ch < 0x100 && IS_CHAR(ch, B_NUM))
synth_printf("%s ", spk_msg_get(MSG_NUMBER));
speak_char(ch);
}
@@ -551,42 +553,42 @@ static void say_next_char(struct vc_data *vc)
static u_long get_word(struct vc_data *vc)
{
u_long cnt = 0, tmpx = spk_x, tmp_pos = spk_pos;
- char ch;
- u_short attr_ch;
+ u16 ch;
+ u16 attr_ch;
u_char temp;
spk_old_attr = spk_attr;
- ch = (char)get_char(vc, (u_short *)tmp_pos, &temp);
+ ch = get_char(vc, (u_short *)tmp_pos, &temp);
/* decided to take out the sayword if on a space (mis-information */
if (spk_say_word_ctl && ch == SPACE) {
*buf = '\0';
synth_printf("%s\n", spk_msg_get(MSG_SPACE));
return 0;
- } else if ((tmpx < vc->vc_cols - 2)
- && (ch == SPACE || ch == 0 || IS_WDLM(ch))
- && ((char)get_char(vc, (u_short *)&tmp_pos + 1, &temp) >
- SPACE)) {
+ } else if (tmpx < vc->vc_cols - 2 &&
+ (ch == SPACE || ch == 0 || (ch < 0x100 && IS_WDLM(ch))) &&
+ get_char(vc, (u_short *)&tmp_pos + 1, &temp) > SPACE) {
tmp_pos += 2;
tmpx++;
} else
while (tmpx > 0) {
- ch = (char)get_char(vc, (u_short *)tmp_pos - 1, &temp);
- if ((ch == SPACE || ch == 0 || IS_WDLM(ch))
- && ((char)get_char(vc, (u_short *)tmp_pos, &temp) >
- SPACE))
+ ch = get_char(vc, (u_short *)tmp_pos - 1, &temp);
+ if ((ch == SPACE || ch == 0 ||
+ (ch < 0x100 && IS_WDLM(ch))) &&
+ get_char(vc, (u_short *)tmp_pos, &temp) > SPACE)
break;
tmp_pos -= 2;
tmpx--;
}
attr_ch = get_char(vc, (u_short *)tmp_pos, &spk_attr);
- buf[cnt++] = attr_ch & 0xff;
+ buf[cnt++] = attr_ch;
while (tmpx < vc->vc_cols - 1) {
tmp_pos += 2;
tmpx++;
- ch = (char)get_char(vc, (u_short *)tmp_pos, &temp);
- if ((ch == SPACE) || ch == 0
- || (IS_WDLM(buf[cnt - 1]) && (ch > SPACE)))
+ ch = get_char(vc, (u_short *)tmp_pos, &temp);
+ if (ch == SPACE || ch == 0 ||
+ (buf[cnt - 1] < 0x100 && IS_WDLM(buf[cnt - 1]) &&
+ ch > SPACE))
break;
buf[cnt++] = ch;
}
@@ -610,7 +612,7 @@ static void say_word(struct vc_data *vc)
static void say_prev_word(struct vc_data *vc)
{
u_char temp;
- char ch;
+ u16 ch;
u_short edge_said = 0, last_state = 0, state = 0;
spk_parked |= 0x01;
@@ -636,13 +638,14 @@ static void say_prev_word(struct vc_data *vc)
break;
spk_y--;
spk_x = vc->vc_cols - 1;
- } else
+ } else {
spk_x--;
+ }
spk_pos -= 2;
- ch = (char)get_char(vc, (u_short *)spk_pos, &temp);
+ ch = get_char(vc, (u_short *)spk_pos, &temp);
if (ch == SPACE || ch == 0)
state = 0;
- else if (IS_WDLM(ch))
+ else if (ch < 0x100 && IS_WDLM(ch))
state = 1;
else
state = 2;
@@ -663,7 +666,7 @@ static void say_prev_word(struct vc_data *vc)
static void say_next_word(struct vc_data *vc)
{
u_char temp;
- char ch;
+ u16 ch;
u_short edge_said = 0, last_state = 2, state = 0;
spk_parked |= 0x01;
@@ -672,10 +675,10 @@ static void say_next_word(struct vc_data *vc)
return;
}
while (1) {
- ch = (char)get_char(vc, (u_short *)spk_pos, &temp);
+ ch = get_char(vc, (u_short *)spk_pos, &temp);
if (ch == SPACE || ch == 0)
state = 0;
- else if (IS_WDLM(ch))
+ else if (ch < 0x100 && IS_WDLM(ch))
state = 1;
else
state = 2;
@@ -690,8 +693,9 @@ static void say_next_word(struct vc_data *vc)
spk_y++;
spk_x = 0;
edge_said = edge_right;
- } else
+ } else {
spk_x++;
+ }
spk_pos += 2;
last_state = state;
}
@@ -703,39 +707,47 @@ static void say_next_word(struct vc_data *vc)
static void spell_word(struct vc_data *vc)
{
static char const *delay_str[] = { "", ",", ".", ". .", ". . ." };
- char *cp = buf, *str_cap = spk_str_caps_stop;
- char *cp1, *last_cap = spk_str_caps_stop;
- u_char ch;
+ u16 *cp = buf;
+ char *cp1;
+ char *str_cap = spk_str_caps_stop;
+ char *last_cap = spk_str_caps_stop;
+ struct var_t *direct = spk_get_var(DIRECT);
+ u16 ch;
if (!get_word(vc))
return;
- while ((ch = (u_char)*cp)) {
+ while ((ch = *cp)) {
if (cp != buf)
synth_printf(" %s ", delay_str[spk_spell_delay]);
- if (IS_CHAR(ch, B_CAP)) {
+ /* FIXME: Non-latin1 considered as lower case */
+ if (ch < 0x100 && IS_CHAR(ch, B_CAP)) {
str_cap = spk_str_caps_start;
if (*spk_str_caps_stop)
spk_pitch_shift++;
else /* synth has no pitch */
last_cap = spk_str_caps_stop;
- } else
+ } else {
str_cap = spk_str_caps_stop;
+ }
if (str_cap != last_cap) {
synth_printf("%s", str_cap);
last_cap = str_cap;
}
- if (this_speakup_key == SPELL_PHONETIC
- && (isascii(ch) && isalpha(ch))) {
- ch &= 31;
+ if (ch >= 0x100 || (direct && direct->u.n.value)) {
+ synth_putwc_s(ch);
+ } else if (this_speakup_key == SPELL_PHONETIC &&
+ ch <= 0x7f && isalpha(ch)) {
+ ch &= 0x1f;
cp1 = phonetic[--ch];
+ synth_printf("%s", cp1);
} else {
cp1 = spk_characters[ch];
if (*cp1 == '^') {
synth_printf("%s", spk_msg_get(MSG_CTRL));
cp1++;
}
+ synth_printf("%s", cp1);
}
- synth_printf("%s", cp1);
cp++;
}
if (str_cap != spk_str_caps_stop)
@@ -751,7 +763,7 @@ static int get_line(struct vc_data *vc)
spk_old_attr = spk_attr;
spk_attr = get_attributes(vc, (u_short *)spk_pos);
for (i = 0; i < vc->vc_cols; i++) {
- buf[i] = (u_char)get_char(vc, (u_short *)tmp, &tmp2);
+ buf[i] = get_char(vc, (u_short *)tmp, &tmp2);
tmp += 2;
}
for (--i; i >= 0; i--)
@@ -763,7 +775,7 @@ static int get_line(struct vc_data *vc)
static void say_line(struct vc_data *vc)
{
int i = get_line(vc);
- char *cp;
+ u16 *cp;
u_short saved_punc_mask = spk_punc_mask;
if (i == 0) {
@@ -775,7 +787,7 @@ static void say_line(struct vc_data *vc)
cp = buf;
while (*cp == SPACE)
cp++;
- synth_printf("%d, ", (cp - buf) + 1);
+ synth_printf("%zd, ", (cp - buf) + 1);
}
spk_punc_mask = spk_punc_masks[spk_reading_punc];
spkup_write(buf, i);
@@ -816,7 +828,7 @@ static int say_from_to(struct vc_data *vc, u_long from, u_long to,
spk_old_attr = spk_attr;
spk_attr = get_attributes(vc, (u_short *)from);
while (from < to) {
- buf[i++] = (char)get_char(vc, (u_short *)from, &tmp);
+ buf[i++] = get_char(vc, (u_short *)from, &tmp);
from += 2;
if (i >= vc->vc_size_row)
break;
@@ -852,11 +864,11 @@ static void say_line_from_to(struct vc_data *vc, u_long from, u_long to,
static int currsentence;
static int numsentences[2];
-static char *sentbufend[2];
-static char *sentmarks[2][10];
+static u16 *sentbufend[2];
+static u16 *sentmarks[2][10];
static int currbuf;
static int bn;
-static char sentbuf[2][256];
+static u16 sentbuf[2][256];
static int say_sentence_num(int num, int prev)
{
@@ -892,10 +904,10 @@ static int get_sentence_buf(struct vc_data *vc, int read_punc)
spk_attr = get_attributes(vc, (u_short *)start);
while (start < end) {
- sentbuf[bn][i] = (char)get_char(vc, (u_short *)start, &tmp);
+ sentbuf[bn][i] = get_char(vc, (u_short *)start, &tmp);
if (i > 0) {
- if (sentbuf[bn][i] == SPACE && sentbuf[bn][i - 1] == '.'
- && numsentences[bn] < 9) {
+ if (sentbuf[bn][i] == SPACE && sentbuf[bn][i - 1] == '.' &&
+ numsentences[bn] < 9) {
/* Sentence Marker */
numsentences[bn]++;
sentmarks[bn][numsentences[bn]] =
@@ -995,7 +1007,7 @@ static void right_edge(struct vc_data *vc)
static void say_first_char(struct vc_data *vc)
{
int i, len = get_line(vc);
- u_char ch;
+ u16 ch;
spk_parked |= 0x01;
if (len == 0) {
@@ -1015,7 +1027,7 @@ static void say_first_char(struct vc_data *vc)
static void say_last_char(struct vc_data *vc)
{
int len = get_line(vc);
- u_char ch;
+ u16 ch;
spk_parked |= 0x01;
if (len == 0) {
@@ -1040,9 +1052,8 @@ static void say_position(struct vc_data *vc)
static void say_char_num(struct vc_data *vc)
{
u_char tmp;
- u_short ch = get_char(vc, (u_short *)spk_pos, &tmp);
+ u16 ch = get_char(vc, (u_short *)spk_pos, &tmp);
- ch &= 0xff;
synth_printf(spk_msg_get(MSG_CHAR_INFO), ch, ch);
}
@@ -1070,10 +1081,10 @@ static void say_to_right(struct vc_data *vc)
/* end of stub functions. */
-static void spkup_write(const char *in_buf, int count)
+static void spkup_write(const u16 *in_buf, int count)
{
static int rep_count;
- static u_char ch = '\0', old_ch = '\0';
+ static u16 ch = '\0', old_ch = '\0';
static u_short char_type, last_type;
int in_count = count;
@@ -1085,8 +1096,11 @@ static void spkup_write(const char *in_buf, int count)
(currsentence <= numsentences[bn]))
synth_insert_next_index(currsentence++);
}
- ch = (u_char)*in_buf++;
- char_type = spk_chartab[ch];
+ ch = *in_buf++;
+ if (ch < 0x100)
+ char_type = spk_chartab[ch];
+ else
+ char_type = ALPHA;
if (ch == old_ch && !(char_type & B_NUM)) {
if (++rep_count > 2)
continue;
@@ -1106,10 +1120,10 @@ static void spkup_write(const char *in_buf, int count)
} else if (char_type & B_ALPHA) {
if ((synth_flags & SF_DEC) && (last_type & PUNC))
synth_buffer_add(SPACE);
- synth_printf("%c", ch);
+ synth_putwc_s(ch);
} else if (char_type & B_NUM) {
rep_count = 0;
- synth_printf("%c", ch);
+ synth_putwc_s(ch);
} else if (char_type & spk_punc_mask) {
speak_char(ch);
char_type &= ~PUNC; /* for dec nospell processing */
@@ -1122,7 +1136,7 @@ static void spkup_write(const char *in_buf, int count)
* repeats on you don't get nothing repeated count
*/
if (ch != old_ch)
- synth_printf("%c", ch);
+ synth_putwc_s(ch);
else
rep_count = 0;
} else {
@@ -1140,7 +1154,7 @@ static void spkup_write(const char *in_buf, int count)
if (last_type & CH_RPT) {
synth_printf(" ");
synth_printf(spk_msg_get(MSG_REPEAT_DESC2),
- ++rep_count);
+ ++rep_count);
synth_printf(" ");
}
rep_count = 0;
@@ -1157,7 +1171,7 @@ static void do_handle_shift(struct vc_data *vc, u_char value, char up_flag)
{
unsigned long flags;
- if (synth == NULL || up_flag || spk_killed)
+ if (!synth || up_flag || spk_killed)
return;
spin_lock_irqsave(&speakup_info.spinlock, flags);
if (cursor_track == read_all_mode) {
@@ -1195,7 +1209,7 @@ static void do_handle_latin(struct vc_data *vc, u_char value, char up_flag)
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
return;
}
- if (synth == NULL || spk_killed) {
+ if (!synth || spk_killed) {
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
return;
}
@@ -1216,13 +1230,19 @@ int spk_set_key_info(const u_char *key_info, u_char *k_buffer)
u_char ch, version, num_keys;
version = *cp++;
- if (version != KEY_MAP_VER)
- return -1;
+ if (version != KEY_MAP_VER) {
+ pr_debug("version found %d should be %d\n",
+ version, KEY_MAP_VER);
+ return -EINVAL;
+ }
num_keys = *cp;
states = (int)cp[1];
key_data_len = (states + 1) * (num_keys + 1);
- if (key_data_len + SHIFT_TBL_SIZE + 4 >= sizeof(spk_key_buf))
- return -2;
+ if (key_data_len + SHIFT_TBL_SIZE + 4 >= sizeof(spk_key_buf)) {
+ pr_debug("too many key_infos (%d over %u)\n",
+ key_data_len + SHIFT_TBL_SIZE + 4, (unsigned int)(sizeof(spk_key_buf)));
+ return -EINVAL;
+ }
memset(k_buffer, 0, SHIFT_TBL_SIZE);
memset(spk_our_keys, 0, sizeof(spk_our_keys));
spk_shift_table = k_buffer;
@@ -1233,14 +1253,19 @@ int spk_set_key_info(const u_char *key_info, u_char *k_buffer)
cp1 += 2; /* now pointing at shift states */
for (i = 1; i <= states; i++) {
ch = *cp1++;
- if (ch >= SHIFT_TBL_SIZE)
- return -3;
+ if (ch >= SHIFT_TBL_SIZE) {
+ pr_debug("(%d) not valid shift state (max_allowed = %d)\n", ch,
+ SHIFT_TBL_SIZE);
+ return -EINVAL;
+ }
spk_shift_table[ch] = i;
}
keymap_flags = *cp1++;
while ((ch = *cp1)) {
- if (ch >= MAX_KEY)
- return -4;
+ if (ch >= MAX_KEY) {
+ pr_debug("(%d), not valid key, (max_allowed = %d)\n", ch, MAX_KEY);
+ return -EINVAL;
+ }
spk_our_keys[ch] = cp1;
cp1 += states + 1;
}
@@ -1279,8 +1304,8 @@ void spk_reset_default_chars(void)
/* First, free any non-default */
for (i = 0; i < 256; i++) {
- if ((spk_characters[i] != NULL)
- && (spk_characters[i] != spk_default_chars[i]))
+ if (spk_characters[i] &&
+ (spk_characters[i] != spk_default_chars[i]))
kfree(spk_characters[i]);
}
@@ -1316,19 +1341,20 @@ static int edit_bits(struct vc_data *vc, u_char type, u_char ch, u_short key)
}
/* Allocation concurrency is protected by the console semaphore */
-static int speakup_allocate(struct vc_data *vc)
+static int speakup_allocate(struct vc_data *vc, gfp_t gfp_flags)
{
int vc_num;
vc_num = vc->vc_num;
- if (speakup_console[vc_num] == NULL) {
+ if (!speakup_console[vc_num]) {
speakup_console[vc_num] = kzalloc(sizeof(*speakup_console[0]),
- GFP_ATOMIC);
- if (speakup_console[vc_num] == NULL)
+ gfp_flags);
+ if (!speakup_console[vc_num])
return -ENOMEM;
speakup_date(vc);
- } else if (!spk_parked)
+ } else if (!spk_parked) {
speakup_date(vc);
+ }
return 0;
}
@@ -1373,7 +1399,7 @@ static void kbd_fakekey2(struct vc_data *vc, int command)
static void read_all_doc(struct vc_data *vc)
{
- if ((vc->vc_num != fg_console) || synth == NULL || spk_shut_up)
+ if ((vc->vc_num != fg_console) || !synth || spk_shut_up)
return;
if (!synth_supports_indexing())
return;
@@ -1381,9 +1407,9 @@ static void read_all_doc(struct vc_data *vc)
prev_cursor_track = cursor_track;
cursor_track = read_all_mode;
spk_reset_index_count(0);
- if (get_sentence_buf(vc, 0) == -1)
+ if (get_sentence_buf(vc, 0) == -1) {
kbd_fakekey2(vc, RA_DOWN_ARROW);
- else {
+ } else {
say_sentence_num(0, 0);
synth_insert_next_index(0);
start_read_all_timer(vc, RA_TIMER);
@@ -1430,8 +1456,9 @@ static void handle_cursor_read_all(struct vc_data *vc, int command)
if (!say_sentence_num(sentcount + 1, 1)) {
sn = 1;
spk_reset_index_count(sn);
- } else
+ } else {
synth_insert_next_index(0);
+ }
if (!say_sentence_num(sn, 0)) {
kbd_fakekey2(vc, RA_FIND_NEXT_SENT);
return;
@@ -1460,9 +1487,9 @@ static void handle_cursor_read_all(struct vc_data *vc, int command)
rv = get_sentence_buf(vc, 0);
if (rv == -1)
read_all_doc(vc);
- if (rv == 0)
+ if (rv == 0) {
kbd_fakekey2(vc, RA_FIND_NEXT_SENT);
- else {
+ } else {
say_sentence_num(1, 0);
synth_insert_next_index(0);
start_read_all_timer(vc, RA_TIMER);
@@ -1487,7 +1514,7 @@ static int pre_handle_cursor(struct vc_data *vc, u_char value, char up_flag)
spin_lock_irqsave(&speakup_info.spinlock, flags);
if (cursor_track == read_all_mode) {
spk_parked &= 0xfe;
- if (synth == NULL || up_flag || spk_shut_up) {
+ if (!synth || up_flag || spk_shut_up) {
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
return NOTIFY_STOP;
}
@@ -1509,7 +1536,7 @@ static void do_handle_cursor(struct vc_data *vc, u_char value, char up_flag)
spin_lock_irqsave(&speakup_info.spinlock, flags);
spk_parked &= 0xfe;
- if (synth == NULL || up_flag || spk_shut_up || cursor_track == CT_Off) {
+ if (!synth || up_flag || spk_shut_up || cursor_track == CT_Off) {
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
return;
}
@@ -1533,7 +1560,7 @@ static void do_handle_cursor(struct vc_data *vc, u_char value, char up_flag)
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
}
-static void update_color_buffer(struct vc_data *vc, const char *ic, int len)
+static void update_color_buffer(struct vc_data *vc, const u16 *ic, int len)
{
int i, bi, hi;
int vc_num = vc->vc_num;
@@ -1548,7 +1575,7 @@ static void update_color_buffer(struct vc_data *vc, const char *ic, int len)
speakup_console[vc_num]->ht.ry[bi] = vc->vc_y;
}
while ((hi < COLOR_BUFFER_SIZE) && (i < len)) {
- if ((ic[i] > 32) && (ic[i] < 127)) {
+ if (ic[i] > 32) {
speakup_console[vc_num]->ht.highbuf[bi][hi] = ic[i];
hi++;
} else if ((ic[i] == 32) && (hi != 0)) {
@@ -1705,7 +1732,7 @@ static void speakup_bs(struct vc_data *vc)
return;
if (!spk_parked)
speakup_date(vc);
- if (spk_shut_up || synth == NULL) {
+ if (spk_shut_up || !synth) {
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
return;
}
@@ -1718,11 +1745,11 @@ static void speakup_bs(struct vc_data *vc)
}
/* called by: vt_notifier_call() */
-static void speakup_con_write(struct vc_data *vc, const char *str, int len)
+static void speakup_con_write(struct vc_data *vc, u16 *str, int len)
{
unsigned long flags;
- if ((vc->vc_num != fg_console) || spk_shut_up || synth == NULL)
+ if ((vc->vc_num != fg_console) || spk_shut_up || !synth)
return;
if (!spin_trylock_irqsave(&speakup_info.spinlock, flags))
/* Speakup output, discard */
@@ -1751,7 +1778,7 @@ static void speakup_con_update(struct vc_data *vc)
{
unsigned long flags;
- if (speakup_console[vc->vc_num] == NULL || spk_parked)
+ if (!speakup_console[vc->vc_num] || spk_parked)
return;
if (!spin_trylock_irqsave(&speakup_info.spinlock, flags))
/* Speakup output, discard */
@@ -1766,7 +1793,7 @@ static void do_handle_spec(struct vc_data *vc, u_char value, char up_flag)
int on_off = 2;
char *label;
- if (synth == NULL || up_flag || spk_killed)
+ if (!synth || up_flag || spk_killed)
return;
spin_lock_irqsave(&speakup_info.spinlock, flags);
spk_shut_up &= 0xfe;
@@ -1810,7 +1837,7 @@ static int inc_dec_var(u_char value)
var_id = var_id / 2 + FIRST_SET_VAR;
p_header = spk_get_var_header(var_id);
- if (p_header == NULL)
+ if (!p_header)
return -1;
if (p_header->var_type != VAR_NUM)
return -1;
@@ -1893,7 +1920,7 @@ static void speakup_bits(struct vc_data *vc)
{
int val = this_speakup_key - (FIRST_EDIT_BITS - 1);
- if (spk_special_handler != NULL || val < 1 || val > 6) {
+ if (spk_special_handler || val < 1 || val > 6) {
synth_printf("%s\n", spk_msg_get(MSG_ERROR));
return;
}
@@ -1908,6 +1935,7 @@ static int handle_goto(struct vc_data *vc, u_char type, u_char ch, u_short key)
static int num;
int maxlen;
char *cp;
+ u16 wch;
if (type == KT_SPKUP && ch == SPEAKUP_GOTO)
goto do_goto;
@@ -1916,18 +1944,20 @@ static int handle_goto(struct vc_data *vc, u_char type, u_char ch, u_short key)
if (type != 0)
goto oops;
if (ch == 8) {
+ u16 wch;
if (num == 0)
return -1;
- ch = goto_buf[--num];
+ wch = goto_buf[--num];
goto_buf[num] = '\0';
- spkup_write(&ch, 1);
+ spkup_write(&wch, 1);
return 1;
}
if (ch < '+' || ch > 'y')
goto oops;
+ wch = ch;
goto_buf[num++] = ch;
goto_buf[num] = '\0';
- spkup_write(&ch, 1);
+ spkup_write(&wch, 1);
maxlen = (*goto_buf >= '0') ? 3 : 4;
if ((ch == '+' || ch == '-') && num == 1)
return 1;
@@ -1984,7 +2014,7 @@ do_goto:
static void speakup_goto(struct vc_data *vc)
{
- if (spk_special_handler != NULL) {
+ if (spk_special_handler) {
synth_printf("%s\n", spk_msg_get(MSG_ERROR));
return;
}
@@ -2072,8 +2102,8 @@ speakup_key(struct vc_data *vc, int shift_state, int keycode, u_short keysym,
tty = vc->port.tty;
if (type >= 0xf0)
type -= 0xf0;
- if (type == KT_PAD
- && (vt_get_leds(fg_console, VC_NUMLOCK))) {
+ if (type == KT_PAD &&
+ (vt_get_leds(fg_console, VC_NUMLOCK))) {
if (up_flag) {
spk_keydown = 0;
goto out;
@@ -2135,7 +2165,7 @@ speakup_key(struct vc_data *vc, int shift_state, int keycode, u_short keysym,
}
}
no_map:
- if (type == KT_SPKUP && spk_special_handler == NULL) {
+ if (type == KT_SPKUP && !spk_special_handler) {
do_spkup(vc, new_key);
spk_close_press = 0;
ret = 1;
@@ -2144,10 +2174,10 @@ no_map:
if (up_flag || spk_killed || type == KT_SHIFT)
goto out;
spk_shut_up &= 0xfe;
- kh = (value == KVAL(K_DOWN))
- || (value == KVAL(K_UP))
- || (value == KVAL(K_LEFT))
- || (value == KVAL(K_RIGHT));
+ kh = (value == KVAL(K_DOWN)) ||
+ (value == KVAL(K_UP)) ||
+ (value == KVAL(K_LEFT)) ||
+ (value == KVAL(K_RIGHT));
if ((cursor_track != read_all_mode) || !kh)
if (!spk_no_intr)
spk_do_flush();
@@ -2155,10 +2185,11 @@ no_map:
if (type == KT_SPEC && value == 1) {
value = '\n';
type = KT_LATIN;
- } else if (type == KT_LETTER)
+ } else if (type == KT_LETTER) {
type = KT_LATIN;
- else if (value == 0x7f)
+ } else if (value == 0x7f) {
value = 8; /* make del = backspace */
+ }
ret = (*spk_special_handler) (vc, type, value, keycode);
spk_close_press = 0;
if (ret < 0)
@@ -2246,17 +2277,16 @@ static int vt_notifier_call(struct notifier_block *nb,
switch (code) {
case VT_ALLOCATE:
if (vc->vc_mode == KD_TEXT)
- speakup_allocate(vc);
+ speakup_allocate(vc, GFP_ATOMIC);
break;
case VT_DEALLOCATE:
speakup_deallocate(vc);
break;
case VT_WRITE:
- if (param->c == '\b')
+ if (param->c == '\b') {
speakup_bs(vc);
- else if (param->c < 0x100) {
- char d = param->c;
-
+ } else {
+ u16 d = param->c;
speakup_con_write(vc, &d, 1);
}
break;
@@ -2306,7 +2336,6 @@ static int __init speakup_init(void)
{
int i;
long err = 0;
- struct st_spk_t *first_console;
struct vc_data *vc = vc_cons[fg_console].d;
struct var_t *var;
@@ -2331,18 +2360,9 @@ static int __init speakup_init(void)
if (err)
goto error_virtkeyboard;
- first_console = kzalloc(sizeof(*first_console), GFP_KERNEL);
- if (!first_console) {
- err = -ENOMEM;
- goto error_alloc;
- }
-
- speakup_console[vc->vc_num] = first_console;
- speakup_date(vc);
-
for (i = 0; i < MAX_NR_CONSOLES; i++)
if (vc_cons[i].d) {
- err = speakup_allocate(vc_cons[i].d);
+ err = speakup_allocate(vc_cons[i].d, GFP_KERNEL);
if (err)
goto error_kobjects;
}
@@ -2401,7 +2421,6 @@ error_kobjects:
for (i = 0; i < MAX_NR_CONSOLES; i++)
kfree(speakup_console[i]);
-error_alloc:
speakup_remove_virtual_keyboard();
error_virtkeyboard:
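
[Illustrative aside, not part of the patch] The speak_char() rework above splits handling by character width: anything at or above 0x100 is sent to the synthesizer directly via synth_putwc_s(), while Latin-1 characters keep going through the spk_characters[] description table. A simplified userspace sketch of that split (the direct-mode variable and caps handling are left out, and the table contents here are made up):

#include <stdint.h>
#include <stdio.h>

static const char *char_desc[256] = {
	['!'] = "bang",
	['a'] = "a",
};

static void speak_char(uint16_t ch)
{
	if (ch >= 0x100) {			/* no 256-entry table slot exists */
		printf("<U+%04X>\n", (unsigned int)ch);
		return;
	}
	if (char_desc[ch])
		printf(" %s \n", char_desc[ch]);
	else
		printf("(no description for 0x%02x)\n", ch);
}

int main(void)
{
	speak_char('a');
	speak_char('!');
	speak_char(0x20ac);	/* wide character: passed through as-is */
	return 0;
}
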
diff --git a/drivers/staging/speakup/selection.c b/drivers/staging/speakup/selection.c
index aeb2b865615a..08f68fc2864e 100644
--- a/drivers/staging/speakup/selection.c
+++ b/drivers/staging/speakup/selection.c
@@ -75,7 +75,7 @@ int speakup_set_selection(struct tty_struct *tty)
speakup_clear_selection();
spk_sel_cons = vc_cons[fg_console].d;
dev_warn(tty->dev,
- "Selection: mark console not the same as cut\n");
+ "Selection: mark console not the same as cut\n");
return -EINVAL;
}
@@ -99,7 +99,7 @@ int speakup_set_selection(struct tty_struct *tty)
sel_start = new_sel_start;
sel_end = new_sel_end;
/* Allocate a new buffer before freeing the old one ... */
- bp = kmalloc((sel_end-sel_start)/2+1, GFP_ATOMIC);
+ bp = kmalloc((sel_end - sel_start) / 2 + 1, GFP_ATOMIC);
if (!bp) {
speakup_clear_selection();
return -ENOMEM;
@@ -175,7 +175,7 @@ static struct speakup_paste_work speakup_paste_work = {
int speakup_paste_selection(struct tty_struct *tty)
{
- if (cmpxchg(&speakup_paste_work.tty, NULL, tty) != NULL)
+ if (cmpxchg(&speakup_paste_work.tty, NULL, tty))
return -EBUSY;
tty_kref_get(tty);
diff --git a/drivers/staging/speakup/serialio.c b/drivers/staging/speakup/serialio.c
index ef89dc1c21c8..ba060d0ceca2 100644
--- a/drivers/staging/speakup/serialio.c
+++ b/drivers/staging/speakup/serialio.c
@@ -21,9 +21,21 @@ static void start_serial_interrupt(int irq);
static const struct old_serial_port rs_table[] = {
SERIAL_PORT_DFNS
};
+
static const struct old_serial_port *serstate;
static int timeouts;
+static int spk_serial_out(struct spk_synth *in_synth, const char ch);
+static void spk_serial_send_xchar(char ch);
+static void spk_serial_tiocmset(unsigned int set, unsigned int clear);
+
+struct spk_io_ops spk_serial_io_ops = {
+ .synth_out = spk_serial_out,
+ .send_xchar = spk_serial_send_xchar,
+ .tiocmset = spk_serial_tiocmset,
+};
+EXPORT_SYMBOL_GPL(spk_serial_io_ops);
+
const struct old_serial_port *spk_serial_init(int index)
{
int baud = 9600, quot = 0;
@@ -97,8 +109,7 @@ static irqreturn_t synth_readbuf_handler(int irq, void *dev_id)
spin_lock_irqsave(&speakup_info.spinlock, flags);
while (inb_p(speakup_info.port_tts + UART_LSR) & UART_LSR_DR) {
-
- c = inb_p(speakup_info.port_tts+UART_RX);
+ c = inb_p(speakup_info.port_tts + UART_RX);
synth->read_buff_add((u_char)c);
}
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
@@ -119,17 +130,64 @@ static void start_serial_interrupt(int irq)
pr_err("Unable to request Speakup serial I R Q\n");
/* Set MCR */
outb(UART_MCR_DTR | UART_MCR_RTS | UART_MCR_OUT2,
- speakup_info.port_tts + UART_MCR);
+ speakup_info.port_tts + UART_MCR);
/* Turn on Interrupts */
outb(UART_IER_MSI|UART_IER_RLSI|UART_IER_RDI,
speakup_info.port_tts + UART_IER);
- inb(speakup_info.port_tts+UART_LSR);
- inb(speakup_info.port_tts+UART_RX);
- inb(speakup_info.port_tts+UART_IIR);
- inb(speakup_info.port_tts+UART_MSR);
+ inb(speakup_info.port_tts + UART_LSR);
+ inb(speakup_info.port_tts + UART_RX);
+ inb(speakup_info.port_tts + UART_IIR);
+ inb(speakup_info.port_tts + UART_MSR);
outb(1, speakup_info.port_tts + UART_FCR); /* Turn FIFO On */
}
+static void spk_serial_send_xchar(char ch)
+{
+ int timeout = SPK_XMITR_TIMEOUT;
+
+ while (spk_serial_tx_busy()) {
+ if (!--timeout)
+ break;
+ udelay(1);
+ }
+ outb(ch, speakup_info.port_tts);
+}
+
+static void spk_serial_tiocmset(unsigned int set, unsigned int clear)
+{
+ int old = inb(speakup_info.port_tts + UART_MCR);
+ outb((old & ~clear) | set, speakup_info.port_tts + UART_MCR);
+}
+
+int spk_serial_synth_probe(struct spk_synth *synth)
+{
+ const struct old_serial_port *ser;
+ int failed = 0;
+
+ if ((synth->ser >= SPK_LO_TTY) && (synth->ser <= SPK_HI_TTY)) {
+ ser = spk_serial_init(synth->ser);
+ if (!ser) {
+ failed = -1;
+ } else {
+ outb_p(0, ser->port);
+ mdelay(1);
+ outb_p('\r', ser->port);
+ }
+ } else {
+ failed = -1;
+ pr_warn("ttyS%i is an invalid port\n", synth->ser);
+ }
+ if (failed) {
+ pr_info("%s: not found\n", synth->long_name);
+ return -ENODEV;
+ }
+ pr_info("%s: ttyS%i, Driver Version %s\n",
+ synth->long_name, synth->ser, synth->version);
+ synth->alive = 1;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(spk_serial_synth_probe);
+
void spk_stop_serial_interrupt(void)
{
if (speakup_info.port_tts == 0)
@@ -139,19 +197,20 @@ void spk_stop_serial_interrupt(void)
return;
/* Turn off interrupts */
- outb(0, speakup_info.port_tts+UART_IER);
+ outb(0, speakup_info.port_tts + UART_IER);
/* Free IRQ */
free_irq(serstate->irq, (void *)synth_readbuf_handler);
}
+EXPORT_SYMBOL_GPL(spk_stop_serial_interrupt);
-int spk_wait_for_xmitr(void)
+int spk_wait_for_xmitr(struct spk_synth *in_synth)
{
int tmout = SPK_XMITR_TIMEOUT;
- if ((synth->alive) && (timeouts >= NUM_DISABLE_TIMEOUTS)) {
+ if ((in_synth->alive) && (timeouts >= NUM_DISABLE_TIMEOUTS)) {
pr_warn("%s: too many timeouts, deactivating speakup\n",
- synth->long_name);
- synth->alive = 0;
+ in_synth->long_name);
+ in_synth->alive = 0;
/* No synth any more, so nobody will restart TTYs, and we thus
* need to do it ourselves. Now that there is no synth we can
* let application flood anyway
@@ -162,7 +221,7 @@ int spk_wait_for_xmitr(void)
}
while (spk_serial_tx_busy()) {
if (--tmout == 0) {
- pr_warn("%s: timed out (tx busy)\n", synth->long_name);
+ pr_warn("%s: timed out (tx busy)\n", in_synth->long_name);
timeouts++;
return 0;
}
@@ -207,18 +266,35 @@ unsigned char spk_serial_in_nowait(void)
}
EXPORT_SYMBOL_GPL(spk_serial_in_nowait);
-int spk_serial_out(const char ch)
+static int spk_serial_out(struct spk_synth *in_synth, const char ch)
{
- if (synth->alive && spk_wait_for_xmitr()) {
+ if (in_synth->alive && spk_wait_for_xmitr(in_synth)) {
outb_p(ch, speakup_info.port_tts);
return 1;
}
return 0;
}
-EXPORT_SYMBOL_GPL(spk_serial_out);
+
+const char *spk_serial_synth_immediate(struct spk_synth *synth, const char *buff)
+{
+ u_char ch;
+
+ while ((ch = *buff)) {
+ if (ch == '\n')
+ ch = synth->procspeech;
+ if (spk_wait_for_xmitr(synth))
+ outb(ch, speakup_info.port_tts);
+ else
+ return buff;
+ buff++;
+ }
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(spk_serial_synth_immediate);
void spk_serial_release(void)
{
+ spk_stop_serial_interrupt();
if (speakup_info.port_tts == 0)
return;
synth_release_region(speakup_info.port_tts, 8);
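
[Illustrative aside, not part of the patch] The new spk_io_ops structure above groups the raw serial operations (synth_out, send_xchar, tiocmset) behind function pointers, so synthesizer drivers stop calling the serial helpers directly and a different back end can later be plugged in without touching them. A stripped-down userspace model of that indirection (only two of the three operations shown; the output is fake):

#include <stdio.h>

struct io_ops {
	int  (*synth_out)(char ch);
	void (*send_xchar)(char ch);
};

static int serial_out(char ch)
{
	printf("serial TX: %c\n", ch);
	return 1;
}

static void serial_send_xchar(char ch)
{
	printf("serial XCHAR: 0x%02x\n", (unsigned char)ch);
}

static const struct io_ops serial_io_ops = {
	.synth_out  = serial_out,
	.send_xchar = serial_send_xchar,
};

/* A driver only holds an ops pointer; swapping in another back end
 * would not change this function. */
static void speak(const struct io_ops *ops, const char *s)
{
	while (*s)
		ops->synth_out(*s++);
}

int main(void)
{
	speak(&serial_io_ops, "hi");
	serial_io_ops.send_xchar(0x13);	/* e.g. XOFF */
	return 0;
}
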
diff --git a/drivers/staging/speakup/speakup.h b/drivers/staging/speakup/speakup.h
index b203f0f883a9..a654334c98b9 100644
--- a/drivers/staging/speakup/speakup.h
+++ b/drivers/staging/speakup/speakup.h
@@ -20,7 +20,7 @@
#define A_CAP 0x0007
#define B_NUM 0x0008
#define NUM 0x0009
-#define ALPHANUM (B_ALPHA|B_NUM)
+#define ALPHANUM (B_ALPHA | B_NUM)
#define SOME 0x0010
#define MOST 0x0020
#define PUNC 0x0040
@@ -30,13 +30,14 @@
#define B_EXNUM 0x0100
#define CH_RPT 0x0200
#define B_CTL 0x0400
-#define A_CTL (B_CTL+SYNTH_OK)
+#define A_CTL (B_CTL + SYNTH_OK)
#define B_SYM 0x0800
-#define B_CAPSYM (B_CAP|B_SYM)
+#define B_CAPSYM (B_CAP | B_SYM)
-#define IS_WDLM(x) (spk_chartab[((u_char)x)]&B_WDLM)
-#define IS_CHAR(x, type) (spk_chartab[((u_char)x)]&type)
-#define IS_TYPE(x, type) ((spk_chartab[((u_char)x)]&type) == type)
+/* FIXME: u16 */
+#define IS_WDLM(x) (spk_chartab[((u_char)x)] & B_WDLM)
+#define IS_CHAR(x, type) (spk_chartab[((u_char)x)] & type)
+#define IS_TYPE(x, type) ((spk_chartab[((u_char)x)] & type) == type)
int speakup_thread(void *data);
void spk_reset_default_chars(void);
@@ -66,7 +67,7 @@ void synth_release(void);
void spk_do_flush(void);
void speakup_start_ttys(void);
-void synth_buffer_add(char ch);
+void synth_buffer_add(u16 ch);
void synth_buffer_clear(void);
void speakup_clear_selection(void);
int speakup_set_selection(struct tty_struct *tty);
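
[Illustrative aside, not part of the patch] The FIXME added next to IS_WDLM()/IS_CHAR() above points at why main.c now guards these macros with a "ch < 0x100" check: the u_char cast silently truncates a 16-bit character to its low byte and indexes an unrelated slot of the 256-entry table. A toy illustration of the false positive the guard prevents (table contents made up):

#include <stdint.h>
#include <stdio.h>

#define B_WDLM 0x01

static const uint16_t chartab[256] = {
	[' '] = B_WDLM,
	[','] = B_WDLM,
};

#define IS_WDLM(x) (chartab[(uint8_t)(x)] & B_WDLM)

int main(void)
{
	uint16_t space = ' ';
	uint16_t wide = 0x012c;	/* low byte happens to be ',' */

	printf("%d\n", IS_WDLM(space));			/* 1: correct */
	printf("%d\n", IS_WDLM(wide));			/* 1: false positive after truncation */
	printf("%d\n", wide < 0x100 && IS_WDLM(wide));	/* 0: the guard used in main.c */
	return 0;
}
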
diff --git a/drivers/staging/speakup/speakup_acntpc.c b/drivers/staging/speakup/speakup_acntpc.c
index c7fab261d860..ad72f8e883fc 100644
--- a/drivers/staging/speakup/speakup_acntpc.c
+++ b/drivers/staging/speakup/speakup_acntpc.c
@@ -113,6 +113,7 @@ static struct spk_synth synth_acntpc = {
.startup = SYNTH_START,
.checkval = SYNTH_CHECK,
.vars = vars,
+ .io_ops = &spk_serial_io_ops,
.probe = synth_probe,
.release = accent_release,
.synth_immediate = synth_immediate,
@@ -196,6 +197,7 @@ static void do_catch_up(struct spk_synth *synth)
synth->flush(synth);
continue;
}
+ synth_buffer_skip_nonlatin1();
if (synth_buffer_empty()) {
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
break;
@@ -233,7 +235,7 @@ static void do_catch_up(struct spk_synth *synth)
delay_time_val = delay_time->u.n.value;
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
schedule_timeout(msecs_to_jiffies(delay_time_val));
- jiff_max = jiffies+jiffy_delta_val;
+ jiff_max = jiffies + jiffy_delta_val;
}
}
timeout = SPK_XMITR_TIMEOUT;
@@ -259,18 +261,18 @@ static int synth_probe(struct spk_synth *synth)
if (port_forced) {
speakup_info.port_tts = port_forced;
pr_info("probe forced to %x by kernel command line\n",
- speakup_info.port_tts);
- if (synth_request_region(speakup_info.port_tts-1,
- SYNTH_IO_EXTENT)) {
+ speakup_info.port_tts);
+ if (synth_request_region(speakup_info.port_tts - 1,
+ SYNTH_IO_EXTENT)) {
pr_warn("sorry, port already reserved\n");
return -EBUSY;
}
- port_val = inw(speakup_info.port_tts-1);
- synth_port_control = speakup_info.port_tts-1;
+ port_val = inw(speakup_info.port_tts - 1);
+ synth_port_control = speakup_info.port_tts - 1;
} else {
for (i = 0; synth_portlist[i]; i++) {
if (synth_request_region(synth_portlist[i],
- SYNTH_IO_EXTENT)) {
+ SYNTH_IO_EXTENT)) {
pr_warn
("request_region: failed with 0x%x, %d\n",
synth_portlist[i], SYNTH_IO_EXTENT);
@@ -280,7 +282,7 @@ static int synth_probe(struct spk_synth *synth)
if (port_val == 0x53fc) {
/* 'S' and out&input bits */
synth_port_control = synth_portlist[i];
- speakup_info.port_tts = synth_port_control+1;
+ speakup_info.port_tts = synth_port_control + 1;
break;
}
}
@@ -294,7 +296,7 @@ static int synth_probe(struct spk_synth *synth)
return -ENODEV;
}
pr_info("%s: %03x-%03x, driver version %s,\n", synth->long_name,
- synth_port_control, synth_port_control+SYNTH_IO_EXTENT-1,
+ synth_port_control, synth_port_control + SYNTH_IO_EXTENT - 1,
synth->version);
synth->alive = 1;
return 0;
@@ -302,6 +304,7 @@ static int synth_probe(struct spk_synth *synth)
static void accent_release(void)
{
+ spk_stop_serial_interrupt();
if (speakup_info.port_tts)
synth_release_region(speakup_info.port_tts-1, SYNTH_IO_EXTENT);
speakup_info.port_tts = 0;
diff --git a/drivers/staging/speakup/speakup_acntsa.c b/drivers/staging/speakup/speakup_acntsa.c
index b2e352712766..de67ffda7d45 100644
--- a/drivers/staging/speakup/speakup_acntsa.c
+++ b/drivers/staging/speakup/speakup_acntsa.c
@@ -99,9 +99,10 @@ static struct spk_synth synth_acntsa = {
.startup = SYNTH_START,
.checkval = SYNTH_CHECK,
.vars = vars,
+ .io_ops = &spk_serial_io_ops,
.probe = synth_probe,
.release = spk_serial_release,
- .synth_immediate = spk_synth_immediate,
+ .synth_immediate = spk_serial_synth_immediate,
.catch_up = spk_do_catch_up,
.flush = spk_synth_flush,
.is_alive = spk_synth_is_alive_restart,
@@ -126,7 +127,7 @@ static int synth_probe(struct spk_synth *synth)
failed = spk_serial_synth_probe(synth);
if (failed == 0) {
- spk_synth_immediate(synth, "\033=R\r");
+ synth->synth_immediate(synth, "\033=R\r");
mdelay(100);
}
synth->alive = !failed;
diff --git a/drivers/staging/speakup/speakup_apollo.c b/drivers/staging/speakup/speakup_apollo.c
index 3f43f8105bc0..cead8b1b1bfc 100644
--- a/drivers/staging/speakup/speakup_apollo.c
+++ b/drivers/staging/speakup/speakup_apollo.c
@@ -108,9 +108,10 @@ static struct spk_synth synth_apollo = {
.startup = SYNTH_START,
.checkval = SYNTH_CHECK,
.vars = vars,
+ .io_ops = &spk_serial_io_ops,
.probe = spk_serial_synth_probe,
.release = spk_serial_release,
- .synth_immediate = spk_synth_immediate,
+ .synth_immediate = spk_serial_synth_immediate,
.catch_up = do_catch_up,
.flush = spk_synth_flush,
.is_alive = spk_synth_is_alive_restart,
@@ -160,6 +161,7 @@ static void do_catch_up(struct spk_synth *synth)
synth->flush(synth);
continue;
}
+ synth_buffer_skip_nonlatin1();
if (synth_buffer_empty()) {
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
break;
@@ -168,10 +170,9 @@ static void do_catch_up(struct spk_synth *synth)
set_current_state(TASK_INTERRUPTIBLE);
full_time_val = full_time->u.n.value;
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
- if (!spk_serial_out(ch)) {
- outb(UART_MCR_DTR, speakup_info.port_tts + UART_MCR);
- outb(UART_MCR_DTR | UART_MCR_RTS,
- speakup_info.port_tts + UART_MCR);
+ if (!synth->io_ops->synth_out(synth, ch)) {
+ synth->io_ops->tiocmset(0, UART_MCR_RTS);
+ synth->io_ops->tiocmset(UART_MCR_RTS, 0);
schedule_timeout(msecs_to_jiffies(full_time_val));
continue;
}
@@ -181,7 +182,7 @@ static void do_catch_up(struct spk_synth *synth)
full_time_val = full_time->u.n.value;
delay_time_val = delay_time->u.n.value;
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
- if (spk_serial_out(synth->procspeech))
+ if (synth->io_ops->synth_out(synth, synth->procspeech))
schedule_timeout(msecs_to_jiffies
(delay_time_val));
else
@@ -194,7 +195,7 @@ static void do_catch_up(struct spk_synth *synth)
synth_buffer_getc();
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
}
- spk_serial_out(PROCSPEECH);
+ synth->io_ops->synth_out(synth, PROCSPEECH);
}
module_param_named(ser, synth_apollo.ser, int, 0444);
diff --git a/drivers/staging/speakup/speakup_audptr.c b/drivers/staging/speakup/speakup_audptr.c
index e696b87bf515..6880352a7b74 100644
--- a/drivers/staging/speakup/speakup_audptr.c
+++ b/drivers/staging/speakup/speakup_audptr.c
@@ -104,9 +104,10 @@ static struct spk_synth synth_audptr = {
.startup = SYNTH_START,
.checkval = SYNTH_CHECK,
.vars = vars,
+ .io_ops = &spk_serial_io_ops,
.probe = synth_probe,
.release = spk_serial_release,
- .synth_immediate = spk_synth_immediate,
+ .synth_immediate = spk_serial_synth_immediate,
.catch_up = spk_do_catch_up,
.flush = synth_flush,
.is_alive = spk_synth_is_alive_restart,
@@ -127,15 +128,8 @@ static struct spk_synth synth_audptr = {
static void synth_flush(struct spk_synth *synth)
{
- int timeout = SPK_XMITR_TIMEOUT;
-
- while (spk_serial_tx_busy()) {
- if (!--timeout)
- break;
- udelay(1);
- }
- outb(SYNTH_CLEAR, speakup_info.port_tts);
- spk_serial_out(PROCSPEECH);
+ synth->io_ops->send_xchar(SYNTH_CLEAR);
+ synth->io_ops->synth_out(synth, PROCSPEECH);
}
static void synth_version(struct spk_synth *synth)
@@ -143,7 +137,7 @@ static void synth_version(struct spk_synth *synth)
unsigned char test = 0;
char synth_id[40] = "";
- spk_synth_immediate(synth, "\x05[Q]");
+ synth->synth_immediate(synth, "\x05[Q]");
synth_id[test] = spk_serial_in();
if (synth_id[test] == 'A') {
do {
diff --git a/drivers/staging/speakup/speakup_bns.c b/drivers/staging/speakup/speakup_bns.c
index da158c968e7c..a972a5147c6b 100644
--- a/drivers/staging/speakup/speakup_bns.c
+++ b/drivers/staging/speakup/speakup_bns.c
@@ -96,9 +96,10 @@ static struct spk_synth synth_bns = {
.startup = SYNTH_START,
.checkval = SYNTH_CHECK,
.vars = vars,
+ .io_ops = &spk_serial_io_ops,
.probe = spk_serial_synth_probe,
.release = spk_serial_release,
- .synth_immediate = spk_synth_immediate,
+ .synth_immediate = spk_serial_synth_immediate,
.catch_up = spk_do_catch_up,
.flush = spk_synth_flush,
.is_alive = spk_synth_is_alive_restart,
diff --git a/drivers/staging/speakup/speakup_decext.c b/drivers/staging/speakup/speakup_decext.c
index 6b74a97385da..c564bf8e1531 100644
--- a/drivers/staging/speakup/speakup_decext.c
+++ b/drivers/staging/speakup/speakup_decext.c
@@ -127,9 +127,10 @@ static struct spk_synth synth_decext = {
.startup = SYNTH_START,
.checkval = SYNTH_CHECK,
.vars = vars,
+ .io_ops = &spk_serial_io_ops,
.probe = spk_serial_synth_probe,
.release = spk_serial_release,
- .synth_immediate = spk_synth_immediate,
+ .synth_immediate = spk_serial_synth_immediate,
.catch_up = do_catch_up,
.flush = synth_flush,
.is_alive = spk_synth_is_alive_restart,
@@ -175,6 +176,7 @@ static void do_catch_up(struct spk_synth *synth)
synth->flush(synth);
continue;
}
+ synth_buffer_skip_nonlatin1();
if (synth_buffer_empty()) {
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
break;
@@ -185,7 +187,7 @@ static void do_catch_up(struct spk_synth *synth)
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
if (ch == '\n')
ch = 0x0D;
- if (synth_full() || !spk_serial_out(ch)) {
+ if (synth_full() || !synth->io_ops->synth_out(synth, ch)) {
schedule_timeout(msecs_to_jiffies(delay_time_val));
continue;
}
@@ -193,22 +195,22 @@ static void do_catch_up(struct spk_synth *synth)
spin_lock_irqsave(&speakup_info.spinlock, flags);
synth_buffer_getc();
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
- if (ch == '[')
+ if (ch == '[') {
in_escape = 1;
- else if (ch == ']')
+ } else if (ch == ']') {
in_escape = 0;
- else if (ch <= SPACE) {
+ } else if (ch <= SPACE) {
if (!in_escape && strchr(",.!?;:", last))
- spk_serial_out(PROCSPEECH);
+ synth->io_ops->synth_out(synth, PROCSPEECH);
if (time_after_eq(jiffies, jiff_max)) {
if (!in_escape)
- spk_serial_out(PROCSPEECH);
+ synth->io_ops->synth_out(synth, PROCSPEECH);
spin_lock_irqsave(&speakup_info.spinlock,
- flags);
+ flags);
jiffy_delta_val = jiffy_delta->u.n.value;
delay_time_val = delay_time->u.n.value;
spin_unlock_irqrestore(&speakup_info.spinlock,
- flags);
+ flags);
schedule_timeout(msecs_to_jiffies
(delay_time_val));
jiff_max = jiffies + jiffy_delta_val;
@@ -217,13 +219,13 @@ static void do_catch_up(struct spk_synth *synth)
last = ch;
}
if (!in_escape)
- spk_serial_out(PROCSPEECH);
+ synth->io_ops->synth_out(synth, PROCSPEECH);
}
static void synth_flush(struct spk_synth *synth)
{
in_escape = 0;
- spk_synth_immediate(synth, "\033P;10z\033\\");
+ synth->synth_immediate(synth, "\033P;10z\033\\");
}
module_param_named(ser, synth_decext.ser, int, 0444);
diff --git a/drivers/staging/speakup/speakup_decpc.c b/drivers/staging/speakup/speakup_decpc.c
index 6bf38e49a96d..5d22c3b7edd4 100644
--- a/drivers/staging/speakup/speakup_decpc.c
+++ b/drivers/staging/speakup/speakup_decpc.c
@@ -33,15 +33,15 @@
#include "spk_priv.h"
#include "speakup.h"
-#define MODULE_init 0x0dec /* module in boot code */
-#define MODULE_self_test 0x8800 /* module in self-test */
-#define MODULE_reset 0xffff /* reinit the whole module */
+#define MODULE_init 0x0dec /* module in boot code */
+#define MODULE_self_test 0x8800 /* module in self-test */
+#define MODULE_reset 0xffff /* reinit the whole module */
-#define MODE_mask 0xf000 /* mode bits in high nibble */
+#define MODE_mask 0xf000 /* mode bits in high nibble */
#define MODE_null 0x0000
-#define MODE_test 0x2000 /* in testing mode */
+#define MODE_test 0x2000 /* in testing mode */
#define MODE_status 0x8000
-#define STAT_int 0x0001 /* running in interrupt mode */
+#define STAT_int 0x0001 /* running in interrupt mode */
#define STAT_tr_char 0x0002 /* character data to transmit */
#define STAT_rr_char 0x0004 /* ready to receive char data */
#define STAT_cmd_ready 0x0008 /* ready to accept commands */
@@ -61,33 +61,31 @@
#define CMD_mask 0xf000 /* mask for command nibble */
#define CMD_null 0x0000 /* post status */
#define CMD_control 0x1000 /* hard control command */
-#define CTRL_mask 0x0F00 /* mask off control nibble */
-#define CTRL_data 0x00FF /* mask to get data byte */
-#define CTRL_null 0x0000 /* null control */
-#define CTRL_vol_up 0x0100 /* increase volume */
-#define CTRL_vol_down 0x0200 /* decrease volume */
-#define CTRL_vol_set 0x0300 /* set volume */
-#define CTRL_pause 0x0400 /* pause spc */
-#define CTRL_resume 0x0500 /* resume spc clock */
-#define CTRL_resume_spc 0x0001 /* resume spc soft pause */
-#define CTRL_flush 0x0600 /* flush all buffers */
-#define CTRL_int_enable 0x0700 /* enable status change ints */
-#define CTRL_buff_free 0x0800 /* buffer remain count */
-#define CTRL_buff_used 0x0900 /* buffer in use */
-#define CTRL_speech 0x0a00 /* immediate speech change */
-#define CTRL_SP_voice 0x0001 /* voice change */
-#define CTRL_SP_rate 0x0002 /* rate change */
-#define CTRL_SP_comma 0x0003 /* comma pause change */
-#define CTRL_SP_period 0x0004 /* period pause change */
-#define CTRL_SP_rate_delta 0x0005 /* delta rate change */
-#define CTRL_SP_get_param 0x0006 /* return the desired parameter */
-#define CTRL_last_index 0x0b00 /* get last index spoken */
-#define CTRL_io_priority 0x0c00 /* change i/o priority */
-#define CTRL_free_mem 0x0d00 /* get free paragraphs on module */
-#define CTRL_get_lang 0x0e00 /* return bit mask of loaded
- * languages
- */
-#define CMD_test 0x2000 /* self-test request */
+#define CTRL_mask 0x0F00 /* mask off control nibble */
+#define CTRL_data 0x00FF /* mask to get data byte */
+#define CTRL_null 0x0000 /* null control */
+#define CTRL_vol_up 0x0100 /* increase volume */
+#define CTRL_vol_down 0x0200 /* decrease volume */
+#define CTRL_vol_set 0x0300 /* set volume */
+#define CTRL_pause 0x0400 /* pause spc */
+#define CTRL_resume 0x0500 /* resume spc clock */
+#define CTRL_resume_spc 0x0001 /* resume spc soft pause */
+#define CTRL_flush 0x0600 /* flush all buffers */
+#define CTRL_int_enable 0x0700 /* enable status change ints */
+#define CTRL_buff_free 0x0800 /* buffer remain count */
+#define CTRL_buff_used 0x0900 /* buffer in use */
+#define CTRL_speech 0x0a00 /* immediate speech change */
+#define CTRL_SP_voice 0x0001 /* voice change */
+#define CTRL_SP_rate 0x0002 /* rate change */
+#define CTRL_SP_comma 0x0003 /* comma pause change */
+#define CTRL_SP_period 0x0004 /* period pause change */
+#define CTRL_SP_rate_delta 0x0005 /* delta rate change */
+#define CTRL_SP_get_param 0x0006 /* return the desired parameter */
+#define CTRL_last_index 0x0b00 /* get last index spoken */
+#define CTRL_io_priority 0x0c00 /* change i/o priority */
+#define CTRL_free_mem 0x0d00 /* get free paragraphs on module */
+#define CTRL_get_lang 0x0e00 /* return bit mask of loaded languages */
+#define CMD_test 0x2000 /* self-test request */
#define TEST_mask 0x0F00 /* isolate test field */
#define TEST_null 0x0000 /* no test requested */
#define TEST_isa_int 0x0100 /* assert isa irq */
@@ -101,19 +99,19 @@
#define ID_null 0x0000 /* null id */
#define ID_kernel 0x0100 /* kernel code executing */
#define ID_boot 0x0200 /* boot code executing */
-#define CMD_dma 0x4000 /* force a dma start */
-#define CMD_reset 0x5000 /* reset module status */
-#define CMD_sync 0x6000 /* kernel sync command */
-#define CMD_char_in 0x7000 /* single character send */
-#define CMD_char_out 0x8000 /* single character get */
-#define CHAR_count_1 0x0100 /* one char in cmd_low */
-#define CHAR_count_2 0x0200 /* the second in data_low */
-#define CHAR_count_3 0x0300 /* the third in data_high */
-#define CMD_spc_mode 0x9000 /* change spc mode */
-#define CMD_spc_to_text 0x0100 /* set to text mode */
-#define CMD_spc_to_digit 0x0200 /* set to digital mode */
-#define CMD_spc_rate 0x0400 /* change spc data rate */
-#define CMD_error 0xf000 /* severe error */
+#define CMD_dma 0x4000 /* force a dma start */
+#define CMD_reset 0x5000 /* reset module status */
+#define CMD_sync 0x6000 /* kernel sync command */
+#define CMD_char_in 0x7000 /* single character send */
+#define CMD_char_out 0x8000 /* single character get */
+#define CHAR_count_1 0x0100 /* one char in cmd_low */
+#define CHAR_count_2 0x0200 /* the second in data_low */
+#define CHAR_count_3 0x0300 /* the third in data_high */
+#define CMD_spc_mode 0x9000 /* change spc mode */
+#define CMD_spc_to_text 0x0100 /* set to text mode */
+#define CMD_spc_to_digit 0x0200 /* set to digital mode */
+#define CMD_spc_rate 0x0400 /* change spc data rate */
+#define CMD_error 0xf000 /* severe error */
enum { PRIMARY_DIC = 0, USER_DIC, COMMAND_DIC, ABBREV_DIC };
@@ -220,6 +218,7 @@ static struct spk_synth synth_dec_pc = {
.startup = SYNTH_START,
.checkval = SYNTH_CHECK,
.vars = vars,
+ .io_ops = &spk_serial_io_ops,
.probe = synth_probe,
.release = dtpc_release,
.synth_immediate = synth_immediate,
@@ -251,7 +250,7 @@ static int dt_getstatus(void)
static void dt_sendcmd(u_int cmd)
{
outb_p(cmd & 0xFF, speakup_info.port_tts);
- outb_p((cmd >> 8) & 0xFF, speakup_info.port_tts+1);
+ outb_p((cmd >> 8) & 0xFF, speakup_info.port_tts + 1);
}
static int dt_waitbit(int bit)
@@ -287,11 +286,11 @@ static int dt_ctrl(u_int cmd)
if (!dt_waitbit(STAT_cmd_ready))
return -1;
- outb_p(0, speakup_info.port_tts+2);
- outb_p(0, speakup_info.port_tts+3);
+ outb_p(0, speakup_info.port_tts + 2);
+ outb_p(0, speakup_info.port_tts + 3);
dt_getstatus();
- dt_sendcmd(CMD_control|cmd);
- outb_p(0, speakup_info.port_tts+6);
+ dt_sendcmd(CMD_control | cmd);
+ outb_p(0, speakup_info.port_tts + 6);
while (dt_getstatus() & STAT_cmd_ready) {
udelay(20);
if (--timeout == 0)
@@ -319,8 +318,8 @@ udelay(50);
break;
udelay(50);
}
- outb_p(DMA_sync, speakup_info.port_tts+4);
- outb_p(0, speakup_info.port_tts+4);
+ outb_p(DMA_sync, speakup_info.port_tts + 4);
+ outb_p(0, speakup_info.port_tts + 4);
udelay(100);
for (timeout = 0; timeout < 10; timeout++) {
if (!(dt_getstatus() & STAT_flushing))
@@ -338,8 +337,8 @@ static int dt_sendchar(char ch)
return -1;
if (!(dt_stat & STAT_rr_char))
return -2;
- outb_p(DMA_single_in, speakup_info.port_tts+4);
- outb_p(ch, speakup_info.port_tts+4);
+ outb_p(DMA_single_in, speakup_info.port_tts + 4);
+ outb_p(ch, speakup_info.port_tts + 4);
dma_state ^= STAT_dma_state;
return 0;
}
@@ -355,7 +354,7 @@ static int testkernel(void)
dt_sendcmd(CMD_sync);
if (!dt_waitbit(STAT_cmd_ready))
status = -2;
- else if (dt_stat&0x8000)
+ else if (dt_stat & 0x8000)
return 0;
else if (dt_stat == 0x0dec)
pr_warn("dec_pc at 0x%x, software not loaded\n",
@@ -392,6 +391,7 @@ static void do_catch_up(struct spk_synth *synth)
synth->flush(synth);
continue;
}
+ synth_buffer_skip_nonlatin1();
if (synth_buffer_empty()) {
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
break;
@@ -410,11 +410,11 @@ static void do_catch_up(struct spk_synth *synth)
spin_lock_irqsave(&speakup_info.spinlock, flags);
synth_buffer_getc();
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
- if (ch == '[')
+ if (ch == '[') {
in_escape = 1;
- else if (ch == ']')
+ } else if (ch == ']') {
in_escape = 0;
- else if (ch <= SPACE) {
+ } else if (ch <= SPACE) {
if (!in_escape && strchr(",.!?;:", last))
dt_sendchar(PROCSPEECH);
if (time_after_eq(jiffies, jiff_max)) {
@@ -481,6 +481,7 @@ static int synth_probe(struct spk_synth *synth)
static void dtpc_release(void)
{
+ spk_stop_serial_interrupt();
if (speakup_info.port_tts)
synth_release_region(speakup_info.port_tts, SYNTH_IO_EXTENT);
speakup_info.port_tts = 0;
diff --git a/drivers/staging/speakup/speakup_dectlk.c b/drivers/staging/speakup/speakup_dectlk.c
index 26036050cdb2..0cdbd5e9b36b 100644
--- a/drivers/staging/speakup/speakup_dectlk.c
+++ b/drivers/staging/speakup/speakup_dectlk.c
@@ -130,9 +130,10 @@ static struct spk_synth synth_dectlk = {
.vars = vars,
.default_pitch = ap_defaults,
.default_vol = g5_defaults,
+ .io_ops = &spk_serial_io_ops,
.probe = spk_serial_synth_probe,
.release = spk_serial_release,
- .synth_immediate = spk_synth_immediate,
+ .synth_immediate = spk_serial_synth_immediate,
.catch_up = do_catch_up,
.flush = synth_flush,
.is_alive = spk_synth_is_alive_restart,
@@ -239,6 +240,7 @@ static void do_catch_up(struct spk_synth *synth)
synth->flush(synth);
continue;
}
+ synth_buffer_skip_nonlatin1();
if (synth_buffer_empty()) {
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
break;
@@ -250,7 +252,7 @@ static void do_catch_up(struct spk_synth *synth)
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
if (ch == '\n')
ch = 0x0D;
- if (synth_full_val || !spk_serial_out(ch)) {
+ if (synth_full_val || !synth->io_ops->synth_out(synth, ch)) {
schedule_timeout(msecs_to_jiffies(delay_time_val));
continue;
}
@@ -258,16 +260,16 @@ static void do_catch_up(struct spk_synth *synth)
spin_lock_irqsave(&speakup_info.spinlock, flags);
synth_buffer_getc();
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
- if (ch == '[')
+ if (ch == '[') {
in_escape = 1;
- else if (ch == ']')
+ } else if (ch == ']') {
in_escape = 0;
- else if (ch <= SPACE) {
+ } else if (ch <= SPACE) {
if (!in_escape && strchr(",.!?;:", last))
- spk_serial_out(PROCSPEECH);
+ synth->io_ops->synth_out(synth, PROCSPEECH);
if (time_after_eq(jiffies, jiff_max)) {
if (!in_escape)
- spk_serial_out(PROCSPEECH);
+ synth->io_ops->synth_out(synth, PROCSPEECH);
spin_lock_irqsave(&speakup_info.spinlock,
flags);
jiffy_delta_val = jiffy_delta->u.n.value;
@@ -282,17 +284,17 @@ static void do_catch_up(struct spk_synth *synth)
last = ch;
}
if (!in_escape)
- spk_serial_out(PROCSPEECH);
+ synth->io_ops->synth_out(synth, PROCSPEECH);
}
static void synth_flush(struct spk_synth *synth)
{
if (in_escape)
/* if in command output ']' so we don't get an error */
- spk_serial_out(']');
+ synth->io_ops->synth_out(synth, ']');
in_escape = 0;
is_flushing = 1;
- spk_serial_out(SYNTH_CLEAR);
+ synth->io_ops->synth_out(synth, SYNTH_CLEAR);
}
module_param_named(ser, synth_dectlk.ser, int, 0444);
diff --git a/drivers/staging/speakup/speakup_dtlk.c b/drivers/staging/speakup/speakup_dtlk.c
index e2bf20806d8d..5973acc0a006 100644
--- a/drivers/staging/speakup/speakup_dtlk.c
+++ b/drivers/staging/speakup/speakup_dtlk.c
@@ -43,6 +43,7 @@ static int port_forced;
static unsigned int synth_portlist[] = {
0x25e, 0x29e, 0x2de, 0x31e, 0x35e, 0x39e, 0
};
+
static u_char synth_status;
static struct var_t vars[] = {
@@ -128,6 +129,7 @@ static struct spk_synth synth_dtlk = {
.startup = SYNTH_START,
.checkval = SYNTH_CHECK,
.vars = vars,
+ .io_ops = &spk_serial_io_ops,
.probe = synth_probe,
.release = dtlk_release,
.synth_immediate = synth_immediate,
@@ -209,6 +211,7 @@ static void do_catch_up(struct spk_synth *synth)
synth->flush(synth);
continue;
}
+ synth_buffer_skip_nonlatin1();
if (synth_buffer_empty()) {
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
break;
@@ -297,7 +300,7 @@ static struct synth_settings *synth_interrogate(struct spk_synth *synth)
t += 2;
for (i = 0; *t != '\r'; t++) {
status.rom_version[i] = *t;
- if (i < sizeof(status.rom_version)-1)
+ if (i < sizeof(status.rom_version) - 1)
i++;
}
status.rom_version[i] = 0;
@@ -373,6 +376,7 @@ static int synth_probe(struct spk_synth *synth)
static void dtlk_release(void)
{
+ spk_stop_serial_interrupt();
if (speakup_info.port_tts)
synth_release_region(speakup_info.port_tts-1, SYNTH_IO_EXTENT);
speakup_info.port_tts = 0;
diff --git a/drivers/staging/speakup/speakup_dtlk.h b/drivers/staging/speakup/speakup_dtlk.h
index b3b3cfc3db07..46d885fcfb20 100644
--- a/drivers/staging/speakup/speakup_dtlk.h
+++ b/drivers/staging/speakup/speakup_dtlk.h
@@ -24,11 +24,11 @@
* usec later.
*/
#define TTS_ALMOST_FULL 0x08 /* mask for AF bit: When set to 1,
- * indicates that less than 300 bytes
- * are available in the TTS input
- * buffer. AF is always 0 in the PCM,
- * TGN and CVSD modes.
- */
+ * indicates that less than 300 bytes
+ * are available in the TTS input
+ * buffer. AF is always 0 in the PCM,
+ * TGN and CVSD modes.
+ */
#define TTS_ALMOST_EMPTY 0x04 /* mask for AE bit: When set to 1,
* indicates that less than 300 bytes
* are remaining in DoubleTalk's input
diff --git a/drivers/staging/speakup/speakup_dummy.c b/drivers/staging/speakup/speakup_dummy.c
index cb7cef30c124..8db7aa358f31 100644
--- a/drivers/staging/speakup/speakup_dummy.c
+++ b/drivers/staging/speakup/speakup_dummy.c
@@ -98,9 +98,10 @@ static struct spk_synth synth_dummy = {
.startup = SYNTH_START,
.checkval = SYNTH_CHECK,
.vars = vars,
+ .io_ops = &spk_serial_io_ops,
.probe = spk_serial_synth_probe,
.release = spk_serial_release,
- .synth_immediate = spk_synth_immediate,
+ .synth_immediate = spk_serial_synth_immediate,
.catch_up = spk_do_catch_up,
.flush = spk_synth_flush,
.is_alive = spk_synth_is_alive_restart,
diff --git a/drivers/staging/speakup/speakup_keypc.c b/drivers/staging/speakup/speakup_keypc.c
index 10f4964782e2..ba7901178e0b 100644
--- a/drivers/staging/speakup/speakup_keypc.c
+++ b/drivers/staging/speakup/speakup_keypc.c
@@ -105,6 +105,7 @@ static struct spk_synth synth_keypc = {
.startup = SYNTH_START,
.checkval = SYNTH_CHECK,
.vars = vars,
+ .io_ops = &spk_serial_io_ops,
.probe = synth_probe,
.release = keynote_release,
.synth_immediate = synth_immediate,
@@ -141,9 +142,9 @@ static char *oops(void)
int s1, s2, s3, s4;
s1 = inb_p(synth_port);
- s2 = inb_p(synth_port+1);
- s3 = inb_p(synth_port+2);
- s4 = inb_p(synth_port+3);
+ s2 = inb_p(synth_port + 1);
+ s3 = inb_p(synth_port + 2);
+ s4 = inb_p(synth_port + 3);
pr_warn("synth timeout %d %d %d %d\n", s1, s2, s3, s4);
return NULL;
}
@@ -198,6 +199,7 @@ spin_lock_irqsave(&speakup_info.spinlock, flags);
synth->flush(synth);
continue;
}
+ synth_buffer_skip_nonlatin1();
if (synth_buffer_empty()) {
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
break;
@@ -304,6 +306,7 @@ static int synth_probe(struct spk_synth *synth)
static void keynote_release(void)
{
+ spk_stop_serial_interrupt();
if (synth_port)
synth_release_region(synth_port, SYNTH_IO_EXTENT);
synth_port = 0;
diff --git a/drivers/staging/speakup/speakup_ltlk.c b/drivers/staging/speakup/speakup_ltlk.c
index 9d22198a0339..11275f49bea4 100644
--- a/drivers/staging/speakup/speakup_ltlk.c
+++ b/drivers/staging/speakup/speakup_ltlk.c
@@ -111,9 +111,10 @@ static struct spk_synth synth_ltlk = {
.startup = SYNTH_START,
.checkval = SYNTH_CHECK,
.vars = vars,
+ .io_ops = &spk_serial_io_ops,
.probe = synth_probe,
.release = spk_serial_release,
- .synth_immediate = spk_synth_immediate,
+ .synth_immediate = spk_serial_synth_immediate,
.catch_up = spk_do_catch_up,
.flush = spk_synth_flush,
.is_alive = spk_synth_is_alive_restart,
@@ -138,13 +139,13 @@ static void synth_interrogate(struct spk_synth *synth)
unsigned char *t, i;
unsigned char buf[50], rom_v[20];
- spk_synth_immediate(synth, "\x18\x01?");
+ synth->synth_immediate(synth, "\x18\x01?");
for (i = 0; i < 50; i++) {
buf[i] = spk_serial_in();
if (i > 2 && buf[i] == 0x7f)
break;
}
- t = buf+2;
+ t = buf + 2;
for (i = 0; *t != '\r'; t++) {
rom_v[i] = *t;
if (++i >= 19)
diff --git a/drivers/staging/speakup/speakup_soft.c b/drivers/staging/speakup/speakup_soft.c
index d2ff0afd685a..e454f5685f70 100644
--- a/drivers/staging/speakup/speakup_soft.c
+++ b/drivers/staging/speakup/speakup_soft.c
@@ -29,6 +29,7 @@
#define DRV_VERSION "2.6"
#define SOFTSYNTH_MINOR 26 /* might as well give it one more than /dev/synth */
+#define SOFTSYNTHU_MINOR 27 /* might as well give it one more than /dev/softsynth */
#define PROCSPEECH 0x0d
#define CLEAR_SYNTH 0x18
@@ -37,7 +38,7 @@ static void softsynth_release(void);
static int softsynth_is_alive(struct spk_synth *synth);
static unsigned char get_index(void);
-static struct miscdevice synth_device;
+static struct miscdevice synth_device, synthu_device;
static int init_pos;
static int misc_registered;
@@ -130,6 +131,7 @@ static struct spk_synth synth_soft = {
.startup = SYNTH_START,
.checkval = SYNTH_CHECK,
.vars = vars,
+ .io_ops = NULL,
.probe = softsynth_probe,
.release = softsynth_release,
.synth_immediate = NULL,
@@ -199,13 +201,13 @@ static int softsynth_close(struct inode *inode, struct file *fp)
return 0;
}
-static ssize_t softsynth_read(struct file *fp, char __user *buf, size_t count,
- loff_t *pos)
+static ssize_t softsynthx_read(struct file *fp, char __user *buf, size_t count,
+ loff_t *pos, int unicode)
{
int chars_sent = 0;
char __user *cp;
char *init;
- char ch;
+ u16 ch;
int empty;
unsigned long flags;
DEFINE_WAIT(wait);
@@ -213,6 +215,8 @@ static ssize_t softsynth_read(struct file *fp, char __user *buf, size_t count,
spin_lock_irqsave(&speakup_info.spinlock, flags);
while (1) {
prepare_to_wait(&speakup_event, &wait, TASK_INTERRUPTIBLE);
+ if (!unicode)
+ synth_buffer_skip_nonlatin1();
if (!synth_buffer_empty() || speakup_info.flushing)
break;
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
@@ -231,23 +235,57 @@ static ssize_t softsynth_read(struct file *fp, char __user *buf, size_t count,
cp = buf;
init = get_initstring();
- while (chars_sent < count) {
+
+ /* Keep 3 bytes available for a 16-bit UTF-8-encoded character */
+ while (chars_sent <= count - 3) {
if (speakup_info.flushing) {
speakup_info.flushing = 0;
ch = '\x18';
- } else if (synth_buffer_empty()) {
- break;
} else if (init[init_pos]) {
ch = init[init_pos++];
} else {
+ if (!unicode)
+ synth_buffer_skip_nonlatin1();
+ if (synth_buffer_empty())
+ break;
ch = synth_buffer_getc();
}
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
- if (copy_to_user(cp, &ch, 1))
- return -EFAULT;
+
+ if ((!unicode && ch < 0x100) || (unicode && ch < 0x80)) {
+ u_char c = ch;
+
+ if (copy_to_user(cp, &c, 1))
+ return -EFAULT;
+
+ chars_sent++;
+ cp++;
+ } else if (unicode && ch < 0x800) {
+ u_char s[2] = {
+ 0xc0 | (ch >> 6),
+ 0x80 | (ch & 0x3f)
+ };
+
+ if (copy_to_user(cp, s, sizeof(s)))
+ return -EFAULT;
+
+ chars_sent += sizeof(s);
+ cp += sizeof(s);
+ } else if (unicode) {
+ u_char s[3] = {
+ 0xe0 | (ch >> 12),
+ 0x80 | ((ch >> 6) & 0x3f),
+ 0x80 | (ch & 0x3f)
+ };
+
+ if (copy_to_user(cp, s, sizeof(s)))
+ return -EFAULT;
+
+ chars_sent += sizeof(s);
+ cp += sizeof(s);
+ }
+
spin_lock_irqsave(&speakup_info.spinlock, flags);
- chars_sent++;
- cp++;
}
*pos += chars_sent;
empty = synth_buffer_empty();
@@ -259,6 +297,18 @@ static ssize_t softsynth_read(struct file *fp, char __user *buf, size_t count,
return chars_sent;
}
+static ssize_t softsynth_read(struct file *fp, char __user *buf, size_t count,
+ loff_t *pos)
+{
+ return softsynthx_read(fp, buf, count, pos, 0);
+}
+
+static ssize_t softsynthu_read(struct file *fp, char __user *buf, size_t count,
+ loff_t *pos)
+{
+ return softsynthx_read(fp, buf, count, pos, 1);
+}
+
static int last_index;
static ssize_t softsynth_write(struct file *fp, const char __user *buf,
@@ -308,6 +358,15 @@ static const struct file_operations softsynth_fops = {
.release = softsynth_close,
};
+static const struct file_operations softsynthu_fops = {
+ .owner = THIS_MODULE,
+ .poll = softsynth_poll,
+ .read = softsynthu_read,
+ .write = softsynth_write,
+ .open = softsynth_open,
+ .release = softsynth_close,
+};
+
static int softsynth_probe(struct spk_synth *synth)
{
if (misc_registered != 0)
@@ -321,16 +380,28 @@ static int softsynth_probe(struct spk_synth *synth)
return -ENODEV;
}
+ memset(&synthu_device, 0, sizeof(synthu_device));
+ synthu_device.minor = SOFTSYNTHU_MINOR;
+ synthu_device.name = "softsynthu";
+ synthu_device.fops = &softsynthu_fops;
+ if (misc_register(&synthu_device)) {
+ pr_warn("Couldn't initialize miscdevice /dev/softsynth.\n");
+ return -ENODEV;
+ }
+
misc_registered = 1;
pr_info("initialized device: /dev/softsynth, node (MAJOR 10, MINOR 26)\n");
+ pr_info("initialized device: /dev/softsynthu, node (MAJOR 10, MINOR 27)\n");
return 0;
}
static void softsynth_release(void)
{
misc_deregister(&synth_device);
+ misc_deregister(&synthu_device);
misc_registered = 0;
pr_info("unregistered /dev/softsynth\n");
+ pr_info("unregistered /dev/softsynthu\n");
}
static int softsynth_is_alive(struct spk_synth *synth)
diff --git a/drivers/staging/speakup/speakup_spkout.c b/drivers/staging/speakup/speakup_spkout.c
index 143fadabfe6c..d95c375a0736 100644
--- a/drivers/staging/speakup/speakup_spkout.c
+++ b/drivers/staging/speakup/speakup_spkout.c
@@ -102,9 +102,10 @@ static struct spk_synth synth_spkout = {
.startup = SYNTH_START,
.checkval = SYNTH_CHECK,
.vars = vars,
+ .io_ops = &spk_serial_io_ops,
.probe = spk_serial_synth_probe,
.release = spk_serial_release,
- .synth_immediate = spk_synth_immediate,
+ .synth_immediate = spk_serial_synth_immediate,
.catch_up = spk_do_catch_up,
.flush = synth_flush,
.is_alive = spk_synth_is_alive_restart,
@@ -125,14 +126,7 @@ static struct spk_synth synth_spkout = {
static void synth_flush(struct spk_synth *synth)
{
- int timeout = SPK_XMITR_TIMEOUT;
-
- while (spk_serial_tx_busy()) {
- if (!--timeout)
- break;
- udelay(1);
- }
- outb(SYNTH_CLEAR, speakup_info.port_tts);
+ synth->io_ops->send_xchar(SYNTH_CLEAR);
}
module_param_named(ser, synth_spkout.ser, int, 0444);
diff --git a/drivers/staging/speakup/speakup_txprt.c b/drivers/staging/speakup/speakup_txprt.c
index aa2f338d15d8..3f531fb99fd3 100644
--- a/drivers/staging/speakup/speakup_txprt.c
+++ b/drivers/staging/speakup/speakup_txprt.c
@@ -95,9 +95,10 @@ static struct spk_synth synth_txprt = {
.startup = SYNTH_START,
.checkval = SYNTH_CHECK,
.vars = vars,
+ .io_ops = &spk_serial_io_ops,
.probe = spk_serial_synth_probe,
.release = spk_serial_release,
- .synth_immediate = spk_synth_immediate,
+ .synth_immediate = spk_serial_synth_immediate,
.catch_up = spk_do_catch_up,
.flush = spk_synth_flush,
.is_alive = spk_synth_is_alive_restart,
diff --git a/drivers/staging/speakup/spk_priv.h b/drivers/staging/speakup/spk_priv.h
index d5aa41d82122..995f586bddcd 100644
--- a/drivers/staging/speakup/spk_priv.h
+++ b/drivers/staging/speakup/spk_priv.h
@@ -42,14 +42,14 @@
const struct old_serial_port *spk_serial_init(int index);
void spk_stop_serial_interrupt(void);
-int spk_wait_for_xmitr(void);
+int spk_wait_for_xmitr(struct spk_synth *in_synth);
unsigned char spk_serial_in(void);
unsigned char spk_serial_in_nowait(void);
-int spk_serial_out(const char ch);
void spk_serial_release(void);
-char synth_buffer_getc(void);
-char synth_buffer_peek(void);
+void synth_buffer_skip_nonlatin1(void);
+u16 synth_buffer_getc(void);
+u16 synth_buffer_peek(void);
int synth_buffer_empty(void);
struct var_t *spk_get_var(enum var_id_t var_id);
ssize_t spk_var_show(struct kobject *kobj, struct kobj_attribute *attr,
@@ -58,12 +58,17 @@ ssize_t spk_var_store(struct kobject *kobj, struct kobj_attribute *attr,
const char *buf, size_t count);
int spk_serial_synth_probe(struct spk_synth *synth);
-const char *spk_synth_immediate(struct spk_synth *synth, const char *buff);
+const char *spk_serial_synth_immediate(struct spk_synth *synth, const char *buff);
void spk_do_catch_up(struct spk_synth *synth);
void spk_synth_flush(struct spk_synth *synth);
int spk_synth_is_alive_nop(struct spk_synth *synth);
int spk_synth_is_alive_restart(struct spk_synth *synth);
+__printf(1, 2)
void synth_printf(const char *buf, ...);
+void synth_putwc(u16 wc);
+void synth_putwc_s(u16 wc);
+void synth_putws(const u16 *buf);
+void synth_putws_s(const u16 *buf);
int synth_request_region(unsigned long start, unsigned long n);
int synth_release_region(unsigned long start, unsigned long n);
int synth_add(struct spk_synth *in_synth);
@@ -73,4 +78,6 @@ extern struct speakup_info_t speakup_info;
extern struct var_t synth_time_vars[];
+extern struct spk_io_ops spk_serial_io_ops;
+
#endif
diff --git a/drivers/staging/speakup/spk_types.h b/drivers/staging/speakup/spk_types.h
index b07f6cc4f284..c156975392c8 100644
--- a/drivers/staging/speakup/spk_types.h
+++ b/drivers/staging/speakup/spk_types.h
@@ -55,7 +55,7 @@ struct spk_highlight_color_track {
/* Count of each background color */
unsigned int bgcount[8];
/* Buffer for characters drawn with each background color */
- char highbuf[8][COLOR_BUFFER_SIZE];
+ u16 highbuf[8][COLOR_BUFFER_SIZE];
/* Current index into highbuf */
unsigned int highsize[8];
/* Reading Position for each color */
@@ -146,6 +146,14 @@ struct synth_indexing {
unsigned char currindex;
};
+struct spk_synth;
+
+struct spk_io_ops {
+ int (*synth_out)(struct spk_synth *synth, const char ch);
+ void (*send_xchar)(char ch);
+ void (*tiocmset)(unsigned int set, unsigned int clear);
+};
+
struct spk_synth {
const char *name;
const char *version;
@@ -164,6 +172,7 @@ struct spk_synth {
struct var_t *vars;
int *default_pitch;
int *default_vol;
+ struct spk_io_ops *io_ops;
int (*probe)(struct spk_synth *synth);
void (*release)(void);
const char *(*synth_immediate)(struct spk_synth *synth,
diff --git a/drivers/staging/speakup/synth.c b/drivers/staging/speakup/synth.c
index a61c02ba06da..352e9eebc3de 100644
--- a/drivers/staging/speakup/synth.c
+++ b/drivers/staging/speakup/synth.c
@@ -44,35 +44,6 @@ EXPORT_SYMBOL_GPL(speakup_info);
static int do_synth_init(struct spk_synth *in_synth);
-int spk_serial_synth_probe(struct spk_synth *synth)
-{
- const struct old_serial_port *ser;
- int failed = 0;
-
- if ((synth->ser >= SPK_LO_TTY) && (synth->ser <= SPK_HI_TTY)) {
- ser = spk_serial_init(synth->ser);
- if (ser == NULL) {
- failed = -1;
- } else {
- outb_p(0, ser->port);
- mdelay(1);
- outb_p('\r', ser->port);
- }
- } else {
- failed = -1;
- pr_warn("ttyS%i is an invalid port\n", synth->ser);
- }
- if (failed) {
- pr_info("%s: not found\n", synth->long_name);
- return -ENODEV;
- }
- pr_info("%s: ttyS%i, Driver Version %s\n",
- synth->long_name, synth->ser, synth->version);
- synth->alive = 1;
- return 0;
-}
-EXPORT_SYMBOL_GPL(spk_serial_synth_probe);
-
/*
* Main loop of the progression thread: keep eating from the buffer
* and push to the serial port, waiting as needed
@@ -109,6 +80,7 @@ void spk_do_catch_up(struct spk_synth *synth)
synth->flush(synth);
continue;
}
+ synth_buffer_skip_nonlatin1();
if (synth_buffer_empty()) {
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
break;
@@ -119,7 +91,7 @@ void spk_do_catch_up(struct spk_synth *synth)
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
if (ch == '\n')
ch = synth->procspeech;
- if (!spk_serial_out(ch)) {
+ if (!synth->io_ops->synth_out(synth, ch)) {
schedule_timeout(msecs_to_jiffies(full_time_val));
continue;
}
@@ -129,7 +101,7 @@ void spk_do_catch_up(struct spk_synth *synth)
delay_time_val = delay_time->u.n.value;
full_time_val = full_time->u.n.value;
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
- if (spk_serial_out(synth->procspeech))
+ if (synth->io_ops->synth_out(synth, synth->procspeech))
schedule_timeout(
msecs_to_jiffies(delay_time_val));
else
@@ -142,30 +114,13 @@ void spk_do_catch_up(struct spk_synth *synth)
synth_buffer_getc();
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
}
- spk_serial_out(synth->procspeech);
+ synth->io_ops->synth_out(synth, synth->procspeech);
}
EXPORT_SYMBOL_GPL(spk_do_catch_up);
-const char *spk_synth_immediate(struct spk_synth *synth, const char *buff)
-{
- u_char ch;
-
- while ((ch = *buff)) {
- if (ch == '\n')
- ch = synth->procspeech;
- if (spk_wait_for_xmitr())
- outb(ch, speakup_info.port_tts);
- else
- return buff;
- buff++;
- }
- return NULL;
-}
-EXPORT_SYMBOL_GPL(spk_synth_immediate);
-
void spk_synth_flush(struct spk_synth *synth)
{
- spk_serial_out(synth->clear);
+ synth->io_ops->synth_out(synth, synth->clear);
}
EXPORT_SYMBOL_GPL(spk_synth_flush);
@@ -180,7 +135,7 @@ int spk_synth_is_alive_restart(struct spk_synth *synth)
{
if (synth->alive)
return 1;
- if (spk_wait_for_xmitr() > 0) {
+ if (spk_wait_for_xmitr(synth) > 0) {
/* restart */
synth->alive = 1;
synth_printf("%s", synth->init);
@@ -255,6 +210,35 @@ void synth_printf(const char *fmt, ...)
}
EXPORT_SYMBOL_GPL(synth_printf);
+void synth_putwc(u16 wc)
+{
+ synth_buffer_add(wc);
+}
+EXPORT_SYMBOL_GPL(synth_putwc);
+
+void synth_putwc_s(u16 wc)
+{
+ synth_buffer_add(wc);
+ synth_start();
+}
+EXPORT_SYMBOL_GPL(synth_putwc_s);
+
+void synth_putws(const u16 *buf)
+{
+ const u16 *p;
+
+ for (p = buf; *p; p++)
+ synth_buffer_add(*p);
+}
+EXPORT_SYMBOL_GPL(synth_putws);
+
+void synth_putws_s(const u16 *buf)
+{
+ synth_putws(buf);
+ synth_start();
+}
+EXPORT_SYMBOL_GPL(synth_putws_s);
+
static int index_count;
static int sentence_count;
@@ -272,7 +256,7 @@ void spk_reset_index_count(int sc)
int synth_supports_indexing(void)
{
- if (synth->get_index != NULL)
+ if (synth->get_index)
return 1;
return 0;
}
@@ -350,7 +334,7 @@ int synth_init(char *synth_name)
int ret = 0;
struct spk_synth *synth = NULL;
- if (synth_name == NULL)
+ if (!synth_name)
return 0;
if (strcmp(synth_name, "none") == 0) {
@@ -362,7 +346,7 @@ int synth_init(char *synth_name)
mutex_lock(&spk_mutex);
/* First, check if we already have it loaded. */
- for (i = 0; i < MAXSYNTHS && synths[i] != NULL; i++)
+ for (i = 0; i < MAXSYNTHS && synths[i]; i++)
if (strcmp(synths[i]->name, synth_name) == 0)
synth = synths[i];
@@ -406,8 +390,8 @@ static int do_synth_init(struct spk_synth *in_synth)
speakup_register_var(var);
if (!spk_quiet_boot)
synth_printf("%s found\n", synth->long_name);
- if (synth->attributes.name && sysfs_create_group(speakup_kobj,
- &synth->attributes) < 0)
+ if (synth->attributes.name &&
+ sysfs_create_group(speakup_kobj, &synth->attributes) < 0)
return -ENOMEM;
synth_flags = synth->flags;
wake_up_interruptible_all(&speakup_event);
@@ -421,7 +405,7 @@ void synth_release(void)
struct var_t *var;
unsigned long flags;
- if (synth == NULL)
+ if (!synth)
return;
spin_lock_irqsave(&speakup_info.spinlock, flags);
pr_info("releasing synth %s\n", synth->name);
@@ -432,7 +416,6 @@ void synth_release(void)
sysfs_remove_group(speakup_kobj, &synth->attributes);
for (var = synth->vars; var->var_id != MAXVARS; var++)
speakup_unregister_var(var->var_id);
- spk_stop_serial_interrupt();
synth->release();
synth = NULL;
}
@@ -444,7 +427,7 @@ int synth_add(struct spk_synth *in_synth)
int status = 0;
mutex_lock(&spk_mutex);
- for (i = 0; i < MAXSYNTHS && synths[i] != NULL; i++)
+ for (i = 0; i < MAXSYNTHS && synths[i]; i++)
/* synth_remove() is responsible for rotating the array down */
if (in_synth == synths[i]) {
mutex_unlock(&spk_mutex);
@@ -471,11 +454,11 @@ void synth_remove(struct spk_synth *in_synth)
mutex_lock(&spk_mutex);
if (synth == in_synth)
synth_release();
- for (i = 0; synths[i] != NULL; i++) {
+ for (i = 0; synths[i]; i++) {
if (in_synth == synths[i])
break;
}
- for ( ; synths[i] != NULL; i++) /* compress table */
+ for ( ; synths[i]; i++) /* compress table */
synths[i] = synths[i + 1];
module_status = 0;
mutex_unlock(&spk_mutex);
diff --git a/drivers/staging/speakup/varhandlers.c b/drivers/staging/speakup/varhandlers.c
index cc984196020f..d37d24e26641 100644
--- a/drivers/staging/speakup/varhandlers.c
+++ b/drivers/staging/speakup/varhandlers.c
@@ -98,7 +98,7 @@ void speakup_register_var(struct var_t *var)
}
}
p_header = var_ptrs[var->var_id];
- if (p_header->data != NULL)
+ if (p_header->data)
return;
p_header->data = var;
switch (p_header->var_type) {
@@ -210,11 +210,11 @@ int spk_set_num_var(int input, struct st_var_header *var, int how)
return -ERANGE;
var_data->u.n.value = val;
- if (var->var_type == VAR_TIME && p_val != NULL) {
+ if (var->var_type == VAR_TIME && p_val) {
*p_val = msecs_to_jiffies(val);
return 0;
}
- if (p_val != NULL)
+ if (p_val)
*p_val = val;
if (var->var_id == PUNC_LEVEL) {
spk_punc_mask = spk_punc_masks[val];
@@ -258,10 +258,11 @@ int spk_set_string_var(const char *page, struct st_var_header *var, int len)
if (var->p_val != var_data->u.s.default_val)
strcpy((char *)var->p_val, var_data->u.s.default_val);
return -ERESTART;
- } else if (var->p_val)
+ } else if (var->p_val) {
strcpy((char *)var->p_val, page);
- else
+ } else {
return -E2BIG;
+ }
return 0;
}
@@ -281,17 +282,18 @@ int spk_set_mask_bits(const char *input, const int which, const int how)
spk_chartab[*cp] &= ~mask;
}
cp = (u_char *)input;
- if (!cp)
+ if (!cp) {
cp = spk_punc_info[which].value;
- else {
+ } else {
for (; *cp; cp++) {
if (*cp < SPACE)
break;
if (mask < PUNC) {
if (!(spk_chartab[*cp] & PUNC))
break;
- } else if (spk_chartab[*cp] & B_NUM)
+ } else if (spk_chartab[*cp] & B_NUM) {
break;
+ }
}
if (*cp)
return -EINVAL;
diff --git a/drivers/staging/typec/Kconfig b/drivers/staging/typec/Kconfig
new file mode 100644
index 000000000000..37a0781b0d0c
--- /dev/null
+++ b/drivers/staging/typec/Kconfig
@@ -0,0 +1,24 @@
+menu "USB Power Delivery and Type-C drivers"
+
+config TYPEC_TCPM
+ tristate "USB Type-C Port Controller Manager"
+ depends on USB
+ select TYPEC
+ help
+ The Type-C Port Controller Manager provides a USB PD and USB Type-C
+ state machine for use with Type-C Port Controllers.
+
+if TYPEC_TCPM
+
+config TYPEC_TCPCI
+ tristate "Type-C Port Controller Interface driver"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ Type-C Port Controller driver for TCPCI-compliant controllers.
+
+source "drivers/staging/typec/fusb302/Kconfig"
+
+endif
+
+endmenu
diff --git a/drivers/staging/typec/Makefile b/drivers/staging/typec/Makefile
new file mode 100644
index 000000000000..30a7e29cbc9e
--- /dev/null
+++ b/drivers/staging/typec/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_TYPEC_TCPM) += tcpm.o
+obj-$(CONFIG_TYPEC_TCPCI) += tcpci.o
+obj-y += fusb302/
diff --git a/drivers/staging/typec/TODO b/drivers/staging/typec/TODO
new file mode 100644
index 000000000000..bc1f97a2d1bf
--- /dev/null
+++ b/drivers/staging/typec/TODO
@@ -0,0 +1,15 @@
+tcpm:
+- Add documentation (at the very least for the API to low level drivers)
+- Split PD code into separate file
+- Check if it makes sense to use tracepoints instead of debugfs for debug logs
+- Implement Alternate Mode handling
+- Address "#if 0" code if not addressed with the above
+- Validate all comments marked with "XXX"; either address or remove comments
+- Add support for USB PD 3.0. While not mandatory, at least fast role swap
+ as well as authentication support would be very desirable.
+
+tcpci:
+- Test with real hardware
+
+Please send patches to Guenter Roeck <linux@roeck-us.net> and copy
+Heikki Krogerus <heikki.krogerus@linux.intel.com>.
diff --git a/drivers/staging/typec/fusb302/Kconfig b/drivers/staging/typec/fusb302/Kconfig
new file mode 100644
index 000000000000..fce099ff39fe
--- /dev/null
+++ b/drivers/staging/typec/fusb302/Kconfig
@@ -0,0 +1,7 @@
+config TYPEC_FUSB302
+ tristate "Fairchild FUSB302 Type-C chip driver"
+ depends on I2C
+ help
+ The Fairchild FUSB302 Type-C chip driver works with the
+ Type-C Port Controller Manager to provide USB PD and USB
+ Type-C functionality.
diff --git a/drivers/staging/typec/fusb302/Makefile b/drivers/staging/typec/fusb302/Makefile
new file mode 100644
index 000000000000..207efa5fbab8
--- /dev/null
+++ b/drivers/staging/typec/fusb302/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_TYPEC_FUSB302) += fusb302.o
diff --git a/drivers/staging/typec/fusb302/TODO b/drivers/staging/typec/fusb302/TODO
new file mode 100644
index 000000000000..4933a1d92c32
--- /dev/null
+++ b/drivers/staging/typec/fusb302/TODO
@@ -0,0 +1,6 @@
+fusb302:
+- Find a better logging scheme, at least not having the same debugging/logging
+ code replicated here and in tcpm
+- Find a non-hacky way to coordinate between PM and I2C access
+- Documentation? The FUSB302 datasheet provides information on the chip to help
+ understand the code. But it may still be helpful to have documentation.
diff --git a/drivers/staging/typec/fusb302/fusb302.c b/drivers/staging/typec/fusb302/fusb302.c
new file mode 100644
index 000000000000..2cee9a952c9b
--- /dev/null
+++ b/drivers/staging/typec/fusb302/fusb302.c
@@ -0,0 +1,1815 @@
+/*
+ * Copyright 2016-2017 Google, Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Fairchild FUSB302 Type-C Chip Driver
+ */
+
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/gpio.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/proc_fs.h>
+#include <linux/regulator/consumer.h>
+#include <linux/sched/clock.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/usb/typec.h>
+#include <linux/workqueue.h>
+
+#include "fusb302_reg.h"
+#include "../tcpm.h"
+#include "../pd.h"
+
+/*
+ * When the device is a SNK, the BC_LVL interrupt is used to monitor the cc
+ * pins for the current capability offered by the SRC. As the FUSB302 chip
+ * fires the BC_LVL interrupt on PD signaling, the cc level should be handled
+ * after a delay to avoid measuring during PD activity. The delay is slightly
+ * longer than PD_T_PD_DEBOUNCE (10-20ms).
+ */
+#define T_BC_LVL_DEBOUNCE_DELAY_MS 30
+
+enum toggling_mode {
+ TOGGLINE_MODE_OFF,
+ TOGGLING_MODE_DRP,
+ TOGGLING_MODE_SNK,
+ TOGGLING_MODE_SRC,
+};
+
+static const char * const toggling_mode_name[] = {
+ [TOGGLINE_MODE_OFF] = "toggling_OFF",
+ [TOGGLING_MODE_DRP] = "toggling_DRP",
+ [TOGGLING_MODE_SNK] = "toggling_SNK",
+ [TOGGLING_MODE_SRC] = "toggling_SRC",
+};
+
+enum src_current_status {
+ SRC_CURRENT_DEFAULT,
+ SRC_CURRENT_MEDIUM,
+ SRC_CURRENT_HIGH,
+};
+
+static const u8 ra_mda_value[] = {
+ [SRC_CURRENT_DEFAULT] = 4, /* 210mV */
+ [SRC_CURRENT_MEDIUM] = 9, /* 420mV */
+ [SRC_CURRENT_HIGH] = 18, /* 798mV */
+};
+
+static const u8 rd_mda_value[] = {
+ [SRC_CURRENT_DEFAULT] = 38, /* 1638mV */
+ [SRC_CURRENT_MEDIUM] = 38, /* 1638mV */
+ [SRC_CURRENT_HIGH] = 61, /* 2604mV */
+};
+
+#define LOG_BUFFER_ENTRIES 1024
+#define LOG_BUFFER_ENTRY_SIZE 128
+
+struct fusb302_chip {
+ struct device *dev;
+ struct i2c_client *i2c_client;
+ struct tcpm_port *tcpm_port;
+ struct tcpc_dev tcpc_dev;
+
+ struct regulator *vbus;
+
+ int gpio_int_n;
+ int gpio_int_n_irq;
+
+ struct workqueue_struct *wq;
+ struct delayed_work bc_lvl_handler;
+
+ atomic_t pm_suspend;
+ atomic_t i2c_busy;
+
+ /* lock for sharing chip states */
+ struct mutex lock;
+
+ /* chip status */
+ enum toggling_mode toggling_mode;
+ enum src_current_status src_current_status;
+ bool intr_togdone;
+ bool intr_bc_lvl;
+ bool intr_comp_chng;
+
+ /* port status */
+ bool pull_up;
+ bool vconn_on;
+ bool vbus_on;
+ bool charge_on;
+ bool vbus_present;
+ enum typec_cc_polarity cc_polarity;
+ enum typec_cc_status cc1;
+ enum typec_cc_status cc2;
+
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *dentry;
+ /* lock for log buffer access */
+ struct mutex logbuffer_lock;
+ int logbuffer_head;
+ int logbuffer_tail;
+ u8 *logbuffer[LOG_BUFFER_ENTRIES];
+#endif
+};
+
+/*
+ * Logging
+ */
+
+#ifdef CONFIG_DEBUG_FS
+
+static bool fusb302_log_full(struct fusb302_chip *chip)
+{
+ return chip->logbuffer_tail ==
+ (chip->logbuffer_head + 1) % LOG_BUFFER_ENTRIES;
+}
+
+static void _fusb302_log(struct fusb302_chip *chip, const char *fmt,
+ va_list args)
+{
+ char tmpbuffer[LOG_BUFFER_ENTRY_SIZE];
+ u64 ts_nsec = local_clock();
+ unsigned long rem_nsec;
+
+ if (!chip->logbuffer[chip->logbuffer_head]) {
+ chip->logbuffer[chip->logbuffer_head] =
+ kzalloc(LOG_BUFFER_ENTRY_SIZE, GFP_KERNEL);
+ if (!chip->logbuffer[chip->logbuffer_head])
+ return;
+ }
+
+ vsnprintf(tmpbuffer, sizeof(tmpbuffer), fmt, args);
+
+ mutex_lock(&chip->logbuffer_lock);
+
+ if (fusb302_log_full(chip)) {
+ chip->logbuffer_head = max(chip->logbuffer_head - 1, 0);
+ strlcpy(tmpbuffer, "overflow", sizeof(tmpbuffer));
+ }
+
+ if (chip->logbuffer_head < 0 ||
+ chip->logbuffer_head >= LOG_BUFFER_ENTRIES) {
+ dev_warn(chip->dev,
+ "Bad log buffer index %d\n", chip->logbuffer_head);
+ goto abort;
+ }
+
+ if (!chip->logbuffer[chip->logbuffer_head]) {
+ dev_warn(chip->dev,
+ "Log buffer index %d is NULL\n", chip->logbuffer_head);
+ goto abort;
+ }
+
+ rem_nsec = do_div(ts_nsec, 1000000000);
+ scnprintf(chip->logbuffer[chip->logbuffer_head],
+ LOG_BUFFER_ENTRY_SIZE, "[%5lu.%06lu] %s",
+ (unsigned long)ts_nsec, rem_nsec / 1000,
+ tmpbuffer);
+ chip->logbuffer_head = (chip->logbuffer_head + 1) % LOG_BUFFER_ENTRIES;
+
+abort:
+ mutex_unlock(&chip->logbuffer_lock);
+}
+
+static void fusb302_log(struct fusb302_chip *chip, const char *fmt, ...)
+{
+ va_list args;
+
+ va_start(args, fmt);
+ _fusb302_log(chip, fmt, args);
+ va_end(args);
+}
+
+static int fusb302_seq_show(struct seq_file *s, void *v)
+{
+ struct fusb302_chip *chip = (struct fusb302_chip *)s->private;
+ int tail;
+
+ mutex_lock(&chip->logbuffer_lock);
+ tail = chip->logbuffer_tail;
+ while (tail != chip->logbuffer_head) {
+ seq_printf(s, "%s\n", chip->logbuffer[tail]);
+ tail = (tail + 1) % LOG_BUFFER_ENTRIES;
+ }
+ if (!seq_has_overflowed(s))
+ chip->logbuffer_tail = tail;
+ mutex_unlock(&chip->logbuffer_lock);
+
+ return 0;
+}
+
+static int fusb302_debug_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, fusb302_seq_show, inode->i_private);
+}
+
+static const struct file_operations fusb302_debug_operations = {
+ .open = fusb302_debug_open,
+ .llseek = seq_lseek,
+ .read = seq_read,
+ .release = single_release,
+};
+
+static struct dentry *rootdir;
+
+static int fusb302_debugfs_init(struct fusb302_chip *chip)
+{
+ mutex_init(&chip->logbuffer_lock);
+ if (!rootdir) {
+ rootdir = debugfs_create_dir("fusb302", NULL);
+ if (!rootdir)
+ return -ENOMEM;
+ }
+
+ chip->dentry = debugfs_create_file(dev_name(chip->dev),
+ S_IFREG | 0444, rootdir,
+ chip, &fusb302_debug_operations);
+
+ return 0;
+}
+
+static void fusb302_debugfs_exit(struct fusb302_chip *chip)
+{
+ debugfs_remove(chip->dentry);
+}
+
+#else
+
+static void fusb302_log(const struct fusb302_chip *chip,
+ const char *fmt, ...) { }
+static int fusb302_debugfs_init(const struct fusb302_chip *chip) { return 0; }
+static void fusb302_debugfs_exit(const struct fusb302_chip *chip) { }
+
+#endif
+
+#define FUSB302_RESUME_RETRY 10
+#define FUSB302_RESUME_RETRY_SLEEP 50
+static int fusb302_i2c_write(struct fusb302_chip *chip,
+ u8 address, u8 data)
+{
+ int retry_cnt;
+ int ret = 0;
+
+ atomic_set(&chip->i2c_busy, 1);
+ for (retry_cnt = 0; retry_cnt < FUSB302_RESUME_RETRY; retry_cnt++) {
+ if (atomic_read(&chip->pm_suspend)) {
+ pr_err("fusb302_i2c: pm suspend, retry %d/%d\n",
+ retry_cnt + 1, FUSB302_RESUME_RETRY);
+ msleep(FUSB302_RESUME_RETRY_SLEEP);
+ } else {
+ break;
+ }
+ }
+ ret = i2c_smbus_write_byte_data(chip->i2c_client, address, data);
+ if (ret < 0)
+ fusb302_log(chip, "cannot write 0x%02x to 0x%02x, ret=%d",
+ data, address, ret);
+ atomic_set(&chip->i2c_busy, 0);
+
+ return ret;
+}
+
+static int fusb302_i2c_block_write(struct fusb302_chip *chip, u8 address,
+ u8 length, const u8 *data)
+{
+ int retry_cnt;
+ int ret = 0;
+
+ if (length <= 0)
+ return ret;
+ atomic_set(&chip->i2c_busy, 1);
+ for (retry_cnt = 0; retry_cnt < FUSB302_RESUME_RETRY; retry_cnt++) {
+ if (atomic_read(&chip->pm_suspend)) {
+ pr_err("fusb302_i2c: pm suspend, retry %d/%d\n",
+ retry_cnt + 1, FUSB302_RESUME_RETRY);
+ msleep(FUSB302_RESUME_RETRY_SLEEP);
+ } else {
+ break;
+ }
+ }
+ ret = i2c_smbus_write_i2c_block_data(chip->i2c_client, address,
+ length, data);
+ if (ret < 0)
+ fusb302_log(chip, "cannot block write 0x%02x, len=%d, ret=%d",
+ address, length, ret);
+ atomic_set(&chip->i2c_busy, 0);
+
+ return ret;
+}
+
+static int fusb302_i2c_read(struct fusb302_chip *chip,
+ u8 address, u8 *data)
+{
+ int retry_cnt;
+ int ret = 0;
+
+ atomic_set(&chip->i2c_busy, 1);
+ for (retry_cnt = 0; retry_cnt < FUSB302_RESUME_RETRY; retry_cnt++) {
+ if (atomic_read(&chip->pm_suspend)) {
+ pr_err("fusb302_i2c: pm suspend, retry %d/%d\n",
+ retry_cnt + 1, FUSB302_RESUME_RETRY);
+ msleep(FUSB302_RESUME_RETRY_SLEEP);
+ } else {
+ break;
+ }
+ }
+ ret = i2c_smbus_read_byte_data(chip->i2c_client, address);
+ *data = (u8)ret;
+ if (ret < 0)
+ fusb302_log(chip, "cannot read %02x, ret=%d", address, ret);
+ atomic_set(&chip->i2c_busy, 0);
+
+ return ret;
+}
+
+static int fusb302_i2c_block_read(struct fusb302_chip *chip, u8 address,
+ u8 length, u8 *data)
+{
+ int retry_cnt;
+ int ret = 0;
+
+ if (length <= 0)
+ return ret;
+ atomic_set(&chip->i2c_busy, 1);
+ for (retry_cnt = 0; retry_cnt < FUSB302_RESUME_RETRY; retry_cnt++) {
+ if (atomic_read(&chip->pm_suspend)) {
+ pr_err("fusb302_i2c: pm suspend, retry %d/%d\n",
+ retry_cnt + 1, FUSB302_RESUME_RETRY);
+ msleep(FUSB302_RESUME_RETRY_SLEEP);
+ } else {
+ break;
+ }
+ }
+ ret = i2c_smbus_read_i2c_block_data(chip->i2c_client, address,
+ length, data);
+ atomic_set(&chip->i2c_busy, 0);
+ if (ret < 0) {
+ fusb302_log(chip, "cannot block read 0x%02x, len=%d, ret=%d",
+ address, length, ret);
+ return ret;
+ }
+ if (ret != length) {
+ fusb302_log(chip, "only read %d/%d bytes from 0x%02x",
+ ret, length, address);
+ return -EIO;
+ }
+
+ return ret;
+}
+
+static int fusb302_i2c_mask_write(struct fusb302_chip *chip, u8 address,
+ u8 mask, u8 value)
+{
+ int ret = 0;
+ u8 data;
+
+ ret = fusb302_i2c_read(chip, address, &data);
+ if (ret < 0)
+ return ret;
+ data &= ~mask;
+ data |= value;
+ ret = fusb302_i2c_write(chip, address, data);
+ if (ret < 0)
+ return ret;
+
+ return ret;
+}
+
+static int fusb302_i2c_set_bits(struct fusb302_chip *chip, u8 address,
+ u8 set_bits)
+{
+ return fusb302_i2c_mask_write(chip, address, 0x00, set_bits);
+}
+
+static int fusb302_i2c_clear_bits(struct fusb302_chip *chip, u8 address,
+ u8 clear_bits)
+{
+ return fusb302_i2c_mask_write(chip, address, clear_bits, 0x00);
+}
+
+static int fusb302_sw_reset(struct fusb302_chip *chip)
+{
+ int ret = 0;
+
+ ret = fusb302_i2c_write(chip, FUSB_REG_RESET,
+ FUSB_REG_RESET_SW_RESET);
+ if (ret < 0)
+ fusb302_log(chip, "cannot sw reset the chip, ret=%d", ret);
+ else
+ fusb302_log(chip, "sw reset");
+
+ return ret;
+}
+
+static int fusb302_enable_tx_auto_retries(struct fusb302_chip *chip)
+{
+ int ret = 0;
+
+ ret = fusb302_i2c_set_bits(chip, FUSB_REG_CONTROL3,
+ FUSB_REG_CONTROL3_N_RETRIES_3 |
+ FUSB_REG_CONTROL3_AUTO_RETRY);
+
+ return ret;
+}
+
+/*
+ * initialize interrupt on the chip
+ * - unmasked interrupt: VBUS_OK
+ */
+static int fusb302_init_interrupt(struct fusb302_chip *chip)
+{
+ int ret = 0;
+
+ ret = fusb302_i2c_write(chip, FUSB_REG_MASK,
+ 0xFF & ~FUSB_REG_MASK_VBUSOK);
+ if (ret < 0)
+ return ret;
+ ret = fusb302_i2c_write(chip, FUSB_REG_MASKA, 0xFF);
+ if (ret < 0)
+ return ret;
+ ret = fusb302_i2c_write(chip, FUSB_REG_MASKB, 0xFF);
+ if (ret < 0)
+ return ret;
+ ret = fusb302_i2c_clear_bits(chip, FUSB_REG_CONTROL0,
+ FUSB_REG_CONTROL0_INT_MASK);
+ if (ret < 0)
+ return ret;
+
+ return ret;
+}
+
+static int fusb302_set_power_mode(struct fusb302_chip *chip, u8 power_mode)
+{
+ int ret = 0;
+
+ ret = fusb302_i2c_write(chip, FUSB_REG_POWER, power_mode);
+
+ return ret;
+}
+
+static int tcpm_init(struct tcpc_dev *dev)
+{
+ struct fusb302_chip *chip = container_of(dev, struct fusb302_chip,
+ tcpc_dev);
+ int ret = 0;
+ u8 data;
+
+ ret = fusb302_sw_reset(chip);
+ if (ret < 0)
+ return ret;
+ ret = fusb302_enable_tx_auto_retries(chip);
+ if (ret < 0)
+ return ret;
+ ret = fusb302_init_interrupt(chip);
+ if (ret < 0)
+ return ret;
+ ret = fusb302_set_power_mode(chip, FUSB_REG_POWER_PWR_ALL);
+ if (ret < 0)
+ return ret;
+ ret = fusb302_i2c_read(chip, FUSB_REG_STATUS0, &data);
+ if (ret < 0)
+ return ret;
+ chip->vbus_present = !!(data & FUSB_REG_STATUS0_VBUSOK);
+ ret = fusb302_i2c_read(chip, FUSB_REG_DEVICE_ID, &data);
+ if (ret < 0)
+ return ret;
+ fusb302_log(chip, "fusb302 device ID: 0x%02x", data);
+
+ return ret;
+}
+
+static int tcpm_get_vbus(struct tcpc_dev *dev)
+{
+ struct fusb302_chip *chip = container_of(dev, struct fusb302_chip,
+ tcpc_dev);
+ int ret = 0;
+
+ mutex_lock(&chip->lock);
+ ret = chip->vbus_present ? 1 : 0;
+ mutex_unlock(&chip->lock);
+
+ return ret;
+}
+
+static int fusb302_set_cc_pull(struct fusb302_chip *chip,
+ bool pull_up, bool pull_down)
+{
+ int ret = 0;
+ u8 data = 0x00;
+ u8 mask = FUSB_REG_SWITCHES0_CC1_PU_EN |
+ FUSB_REG_SWITCHES0_CC2_PU_EN |
+ FUSB_REG_SWITCHES0_CC1_PD_EN |
+ FUSB_REG_SWITCHES0_CC2_PD_EN;
+
+ if (pull_up)
+ data |= (chip->cc_polarity == TYPEC_POLARITY_CC1) ?
+ FUSB_REG_SWITCHES0_CC1_PU_EN :
+ FUSB_REG_SWITCHES0_CC2_PU_EN;
+ if (pull_down)
+ data |= FUSB_REG_SWITCHES0_CC1_PD_EN |
+ FUSB_REG_SWITCHES0_CC2_PD_EN;
+ ret = fusb302_i2c_mask_write(chip, FUSB_REG_SWITCHES0,
+ mask, data);
+ if (ret < 0)
+ return ret;
+ chip->pull_up = pull_up;
+
+ return ret;
+}
+
+static int fusb302_set_src_current(struct fusb302_chip *chip,
+ enum src_current_status status)
+{
+ int ret = 0;
+
+ chip->src_current_status = status;
+ switch (status) {
+ case SRC_CURRENT_DEFAULT:
+ ret = fusb302_i2c_mask_write(chip, FUSB_REG_CONTROL0,
+ FUSB_REG_CONTROL0_HOST_CUR_MASK,
+ FUSB_REG_CONTROL0_HOST_CUR_DEF);
+ break;
+ case SRC_CURRENT_MEDIUM:
+ ret = fusb302_i2c_mask_write(chip, FUSB_REG_CONTROL0,
+ FUSB_REG_CONTROL0_HOST_CUR_MASK,
+ FUSB_REG_CONTROL0_HOST_CUR_MED);
+ break;
+ case SRC_CURRENT_HIGH:
+ ret = fusb302_i2c_mask_write(chip, FUSB_REG_CONTROL0,
+ FUSB_REG_CONTROL0_HOST_CUR_MASK,
+ FUSB_REG_CONTROL0_HOST_CUR_HIGH);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static int fusb302_set_toggling(struct fusb302_chip *chip,
+ enum toggling_mode mode)
+{
+ int ret = 0;
+
+ /* first disable toggling */
+ ret = fusb302_i2c_clear_bits(chip, FUSB_REG_CONTROL2,
+ FUSB_REG_CONTROL2_TOGGLE);
+ if (ret < 0)
+ return ret;
+ /* mask interrupts for SRC or SNK */
+ ret = fusb302_i2c_set_bits(chip, FUSB_REG_MASK,
+ FUSB_REG_MASK_BC_LVL |
+ FUSB_REG_MASK_COMP_CHNG);
+ if (ret < 0)
+ return ret;
+ chip->intr_bc_lvl = false;
+ chip->intr_comp_chng = false;
+ /* configure toggling mode: none/snk/src/drp */
+ switch (mode) {
+ case TOGGLINE_MODE_OFF:
+ ret = fusb302_i2c_mask_write(chip, FUSB_REG_CONTROL2,
+ FUSB_REG_CONTROL2_MODE_MASK,
+ FUSB_REG_CONTROL2_MODE_NONE);
+ if (ret < 0)
+ return ret;
+ break;
+ case TOGGLING_MODE_SNK:
+ ret = fusb302_i2c_mask_write(chip, FUSB_REG_CONTROL2,
+ FUSB_REG_CONTROL2_MODE_MASK,
+ FUSB_REG_CONTROL2_MODE_UFP);
+ if (ret < 0)
+ return ret;
+ break;
+ case TOGGLING_MODE_SRC:
+ ret = fusb302_i2c_mask_write(chip, FUSB_REG_CONTROL2,
+ FUSB_REG_CONTROL2_MODE_MASK,
+ FUSB_REG_CONTROL2_MODE_DFP);
+ if (ret < 0)
+ return ret;
+ break;
+ case TOGGLING_MODE_DRP:
+ ret = fusb302_i2c_mask_write(chip, FUSB_REG_CONTROL2,
+ FUSB_REG_CONTROL2_MODE_MASK,
+ FUSB_REG_CONTROL2_MODE_DRP);
+ if (ret < 0)
+ return ret;
+ break;
+ default:
+ break;
+ }
+
+ if (mode == TOGGLINE_MODE_OFF) {
+ /* mask TOGDONE interrupt */
+ ret = fusb302_i2c_set_bits(chip, FUSB_REG_MASKA,
+ FUSB_REG_MASKA_TOGDONE);
+ if (ret < 0)
+ return ret;
+ chip->intr_togdone = false;
+ } else {
+ /* unmask TOGDONE interrupt */
+ ret = fusb302_i2c_clear_bits(chip, FUSB_REG_MASKA,
+ FUSB_REG_MASKA_TOGDONE);
+ if (ret < 0)
+ return ret;
+ chip->intr_togdone = true;
+ /* start toggling */
+ ret = fusb302_i2c_set_bits(chip, FUSB_REG_CONTROL2,
+ FUSB_REG_CONTROL2_TOGGLE);
+ if (ret < 0)
+ return ret;
+ /* during toggling, consider cc as Open */
+ chip->cc1 = TYPEC_CC_OPEN;
+ chip->cc2 = TYPEC_CC_OPEN;
+ }
+ chip->toggling_mode = mode;
+
+ return ret;
+}
+
+static const char * const typec_cc_status_name[] = {
+ [TYPEC_CC_OPEN] = "Open",
+ [TYPEC_CC_RA] = "Ra",
+ [TYPEC_CC_RD] = "Rd",
+ [TYPEC_CC_RP_DEF] = "Rp-def",
+ [TYPEC_CC_RP_1_5] = "Rp-1.5",
+ [TYPEC_CC_RP_3_0] = "Rp-3.0",
+};
+
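+/*
+ * Map the CC status requested by tcpm to the FUSB302 host-current setting:
+ * Rp-def advertises the default USB current, Rp-1.5 advertises 1.5 A and
+ * Rp-3.0 advertises 3.0 A on the pulled-up CC line.
+ */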
+static const enum src_current_status cc_src_current[] = {
+ [TYPEC_CC_OPEN] = SRC_CURRENT_DEFAULT,
+ [TYPEC_CC_RA] = SRC_CURRENT_DEFAULT,
+ [TYPEC_CC_RD] = SRC_CURRENT_DEFAULT,
+ [TYPEC_CC_RP_DEF] = SRC_CURRENT_DEFAULT,
+ [TYPEC_CC_RP_1_5] = SRC_CURRENT_MEDIUM,
+ [TYPEC_CC_RP_3_0] = SRC_CURRENT_HIGH,
+};
+
+static int tcpm_set_cc(struct tcpc_dev *dev, enum typec_cc_status cc)
+{
+ struct fusb302_chip *chip = container_of(dev, struct fusb302_chip,
+ tcpc_dev);
+ int ret = 0;
+ bool pull_up, pull_down;
+ u8 rd_mda;
+
+ mutex_lock(&chip->lock);
+ switch (cc) {
+ case TYPEC_CC_OPEN:
+ pull_up = false;
+ pull_down = false;
+ break;
+ case TYPEC_CC_RD:
+ pull_up = false;
+ pull_down = true;
+ break;
+ case TYPEC_CC_RP_DEF:
+ case TYPEC_CC_RP_1_5:
+ case TYPEC_CC_RP_3_0:
+ pull_up = true;
+ pull_down = false;
+ break;
+ default:
+ fusb302_log(chip, "unsupported cc value %s",
+ typec_cc_status_name[cc]);
+ ret = -EINVAL;
+ goto done;
+ }
+ ret = fusb302_set_toggling(chip, TOGGLINE_MODE_OFF);
+ if (ret < 0) {
+ fusb302_log(chip, "cannot stop toggling, ret=%d", ret);
+ goto done;
+ }
+ ret = fusb302_set_cc_pull(chip, pull_up, pull_down);
+ if (ret < 0) {
+ fusb302_log(chip,
+ "cannot set cc pulling up %s, down %s, ret = %d",
+ pull_up ? "True" : "False",
+ pull_down ? "True" : "False",
+ ret);
+ goto done;
+ }
+ /* reset the cc status */
+ chip->cc1 = TYPEC_CC_OPEN;
+ chip->cc2 = TYPEC_CC_OPEN;
+ /* adjust current for SRC */
+ if (pull_up) {
+ ret = fusb302_set_src_current(chip, cc_src_current[cc]);
+ if (ret < 0) {
+ fusb302_log(chip, "cannot set src current %s, ret=%d",
+ typec_cc_status_name[cc], ret);
+ goto done;
+ }
+ }
+ /* enable/disable interrupts, BC_LVL for SNK and COMP_CHNG for SRC */
+ if (pull_up) {
+ rd_mda = rd_mda_value[cc_src_current[cc]];
+ ret = fusb302_i2c_write(chip, FUSB_REG_MEASURE, rd_mda);
+ if (ret < 0) {
+ fusb302_log(chip,
+ "cannot set SRC measure value, ret=%d",
+ ret);
+ goto done;
+ }
+ ret = fusb302_i2c_mask_write(chip, FUSB_REG_MASK,
+ FUSB_REG_MASK_BC_LVL |
+ FUSB_REG_MASK_COMP_CHNG,
+ FUSB_REG_MASK_COMP_CHNG);
+ if (ret < 0) {
+ fusb302_log(chip, "cannot set SRC interrupt, ret=%d",
+ ret);
+ goto done;
+ }
+ chip->intr_bc_lvl = false;
+ chip->intr_comp_chng = true;
+ }
+ if (pull_down) {
+ ret = fusb302_i2c_mask_write(chip, FUSB_REG_MASK,
+ FUSB_REG_MASK_BC_LVL |
+ FUSB_REG_MASK_COMP_CHNG,
+ FUSB_REG_MASK_BC_LVL);
+ if (ret < 0) {
+ fusb302_log(chip, "cannot set SRC interrupt, ret=%d",
+ ret);
+ goto done;
+ }
+ chip->intr_bc_lvl = true;
+ chip->intr_comp_chng = false;
+ }
+ fusb302_log(chip, "cc := %s", typec_cc_status_name[cc]);
+done:
+ mutex_unlock(&chip->lock);
+
+ return ret;
+}
+
+static int tcpm_get_cc(struct tcpc_dev *dev, enum typec_cc_status *cc1,
+ enum typec_cc_status *cc2)
+{
+ struct fusb302_chip *chip = container_of(dev, struct fusb302_chip,
+ tcpc_dev);
+
+ mutex_lock(&chip->lock);
+ *cc1 = chip->cc1;
+ *cc2 = chip->cc2;
+ fusb302_log(chip, "cc1=%s, cc2=%s", typec_cc_status_name[*cc1],
+ typec_cc_status_name[*cc2]);
+ mutex_unlock(&chip->lock);
+
+ return 0;
+}
+
+static int tcpm_set_polarity(struct tcpc_dev *dev,
+ enum typec_cc_polarity polarity)
+{
+ return 0;
+}
+
+static int tcpm_set_vconn(struct tcpc_dev *dev, bool on)
+{
+ struct fusb302_chip *chip = container_of(dev, struct fusb302_chip,
+ tcpc_dev);
+ int ret = 0;
+ u8 switches0_data = 0x00;
+ u8 switches0_mask = FUSB_REG_SWITCHES0_VCONN_CC1 |
+ FUSB_REG_SWITCHES0_VCONN_CC2;
+
+ mutex_lock(&chip->lock);
+ if (chip->vconn_on == on) {
+ fusb302_log(chip, "vconn is already %s", on ? "On" : "Off");
+ goto done;
+ }
+ if (on) {
+ switches0_data = (chip->cc_polarity == TYPEC_POLARITY_CC1) ?
+ FUSB_REG_SWITCHES0_VCONN_CC2 :
+ FUSB_REG_SWITCHES0_VCONN_CC1;
+ }
+ ret = fusb302_i2c_mask_write(chip, FUSB_REG_SWITCHES0,
+ switches0_mask, switches0_data);
+ if (ret < 0)
+ goto done;
+ chip->vconn_on = on;
+ fusb302_log(chip, "vconn := %s", on ? "On" : "Off");
+done:
+ mutex_unlock(&chip->lock);
+
+ return ret;
+}
+
+static int tcpm_set_vbus(struct tcpc_dev *dev, bool on, bool charge)
+{
+ struct fusb302_chip *chip = container_of(dev, struct fusb302_chip,
+ tcpc_dev);
+ int ret = 0;
+
+ mutex_lock(&chip->lock);
+ if (chip->vbus_on == on) {
+ fusb302_log(chip, "vbus is already %s", on ? "On" : "Off");
+ } else {
+ if (on)
+ ret = regulator_enable(chip->vbus);
+ else
+ ret = regulator_disable(chip->vbus);
+ if (ret < 0) {
+ fusb302_log(chip, "cannot %s vbus regulator, ret=%d",
+ on ? "enable" : "disable", ret);
+ goto done;
+ }
+ chip->vbus_on = on;
+ fusb302_log(chip, "vbus := %s", on ? "On" : "Off");
+ }
+ if (chip->charge_on == charge)
+ fusb302_log(chip, "charge is already %s",
+ charge ? "On" : "Off");
+ else
+ chip->charge_on = charge;
+
+done:
+ mutex_unlock(&chip->lock);
+
+ return ret;
+}
+
+static int tcpm_set_current_limit(struct tcpc_dev *dev, u32 max_ma, u32 mv)
+{
+ struct fusb302_chip *chip = container_of(dev, struct fusb302_chip,
+ tcpc_dev);
+
+ fusb302_log(chip, "current limit: %d ma, %d mv (not implemented)",
+ max_ma, mv);
+
+ return 0;
+}
+
+static int fusb302_pd_tx_flush(struct fusb302_chip *chip)
+{
+ return fusb302_i2c_set_bits(chip, FUSB_REG_CONTROL0,
+ FUSB_REG_CONTROL0_TX_FLUSH);
+}
+
+static int fusb302_pd_rx_flush(struct fusb302_chip *chip)
+{
+ return fusb302_i2c_set_bits(chip, FUSB_REG_CONTROL1,
+ FUSB_REG_CONTROL1_RX_FLUSH);
+}
+
+static int fusb302_pd_set_auto_goodcrc(struct fusb302_chip *chip, bool on)
+{
+ if (on)
+ return fusb302_i2c_set_bits(chip, FUSB_REG_SWITCHES1,
+ FUSB_REG_SWITCHES1_AUTO_GCRC);
+ return fusb302_i2c_clear_bits(chip, FUSB_REG_SWITCHES1,
+ FUSB_REG_SWITCHES1_AUTO_GCRC);
+}
+
+static int fusb302_pd_set_interrupts(struct fusb302_chip *chip, bool on)
+{
+ int ret = 0;
+ u8 mask_interrupts = FUSB_REG_MASK_COLLISION;
+ u8 maska_interrupts = FUSB_REG_MASKA_RETRYFAIL |
+ FUSB_REG_MASKA_HARDSENT |
+ FUSB_REG_MASKA_TX_SUCCESS |
+ FUSB_REG_MASKA_HARDRESET;
+ u8 maskb_interrupts = FUSB_REG_MASKB_GCRCSENT;
+
+ ret = on ?
+ fusb302_i2c_clear_bits(chip, FUSB_REG_MASK, mask_interrupts) :
+ fusb302_i2c_set_bits(chip, FUSB_REG_MASK, mask_interrupts);
+ if (ret < 0)
+ return ret;
+ ret = on ?
+ fusb302_i2c_clear_bits(chip, FUSB_REG_MASKA, maska_interrupts) :
+ fusb302_i2c_set_bits(chip, FUSB_REG_MASKA, maska_interrupts);
+ if (ret < 0)
+ return ret;
+ ret = on ?
+ fusb302_i2c_clear_bits(chip, FUSB_REG_MASKB, maskb_interrupts) :
+ fusb302_i2c_set_bits(chip, FUSB_REG_MASKB, maskb_interrupts);
+ return ret;
+}
+
+static int tcpm_set_pd_rx(struct tcpc_dev *dev, bool on)
+{
+ struct fusb302_chip *chip = container_of(dev, struct fusb302_chip,
+ tcpc_dev);
+ int ret = 0;
+
+ mutex_lock(&chip->lock);
+ ret = fusb302_pd_rx_flush(chip);
+ if (ret < 0) {
+ fusb302_log(chip, "cannot flush pd rx buffer, ret=%d", ret);
+ goto done;
+ }
+ ret = fusb302_pd_tx_flush(chip);
+ if (ret < 0) {
+ fusb302_log(chip, "cannot flush pd tx buffer, ret=%d", ret);
+ goto done;
+ }
+ ret = fusb302_pd_set_auto_goodcrc(chip, on);
+ if (ret < 0) {
+ fusb302_log(chip, "cannot turn %s auto GCRC, ret=%d",
+ on ? "on" : "off", ret);
+ goto done;
+ }
+ ret = fusb302_pd_set_interrupts(chip, on);
+ if (ret < 0) {
+ fusb302_log(chip, "cannot turn %s pd interrupts, ret=%d",
+ on ? "on" : "off", ret);
+ goto done;
+ }
+ fusb302_log(chip, "pd := %s", on ? "on" : "off");
+done:
+ mutex_unlock(&chip->lock);
+
+ return ret;
+}
+
+static const char * const typec_role_name[] = {
+ [TYPEC_SINK] = "Sink",
+ [TYPEC_SOURCE] = "Source",
+};
+
+static const char * const typec_data_role_name[] = {
+ [TYPEC_DEVICE] = "Device",
+ [TYPEC_HOST] = "Host",
+};
+
+static int tcpm_set_roles(struct tcpc_dev *dev, bool attached,
+ enum typec_role pwr, enum typec_data_role data)
+{
+ struct fusb302_chip *chip = container_of(dev, struct fusb302_chip,
+ tcpc_dev);
+ int ret = 0;
+ u8 switches1_mask = FUSB_REG_SWITCHES1_POWERROLE |
+ FUSB_REG_SWITCHES1_DATAROLE;
+ u8 switches1_data = 0x00;
+
+ mutex_lock(&chip->lock);
+ if (pwr == TYPEC_SOURCE)
+ switches1_data |= FUSB_REG_SWITCHES1_POWERROLE;
+ if (data == TYPEC_HOST)
+ switches1_data |= FUSB_REG_SWITCHES1_DATAROLE;
+ ret = fusb302_i2c_mask_write(chip, FUSB_REG_SWITCHES1,
+ switches1_mask, switches1_data);
+ if (ret < 0) {
+ fusb302_log(chip, "unable to set pd header %s, %s, ret=%d",
+ typec_role_name[pwr], typec_data_role_name[data],
+ ret);
+ goto done;
+ }
+ fusb302_log(chip, "pd header := %s, %s", typec_role_name[pwr],
+ typec_data_role_name[data]);
+done:
+ mutex_unlock(&chip->lock);
+
+ return ret;
+}
+
+static int tcpm_start_drp_toggling(struct tcpc_dev *dev,
+ enum typec_cc_status cc)
+{
+ struct fusb302_chip *chip = container_of(dev, struct fusb302_chip,
+ tcpc_dev);
+ int ret = 0;
+
+ mutex_lock(&chip->lock);
+ ret = fusb302_set_src_current(chip, cc_src_current[cc]);
+ if (ret < 0) {
+ fusb302_log(chip, "unable to set src current %s, ret=%d",
+ typec_cc_status_name[cc], ret);
+ goto done;
+ }
+ ret = fusb302_set_toggling(chip, TOGGLING_MODE_DRP);
+ if (ret < 0) {
+ fusb302_log(chip,
+ "unable to start drp toggling, ret=%d", ret);
+ goto done;
+ }
+ fusb302_log(chip, "start drp toggling");
+done:
+ mutex_unlock(&chip->lock);
+
+ return ret;
+}
+
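+/*
+ * The TX FIFO is fed a framed packet: three SYNC-1 tokens and one SYNC-2
+ * token for SOP, a PACKSYM token whose low 5 bits carry the byte count of
+ * header plus payload, the 2-byte message header, the payload, and finally
+ * the JAMCRC, EOP, TXOFF and TXON tokens to append the CRC, terminate the
+ * packet and start transmission.
+ */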
+static int fusb302_pd_send_message(struct fusb302_chip *chip,
+ const struct pd_message *msg)
+{
+ int ret = 0;
+ u8 buf[40];
+ u8 pos = 0;
+ int len;
+
+ /* SOP tokens */
+ buf[pos++] = FUSB302_TKN_SYNC1;
+ buf[pos++] = FUSB302_TKN_SYNC1;
+ buf[pos++] = FUSB302_TKN_SYNC1;
+ buf[pos++] = FUSB302_TKN_SYNC2;
+
+ len = pd_header_cnt_le(msg->header) * 4;
+ /* plus 2 bytes for the message header */
+ len += 2;
+ if (len > 0x1F) {
+ fusb302_log(chip,
+ "PD message too long %d (incl. header)", len);
+ return -EINVAL;
+ }
+ /* packsym tells the FUSB302 chip that the next X bytes are payload */
+ buf[pos++] = FUSB302_TKN_PACKSYM | (len & 0x1F);
+ buf[pos++] = msg->header & 0xFF;
+ buf[pos++] = (msg->header >> 8) & 0xFF;
+
+ len -= 2;
+ memcpy(&buf[pos], msg->payload, len);
+ pos += len;
+
+ /* CRC */
+ buf[pos++] = FUSB302_TKN_JAMCRC;
+ /* EOP */
+ buf[pos++] = FUSB302_TKN_EOP;
+ /* turn tx off after sending message */
+ buf[pos++] = FUSB302_TKN_TXOFF;
+ /* start transmission */
+ buf[pos++] = FUSB302_TKN_TXON;
+
+ ret = fusb302_i2c_block_write(chip, FUSB_REG_FIFOS, pos, buf);
+ if (ret < 0)
+ return ret;
+ fusb302_log(chip, "sending PD message header: %x", msg->header);
+ fusb302_log(chip, "sending PD message len: %d", len);
+
+ return ret;
+}
+
+static int fusb302_pd_send_hardreset(struct fusb302_chip *chip)
+{
+ return fusb302_i2c_set_bits(chip, FUSB_REG_CONTROL3,
+ FUSB_REG_CONTROL3_SEND_HARDRESET);
+}
+
+static const char * const transmit_type_name[] = {
+ [TCPC_TX_SOP] = "SOP",
+ [TCPC_TX_SOP_PRIME] = "SOP'",
+ [TCPC_TX_SOP_PRIME_PRIME] = "SOP''",
+ [TCPC_TX_SOP_DEBUG_PRIME] = "DEBUG'",
+ [TCPC_TX_SOP_DEBUG_PRIME_PRIME] = "DEBUG''",
+ [TCPC_TX_HARD_RESET] = "HARD_RESET",
+ [TCPC_TX_CABLE_RESET] = "CABLE_RESET",
+ [TCPC_TX_BIST_MODE_2] = "BIST_MODE_2",
+};
+
+static int tcpm_pd_transmit(struct tcpc_dev *dev, enum tcpm_transmit_type type,
+ const struct pd_message *msg)
+{
+ struct fusb302_chip *chip = container_of(dev, struct fusb302_chip,
+ tcpc_dev);
+ int ret = 0;
+
+ mutex_lock(&chip->lock);
+ switch (type) {
+ case TCPC_TX_SOP:
+ ret = fusb302_pd_send_message(chip, msg);
+ if (ret < 0)
+ fusb302_log(chip,
+ "cannot send PD message, ret=%d", ret);
+ break;
+ case TCPC_TX_HARD_RESET:
+ ret = fusb302_pd_send_hardreset(chip);
+ if (ret < 0)
+ fusb302_log(chip,
+ "cannot send hardreset, ret=%d", ret);
+ break;
+ default:
+ fusb302_log(chip, "type %s not supported",
+ transmit_type_name[type]);
+ ret = -EINVAL;
+ }
+ mutex_unlock(&chip->lock);
+
+ return ret;
+}
+
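+/*
+ * Translate the 2-bit BC_LVL field of STATUS0 (the voltage band measured
+ * on the monitored CC pin) into the partner's Rp advertisement; anything
+ * below the lowest band is reported as open.
+ */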
+static enum typec_cc_status fusb302_bc_lvl_to_cc(u8 bc_lvl)
+{
+ if (bc_lvl == FUSB_REG_STATUS0_BC_LVL_1230_MAX)
+ return TYPEC_CC_RP_3_0;
+ if (bc_lvl == FUSB_REG_STATUS0_BC_LVL_600_1230)
+ return TYPEC_CC_RP_1_5;
+ if (bc_lvl == FUSB_REG_STATUS0_BC_LVL_200_600)
+ return TYPEC_CC_RP_DEF;
+ return TYPEC_CC_OPEN;
+}
+
+static void fusb302_bc_lvl_handler_work(struct work_struct *work)
+{
+ struct fusb302_chip *chip = container_of(work, struct fusb302_chip,
+ bc_lvl_handler.work);
+ int ret = 0;
+ u8 status0;
+ u8 bc_lvl;
+ enum typec_cc_status cc_status;
+
+ mutex_lock(&chip->lock);
+ if (!chip->intr_bc_lvl) {
+ fusb302_log(chip, "BC_LVL interrupt is turned off, abort");
+ goto done;
+ }
+ ret = fusb302_i2c_read(chip, FUSB_REG_STATUS0, &status0);
+ if (ret < 0)
+ goto done;
+ fusb302_log(chip, "BC_LVL handler, status0=0x%02x", status0);
+ if (status0 & FUSB_REG_STATUS0_ACTIVITY) {
+ fusb302_log(chip, "CC activities detected, delay handling");
+ mod_delayed_work(chip->wq, &chip->bc_lvl_handler,
+ msecs_to_jiffies(T_BC_LVL_DEBOUNCE_DELAY_MS));
+ goto done;
+ }
+ bc_lvl = status0 & FUSB_REG_STATUS0_BC_LVL_MASK;
+ cc_status = fusb302_bc_lvl_to_cc(bc_lvl);
+ if (chip->cc_polarity == TYPEC_POLARITY_CC1) {
+ if (chip->cc1 != cc_status) {
+ fusb302_log(chip, "cc1: %s -> %s",
+ typec_cc_status_name[chip->cc1],
+ typec_cc_status_name[cc_status]);
+ chip->cc1 = cc_status;
+ tcpm_cc_change(chip->tcpm_port);
+ }
+ } else {
+ if (chip->cc2 != cc_status) {
+ fusb302_log(chip, "cc2: %s -> %s",
+ typec_cc_status_name[chip->cc2],
+ typec_cc_status_name[cc_status]);
+ chip->cc2 = cc_status;
+ tcpm_cc_change(chip->tcpm_port);
+ }
+ }
+
+done:
+ mutex_unlock(&chip->lock);
+}
+
+#define PDO_FIXED_FLAGS \
+ (PDO_FIXED_DUAL_ROLE | PDO_FIXED_DATA_SWAP | PDO_FIXED_USB_COMM)
+
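+/*
+ * Both roles advertise a single fixed 5 V / 400 mA PDO; PDO_FIXED() packs
+ * the voltage in 50 mV units and the current in 10 mA units together with
+ * the dual-role, data-swap and USB-comm capability flags above.
+ */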
+static const u32 src_pdo[] = {
+ PDO_FIXED(5000, 400, PDO_FIXED_FLAGS),
+};
+
+static const u32 snk_pdo[] = {
+ PDO_FIXED(5000, 400, PDO_FIXED_FLAGS),
+};
+
+static const struct tcpc_config fusb302_tcpc_config = {
+ .src_pdo = src_pdo,
+ .nr_src_pdo = ARRAY_SIZE(src_pdo),
+ .snk_pdo = snk_pdo,
+ .nr_snk_pdo = ARRAY_SIZE(snk_pdo),
+ .max_snk_mv = 9000,
+ .max_snk_ma = 3000,
+ .max_snk_mw = 27000,
+ .operating_snk_mw = 2500,
+ .type = TYPEC_PORT_DRP,
+ .default_role = TYPEC_SINK,
+ .alt_modes = NULL,
+};
+
+static void init_tcpc_dev(struct tcpc_dev *fusb302_tcpc_dev)
+{
+ fusb302_tcpc_dev->config = &fusb302_tcpc_config;
+ fusb302_tcpc_dev->init = tcpm_init;
+ fusb302_tcpc_dev->get_vbus = tcpm_get_vbus;
+ fusb302_tcpc_dev->set_cc = tcpm_set_cc;
+ fusb302_tcpc_dev->get_cc = tcpm_get_cc;
+ fusb302_tcpc_dev->set_polarity = tcpm_set_polarity;
+ fusb302_tcpc_dev->set_vconn = tcpm_set_vconn;
+ fusb302_tcpc_dev->set_vbus = tcpm_set_vbus;
+ fusb302_tcpc_dev->set_current_limit = tcpm_set_current_limit;
+ fusb302_tcpc_dev->set_pd_rx = tcpm_set_pd_rx;
+ fusb302_tcpc_dev->set_roles = tcpm_set_roles;
+ fusb302_tcpc_dev->start_drp_toggling = tcpm_start_drp_toggling;
+ fusb302_tcpc_dev->pd_transmit = tcpm_pd_transmit;
+ fusb302_tcpc_dev->mux = NULL;
+}
+
+static const char * const cc_polarity_name[] = {
+ [TYPEC_POLARITY_CC1] = "Polarity_CC1",
+ [TYPEC_POLARITY_CC2] = "Polarity_CC2",
+};
+
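+/*
+ * Program the switch blocks for the selected polarity: SWITCHES0 picks the
+ * CC pin that is measured (with VCONN, if on, routed to the opposite pin
+ * and the pull-up, if on, applied to the active pin), and SWITCHES1 picks
+ * the CC pin driven by the BMC transmitter.
+ */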
+static int fusb302_set_cc_polarity(struct fusb302_chip *chip,
+ enum typec_cc_polarity cc_polarity)
+{
+ int ret = 0;
+ u8 switches0_mask = FUSB_REG_SWITCHES0_CC1_PU_EN |
+ FUSB_REG_SWITCHES0_CC2_PU_EN |
+ FUSB_REG_SWITCHES0_VCONN_CC1 |
+ FUSB_REG_SWITCHES0_VCONN_CC2 |
+ FUSB_REG_SWITCHES0_MEAS_CC1 |
+ FUSB_REG_SWITCHES0_MEAS_CC2;
+ u8 switches0_data = 0x00;
+ u8 switches1_mask = FUSB_REG_SWITCHES1_TXCC1_EN |
+ FUSB_REG_SWITCHES1_TXCC2_EN;
+ u8 switches1_data = 0x00;
+
+ if (cc_polarity == TYPEC_POLARITY_CC1) {
+ switches0_data = FUSB_REG_SWITCHES0_MEAS_CC1;
+ if (chip->vconn_on)
+ switches0_data |= FUSB_REG_SWITCHES0_VCONN_CC2;
+ if (chip->pull_up)
+ switches0_data |= FUSB_REG_SWITCHES0_CC1_PU_EN;
+ switches1_data = FUSB_REG_SWITCHES1_TXCC1_EN;
+ } else {
+ switches0_data = FUSB_REG_SWITCHES0_MEAS_CC2;
+ if (chip->vconn_on)
+ switches0_data |= FUSB_REG_SWITCHES0_VCONN_CC1;
+ if (chip->pull_up)
+ switches0_data |= FUSB_REG_SWITCHES0_CC2_PU_EN;
+ switches1_data = FUSB_REG_SWITCHES1_TXCC2_EN;
+ }
+ ret = fusb302_i2c_mask_write(chip, FUSB_REG_SWITCHES0,
+ switches0_mask, switches0_data);
+ if (ret < 0)
+ return ret;
+ ret = fusb302_i2c_mask_write(chip, FUSB_REG_SWITCHES1,
+ switches1_mask, switches1_data);
+ if (ret < 0)
+ return ret;
+ chip->cc_polarity = cc_polarity;
+
+ return ret;
+}
+
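+/*
+ * TOGDONE stopped in a sink state:
+ * - enable the pull-downs and select the reported CC pin
+ * - read BC_LVL on that pin to get the partner's Rp level
+ * - report the new cc1/cc2 to tcpm, stop toggling and unmask BC_LVL
+ */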
+static int fusb302_handle_togdone_snk(struct fusb302_chip *chip,
+ u8 togdone_result)
+{
+ int ret = 0;
+ u8 status0;
+ u8 bc_lvl;
+ enum typec_cc_polarity cc_polarity;
+ enum typec_cc_status cc_status_active, cc1, cc2;
+
+ /* set pull_up, pull_down */
+ ret = fusb302_set_cc_pull(chip, false, true);
+ if (ret < 0) {
+ fusb302_log(chip, "cannot set cc to pull down, ret=%d", ret);
+ return ret;
+ }
+ /* set polarity */
+ cc_polarity = (togdone_result == FUSB_REG_STATUS1A_TOGSS_SNK1) ?
+ TYPEC_POLARITY_CC1 : TYPEC_POLARITY_CC2;
+ ret = fusb302_set_cc_polarity(chip, cc_polarity);
+ if (ret < 0) {
+ fusb302_log(chip, "cannot set cc polarity %s, ret=%d",
+ cc_polarity_name[cc_polarity], ret);
+ return ret;
+ }
+ /* fusb302_set_cc_polarity() has set the correct measure block */
+ ret = fusb302_i2c_read(chip, FUSB_REG_STATUS0, &status0);
+ if (ret < 0)
+ return ret;
+ bc_lvl = status0 & FUSB_REG_STATUS0_BC_LVL_MASK;
+ cc_status_active = fusb302_bc_lvl_to_cc(bc_lvl);
+ /* restart toggling if the cc status on the active line is OPEN */
+ if (cc_status_active == TYPEC_CC_OPEN) {
+ fusb302_log(chip, "restart toggling as CC_OPEN detected");
+ ret = fusb302_set_toggling(chip, chip->toggling_mode);
+ return ret;
+ }
+ /* update tcpm with the new cc value */
+ cc1 = (cc_polarity == TYPEC_POLARITY_CC1) ?
+ cc_status_active : TYPEC_CC_OPEN;
+ cc2 = (cc_polarity == TYPEC_POLARITY_CC2) ?
+ cc_status_active : TYPEC_CC_OPEN;
+ if ((chip->cc1 != cc1) || (chip->cc2 != cc2)) {
+ chip->cc1 = cc1;
+ chip->cc2 = cc2;
+ tcpm_cc_change(chip->tcpm_port);
+ }
+ /* turn off toggling */
+ ret = fusb302_set_toggling(chip, TOGGLINE_MODE_OFF);
+ if (ret < 0) {
+ fusb302_log(chip,
+ "cannot set toggling mode off, ret=%d", ret);
+ return ret;
+ }
+ /* unmask bc_lvl interrupt */
+ ret = fusb302_i2c_clear_bits(chip, FUSB_REG_MASK, FUSB_REG_MASK_BC_LVL);
+ if (ret < 0) {
+ fusb302_log(chip,
+ "cannot unmask bc_lcl interrupt, ret=%d", ret);
+ return ret;
+ }
+ chip->intr_bc_lvl = true;
+ fusb302_log(chip, "detected cc1=%s, cc2=%s",
+ typec_cc_status_name[cc1],
+ typec_cc_status_name[cc2]);
+
+ return ret;
+}
+
+static int fusb302_handle_togdone_src(struct fusb302_chip *chip,
+ u8 togdone_result)
+{
+ /*
+ * - set polarity (measure cc, vconn, tx)
+ * - set pull_up, pull_down
+ * - set cc1, cc2, and update to tcpm_port
+ * - set I_COMP interrupt on
+ */
+ int ret = 0;
+ u8 status0;
+ u8 ra_mda = ra_mda_value[chip->src_current_status];
+ u8 rd_mda = rd_mda_value[chip->src_current_status];
+ bool ra_comp, rd_comp;
+ enum typec_cc_polarity cc_polarity;
+ enum typec_cc_status cc_status_active, cc1, cc2;
+
+ /* set pull_up, pull_down */
+ ret = fusb302_set_cc_pull(chip, true, false);
+ if (ret < 0) {
+ fusb302_log(chip, "cannot set cc to pull up, ret=%d", ret);
+ return ret;
+ }
+ /* set polarity */
+ cc_polarity = (togdone_result == FUSB_REG_STATUS1A_TOGSS_SRC1) ?
+ TYPEC_POLARITY_CC1 : TYPEC_POLARITY_CC2;
+ ret = fusb302_set_cc_polarity(chip, cc_polarity);
+ if (ret < 0) {
+ fusb302_log(chip, "cannot set cc polarity %s, ret=%d",
+ cc_polarity_name[cc_polarity], ret);
+ return ret;
+ }
+ /* fusb302_set_cc_polarity() has set the correct measure block */
+ ret = fusb302_i2c_write(chip, FUSB_REG_MEASURE, rd_mda);
+ if (ret < 0)
+ return ret;
+ usleep_range(50, 100);
+ ret = fusb302_i2c_read(chip, FUSB_REG_STATUS0, &status0);
+ if (ret < 0)
+ return ret;
+ rd_comp = !!(status0 & FUSB_REG_STATUS0_COMP);
+ if (!rd_comp) {
+ ret = fusb302_i2c_write(chip, FUSB_REG_MEASURE, ra_mda);
+ if (ret < 0)
+ return ret;
+ usleep_range(50, 100);
+ ret = fusb302_i2c_read(chip, FUSB_REG_STATUS0, &status0);
+ if (ret < 0)
+ return ret;
+ ra_comp = !!(status0 & FUSB_REG_STATUS0_COMP);
+ }
+ if (rd_comp)
+ cc_status_active = TYPEC_CC_OPEN;
+ else if (ra_comp)
+ cc_status_active = TYPEC_CC_RD;
+ else
+ /* Ra is not supported, report as Open */
+ cc_status_active = TYPEC_CC_OPEN;
+ /* restart toggling if the cc status on the active line is OPEN */
+ if (cc_status_active == TYPEC_CC_OPEN) {
+ fusb302_log(chip, "restart toggling as CC_OPEN detected");
+ ret = fusb302_set_toggling(chip, chip->toggling_mode);
+ return ret;
+ }
+ /* update tcpm with the new cc value */
+ cc1 = (cc_polarity == TYPEC_POLARITY_CC1) ?
+ cc_status_active : TYPEC_CC_OPEN;
+ cc2 = (cc_polarity == TYPEC_POLARITY_CC2) ?
+ cc_status_active : TYPEC_CC_OPEN;
+ if ((chip->cc1 != cc1) || (chip->cc2 != cc2)) {
+ chip->cc1 = cc1;
+ chip->cc2 = cc2;
+ tcpm_cc_change(chip->tcpm_port);
+ }
+ /* turn off toggling */
+ ret = fusb302_set_toggling(chip, TOGGLINE_MODE_OFF);
+ if (ret < 0) {
+ fusb302_log(chip,
+ "cannot set toggling mode off, ret=%d", ret);
+ return ret;
+ }
+ /* set MDAC to Rd threshold, and unmask I_COMP for unplug detection */
+ ret = fusb302_i2c_write(chip, FUSB_REG_MEASURE, rd_mda);
+ if (ret < 0)
+ return ret;
+ /* unmask comp_chng interrupt */
+ ret = fusb302_i2c_clear_bits(chip, FUSB_REG_MASK,
+ FUSB_REG_MASK_COMP_CHNG);
+ if (ret < 0) {
+ fusb302_log(chip,
+ "cannot unmask bc_lcl interrupt, ret=%d", ret);
+ return ret;
+ }
+ chip->intr_comp_chng = true;
+ fusb302_log(chip, "detected cc1=%s, cc2=%s",
+ typec_cc_status_name[cc1],
+ typec_cc_status_name[cc2]);
+
+ return ret;
+}
+
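+/*
+ * On TOGDONE the TOGSS field of STATUS1A reports where the toggling state
+ * machine stopped: SNK1/SNK2 and SRC1/SRC2 encode the detected role and
+ * the CC pin on which the connection was seen, and are dispatched to the
+ * sink/source handlers above.
+ */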
+static int fusb302_handle_togdone(struct fusb302_chip *chip)
+{
+ int ret = 0;
+ u8 status1a;
+ u8 togdone_result;
+
+ ret = fusb302_i2c_read(chip, FUSB_REG_STATUS1A, &status1a);
+ if (ret < 0)
+ return ret;
+ togdone_result = (status1a >> FUSB_REG_STATUS1A_TOGSS_POS) &
+ FUSB_REG_STATUS1A_TOGSS_MASK;
+ switch (togdone_result) {
+ case FUSB_REG_STATUS1A_TOGSS_SNK1:
+ case FUSB_REG_STATUS1A_TOGSS_SNK2:
+ return fusb302_handle_togdone_snk(chip, togdone_result);
+ case FUSB_REG_STATUS1A_TOGSS_SRC1:
+ case FUSB_REG_STATUS1A_TOGSS_SRC2:
+ return fusb302_handle_togdone_src(chip, togdone_result);
+ case FUSB_REG_STATUS1A_TOGSS_AA:
+ /* audio accessory is not supported; restart toggling */
+ fusb302_log(chip, "AudioAccessory not supported");
+ fusb302_set_toggling(chip, chip->toggling_mode);
+ break;
+ default:
+ fusb302_log(chip, "TOGDONE with an invalid state: %d",
+ togdone_result);
+ fusb302_set_toggling(chip, chip->toggling_mode);
+ break;
+ }
+ return ret;
+}
+
+static int fusb302_pd_reset(struct fusb302_chip *chip)
+{
+ return fusb302_i2c_set_bits(chip, FUSB_REG_RESET,
+ FUSB_REG_RESET_PD_RESET);
+}
+
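+/*
+ * Drain one received packet from the RX FIFO: the SOP token, the 2-byte
+ * header, cnt * 4 bytes of payload and the trailing 4-byte CRC that the
+ * chip stores after each packet.
+ */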
+static int fusb302_pd_read_message(struct fusb302_chip *chip,
+ struct pd_message *msg)
+{
+ int ret = 0;
+ u8 token;
+ u8 crc[4];
+ int len;
+
+ /* first SOP token */
+ ret = fusb302_i2c_read(chip, FUSB_REG_FIFOS, &token);
+ if (ret < 0)
+ return ret;
+ ret = fusb302_i2c_block_read(chip, FUSB_REG_FIFOS, 2,
+ (u8 *)&msg->header);
+ if (ret < 0)
+ return ret;
+ len = pd_header_cnt_le(msg->header) * 4;
+ /* the trailing 4-byte CRC is not counted here; it is read out below */
+ if (len > PD_MAX_PAYLOAD * 4) {
+ fusb302_log(chip, "PD message too long %d", len);
+ return -EINVAL;
+ }
+ if (len > 0) {
+ ret = fusb302_i2c_block_read(chip, FUSB_REG_FIFOS, len,
+ (u8 *)msg->payload);
+ if (ret < 0)
+ return ret;
+ }
+ /* another 4 bytes to read CRC out */
+ ret = fusb302_i2c_block_read(chip, FUSB_REG_FIFOS, 4, crc);
+ if (ret < 0)
+ return ret;
+ fusb302_log(chip, "PD message header: %x", msg->header);
+ fusb302_log(chip, "PD message len: %d", len);
+
+ return ret;
+}
+
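+/*
+ * Threaded interrupt handler: snapshot the software interrupt-enable flags,
+ * read INTERRUPT/INTERRUPTA/INTERRUPTB and STATUS0 under chip->lock, then
+ * forward VBUS, TOGDONE, BC_LVL, COMP_CHNG and PD TX/RX events to tcpm.
+ */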
+static irqreturn_t fusb302_irq_intn(int irq, void *dev_id)
+{
+ struct fusb302_chip *chip = dev_id;
+ int ret = 0;
+ u8 interrupt;
+ u8 interrupta;
+ u8 interruptb;
+ u8 status0;
+ bool vbus_present;
+ bool comp_result;
+ bool intr_togdone;
+ bool intr_bc_lvl;
+ bool intr_comp_chng;
+ struct pd_message pd_msg;
+
+ mutex_lock(&chip->lock);
+ /* grab a snapshot of intr flags */
+ intr_togdone = chip->intr_togdone;
+ intr_bc_lvl = chip->intr_bc_lvl;
+ intr_comp_chng = chip->intr_comp_chng;
+
+ ret = fusb302_i2c_read(chip, FUSB_REG_INTERRUPT, &interrupt);
+ if (ret < 0)
+ goto done;
+ ret = fusb302_i2c_read(chip, FUSB_REG_INTERRUPTA, &interrupta);
+ if (ret < 0)
+ goto done;
+ ret = fusb302_i2c_read(chip, FUSB_REG_INTERRUPTB, &interruptb);
+ if (ret < 0)
+ goto done;
+ ret = fusb302_i2c_read(chip, FUSB_REG_STATUS0, &status0);
+ if (ret < 0)
+ goto done;
+ fusb302_log(chip,
+ "IRQ: 0x%02x, a: 0x%02x, b: 0x%02x, status0: 0x%02x",
+ interrupt, interrupta, interruptb, status0);
+
+ if (interrupt & FUSB_REG_INTERRUPT_VBUSOK) {
+ vbus_present = !!(status0 & FUSB_REG_STATUS0_VBUSOK);
+ fusb302_log(chip, "IRQ: VBUS_OK, vbus=%s",
+ vbus_present ? "On" : "Off");
+ if (vbus_present != chip->vbus_present) {
+ chip->vbus_present = vbus_present;
+ tcpm_vbus_change(chip->tcpm_port);
+ }
+ }
+
+ if ((interrupta & FUSB_REG_INTERRUPTA_TOGDONE) && intr_togdone) {
+ fusb302_log(chip, "IRQ: TOGDONE");
+ ret = fusb302_handle_togdone(chip);
+ if (ret < 0) {
+ fusb302_log(chip,
+ "handle togdone error, ret=%d", ret);
+ goto done;
+ }
+ }
+
+ if ((interrupt & FUSB_REG_INTERRUPT_BC_LVL) && intr_bc_lvl) {
+ fusb302_log(chip, "IRQ: BC_LVL, handler pending");
+ /*
+ * BC_LVL can be affected by PD activity, so delay the handler
+ * until the PD signaling has finished.
+ */
+ mod_delayed_work(chip->wq, &chip->bc_lvl_handler,
+ msecs_to_jiffies(T_BC_LVL_DEBOUNCE_DELAY_MS));
+ }
+
+ if ((interrupt & FUSB_REG_INTERRUPT_COMP_CHNG) && intr_comp_chng) {
+ comp_result = !!(status0 & FUSB_REG_STATUS0_COMP);
+ fusb302_log(chip, "IRQ: COMP_CHNG, comp=%s",
+ comp_result ? "true" : "false");
+ if (comp_result) {
+ /* cc level > Rd threshold, detach */
+ if (chip->cc_polarity == TYPEC_POLARITY_CC1)
+ chip->cc1 = TYPEC_CC_OPEN;
+ else
+ chip->cc2 = TYPEC_CC_OPEN;
+ tcpm_cc_change(chip->tcpm_port);
+ }
+ }
+
+ if (interrupt & FUSB_REG_INTERRUPT_COLLISION) {
+ fusb302_log(chip, "IRQ: PD collision");
+ tcpm_pd_transmit_complete(chip->tcpm_port, TCPC_TX_FAILED);
+ }
+
+ if (interrupta & FUSB_REG_INTERRUPTA_RETRYFAIL) {
+ fusb302_log(chip, "IRQ: PD retry failed");
+ tcpm_pd_transmit_complete(chip->tcpm_port, TCPC_TX_FAILED);
+ }
+
+ if (interrupta & FUSB_REG_INTERRUPTA_HARDSENT) {
+ fusb302_log(chip, "IRQ: PD hardreset sent");
+ ret = fusb302_pd_reset(chip);
+ if (ret < 0) {
+ fusb302_log(chip, "cannot PD reset, ret=%d", ret);
+ goto done;
+ }
+ tcpm_pd_transmit_complete(chip->tcpm_port, TCPC_TX_SUCCESS);
+ }
+
+ if (interrupta & FUSB_REG_INTERRUPTA_TX_SUCCESS) {
+ fusb302_log(chip, "IRQ: PD tx success");
+ /* read out the received good CRC */
+ ret = fusb302_pd_read_message(chip, &pd_msg);
+ if (ret < 0) {
+ fusb302_log(chip, "cannot read in GCRC, ret=%d", ret);
+ goto done;
+ }
+ tcpm_pd_transmit_complete(chip->tcpm_port, TCPC_TX_SUCCESS);
+ }
+
+ if (interrupta & FUSB_REG_INTERRUPTA_HARDRESET) {
+ fusb302_log(chip, "IRQ: PD received hardreset");
+ ret = fusb302_pd_reset(chip);
+ if (ret < 0) {
+ fusb302_log(chip, "cannot PD reset, ret=%d", ret);
+ goto done;
+ }
+ tcpm_pd_hard_reset(chip->tcpm_port);
+ }
+
+ if (interruptb & FUSB_REG_INTERRUPTB_GCRCSENT) {
+ fusb302_log(chip, "IRQ: PD sent good CRC");
+ ret = fusb302_pd_read_message(chip, &pd_msg);
+ if (ret < 0) {
+ fusb302_log(chip,
+ "cannot read in PD message, ret=%d", ret);
+ goto done;
+ }
+ tcpm_pd_receive(chip->tcpm_port, &pd_msg);
+ }
+done:
+ mutex_unlock(&chip->lock);
+
+ return IRQ_HANDLED;
+}
+
+static int init_gpio(struct fusb302_chip *chip)
+{
+ struct device_node *node;
+ int ret = 0;
+
+ node = chip->dev->of_node;
+ chip->gpio_int_n = of_get_named_gpio(node, "fcs,int_n", 0);
+ if (!gpio_is_valid(chip->gpio_int_n)) {
+ ret = chip->gpio_int_n;
+ fusb302_log(chip, "cannot get named GPIO Int_N, ret=%d", ret);
+ return ret;
+ }
+ ret = devm_gpio_request(chip->dev, chip->gpio_int_n, "fcs,int_n");
+ if (ret < 0) {
+ fusb302_log(chip, "cannot request GPIO Int_N, ret=%d", ret);
+ return ret;
+ }
+ ret = gpio_direction_input(chip->gpio_int_n);
+ if (ret < 0) {
+ fusb302_log(chip,
+ "cannot set GPIO Int_N to input, ret=%d", ret);
+ return ret;
+ }
+ ret = gpio_to_irq(chip->gpio_int_n);
+ if (ret < 0) {
+ fusb302_log(chip,
+ "cannot request IRQ for GPIO Int_N, ret=%d", ret);
+ return ret;
+ }
+ chip->gpio_int_n_irq = ret;
+ return 0;
+}
+
+static int fusb302_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct fusb302_chip *chip;
+ struct i2c_adapter *adapter;
+ int ret = 0;
+
+ adapter = to_i2c_adapter(client->dev.parent);
+ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_I2C_BLOCK)) {
+ dev_err(&client->dev,
+ "I2C/SMBus block functionality not supported!\n");
+ return -ENODEV;
+ }
+ chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ chip->i2c_client = client;
+ i2c_set_clientdata(client, chip);
+ chip->dev = &client->dev;
+ mutex_init(&chip->lock);
+
+ ret = fusb302_debugfs_init(chip);
+ if (ret < 0)
+ return ret;
+
+ chip->wq = create_singlethread_workqueue(dev_name(chip->dev));
+ if (!chip->wq) {
+ ret = -ENOMEM;
+ goto clear_client_data;
+ }
+ INIT_DELAYED_WORK(&chip->bc_lvl_handler, fusb302_bc_lvl_handler_work);
+ init_tcpc_dev(&chip->tcpc_dev);
+
+ chip->vbus = devm_regulator_get(chip->dev, "vbus");
+ if (IS_ERR(chip->vbus)) {
+ ret = PTR_ERR(chip->vbus);
+ goto destroy_workqueue;
+ }
+
+ ret = init_gpio(chip);
+ if (ret < 0)
+ goto destroy_workqueue;
+
+ chip->tcpm_port = tcpm_register_port(&client->dev, &chip->tcpc_dev);
+ if (IS_ERR(chip->tcpm_port)) {
+ ret = PTR_ERR(chip->tcpm_port);
+ fusb302_log(chip, "cannot register tcpm port, ret=%d", ret);
+ goto destroy_workqueue;
+ }
+
+ ret = devm_request_threaded_irq(chip->dev, chip->gpio_int_n_irq,
+ NULL, fusb302_irq_intn,
+ IRQF_ONESHOT | IRQF_TRIGGER_LOW,
+ "fsc_interrupt_int_n", chip);
+ if (ret < 0) {
+ fusb302_log(chip,
+ "cannot request IRQ for GPIO Int_N, ret=%d", ret);
+ goto tcpm_unregister_port;
+ }
+ enable_irq_wake(chip->gpio_int_n_irq);
+ return ret;
+
+tcpm_unregister_port:
+ tcpm_unregister_port(chip->tcpm_port);
+destroy_workqueue:
+ destroy_workqueue(chip->wq);
+clear_client_data:
+ i2c_set_clientdata(client, NULL);
+ fusb302_debugfs_exit(chip);
+
+ return ret;
+}
+
+static int fusb302_remove(struct i2c_client *client)
+{
+ struct fusb302_chip *chip = i2c_get_clientdata(client);
+
+ tcpm_unregister_port(chip->tcpm_port);
+ destroy_workqueue(chip->wq);
+ i2c_set_clientdata(client, NULL);
+ fusb302_debugfs_exit(chip);
+
+ return 0;
+}
+
+static int fusb302_pm_suspend(struct device *dev)
+{
+ struct fusb302_chip *chip = dev_get_drvdata(dev);
+
+ if (atomic_read(&chip->i2c_busy))
+ return -EBUSY;
+ atomic_set(&chip->pm_suspend, 1);
+
+ return 0;
+}
+
+static int fusb302_pm_resume(struct device *dev)
+{
+ struct fusb302_chip *chip = dev_get_drvdata(dev);
+
+ atomic_set(&chip->pm_suspend, 0);
+
+ return 0;
+}
+
+static const struct of_device_id fusb302_dt_match[] = {
+ {.compatible = "fcs,fusb302"},
+ {},
+};
+MODULE_DEVICE_TABLE(of, fusb302_dt_match);
+
+static const struct i2c_device_id fusb302_i2c_device_id[] = {
+ {"typec_fusb302", 0},
+ {},
+};
+MODULE_DEVICE_TABLE(i2c, fusb302_i2c_device_id);
+
+static const struct dev_pm_ops fusb302_pm_ops = {
+ .suspend = fusb302_pm_suspend,
+ .resume = fusb302_pm_resume,
+};
+
+static struct i2c_driver fusb302_driver = {
+ .driver = {
+ .name = "typec_fusb302",
+ .pm = &fusb302_pm_ops,
+ .of_match_table = of_match_ptr(fusb302_dt_match),
+ },
+ .probe = fusb302_probe,
+ .remove = fusb302_remove,
+ .id_table = fusb302_i2c_device_id,
+};
+module_i2c_driver(fusb302_driver);
+
+MODULE_AUTHOR("Yueyao Zhu <yueyao.zhu@gmail.com>");
+MODULE_DESCRIPTION("Fairchild FUSB302 Type-C Chip Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/typec/fusb302/fusb302_reg.h b/drivers/staging/typec/fusb302/fusb302_reg.h
new file mode 100644
index 000000000000..0682e63de773
--- /dev/null
+++ b/drivers/staging/typec/fusb302/fusb302_reg.h
@@ -0,0 +1,186 @@
+/*
+ * Copyright 2016-2017 Google, Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Fairchild FUSB302 Type-C Chip Driver
+ */
+
+#ifndef FUSB302_REG_H
+#define FUSB302_REG_H
+
+#define FUSB_REG_DEVICE_ID 0x01
+#define FUSB_REG_SWITCHES0 0x02
+#define FUSB_REG_SWITCHES0_CC2_PU_EN BIT(7)
+#define FUSB_REG_SWITCHES0_CC1_PU_EN BIT(6)
+#define FUSB_REG_SWITCHES0_VCONN_CC2 BIT(5)
+#define FUSB_REG_SWITCHES0_VCONN_CC1 BIT(4)
+#define FUSB_REG_SWITCHES0_MEAS_CC2 BIT(3)
+#define FUSB_REG_SWITCHES0_MEAS_CC1 BIT(2)
+#define FUSB_REG_SWITCHES0_CC2_PD_EN BIT(1)
+#define FUSB_REG_SWITCHES0_CC1_PD_EN BIT(0)
+#define FUSB_REG_SWITCHES1 0x03
+#define FUSB_REG_SWITCHES1_POWERROLE BIT(7)
+#define FUSB_REG_SWITCHES1_SPECREV1 BIT(6)
+#define FUSB_REG_SWITCHES1_SPECREV0 BIT(5)
+#define FUSB_REG_SWITCHES1_DATAROLE BIT(4)
+#define FUSB_REG_SWITCHES1_AUTO_GCRC BIT(2)
+#define FUSB_REG_SWITCHES1_TXCC2_EN BIT(1)
+#define FUSB_REG_SWITCHES1_TXCC1_EN BIT(0)
+#define FUSB_REG_MEASURE 0x04
+#define FUSB_REG_MEASURE_MDAC5 BIT(7)
+#define FUSB_REG_MEASURE_MDAC4 BIT(6)
+#define FUSB_REG_MEASURE_MDAC3 BIT(5)
+#define FUSB_REG_MEASURE_MDAC2 BIT(4)
+#define FUSB_REG_MEASURE_MDAC1 BIT(3)
+#define FUSB_REG_MEASURE_MDAC0 BIT(2)
+#define FUSB_REG_MEASURE_VBUS BIT(1)
+#define FUSB_REG_MEASURE_XXXX5 BIT(0)
+#define FUSB_REG_CONTROL0 0x06
+#define FUSB_REG_CONTROL0_TX_FLUSH BIT(6)
+#define FUSB_REG_CONTROL0_INT_MASK BIT(5)
+#define FUSB_REG_CONTROL0_HOST_CUR_MASK (0xC)
+#define FUSB_REG_CONTROL0_HOST_CUR_HIGH (0xC)
+#define FUSB_REG_CONTROL0_HOST_CUR_MED (0x8)
+#define FUSB_REG_CONTROL0_HOST_CUR_DEF (0x4)
+#define FUSB_REG_CONTROL0_TX_START BIT(0)
+#define FUSB_REG_CONTROL1 0x07
+#define FUSB_REG_CONTROL1_ENSOP2DB BIT(6)
+#define FUSB_REG_CONTROL1_ENSOP1DB BIT(5)
+#define FUSB_REG_CONTROL1_BIST_MODE2 BIT(4)
+#define FUSB_REG_CONTROL1_RX_FLUSH BIT(2)
+#define FUSB_REG_CONTROL1_ENSOP2 BIT(1)
+#define FUSB_REG_CONTROL1_ENSOP1 BIT(0)
+#define FUSB_REG_CONTROL2 0x08
+#define FUSB_REG_CONTROL2_MODE BIT(1)
+#define FUSB_REG_CONTROL2_MODE_MASK (0x6)
+#define FUSB_REG_CONTROL2_MODE_DFP (0x6)
+#define FUSB_REG_CONTROL2_MODE_UFP (0x4)
+#define FUSB_REG_CONTROL2_MODE_DRP (0x2)
+#define FUSB_REG_CONTROL2_MODE_NONE (0x0)
+#define FUSB_REG_CONTROL2_TOGGLE BIT(0)
+#define FUSB_REG_CONTROL3 0x09
+#define FUSB_REG_CONTROL3_SEND_HARDRESET BIT(6)
+#define FUSB_REG_CONTROL3_BIST_TMODE BIT(5) /* 302B Only */
+#define FUSB_REG_CONTROL3_AUTO_HARDRESET BIT(4)
+#define FUSB_REG_CONTROL3_AUTO_SOFTRESET BIT(3)
+#define FUSB_REG_CONTROL3_N_RETRIES BIT(1)
+#define FUSB_REG_CONTROL3_N_RETRIES_MASK (0x6)
+#define FUSB_REG_CONTROL3_N_RETRIES_3 (0x6)
+#define FUSB_REG_CONTROL3_N_RETRIES_2 (0x4)
+#define FUSB_REG_CONTROL3_N_RETRIES_1 (0x2)
+#define FUSB_REG_CONTROL3_AUTO_RETRY BIT(0)
+#define FUSB_REG_MASK 0x0A
+#define FUSB_REG_MASK_VBUSOK BIT(7)
+#define FUSB_REG_MASK_ACTIVITY BIT(6)
+#define FUSB_REG_MASK_COMP_CHNG BIT(5)
+#define FUSB_REG_MASK_CRC_CHK BIT(4)
+#define FUSB_REG_MASK_ALERT BIT(3)
+#define FUSB_REG_MASK_WAKE BIT(2)
+#define FUSB_REG_MASK_COLLISION BIT(1)
+#define FUSB_REG_MASK_BC_LVL BIT(0)
+#define FUSB_REG_POWER 0x0B
+#define FUSB_REG_POWER_PWR BIT(0)
+#define FUSB_REG_POWER_PWR_LOW 0x1
+#define FUSB_REG_POWER_PWR_MEDIUM 0x3
+#define FUSB_REG_POWER_PWR_HIGH 0x7
+#define FUSB_REG_POWER_PWR_ALL 0xF
+#define FUSB_REG_RESET 0x0C
+#define FUSB_REG_RESET_PD_RESET BIT(1)
+#define FUSB_REG_RESET_SW_RESET BIT(0)
+#define FUSB_REG_MASKA 0x0E
+#define FUSB_REG_MASKA_OCP_TEMP BIT(7)
+#define FUSB_REG_MASKA_TOGDONE BIT(6)
+#define FUSB_REG_MASKA_SOFTFAIL BIT(5)
+#define FUSB_REG_MASKA_RETRYFAIL BIT(4)
+#define FUSB_REG_MASKA_HARDSENT BIT(3)
+#define FUSB_REG_MASKA_TX_SUCCESS BIT(2)
+#define FUSB_REG_MASKA_SOFTRESET BIT(1)
+#define FUSB_REG_MASKA_HARDRESET BIT(0)
+#define FUSB_REG_MASKB 0x0F
+#define FUSB_REG_MASKB_GCRCSENT BIT(0)
+#define FUSB_REG_STATUS0A 0x3C
+#define FUSB_REG_STATUS0A_SOFTFAIL BIT(5)
+#define FUSB_REG_STATUS0A_RETRYFAIL BIT(4)
+#define FUSB_REG_STATUS0A_POWER BIT(2)
+#define FUSB_REG_STATUS0A_RX_SOFT_RESET BIT(1)
+#define FUSB_REG_STATUS0A_RX_HARD_RESET BIT(0)
+#define FUSB_REG_STATUS1A 0x3D
+#define FUSB_REG_STATUS1A_TOGSS BIT(3)
+#define FUSB_REG_STATUS1A_TOGSS_RUNNING 0x0
+#define FUSB_REG_STATUS1A_TOGSS_SRC1 0x1
+#define FUSB_REG_STATUS1A_TOGSS_SRC2 0x2
+#define FUSB_REG_STATUS1A_TOGSS_SNK1 0x5
+#define FUSB_REG_STATUS1A_TOGSS_SNK2 0x6
+#define FUSB_REG_STATUS1A_TOGSS_AA 0x7
+#define FUSB_REG_STATUS1A_TOGSS_POS (3)
+#define FUSB_REG_STATUS1A_TOGSS_MASK (0x7)
+#define FUSB_REG_STATUS1A_RXSOP2DB BIT(2)
+#define FUSB_REG_STATUS1A_RXSOP1DB BIT(1)
+#define FUSB_REG_STATUS1A_RXSOP BIT(0)
+#define FUSB_REG_INTERRUPTA 0x3E
+#define FUSB_REG_INTERRUPTA_OCP_TEMP BIT(7)
+#define FUSB_REG_INTERRUPTA_TOGDONE BIT(6)
+#define FUSB_REG_INTERRUPTA_SOFTFAIL BIT(5)
+#define FUSB_REG_INTERRUPTA_RETRYFAIL BIT(4)
+#define FUSB_REG_INTERRUPTA_HARDSENT BIT(3)
+#define FUSB_REG_INTERRUPTA_TX_SUCCESS BIT(2)
+#define FUSB_REG_INTERRUPTA_SOFTRESET BIT(1)
+#define FUSB_REG_INTERRUPTA_HARDRESET BIT(0)
+#define FUSB_REG_INTERRUPTB 0x3F
+#define FUSB_REG_INTERRUPTB_GCRCSENT BIT(0)
+#define FUSB_REG_STATUS0 0x40
+#define FUSB_REG_STATUS0_VBUSOK BIT(7)
+#define FUSB_REG_STATUS0_ACTIVITY BIT(6)
+#define FUSB_REG_STATUS0_COMP BIT(5)
+#define FUSB_REG_STATUS0_CRC_CHK BIT(4)
+#define FUSB_REG_STATUS0_ALERT BIT(3)
+#define FUSB_REG_STATUS0_WAKE BIT(2)
+#define FUSB_REG_STATUS0_BC_LVL_MASK 0x03
+#define FUSB_REG_STATUS0_BC_LVL_0_200 0x0
+#define FUSB_REG_STATUS0_BC_LVL_200_600 0x1
+#define FUSB_REG_STATUS0_BC_LVL_600_1230 0x2
+#define FUSB_REG_STATUS0_BC_LVL_1230_MAX 0x3
+#define FUSB_REG_STATUS0_BC_LVL1 BIT(1)
+#define FUSB_REG_STATUS0_BC_LVL0 BIT(0)
+#define FUSB_REG_STATUS1 0x41
+#define FUSB_REG_STATUS1_RXSOP2 BIT(7)
+#define FUSB_REG_STATUS1_RXSOP1 BIT(6)
+#define FUSB_REG_STATUS1_RX_EMPTY BIT(5)
+#define FUSB_REG_STATUS1_RX_FULL BIT(4)
+#define FUSB_REG_STATUS1_TX_EMPTY BIT(3)
+#define FUSB_REG_STATUS1_TX_FULL BIT(2)
+#define FUSB_REG_INTERRUPT 0x42
+#define FUSB_REG_INTERRUPT_VBUSOK BIT(7)
+#define FUSB_REG_INTERRUPT_ACTIVITY BIT(6)
+#define FUSB_REG_INTERRUPT_COMP_CHNG BIT(5)
+#define FUSB_REG_INTERRUPT_CRC_CHK BIT(4)
+#define FUSB_REG_INTERRUPT_ALERT BIT(3)
+#define FUSB_REG_INTERRUPT_WAKE BIT(2)
+#define FUSB_REG_INTERRUPT_COLLISION BIT(1)
+#define FUSB_REG_INTERRUPT_BC_LVL BIT(0)
+#define FUSB_REG_FIFOS 0x43
+
+/* Tokens defined for the FUSB302 TX FIFO */
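+/* FUSB302_TKN_PACKSYM is OR'ed with the packet byte count in its low 5 bits */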
+enum fusb302_txfifo_tokens {
+ FUSB302_TKN_TXON = 0xA1,
+ FUSB302_TKN_SYNC1 = 0x12,
+ FUSB302_TKN_SYNC2 = 0x13,
+ FUSB302_TKN_SYNC3 = 0x1B,
+ FUSB302_TKN_RST1 = 0x15,
+ FUSB302_TKN_RST2 = 0x16,
+ FUSB302_TKN_PACKSYM = 0x80,
+ FUSB302_TKN_JAMCRC = 0xFF,
+ FUSB302_TKN_EOP = 0x14,
+ FUSB302_TKN_TXOFF = 0xFE,
+};
+
+#endif
diff --git a/drivers/staging/typec/pd.h b/drivers/staging/typec/pd.h
new file mode 100644
index 000000000000..8d97bdb95f23
--- /dev/null
+++ b/drivers/staging/typec/pd.h
@@ -0,0 +1,281 @@
+/*
+ * Copyright 2015-2017 Google, Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __LINUX_USB_PD_H
+#define __LINUX_USB_PD_H
+
+#include <linux/types.h>
+#include <linux/usb/typec.h>
+
+/* USB PD Messages */
+enum pd_ctrl_msg_type {
+ /* 0 Reserved */
+ PD_CTRL_GOOD_CRC = 1,
+ PD_CTRL_GOTO_MIN = 2,
+ PD_CTRL_ACCEPT = 3,
+ PD_CTRL_REJECT = 4,
+ PD_CTRL_PING = 5,
+ PD_CTRL_PS_RDY = 6,
+ PD_CTRL_GET_SOURCE_CAP = 7,
+ PD_CTRL_GET_SINK_CAP = 8,
+ PD_CTRL_DR_SWAP = 9,
+ PD_CTRL_PR_SWAP = 10,
+ PD_CTRL_VCONN_SWAP = 11,
+ PD_CTRL_WAIT = 12,
+ PD_CTRL_SOFT_RESET = 13,
+ /* 14-15 Reserved */
+};
+
+enum pd_data_msg_type {
+ /* 0 Reserved */
+ PD_DATA_SOURCE_CAP = 1,
+ PD_DATA_REQUEST = 2,
+ PD_DATA_BIST = 3,
+ PD_DATA_SINK_CAP = 4,
+ /* 5-14 Reserved */
+ PD_DATA_VENDOR_DEF = 15,
+};
+
+#define PD_REV10 0x0
+#define PD_REV20 0x1
+
+#define PD_HEADER_CNT_SHIFT 12
+#define PD_HEADER_CNT_MASK 0x7
+#define PD_HEADER_ID_SHIFT 9
+#define PD_HEADER_ID_MASK 0x7
+#define PD_HEADER_PWR_ROLE BIT(8)
+#define PD_HEADER_REV_SHIFT 6
+#define PD_HEADER_REV_MASK 0x3
+#define PD_HEADER_DATA_ROLE BIT(5)
+#define PD_HEADER_TYPE_SHIFT 0
+#define PD_HEADER_TYPE_MASK 0xf
+
+#define PD_HEADER(type, pwr, data, id, cnt) \
+ ((((type) & PD_HEADER_TYPE_MASK) << PD_HEADER_TYPE_SHIFT) | \
+ ((pwr) == TYPEC_SOURCE ? PD_HEADER_PWR_ROLE : 0) | \
+ ((data) == TYPEC_HOST ? PD_HEADER_DATA_ROLE : 0) | \
+ (PD_REV20 << PD_HEADER_REV_SHIFT) | \
+ (((id) & PD_HEADER_ID_MASK) << PD_HEADER_ID_SHIFT) | \
+ (((cnt) & PD_HEADER_CNT_MASK) << PD_HEADER_CNT_SHIFT))
+
+#define PD_HEADER_LE(type, pwr, data, id, cnt) \
+ cpu_to_le16(PD_HEADER((type), (pwr), (data), (id), (cnt)))
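+/*
+ * Example: PD_HEADER_LE(PD_CTRL_GET_SOURCE_CAP, TYPEC_SINK, TYPEC_DEVICE,
+ * 0, 0) builds the little-endian header of a Get_Source_Cap control
+ * message (zero data objects, message ID 0) sent by a sink/UFP.
+ */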
+
+static inline unsigned int pd_header_cnt(u16 header)
+{
+ return (header >> PD_HEADER_CNT_SHIFT) & PD_HEADER_CNT_MASK;
+}
+
+static inline unsigned int pd_header_cnt_le(__le16 header)
+{
+ return pd_header_cnt(le16_to_cpu(header));
+}
+
+static inline unsigned int pd_header_type(u16 header)
+{
+ return (header >> PD_HEADER_TYPE_SHIFT) & PD_HEADER_TYPE_MASK;
+}
+
+static inline unsigned int pd_header_type_le(__le16 header)
+{
+ return pd_header_type(le16_to_cpu(header));
+}
+
+#define PD_MAX_PAYLOAD 7
+
+struct pd_message {
+ __le16 header;
+ __le32 payload[PD_MAX_PAYLOAD];
+} __packed;
+
+/* PDO: Power Data Object */
+#define PDO_MAX_OBJECTS 7
+
+enum pd_pdo_type {
+ PDO_TYPE_FIXED = 0,
+ PDO_TYPE_BATT = 1,
+ PDO_TYPE_VAR = 2,
+};
+
+#define PDO_TYPE_SHIFT 30
+#define PDO_TYPE_MASK 0x3
+
+#define PDO_TYPE(t) ((t) << PDO_TYPE_SHIFT)
+
+#define PDO_VOLT_MASK 0x3ff
+#define PDO_CURR_MASK 0x3ff
+#define PDO_PWR_MASK 0x3ff
+
+#define PDO_FIXED_DUAL_ROLE BIT(29) /* Power role swap supported */
+#define PDO_FIXED_SUSPEND BIT(28) /* USB Suspend supported (Source) */
+#define PDO_FIXED_HIGHER_CAP BIT(28) /* Requires more than vSafe5V (Sink) */
+#define PDO_FIXED_EXTPOWER BIT(27) /* Externally powered */
+#define PDO_FIXED_USB_COMM BIT(26) /* USB communications capable */
+#define PDO_FIXED_DATA_SWAP BIT(25) /* Data role swap supported */
+#define PDO_FIXED_VOLT_SHIFT 10 /* 50mV units */
+#define PDO_FIXED_CURR_SHIFT 0 /* 10mA units */
+
+#define PDO_FIXED_VOLT(mv) ((((mv) / 50) & PDO_VOLT_MASK) << PDO_FIXED_VOLT_SHIFT)
+#define PDO_FIXED_CURR(ma) ((((ma) / 10) & PDO_CURR_MASK) << PDO_FIXED_CURR_SHIFT)
+
+#define PDO_FIXED(mv, ma, flags) \
+ (PDO_TYPE(PDO_TYPE_FIXED) | (flags) | \
+ PDO_FIXED_VOLT(mv) | PDO_FIXED_CURR(ma))
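+/*
+ * Example: PDO_FIXED(5000, 1500, PDO_FIXED_USB_COMM) encodes a fixed-supply
+ * object of 5 V (100 in 50 mV units) at 1.5 A (150 in 10 mA units) with the
+ * USB-communications-capable flag set.
+ */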
+
+#define PDO_BATT_MAX_VOLT_SHIFT 20 /* 50mV units */
+#define PDO_BATT_MIN_VOLT_SHIFT 10 /* 50mV units */
+#define PDO_BATT_MAX_PWR_SHIFT 0 /* 250mW units */
+
+#define PDO_BATT_MIN_VOLT(mv) ((((mv) / 50) & PDO_VOLT_MASK) << PDO_BATT_MIN_VOLT_SHIFT)
+#define PDO_BATT_MAX_VOLT(mv) ((((mv) / 50) & PDO_VOLT_MASK) << PDO_BATT_MAX_VOLT_SHIFT)
+#define PDO_BATT_MAX_POWER(mw) ((((mw) / 250) & PDO_PWR_MASK) << PDO_BATT_MAX_PWR_SHIFT)
+
+#define PDO_BATT(min_mv, max_mv, max_mw) \
+ (PDO_TYPE(PDO_TYPE_BATT) | PDO_BATT_MIN_VOLT(min_mv) | \
+ PDO_BATT_MAX_VOLT(max_mv) | PDO_BATT_MAX_POWER(max_mw))
+
+#define PDO_VAR_MAX_VOLT_SHIFT 20 /* 50mV units */
+#define PDO_VAR_MIN_VOLT_SHIFT 10 /* 50mV units */
+#define PDO_VAR_MAX_CURR_SHIFT 0 /* 10mA units */
+
+#define PDO_VAR_MIN_VOLT(mv) ((((mv) / 50) & PDO_VOLT_MASK) << PDO_VAR_MIN_VOLT_SHIFT)
+#define PDO_VAR_MAX_VOLT(mv) ((((mv) / 50) & PDO_VOLT_MASK) << PDO_VAR_MAX_VOLT_SHIFT)
+#define PDO_VAR_MAX_CURR(ma) ((((ma) / 10) & PDO_CURR_MASK) << PDO_VAR_MAX_CURR_SHIFT)
+
+#define PDO_VAR(min_mv, max_mv, max_ma) \
+ (PDO_TYPE(PDO_TYPE_VAR) | PDO_VAR_MIN_VOLT(min_mv) | \
+ PDO_VAR_MAX_VOLT(max_mv) | PDO_VAR_MAX_CURR(max_ma))
+
+static inline enum pd_pdo_type pdo_type(u32 pdo)
+{
+ return (pdo >> PDO_TYPE_SHIFT) & PDO_TYPE_MASK;
+}
+
+static inline unsigned int pdo_fixed_voltage(u32 pdo)
+{
+ return ((pdo >> PDO_FIXED_VOLT_SHIFT) & PDO_VOLT_MASK) * 50;
+}
+
+static inline unsigned int pdo_min_voltage(u32 pdo)
+{
+ return ((pdo >> PDO_VAR_MIN_VOLT_SHIFT) & PDO_VOLT_MASK) * 50;
+}
+
+static inline unsigned int pdo_max_voltage(u32 pdo)
+{
+ return ((pdo >> PDO_VAR_MAX_VOLT_SHIFT) & PDO_VOLT_MASK) * 50;
+}
+
+static inline unsigned int pdo_max_current(u32 pdo)
+{
+ return ((pdo >> PDO_VAR_MAX_CURR_SHIFT) & PDO_CURR_MASK) * 10;
+}
+
+static inline unsigned int pdo_max_power(u32 pdo)
+{
+ return ((pdo >> PDO_BATT_MAX_PWR_SHIFT) & PDO_PWR_MASK) * 250;
+}
+
+/* RDO: Request Data Object */
+#define RDO_OBJ_POS_SHIFT 28
+#define RDO_OBJ_POS_MASK 0x7
+#define RDO_GIVE_BACK BIT(27) /* Supports reduced operating current */
+#define RDO_CAP_MISMATCH BIT(26) /* Not satisfied by source caps */
+#define RDO_USB_COMM BIT(25) /* USB communications capable */
+#define RDO_NO_SUSPEND BIT(24) /* USB Suspend not supported */
+
+#define RDO_PWR_MASK 0x3ff
+#define RDO_CURR_MASK 0x3ff
+
+#define RDO_FIXED_OP_CURR_SHIFT 10
+#define RDO_FIXED_MAX_CURR_SHIFT 0
+
+#define RDO_OBJ(idx) (((idx) & RDO_OBJ_POS_MASK) << RDO_OBJ_POS_SHIFT)
+
+#define PDO_FIXED_OP_CURR(ma) ((((ma) / 10) & RDO_CURR_MASK) << RDO_FIXED_OP_CURR_SHIFT)
+#define PDO_FIXED_MAX_CURR(ma) ((((ma) / 10) & RDO_CURR_MASK) << RDO_FIXED_MAX_CURR_SHIFT)
+
+#define RDO_FIXED(idx, op_ma, max_ma, flags) \
+ (RDO_OBJ(idx) | (flags) | \
+ PDO_FIXED_OP_CURR(op_ma) | PDO_FIXED_MAX_CURR(max_ma))
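+/*
+ * Example: RDO_FIXED(1, 1500, 3000, RDO_USB_COMM) requests the first PDO
+ * (object position 1) with a 1.5 A operating current and a 3 A maximum
+ * current, flagging USB communications capability.
+ */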
+
+#define RDO_BATT_OP_PWR_SHIFT 10 /* 250mW units */
+#define RDO_BATT_MAX_PWR_SHIFT 0 /* 250mW units */
+
+#define RDO_BATT_OP_PWR(mw) ((((mw) / 250) & RDO_PWR_MASK) << RDO_BATT_OP_PWR_SHIFT)
+#define RDO_BATT_MAX_PWR(mw) ((((mw) / 250) & RDO_PWR_MASK) << RDO_BATT_MAX_PWR_SHIFT)
+
+#define RDO_BATT(idx, op_mw, max_mw, flags) \
+ (RDO_OBJ(idx) | (flags) | \
+ RDO_BATT_OP_PWR(op_mw) | RDO_BATT_MAX_PWR(max_mw))
+
+static inline unsigned int rdo_index(u32 rdo)
+{
+ return (rdo >> RDO_OBJ_POS_SHIFT) & RDO_OBJ_POS_MASK;
+}
+
+static inline unsigned int rdo_op_current(u32 rdo)
+{
+ return ((rdo >> RDO_FIXED_OP_CURR_SHIFT) & RDO_CURR_MASK) * 10;
+}
+
+static inline unsigned int rdo_max_current(u32 rdo)
+{
+ return ((rdo >> RDO_FIXED_MAX_CURR_SHIFT) &
+ RDO_CURR_MASK) * 10;
+}
+
+static inline unsigned int rdo_op_power(u32 rdo)
+{
+ return ((rdo >> RDO_BATT_OP_PWR_SHIFT) & RDO_PWR_MASK) * 250;
+}
+
+static inline unsigned int rdo_max_power(u32 rdo)
+{
+ return ((rdo >> RDO_BATT_MAX_PWR_SHIFT) & RDO_PWR_MASK) * 250;
+}
+
+/* USB PD timers and counters */
+#define PD_T_NO_RESPONSE 5000 /* 4.5 - 5.5 seconds */
+#define PD_T_DB_DETECT 10000 /* 10 - 15 seconds */
+#define PD_T_SEND_SOURCE_CAP 150 /* 100 - 200 ms */
+#define PD_T_SENDER_RESPONSE 60 /* 24 - 30 ms, relaxed */
+#define PD_T_SOURCE_ACTIVITY 45
+#define PD_T_SINK_ACTIVITY 135
+#define PD_T_SINK_WAIT_CAP 240
+#define PD_T_PS_TRANSITION 500
+#define PD_T_SRC_TRANSITION 35
+#define PD_T_DRP_SNK 40
+#define PD_T_DRP_SRC 30
+#define PD_T_PS_SOURCE_OFF 920
+#define PD_T_PS_SOURCE_ON 480
+#define PD_T_PS_HARD_RESET 30
+#define PD_T_SRC_RECOVER 760
+#define PD_T_SRC_RECOVER_MAX 1000
+#define PD_T_SRC_TURN_ON 275
+#define PD_T_SAFE_0V 650
+#define PD_T_VCONN_SOURCE_ON 100
+#define PD_T_SINK_REQUEST 100 /* 100 ms minimum */
+#define PD_T_ERROR_RECOVERY 100 /* minimum 25 is insufficient */
+
+#define PD_T_DRP_TRY 100 /* 75 - 150 ms */
+#define PD_T_DRP_TRYWAIT 600 /* 400 - 800 ms */
+
+#define PD_T_CC_DEBOUNCE 200 /* 100 - 200 ms */
+#define PD_T_PD_DEBOUNCE 20 /* 10 - 20 ms */
+
+#define PD_N_CAPS_COUNT (PD_T_NO_RESPONSE / PD_T_SEND_SOURCE_CAP)
+#define PD_N_HARD_RESET_COUNT 2
+
+#endif /* __LINUX_USB_PD_H */
diff --git a/drivers/staging/typec/pd_bdo.h b/drivers/staging/typec/pd_bdo.h
new file mode 100644
index 000000000000..90b94d9fea5d
--- /dev/null
+++ b/drivers/staging/typec/pd_bdo.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2015-2017 Google, Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __LINUX_USB_PD_BDO_H
+#define __LINUX_USB_PD_BDO_H
+
+/* BDO : BIST Data Object */
+#define BDO_MODE_RECV (0 << 28)
+#define BDO_MODE_TRANSMIT (1 << 28)
+#define BDO_MODE_COUNTERS (2 << 28)
+#define BDO_MODE_CARRIER0 (3 << 28)
+#define BDO_MODE_CARRIER1 (4 << 28)
+#define BDO_MODE_CARRIER2 (5 << 28)
+#define BDO_MODE_CARRIER3 (6 << 28)
+#define BDO_MODE_EYE (7 << 28)
+#define BDO_MODE_TESTDATA (8 << 28)
+
+#define BDO_MODE_MASK(mode) ((mode) & 0xf0000000)
+
+#endif
diff --git a/drivers/staging/typec/pd_vdo.h b/drivers/staging/typec/pd_vdo.h
new file mode 100644
index 000000000000..dba172e0e0d1
--- /dev/null
+++ b/drivers/staging/typec/pd_vdo.h
@@ -0,0 +1,249 @@
+/*
+ * Copyright 2015-2017 Google, Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __LINUX_USB_PD_VDO_H
+#define __LINUX_USB_PD_VDO_H
+
+#include "pd.h"
+
+/*
+ * VDO : Vendor Defined Message Object
+ * VDM object is minimum of VDM header + 6 additional data objects.
+ */
+
+/*
+ * VDM header
+ * ----------
+ * <31:16> :: SVID
+ * <15> :: VDM type ( 1b == structured, 0b == unstructured )
+ * <14:13> :: Structured VDM version (can only be 00 == 1.0 currently)
+ * <12:11> :: reserved
+ * <10:8> :: object position (1-7 valid ... used for enter/exit mode only)
+ * <7:6> :: command type (SVDM only?)
+ * <5> :: reserved (SVDM), command type (UVDM)
+ * <4:0> :: command
+ */
+#define VDO_MAX_SIZE 7
+#define VDO(vid, type, custom) \
+ (((vid) << 16) | \
+ ((type) << 15) | \
+ ((custom) & 0x7FFF))
+
+#define VDO_SVDM_TYPE (1 << 15)
+#define VDO_SVDM_VERS(x) ((x) << 13)
+#define VDO_OPOS(x) ((x) << 8)
+#define VDO_CMDT(x) ((x) << 6)
+#define VDO_OPOS_MASK VDO_OPOS(0x7)
+#define VDO_CMDT_MASK VDO_CMDT(0x3)
+
+#define CMDT_INIT 0
+#define CMDT_RSP_ACK 1
+#define CMDT_RSP_NAK 2
+#define CMDT_RSP_BUSY 3
+
+/* reserved for SVDM ... for Google UVDM */
+#define VDO_SRC_INITIATOR (0 << 5)
+#define VDO_SRC_RESPONDER (1 << 5)
+
+#define CMD_DISCOVER_IDENT 1
+#define CMD_DISCOVER_SVID 2
+#define CMD_DISCOVER_MODES 3
+#define CMD_ENTER_MODE 4
+#define CMD_EXIT_MODE 5
+#define CMD_ATTENTION 6
+
+#define VDO_CMD_VENDOR(x) (((10 + (x)) & 0x1f))
+
+/* ChromeOS specific commands */
+#define VDO_CMD_VERSION VDO_CMD_VENDOR(0)
+#define VDO_CMD_SEND_INFO VDO_CMD_VENDOR(1)
+#define VDO_CMD_READ_INFO VDO_CMD_VENDOR(2)
+#define VDO_CMD_REBOOT VDO_CMD_VENDOR(5)
+#define VDO_CMD_FLASH_ERASE VDO_CMD_VENDOR(6)
+#define VDO_CMD_FLASH_WRITE VDO_CMD_VENDOR(7)
+#define VDO_CMD_ERASE_SIG VDO_CMD_VENDOR(8)
+#define VDO_CMD_PING_ENABLE VDO_CMD_VENDOR(10)
+#define VDO_CMD_CURRENT VDO_CMD_VENDOR(11)
+#define VDO_CMD_FLIP VDO_CMD_VENDOR(12)
+#define VDO_CMD_GET_LOG VDO_CMD_VENDOR(13)
+#define VDO_CMD_CCD_EN VDO_CMD_VENDOR(14)
+
+#define PD_VDO_VID(vdo) ((vdo) >> 16)
+#define PD_VDO_SVDM(vdo) (((vdo) >> 15) & 1)
+#define PD_VDO_OPOS(vdo) (((vdo) >> 8) & 0x7)
+#define PD_VDO_CMD(vdo) ((vdo) & 0x1f)
+#define PD_VDO_CMDT(vdo) (((vdo) >> 6) & 0x3)
+
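/*
 * Illustration only, not part of this header: building and decoding a
 * Structured VDM header for a Discover Identity request with the macros
 * above. 0xff00 is the PD SID (USB_SID_PD, defined near the end of this
 * file); the helper name is hypothetical.
 */
static inline bool vdm_header_example(void)
{
	u32 hdr = VDO(0xff00, 1, VDO_CMDT(CMDT_INIT) | CMD_DISCOVER_IDENT);

	/* hdr == 0xff008001; decode it again with the accessors */
	return PD_VDO_VID(hdr) == 0xff00 && PD_VDO_SVDM(hdr) &&
	       PD_VDO_CMDT(hdr) == CMDT_INIT &&
	       PD_VDO_CMD(hdr) == CMD_DISCOVER_IDENT;	/* always true */
}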
+/*
+ * SVDM Identity request -> response
+ *
+ * Request is simply a properly formatted SVDM header
+ *
+ * Response is the SVDM header followed by 4 data objects:
+ * [0] :: SVDM header
+ * [1] :: Identity header
+ * [2] :: Cert Stat VDO
+ * [3] :: (Product | Cable) VDO
+ * [4] :: AMA VDO
+ *
+ */
+#define VDO_INDEX_HDR 0
+#define VDO_INDEX_IDH 1
+#define VDO_INDEX_CSTAT 2
+#define VDO_INDEX_CABLE 3
+#define VDO_INDEX_PRODUCT 3
+#define VDO_INDEX_AMA 4
+
+/*
+ * SVDM Identity Header
+ * --------------------
+ * <31> :: data capable as a USB host
+ * <30> :: data capable as a USB device
+ * <29:27> :: product type
+ * <26> :: modal operation supported (1b == yes)
+ * <25:16> :: Reserved, Shall be set to zero
+ * <15:0> :: USB-IF assigned VID for this cable vendor
+ */
+#define IDH_PTYPE_UNDEF 0
+#define IDH_PTYPE_HUB 1
+#define IDH_PTYPE_PERIPH 2
+#define IDH_PTYPE_PCABLE 3
+#define IDH_PTYPE_ACABLE 4
+#define IDH_PTYPE_AMA 5
+
+#define VDO_IDH(usbh, usbd, ptype, is_modal, vid) \
+ ((usbh) << 31 | (usbd) << 30 | ((ptype) & 0x7) << 27 \
+ | (is_modal) << 26 | ((vid) & 0xffff))
+
+#define PD_IDH_PTYPE(vdo) (((vdo) >> 27) & 0x7)
+#define PD_IDH_VID(vdo) ((vdo) & 0xffff)
+#define PD_IDH_MODAL_SUPP(vdo) ((vdo) & (1 << 26))
+
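/*
 * Illustration only (hypothetical vendor VID 0x1234): identity header of a
 * modal, device-capable Alternate Mode Adapter, built and decoded with the
 * macros above.
 */
static inline u32 idh_example(void)
{
	u32 idh = VDO_IDH(0, 1, IDH_PTYPE_AMA, 1, 0x1234);	/* 0x6c001234 */

	/* PD_IDH_PTYPE(idh) == IDH_PTYPE_AMA, PD_IDH_VID(idh) == 0x1234, */
	/* PD_IDH_MODAL_SUPP(idh) != 0 */
	return idh;
}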
+/*
+ * Cert Stat VDO
+ * -------------
+ * <31:0> : USB-IF assigned XID for this cable
+ */
+#define PD_CSTAT_XID(vdo) (vdo)
+
+/*
+ * Product VDO
+ * -----------
+ * <31:16> : USB Product ID
+ * <15:0> : USB bcdDevice
+ */
+#define VDO_PRODUCT(pid, bcd) (((pid) & 0xffff) << 16 | ((bcd) & 0xffff))
+#define PD_PRODUCT_PID(vdo) (((vdo) >> 16) & 0xffff)
+
+/*
+ * Cable VDO
+ * ---------
+ * <31:28> :: Cable HW version
+ * <27:24> :: Cable FW version
+ * <23:20> :: Reserved, Shall be set to zero
+ * <19:18> :: Type-C to Type-A/B/C (00b == A, 01 == B, 10 == C)
+ * <17> :: Type-C to Plug/Receptacle (0b == plug, 1b == receptacle)
+ * <16:13> :: cable latency (0001 == <10 ns (~1 m length))
+ * <12:11> :: cable termination type (11b == both ends active VCONN req)
+ * <10> :: SSTX1 Directionality support (0b == fixed, 1b == cfgable)
+ * <9> :: SSTX2 Directionality support
+ * <8> :: SSRX1 Directionality support
+ * <7> :: SSRX2 Directionality support
+ * <6:5> :: Vbus current handling capability
+ * <4> :: Vbus through cable (0b == no, 1b == yes)
+ * <3> :: SOP" controller present? (0b == no, 1b == yes)
+ * <2:0> :: USB SS Signaling support
+ */
+#define CABLE_ATYPE 0
+#define CABLE_BTYPE 1
+#define CABLE_CTYPE 2
+#define CABLE_PLUG 0
+#define CABLE_RECEPTACLE 1
+#define CABLE_CURR_1A5 0
+#define CABLE_CURR_3A 1
+#define CABLE_CURR_5A 2
+#define CABLE_USBSS_U2_ONLY 0
+#define CABLE_USBSS_U31_GEN1 1
+#define CABLE_USBSS_U31_GEN2 2
+#define VDO_CABLE(hw, fw, cbl, gdr, lat, term, tx1d, tx2d, rx1d, rx2d, cur,\
+ vps, sopp, usbss) \
+ (((hw) & 0x7) << 28 | ((fw) & 0x7) << 24 | ((cbl) & 0x3) << 18 \
+ | (gdr) << 17 | ((lat) & 0x7) << 13 | ((term) & 0x3) << 11 \
+ | (tx1d) << 10 | (tx2d) << 9 | (rx1d) << 8 | (rx2d) << 7 \
+ | ((cur) & 0x3) << 5 | (vps) << 4 | (sopp) << 3 \
+ | ((usbss) & 0x7))
+
+/*
+ * AMA VDO
+ * ---------
+ * <31:28> :: Cable HW version
+ * <27:24> :: Cable FW version
+ * <23:12> :: Reserved, Shall be set to zero
+ * <11> :: SSTX1 Directionality support (0b == fixed, 1b == cfgable)
+ * <10> :: SSTX2 Directionality support
+ * <9> :: SSRX1 Directionality support
+ * <8> :: SSRX2 Directionality support
+ * <7:5> :: Vconn power
+ * <4> :: Vconn power required
+ * <3> :: Vbus power required
+ * <2:0> :: USB SS Signaling support
+ */
+#define VDO_AMA(hw, fw, tx1d, tx2d, rx1d, rx2d, vcpwr, vcr, vbr, usbss) \
+ (((hw) & 0x7) << 28 | ((fw) & 0x7) << 24 \
+ | (tx1d) << 11 | (tx2d) << 10 | (rx1d) << 9 | (rx2d) << 8 \
+ | ((vcpwr) & 0x7) << 5 | (vcr) << 4 | (vbr) << 3 \
+ | ((usbss) & 0x7))
+
+#define PD_VDO_AMA_VCONN_REQ(vdo) (((vdo) >> 4) & 1)
+#define PD_VDO_AMA_VBUS_REQ(vdo) (((vdo) >> 3) & 1)
+
+#define AMA_VCONN_PWR_1W 0
+#define AMA_VCONN_PWR_1W5 1
+#define AMA_VCONN_PWR_2W 2
+#define AMA_VCONN_PWR_3W 3
+#define AMA_VCONN_PWR_4W 4
+#define AMA_VCONN_PWR_5W 5
+#define AMA_VCONN_PWR_6W 6
+#define AMA_USBSS_U2_ONLY 0
+#define AMA_USBSS_U31_GEN1 1
+#define AMA_USBSS_U31_GEN2 2
+#define AMA_USBSS_BBONLY 3
+
+/*
+ * SVDM Discover SVIDs request -> response
+ *
+ * Request is properly formatted VDM Header with discover SVIDs command.
+ * Response is a set of VDOs listing all supported SVIDs, with all zeros
+ * marking the end of the SVIDs. If more than 12 SVIDs are supported, the
+ * command SHOULD be repeated.
+ */
+#define VDO_SVID(svid0, svid1) (((svid0) & 0xffff) << 16 | ((svid1) & 0xffff))
+#define PD_VDO_SVID_SVID0(vdo) ((vdo) >> 16)
+#define PD_VDO_SVID_SVID1(vdo) ((vdo) & 0xffff)
+
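/*
 * Illustration only: each Discover SVIDs response VDO packs two SVIDs, and
 * a VDO of all zeros terminates the list. The values and helper name here
 * are hypothetical.
 */
static inline u32 svid_vdo_example(void)
{
	u32 vdo = VDO_SVID(0xff01, 0x1234);	/* == 0xff011234 */

	/* PD_VDO_SVID_SVID0(vdo) == 0xff01, PD_VDO_SVID_SVID1(vdo) == 0x1234 */
	return vdo;
}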
+/* USB-IF SIDs */
+#define USB_SID_PD 0xff00 /* power delivery */
+#define USB_SID_DISPLAYPORT 0xff01
+#define USB_SID_MHL 0xff02 /* Mobile High-Definition Link */
+
+/* VDM command timeouts (in ms) */
+
+#define PD_T_VDM_UNSTRUCTURED 500
+#define PD_T_VDM_BUSY 100
+#define PD_T_VDM_WAIT_MODE_E 100
+#define PD_T_VDM_SNDR_RSP 30
+#define PD_T_VDM_E_MODE 25
+#define PD_T_VDM_RCVR_RSP 15
+
+#endif /* __LINUX_USB_PD_VDO_H */
diff --git a/drivers/staging/typec/tcpci.c b/drivers/staging/typec/tcpci.c
new file mode 100644
index 000000000000..5e5be74c7850
--- /dev/null
+++ b/drivers/staging/typec/tcpci.c
@@ -0,0 +1,526 @@
+/*
+ * Copyright 2015-2017 Google, Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * USB Type-C Port Controller Interface.
+ */
+
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/regmap.h>
+#include <linux/usb/typec.h>
+
+#include "pd.h"
+#include "tcpci.h"
+#include "tcpm.h"
+
+#define PD_RETRY_COUNT 3
+
+struct tcpci {
+ struct device *dev;
+ struct i2c_client *client;
+
+ struct tcpm_port *port;
+
+ struct regmap *regmap;
+
+ bool controls_vbus;
+
+ struct tcpc_dev tcpc;
+};
+
+static inline struct tcpci *tcpc_to_tcpci(struct tcpc_dev *tcpc)
+{
+ return container_of(tcpc, struct tcpci, tcpc);
+}
+
+static int tcpci_read16(struct tcpci *tcpci, unsigned int reg,
+ unsigned int *val)
+{
+ return regmap_raw_read(tcpci->regmap, reg, val, sizeof(u16));
+}
+
+static int tcpci_write16(struct tcpci *tcpci, unsigned int reg, u16 val)
+{
+ return regmap_raw_write(tcpci->regmap, reg, &val, sizeof(u16));
+}
+
+static int tcpci_set_cc(struct tcpc_dev *tcpc, enum typec_cc_status cc)
+{
+ struct tcpci *tcpci = tcpc_to_tcpci(tcpc);
+ unsigned int reg;
+ int ret;
+
+ switch (cc) {
+ case TYPEC_CC_RA:
+ reg = (TCPC_ROLE_CTRL_CC_RA << TCPC_ROLE_CTRL_CC1_SHIFT) |
+ (TCPC_ROLE_CTRL_CC_RA << TCPC_ROLE_CTRL_CC2_SHIFT);
+ break;
+ case TYPEC_CC_RD:
+ reg = (TCPC_ROLE_CTRL_CC_RD << TCPC_ROLE_CTRL_CC1_SHIFT) |
+ (TCPC_ROLE_CTRL_CC_RD << TCPC_ROLE_CTRL_CC2_SHIFT);
+ break;
+ case TYPEC_CC_RP_DEF:
+ reg = (TCPC_ROLE_CTRL_CC_RP << TCPC_ROLE_CTRL_CC1_SHIFT) |
+ (TCPC_ROLE_CTRL_CC_RP << TCPC_ROLE_CTRL_CC2_SHIFT) |
+ (TCPC_ROLE_CTRL_RP_VAL_DEF <<
+ TCPC_ROLE_CTRL_RP_VAL_SHIFT);
+ break;
+ case TYPEC_CC_RP_1_5:
+ reg = (TCPC_ROLE_CTRL_CC_RP << TCPC_ROLE_CTRL_CC1_SHIFT) |
+ (TCPC_ROLE_CTRL_CC_RP << TCPC_ROLE_CTRL_CC2_SHIFT) |
+ (TCPC_ROLE_CTRL_RP_VAL_1_5 <<
+ TCPC_ROLE_CTRL_RP_VAL_SHIFT);
+ break;
+ case TYPEC_CC_RP_3_0:
+ reg = (TCPC_ROLE_CTRL_CC_RP << TCPC_ROLE_CTRL_CC1_SHIFT) |
+ (TCPC_ROLE_CTRL_CC_RP << TCPC_ROLE_CTRL_CC2_SHIFT) |
+ (TCPC_ROLE_CTRL_RP_VAL_3_0 <<
+ TCPC_ROLE_CTRL_RP_VAL_SHIFT);
+ break;
+ case TYPEC_CC_OPEN:
+ default:
+ reg = (TCPC_ROLE_CTRL_CC_OPEN << TCPC_ROLE_CTRL_CC1_SHIFT) |
+ (TCPC_ROLE_CTRL_CC_OPEN << TCPC_ROLE_CTRL_CC2_SHIFT);
+ break;
+ }
+
+ ret = regmap_write(tcpci->regmap, TCPC_ROLE_CTRL, reg);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int tcpci_start_drp_toggling(struct tcpc_dev *tcpc,
+ enum typec_cc_status cc)
+{
+ struct tcpci *tcpci = tcpc_to_tcpci(tcpc);
+ unsigned int reg = TCPC_ROLE_CTRL_DRP;
+
+ switch (cc) {
+ default:
+ case TYPEC_CC_RP_DEF:
+ reg |= (TCPC_ROLE_CTRL_RP_VAL_DEF <<
+ TCPC_ROLE_CTRL_RP_VAL_SHIFT);
+ break;
+ case TYPEC_CC_RP_1_5:
+ reg |= (TCPC_ROLE_CTRL_RP_VAL_1_5 <<
+ TCPC_ROLE_CTRL_RP_VAL_SHIFT);
+ break;
+ case TYPEC_CC_RP_3_0:
+ reg |= (TCPC_ROLE_CTRL_RP_VAL_3_0 <<
+ TCPC_ROLE_CTRL_RP_VAL_SHIFT);
+ break;
+ }
+
+ return regmap_write(tcpci->regmap, TCPC_ROLE_CTRL, reg);
+}
+
+static enum typec_cc_status tcpci_to_typec_cc(unsigned int cc, bool sink)
+{
+ switch (cc) {
+ case 0x1:
+ return sink ? TYPEC_CC_RP_DEF : TYPEC_CC_RA;
+ case 0x2:
+ return sink ? TYPEC_CC_RP_1_5 : TYPEC_CC_RD;
+ case 0x3:
+ if (sink)
+ return TYPEC_CC_RP_3_0;
+ case 0x0:
+ default:
+ return TYPEC_CC_OPEN;
+ }
+}
+
+static int tcpci_get_cc(struct tcpc_dev *tcpc,
+ enum typec_cc_status *cc1, enum typec_cc_status *cc2)
+{
+ struct tcpci *tcpci = tcpc_to_tcpci(tcpc);
+ unsigned int reg;
+ int ret;
+
+ ret = regmap_read(tcpci->regmap, TCPC_CC_STATUS, &reg);
+ if (ret < 0)
+ return ret;
+
+ *cc1 = tcpci_to_typec_cc((reg >> TCPC_CC_STATUS_CC1_SHIFT) &
+ TCPC_CC_STATUS_CC1_MASK,
+ reg & TCPC_CC_STATUS_TERM);
+ *cc2 = tcpci_to_typec_cc((reg >> TCPC_CC_STATUS_CC2_SHIFT) &
+ TCPC_CC_STATUS_CC2_MASK,
+ reg & TCPC_CC_STATUS_TERM);
+
+ return 0;
+}
+
+static int tcpci_set_polarity(struct tcpc_dev *tcpc,
+ enum typec_cc_polarity polarity)
+{
+ struct tcpci *tcpci = tcpc_to_tcpci(tcpc);
+ int ret;
+
+ ret = regmap_write(tcpci->regmap, TCPC_TCPC_CTRL,
+ (polarity == TYPEC_POLARITY_CC2) ?
+ TCPC_TCPC_CTRL_ORIENTATION : 0);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int tcpci_set_vconn(struct tcpc_dev *tcpc, bool enable)
+{
+ struct tcpci *tcpci = tcpc_to_tcpci(tcpc);
+ int ret;
+
+ ret = regmap_write(tcpci->regmap, TCPC_POWER_CTRL,
+ enable ? TCPC_POWER_CTRL_VCONN_ENABLE : 0);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int tcpci_set_roles(struct tcpc_dev *tcpc, bool attached,
+ enum typec_role role, enum typec_data_role data)
+{
+ struct tcpci *tcpci = tcpc_to_tcpci(tcpc);
+ unsigned int reg;
+ int ret;
+
+ reg = PD_REV20 << TCPC_MSG_HDR_INFO_REV_SHIFT;
+ if (role == TYPEC_SOURCE)
+ reg |= TCPC_MSG_HDR_INFO_PWR_ROLE;
+ if (data == TYPEC_HOST)
+ reg |= TCPC_MSG_HDR_INFO_DATA_ROLE;
+ ret = regmap_write(tcpci->regmap, TCPC_MSG_HDR_INFO, reg);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int tcpci_set_pd_rx(struct tcpc_dev *tcpc, bool enable)
+{
+ struct tcpci *tcpci = tcpc_to_tcpci(tcpc);
+ unsigned int reg = 0;
+ int ret;
+
+ if (enable)
+ reg = TCPC_RX_DETECT_SOP | TCPC_RX_DETECT_HARD_RESET;
+ ret = regmap_write(tcpci->regmap, TCPC_RX_DETECT, reg);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int tcpci_get_vbus(struct tcpc_dev *tcpc)
+{
+ struct tcpci *tcpci = tcpc_to_tcpci(tcpc);
+ unsigned int reg;
+ int ret;
+
+ ret = regmap_read(tcpci->regmap, TCPC_POWER_STATUS, &reg);
+ if (ret < 0)
+ return ret;
+
+ return !!(reg & TCPC_POWER_STATUS_VBUS_PRES);
+}
+
+static int tcpci_set_vbus(struct tcpc_dev *tcpc, bool source, bool sink)
+{
+ struct tcpci *tcpci = tcpc_to_tcpci(tcpc);
+ int ret;
+
+ /* Disable both source and sink first before enabling anything */
+
+ if (!source) {
+ ret = regmap_write(tcpci->regmap, TCPC_COMMAND,
+ TCPC_CMD_DISABLE_SRC_VBUS);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (!sink) {
+ ret = regmap_write(tcpci->regmap, TCPC_COMMAND,
+ TCPC_CMD_DISABLE_SINK_VBUS);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (source) {
+ ret = regmap_write(tcpci->regmap, TCPC_COMMAND,
+ TCPC_CMD_SRC_VBUS_DEFAULT);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (sink) {
+ ret = regmap_write(tcpci->regmap, TCPC_COMMAND,
+ TCPC_CMD_SINK_VBUS);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int tcpci_pd_transmit(struct tcpc_dev *tcpc,
+ enum tcpm_transmit_type type,
+ const struct pd_message *msg)
+{
+ struct tcpci *tcpci = tcpc_to_tcpci(tcpc);
+ unsigned int reg, cnt, header;
+ int ret;
+
+ cnt = msg ? pd_header_cnt(msg->header) * 4 : 0;
+ ret = regmap_write(tcpci->regmap, TCPC_TX_BYTE_CNT, cnt + 2);
+ if (ret < 0)
+ return ret;
+
+ header = msg ? msg->header : 0;
+ ret = tcpci_write16(tcpci, TCPC_TX_HDR, header);
+ if (ret < 0)
+ return ret;
+
+ if (cnt > 0) {
+ ret = regmap_raw_write(tcpci->regmap, TCPC_TX_DATA,
+ &msg->payload, cnt);
+ if (ret < 0)
+ return ret;
+ }
+
+ reg = (PD_RETRY_COUNT << TCPC_TRANSMIT_RETRY_SHIFT) |
+ (type << TCPC_TRANSMIT_TYPE_SHIFT);
+ ret = regmap_write(tcpci->regmap, TCPC_TRANSMIT, reg);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int tcpci_init(struct tcpc_dev *tcpc)
+{
+ struct tcpci *tcpci = tcpc_to_tcpci(tcpc);
+ unsigned long timeout = jiffies + msecs_to_jiffies(2000); /* XXX */
+ unsigned int reg;
+ int ret;
+
+ while (time_before_eq(jiffies, timeout)) {
+ ret = regmap_read(tcpci->regmap, TCPC_POWER_STATUS, &reg);
+ if (ret < 0)
+ return ret;
+ if (!(reg & TCPC_POWER_STATUS_UNINIT))
+ break;
+ usleep_range(10000, 20000);
+ }
+ if (time_after(jiffies, timeout))
+ return -ETIMEDOUT;
+
+ /* Clear all events */
+ ret = tcpci_write16(tcpci, TCPC_ALERT, 0xffff);
+ if (ret < 0)
+ return ret;
+
+ if (tcpci->controls_vbus)
+ reg = TCPC_POWER_STATUS_VBUS_PRES;
+ else
+ reg = 0;
+ ret = regmap_write(tcpci->regmap, TCPC_POWER_STATUS_MASK, reg);
+ if (ret < 0)
+ return ret;
+
+ reg = TCPC_ALERT_TX_SUCCESS | TCPC_ALERT_TX_FAILED |
+ TCPC_ALERT_TX_DISCARDED | TCPC_ALERT_RX_STATUS |
+ TCPC_ALERT_RX_HARD_RST | TCPC_ALERT_CC_STATUS;
+ if (tcpci->controls_vbus)
+ reg |= TCPC_ALERT_POWER_STATUS;
+ return tcpci_write16(tcpci, TCPC_ALERT_MASK, reg);
+}
+
+static irqreturn_t tcpci_irq(int irq, void *dev_id)
+{
+ struct tcpci *tcpci = dev_id;
+ unsigned int status, reg;
+
+ tcpci_read16(tcpci, TCPC_ALERT, &status);
+
+ /*
+ * Clear alert status for everything except RX_STATUS, which shouldn't
+ * be cleared until we have successfully retrieved the message.
+ */
+ if (status & ~TCPC_ALERT_RX_STATUS)
+ tcpci_write16(tcpci, TCPC_ALERT,
+ status & ~TCPC_ALERT_RX_STATUS);
+
+ if (status & TCPC_ALERT_CC_STATUS)
+ tcpm_cc_change(tcpci->port);
+
+ if (status & TCPC_ALERT_POWER_STATUS) {
+ regmap_read(tcpci->regmap, TCPC_POWER_STATUS_MASK, &reg);
+
+ /*
+ * If power status mask has been reset, then the TCPC
+ * has reset.
+ */
+ if (reg == 0xff)
+ tcpm_tcpc_reset(tcpci->port);
+ else
+ tcpm_vbus_change(tcpci->port);
+ }
+
+ if (status & TCPC_ALERT_RX_STATUS) {
+ struct pd_message msg;
+ unsigned int cnt;
+
+ regmap_read(tcpci->regmap, TCPC_RX_BYTE_CNT, &cnt);
+
+ tcpci_read16(tcpci, TCPC_RX_HDR, &reg);
+ msg.header = reg;
+
+ if (WARN_ON(cnt > sizeof(msg.payload)))
+ cnt = sizeof(msg.payload);
+
+ if (cnt > 0)
+ regmap_raw_read(tcpci->regmap, TCPC_RX_DATA,
+ &msg.payload, cnt);
+
+ /* Read complete, clear RX status alert bit */
+ tcpci_write16(tcpci, TCPC_ALERT, TCPC_ALERT_RX_STATUS);
+
+ tcpm_pd_receive(tcpci->port, &msg);
+ }
+
+ if (status & TCPC_ALERT_RX_HARD_RST)
+ tcpm_pd_hard_reset(tcpci->port);
+
+ if (status & TCPC_ALERT_TX_SUCCESS)
+ tcpm_pd_transmit_complete(tcpci->port, TCPC_TX_SUCCESS);
+ else if (status & TCPC_ALERT_TX_DISCARDED)
+ tcpm_pd_transmit_complete(tcpci->port, TCPC_TX_DISCARDED);
+ else if (status & TCPC_ALERT_TX_FAILED)
+ tcpm_pd_transmit_complete(tcpci->port, TCPC_TX_FAILED);
+
+ return IRQ_HANDLED;
+}
+
+static const struct regmap_config tcpci_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = 0x7F, /* 0x80 .. 0xFF are vendor defined */
+};
+
+const struct tcpc_config tcpci_tcpc_config = {
+ .type = TYPEC_PORT_DFP,
+ .default_role = TYPEC_SINK,
+};
+
+static int tcpci_parse_config(struct tcpci *tcpci)
+{
+ tcpci->controls_vbus = true; /* XXX */
+
+ /* TODO: Populate struct tcpc_config from ACPI/device-tree */
+ tcpci->tcpc.config = &tcpci_tcpc_config;
+
+ return 0;
+}
+
+static int tcpci_probe(struct i2c_client *client,
+ const struct i2c_device_id *i2c_id)
+{
+ struct tcpci *tcpci;
+ int err;
+
+ tcpci = devm_kzalloc(&client->dev, sizeof(*tcpci), GFP_KERNEL);
+ if (!tcpci)
+ return -ENOMEM;
+
+ tcpci->client = client;
+ tcpci->dev = &client->dev;
+ i2c_set_clientdata(client, tcpci);
+ tcpci->regmap = devm_regmap_init_i2c(client, &tcpci_regmap_config);
+ if (IS_ERR(tcpci->regmap))
+ return PTR_ERR(tcpci->regmap);
+
+ tcpci->tcpc.init = tcpci_init;
+ tcpci->tcpc.get_vbus = tcpci_get_vbus;
+ tcpci->tcpc.set_vbus = tcpci_set_vbus;
+ tcpci->tcpc.set_cc = tcpci_set_cc;
+ tcpci->tcpc.get_cc = tcpci_get_cc;
+ tcpci->tcpc.set_polarity = tcpci_set_polarity;
+ tcpci->tcpc.set_vconn = tcpci_set_vconn;
+ tcpci->tcpc.start_drp_toggling = tcpci_start_drp_toggling;
+
+ tcpci->tcpc.set_pd_rx = tcpci_set_pd_rx;
+ tcpci->tcpc.set_roles = tcpci_set_roles;
+ tcpci->tcpc.pd_transmit = tcpci_pd_transmit;
+
+ err = tcpci_parse_config(tcpci);
+ if (err < 0)
+ return err;
+
+ /* Disable chip interrupts */
+ tcpci_write16(tcpci, TCPC_ALERT_MASK, 0);
+
+ err = devm_request_threaded_irq(tcpci->dev, client->irq, NULL,
+ tcpci_irq,
+ IRQF_ONESHOT | IRQF_TRIGGER_LOW,
+ dev_name(tcpci->dev), tcpci);
+ if (err < 0)
+ return err;
+
+ tcpci->port = tcpm_register_port(tcpci->dev, &tcpci->tcpc);
+ return PTR_ERR_OR_ZERO(tcpci->port);
+}
+
+static int tcpci_remove(struct i2c_client *client)
+{
+ struct tcpci *tcpci = i2c_get_clientdata(client);
+
+ tcpm_unregister_port(tcpci->port);
+
+ return 0;
+}
+
+static const struct i2c_device_id tcpci_id[] = {
+ { "tcpci", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, tcpci_id);
+
+#ifdef CONFIG_OF
+static const struct of_device_id tcpci_of_match[] = {
+ { .compatible = "usb,tcpci", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, tcpci_of_match);
+#endif
+
+static struct i2c_driver tcpci_i2c_driver = {
+ .driver = {
+ .name = "tcpci",
+ .of_match_table = of_match_ptr(tcpci_of_match),
+ },
+ .probe = tcpci_probe,
+ .remove = tcpci_remove,
+ .id_table = tcpci_id,
+};
+module_i2c_driver(tcpci_i2c_driver);
+
+MODULE_DESCRIPTION("USB Type-C Port Controller Interface driver");
+MODULE_LICENSE("GPL");
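
The driver matches either on the plain I2C id "tcpci" or, with CONFIG_OF, on the "usb,tcpci" compatible, and it requests a level-low threaded interrupt. A device tree node for such a port controller might look roughly like the following sketch (the bus address, interrupt line and node name are hypothetical):

	tcpc@4e {
		compatible = "usb,tcpci";
		reg = <0x4e>;
		interrupt-parent = <&gpio3>;
		interrupts = <4 IRQ_TYPE_LEVEL_LOW>;
	};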
diff --git a/drivers/staging/typec/tcpci.h b/drivers/staging/typec/tcpci.h
new file mode 100644
index 000000000000..10b04c8723da
--- /dev/null
+++ b/drivers/staging/typec/tcpci.h
@@ -0,0 +1,133 @@
+/*
+ * Copyright 2015-2017 Google, Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * USB Type-C Port Controller Interface.
+ */
+
+#ifndef __LINUX_USB_TCPCI_H
+#define __LINUX_USB_TCPCI_H
+
+#define TCPC_VENDOR_ID 0x0
+#define TCPC_PRODUCT_ID 0x2
+#define TCPC_BCD_DEV 0x4
+#define TCPC_TC_REV 0x6
+#define TCPC_PD_REV 0x8
+#define TCPC_PD_INT_REV 0xa
+
+#define TCPC_ALERT 0x10
+#define TCPC_ALERT_VBUS_DISCNCT BIT(11)
+#define TCPC_ALERT_RX_BUF_OVF BIT(10)
+#define TCPC_ALERT_FAULT BIT(9)
+#define TCPC_ALERT_V_ALARM_LO BIT(8)
+#define TCPC_ALERT_V_ALARM_HI BIT(7)
+#define TCPC_ALERT_TX_SUCCESS BIT(6)
+#define TCPC_ALERT_TX_DISCARDED BIT(5)
+#define TCPC_ALERT_TX_FAILED BIT(4)
+#define TCPC_ALERT_RX_HARD_RST BIT(3)
+#define TCPC_ALERT_RX_STATUS BIT(2)
+#define TCPC_ALERT_POWER_STATUS BIT(1)
+#define TCPC_ALERT_CC_STATUS BIT(0)
+
+#define TCPC_ALERT_MASK 0x12
+#define TCPC_POWER_STATUS_MASK 0x14
+#define TCPC_FAULT_STATUS_MASK 0x15
+#define TCPC_CONFIG_STD_OUTPUT 0x18
+
+#define TCPC_TCPC_CTRL 0x19
+#define TCPC_TCPC_CTRL_ORIENTATION BIT(0)
+
+#define TCPC_ROLE_CTRL 0x1a
+#define TCPC_ROLE_CTRL_DRP BIT(6)
+#define TCPC_ROLE_CTRL_RP_VAL_SHIFT 4
+#define TCPC_ROLE_CTRL_RP_VAL_MASK 0x3
+#define TCPC_ROLE_CTRL_RP_VAL_DEF 0x0
+#define TCPC_ROLE_CTRL_RP_VAL_1_5 0x1
+#define TCPC_ROLE_CTRL_RP_VAL_3_0 0x2
+#define TCPC_ROLE_CTRL_CC2_SHIFT 2
+#define TCPC_ROLE_CTRL_CC2_MASK 0x3
+#define TCPC_ROLE_CTRL_CC1_SHIFT 0
+#define TCPC_ROLE_CTRL_CC1_MASK 0x3
+#define TCPC_ROLE_CTRL_CC_RA 0x0
+#define TCPC_ROLE_CTRL_CC_RP 0x1
+#define TCPC_ROLE_CTRL_CC_RD 0x2
+#define TCPC_ROLE_CTRL_CC_OPEN 0x3
+
+#define TCPC_FAULT_CTRL 0x1b
+
+#define TCPC_POWER_CTRL 0x1c
+#define TCPC_POWER_CTRL_VCONN_ENABLE BIT(0)
+
+#define TCPC_CC_STATUS 0x1d
+#define TCPC_CC_STATUS_TERM BIT(4)
+#define TCPC_CC_STATUS_CC2_SHIFT 2
+#define TCPC_CC_STATUS_CC2_MASK 0x3
+#define TCPC_CC_STATUS_CC1_SHIFT 0
+#define TCPC_CC_STATUS_CC1_MASK 0x3
+
+#define TCPC_POWER_STATUS 0x1e
+#define TCPC_POWER_STATUS_UNINIT BIT(6)
+#define TCPC_POWER_STATUS_VBUS_DET BIT(3)
+#define TCPC_POWER_STATUS_VBUS_PRES BIT(2)
+
+#define TCPC_FAULT_STATUS 0x1f
+
+#define TCPC_COMMAND 0x23
+#define TCPC_CMD_WAKE_I2C 0x11
+#define TCPC_CMD_DISABLE_VBUS_DETECT 0x22
+#define TCPC_CMD_ENABLE_VBUS_DETECT 0x33
+#define TCPC_CMD_DISABLE_SINK_VBUS 0x44
+#define TCPC_CMD_SINK_VBUS 0x55
+#define TCPC_CMD_DISABLE_SRC_VBUS 0x66
+#define TCPC_CMD_SRC_VBUS_DEFAULT 0x77
+#define TCPC_CMD_SRC_VBUS_HIGH 0x88
+#define TCPC_CMD_LOOK4CONNECTION 0x99
+#define TCPC_CMD_RXONEMORE 0xAA
+#define TCPC_CMD_I2C_IDLE 0xFF
+
+#define TCPC_DEV_CAP_1 0x24
+#define TCPC_DEV_CAP_2 0x26
+#define TCPC_STD_INPUT_CAP 0x28
+#define TCPC_STD_OUTPUT_CAP 0x29
+
+#define TCPC_MSG_HDR_INFO 0x2e
+#define TCPC_MSG_HDR_INFO_DATA_ROLE BIT(3)
+#define TCPC_MSG_HDR_INFO_PWR_ROLE BIT(0)
+#define TCPC_MSG_HDR_INFO_REV_SHIFT 1
+#define TCPC_MSG_HDR_INFO_REV_MASK 0x3
+
+#define TCPC_RX_DETECT 0x2f
+#define TCPC_RX_DETECT_HARD_RESET BIT(5)
+#define TCPC_RX_DETECT_SOP BIT(0)
+
+#define TCPC_RX_BYTE_CNT 0x30
+#define TCPC_RX_BUF_FRAME_TYPE 0x31
+#define TCPC_RX_HDR 0x32
+#define TCPC_RX_DATA 0x34 /* through 0x4f */
+
+#define TCPC_TRANSMIT 0x50
+#define TCPC_TRANSMIT_RETRY_SHIFT 4
+#define TCPC_TRANSMIT_RETRY_MASK 0x3
+#define TCPC_TRANSMIT_TYPE_SHIFT 0
+#define TCPC_TRANSMIT_TYPE_MASK 0x7
+
+#define TCPC_TX_BYTE_CNT 0x51
+#define TCPC_TX_HDR 0x52
+#define TCPC_TX_DATA 0x54 /* through 0x6f */
+
+#define TCPC_VBUS_VOLTAGE 0x70
+#define TCPC_VBUS_SINK_DISCONNECT_THRESH 0x72
+#define TCPC_VBUS_STOP_DISCHARGE_THRESH 0x74
+#define TCPC_VBUS_VOLTAGE_ALARM_HI_CFG 0x76
+#define TCPC_VBUS_VOLTAGE_ALARM_LO_CFG 0x78
+
+#endif /* __LINUX_USB_TCPCI_H */
diff --git a/drivers/staging/typec/tcpm.c b/drivers/staging/typec/tcpm.c
new file mode 100644
index 000000000000..abba655ba00a
--- /dev/null
+++ b/drivers/staging/typec/tcpm.c
@@ -0,0 +1,3465 @@
+/*
+ * Copyright 2015-2017 Google, Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * USB Power Delivery protocol stack.
+ */
+
+#include <linux/completion.h>
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/proc_fs.h>
+#include <linux/sched/clock.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/usb/typec.h>
+#include <linux/workqueue.h>
+
+#include "pd.h"
+#include "pd_vdo.h"
+#include "pd_bdo.h"
+#include "tcpm.h"
+
+#define FOREACH_STATE(S) \
+ S(INVALID_STATE), \
+ S(DRP_TOGGLING), \
+ S(SRC_UNATTACHED), \
+ S(SRC_ATTACH_WAIT), \
+ S(SRC_ATTACHED), \
+ S(SRC_STARTUP), \
+ S(SRC_SEND_CAPABILITIES), \
+ S(SRC_NEGOTIATE_CAPABILITIES), \
+ S(SRC_TRANSITION_SUPPLY), \
+ S(SRC_READY), \
+ S(SRC_WAIT_NEW_CAPABILITIES), \
+ \
+ S(SNK_UNATTACHED), \
+ S(SNK_ATTACH_WAIT), \
+ S(SNK_DEBOUNCED), \
+ S(SNK_ATTACHED), \
+ S(SNK_STARTUP), \
+ S(SNK_DISCOVERY), \
+ S(SNK_DISCOVERY_DEBOUNCE), \
+ S(SNK_DISCOVERY_DEBOUNCE_DONE), \
+ S(SNK_WAIT_CAPABILITIES), \
+ S(SNK_NEGOTIATE_CAPABILITIES), \
+ S(SNK_TRANSITION_SINK), \
+ S(SNK_TRANSITION_SINK_VBUS), \
+ S(SNK_READY), \
+ \
+ S(ACC_UNATTACHED), \
+ S(DEBUG_ACC_ATTACHED), \
+ S(AUDIO_ACC_ATTACHED), \
+ S(AUDIO_ACC_DEBOUNCE), \
+ \
+ S(HARD_RESET_SEND), \
+ S(HARD_RESET_START), \
+ S(SRC_HARD_RESET_VBUS_OFF), \
+ S(SRC_HARD_RESET_VBUS_ON), \
+ S(SNK_HARD_RESET_SINK_OFF), \
+ S(SNK_HARD_RESET_WAIT_VBUS), \
+ S(SNK_HARD_RESET_SINK_ON), \
+ \
+ S(SOFT_RESET), \
+ S(SOFT_RESET_SEND), \
+ \
+ S(DR_SWAP_ACCEPT), \
+ S(DR_SWAP_SEND), \
+ S(DR_SWAP_SEND_TIMEOUT), \
+ S(DR_SWAP_CANCEL), \
+ S(DR_SWAP_CHANGE_DR), \
+ \
+ S(PR_SWAP_ACCEPT), \
+ S(PR_SWAP_SEND), \
+ S(PR_SWAP_SEND_TIMEOUT), \
+ S(PR_SWAP_CANCEL), \
+ S(PR_SWAP_START), \
+ S(PR_SWAP_SRC_SNK_TRANSITION_OFF), \
+ S(PR_SWAP_SRC_SNK_SOURCE_OFF), \
+ S(PR_SWAP_SRC_SNK_SINK_ON), \
+ S(PR_SWAP_SNK_SRC_SINK_OFF), \
+ S(PR_SWAP_SNK_SRC_SOURCE_ON), \
+ \
+ S(VCONN_SWAP_ACCEPT), \
+ S(VCONN_SWAP_SEND), \
+ S(VCONN_SWAP_SEND_TIMEOUT), \
+ S(VCONN_SWAP_CANCEL), \
+ S(VCONN_SWAP_START), \
+ S(VCONN_SWAP_WAIT_FOR_VCONN), \
+ S(VCONN_SWAP_TURN_ON_VCONN), \
+ S(VCONN_SWAP_TURN_OFF_VCONN), \
+ \
+ S(SNK_TRY), \
+ S(SNK_TRY_WAIT), \
+ S(SRC_TRYWAIT), \
+ S(SRC_TRYWAIT_UNATTACHED), \
+ \
+ S(SRC_TRY), \
+ S(SRC_TRY_DEBOUNCE), \
+ S(SNK_TRYWAIT), \
+ S(SNK_TRYWAIT_DEBOUNCE), \
+ S(SNK_TRYWAIT_VBUS), \
+ S(BIST_RX), \
+ \
+ S(ERROR_RECOVERY), \
+ S(ERROR_RECOVERY_WAIT_OFF)
+
+#define GENERATE_ENUM(e) e
+#define GENERATE_STRING(s) #s
+
+enum tcpm_state {
+ FOREACH_STATE(GENERATE_ENUM)
+};
+
+static const char * const tcpm_states[] = {
+ FOREACH_STATE(GENERATE_STRING)
+};
+
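/*
 * The FOREACH_STATE/GENERATE_* construct above is an X-macro: the single
 * state list expands both into the enum and into a parallel string table,
 * so the two cannot drift apart and tcpm_states[SRC_READY] == "SRC_READY".
 * A minimal standalone sketch of the same technique (hypothetical names,
 * not part of the driver):
 */
#define FOREACH_COLOR(C)	C(RED), C(GREEN), C(BLUE)

enum color { FOREACH_COLOR(GENERATE_ENUM) };
static const char * const color_names[] = { FOREACH_COLOR(GENERATE_STRING) };
/* color_names[GREEN] == "GREEN" */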
+enum vdm_states {
+ VDM_STATE_ERR_BUSY = -3,
+ VDM_STATE_ERR_SEND = -2,
+ VDM_STATE_ERR_TMOUT = -1,
+ VDM_STATE_DONE = 0,
+ /* Anything >0 represents an active state */
+ VDM_STATE_READY = 1,
+ VDM_STATE_BUSY = 2,
+ VDM_STATE_WAIT_RSP_BUSY = 3,
+};
+
+enum pd_msg_request {
+ PD_MSG_NONE = 0,
+ PD_MSG_CTRL_REJECT,
+ PD_MSG_CTRL_WAIT,
+ PD_MSG_DATA_SINK_CAP,
+ PD_MSG_DATA_SOURCE_CAP,
+};
+
+/* Events from low level driver */
+
+#define TCPM_CC_EVENT BIT(0)
+#define TCPM_VBUS_EVENT BIT(1)
+#define TCPM_RESET_EVENT BIT(2)
+
+#define LOG_BUFFER_ENTRIES 1024
+#define LOG_BUFFER_ENTRY_SIZE 128
+
+/* Alternate mode support */
+
+#define SVID_DISCOVERY_MAX 16
+
+struct pd_mode_data {
+ int svid_index; /* current SVID index */
+ int nsvids;
+ u16 svids[SVID_DISCOVERY_MAX];
+ int altmodes; /* number of alternate modes */
+ struct typec_altmode_desc altmode_desc[SVID_DISCOVERY_MAX];
+};
+
+struct tcpm_port {
+ struct device *dev;
+
+ struct mutex lock; /* tcpm state machine lock */
+ struct workqueue_struct *wq;
+
+ struct typec_capability typec_caps;
+ struct typec_port *typec_port;
+
+ struct tcpc_dev *tcpc;
+
+ enum typec_role vconn_role;
+ enum typec_role pwr_role;
+ enum typec_data_role data_role;
+ enum typec_pwr_opmode pwr_opmode;
+
+ struct usb_pd_identity partner_ident;
+ struct typec_partner_desc partner_desc;
+ struct typec_partner *partner;
+
+ enum typec_cc_status cc_req;
+
+ enum typec_cc_status cc1;
+ enum typec_cc_status cc2;
+ enum typec_cc_polarity polarity;
+
+ bool attached;
+ bool connected;
+ bool vbus_present;
+ bool vbus_never_low;
+ bool vbus_source;
+ bool vbus_charge;
+
+ bool send_discover;
+ bool op_vsafe5v;
+
+ int try_role;
+ int try_snk_count;
+ int try_src_count;
+
+ enum pd_msg_request queued_message;
+
+ enum tcpm_state enter_state;
+ enum tcpm_state prev_state;
+ enum tcpm_state state;
+ enum tcpm_state delayed_state;
+ unsigned long delayed_runtime;
+ unsigned long delay_ms;
+
+ spinlock_t pd_event_lock;
+ u32 pd_events;
+
+ struct work_struct event_work;
+ struct delayed_work state_machine;
+ struct delayed_work vdm_state_machine;
+ bool state_machine_running;
+
+ struct completion tx_complete;
+ enum tcpm_transmit_status tx_status;
+
+ struct mutex swap_lock; /* swap command lock */
+ bool swap_pending;
+ struct completion swap_complete;
+ int swap_status;
+
+ unsigned int message_id;
+ unsigned int caps_count;
+ unsigned int hard_reset_count;
+ bool pd_capable;
+ bool explicit_contract;
+
+ /* Partner capabilities/requests */
+ u32 sink_request;
+ u32 source_caps[PDO_MAX_OBJECTS];
+ unsigned int nr_source_caps;
+ u32 sink_caps[PDO_MAX_OBJECTS];
+ unsigned int nr_sink_caps;
+
+ /* Local capabilities */
+ u32 src_pdo[PDO_MAX_OBJECTS];
+ unsigned int nr_src_pdo;
+ u32 snk_pdo[PDO_MAX_OBJECTS];
+ unsigned int nr_snk_pdo;
+
+ unsigned int max_snk_mv;
+ unsigned int max_snk_ma;
+ unsigned int max_snk_mw;
+ unsigned int operating_snk_mw;
+
+ /* Requested current / voltage */
+ u32 current_limit;
+ u32 supply_voltage;
+
+ u32 bist_request;
+
+ /* PD state for Vendor Defined Messages */
+ enum vdm_states vdm_state;
+ u32 vdm_retries;
+ /* next Vendor Defined Message to send */
+ u32 vdo_data[VDO_MAX_SIZE];
+ u8 vdo_count;
+ /* VDO to retry if UFP responder replied busy */
+ u32 vdo_retry;
+
+ /* Alternate mode data */
+
+ struct pd_mode_data mode_data;
+ struct typec_altmode *partner_altmode[SVID_DISCOVERY_MAX];
+ struct typec_altmode *port_altmode[SVID_DISCOVERY_MAX];
+
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *dentry;
+ struct mutex logbuffer_lock; /* log buffer access lock */
+ int logbuffer_head;
+ int logbuffer_tail;
+ u8 *logbuffer[LOG_BUFFER_ENTRIES];
+#endif
+};
+
+struct pd_rx_event {
+ struct work_struct work;
+ struct tcpm_port *port;
+ struct pd_message msg;
+};
+
+#define tcpm_cc_is_sink(cc) \
+ ((cc) == TYPEC_CC_RP_DEF || (cc) == TYPEC_CC_RP_1_5 || \
+ (cc) == TYPEC_CC_RP_3_0)
+
+#define tcpm_port_is_sink(port) \
+ ((tcpm_cc_is_sink((port)->cc1) && !tcpm_cc_is_sink((port)->cc2)) || \
+ (tcpm_cc_is_sink((port)->cc2) && !tcpm_cc_is_sink((port)->cc1)))
+
+#define tcpm_cc_is_source(cc) ((cc) == TYPEC_CC_RD)
+#define tcpm_cc_is_audio(cc) ((cc) == TYPEC_CC_RA)
+#define tcpm_cc_is_open(cc) ((cc) == TYPEC_CC_OPEN)
+
+#define tcpm_port_is_source(port) \
+ ((tcpm_cc_is_source((port)->cc1) && \
+ !tcpm_cc_is_source((port)->cc2)) || \
+ (tcpm_cc_is_source((port)->cc2) && \
+ !tcpm_cc_is_source((port)->cc1)))
+
+#define tcpm_port_is_debug(port) \
+ (tcpm_cc_is_source((port)->cc1) && tcpm_cc_is_source((port)->cc2))
+
+#define tcpm_port_is_audio(port) \
+ (tcpm_cc_is_audio((port)->cc1) && tcpm_cc_is_audio((port)->cc2))
+
+#define tcpm_port_is_audio_detached(port) \
+ ((tcpm_cc_is_audio((port)->cc1) && tcpm_cc_is_open((port)->cc2)) || \
+ (tcpm_cc_is_audio((port)->cc2) && tcpm_cc_is_open((port)->cc1)))
+
+#define tcpm_try_snk(port) \
+ ((port)->try_snk_count == 0 && (port)->try_role == TYPEC_SINK)
+
+#define tcpm_try_src(port) \
+ ((port)->try_src_count == 0 && (port)->try_role == TYPEC_SOURCE)
+
+static enum tcpm_state tcpm_default_state(struct tcpm_port *port)
+{
+ if (port->try_role == TYPEC_SINK)
+ return SNK_UNATTACHED;
+ else if (port->try_role == TYPEC_SOURCE)
+ return SRC_UNATTACHED;
+ else if (port->tcpc->config->default_role == TYPEC_SINK)
+ return SNK_UNATTACHED;
+ return SRC_UNATTACHED;
+}
+
+static inline
+struct tcpm_port *typec_cap_to_tcpm(const struct typec_capability *cap)
+{
+ return container_of(cap, struct tcpm_port, typec_caps);
+}
+
+static bool tcpm_port_is_disconnected(struct tcpm_port *port)
+{
+ return (!port->attached && port->cc1 == TYPEC_CC_OPEN &&
+ port->cc2 == TYPEC_CC_OPEN) ||
+ (port->attached && ((port->polarity == TYPEC_POLARITY_CC1 &&
+ port->cc1 == TYPEC_CC_OPEN) ||
+ (port->polarity == TYPEC_POLARITY_CC2 &&
+ port->cc2 == TYPEC_CC_OPEN)));
+}
+
+/*
+ * Logging
+ */
+
+#ifdef CONFIG_DEBUG_FS
+
+static bool tcpm_log_full(struct tcpm_port *port)
+{
+ return port->logbuffer_tail ==
+ (port->logbuffer_head + 1) % LOG_BUFFER_ENTRIES;
+}
+
+static void _tcpm_log(struct tcpm_port *port, const char *fmt, va_list args)
+{
+ char tmpbuffer[LOG_BUFFER_ENTRY_SIZE];
+ u64 ts_nsec = local_clock();
+ unsigned long rem_nsec;
+
+ if (!port->logbuffer[port->logbuffer_head]) {
+ port->logbuffer[port->logbuffer_head] =
+ kzalloc(LOG_BUFFER_ENTRY_SIZE, GFP_KERNEL);
+ if (!port->logbuffer[port->logbuffer_head])
+ return;
+ }
+
+ vsnprintf(tmpbuffer, sizeof(tmpbuffer), fmt, args);
+
+ mutex_lock(&port->logbuffer_lock);
+
+ if (tcpm_log_full(port)) {
+ port->logbuffer_head = max(port->logbuffer_head - 1, 0);
+ strcpy(tmpbuffer, "overflow");
+ }
+
+ if (port->logbuffer_head < 0 ||
+ port->logbuffer_head >= LOG_BUFFER_ENTRIES) {
+ dev_warn(port->dev,
+ "Bad log buffer index %d\n", port->logbuffer_head);
+ goto abort;
+ }
+
+ if (!port->logbuffer[port->logbuffer_head]) {
+ dev_warn(port->dev,
+ "Log buffer index %d is NULL\n", port->logbuffer_head);
+ goto abort;
+ }
+
+ rem_nsec = do_div(ts_nsec, 1000000000);
+ scnprintf(port->logbuffer[port->logbuffer_head],
+ LOG_BUFFER_ENTRY_SIZE, "[%5lu.%06lu] %s",
+ (unsigned long)ts_nsec, rem_nsec / 1000,
+ tmpbuffer);
+ port->logbuffer_head = (port->logbuffer_head + 1) % LOG_BUFFER_ENTRIES;
+
+abort:
+ mutex_unlock(&port->logbuffer_lock);
+}
+
+static void tcpm_log(struct tcpm_port *port, const char *fmt, ...)
+{
+ va_list args;
+
+ /* Do not log while disconnected and unattached */
+ if (tcpm_port_is_disconnected(port) &&
+ (port->state == SRC_UNATTACHED || port->state == SNK_UNATTACHED ||
+ port->state == DRP_TOGGLING))
+ return;
+
+ va_start(args, fmt);
+ _tcpm_log(port, fmt, args);
+ va_end(args);
+}
+
+static void tcpm_log_force(struct tcpm_port *port, const char *fmt, ...)
+{
+ va_list args;
+
+ va_start(args, fmt);
+ _tcpm_log(port, fmt, args);
+ va_end(args);
+}
+
+static void tcpm_log_source_caps(struct tcpm_port *port)
+{
+ int i;
+
+ for (i = 0; i < port->nr_source_caps; i++) {
+ u32 pdo = port->source_caps[i];
+ enum pd_pdo_type type = pdo_type(pdo);
+ char msg[64];
+
+ switch (type) {
+ case PDO_TYPE_FIXED:
+ scnprintf(msg, sizeof(msg),
+ "%u mV, %u mA [%s%s%s%s%s%s]",
+ pdo_fixed_voltage(pdo),
+ pdo_max_current(pdo),
+ (pdo & PDO_FIXED_DUAL_ROLE) ?
+ "R" : "",
+ (pdo & PDO_FIXED_SUSPEND) ?
+ "S" : "",
+ (pdo & PDO_FIXED_HIGHER_CAP) ?
+ "H" : "",
+ (pdo & PDO_FIXED_USB_COMM) ?
+ "U" : "",
+ (pdo & PDO_FIXED_DATA_SWAP) ?
+ "D" : "",
+ (pdo & PDO_FIXED_EXTPOWER) ?
+ "E" : "");
+ break;
+ case PDO_TYPE_VAR:
+ scnprintf(msg, sizeof(msg),
+ "%u-%u mV, %u mA",
+ pdo_min_voltage(pdo),
+ pdo_max_voltage(pdo),
+ pdo_max_current(pdo));
+ break;
+ case PDO_TYPE_BATT:
+ scnprintf(msg, sizeof(msg),
+ "%u-%u mV, %u mW",
+ pdo_min_voltage(pdo),
+ pdo_max_voltage(pdo),
+ pdo_max_power(pdo));
+ break;
+ default:
+ strcpy(msg, "undefined");
+ break;
+ }
+ tcpm_log(port, " PDO %d: type %d, %s",
+ i, type, msg);
+ }
+}
+
+static int tcpm_seq_show(struct seq_file *s, void *v)
+{
+ struct tcpm_port *port = (struct tcpm_port *)s->private;
+ int tail;
+
+ mutex_lock(&port->logbuffer_lock);
+ tail = port->logbuffer_tail;
+ while (tail != port->logbuffer_head) {
+ seq_printf(s, "%s\n", port->logbuffer[tail]);
+ tail = (tail + 1) % LOG_BUFFER_ENTRIES;
+ }
+ if (!seq_has_overflowed(s))
+ port->logbuffer_tail = tail;
+ mutex_unlock(&port->logbuffer_lock);
+
+ return 0;
+}
+
+static int tcpm_debug_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, tcpm_seq_show, inode->i_private);
+}
+
+static const struct file_operations tcpm_debug_operations = {
+ .open = tcpm_debug_open,
+ .llseek = seq_lseek,
+ .read = seq_read,
+ .release = single_release,
+};
+
+static struct dentry *rootdir;
+
+static int tcpm_debugfs_init(struct tcpm_port *port)
+{
+ mutex_init(&port->logbuffer_lock);
+ /* /sys/kernel/debug/tcpm/usbcX */
+ if (!rootdir) {
+ rootdir = debugfs_create_dir("tcpm", NULL);
+ if (!rootdir)
+ return -ENOMEM;
+ }
+
+ port->dentry = debugfs_create_file(dev_name(port->dev),
+ S_IFREG | 0444, rootdir,
+ port, &tcpm_debug_operations);
+
+ return 0;
+}
+
+static void tcpm_debugfs_exit(struct tcpm_port *port)
+{
+ debugfs_remove(port->dentry);
+}
+
+#else
+
+static void tcpm_log(const struct tcpm_port *port, const char *fmt, ...) { }
+static void tcpm_log_force(struct tcpm_port *port, const char *fmt, ...) { }
+static void tcpm_log_source_caps(struct tcpm_port *port) { }
+static int tcpm_debugfs_init(const struct tcpm_port *port) { return 0; }
+static void tcpm_debugfs_exit(const struct tcpm_port *port) { }
+
+#endif
+
+static int tcpm_pd_transmit(struct tcpm_port *port,
+ enum tcpm_transmit_type type,
+ const struct pd_message *msg)
+{
+ unsigned long timeout;
+ int ret;
+
+ if (msg)
+ tcpm_log(port, "PD TX, header: %#x", le16_to_cpu(msg->header));
+ else
+ tcpm_log(port, "PD TX, type: %#x", type);
+
+ reinit_completion(&port->tx_complete);
+ ret = port->tcpc->pd_transmit(port->tcpc, type, msg);
+ if (ret < 0)
+ return ret;
+
+ mutex_unlock(&port->lock);
+ timeout = wait_for_completion_timeout(&port->tx_complete,
+ msecs_to_jiffies(PD_T_TCPC_TX_TIMEOUT));
+ mutex_lock(&port->lock);
+ if (!timeout)
+ return -ETIMEDOUT;
+
+ switch (port->tx_status) {
+ case TCPC_TX_SUCCESS:
+ port->message_id = (port->message_id + 1) & PD_HEADER_ID_MASK;
+ return 0;
+ case TCPC_TX_DISCARDED:
+ return -EAGAIN;
+ case TCPC_TX_FAILED:
+ default:
+ return -EIO;
+ }
+}
+
+void tcpm_pd_transmit_complete(struct tcpm_port *port,
+ enum tcpm_transmit_status status)
+{
+ tcpm_log(port, "PD TX complete, status: %u", status);
+ port->tx_status = status;
+ complete(&port->tx_complete);
+}
+EXPORT_SYMBOL_GPL(tcpm_pd_transmit_complete);
+
+static int tcpm_mux_set(struct tcpm_port *port, enum tcpc_mux_mode mode,
+ enum tcpc_usb_switch config)
+{
+ int ret = 0;
+
+ tcpm_log(port, "Requesting mux mode %d, config %d, polarity %d",
+ mode, config, port->polarity);
+
+ if (port->tcpc->mux)
+ ret = port->tcpc->mux->set(port->tcpc->mux, mode, config,
+ port->polarity);
+
+ return ret;
+}
+
+static int tcpm_set_polarity(struct tcpm_port *port,
+ enum typec_cc_polarity polarity)
+{
+ int ret;
+
+ tcpm_log(port, "polarity %d", polarity);
+
+ ret = port->tcpc->set_polarity(port->tcpc, polarity);
+ if (ret < 0)
+ return ret;
+
+ port->polarity = polarity;
+
+ return 0;
+}
+
+static int tcpm_set_vconn(struct tcpm_port *port, bool enable)
+{
+ int ret;
+
+ tcpm_log(port, "vconn:=%d", enable);
+
+ ret = port->tcpc->set_vconn(port->tcpc, enable);
+ if (!ret) {
+ port->vconn_role = enable ? TYPEC_SOURCE : TYPEC_SINK;
+ typec_set_vconn_role(port->typec_port, port->vconn_role);
+ }
+
+ return ret;
+}
+
+static u32 tcpm_get_current_limit(struct tcpm_port *port)
+{
+ enum typec_cc_status cc;
+ u32 limit;
+
+ cc = port->polarity ? port->cc2 : port->cc1;
+ switch (cc) {
+ case TYPEC_CC_RP_1_5:
+ limit = 1500;
+ break;
+ case TYPEC_CC_RP_3_0:
+ limit = 3000;
+ break;
+ case TYPEC_CC_RP_DEF:
+ default:
+ limit = 0;
+ break;
+ }
+
+ return limit;
+}
+
+static int tcpm_set_current_limit(struct tcpm_port *port, u32 max_ma, u32 mv)
+{
+ int ret = -EOPNOTSUPP;
+
+ tcpm_log(port, "Setting voltage/current limit %u mV %u mA", mv, max_ma);
+
+ if (port->tcpc->set_current_limit)
+ ret = port->tcpc->set_current_limit(port->tcpc, max_ma, mv);
+
+ return ret;
+}
+
+/*
+ * Determine RP value to set based on maximum current supported
+ * by a port if configured as source.
+ * Returns CC value to report to link partner.
+ */
+static enum typec_cc_status tcpm_rp_cc(struct tcpm_port *port)
+{
+ const u32 *src_pdo = port->src_pdo;
+ int nr_pdo = port->nr_src_pdo;
+ int i;
+
+ /*
+ * Search for first entry with matching voltage.
+ * It should report the maximum supported current.
+ */
+ for (i = 0; i < nr_pdo; i++) {
+ const u32 pdo = src_pdo[i];
+
+ if (pdo_type(pdo) == PDO_TYPE_FIXED &&
+ pdo_fixed_voltage(pdo) == 5000) {
+ unsigned int curr = pdo_max_current(pdo);
+
+ if (curr >= 3000)
+ return TYPEC_CC_RP_3_0;
+ else if (curr >= 1500)
+ return TYPEC_CC_RP_1_5;
+ return TYPEC_CC_RP_DEF;
+ }
+ }
+
+ return TYPEC_CC_RP_DEF;
+}
+
+static int tcpm_set_attached_state(struct tcpm_port *port, bool attached)
+{
+ return port->tcpc->set_roles(port->tcpc, attached, port->pwr_role,
+ port->data_role);
+}
+
+static int tcpm_set_roles(struct tcpm_port *port, bool attached,
+ enum typec_role role, enum typec_data_role data)
+{
+ int ret;
+
+ if (data == TYPEC_HOST)
+ ret = tcpm_mux_set(port, TYPEC_MUX_USB,
+ TCPC_USB_SWITCH_CONNECT);
+ else
+ ret = tcpm_mux_set(port, TYPEC_MUX_NONE,
+ TCPC_USB_SWITCH_DISCONNECT);
+ if (ret < 0)
+ return ret;
+
+ ret = port->tcpc->set_roles(port->tcpc, attached, role, data);
+ if (ret < 0)
+ return ret;
+
+ port->pwr_role = role;
+ port->data_role = data;
+ typec_set_data_role(port->typec_port, data);
+ typec_set_pwr_role(port->typec_port, role);
+
+ return 0;
+}
+
+static int tcpm_set_pwr_role(struct tcpm_port *port, enum typec_role role)
+{
+ int ret;
+
+ ret = port->tcpc->set_roles(port->tcpc, true, role,
+ port->data_role);
+ if (ret < 0)
+ return ret;
+
+ port->pwr_role = role;
+ typec_set_pwr_role(port->typec_port, role);
+
+ return 0;
+}
+
+static int tcpm_pd_send_source_caps(struct tcpm_port *port)
+{
+ struct pd_message msg;
+ int i;
+
+ memset(&msg, 0, sizeof(msg));
+ if (!port->nr_src_pdo) {
+ /* No source capabilities defined, sink only */
+ msg.header = PD_HEADER_LE(PD_CTRL_REJECT,
+ port->pwr_role,
+ port->data_role,
+ port->message_id, 0);
+ } else {
+ msg.header = PD_HEADER_LE(PD_DATA_SOURCE_CAP,
+ port->pwr_role,
+ port->data_role,
+ port->message_id,
+ port->nr_src_pdo);
+ }
+ for (i = 0; i < port->nr_src_pdo; i++)
+ msg.payload[i] = cpu_to_le32(port->src_pdo[i]);
+
+ return tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
+}
+
+static int tcpm_pd_send_sink_caps(struct tcpm_port *port)
+{
+ struct pd_message msg;
+ int i;
+
+ memset(&msg, 0, sizeof(msg));
+ if (!port->nr_snk_pdo) {
+ /* No sink capabilities defined, source only */
+ msg.header = PD_HEADER_LE(PD_CTRL_REJECT,
+ port->pwr_role,
+ port->data_role,
+ port->message_id, 0);
+ } else {
+ msg.header = PD_HEADER_LE(PD_DATA_SINK_CAP,
+ port->pwr_role,
+ port->data_role,
+ port->message_id,
+ port->nr_snk_pdo);
+ }
+ for (i = 0; i < port->nr_snk_pdo; i++)
+ msg.payload[i] = cpu_to_le32(port->snk_pdo[i]);
+
+ return tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
+}
+
+static void tcpm_set_state(struct tcpm_port *port, enum tcpm_state state,
+ unsigned int delay_ms)
+{
+ if (delay_ms) {
+ tcpm_log(port, "pending state change %s -> %s @ %u ms",
+ tcpm_states[port->state], tcpm_states[state],
+ delay_ms);
+ port->delayed_state = state;
+ mod_delayed_work(port->wq, &port->state_machine,
+ msecs_to_jiffies(delay_ms));
+ port->delayed_runtime = jiffies + msecs_to_jiffies(delay_ms);
+ port->delay_ms = delay_ms;
+ } else {
+ tcpm_log(port, "state change %s -> %s",
+ tcpm_states[port->state], tcpm_states[state]);
+ port->delayed_state = INVALID_STATE;
+ port->prev_state = port->state;
+ port->state = state;
+ /*
+ * Don't re-queue the state machine work item if we're currently
+ * in the state machine and we're immediately changing states.
+ * tcpm_state_machine_work() will continue running the state
+ * machine.
+ */
+ if (!port->state_machine_running)
+ mod_delayed_work(port->wq, &port->state_machine, 0);
+ }
+}
+
+static void tcpm_set_state_cond(struct tcpm_port *port, enum tcpm_state state,
+ unsigned int delay_ms)
+{
+ if (port->enter_state == port->state)
+ tcpm_set_state(port, state, delay_ms);
+ else
+ tcpm_log(port,
+ "skipped %sstate change %s -> %s [%u ms], context state %s",
+ delay_ms ? "delayed " : "",
+ tcpm_states[port->state], tcpm_states[state],
+ delay_ms, tcpm_states[port->enter_state]);
+}
+
+static void tcpm_queue_message(struct tcpm_port *port,
+ enum pd_msg_request message)
+{
+ port->queued_message = message;
+ mod_delayed_work(port->wq, &port->state_machine, 0);
+}
+
+/*
+ * VDM/VDO handling functions
+ */
+static void tcpm_queue_vdm(struct tcpm_port *port, const u32 header,
+ const u32 *data, int cnt)
+{
+ port->vdo_count = cnt + 1;
+ port->vdo_data[0] = header;
+ memcpy(&port->vdo_data[1], data, sizeof(u32) * cnt);
+ /* Set ready, vdm state machine will actually send */
+ port->vdm_retries = 0;
+ port->vdm_state = VDM_STATE_READY;
+}
+
+static void svdm_consume_identity(struct tcpm_port *port, const __le32 *payload,
+ int cnt)
+{
+ u32 vdo = le32_to_cpu(payload[VDO_INDEX_IDH]);
+ u32 product = le32_to_cpu(payload[VDO_INDEX_PRODUCT]);
+
+ memset(&port->mode_data, 0, sizeof(port->mode_data));
+
+#if 0 /* Not really a match */
+ switch (PD_IDH_PTYPE(vdo)) {
+ case IDH_PTYPE_UNDEF:
+ port->partner.type = TYPEC_PARTNER_NONE; /* no longer exists */
+ break;
+ case IDH_PTYPE_HUB:
+ break;
+ case IDH_PTYPE_PERIPH:
+ break;
+ case IDH_PTYPE_PCABLE:
+ break;
+ case IDH_PTYPE_ACABLE:
+ break;
+ case IDH_PTYPE_AMA:
+ port->partner.type = TYPEC_PARTNER_ALTMODE;
+ break;
+ default:
+ break;
+ }
+#endif
+
+ port->partner_ident.id_header = vdo;
+ port->partner_ident.cert_stat = le32_to_cpu(payload[VDO_INDEX_CSTAT]);
+ port->partner_ident.product = product;
+
+ typec_partner_set_identity(port->partner);
+
+ tcpm_log(port, "Identity: %04x:%04x.%04x",
+ PD_IDH_VID(vdo),
+ PD_PRODUCT_PID(product), product & 0xffff);
+}
+
+static bool svdm_consume_svids(struct tcpm_port *port, const __le32 *payload,
+ int cnt)
+{
+ struct pd_mode_data *pmdata = &port->mode_data;
+ int i;
+
+ for (i = 1; i < cnt; i++) {
+ u32 p = le32_to_cpu(payload[i]);
+ u16 svid;
+
+ svid = (p >> 16) & 0xffff;
+ if (!svid)
+ return false;
+
+ if (pmdata->nsvids >= SVID_DISCOVERY_MAX)
+ goto abort;
+
+ pmdata->svids[pmdata->nsvids++] = svid;
+ tcpm_log(port, "SVID %d: 0x%x", pmdata->nsvids, svid);
+
+ svid = p & 0xffff;
+ if (!svid)
+ return false;
+
+ if (pmdata->nsvids >= SVID_DISCOVERY_MAX)
+ goto abort;
+
+ pmdata->svids[pmdata->nsvids++] = svid;
+ tcpm_log(port, "SVID %d: 0x%x", pmdata->nsvids, svid);
+ }
+ return true;
+abort:
+ tcpm_log(port, "SVID_DISCOVERY_MAX(%d) too low!", SVID_DISCOVERY_MAX);
+ return false;
+}
+
+static void svdm_consume_modes(struct tcpm_port *port, const __le32 *payload,
+ int cnt)
+{
+ struct pd_mode_data *pmdata = &port->mode_data;
+ struct typec_altmode_desc *paltmode;
+ struct typec_mode_desc *pmode;
+ int i;
+
+ if (pmdata->altmodes >= ARRAY_SIZE(port->partner_altmode)) {
+ /* Already logged in svdm_consume_svids() */
+ return;
+ }
+
+ paltmode = &pmdata->altmode_desc[pmdata->altmodes];
+ memset(paltmode, 0, sizeof(*paltmode));
+
+ paltmode->svid = pmdata->svids[pmdata->svid_index];
+
+ tcpm_log(port, " Alternate mode %d: SVID 0x%04x",
+ pmdata->altmodes, paltmode->svid);
+
+ for (i = 1; i < cnt && paltmode->n_modes < ALTMODE_MAX_MODES; i++) {
+ pmode = &paltmode->modes[paltmode->n_modes];
+ memset(pmode, 0, sizeof(*pmode));
+ pmode->vdo = le32_to_cpu(payload[i]);
+ pmode->index = i - 1;
+ paltmode->n_modes++;
+ tcpm_log(port, " VDO %d: 0x%08x",
+ pmode->index, pmode->vdo);
+ }
+ port->partner_altmode[pmdata->altmodes] =
+ typec_partner_register_altmode(port->partner, paltmode);
+ if (port->partner_altmode[pmdata->altmodes] == NULL) {
+ tcpm_log(port,
+ "Failed to register alternate modes for SVID 0x%04x",
+ paltmode->svid);
+ return;
+ }
+ pmdata->altmodes++;
+}
+
+#define supports_modal(port) PD_IDH_MODAL_SUPP((port)->partner_ident.id_header)
+
+static int tcpm_pd_svdm(struct tcpm_port *port, const __le32 *payload, int cnt,
+ u32 *response)
+{
+ u32 p0 = le32_to_cpu(payload[0]);
+ int cmd_type = PD_VDO_CMDT(p0);
+ int cmd = PD_VDO_CMD(p0);
+ struct pd_mode_data *modep;
+ int rlen = 0;
+ u16 svid;
+
+ tcpm_log(port, "Rx VDM cmd 0x%x type %d cmd %d len %d",
+ p0, cmd_type, cmd, cnt);
+
+ modep = &port->mode_data;
+
+ switch (cmd_type) {
+ case CMDT_INIT:
+ switch (cmd) {
+ case CMD_DISCOVER_IDENT:
+ break;
+ case CMD_DISCOVER_SVID:
+ break;
+ case CMD_DISCOVER_MODES:
+ break;
+ case CMD_ENTER_MODE:
+ break;
+ case CMD_EXIT_MODE:
+ break;
+ case CMD_ATTENTION:
+ break;
+ default:
+ break;
+ }
+ if (rlen >= 1) {
+ response[0] = p0 | VDO_CMDT(CMDT_RSP_ACK);
+ } else if (rlen == 0) {
+ response[0] = p0 | VDO_CMDT(CMDT_RSP_NAK);
+ rlen = 1;
+ } else {
+ response[0] = p0 | VDO_CMDT(CMDT_RSP_BUSY);
+ rlen = 1;
+ }
+ break;
+ case CMDT_RSP_ACK:
+ /* silently drop message if we are not connected */
+ if (!port->partner)
+ break;
+
+ switch (cmd) {
+ case CMD_DISCOVER_IDENT:
+ /* 6.4.4.3.1 */
+ svdm_consume_identity(port, payload, cnt);
+ response[0] = VDO(USB_SID_PD, 1, CMD_DISCOVER_SVID);
+ rlen = 1;
+ break;
+ case CMD_DISCOVER_SVID:
+ /* 6.4.4.3.2 */
+ if (svdm_consume_svids(port, payload, cnt)) {
+ response[0] = VDO(USB_SID_PD, 1,
+ CMD_DISCOVER_SVID);
+ rlen = 1;
+ } else if (modep->nsvids && supports_modal(port)) {
+ response[0] = VDO(modep->svids[0], 1,
+ CMD_DISCOVER_MODES);
+ rlen = 1;
+ }
+ break;
+ case CMD_DISCOVER_MODES:
+ /* 6.4.4.3.3 */
+ svdm_consume_modes(port, payload, cnt);
+ modep->svid_index++;
+ if (modep->svid_index < modep->nsvids) {
+ svid = modep->svids[modep->svid_index];
+ response[0] = VDO(svid, 1, CMD_DISCOVER_MODES);
+ rlen = 1;
+ } else {
+#if 0
+ response[0] = pd_dfp_enter_mode(port, 0, 0);
+ if (response[0])
+ rlen = 1;
+#endif
+ }
+ break;
+ case CMD_ENTER_MODE:
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return rlen;
+}
+
+static void tcpm_handle_vdm_request(struct tcpm_port *port,
+ const __le32 *payload, int cnt)
+{
+ int rlen = 0;
+ u32 response[8] = { };
+ u32 p0 = le32_to_cpu(payload[0]);
+
+ if (port->vdm_state == VDM_STATE_BUSY) {
+ /* If UFP responded busy retry after timeout */
+ if (PD_VDO_CMDT(p0) == CMDT_RSP_BUSY) {
+ port->vdm_state = VDM_STATE_WAIT_RSP_BUSY;
+ port->vdo_retry = (p0 & ~VDO_CMDT_MASK) |
+ CMDT_INIT;
+ mod_delayed_work(port->wq, &port->vdm_state_machine,
+ msecs_to_jiffies(PD_T_VDM_BUSY));
+ return;
+ }
+ port->vdm_state = VDM_STATE_DONE;
+ }
+
+ if (PD_VDO_SVDM(p0))
+ rlen = tcpm_pd_svdm(port, payload, cnt, response);
+#if 0
+ else
+ rlen = tcpm_pd_custom_vdm(port, cnt, payload, response);
+#endif
+
+ if (rlen > 0) {
+ tcpm_queue_vdm(port, response[0], &response[1], rlen - 1);
+ mod_delayed_work(port->wq, &port->vdm_state_machine, 0);
+ }
+}
+
+static void tcpm_send_vdm(struct tcpm_port *port, u32 vid, int cmd,
+ const u32 *data, int count)
+{
+ u32 header;
+
+ if (WARN_ON(count > VDO_MAX_SIZE - 1))
+ count = VDO_MAX_SIZE - 1;
+
+ /* set VDM header with VID & CMD */
+ header = VDO(vid, ((vid & USB_SID_PD) == USB_SID_PD) ?
+ 1 : (PD_VDO_CMD(cmd) <= CMD_ATTENTION), cmd);
+ tcpm_queue_vdm(port, header, data, count);
+
+ mod_delayed_work(port->wq, &port->vdm_state_machine, 0);
+}
+
+static unsigned int vdm_ready_timeout(u32 vdm_hdr)
+{
+ unsigned int timeout;
+ int cmd = PD_VDO_CMD(vdm_hdr);
+
+ /* it's not a structured VDM command */
+ if (!PD_VDO_SVDM(vdm_hdr))
+ return PD_T_VDM_UNSTRUCTURED;
+
+ switch (PD_VDO_CMDT(vdm_hdr)) {
+ case CMDT_INIT:
+ if (cmd == CMD_ENTER_MODE || cmd == CMD_EXIT_MODE)
+ timeout = PD_T_VDM_WAIT_MODE_E;
+ else
+ timeout = PD_T_VDM_SNDR_RSP;
+ break;
+ default:
+ if (cmd == CMD_ENTER_MODE || cmd == CMD_EXIT_MODE)
+ timeout = PD_T_VDM_E_MODE;
+ else
+ timeout = PD_T_VDM_RCVR_RSP;
+ break;
+ }
+ return timeout;
+}
+
+static void vdm_run_state_machine(struct tcpm_port *port)
+{
+ struct pd_message msg;
+ int i, res;
+
+ switch (port->vdm_state) {
+ case VDM_STATE_READY:
+ /* Only transmit VDM if attached */
+ if (!port->attached) {
+ port->vdm_state = VDM_STATE_ERR_BUSY;
+ break;
+ }
+
+ /*
+ * If there's traffic or we're not in the PDO ready state, don't send
+ * a VDM.
+ */
+ if (port->state != SRC_READY && port->state != SNK_READY)
+ break;
+
+ /* Prepare and send VDM */
+ memset(&msg, 0, sizeof(msg));
+ msg.header = PD_HEADER_LE(PD_DATA_VENDOR_DEF,
+ port->pwr_role,
+ port->data_role,
+ port->message_id, port->vdo_count);
+ for (i = 0; i < port->vdo_count; i++)
+ msg.payload[i] = cpu_to_le32(port->vdo_data[i]);
+ res = tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
+ if (res < 0) {
+ port->vdm_state = VDM_STATE_ERR_SEND;
+ } else {
+ unsigned long timeout;
+
+ port->vdm_retries = 0;
+ port->vdm_state = VDM_STATE_BUSY;
+ timeout = vdm_ready_timeout(port->vdo_data[0]);
+ mod_delayed_work(port->wq, &port->vdm_state_machine,
+ timeout);
+ }
+ break;
+ case VDM_STATE_WAIT_RSP_BUSY:
+ port->vdo_data[0] = port->vdo_retry;
+ port->vdo_count = 1;
+ port->vdm_state = VDM_STATE_READY;
+ break;
+ case VDM_STATE_BUSY:
+ port->vdm_state = VDM_STATE_ERR_TMOUT;
+ break;
+ case VDM_STATE_ERR_SEND:
+ /*
+ * A partner which does not support USB PD will not reply,
+ * so this is not a fatal error. At the same time, some
+ * devices may not return GoodCRC under some circumstances,
+ * so we need to retry.
+ */
+ if (port->vdm_retries < 3) {
+ tcpm_log(port, "VDM Tx error, retry");
+ port->vdm_retries++;
+ port->vdm_state = VDM_STATE_READY;
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+static void vdm_state_machine_work(struct work_struct *work)
+{
+ struct tcpm_port *port = container_of(work, struct tcpm_port,
+ vdm_state_machine.work);
+ enum vdm_states prev_state;
+
+ mutex_lock(&port->lock);
+
+ /*
+ * Continue running as long as the port is not busy and there was
+ * a state change.
+ */
+ do {
+ prev_state = port->vdm_state;
+ vdm_run_state_machine(port);
+ } while (port->vdm_state != prev_state &&
+ port->vdm_state != VDM_STATE_BUSY);
+
+ mutex_unlock(&port->lock);
+}
+
+/*
+ * PD (data, control) command handling functions
+ */
+static void tcpm_pd_data_request(struct tcpm_port *port,
+ const struct pd_message *msg)
+{
+ enum pd_data_msg_type type = pd_header_type_le(msg->header);
+ unsigned int cnt = pd_header_cnt_le(msg->header);
+ unsigned int i;
+
+ switch (type) {
+ case PD_DATA_SOURCE_CAP:
+ if (port->pwr_role != TYPEC_SINK)
+ break;
+
+ for (i = 0; i < cnt; i++)
+ port->source_caps[i] = le32_to_cpu(msg->payload[i]);
+
+ port->nr_source_caps = cnt;
+
+ tcpm_log_source_caps(port);
+
+ /*
+ * This message may be received even if VBUS is not
+ * present. This is quite unexpected; see USB PD
+ * specification, sections 8.3.3.6.3.1 and 8.3.3.6.3.2.
+ * However, at the same time, we must be ready to
+ * receive this message and respond to it 15ms after
+ * receiving PS_RDY during power swap operations, no matter
+ * if VBUS is available or not (USB PD specification,
+ * section 6.5.9.2).
+ * So we need to accept the message either way,
+ * but be prepared to keep waiting for VBUS after it was
+ * handled.
+ */
+ tcpm_set_state(port, SNK_NEGOTIATE_CAPABILITIES, 0);
+ break;
+ case PD_DATA_REQUEST:
+ if (port->pwr_role != TYPEC_SOURCE ||
+ cnt != 1) {
+ tcpm_queue_message(port, PD_MSG_CTRL_REJECT);
+ break;
+ }
+ port->sink_request = le32_to_cpu(msg->payload[0]);
+ tcpm_set_state(port, SRC_NEGOTIATE_CAPABILITIES, 0);
+ break;
+ case PD_DATA_SINK_CAP:
+ /* We don't do anything with this at the moment... */
+ for (i = 0; i < cnt; i++)
+ port->sink_caps[i] = le32_to_cpu(msg->payload[i]);
+ port->nr_sink_caps = cnt;
+ break;
+ case PD_DATA_VENDOR_DEF:
+ tcpm_handle_vdm_request(port, msg->payload, cnt);
+ break;
+ case PD_DATA_BIST:
+ if (port->state == SRC_READY || port->state == SNK_READY) {
+ port->bist_request = le32_to_cpu(msg->payload[0]);
+ tcpm_set_state(port, BIST_RX, 0);
+ }
+ break;
+ default:
+ tcpm_log(port, "Unhandled data message type %#x", type);
+ break;
+ }
+}
+
+static void tcpm_pd_ctrl_request(struct tcpm_port *port,
+ const struct pd_message *msg)
+{
+ enum pd_ctrl_msg_type type = pd_header_type_le(msg->header);
+ enum tcpm_state next_state;
+
+ switch (type) {
+ case PD_CTRL_GOOD_CRC:
+ case PD_CTRL_PING:
+ break;
+ case PD_CTRL_GET_SOURCE_CAP:
+ switch (port->state) {
+ case SRC_READY:
+ case SNK_READY:
+ tcpm_queue_message(port, PD_MSG_DATA_SOURCE_CAP);
+ break;
+ default:
+ tcpm_queue_message(port, PD_MSG_CTRL_REJECT);
+ break;
+ }
+ break;
+ case PD_CTRL_GET_SINK_CAP:
+ switch (port->state) {
+ case SRC_READY:
+ case SNK_READY:
+ tcpm_queue_message(port, PD_MSG_DATA_SINK_CAP);
+ break;
+ default:
+ tcpm_queue_message(port, PD_MSG_CTRL_REJECT);
+ break;
+ }
+ break;
+ case PD_CTRL_GOTO_MIN:
+ break;
+ case PD_CTRL_PS_RDY:
+ switch (port->state) {
+ case SNK_TRANSITION_SINK:
+ if (port->vbus_present) {
+ tcpm_set_current_limit(port,
+ port->current_limit,
+ port->supply_voltage);
+ tcpm_set_state(port, SNK_READY, 0);
+ } else {
+ /*
+ * Seen after power swap. Keep waiting for VBUS
+ * in a transitional state.
+ */
+ tcpm_set_state(port,
+ SNK_TRANSITION_SINK_VBUS, 0);
+ }
+ break;
+ case PR_SWAP_SRC_SNK_SOURCE_OFF:
+ tcpm_set_state(port, PR_SWAP_SRC_SNK_SINK_ON, 0);
+ break;
+ case PR_SWAP_SNK_SRC_SINK_OFF:
+ tcpm_set_state(port, PR_SWAP_SNK_SRC_SOURCE_ON, 0);
+ break;
+ case VCONN_SWAP_WAIT_FOR_VCONN:
+ tcpm_set_state(port, VCONN_SWAP_TURN_OFF_VCONN, 0);
+ break;
+ default:
+ break;
+ }
+ break;
+ case PD_CTRL_REJECT:
+ case PD_CTRL_WAIT:
+ switch (port->state) {
+ case SNK_NEGOTIATE_CAPABILITIES:
+ /* USB PD specification, Figure 8-43 */
+ if (port->explicit_contract)
+ next_state = SNK_READY;
+ else
+ next_state = SNK_WAIT_CAPABILITIES;
+ tcpm_set_state(port, next_state, 0);
+ break;
+ case DR_SWAP_SEND:
+ port->swap_status = (type == PD_CTRL_WAIT ?
+ -EAGAIN : -EOPNOTSUPP);
+ tcpm_set_state(port, DR_SWAP_CANCEL, 0);
+ break;
+ case PR_SWAP_SEND:
+ port->swap_status = (type == PD_CTRL_WAIT ?
+ -EAGAIN : -EOPNOTSUPP);
+ tcpm_set_state(port, PR_SWAP_CANCEL, 0);
+ break;
+ case VCONN_SWAP_SEND:
+ port->swap_status = (type == PD_CTRL_WAIT ?
+ -EAGAIN : -EOPNOTSUPP);
+ tcpm_set_state(port, VCONN_SWAP_CANCEL, 0);
+ break;
+ default:
+ break;
+ }
+ break;
+ case PD_CTRL_ACCEPT:
+ switch (port->state) {
+ case SNK_NEGOTIATE_CAPABILITIES:
+ tcpm_set_state(port, SNK_TRANSITION_SINK, 0);
+ break;
+ case SOFT_RESET_SEND:
+ port->message_id = 0;
+ if (port->pwr_role == TYPEC_SOURCE)
+ next_state = SRC_SEND_CAPABILITIES;
+ else
+ next_state = SNK_WAIT_CAPABILITIES;
+ tcpm_set_state(port, next_state, 0);
+ break;
+ case DR_SWAP_SEND:
+ tcpm_set_state(port, DR_SWAP_CHANGE_DR, 0);
+ break;
+ case PR_SWAP_SEND:
+ tcpm_set_state(port, PR_SWAP_START, 0);
+ break;
+ case VCONN_SWAP_SEND:
+ tcpm_set_state(port, VCONN_SWAP_START, 0);
+ break;
+ default:
+ break;
+ }
+ break;
+ case PD_CTRL_SOFT_RESET:
+ tcpm_set_state(port, SOFT_RESET, 0);
+ break;
+ case PD_CTRL_DR_SWAP:
+ if (port->typec_caps.type != TYPEC_PORT_DRP) {
+ tcpm_queue_message(port, PD_MSG_CTRL_REJECT);
+ break;
+ }
+ /*
+ * XXX
+ * 6.3.9: If an alternate mode is active, a request to swap
+ * alternate modes shall trigger a port reset.
+ */
+ switch (port->state) {
+ case SRC_READY:
+ case SNK_READY:
+ tcpm_set_state(port, DR_SWAP_ACCEPT, 0);
+ break;
+ default:
+ tcpm_queue_message(port, PD_MSG_CTRL_WAIT);
+ break;
+ }
+ break;
+ case PD_CTRL_PR_SWAP:
+ if (port->typec_caps.type != TYPEC_PORT_DRP) {
+ tcpm_queue_message(port, PD_MSG_CTRL_REJECT);
+ break;
+ }
+ switch (port->state) {
+ case SRC_READY:
+ case SNK_READY:
+ tcpm_set_state(port, PR_SWAP_ACCEPT, 0);
+ break;
+ default:
+ tcpm_queue_message(port, PD_MSG_CTRL_WAIT);
+ break;
+ }
+ break;
+ case PD_CTRL_VCONN_SWAP:
+ switch (port->state) {
+ case SRC_READY:
+ case SNK_READY:
+ tcpm_set_state(port, VCONN_SWAP_ACCEPT, 0);
+ break;
+ default:
+ tcpm_queue_message(port, PD_MSG_CTRL_WAIT);
+ break;
+ }
+ break;
+ default:
+ tcpm_log(port, "Unhandled ctrl message type %#x", type);
+ break;
+ }
+}
+
+static void tcpm_pd_rx_handler(struct work_struct *work)
+{
+ struct pd_rx_event *event = container_of(work,
+ struct pd_rx_event, work);
+ const struct pd_message *msg = &event->msg;
+ unsigned int cnt = pd_header_cnt_le(msg->header);
+ struct tcpm_port *port = event->port;
+
+ mutex_lock(&port->lock);
+
+ tcpm_log(port, "PD RX, header: %#x [%d]", le16_to_cpu(msg->header),
+ port->attached);
+
+ if (port->attached) {
+ /*
+ * If both ends believe they are DFP/host, we have a data role
+ * mismatch.
+ */
+ if (!!(le16_to_cpu(msg->header) & PD_HEADER_DATA_ROLE) ==
+ (port->data_role == TYPEC_HOST)) {
+ tcpm_log(port,
+ "Data role mismatch, initiating error recovery");
+ tcpm_set_state(port, ERROR_RECOVERY, 0);
+ } else {
+ if (cnt)
+ tcpm_pd_data_request(port, msg);
+ else
+ tcpm_pd_ctrl_request(port, msg);
+ }
+ }
+
+ mutex_unlock(&port->lock);
+ kfree(event);
+}
+
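+/*
+ * Called by the low-level driver to hand a received PD message to the
+ * port. Safe to call from atomic context: the event is allocated with
+ * GFP_ATOMIC and processing is deferred to the port work queue.
+ */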
+void tcpm_pd_receive(struct tcpm_port *port, const struct pd_message *msg)
+{
+ struct pd_rx_event *event;
+
+ event = kzalloc(sizeof(*event), GFP_ATOMIC);
+ if (!event)
+ return;
+
+ INIT_WORK(&event->work, tcpm_pd_rx_handler);
+ event->port = port;
+ memcpy(&event->msg, msg, sizeof(*msg));
+ queue_work(port->wq, &event->work);
+}
+EXPORT_SYMBOL_GPL(tcpm_pd_receive);
+
+static int tcpm_pd_send_control(struct tcpm_port *port,
+ enum pd_ctrl_msg_type type)
+{
+ struct pd_message msg;
+
+ memset(&msg, 0, sizeof(msg));
+ msg.header = PD_HEADER_LE(type, port->pwr_role,
+ port->data_role,
+ port->message_id, 0);
+
+ return tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
+}
+
+/*
+ * Send the queued message, if any, without affecting the port state.
+ * Return true if the state machine should go back to sleep,
+ * false otherwise.
+ */
+static bool tcpm_send_queued_message(struct tcpm_port *port)
+{
+ enum pd_msg_request queued_message;
+
+ do {
+ queued_message = port->queued_message;
+ port->queued_message = PD_MSG_NONE;
+
+ switch (queued_message) {
+ case PD_MSG_CTRL_WAIT:
+ tcpm_pd_send_control(port, PD_CTRL_WAIT);
+ break;
+ case PD_MSG_CTRL_REJECT:
+ tcpm_pd_send_control(port, PD_CTRL_REJECT);
+ break;
+ case PD_MSG_DATA_SINK_CAP:
+ tcpm_pd_send_sink_caps(port);
+ break;
+ case PD_MSG_DATA_SOURCE_CAP:
+ tcpm_pd_send_source_caps(port);
+ break;
+ default:
+ break;
+ }
+ } while (port->queued_message != PD_MSG_NONE);
+
+ if (port->delayed_state != INVALID_STATE) {
+ if (time_is_after_jiffies(port->delayed_runtime)) {
+ mod_delayed_work(port->wq, &port->state_machine,
+ port->delayed_runtime - jiffies);
+ return true;
+ }
+ port->delayed_state = INVALID_STATE;
+ }
+ return false;
+}
+
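+/*
+ * Validate the Request Data Object received from the sink against the
+ * source PDO it references: the operating current/power must not exceed
+ * what the PDO offers, and the maximum may only exceed it if the
+ * capability mismatch bit is set.
+ */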
+static int tcpm_pd_check_request(struct tcpm_port *port)
+{
+ u32 pdo, rdo = port->sink_request;
+ unsigned int max, op, pdo_max, index;
+ enum pd_pdo_type type;
+
+ index = rdo_index(rdo);
+ if (!index || index > port->nr_src_pdo)
+ return -EINVAL;
+
+ pdo = port->src_pdo[index - 1];
+ type = pdo_type(pdo);
+ switch (type) {
+ case PDO_TYPE_FIXED:
+ case PDO_TYPE_VAR:
+ max = rdo_max_current(rdo);
+ op = rdo_op_current(rdo);
+ pdo_max = pdo_max_current(pdo);
+
+ if (op > pdo_max)
+ return -EINVAL;
+ if (max > pdo_max && !(rdo & RDO_CAP_MISMATCH))
+ return -EINVAL;
+
+ if (type == PDO_TYPE_FIXED)
+ tcpm_log(port,
+ "Requested %u mV, %u mA for %u / %u mA",
+ pdo_fixed_voltage(pdo), pdo_max, op, max);
+ else
+ tcpm_log(port,
+ "Requested %u -> %u mV, %u mA for %u / %u mA",
+ pdo_min_voltage(pdo), pdo_max_voltage(pdo),
+ pdo_max, op, max);
+ break;
+ case PDO_TYPE_BATT:
+ max = rdo_max_power(rdo);
+ op = rdo_op_power(rdo);
+ pdo_max = pdo_max_power(pdo);
+
+ if (op > pdo_max)
+ return -EINVAL;
+ if (max > pdo_max && !(rdo & RDO_CAP_MISMATCH))
+ return -EINVAL;
+ tcpm_log(port,
+ "Requested %u -> %u mV, %u mW for %u / %u mW",
+ pdo_min_voltage(pdo), pdo_max_voltage(pdo),
+ pdo_max, op, max);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ port->op_vsafe5v = index == 1;
+
+ return 0;
+}
+
+static int tcpm_pd_select_pdo(struct tcpm_port *port)
+{
+ unsigned int i, max_mw = 0, max_mv = 0;
+ int ret = -EINVAL;
+
+ /*
+ * Select the source PDO providing the most power while staying within
+ * the board's voltage limits. Prefer higher voltages when the
+ * available power is the same.
+ */
+ for (i = 0; i < port->nr_source_caps; i++) {
+ u32 pdo = port->source_caps[i];
+ enum pd_pdo_type type = pdo_type(pdo);
+ unsigned int mv, ma, mw;
+
+ if (type == PDO_TYPE_FIXED)
+ mv = pdo_fixed_voltage(pdo);
+ else
+ mv = pdo_min_voltage(pdo);
+
+ if (type == PDO_TYPE_BATT) {
+ mw = pdo_max_power(pdo);
+ } else {
+ ma = min(pdo_max_current(pdo),
+ port->max_snk_ma);
+ mw = ma * mv / 1000;
+ }
+
+ /* Prefer higher voltages if available */
+ if ((mw > max_mw || (mw == max_mw && mv > max_mv)) &&
+ mv <= port->max_snk_mv) {
+ ret = i;
+ max_mw = mw;
+ max_mv = mv;
+ }
+ }
+
+ return ret;
+}
+
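+/*
+ * Build the Request Data Object to send to the source, based on the
+ * selected source PDO and the board's sink limits. Sets the capability
+ * mismatch flag if the offered power is below the operating power and
+ * records the resulting current limit and supply voltage.
+ */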
+static int tcpm_pd_build_request(struct tcpm_port *port, u32 *rdo)
+{
+ unsigned int mv, ma, mw, flags;
+ unsigned int max_ma, max_mw;
+ enum pd_pdo_type type;
+ int index;
+ u32 pdo;
+
+ index = tcpm_pd_select_pdo(port);
+ if (index < 0)
+ return -EINVAL;
+ pdo = port->source_caps[index];
+ type = pdo_type(pdo);
+
+ if (type == PDO_TYPE_FIXED)
+ mv = pdo_fixed_voltage(pdo);
+ else
+ mv = pdo_min_voltage(pdo);
+
+ /* Select maximum available current within the board's power limit */
+ if (type == PDO_TYPE_BATT) {
+ mw = pdo_max_power(pdo);
+ ma = 1000 * min(mw, port->max_snk_mw) / mv;
+ } else {
+ ma = min(pdo_max_current(pdo),
+ 1000 * port->max_snk_mw / mv);
+ }
+ ma = min(ma, port->max_snk_ma);
+
+ /* XXX: Any other flags need to be set? */
+ flags = 0;
+
+ /* Set mismatch bit if offered power is less than operating power */
+ mw = ma * mv / 1000;
+ max_ma = ma;
+ max_mw = mw;
+ if (mw < port->operating_snk_mw) {
+ flags |= RDO_CAP_MISMATCH;
+ max_mw = port->operating_snk_mw;
+ max_ma = max_mw * 1000 / mv;
+ }
+
+ tcpm_log(port, "cc=%d cc1=%d cc2=%d vbus=%d vconn=%s polarity=%d",
+ port->cc_req, port->cc1, port->cc2, port->vbus_source,
+ port->vconn_role == TYPEC_SOURCE ? "source" : "sink",
+ port->polarity);
+
+ if (type == PDO_TYPE_BATT) {
+ *rdo = RDO_BATT(index + 1, mw, max_mw, flags);
+
+ tcpm_log(port, "Requesting PDO %d: %u mV, %u mW%s",
+ index, mv, mw,
+ flags & RDO_CAP_MISMATCH ? " [mismatch]" : "");
+ } else {
+ *rdo = RDO_FIXED(index + 1, ma, max_ma, flags);
+
+ tcpm_log(port, "Requesting PDO %d: %u mV, %u mA%s",
+ index, mv, ma,
+ flags & RDO_CAP_MISMATCH ? " [mismatch]" : "");
+ }
+
+ port->current_limit = ma;
+ port->supply_voltage = mv;
+
+ return 0;
+}
+
+static int tcpm_pd_send_request(struct tcpm_port *port)
+{
+ struct pd_message msg;
+ int ret;
+ u32 rdo;
+
+ ret = tcpm_pd_build_request(port, &rdo);
+ if (ret < 0)
+ return ret;
+
+ memset(&msg, 0, sizeof(msg));
+ msg.header = PD_HEADER_LE(PD_DATA_REQUEST,
+ port->pwr_role,
+ port->data_role,
+ port->message_id, 1);
+ msg.payload[0] = cpu_to_le32(rdo);
+
+ return tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
+}
+
+static int tcpm_set_vbus(struct tcpm_port *port, bool enable)
+{
+ int ret;
+
+ if (enable && port->vbus_charge)
+ return -EINVAL;
+
+ tcpm_log(port, "vbus:=%d charge=%d", enable, port->vbus_charge);
+
+ ret = port->tcpc->set_vbus(port->tcpc, enable, port->vbus_charge);
+ if (ret < 0)
+ return ret;
+
+ port->vbus_source = enable;
+ return 0;
+}
+
+static int tcpm_set_charge(struct tcpm_port *port, bool charge)
+{
+ int ret;
+
+ if (charge && port->vbus_source)
+ return -EINVAL;
+
+ if (charge != port->vbus_charge) {
+ tcpm_log(port, "vbus=%d charge:=%d", port->vbus_source, charge);
+ ret = port->tcpc->set_vbus(port->tcpc, port->vbus_source,
+ charge);
+ if (ret < 0)
+ return ret;
+ }
+ port->vbus_charge = charge;
+ return 0;
+}
+
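+/*
+ * Hand connection detection over to the TCPC if it can toggle DRP in
+ * hardware. Returns true if hardware toggling was started.
+ */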
+static bool tcpm_start_drp_toggling(struct tcpm_port *port)
+{
+ int ret;
+
+ if (port->tcpc->start_drp_toggling &&
+ port->typec_caps.type == TYPEC_PORT_DRP) {
+ tcpm_log_force(port, "Start DRP toggling");
+ ret = port->tcpc->start_drp_toggling(port->tcpc,
+ tcpm_rp_cc(port));
+ if (!ret)
+ return true;
+ }
+
+ return false;
+}
+
+static void tcpm_set_cc(struct tcpm_port *port, enum typec_cc_status cc)
+{
+ tcpm_log(port, "cc:=%d", cc);
+ port->cc_req = cc;
+ port->tcpc->set_cc(port->tcpc, cc);
+}
+
+static int tcpm_init_vbus(struct tcpm_port *port)
+{
+ int ret;
+
+ ret = port->tcpc->set_vbus(port->tcpc, false, false);
+ port->vbus_source = false;
+ port->vbus_charge = false;
+ return ret;
+}
+
+static int tcpm_init_vconn(struct tcpm_port *port)
+{
+ int ret;
+
+ ret = port->tcpc->set_vconn(port->tcpc, false);
+ port->vconn_role = TYPEC_SINK;
+ return ret;
+}
+
+static void tcpm_typec_connect(struct tcpm_port *port)
+{
+ if (!port->connected) {
+ /* Make sure we don't report stale identity information */
+ memset(&port->partner_ident, 0, sizeof(port->partner_ident));
+ port->partner_desc.usb_pd = port->pd_capable;
+ if (tcpm_port_is_debug(port))
+ port->partner_desc.accessory = TYPEC_ACCESSORY_DEBUG;
+ else if (tcpm_port_is_audio(port))
+ port->partner_desc.accessory = TYPEC_ACCESSORY_AUDIO;
+ else
+ port->partner_desc.accessory = TYPEC_ACCESSORY_NONE;
+ port->partner = typec_register_partner(port->typec_port,
+ &port->partner_desc);
+ port->connected = true;
+ }
+}
+
+static int tcpm_src_attach(struct tcpm_port *port)
+{
+ enum typec_cc_polarity polarity =
+ port->cc2 == TYPEC_CC_RD ? TYPEC_POLARITY_CC2
+ : TYPEC_POLARITY_CC1;
+ int ret;
+
+ if (port->attached)
+ return 0;
+
+ ret = tcpm_set_polarity(port, polarity);
+ if (ret < 0)
+ return ret;
+
+ ret = tcpm_set_roles(port, true, TYPEC_SOURCE, TYPEC_HOST);
+ if (ret < 0)
+ return ret;
+
+ ret = port->tcpc->set_pd_rx(port->tcpc, true);
+ if (ret < 0)
+ goto out_disable_mux;
+
+ /*
+ * USB Type-C specification, version 1.2,
+ * chapter 4.5.2.2.8.1 (Attached.SRC Requirements)
+ * Enable VCONN only if the non-RD port is set to RA.
+ */
+ if ((polarity == TYPEC_POLARITY_CC1 && port->cc2 == TYPEC_CC_RA) ||
+ (polarity == TYPEC_POLARITY_CC2 && port->cc1 == TYPEC_CC_RA)) {
+ ret = tcpm_set_vconn(port, true);
+ if (ret < 0)
+ goto out_disable_pd;
+ }
+
+ ret = tcpm_set_vbus(port, true);
+ if (ret < 0)
+ goto out_disable_vconn;
+
+ port->pd_capable = false;
+
+ port->partner = NULL;
+
+ port->attached = true;
+ port->send_discover = true;
+
+ return 0;
+
+out_disable_vconn:
+ tcpm_set_vconn(port, false);
+out_disable_pd:
+ port->tcpc->set_pd_rx(port->tcpc, false);
+out_disable_mux:
+ tcpm_mux_set(port, TYPEC_MUX_NONE, TCPC_USB_SWITCH_DISCONNECT);
+ return ret;
+}
+
+static void tcpm_typec_disconnect(struct tcpm_port *port)
+{
+ if (port->connected) {
+ typec_unregister_partner(port->partner);
+ port->partner = NULL;
+ port->connected = false;
+ }
+}
+
+static void tcpm_unregister_altmodes(struct tcpm_port *port)
+{
+ struct pd_mode_data *modep = &port->mode_data;
+ int i;
+
+ for (i = 0; i < modep->altmodes; i++) {
+ typec_unregister_altmode(port->partner_altmode[i]);
+ port->partner_altmode[i] = NULL;
+ }
+
+ memset(modep, 0, sizeof(*modep));
+}
+
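+/*
+ * Return the port to its unattached defaults: drop the partner and
+ * alternate modes, disable PD receive, VBUS, charging and VCONN, and
+ * reset polarity and the try.SRC/try.SNK counters.
+ */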
+static void tcpm_reset_port(struct tcpm_port *port)
+{
+ tcpm_unregister_altmodes(port);
+ tcpm_typec_disconnect(port);
+ port->attached = false;
+ port->pd_capable = false;
+
+ port->tcpc->set_pd_rx(port->tcpc, false);
+ tcpm_init_vbus(port); /* also disables charging */
+ tcpm_init_vconn(port);
+ tcpm_set_current_limit(port, 0, 0);
+ tcpm_set_polarity(port, TYPEC_POLARITY_CC1);
+ tcpm_set_attached_state(port, false);
+ port->try_src_count = 0;
+ port->try_snk_count = 0;
+}
+
+static void tcpm_detach(struct tcpm_port *port)
+{
+ if (!port->attached)
+ return;
+
+ if (tcpm_port_is_disconnected(port))
+ port->hard_reset_count = 0;
+
+ tcpm_reset_port(port);
+}
+
+static void tcpm_src_detach(struct tcpm_port *port)
+{
+ tcpm_detach(port);
+}
+
+static int tcpm_snk_attach(struct tcpm_port *port)
+{
+ int ret;
+
+ if (port->attached)
+ return 0;
+
+ ret = tcpm_set_polarity(port, port->cc2 != TYPEC_CC_OPEN ?
+ TYPEC_POLARITY_CC2 : TYPEC_POLARITY_CC1);
+ if (ret < 0)
+ return ret;
+
+ ret = tcpm_set_roles(port, true, TYPEC_SINK, TYPEC_DEVICE);
+ if (ret < 0)
+ return ret;
+
+ port->pd_capable = false;
+
+ port->partner = NULL;
+
+ port->attached = true;
+ port->send_discover = true;
+
+ return 0;
+}
+
+static void tcpm_snk_detach(struct tcpm_port *port)
+{
+ tcpm_detach(port);
+
+ /* XXX: (Dis)connect SuperSpeed mux? */
+}
+
+static int tcpm_acc_attach(struct tcpm_port *port)
+{
+ int ret;
+
+ if (port->attached)
+ return 0;
+
+ ret = tcpm_set_roles(port, true, TYPEC_SOURCE, TYPEC_HOST);
+ if (ret < 0)
+ return ret;
+
+ port->partner = NULL;
+
+ tcpm_typec_connect(port);
+
+ port->attached = true;
+
+ return 0;
+}
+
+static void tcpm_acc_detach(struct tcpm_port *port)
+{
+ tcpm_detach(port);
+}
+
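+/*
+ * Next state when a (further) hard reset would be due: send another
+ * hard reset while the retry budget lasts, then fall back to error
+ * recovery for a PD-capable partner or to an unattached state (a sink
+ * still waiting for capabilities simply settles in SNK_READY).
+ */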
+static inline enum tcpm_state hard_reset_state(struct tcpm_port *port)
+{
+ if (port->hard_reset_count < PD_N_HARD_RESET_COUNT)
+ return HARD_RESET_SEND;
+ if (port->pd_capable)
+ return ERROR_RECOVERY;
+ if (port->pwr_role == TYPEC_SOURCE)
+ return SRC_UNATTACHED;
+ if (port->state == SNK_WAIT_CAPABILITIES)
+ return SNK_READY;
+ return SNK_UNATTACHED;
+}
+
+static inline enum tcpm_state ready_state(struct tcpm_port *port)
+{
+ if (port->pwr_role == TYPEC_SOURCE)
+ return SRC_READY;
+ else
+ return SNK_READY;
+}
+
+static inline enum tcpm_state unattached_state(struct tcpm_port *port)
+{
+ if (port->pwr_role == TYPEC_SOURCE)
+ return SRC_UNATTACHED;
+ else
+ return SNK_UNATTACHED;
+}
+
+static void tcpm_check_send_discover(struct tcpm_port *port)
+{
+ if (port->data_role == TYPEC_HOST && port->send_discover &&
+ port->pd_capable) {
+ tcpm_send_vdm(port, USB_SID_PD, CMD_DISCOVER_IDENT, NULL, 0);
+ port->send_discover = false;
+ }
+}
+
+static void tcpm_swap_complete(struct tcpm_port *port, int result)
+{
+ if (port->swap_pending) {
+ port->swap_status = result;
+ port->swap_pending = false;
+ complete(&port->swap_complete);
+ }
+}
+
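+/*
+ * Run the entry actions for the current port state. Called with
+ * port->lock held from the state machine work function; delayed
+ * transitions are scheduled with tcpm_set_state() and picked up there.
+ */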
+static void run_state_machine(struct tcpm_port *port)
+{
+ int ret;
+
+ port->enter_state = port->state;
+ switch (port->state) {
+ case DRP_TOGGLING:
+ break;
+ /* SRC states */
+ case SRC_UNATTACHED:
+ tcpm_swap_complete(port, -ENOTCONN);
+ tcpm_src_detach(port);
+ if (tcpm_start_drp_toggling(port)) {
+ tcpm_set_state(port, DRP_TOGGLING, 0);
+ break;
+ }
+ tcpm_set_cc(port, tcpm_rp_cc(port));
+ if (port->typec_caps.type == TYPEC_PORT_DRP)
+ tcpm_set_state(port, SNK_UNATTACHED, PD_T_DRP_SNK);
+ break;
+ case SRC_ATTACH_WAIT:
+ if (tcpm_port_is_debug(port))
+ tcpm_set_state(port, DEBUG_ACC_ATTACHED,
+ PD_T_CC_DEBOUNCE);
+ else if (tcpm_port_is_audio(port))
+ tcpm_set_state(port, AUDIO_ACC_ATTACHED,
+ PD_T_CC_DEBOUNCE);
+ else if (tcpm_port_is_source(port))
+ tcpm_set_state(port,
+ tcpm_try_snk(port) ? SNK_TRY
+ : SRC_ATTACHED,
+ PD_T_CC_DEBOUNCE);
+ break;
+
+ case SNK_TRY:
+ port->try_snk_count++;
+ /*
+ * Requirements:
+ * - Do not drive vconn or vbus
+ * - Terminate CC pins (both) to Rd
+ * Action:
+ * - Wait for tDRPTry (PD_T_DRP_TRY).
+ * Until then, ignore any state changes.
+ */
+ tcpm_set_cc(port, TYPEC_CC_RD);
+ tcpm_set_state(port, SNK_TRY_WAIT, PD_T_DRP_TRY);
+ break;
+ case SNK_TRY_WAIT:
+ if (port->vbus_present && tcpm_port_is_sink(port)) {
+ tcpm_set_state(port, SNK_ATTACHED, 0);
+ break;
+ }
+ if (!tcpm_port_is_sink(port)) {
+ tcpm_set_state(port, SRC_TRYWAIT,
+ PD_T_PD_DEBOUNCE);
+ break;
+ }
+ /* No vbus, cc state is sink or open */
+ tcpm_set_state(port, SRC_TRYWAIT_UNATTACHED, PD_T_DRP_TRYWAIT);
+ break;
+ case SRC_TRYWAIT:
+ tcpm_set_cc(port, tcpm_rp_cc(port));
+ if (!port->vbus_present && tcpm_port_is_source(port))
+ tcpm_set_state(port, SRC_ATTACHED, PD_T_CC_DEBOUNCE);
+ else
+ tcpm_set_state(port, SRC_TRYWAIT_UNATTACHED,
+ PD_T_DRP_TRY);
+ break;
+ case SRC_TRYWAIT_UNATTACHED:
+ tcpm_set_state(port, SNK_UNATTACHED, 0);
+ break;
+
+ case SRC_ATTACHED:
+ ret = tcpm_src_attach(port);
+ tcpm_set_state(port, SRC_UNATTACHED,
+ ret < 0 ? 0 : PD_T_PS_SOURCE_ON);
+ break;
+ case SRC_STARTUP:
+ typec_set_pwr_opmode(port->typec_port, TYPEC_PWR_MODE_USB);
+ port->pwr_opmode = TYPEC_PWR_MODE_USB;
+ port->caps_count = 0;
+ port->message_id = 0;
+ port->explicit_contract = false;
+ tcpm_set_state(port, SRC_SEND_CAPABILITIES, 0);
+ break;
+ case SRC_SEND_CAPABILITIES:
+ port->caps_count++;
+ if (port->caps_count > PD_N_CAPS_COUNT) {
+ tcpm_set_state(port, SRC_READY, 0);
+ break;
+ }
+ ret = tcpm_pd_send_source_caps(port);
+ if (ret < 0) {
+ tcpm_set_state(port, SRC_SEND_CAPABILITIES,
+ PD_T_SEND_SOURCE_CAP);
+ } else {
+ /*
+ * Per standard, we should clear the reset counter here.
+ * However, that can result in state machine hang-ups.
+ * Reset it only in READY state to improve stability.
+ */
+ /* port->hard_reset_count = 0; */
+ port->caps_count = 0;
+ port->pd_capable = true;
+ tcpm_set_state_cond(port, hard_reset_state(port),
+ PD_T_SEND_SOURCE_CAP);
+ }
+ break;
+ case SRC_NEGOTIATE_CAPABILITIES:
+ ret = tcpm_pd_check_request(port);
+ if (ret < 0) {
+ tcpm_pd_send_control(port, PD_CTRL_REJECT);
+ if (!port->explicit_contract) {
+ tcpm_set_state(port,
+ SRC_WAIT_NEW_CAPABILITIES, 0);
+ } else {
+ tcpm_set_state(port, SRC_READY, 0);
+ }
+ } else {
+ tcpm_pd_send_control(port, PD_CTRL_ACCEPT);
+ tcpm_set_state(port, SRC_TRANSITION_SUPPLY,
+ PD_T_SRC_TRANSITION);
+ }
+ break;
+ case SRC_TRANSITION_SUPPLY:
+ /* XXX: regulator_set_voltage(vbus, ...) */
+ tcpm_pd_send_control(port, PD_CTRL_PS_RDY);
+ port->explicit_contract = true;
+ typec_set_pwr_opmode(port->typec_port, TYPEC_PWR_MODE_PD);
+ port->pwr_opmode = TYPEC_PWR_MODE_PD;
+ tcpm_set_state_cond(port, SRC_READY, 0);
+ break;
+ case SRC_READY:
+ port->hard_reset_count = 0;
+ port->try_src_count = 0;
+
+ tcpm_typec_connect(port);
+
+ tcpm_check_send_discover(port);
+ /*
+ * 6.3.5
+ * Sending ping messages is not necessary if
+ * - the source operates at vSafe5V
+ * or
+ * - The system is not operating in PD mode
+ * or
+ * - Both partners are connected using a Type-C connector
+ * XXX How do we know that?
+ */
+ if (port->pwr_opmode == TYPEC_PWR_MODE_PD &&
+ !port->op_vsafe5v) {
+ tcpm_pd_send_control(port, PD_CTRL_PING);
+ tcpm_set_state_cond(port, SRC_READY,
+ PD_T_SOURCE_ACTIVITY);
+ }
+ break;
+ case SRC_WAIT_NEW_CAPABILITIES:
+ /* Nothing to do... */
+ break;
+
+ /* SNK states */
+ case SNK_UNATTACHED:
+ tcpm_swap_complete(port, -ENOTCONN);
+ tcpm_snk_detach(port);
+ if (tcpm_start_drp_toggling(port)) {
+ tcpm_set_state(port, DRP_TOGGLING, 0);
+ break;
+ }
+ tcpm_set_cc(port, TYPEC_CC_RD);
+ if (port->typec_caps.type == TYPEC_PORT_DRP)
+ tcpm_set_state(port, SRC_UNATTACHED, PD_T_DRP_SRC);
+ break;
+ case SNK_ATTACH_WAIT:
+ if ((port->cc1 == TYPEC_CC_OPEN &&
+ port->cc2 != TYPEC_CC_OPEN) ||
+ (port->cc1 != TYPEC_CC_OPEN &&
+ port->cc2 == TYPEC_CC_OPEN))
+ tcpm_set_state(port, SNK_DEBOUNCED,
+ PD_T_CC_DEBOUNCE);
+ else if (tcpm_port_is_disconnected(port))
+ tcpm_set_state(port, SNK_UNATTACHED,
+ PD_T_PD_DEBOUNCE);
+ break;
+ case SNK_DEBOUNCED:
+ if (tcpm_port_is_disconnected(port))
+ tcpm_set_state(port, SNK_UNATTACHED,
+ PD_T_PD_DEBOUNCE);
+ else if (port->vbus_present)
+ tcpm_set_state(port,
+ tcpm_try_src(port) ? SRC_TRY
+ : SNK_ATTACHED,
+ 0);
+ else
+ /* Wait for VBUS, but not forever */
+ tcpm_set_state(port, SNK_UNATTACHED, PD_T_PS_SOURCE_ON);
+ break;
+
+ case SRC_TRY:
+ port->try_src_count++;
+ tcpm_set_cc(port, tcpm_rp_cc(port));
+ tcpm_set_state(port, SNK_TRYWAIT, PD_T_DRP_TRY);
+ break;
+ case SRC_TRY_DEBOUNCE:
+ tcpm_set_state(port, SRC_ATTACHED, PD_T_PD_DEBOUNCE);
+ break;
+ case SNK_TRYWAIT:
+ tcpm_set_cc(port, TYPEC_CC_RD);
+ tcpm_set_state(port, SNK_TRYWAIT_DEBOUNCE, PD_T_CC_DEBOUNCE);
+ break;
+ case SNK_TRYWAIT_DEBOUNCE:
+ if (port->vbus_present) {
+ tcpm_set_state(port, SNK_ATTACHED, 0);
+ break;
+ }
+ if (tcpm_port_is_disconnected(port)) {
+ tcpm_set_state(port, SNK_UNATTACHED,
+ PD_T_PD_DEBOUNCE);
+ break;
+ }
+ if (tcpm_port_is_source(port))
+ tcpm_set_state(port, SRC_ATTACHED, 0);
+ /* XXX Are we supposed to stay in this state? */
+ break;
+ case SNK_TRYWAIT_VBUS:
+ tcpm_set_state(port, SNK_ATTACHED, PD_T_CC_DEBOUNCE);
+ break;
+
+ case SNK_ATTACHED:
+ ret = tcpm_snk_attach(port);
+ if (ret < 0)
+ tcpm_set_state(port, SNK_UNATTACHED, 0);
+ else
+ tcpm_set_state(port, SNK_STARTUP, 0);
+ break;
+ case SNK_STARTUP:
+ /* XXX: callback into infrastructure */
+ typec_set_pwr_opmode(port->typec_port, TYPEC_PWR_MODE_USB);
+ port->pwr_opmode = TYPEC_PWR_MODE_USB;
+ port->message_id = 0;
+ port->explicit_contract = false;
+ tcpm_set_state(port, SNK_DISCOVERY, 0);
+ break;
+ case SNK_DISCOVERY:
+ if (port->vbus_present) {
+ tcpm_set_current_limit(port,
+ tcpm_get_current_limit(port),
+ 5000);
+ tcpm_set_charge(port, true);
+ tcpm_set_state(port, SNK_WAIT_CAPABILITIES, 0);
+ break;
+ }
+ /*
+ * For DRP, timeouts differ. Also, handling is supposed to be
+ * different and much more complex (dead battery detection;
+ * see USB power delivery specification, section 8.3.3.6.1.5.1).
+ */
+ tcpm_set_state(port, hard_reset_state(port),
+ port->typec_caps.type == TYPEC_PORT_DRP ?
+ PD_T_DB_DETECT : PD_T_NO_RESPONSE);
+ break;
+ case SNK_DISCOVERY_DEBOUNCE:
+ tcpm_set_state(port, SNK_DISCOVERY_DEBOUNCE_DONE,
+ PD_T_CC_DEBOUNCE);
+ break;
+ case SNK_DISCOVERY_DEBOUNCE_DONE:
+ if (!tcpm_port_is_disconnected(port) &&
+ tcpm_port_is_sink(port) &&
+ time_is_after_jiffies(port->delayed_runtime)) {
+ tcpm_set_state(port, SNK_DISCOVERY,
+ port->delayed_runtime - jiffies);
+ break;
+ }
+ tcpm_set_state(port, unattached_state(port), 0);
+ break;
+ case SNK_WAIT_CAPABILITIES:
+ ret = port->tcpc->set_pd_rx(port->tcpc, true);
+ if (ret < 0) {
+ tcpm_set_state(port, SNK_READY, 0);
+ break;
+ }
+ /*
+ * If VBUS has never been low, and we time out waiting
+ * for source cap, try a soft reset first, in case we
+ * were already in a stable contract before this boot.
+ * Do this only once.
+ */
+ if (port->vbus_never_low) {
+ port->vbus_never_low = false;
+ tcpm_set_state(port, SOFT_RESET_SEND,
+ PD_T_SINK_WAIT_CAP);
+ } else {
+ tcpm_set_state(port, hard_reset_state(port),
+ PD_T_SINK_WAIT_CAP);
+ }
+ break;
+ case SNK_NEGOTIATE_CAPABILITIES:
+ port->pd_capable = true;
+ port->hard_reset_count = 0;
+ ret = tcpm_pd_send_request(port);
+ if (ret < 0) {
+ /* Let the Source send capabilities again. */
+ tcpm_set_state(port, SNK_WAIT_CAPABILITIES, 0);
+ } else {
+ tcpm_set_state_cond(port, hard_reset_state(port),
+ PD_T_SENDER_RESPONSE);
+ }
+ break;
+ case SNK_TRANSITION_SINK:
+ case SNK_TRANSITION_SINK_VBUS:
+ tcpm_set_state(port, hard_reset_state(port),
+ PD_T_PS_TRANSITION);
+ break;
+ case SNK_READY:
+ port->try_snk_count = 0;
+ port->explicit_contract = true;
+ typec_set_pwr_opmode(port->typec_port, TYPEC_PWR_MODE_PD);
+ port->pwr_opmode = TYPEC_PWR_MODE_PD;
+
+ tcpm_typec_connect(port);
+
+ tcpm_check_send_discover(port);
+ break;
+
+ /* Accessory states */
+ case ACC_UNATTACHED:
+ tcpm_acc_detach(port);
+ tcpm_set_state(port, SRC_UNATTACHED, 0);
+ break;
+ case DEBUG_ACC_ATTACHED:
+ case AUDIO_ACC_ATTACHED:
+ ret = tcpm_acc_attach(port);
+ if (ret < 0)
+ tcpm_set_state(port, ACC_UNATTACHED, 0);
+ break;
+ case AUDIO_ACC_DEBOUNCE:
+ tcpm_set_state(port, ACC_UNATTACHED, PD_T_CC_DEBOUNCE);
+ break;
+
+ /* Hard_Reset states */
+ case HARD_RESET_SEND:
+ tcpm_pd_transmit(port, TCPC_TX_HARD_RESET, NULL);
+ tcpm_set_state(port, HARD_RESET_START, 0);
+ break;
+ case HARD_RESET_START:
+ port->hard_reset_count++;
+ port->tcpc->set_pd_rx(port->tcpc, false);
+ tcpm_unregister_altmodes(port);
+ port->send_discover = true;
+ if (port->pwr_role == TYPEC_SOURCE)
+ tcpm_set_state(port, SRC_HARD_RESET_VBUS_OFF,
+ PD_T_PS_HARD_RESET);
+ else
+ tcpm_set_state(port, SNK_HARD_RESET_SINK_OFF, 0);
+ break;
+ case SRC_HARD_RESET_VBUS_OFF:
+ tcpm_set_vconn(port, true);
+ tcpm_set_vbus(port, false);
+ tcpm_set_roles(port, false, TYPEC_SOURCE, TYPEC_HOST);
+ tcpm_set_state(port, SRC_HARD_RESET_VBUS_ON, PD_T_SRC_RECOVER);
+ break;
+ case SRC_HARD_RESET_VBUS_ON:
+ tcpm_set_vbus(port, true);
+ port->tcpc->set_pd_rx(port->tcpc, true);
+ tcpm_set_attached_state(port, true);
+ tcpm_set_state(port, SRC_UNATTACHED, PD_T_PS_SOURCE_ON);
+ break;
+ case SNK_HARD_RESET_SINK_OFF:
+ tcpm_set_vconn(port, false);
+ tcpm_set_charge(port, false);
+ tcpm_set_roles(port, false, TYPEC_SINK, TYPEC_DEVICE);
+ /*
+ * VBUS may or may not toggle, depending on the adapter.
+ * If it doesn't toggle, transition to SNK_HARD_RESET_SINK_ON
+ * directly after timeout.
+ */
+ tcpm_set_state(port, SNK_HARD_RESET_SINK_ON, PD_T_SAFE_0V);
+ break;
+ case SNK_HARD_RESET_WAIT_VBUS:
+ /* Assume we're disconnected if VBUS doesn't come back. */
+ tcpm_set_state(port, SNK_UNATTACHED,
+ PD_T_SRC_RECOVER_MAX + PD_T_SRC_TURN_ON);
+ break;
+ case SNK_HARD_RESET_SINK_ON:
+ /* Note: There is no guarantee that VBUS is on in this state */
+ /*
+ * XXX:
+ * The specification suggests that dual mode ports in sink
+ * mode should transition to state PE_SRC_Transition_to_default.
+ * See USB power delivery specification chapter 8.3.3.6.1.3.
+ * This would mean:
+ * - turn off VCONN, reset power supply
+ * - request hardware reset
+ * - turn on VCONN
+ * - Transition to state PE_Src_Startup
+ * SNK only ports shall transition to state Snk_Startup
+ * (see chapter 8.3.3.3.8).
+ * Similarly, dual-mode ports in source mode should transition
+ * to PE_SNK_Transition_to_default.
+ */
+ tcpm_set_attached_state(port, true);
+ tcpm_set_state(port, SNK_STARTUP, 0);
+ break;
+
+ /* Soft_Reset states */
+ case SOFT_RESET:
+ port->message_id = 0;
+ tcpm_pd_send_control(port, PD_CTRL_ACCEPT);
+ if (port->pwr_role == TYPEC_SOURCE)
+ tcpm_set_state(port, SRC_SEND_CAPABILITIES, 0);
+ else
+ tcpm_set_state(port, SNK_WAIT_CAPABILITIES, 0);
+ break;
+ case SOFT_RESET_SEND:
+ port->message_id = 0;
+ if (tcpm_pd_send_control(port, PD_CTRL_SOFT_RESET))
+ tcpm_set_state_cond(port, hard_reset_state(port), 0);
+ else
+ tcpm_set_state_cond(port, hard_reset_state(port),
+ PD_T_SENDER_RESPONSE);
+ break;
+
+ /* DR_Swap states */
+ case DR_SWAP_SEND:
+ tcpm_pd_send_control(port, PD_CTRL_DR_SWAP);
+ tcpm_set_state_cond(port, DR_SWAP_SEND_TIMEOUT,
+ PD_T_SENDER_RESPONSE);
+ break;
+ case DR_SWAP_ACCEPT:
+ tcpm_pd_send_control(port, PD_CTRL_ACCEPT);
+ tcpm_set_state_cond(port, DR_SWAP_CHANGE_DR, 0);
+ break;
+ case DR_SWAP_SEND_TIMEOUT:
+ tcpm_swap_complete(port, -ETIMEDOUT);
+ tcpm_set_state(port, ready_state(port), 0);
+ break;
+ case DR_SWAP_CHANGE_DR:
+ if (port->data_role == TYPEC_HOST) {
+ tcpm_unregister_altmodes(port);
+ tcpm_set_roles(port, true, port->pwr_role,
+ TYPEC_DEVICE);
+ } else {
+ tcpm_set_roles(port, true, port->pwr_role,
+ TYPEC_HOST);
+ port->send_discover = true;
+ }
+ tcpm_swap_complete(port, 0);
+ tcpm_set_state(port, ready_state(port), 0);
+ break;
+
+ /* PR_Swap states */
+ case PR_SWAP_ACCEPT:
+ tcpm_pd_send_control(port, PD_CTRL_ACCEPT);
+ tcpm_set_state(port, PR_SWAP_START, 0);
+ break;
+ case PR_SWAP_SEND:
+ tcpm_pd_send_control(port, PD_CTRL_PR_SWAP);
+ tcpm_set_state_cond(port, PR_SWAP_SEND_TIMEOUT,
+ PD_T_SENDER_RESPONSE);
+ break;
+ case PR_SWAP_SEND_TIMEOUT:
+ tcpm_swap_complete(port, -ETIMEDOUT);
+ tcpm_set_state(port, ready_state(port), 0);
+ break;
+ case PR_SWAP_START:
+ if (port->pwr_role == TYPEC_SOURCE)
+ tcpm_set_state(port, PR_SWAP_SRC_SNK_TRANSITION_OFF,
+ PD_T_SRC_TRANSITION);
+ else
+ tcpm_set_state(port, PR_SWAP_SNK_SRC_SINK_OFF, 0);
+ break;
+ case PR_SWAP_SRC_SNK_TRANSITION_OFF:
+ tcpm_set_vbus(port, false);
+ port->explicit_contract = false;
+ tcpm_set_state(port, PR_SWAP_SRC_SNK_SOURCE_OFF,
+ PD_T_PS_SOURCE_OFF);
+ break;
+ case PR_SWAP_SRC_SNK_SOURCE_OFF:
+ tcpm_set_cc(port, TYPEC_CC_RD);
+ if (tcpm_pd_send_control(port, PD_CTRL_PS_RDY)) {
+ tcpm_set_state(port, ERROR_RECOVERY, 0);
+ break;
+ }
+ tcpm_set_state_cond(port, SNK_UNATTACHED, PD_T_PS_SOURCE_ON);
+ break;
+ case PR_SWAP_SRC_SNK_SINK_ON:
+ tcpm_set_pwr_role(port, TYPEC_SINK);
+ tcpm_swap_complete(port, 0);
+ tcpm_set_state(port, SNK_STARTUP, 0);
+ break;
+ case PR_SWAP_SNK_SRC_SINK_OFF:
+ tcpm_set_charge(port, false);
+ tcpm_set_state(port, hard_reset_state(port),
+ PD_T_PS_SOURCE_OFF);
+ break;
+ case PR_SWAP_SNK_SRC_SOURCE_ON:
+ tcpm_set_cc(port, tcpm_rp_cc(port));
+ tcpm_set_vbus(port, true);
+ tcpm_pd_send_control(port, PD_CTRL_PS_RDY);
+ tcpm_set_pwr_role(port, TYPEC_SOURCE);
+ tcpm_swap_complete(port, 0);
+ tcpm_set_state(port, SRC_STARTUP, 0);
+ break;
+
+ case VCONN_SWAP_ACCEPT:
+ tcpm_pd_send_control(port, PD_CTRL_ACCEPT);
+ tcpm_set_state(port, VCONN_SWAP_START, 0);
+ break;
+ case VCONN_SWAP_SEND:
+ tcpm_pd_send_control(port, PD_CTRL_VCONN_SWAP);
+ tcpm_set_state(port, VCONN_SWAP_SEND_TIMEOUT,
+ PD_T_SENDER_RESPONSE);
+ break;
+ case VCONN_SWAP_SEND_TIMEOUT:
+ tcpm_swap_complete(port, -ETIMEDOUT);
+ tcpm_set_state(port, ready_state(port), 0);
+ break;
+ case VCONN_SWAP_START:
+ if (port->vconn_role == TYPEC_SOURCE)
+ tcpm_set_state(port, VCONN_SWAP_WAIT_FOR_VCONN, 0);
+ else
+ tcpm_set_state(port, VCONN_SWAP_TURN_ON_VCONN, 0);
+ break;
+ case VCONN_SWAP_WAIT_FOR_VCONN:
+ tcpm_set_state(port, hard_reset_state(port),
+ PD_T_VCONN_SOURCE_ON);
+ break;
+ case VCONN_SWAP_TURN_ON_VCONN:
+ tcpm_set_vconn(port, true);
+ tcpm_pd_send_control(port, PD_CTRL_PS_RDY);
+ tcpm_swap_complete(port, 0);
+ tcpm_set_state(port, ready_state(port), 0);
+ break;
+ case VCONN_SWAP_TURN_OFF_VCONN:
+ tcpm_set_vconn(port, false);
+ tcpm_swap_complete(port, 0);
+ tcpm_set_state(port, ready_state(port), 0);
+ break;
+
+ case DR_SWAP_CANCEL:
+ case PR_SWAP_CANCEL:
+ case VCONN_SWAP_CANCEL:
+ tcpm_swap_complete(port, port->swap_status);
+ if (port->pwr_role == TYPEC_SOURCE)
+ tcpm_set_state(port, SRC_READY, 0);
+ else
+ tcpm_set_state(port, SNK_READY, 0);
+ break;
+
+ case BIST_RX:
+ switch (BDO_MODE_MASK(port->bist_request)) {
+ case BDO_MODE_CARRIER2:
+ tcpm_pd_transmit(port, TCPC_TX_BIST_MODE_2, NULL);
+ break;
+ default:
+ break;
+ }
+ /* Always switch to unattached state */
+ tcpm_set_state(port, unattached_state(port), 0);
+ break;
+ case ERROR_RECOVERY:
+ tcpm_swap_complete(port, -EPROTO);
+ tcpm_reset_port(port);
+
+ tcpm_set_cc(port, TYPEC_CC_OPEN);
+ tcpm_set_state(port, ERROR_RECOVERY_WAIT_OFF,
+ PD_T_ERROR_RECOVERY);
+ break;
+ case ERROR_RECOVERY_WAIT_OFF:
+ tcpm_set_state(port,
+ tcpm_default_state(port),
+ port->vbus_present ? PD_T_PS_SOURCE_OFF : 0);
+ break;
+ default:
+ WARN(1, "Unexpected port state %d\n", port->state);
+ break;
+ }
+}
+
+static void tcpm_state_machine_work(struct work_struct *work)
+{
+ struct tcpm_port *port = container_of(work, struct tcpm_port,
+ state_machine.work);
+ enum tcpm_state prev_state;
+
+ mutex_lock(&port->lock);
+ port->state_machine_running = true;
+
+ if (port->queued_message && tcpm_send_queued_message(port))
+ goto done;
+
+ /* If we were queued due to a delayed state change, update it now */
+ if (port->delayed_state) {
+ tcpm_log(port, "state change %s -> %s [delayed %ld ms]",
+ tcpm_states[port->state],
+ tcpm_states[port->delayed_state], port->delay_ms);
+ port->prev_state = port->state;
+ port->state = port->delayed_state;
+ port->delayed_state = INVALID_STATE;
+ }
+
+ /*
+ * Continue running as long as we have (non-delayed) state changes
+ * to make.
+ */
+ do {
+ prev_state = port->state;
+ run_state_machine(port);
+ if (port->queued_message)
+ tcpm_send_queued_message(port);
+ } while (port->state != prev_state && !port->delayed_state);
+
+done:
+ port->state_machine_running = false;
+ mutex_unlock(&port->lock);
+}
+
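+/*
+ * Handle a CC line status change reported by the TCPC. Called with
+ * port->lock held.
+ */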
+static void _tcpm_cc_change(struct tcpm_port *port, enum typec_cc_status cc1,
+ enum typec_cc_status cc2)
+{
+ enum typec_cc_status old_cc1, old_cc2;
+ enum tcpm_state new_state;
+
+ old_cc1 = port->cc1;
+ old_cc2 = port->cc2;
+ port->cc1 = cc1;
+ port->cc2 = cc2;
+
+ tcpm_log_force(port,
+ "CC1: %u -> %u, CC2: %u -> %u [state %s, polarity %d, %s]",
+ old_cc1, cc1, old_cc2, cc2, tcpm_states[port->state],
+ port->polarity,
+ tcpm_port_is_disconnected(port) ? "disconnected"
+ : "connected");
+
+ switch (port->state) {
+ case DRP_TOGGLING:
+ if (tcpm_port_is_debug(port) || tcpm_port_is_audio(port) ||
+ tcpm_port_is_source(port))
+ tcpm_set_state(port, SRC_ATTACH_WAIT, 0);
+ else if (tcpm_port_is_sink(port))
+ tcpm_set_state(port, SNK_ATTACH_WAIT, 0);
+ break;
+ case SRC_UNATTACHED:
+ case ACC_UNATTACHED:
+ if (tcpm_port_is_debug(port) || tcpm_port_is_audio(port) ||
+ tcpm_port_is_source(port))
+ tcpm_set_state(port, SRC_ATTACH_WAIT, 0);
+ break;
+ case SRC_ATTACH_WAIT:
+ if (tcpm_port_is_disconnected(port) ||
+ tcpm_port_is_audio_detached(port))
+ tcpm_set_state(port, SRC_UNATTACHED, 0);
+ else if (cc1 != old_cc1 || cc2 != old_cc2)
+ tcpm_set_state(port, SRC_ATTACH_WAIT, 0);
+ break;
+ case SRC_ATTACHED:
+ if (tcpm_port_is_disconnected(port))
+ tcpm_set_state(port, SRC_UNATTACHED, 0);
+ break;
+
+ case SNK_UNATTACHED:
+ if (tcpm_port_is_sink(port))
+ tcpm_set_state(port, SNK_ATTACH_WAIT, 0);
+ break;
+ case SNK_ATTACH_WAIT:
+ if ((port->cc1 == TYPEC_CC_OPEN &&
+ port->cc2 != TYPEC_CC_OPEN) ||
+ (port->cc1 != TYPEC_CC_OPEN &&
+ port->cc2 == TYPEC_CC_OPEN))
+ new_state = SNK_DEBOUNCED;
+ else if (tcpm_port_is_disconnected(port))
+ new_state = SNK_UNATTACHED;
+ else
+ break;
+ if (new_state != port->delayed_state)
+ tcpm_set_state(port, SNK_ATTACH_WAIT, 0);
+ break;
+ case SNK_DEBOUNCED:
+ if (tcpm_port_is_disconnected(port))
+ new_state = SNK_UNATTACHED;
+ else if (port->vbus_present)
+ new_state = tcpm_try_src(port) ? SRC_TRY : SNK_ATTACHED;
+ else
+ new_state = SNK_UNATTACHED;
+ if (new_state != port->delayed_state)
+ tcpm_set_state(port, SNK_DEBOUNCED, 0);
+ break;
+ case SNK_READY:
+ if (tcpm_port_is_disconnected(port))
+ tcpm_set_state(port, unattached_state(port), 0);
+ else if (!port->pd_capable &&
+ (cc1 != old_cc1 || cc2 != old_cc2))
+ tcpm_set_current_limit(port,
+ tcpm_get_current_limit(port),
+ 5000);
+ break;
+
+ case AUDIO_ACC_ATTACHED:
+ if (cc1 == TYPEC_CC_OPEN || cc2 == TYPEC_CC_OPEN)
+ tcpm_set_state(port, AUDIO_ACC_DEBOUNCE, 0);
+ break;
+ case AUDIO_ACC_DEBOUNCE:
+ if (tcpm_port_is_audio(port))
+ tcpm_set_state(port, AUDIO_ACC_ATTACHED, 0);
+ break;
+
+ case DEBUG_ACC_ATTACHED:
+ if (cc1 == TYPEC_CC_OPEN || cc2 == TYPEC_CC_OPEN)
+ tcpm_set_state(port, ACC_UNATTACHED, 0);
+ break;
+
+ case SNK_TRY:
+ /* Do nothing, waiting for timeout */
+ break;
+
+ case SNK_DISCOVERY:
+ /* CC line is unstable, wait for debounce */
+ if (tcpm_port_is_disconnected(port))
+ tcpm_set_state(port, SNK_DISCOVERY_DEBOUNCE, 0);
+ break;
+ case SNK_DISCOVERY_DEBOUNCE:
+ break;
+
+ case SRC_TRYWAIT:
+ /* Hand over to state machine if needed */
+ if (!port->vbus_present && tcpm_port_is_source(port))
+ new_state = SRC_ATTACHED;
+ else
+ new_state = SRC_TRYWAIT_UNATTACHED;
+
+ if (new_state != port->delayed_state)
+ tcpm_set_state(port, SRC_TRYWAIT, 0);
+ break;
+ case SNK_TRY_WAIT:
+ if (port->vbus_present && tcpm_port_is_sink(port)) {
+ tcpm_set_state(port, SNK_ATTACHED, 0);
+ break;
+ }
+ if (!tcpm_port_is_sink(port))
+ new_state = SRC_TRYWAIT;
+ else
+ new_state = SRC_TRYWAIT_UNATTACHED;
+
+ if (new_state != port->delayed_state)
+ tcpm_set_state(port, SNK_TRY_WAIT, 0);
+ break;
+
+ case SRC_TRY:
+ tcpm_set_state(port, SRC_TRY_DEBOUNCE, 0);
+ break;
+ case SRC_TRY_DEBOUNCE:
+ tcpm_set_state(port, SRC_TRY, 0);
+ break;
+ case SNK_TRYWAIT_DEBOUNCE:
+ if (port->vbus_present) {
+ tcpm_set_state(port, SNK_ATTACHED, 0);
+ break;
+ }
+ if (tcpm_port_is_source(port)) {
+ tcpm_set_state(port, SRC_ATTACHED, 0);
+ break;
+ }
+ if (tcpm_port_is_disconnected(port) &&
+ port->delayed_state != SNK_UNATTACHED)
+ tcpm_set_state(port, SNK_TRYWAIT_DEBOUNCE, 0);
+ break;
+
+ case PR_SWAP_SNK_SRC_SINK_OFF:
+ case PR_SWAP_SRC_SNK_TRANSITION_OFF:
+ case PR_SWAP_SRC_SNK_SOURCE_OFF:
+ /*
+ * CC state change is expected here; we just turned off power.
+ * Ignore it.
+ */
+ break;
+
+ default:
+ if (tcpm_port_is_disconnected(port))
+ tcpm_set_state(port, unattached_state(port), 0);
+ break;
+ }
+}
+
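+/* Handle VBUS becoming present; called with port->lock held. */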
+static void _tcpm_pd_vbus_on(struct tcpm_port *port)
+{
+ enum tcpm_state new_state;
+
+ tcpm_log_force(port, "VBUS on");
+ port->vbus_present = true;
+ switch (port->state) {
+ case SNK_TRANSITION_SINK_VBUS:
+ tcpm_set_state(port, SNK_READY, 0);
+ break;
+ case SNK_DISCOVERY:
+ tcpm_set_state(port, SNK_DISCOVERY, 0);
+ break;
+
+ case SNK_DEBOUNCED:
+ tcpm_set_state(port, tcpm_try_src(port) ? SRC_TRY
+ : SNK_ATTACHED,
+ 0);
+ break;
+ case SNK_HARD_RESET_WAIT_VBUS:
+ tcpm_set_state(port, SNK_HARD_RESET_SINK_ON, 0);
+ break;
+ case SRC_ATTACHED:
+ tcpm_set_state(port, SRC_STARTUP, 0);
+ break;
+ case SRC_HARD_RESET_VBUS_ON:
+ tcpm_set_state(port, SRC_STARTUP, 0);
+ break;
+
+ case SNK_TRY:
+ /* Do nothing, waiting for timeout */
+ break;
+ case SRC_TRYWAIT:
+ /* Hand over to state machine if needed */
+ if (port->delayed_state != SRC_TRYWAIT_UNATTACHED)
+ tcpm_set_state(port, SRC_TRYWAIT, 0);
+ break;
+ case SNK_TRY_WAIT:
+ if (tcpm_port_is_sink(port)) {
+ tcpm_set_state(port, SNK_ATTACHED, 0);
+ break;
+ }
+ if (!tcpm_port_is_sink(port))
+ new_state = SRC_TRYWAIT;
+ else
+ new_state = SRC_TRYWAIT_UNATTACHED;
+
+ if (new_state != port->delayed_state)
+ tcpm_set_state(port, SNK_TRY_WAIT, 0);
+ break;
+ case SNK_TRYWAIT:
+ tcpm_set_state(port, SNK_TRYWAIT_VBUS, 0);
+ break;
+
+ default:
+ break;
+ }
+}
+
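+/* Handle VBUS going away; called with port->lock held. */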
+static void _tcpm_pd_vbus_off(struct tcpm_port *port)
+{
+ enum tcpm_state new_state;
+
+ tcpm_log_force(port, "VBUS off");
+ port->vbus_present = false;
+ port->vbus_never_low = false;
+ switch (port->state) {
+ case SNK_HARD_RESET_SINK_OFF:
+ tcpm_set_state(port, SNK_HARD_RESET_WAIT_VBUS, 0);
+ break;
+ case SRC_HARD_RESET_VBUS_OFF:
+ tcpm_set_state(port, SRC_HARD_RESET_VBUS_ON, 0);
+ break;
+ case HARD_RESET_SEND:
+ break;
+
+ case SNK_TRY:
+ /* Do nothing, waiting for timeout */
+ break;
+ case SRC_TRYWAIT:
+ /* Hand over to state machine if needed */
+ if (tcpm_port_is_source(port))
+ new_state = SRC_ATTACHED;
+ else
+ new_state = SRC_TRYWAIT_UNATTACHED;
+ if (new_state != port->delayed_state)
+ tcpm_set_state(port, SRC_TRYWAIT, 0);
+ break;
+ case SNK_TRY_WAIT:
+ if (!tcpm_port_is_sink(port))
+ new_state = SRC_TRYWAIT;
+ else
+ new_state = SRC_TRYWAIT_UNATTACHED;
+
+ if (new_state != port->delayed_state)
+ tcpm_set_state(port, SNK_TRY_WAIT, 0);
+ break;
+ case SNK_TRYWAIT_VBUS:
+ tcpm_set_state(port, SNK_TRYWAIT, 0);
+ break;
+
+ case SNK_ATTACH_WAIT:
+ tcpm_set_state(port, SNK_UNATTACHED, 0);
+ break;
+
+ case SNK_NEGOTIATE_CAPABILITIES:
+ break;
+
+ case PR_SWAP_SRC_SNK_TRANSITION_OFF:
+ tcpm_set_state(port, PR_SWAP_SRC_SNK_SOURCE_OFF, 0);
+ break;
+
+ case PR_SWAP_SNK_SRC_SINK_OFF:
+ /* Do nothing, expected */
+ break;
+
+ case ERROR_RECOVERY_WAIT_OFF:
+ tcpm_set_state(port,
+ port->pwr_role == TYPEC_SOURCE ?
+ SRC_UNATTACHED : SNK_UNATTACHED,
+ 0);
+ break;
+
+ default:
+ if (port->pwr_role == TYPEC_SINK &&
+ port->attached)
+ tcpm_set_state(port, SNK_UNATTACHED, 0);
+ break;
+ }
+}
+
+static void _tcpm_pd_hard_reset(struct tcpm_port *port)
+{
+ tcpm_log_force(port, "Received hard reset");
+ /*
+ * If we keep receiving hard reset requests, executing the hard reset
+ * must have failed. Revert to error recovery if that happens.
+ */
+ tcpm_set_state(port,
+ port->hard_reset_count < PD_N_HARD_RESET_COUNT ?
+ HARD_RESET_START : ERROR_RECOVERY,
+ 0);
+}
+
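+/*
+ * Work function draining the event bits set by tcpm_cc_change(),
+ * tcpm_vbus_change() and tcpm_pd_hard_reset(). The event word is
+ * protected by pd_event_lock so those helpers never need the port lock.
+ */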
+static void tcpm_pd_event_handler(struct work_struct *work)
+{
+ struct tcpm_port *port = container_of(work, struct tcpm_port,
+ event_work);
+ u32 events;
+
+ mutex_lock(&port->lock);
+
+ spin_lock(&port->pd_event_lock);
+ while (port->pd_events) {
+ events = port->pd_events;
+ port->pd_events = 0;
+ spin_unlock(&port->pd_event_lock);
+ if (events & TCPM_RESET_EVENT)
+ _tcpm_pd_hard_reset(port);
+ if (events & TCPM_VBUS_EVENT) {
+ bool vbus;
+
+ vbus = port->tcpc->get_vbus(port->tcpc);
+ if (vbus)
+ _tcpm_pd_vbus_on(port);
+ else
+ _tcpm_pd_vbus_off(port);
+ }
+ if (events & TCPM_CC_EVENT) {
+ enum typec_cc_status cc1, cc2;
+
+ if (port->tcpc->get_cc(port->tcpc, &cc1, &cc2) == 0)
+ _tcpm_cc_change(port, cc1, cc2);
+ }
+ spin_lock(&port->pd_event_lock);
+ }
+ spin_unlock(&port->pd_event_lock);
+ mutex_unlock(&port->lock);
+}
+
+void tcpm_cc_change(struct tcpm_port *port)
+{
+ spin_lock(&port->pd_event_lock);
+ port->pd_events |= TCPM_CC_EVENT;
+ spin_unlock(&port->pd_event_lock);
+ queue_work(port->wq, &port->event_work);
+}
+EXPORT_SYMBOL_GPL(tcpm_cc_change);
+
+void tcpm_vbus_change(struct tcpm_port *port)
+{
+ spin_lock(&port->pd_event_lock);
+ port->pd_events |= TCPM_VBUS_EVENT;
+ spin_unlock(&port->pd_event_lock);
+ queue_work(port->wq, &port->event_work);
+}
+EXPORT_SYMBOL_GPL(tcpm_vbus_change);
+
+void tcpm_pd_hard_reset(struct tcpm_port *port)
+{
+ spin_lock(&port->pd_event_lock);
+ port->pd_events = TCPM_RESET_EVENT;
+ spin_unlock(&port->pd_event_lock);
+ queue_work(port->wq, &port->event_work);
+}
+EXPORT_SYMBOL_GPL(tcpm_pd_hard_reset);
+
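+/*
+ * Data role swap request from the Type-C class, typically triggered
+ * from user space. Blocks until the swap completes, fails or is
+ * rejected by the partner.
+ */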
+static int tcpm_dr_set(const struct typec_capability *cap,
+ enum typec_data_role data)
+{
+ struct tcpm_port *port = typec_cap_to_tcpm(cap);
+ int ret;
+
+ mutex_lock(&port->swap_lock);
+ mutex_lock(&port->lock);
+
+ if (port->typec_caps.type != TYPEC_PORT_DRP || !port->pd_capable) {
+ ret = -EINVAL;
+ goto port_unlock;
+ }
+ if (port->state != SRC_READY && port->state != SNK_READY) {
+ ret = -EAGAIN;
+ goto port_unlock;
+ }
+
+ if (port->data_role == data) {
+ ret = 0;
+ goto port_unlock;
+ }
+
+ /*
+ * XXX
+ * 6.3.9: If an alternate mode is active, a request to swap
+ * alternate modes shall trigger a port reset.
+ * Reject data role swap request in this case.
+ */
+
+ port->swap_status = 0;
+ port->swap_pending = true;
+ reinit_completion(&port->swap_complete);
+ tcpm_set_state(port, DR_SWAP_SEND, 0);
+ mutex_unlock(&port->lock);
+
+ wait_for_completion(&port->swap_complete);
+
+ ret = port->swap_status;
+ goto swap_unlock;
+
+port_unlock:
+ mutex_unlock(&port->lock);
+swap_unlock:
+ mutex_unlock(&port->swap_lock);
+ return ret;
+}
+
+static int tcpm_pr_set(const struct typec_capability *cap,
+ enum typec_role role)
+{
+ struct tcpm_port *port = typec_cap_to_tcpm(cap);
+ int ret;
+
+ mutex_lock(&port->swap_lock);
+ mutex_lock(&port->lock);
+
+ if (port->typec_caps.type != TYPEC_PORT_DRP) {
+ ret = -EINVAL;
+ goto port_unlock;
+ }
+ if (port->state != SRC_READY && port->state != SNK_READY) {
+ ret = -EAGAIN;
+ goto port_unlock;
+ }
+
+ if (role == port->pwr_role) {
+ ret = 0;
+ goto port_unlock;
+ }
+
+ if (!port->pd_capable) {
+ /*
+ * If the partner is not PD capable, reset the port to
+ * trigger a role change. This can only work if a preferred
+ * role is configured, and if it matches the requested role.
+ */
+ if (port->try_role == TYPEC_NO_PREFERRED_ROLE ||
+ port->try_role == port->pwr_role) {
+ ret = -EINVAL;
+ goto port_unlock;
+ }
+ tcpm_set_state(port, HARD_RESET_SEND, 0);
+ ret = 0;
+ goto port_unlock;
+ }
+
+ port->swap_status = 0;
+ port->swap_pending = true;
+ reinit_completion(&port->swap_complete);
+ tcpm_set_state(port, PR_SWAP_SEND, 0);
+ mutex_unlock(&port->lock);
+
+ wait_for_completion(&port->swap_complete);
+
+ ret = port->swap_status;
+ goto swap_unlock;
+
+port_unlock:
+ mutex_unlock(&port->lock);
+swap_unlock:
+ mutex_unlock(&port->swap_lock);
+ return ret;
+}
+
+static int tcpm_vconn_set(const struct typec_capability *cap,
+ enum typec_role role)
+{
+ struct tcpm_port *port = typec_cap_to_tcpm(cap);
+ int ret;
+
+ mutex_lock(&port->swap_lock);
+ mutex_lock(&port->lock);
+
+ if (port->state != SRC_READY && port->state != SNK_READY) {
+ ret = -EAGAIN;
+ goto port_unlock;
+ }
+
+ if (role == port->vconn_role) {
+ ret = 0;
+ goto port_unlock;
+ }
+
+ port->swap_status = 0;
+ port->swap_pending = true;
+ reinit_completion(&port->swap_complete);
+ tcpm_set_state(port, VCONN_SWAP_SEND, 0);
+ mutex_unlock(&port->lock);
+
+ wait_for_completion(&port->swap_complete);
+
+ ret = port->swap_status;
+ goto swap_unlock;
+
+port_unlock:
+ mutex_unlock(&port->lock);
+swap_unlock:
+ mutex_unlock(&port->swap_lock);
+ return ret;
+}
+
+static int tcpm_try_role(const struct typec_capability *cap, int role)
+{
+ struct tcpm_port *port = typec_cap_to_tcpm(cap);
+ struct tcpc_dev *tcpc = port->tcpc;
+ int ret = 0;
+
+ mutex_lock(&port->lock);
+ if (tcpc->try_role)
+ ret = tcpc->try_role(tcpc, role);
+ if (!ret && !tcpc->config->try_role_hw)
+ port->try_role = role;
+ port->try_src_count = 0;
+ port->try_snk_count = 0;
+ mutex_unlock(&port->lock);
+
+ return ret;
+}
+
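+/*
+ * Bring the port to a known state: initialize the TCPC, reset the port,
+ * sample VBUS and CC, then force error recovery so both ends start from
+ * a clean disconnect. Called with port->lock held.
+ */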
+static void tcpm_init(struct tcpm_port *port)
+{
+ enum typec_cc_status cc1, cc2;
+
+ port->tcpc->init(port->tcpc);
+
+ tcpm_reset_port(port);
+
+ /*
+ * XXX
+ * Should possibly wait for VBUS to settle if it was enabled locally
+ * since tcpm_reset_port() will disable VBUS.
+ */
+ port->vbus_present = port->tcpc->get_vbus(port->tcpc);
+ if (port->vbus_present)
+ port->vbus_never_low = true;
+
+ tcpm_set_state(port, tcpm_default_state(port), 0);
+
+ if (port->tcpc->get_cc(port->tcpc, &cc1, &cc2) == 0)
+ _tcpm_cc_change(port, cc1, cc2);
+
+ /*
+ * Some adapters need a clean slate at startup and won't recover
+ * otherwise, so don't try to be fancy: force a clean disconnect.
+ */
+ tcpm_set_state(port, ERROR_RECOVERY, 0);
+}
+
+void tcpm_tcpc_reset(struct tcpm_port *port)
+{
+ mutex_lock(&port->lock);
+ /* XXX: Maintain PD connection if possible? */
+ tcpm_init(port);
+ mutex_unlock(&port->lock);
+}
+EXPORT_SYMBOL_GPL(tcpm_tcpc_reset);
+
+static int tcpm_copy_pdos(u32 *dest_pdo, const u32 *src_pdo,
+ unsigned int nr_pdo)
+{
+ unsigned int i;
+
+ if (nr_pdo > PDO_MAX_OBJECTS)
+ nr_pdo = PDO_MAX_OBJECTS;
+
+ for (i = 0; i < nr_pdo; i++)
+ dest_pdo[i] = src_pdo[i];
+
+ return nr_pdo;
+}
+
+void tcpm_update_source_capabilities(struct tcpm_port *port, const u32 *pdo,
+ unsigned int nr_pdo)
+{
+ mutex_lock(&port->lock);
+ port->nr_src_pdo = tcpm_copy_pdos(port->src_pdo, pdo, nr_pdo);
+ switch (port->state) {
+ case SRC_UNATTACHED:
+ case SRC_ATTACH_WAIT:
+ case SRC_TRYWAIT:
+ tcpm_set_cc(port, tcpm_rp_cc(port));
+ break;
+ case SRC_SEND_CAPABILITIES:
+ case SRC_NEGOTIATE_CAPABILITIES:
+ case SRC_READY:
+ case SRC_WAIT_NEW_CAPABILITIES:
+ tcpm_set_cc(port, tcpm_rp_cc(port));
+ tcpm_set_state(port, SRC_SEND_CAPABILITIES, 0);
+ break;
+ default:
+ break;
+ }
+ mutex_unlock(&port->lock);
+}
+EXPORT_SYMBOL_GPL(tcpm_update_source_capabilities);
+
+void tcpm_update_sink_capabilities(struct tcpm_port *port, const u32 *pdo,
+ unsigned int nr_pdo,
+ unsigned int max_snk_mv,
+ unsigned int max_snk_ma,
+ unsigned int max_snk_mw,
+ unsigned int operating_snk_mw)
+{
+ mutex_lock(&port->lock);
+ port->nr_snk_pdo = tcpm_copy_pdos(port->snk_pdo, pdo, nr_pdo);
+ port->max_snk_mv = max_snk_mv;
+ port->max_snk_ma = max_snk_ma;
+ port->max_snk_mw = max_snk_mw;
+ port->operating_snk_mw = operating_snk_mw;
+
+ switch (port->state) {
+ case SNK_NEGOTIATE_CAPABILITIES:
+ case SNK_READY:
+ case SNK_TRANSITION_SINK:
+ case SNK_TRANSITION_SINK_VBUS:
+ tcpm_set_state(port, SNK_NEGOTIATE_CAPABILITIES, 0);
+ break;
+ default:
+ break;
+ }
+ mutex_unlock(&port->lock);
+}
+EXPORT_SYMBOL_GPL(tcpm_update_sink_capabilities);
+
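+/*
+ * Register a port with the Type-C class and start its state machines.
+ * The tcpc_dev must provide the mandatory callbacks checked below;
+ * returns an ERR_PTR on failure.
+ */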
+struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc)
+{
+ struct tcpm_port *port;
+ int i, err;
+
+ if (!dev || !tcpc || !tcpc->config ||
+ !tcpc->get_vbus || !tcpc->set_cc || !tcpc->get_cc ||
+ !tcpc->set_polarity || !tcpc->set_vconn || !tcpc->set_vbus ||
+ !tcpc->set_pd_rx || !tcpc->set_roles || !tcpc->pd_transmit)
+ return ERR_PTR(-EINVAL);
+
+ port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
+ if (!port)
+ return ERR_PTR(-ENOMEM);
+
+ port->dev = dev;
+ port->tcpc = tcpc;
+
+ mutex_init(&port->lock);
+ mutex_init(&port->swap_lock);
+
+ port->wq = create_singlethread_workqueue(dev_name(dev));
+ if (!port->wq)
+ return ERR_PTR(-ENOMEM);
+ INIT_DELAYED_WORK(&port->state_machine, tcpm_state_machine_work);
+ INIT_DELAYED_WORK(&port->vdm_state_machine, vdm_state_machine_work);
+ INIT_WORK(&port->event_work, tcpm_pd_event_handler);
+
+ spin_lock_init(&port->pd_event_lock);
+
+ init_completion(&port->tx_complete);
+ init_completion(&port->swap_complete);
+
+ port->nr_src_pdo = tcpm_copy_pdos(port->src_pdo, tcpc->config->src_pdo,
+ tcpc->config->nr_src_pdo);
+ port->nr_snk_pdo = tcpm_copy_pdos(port->snk_pdo, tcpc->config->snk_pdo,
+ tcpc->config->nr_snk_pdo);
+
+ port->max_snk_mv = tcpc->config->max_snk_mv;
+ port->max_snk_ma = tcpc->config->max_snk_ma;
+ port->max_snk_mw = tcpc->config->max_snk_mw;
+ port->operating_snk_mw = tcpc->config->operating_snk_mw;
+ if (!tcpc->config->try_role_hw)
+ port->try_role = tcpc->config->default_role;
+ else
+ port->try_role = TYPEC_NO_PREFERRED_ROLE;
+
+ port->typec_caps.prefer_role = tcpc->config->default_role;
+ port->typec_caps.type = tcpc->config->type;
+ port->typec_caps.revision = 0x0120; /* Type-C spec release 1.2 */
+ port->typec_caps.pd_revision = 0x0200; /* USB-PD spec release 2.0 */
+ port->typec_caps.dr_set = tcpm_dr_set;
+ port->typec_caps.pr_set = tcpm_pr_set;
+ port->typec_caps.vconn_set = tcpm_vconn_set;
+ port->typec_caps.try_role = tcpm_try_role;
+
+ port->partner_desc.identity = &port->partner_ident;
+
+ /*
+ * TODO:
+ * - alt_modes, set_alt_mode
+ * - {debug,audio}_accessory
+ */
+
+ port->typec_port = typec_register_port(port->dev, &port->typec_caps);
+ if (!port->typec_port) {
+ err = -ENOMEM;
+ goto out_destroy_wq;
+ }
+
+ if (tcpc->config->alt_modes) {
+ struct typec_altmode_desc *paltmode = tcpc->config->alt_modes;
+
+ i = 0;
+ while (paltmode->svid && i < ARRAY_SIZE(port->port_altmode)) {
+ port->port_altmode[i] =
+ typec_port_register_altmode(port->typec_port,
+ paltmode);
+ if (!port->port_altmode[i]) {
+ tcpm_log(port,
+ "%s: failed to register port alternate mode 0x%x",
+ dev_name(dev), paltmode->svid);
+ break;
+ }
+ i++;
+ paltmode++;
+ }
+ }
+
+ tcpm_debugfs_init(port);
+ mutex_lock(&port->lock);
+ tcpm_init(port);
+ mutex_unlock(&port->lock);
+
+ tcpm_log(port, "%s: registered", dev_name(dev));
+ return port;
+
+out_destroy_wq:
+ destroy_workqueue(port->wq);
+ return ERR_PTR(err);
+}
+EXPORT_SYMBOL_GPL(tcpm_register_port);
+
+void tcpm_unregister_port(struct tcpm_port *port)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(port->port_altmode); i++)
+ typec_unregister_altmode(port->port_altmode[i]);
+ typec_unregister_port(port->typec_port);
+ tcpm_debugfs_exit(port);
+ destroy_workqueue(port->wq);
+}
+EXPORT_SYMBOL_GPL(tcpm_unregister_port);
+
+MODULE_AUTHOR("Guenter Roeck <groeck@chromium.org>");
+MODULE_DESCRIPTION("USB Type-C Port Manager");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/typec/tcpm.h b/drivers/staging/typec/tcpm.h
new file mode 100644
index 000000000000..969b365e6549
--- /dev/null
+++ b/drivers/staging/typec/tcpm.h
@@ -0,0 +1,150 @@
+/*
+ * Copyright 2015-2017 Google, Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __LINUX_USB_TCPM_H
+#define __LINUX_USB_TCPM_H
+
+#include <linux/bitops.h>
+#include <linux/usb/typec.h>
+#include "pd.h"
+
+enum typec_cc_status {
+ TYPEC_CC_OPEN,
+ TYPEC_CC_RA,
+ TYPEC_CC_RD,
+ TYPEC_CC_RP_DEF,
+ TYPEC_CC_RP_1_5,
+ TYPEC_CC_RP_3_0,
+};
+
+enum typec_cc_polarity {
+ TYPEC_POLARITY_CC1,
+ TYPEC_POLARITY_CC2,
+};
+
+/* Time to wait for TCPC to complete transmit */
+#define PD_T_TCPC_TX_TIMEOUT 100
+
+enum tcpm_transmit_status {
+ TCPC_TX_SUCCESS = 0,
+ TCPC_TX_DISCARDED = 1,
+ TCPC_TX_FAILED = 2,
+};
+
+enum tcpm_transmit_type {
+ TCPC_TX_SOP = 0,
+ TCPC_TX_SOP_PRIME = 1,
+ TCPC_TX_SOP_PRIME_PRIME = 2,
+ TCPC_TX_SOP_DEBUG_PRIME = 3,
+ TCPC_TX_SOP_DEBUG_PRIME_PRIME = 4,
+ TCPC_TX_HARD_RESET = 5,
+ TCPC_TX_CABLE_RESET = 6,
+ TCPC_TX_BIST_MODE_2 = 7
+};
+
+struct tcpc_config {
+ const u32 *src_pdo;
+ unsigned int nr_src_pdo;
+
+ const u32 *snk_pdo;
+ unsigned int nr_snk_pdo;
+
+ unsigned int max_snk_mv;
+ unsigned int max_snk_ma;
+ unsigned int max_snk_mw;
+ unsigned int operating_snk_mw;
+
+ enum typec_port_type type;
+ enum typec_role default_role;
+ bool try_role_hw; /* try.{src,snk} implemented in hardware */
+
+ struct typec_altmode_desc *alt_modes;
+};
+
+enum tcpc_usb_switch {
+ TCPC_USB_SWITCH_CONNECT,
+ TCPC_USB_SWITCH_DISCONNECT,
+ TCPC_USB_SWITCH_RESTORE, /* TODO FIXME */
+};
+
+/* Mux state attributes */
+#define TCPC_MUX_USB_ENABLED BIT(0) /* USB enabled */
+#define TCPC_MUX_DP_ENABLED BIT(1) /* DP enabled */
+#define TCPC_MUX_POLARITY_INVERTED BIT(2) /* Polarity inverted */
+
+/* Mux modes, decoded to attributes */
+enum tcpc_mux_mode {
+ TYPEC_MUX_NONE = 0, /* Open switch */
+ TYPEC_MUX_USB = TCPC_MUX_USB_ENABLED, /* USB only */
+ TYPEC_MUX_DP = TCPC_MUX_DP_ENABLED, /* DP only */
+ TYPEC_MUX_DOCK = TCPC_MUX_USB_ENABLED | /* Both USB and DP */
+ TCPC_MUX_DP_ENABLED,
+};
+
+struct tcpc_mux_dev {
+ int (*set)(struct tcpc_mux_dev *dev, enum tcpc_mux_mode mux_mode,
+ enum tcpc_usb_switch usb_config,
+ enum typec_cc_polarity polarity);
+ bool dfp_only;
+ void *priv_data;
+};
+
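+/*
+ * Low-level port controller driver interface. tcpm_register_port()
+ * verifies that the core callbacks below are provided.
+ */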
+struct tcpc_dev {
+ const struct tcpc_config *config;
+
+ int (*init)(struct tcpc_dev *dev);
+ int (*get_vbus)(struct tcpc_dev *dev);
+ int (*set_cc)(struct tcpc_dev *dev, enum typec_cc_status cc);
+ int (*get_cc)(struct tcpc_dev *dev, enum typec_cc_status *cc1,
+ enum typec_cc_status *cc2);
+ int (*set_polarity)(struct tcpc_dev *dev,
+ enum typec_cc_polarity polarity);
+ int (*set_vconn)(struct tcpc_dev *dev, bool on);
+ int (*set_vbus)(struct tcpc_dev *dev, bool on, bool charge);
+ int (*set_current_limit)(struct tcpc_dev *dev, u32 max_ma, u32 mv);
+ int (*set_pd_rx)(struct tcpc_dev *dev, bool on);
+ int (*set_roles)(struct tcpc_dev *dev, bool attached,
+ enum typec_role role, enum typec_data_role data);
+ int (*start_drp_toggling)(struct tcpc_dev *dev,
+ enum typec_cc_status cc);
+ int (*try_role)(struct tcpc_dev *dev, int role);
+ int (*pd_transmit)(struct tcpc_dev *dev, enum tcpm_transmit_type type,
+ const struct pd_message *msg);
+ struct tcpc_mux_dev *mux;
+};
+
+struct tcpm_port;
+
+struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc);
+void tcpm_unregister_port(struct tcpm_port *port);
+
+void tcpm_update_source_capabilities(struct tcpm_port *port, const u32 *pdo,
+ unsigned int nr_pdo);
+void tcpm_update_sink_capabilities(struct tcpm_port *port, const u32 *pdo,
+ unsigned int nr_pdo,
+ unsigned int max_snk_mv,
+ unsigned int max_snk_ma,
+ unsigned int max_snk_mw,
+ unsigned int operating_snk_mw);
+
+void tcpm_vbus_change(struct tcpm_port *port);
+void tcpm_cc_change(struct tcpm_port *port);
+void tcpm_pd_receive(struct tcpm_port *port,
+ const struct pd_message *msg);
+void tcpm_pd_transmit_complete(struct tcpm_port *port,
+ enum tcpm_transmit_status status);
+void tcpm_pd_hard_reset(struct tcpm_port *port);
+void tcpm_tcpc_reset(struct tcpm_port *port);
+
+#endif /* __LINUX_USB_TCPM_H */
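
For orientation, here is a minimal sketch of how a low-level TCPC driver would consume the interface above. Everything prefixed my_ is hypothetical, PDO_FIXED() is assumed to be provided by the pd.h header included above, TYPEC_PORT_DRP and TYPEC_SINK are taken from <linux/usb/typec.h>, and tcpm_register_port() is assumed to report failure via ERR_PTR(); the real consumers (e.g. a TCPCI driver) are not part of this diff.

  #include <linux/module.h>
  #include <linux/platform_device.h>
  #include "tcpm.h"

  struct my_tcpc {
          struct tcpc_dev tcpc;     /* callbacks handed to the TCPM core */
          struct tcpm_port *port;   /* handle from tcpm_register_port() */
  };

  static const u32 my_src_pdo[] = { PDO_FIXED(5000, 1500, 0) };
  static const u32 my_snk_pdo[] = { PDO_FIXED(5000, 3000, 0) };

  static const struct tcpc_config my_tcpc_config = {
          .src_pdo          = my_src_pdo,
          .nr_src_pdo       = ARRAY_SIZE(my_src_pdo),
          .snk_pdo          = my_snk_pdo,
          .nr_snk_pdo       = ARRAY_SIZE(my_snk_pdo),
          .max_snk_mv       = 5000,
          .max_snk_ma       = 3000,
          .max_snk_mw       = 15000,
          .operating_snk_mw = 2500,
          .type             = TYPEC_PORT_DRP,
          .default_role     = TYPEC_SINK,
  };

  static int my_tcpc_get_vbus(struct tcpc_dev *dev)
  {
          /* read the VBUS-present bit from the hardware here */
          return 0;
  }

  /* set_cc, get_cc, set_polarity, set_vconn, set_vbus, set_pd_rx,
   * set_roles and pd_transmit are filled in the same way.
   */

  static int my_tcpc_probe(struct platform_device *pdev)
  {
          struct my_tcpc *chip;

          chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
          if (!chip)
                  return -ENOMEM;

          chip->tcpc.config = &my_tcpc_config;
          chip->tcpc.get_vbus = my_tcpc_get_vbus;
          /* ... assign the remaining tcpc_dev callbacks here ... */

          chip->port = tcpm_register_port(&pdev->dev, &chip->tcpc);
          if (IS_ERR(chip->port))
                  return PTR_ERR(chip->port);

          platform_set_drvdata(pdev, chip);
          return 0;
  }

  static int my_tcpc_remove(struct platform_device *pdev)
  {
          struct my_tcpc *chip = platform_get_drvdata(pdev);

          tcpm_unregister_port(chip->port);
          return 0;
  }

From its interrupt handler such a driver would then feed events into the state machine via tcpm_cc_change(), tcpm_vbus_change(), tcpm_pd_receive() and tcpm_pd_transmit_complete().
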
diff --git a/drivers/staging/unisys/include/channel.h b/drivers/staging/unisys/include/channel.h
index 1c95302f7f1b..057421eeb1ae 100644
--- a/drivers/staging/unisys/include/channel.h
+++ b/drivers/staging/unisys/include/channel.h
@@ -20,35 +20,19 @@
#include <linux/io.h>
#include <linux/uuid.h>
-/*
- * Whenever this file is changed a corresponding change must be made in
- * the Console/ServicePart/visordiag_early/supervisor_channel.h file
- * which is needed for Linux kernel compiles. These two files must be
- * in sync.
- */
-
-/* define the following to prevent include nesting in kernel header
- * files of similar abbreviated content
- */
#define __SUPERVISOR_CHANNEL_H__
-#define SIGNATURE_16(A, B) ((A) | (B << 8))
+#define SIGNATURE_16(A, B) ((A) | ((B) << 8))
#define SIGNATURE_32(A, B, C, D) \
(SIGNATURE_16(A, B) | (SIGNATURE_16(C, D) << 16))
#define SIGNATURE_64(A, B, C, D, E, F, G, H) \
(SIGNATURE_32(A, B, C, D) | ((u64)(SIGNATURE_32(E, F, G, H)) << 32))
-#ifndef lengthof
-#define lengthof(TYPE, MEMBER) (sizeof(((TYPE *)0)->MEMBER))
-#endif
-#ifndef COVERQ
-#define COVERQ(v, d) (((v) + (d) - 1) / (d))
-#endif
#ifndef COVER
-#define COVER(v, d) ((d) * COVERQ(v, d))
+#define COVER(v, d) ((d) * DIV_ROUND_UP(v, d))
#endif
-#define ULTRA_CHANNEL_PROTOCOL_SIGNATURE SIGNATURE_32('E', 'C', 'N', 'L')
+#define ULTRA_CHANNEL_PROTOCOL_SIGNATURE SIGNATURE_32('E', 'C', 'N', 'L')
enum channel_serverstate {
CHANNELSRV_UNINITIALIZED = 0, /* channel is in an undefined state */
@@ -78,7 +62,7 @@ enum channel_clientstate {
#define SPAR_CHANNEL_SERVER_READY(ch) \
(readl(&(ch)->srv_state) == CHANNELSRV_READY)
-#define ULTRA_VALID_CHANNELCLI_TRANSITION(o, n) \
+#define ULTRA_VALID_CHANNELCLI_TRANSITION(o, n) \
(((((o) == CHANNELCLI_DETACHED) && ((n) == CHANNELCLI_DISABLED)) || \
(((o) == CHANNELCLI_ATTACHING) && ((n) == CHANNELCLI_DISABLED)) || \
(((o) == CHANNELCLI_ATTACHED) && ((n) == CHANNELCLI_DISABLED)) || \
@@ -100,7 +84,7 @@ enum channel_clientstate {
 /* throttling invalid boot channel state transition error due to client
* disabled
*/
-#define ULTRA_CLIERRORBOOT_THROTTLEMSG_DISABLED 0x01
+#define ULTRA_CLIERRORBOOT_THROTTLEMSG_DISABLED 0x01
 /* throttling invalid boot channel state transition error due to client
* not attached
@@ -108,7 +92,7 @@ enum channel_clientstate {
#define ULTRA_CLIERRORBOOT_THROTTLEMSG_NOTATTACHED 0x02
 /* throttling invalid boot channel state transition error due to busy channel */
-#define ULTRA_CLIERRORBOOT_THROTTLEMSG_BUSY 0x04
+#define ULTRA_CLIERRORBOOT_THROTTLEMSG_BUSY 0x04
/* Values for ULTRA_CHANNEL_PROTOCOL.Features: This define exists so
* that windows guest can look at the FeatureFlags in the io channel,
@@ -214,189 +198,65 @@ struct signal_queue_header {
u8 filler[12]; /* Pad out to 64 byte cacheline */
} __packed;
-#define spar_signal_init(chan, QHDRFLD, QDATAFLD, QDATATYPE, ver, typ) \
- do { \
- memset(&chan->QHDRFLD, 0, sizeof(chan->QHDRFLD)); \
- chan->QHDRFLD.version = ver; \
- chan->QHDRFLD.chtype = typ; \
- chan->QHDRFLD.size = sizeof(chan->QDATAFLD); \
- chan->QHDRFLD.signal_size = sizeof(QDATATYPE); \
- chan->QHDRFLD.sig_base_offset = (u64)(chan->QDATAFLD) - \
- (u64)(&chan->QHDRFLD); \
- chan->QHDRFLD.max_slots = \
- sizeof(chan->QDATAFLD) / sizeof(QDATATYPE); \
- chan->QHDRFLD.max_signals = chan->QHDRFLD.max_slots - 1;\
- } while (0)
-
/* Generic function useful for validating any type of channel when it is
* received by the client that will be accessing the channel.
* Note that <logCtx> is only needed for callers in the EFI environment, and
* is used to pass the EFI_DIAG_CAPTURE_PROTOCOL needed to log messages.
*/
static inline int
-spar_check_channel_client(void __iomem *ch,
- uuid_le expected_uuid,
- char *chname,
- u64 expected_min_bytes,
- u32 expected_version,
- u64 expected_signature)
+spar_check_channel(struct channel_header *ch,
+ uuid_le expected_uuid,
+ char *chname,
+ u64 expected_min_bytes,
+ u32 expected_version,
+ u64 expected_signature)
{
if (uuid_le_cmp(expected_uuid, NULL_UUID_LE) != 0) {
- uuid_le guid;
-
- memcpy_fromio(&guid,
- &((struct channel_header __iomem *)(ch))->chtype,
- sizeof(guid));
/* caller wants us to verify type GUID */
- if (uuid_le_cmp(guid, expected_uuid) != 0) {
+ if (uuid_le_cmp(ch->chtype, expected_uuid) != 0) {
pr_err("Channel mismatch on channel=%s(%pUL) field=type expected=%pUL actual=%pUL\n",
chname, &expected_uuid,
- &expected_uuid, &guid);
+ &expected_uuid, &ch->chtype);
return 0;
}
}
if (expected_min_bytes > 0) { /* verify channel size */
- unsigned long long bytes =
- readq(&((struct channel_header __iomem *)
- (ch))->size);
- if (bytes < expected_min_bytes) {
+ if (ch->size < expected_min_bytes) {
pr_err("Channel mismatch on channel=%s(%pUL) field=size expected=0x%-8.8Lx actual=0x%-8.8Lx\n",
chname, &expected_uuid,
- (unsigned long long)expected_min_bytes, bytes);
+ (unsigned long long)expected_min_bytes,
+ ch->size);
return 0;
}
}
if (expected_version > 0) { /* verify channel version */
- unsigned long ver = readl(&((struct channel_header __iomem *)
- (ch))->version_id);
- if (ver != expected_version) {
- pr_err("Channel mismatch on channel=%s(%pUL) field=version expected=0x%-8.8lx actual=0x%-8.8lx\n",
+ if (ch->version_id != expected_version) {
+ pr_err("Channel mismatch on channel=%s(%pUL) field=version expected=0x%-8.8lx actual=0x%-8.8x\n",
chname, &expected_uuid,
- (unsigned long)expected_version, ver);
+ (unsigned long)expected_version,
+ ch->version_id);
return 0;
}
}
if (expected_signature > 0) { /* verify channel signature */
- unsigned long long sig =
- readq(&((struct channel_header __iomem *)
- (ch))->signature);
- if (sig != expected_signature) {
- pr_err("Channel mismatch on channel=%s(%pUL) field=signature expected=0x%-8.8llx actual=0x%-8.8llx\n",
+ if (ch->signature != expected_signature) {
+ pr_err("Channel mismatch on channel=%s(%pUL) field=signature expected=0x%-8.8Lx actual=0x%-8.8Lx\n",
chname, &expected_uuid,
- expected_signature, sig);
+ expected_signature, ch->signature);
return 0;
}
}
return 1;
}
-/* Generic function useful for validating any type of channel when it is about
- * to be initialized by the server of the channel.
- * Note that <logCtx> is only needed for callers in the EFI environment, and
- * is used to pass the EFI_DIAG_CAPTURE_PROTOCOL needed to log messages.
- */
-static inline int spar_check_channel_server(uuid_le typeuuid, char *name,
- u64 expected_min_bytes,
- u64 actual_bytes)
-{
- if (expected_min_bytes > 0) /* verify channel size */
- if (actual_bytes < expected_min_bytes) {
- pr_err("Channel mismatch on channel=%s(%pUL) field=size expected=0x%-8.8llx actual=0x%-8.8llx\n",
- name, &typeuuid, expected_min_bytes,
- actual_bytes);
- return 0;
- }
- return 1;
-}
-
-/*
- * Routine Description:
- * Tries to insert the prebuilt signal pointed to by pSignal into the nth
- * Queue of the Channel pointed to by pChannel
- *
- * Parameters:
- * pChannel: (IN) points to the IO Channel
- * Queue: (IN) nth Queue of the IO Channel
- * pSignal: (IN) pointer to the signal
- *
- * Assumptions:
- * - pChannel, Queue and pSignal are valid.
- * - If insertion fails due to a full queue, the caller will determine the
- * retry policy (e.g. wait & try again, report an error, etc.).
- *
- * Return value: 1 if the insertion succeeds, 0 if the queue was
- * full.
- */
-
-unsigned char spar_signal_insert(struct channel_header __iomem *ch, u32 queue,
- void *sig);
-
-/*
- * Routine Description:
- * Removes one signal from Channel pChannel's nth Queue at the
- * time of the call and copies it into the memory pointed to by
- * pSignal.
- *
- * Parameters:
- * pChannel: (IN) points to the IO Channel
- * Queue: (IN) nth Queue of the IO Channel
- * pSignal: (IN) pointer to where the signals are to be copied
- *
- * Assumptions:
- * - pChannel and Queue are valid.
- * - pSignal points to a memory area large enough to hold queue's SignalSize
- *
- * Return value: 1 if the removal succeeds, 0 if the queue was
- * empty.
- */
-
-unsigned char spar_signal_remove(struct channel_header __iomem *ch, u32 queue,
- void *sig);
-
-/*
- * Routine Description:
- * Removes all signals present in Channel pChannel's nth Queue at the
- * time of the call and copies them into the memory pointed to by
- * pSignal. Returns the # of signals copied as the value of the routine.
- *
- * Parameters:
- * pChannel: (IN) points to the IO Channel
- * Queue: (IN) nth Queue of the IO Channel
- * pSignal: (IN) pointer to where the signals are to be copied
- *
- * Assumptions:
- * - pChannel and Queue are valid.
- * - pSignal points to a memory area large enough to hold Queue's MaxSignals
- * # of signals, each of which is Queue's SignalSize.
- *
- * Return value:
- * # of signals copied.
- */
-unsigned int spar_signal_remove_all(struct channel_header *ch, u32 queue,
- void *sig);
-
-/*
- * Routine Description:
- * Determine whether a signal queue is empty.
- *
- * Parameters:
- * pChannel: (IN) points to the IO Channel
- * Queue: (IN) nth Queue of the IO Channel
- *
- * Return value:
- * 1 if the signal queue is empty, 0 otherwise.
- */
-unsigned char spar_signalqueue_empty(struct channel_header __iomem *ch,
- u32 queue);
-
/*
* CHANNEL Guids
*/
/* {414815ed-c58c-11da-95a9-00e08161165f} */
#define SPAR_VHBA_CHANNEL_PROTOCOL_UUID \
- UUID_LE(0x414815ed, 0xc58c, 0x11da, \
- 0x95, 0xa9, 0x0, 0xe0, 0x81, 0x61, 0x16, 0x5f)
+ UUID_LE(0x414815ed, 0xc58c, 0x11da, \
+ 0x95, 0xa9, 0x0, 0xe0, 0x81, 0x61, 0x16, 0x5f)
static const uuid_le spar_vhba_channel_protocol_uuid =
SPAR_VHBA_CHANNEL_PROTOCOL_UUID;
#define SPAR_VHBA_CHANNEL_PROTOCOL_UUID_STR \
@@ -404,8 +264,8 @@ static const uuid_le spar_vhba_channel_protocol_uuid =
/* {8cd5994d-c58e-11da-95a9-00e08161165f} */
#define SPAR_VNIC_CHANNEL_PROTOCOL_UUID \
- UUID_LE(0x8cd5994d, 0xc58e, 0x11da, \
- 0x95, 0xa9, 0x0, 0xe0, 0x81, 0x61, 0x16, 0x5f)
+ UUID_LE(0x8cd5994d, 0xc58e, 0x11da, \
+ 0x95, 0xa9, 0x0, 0xe0, 0x81, 0x61, 0x16, 0x5f)
static const uuid_le spar_vnic_channel_protocol_uuid =
SPAR_VNIC_CHANNEL_PROTOCOL_UUID;
#define SPAR_VNIC_CHANNEL_PROTOCOL_UUID_STR \
@@ -413,21 +273,8 @@ static const uuid_le spar_vnic_channel_protocol_uuid =
/* {72120008-4AAB-11DC-8530-444553544200} */
#define SPAR_SIOVM_UUID \
- UUID_LE(0x72120008, 0x4AAB, 0x11DC, \
- 0x85, 0x30, 0x44, 0x45, 0x53, 0x54, 0x42, 0x00)
+ UUID_LE(0x72120008, 0x4AAB, 0x11DC, \
+ 0x85, 0x30, 0x44, 0x45, 0x53, 0x54, 0x42, 0x00)
static const uuid_le spar_siovm_uuid = SPAR_SIOVM_UUID;
-/* {5b52c5ac-e5f5-4d42-8dff-429eaecd221f} */
-#define SPAR_CONTROLDIRECTOR_CHANNEL_PROTOCOL_UUID \
- UUID_LE(0x5b52c5ac, 0xe5f5, 0x4d42, \
- 0x8d, 0xff, 0x42, 0x9e, 0xae, 0xcd, 0x22, 0x1f)
-
-static const uuid_le spar_controldirector_channel_protocol_uuid =
- SPAR_CONTROLDIRECTOR_CHANNEL_PROTOCOL_UUID;
-
-/* {b4e79625-aede-4eAA-9e11-D3eddcd4504c} */
-#define SPAR_DIAG_POOL_CHANNEL_PROTOCOL_UUID \
- UUID_LE(0xb4e79625, 0xaede, 0x4eaa, \
- 0x9e, 0x11, 0xd3, 0xed, 0xdc, 0xd4, 0x50, 0x4c)
-
#endif
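
Replacing the local COVERQ() helper with DIV_ROUND_UP() does not change any rounding result; as a quick worked example of the identity, rounding a 100-byte structure up to a 64-byte multiple gives:

  COVER(100, 64) == 64 * DIV_ROUND_UP(100, 64)
                 == 64 * ((100 + 64 - 1) / 64)
                 == 64 * 2
                 == 128

so users such as SIZEOF_CMDRSP in iochannel.h still get sizeof(struct uiscmdrsp) rounded up to the next 64-byte boundary.
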
diff --git a/drivers/staging/unisys/include/iochannel.h b/drivers/staging/unisys/include/iochannel.h
index 54f490090a59..9bde848a321c 100644
--- a/drivers/staging/unisys/include/iochannel.h
+++ b/drivers/staging/unisys/include/iochannel.h
@@ -50,17 +50,17 @@
#define ULTRA_VNIC_CHANNEL_PROTOCOL_VERSIONID 2
#define ULTRA_VSWITCH_CHANNEL_PROTOCOL_VERSIONID 1
-#define SPAR_VHBA_CHANNEL_OK_CLIENT(ch) \
- (spar_check_channel_client(ch, spar_vhba_channel_protocol_uuid, \
- "vhba", MIN_IO_CHANNEL_SIZE, \
- ULTRA_VHBA_CHANNEL_PROTOCOL_VERSIONID, \
- ULTRA_VHBA_CHANNEL_PROTOCOL_SIGNATURE))
-
-#define SPAR_VNIC_CHANNEL_OK_CLIENT(ch) \
- (spar_check_channel_client(ch, spar_vnic_channel_protocol_uuid, \
- "vnic", MIN_IO_CHANNEL_SIZE, \
- ULTRA_VNIC_CHANNEL_PROTOCOL_VERSIONID, \
- ULTRA_VNIC_CHANNEL_PROTOCOL_SIGNATURE))
+#define SPAR_VHBA_CHANNEL_OK_CLIENT(ch) \
+ (spar_check_channel(ch, spar_vhba_channel_protocol_uuid, \
+ "vhba", MIN_IO_CHANNEL_SIZE, \
+ ULTRA_VHBA_CHANNEL_PROTOCOL_VERSIONID, \
+ ULTRA_VHBA_CHANNEL_PROTOCOL_SIGNATURE))
+
+#define SPAR_VNIC_CHANNEL_OK_CLIENT(ch) \
+ (spar_check_channel(ch, spar_vnic_channel_protocol_uuid, \
+ "vnic", MIN_IO_CHANNEL_SIZE, \
+ ULTRA_VNIC_CHANNEL_PROTOCOL_VERSIONID, \
+ ULTRA_VNIC_CHANNEL_PROTOCOL_SIGNATURE))
/*
* Everything necessary to handle SCSI & NIC traffic between Guest Partition and
@@ -92,11 +92,11 @@ enum net_types {
*/
/* visornic -> uisnic */
NET_RCV, /* incoming packet received */
- /* uisnic -> virtpci */
+ /* uisnic -> visornic */
NET_XMIT, /* for outgoing net packets */
/* visornic -> uisnic */
NET_XMIT_DONE, /* outgoing packet xmitted */
- /* uisnic -> virtpci */
+ /* uisnic -> visornic */
NET_RCV_ENBDIS, /* enable/disable packet reception */
/* visornic -> uisnic */
NET_RCV_ENBDIS_ACK, /* acknowledge enable/disable packet */
@@ -200,7 +200,7 @@ struct uiscmdrsp_scsi {
int linuxstat; /* original Linux status used by Linux vdisk */
u8 scsistat; /* the scsi status */
u8 addlstat; /* non-scsi status */
-#define ADDL_SEL_TIMEOUT 4
+#define ADDL_SEL_TIMEOUT 4
 /* The following fields are needed to determine the result of the command. */
u8 sensebuf[MAX_SENSE_SIZE]; /* sense info in case cmd failed; */
@@ -308,8 +308,8 @@ struct net_pkt_xmt {
u8 valid; /* 1 = struct is valid - else ignore */
u8 hrawoffv; /* 1 = hwrafoff is valid */
u8 nhrawoffv; /* 1 = nhwrafoff is valid */
- u16 protocol; /* specifies packet protocol */
- u32 csum; /* value used to set skb->csum at IOPart */
+ __be16 protocol; /* specifies packet protocol */
+ __wsum csum; /* value used to set skb->csum at IOPart */
u32 hrawoff; /* value used to set skb->h.raw at IOPart */
/* hrawoff points to the start of the TRANSPORT LAYER HEADER */
u32 nhrawoff; /* value used to set skb->nh.raw at IOPart */
@@ -340,7 +340,7 @@ struct net_pkt_xmtdone {
#define RCVPOST_BUF_SIZE 4032
#define MAX_NET_RCV_CHAIN \
((VISOR_ETH_MAX_MTU + ETH_HLEN + RCVPOST_BUF_SIZE - 1) \
- / RCVPOST_BUF_SIZE)
+ / RCVPOST_BUF_SIZE)
/*
* rcv buf size must be large enough to include ethernet data len + ethernet
@@ -441,7 +441,7 @@ struct uiscmdrsp_scsitaskmgmt {
/* Result of taskmgmt command - set by IOPart - values are: */
char result;
-#define TASK_MGMT_FAILED 0
+#define TASK_MGMT_FAILED 0
} __packed;
/* Used by uissd to send disk add/remove notifications to Guest. */
@@ -496,11 +496,11 @@ struct uiscmdrsp {
char cmdtype;
/* Describes what type of information is in the struct */
-#define CMD_SCSI_TYPE 1
-#define CMD_NET_TYPE 2
-#define CMD_SCSITASKMGMT_TYPE 3
-#define CMD_NOTIFYGUEST_TYPE 4
-#define CMD_VDISKMGMT_TYPE 5
+#define CMD_SCSI_TYPE 1
+#define CMD_NET_TYPE 2
+#define CMD_SCSITASKMGMT_TYPE 3
+#define CMD_NOTIFYGUEST_TYPE 4
+#define CMD_VDISKMGMT_TYPE 5
union {
struct uiscmdrsp_scsi scsi;
struct uiscmdrsp_net net;
@@ -548,44 +548,7 @@ struct spar_io_channel_protocol {
#define SIZEOF_CMDRSP (COVER(sizeof(struct uiscmdrsp), 64))
/* Use 4K page sizes when passing page info between Guest and IOPartition. */
-#define PI_PAGE_SIZE 0x1000
-#define PI_PAGE_MASK 0x0FFF
-
-/* Returns next non-zero index on success or 0 on failure (i.e. out of room). */
-static inline u16
-add_physinfo_entries(u64 inp_pfn, u16 inp_off, u32 inp_len, u16 index,
- u16 max_pi_arr_entries, struct phys_info pi_arr[])
-{
- u32 len;
- u16 i, firstlen;
-
- firstlen = PI_PAGE_SIZE - inp_off;
- if (inp_len <= firstlen) {
- /* The input entry spans only one page - add as is. */
- if (index >= max_pi_arr_entries)
- return 0;
- pi_arr[index].pi_pfn = inp_pfn;
- pi_arr[index].pi_off = (u16)inp_off;
- pi_arr[index].pi_len = (u16)inp_len;
- return index + 1;
- }
-
- /* This entry spans multiple pages. */
- for (len = inp_len, i = 0; len;
- len -= pi_arr[index + i].pi_len, i++) {
- if (index + i >= max_pi_arr_entries)
- return 0;
- pi_arr[index + i].pi_pfn = inp_pfn + i;
- if (i == 0) {
- pi_arr[index].pi_off = inp_off;
- pi_arr[index].pi_len = firstlen;
- } else {
- pi_arr[index + i].pi_off = 0;
- pi_arr[index + i].pi_len =
- (u16)MINNUM(len, (u32)PI_PAGE_SIZE);
- }
- }
- return index + i;
-}
+#define PI_PAGE_SIZE 0x1000
+#define PI_PAGE_MASK 0x0FFF
#endif /* __IOCHANNEL_H__ */
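
With spar_check_channel() now taking a mapped struct channel_header * instead of an __iomem cookie, the vhba client-side check above reduces to something like the following hypothetical probe-path fragment; the local variable name channel is illustrative, and visorchannel_get_header() is the existing accessor also used later in this series:

  struct channel_header *hdr = visorchannel_get_header(channel);

  if (!SPAR_VHBA_CHANNEL_OK_CLIENT(hdr))
          return -EINVAL;   /* wrong GUID, size, version or signature */
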
diff --git a/drivers/staging/unisys/include/visorbus.h b/drivers/staging/unisys/include/visorbus.h
index 03d56f818a86..de0635542fbd 100644
--- a/drivers/staging/unisys/include/visorbus.h
+++ b/drivers/staging/unisys/include/visorbus.h
@@ -172,15 +172,15 @@ struct visor_device {
#define to_visor_device(x) container_of(x, struct visor_device, device)
-int visorbus_register_visor_driver(struct visor_driver *);
-void visorbus_unregister_visor_driver(struct visor_driver *);
+int visorbus_register_visor_driver(struct visor_driver *drv);
+void visorbus_unregister_visor_driver(struct visor_driver *drv);
int visorbus_read_channel(struct visor_device *dev,
unsigned long offset, void *dest,
unsigned long nbytes);
int visorbus_write_channel(struct visor_device *dev,
unsigned long offset, void *src,
unsigned long nbytes);
-void visorbus_enable_channel_interrupts(struct visor_device *dev);
+int visorbus_enable_channel_interrupts(struct visor_device *dev);
void visorbus_disable_channel_interrupts(struct visor_device *dev);
/* Levels of severity for diagnostic events, in order from lowest severity to
@@ -209,7 +209,7 @@ int visorchannel_signalinsert(struct visorchannel *channel, u32 queue,
bool visorchannel_signalempty(struct visorchannel *channel, u32 queue);
uuid_le visorchannel_get_uuid(struct visorchannel *channel);
-#define BUS_ROOT_DEVICE UINT_MAX
+#define BUS_ROOT_DEVICE UINT_MAX
struct visor_device *visorbus_get_device_by_id(u32 bus_no, u32 dev_no,
struct visor_device *from);
#endif
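
Since visorbus_enable_channel_interrupts() now returns an int, function drivers can propagate the failure instead of silently ending up without polling; a hypothetical probe-time fragment (names beginning with my_ are illustrative only):

  static int my_visor_probe(struct visor_device *dev)
  {
          int err;

          /* ... device-specific setup ... */

          err = visorbus_enable_channel_interrupts(dev);
          if (err) {
                  dev_err(&dev->device,
                          "enabling channel interrupts failed: %d\n", err);
                  return err;
          }
          return 0;
  }
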
diff --git a/drivers/staging/unisys/visorbus/controlvmchannel.h b/drivers/staging/unisys/visorbus/controlvmchannel.h
index 859345243afe..274f72422166 100644
--- a/drivers/staging/unisys/visorbus/controlvmchannel.h
+++ b/drivers/staging/unisys/visorbus/controlvmchannel.h
@@ -19,9 +19,9 @@
#include "channel.h"
/* {2B3C2D10-7EF5-4ad8-B966-3448B7386B3D} */
-#define SPAR_CONTROLVM_CHANNEL_PROTOCOL_UUID \
- UUID_LE(0x2b3c2d10, 0x7ef5, 0x4ad8, \
- 0xb9, 0x66, 0x34, 0x48, 0xb7, 0x38, 0x6b, 0x3d)
+#define SPAR_CONTROLVM_CHANNEL_PROTOCOL_UUID \
+ UUID_LE(0x2b3c2d10, 0x7ef5, 0x4ad8, \
+ 0xb9, 0x66, 0x34, 0x48, 0xb7, 0x38, 0x6b, 0x3d)
#define ULTRA_CONTROLVM_CHANNEL_PROTOCOL_SIGNATURE \
ULTRA_CHANNEL_PROTOCOL_SIGNATURE
@@ -33,24 +33,24 @@
* software. Note that you can usually add fields to the END of the
* channel struct withOUT needing to increment this.
*/
-#define ULTRA_CONTROLVM_CHANNEL_PROTOCOL_VERSIONID 1
+#define ULTRA_CONTROLVM_CHANNEL_PROTOCOL_VERSIONID 1
-#define SPAR_CONTROLVM_CHANNEL_OK_CLIENT(ch) \
- spar_check_channel_client(ch, \
- SPAR_CONTROLVM_CHANNEL_PROTOCOL_UUID, \
- "controlvm", \
- sizeof(struct spar_controlvm_channel_protocol), \
- ULTRA_CONTROLVM_CHANNEL_PROTOCOL_VERSIONID, \
- ULTRA_CONTROLVM_CHANNEL_PROTOCOL_SIGNATURE)
+#define SPAR_CONTROLVM_CHANNEL_OK_CLIENT(ch) \
+ (spar_check_channel(ch, \
+ SPAR_CONTROLVM_CHANNEL_PROTOCOL_UUID, \
+ "controlvm", \
+ sizeof(struct spar_controlvm_channel_protocol), \
+ ULTRA_CONTROLVM_CHANNEL_PROTOCOL_VERSIONID, \
+ ULTRA_CONTROLVM_CHANNEL_PROTOCOL_SIGNATURE))
/* Defines for various channel queues */
-#define CONTROLVM_QUEUE_REQUEST 0
-#define CONTROLVM_QUEUE_RESPONSE 1
-#define CONTROLVM_QUEUE_EVENT 2
-#define CONTROLVM_QUEUE_ACK 3
+#define CONTROLVM_QUEUE_REQUEST 0
+#define CONTROLVM_QUEUE_RESPONSE 1
+#define CONTROLVM_QUEUE_EVENT 2
+#define CONTROLVM_QUEUE_ACK 3
/* Max num of messages stored during IOVM creation to be reused after crash */
-#define CONTROLVM_CRASHMSG_MAX 2
+#define CONTROLVM_CRASHMSG_MAX 2
struct spar_segment_state {
/* Bit 0: May enter other states */
@@ -69,10 +69,12 @@ struct spar_segment_state {
u16 ready:1;
/* Bit 7: resource is configured and operating */
u16 operating:1;
+ /* Natural alignment */
+ u16 reserved:8;
/* Note: don't use high bit unless we need to switch to ushort
* which is non-compliant
*/
-};
+} __packed;
static const struct spar_segment_state segment_state_running = {
1, 1, 1, 0, 1, 1, 1, 1
@@ -145,15 +147,7 @@ struct irq_info {
*/
u8 recv_irq_shared;
u8 reserved[3]; /* Natural alignment purposes */
-};
-
-struct pci_id {
- u16 domain;
- u8 bus;
- u8 slot;
- u8 func;
- u8 reserved[3]; /* Natural alignment purposes */
-};
+} __packed;
struct efi_spar_indication {
u64 boot_to_fw_ui:1; /* Bit 0: Stop in uefi ui */
@@ -161,7 +155,8 @@ struct efi_spar_indication {
u64 clear_cmos:1; /* Bit 2: Clear CMOS */
u64 boot_to_tool:1; /* Bit 3: Run install tool */
/* remaining bits are available */
-};
+ u64 reserved:60; /* Natural alignment */
+} __packed;
enum ultra_chipset_feature {
ULTRA_CHIPSET_FEATURE_REPLY = 0x00000001,
@@ -203,7 +198,9 @@ struct controlvm_message_header {
u32 preserve:1;
/* =1 the DiagWriter is active in the Diagnostic Partition */
u32 writer_in_diag:1;
- } flags;
+ /* Natural alignment */
+ u32 reserve:25;
+ } __packed flags;
/* Natural alignment */
u32 reserved;
/* Identifies the particular message instance */
@@ -216,7 +213,7 @@ struct controlvm_message_header {
/* Actual number of bytes of payload area to copy between IO/Command */
u32 payload_bytes;
/* if non-zero, there is a payload to copy. */
-};
+} __packed;
struct controlvm_packet_device_create {
u32 bus_no; /* bus # (0..n-1) from the msg receiver's end */
@@ -229,24 +226,24 @@ struct controlvm_packet_device_create {
uuid_le data_type_uuid; /* specifies format of data in channel */
uuid_le dev_inst_uuid; /* instance guid for the device */
struct irq_info intr; /* specifies interrupt information */
-}; /* for CONTROLVM_DEVICE_CREATE */
+} __packed; /* for CONTROLVM_DEVICE_CREATE */
struct controlvm_packet_device_configure {
/* bus # (0..n-1) from the msg receiver's perspective */
u32 bus_no;
/* Control uses header SegmentIndex field to access bus number... */
u32 dev_no; /* bus-relative (0..n-1) device number */
-} ; /* for CONTROLVM_DEVICE_CONFIGURE */
+} __packed; /* for CONTROLVM_DEVICE_CONFIGURE */
struct controlvm_message_device_create {
struct controlvm_message_header header;
struct controlvm_packet_device_create packet;
-}; /* total 128 bytes */
+} __packed; /* total 128 bytes */
struct controlvm_message_device_configure {
struct controlvm_message_header header;
struct controlvm_packet_device_configure packet;
-}; /* total 56 bytes */
+} __packed; /* total 56 bytes */
/* This is the format for a message in any ControlVm queue. */
struct controlvm_message_packet {
@@ -264,12 +261,12 @@ struct controlvm_message_packet {
/* indicates format of data in bus channel*/
uuid_le bus_data_type_uuid;
uuid_le bus_inst_uuid; /* instance uuid for the bus */
- } create_bus; /* for CONTROLVM_BUS_CREATE */
+ } __packed create_bus; /* for CONTROLVM_BUS_CREATE */
struct {
/* bus # (0..n-1) from the msg receiver's perspective */
u32 bus_no;
u32 reserved; /* Natural alignment purposes */
- } destroy_bus; /* for CONTROLVM_BUS_DESTROY */
+ } __packed destroy_bus; /* for CONTROLVM_BUS_DESTROY */
struct {
/* bus # (0..n-1) from the receiver's perspective */
u32 bus_no;
@@ -283,26 +280,27 @@ struct controlvm_message_packet {
* notifications. The corresponding
* sendBusInterruptHandle is kept in CP.
*/
- } configure_bus; /* for CONTROLVM_BUS_CONFIGURE */
+ } __packed configure_bus; /* for CONTROLVM_BUS_CONFIGURE */
/* for CONTROLVM_DEVICE_CREATE */
struct controlvm_packet_device_create create_device;
struct {
/* bus # (0..n-1) from the msg receiver's perspective */
u32 bus_no;
u32 dev_no; /* bus-relative (0..n-1) device # */
- } destroy_device; /* for CONTROLVM_DEVICE_DESTROY */
+ } __packed destroy_device; /* for CONTROLVM_DEVICE_DESTROY */
/* for CONTROLVM_DEVICE_CONFIGURE */
struct controlvm_packet_device_configure configure_device;
struct {
/* bus # (0..n-1) from the msg receiver's perspective */
u32 bus_no;
u32 dev_no; /* bus-relative (0..n-1) device # */
- } reconfigure_device; /* for CONTROLVM_DEVICE_RECONFIGURE */
+ } __packed reconfigure_device;
+ /* for CONTROLVM_DEVICE_RECONFIGURE */
struct {
u32 bus_no;
struct spar_segment_state state;
u8 reserved[2]; /* Natural alignment purposes */
- } bus_change_state; /* for CONTROLVM_BUS_CHANGESTATE */
+ } __packed bus_change_state; /* for CONTROLVM_BUS_CHANGESTATE */
struct {
u32 bus_no;
u32 dev_no;
@@ -310,15 +308,18 @@ struct controlvm_message_packet {
struct {
/* =1 if message is for a physical device */
u32 phys_device:1;
- } flags;
+ u32 reserved:31; /* Natural alignment */
+ u32 reserved1; /* Natural alignment */
+ } __packed flags;
u8 reserved[2]; /* Natural alignment purposes */
- } device_change_state; /* for CONTROLVM_DEVICE_CHANGESTATE */
+ } __packed device_change_state;
+ /* for CONTROLVM_DEVICE_CHANGESTATE */
struct {
u32 bus_no;
u32 dev_no;
struct spar_segment_state state;
u8 reserved[6]; /* Natural alignment purposes */
- } device_change_state_event;
+ } __packed device_change_state_event;
/* for CONTROLVM_DEVICE_CHANGESTATE_EVENT */
struct {
/* indicates the max number of busses */
@@ -327,11 +328,12 @@ struct controlvm_message_packet {
u32 switch_count;
enum ultra_chipset_feature features;
u32 platform_number; /* Platform Number */
- } init_chipset; /* for CONTROLVM_CHIPSET_INIT */
+ } __packed init_chipset; /* for CONTROLVM_CHIPSET_INIT */
struct {
u32 options; /* reserved */
u32 test; /* bit 0 set to run embedded selftest */
- } chipset_selftest; /* for CONTROLVM_CHIPSET_SELFTEST */
+ } __packed chipset_selftest;
+ /* for CONTROLVM_CHIPSET_SELFTEST */
/* a physical address of something, that can be dereferenced
* by the receiver of this ControlVm command
*/
@@ -339,13 +341,13 @@ struct controlvm_message_packet {
/* a handle of something (depends on command id) */
u64 handle;
};
-};
+} __packed;
/* All messages in any ControlVm queue have this layout. */
struct controlvm_message {
struct controlvm_message_header hdr;
struct controlvm_message_packet cmd;
-};
+} __packed;
struct spar_controlvm_channel_protocol {
struct channel_header header;
@@ -432,7 +434,7 @@ struct spar_controlvm_channel_protocol {
/* Message stored during IOVM creation to be reused after crash */
struct controlvm_message saved_crash_msg[CONTROLVM_CRASHMSG_MAX];
-};
+} __packed;
/* The following header will be located at the beginning of PayloadVmOffset for
* various ControlVm commands. The receiver of a ControlVm command with a
@@ -458,81 +460,81 @@ struct spar_controlvm_parameters_header {
uuid_le id;
u32 revision;
u32 reserved; /* Natural alignment */
-};
+} __packed;
/* General Errors------------------------------------------------------[0-99] */
-#define CONTROLVM_RESP_SUCCESS 0
-#define CONTROLVM_RESP_ALREADY_DONE 1
-#define CONTROLVM_RESP_IOREMAP_FAILED 2
-#define CONTROLVM_RESP_KMALLOC_FAILED 3
-#define CONTROLVM_RESP_ID_UNKNOWN 4
-#define CONTROLVM_RESP_ID_INVALID_FOR_CLIENT 5
+#define CONTROLVM_RESP_SUCCESS 0
+#define CONTROLVM_RESP_ALREADY_DONE 1
+#define CONTROLVM_RESP_IOREMAP_FAILED 2
+#define CONTROLVM_RESP_KMALLOC_FAILED 3
+#define CONTROLVM_RESP_ID_UNKNOWN 4
+#define CONTROLVM_RESP_ID_INVALID_FOR_CLIENT 5
/* CONTROLVM_INIT_CHIPSET-------------------------------------------[100-199] */
-#define CONTROLVM_RESP_CLIENT_SWITCHCOUNT_NONZERO 100
-#define CONTROLVM_RESP_EXPECTED_CHIPSET_INIT 101
+#define CONTROLVM_RESP_CLIENT_SWITCHCOUNT_NONZERO 100
+#define CONTROLVM_RESP_EXPECTED_CHIPSET_INIT 101
/* Maximum Limit----------------------------------------------------[200-299] */
-#define CONTROLVM_RESP_ERROR_MAX_BUSES 201 /* BUS_CREATE */
-#define CONTROLVM_RESP_ERROR_MAX_DEVICES 202 /* DEVICE_CREATE */
+#define CONTROLVM_RESP_ERROR_MAX_BUSES 201 /* BUS_CREATE */
+#define CONTROLVM_RESP_ERROR_MAX_DEVICES 202 /* DEVICE_CREATE */
/* Payload and Parameter Related------------------------------------[400-499] */
-#define CONTROLVM_RESP_PAYLOAD_INVALID 400 /* SWITCH_ATTACHEXTPORT,
- * DEVICE_CONFIGURE
- */
-#define CONTROLVM_RESP_INITIATOR_PARAMETER_INVALID 401 /* Multiple */
-#define CONTROLVM_RESP_TARGET_PARAMETER_INVALID 402 /* DEVICE_CONFIGURE */
-#define CONTROLVM_RESP_CLIENT_PARAMETER_INVALID 403 /* DEVICE_CONFIGURE */
+#define CONTROLVM_RESP_PAYLOAD_INVALID 400 /* SWITCH_ATTACHEXTPORT,
+ * DEVICE_CONFIGURE
+ */
+#define CONTROLVM_RESP_INITIATOR_PARAMETER_INVALID 401 /* Multiple */
+#define CONTROLVM_RESP_TARGET_PARAMETER_INVALID 402 /* DEVICE_CONFIGURE */
+#define CONTROLVM_RESP_CLIENT_PARAMETER_INVALID 403 /* DEVICE_CONFIGURE */
/* Specified[Packet Structure] Value-------------------------------[500-599] */
-#define CONTROLVM_RESP_BUS_INVALID 500 /* SWITCH_ATTACHINTPORT,
- * BUS_CONFIGURE,
- * DEVICE_CREATE,
- * DEVICE_CONFIG
- * DEVICE_DESTROY
- */
-#define CONTROLVM_RESP_DEVICE_INVALID 501 /* SWITCH_ATTACHINTPORT */
- /* DEVICE_CREATE,
- * DEVICE_CONFIGURE,
- * DEVICE_DESTROY
- */
-#define CONTROLVM_RESP_CHANNEL_INVALID 502 /* DEVICE_CREATE,
- * DEVICE_CONFIGURE
- */
+#define CONTROLVM_RESP_BUS_INVALID 500 /* SWITCH_ATTACHINTPORT,
+ * BUS_CONFIGURE,
+ * DEVICE_CREATE,
+ * DEVICE_CONFIG
+ * DEVICE_DESTROY
+ */
+#define CONTROLVM_RESP_DEVICE_INVALID 501 /* SWITCH_ATTACHINTPORT*/
+ /* DEVICE_CREATE,
+ * DEVICE_CONFIGURE,
+ * DEVICE_DESTROY
+ */
+#define CONTROLVM_RESP_CHANNEL_INVALID 502 /* DEVICE_CREATE,
+ * DEVICE_CONFIGURE
+ */
/* Partition Driver Callback Interface----------------------[600-699] */
-#define CONTROLVM_RESP_VIRTPCI_DRIVER_FAILURE 604 /* BUS_CREATE,
- * BUS_DESTROY,
- * DEVICE_CREATE,
- * DEVICE_DESTROY
- */
+#define CONTROLVM_RESP_VIRTPCI_DRIVER_FAILURE 604 /* BUS_CREATE,
+ * BUS_DESTROY,
+ * DEVICE_CREATE,
+ * DEVICE_DESTROY
+ */
/* Unable to invoke VIRTPCI callback */
-#define CONTROLVM_RESP_VIRTPCI_DRIVER_CALLBACK_ERROR 605 /* BUS_CREATE,
- * BUS_DESTROY,
- * DEVICE_CREATE,
- * DEVICE_DESTROY
- */
+#define CONTROLVM_RESP_VIRTPCI_DRIVER_CALLBACK_ERROR 605 /* BUS_CREATE,
+ * BUS_DESTROY,
+ * DEVICE_CREATE,
+ * DEVICE_DESTROY
+ */
/* VIRTPCI Callback returned error */
-#define CONTROLVM_RESP_GENERIC_DRIVER_CALLBACK_ERROR 606
- /* SWITCH_ATTACHEXTPORT,
- * SWITCH_DETACHEXTPORT
- * DEVICE_CONFIGURE
- */
+#define CONTROLVM_RESP_GENERIC_DRIVER_CALLBACK_ERROR 606
+ /* SWITCH_ATTACHEXTPORT,
+ * SWITCH_DETACHEXTPORT
+ * DEVICE_CONFIGURE
+ */
/* generic device callback returned error */
/* Bus Related------------------------------------------------------[700-799] */
-#define CONTROLVM_RESP_ERROR_BUS_DEVICE_ATTACHED 700 /* BUS_DESTROY */
+#define CONTROLVM_RESP_ERROR_BUS_DEVICE_ATTACHED 700 /* BUS_DESTROY */
/* Channel Related--------------------------------------------------[800-899] */
-#define CONTROLVM_RESP_CHANNEL_TYPE_UNKNOWN 800 /* GET_CHANNELINFO,
- * DEVICE_DESTROY
- */
-#define CONTROLVM_RESP_CHANNEL_SIZE_TOO_SMALL 801 /* DEVICE_CREATE */
+#define CONTROLVM_RESP_CHANNEL_TYPE_UNKNOWN 800 /* GET_CHANNELINFO,
+ * DEVICE_DESTROY
+ */
+#define CONTROLVM_RESP_CHANNEL_SIZE_TOO_SMALL 801 /* DEVICE_CREATE */
/* Chipset Shutdown Related---------------------------------------[1000-1099] */
-#define CONTROLVM_RESP_CHIPSET_SHUTDOWN_FAILED 1000
-#define CONTROLVM_RESP_CHIPSET_SHUTDOWN_ALREADY_ACTIVE 1001
+#define CONTROLVM_RESP_CHIPSET_SHUTDOWN_FAILED 1000
+#define CONTROLVM_RESP_CHIPSET_SHUTDOWN_ALREADY_ACTIVE 1001
/* Chipset Stop Related-------------------------------------------[1100-1199] */
-#define CONTROLVM_RESP_CHIPSET_STOP_FAILED_BUS 1100
-#define CONTROLVM_RESP_CHIPSET_STOP_FAILED_SWITCH 1101
+#define CONTROLVM_RESP_CHIPSET_STOP_FAILED_BUS 1100
+#define CONTROLVM_RESP_CHIPSET_STOP_FAILED_SWITCH 1101
/* Device Related-------------------------------------------------[1400-1499] */
-#define CONTROLVM_RESP_DEVICE_UDEV_TIMEOUT 1400
+#define CONTROLVM_RESP_DEVICE_UDEV_TIMEOUT 1400
#endif /* __CONTROLVMCHANNEL_H__ */
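
Now that the message structures are explicitly __packed with their reserved bits spelled out, the wire sizes can be pinned down at compile time. A hypothetical self-check, placed in some init function; the 128- and 56-byte figures come straight from the comments in this header:

  BUILD_BUG_ON(sizeof(struct controlvm_message_device_create) != 128);
  BUILD_BUG_ON(sizeof(struct controlvm_message_device_configure) != 56);
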
diff --git a/drivers/staging/unisys/visorbus/vbuschannel.h b/drivers/staging/unisys/visorbus/vbuschannel.h
index b0df26155d02..f0ef5ecf3d7d 100644
--- a/drivers/staging/unisys/visorbus/vbuschannel.h
+++ b/drivers/staging/unisys/visorbus/vbuschannel.h
@@ -28,8 +28,8 @@
/* {193b331b-c58f-11da-95a9-00e08161165f} */
#define SPAR_VBUS_CHANNEL_PROTOCOL_UUID \
- UUID_LE(0x193b331b, 0xc58f, 0x11da, \
- 0x95, 0xa9, 0x0, 0xe0, 0x81, 0x61, 0x16, 0x5f)
+ UUID_LE(0x193b331b, 0xc58f, 0x11da, \
+ 0x95, 0xa9, 0x0, 0xe0, 0x81, 0x61, 0x16, 0x5f)
static const uuid_le spar_vbus_channel_protocol_uuid =
SPAR_VBUS_CHANNEL_PROTOCOL_UUID;
@@ -43,16 +43,6 @@ static const uuid_le spar_vbus_channel_protocol_uuid =
*/
#define SPAR_VBUS_CHANNEL_PROTOCOL_VERSIONID 1
-#define SPAR_VBUS_CHANNEL_OK_CLIENT(ch) \
- spar_check_channel_client(ch, \
- spar_vbus_channel_protocol_uuid, \
- "vbus", \
- sizeof(struct spar_vbus_channel_protocol),\
- SPAR_VBUS_CHANNEL_PROTOCOL_VERSIONID, \
- SPAR_VBUS_CHANNEL_PROTOCOL_SIGNATURE)
-
-#pragma pack(push, 1) /* both GCC and VC now allow this pragma */
-
/*
* An array of this struct is present in the channel area for each vbus.
* (See vbuschannel.h.)
@@ -64,42 +54,7 @@ struct ultra_vbus_deviceinfo {
u8 drvname[16]; /* driver .sys file name */
u8 infostrs[96]; /* kernel version */
u8 reserved[128]; /* pad size to 256 bytes */
-};
-
-/**
- * vbuschannel_print_devinfo() - format a struct ultra_vbus_deviceinfo
- * and write it to a seq_file
- * @devinfo: the struct ultra_vbus_deviceinfo to format
- * @seq: seq_file to write to
- * @devix: the device index to be included in the output data, or -1 if no
- * device index is to be included
- *
- * Reads @devInfo, and writes it in human-readable notation to @seq.
- */
-static inline void
-vbuschannel_print_devinfo(struct ultra_vbus_deviceinfo *devinfo,
- struct seq_file *seq, int devix)
-{
- if (!isprint(devinfo->devtype[0]))
- return; /* uninitialized vbus device entry */
-
- if (devix >= 0)
- seq_printf(seq, "[%d]", devix);
- else
- /* vbus device entry is for bus or chipset */
- seq_puts(seq, " ");
-
- /*
- * Note: because the s-Par back-end is free to scribble in this area,
- * we never assume '\0'-termination.
- */
- seq_printf(seq, "%-*.*s ", (int)sizeof(devinfo->devtype),
- (int)sizeof(devinfo->devtype), devinfo->devtype);
- seq_printf(seq, "%-*.*s ", (int)sizeof(devinfo->drvname),
- (int)sizeof(devinfo->drvname), devinfo->drvname);
- seq_printf(seq, "%.*s\n", (int)sizeof(devinfo->infostrs),
- devinfo->infostrs);
-}
+} __packed;
struct spar_vbus_headerinfo {
u32 struct_bytes; /* size of this struct in bytes */
@@ -113,7 +68,7 @@ struct spar_vbus_headerinfo {
u32 dev_info_offset; /* byte offset from beginning of this struct */
/* to the DevInfo array (below) */
u8 reserved[104];
-};
+} __packed;
struct spar_vbus_channel_protocol {
struct channel_header channel_header; /* initialized by server */
@@ -125,8 +80,6 @@ struct spar_vbus_channel_protocol {
/* describes client bus device and driver */
struct ultra_vbus_deviceinfo dev_info[0];
/* describes client device and driver for each device on the bus */
-};
-
-#pragma pack(pop)
+} __packed;
#endif
diff --git a/drivers/staging/unisys/visorbus/visorbus_main.c b/drivers/staging/unisys/visorbus/visorbus_main.c
index 55f29ae8e015..a692561c81c8 100644
--- a/drivers/staging/unisys/visorbus/visorbus_main.c
+++ b/drivers/staging/unisys/visorbus/visorbus_main.c
@@ -19,21 +19,16 @@
#include "visorbus.h"
#include "visorbus_private.h"
-#include "vmcallinterface.h"
#define MYDRVNAME "visorbus"
-/* module parameters */
-static int visorbus_forcematch;
-static int visorbus_forcenomatch;
-
 /* Display string that is guaranteed to be no longer than 99 characters */
#define LINESIZE 99
#define CURRENT_FILE_PC VISOR_BUS_PC_visorbus_main_c
-#define POLLJIFFIES_NORMALCHANNEL 10
+#define POLLJIFFIES_NORMALCHANNEL 10
-static int busreg_rc = -ENODEV; /* stores the result from bus registration */
+static bool initialized; /* stores whether bus_registration was successful */
static struct dentry *visorbus_debugfs_dir;
/*
@@ -87,12 +82,10 @@ visorbus_uevent(struct device *xdev, struct kobj_uevent_env *env)
dev = to_visor_device(xdev);
guid = visorchannel_get_uuid(dev->visorchannel);
- if (add_uevent_var(env, "MODALIAS=visorbus:%pUl", &guid))
- return -ENOMEM;
- return 0;
+ return add_uevent_var(env, "MODALIAS=visorbus:%pUl", &guid);
}
-/**
+/*
* visorbus_match() - called automatically upon adding a visor_device
* (device_add), or adding a visor_driver
* (visorbus_register_visor_driver)
@@ -113,10 +106,6 @@ visorbus_match(struct device *xdev, struct device_driver *xdrv)
drv = to_visor_driver(xdrv);
channel_type = visorchannel_get_uuid(dev->visorchannel);
- if (visorbus_forcematch)
- return 1;
- if (visorbus_forcenomatch)
- return 0;
if (!drv->channel_types)
return 0;
@@ -142,10 +131,10 @@ struct bus_type visorbus_type = {
.dev_groups = visorbus_dev_groups,
};
-/**
- * visorbus_releae_busdevice() - called when device_unregister() is called for
- * the bus device instance, after all other tasks
- * involved with destroying the dev are complete
+/*
+ * visorbus_release_busdevice() - called when device_unregister() is called for
+ * the bus device instance, after all other tasks
+ * involved with destroying the dev are complete
* @xdev: struct device for the bus being released
*/
static void
@@ -158,7 +147,7 @@ visorbus_release_busdevice(struct device *xdev)
kfree(dev);
}
-/**
+/*
* visorbus_release_device() - called when device_unregister() is called for
* each child device instance
* @xdev: struct device for the visor device being released
@@ -185,8 +174,6 @@ static ssize_t physaddr_show(struct device *dev, struct device_attribute *attr,
{
struct visor_device *vdev = to_visor_device(dev);
- if (!vdev->visorchannel)
- return 0;
return sprintf(buf, "0x%llx\n",
visorchannel_get_physaddr(vdev->visorchannel));
}
@@ -197,8 +184,6 @@ static ssize_t nbytes_show(struct device *dev, struct device_attribute *attr,
{
struct visor_device *vdev = to_visor_device(dev);
- if (!vdev->visorchannel)
- return 0;
return sprintf(buf, "0x%lx\n",
visorchannel_get_nbytes(vdev->visorchannel));
}
@@ -209,8 +194,6 @@ static ssize_t clientpartition_show(struct device *dev,
{
struct visor_device *vdev = to_visor_device(dev);
- if (!vdev->visorchannel)
- return 0;
return sprintf(buf, "0x%llx\n",
visorchannel_get_clientpartition(vdev->visorchannel));
}
@@ -222,8 +205,6 @@ static ssize_t typeguid_show(struct device *dev, struct device_attribute *attr,
struct visor_device *vdev = to_visor_device(dev);
char typeid[LINESIZE];
- if (!vdev->visorchannel)
- return 0;
return sprintf(buf, "%s\n",
visorchannel_id(vdev->visorchannel, typeid));
}
@@ -235,8 +216,6 @@ static ssize_t zoneguid_show(struct device *dev, struct device_attribute *attr,
struct visor_device *vdev = to_visor_device(dev);
char zoneid[LINESIZE];
- if (!vdev->visorchannel)
- return 0;
return sprintf(buf, "%s\n",
visorchannel_zoneid(vdev->visorchannel, zoneid));
}
@@ -245,13 +224,12 @@ static DEVICE_ATTR_RO(zoneguid);
static ssize_t typename_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct visor_device *vdev = to_visor_device(dev);
int i = 0;
struct bus_type *xbus = dev->bus;
struct device_driver *xdrv = dev->driver;
struct visor_driver *drv = NULL;
- if (!vdev->visorchannel || !xbus || !xdrv)
+ if (!xbus || !xdrv)
return 0;
i = xbus->match(dev, xdrv);
if (!i)
@@ -344,11 +322,10 @@ static ssize_t channel_id_show(struct device *dev,
struct visor_device *vdev = to_visor_device(dev);
int len = 0;
- if (vdev->visorchannel) {
- visorchannel_id(vdev->visorchannel, buf);
- len = strlen(buf);
- buf[len++] = '\n';
- }
+ visorchannel_id(vdev->visorchannel, buf);
+ len = strlen(buf);
+ buf[len++] = '\n';
+
return len;
}
static DEVICE_ATTR_RO(channel_id);
@@ -378,6 +355,40 @@ static const struct attribute_group *visorbus_groups[] = {
* define & implement display of debugfs attributes under
* /sys/kernel/debug/visorbus/visorbus<n>.
*/
+/*
+ * vbuschannel_print_devinfo() - format a struct ultra_vbus_deviceinfo
+ * and write it to a seq_file
+ * @devinfo: the struct ultra_vbus_deviceinfo to format
+ * @seq: seq_file to write to
+ * @devix: the device index to be included in the output data, or -1 if no
+ * device index is to be included
+ *
+ * Reads @devInfo, and writes it in human-readable notation to @seq.
+ */
+static void
+vbuschannel_print_devinfo(struct ultra_vbus_deviceinfo *devinfo,
+ struct seq_file *seq, int devix)
+{
+ if (!isprint(devinfo->devtype[0]))
+ return; /* uninitialized vbus device entry */
+
+ if (devix >= 0)
+ seq_printf(seq, "[%d]", devix);
+ else
+ /* vbus device entry is for bus or chipset */
+ seq_puts(seq, " ");
+
+ /*
+ * Note: because the s-Par back-end is free to scribble in this area,
+ * we never assume '\0'-termination.
+ */
+ seq_printf(seq, "%-*.*s ", (int)sizeof(devinfo->devtype),
+ (int)sizeof(devinfo->devtype), devinfo->devtype);
+ seq_printf(seq, "%-*.*s ", (int)sizeof(devinfo->drvname),
+ (int)sizeof(devinfo->drvname), devinfo->drvname);
+ seq_printf(seq, "%.*s\n", (int)sizeof(devinfo->infostrs),
+ devinfo->infostrs);
+}
static int client_bus_info_debugfs_show(struct seq_file *seq, void *v)
{
@@ -442,16 +453,17 @@ dev_periodic_work(unsigned long __opaque)
mod_timer(&dev->timer, jiffies + POLLJIFFIES_NORMALCHANNEL);
}
-static void
+static int
dev_start_periodic_work(struct visor_device *dev)
{
if (dev->being_removed || dev->timer_active)
- return;
+ return -EINVAL;
/* now up by at least 2 */
get_device(&dev->device);
dev->timer.expires = jiffies + POLLJIFFIES_NORMALCHANNEL;
add_timer(&dev->timer);
dev->timer_active = true;
+ return 0;
}
static void
@@ -464,7 +476,7 @@ dev_stop_periodic_work(struct visor_device *dev)
put_device(&dev->device);
}
-/**
+/*
* visordriver_remove_device() - handle visor device going away
* @xdev: struct device for the visor device being removed
*
@@ -557,17 +569,17 @@ EXPORT_SYMBOL_GPL(visorbus_write_channel);
* Currently we don't yet have a real interrupt, so for now we just call the
* interrupt function periodically via a timer.
*/
-void
+int
visorbus_enable_channel_interrupts(struct visor_device *dev)
{
struct visor_driver *drv = to_visor_driver(dev->device.driver);
if (!drv->channel_interrupt) {
dev_err(&dev->device, "%s no interrupt function!\n", __func__);
- return;
+ return -ENOENT;
}
- dev_start_periodic_work(dev);
+ return dev_start_periodic_work(dev);
}
EXPORT_SYMBOL_GPL(visorbus_enable_channel_interrupts);
@@ -583,7 +595,7 @@ visorbus_disable_channel_interrupts(struct visor_device *dev)
}
EXPORT_SYMBOL_GPL(visorbus_disable_channel_interrupts);
-/**
+/*
* create_visor_device() - create visor device as a result of receiving the
* controlvm device_create message for a new device
* @dev: a freshly-zeroed struct visor_device, containing only filled-in values
@@ -613,9 +625,6 @@ create_visor_device(struct visor_device *dev)
u32 chipset_bus_no = dev->chipset_bus_no;
u32 chipset_dev_no = dev->chipset_dev_no;
- POSTCODE_LINUX(DEVICE_CREATE_ENTRY_PC, chipset_dev_no, chipset_bus_no,
- DIAG_SEVERITY_PRINT);
-
mutex_init(&dev->visordriver_callback_lock);
dev->device.bus = &visorbus_type;
dev->device.groups = visorbus_channel_groups;
@@ -630,8 +639,10 @@ create_visor_device(struct visor_device *dev)
* (NOT bus instance). That's why we need to include the bus
* number within the name.
*/
- dev_set_name(&dev->device, "vbus%u:dev%u",
- chipset_bus_no, chipset_dev_no);
+ err = dev_set_name(&dev->device, "vbus%u:dev%u",
+ chipset_bus_no, chipset_dev_no);
+ if (err)
+ goto err_put;
/*
* device_add does this:
@@ -651,17 +662,15 @@ create_visor_device(struct visor_device *dev)
* bus_type.klist_devices regardless (use bus_for_each_dev).
*/
err = device_add(&dev->device);
- if (err < 0) {
- POSTCODE_LINUX(DEVICE_ADD_PC, 0, chipset_bus_no,
- DIAG_SEVERITY_ERR);
+ if (err < 0)
goto err_put;
- }
list_add_tail(&dev->list_all, &list_all_device_instances);
return 0; /* success: reference kept via unmatched get_device() */
err_put:
put_device(&dev->device);
+ dev_err(&dev->device, "Creating visor device failed. %d\n", err);
return err;
}
@@ -677,24 +686,32 @@ static int
get_vbus_header_info(struct visorchannel *chan,
struct spar_vbus_headerinfo *hdr_info)
{
- if (!SPAR_VBUS_CHANNEL_OK_CLIENT(visorchannel_get_header(chan)))
+ int err;
+
+ if (!spar_check_channel(visorchannel_get_header(chan),
+ spar_vbus_channel_protocol_uuid,
+ "vbus",
+ sizeof(struct spar_vbus_channel_protocol),
+ SPAR_VBUS_CHANNEL_PROTOCOL_VERSIONID,
+ SPAR_VBUS_CHANNEL_PROTOCOL_SIGNATURE))
return -EINVAL;
- if (visorchannel_read(chan, sizeof(struct channel_header), hdr_info,
- sizeof(*hdr_info)) < 0) {
- return -EIO;
- }
+ err = visorchannel_read(chan, sizeof(struct channel_header), hdr_info,
+ sizeof(*hdr_info));
+ if (err < 0)
+ return err;
+
if (hdr_info->struct_bytes < sizeof(struct spar_vbus_headerinfo))
return -EINVAL;
if (hdr_info->device_info_struct_bytes <
- sizeof(struct ultra_vbus_deviceinfo)) {
+ sizeof(struct ultra_vbus_deviceinfo))
return -EINVAL;
- }
+
return 0;
}
-/**
+/*
* write_vbus_chp_info() - write the contents of <info> to the struct
* spar_vbus_channel_protocol.chp_info
 * @chan: identifies the s-Par channel that will be updated
@@ -720,7 +737,7 @@ write_vbus_chp_info(struct visorchannel *chan,
visorchannel_write(chan, off, info, sizeof(*info));
}
-/**
+/*
* write_vbus_bus_info() - write the contents of <info> to the struct
* spar_vbus_channel_protocol.bus_info
 * @chan: identifies the s-Par channel that will be updated
@@ -746,7 +763,7 @@ write_vbus_bus_info(struct visorchannel *chan,
visorchannel_write(chan, off, info, sizeof(*info));
}
-/**
+/*
* write_vbus_dev_info() - write the contents of <info> to the struct
* spar_vbus_channel_protocol.dev_info[<devix>]
 * @chan: identifies the s-Par channel that will be updated
@@ -775,10 +792,26 @@ write_vbus_dev_info(struct visorchannel *chan,
visorchannel_write(chan, off, info, sizeof(*info));
}
-/**
+static void bus_device_info_init(
+ struct ultra_vbus_deviceinfo *bus_device_info_ptr,
+ const char *dev_type, const char *drv_name)
+{
+ memset(bus_device_info_ptr, 0, sizeof(struct ultra_vbus_deviceinfo));
+ snprintf(bus_device_info_ptr->devtype,
+ sizeof(bus_device_info_ptr->devtype),
+ "%s", (dev_type) ? dev_type : "unknownType");
+ snprintf(bus_device_info_ptr->drvname,
+ sizeof(bus_device_info_ptr->drvname),
+ "%s", (drv_name) ? drv_name : "unknownDriver");
+ snprintf(bus_device_info_ptr->infostrs,
+ sizeof(bus_device_info_ptr->infostrs), "kernel ver. %s",
+ utsname()->release);
+}
+
+/*
* fix_vbus_dev_info() - for a child device just created on a client bus, fill
* in information about the driver that is controlling
- * this device into the the appropriate slot within the
+ * this device into the appropriate slot within the
* vbus channel of the bus instance
* @visordev: struct visor_device for the desired device
*/
@@ -823,16 +856,12 @@ fix_vbus_dev_info(struct visor_device *visordev)
bus_device_info_init(&dev_info, chan_type_name, visordrv->name);
write_vbus_dev_info(bdev->visorchannel, hdr_info, &dev_info, dev_no);
- /*
- * Re-write bus+chipset info, because it is possible that this
- * was previously written by our evil counterpart, virtpci.
- */
write_vbus_chp_info(bdev->visorchannel, hdr_info, &chipset_driverinfo);
write_vbus_bus_info(bdev->visorchannel, hdr_info,
&clientbus_driverinfo);
}
-/**
+/*
* visordriver_probe_device() - handle new visor device coming online
* @xdev: struct device for the visor device being probed
*
@@ -925,10 +954,8 @@ visordriver_probe_device(struct device *xdev)
*/
int visorbus_register_visor_driver(struct visor_driver *drv)
{
- int rc = 0;
-
- if (busreg_rc < 0)
- return -ENODEV; /*can't register on a nonexistent bus*/
+ if (!initialized)
+ return -ENODEV; /* can't register on a nonexistent bus */
drv->driver.name = drv->name;
drv->driver.bus = &visorbus_type;
@@ -949,14 +976,11 @@ int visorbus_register_visor_driver(struct visor_driver *drv)
* dev.drv = NULL
*/
- rc = driver_register(&drv->driver);
- if (rc < 0)
- driver_unregister(&drv->driver);
- return rc;
+ return driver_register(&drv->driver);
}
EXPORT_SYMBOL_GPL(visorbus_register_visor_driver);
-/**
+/*
* create_bus_instance() - create a device instance for the visor bus itself
* @dev: struct visor_device indicating the bus instance
*
@@ -970,8 +994,6 @@ create_bus_instance(struct visor_device *dev)
int err;
struct spar_vbus_headerinfo *hdr_info;
- POSTCODE_LINUX(BUS_CREATE_ENTRY_PC, 0, 0, DIAG_SEVERITY_PRINT);
-
hdr_info = kzalloc(sizeof(*hdr_info), GFP_KERNEL);
if (!hdr_info)
return -ENOMEM;
@@ -983,51 +1005,38 @@ create_bus_instance(struct visor_device *dev)
dev->debugfs_dir = debugfs_create_dir(dev_name(&dev->device),
visorbus_debugfs_dir);
- if (!dev->debugfs_dir) {
- err = -ENOMEM;
- goto err_hdr_info;
- }
dev->debugfs_client_bus_info =
debugfs_create_file("client_bus_info", 0440,
dev->debugfs_dir, dev,
&client_bus_info_debugfs_fops);
- if (!dev->debugfs_client_bus_info) {
- err = -ENOMEM;
+
+ dev_set_drvdata(&dev->device, dev);
+ err = get_vbus_header_info(dev->visorchannel, hdr_info);
+ if (err < 0)
goto err_debugfs_dir;
- }
- if (device_register(&dev->device) < 0) {
- POSTCODE_LINUX(DEVICE_CREATE_FAILURE_PC, 0, id,
- DIAG_SEVERITY_ERR);
- err = -ENODEV;
- goto err_debugfs_created;
- }
+ err = device_register(&dev->device);
+ if (err < 0)
+ goto err_debugfs_dir;
- if (get_vbus_header_info(dev->visorchannel, hdr_info) >= 0) {
- dev->vbus_hdr_info = (void *)hdr_info;
- write_vbus_chp_info(dev->visorchannel, hdr_info,
- &chipset_driverinfo);
- write_vbus_bus_info(dev->visorchannel, hdr_info,
- &clientbus_driverinfo);
- } else {
- kfree(hdr_info);
- }
list_add_tail(&dev->list_all, &list_all_bus_instances);
- dev_set_drvdata(&dev->device, dev);
- return 0;
-err_debugfs_created:
- debugfs_remove(dev->debugfs_client_bus_info);
+ dev->vbus_hdr_info = (void *)hdr_info;
+ write_vbus_chp_info(dev->visorchannel, hdr_info,
+ &chipset_driverinfo);
+ write_vbus_bus_info(dev->visorchannel, hdr_info,
+ &clientbus_driverinfo);
+
+ return 0;
err_debugfs_dir:
debugfs_remove_recursive(dev->debugfs_dir);
-
-err_hdr_info:
kfree(hdr_info);
+ dev_err(&dev->device, "create_bus_instance failed: %d\n", err);
return err;
}
-/**
+/*
* remove_bus_instance() - remove a device instance for the visor bus itself
 * @dev: struct visor_device identifying the bus to remove
*/
@@ -1051,30 +1060,7 @@ remove_bus_instance(struct visor_device *dev)
device_unregister(&dev->device);
}
-/**
- * create_bus_type() - create and register the one-and-only one instance of
- * the visor bus type (visorbus_type)
- * Return: 0 for success, otherwise negative errno value returned by
- * bus_register() indicating the reason for failure
- */
-static int
-create_bus_type(void)
-{
- busreg_rc = bus_register(&visorbus_type);
- return busreg_rc;
-}
-
-/**
- * remove_bus_type() - remove the one-and-only one instance of the visor bus
- * type (visorbus_type)
- */
-static void
-remove_bus_type(void)
-{
- bus_unregister(&visorbus_type);
-}
-
-/**
+/*
* remove_all_visor_devices() - remove all child visor bus device instances
*/
static void
@@ -1090,24 +1076,19 @@ remove_all_visor_devices(void)
}
}
-void
+int
chipset_bus_create(struct visor_device *dev)
{
- int rc;
- u32 bus_no = dev->chipset_bus_no;
+ int err;
- POSTCODE_LINUX(BUS_CREATE_ENTRY_PC, 0, bus_no, DIAG_SEVERITY_PRINT);
- rc = create_bus_instance(dev);
- POSTCODE_LINUX(BUS_CREATE_EXIT_PC, 0, bus_no, DIAG_SEVERITY_PRINT);
+ err = create_bus_instance(dev);
- if (rc < 0)
- POSTCODE_LINUX(BUS_CREATE_FAILURE_PC, 0, bus_no,
- DIAG_SEVERITY_ERR);
- else
- POSTCODE_LINUX(CHIPSET_INIT_SUCCESS_PC, 0, bus_no,
- DIAG_SEVERITY_PRINT);
+ if (err < 0)
+ return err;
+
+ bus_create_response(dev, err);
- bus_create_response(dev, rc);
+ return 0;
}
void
@@ -1117,25 +1098,18 @@ chipset_bus_destroy(struct visor_device *dev)
bus_destroy_response(dev, 0);
}
-void
+int
chipset_device_create(struct visor_device *dev_info)
{
- int rc;
- u32 bus_no = dev_info->chipset_bus_no;
- u32 dev_no = dev_info->chipset_dev_no;
+ int err;
- POSTCODE_LINUX(DEVICE_CREATE_ENTRY_PC, dev_no, bus_no,
- DIAG_SEVERITY_PRINT);
+ err = create_visor_device(dev_info);
+ if (err < 0)
+ return err;
- rc = create_visor_device(dev_info);
- device_create_response(dev_info, rc);
+ device_create_response(dev_info, err);
- if (rc < 0)
- POSTCODE_LINUX(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
- DIAG_SEVERITY_ERR);
- else
- POSTCODE_LINUX(DEVICE_CREATE_SUCCESS_PC, dev_no, bus_no,
- DIAG_SEVERITY_PRINT);
+ return 0;
}
void
@@ -1146,7 +1120,7 @@ chipset_device_destroy(struct visor_device *dev_info)
device_destroy_response(dev_info, 0);
}
-/**
+/*
* pause_state_change_complete() - the callback function to be called by a
* visorbus function driver when a
* pending "pause device" operation has
@@ -1166,7 +1140,7 @@ pause_state_change_complete(struct visor_device *dev, int status)
device_pause_response(dev, status);
}
-/**
+/*
* resume_state_change_complete() - the callback function to be called by a
* visorbus function driver when a
* pending "resume device" operation has
@@ -1191,7 +1165,7 @@ resume_state_change_complete(struct visor_device *dev, int status)
device_resume_response(dev, status);
}
-/**
+/*
* initiate_chipset_device_pause_resume() - start a pause or resume operation
* for a visor device
* @dev: struct visor_device identifying the device being paused or resumed
@@ -1202,70 +1176,38 @@ resume_state_change_complete(struct visor_device *dev, int status)
* via a callback function; see pause_state_change_complete() and
* resume_state_change_complete().
*/
-static void
+static int
initiate_chipset_device_pause_resume(struct visor_device *dev, bool is_pause)
{
- int rc;
+ int err;
struct visor_driver *drv = NULL;
- void (*notify_func)(struct visor_device *dev, int response) = NULL;
-
- if (is_pause)
- notify_func = device_pause_response;
- else
- notify_func = device_resume_response;
- if (!notify_func)
- return;
drv = to_visor_driver(dev->device.driver);
- if (!drv) {
- (*notify_func)(dev, -ENODEV);
- return;
- }
+ if (!drv)
+ return -ENODEV;
- if (dev->pausing || dev->resuming) {
- (*notify_func)(dev, -EBUSY);
- return;
- }
+ if (dev->pausing || dev->resuming)
+ return -EBUSY;
- /*
- * Note that even though both drv->pause() and drv->resume
- * specify a callback function, it is NOT necessary for us to
- * increment our local module usage count. Reason is, there
- * is already a linkage dependency between child function
- * drivers and visorbus, so it is already IMPOSSIBLE to unload
- * visorbus while child function drivers are still running.
- */
if (is_pause) {
- if (!drv->pause) {
- (*notify_func)(dev, -EINVAL);
- return;
- }
+ if (!drv->pause)
+ return -EINVAL;
dev->pausing = true;
- rc = drv->pause(dev, pause_state_change_complete);
+ err = drv->pause(dev, pause_state_change_complete);
} else {
- /* This should be done at BUS resume time, but an
- * existing problem prevents us from ever getting a bus
- * resume... This hack would fail to work should we
- * ever have a bus that contains NO devices, since we
- * would never even get here in that case.
+ /* The vbus_dev_info structure in the channel has been
+ * cleared; make sure it is valid.
*/
fix_vbus_dev_info(dev);
- if (!drv->resume) {
- (*notify_func)(dev, -EINVAL);
- return;
- }
+ if (!drv->resume)
+ return -EINVAL;
dev->resuming = true;
- rc = drv->resume(dev, resume_state_change_complete);
- }
- if (rc < 0) {
- if (is_pause)
- dev->pausing = false;
- else
- dev->resuming = false;
- (*notify_func)(dev, -EINVAL);
+ err = drv->resume(dev, resume_state_change_complete);
}
+
+ return err;
}
/**
@@ -1276,10 +1218,19 @@ initiate_chipset_device_pause_resume(struct visor_device *dev, bool is_pause)
* that device. Success/failure result is returned asynchronously
* via a callback function; see pause_state_change_complete().
*/
-void
+int
chipset_device_pause(struct visor_device *dev_info)
{
- initiate_chipset_device_pause_resume(dev_info, true);
+ int err;
+
+ err = initiate_chipset_device_pause_resume(dev_info, true);
+
+ if (err < 0) {
+ dev_info->pausing = false;
+ return err;
+ }
+
+ return 0;
}
/**
@@ -1290,10 +1241,19 @@ chipset_device_pause(struct visor_device *dev_info)
* that device. Success/failure result is returned asynchronously
* via a callback function; see resume_state_change_complete().
*/
-void
+int
chipset_device_resume(struct visor_device *dev_info)
{
- initiate_chipset_device_pause_resume(dev_info, false);
+ int err;
+
+ err = initiate_chipset_device_pause_resume(dev_info, false);
+
+ if (err < 0) {
+ dev_info->resuming = false;
+ return err;
+ }
+
+ return 0;
}
int
@@ -1301,27 +1261,21 @@ visorbus_init(void)
{
int err;
- POSTCODE_LINUX(DRIVER_ENTRY_PC, 0, 0, DIAG_SEVERITY_PRINT);
-
visorbus_debugfs_dir = debugfs_create_dir("visorbus", NULL);
if (!visorbus_debugfs_dir)
return -ENOMEM;
bus_device_info_init(&clientbus_driverinfo, "clientbus", "visorbus");
- err = create_bus_type();
- if (err < 0) {
- POSTCODE_LINUX(BUS_CREATE_ENTRY_PC, 0, 0, DIAG_SEVERITY_ERR);
- goto error;
- }
+ err = bus_register(&visorbus_type);
+ if (err < 0)
+ return err;
+
+ initialized = true;
bus_device_info_init(&chipset_driverinfo, "chipset", "visorchipset");
return 0;
-
-error:
- POSTCODE_LINUX(CHIPSET_INIT_FAILURE_PC, 0, err, DIAG_SEVERITY_ERR);
- return err;
}
void
@@ -1337,14 +1291,8 @@ visorbus_exit(void)
list_all);
remove_bus_instance(dev);
}
- remove_bus_type();
+
+ bus_unregister(&visorbus_type);
+ initialized = false;
debugfs_remove_recursive(visorbus_debugfs_dir);
}
-
-module_param_named(forcematch, visorbus_forcematch, int, 0444);
-MODULE_PARM_DESC(visorbus_forcematch,
- "1 to force a successful dev <--> drv match");
-
-module_param_named(forcenomatch, visorbus_forcenomatch, int, 0444);
-MODULE_PARM_DESC(visorbus_forcenomatch,
- "1 to force an UNsuccessful dev <--> drv match");
diff --git a/drivers/staging/unisys/visorbus/visorbus_private.h b/drivers/staging/unisys/visorbus/visorbus_private.h
index 49bec1763e33..9f030b1f589f 100644
--- a/drivers/staging/unisys/visorbus/visorbus_private.h
+++ b/drivers/staging/unisys/visorbus/visorbus_private.h
@@ -27,28 +27,12 @@
* command line
*/
-static inline void bus_device_info_init(
- struct ultra_vbus_deviceinfo *bus_device_info_ptr,
- const char *dev_type, const char *drv_name)
-{
- memset(bus_device_info_ptr, 0, sizeof(struct ultra_vbus_deviceinfo));
- snprintf(bus_device_info_ptr->devtype,
- sizeof(bus_device_info_ptr->devtype),
- "%s", (dev_type) ? dev_type : "unknownType");
- snprintf(bus_device_info_ptr->drvname,
- sizeof(bus_device_info_ptr->drvname),
- "%s", (drv_name) ? drv_name : "unknownDriver");
- snprintf(bus_device_info_ptr->infostrs,
- sizeof(bus_device_info_ptr->infostrs), "kernel ver. %s",
- utsname()->release);
-}
-
-void chipset_bus_create(struct visor_device *bus_info);
+int chipset_bus_create(struct visor_device *bus_info);
void chipset_bus_destroy(struct visor_device *bus_info);
-void chipset_device_create(struct visor_device *dev_info);
+int chipset_device_create(struct visor_device *dev_info);
void chipset_device_destroy(struct visor_device *dev_info);
-void chipset_device_pause(struct visor_device *dev_info);
-void chipset_device_resume(struct visor_device *dev_info);
+int chipset_device_pause(struct visor_device *dev_info);
+int chipset_device_resume(struct visor_device *dev_info);
void bus_create_response(struct visor_device *p, int response);
void bus_destroy_response(struct visor_device *p, int response);
@@ -81,5 +65,5 @@ u64 visorchannel_get_clientpartition(struct visorchannel *channel);
int visorchannel_set_clientpartition(struct visorchannel *channel,
u64 partition_handle);
char *visorchannel_uuid_id(uuid_le *guid, char *s);
-void __iomem *visorchannel_get_header(struct visorchannel *channel);
+void *visorchannel_get_header(struct visorchannel *channel);
#endif
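The inline bus_device_info_init() helper removed from this header filled the device-info strings with safe defaults; the patch implies it moves into visorbus_main.c, though that destination is not shown in these hunks. A stand-alone sketch of the same snprintf pattern, using an illustrative struct rather than the real ultra_vbus_deviceinfo layout:

#include <stdio.h>
#include <string.h>

struct demo_deviceinfo {
	char devtype[16];
	char drvname[16];
	char infostrs[64];
};

static void demo_device_info_init(struct demo_deviceinfo *info,
				  const char *dev_type, const char *drv_name,
				  const char *kernel_release)
{
	/* zero the struct, then fill each field with a bounded copy */
	memset(info, 0, sizeof(*info));
	snprintf(info->devtype, sizeof(info->devtype), "%s",
		 dev_type ? dev_type : "unknownType");
	snprintf(info->drvname, sizeof(info->drvname), "%s",
		 drv_name ? drv_name : "unknownDriver");
	snprintf(info->infostrs, sizeof(info->infostrs), "kernel ver. %s",
		 kernel_release);
}

int main(void)
{
	struct demo_deviceinfo info;

	demo_device_info_init(&info, "chipset", NULL, "4.12.0-demo");
	printf("%s %s %s\n", info.devtype, info.drvname, info.infostrs);
	return 0;
}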
diff --git a/drivers/staging/unisys/visorbus/visorchannel.c b/drivers/staging/unisys/visorbus/visorchannel.c
index e91febcc6c1e..9e1cea22ce68 100644
--- a/drivers/staging/unisys/visorbus/visorchannel.c
+++ b/drivers/staging/unisys/visorbus/visorchannel.c
@@ -29,8 +29,9 @@
#define MYDRVNAME "visorchannel"
#define SPAR_CONSOLEVIDEO_CHANNEL_PROTOCOL_GUID \
- UUID_LE(0x3cd6e705, 0xd6a2, 0x4aa5, \
+ UUID_LE(0x3cd6e705, 0xd6a2, 0x4aa5, \
0xad, 0x5c, 0x7b, 0x8, 0x88, 0x9d, 0xff, 0xe2)
+
static const uuid_le spar_video_guid = SPAR_CONSOLEVIDEO_CHANNEL_PROTOCOL_GUID;
struct visorchannel {
@@ -153,10 +154,10 @@ visorchannel_write(struct visorchannel *channel, ulong offset,
return 0;
}
-void __iomem *
+void *
visorchannel_get_header(struct visorchannel *channel)
{
- return (void __iomem *)&channel->chan_hdr;
+ return &channel->chan_hdr;
}
/*
@@ -173,17 +174,17 @@ visorchannel_get_header(struct visorchannel *channel)
*/
#define SIG_DATA_OFFSET(chan_hdr, q, sig_hdr, slot) \
(SIG_QUEUE_OFFSET(chan_hdr, q) + (sig_hdr)->sig_base_offset + \
- ((slot) * (sig_hdr)->signal_size))
+ ((slot) * (sig_hdr)->signal_size))
/*
* Write the contents of a specific field within a SIGNAL_QUEUE_HEADER back
* into host memory
*/
-#define SIG_WRITE_FIELD(channel, queue, sig_hdr, FIELD) \
- visorchannel_write(channel, \
- SIG_QUEUE_OFFSET(&channel->chan_hdr, queue) +\
+#define SIG_WRITE_FIELD(channel, queue, sig_hdr, FIELD) \
+ visorchannel_write(channel, \
+ SIG_QUEUE_OFFSET(&channel->chan_hdr, queue) + \
offsetof(struct signal_queue_header, FIELD), \
- &((sig_hdr)->FIELD), \
+ &((sig_hdr)->FIELD), \
sizeof((sig_hdr)->FIELD))
static int
@@ -199,7 +200,7 @@ sig_read_header(struct visorchannel *channel, u32 queue,
sig_hdr, sizeof(struct signal_queue_header));
}
-static inline int
+static int
sig_read_data(struct visorchannel *channel, u32 queue,
struct signal_queue_header *sig_hdr, u32 slot, void *data)
{
@@ -210,7 +211,7 @@ sig_read_data(struct visorchannel *channel, u32 queue,
data, sig_hdr->signal_size);
}
-static inline int
+static int
sig_write_data(struct visorchannel *channel, u32 queue,
struct signal_queue_header *sig_hdr, u32 slot, void *data)
{
@@ -286,16 +287,6 @@ visorchannel_signalremove(struct visorchannel *channel, u32 queue, void *msg)
}
EXPORT_SYMBOL_GPL(visorchannel_signalremove);
-/**
- * visorchannel_signalempty() - checks if the designated channel/queue
- * contains any messages
- * @channel: the channel to query
- * @queue: the queue in the channel to query
- *
- * Return: boolean indicating whether any messages in the designated
- * channel/queue are present
- */
-
static bool
queue_empty(struct visorchannel *channel, u32 queue)
{
@@ -307,6 +298,15 @@ queue_empty(struct visorchannel *channel, u32 queue)
return (sig_hdr.head == sig_hdr.tail);
}
+/**
+ * visorchannel_signalempty() - checks if the designated channel/queue
+ * contains any messages
+ * @channel: the channel to query
+ * @queue: the queue in the channel to query
+ *
+ * Return: boolean indicating whether any messages in the designated
+ * channel/queue are present
+ */
bool
visorchannel_signalempty(struct visorchannel *channel, u32 queue)
{
@@ -328,27 +328,24 @@ static int
signalinsert_inner(struct visorchannel *channel, u32 queue, void *msg)
{
struct signal_queue_header sig_hdr;
- int error;
+ int err;
- error = sig_read_header(channel, queue, &sig_hdr);
- if (error)
- return error;
+ err = sig_read_header(channel, queue, &sig_hdr);
+ if (err)
+ return err;
sig_hdr.head = (sig_hdr.head + 1) % sig_hdr.max_slots;
if (sig_hdr.head == sig_hdr.tail) {
sig_hdr.num_overflows++;
- visorchannel_write(channel,
- SIG_QUEUE_OFFSET(&channel->chan_hdr, queue) +
- offsetof(struct signal_queue_header,
- num_overflows),
- &sig_hdr.num_overflows,
- sizeof(sig_hdr.num_overflows));
+ err = SIG_WRITE_FIELD(channel, queue, &sig_hdr, num_overflows);
+ if (err)
+ return err;
return -EIO;
}
- error = sig_write_data(channel, queue, &sig_hdr, sig_hdr.head, msg);
- if (error)
- return error;
+ err = sig_write_data(channel, queue, &sig_hdr, sig_hdr.head, msg);
+ if (err)
+ return err;
sig_hdr.num_sent++;
@@ -358,17 +355,17 @@ signalinsert_inner(struct visorchannel *channel, u32 queue, void *msg)
*/
mb(); /* required for channel synch */
- error = SIG_WRITE_FIELD(channel, queue, &sig_hdr, head);
- if (error)
- return error;
- error = SIG_WRITE_FIELD(channel, queue, &sig_hdr, num_sent);
- if (error)
- return error;
+ err = SIG_WRITE_FIELD(channel, queue, &sig_hdr, head);
+ if (err)
+ return err;
+ err = SIG_WRITE_FIELD(channel, queue, &sig_hdr, num_sent);
+ if (err)
+ return err;
return 0;
}
-/**
+/*
* visorchannel_create_guts() - creates the struct visorchannel abstraction
* for a data area in memory, but does NOT modify
* this data area
@@ -418,12 +415,9 @@ visorchannel_create_guts(u64 physaddr, unsigned long channel_bytes,
* release later on.
*/
channel->requested = request_mem_region(physaddr, size, MYDRVNAME);
- if (!channel->requested) {
- if (uuid_le_cmp(guid, spar_video_guid)) {
- /* Not the video channel we care about this */
- goto err_destroy_channel;
- }
- }
+ if (!channel->requested && uuid_le_cmp(guid, spar_video_guid))
+ /* we only care about errors if this is not the video channel */
+ goto err_destroy_channel;
channel->mapped = memremap(physaddr, size, MEMREMAP_WB);
if (!channel->mapped) {
@@ -451,12 +445,9 @@ visorchannel_create_guts(u64 physaddr, unsigned long channel_bytes,
channel->mapped = NULL;
channel->requested = request_mem_region(channel->physaddr,
channel_bytes, MYDRVNAME);
- if (!channel->requested) {
- if (uuid_le_cmp(guid, spar_video_guid)) {
- /* Different we care about this */
- goto err_destroy_channel;
- }
- }
+ if (!channel->requested && uuid_le_cmp(guid, spar_video_guid))
+ /* we only care about errors if this is not the video channel */
+ goto err_destroy_channel;
channel->mapped = memremap(channel->physaddr, channel_bytes,
MEMREMAP_WB);
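A user-space sketch of the head/tail arithmetic used by signalinsert_inner() above: advance the head modulo the slot count, treat head meeting tail as an overflow, and only then write the payload into the new slot. The queue layout is simplified and the names are illustrative, not the real signal_queue_header.

#include <errno.h>
#include <stdio.h>

#define DEMO_SLOTS 4

struct demo_queue {
	unsigned int head;
	unsigned int tail;
	unsigned int num_overflows;
	unsigned int num_sent;
	int slots[DEMO_SLOTS];
};

static int demo_signal_insert(struct demo_queue *q, int msg)
{
	unsigned int next = (q->head + 1) % DEMO_SLOTS;

	if (next == q->tail) {
		q->num_overflows++;
		return -EIO;	/* queue full, as in signalinsert_inner() */
	}
	q->head = next;
	q->slots[q->head] = msg;
	q->num_sent++;
	return 0;
}

int main(void)
{
	struct demo_queue q = { 0 };
	int i;

	/* the last insert hits the full condition and returns -EIO */
	for (i = 0; i < DEMO_SLOTS; i++)
		printf("insert %d -> %d\n", i, demo_signal_insert(&q, i));
	return 0;
}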
diff --git a/drivers/staging/unisys/visorbus/visorchipset.c b/drivers/staging/unisys/visorbus/visorchipset.c
index 97778d733e1e..4cfd0fae9bd5 100644
--- a/drivers/staging/unisys/visorbus/visorchipset.c
+++ b/drivers/staging/unisys/visorbus/visorchipset.c
@@ -15,13 +15,11 @@
*/
#include <linux/acpi.h>
-#include <linux/cdev.h>
#include <linux/ctype.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/nls.h>
#include <linux/netdevice.h>
-#include <linux/platform_device.h>
#include <linux/uuid.h>
#include <linux/crash_dump.h>
@@ -31,13 +29,11 @@
#define CURRENT_FILE_PC VISOR_BUS_PC_visorchipset_c
-#define POLLJIFFIES_CONTROLVMCHANNEL_FAST 1
+#define POLLJIFFIES_CONTROLVMCHANNEL_FAST 1
#define POLLJIFFIES_CONTROLVMCHANNEL_SLOW 100
#define MAX_CONTROLVM_PAYLOAD_BYTES (1024 * 128)
-#define VISORCHIPSET_MMAP_CONTROLCHANOFFSET 0x00000000
-
#define UNISYS_SPAR_LEAF_ID 0x40000000
/* The s-Par leaf ID returns "UnisysSpar64" encoded across ebx, ecx, edx */
@@ -46,35 +42,11 @@
#define UNISYS_SPAR_ID_EDX 0x34367261
/*
- * Module parameters
- */
-static int visorchipset_major;
-
-static int
-visorchipset_open(struct inode *inode, struct file *file)
-{
- unsigned int minor_number = iminor(inode);
-
- if (minor_number)
- return -ENODEV;
- return 0;
-}
-
-static int
-visorchipset_release(struct inode *inode, struct file *file)
-{
- return 0;
-}
-
-/*
* When the controlvm channel is idle for at least MIN_IDLE_SECONDS,
* we switch to slow polling mode. As soon as we get a controlvm
* message, we switch back to fast polling mode.
*/
#define MIN_IDLE_SECONDS 10
-static unsigned long poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
-/* when we got our last controlvm message */
-static unsigned long most_recent_message_jiffies;
struct parser_context {
unsigned long allocbytes;
@@ -85,22 +57,33 @@ struct parser_context {
char data[0];
};
-static struct delayed_work periodic_controlvm_work;
-
-static struct cdev file_cdev;
-static struct visorchannel **file_controlvm_channel;
+struct vmcall_controlvm_addr {
+ struct vmcall_io_controlvm_addr_params params;
+ int err;
+ u64 physaddr;
+};
-static struct visorchannel *controlvm_channel;
-static unsigned long controlvm_payload_bytes_buffered;
+struct visorchipset_device {
+ struct acpi_device *acpi_device;
+ unsigned long poll_jiffies;
+ /* when we got our last controlvm message */
+ unsigned long most_recent_message_jiffies;
+ struct delayed_work periodic_controlvm_work;
+ struct visorchannel *controlvm_channel;
+ unsigned long controlvm_payload_bytes_buffered;
+ /*
+ * The following variables are used to handle the scenario where we are
+ * unable to offload the payload from a controlvm message due to memory
+ * requirements. In this scenario, we simply stash the controlvm
+ * message, then attempt to process it again the next time
+ * controlvm_periodic_work() runs.
+ */
+ struct controlvm_message controlvm_pending_msg;
+ bool controlvm_pending_msg_valid;
+ struct vmcall_controlvm_addr controlvm_addr;
+};
-/*
- * The following globals are used to handle the scenario where we are unable to
- * offload the payload from a controlvm message due to memory requirements. In
- * this scenario, we simply stash the controlvm message, then attempt to
- * process it again the next time controlvm_periodic_work() runs.
- */
-static struct controlvm_message controlvm_pending_msg;
-static bool controlvm_pending_msg_valid;
+static struct visorchipset_device *chipset_dev;
struct parahotplug_request {
struct list_head list;
@@ -109,19 +92,21 @@ struct parahotplug_request {
struct controlvm_message msg;
};
-/* info for /dev/visorchipset */
-static dev_t major_dev = -1; /*< indicates major num for device */
-
/* prototypes for attributes */
static ssize_t toolaction_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
u8 tool_action = 0;
+ int err;
+
+ err = visorchannel_read(chipset_dev->controlvm_channel,
+ offsetof(struct spar_controlvm_channel_protocol,
+ tool_action),
+ &tool_action, sizeof(u8));
+ if (err)
+ return err;
- visorchannel_read(controlvm_channel,
- offsetof(struct spar_controlvm_channel_protocol,
- tool_action), &tool_action, sizeof(u8));
return sprintf(buf, "%u\n", tool_action);
}
@@ -130,19 +115,19 @@ static ssize_t toolaction_store(struct device *dev,
const char *buf, size_t count)
{
u8 tool_action;
- int ret;
+ int err;
if (kstrtou8(buf, 10, &tool_action))
return -EINVAL;
- ret = visorchannel_write
- (controlvm_channel,
+ err = visorchannel_write
+ (chipset_dev->controlvm_channel,
offsetof(struct spar_controlvm_channel_protocol,
tool_action),
&tool_action, sizeof(u8));
- if (ret)
- return ret;
+ if (err)
+ return err;
return count;
}
static DEVICE_ATTR_RW(toolaction);
@@ -152,11 +137,16 @@ static ssize_t boottotool_show(struct device *dev,
char *buf)
{
struct efi_spar_indication efi_spar_indication;
+ int err;
+
+ err = visorchannel_read(chipset_dev->controlvm_channel,
+ offsetof(struct spar_controlvm_channel_protocol,
+ efi_spar_ind),
+ &efi_spar_indication,
+ sizeof(struct efi_spar_indication));
- visorchannel_read(controlvm_channel,
- offsetof(struct spar_controlvm_channel_protocol,
- efi_spar_ind), &efi_spar_indication,
- sizeof(struct efi_spar_indication));
+ if (err)
+ return err;
return sprintf(buf, "%u\n", efi_spar_indication.boot_to_tool);
}
@@ -164,21 +154,21 @@ static ssize_t boottotool_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- int val, ret;
+ int val, err;
struct efi_spar_indication efi_spar_indication;
if (kstrtoint(buf, 10, &val))
return -EINVAL;
efi_spar_indication.boot_to_tool = val;
- ret = visorchannel_write
- (controlvm_channel,
+ err = visorchannel_write
+ (chipset_dev->controlvm_channel,
offsetof(struct spar_controlvm_channel_protocol,
efi_spar_ind), &(efi_spar_indication),
sizeof(struct efi_spar_indication));
- if (ret)
- return ret;
+ if (err)
+ return err;
return count;
}
static DEVICE_ATTR_RW(boottotool);
@@ -187,11 +177,14 @@ static ssize_t error_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
u32 error = 0;
+ int err;
- visorchannel_read(controlvm_channel,
- offsetof(struct spar_controlvm_channel_protocol,
- installation_error),
- &error, sizeof(u32));
+ err = visorchannel_read(chipset_dev->controlvm_channel,
+ offsetof(struct spar_controlvm_channel_protocol,
+ installation_error),
+ &error, sizeof(u32));
+ if (err)
+ return err;
return sprintf(buf, "%i\n", error);
}
@@ -199,18 +192,18 @@ static ssize_t error_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
u32 error;
- int ret;
+ int err;
if (kstrtou32(buf, 10, &error))
return -EINVAL;
- ret = visorchannel_write
- (controlvm_channel,
+ err = visorchannel_write
+ (chipset_dev->controlvm_channel,
offsetof(struct spar_controlvm_channel_protocol,
installation_error),
&error, sizeof(u32));
- if (ret)
- return ret;
+ if (err)
+ return err;
return count;
}
static DEVICE_ATTR_RW(error);
@@ -219,12 +212,16 @@ static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
u32 text_id = 0;
+ int err;
+
+ err = visorchannel_read
+ (chipset_dev->controlvm_channel,
+ offsetof(struct spar_controlvm_channel_protocol,
+ installation_text_id),
+ &text_id, sizeof(u32));
+ if (err)
+ return err;
- visorchannel_read
- (controlvm_channel,
- offsetof(struct spar_controlvm_channel_protocol,
- installation_text_id),
- &text_id, sizeof(u32));
return sprintf(buf, "%i\n", text_id);
}
@@ -232,18 +229,18 @@ static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
u32 text_id;
- int ret;
+ int err;
if (kstrtou32(buf, 10, &text_id))
return -EINVAL;
- ret = visorchannel_write
- (controlvm_channel,
+ err = visorchannel_write
+ (chipset_dev->controlvm_channel,
offsetof(struct spar_controlvm_channel_protocol,
installation_text_id),
&text_id, sizeof(u32));
- if (ret)
- return ret;
+ if (err)
+ return err;
return count;
}
static DEVICE_ATTR_RW(textid);
@@ -252,11 +249,15 @@ static ssize_t remaining_steps_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
u16 remaining_steps = 0;
+ int err;
+
+ err = visorchannel_read(chipset_dev->controlvm_channel,
+ offsetof(struct spar_controlvm_channel_protocol,
+ installation_remaining_steps),
+ &remaining_steps, sizeof(u16));
+ if (err)
+ return err;
- visorchannel_read(controlvm_channel,
- offsetof(struct spar_controlvm_channel_protocol,
- installation_remaining_steps),
- &remaining_steps, sizeof(u16));
return sprintf(buf, "%hu\n", remaining_steps);
}
@@ -265,18 +266,18 @@ static ssize_t remaining_steps_store(struct device *dev,
const char *buf, size_t count)
{
u16 remaining_steps;
- int ret;
+ int err;
if (kstrtou16(buf, 10, &remaining_steps))
return -EINVAL;
- ret = visorchannel_write
- (controlvm_channel,
+ err = visorchannel_write
+ (chipset_dev->controlvm_channel,
offsetof(struct spar_controlvm_channel_protocol,
installation_remaining_steps),
&remaining_steps, sizeof(u16));
- if (ret)
- return ret;
+ if (err)
+ return err;
return count;
}
static DEVICE_ATTR_RW(remaining_steps);
@@ -292,7 +293,7 @@ parser_id_get(struct parser_context *ctx)
static void parser_done(struct parser_context *ctx)
{
- controlvm_payload_bytes_buffered -= ctx->param_bytes;
+ chipset_dev->controlvm_payload_bytes_buffered -= ctx->param_bytes;
kfree(ctx);
}
@@ -318,7 +319,7 @@ parser_string_get(struct parser_context *ctx)
}
if (value_length < 0) /* '\0' was not included in the length */
value_length = nscan;
- value = kmalloc(value_length + 1, GFP_KERNEL | __GFP_NORETRY);
+ value = kmalloc(value_length + 1, GFP_KERNEL);
if (!value)
return NULL;
if (value_length > 0)
@@ -405,7 +406,7 @@ controlvm_respond_chipset_init(struct controlvm_message_header *msg_hdr,
controlvm_init_response(&outmsg, msg_hdr, response);
outmsg.cmd.init_chipset.features = features;
- return visorchannel_signalinsert(controlvm_channel,
+ return visorchannel_signalinsert(chipset_dev->controlvm_channel,
CONTROLVM_QUEUE_REQUEST, &outmsg);
}
@@ -417,14 +418,12 @@ chipset_init(struct controlvm_message *inmsg)
int rc = CONTROLVM_RESP_SUCCESS;
int res = 0;
- POSTCODE_LINUX(CHIPSET_INIT_ENTRY_PC, 0, 0, DIAG_SEVERITY_PRINT);
if (chipset_inited) {
rc = -CONTROLVM_RESP_ALREADY_DONE;
res = -EIO;
goto out_respond;
}
chipset_inited = 1;
- POSTCODE_LINUX(CHIPSET_INIT_EXIT_PC, 0, 0, DIAG_SEVERITY_PRINT);
/*
* Set features to indicate we support parahotplug (if Command
@@ -447,7 +446,8 @@ out_respond:
}
static int
-controlvm_respond(struct controlvm_message_header *msg_hdr, int response)
+controlvm_respond(struct controlvm_message_header *msg_hdr, int response,
+ struct spar_segment_state *state)
{
struct controlvm_message outmsg;
@@ -455,20 +455,12 @@ controlvm_respond(struct controlvm_message_header *msg_hdr, int response)
if (outmsg.hdr.flags.test_message == 1)
return -EINVAL;
- return visorchannel_signalinsert(controlvm_channel,
- CONTROLVM_QUEUE_REQUEST, &outmsg);
-}
-
-static int controlvm_respond_physdev_changestate(
- struct controlvm_message_header *msg_hdr, int response,
- struct spar_segment_state state)
-{
- struct controlvm_message outmsg;
+ if (state) {
+ outmsg.cmd.device_change_state.state = *state;
+ outmsg.cmd.device_change_state.flags.phys_device = 1;
+ }
- controlvm_init_response(&outmsg, msg_hdr, response);
- outmsg.cmd.device_change_state.state = state;
- outmsg.cmd.device_change_state.flags.phys_device = 1;
- return visorchannel_signalinsert(controlvm_channel,
+ return visorchannel_signalinsert(chipset_dev->controlvm_channel,
CONTROLVM_QUEUE_REQUEST, &outmsg);
}
@@ -484,68 +476,68 @@ save_crash_message(struct controlvm_message *msg, enum crash_obj_type typ)
u16 local_crash_msg_count;
int err;
- err = visorchannel_read(controlvm_channel,
+ err = visorchannel_read(chipset_dev->controlvm_channel,
offsetof(struct spar_controlvm_channel_protocol,
saved_crash_message_count),
&local_crash_msg_count, sizeof(u16));
if (err) {
- POSTCODE_LINUX(CRASH_DEV_CTRL_RD_FAILURE_PC, 0, 0,
- DIAG_SEVERITY_ERR);
+ dev_err(&chipset_dev->acpi_device->dev,
+ "failed to read message count\n");
return err;
}
if (local_crash_msg_count != CONTROLVM_CRASHMSG_MAX) {
- POSTCODE_LINUX(CRASH_DEV_COUNT_FAILURE_PC, 0,
- local_crash_msg_count,
- DIAG_SEVERITY_ERR);
+ dev_err(&chipset_dev->acpi_device->dev,
+ "invalid number of messages\n");
return -EIO;
}
- err = visorchannel_read(controlvm_channel,
+ err = visorchannel_read(chipset_dev->controlvm_channel,
offsetof(struct spar_controlvm_channel_protocol,
saved_crash_message_offset),
&local_crash_msg_offset, sizeof(u32));
if (err) {
- POSTCODE_LINUX(CRASH_DEV_CTRL_RD_FAILURE_PC, 0, 0,
- DIAG_SEVERITY_ERR);
+ dev_err(&chipset_dev->acpi_device->dev,
+ "failed to read offset\n");
return err;
}
switch (typ) {
case CRASH_DEV:
local_crash_msg_offset += sizeof(struct controlvm_message);
- err = visorchannel_write(controlvm_channel,
+ err = visorchannel_write(chipset_dev->controlvm_channel,
local_crash_msg_offset,
msg,
sizeof(struct controlvm_message));
if (err) {
- POSTCODE_LINUX(SAVE_MSG_DEV_FAILURE_PC, 0, 0,
- DIAG_SEVERITY_ERR);
+ dev_err(&chipset_dev->acpi_device->dev,
+ "failed to write dev msg\n");
return err;
}
break;
case CRASH_BUS:
- err = visorchannel_write(controlvm_channel,
+ err = visorchannel_write(chipset_dev->controlvm_channel,
local_crash_msg_offset,
msg,
sizeof(struct controlvm_message));
if (err) {
- POSTCODE_LINUX(SAVE_MSG_BUS_FAILURE_PC, 0, 0,
- DIAG_SEVERITY_ERR);
+ dev_err(&chipset_dev->acpi_device->dev,
+ "failed to write bus msg\n");
return err;
}
break;
default:
- pr_info("Invalid crash_obj_type\n");
+ dev_err(&chipset_dev->acpi_device->dev,
+ "Invalid crash_obj_type\n");
break;
}
return 0;
}
static int
-bus_responder(enum controlvm_id cmd_id,
- struct controlvm_message_header *pending_msg_hdr,
- int response)
+controlvm_responder(enum controlvm_id cmd_id,
+ struct controlvm_message_header *pending_msg_hdr,
+ int response)
{
if (!pending_msg_hdr)
return -EIO;
@@ -553,7 +545,7 @@ bus_responder(enum controlvm_id cmd_id,
if (pending_msg_hdr->id != (u32)cmd_id)
return -EINVAL;
- return controlvm_respond(pending_msg_hdr, response);
+ return controlvm_respond(pending_msg_hdr, response, NULL);
}
static int
@@ -576,25 +568,11 @@ device_changestate_responder(enum controlvm_id cmd_id,
outmsg.cmd.device_change_state.dev_no = dev_no;
outmsg.cmd.device_change_state.state = response_state;
- return visorchannel_signalinsert(controlvm_channel,
+ return visorchannel_signalinsert(chipset_dev->controlvm_channel,
CONTROLVM_QUEUE_REQUEST, &outmsg);
}
static int
-device_responder(enum controlvm_id cmd_id,
- struct controlvm_message_header *pending_msg_hdr,
- int response)
-{
- if (!pending_msg_hdr)
- return -EIO;
-
- if (pending_msg_hdr->id != (u32)cmd_id)
- return -EINVAL;
-
- return controlvm_respond(pending_msg_hdr, response);
-}
-
-static int
bus_create(struct controlvm_message *inmsg)
{
struct controlvm_message_packet *cmd = &inmsg->cmd;
@@ -606,16 +584,14 @@ bus_create(struct controlvm_message *inmsg)
bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
if (bus_info && (bus_info->state.created == 1)) {
- POSTCODE_LINUX(BUS_CREATE_FAILURE_PC, 0, bus_no,
- DIAG_SEVERITY_ERR);
+ dev_err(&chipset_dev->acpi_device->dev,
+ "failed bus_create: already exists\n");
err = -EEXIST;
goto err_respond;
}
bus_info = kzalloc(sizeof(*bus_info), GFP_KERNEL);
if (!bus_info) {
- POSTCODE_LINUX(BUS_CREATE_FAILURE_PC, 0, bus_no,
- DIAG_SEVERITY_ERR);
err = -ENOMEM;
goto err_respond;
}
@@ -624,8 +600,6 @@ bus_create(struct controlvm_message *inmsg)
bus_info->chipset_bus_no = bus_no;
bus_info->chipset_dev_no = BUS_ROOT_DEVICE;
- POSTCODE_LINUX(BUS_CREATE_ENTRY_PC, 0, bus_no, DIAG_SEVERITY_PRINT);
-
if (uuid_le_cmp(cmd->create_bus.bus_inst_uuid, spar_siovm_uuid) == 0) {
err = save_crash_message(inmsg, CRASH_BUS);
if (err)
@@ -636,9 +610,6 @@ bus_create(struct controlvm_message *inmsg)
pmsg_hdr = kzalloc(sizeof(*pmsg_hdr),
GFP_KERNEL);
if (!pmsg_hdr) {
- POSTCODE_LINUX(MALLOC_FAILURE_PC, cmd,
- bus_info->chipset_bus_no,
- DIAG_SEVERITY_ERR);
err = -ENOMEM;
goto err_free_bus_info;
}
@@ -654,19 +625,22 @@ bus_create(struct controlvm_message *inmsg)
cmd->create_bus.bus_data_type_uuid);
if (!visorchannel) {
- POSTCODE_LINUX(BUS_CREATE_FAILURE_PC, 0, bus_no,
- DIAG_SEVERITY_ERR);
err = -ENOMEM;
goto err_free_pending_msg;
}
bus_info->visorchannel = visorchannel;
/* Response will be handled by chipset_bus_create */
- chipset_bus_create(bus_info);
+ err = chipset_bus_create(bus_info);
+ /* If error, chipset_bus_create didn't respond, so respond here */
+ if (err)
+ goto err_destroy_channel;
- POSTCODE_LINUX(BUS_CREATE_EXIT_PC, 0, bus_no, DIAG_SEVERITY_PRINT);
return 0;
+err_destroy_channel:
+ visorchannel_destroy(visorchannel);
+
err_free_pending_msg:
kfree(bus_info->pending_msg_hdr);
@@ -675,7 +649,7 @@ err_free_bus_info:
err_respond:
if (inmsg->hdr.flags.response_expected == 1)
- bus_responder(inmsg->hdr.id, &inmsg->hdr, err);
+ controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
return err;
}
@@ -705,9 +679,6 @@ bus_destroy(struct controlvm_message *inmsg)
if (inmsg->hdr.flags.response_expected == 1) {
pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
if (!pmsg_hdr) {
- POSTCODE_LINUX(MALLOC_FAILURE_PC, cmd,
- bus_info->chipset_bus_no,
- DIAG_SEVERITY_ERR);
err = -ENOMEM;
goto err_respond;
}
@@ -723,7 +694,7 @@ bus_destroy(struct controlvm_message *inmsg)
err_respond:
if (inmsg->hdr.flags.response_expected == 1)
- bus_responder(inmsg->hdr.id, &inmsg->hdr, err);
+ controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
return err;
}
@@ -737,23 +708,14 @@ bus_configure(struct controlvm_message *inmsg,
int err = 0;
bus_no = cmd->configure_bus.bus_no;
- POSTCODE_LINUX(BUS_CONFIGURE_ENTRY_PC, 0, bus_no,
- DIAG_SEVERITY_PRINT);
-
bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
if (!bus_info) {
- POSTCODE_LINUX(BUS_CONFIGURE_FAILURE_PC, 0, bus_no,
- DIAG_SEVERITY_ERR);
err = -EINVAL;
goto err_respond;
} else if (bus_info->state.created == 0) {
- POSTCODE_LINUX(BUS_CONFIGURE_FAILURE_PC, 0, bus_no,
- DIAG_SEVERITY_ERR);
err = -EINVAL;
goto err_respond;
} else if (bus_info->pending_msg_hdr) {
- POSTCODE_LINUX(BUS_CONFIGURE_FAILURE_PC, 0, bus_no,
- DIAG_SEVERITY_ERR);
err = -EIO;
goto err_respond;
}
@@ -769,16 +731,15 @@ bus_configure(struct controlvm_message *inmsg,
bus_info->name = parser_name_get(parser_ctx);
}
- POSTCODE_LINUX(BUS_CONFIGURE_EXIT_PC, 0, bus_no,
- DIAG_SEVERITY_PRINT);
-
if (inmsg->hdr.flags.response_expected == 1)
- bus_responder(inmsg->hdr.id, &inmsg->hdr, err);
+ controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
return 0;
err_respond:
+ dev_err(&chipset_dev->acpi_device->dev,
+ "bus_configured exited with err: %d\n", err);
if (inmsg->hdr.flags.response_expected == 1)
- bus_responder(inmsg->hdr.id, &inmsg->hdr, err);
+ controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
return err;
}
@@ -796,31 +757,29 @@ my_device_create(struct controlvm_message *inmsg)
bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
if (!bus_info) {
- POSTCODE_LINUX(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
- DIAG_SEVERITY_ERR);
+ dev_err(&chipset_dev->acpi_device->dev,
+ "failed to get bus by id: %d\n", bus_no);
err = -ENODEV;
goto err_respond;
}
if (bus_info->state.created == 0) {
- POSTCODE_LINUX(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
- DIAG_SEVERITY_ERR);
+ dev_err(&chipset_dev->acpi_device->dev,
+ "bus not created, id: %d\n", bus_no);
err = -EINVAL;
goto err_respond;
}
dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
if (dev_info && (dev_info->state.created == 1)) {
- POSTCODE_LINUX(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
- DIAG_SEVERITY_ERR);
+ dev_err(&chipset_dev->acpi_device->dev,
+ "failed to get bus by id: %d/%d\n", bus_no, dev_no);
err = -EEXIST;
goto err_respond;
}
dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL);
if (!dev_info) {
- POSTCODE_LINUX(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
- DIAG_SEVERITY_ERR);
err = -ENOMEM;
goto err_respond;
}
@@ -832,9 +791,6 @@ my_device_create(struct controlvm_message *inmsg)
/* not sure where the best place to set the 'parent' */
dev_info->device.parent = &bus_info->device;
- POSTCODE_LINUX(DEVICE_CREATE_ENTRY_PC, dev_no, bus_no,
- DIAG_SEVERITY_PRINT);
-
visorchannel =
visorchannel_create_with_lock(cmd->create_device.channel_addr,
cmd->create_device.channel_bytes,
@@ -842,8 +798,9 @@ my_device_create(struct controlvm_message *inmsg)
cmd->create_device.data_type_uuid);
if (!visorchannel) {
- POSTCODE_LINUX(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
- DIAG_SEVERITY_ERR);
+ dev_err(&chipset_dev->acpi_device->dev,
+ "failed to create visorchannel: %d/%d\n",
+ bus_no, dev_no);
err = -ENOMEM;
goto err_free_dev_info;
}
@@ -853,14 +810,14 @@ my_device_create(struct controlvm_message *inmsg)
spar_vhba_channel_protocol_uuid) == 0) {
err = save_crash_message(inmsg, CRASH_DEV);
if (err)
- goto err_free_dev_info;
+ goto err_destroy_visorchannel;
}
if (inmsg->hdr.flags.response_expected == 1) {
pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
if (!pmsg_hdr) {
err = -ENOMEM;
- goto err_free_dev_info;
+ goto err_destroy_visorchannel;
}
memcpy(pmsg_hdr, &inmsg->hdr,
@@ -868,17 +825,21 @@ my_device_create(struct controlvm_message *inmsg)
dev_info->pending_msg_hdr = pmsg_hdr;
}
/* Chipset_device_create will send response */
- chipset_device_create(dev_info);
- POSTCODE_LINUX(DEVICE_CREATE_EXIT_PC, dev_no, bus_no,
- DIAG_SEVERITY_PRINT);
+ err = chipset_device_create(dev_info);
+ if (err)
+ goto err_destroy_visorchannel;
+
return 0;
+err_destroy_visorchannel:
+ visorchannel_destroy(visorchannel);
+
err_free_dev_info:
kfree(dev_info);
err_respond:
if (inmsg->hdr.flags.response_expected == 1)
- device_responder(inmsg->hdr.id, &inmsg->hdr, err);
+ controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
return err;
}
@@ -891,18 +852,14 @@ my_device_changestate(struct controlvm_message *inmsg)
u32 dev_no = cmd->device_change_state.dev_no;
struct spar_segment_state state = cmd->device_change_state.state;
struct visor_device *dev_info;
- int err;
+ int err = 0;
dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
if (!dev_info) {
- POSTCODE_LINUX(DEVICE_CHANGESTATE_FAILURE_PC, dev_no, bus_no,
- DIAG_SEVERITY_ERR);
err = -ENODEV;
goto err_respond;
}
if (dev_info->state.created == 0) {
- POSTCODE_LINUX(DEVICE_CHANGESTATE_FAILURE_PC, dev_no, bus_no,
- DIAG_SEVERITY_ERR);
err = -EINVAL;
goto err_respond;
}
@@ -926,7 +883,7 @@ my_device_changestate(struct controlvm_message *inmsg)
if (state.alive == segment_state_running.alive &&
state.operating == segment_state_running.operating)
/* Response will be sent from chipset_device_resume */
- chipset_device_resume(dev_info);
+ err = chipset_device_resume(dev_info);
/* ServerNotReady / ServerLost / SegmentStateStandby */
else if (state.alive == segment_state_standby.alive &&
state.operating == segment_state_standby.operating)
@@ -934,12 +891,16 @@ my_device_changestate(struct controlvm_message *inmsg)
* technically this is standby case where server is lost.
* Response will be sent from chipset_device_pause.
*/
- chipset_device_pause(dev_info);
+ err = chipset_device_pause(dev_info);
+ if (err)
+ goto err_respond;
+
return 0;
err_respond:
+ dev_err(&chipset_dev->acpi_device->dev, "failed: %d\n", err);
if (inmsg->hdr.flags.response_expected == 1)
- device_responder(inmsg->hdr.id, &inmsg->hdr, err);
+ controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
return err;
}
@@ -985,7 +946,7 @@ my_device_destroy(struct controlvm_message *inmsg)
err_respond:
if (inmsg->hdr.flags.response_expected == 1)
- device_responder(inmsg->hdr.id, &inmsg->hdr, err);
+ controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
return err;
}
@@ -1004,7 +965,7 @@ err_respond:
#define PARAHOTPLUG_TIMEOUT_MS 2000
-/**
+/*
* parahotplug_next_id() - generate unique int to match an outstanding
* CONTROLVM message with a udev script /sys
* response
@@ -1019,7 +980,7 @@ parahotplug_next_id(void)
return atomic_inc_return(&id);
}
-/**
+/*
* parahotplug_next_expiration() - returns the time (in jiffies) when a
* CONTROLVM message on the list should expire
* -- PARAHOTPLUG_TIMEOUT_MS in the future
@@ -1032,7 +993,7 @@ parahotplug_next_expiration(void)
return jiffies + msecs_to_jiffies(PARAHOTPLUG_TIMEOUT_MS);
}
-/**
+/*
* parahotplug_request_create() - create a parahotplug_request, which is
* basically a wrapper for a CONTROLVM_MESSAGE
* that we can stick on a list
@@ -1045,7 +1006,7 @@ parahotplug_request_create(struct controlvm_message *msg)
{
struct parahotplug_request *req;
- req = kmalloc(sizeof(*req), GFP_KERNEL | __GFP_NORETRY);
+ req = kmalloc(sizeof(*req), GFP_KERNEL);
if (!req)
return NULL;
@@ -1056,7 +1017,7 @@ parahotplug_request_create(struct controlvm_message *msg)
return req;
}
-/**
+/*
* parahotplug_request_destroy() - free a parahotplug_request
* @req: the request to deallocate
*/
@@ -1069,7 +1030,7 @@ parahotplug_request_destroy(struct parahotplug_request *req)
static LIST_HEAD(parahotplug_request_list);
static DEFINE_SPINLOCK(parahotplug_request_list_lock); /* lock for above */
-/**
+/*
* parahotplug_request_complete() - mark request as complete
* @id: the id of the request
* @active: indicates whether the request is assigned to active partition
@@ -1101,9 +1062,9 @@ parahotplug_request_complete(int id, u16 active)
spin_unlock(&parahotplug_request_list_lock);
req->msg.cmd.device_change_state.state.active = active;
if (req->msg.hdr.flags.response_expected)
- controlvm_respond_physdev_changestate(
- &req->msg.hdr, CONTROLVM_RESP_SUCCESS,
- req->msg.cmd.device_change_state.state);
+ controlvm_respond(
+ &req->msg.hdr, CONTROLVM_RESP_SUCCESS,
+ &req->msg.cmd.device_change_state.state);
parahotplug_request_destroy(req);
return 0;
}
@@ -1113,7 +1074,7 @@ parahotplug_request_complete(int id, u16 active)
return -EINVAL;
}
-/**
+/*
* devicedisabled_store() - disables the hotplug device
* @dev: sysfs interface variable not utilized in this function
* @attr: sysfs interface variable not utilized in this function
@@ -1143,7 +1104,7 @@ static ssize_t devicedisabled_store(struct device *dev,
}
static DEVICE_ATTR_WO(devicedisabled);
-/**
+/*
* deviceenabled_store() - enables the hotplug device
* @dev: sysfs interface variable not utilized in this function
* @attr: sysfs interface variable not utilized in this function
@@ -1201,26 +1162,14 @@ static const struct attribute_group *visorchipset_dev_groups[] = {
NULL
};
-static void visorchipset_dev_release(struct device *dev)
-{
-}
-
-/* /sys/devices/platform/visorchipset */
-static struct platform_device visorchipset_platform_device = {
- .name = "visorchipset",
- .id = -1,
- .dev.groups = visorchipset_dev_groups,
- .dev.release = visorchipset_dev_release,
-};
-
-/**
+/*
* parahotplug_request_kickoff() - initiate parahotplug request
* @req: the request to initiate
*
* Cause uevent to run the user level script to do the disable/enable specified
* in the parahotplug_request.
*/
-static void
+static int
parahotplug_request_kickoff(struct parahotplug_request *req)
{
struct controlvm_message_packet *cmd = &req->msg.cmd;
@@ -1241,56 +1190,59 @@ parahotplug_request_kickoff(struct parahotplug_request *req)
sprintf(env_func, "SPAR_PARAHOTPLUG_FUNCTION=%d",
cmd->device_change_state.dev_no & 0x7);
- kobject_uevent_env(&visorchipset_platform_device.dev.kobj, KOBJ_CHANGE,
- envp);
+ return kobject_uevent_env(&chipset_dev->acpi_device->dev.kobj,
+ KOBJ_CHANGE, envp);
}
-/**
+/*
* parahotplug_process_message() - enables or disables a PCI device by kicking
* off a udev script
* @inmsg: the message indicating whether to enable or disable
*/
-static void
+static int
parahotplug_process_message(struct controlvm_message *inmsg)
{
struct parahotplug_request *req;
+ int err;
req = parahotplug_request_create(inmsg);
if (!req)
- return;
+ return -ENOMEM;
+ /*
+ * For enable messages, just respond with success right away; we don't
+ * need to wait to see if the enable was successful.
+ */
if (inmsg->cmd.device_change_state.state.active) {
- /*
- * For enable messages, just respond with success
- * right away. This is a bit of a hack, but there are
- * issues with the early enable messages we get (with
- * either the udev script not detecting that the device
- * is up, or not getting called at all). Fortunately
- * the messages that get lost don't matter anyway, as
- *
- * devices are automatically enabled at
- * initialization.
- */
- parahotplug_request_kickoff(req);
- controlvm_respond_physdev_changestate
- (&inmsg->hdr,
- CONTROLVM_RESP_SUCCESS,
- inmsg->cmd.device_change_state.state);
+ err = parahotplug_request_kickoff(req);
+ if (err)
+ goto err_respond;
+ controlvm_respond(&inmsg->hdr, CONTROLVM_RESP_SUCCESS,
+ &inmsg->cmd.device_change_state.state);
parahotplug_request_destroy(req);
- } else {
- /*
- * For disable messages, add the request to the
- * request list before kicking off the udev script. It
- * won't get responded to until the script has
- * indicated it's done.
- */
- spin_lock(&parahotplug_request_list_lock);
- list_add_tail(&req->list, &parahotplug_request_list);
- spin_unlock(&parahotplug_request_list_lock);
-
- parahotplug_request_kickoff(req);
+ return 0;
}
+
+ /*
+ * For disable messages, add the request to the
+ * request list before kicking off the udev script. It
+ * won't get responded to until the script has
+ * indicated it's done.
+ */
+ spin_lock(&parahotplug_request_list_lock);
+ list_add_tail(&req->list, &parahotplug_request_list);
+ spin_unlock(&parahotplug_request_list_lock);
+
+ err = parahotplug_request_kickoff(req);
+ if (err)
+ goto err_respond;
+ return 0;
+
+err_respond:
+ controlvm_respond(&inmsg->hdr, err,
+ &inmsg->cmd.device_change_state.state);
+ return err;
}
/*
@@ -1303,12 +1255,15 @@ parahotplug_process_message(struct controlvm_message *inmsg)
static int
chipset_ready_uevent(struct controlvm_message_header *msg_hdr)
{
- kobject_uevent(&visorchipset_platform_device.dev.kobj, KOBJ_ONLINE);
+ int res;
+
+ res = kobject_uevent(&chipset_dev->acpi_device->dev.kobj,
+ KOBJ_ONLINE);
if (msg_hdr->flags.response_expected)
- return controlvm_respond(msg_hdr, CONTROLVM_RESP_SUCCESS);
+ controlvm_respond(msg_hdr, res, NULL);
- return 0;
+ return res;
}
/*
@@ -1323,15 +1278,16 @@ chipset_selftest_uevent(struct controlvm_message_header *msg_hdr)
{
char env_selftest[20];
char *envp[] = { env_selftest, NULL };
+ int res;
sprintf(env_selftest, "SPARSP_SELFTEST=%d", 1);
- kobject_uevent_env(&visorchipset_platform_device.dev.kobj, KOBJ_CHANGE,
- envp);
+ res = kobject_uevent_env(&chipset_dev->acpi_device->dev.kobj,
+ KOBJ_CHANGE, envp);
if (msg_hdr->flags.response_expected)
- return controlvm_respond(msg_hdr, CONTROLVM_RESP_SUCCESS);
+ controlvm_respond(msg_hdr, res, NULL);
- return 0;
+ return res;
}
/*
@@ -1344,28 +1300,62 @@ chipset_selftest_uevent(struct controlvm_message_header *msg_hdr)
static int
chipset_notready_uevent(struct controlvm_message_header *msg_hdr)
{
- kobject_uevent(&visorchipset_platform_device.dev.kobj, KOBJ_OFFLINE);
+ int res;
+ res = kobject_uevent(&chipset_dev->acpi_device->dev.kobj,
+ KOBJ_OFFLINE);
if (msg_hdr->flags.response_expected)
- return controlvm_respond(msg_hdr, CONTROLVM_RESP_SUCCESS);
+ controlvm_respond(msg_hdr, res, NULL);
- return 0;
+ return res;
}
-static inline unsigned int
-issue_vmcall_io_controlvm_addr(u64 *control_addr, u32 *control_bytes)
+static int unisys_vmcall(unsigned long tuple, unsigned long param)
{
- struct vmcall_io_controlvm_addr_params params;
- int result = VMCALL_SUCCESS;
- u64 physaddr;
+ int result = 0;
+ unsigned int cpuid_eax, cpuid_ebx, cpuid_ecx, cpuid_edx;
+ unsigned long reg_ebx;
+ unsigned long reg_ecx;
+
+ reg_ebx = param & 0xFFFFFFFF;
+ reg_ecx = param >> 32;
+
+ cpuid(0x00000001, &cpuid_eax, &cpuid_ebx, &cpuid_ecx, &cpuid_edx);
+ if (!(cpuid_ecx & 0x80000000))
+ return -EPERM;
+
+ __asm__ __volatile__(".byte 0x00f, 0x001, 0x0c1" : "=a"(result) :
+ "a"(tuple), "b"(reg_ebx), "c"(reg_ecx));
+
+ if (result)
+ goto error;
- physaddr = virt_to_phys(&params);
- ISSUE_IO_VMCALL(VMCALL_IO_CONTROLVM_ADDR, physaddr, result);
- if (VMCALL_SUCCESSFUL(result)) {
- *control_addr = params.address;
- *control_bytes = params.channel_bytes;
+ return 0;
+
+error: /* Need to convert from VMCALL error codes to Linux */
+ switch (result) {
+ case VMCALL_RESULT_INVALID_PARAM:
+ return -EINVAL;
+ case VMCALL_RESULT_DATA_UNAVAILABLE:
+ return -ENODEV;
+ default:
+ return -EFAULT;
}
- return result;
+}
+static unsigned int
+issue_vmcall_io_controlvm_addr(u64 *control_addr, u32 *control_bytes)
+{
+ chipset_dev->controlvm_addr.physaddr = virt_to_phys(
+ &chipset_dev->controlvm_addr.params);
+ chipset_dev->controlvm_addr.err = unisys_vmcall(VMCALL_CONTROLVM_ADDR,
+ chipset_dev->controlvm_addr.physaddr);
+ if (chipset_dev->controlvm_addr.err)
+ return chipset_dev->controlvm_addr.err;
+
+ *control_addr = chipset_dev->controlvm_addr.params.address;
+ *control_bytes = chipset_dev->controlvm_addr.params.channel_bytes;
+
+ return 0;
}
static u64 controlvm_get_channel_address(void)
@@ -1373,7 +1363,7 @@ static u64 controlvm_get_channel_address(void)
u64 addr = 0;
u32 size = 0;
- if (!VMCALL_SUCCESSFUL(issue_vmcall_io_controlvm_addr(&addr, &size)))
+ if (issue_vmcall_io_controlvm_addr(&addr, &size))
return 0;
return addr;
@@ -1388,8 +1378,6 @@ setup_crash_devices_work_queue(struct work_struct *work)
u32 local_crash_msg_offset;
u16 local_crash_msg_count;
- POSTCODE_LINUX(CRASH_DEV_ENTRY_PC, 0, 0, DIAG_SEVERITY_PRINT);
-
/* send init chipset msg */
msg.hdr.id = CONTROLVM_CHIPSET_INIT;
msg.cmd.init_chipset.bus_count = 23;
@@ -1398,71 +1386,67 @@ setup_crash_devices_work_queue(struct work_struct *work)
chipset_init(&msg);
/* get saved message count */
- if (visorchannel_read(controlvm_channel,
+ if (visorchannel_read(chipset_dev->controlvm_channel,
offsetof(struct spar_controlvm_channel_protocol,
saved_crash_message_count),
&local_crash_msg_count, sizeof(u16)) < 0) {
- POSTCODE_LINUX(CRASH_DEV_CTRL_RD_FAILURE_PC, 0, 0,
- DIAG_SEVERITY_ERR);
+ dev_err(&chipset_dev->acpi_device->dev,
+ "failed to read channel\n");
return;
}
if (local_crash_msg_count != CONTROLVM_CRASHMSG_MAX) {
- POSTCODE_LINUX(CRASH_DEV_COUNT_FAILURE_PC, 0,
- local_crash_msg_count,
- DIAG_SEVERITY_ERR);
+ dev_err(&chipset_dev->acpi_device->dev,
+ "invalid count\n");
return;
}
/* get saved crash message offset */
- if (visorchannel_read(controlvm_channel,
+ if (visorchannel_read(chipset_dev->controlvm_channel,
offsetof(struct spar_controlvm_channel_protocol,
saved_crash_message_offset),
&local_crash_msg_offset, sizeof(u32)) < 0) {
- POSTCODE_LINUX(CRASH_DEV_CTRL_RD_FAILURE_PC, 0, 0,
- DIAG_SEVERITY_ERR);
+ dev_err(&chipset_dev->acpi_device->dev,
+ "failed to read channel\n");
return;
}
/* read create device message for storage bus offset */
- if (visorchannel_read(controlvm_channel,
+ if (visorchannel_read(chipset_dev->controlvm_channel,
local_crash_msg_offset,
&local_crash_bus_msg,
sizeof(struct controlvm_message)) < 0) {
- POSTCODE_LINUX(CRASH_DEV_RD_BUS_FAILURE_PC, 0, 0,
- DIAG_SEVERITY_ERR);
+ dev_err(&chipset_dev->acpi_device->dev,
+ "failed to read channel\n");
return;
}
/* read create device message for storage device */
- if (visorchannel_read(controlvm_channel,
+ if (visorchannel_read(chipset_dev->controlvm_channel,
local_crash_msg_offset +
sizeof(struct controlvm_message),
&local_crash_dev_msg,
sizeof(struct controlvm_message)) < 0) {
- POSTCODE_LINUX(CRASH_DEV_RD_DEV_FAILURE_PC, 0, 0,
- DIAG_SEVERITY_ERR);
+ dev_err(&chipset_dev->acpi_device->dev,
+ "failed to read channel\n");
return;
}
/* reuse IOVM create bus message */
- if (local_crash_bus_msg.cmd.create_bus.channel_addr) {
- bus_create(&local_crash_bus_msg);
- } else {
- POSTCODE_LINUX(CRASH_DEV_BUS_NULL_FAILURE_PC, 0, 0,
- DIAG_SEVERITY_ERR);
+ if (!local_crash_bus_msg.cmd.create_bus.channel_addr) {
+ dev_err(&chipset_dev->acpi_device->dev,
+ "no valid create_bus message\n");
return;
}
+ bus_create(&local_crash_bus_msg);
/* reuse create device message for storage device */
- if (local_crash_dev_msg.cmd.create_device.channel_addr) {
- my_device_create(&local_crash_dev_msg);
- } else {
- POSTCODE_LINUX(CRASH_DEV_DEV_NULL_FAILURE_PC, 0, 0,
- DIAG_SEVERITY_ERR);
+ if (!local_crash_dev_msg.cmd.create_device.channel_addr) {
+ dev_err(&chipset_dev->acpi_device->dev,
+ "no valid create_device message\n");
return;
}
- POSTCODE_LINUX(CRASH_DEV_EXIT_PC, 0, 0, DIAG_SEVERITY_PRINT);
+ my_device_create(&local_crash_dev_msg);
}
void
@@ -1471,8 +1455,8 @@ bus_create_response(struct visor_device *bus_info, int response)
if (response >= 0)
bus_info->state.created = 1;
- bus_responder(CONTROLVM_BUS_CREATE, bus_info->pending_msg_hdr,
- response);
+ controlvm_responder(CONTROLVM_BUS_CREATE, bus_info->pending_msg_hdr,
+ response);
kfree(bus_info->pending_msg_hdr);
bus_info->pending_msg_hdr = NULL;
@@ -1481,8 +1465,8 @@ bus_create_response(struct visor_device *bus_info, int response)
void
bus_destroy_response(struct visor_device *bus_info, int response)
{
- bus_responder(CONTROLVM_BUS_DESTROY, bus_info->pending_msg_hdr,
- response);
+ controlvm_responder(CONTROLVM_BUS_DESTROY, bus_info->pending_msg_hdr,
+ response);
kfree(bus_info->pending_msg_hdr);
bus_info->pending_msg_hdr = NULL;
@@ -1494,8 +1478,8 @@ device_create_response(struct visor_device *dev_info, int response)
if (response >= 0)
dev_info->state.created = 1;
- device_responder(CONTROLVM_DEVICE_CREATE, dev_info->pending_msg_hdr,
- response);
+ controlvm_responder(CONTROLVM_DEVICE_CREATE, dev_info->pending_msg_hdr,
+ response);
kfree(dev_info->pending_msg_hdr);
dev_info->pending_msg_hdr = NULL;
@@ -1504,8 +1488,8 @@ device_create_response(struct visor_device *dev_info, int response)
void
device_destroy_response(struct visor_device *dev_info, int response)
{
- device_responder(CONTROLVM_DEVICE_DESTROY, dev_info->pending_msg_hdr,
- response);
+ controlvm_responder(CONTROLVM_DEVICE_DESTROY, dev_info->pending_msg_hdr,
+ response);
kfree(dev_info->pending_msg_hdr);
dev_info->pending_msg_hdr = NULL;
@@ -1534,136 +1518,6 @@ device_resume_response(struct visor_device *dev_info, int response)
dev_info->pending_msg_hdr = NULL;
}
-static int
-visorchipset_mmap(struct file *file, struct vm_area_struct *vma)
-{
- unsigned long physaddr = 0;
- unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
- u64 addr = 0;
-
- /* sv_enable_dfp(); */
- if (offset & (PAGE_SIZE - 1))
- return -ENXIO; /* need aligned offsets */
-
- switch (offset) {
- case VISORCHIPSET_MMAP_CONTROLCHANOFFSET:
- vma->vm_flags |= VM_IO;
- if (!*file_controlvm_channel)
- return -ENXIO;
-
- visorchannel_read
- (*file_controlvm_channel,
- offsetof(struct spar_controlvm_channel_protocol,
- gp_control_channel),
- &addr, sizeof(addr));
- if (!addr)
- return -ENXIO;
-
- physaddr = (unsigned long)addr;
- if (remap_pfn_range(vma, vma->vm_start,
- physaddr >> PAGE_SHIFT,
- vma->vm_end - vma->vm_start,
- /*pgprot_noncached */
- (vma->vm_page_prot))) {
- return -EAGAIN;
- }
- break;
- default:
- return -ENXIO;
- }
- return 0;
-}
-
-static inline s64 issue_vmcall_query_guest_virtual_time_offset(void)
-{
- u64 result = VMCALL_SUCCESS;
- u64 physaddr = 0;
-
- ISSUE_IO_VMCALL(VMCALL_QUERY_GUEST_VIRTUAL_TIME_OFFSET, physaddr,
- result);
- return result;
-}
-
-static inline int issue_vmcall_update_physical_time(u64 adjustment)
-{
- int result = VMCALL_SUCCESS;
-
- ISSUE_IO_VMCALL(VMCALL_UPDATE_PHYSICAL_TIME, adjustment, result);
- return result;
-}
-
-static long visorchipset_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- u64 adjustment;
- s64 vrtc_offset;
-
- switch (cmd) {
- case VMCALL_QUERY_GUEST_VIRTUAL_TIME_OFFSET:
- /* get the physical rtc offset */
- vrtc_offset = issue_vmcall_query_guest_virtual_time_offset();
- if (copy_to_user((void __user *)arg, &vrtc_offset,
- sizeof(vrtc_offset))) {
- return -EFAULT;
- }
- return 0;
- case VMCALL_UPDATE_PHYSICAL_TIME:
- if (copy_from_user(&adjustment, (void __user *)arg,
- sizeof(adjustment))) {
- return -EFAULT;
- }
- return issue_vmcall_update_physical_time(adjustment);
- default:
- return -EFAULT;
- }
-}
-
-static const struct file_operations visorchipset_fops = {
- .owner = THIS_MODULE,
- .open = visorchipset_open,
- .read = NULL,
- .write = NULL,
- .unlocked_ioctl = visorchipset_ioctl,
- .release = visorchipset_release,
- .mmap = visorchipset_mmap,
-};
-
-static int
-visorchipset_file_init(dev_t major_dev, struct visorchannel **controlvm_channel)
-{
- int rc = 0;
-
- file_controlvm_channel = controlvm_channel;
- cdev_init(&file_cdev, &visorchipset_fops);
- file_cdev.owner = THIS_MODULE;
- if (MAJOR(major_dev) == 0) {
- rc = alloc_chrdev_region(&major_dev, 0, 1, "visorchipset");
- /* dynamic major device number registration required */
- if (rc < 0)
- return rc;
- } else {
- /* static major device number registration required */
- rc = register_chrdev_region(major_dev, 1, "visorchipset");
- if (rc < 0)
- return rc;
- }
- rc = cdev_add(&file_cdev, MKDEV(MAJOR(major_dev), 0), 1);
- if (rc < 0) {
- unregister_chrdev_region(major_dev, 1);
- return rc;
- }
- return 0;
-}
-
-static void
-visorchipset_file_cleanup(dev_t major_dev)
-{
- if (file_cdev.ops)
- cdev_del(&file_cdev);
- file_cdev.ops = NULL;
- unregister_chrdev_region(major_dev, 1);
-}
-
static struct parser_context *
parser_init_byte_stream(u64 addr, u32 bytes, bool local, bool *retry)
{
@@ -1677,12 +1531,12 @@ parser_init_byte_stream(u64 addr, u32 bytes, bool local, bool *retry)
* '\0'-terminated
*/
allocbytes++;
- if ((controlvm_payload_bytes_buffered + bytes)
+ if ((chipset_dev->controlvm_payload_bytes_buffered + bytes)
> MAX_CONTROLVM_PAYLOAD_BYTES) {
*retry = true;
return NULL;
}
- ctx = kzalloc(allocbytes, GFP_KERNEL | __GFP_NORETRY);
+ ctx = kzalloc(allocbytes, GFP_KERNEL);
if (!ctx) {
*retry = true;
return NULL;
@@ -1710,7 +1564,7 @@ parser_init_byte_stream(u64 addr, u32 bytes, bool local, bool *retry)
}
ctx->byte_stream = true;
- controlvm_payload_bytes_buffered += ctx->param_bytes;
+ chipset_dev->controlvm_payload_bytes_buffered += ctx->param_bytes;
return ctx;
@@ -1719,22 +1573,20 @@ err_finish_ctx:
return NULL;
}
-/**
+/*
* handle_command() - process a controlvm message
* @inmsg: the message to process
* @channel_addr: address of the controlvm channel
*
* Return:
- * false - this function will return false only in the case where the
- * controlvm message was NOT processed, but processing must be
- * retried before reading the next controlvm message; a
- * scenario where this can occur is when we need to throttle
- * the allocation of memory in which to copy out controlvm
- * payload data
- * true - processing of the controlvm message completed,
- * either successfully or with an error
+ * 0 - Successfully processed the message
+ * -EAGAIN - ControlVM message was not processed and should be retried
+ * before reading the next controlvm message; a scenario where this can
+ * occur is when we need to throttle the allocation of memory in
+ * which to copy out controlvm payload data.
+ * < 0 - error: ControlVM message was processed but an error occurred.
*/
-static bool
+static int
handle_command(struct controlvm_message inmsg, u64 channel_addr)
{
struct controlvm_message_packet *cmd = &inmsg.cmd;
@@ -1743,11 +1595,13 @@ handle_command(struct controlvm_message inmsg, u64 channel_addr)
struct parser_context *parser_ctx = NULL;
bool local_addr;
struct controlvm_message ackmsg;
+ int err = 0;
/* create parsing context if necessary */
local_addr = (inmsg.hdr.flags.test_message == 1);
if (channel_addr == 0)
- return true;
+ return -EINVAL;
+
parm_addr = channel_addr + inmsg.hdr.payload_vm_offset;
parm_bytes = inmsg.hdr.payload_bytes;
@@ -1763,66 +1617,69 @@ handle_command(struct controlvm_message inmsg, u64 channel_addr)
parser_init_byte_stream(parm_addr, parm_bytes,
local_addr, &retry);
if (!parser_ctx && retry)
- return false;
+ return -EAGAIN;
}
if (!local_addr) {
controlvm_init_response(&ackmsg, &inmsg.hdr,
CONTROLVM_RESP_SUCCESS);
- if (controlvm_channel)
- visorchannel_signalinsert(controlvm_channel,
- CONTROLVM_QUEUE_ACK,
- &ackmsg);
+ err = visorchannel_signalinsert(chipset_dev->controlvm_channel,
+ CONTROLVM_QUEUE_ACK,
+ &ackmsg);
+ if (err)
+ return err;
}
switch (inmsg.hdr.id) {
case CONTROLVM_CHIPSET_INIT:
- chipset_init(&inmsg);
+ err = chipset_init(&inmsg);
break;
case CONTROLVM_BUS_CREATE:
- bus_create(&inmsg);
+ err = bus_create(&inmsg);
break;
case CONTROLVM_BUS_DESTROY:
- bus_destroy(&inmsg);
+ err = bus_destroy(&inmsg);
break;
case CONTROLVM_BUS_CONFIGURE:
- bus_configure(&inmsg, parser_ctx);
+ err = bus_configure(&inmsg, parser_ctx);
break;
case CONTROLVM_DEVICE_CREATE:
- my_device_create(&inmsg);
+ err = my_device_create(&inmsg);
break;
case CONTROLVM_DEVICE_CHANGESTATE:
if (cmd->device_change_state.flags.phys_device) {
- parahotplug_process_message(&inmsg);
+ err = parahotplug_process_message(&inmsg);
} else {
/*
* save the hdr and cmd structures for later use
* when sending back the response to Command
*/
- my_device_changestate(&inmsg);
+ err = my_device_changestate(&inmsg);
break;
}
break;
case CONTROLVM_DEVICE_DESTROY:
- my_device_destroy(&inmsg);
+ err = my_device_destroy(&inmsg);
break;
case CONTROLVM_DEVICE_CONFIGURE:
- /* no op for now, just send a respond that we passed */
+ /* no op; just send a response that we passed */
if (inmsg.hdr.flags.response_expected)
- controlvm_respond(&inmsg.hdr, CONTROLVM_RESP_SUCCESS);
+ controlvm_respond(&inmsg.hdr, CONTROLVM_RESP_SUCCESS,
+ NULL);
break;
case CONTROLVM_CHIPSET_READY:
- chipset_ready_uevent(&inmsg.hdr);
+ err = chipset_ready_uevent(&inmsg.hdr);
break;
case CONTROLVM_CHIPSET_SELFTEST:
- chipset_selftest_uevent(&inmsg.hdr);
+ err = chipset_selftest_uevent(&inmsg.hdr);
break;
case CONTROLVM_CHIPSET_STOP:
- chipset_notready_uevent(&inmsg.hdr);
+ err = chipset_notready_uevent(&inmsg.hdr);
break;
default:
+ err = -ENOMSG;
if (inmsg.hdr.flags.response_expected)
- controlvm_respond
- (&inmsg.hdr, -CONTROLVM_RESP_ID_UNKNOWN);
+ controlvm_respond(&inmsg.hdr,
+ -CONTROLVM_RESP_ID_UNKNOWN, NULL);
break;
}
@@ -1830,31 +1687,35 @@ handle_command(struct controlvm_message inmsg, u64 channel_addr)
parser_done(parser_ctx);
parser_ctx = NULL;
}
- return true;
+ return err;
}
-/**
+/*
* read_controlvm_event() - retrieves the next message from the
* CONTROLVM_QUEUE_EVENT queue in the controlvm
* channel
* @msg: pointer to the retrieved message
*
- * Return: true if a valid message was retrieved or false otherwise
+ * Return: 0 if a valid message was retrieved or -error
*/
-static bool
+static int
read_controlvm_event(struct controlvm_message *msg)
{
- if (!visorchannel_signalremove(controlvm_channel,
- CONTROLVM_QUEUE_EVENT, msg)) {
- /* got a message */
- if (msg->hdr.flags.test_message == 1)
- return false;
- return true;
- }
- return false;
+ int err;
+
+ err = visorchannel_signalremove(chipset_dev->controlvm_channel,
+ CONTROLVM_QUEUE_EVENT, msg);
+ if (err)
+ return err;
+
+ /* got a message */
+ if (msg->hdr.flags.test_message == 1)
+ return -EINVAL;
+
+ return 0;
}
-/**
+/*
* parahotplug_process_list() - remove any request from the list that's been on
* there too long and respond with an error
*/
@@ -1875,10 +1736,10 @@ parahotplug_process_list(void)
list_del(pos);
if (req->msg.hdr.flags.response_expected)
- controlvm_respond_physdev_changestate(
+ controlvm_respond(
&req->msg.hdr,
CONTROLVM_RESP_DEVICE_UDEV_TIMEOUT,
- req->msg.cmd.device_change_state.state);
+ &req->msg.cmd.device_change_state.state);
parahotplug_request_destroy(req);
}
@@ -1889,67 +1750,70 @@ static void
controlvm_periodic_work(struct work_struct *work)
{
struct controlvm_message inmsg;
- bool got_command = false;
- bool handle_command_failed = false;
-
- while (!visorchannel_signalremove(controlvm_channel,
- CONTROLVM_QUEUE_RESPONSE,
- &inmsg))
- ;
- if (!got_command) {
- if (controlvm_pending_msg_valid) {
- /*
- * we throttled processing of a prior
- * msg, so try to process it again
- * rather than reading a new one
- */
- inmsg = controlvm_pending_msg;
- controlvm_pending_msg_valid = false;
- got_command = true;
- } else {
- got_command = read_controlvm_event(&inmsg);
- }
+ int count = 0;
+ int err;
+
+	/* Drain the RESPONSE queue to make it empty */
+ do {
+ err = visorchannel_signalremove(chipset_dev->controlvm_channel,
+ CONTROLVM_QUEUE_RESPONSE,
+ &inmsg);
+ } while ((!err) && (++count < CONTROLVM_MESSAGE_MAX));
+
+ if (err != -EAGAIN)
+ goto schedule_out;
+
+ if (chipset_dev->controlvm_pending_msg_valid) {
+ /*
+ * we throttled processing of a prior
+ * msg, so try to process it again
+ * rather than reading a new one
+ */
+ inmsg = chipset_dev->controlvm_pending_msg;
+ chipset_dev->controlvm_pending_msg_valid = false;
+ err = 0;
+ } else {
+ err = read_controlvm_event(&inmsg);
}
- handle_command_failed = false;
- while (got_command && (!handle_command_failed)) {
- most_recent_message_jiffies = jiffies;
- if (handle_command(inmsg,
- visorchannel_get_physaddr
- (controlvm_channel)))
- got_command = read_controlvm_event(&inmsg);
- else {
- /*
- * this is a scenario where throttling
- * is required, but probably NOT an
- * error...; we stash the current
- * controlvm msg so we will attempt to
- * reprocess it on our next loop
- */
- handle_command_failed = true;
- controlvm_pending_msg = inmsg;
- controlvm_pending_msg_valid = true;
+ while (!err) {
+ chipset_dev->most_recent_message_jiffies = jiffies;
+ err = handle_command(inmsg,
+ visorchannel_get_physaddr
+ (chipset_dev->controlvm_channel));
+ if (err == -EAGAIN) {
+ chipset_dev->controlvm_pending_msg = inmsg;
+ chipset_dev->controlvm_pending_msg_valid = true;
+ break;
}
+
+ err = read_controlvm_event(&inmsg);
}
/* parahotplug_worker */
parahotplug_process_list();
- if (time_after(jiffies,
- most_recent_message_jiffies + (HZ * MIN_IDLE_SECONDS))) {
+schedule_out:
+ if (time_after(jiffies, chipset_dev->most_recent_message_jiffies +
+ (HZ * MIN_IDLE_SECONDS))) {
/*
* it's been longer than MIN_IDLE_SECONDS since we
* processed our last controlvm message; slow down the
* polling
*/
- if (poll_jiffies != POLLJIFFIES_CONTROLVMCHANNEL_SLOW)
- poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
+ if (chipset_dev->poll_jiffies !=
+ POLLJIFFIES_CONTROLVMCHANNEL_SLOW)
+ chipset_dev->poll_jiffies =
+ POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
} else {
- if (poll_jiffies != POLLJIFFIES_CONTROLVMCHANNEL_FAST)
- poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
+ if (chipset_dev->poll_jiffies !=
+ POLLJIFFIES_CONTROLVMCHANNEL_FAST)
+ chipset_dev->poll_jiffies =
+ POLLJIFFIES_CONTROLVMCHANNEL_FAST;
}
- schedule_delayed_work(&periodic_controlvm_work, poll_jiffies);
+ schedule_delayed_work(&chipset_dev->periodic_controlvm_work,
+ chipset_dev->poll_jiffies);
}
static int
@@ -1958,81 +1822,84 @@ visorchipset_init(struct acpi_device *acpi_device)
int err = -ENODEV;
u64 addr;
uuid_le uuid = SPAR_CONTROLVM_CHANNEL_PROTOCOL_UUID;
+ struct visorchannel *controlvm_channel;
+
+ chipset_dev = kzalloc(sizeof(*chipset_dev), GFP_KERNEL);
+ if (!chipset_dev)
+ goto error;
addr = controlvm_get_channel_address();
if (!addr)
goto error;
- controlvm_channel = visorchannel_create_with_lock(addr, 0,
- GFP_KERNEL, uuid);
+ acpi_device->driver_data = chipset_dev;
+
+ chipset_dev->acpi_device = acpi_device;
+ chipset_dev->poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
+ controlvm_channel = visorchannel_create_with_lock(addr,
+ 0, GFP_KERNEL, uuid);
+
if (!controlvm_channel)
- goto error;
+ goto error_free_chipset_dev;
- if (!SPAR_CONTROLVM_CHANNEL_OK_CLIENT(
- visorchannel_get_header(controlvm_channel)))
- goto error_destroy_channel;
+ chipset_dev->controlvm_channel = controlvm_channel;
- major_dev = MKDEV(visorchipset_major, 0);
- err = visorchipset_file_init(major_dev, &controlvm_channel);
+ err = sysfs_create_groups(&chipset_dev->acpi_device->dev.kobj,
+ visorchipset_dev_groups);
if (err < 0)
goto error_destroy_channel;
+ if (!SPAR_CONTROLVM_CHANNEL_OK_CLIENT(
+ visorchannel_get_header(controlvm_channel)))
+ goto error_delete_groups;
+
/* if booting in a crash kernel */
if (is_kdump_kernel())
- INIT_DELAYED_WORK(&periodic_controlvm_work,
+ INIT_DELAYED_WORK(&chipset_dev->periodic_controlvm_work,
setup_crash_devices_work_queue);
else
- INIT_DELAYED_WORK(&periodic_controlvm_work,
+ INIT_DELAYED_WORK(&chipset_dev->periodic_controlvm_work,
controlvm_periodic_work);
- most_recent_message_jiffies = jiffies;
- poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
- schedule_delayed_work(&periodic_controlvm_work, poll_jiffies);
-
- visorchipset_platform_device.dev.devt = major_dev;
- if (platform_device_register(&visorchipset_platform_device) < 0) {
- POSTCODE_LINUX(DEVICE_REGISTER_FAILURE_PC, 0, 0,
- DIAG_SEVERITY_ERR);
- err = -ENODEV;
- goto error_cancel_work;
- }
- POSTCODE_LINUX(CHIPSET_INIT_SUCCESS_PC, 0, 0, DIAG_SEVERITY_PRINT);
+ chipset_dev->most_recent_message_jiffies = jiffies;
+ chipset_dev->poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
+ schedule_delayed_work(&chipset_dev->periodic_controlvm_work,
+ chipset_dev->poll_jiffies);
err = visorbus_init();
if (err < 0)
- goto error_unregister;
+ goto error_cancel_work;
return 0;
-error_unregister:
- platform_device_unregister(&visorchipset_platform_device);
-
error_cancel_work:
- cancel_delayed_work_sync(&periodic_controlvm_work);
- visorchipset_file_cleanup(major_dev);
+ cancel_delayed_work_sync(&chipset_dev->periodic_controlvm_work);
+
+error_delete_groups:
+ sysfs_remove_groups(&chipset_dev->acpi_device->dev.kobj,
+ visorchipset_dev_groups);
error_destroy_channel:
- visorchannel_destroy(controlvm_channel);
+ visorchannel_destroy(chipset_dev->controlvm_channel);
+
+error_free_chipset_dev:
+ kfree(chipset_dev);
error:
- POSTCODE_LINUX(CHIPSET_INIT_FAILURE_PC, 0, err, DIAG_SEVERITY_ERR);
+ dev_err(&acpi_device->dev, "failed with error %d\n", err);
return err;
}
static int
visorchipset_exit(struct acpi_device *acpi_device)
{
- POSTCODE_LINUX(DRIVER_EXIT_PC, 0, 0, DIAG_SEVERITY_PRINT);
-
visorbus_exit();
+ cancel_delayed_work_sync(&chipset_dev->periodic_controlvm_work);
+ sysfs_remove_groups(&chipset_dev->acpi_device->dev.kobj,
+ visorchipset_dev_groups);
- cancel_delayed_work_sync(&periodic_controlvm_work);
-
- visorchannel_destroy(controlvm_channel);
-
- visorchipset_file_cleanup(visorchipset_platform_device.dev.devt);
- platform_device_unregister(&visorchipset_platform_device);
- POSTCODE_LINUX(DRIVER_EXIT_PC, 0, 0, DIAG_SEVERITY_PRINT);
+ visorchannel_destroy(chipset_dev->controlvm_channel);
+ kfree(chipset_dev);
return 0;
}
@@ -2050,12 +1917,12 @@ static struct acpi_driver unisys_acpi_driver = {
.ops = {
.add = visorchipset_init,
.remove = visorchipset_exit,
- },
+ },
};
MODULE_DEVICE_TABLE(acpi, unisys_device_ids);
-static __init uint32_t visorutil_spar_detect(void)
+static __init int visorutil_spar_detect(void)
{
unsigned int eax, ebx, ecx, edx;
@@ -2090,10 +1957,6 @@ static void exit_unisys(void)
acpi_bus_unregister_driver(&unisys_acpi_driver);
}
-module_param_named(major, visorchipset_major, int, 0444);
-MODULE_PARM_DESC(visorchipset_major,
- "major device number to use for the device node");
-
module_init(init_unisys);
module_exit(exit_unisys);
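
The visorchipset hunks above rework handle_command() and controlvm_periodic_work() around negative errno values, with -EAGAIN meaning "throttle: stash this message and retry it on the next poll". The standalone sketch below models only that stash-and-retry pattern; struct msg, read_event(), process_one() and poll_once() are illustrative stand-ins, not names from the driver.

#include <errno.h>
#include <stdbool.h>

struct msg { int id; };

static struct msg pending_msg;
static bool pending_valid;

/* Stub: pretend the event queue is empty (-EAGAIN, as the channel does). */
static int read_event(struct msg *m)
{
	(void)m;
	return -EAGAIN;
}

/* Stub handler; a real handler may return -EAGAIN to request throttling. */
static int process_one(const struct msg *m)
{
	(void)m;
	return 0;
}

/*
 * One polling pass: prefer a previously throttled message, then keep
 * handling new events until the handler asks us to back off or the
 * queue runs dry.
 */
static void poll_once(void)
{
	struct msg m;
	int err;

	if (pending_valid) {
		m = pending_msg;
		pending_valid = false;
		err = 0;
	} else {
		err = read_event(&m);
	}

	while (!err) {
		err = process_one(&m);
		if (err == -EAGAIN) {
			/* Stash the message; retry it on the next pass. */
			pending_msg = m;
			pending_valid = true;
			break;
		}
		err = read_event(&m);
	}
}

int main(void)
{
	poll_once();
	return 0;
}
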
diff --git a/drivers/staging/unisys/visorbus/vmcallinterface.h b/drivers/staging/unisys/visorbus/vmcallinterface.h
index d1d72c184e29..cc70e1b16bda 100644
--- a/drivers/staging/unisys/visorbus/vmcallinterface.h
+++ b/drivers/staging/unisys/visorbus/vmcallinterface.h
@@ -12,54 +12,9 @@
* details.
*/
-#ifndef __IOMONINTF_H__
-#define __IOMONINTF_H__
+#ifndef __VMCALLINTERFACE_H__
+#define __VMCALLINTERFACE_H__
-/*
- * This file contains all structures needed to support the VMCALLs for IO
- * Virtualization. The VMCALLs are provided by Monitor and used by IO code
- * running on IO Partitions.
- */
-static inline unsigned long
-__unisys_vmcall_gnuc(unsigned long tuple, unsigned long reg_ebx,
- unsigned long reg_ecx)
-{
- unsigned long result = 0;
- unsigned int cpuid_eax, cpuid_ebx, cpuid_ecx, cpuid_edx;
-
- cpuid(0x00000001, &cpuid_eax, &cpuid_ebx, &cpuid_ecx, &cpuid_edx);
- if (!(cpuid_ecx & 0x80000000))
- return -EPERM;
-
- __asm__ __volatile__(".byte 0x00f, 0x001, 0x0c1" : "=a"(result) :
- "a"(tuple), "b"(reg_ebx), "c"(reg_ecx));
- return result;
-}
-
-static inline unsigned long
-__unisys_extended_vmcall_gnuc(unsigned long long tuple,
- unsigned long long reg_ebx,
- unsigned long long reg_ecx,
- unsigned long long reg_edx)
-{
- unsigned long result = 0;
- unsigned int cpuid_eax, cpuid_ebx, cpuid_ecx, cpuid_edx;
-
- cpuid(0x00000001, &cpuid_eax, &cpuid_ebx, &cpuid_ecx, &cpuid_edx);
- if (!(cpuid_ecx & 0x80000000))
- return -EPERM;
-
- __asm__ __volatile__(".byte 0x00f, 0x001, 0x0c1" : "=a"(result) :
- "a"(tuple), "b"(reg_ebx), "c"(reg_ecx), "d"(reg_edx));
- return result;
-}
-
-#ifdef VMCALL_IO_CONTROLVM_ADDR
-#undef VMCALL_IO_CONTROLVM_ADDR
-#endif /* */
-
-/* define subsystem number for AppOS, used in uislib driver */
-#define MDS_APPOS 0x4000000000000000L /* subsystem = 62 - AppOS */
enum vmcall_monitor_interface_method_tuple { /* VMCALL identification tuples */
/* Note: when a new VMCALL is added:
* - the 1st 2 hex digits correspond to one of the
@@ -72,30 +27,20 @@ enum vmcall_monitor_interface_method_tuple { /* VMCALL identification tuples */
* type of VMCALL
*/
/* used by all Guests, not just IO */
- VMCALL_IO_CONTROLVM_ADDR = 0x0501,
- /* Allow caller to query virtual time offset */
- VMCALL_QUERY_GUEST_VIRTUAL_TIME_OFFSET = 0x0708,
- /* LOGEVENT Post Code (RDX) with specified subsystem mask */
- /* (RCX - monitor_subsystems.h) and severity (RDX) */
- VMCALL_POST_CODE_LOGEVENT = 0x070B,
- /* Allow ULTRA_SERVICE_CAPABILITY_TIME capable guest to make VMCALL */
- VMCALL_UPDATE_PHYSICAL_TIME = 0x0a02
+ VMCALL_CONTROLVM_ADDR = 0x0501,
};
-#define VMCALL_SUCCESS 0
-#define VMCALL_SUCCESSFUL(result) (result == 0)
-
-#define unisys_vmcall(tuple, reg_ebx, reg_ecx) \
- __unisys_vmcall_gnuc(tuple, reg_ebx, reg_ecx)
-#define unisys_extended_vmcall(tuple, reg_ebx, reg_ecx, reg_edx) \
- __unisys_extended_vmcall_gnuc(tuple, reg_ebx, reg_ecx, reg_edx)
-#define ISSUE_IO_VMCALL(method, param, result) \
- (result = unisys_vmcall(method, (param) & 0xFFFFFFFF, \
- (param) >> 32))
+enum vmcall_result {
+ VMCALL_RESULT_SUCCESS = 0,
+ VMCALL_RESULT_INVALID_PARAM = 1,
+ VMCALL_RESULT_DATA_UNAVAILABLE = 2,
+ VMCALL_RESULT_FAILURE_UNAVAILABLE = 3,
+ VMCALL_RESULT_DEVICE_ERROR = 4,
+ VMCALL_RESULT_DEVICE_NOT_READY = 5
+};
/* Structures for IO VMCALLs */
-
-/* Parameters to VMCALL_IO_CONTROLVM_ADDR interface */
+/* Parameters to VMCALL_CONTROLVM_ADDR interface */
struct vmcall_io_controlvm_addr_params {
/* The Guest-relative physical address of the ControlVm channel. */
/* This VMCall fills this in with the appropriate address. */
@@ -106,72 +51,4 @@ struct vmcall_io_controlvm_addr_params {
u8 unused[4]; /* Unused Bytes in the 64-Bit Aligned Struct */
} __packed;
-/******* INFO ON ISSUE_POSTCODE_LINUX() BELOW *******/
-enum driver_pc { /* POSTCODE driver identifier tuples */
- /* visorbus driver files */
- VISOR_BUS_PC = 0xF0,
- VISOR_BUS_PC_visorbus_main_c = 0xFF,
- VISOR_BUS_PC_visorchipset_c = 0xFE,
-};
-
-enum event_pc { /* POSTCODE event identifier tuples */
- BUS_CREATE_ENTRY_PC = 0x001,
- BUS_CREATE_FAILURE_PC = 0x002,
- BUS_CREATE_EXIT_PC = 0x003,
- BUS_CONFIGURE_ENTRY_PC = 0x004,
- BUS_CONFIGURE_FAILURE_PC = 0x005,
- BUS_CONFIGURE_EXIT_PC = 0x006,
- CHIPSET_INIT_ENTRY_PC = 0x007,
- CHIPSET_INIT_SUCCESS_PC = 0x008,
- CHIPSET_INIT_FAILURE_PC = 0x009,
- CHIPSET_INIT_EXIT_PC = 0x00A,
- CONTROLVM_INIT_FAILURE_PC = 0x00B,
- DEVICE_CREATE_ENTRY_PC = 0x00C,
- DEVICE_CREATE_FAILURE_PC = 0x00D,
- DEVICE_CREATE_SUCCESS_PC = 0x00E,
- DEVICE_CREATE_EXIT_PC = 0x00F,
- DEVICE_ADD_PC = 0x010,
- DEVICE_REGISTER_FAILURE_PC = 0x011,
- DEVICE_CHANGESTATE_FAILURE_PC = 0x012,
- DRIVER_ENTRY_PC = 0x013,
- DRIVER_EXIT_PC = 0x014,
- MALLOC_FAILURE_PC = 0x015,
- CRASH_DEV_ENTRY_PC = 0x016,
- CRASH_DEV_EXIT_PC = 0x017,
- CRASH_DEV_RD_BUS_FAILURE_PC = 0x018,
- CRASH_DEV_RD_DEV_FAILURE_PC = 0x019,
- CRASH_DEV_BUS_NULL_FAILURE_PC = 0x01A,
- CRASH_DEV_DEV_NULL_FAILURE_PC = 0x01B,
- CRASH_DEV_CTRL_RD_FAILURE_PC = 0x01C,
- CRASH_DEV_COUNT_FAILURE_PC = 0x01D,
- SAVE_MSG_BUS_FAILURE_PC = 0x01E,
- SAVE_MSG_DEV_FAILURE_PC = 0x01F,
-};
-
-/* Write a 64-bit value to the hypervisor's log file
- * POSTCODE_LINUX generates a value in the form 0xAABBBCCCDDDDEEEE where
- * A is an identifier for the file logging the postcode
- * B is an identifier for the event logging the postcode
- * C is the line logging the postcode
- * D is additional information the caller wants to log
- * E is additional information the caller wants to log
- * Please also note that the resulting postcode is in hex, so if you are
- * searching for the __LINE__ number, convert it first to decimal. The line
- * number combined with driver and type of call, will allow you to track down
- * exactly what line an error occurred on, or where the last driver
- * entered/exited from.
- */
-
-#define POSTCODE_LINUX(EVENT_PC, pc16bit1, pc16bit2, severity) \
-do { \
- unsigned long long post_code_temp; \
- post_code_temp = (((u64)CURRENT_FILE_PC) << 56) | \
- (((u64)EVENT_PC) << 44) | \
- ((((u64)__LINE__) & 0xFFF) << 32) | \
- ((((u64)pc16bit1) & 0xFFFF) << 16) | \
- (((u64)pc16bit2) & 0xFFFF); \
- unisys_extended_vmcall(VMCALL_POST_CODE_LOGEVENT, severity, \
- MDS_APPOS, post_code_temp); \
-} while (0)
-
-#endif /* __IOMONINTF_H__ */
+#endif /* __VMCALLINTERFACE_H__ */
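
The new enum vmcall_result above carries raw hypervisor status codes rather than Linux error numbers, so a caller is expected to translate them before propagating errors. The helper below is a hypothetical, self-contained sketch of such a translation; vmcall_result_to_errno() and the specific errno choices are assumptions, not part of this patch (the enum is repeated only so the sketch compiles on its own).

#include <errno.h>

enum vmcall_result {
	VMCALL_RESULT_SUCCESS = 0,
	VMCALL_RESULT_INVALID_PARAM = 1,
	VMCALL_RESULT_DATA_UNAVAILABLE = 2,
	VMCALL_RESULT_FAILURE_UNAVAILABLE = 3,
	VMCALL_RESULT_DEVICE_ERROR = 4,
	VMCALL_RESULT_DEVICE_NOT_READY = 5
};

/* Illustrative mapping of vmcall status codes to negative errno values. */
static int vmcall_result_to_errno(enum vmcall_result res)
{
	switch (res) {
	case VMCALL_RESULT_SUCCESS:
		return 0;
	case VMCALL_RESULT_INVALID_PARAM:
		return -EINVAL;
	case VMCALL_RESULT_DATA_UNAVAILABLE:
	case VMCALL_RESULT_FAILURE_UNAVAILABLE:
		return -EAGAIN;
	case VMCALL_RESULT_DEVICE_ERROR:
		return -EIO;
	case VMCALL_RESULT_DEVICE_NOT_READY:
		return -EBUSY;
	default:
		return -EIO;
	}
}

int main(void)
{
	/* For example, DEVICE_NOT_READY maps to -EBUSY in this sketch. */
	return vmcall_result_to_errno(VMCALL_RESULT_DEVICE_NOT_READY) == -EBUSY ? 0 : 1;
}
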
diff --git a/drivers/staging/unisys/visorhba/visorhba_main.c b/drivers/staging/unisys/visorhba/visorhba_main.c
index 0ce92c85157c..6997b16b4dcd 100644
--- a/drivers/staging/unisys/visorhba/visorhba_main.c
+++ b/drivers/staging/unisys/visorhba/visorhba_main.c
@@ -28,9 +28,9 @@
/* The Send and Receive Buffers of the IO Queue may both be full */
-#define IOS_ERROR_THRESHOLD 1000
-#define MAX_PENDING_REQUESTS (MIN_NUMSIGNALS * 2)
-#define VISORHBA_ERROR_COUNT 30
+#define IOS_ERROR_THRESHOLD 1000
+#define MAX_PENDING_REQUESTS (MIN_NUMSIGNALS * 2)
+#define VISORHBA_ERROR_COUNT 30
static struct dentry *visorhba_debugfs_dir;
@@ -101,12 +101,13 @@ struct visorhba_devices_open {
struct visorhba_devdata *devdata;
};
-#define for_each_vdisk_match(iter, list, match) \
+#define for_each_vdisk_match(iter, list, match) \
for (iter = &list->head; iter->next; iter = iter->next) \
- if ((iter->channel == match->channel) && \
- (iter->id == match->id) && \
+ if ((iter->channel == match->channel) && \
+ (iter->id == match->id) && \
(iter->lun == match->lun))
-/**
+
+/*
* visor_thread_start - starts a thread for the device
* @threadfn: Function the thread starts
* @thrcontext: Context to pass to the thread, i.e. devdata
@@ -130,7 +131,7 @@ static struct task_struct *visor_thread_start
return task;
}
-/**
+/*
* visor_thread_stop - stops the thread if it is running
*/
static void visor_thread_stop(struct task_struct *task)
@@ -140,7 +141,7 @@ static void visor_thread_stop(struct task_struct *task)
kthread_stop(task);
}
-/**
+/*
* add_scsipending_entry - save off io command that is pending in
* Service Partition
* @devdata: Pointer to devdata
@@ -183,8 +184,8 @@ static int add_scsipending_entry(struct visorhba_devdata *devdata,
return insert_location;
}
-/**
- * del_scsipending_enty - removes an entry from the pending array
+/*
+ * del_scsipending_ent - removes an entry from the pending array
* @devdata: Device holding the pending array
* @del: Entry to remove
*
@@ -210,9 +211,9 @@ static void *del_scsipending_ent(struct visorhba_devdata *devdata,
return sent;
}
-/**
+/*
* get_scsipending_cmdrsp - return the cmdrsp stored in a pending entry
- * #ddata: Device holding the pending array
+ * @ddata: Device holding the pending array
* @ent: Entry that stores the cmdrsp
*
* Each scsipending entry has a cmdrsp in it. The cmdrsp is only valid
@@ -228,7 +229,7 @@ static struct uiscmdrsp *get_scsipending_cmdrsp(struct visorhba_devdata *ddata,
return NULL;
}
-/**
+/*
* simple_idr_get - associate a provided pointer with an int value
* 1 <= value <= INT_MAX, and return this int value;
* the pointer value can be obtained later by passing
@@ -253,7 +254,7 @@ static unsigned int simple_idr_get(struct idr *idrtable, void *p,
return (unsigned int)(id); /* idr_alloc() guarantees > 0 */
}
-/**
+/*
* setup_scsitaskmgmt_handles - stash the necessary handles so that the
* completion processing logic for a taskmgmt
* cmd will be able to find who to wake up
@@ -271,7 +272,7 @@ static void setup_scsitaskmgmt_handles(struct idr *idrtable, spinlock_t *lock,
simple_idr_get(idrtable, result, lock);
}
-/**
+/*
* cleanup_scsitaskmgmt_handles - forget handles created by
* setup_scsitaskmgmt_handles()
*/
@@ -284,7 +285,7 @@ static void cleanup_scsitaskmgmt_handles(struct idr *idrtable,
idr_remove(idrtable, cmdrsp->scsitaskmgmt.notifyresult_handle);
}
-/**
+/*
 * forward_taskmgmt_command - send taskmgmt command to the Service
* Partition
* @tasktype: Type of taskmgmt command
@@ -363,7 +364,7 @@ err_del_scsipending_ent:
return FAILED;
}
-/**
+/*
* visorhba_abort_handler - Send TASK_MGMT_ABORT_TASK
 * @scsicmd: The scsicmd that needs to be aborted
*
@@ -388,7 +389,7 @@ static int visorhba_abort_handler(struct scsi_cmnd *scsicmd)
return forward_taskmgmt_command(TASK_MGMT_ABORT_TASK, scsicmd);
}
-/**
+/*
* visorhba_device_reset_handler - Send TASK_MGMT_LUN_RESET
 * @scsicmd: The scsicmd that needs to be aborted
*
@@ -412,7 +413,7 @@ static int visorhba_device_reset_handler(struct scsi_cmnd *scsicmd)
return forward_taskmgmt_command(TASK_MGMT_LUN_RESET, scsicmd);
}
-/**
+/*
* visorhba_bus_reset_handler - Send TASK_MGMT_TARGET_RESET for each
* target on the bus
 * @scsicmd: The scsicmd that needs to be aborted
@@ -436,7 +437,7 @@ static int visorhba_bus_reset_handler(struct scsi_cmnd *scsicmd)
return forward_taskmgmt_command(TASK_MGMT_BUS_RESET, scsicmd);
}
-/**
+/*
* visorhba_host_reset_handler - Not supported
 * @scsicmd: The scsicmd that needs to be aborted
*
@@ -450,7 +451,7 @@ visorhba_host_reset_handler(struct scsi_cmnd *scsicmd)
return SUCCESS;
}
-/**
+/*
* visorhba_get_info
* @shp: Scsi host that is requesting information
*
@@ -462,7 +463,7 @@ static const char *visorhba_get_info(struct Scsi_Host *shp)
return "visorhba";
}
-/**
+/*
* visorhba_queue_command_lck -- queues command to the Service Partition
* @scsicmd: Command to be queued
 * @visorhba_cmnd_done: Done command to call when scsicmd is returned
@@ -553,7 +554,7 @@ static DEF_SCSI_QCMD(visorhba_queue_command)
#define visorhba_queue_command visorhba_queue_command_lck
#endif
-/**
+/*
* visorhba_slave_alloc - called when new disk is discovered
* @scsidev: New disk
*
@@ -590,7 +591,7 @@ static int visorhba_slave_alloc(struct scsi_device *scsidev)
return 0;
}
-/**
+/*
* visorhba_slave_destroy - disk is going away
* @scsidev: scsi device going away
*
@@ -633,7 +634,7 @@ static struct scsi_host_template visorhba_driver_template = {
.use_clustering = ENABLE_CLUSTERING,
};
-/**
+/*
* info_debugfs_show - debugfs interface to dump visorhba states
*
* This presents a file in the debugfs tree named:
@@ -677,7 +678,7 @@ static const struct file_operations info_debugfs_fops = {
.release = single_release,
};
-/**
+/*
* complete_taskmgmt_command - complete task management
* @cmdrsp: Response from the IOVM
*
@@ -685,8 +686,8 @@ static const struct file_operations info_debugfs_fops = {
* command. Wake up anyone waiting for it.
* Returns void
*/
-static inline void complete_taskmgmt_command
-(struct idr *idrtable, struct uiscmdrsp *cmdrsp, int result)
+static void complete_taskmgmt_command(struct idr *idrtable,
+ struct uiscmdrsp *cmdrsp, int result)
{
wait_queue_head_t *wq =
idr_find(idrtable, cmdrsp->scsitaskmgmt.notify_handle);
@@ -706,7 +707,7 @@ static inline void complete_taskmgmt_command
wake_up_all(wq);
}
-/**
+/*
* visorhba_serverdown_complete - Called when we are done cleaning up
* from serverdown
* @work: work structure for this serverdown request
@@ -756,7 +757,7 @@ static void visorhba_serverdown_complete(struct visorhba_devdata *devdata)
devdata->serverchangingstate = false;
}
-/**
+/*
* visorhba_serverdown - Got notified that the IOVM is down
* @devdata: visorhba that is being serviced by downed IOVM.
*
@@ -775,7 +776,7 @@ static int visorhba_serverdown(struct visorhba_devdata *devdata)
return 0;
}
-/**
+/*
* do_scsi_linuxstat - scsi command returned linuxstat
* @cmdrsp: response from IOVM
* @scsicmd: Command issued.
@@ -826,7 +827,7 @@ static int set_no_disk_inquiry_result(unsigned char *buf,
return 0;
}
-/**
+/*
* do_scsi_nolinuxstat - scsi command didn't have linuxstat
* @cmdrsp: response from IOVM
* @scsicmd: Command issued.
@@ -838,7 +839,7 @@ static void
do_scsi_nolinuxstat(struct uiscmdrsp *cmdrsp, struct scsi_cmnd *scsicmd)
{
struct scsi_device *scsidev;
- unsigned char buf[36];
+ unsigned char *buf;
struct scatterlist *sg;
unsigned int i;
char *this_page;
@@ -853,6 +854,10 @@ do_scsi_nolinuxstat(struct uiscmdrsp *cmdrsp, struct scsi_cmnd *scsicmd)
if (cmdrsp->scsi.no_disk_result == 0)
return;
+ buf = kzalloc(sizeof(char) * 36, GFP_KERNEL);
+ if (!buf)
+ return;
+
/* Linux scsi code wants a device at Lun 0
* to issue report luns, but we don't want
* a disk there so we'll present a processor
@@ -864,6 +869,7 @@ do_scsi_nolinuxstat(struct uiscmdrsp *cmdrsp, struct scsi_cmnd *scsicmd)
if (scsi_sg_count(scsicmd) == 0) {
memcpy(scsi_sglist(scsicmd), buf,
cmdrsp->scsi.bufflen);
+ kfree(buf);
return;
}
@@ -875,6 +881,7 @@ do_scsi_nolinuxstat(struct uiscmdrsp *cmdrsp, struct scsi_cmnd *scsicmd)
memcpy(this_page, buf + bufind, sg[i].length);
kunmap_atomic(this_page_orig);
}
+ kfree(buf);
} else {
devdata = (struct visorhba_devdata *)scsidev->host->hostdata;
for_each_vdisk_match(vdisk, devdata, scsidev) {
@@ -887,7 +894,7 @@ do_scsi_nolinuxstat(struct uiscmdrsp *cmdrsp, struct scsi_cmnd *scsicmd)
}
}
-/**
+/*
* complete_scsi_command - complete a scsi command
* @uiscmdrsp: Response from Service Partition
* @scsicmd: The scsi command
@@ -909,7 +916,7 @@ complete_scsi_command(struct uiscmdrsp *cmdrsp, struct scsi_cmnd *scsicmd)
scsicmd->scsi_done(scsicmd);
}
-/**
+/*
* drain_queue - pull responses out of iochannel
* @cmdrsp: Response from the IOSP
* @devdata: device that owns this iochannel
@@ -951,7 +958,7 @@ drain_queue(struct uiscmdrsp *cmdrsp, struct visorhba_devdata *devdata)
}
}
-/**
+/*
* process_incoming_rsps - Process responses from IOSP
* @v: void pointer to visorhba_devdata
*
@@ -983,7 +990,7 @@ static int process_incoming_rsps(void *v)
return 0;
}
-/**
+/*
* visorhba_pause - function to handle visorbus pause messages
* @dev: device that is pausing.
* @complete_func: function to call when finished
@@ -1003,7 +1010,7 @@ static int visorhba_pause(struct visor_device *dev,
return 0;
}
-/**
+/*
* visorhba_resume - function called when the IO Service Partition is back
* @dev: device that is pausing.
* @complete_func: function to call when finished
@@ -1033,7 +1040,7 @@ static int visorhba_resume(struct visor_device *dev,
return 0;
}
-/**
+/*
* visorhba_probe - device has been discovered, do acquire
* @dev: visor_device that was discovered
*
@@ -1132,7 +1139,7 @@ err_scsi_host_put:
return err;
}
-/**
+/*
* visorhba_remove - remove a visorhba device
* @dev: Device to remove
*
@@ -1174,7 +1181,7 @@ static struct visor_driver visorhba_driver = {
.channel_interrupt = NULL,
};
-/**
+/*
* visorhba_init - driver init routine
*
* Initialize the visorhba driver and register it with visorbus
@@ -1200,8 +1207,8 @@ cleanup_debugfs:
return rc;
}
-/**
- * visorhba_cleanup - driver exit routine
+/*
+ * visorhba_exit - driver exit routine
*
* Unregister driver from the bus and free up memory.
*/
diff --git a/drivers/staging/unisys/visorinput/visorinput.c b/drivers/staging/unisys/visorinput/visorinput.c
index 949cce680b29..cdd35437f0a0 100644
--- a/drivers/staging/unisys/visorinput/visorinput.c
+++ b/drivers/staging/unisys/visorinput/visorinput.c
@@ -33,21 +33,21 @@
#include "ultrainputreport.h"
/* Keyboard channel {c73416d0-b0b8-44af-b304-9d2ae99f1b3d} */
-#define SPAR_KEYBOARD_CHANNEL_PROTOCOL_UUID \
- UUID_LE(0xc73416d0, 0xb0b8, 0x44af, \
+#define SPAR_KEYBOARD_CHANNEL_PROTOCOL_UUID \
+ UUID_LE(0xc73416d0, 0xb0b8, 0x44af, \
0xb3, 0x4, 0x9d, 0x2a, 0xe9, 0x9f, 0x1b, 0x3d)
#define SPAR_KEYBOARD_CHANNEL_PROTOCOL_UUID_STR "c73416d0-b0b8-44af-b304-9d2ae99f1b3d"
/* Mouse channel {addf07d4-94a9-46e2-81c3-61abcdbdbd87} */
-#define SPAR_MOUSE_CHANNEL_PROTOCOL_UUID \
+#define SPAR_MOUSE_CHANNEL_PROTOCOL_UUID \
UUID_LE(0xaddf07d4, 0x94a9, 0x46e2, \
0x81, 0xc3, 0x61, 0xab, 0xcd, 0xbd, 0xbd, 0x87)
#define SPAR_MOUSE_CHANNEL_PROTOCOL_UUID_STR \
"addf07d4-94a9-46e2-81c3-61abcdbdbd87"
-#define PIXELS_ACROSS_DEFAULT 800
-#define PIXELS_DOWN_DEFAULT 600
-#define KEYCODE_TABLE_BYTES 256
+#define PIXELS_ACROSS_DEFAULT 800
+#define PIXELS_DOWN_DEFAULT 600
+#define KEYCODE_TABLE_BYTES 256
enum visorinput_device_type {
visorinput_keyboard,
@@ -539,13 +539,10 @@ handle_locking_key(struct input_dev *visorinput_dev,
static int
scancode_to_keycode(int scancode)
{
- int keycode;
-
if (scancode > 0xff)
- keycode = visorkbd_ext_keycode[(scancode >> 8) & 0xff];
- else
- keycode = visorkbd_keycode[scancode];
- return keycode;
+ return visorkbd_ext_keycode[(scancode >> 8) & 0xff];
+
+ return visorkbd_keycode[scancode];
}
static int
diff --git a/drivers/staging/unisys/visornic/visornic_main.c b/drivers/staging/unisys/visornic/visornic_main.c
index 73a01a70b106..adebf224f73a 100644
--- a/drivers/staging/unisys/visornic/visornic_main.c
+++ b/drivers/staging/unisys/visornic/visornic_main.c
@@ -141,7 +141,44 @@ struct visornic_devdata {
struct uiscmdrsp cmdrsp[SIZEOF_CMDRSP];
};
-/**
+/* Returns next non-zero index on success or 0 on failure (i.e. out of room). */
+static u16
+add_physinfo_entries(u64 inp_pfn, u16 inp_off, u32 inp_len, u16 index,
+ u16 max_pi_arr_entries, struct phys_info pi_arr[])
+{
+ u32 len;
+ u16 i, firstlen;
+
+ firstlen = PI_PAGE_SIZE - inp_off;
+ if (inp_len <= firstlen) {
+ /* The input entry spans only one page - add as is. */
+ if (index >= max_pi_arr_entries)
+ return 0;
+ pi_arr[index].pi_pfn = inp_pfn;
+ pi_arr[index].pi_off = (u16)inp_off;
+ pi_arr[index].pi_len = (u16)inp_len;
+ return index + 1;
+ }
+
+ /* This entry spans multiple pages. */
+ for (len = inp_len, i = 0; len;
+ len -= pi_arr[index + i].pi_len, i++) {
+ if (index + i >= max_pi_arr_entries)
+ return 0;
+ pi_arr[index + i].pi_pfn = inp_pfn + i;
+ if (i == 0) {
+ pi_arr[index].pi_off = inp_off;
+ pi_arr[index].pi_len = firstlen;
+ } else {
+ pi_arr[index + i].pi_off = 0;
+ pi_arr[index + i].pi_len =
+ (u16)MINNUM(len, (u32)PI_PAGE_SIZE);
+ }
+ }
+ return index + i;
+}
+
+/*
* visor_copy_fragsinfo_from_skb(
* @skb_in: skbuff that we are pulling the frags from
* @firstfraglen: length of first fragment in skb
@@ -250,7 +287,7 @@ static const struct file_operations debugfs_enable_ints_fops = {
.write = enable_ints_write,
};
-/**
+/*
* visornic_serverdown_complete - IOPART went down, pause device
* @work: Work queue it was scheduled on
*
@@ -285,7 +322,7 @@ visornic_serverdown_complete(struct visornic_devdata *devdata)
devdata->server_down_complete_func = NULL;
}
-/**
+/*
* visornic_serverdown - Command has notified us that IOPART is down
* @devdata: device that is being managed by IOPART
*
@@ -332,7 +369,7 @@ err_unlock:
return err;
}
-/**
+/*
* alloc_rcv_buf - alloc rcv buffer to be given to the IO Partition.
 * @netdev: network adapter the rcv bufs are attached to.
*
@@ -363,19 +400,21 @@ alloc_rcv_buf(struct net_device *netdev)
return skb;
}
-/**
+/*
* post_skb - post a skb to the IO Partition.
 * @cmdrsp: cmdrsp packet to be sent to the IO Partition
 * @devdata: visornic_devdata to post the skb to
* @skb: skb to give to the IO partition
*
* Send the skb to the IO Partition.
- * Returns void
+ * Returns 0 or error
*/
-static inline void
+static int
post_skb(struct uiscmdrsp *cmdrsp,
struct visornic_devdata *devdata, struct sk_buff *skb)
{
+ int err;
+
cmdrsp->net.buf = skb;
cmdrsp->net.rcvpost.frag.pi_pfn = page_to_pfn(virt_to_page(skb->data));
cmdrsp->net.rcvpost.frag.pi_off =
@@ -383,21 +422,26 @@ post_skb(struct uiscmdrsp *cmdrsp,
cmdrsp->net.rcvpost.frag.pi_len = skb->len;
cmdrsp->net.rcvpost.unique_num = devdata->incarnation_id;
- if ((cmdrsp->net.rcvpost.frag.pi_off + skb->len) <= PI_PAGE_SIZE) {
- cmdrsp->net.type = NET_RCV_POST;
- cmdrsp->cmdtype = CMD_NET_TYPE;
- if (!visorchannel_signalinsert(devdata->dev->visorchannel,
- IOCHAN_TO_IOPART,
- cmdrsp)) {
- atomic_inc(&devdata->num_rcvbuf_in_iovm);
- devdata->chstat.sent_post++;
- } else {
- devdata->chstat.sent_post_failed++;
- }
+ if ((cmdrsp->net.rcvpost.frag.pi_off + skb->len) > PI_PAGE_SIZE)
+ return -EINVAL;
+
+ cmdrsp->net.type = NET_RCV_POST;
+ cmdrsp->cmdtype = CMD_NET_TYPE;
+ err = visorchannel_signalinsert(devdata->dev->visorchannel,
+ IOCHAN_TO_IOPART,
+ cmdrsp);
+ if (err) {
+ devdata->chstat.sent_post_failed++;
+ return err;
}
+
+ atomic_inc(&devdata->num_rcvbuf_in_iovm);
+ devdata->chstat.sent_post++;
+
+ return 0;
}
-/**
+/*
* send_enbdis - send NET_RCV_ENBDIS to IO Partition
 * @netdev: netdevice we are enabling/disabling, used as context
* return value
@@ -405,23 +449,28 @@ post_skb(struct uiscmdrsp *cmdrsp,
* @devdata: visornic device we are enabling/disabling
*
* Send the enable/disable message to the IO Partition.
- * Returns void
+ * Returns 0 or error
*/
-static void
+static int
send_enbdis(struct net_device *netdev, int state,
struct visornic_devdata *devdata)
{
+ int err;
+
devdata->cmdrsp_rcv->net.enbdis.enable = state;
devdata->cmdrsp_rcv->net.enbdis.context = netdev;
devdata->cmdrsp_rcv->net.type = NET_RCV_ENBDIS;
devdata->cmdrsp_rcv->cmdtype = CMD_NET_TYPE;
- if (!visorchannel_signalinsert(devdata->dev->visorchannel,
- IOCHAN_TO_IOPART,
- devdata->cmdrsp_rcv))
- devdata->chstat.sent_enbdis++;
+ err = visorchannel_signalinsert(devdata->dev->visorchannel,
+ IOCHAN_TO_IOPART,
+ devdata->cmdrsp_rcv);
+ if (err)
+ return err;
+ devdata->chstat.sent_enbdis++;
+ return 0;
}
-/**
+/*
* visornic_disable_with_timeout - Disable network adapter
* @netdev: netdevice to disable
* @timeout: timeout to wait for disable
@@ -439,6 +488,7 @@ visornic_disable_with_timeout(struct net_device *netdev, const int timeout)
int i;
unsigned long flags;
int wait = 0;
+ int err;
/* send a msg telling the other end we are stopping incoming pkts */
spin_lock_irqsave(&devdata->priv_lock, flags);
@@ -448,8 +498,11 @@ visornic_disable_with_timeout(struct net_device *netdev, const int timeout)
/* send disable and wait for ack -- don't hold lock when sending
* disable because if the queue is full, insert might sleep.
+ * If an error occurs, don't wait for the timeout.
*/
- send_enbdis(netdev, 0, devdata);
+ err = send_enbdis(netdev, 0, devdata);
+ if (err)
+ return err;
/* wait for ack to arrive before we try to free rcv buffers
* NOTE: the other end automatically unposts the rcv buffers when
@@ -507,7 +560,7 @@ visornic_disable_with_timeout(struct net_device *netdev, const int timeout)
return 0;
}
-/**
+/*
* init_rcv_bufs -- initialize receive bufs and send them to the IO Part
* @netdev: struct netdevice
* @devdata: visornic_devdata
@@ -518,7 +571,7 @@ visornic_disable_with_timeout(struct net_device *netdev, const int timeout)
static int
init_rcv_bufs(struct net_device *netdev, struct visornic_devdata *devdata)
{
- int i, count;
+ int i, j, count, err;
/* allocate fixed number of receive buffers to post to uisnic
* post receive buffers after we've allocated a required amount
@@ -548,13 +601,30 @@ init_rcv_bufs(struct net_device *netdev, struct visornic_devdata *devdata)
* lock - we've not enabled nor started the queue so there shouldn't
* be any rcv or xmit activity
*/
- for (i = 0; i < count; i++)
- post_skb(devdata->cmdrsp_rcv, devdata, devdata->rcvbuf[i]);
+ for (i = 0; i < count; i++) {
+ err = post_skb(devdata->cmdrsp_rcv, devdata,
+ devdata->rcvbuf[i]);
+ if (!err)
+ continue;
+
+ /* Error handling -
+ * If we posted at least one skb, we should return success,
+ * but need to free the resources that we have not successfully
+ * posted.
+ */
+ for (j = i; j < count; j++) {
+ kfree_skb(devdata->rcvbuf[j]);
+ devdata->rcvbuf[j] = NULL;
+ }
+ if (i == 0)
+ return err;
+ break;
+ }
return 0;
}
-/**
+/*
* visornic_enable_with_timeout - send enable to IO Part
* @netdev: struct net_device
* @timeout: Time to wait for the ACK from the enable
@@ -566,7 +636,7 @@ init_rcv_bufs(struct net_device *netdev, struct visornic_devdata *devdata)
static int
visornic_enable_with_timeout(struct net_device *netdev, const int timeout)
{
- int i;
+ int err = 0;
struct visornic_devdata *devdata = netdev_priv(netdev);
unsigned long flags;
int wait = 0;
@@ -576,11 +646,11 @@ visornic_enable_with_timeout(struct net_device *netdev, const int timeout)
/* NOTE: the other end automatically unposts the rcv buffers when it
* gets a disable.
*/
- i = init_rcv_bufs(netdev, devdata);
- if (i < 0) {
+ err = init_rcv_bufs(netdev, devdata);
+ if (err < 0) {
dev_err(&netdev->dev,
- "%s failed to init rcv bufs (%d)\n", __func__, i);
- return i;
+ "%s failed to init rcv bufs\n", __func__);
+ return err;
}
spin_lock_irqsave(&devdata->priv_lock, flags);
@@ -594,9 +664,12 @@ visornic_enable_with_timeout(struct net_device *netdev, const int timeout)
spin_unlock_irqrestore(&devdata->priv_lock, flags);
/* send enable and wait for ack -- don't hold lock when sending enable
- * because if the queue is full, insert might sleep.
+ * because if the queue is full, insert might sleep. If an error
+ * occurs, error out.
*/
- send_enbdis(netdev, 1, devdata);
+ err = send_enbdis(netdev, 1, devdata);
+ if (err)
+ return err;
spin_lock_irqsave(&devdata->priv_lock, flags);
while ((timeout == VISORNIC_INFINITE_RSP_WAIT) ||
@@ -626,7 +699,7 @@ visornic_enable_with_timeout(struct net_device *netdev, const int timeout)
return 0;
}
-/**
+/*
* visornic_timeout_reset - handle xmit timeout resets
 * @work: work item that scheduled the work
*
@@ -669,7 +742,7 @@ call_serverdown:
rtnl_unlock();
}
-/**
+/*
* visornic_open - Enable the visornic device and mark the queue started
* @netdev: netdevice to start
*
@@ -684,7 +757,7 @@ visornic_open(struct net_device *netdev)
return 0;
}
-/**
+/*
* visornic_close - Disables the visornic device and stops the queues
* @netdev: netdevice to start
*
@@ -699,7 +772,7 @@ visornic_close(struct net_device *netdev)
return 0;
}
-/**
+/*
* devdata_xmits_outstanding - compute outstanding xmits
* @devdata: visornic_devdata for device
*
@@ -714,7 +787,7 @@ static unsigned long devdata_xmits_outstanding(struct visornic_devdata *devdata)
+ devdata->chstat.sent_xmit + 1);
}
-/**
+/*
* vnic_hit_high_watermark
* @devdata: indicates visornic device we are checking
* @high_watermark: max num of unacked xmits we will tolerate,
@@ -723,13 +796,13 @@ static unsigned long devdata_xmits_outstanding(struct visornic_devdata *devdata)
* Returns true iff the number of unacked xmits sent to
* the IO partition is >= high_watermark.
*/
-static inline bool vnic_hit_high_watermark(struct visornic_devdata *devdata,
- ulong high_watermark)
+static bool vnic_hit_high_watermark(struct visornic_devdata *devdata,
+ ulong high_watermark)
{
return (devdata_xmits_outstanding(devdata) >= high_watermark);
}
-/**
+/*
* vnic_hit_low_watermark
* @devdata: indicates visornic device we are checking
* @low_watermark: we will wait until the num of unacked xmits
@@ -739,13 +812,13 @@ static inline bool vnic_hit_high_watermark(struct visornic_devdata *devdata,
* Returns true iff the number of unacked xmits sent to
* the IO partition is <= low_watermark.
*/
-static inline bool vnic_hit_low_watermark(struct visornic_devdata *devdata,
- ulong low_watermark)
+static bool vnic_hit_low_watermark(struct visornic_devdata *devdata,
+ ulong low_watermark)
{
return (devdata_xmits_outstanding(devdata) <= low_watermark);
}
-/**
+/*
* visornic_xmit - send a packet to the IO Partition
* @skb: Packet to be sent
* @netdev: net device the packet is being sent from
@@ -764,6 +837,7 @@ visornic_xmit(struct sk_buff *skb, struct net_device *netdev)
int len, firstfraglen, padlen;
struct uiscmdrsp *cmdrsp = NULL;
unsigned long flags;
+ int err;
devdata = netdev_priv(netdev);
spin_lock_irqsave(&devdata->priv_lock, flags);
@@ -880,8 +954,9 @@ visornic_xmit(struct sk_buff *skb, struct net_device *netdev)
return NETDEV_TX_OK;
}
- if (visorchannel_signalinsert(devdata->dev->visorchannel,
- IOCHAN_TO_IOPART, cmdrsp)) {
+ err = visorchannel_signalinsert(devdata->dev->visorchannel,
+ IOCHAN_TO_IOPART, cmdrsp);
+ if (err) {
netif_stop_queue(netdev);
spin_unlock_irqrestore(&devdata->priv_lock, flags);
devdata->busy_cnt++;
@@ -916,7 +991,7 @@ visornic_xmit(struct sk_buff *skb, struct net_device *netdev)
return NETDEV_TX_OK;
}
-/**
+/*
* visornic_get_stats - returns net_stats of the visornic device
* @netdev: netdevice
*
@@ -930,7 +1005,7 @@ visornic_get_stats(struct net_device *netdev)
return &devdata->net_stats;
}
-/**
+/*
* visornic_change_mtu - changes mtu of device.
* @netdev: netdevice
* @new_mtu: value of new mtu
@@ -947,7 +1022,7 @@ visornic_change_mtu(struct net_device *netdev, int new_mtu)
return -EINVAL;
}
-/**
+/*
 * visornic_set_multi - set promiscuous mode of the device.
* @netdev: netdevice
*
@@ -959,6 +1034,7 @@ visornic_set_multi(struct net_device *netdev)
{
struct uiscmdrsp *cmdrsp;
struct visornic_devdata *devdata = netdev_priv(netdev);
+ int err = 0;
if (devdata->old_flags == netdev->flags)
return;
@@ -975,16 +1051,18 @@ visornic_set_multi(struct net_device *netdev)
cmdrsp->net.enbdis.context = netdev;
cmdrsp->net.enbdis.enable =
netdev->flags & IFF_PROMISC;
- visorchannel_signalinsert(devdata->dev->visorchannel,
- IOCHAN_TO_IOPART,
- cmdrsp);
+ err = visorchannel_signalinsert(devdata->dev->visorchannel,
+ IOCHAN_TO_IOPART,
+ cmdrsp);
kfree(cmdrsp);
+ if (err)
+ return;
out_save_flags:
devdata->old_flags = netdev->flags;
}
-/**
+/*
* visornic_xmit_timeout - request to timeout the xmit
* @netdev
*
@@ -1019,7 +1097,7 @@ visornic_xmit_timeout(struct net_device *netdev)
spin_unlock_irqrestore(&devdata->priv_lock, flags);
}
-/**
+/*
* repost_return - repost rcv bufs that have come back
* @cmdrsp: io channel command struct to post
* @devdata: visornic devdata for the device
@@ -1030,7 +1108,7 @@ visornic_xmit_timeout(struct net_device *netdev)
* we are finished with them.
* Returns 0 for success, -1 for error.
*/
-static inline int
+static int
repost_return(struct uiscmdrsp *cmdrsp, struct visornic_devdata *devdata,
struct sk_buff *skb, struct net_device *netdev)
{
@@ -1071,7 +1149,12 @@ repost_return(struct uiscmdrsp *cmdrsp, struct visornic_devdata *devdata,
status = -ENOMEM;
break;
}
- post_skb(cmdrsp, devdata, devdata->rcvbuf[i]);
+ status = post_skb(cmdrsp, devdata, devdata->rcvbuf[i]);
+ if (status) {
+ kfree_skb(devdata->rcvbuf[i]);
+ devdata->rcvbuf[i] = NULL;
+ break;
+ }
numreposted++;
break;
}
@@ -1091,7 +1174,7 @@ repost_return(struct uiscmdrsp *cmdrsp, struct visornic_devdata *devdata,
return status;
}
-/**
+/*
* visornic_rx - Handle receive packets coming back from IO Part
* @cmdrsp: Receive packet returned from IO Part
*
@@ -1293,7 +1376,7 @@ visornic_rx(struct uiscmdrsp *cmdrsp)
return 1;
}
-/**
+/*
* devdata_initialize - Initialize devdata structure
* @devdata: visornic_devdata structure to initialize
 * @dev: visor_device it belongs to
@@ -1310,7 +1393,7 @@ devdata_initialize(struct visornic_devdata *devdata, struct visor_device *dev)
return devdata;
}
-/**
+/*
* devdata_release - Frees up references in devdata
* @devdata: struct to clean up
*
@@ -1487,24 +1570,25 @@ static const struct file_operations debugfs_info_fops = {
.read = info_debugfs_read,
};
-/**
+/*
* send_rcv_posts_if_needed
* @devdata: visornic device
*
* Send receive buffers to the IO Partition.
 * Returns 0 or error
*/
-static void
+static int
send_rcv_posts_if_needed(struct visornic_devdata *devdata)
{
int i;
struct net_device *netdev;
struct uiscmdrsp *cmdrsp = devdata->cmdrsp_rcv;
int cur_num_rcv_bufs_to_alloc, rcv_bufs_allocated;
+ int err;
/* don't do this until vnic is marked ready */
if (!(devdata->enabled && devdata->enab_dis_acked))
- return;
+ return 0;
netdev = devdata->netdev;
rcv_bufs_allocated = 0;
@@ -1523,14 +1607,20 @@ send_rcv_posts_if_needed(struct visornic_devdata *devdata)
break;
}
rcv_bufs_allocated++;
- post_skb(cmdrsp, devdata, devdata->rcvbuf[i]);
+ err = post_skb(cmdrsp, devdata, devdata->rcvbuf[i]);
+ if (err) {
+ kfree_skb(devdata->rcvbuf[i]);
+ devdata->rcvbuf[i] = NULL;
+ break;
+ }
devdata->chstat.extra_rcvbufs_sent++;
}
}
devdata->num_rcv_bufs_could_not_alloc -= rcv_bufs_allocated;
+ return 0;
}
-/**
+/*
* drain_resp_queue - drains and ignores all messages from the resp queue
* @cmdrsp: io channel command response message
* @devdata: visornic device to drain
@@ -1544,7 +1634,7 @@ drain_resp_queue(struct uiscmdrsp *cmdrsp, struct visornic_devdata *devdata)
;
}
-/**
+/*
* service_resp_queue - drains the response queue
* @cmdrsp: io channel command response message
* @devdata: visornic device to drain
@@ -1650,8 +1740,12 @@ static int visornic_poll(struct napi_struct *napi, int budget)
struct visornic_devdata,
napi);
int rx_count = 0;
+ int err;
+
+ err = send_rcv_posts_if_needed(devdata);
+ if (err)
+ return err;
- send_rcv_posts_if_needed(devdata);
service_resp_queue(devdata->cmdrsp, devdata, &rx_count, budget);
/* If there aren't any more packets to receive stop the poll */
@@ -1661,7 +1755,7 @@ static int visornic_poll(struct napi_struct *napi, int budget)
return rx_count;
}
-/**
+/*
* poll_for_irq - Checks the status of the response queue.
 * @v: void pointer to the visornic devdata
*
@@ -1684,7 +1778,7 @@ poll_for_irq(unsigned long v)
mod_timer(&devdata->irq_poll_timer, msecs_to_jiffies(2));
}
-/**
+/*
* visornic_probe - probe function for visornic devices
* @dev: The visor device discovered
*
@@ -1881,7 +1975,7 @@ cleanup_netdev:
return err;
}
-/**
+/*
* host_side_disappeared - IO part is gone.
* @devdata: device object
*
@@ -1897,7 +1991,7 @@ static void host_side_disappeared(struct visornic_devdata *devdata)
spin_unlock_irqrestore(&devdata->priv_lock, flags);
}
-/**
+/*
* visornic_remove - Called when visornic dev goes away
* @dev: visornic device that is being removed
*
@@ -1944,7 +2038,7 @@ static void visornic_remove(struct visor_device *dev)
free_netdev(netdev);
}
-/**
+/*
* visornic_pause - Called when IO Part disappears
* @dev: visornic device that is being serviced
* @complete_func: call when finished.
@@ -1966,7 +2060,7 @@ static int visornic_pause(struct visor_device *dev,
return 0;
}
-/**
+/*
* visornic_resume - Called when IO part has recovered
* @dev: visornic device that is being serviced
 * @complete_func: call when finished
@@ -2036,7 +2130,7 @@ static struct visor_driver visornic_driver = {
.channel_interrupt = NULL,
};
-/**
+/*
* visornic_init - Init function
*
* Init function for the visornic driver. Do initial driver setup
@@ -2052,11 +2146,11 @@ static int visornic_init(void)
if (!visornic_debugfs_dir)
return err;
- ret = debugfs_create_file("info", S_IRUSR, visornic_debugfs_dir, NULL,
+ ret = debugfs_create_file("info", 0400, visornic_debugfs_dir, NULL,
&debugfs_info_fops);
if (!ret)
goto cleanup_debugfs;
- ret = debugfs_create_file("enable_ints", S_IWUSR, visornic_debugfs_dir,
+ ret = debugfs_create_file("enable_ints", 0200, visornic_debugfs_dir,
NULL, &debugfs_enable_ints_fops);
if (!ret)
goto cleanup_debugfs;
@@ -2073,7 +2167,7 @@ cleanup_debugfs:
return err;
}
-/**
+/*
* visornic_cleanup - driver exit routine
*
* Unregister driver from the bus and free up memory.
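
The add_physinfo_entries() helper added to visornic_main.c above splits a (pfn, offset, length) fragment into per-page phys_info entries: the first entry takes the remainder of the first page, and each following entry covers at most one page. The sketch below is a self-contained userspace model of that rule, assuming the usual 4 KiB PI_PAGE_SIZE; split_fragment() and the sample values are illustrative only. For offset 3000 and length 10000 it produces four entries of 1096, 4096, 4096 and 712 bytes on consecutive page frames.

#include <stdint.h>
#include <stdio.h>

#define PI_PAGE_SIZE 4096u	/* assumed page size (4 KiB) */

struct phys_info {
	uint64_t pi_pfn;
	uint16_t pi_off;
	uint16_t pi_len;
};

/*
 * Same splitting rule as add_physinfo_entries(): entry 0 covers the rest
 * of the first page, later entries cover at most PI_PAGE_SIZE each.
 * Returns the number of entries written, or 0 if out of room.
 */
static unsigned int split_fragment(uint64_t pfn, uint32_t off, uint32_t len,
				   struct phys_info *out, unsigned int max)
{
	uint32_t firstlen = PI_PAGE_SIZE - off;
	unsigned int n = 0;

	if (len <= firstlen) {
		if (max == 0)
			return 0;
		out[0].pi_pfn = pfn;
		out[0].pi_off = (uint16_t)off;
		out[0].pi_len = (uint16_t)len;
		return 1;
	}

	while (len) {
		uint32_t chunk = n ? (len < PI_PAGE_SIZE ? len : PI_PAGE_SIZE)
				   : firstlen;

		if (n >= max)
			return 0;
		out[n].pi_pfn = pfn + n;
		out[n].pi_off = n ? 0 : (uint16_t)off;
		out[n].pi_len = (uint16_t)chunk;
		len -= chunk;
		n++;
	}
	return n;
}

int main(void)
{
	struct phys_info pi[8];
	unsigned int i, n = split_fragment(100, 3000, 10000, pi, 8);

	/* Prints four entries: 1096, 4096, 4096 and 712 bytes. */
	for (i = 0; i < n; i++)
		printf("pfn %llu off %u len %u\n",
		       (unsigned long long)pi[i].pi_pfn,
		       (unsigned)pi[i].pi_off, (unsigned)pi[i].pi_len);
	return 0;
}
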
diff --git a/drivers/staging/vc04_services/Kconfig b/drivers/staging/vc04_services/Kconfig
index 74094fff4367..9e2763663ab8 100644
--- a/drivers/staging/vc04_services/Kconfig
+++ b/drivers/staging/vc04_services/Kconfig
@@ -1,11 +1,39 @@
-config BCM2835_VCHIQ
- tristate "Videocore VCHIQ"
+menuconfig BCM_VIDEOCORE
+ tristate "Broadcom VideoCore support"
depends on HAS_DMA
depends on OF
depends on RASPBERRYPI_FIRMWARE || (COMPILE_TEST && !RASPBERRYPI_FIRMWARE)
default y
help
+	  Support for Broadcom VideoCore services, including
+	  the BCM2835 family of products, which is used
+	  by the Raspberry Pi.
+
+if BCM_VIDEOCORE
+
+config BCM2835_VCHIQ
+ tristate "BCM2835 VCHIQ"
+ help
Kernel to VideoCore communication interface for the
BCM2835 family of products.
Defaults to Y when the Broadcom Videocore services
are included in the build, N otherwise.
+
+if BCM2835_VCHIQ
+
+config BCM2835_VCHIQ_SUPPORT_MEMDUMP
+ bool "Support dumping memory contents to debug log"
+ help
+ BCM2835 VCHIQ supports the ability to dump the
+ contents of memory to the debug log. This
+ is typically only needed by diagnostic tools used
+ to debug issues with VideoCore.
+
+endif
+
+source "drivers/staging/vc04_services/bcm2835-audio/Kconfig"
+
+source "drivers/staging/vc04_services/bcm2835-camera/Kconfig"
+
+endif
+
diff --git a/drivers/staging/vc04_services/Makefile b/drivers/staging/vc04_services/Makefile
index 1a9e742ee40d..e9a8e1343cbb 100644
--- a/drivers/staging/vc04_services/Makefile
+++ b/drivers/staging/vc04_services/Makefile
@@ -10,5 +10,8 @@ vchiq-objs := \
interface/vchiq_arm/vchiq_util.o \
interface/vchiq_arm/vchiq_connected.o \
+obj-$(CONFIG_SND_BCM2835) += bcm2835-audio/
+obj-$(CONFIG_VIDEO_BCM2835) += bcm2835-camera/
+
ccflags-y += -DVCOS_VERIFY_BKPTS=1 -Idrivers/staging/vc04_services -DUSE_VCHIQ_ARM -D__VCCOREVER__=0x04000000
diff --git a/drivers/staging/vc04_services/bcm2835-audio/Kconfig b/drivers/staging/vc04_services/bcm2835-audio/Kconfig
new file mode 100644
index 000000000000..9f536533c257
--- /dev/null
+++ b/drivers/staging/vc04_services/bcm2835-audio/Kconfig
@@ -0,0 +1,8 @@
+config SND_BCM2835
+ tristate "BCM2835 Audio"
+ depends on ARCH_BCM2835 && SND
+ select SND_PCM
+ select BCM2835_VCHIQ
+ help
+	  Say Y or M if you want to support the BCM2835 built-in audio
+
diff --git a/drivers/staging/bcm2835-audio/Makefile b/drivers/staging/vc04_services/bcm2835-audio/Makefile
index d7b88d164d15..d7b88d164d15 100644
--- a/drivers/staging/bcm2835-audio/Makefile
+++ b/drivers/staging/vc04_services/bcm2835-audio/Makefile
diff --git a/drivers/staging/bcm2835-audio/TODO b/drivers/staging/vc04_services/bcm2835-audio/TODO
index 73d41fa631ac..73d41fa631ac 100644
--- a/drivers/staging/bcm2835-audio/TODO
+++ b/drivers/staging/vc04_services/bcm2835-audio/TODO
diff --git a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-ctl.c b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-ctl.c
new file mode 100644
index 000000000000..f484bb055df7
--- /dev/null
+++ b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-ctl.c
@@ -0,0 +1,426 @@
+/*****************************************************************************
+ * Copyright 2011 Broadcom Corporation. All rights reserved.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2, available at
+ * http://www.broadcom.com/licenses/GPLv2.php (the "GPL").
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a
+ * license other than the GPL, without Broadcom's express prior written
+ * consent.
+ *****************************************************************************/
+
+#include <linux/platform_device.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/jiffies.h>
+#include <linux/slab.h>
+#include <linux/time.h>
+#include <linux/wait.h>
+#include <linux/delay.h>
+#include <linux/moduleparam.h>
+#include <linux/sched.h>
+
+#include <sound/core.h>
+#include <sound/control.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/rawmidi.h>
+#include <sound/initval.h>
+#include <sound/tlv.h>
+#include <sound/asoundef.h>
+
+#include "bcm2835.h"
+
+/* volume maximum and minimum in terms of 0.01dB */
+#define CTRL_VOL_MAX 400
+#define CTRL_VOL_MIN -10239 /* originally -10240 */
+
+static int snd_bcm2835_ctl_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ if (kcontrol->private_value == PCM_PLAYBACK_VOLUME) {
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+ uinfo->count = 1;
+ uinfo->value.integer.min = CTRL_VOL_MIN;
+ uinfo->value.integer.max = CTRL_VOL_MAX; /* 2303 */
+ } else if (kcontrol->private_value == PCM_PLAYBACK_MUTE) {
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN;
+ uinfo->count = 1;
+ uinfo->value.integer.min = 0;
+ uinfo->value.integer.max = 1;
+ } else if (kcontrol->private_value == PCM_PLAYBACK_DEVICE) {
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+ uinfo->count = 1;
+ uinfo->value.integer.min = 0;
+ uinfo->value.integer.max = AUDIO_DEST_MAX - 1;
+ }
+ return 0;
+}
+
+/* toggles mute on or off depending on the value of nmute, and returns
+ * 1 if the mute value was changed, otherwise 0
+ */
+static int toggle_mute(struct bcm2835_chip *chip, int nmute)
+{
+ /* if settings are ok, just return 0 */
+ if (chip->mute == nmute)
+ return 0;
+
+ /* if the sound is muted then we need to unmute */
+ if (chip->mute == CTRL_VOL_MUTE) {
+ chip->volume = chip->old_volume; /* copy the old volume back */
+ audio_info("Unmuting, old_volume = %d, volume = %d ...\n", chip->old_volume, chip->volume);
+ } else /* otherwise we mute */ {
+ chip->old_volume = chip->volume;
+ chip->volume = 26214; /* set volume to minimum level AKA mute */
+ audio_info("Muting, old_volume = %d, volume = %d ...\n", chip->old_volume, chip->volume);
+ }
+
+ chip->mute = nmute;
+ return 1;
+}
+
+static int snd_bcm2835_ctl_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct bcm2835_chip *chip = snd_kcontrol_chip(kcontrol);
+
+ if (mutex_lock_interruptible(&chip->audio_mutex))
+ return -EINTR;
+
+ BUG_ON(!chip && !(chip->avail_substreams & AVAIL_SUBSTREAMS_MASK));
+
+ if (kcontrol->private_value == PCM_PLAYBACK_VOLUME)
+ ucontrol->value.integer.value[0] = chip2alsa(chip->volume);
+ else if (kcontrol->private_value == PCM_PLAYBACK_MUTE)
+ ucontrol->value.integer.value[0] = chip->mute;
+ else if (kcontrol->private_value == PCM_PLAYBACK_DEVICE)
+ ucontrol->value.integer.value[0] = chip->dest;
+
+ mutex_unlock(&chip->audio_mutex);
+ return 0;
+}
+
+static int snd_bcm2835_ctl_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct bcm2835_chip *chip = snd_kcontrol_chip(kcontrol);
+ int changed = 0;
+
+ if (mutex_lock_interruptible(&chip->audio_mutex))
+ return -EINTR;
+
+ if (kcontrol->private_value == PCM_PLAYBACK_VOLUME) {
+ audio_info("Volume change attempted.. volume = %d new_volume = %d\n", chip->volume, (int)ucontrol->value.integer.value[0]);
+ if (chip->mute == CTRL_VOL_MUTE) {
+ /* changed = toggle_mute(chip, CTRL_VOL_UNMUTE); */
+ changed = 1; /* should return 0 to signify no change but the mixer takes this as the opposite sign (no idea why) */
+ goto unlock;
+ }
+ if (changed || (ucontrol->value.integer.value[0] != chip2alsa(chip->volume))) {
+ chip->volume = alsa2chip(ucontrol->value.integer.value[0]);
+ changed = 1;
+ }
+
+ } else if (kcontrol->private_value == PCM_PLAYBACK_MUTE) {
+ /* Now implemented */
+ audio_info(" Mute attempted\n");
+ changed = toggle_mute(chip, ucontrol->value.integer.value[0]);
+
+ } else if (kcontrol->private_value == PCM_PLAYBACK_DEVICE) {
+ if (ucontrol->value.integer.value[0] != chip->dest) {
+ chip->dest = ucontrol->value.integer.value[0];
+ changed = 1;
+ }
+ }
+
+ if (changed && bcm2835_audio_set_ctls(chip))
+ dev_err(chip->card->dev, "Failed to set ALSA controls..\n");
+
+unlock:
+ mutex_unlock(&chip->audio_mutex);
+ return changed;
+}
+
+static DECLARE_TLV_DB_SCALE(snd_bcm2835_db_scale, CTRL_VOL_MIN, 1, 1);
+
+static struct snd_kcontrol_new snd_bcm2835_ctl[] = {
+ {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "PCM Playback Volume",
+ .index = 0,
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE | SNDRV_CTL_ELEM_ACCESS_TLV_READ,
+ .private_value = PCM_PLAYBACK_VOLUME,
+ .info = snd_bcm2835_ctl_info,
+ .get = snd_bcm2835_ctl_get,
+ .put = snd_bcm2835_ctl_put,
+ .count = 1,
+ .tlv = {.p = snd_bcm2835_db_scale}
+ },
+ {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "PCM Playback Switch",
+ .index = 0,
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .private_value = PCM_PLAYBACK_MUTE,
+ .info = snd_bcm2835_ctl_info,
+ .get = snd_bcm2835_ctl_get,
+ .put = snd_bcm2835_ctl_put,
+ .count = 1,
+ },
+ {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "PCM Playback Route",
+ .index = 0,
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .private_value = PCM_PLAYBACK_DEVICE,
+ .info = snd_bcm2835_ctl_info,
+ .get = snd_bcm2835_ctl_get,
+ .put = snd_bcm2835_ctl_put,
+ .count = 1,
+ },
+};
+
+static int snd_bcm2835_spdif_default_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_IEC958;
+ uinfo->count = 1;
+ return 0;
+}
+
+static int snd_bcm2835_spdif_default_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct bcm2835_chip *chip = snd_kcontrol_chip(kcontrol);
+ int i;
+
+ if (mutex_lock_interruptible(&chip->audio_mutex))
+ return -EINTR;
+
+ for (i = 0; i < 4; i++)
+ ucontrol->value.iec958.status[i] =
+ (chip->spdif_status >> (i * 8)) & 0xff;
+
+ mutex_unlock(&chip->audio_mutex);
+ return 0;
+}
+
+static int snd_bcm2835_spdif_default_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct bcm2835_chip *chip = snd_kcontrol_chip(kcontrol);
+ unsigned int val = 0;
+ int i, change;
+
+ if (mutex_lock_interruptible(&chip->audio_mutex))
+ return -EINTR;
+
+ for (i = 0; i < 4; i++)
+ val |= (unsigned int)ucontrol->value.iec958.status[i] << (i * 8);
+
+ change = val != chip->spdif_status;
+ chip->spdif_status = val;
+
+ mutex_unlock(&chip->audio_mutex);
+ return change;
+}
+
+static int snd_bcm2835_spdif_mask_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_IEC958;
+ uinfo->count = 1;
+ return 0;
+}
+
+static int snd_bcm2835_spdif_mask_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ /*
+ * bcm2835 supports only consumer mode and sets all other format flags
+ * automatically. So the only thing left is signalling non-audio content
+ */
+ ucontrol->value.iec958.status[0] = IEC958_AES0_NONAUDIO;
+ return 0;
+}
+
+static int snd_bcm2835_spdif_stream_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_IEC958;
+ uinfo->count = 1;
+ return 0;
+}
+
+static int snd_bcm2835_spdif_stream_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct bcm2835_chip *chip = snd_kcontrol_chip(kcontrol);
+ int i;
+
+ if (mutex_lock_interruptible(&chip->audio_mutex))
+ return -EINTR;
+
+ for (i = 0; i < 4; i++)
+ ucontrol->value.iec958.status[i] =
+ (chip->spdif_status >> (i * 8)) & 0xff;
+
+ mutex_unlock(&chip->audio_mutex);
+ return 0;
+}
+
+static int snd_bcm2835_spdif_stream_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct bcm2835_chip *chip = snd_kcontrol_chip(kcontrol);
+ unsigned int val = 0;
+ int i, change;
+
+ if (mutex_lock_interruptible(&chip->audio_mutex))
+ return -EINTR;
+
+ for (i = 0; i < 4; i++)
+ val |= (unsigned int)ucontrol->value.iec958.status[i] << (i * 8);
+ change = val != chip->spdif_status;
+ chip->spdif_status = val;
+
+ mutex_unlock(&chip->audio_mutex);
+ return change;
+}
+
+static struct snd_kcontrol_new snd_bcm2835_spdif[] = {
+ {
+ .iface = SNDRV_CTL_ELEM_IFACE_PCM,
+ .name = SNDRV_CTL_NAME_IEC958("", PLAYBACK, DEFAULT),
+ .info = snd_bcm2835_spdif_default_info,
+ .get = snd_bcm2835_spdif_default_get,
+ .put = snd_bcm2835_spdif_default_put
+ },
+ {
+ .access = SNDRV_CTL_ELEM_ACCESS_READ,
+ .iface = SNDRV_CTL_ELEM_IFACE_PCM,
+ .name = SNDRV_CTL_NAME_IEC958("", PLAYBACK, CON_MASK),
+ .info = snd_bcm2835_spdif_mask_info,
+ .get = snd_bcm2835_spdif_mask_get,
+ },
+ {
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
+ SNDRV_CTL_ELEM_ACCESS_INACTIVE,
+ .iface = SNDRV_CTL_ELEM_IFACE_PCM,
+ .name = SNDRV_CTL_NAME_IEC958("", PLAYBACK, PCM_STREAM),
+ .info = snd_bcm2835_spdif_stream_info,
+ .get = snd_bcm2835_spdif_stream_get,
+ .put = snd_bcm2835_spdif_stream_put,
+ },
+};
+
+int snd_bcm2835_new_ctl(struct bcm2835_chip *chip)
+{
+ int err;
+ unsigned int idx;
+
+ strcpy(chip->card->mixername, "Broadcom Mixer");
+ for (idx = 0; idx < ARRAY_SIZE(snd_bcm2835_ctl); idx++) {
+ err = snd_ctl_add(chip->card,
+ snd_ctl_new1(&snd_bcm2835_ctl[idx], chip));
+ if (err < 0)
+ return err;
+ }
+ for (idx = 0; idx < ARRAY_SIZE(snd_bcm2835_spdif); idx++) {
+ err = snd_ctl_add(chip->card,
+ snd_ctl_new1(&snd_bcm2835_spdif[idx], chip));
+ if (err < 0)
+ return err;
+ }
+ return 0;
+}
+
+static struct snd_kcontrol_new snd_bcm2835_headphones_ctl[] = {
+ {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "Headphone Playback Volume",
+ .index = 0,
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
+ SNDRV_CTL_ELEM_ACCESS_TLV_READ,
+ .private_value = PCM_PLAYBACK_VOLUME,
+ .info = snd_bcm2835_ctl_info,
+ .get = snd_bcm2835_ctl_get,
+ .put = snd_bcm2835_ctl_put,
+ .count = 1,
+ .tlv = {.p = snd_bcm2835_db_scale}
+ },
+ {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "Headphone Playback Switch",
+ .index = 0,
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .private_value = PCM_PLAYBACK_MUTE,
+ .info = snd_bcm2835_ctl_info,
+ .get = snd_bcm2835_ctl_get,
+ .put = snd_bcm2835_ctl_put,
+ .count = 1,
+ }
+};
+
+int snd_bcm2835_new_headphones_ctl(struct bcm2835_chip *chip)
+{
+ int err;
+ unsigned int idx;
+
+ strcpy(chip->card->mixername, "Broadcom Mixer");
+ for (idx = 0; idx < ARRAY_SIZE(snd_bcm2835_headphones_ctl); idx++) {
+ err = snd_ctl_add(chip->card,
+ snd_ctl_new1(&snd_bcm2835_headphones_ctl[idx],
+ chip));
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
+static struct snd_kcontrol_new snd_bcm2835_hdmi[] = {
+ {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "HDMI Playback Volume",
+ .index = 0,
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
+ SNDRV_CTL_ELEM_ACCESS_TLV_READ,
+ .private_value = PCM_PLAYBACK_VOLUME,
+ .info = snd_bcm2835_ctl_info,
+ .get = snd_bcm2835_ctl_get,
+ .put = snd_bcm2835_ctl_put,
+ .count = 1,
+ .tlv = {.p = snd_bcm2835_db_scale}
+ },
+ {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "HDMI Playback Switch",
+ .index = 0,
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .private_value = PCM_PLAYBACK_MUTE,
+ .info = snd_bcm2835_ctl_info,
+ .get = snd_bcm2835_ctl_get,
+ .put = snd_bcm2835_ctl_put,
+ .count = 1,
+ }
+};
+
+int snd_bcm2835_new_hdmi_ctl(struct bcm2835_chip *chip)
+{
+ int err;
+ unsigned int idx;
+
+ strcpy(chip->card->mixername, "Broadcom Mixer");
+ for (idx = 0; idx < ARRAY_SIZE(snd_bcm2835_hdmi); idx++) {
+ err = snd_ctl_add(chip->card,
+ snd_ctl_new1(&snd_bcm2835_hdmi[idx], chip));
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
diff --git a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-pcm.c b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-pcm.c
new file mode 100644
index 000000000000..e8cf0b97bf02
--- /dev/null
+++ b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-pcm.c
@@ -0,0 +1,568 @@
+/*****************************************************************************
+ * Copyright 2011 Broadcom Corporation. All rights reserved.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2, available at
+ * http://www.broadcom.com/licenses/GPLv2.php (the "GPL").
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a
+ * license other than the GPL, without Broadcom's express prior written
+ * consent.
+ *****************************************************************************/
+
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+
+#include <sound/asoundef.h>
+
+#include "bcm2835.h"
+
+/* hardware definition */
+static struct snd_pcm_hardware snd_bcm2835_playback_hw = {
+ .info = (SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER |
+ SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID),
+ .formats = SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S16_LE,
+ .rates = SNDRV_PCM_RATE_CONTINUOUS | SNDRV_PCM_RATE_8000_48000,
+ .rate_min = 8000,
+ .rate_max = 48000,
+ .channels_min = 1,
+ .channels_max = 2,
+ .buffer_bytes_max = 128 * 1024,
+ .period_bytes_min = 1 * 1024,
+ .period_bytes_max = 128 * 1024,
+ .periods_min = 1,
+ .periods_max = 128,
+};
+
+static struct snd_pcm_hardware snd_bcm2835_playback_spdif_hw = {
+ .info = (SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER |
+ SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID),
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ .rates = SNDRV_PCM_RATE_CONTINUOUS | SNDRV_PCM_RATE_44100 |
+ SNDRV_PCM_RATE_48000,
+ .rate_min = 44100,
+ .rate_max = 48000,
+ .channels_min = 2,
+ .channels_max = 2,
+ .buffer_bytes_max = 128 * 1024,
+ .period_bytes_min = 1 * 1024,
+ .period_bytes_max = 128 * 1024,
+ .periods_min = 1,
+ .periods_max = 128,
+};
+
+static void snd_bcm2835_playback_free(struct snd_pcm_runtime *runtime)
+{
+ audio_info("Freeing up alsa stream here ..\n");
+ kfree(runtime->private_data);
+ runtime->private_data = NULL;
+}
+
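+/*
+ * Called from the VCHIQ callback path once the firmware has consumed
+ * buffers: advances the stream position and reports a period elapsed to
+ * ALSA whenever a period boundary has been crossed.
+ */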
+void bcm2835_playback_fifo(struct bcm2835_alsa_stream *alsa_stream)
+{
+ unsigned int consumed = 0;
+ int new_period = 0;
+
+
+ audio_info("alsa_stream=%p substream=%p\n", alsa_stream,
+ alsa_stream ? alsa_stream->substream : NULL);
+
+ if (alsa_stream->open)
+ consumed = bcm2835_audio_retrieve_buffers(alsa_stream);
+
+ /*
+ * We get called only if playback was triggered, so the buffers we
+ * retrieve in each iteration are those that have already been
+ * played out.
+ */
+
+ if (alsa_stream->period_size) {
+ if ((alsa_stream->pos / alsa_stream->period_size) !=
+ ((alsa_stream->pos + consumed) / alsa_stream->period_size))
+ new_period = 1;
+ }
+ audio_debug("updating pos cur: %d + %d max:%d period_bytes:%d, hw_ptr: %d new_period:%d\n",
+ alsa_stream->pos,
+ consumed,
+ alsa_stream->buffer_size,
+ (int) (alsa_stream->period_size * alsa_stream->substream->runtime->periods),
+ frames_to_bytes(alsa_stream->substream->runtime, alsa_stream->substream->runtime->status->hw_ptr),
+ new_period);
+ if (alsa_stream->buffer_size) {
+ alsa_stream->pos += consumed & ~(1 << 30);
+ alsa_stream->pos %= alsa_stream->buffer_size;
+ }
+
+ if (alsa_stream->substream) {
+ if (new_period)
+ snd_pcm_period_elapsed(alsa_stream->substream);
+ } else {
+ audio_warning(" unexpected NULL substream\n");
+ }
+}
+
+/* open callback */
+static int snd_bcm2835_playback_open_generic(
+ struct snd_pcm_substream *substream, int spdif)
+{
+ struct bcm2835_chip *chip = snd_pcm_substream_chip(substream);
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct bcm2835_alsa_stream *alsa_stream;
+ int idx;
+ int err;
+
+
+ if (mutex_lock_interruptible(&chip->audio_mutex)) {
+ audio_error("Interrupted whilst waiting for lock\n");
+ return -EINTR;
+ }
+ audio_info("Alsa open (%d)\n", substream->number);
+ idx = substream->number;
+
+ if (spdif && chip->opened) {
+ err = -EBUSY;
+ goto out;
+ } else if (!spdif && (chip->opened & (1 << idx))) {
+ err = -EBUSY;
+ goto out;
+ }
+ if (idx >= MAX_SUBSTREAMS) {
+ audio_error
+ ("substream(%d) device doesn't exist max(%d) substreams allowed\n",
+ idx, MAX_SUBSTREAMS);
+ err = -ENODEV;
+ goto out;
+ }
+
+ /* Check if we are ready */
+ if (!(chip->avail_substreams & (1 << idx))) {
+ /* We are not ready yet */
+ audio_error("substream(%d) device is not ready yet\n", idx);
+ err = -EAGAIN;
+ goto out;
+ }
+
+ alsa_stream = kzalloc(sizeof(*alsa_stream), GFP_KERNEL);
+ if (!alsa_stream) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ /* Initialise alsa_stream */
+ alsa_stream->chip = chip;
+ alsa_stream->substream = substream;
+ alsa_stream->idx = idx;
+
+ spin_lock_init(&alsa_stream->lock);
+
+ err = bcm2835_audio_open(alsa_stream);
+ if (err) {
+ kfree(alsa_stream);
+ goto out;
+ }
+ runtime->private_data = alsa_stream;
+ runtime->private_free = snd_bcm2835_playback_free;
+ if (spdif) {
+ runtime->hw = snd_bcm2835_playback_spdif_hw;
+ } else {
+ /* clear spdif status, as we are not in spdif mode */
+ chip->spdif_status = 0;
+ runtime->hw = snd_bcm2835_playback_hw;
+ }
+ /* minimum 16 bytes alignment (for vchiq bulk transfers) */
+ snd_pcm_hw_constraint_step(runtime,
+ 0,
+ SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
+ 16);
+
+ chip->alsa_stream[idx] = alsa_stream;
+
+ chip->opened |= (1 << idx);
+ alsa_stream->open = 1;
+ alsa_stream->draining = 1;
+
+out:
+ mutex_unlock(&chip->audio_mutex);
+
+
+ return err;
+}
+
+static int snd_bcm2835_playback_open(struct snd_pcm_substream *substream)
+{
+ return snd_bcm2835_playback_open_generic(substream, 0);
+}
+
+static int snd_bcm2835_playback_spdif_open(struct snd_pcm_substream *substream)
+{
+ return snd_bcm2835_playback_open_generic(substream, 1);
+}
+
+/* close callback */
+static int snd_bcm2835_playback_close(struct snd_pcm_substream *substream)
+{
+ /* the hardware-specific codes will be here */
+
+ struct bcm2835_chip *chip;
+ struct snd_pcm_runtime *runtime;
+ struct bcm2835_alsa_stream *alsa_stream;
+
+
+ chip = snd_pcm_substream_chip(substream);
+ if (mutex_lock_interruptible(&chip->audio_mutex)) {
+ audio_error("Interrupted whilst waiting for lock\n");
+ return -EINTR;
+ }
+ runtime = substream->runtime;
+ alsa_stream = runtime->private_data;
+
+ audio_info("Alsa close\n");
+
+ /*
+ * Call stop if it's still running. This happens when app
+ * is force killed and we don't get a stop trigger.
+ */
+ if (alsa_stream->running) {
+ int err;
+ err = bcm2835_audio_stop(alsa_stream);
+ alsa_stream->running = 0;
+ if (err)
+ audio_error(" Failed to STOP alsa device\n");
+ }
+
+ alsa_stream->period_size = 0;
+ alsa_stream->buffer_size = 0;
+
+ if (alsa_stream->open) {
+ alsa_stream->open = 0;
+ bcm2835_audio_close(alsa_stream);
+ }
+ if (alsa_stream->chip)
+ alsa_stream->chip->alsa_stream[alsa_stream->idx] = NULL;
+ /*
+ * Do not free up alsa_stream here, it will be freed up by
+ * runtime->private_free callback we registered in *_open above
+ */
+
+ chip->opened &= ~(1 << substream->number);
+
+ mutex_unlock(&chip->audio_mutex);
+
+ return 0;
+}
+
+/* hw_params callback */
+static int snd_bcm2835_pcm_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct bcm2835_alsa_stream *alsa_stream = runtime->private_data;
+ int err;
+
+
+ err = snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(params));
+ if (err < 0) {
+ audio_error
+ (" pcm_lib_malloc failed to allocated pages for buffers\n");
+ return err;
+ }
+
+ alsa_stream->channels = params_channels(params);
+ alsa_stream->params_rate = params_rate(params);
+ alsa_stream->pcm_format_width = snd_pcm_format_width(params_format(params));
+
+ return err;
+}
+
+/* hw_free callback */
+static int snd_bcm2835_pcm_hw_free(struct snd_pcm_substream *substream)
+{
+ return snd_pcm_lib_free_pages(substream);
+}
+
+/* prepare callback */
+static int snd_bcm2835_pcm_prepare(struct snd_pcm_substream *substream)
+{
+ struct bcm2835_chip *chip = snd_pcm_substream_chip(substream);
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct bcm2835_alsa_stream *alsa_stream = runtime->private_data;
+ int channels;
+ int err;
+
+
+ if (mutex_lock_interruptible(&chip->audio_mutex))
+ return -EINTR;
+
+ /*
+ * Notify the vchiq that it should enter spdif passthrough mode by
+ * setting channels to 0 (see
+ * https://github.com/raspberrypi/linux/issues/528).
+ */
+ if (chip->spdif_status & IEC958_AES0_NONAUDIO)
+ channels = 0;
+ else
+ channels = alsa_stream->channels;
+
+ err = bcm2835_audio_set_params(alsa_stream, channels,
+ alsa_stream->params_rate,
+ alsa_stream->pcm_format_width);
+ if (err < 0)
+ audio_error(" error setting hw params\n");
+
+
+ bcm2835_audio_setup(alsa_stream);
+
+ /* in preparation of the stream, set the controls (volume level) of the stream */
+ bcm2835_audio_set_ctls(alsa_stream->chip);
+
+
+ memset(&alsa_stream->pcm_indirect, 0, sizeof(alsa_stream->pcm_indirect));
+
+ alsa_stream->pcm_indirect.hw_buffer_size =
+ alsa_stream->pcm_indirect.sw_buffer_size =
+ snd_pcm_lib_buffer_bytes(substream);
+
+ alsa_stream->buffer_size = snd_pcm_lib_buffer_bytes(substream);
+ alsa_stream->period_size = snd_pcm_lib_period_bytes(substream);
+ alsa_stream->pos = 0;
+
+ audio_debug("buffer_size=%d, period_size=%d pos=%d frame_bits=%d\n",
+ alsa_stream->buffer_size, alsa_stream->period_size,
+ alsa_stream->pos, runtime->frame_bits);
+
+ mutex_unlock(&chip->audio_mutex);
+ return 0;
+}
+
+static void snd_bcm2835_pcm_transfer(struct snd_pcm_substream *substream,
+ struct snd_pcm_indirect *rec, size_t bytes)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct bcm2835_alsa_stream *alsa_stream = runtime->private_data;
+ void *src = (void *) (substream->runtime->dma_area + rec->sw_data);
+ int err;
+
+ err = bcm2835_audio_write(alsa_stream, bytes, src);
+ if (err)
+ audio_error(" Failed to transfer to alsa device (%d)\n", err);
+
+}
+
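+/*
+ * The ack callback drives the indirect PCM layer, which copies newly
+ * written data from the ALSA buffer to the firmware via
+ * snd_bcm2835_pcm_transfer().
+ */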
+static int snd_bcm2835_pcm_ack(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct bcm2835_alsa_stream *alsa_stream = runtime->private_data;
+ struct snd_pcm_indirect *pcm_indirect = &alsa_stream->pcm_indirect;
+
+ pcm_indirect->hw_queue_size = runtime->hw.buffer_bytes_max;
+ snd_pcm_indirect_playback_transfer(substream, pcm_indirect,
+ snd_bcm2835_pcm_transfer);
+ return 0;
+}
+
+/* trigger callback */
+static int snd_bcm2835_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct bcm2835_alsa_stream *alsa_stream = runtime->private_data;
+ int err = 0;
+
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ audio_debug("bcm2835_AUDIO_TRIGGER_START running=%d\n",
+ alsa_stream->running);
+ if (!alsa_stream->running) {
+ err = bcm2835_audio_start(alsa_stream);
+ if (!err) {
+ alsa_stream->pcm_indirect.hw_io =
+ alsa_stream->pcm_indirect.hw_data =
+ bytes_to_frames(runtime,
+ alsa_stream->pos);
+ substream->ops->ack(substream);
+ alsa_stream->running = 1;
+ alsa_stream->draining = 1;
+ } else {
+ audio_error(" Failed to START alsa device (%d)\n", err);
+ }
+ }
+ break;
+ case SNDRV_PCM_TRIGGER_STOP:
+ audio_debug
+ ("bcm2835_AUDIO_TRIGGER_STOP running=%d draining=%d\n",
+ alsa_stream->running, runtime->status->state == SNDRV_PCM_STATE_DRAINING);
+ if (runtime->status->state == SNDRV_PCM_STATE_DRAINING) {
+ audio_info("DRAINING\n");
+ alsa_stream->draining = 1;
+ } else {
+ audio_info("DROPPING\n");
+ alsa_stream->draining = 0;
+ }
+ if (alsa_stream->running) {
+ err = bcm2835_audio_stop(alsa_stream);
+ if (err != 0)
+ audio_error(" Failed to STOP alsa device (%d)\n", err);
+ alsa_stream->running = 0;
+ }
+ break;
+ default:
+ err = -EINVAL;
+ }
+
+ return err;
+}
+
+/* pointer callback */
+static snd_pcm_uframes_t
+snd_bcm2835_pcm_pointer(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct bcm2835_alsa_stream *alsa_stream = runtime->private_data;
+
+
+ audio_debug("pcm_pointer... (%d) hwptr=%d appl=%d pos=%d\n", 0,
+ frames_to_bytes(runtime, runtime->status->hw_ptr),
+ frames_to_bytes(runtime, runtime->control->appl_ptr),
+ alsa_stream->pos);
+
+ return snd_pcm_indirect_playback_pointer(substream,
+ &alsa_stream->pcm_indirect,
+ alsa_stream->pos);
+}
+
+static int snd_bcm2835_pcm_lib_ioctl(struct snd_pcm_substream *substream,
+ unsigned int cmd, void *arg)
+{
+ int ret = snd_pcm_lib_ioctl(substream, cmd, arg);
+
+ audio_info(" .. substream=%p, cmd=%d, arg=%p (%x) ret=%d\n", substream,
+ cmd, arg, arg ? *(unsigned *) arg : 0, ret);
+ return ret;
+}
+
+/* operators */
+static struct snd_pcm_ops snd_bcm2835_playback_ops = {
+ .open = snd_bcm2835_playback_open,
+ .close = snd_bcm2835_playback_close,
+ .ioctl = snd_bcm2835_pcm_lib_ioctl,
+ .hw_params = snd_bcm2835_pcm_hw_params,
+ .hw_free = snd_bcm2835_pcm_hw_free,
+ .prepare = snd_bcm2835_pcm_prepare,
+ .trigger = snd_bcm2835_pcm_trigger,
+ .pointer = snd_bcm2835_pcm_pointer,
+ .ack = snd_bcm2835_pcm_ack,
+};
+
+static struct snd_pcm_ops snd_bcm2835_playback_spdif_ops = {
+ .open = snd_bcm2835_playback_spdif_open,
+ .close = snd_bcm2835_playback_close,
+ .ioctl = snd_bcm2835_pcm_lib_ioctl,
+ .hw_params = snd_bcm2835_pcm_hw_params,
+ .hw_free = snd_bcm2835_pcm_hw_free,
+ .prepare = snd_bcm2835_pcm_prepare,
+ .trigger = snd_bcm2835_pcm_trigger,
+ .pointer = snd_bcm2835_pcm_pointer,
+ .ack = snd_bcm2835_pcm_ack,
+};
+
+/* create a pcm device */
+int snd_bcm2835_new_pcm(struct bcm2835_chip *chip, u32 numchannels)
+{
+ struct snd_pcm *pcm;
+ int err;
+
+ mutex_init(&chip->audio_mutex);
+ if (mutex_lock_interruptible(&chip->audio_mutex)) {
+ audio_error("Interrupted whilst waiting for lock\n");
+ return -EINTR;
+ }
+ err = snd_pcm_new(chip->card, "bcm2835 ALSA", 0, numchannels, 0, &pcm);
+ if (err < 0)
+ goto out;
+ pcm->private_data = chip;
+ strcpy(pcm->name, "bcm2835 ALSA");
+ chip->pcm = pcm;
+ chip->dest = AUDIO_DEST_AUTO;
+ chip->volume = alsa2chip(0);
+ chip->mute = CTRL_VOL_UNMUTE; /*disable mute on startup */
+ /* set operators */
+ snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK,
+ &snd_bcm2835_playback_ops);
+
+ /* pre-allocation of buffers */
+ /* NOTE: this may fail */
+ snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_CONTINUOUS,
+ snd_dma_continuous_data(GFP_KERNEL),
+ snd_bcm2835_playback_hw.buffer_bytes_max,
+ snd_bcm2835_playback_hw.buffer_bytes_max);
+
+
+out:
+ mutex_unlock(&chip->audio_mutex);
+
+ return err;
+}
+
+int snd_bcm2835_new_spdif_pcm(struct bcm2835_chip *chip)
+{
+ struct snd_pcm *pcm;
+ int err;
+
+ if (mutex_lock_interruptible(&chip->audio_mutex)) {
+ audio_error("Interrupted whilst waiting for lock\n");
+ return -EINTR;
+ }
+ err = snd_pcm_new(chip->card, "bcm2835 ALSA", 1, 1, 0, &pcm);
+ if (err < 0)
+ goto out;
+
+ pcm->private_data = chip;
+ strcpy(pcm->name, "bcm2835 IEC958/HDMI");
+ chip->pcm_spdif = pcm;
+ snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK,
+ &snd_bcm2835_playback_spdif_ops);
+
+ /* pre-allocation of buffers */
+ /* NOTE: this may fail */
+ snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_CONTINUOUS,
+ snd_dma_continuous_data(GFP_KERNEL),
+ snd_bcm2835_playback_spdif_hw.buffer_bytes_max,
+ snd_bcm2835_playback_spdif_hw.buffer_bytes_max);
+out:
+ mutex_unlock(&chip->audio_mutex);
+
+ return err;
+}
+
+int snd_bcm2835_new_simple_pcm(struct bcm2835_chip *chip,
+ const char *name,
+ enum snd_bcm2835_route route,
+ u32 numchannels)
+{
+ struct snd_pcm *pcm;
+ int err;
+
+ mutex_init(&chip->audio_mutex);
+
+ err = snd_pcm_new(chip->card, name, 0, numchannels,
+ 0, &pcm);
+ if (err)
+ return err;
+
+ pcm->private_data = chip;
+ strcpy(pcm->name, name);
+ chip->pcm = pcm;
+ chip->dest = route;
+ chip->volume = alsa2chip(0);
+ chip->mute = CTRL_VOL_UNMUTE;
+
+ snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK,
+ &snd_bcm2835_playback_ops);
+
+ snd_pcm_lib_preallocate_pages_for_all(
+ pcm,
+ SNDRV_DMA_TYPE_CONTINUOUS,
+ snd_dma_continuous_data(GFP_KERNEL),
+ snd_bcm2835_playback_hw.buffer_bytes_max,
+ snd_bcm2835_playback_hw.buffer_bytes_max);
+
+ return 0;
+}
+
diff --git a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c
new file mode 100644
index 000000000000..5f3d8f2339e3
--- /dev/null
+++ b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c
@@ -0,0 +1,875 @@
+/*****************************************************************************
+ * Copyright 2011 Broadcom Corporation. All rights reserved.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2, available at
+ * http://www.broadcom.com/licenses/GPLv2.php (the "GPL").
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a
+ * license other than the GPL, without Broadcom's express prior written
+ * consent.
+ *****************************************************************************/
+
+#include <linux/device.h>
+#include <sound/core.h>
+#include <sound/initval.h>
+#include <sound/pcm.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/mm.h>
+#include <linux/syscalls.h>
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/atomic.h>
+#include <linux/module.h>
+#include <linux/completion.h>
+
+#include "bcm2835.h"
+
+/* ---- Include Files -------------------------------------------------------- */
+
+#include "interface/vchi/vchi.h"
+#include "vc_vchi_audioserv_defs.h"
+
+/* ---- Private Constants and Types ------------------------------------------ */
+
+#define BCM2835_AUDIO_STOP 0
+#define BCM2835_AUDIO_START 1
+#define BCM2835_AUDIO_WRITE 2
+
+/* Logging macros (for remapping to other logging mechanisms, e.g., printf) */
+#ifdef AUDIO_DEBUG_ENABLE
+#define LOG_ERR(fmt, arg...) pr_err("%s:%d " fmt, __func__, __LINE__, ##arg)
+#define LOG_WARN(fmt, arg...) pr_info("%s:%d " fmt, __func__, __LINE__, ##arg)
+#define LOG_INFO(fmt, arg...) pr_info("%s:%d " fmt, __func__, __LINE__, ##arg)
+#define LOG_DBG(fmt, arg...) pr_info("%s:%d " fmt, __func__, __LINE__, ##arg)
+#else
+#define LOG_ERR(fmt, arg...) pr_err("%s:%d " fmt, __func__, __LINE__, ##arg)
+#define LOG_WARN(fmt, arg...) no_printk(fmt, ##arg)
+#define LOG_INFO(fmt, arg...) no_printk(fmt, ##arg)
+#define LOG_DBG(fmt, arg...) no_printk(fmt, ##arg)
+#endif
+
+struct bcm2835_audio_instance {
+ unsigned int num_connections;
+ VCHI_SERVICE_HANDLE_T vchi_handle[VCHI_MAX_NUM_CONNECTIONS];
+ struct completion msg_avail_comp;
+ struct mutex vchi_mutex;
+ struct bcm2835_alsa_stream *alsa_stream;
+ int result;
+ short peer_version;
+};
+
+static bool force_bulk;
+
+/* ---- Private Variables ---------------------------------------------------- */
+
+/* ---- Private Function Prototypes ------------------------------------------ */
+
+/* ---- Private Functions ---------------------------------------------------- */
+
+static int bcm2835_audio_stop_worker(struct bcm2835_alsa_stream *alsa_stream);
+static int bcm2835_audio_start_worker(struct bcm2835_alsa_stream *alsa_stream);
+static int bcm2835_audio_write_worker(struct bcm2835_alsa_stream *alsa_stream,
+ unsigned int count, void *src);
+
+// Routine to send a message across a service
+
+static int
+bcm2835_vchi_msg_queue(VCHI_SERVICE_HANDLE_T handle,
+ void *data,
+ unsigned int size)
+{
+ return vchi_queue_kernel_message(handle,
+ data,
+ size);
+}
+
+static const u32 BCM2835_AUDIO_WRITE_COOKIE1 = ('B' << 24 | 'C' << 16 |
+ 'M' << 8 | 'A');
+static const u32 BCM2835_AUDIO_WRITE_COOKIE2 = ('D' << 24 | 'A' << 16 |
+ 'T' << 8 | 'A');
+
+struct bcm2835_audio_work {
+ struct work_struct my_work;
+ struct bcm2835_alsa_stream *alsa_stream;
+ int cmd;
+ void *src;
+ unsigned int count;
+};
+
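+/*
+ * Workqueue handler: dispatches queued start/stop/write requests to the
+ * corresponding *_worker() functions and frees the work item afterwards.
+ */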
+static void my_wq_function(struct work_struct *work)
+{
+ struct bcm2835_audio_work *w =
+ container_of(work, struct bcm2835_audio_work, my_work);
+ int ret = -9;
+
+ switch (w->cmd) {
+ case BCM2835_AUDIO_START:
+ ret = bcm2835_audio_start_worker(w->alsa_stream);
+ break;
+ case BCM2835_AUDIO_STOP:
+ ret = bcm2835_audio_stop_worker(w->alsa_stream);
+ break;
+ case BCM2835_AUDIO_WRITE:
+ ret = bcm2835_audio_write_worker(w->alsa_stream, w->count,
+ w->src);
+ break;
+ default:
+ LOG_ERR(" Unexpected work: %p:%d\n", w->alsa_stream, w->cmd);
+ break;
+ }
+ kfree(w);
+}
+
+int bcm2835_audio_start(struct bcm2835_alsa_stream *alsa_stream)
+{
+ if (alsa_stream->my_wq) {
+ struct bcm2835_audio_work *work;
+
+ work = kmalloc(sizeof(*work), GFP_ATOMIC);
+ /*--- Queue some work (item 1) ---*/
+ if (!work) {
+ LOG_ERR(" .. Error: NULL work kmalloc\n");
+ return -ENOMEM;
+ }
+ INIT_WORK(&work->my_work, my_wq_function);
+ work->alsa_stream = alsa_stream;
+ work->cmd = BCM2835_AUDIO_START;
+ if (!queue_work(alsa_stream->my_wq, &work->my_work)) {
+ kfree(work);
+ return -EBUSY;
+ }
+ }
+ return 0;
+}
+
+int bcm2835_audio_stop(struct bcm2835_alsa_stream *alsa_stream)
+{
+ if (alsa_stream->my_wq) {
+ struct bcm2835_audio_work *work;
+
+ work = kmalloc(sizeof(*work), GFP_ATOMIC);
+ /*--- Queue some work (item 1) ---*/
+ if (!work) {
+ LOG_ERR(" .. Error: NULL work kmalloc\n");
+ return -ENOMEM;
+ }
+ INIT_WORK(&work->my_work, my_wq_function);
+ work->alsa_stream = alsa_stream;
+ work->cmd = BCM2835_AUDIO_STOP;
+ if (!queue_work(alsa_stream->my_wq, &work->my_work)) {
+ kfree(work);
+ return -EBUSY;
+ }
+ }
+ return 0;
+}
+
+int bcm2835_audio_write(struct bcm2835_alsa_stream *alsa_stream,
+ unsigned int count, void *src)
+{
+ if (alsa_stream->my_wq) {
+ struct bcm2835_audio_work *work;
+
+ work = kmalloc(sizeof(*work), GFP_ATOMIC);
+ /*--- Queue some work (item 1) ---*/
+ if (!work) {
+ LOG_ERR(" .. Error: NULL work kmalloc\n");
+ return -ENOMEM;
+ }
+ INIT_WORK(&work->my_work, my_wq_function);
+ work->alsa_stream = alsa_stream;
+ work->cmd = BCM2835_AUDIO_WRITE;
+ work->src = src;
+ work->count = count;
+ if (!queue_work(alsa_stream->my_wq, &work->my_work)) {
+ kfree(work);
+ return -EBUSY;
+ }
+ }
+ return 0;
+}
+
+static void my_workqueue_init(struct bcm2835_alsa_stream *alsa_stream)
+{
+ alsa_stream->my_wq = alloc_workqueue("my_queue", WQ_HIGHPRI, 1);
+}
+
+static void my_workqueue_quit(struct bcm2835_alsa_stream *alsa_stream)
+{
+ if (alsa_stream->my_wq) {
+ flush_workqueue(alsa_stream->my_wq);
+ destroy_workqueue(alsa_stream->my_wq);
+ alsa_stream->my_wq = NULL;
+ }
+}
+
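+/*
+ * VCHI service callback: a RESULT message stores the return code and
+ * completes the waiter, while a COMPLETE message credits the returned
+ * byte count and kicks bcm2835_playback_fifo().
+ */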
+static void audio_vchi_callback(void *param,
+ const VCHI_CALLBACK_REASON_T reason,
+ void *msg_handle)
+{
+ struct bcm2835_audio_instance *instance = param;
+ int status;
+ int msg_len;
+ struct vc_audio_msg m;
+
+ if (reason != VCHI_CALLBACK_MSG_AVAILABLE)
+ return;
+
+ if (!instance) {
+ LOG_ERR(" .. instance is null\n");
+ BUG();
+ return;
+ }
+ if (!instance->vchi_handle[0]) {
+ LOG_ERR(" .. instance->vchi_handle[0] is null\n");
+ BUG();
+ return;
+ }
+ status = vchi_msg_dequeue(instance->vchi_handle[0],
+ &m, sizeof(m), &msg_len, VCHI_FLAGS_NONE);
+ if (m.type == VC_AUDIO_MSG_TYPE_RESULT) {
+ LOG_DBG(" .. instance=%p, m.type=VC_AUDIO_MSG_TYPE_RESULT, success=%d\n",
+ instance, m.u.result.success);
+ instance->result = m.u.result.success;
+ complete(&instance->msg_avail_comp);
+ } else if (m.type == VC_AUDIO_MSG_TYPE_COMPLETE) {
+ struct bcm2835_alsa_stream *alsa_stream = instance->alsa_stream;
+
+ LOG_DBG(" .. instance=%p, m.type=VC_AUDIO_MSG_TYPE_COMPLETE, complete=%d\n",
+ instance, m.u.complete.count);
+ if (m.u.complete.cookie1 != BCM2835_AUDIO_WRITE_COOKIE1 ||
+ m.u.complete.cookie2 != BCM2835_AUDIO_WRITE_COOKIE2)
+ LOG_ERR(" .. response is corrupt\n");
+ else if (alsa_stream) {
+ atomic_add(m.u.complete.count,
+ &alsa_stream->retrieved);
+ bcm2835_playback_fifo(alsa_stream);
+ } else {
+ LOG_ERR(" .. unexpected alsa_stream=%p\n",
+ alsa_stream);
+ }
+ } else {
+ LOG_ERR(" .. unexpected m.type=%d\n", m.type);
+ }
+}
+
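+/*
+ * Opens the audio service connections on the given VCHI instance and
+ * returns a newly allocated bcm2835_audio_instance, or an ERR_PTR on
+ * failure.
+ */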
+static struct bcm2835_audio_instance *
+vc_vchi_audio_init(VCHI_INSTANCE_T vchi_instance,
+ VCHI_CONNECTION_T **vchi_connections,
+ unsigned int num_connections)
+{
+ unsigned int i;
+ struct bcm2835_audio_instance *instance;
+ int status;
+ int ret;
+
+ LOG_DBG("%s: start", __func__);
+
+ if (num_connections > VCHI_MAX_NUM_CONNECTIONS) {
+ LOG_ERR("%s: unsupported number of connections %u (max=%u)\n",
+ __func__, num_connections, VCHI_MAX_NUM_CONNECTIONS);
+
+ return ERR_PTR(-EINVAL);
+ }
+ /* Allocate memory for this instance */
+ instance = kzalloc(sizeof(*instance), GFP_KERNEL);
+ if (!instance)
+ return ERR_PTR(-ENOMEM);
+
+ instance->num_connections = num_connections;
+
+ /* Create a lock for exclusive, serialized VCHI connection access */
+ mutex_init(&instance->vchi_mutex);
+ /* Open the VCHI service connections */
+ for (i = 0; i < num_connections; i++) {
+ SERVICE_CREATION_T params = {
+ .version = VCHI_VERSION_EX(VC_AUDIOSERV_VER, VC_AUDIOSERV_MIN_VER),
+ .service_id = VC_AUDIO_SERVER_NAME,
+ .connection = vchi_connections[i],
+ .rx_fifo_size = 0,
+ .tx_fifo_size = 0,
+ .callback = audio_vchi_callback,
+ .callback_param = instance,
+ .want_unaligned_bulk_rx = 1, //TODO: remove VCOS_FALSE
+ .want_unaligned_bulk_tx = 1, //TODO: remove VCOS_FALSE
+ .want_crc = 0
+ };
+
+ LOG_DBG("%s: about to open %i\n", __func__, i);
+ status = vchi_service_open(vchi_instance, &params,
+ &instance->vchi_handle[i]);
+
+ LOG_DBG("%s: opened %i: %p=%d\n", __func__, i, instance->vchi_handle[i], status);
+ if (status) {
+ LOG_ERR("%s: failed to open VCHI service connection (status=%d)\n",
+ __func__, status);
+ ret = -EPERM;
+ goto err_close_services;
+ }
+ /* Finished with the service for now */
+ vchi_service_release(instance->vchi_handle[i]);
+ }
+
+ LOG_DBG("%s: okay\n", __func__);
+ return instance;
+
+err_close_services:
+ for (i = 0; i < instance->num_connections; i++) {
+ LOG_ERR("%s: closing %i: %p\n", __func__, i, instance->vchi_handle[i]);
+ if (instance->vchi_handle[i])
+ vchi_service_close(instance->vchi_handle[i]);
+ }
+
+ kfree(instance);
+ LOG_ERR("%s: error\n", __func__);
+
+ return ERR_PTR(ret);
+}
+
+static int vc_vchi_audio_deinit(struct bcm2835_audio_instance *instance)
+{
+ unsigned int i;
+
+
+ if (!instance) {
+ LOG_ERR("%s: invalid handle %p\n", __func__, instance);
+
+ return -1;
+ }
+
+ LOG_DBG(" .. about to lock (%d)\n", instance->num_connections);
+ if (mutex_lock_interruptible(&instance->vchi_mutex)) {
+ LOG_DBG("Interrupted whilst waiting for lock on (%d)\n",
+ instance->num_connections);
+ return -EINTR;
+ }
+
+ /* Close all VCHI service connections */
+ for (i = 0; i < instance->num_connections; i++) {
+ int status;
+
+ LOG_DBG(" .. %i:closing %p\n", i, instance->vchi_handle[i]);
+ vchi_service_use(instance->vchi_handle[i]);
+
+ status = vchi_service_close(instance->vchi_handle[i]);
+ if (status) {
+ LOG_DBG("%s: failed to close VCHI service connection (status=%d)\n",
+ __func__, status);
+ }
+ }
+
+ mutex_unlock(&instance->vchi_mutex);
+
+ kfree(instance);
+
+
+ return 0;
+}
+
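+/*
+ * Lazily initialises the shared VCHI connection on first use and binds a
+ * fresh audio service instance to the given ALSA stream.
+ */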
+static int bcm2835_audio_open_connection(struct bcm2835_alsa_stream *alsa_stream)
+{
+ static VCHI_INSTANCE_T vchi_instance;
+ static VCHI_CONNECTION_T *vchi_connection;
+ static int initted;
+ struct bcm2835_audio_instance *instance =
+ (struct bcm2835_audio_instance *)alsa_stream->instance;
+ int ret;
+
+
+ LOG_INFO("%s: start\n", __func__);
+ BUG_ON(instance);
+ if (instance) {
+ LOG_ERR("%s: VCHI instance already open (%p)\n",
+ __func__, instance);
+ instance->alsa_stream = alsa_stream;
+ alsa_stream->instance = instance;
+ ret = 0; // xxx todo -1;
+ goto err_free_mem;
+ }
+
+ /* Initialize and create a VCHI connection */
+ if (!initted) {
+ ret = vchi_initialise(&vchi_instance);
+ if (ret) {
+ LOG_ERR("%s: failed to initialise VCHI instance (ret=%d)\n",
+ __func__, ret);
+
+ ret = -EIO;
+ goto err_free_mem;
+ }
+ ret = vchi_connect(NULL, 0, vchi_instance);
+ if (ret) {
+ LOG_ERR("%s: failed to connect VCHI instance (ret=%d)\n",
+ __func__, ret);
+
+ ret = -EIO;
+ goto err_free_mem;
+ }
+ initted = 1;
+ }
+
+ /* Initialize an instance of the audio service */
+ instance = vc_vchi_audio_init(vchi_instance, &vchi_connection, 1);
+
+ if (IS_ERR(instance)) {
+ LOG_ERR("%s: failed to initialize audio service\n", __func__);
+
+ ret = PTR_ERR(instance);
+ goto err_free_mem;
+ }
+
+ instance->alsa_stream = alsa_stream;
+ alsa_stream->instance = instance;
+
+ LOG_DBG(" success !\n");
+ ret = 0;
+err_free_mem:
+ kfree(vchi_instance);
+
+ return ret;
+}
+
+int bcm2835_audio_open(struct bcm2835_alsa_stream *alsa_stream)
+{
+ struct bcm2835_audio_instance *instance;
+ struct vc_audio_msg m;
+ int status;
+ int ret;
+
+
+ my_workqueue_init(alsa_stream);
+
+ ret = bcm2835_audio_open_connection(alsa_stream);
+ if (ret) {
+ ret = -1;
+ goto exit;
+ }
+ instance = alsa_stream->instance;
+ LOG_DBG(" instance (%p)\n", instance);
+
+ if (mutex_lock_interruptible(&instance->vchi_mutex)) {
+ LOG_DBG("Interrupted whilst waiting for lock on (%d)\n", instance->num_connections);
+ return -EINTR;
+ }
+ vchi_service_use(instance->vchi_handle[0]);
+
+ m.type = VC_AUDIO_MSG_TYPE_OPEN;
+
+ /* Send the message to the videocore */
+ status = bcm2835_vchi_msg_queue(instance->vchi_handle[0],
+ &m, sizeof(m));
+
+ if (status) {
+ LOG_ERR("%s: failed on vchi_msg_queue (status=%d)\n",
+ __func__, status);
+
+ ret = -1;
+ goto unlock;
+ }
+
+ ret = 0;
+
+unlock:
+ vchi_service_release(instance->vchi_handle[0]);
+ mutex_unlock(&instance->vchi_mutex);
+exit:
+ return ret;
+}
+
+static int bcm2835_audio_set_ctls_chan(struct bcm2835_alsa_stream *alsa_stream,
+ struct bcm2835_chip *chip)
+{
+ struct vc_audio_msg m;
+ struct bcm2835_audio_instance *instance = alsa_stream->instance;
+ int status;
+ int ret;
+
+
+ LOG_INFO(" Setting ALSA dest(%d), volume(%d)\n",
+ chip->dest, chip->volume);
+
+ if (mutex_lock_interruptible(&instance->vchi_mutex)) {
+ LOG_DBG("Interrupted whilst waiting for lock on (%d)\n",
+ instance->num_connections);
+ return -EINTR;
+ }
+ vchi_service_use(instance->vchi_handle[0]);
+
+ instance->result = -1;
+
+ m.type = VC_AUDIO_MSG_TYPE_CONTROL;
+ m.u.control.dest = chip->dest;
+ m.u.control.volume = chip->volume;
+
+ /* Create the message available completion */
+ init_completion(&instance->msg_avail_comp);
+
+ /* Send the message to the videocore */
+ status = bcm2835_vchi_msg_queue(instance->vchi_handle[0],
+ &m, sizeof(m));
+
+ if (status) {
+ LOG_ERR("%s: failed on vchi_msg_queue (status=%d)\n",
+ __func__, status);
+
+ ret = -1;
+ goto unlock;
+ }
+
+ /* We are expecting a reply from the videocore */
+ wait_for_completion(&instance->msg_avail_comp);
+
+ if (instance->result) {
+ LOG_ERR("%s: result=%d\n", __func__, instance->result);
+
+ ret = -1;
+ goto unlock;
+ }
+
+ ret = 0;
+
+unlock:
+ vchi_service_release(instance->vchi_handle[0]);
+ mutex_unlock(&instance->vchi_mutex);
+
+ return ret;
+}
+
+int bcm2835_audio_set_ctls(struct bcm2835_chip *chip)
+{
+ int i;
+ int ret = 0;
+
+ LOG_DBG(" Setting ALSA dest(%d), volume(%d)\n", chip->dest, chip->volume);
+
+ /* change ctls for all substreams */
+ for (i = 0; i < MAX_SUBSTREAMS; i++) {
+ if (chip->avail_substreams & (1 << i)) {
+ if (!chip->alsa_stream[i]) {
+ LOG_DBG(" No ALSA stream available?! %i:%p (%x)\n", i, chip->alsa_stream[i], chip->avail_substreams);
+ ret = 0;
+ } else if (bcm2835_audio_set_ctls_chan(chip->alsa_stream[i], chip) != 0) {
+ LOG_ERR("Couldn't set the controls for stream %d\n", i);
+ ret = -1;
+ } else {
+ LOG_DBG(" Controls set for stream %d\n", i);
+ }
+ }
+ }
+ return ret;
+}
+
+int bcm2835_audio_set_params(struct bcm2835_alsa_stream *alsa_stream,
+ unsigned int channels, unsigned int samplerate,
+ unsigned int bps)
+{
+ struct vc_audio_msg m;
+ struct bcm2835_audio_instance *instance = alsa_stream->instance;
+ int status;
+ int ret;
+
+
+ LOG_INFO(" Setting ALSA channels(%d), samplerate(%d), bits-per-sample(%d)\n",
+ channels, samplerate, bps);
+
+ /* resend ctls - alsa_stream may not have been open when first send */
+ ret = bcm2835_audio_set_ctls_chan(alsa_stream, alsa_stream->chip);
+ if (ret) {
+ LOG_ERR(" Alsa controls not supported\n");
+ return -EINVAL;
+ }
+
+ if (mutex_lock_interruptible(&instance->vchi_mutex)) {
+ LOG_DBG("Interrupted whilst waiting for lock on (%d)\n", instance->num_connections);
+ return -EINTR;
+ }
+ vchi_service_use(instance->vchi_handle[0]);
+
+ instance->result = -1;
+
+ m.type = VC_AUDIO_MSG_TYPE_CONFIG;
+ m.u.config.channels = channels;
+ m.u.config.samplerate = samplerate;
+ m.u.config.bps = bps;
+
+ /* Create the message available completion */
+ init_completion(&instance->msg_avail_comp);
+
+ /* Send the message to the videocore */
+ status = bcm2835_vchi_msg_queue(instance->vchi_handle[0],
+ &m, sizeof(m));
+
+ if (status) {
+ LOG_ERR("%s: failed on vchi_msg_queue (status=%d)\n",
+ __func__, status);
+
+ ret = -1;
+ goto unlock;
+ }
+
+ /* We are expecting a reply from the videocore */
+ wait_for_completion(&instance->msg_avail_comp);
+
+ if (instance->result) {
+ LOG_ERR("%s: result=%d", __func__, instance->result);
+
+ ret = -1;
+ goto unlock;
+ }
+
+ ret = 0;
+
+unlock:
+ vchi_service_release(instance->vchi_handle[0]);
+ mutex_unlock(&instance->vchi_mutex);
+
+ return ret;
+}
+
+int bcm2835_audio_setup(struct bcm2835_alsa_stream *alsa_stream)
+{
+
+
+ return 0;
+}
+
+static int bcm2835_audio_start_worker(struct bcm2835_alsa_stream *alsa_stream)
+{
+ struct vc_audio_msg m;
+ struct bcm2835_audio_instance *instance = alsa_stream->instance;
+ int status;
+ int ret;
+
+
+ if (mutex_lock_interruptible(&instance->vchi_mutex)) {
+ LOG_DBG("Interrupted whilst waiting for lock on (%d)\n",
+ instance->num_connections);
+ return -EINTR;
+ }
+ vchi_service_use(instance->vchi_handle[0]);
+
+ m.type = VC_AUDIO_MSG_TYPE_START;
+
+ /* Send the message to the videocore */
+ status = bcm2835_vchi_msg_queue(instance->vchi_handle[0],
+ &m, sizeof(m));
+
+ if (status) {
+ LOG_ERR("%s: failed on vchi_msg_queue (status=%d)\n",
+ __func__, status);
+
+ ret = -1;
+ goto unlock;
+ }
+
+ ret = 0;
+
+unlock:
+ vchi_service_release(instance->vchi_handle[0]);
+ mutex_unlock(&instance->vchi_mutex);
+ return ret;
+}
+
+static int bcm2835_audio_stop_worker(struct bcm2835_alsa_stream *alsa_stream)
+{
+ struct vc_audio_msg m;
+ struct bcm2835_audio_instance *instance = alsa_stream->instance;
+ int status;
+ int ret;
+
+
+ if (mutex_lock_interruptible(&instance->vchi_mutex)) {
+ LOG_DBG("Interrupted whilst waiting for lock on (%d)\n",
+ instance->num_connections);
+ return -EINTR;
+ }
+ vchi_service_use(instance->vchi_handle[0]);
+
+ m.type = VC_AUDIO_MSG_TYPE_STOP;
+ m.u.stop.draining = alsa_stream->draining;
+
+ /* Send the message to the videocore */
+ status = bcm2835_vchi_msg_queue(instance->vchi_handle[0],
+ &m, sizeof(m));
+
+ if (status) {
+ LOG_ERR("%s: failed on vchi_msg_queue (status=%d)\n",
+ __func__, status);
+
+ ret = -1;
+ goto unlock;
+ }
+
+ ret = 0;
+
+unlock:
+ vchi_service_release(instance->vchi_handle[0]);
+ mutex_unlock(&instance->vchi_mutex);
+ return ret;
+}
+
+int bcm2835_audio_close(struct bcm2835_alsa_stream *alsa_stream)
+{
+ struct vc_audio_msg m;
+ struct bcm2835_audio_instance *instance = alsa_stream->instance;
+ int status;
+ int ret;
+
+
+ my_workqueue_quit(alsa_stream);
+
+ if (mutex_lock_interruptible(&instance->vchi_mutex)) {
+ LOG_DBG("Interrupted whilst waiting for lock on (%d)\n",
+ instance->num_connections);
+ return -EINTR;
+ }
+ vchi_service_use(instance->vchi_handle[0]);
+
+ m.type = VC_AUDIO_MSG_TYPE_CLOSE;
+
+ /* Create the message available completion */
+ init_completion(&instance->msg_avail_comp);
+
+ /* Send the message to the videocore */
+ status = bcm2835_vchi_msg_queue(instance->vchi_handle[0],
+ &m, sizeof(m));
+
+ if (status) {
+ LOG_ERR("%s: failed on vchi_msg_queue (status=%d)\n",
+ __func__, status);
+ ret = -1;
+ goto unlock;
+ }
+
+ /* We are expecting a reply from the videocore */
+ wait_for_completion(&instance->msg_avail_comp);
+
+ if (instance->result) {
+ LOG_ERR("%s: failed result (result=%d)\n",
+ __func__, instance->result);
+
+ ret = -1;
+ goto unlock;
+ }
+
+ ret = 0;
+
+unlock:
+ vchi_service_release(instance->vchi_handle[0]);
+ mutex_unlock(&instance->vchi_mutex);
+
+ /* Stop the audio service */
+ vc_vchi_audio_deinit(instance);
+ alsa_stream->instance = NULL;
+
+ return ret;
+}
+
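+/*
+ * Sends a WRITE message describing the data, then transfers the samples
+ * either as a single bulk transfer (older firmware or force_bulk) or as a
+ * series of control messages of at most max_packet bytes.
+ */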
+static int bcm2835_audio_write_worker(struct bcm2835_alsa_stream *alsa_stream,
+ unsigned int count, void *src)
+{
+ struct vc_audio_msg m;
+ struct bcm2835_audio_instance *instance = alsa_stream->instance;
+ int status;
+ int ret;
+
+
+ LOG_INFO(" Writing %d bytes from %p\n", count, src);
+
+ if (mutex_lock_interruptible(&instance->vchi_mutex)) {
+ LOG_DBG("Interrupted whilst waiting for lock on (%d)\n",
+ instance->num_connections);
+ return -EINTR;
+ }
+ vchi_service_use(instance->vchi_handle[0]);
+
+ if (instance->peer_version == 0 &&
+ vchi_get_peer_version(instance->vchi_handle[0], &instance->peer_version) == 0)
+ LOG_DBG("%s: client version %d connected\n", __func__, instance->peer_version);
+
+ m.type = VC_AUDIO_MSG_TYPE_WRITE;
+ m.u.write.count = count;
+ // old version uses bulk, new version uses control
+ m.u.write.max_packet = instance->peer_version < 2 || force_bulk ? 0 : 4000;
+ m.u.write.cookie1 = BCM2835_AUDIO_WRITE_COOKIE1;
+ m.u.write.cookie2 = BCM2835_AUDIO_WRITE_COOKIE2;
+ m.u.write.silence = src == NULL;
+
+ /* Send the message to the videocore */
+ status = bcm2835_vchi_msg_queue(instance->vchi_handle[0],
+ &m, sizeof(m));
+
+ if (status) {
+ LOG_ERR("%s: failed on vchi_msg_queue (status=%d)\n",
+ __func__, status);
+
+ ret = -1;
+ goto unlock;
+ }
+ if (!m.u.write.silence) {
+ if (!m.u.write.max_packet) {
+ /* Send the message to the videocore */
+ status = vchi_bulk_queue_transmit(instance->vchi_handle[0],
+ src, count,
+ VCHI_FLAGS_BLOCK_UNTIL_DATA_READ,
+ NULL);
+ } else {
+ while (count > 0) {
+ int bytes = min_t(int, m.u.write.max_packet, count);
+
+ status = bcm2835_vchi_msg_queue(instance->vchi_handle[0],
+ src, bytes);
+ src = (char *)src + bytes;
+ count -= bytes;
+ }
+ }
+ if (status) {
+ LOG_ERR("%s: failed on vchi_bulk_queue_transmit (status=%d)\n",
+ __func__, status);
+
+ ret = -1;
+ goto unlock;
+ }
+ }
+ ret = 0;
+
+unlock:
+ vchi_service_release(instance->vchi_handle[0]);
+ mutex_unlock(&instance->vchi_mutex);
+ return ret;
+}
+
+/* Returns all buffers from the ARM to the VC side */
+void bcm2835_audio_flush_buffers(struct bcm2835_alsa_stream *alsa_stream)
+{
+}
+
+/*
+ * Forces the VC to flush (drop) its filled playback buffers and return
+ * them to us (VC->ARM).
+ */
+void bcm2835_audio_flush_playback_buffers(struct bcm2835_alsa_stream *alsa_stream)
+{
+}
+
+unsigned int bcm2835_audio_retrieve_buffers(struct bcm2835_alsa_stream *alsa_stream)
+{
+ unsigned int count = atomic_read(&alsa_stream->retrieved);
+
+ atomic_sub(count, &alsa_stream->retrieved);
+ return count;
+}
+
+module_param(force_bulk, bool, 0444);
+MODULE_PARM_DESC(force_bulk, "Force use of vchiq bulk for audio");
diff --git a/drivers/staging/vc04_services/bcm2835-audio/bcm2835.c b/drivers/staging/vc04_services/bcm2835-audio/bcm2835.c
new file mode 100644
index 000000000000..8f2d508183b2
--- /dev/null
+++ b/drivers/staging/vc04_services/bcm2835-audio/bcm2835.c
@@ -0,0 +1,472 @@
+/*****************************************************************************
+ * Copyright 2011 Broadcom Corporation. All rights reserved.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2, available at
+ * http://www.broadcom.com/licenses/GPLv2.php (the "GPL").
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a
+ * license other than the GPL, without Broadcom's express prior written
+ * consent.
+ *****************************************************************************/
+
+#include <linux/platform_device.h>
+
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/of.h>
+
+#include "bcm2835.h"
+
+static bool enable_hdmi;
+static bool enable_headphones;
+static bool enable_compat_alsa = true;
+
+module_param(enable_hdmi, bool, 0444);
+MODULE_PARM_DESC(enable_hdmi, "Enables HDMI virtual audio device");
+module_param(enable_headphones, bool, 0444);
+MODULE_PARM_DESC(enable_headphones, "Enables Headphones virtual audio device");
+module_param(enable_compat_alsa, bool, 0444);
+MODULE_PARM_DESC(enable_compat_alsa,
+ "Enables ALSA compatibility virtual audio device");
+
+static void snd_devm_unregister_child(struct device *dev, void *res)
+{
+ struct device *childdev = *(struct device **)res;
+
+ device_unregister(childdev);
+}
+
+static int snd_devm_add_child(struct device *dev, struct device *child)
+{
+ struct device **dr;
+ int ret;
+
+ dr = devres_alloc(snd_devm_unregister_child, sizeof(*dr), GFP_KERNEL);
+ if (!dr)
+ return -ENOMEM;
+
+ ret = device_add(child);
+ if (ret) {
+ devres_free(dr);
+ return ret;
+ }
+
+ *dr = child;
+ devres_add(dev, dr);
+
+ return 0;
+}
+
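+/*
+ * Creates a devres-managed child device under the platform device; each
+ * virtual card (ALSA, HDMI, Headphones) gets its own child.
+ */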
+static struct device *
+snd_create_device(struct device *parent,
+ struct device_driver *driver,
+ const char *name)
+{
+ struct device *device;
+ int ret;
+
+ device = devm_kzalloc(parent, sizeof(*device), GFP_KERNEL);
+ if (!device)
+ return ERR_PTR(-ENOMEM);
+
+ device_initialize(device);
+ device->parent = parent;
+ device->driver = driver;
+
+ dev_set_name(device, "%s", name);
+
+ ret = snd_devm_add_child(parent, device);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return device;
+}
+
+static int snd_bcm2835_free(struct bcm2835_chip *chip)
+{
+ kfree(chip);
+ return 0;
+}
+
+/* component-destructor
+ * (see "Management of Cards and Components")
+ */
+static int snd_bcm2835_dev_free(struct snd_device *device)
+{
+ return snd_bcm2835_free(device->device_data);
+}
+
+/* chip-specific constructor
+ * (see "Management of Cards and Components")
+ */
+static int snd_bcm2835_create(struct snd_card *card,
+ struct bcm2835_chip **rchip)
+{
+ struct bcm2835_chip *chip;
+ int err;
+ static struct snd_device_ops ops = {
+ .dev_free = snd_bcm2835_dev_free,
+ };
+
+ *rchip = NULL;
+
+ chip = kzalloc(sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ chip->card = card;
+
+ err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops);
+ if (err) {
+ snd_bcm2835_free(chip);
+ return err;
+ }
+
+ *rchip = chip;
+ return 0;
+}
+
+static void snd_devm_card_free(struct device *dev, void *res)
+{
+ struct snd_card *snd_card = *(struct snd_card **)res;
+
+ snd_card_free(snd_card);
+}
+
+static struct snd_card *snd_devm_card_new(struct device *dev)
+{
+ struct snd_card **dr;
+ struct snd_card *card;
+ int ret;
+
+ dr = devres_alloc(snd_devm_card_free, sizeof(*dr), GFP_KERNEL);
+ if (!dr)
+ return ERR_PTR(-ENOMEM);
+
+ ret = snd_card_new(dev, -1, NULL, THIS_MODULE, 0, &card);
+ if (ret) {
+ devres_free(dr);
+ return ERR_PTR(ret);
+ }
+
+ *dr = card;
+ devres_add(dev, dr);
+
+ return card;
+}
+
+typedef int (*bcm2835_audio_newpcm_func)(struct bcm2835_chip *chip,
+ const char *name,
+ enum snd_bcm2835_route route,
+ u32 numchannels);
+
+typedef int (*bcm2835_audio_newctl_func)(struct bcm2835_chip *chip);
+
+struct bcm2835_audio_driver {
+ struct device_driver driver;
+ const char *shortname;
+ const char *longname;
+ int minchannels;
+ bcm2835_audio_newpcm_func newpcm;
+ bcm2835_audio_newctl_func newctl;
+ enum snd_bcm2835_route route;
+};
+
+static int bcm2835_audio_alsa_newpcm(struct bcm2835_chip *chip,
+ const char *name,
+ enum snd_bcm2835_route route,
+ u32 numchannels)
+{
+ int err;
+
+ err = snd_bcm2835_new_pcm(chip, numchannels - 1);
+ if (err)
+ return err;
+
+ err = snd_bcm2835_new_spdif_pcm(chip);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static struct bcm2835_audio_driver bcm2835_audio_alsa = {
+ .driver = {
+ .name = "bcm2835_alsa",
+ .owner = THIS_MODULE,
+ },
+ .shortname = "bcm2835 ALSA",
+ .longname = "bcm2835 ALSA",
+ .minchannels = 2,
+ .newpcm = bcm2835_audio_alsa_newpcm,
+ .newctl = snd_bcm2835_new_ctl,
+};
+
+static struct bcm2835_audio_driver bcm2835_audio_hdmi = {
+ .driver = {
+ .name = "bcm2835_hdmi",
+ .owner = THIS_MODULE,
+ },
+ .shortname = "bcm2835 HDMI",
+ .longname = "bcm2835 HDMI",
+ .minchannels = 1,
+ .newpcm = snd_bcm2835_new_simple_pcm,
+ .newctl = snd_bcm2835_new_hdmi_ctl,
+ .route = AUDIO_DEST_HDMI
+};
+
+static struct bcm2835_audio_driver bcm2835_audio_headphones = {
+ .driver = {
+ .name = "bcm2835_headphones",
+ .owner = THIS_MODULE,
+ },
+ .shortname = "bcm2835 Headphones",
+ .longname = "bcm2835 Headphones",
+ .minchannels = 1,
+ .newpcm = snd_bcm2835_new_simple_pcm,
+ .newctl = snd_bcm2835_new_headphones_ctl,
+ .route = AUDIO_DEST_HEADPHONES
+};
+
+struct bcm2835_audio_drivers {
+ struct bcm2835_audio_driver *audio_driver;
+ const bool *is_enabled;
+};
+
+static struct bcm2835_audio_drivers children_devices[] = {
+ {
+ .audio_driver = &bcm2835_audio_alsa,
+ .is_enabled = &enable_compat_alsa,
+ },
+ {
+ .audio_driver = &bcm2835_audio_hdmi,
+ .is_enabled = &enable_hdmi,
+ },
+ {
+ .audio_driver = &bcm2835_audio_headphones,
+ .is_enabled = &enable_headphones,
+ },
+};
+
+static int snd_add_child_device(struct device *device,
+ struct bcm2835_audio_driver *audio_driver,
+ u32 numchans)
+{
+ struct snd_card *card;
+ struct device *child;
+ struct bcm2835_chip *chip;
+ int err, i;
+
+ child = snd_create_device(device, &audio_driver->driver,
+ audio_driver->driver.name);
+ if (IS_ERR(child)) {
+ dev_err(device,
+ "Unable to create child device %p, error %ld",
+ audio_driver->driver.name,
+ PTR_ERR(child));
+ return PTR_ERR(child);
+ }
+
+ card = snd_devm_card_new(child);
+ if (IS_ERR(card)) {
+ dev_err(child, "Failed to create card");
+ return PTR_ERR(card);
+ }
+
+ snd_card_set_dev(card, child);
+ strcpy(card->driver, audio_driver->driver.name);
+ strcpy(card->shortname, audio_driver->shortname);
+ strcpy(card->longname, audio_driver->longname);
+
+ err = snd_bcm2835_create(card, &chip);
+ if (err) {
+ dev_err(child, "Failed to create chip, error %d\n", err);
+ return err;
+ }
+
+ chip->dev = child;
+
+ err = audio_driver->newpcm(chip, audio_driver->shortname,
+ audio_driver->route,
+ numchans);
+ if (err) {
+ dev_err(child, "Failed to create pcm, error %d\n", err);
+ return err;
+ }
+
+ err = audio_driver->newctl(chip);
+ if (err) {
+ dev_err(child, "Failed to create controls, error %d\n", err);
+ return err;
+ }
+
+ for (i = 0; i < numchans; i++)
+ chip->avail_substreams |= (1 << i);
+
+ err = snd_card_register(card);
+ if (err) {
+ dev_err(child, "Failed to register card, error %d\n", err);
+ return err;
+ }
+
+ dev_set_drvdata(child, card);
+ dev_info(child, "card created with %d channels\n", numchans);
+
+ return 0;
+}
+
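+/*
+ * Distributes the available channels between the enabled child devices:
+ * each gets its minimum, the surplus is split evenly, and any remainder
+ * goes to the first enabled device.
+ */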
+static int snd_add_child_devices(struct device *device, u32 numchans)
+{
+ int i;
+ int count_devices = 0;
+ int minchannels = 0;
+ int extrachannels = 0;
+ int extrachannels_per_driver = 0;
+ int extrachannels_remainder = 0;
+
+ for (i = 0; i < ARRAY_SIZE(children_devices); i++)
+ if (*children_devices[i].is_enabled)
+ count_devices++;
+
+ if (!count_devices)
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(children_devices); i++)
+ if (*children_devices[i].is_enabled)
+ minchannels +=
+ children_devices[i].audio_driver->minchannels;
+
+ if (minchannels < numchans) {
+ extrachannels = numchans - minchannels;
+ extrachannels_per_driver = extrachannels / count_devices;
+ extrachannels_remainder = extrachannels % count_devices;
+ }
+
+ dev_dbg(device, "minchannels %d\n", minchannels);
+ dev_dbg(device, "extrachannels %d\n", extrachannels);
+ dev_dbg(device, "extrachannels_per_driver %d\n",
+ extrachannels_per_driver);
+ dev_dbg(device, "extrachannels_remainder %d\n",
+ extrachannels_remainder);
+
+ for (i = 0; i < ARRAY_SIZE(children_devices); i++) {
+ int err;
+ int numchannels_this_device;
+ struct bcm2835_audio_driver *audio_driver;
+
+ if (!*children_devices[i].is_enabled)
+ continue;
+
+ audio_driver = children_devices[i].audio_driver;
+
+ if (audio_driver->minchannels > numchans) {
+ dev_err(device,
+ "Out of channels, needed %d but only %d left\n",
+ audio_driver->minchannels,
+ numchans);
+ continue;
+ }
+
+ numchannels_this_device =
+ audio_driver->minchannels + extrachannels_per_driver +
+ extrachannels_remainder;
+ extrachannels_remainder = 0;
+
+ numchans -= numchannels_this_device;
+
+ err = snd_add_child_device(device, audio_driver,
+ numchannels_this_device);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int snd_bcm2835_alsa_probe_dt(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ u32 numchans;
+ int err;
+
+ err = of_property_read_u32(dev->of_node, "brcm,pwm-channels",
+ &numchans);
+ if (err) {
+ dev_err(dev, "Failed to get DT property 'brcm,pwm-channels'");
+ return err;
+ }
+
+ if (numchans == 0 || numchans > MAX_SUBSTREAMS) {
+ numchans = MAX_SUBSTREAMS;
+ dev_warn(dev,
+ "Illegal 'brcm,pwm-channels' value, will use %u\n",
+ numchans);
+ }
+
+ err = snd_add_child_devices(dev, numchans);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+
+static int snd_bcm2835_alsa_suspend(struct platform_device *pdev,
+ pm_message_t state)
+{
+ return 0;
+}
+
+static int snd_bcm2835_alsa_resume(struct platform_device *pdev)
+{
+ return 0;
+}
+
+#endif
+
+static const struct of_device_id snd_bcm2835_of_match_table[] = {
+ { .compatible = "brcm,bcm2835-audio",},
+ {},
+};
+MODULE_DEVICE_TABLE(of, snd_bcm2835_of_match_table);
+
+static struct platform_driver bcm2835_alsa0_driver = {
+ .probe = snd_bcm2835_alsa_probe_dt,
+#ifdef CONFIG_PM
+ .suspend = snd_bcm2835_alsa_suspend,
+ .resume = snd_bcm2835_alsa_resume,
+#endif
+ .driver = {
+ .name = "bcm2835_audio",
+ .owner = THIS_MODULE,
+ .of_match_table = snd_bcm2835_of_match_table,
+ },
+};
+
+static int bcm2835_alsa_device_init(void)
+{
+ int retval;
+
+ retval = platform_driver_register(&bcm2835_alsa0_driver);
+ if (retval)
+ pr_err("Error registering bcm2835_audio driver %d .\n", retval);
+
+ return retval;
+}
+
+static void bcm2835_alsa_device_exit(void)
+{
+ platform_driver_unregister(&bcm2835_alsa0_driver);
+}
+
+late_initcall(bcm2835_alsa_device_init);
+module_exit(bcm2835_alsa_device_exit);
+
+MODULE_AUTHOR("Dom Cobley");
+MODULE_DESCRIPTION("Alsa driver for BCM2835 chip");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/vc04_services/bcm2835-audio/bcm2835.h b/drivers/staging/vc04_services/bcm2835-audio/bcm2835.h
new file mode 100644
index 000000000000..379604d3554e
--- /dev/null
+++ b/drivers/staging/vc04_services/bcm2835-audio/bcm2835.h
@@ -0,0 +1,175 @@
+/*****************************************************************************
+ * Copyright 2011 Broadcom Corporation. All rights reserved.
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2, available at
+ * http://www.broadcom.com/licenses/GPLv2.php (the "GPL").
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a
+ * license other than the GPL, without Broadcom's express prior written
+ * consent.
+ *****************************************************************************/
+
+#ifndef __SOUND_ARM_BCM2835_H
+#define __SOUND_ARM_BCM2835_H
+
+#include <linux/device.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/wait.h>
+#include <sound/core.h>
+#include <sound/initval.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/pcm-indirect.h>
+#include <linux/workqueue.h>
+
+/*
+ * #define AUDIO_DEBUG_ENABLE
+ * #define AUDIO_VERBOSE_DEBUG_ENABLE
+ */
+
+/* Debug macros */
+
+#ifdef AUDIO_DEBUG_ENABLE
+#ifdef AUDIO_VERBOSE_DEBUG_ENABLE
+
+#define audio_debug(fmt, arg...) \
+ pr_info("%s:%d " fmt, __func__, __LINE__, ##arg)
+
+#define audio_info(fmt, arg...) \
+ pr_info("%s:%d " fmt, __func__, __LINE__, ##arg)
+
+#else
+
+#define audio_debug(fmt, arg...)
+
+#define audio_info(fmt, arg...)
+
+#endif /* AUDIO_VERBOSE_DEBUG_ENABLE */
+
+#else
+
+#define audio_debug(fmt, arg...)
+
+#define audio_info(fmt, arg...)
+
+#endif /* AUDIO_DEBUG_ENABLE */
+
+#define audio_error(fmt, arg...) \
+ pr_err("%s:%d " fmt, __func__, __LINE__, ##arg)
+
+#define audio_warning(fmt, arg...) \
+ pr_warn("%s:%d " fmt, __func__, __LINE__, ##arg)
+
+#define audio_alert(fmt, arg...) \
+ pr_alert("%s:%d " fmt, __func__, __LINE__, ##arg)
+
+#define MAX_SUBSTREAMS (8)
+#define AVAIL_SUBSTREAMS_MASK (0xff)
+
+enum {
+ CTRL_VOL_MUTE,
+ CTRL_VOL_UNMUTE
+};
+
+/* macros for alsa2chip and chip2alsa, instead of functions */
+
+// convert alsa to chip volume (defined as macro rather than function call)
+#define alsa2chip(vol) (uint)(-(((vol) << 8) / 100))
+
+// convert chip to alsa volume
+#define chip2alsa(vol) -(((vol) * 100) >> 8)
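+/* e.g. alsa2chip(100) == (uint)(-256) and chip2alsa(-256) == 100 */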
+
+/* Some constants for values .. */
+enum snd_bcm2835_route {
+ AUDIO_DEST_AUTO = 0,
+ AUDIO_DEST_HEADPHONES = 1,
+ AUDIO_DEST_HDMI = 2,
+ AUDIO_DEST_MAX,
+};
+
+enum snd_bcm2835_ctrl {
+ PCM_PLAYBACK_VOLUME,
+ PCM_PLAYBACK_MUTE,
+ PCM_PLAYBACK_DEVICE,
+};
+
+/* definition of the chip-specific record */
+struct bcm2835_chip {
+ struct snd_card *card;
+ struct snd_pcm *pcm;
+ struct snd_pcm *pcm_spdif;
+ /* Bitmap of available substreams */
+ unsigned int avail_substreams;
+ struct device *dev;
+ struct bcm2835_alsa_stream *alsa_stream[MAX_SUBSTREAMS];
+
+ int volume;
+ int old_volume; /* stores the volume value whilst muted */
+ int dest;
+ int mute;
+
+ unsigned int opened;
+ unsigned int spdif_status;
+ struct mutex audio_mutex;
+};
+
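+/*
+ * Per-substream runtime state: buffer/period bookkeeping, the VCHI audio
+ * instance and the workqueue used to issue start/stop/write requests.
+ */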
+struct bcm2835_alsa_stream {
+ struct bcm2835_chip *chip;
+ struct snd_pcm_substream *substream;
+ struct snd_pcm_indirect pcm_indirect;
+
+ spinlock_t lock;
+ volatile unsigned int control;
+ volatile unsigned int status;
+
+ int open;
+ int running;
+ int draining;
+
+ int channels;
+ int params_rate;
+ int pcm_format_width;
+
+ unsigned int pos;
+ unsigned int buffer_size;
+ unsigned int period_size;
+
+ atomic_t retrieved;
+ struct bcm2835_audio_instance *instance;
+ struct workqueue_struct *my_wq;
+ int idx;
+};
+
+int snd_bcm2835_new_ctl(struct bcm2835_chip *chip);
+int snd_bcm2835_new_pcm(struct bcm2835_chip *chip, u32 numchannels);
+int snd_bcm2835_new_spdif_pcm(struct bcm2835_chip *chip);
+int snd_bcm2835_new_simple_pcm(struct bcm2835_chip *chip,
+ const char *name,
+ enum snd_bcm2835_route route,
+ u32 numchannels);
+
+int snd_bcm2835_new_hdmi_ctl(struct bcm2835_chip *chip);
+int snd_bcm2835_new_headphones_ctl(struct bcm2835_chip *chip);
+
+int bcm2835_audio_open(struct bcm2835_alsa_stream *alsa_stream);
+int bcm2835_audio_close(struct bcm2835_alsa_stream *alsa_stream);
+int bcm2835_audio_set_params(struct bcm2835_alsa_stream *alsa_stream,
+ unsigned int channels, unsigned int samplerate,
+ unsigned int bps);
+int bcm2835_audio_setup(struct bcm2835_alsa_stream *alsa_stream);
+int bcm2835_audio_start(struct bcm2835_alsa_stream *alsa_stream);
+int bcm2835_audio_stop(struct bcm2835_alsa_stream *alsa_stream);
+int bcm2835_audio_set_ctls(struct bcm2835_chip *chip);
+int bcm2835_audio_write(struct bcm2835_alsa_stream *alsa_stream,
+ unsigned int count,
+ void *src);
+void bcm2835_playback_fifo(struct bcm2835_alsa_stream *alsa_stream);
+unsigned int bcm2835_audio_retrieve_buffers(struct bcm2835_alsa_stream *alsa_stream);
+void bcm2835_audio_flush_buffers(struct bcm2835_alsa_stream *alsa_stream);
+void bcm2835_audio_flush_playback_buffers(struct bcm2835_alsa_stream *alsa_stream);
+
+#endif /* __SOUND_ARM_BCM2835_H */
diff --git a/drivers/staging/bcm2835-audio/vc_vchi_audioserv_defs.h b/drivers/staging/vc04_services/bcm2835-audio/vc_vchi_audioserv_defs.h
index da96f1bc2516..da96f1bc2516 100644
--- a/drivers/staging/bcm2835-audio/vc_vchi_audioserv_defs.h
+++ b/drivers/staging/vc04_services/bcm2835-audio/vc_vchi_audioserv_defs.h
diff --git a/drivers/staging/vc04_services/bcm2835-camera/Kconfig b/drivers/staging/vc04_services/bcm2835-camera/Kconfig
new file mode 100644
index 000000000000..b8b01aa4e426
--- /dev/null
+++ b/drivers/staging/vc04_services/bcm2835-camera/Kconfig
@@ -0,0 +1,11 @@
+config VIDEO_BCM2835
+ tristate "BCM2835 Camera"
+ depends on MEDIA_SUPPORT
+ depends on VIDEO_V4L2 && (ARCH_BCM2835 || COMPILE_TEST)
+ select BCM2835_VCHIQ
+ select VIDEOBUF2_VMALLOC
+ select BTREE
+ help
+	  Say Y here to enable camera host interface devices for the
+ Broadcom BCM2835 SoC. This operates over the VCHIQ interface
+ to a service running on VideoCore.
diff --git a/drivers/staging/media/platform/bcm2835/Makefile b/drivers/staging/vc04_services/bcm2835-camera/Makefile
index 8307f30517d5..8307f30517d5 100644
--- a/drivers/staging/media/platform/bcm2835/Makefile
+++ b/drivers/staging/vc04_services/bcm2835-camera/Makefile
diff --git a/drivers/staging/vc04_services/bcm2835-camera/TODO b/drivers/staging/vc04_services/bcm2835-camera/TODO
new file mode 100644
index 000000000000..0ab9e88d769a
--- /dev/null
+++ b/drivers/staging/vc04_services/bcm2835-camera/TODO
@@ -0,0 +1,34 @@
+1) Support dma-buf memory management.
+
+In order to zero-copy import camera images into the 3D or display
+pipelines, we need to export our buffers through dma-buf so that the
+vc4 driver can import them. This may involve bringing in the VCSM
+driver (which allows long-term management of regions of memory in the
+space that the VPU reserved and Linux otherwise doesn't have access
+to), or building some new protocol that allows VCSM-style management
+of Linux's CMA memory.
+
+2) Avoid extra copies for padding of images.
+
+We expose V4L2_PIX_FMT_* formats that have a specified stride/height
+padding in the V4L2 spec, but that padding doesn't match what the
+hardware can do. If we exposed the native padding requirements
+through the V4L2 "multiplanar" formats, the firmware would need to do
+one less copy.
+
+3) Port to ARM64
+
+bulk_receive() does some manual cache flushing that is 32-bit ARM only,
+which we should convert to proper cross-platform APIs.
+
+4) Convert to be a platform driver.
+
+Right now, when the module probes, it tries to initialize VCHI and
+errors out if it isn't ready yet. If bcm2835-v4l2 is built in, then
+VCHI generally isn't ready, because it depends on both the firmware and
+mailbox drivers having already loaded.
+
+We should have VCHI create a platform device once it's initialized,
+and have this driver bind to it, so that we automatically load the
+v4l2 module after VCHI loads.
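+
+A rough sketch of one way this could look, using the standard platform
+driver API (the probe/remove function names below are hypothetical and
+not part of this patch):
+
+	static int bcm2835_camera_probe(struct platform_device *pdev)
+	{
+		/* move the work currently done in bm2835_mmal_init() here */
+		return 0;
+	}
+
+	static int bcm2835_camera_remove(struct platform_device *pdev)
+	{
+		/* move the work currently done in bm2835_mmal_exit() here */
+		return 0;
+	}
+
+	static struct platform_driver bcm2835_camera_driver = {
+		.probe  = bcm2835_camera_probe,
+		.remove = bcm2835_camera_remove,
+		.driver = { .name = "bcm2835-camera" },
+	};
+	module_platform_driver(bcm2835_camera_driver);
+
+VCHI would then register a matching platform device (for example via
+platform_device_register_simple("bcm2835-camera", -1, NULL, 0)) once its
+own initialization has completed.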
+
diff --git a/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c b/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c
new file mode 100644
index 000000000000..a11e047734f9
--- /dev/null
+++ b/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c
@@ -0,0 +1,1966 @@
+/*
+ * Broadcom BCM2835 V4L2 driver
+ *
+ * Copyright © 2013 Raspberry Pi (Trading) Ltd.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive
+ * for more details.
+ *
+ * Authors: Vincent Sanders <vincent.sanders@collabora.co.uk>
+ * Dave Stevenson <dsteve@broadcom.com>
+ * Simon Mellor <simellor@broadcom.com>
+ * Luke Diamand <luked@broadcom.com>
+ */
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <media/videobuf2-vmalloc.h>
+#include <media/videobuf2-dma-contig.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-common.h>
+#include <linux/delay.h>
+
+#include "mmal-common.h"
+#include "mmal-encodings.h"
+#include "mmal-vchiq.h"
+#include "mmal-msg.h"
+#include "mmal-parameters.h"
+#include "bcm2835-camera.h"
+
+#define BM2835_MMAL_VERSION "0.0.2"
+#define BM2835_MMAL_MODULE_NAME "bcm2835-v4l2"
+#define MIN_WIDTH 32
+#define MIN_HEIGHT 32
+#define MIN_BUFFER_SIZE (80 * 1024)
+
+#define MAX_VIDEO_MODE_WIDTH 1280
+#define MAX_VIDEO_MODE_HEIGHT 720
+
+#define MAX_BCM2835_CAMERAS 2
+
+MODULE_DESCRIPTION("Broadcom 2835 MMAL video capture");
+MODULE_AUTHOR("Vincent Sanders");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(BM2835_MMAL_VERSION);
+
+int bcm2835_v4l2_debug;
+module_param_named(debug, bcm2835_v4l2_debug, int, 0644);
+MODULE_PARM_DESC(debug, "Debug level 0-2");
+
+#define UNSET (-1)
+static int video_nr[] = {[0 ... (MAX_BCM2835_CAMERAS - 1)] = UNSET };
+module_param_array(video_nr, int, NULL, 0644);
+MODULE_PARM_DESC(video_nr, "videoX start numbers, -1 is autodetect");
+
+static int max_video_width = MAX_VIDEO_MODE_WIDTH;
+static int max_video_height = MAX_VIDEO_MODE_HEIGHT;
+module_param(max_video_width, int, 0644);
+MODULE_PARM_DESC(max_video_width, "Threshold for video mode");
+module_param(max_video_height, int, 0644);
+MODULE_PARM_DESC(max_video_height, "Threshold for video mode");
+
+/* global device data array */
+static struct bm2835_mmal_dev *gdev[MAX_BCM2835_CAMERAS];
+
+#define FPS_MIN 1
+#define FPS_MAX 90
+
+/* timeperframe: min/max and default */
+static const struct v4l2_fract
+ tpf_min = {.numerator = 1, .denominator = FPS_MAX},
+ tpf_max = {.numerator = 1, .denominator = FPS_MIN},
+ tpf_default = {.numerator = 1000, .denominator = 30000};
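+
+/*
+ * Note: with FPS_MIN/FPS_MAX above, tpf_min is 1/90 s and tpf_max is 1 s
+ * per frame, while tpf_default (1000/30000) works out to 1/30 s, i.e. a
+ * default of 30 frames per second.
+ */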
+
+/* video formats */
+static struct mmal_fmt formats[] = {
+ {
+ .name = "4:2:0, planar, YUV",
+ .fourcc = V4L2_PIX_FMT_YUV420,
+ .flags = 0,
+ .mmal = MMAL_ENCODING_I420,
+ .depth = 12,
+ .mmal_component = MMAL_COMPONENT_CAMERA,
+ .ybbp = 1,
+ },
+ {
+ .name = "4:2:2, packed, YUYV",
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .flags = 0,
+ .mmal = MMAL_ENCODING_YUYV,
+ .depth = 16,
+ .mmal_component = MMAL_COMPONENT_CAMERA,
+ .ybbp = 2,
+ },
+ {
+ .name = "RGB24 (LE)",
+ .fourcc = V4L2_PIX_FMT_RGB24,
+ .flags = 0,
+ .mmal = MMAL_ENCODING_RGB24,
+ .depth = 24,
+ .mmal_component = MMAL_COMPONENT_CAMERA,
+ .ybbp = 3,
+ },
+ {
+ .name = "JPEG",
+ .fourcc = V4L2_PIX_FMT_JPEG,
+ .flags = V4L2_FMT_FLAG_COMPRESSED,
+ .mmal = MMAL_ENCODING_JPEG,
+ .depth = 8,
+ .mmal_component = MMAL_COMPONENT_IMAGE_ENCODE,
+ .ybbp = 0,
+ },
+ {
+ .name = "H264",
+ .fourcc = V4L2_PIX_FMT_H264,
+ .flags = V4L2_FMT_FLAG_COMPRESSED,
+ .mmal = MMAL_ENCODING_H264,
+ .depth = 8,
+ .mmal_component = MMAL_COMPONENT_VIDEO_ENCODE,
+ .ybbp = 0,
+ },
+ {
+ .name = "MJPEG",
+ .fourcc = V4L2_PIX_FMT_MJPEG,
+ .flags = V4L2_FMT_FLAG_COMPRESSED,
+ .mmal = MMAL_ENCODING_MJPEG,
+ .depth = 8,
+ .mmal_component = MMAL_COMPONENT_VIDEO_ENCODE,
+ .ybbp = 0,
+ },
+ {
+ .name = "4:2:2, packed, YVYU",
+ .fourcc = V4L2_PIX_FMT_YVYU,
+ .flags = 0,
+ .mmal = MMAL_ENCODING_YVYU,
+ .depth = 16,
+ .mmal_component = MMAL_COMPONENT_CAMERA,
+ .ybbp = 2,
+ },
+ {
+ .name = "4:2:2, packed, VYUY",
+ .fourcc = V4L2_PIX_FMT_VYUY,
+ .flags = 0,
+ .mmal = MMAL_ENCODING_VYUY,
+ .depth = 16,
+ .mmal_component = MMAL_COMPONENT_CAMERA,
+ .ybbp = 2,
+ },
+ {
+ .name = "4:2:2, packed, UYVY",
+ .fourcc = V4L2_PIX_FMT_UYVY,
+ .flags = 0,
+ .mmal = MMAL_ENCODING_UYVY,
+ .depth = 16,
+ .mmal_component = MMAL_COMPONENT_CAMERA,
+ .ybbp = 2,
+ },
+ {
+ .name = "4:2:0, planar, NV12",
+ .fourcc = V4L2_PIX_FMT_NV12,
+ .flags = 0,
+ .mmal = MMAL_ENCODING_NV12,
+ .depth = 12,
+ .mmal_component = MMAL_COMPONENT_CAMERA,
+ .ybbp = 1,
+ },
+ {
+ .name = "RGB24 (BE)",
+ .fourcc = V4L2_PIX_FMT_BGR24,
+ .flags = 0,
+ .mmal = MMAL_ENCODING_BGR24,
+ .depth = 24,
+ .mmal_component = MMAL_COMPONENT_CAMERA,
+ .ybbp = 3,
+ },
+ {
+ .name = "4:2:0, planar, YVU",
+ .fourcc = V4L2_PIX_FMT_YVU420,
+ .flags = 0,
+ .mmal = MMAL_ENCODING_YV12,
+ .depth = 12,
+ .mmal_component = MMAL_COMPONENT_CAMERA,
+ .ybbp = 1,
+ },
+ {
+ .name = "4:2:0, planar, NV21",
+ .fourcc = V4L2_PIX_FMT_NV21,
+ .flags = 0,
+ .mmal = MMAL_ENCODING_NV21,
+ .depth = 12,
+ .mmal_component = MMAL_COMPONENT_CAMERA,
+ .ybbp = 1,
+ },
+ {
+ .name = "RGB32 (BE)",
+ .fourcc = V4L2_PIX_FMT_BGR32,
+ .flags = 0,
+ .mmal = MMAL_ENCODING_BGRA,
+ .depth = 32,
+ .mmal_component = MMAL_COMPONENT_CAMERA,
+ .ybbp = 4,
+ },
+};
+
+static struct mmal_fmt *get_format(struct v4l2_format *f)
+{
+ struct mmal_fmt *fmt;
+ unsigned int k;
+
+ for (k = 0; k < ARRAY_SIZE(formats); k++) {
+ fmt = &formats[k];
+ if (fmt->fourcc == f->fmt.pix.pixelformat)
+ return fmt;
+ }
+
+ return NULL;
+}
+
+/* ------------------------------------------------------------------
+ * Videobuf queue operations
+ * ------------------------------------------------------------------
+ */
+
+static int queue_setup(struct vb2_queue *vq,
+ unsigned int *nbuffers, unsigned int *nplanes,
+ unsigned int sizes[], struct device *alloc_ctxs[])
+{
+ struct bm2835_mmal_dev *dev = vb2_get_drv_priv(vq);
+ unsigned long size;
+
+ /* refuse queue setup if port is not configured */
+ if (!dev->capture.port) {
+ v4l2_err(&dev->v4l2_dev,
+ "%s: capture port not configured\n", __func__);
+ return -EINVAL;
+ }
+
+ size = dev->capture.port->current_buffer.size;
+ if (size == 0) {
+ v4l2_err(&dev->v4l2_dev,
+ "%s: capture port buffer size is zero\n", __func__);
+ return -EINVAL;
+ }
+
+ if (*nbuffers < (dev->capture.port->current_buffer.num + 2))
+ *nbuffers = (dev->capture.port->current_buffer.num + 2);
+
+ *nplanes = 1;
+
+ sizes[0] = size;
+
+ /*
+ * videobuf2-vmalloc allocator is context-less so no need to set
+ * alloc_ctxs array.
+ */
+
+ v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev, "%s: dev:%p\n",
+ __func__, dev);
+
+ return 0;
+}
+
+static int buffer_prepare(struct vb2_buffer *vb)
+{
+ struct bm2835_mmal_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
+ unsigned long size;
+
+ v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev, "%s: dev:%p\n",
+ __func__, dev);
+
+ BUG_ON(!dev->capture.port);
+ BUG_ON(!dev->capture.fmt);
+
+ size = dev->capture.stride * dev->capture.height;
+ if (vb2_plane_size(vb, 0) < size) {
+ v4l2_err(&dev->v4l2_dev,
+ "%s data will not fit into plane (%lu < %lu)\n",
+ __func__, vb2_plane_size(vb, 0), size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static inline bool is_capturing(struct bm2835_mmal_dev *dev)
+{
+ return dev->capture.camera_port ==
+ &dev->
+ component[MMAL_COMPONENT_CAMERA]->output[MMAL_CAMERA_PORT_CAPTURE];
+}
+
+static void buffer_cb(struct vchiq_mmal_instance *instance,
+ struct vchiq_mmal_port *port,
+ int status,
+ struct mmal_buffer *buf,
+ unsigned long length, u32 mmal_flags, s64 dts, s64 pts)
+{
+ struct bm2835_mmal_dev *dev = port->cb_ctx;
+
+ v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
+ "%s: status:%d, buf:%p, length:%lu, flags %u, pts %lld\n",
+ __func__, status, buf, length, mmal_flags, pts);
+
+ if (status != 0) {
+ /* error in transfer */
+ if (buf) {
+ /* there was a buffer with the error so return it */
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
+ }
+ return;
+ } else if (length == 0) {
+ /* stream ended */
+ if (buf) {
+ /* this should only ever happen if the port is
+ * disabled and there are buffers still queued
+ */
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
+ pr_debug("Empty buffer");
+ } else if (dev->capture.frame_count) {
+ /* grab another frame */
+ if (is_capturing(dev)) {
+ pr_debug("Grab another frame");
+ vchiq_mmal_port_parameter_set(
+ instance,
+ dev->capture.
+ camera_port,
+ MMAL_PARAMETER_CAPTURE,
+ &dev->capture.
+ frame_count,
+ sizeof(dev->capture.frame_count));
+ }
+ } else {
+ /* signal frame completion */
+ complete(&dev->capture.frame_cmplt);
+ }
+ } else {
+ if (dev->capture.frame_count) {
+ if (dev->capture.vc_start_timestamp != -1 &&
+ pts != 0) {
+ struct timeval timestamp;
+ s64 runtime_us = pts -
+ dev->capture.vc_start_timestamp;
+ u32 div = 0;
+ u32 rem = 0;
+
+ div =
+ div_u64_rem(runtime_us, USEC_PER_SEC, &rem);
+ timestamp.tv_sec =
+ dev->capture.kernel_start_ts.tv_sec + div;
+ timestamp.tv_usec =
+ dev->capture.kernel_start_ts.tv_usec + rem;
+
+ if (timestamp.tv_usec >=
+ USEC_PER_SEC) {
+ timestamp.tv_sec++;
+ timestamp.tv_usec -=
+ USEC_PER_SEC;
+ }
+ v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
+ "Convert start time %d.%06d and %llu "
+ "with offset %llu to %d.%06d\n",
+ (int)dev->capture.kernel_start_ts.
+ tv_sec,
+ (int)dev->capture.kernel_start_ts.
+ tv_usec,
+ dev->capture.vc_start_timestamp, pts,
+ (int)timestamp.tv_sec,
+ (int)timestamp.tv_usec);
+ buf->vb.vb2_buf.timestamp = timestamp.tv_sec * 1000000000ULL +
+ timestamp.tv_usec * 1000ULL;
+ } else {
+ buf->vb.vb2_buf.timestamp = ktime_get_ns();
+ }
+
+ vb2_set_plane_payload(&buf->vb.vb2_buf, 0, length);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
+
+ if (mmal_flags & MMAL_BUFFER_HEADER_FLAG_EOS &&
+ is_capturing(dev)) {
+ v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
+ "Grab another frame as buffer has EOS");
+ vchiq_mmal_port_parameter_set(
+ instance,
+ dev->capture.
+ camera_port,
+ MMAL_PARAMETER_CAPTURE,
+ &dev->capture.
+ frame_count,
+ sizeof(dev->capture.frame_count));
+ }
+ } else {
+ /* signal frame completion */
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
+ complete(&dev->capture.frame_cmplt);
+ }
+ }
+}
+
+static int enable_camera(struct bm2835_mmal_dev *dev)
+{
+ int ret;
+
+ if (!dev->camera_use_count) {
+ ret = vchiq_mmal_port_parameter_set(
+ dev->instance,
+ &dev->component[MMAL_COMPONENT_CAMERA]->control,
+ MMAL_PARAMETER_CAMERA_NUM, &dev->camera_num,
+ sizeof(dev->camera_num));
+ if (ret < 0) {
+ v4l2_err(&dev->v4l2_dev,
+ "Failed setting camera num, ret %d\n", ret);
+ return -EINVAL;
+ }
+
+ ret = vchiq_mmal_component_enable(
+ dev->instance,
+ dev->component[MMAL_COMPONENT_CAMERA]);
+ if (ret < 0) {
+ v4l2_err(&dev->v4l2_dev,
+ "Failed enabling camera, ret %d\n", ret);
+ return -EINVAL;
+ }
+ }
+ dev->camera_use_count++;
+ v4l2_dbg(1, bcm2835_v4l2_debug,
+ &dev->v4l2_dev, "enabled camera (refcount %d)\n",
+ dev->camera_use_count);
+ return 0;
+}
+
+static int disable_camera(struct bm2835_mmal_dev *dev)
+{
+ int ret;
+
+ if (!dev->camera_use_count) {
+ v4l2_err(&dev->v4l2_dev,
+ "Disabled the camera when already disabled\n");
+ return -EINVAL;
+ }
+ dev->camera_use_count--;
+ if (!dev->camera_use_count) {
+ unsigned int i = 0xFFFFFFFF;
+
+ v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
+ "Disabling camera\n");
+ ret =
+ vchiq_mmal_component_disable(
+ dev->instance,
+ dev->component[MMAL_COMPONENT_CAMERA]);
+ if (ret < 0) {
+ v4l2_err(&dev->v4l2_dev,
+ "Failed disabling camera, ret %d\n", ret);
+ return -EINVAL;
+ }
+ vchiq_mmal_port_parameter_set(
+ dev->instance,
+ &dev->component[MMAL_COMPONENT_CAMERA]->control,
+ MMAL_PARAMETER_CAMERA_NUM, &i,
+ sizeof(i));
+ }
+ v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
+ "Camera refcount now %d\n", dev->camera_use_count);
+ return 0;
+}
+
+static void buffer_queue(struct vb2_buffer *vb)
+{
+ struct bm2835_mmal_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
+ struct vb2_v4l2_buffer *vb2 = to_vb2_v4l2_buffer(vb);
+ struct mmal_buffer *buf = container_of(vb2, struct mmal_buffer, vb);
+ int ret;
+
+ v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
+ "%s: dev:%p buf:%p\n", __func__, dev, buf);
+
+ buf->buffer = vb2_plane_vaddr(&buf->vb.vb2_buf, 0);
+ buf->buffer_size = vb2_plane_size(&buf->vb.vb2_buf, 0);
+
+ ret = vchiq_mmal_submit_buffer(dev->instance, dev->capture.port, buf);
+ if (ret < 0)
+ v4l2_err(&dev->v4l2_dev, "%s: error submitting buffer\n",
+ __func__);
+}
+
+static int start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+ struct bm2835_mmal_dev *dev = vb2_get_drv_priv(vq);
+ int ret;
+ int parameter_size;
+
+ v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev, "%s: dev:%p\n",
+ __func__, dev);
+
+ /* ensure a format has actually been set */
+ if (!dev->capture.port)
+ return -EINVAL;
+
+ if (enable_camera(dev) < 0) {
+ v4l2_err(&dev->v4l2_dev, "Failed to enable camera\n");
+ return -EINVAL;
+ }
+
+ /*init_completion(&dev->capture.frame_cmplt); */
+
+ /* enable frame capture */
+ dev->capture.frame_count = 1;
+
+ /* if the preview is not already running, wait for a few frames for AGC
+ * to settle down.
+ */
+ if (!dev->component[MMAL_COMPONENT_PREVIEW]->enabled)
+ msleep(300);
+
+ /* enable the connection from camera to encoder (if applicable) */
+ if (dev->capture.camera_port != dev->capture.port
+ && dev->capture.camera_port) {
+ ret = vchiq_mmal_port_enable(dev->instance,
+ dev->capture.camera_port, NULL);
+ if (ret) {
+ v4l2_err(&dev->v4l2_dev,
+ "Failed to enable encode tunnel - error %d\n",
+ ret);
+ return -1;
+ }
+ }
+
+ /* Get VC timestamp at this point in time */
+ parameter_size = sizeof(dev->capture.vc_start_timestamp);
+ if (vchiq_mmal_port_parameter_get(dev->instance,
+ dev->capture.camera_port,
+ MMAL_PARAMETER_SYSTEM_TIME,
+ &dev->capture.vc_start_timestamp,
+ &parameter_size)) {
+ v4l2_err(&dev->v4l2_dev,
+ "Failed to get VC start time - update your VC f/w\n");
+
+ /* Flag to indicate just to rely on kernel timestamps */
+ dev->capture.vc_start_timestamp = -1;
+ } else
+ v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
+ "Start time %lld size %d\n",
+ dev->capture.vc_start_timestamp, parameter_size);
+
+ v4l2_get_timestamp(&dev->capture.kernel_start_ts);
+
+ /* enable the camera port */
+ dev->capture.port->cb_ctx = dev;
+ ret =
+ vchiq_mmal_port_enable(dev->instance, dev->capture.port, buffer_cb);
+ if (ret) {
+ v4l2_err(&dev->v4l2_dev,
+ "Failed to enable capture port - error %d. "
+ "Disabling camera port again\n", ret);
+
+ vchiq_mmal_port_disable(dev->instance,
+ dev->capture.camera_port);
+ if (disable_camera(dev) < 0) {
+ v4l2_err(&dev->v4l2_dev, "Failed to disable camera\n");
+ return -EINVAL;
+ }
+ return -1;
+ }
+
+ /* capture the first frame */
+ vchiq_mmal_port_parameter_set(dev->instance,
+ dev->capture.camera_port,
+ MMAL_PARAMETER_CAPTURE,
+ &dev->capture.frame_count,
+ sizeof(dev->capture.frame_count));
+ return 0;
+}
+
+/* abort streaming and wait for last buffer */
+static void stop_streaming(struct vb2_queue *vq)
+{
+ int ret;
+ struct bm2835_mmal_dev *dev = vb2_get_drv_priv(vq);
+
+ v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev, "%s: dev:%p\n",
+ __func__, dev);
+
+ init_completion(&dev->capture.frame_cmplt);
+ dev->capture.frame_count = 0;
+
+ /* ensure a format has actually been set */
+ if (!dev->capture.port) {
+ v4l2_err(&dev->v4l2_dev,
+ "no capture port - stream not started?\n");
+ return;
+ }
+
+ v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev, "stopping capturing\n");
+
+ /* stop capturing frames */
+ vchiq_mmal_port_parameter_set(dev->instance,
+ dev->capture.camera_port,
+ MMAL_PARAMETER_CAPTURE,
+ &dev->capture.frame_count,
+ sizeof(dev->capture.frame_count));
+
+ /* wait for last frame to complete */
+ ret = wait_for_completion_timeout(&dev->capture.frame_cmplt, HZ);
+ if (ret <= 0)
+ v4l2_err(&dev->v4l2_dev,
+ "error %d waiting for frame completion\n", ret);
+
+ v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
+ "disabling connection\n");
+
+ /* disable the connection from camera to encoder */
+ ret = vchiq_mmal_port_disable(dev->instance, dev->capture.camera_port);
+ if (!ret && dev->capture.camera_port != dev->capture.port) {
+ v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
+ "disabling port\n");
+ ret = vchiq_mmal_port_disable(dev->instance, dev->capture.port);
+ } else if (dev->capture.camera_port != dev->capture.port) {
+ v4l2_err(&dev->v4l2_dev, "port_disable failed, error %d\n",
+ ret);
+ }
+
+ if (disable_camera(dev) < 0)
+ v4l2_err(&dev->v4l2_dev, "Failed to disable camera\n");
+}
+
+static void bm2835_mmal_lock(struct vb2_queue *vq)
+{
+ struct bm2835_mmal_dev *dev = vb2_get_drv_priv(vq);
+
+ mutex_lock(&dev->mutex);
+}
+
+static void bm2835_mmal_unlock(struct vb2_queue *vq)
+{
+ struct bm2835_mmal_dev *dev = vb2_get_drv_priv(vq);
+
+ mutex_unlock(&dev->mutex);
+}
+
+static struct vb2_ops bm2835_mmal_video_qops = {
+ .queue_setup = queue_setup,
+ .buf_prepare = buffer_prepare,
+ .buf_queue = buffer_queue,
+ .start_streaming = start_streaming,
+ .stop_streaming = stop_streaming,
+ .wait_prepare = bm2835_mmal_unlock,
+ .wait_finish = bm2835_mmal_lock,
+};
+
+/* ------------------------------------------------------------------
+ * IOCTL operations
+ * ------------------------------------------------------------------
+ */
+
+static int set_overlay_params(struct bm2835_mmal_dev *dev,
+ struct vchiq_mmal_port *port)
+{
+ struct mmal_parameter_displayregion prev_config = {
+ .set = MMAL_DISPLAY_SET_LAYER | MMAL_DISPLAY_SET_ALPHA |
+ MMAL_DISPLAY_SET_DEST_RECT | MMAL_DISPLAY_SET_FULLSCREEN,
+ .layer = PREVIEW_LAYER,
+ .alpha = dev->overlay.global_alpha,
+ .fullscreen = 0,
+ .dest_rect = {
+ .x = dev->overlay.w.left,
+ .y = dev->overlay.w.top,
+ .width = dev->overlay.w.width,
+ .height = dev->overlay.w.height,
+ },
+ };
+ return vchiq_mmal_port_parameter_set(dev->instance, port,
+ MMAL_PARAMETER_DISPLAYREGION,
+ &prev_config, sizeof(prev_config));
+}
+
+/* overlay ioctl */
+static int vidioc_enum_fmt_vid_overlay(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ struct mmal_fmt *fmt;
+
+ if (f->index >= ARRAY_SIZE(formats))
+ return -EINVAL;
+
+ fmt = &formats[f->index];
+
+ strlcpy(f->description, fmt->name, sizeof(f->description));
+ f->pixelformat = fmt->fourcc;
+ f->flags = fmt->flags;
+
+ return 0;
+}
+
+static int vidioc_g_fmt_vid_overlay(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct bm2835_mmal_dev *dev = video_drvdata(file);
+
+ f->fmt.win = dev->overlay;
+
+ return 0;
+}
+
+static int vidioc_try_fmt_vid_overlay(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct bm2835_mmal_dev *dev = video_drvdata(file);
+
+ f->fmt.win.field = V4L2_FIELD_NONE;
+ f->fmt.win.chromakey = 0;
+ f->fmt.win.clips = NULL;
+ f->fmt.win.clipcount = 0;
+ f->fmt.win.bitmap = NULL;
+
+ v4l_bound_align_image(&f->fmt.win.w.width, MIN_WIDTH, dev->max_width, 1,
+ &f->fmt.win.w.height, MIN_HEIGHT, dev->max_height,
+ 1, 0);
+ v4l_bound_align_image(&f->fmt.win.w.left, MIN_WIDTH, dev->max_width, 1,
+ &f->fmt.win.w.top, MIN_HEIGHT, dev->max_height,
+ 1, 0);
+
+ v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
+ "Overlay: Now w/h %dx%d l/t %dx%d\n",
+ f->fmt.win.w.width, f->fmt.win.w.height,
+ f->fmt.win.w.left, f->fmt.win.w.top);
+
+ v4l2_dump_win_format(1,
+ bcm2835_v4l2_debug,
+ &dev->v4l2_dev,
+ &f->fmt.win,
+ __func__);
+ return 0;
+}
+
+static int vidioc_s_fmt_vid_overlay(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct bm2835_mmal_dev *dev = video_drvdata(file);
+
+ vidioc_try_fmt_vid_overlay(file, priv, f);
+
+ dev->overlay = f->fmt.win;
+ if (dev->component[MMAL_COMPONENT_PREVIEW]->enabled) {
+ set_overlay_params(dev,
+ &dev->component[MMAL_COMPONENT_PREVIEW]->input[0]);
+ }
+
+ return 0;
+}
+
+static int vidioc_overlay(struct file *file, void *f, unsigned int on)
+{
+ int ret;
+ struct bm2835_mmal_dev *dev = video_drvdata(file);
+ struct vchiq_mmal_port *src;
+ struct vchiq_mmal_port *dst;
+
+ if ((on && dev->component[MMAL_COMPONENT_PREVIEW]->enabled) ||
+ (!on && !dev->component[MMAL_COMPONENT_PREVIEW]->enabled))
+ return 0; /* already in requested state */
+
+ src =
+ &dev->component[MMAL_COMPONENT_CAMERA]->
+ output[MMAL_CAMERA_PORT_PREVIEW];
+
+ if (!on) {
+ /* disconnect preview ports and disable component */
+ ret = vchiq_mmal_port_disable(dev->instance, src);
+ if (!ret)
+ ret =
+ vchiq_mmal_port_connect_tunnel(dev->instance, src,
+ NULL);
+ if (ret >= 0)
+ ret = vchiq_mmal_component_disable(
+ dev->instance,
+ dev->component[MMAL_COMPONENT_PREVIEW]);
+
+ disable_camera(dev);
+ return ret;
+ }
+
+ /* set preview port format and connect it to output */
+ dst = &dev->component[MMAL_COMPONENT_PREVIEW]->input[0];
+
+ ret = vchiq_mmal_port_set_format(dev->instance, src);
+ if (ret < 0)
+ goto error;
+
+ ret = set_overlay_params(dev, dst);
+ if (ret < 0)
+ goto error;
+
+ if (enable_camera(dev) < 0)
+ goto error;
+
+ ret = vchiq_mmal_component_enable(
+ dev->instance,
+ dev->component[MMAL_COMPONENT_PREVIEW]);
+ if (ret < 0)
+ goto error;
+
+ v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev, "connecting %p to %p\n",
+ src, dst);
+ ret = vchiq_mmal_port_connect_tunnel(dev->instance, src, dst);
+ if (!ret)
+ ret = vchiq_mmal_port_enable(dev->instance, src, NULL);
+error:
+ return ret;
+}
+
+static int vidioc_g_fbuf(struct file *file, void *fh,
+ struct v4l2_framebuffer *a)
+{
+ /* The video overlay must stay within the framebuffer and can't be
+ * positioned independently.
+ */
+ struct bm2835_mmal_dev *dev = video_drvdata(file);
+ struct vchiq_mmal_port *preview_port =
+ &dev->component[MMAL_COMPONENT_CAMERA]->
+ output[MMAL_CAMERA_PORT_PREVIEW];
+
+ a->capability = V4L2_FBUF_CAP_EXTERNOVERLAY |
+ V4L2_FBUF_CAP_GLOBAL_ALPHA;
+ a->flags = V4L2_FBUF_FLAG_OVERLAY;
+ a->fmt.width = preview_port->es.video.width;
+ a->fmt.height = preview_port->es.video.height;
+ a->fmt.pixelformat = V4L2_PIX_FMT_YUV420;
+ a->fmt.bytesperline = preview_port->es.video.width;
+ a->fmt.sizeimage = (preview_port->es.video.width *
+ preview_port->es.video.height * 3) >> 1;
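+	/*
+	 * Example (illustrative only): for a 1024x768 preview the 4:2:0
+	 * layout above (12 bits per pixel) gives 1024 * 768 * 3 / 2 =
+	 * 1179648 bytes.
+	 */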
+ a->fmt.colorspace = V4L2_COLORSPACE_SMPTE170M;
+
+ return 0;
+}
+
+/* input ioctls */
+static int vidioc_enum_input(struct file *file, void *priv,
+ struct v4l2_input *inp)
+{
+ /* only a single camera input */
+ if (inp->index != 0)
+ return -EINVAL;
+
+ inp->type = V4L2_INPUT_TYPE_CAMERA;
+ sprintf(inp->name, "Camera %u", inp->index);
+ return 0;
+}
+
+static int vidioc_g_input(struct file *file, void *priv, unsigned int *i)
+{
+ *i = 0;
+ return 0;
+}
+
+static int vidioc_s_input(struct file *file, void *priv, unsigned int i)
+{
+ if (i != 0)
+ return -EINVAL;
+
+ return 0;
+}
+
+/* capture ioctls */
+static int vidioc_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ struct bm2835_mmal_dev *dev = video_drvdata(file);
+ u32 major;
+ u32 minor;
+
+ vchiq_mmal_version(dev->instance, &major, &minor);
+
+ strcpy(cap->driver, "bm2835 mmal");
+ snprintf(cap->card, sizeof(cap->card), "mmal service %d.%d",
+ major, minor);
+
+ snprintf(cap->bus_info, sizeof(cap->bus_info),
+ "platform:%s", dev->v4l2_dev.name);
+ cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OVERLAY |
+ V4L2_CAP_STREAMING | V4L2_CAP_READWRITE;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+
+ return 0;
+}
+
+static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ struct mmal_fmt *fmt;
+
+ if (f->index >= ARRAY_SIZE(formats))
+ return -EINVAL;
+
+ fmt = &formats[f->index];
+
+ strlcpy(f->description, fmt->name, sizeof(f->description));
+ f->pixelformat = fmt->fourcc;
+ f->flags = fmt->flags;
+
+ return 0;
+}
+
+static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct bm2835_mmal_dev *dev = video_drvdata(file);
+
+ f->fmt.pix.width = dev->capture.width;
+ f->fmt.pix.height = dev->capture.height;
+ f->fmt.pix.field = V4L2_FIELD_NONE;
+ f->fmt.pix.pixelformat = dev->capture.fmt->fourcc;
+ f->fmt.pix.bytesperline = dev->capture.stride;
+ f->fmt.pix.sizeimage = dev->capture.buffersize;
+
+ if (dev->capture.fmt->fourcc == V4L2_PIX_FMT_RGB24)
+ f->fmt.pix.colorspace = V4L2_COLORSPACE_SRGB;
+ else if (dev->capture.fmt->fourcc == V4L2_PIX_FMT_JPEG)
+ f->fmt.pix.colorspace = V4L2_COLORSPACE_JPEG;
+ else
+ f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
+ f->fmt.pix.priv = 0;
+
+ v4l2_dump_pix_format(1, bcm2835_v4l2_debug, &dev->v4l2_dev, &f->fmt.pix,
+ __func__);
+ return 0;
+}
+
+static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct bm2835_mmal_dev *dev = video_drvdata(file);
+ struct mmal_fmt *mfmt;
+
+ mfmt = get_format(f);
+ if (!mfmt) {
+ v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
+ "Fourcc format (0x%08x) unknown.\n",
+ f->fmt.pix.pixelformat);
+ f->fmt.pix.pixelformat = formats[0].fourcc;
+ mfmt = get_format(f);
+ }
+
+ f->fmt.pix.field = V4L2_FIELD_NONE;
+
+ v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
+ "Clipping/aligning %dx%d format %08X\n",
+ f->fmt.pix.width, f->fmt.pix.height, f->fmt.pix.pixelformat);
+
+ v4l_bound_align_image(&f->fmt.pix.width, MIN_WIDTH, dev->max_width, 1,
+ &f->fmt.pix.height, MIN_HEIGHT, dev->max_height,
+ 1, 0);
+ f->fmt.pix.bytesperline = f->fmt.pix.width * mfmt->ybbp;
+
+ /* Image buffer has to be padded to allow for alignment, even though
+ * we then remove that padding before delivering the buffer.
+ */
+ f->fmt.pix.sizeimage = ((f->fmt.pix.height + 15) & ~15) *
+ (((f->fmt.pix.width + 31) & ~31) * mfmt->depth) >> 3;
+
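+	/*
+	 * Example (illustrative only): a 1920x1080 request for a 12bpp
+	 * format pads to 1920x1088, so sizeimage becomes
+	 * 1088 * ((1920 * 12) >> 3) = 3133440 bytes.
+	 */
+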
+ if ((mfmt->flags & V4L2_FMT_FLAG_COMPRESSED) &&
+ f->fmt.pix.sizeimage < MIN_BUFFER_SIZE)
+ f->fmt.pix.sizeimage = MIN_BUFFER_SIZE;
+
+ if (f->fmt.pix.pixelformat == V4L2_PIX_FMT_RGB24)
+ f->fmt.pix.colorspace = V4L2_COLORSPACE_SRGB;
+ else if (f->fmt.pix.pixelformat == V4L2_PIX_FMT_JPEG)
+ f->fmt.pix.colorspace = V4L2_COLORSPACE_JPEG;
+ else
+ f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
+ f->fmt.pix.priv = 0;
+
+ v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
+ "Now %dx%d format %08X\n",
+ f->fmt.pix.width, f->fmt.pix.height, f->fmt.pix.pixelformat);
+
+ v4l2_dump_pix_format(1, bcm2835_v4l2_debug, &dev->v4l2_dev, &f->fmt.pix,
+ __func__);
+ return 0;
+}
+
+static int mmal_setup_components(struct bm2835_mmal_dev *dev,
+ struct v4l2_format *f)
+{
+ int ret;
+ struct vchiq_mmal_port *port = NULL, *camera_port = NULL;
+ struct vchiq_mmal_component *encode_component = NULL;
+ struct mmal_fmt *mfmt = get_format(f);
+
+ BUG_ON(!mfmt);
+
+ if (dev->capture.encode_component) {
+ v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
+ "vid_cap - disconnect previous tunnel\n");
+
+ /* Disconnect any previous connection */
+ vchiq_mmal_port_connect_tunnel(dev->instance,
+ dev->capture.camera_port, NULL);
+ dev->capture.camera_port = NULL;
+ ret = vchiq_mmal_component_disable(dev->instance,
+ dev->capture.
+ encode_component);
+ if (ret)
+ v4l2_err(&dev->v4l2_dev,
+ "Failed to disable encode component %d\n",
+ ret);
+
+ dev->capture.encode_component = NULL;
+ }
+ /* format dependent port setup */
+ switch (mfmt->mmal_component) {
+ case MMAL_COMPONENT_CAMERA:
+ /* Make a further decision on port based on resolution */
+ if (f->fmt.pix.width <= max_video_width
+ && f->fmt.pix.height <= max_video_height)
+ camera_port = port =
+ &dev->component[MMAL_COMPONENT_CAMERA]->
+ output[MMAL_CAMERA_PORT_VIDEO];
+ else
+ camera_port = port =
+ &dev->component[MMAL_COMPONENT_CAMERA]->
+ output[MMAL_CAMERA_PORT_CAPTURE];
+ break;
+ case MMAL_COMPONENT_IMAGE_ENCODE:
+ encode_component = dev->component[MMAL_COMPONENT_IMAGE_ENCODE];
+ port = &dev->component[MMAL_COMPONENT_IMAGE_ENCODE]->output[0];
+ camera_port =
+ &dev->component[MMAL_COMPONENT_CAMERA]->
+ output[MMAL_CAMERA_PORT_CAPTURE];
+ break;
+ case MMAL_COMPONENT_VIDEO_ENCODE:
+ encode_component = dev->component[MMAL_COMPONENT_VIDEO_ENCODE];
+ port = &dev->component[MMAL_COMPONENT_VIDEO_ENCODE]->output[0];
+ camera_port =
+ &dev->component[MMAL_COMPONENT_CAMERA]->
+ output[MMAL_CAMERA_PORT_VIDEO];
+ break;
+ default:
+ break;
+ }
+
+ if (!port)
+ return -EINVAL;
+
+ if (encode_component)
+ camera_port->format.encoding = MMAL_ENCODING_OPAQUE;
+ else
+ camera_port->format.encoding = mfmt->mmal;
+
+ if (dev->rgb_bgr_swapped) {
+ if (camera_port->format.encoding == MMAL_ENCODING_RGB24)
+ camera_port->format.encoding = MMAL_ENCODING_BGR24;
+ else if (camera_port->format.encoding == MMAL_ENCODING_BGR24)
+ camera_port->format.encoding = MMAL_ENCODING_RGB24;
+ }
+
+ camera_port->format.encoding_variant = 0;
+ camera_port->es.video.width = f->fmt.pix.width;
+ camera_port->es.video.height = f->fmt.pix.height;
+ camera_port->es.video.crop.x = 0;
+ camera_port->es.video.crop.y = 0;
+ camera_port->es.video.crop.width = f->fmt.pix.width;
+ camera_port->es.video.crop.height = f->fmt.pix.height;
+ camera_port->es.video.frame_rate.num = 0;
+ camera_port->es.video.frame_rate.den = 1;
+ camera_port->es.video.color_space = MMAL_COLOR_SPACE_JPEG_JFIF;
+
+ ret = vchiq_mmal_port_set_format(dev->instance, camera_port);
+
+ if (!ret
+ && camera_port ==
+ &dev->component[MMAL_COMPONENT_CAMERA]->
+ output[MMAL_CAMERA_PORT_VIDEO]) {
+ bool overlay_enabled =
+ !!dev->component[MMAL_COMPONENT_PREVIEW]->enabled;
+ struct vchiq_mmal_port *preview_port =
+ &dev->component[MMAL_COMPONENT_CAMERA]->
+ output[MMAL_CAMERA_PORT_PREVIEW];
+ /* Preview and encode ports need to match on resolution */
+ if (overlay_enabled) {
+ /* Need to disable the overlay before we can update
+ * the resolution
+ */
+ ret =
+ vchiq_mmal_port_disable(dev->instance,
+ preview_port);
+ if (!ret)
+ ret =
+ vchiq_mmal_port_connect_tunnel(
+ dev->instance,
+ preview_port,
+ NULL);
+ }
+ preview_port->es.video.width = f->fmt.pix.width;
+ preview_port->es.video.height = f->fmt.pix.height;
+ preview_port->es.video.crop.x = 0;
+ preview_port->es.video.crop.y = 0;
+ preview_port->es.video.crop.width = f->fmt.pix.width;
+ preview_port->es.video.crop.height = f->fmt.pix.height;
+ preview_port->es.video.frame_rate.num =
+ dev->capture.timeperframe.denominator;
+ preview_port->es.video.frame_rate.den =
+ dev->capture.timeperframe.numerator;
+ ret = vchiq_mmal_port_set_format(dev->instance, preview_port);
+ if (overlay_enabled) {
+ ret = vchiq_mmal_port_connect_tunnel(
+ dev->instance,
+ preview_port,
+ &dev->component[MMAL_COMPONENT_PREVIEW]->input[0]);
+ if (!ret)
+ ret = vchiq_mmal_port_enable(dev->instance,
+ preview_port,
+ NULL);
+ }
+ }
+
+ if (ret) {
+ v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
+ "%s failed to set format %dx%d %08X\n", __func__,
+ f->fmt.pix.width, f->fmt.pix.height,
+ f->fmt.pix.pixelformat);
+ /* ensure capture is not going to be tried */
+ dev->capture.port = NULL;
+ } else {
+ if (encode_component) {
+ v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
+ "vid_cap - set up encode comp\n");
+
+ /* configure buffering */
+ camera_port->current_buffer.size =
+ camera_port->recommended_buffer.size;
+ camera_port->current_buffer.num =
+ camera_port->recommended_buffer.num;
+
+ ret =
+ vchiq_mmal_port_connect_tunnel(
+ dev->instance,
+ camera_port,
+ &encode_component->input[0]);
+ if (ret) {
+ v4l2_dbg(1, bcm2835_v4l2_debug,
+ &dev->v4l2_dev,
+ "%s failed to create connection\n",
+ __func__);
+ /* ensure capture is not going to be tried */
+ dev->capture.port = NULL;
+ } else {
+ port->es.video.width = f->fmt.pix.width;
+ port->es.video.height = f->fmt.pix.height;
+ port->es.video.crop.x = 0;
+ port->es.video.crop.y = 0;
+ port->es.video.crop.width = f->fmt.pix.width;
+ port->es.video.crop.height = f->fmt.pix.height;
+ port->es.video.frame_rate.num =
+ dev->capture.timeperframe.denominator;
+ port->es.video.frame_rate.den =
+ dev->capture.timeperframe.numerator;
+
+ port->format.encoding = mfmt->mmal;
+ port->format.encoding_variant = 0;
+ /* Set any encoding specific parameters */
+ switch (mfmt->mmal_component) {
+ case MMAL_COMPONENT_VIDEO_ENCODE:
+ port->format.bitrate =
+ dev->capture.encode_bitrate;
+ break;
+ case MMAL_COMPONENT_IMAGE_ENCODE:
+ /* Could set EXIF parameters here */
+ break;
+ default:
+ break;
+ }
+ ret = vchiq_mmal_port_set_format(dev->instance,
+ port);
+ if (ret)
+ v4l2_dbg(1, bcm2835_v4l2_debug,
+ &dev->v4l2_dev,
+ "%s failed to set format %dx%d fmt %08X\n",
+ __func__,
+ f->fmt.pix.width,
+ f->fmt.pix.height,
+ f->fmt.pix.pixelformat
+ );
+ }
+
+ if (!ret) {
+ ret = vchiq_mmal_component_enable(
+ dev->instance,
+ encode_component);
+ if (ret) {
+ v4l2_dbg(1, bcm2835_v4l2_debug,
+ &dev->v4l2_dev,
+ "%s Failed to enable encode components\n",
+ __func__);
+ }
+ }
+ if (!ret) {
+ /* configure buffering */
+ port->current_buffer.num = 1;
+ port->current_buffer.size =
+ f->fmt.pix.sizeimage;
+ if (port->format.encoding ==
+ MMAL_ENCODING_JPEG) {
+ v4l2_dbg(1, bcm2835_v4l2_debug,
+ &dev->v4l2_dev,
+ "JPG - buf size now %d was %d\n",
+ f->fmt.pix.sizeimage,
+ port->current_buffer.size);
+ port->current_buffer.size =
+ (f->fmt.pix.sizeimage <
+ (100 << 10))
+ ? (100 << 10) : f->fmt.pix.
+ sizeimage;
+ }
+ v4l2_dbg(1, bcm2835_v4l2_debug,
+ &dev->v4l2_dev,
+ "vid_cap - cur_buf.size set to %d\n",
+ f->fmt.pix.sizeimage);
+ port->current_buffer.alignment = 0;
+ }
+ } else {
+ /* configure buffering */
+ camera_port->current_buffer.num = 1;
+ camera_port->current_buffer.size = f->fmt.pix.sizeimage;
+ camera_port->current_buffer.alignment = 0;
+ }
+
+ if (!ret) {
+ dev->capture.fmt = mfmt;
+ dev->capture.stride = f->fmt.pix.bytesperline;
+ dev->capture.width = camera_port->es.video.crop.width;
+ dev->capture.height = camera_port->es.video.crop.height;
+ dev->capture.buffersize = port->current_buffer.size;
+
+ /* select port for capture */
+ dev->capture.port = port;
+ dev->capture.camera_port = camera_port;
+ dev->capture.encode_component = encode_component;
+ v4l2_dbg(1, bcm2835_v4l2_debug,
+ &dev->v4l2_dev,
+ "Set dev->capture.fmt %08X, %dx%d, stride %d, size %d",
+ port->format.encoding,
+ dev->capture.width, dev->capture.height,
+ dev->capture.stride, dev->capture.buffersize);
+ }
+ }
+
+ /* todo: Need to convert the vchiq/mmal error into a v4l2 error. */
+ return ret;
+}
+
+static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ int ret;
+ struct bm2835_mmal_dev *dev = video_drvdata(file);
+ struct mmal_fmt *mfmt;
+
+ /* try the format to set valid parameters */
+ ret = vidioc_try_fmt_vid_cap(file, priv, f);
+ if (ret) {
+ v4l2_err(&dev->v4l2_dev,
+ "vid_cap - vidioc_try_fmt_vid_cap failed\n");
+ return ret;
+ }
+
+ /* if a capture is running refuse to set format */
+ if (vb2_is_busy(&dev->capture.vb_vidq)) {
+ v4l2_info(&dev->v4l2_dev, "%s device busy\n", __func__);
+ return -EBUSY;
+ }
+
+ /* If the format is unsupported v4l2 says we should switch to
+ * a supported one and not return an error.
+ */
+ mfmt = get_format(f);
+ if (!mfmt) {
+ v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
+ "Fourcc format (0x%08x) unknown.\n",
+ f->fmt.pix.pixelformat);
+ f->fmt.pix.pixelformat = formats[0].fourcc;
+ mfmt = get_format(f);
+ }
+
+ ret = mmal_setup_components(dev, f);
+ if (ret != 0) {
+ v4l2_err(&dev->v4l2_dev,
+ "%s: failed to setup mmal components: %d\n",
+ __func__, ret);
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static int vidioc_enum_framesizes(struct file *file, void *fh,
+ struct v4l2_frmsizeenum *fsize)
+{
+ struct bm2835_mmal_dev *dev = video_drvdata(file);
+ static const struct v4l2_frmsize_stepwise sizes = {
+ MIN_WIDTH, 0, 2,
+ MIN_HEIGHT, 0, 2
+ };
+ int i;
+
+ if (fsize->index)
+ return -EINVAL;
+ for (i = 0; i < ARRAY_SIZE(formats); i++)
+ if (formats[i].fourcc == fsize->pixel_format)
+ break;
+ if (i == ARRAY_SIZE(formats))
+ return -EINVAL;
+ fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE;
+ fsize->stepwise = sizes;
+ fsize->stepwise.max_width = dev->max_width;
+ fsize->stepwise.max_height = dev->max_height;
+ return 0;
+}
+
+/* timeperframe is arbitrary and continuous */
+static int vidioc_enum_frameintervals(struct file *file, void *priv,
+ struct v4l2_frmivalenum *fival)
+{
+ struct bm2835_mmal_dev *dev = video_drvdata(file);
+ int i;
+
+ if (fival->index)
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(formats); i++)
+ if (formats[i].fourcc == fival->pixel_format)
+ break;
+ if (i == ARRAY_SIZE(formats))
+ return -EINVAL;
+
+ /* regarding width & height - we support any within range */
+ if (fival->width < MIN_WIDTH || fival->width > dev->max_width ||
+ fival->height < MIN_HEIGHT || fival->height > dev->max_height)
+ return -EINVAL;
+
+ fival->type = V4L2_FRMIVAL_TYPE_CONTINUOUS;
+
+ /* fill in stepwise (step=1.0 is required by V4L2 spec) */
+ fival->stepwise.min = tpf_min;
+ fival->stepwise.max = tpf_max;
+ fival->stepwise.step = (struct v4l2_fract) {1, 1};
+
+ return 0;
+}
+
+static int vidioc_g_parm(struct file *file, void *priv,
+ struct v4l2_streamparm *parm)
+{
+ struct bm2835_mmal_dev *dev = video_drvdata(file);
+
+ if (parm->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ parm->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
+ parm->parm.capture.timeperframe = dev->capture.timeperframe;
+ parm->parm.capture.readbuffers = 1;
+ return 0;
+}
+
+#define FRACT_CMP(a, OP, b) \
+ ((u64)(a).numerator * (b).denominator OP \
+ (u64)(b).numerator * (a).denominator)
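+
+/*
+ * FRACT_CMP compares two v4l2_fract values by cross-multiplying. For
+ * example, FRACT_CMP(tpf, <, tpf_min) with tpf = 1/100 and tpf_min = 1/90
+ * checks 1 * 90 < 1 * 100, which is true, so such a tpf is clipped up to
+ * tpf_min below.
+ */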
+
+static int vidioc_s_parm(struct file *file, void *priv,
+ struct v4l2_streamparm *parm)
+{
+ struct bm2835_mmal_dev *dev = video_drvdata(file);
+ struct v4l2_fract tpf;
+ struct mmal_parameter_rational fps_param;
+
+ if (parm->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ tpf = parm->parm.capture.timeperframe;
+
+	/* tpf: {*, 0} resets timing; clip to [min, max] */
+ tpf = tpf.denominator ? tpf : tpf_default;
+ tpf = FRACT_CMP(tpf, <, tpf_min) ? tpf_min : tpf;
+ tpf = FRACT_CMP(tpf, >, tpf_max) ? tpf_max : tpf;
+
+ dev->capture.timeperframe = tpf;
+ parm->parm.capture.timeperframe = tpf;
+ parm->parm.capture.readbuffers = 1;
+ parm->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
+
+ fps_param.num = 0; /* Select variable fps, and then use
+ * FPS_RANGE to select the actual limits.
+ */
+ fps_param.den = 1;
+ set_framerate_params(dev);
+
+ return 0;
+}
+
+static const struct v4l2_ioctl_ops camera0_ioctl_ops = {
+ /* overlay */
+ .vidioc_enum_fmt_vid_overlay = vidioc_enum_fmt_vid_overlay,
+ .vidioc_g_fmt_vid_overlay = vidioc_g_fmt_vid_overlay,
+ .vidioc_try_fmt_vid_overlay = vidioc_try_fmt_vid_overlay,
+ .vidioc_s_fmt_vid_overlay = vidioc_s_fmt_vid_overlay,
+ .vidioc_overlay = vidioc_overlay,
+ .vidioc_g_fbuf = vidioc_g_fbuf,
+
+ /* inputs */
+ .vidioc_enum_input = vidioc_enum_input,
+ .vidioc_g_input = vidioc_g_input,
+ .vidioc_s_input = vidioc_s_input,
+
+ /* capture */
+ .vidioc_querycap = vidioc_querycap,
+ .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap,
+
+ /* buffer management */
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_enum_framesizes = vidioc_enum_framesizes,
+ .vidioc_enum_frameintervals = vidioc_enum_frameintervals,
+ .vidioc_g_parm = vidioc_g_parm,
+ .vidioc_s_parm = vidioc_s_parm,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+
+ .vidioc_log_status = v4l2_ctrl_log_status,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+/* ------------------------------------------------------------------
+ * Driver init/finalise
+ * ------------------------------------------------------------------
+ */
+
+static const struct v4l2_file_operations camera0_fops = {
+ .owner = THIS_MODULE,
+ .open = v4l2_fh_open,
+ .release = vb2_fop_release,
+ .read = vb2_fop_read,
+ .poll = vb2_fop_poll,
+ .unlocked_ioctl = video_ioctl2, /* V4L2 ioctl handler */
+ .mmap = vb2_fop_mmap,
+};
+
+static struct video_device vdev_template = {
+ .name = "camera0",
+ .fops = &camera0_fops,
+ .ioctl_ops = &camera0_ioctl_ops,
+ .release = video_device_release_empty,
+};
+
+/* Returns the number of cameras, and also the max resolution supported
+ * by those cameras.
+ */
+static int get_num_cameras(struct vchiq_mmal_instance *instance,
+ unsigned int resolutions[][2], int num_resolutions)
+{
+ int ret;
+ struct vchiq_mmal_component *cam_info_component;
+ struct mmal_parameter_camera_info_t cam_info = {0};
+ int param_size = sizeof(cam_info);
+ int i;
+
+ /* create a camera_info component */
+ ret = vchiq_mmal_component_init(instance, "camera_info",
+ &cam_info_component);
+ if (ret < 0)
+ /* Unusual failure - let's guess one camera. */
+ return 1;
+
+ if (vchiq_mmal_port_parameter_get(instance,
+ &cam_info_component->control,
+ MMAL_PARAMETER_CAMERA_INFO,
+ &cam_info,
+ &param_size)) {
+ pr_info("Failed to get camera info\n");
+ }
+ for (i = 0;
+ i < min_t(unsigned int, cam_info.num_cameras, num_resolutions);
+ i++) {
+ resolutions[i][0] = cam_info.cameras[i].max_width;
+ resolutions[i][1] = cam_info.cameras[i].max_height;
+ }
+
+ vchiq_mmal_component_finalise(instance,
+ cam_info_component);
+
+ return cam_info.num_cameras;
+}
+
+static int set_camera_parameters(struct vchiq_mmal_instance *instance,
+ struct vchiq_mmal_component *camera,
+ struct bm2835_mmal_dev *dev)
+{
+ int ret;
+ struct mmal_parameter_camera_config cam_config = {
+ .max_stills_w = dev->max_width,
+ .max_stills_h = dev->max_height,
+ .stills_yuv422 = 1,
+ .one_shot_stills = 1,
+ .max_preview_video_w = (max_video_width > 1920) ?
+ max_video_width : 1920,
+ .max_preview_video_h = (max_video_height > 1088) ?
+ max_video_height : 1088,
+ .num_preview_video_frames = 3,
+ .stills_capture_circular_buffer_height = 0,
+ .fast_preview_resume = 0,
+ .use_stc_timestamp = MMAL_PARAM_TIMESTAMP_MODE_RAW_STC
+ };
+
+ ret = vchiq_mmal_port_parameter_set(instance, &camera->control,
+ MMAL_PARAMETER_CAMERA_CONFIG,
+ &cam_config, sizeof(cam_config));
+ return ret;
+}
+
+#define MAX_SUPPORTED_ENCODINGS 20
+
+/* MMAL instance and component init */
+static int __init mmal_init(struct bm2835_mmal_dev *dev)
+{
+ int ret;
+ struct mmal_es_format_local *format;
+ u32 bool_true = 1;
+ u32 supported_encodings[MAX_SUPPORTED_ENCODINGS];
+ int param_size;
+ struct vchiq_mmal_component *camera;
+
+ ret = vchiq_mmal_init(&dev->instance);
+ if (ret < 0)
+ return ret;
+
+ /* get the camera component ready */
+ ret = vchiq_mmal_component_init(dev->instance, "ril.camera",
+ &dev->component[MMAL_COMPONENT_CAMERA]);
+ if (ret < 0)
+ goto unreg_mmal;
+
+ camera = dev->component[MMAL_COMPONENT_CAMERA];
+ if (camera->outputs < MMAL_CAMERA_PORT_COUNT) {
+ ret = -EINVAL;
+ goto unreg_camera;
+ }
+
+ ret = set_camera_parameters(dev->instance,
+ camera,
+ dev);
+ if (ret < 0)
+ goto unreg_camera;
+
+ /* There was an error in the firmware that meant the camera component
+ * produced BGR instead of RGB.
+	 * This is now fixed, but in order to support older firmware, we
+ * have to check.
+ */
+ dev->rgb_bgr_swapped = true;
+ param_size = sizeof(supported_encodings);
+ ret = vchiq_mmal_port_parameter_get(dev->instance,
+ &camera->output[MMAL_CAMERA_PORT_CAPTURE],
+ MMAL_PARAMETER_SUPPORTED_ENCODINGS,
+ &supported_encodings,
+ &param_size);
+ if (ret == 0) {
+ int i;
+
+ for (i = 0; i < param_size / sizeof(u32); i++) {
+ if (supported_encodings[i] == MMAL_ENCODING_BGR24) {
+ /* Found BGR24 first - old firmware. */
+ break;
+ }
+ if (supported_encodings[i] == MMAL_ENCODING_RGB24) {
+ /* Found RGB24 first
+ * new firmware, so use RGB24.
+ */
+ dev->rgb_bgr_swapped = false;
+ break;
+ }
+ }
+ }
+ format = &camera->output[MMAL_CAMERA_PORT_PREVIEW].format;
+
+ format->encoding = MMAL_ENCODING_OPAQUE;
+ format->encoding_variant = MMAL_ENCODING_I420;
+
+ format->es->video.width = 1024;
+ format->es->video.height = 768;
+ format->es->video.crop.x = 0;
+ format->es->video.crop.y = 0;
+ format->es->video.crop.width = 1024;
+ format->es->video.crop.height = 768;
+ format->es->video.frame_rate.num = 0; /* Rely on fps_range */
+ format->es->video.frame_rate.den = 1;
+
+ format = &camera->output[MMAL_CAMERA_PORT_VIDEO].format;
+
+ format->encoding = MMAL_ENCODING_OPAQUE;
+ format->encoding_variant = MMAL_ENCODING_I420;
+
+ format->es->video.width = 1024;
+ format->es->video.height = 768;
+ format->es->video.crop.x = 0;
+ format->es->video.crop.y = 0;
+ format->es->video.crop.width = 1024;
+ format->es->video.crop.height = 768;
+ format->es->video.frame_rate.num = 0; /* Rely on fps_range */
+ format->es->video.frame_rate.den = 1;
+
+ vchiq_mmal_port_parameter_set(dev->instance,
+ &camera->output[MMAL_CAMERA_PORT_VIDEO],
+ MMAL_PARAMETER_NO_IMAGE_PADDING,
+ &bool_true, sizeof(bool_true));
+
+ format = &camera->output[MMAL_CAMERA_PORT_CAPTURE].format;
+
+ format->encoding = MMAL_ENCODING_OPAQUE;
+
+ format->es->video.width = 2592;
+ format->es->video.height = 1944;
+ format->es->video.crop.x = 0;
+ format->es->video.crop.y = 0;
+ format->es->video.crop.width = 2592;
+ format->es->video.crop.height = 1944;
+ format->es->video.frame_rate.num = 0; /* Rely on fps_range */
+ format->es->video.frame_rate.den = 1;
+
+ dev->capture.width = format->es->video.width;
+ dev->capture.height = format->es->video.height;
+ dev->capture.fmt = &formats[0];
+ dev->capture.encode_component = NULL;
+ dev->capture.timeperframe = tpf_default;
+ dev->capture.enc_profile = V4L2_MPEG_VIDEO_H264_PROFILE_HIGH;
+ dev->capture.enc_level = V4L2_MPEG_VIDEO_H264_LEVEL_4_0;
+
+ vchiq_mmal_port_parameter_set(dev->instance,
+ &camera->output[MMAL_CAMERA_PORT_CAPTURE],
+ MMAL_PARAMETER_NO_IMAGE_PADDING,
+ &bool_true, sizeof(bool_true));
+
+ /* get the preview component ready */
+ ret = vchiq_mmal_component_init(
+ dev->instance, "ril.video_render",
+ &dev->component[MMAL_COMPONENT_PREVIEW]);
+ if (ret < 0)
+ goto unreg_camera;
+
+ if (dev->component[MMAL_COMPONENT_PREVIEW]->inputs < 1) {
+ ret = -EINVAL;
+ pr_debug("too few input ports %d needed %d\n",
+ dev->component[MMAL_COMPONENT_PREVIEW]->inputs, 1);
+ goto unreg_preview;
+ }
+
+ /* get the image encoder component ready */
+ ret = vchiq_mmal_component_init(
+ dev->instance, "ril.image_encode",
+ &dev->component[MMAL_COMPONENT_IMAGE_ENCODE]);
+ if (ret < 0)
+ goto unreg_preview;
+
+ if (dev->component[MMAL_COMPONENT_IMAGE_ENCODE]->inputs < 1) {
+ ret = -EINVAL;
+ v4l2_err(&dev->v4l2_dev, "too few input ports %d needed %d\n",
+ dev->component[MMAL_COMPONENT_IMAGE_ENCODE]->inputs,
+ 1);
+ goto unreg_image_encoder;
+ }
+
+ /* get the video encoder component ready */
+ ret = vchiq_mmal_component_init(dev->instance, "ril.video_encode",
+ &dev->
+ component[MMAL_COMPONENT_VIDEO_ENCODE]);
+ if (ret < 0)
+ goto unreg_image_encoder;
+
+ if (dev->component[MMAL_COMPONENT_VIDEO_ENCODE]->inputs < 1) {
+ ret = -EINVAL;
+ v4l2_err(&dev->v4l2_dev, "too few input ports %d needed %d\n",
+ dev->component[MMAL_COMPONENT_VIDEO_ENCODE]->inputs,
+ 1);
+ goto unreg_vid_encoder;
+ }
+
+ {
+ struct vchiq_mmal_port *encoder_port =
+ &dev->component[MMAL_COMPONENT_VIDEO_ENCODE]->output[0];
+ encoder_port->format.encoding = MMAL_ENCODING_H264;
+ ret = vchiq_mmal_port_set_format(dev->instance,
+ encoder_port);
+ }
+
+ {
+ unsigned int enable = 1;
+
+ vchiq_mmal_port_parameter_set(
+ dev->instance,
+ &dev->component[MMAL_COMPONENT_VIDEO_ENCODE]->control,
+ MMAL_PARAMETER_VIDEO_IMMUTABLE_INPUT,
+ &enable, sizeof(enable));
+
+ vchiq_mmal_port_parameter_set(dev->instance,
+ &dev->component[MMAL_COMPONENT_VIDEO_ENCODE]->control,
+ MMAL_PARAMETER_MINIMISE_FRAGMENTATION,
+ &enable,
+ sizeof(enable));
+ }
+ ret = bm2835_mmal_set_all_camera_controls(dev);
+ if (ret < 0)
+ goto unreg_vid_encoder;
+
+ return 0;
+
+unreg_vid_encoder:
+ pr_err("Cleanup: Destroy video encoder\n");
+ vchiq_mmal_component_finalise(
+ dev->instance,
+ dev->component[MMAL_COMPONENT_VIDEO_ENCODE]);
+
+unreg_image_encoder:
+ pr_err("Cleanup: Destroy image encoder\n");
+ vchiq_mmal_component_finalise(
+ dev->instance,
+ dev->component[MMAL_COMPONENT_IMAGE_ENCODE]);
+
+unreg_preview:
+ pr_err("Cleanup: Destroy video render\n");
+ vchiq_mmal_component_finalise(dev->instance,
+ dev->component[MMAL_COMPONENT_PREVIEW]);
+
+unreg_camera:
+ pr_err("Cleanup: Destroy camera\n");
+ vchiq_mmal_component_finalise(dev->instance,
+ dev->component[MMAL_COMPONENT_CAMERA]);
+
+unreg_mmal:
+ vchiq_mmal_finalise(dev->instance);
+ return ret;
+}
+
+static int __init bm2835_mmal_init_device(struct bm2835_mmal_dev *dev,
+ struct video_device *vfd)
+{
+ int ret;
+
+ *vfd = vdev_template;
+
+ vfd->v4l2_dev = &dev->v4l2_dev;
+
+ vfd->lock = &dev->mutex;
+
+ vfd->queue = &dev->capture.vb_vidq;
+
+ /* video device needs to be able to access instance data */
+ video_set_drvdata(vfd, dev);
+
+ ret = video_register_device(vfd,
+ VFL_TYPE_GRABBER,
+ video_nr[dev->camera_num]);
+ if (ret < 0)
+ return ret;
+
+ v4l2_info(vfd->v4l2_dev,
+ "V4L2 device registered as %s - stills mode > %dx%d\n",
+ video_device_node_name(vfd),
+ max_video_width, max_video_height);
+
+ return 0;
+}
+
+static void bcm2835_cleanup_instance(struct bm2835_mmal_dev *dev)
+{
+ if (!dev)
+ return;
+
+ v4l2_info(&dev->v4l2_dev, "unregistering %s\n",
+ video_device_node_name(&dev->vdev));
+
+ video_unregister_device(&dev->vdev);
+
+ if (dev->capture.encode_component) {
+ v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
+ "mmal_exit - disconnect tunnel\n");
+ vchiq_mmal_port_connect_tunnel(dev->instance,
+ dev->capture.camera_port, NULL);
+ vchiq_mmal_component_disable(dev->instance,
+ dev->capture.encode_component);
+ }
+ vchiq_mmal_component_disable(dev->instance,
+ dev->component[MMAL_COMPONENT_CAMERA]);
+
+ vchiq_mmal_component_finalise(dev->instance,
+ dev->
+ component[MMAL_COMPONENT_VIDEO_ENCODE]);
+
+ vchiq_mmal_component_finalise(dev->instance,
+ dev->
+ component[MMAL_COMPONENT_IMAGE_ENCODE]);
+
+ vchiq_mmal_component_finalise(dev->instance,
+ dev->component[MMAL_COMPONENT_PREVIEW]);
+
+ vchiq_mmal_component_finalise(dev->instance,
+ dev->component[MMAL_COMPONENT_CAMERA]);
+
+ v4l2_ctrl_handler_free(&dev->ctrl_handler);
+
+ v4l2_device_unregister(&dev->v4l2_dev);
+
+ kfree(dev);
+}
+
+static struct v4l2_format default_v4l2_format = {
+ .fmt.pix.pixelformat = V4L2_PIX_FMT_JPEG,
+ .fmt.pix.width = 1024,
+ .fmt.pix.bytesperline = 0,
+ .fmt.pix.height = 768,
+ .fmt.pix.sizeimage = 1024 * 768,
+};
+
+static int __init bm2835_mmal_init(void)
+{
+ int ret;
+ struct bm2835_mmal_dev *dev;
+ struct vb2_queue *q;
+ int camera;
+ unsigned int num_cameras;
+ struct vchiq_mmal_instance *instance;
+ unsigned int resolutions[MAX_BCM2835_CAMERAS][2];
+ int i;
+
+ ret = vchiq_mmal_init(&instance);
+ if (ret < 0)
+ return ret;
+
+ num_cameras = get_num_cameras(instance,
+ resolutions,
+ MAX_BCM2835_CAMERAS);
+ if (num_cameras > MAX_BCM2835_CAMERAS)
+ num_cameras = MAX_BCM2835_CAMERAS;
+
+ for (camera = 0; camera < num_cameras; camera++) {
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev) {
+ ret = -ENOMEM;
+ goto cleanup_gdev;
+ }
+
+ dev->camera_num = camera;
+ dev->max_width = resolutions[camera][0];
+ dev->max_height = resolutions[camera][1];
+
+ /* setup device defaults */
+ dev->overlay.w.left = 150;
+ dev->overlay.w.top = 50;
+ dev->overlay.w.width = 1024;
+ dev->overlay.w.height = 768;
+ dev->overlay.clipcount = 0;
+ dev->overlay.field = V4L2_FIELD_NONE;
+ dev->overlay.global_alpha = 255;
+
+ dev->capture.fmt = &formats[3]; /* JPEG */
+
+ /* v4l device registration */
+ snprintf(dev->v4l2_dev.name, sizeof(dev->v4l2_dev.name),
+ "%s", BM2835_MMAL_MODULE_NAME);
+ ret = v4l2_device_register(NULL, &dev->v4l2_dev);
+ if (ret)
+ goto free_dev;
+
+ /* setup v4l controls */
+ ret = bm2835_mmal_init_controls(dev, &dev->ctrl_handler);
+ if (ret < 0)
+ goto unreg_dev;
+ dev->v4l2_dev.ctrl_handler = &dev->ctrl_handler;
+
+ /* mmal init */
+ dev->instance = instance;
+ ret = mmal_init(dev);
+ if (ret < 0)
+ goto unreg_dev;
+
+ /* initialize queue */
+ q = &dev->capture.vb_vidq;
+ memset(q, 0, sizeof(*q));
+ q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_READ;
+ q->drv_priv = dev;
+ q->buf_struct_size = sizeof(struct mmal_buffer);
+ q->ops = &bm2835_mmal_video_qops;
+ q->mem_ops = &vb2_vmalloc_memops;
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ ret = vb2_queue_init(q);
+ if (ret < 0)
+ goto unreg_dev;
+
+ /* v4l2 core mutex used to protect all fops and v4l2 ioctls. */
+ mutex_init(&dev->mutex);
+
+ /* initialise video devices */
+ ret = bm2835_mmal_init_device(dev, &dev->vdev);
+ if (ret < 0)
+ goto unreg_dev;
+
+ /* Really want to call vidioc_s_fmt_vid_cap with the default
+ * format, but currently the APIs don't join up.
+ */
+ ret = mmal_setup_components(dev, &default_v4l2_format);
+ if (ret < 0) {
+ v4l2_err(&dev->v4l2_dev,
+ "%s: could not setup components\n", __func__);
+ goto unreg_dev;
+ }
+
+ v4l2_info(&dev->v4l2_dev,
+ "Broadcom 2835 MMAL video capture ver %s loaded.\n",
+ BM2835_MMAL_VERSION);
+
+ gdev[camera] = dev;
+ }
+ return 0;
+
+unreg_dev:
+ v4l2_ctrl_handler_free(&dev->ctrl_handler);
+ v4l2_device_unregister(&dev->v4l2_dev);
+
+free_dev:
+ kfree(dev);
+
+cleanup_gdev:
+ for (i = 0; i < camera; i++) {
+ bcm2835_cleanup_instance(gdev[i]);
+ gdev[i] = NULL;
+ }
+ pr_info("%s: error %d while loading driver\n",
+ BM2835_MMAL_MODULE_NAME, ret);
+
+ return ret;
+}
+
+static void __exit bm2835_mmal_exit(void)
+{
+ int camera;
+ struct vchiq_mmal_instance *instance = gdev[0]->instance;
+
+ for (camera = 0; camera < MAX_BCM2835_CAMERAS; camera++) {
+ bcm2835_cleanup_instance(gdev[camera]);
+ gdev[camera] = NULL;
+ }
+ vchiq_mmal_finalise(instance);
+}
+
+module_init(bm2835_mmal_init);
+module_exit(bm2835_mmal_exit);
diff --git a/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.h b/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.h
new file mode 100644
index 000000000000..404037476bc5
--- /dev/null
+++ b/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.h
@@ -0,0 +1,145 @@
+/*
+ * Broadcom BM2835 V4L2 driver
+ *
+ * Copyright © 2013 Raspberry Pi (Trading) Ltd.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive
+ * for more details.
+ *
+ * Authors: Vincent Sanders <vincent.sanders@collabora.co.uk>
+ * Dave Stevenson <dsteve@broadcom.com>
+ * Simon Mellor <simellor@broadcom.com>
+ * Luke Diamand <luked@broadcom.com>
+ *
+ * core driver device
+ */
+
+#define V4L2_CTRL_COUNT 29 /* number of v4l controls */
+
+enum {
+ MMAL_COMPONENT_CAMERA = 0,
+ MMAL_COMPONENT_PREVIEW,
+ MMAL_COMPONENT_IMAGE_ENCODE,
+ MMAL_COMPONENT_VIDEO_ENCODE,
+ MMAL_COMPONENT_COUNT
+};
+
+enum {
+ MMAL_CAMERA_PORT_PREVIEW = 0,
+ MMAL_CAMERA_PORT_VIDEO,
+ MMAL_CAMERA_PORT_CAPTURE,
+ MMAL_CAMERA_PORT_COUNT
+};
+
+#define PREVIEW_LAYER 2
+
+extern int bcm2835_v4l2_debug;
+
+struct bm2835_mmal_dev {
+ /* v4l2 devices */
+ struct v4l2_device v4l2_dev;
+ struct video_device vdev;
+ struct mutex mutex;
+
+ /* controls */
+ struct v4l2_ctrl_handler ctrl_handler;
+ struct v4l2_ctrl *ctrls[V4L2_CTRL_COUNT];
+ enum v4l2_scene_mode scene_mode;
+ struct mmal_colourfx colourfx;
+ int hflip;
+ int vflip;
+ int red_gain;
+ int blue_gain;
+ enum mmal_parameter_exposuremode exposure_mode_user;
+ enum v4l2_exposure_auto_type exposure_mode_v4l2_user;
+ /* active exposure mode may differ if selected via a scene mode */
+ enum mmal_parameter_exposuremode exposure_mode_active;
+ enum mmal_parameter_exposuremeteringmode metering_mode;
+ unsigned int manual_shutter_speed;
+ bool exp_auto_priority;
+ bool manual_iso_enabled;
+ u32 iso;
+
+ /* allocated mmal instance and components */
+ struct vchiq_mmal_instance *instance;
+ struct vchiq_mmal_component *component[MMAL_COMPONENT_COUNT];
+ int camera_use_count;
+
+ struct v4l2_window overlay;
+
+ struct {
+ unsigned int width; /* capture width in pixels */
+ unsigned int height; /* capture height in pixels */
+ unsigned int stride; /* line stride in bytes */
+ unsigned int buffersize; /* buffer size with padding */
+ struct mmal_fmt *fmt;
+ struct v4l2_fract timeperframe;
+
+ /* H264 encode bitrate */
+ int encode_bitrate;
+ /* H264 bitrate mode. CBR/VBR */
+ int encode_bitrate_mode;
+ /* H264 profile */
+ enum v4l2_mpeg_video_h264_profile enc_profile;
+ /* H264 level */
+ enum v4l2_mpeg_video_h264_level enc_level;
+ /* JPEG Q-factor */
+ int q_factor;
+
+ struct vb2_queue vb_vidq;
+
+ /* VC start timestamp for streaming */
+ s64 vc_start_timestamp;
+ /* Kernel start timestamp for streaming */
+ struct timeval kernel_start_ts;
+
+ struct vchiq_mmal_port *port; /* port being used for capture */
+ /* camera port being used for capture */
+ struct vchiq_mmal_port *camera_port;
+ /* component being used for encode */
+ struct vchiq_mmal_component *encode_component;
+ /* number of frames remaining which driver should capture */
+ unsigned int frame_count;
+ /* last frame completion */
+ struct completion frame_cmplt;
+
+ } capture;
+
+ unsigned int camera_num;
+ unsigned int max_width;
+ unsigned int max_height;
+ unsigned int rgb_bgr_swapped;
+};
+
+int bm2835_mmal_init_controls(
+ struct bm2835_mmal_dev *dev,
+ struct v4l2_ctrl_handler *hdl);
+
+int bm2835_mmal_set_all_camera_controls(struct bm2835_mmal_dev *dev);
+int set_framerate_params(struct bm2835_mmal_dev *dev);
+
+/* Debug helpers */
+
+#define v4l2_dump_pix_format(level, debug, dev, pix_fmt, desc) \
+{ \
+ v4l2_dbg(level, debug, dev, \
+"%s: w %u h %u field %u pfmt 0x%x bpl %u sz_img %u colorspace 0x%x priv %u\n", \
+ desc, \
+ (pix_fmt)->width, (pix_fmt)->height, (pix_fmt)->field, \
+ (pix_fmt)->pixelformat, (pix_fmt)->bytesperline, \
+ (pix_fmt)->sizeimage, (pix_fmt)->colorspace, (pix_fmt)->priv); \
+}
+#define v4l2_dump_win_format(level, debug, dev, win_fmt, desc) \
+{ \
+ v4l2_dbg(level, debug, dev, \
+"%s: w %u h %u l %u t %u field %u chromakey %06X clip %p " \
+"clipcount %u bitmap %p\n", \
+ desc, \
+ (win_fmt)->w.width, (win_fmt)->w.height, \
+ (win_fmt)->w.left, (win_fmt)->w.top, \
+ (win_fmt)->field, \
+ (win_fmt)->chromakey, \
+ (win_fmt)->clips, (win_fmt)->clipcount, \
+ (win_fmt)->bitmap); \
+}
diff --git a/drivers/staging/vc04_services/bcm2835-camera/controls.c b/drivers/staging/vc04_services/bcm2835-camera/controls.c
new file mode 100644
index 000000000000..77a5d6f4e1eb
--- /dev/null
+++ b/drivers/staging/vc04_services/bcm2835-camera/controls.c
@@ -0,0 +1,1333 @@
+/*
+ * Broadcom BM2835 V4L2 driver
+ *
+ * Copyright © 2013 Raspberry Pi (Trading) Ltd.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive
+ * for more details.
+ *
+ * Authors: Vincent Sanders <vincent.sanders@collabora.co.uk>
+ * Dave Stevenson <dsteve@broadcom.com>
+ * Simon Mellor <simellor@broadcom.com>
+ * Luke Diamand <luked@broadcom.com>
+ */
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <media/videobuf2-vmalloc.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-common.h>
+
+#include "mmal-common.h"
+#include "mmal-vchiq.h"
+#include "mmal-parameters.h"
+#include "bcm2835-camera.h"
+
+/* The supported V4L2_CID_AUTO_EXPOSURE_BIAS values are from -4.0 to +4.0.
+ * MMAL values are in 1/6th increments so the MMAL range is -24 to +24.
+ * V4L2 docs say value "is expressed in terms of EV, drivers should interpret
+ * the values as 0.001 EV units, where the value 1000 stands for +1 EV."
+ * V4L2 is limited to a max of 32 values in a menu, so count in 1/3rds from
+ * -4 to +4
+ */
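+/* For example, menu index 0 is -4.0 EV (-24 in MMAL 1/6th units), index 12 is
+ * 0 EV and index 24 is +4.0 EV (+24); ctrl_set_value_ev() maps index i to
+ * (i - 12) * 2 sixths.
+ */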
+static const s64 ev_bias_qmenu[] = {
+ -4000, -3667, -3333,
+ -3000, -2667, -2333,
+ -2000, -1667, -1333,
+ -1000, -667, -333,
+ 0, 333, 667,
+ 1000, 1333, 1667,
+ 2000, 2333, 2667,
+ 3000, 3333, 3667,
+ 4000
+};
+
+/* Supported ISO values (*1000).
+ * ISO 0 = auto ISO
+ */
+static const s64 iso_qmenu[] = {
+ 0, 100000, 200000, 400000, 800000,
+};
+static const uint32_t iso_values[] = {
+ 0, 100, 200, 400, 800,
+};
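+/* iso_qmenu and iso_values are indexed by the same menu position: the former
+ * is what is reported to V4L2 (ISO * 1000), the latter is the raw value
+ * written to MMAL_PARAMETER_ISO (0 = auto).
+ */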
+
+static const s64 mains_freq_qmenu[] = {
+ V4L2_CID_POWER_LINE_FREQUENCY_DISABLED,
+ V4L2_CID_POWER_LINE_FREQUENCY_50HZ,
+ V4L2_CID_POWER_LINE_FREQUENCY_60HZ,
+ V4L2_CID_POWER_LINE_FREQUENCY_AUTO
+};
+
+/* Supported video encode modes */
+static const s64 bitrate_mode_qmenu[] = {
+ (s64)V4L2_MPEG_VIDEO_BITRATE_MODE_VBR,
+ (s64)V4L2_MPEG_VIDEO_BITRATE_MODE_CBR,
+};
+
+enum bm2835_mmal_ctrl_type {
+ MMAL_CONTROL_TYPE_STD,
+ MMAL_CONTROL_TYPE_STD_MENU,
+ MMAL_CONTROL_TYPE_INT_MENU,
+ MMAL_CONTROL_TYPE_CLUSTER, /* special cluster entry */
+};
+
+struct bm2835_mmal_v4l2_ctrl;
+
+typedef int(bm2835_mmal_v4l2_ctrl_cb)(
+ struct bm2835_mmal_dev *dev,
+ struct v4l2_ctrl *ctrl,
+ const struct bm2835_mmal_v4l2_ctrl *mmal_ctrl);
+
+struct bm2835_mmal_v4l2_ctrl {
+ u32 id; /* v4l2 control identifier */
+ enum bm2835_mmal_ctrl_type type;
+ /* control minimum value or
+ * mask for MMAL_CONTROL_TYPE_STD_MENU
+ */
+ s32 min;
+ s32 max; /* maximum value of control */
+ s32 def; /* default value of control */
+ s32 step; /* step size of the control */
+ const s64 *imenu; /* integer menu array */
+ u32 mmal_id; /* mmal parameter id */
+ bm2835_mmal_v4l2_ctrl_cb *setter;
+ bool ignore_errors;
+};
+
+struct v4l2_to_mmal_effects_setting {
+ u32 v4l2_effect;
+ u32 mmal_effect;
+ s32 col_fx_enable;
+ s32 col_fx_fixed_cbcr;
+ u32 u;
+ u32 v;
+ u32 num_effect_params;
+ u32 effect_params[MMAL_MAX_IMAGEFX_PARAMETERS];
+};
+
+static const struct v4l2_to_mmal_effects_setting
+ v4l2_to_mmal_effects_values[] = {
+ { V4L2_COLORFX_NONE, MMAL_PARAM_IMAGEFX_NONE,
+ 0, 0, 0, 0, 0, {0, 0, 0, 0, 0} },
+ { V4L2_COLORFX_BW, MMAL_PARAM_IMAGEFX_NONE,
+ 1, 0, 128, 128, 0, {0, 0, 0, 0, 0} },
+ { V4L2_COLORFX_SEPIA, MMAL_PARAM_IMAGEFX_NONE,
+ 1, 0, 87, 151, 0, {0, 0, 0, 0, 0} },
+ { V4L2_COLORFX_NEGATIVE, MMAL_PARAM_IMAGEFX_NEGATIVE,
+ 0, 0, 0, 0, 0, {0, 0, 0, 0, 0} },
+ { V4L2_COLORFX_EMBOSS, MMAL_PARAM_IMAGEFX_EMBOSS,
+ 0, 0, 0, 0, 0, {0, 0, 0, 0, 0} },
+ { V4L2_COLORFX_SKETCH, MMAL_PARAM_IMAGEFX_SKETCH,
+ 0, 0, 0, 0, 0, {0, 0, 0, 0, 0} },
+ { V4L2_COLORFX_SKY_BLUE, MMAL_PARAM_IMAGEFX_PASTEL,
+ 0, 0, 0, 0, 0, {0, 0, 0, 0, 0} },
+ { V4L2_COLORFX_GRASS_GREEN, MMAL_PARAM_IMAGEFX_WATERCOLOUR,
+ 0, 0, 0, 0, 0, {0, 0, 0, 0, 0} },
+ { V4L2_COLORFX_SKIN_WHITEN, MMAL_PARAM_IMAGEFX_WASHEDOUT,
+ 0, 0, 0, 0, 0, {0, 0, 0, 0, 0} },
+ { V4L2_COLORFX_VIVID, MMAL_PARAM_IMAGEFX_SATURATION,
+ 0, 0, 0, 0, 0, {0, 0, 0, 0, 0} },
+ { V4L2_COLORFX_AQUA, MMAL_PARAM_IMAGEFX_NONE,
+ 1, 0, 171, 121, 0, {0, 0, 0, 0, 0} },
+ { V4L2_COLORFX_ART_FREEZE, MMAL_PARAM_IMAGEFX_HATCH,
+ 0, 0, 0, 0, 0, {0, 0, 0, 0, 0} },
+ { V4L2_COLORFX_SILHOUETTE, MMAL_PARAM_IMAGEFX_FILM,
+ 0, 0, 0, 0, 0, {0, 0, 0, 0, 0} },
+ { V4L2_COLORFX_SOLARIZATION, MMAL_PARAM_IMAGEFX_SOLARIZE,
+ 0, 0, 0, 0, 5, {1, 128, 160, 160, 48} },
+ { V4L2_COLORFX_ANTIQUE, MMAL_PARAM_IMAGEFX_COLOURBALANCE,
+ 0, 0, 0, 0, 3, {108, 274, 238, 0, 0} },
+ { V4L2_COLORFX_SET_CBCR, MMAL_PARAM_IMAGEFX_NONE,
+ 1, 1, 0, 0, 0, {0, 0, 0, 0, 0} }
+};
+
+struct v4l2_mmal_scene_config {
+ enum v4l2_scene_mode v4l2_scene;
+ enum mmal_parameter_exposuremode exposure_mode;
+ enum mmal_parameter_exposuremeteringmode metering_mode;
+};
+
+static const struct v4l2_mmal_scene_config scene_configs[] = {
+ /* V4L2_SCENE_MODE_NONE automatically added */
+ {
+ V4L2_SCENE_MODE_NIGHT,
+ MMAL_PARAM_EXPOSUREMODE_NIGHT,
+ MMAL_PARAM_EXPOSUREMETERINGMODE_AVERAGE
+ },
+ {
+ V4L2_SCENE_MODE_SPORTS,
+ MMAL_PARAM_EXPOSUREMODE_SPORTS,
+ MMAL_PARAM_EXPOSUREMETERINGMODE_AVERAGE
+ },
+};
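+/* Note: bm2835_mmal_init_controls() builds the V4L2_CID_SCENE_MODE menu mask
+ * from this table at runtime, so new entries here are picked up automatically.
+ */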
+
+/* control handlers */
+
+static int ctrl_set_rational(struct bm2835_mmal_dev *dev,
+ struct v4l2_ctrl *ctrl,
+ const struct bm2835_mmal_v4l2_ctrl *mmal_ctrl)
+{
+ struct mmal_parameter_rational rational_value;
+ struct vchiq_mmal_port *control;
+
+ control = &dev->component[MMAL_COMPONENT_CAMERA]->control;
+
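+ /* MMAL takes a rational here; the V4L2 control value (a percentage in
+ * the -100..100 or 0..100 range) is sent as value/100.
+ */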
+ rational_value.num = ctrl->val;
+ rational_value.den = 100;
+
+ return vchiq_mmal_port_parameter_set(dev->instance, control,
+ mmal_ctrl->mmal_id,
+ &rational_value,
+ sizeof(rational_value));
+}
+
+static int ctrl_set_value(struct bm2835_mmal_dev *dev,
+ struct v4l2_ctrl *ctrl,
+ const struct bm2835_mmal_v4l2_ctrl *mmal_ctrl)
+{
+ u32 u32_value;
+ struct vchiq_mmal_port *control;
+
+ control = &dev->component[MMAL_COMPONENT_CAMERA]->control;
+
+ u32_value = ctrl->val;
+
+ return vchiq_mmal_port_parameter_set(dev->instance, control,
+ mmal_ctrl->mmal_id,
+ &u32_value, sizeof(u32_value));
+}
+
+static int ctrl_set_iso(struct bm2835_mmal_dev *dev,
+ struct v4l2_ctrl *ctrl,
+ const struct bm2835_mmal_v4l2_ctrl *mmal_ctrl)
+{
+ u32 u32_value;
+ struct vchiq_mmal_port *control;
+
+ if (ctrl->val > mmal_ctrl->max || ctrl->val < mmal_ctrl->min)
+ return 1;
+
+ if (ctrl->id == V4L2_CID_ISO_SENSITIVITY)
+ dev->iso = iso_values[ctrl->val];
+ else if (ctrl->id == V4L2_CID_ISO_SENSITIVITY_AUTO)
+ dev->manual_iso_enabled =
+ (ctrl->val == V4L2_ISO_SENSITIVITY_MANUAL);
+
+ control = &dev->component[MMAL_COMPONENT_CAMERA]->control;
+
+ if (dev->manual_iso_enabled)
+ u32_value = dev->iso;
+ else
+ u32_value = 0;
+
+ return vchiq_mmal_port_parameter_set(dev->instance, control,
+ MMAL_PARAMETER_ISO,
+ &u32_value, sizeof(u32_value));
+}
+
+static int ctrl_set_value_ev(struct bm2835_mmal_dev *dev,
+ struct v4l2_ctrl *ctrl,
+ const struct bm2835_mmal_v4l2_ctrl *mmal_ctrl)
+{
+ s32 s32_value;
+ struct vchiq_mmal_port *control;
+
+ control = &dev->component[MMAL_COMPONENT_CAMERA]->control;
+
+ s32_value = (ctrl->val - 12) * 2; /* Convert from index to 1/6ths */
+
+ return vchiq_mmal_port_parameter_set(dev->instance, control,
+ mmal_ctrl->mmal_id,
+ &s32_value, sizeof(s32_value));
+}
+
+static int ctrl_set_rotate(struct bm2835_mmal_dev *dev,
+ struct v4l2_ctrl *ctrl,
+ const struct bm2835_mmal_v4l2_ctrl *mmal_ctrl)
+{
+ int ret;
+ u32 u32_value;
+ struct vchiq_mmal_component *camera;
+
+ camera = dev->component[MMAL_COMPONENT_CAMERA];
+
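+ /* round the requested rotation down to a multiple of 90 degrees */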
+ u32_value = ((ctrl->val % 360) / 90) * 90;
+
+ ret = vchiq_mmal_port_parameter_set(dev->instance, &camera->output[0],
+ mmal_ctrl->mmal_id,
+ &u32_value, sizeof(u32_value));
+ if (ret < 0)
+ return ret;
+
+ ret = vchiq_mmal_port_parameter_set(dev->instance, &camera->output[1],
+ mmal_ctrl->mmal_id,
+ &u32_value, sizeof(u32_value));
+ if (ret < 0)
+ return ret;
+
+ ret = vchiq_mmal_port_parameter_set(dev->instance, &camera->output[2],
+ mmal_ctrl->mmal_id,
+ &u32_value, sizeof(u32_value));
+
+ return ret;
+}
+
+static int ctrl_set_flip(struct bm2835_mmal_dev *dev,
+ struct v4l2_ctrl *ctrl,
+ const struct bm2835_mmal_v4l2_ctrl *mmal_ctrl)
+{
+ int ret;
+ u32 u32_value;
+ struct vchiq_mmal_component *camera;
+
+ if (ctrl->id == V4L2_CID_HFLIP)
+ dev->hflip = ctrl->val;
+ else
+ dev->vflip = ctrl->val;
+
+ camera = dev->component[MMAL_COMPONENT_CAMERA];
+
+ if (dev->hflip && dev->vflip)
+ u32_value = MMAL_PARAM_MIRROR_BOTH;
+ else if (dev->hflip)
+ u32_value = MMAL_PARAM_MIRROR_HORIZONTAL;
+ else if (dev->vflip)
+ u32_value = MMAL_PARAM_MIRROR_VERTICAL;
+ else
+ u32_value = MMAL_PARAM_MIRROR_NONE;
+
+ ret = vchiq_mmal_port_parameter_set(dev->instance, &camera->output[0],
+ mmal_ctrl->mmal_id,
+ &u32_value, sizeof(u32_value));
+ if (ret < 0)
+ return ret;
+
+ ret = vchiq_mmal_port_parameter_set(dev->instance, &camera->output[1],
+ mmal_ctrl->mmal_id,
+ &u32_value, sizeof(u32_value));
+ if (ret < 0)
+ return ret;
+
+ ret = vchiq_mmal_port_parameter_set(dev->instance, &camera->output[2],
+ mmal_ctrl->mmal_id,
+ &u32_value, sizeof(u32_value));
+
+ return ret;
+}
+
+static int ctrl_set_exposure(struct bm2835_mmal_dev *dev,
+ struct v4l2_ctrl *ctrl,
+ const struct bm2835_mmal_v4l2_ctrl *mmal_ctrl)
+{
+ enum mmal_parameter_exposuremode exp_mode = dev->exposure_mode_user;
+ u32 shutter_speed = 0;
+ struct vchiq_mmal_port *control;
+ int ret = 0;
+
+ control = &dev->component[MMAL_COMPONENT_CAMERA]->control;
+
+ if (mmal_ctrl->mmal_id == MMAL_PARAMETER_SHUTTER_SPEED) {
+ /* V4L2 is in 100usec increments.
+ * MMAL is 1usec.
+ */
+ dev->manual_shutter_speed = ctrl->val * 100;
+ } else if (mmal_ctrl->mmal_id == MMAL_PARAMETER_EXPOSURE_MODE) {
+ switch (ctrl->val) {
+ case V4L2_EXPOSURE_AUTO:
+ exp_mode = MMAL_PARAM_EXPOSUREMODE_AUTO;
+ break;
+
+ case V4L2_EXPOSURE_MANUAL:
+ exp_mode = MMAL_PARAM_EXPOSUREMODE_OFF;
+ break;
+ }
+ dev->exposure_mode_user = exp_mode;
+ dev->exposure_mode_v4l2_user = ctrl->val;
+ } else if (mmal_ctrl->id == V4L2_CID_EXPOSURE_AUTO_PRIORITY) {
+ dev->exp_auto_priority = ctrl->val;
+ }
+
+ if (dev->scene_mode == V4L2_SCENE_MODE_NONE) {
+ if (exp_mode == MMAL_PARAM_EXPOSUREMODE_OFF)
+ shutter_speed = dev->manual_shutter_speed;
+
+ ret = vchiq_mmal_port_parameter_set(dev->instance,
+ control,
+ MMAL_PARAMETER_SHUTTER_SPEED,
+ &shutter_speed,
+ sizeof(shutter_speed));
+ ret += vchiq_mmal_port_parameter_set(dev->instance,
+ control,
+ MMAL_PARAMETER_EXPOSURE_MODE,
+ &exp_mode,
+ sizeof(u32));
+ dev->exposure_mode_active = exp_mode;
+ }
+ /* exposure_dynamic_framerate (V4L2_CID_EXPOSURE_AUTO_PRIORITY) should
+ * always apply irrespective of scene mode.
+ */
+ ret += set_framerate_params(dev);
+
+ return ret;
+}
+
+static int ctrl_set_metering_mode(struct bm2835_mmal_dev *dev,
+ struct v4l2_ctrl *ctrl,
+ const struct bm2835_mmal_v4l2_ctrl *mmal_ctrl)
+{
+ switch (ctrl->val) {
+ case V4L2_EXPOSURE_METERING_AVERAGE:
+ dev->metering_mode = MMAL_PARAM_EXPOSUREMETERINGMODE_AVERAGE;
+ break;
+
+ case V4L2_EXPOSURE_METERING_CENTER_WEIGHTED:
+ dev->metering_mode = MMAL_PARAM_EXPOSUREMETERINGMODE_BACKLIT;
+ break;
+
+ case V4L2_EXPOSURE_METERING_SPOT:
+ dev->metering_mode = MMAL_PARAM_EXPOSUREMETERINGMODE_SPOT;
+ break;
+
+ /* todo matrix weighting not added to Linux API till 3.9
+ * case V4L2_EXPOSURE_METERING_MATRIX:
+ * dev->metering_mode = MMAL_PARAM_EXPOSUREMETERINGMODE_MATRIX;
+ * break;
+ */
+ }
+
+ if (dev->scene_mode == V4L2_SCENE_MODE_NONE) {
+ struct vchiq_mmal_port *control;
+ u32 u32_value = dev->metering_mode;
+
+ control = &dev->component[MMAL_COMPONENT_CAMERA]->control;
+
+ return vchiq_mmal_port_parameter_set(dev->instance, control,
+ mmal_ctrl->mmal_id,
+ &u32_value, sizeof(u32_value));
+ }
+
+ return 0;
+}
+
+static int ctrl_set_flicker_avoidance(struct bm2835_mmal_dev *dev,
+ struct v4l2_ctrl *ctrl,
+ const struct bm2835_mmal_v4l2_ctrl *mmal_ctrl)
+{
+ u32 u32_value;
+ struct vchiq_mmal_port *control;
+
+ control = &dev->component[MMAL_COMPONENT_CAMERA]->control;
+
+ switch (ctrl->val) {
+ case V4L2_CID_POWER_LINE_FREQUENCY_DISABLED:
+ u32_value = MMAL_PARAM_FLICKERAVOID_OFF;
+ break;
+ case V4L2_CID_POWER_LINE_FREQUENCY_50HZ:
+ u32_value = MMAL_PARAM_FLICKERAVOID_50HZ;
+ break;
+ case V4L2_CID_POWER_LINE_FREQUENCY_60HZ:
+ u32_value = MMAL_PARAM_FLICKERAVOID_60HZ;
+ break;
+ case V4L2_CID_POWER_LINE_FREQUENCY_AUTO:
+ u32_value = MMAL_PARAM_FLICKERAVOID_AUTO;
+ break;
+ }
+
+ return vchiq_mmal_port_parameter_set(dev->instance, control,
+ mmal_ctrl->mmal_id,
+ &u32_value, sizeof(u32_value));
+}
+
+static int ctrl_set_awb_mode(struct bm2835_mmal_dev *dev,
+ struct v4l2_ctrl *ctrl,
+ const struct bm2835_mmal_v4l2_ctrl *mmal_ctrl)
+{
+ u32 u32_value;
+ struct vchiq_mmal_port *control;
+
+ control = &dev->component[MMAL_COMPONENT_CAMERA]->control;
+
+ switch (ctrl->val) {
+ case V4L2_WHITE_BALANCE_MANUAL:
+ u32_value = MMAL_PARAM_AWBMODE_OFF;
+ break;
+
+ case V4L2_WHITE_BALANCE_AUTO:
+ u32_value = MMAL_PARAM_AWBMODE_AUTO;
+ break;
+
+ case V4L2_WHITE_BALANCE_INCANDESCENT:
+ u32_value = MMAL_PARAM_AWBMODE_INCANDESCENT;
+ break;
+
+ case V4L2_WHITE_BALANCE_FLUORESCENT:
+ u32_value = MMAL_PARAM_AWBMODE_FLUORESCENT;
+ break;
+
+ case V4L2_WHITE_BALANCE_FLUORESCENT_H:
+ u32_value = MMAL_PARAM_AWBMODE_TUNGSTEN;
+ break;
+
+ case V4L2_WHITE_BALANCE_HORIZON:
+ u32_value = MMAL_PARAM_AWBMODE_HORIZON;
+ break;
+
+ case V4L2_WHITE_BALANCE_DAYLIGHT:
+ u32_value = MMAL_PARAM_AWBMODE_SUNLIGHT;
+ break;
+
+ case V4L2_WHITE_BALANCE_FLASH:
+ u32_value = MMAL_PARAM_AWBMODE_FLASH;
+ break;
+
+ case V4L2_WHITE_BALANCE_CLOUDY:
+ u32_value = MMAL_PARAM_AWBMODE_CLOUDY;
+ break;
+
+ case V4L2_WHITE_BALANCE_SHADE:
+ u32_value = MMAL_PARAM_AWBMODE_SHADE;
+ break;
+ }
+
+ return vchiq_mmal_port_parameter_set(dev->instance, control,
+ mmal_ctrl->mmal_id,
+ &u32_value, sizeof(u32_value));
+}
+
+static int ctrl_set_awb_gains(struct bm2835_mmal_dev *dev,
+ struct v4l2_ctrl *ctrl,
+ const struct bm2835_mmal_v4l2_ctrl *mmal_ctrl)
+{
+ struct vchiq_mmal_port *control;
+ struct mmal_parameter_awbgains gains;
+
+ control = &dev->component[MMAL_COMPONENT_CAMERA]->control;
+
+ if (ctrl->id == V4L2_CID_RED_BALANCE)
+ dev->red_gain = ctrl->val;
+ else if (ctrl->id == V4L2_CID_BLUE_BALANCE)
+ dev->blue_gain = ctrl->val;
+
+ gains.r_gain.num = dev->red_gain;
+ gains.b_gain.num = dev->blue_gain;
+ gains.r_gain.den = gains.b_gain.den = 1000;
+
+ return vchiq_mmal_port_parameter_set(dev->instance, control,
+ mmal_ctrl->mmal_id,
+ &gains, sizeof(gains));
+}
+
+static int ctrl_set_image_effect(struct bm2835_mmal_dev *dev,
+ struct v4l2_ctrl *ctrl,
+ const struct bm2835_mmal_v4l2_ctrl *mmal_ctrl)
+{
+ int ret = -EINVAL;
+ int i, j;
+ struct vchiq_mmal_port *control;
+ struct mmal_parameter_imagefx_parameters imagefx;
+
+ for (i = 0; i < ARRAY_SIZE(v4l2_to_mmal_effects_values); i++) {
+ if (ctrl->val == v4l2_to_mmal_effects_values[i].v4l2_effect) {
+ imagefx.effect =
+ v4l2_to_mmal_effects_values[i].mmal_effect;
+ imagefx.num_effect_params =
+ v4l2_to_mmal_effects_values[i].num_effect_params;
+
+ if (imagefx.num_effect_params > MMAL_MAX_IMAGEFX_PARAMETERS)
+ imagefx.num_effect_params = MMAL_MAX_IMAGEFX_PARAMETERS;
+
+ for (j = 0; j < imagefx.num_effect_params; j++)
+ imagefx.effect_parameter[j] =
+ v4l2_to_mmal_effects_values[i].effect_params[j];
+
+ dev->colourfx.enable =
+ v4l2_to_mmal_effects_values[i].col_fx_enable;
+ if (!v4l2_to_mmal_effects_values[i].col_fx_fixed_cbcr) {
+ dev->colourfx.u =
+ v4l2_to_mmal_effects_values[i].u;
+ dev->colourfx.v =
+ v4l2_to_mmal_effects_values[i].v;
+ }
+
+ control = &dev->component[MMAL_COMPONENT_CAMERA]->control;
+
+ ret = vchiq_mmal_port_parameter_set(
+ dev->instance, control,
+ MMAL_PARAMETER_IMAGE_EFFECT_PARAMETERS,
+ &imagefx, sizeof(imagefx));
+ if (ret)
+ goto exit;
+
+ ret = vchiq_mmal_port_parameter_set(
+ dev->instance, control,
+ MMAL_PARAMETER_COLOUR_EFFECT,
+ &dev->colourfx, sizeof(dev->colourfx));
+ }
+ }
+
+exit:
+ v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
+ "mmal_ctrl:%p ctrl id:0x%x ctrl val:%d imagefx:0x%x color_effect:%s u:%d v:%d ret %d(%d)\n",
+ mmal_ctrl, ctrl->id, ctrl->val, imagefx.effect,
+ dev->colourfx.enable ? "true" : "false",
+ dev->colourfx.u, dev->colourfx.v,
+ ret, (ret == 0 ? 0 : -EINVAL));
+ return (ret == 0 ? 0 : -EINVAL);
+}
+
+static int ctrl_set_colfx(struct bm2835_mmal_dev *dev,
+ struct v4l2_ctrl *ctrl,
+ const struct bm2835_mmal_v4l2_ctrl *mmal_ctrl)
+{
+ int ret = -EINVAL;
+ struct vchiq_mmal_port *control;
+
+ control = &dev->component[MMAL_COMPONENT_CAMERA]->control;
+
+ /* V4L2_CID_COLORFX_CBCR packs Cb in bits 15:8 and Cr in bits 7:0 */
+ dev->colourfx.u = (ctrl->val & 0xff00) >> 8;
+ dev->colourfx.v = ctrl->val & 0xff;
+
+ ret = vchiq_mmal_port_parameter_set(dev->instance, control,
+ MMAL_PARAMETER_COLOUR_EFFECT,
+ &dev->colourfx,
+ sizeof(dev->colourfx));
+
+ v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
+ "%s: After: mmal_ctrl:%p ctrl id:0x%x ctrl val:%d ret %d(%d)\n",
+ __func__, mmal_ctrl, ctrl->id, ctrl->val, ret,
+ (ret == 0 ? 0 : -EINVAL));
+ return (ret == 0 ? 0 : -EINVAL);
+}
+
+static int ctrl_set_bitrate(struct bm2835_mmal_dev *dev,
+ struct v4l2_ctrl *ctrl,
+ const struct bm2835_mmal_v4l2_ctrl *mmal_ctrl)
+{
+ int ret;
+ struct vchiq_mmal_port *encoder_out;
+
+ dev->capture.encode_bitrate = ctrl->val;
+
+ encoder_out = &dev->component[MMAL_COMPONENT_VIDEO_ENCODE]->output[0];
+
+ ret = vchiq_mmal_port_parameter_set(dev->instance, encoder_out,
+ mmal_ctrl->mmal_id,
+ &ctrl->val, sizeof(ctrl->val));
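+ /* any error from the parameter set is discarded; the control always
+ * reports success
+ */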
+ ret = 0;
+ return ret;
+}
+
+static int ctrl_set_bitrate_mode(struct bm2835_mmal_dev *dev,
+ struct v4l2_ctrl *ctrl,
+ const struct bm2835_mmal_v4l2_ctrl *mmal_ctrl)
+{
+ u32 bitrate_mode;
+ struct vchiq_mmal_port *encoder_out;
+
+ encoder_out = &dev->component[MMAL_COMPONENT_VIDEO_ENCODE]->output[0];
+
+ dev->capture.encode_bitrate_mode = ctrl->val;
+ switch (ctrl->val) {
+ default:
+ case V4L2_MPEG_VIDEO_BITRATE_MODE_VBR:
+ bitrate_mode = MMAL_VIDEO_RATECONTROL_VARIABLE;
+ break;
+ case V4L2_MPEG_VIDEO_BITRATE_MODE_CBR:
+ bitrate_mode = MMAL_VIDEO_RATECONTROL_CONSTANT;
+ break;
+ }
+
+ vchiq_mmal_port_parameter_set(dev->instance, encoder_out,
+ mmal_ctrl->mmal_id,
+ &bitrate_mode,
+ sizeof(bitrate_mode));
+ return 0;
+}
+
+static int ctrl_set_image_encode_output(struct bm2835_mmal_dev *dev,
+ struct v4l2_ctrl *ctrl,
+ const struct bm2835_mmal_v4l2_ctrl *mmal_ctrl)
+{
+ u32 u32_value;
+ struct vchiq_mmal_port *jpeg_out;
+
+ jpeg_out = &dev->component[MMAL_COMPONENT_IMAGE_ENCODE]->output[0];
+
+ u32_value = ctrl->val;
+
+ return vchiq_mmal_port_parameter_set(dev->instance, jpeg_out,
+ mmal_ctrl->mmal_id,
+ &u32_value, sizeof(u32_value));
+}
+
+static int ctrl_set_video_encode_param_output(struct bm2835_mmal_dev *dev,
+ struct v4l2_ctrl *ctrl,
+ const struct bm2835_mmal_v4l2_ctrl *mmal_ctrl)
+{
+ u32 u32_value;
+ struct vchiq_mmal_port *vid_enc_ctl;
+
+ vid_enc_ctl = &dev->component[MMAL_COMPONENT_VIDEO_ENCODE]->output[0];
+
+ u32_value = ctrl->val;
+
+ return vchiq_mmal_port_parameter_set(dev->instance, vid_enc_ctl,
+ mmal_ctrl->mmal_id,
+ &u32_value, sizeof(u32_value));
+}
+
+static int ctrl_set_video_encode_profile_level(struct bm2835_mmal_dev *dev,
+ struct v4l2_ctrl *ctrl,
+ const struct bm2835_mmal_v4l2_ctrl *mmal_ctrl)
+{
+ struct mmal_parameter_video_profile param;
+ int ret = 0;
+
+ if (ctrl->id == V4L2_CID_MPEG_VIDEO_H264_PROFILE) {
+ switch (ctrl->val) {
+ case V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE:
+ case V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_BASELINE:
+ case V4L2_MPEG_VIDEO_H264_PROFILE_MAIN:
+ case V4L2_MPEG_VIDEO_H264_PROFILE_HIGH:
+ dev->capture.enc_profile = ctrl->val;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ } else if (ctrl->id == V4L2_CID_MPEG_VIDEO_H264_LEVEL) {
+ switch (ctrl->val) {
+ case V4L2_MPEG_VIDEO_H264_LEVEL_1_0:
+ case V4L2_MPEG_VIDEO_H264_LEVEL_1B:
+ case V4L2_MPEG_VIDEO_H264_LEVEL_1_1:
+ case V4L2_MPEG_VIDEO_H264_LEVEL_1_2:
+ case V4L2_MPEG_VIDEO_H264_LEVEL_1_3:
+ case V4L2_MPEG_VIDEO_H264_LEVEL_2_0:
+ case V4L2_MPEG_VIDEO_H264_LEVEL_2_1:
+ case V4L2_MPEG_VIDEO_H264_LEVEL_2_2:
+ case V4L2_MPEG_VIDEO_H264_LEVEL_3_0:
+ case V4L2_MPEG_VIDEO_H264_LEVEL_3_1:
+ case V4L2_MPEG_VIDEO_H264_LEVEL_3_2:
+ case V4L2_MPEG_VIDEO_H264_LEVEL_4_0:
+ dev->capture.enc_level = ctrl->val;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ }
+
+ if (!ret) {
+ switch (dev->capture.enc_profile) {
+ case V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE:
+ param.profile = MMAL_VIDEO_PROFILE_H264_BASELINE;
+ break;
+ case V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_BASELINE:
+ param.profile =
+ MMAL_VIDEO_PROFILE_H264_CONSTRAINED_BASELINE;
+ break;
+ case V4L2_MPEG_VIDEO_H264_PROFILE_MAIN:
+ param.profile = MMAL_VIDEO_PROFILE_H264_MAIN;
+ break;
+ case V4L2_MPEG_VIDEO_H264_PROFILE_HIGH:
+ param.profile = MMAL_VIDEO_PROFILE_H264_HIGH;
+ break;
+ default:
+ /* Should never get here */
+ break;
+ }
+
+ switch (dev->capture.enc_level) {
+ case V4L2_MPEG_VIDEO_H264_LEVEL_1_0:
+ param.level = MMAL_VIDEO_LEVEL_H264_1;
+ break;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_1B:
+ param.level = MMAL_VIDEO_LEVEL_H264_1b;
+ break;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_1_1:
+ param.level = MMAL_VIDEO_LEVEL_H264_11;
+ break;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_1_2:
+ param.level = MMAL_VIDEO_LEVEL_H264_12;
+ break;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_1_3:
+ param.level = MMAL_VIDEO_LEVEL_H264_13;
+ break;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_2_0:
+ param.level = MMAL_VIDEO_LEVEL_H264_2;
+ break;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_2_1:
+ param.level = MMAL_VIDEO_LEVEL_H264_21;
+ break;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_2_2:
+ param.level = MMAL_VIDEO_LEVEL_H264_22;
+ break;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_3_0:
+ param.level = MMAL_VIDEO_LEVEL_H264_3;
+ break;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_3_1:
+ param.level = MMAL_VIDEO_LEVEL_H264_31;
+ break;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_3_2:
+ param.level = MMAL_VIDEO_LEVEL_H264_32;
+ break;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_4_0:
+ param.level = MMAL_VIDEO_LEVEL_H264_4;
+ break;
+ default:
+ /* Should never get here */
+ break;
+ }
+
+ ret = vchiq_mmal_port_parameter_set(dev->instance,
+ &dev->component[MMAL_COMPONENT_VIDEO_ENCODE]->output[0],
+ mmal_ctrl->mmal_id,
+ &param, sizeof(param));
+ }
+ return ret;
+}
+
+static int ctrl_set_scene_mode(struct bm2835_mmal_dev *dev,
+ struct v4l2_ctrl *ctrl,
+ const struct bm2835_mmal_v4l2_ctrl *mmal_ctrl)
+{
+ int ret = 0;
+ int shutter_speed;
+ struct vchiq_mmal_port *control;
+
+ v4l2_dbg(0, bcm2835_v4l2_debug, &dev->v4l2_dev,
+ "scene mode selected %d, was %d\n", ctrl->val,
+ dev->scene_mode);
+ control = &dev->component[MMAL_COMPONENT_CAMERA]->control;
+
+ if (ctrl->val == dev->scene_mode)
+ return 0;
+
+ if (ctrl->val == V4L2_SCENE_MODE_NONE) {
+ /* Restore all user selections */
+ dev->scene_mode = V4L2_SCENE_MODE_NONE;
+
+ if (dev->exposure_mode_user == MMAL_PARAM_EXPOSUREMODE_OFF)
+ shutter_speed = dev->manual_shutter_speed;
+ else
+ shutter_speed = 0;
+
+ v4l2_dbg(0, bcm2835_v4l2_debug, &dev->v4l2_dev,
+ "%s: scene mode none: shut_speed %d, exp_mode %d, metering %d\n",
+ __func__, shutter_speed, dev->exposure_mode_user,
+ dev->metering_mode);
+ ret = vchiq_mmal_port_parameter_set(dev->instance,
+ control,
+ MMAL_PARAMETER_SHUTTER_SPEED,
+ &shutter_speed,
+ sizeof(shutter_speed));
+ ret += vchiq_mmal_port_parameter_set(dev->instance,
+ control,
+ MMAL_PARAMETER_EXPOSURE_MODE,
+ &dev->exposure_mode_user,
+ sizeof(u32));
+ dev->exposure_mode_active = dev->exposure_mode_user;
+ ret += vchiq_mmal_port_parameter_set(dev->instance,
+ control,
+ MMAL_PARAMETER_EXP_METERING_MODE,
+ &dev->metering_mode,
+ sizeof(u32));
+ ret += set_framerate_params(dev);
+ } else {
+ /* Set up scene mode */
+ int i;
+ const struct v4l2_mmal_scene_config *scene = NULL;
+ int shutter_speed;
+ enum mmal_parameter_exposuremode exposure_mode;
+ enum mmal_parameter_exposuremeteringmode metering_mode;
+
+ for (i = 0; i < ARRAY_SIZE(scene_configs); i++) {
+ if (scene_configs[i].v4l2_scene ==
+ ctrl->val) {
+ scene = &scene_configs[i];
+ break;
+ }
+ }
+ if (!scene)
+ return -EINVAL;
+
+ /* Set all the values */
+ dev->scene_mode = ctrl->val;
+
+ if (scene->exposure_mode == MMAL_PARAM_EXPOSUREMODE_OFF)
+ shutter_speed = dev->manual_shutter_speed;
+ else
+ shutter_speed = 0;
+ exposure_mode = scene->exposure_mode;
+ metering_mode = scene->metering_mode;
+
+ v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
+ "%s: scene mode: shut_speed %d, exp_mode %d, metering %d\n",
+ __func__, shutter_speed, exposure_mode, metering_mode);
+
+ ret = vchiq_mmal_port_parameter_set(dev->instance, control,
+ MMAL_PARAMETER_SHUTTER_SPEED,
+ &shutter_speed,
+ sizeof(shutter_speed));
+ ret += vchiq_mmal_port_parameter_set(dev->instance, control,
+ MMAL_PARAMETER_EXPOSURE_MODE,
+ &exposure_mode,
+ sizeof(u32));
+ dev->exposure_mode_active = exposure_mode;
+ ret += vchiq_mmal_port_parameter_set(dev->instance, control,
+ MMAL_PARAMETER_EXP_METERING_MODE,
+ &metering_mode,
+ sizeof(u32));
+ ret += set_framerate_params(dev);
+ }
+ if (ret) {
+ v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
+ "%s: Setting scene to %d, ret=%d\n",
+ __func__, ctrl->val, ret);
+ ret = -EINVAL;
+ }
+ return ret;
+}
+
+static int bm2835_mmal_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct bm2835_mmal_dev *dev =
+ container_of(ctrl->handler, struct bm2835_mmal_dev,
+ ctrl_handler);
+ const struct bm2835_mmal_v4l2_ctrl *mmal_ctrl = ctrl->priv;
+ int ret;
+
+ if (!mmal_ctrl || mmal_ctrl->id != ctrl->id || !mmal_ctrl->setter) {
+ pr_warn("mmal_ctrl:%p ctrl id:%d\n", mmal_ctrl, ctrl->id);
+ return -EINVAL;
+ }
+
+ ret = mmal_ctrl->setter(dev, ctrl, mmal_ctrl);
+ if (ret)
+ pr_warn("ctrl id:%d/MMAL param %08X- returned ret %d\n",
+ ctrl->id, mmal_ctrl->mmal_id, ret);
+ if (mmal_ctrl->ignore_errors)
+ ret = 0;
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops bm2835_mmal_ctrl_ops = {
+ .s_ctrl = bm2835_mmal_s_ctrl,
+};
+
+static const struct bm2835_mmal_v4l2_ctrl v4l2_ctrls[V4L2_CTRL_COUNT] = {
+ {
+ V4L2_CID_SATURATION, MMAL_CONTROL_TYPE_STD,
+ -100, 100, 0, 1, NULL,
+ MMAL_PARAMETER_SATURATION,
+ &ctrl_set_rational,
+ false
+ },
+ {
+ V4L2_CID_SHARPNESS, MMAL_CONTROL_TYPE_STD,
+ -100, 100, 0, 1, NULL,
+ MMAL_PARAMETER_SHARPNESS,
+ &ctrl_set_rational,
+ false
+ },
+ {
+ V4L2_CID_CONTRAST, MMAL_CONTROL_TYPE_STD,
+ -100, 100, 0, 1, NULL,
+ MMAL_PARAMETER_CONTRAST,
+ &ctrl_set_rational,
+ false
+ },
+ {
+ V4L2_CID_BRIGHTNESS, MMAL_CONTROL_TYPE_STD,
+ 0, 100, 50, 1, NULL,
+ MMAL_PARAMETER_BRIGHTNESS,
+ &ctrl_set_rational,
+ false
+ },
+ {
+ V4L2_CID_ISO_SENSITIVITY, MMAL_CONTROL_TYPE_INT_MENU,
+ 0, ARRAY_SIZE(iso_qmenu) - 1, 0, 1, iso_qmenu,
+ MMAL_PARAMETER_ISO,
+ &ctrl_set_iso,
+ false
+ },
+ {
+ V4L2_CID_ISO_SENSITIVITY_AUTO, MMAL_CONTROL_TYPE_STD_MENU,
+ 0, 1, V4L2_ISO_SENSITIVITY_AUTO, 1, NULL,
+ MMAL_PARAMETER_ISO,
+ &ctrl_set_iso,
+ false
+ },
+ {
+ V4L2_CID_IMAGE_STABILIZATION, MMAL_CONTROL_TYPE_STD,
+ 0, 1, 0, 1, NULL,
+ MMAL_PARAMETER_VIDEO_STABILISATION,
+ &ctrl_set_value,
+ false
+ },
+/* {
+ * 0, MMAL_CONTROL_TYPE_CLUSTER, 3, 1, 0, NULL, 0, NULL
+ * },
+ */
+ {
+ V4L2_CID_EXPOSURE_AUTO, MMAL_CONTROL_TYPE_STD_MENU,
+ ~0x03, 3, V4L2_EXPOSURE_AUTO, 0, NULL,
+ MMAL_PARAMETER_EXPOSURE_MODE,
+ &ctrl_set_exposure,
+ false
+ },
+/* todo this needs mixing in with set exposure
+ * {
+ * V4L2_CID_SCENE_MODE, MMAL_CONTROL_TYPE_STD_MENU,
+ * },
+ */
+ {
+ V4L2_CID_EXPOSURE_ABSOLUTE, MMAL_CONTROL_TYPE_STD,
+ /* Units of 100usecs */
+ 1, 1 * 1000 * 10, 100 * 10, 1, NULL,
+ MMAL_PARAMETER_SHUTTER_SPEED,
+ &ctrl_set_exposure,
+ false
+ },
+ {
+ V4L2_CID_AUTO_EXPOSURE_BIAS, MMAL_CONTROL_TYPE_INT_MENU,
+ 0, ARRAY_SIZE(ev_bias_qmenu) - 1,
+ (ARRAY_SIZE(ev_bias_qmenu) + 1) / 2 - 1, 0, ev_bias_qmenu,
+ MMAL_PARAMETER_EXPOSURE_COMP,
+ &ctrl_set_value_ev,
+ false
+ },
+ {
+ V4L2_CID_EXPOSURE_AUTO_PRIORITY, MMAL_CONTROL_TYPE_STD,
+ 0, 1,
+ 0, 1, NULL,
+ 0, /* Dummy MMAL ID as it gets mapped into FPS range */
+ &ctrl_set_exposure,
+ false
+ },
+ {
+ V4L2_CID_EXPOSURE_METERING,
+ MMAL_CONTROL_TYPE_STD_MENU,
+ ~0x7, 2, V4L2_EXPOSURE_METERING_AVERAGE, 0, NULL,
+ MMAL_PARAMETER_EXP_METERING_MODE,
+ &ctrl_set_metering_mode,
+ false
+ },
+ {
+ V4L2_CID_AUTO_N_PRESET_WHITE_BALANCE,
+ MMAL_CONTROL_TYPE_STD_MENU,
+ ~0x3ff, 9, V4L2_WHITE_BALANCE_AUTO, 0, NULL,
+ MMAL_PARAMETER_AWB_MODE,
+ &ctrl_set_awb_mode,
+ false
+ },
+ {
+ V4L2_CID_RED_BALANCE, MMAL_CONTROL_TYPE_STD,
+ 1, 7999, 1000, 1, NULL,
+ MMAL_PARAMETER_CUSTOM_AWB_GAINS,
+ &ctrl_set_awb_gains,
+ false
+ },
+ {
+ V4L2_CID_BLUE_BALANCE, MMAL_CONTROL_TYPE_STD,
+ 1, 7999, 1000, 1, NULL,
+ MMAL_PARAMETER_CUSTOM_AWB_GAINS,
+ &ctrl_set_awb_gains,
+ false
+ },
+ {
+ V4L2_CID_COLORFX, MMAL_CONTROL_TYPE_STD_MENU,
+ 0, 15, V4L2_COLORFX_NONE, 0, NULL,
+ MMAL_PARAMETER_IMAGE_EFFECT,
+ &ctrl_set_image_effect,
+ false
+ },
+ {
+ V4L2_CID_COLORFX_CBCR, MMAL_CONTROL_TYPE_STD,
+ 0, 0xffff, 0x8080, 1, NULL,
+ MMAL_PARAMETER_COLOUR_EFFECT,
+ &ctrl_set_colfx,
+ false
+ },
+ {
+ V4L2_CID_ROTATE, MMAL_CONTROL_TYPE_STD,
+ 0, 360, 0, 90, NULL,
+ MMAL_PARAMETER_ROTATION,
+ &ctrl_set_rotate,
+ false
+ },
+ {
+ V4L2_CID_HFLIP, MMAL_CONTROL_TYPE_STD,
+ 0, 1, 0, 1, NULL,
+ MMAL_PARAMETER_MIRROR,
+ &ctrl_set_flip,
+ false
+ },
+ {
+ V4L2_CID_VFLIP, MMAL_CONTROL_TYPE_STD,
+ 0, 1, 0, 1, NULL,
+ MMAL_PARAMETER_MIRROR,
+ &ctrl_set_flip,
+ false
+ },
+ {
+ V4L2_CID_MPEG_VIDEO_BITRATE_MODE, MMAL_CONTROL_TYPE_STD_MENU,
+ 0, ARRAY_SIZE(bitrate_mode_qmenu) - 1,
+ 0, 0, bitrate_mode_qmenu,
+ MMAL_PARAMETER_RATECONTROL,
+ &ctrl_set_bitrate_mode,
+ false
+ },
+ {
+ V4L2_CID_MPEG_VIDEO_BITRATE, MMAL_CONTROL_TYPE_STD,
+ 25 * 1000, 25 * 1000 * 1000, 10 * 1000 * 1000, 25 * 1000, NULL,
+ MMAL_PARAMETER_VIDEO_BIT_RATE,
+ &ctrl_set_bitrate,
+ false
+ },
+ {
+ V4L2_CID_JPEG_COMPRESSION_QUALITY, MMAL_CONTROL_TYPE_STD,
+ 1, 100,
+ 30, 1, NULL,
+ MMAL_PARAMETER_JPEG_Q_FACTOR,
+ &ctrl_set_image_encode_output,
+ false
+ },
+ {
+ V4L2_CID_POWER_LINE_FREQUENCY, MMAL_CONTROL_TYPE_STD_MENU,
+ 0, ARRAY_SIZE(mains_freq_qmenu) - 1,
+ 1, 1, NULL,
+ MMAL_PARAMETER_FLICKER_AVOID,
+ &ctrl_set_flicker_avoidance,
+ false
+ },
+ {
+ V4L2_CID_MPEG_VIDEO_REPEAT_SEQ_HEADER, MMAL_CONTROL_TYPE_STD,
+ 0, 1,
+ 0, 1, NULL,
+ MMAL_PARAMETER_VIDEO_ENCODE_INLINE_HEADER,
+ &ctrl_set_video_encode_param_output,
+ true /* Errors ignored as requires latest firmware to work */
+ },
+ {
+ V4L2_CID_MPEG_VIDEO_H264_PROFILE,
+ MMAL_CONTROL_TYPE_STD_MENU,
+ ~((1<<V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE) |
+ (1<<V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_BASELINE) |
+ (1<<V4L2_MPEG_VIDEO_H264_PROFILE_MAIN) |
+ (1<<V4L2_MPEG_VIDEO_H264_PROFILE_HIGH)),
+ V4L2_MPEG_VIDEO_H264_PROFILE_HIGH,
+ V4L2_MPEG_VIDEO_H264_PROFILE_HIGH, 1, NULL,
+ MMAL_PARAMETER_PROFILE,
+ &ctrl_set_video_encode_profile_level,
+ false
+ },
+ {
+ V4L2_CID_MPEG_VIDEO_H264_LEVEL, MMAL_CONTROL_TYPE_STD_MENU,
+ ~((1<<V4L2_MPEG_VIDEO_H264_LEVEL_1_0) |
+ (1<<V4L2_MPEG_VIDEO_H264_LEVEL_1B) |
+ (1<<V4L2_MPEG_VIDEO_H264_LEVEL_1_1) |
+ (1<<V4L2_MPEG_VIDEO_H264_LEVEL_1_2) |
+ (1<<V4L2_MPEG_VIDEO_H264_LEVEL_1_3) |
+ (1<<V4L2_MPEG_VIDEO_H264_LEVEL_2_0) |
+ (1<<V4L2_MPEG_VIDEO_H264_LEVEL_2_1) |
+ (1<<V4L2_MPEG_VIDEO_H264_LEVEL_2_2) |
+ (1<<V4L2_MPEG_VIDEO_H264_LEVEL_3_0) |
+ (1<<V4L2_MPEG_VIDEO_H264_LEVEL_3_1) |
+ (1<<V4L2_MPEG_VIDEO_H264_LEVEL_3_2) |
+ (1<<V4L2_MPEG_VIDEO_H264_LEVEL_4_0)),
+ V4L2_MPEG_VIDEO_H264_LEVEL_4_0,
+ V4L2_MPEG_VIDEO_H264_LEVEL_4_0, 1, NULL,
+ MMAL_PARAMETER_PROFILE,
+ &ctrl_set_video_encode_profile_level,
+ false
+ },
+ {
+ V4L2_CID_SCENE_MODE, MMAL_CONTROL_TYPE_STD_MENU,
+ -1, /* Min is computed at runtime */
+ V4L2_SCENE_MODE_TEXT,
+ V4L2_SCENE_MODE_NONE, 1, NULL,
+ MMAL_PARAMETER_PROFILE,
+ &ctrl_set_scene_mode,
+ false
+ },
+ {
+ V4L2_CID_MPEG_VIDEO_H264_I_PERIOD, MMAL_CONTROL_TYPE_STD,
+ 0, 0x7FFFFFFF, 60, 1, NULL,
+ MMAL_PARAMETER_INTRAPERIOD,
+ &ctrl_set_video_encode_param_output,
+ false
+ },
+};
+
+int bm2835_mmal_set_all_camera_controls(struct bm2835_mmal_dev *dev)
+{
+ int c;
+ int ret = 0;
+
+ for (c = 0; c < V4L2_CTRL_COUNT; c++) {
+ if ((dev->ctrls[c]) && (v4l2_ctrls[c].setter)) {
+ ret = v4l2_ctrls[c].setter(dev, dev->ctrls[c],
+ &v4l2_ctrls[c]);
+ if (!v4l2_ctrls[c].ignore_errors && ret) {
+ v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
+ "Failed when setting default values for ctrl %d\n",
+ c);
+ break;
+ }
+ }
+ }
+ return ret;
+}
+
+int set_framerate_params(struct bm2835_mmal_dev *dev)
+{
+ struct mmal_parameter_fps_range fps_range;
+ int ret;
+
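+ /* capture.timeperframe is seconds per frame, so the FPS rationals
+ * below are its reciprocal (denominator/numerator).
+ */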
+ if ((dev->exposure_mode_active != MMAL_PARAM_EXPOSUREMODE_OFF) &&
+ (dev->exp_auto_priority)) {
+ /* Variable FPS. Define min FPS as 1fps.
+ * Max as max defined FPS.
+ */
+ fps_range.fps_low.num = 1;
+ fps_range.fps_low.den = 1;
+ fps_range.fps_high.num = dev->capture.timeperframe.denominator;
+ fps_range.fps_high.den = dev->capture.timeperframe.numerator;
+ } else {
+ /* Fixed FPS - set min and max to be the same */
+ fps_range.fps_low.num = fps_range.fps_high.num =
+ dev->capture.timeperframe.denominator;
+ fps_range.fps_low.den = fps_range.fps_high.den =
+ dev->capture.timeperframe.numerator;
+ }
+
+ v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
+ "Set fps range to %d/%d to %d/%d\n",
+ fps_range.fps_low.num,
+ fps_range.fps_low.den,
+ fps_range.fps_high.num,
+ fps_range.fps_high.den);
+
+ ret = vchiq_mmal_port_parameter_set(dev->instance,
+ &dev->component[MMAL_COMPONENT_CAMERA]->
+ output[MMAL_CAMERA_PORT_PREVIEW],
+ MMAL_PARAMETER_FPS_RANGE,
+ &fps_range, sizeof(fps_range));
+ ret += vchiq_mmal_port_parameter_set(dev->instance,
+ &dev->component[MMAL_COMPONENT_CAMERA]->
+ output[MMAL_CAMERA_PORT_VIDEO],
+ MMAL_PARAMETER_FPS_RANGE,
+ &fps_range, sizeof(fps_range));
+ ret += vchiq_mmal_port_parameter_set(dev->instance,
+ &dev->component[MMAL_COMPONENT_CAMERA]->
+ output[MMAL_CAMERA_PORT_CAPTURE],
+ MMAL_PARAMETER_FPS_RANGE,
+ &fps_range, sizeof(fps_range));
+ if (ret)
+ v4l2_dbg(0, bcm2835_v4l2_debug, &dev->v4l2_dev,
+ "Failed to set fps ret %d\n", ret);
+
+ return ret;
+}
+
+int bm2835_mmal_init_controls(struct bm2835_mmal_dev *dev,
+ struct v4l2_ctrl_handler *hdl)
+{
+ int c;
+ const struct bm2835_mmal_v4l2_ctrl *ctrl;
+
+ v4l2_ctrl_handler_init(hdl, V4L2_CTRL_COUNT);
+
+ for (c = 0; c < V4L2_CTRL_COUNT; c++) {
+ ctrl = &v4l2_ctrls[c];
+
+ switch (ctrl->type) {
+ case MMAL_CONTROL_TYPE_STD:
+ dev->ctrls[c] = v4l2_ctrl_new_std(hdl,
+ &bm2835_mmal_ctrl_ops, ctrl->id,
+ ctrl->min, ctrl->max, ctrl->step, ctrl->def);
+ break;
+
+ case MMAL_CONTROL_TYPE_STD_MENU:
+ {
+ int mask = ctrl->min;
+
+ if (ctrl->id == V4L2_CID_SCENE_MODE) {
+ /* Special handling to work out the mask
+ * value based on the scene_configs array
+ * at runtime. Reduces the chance of
+ * mismatches.
+ */
+ int i;
+ mask = 1 << V4L2_SCENE_MODE_NONE;
+ for (i = 0;
+ i < ARRAY_SIZE(scene_configs);
+ i++) {
+ mask |= 1 << scene_configs[i].v4l2_scene;
+ }
+ mask = ~mask;
+ }
+
+ dev->ctrls[c] = v4l2_ctrl_new_std_menu(hdl,
+ &bm2835_mmal_ctrl_ops, ctrl->id,
+ ctrl->max, mask, ctrl->def);
+ break;
+ }
+
+ case MMAL_CONTROL_TYPE_INT_MENU:
+ dev->ctrls[c] = v4l2_ctrl_new_int_menu(hdl,
+ &bm2835_mmal_ctrl_ops, ctrl->id,
+ ctrl->max, ctrl->def, ctrl->imenu);
+ break;
+
+ case MMAL_CONTROL_TYPE_CLUSTER:
+ /* skip this entry when constructing controls */
+ continue;
+ }
+
+ if (hdl->error)
+ break;
+
+ dev->ctrls[c]->priv = (void *)ctrl;
+ }
+
+ if (hdl->error) {
+ pr_err("error adding control %d/%d id 0x%x\n", c,
+ V4L2_CTRL_COUNT, ctrl->id);
+ return hdl->error;
+ }
+
+ for (c = 0; c < V4L2_CTRL_COUNT; c++) {
+ ctrl = &v4l2_ctrls[c];
+
+ switch (ctrl->type) {
+ case MMAL_CONTROL_TYPE_CLUSTER:
+ v4l2_ctrl_auto_cluster(ctrl->min,
+ &dev->ctrls[c + 1],
+ ctrl->max,
+ ctrl->def);
+ break;
+
+ case MMAL_CONTROL_TYPE_STD:
+ case MMAL_CONTROL_TYPE_STD_MENU:
+ case MMAL_CONTROL_TYPE_INT_MENU:
+ break;
+ }
+ }
+
+ return 0;
+}
diff --git a/drivers/staging/media/platform/bcm2835/mmal-common.h b/drivers/staging/vc04_services/bcm2835-camera/mmal-common.h
index 840fd139e033..840fd139e033 100644
--- a/drivers/staging/media/platform/bcm2835/mmal-common.h
+++ b/drivers/staging/vc04_services/bcm2835-camera/mmal-common.h
diff --git a/drivers/staging/vc04_services/bcm2835-camera/mmal-encodings.h b/drivers/staging/vc04_services/bcm2835-camera/mmal-encodings.h
new file mode 100644
index 000000000000..e71d9600b278
--- /dev/null
+++ b/drivers/staging/vc04_services/bcm2835-camera/mmal-encodings.h
@@ -0,0 +1,126 @@
+/*
+ * Broadcom BM2835 V4L2 driver
+ *
+ * Copyright © 2013 Raspberry Pi (Trading) Ltd.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive
+ * for more details.
+ *
+ * Authors: Vincent Sanders <vincent.sanders@collabora.co.uk>
+ * Dave Stevenson <dsteve@broadcom.com>
+ * Simon Mellor <simellor@broadcom.com>
+ * Luke Diamand <luked@broadcom.com>
+ */
+#ifndef MMAL_ENCODINGS_H
+#define MMAL_ENCODINGS_H
+
+#define MMAL_ENCODING_H264 MMAL_FOURCC('H', '2', '6', '4')
+#define MMAL_ENCODING_H263 MMAL_FOURCC('H', '2', '6', '3')
+#define MMAL_ENCODING_MP4V MMAL_FOURCC('M', 'P', '4', 'V')
+#define MMAL_ENCODING_MP2V MMAL_FOURCC('M', 'P', '2', 'V')
+#define MMAL_ENCODING_MP1V MMAL_FOURCC('M', 'P', '1', 'V')
+#define MMAL_ENCODING_WMV3 MMAL_FOURCC('W', 'M', 'V', '3')
+#define MMAL_ENCODING_WMV2 MMAL_FOURCC('W', 'M', 'V', '2')
+#define MMAL_ENCODING_WMV1 MMAL_FOURCC('W', 'M', 'V', '1')
+#define MMAL_ENCODING_WVC1 MMAL_FOURCC('W', 'V', 'C', '1')
+#define MMAL_ENCODING_VP8 MMAL_FOURCC('V', 'P', '8', ' ')
+#define MMAL_ENCODING_VP7 MMAL_FOURCC('V', 'P', '7', ' ')
+#define MMAL_ENCODING_VP6 MMAL_FOURCC('V', 'P', '6', ' ')
+#define MMAL_ENCODING_THEORA MMAL_FOURCC('T', 'H', 'E', 'O')
+#define MMAL_ENCODING_SPARK MMAL_FOURCC('S', 'P', 'R', 'K')
+#define MMAL_ENCODING_MJPEG MMAL_FOURCC('M', 'J', 'P', 'G')
+
+#define MMAL_ENCODING_JPEG MMAL_FOURCC('J', 'P', 'E', 'G')
+#define MMAL_ENCODING_GIF MMAL_FOURCC('G', 'I', 'F', ' ')
+#define MMAL_ENCODING_PNG MMAL_FOURCC('P', 'N', 'G', ' ')
+#define MMAL_ENCODING_PPM MMAL_FOURCC('P', 'P', 'M', ' ')
+#define MMAL_ENCODING_TGA MMAL_FOURCC('T', 'G', 'A', ' ')
+#define MMAL_ENCODING_BMP MMAL_FOURCC('B', 'M', 'P', ' ')
+
+#define MMAL_ENCODING_I420 MMAL_FOURCC('I', '4', '2', '0')
+#define MMAL_ENCODING_I420_SLICE MMAL_FOURCC('S', '4', '2', '0')
+#define MMAL_ENCODING_YV12 MMAL_FOURCC('Y', 'V', '1', '2')
+#define MMAL_ENCODING_I422 MMAL_FOURCC('I', '4', '2', '2')
+#define MMAL_ENCODING_I422_SLICE MMAL_FOURCC('S', '4', '2', '2')
+#define MMAL_ENCODING_YUYV MMAL_FOURCC('Y', 'U', 'Y', 'V')
+#define MMAL_ENCODING_YVYU MMAL_FOURCC('Y', 'V', 'Y', 'U')
+#define MMAL_ENCODING_UYVY MMAL_FOURCC('U', 'Y', 'V', 'Y')
+#define MMAL_ENCODING_VYUY MMAL_FOURCC('V', 'Y', 'U', 'Y')
+#define MMAL_ENCODING_NV12 MMAL_FOURCC('N', 'V', '1', '2')
+#define MMAL_ENCODING_NV21 MMAL_FOURCC('N', 'V', '2', '1')
+#define MMAL_ENCODING_ARGB MMAL_FOURCC('A', 'R', 'G', 'B')
+#define MMAL_ENCODING_RGBA MMAL_FOURCC('R', 'G', 'B', 'A')
+#define MMAL_ENCODING_ABGR MMAL_FOURCC('A', 'B', 'G', 'R')
+#define MMAL_ENCODING_BGRA MMAL_FOURCC('B', 'G', 'R', 'A')
+#define MMAL_ENCODING_RGB16 MMAL_FOURCC('R', 'G', 'B', '2')
+#define MMAL_ENCODING_RGB24 MMAL_FOURCC('R', 'G', 'B', '3')
+#define MMAL_ENCODING_RGB32 MMAL_FOURCC('R', 'G', 'B', '4')
+#define MMAL_ENCODING_BGR16 MMAL_FOURCC('B', 'G', 'R', '2')
+#define MMAL_ENCODING_BGR24 MMAL_FOURCC('B', 'G', 'R', '3')
+#define MMAL_ENCODING_BGR32 MMAL_FOURCC('B', 'G', 'R', '4')
+
+/** SAND Video (YUVUV128) format, native format understood by VideoCore.
+ * This format is *not* opaque - if requested you will receive full frames
+ * of YUV_UV video.
+ */
+#define MMAL_ENCODING_YUVUV128 MMAL_FOURCC('S', 'A', 'N', 'D')
+
+/** VideoCore opaque image format, image handles are returned to
+ * the host but not the actual image data.
+ */
+#define MMAL_ENCODING_OPAQUE MMAL_FOURCC('O', 'P', 'Q', 'V')
+
+/** An EGL image handle
+ */
+#define MMAL_ENCODING_EGL_IMAGE MMAL_FOURCC('E', 'G', 'L', 'I')
+
+/* @} */
+
+/** \name Pre-defined audio encodings */
+/* @{ */
+#define MMAL_ENCODING_PCM_UNSIGNED_BE MMAL_FOURCC('P', 'C', 'M', 'U')
+#define MMAL_ENCODING_PCM_UNSIGNED_LE MMAL_FOURCC('p', 'c', 'm', 'u')
+#define MMAL_ENCODING_PCM_SIGNED_BE MMAL_FOURCC('P', 'C', 'M', 'S')
+#define MMAL_ENCODING_PCM_SIGNED_LE MMAL_FOURCC('p', 'c', 'm', 's')
+#define MMAL_ENCODING_PCM_FLOAT_BE MMAL_FOURCC('P', 'C', 'M', 'F')
+#define MMAL_ENCODING_PCM_FLOAT_LE MMAL_FOURCC('p', 'c', 'm', 'f')
+
+/* Pre-defined H264 encoding variants */
+
+/** ISO 14496-10 Annex B byte stream format */
+#define MMAL_ENCODING_VARIANT_H264_DEFAULT 0
+/** ISO 14496-15 AVC stream format */
+#define MMAL_ENCODING_VARIANT_H264_AVC1 MMAL_FOURCC('A', 'V', 'C', '1')
+/** Implicitly delineated NAL units without emulation prevention */
+#define MMAL_ENCODING_VARIANT_H264_RAW MMAL_FOURCC('R', 'A', 'W', ' ')
+
+/** \defgroup MmalColorSpace List of pre-defined video color spaces
+ * This defines a list of common color spaces. This list isn't exhaustive and
+ * is only provided as a convenience to avoid clients having to use FourCC
+ * codes directly. However components are allowed to define and use their own
+ * FourCC codes.
+ */
+/* @{ */
+
+/** Unknown color space */
+#define MMAL_COLOR_SPACE_UNKNOWN 0
+/** ITU-R BT.601-5 [SDTV] */
+#define MMAL_COLOR_SPACE_ITUR_BT601 MMAL_FOURCC('Y', '6', '0', '1')
+/** ITU-R BT.709-3 [HDTV] */
+#define MMAL_COLOR_SPACE_ITUR_BT709 MMAL_FOURCC('Y', '7', '0', '9')
+/** JPEG JFIF */
+#define MMAL_COLOR_SPACE_JPEG_JFIF MMAL_FOURCC('Y', 'J', 'F', 'I')
+/** Title 47 Code of Federal Regulations (2003) 73.682 (a) (20) */
+#define MMAL_COLOR_SPACE_FCC MMAL_FOURCC('Y', 'F', 'C', 'C')
+/** Society of Motion Picture and Television Engineers 240M (1999) */
+#define MMAL_COLOR_SPACE_SMPTE240M MMAL_FOURCC('Y', '2', '4', '0')
+/** ITU-R BT.470-2 System M */
+#define MMAL_COLOR_SPACE_BT470_2_M MMAL_FOURCC('Y', '_', '_', 'M')
+/** ITU-R BT.470-2 System BG */
+#define MMAL_COLOR_SPACE_BT470_2_BG MMAL_FOURCC('Y', '_', 'B', 'G')
+/** JPEG JFIF, but with 16..255 luma */
+#define MMAL_COLOR_SPACE_JFIF_Y16_255 MMAL_FOURCC('Y', 'Y', '1', '6')
+/* @} MmalColorSpace List */
+
+#endif /* MMAL_ENCODINGS_H */
diff --git a/drivers/staging/media/platform/bcm2835/mmal-msg-common.h b/drivers/staging/vc04_services/bcm2835-camera/mmal-msg-common.h
index 66e8a6edf628..66e8a6edf628 100644
--- a/drivers/staging/media/platform/bcm2835/mmal-msg-common.h
+++ b/drivers/staging/vc04_services/bcm2835-camera/mmal-msg-common.h
diff --git a/drivers/staging/vc04_services/bcm2835-camera/mmal-msg-format.h b/drivers/staging/vc04_services/bcm2835-camera/mmal-msg-format.h
new file mode 100644
index 000000000000..24b002e8df0c
--- /dev/null
+++ b/drivers/staging/vc04_services/bcm2835-camera/mmal-msg-format.h
@@ -0,0 +1,99 @@
+/*
+ * Broadcom BM2835 V4L2 driver
+ *
+ * Copyright © 2013 Raspberry Pi (Trading) Ltd.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive
+ * for more details.
+ *
+ * Authors: Vincent Sanders <vincent.sanders@collabora.co.uk>
+ * Dave Stevenson <dsteve@broadcom.com>
+ * Simon Mellor <simellor@broadcom.com>
+ * Luke Diamand <luked@broadcom.com>
+ */
+
+#ifndef MMAL_MSG_FORMAT_H
+#define MMAL_MSG_FORMAT_H
+
+#include "mmal-msg-common.h"
+
+/* MMAL_ES_FORMAT_T */
+
+struct mmal_audio_format {
+ u32 channels; /**< Number of audio channels */
+ u32 sample_rate; /**< Sample rate */
+
+ u32 bits_per_sample; /**< Bits per sample */
+ u32 block_align; /**< Size of a block of data */
+};
+
+struct mmal_video_format {
+ u32 width; /**< Width of frame in pixels */
+ u32 height; /**< Height of frame in rows of pixels */
+ struct mmal_rect crop; /**< Visible region of the frame */
+ struct mmal_rational frame_rate; /**< Frame rate */
+ struct mmal_rational par; /**< Pixel aspect ratio */
+
+ /* FourCC specifying the color space of the video stream. See the
+ * \ref MmalColorSpace "pre-defined color spaces" for some examples.
+ */
+ u32 color_space;
+};
+
+struct mmal_subpicture_format {
+ u32 x_offset;
+ u32 y_offset;
+};
+
+union mmal_es_specific_format {
+ struct mmal_audio_format audio;
+ struct mmal_video_format video;
+ struct mmal_subpicture_format subpicture;
+};
+
+/** Definition of an elementary stream format (MMAL_ES_FORMAT_T) */
+struct mmal_es_format_local {
+ u32 type; /* enum mmal_es_type */
+
+ u32 encoding; /* FourCC specifying encoding of the elementary stream.*/
+ u32 encoding_variant; /* FourCC specifying the specific
+ * encoding variant of the elementary
+ * stream.
+ */
+
+ union mmal_es_specific_format *es; /* Type specific
+ * information for the
+ * elementary stream
+ */
+
+ u32 bitrate; /**< Bitrate in bits per second */
+ u32 flags; /**< Flags describing properties of the elementary stream. */
+
+ u32 extradata_size; /**< Size of the codec specific data */
+ u8 *extradata; /**< Codec specific data */
+};
+
+/** Remote definition of an elementary stream format (MMAL_ES_FORMAT_T) */
+struct mmal_es_format {
+ u32 type; /* enum mmal_es_type */
+
+ u32 encoding; /* FourCC specifying encoding of the elementary stream.*/
+ u32 encoding_variant; /* FourCC specifying the specific
+ * encoding variant of the elementary
+ * stream.
+ */
+
+ u32 es; /* Type specific
+ * information for the
+ * elementary stream
+ */
+
+ u32 bitrate; /**< Bitrate in bits per second */
+ u32 flags; /**< Flags describing properties of the elementary stream. */
+
+ u32 extradata_size; /**< Size of the codec specific data */
+ u32 extradata; /**< Codec specific data */
+};
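+
+/* The remote layout mirrors mmal_es_format_local, with the host pointers
+ * replaced by u32 fields to match the 32-bit VideoCore view of the message
+ * (see the serialisation notes in mmal-msg.h).
+ */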
+
+#endif /* MMAL_MSG_FORMAT_H */
diff --git a/drivers/staging/vc04_services/bcm2835-camera/mmal-msg-port.h b/drivers/staging/vc04_services/bcm2835-camera/mmal-msg-port.h
new file mode 100644
index 000000000000..84a0f4b717ef
--- /dev/null
+++ b/drivers/staging/vc04_services/bcm2835-camera/mmal-msg-port.h
@@ -0,0 +1,109 @@
+/*
+ * Broadcom BM2835 V4L2 driver
+ *
+ * Copyright © 2013 Raspberry Pi (Trading) Ltd.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive
+ * for more details.
+ *
+ * Authors: Vincent Sanders <vincent.sanders@collabora.co.uk>
+ * Dave Stevenson <dsteve@broadcom.com>
+ * Simon Mellor <simellor@broadcom.com>
+ * Luke Diamand <luked@broadcom.com>
+ */
+
+/* MMAL_PORT_TYPE_T */
+enum mmal_port_type {
+ MMAL_PORT_TYPE_UNKNOWN = 0, /**< Unknown port type */
+ MMAL_PORT_TYPE_CONTROL, /**< Control port */
+ MMAL_PORT_TYPE_INPUT, /**< Input port */
+ MMAL_PORT_TYPE_OUTPUT, /**< Output port */
+ MMAL_PORT_TYPE_CLOCK, /**< Clock port */
+};
+
+/** The port is pass-through and doesn't need buffer headers allocated */
+#define MMAL_PORT_CAPABILITY_PASSTHROUGH 0x01
+/** The port wants to allocate the buffer payloads.
+ * This signals a preference that payload allocation should be done
+ * on this port for efficiency reasons.
+ */
+#define MMAL_PORT_CAPABILITY_ALLOCATION 0x02
+/** The port supports format change events.
+ * This applies to input ports and is used to let the client know
+ * whether the port supports being reconfigured via a format
+ * change event (i.e. without having to disable the port).
+ */
+#define MMAL_PORT_CAPABILITY_SUPPORTS_EVENT_FORMAT_CHANGE 0x04
+
+/* mmal port structure (MMAL_PORT_T)
+ *
+ * Most elements are informational only; the pointer values for
+ * interrogation messages are generally provided as additional
+ * structures within the message. When used to set values, only the
+ * buffer_num, buffer_size and userdata parameters are writable.
+ */
+struct mmal_port {
+ u32 priv; /* Private member used by the framework */
+ u32 name; /* Port name. Used for debugging purposes (RO) */
+
+ u32 type; /* Type of the port (RO) enum mmal_port_type */
+ u16 index; /* Index of the port in its type list (RO) */
+ u16 index_all; /* Index of the port in the list of all ports (RO) */
+
+ u32 is_enabled; /* Indicates whether the port is enabled or not (RO) */
+ u32 format; /* Format of the elementary stream */
+
+ u32 buffer_num_min; /* Minimum number of buffers the port
+ * requires (RO). This is set by the
+ * component.
+ */
+
+ u32 buffer_size_min; /* Minimum size of buffers the port
+ * requires (RO). This is set by the
+ * component.
+ */
+
+ u32 buffer_alignment_min; /* Minimum alignment requirement for
+ * the buffers (RO). A value of
+ * zero means no special alignment
+ * requirements. This is set by the
+ * component.
+ */
+
+ u32 buffer_num_recommended; /* Number of buffers the port
+ * recommends for optimal
+ * performance (RO). A value of
+ * zero means no special
+ * recommendation. This is set
+ * by the component.
+ */
+
+ u32 buffer_size_recommended; /* Size of buffers the port
+ * recommends for optimal
+ * performance (RO). A value of
+ * zero means no special
+ * recommendation. This is set
+ * by the component.
+ */
+
+ u32 buffer_num; /* Actual number of buffers the port will use.
+ * This is set by the client.
+ */
+
+ u32 buffer_size; /* Actual maximum size of the buffers that
+ * will be sent to the port. This is set by
+ * the client.
+ */
+
+ u32 component; /* Component this port belongs to (Read Only) */
+
+ u32 userdata; /* Field reserved for use by the client */
+
+ u32 capabilities; /* Flags describing the capabilities of a
+ * port (RO). Bitwise combination of \ref
+ * portcapabilities "Port capabilities"
+ * values.
+ */
+
+};
diff --git a/drivers/staging/vc04_services/bcm2835-camera/mmal-msg.h b/drivers/staging/vc04_services/bcm2835-camera/mmal-msg.h
new file mode 100644
index 000000000000..52cdf4da1b47
--- /dev/null
+++ b/drivers/staging/vc04_services/bcm2835-camera/mmal-msg.h
@@ -0,0 +1,399 @@
+/*
+ * Broadcom BM2835 V4L2 driver
+ *
+ * Copyright © 2013 Raspberry Pi (Trading) Ltd.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive
+ * for more details.
+ *
+ * Authors: Vincent Sanders <vincent.sanders@collabora.co.uk>
+ * Dave Stevenson <dsteve@broadcom.com>
+ * Simon Mellor <simellor@broadcom.com>
+ * Luke Diamand <luked@broadcom.com>
+ */
+
+/* All the data structures which serialise the MMAL protocol. Note that
+ * these are directly mapped onto the received message data.
+ *
+ * BEWARE: They seem to *assume* pointers are u32 and that there is no
+ * structure padding!
+ *
+ * NOTE: this implementation uses kernel types to ensure sizes. Rather
+ * than assigning values to enums to force their size, the
+ * implementation uses fixed size types and not the enums (though the
+ * comments give the actual enum type).
+ */
+
+#define VC_MMAL_VER 15
+#define VC_MMAL_MIN_VER 10
+#define VC_MMAL_SERVER_NAME MAKE_FOURCC("mmal")
+
+/* max total message size is 512 bytes */
+#define MMAL_MSG_MAX_SIZE 512
+/* with six 32-bit header elements, the max payload is therefore 488 bytes */
+#define MMAL_MSG_MAX_PAYLOAD 488
+
+#include "mmal-msg-common.h"
+#include "mmal-msg-format.h"
+#include "mmal-msg-port.h"
+
+enum mmal_msg_type {
+ MMAL_MSG_TYPE_QUIT = 1,
+ MMAL_MSG_TYPE_SERVICE_CLOSED,
+ MMAL_MSG_TYPE_GET_VERSION,
+ MMAL_MSG_TYPE_COMPONENT_CREATE,
+ MMAL_MSG_TYPE_COMPONENT_DESTROY, /* 5 */
+ MMAL_MSG_TYPE_COMPONENT_ENABLE,
+ MMAL_MSG_TYPE_COMPONENT_DISABLE,
+ MMAL_MSG_TYPE_PORT_INFO_GET,
+ MMAL_MSG_TYPE_PORT_INFO_SET,
+ MMAL_MSG_TYPE_PORT_ACTION, /* 10 */
+ MMAL_MSG_TYPE_BUFFER_FROM_HOST,
+ MMAL_MSG_TYPE_BUFFER_TO_HOST,
+ MMAL_MSG_TYPE_GET_STATS,
+ MMAL_MSG_TYPE_PORT_PARAMETER_SET,
+ MMAL_MSG_TYPE_PORT_PARAMETER_GET, /* 15 */
+ MMAL_MSG_TYPE_EVENT_TO_HOST,
+ MMAL_MSG_TYPE_GET_CORE_STATS_FOR_PORT,
+ MMAL_MSG_TYPE_OPAQUE_ALLOCATOR,
+ MMAL_MSG_TYPE_CONSUME_MEM,
+ MMAL_MSG_TYPE_LMK, /* 20 */
+ MMAL_MSG_TYPE_OPAQUE_ALLOCATOR_DESC,
+ MMAL_MSG_TYPE_DRM_GET_LHS32,
+ MMAL_MSG_TYPE_DRM_GET_TIME,
+ MMAL_MSG_TYPE_BUFFER_FROM_HOST_ZEROLEN,
+ MMAL_MSG_TYPE_PORT_FLUSH, /* 25 */
+ MMAL_MSG_TYPE_HOST_LOG,
+ MMAL_MSG_TYPE_MSG_LAST
+};
+
+/* port action request messages differ depending on the action type */
+enum mmal_msg_port_action_type {
+	MMAL_MSG_PORT_ACTION_TYPE_UNKNOWN = 0,	/* Unknown action */
+ MMAL_MSG_PORT_ACTION_TYPE_ENABLE, /* Enable a port */
+ MMAL_MSG_PORT_ACTION_TYPE_DISABLE, /* Disable a port */
+ MMAL_MSG_PORT_ACTION_TYPE_FLUSH, /* Flush a port */
+ MMAL_MSG_PORT_ACTION_TYPE_CONNECT, /* Connect ports */
+ MMAL_MSG_PORT_ACTION_TYPE_DISCONNECT, /* Disconnect ports */
+	MMAL_MSG_PORT_ACTION_TYPE_SET_REQUIREMENTS, /* Set buffer requirements */
+};
+
+struct mmal_msg_header {
+ u32 magic;
+ u32 type; /** enum mmal_msg_type */
+
+ /* Opaque handle to the control service */
+ u32 control_service;
+
+ u32 context; /** a u32 per message context */
+ u32 status; /** The status of the vchiq operation */
+ u32 padding;
+};
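+
+/* Size note: the header above is six u32 fields, i.e. 24 bytes, which is
+ * where MMAL_MSG_MAX_PAYLOAD comes from: 512 (MMAL_MSG_MAX_SIZE) - 24 = 488.
+ */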
+
+/* Send from VC to host to report version */
+struct mmal_msg_version {
+ u32 flags;
+ u32 major;
+ u32 minor;
+ u32 minimum;
+};
+
+/* request to VC to create component */
+struct mmal_msg_component_create {
+ u32 client_component; /* component context */
+ char name[128];
+ u32 pid; /* For debug */
+};
+
+/* reply from VC to component creation request */
+struct mmal_msg_component_create_reply {
+	u32 status;	/* enum mmal_msg_status - how does this differ from
+ * the one in the header?
+ */
+ u32 component_handle; /* VideoCore handle for component */
+ u32 input_num; /* Number of input ports */
+ u32 output_num; /* Number of output ports */
+ u32 clock_num; /* Number of clock ports */
+};
+
+/* request to VC to destroy a component */
+struct mmal_msg_component_destroy {
+ u32 component_handle;
+};
+
+struct mmal_msg_component_destroy_reply {
+ u32 status; /** The component destruction status */
+};
+
+/* request and reply to VC to enable a component */
+struct mmal_msg_component_enable {
+ u32 component_handle;
+};
+
+struct mmal_msg_component_enable_reply {
+ u32 status; /** The component enable status */
+};
+
+/* request and reply to VC to disable a component */
+struct mmal_msg_component_disable {
+ u32 component_handle;
+};
+
+struct mmal_msg_component_disable_reply {
+ u32 status; /** The component disable status */
+};
+
+/* request to VC to get port information */
+struct mmal_msg_port_info_get {
+ u32 component_handle; /* component handle port is associated with */
+ u32 port_type; /* enum mmal_msg_port_type */
+ u32 index; /* port index to query */
+};
+
+/* reply from VC to get port info request */
+struct mmal_msg_port_info_get_reply {
+ u32 status; /** enum mmal_msg_status */
+ u32 component_handle; /* component handle port is associated with */
+ u32 port_type; /* enum mmal_msg_port_type */
+ u32 port_index; /* port indexed in query */
+ s32 found; /* unused */
+ u32 port_handle; /**< Handle to use for this port */
+ struct mmal_port port;
+ struct mmal_es_format format; /* elementary stream format */
+ union mmal_es_specific_format es; /* es type specific data */
+ u8 extradata[MMAL_FORMAT_EXTRADATA_MAX_SIZE]; /* es extra data */
+};
+
+/* request to VC to set port information */
+struct mmal_msg_port_info_set {
+ u32 component_handle;
+ u32 port_type; /* enum mmal_msg_port_type */
+ u32 port_index; /* port indexed in query */
+ struct mmal_port port;
+ struct mmal_es_format format;
+ union mmal_es_specific_format es;
+ u8 extradata[MMAL_FORMAT_EXTRADATA_MAX_SIZE];
+};
+
+/* reply from VC to port info set request */
+struct mmal_msg_port_info_set_reply {
+ u32 status;
+ u32 component_handle; /* component handle port is associated with */
+ u32 port_type; /* enum mmal_msg_port_type */
+ u32 index; /* port indexed in query */
+ s32 found; /* unused */
+ u32 port_handle; /**< Handle to use for this port */
+ struct mmal_port port;
+ struct mmal_es_format format;
+ union mmal_es_specific_format es;
+ u8 extradata[MMAL_FORMAT_EXTRADATA_MAX_SIZE];
+};
+
+/* port action requests that take a mmal_port as a parameter */
+struct mmal_msg_port_action_port {
+ u32 component_handle;
+ u32 port_handle;
+ u32 action; /* enum mmal_msg_port_action_type */
+ struct mmal_port port;
+};
+
+/* port action requests that take handles as a parameter */
+struct mmal_msg_port_action_handle {
+ u32 component_handle;
+ u32 port_handle;
+ u32 action; /* enum mmal_msg_port_action_type */
+ u32 connect_component_handle;
+ u32 connect_port_handle;
+};
+
+struct mmal_msg_port_action_reply {
+ u32 status; /** The port action operation status */
+};
+
+/* MMAL buffer transfer */
+
+/** Size of space reserved in a buffer message for short messages. */
+#define MMAL_VC_SHORT_DATA 128
+
+/** Signals that the current payload is the end of the stream of data */
+#define MMAL_BUFFER_HEADER_FLAG_EOS (1<<0)
+/** Signals that the start of the current payload starts a frame */
+#define MMAL_BUFFER_HEADER_FLAG_FRAME_START (1<<1)
+/** Signals that the end of the current payload ends a frame */
+#define MMAL_BUFFER_HEADER_FLAG_FRAME_END (1<<2)
+/** Signals that the current payload contains only complete frames (>1) */
+#define MMAL_BUFFER_HEADER_FLAG_FRAME \
+ (MMAL_BUFFER_HEADER_FLAG_FRAME_START|MMAL_BUFFER_HEADER_FLAG_FRAME_END)
+/** Signals that the current payload is a keyframe (i.e. self decodable) */
+#define MMAL_BUFFER_HEADER_FLAG_KEYFRAME (1<<3)
+/** Signals a discontinuity in the stream of data (e.g. after a seek).
+ * Can be used for instance by a decoder to reset its state
+ */
+#define MMAL_BUFFER_HEADER_FLAG_DISCONTINUITY (1<<4)
+/** Signals a buffer containing some kind of config data for the component
+ * (e.g. codec config data)
+ */
+#define MMAL_BUFFER_HEADER_FLAG_CONFIG (1<<5)
+/** Signals an encrypted payload */
+#define MMAL_BUFFER_HEADER_FLAG_ENCRYPTED (1<<6)
+/** Signals a buffer containing side information */
+#define MMAL_BUFFER_HEADER_FLAG_CODECSIDEINFO (1<<7)
+/** Signals a buffer which is the snapshot/postview image from a stills
+ * capture
+ */
+#define MMAL_BUFFER_HEADER_FLAGS_SNAPSHOT (1<<8)
+/** Signals a buffer which contains data known to be corrupted */
+#define MMAL_BUFFER_HEADER_FLAG_CORRUPTED (1<<9)
+/** Signals that a buffer failed to be transmitted */
+#define MMAL_BUFFER_HEADER_FLAG_TRANSMISSION_FAILED (1<<10)
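+
+/* The flags above form a plain bitmask. As an illustration (hdr being a
+ * hypothetical pointer to a received struct mmal_buffer_header),
+ * end-of-stream detection is just:
+ *
+ *	if (hdr->flags & MMAL_BUFFER_HEADER_FLAG_EOS)
+ *		... last buffer of the stream ...
+ */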
+
+struct mmal_driver_buffer {
+ u32 magic;
+ u32 component_handle;
+ u32 port_handle;
+ u32 client_context;
+};
+
+/* buffer header */
+struct mmal_buffer_header {
+ u32 next; /* next header */
+ u32 priv; /* framework private data */
+ u32 cmd;
+ u32 data;
+ u32 alloc_size;
+ u32 length;
+ u32 offset;
+ u32 flags;
+ s64 pts;
+ s64 dts;
+ u32 type;
+ u32 user_data;
+};
+
+struct mmal_buffer_header_type_specific {
+ union {
+ struct {
+ u32 planes;
+ u32 offset[4];
+ u32 pitch[4];
+ u32 flags;
+ } video;
+ } u;
+};
+
+struct mmal_msg_buffer_from_host {
+ /* The front 32 bytes of the buffer header are copied
+ * back to us in the reply to allow for context. This
+ * area is used to store two mmal_driver_buffer structures to
+ * allow for multiple concurrent service users.
+ */
+ /* control data */
+ struct mmal_driver_buffer drvbuf;
+
+ /* referenced control data for passthrough buffer management */
+ struct mmal_driver_buffer drvbuf_ref;
+ struct mmal_buffer_header buffer_header; /* buffer header itself */
+ struct mmal_buffer_header_type_specific buffer_header_type_specific;
+ s32 is_zero_copy;
+ s32 has_reference;
+
+ /** allows short data to be xfered in control message */
+ u32 payload_in_message;
+ u8 short_data[MMAL_VC_SHORT_DATA];
+};
+
+/* port parameter setting */
+
+#define MMAL_WORKER_PORT_PARAMETER_SPACE 96
+
+struct mmal_msg_port_parameter_set {
+ u32 component_handle; /* component */
+ u32 port_handle; /* port */
+ u32 id; /* Parameter ID */
+ u32 size; /* Parameter size */
+ uint32_t value[MMAL_WORKER_PORT_PARAMETER_SPACE];
+};
+
+struct mmal_msg_port_parameter_set_reply {
+ u32 status; /* enum mmal_msg_status todo: how does this
+			 * differ from the one in the header?
+ */
+};
+
+/* port parameter getting */
+
+struct mmal_msg_port_parameter_get {
+ u32 component_handle; /* component */
+ u32 port_handle; /* port */
+ u32 id; /* Parameter ID */
+ u32 size; /* Parameter size */
+};
+
+struct mmal_msg_port_parameter_get_reply {
+ u32 status; /* Status of mmal_port_parameter_get call */
+ u32 id; /* Parameter ID */
+ u32 size; /* Parameter size */
+ uint32_t value[MMAL_WORKER_PORT_PARAMETER_SPACE];
+};
+
+/* event messages */
+#define MMAL_WORKER_EVENT_SPACE 256
+
+struct mmal_msg_event_to_host {
+ u32 client_component; /* component context */
+
+ u32 port_type;
+ u32 port_num;
+
+ u32 cmd;
+ u32 length;
+ u8 data[MMAL_WORKER_EVENT_SPACE];
+ u32 delayed_buffer;
+};
+
+/* all mmal messages are serialised through this structure */
+struct mmal_msg {
+ /* header */
+ struct mmal_msg_header h;
+ /* payload */
+ union {
+ struct mmal_msg_version version;
+
+ struct mmal_msg_component_create component_create;
+ struct mmal_msg_component_create_reply component_create_reply;
+
+ struct mmal_msg_component_destroy component_destroy;
+ struct mmal_msg_component_destroy_reply component_destroy_reply;
+
+ struct mmal_msg_component_enable component_enable;
+ struct mmal_msg_component_enable_reply component_enable_reply;
+
+ struct mmal_msg_component_disable component_disable;
+ struct mmal_msg_component_disable_reply component_disable_reply;
+
+ struct mmal_msg_port_info_get port_info_get;
+ struct mmal_msg_port_info_get_reply port_info_get_reply;
+
+ struct mmal_msg_port_info_set port_info_set;
+ struct mmal_msg_port_info_set_reply port_info_set_reply;
+
+ struct mmal_msg_port_action_port port_action_port;
+ struct mmal_msg_port_action_handle port_action_handle;
+ struct mmal_msg_port_action_reply port_action_reply;
+
+ struct mmal_msg_buffer_from_host buffer_from_host;
+
+ struct mmal_msg_port_parameter_set port_parameter_set;
+ struct mmal_msg_port_parameter_set_reply
+ port_parameter_set_reply;
+ struct mmal_msg_port_parameter_get
+ port_parameter_get;
+ struct mmal_msg_port_parameter_get_reply
+ port_parameter_get_reply;
+
+ struct mmal_msg_event_to_host event_to_host;
+
+ u8 payload[MMAL_MSG_MAX_PAYLOAD];
+ } u;
+};
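+
+/* Sizing note (illustrative): a sender queues only the header plus the
+ * union member actually used, not the whole union, so the length handed
+ * to the transport for, e.g., a component enable request would be
+ *
+ *	sizeof(struct mmal_msg_header) + sizeof(m.u.component_enable)
+ *
+ * where m is a struct mmal_msg built by the caller.
+ */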
diff --git a/drivers/staging/vc04_services/bcm2835-camera/mmal-parameters.h b/drivers/staging/vc04_services/bcm2835-camera/mmal-parameters.h
new file mode 100644
index 000000000000..e7300229842d
--- /dev/null
+++ b/drivers/staging/vc04_services/bcm2835-camera/mmal-parameters.h
@@ -0,0 +1,687 @@
+/*
+ * Broadcom BCM2835 V4L2 driver
+ *
+ * Copyright © 2013 Raspberry Pi (Trading) Ltd.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive
+ * for more details.
+ *
+ * Authors: Vincent Sanders <vincent.sanders@collabora.co.uk>
+ * Dave Stevenson <dsteve@broadcom.com>
+ * Simon Mellor <simellor@broadcom.com>
+ * Luke Diamand <luked@broadcom.com>
+ */
+
+/* common parameters */
+
+/** @name Parameter groups
+ * Parameters are divided into groups, and then allocated sequentially within
+ * a group using an enum.
+ * @{
+ */
+
+/** Common parameter ID group, used with many types of component. */
+#define MMAL_PARAMETER_GROUP_COMMON (0<<16)
+/** Camera-specific parameter ID group. */
+#define MMAL_PARAMETER_GROUP_CAMERA (1<<16)
+/** Video-specific parameter ID group. */
+#define MMAL_PARAMETER_GROUP_VIDEO (2<<16)
+/** Audio-specific parameter ID group. */
+#define MMAL_PARAMETER_GROUP_AUDIO (3<<16)
+/** Clock-specific parameter ID group. */
+#define MMAL_PARAMETER_GROUP_CLOCK (4<<16)
+/** Miracast-specific parameter ID group. */
+#define MMAL_PARAMETER_GROUP_MIRACAST (5<<16)
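+
+/* Worked example: a parameter ID is its group base plus an index from the
+ * enums below, so MMAL_PARAMETER_ROTATION (the third entry in the camera
+ * group) has the value MMAL_PARAMETER_GROUP_CAMERA + 2 = 0x10002.
+ */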
+
+/* Common parameters */
+enum mmal_parameter_common_type {
+ MMAL_PARAMETER_UNUSED /**< Never a valid parameter ID */
+ = MMAL_PARAMETER_GROUP_COMMON,
+ MMAL_PARAMETER_SUPPORTED_ENCODINGS, /**< MMAL_PARAMETER_ENCODING_T */
+ MMAL_PARAMETER_URI, /**< MMAL_PARAMETER_URI_T */
+
+ /** MMAL_PARAMETER_CHANGE_EVENT_REQUEST_T */
+ MMAL_PARAMETER_CHANGE_EVENT_REQUEST,
+
+ /** MMAL_PARAMETER_BOOLEAN_T */
+ MMAL_PARAMETER_ZERO_COPY,
+
+	/** MMAL_PARAMETER_BUFFER_REQUIREMENTS_T */
+ MMAL_PARAMETER_BUFFER_REQUIREMENTS,
+
+ MMAL_PARAMETER_STATISTICS, /**< MMAL_PARAMETER_STATISTICS_T */
+ MMAL_PARAMETER_CORE_STATISTICS, /**< MMAL_PARAMETER_CORE_STATISTICS_T */
+ MMAL_PARAMETER_MEM_USAGE, /**< MMAL_PARAMETER_MEM_USAGE_T */
+ MMAL_PARAMETER_BUFFER_FLAG_FILTER, /**< MMAL_PARAMETER_UINT32_T */
+ MMAL_PARAMETER_SEEK, /**< MMAL_PARAMETER_SEEK_T */
+ MMAL_PARAMETER_POWERMON_ENABLE, /**< MMAL_PARAMETER_BOOLEAN_T */
+ MMAL_PARAMETER_LOGGING, /**< MMAL_PARAMETER_LOGGING_T */
+ MMAL_PARAMETER_SYSTEM_TIME, /**< MMAL_PARAMETER_UINT64_T */
+ MMAL_PARAMETER_NO_IMAGE_PADDING /**< MMAL_PARAMETER_BOOLEAN_T */
+};
+
+/* camera parameters */
+
+enum mmal_parameter_camera_type {
+ /* 0 */
+ /** @ref MMAL_PARAMETER_THUMBNAIL_CONFIG_T */
+ MMAL_PARAMETER_THUMBNAIL_CONFIGURATION
+ = MMAL_PARAMETER_GROUP_CAMERA,
+ MMAL_PARAMETER_CAPTURE_QUALITY, /**< Unused? */
+ MMAL_PARAMETER_ROTATION, /**< @ref MMAL_PARAMETER_INT32_T */
+ MMAL_PARAMETER_EXIF_DISABLE, /**< @ref MMAL_PARAMETER_BOOLEAN_T */
+ MMAL_PARAMETER_EXIF, /**< @ref MMAL_PARAMETER_EXIF_T */
+ MMAL_PARAMETER_AWB_MODE, /**< @ref MMAL_PARAM_AWBMODE_T */
+ MMAL_PARAMETER_IMAGE_EFFECT, /**< @ref MMAL_PARAMETER_IMAGEFX_T */
+ MMAL_PARAMETER_COLOUR_EFFECT, /**< @ref MMAL_PARAMETER_COLOURFX_T */
+ MMAL_PARAMETER_FLICKER_AVOID, /**< @ref MMAL_PARAMETER_FLICKERAVOID_T */
+ MMAL_PARAMETER_FLASH, /**< @ref MMAL_PARAMETER_FLASH_T */
+ MMAL_PARAMETER_REDEYE, /**< @ref MMAL_PARAMETER_REDEYE_T */
+ MMAL_PARAMETER_FOCUS, /**< @ref MMAL_PARAMETER_FOCUS_T */
+ MMAL_PARAMETER_FOCAL_LENGTHS, /**< Unused? */
+ MMAL_PARAMETER_EXPOSURE_COMP, /**< @ref MMAL_PARAMETER_INT32_T */
+ MMAL_PARAMETER_ZOOM, /**< @ref MMAL_PARAMETER_SCALEFACTOR_T */
+ MMAL_PARAMETER_MIRROR, /**< @ref MMAL_PARAMETER_MIRROR_T */
+
+ /* 0x10 */
+ MMAL_PARAMETER_CAMERA_NUM, /**< @ref MMAL_PARAMETER_UINT32_T */
+ MMAL_PARAMETER_CAPTURE, /**< @ref MMAL_PARAMETER_BOOLEAN_T */
+ MMAL_PARAMETER_EXPOSURE_MODE, /**< @ref MMAL_PARAMETER_EXPOSUREMODE_T */
+ MMAL_PARAMETER_EXP_METERING_MODE, /**< @ref MMAL_PARAMETER_EXPOSUREMETERINGMODE_T */
+ MMAL_PARAMETER_FOCUS_STATUS, /**< @ref MMAL_PARAMETER_FOCUS_STATUS_T */
+ MMAL_PARAMETER_CAMERA_CONFIG, /**< @ref MMAL_PARAMETER_CAMERA_CONFIG_T */
+ MMAL_PARAMETER_CAPTURE_STATUS, /**< @ref MMAL_PARAMETER_CAPTURE_STATUS_T */
+ MMAL_PARAMETER_FACE_TRACK, /**< @ref MMAL_PARAMETER_FACE_TRACK_T */
+ MMAL_PARAMETER_DRAW_BOX_FACES_AND_FOCUS, /**< @ref MMAL_PARAMETER_BOOLEAN_T */
+ MMAL_PARAMETER_JPEG_Q_FACTOR, /**< @ref MMAL_PARAMETER_UINT32_T */
+ MMAL_PARAMETER_FRAME_RATE, /**< @ref MMAL_PARAMETER_FRAME_RATE_T */
+ MMAL_PARAMETER_USE_STC, /**< @ref MMAL_PARAMETER_CAMERA_STC_MODE_T */
+ MMAL_PARAMETER_CAMERA_INFO, /**< @ref MMAL_PARAMETER_CAMERA_INFO_T */
+ MMAL_PARAMETER_VIDEO_STABILISATION, /**< @ref MMAL_PARAMETER_BOOLEAN_T */
+ MMAL_PARAMETER_FACE_TRACK_RESULTS, /**< @ref MMAL_PARAMETER_FACE_TRACK_RESULTS_T */
+ MMAL_PARAMETER_ENABLE_RAW_CAPTURE, /**< @ref MMAL_PARAMETER_BOOLEAN_T */
+
+ /* 0x20 */
+ MMAL_PARAMETER_DPF_FILE, /**< @ref MMAL_PARAMETER_URI_T */
+ MMAL_PARAMETER_ENABLE_DPF_FILE, /**< @ref MMAL_PARAMETER_BOOLEAN_T */
+ MMAL_PARAMETER_DPF_FAIL_IS_FATAL, /**< @ref MMAL_PARAMETER_BOOLEAN_T */
+ MMAL_PARAMETER_CAPTURE_MODE, /**< @ref MMAL_PARAMETER_CAPTUREMODE_T */
+ MMAL_PARAMETER_FOCUS_REGIONS, /**< @ref MMAL_PARAMETER_FOCUS_REGIONS_T */
+ MMAL_PARAMETER_INPUT_CROP, /**< @ref MMAL_PARAMETER_INPUT_CROP_T */
+ MMAL_PARAMETER_SENSOR_INFORMATION, /**< @ref MMAL_PARAMETER_SENSOR_INFORMATION_T */
+ MMAL_PARAMETER_FLASH_SELECT, /**< @ref MMAL_PARAMETER_FLASH_SELECT_T */
+ MMAL_PARAMETER_FIELD_OF_VIEW, /**< @ref MMAL_PARAMETER_FIELD_OF_VIEW_T */
+ MMAL_PARAMETER_HIGH_DYNAMIC_RANGE, /**< @ref MMAL_PARAMETER_BOOLEAN_T */
+ MMAL_PARAMETER_DYNAMIC_RANGE_COMPRESSION, /**< @ref MMAL_PARAMETER_DRC_T */
+ MMAL_PARAMETER_ALGORITHM_CONTROL, /**< @ref MMAL_PARAMETER_ALGORITHM_CONTROL_T */
+ MMAL_PARAMETER_SHARPNESS, /**< @ref MMAL_PARAMETER_RATIONAL_T */
+ MMAL_PARAMETER_CONTRAST, /**< @ref MMAL_PARAMETER_RATIONAL_T */
+ MMAL_PARAMETER_BRIGHTNESS, /**< @ref MMAL_PARAMETER_RATIONAL_T */
+ MMAL_PARAMETER_SATURATION, /**< @ref MMAL_PARAMETER_RATIONAL_T */
+
+ /* 0x30 */
+ MMAL_PARAMETER_ISO, /**< @ref MMAL_PARAMETER_UINT32_T */
+ MMAL_PARAMETER_ANTISHAKE, /**< @ref MMAL_PARAMETER_BOOLEAN_T */
+
+ /** @ref MMAL_PARAMETER_IMAGEFX_PARAMETERS_T */
+ MMAL_PARAMETER_IMAGE_EFFECT_PARAMETERS,
+
+ /** @ref MMAL_PARAMETER_BOOLEAN_T */
+ MMAL_PARAMETER_CAMERA_BURST_CAPTURE,
+
+ /** @ref MMAL_PARAMETER_UINT32_T */
+ MMAL_PARAMETER_CAMERA_MIN_ISO,
+
+ /** @ref MMAL_PARAMETER_CAMERA_USE_CASE_T */
+ MMAL_PARAMETER_CAMERA_USE_CASE,
+
+	/** @ref MMAL_PARAMETER_BOOLEAN_T */
+ MMAL_PARAMETER_CAPTURE_STATS_PASS,
+
+ /** @ref MMAL_PARAMETER_UINT32_T */
+ MMAL_PARAMETER_CAMERA_CUSTOM_SENSOR_CONFIG,
+
+ /** @ref MMAL_PARAMETER_BOOLEAN_T */
+ MMAL_PARAMETER_ENABLE_REGISTER_FILE,
+
+ /** @ref MMAL_PARAMETER_BOOLEAN_T */
+ MMAL_PARAMETER_REGISTER_FAIL_IS_FATAL,
+
+ /** @ref MMAL_PARAMETER_CONFIGFILE_T */
+ MMAL_PARAMETER_CONFIGFILE_REGISTERS,
+
+ /** @ref MMAL_PARAMETER_CONFIGFILE_CHUNK_T */
+ MMAL_PARAMETER_CONFIGFILE_CHUNK_REGISTERS,
+ MMAL_PARAMETER_JPEG_ATTACH_LOG, /**< @ref MMAL_PARAMETER_BOOLEAN_T */
+ MMAL_PARAMETER_ZERO_SHUTTER_LAG, /**< @ref MMAL_PARAMETER_ZEROSHUTTERLAG_T */
+ MMAL_PARAMETER_FPS_RANGE, /**< @ref MMAL_PARAMETER_FPS_RANGE_T */
+ MMAL_PARAMETER_CAPTURE_EXPOSURE_COMP, /**< @ref MMAL_PARAMETER_INT32_T */
+
+ /* 0x40 */
+ MMAL_PARAMETER_SW_SHARPEN_DISABLE, /**< @ref MMAL_PARAMETER_BOOLEAN_T */
+ MMAL_PARAMETER_FLASH_REQUIRED, /**< @ref MMAL_PARAMETER_BOOLEAN_T */
+ MMAL_PARAMETER_SW_SATURATION_DISABLE, /**< @ref MMAL_PARAMETER_BOOLEAN_T */
+ MMAL_PARAMETER_SHUTTER_SPEED, /**< Takes a @ref MMAL_PARAMETER_UINT32_T */
+ MMAL_PARAMETER_CUSTOM_AWB_GAINS, /**< Takes a @ref MMAL_PARAMETER_AWB_GAINS_T */
+};
+
+struct mmal_parameter_rational {
+ s32 num; /**< Numerator */
+ s32 den; /**< Denominator */
+};
+
+enum mmal_parameter_camera_config_timestamp_mode {
+ MMAL_PARAM_TIMESTAMP_MODE_ZERO = 0, /* Always timestamp frames as 0 */
+ MMAL_PARAM_TIMESTAMP_MODE_RAW_STC, /* Use the raw STC value
+ * for the frame timestamp
+ */
+ MMAL_PARAM_TIMESTAMP_MODE_RESET_STC, /* Use the STC timestamp
+ * but subtract the
+ * timestamp of the first
+ * frame sent to give a
+ * zero based timestamp.
+ */
+};
+
+struct mmal_parameter_fps_range {
+	/** Low end of the permitted framerate range */
+	struct mmal_parameter_rational fps_low;
+	/** High end of the permitted framerate range */
+ struct mmal_parameter_rational fps_high;
+};
+
+/* camera configuration parameter */
+struct mmal_parameter_camera_config {
+ /* Parameters for setting up the image pools */
+ u32 max_stills_w; /* Max size of stills capture */
+ u32 max_stills_h;
+ u32 stills_yuv422; /* Allow YUV422 stills capture */
+ u32 one_shot_stills; /* Continuous or one shot stills captures. */
+
+ u32 max_preview_video_w; /* Max size of the preview or video
+ * capture frames
+ */
+ u32 max_preview_video_h;
+ u32 num_preview_video_frames;
+
+ /** Sets the height of the circular buffer for stills capture. */
+ u32 stills_capture_circular_buffer_height;
+
+ /** Allows preview/encode to resume as fast as possible after the stills
+ * input frame has been received, and then processes the still frame in
+ * the background whilst preview/encode has resumed.
+ * Actual mode is controlled by MMAL_PARAMETER_CAPTURE_MODE.
+ */
+ u32 fast_preview_resume;
+
+ /** Selects algorithm for timestamping frames if
+ * there is no clock component connected.
+ * enum mmal_parameter_camera_config_timestamp_mode
+ */
+ s32 use_stc_timestamp;
+};
+
+enum mmal_parameter_exposuremode {
+ MMAL_PARAM_EXPOSUREMODE_OFF,
+ MMAL_PARAM_EXPOSUREMODE_AUTO,
+ MMAL_PARAM_EXPOSUREMODE_NIGHT,
+ MMAL_PARAM_EXPOSUREMODE_NIGHTPREVIEW,
+ MMAL_PARAM_EXPOSUREMODE_BACKLIGHT,
+ MMAL_PARAM_EXPOSUREMODE_SPOTLIGHT,
+ MMAL_PARAM_EXPOSUREMODE_SPORTS,
+ MMAL_PARAM_EXPOSUREMODE_SNOW,
+ MMAL_PARAM_EXPOSUREMODE_BEACH,
+ MMAL_PARAM_EXPOSUREMODE_VERYLONG,
+ MMAL_PARAM_EXPOSUREMODE_FIXEDFPS,
+ MMAL_PARAM_EXPOSUREMODE_ANTISHAKE,
+ MMAL_PARAM_EXPOSUREMODE_FIREWORKS,
+};
+
+enum mmal_parameter_exposuremeteringmode {
+ MMAL_PARAM_EXPOSUREMETERINGMODE_AVERAGE,
+ MMAL_PARAM_EXPOSUREMETERINGMODE_SPOT,
+ MMAL_PARAM_EXPOSUREMETERINGMODE_BACKLIT,
+ MMAL_PARAM_EXPOSUREMETERINGMODE_MATRIX,
+};
+
+enum mmal_parameter_awbmode {
+ MMAL_PARAM_AWBMODE_OFF,
+ MMAL_PARAM_AWBMODE_AUTO,
+ MMAL_PARAM_AWBMODE_SUNLIGHT,
+ MMAL_PARAM_AWBMODE_CLOUDY,
+ MMAL_PARAM_AWBMODE_SHADE,
+ MMAL_PARAM_AWBMODE_TUNGSTEN,
+ MMAL_PARAM_AWBMODE_FLUORESCENT,
+ MMAL_PARAM_AWBMODE_INCANDESCENT,
+ MMAL_PARAM_AWBMODE_FLASH,
+ MMAL_PARAM_AWBMODE_HORIZON,
+};
+
+enum mmal_parameter_imagefx {
+ MMAL_PARAM_IMAGEFX_NONE,
+ MMAL_PARAM_IMAGEFX_NEGATIVE,
+ MMAL_PARAM_IMAGEFX_SOLARIZE,
+ MMAL_PARAM_IMAGEFX_POSTERIZE,
+ MMAL_PARAM_IMAGEFX_WHITEBOARD,
+ MMAL_PARAM_IMAGEFX_BLACKBOARD,
+ MMAL_PARAM_IMAGEFX_SKETCH,
+ MMAL_PARAM_IMAGEFX_DENOISE,
+ MMAL_PARAM_IMAGEFX_EMBOSS,
+ MMAL_PARAM_IMAGEFX_OILPAINT,
+ MMAL_PARAM_IMAGEFX_HATCH,
+ MMAL_PARAM_IMAGEFX_GPEN,
+ MMAL_PARAM_IMAGEFX_PASTEL,
+ MMAL_PARAM_IMAGEFX_WATERCOLOUR,
+ MMAL_PARAM_IMAGEFX_FILM,
+ MMAL_PARAM_IMAGEFX_BLUR,
+ MMAL_PARAM_IMAGEFX_SATURATION,
+ MMAL_PARAM_IMAGEFX_COLOURSWAP,
+ MMAL_PARAM_IMAGEFX_WASHEDOUT,
+ MMAL_PARAM_IMAGEFX_POSTERISE,
+ MMAL_PARAM_IMAGEFX_COLOURPOINT,
+ MMAL_PARAM_IMAGEFX_COLOURBALANCE,
+ MMAL_PARAM_IMAGEFX_CARTOON,
+};
+
+enum MMAL_PARAM_FLICKERAVOID_T {
+ MMAL_PARAM_FLICKERAVOID_OFF,
+ MMAL_PARAM_FLICKERAVOID_AUTO,
+ MMAL_PARAM_FLICKERAVOID_50HZ,
+ MMAL_PARAM_FLICKERAVOID_60HZ,
+ MMAL_PARAM_FLICKERAVOID_MAX = 0x7FFFFFFF
+};
+
+struct mmal_parameter_awbgains {
+ struct mmal_parameter_rational r_gain; /**< Red gain */
+ struct mmal_parameter_rational b_gain; /**< Blue gain */
+};
+
+/** Manner of video rate control */
+enum mmal_parameter_rate_control_mode {
+ MMAL_VIDEO_RATECONTROL_DEFAULT,
+ MMAL_VIDEO_RATECONTROL_VARIABLE,
+ MMAL_VIDEO_RATECONTROL_CONSTANT,
+ MMAL_VIDEO_RATECONTROL_VARIABLE_SKIP_FRAMES,
+ MMAL_VIDEO_RATECONTROL_CONSTANT_SKIP_FRAMES
+};
+
+enum mmal_video_profile {
+ MMAL_VIDEO_PROFILE_H263_BASELINE,
+ MMAL_VIDEO_PROFILE_H263_H320CODING,
+ MMAL_VIDEO_PROFILE_H263_BACKWARDCOMPATIBLE,
+ MMAL_VIDEO_PROFILE_H263_ISWV2,
+ MMAL_VIDEO_PROFILE_H263_ISWV3,
+ MMAL_VIDEO_PROFILE_H263_HIGHCOMPRESSION,
+ MMAL_VIDEO_PROFILE_H263_INTERNET,
+ MMAL_VIDEO_PROFILE_H263_INTERLACE,
+ MMAL_VIDEO_PROFILE_H263_HIGHLATENCY,
+ MMAL_VIDEO_PROFILE_MP4V_SIMPLE,
+ MMAL_VIDEO_PROFILE_MP4V_SIMPLESCALABLE,
+ MMAL_VIDEO_PROFILE_MP4V_CORE,
+ MMAL_VIDEO_PROFILE_MP4V_MAIN,
+ MMAL_VIDEO_PROFILE_MP4V_NBIT,
+ MMAL_VIDEO_PROFILE_MP4V_SCALABLETEXTURE,
+ MMAL_VIDEO_PROFILE_MP4V_SIMPLEFACE,
+ MMAL_VIDEO_PROFILE_MP4V_SIMPLEFBA,
+ MMAL_VIDEO_PROFILE_MP4V_BASICANIMATED,
+ MMAL_VIDEO_PROFILE_MP4V_HYBRID,
+ MMAL_VIDEO_PROFILE_MP4V_ADVANCEDREALTIME,
+ MMAL_VIDEO_PROFILE_MP4V_CORESCALABLE,
+ MMAL_VIDEO_PROFILE_MP4V_ADVANCEDCODING,
+ MMAL_VIDEO_PROFILE_MP4V_ADVANCEDCORE,
+ MMAL_VIDEO_PROFILE_MP4V_ADVANCEDSCALABLE,
+ MMAL_VIDEO_PROFILE_MP4V_ADVANCEDSIMPLE,
+ MMAL_VIDEO_PROFILE_H264_BASELINE,
+ MMAL_VIDEO_PROFILE_H264_MAIN,
+ MMAL_VIDEO_PROFILE_H264_EXTENDED,
+ MMAL_VIDEO_PROFILE_H264_HIGH,
+ MMAL_VIDEO_PROFILE_H264_HIGH10,
+ MMAL_VIDEO_PROFILE_H264_HIGH422,
+ MMAL_VIDEO_PROFILE_H264_HIGH444,
+ MMAL_VIDEO_PROFILE_H264_CONSTRAINED_BASELINE,
+ MMAL_VIDEO_PROFILE_DUMMY = 0x7FFFFFFF
+};
+
+enum mmal_video_level {
+ MMAL_VIDEO_LEVEL_H263_10,
+ MMAL_VIDEO_LEVEL_H263_20,
+ MMAL_VIDEO_LEVEL_H263_30,
+ MMAL_VIDEO_LEVEL_H263_40,
+ MMAL_VIDEO_LEVEL_H263_45,
+ MMAL_VIDEO_LEVEL_H263_50,
+ MMAL_VIDEO_LEVEL_H263_60,
+ MMAL_VIDEO_LEVEL_H263_70,
+ MMAL_VIDEO_LEVEL_MP4V_0,
+ MMAL_VIDEO_LEVEL_MP4V_0b,
+ MMAL_VIDEO_LEVEL_MP4V_1,
+ MMAL_VIDEO_LEVEL_MP4V_2,
+ MMAL_VIDEO_LEVEL_MP4V_3,
+ MMAL_VIDEO_LEVEL_MP4V_4,
+ MMAL_VIDEO_LEVEL_MP4V_4a,
+ MMAL_VIDEO_LEVEL_MP4V_5,
+ MMAL_VIDEO_LEVEL_MP4V_6,
+ MMAL_VIDEO_LEVEL_H264_1,
+ MMAL_VIDEO_LEVEL_H264_1b,
+ MMAL_VIDEO_LEVEL_H264_11,
+ MMAL_VIDEO_LEVEL_H264_12,
+ MMAL_VIDEO_LEVEL_H264_13,
+ MMAL_VIDEO_LEVEL_H264_2,
+ MMAL_VIDEO_LEVEL_H264_21,
+ MMAL_VIDEO_LEVEL_H264_22,
+ MMAL_VIDEO_LEVEL_H264_3,
+ MMAL_VIDEO_LEVEL_H264_31,
+ MMAL_VIDEO_LEVEL_H264_32,
+ MMAL_VIDEO_LEVEL_H264_4,
+ MMAL_VIDEO_LEVEL_H264_41,
+ MMAL_VIDEO_LEVEL_H264_42,
+ MMAL_VIDEO_LEVEL_H264_5,
+ MMAL_VIDEO_LEVEL_H264_51,
+ MMAL_VIDEO_LEVEL_DUMMY = 0x7FFFFFFF
+};
+
+struct mmal_parameter_video_profile {
+ enum mmal_video_profile profile;
+ enum mmal_video_level level;
+};
+
+/* video parameters */
+
+enum mmal_parameter_video_type {
+ /** @ref MMAL_DISPLAYREGION_T */
+ MMAL_PARAMETER_DISPLAYREGION = MMAL_PARAMETER_GROUP_VIDEO,
+
+ /** @ref MMAL_PARAMETER_VIDEO_PROFILE_T */
+ MMAL_PARAMETER_SUPPORTED_PROFILES,
+
+ /** @ref MMAL_PARAMETER_VIDEO_PROFILE_T */
+ MMAL_PARAMETER_PROFILE,
+
+ /** @ref MMAL_PARAMETER_UINT32_T */
+ MMAL_PARAMETER_INTRAPERIOD,
+
+ /** @ref MMAL_PARAMETER_VIDEO_RATECONTROL_T */
+ MMAL_PARAMETER_RATECONTROL,
+
+ /** @ref MMAL_PARAMETER_VIDEO_NALUNITFORMAT_T */
+ MMAL_PARAMETER_NALUNITFORMAT,
+
+ /** @ref MMAL_PARAMETER_BOOLEAN_T */
+ MMAL_PARAMETER_MINIMISE_FRAGMENTATION,
+
+ /** @ref MMAL_PARAMETER_UINT32_T.
+ * Setting the value to zero resets to the default (one slice per frame).
+ */
+ MMAL_PARAMETER_MB_ROWS_PER_SLICE,
+
+ /** @ref MMAL_PARAMETER_VIDEO_LEVEL_EXTENSION_T */
+ MMAL_PARAMETER_VIDEO_LEVEL_EXTENSION,
+
+ /** @ref MMAL_PARAMETER_VIDEO_EEDE_ENABLE_T */
+ MMAL_PARAMETER_VIDEO_EEDE_ENABLE,
+
+ /** @ref MMAL_PARAMETER_VIDEO_EEDE_LOSSRATE_T */
+ MMAL_PARAMETER_VIDEO_EEDE_LOSSRATE,
+
+ /** @ref MMAL_PARAMETER_BOOLEAN_T. Request an I-frame. */
+ MMAL_PARAMETER_VIDEO_REQUEST_I_FRAME,
+ /** @ref MMAL_PARAMETER_VIDEO_INTRA_REFRESH_T */
+ MMAL_PARAMETER_VIDEO_INTRA_REFRESH,
+
+ /** @ref MMAL_PARAMETER_BOOLEAN_T. */
+ MMAL_PARAMETER_VIDEO_IMMUTABLE_INPUT,
+
+ /** @ref MMAL_PARAMETER_UINT32_T. Run-time bit rate control */
+ MMAL_PARAMETER_VIDEO_BIT_RATE,
+
+ /** @ref MMAL_PARAMETER_FRAME_RATE_T */
+ MMAL_PARAMETER_VIDEO_FRAME_RATE,
+
+ /** @ref MMAL_PARAMETER_UINT32_T. */
+ MMAL_PARAMETER_VIDEO_ENCODE_MIN_QUANT,
+
+ /** @ref MMAL_PARAMETER_UINT32_T. */
+ MMAL_PARAMETER_VIDEO_ENCODE_MAX_QUANT,
+
+ /** @ref MMAL_PARAMETER_VIDEO_ENCODE_RC_MODEL_T. */
+ MMAL_PARAMETER_VIDEO_ENCODE_RC_MODEL,
+
+ MMAL_PARAMETER_EXTRA_BUFFERS, /**< @ref MMAL_PARAMETER_UINT32_T. */
+ /** @ref MMAL_PARAMETER_UINT32_T.
+ * Changing this parameter from the default can reduce frame rate
+ * because image buffers need to be re-pitched.
+ */
+ MMAL_PARAMETER_VIDEO_ALIGN_HORIZ,
+
+ /** @ref MMAL_PARAMETER_UINT32_T.
+ * Changing this parameter from the default can reduce frame rate
+ * because image buffers need to be re-pitched.
+ */
+ MMAL_PARAMETER_VIDEO_ALIGN_VERT,
+
+ /** @ref MMAL_PARAMETER_BOOLEAN_T. */
+ MMAL_PARAMETER_VIDEO_DROPPABLE_PFRAMES,
+
+ /** @ref MMAL_PARAMETER_UINT32_T. */
+ MMAL_PARAMETER_VIDEO_ENCODE_INITIAL_QUANT,
+
+	/** @ref MMAL_PARAMETER_UINT32_T. */
+ MMAL_PARAMETER_VIDEO_ENCODE_QP_P,
+
+	/** @ref MMAL_PARAMETER_UINT32_T. */
+ MMAL_PARAMETER_VIDEO_ENCODE_RC_SLICE_DQUANT,
+
+ /** @ref MMAL_PARAMETER_UINT32_T */
+ MMAL_PARAMETER_VIDEO_ENCODE_FRAME_LIMIT_BITS,
+
+ /** @ref MMAL_PARAMETER_UINT32_T. */
+ MMAL_PARAMETER_VIDEO_ENCODE_PEAK_RATE,
+
+ /* H264 specific parameters */
+
+ /** @ref MMAL_PARAMETER_BOOLEAN_T. */
+ MMAL_PARAMETER_VIDEO_ENCODE_H264_DISABLE_CABAC,
+
+ /** @ref MMAL_PARAMETER_BOOLEAN_T. */
+ MMAL_PARAMETER_VIDEO_ENCODE_H264_LOW_LATENCY,
+
+ /** @ref MMAL_PARAMETER_BOOLEAN_T. */
+ MMAL_PARAMETER_VIDEO_ENCODE_H264_AU_DELIMITERS,
+
+ /** @ref MMAL_PARAMETER_UINT32_T. */
+ MMAL_PARAMETER_VIDEO_ENCODE_H264_DEBLOCK_IDC,
+
+ /** @ref MMAL_PARAMETER_VIDEO_ENCODER_H264_MB_INTRA_MODES_T. */
+ MMAL_PARAMETER_VIDEO_ENCODE_H264_MB_INTRA_MODE,
+
+ /** @ref MMAL_PARAMETER_BOOLEAN_T */
+ MMAL_PARAMETER_VIDEO_ENCODE_HEADER_ON_OPEN,
+
+ /** @ref MMAL_PARAMETER_BOOLEAN_T */
+ MMAL_PARAMETER_VIDEO_ENCODE_PRECODE_FOR_QP,
+
+ /** @ref MMAL_PARAMETER_VIDEO_DRM_INIT_INFO_T. */
+ MMAL_PARAMETER_VIDEO_DRM_INIT_INFO,
+
+ /** @ref MMAL_PARAMETER_BOOLEAN_T */
+ MMAL_PARAMETER_VIDEO_TIMESTAMP_FIFO,
+
+ /** @ref MMAL_PARAMETER_BOOLEAN_T */
+ MMAL_PARAMETER_VIDEO_DECODE_ERROR_CONCEALMENT,
+
+ /** @ref MMAL_PARAMETER_VIDEO_DRM_PROTECT_BUFFER_T. */
+ MMAL_PARAMETER_VIDEO_DRM_PROTECT_BUFFER,
+
+ /** @ref MMAL_PARAMETER_BYTES_T */
+ MMAL_PARAMETER_VIDEO_DECODE_CONFIG_VD3,
+
+	/** @ref MMAL_PARAMETER_BOOLEAN_T */
+ MMAL_PARAMETER_VIDEO_ENCODE_H264_VCL_HRD_PARAMETERS,
+
+	/** @ref MMAL_PARAMETER_BOOLEAN_T */
+ MMAL_PARAMETER_VIDEO_ENCODE_H264_LOW_DELAY_HRD_FLAG,
+
+	/** @ref MMAL_PARAMETER_BOOLEAN_T */
+ MMAL_PARAMETER_VIDEO_ENCODE_INLINE_HEADER
+};
+
+/** Valid mirror modes */
+enum mmal_parameter_mirror {
+ MMAL_PARAM_MIRROR_NONE,
+ MMAL_PARAM_MIRROR_VERTICAL,
+ MMAL_PARAM_MIRROR_HORIZONTAL,
+ MMAL_PARAM_MIRROR_BOTH,
+};
+
+enum mmal_parameter_displaytransform {
+ MMAL_DISPLAY_ROT0 = 0,
+ MMAL_DISPLAY_MIRROR_ROT0 = 1,
+ MMAL_DISPLAY_MIRROR_ROT180 = 2,
+ MMAL_DISPLAY_ROT180 = 3,
+ MMAL_DISPLAY_MIRROR_ROT90 = 4,
+ MMAL_DISPLAY_ROT270 = 5,
+ MMAL_DISPLAY_ROT90 = 6,
+ MMAL_DISPLAY_MIRROR_ROT270 = 7,
+};
+
+enum mmal_parameter_displaymode {
+ MMAL_DISPLAY_MODE_FILL = 0,
+ MMAL_DISPLAY_MODE_LETTERBOX = 1,
+};
+
+enum mmal_parameter_displayset {
+ MMAL_DISPLAY_SET_NONE = 0,
+ MMAL_DISPLAY_SET_NUM = 1,
+ MMAL_DISPLAY_SET_FULLSCREEN = 2,
+ MMAL_DISPLAY_SET_TRANSFORM = 4,
+ MMAL_DISPLAY_SET_DEST_RECT = 8,
+ MMAL_DISPLAY_SET_SRC_RECT = 0x10,
+ MMAL_DISPLAY_SET_MODE = 0x20,
+ MMAL_DISPLAY_SET_PIXEL = 0x40,
+ MMAL_DISPLAY_SET_NOASPECT = 0x80,
+ MMAL_DISPLAY_SET_LAYER = 0x100,
+ MMAL_DISPLAY_SET_COPYPROTECT = 0x200,
+ MMAL_DISPLAY_SET_ALPHA = 0x400,
+};
+
+struct mmal_parameter_displayregion {
+ /** Bitfield that indicates which fields are set and should be
+ * used. All other fields will maintain their current value.
+	 * \ref mmal_parameter_displayset defines the bits that can be
+ * combined.
+ */
+ u32 set;
+
+ /** Describes the display output device, with 0 typically
+ * being a directly connected LCD display. The actual values
+ * will depend on the hardware. Code using hard-wired numbers
+ * (e.g. 2) is certain to fail.
+	 */
+	u32 display_num;
+
+ /** Indicates that we are using the full device screen area,
+ * rather than a window of the display. If zero, then
+ * dest_rect is used to specify a region of the display to
+ * use.
+	 */
+	s32 fullscreen;
+
+ /** Indicates any rotation or flipping used to map frames onto
+ * the natural display orientation.
+ */
+ u32 transform; /* enum mmal_parameter_displaytransform */
+
+ /** Where to display the frame within the screen, if
+ * fullscreen is zero.
+ */
+ struct vchiq_mmal_rect dest_rect;
+
+ /** Indicates which area of the frame to display. If all
+ * values are zero, the whole frame will be used.
+ */
+ struct vchiq_mmal_rect src_rect;
+
+ /** If set to non-zero, indicates that any display scaling
+ * should disregard the aspect ratio of the frame region being
+ * displayed.
+ */
+ s32 noaspect;
+
+ /** Indicates how the image should be scaled to fit the
+ * display. \code MMAL_DISPLAY_MODE_FILL \endcode indicates
+ * that the image should fill the screen by potentially
+ * cropping the frames. Setting \code mode \endcode to \code
+ * MMAL_DISPLAY_MODE_LETTERBOX \endcode indicates that all the
+ * source region should be displayed and black bars added if
+ * necessary.
+ */
+ u32 mode; /* enum mmal_parameter_displaymode */
+
+ /** If non-zero, defines the width of a source pixel relative
+ * to \code pixel_y \endcode. If zero, then pixels default to
+ * being square.
+ */
+ u32 pixel_x;
+
+ /** If non-zero, defines the height of a source pixel relative
+ * to \code pixel_x \endcode. If zero, then pixels default to
+ * being square.
+ */
+ u32 pixel_y;
+
+ /** Sets the relative depth of the images, with greater values
+ * being in front of smaller values.
+ */
+ u32 layer;
+
+ /** Set to non-zero to ensure copy protection is used on
+ * output.
+ */
+ s32 copyprotect_required;
+
+ /** Level of opacity of the layer, where zero is fully
+ * transparent and 255 is fully opaque.
+ */
+ u32 alpha;
+};
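+
+/* Illustrative fill-in (region being a hypothetical local variable of this
+ * type): only the fields flagged in 'set' are applied, so a minimal
+ * letterboxed full-screen request could look like
+ *
+ *	region.set = MMAL_DISPLAY_SET_FULLSCREEN | MMAL_DISPLAY_SET_MODE;
+ *	region.fullscreen = 1;
+ *	region.mode = MMAL_DISPLAY_MODE_LETTERBOX;
+ */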
+
+#define MMAL_MAX_IMAGEFX_PARAMETERS 5
+
+struct mmal_parameter_imagefx_parameters {
+ enum mmal_parameter_imagefx effect;
+ u32 num_effect_params;
+ u32 effect_parameter[MMAL_MAX_IMAGEFX_PARAMETERS];
+};
+
+#define MMAL_PARAMETER_CAMERA_INFO_MAX_CAMERAS 4
+#define MMAL_PARAMETER_CAMERA_INFO_MAX_FLASHES 2
+#define MMAL_PARAMETER_CAMERA_INFO_MAX_STR_LEN 16
+
+struct mmal_parameter_camera_info_camera_t {
+ u32 port_id;
+ u32 max_width;
+ u32 max_height;
+ u32 lens_present;
+ u8 camera_name[MMAL_PARAMETER_CAMERA_INFO_MAX_STR_LEN];
+};
+
+enum mmal_parameter_camera_info_flash_type_t {
+ /* Make values explicit to ensure they match values in config ini */
+ MMAL_PARAMETER_CAMERA_INFO_FLASH_TYPE_XENON = 0,
+ MMAL_PARAMETER_CAMERA_INFO_FLASH_TYPE_LED = 1,
+ MMAL_PARAMETER_CAMERA_INFO_FLASH_TYPE_OTHER = 2,
+ MMAL_PARAMETER_CAMERA_INFO_FLASH_TYPE_MAX = 0x7FFFFFFF
+};
+
+struct mmal_parameter_camera_info_flash_t {
+ enum mmal_parameter_camera_info_flash_type_t flash_type;
+};
+
+struct mmal_parameter_camera_info_t {
+ u32 num_cameras;
+ u32 num_flashes;
+ struct mmal_parameter_camera_info_camera_t
+ cameras[MMAL_PARAMETER_CAMERA_INFO_MAX_CAMERAS];
+ struct mmal_parameter_camera_info_flash_t
+ flashes[MMAL_PARAMETER_CAMERA_INFO_MAX_FLASHES];
+};
diff --git a/drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c b/drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c
new file mode 100644
index 000000000000..4360db6d4392
--- /dev/null
+++ b/drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c
@@ -0,0 +1,2063 @@
+/*
+ * Broadcom BCM2835 V4L2 driver
+ *
+ * Copyright © 2013 Raspberry Pi (Trading) Ltd.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive
+ * for more details.
+ *
+ * Authors: Vincent Sanders <vincent.sanders@collabora.co.uk>
+ * Dave Stevenson <dsteve@broadcom.com>
+ * Simon Mellor <simellor@broadcom.com>
+ * Luke Diamand <luked@broadcom.com>
+ *
+ * V4L2 driver MMAL vchiq interface code
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/completion.h>
+#include <linux/vmalloc.h>
+#include <linux/btree.h>
+#include <asm/cacheflush.h>
+#include <media/videobuf2-vmalloc.h>
+
+#include "mmal-common.h"
+#include "mmal-vchiq.h"
+#include "mmal-msg.h"
+
+#define USE_VCHIQ_ARM
+#include "interface/vchi/vchi.h"
+
+/* maximum number of components supported */
+#define VCHIQ_MMAL_MAX_COMPONENTS 4
+
+/*#define FULL_MSG_DUMP 1*/
+
+#ifdef DEBUG
+static const char *const msg_type_names[] = {
+ "UNKNOWN",
+ "QUIT",
+ "SERVICE_CLOSED",
+ "GET_VERSION",
+ "COMPONENT_CREATE",
+ "COMPONENT_DESTROY",
+ "COMPONENT_ENABLE",
+ "COMPONENT_DISABLE",
+ "PORT_INFO_GET",
+ "PORT_INFO_SET",
+ "PORT_ACTION",
+ "BUFFER_FROM_HOST",
+ "BUFFER_TO_HOST",
+ "GET_STATS",
+ "PORT_PARAMETER_SET",
+ "PORT_PARAMETER_GET",
+ "EVENT_TO_HOST",
+ "GET_CORE_STATS_FOR_PORT",
+ "OPAQUE_ALLOCATOR",
+ "CONSUME_MEM",
+ "LMK",
+ "OPAQUE_ALLOCATOR_DESC",
+ "DRM_GET_LHS32",
+ "DRM_GET_TIME",
+ "BUFFER_FROM_HOST_ZEROLEN",
+ "PORT_FLUSH",
+ "HOST_LOG",
+};
+#endif
+
+static const char *const port_action_type_names[] = {
+ "UNKNOWN",
+ "ENABLE",
+ "DISABLE",
+ "FLUSH",
+ "CONNECT",
+ "DISCONNECT",
+ "SET_REQUIREMENTS",
+};
+
+#if defined(DEBUG)
+#if defined(FULL_MSG_DUMP)
+#define DBG_DUMP_MSG(MSG, MSG_LEN, TITLE) \
+ do { \
+ pr_debug(TITLE" type:%s(%d) length:%d\n", \
+ msg_type_names[(MSG)->h.type], \
+ (MSG)->h.type, (MSG_LEN)); \
+ print_hex_dump(KERN_DEBUG, "<<h: ", DUMP_PREFIX_OFFSET, \
+ 16, 4, (MSG), \
+ sizeof(struct mmal_msg_header), 1); \
+ print_hex_dump(KERN_DEBUG, "<<p: ", DUMP_PREFIX_OFFSET, \
+ 16, 4, \
+ ((u8 *)(MSG)) + sizeof(struct mmal_msg_header),\
+ (MSG_LEN) - sizeof(struct mmal_msg_header), 1); \
+ } while (0)
+#else
+#define DBG_DUMP_MSG(MSG, MSG_LEN, TITLE) \
+	do { \
+		pr_debug(TITLE" type:%s(%d) length:%d\n", \
+			 msg_type_names[(MSG)->h.type], \
+			 (MSG)->h.type, (MSG_LEN)); \
+	} while (0)
+#endif
+#else
+#define DBG_DUMP_MSG(MSG, MSG_LEN, TITLE)
+#endif
+
+struct vchiq_mmal_instance;
+
+/* normal message context */
+struct mmal_msg_context {
+ struct vchiq_mmal_instance *instance;
+ u32 handle;
+
+ union {
+ struct {
+			/* work struct for deferred callback - must come first */
+ struct work_struct work;
+ /* mmal instance */
+ struct vchiq_mmal_instance *instance;
+ /* mmal port */
+ struct vchiq_mmal_port *port;
+ /* actual buffer used to store bulk reply */
+ struct mmal_buffer *buffer;
+ /* amount of buffer used */
+ unsigned long buffer_used;
+ /* MMAL buffer flags */
+ u32 mmal_flags;
+ /* Presentation and Decode timestamps */
+ s64 pts;
+ s64 dts;
+
+ int status; /* context status */
+
+ } bulk; /* bulk data */
+
+ struct {
+ /* message handle to release */
+ VCHI_HELD_MSG_T msg_handle;
+ /* pointer to received message */
+ struct mmal_msg *msg;
+ /* received message length */
+ u32 msg_len;
+ /* completion upon reply */
+ struct completion cmplt;
+ } sync; /* synchronous response */
+ } u;
+
+};
+
+struct vchiq_mmal_context_map {
+	/* ensure serialised access to the btree (contention should be low) */
+ struct mutex lock;
+ struct btree_head32 btree_head;
+ u32 last_handle;
+};
+
+struct vchiq_mmal_instance {
+ VCHI_SERVICE_HANDLE_T handle;
+
+ /* ensure serialised access to service */
+ struct mutex vchiq_mutex;
+
+ /* ensure serialised access to bulk operations */
+ struct mutex bulk_mutex;
+
+ /* vmalloc page to receive scratch bulk xfers into */
+ void *bulk_scratch;
+
+ /* mapping table between context handles and mmal_msg_contexts */
+ struct vchiq_mmal_context_map context_map;
+
+ /* component to use next */
+ int component_idx;
+ struct vchiq_mmal_component component[VCHIQ_MMAL_MAX_COMPONENTS];
+};
+
+static int __must_check
+mmal_context_map_init(struct vchiq_mmal_context_map *context_map)
+{
+ mutex_init(&context_map->lock);
+ context_map->last_handle = 0;
+ return btree_init32(&context_map->btree_head);
+}
+
+static void mmal_context_map_destroy(struct vchiq_mmal_context_map *context_map)
+{
+ mutex_lock(&context_map->lock);
+ btree_destroy32(&context_map->btree_head);
+ mutex_unlock(&context_map->lock);
+}
+
+static u32
+mmal_context_map_create_handle(struct vchiq_mmal_context_map *context_map,
+ struct mmal_msg_context *msg_context,
+ gfp_t gfp)
+{
+ u32 handle;
+
+ mutex_lock(&context_map->lock);
+
+ while (1) {
+ /* just use a simple count for handles, but do not use 0 */
+ context_map->last_handle++;
+ if (!context_map->last_handle)
+ context_map->last_handle++;
+
+ handle = context_map->last_handle;
+
+ /* check if the handle is already in use */
+ if (!btree_lookup32(&context_map->btree_head, handle))
+ break;
+ }
+
+ if (btree_insert32(&context_map->btree_head, handle,
+ msg_context, gfp)) {
+ /* probably out of memory */
+ mutex_unlock(&context_map->lock);
+ return 0;
+ }
+
+ mutex_unlock(&context_map->lock);
+ return handle;
+}
+
+static struct mmal_msg_context *
+mmal_context_map_lookup_handle(struct vchiq_mmal_context_map *context_map,
+ u32 handle)
+{
+ struct mmal_msg_context *msg_context;
+
+ if (!handle)
+ return NULL;
+
+ mutex_lock(&context_map->lock);
+
+ msg_context = btree_lookup32(&context_map->btree_head, handle);
+
+ mutex_unlock(&context_map->lock);
+ return msg_context;
+}
+
+static void
+mmal_context_map_destroy_handle(struct vchiq_mmal_context_map *context_map,
+ u32 handle)
+{
+ mutex_lock(&context_map->lock);
+ btree_remove32(&context_map->btree_head, handle);
+ mutex_unlock(&context_map->lock);
+}
+
+static struct mmal_msg_context *
+get_msg_context(struct vchiq_mmal_instance *instance)
+{
+ struct mmal_msg_context *msg_context;
+
+ /* todo: should this be allocated from a pool to avoid kzalloc */
+ msg_context = kzalloc(sizeof(*msg_context), GFP_KERNEL);
+
+ if (!msg_context)
+ return ERR_PTR(-ENOMEM);
+
+ msg_context->instance = instance;
+ msg_context->handle =
+ mmal_context_map_create_handle(&instance->context_map,
+ msg_context,
+ GFP_KERNEL);
+
+ if (!msg_context->handle) {
+ kfree(msg_context);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ return msg_context;
+}
+
+static struct mmal_msg_context *
+lookup_msg_context(struct vchiq_mmal_instance *instance, u32 handle)
+{
+ return mmal_context_map_lookup_handle(&instance->context_map,
+ handle);
+}
+
+static void
+release_msg_context(struct mmal_msg_context *msg_context)
+{
+ mmal_context_map_destroy_handle(&msg_context->instance->context_map,
+ msg_context->handle);
+ kfree(msg_context);
+}
+
+/* deals with receipt of event to host message */
+static void event_to_host_cb(struct vchiq_mmal_instance *instance,
+ struct mmal_msg *msg, u32 msg_len)
+{
+ pr_debug("unhandled event\n");
+ pr_debug("component:%u port type:%d num:%d cmd:0x%x length:%d\n",
+ msg->u.event_to_host.client_component,
+ msg->u.event_to_host.port_type,
+ msg->u.event_to_host.port_num,
+ msg->u.event_to_host.cmd, msg->u.event_to_host.length);
+}
+
+/* workqueue scheduled callback
+ *
+ * we do this because it is important we do not call any other vchiq
+ * sync calls from within the message delivery thread
+ */
+static void buffer_work_cb(struct work_struct *work)
+{
+ struct mmal_msg_context *msg_context =
+ container_of(work, struct mmal_msg_context, u.bulk.work);
+
+ msg_context->u.bulk.port->buffer_cb(msg_context->u.bulk.instance,
+ msg_context->u.bulk.port,
+ msg_context->u.bulk.status,
+ msg_context->u.bulk.buffer,
+ msg_context->u.bulk.buffer_used,
+ msg_context->u.bulk.mmal_flags,
+ msg_context->u.bulk.dts,
+ msg_context->u.bulk.pts);
+
+ /* release message context */
+ release_msg_context(msg_context);
+}
+
+/* enqueue a bulk receive for a given message context */
+static int bulk_receive(struct vchiq_mmal_instance *instance,
+ struct mmal_msg *msg,
+ struct mmal_msg_context *msg_context)
+{
+ unsigned long rd_len;
+ unsigned long flags = 0;
+ int ret;
+
+ /* bulk mutex stops other bulk operations while we have a
+ * receive in progress - released in callback
+ */
+ ret = mutex_lock_interruptible(&instance->bulk_mutex);
+ if (ret != 0)
+ return ret;
+
+ rd_len = msg->u.buffer_from_host.buffer_header.length;
+
+ /* take buffer from queue */
+ spin_lock_irqsave(&msg_context->u.bulk.port->slock, flags);
+ if (list_empty(&msg_context->u.bulk.port->buffers)) {
+ spin_unlock_irqrestore(&msg_context->u.bulk.port->slock, flags);
+ pr_err("buffer list empty trying to submit bulk receive\n");
+
+ /* todo: this is a serious error, we should never have
+ * committed a buffer_to_host operation to the mmal
+ * port without the buffer to back it up (underflow
+ * handling) and there is no obvious way to deal with
+		 * this - how is the mmal service going to react when
+ * we fail to do the xfer and reschedule a buffer when
+ * it arrives? perhaps a starved flag to indicate a
+ * waiting bulk receive?
+ */
+
+ mutex_unlock(&instance->bulk_mutex);
+
+ return -EINVAL;
+ }
+
+ msg_context->u.bulk.buffer =
+ list_entry(msg_context->u.bulk.port->buffers.next,
+ struct mmal_buffer, list);
+ list_del(&msg_context->u.bulk.buffer->list);
+
+ spin_unlock_irqrestore(&msg_context->u.bulk.port->slock, flags);
+
+ /* ensure we do not overrun the available buffer */
+ if (rd_len > msg_context->u.bulk.buffer->buffer_size) {
+ rd_len = msg_context->u.bulk.buffer->buffer_size;
+ pr_warn("short read as not enough receive buffer space\n");
+ /* todo: is this the correct response, what happens to
+ * the rest of the message data?
+ */
+ }
+
+ /* store length */
+ msg_context->u.bulk.buffer_used = rd_len;
+ msg_context->u.bulk.mmal_flags =
+ msg->u.buffer_from_host.buffer_header.flags;
+ msg_context->u.bulk.dts = msg->u.buffer_from_host.buffer_header.dts;
+ msg_context->u.bulk.pts = msg->u.buffer_from_host.buffer_header.pts;
+
+ /* queue the bulk submission */
+ vchi_service_use(instance->handle);
+ ret = vchi_bulk_queue_receive(instance->handle,
+ msg_context->u.bulk.buffer->buffer,
+ /* Actual receive needs to be a multiple
+ * of 4 bytes
+ */
+ (rd_len + 3) & ~3,
+ VCHI_FLAGS_CALLBACK_WHEN_OP_COMPLETE |
+ VCHI_FLAGS_BLOCK_UNTIL_QUEUED,
+ msg_context);
+
+ vchi_service_release(instance->handle);
+
+ if (ret != 0) {
+ /* callback will not be clearing the mutex */
+ mutex_unlock(&instance->bulk_mutex);
+ }
+
+ return ret;
+}
+
+/* enqueue a dummy bulk receive for a given message context */
+static int dummy_bulk_receive(struct vchiq_mmal_instance *instance,
+ struct mmal_msg_context *msg_context)
+{
+ int ret;
+
+ /* bulk mutex stops other bulk operations while we have a
+ * receive in progress - released in callback
+ */
+ ret = mutex_lock_interruptible(&instance->bulk_mutex);
+ if (ret != 0)
+ return ret;
+
+ /* zero length indicates this was a dummy transfer */
+ msg_context->u.bulk.buffer_used = 0;
+
+ /* queue the bulk submission */
+ vchi_service_use(instance->handle);
+
+ ret = vchi_bulk_queue_receive(instance->handle,
+ instance->bulk_scratch,
+ 8,
+ VCHI_FLAGS_CALLBACK_WHEN_OP_COMPLETE |
+ VCHI_FLAGS_BLOCK_UNTIL_QUEUED,
+ msg_context);
+
+ vchi_service_release(instance->handle);
+
+ if (ret != 0) {
+ /* callback will not be clearing the mutex */
+ mutex_unlock(&instance->bulk_mutex);
+ }
+
+ return ret;
+}
+
+/* data in message, memcpy from packet into output buffer */
+static int inline_receive(struct vchiq_mmal_instance *instance,
+ struct mmal_msg *msg,
+ struct mmal_msg_context *msg_context)
+{
+ unsigned long flags = 0;
+
+ /* take buffer from queue */
+ spin_lock_irqsave(&msg_context->u.bulk.port->slock, flags);
+ if (list_empty(&msg_context->u.bulk.port->buffers)) {
+ spin_unlock_irqrestore(&msg_context->u.bulk.port->slock, flags);
+ pr_err("buffer list empty trying to receive inline\n");
+
+ /* todo: this is a serious error, we should never have
+ * committed a buffer_to_host operation to the mmal
+ * port without the buffer to back it up (with
+ * underflow handling) and there is no obvious way to
+ * deal with this. Less bad than the bulk case as we
+ * can just drop this on the floor but...unhelpful
+ */
+ return -EINVAL;
+ }
+
+ msg_context->u.bulk.buffer =
+ list_entry(msg_context->u.bulk.port->buffers.next,
+ struct mmal_buffer, list);
+ list_del(&msg_context->u.bulk.buffer->list);
+
+ spin_unlock_irqrestore(&msg_context->u.bulk.port->slock, flags);
+
+ memcpy(msg_context->u.bulk.buffer->buffer,
+ msg->u.buffer_from_host.short_data,
+ msg->u.buffer_from_host.payload_in_message);
+
+ msg_context->u.bulk.buffer_used =
+ msg->u.buffer_from_host.payload_in_message;
+
+ return 0;
+}
+
+/* queue the buffer availability with MMAL_MSG_TYPE_BUFFER_FROM_HOST */
+static int
+buffer_from_host(struct vchiq_mmal_instance *instance,
+ struct vchiq_mmal_port *port, struct mmal_buffer *buf)
+{
+ struct mmal_msg_context *msg_context;
+ struct mmal_msg m;
+ int ret;
+
+ pr_debug("instance:%p buffer:%p\n", instance->handle, buf);
+
+ /* bulk mutex stops other bulk operations while we
+ * have a receive in progress
+ */
+ if (mutex_lock_interruptible(&instance->bulk_mutex))
+ return -EINTR;
+
+ /* get context */
+ msg_context = get_msg_context(instance);
+ if (IS_ERR(msg_context)) {
+ ret = PTR_ERR(msg_context);
+ goto unlock;
+ }
+
+ /* store bulk message context for when data arrives */
+ msg_context->u.bulk.instance = instance;
+ msg_context->u.bulk.port = port;
+ msg_context->u.bulk.buffer = NULL; /* not valid until bulk xfer */
+ msg_context->u.bulk.buffer_used = 0;
+
+ /* initialise work structure ready to schedule callback */
+ INIT_WORK(&msg_context->u.bulk.work, buffer_work_cb);
+
+ /* prep the buffer from host message */
+ memset(&m, 0xbc, sizeof(m)); /* just to make debug clearer */
+
+ m.h.type = MMAL_MSG_TYPE_BUFFER_FROM_HOST;
+ m.h.magic = MMAL_MAGIC;
+ m.h.context = msg_context->handle;
+ m.h.status = 0;
+
+ /* drvbuf is our private data passed back */
+ m.u.buffer_from_host.drvbuf.magic = MMAL_MAGIC;
+ m.u.buffer_from_host.drvbuf.component_handle = port->component->handle;
+ m.u.buffer_from_host.drvbuf.port_handle = port->handle;
+ m.u.buffer_from_host.drvbuf.client_context = msg_context->handle;
+
+ /* buffer header */
+ m.u.buffer_from_host.buffer_header.cmd = 0;
+ m.u.buffer_from_host.buffer_header.data =
+ (u32)(unsigned long)buf->buffer;
+ m.u.buffer_from_host.buffer_header.alloc_size = buf->buffer_size;
+ m.u.buffer_from_host.buffer_header.length = 0; /* nothing used yet */
+ m.u.buffer_from_host.buffer_header.offset = 0; /* no offset */
+ m.u.buffer_from_host.buffer_header.flags = 0; /* no flags */
+ m.u.buffer_from_host.buffer_header.pts = MMAL_TIME_UNKNOWN;
+ m.u.buffer_from_host.buffer_header.dts = MMAL_TIME_UNKNOWN;
+
+	/* clear buffer type specific data */
+ memset(&m.u.buffer_from_host.buffer_header_type_specific, 0,
+ sizeof(m.u.buffer_from_host.buffer_header_type_specific));
+
+ /* no payload in message */
+ m.u.buffer_from_host.payload_in_message = 0;
+
+ vchi_service_use(instance->handle);
+
+ ret = vchi_queue_kernel_message(instance->handle,
+ &m,
+ sizeof(struct mmal_msg_header) +
+ sizeof(m.u.buffer_from_host));
+
+ if (ret != 0) {
+ release_msg_context(msg_context);
+ /* todo: is this correct error value? */
+ }
+
+ vchi_service_release(instance->handle);
+
+unlock:
+ mutex_unlock(&instance->bulk_mutex);
+
+ return ret;
+}
+
+/* submit a buffer to the mmal service
+ *
+ * the buffer_from_host uses size data from the port's next available
+ * mmal_buffer and deals with there being no buffer available by
+ * incrementing the underflow for later
+ */
+static int port_buffer_from_host(struct vchiq_mmal_instance *instance,
+ struct vchiq_mmal_port *port)
+{
+ int ret;
+ struct mmal_buffer *buf;
+ unsigned long flags = 0;
+
+ if (!port->enabled)
+ return -EINVAL;
+
+ /* peek buffer from queue */
+ spin_lock_irqsave(&port->slock, flags);
+ if (list_empty(&port->buffers)) {
+ port->buffer_underflow++;
+ spin_unlock_irqrestore(&port->slock, flags);
+ return -ENOSPC;
+ }
+
+ buf = list_entry(port->buffers.next, struct mmal_buffer, list);
+
+ spin_unlock_irqrestore(&port->slock, flags);
+
+ /* issue buffer to mmal service */
+ ret = buffer_from_host(instance, port, buf);
+ if (ret) {
+ pr_err("adding buffer header failed\n");
+ /* todo: how should this be dealt with */
+ }
+
+ return ret;
+}
+
+/* deals with receipt of buffer to host message */
+static void buffer_to_host_cb(struct vchiq_mmal_instance *instance,
+ struct mmal_msg *msg, u32 msg_len)
+{
+ struct mmal_msg_context *msg_context;
+ u32 handle;
+
+ pr_debug("buffer_to_host_cb: instance:%p msg:%p msg_len:%d\n",
+ instance, msg, msg_len);
+
+ if (msg->u.buffer_from_host.drvbuf.magic == MMAL_MAGIC) {
+ handle = msg->u.buffer_from_host.drvbuf.client_context;
+ msg_context = lookup_msg_context(instance, handle);
+
+ if (!msg_context) {
+ pr_err("drvbuf.client_context(%u) is invalid\n",
+ handle);
+ return;
+ }
+ } else {
+ pr_err("MMAL_MSG_TYPE_BUFFER_TO_HOST with bad magic\n");
+ return;
+ }
+
+ if (msg->h.status != MMAL_MSG_STATUS_SUCCESS) {
+ /* message reception had an error */
+ pr_warn("error %d in reply\n", msg->h.status);
+
+ msg_context->u.bulk.status = msg->h.status;
+
+ } else if (msg->u.buffer_from_host.buffer_header.length == 0) {
+ /* empty buffer */
+ if (msg->u.buffer_from_host.buffer_header.flags &
+ MMAL_BUFFER_HEADER_FLAG_EOS) {
+ msg_context->u.bulk.status =
+ dummy_bulk_receive(instance, msg_context);
+ if (msg_context->u.bulk.status == 0)
+ return; /* successful bulk submission, bulk
+ * completion will trigger callback
+ */
+ } else {
+ /* do callback with empty buffer - not EOS though */
+ msg_context->u.bulk.status = 0;
+ msg_context->u.bulk.buffer_used = 0;
+ }
+ } else if (msg->u.buffer_from_host.payload_in_message == 0) {
+ /* data is not in message, queue a bulk receive */
+ msg_context->u.bulk.status =
+ bulk_receive(instance, msg, msg_context);
+ if (msg_context->u.bulk.status == 0)
+ return; /* successful bulk submission, bulk
+ * completion will trigger callback
+ */
+
+ /* failed to submit buffer, this will end badly */
+ pr_err("error %d on bulk submission\n",
+ msg_context->u.bulk.status);
+
+ } else if (msg->u.buffer_from_host.payload_in_message <=
+ MMAL_VC_SHORT_DATA) {
+ /* data payload within message */
+ msg_context->u.bulk.status = inline_receive(instance, msg,
+ msg_context);
+ } else {
+ pr_err("message with invalid short payload\n");
+
+ /* signal error */
+ msg_context->u.bulk.status = -EINVAL;
+ msg_context->u.bulk.buffer_used =
+ msg->u.buffer_from_host.payload_in_message;
+ }
+
+ /* replace the buffer header */
+ port_buffer_from_host(instance, msg_context->u.bulk.port);
+
+ /* schedule the port callback */
+ schedule_work(&msg_context->u.bulk.work);
+}
+
+static void bulk_receive_cb(struct vchiq_mmal_instance *instance,
+ struct mmal_msg_context *msg_context)
+{
+ /* bulk receive operation complete */
+ mutex_unlock(&msg_context->u.bulk.instance->bulk_mutex);
+
+ /* replace the buffer header */
+ port_buffer_from_host(msg_context->u.bulk.instance,
+ msg_context->u.bulk.port);
+
+ msg_context->u.bulk.status = 0;
+
+ /* schedule the port callback */
+ schedule_work(&msg_context->u.bulk.work);
+}
+
+static void bulk_abort_cb(struct vchiq_mmal_instance *instance,
+ struct mmal_msg_context *msg_context)
+{
+ pr_err("%s: bulk ABORTED msg_context:%p\n", __func__, msg_context);
+
+ /* bulk receive operation complete */
+ mutex_unlock(&msg_context->u.bulk.instance->bulk_mutex);
+
+ /* replace the buffer header */
+ port_buffer_from_host(msg_context->u.bulk.instance,
+ msg_context->u.bulk.port);
+
+ msg_context->u.bulk.status = -EINTR;
+
+ schedule_work(&msg_context->u.bulk.work);
+}
+
+/* incoming event service callback */
+static void service_callback(void *param,
+ const VCHI_CALLBACK_REASON_T reason,
+ void *bulk_ctx)
+{
+ struct vchiq_mmal_instance *instance = param;
+ int status;
+ u32 msg_len;
+ struct mmal_msg *msg;
+ VCHI_HELD_MSG_T msg_handle;
+ struct mmal_msg_context *msg_context;
+
+ if (!instance) {
+ pr_err("Message callback passed NULL instance\n");
+ return;
+ }
+
+ switch (reason) {
+ case VCHI_CALLBACK_MSG_AVAILABLE:
+ status = vchi_msg_hold(instance->handle, (void **)&msg,
+ &msg_len, VCHI_FLAGS_NONE, &msg_handle);
+ if (status) {
+ pr_err("Unable to dequeue a message (%d)\n", status);
+ break;
+ }
+
+ DBG_DUMP_MSG(msg, msg_len, "<<< reply message");
+
+ /* handling is different for buffer messages */
+ switch (msg->h.type) {
+ case MMAL_MSG_TYPE_BUFFER_FROM_HOST:
+ vchi_held_msg_release(&msg_handle);
+ break;
+
+ case MMAL_MSG_TYPE_EVENT_TO_HOST:
+ event_to_host_cb(instance, msg, msg_len);
+ vchi_held_msg_release(&msg_handle);
+
+ break;
+
+ case MMAL_MSG_TYPE_BUFFER_TO_HOST:
+ buffer_to_host_cb(instance, msg, msg_len);
+ vchi_held_msg_release(&msg_handle);
+ break;
+
+ default:
+ /* messages dependent on header context to complete */
+ if (!msg->h.context) {
+ pr_err("received message context was null!\n");
+ vchi_held_msg_release(&msg_handle);
+ break;
+ }
+
+ msg_context = lookup_msg_context(instance,
+ msg->h.context);
+ if (!msg_context) {
+ pr_err("received invalid message context %u!\n",
+ msg->h.context);
+ vchi_held_msg_release(&msg_handle);
+ break;
+ }
+
+ /* fill in context values */
+ msg_context->u.sync.msg_handle = msg_handle;
+ msg_context->u.sync.msg = msg;
+ msg_context->u.sync.msg_len = msg_len;
+
+			/* todo: should this check (completion_done()
+			 * == 1) for no one waiting? Or do we need a
+			 * flag to tell us the completion has been
+			 * interrupted so we can free the message and
+			 * its context? This probably also solves the
+			 * todo below about a message arriving after
+			 * interruption.
+			 */
+
+ /* complete message so caller knows it happened */
+ complete(&msg_context->u.sync.cmplt);
+ break;
+ }
+
+ break;
+
+ case VCHI_CALLBACK_BULK_RECEIVED:
+ bulk_receive_cb(instance, bulk_ctx);
+ break;
+
+ case VCHI_CALLBACK_BULK_RECEIVE_ABORTED:
+ bulk_abort_cb(instance, bulk_ctx);
+ break;
+
+ case VCHI_CALLBACK_SERVICE_CLOSED:
+ /* TODO: consider if this requires action if received when
+ * driver is not explicitly closing the service
+ */
+ break;
+
+ default:
+ pr_err("Received unhandled message reason %d\n", reason);
+ break;
+ }
+}
+
+static int send_synchronous_mmal_msg(struct vchiq_mmal_instance *instance,
+ struct mmal_msg *msg,
+ unsigned int payload_len,
+ struct mmal_msg **msg_out,
+ VCHI_HELD_MSG_T *msg_handle_out)
+{
+ struct mmal_msg_context *msg_context;
+ int ret;
+
+ /* payload size must not cause message to exceed max size */
+ if (payload_len >
+ (MMAL_MSG_MAX_SIZE - sizeof(struct mmal_msg_header))) {
+ pr_err("payload length %d exceeds max:%d\n", payload_len,
+ (int)(MMAL_MSG_MAX_SIZE -
+ sizeof(struct mmal_msg_header)));
+ return -EINVAL;
+ }
+
+ msg_context = get_msg_context(instance);
+ if (IS_ERR(msg_context))
+ return PTR_ERR(msg_context);
+
+ init_completion(&msg_context->u.sync.cmplt);
+
+ msg->h.magic = MMAL_MAGIC;
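+	/* the context handle is echoed back in the reply header and is
+	 * used by service_callback() to look this context up again
+	 */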
+ msg->h.context = msg_context->handle;
+ msg->h.status = 0;
+
+ DBG_DUMP_MSG(msg, (sizeof(struct mmal_msg_header) + payload_len),
+ ">>> sync message");
+
+ vchi_service_use(instance->handle);
+
+ ret = vchi_queue_kernel_message(instance->handle,
+ msg,
+ sizeof(struct mmal_msg_header) +
+ payload_len);
+
+ vchi_service_release(instance->handle);
+
+ if (ret) {
+ pr_err("error %d queuing message\n", ret);
+ release_msg_context(msg_context);
+ return ret;
+ }
+
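+	/* wait up to three seconds for videocore to answer the message */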
+ ret = wait_for_completion_timeout(&msg_context->u.sync.cmplt, 3 * HZ);
+ if (ret <= 0) {
+ pr_err("error %d waiting for sync completion\n", ret);
+ if (ret == 0)
+ ret = -ETIME;
+ /* todo: what happens if the message arrives after aborting */
+ release_msg_context(msg_context);
+ return ret;
+ }
+
+ *msg_out = msg_context->u.sync.msg;
+ *msg_handle_out = msg_context->u.sync.msg_handle;
+ release_msg_context(msg_context);
+
+ return 0;
+}
+
+static void dump_port_info(struct vchiq_mmal_port *port)
+{
+ pr_debug("port handle:0x%x enabled:%d\n", port->handle, port->enabled);
+
+ pr_debug("buffer minimum num:%d size:%d align:%d\n",
+ port->minimum_buffer.num,
+ port->minimum_buffer.size, port->minimum_buffer.alignment);
+
+ pr_debug("buffer recommended num:%d size:%d align:%d\n",
+ port->recommended_buffer.num,
+ port->recommended_buffer.size,
+ port->recommended_buffer.alignment);
+
+ pr_debug("buffer current values num:%d size:%d align:%d\n",
+ port->current_buffer.num,
+ port->current_buffer.size, port->current_buffer.alignment);
+
+ pr_debug("elementary stream: type:%d encoding:0x%x variant:0x%x\n",
+ port->format.type,
+ port->format.encoding, port->format.encoding_variant);
+
+ pr_debug(" bitrate:%d flags:0x%x\n",
+ port->format.bitrate, port->format.flags);
+
+ if (port->format.type == MMAL_ES_TYPE_VIDEO) {
+ pr_debug
+ ("es video format: width:%d height:%d colourspace:0x%x\n",
+ port->es.video.width, port->es.video.height,
+ port->es.video.color_space);
+
+ pr_debug(" : crop xywh %d,%d,%d,%d\n",
+ port->es.video.crop.x,
+ port->es.video.crop.y,
+ port->es.video.crop.width, port->es.video.crop.height);
+ pr_debug(" : framerate %d/%d aspect %d/%d\n",
+ port->es.video.frame_rate.num,
+ port->es.video.frame_rate.den,
+ port->es.video.par.num, port->es.video.par.den);
+ }
+}
+
+static void port_to_mmal_msg(struct vchiq_mmal_port *port, struct mmal_port *p)
+{
+ /* todo do readonly fields need setting at all? */
+ p->type = port->type;
+ p->index = port->index;
+ p->index_all = 0;
+ p->is_enabled = port->enabled;
+ p->buffer_num_min = port->minimum_buffer.num;
+ p->buffer_size_min = port->minimum_buffer.size;
+ p->buffer_alignment_min = port->minimum_buffer.alignment;
+ p->buffer_num_recommended = port->recommended_buffer.num;
+ p->buffer_size_recommended = port->recommended_buffer.size;
+
+ /* only three writable fields in a port */
+ p->buffer_num = port->current_buffer.num;
+ p->buffer_size = port->current_buffer.size;
+ p->userdata = (u32)(unsigned long)port;
+}
+
+static int port_info_set(struct vchiq_mmal_instance *instance,
+ struct vchiq_mmal_port *port)
+{
+ int ret;
+ struct mmal_msg m;
+ struct mmal_msg *rmsg;
+ VCHI_HELD_MSG_T rmsg_handle;
+
+ pr_debug("setting port info port %p\n", port);
+ if (!port)
+ return -1;
+ dump_port_info(port);
+
+ m.h.type = MMAL_MSG_TYPE_PORT_INFO_SET;
+
+ m.u.port_info_set.component_handle = port->component->handle;
+ m.u.port_info_set.port_type = port->type;
+ m.u.port_info_set.port_index = port->index;
+
+ port_to_mmal_msg(port, &m.u.port_info_set.port);
+
+ /* elementary stream format setup */
+ m.u.port_info_set.format.type = port->format.type;
+ m.u.port_info_set.format.encoding = port->format.encoding;
+ m.u.port_info_set.format.encoding_variant =
+ port->format.encoding_variant;
+ m.u.port_info_set.format.bitrate = port->format.bitrate;
+ m.u.port_info_set.format.flags = port->format.flags;
+
+ memcpy(&m.u.port_info_set.es, &port->es,
+ sizeof(union mmal_es_specific_format));
+
+ m.u.port_info_set.format.extradata_size = port->format.extradata_size;
+ memcpy(&m.u.port_info_set.extradata, port->format.extradata,
+ port->format.extradata_size);
+
+ ret = send_synchronous_mmal_msg(instance, &m,
+ sizeof(m.u.port_info_set),
+ &rmsg, &rmsg_handle);
+ if (ret)
+ return ret;
+
+ if (rmsg->h.type != MMAL_MSG_TYPE_PORT_INFO_SET) {
+ /* got an unexpected message type in reply */
+ ret = -EINVAL;
+ goto release_msg;
+ }
+
+ /* return operation status */
+ ret = -rmsg->u.port_info_get_reply.status;
+
+ pr_debug("%s:result:%d component:0x%x port:%d\n", __func__, ret,
+ port->component->handle, port->handle);
+
+release_msg:
+ vchi_held_msg_release(&rmsg_handle);
+
+ return ret;
+}
+
+/* use port info get message to retrieve port information */
+static int port_info_get(struct vchiq_mmal_instance *instance,
+ struct vchiq_mmal_port *port)
+{
+ int ret;
+ struct mmal_msg m;
+ struct mmal_msg *rmsg;
+ VCHI_HELD_MSG_T rmsg_handle;
+
+ /* port info time */
+ m.h.type = MMAL_MSG_TYPE_PORT_INFO_GET;
+ m.u.port_info_get.component_handle = port->component->handle;
+ m.u.port_info_get.port_type = port->type;
+ m.u.port_info_get.index = port->index;
+
+ ret = send_synchronous_mmal_msg(instance, &m,
+ sizeof(m.u.port_info_get),
+ &rmsg, &rmsg_handle);
+ if (ret)
+ return ret;
+
+ if (rmsg->h.type != MMAL_MSG_TYPE_PORT_INFO_GET) {
+ /* got an unexpected message type in reply */
+ ret = -EINVAL;
+ goto release_msg;
+ }
+
+ /* return operation status */
+ ret = -rmsg->u.port_info_get_reply.status;
+ if (ret != MMAL_MSG_STATUS_SUCCESS)
+ goto release_msg;
+
+ if (rmsg->u.port_info_get_reply.port.is_enabled == 0)
+ port->enabled = false;
+ else
+ port->enabled = true;
+
+ /* copy the values out of the message */
+ port->handle = rmsg->u.port_info_get_reply.port_handle;
+
+	/* port type and index are cached for use on port info set
+	 * because that message does not take a port handle
+	 */
+ port->type = rmsg->u.port_info_get_reply.port_type;
+ port->index = rmsg->u.port_info_get_reply.port_index;
+
+ port->minimum_buffer.num =
+ rmsg->u.port_info_get_reply.port.buffer_num_min;
+ port->minimum_buffer.size =
+ rmsg->u.port_info_get_reply.port.buffer_size_min;
+ port->minimum_buffer.alignment =
+ rmsg->u.port_info_get_reply.port.buffer_alignment_min;
+
+ port->recommended_buffer.alignment =
+ rmsg->u.port_info_get_reply.port.buffer_alignment_min;
+ port->recommended_buffer.num =
+ rmsg->u.port_info_get_reply.port.buffer_num_recommended;
+
+ port->current_buffer.num = rmsg->u.port_info_get_reply.port.buffer_num;
+ port->current_buffer.size =
+ rmsg->u.port_info_get_reply.port.buffer_size;
+
+ /* stream format */
+ port->format.type = rmsg->u.port_info_get_reply.format.type;
+ port->format.encoding = rmsg->u.port_info_get_reply.format.encoding;
+ port->format.encoding_variant =
+ rmsg->u.port_info_get_reply.format.encoding_variant;
+ port->format.bitrate = rmsg->u.port_info_get_reply.format.bitrate;
+ port->format.flags = rmsg->u.port_info_get_reply.format.flags;
+
+ /* elementary stream format */
+ memcpy(&port->es,
+ &rmsg->u.port_info_get_reply.es,
+ sizeof(union mmal_es_specific_format));
+ port->format.es = &port->es;
+
+ port->format.extradata_size =
+ rmsg->u.port_info_get_reply.format.extradata_size;
+ memcpy(port->format.extradata,
+ rmsg->u.port_info_get_reply.extradata,
+ port->format.extradata_size);
+
+ pr_debug("received port info\n");
+ dump_port_info(port);
+
+release_msg:
+
+ pr_debug("%s:result:%d component:0x%x port:%d\n",
+ __func__, ret, port->component->handle, port->handle);
+
+ vchi_held_msg_release(&rmsg_handle);
+
+ return ret;
+}
+
+/* create component on vc */
+static int create_component(struct vchiq_mmal_instance *instance,
+ struct vchiq_mmal_component *component,
+ const char *name)
+{
+ int ret;
+ struct mmal_msg m;
+ struct mmal_msg *rmsg;
+ VCHI_HELD_MSG_T rmsg_handle;
+
+ /* build component create message */
+ m.h.type = MMAL_MSG_TYPE_COMPONENT_CREATE;
+ m.u.component_create.client_component = (u32)(unsigned long)component;
+ strncpy(m.u.component_create.name, name,
+ sizeof(m.u.component_create.name));
+
+ ret = send_synchronous_mmal_msg(instance, &m,
+ sizeof(m.u.component_create),
+ &rmsg, &rmsg_handle);
+ if (ret)
+ return ret;
+
+ if (rmsg->h.type != m.h.type) {
+ /* got an unexpected message type in reply */
+ ret = -EINVAL;
+ goto release_msg;
+ }
+
+ ret = -rmsg->u.component_create_reply.status;
+ if (ret != MMAL_MSG_STATUS_SUCCESS)
+ goto release_msg;
+
+ /* a valid component response received */
+ component->handle = rmsg->u.component_create_reply.component_handle;
+ component->inputs = rmsg->u.component_create_reply.input_num;
+ component->outputs = rmsg->u.component_create_reply.output_num;
+ component->clocks = rmsg->u.component_create_reply.clock_num;
+
+ pr_debug("Component handle:0x%x in:%d out:%d clock:%d\n",
+ component->handle,
+ component->inputs, component->outputs, component->clocks);
+
+release_msg:
+ vchi_held_msg_release(&rmsg_handle);
+
+ return ret;
+}
+
+/* destroys a component on vc */
+static int destroy_component(struct vchiq_mmal_instance *instance,
+ struct vchiq_mmal_component *component)
+{
+ int ret;
+ struct mmal_msg m;
+ struct mmal_msg *rmsg;
+ VCHI_HELD_MSG_T rmsg_handle;
+
+ m.h.type = MMAL_MSG_TYPE_COMPONENT_DESTROY;
+ m.u.component_destroy.component_handle = component->handle;
+
+ ret = send_synchronous_mmal_msg(instance, &m,
+ sizeof(m.u.component_destroy),
+ &rmsg, &rmsg_handle);
+ if (ret)
+ return ret;
+
+ if (rmsg->h.type != m.h.type) {
+ /* got an unexpected message type in reply */
+ ret = -EINVAL;
+ goto release_msg;
+ }
+
+ ret = -rmsg->u.component_destroy_reply.status;
+
+release_msg:
+
+ vchi_held_msg_release(&rmsg_handle);
+
+ return ret;
+}
+
+/* enable a component on vc */
+static int enable_component(struct vchiq_mmal_instance *instance,
+ struct vchiq_mmal_component *component)
+{
+ int ret;
+ struct mmal_msg m;
+ struct mmal_msg *rmsg;
+ VCHI_HELD_MSG_T rmsg_handle;
+
+ m.h.type = MMAL_MSG_TYPE_COMPONENT_ENABLE;
+ m.u.component_enable.component_handle = component->handle;
+
+ ret = send_synchronous_mmal_msg(instance, &m,
+ sizeof(m.u.component_enable),
+ &rmsg, &rmsg_handle);
+ if (ret)
+ return ret;
+
+ if (rmsg->h.type != m.h.type) {
+ /* got an unexpected message type in reply */
+ ret = -EINVAL;
+ goto release_msg;
+ }
+
+ ret = -rmsg->u.component_enable_reply.status;
+
+release_msg:
+ vchi_held_msg_release(&rmsg_handle);
+
+ return ret;
+}
+
+/* disable a component on vc */
+static int disable_component(struct vchiq_mmal_instance *instance,
+ struct vchiq_mmal_component *component)
+{
+ int ret;
+ struct mmal_msg m;
+ struct mmal_msg *rmsg;
+ VCHI_HELD_MSG_T rmsg_handle;
+
+ m.h.type = MMAL_MSG_TYPE_COMPONENT_DISABLE;
+ m.u.component_disable.component_handle = component->handle;
+
+ ret = send_synchronous_mmal_msg(instance, &m,
+ sizeof(m.u.component_disable),
+ &rmsg, &rmsg_handle);
+ if (ret)
+ return ret;
+
+ if (rmsg->h.type != m.h.type) {
+ /* got an unexpected message type in reply */
+ ret = -EINVAL;
+ goto release_msg;
+ }
+
+ ret = -rmsg->u.component_disable_reply.status;
+
+release_msg:
+
+ vchi_held_msg_release(&rmsg_handle);
+
+ return ret;
+}
+
+/* get version of mmal implementation */
+static int get_version(struct vchiq_mmal_instance *instance,
+ u32 *major_out, u32 *minor_out)
+{
+ int ret;
+ struct mmal_msg m;
+ struct mmal_msg *rmsg;
+ VCHI_HELD_MSG_T rmsg_handle;
+
+ m.h.type = MMAL_MSG_TYPE_GET_VERSION;
+
+ ret = send_synchronous_mmal_msg(instance, &m,
+ sizeof(m.u.version),
+ &rmsg, &rmsg_handle);
+ if (ret)
+ return ret;
+
+ if (rmsg->h.type != m.h.type) {
+ /* got an unexpected message type in reply */
+ ret = -EINVAL;
+ goto release_msg;
+ }
+
+ *major_out = rmsg->u.version.major;
+ *minor_out = rmsg->u.version.minor;
+
+release_msg:
+ vchi_held_msg_release(&rmsg_handle);
+
+ return ret;
+}
+
+/* do a port action with a port as a parameter */
+static int port_action_port(struct vchiq_mmal_instance *instance,
+ struct vchiq_mmal_port *port,
+ enum mmal_msg_port_action_type action_type)
+{
+ int ret;
+ struct mmal_msg m;
+ struct mmal_msg *rmsg;
+ VCHI_HELD_MSG_T rmsg_handle;
+
+ m.h.type = MMAL_MSG_TYPE_PORT_ACTION;
+ m.u.port_action_port.component_handle = port->component->handle;
+ m.u.port_action_port.port_handle = port->handle;
+ m.u.port_action_port.action = action_type;
+
+ port_to_mmal_msg(port, &m.u.port_action_port.port);
+
+ ret = send_synchronous_mmal_msg(instance, &m,
+ sizeof(m.u.port_action_port),
+ &rmsg, &rmsg_handle);
+ if (ret)
+ return ret;
+
+ if (rmsg->h.type != MMAL_MSG_TYPE_PORT_ACTION) {
+ /* got an unexpected message type in reply */
+ ret = -EINVAL;
+ goto release_msg;
+ }
+
+ ret = -rmsg->u.port_action_reply.status;
+
+ pr_debug("%s:result:%d component:0x%x port:%d action:%s(%d)\n",
+ __func__,
+ ret, port->component->handle, port->handle,
+ port_action_type_names[action_type], action_type);
+
+release_msg:
+ vchi_held_msg_release(&rmsg_handle);
+
+ return ret;
+}
+
+/* do a port action with handles as parameters */
+static int port_action_handle(struct vchiq_mmal_instance *instance,
+ struct vchiq_mmal_port *port,
+ enum mmal_msg_port_action_type action_type,
+ u32 connect_component_handle,
+ u32 connect_port_handle)
+{
+ int ret;
+ struct mmal_msg m;
+ struct mmal_msg *rmsg;
+ VCHI_HELD_MSG_T rmsg_handle;
+
+ m.h.type = MMAL_MSG_TYPE_PORT_ACTION;
+
+ m.u.port_action_handle.component_handle = port->component->handle;
+ m.u.port_action_handle.port_handle = port->handle;
+ m.u.port_action_handle.action = action_type;
+
+ m.u.port_action_handle.connect_component_handle =
+ connect_component_handle;
+ m.u.port_action_handle.connect_port_handle = connect_port_handle;
+
+ ret = send_synchronous_mmal_msg(instance, &m,
+ sizeof(m.u.port_action_handle),
+ &rmsg, &rmsg_handle);
+ if (ret)
+ return ret;
+
+ if (rmsg->h.type != MMAL_MSG_TYPE_PORT_ACTION) {
+ /* got an unexpected message type in reply */
+ ret = -EINVAL;
+ goto release_msg;
+ }
+
+ ret = -rmsg->u.port_action_reply.status;
+
+ pr_debug("%s:result:%d component:0x%x port:%d action:%s(%d)" \
+ " connect component:0x%x connect port:%d\n",
+ __func__,
+ ret, port->component->handle, port->handle,
+ port_action_type_names[action_type],
+ action_type, connect_component_handle, connect_port_handle);
+
+release_msg:
+ vchi_held_msg_release(&rmsg_handle);
+
+ return ret;
+}
+
+static int port_parameter_set(struct vchiq_mmal_instance *instance,
+ struct vchiq_mmal_port *port,
+ u32 parameter_id, void *value, u32 value_size)
+{
+ int ret;
+ struct mmal_msg m;
+ struct mmal_msg *rmsg;
+ VCHI_HELD_MSG_T rmsg_handle;
+
+ m.h.type = MMAL_MSG_TYPE_PORT_PARAMETER_SET;
+
+ m.u.port_parameter_set.component_handle = port->component->handle;
+ m.u.port_parameter_set.port_handle = port->handle;
+ m.u.port_parameter_set.id = parameter_id;
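+	/* the size field covers the id and size words plus the value;
+	 * the payload sent below additionally carries the component and
+	 * port handles, hence 4 * sizeof(u32) + value_size
+	 */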
+ m.u.port_parameter_set.size = (2 * sizeof(u32)) + value_size;
+ memcpy(&m.u.port_parameter_set.value, value, value_size);
+
+ ret = send_synchronous_mmal_msg(instance, &m,
+ (4 * sizeof(u32)) + value_size,
+ &rmsg, &rmsg_handle);
+ if (ret)
+ return ret;
+
+ if (rmsg->h.type != MMAL_MSG_TYPE_PORT_PARAMETER_SET) {
+ /* got an unexpected message type in reply */
+ ret = -EINVAL;
+ goto release_msg;
+ }
+
+ ret = -rmsg->u.port_parameter_set_reply.status;
+
+ pr_debug("%s:result:%d component:0x%x port:%d parameter:%d\n",
+ __func__,
+ ret, port->component->handle, port->handle, parameter_id);
+
+release_msg:
+ vchi_held_msg_release(&rmsg_handle);
+
+ return ret;
+}
+
+static int port_parameter_get(struct vchiq_mmal_instance *instance,
+ struct vchiq_mmal_port *port,
+ u32 parameter_id, void *value, u32 *value_size)
+{
+ int ret;
+ struct mmal_msg m;
+ struct mmal_msg *rmsg;
+ VCHI_HELD_MSG_T rmsg_handle;
+
+ m.h.type = MMAL_MSG_TYPE_PORT_PARAMETER_GET;
+
+ m.u.port_parameter_get.component_handle = port->component->handle;
+ m.u.port_parameter_get.port_handle = port->handle;
+ m.u.port_parameter_get.id = parameter_id;
+ m.u.port_parameter_get.size = (2 * sizeof(u32)) + *value_size;
+
+ ret = send_synchronous_mmal_msg(instance, &m,
+ sizeof(struct
+ mmal_msg_port_parameter_get),
+ &rmsg, &rmsg_handle);
+ if (ret)
+ return ret;
+
+ if (rmsg->h.type != MMAL_MSG_TYPE_PORT_PARAMETER_GET) {
+ /* got an unexpected message type in reply */
+ pr_err("Incorrect reply type %d\n", rmsg->h.type);
+ ret = -EINVAL;
+ goto release_msg;
+ }
+
+ ret = -rmsg->u.port_parameter_get_reply.status;
+ /* port_parameter_get_reply.size includes the header,
+ * whilst *value_size doesn't.
+ */
+ rmsg->u.port_parameter_get_reply.size -= (2 * sizeof(u32));
+
+ if (ret || rmsg->u.port_parameter_get_reply.size > *value_size) {
+ /* Copy only as much as we have space for
+ * but report true size of parameter
+ */
+ memcpy(value, &rmsg->u.port_parameter_get_reply.value,
+ *value_size);
+ *value_size = rmsg->u.port_parameter_get_reply.size;
+	} else {
+		memcpy(value, &rmsg->u.port_parameter_get_reply.value,
+		       rmsg->u.port_parameter_get_reply.size);
+	}
+
+ pr_debug("%s:result:%d component:0x%x port:%d parameter:%d\n", __func__,
+ ret, port->component->handle, port->handle, parameter_id);
+
+release_msg:
+ vchi_held_msg_release(&rmsg_handle);
+
+ return ret;
+}
+
+/* disables a port and drains buffers from it */
+static int port_disable(struct vchiq_mmal_instance *instance,
+ struct vchiq_mmal_port *port)
+{
+ int ret;
+ struct list_head *q, *buf_head;
+ unsigned long flags = 0;
+
+ if (!port->enabled)
+ return 0;
+
+ port->enabled = false;
+
+ ret = port_action_port(instance, port,
+ MMAL_MSG_PORT_ACTION_TYPE_DISABLE);
+ if (ret == 0) {
+ /* drain all queued buffers on port */
+ spin_lock_irqsave(&port->slock, flags);
+
+ list_for_each_safe(buf_head, q, &port->buffers) {
+ struct mmal_buffer *mmalbuf;
+
+ mmalbuf = list_entry(buf_head, struct mmal_buffer,
+ list);
+ list_del(buf_head);
+ if (port->buffer_cb)
+ port->buffer_cb(instance,
+ port, 0, mmalbuf, 0, 0,
+ MMAL_TIME_UNKNOWN,
+ MMAL_TIME_UNKNOWN);
+ }
+
+ spin_unlock_irqrestore(&port->slock, flags);
+
+ ret = port_info_get(instance, port);
+ }
+
+ return ret;
+}
+
+/* enable a port */
+static int port_enable(struct vchiq_mmal_instance *instance,
+ struct vchiq_mmal_port *port)
+{
+ unsigned int hdr_count;
+ struct list_head *buf_head;
+ int ret;
+
+ if (port->enabled)
+ return 0;
+
+ /* ensure there are enough buffers queued to cover the buffer headers */
+ if (port->buffer_cb) {
+ hdr_count = 0;
+ list_for_each(buf_head, &port->buffers) {
+ hdr_count++;
+ }
+ if (hdr_count < port->current_buffer.num)
+ return -ENOSPC;
+ }
+
+ ret = port_action_port(instance, port,
+ MMAL_MSG_PORT_ACTION_TYPE_ENABLE);
+ if (ret)
+ goto done;
+
+ port->enabled = true;
+
+ if (port->buffer_cb) {
+ /* send buffer headers to videocore */
+ hdr_count = 1;
+ list_for_each(buf_head, &port->buffers) {
+ struct mmal_buffer *mmalbuf;
+
+ mmalbuf = list_entry(buf_head, struct mmal_buffer,
+ list);
+ ret = buffer_from_host(instance, port, mmalbuf);
+ if (ret)
+ goto done;
+
+ hdr_count++;
+ if (hdr_count > port->current_buffer.num)
+ break;
+ }
+ }
+
+ ret = port_info_get(instance, port);
+
+done:
+ return ret;
+}
+
+/* ------------------------------------------------------------------
+ * Exported API
+ *------------------------------------------------------------------
+ */
+
+int vchiq_mmal_port_set_format(struct vchiq_mmal_instance *instance,
+ struct vchiq_mmal_port *port)
+{
+ int ret;
+
+ if (mutex_lock_interruptible(&instance->vchiq_mutex))
+ return -EINTR;
+
+ ret = port_info_set(instance, port);
+ if (ret)
+ goto release_unlock;
+
+ /* read what has actually been set */
+ ret = port_info_get(instance, port);
+
+release_unlock:
+ mutex_unlock(&instance->vchiq_mutex);
+
+ return ret;
+}
+
+int vchiq_mmal_port_parameter_set(struct vchiq_mmal_instance *instance,
+ struct vchiq_mmal_port *port,
+ u32 parameter, void *value, u32 value_size)
+{
+ int ret;
+
+ if (mutex_lock_interruptible(&instance->vchiq_mutex))
+ return -EINTR;
+
+ ret = port_parameter_set(instance, port, parameter, value, value_size);
+
+ mutex_unlock(&instance->vchiq_mutex);
+
+ return ret;
+}
+
+int vchiq_mmal_port_parameter_get(struct vchiq_mmal_instance *instance,
+ struct vchiq_mmal_port *port,
+ u32 parameter, void *value, u32 *value_size)
+{
+ int ret;
+
+ if (mutex_lock_interruptible(&instance->vchiq_mutex))
+ return -EINTR;
+
+ ret = port_parameter_get(instance, port, parameter, value, value_size);
+
+ mutex_unlock(&instance->vchiq_mutex);
+
+ return ret;
+}
+
+/* enable a port
+ *
+ * enables a port and queues buffers for satisfying callbacks if we
+ * provide a callback handler
+ */
+int vchiq_mmal_port_enable(struct vchiq_mmal_instance *instance,
+ struct vchiq_mmal_port *port,
+ vchiq_mmal_buffer_cb buffer_cb)
+{
+ int ret;
+
+ if (mutex_lock_interruptible(&instance->vchiq_mutex))
+ return -EINTR;
+
+ /* already enabled - noop */
+ if (port->enabled) {
+ ret = 0;
+ goto unlock;
+ }
+
+ port->buffer_cb = buffer_cb;
+
+ ret = port_enable(instance, port);
+
+unlock:
+ mutex_unlock(&instance->vchiq_mutex);
+
+ return ret;
+}
+
+int vchiq_mmal_port_disable(struct vchiq_mmal_instance *instance,
+ struct vchiq_mmal_port *port)
+{
+ int ret;
+
+ if (mutex_lock_interruptible(&instance->vchiq_mutex))
+ return -EINTR;
+
+ if (!port->enabled) {
+ mutex_unlock(&instance->vchiq_mutex);
+ return 0;
+ }
+
+ ret = port_disable(instance, port);
+
+ mutex_unlock(&instance->vchiq_mutex);
+
+ return ret;
+}
+
+/* ports will be connected in a tunneled manner so data buffers
+ * are not handled by the client.
+ */
+int vchiq_mmal_port_connect_tunnel(struct vchiq_mmal_instance *instance,
+ struct vchiq_mmal_port *src,
+ struct vchiq_mmal_port *dst)
+{
+ int ret;
+
+ if (mutex_lock_interruptible(&instance->vchiq_mutex))
+ return -EINTR;
+
+ /* disconnect ports if connected */
+ if (src->connected) {
+ ret = port_disable(instance, src);
+ if (ret) {
+ pr_err("failed disabling src port(%d)\n", ret);
+ goto release_unlock;
+ }
+
+ /* do not need to disable the destination port as they
+ * are connected and it is done automatically
+ */
+
+ ret = port_action_handle(instance, src,
+ MMAL_MSG_PORT_ACTION_TYPE_DISCONNECT,
+ src->connected->component->handle,
+ src->connected->handle);
+ if (ret < 0) {
+ pr_err("failed disconnecting src port\n");
+ goto release_unlock;
+ }
+ src->connected->enabled = false;
+ src->connected = NULL;
+ }
+
+ if (!dst) {
+ /* do not make new connection */
+ ret = 0;
+ pr_debug("not making new connection\n");
+ goto release_unlock;
+ }
+
+ /* copy src port format to dst */
+ dst->format.encoding = src->format.encoding;
+ dst->es.video.width = src->es.video.width;
+ dst->es.video.height = src->es.video.height;
+ dst->es.video.crop.x = src->es.video.crop.x;
+ dst->es.video.crop.y = src->es.video.crop.y;
+ dst->es.video.crop.width = src->es.video.crop.width;
+ dst->es.video.crop.height = src->es.video.crop.height;
+ dst->es.video.frame_rate.num = src->es.video.frame_rate.num;
+ dst->es.video.frame_rate.den = src->es.video.frame_rate.den;
+
+ /* set new format */
+ ret = port_info_set(instance, dst);
+ if (ret) {
+ pr_debug("setting port info failed\n");
+ goto release_unlock;
+ }
+
+ /* read what has actually been set */
+ ret = port_info_get(instance, dst);
+ if (ret) {
+ pr_debug("read back port info failed\n");
+ goto release_unlock;
+ }
+
+ /* connect two ports together */
+ ret = port_action_handle(instance, src,
+ MMAL_MSG_PORT_ACTION_TYPE_CONNECT,
+ dst->component->handle, dst->handle);
+ if (ret < 0) {
+ pr_debug("connecting port %d:%d to %d:%d failed\n",
+ src->component->handle, src->handle,
+ dst->component->handle, dst->handle);
+ goto release_unlock;
+ }
+ src->connected = dst;
+
+release_unlock:
+
+ mutex_unlock(&instance->vchiq_mutex);
+
+ return ret;
+}
+
+int vchiq_mmal_submit_buffer(struct vchiq_mmal_instance *instance,
+ struct vchiq_mmal_port *port,
+ struct mmal_buffer *buffer)
+{
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&port->slock, flags);
+ list_add_tail(&buffer->list, &port->buffers);
+ spin_unlock_irqrestore(&port->slock, flags);
+
+	/* if the port previously underflowed because it was missing an
+	 * mmal_buffer, the buffer just added can satisfy it, so submit
+	 * a buffer header to the mmal service.
+	 */
+ if (port->buffer_underflow) {
+ port_buffer_from_host(instance, port);
+ port->buffer_underflow--;
+ }
+
+ return 0;
+}
+
+/* Initialise a mmal component and its ports
+ *
+ */
+int vchiq_mmal_component_init(struct vchiq_mmal_instance *instance,
+ const char *name,
+ struct vchiq_mmal_component **component_out)
+{
+ int ret;
+ int idx; /* port index */
+ struct vchiq_mmal_component *component;
+
+ if (mutex_lock_interruptible(&instance->vchiq_mutex))
+ return -EINTR;
+
+ if (instance->component_idx == VCHIQ_MMAL_MAX_COMPONENTS) {
+ ret = -EINVAL; /* todo is this correct error? */
+ goto unlock;
+ }
+
+ component = &instance->component[instance->component_idx];
+
+ ret = create_component(instance, component, name);
+ if (ret < 0)
+ goto unlock;
+
+ /* ports info needs gathering */
+ component->control.type = MMAL_PORT_TYPE_CONTROL;
+ component->control.index = 0;
+ component->control.component = component;
+ spin_lock_init(&component->control.slock);
+ INIT_LIST_HEAD(&component->control.buffers);
+ ret = port_info_get(instance, &component->control);
+ if (ret < 0)
+ goto release_component;
+
+ for (idx = 0; idx < component->inputs; idx++) {
+ component->input[idx].type = MMAL_PORT_TYPE_INPUT;
+ component->input[idx].index = idx;
+ component->input[idx].component = component;
+ spin_lock_init(&component->input[idx].slock);
+ INIT_LIST_HEAD(&component->input[idx].buffers);
+ ret = port_info_get(instance, &component->input[idx]);
+ if (ret < 0)
+ goto release_component;
+ }
+
+ for (idx = 0; idx < component->outputs; idx++) {
+ component->output[idx].type = MMAL_PORT_TYPE_OUTPUT;
+ component->output[idx].index = idx;
+ component->output[idx].component = component;
+ spin_lock_init(&component->output[idx].slock);
+ INIT_LIST_HEAD(&component->output[idx].buffers);
+ ret = port_info_get(instance, &component->output[idx]);
+ if (ret < 0)
+ goto release_component;
+ }
+
+ for (idx = 0; idx < component->clocks; idx++) {
+ component->clock[idx].type = MMAL_PORT_TYPE_CLOCK;
+ component->clock[idx].index = idx;
+ component->clock[idx].component = component;
+ spin_lock_init(&component->clock[idx].slock);
+ INIT_LIST_HEAD(&component->clock[idx].buffers);
+ ret = port_info_get(instance, &component->clock[idx]);
+ if (ret < 0)
+ goto release_component;
+ }
+
+ instance->component_idx++;
+
+ *component_out = component;
+
+ mutex_unlock(&instance->vchiq_mutex);
+
+ return 0;
+
+release_component:
+ destroy_component(instance, component);
+unlock:
+ mutex_unlock(&instance->vchiq_mutex);
+
+ return ret;
+}
+
+/*
+ * cause a mmal component to be destroyed
+ */
+int vchiq_mmal_component_finalise(struct vchiq_mmal_instance *instance,
+ struct vchiq_mmal_component *component)
+{
+ int ret;
+
+ if (mutex_lock_interruptible(&instance->vchiq_mutex))
+ return -EINTR;
+
+ if (component->enabled)
+ ret = disable_component(instance, component);
+
+ ret = destroy_component(instance, component);
+
+ mutex_unlock(&instance->vchiq_mutex);
+
+ return ret;
+}
+
+/*
+ * cause a mmal component to be enabled
+ */
+int vchiq_mmal_component_enable(struct vchiq_mmal_instance *instance,
+ struct vchiq_mmal_component *component)
+{
+ int ret;
+
+ if (mutex_lock_interruptible(&instance->vchiq_mutex))
+ return -EINTR;
+
+ if (component->enabled) {
+ mutex_unlock(&instance->vchiq_mutex);
+ return 0;
+ }
+
+ ret = enable_component(instance, component);
+ if (ret == 0)
+ component->enabled = true;
+
+ mutex_unlock(&instance->vchiq_mutex);
+
+ return ret;
+}
+
+/*
+ * cause a mmal component to be disabled
+ */
+int vchiq_mmal_component_disable(struct vchiq_mmal_instance *instance,
+ struct vchiq_mmal_component *component)
+{
+ int ret;
+
+ if (mutex_lock_interruptible(&instance->vchiq_mutex))
+ return -EINTR;
+
+ if (!component->enabled) {
+ mutex_unlock(&instance->vchiq_mutex);
+ return 0;
+ }
+
+ ret = disable_component(instance, component);
+ if (ret == 0)
+ component->enabled = false;
+
+ mutex_unlock(&instance->vchiq_mutex);
+
+ return ret;
+}
+
+int vchiq_mmal_version(struct vchiq_mmal_instance *instance,
+ u32 *major_out, u32 *minor_out)
+{
+ int ret;
+
+ if (mutex_lock_interruptible(&instance->vchiq_mutex))
+ return -EINTR;
+
+ ret = get_version(instance, major_out, minor_out);
+
+ mutex_unlock(&instance->vchiq_mutex);
+
+ return ret;
+}
+
+int vchiq_mmal_finalise(struct vchiq_mmal_instance *instance)
+{
+ int status = 0;
+
+ if (!instance)
+ return -EINVAL;
+
+ if (mutex_lock_interruptible(&instance->vchiq_mutex))
+ return -EINTR;
+
+ vchi_service_use(instance->handle);
+
+ status = vchi_service_close(instance->handle);
+ if (status != 0)
+		pr_err("mmal-vchiq: VCHIQ close failed\n");
+
+ mutex_unlock(&instance->vchiq_mutex);
+
+ vfree(instance->bulk_scratch);
+
+ mmal_context_map_destroy(&instance->context_map);
+
+ kfree(instance);
+
+ return status;
+}
+
+int vchiq_mmal_init(struct vchiq_mmal_instance **out_instance)
+{
+ int status;
+ struct vchiq_mmal_instance *instance;
+ static VCHI_CONNECTION_T *vchi_connection;
+ static VCHI_INSTANCE_T vchi_instance;
+ SERVICE_CREATION_T params = {
+ .version = VCHI_VERSION_EX(VC_MMAL_VER, VC_MMAL_MIN_VER),
+ .service_id = VC_MMAL_SERVER_NAME,
+ .connection = vchi_connection,
+ .rx_fifo_size = 0,
+ .tx_fifo_size = 0,
+ .callback = service_callback,
+ .callback_param = NULL,
+ .want_unaligned_bulk_rx = 1,
+ .want_unaligned_bulk_tx = 1,
+ .want_crc = 0
+ };
+
+	/* compile time checks to ensure structure sizes as they are
+ * directly (de)serialised from memory.
+ */
+
+ /* ensure the header structure has packed to the correct size */
+ BUILD_BUG_ON(sizeof(struct mmal_msg_header) != 24);
+
+ /* ensure message structure does not exceed maximum length */
+ BUILD_BUG_ON(sizeof(struct mmal_msg) > MMAL_MSG_MAX_SIZE);
+
+ /* mmal port struct is correct size */
+ BUILD_BUG_ON(sizeof(struct mmal_port) != 64);
+
+ /* create a vchi instance */
+ status = vchi_initialise(&vchi_instance);
+ if (status) {
+ pr_err("Failed to initialise VCHI instance (status=%d)\n",
+ status);
+ return -EIO;
+ }
+
+ status = vchi_connect(NULL, 0, vchi_instance);
+ if (status) {
+ pr_err("Failed to connect VCHI instance (status=%d)\n", status);
+ return -EIO;
+ }
+
+ instance = kzalloc(sizeof(*instance), GFP_KERNEL);
+
+ if (!instance)
+ return -ENOMEM;
+
+ mutex_init(&instance->vchiq_mutex);
+ mutex_init(&instance->bulk_mutex);
+
+ instance->bulk_scratch = vmalloc(PAGE_SIZE);
+
+ status = mmal_context_map_init(&instance->context_map);
+ if (status) {
+ pr_err("Failed to init context map (status=%d)\n", status);
+ kfree(instance);
+ return status;
+ }
+
+ params.callback_param = instance;
+
+ status = vchi_service_open(vchi_instance, &params, &instance->handle);
+ if (status) {
+ pr_err("Failed to open VCHI service connection (status=%d)\n",
+ status);
+ goto err_close_services;
+ }
+
+ vchi_service_release(instance->handle);
+
+ *out_instance = instance;
+
+ return 0;
+
+err_close_services:
+
+ vchi_service_close(instance->handle);
+ vfree(instance->bulk_scratch);
+ kfree(instance);
+ return -ENODEV;
+}
diff --git a/drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.h b/drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.h
new file mode 100644
index 000000000000..63db053532bf
--- /dev/null
+++ b/drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.h
@@ -0,0 +1,174 @@
+/*
+ * Broadcom BCM2835 V4L2 driver
+ *
+ * Copyright © 2013 Raspberry Pi (Trading) Ltd.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive
+ * for more details.
+ *
+ * Authors: Vincent Sanders <vincent.sanders@collabora.co.uk>
+ * Dave Stevenson <dsteve@broadcom.com>
+ * Simon Mellor <simellor@broadcom.com>
+ * Luke Diamand <luked@broadcom.com>
+ *
+ * MMAL interface to VCHIQ message passing
+ */
+
+#ifndef MMAL_VCHIQ_H
+#define MMAL_VCHIQ_H
+
+#include "mmal-msg-format.h"
+
+#define MAX_PORT_COUNT 4
+
+/* Maximum size of the format extradata. */
+#define MMAL_FORMAT_EXTRADATA_MAX_SIZE 128
+
+struct vchiq_mmal_instance;
+
+enum vchiq_mmal_es_type {
+ MMAL_ES_TYPE_UNKNOWN, /**< Unknown elementary stream type */
+ MMAL_ES_TYPE_CONTROL, /**< Elementary stream of control commands */
+ MMAL_ES_TYPE_AUDIO, /**< Audio elementary stream */
+ MMAL_ES_TYPE_VIDEO, /**< Video elementary stream */
+ MMAL_ES_TYPE_SUBPICTURE /**< Sub-picture elementary stream */
+};
+
+/* rectangle, used lots so it gets its own struct */
+struct vchiq_mmal_rect {
+ s32 x;
+ s32 y;
+ s32 width;
+ s32 height;
+};
+
+struct vchiq_mmal_port_buffer {
+ unsigned int num; /* number of buffers */
+ u32 size; /* size of buffers */
+ u32 alignment; /* alignment of buffers */
+};
+
+struct vchiq_mmal_port;
+
+typedef void (*vchiq_mmal_buffer_cb)(
+ struct vchiq_mmal_instance *instance,
+ struct vchiq_mmal_port *port,
+ int status, struct mmal_buffer *buffer,
+ unsigned long length, u32 mmal_flags, s64 dts, s64 pts);
+
+struct vchiq_mmal_port {
+ bool enabled;
+ u32 handle;
+ u32 type; /* port type, cached to use on port info set */
+ u32 index; /* port index, cached to use on port info set */
+
+ /* component port belongs to, allows simple deref */
+ struct vchiq_mmal_component *component;
+
+	struct vchiq_mmal_port *connected; /* port connected to */
+
+ /* buffer info */
+ struct vchiq_mmal_port_buffer minimum_buffer;
+ struct vchiq_mmal_port_buffer recommended_buffer;
+ struct vchiq_mmal_port_buffer current_buffer;
+
+ /* stream format */
+ struct mmal_es_format_local format;
+ /* elementary stream format */
+ union mmal_es_specific_format es;
+
+ /* data buffers to fill */
+ struct list_head buffers;
+ /* lock to serialise adding and removing buffers from list */
+ spinlock_t slock;
+	/* count of how many buffer header refills have failed because
+ * there was no buffer to satisfy them
+ */
+ int buffer_underflow;
+ /* callback on buffer completion */
+ vchiq_mmal_buffer_cb buffer_cb;
+ /* callback context */
+ void *cb_ctx;
+};
+
+struct vchiq_mmal_component {
+ bool enabled;
+ u32 handle; /* VideoCore handle for component */
+ u32 inputs; /* Number of input ports */
+ u32 outputs; /* Number of output ports */
+ u32 clocks; /* Number of clock ports */
+ struct vchiq_mmal_port control; /* control port */
+ struct vchiq_mmal_port input[MAX_PORT_COUNT]; /* input ports */
+ struct vchiq_mmal_port output[MAX_PORT_COUNT]; /* output ports */
+ struct vchiq_mmal_port clock[MAX_PORT_COUNT]; /* clock ports */
+};
+
+int vchiq_mmal_init(struct vchiq_mmal_instance **out_instance);
+int vchiq_mmal_finalise(struct vchiq_mmal_instance *instance);
+
+/* Initialise a mmal component and its ports
+ *
+ */
+int vchiq_mmal_component_init(
+ struct vchiq_mmal_instance *instance,
+ const char *name,
+ struct vchiq_mmal_component **component_out);
+
+int vchiq_mmal_component_finalise(
+ struct vchiq_mmal_instance *instance,
+ struct vchiq_mmal_component *component);
+
+int vchiq_mmal_component_enable(
+ struct vchiq_mmal_instance *instance,
+ struct vchiq_mmal_component *component);
+
+int vchiq_mmal_component_disable(
+ struct vchiq_mmal_instance *instance,
+ struct vchiq_mmal_component *component);
+
+/* enable a mmal port
+ *
+ * enables a port and, if a buffer callback is provided, enqueues
+ * buffer headers as appropriate for the port.
+ */
+int vchiq_mmal_port_enable(
+ struct vchiq_mmal_instance *instance,
+ struct vchiq_mmal_port *port,
+ vchiq_mmal_buffer_cb buffer_cb);
+
+/* disable a port
+ *
+ * disabling a port will dequeue any pending buffers
+ */
+int vchiq_mmal_port_disable(struct vchiq_mmal_instance *instance,
+ struct vchiq_mmal_port *port);
+
+int vchiq_mmal_port_parameter_set(struct vchiq_mmal_instance *instance,
+ struct vchiq_mmal_port *port,
+ u32 parameter,
+ void *value,
+ u32 value_size);
+
+int vchiq_mmal_port_parameter_get(struct vchiq_mmal_instance *instance,
+ struct vchiq_mmal_port *port,
+ u32 parameter,
+ void *value,
+ u32 *value_size);
+
+int vchiq_mmal_port_set_format(struct vchiq_mmal_instance *instance,
+ struct vchiq_mmal_port *port);
+
+int vchiq_mmal_port_connect_tunnel(struct vchiq_mmal_instance *instance,
+ struct vchiq_mmal_port *src,
+ struct vchiq_mmal_port *dst);
+
+int vchiq_mmal_version(struct vchiq_mmal_instance *instance,
+ u32 *major_out,
+ u32 *minor_out);
+
+int vchiq_mmal_submit_buffer(struct vchiq_mmal_instance *instance,
+ struct vchiq_mmal_port *port,
+ struct mmal_buffer *buf);
+
+#endif /* MMAL_VCHIQ_H */
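
A minimal usage sketch of the interface declared above, assuming a hypothetical
caller and component name ("ril.camera" is illustrative only) and omitting port
format setup and buffer handling:

	#include "mmal-vchiq.h"

	static int example_open_component(void)
	{
		struct vchiq_mmal_instance *instance;
		struct vchiq_mmal_component *camera;
		u32 major, minor;
		int ret;

		ret = vchiq_mmal_init(&instance);
		if (ret)
			return ret;

		/* query the firmware MMAL version */
		ret = vchiq_mmal_version(instance, &major, &minor);
		if (ret)
			goto finalise;

		/* create and enable a component on the VPU */
		ret = vchiq_mmal_component_init(instance, "ril.camera", &camera);
		if (ret)
			goto finalise;

		ret = vchiq_mmal_component_enable(instance, camera);

		/* ... set port formats, enable ports, submit buffers ... */

		vchiq_mmal_component_disable(instance, camera);
		vchiq_mmal_component_finalise(instance, camera);
	finalise:
		vchiq_mmal_finalise(instance);
		return ret;
	}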
diff --git a/drivers/staging/vc04_services/interface/vchi/TODO b/drivers/staging/vc04_services/interface/vchi/TODO
index 03aa65183b25..df93154b1aa6 100644
--- a/drivers/staging/vc04_services/interface/vchi/TODO
+++ b/drivers/staging/vc04_services/interface/vchi/TODO
@@ -1,24 +1,9 @@
-1) Port to aarch64
-
-This driver won't be very useful unless we also have it working on
-Raspberry Pi 3. This requires, at least:
-
- - Figure out an alternative to the dmac_map_area() hack.
-
- - Decide what to use instead of dsb().
-
- - Do something about (int) cast of bulk->data in
- vchiq_bulk_transfer().
-
- bulk->data is a bus address going across to the firmware. We know
- our bus addresses are <32bit.
-
-2) Write a DT binding doc and get the corresponding DT node merged to
+1) Write a DT binding doc and get the corresponding DT node merged to
bcm2835.
This will let the driver probe when enabled.
-3) Import drivers using VCHI.
+2) Import drivers using VCHI.
VCHI is just a tool to let drivers talk to the firmware. Here are
some of the ones we want:
@@ -41,7 +26,7 @@ some of the ones we want:
to manage these buffers as dmabufs so that we can zero-copy import
camera images into vc4 for rendering/display.
-4) Garbage-collect unused code
+3) Garbage-collect unused code
One of the reasons this driver wasn't upstreamed previously was that
there's a lot code that got built that's probably unnecessary these
diff --git a/drivers/staging/vc04_services/interface/vchi/vchi_cfg.h b/drivers/staging/vc04_services/interface/vchi/vchi_cfg.h
index 26bc2d38d725..b6f42b86f206 100644
--- a/drivers/staging/vc04_services/interface/vchi/vchi_cfg.h
+++ b/drivers/staging/vc04_services/interface/vchi/vchi_cfg.h
@@ -173,7 +173,7 @@
* under the carpet. */
#if VCHI_RX_MSG_QUEUE_SIZE < (VCHI_MAX_MSG_SIZE/16 + 1) * VCHI_NUM_READ_SLOTS
# undef VCHI_RX_MSG_QUEUE_SIZE
-# define VCHI_RX_MSG_QUEUE_SIZE (VCHI_MAX_MSG_SIZE/16 + 1) * VCHI_NUM_READ_SLOTS
+# define VCHI_RX_MSG_QUEUE_SIZE ((VCHI_MAX_MSG_SIZE/16 + 1) * VCHI_NUM_READ_SLOTS)
#endif
/* How many bulk transmits can we have pending. Once exhausted, vchi_bulk_queue_transmit
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
index 3aeffcb9c87e..988ee61fb4a7 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
@@ -37,12 +37,11 @@
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/dma-mapping.h>
-#include <linux/version.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/uaccess.h>
+#include <linux/mm.h>
#include <linux/of.h>
-#include <asm/pgtable.h>
#include <soc/bcm2835/raspberrypi-firmware.h>
#define TOTAL_SLOTS (VCHIQ_SLOT_ZERO_SLOTS + 2 * 32)
@@ -208,6 +207,7 @@ VCHIQ_STATUS_T
vchiq_platform_init_state(VCHIQ_STATE_T *state)
{
VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
+
state->platform_state = kzalloc(sizeof(VCHIQ_2835_ARM_STATE_T), GFP_KERNEL);
((VCHIQ_2835_ARM_STATE_T *)state->platform_state)->inited = 1;
status = vchiq_arm_init_state(state, &((VCHIQ_2835_ARM_STATE_T *)state->platform_state)->arm_state);
@@ -293,6 +293,7 @@ vchiq_dump_platform_state(void *dump_context)
{
char buf[80];
int len;
+
len = snprintf(buf, sizeof(buf),
" Platform: 2835 (VC master)");
vchiq_dump(dump_context, buf, len + 1);
@@ -406,7 +407,7 @@ create_pagelist(char __user *buf, size_t count, unsigned short type,
dma_addr_t dma_addr;
offset = ((unsigned int)(unsigned long)buf & (PAGE_SIZE - 1));
- num_pages = (count + offset + PAGE_SIZE - 1) / PAGE_SIZE;
+ num_pages = DIV_ROUND_UP(count + offset, PAGE_SIZE);
pagelist_size = sizeof(PAGELIST_T) +
(num_pages * sizeof(u32)) +
@@ -591,6 +592,7 @@ free_pagelist(struct vchiq_pagelist_info *pagelistinfo,
(pagelist->type - PAGELIST_READ_WITH_FRAGMENTS) *
g_fragments_size;
int head_bytes, tail_bytes;
+
head_bytes = (g_cache_line_size - pagelist->offset) &
(g_cache_line_size - 1);
tail_bytes = (pagelist->offset + actual) &
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
index 8a0d214f6e9b..e823f1d5d177 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
@@ -48,6 +48,7 @@
#include <linux/list.h>
#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/compat.h>
#include <soc/bcm2835/raspberrypi-firmware.h>
#include "vchiq_core.h"
@@ -194,8 +195,10 @@ static const char *const ioctl_names[] = {
vchiq_static_assert(ARRAY_SIZE(ioctl_names) ==
(VCHIQ_IOC_MAX + 1));
+#if defined(CONFIG_BCM2835_VCHIQ_SUPPORT_MEMDUMP)
static void
dump_phys_mem(void *virt_addr, u32 num_bytes);
+#endif
/****************************************************************************
*
@@ -210,6 +213,7 @@ add_completion(VCHIQ_INSTANCE_T instance, VCHIQ_REASON_T reason,
{
VCHIQ_COMPLETION_DATA_T *completion;
int insert;
+
DEBUG_INITIALISE(g_state.local)
insert = instance->completion_insert;
@@ -281,6 +285,7 @@ service_callback(VCHIQ_REASON_T reason, VCHIQ_HEADER_T *header,
VCHIQ_SERVICE_T *service;
VCHIQ_INSTANCE_T instance;
bool skip_completion = false;
+
DEBUG_INITIALISE(g_state.local)
DEBUG_TRACE(SERVICE_CALLBACK_LINE);
@@ -316,6 +321,7 @@ service_callback(VCHIQ_REASON_T reason, VCHIQ_HEADER_T *header,
if ((user_service->message_available_pos -
instance->completion_remove) < 0) {
VCHIQ_STATUS_T status;
+
vchiq_log_info(vchiq_arm_log_level,
"Inserting extra MESSAGE_AVAILABLE");
DEBUG_TRACE(SERVICE_CALLBACK_LINE);
@@ -407,7 +413,7 @@ static void close_delivered(USER_SERVICE_T *user_service)
}
struct vchiq_io_copy_callback_context {
- VCHIQ_ELEMENT_T *current_element;
+ struct vchiq_element *current_element;
size_t current_element_offset;
unsigned long elements_to_go;
size_t current_offset;
@@ -484,7 +490,7 @@ vchiq_ioc_copy_element_data(
**************************************************************************/
static VCHIQ_STATUS_T
vchiq_ioc_queue_message(VCHIQ_SERVICE_HANDLE_T handle,
- VCHIQ_ELEMENT_T *elements,
+ struct vchiq_element *elements,
unsigned long count)
{
struct vchiq_io_copy_callback_context context;
@@ -520,6 +526,7 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
VCHIQ_SERVICE_T *service = NULL;
long ret = 0;
int i, rc;
+
DEBUG_INITIALISE(g_state.local)
vchiq_log_trace(vchiq_arm_log_level,
@@ -742,6 +749,7 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
case VCHIQ_IOC_QUEUE_MESSAGE: {
VCHIQ_QUEUE_MESSAGE_T args;
+
if (copy_from_user
(&args, (const void __user *)arg,
sizeof(args)) != 0) {
@@ -753,9 +761,10 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
if ((service != NULL) && (args.count <= MAX_ELEMENTS)) {
/* Copy elements into kernel space */
- VCHIQ_ELEMENT_T elements[MAX_ELEMENTS];
+ struct vchiq_element elements[MAX_ELEMENTS];
+
if (copy_from_user(elements, args.elements,
- args.count * sizeof(VCHIQ_ELEMENT_T)) == 0)
+ args.count * sizeof(struct vchiq_element)) == 0)
status = vchiq_ioc_queue_message
(args.handle,
elements, args.count);
@@ -770,6 +779,7 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
case VCHIQ_IOC_QUEUE_BULK_RECEIVE: {
VCHIQ_QUEUE_BULK_TRANSFER_T args;
struct bulk_waiter_node *waiter = NULL;
+
VCHIQ_BULK_DIR_T dir =
(cmd == VCHIQ_IOC_QUEUE_BULK_TRANSMIT) ?
VCHIQ_BULK_TRANSMIT : VCHIQ_BULK_RECEIVE;
@@ -797,6 +807,7 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
args.userdata = &waiter->bulk_waiter;
} else if (args.mode == VCHIQ_BULK_MODE_WAITING) {
struct list_head *pos;
+
mutex_lock(&instance->bulk_waiter_list_mutex);
list_for_each(pos, &instance->bulk_waiter_list) {
if (list_entry(pos, struct bulk_waiter_node,
@@ -882,6 +893,7 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
instance->completion_insert)
&& !instance->closing) {
int rc;
+
DEBUG_TRACE(AWAIT_COMPLETION_LINE);
mutex_unlock(&instance->completion_mutex);
rc = down_interruptible(&instance->insert_event);
@@ -1149,6 +1161,7 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
args.handle, args.option, args.value);
} break;
+#if defined(CONFIG_BCM2835_VCHIQ_SUPPORT_MEMDUMP)
case VCHIQ_IOC_DUMP_PHYS_MEM: {
VCHIQ_DUMP_MEM_T args;
@@ -1160,6 +1173,7 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
}
dump_phys_mem(args.virt_addr, args.num_bytes);
} break;
+#endif
case VCHIQ_IOC_LIB_VERSION: {
unsigned int lib_version = (unsigned int)arg;
@@ -1219,6 +1233,491 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
return ret;
}
+#if defined(CONFIG_COMPAT)
+
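+/*
+ * 32-bit compat ioctl handling: the structures below mirror the native
+ * ioctl arguments but carry pointers as compat_uptr_t, which are
+ * converted with compat_ptr()/ptr_to_compat() before handing off to the
+ * native vchiq_ioctl().
+ */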
+struct vchiq_service_params32 {
+ int fourcc;
+ compat_uptr_t callback;
+ compat_uptr_t userdata;
+ short version; /* Increment for non-trivial changes */
+ short version_min; /* Update for incompatible changes */
+};
+
+struct vchiq_create_service32 {
+ struct vchiq_service_params32 params;
+ int is_open;
+ int is_vchi;
+ unsigned int handle; /* OUT */
+};
+
+#define VCHIQ_IOC_CREATE_SERVICE32 \
+ _IOWR(VCHIQ_IOC_MAGIC, 2, struct vchiq_create_service32)
+
+static long
+vchiq_compat_ioctl_create_service(
+ struct file *file,
+ unsigned int cmd,
+ unsigned long arg)
+{
+ VCHIQ_CREATE_SERVICE_T __user *args;
+ struct vchiq_create_service32 __user *ptrargs32 =
+ (struct vchiq_create_service32 __user *)arg;
+ struct vchiq_create_service32 args32;
+ long ret;
+
+ args = compat_alloc_user_space(sizeof(*args));
+ if (!args)
+ return -EFAULT;
+
+ if (copy_from_user(&args32,
+ (struct vchiq_create_service32 __user *)arg,
+ sizeof(args32)))
+ return -EFAULT;
+
+ if (put_user(args32.params.fourcc, &args->params.fourcc) ||
+ put_user(compat_ptr(args32.params.callback),
+ &args->params.callback) ||
+ put_user(compat_ptr(args32.params.userdata),
+ &args->params.userdata) ||
+ put_user(args32.params.version, &args->params.version) ||
+ put_user(args32.params.version_min,
+ &args->params.version_min) ||
+ put_user(args32.is_open, &args->is_open) ||
+ put_user(args32.is_vchi, &args->is_vchi) ||
+ put_user(args32.handle, &args->handle))
+ return -EFAULT;
+
+ ret = vchiq_ioctl(file, VCHIQ_IOC_CREATE_SERVICE, (unsigned long)args);
+
+ if (ret < 0)
+ return ret;
+
+ if (get_user(args32.handle, &args->handle))
+ return -EFAULT;
+
+ if (copy_to_user(&ptrargs32->handle,
+ &args32.handle,
+ sizeof(args32.handle)))
+ return -EFAULT;
+
+ return 0;
+}
+
+struct vchiq_element32 {
+ compat_uptr_t data;
+ unsigned int size;
+};
+
+struct vchiq_queue_message32 {
+ unsigned int handle;
+ unsigned int count;
+ compat_uptr_t elements;
+};
+
+#define VCHIQ_IOC_QUEUE_MESSAGE32 \
+ _IOW(VCHIQ_IOC_MAGIC, 4, struct vchiq_queue_message32)
+
+static long
+vchiq_compat_ioctl_queue_message(struct file *file,
+ unsigned int cmd,
+ unsigned long arg)
+{
+ VCHIQ_QUEUE_MESSAGE_T *args;
+ struct vchiq_element *elements;
+ struct vchiq_queue_message32 args32;
+ unsigned int count;
+
+ if (copy_from_user(&args32,
+ (struct vchiq_queue_message32 __user *)arg,
+ sizeof(args32)))
+ return -EFAULT;
+
+ args = compat_alloc_user_space(sizeof(*args) +
+ (sizeof(*elements) * MAX_ELEMENTS));
+
+ if (!args)
+ return -EFAULT;
+
+ if (put_user(args32.handle, &args->handle) ||
+ put_user(args32.count, &args->count) ||
+ put_user(compat_ptr(args32.elements), &args->elements))
+ return -EFAULT;
+
+ if (args32.count > MAX_ELEMENTS)
+ return -EINVAL;
+
+ if (args32.elements && args32.count) {
+ struct vchiq_element32 tempelement32[MAX_ELEMENTS];
+
+ elements = (struct vchiq_element __user *)(args + 1);
+
+ if (copy_from_user(&tempelement32,
+ compat_ptr(args32.elements),
+ sizeof(tempelement32)))
+ return -EFAULT;
+
+ for (count = 0; count < args32.count; count++) {
+ if (put_user(compat_ptr(tempelement32[count].data),
+ &elements[count].data) ||
+ put_user(tempelement32[count].size,
+ &elements[count].size))
+ return -EFAULT;
+ }
+
+ if (put_user(elements, &args->elements))
+ return -EFAULT;
+ }
+
+ return vchiq_ioctl(file, VCHIQ_IOC_QUEUE_MESSAGE, (unsigned long)args);
+}
+
+struct vchiq_queue_bulk_transfer32 {
+ unsigned int handle;
+ compat_uptr_t data;
+ unsigned int size;
+ compat_uptr_t userdata;
+ VCHIQ_BULK_MODE_T mode;
+};
+
+#define VCHIQ_IOC_QUEUE_BULK_TRANSMIT32 \
+ _IOWR(VCHIQ_IOC_MAGIC, 5, struct vchiq_queue_bulk_transfer32)
+#define VCHIQ_IOC_QUEUE_BULK_RECEIVE32 \
+ _IOWR(VCHIQ_IOC_MAGIC, 6, struct vchiq_queue_bulk_transfer32)
+
+static long
+vchiq_compat_ioctl_queue_bulk(struct file *file,
+ unsigned int cmd,
+ unsigned long arg)
+{
+ VCHIQ_QUEUE_BULK_TRANSFER_T *args;
+ struct vchiq_queue_bulk_transfer32 args32;
+ struct vchiq_queue_bulk_transfer32 *ptrargs32 =
+ (struct vchiq_queue_bulk_transfer32 *)arg;
+ long ret;
+
+ args = compat_alloc_user_space(sizeof(*args));
+ if (!args)
+ return -EFAULT;
+
+ if (copy_from_user(&args32,
+ (struct vchiq_queue_bulk_transfer32 __user *)arg,
+ sizeof(args32)))
+ return -EFAULT;
+
+ if (put_user(args32.handle, &args->handle) ||
+ put_user(compat_ptr(args32.data), &args->data) ||
+ put_user(args32.size, &args->size) ||
+ put_user(compat_ptr(args32.userdata), &args->userdata) ||
+ put_user(args32.mode, &args->mode))
+ return -EFAULT;
+
+ if (cmd == VCHIQ_IOC_QUEUE_BULK_TRANSMIT32)
+ cmd = VCHIQ_IOC_QUEUE_BULK_TRANSMIT;
+ else
+ cmd = VCHIQ_IOC_QUEUE_BULK_RECEIVE;
+
+ ret = vchiq_ioctl(file, cmd, (unsigned long)args);
+
+ if (ret < 0)
+ return ret;
+
+ if (get_user(args32.mode, &args->mode))
+ return -EFAULT;
+
+ if (copy_to_user(&ptrargs32->mode,
+ &args32.mode,
+ sizeof(args32.mode)))
+ return -EFAULT;
+
+ return 0;
+}
+
+struct vchiq_completion_data32 {
+ VCHIQ_REASON_T reason;
+ compat_uptr_t header;
+ compat_uptr_t service_userdata;
+ compat_uptr_t bulk_userdata;
+};
+
+struct vchiq_await_completion32 {
+ unsigned int count;
+ compat_uptr_t buf;
+ unsigned int msgbufsize;
+ unsigned int msgbufcount; /* IN/OUT */
+ compat_uptr_t msgbufs;
+};
+
+#define VCHIQ_IOC_AWAIT_COMPLETION32 \
+ _IOWR(VCHIQ_IOC_MAGIC, 7, struct vchiq_await_completion32)
+
+static long
+vchiq_compat_ioctl_await_completion(struct file *file,
+ unsigned int cmd,
+ unsigned long arg)
+{
+ VCHIQ_AWAIT_COMPLETION_T *args;
+ VCHIQ_COMPLETION_DATA_T *completion;
+ VCHIQ_COMPLETION_DATA_T completiontemp;
+ struct vchiq_await_completion32 args32;
+ struct vchiq_completion_data32 completion32;
+ unsigned int *msgbufcount32;
+ compat_uptr_t msgbuf32;
+ void *msgbuf;
+ void **msgbufptr;
+ long ret;
+
+ args = compat_alloc_user_space(sizeof(*args) +
+ sizeof(*completion) +
+ sizeof(*msgbufptr));
+ if (!args)
+ return -EFAULT;
+
+ completion = (VCHIQ_COMPLETION_DATA_T *)(args + 1);
+ msgbufptr = (void __user **)(completion + 1);
+
+ if (copy_from_user(&args32,
+			   (struct vchiq_await_completion32 __user *)arg,
+ sizeof(args32)))
+ return -EFAULT;
+
+ if (put_user(args32.count, &args->count) ||
+ put_user(compat_ptr(args32.buf), &args->buf) ||
+ put_user(args32.msgbufsize, &args->msgbufsize) ||
+ put_user(args32.msgbufcount, &args->msgbufcount) ||
+ put_user(compat_ptr(args32.msgbufs), &args->msgbufs))
+ return -EFAULT;
+
+ /* These are simple cases, so just fall into the native handler */
+ if (!args32.count || !args32.buf || !args32.msgbufcount)
+ return vchiq_ioctl(file,
+ VCHIQ_IOC_AWAIT_COMPLETION,
+ (unsigned long)args);
+
+ /*
+ * These are the more complex cases. Typical applications of this
+ * ioctl will use a very large count, with a very large msgbufcount.
+ * Since the native ioctl can asynchronously fill in the returned
+ * buffers and the application can in theory begin processing messages
+ * even before the ioctl returns, a bit of a trick is used here.
+ *
+ * By forcing both count and msgbufcount to be 1, it forces the native
+ * ioctl to only claim at most 1 message is available. This tricks
+ * the calling application into thinking only 1 message was actually
+	 * available in the queue so, like all good applications, it will retry
+ * waiting until all the required messages are received.
+ *
+ * This trick has been tested and proven to work with vchiq_test,
+ * Minecraft_PI, the "hello pi" examples, and various other
+ * applications that are included in Raspbian.
+ */
+
+ if (copy_from_user(&msgbuf32,
+ compat_ptr(args32.msgbufs) +
+ (sizeof(compat_uptr_t) *
+ (args32.msgbufcount - 1)),
+ sizeof(msgbuf32)))
+ return -EFAULT;
+
+ msgbuf = compat_ptr(msgbuf32);
+
+ if (copy_to_user(msgbufptr,
+ &msgbuf,
+ sizeof(msgbuf)))
+ return -EFAULT;
+
+ if (copy_to_user(&args->msgbufs,
+ &msgbufptr,
+ sizeof(msgbufptr)))
+ return -EFAULT;
+
+ if (put_user(1U, &args->count) ||
+ put_user(completion, &args->buf) ||
+ put_user(1U, &args->msgbufcount))
+ return -EFAULT;
+
+ ret = vchiq_ioctl(file,
+ VCHIQ_IOC_AWAIT_COMPLETION,
+ (unsigned long)args);
+
+ /*
+	 * A return value of 0 here means that no messages were available
+ * in the message queue. In this case the native ioctl does not
+ * return any data to the application at all. Not even to update
+ * msgbufcount. This functionality needs to be kept here for
+ * compatibility.
+ *
+ * Of course, < 0 means that an error occurred and no data is being
+ * returned.
+ *
+	 * Since count and msgbufcount were forced to 1, that means
+ * the only other possible return value is 1. Meaning that 1 message
+ * was available, so that multiple message case does not need to be
+ * handled here.
+ */
+ if (ret <= 0)
+ return ret;
+
+ if (copy_from_user(&completiontemp, completion, sizeof(*completion)))
+ return -EFAULT;
+
+ completion32.reason = completiontemp.reason;
+ completion32.header = ptr_to_compat(completiontemp.header);
+ completion32.service_userdata =
+ ptr_to_compat(completiontemp.service_userdata);
+ completion32.bulk_userdata =
+ ptr_to_compat(completiontemp.bulk_userdata);
+
+ if (copy_to_user(compat_ptr(args32.buf),
+ &completion32,
+ sizeof(completion32)))
+ return -EFAULT;
+
+ args32.msgbufcount--;
+
+ msgbufcount32 =
+ &((struct vchiq_await_completion32 __user *)arg)->msgbufcount;
+
+ if (copy_to_user(msgbufcount32,
+ &args32.msgbufcount,
+ sizeof(args32.msgbufcount)))
+ return -EFAULT;
+
+ return 1;
+}
+
+struct vchiq_dequeue_message32 {
+ unsigned int handle;
+ int blocking;
+ unsigned int bufsize;
+ compat_uptr_t buf;
+};
+
+#define VCHIQ_IOC_DEQUEUE_MESSAGE32 \
+ _IOWR(VCHIQ_IOC_MAGIC, 8, struct vchiq_dequeue_message32)
+
+static long
+vchiq_compat_ioctl_dequeue_message(struct file *file,
+ unsigned int cmd,
+ unsigned long arg)
+{
+ VCHIQ_DEQUEUE_MESSAGE_T *args;
+ struct vchiq_dequeue_message32 args32;
+
+ args = compat_alloc_user_space(sizeof(*args));
+ if (!args)
+ return -EFAULT;
+
+ if (copy_from_user(&args32,
+ (struct vchiq_dequeue_message32 *)arg,
+ sizeof(args32)))
+ return -EFAULT;
+
+ if (put_user(args32.handle, &args->handle) ||
+ put_user(args32.blocking, &args->blocking) ||
+ put_user(args32.bufsize, &args->bufsize) ||
+ put_user(compat_ptr(args32.buf), &args->buf))
+ return -EFAULT;
+
+ return vchiq_ioctl(file, VCHIQ_IOC_DEQUEUE_MESSAGE,
+ (unsigned long)args);
+}
+
+struct vchiq_get_config32 {
+ unsigned int config_size;
+ compat_uptr_t pconfig;
+};
+
+#define VCHIQ_IOC_GET_CONFIG32 \
+ _IOWR(VCHIQ_IOC_MAGIC, 10, struct vchiq_get_config32)
+
+static long
+vchiq_compat_ioctl_get_config(struct file *file,
+ unsigned int cmd,
+ unsigned long arg)
+{
+ VCHIQ_GET_CONFIG_T *args;
+ struct vchiq_get_config32 args32;
+
+ args = compat_alloc_user_space(sizeof(*args));
+ if (!args)
+ return -EFAULT;
+
+ if (copy_from_user(&args32,
+ (struct vchiq_get_config32 *)arg,
+ sizeof(args32)))
+ return -EFAULT;
+
+ if (put_user(args32.config_size, &args->config_size) ||
+ put_user(compat_ptr(args32.pconfig), &args->pconfig))
+ return -EFAULT;
+
+ return vchiq_ioctl(file, VCHIQ_IOC_GET_CONFIG, (unsigned long)args);
+}
+
+#if defined(CONFIG_BCM2835_VCHIQ_SUPPORT_MEMDUMP)
+
+struct vchiq_dump_mem32 {
+ compat_uptr_t virt_addr;
+ u32 num_bytes;
+};
+
+#define VCHIQ_IOC_DUMP_PHYS_MEM32 \
+ _IOW(VCHIQ_IOC_MAGIC, 15, struct vchiq_dump_mem32)
+
+static long
+vchiq_compat_ioctl_dump_phys_mem(struct file *file,
+ unsigned int cmd,
+ unsigned long arg)
+{
+ VCHIQ_DUMP_MEM_T *args;
+ struct vchiq_dump_mem32 args32;
+
+ args = compat_alloc_user_space(sizeof(*args));
+ if (!args)
+ return -EFAULT;
+
+ if (copy_from_user(&args32,
+ (struct vchiq_dump_mem32 *)arg,
+ sizeof(args32)))
+ return -EFAULT;
+
+ if (put_user(compat_ptr(args32.virt_addr), &args->virt_addr) ||
+ put_user(args32.num_bytes, &args->num_bytes))
+ return -EFAULT;
+
+ return vchiq_ioctl(file, VCHIQ_IOC_DUMP_PHYS_MEM, (unsigned long)args);
+}
+
+#endif
+
+static long
+vchiq_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ switch (cmd) {
+ case VCHIQ_IOC_CREATE_SERVICE32:
+ return vchiq_compat_ioctl_create_service(file, cmd, arg);
+ case VCHIQ_IOC_QUEUE_MESSAGE32:
+ return vchiq_compat_ioctl_queue_message(file, cmd, arg);
+ case VCHIQ_IOC_QUEUE_BULK_TRANSMIT32:
+ case VCHIQ_IOC_QUEUE_BULK_RECEIVE32:
+ return vchiq_compat_ioctl_queue_bulk(file, cmd, arg);
+ case VCHIQ_IOC_AWAIT_COMPLETION32:
+ return vchiq_compat_ioctl_await_completion(file, cmd, arg);
+ case VCHIQ_IOC_DEQUEUE_MESSAGE32:
+ return vchiq_compat_ioctl_dequeue_message(file, cmd, arg);
+ case VCHIQ_IOC_GET_CONFIG32:
+ return vchiq_compat_ioctl_get_config(file, cmd, arg);
+#if defined(CONFIG_BCM2835_VCHIQ_SUPPORT_MEMDUMP)
+ case VCHIQ_IOC_DUMP_PHYS_MEM32:
+ return vchiq_compat_ioctl_dump_phys_mem(file, cmd, arg);
+#endif
+ default:
+ return vchiq_ioctl(file, cmd, arg);
+ }
+}
+
+#endif
+
/****************************************************************************
*
* vchiq_open
@@ -1229,6 +1728,7 @@ static int
vchiq_open(struct inode *inode, struct file *file)
{
int dev = iminor(inode) & 0x0f;
+
vchiq_log_info(vchiq_arm_log_level, "vchiq_open");
switch (dev) {
case VCHIQ_MINOR: {
@@ -1284,6 +1784,7 @@ vchiq_release(struct inode *inode, struct file *file)
{
int dev = iminor(inode) & 0x0f;
int ret = 0;
+
switch (dev) {
case VCHIQ_MINOR: {
VCHIQ_INSTANCE_T instance = file->private_data;
@@ -1365,6 +1866,7 @@ vchiq_release(struct inode *inode, struct file *file)
instance->completion_insert) {
VCHIQ_COMPLETION_DATA_T *completion;
VCHIQ_SERVICE_T *service;
+
completion = &instance->completions[
instance->completion_remove &
(MAX_COMPLETIONS - 1)];
@@ -1387,9 +1889,11 @@ vchiq_release(struct inode *inode, struct file *file)
{
struct list_head *pos, *next;
+
list_for_each_safe(pos, next,
&instance->bulk_waiter_list) {
struct bulk_waiter_node *waiter;
+
waiter = list_entry(pos,
struct bulk_waiter_node,
list);
@@ -1430,8 +1934,10 @@ vchiq_dump(void *dump_context, const char *str, int len)
if (context->actual < context->space) {
int copy_bytes;
+
if (context->offset > 0) {
int skip_bytes = min(len, (int)context->offset);
+
str += skip_bytes;
len -= skip_bytes;
context->offset -= skip_bytes;
@@ -1452,6 +1958,7 @@ vchiq_dump(void *dump_context, const char *str, int len)
** carriage return. */
if ((len == 0) && (str[copy_bytes - 1] == '\0')) {
char cr = '\n';
+
if (copy_to_user(context->buf + context->actual - 1,
&cr, 1))
context->actual = -EFAULT;
@@ -1547,6 +2054,8 @@ vchiq_dump_platform_service_state(void *dump_context, VCHIQ_SERVICE_T *service)
*
***************************************************************************/
+#if defined(CONFIG_BCM2835_VCHIQ_SUPPORT_MEMDUMP)
+
static void
dump_phys_mem(void *virt_addr, u32 num_bytes)
{
@@ -1570,10 +2079,10 @@ dump_phys_mem(void *virt_addr, u32 num_bytes)
offset = (int)(long)virt_addr & (PAGE_SIZE - 1);
end_offset = (int)(long)end_virt_addr & (PAGE_SIZE - 1);
- num_pages = (offset + num_bytes + PAGE_SIZE - 1) / PAGE_SIZE;
+ num_pages = DIV_ROUND_UP(offset + num_bytes, PAGE_SIZE);
pages = kmalloc(sizeof(struct page *) * num_pages, GFP_KERNEL);
- if (pages == NULL) {
+ if (!pages) {
vchiq_log_error(vchiq_arm_log_level,
"Unable to allocation memory for %d pages\n",
num_pages);
@@ -1599,17 +2108,14 @@ dump_phys_mem(void *virt_addr, u32 num_bytes)
}
while (offset < end_offset) {
-
int page_offset = offset % PAGE_SIZE;
- page_idx = offset / PAGE_SIZE;
+ page_idx = offset / PAGE_SIZE;
if (page_idx != prev_idx) {
-
if (page != NULL)
kunmap(page);
page = pages[page_idx];
kmapped_virt_ptr = kmap(page);
-
prev_idx = page_idx;
}
@@ -1632,6 +2138,8 @@ out:
kfree(pages);
}
+#endif
+
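The DIV_ROUND_UP() change above is purely cosmetic; as a quick illustration (invented name, not part of the patch), both spellings compute the number of pages needed to cover the dumped range:

#include <linux/kernel.h>	/* DIV_ROUND_UP() */
#include <linux/mm.h>		/* PAGE_SIZE */
#include <linux/types.h>

static inline int demo_pages_needed(int offset, u32 num_bytes)
{
	/* identical to (offset + num_bytes + PAGE_SIZE - 1) / PAGE_SIZE */
	return DIV_ROUND_UP(offset + num_bytes, PAGE_SIZE);
}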
/****************************************************************************
*
* vchiq_read
@@ -1643,6 +2151,7 @@ vchiq_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
DUMP_CONTEXT_T context;
+
context.buf = buf;
context.actual = 0;
context.space = count;
@@ -1673,6 +2182,9 @@ static const struct file_operations
vchiq_fops = {
.owner = THIS_MODULE,
.unlocked_ioctl = vchiq_ioctl,
+#if defined(CONFIG_COMPAT)
+ .compat_ioctl = vchiq_compat_ioctl,
+#endif
.open = vchiq_open,
.release = vchiq_release,
.read = vchiq_read
@@ -1686,6 +2198,7 @@ int
vchiq_videocore_wanted(VCHIQ_STATE_T *state)
{
VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
+
if (!arm_state)
/* autosuspend not supported - always return wanted */
return 1;
@@ -1753,6 +2266,7 @@ vchiq_keepalive_thread_func(void *v)
while (1) {
long rc = 0, uc = 0;
+
if (wait_for_completion_interruptible(&arm_state->ka_evt)
!= 0) {
vchiq_log_error(vchiq_susp_log_level,
@@ -1982,6 +2496,7 @@ static inline int
need_resume(VCHIQ_STATE_T *state)
{
VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
+
return (arm_state->vc_suspend_state > VC_SUSPEND_IDLE) &&
(arm_state->vc_resume_state < VC_RESUME_REQUESTED) &&
vchiq_videocore_wanted(state);
@@ -2155,6 +2670,7 @@ output_timeout_error(VCHIQ_STATE_T *state)
}
for (i = 0; i < active_services; i++) {
VCHIQ_SERVICE_T *service_ptr = state->services[i];
+
if (service_ptr && service_ptr->service_use_count &&
(service_ptr->srvstate != VCHIQ_SRVSTATE_FREE)) {
snprintf(err, sizeof(err), " %c%c%c%c(%d) service has "
@@ -2502,6 +3018,7 @@ vchiq_use_internal(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service,
if (ret == VCHIQ_SUCCESS) {
VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
long ack_cnt = atomic_xchg(&arm_state->ka_use_ack_count, 0);
+
while (ack_cnt && (status == VCHIQ_SUCCESS)) {
/* Send the use notify to videocore */
status = vchiq_send_remote_use_active(state);
@@ -2584,6 +3101,7 @@ void
vchiq_on_remote_use(VCHIQ_STATE_T *state)
{
VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
+
vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
atomic_inc(&arm_state->ka_use_count);
complete(&arm_state->ka_evt);
@@ -2593,6 +3111,7 @@ void
vchiq_on_remote_release(VCHIQ_STATE_T *state)
{
VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
+
vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
atomic_inc(&arm_state->ka_release_count);
complete(&arm_state->ka_evt);
@@ -2621,6 +3140,7 @@ vchiq_instance_get_use_count(VCHIQ_INSTANCE_T instance)
{
VCHIQ_SERVICE_T *service;
int use_count = 0, i;
+
i = 0;
while ((service = next_service_by_instance(instance->state,
instance, &i)) != NULL) {
@@ -2647,6 +3167,7 @@ vchiq_instance_set_trace(VCHIQ_INSTANCE_T instance, int trace)
{
VCHIQ_SERVICE_T *service;
int i;
+
i = 0;
while ((service = next_service_by_instance(instance->state,
instance, &i)) != NULL) {
@@ -2660,6 +3181,7 @@ static void suspend_timer_callback(unsigned long context)
{
VCHIQ_STATE_T *state = (VCHIQ_STATE_T *)context;
VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
+
if (!arm_state)
goto out;
vchiq_log_info(vchiq_susp_log_level,
@@ -2674,6 +3196,7 @@ vchiq_use_service_no_resume(VCHIQ_SERVICE_HANDLE_T handle)
{
VCHIQ_STATUS_T ret = VCHIQ_ERROR;
VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
+
if (service) {
ret = vchiq_use_internal(service->state, service,
USE_TYPE_SERVICE_NO_RESUME);
@@ -2687,6 +3210,7 @@ vchiq_use_service(VCHIQ_SERVICE_HANDLE_T handle)
{
VCHIQ_STATUS_T ret = VCHIQ_ERROR;
VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
+
if (service) {
ret = vchiq_use_internal(service->state, service,
USE_TYPE_SERVICE);
@@ -2700,6 +3224,7 @@ vchiq_release_service(VCHIQ_SERVICE_HANDLE_T handle)
{
VCHIQ_STATUS_T ret = VCHIQ_ERROR;
VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
+
if (service) {
ret = vchiq_release_internal(service->state, service);
unlock_service(service);
@@ -2744,6 +3269,7 @@ vchiq_dump_service_use_state(VCHIQ_STATE_T *state)
for (i = 0; (i < active_services) && (j < local_max_services); i++) {
VCHIQ_SERVICE_T *service_ptr = state->services[i];
+
if (!service_ptr)
continue;
@@ -2832,12 +3358,14 @@ void vchiq_platform_conn_state_changed(VCHIQ_STATE_T *state,
VCHIQ_CONNSTATE_T oldstate, VCHIQ_CONNSTATE_T newstate)
{
VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
+
vchiq_log_info(vchiq_susp_log_level, "%d: %s->%s", state->id,
get_conn_state_name(oldstate), get_conn_state_name(newstate));
if (state->conn_state == VCHIQ_CONNSTATE_CONNECTED) {
write_lock_bh(&arm_state->susp_res_lock);
if (!arm_state->first_connect) {
char threadname[16];
+
arm_state->first_connect = 1;
write_unlock_bh(&arm_state->susp_res_lock);
snprintf(threadname, sizeof(threadname), "vchiq-keep/%d",
@@ -2962,6 +3490,6 @@ static struct platform_driver vchiq_driver = {
};
module_platform_driver(vchiq_driver);
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("Videocore VCHIQ driver");
MODULE_AUTHOR("Broadcom Corporation");
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
index d587097b261c..4f9e738abddf 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
@@ -192,6 +192,7 @@ VCHIQ_SERVICE_T *
find_service_by_port(VCHIQ_STATE_T *state, int localport)
{
VCHIQ_SERVICE_T *service = NULL;
+
if ((unsigned int)localport <= VCHIQ_PORT_MAX) {
spin_lock(&service_spinlock);
service = state->services[localport];
@@ -268,6 +269,7 @@ next_service_by_instance(VCHIQ_STATE_T *state, VCHIQ_INSTANCE_T instance,
spin_lock(&service_spinlock);
while (idx < state->unused_service) {
VCHIQ_SERVICE_T *srv = state->services[idx++];
+
if (srv && (srv->srvstate != VCHIQ_SRVSTATE_FREE) &&
(srv->instance == instance)) {
service = srv;
@@ -381,6 +383,7 @@ make_service_callback(VCHIQ_SERVICE_T *service, VCHIQ_REASON_T reason,
VCHIQ_HEADER_T *header, void *bulk_userdata)
{
VCHIQ_STATUS_T status;
+
vchiq_log_trace(vchiq_core_log_level, "%d: callback:%d (%s, %pK, %pK)",
service->state->id, service->localport, reason_names[reason],
header, bulk_userdata);
@@ -399,6 +402,7 @@ inline void
vchiq_set_conn_state(VCHIQ_STATE_T *state, VCHIQ_CONNSTATE_T newstate)
{
VCHIQ_CONNSTATE_T oldstate = state->conn_state;
+
vchiq_log_info(vchiq_core_log_level, "%d: %s->%s", state->id,
conn_state_names[oldstate],
conn_state_names[newstate]);
@@ -485,6 +489,7 @@ get_listening_service(VCHIQ_STATE_T *state, int fourcc)
for (i = 0; i < state->unused_service; i++) {
VCHIQ_SERVICE_T *service = state->services[i];
+
if (service &&
(service->public_fourcc == fourcc) &&
((service->srvstate == VCHIQ_SRVSTATE_LISTENING) ||
@@ -503,8 +508,10 @@ static VCHIQ_SERVICE_T *
get_connected_service(VCHIQ_STATE_T *state, unsigned int port)
{
int i;
+
for (i = 0; i < state->unused_service; i++) {
VCHIQ_SERVICE_T *service = state->services[i];
+
if (service && (service->srvstate == VCHIQ_SRVSTATE_OPEN)
&& (service->remoteport == port)) {
lock_service(service);
@@ -645,11 +652,13 @@ process_free_queue(VCHIQ_STATE_T *state)
VCHIQ_HEADER_T *header =
(VCHIQ_HEADER_T *)(data + pos);
int msgid = header->msgid;
+
if (VCHIQ_MSG_TYPE(msgid) == VCHIQ_MSG_DATA) {
int port = VCHIQ_MSG_SRCPORT(msgid);
VCHIQ_SERVICE_QUOTA_T *service_quota =
&state->service_quotas[port];
int count;
+
spin_lock(&quota_spinlock);
count = service_quota->message_use_count;
if (count > 0)
@@ -719,6 +728,7 @@ process_free_queue(VCHIQ_STATE_T *state)
if (data_found) {
int count;
+
spin_lock(&quota_spinlock);
count = state->data_use_count;
if (count > 0)
@@ -745,9 +755,7 @@ memcpy_copy_callback(
void *context, void *dest,
size_t offset, size_t maxsize)
{
- void *src = context;
-
- memcpy(dest + offset, src + offset, maxsize);
+ memcpy(dest + offset, context + offset, maxsize);
return maxsize;
}
@@ -1059,6 +1067,7 @@ queue_message_sync(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service,
{
int oldmsgid = header->msgid;
+
if (oldmsgid != VCHIQ_MSGID_PADDING)
vchiq_log_error(vchiq_core_log_level,
"%d: qms - msgid %x, not PADDING",
@@ -1143,6 +1152,7 @@ release_slot(VCHIQ_STATE_T *state, VCHIQ_SLOT_INFO_T *slot_info,
if (header) {
int msgid = header->msgid;
+
if (((msgid & VCHIQ_MSGID_CLAIMED) == 0) ||
(service && service->closing)) {
mutex_unlock(&state->recycle_mutex);
@@ -1252,6 +1262,7 @@ notify_bulks(VCHIQ_SERVICE_T *service, VCHIQ_BULK_QUEUE_T *queue,
}
if (bulk->mode == VCHIQ_BULK_MODE_BLOCKING) {
struct bulk_waiter *waiter;
+
spin_lock(&bulk_waiter_spinlock);
waiter = bulk->userdata;
if (waiter) {
@@ -1301,6 +1312,7 @@ poll_services(VCHIQ_STATE_T *state)
for (group = 0; group < BITSET_SIZE(state->unused_service); group++) {
u32 flags;
+
flags = atomic_xchg(&state->poll_services[group], 0);
for (i = 0; flags; i++) {
if (flags & (1 << i)) {
@@ -1308,6 +1320,7 @@ poll_services(VCHIQ_STATE_T *state)
find_service_by_port(state,
(group<<5) + i);
u32 service_flags;
+
flags &= ~(1 << i);
if (!service)
continue;
@@ -1421,6 +1434,7 @@ static void
abort_outstanding_bulks(VCHIQ_SERVICE_T *service, VCHIQ_BULK_QUEUE_T *queue)
{
int is_tx = (queue == &service->bulk_tx);
+
vchiq_log_trace(vchiq_core_log_level,
"%d: aob:%d %cx - li=%x ri=%x p=%x",
service->state->id, service->localport, is_tx ? 't' : 'r',
@@ -1484,6 +1498,7 @@ static void
resume_bulks(VCHIQ_STATE_T *state)
{
int i;
+
if (unlikely(atomic_dec_return(&pause_bulks_count) != 0)) {
WARN_ON_ONCE(1);
atomic_set(&pause_bulks_count, 0);
@@ -1507,6 +1522,7 @@ resume_bulks(VCHIQ_STATE_T *state)
VCHIQ_SERVICE_T *service = state->services[i];
int resolved_rx = 0;
int resolved_tx = 0;
+
if (!service || (service->srvstate != VCHIQ_SRVSTATE_OPEN))
continue;
@@ -1550,6 +1566,7 @@ parse_open(VCHIQ_STATE_T *state, VCHIQ_HEADER_T *header)
/* A matching service exists */
short version = payload->version;
short version_min = payload->version_min;
+
if ((service->version < version_min) ||
(version < service->version_min)) {
/* Version mismatch */
@@ -1651,6 +1668,7 @@ parse_rx_slots(VCHIQ_STATE_T *state)
VCHIQ_SHARED_STATE_T *remote = state->remote;
VCHIQ_SERVICE_T *service = NULL;
int tx_pos;
+
DEBUG_INITIALISE(state->local)
tx_pos = remote->tx_pos;
@@ -1664,6 +1682,7 @@ parse_rx_slots(VCHIQ_STATE_T *state)
DEBUG_TRACE(PARSE_LINE);
if (!state->rx_data) {
int rx_index;
+
WARN_ON(!((state->rx_pos & VCHIQ_SLOT_MASK) == 0));
rx_index = remote->slot_queue[
SLOT_QUEUE_INDEX_FROM_POS(state->rx_pos) &
@@ -1841,6 +1860,7 @@ parse_rx_slots(VCHIQ_STATE_T *state)
case VCHIQ_MSG_BULK_RX:
case VCHIQ_MSG_BULK_TX: {
VCHIQ_BULK_QUEUE_T *queue;
+
WARN_ON(!state->is_master);
queue = (type == VCHIQ_MSG_BULK_RX) ?
&service->bulk_tx : &service->bulk_rx;
@@ -2054,6 +2074,7 @@ slot_handler_func(void *v)
{
VCHIQ_STATE_T *state = (VCHIQ_STATE_T *) v;
VCHIQ_SHARED_STATE_T *local = state->local;
+
DEBUG_INITIALISE(local)
while (1) {
@@ -2553,131 +2574,126 @@ vchiq_add_service_internal(VCHIQ_STATE_T *state,
VCHIQ_INSTANCE_T instance, VCHIQ_USERDATA_TERM_T userdata_term)
{
VCHIQ_SERVICE_T *service;
+ VCHIQ_SERVICE_T **pservice = NULL;
+ VCHIQ_SERVICE_QUOTA_T *service_quota;
+ int i;
service = kmalloc(sizeof(VCHIQ_SERVICE_T), GFP_KERNEL);
- if (service) {
- service->base.fourcc = params->fourcc;
- service->base.callback = params->callback;
- service->base.userdata = params->userdata;
- service->handle = VCHIQ_SERVICE_HANDLE_INVALID;
- service->ref_count = 1;
- service->srvstate = VCHIQ_SRVSTATE_FREE;
- service->userdata_term = userdata_term;
- service->localport = VCHIQ_PORT_FREE;
- service->remoteport = VCHIQ_PORT_FREE;
-
- service->public_fourcc = (srvstate == VCHIQ_SRVSTATE_OPENING) ?
- VCHIQ_FOURCC_INVALID : params->fourcc;
- service->client_id = 0;
- service->auto_close = 1;
- service->sync = 0;
- service->closing = 0;
- service->trace = 0;
- atomic_set(&service->poll_flags, 0);
- service->version = params->version;
- service->version_min = params->version_min;
- service->state = state;
- service->instance = instance;
- service->service_use_count = 0;
- init_bulk_queue(&service->bulk_tx);
- init_bulk_queue(&service->bulk_rx);
- sema_init(&service->remove_event, 0);
- sema_init(&service->bulk_remove_event, 0);
- mutex_init(&service->bulk_mutex);
- memset(&service->stats, 0, sizeof(service->stats));
- } else {
- vchiq_log_error(vchiq_core_log_level,
- "Out of memory");
- }
-
- if (service) {
- VCHIQ_SERVICE_T **pservice = NULL;
- int i;
-
- /* Although it is perfectly possible to use service_spinlock
- ** to protect the creation of services, it is overkill as it
- ** disables interrupts while the array is searched.
- ** The only danger is of another thread trying to create a
- ** service - service deletion is safe.
- ** Therefore it is preferable to use state->mutex which,
- ** although slower to claim, doesn't block interrupts while
- ** it is held.
- */
-
- mutex_lock(&state->mutex);
-
- /* Prepare to use a previously unused service */
- if (state->unused_service < VCHIQ_MAX_SERVICES)
- pservice = &state->services[state->unused_service];
-
- if (srvstate == VCHIQ_SRVSTATE_OPENING) {
- for (i = 0; i < state->unused_service; i++) {
- VCHIQ_SERVICE_T *srv = state->services[i];
- if (!srv) {
- pservice = &state->services[i];
- break;
- }
- }
- } else {
- for (i = (state->unused_service - 1); i >= 0; i--) {
- VCHIQ_SERVICE_T *srv = state->services[i];
- if (!srv)
- pservice = &state->services[i];
- else if ((srv->public_fourcc == params->fourcc)
- && ((srv->instance != instance) ||
- (srv->base.callback !=
- params->callback))) {
- /* There is another server using this
- ** fourcc which doesn't match. */
- pservice = NULL;
- break;
- }
+ if (!service)
+ return service;
+
+ service->base.fourcc = params->fourcc;
+ service->base.callback = params->callback;
+ service->base.userdata = params->userdata;
+ service->handle = VCHIQ_SERVICE_HANDLE_INVALID;
+ service->ref_count = 1;
+ service->srvstate = VCHIQ_SRVSTATE_FREE;
+ service->userdata_term = userdata_term;
+ service->localport = VCHIQ_PORT_FREE;
+ service->remoteport = VCHIQ_PORT_FREE;
+
+ service->public_fourcc = (srvstate == VCHIQ_SRVSTATE_OPENING) ?
+ VCHIQ_FOURCC_INVALID : params->fourcc;
+ service->client_id = 0;
+ service->auto_close = 1;
+ service->sync = 0;
+ service->closing = 0;
+ service->trace = 0;
+ atomic_set(&service->poll_flags, 0);
+ service->version = params->version;
+ service->version_min = params->version_min;
+ service->state = state;
+ service->instance = instance;
+ service->service_use_count = 0;
+ init_bulk_queue(&service->bulk_tx);
+ init_bulk_queue(&service->bulk_rx);
+ sema_init(&service->remove_event, 0);
+ sema_init(&service->bulk_remove_event, 0);
+ mutex_init(&service->bulk_mutex);
+ memset(&service->stats, 0, sizeof(service->stats));
+
+ /* Although it is perfectly possible to use service_spinlock
+ ** to protect the creation of services, it is overkill as it
+ ** disables interrupts while the array is searched.
+ ** The only danger is of another thread trying to create a
+ ** service - service deletion is safe.
+ ** Therefore it is preferable to use state->mutex which,
+ ** although slower to claim, doesn't block interrupts while
+ ** it is held.
+ */
+
+ mutex_lock(&state->mutex);
+
+ /* Prepare to use a previously unused service */
+ if (state->unused_service < VCHIQ_MAX_SERVICES)
+ pservice = &state->services[state->unused_service];
+
+ if (srvstate == VCHIQ_SRVSTATE_OPENING) {
+ for (i = 0; i < state->unused_service; i++) {
+ VCHIQ_SERVICE_T *srv = state->services[i];
+
+ if (!srv) {
+ pservice = &state->services[i];
+ break;
}
}
-
- if (pservice) {
- service->localport = (pservice - state->services);
- if (!handle_seq)
- handle_seq = VCHIQ_MAX_STATES *
- VCHIQ_MAX_SERVICES;
- service->handle = handle_seq |
- (state->id * VCHIQ_MAX_SERVICES) |
- service->localport;
- handle_seq += VCHIQ_MAX_STATES * VCHIQ_MAX_SERVICES;
- *pservice = service;
- if (pservice == &state->services[state->unused_service])
- state->unused_service++;
- }
-
- mutex_unlock(&state->mutex);
-
- if (!pservice) {
- kfree(service);
- service = NULL;
+ } else {
+ for (i = (state->unused_service - 1); i >= 0; i--) {
+ VCHIQ_SERVICE_T *srv = state->services[i];
+
+ if (!srv)
+ pservice = &state->services[i];
+ else if ((srv->public_fourcc == params->fourcc)
+ && ((srv->instance != instance) ||
+ (srv->base.callback !=
+ params->callback))) {
+ /* There is another server using this
+ ** fourcc which doesn't match. */
+ pservice = NULL;
+ break;
+ }
}
}
- if (service) {
- VCHIQ_SERVICE_QUOTA_T *service_quota =
- &state->service_quotas[service->localport];
- service_quota->slot_quota = state->default_slot_quota;
- service_quota->message_quota = state->default_message_quota;
- if (service_quota->slot_use_count == 0)
- service_quota->previous_tx_index =
- SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos)
- - 1;
+ if (pservice) {
+ service->localport = (pservice - state->services);
+ if (!handle_seq)
+ handle_seq = VCHIQ_MAX_STATES *
+ VCHIQ_MAX_SERVICES;
+ service->handle = handle_seq |
+ (state->id * VCHIQ_MAX_SERVICES) |
+ service->localport;
+ handle_seq += VCHIQ_MAX_STATES * VCHIQ_MAX_SERVICES;
+ *pservice = service;
+ if (pservice == &state->services[state->unused_service])
+ state->unused_service++;
+ }
- /* Bring this service online */
- vchiq_set_service_state(service, srvstate);
+ mutex_unlock(&state->mutex);
- vchiq_log_info(vchiq_core_msg_log_level,
- "%s Service %c%c%c%c SrcPort:%d",
- (srvstate == VCHIQ_SRVSTATE_OPENING)
- ? "Open" : "Add",
- VCHIQ_FOURCC_AS_4CHARS(params->fourcc),
- service->localport);
+ if (!pservice) {
+ kfree(service);
+ return NULL;
}
+ service_quota = &state->service_quotas[service->localport];
+ service_quota->slot_quota = state->default_slot_quota;
+ service_quota->message_quota = state->default_message_quota;
+ if (service_quota->slot_use_count == 0)
+ service_quota->previous_tx_index =
+ SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos)
+ - 1;
+
+ /* Bring this service online */
+ vchiq_set_service_state(service, srvstate);
+
+ vchiq_log_info(vchiq_core_msg_log_level,
+ "%s Service %c%c%c%c SrcPort:%d",
+ (srvstate == VCHIQ_SRVSTATE_OPENING)
+ ? "Open" : "Add",
+ VCHIQ_FOURCC_AS_4CHARS(params->fourcc),
+ service->localport);
+
/* Don't unlock the service - leave it with a ref_count of 1. */
return service;
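The comment above about preferring state->mutex to service_spinlock comes down to a simple pattern: take a sleeping lock around the slow slot search used at creation time, and leave the spinlock for the fast lookup paths. A minimal sketch of that pattern under invented names (not part of the patch):

#include <linux/errno.h>
#include <linux/mutex.h>

#define DEMO_MAX_SLOTS	64

static DEFINE_MUTEX(demo_create_mutex);	/* sleeping lock, slow path only */
static void *demo_slots[DEMO_MAX_SLOTS];

/*
 * Claim the first free slot under the mutex; lookups elsewhere can keep
 * using a spinlock because slots are only ever written here.
 */
static int demo_claim_slot(void *obj)
{
	int i, ret = -ENOSPC;

	mutex_lock(&demo_create_mutex);
	for (i = 0; i < DEMO_MAX_SLOTS; i++) {
		if (!demo_slots[i]) {
			demo_slots[i] = obj;
			ret = i;
			break;
		}
	}
	mutex_unlock(&demo_create_mutex);
	return ret;
}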
@@ -2766,6 +2782,7 @@ release_service_messages(VCHIQ_SERVICE_T *service)
(VCHIQ_HEADER_T *)(data + pos);
int msgid = header->msgid;
int port = VCHIQ_MSG_DSTPORT(msgid);
+
if ((port == service->localport) &&
(msgid & VCHIQ_MSGID_CLAIMED)) {
vchiq_log_info(vchiq_core_log_level,
@@ -3498,6 +3515,7 @@ vchiq_release_message(VCHIQ_SERVICE_HANDLE_T handle, VCHIQ_HEADER_T *header)
if ((slot_index >= remote->slot_first) &&
(slot_index <= remote->slot_last)) {
int msgid = header->msgid;
+
if (msgid & VCHIQ_MSGID_CLAIMED) {
VCHIQ_SLOT_INFO_T *slot_info =
SLOT_INFO_FROM_INDEX(state, slot_index);
@@ -3656,9 +3674,9 @@ vchiq_dump_shared_state(void *dump_context, VCHIQ_STATE_T *state,
"COMPLETION_QUEUE_FULL_COUNT"
};
int i;
-
char buf[80];
int len;
+
len = snprintf(buf, sizeof(buf),
" %s: slots %d-%d tx_pos=%x recycle=%x",
label, shared->slot_first, shared->slot_last,
@@ -3762,9 +3780,11 @@ vchiq_dump_service_state(void *dump_context, VCHIQ_SERVICE_T *service)
&service->state->service_quotas[service->localport];
int fourcc = service->base.fourcc;
int tx_pending, rx_pending;
+
if (service->remoteport != VCHIQ_PORT_FREE) {
int len2 = snprintf(remoteport, sizeof(remoteport),
"%u", service->remoteport);
+
if (service->public_fourcc != VCHIQ_FOURCC_INVALID)
snprintf(remoteport + len2,
sizeof(remoteport) - len2,
@@ -3866,6 +3886,7 @@ vchiq_loud_error_footer(void)
VCHIQ_STATUS_T vchiq_send_remote_use(VCHIQ_STATE_T *state)
{
VCHIQ_STATUS_T status = VCHIQ_RETRY;
+
if (state->conn_state != VCHIQ_CONNSTATE_DISCONNECTED)
status = queue_message(state, NULL,
VCHIQ_MAKE_MSG(VCHIQ_MSG_REMOTE_USE, 0, 0),
@@ -3876,6 +3897,7 @@ VCHIQ_STATUS_T vchiq_send_remote_use(VCHIQ_STATE_T *state)
VCHIQ_STATUS_T vchiq_send_remote_release(VCHIQ_STATE_T *state)
{
VCHIQ_STATUS_T status = VCHIQ_RETRY;
+
if (state->conn_state != VCHIQ_CONNSTATE_DISCONNECTED)
status = queue_message(state, NULL,
VCHIQ_MAKE_MSG(VCHIQ_MSG_REMOTE_RELEASE, 0, 0),
@@ -3886,6 +3908,7 @@ VCHIQ_STATUS_T vchiq_send_remote_release(VCHIQ_STATE_T *state)
VCHIQ_STATUS_T vchiq_send_remote_use_active(VCHIQ_STATE_T *state)
{
VCHIQ_STATUS_T status = VCHIQ_RETRY;
+
if (state->conn_state != VCHIQ_CONNSTATE_DISCONNECTED)
status = queue_message(state, NULL,
VCHIQ_MAKE_MSG(VCHIQ_MSG_REMOTE_USE_ACTIVE, 0, 0),
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c
index f07cd4448ddf..9367a9a5aa3c 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c
@@ -81,9 +81,7 @@ static struct vchiq_debugfs_log_entry vchiq_debugfs_log_entries[] = {
{ "susp", &vchiq_susp_log_level },
{ "arm", &vchiq_arm_log_level },
};
-static int n_log_entries =
- sizeof(vchiq_debugfs_log_entries)/sizeof(vchiq_debugfs_log_entries[0]);
-
+static int n_log_entries = ARRAY_SIZE(vchiq_debugfs_log_entries);
static struct dentry *vchiq_clients_top(void);
static struct dentry *vchiq_debugfs_top(void);
@@ -167,6 +165,7 @@ static int vchiq_debugfs_create_log_entries(struct dentry *top)
struct dentry *dir;
size_t i;
int ret = 0;
+
dir = debugfs_create_dir("log", vchiq_debugfs_top());
if (!dir)
return -ENOMEM;
@@ -174,6 +173,7 @@ static int vchiq_debugfs_create_log_entries(struct dentry *top)
for (i = 0; i < n_log_entries; i++) {
void *levp = (void *)vchiq_debugfs_log_entries[i].plevel;
+
dir = debugfs_create_file(vchiq_debugfs_log_entries[i].name,
0644,
debugfs_info.log_categories,
@@ -312,6 +312,7 @@ fail_top:
void vchiq_debugfs_remove_instance(VCHIQ_INSTANCE_T instance)
{
VCHIQ_DEBUGFS_NODE_T *node = vchiq_instance_get_debugfs_node(instance);
+
debugfs_remove_recursive(node->dentry);
}
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_if.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_if.h
index 377e8e48bb54..0e270852900d 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_if.h
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_if.h
@@ -88,10 +88,10 @@ typedef struct vchiq_header_struct {
char data[0]; /* message */
} VCHIQ_HEADER_T;
-typedef struct {
+struct vchiq_element {
const void *data;
unsigned int size;
-} VCHIQ_ELEMENT_T;
+};
typedef unsigned int VCHIQ_SERVICE_HANDLE_T;
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_ioctl.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_ioctl.h
index 6137ae9de1c1..9f859953f45c 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_ioctl.h
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_ioctl.h
@@ -50,7 +50,7 @@ typedef struct {
typedef struct {
unsigned int handle;
unsigned int count;
- const VCHIQ_ELEMENT_T *elements;
+ const struct vchiq_element *elements;
} VCHIQ_QUEUE_MESSAGE_T;
typedef struct {
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_kern_lib.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_kern_lib.c
index 4317c06943a6..34f746db19cd 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_kern_lib.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_kern_lib.c
@@ -147,9 +147,11 @@ VCHIQ_STATUS_T vchiq_shutdown(VCHIQ_INSTANCE_T instance)
if (status == VCHIQ_SUCCESS) {
struct list_head *pos, *next;
+
list_for_each_safe(pos, next,
&instance->bulk_waiter_list) {
struct bulk_waiter_node *waiter;
+
waiter = list_entry(pos,
struct bulk_waiter_node,
list);
@@ -406,6 +408,7 @@ vchiq_blocking_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle, void *data,
if (waiter) {
VCHIQ_BULK_T *bulk = waiter->bulk_waiter.bulk;
+
if (bulk) {
/* This thread has an outstanding bulk transfer. */
if ((bulk->data != data) ||
@@ -435,6 +438,7 @@ vchiq_blocking_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle, void *data,
if ((status != VCHIQ_RETRY) || fatal_signal_pending(current) ||
!waiter->bulk_waiter.bulk) {
VCHIQ_BULK_T *bulk = waiter->bulk_waiter.bulk;
+
if (bulk) {
/* Cancel the signal when the transfer
** completes. */
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_memdrv.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_memdrv.h
index dd43458f306f..c233b866725b 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_memdrv.h
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_memdrv.h
@@ -41,22 +41,10 @@
/* ---- Constants and Types ---------------------------------------------- */
-typedef struct {
- void *arm_shared_mem_virt;
- dma_addr_t arm_shared_mem_phys;
- size_t arm_shared_mem_size;
-
- void *vc_shared_mem_virt;
- dma_addr_t vc_shared_mem_phys;
- size_t vc_shared_mem_size;
-} VCHIQ_SHARED_MEM_INFO_T;
-
/* ---- Variable Externs ------------------------------------------------- */
/* ---- Function Prototypes ---------------------------------------------- */
-void vchiq_get_shared_mem_info(VCHIQ_SHARED_MEM_INFO_T *info);
-
VCHIQ_STATUS_T vchiq_memdrv_initialise(void);
VCHIQ_STATUS_T vchiq_userdrv_create_instance(
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_pagelist.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_pagelist.h
index 12c304ceb952..926c247fd9d3 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_pagelist.h
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_pagelist.h
@@ -34,9 +34,6 @@
#ifndef VCHIQ_PAGELIST_H
#define VCHIQ_PAGELIST_H
-#ifndef PAGE_SIZE
-#define PAGE_SIZE 4096
-#endif
#define CACHE_LINE_SIZE 32
#define PAGELIST_WRITE 0
#define PAGELIST_READ 1
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_shim.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_shim.c
index 48984abc3854..8af95fc361ed 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_shim.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_shim.c
@@ -556,6 +556,7 @@ EXPORT_SYMBOL(vchi_connect);
int32_t vchi_disconnect(VCHI_INSTANCE_T instance_handle)
{
VCHIQ_INSTANCE_T instance = (VCHIQ_INSTANCE_T)instance_handle;
+
return vchiq_status_to_vchi(vchiq_shutdown(instance));
}
EXPORT_SYMBOL(vchi_disconnect);
@@ -733,6 +734,7 @@ int32_t vchi_service_close(const VCHI_SERVICE_HANDLE_T handle)
{
int32_t ret = -1;
SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
+
if (service) {
VCHIQ_STATUS_T status = vchiq_close_service(service->handle);
if (status == VCHIQ_SUCCESS) {
@@ -750,8 +752,10 @@ int32_t vchi_service_destroy(const VCHI_SERVICE_HANDLE_T handle)
{
int32_t ret = -1;
SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
+
if (service) {
VCHIQ_STATUS_T status = vchiq_remove_service(service->handle);
+
if (status == VCHIQ_SUCCESS) {
service_free(service);
service = NULL;
@@ -770,6 +774,7 @@ int32_t vchi_service_set_option(const VCHI_SERVICE_HANDLE_T handle,
int32_t ret = -1;
SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
VCHIQ_SERVICE_OPTION_T vchiq_option;
+
switch (option) {
case VCHI_SERVICE_OPTION_TRACE:
vchiq_option = VCHIQ_SERVICE_OPTION_TRACE;
@@ -797,6 +802,7 @@ int32_t vchi_get_peer_version(const VCHI_SERVICE_HANDLE_T handle, short *peer_ve
{
int32_t ret = -1;
SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
+
if (service)
{
VCHIQ_STATUS_T status;
@@ -808,54 +814,6 @@ int32_t vchi_get_peer_version(const VCHI_SERVICE_HANDLE_T handle, short *peer_ve
}
EXPORT_SYMBOL(vchi_get_peer_version);
-/* ----------------------------------------------------------------------
- * read a uint32_t from buffer.
- * network format is defined to be little endian
- * -------------------------------------------------------------------- */
-uint32_t
-vchi_readbuf_uint32(const void *_ptr)
-{
- const unsigned char *ptr = _ptr;
- return ptr[0] | (ptr[1] << 8) | (ptr[2] << 16) | (ptr[3] << 24);
-}
-
-/* ----------------------------------------------------------------------
- * write a uint32_t to buffer.
- * network format is defined to be little endian
- * -------------------------------------------------------------------- */
-void
-vchi_writebuf_uint32(void *_ptr, uint32_t value)
-{
- unsigned char *ptr = _ptr;
- ptr[0] = (unsigned char)((value >> 0) & 0xFF);
- ptr[1] = (unsigned char)((value >> 8) & 0xFF);
- ptr[2] = (unsigned char)((value >> 16) & 0xFF);
- ptr[3] = (unsigned char)((value >> 24) & 0xFF);
-}
-
-/* ----------------------------------------------------------------------
- * read a uint16_t from buffer.
- * network format is defined to be little endian
- * -------------------------------------------------------------------- */
-uint16_t
-vchi_readbuf_uint16(const void *_ptr)
-{
- const unsigned char *ptr = _ptr;
- return ptr[0] | (ptr[1] << 8);
-}
-
-/* ----------------------------------------------------------------------
- * write a uint16_t into the buffer.
- * network format is defined to be little endian
- * -------------------------------------------------------------------- */
-void
-vchi_writebuf_uint16(void *_ptr, uint16_t value)
-{
- unsigned char *ptr = _ptr;
- ptr[0] = (value >> 0) & 0xFF;
- ptr[1] = (value >> 8) & 0xFF;
-}
-
/***********************************************************
* Name: vchi_service_use
*
@@ -869,6 +827,7 @@ vchi_writebuf_uint16(void *_ptr, uint16_t value)
int32_t vchi_service_use(const VCHI_SERVICE_HANDLE_T handle)
{
int32_t ret = -1;
+
SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
if (service)
ret = vchiq_status_to_vchi(vchiq_use_service(service->handle));
@@ -889,6 +848,7 @@ EXPORT_SYMBOL(vchi_service_use);
int32_t vchi_service_release(const VCHI_SERVICE_HANDLE_T handle)
{
int32_t ret = -1;
+
SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
if (service)
ret = vchiq_status_to_vchi(
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.c
index e0ba0ed704fd..7fa0310e7b9e 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.c
@@ -52,7 +52,7 @@ int vchiu_queue_init(VCHIU_QUEUE_T *queue, int size)
sema_init(&queue->push, 0);
queue->storage = kzalloc(size * sizeof(VCHIQ_HEADER_T *), GFP_KERNEL);
- if (queue->storage == NULL) {
+ if (!queue->storage) {
vchiu_queue_delete(queue);
return 0;
}
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.h
index e63964f5a18a..5a1540d349d3 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.h
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.h
@@ -44,7 +44,6 @@
#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/string.h>
-#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/random.h>
#include <linux/sched/signal.h>
@@ -52,7 +51,6 @@
#include <linux/uaccess.h>
#include <linux/time.h> /* for time_t */
#include <linux/slab.h>
-#include <linux/vmalloc.h>
#include "vchiq_if.h"
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_version.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_version.c
index b6bfa21155e4..994b81798134 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_version.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_version.c
@@ -33,27 +33,27 @@
#include "vchiq_build_info.h"
#include <linux/broadcom/vc_debug_sym.h>
-VC_DEBUG_DECLARE_STRING_VAR( vchiq_build_hostname, "dc4-arm-01" );
-VC_DEBUG_DECLARE_STRING_VAR( vchiq_build_version, "9245b4c35b99b3870e1f7dc598c5692b3c66a6f0 (tainted)" );
-VC_DEBUG_DECLARE_STRING_VAR( vchiq_build_time, __TIME__ );
-VC_DEBUG_DECLARE_STRING_VAR( vchiq_build_date, __DATE__ );
+VC_DEBUG_DECLARE_STRING_VAR(vchiq_build_hostname, "dc4-arm-01");
+VC_DEBUG_DECLARE_STRING_VAR(vchiq_build_version, "9245b4c35b99b3870e1f7dc598c5692b3c66a6f0 (tainted)");
+VC_DEBUG_DECLARE_STRING_VAR(vchiq_build_time, __TIME__);
+VC_DEBUG_DECLARE_STRING_VAR(vchiq_build_date, __DATE__);
-const char *vchiq_get_build_hostname( void )
+const char *vchiq_get_build_hostname(void)
{
return vchiq_build_hostname;
}
-const char *vchiq_get_build_version( void )
+const char *vchiq_get_build_version(void)
{
return vchiq_build_version;
}
-const char *vchiq_get_build_date( void )
+const char *vchiq_get_build_date(void)
{
return vchiq_build_date;
}
-const char *vchiq_get_build_time( void )
+const char *vchiq_get_build_time(void)
{
return vchiq_build_time;
}
diff --git a/drivers/staging/vme/devices/vme_user.c b/drivers/staging/vme/devices/vme_user.c
index 69e9a7705afb..a3d4610fbdbe 100644
--- a/drivers/staging/vme/devices/vme_user.c
+++ b/drivers/staging/vme/devices/vme_user.c
@@ -17,7 +17,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <linux/atomic.h>
+#include <linux/refcount.h>
#include <linux/cdev.h>
#include <linux/delay.h>
#include <linux/device.h>
@@ -118,7 +118,7 @@ static const int type[VME_DEVS] = { MASTER_MINOR, MASTER_MINOR,
struct vme_user_vma_priv {
unsigned int minor;
- atomic_t refcnt;
+ refcount_t refcnt;
};
static ssize_t resource_to_user(int minor, char __user *buf, size_t count,
@@ -430,7 +430,7 @@ static void vme_user_vm_open(struct vm_area_struct *vma)
{
struct vme_user_vma_priv *vma_priv = vma->vm_private_data;
- atomic_inc(&vma_priv->refcnt);
+ refcount_inc(&vma_priv->refcnt);
}
static void vme_user_vm_close(struct vm_area_struct *vma)
@@ -438,7 +438,7 @@ static void vme_user_vm_close(struct vm_area_struct *vma)
struct vme_user_vma_priv *vma_priv = vma->vm_private_data;
unsigned int minor = vma_priv->minor;
- if (!atomic_dec_and_test(&vma_priv->refcnt))
+ if (!refcount_dec_and_test(&vma_priv->refcnt))
return;
mutex_lock(&image[minor].mutex);
@@ -473,7 +473,7 @@ static int vme_user_master_mmap(unsigned int minor, struct vm_area_struct *vma)
}
vma_priv->minor = minor;
- atomic_set(&vma_priv->refcnt, 1);
+ refcount_set(&vma_priv->refcnt, 1);
vma->vm_ops = &vme_user_vm_ops;
vma->vm_private_data = vma_priv;
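The vme_user hunks above swap a bare atomic_t for refcount_t, which saturates instead of wrapping on overflow. The lifetime pattern they implement, reduced to a sketch with invented names (not part of the patch):

#include <linux/refcount.h>
#include <linux/slab.h>

struct demo_priv {
	refcount_t refcnt;
};

static struct demo_priv *demo_create(void)
{
	struct demo_priv *p = kzalloc(sizeof(*p), GFP_KERNEL);

	if (p)
		refcount_set(&p->refcnt, 1);		/* initial user */
	return p;
}

static void demo_get(struct demo_priv *p)
{
	refcount_inc(&p->refcnt);			/* e.g. vm_open() */
}

static void demo_put(struct demo_priv *p)
{
	if (refcount_dec_and_test(&p->refcnt))		/* e.g. vm_close() */
		kfree(p);
}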
diff --git a/drivers/staging/vt6655/baseband.h b/drivers/staging/vt6655/baseband.h
index c351e03f6ad2..feaf222574ee 100644
--- a/drivers/staging/vt6655/baseband.h
+++ b/drivers/staging/vt6655/baseband.h
@@ -63,16 +63,16 @@ BBuGetFrameTime(
unsigned short wRate
);
-void vnt_get_phy_field(struct vnt_private *, u32 frame_length,
- u16 tx_rate, u8 pkt_type, struct vnt_phy_field *);
+void vnt_get_phy_field(struct vnt_private *priv, u32 frame_length,
+ u16 tx_rate, u8 pkt_type, struct vnt_phy_field *phy);
-bool BBbReadEmbedded(struct vnt_private *, unsigned char byBBAddr,
+bool BBbReadEmbedded(struct vnt_private *priv, unsigned char byBBAddr,
unsigned char *pbyData);
-bool BBbWriteEmbedded(struct vnt_private *, unsigned char byBBAddr,
+bool BBbWriteEmbedded(struct vnt_private *priv, unsigned char byBBAddr,
unsigned char byData);
-void BBvSetShortSlotTime(struct vnt_private *);
-void BBvSetVGAGainOffset(struct vnt_private *, unsigned char byData);
+void BBvSetShortSlotTime(struct vnt_private *priv);
+void BBvSetVGAGainOffset(struct vnt_private *priv, unsigned char byData);
/* VT3253 Baseband */
bool BBbVT3253Init(struct vnt_private *priv);
diff --git a/drivers/staging/vt6655/card.c b/drivers/staging/vt6655/card.c
index e0c92818ed70..5463cf869d1b 100644
--- a/drivers/staging/vt6655/card.c
+++ b/drivers/staging/vt6655/card.c
@@ -524,22 +524,22 @@ CARDvSafeResetTx(
struct vnt_tx_desc *pCurrTD;
/* initialize TD index */
- priv->apTailTD[0] = &(priv->apTD0Rings[0]);
- priv->apCurrTD[0] = &(priv->apTD0Rings[0]);
+ priv->apTailTD[0] = &priv->apTD0Rings[0];
+ priv->apCurrTD[0] = &priv->apTD0Rings[0];
- priv->apTailTD[1] = &(priv->apTD1Rings[0]);
- priv->apCurrTD[1] = &(priv->apTD1Rings[0]);
+ priv->apTailTD[1] = &priv->apTD1Rings[0];
+ priv->apCurrTD[1] = &priv->apTD1Rings[0];
for (uu = 0; uu < TYPE_MAXTD; uu++)
priv->iTDUsed[uu] = 0;
for (uu = 0; uu < priv->opts.tx_descs[0]; uu++) {
- pCurrTD = &(priv->apTD0Rings[uu]);
+ pCurrTD = &priv->apTD0Rings[uu];
pCurrTD->td0.owner = OWNED_BY_HOST;
/* init all Tx Packet pointer to NULL */
}
for (uu = 0; uu < priv->opts.tx_descs[1]; uu++) {
- pCurrTD = &(priv->apTD1Rings[uu]);
+ pCurrTD = &priv->apTD1Rings[uu];
pCurrTD->td0.owner = OWNED_BY_HOST;
/* init all Tx Packet pointer to NULL */
}
@@ -575,12 +575,12 @@ CARDvSafeResetRx(
struct vnt_rx_desc *pDesc;
/* initialize RD index */
- priv->pCurrRD[0] = &(priv->aRD0Ring[0]);
- priv->pCurrRD[1] = &(priv->aRD1Ring[0]);
+ priv->pCurrRD[0] = &priv->aRD0Ring[0];
+ priv->pCurrRD[1] = &priv->aRD1Ring[0];
/* init state, all RD is chip's */
for (uu = 0; uu < priv->opts.rx_descs0; uu++) {
- pDesc = &(priv->aRD0Ring[uu]);
+ pDesc = &priv->aRD0Ring[uu];
pDesc->rd0.res_count = cpu_to_le16(priv->rx_buf_sz);
pDesc->rd0.owner = OWNED_BY_NIC;
pDesc->rd1.req_count = cpu_to_le16(priv->rx_buf_sz);
@@ -588,7 +588,7 @@ CARDvSafeResetRx(
/* init state, all RD is chip's */
for (uu = 0; uu < priv->opts.rx_descs1; uu++) {
- pDesc = &(priv->aRD1Ring[uu]);
+ pDesc = &priv->aRD1Ring[uu];
pDesc->rd0.res_count = cpu_to_le16(priv->rx_buf_sz);
pDesc->rd0.owner = OWNED_BY_NIC;
pDesc->rd1.req_count = cpu_to_le16(priv->rx_buf_sz);
@@ -911,16 +911,13 @@ bool CARDbSoftwareReset(struct vnt_private *priv)
*/
u64 CARDqGetTSFOffset(unsigned char byRxRate, u64 qwTSF1, u64 qwTSF2)
{
- u64 qwTSFOffset = 0;
unsigned short wRxBcnTSFOffst;
wRxBcnTSFOffst = cwRXBCNTSFOff[byRxRate%MAX_RATE];
qwTSF2 += (u64)wRxBcnTSFOffst;
- qwTSFOffset = qwTSF1 - qwTSF2;
-
- return qwTSFOffset;
+ return qwTSF1 - qwTSF2;
}
/*
diff --git a/drivers/staging/vt6655/rf.h b/drivers/staging/vt6655/rf.h
index b6e853784a26..37600093cab2 100644
--- a/drivers/staging/vt6655/rf.h
+++ b/drivers/staging/vt6655/rf.h
@@ -30,7 +30,7 @@
/*--------------------- Export Definitions -------------------------*/
/*
* Baseband RF pair definition in eeprom (Bits 6..0)
-*/
+ */
#define RF_RFMD2959 0x01
#define RF_MAXIMAG 0x02
#define RF_AIROHA 0x03
diff --git a/drivers/staging/vt6655/rxtx.h b/drivers/staging/vt6655/rxtx.h
index 89de67115826..095258923ebd 100644
--- a/drivers/staging/vt6655/rxtx.h
+++ b/drivers/staging/vt6655/rxtx.h
@@ -187,10 +187,10 @@ struct vnt_tx_short_buf_head {
__le16 time_stamp_off;
} __packed;
-int vnt_generate_fifo_header(struct vnt_private *, u32,
- struct vnt_tx_desc *head_td, struct sk_buff *);
-int vnt_beacon_make(struct vnt_private *, struct ieee80211_vif *);
-int vnt_beacon_enable(struct vnt_private *, struct ieee80211_vif *,
- struct ieee80211_bss_conf *);
+int vnt_generate_fifo_header(struct vnt_private *priv, u32 dma_idx,
+ struct vnt_tx_desc *head_td, struct sk_buff *skb);
+int vnt_beacon_make(struct vnt_private *priv, struct ieee80211_vif *vif);
+int vnt_beacon_enable(struct vnt_private *priv, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *conf);
#endif /* __RXTX_H__ */
diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c
index 9e074e9daf4e..028f54b453d0 100644
--- a/drivers/staging/vt6656/main_usb.c
+++ b/drivers/staging/vt6656/main_usb.c
@@ -407,16 +407,6 @@ static void vnt_free_rx_bufs(struct vnt_private *priv)
}
}
-static void usb_device_reset(struct vnt_private *priv)
-{
- int status;
-
- status = usb_reset_device(priv->usb);
- if (status)
- dev_warn(&priv->usb->dev,
- "usb_device_reset fail status=%d\n", status);
-}
-
static void vnt_free_int_bufs(struct vnt_private *priv)
{
kfree(priv->int_buf.data_buf);
@@ -995,7 +985,10 @@ vt6656_probe(struct usb_interface *intf, const struct usb_device_id *id)
SET_IEEE80211_DEV(priv->hw, &intf->dev);
- usb_device_reset(priv);
+ rc = usb_reset_device(priv->usb);
+ if (rc)
+ dev_warn(&priv->usb->dev,
+ "%s reset fail status=%d\n", __func__, rc);
clear_bit(DEVICE_FLAGS_DISCONNECTED, &priv->flags);
vnt_reset_command_timer(priv);
diff --git a/drivers/staging/vt6656/rf.c b/drivers/staging/vt6656/rf.c
index 068c1c89f653..23581afb4211 100644
--- a/drivers/staging/vt6656/rf.c
+++ b/drivers/staging/vt6656/rf.c
@@ -771,7 +771,7 @@ int vnt_rf_set_txpower(struct vnt_private *priv, u8 power, u32 rate)
ret &= vnt_rf_write_embedded(priv, 0x015C0800);
} else {
dev_dbg(&priv->usb->dev,
- "@@@@ vnt_rf_set_txpower> 11G mode\n");
+ "@@@@ %s> 11G mode\n", __func__);
power_setting = ((0x3f - power) << 20) | (0x7 << 8);
@@ -876,7 +876,7 @@ void vnt_rf_table_download(struct vnt_private *priv)
memcpy(array, addr1, length1);
vnt_control_out(priv, MESSAGE_TYPE_WRITE, 0,
- MESSAGE_REQUEST_RF_INIT, length1, array);
+ MESSAGE_REQUEST_RF_INIT, length1, array);
/* Channel Table 0 */
value = 0;
@@ -889,7 +889,7 @@ void vnt_rf_table_download(struct vnt_private *priv)
memcpy(array, addr2, length);
vnt_control_out(priv, MESSAGE_TYPE_WRITE,
- value, MESSAGE_REQUEST_RF_CH0, length, array);
+ value, MESSAGE_REQUEST_RF_CH0, length, array);
length2 -= length;
value += length;
@@ -907,7 +907,7 @@ void vnt_rf_table_download(struct vnt_private *priv)
memcpy(array, addr3, length);
vnt_control_out(priv, MESSAGE_TYPE_WRITE,
- value, MESSAGE_REQUEST_RF_CH1, length, array);
+ value, MESSAGE_REQUEST_RF_CH1, length, array);
length3 -= length;
value += length;
@@ -924,7 +924,7 @@ void vnt_rf_table_download(struct vnt_private *priv)
/* Init Table 2 */
vnt_control_out(priv, MESSAGE_TYPE_WRITE,
- 0, MESSAGE_REQUEST_RF_INIT2, length1, array);
+ 0, MESSAGE_REQUEST_RF_INIT2, length1, array);
/* Channel Table 0 */
value = 0;
@@ -937,7 +937,8 @@ void vnt_rf_table_download(struct vnt_private *priv)
memcpy(array, addr2, length);
vnt_control_out(priv, MESSAGE_TYPE_WRITE,
- value, MESSAGE_REQUEST_RF_CH2, length, array);
+ value, MESSAGE_REQUEST_RF_CH2,
+ length, array);
length2 -= length;
value += length;
diff --git a/drivers/staging/vt6656/rxtx.c b/drivers/staging/vt6656/rxtx.c
index 1835cd13ef49..63413492e61d 100644
--- a/drivers/staging/vt6656/rxtx.c
+++ b/drivers/staging/vt6656/rxtx.c
@@ -114,7 +114,7 @@ static __le16 vnt_time_stamp_off(struct vnt_private *priv, u16 rate)
}
static u32 vnt_get_rsvtime(struct vnt_private *priv, u8 pkt_type,
- u32 frame_length, u16 rate, int need_ack)
+ u32 frame_length, u16 rate, int need_ack)
{
u32 data_time, ack_time;
@@ -135,14 +135,14 @@ static u32 vnt_get_rsvtime(struct vnt_private *priv, u8 pkt_type,
}
static __le16 vnt_rxtx_rsvtime_le16(struct vnt_private *priv, u8 pkt_type,
- u32 frame_length, u16 rate, int need_ack)
+ u32 frame_length, u16 rate, int need_ack)
{
return cpu_to_le16((u16)vnt_get_rsvtime(priv, pkt_type,
frame_length, rate, need_ack));
}
static __le16 vnt_get_rtscts_rsvtime_le(struct vnt_private *priv,
- u8 rsv_type, u8 pkt_type, u32 frame_length, u16 current_rate)
+ u8 rsv_type, u8 pkt_type, u32 frame_length, u16 current_rate)
{
u32 rrv_time, rts_time, cts_time, ack_time, data_time;
@@ -160,19 +160,19 @@ static __le16 vnt_get_rtscts_rsvtime_le(struct vnt_private *priv,
rts_time = vnt_get_frame_time(priv->preamble_type,
pkt_type, 20, priv->top_cck_basic_rate);
cts_time = vnt_get_frame_time(priv->preamble_type, pkt_type,
- 14, priv->top_cck_basic_rate);
+ 14, priv->top_cck_basic_rate);
ack_time = vnt_get_frame_time(priv->preamble_type, pkt_type,
- 14, priv->top_ofdm_basic_rate);
+ 14, priv->top_ofdm_basic_rate);
} else if (rsv_type == 2) {
rts_time = vnt_get_frame_time(priv->preamble_type, pkt_type,
- 20, priv->top_ofdm_basic_rate);
+ 20, priv->top_ofdm_basic_rate);
cts_time = ack_time = vnt_get_frame_time(priv->preamble_type,
pkt_type, 14, priv->top_ofdm_basic_rate);
} else if (rsv_type == 3) {
cts_time = vnt_get_frame_time(priv->preamble_type, pkt_type,
- 14, priv->top_cck_basic_rate);
+ 14, priv->top_cck_basic_rate);
ack_time = vnt_get_frame_time(priv->preamble_type, pkt_type,
- 14, priv->top_ofdm_basic_rate);
+ 14, priv->top_ofdm_basic_rate);
rrv_time = cts_time + ack_time + data_time + 2 * priv->sifs;
@@ -227,7 +227,7 @@ static __le16 vnt_get_rtscts_duration_le(struct vnt_usb_send_context *context,
case RTSDUR_AA_F0:
case RTSDUR_AA_F1:
cts_time = vnt_get_frame_time(priv->preamble_type,
- pkt_type, 14, priv->top_ofdm_basic_rate);
+ pkt_type, 14, priv->top_ofdm_basic_rate);
dur_time = cts_time + 2 * priv->sifs +
vnt_get_rsvtime(priv, pkt_type,
frame_length, rate, need_ack);
@@ -410,7 +410,7 @@ static u16 vnt_rxtx_rts_g_head(struct vnt_usb_send_context *tx_context,
u16 current_rate = tx_context->tx_rate;
vnt_get_phy_field(priv, rts_frame_len, priv->top_cck_basic_rate,
- PK_TYPE_11B, &buf->b);
+ PK_TYPE_11B, &buf->b);
vnt_get_phy_field(priv, rts_frame_len, priv->top_ofdm_basic_rate,
tx_context->pkt_type, &buf->a);
@@ -437,7 +437,7 @@ static u16 vnt_rxtx_rts_g_fb_head(struct vnt_usb_send_context *tx_context,
u16 rts_frame_len = 20;
vnt_get_phy_field(priv, rts_frame_len, priv->top_cck_basic_rate,
- PK_TYPE_11B, &buf->b);
+ PK_TYPE_11B, &buf->b);
vnt_get_phy_field(priv, rts_frame_len, priv->top_ofdm_basic_rate,
tx_context->pkt_type, &buf->a);
@@ -683,9 +683,9 @@ static u16 vnt_rxtx_ab(struct vnt_usb_send_context *tx_context,
}
static u16 vnt_generate_tx_parameter(struct vnt_usb_send_context *tx_context,
- struct vnt_tx_buffer *tx_buffer,
- struct vnt_mic_hdr **mic_hdr, u32 need_mic,
- bool need_rts)
+ struct vnt_tx_buffer *tx_buffer,
+ struct vnt_mic_hdr **mic_hdr, u32 need_mic,
+ bool need_rts)
{
if (tx_context->pkt_type == PK_TYPE_11GB ||
@@ -1024,7 +1024,7 @@ static int vnt_beacon_xmit(struct vnt_private *priv,
/* Get SignalField,ServiceField,Length */
vnt_get_phy_field(priv, frame_size, current_rate,
- PK_TYPE_11A, &short_head->ab);
+ PK_TYPE_11A, &short_head->ab);
/* Get Duration and TimeStampOff */
short_head->duration = vnt_get_duration_le(priv,
@@ -1101,7 +1101,7 @@ int vnt_beacon_make(struct vnt_private *priv, struct ieee80211_vif *vif)
}
int vnt_beacon_enable(struct vnt_private *priv, struct ieee80211_vif *vif,
- struct ieee80211_bss_conf *conf)
+ struct ieee80211_bss_conf *conf)
{
vnt_mac_reg_bits_off(priv, MAC_REG_TCR, TCR_AUTOBCNTX);
diff --git a/drivers/staging/vt6656/usbpipe.c b/drivers/staging/vt6656/usbpipe.c
index 1ae6a64c7fd4..dc11a05be8c4 100644
--- a/drivers/staging/vt6656/usbpipe.c
+++ b/drivers/staging/vt6656/usbpipe.c
@@ -47,15 +47,25 @@ int vnt_control_out(struct vnt_private *priv, u8 request, u16 value,
u16 index, u16 length, u8 *buffer)
{
int status = 0;
+ u8 *usb_buffer;
if (test_bit(DEVICE_FLAGS_DISCONNECTED, &priv->flags))
return STATUS_FAILURE;
mutex_lock(&priv->usb_lock);
+ usb_buffer = kmemdup(buffer, length, GFP_KERNEL);
+ if (!usb_buffer) {
+ mutex_unlock(&priv->usb_lock);
+ return -ENOMEM;
+ }
+
status = usb_control_msg(priv->usb,
- usb_sndctrlpipe(priv->usb, 0), request, 0x40, value,
- index, buffer, length, USB_CTL_WAIT);
+ usb_sndctrlpipe(priv->usb, 0),
+ request, 0x40, value,
+ index, usb_buffer, length, USB_CTL_WAIT);
+
+ kfree(usb_buffer);
mutex_unlock(&priv->usb_lock);
@@ -75,15 +85,28 @@ int vnt_control_in(struct vnt_private *priv, u8 request, u16 value,
u16 index, u16 length, u8 *buffer)
{
int status;
+ u8 *usb_buffer;
if (test_bit(DEVICE_FLAGS_DISCONNECTED, &priv->flags))
return STATUS_FAILURE;
mutex_lock(&priv->usb_lock);
+ usb_buffer = kmalloc(length, GFP_KERNEL);
+ if (!usb_buffer) {
+ mutex_unlock(&priv->usb_lock);
+ return -ENOMEM;
+ }
+
status = usb_control_msg(priv->usb,
- usb_rcvctrlpipe(priv->usb, 0), request, 0xc0, value,
- index, buffer, length, USB_CTL_WAIT);
+ usb_rcvctrlpipe(priv->usb, 0),
+ request, 0xc0, value,
+ index, usb_buffer, length, USB_CTL_WAIT);
+
+ if (status == length)
+ memcpy(buffer, usb_buffer, length);
+
+ kfree(usb_buffer);
mutex_unlock(&priv->usb_lock);
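The usbpipe.c hunks above bounce the caller's data through kmemdup()/kmalloc() memory, presumably because usb_control_msg() hands the buffer to the USB core for DMA, so it must not be given an arbitrary caller buffer such as one on the stack. The same idea reduced to a sketch for the IN direction (invented names, 1-second timeout chosen arbitrarily, not part of the patch):

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/usb.h>

static int demo_usb_vendor_read(struct usb_device *udev, u8 request,
				u16 value, u16 index, void *out, u16 len)
{
	u8 *bounce = kmalloc(len, GFP_KERNEL);	/* DMA-safe heap buffer */
	int status;

	if (!bounce)
		return -ENOMEM;

	status = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), request,
				 USB_DIR_IN | USB_TYPE_VENDOR |
				 USB_RECIP_DEVICE,
				 value, index, bounce, len, 1000);
	if (status == len)
		memcpy(out, bounce, len);	/* copy back only on a full read */

	kfree(bounce);
	return status < 0 ? status : 0;
}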
diff --git a/drivers/staging/vt6656/wcmd.c b/drivers/staging/vt6656/wcmd.c
index 9f6cc2ef08dd..b2fc17f1381b 100644
--- a/drivers/staging/vt6656/wcmd.c
+++ b/drivers/staging/vt6656/wcmd.c
@@ -45,7 +45,6 @@ static void vnt_cmd_timer_wait(struct vnt_private *priv, unsigned long msecs)
static int vnt_cmd_complete(struct vnt_private *priv)
{
-
priv->command_state = WLAN_CMD_IDLE;
if (priv->free_cmd_queue == CMD_Q_SIZE) {
/* Command Queue Empty */
@@ -165,7 +164,6 @@ void vnt_run_command(struct work_struct *work)
int vnt_schedule_command(struct vnt_private *priv, enum vnt_cmd command)
{
-
if (priv->free_cmd_queue == 0)
return false;
@@ -178,7 +176,6 @@ int vnt_schedule_command(struct vnt_private *priv, enum vnt_cmd command)
vnt_cmd_complete(priv);
return true;
-
}
void vnt_reset_command_timer(struct vnt_private *priv)
diff --git a/drivers/staging/wilc1000/coreconfigurator.h b/drivers/staging/wilc1000/coreconfigurator.h
index cff16984167b..5256f40524bf 100644
--- a/drivers/staging/wilc1000/coreconfigurator.h
+++ b/drivers/staging/wilc1000/coreconfigurator.h
@@ -70,10 +70,10 @@ enum connect_status {
CONNECT_STS_FORCE_16_BIT = 0xFFFF
};
-struct tstrRSSI {
- u8 u8Full;
- u8 u8Index;
- s8 as8RSSI[NUM_RSSI];
+struct rssi_history_buffer {
+ bool full;
+ u8 index;
+ s8 samples[NUM_RSSI];
};
struct network_info {
@@ -93,7 +93,7 @@ struct network_info {
u8 *ies;
u16 ies_len;
void *join_params;
- struct tstrRSSI str_rssi;
+ struct rssi_history_buffer rssi_history;
u64 tsf_hi;
};
@@ -124,10 +124,7 @@ s32 wilc_parse_network_info(u8 *msg_buffer,
struct network_info **ret_network_info);
s32 wilc_parse_assoc_resp_info(u8 *buffer, u32 buffer_len,
struct connect_resp_info **ret_connect_resp_info);
-void wilc_scan_complete_received(struct wilc *wilc, u8 *pu8Buffer,
- u32 u32Length);
-void wilc_network_info_received(struct wilc *wilc, u8 *pu8Buffer,
- u32 u32Length);
-void wilc_gnrl_async_info_received(struct wilc *wilc, u8 *pu8Buffer,
- u32 u32Length);
+void wilc_scan_complete_received(struct wilc *wilc, u8 *buffer, u32 length);
+void wilc_network_info_received(struct wilc *wilc, u8 *buffer, u32 length);
+void wilc_gnrl_async_info_received(struct wilc *wilc, u8 *buffer, u32 length);
#endif
diff --git a/drivers/staging/wilc1000/host_interface.c b/drivers/staging/wilc1000/host_interface.c
index c307ccef790e..c3a8af081880 100644
--- a/drivers/staging/wilc1000/host_interface.c
+++ b/drivers/staging/wilc1000/host_interface.c
@@ -287,7 +287,6 @@ static int wilc_enqueue_cmd(struct host_if_msg *msg)
return 0;
}
-
/* The u8IfIdx starts from 0 to NUM_CONCURRENT_IFC -1, but 0 index used as
* special purpose in wilc device, so we add 1 to the index to starts from 1.
* As a result, the returned index will be 1 to NUM_CONCURRENT_IFC.
@@ -385,7 +384,7 @@ static void handle_set_ip_address(struct wilc_vif *vif, u8 *ip_addr, u8 idx)
wid.id = (u16)WID_IP_ADDRESS;
wid.type = WID_STR;
- wid.val = (u8 *)ip_addr;
+ wid.val = ip_addr;
wid.size = IP_ALEN;
ret = wilc_send_config_pkt(vif, SET_CFG, &wid, 1,
@@ -1350,19 +1349,17 @@ static s32 Handle_RcvdGnrlAsyncInfo(struct wilc_vif *vif,
if (u32RcvdAssocRespInfoLen != 0) {
s32Err = wilc_parse_assoc_resp_info(rcv_assoc_resp, u32RcvdAssocRespInfoLen,
- &pstrConnectRespInfo);
+ &pstrConnectRespInfo);
if (s32Err) {
netdev_err(vif->ndev, "wilc_parse_assoc_resp_info() returned error %d\n", s32Err);
} else {
strConnectInfo.status = pstrConnectRespInfo->status;
- if (strConnectInfo.status == SUCCESSFUL_STATUSCODE) {
- if (pstrConnectRespInfo->ies) {
- strConnectInfo.resp_ies_len = pstrConnectRespInfo->ies_len;
- strConnectInfo.resp_ies = kmalloc(pstrConnectRespInfo->ies_len, GFP_KERNEL);
- memcpy(strConnectInfo.resp_ies, pstrConnectRespInfo->ies,
- pstrConnectRespInfo->ies_len);
- }
+ if (strConnectInfo.status == SUCCESSFUL_STATUSCODE && pstrConnectRespInfo->ies) {
+ strConnectInfo.resp_ies_len = pstrConnectRespInfo->ies_len;
+ strConnectInfo.resp_ies = kmalloc(pstrConnectRespInfo->ies_len, GFP_KERNEL);
+ memcpy(strConnectInfo.resp_ies, pstrConnectRespInfo->ies,
+ pstrConnectRespInfo->ies_len);
}
if (pstrConnectRespInfo) {
@@ -1928,6 +1925,8 @@ static s32 Handle_Get_InActiveTime(struct wilc_vif *vif,
wid.type = WID_STR;
wid.size = ETH_ALEN;
wid.val = kmalloc(wid.size, GFP_KERNEL);
+ if (!wid.val)
+ return -ENOMEM;
stamac = wid.val;
ether_addr_copy(stamac, strHostIfStaInactiveT->mac);
@@ -3348,10 +3347,6 @@ int wilc_init(struct net_device *dev, struct host_if_drv **hif_drv_handler)
init_completion(&hif_drv->comp_inactive_time);
if (clients_count == 0) {
- if (result < 0) {
- netdev_err(vif->ndev, "Failed to creat MQ\n");
- goto _fail_;
- }
hif_workqueue = create_singlethread_workqueue("WILC_wq");
if (!hif_workqueue) {
netdev_err(vif->ndev, "Failed to create workqueue\n");
@@ -3444,8 +3439,7 @@ int wilc_deinit(struct wilc_vif *vif)
return result;
}
-void wilc_network_info_received(struct wilc *wilc, u8 *pu8Buffer,
- u32 u32Length)
+void wilc_network_info_received(struct wilc *wilc, u8 *buffer, u32 length)
{
s32 result = 0;
struct host_if_msg msg;
@@ -3453,7 +3447,7 @@ void wilc_network_info_received(struct wilc *wilc, u8 *pu8Buffer,
struct host_if_drv *hif_drv = NULL;
struct wilc_vif *vif;
- id = ((pu8Buffer[u32Length - 4]) | (pu8Buffer[u32Length - 3] << 8) | (pu8Buffer[u32Length - 2] << 16) | (pu8Buffer[u32Length - 1] << 24));
+ id = ((buffer[length - 4]) | (buffer[length - 3] << 8) | (buffer[length - 2] << 16) | (buffer[length - 1] << 24));
vif = wilc_get_vif_from_idx(wilc, id);
if (!vif)
return;
@@ -3469,17 +3463,16 @@ void wilc_network_info_received(struct wilc *wilc, u8 *pu8Buffer,
msg.id = HOST_IF_MSG_RCVD_NTWRK_INFO;
msg.vif = vif;
- msg.body.net_info.len = u32Length;
- msg.body.net_info.buffer = kmalloc(u32Length, GFP_KERNEL);
- memcpy(msg.body.net_info.buffer, pu8Buffer, u32Length);
+ msg.body.net_info.len = length;
+ msg.body.net_info.buffer = kmalloc(length, GFP_KERNEL);
+ memcpy(msg.body.net_info.buffer, buffer, length);
result = wilc_enqueue_cmd(&msg);
if (result)
netdev_err(vif->ndev, "message parameters (%d)\n", result);
}
-void wilc_gnrl_async_info_received(struct wilc *wilc, u8 *pu8Buffer,
- u32 u32Length)
+void wilc_gnrl_async_info_received(struct wilc *wilc, u8 *buffer, u32 length)
{
s32 result = 0;
struct host_if_msg msg;
@@ -3489,7 +3482,7 @@ void wilc_gnrl_async_info_received(struct wilc *wilc, u8 *pu8Buffer,
mutex_lock(&hif_deinit_lock);
- id = ((pu8Buffer[u32Length - 4]) | (pu8Buffer[u32Length - 3] << 8) | (pu8Buffer[u32Length - 2] << 16) | (pu8Buffer[u32Length - 1] << 24));
+ id = ((buffer[length - 4]) | (buffer[length - 3] << 8) | (buffer[length - 2] << 16) | (buffer[length - 1] << 24));
vif = wilc_get_vif_from_idx(wilc, id);
if (!vif) {
mutex_unlock(&hif_deinit_lock);
@@ -3514,9 +3507,9 @@ void wilc_gnrl_async_info_received(struct wilc *wilc, u8 *pu8Buffer,
msg.id = HOST_IF_MSG_RCVD_GNRL_ASYNC_INFO;
msg.vif = vif;
- msg.body.async_info.len = u32Length;
- msg.body.async_info.buffer = kmalloc(u32Length, GFP_KERNEL);
- memcpy(msg.body.async_info.buffer, pu8Buffer, u32Length);
+ msg.body.async_info.len = length;
+ msg.body.async_info.buffer = kmalloc(length, GFP_KERNEL);
+ memcpy(msg.body.async_info.buffer, buffer, length);
result = wilc_enqueue_cmd(&msg);
if (result)
@@ -3525,8 +3518,7 @@ void wilc_gnrl_async_info_received(struct wilc *wilc, u8 *pu8Buffer,
mutex_unlock(&hif_deinit_lock);
}
-void wilc_scan_complete_received(struct wilc *wilc, u8 *pu8Buffer,
- u32 u32Length)
+void wilc_scan_complete_received(struct wilc *wilc, u8 *buffer, u32 length)
{
s32 result = 0;
struct host_if_msg msg;
@@ -3534,7 +3526,7 @@ void wilc_scan_complete_received(struct wilc *wilc, u8 *pu8Buffer,
struct host_if_drv *hif_drv = NULL;
struct wilc_vif *vif;
- id = ((pu8Buffer[u32Length - 4]) | (pu8Buffer[u32Length - 3] << 8) | (pu8Buffer[u32Length - 2] << 16) | (pu8Buffer[u32Length - 1] << 24));
+ id = ((buffer[length - 4]) | (buffer[length - 3] << 8) | (buffer[length - 2] << 16) | (buffer[length - 1] << 24));
vif = wilc_get_vif_from_idx(wilc, id);
if (!vif)
return;
@@ -3892,7 +3884,6 @@ static void *host_int_ParseJoinBssParam(struct network_info *ptstrNetworkInfo)
pNewJoinBssParam->supp_rates[i + 1] = pu8IEs[index + i];
index += suppRatesNo;
- continue;
} else if (pu8IEs[index] == EXT_SUPP_RATES_IE) {
extSuppRatesNo = pu8IEs[index + 1];
if (extSuppRatesNo > (MAX_RATES_SUPPORTED - suppRatesNo))
@@ -3904,11 +3895,9 @@ static void *host_int_ParseJoinBssParam(struct network_info *ptstrNetworkInfo)
pNewJoinBssParam->supp_rates[suppRatesNo + i + 1] = pu8IEs[index + i];
index += extSuppRatesNo;
- continue;
} else if (pu8IEs[index] == HT_CAPABILITY_IE) {
pNewJoinBssParam->ht_capable = true;
index += pu8IEs[index + 1] + 2;
- continue;
} else if ((pu8IEs[index] == WMM_IE) &&
(pu8IEs[index + 2] == 0x00) && (pu8IEs[index + 3] == 0x50) &&
(pu8IEs[index + 4] == 0xF2) &&
@@ -3920,7 +3909,6 @@ static void *host_int_ParseJoinBssParam(struct network_info *ptstrNetworkInfo)
if (pu8IEs[index + 8] & BIT(7))
pNewJoinBssParam->uapsd_cap = true;
index += pu8IEs[index + 1] + 2;
- continue;
} else if ((pu8IEs[index] == P2P_IE) &&
(pu8IEs[index + 2] == 0x50) && (pu8IEs[index + 3] == 0x6f) &&
(pu8IEs[index + 4] == 0x9a) &&
@@ -3950,8 +3938,6 @@ static void *host_int_ParseJoinBssParam(struct network_info *ptstrNetworkInfo)
memcpy(pNewJoinBssParam->start_time, pu8IEs + u16P2P_count, 4);
index += pu8IEs[index + 1] + 2;
- continue;
-
} else if ((pu8IEs[index] == RSN_IE) ||
((pu8IEs[index] == WPA_IE) && (pu8IEs[index + 2] == 0x00) &&
(pu8IEs[index + 3] == 0x50) && (pu8IEs[index + 4] == 0xF2) &&
@@ -3997,7 +3983,6 @@ static void *host_int_ParseJoinBssParam(struct network_info *ptstrNetworkInfo)
}
pNewJoinBssParam->rsn_found = true;
index += pu8IEs[index + 1] + 2;
- continue;
} else {
index += pu8IEs[index + 1] + 2;
}
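One detail worth calling out in the host_interface.c hunks above: the firmware appends the destination interface id as a little-endian 32-bit value in the last four bytes of every indication buffer, which is what the buffer[length - 4] ... expressions decode. A standalone sketch of that extraction follows; the helper name is illustrative, the driver open-codes it in wilc_network_info_received() and friends.

#include <stdint.h>

/* Decode the little-endian interface id that the wilc firmware places in
 * the last four bytes of an indication buffer. */
static uint32_t wilc_tail_id(const uint8_t *buffer, uint32_t length)
{
	return (uint32_t)buffer[length - 4] |
	       ((uint32_t)buffer[length - 3] << 8) |
	       ((uint32_t)buffer[length - 2] << 16) |
	       ((uint32_t)buffer[length - 1] << 24);
}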
diff --git a/drivers/staging/wilc1000/linux_mon.c b/drivers/staging/wilc1000/linux_mon.c
index f328d75de0d1..c9782d452b07 100644
--- a/drivers/staging/wilc1000/linux_mon.c
+++ b/drivers/staging/wilc1000/linux_mon.c
@@ -197,6 +197,8 @@ static netdev_tx_t WILC_WFI_mon_xmit(struct sk_buff *skb,
if (skb->data[0] == 0xc0 && (!(memcmp(broadcast, &skb->data[4], 6)))) {
skb2 = dev_alloc_skb(skb->len + sizeof(struct wilc_wfi_radiotap_cb_hdr));
+ if (!skb2)
+ return -ENOMEM;
memcpy(skb_put(skb2, skb->len), skb->data, skb->len);
diff --git a/drivers/staging/wilc1000/linux_wlan.c b/drivers/staging/wilc1000/linux_wlan.c
index 2eebc6215cac..d6d803416be2 100644
--- a/drivers/staging/wilc1000/linux_wlan.c
+++ b/drivers/staging/wilc1000/linux_wlan.c
@@ -1074,7 +1074,7 @@ static int mac_ioctl(struct net_device *ndev, struct ifreq *req, int cmd)
{
u8 *buff = NULL;
s8 rssi;
- u32 size = 0, length = 0;
+ u32 size = 0;
struct wilc_vif *vif;
s32 ret = 0;
struct wilc *wilc;
@@ -1098,7 +1098,7 @@ static int mac_ioctl(struct net_device *ndev, struct ifreq *req, int cmd)
if (IS_ERR(buff))
return PTR_ERR(buff);
- if (strncasecmp(buff, "RSSI", length) == 0) {
+ if (strncasecmp(buff, "RSSI", size) == 0) {
ret = wilc_get_rssi(vif, &rssi);
netdev_info(ndev, "RSSI :%d\n", rssi);
@@ -1251,11 +1251,12 @@ int wilc_netdev_init(struct wilc **wilc, struct device *dev, int io_type,
else
strcpy(ndev->name, "p2p%d");
- vif->idx = wl->vif_num;
vif->wilc = *wilc;
vif->ndev = ndev;
wl->vif[i] = vif;
wl->vif_num = i;
+ vif->idx = wl->vif_num;
+
ndev->netdev_ops = &wilc_netdev_ops;
{
diff --git a/drivers/staging/wilc1000/wilc_sdio.c b/drivers/staging/wilc1000/wilc_sdio.c
index cd6b8badc5f3..0189e3edbbbe 100644
--- a/drivers/staging/wilc1000/wilc_sdio.c
+++ b/drivers/staging/wilc1000/wilc_sdio.c
@@ -1,11 +1,8 @@
-/* ////////////////////////////////////////////////////////////////////////// */
-/* */
-/* Copyright (c) Atmel Corporation. All rights reserved. */
-/* */
-/* Module Name: wilc_sdio.c */
-/* */
-/* */
-/* //////////////////////////////////////////////////////////////////////////// */
+/*
+ * Copyright (c) Atmel Corporation. All rights reserved.
+ *
+ * Module Name: wilc_sdio.c
+ */
#include <linux/string.h>
#include "wilc_wlan_if.h"
@@ -77,7 +74,7 @@ static int wilc_sdio_cmd52(struct wilc *wilc, struct sdio_cmd52 *cmd)
sdio_release_host(func);
if (ret)
- dev_err(&func->dev, "wilc_sdio_cmd52..failed, err(%d)\n", ret);
+ dev_err(&func->dev, "%s..failed, err(%d)\n", __func__, ret);
return ret;
}
@@ -106,7 +103,7 @@ static int wilc_sdio_cmd53(struct wilc *wilc, struct sdio_cmd53 *cmd)
sdio_release_host(func);
if (ret)
- dev_err(&func->dev, "wilc_sdio_cmd53..failed, err(%d)\n", ret);
+ dev_err(&func->dev, "%s..failed, err(%d)\n", __func__, ret);
return ret;
}
@@ -246,15 +243,11 @@ static void wilc_sdio_disable_interrupt(struct wilc *dev)
struct sdio_func *func = container_of(dev->dev, struct sdio_func, dev);
int ret;
- dev_dbg(&func->dev, "wilc_sdio_disable_interrupt IN\n");
-
sdio_claim_host(func);
ret = sdio_release_irq(func);
if (ret < 0)
dev_err(&func->dev, "can't release sdio_irq, err(%d)\n", ret);
sdio_release_host(func);
-
- dev_info(&func->dev, "wilc_sdio_disable_interrupt OUT\n");
}
/********************************************
diff --git a/drivers/staging/wilc1000/wilc_spi.c b/drivers/staging/wilc1000/wilc_spi.c
index 55d53c3a95df..5ef84410e0f2 100644
--- a/drivers/staging/wilc1000/wilc_spi.c
+++ b/drivers/staging/wilc1000/wilc_spi.c
@@ -1,11 +1,9 @@
-/* ////////////////////////////////////////////////////////////////////////// */
-/* */
-/* Copyright (c) Atmel Corporation. All rights reserved. */
-/* */
-/* Module Name: wilc_spi.c */
-/* */
-/* */
-/* //////////////////////////////////////////////////////////////////////////// */
+/*
+ * Copyright (c) Atmel Corporation. All rights reserved.
+ *
+ * Module Name: wilc_spi.c
+ */
+
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
@@ -410,7 +408,7 @@ static int spi_cmd_complete(struct wilc *wilc, u8 cmd, u32 adr, u8 *b, u32 sz,
if (len2 > ARRAY_SIZE(wb)) {
dev_err(&spi->dev, "spi buffer size too small (%d) (%zu)\n",
- len2, ARRAY_SIZE(wb));
+ len2, ARRAY_SIZE(wb));
return N_FAIL;
}
/* zero spi write buffers. */
@@ -454,8 +452,8 @@ static int spi_cmd_complete(struct wilc *wilc, u8 cmd, u32 adr, u8 *b, u32 sz,
return N_FAIL;
}
- if ((cmd == CMD_INTERNAL_READ) || (cmd == CMD_SINGLE_READ)
- || (cmd == CMD_DMA_READ) || (cmd == CMD_DMA_EXT_READ)) {
+ if ((cmd == CMD_INTERNAL_READ) || (cmd == CMD_SINGLE_READ) ||
+ (cmd == CMD_DMA_READ) || (cmd == CMD_DMA_EXT_READ)) {
int retry;
/* u16 crc1, crc2; */
u8 crc[2];
@@ -929,14 +927,16 @@ static int wilc_spi_read_int(struct wilc *wilc, u32 *int_status)
{
struct spi_device *spi = to_spi_device(wilc->dev);
int ret;
+ u32 tmp;
+ u32 byte_cnt;
+ int happened, j;
+ u32 unknown_mask;
+ u32 irq_flags;
if (g_spi.has_thrpt_enh) {
ret = spi_internal_read(wilc, 0xe840 - WILC_SPI_REG_BASE,
int_status);
} else {
- u32 tmp;
- u32 byte_cnt;
-
ret = wilc_spi_read_reg(wilc, WILC_VMM_TO_HOST_SIZE,
&byte_cnt);
if (!ret) {
@@ -946,37 +946,28 @@ static int wilc_spi_read_int(struct wilc *wilc, u32 *int_status)
}
tmp = (byte_cnt >> 2) & IRQ_DMA_WD_CNT_MASK;
- {
- int happended, j;
-
- j = 0;
- do {
- u32 irq_flags;
-
- happended = 0;
+ j = 0;
+ do {
+ happened = 0;
- wilc_spi_read_reg(wilc, 0x1a90, &irq_flags);
- tmp |= ((irq_flags >> 27) << IRG_FLAGS_OFFSET);
+ wilc_spi_read_reg(wilc, 0x1a90, &irq_flags);
+ tmp |= ((irq_flags >> 27) << IRG_FLAGS_OFFSET);
- if (g_spi.nint > 5) {
- wilc_spi_read_reg(wilc, 0x1a94,
- &irq_flags);
- tmp |= (((irq_flags >> 0) & 0x7) << (IRG_FLAGS_OFFSET + 5));
- }
+ if (g_spi.nint > 5) {
+ wilc_spi_read_reg(wilc, 0x1a94,
+ &irq_flags);
+ tmp |= (((irq_flags >> 0) & 0x7) << (IRG_FLAGS_OFFSET + 5));
+ }
- {
- u32 unkmown_mask;
+ unknown_mask = ~((1ul << g_spi.nint) - 1);
- unkmown_mask = ~((1ul << g_spi.nint) - 1);
+ if ((tmp >> IRG_FLAGS_OFFSET) & unknown_mask) {
+ dev_err(&spi->dev, "Unexpected interrupt (2): j=%d, tmp=%x, mask=%x\n", j, tmp, unknown_mask);
+ happened = 1;
+ }
- if ((tmp >> IRG_FLAGS_OFFSET) & unkmown_mask) {
- dev_err(&spi->dev, "Unexpected interrupt (2): j=%d, tmp=%x, mask=%x\n", j, tmp, unkmown_mask);
- happended = 1;
- }
- }
- j++;
- } while (happended);
- }
+ j++;
+ } while (happened);
*int_status = tmp;
}
@@ -1130,11 +1121,8 @@ static int wilc_spi_sync_ext(struct wilc *wilc, int nint)
return 1;
}
-/********************************************
- *
- * Global spi HIF function table
- *
- ********************************************/
+
+/* Global spi HIF function table */
static const struct wilc_hif_func wilc_hif_spi = {
.hif_init = wilc_spi_init,
.hif_deinit = _wilc_spi_deinit,
diff --git a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
index 2b4536318ca6..44a12bdd6a46 100644
--- a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
+++ b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
@@ -29,8 +29,8 @@
#define P2P_INV_REQ 0x03
#define P2P_INV_RSP 0x04
#define PUBLIC_ACT_VENDORSPEC 0x09
-#define GAS_INTIAL_REQ 0x0a
-#define GAS_INTIAL_RSP 0x0b
+#define GAS_INITIAL_REQ 0x0a
+#define GAS_INITIAL_RSP 0x0b
#define INVALID_CHANNEL 0
@@ -205,11 +205,11 @@ static u32 get_rssi_avg(struct network_info *network_info)
{
u8 i;
int rssi_v = 0;
- u8 num_rssi = (network_info->str_rssi.u8Full) ?
- NUM_RSSI : (network_info->str_rssi.u8Index);
+ u8 num_rssi = (network_info->rssi_history.full) ?
+ NUM_RSSI : (network_info->rssi_history.index);
for (i = 0; i < num_rssi; i++)
- rssi_v += network_info->str_rssi.as8RSSI[i];
+ rssi_v += network_info->rssi_history.samples[i];
rssi_v /= num_rssi;
return rssi_v;
@@ -346,13 +346,13 @@ static void add_network_to_shadow(struct network_info *pstrNetworkInfo,
} else {
ap_index = ap_found;
}
- rssi_index = last_scanned_shadow[ap_index].str_rssi.u8Index;
- last_scanned_shadow[ap_index].str_rssi.as8RSSI[rssi_index++] = pstrNetworkInfo->rssi;
+ rssi_index = last_scanned_shadow[ap_index].rssi_history.index;
+ last_scanned_shadow[ap_index].rssi_history.samples[rssi_index++] = pstrNetworkInfo->rssi;
if (rssi_index == NUM_RSSI) {
rssi_index = 0;
- last_scanned_shadow[ap_index].str_rssi.u8Full = 1;
+ last_scanned_shadow[ap_index].rssi_history.full = true;
}
- last_scanned_shadow[ap_index].str_rssi.u8Index = rssi_index;
+ last_scanned_shadow[ap_index].rssi_history.index = rssi_index;
last_scanned_shadow[ap_index].rssi = pstrNetworkInfo->rssi;
last_scanned_shadow[ap_index].cap_info = pstrNetworkInfo->cap_info;
last_scanned_shadow[ap_index].ssid_len = pstrNetworkInfo->ssid_len;
@@ -765,8 +765,8 @@ static int connect(struct wiphy *wiphy, struct net_device *dev,
}
}
- if ((sme->crypto.wpa_versions & NL80211_WPA_VERSION_1)
- || (sme->crypto.wpa_versions & NL80211_WPA_VERSION_2)) {
+ if ((sme->crypto.wpa_versions & NL80211_WPA_VERSION_1) ||
+ (sme->crypto.wpa_versions & NL80211_WPA_VERSION_2)) {
for (i = 0; i < sme->crypto.n_ciphers_pairwise; i++) {
if (sme->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_TKIP)
u8security = u8security | TKIP;
@@ -1301,16 +1301,16 @@ static int set_pmksa(struct wiphy *wiphy, struct net_device *netdev,
for (i = 0; i < priv->pmkid_list.numpmkid; i++) {
if (!memcmp(pmksa->bssid, priv->pmkid_list.pmkidlist[i].bssid,
- ETH_ALEN)) {
+ ETH_ALEN)) {
flag = PMKID_FOUND;
break;
}
}
if (i < WILC_MAX_NUM_PMKIDS) {
memcpy(priv->pmkid_list.pmkidlist[i].bssid, pmksa->bssid,
- ETH_ALEN);
+ ETH_ALEN);
memcpy(priv->pmkid_list.pmkidlist[i].pmkid, pmksa->pmkid,
- PMKID_LEN);
+ PMKID_LEN);
if (!(flag == PMKID_FOUND))
priv->pmkid_list.numpmkid++;
} else {
@@ -1334,7 +1334,7 @@ static int del_pmksa(struct wiphy *wiphy, struct net_device *netdev,
for (i = 0; i < priv->pmkid_list.numpmkid; i++) {
if (!memcmp(pmksa->bssid, priv->pmkid_list.pmkidlist[i].bssid,
- ETH_ALEN)) {
+ ETH_ALEN)) {
memset(&priv->pmkid_list.pmkidlist[i], 0, sizeof(struct host_if_pmkid));
break;
}
@@ -1343,11 +1343,11 @@ static int del_pmksa(struct wiphy *wiphy, struct net_device *netdev,
if (i < priv->pmkid_list.numpmkid && priv->pmkid_list.numpmkid > 0) {
for (; i < (priv->pmkid_list.numpmkid - 1); i++) {
memcpy(priv->pmkid_list.pmkidlist[i].bssid,
- priv->pmkid_list.pmkidlist[i + 1].bssid,
- ETH_ALEN);
+ priv->pmkid_list.pmkidlist[i + 1].bssid,
+ ETH_ALEN);
memcpy(priv->pmkid_list.pmkidlist[i].pmkid,
- priv->pmkid_list.pmkidlist[i].pmkid,
- PMKID_LEN);
+ priv->pmkid_list.pmkidlist[i + 1].pmkid,
+ PMKID_LEN);
}
priv->pmkid_list.numpmkid--;
} else {
@@ -1477,10 +1477,10 @@ void WILC_WFI_p2p_rx(struct net_device *dev, u8 *buff, u32 size)
}
if (buff[ACTION_CAT_ID] == PUB_ACTION_ATTR_ID) {
switch (buff[ACTION_SUBTYPE_ID]) {
- case GAS_INTIAL_REQ:
+ case GAS_INITIAL_REQ:
break;
- case GAS_INTIAL_RSP:
+ case GAS_INITIAL_RSP:
break;
case PUBLIC_ACT_VENDORSPEC:
@@ -1497,8 +1497,8 @@ void WILC_WFI_p2p_rx(struct net_device *dev, u8 *buff, u32 size)
}
}
if (p2p_local_random > p2p_recv_random) {
- if ((buff[P2P_PUB_ACTION_SUBTYPE] == GO_NEG_REQ || buff[P2P_PUB_ACTION_SUBTYPE] == GO_NEG_RSP
- || buff[P2P_PUB_ACTION_SUBTYPE] == P2P_INV_REQ || buff[P2P_PUB_ACTION_SUBTYPE] == P2P_INV_RSP)) {
+ if ((buff[P2P_PUB_ACTION_SUBTYPE] == GO_NEG_REQ || buff[P2P_PUB_ACTION_SUBTYPE] == GO_NEG_RSP ||
+ buff[P2P_PUB_ACTION_SUBTYPE] == P2P_INV_REQ || buff[P2P_PUB_ACTION_SUBTYPE] == P2P_INV_RSP)) {
for (i = P2P_PUB_ACTION_SUBTYPE + 2; i < size; i++) {
if (buff[i] == P2PELEM_ATTR_ID && !(memcmp(p2p_oui, &buff[i + 2], 4))) {
WILC_WFI_CfgParseRxAction(&buff[i + 6], size - (i + 6));
@@ -1666,10 +1666,10 @@ static int mgmt_tx(struct wiphy *wiphy,
curr_channel = chan->hw_value;
}
switch (buf[ACTION_SUBTYPE_ID]) {
- case GAS_INTIAL_REQ:
+ case GAS_INITIAL_REQ:
break;
- case GAS_INTIAL_RSP:
+ case GAS_INITIAL_RSP:
break;
case PUBLIC_ACT_VENDORSPEC:
@@ -1682,8 +1682,8 @@ static int mgmt_tx(struct wiphy *wiphy,
}
}
- if ((buf[P2P_PUB_ACTION_SUBTYPE] == GO_NEG_REQ || buf[P2P_PUB_ACTION_SUBTYPE] == GO_NEG_RSP
- || buf[P2P_PUB_ACTION_SUBTYPE] == P2P_INV_REQ || buf[P2P_PUB_ACTION_SUBTYPE] == P2P_INV_RSP)) {
+ if ((buf[P2P_PUB_ACTION_SUBTYPE] == GO_NEG_REQ || buf[P2P_PUB_ACTION_SUBTYPE] == GO_NEG_RSP ||
+ buf[P2P_PUB_ACTION_SUBTYPE] == P2P_INV_REQ || buf[P2P_PUB_ACTION_SUBTYPE] == P2P_INV_RSP)) {
if (p2p_local_random > p2p_recv_random) {
for (i = P2P_PUB_ACTION_SUBTYPE + 2; i < len; i++) {
if (buf[i] == P2PELEM_ATTR_ID && !(memcmp(p2p_oui, &buf[i + 2], 4))) {
diff --git a/drivers/staging/wilc1000/wilc_wlan.c b/drivers/staging/wilc1000/wilc_wlan.c
index bc5ad20af0a3..9addef1f1e12 100644
--- a/drivers/staging/wilc1000/wilc_wlan.c
+++ b/drivers/staging/wilc1000/wilc_wlan.c
@@ -287,7 +287,7 @@ static int wilc_wlan_txq_filter_dup_tcp_ack(struct net_device *dev)
while (dropped > 0) {
wait_for_completion_timeout(&wilc->txq_event,
- msecs_to_jiffies(1));
+ msecs_to_jiffies(1));
dropped--;
}
@@ -810,9 +810,9 @@ static void wilc_wlan_handle_rxq(struct wilc *wilc)
if (!is_cfg_packet) {
if (pkt_len > 0) {
wilc_frmw_to_linux(wilc,
- &buffer[offset],
- pkt_len,
- pkt_offset);
+ &buffer[offset],
+ pkt_len,
+ pkt_offset);
}
} else {
struct wilc_cfg_rsp rsp;
@@ -1226,7 +1226,7 @@ int wilc_wlan_cfg_set(struct wilc_vif *vif, int start, u16 wid, u8 *buffer,
ret_size = 0;
if (!wait_for_completion_timeout(&wilc->cfg_event,
- msecs_to_jiffies(CFG_PKTS_TIMEOUT))) {
+ msecs_to_jiffies(CFG_PKTS_TIMEOUT))) {
netdev_dbg(vif->ndev, "Set Timed Out\n");
ret_size = 0;
}
diff --git a/drivers/staging/wilc1000/wilc_wlan.h b/drivers/staging/wilc1000/wilc_wlan.h
index 11365efcc5d0..7a5eba9b5f47 100644
--- a/drivers/staging/wilc1000/wilc_wlan.h
+++ b/drivers/staging/wilc1000/wilc_wlan.h
@@ -274,8 +274,8 @@ struct wilc_vif;
int wilc_wlan_firmware_download(struct wilc *wilc, const u8 *buffer,
u32 buffer_size);
-int wilc_wlan_start(struct wilc *);
-int wilc_wlan_stop(struct wilc *);
+int wilc_wlan_start(struct wilc *wilc);
+int wilc_wlan_stop(struct wilc *wilc);
int wilc_wlan_txq_add_net_pkt(struct net_device *dev, void *priv, u8 *buffer,
u32 buffer_size, wilc_tx_complete_func_t func);
int wilc_wlan_handle_txq(struct net_device *dev, u32 *txq_count);
@@ -291,7 +291,7 @@ int wilc_wlan_txq_add_mgmt_pkt(struct net_device *dev, void *priv, u8 *buffer,
void wilc_chip_sleep_manually(struct wilc *wilc);
void wilc_enable_tcp_ack_filter(bool value);
-int wilc_wlan_get_num_conn_ifcs(struct wilc *);
+int wilc_wlan_get_num_conn_ifcs(struct wilc *wilc);
int wilc_mac_xmit(struct sk_buff *skb, struct net_device *dev);
void WILC_WFI_p2p_rx(struct net_device *dev, u8 *buff, u32 size);
diff --git a/drivers/staging/wilc1000/wilc_wlan_cfg.c b/drivers/staging/wilc1000/wilc_wlan_cfg.c
index 926fc16319b6..d3e5b1b302f4 100644
--- a/drivers/staging/wilc1000/wilc_wlan_cfg.c
+++ b/drivers/staging/wilc1000/wilc_wlan_cfg.c
@@ -175,8 +175,9 @@ static int wilc_wlan_cfg_set_byte(u8 *frame, u32 offset, u16 id, u8 val8)
buf[0] = (u8)id;
buf[1] = (u8)(id >> 8);
buf[2] = 1;
- buf[3] = val8;
- return 4;
+ buf[3] = 0;
+ buf[4] = val8;
+ return 5;
}
static int wilc_wlan_cfg_set_hword(u8 *frame, u32 offset, u16 id, u16 val16)
@@ -191,10 +192,11 @@ static int wilc_wlan_cfg_set_hword(u8 *frame, u32 offset, u16 id, u16 val16)
buf[0] = (u8)id;
buf[1] = (u8)(id >> 8);
buf[2] = 2;
- buf[3] = (u8)val16;
- buf[4] = (u8)(val16 >> 8);
+ buf[3] = 0;
+ buf[4] = (u8)val16;
+ buf[5] = (u8)(val16 >> 8);
- return 5;
+ return 6;
}
static int wilc_wlan_cfg_set_word(u8 *frame, u32 offset, u16 id, u32 val32)
@@ -209,19 +211,20 @@ static int wilc_wlan_cfg_set_word(u8 *frame, u32 offset, u16 id, u32 val32)
buf[0] = (u8)id;
buf[1] = (u8)(id >> 8);
buf[2] = 4;
- buf[3] = (u8)val32;
- buf[4] = (u8)(val32 >> 8);
- buf[5] = (u8)(val32 >> 16);
- buf[6] = (u8)(val32 >> 24);
+ buf[3] = 0;
+ buf[4] = (u8)val32;
+ buf[5] = (u8)(val32 >> 8);
+ buf[6] = (u8)(val32 >> 16);
+ buf[7] = (u8)(val32 >> 24);
- return 7;
+ return 8;
}
static int wilc_wlan_cfg_set_str(u8 *frame, u32 offset, u16 id, u8 *str, u32 size)
{
u8 *buf;
- if ((offset + size + 3) >= MAX_CFG_FRAME_SIZE)
+ if ((offset + size + 4) >= MAX_CFG_FRAME_SIZE)
return 0;
buf = &frame[offset];
@@ -229,11 +232,12 @@ static int wilc_wlan_cfg_set_str(u8 *frame, u32 offset, u16 id, u8 *str, u32 siz
buf[0] = (u8)id;
buf[1] = (u8)(id >> 8);
buf[2] = (u8)size;
+ buf[3] = (u8)(size >> 8);
if ((str) && (size != 0))
- memcpy(&buf[3], str, size);
+ memcpy(&buf[4], str, size);
- return (size + 3);
+ return (size + 4);
}
static int wilc_wlan_cfg_set_bin(u8 *frame, u32 offset, u16 id, u8 *b, u32 size)
@@ -284,12 +288,12 @@ static void wilc_wlan_parse_response_frame(u8 *info, int size)
break;
if (g_cfg_byte[i].id == wid) {
- g_cfg_byte[i].val = info[3];
+ g_cfg_byte[i].val = info[4];
break;
}
i++;
} while (1);
- len = 2;
+ len = 3;
break;
case WID_SHORT:
@@ -298,12 +302,14 @@ static void wilc_wlan_parse_response_frame(u8 *info, int size)
break;
if (g_cfg_hword[i].id == wid) {
- g_cfg_hword[i].val = cpu_to_le16(info[3] | (info[4] << 8));
+ g_cfg_hword[i].val =
+ cpu_to_le16(info[4] |
+ (info[5] << 8));
break;
}
i++;
} while (1);
- len = 3;
+ len = 4;
break;
case WID_INT:
@@ -312,12 +318,16 @@ static void wilc_wlan_parse_response_frame(u8 *info, int size)
break;
if (g_cfg_word[i].id == wid) {
- g_cfg_word[i].val = cpu_to_le32(info[3] | (info[4] << 8) | (info[5] << 16) | (info[6] << 24));
+ g_cfg_word[i].val =
+ cpu_to_le32(info[4] |
+ (info[5] << 8) |
+ (info[6] << 16) |
+ (info[7] << 24));
break;
}
i++;
} while (1);
- len = 5;
+ len = 6;
break;
case WID_STR:
@@ -332,12 +342,13 @@ static void wilc_wlan_parse_response_frame(u8 *info, int size)
i += toggle;
toggle ^= 1;
}
- memcpy(g_cfg_str[i].str, &info[2], (info[2] + 1));
+ memcpy(g_cfg_str[i].str, &info[2],
+ (info[2] + 2));
break;
}
i++;
} while (1);
- len = 1 + info[2];
+ len = 2 + info[2];
break;
default:
@@ -475,7 +486,8 @@ int wilc_wlan_cfg_get_wid_value(u16 wid, u8 *buffer, u32 buffer_size)
break;
if (g_cfg_str[i].id == wid) {
- u32 size = g_cfg_str[i].str[0];
+ u32 size = g_cfg_str[i].str[0] |
+ (g_cfg_str[i].str[1] << 8);
if (buffer_size >= size) {
if (g_cfg_str[i].id == WID_SITE_SURVEY_RESULTS) {
@@ -485,7 +497,8 @@ int wilc_wlan_cfg_get_wid_value(u16 wid, u8 *buffer, u32 buffer_size)
toggle ^= 1;
}
- memcpy(buffer, &g_cfg_str[i].str[1], size);
+ memcpy(buffer, &g_cfg_str[i].str[2],
+ size);
ret = size;
}
break;
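The wilc_wlan_cfg.c changes above widen the per-WID length field in the configuration frame from one byte to two, so value bytes now start at offset 4 instead of 3 and every size, offset, and return value moves by one. A minimal sketch of the resulting TLV layout for a 16-bit WID, in plain C with illustrative names:

#include <stdint.h>

/*
 * TLV layout of one WID after this change:
 *   [0..1]  WID id, little endian
 *   [2..3]  value length, little endian (now 16 bits wide)
 *   [4.. ]  value bytes
 */
static int cfg_pack_hword(uint8_t *frame, uint32_t offset,
			  uint16_t id, uint16_t val)
{
	uint8_t *buf = &frame[offset];

	buf[0] = (uint8_t)id;
	buf[1] = (uint8_t)(id >> 8);
	buf[2] = 2;		/* value length, low byte */
	buf[3] = 0;		/* value length, high byte */
	buf[4] = (uint8_t)val;
	buf[5] = (uint8_t)(val >> 8);

	return 6;		/* bytes consumed, matching the new return value */
}

The matching parser changes in wilc_wlan_parse_response_frame() read the value from offset 4 onwards for the same reason.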
diff --git a/drivers/staging/wlan-ng/hfa384x.h b/drivers/staging/wlan-ng/hfa384x.h
index 5f1851c85f12..310e2c454590 100644
--- a/drivers/staging/wlan-ng/hfa384x.h
+++ b/drivers/staging/wlan-ng/hfa384x.h
@@ -482,7 +482,7 @@ struct hfa384x_tx_frame {
u8 address3[6];
u16 sequence_control;
u8 address4[6];
- u16 data_len; /* little endian format */
+ __le16 data_len; /* little endian format */
/*-- 802.3 Header Information --*/
@@ -801,41 +801,41 @@ struct hfa384x_usb_txfrm {
} __packed;
struct hfa384x_usb_cmdreq {
- u16 type;
- u16 cmd;
- u16 parm0;
- u16 parm1;
- u16 parm2;
+ __le16 type;
+ __le16 cmd;
+ __le16 parm0;
+ __le16 parm1;
+ __le16 parm2;
u8 pad[54];
} __packed;
struct hfa384x_usb_wridreq {
- u16 type;
- u16 frmlen;
- u16 rid;
+ __le16 type;
+ __le16 frmlen;
+ __le16 rid;
u8 data[HFA384x_RIDDATA_MAXLEN];
} __packed;
struct hfa384x_usb_rridreq {
- u16 type;
- u16 frmlen;
- u16 rid;
+ __le16 type;
+ __le16 frmlen;
+ __le16 rid;
u8 pad[58];
} __packed;
struct hfa384x_usb_wmemreq {
- u16 type;
- u16 frmlen;
- u16 offset;
- u16 page;
+ __le16 type;
+ __le16 frmlen;
+ __le16 offset;
+ __le16 page;
u8 data[HFA384x_USB_RWMEM_MAXLEN];
} __packed;
struct hfa384x_usb_rmemreq {
- u16 type;
- u16 frmlen;
- u16 offset;
- u16 page;
+ __le16 type;
+ __le16 frmlen;
+ __le16 offset;
+ __le16 page;
u8 pad[56];
} __packed;
@@ -854,16 +854,16 @@ struct hfa384x_usb_infofrm {
struct hfa384x_usb_statusresp {
u16 type;
- u16 status;
- u16 resp0;
- u16 resp1;
- u16 resp2;
+ __le16 status;
+ __le16 resp0;
+ __le16 resp1;
+ __le16 resp2;
} __packed;
struct hfa384x_usb_rridresp {
u16 type;
- u16 frmlen;
- u16 rid;
+ __le16 frmlen;
+ __le16 rid;
u8 data[HFA384x_RIDDATA_MAXLEN];
} __packed;
@@ -1078,8 +1078,8 @@ struct hfa384x_pdr_end_of_pda {
} __packed;
struct hfa384x_pdrec {
- u16 len; /* in words */
- u16 code;
+ __le16 len; /* in words */
+ __le16 code;
union pdr {
struct hfa384x_pdr_pcb_partnum pcb_partnum;
struct hfa384x_pdr_pcb_tracenum pcb_tracenum;
@@ -1408,7 +1408,7 @@ hfa384x_drvr_setconfig_async(struct hfa384x *hw,
static inline int
hfa384x_drvr_setconfig16_async(struct hfa384x *hw, u16 rid, u16 val)
{
- u16 value = cpu_to_le16(val);
+ __le16 value = cpu_to_le16(val);
return hfa384x_drvr_setconfig_async(hw, rid, &value, sizeof(value),
NULL, NULL);
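The hfa384x.h hunks above only change type annotations from u16 to __le16 (and, later in this series, to __be32/__be64 in the capture header); the on-the-wire layout is unchanged, but sparse can now flag missing byte-order conversions. As a reminder of what the annotation implies, here is a small standalone equivalent of the cpu_to_le16()/le16_to_cpu() pair the driver relies on; it is a sketch, not kernel code.

#include <stdint.h>

/* Store a host-order 16-bit value as little endian, as cpu_to_le16() does
 * for the __le16 request fields above. */
static void put_le16(uint8_t p[2], uint16_t v)
{
	p[0] = (uint8_t)v;
	p[1] = (uint8_t)(v >> 8);
}

/* Read a little-endian field back into host order (le16_to_cpu()). */
static uint16_t get_le16(const uint8_t p[2])
{
	return (uint16_t)(p[0] | (p[1] << 8));
}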
diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c
index 6134eba5cad4..a812e55ba1b0 100644
--- a/drivers/staging/wlan-ng/hfa384x_usb.c
+++ b/drivers/staging/wlan-ng/hfa384x_usb.c
@@ -2316,7 +2316,7 @@ int hfa384x_drvr_ramdl_write(struct hfa384x *hw, u32 daddr, void *buf, u32 len)
int hfa384x_drvr_readpda(struct hfa384x *hw, void *buf, unsigned int len)
{
int result = 0;
- u16 *pda = buf;
+ __le16 *pda = buf;
int pdaok = 0;
int morepdrs = 1;
int currpdr = 0; /* word offset of the current pdr */
@@ -3513,7 +3513,7 @@ static void hfa384x_int_rxmonitor(struct wlandevice *wlandev,
caphdr->version = htonl(P80211CAPTURE_VERSION);
caphdr->length = htonl(sizeof(struct p80211_caphdr));
- caphdr->mactime = __cpu_to_be64(rxdesc->time) * 1000;
+ caphdr->mactime = __cpu_to_be64(rxdesc->time * 1000);
caphdr->hosttime = __cpu_to_be64(jiffies);
caphdr->phytype = htonl(4); /* dss_dot11_b */
caphdr->channel = htonl(hw->sniff_channel);
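The mactime change above is a real bug fix, not a cleanup: the microsecond scaling must be applied to the host-order timestamp before the big-endian conversion, because multiplying an already byte-swapped value produces garbage on little-endian hosts. A tiny standalone illustration follows, with swap64() standing in for __cpu_to_be64().

#include <stdint.h>
#include <stdio.h>

/* Stand-in for __cpu_to_be64() on a little-endian host: byte-reverse. */
static uint64_t swap64(uint64_t v)
{
	return  ((v & 0x00000000000000ffULL) << 56) |
		((v & 0x000000000000ff00ULL) << 40) |
		((v & 0x0000000000ff0000ULL) << 24) |
		((v & 0x00000000ff000000ULL) <<  8) |
		((v & 0x000000ff00000000ULL) >>  8) |
		((v & 0x0000ff0000000000ULL) >> 24) |
		((v & 0x00ff000000000000ULL) >> 40) |
		((v & 0xff00000000000000ULL) >> 56);
}

int main(void)
{
	uint64_t time = 0x1234;	/* device timestamp in host order */

	/* Wrong: scales the byte-swapped value, producing garbage. */
	uint64_t wrong = swap64(time) * 1000;
	/* Right: scale first, then convert to big endian for the header. */
	uint64_t right = swap64(time * 1000);

	printf("wrong=%016llx right=%016llx\n",
	       (unsigned long long)wrong, (unsigned long long)right);
	return 0;
}

On a big-endian host the conversion is an identity and both expressions happen to agree, which is why the bug could go unnoticed there.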
diff --git a/drivers/staging/wlan-ng/p80211conv.c b/drivers/staging/wlan-ng/p80211conv.c
index 8b0905e7c9be..a062e80361ef 100644
--- a/drivers/staging/wlan-ng/p80211conv.c
+++ b/drivers/staging/wlan-ng/p80211conv.c
@@ -211,7 +211,7 @@ int skb_ether_to_p80211(struct wlandevice *wlandev, u32 ethconv,
return -ENOMEM;
foo = wep_encrypt(wlandev, skb->data, p80211_wep->data,
skb->len,
- (wlandev->hostwep & HOSTWEP_DEFAULTKEY_MASK),
+ wlandev->hostwep & HOSTWEP_DEFAULTKEY_MASK,
p80211_wep->iv, p80211_wep->icv);
if (foo) {
netdev_warn(wlandev->netdev,
diff --git a/drivers/staging/wlan-ng/p80211conv.h b/drivers/staging/wlan-ng/p80211conv.h
index 04bac2ed0e8a..66332b1fb6d5 100644
--- a/drivers/staging/wlan-ng/p80211conv.h
+++ b/drivers/staging/wlan-ng/p80211conv.h
@@ -101,20 +101,20 @@ void p80211skb_rxmeta_detach(struct sk_buff *skb);
* Frame capture header. (See doc/capturefrm.txt)
*/
struct p80211_caphdr {
- u32 version;
- u32 length;
- u64 mactime;
- u64 hosttime;
- u32 phytype;
- u32 channel;
- u32 datarate;
- u32 antenna;
- u32 priority;
- u32 ssi_type;
- s32 ssi_signal;
- s32 ssi_noise;
- u32 preamble;
- u32 encoding;
+ __be32 version;
+ __be32 length;
+ __be64 mactime;
+ __be64 hosttime;
+ __be32 phytype;
+ __be32 channel;
+ __be32 datarate;
+ __be32 antenna;
+ __be32 priority;
+ __be32 ssi_type;
+ __be32 ssi_signal;
+ __be32 ssi_noise;
+ __be32 preamble;
+ __be32 encoding;
};
/* buffer free method pointer type */
diff --git a/drivers/staging/wlan-ng/p80211req.c b/drivers/staging/wlan-ng/p80211req.c
index 621df98183bf..afe847722cf7 100644
--- a/drivers/staging/wlan-ng/p80211req.c
+++ b/drivers/staging/wlan-ng/p80211req.c
@@ -198,7 +198,8 @@ static void p80211req_mibset_mibget(struct wlandevice *wlandev,
struct p80211msg_dot11req_mibget *mib_msg,
int isget)
{
- struct p80211itemd *mibitem = (struct p80211itemd *)mib_msg->mibattribute.data;
+ struct p80211itemd *mibitem =
+ (struct p80211itemd *)mib_msg->mibattribute.data;
struct p80211pstrd *pstr = (struct p80211pstrd *)mibitem->data;
u8 *key = mibitem->data + sizeof(struct p80211pstrd);
diff --git a/drivers/staging/wlan-ng/prism2fw.c b/drivers/staging/wlan-ng/prism2fw.c
index 2e349f87e738..afd877fb4557 100644
--- a/drivers/staging/wlan-ng/prism2fw.c
+++ b/drivers/staging/wlan-ng/prism2fw.c
@@ -604,7 +604,7 @@ static int mkimage(struct imgchunk *clist, unsigned int *ccnt)
*/
static int mkpdrlist(struct pda *pda)
{
- u16 *pda16 = (u16 *)pda->buf;
+ __le16 *pda16 = (__le16 *)pda->buf;
int curroff; /* in 'words' */
pda->nrec = 0;
diff --git a/drivers/staging/wlan-ng/prism2mgmt.c b/drivers/staging/wlan-ng/prism2mgmt.c
index 0e671c3b308d..e23a0d0e7b09 100644
--- a/drivers/staging/wlan-ng/prism2mgmt.c
+++ b/drivers/staging/wlan-ng/prism2mgmt.c
@@ -1168,7 +1168,6 @@ int prism2mgmt_wlansniff(struct wlandevice *wlandev, void *msgp)
}
} else {
result = hfa384x_drvr_disable(hw, 0);
-
}
netdev_info(wlandev->netdev, "monitor mode disabled\n");
diff --git a/drivers/staging/wlan-ng/prism2sta.c b/drivers/staging/wlan-ng/prism2sta.c
index 984804b92e05..9c2b4ef2de58 100644
--- a/drivers/staging/wlan-ng/prism2sta.c
+++ b/drivers/staging/wlan-ng/prism2sta.c
@@ -1311,7 +1311,7 @@ void prism2sta_processing_defer(struct work_struct *data)
/* This one indicates that the MAC has decided to and
* successfully completed a change to another AP. We
* should probably implement a reassociation indication
- * in response to this one. I'm thinking that the the
+ * in response to this one. I'm thinking that the
* p80211 layer needs to be notified in case of
* buffering/queueing issues. User mode also needs to be
* notified so that any BSS dependent elements can be
diff --git a/drivers/staging/xgifb/XGI_main_26.c b/drivers/staging/xgifb/XGI_main_26.c
index 6930f7eb741b..b450c740f626 100644
--- a/drivers/staging/xgifb/XGI_main_26.c
+++ b/drivers/staging/xgifb/XGI_main_26.c
@@ -178,9 +178,9 @@ static int XGIfb_mode_rate_to_ddata(struct vb_device_info *XGI_Pr,
*sync |= FB_SYNC_HOR_HIGH_ACT;
*vmode = FB_VMODE_NONINTERLACED;
- if (XGI330_RefIndex[RefreshRateTableIndex].Ext_InfoFlag & 0x0080)
+ if (XGI330_RefIndex[RefreshRateTableIndex].Ext_InfoFlag & 0x0080) {
*vmode = FB_VMODE_INTERLACED;
- else {
+ } else {
j = 0;
while (XGI330_EModeIDTable[j].Ext_ModeID != 0xff) {
if (XGI330_EModeIDTable[j].Ext_ModeID ==
@@ -878,30 +878,14 @@ static void XGIfb_post_setmode(struct xgifb_video_info *xgifb_info)
}
if ((filter >= 0) && (filter <= 7)) {
+ const u8 *f = XGI_TV_filter[filter_tb].filter[filter];
+
pr_debug("FilterTable[%d]-%d: %*ph\n",
- filter_tb, filter,
- 4, XGI_TV_filter[filter_tb].
- filter[filter]);
- xgifb_reg_set(
- XGIPART2,
- 0x35,
- (XGI_TV_filter[filter_tb].
- filter[filter][0]));
- xgifb_reg_set(
- XGIPART2,
- 0x36,
- (XGI_TV_filter[filter_tb].
- filter[filter][1]));
- xgifb_reg_set(
- XGIPART2,
- 0x37,
- (XGI_TV_filter[filter_tb].
- filter[filter][2]));
- xgifb_reg_set(
- XGIPART2,
- 0x38,
- (XGI_TV_filter[filter_tb].
- filter[filter][3]));
+ filter_tb, filter, 4, f);
+ xgifb_reg_set(XGIPART2, 0x35, f[0]);
+ xgifb_reg_set(XGIPART2, 0x36, f[1]);
+ xgifb_reg_set(XGIPART2, 0x37, f[2]);
+ xgifb_reg_set(XGIPART2, 0x38, f[3]);
}
}
}
@@ -1480,9 +1464,9 @@ static void XGIfb_detect_VB(struct xgifb_video_info *xgifb_info)
cr32 = xgifb_reg_get(XGICR, IND_XGI_SCRATCH_REG_CR32);
- if ((cr32 & SIS_CRT1) && !XGIfb_crt1off)
+ if ((cr32 & SIS_CRT1) && !XGIfb_crt1off) {
XGIfb_crt1off = 0;
- else {
+ } else {
if (cr32 & 0x5F)
XGIfb_crt1off = 1;
else
@@ -1500,18 +1484,19 @@ static void XGIfb_detect_VB(struct xgifb_video_info *xgifb_info)
xgifb_info->display2 = XGIFB_DISP_NONE;
}
- if (XGIfb_tvplug != -1)
+ if (XGIfb_tvplug != -1) {
/* Override with option */
xgifb_info->TV_plug = XGIfb_tvplug;
- else if (cr32 & SIS_VB_HIVISION) {
+ } else if (cr32 & SIS_VB_HIVISION) {
xgifb_info->TV_type = TVMODE_HIVISION;
xgifb_info->TV_plug = TVPLUG_SVIDEO;
- } else if (cr32 & SIS_VB_SVIDEO)
+ } else if (cr32 & SIS_VB_SVIDEO) {
xgifb_info->TV_plug = TVPLUG_SVIDEO;
- else if (cr32 & SIS_VB_COMPOSITE)
+ } else if (cr32 & SIS_VB_COMPOSITE) {
xgifb_info->TV_plug = TVPLUG_COMPOSITE;
- else if (cr32 & SIS_VB_SCART)
+ } else if (cr32 & SIS_VB_SCART) {
xgifb_info->TV_plug = TVPLUG_SCART;
+ }
if (xgifb_info->TV_type == 0) {
temp = xgifb_reg_get(XGICR, 0x38);
@@ -1659,7 +1644,7 @@ static int xgifb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
xgifb_info->mmio_base = pci_resource_start(pdev, 1);
xgifb_info->mmio_size = pci_resource_len(pdev, 1);
xgifb_info->vga_base = pci_resource_start(pdev, 2) + 0x30;
- dev_info(&pdev->dev, "Relocate IO address: %Lx [%08lx]\n",
+ dev_info(&pdev->dev, "Relocate IO address: %llx [%08lx]\n",
(u64)pci_resource_start(pdev, 2),
xgifb_info->vga_base);
@@ -1754,13 +1739,13 @@ static int xgifb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
xgifb_info->mmio_size);
dev_info(&pdev->dev,
- "Framebuffer at 0x%Lx, mapped to 0x%p, size %dk\n",
+ "Framebuffer at 0x%llx, mapped to 0x%p, size %dk\n",
(u64)xgifb_info->video_base,
xgifb_info->video_vbase,
xgifb_info->video_size / 1024);
dev_info(&pdev->dev,
- "MMIO at 0x%Lx, mapped to 0x%p, size %ldk\n",
+ "MMIO at 0x%llx, mapped to 0x%p, size %ldk\n",
(u64)xgifb_info->mmio_base, xgifb_info->mmio_vbase,
xgifb_info->mmio_size / 1024);
diff --git a/drivers/staging/xgifb/vb_init.h b/drivers/staging/xgifb/vb_init.h
index 500cabe41a3c..e835054b87bf 100644
--- a/drivers/staging/xgifb/vb_init.h
+++ b/drivers/staging/xgifb/vb_init.h
@@ -1,6 +1,5 @@
#ifndef _VBINIT_
#define _VBINIT_
unsigned char XGIInitNew(struct pci_dev *pdev);
-void XGIRegInit(struct vb_device_info *, unsigned long);
+void XGIRegInit(struct vb_device_info *XGI_Pr, unsigned long BaseAddr);
#endif
-
diff --git a/drivers/staging/xgifb/vb_setmode.c b/drivers/staging/xgifb/vb_setmode.c
index 7c7c8c8f1df3..cea128bede52 100644
--- a/drivers/staging/xgifb/vb_setmode.c
+++ b/drivers/staging/xgifb/vb_setmode.c
@@ -221,8 +221,7 @@ static unsigned char XGI_AjustCRT2Rate(unsigned short ModeIdIndex,
for (; XGI330_RefIndex[RefreshRateTableIndex + (*i)].ModeID ==
tempbx; (*i)--) {
- infoflag = XGI330_RefIndex[RefreshRateTableIndex + (*i)].
- Ext_InfoFlag;
+ infoflag = XGI330_RefIndex[RefreshRateTableIndex + (*i)].Ext_InfoFlag;
if (infoflag & tempax)
return 1;
@@ -231,8 +230,7 @@ static unsigned char XGI_AjustCRT2Rate(unsigned short ModeIdIndex,
}
for ((*i) = 0;; (*i)++) {
- infoflag = XGI330_RefIndex[RefreshRateTableIndex + (*i)].
- Ext_InfoFlag;
+ infoflag = XGI330_RefIndex[RefreshRateTableIndex + (*i)].Ext_InfoFlag;
if (XGI330_RefIndex[RefreshRateTableIndex + (*i)].ModeID
!= tempbx) {
return 0;
@@ -5092,8 +5090,7 @@ unsigned short XGI_GetRatePtrCRT2(struct xgi_hw_device_info *pXGIHWDE,
i = 0;
do {
- if (XGI330_RefIndex[RefreshRateTableIndex + i].
- ModeID != ModeNo)
+ if (XGI330_RefIndex[RefreshRateTableIndex + i].ModeID != ModeNo)
break;
temp = XGI330_RefIndex[RefreshRateTableIndex + i].Ext_InfoFlag;
temp &= ModeTypeMask;
@@ -5105,8 +5102,7 @@ unsigned short XGI_GetRatePtrCRT2(struct xgi_hw_device_info *pXGIHWDE,
} while (index != 0xFFFF);
if (!(pVBInfo->VBInfo & SetCRT2ToRAMDAC)) {
if (pVBInfo->VBInfo & SetInSlaveMode) {
- temp = XGI330_RefIndex[RefreshRateTableIndex + i - 1].
- Ext_InfoFlag;
+ temp = XGI330_RefIndex[RefreshRateTableIndex + i - 1].Ext_InfoFlag;
if (temp & InterlaceMode)
i++;
}
diff --git a/drivers/tee/Kconfig b/drivers/tee/Kconfig
new file mode 100644
index 000000000000..2330a4eb4e8b
--- /dev/null
+++ b/drivers/tee/Kconfig
@@ -0,0 +1,18 @@
+# Generic Trusted Execution Environment Configuration
+config TEE
+ tristate "Trusted Execution Environment support"
+ select DMA_SHARED_BUFFER
+ select GENERIC_ALLOCATOR
+ help
+ This implements a generic interface towards a Trusted Execution
+ Environment (TEE).
+
+if TEE
+
+menu "TEE drivers"
+
+source "drivers/tee/optee/Kconfig"
+
+endmenu
+
+endif
diff --git a/drivers/tee/Makefile b/drivers/tee/Makefile
new file mode 100644
index 000000000000..7a4e4a1ac39c
--- /dev/null
+++ b/drivers/tee/Makefile
@@ -0,0 +1,5 @@
+obj-$(CONFIG_TEE) += tee.o
+tee-objs += tee_core.o
+tee-objs += tee_shm.o
+tee-objs += tee_shm_pool.o
+obj-$(CONFIG_OPTEE) += optee/
diff --git a/drivers/tee/optee/Kconfig b/drivers/tee/optee/Kconfig
new file mode 100644
index 000000000000..0126de898036
--- /dev/null
+++ b/drivers/tee/optee/Kconfig
@@ -0,0 +1,7 @@
+# OP-TEE Trusted Execution Environment Configuration
+config OPTEE
+ tristate "OP-TEE"
+ depends on HAVE_ARM_SMCCC
+ help
+ This implements the OP-TEE Trusted Execution Environment (TEE)
+ driver.
diff --git a/drivers/tee/optee/Makefile b/drivers/tee/optee/Makefile
new file mode 100644
index 000000000000..92fe5789bcce
--- /dev/null
+++ b/drivers/tee/optee/Makefile
@@ -0,0 +1,5 @@
+obj-$(CONFIG_OPTEE) += optee.o
+optee-objs += core.o
+optee-objs += call.o
+optee-objs += rpc.o
+optee-objs += supp.o
diff --git a/drivers/tee/optee/call.c b/drivers/tee/optee/call.c
new file mode 100644
index 000000000000..f7b7b404c990
--- /dev/null
+++ b/drivers/tee/optee/call.c
@@ -0,0 +1,444 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/arm-smccc.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/tee_drv.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include "optee_private.h"
+#include "optee_smc.h"
+
+struct optee_call_waiter {
+ struct list_head list_node;
+ struct completion c;
+};
+
+static void optee_cq_wait_init(struct optee_call_queue *cq,
+ struct optee_call_waiter *w)
+{
+ /*
+ * We're preparing to make a call to secure world. In case we can't
+ * allocate a thread in secure world we'll end up waiting in
+ * optee_cq_wait_for_completion().
+ *
+ * Normally if there's no contention in secure world the call will
+ * complete and we can cleanup directly with optee_cq_wait_final().
+ */
+ mutex_lock(&cq->mutex);
+
+ /*
+ * We add ourselves to the queue, but we don't wait. This
+ * guarantees that we don't lose a completion if secure world
+ * returns busy and another thread that just exited tries to complete
+ * someone.
+ */
+ init_completion(&w->c);
+ list_add_tail(&w->list_node, &cq->waiters);
+
+ mutex_unlock(&cq->mutex);
+}
+
+static void optee_cq_wait_for_completion(struct optee_call_queue *cq,
+ struct optee_call_waiter *w)
+{
+ wait_for_completion(&w->c);
+
+ mutex_lock(&cq->mutex);
+
+ /* Move to end of list to get out of the way for other waiters */
+ list_del(&w->list_node);
+ reinit_completion(&w->c);
+ list_add_tail(&w->list_node, &cq->waiters);
+
+ mutex_unlock(&cq->mutex);
+}
+
+static void optee_cq_complete_one(struct optee_call_queue *cq)
+{
+ struct optee_call_waiter *w;
+
+ list_for_each_entry(w, &cq->waiters, list_node) {
+ if (!completion_done(&w->c)) {
+ complete(&w->c);
+ break;
+ }
+ }
+}
+
+static void optee_cq_wait_final(struct optee_call_queue *cq,
+ struct optee_call_waiter *w)
+{
+ /*
+ * We're done with the call to secure world. The thread in secure
+ * world that was used for this call is now available for some
+ * other task to use.
+ */
+ mutex_lock(&cq->mutex);
+
+ /* Get out of the list */
+ list_del(&w->list_node);
+
+ /* Wake up one eventual waiting task */
+ optee_cq_complete_one(cq);
+
+ /*
+ * If we're completed we've got a completion from another task that
+ * was just done with its call to secure world. Since yet another
+ * thread now is available in secure world wake up another eventual
+ * waiting task.
+ */
+ if (completion_done(&w->c))
+ optee_cq_complete_one(cq);
+
+ mutex_unlock(&cq->mutex);
+}
+
+/* Requires the filpstate mutex to be held */
+static struct optee_session *find_session(struct optee_context_data *ctxdata,
+ u32 session_id)
+{
+ struct optee_session *sess;
+
+ list_for_each_entry(sess, &ctxdata->sess_list, list_node)
+ if (sess->session_id == session_id)
+ return sess;
+
+ return NULL;
+}
+
+/**
+ * optee_do_call_with_arg() - Do an SMC to OP-TEE in secure world
+ * @ctx: calling context
+ * @parg: physical address of message to pass to secure world
+ *
+ * Does an SMC to OP-TEE in secure world and handles eventual resulting
+ * Remote Procedure Calls (RPC) from OP-TEE.
+ *
+ * Returns return code from secure world, 0 is OK
+ */
+u32 optee_do_call_with_arg(struct tee_context *ctx, phys_addr_t parg)
+{
+ struct optee *optee = tee_get_drvdata(ctx->teedev);
+ struct optee_call_waiter w;
+ struct optee_rpc_param param = { };
+ u32 ret;
+
+ param.a0 = OPTEE_SMC_CALL_WITH_ARG;
+ reg_pair_from_64(&param.a1, &param.a2, parg);
+ /* Initialize waiter */
+ optee_cq_wait_init(&optee->call_queue, &w);
+ while (true) {
+ struct arm_smccc_res res;
+
+ optee->invoke_fn(param.a0, param.a1, param.a2, param.a3,
+ param.a4, param.a5, param.a6, param.a7,
+ &res);
+
+ if (res.a0 == OPTEE_SMC_RETURN_ETHREAD_LIMIT) {
+ /*
+ * Out of threads in secure world, wait for a thread to
+ * become available.
+ */
+ optee_cq_wait_for_completion(&optee->call_queue, &w);
+ } else if (OPTEE_SMC_RETURN_IS_RPC(res.a0)) {
+ param.a0 = res.a0;
+ param.a1 = res.a1;
+ param.a2 = res.a2;
+ param.a3 = res.a3;
+ optee_handle_rpc(ctx, &param);
+ } else {
+ ret = res.a0;
+ break;
+ }
+ }
+
+ /*
+ * We're done with our thread in secure world, if there's any
+ * thread waiters wake up one.
+ */
+ optee_cq_wait_final(&optee->call_queue, &w);
+
+ return ret;
+}
+
+static struct tee_shm *get_msg_arg(struct tee_context *ctx, size_t num_params,
+ struct optee_msg_arg **msg_arg,
+ phys_addr_t *msg_parg)
+{
+ int rc;
+ struct tee_shm *shm;
+ struct optee_msg_arg *ma;
+
+ shm = tee_shm_alloc(ctx, OPTEE_MSG_GET_ARG_SIZE(num_params),
+ TEE_SHM_MAPPED);
+ if (IS_ERR(shm))
+ return shm;
+
+ ma = tee_shm_get_va(shm, 0);
+ if (IS_ERR(ma)) {
+ rc = PTR_ERR(ma);
+ goto out;
+ }
+
+ rc = tee_shm_get_pa(shm, 0, msg_parg);
+ if (rc)
+ goto out;
+
+ memset(ma, 0, OPTEE_MSG_GET_ARG_SIZE(num_params));
+ ma->num_params = num_params;
+ *msg_arg = ma;
+out:
+ if (rc) {
+ tee_shm_free(shm);
+ return ERR_PTR(rc);
+ }
+
+ return shm;
+}
+
+int optee_open_session(struct tee_context *ctx,
+ struct tee_ioctl_open_session_arg *arg,
+ struct tee_param *param)
+{
+ struct optee_context_data *ctxdata = ctx->data;
+ int rc;
+ struct tee_shm *shm;
+ struct optee_msg_arg *msg_arg;
+ phys_addr_t msg_parg;
+ struct optee_session *sess = NULL;
+
+ /* +2 for the meta parameters added below */
+ shm = get_msg_arg(ctx, arg->num_params + 2, &msg_arg, &msg_parg);
+ if (IS_ERR(shm))
+ return PTR_ERR(shm);
+
+ msg_arg->cmd = OPTEE_MSG_CMD_OPEN_SESSION;
+ msg_arg->cancel_id = arg->cancel_id;
+
+ /*
+ * Initialize and add the meta parameters needed when opening a
+ * session.
+ */
+ msg_arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT |
+ OPTEE_MSG_ATTR_META;
+ msg_arg->params[1].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT |
+ OPTEE_MSG_ATTR_META;
+ memcpy(&msg_arg->params[0].u.value, arg->uuid, sizeof(arg->uuid));
+ memcpy(&msg_arg->params[1].u.value, arg->uuid, sizeof(arg->clnt_uuid));
+ msg_arg->params[1].u.value.c = arg->clnt_login;
+
+ rc = optee_to_msg_param(msg_arg->params + 2, arg->num_params, param);
+ if (rc)
+ goto out;
+
+ sess = kzalloc(sizeof(*sess), GFP_KERNEL);
+ if (!sess) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ if (optee_do_call_with_arg(ctx, msg_parg)) {
+ msg_arg->ret = TEEC_ERROR_COMMUNICATION;
+ msg_arg->ret_origin = TEEC_ORIGIN_COMMS;
+ }
+
+ if (msg_arg->ret == TEEC_SUCCESS) {
+ /* A new session has been created, add it to the list. */
+ sess->session_id = msg_arg->session;
+ mutex_lock(&ctxdata->mutex);
+ list_add(&sess->list_node, &ctxdata->sess_list);
+ mutex_unlock(&ctxdata->mutex);
+ } else {
+ kfree(sess);
+ }
+
+ if (optee_from_msg_param(param, arg->num_params, msg_arg->params + 2)) {
+ arg->ret = TEEC_ERROR_COMMUNICATION;
+ arg->ret_origin = TEEC_ORIGIN_COMMS;
+ /* Close session again to avoid leakage */
+ optee_close_session(ctx, msg_arg->session);
+ } else {
+ arg->session = msg_arg->session;
+ arg->ret = msg_arg->ret;
+ arg->ret_origin = msg_arg->ret_origin;
+ }
+out:
+ tee_shm_free(shm);
+
+ return rc;
+}
+
+int optee_close_session(struct tee_context *ctx, u32 session)
+{
+ struct optee_context_data *ctxdata = ctx->data;
+ struct tee_shm *shm;
+ struct optee_msg_arg *msg_arg;
+ phys_addr_t msg_parg;
+ struct optee_session *sess;
+
+ /* Check that the session is valid and remove it from the list */
+ mutex_lock(&ctxdata->mutex);
+ sess = find_session(ctxdata, session);
+ if (sess)
+ list_del(&sess->list_node);
+ mutex_unlock(&ctxdata->mutex);
+ if (!sess)
+ return -EINVAL;
+ kfree(sess);
+
+ shm = get_msg_arg(ctx, 0, &msg_arg, &msg_parg);
+ if (IS_ERR(shm))
+ return PTR_ERR(shm);
+
+ msg_arg->cmd = OPTEE_MSG_CMD_CLOSE_SESSION;
+ msg_arg->session = session;
+ optee_do_call_with_arg(ctx, msg_parg);
+
+ tee_shm_free(shm);
+ return 0;
+}
+
+int optee_invoke_func(struct tee_context *ctx, struct tee_ioctl_invoke_arg *arg,
+ struct tee_param *param)
+{
+ struct optee_context_data *ctxdata = ctx->data;
+ struct tee_shm *shm;
+ struct optee_msg_arg *msg_arg;
+ phys_addr_t msg_parg;
+ struct optee_session *sess;
+ int rc;
+
+ /* Check that the session is valid */
+ mutex_lock(&ctxdata->mutex);
+ sess = find_session(ctxdata, arg->session);
+ mutex_unlock(&ctxdata->mutex);
+ if (!sess)
+ return -EINVAL;
+
+ shm = get_msg_arg(ctx, arg->num_params, &msg_arg, &msg_parg);
+ if (IS_ERR(shm))
+ return PTR_ERR(shm);
+ msg_arg->cmd = OPTEE_MSG_CMD_INVOKE_COMMAND;
+ msg_arg->func = arg->func;
+ msg_arg->session = arg->session;
+ msg_arg->cancel_id = arg->cancel_id;
+
+ rc = optee_to_msg_param(msg_arg->params, arg->num_params, param);
+ if (rc)
+ goto out;
+
+ if (optee_do_call_with_arg(ctx, msg_parg)) {
+ msg_arg->ret = TEEC_ERROR_COMMUNICATION;
+ msg_arg->ret_origin = TEEC_ORIGIN_COMMS;
+ }
+
+ if (optee_from_msg_param(param, arg->num_params, msg_arg->params)) {
+ msg_arg->ret = TEEC_ERROR_COMMUNICATION;
+ msg_arg->ret_origin = TEEC_ORIGIN_COMMS;
+ }
+
+ arg->ret = msg_arg->ret;
+ arg->ret_origin = msg_arg->ret_origin;
+out:
+ tee_shm_free(shm);
+ return rc;
+}
+
+int optee_cancel_req(struct tee_context *ctx, u32 cancel_id, u32 session)
+{
+ struct optee_context_data *ctxdata = ctx->data;
+ struct tee_shm *shm;
+ struct optee_msg_arg *msg_arg;
+ phys_addr_t msg_parg;
+ struct optee_session *sess;
+
+ /* Check that the session is valid */
+ mutex_lock(&ctxdata->mutex);
+ sess = find_session(ctxdata, session);
+ mutex_unlock(&ctxdata->mutex);
+ if (!sess)
+ return -EINVAL;
+
+ shm = get_msg_arg(ctx, 0, &msg_arg, &msg_parg);
+ if (IS_ERR(shm))
+ return PTR_ERR(shm);
+
+ msg_arg->cmd = OPTEE_MSG_CMD_CANCEL;
+ msg_arg->session = session;
+ msg_arg->cancel_id = cancel_id;
+ optee_do_call_with_arg(ctx, msg_parg);
+
+ tee_shm_free(shm);
+ return 0;
+}
+
+/**
+ * optee_enable_shm_cache() - Enables caching of some shared memory allocation
+ * in OP-TEE
+ * @optee: main service struct
+ */
+void optee_enable_shm_cache(struct optee *optee)
+{
+ struct optee_call_waiter w;
+
+ /* We need to retry until secure world isn't busy. */
+ optee_cq_wait_init(&optee->call_queue, &w);
+ while (true) {
+ struct arm_smccc_res res;
+
+ optee->invoke_fn(OPTEE_SMC_ENABLE_SHM_CACHE, 0, 0, 0, 0, 0, 0,
+ 0, &res);
+ if (res.a0 == OPTEE_SMC_RETURN_OK)
+ break;
+ optee_cq_wait_for_completion(&optee->call_queue, &w);
+ }
+ optee_cq_wait_final(&optee->call_queue, &w);
+}
+
+/**
+ * optee_disable_shm_cache() - Disables caching of some shared memory allocation
+ * in OP-TEE
+ * @optee: main service struct
+ */
+void optee_disable_shm_cache(struct optee *optee)
+{
+ struct optee_call_waiter w;
+
+ /* We need to retry until secure world isn't busy. */
+ optee_cq_wait_init(&optee->call_queue, &w);
+ while (true) {
+ union {
+ struct arm_smccc_res smccc;
+ struct optee_smc_disable_shm_cache_result result;
+ } res;
+
+ optee->invoke_fn(OPTEE_SMC_DISABLE_SHM_CACHE, 0, 0, 0, 0, 0, 0,
+ 0, &res.smccc);
+ if (res.result.status == OPTEE_SMC_RETURN_ENOTAVAIL)
+ break; /* All shm's freed */
+ if (res.result.status == OPTEE_SMC_RETURN_OK) {
+ struct tee_shm *shm;
+
+ shm = reg_pair_to_ptr(res.result.shm_upper32,
+ res.result.shm_lower32);
+ tee_shm_free(shm);
+ } else {
+ optee_cq_wait_for_completion(&optee->call_queue, &w);
+ }
+ }
+ optee_cq_wait_final(&optee->call_queue, &w);
+}
diff --git a/drivers/tee/optee/core.c b/drivers/tee/optee/core.c
new file mode 100644
index 000000000000..58169e519422
--- /dev/null
+++ b/drivers/tee/optee/core.c
@@ -0,0 +1,622 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/arm-smccc.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/tee_drv.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include "optee_private.h"
+#include "optee_smc.h"
+
+#define DRIVER_NAME "optee"
+
+#define OPTEE_SHM_NUM_PRIV_PAGES 1
+
+/**
+ * optee_from_msg_param() - convert from OPTEE_MSG parameters to
+ * struct tee_param
+ * @params: subsystem internal parameter representation
+ * @num_params: number of elements in the parameter arrays
+ * @msg_params: OPTEE_MSG parameters
+ * Returns 0 on success or <0 on failure
+ */
+int optee_from_msg_param(struct tee_param *params, size_t num_params,
+ const struct optee_msg_param *msg_params)
+{
+ int rc;
+ size_t n;
+ struct tee_shm *shm;
+ phys_addr_t pa;
+
+ for (n = 0; n < num_params; n++) {
+ struct tee_param *p = params + n;
+ const struct optee_msg_param *mp = msg_params + n;
+ u32 attr = mp->attr & OPTEE_MSG_ATTR_TYPE_MASK;
+
+ switch (attr) {
+ case OPTEE_MSG_ATTR_TYPE_NONE:
+ p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_NONE;
+ memset(&p->u, 0, sizeof(p->u));
+ break;
+ case OPTEE_MSG_ATTR_TYPE_VALUE_INPUT:
+ case OPTEE_MSG_ATTR_TYPE_VALUE_OUTPUT:
+ case OPTEE_MSG_ATTR_TYPE_VALUE_INOUT:
+ p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT +
+ attr - OPTEE_MSG_ATTR_TYPE_VALUE_INPUT;
+ p->u.value.a = mp->u.value.a;
+ p->u.value.b = mp->u.value.b;
+ p->u.value.c = mp->u.value.c;
+ break;
+ case OPTEE_MSG_ATTR_TYPE_TMEM_INPUT:
+ case OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT:
+ case OPTEE_MSG_ATTR_TYPE_TMEM_INOUT:
+ p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT +
+ attr - OPTEE_MSG_ATTR_TYPE_TMEM_INPUT;
+ p->u.memref.size = mp->u.tmem.size;
+ shm = (struct tee_shm *)(unsigned long)
+ mp->u.tmem.shm_ref;
+ if (!shm) {
+ p->u.memref.shm_offs = 0;
+ p->u.memref.shm = NULL;
+ break;
+ }
+ rc = tee_shm_get_pa(shm, 0, &pa);
+ if (rc)
+ return rc;
+ p->u.memref.shm_offs = mp->u.tmem.buf_ptr - pa;
+ p->u.memref.shm = shm;
+
+ /* Check that the memref is covered by the shm object */
+ if (p->u.memref.size) {
+ size_t o = p->u.memref.shm_offs +
+ p->u.memref.size - 1;
+
+ rc = tee_shm_get_pa(shm, o, NULL);
+ if (rc)
+ return rc;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+/**
+ * optee_to_msg_param() - convert from struct tee_param to OPTEE_MSG parameters
+ * @msg_params: OPTEE_MSG parameters
+ * @num_params: number of elements in the parameter arrays
+ * @params: subsystem internal parameter representation
+ * Returns 0 on success or <0 on failure
+ */
+int optee_to_msg_param(struct optee_msg_param *msg_params, size_t num_params,
+ const struct tee_param *params)
+{
+ int rc;
+ size_t n;
+ phys_addr_t pa;
+
+ for (n = 0; n < num_params; n++) {
+ const struct tee_param *p = params + n;
+ struct optee_msg_param *mp = msg_params + n;
+
+ switch (p->attr) {
+ case TEE_IOCTL_PARAM_ATTR_TYPE_NONE:
+ mp->attr = TEE_IOCTL_PARAM_ATTR_TYPE_NONE;
+ memset(&mp->u, 0, sizeof(mp->u));
+ break;
+ case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT:
+ case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
+ case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
+ mp->attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT + p->attr -
+ TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT;
+ mp->u.value.a = p->u.value.a;
+ mp->u.value.b = p->u.value.b;
+ mp->u.value.c = p->u.value.c;
+ break;
+ case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT:
+ case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
+ case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
+ mp->attr = OPTEE_MSG_ATTR_TYPE_TMEM_INPUT +
+ p->attr -
+ TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
+ mp->u.tmem.shm_ref = (unsigned long)p->u.memref.shm;
+ mp->u.tmem.size = p->u.memref.size;
+ if (!p->u.memref.shm) {
+ mp->u.tmem.buf_ptr = 0;
+ break;
+ }
+ rc = tee_shm_get_pa(p->u.memref.shm,
+ p->u.memref.shm_offs, &pa);
+ if (rc)
+ return rc;
+ mp->u.tmem.buf_ptr = pa;
+ mp->attr |= OPTEE_MSG_ATTR_CACHE_PREDEFINED <<
+ OPTEE_MSG_ATTR_CACHE_SHIFT;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
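Both conversion helpers above exploit the fact that the INPUT/OUTPUT/INOUT attribute values are laid out contiguously, in the same order, in the OPTEE_MSG and TEE_IOCTL enums, so converting is just subtracting one base and adding the other. A standalone sketch of that mapping trick; the numeric values are illustrative, not the real constants.

#include <assert.h>

/* Illustrative stand-ins: both ranges list INPUT, OUTPUT, INOUT in order. */
enum msg_attr   { MSG_VALUE_INPUT = 1,  MSG_VALUE_OUTPUT,  MSG_VALUE_INOUT };
enum ioctl_attr { IOCTL_VALUE_INPUT = 17, IOCTL_VALUE_OUTPUT, IOCTL_VALUE_INOUT };

static enum ioctl_attr msg_to_ioctl(enum msg_attr a)
{
	/* Same trick as the driver: subtract one base, add the other. */
	return IOCTL_VALUE_INPUT + (a - MSG_VALUE_INPUT);
}

int main(void)
{
	assert(msg_to_ioctl(MSG_VALUE_OUTPUT) == IOCTL_VALUE_OUTPUT);
	assert(msg_to_ioctl(MSG_VALUE_INOUT) == IOCTL_VALUE_INOUT);
	return 0;
}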
+static void optee_get_version(struct tee_device *teedev,
+ struct tee_ioctl_version_data *vers)
+{
+ struct tee_ioctl_version_data v = {
+ .impl_id = TEE_IMPL_ID_OPTEE,
+ .impl_caps = TEE_OPTEE_CAP_TZ,
+ .gen_caps = TEE_GEN_CAP_GP,
+ };
+ *vers = v;
+}
+
+static int optee_open(struct tee_context *ctx)
+{
+ struct optee_context_data *ctxdata;
+ struct tee_device *teedev = ctx->teedev;
+ struct optee *optee = tee_get_drvdata(teedev);
+
+ ctxdata = kzalloc(sizeof(*ctxdata), GFP_KERNEL);
+ if (!ctxdata)
+ return -ENOMEM;
+
+ if (teedev == optee->supp_teedev) {
+ bool busy = true;
+
+ mutex_lock(&optee->supp.ctx_mutex);
+ if (!optee->supp.ctx) {
+ busy = false;
+ optee->supp.ctx = ctx;
+ }
+ mutex_unlock(&optee->supp.ctx_mutex);
+ if (busy) {
+ kfree(ctxdata);
+ return -EBUSY;
+ }
+ }
+
+ mutex_init(&ctxdata->mutex);
+ INIT_LIST_HEAD(&ctxdata->sess_list);
+
+ ctx->data = ctxdata;
+ return 0;
+}
+
+static void optee_release(struct tee_context *ctx)
+{
+ struct optee_context_data *ctxdata = ctx->data;
+ struct tee_device *teedev = ctx->teedev;
+ struct optee *optee = tee_get_drvdata(teedev);
+ struct tee_shm *shm;
+ struct optee_msg_arg *arg = NULL;
+ phys_addr_t parg;
+ struct optee_session *sess;
+ struct optee_session *sess_tmp;
+
+ if (!ctxdata)
+ return;
+
+ shm = tee_shm_alloc(ctx, sizeof(struct optee_msg_arg), TEE_SHM_MAPPED);
+ if (!IS_ERR(shm)) {
+ arg = tee_shm_get_va(shm, 0);
+ /*
+ * If va2pa fails for some reason, we can't call
+ * optee_close_session(), only free the memory. Secure OS
+ * will leak sessions and finally refuse more sessions, but
+ * we will at least let normal world reclaim its memory.
+ */
+ if (!IS_ERR(arg))
+ tee_shm_va2pa(shm, arg, &parg);
+ }
+
+ list_for_each_entry_safe(sess, sess_tmp, &ctxdata->sess_list,
+ list_node) {
+ list_del(&sess->list_node);
+ if (!IS_ERR_OR_NULL(arg)) {
+ memset(arg, 0, sizeof(*arg));
+ arg->cmd = OPTEE_MSG_CMD_CLOSE_SESSION;
+ arg->session = sess->session_id;
+ optee_do_call_with_arg(ctx, parg);
+ }
+ kfree(sess);
+ }
+ kfree(ctxdata);
+
+ if (!IS_ERR(shm))
+ tee_shm_free(shm);
+
+ ctx->data = NULL;
+
+ if (teedev == optee->supp_teedev) {
+ mutex_lock(&optee->supp.ctx_mutex);
+ optee->supp.ctx = NULL;
+ mutex_unlock(&optee->supp.ctx_mutex);
+ }
+}
+
+static struct tee_driver_ops optee_ops = {
+ .get_version = optee_get_version,
+ .open = optee_open,
+ .release = optee_release,
+ .open_session = optee_open_session,
+ .close_session = optee_close_session,
+ .invoke_func = optee_invoke_func,
+ .cancel_req = optee_cancel_req,
+};
+
+static struct tee_desc optee_desc = {
+ .name = DRIVER_NAME "-clnt",
+ .ops = &optee_ops,
+ .owner = THIS_MODULE,
+};
+
+static struct tee_driver_ops optee_supp_ops = {
+ .get_version = optee_get_version,
+ .open = optee_open,
+ .release = optee_release,
+ .supp_recv = optee_supp_recv,
+ .supp_send = optee_supp_send,
+};
+
+static struct tee_desc optee_supp_desc = {
+ .name = DRIVER_NAME "-supp",
+ .ops = &optee_supp_ops,
+ .owner = THIS_MODULE,
+ .flags = TEE_DESC_PRIVILEGED,
+};
+
+static bool optee_msg_api_uid_is_optee_api(optee_invoke_fn *invoke_fn)
+{
+ struct arm_smccc_res res;
+
+ invoke_fn(OPTEE_SMC_CALLS_UID, 0, 0, 0, 0, 0, 0, 0, &res);
+
+ if (res.a0 == OPTEE_MSG_UID_0 && res.a1 == OPTEE_MSG_UID_1 &&
+ res.a2 == OPTEE_MSG_UID_2 && res.a3 == OPTEE_MSG_UID_3)
+ return true;
+ return false;
+}
+
+static bool optee_msg_api_revision_is_compatible(optee_invoke_fn *invoke_fn)
+{
+ union {
+ struct arm_smccc_res smccc;
+ struct optee_smc_calls_revision_result result;
+ } res;
+
+ invoke_fn(OPTEE_SMC_CALLS_REVISION, 0, 0, 0, 0, 0, 0, 0, &res.smccc);
+
+ if (res.result.major == OPTEE_MSG_REVISION_MAJOR &&
+ (int)res.result.minor >= OPTEE_MSG_REVISION_MINOR)
+ return true;
+ return false;
+}
+
+static bool optee_msg_exchange_capabilities(optee_invoke_fn *invoke_fn,
+ u32 *sec_caps)
+{
+ union {
+ struct arm_smccc_res smccc;
+ struct optee_smc_exchange_capabilities_result result;
+ } res;
+ u32 a1 = 0;
+
+ /*
+ * TODO This isn't enough to tell if it's a UP system (from the kernel
+ * point of view) or not; is_smp() returns the information needed,
+ * but can't be called directly from here.
+ */
+ if (!IS_ENABLED(CONFIG_SMP) || nr_cpu_ids == 1)
+ a1 |= OPTEE_SMC_NSEC_CAP_UNIPROCESSOR;
+
+ invoke_fn(OPTEE_SMC_EXCHANGE_CAPABILITIES, a1, 0, 0, 0, 0, 0, 0,
+ &res.smccc);
+
+ if (res.result.status != OPTEE_SMC_RETURN_OK)
+ return false;
+
+ *sec_caps = res.result.capabilities;
+ return true;
+}
+
+static struct tee_shm_pool *
+optee_config_shm_memremap(optee_invoke_fn *invoke_fn, void **memremaped_shm)
+{
+ union {
+ struct arm_smccc_res smccc;
+ struct optee_smc_get_shm_config_result result;
+ } res;
+ struct tee_shm_pool *pool;
+ unsigned long vaddr;
+ phys_addr_t paddr;
+ size_t size;
+ phys_addr_t begin;
+ phys_addr_t end;
+ void *va;
+ struct tee_shm_pool_mem_info priv_info;
+ struct tee_shm_pool_mem_info dmabuf_info;
+
+ invoke_fn(OPTEE_SMC_GET_SHM_CONFIG, 0, 0, 0, 0, 0, 0, 0, &res.smccc);
+ if (res.result.status != OPTEE_SMC_RETURN_OK) {
+ pr_info("shm service not available\n");
+ return ERR_PTR(-ENOENT);
+ }
+
+ if (res.result.settings != OPTEE_SMC_SHM_CACHED) {
+ pr_err("only normal cached shared memory supported\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ begin = roundup(res.result.start, PAGE_SIZE);
+ end = rounddown(res.result.start + res.result.size, PAGE_SIZE);
+ paddr = begin;
+ size = end - begin;
+
+ if (size < 2 * OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE) {
+ pr_err("too small shared memory area\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ va = memremap(paddr, size, MEMREMAP_WB);
+ if (!va) {
+ pr_err("shared memory ioremap failed\n");
+ return ERR_PTR(-EINVAL);
+ }
+ vaddr = (unsigned long)va;
+
+ priv_info.vaddr = vaddr;
+ priv_info.paddr = paddr;
+ priv_info.size = OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE;
+ dmabuf_info.vaddr = vaddr + OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE;
+ dmabuf_info.paddr = paddr + OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE;
+ dmabuf_info.size = size - OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE;
+
+ pool = tee_shm_pool_alloc_res_mem(&priv_info, &dmabuf_info);
+ if (IS_ERR(pool)) {
+ memunmap(va);
+ goto out;
+ }
+
+ *memremaped_shm = va;
+out:
+ return pool;
+}
+
+/* Simple wrapper functions to be able to use a function pointer */
+static void optee_smccc_smc(unsigned long a0, unsigned long a1,
+ unsigned long a2, unsigned long a3,
+ unsigned long a4, unsigned long a5,
+ unsigned long a6, unsigned long a7,
+ struct arm_smccc_res *res)
+{
+ arm_smccc_smc(a0, a1, a2, a3, a4, a5, a6, a7, res);
+}
+
+static void optee_smccc_hvc(unsigned long a0, unsigned long a1,
+ unsigned long a2, unsigned long a3,
+ unsigned long a4, unsigned long a5,
+ unsigned long a6, unsigned long a7,
+ struct arm_smccc_res *res)
+{
+ arm_smccc_hvc(a0, a1, a2, a3, a4, a5, a6, a7, res);
+}
+
+static optee_invoke_fn *get_invoke_func(struct device_node *np)
+{
+ const char *method;
+
+ pr_info("probing for conduit method from DT.\n");
+
+ if (of_property_read_string(np, "method", &method)) {
+ pr_warn("missing \"method\" property\n");
+ return ERR_PTR(-ENXIO);
+ }
+
+ if (!strcmp("hvc", method))
+ return optee_smccc_hvc;
+ else if (!strcmp("smc", method))
+ return optee_smccc_smc;
+
+ pr_warn("invalid \"method\" property: %s\n", method);
+ return ERR_PTR(-EINVAL);
+}
+
+static struct optee *optee_probe(struct device_node *np)
+{
+ optee_invoke_fn *invoke_fn;
+ struct tee_shm_pool *pool;
+ struct optee *optee = NULL;
+ void *memremaped_shm = NULL;
+ struct tee_device *teedev;
+ u32 sec_caps;
+ int rc;
+
+ invoke_fn = get_invoke_func(np);
+ if (IS_ERR(invoke_fn))
+ return (void *)invoke_fn;
+
+ if (!optee_msg_api_uid_is_optee_api(invoke_fn)) {
+ pr_warn("api uid mismatch\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (!optee_msg_api_revision_is_compatible(invoke_fn)) {
+ pr_warn("api revision mismatch\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (!optee_msg_exchange_capabilities(invoke_fn, &sec_caps)) {
+ pr_warn("capabilities mismatch\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ /*
+ * We have no other option for shared memory: if secure world
+ * doesn't have any reserved memory we can use, we can't continue.
+ */
+ if (!(sec_caps & OPTEE_SMC_SEC_CAP_HAVE_RESERVED_SHM))
+ return ERR_PTR(-EINVAL);
+
+ pool = optee_config_shm_memremap(invoke_fn, &memremaped_shm);
+ if (IS_ERR(pool))
+ return (void *)pool;
+
+ optee = kzalloc(sizeof(*optee), GFP_KERNEL);
+ if (!optee) {
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ optee->invoke_fn = invoke_fn;
+
+ teedev = tee_device_alloc(&optee_desc, NULL, pool, optee);
+ if (IS_ERR(teedev)) {
+ rc = PTR_ERR(teedev);
+ goto err;
+ }
+ optee->teedev = teedev;
+
+ teedev = tee_device_alloc(&optee_supp_desc, NULL, pool, optee);
+ if (IS_ERR(teedev)) {
+ rc = PTR_ERR(teedev);
+ goto err;
+ }
+ optee->supp_teedev = teedev;
+
+ rc = tee_device_register(optee->teedev);
+ if (rc)
+ goto err;
+
+ rc = tee_device_register(optee->supp_teedev);
+ if (rc)
+ goto err;
+
+ mutex_init(&optee->call_queue.mutex);
+ INIT_LIST_HEAD(&optee->call_queue.waiters);
+ optee_wait_queue_init(&optee->wait_queue);
+ optee_supp_init(&optee->supp);
+ optee->memremaped_shm = memremaped_shm;
+ optee->pool = pool;
+
+ optee_enable_shm_cache(optee);
+
+ pr_info("initialized driver\n");
+ return optee;
+err:
+ if (optee) {
+ /*
+ * tee_device_unregister() is safe to call even if the
+ * devices haven't been registered with
+ * tee_device_register() yet.
+ */
+ tee_device_unregister(optee->supp_teedev);
+ tee_device_unregister(optee->teedev);
+ kfree(optee);
+ }
+ if (pool)
+ tee_shm_pool_free(pool);
+ if (memremaped_shm)
+ memunmap(memremaped_shm);
+ return ERR_PTR(rc);
+}
+
+static void optee_remove(struct optee *optee)
+{
+ /*
+ * Ask OP-TEE to free all cached shared memory objects to decrease
+ * reference counters and also avoid wild pointers in secure world
+ * into the old shared memory range.
+ */
+ optee_disable_shm_cache(optee);
+
+ /*
+ * The two devices have to be unregistered before we can free the
+ * other resources.
+ */
+ tee_device_unregister(optee->supp_teedev);
+ tee_device_unregister(optee->teedev);
+
+ tee_shm_pool_free(optee->pool);
+ if (optee->memremaped_shm)
+ memunmap(optee->memremaped_shm);
+ optee_wait_queue_exit(&optee->wait_queue);
+ optee_supp_uninit(&optee->supp);
+ mutex_destroy(&optee->call_queue.mutex);
+
+ kfree(optee);
+}
+
+static const struct of_device_id optee_match[] = {
+ { .compatible = "linaro,optee-tz" },
+ {},
+};
+
+static struct optee *optee_svc;
+
+static int __init optee_driver_init(void)
+{
+ struct device_node *fw_np;
+ struct device_node *np;
+ struct optee *optee;
+
+ /* Node is supposed to be below /firmware */
+ fw_np = of_find_node_by_name(NULL, "firmware");
+ if (!fw_np)
+ return -ENODEV;
+
+ np = of_find_matching_node(fw_np, optee_match);
+ of_node_put(fw_np);
+ if (!np)
+ return -ENODEV;
+
+ optee = optee_probe(np);
+ of_node_put(np);
+
+ if (IS_ERR(optee))
+ return PTR_ERR(optee);
+
+ optee_svc = optee;
+
+ return 0;
+}
+module_init(optee_driver_init);
+
+static void __exit optee_driver_exit(void)
+{
+ struct optee *optee = optee_svc;
+
+ optee_svc = NULL;
+ if (optee)
+ optee_remove(optee);
+}
+module_exit(optee_driver_exit);
+
+MODULE_AUTHOR("Linaro");
+MODULE_DESCRIPTION("OP-TEE driver");
+MODULE_SUPPORTED_DEVICE("");
+MODULE_VERSION("1.0");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/tee/optee/optee_msg.h b/drivers/tee/optee/optee_msg.h
new file mode 100644
index 000000000000..dd7a06ee0462
--- /dev/null
+++ b/drivers/tee/optee/optee_msg.h
@@ -0,0 +1,418 @@
+/*
+ * Copyright (c) 2015-2016, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _OPTEE_MSG_H
+#define _OPTEE_MSG_H
+
+#include <linux/bitops.h>
+#include <linux/types.h>
+
+/*
+ * This file defines the OP-TEE message protocol used to communicate
+ * with an instance of OP-TEE running in secure world.
+ *
+ * This file is divided into three sections.
+ * 1. Formatting of messages.
+ * 2. Requests from normal world
+ * 3. Requests from secure world, Remote Procedure Call (RPC), handled by
+ * tee-supplicant.
+ */
+
+/*****************************************************************************
+ * Part 1 - formatting of messages
+ *****************************************************************************/
+
+#define OPTEE_MSG_ATTR_TYPE_NONE 0x0
+#define OPTEE_MSG_ATTR_TYPE_VALUE_INPUT 0x1
+#define OPTEE_MSG_ATTR_TYPE_VALUE_OUTPUT 0x2
+#define OPTEE_MSG_ATTR_TYPE_VALUE_INOUT 0x3
+#define OPTEE_MSG_ATTR_TYPE_RMEM_INPUT 0x5
+#define OPTEE_MSG_ATTR_TYPE_RMEM_OUTPUT 0x6
+#define OPTEE_MSG_ATTR_TYPE_RMEM_INOUT 0x7
+#define OPTEE_MSG_ATTR_TYPE_TMEM_INPUT 0x9
+#define OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT 0xa
+#define OPTEE_MSG_ATTR_TYPE_TMEM_INOUT 0xb
+
+#define OPTEE_MSG_ATTR_TYPE_MASK GENMASK(7, 0)
+
+/*
+ * Meta parameter to be absorbed by the Secure OS and not passed
+ * to the Trusted Application.
+ *
+ * Currently only used with OPTEE_MSG_CMD_OPEN_SESSION.
+ */
+#define OPTEE_MSG_ATTR_META BIT(8)
+
+/*
+ * The temporary shared memory object is not physically contiguous and this
+ * temp memref is followed by another fragment until the last temp memref
+ * that doesn't have this bit set.
+ */
+#define OPTEE_MSG_ATTR_FRAGMENT BIT(9)
+
+/*
+ * Memory attributes for caching passed with temp memrefs. The actual value
+ * used is defined outside the message protocol with the exception of
+ * OPTEE_MSG_ATTR_CACHE_PREDEFINED which means the attributes already
+ * defined for the memory range should be used. If optee_smc.h is used as
+ * bearer of this protocol OPTEE_SMC_SHM_* is used for values.
+ */
+#define OPTEE_MSG_ATTR_CACHE_SHIFT 16
+#define OPTEE_MSG_ATTR_CACHE_MASK GENMASK(2, 0)
+#define OPTEE_MSG_ATTR_CACHE_PREDEFINED 0
+
+/*
+ * Same values as TEE_LOGIN_* from TEE Internal API
+ */
+#define OPTEE_MSG_LOGIN_PUBLIC 0x00000000
+#define OPTEE_MSG_LOGIN_USER 0x00000001
+#define OPTEE_MSG_LOGIN_GROUP 0x00000002
+#define OPTEE_MSG_LOGIN_APPLICATION 0x00000004
+#define OPTEE_MSG_LOGIN_APPLICATION_USER 0x00000005
+#define OPTEE_MSG_LOGIN_APPLICATION_GROUP 0x00000006
+
+/**
+ * struct optee_msg_param_tmem - temporary memory reference parameter
+ * @buf_ptr: Address of the buffer
+ * @size: Size of the buffer
+ * @shm_ref: Temporary shared memory reference, pointer to a struct tee_shm
+ *
+ * Secure and normal world communicate pointers as physical addresses
+ * instead of virtual addresses. This is because secure and normal world
+ * have completely independent memory mappings. Normal world can even have
+ * a hypervisor which needs to translate the guest physical address (AKA
+ * IPA in ARM documentation) to a real physical address before passing the
+ * structure to secure world.
+ */
+struct optee_msg_param_tmem {
+ u64 buf_ptr;
+ u64 size;
+ u64 shm_ref;
+};
+
+/**
+ * struct optee_msg_param_rmem - registered memory reference parameter
+ * @offs: Offset into shared memory reference
+ * @size: Size of the buffer
+ * @shm_ref: Shared memory reference, pointer to a struct tee_shm
+ */
+struct optee_msg_param_rmem {
+ u64 offs;
+ u64 size;
+ u64 shm_ref;
+};
+
+/**
+ * struct optee_msg_param_value - opaque value parameter
+ *
+ * Value parameters are passed unchecked between normal and secure world.
+ */
+struct optee_msg_param_value {
+ u64 a;
+ u64 b;
+ u64 c;
+};
+
+/**
+ * struct optee_msg_param - parameter used together with struct optee_msg_arg
+ * @attr: attributes
+ * @tmem: parameter by temporary memory reference
+ * @rmem: parameter by registered memory reference
+ * @value: parameter by opaque value
+ *
+ * @attr & OPTEE_MSG_ATTR_TYPE_MASK indicates if tmem, rmem or value is used in
+ * the union. OPTEE_MSG_ATTR_TYPE_VALUE_* indicates value,
+ * OPTEE_MSG_ATTR_TYPE_TMEM_* indicates tmem and
+ * OPTEE_MSG_ATTR_TYPE_RMEM_* indicates rmem.
+ * OPTEE_MSG_ATTR_TYPE_NONE indicates that none of the members are used.
+ */
+struct optee_msg_param {
+ u64 attr;
+ union {
+ struct optee_msg_param_tmem tmem;
+ struct optee_msg_param_rmem rmem;
+ struct optee_msg_param_value value;
+ } u;
+};
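Since the attr field alone decides which union member is meaningful, a reader of a parameter is expected to switch on the masked type first; a hedged sketch, where use_value() and use_tmem() are hypothetical placeholders:

	/* Sketch, not part of the patch: pick the union member based on attr. */
	switch (mp->attr & OPTEE_MSG_ATTR_TYPE_MASK) {
	case OPTEE_MSG_ATTR_TYPE_VALUE_INPUT:
		use_value(mp->u.value.a, mp->u.value.b, mp->u.value.c);
		break;
	case OPTEE_MSG_ATTR_TYPE_TMEM_INPUT:
		use_tmem(mp->u.tmem.buf_ptr, mp->u.tmem.size);
		break;
	case OPTEE_MSG_ATTR_TYPE_NONE:
	default:
		break;
	}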
+
+/**
+ * struct optee_msg_arg - call argument
+ * @cmd: Command, one of OPTEE_MSG_CMD_* or OPTEE_MSG_RPC_CMD_*
+ * @func: Trusted Application function, specific to the Trusted Application,
+ * used if cmd == OPTEE_MSG_CMD_INVOKE_COMMAND
+ * @session: In parameter for all OPTEE_MSG_CMD_* except
+ * OPTEE_MSG_CMD_OPEN_SESSION where it's an output parameter instead
+ * @cancel_id: Cancellation id, a unique value to identify this request
+ * @ret: return value
+ * @ret_origin: origin of the return value
+ * @num_params: number of parameters supplied to the OS Command
+ * @params: the parameters supplied to the OS Command
+ *
+ * All normal calls to the Trusted OS use this struct. If cmd requires more
+ * information than these fields hold, it can be passed as a parameter
+ * tagged as meta (setting the OPTEE_MSG_ATTR_META bit in the corresponding
+ * attr field). All parameters tagged as meta have to come first.
+ *
+ * Temp memref parameters can be fragmented if supported by the Trusted OS
+ * (when optee_smc.h is bearer of this protocol this is indicated with
+ * OPTEE_SMC_SEC_CAP_UNREGISTERED_SHM). If a logical memref parameter is
+ * fragmented, all but the last fragment have the OPTEE_MSG_ATTR_FRAGMENT
+ * bit set in attr. Even if a memref is fragmented it will still be
+ * presented as a single logical memref to the Trusted Application.
+ */
+struct optee_msg_arg {
+ u32 cmd;
+ u32 func;
+ u32 session;
+ u32 cancel_id;
+ u32 pad;
+ u32 ret;
+ u32 ret_origin;
+ u32 num_params;
+
+ /* num_params tells the actual number of elements in params */
+ struct optee_msg_param params[0];
+};
+
+/**
+ * OPTEE_MSG_GET_ARG_SIZE - return size of struct optee_msg_arg
+ *
+ * @num_params: Number of parameters embedded in the struct optee_msg_arg
+ *
+ * Returns the size of the struct optee_msg_arg together with the number
+ * of embedded parameters.
+ */
+#define OPTEE_MSG_GET_ARG_SIZE(num_params) \
+ (sizeof(struct optee_msg_arg) + \
+ sizeof(struct optee_msg_param) * (num_params))
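As a hedged illustration of how this macro is meant to be combined with the shared memory pool (ctx is an assumed struct tee_context, and the two-parameter count is arbitrary):

	/* Sketch, not part of the patch: allocate room for a two-parameter call. */
	size_t sz = OPTEE_MSG_GET_ARG_SIZE(2);
	struct tee_shm *shm = tee_shm_alloc(ctx, sz, TEE_SHM_MAPPED);
	struct optee_msg_arg *arg;

	if (IS_ERR(shm))
		return PTR_ERR(shm);
	arg = tee_shm_get_va(shm, 0);	/* arg->params[] now has room for 2 entries */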
+
+/*****************************************************************************
+ * Part 2 - requests from normal world
+ *****************************************************************************/
+
+/*
+ * Return the following UID if using API specified in this file without
+ * further extensions:
+ * 384fb3e0-e7f8-11e3-af63-0002a5d5c51b.
+ * Represented in 4 32-bit words in OPTEE_MSG_UID_0, OPTEE_MSG_UID_1,
+ * OPTEE_MSG_UID_2, OPTEE_MSG_UID_3.
+ */
+#define OPTEE_MSG_UID_0 0x384fb3e0
+#define OPTEE_MSG_UID_1 0xe7f811e3
+#define OPTEE_MSG_UID_2 0xaf630002
+#define OPTEE_MSG_UID_3 0xa5d5c51b
+#define OPTEE_MSG_FUNCID_CALLS_UID 0xFF01
+
+/*
+ * Returns 2.0 if using API specified in this file without further
+ * extensions. Represented in 2 32-bit words in OPTEE_MSG_REVISION_MAJOR
+ * and OPTEE_MSG_REVISION_MINOR
+ */
+#define OPTEE_MSG_REVISION_MAJOR 2
+#define OPTEE_MSG_REVISION_MINOR 0
+#define OPTEE_MSG_FUNCID_CALLS_REVISION 0xFF03
+
+/*
+ * Get UUID of Trusted OS.
+ *
+ * Used by non-secure world to figure out which Trusted OS is installed.
+ * Note that returned UUID is the UUID of the Trusted OS, not of the API.
+ *
+ * Returns UUID in 4 32-bit words in the same way as
+ * OPTEE_MSG_FUNCID_CALLS_UID described above.
+ */
+#define OPTEE_MSG_OS_OPTEE_UUID_0 0x486178e0
+#define OPTEE_MSG_OS_OPTEE_UUID_1 0xe7f811e3
+#define OPTEE_MSG_OS_OPTEE_UUID_2 0xbc5e0002
+#define OPTEE_MSG_OS_OPTEE_UUID_3 0xa5d5c51b
+#define OPTEE_MSG_FUNCID_GET_OS_UUID 0x0000
+
+/*
+ * Get revision of Trusted OS.
+ *
+ * Used by non-secure world to figure out which version of the Trusted OS
+ * is installed. Note that the returned revision is the revision of the
+ * Trusted OS, not of the API.
+ *
+ * Returns revision in 2 32-bit words in the same way as
+ * OPTEE_MSG_FUNCID_CALLS_REVISION described above.
+ */
+#define OPTEE_MSG_FUNCID_GET_OS_REVISION 0x0001
+
+/*
+ * Do a secure call with struct optee_msg_arg as argument
+ * The OPTEE_MSG_CMD_* below defines what goes in struct optee_msg_arg::cmd
+ *
+ * OPTEE_MSG_CMD_OPEN_SESSION opens a session to a Trusted Application.
+ * The first two parameters are tagged as meta, holding two value
+ * parameters to pass the following information:
+ * param[0].u.value.a-b uuid of Trusted Application
+ * param[1].u.value.a-b uuid of Client
+ * param[1].u.value.c Login class of client OPTEE_MSG_LOGIN_*
+ *
+ * OPTEE_MSG_CMD_INVOKE_COMMAND invokes a command in a previously opened
+ * session to a Trusted Application. struct optee_msg_arg::func is Trusted
+ * Application function, specific to the Trusted Application.
+ *
+ * OPTEE_MSG_CMD_CLOSE_SESSION closes a previously opened session to
+ * Trusted Application.
+ *
+ * OPTEE_MSG_CMD_CANCEL cancels a currently invoked command.
+ *
+ * OPTEE_MSG_CMD_REGISTER_SHM registers a shared memory reference. The
+ * information is passed as:
+ * [in] param[0].attr OPTEE_MSG_ATTR_TYPE_TMEM_INPUT
+ * [| OPTEE_MSG_ATTR_FRAGMENT]
+ * [in] param[0].u.tmem.buf_ptr physical address (of first fragment)
+ * [in] param[0].u.tmem.size size (of first fragment)
+ * [in] param[0].u.tmem.shm_ref holds shared memory reference
+ * ...
+ * The shared memory can optionally be fragmented, temp memrefs can follow
+ * each other with all but the last with the OPTEE_MSG_ATTR_FRAGMENT bit set.
+ *
+ * OPTEE_MSG_CMD_UNREGISTER_SHM unregisters a previously registered shared
+ * memory reference. The information is passed as:
+ * [in] param[0].attr OPTEE_MSG_ATTR_TYPE_RMEM_INPUT
+ * [in] param[0].u.rmem.shm_ref holds shared memory reference
+ * [in] param[0].u.rmem.offs 0
+ * [in] param[0].u.rmem.size 0
+ */
+#define OPTEE_MSG_CMD_OPEN_SESSION 0
+#define OPTEE_MSG_CMD_INVOKE_COMMAND 1
+#define OPTEE_MSG_CMD_CLOSE_SESSION 2
+#define OPTEE_MSG_CMD_CANCEL 3
+#define OPTEE_MSG_CMD_REGISTER_SHM 4
+#define OPTEE_MSG_CMD_UNREGISTER_SHM 5
+#define OPTEE_MSG_FUNCID_CALL_WITH_ARG 0x0004
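A hedged sketch of how the open-session meta parameters described above are laid out before the call (arg and num_user_params are assumptions, not names from the patch):

	/* Sketch, not part of the patch: meta params of OPTEE_MSG_CMD_OPEN_SESSION. */
	arg->cmd = OPTEE_MSG_CMD_OPEN_SESSION;
	arg->num_params = 2 + num_user_params;
	arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT | OPTEE_MSG_ATTR_META;
	arg->params[1].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT | OPTEE_MSG_ATTR_META;
	/* params[0].u.value.a/b hold the TA UUID, params[1].u.value.a/b the client UUID */
	arg->params[1].u.value.c = OPTEE_MSG_LOGIN_PUBLIC;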
+
+/*****************************************************************************
+ * Part 3 - Requests from secure world, RPC
+ *****************************************************************************/
+
+/*
+ * All RPC is done with a struct optee_msg_arg as bearer of information,
+ * struct optee_msg_arg::arg holds values defined by OPTEE_MSG_RPC_CMD_* below
+ *
+ * RPC communication with tee-supplicant is reversed compared to normal
+ * client communication described above. The supplicant receives requests
+ * and sends responses.
+ */
+
+/*
+ * Load a TA into memory, defined in tee-supplicant
+ */
+#define OPTEE_MSG_RPC_CMD_LOAD_TA 0
+
+/*
+ * Reserved
+ */
+#define OPTEE_MSG_RPC_CMD_RPMB 1
+
+/*
+ * File system access, defined in tee-supplicant
+ */
+#define OPTEE_MSG_RPC_CMD_FS 2
+
+/*
+ * Get time
+ *
+ * Returns number of seconds and nanoseconds since the Epoch,
+ * 1970-01-01 00:00:00 +0000 (UTC).
+ *
+ * [out] param[0].u.value.a Number of seconds
+ * [out] param[0].u.value.b Number of nanoseconds.
+ */
+#define OPTEE_MSG_RPC_CMD_GET_TIME 3
+
+/*
+ * Wait queue primitive, helper for secure world to implement a wait queue.
+ *
+ * If secure world needs to wait for a secure world mutex it issues a sleep
+ * request instead of spinning in secure world. Conversely, a wakeup
+ * request is issued when a secure world mutex with a waiting thread is
+ * unlocked.
+ *
+ * Waiting on a key
+ * [in] param[0].u.value.a OPTEE_MSG_RPC_WAIT_QUEUE_SLEEP
+ * [in] param[0].u.value.b wait key
+ *
+ * Waking up a key
+ * [in] param[0].u.value.a OPTEE_MSG_RPC_WAIT_QUEUE_WAKEUP
+ * [in] param[0].u.value.b wakeup key
+ */
+#define OPTEE_MSG_RPC_CMD_WAIT_QUEUE 4
+#define OPTEE_MSG_RPC_WAIT_QUEUE_SLEEP 0
+#define OPTEE_MSG_RPC_WAIT_QUEUE_WAKEUP 1
+
+/*
+ * Suspend execution
+ *
+ * [in] param[0].u.value.a number of milliseconds to suspend
+ */
+#define OPTEE_MSG_RPC_CMD_SUSPEND 5
+
+/*
+ * Allocate a piece of shared memory
+ *
+ * Shared memory can optionally be fragmented; to support that, additional
+ * spare param entries are allocated to make room for any fragments.
+ * The spare param entries have .attr = OPTEE_MSG_ATTR_TYPE_NONE when
+ * unused. All returned temp memrefs except the last should have the
+ * OPTEE_MSG_ATTR_FRAGMENT bit set in the attr field.
+ *
+ * [in] param[0].u.value.a type of memory one of
+ * OPTEE_MSG_RPC_SHM_TYPE_* below
+ * [in] param[0].u.value.b requested size
+ * [in] param[0].u.value.c required alignment
+ *
+ * [out] param[0].u.tmem.buf_ptr physical address (of first fragment)
+ * [out] param[0].u.tmem.size size (of first fragment)
+ * [out] param[0].u.tmem.shm_ref shared memory reference
+ * ...
+ * [out] param[n].u.tmem.buf_ptr physical address
+ * [out] param[n].u.tmem.size size
+ * [out] param[n].u.tmem.shm_ref shared memory reference (same value
+ * as in param[n-1].u.tmem.shm_ref)
+ */
+#define OPTEE_MSG_RPC_CMD_SHM_ALLOC 6
+/* Memory that can be shared with a non-secure user space application */
+#define OPTEE_MSG_RPC_SHM_TYPE_APPL 0
+/* Memory only shared with non-secure kernel */
+#define OPTEE_MSG_RPC_SHM_TYPE_KERNEL 1
+
+/*
+ * Free shared memory previously allocated with OPTEE_MSG_RPC_CMD_SHM_ALLOC
+ *
+ * [in] param[0].u.value.a type of memory one of
+ * OPTEE_MSG_RPC_SHM_TYPE_* above
+ * [in] param[0].u.value.b value of shared memory reference
+ * returned in param[0].u.tmem.shm_ref
+ * above
+ */
+#define OPTEE_MSG_RPC_CMD_SHM_FREE 7
+
+#endif /* _OPTEE_MSG_H */
diff --git a/drivers/tee/optee/optee_private.h b/drivers/tee/optee/optee_private.h
new file mode 100644
index 000000000000..c374cd594314
--- /dev/null
+++ b/drivers/tee/optee/optee_private.h
@@ -0,0 +1,183 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef OPTEE_PRIVATE_H
+#define OPTEE_PRIVATE_H
+
+#include <linux/arm-smccc.h>
+#include <linux/semaphore.h>
+#include <linux/tee_drv.h>
+#include <linux/types.h>
+#include "optee_msg.h"
+
+#define OPTEE_MAX_ARG_SIZE 1024
+
+/* Some Global Platform error codes used in this driver */
+#define TEEC_SUCCESS 0x00000000
+#define TEEC_ERROR_BAD_PARAMETERS 0xFFFF0006
+#define TEEC_ERROR_COMMUNICATION 0xFFFF000E
+#define TEEC_ERROR_OUT_OF_MEMORY 0xFFFF000C
+
+#define TEEC_ORIGIN_COMMS 0x00000002
+
+typedef void (optee_invoke_fn)(unsigned long, unsigned long, unsigned long,
+ unsigned long, unsigned long, unsigned long,
+ unsigned long, unsigned long,
+ struct arm_smccc_res *);
+
+struct optee_call_queue {
+ /* Serializes access to this struct */
+ struct mutex mutex;
+ struct list_head waiters;
+};
+
+struct optee_wait_queue {
+ /* Serializes access to this struct */
+ struct mutex mu;
+ struct list_head db;
+};
+
+/**
+ * struct optee_supp - supplicant synchronization struct
+ * @ctx: the context of the currently connected supplicant;
+ * if !NULL the supplicant device is available for use,
+ * else busy
+ * @ctx_mutex: held while accessing @ctx
+ * @func: supplicant function id to call
+ * @ret: call return value
+ * @num_params: number of elements in @param
+ * @param: parameters for @func
+ * @req_posted: if true, a request has been posted to the supplicant
+ * @supp_next_send: if true, next step is for supplicant to send response
+ * @thrd_mutex: held by the thread doing a request to supplicant
+ * @supp_mutex: held by supplicant while operating on this struct
+ * @data_to_supp: supplicant is waiting on this for next request
+ * @data_from_supp: requesting thread is waiting on this to get the result
+ */
+struct optee_supp {
+ struct tee_context *ctx;
+ /* Serializes access of ctx */
+ struct mutex ctx_mutex;
+
+ u32 func;
+ u32 ret;
+ size_t num_params;
+ struct tee_param *param;
+
+ bool req_posted;
+ bool supp_next_send;
+ /* Serializes access to this struct for requesting thread */
+ struct mutex thrd_mutex;
+ /* Serializes access to this struct for supplicant threads */
+ struct mutex supp_mutex;
+ struct completion data_to_supp;
+ struct completion data_from_supp;
+};
+
+/**
+ * struct optee - main service struct
+ * @supp_teedev: supplicant device
+ * @teedev: client device
+ * @invoke_fn: function to issue smc or hvc
+ * @call_queue: queue of threads waiting to call @invoke_fn
+ * @wait_queue: queue of threads from secure world waiting for a
+ * secure world sync object
+ * @supp: supplicant synchronization struct for RPC to supplicant
+ * @pool: shared memory pool
+ * @memremaped_shm: virtual address of memory in shared memory pool
+ */
+struct optee {
+ struct tee_device *supp_teedev;
+ struct tee_device *teedev;
+ optee_invoke_fn *invoke_fn;
+ struct optee_call_queue call_queue;
+ struct optee_wait_queue wait_queue;
+ struct optee_supp supp;
+ struct tee_shm_pool *pool;
+ void *memremaped_shm;
+};
+
+struct optee_session {
+ struct list_head list_node;
+ u32 session_id;
+};
+
+struct optee_context_data {
+ /* Serializes access to this struct */
+ struct mutex mutex;
+ struct list_head sess_list;
+};
+
+struct optee_rpc_param {
+ u32 a0;
+ u32 a1;
+ u32 a2;
+ u32 a3;
+ u32 a4;
+ u32 a5;
+ u32 a6;
+ u32 a7;
+};
+
+void optee_handle_rpc(struct tee_context *ctx, struct optee_rpc_param *param);
+
+void optee_wait_queue_init(struct optee_wait_queue *wq);
+void optee_wait_queue_exit(struct optee_wait_queue *wq);
+
+u32 optee_supp_thrd_req(struct tee_context *ctx, u32 func, size_t num_params,
+ struct tee_param *param);
+
+int optee_supp_read(struct tee_context *ctx, void __user *buf, size_t len);
+int optee_supp_write(struct tee_context *ctx, void __user *buf, size_t len);
+void optee_supp_init(struct optee_supp *supp);
+void optee_supp_uninit(struct optee_supp *supp);
+
+int optee_supp_recv(struct tee_context *ctx, u32 *func, u32 *num_params,
+ struct tee_param *param);
+int optee_supp_send(struct tee_context *ctx, u32 ret, u32 num_params,
+ struct tee_param *param);
+
+u32 optee_do_call_with_arg(struct tee_context *ctx, phys_addr_t parg);
+int optee_open_session(struct tee_context *ctx,
+ struct tee_ioctl_open_session_arg *arg,
+ struct tee_param *param);
+int optee_close_session(struct tee_context *ctx, u32 session);
+int optee_invoke_func(struct tee_context *ctx, struct tee_ioctl_invoke_arg *arg,
+ struct tee_param *param);
+int optee_cancel_req(struct tee_context *ctx, u32 cancel_id, u32 session);
+
+void optee_enable_shm_cache(struct optee *optee);
+void optee_disable_shm_cache(struct optee *optee);
+
+int optee_from_msg_param(struct tee_param *params, size_t num_params,
+ const struct optee_msg_param *msg_params);
+int optee_to_msg_param(struct optee_msg_param *msg_params, size_t num_params,
+ const struct tee_param *params);
+
+/*
+ * Small helpers
+ */
+
+static inline void *reg_pair_to_ptr(u32 reg0, u32 reg1)
+{
+ return (void *)(unsigned long)(((u64)reg0 << 32) | reg1);
+}
+
+static inline void reg_pair_from_64(u32 *reg0, u32 *reg1, u64 val)
+{
+ *reg0 = val >> 32;
+ *reg1 = val;
+}
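A self-contained sketch of how these register-pair helpers round-trip a 64-bit value (illustration only, not part of the patch):

	/* Sketch: split and recombine a shared memory cookie. */
	u64 cookie = 0x123456789abcdef0ULL;
	u32 hi, lo;
	void *p;

	reg_pair_from_64(&hi, &lo, cookie);	/* hi = 0x12345678, lo = 0x9abcdef0 */
	p = reg_pair_to_ptr(hi, lo);		/* equals (void *)cookie on 64-bit builds */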
+
+#endif /*OPTEE_PRIVATE_H*/
diff --git a/drivers/tee/optee/optee_smc.h b/drivers/tee/optee/optee_smc.h
new file mode 100644
index 000000000000..13b7c98cdf25
--- /dev/null
+++ b/drivers/tee/optee/optee_smc.h
@@ -0,0 +1,450 @@
+/*
+ * Copyright (c) 2015-2016, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef OPTEE_SMC_H
+#define OPTEE_SMC_H
+
+#include <linux/arm-smccc.h>
+#include <linux/bitops.h>
+
+#define OPTEE_SMC_STD_CALL_VAL(func_num) \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_STD_CALL, ARM_SMCCC_SMC_32, \
+ ARM_SMCCC_OWNER_TRUSTED_OS, (func_num))
+#define OPTEE_SMC_FAST_CALL_VAL(func_num) \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_SMC_32, \
+ ARM_SMCCC_OWNER_TRUSTED_OS, (func_num))
+
+/*
+ * Function specified by SMC Calling convention.
+ */
+#define OPTEE_SMC_FUNCID_CALLS_COUNT 0xFF00
+#define OPTEE_SMC_CALLS_COUNT \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_SMC_32, \
+ ARM_SMCCC_OWNER_TRUSTED_OS_END, \
+ OPTEE_SMC_FUNCID_CALLS_COUNT)
+
+/*
+ * Normal cached memory (write-back), shareable for SMP systems and not
+ * shareable for UP systems.
+ */
+#define OPTEE_SMC_SHM_CACHED 1
+
+/*
+ * a0..a7 is used as register names in the descriptions below, on arm32
+ * that translates to r0..r7 and on arm64 to w0..w7. In both cases it's
+ * 32-bit registers.
+ */
+
+/*
+ * Function specified by SMC Calling convention
+ *
+ * Return the following UID if using API specified in this file
+ * without further extensions:
+ * 384fb3e0-e7f8-11e3-af63-0002a5d5c51b
+ * see also OPTEE_MSG_UID_* in optee_msg.h
+ */
+#define OPTEE_SMC_FUNCID_CALLS_UID OPTEE_MSG_FUNCID_CALLS_UID
+#define OPTEE_SMC_CALLS_UID \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_SMC_32, \
+ ARM_SMCCC_OWNER_TRUSTED_OS_END, \
+ OPTEE_SMC_FUNCID_CALLS_UID)
+
+/*
+ * Function specified by SMC Calling convention
+ *
+ * Returns 2.0 if using API specified in this file without further extensions.
+ * see also OPTEE_MSG_REVISION_* in optee_msg.h
+ */
+#define OPTEE_SMC_FUNCID_CALLS_REVISION OPTEE_MSG_FUNCID_CALLS_REVISION
+#define OPTEE_SMC_CALLS_REVISION \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_SMC_32, \
+ ARM_SMCCC_OWNER_TRUSTED_OS_END, \
+ OPTEE_SMC_FUNCID_CALLS_REVISION)
+
+struct optee_smc_calls_revision_result {
+ unsigned long major;
+ unsigned long minor;
+ unsigned long reserved0;
+ unsigned long reserved1;
+};
+
+/*
+ * Get UUID of Trusted OS.
+ *
+ * Used by non-secure world to figure out which Trusted OS is installed.
+ * Note that returned UUID is the UUID of the Trusted OS, not of the API.
+ *
+ * Returns UUID in a0-3 in the same way as OPTEE_SMC_CALLS_UID
+ * described above.
+ */
+#define OPTEE_SMC_FUNCID_GET_OS_UUID OPTEE_MSG_FUNCID_GET_OS_UUID
+#define OPTEE_SMC_CALL_GET_OS_UUID \
+ OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_GET_OS_UUID)
+
+/*
+ * Get revision of Trusted OS.
+ *
+ * Used by non-secure world to figure out which version of the Trusted OS
+ * is installed. Note that the returned revision is the revision of the
+ * Trusted OS, not of the API.
+ *
+ * Returns revision in a0-1 in the same way as OPTEE_SMC_CALLS_REVISION
+ * described above.
+ */
+#define OPTEE_SMC_FUNCID_GET_OS_REVISION OPTEE_MSG_FUNCID_GET_OS_REVISION
+#define OPTEE_SMC_CALL_GET_OS_REVISION \
+ OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_GET_OS_REVISION)
+
+/*
+ * Call with struct optee_msg_arg as argument
+ *
+ * Call register usage:
+ * a0 SMC Function ID, OPTEE_SMC*CALL_WITH_ARG
+ * a1 Upper 32bit of a 64bit physical pointer to a struct optee_msg_arg
+ * a2 Lower 32bit of a 64bit physical pointer to a struct optee_msg_arg
+ * a3 Cache settings, not used if physical pointer is in a predefined shared
+ * memory area else per OPTEE_SMC_SHM_*
+ * a4-6 Not used
+ * a7 Hypervisor Client ID register
+ *
+ * Normal return register usage:
+ * a0 Return value, OPTEE_SMC_RETURN_*
+ * a1-3 Not used
+ * a4-7 Preserved
+ *
+ * OPTEE_SMC_RETURN_ETHREAD_LIMIT return register usage:
+ * a0 Return value, OPTEE_SMC_RETURN_ETHREAD_LIMIT
+ * a1-3 Preserved
+ * a4-7 Preserved
+ *
+ * RPC return register usage:
+ * a0 Return value, OPTEE_SMC_RETURN_IS_RPC(val)
+ * a1-2 RPC parameters
+ * a3-7 Resume information, must be preserved
+ *
+ * Possible return values:
+ * OPTEE_SMC_RETURN_UNKNOWN_FUNCTION Trusted OS does not recognize this
+ * function.
+ * OPTEE_SMC_RETURN_OK Call completed, result updated in
+ * the previously supplied struct
+ * optee_msg_arg.
+ * OPTEE_SMC_RETURN_ETHREAD_LIMIT Number of Trusted OS threads exceeded,
+ * try again later.
+ * OPTEE_SMC_RETURN_EBADADDR Bad physical pointer to struct
+ * optee_msg_arg.
+ * OPTEE_SMC_RETURN_EBADCMD Bad/unknown cmd in struct optee_msg_arg
+ * OPTEE_SMC_RETURN_IS_RPC() Call suspended by RPC call to normal
+ * world.
+ */
+#define OPTEE_SMC_FUNCID_CALL_WITH_ARG OPTEE_MSG_FUNCID_CALL_WITH_ARG
+#define OPTEE_SMC_CALL_WITH_ARG \
+ OPTEE_SMC_STD_CALL_VAL(OPTEE_SMC_FUNCID_CALL_WITH_ARG)
+
+/*
+ * Get Shared Memory Config
+ *
+ * Returns the Secure/Non-secure shared memory config.
+ *
+ * Call register usage:
+ * a0 SMC Function ID, OPTEE_SMC_GET_SHM_CONFIG
+ * a1-6 Not used
+ * a7 Hypervisor Client ID register
+ *
+ * Have config return register usage:
+ * a0 OPTEE_SMC_RETURN_OK
+ * a1 Physical address of start of SHM
+ * a2 Size of SHM
+ * a3 Cache settings of memory, as defined by the
+ * OPTEE_SMC_SHM_* values above
+ * a4-7 Preserved
+ *
+ * Not available register usage:
+ * a0 OPTEE_SMC_RETURN_ENOTAVAIL
+ * a1-3 Not used
+ * a4-7 Preserved
+ */
+#define OPTEE_SMC_FUNCID_GET_SHM_CONFIG 7
+#define OPTEE_SMC_GET_SHM_CONFIG \
+ OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_GET_SHM_CONFIG)
+
+struct optee_smc_get_shm_config_result {
+ unsigned long status;
+ unsigned long start;
+ unsigned long size;
+ unsigned long settings;
+};
+
+/*
+ * Exchanges capabilities between normal world and secure world
+ *
+ * Call register usage:
+ * a0 SMC Function ID, OPTEE_SMC_EXCHANGE_CAPABILITIES
+ * a1 bitfield of normal world capabilities OPTEE_SMC_NSEC_CAP_*
+ * a2-6 Not used
+ * a7 Hypervisor Client ID register
+ *
+ * Normal return register usage:
+ * a0 OPTEE_SMC_RETURN_OK
+ * a1 bitfield of secure world capabilities OPTEE_SMC_SEC_CAP_*
+ * a2-7 Preserved
+ *
+ * Error return register usage:
+ * a0 OPTEE_SMC_RETURN_ENOTAVAIL, can't use the capabilities from normal world
+ * a1 bitfield of secure world capabilities OPTEE_SMC_SEC_CAP_*
+ * a2-7 Preserved
+ */
+/* Normal world works as a uniprocessor system */
+#define OPTEE_SMC_NSEC_CAP_UNIPROCESSOR BIT(0)
+/* Secure world has reserved shared memory for normal world to use */
+#define OPTEE_SMC_SEC_CAP_HAVE_RESERVED_SHM BIT(0)
+/* Secure world can communicate via previously unregistered shared memory */
+#define OPTEE_SMC_SEC_CAP_UNREGISTERED_SHM BIT(1)
+#define OPTEE_SMC_FUNCID_EXCHANGE_CAPABILITIES 9
+#define OPTEE_SMC_EXCHANGE_CAPABILITIES \
+ OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_EXCHANGE_CAPABILITIES)
+
+struct optee_smc_exchange_capabilities_result {
+ unsigned long status;
+ unsigned long capabilities;
+ unsigned long reserved0;
+ unsigned long reserved1;
+};
+
+/*
+ * Disable and empty the cache of shared memory objects
+ *
+ * Secure world can cache frequently used shared memory objects, for
+ * example objects used as RPC arguments. When secure world is idle this
+ * function returns one shared memory reference to free. To disable the
+ * cache and free all cached objects this function has to be called until
+ * it returns OPTEE_SMC_RETURN_ENOTAVAIL.
+ *
+ * Call register usage:
+ * a0 SMC Function ID, OPTEE_SMC_DISABLE_SHM_CACHE
+ * a1-6 Not used
+ * a7 Hypervisor Client ID register
+ *
+ * Normal return register usage:
+ * a0 OPTEE_SMC_RETURN_OK
+ * a1 Upper 32bit of a 64bit Shared memory cookie
+ * a2 Lower 32bit of a 64bit Shared memory cookie
+ * a3-7 Preserved
+ *
+ * Cache empty return register usage:
+ * a0 OPTEE_SMC_RETURN_ENOTAVAIL
+ * a1-7 Preserved
+ *
+ * Not idle return register usage:
+ * a0 OPTEE_SMC_RETURN_EBUSY
+ * a1-7 Preserved
+ */
+#define OPTEE_SMC_FUNCID_DISABLE_SHM_CACHE 10
+#define OPTEE_SMC_DISABLE_SHM_CACHE \
+ OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_DISABLE_SHM_CACHE)
+
+struct optee_smc_disable_shm_cache_result {
+ unsigned long status;
+ unsigned long shm_upper32;
+ unsigned long shm_lower32;
+ unsigned long reserved0;
+};
+
+/*
+ * Enable cache of shared memory objects
+ *
+ * Secure world can cache frequently used shared memory objects, for
+ * example objects used as RPC arguments. When secure world is idle this
+ * function returns OPTEE_SMC_RETURN_OK and the cache is enabled. If
+ * secure world isn't idle OPTEE_SMC_RETURN_EBUSY is returned.
+ *
+ * Call register usage:
+ * a0 SMC Function ID, OPTEE_SMC_ENABLE_SHM_CACHE
+ * a1-6 Not used
+ * a7 Hypervisor Client ID register
+ *
+ * Normal return register usage:
+ * a0 OPTEE_SMC_RETURN_OK
+ * a1-7 Preserved
+ *
+ * Not idle return register usage:
+ * a0 OPTEE_SMC_RETURN_EBUSY
+ * a1-7 Preserved
+ */
+#define OPTEE_SMC_FUNCID_ENABLE_SHM_CACHE 11
+#define OPTEE_SMC_ENABLE_SHM_CACHE \
+ OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_ENABLE_SHM_CACHE)
+
+/*
+ * Resume from RPC (for example after processing an IRQ)
+ *
+ * Call register usage:
+ * a0 SMC Function ID, OPTEE_SMC_CALL_RETURN_FROM_RPC
+ * a1-3 Value of a1-3 when OPTEE_SMC_CALL_WITH_ARG returned
+ * OPTEE_SMC_RETURN_RPC in a0
+ *
+ * Return register usage is the same as for OPTEE_SMC_*CALL_WITH_ARG above.
+ *
+ * Possible return values
+ * OPTEE_SMC_RETURN_UNKNOWN_FUNCTION Trusted OS does not recognize this
+ * function.
+ * OPTEE_SMC_RETURN_OK Original call completed, result
+ * updated in the previously supplied
+ * struct optee_msg_arg.
+ * OPTEE_SMC_RETURN_RPC Call suspended by RPC call to normal
+ * world.
+ * OPTEE_SMC_RETURN_ERESUME Resume failed, the opaque resume
+ * information was corrupt.
+ */
+#define OPTEE_SMC_FUNCID_RETURN_FROM_RPC 3
+#define OPTEE_SMC_CALL_RETURN_FROM_RPC \
+ OPTEE_SMC_STD_CALL_VAL(OPTEE_SMC_FUNCID_RETURN_FROM_RPC)
+
+#define OPTEE_SMC_RETURN_RPC_PREFIX_MASK 0xFFFF0000
+#define OPTEE_SMC_RETURN_RPC_PREFIX 0xFFFF0000
+#define OPTEE_SMC_RETURN_RPC_FUNC_MASK 0x0000FFFF
+
+#define OPTEE_SMC_RETURN_GET_RPC_FUNC(ret) \
+ ((ret) & OPTEE_SMC_RETURN_RPC_FUNC_MASK)
+
+#define OPTEE_SMC_RPC_VAL(func) ((func) | OPTEE_SMC_RETURN_RPC_PREFIX)
+
+/*
+ * Allocate memory for RPC parameter passing. The memory is used to hold a
+ * struct optee_msg_arg.
+ *
+ * "Call" register usage:
+ * a0 This value, OPTEE_SMC_RETURN_RPC_ALLOC
+ * a1 Size in bytes of required argument memory
+ * a2 Not used
+ * a3 Resume information, must be preserved
+ * a4-5 Not used
+ * a6-7 Resume information, must be preserved
+ *
+ * "Return" register usage:
+ * a0 SMC Function ID, OPTEE_SMC_CALL_RETURN_FROM_RPC.
+ * a1 Upper 32bits of 64bit physical pointer to allocated
+ * memory, (a1 == 0 && a2 == 0) if size was 0 or if memory can't
+ * be allocated.
+ * a2 Lower 32bits of 64bit physical pointer to allocated
+ * memory, (a1 == 0 && a2 == 0) if size was 0 or if memory can't
+ * be allocated
+ * a3 Preserved
+ * a4 Upper 32bits of 64bit Shared memory cookie used when freeing
+ * the memory or doing an RPC
+ * a5 Lower 32bits of 64bit Shared memory cookie used when freeing
+ * the memory or doing an RPC
+ * a6-7 Preserved
+ */
+#define OPTEE_SMC_RPC_FUNC_ALLOC 0
+#define OPTEE_SMC_RETURN_RPC_ALLOC \
+ OPTEE_SMC_RPC_VAL(OPTEE_SMC_RPC_FUNC_ALLOC)
+
+/*
+ * Free memory previously allocated by OPTEE_SMC_RETURN_RPC_ALLOC
+ *
+ * "Call" register usage:
+ * a0 This value, OPTEE_SMC_RETURN_RPC_FREE
+ * a1 Upper 32bits of 64bit shared memory cookie belonging to this
+ * argument memory
+ * a2 Lower 32bits of 64bit shared memory cookie belonging to this
+ * argument memory
+ * a3-7 Resume information, must be preserved
+ *
+ * "Return" register usage:
+ * a0 SMC Function ID, OPTEE_SMC_CALL_RETURN_FROM_RPC.
+ * a1-2 Not used
+ * a3-7 Preserved
+ */
+#define OPTEE_SMC_RPC_FUNC_FREE 2
+#define OPTEE_SMC_RETURN_RPC_FREE \
+ OPTEE_SMC_RPC_VAL(OPTEE_SMC_RPC_FUNC_FREE)
+
+/*
+ * Deliver an IRQ in normal world.
+ *
+ * "Call" register usage:
+ * a0 OPTEE_SMC_RETURN_RPC_IRQ
+ * a1-7 Resume information, must be preserved
+ *
+ * "Return" register usage:
+ * a0 SMC Function ID, OPTEE_SMC_CALL_RETURN_FROM_RPC.
+ * a1-7 Preserved
+ */
+#define OPTEE_SMC_RPC_FUNC_IRQ 4
+#define OPTEE_SMC_RETURN_RPC_IRQ \
+ OPTEE_SMC_RPC_VAL(OPTEE_SMC_RPC_FUNC_IRQ)
+
+/*
+ * Do an RPC request. The supplied struct optee_msg_arg tells which
+ * request to do and the parameters for the request. The following fields
+ * are used (the rest are unused):
+ * - cmd the Request ID
+ * - ret return value of the request, filled in by normal world
+ * - num_params number of parameters for the request
+ * - params the parameters
+ * - param_attrs attributes of the parameters
+ *
+ * "Call" register usage:
+ * a0 OPTEE_SMC_RETURN_RPC_CMD
+ * a1 Upper 32bit of a 64bit Shared memory cookie holding a
+ * struct optee_msg_arg, must be preserved, only the data should
+ * be updated
+ * a2 Lower 32bit of a 64bit Shared memory cookie holding a
+ * struct optee_msg_arg, must be preserved, only the data should
+ * be updated
+ * a3-7 Resume information, must be preserved
+ *
+ * "Return" register usage:
+ * a0 SMC Function ID, OPTEE_SMC_CALL_RETURN_FROM_RPC.
+ * a1-2 Not used
+ * a3-7 Preserved
+ */
+#define OPTEE_SMC_RPC_FUNC_CMD 5
+#define OPTEE_SMC_RETURN_RPC_CMD \
+ OPTEE_SMC_RPC_VAL(OPTEE_SMC_RPC_FUNC_CMD)
+
+/* Returned in a0 */
+#define OPTEE_SMC_RETURN_UNKNOWN_FUNCTION 0xFFFFFFFF
+
+/* Returned in a0 only from Trusted OS functions */
+#define OPTEE_SMC_RETURN_OK 0x0
+#define OPTEE_SMC_RETURN_ETHREAD_LIMIT 0x1
+#define OPTEE_SMC_RETURN_EBUSY 0x2
+#define OPTEE_SMC_RETURN_ERESUME 0x3
+#define OPTEE_SMC_RETURN_EBADADDR 0x4
+#define OPTEE_SMC_RETURN_EBADCMD 0x5
+#define OPTEE_SMC_RETURN_ENOMEM 0x6
+#define OPTEE_SMC_RETURN_ENOTAVAIL 0x7
+#define OPTEE_SMC_RETURN_IS_RPC(ret) __optee_smc_return_is_rpc((ret))
+
+static inline bool __optee_smc_return_is_rpc(u32 ret)
+{
+ return ret != OPTEE_SMC_RETURN_UNKNOWN_FUNCTION &&
+ (ret & OPTEE_SMC_RETURN_RPC_PREFIX_MASK) ==
+ OPTEE_SMC_RETURN_RPC_PREFIX;
+}
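The return codes above imply a call loop in normal world: issue the SMC, and as long as the result is an RPC, handle it and resume. A hedged sketch that loosely mirrors what optee_do_call_with_arg() is expected to do (optee, ctx and parg are assumed to come from the surrounding driver context):

	/* Sketch, not part of the patch: call secure world, servicing RPCs. */
	struct optee_rpc_param param = { .a0 = OPTEE_SMC_CALL_WITH_ARG };
	struct arm_smccc_res res;

	reg_pair_from_64(&param.a1, &param.a2, parg);	/* physical addr of msg arg */
	while (true) {
		optee->invoke_fn(param.a0, param.a1, param.a2, param.a3,
				 param.a4, param.a5, param.a6, param.a7, &res);
		if (!OPTEE_SMC_RETURN_IS_RPC(res.a0))
			break;		/* res.a0 is OPTEE_SMC_RETURN_OK or an error */
		param.a0 = res.a0;
		param.a1 = res.a1;
		param.a2 = res.a2;
		param.a3 = res.a3;
		optee_handle_rpc(ctx, &param);	/* sets a0 to CALL_RETURN_FROM_RPC */
	}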
+
+#endif /* OPTEE_SMC_H */
diff --git a/drivers/tee/optee/rpc.c b/drivers/tee/optee/rpc.c
new file mode 100644
index 000000000000..8814eca06021
--- /dev/null
+++ b/drivers/tee/optee/rpc.c
@@ -0,0 +1,396 @@
+/*
+ * Copyright (c) 2015-2016, Linaro Limited
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/tee_drv.h>
+#include "optee_private.h"
+#include "optee_smc.h"
+
+struct wq_entry {
+ struct list_head link;
+ struct completion c;
+ u32 key;
+};
+
+void optee_wait_queue_init(struct optee_wait_queue *priv)
+{
+ mutex_init(&priv->mu);
+ INIT_LIST_HEAD(&priv->db);
+}
+
+void optee_wait_queue_exit(struct optee_wait_queue *priv)
+{
+ mutex_destroy(&priv->mu);
+}
+
+static void handle_rpc_func_cmd_get_time(struct optee_msg_arg *arg)
+{
+ struct timespec64 ts;
+
+ if (arg->num_params != 1)
+ goto bad;
+ if ((arg->params[0].attr & OPTEE_MSG_ATTR_TYPE_MASK) !=
+ OPTEE_MSG_ATTR_TYPE_VALUE_OUTPUT)
+ goto bad;
+
+ getnstimeofday64(&ts);
+ arg->params[0].u.value.a = ts.tv_sec;
+ arg->params[0].u.value.b = ts.tv_nsec;
+
+ arg->ret = TEEC_SUCCESS;
+ return;
+bad:
+ arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+}
+
+static struct wq_entry *wq_entry_get(struct optee_wait_queue *wq, u32 key)
+{
+ struct wq_entry *w;
+
+ mutex_lock(&wq->mu);
+
+ list_for_each_entry(w, &wq->db, link)
+ if (w->key == key)
+ goto out;
+
+ w = kmalloc(sizeof(*w), GFP_KERNEL);
+ if (w) {
+ init_completion(&w->c);
+ w->key = key;
+ list_add_tail(&w->link, &wq->db);
+ }
+out:
+ mutex_unlock(&wq->mu);
+ return w;
+}
+
+static void wq_sleep(struct optee_wait_queue *wq, u32 key)
+{
+ struct wq_entry *w = wq_entry_get(wq, key);
+
+ if (w) {
+ wait_for_completion(&w->c);
+ mutex_lock(&wq->mu);
+ list_del(&w->link);
+ mutex_unlock(&wq->mu);
+ kfree(w);
+ }
+}
+
+static void wq_wakeup(struct optee_wait_queue *wq, u32 key)
+{
+ struct wq_entry *w = wq_entry_get(wq, key);
+
+ if (w)
+ complete(&w->c);
+}
+
+static void handle_rpc_func_cmd_wq(struct optee *optee,
+ struct optee_msg_arg *arg)
+{
+ if (arg->num_params != 1)
+ goto bad;
+
+ if ((arg->params[0].attr & OPTEE_MSG_ATTR_TYPE_MASK) !=
+ OPTEE_MSG_ATTR_TYPE_VALUE_INPUT)
+ goto bad;
+
+ switch (arg->params[0].u.value.a) {
+ case OPTEE_MSG_RPC_WAIT_QUEUE_SLEEP:
+ wq_sleep(&optee->wait_queue, arg->params[0].u.value.b);
+ break;
+ case OPTEE_MSG_RPC_WAIT_QUEUE_WAKEUP:
+ wq_wakeup(&optee->wait_queue, arg->params[0].u.value.b);
+ break;
+ default:
+ goto bad;
+ }
+
+ arg->ret = TEEC_SUCCESS;
+ return;
+bad:
+ arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+}
+
+static void handle_rpc_func_cmd_wait(struct optee_msg_arg *arg)
+{
+ u32 msec_to_wait;
+
+ if (arg->num_params != 1)
+ goto bad;
+
+ if ((arg->params[0].attr & OPTEE_MSG_ATTR_TYPE_MASK) !=
+ OPTEE_MSG_ATTR_TYPE_VALUE_INPUT)
+ goto bad;
+
+ msec_to_wait = arg->params[0].u.value.a;
+
+ /* set task's state to interruptible sleep */
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ /* take a nap */
+ msleep(msec_to_wait);
+
+ arg->ret = TEEC_SUCCESS;
+ return;
+bad:
+ arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+}
+
+static void handle_rpc_supp_cmd(struct tee_context *ctx,
+ struct optee_msg_arg *arg)
+{
+ struct tee_param *params;
+
+ arg->ret_origin = TEEC_ORIGIN_COMMS;
+
+ params = kmalloc_array(arg->num_params, sizeof(struct tee_param),
+ GFP_KERNEL);
+ if (!params) {
+ arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
+ return;
+ }
+
+ if (optee_from_msg_param(params, arg->num_params, arg->params)) {
+ arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+ goto out;
+ }
+
+ arg->ret = optee_supp_thrd_req(ctx, arg->cmd, arg->num_params, params);
+
+ if (optee_to_msg_param(arg->params, arg->num_params, params))
+ arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+out:
+ kfree(params);
+}
+
+static struct tee_shm *cmd_alloc_suppl(struct tee_context *ctx, size_t sz)
+{
+ u32 ret;
+ struct tee_param param;
+ struct optee *optee = tee_get_drvdata(ctx->teedev);
+ struct tee_shm *shm;
+
+ param.attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT;
+ param.u.value.a = OPTEE_MSG_RPC_SHM_TYPE_APPL;
+ param.u.value.b = sz;
+ param.u.value.c = 0;
+
+ ret = optee_supp_thrd_req(ctx, OPTEE_MSG_RPC_CMD_SHM_ALLOC, 1, &param);
+ if (ret)
+ return ERR_PTR(-ENOMEM);
+
+ mutex_lock(&optee->supp.ctx_mutex);
+ /* Increases count as secure world doesn't have a reference */
+ shm = tee_shm_get_from_id(optee->supp.ctx, param.u.value.c);
+ mutex_unlock(&optee->supp.ctx_mutex);
+ return shm;
+}
+
+static void handle_rpc_func_cmd_shm_alloc(struct tee_context *ctx,
+ struct optee_msg_arg *arg)
+{
+ phys_addr_t pa;
+ struct tee_shm *shm;
+ size_t sz;
+ size_t n;
+
+ arg->ret_origin = TEEC_ORIGIN_COMMS;
+
+ if (!arg->num_params ||
+ arg->params[0].attr != OPTEE_MSG_ATTR_TYPE_VALUE_INPUT) {
+ arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+ return;
+ }
+
+ for (n = 1; n < arg->num_params; n++) {
+ if (arg->params[n].attr != OPTEE_MSG_ATTR_TYPE_NONE) {
+ arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+ return;
+ }
+ }
+
+ sz = arg->params[0].u.value.b;
+ switch (arg->params[0].u.value.a) {
+ case OPTEE_MSG_RPC_SHM_TYPE_APPL:
+ shm = cmd_alloc_suppl(ctx, sz);
+ break;
+ case OPTEE_MSG_RPC_SHM_TYPE_KERNEL:
+ shm = tee_shm_alloc(ctx, sz, TEE_SHM_MAPPED);
+ break;
+ default:
+ arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+ return;
+ }
+
+ if (IS_ERR(shm)) {
+ arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
+ return;
+ }
+
+ if (tee_shm_get_pa(shm, 0, &pa)) {
+ arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+ goto bad;
+ }
+
+ arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT;
+ arg->params[0].u.tmem.buf_ptr = pa;
+ arg->params[0].u.tmem.size = sz;
+ arg->params[0].u.tmem.shm_ref = (unsigned long)shm;
+ arg->ret = TEEC_SUCCESS;
+ return;
+bad:
+ tee_shm_free(shm);
+}
+
+static void cmd_free_suppl(struct tee_context *ctx, struct tee_shm *shm)
+{
+ struct tee_param param;
+
+ param.attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT;
+ param.u.value.a = OPTEE_MSG_RPC_SHM_TYPE_APPL;
+ param.u.value.b = tee_shm_get_id(shm);
+ param.u.value.c = 0;
+
+ /*
+ * Match the tee_shm_get_from_id() in cmd_alloc_suppl() as secure
+ * world has released its reference.
+ *
+ * It's better to do this before sending the request to the supplicant,
+ * as we'd like to let the process that did the initial allocation
+ * release the last reference too, in order to avoid stacking up
+ * many pending fput() on the client process. This could otherwise
+ * happen if secure world does many allocations and frees in a single
+ * invoke.
+ */
+ tee_shm_put(shm);
+
+ optee_supp_thrd_req(ctx, OPTEE_MSG_RPC_CMD_SHM_FREE, 1, &param);
+}
+
+static void handle_rpc_func_cmd_shm_free(struct tee_context *ctx,
+ struct optee_msg_arg *arg)
+{
+ struct tee_shm *shm;
+
+ arg->ret_origin = TEEC_ORIGIN_COMMS;
+
+ if (arg->num_params != 1 ||
+ arg->params[0].attr != OPTEE_MSG_ATTR_TYPE_VALUE_INPUT) {
+ arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+ return;
+ }
+
+ shm = (struct tee_shm *)(unsigned long)arg->params[0].u.value.b;
+ switch (arg->params[0].u.value.a) {
+ case OPTEE_MSG_RPC_SHM_TYPE_APPL:
+ cmd_free_suppl(ctx, shm);
+ break;
+ case OPTEE_MSG_RPC_SHM_TYPE_KERNEL:
+ tee_shm_free(shm);
+ break;
+ default:
+ arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+ return;
+ }
+ arg->ret = TEEC_SUCCESS;
+}
+
+static void handle_rpc_func_cmd(struct tee_context *ctx, struct optee *optee,
+ struct tee_shm *shm)
+{
+ struct optee_msg_arg *arg;
+
+ arg = tee_shm_get_va(shm, 0);
+ if (IS_ERR(arg)) {
+ pr_err("%s: tee_shm_get_va %p failed\n", __func__, shm);
+ return;
+ }
+
+ switch (arg->cmd) {
+ case OPTEE_MSG_RPC_CMD_GET_TIME:
+ handle_rpc_func_cmd_get_time(arg);
+ break;
+ case OPTEE_MSG_RPC_CMD_WAIT_QUEUE:
+ handle_rpc_func_cmd_wq(optee, arg);
+ break;
+ case OPTEE_MSG_RPC_CMD_SUSPEND:
+ handle_rpc_func_cmd_wait(arg);
+ break;
+ case OPTEE_MSG_RPC_CMD_SHM_ALLOC:
+ handle_rpc_func_cmd_shm_alloc(ctx, arg);
+ break;
+ case OPTEE_MSG_RPC_CMD_SHM_FREE:
+ handle_rpc_func_cmd_shm_free(ctx, arg);
+ break;
+ default:
+ handle_rpc_supp_cmd(ctx, arg);
+ }
+}
+
+/**
+ * optee_handle_rpc() - handle RPC from secure world
+ * @ctx: context doing the RPC
+ * @param: value of registers for the RPC
+ *
+ * Result of RPC is written back into @param.
+ */
+void optee_handle_rpc(struct tee_context *ctx, struct optee_rpc_param *param)
+{
+ struct tee_device *teedev = ctx->teedev;
+ struct optee *optee = tee_get_drvdata(teedev);
+ struct tee_shm *shm;
+ phys_addr_t pa;
+
+ switch (OPTEE_SMC_RETURN_GET_RPC_FUNC(param->a0)) {
+ case OPTEE_SMC_RPC_FUNC_ALLOC:
+ shm = tee_shm_alloc(ctx, param->a1, TEE_SHM_MAPPED);
+ if (!IS_ERR(shm) && !tee_shm_get_pa(shm, 0, &pa)) {
+ reg_pair_from_64(&param->a1, &param->a2, pa);
+ reg_pair_from_64(&param->a4, &param->a5,
+ (unsigned long)shm);
+ } else {
+ param->a1 = 0;
+ param->a2 = 0;
+ param->a4 = 0;
+ param->a5 = 0;
+ }
+ break;
+ case OPTEE_SMC_RPC_FUNC_FREE:
+ shm = reg_pair_to_ptr(param->a1, param->a2);
+ tee_shm_free(shm);
+ break;
+ case OPTEE_SMC_RPC_FUNC_IRQ:
+ /*
+ * An IRQ was raised while secure world was executing.
+ * Since all IRQs are handled in Linux a dummy RPC is
+ * performed to let Linux take the IRQ through the normal
+ * vector.
+ */
+ break;
+ case OPTEE_SMC_RPC_FUNC_CMD:
+ shm = reg_pair_to_ptr(param->a1, param->a2);
+ handle_rpc_func_cmd(ctx, optee, shm);
+ break;
+ default:
+ pr_warn("Unknown RPC func 0x%x\n",
+ (u32)OPTEE_SMC_RETURN_GET_RPC_FUNC(param->a0));
+ break;
+ }
+
+ param->a0 = OPTEE_SMC_CALL_RETURN_FROM_RPC;
+}
diff --git a/drivers/tee/optee/supp.c b/drivers/tee/optee/supp.c
new file mode 100644
index 000000000000..b4ea0678a436
--- /dev/null
+++ b/drivers/tee/optee/supp.c
@@ -0,0 +1,273 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include "optee_private.h"
+
+void optee_supp_init(struct optee_supp *supp)
+{
+ memset(supp, 0, sizeof(*supp));
+ mutex_init(&supp->ctx_mutex);
+ mutex_init(&supp->thrd_mutex);
+ mutex_init(&supp->supp_mutex);
+ init_completion(&supp->data_to_supp);
+ init_completion(&supp->data_from_supp);
+}
+
+void optee_supp_uninit(struct optee_supp *supp)
+{
+ mutex_destroy(&supp->ctx_mutex);
+ mutex_destroy(&supp->thrd_mutex);
+ mutex_destroy(&supp->supp_mutex);
+}
+
+/**
+ * optee_supp_thrd_req() - request service from supplicant
+ * @ctx: context doing the request
+ * @func: function requested
+ * @num_params: number of elements in @param array
+ * @param: parameters for function
+ *
+ * Returns result of operation to be passed to secure world
+ */
+u32 optee_supp_thrd_req(struct tee_context *ctx, u32 func, size_t num_params,
+ struct tee_param *param)
+{
+ bool interruptable;
+ struct optee *optee = tee_get_drvdata(ctx->teedev);
+ struct optee_supp *supp = &optee->supp;
+ u32 ret;
+
+ /*
+ * Other threads block here until we've copied our answer from
+ * the supplicant.
+ */
+ while (mutex_lock_interruptible(&supp->thrd_mutex)) {
+ /* See comment below on when the RPC can be interrupted. */
+ mutex_lock(&supp->ctx_mutex);
+ interruptable = !supp->ctx;
+ mutex_unlock(&supp->ctx_mutex);
+ if (interruptable)
+ return TEEC_ERROR_COMMUNICATION;
+ }
+
+ /*
+ * We have exclusive access now since the supplicant at this
+ * point is either doing a
+ * wait_for_completion_interruptible(&supp->data_to_supp) or is still
+ * in user space, about to do the ioctl() that enters
+ * optee_supp_recv() below.
+ */
+
+ supp->func = func;
+ supp->num_params = num_params;
+ supp->param = param;
+ supp->req_posted = true;
+
+ /* Let supplicant get the data */
+ complete(&supp->data_to_supp);
+
+ /*
+ * Wait for supplicant to process and return result, once we've
+ * returned from wait_for_completion(data_from_supp) we have
+ * exclusive access again.
+ */
+ while (wait_for_completion_interruptible(&supp->data_from_supp)) {
+ mutex_lock(&supp->ctx_mutex);
+ interruptable = !supp->ctx;
+ if (interruptable) {
+ /*
+ * There's no supplicant available and since the
+ * supp->ctx_mutex currently is held none can
+ * become available until the mutex is released
+ * again.
+ *
+ * Interrupting an RPC to supplicant is only
+ * allowed as a way of slightly improving the user
+ * experience in case the supplicant hasn't been
+ * started yet. During normal operation the supplicant
+ * will serve all requests in a timely manner and
+ * interrupting them wouldn't make sense.
+ */
+ supp->ret = TEEC_ERROR_COMMUNICATION;
+ init_completion(&supp->data_to_supp);
+ }
+ mutex_unlock(&supp->ctx_mutex);
+ if (interruptable)
+ break;
+ }
+
+ ret = supp->ret;
+ supp->param = NULL;
+ supp->req_posted = false;
+
+ /* We're done, let someone else talk to the supplicant now. */
+ mutex_unlock(&supp->thrd_mutex);
+
+ return ret;
+}
+
+/**
+ * optee_supp_recv() - receive request for supplicant
+ * @ctx: context receiving the request
+ * @func: requested function in supplicant
+ * @num_params: number of elements allocated in @param, updated with the
+ * number of used elements
+ * @param: space for parameters for @func
+ *
+ * Returns 0 on success or <0 on failure
+ */
+int optee_supp_recv(struct tee_context *ctx, u32 *func, u32 *num_params,
+ struct tee_param *param)
+{
+ struct tee_device *teedev = ctx->teedev;
+ struct optee *optee = tee_get_drvdata(teedev);
+ struct optee_supp *supp = &optee->supp;
+ int rc;
+
+ /*
+ * In case two threads in one supplicant are calling this function
+ * simultaneously we need to protect the data with a mutex which
+ * we'll release before returning.
+ */
+ mutex_lock(&supp->supp_mutex);
+
+ if (supp->supp_next_send) {
+ /*
+ * optee_supp_recv() has been called again without
+ * an optee_supp_send() in between. The supplicant has
+ * probably been restarted before it was able to
+ * write back the last result. Abort the last request
+ * and wait for a new one.
+ */
+ if (supp->req_posted) {
+ supp->ret = TEEC_ERROR_COMMUNICATION;
+ supp->supp_next_send = false;
+ complete(&supp->data_from_supp);
+ }
+ }
+
+ /*
+ * This is where the supplicant will be hanging most of the
+ * time; make it interruptible so we can easily restart the
+ * supplicant if needed.
+ */
+ if (wait_for_completion_interruptible(&supp->data_to_supp)) {
+ rc = -ERESTARTSYS;
+ goto out;
+ }
+
+ /* We have exclusive access to the data */
+
+ if (*num_params < supp->num_params) {
+ /*
+ * Not enough room for parameters, tell supplicant
+ * it failed and abort last request.
+ */
+ supp->ret = TEEC_ERROR_COMMUNICATION;
+ rc = -EINVAL;
+ complete(&supp->data_from_supp);
+ goto out;
+ }
+
+ *func = supp->func;
+ *num_params = supp->num_params;
+ memcpy(param, supp->param,
+ sizeof(struct tee_param) * supp->num_params);
+
+ /* Allow optee_supp_send() below to do its work */
+ supp->supp_next_send = true;
+
+ rc = 0;
+out:
+ mutex_unlock(&supp->supp_mutex);
+ return rc;
+}
+
+/**
+ * optee_supp_send() - send result of request from supplicant
+ * @ctx: context sending result
+ * @ret: return value of request
+ * @num_params: number of parameters returned
+ * @param: returned parameters
+ *
+ * Returns 0 on success or <0 on failure.
+ */
+int optee_supp_send(struct tee_context *ctx, u32 ret, u32 num_params,
+ struct tee_param *param)
+{
+ struct tee_device *teedev = ctx->teedev;
+ struct optee *optee = tee_get_drvdata(teedev);
+ struct optee_supp *supp = &optee->supp;
+ size_t n;
+ int rc = 0;
+
+ /*
+ * We still have exclusive access to the data since that's how we
+ * left it when returning from optee_supp_recv().
+ */
+
+ /* See comment on mutex in optee_supp_recv() above */
+ mutex_lock(&supp->supp_mutex);
+
+ if (!supp->supp_next_send) {
+ /*
+ * Something strange is going on; the supplicant shouldn't
+ * enter optee_supp_send() in this state.
+ */
+ rc = -ENOENT;
+ goto out;
+ }
+
+ if (num_params != supp->num_params) {
+ /*
+ * Something is wrong, let supplicant restart. Next call to
+ * optee_supp_recv() will give an error to the requesting
+ * thread and release it.
+ */
+ rc = -EINVAL;
+ goto out;
+ }
+
+ /* Update out and in/out parameters */
+ for (n = 0; n < num_params; n++) {
+ struct tee_param *p = supp->param + n;
+
+ switch (p->attr) {
+ case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
+ case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
+ p->u.value.a = param[n].u.value.a;
+ p->u.value.b = param[n].u.value.b;
+ p->u.value.c = param[n].u.value.c;
+ break;
+ case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
+ case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
+ p->u.memref.size = param[n].u.memref.size;
+ break;
+ default:
+ break;
+ }
+ }
+ supp->ret = ret;
+
+ /* Allow optee_supp_recv() above to do its work */
+ supp->supp_next_send = false;
+
+ /* Let the requesting thread continue */
+ complete(&supp->data_from_supp);
+out:
+ mutex_unlock(&supp->supp_mutex);
+ return rc;
+}
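
The handshake implemented above only makes sense against the struct optee_supp declared elsewhere in this series (optee_private.h). Purely as an illustration of the fields the code touches, a layout along these lines is assumed:

struct optee_supp {
        struct tee_context *ctx;        /* supplicant context, NULL when gone */
        struct mutex ctx_mutex;         /* protects @ctx */
        struct mutex thrd_mutex;        /* serializes requesting threads */
        struct mutex supp_mutex;        /* serializes supplicant threads */

        u32 func;                       /* requested function */
        u32 ret;                        /* result written by the supplicant */
        size_t num_params;
        struct tee_param *param;

        bool req_posted;                /* request waiting for an answer */
        bool supp_next_send;            /* recv done, send expected next */
        struct completion data_to_supp;
        struct completion data_from_supp;
};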
diff --git a/drivers/tee/tee_core.c b/drivers/tee/tee_core.c
new file mode 100644
index 000000000000..5c60bf4423e6
--- /dev/null
+++ b/drivers/tee/tee_core.c
@@ -0,0 +1,893 @@
+/*
+ * Copyright (c) 2015-2016, Linaro Limited
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/idr.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/tee_drv.h>
+#include <linux/uaccess.h>
+#include "tee_private.h"
+
+#define TEE_NUM_DEVICES 32
+
+#define TEE_IOCTL_PARAM_SIZE(x) (sizeof(struct tee_param) * (x))
+
+/*
+ * Unprivileged devices in the lower half range and privileged devices in
+ * the upper half range.
+ */
+static DECLARE_BITMAP(dev_mask, TEE_NUM_DEVICES);
+static DEFINE_SPINLOCK(driver_lock);
+
+static struct class *tee_class;
+static dev_t tee_devt;
+
+static int tee_open(struct inode *inode, struct file *filp)
+{
+ int rc;
+ struct tee_device *teedev;
+ struct tee_context *ctx;
+
+ teedev = container_of(inode->i_cdev, struct tee_device, cdev);
+ if (!tee_device_get(teedev))
+ return -EINVAL;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx) {
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ ctx->teedev = teedev;
+ INIT_LIST_HEAD(&ctx->list_shm);
+ filp->private_data = ctx;
+ rc = teedev->desc->ops->open(ctx);
+ if (rc)
+ goto err;
+
+ return 0;
+err:
+ kfree(ctx);
+ tee_device_put(teedev);
+ return rc;
+}
+
+static int tee_release(struct inode *inode, struct file *filp)
+{
+ struct tee_context *ctx = filp->private_data;
+ struct tee_device *teedev = ctx->teedev;
+ struct tee_shm *shm;
+
+ ctx->teedev->desc->ops->release(ctx);
+ mutex_lock(&ctx->teedev->mutex);
+ list_for_each_entry(shm, &ctx->list_shm, link)
+ shm->ctx = NULL;
+ mutex_unlock(&ctx->teedev->mutex);
+ kfree(ctx);
+ tee_device_put(teedev);
+ return 0;
+}
+
+static int tee_ioctl_version(struct tee_context *ctx,
+ struct tee_ioctl_version_data __user *uvers)
+{
+ struct tee_ioctl_version_data vers;
+
+ ctx->teedev->desc->ops->get_version(ctx->teedev, &vers);
+ if (copy_to_user(uvers, &vers, sizeof(vers)))
+ return -EFAULT;
+ return 0;
+}
+
+static int tee_ioctl_shm_alloc(struct tee_context *ctx,
+ struct tee_ioctl_shm_alloc_data __user *udata)
+{
+ long ret;
+ struct tee_ioctl_shm_alloc_data data;
+ struct tee_shm *shm;
+
+ if (copy_from_user(&data, udata, sizeof(data)))
+ return -EFAULT;
+
+ /* Currently no input flags are supported */
+ if (data.flags)
+ return -EINVAL;
+
+ data.id = -1;
+
+ shm = tee_shm_alloc(ctx, data.size, TEE_SHM_MAPPED | TEE_SHM_DMA_BUF);
+ if (IS_ERR(shm))
+ return PTR_ERR(shm);
+
+ data.id = shm->id;
+ data.flags = shm->flags;
+ data.size = shm->size;
+
+ if (copy_to_user(udata, &data, sizeof(data)))
+ ret = -EFAULT;
+ else
+ ret = tee_shm_get_fd(shm);
+
+ /*
+ * The shared memory is freed when user space closes the file
+ * descriptor, or immediately if tee_shm_get_fd() failed.
+ */
+ tee_shm_put(shm);
+ return ret;
+}
+
+static int params_from_user(struct tee_context *ctx, struct tee_param *params,
+ size_t num_params,
+ struct tee_ioctl_param __user *uparams)
+{
+ size_t n;
+
+ for (n = 0; n < num_params; n++) {
+ struct tee_shm *shm;
+ struct tee_ioctl_param ip;
+
+ if (copy_from_user(&ip, uparams + n, sizeof(ip)))
+ return -EFAULT;
+
+ /* All unused attribute bits have to be zero */
+ if (ip.attr & ~TEE_IOCTL_PARAM_ATTR_TYPE_MASK)
+ return -EINVAL;
+
+ params[n].attr = ip.attr;
+ switch (ip.attr) {
+ case TEE_IOCTL_PARAM_ATTR_TYPE_NONE:
+ case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
+ break;
+ case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT:
+ case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
+ params[n].u.value.a = ip.a;
+ params[n].u.value.b = ip.b;
+ params[n].u.value.c = ip.c;
+ break;
+ case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT:
+ case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
+ case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
+ /*
+ * If we fail to get a pointer to a shared memory
+ * object (and increase the ref count) from an
+ * identifier we return an error. All pointers that
+ * have been added to params have an increased ref
+ * count. It's the caller's responsibility to do
+ * tee_shm_put() on all resolved pointers.
+ */
+ shm = tee_shm_get_from_id(ctx, ip.c);
+ if (IS_ERR(shm))
+ return PTR_ERR(shm);
+
+ params[n].u.memref.shm_offs = ip.a;
+ params[n].u.memref.size = ip.b;
+ params[n].u.memref.shm = shm;
+ break;
+ default:
+ /* Unknown attribute */
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+static int params_to_user(struct tee_ioctl_param __user *uparams,
+ size_t num_params, struct tee_param *params)
+{
+ size_t n;
+
+ for (n = 0; n < num_params; n++) {
+ struct tee_ioctl_param __user *up = uparams + n;
+ struct tee_param *p = params + n;
+
+ switch (p->attr) {
+ case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
+ case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
+ if (put_user(p->u.value.a, &up->a) ||
+ put_user(p->u.value.b, &up->b) ||
+ put_user(p->u.value.c, &up->c))
+ return -EFAULT;
+ break;
+ case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
+ case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
+ if (put_user((u64)p->u.memref.size, &up->b))
+ return -EFAULT;
+ default:
+ break;
+ }
+ }
+ return 0;
+}
+
+static bool param_is_memref(struct tee_param *param)
+{
+ switch (param->attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) {
+ case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT:
+ case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
+ case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static int tee_ioctl_open_session(struct tee_context *ctx,
+ struct tee_ioctl_buf_data __user *ubuf)
+{
+ int rc;
+ size_t n;
+ struct tee_ioctl_buf_data buf;
+ struct tee_ioctl_open_session_arg __user *uarg;
+ struct tee_ioctl_open_session_arg arg;
+ struct tee_ioctl_param __user *uparams = NULL;
+ struct tee_param *params = NULL;
+ bool have_session = false;
+
+ if (!ctx->teedev->desc->ops->open_session)
+ return -EINVAL;
+
+ if (copy_from_user(&buf, ubuf, sizeof(buf)))
+ return -EFAULT;
+
+ if (buf.buf_len > TEE_MAX_ARG_SIZE ||
+ buf.buf_len < sizeof(struct tee_ioctl_open_session_arg))
+ return -EINVAL;
+
+ uarg = u64_to_user_ptr(buf.buf_ptr);
+ if (copy_from_user(&arg, uarg, sizeof(arg)))
+ return -EFAULT;
+
+ if (sizeof(arg) + TEE_IOCTL_PARAM_SIZE(arg.num_params) != buf.buf_len)
+ return -EINVAL;
+
+ if (arg.num_params) {
+ params = kcalloc(arg.num_params, sizeof(struct tee_param),
+ GFP_KERNEL);
+ if (!params)
+ return -ENOMEM;
+ uparams = uarg->params;
+ rc = params_from_user(ctx, params, arg.num_params, uparams);
+ if (rc)
+ goto out;
+ }
+
+ rc = ctx->teedev->desc->ops->open_session(ctx, &arg, params);
+ if (rc)
+ goto out;
+ have_session = true;
+
+ if (put_user(arg.session, &uarg->session) ||
+ put_user(arg.ret, &uarg->ret) ||
+ put_user(arg.ret_origin, &uarg->ret_origin)) {
+ rc = -EFAULT;
+ goto out;
+ }
+ rc = params_to_user(uparams, arg.num_params, params);
+out:
+ /*
+ * If we've succeeded to open the session but failed to communicate
+ * it back to user space, close the session again to avoid leakage.
+ */
+ if (rc && have_session && ctx->teedev->desc->ops->close_session)
+ ctx->teedev->desc->ops->close_session(ctx, arg.session);
+
+ if (params) {
+ /* Decrease ref count for all valid shared memory pointers */
+ for (n = 0; n < arg.num_params; n++)
+ if (param_is_memref(params + n) &&
+ params[n].u.memref.shm)
+ tee_shm_put(params[n].u.memref.shm);
+ kfree(params);
+ }
+
+ return rc;
+}
+
+static int tee_ioctl_invoke(struct tee_context *ctx,
+ struct tee_ioctl_buf_data __user *ubuf)
+{
+ int rc;
+ size_t n;
+ struct tee_ioctl_buf_data buf;
+ struct tee_ioctl_invoke_arg __user *uarg;
+ struct tee_ioctl_invoke_arg arg;
+ struct tee_ioctl_param __user *uparams = NULL;
+ struct tee_param *params = NULL;
+
+ if (!ctx->teedev->desc->ops->invoke_func)
+ return -EINVAL;
+
+ if (copy_from_user(&buf, ubuf, sizeof(buf)))
+ return -EFAULT;
+
+ if (buf.buf_len > TEE_MAX_ARG_SIZE ||
+ buf.buf_len < sizeof(struct tee_ioctl_invoke_arg))
+ return -EINVAL;
+
+ uarg = u64_to_user_ptr(buf.buf_ptr);
+ if (copy_from_user(&arg, uarg, sizeof(arg)))
+ return -EFAULT;
+
+ if (sizeof(arg) + TEE_IOCTL_PARAM_SIZE(arg.num_params) != buf.buf_len)
+ return -EINVAL;
+
+ if (arg.num_params) {
+ params = kcalloc(arg.num_params, sizeof(struct tee_param),
+ GFP_KERNEL);
+ if (!params)
+ return -ENOMEM;
+ uparams = uarg->params;
+ rc = params_from_user(ctx, params, arg.num_params, uparams);
+ if (rc)
+ goto out;
+ }
+
+ rc = ctx->teedev->desc->ops->invoke_func(ctx, &arg, params);
+ if (rc)
+ goto out;
+
+ if (put_user(arg.ret, &uarg->ret) ||
+ put_user(arg.ret_origin, &uarg->ret_origin)) {
+ rc = -EFAULT;
+ goto out;
+ }
+ rc = params_to_user(uparams, arg.num_params, params);
+out:
+ if (params) {
+ /* Decrease ref count for all valid shared memory pointers */
+ for (n = 0; n < arg.num_params; n++)
+ if (param_is_memref(params + n) &&
+ params[n].u.memref.shm)
+ tee_shm_put(params[n].u.memref.shm);
+ kfree(params);
+ }
+ return rc;
+}
+
+static int tee_ioctl_cancel(struct tee_context *ctx,
+ struct tee_ioctl_cancel_arg __user *uarg)
+{
+ struct tee_ioctl_cancel_arg arg;
+
+ if (!ctx->teedev->desc->ops->cancel_req)
+ return -EINVAL;
+
+ if (copy_from_user(&arg, uarg, sizeof(arg)))
+ return -EFAULT;
+
+ return ctx->teedev->desc->ops->cancel_req(ctx, arg.cancel_id,
+ arg.session);
+}
+
+static int
+tee_ioctl_close_session(struct tee_context *ctx,
+ struct tee_ioctl_close_session_arg __user *uarg)
+{
+ struct tee_ioctl_close_session_arg arg;
+
+ if (!ctx->teedev->desc->ops->close_session)
+ return -EINVAL;
+
+ if (copy_from_user(&arg, uarg, sizeof(arg)))
+ return -EFAULT;
+
+ return ctx->teedev->desc->ops->close_session(ctx, arg.session);
+}
+
+static int params_to_supp(struct tee_context *ctx,
+ struct tee_ioctl_param __user *uparams,
+ size_t num_params, struct tee_param *params)
+{
+ size_t n;
+
+ for (n = 0; n < num_params; n++) {
+ struct tee_ioctl_param ip;
+ struct tee_param *p = params + n;
+
+ ip.attr = p->attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK;
+ switch (p->attr) {
+ case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT:
+ case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
+ ip.a = p->u.value.a;
+ ip.b = p->u.value.b;
+ ip.c = p->u.value.c;
+ break;
+ case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT:
+ case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
+ case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
+ ip.b = p->u.memref.size;
+ if (!p->u.memref.shm) {
+ ip.a = 0;
+ ip.c = (u64)-1; /* invalid shm id */
+ break;
+ }
+ ip.a = p->u.memref.shm_offs;
+ ip.c = p->u.memref.shm->id;
+ break;
+ default:
+ ip.a = 0;
+ ip.b = 0;
+ ip.c = 0;
+ break;
+ }
+
+ if (copy_to_user(uparams + n, &ip, sizeof(ip)))
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int tee_ioctl_supp_recv(struct tee_context *ctx,
+ struct tee_ioctl_buf_data __user *ubuf)
+{
+ int rc;
+ struct tee_ioctl_buf_data buf;
+ struct tee_iocl_supp_recv_arg __user *uarg;
+ struct tee_param *params;
+ u32 num_params;
+ u32 func;
+
+ if (!ctx->teedev->desc->ops->supp_recv)
+ return -EINVAL;
+
+ if (copy_from_user(&buf, ubuf, sizeof(buf)))
+ return -EFAULT;
+
+ if (buf.buf_len > TEE_MAX_ARG_SIZE ||
+ buf.buf_len < sizeof(struct tee_iocl_supp_recv_arg))
+ return -EINVAL;
+
+ uarg = u64_to_user_ptr(buf.buf_ptr);
+ if (get_user(num_params, &uarg->num_params))
+ return -EFAULT;
+
+ if (sizeof(*uarg) + TEE_IOCTL_PARAM_SIZE(num_params) != buf.buf_len)
+ return -EINVAL;
+
+ params = kcalloc(num_params, sizeof(struct tee_param), GFP_KERNEL);
+ if (!params)
+ return -ENOMEM;
+
+ rc = ctx->teedev->desc->ops->supp_recv(ctx, &func, &num_params, params);
+ if (rc)
+ goto out;
+
+ if (put_user(func, &uarg->func) ||
+ put_user(num_params, &uarg->num_params)) {
+ rc = -EFAULT;
+ goto out;
+ }
+
+ rc = params_to_supp(ctx, uarg->params, num_params, params);
+out:
+ kfree(params);
+ return rc;
+}
+
+static int params_from_supp(struct tee_param *params, size_t num_params,
+ struct tee_ioctl_param __user *uparams)
+{
+ size_t n;
+
+ for (n = 0; n < num_params; n++) {
+ struct tee_param *p = params + n;
+ struct tee_ioctl_param ip;
+
+ if (copy_from_user(&ip, uparams + n, sizeof(ip)))
+ return -EFAULT;
+
+ /* All unused attribute bits have to be zero */
+ if (ip.attr & ~TEE_IOCTL_PARAM_ATTR_TYPE_MASK)
+ return -EINVAL;
+
+ p->attr = ip.attr;
+ switch (ip.attr) {
+ case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
+ case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
+ /* Only out and in/out values can be updated */
+ p->u.value.a = ip.a;
+ p->u.value.b = ip.b;
+ p->u.value.c = ip.c;
+ break;
+ case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
+ case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
+ /*
+ * Only the size of the memref can be updated.
+ * Since we don't have access to the original
+ * parameters here, only store the supplied size.
+ * The driver will copy the updated size into the
+ * original parameters.
+ */
+ p->u.memref.shm = NULL;
+ p->u.memref.shm_offs = 0;
+ p->u.memref.size = ip.b;
+ break;
+ default:
+ memset(&p->u, 0, sizeof(p->u));
+ break;
+ }
+ }
+ return 0;
+}
+
+static int tee_ioctl_supp_send(struct tee_context *ctx,
+ struct tee_ioctl_buf_data __user *ubuf)
+{
+ long rc;
+ struct tee_ioctl_buf_data buf;
+ struct tee_iocl_supp_send_arg __user *uarg;
+ struct tee_param *params;
+ u32 num_params;
+ u32 ret;
+
+ /* Not valid for this driver */
+ if (!ctx->teedev->desc->ops->supp_send)
+ return -EINVAL;
+
+ if (copy_from_user(&buf, ubuf, sizeof(buf)))
+ return -EFAULT;
+
+ if (buf.buf_len > TEE_MAX_ARG_SIZE ||
+ buf.buf_len < sizeof(struct tee_iocl_supp_send_arg))
+ return -EINVAL;
+
+ uarg = u64_to_user_ptr(buf.buf_ptr);
+ if (get_user(ret, &uarg->ret) ||
+ get_user(num_params, &uarg->num_params))
+ return -EFAULT;
+
+ if (sizeof(*uarg) + TEE_IOCTL_PARAM_SIZE(num_params) > buf.buf_len)
+ return -EINVAL;
+
+ params = kcalloc(num_params, sizeof(struct tee_param), GFP_KERNEL);
+ if (!params)
+ return -ENOMEM;
+
+ rc = params_from_supp(params, num_params, uarg->params);
+ if (rc)
+ goto out;
+
+ rc = ctx->teedev->desc->ops->supp_send(ctx, ret, num_params, params);
+out:
+ kfree(params);
+ return rc;
+}
+
+static long tee_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ struct tee_context *ctx = filp->private_data;
+ void __user *uarg = (void __user *)arg;
+
+ switch (cmd) {
+ case TEE_IOC_VERSION:
+ return tee_ioctl_version(ctx, uarg);
+ case TEE_IOC_SHM_ALLOC:
+ return tee_ioctl_shm_alloc(ctx, uarg);
+ case TEE_IOC_OPEN_SESSION:
+ return tee_ioctl_open_session(ctx, uarg);
+ case TEE_IOC_INVOKE:
+ return tee_ioctl_invoke(ctx, uarg);
+ case TEE_IOC_CANCEL:
+ return tee_ioctl_cancel(ctx, uarg);
+ case TEE_IOC_CLOSE_SESSION:
+ return tee_ioctl_close_session(ctx, uarg);
+ case TEE_IOC_SUPPL_RECV:
+ return tee_ioctl_supp_recv(ctx, uarg);
+ case TEE_IOC_SUPPL_SEND:
+ return tee_ioctl_supp_send(ctx, uarg);
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct file_operations tee_fops = {
+ .owner = THIS_MODULE,
+ .open = tee_open,
+ .release = tee_release,
+ .unlocked_ioctl = tee_ioctl,
+ .compat_ioctl = tee_ioctl,
+};
+
+static void tee_release_device(struct device *dev)
+{
+ struct tee_device *teedev = container_of(dev, struct tee_device, dev);
+
+ spin_lock(&driver_lock);
+ clear_bit(teedev->id, dev_mask);
+ spin_unlock(&driver_lock);
+ mutex_destroy(&teedev->mutex);
+ idr_destroy(&teedev->idr);
+ kfree(teedev);
+}
+
+/**
+ * tee_device_alloc() - Allocate a new struct tee_device instance
+ * @teedesc: Descriptor for this driver
+ * @dev: Parent device for this device
+ * @pool: Shared memory pool, NULL if not used
+ * @driver_data: Private driver data for this device
+ *
+ * Allocates a new struct tee_device instance. The device is
+ * removed by tee_device_unregister().
+ *
+ * @returns a pointer to a 'struct tee_device' or an ERR_PTR on failure
+ */
+struct tee_device *tee_device_alloc(const struct tee_desc *teedesc,
+ struct device *dev,
+ struct tee_shm_pool *pool,
+ void *driver_data)
+{
+ struct tee_device *teedev;
+ void *ret;
+ int rc;
+ int offs = 0;
+
+ if (!teedesc || !teedesc->name || !teedesc->ops ||
+ !teedesc->ops->get_version || !teedesc->ops->open ||
+ !teedesc->ops->release || !pool)
+ return ERR_PTR(-EINVAL);
+
+ teedev = kzalloc(sizeof(*teedev), GFP_KERNEL);
+ if (!teedev) {
+ ret = ERR_PTR(-ENOMEM);
+ goto err;
+ }
+
+ if (teedesc->flags & TEE_DESC_PRIVILEGED)
+ offs = TEE_NUM_DEVICES / 2;
+
+ spin_lock(&driver_lock);
+ teedev->id = find_next_zero_bit(dev_mask, TEE_NUM_DEVICES, offs);
+ if (teedev->id < TEE_NUM_DEVICES)
+ set_bit(teedev->id, dev_mask);
+ spin_unlock(&driver_lock);
+
+ if (teedev->id >= TEE_NUM_DEVICES) {
+ ret = ERR_PTR(-ENOMEM);
+ goto err;
+ }
+
+ snprintf(teedev->name, sizeof(teedev->name), "tee%s%d",
+ teedesc->flags & TEE_DESC_PRIVILEGED ? "priv" : "",
+ teedev->id - offs);
+
+ teedev->dev.class = tee_class;
+ teedev->dev.release = tee_release_device;
+ teedev->dev.parent = dev;
+
+ teedev->dev.devt = MKDEV(MAJOR(tee_devt), teedev->id);
+
+ rc = dev_set_name(&teedev->dev, "%s", teedev->name);
+ if (rc) {
+ ret = ERR_PTR(rc);
+ goto err_devt;
+ }
+
+ cdev_init(&teedev->cdev, &tee_fops);
+ teedev->cdev.owner = teedesc->owner;
+ teedev->cdev.kobj.parent = &teedev->dev.kobj;
+
+ dev_set_drvdata(&teedev->dev, driver_data);
+ device_initialize(&teedev->dev);
+
+ /* 1 as tee_device_unregister() does one final tee_device_put() */
+ teedev->num_users = 1;
+ init_completion(&teedev->c_no_users);
+ mutex_init(&teedev->mutex);
+ idr_init(&teedev->idr);
+
+ teedev->desc = teedesc;
+ teedev->pool = pool;
+
+ return teedev;
+err_devt:
+ unregister_chrdev_region(teedev->dev.devt, 1);
+err:
+ pr_err("could not register %s driver\n",
+ teedesc->flags & TEE_DESC_PRIVILEGED ? "privileged" : "client");
+ if (teedev && teedev->id < TEE_NUM_DEVICES) {
+ spin_lock(&driver_lock);
+ clear_bit(teedev->id, dev_mask);
+ spin_unlock(&driver_lock);
+ }
+ kfree(teedev);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(tee_device_alloc);
+
+static ssize_t implementation_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct tee_device *teedev = container_of(dev, struct tee_device, dev);
+ struct tee_ioctl_version_data vers;
+
+ teedev->desc->ops->get_version(teedev, &vers);
+ return scnprintf(buf, PAGE_SIZE, "%d\n", vers.impl_id);
+}
+static DEVICE_ATTR_RO(implementation_id);
+
+static struct attribute *tee_dev_attrs[] = {
+ &dev_attr_implementation_id.attr,
+ NULL
+};
+
+static const struct attribute_group tee_dev_group = {
+ .attrs = tee_dev_attrs,
+};
+
+/**
+ * tee_device_register() - Registers a TEE device
+ * @teedev: Device to register
+ *
+ * tee_device_unregister() needs to be called to remove the @teedev if
+ * this function fails.
+ *
+ * @returns < 0 on failure
+ */
+int tee_device_register(struct tee_device *teedev)
+{
+ int rc;
+
+ if (teedev->flags & TEE_DEVICE_FLAG_REGISTERED) {
+ dev_err(&teedev->dev, "attempt to register twice\n");
+ return -EINVAL;
+ }
+
+ rc = cdev_add(&teedev->cdev, teedev->dev.devt, 1);
+ if (rc) {
+ dev_err(&teedev->dev,
+ "unable to cdev_add() %s, major %d, minor %d, err=%d\n",
+ teedev->name, MAJOR(teedev->dev.devt),
+ MINOR(teedev->dev.devt), rc);
+ return rc;
+ }
+
+ rc = device_add(&teedev->dev);
+ if (rc) {
+ dev_err(&teedev->dev,
+ "unable to device_add() %s, major %d, minor %d, err=%d\n",
+ teedev->name, MAJOR(teedev->dev.devt),
+ MINOR(teedev->dev.devt), rc);
+ goto err_device_add;
+ }
+
+ rc = sysfs_create_group(&teedev->dev.kobj, &tee_dev_group);
+ if (rc) {
+ dev_err(&teedev->dev,
+ "failed to create sysfs attributes, err=%d\n", rc);
+ goto err_sysfs_create_group;
+ }
+
+ teedev->flags |= TEE_DEVICE_FLAG_REGISTERED;
+ return 0;
+
+err_sysfs_create_group:
+ device_del(&teedev->dev);
+err_device_add:
+ cdev_del(&teedev->cdev);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(tee_device_register);
+
+void tee_device_put(struct tee_device *teedev)
+{
+ mutex_lock(&teedev->mutex);
+ /* Shouldn't put in this state */
+ if (!WARN_ON(!teedev->desc)) {
+ teedev->num_users--;
+ if (!teedev->num_users) {
+ teedev->desc = NULL;
+ complete(&teedev->c_no_users);
+ }
+ }
+ mutex_unlock(&teedev->mutex);
+}
+
+bool tee_device_get(struct tee_device *teedev)
+{
+ mutex_lock(&teedev->mutex);
+ if (!teedev->desc) {
+ mutex_unlock(&teedev->mutex);
+ return false;
+ }
+ teedev->num_users++;
+ mutex_unlock(&teedev->mutex);
+ return true;
+}
+
+/**
+ * tee_device_unregister() - Removes a TEE device
+ * @teedev: Device to unregister
+ *
+ * This function should be called to remove the @teedev even if
+ * tee_device_register() hasn't been called yet. Does nothing if
+ * @teedev is NULL.
+ */
+void tee_device_unregister(struct tee_device *teedev)
+{
+ if (!teedev)
+ return;
+
+ if (teedev->flags & TEE_DEVICE_FLAG_REGISTERED) {
+ sysfs_remove_group(&teedev->dev.kobj, &tee_dev_group);
+ cdev_del(&teedev->cdev);
+ device_del(&teedev->dev);
+ }
+
+ tee_device_put(teedev);
+ wait_for_completion(&teedev->c_no_users);
+
+ /*
+ * No need to take a mutex any longer now since teedev->desc was
+ * set to NULL before teedev->c_no_users was completed.
+ */
+
+ teedev->pool = NULL;
+
+ put_device(&teedev->dev);
+}
+EXPORT_SYMBOL_GPL(tee_device_unregister);
+
+/**
+ * tee_get_drvdata() - Return driver_data pointer
+ * @teedev: Device containing the driver_data pointer
+ * @returns the driver_data pointer supplied to tee_device_alloc().
+ */
+void *tee_get_drvdata(struct tee_device *teedev)
+{
+ return dev_get_drvdata(&teedev->dev);
+}
+EXPORT_SYMBOL_GPL(tee_get_drvdata);
+
+static int __init tee_init(void)
+{
+ int rc;
+
+ tee_class = class_create(THIS_MODULE, "tee");
+ if (IS_ERR(tee_class)) {
+ pr_err("couldn't create class\n");
+ return PTR_ERR(tee_class);
+ }
+
+ rc = alloc_chrdev_region(&tee_devt, 0, TEE_NUM_DEVICES, "tee");
+ if (rc) {
+ pr_err("failed to allocate char dev region\n");
+ class_destroy(tee_class);
+ tee_class = NULL;
+ }
+
+ return rc;
+}
+
+static void __exit tee_exit(void)
+{
+ class_destroy(tee_class);
+ tee_class = NULL;
+ unregister_chrdev_region(tee_devt, TEE_NUM_DEVICES);
+}
+
+subsys_initcall(tee_init);
+module_exit(tee_exit);
+
+MODULE_AUTHOR("Linaro");
+MODULE_DESCRIPTION("TEE Driver");
+MODULE_VERSION("1.0");
+MODULE_LICENSE("GPL v2");
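
To show how the ioctl surface above is meant to be driven, here is a rough user-space sketch. It assumes the uapi header added elsewhere in this series is installed as <linux/tee.h> and that an unprivileged device appears as /dev/tee0; it is an illustration, not part of the patch.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/tee.h>

int main(void)
{
        struct tee_ioctl_version_data vers = { 0 };
        struct tee_ioctl_shm_alloc_data shm = { .size = 4096 };
        int fd, shm_fd;

        fd = open("/dev/tee0", O_RDWR);
        if (fd < 0)
                return 1;

        if (ioctl(fd, TEE_IOC_VERSION, &vers) == 0)
                printf("impl_id %u\n", vers.impl_id);

        /* TEE_IOC_SHM_ALLOC returns a dma-buf fd that can be mmap()ed */
        shm_fd = ioctl(fd, TEE_IOC_SHM_ALLOC, &shm);
        if (shm_fd >= 0) {
                void *p = mmap(NULL, shm.size, PROT_READ | PROT_WRITE,
                               MAP_SHARED, shm_fd, 0);

                /* reference this buffer by shm.id in later ioctls */
                if (p != MAP_FAILED)
                        munmap(p, shm.size);
                close(shm_fd);
        }

        close(fd);
        return 0;
}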
diff --git a/drivers/tee/tee_private.h b/drivers/tee/tee_private.h
new file mode 100644
index 000000000000..21cb6be8bce9
--- /dev/null
+++ b/drivers/tee/tee_private.h
@@ -0,0 +1,129 @@
+/*
+ * Copyright (c) 2015-2016, Linaro Limited
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef TEE_PRIVATE_H
+#define TEE_PRIVATE_H
+
+#include <linux/cdev.h>
+#include <linux/completion.h>
+#include <linux/device.h>
+#include <linux/kref.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+
+struct tee_device;
+
+/**
+ * struct tee_shm - shared memory object
+ * @teedev: device used to allocate the object
+ * @ctx: context using the object, if NULL the context is gone
+ * @link: link element
+ * @paddr: physical address of the shared memory
+ * @kaddr: virtual address of the shared memory
+ * @size: size of shared memory
+ * @dmabuf: dmabuf used for exporting to user space
+ * @flags: defined by TEE_SHM_* in tee_drv.h
+ * @id: unique id of a shared memory object on this device
+ */
+struct tee_shm {
+ struct tee_device *teedev;
+ struct tee_context *ctx;
+ struct list_head link;
+ phys_addr_t paddr;
+ void *kaddr;
+ size_t size;
+ struct dma_buf *dmabuf;
+ u32 flags;
+ int id;
+};
+
+struct tee_shm_pool_mgr;
+
+/**
+ * struct tee_shm_pool_mgr_ops - shared memory pool manager operations
+ * @alloc: called when allocating shared memory
+ * @free: called when freeing shared memory
+ */
+struct tee_shm_pool_mgr_ops {
+ int (*alloc)(struct tee_shm_pool_mgr *poolmgr, struct tee_shm *shm,
+ size_t size);
+ void (*free)(struct tee_shm_pool_mgr *poolmgr, struct tee_shm *shm);
+};
+
+/**
+ * struct tee_shm_pool_mgr - shared memory manager
+ * @ops: operations
+ * @private_data: private data for the shared memory manager
+ */
+struct tee_shm_pool_mgr {
+ const struct tee_shm_pool_mgr_ops *ops;
+ void *private_data;
+};
+
+/**
+ * struct tee_shm_pool - shared memory pool
+ * @private_mgr: pool manager for shared memory only between kernel
+ * and secure world
+ * @dma_buf_mgr: pool manager for shared memory exported to user space
+ * @destroy: called when destroying the pool
+ * @private_data: private data for the pool
+ */
+struct tee_shm_pool {
+ struct tee_shm_pool_mgr private_mgr;
+ struct tee_shm_pool_mgr dma_buf_mgr;
+ void (*destroy)(struct tee_shm_pool *pool);
+ void *private_data;
+};
+
+#define TEE_DEVICE_FLAG_REGISTERED 0x1
+#define TEE_MAX_DEV_NAME_LEN 32
+
+/**
+ * struct tee_device - TEE Device representation
+ * @name: name of device
+ * @desc: description of device
+ * @id: unique id of device
+ * @flags: represented by TEE_DEVICE_FLAG_REGISTERED above
+ * @dev: embedded basic device structure
+ * @cdev: embedded cdev
+ * @num_users: number of active users of this device
+ * @c_no_users: completion used when unregistering the device
+ * @mutex: mutex protecting @num_users and @idr
+ * @idr: register of shared memory object allocated on this device
+ * @pool: shared memory pool
+ */
+struct tee_device {
+ char name[TEE_MAX_DEV_NAME_LEN];
+ const struct tee_desc *desc;
+ int id;
+ unsigned int flags;
+
+ struct device dev;
+ struct cdev cdev;
+
+ size_t num_users;
+ struct completion c_no_users;
+ struct mutex mutex; /* protects num_users and idr */
+
+ struct idr idr;
+ struct tee_shm_pool *pool;
+};
+
+int tee_shm_init(void);
+
+int tee_shm_get_fd(struct tee_shm *shm);
+
+bool tee_device_get(struct tee_device *teedev);
+void tee_device_put(struct tee_device *teedev);
+
+#endif /*TEE_PRIVATE_H*/
diff --git a/drivers/tee/tee_shm.c b/drivers/tee/tee_shm.c
new file mode 100644
index 000000000000..d356d7f025eb
--- /dev/null
+++ b/drivers/tee/tee_shm.c
@@ -0,0 +1,358 @@
+/*
+ * Copyright (c) 2015-2016, Linaro Limited
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/device.h>
+#include <linux/dma-buf.h>
+#include <linux/fdtable.h>
+#include <linux/idr.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/tee_drv.h>
+#include "tee_private.h"
+
+static void tee_shm_release(struct tee_shm *shm)
+{
+ struct tee_device *teedev = shm->teedev;
+ struct tee_shm_pool_mgr *poolm;
+
+ mutex_lock(&teedev->mutex);
+ idr_remove(&teedev->idr, shm->id);
+ if (shm->ctx)
+ list_del(&shm->link);
+ mutex_unlock(&teedev->mutex);
+
+ if (shm->flags & TEE_SHM_DMA_BUF)
+ poolm = &teedev->pool->dma_buf_mgr;
+ else
+ poolm = &teedev->pool->private_mgr;
+
+ poolm->ops->free(poolm, shm);
+ kfree(shm);
+
+ tee_device_put(teedev);
+}
+
+static struct sg_table *tee_shm_op_map_dma_buf(struct dma_buf_attachment
+ *attach, enum dma_data_direction dir)
+{
+ return NULL;
+}
+
+static void tee_shm_op_unmap_dma_buf(struct dma_buf_attachment *attach,
+ struct sg_table *table,
+ enum dma_data_direction dir)
+{
+}
+
+static void tee_shm_op_release(struct dma_buf *dmabuf)
+{
+ struct tee_shm *shm = dmabuf->priv;
+
+ tee_shm_release(shm);
+}
+
+static void *tee_shm_op_map_atomic(struct dma_buf *dmabuf, unsigned long pgnum)
+{
+ return NULL;
+}
+
+static void *tee_shm_op_map(struct dma_buf *dmabuf, unsigned long pgnum)
+{
+ return NULL;
+}
+
+static int tee_shm_op_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
+{
+ struct tee_shm *shm = dmabuf->priv;
+ size_t size = vma->vm_end - vma->vm_start;
+
+ return remap_pfn_range(vma, vma->vm_start, shm->paddr >> PAGE_SHIFT,
+ size, vma->vm_page_prot);
+}
+
+static struct dma_buf_ops tee_shm_dma_buf_ops = {
+ .map_dma_buf = tee_shm_op_map_dma_buf,
+ .unmap_dma_buf = tee_shm_op_unmap_dma_buf,
+ .release = tee_shm_op_release,
+ .map_atomic = tee_shm_op_map_atomic,
+ .map = tee_shm_op_map,
+ .mmap = tee_shm_op_mmap,
+};
+
+/**
+ * tee_shm_alloc() - Allocate shared memory
+ * @ctx: Context that allocates the shared memory
+ * @size: Requested size of shared memory
+ * @flags: Flags setting properties for the requested shared memory.
+ *
+ * Memory allocated as global shared memory is automatically freed when the
+ * TEE file pointer is closed. The @flags field uses the bits defined by
+ * TEE_SHM_* in <linux/tee_drv.h>. TEE_SHM_MAPPED must currently always be
+ * set. If TEE_SHM_DMA_BUF is set, global shared memory is allocated and
+ * associated with a dma-buf handle; otherwise driver private memory is used.
+ */
+struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags)
+{
+ struct tee_device *teedev = ctx->teedev;
+ struct tee_shm_pool_mgr *poolm = NULL;
+ struct tee_shm *shm;
+ void *ret;
+ int rc;
+
+ if (!(flags & TEE_SHM_MAPPED)) {
+ dev_err(teedev->dev.parent,
+ "only mapped allocations supported\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ if ((flags & ~(TEE_SHM_MAPPED | TEE_SHM_DMA_BUF))) {
+ dev_err(teedev->dev.parent, "invalid shm flags 0x%x", flags);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (!tee_device_get(teedev))
+ return ERR_PTR(-EINVAL);
+
+ if (!teedev->pool) {
+ /* teedev has been detached from driver */
+ ret = ERR_PTR(-EINVAL);
+ goto err_dev_put;
+ }
+
+ shm = kzalloc(sizeof(*shm), GFP_KERNEL);
+ if (!shm) {
+ ret = ERR_PTR(-ENOMEM);
+ goto err_dev_put;
+ }
+
+ shm->flags = flags;
+ shm->teedev = teedev;
+ shm->ctx = ctx;
+ if (flags & TEE_SHM_DMA_BUF)
+ poolm = &teedev->pool->dma_buf_mgr;
+ else
+ poolm = &teedev->pool->private_mgr;
+
+ rc = poolm->ops->alloc(poolm, shm, size);
+ if (rc) {
+ ret = ERR_PTR(rc);
+ goto err_kfree;
+ }
+
+ mutex_lock(&teedev->mutex);
+ shm->id = idr_alloc(&teedev->idr, shm, 1, 0, GFP_KERNEL);
+ mutex_unlock(&teedev->mutex);
+ if (shm->id < 0) {
+ ret = ERR_PTR(shm->id);
+ goto err_pool_free;
+ }
+
+ if (flags & TEE_SHM_DMA_BUF) {
+ DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+
+ exp_info.ops = &tee_shm_dma_buf_ops;
+ exp_info.size = shm->size;
+ exp_info.flags = O_RDWR;
+ exp_info.priv = shm;
+
+ shm->dmabuf = dma_buf_export(&exp_info);
+ if (IS_ERR(shm->dmabuf)) {
+ ret = ERR_CAST(shm->dmabuf);
+ goto err_rem;
+ }
+ }
+ mutex_lock(&teedev->mutex);
+ list_add_tail(&shm->link, &ctx->list_shm);
+ mutex_unlock(&teedev->mutex);
+
+ return shm;
+err_rem:
+ mutex_lock(&teedev->mutex);
+ idr_remove(&teedev->idr, shm->id);
+ mutex_unlock(&teedev->mutex);
+err_pool_free:
+ poolm->ops->free(poolm, shm);
+err_kfree:
+ kfree(shm);
+err_dev_put:
+ tee_device_put(teedev);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(tee_shm_alloc);
+
+/**
+ * tee_shm_get_fd() - Increase reference count and return file descriptor
+ * @shm: Shared memory handle
+ * @returns user space file descriptor to shared memory
+ */
+int tee_shm_get_fd(struct tee_shm *shm)
+{
+ u32 req_flags = TEE_SHM_MAPPED | TEE_SHM_DMA_BUF;
+ int fd;
+
+ if ((shm->flags & req_flags) != req_flags)
+ return -EINVAL;
+
+ fd = dma_buf_fd(shm->dmabuf, O_CLOEXEC);
+ if (fd >= 0)
+ get_dma_buf(shm->dmabuf);
+ return fd;
+}
+
+/**
+ * tee_shm_free() - Free shared memory
+ * @shm: Handle to shared memory to free
+ */
+void tee_shm_free(struct tee_shm *shm)
+{
+ /*
+ * dma_buf_put() decreases the dmabuf reference counter and will
+ * call tee_shm_release() when the last reference is gone.
+ *
+ * In the case of driver private memory we call tee_shm_release
+ * directly instead as it doesn't have a reference counter.
+ */
+ if (shm->flags & TEE_SHM_DMA_BUF)
+ dma_buf_put(shm->dmabuf);
+ else
+ tee_shm_release(shm);
+}
+EXPORT_SYMBOL_GPL(tee_shm_free);
+
+/**
+ * tee_shm_va2pa() - Get physical address of a virtual address
+ * @shm: Shared memory handle
+ * @va: Virtual address to translate
+ * @pa: Returned physical address
+ * @returns 0 on success and < 0 on failure
+ */
+int tee_shm_va2pa(struct tee_shm *shm, void *va, phys_addr_t *pa)
+{
+ /* Check that we're in the range of the shm */
+ if ((char *)va < (char *)shm->kaddr)
+ return -EINVAL;
+ if ((char *)va >= ((char *)shm->kaddr + shm->size))
+ return -EINVAL;
+
+ return tee_shm_get_pa(
+ shm, (unsigned long)va - (unsigned long)shm->kaddr, pa);
+}
+EXPORT_SYMBOL_GPL(tee_shm_va2pa);
+
+/**
+ * tee_shm_pa2va() - Get virtual address of a physical address
+ * @shm: Shared memory handle
+ * @pa: Physical address to translate
+ * @va: Returned virtual address
+ * @returns 0 on success and < 0 on failure
+ */
+int tee_shm_pa2va(struct tee_shm *shm, phys_addr_t pa, void **va)
+{
+ /* Check that we're in the range of the shm */
+ if (pa < shm->paddr)
+ return -EINVAL;
+ if (pa >= (shm->paddr + shm->size))
+ return -EINVAL;
+
+ if (va) {
+ void *v = tee_shm_get_va(shm, pa - shm->paddr);
+
+ if (IS_ERR(v))
+ return PTR_ERR(v);
+ *va = v;
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(tee_shm_pa2va);
+
+/**
+ * tee_shm_get_va() - Get virtual address of a shared memory plus an offset
+ * @shm: Shared memory handle
+ * @offs: Offset from start of this shared memory
+ * @returns virtual address of the shared memory + offs if offs is within
+ * the bounds of this shared memory, else an ERR_PTR
+ */
+void *tee_shm_get_va(struct tee_shm *shm, size_t offs)
+{
+ if (offs >= shm->size)
+ return ERR_PTR(-EINVAL);
+ return (char *)shm->kaddr + offs;
+}
+EXPORT_SYMBOL_GPL(tee_shm_get_va);
+
+/**
+ * tee_shm_get_pa() - Get physical address of a shared memory plus an offset
+ * @shm: Shared memory handle
+ * @offs: Offset from start of this shared memory
+ * @pa: Physical address to return
+ * @returns 0 if offs is within the bounds of this shared memory, else an
+ * error code.
+ */
+int tee_shm_get_pa(struct tee_shm *shm, size_t offs, phys_addr_t *pa)
+{
+ if (offs >= shm->size)
+ return -EINVAL;
+ if (pa)
+ *pa = shm->paddr + offs;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(tee_shm_get_pa);
+
+/**
+ * tee_shm_get_from_id() - Find shared memory object and increase reference
+ * count
+ * @ctx: Context owning the shared memory
+ * @id: Id of shared memory object
+ * @returns a pointer to 'struct tee_shm' on success or an ERR_PTR on failure
+ */
+struct tee_shm *tee_shm_get_from_id(struct tee_context *ctx, int id)
+{
+ struct tee_device *teedev;
+ struct tee_shm *shm;
+
+ if (!ctx)
+ return ERR_PTR(-EINVAL);
+
+ teedev = ctx->teedev;
+ mutex_lock(&teedev->mutex);
+ shm = idr_find(&teedev->idr, id);
+ if (!shm || shm->ctx != ctx)
+ shm = ERR_PTR(-EINVAL);
+ else if (shm->flags & TEE_SHM_DMA_BUF)
+ get_dma_buf(shm->dmabuf);
+ mutex_unlock(&teedev->mutex);
+ return shm;
+}
+EXPORT_SYMBOL_GPL(tee_shm_get_from_id);
+
+/**
+ * tee_shm_get_id() - Get id of a shared memory object
+ * @shm: Shared memory handle
+ * @returns id
+ */
+int tee_shm_get_id(struct tee_shm *shm)
+{
+ return shm->id;
+}
+EXPORT_SYMBOL_GPL(tee_shm_get_id);
+
+/**
+ * tee_shm_put() - Decrease reference count on a shared memory handle
+ * @shm: Shared memory handle
+ */
+void tee_shm_put(struct tee_shm *shm)
+{
+ if (shm->flags & TEE_SHM_DMA_BUF)
+ dma_buf_put(shm->dmabuf);
+}
+EXPORT_SYMBOL_GPL(tee_shm_put);
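
As a kernel-side illustration of the helpers above, the sketch below allocates driver-private shared memory, resolves its virtual and physical addresses, and frees it again. The function itself and the way @ctx is obtained are hypothetical; only the calls into the API exported here are real.

static int tee_shm_example(struct tee_context *ctx)
{
        struct tee_shm *shm;
        phys_addr_t pa;
        void *va;
        int rc;

        shm = tee_shm_alloc(ctx, 4096, TEE_SHM_MAPPED);
        if (IS_ERR(shm))
                return PTR_ERR(shm);

        va = tee_shm_get_va(shm, 0);
        if (IS_ERR(va)) {
                rc = PTR_ERR(va);
                goto out;
        }

        rc = tee_shm_get_pa(shm, 0, &pa);
        if (rc)
                goto out;

        memset(va, 0, 4096);    /* fill the buffer, hand @pa to secure world */
out:
        tee_shm_free(shm);
        return rc;
}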
diff --git a/drivers/tee/tee_shm_pool.c b/drivers/tee/tee_shm_pool.c
new file mode 100644
index 000000000000..fb4f8522a526
--- /dev/null
+++ b/drivers/tee/tee_shm_pool.c
@@ -0,0 +1,156 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/device.h>
+#include <linux/dma-buf.h>
+#include <linux/genalloc.h>
+#include <linux/slab.h>
+#include <linux/tee_drv.h>
+#include "tee_private.h"
+
+static int pool_op_gen_alloc(struct tee_shm_pool_mgr *poolm,
+ struct tee_shm *shm, size_t size)
+{
+ unsigned long va;
+ struct gen_pool *genpool = poolm->private_data;
+ size_t s = roundup(size, 1 << genpool->min_alloc_order);
+
+ va = gen_pool_alloc(genpool, s);
+ if (!va)
+ return -ENOMEM;
+
+ memset((void *)va, 0, s);
+ shm->kaddr = (void *)va;
+ shm->paddr = gen_pool_virt_to_phys(genpool, va);
+ shm->size = s;
+ return 0;
+}
+
+static void pool_op_gen_free(struct tee_shm_pool_mgr *poolm,
+ struct tee_shm *shm)
+{
+ gen_pool_free(poolm->private_data, (unsigned long)shm->kaddr,
+ shm->size);
+ shm->kaddr = NULL;
+}
+
+static const struct tee_shm_pool_mgr_ops pool_ops_generic = {
+ .alloc = pool_op_gen_alloc,
+ .free = pool_op_gen_free,
+};
+
+static void pool_res_mem_destroy(struct tee_shm_pool *pool)
+{
+ gen_pool_destroy(pool->private_mgr.private_data);
+ gen_pool_destroy(pool->dma_buf_mgr.private_data);
+}
+
+static int pool_res_mem_mgr_init(struct tee_shm_pool_mgr *mgr,
+ struct tee_shm_pool_mem_info *info,
+ int min_alloc_order)
+{
+ size_t page_mask = PAGE_SIZE - 1;
+ struct gen_pool *genpool = NULL;
+ int rc;
+
+ /*
+ * Start and end must be page aligned
+ */
+ if ((info->vaddr & page_mask) || (info->paddr & page_mask) ||
+ (info->size & page_mask))
+ return -EINVAL;
+
+ genpool = gen_pool_create(min_alloc_order, -1);
+ if (!genpool)
+ return -ENOMEM;
+
+ gen_pool_set_algo(genpool, gen_pool_best_fit, NULL);
+ rc = gen_pool_add_virt(genpool, info->vaddr, info->paddr, info->size,
+ -1);
+ if (rc) {
+ gen_pool_destroy(genpool);
+ return rc;
+ }
+
+ mgr->private_data = genpool;
+ mgr->ops = &pool_ops_generic;
+ return 0;
+}
+
+/**
+ * tee_shm_pool_alloc_res_mem() - Create a shared memory pool from reserved
+ * memory range
+ * @priv_info: Information for driver private shared memory pool
+ * @dmabuf_info: Information for dma-buf shared memory pool
+ *
+ * Start and end of the pools must be page aligned.
+ *
+ * Allocation with the flag TEE_SHM_DMA_BUF set will use the range supplied
+ * in @dmabuf_info, others will use the range provided by @priv_info.
+ *
+ * @returns pointer to a 'struct tee_shm_pool' or an ERR_PTR on failure.
+ */
+struct tee_shm_pool *
+tee_shm_pool_alloc_res_mem(struct tee_shm_pool_mem_info *priv_info,
+ struct tee_shm_pool_mem_info *dmabuf_info)
+{
+ struct tee_shm_pool *pool = NULL;
+ int ret;
+
+ pool = kzalloc(sizeof(*pool), GFP_KERNEL);
+ if (!pool) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ /*
+ * Create the pool for driver private shared memory
+ */
+ ret = pool_res_mem_mgr_init(&pool->private_mgr, priv_info,
+ 3 /* 8 byte aligned */);
+ if (ret)
+ goto err;
+
+ /*
+ * Create the pool for dma_buf shared memory
+ */
+ ret = pool_res_mem_mgr_init(&pool->dma_buf_mgr, dmabuf_info,
+ PAGE_SHIFT);
+ if (ret)
+ goto err;
+
+ pool->destroy = pool_res_mem_destroy;
+ return pool;
+err:
+ if (ret == -ENOMEM)
+ pr_err("%s: can't allocate memory for res_mem shared memory pool\n", __func__);
+ if (pool && pool->private_mgr.private_data)
+ gen_pool_destroy(pool->private_mgr.private_data);
+ kfree(pool);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(tee_shm_pool_alloc_res_mem);
+
+/**
+ * tee_shm_pool_free() - Free a shared memory pool
+ * @pool: The shared memory pool to free
+ *
+ * There must be no remaining shared memory allocated from this pool when
+ * this function is called.
+ */
+void tee_shm_pool_free(struct tee_shm_pool *pool)
+{
+ pool->destroy(pool);
+ kfree(pool);
+}
+EXPORT_SYMBOL_GPL(tee_shm_pool_free);
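
Tying the pool and device management code together, a driver built on this framework would register itself roughly as sketched below. The reserved-memory addresses, the SZ_1M split, my_tee_ops and the remaining my_* names are assumptions for illustration, not part of the patch.

static const struct tee_desc my_tee_desc = {
        .name = "my-tee-clnt",
        .ops = &my_tee_ops,     /* get_version/open/release/... (hypothetical) */
        .owner = THIS_MODULE,
};

static int my_tee_probe(struct platform_device *pdev)
{
        struct tee_shm_pool_mem_info priv_info = {
                .vaddr = (unsigned long)my_shm_va,      /* hypothetical mapping */
                .paddr = my_shm_pa,
                .size = SZ_1M,
        };
        struct tee_shm_pool_mem_info dmabuf_info = {
                .vaddr = (unsigned long)my_shm_va + SZ_1M,
                .paddr = my_shm_pa + SZ_1M,
                .size = SZ_1M,
        };
        struct tee_shm_pool *pool;
        struct tee_device *teedev;
        int rc;

        pool = tee_shm_pool_alloc_res_mem(&priv_info, &dmabuf_info);
        if (IS_ERR(pool))
                return PTR_ERR(pool);

        teedev = tee_device_alloc(&my_tee_desc, &pdev->dev, pool, NULL);
        if (IS_ERR(teedev)) {
                tee_shm_pool_free(pool);
                return PTR_ERR(teedev);
        }

        rc = tee_device_register(teedev);
        if (rc) {
                tee_device_unregister(teedev);
                tee_shm_pool_free(pool);
                return rc;
        }

        /* a real driver would also keep the pool around for remove() */
        platform_set_drvdata(pdev, teedev);
        return 0;
}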
diff --git a/drivers/tty/Makefile b/drivers/tty/Makefile
index b95bed92da9f..f02becdb3e33 100644
--- a/drivers/tty/Makefile
+++ b/drivers/tty/Makefile
@@ -1,5 +1,6 @@
obj-$(CONFIG_TTY) += tty_io.o n_tty.o tty_ioctl.o tty_ldisc.o \
- tty_buffer.o tty_port.o tty_mutex.o tty_ldsem.o
+ tty_buffer.o tty_port.o tty_mutex.o \
+ tty_ldsem.o tty_baudrate.o tty_jobctrl.o
obj-$(CONFIG_LEGACY_PTYS) += pty.o
obj-$(CONFIG_UNIX98_PTYS) += pty.o
obj-$(CONFIG_AUDIT) += tty_audit.o
diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c
index b19ae36a05ec..a8d399188242 100644
--- a/drivers/tty/hvc/hvc_console.c
+++ b/drivers/tty/hvc/hvc_console.c
@@ -920,17 +920,17 @@ int hvc_remove(struct hvc_struct *hp)
tty = tty_port_tty_get(&hp->port);
+ console_lock();
spin_lock_irqsave(&hp->lock, flags);
if (hp->index < MAX_NR_HVC_CONSOLES) {
- console_lock();
vtermnos[hp->index] = -1;
cons_ops[hp->index] = NULL;
- console_unlock();
}
/* Don't whack hp->irq because tty_hangup() will need to free the irq. */
spin_unlock_irqrestore(&hp->lock, flags);
+ console_unlock();
/*
* We 'put' the instance that was grabbed when the kref instance
diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
index 7823d6d998cf..99bb875178d7 100644
--- a/drivers/tty/hvc/hvcs.c
+++ b/drivers/tty/hvc/hvcs.c
@@ -1575,7 +1575,7 @@ static int __init hvcs_module_init(void)
*/
rc = driver_create_file(&(hvcs_vio_driver.driver), &driver_attr_rescan);
if (rc)
- pr_warning("HVCS: Failed to create rescan file (err %d)\n", rc);
+ pr_warn("HVCS: Failed to create rescan file (err %d)\n", rc);
return 0;
}
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index 55577cf9b6a4..2667a205a5ab 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -89,18 +89,14 @@ module_param(debug, int, 0600);
/**
* struct gsm_mux_net - network interface
* @struct gsm_dlci* dlci
- * @struct net_device_stats stats;
*
* Created when net interface is initialized.
**/
struct gsm_mux_net {
struct kref ref;
struct gsm_dlci *dlci;
- struct net_device_stats stats;
};
-#define STATS(net) (((struct gsm_mux_net *)netdev_priv(net))->stats)
-
/*
* Each block of data we have queued to go out is in the form of
* a gsm_msg which holds everything we need in a link layer independent
@@ -2613,10 +2609,6 @@ static int gsm_mux_net_close(struct net_device *net)
return 0;
}
-static struct net_device_stats *gsm_mux_net_get_stats(struct net_device *net)
-{
- return &((struct gsm_mux_net *)netdev_priv(net))->stats;
-}
static void dlci_net_free(struct gsm_dlci *dlci)
{
if (!dlci->net) {
@@ -2660,8 +2652,8 @@ static int gsm_mux_net_start_xmit(struct sk_buff *skb,
muxnet_get(mux_net);
skb_queue_head(&dlci->skb_list, skb);
- STATS(net).tx_packets++;
- STATS(net).tx_bytes += skb->len;
+ net->stats.tx_packets++;
+ net->stats.tx_bytes += skb->len;
gsm_dlci_data_kick(dlci);
/* And tell the kernel when the last transmit started. */
netif_trans_update(net);
@@ -2676,7 +2668,7 @@ static void gsm_mux_net_tx_timeout(struct net_device *net)
dev_dbg(&net->dev, "Tx timed out.\n");
/* Update statistics */
- STATS(net).tx_errors++;
+ net->stats.tx_errors++;
}
static void gsm_mux_rx_netchar(struct gsm_dlci *dlci,
@@ -2691,7 +2683,7 @@ static void gsm_mux_rx_netchar(struct gsm_dlci *dlci,
skb = dev_alloc_skb(size + NET_IP_ALIGN);
if (!skb) {
/* We got no receive buffer. */
- STATS(net).rx_dropped++;
+ net->stats.rx_dropped++;
muxnet_put(mux_net);
return;
}
@@ -2705,8 +2697,8 @@ static void gsm_mux_rx_netchar(struct gsm_dlci *dlci,
netif_rx(skb);
/* update out statistics */
- STATS(net).rx_packets++;
- STATS(net).rx_bytes += size;
+ net->stats.rx_packets++;
+ net->stats.rx_bytes += size;
muxnet_put(mux_net);
return;
}
@@ -2718,7 +2710,6 @@ static void gsm_mux_net_init(struct net_device *net)
.ndo_stop = gsm_mux_net_close,
.ndo_start_xmit = gsm_mux_net_start_xmit,
.ndo_tx_timeout = gsm_mux_net_tx_timeout,
- .ndo_get_stats = gsm_mux_net_get_stats,
};
net->netdev_ops = &gsm_netdev_ops;
diff --git a/drivers/tty/n_hdlc.c b/drivers/tty/n_hdlc.c
index e94aea8c0d05..7b2a466616d6 100644
--- a/drivers/tty/n_hdlc.c
+++ b/drivers/tty/n_hdlc.c
@@ -939,11 +939,11 @@ static struct n_hdlc_buf *n_hdlc_buf_get(struct n_hdlc_buf_list *buf_list)
return buf;
} /* end of n_hdlc_buf_get() */
-static char hdlc_banner[] __initdata =
+static const char hdlc_banner[] __initconst =
KERN_INFO "HDLC line discipline maxframe=%u\n";
-static char hdlc_register_ok[] __initdata =
+static const char hdlc_register_ok[] __initconst =
KERN_INFO "N_HDLC line discipline registered.\n";
-static char hdlc_register_fail[] __initdata =
+static const char hdlc_register_fail[] __initconst =
KERN_ERR "error registering line discipline: %d\n";
static int __init n_hdlc_init(void)
@@ -968,9 +968,9 @@ static int __init n_hdlc_init(void)
} /* end of init_module() */
-static char hdlc_unregister_ok[] __exitdata =
+static const char hdlc_unregister_ok[] __exitdata =
KERN_INFO "N_HDLC: line discipline unregistered\n";
-static char hdlc_unregister_fail[] __exitdata =
+static const char hdlc_unregister_fail[] __exitdata =
KERN_ERR "N_HDLC: can't unregister line discipline (err = %d)\n";
static void __exit n_hdlc_exit(void)
diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
index 66b59a15780d..65799575c666 100644
--- a/drivers/tty/pty.c
+++ b/drivers/tty/pty.c
@@ -216,16 +216,11 @@ static int pty_signal(struct tty_struct *tty, int sig)
static void pty_flush_buffer(struct tty_struct *tty)
{
struct tty_struct *to = tty->link;
- struct tty_ldisc *ld;
if (!to)
return;
- ld = tty_ldisc_ref(to);
- tty_buffer_flush(to, ld);
- if (ld)
- tty_ldisc_deref(ld);
-
+ tty_buffer_flush(to, NULL);
if (to->packet) {
spin_lock_irq(&tty->ctrl_lock);
tty->ctrl_status |= TIOCPKT_FLUSHWRITE;
diff --git a/drivers/tty/serdev/core.c b/drivers/tty/serdev/core.c
index 1e1cbae3a0ea..433de5ea9b02 100644
--- a/drivers/tty/serdev/core.c
+++ b/drivers/tty/serdev/core.c
@@ -116,17 +116,41 @@ void serdev_device_close(struct serdev_device *serdev)
}
EXPORT_SYMBOL_GPL(serdev_device_close);
-int serdev_device_write_buf(struct serdev_device *serdev,
- const unsigned char *buf, size_t count)
+void serdev_device_write_wakeup(struct serdev_device *serdev)
+{
+ complete(&serdev->write_comp);
+}
+EXPORT_SYMBOL_GPL(serdev_device_write_wakeup);
+
+int serdev_device_write(struct serdev_device *serdev,
+ const unsigned char *buf, size_t count,
+ unsigned long timeout)
{
struct serdev_controller *ctrl = serdev->ctrl;
+ int ret;
- if (!ctrl || !ctrl->ops->write_buf)
+ if (!ctrl || !ctrl->ops->write_buf ||
+ (timeout && !serdev->ops->write_wakeup))
return -EINVAL;
- return ctrl->ops->write_buf(ctrl, buf, count);
+ mutex_lock(&serdev->write_lock);
+ do {
+ reinit_completion(&serdev->write_comp);
+
+ ret = ctrl->ops->write_buf(ctrl, buf, count);
+ if (ret < 0)
+ break;
+
+ buf += ret;
+ count -= ret;
+
+ } while (count &&
+ (timeout = wait_for_completion_timeout(&serdev->write_comp,
+ timeout)));
+ mutex_unlock(&serdev->write_lock);
+ return ret < 0 ? ret : (count ? -ETIMEDOUT : 0);
}
-EXPORT_SYMBOL_GPL(serdev_device_write_buf);
+EXPORT_SYMBOL_GPL(serdev_device_write);
void serdev_device_write_flush(struct serdev_device *serdev)
{
@@ -224,10 +248,7 @@ static int serdev_drv_remove(struct device *dev)
static ssize_t modalias_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- ssize_t len = of_device_get_modalias(dev, buf, PAGE_SIZE - 2);
- buf[len] = '\n';
- buf[len+1] = 0;
- return len+1;
+ return of_device_modalias(dev, buf, PAGE_SIZE);
}
static struct device_attribute serdev_device_attrs[] = {
@@ -265,6 +286,8 @@ struct serdev_device *serdev_device_alloc(struct serdev_controller *ctrl)
serdev->dev.parent = &ctrl->dev;
serdev->dev.bus = &serdev_bus_type;
serdev->dev.type = &serdev_device_type;
+ init_completion(&serdev->write_comp);
+ mutex_init(&serdev->write_lock);
return serdev;
}
EXPORT_SYMBOL_GPL(serdev_device_alloc);
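
The new serdev_device_write() above blocks until the controller has accepted the whole buffer or the timeout expires; the -EINVAL check implies that a client passing a non-zero timeout must route the controller's write wakeup into serdev_device_write_wakeup(). A hedged sketch of such a client (the example_* names are hypothetical):

static const struct serdev_device_ops example_serdev_ops = {
        /* let the core complete write_comp when the controller drains */
        .write_wakeup = serdev_device_write_wakeup,
};

static int example_send(struct serdev_device *serdev)
{
        static const unsigned char cmd[] = { 0x01, 0x02, 0x03 };

        serdev_device_set_client_ops(serdev, &example_serdev_ops);

        /* wait at most 100 ms for the whole buffer to be accepted */
        return serdev_device_write(serdev, cmd, sizeof(cmd),
                                   msecs_to_jiffies(100));
}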
diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
index 76e03a7de9cc..48a07e2f617f 100644
--- a/drivers/tty/serial/8250/8250_core.c
+++ b/drivers/tty/serial/8250/8250_core.c
@@ -218,7 +218,7 @@ static int serial_link_irq_chain(struct uart_8250_port *up)
spin_unlock_irq(&i->lock);
irq_flags |= up->port.irqflags;
ret = request_irq(up->port.irq, serial8250_interrupt,
- irq_flags, "serial", i);
+ irq_flags, up->port.name, i);
if (ret < 0)
serial_do_unlink(i, up);
}
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
index e65808c482f1..787b1160d3a5 100644
--- a/drivers/tty/serial/8250/8250_dw.c
+++ b/drivers/tty/serial/8250/8250_dw.c
@@ -530,12 +530,11 @@ static int dw8250_probe(struct platform_device *pdev)
}
data->rst = devm_reset_control_get_optional(dev, NULL);
- if (IS_ERR(data->rst) && PTR_ERR(data->rst) == -EPROBE_DEFER) {
- err = -EPROBE_DEFER;
+ if (IS_ERR(data->rst)) {
+ err = PTR_ERR(data->rst);
goto err_pclk;
}
- if (!IS_ERR(data->rst))
- reset_control_deassert(data->rst);
+ reset_control_deassert(data->rst);
dw8250_quirks(p, data);
@@ -567,8 +566,7 @@ static int dw8250_probe(struct platform_device *pdev)
return 0;
err_reset:
- if (!IS_ERR(data->rst))
- reset_control_assert(data->rst);
+ reset_control_assert(data->rst);
err_pclk:
if (!IS_ERR(data->pclk))
@@ -589,8 +587,7 @@ static int dw8250_remove(struct platform_device *pdev)
serial8250_unregister_port(data->line);
- if (!IS_ERR(data->rst))
- reset_control_assert(data->rst);
+ reset_control_assert(data->rst);
if (!IS_ERR(data->pclk))
clk_disable_unprepare(data->pclk);
diff --git a/drivers/tty/serial/8250/8250_early.c b/drivers/tty/serial/8250/8250_early.c
index 85a12f032402..82fc48eca1df 100644
--- a/drivers/tty/serial/8250/8250_early.c
+++ b/drivers/tty/serial/8250/8250_early.c
@@ -39,6 +39,7 @@
static unsigned int __init serial8250_early_in(struct uart_port *port, int offset)
{
+ int reg_offset = offset;
offset <<= port->regshift;
switch (port->iotype) {
@@ -52,6 +53,8 @@ static unsigned int __init serial8250_early_in(struct uart_port *port, int offse
return ioread32be(port->membase + offset);
case UPIO_PORT:
return inb(port->iobase + offset);
+ case UPIO_AU:
+ return port->serial_in(port, reg_offset);
default:
return 0;
}
@@ -59,6 +62,7 @@ static unsigned int __init serial8250_early_in(struct uart_port *port, int offse
static void __init serial8250_early_out(struct uart_port *port, int offset, int value)
{
+ int reg_offset = offset;
offset <<= port->regshift;
switch (port->iotype) {
@@ -77,6 +81,9 @@ static void __init serial8250_early_out(struct uart_port *port, int offset, int
case UPIO_PORT:
outb(value, port->iobase + offset);
break;
+ case UPIO_AU:
+ port->serial_out(port, reg_offset, value);
+ break;
}
}
@@ -172,3 +179,20 @@ OF_EARLYCON_DECLARE(omap8250, "ti,omap3-uart", early_omap8250_setup);
OF_EARLYCON_DECLARE(omap8250, "ti,omap4-uart", early_omap8250_setup);
#endif
+
+#ifdef CONFIG_SERIAL_8250_RT288X
+
+unsigned int au_serial_in(struct uart_port *p, int offset);
+void au_serial_out(struct uart_port *p, int offset, int value);
+
+static int __init early_au_setup(struct earlycon_device *dev, const char *opt)
+{
+ dev->port.serial_in = au_serial_in;
+ dev->port.serial_out = au_serial_out;
+ dev->port.iotype = UPIO_AU;
+ dev->con->write = early_serial8250_write;
+ return 0;
+}
+OF_EARLYCON_DECLARE(palmchip, "ralink,rt2880-uart", early_au_setup);
+
+#endif
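The new UPIO_AU cases let the generic 8250 early console go through the port's serial_in()/serial_out() accessors, which the 8250_port.c hunk further down un-statics as au_serial_in()/au_serial_out(); those helpers remap the standard 8250 register indices because these UARTs do not place the registers at the usual offsets. A self-contained userspace sketch of that translation idea (the offsets below are purely illustrative, not the real hardware map):

#include <stdio.h>

/* Illustrative only: map "standard" 8250 register indices to device offsets.
 * A negative entry means the register is not implemented on this part. */
static const int demo_io_map[8] = { 0x0, 0x4, 0x8, 0xc, 0x10, 0x14, -1, -1 };

static int demo_reg_offset(int reg8250)
{
        if (reg8250 < 0 || reg8250 >= (int)(sizeof(demo_io_map) / sizeof(demo_io_map[0])))
                return -1;
        return demo_io_map[reg8250];    /* would then be shifted by regshift and read */
}

int main(void)
{
        for (int r = 0; r < 8; r++)
                printf("8250 reg %d -> offset %d\n", r, demo_reg_offset(r));
        return 0;
}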
diff --git a/drivers/tty/serial/8250/8250_exar.c b/drivers/tty/serial/8250/8250_exar.c
index b89c4ffc0486..1270ff163f63 100644
--- a/drivers/tty/serial/8250/8250_exar.c
+++ b/drivers/tty/serial/8250/8250_exar.c
@@ -483,5 +483,5 @@ static struct pci_driver exar_pci_driver = {
module_pci_driver(exar_pci_driver);
MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Exar Serial Dricer");
+MODULE_DESCRIPTION("Exar Serial Driver");
MODULE_AUTHOR("Sudip Mukherjee <sudip.mukherjee@codethink.co.uk>");
diff --git a/drivers/tty/serial/8250/8250_fintek.c b/drivers/tty/serial/8250/8250_fintek.c
index b67e7a544935..e500f7dd2470 100644
--- a/drivers/tty/serial/8250/8250_fintek.c
+++ b/drivers/tty/serial/8250/8250_fintek.c
@@ -61,6 +61,12 @@
* The IRQ setting mode of F81866 is not the same with F81216 series.
* Level/Low: IRQ_MODE0:0, IRQ_MODE1:0
* Edge/High: IRQ_MODE0:1, IRQ_MODE1:0
+ *
+ * Clock speeds for UART (register F2h)
+ * 00: 1.8432MHz.
+ * 01: 18.432MHz.
+ * 10: 24MHz.
+ * 11: 14.769MHz.
*/
#define F81866_IRQ_MODE 0xf0
#define F81866_IRQ_SHARE BIT(0)
@@ -72,6 +78,13 @@
#define F81866_LDN_LOW 0x10
#define F81866_LDN_HIGH 0x16
+#define F81866_UART_CLK 0xF2
+#define F81866_UART_CLK_MASK (BIT(1) | BIT(0))
+#define F81866_UART_CLK_1_8432MHZ 0
+#define F81866_UART_CLK_14_769MHZ (BIT(1) | BIT(0))
+#define F81866_UART_CLK_18_432MHZ BIT(0)
+#define F81866_UART_CLK_24MHZ BIT(1)
+
struct fintek_8250 {
u16 pid;
u16 base_port;
@@ -256,8 +269,26 @@ static void fintek_8250_set_max_fifo(struct fintek_8250 *pdata)
}
}
-static int probe_setup_port(struct fintek_8250 *pdata, u16 io_address,
- unsigned int irq)
+static void fintek_8250_goto_highspeed(struct uart_8250_port *uart,
+ struct fintek_8250 *pdata)
+{
+ sio_write_reg(pdata, LDN, pdata->index);
+
+ switch (pdata->pid) {
+ case CHIP_ID_F81866: /* set uart clock for high speed serial mode */
+ sio_write_mask_reg(pdata, F81866_UART_CLK,
+ F81866_UART_CLK_MASK,
+ F81866_UART_CLK_14_769MHZ);
+
+ uart->port.uartclk = 921600 * 16;
+ break;
+ default: /* leave clock speed untouched */
+ break;
+ }
+}
+
+static int probe_setup_port(struct fintek_8250 *pdata,
+ struct uart_8250_port *uart)
{
static const u16 addr[] = {0x4e, 0x2e};
static const u8 keys[] = {0x77, 0xa0, 0x87, 0x67};
@@ -284,18 +315,20 @@ static int probe_setup_port(struct fintek_8250 *pdata, u16 io_address,
sio_write_reg(pdata, LDN, k);
aux = sio_read_reg(pdata, IO_ADDR1);
aux |= sio_read_reg(pdata, IO_ADDR2) << 8;
- if (aux != io_address)
+ if (aux != uart->port.iobase)
continue;
pdata->index = k;
- irq_data = irq_get_irq_data(irq);
+ irq_data = irq_get_irq_data(uart->port.irq);
if (irq_data)
level_mode =
irqd_is_level_type(irq_data);
fintek_8250_set_irq_mode(pdata, level_mode);
fintek_8250_set_max_fifo(pdata);
+ fintek_8250_goto_highspeed(uart, pdata);
+
fintek_8250_exit_key(addr[i]);
return 0;
@@ -330,7 +363,7 @@ int fintek_8250_probe(struct uart_8250_port *uart)
struct fintek_8250 *pdata;
struct fintek_8250 probe_data;
- if (probe_setup_port(&probe_data, uart->port.iobase, uart->port.irq))
+ if (probe_setup_port(&probe_data, uart))
return -ENODEV;
pdata = devm_kzalloc(uart->port.dev, sizeof(*pdata), GFP_KERNEL);
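fintek_8250_goto_highspeed() selects the F81866 clock option documented above as 14.769 MHz and sets uartclk to 921600 * 16, so the divisor math treats it as the usual 16x base (921600 * 16 is 14 745 600 Hz, so the driver presumably rounds the nominal clock to the standard value). A quick userspace check of that arithmetic using the classic 16450/16550 divisor formula (not values read from hardware):

#include <stdio.h>

int main(void)
{
        const unsigned long uartclk = 921600UL * 16;    /* 14745600 Hz, "14.769 MHz" mode */
        const unsigned long rates[] = { 9600, 115200, 460800, 921600 };

        printf("uartclk = %lu Hz\n", uartclk);
        for (unsigned int i = 0; i < sizeof(rates) / sizeof(rates[0]); i++) {
                unsigned long div = uartclk / (16 * rates[i]);  /* divisor latch value */
                printf("baud %7lu -> divisor %lu\n", rates[i], div);
        }
        return 0;
}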
diff --git a/drivers/tty/serial/8250/8250_lpss.c b/drivers/tty/serial/8250/8250_lpss.c
index f3ea90f0e411..7dddd7e6a01c 100644
--- a/drivers/tty/serial/8250/8250_lpss.c
+++ b/drivers/tty/serial/8250/8250_lpss.c
@@ -183,7 +183,6 @@ static void qrk_serial_setup_dma(struct lpss8250 *lpss, struct uart_port *port)
if (ret)
return;
- pci_set_master(pdev);
pci_try_set_mwi(pdev);
/* Special DMA address for UART */
@@ -216,6 +215,8 @@ static int qrk_serial_setup(struct lpss8250 *lpss, struct uart_port *port)
struct pci_dev *pdev = to_pci_dev(port->dev);
int ret;
+ pci_set_master(pdev);
+
ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
if (ret < 0)
return ret;
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index 6119516ef5fc..09a65a3ec7f7 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -328,7 +328,7 @@ static const s8 au_io_out_map[8] = {
-1, /* UART_SCR (unmapped) */
};
-static unsigned int au_serial_in(struct uart_port *p, int offset)
+unsigned int au_serial_in(struct uart_port *p, int offset)
{
if (offset >= ARRAY_SIZE(au_io_in_map))
return UINT_MAX;
@@ -338,7 +338,7 @@ static unsigned int au_serial_in(struct uart_port *p, int offset)
return __raw_readl(p->membase + (offset << p->regshift));
}
-static void au_serial_out(struct uart_port *p, int offset, int value)
+void au_serial_out(struct uart_port *p, int offset, int value)
{
if (offset >= ARRAY_SIZE(au_io_out_map))
return;
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index 6117ac8da48f..5c8850f7a2a0 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -630,6 +630,15 @@ config SERIAL_UARTLITE_CONSOLE
console (the system console is the device which receives all kernel
messages and warnings and which allows logins in single user mode).
+config SERIAL_UARTLITE_NR_UARTS
+ int "Maximum number of uartlite serial ports"
+ depends on SERIAL_UARTLITE
+ range 1 256
+ default 1
+ help
+ Set this to the number of uartlites in your system, or the number
+ you think you might implement.
+
config SERIAL_SUNCORE
bool
depends on SPARC
@@ -1343,6 +1352,7 @@ config SERIAL_ALTERA_JTAGUART_CONSOLE
bool "Altera JTAG UART console support"
depends on SERIAL_ALTERA_JTAGUART=y
select SERIAL_CORE_CONSOLE
+ select SERIAL_EARLYCON
help
Enable a Altera JTAG UART port to be the system console.
@@ -1382,6 +1392,7 @@ config SERIAL_ALTERA_UART_CONSOLE
bool "Altera UART console support"
depends on SERIAL_ALTERA_UART=y
select SERIAL_CORE_CONSOLE
+ select SERIAL_EARLYCON
help
Enable a Altera UART port to be the system console.
diff --git a/drivers/tty/serial/Makefile b/drivers/tty/serial/Makefile
index 2d6288bc4554..53c03e005132 100644
--- a/drivers/tty/serial/Makefile
+++ b/drivers/tty/serial/Makefile
@@ -3,7 +3,6 @@
#
obj-$(CONFIG_SERIAL_CORE) += serial_core.o
-obj-$(CONFIG_SERIAL_21285) += 21285.o
obj-$(CONFIG_SERIAL_EARLYCON) += earlycon.o
obj-$(CONFIG_SERIAL_EARLYCON_ARM_SEMIHOST) += earlycon-arm-semihost.o
@@ -17,6 +16,8 @@ obj-$(CONFIG_SERIAL_SUNZILOG) += sunzilog.o
obj-$(CONFIG_SERIAL_SUNSU) += sunsu.o
obj-$(CONFIG_SERIAL_SUNSAB) += sunsab.o
+obj-$(CONFIG_SERIAL_21285) += 21285.o
+
# Now bring in any enabled 8250/16450/16550 type drivers.
obj-$(CONFIG_SERIAL_8250) += 8250/
diff --git a/drivers/tty/serial/altera_jtaguart.c b/drivers/tty/serial/altera_jtaguart.c
index e409d7dac7ab..18e3f8342b85 100644
--- a/drivers/tty/serial/altera_jtaguart.c
+++ b/drivers/tty/serial/altera_jtaguart.c
@@ -383,6 +383,26 @@ console_initcall(altera_jtaguart_console_init);
#define ALTERA_JTAGUART_CONSOLE (&altera_jtaguart_console)
+static void altera_jtaguart_earlycon_write(struct console *co, const char *s,
+ unsigned int count)
+{
+ struct earlycon_device *dev = co->data;
+
+ uart_console_write(&dev->port, s, count, altera_jtaguart_console_putc);
+}
+
+static int __init altera_jtaguart_earlycon_setup(struct earlycon_device *dev,
+ const char *options)
+{
+ if (!dev->port.membase)
+ return -ENODEV;
+
+ dev->con->write = altera_jtaguart_earlycon_write;
+ return 0;
+}
+
+OF_EARLYCON_DECLARE(juart, "altr,juart-1.0", altera_jtaguart_earlycon_setup);
+
#else
#define ALTERA_JTAGUART_CONSOLE NULL
diff --git a/drivers/tty/serial/altera_uart.c b/drivers/tty/serial/altera_uart.c
index 820a74208696..46d3438a0d27 100644
--- a/drivers/tty/serial/altera_uart.c
+++ b/drivers/tty/serial/altera_uart.c
@@ -489,6 +489,38 @@ console_initcall(altera_uart_console_init);
#define ALTERA_UART_CONSOLE (&altera_uart_console)
+static void altera_uart_earlycon_write(struct console *co, const char *s,
+ unsigned int count)
+{
+ struct earlycon_device *dev = co->data;
+
+ uart_console_write(&dev->port, s, count, altera_uart_console_putc);
+}
+
+static int __init altera_uart_earlycon_setup(struct earlycon_device *dev,
+ const char *options)
+{
+ struct uart_port *port = &dev->port;
+
+ if (!port->membase)
+ return -ENODEV;
+
+ /* Enable RX interrupts now */
+ writel(ALTERA_UART_CONTROL_RRDY_MSK,
+ port->membase + ALTERA_UART_CONTROL_REG);
+
+ if (dev->baud) {
+ unsigned int baudclk = port->uartclk / dev->baud;
+
+ writel(baudclk, port->membase + ALTERA_UART_DIVISOR_REG);
+ }
+
+ dev->con->write = altera_uart_earlycon_write;
+ return 0;
+}
+
+OF_EARLYCON_DECLARE(uart, "altr,uart-1.0", altera_uart_earlycon_setup);
+
#else
#define ALTERA_UART_CONSOLE NULL
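Both Altera hunks follow the same earlycon recipe: check dev->port.membase, optionally program a divisor, and point dev->con->write at a helper that feeds uart_console_write() a per-character putc. A minimal sketch of that shape for a hypothetical MMIO UART with a single TX register (register name and compatible string are made up):

#include <linux/console.h>
#include <linux/serial_core.h>
#include <linux/io.h>

#define DEMO_UART_TX    0x00    /* hypothetical TX holding register */

static void demo_earlycon_putc(struct uart_port *port, int c)
{
        writel(c, port->membase + DEMO_UART_TX);
}

static void demo_earlycon_write(struct console *con, const char *s,
                                unsigned int count)
{
        struct earlycon_device *dev = con->data;

        uart_console_write(&dev->port, s, count, demo_earlycon_putc);
}

static int __init demo_earlycon_setup(struct earlycon_device *dev,
                                      const char *options)
{
        if (!dev->port.membase)
                return -ENODEV;

        dev->con->write = demo_earlycon_write;
        return 0;
}
OF_EARLYCON_DECLARE(demo_uart, "vendor,demo-uart", demo_earlycon_setup);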
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index b0a377725d63..8a857bb34fbb 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -1327,14 +1327,15 @@ static void pl011_stop_tx(struct uart_port *port)
pl011_dma_tx_stop(uap);
}
-static void pl011_tx_chars(struct uart_amba_port *uap, bool from_irq);
+static bool pl011_tx_chars(struct uart_amba_port *uap, bool from_irq);
/* Start TX with programmed I/O only (no DMA) */
static void pl011_start_tx_pio(struct uart_amba_port *uap)
{
- uap->im |= UART011_TXIM;
- pl011_write(uap->im, uap, REG_IMSC);
- pl011_tx_chars(uap, false);
+ if (pl011_tx_chars(uap, false)) {
+ uap->im |= UART011_TXIM;
+ pl011_write(uap->im, uap, REG_IMSC);
+ }
}
static void pl011_start_tx(struct uart_port *port)
@@ -1414,25 +1415,26 @@ static bool pl011_tx_char(struct uart_amba_port *uap, unsigned char c,
return true;
}
-static void pl011_tx_chars(struct uart_amba_port *uap, bool from_irq)
+/* Returns true if tx interrupts have to be (kept) enabled */
+static bool pl011_tx_chars(struct uart_amba_port *uap, bool from_irq)
{
struct circ_buf *xmit = &uap->port.state->xmit;
int count = uap->fifosize >> 1;
if (uap->port.x_char) {
if (!pl011_tx_char(uap, uap->port.x_char, from_irq))
- return;
+ return true;
uap->port.x_char = 0;
--count;
}
if (uart_circ_empty(xmit) || uart_tx_stopped(&uap->port)) {
pl011_stop_tx(&uap->port);
- return;
+ return false;
}
/* If we are using DMA mode, try to send some characters. */
if (pl011_dma_tx_irq(uap))
- return;
+ return true;
do {
if (likely(from_irq) && count-- == 0)
@@ -1447,8 +1449,11 @@ static void pl011_tx_chars(struct uart_amba_port *uap, bool from_irq)
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(&uap->port);
- if (uart_circ_empty(xmit))
+ if (uart_circ_empty(xmit)) {
pl011_stop_tx(&uap->port);
+ return false;
+ }
+ return true;
}
static void pl011_modem_status(struct uart_amba_port *uap)
@@ -2470,19 +2475,34 @@ static int __init pl011_early_console_setup(struct earlycon_device *device,
if (!device->port.membase)
return -ENODEV;
- /* On QDF2400 SOCs affected by Erratum 44, the "qdf2400_e44" must
- * also be specified, e.g. "earlycon=pl011,<address>,qdf2400_e44".
- */
- if (!strcmp(device->options, "qdf2400_e44"))
- device->con->write = qdf2400_e44_early_write;
- else
- device->con->write = pl011_early_write;
+ device->con->write = pl011_early_write;
return 0;
}
OF_EARLYCON_DECLARE(pl011, "arm,pl011", pl011_early_console_setup);
OF_EARLYCON_DECLARE(pl011, "arm,sbsa-uart", pl011_early_console_setup);
-EARLYCON_DECLARE(qdf2400_e44, pl011_early_console_setup);
+
+/*
+ * On Qualcomm Datacenter Technologies QDF2400 SOCs affected by
+ * Erratum 44, traditional earlycon can be enabled by specifying
+ * "earlycon=qdf2400_e44,<address>". Any options are ignored.
+ *
+ * Alternatively, you can just specify "earlycon", and the early console
+ * will be enabled with the information from the SPCR table. In this
+ * case, the SPCR code will detect the need for the E44 work-around,
+ * and set the console name to "qdf2400_e44".
+ */
+static int __init
+qdf2400_e44_early_console_setup(struct earlycon_device *device,
+ const char *opt)
+{
+ if (!device->port.membase)
+ return -ENODEV;
+
+ device->con->write = qdf2400_e44_early_write;
+ return 0;
+}
+EARLYCON_DECLARE(qdf2400_e44, qdf2400_e44_early_console_setup);
#else
#define AMBA_CONSOLE NULL
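The pl011 change makes pl011_tx_chars() report whether the TX interrupt still needs to be armed, so pl011_start_tx_pio() only sets UART011_TXIM when the first FIFO fill left work pending. The same control flow, reduced to a hedged userspace toy (names and the FIFO depth are illustrative):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Toy model: push up to fifo_room bytes, report whether the TX IRQ should stay on. */
static bool demo_tx_chars(const char *buf, size_t *pos, size_t len, int fifo_room)
{
        while (*pos < len && fifo_room-- > 0)
                putchar(buf[(*pos)++]);         /* stands in for writing UARTDR */

        return *pos < len;                      /* true: more data, keep TXIM set */
}

int main(void)
{
        const char msg[] = "hello, pl011";
        size_t pos = 0;

        /* start_tx path: only "enable the interrupt" if the first fill did not finish */
        bool irq_enabled = demo_tx_chars(msg, &pos, strlen(msg), 8);
        printf("\nTX IRQ armed after first fill: %s\n", irq_enabled ? "yes" : "no");
        return 0;
}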
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index 1f50a83ef958..c355ac9abafc 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -38,7 +38,6 @@
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/atmel_pdc.h>
-#include <linux/atmel_serial.h>
#include <linux/uaccess.h>
#include <linux/platform_data/atmel.h>
#include <linux/timer.h>
@@ -71,6 +70,7 @@
#include <linux/serial_core.h>
#include "serial_mctrl_gpio.h"
+#include "atmel_serial.h"
static void atmel_start_rx(struct uart_port *port);
static void atmel_stop_rx(struct uart_port *port);
@@ -119,8 +119,9 @@ struct atmel_uart_char {
/*
* at91: 6 USARTs and one DBGU port (SAM9260)
* avr32: 4
+ * samx7: 3 USARTs and 5 UARTs
*/
-#define ATMEL_MAX_UART 7
+#define ATMEL_MAX_UART 8
/*
* We wrap our port structure around the generic uart_port.
@@ -175,6 +176,7 @@ struct atmel_uart_port {
unsigned int pending_status;
spinlock_t lock_suspended;
+#ifdef CONFIG_PM
struct {
u32 cr;
u32 mr;
@@ -185,6 +187,7 @@ struct atmel_uart_port {
u32 fmr;
u32 fimr;
} cache;
+#endif
int (*prepare_rx)(struct uart_port *port);
int (*prepare_tx)(struct uart_port *port);
diff --git a/include/linux/atmel_serial.h b/drivers/tty/serial/atmel_serial.h
index bd2560502f3c..bd2560502f3c 100644
--- a/include/linux/atmel_serial.h
+++ b/drivers/tty/serial/atmel_serial.h
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
index f02934ffb329..15df1ba78095 100644
--- a/drivers/tty/serial/fsl_lpuart.c
+++ b/drivers/tty/serial/fsl_lpuart.c
@@ -1705,6 +1705,13 @@ lpuart_console_write(struct console *co, const char *s, unsigned int count)
{
struct lpuart_port *sport = lpuart_ports[co->index];
unsigned char old_cr2, cr2;
+ unsigned long flags;
+ int locked = 1;
+
+ if (sport->port.sysrq || oops_in_progress)
+ locked = spin_trylock_irqsave(&sport->port.lock, flags);
+ else
+ spin_lock_irqsave(&sport->port.lock, flags);
/* first save CR2 and then disable interrupts */
cr2 = old_cr2 = readb(sport->port.membase + UARTCR2);
@@ -1719,6 +1726,9 @@ lpuart_console_write(struct console *co, const char *s, unsigned int count)
barrier();
writeb(old_cr2, sport->port.membase + UARTCR2);
+
+ if (locked)
+ spin_unlock_irqrestore(&sport->port.lock, flags);
}
static void
@@ -1726,6 +1736,13 @@ lpuart32_console_write(struct console *co, const char *s, unsigned int count)
{
struct lpuart_port *sport = lpuart_ports[co->index];
unsigned long old_cr, cr;
+ unsigned long flags;
+ int locked = 1;
+
+ if (sport->port.sysrq || oops_in_progress)
+ locked = spin_trylock_irqsave(&sport->port.lock, flags);
+ else
+ spin_lock_irqsave(&sport->port.lock, flags);
/* first save CR2 and then disable interrupts */
cr = old_cr = lpuart32_read(sport->port.membase + UARTCTRL);
@@ -1740,6 +1757,9 @@ lpuart32_console_write(struct console *co, const char *s, unsigned int count)
barrier();
lpuart32_write(old_cr, sport->port.membase + UARTCTRL);
+
+ if (locked)
+ spin_unlock_irqrestore(&sport->port.lock, flags);
}
/*
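Both lpuart console write paths now take port.lock, but fall back to spin_trylock_irqsave() when sysrq or an oops is in flight so a wedged lock cannot deadlock the panic path. The idiom, as a hedged generic sketch (demo_emit() is a hypothetical FIFO-polling helper):

#include <linux/spinlock.h>
#include <linux/types.h>

static void demo_console_write(spinlock_t *lock, bool in_sysrq_or_oops,
                               const char *s, unsigned int count)
{
        unsigned long flags;
        int locked = 1;

        if (in_sysrq_or_oops)
                locked = spin_trylock_irqsave(lock, flags);
        else
                spin_lock_irqsave(lock, flags);

        /* demo_emit(s, count);  -- poll the FIFO and push characters here */

        if (locked)
                spin_unlock_irqrestore(lock, flags);
}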
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index e3e152cbc75e..33509b4beaec 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -719,6 +719,27 @@ out:
return IRQ_HANDLED;
}
+static void imx_disable_rx_int(struct imx_port *sport)
+{
+ unsigned long temp;
+
+ sport->dma_is_rxing = 1;
+
+ /* disable the receiver ready and aging timer interrupts */
+ temp = readl(sport->port.membase + UCR1);
+ temp &= ~(UCR1_RRDYEN);
+ writel(temp, sport->port.membase + UCR1);
+
+ temp = readl(sport->port.membase + UCR2);
+ temp &= ~(UCR2_ATEN);
+ writel(temp, sport->port.membase + UCR2);
+
+ /* disable the rx errors interrupts */
+ temp = readl(sport->port.membase + UCR4);
+ temp &= ~UCR4_OREN;
+ writel(temp, sport->port.membase + UCR4);
+}
+
static void clear_rx_errors(struct imx_port *sport);
static int start_rx_dma(struct imx_port *sport);
/*
@@ -734,21 +755,8 @@ static void imx_dma_rxint(struct imx_port *sport)
temp = readl(sport->port.membase + USR2);
if ((temp & USR2_RDR) && !sport->dma_is_rxing) {
- sport->dma_is_rxing = 1;
- /* disable the receiver ready and aging timer interrupts */
- temp = readl(sport->port.membase + UCR1);
- temp &= ~(UCR1_RRDYEN);
- writel(temp, sport->port.membase + UCR1);
-
- temp = readl(sport->port.membase + UCR2);
- temp &= ~(UCR2_ATEN);
- writel(temp, sport->port.membase + UCR2);
-
- /* disable the rx errors interrupts */
- temp = readl(sport->port.membase + UCR4);
- temp &= ~UCR4_OREN;
- writel(temp, sport->port.membase + UCR4);
+ imx_disable_rx_int(sport);
/* tell the DMA to receive the data. */
start_rx_dma(sport);
@@ -1317,19 +1325,10 @@ static int imx_startup(struct uart_port *port)
if (!is_imx1_uart(sport)) {
temp = readl(sport->port.membase + UCR3);
- /*
- * The effect of RI and DCD differs depending on the UFCR_DCEDTE
- * bit. In DCE mode they control the outputs, in DTE mode they
- * enable the respective irqs. At least the DCD irq cannot be
- * cleared on i.MX25 at least, so it's not usable and must be
- * disabled. I don't have test hardware to check if RI has the
- * same problem but I consider this likely so it's disabled for
- * now, too.
- */
- temp |= IMX21_UCR3_RXDMUXSEL | UCR3_ADNIMP |
- UCR3_DTRDEN | UCR3_RI | UCR3_DCD;
+ temp |= UCR3_DTRDEN | UCR3_RI | UCR3_DCD;
if (sport->dte_mode)
+ /* disable broken interrupts */
temp &= ~(UCR3_RI | UCR3_DCD);
writel(temp, sport->port.membase + UCR3);
@@ -1339,6 +1338,33 @@ static int imx_startup(struct uart_port *port)
* Enable modem status interrupts
*/
imx_enable_ms(&sport->port);
+
+ /*
+ * If the serial port is opened for reading start RX DMA immediately
+ * instead of waiting for RX FIFO interrupts. In our iMX53 the average
+ * delay for the first reception dropped from approximately 35000
+ * microseconds to 1000 microseconds.
+ */
+ if (sport->dma_is_enabled) {
+ struct tty_struct *tty = sport->port.state->port.tty;
+ struct tty_file_private *file_priv;
+ int readcnt = 0;
+
+ spin_lock(&tty->files_lock);
+
+ if (!list_empty(&tty->tty_files))
+ list_for_each_entry(file_priv, &tty->tty_files, list)
+ if (!(file_priv->file->f_flags & O_WRONLY))
+ readcnt++;
+
+ spin_unlock(&tty->files_lock);
+
+ if (readcnt > 0) {
+ imx_disable_rx_int(sport);
+ start_rx_dma(sport);
+ }
+ }
+
spin_unlock_irqrestore(&sport->port.lock, flags);
return 0;
@@ -1584,8 +1610,6 @@ imx_set_termios(struct uart_port *port, struct ktermios *termios,
ufcr = readl(sport->port.membase + UFCR);
ufcr = (ufcr & (~UFCR_RFDIV)) | UFCR_RFDIV_REG(div);
- if (sport->dte_mode)
- ufcr |= UFCR_DCEDTE;
writel(ufcr, sport->port.membase + UFCR);
writel(num, sport->port.membase + UBIR);
@@ -2153,6 +2177,27 @@ static int serial_imx_probe(struct platform_device *pdev)
UCR1_TXMPTYEN | UCR1_RTSDEN);
writel_relaxed(reg, sport->port.membase + UCR1);
+ if (!is_imx1_uart(sport) && sport->dte_mode) {
+ /*
+ * The DCEDTE bit changes the direction of DSR, DCD, DTR and RI
+ * and influences whether UCR3_RI and UCR3_DCD change the level of RI
+ * and DCD (when they are outputs) or enable the respective
+ * irqs. So set this bit early, i.e. before requesting irqs.
+ */
+ writel(UFCR_DCEDTE, sport->port.membase + UFCR);
+
+ /*
+ * Disable UCR3_RI and UCR3_DCD irqs. They are also not
+ * enabled later because they cannot be cleared
+ * (confirmed on i.MX25) which makes them unusable.
+ */
+ writel(IMX21_UCR3_RXDMUXSEL | UCR3_ADNIMP | UCR3_DSR,
+ sport->port.membase + UCR3);
+
+ } else {
+ writel(0, sport->port.membase + UFCR);
+ }
+
clk_disable_unprepare(sport->clk_ipg);
/*
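The startup change kicks RX DMA immediately when the port was opened with read access: it walks tty->tty_files under files_lock and counts descriptors whose open flags are not write-only. A hedged userspace illustration of that access-mode test (the kernel checks the O_WRONLY bit on file->f_flags directly; the portable O_ACCMODE form below gives the same answer):

#include <fcntl.h>
#include <stdio.h>

/* Returns 1 if a file opened with these flags can read (i.e. is not write-only). */
static int demo_can_read(int f_flags)
{
        return (f_flags & O_ACCMODE) != O_WRONLY;
}

int main(void)
{
        printf("O_RDONLY: %d\n", demo_can_read(O_RDONLY));
        printf("O_WRONLY: %d\n", demo_can_read(O_WRONLY));
        printf("O_RDWR:   %d\n", demo_can_read(O_RDWR));
        return 0;
}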
diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
index a4734649a0f0..1ea05ac57aa7 100644
--- a/drivers/tty/serial/omap-serial.c
+++ b/drivers/tty/serial/omap-serial.c
@@ -1770,7 +1770,8 @@ static int serial_omap_probe(struct platform_device *pdev)
return 0;
err_add_port:
- pm_runtime_put(&pdev->dev);
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
+ pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
pm_qos_remove_request(&up->pm_qos_request);
device_init_wakeup(up->dev, false);
@@ -1783,9 +1784,13 @@ static int serial_omap_remove(struct platform_device *dev)
{
struct uart_omap_port *up = platform_get_drvdata(dev);
+ pm_runtime_get_sync(up->dev);
+
+ uart_remove_one_port(&serial_omap_reg, &up->port);
+
+ pm_runtime_dont_use_autosuspend(up->dev);
pm_runtime_put_sync(up->dev);
pm_runtime_disable(up->dev);
- uart_remove_one_port(&serial_omap_reg, &up->port);
pm_qos_remove_request(&up->pm_qos_request);
device_init_wakeup(&dev->dev, false);
diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
index 7a17aedbf902..8aca18c4cdea 100644
--- a/drivers/tty/serial/samsung.c
+++ b/drivers/tty/serial/samsung.c
@@ -859,7 +859,7 @@ static void s3c24xx_serial_break_ctl(struct uart_port *port, int break_state)
static int s3c24xx_serial_request_dma(struct s3c24xx_uart_port *p)
{
struct s3c24xx_uart_dma *dma = p->dma;
- unsigned long flags;
+ int ret;
/* Default slave configuration parameters */
dma->rx_conf.direction = DMA_DEV_TO_MEM;
@@ -884,8 +884,8 @@ static int s3c24xx_serial_request_dma(struct s3c24xx_uart_port *p)
dma->tx_chan = dma_request_chan(p->port.dev, "tx");
if (IS_ERR(dma->tx_chan)) {
- dma_release_channel(dma->rx_chan);
- return PTR_ERR(dma->tx_chan);
+ ret = PTR_ERR(dma->tx_chan);
+ goto err_release_rx;
}
dmaengine_slave_config(dma->tx_chan, &dma->tx_conf);
@@ -894,26 +894,38 @@ static int s3c24xx_serial_request_dma(struct s3c24xx_uart_port *p)
dma->rx_size = PAGE_SIZE;
dma->rx_buf = kmalloc(dma->rx_size, GFP_KERNEL);
-
if (!dma->rx_buf) {
- dma_release_channel(dma->rx_chan);
- dma_release_channel(dma->tx_chan);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto err_release_tx;
}
- dma->rx_addr = dma_map_single(dma->rx_chan->device->dev, dma->rx_buf,
+ dma->rx_addr = dma_map_single(p->port.dev, dma->rx_buf,
dma->rx_size, DMA_FROM_DEVICE);
-
- spin_lock_irqsave(&p->port.lock, flags);
+ if (dma_mapping_error(p->port.dev, dma->rx_addr)) {
+ ret = -EIO;
+ goto err_free_rx;
+ }
/* TX buffer */
- dma->tx_addr = dma_map_single(dma->tx_chan->device->dev,
- p->port.state->xmit.buf,
+ dma->tx_addr = dma_map_single(p->port.dev, p->port.state->xmit.buf,
UART_XMIT_SIZE, DMA_TO_DEVICE);
-
- spin_unlock_irqrestore(&p->port.lock, flags);
+ if (dma_mapping_error(p->port.dev, dma->tx_addr)) {
+ ret = -EIO;
+ goto err_unmap_rx;
+ }
return 0;
+
+err_unmap_rx:
+ dma_unmap_single(p->port.dev, dma->rx_addr, dma->rx_size,
+ DMA_FROM_DEVICE);
+err_free_rx:
+ kfree(dma->rx_buf);
+err_release_tx:
+ dma_release_channel(dma->tx_chan);
+err_release_rx:
+ dma_release_channel(dma->rx_chan);
+ return ret;
}
static void s3c24xx_serial_release_dma(struct s3c24xx_uart_port *p)
@@ -922,7 +934,7 @@ static void s3c24xx_serial_release_dma(struct s3c24xx_uart_port *p)
if (dma->rx_chan) {
dmaengine_terminate_all(dma->rx_chan);
- dma_unmap_single(dma->rx_chan->device->dev, dma->rx_addr,
+ dma_unmap_single(p->port.dev, dma->rx_addr,
dma->rx_size, DMA_FROM_DEVICE);
kfree(dma->rx_buf);
dma_release_channel(dma->rx_chan);
@@ -931,7 +943,7 @@ static void s3c24xx_serial_release_dma(struct s3c24xx_uart_port *p)
if (dma->tx_chan) {
dmaengine_terminate_all(dma->tx_chan);
- dma_unmap_single(dma->tx_chan->device->dev, dma->tx_addr,
+ dma_unmap_single(p->port.dev, dma->tx_addr,
UART_XMIT_SIZE, DMA_TO_DEVICE);
dma_release_channel(dma->tx_chan);
dma->tx_chan = NULL;
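The samsung hunk replaces ad-hoc cleanup with dma_mapping_error() checks and a single unwind ladder, so every failure path releases exactly what was set up before it. A hedged sketch of that allocate/map/unwind order (the device pointer and buffer size are placeholders):

#include <linux/dma-mapping.h>
#include <linux/slab.h>

static int demo_setup_rx(struct device *dev, void **buf, dma_addr_t *addr,
                         size_t size)
{
        int ret;

        *buf = kmalloc(size, GFP_KERNEL);
        if (!*buf)
                return -ENOMEM;

        *addr = dma_map_single(dev, *buf, size, DMA_FROM_DEVICE);
        if (dma_mapping_error(dev, *addr)) {
                ret = -EIO;
                goto err_free;
        }
        return 0;

err_free:
        kfree(*buf);
        *buf = NULL;
        return ret;
}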
diff --git a/drivers/tty/serial/sb1250-duart.c b/drivers/tty/serial/sb1250-duart.c
index 771f361c47ea..041625cc24bb 100644
--- a/drivers/tty/serial/sb1250-duart.c
+++ b/drivers/tty/serial/sb1250-duart.c
@@ -41,7 +41,7 @@
#include <linux/tty_flip.h>
#include <linux/types.h>
-#include <linux/atomic.h>
+#include <linux/refcount.h>
#include <asm/io.h>
#include <asm/war.h>
@@ -103,7 +103,7 @@ struct sbd_port {
struct sbd_duart {
struct sbd_port sport[2];
unsigned long mapctrl;
- atomic_t map_guard;
+ refcount_t map_guard;
};
#define to_sport(uport) container_of(uport, struct sbd_port, port)
@@ -654,15 +654,13 @@ static void sbd_release_port(struct uart_port *uport)
{
struct sbd_port *sport = to_sport(uport);
struct sbd_duart *duart = sport->duart;
- int map_guard;
iounmap(sport->memctrl);
sport->memctrl = NULL;
iounmap(uport->membase);
uport->membase = NULL;
- map_guard = atomic_add_return(-1, &duart->map_guard);
- if (!map_guard)
+ if (refcount_dec_and_test(&duart->map_guard))
release_mem_region(duart->mapctrl, DUART_CHANREG_SPACING);
release_mem_region(uport->mapbase, DUART_CHANREG_SPACING);
}
@@ -698,7 +696,6 @@ static int sbd_request_port(struct uart_port *uport)
{
const char *err = KERN_ERR "sbd: Unable to reserve MMIO resource\n";
struct sbd_duart *duart = to_sport(uport)->duart;
- int map_guard;
int ret = 0;
if (!request_mem_region(uport->mapbase, DUART_CHANREG_SPACING,
@@ -706,11 +703,11 @@ static int sbd_request_port(struct uart_port *uport)
printk(err);
return -EBUSY;
}
- map_guard = atomic_add_return(1, &duart->map_guard);
- if (map_guard == 1) {
+ refcount_inc(&duart->map_guard);
+ if (refcount_read(&duart->map_guard) == 1) {
if (!request_mem_region(duart->mapctrl, DUART_CHANREG_SPACING,
"sb1250-duart")) {
- atomic_add(-1, &duart->map_guard);
+ refcount_dec(&duart->map_guard);
printk(err);
ret = -EBUSY;
}
@@ -718,8 +715,7 @@ static int sbd_request_port(struct uart_port *uport)
if (!ret) {
ret = sbd_map_port(uport);
if (ret) {
- map_guard = atomic_add_return(-1, &duart->map_guard);
- if (!map_guard)
+ if (refcount_dec_and_test(&duart->map_guard))
release_mem_region(duart->mapctrl,
DUART_CHANREG_SPACING);
}
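Switching map_guard from atomic_t to refcount_t documents that the value really is a reference count: the first user maps the shared control region and the last one releases it via refcount_dec_and_test(). A hedged sketch of the same guard around a shared resource (the map/unmap helpers are hypothetical, and a mutex serializes the first-user check):

#include <linux/refcount.h>
#include <linux/mutex.h>

static refcount_t demo_users = REFCOUNT_INIT(0);
static DEFINE_MUTEX(demo_lock);

/* demo_map_shared()/demo_unmap_shared() stand in for request_mem_region()/
 * release_mem_region() on the shared control block. */
static int demo_get(void)
{
        mutex_lock(&demo_lock);
        if (refcount_read(&demo_users) == 0) {
                /* first user: demo_map_shared(); */
                refcount_set(&demo_users, 1);
        } else {
                refcount_inc(&demo_users);
        }
        mutex_unlock(&demo_lock);
        return 0;
}

static void demo_put(void)
{
        mutex_lock(&demo_lock);
        if (refcount_dec_and_test(&demo_users)) {
                /* last user gone: demo_unmap_shared(); */
        }
        mutex_unlock(&demo_lock);
}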
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index 3fe56894974a..0f45b7884a2c 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -2117,9 +2117,8 @@ int uart_suspend_port(struct uart_driver *drv, struct uart_port *uport)
for (tries = 3; !ops->tx_empty(uport) && tries; tries--)
msleep(10);
if (!tries)
- dev_err(uport->dev, "%s%d: Unable to drain transmitter\n",
- drv->dev_name,
- drv->tty_driver->name_base + uport->line);
+ dev_err(uport->dev, "%s: Unable to drain transmitter\n",
+ uport->name);
ops->shutdown(uport);
}
@@ -2248,11 +2247,10 @@ uart_report_port(struct uart_driver *drv, struct uart_port *port)
break;
}
- printk(KERN_INFO "%s%s%s%d at %s (irq = %d, base_baud = %d) is a %s\n",
+ pr_info("%s%s%s at %s (irq = %d, base_baud = %d) is a %s\n",
port->dev ? dev_name(port->dev) : "",
port->dev ? ": " : "",
- drv->dev_name,
- drv->tty_driver->name_base + port->line,
+ port->name,
address, port->irq, port->uartclk / 16, uart_type(port));
}
@@ -2331,9 +2329,6 @@ static int uart_poll_init(struct tty_driver *driver, int line, char *options)
int flow = 'n';
int ret = 0;
- if (!state)
- return -1;
-
tport = &state->port;
mutex_lock(&tport->mutex);
@@ -2368,13 +2363,12 @@ static int uart_poll_get_char(struct tty_driver *driver, int line)
struct uart_port *port;
int ret = -1;
- if (state) {
- port = uart_port_ref(state);
- if (port) {
- ret = port->ops->poll_get_char(port);
- uart_port_deref(port);
- }
+ port = uart_port_ref(state);
+ if (port) {
+ ret = port->ops->poll_get_char(port);
+ uart_port_deref(port);
}
+
return ret;
}
@@ -2384,9 +2378,6 @@ static void uart_poll_put_char(struct tty_driver *driver, int line, char ch)
struct uart_state *state = drv->state + line;
struct uart_port *port;
- if (!state)
- return;
-
port = uart_port_ref(state);
if (!port)
return;
@@ -2751,6 +2742,12 @@ int uart_add_one_port(struct uart_driver *drv, struct uart_port *uport)
state->pm_state = UART_PM_STATE_UNDEFINED;
uport->cons = drv->cons;
uport->minor = drv->tty_driver->minor_start + uport->line;
+ uport->name = kasprintf(GFP_KERNEL, "%s%d", drv->dev_name,
+ drv->tty_driver->name_base + uport->line);
+ if (!uport->name) {
+ ret = -ENOMEM;
+ goto out;
+ }
/*
* If this port is a console, then the spinlock is already
@@ -2868,6 +2865,7 @@ int uart_remove_one_port(struct uart_driver *drv, struct uart_port *uport)
if (uport->type != PORT_UNKNOWN && uport->ops->release_port)
uport->ops->release_port(uport);
kfree(uport->tty_groups);
+ kfree(uport->name);
/*
* Indicate that there isn't a port here anymore.
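uart_add_one_port() now builds a per-port name with kasprintf() (used for messages and the 8250 request_irq() change earlier in this series) and uart_remove_one_port() frees it. The allocate/format/free idiom, as a tiny sketch (the prefix and index are placeholders):

#include <linux/kernel.h>
#include <linux/slab.h>

static char *demo_make_port_name(const char *prefix, int line)
{
        /* kasprintf() allocates exactly what the formatted string needs */
        return kasprintf(GFP_KERNEL, "%s%d", prefix, line);
}

/* Usage sketch:
 *      char *name = demo_make_port_name("ttyS", 0);
 *      if (!name)
 *              return -ENOMEM;
 *      ...
 *      kfree(name);
 */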
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index 9a47cc4f16a2..71707e8e6e3f 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -683,24 +683,37 @@ static void sci_init_pins(struct uart_port *port, unsigned int cflag)
}
if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
+ u16 data = serial_port_in(port, SCPDR);
u16 ctrl = serial_port_in(port, SCPCR);
/* Enable RXD and TXD pin functions */
ctrl &= ~(SCPCR_RXDC | SCPCR_TXDC);
if (to_sci_port(port)->has_rtscts) {
- /* RTS# is output, driven 1 */
- ctrl |= SCPCR_RTSC;
- serial_port_out(port, SCPDR,
- serial_port_in(port, SCPDR) | SCPDR_RTSD);
+ /* RTS# is output, active low, unless autorts */
+ if (!(port->mctrl & TIOCM_RTS)) {
+ ctrl |= SCPCR_RTSC;
+ data |= SCPDR_RTSD;
+ } else if (!s->autorts) {
+ ctrl |= SCPCR_RTSC;
+ data &= ~SCPDR_RTSD;
+ } else {
+ /* Enable RTS# pin function */
+ ctrl &= ~SCPCR_RTSC;
+ }
/* Enable CTS# pin function */
ctrl &= ~SCPCR_CTSC;
}
+ serial_port_out(port, SCPDR, data);
serial_port_out(port, SCPCR, ctrl);
} else if (sci_getreg(port, SCSPTR)->size) {
u16 status = serial_port_in(port, SCSPTR);
- /* RTS# is output, driven 1 */
- status |= SCSPTR_RTSIO | SCSPTR_RTSDT;
+ /* RTS# is always output; and active low, unless autorts */
+ status |= SCSPTR_RTSIO;
+ if (!(port->mctrl & TIOCM_RTS))
+ status |= SCSPTR_RTSDT;
+ else if (!s->autorts)
+ status &= ~SCSPTR_RTSDT;
/* CTS# and SCK are inputs */
status &= ~(SCSPTR_CTSIO | SCSPTR_SCKIO);
serial_port_out(port, SCSPTR, status);
@@ -1985,11 +1998,13 @@ static int sci_startup(struct uart_port *port)
dev_dbg(port->dev, "%s(%d)\n", __func__, port->line);
+ sci_request_dma(port);
+
ret = sci_request_irq(s);
- if (unlikely(ret < 0))
+ if (unlikely(ret < 0)) {
+ sci_free_dma(port);
return ret;
-
- sci_request_dma(port);
+ }
return 0;
}
@@ -2021,8 +2036,8 @@ static void sci_shutdown(struct uart_port *port)
}
#endif
- sci_free_dma(port);
sci_free_irq(s);
+ sci_free_dma(port);
}
static int sci_sck_calc(struct sci_port *s, unsigned int bps,
@@ -2157,10 +2172,6 @@ static void sci_reset(struct uart_port *port)
unsigned int status;
struct sci_port *s = to_sci_port(port);
- do {
- status = serial_port_in(port, SCxSR);
- } while (!(status & SCxSR_TEND(port)));
-
serial_port_out(port, SCSCR, 0x00); /* TE=0, RE=0, CKE1=0 */
reg = sci_getreg(port, SCFCR);
@@ -2374,6 +2385,10 @@ done:
serial_port_out(port, SCFCR, ctrl);
}
+ if (port->flags & UPF_HARD_FLOW) {
+ /* Refresh (Auto) RTS */
+ sci_set_mctrl(port, port->mctrl);
+ }
scr_val |= SCSCR_RE | SCSCR_TE |
(s->cfg->scscr & ~(SCSCR_CKE1 | SCSCR_CKE0));
diff --git a/drivers/tty/serial/sprd_serial.c b/drivers/tty/serial/sprd_serial.c
index d98e3dc4838e..90996ad97b37 100644
--- a/drivers/tty/serial/sprd_serial.c
+++ b/drivers/tty/serial/sprd_serial.c
@@ -36,7 +36,7 @@
#define SPRD_FIFO_SIZE 128
#define SPRD_DEF_RATE 26000000
#define SPRD_BAUD_IO_LIMIT 3000000
-#define SPRD_TIMEOUT 256
+#define SPRD_TIMEOUT 256000
/* the offset of serial registers and BITs for them */
/* data registers */
diff --git a/drivers/tty/serial/st-asc.c b/drivers/tty/serial/st-asc.c
index c334bcc59c64..f5335be344f6 100644
--- a/drivers/tty/serial/st-asc.c
+++ b/drivers/tty/serial/st-asc.c
@@ -887,13 +887,12 @@ static void asc_console_write(struct console *co, const char *s, unsigned count)
int locked = 1;
u32 intenable;
- local_irq_save(flags);
if (port->sysrq)
locked = 0; /* asc_interrupt has already claimed the lock */
else if (oops_in_progress)
- locked = spin_trylock(&port->lock);
+ locked = spin_trylock_irqsave(&port->lock, flags);
else
- spin_lock(&port->lock);
+ spin_lock_irqsave(&port->lock, flags);
/*
* Disable interrupts so we don't get the IRQ line bouncing
@@ -911,14 +910,13 @@ static void asc_console_write(struct console *co, const char *s, unsigned count)
asc_out(port, ASC_INTEN, intenable);
if (locked)
- spin_unlock(&port->lock);
- local_irq_restore(flags);
+ spin_unlock_irqrestore(&port->lock, flags);
}
static int asc_console_setup(struct console *co, char *options)
{
struct asc_port *ascport;
- int baud = 9600;
+ int baud = 115200;
int bits = 8;
int parity = 'n';
int flow = 'n';
@@ -986,7 +984,7 @@ static struct platform_driver asc_serial_driver = {
static int __init asc_init(void)
{
int ret;
- static char banner[] __initdata =
+ static const char banner[] __initconst =
KERN_INFO "STMicroelectronics ASC driver initialized\n";
printk(banner);
diff --git a/drivers/tty/serial/uartlite.c b/drivers/tty/serial/uartlite.c
index 817bb0d3f326..c9b8d702dadc 100644
--- a/drivers/tty/serial/uartlite.c
+++ b/drivers/tty/serial/uartlite.c
@@ -28,7 +28,7 @@
#define ULITE_NAME "ttyUL"
#define ULITE_MAJOR 204
#define ULITE_MINOR 187
-#define ULITE_NR_UARTS 16
+#define ULITE_NR_UARTS CONFIG_SERIAL_UARTLITE_NR_UARTS
/* ---------------------------------------------------------------------
* Register definitions
diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
index ad77d0ed0c46..c0539950f8d7 100644
--- a/drivers/tty/serial/xilinx_uartps.c
+++ b/drivers/tty/serial/xilinx_uartps.c
@@ -30,6 +30,7 @@
#include <linux/io.h>
#include <linux/of.h>
#include <linux/module.h>
+#include <linux/pm_runtime.h>
#define CDNS_UART_TTY_NAME "ttyPS"
#define CDNS_UART_NAME "xuartps"
@@ -176,6 +177,7 @@ MODULE_PARM_DESC(rx_timeout, "Rx timeout, 1-255");
#define CDNS_UART_BDIV_MIN 4
#define CDNS_UART_BDIV_MAX 255
#define CDNS_UART_CD_MAX 65535
+#define UART_AUTOSUSPEND_TIMEOUT 3000
/**
* struct cdns_uart - device data
@@ -1065,16 +1067,13 @@ static void cdns_uart_poll_put_char(struct uart_port *port, unsigned char c)
static void cdns_uart_pm(struct uart_port *port, unsigned int state,
unsigned int oldstate)
{
- struct cdns_uart *cdns_uart = port->private_data;
-
switch (state) {
case UART_PM_STATE_OFF:
- clk_disable(cdns_uart->uartclk);
- clk_disable(cdns_uart->pclk);
+ pm_runtime_mark_last_busy(port->dev);
+ pm_runtime_put_autosuspend(port->dev);
break;
default:
- clk_enable(cdns_uart->pclk);
- clk_enable(cdns_uart->uartclk);
+ pm_runtime_get_sync(port->dev);
break;
}
}
@@ -1353,12 +1352,7 @@ static int cdns_uart_suspend(struct device *device)
* the suspend.
*/
uart_suspend_port(&cdns_uart_uart_driver, port);
- if (console_suspend_enabled && !may_wake) {
- struct cdns_uart *cdns_uart = port->private_data;
-
- clk_disable(cdns_uart->uartclk);
- clk_disable(cdns_uart->pclk);
- } else {
+ if (!(console_suspend_enabled && !may_wake)) {
unsigned long flags = 0;
spin_lock_irqsave(&port->lock, flags);
@@ -1423,6 +1417,8 @@ static int cdns_uart_resume(struct device *device)
ctrl_reg |= CDNS_UART_CR_TX_EN | CDNS_UART_CR_RX_EN;
writel(ctrl_reg, port->membase + CDNS_UART_CR);
+ clk_disable(cdns_uart->uartclk);
+ clk_disable(cdns_uart->pclk);
spin_unlock_irqrestore(&port->lock, flags);
} else {
spin_lock_irqsave(&port->lock, flags);
@@ -1436,9 +1432,33 @@ static int cdns_uart_resume(struct device *device)
return uart_resume_port(&cdns_uart_uart_driver, port);
}
#endif /* ! CONFIG_PM_SLEEP */
+static int __maybe_unused cdns_runtime_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct uart_port *port = platform_get_drvdata(pdev);
+ struct cdns_uart *cdns_uart = port->private_data;
+
+ clk_disable(cdns_uart->uartclk);
+ clk_disable(cdns_uart->pclk);
+ return 0;
+};
+
+static int __maybe_unused cdns_runtime_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct uart_port *port = platform_get_drvdata(pdev);
+ struct cdns_uart *cdns_uart = port->private_data;
-static SIMPLE_DEV_PM_OPS(cdns_uart_dev_pm_ops, cdns_uart_suspend,
- cdns_uart_resume);
+ clk_enable(cdns_uart->pclk);
+ clk_enable(cdns_uart->uartclk);
+ return 0;
+};
+
+static const struct dev_pm_ops cdns_uart_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(cdns_uart_suspend, cdns_uart_resume)
+ SET_RUNTIME_PM_OPS(cdns_runtime_suspend,
+ cdns_runtime_resume, NULL)
+};
static const struct cdns_platform_data zynqmp_uart_def = {
.quirks = CDNS_UART_RXBS_SUPPORT, };
@@ -1501,12 +1521,12 @@ static int cdns_uart_probe(struct platform_device *pdev)
return PTR_ERR(cdns_uart_data->uartclk);
}
- rc = clk_prepare(cdns_uart_data->pclk);
+ rc = clk_prepare_enable(cdns_uart_data->pclk);
if (rc) {
dev_err(&pdev->dev, "Unable to enable pclk clock.\n");
return rc;
}
- rc = clk_prepare(cdns_uart_data->uartclk);
+ rc = clk_prepare_enable(cdns_uart_data->uartclk);
if (rc) {
dev_err(&pdev->dev, "Unable to enable device clock.\n");
goto err_out_clk_dis_pclk;
@@ -1558,6 +1578,11 @@ static int cdns_uart_probe(struct platform_device *pdev)
cdns_uart_data->port = port;
platform_set_drvdata(pdev, port);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_set_autosuspend_delay(&pdev->dev, UART_AUTOSUSPEND_TIMEOUT);
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+
rc = uart_add_one_port(&cdns_uart_uart_driver, port);
if (rc) {
dev_err(&pdev->dev,
@@ -1573,9 +1598,12 @@ err_out_notif_unreg:
&cdns_uart_data->clk_rate_change_nb);
#endif
err_out_clk_disable:
- clk_unprepare(cdns_uart_data->uartclk);
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
+ clk_disable_unprepare(cdns_uart_data->uartclk);
err_out_clk_dis_pclk:
- clk_unprepare(cdns_uart_data->pclk);
+ clk_disable_unprepare(cdns_uart_data->pclk);
return rc;
}
@@ -1599,8 +1627,11 @@ static int cdns_uart_remove(struct platform_device *pdev)
#endif
rc = uart_remove_one_port(&cdns_uart_uart_driver, port);
port->mapbase = 0;
- clk_unprepare(cdns_uart_data->uartclk);
- clk_unprepare(cdns_uart_data->pclk);
+ clk_disable_unprepare(cdns_uart_data->uartclk);
+ clk_disable_unprepare(cdns_uart_data->pclk);
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
return rc;
}
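The cdns driver now routes clock gating through runtime PM: cdns_uart_pm() takes or drops a runtime reference, the clocks are only touched in the runtime callbacks, and probe enables autosuspend. A hedged sketch of the dev_pm_ops wiring this relies on (struct and names are placeholders, callback bodies trimmed to the clock calls, system-sleep ops omitted):

#include <linux/pm_runtime.h>
#include <linux/clk.h>
#include <linux/platform_device.h>

struct demo_uart {
        struct clk *pclk;
        struct clk *uartclk;
};

static int __maybe_unused demo_runtime_suspend(struct device *dev)
{
        struct demo_uart *d = dev_get_drvdata(dev);

        clk_disable(d->uartclk);
        clk_disable(d->pclk);
        return 0;
}

static int __maybe_unused demo_runtime_resume(struct device *dev)
{
        struct demo_uart *d = dev_get_drvdata(dev);

        clk_enable(d->pclk);
        clk_enable(d->uartclk);
        return 0;
}

static const struct dev_pm_ops demo_pm_ops = {
        SET_RUNTIME_PM_OPS(demo_runtime_suspend, demo_runtime_resume, NULL)
};

/* In probe(), after clk_prepare_enable() on both clocks:
 *      pm_runtime_set_autosuspend_delay(&pdev->dev, 3000);
 *      pm_runtime_use_autosuspend(&pdev->dev);
 *      pm_runtime_set_active(&pdev->dev);
 *      pm_runtime_enable(&pdev->dev);
 */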
diff --git a/drivers/tty/tty_baudrate.c b/drivers/tty/tty_baudrate.c
new file mode 100644
index 000000000000..5c33fd25676d
--- /dev/null
+++ b/drivers/tty/tty_baudrate.c
@@ -0,0 +1,232 @@
+/*
+ * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/termios.h>
+#include <linux/tty.h>
+#include <linux/export.h>
+
+
+/*
+ * Routine which returns the baud rate of the tty
+ *
+ * Note that the baud_table needs to be kept in sync with the
+ * include/asm/termbits.h file.
+ */
+static const speed_t baud_table[] = {
+ 0, 50, 75, 110, 134, 150, 200, 300, 600, 1200, 1800, 2400, 4800,
+ 9600, 19200, 38400, 57600, 115200, 230400, 460800,
+#ifdef __sparc__
+ 76800, 153600, 307200, 614400, 921600
+#else
+ 500000, 576000, 921600, 1000000, 1152000, 1500000, 2000000,
+ 2500000, 3000000, 3500000, 4000000
+#endif
+};
+
+#ifndef __sparc__
+static const tcflag_t baud_bits[] = {
+ B0, B50, B75, B110, B134, B150, B200, B300, B600,
+ B1200, B1800, B2400, B4800, B9600, B19200, B38400,
+ B57600, B115200, B230400, B460800, B500000, B576000,
+ B921600, B1000000, B1152000, B1500000, B2000000, B2500000,
+ B3000000, B3500000, B4000000
+};
+#else
+static const tcflag_t baud_bits[] = {
+ B0, B50, B75, B110, B134, B150, B200, B300, B600,
+ B1200, B1800, B2400, B4800, B9600, B19200, B38400,
+ B57600, B115200, B230400, B460800, B76800, B153600,
+ B307200, B614400, B921600
+};
+#endif
+
+static int n_baud_table = ARRAY_SIZE(baud_table);
+
+/**
+ * tty_termios_baud_rate
+ * @termios: termios structure
+ *
+ * Convert termios baud rate data into a speed. This should be called
+ * with the termios lock held if this termios is a terminal termios
+ * structure. May change the termios data. Device drivers can call this
+ * function but should use ->c_[io]speed directly as they are updated.
+ *
+ * Locking: none
+ */
+
+speed_t tty_termios_baud_rate(struct ktermios *termios)
+{
+ unsigned int cbaud;
+
+ cbaud = termios->c_cflag & CBAUD;
+
+#ifdef BOTHER
+ /* Magic token for arbitrary speed via c_ispeed/c_ospeed */
+ if (cbaud == BOTHER)
+ return termios->c_ospeed;
+#endif
+ if (cbaud & CBAUDEX) {
+ cbaud &= ~CBAUDEX;
+
+ if (cbaud < 1 || cbaud + 15 > n_baud_table)
+ termios->c_cflag &= ~CBAUDEX;
+ else
+ cbaud += 15;
+ }
+ return baud_table[cbaud];
+}
+EXPORT_SYMBOL(tty_termios_baud_rate);
+
+/**
+ * tty_termios_input_baud_rate
+ * @termios: termios structure
+ *
+ * Convert termios baud rate data into a speed. This should be called
+ * with the termios lock held if this termios is a terminal termios
+ * structure. May change the termios data. Device drivers can call this
+ * function but should use ->c_[io]speed directly as they are updated.
+ *
+ * Locking: none
+ */
+
+speed_t tty_termios_input_baud_rate(struct ktermios *termios)
+{
+#ifdef IBSHIFT
+ unsigned int cbaud = (termios->c_cflag >> IBSHIFT) & CBAUD;
+
+ if (cbaud == B0)
+ return tty_termios_baud_rate(termios);
+
+ /* Magic token for arbitrary speed via c_ispeed*/
+ if (cbaud == BOTHER)
+ return termios->c_ispeed;
+
+ if (cbaud & CBAUDEX) {
+ cbaud &= ~CBAUDEX;
+
+ if (cbaud < 1 || cbaud + 15 > n_baud_table)
+ termios->c_cflag &= ~(CBAUDEX << IBSHIFT);
+ else
+ cbaud += 15;
+ }
+ return baud_table[cbaud];
+#else
+ return tty_termios_baud_rate(termios);
+#endif
+}
+EXPORT_SYMBOL(tty_termios_input_baud_rate);
+
+/**
+ * tty_termios_encode_baud_rate
+ * @termios: ktermios structure holding user requested state
+ * @ispeed: input speed
+ * @ospeed: output speed
+ *
+ * Encode the speeds set into the passed termios structure. This is
+ * used as a library helper for drivers so that they can report back
+ * the actual speed selected when it differs from the speed requested
+ *
+ * For maximal back compatibility with legacy SYS5/POSIX *nix behaviour
+ * we need to carefully set the bits when the user does not get the
+ * desired speed. We allow small margins and preserve as much as possible
+ * of the input intent to keep compatibility.
+ *
+ * Locking: Caller should hold termios lock. This is already held
+ * when calling this function from the driver termios handler.
+ *
+ * The ifdefs deal with platforms whose owners have yet to update them
+ * and will all go away once this is done.
+ */
+
+void tty_termios_encode_baud_rate(struct ktermios *termios,
+ speed_t ibaud, speed_t obaud)
+{
+ int i = 0;
+ int ifound = -1, ofound = -1;
+ int iclose = ibaud/50, oclose = obaud/50;
+ int ibinput = 0;
+
+ if (obaud == 0) /* CD dropped */
+ ibaud = 0; /* Clear ibaud to be sure */
+
+ termios->c_ispeed = ibaud;
+ termios->c_ospeed = obaud;
+
+#ifdef BOTHER
+ /* If the user asked for a precise weird speed give a precise weird
+ answer. If they asked for a Bfoo speed they may have problems
+ digesting non-exact replies so fuzz a bit */
+
+ if ((termios->c_cflag & CBAUD) == BOTHER)
+ oclose = 0;
+ if (((termios->c_cflag >> IBSHIFT) & CBAUD) == BOTHER)
+ iclose = 0;
+ if ((termios->c_cflag >> IBSHIFT) & CBAUD)
+ ibinput = 1; /* An input speed was specified */
+#endif
+ termios->c_cflag &= ~CBAUD;
+
+ /*
+ * Our goal is to find a close match to the standard baud rate
+ * returned. Walk the baud rate table and if we get a very close
+ * match then report back the speed as a POSIX Bxxxx value by
+ * preference
+ */
+
+ do {
+ if (obaud - oclose <= baud_table[i] &&
+ obaud + oclose >= baud_table[i]) {
+ termios->c_cflag |= baud_bits[i];
+ ofound = i;
+ }
+ if (ibaud - iclose <= baud_table[i] &&
+ ibaud + iclose >= baud_table[i]) {
+ /* For the case input == output don't set IBAUD bits
+ if the user didn't do so */
+ if (ofound == i && !ibinput)
+ ifound = i;
+#ifdef IBSHIFT
+ else {
+ ifound = i;
+ termios->c_cflag |= (baud_bits[i] << IBSHIFT);
+ }
+#endif
+ }
+ } while (++i < n_baud_table);
+
+ /*
+ * If we found no match then use BOTHER if provided or warn
+ * the user their platform maintainer needs to wake up if not.
+ */
+#ifdef BOTHER
+ if (ofound == -1)
+ termios->c_cflag |= BOTHER;
+ /* Set exact input bits only if the input and output differ or the
+ user already did */
+ if (ifound == -1 && (ibaud != obaud || ibinput))
+ termios->c_cflag |= (BOTHER << IBSHIFT);
+#else
+ if (ifound == -1 || ofound == -1)
+ pr_warn_once("tty: Unable to return correct speed data as your architecture needs updating.\n");
+#endif
+}
+EXPORT_SYMBOL_GPL(tty_termios_encode_baud_rate);
+
+/**
+ * tty_encode_baud_rate - set baud rate of the tty
+ * @ibaud: input baud rate
+ * @obaud: output baud rate
+ *
+ * Update the current termios data for the tty with the new speed
+ * settings. The caller must hold the termios_rwsem for the tty in
+ * question.
+ */
+
+void tty_encode_baud_rate(struct tty_struct *tty, speed_t ibaud, speed_t obaud)
+{
+ tty_termios_encode_baud_rate(&tty->termios, ibaud, obaud);
+}
+EXPORT_SYMBOL_GPL(tty_encode_baud_rate);
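The new tty_baudrate.c collects the baud-rate helpers: a driver decodes the requested speeds from the ktermios and reports the rate it actually programmed back through tty_termios_encode_baud_rate(). A hedged sketch of how a set_termios() implementation typically uses them via the serial core wrappers (the 9600..115200 limits are just an example):

#include <linux/serial_core.h>
#include <linux/tty.h>

/* Sketch of a driver's set_termios() speed handling. */
static void demo_set_termios(struct uart_port *port, struct ktermios *termios,
                             struct ktermios *old)
{
        unsigned int baud;

        /* Clamp to what the hardware can do. */
        baud = uart_get_baud_rate(port, termios, old, 9600, 115200);

        /* ... program divisors for 'baud' ... */

        /* Report the rate actually programmed back into the termios. */
        uart_update_timeout(port, termios->c_cflag, baud);
        tty_termios_encode_baud_rate(termios, baud, baud);
}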
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 309d25065bb6..0c150b5a9dd6 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -377,65 +377,6 @@ struct tty_driver *tty_find_polling_driver(char *name, int *line)
EXPORT_SYMBOL_GPL(tty_find_polling_driver);
#endif
-static int is_ignored(int sig)
-{
- return (sigismember(&current->blocked, sig) ||
- current->sighand->action[sig-1].sa.sa_handler == SIG_IGN);
-}
-
-/**
- * tty_check_change - check for POSIX terminal changes
- * @tty: tty to check
- *
- * If we try to write to, or set the state of, a terminal and we're
- * not in the foreground, send a SIGTTOU. If the signal is blocked or
- * ignored, go ahead and perform the operation. (POSIX 7.2)
- *
- * Locking: ctrl_lock
- */
-
-int __tty_check_change(struct tty_struct *tty, int sig)
-{
- unsigned long flags;
- struct pid *pgrp, *tty_pgrp;
- int ret = 0;
-
- if (current->signal->tty != tty)
- return 0;
-
- rcu_read_lock();
- pgrp = task_pgrp(current);
-
- spin_lock_irqsave(&tty->ctrl_lock, flags);
- tty_pgrp = tty->pgrp;
- spin_unlock_irqrestore(&tty->ctrl_lock, flags);
-
- if (tty_pgrp && pgrp != tty->pgrp) {
- if (is_ignored(sig)) {
- if (sig == SIGTTIN)
- ret = -EIO;
- } else if (is_current_pgrp_orphaned())
- ret = -EIO;
- else {
- kill_pgrp(pgrp, sig, 1);
- set_thread_flag(TIF_SIGPENDING);
- ret = -ERESTARTSYS;
- }
- }
- rcu_read_unlock();
-
- if (!tty_pgrp)
- tty_warn(tty, "sig=%d, tty->pgrp == NULL!\n", sig);
-
- return ret;
-}
-
-int tty_check_change(struct tty_struct *tty)
-{
- return __tty_check_change(tty, SIGTTOU);
-}
-EXPORT_SYMBOL(tty_check_change);
-
static ssize_t hung_up_tty_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
@@ -509,79 +450,6 @@ static const struct file_operations hung_up_tty_fops = {
static DEFINE_SPINLOCK(redirect_lock);
static struct file *redirect;
-
-void proc_clear_tty(struct task_struct *p)
-{
- unsigned long flags;
- struct tty_struct *tty;
- spin_lock_irqsave(&p->sighand->siglock, flags);
- tty = p->signal->tty;
- p->signal->tty = NULL;
- spin_unlock_irqrestore(&p->sighand->siglock, flags);
- tty_kref_put(tty);
-}
-
-/**
- * proc_set_tty - set the controlling terminal
- *
- * Only callable by the session leader and only if it does not already have
- * a controlling terminal.
- *
- * Caller must hold: tty_lock()
- * a readlock on tasklist_lock
- * sighand lock
- */
-static void __proc_set_tty(struct tty_struct *tty)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&tty->ctrl_lock, flags);
- /*
- * The session and fg pgrp references will be non-NULL if
- * tiocsctty() is stealing the controlling tty
- */
- put_pid(tty->session);
- put_pid(tty->pgrp);
- tty->pgrp = get_pid(task_pgrp(current));
- spin_unlock_irqrestore(&tty->ctrl_lock, flags);
- tty->session = get_pid(task_session(current));
- if (current->signal->tty) {
- tty_debug(tty, "current tty %s not NULL!!\n",
- current->signal->tty->name);
- tty_kref_put(current->signal->tty);
- }
- put_pid(current->signal->tty_old_pgrp);
- current->signal->tty = tty_kref_get(tty);
- current->signal->tty_old_pgrp = NULL;
-}
-
-static void proc_set_tty(struct tty_struct *tty)
-{
- spin_lock_irq(&current->sighand->siglock);
- __proc_set_tty(tty);
- spin_unlock_irq(&current->sighand->siglock);
-}
-
-struct tty_struct *get_current_tty(void)
-{
- struct tty_struct *tty;
- unsigned long flags;
-
- spin_lock_irqsave(&current->sighand->siglock, flags);
- tty = tty_kref_get(current->signal->tty);
- spin_unlock_irqrestore(&current->sighand->siglock, flags);
- return tty;
-}
-EXPORT_SYMBOL_GPL(get_current_tty);
-
-static void session_clear_tty(struct pid *session)
-{
- struct task_struct *p;
- do_each_pid_task(session, PIDTYPE_SID, p) {
- proc_clear_tty(p);
- } while_each_pid_task(session, PIDTYPE_SID, p);
-}
-
/**
* tty_wakeup - request more data
* @tty: terminal
@@ -609,60 +477,6 @@ void tty_wakeup(struct tty_struct *tty)
EXPORT_SYMBOL_GPL(tty_wakeup);
/**
- * tty_signal_session_leader - sends SIGHUP to session leader
- * @tty controlling tty
- * @exit_session if non-zero, signal all foreground group processes
- *
- * Send SIGHUP and SIGCONT to the session leader and its process group.
- * Optionally, signal all processes in the foreground process group.
- *
- * Returns the number of processes in the session with this tty
- * as their controlling terminal. This value is used to drop
- * tty references for those processes.
- */
-static int tty_signal_session_leader(struct tty_struct *tty, int exit_session)
-{
- struct task_struct *p;
- int refs = 0;
- struct pid *tty_pgrp = NULL;
-
- read_lock(&tasklist_lock);
- if (tty->session) {
- do_each_pid_task(tty->session, PIDTYPE_SID, p) {
- spin_lock_irq(&p->sighand->siglock);
- if (p->signal->tty == tty) {
- p->signal->tty = NULL;
- /* We defer the dereferences outside fo
- the tasklist lock */
- refs++;
- }
- if (!p->signal->leader) {
- spin_unlock_irq(&p->sighand->siglock);
- continue;
- }
- __group_send_sig_info(SIGHUP, SEND_SIG_PRIV, p);
- __group_send_sig_info(SIGCONT, SEND_SIG_PRIV, p);
- put_pid(p->signal->tty_old_pgrp); /* A noop */
- spin_lock(&tty->ctrl_lock);
- tty_pgrp = get_pid(tty->pgrp);
- if (tty->pgrp)
- p->signal->tty_old_pgrp = get_pid(tty->pgrp);
- spin_unlock(&tty->ctrl_lock);
- spin_unlock_irq(&p->sighand->siglock);
- } while_each_pid_task(tty->session, PIDTYPE_SID, p);
- }
- read_unlock(&tasklist_lock);
-
- if (tty_pgrp) {
- if (exit_session)
- kill_pgrp(tty_pgrp, SIGHUP, exit_session);
- put_pid(tty_pgrp);
- }
-
- return refs;
-}
-
-/**
* __tty_hangup - actual handler for hangup events
* @work: tty device
*
@@ -840,7 +654,7 @@ void tty_vhangup_self(void)
* is complete. That guarantee is necessary for security reasons.
*/
-static void tty_vhangup_session(struct tty_struct *tty)
+void tty_vhangup_session(struct tty_struct *tty)
{
tty_debug_hangup(tty, "session hangup\n");
__tty_hangup(tty, 1);
@@ -862,106 +676,6 @@ int tty_hung_up_p(struct file *filp)
EXPORT_SYMBOL(tty_hung_up_p);
/**
- * disassociate_ctty - disconnect controlling tty
- * @on_exit: true if exiting so need to "hang up" the session
- *
- * This function is typically called only by the session leader, when
- * it wants to disassociate itself from its controlling tty.
- *
- * It performs the following functions:
- * (1) Sends a SIGHUP and SIGCONT to the foreground process group
- * (2) Clears the tty from being controlling the session
- * (3) Clears the controlling tty for all processes in the
- * session group.
- *
- * The argument on_exit is set to 1 if called when a process is
- * exiting; it is 0 if called by the ioctl TIOCNOTTY.
- *
- * Locking:
- * BTM is taken for hysterical raisins, and held when
- * called from no_tty().
- * tty_mutex is taken to protect tty
- * ->siglock is taken to protect ->signal/->sighand
- * tasklist_lock is taken to walk process list for sessions
- * ->siglock is taken to protect ->signal/->sighand
- */
-
-void disassociate_ctty(int on_exit)
-{
- struct tty_struct *tty;
-
- if (!current->signal->leader)
- return;
-
- tty = get_current_tty();
- if (tty) {
- if (on_exit && tty->driver->type != TTY_DRIVER_TYPE_PTY) {
- tty_vhangup_session(tty);
- } else {
- struct pid *tty_pgrp = tty_get_pgrp(tty);
- if (tty_pgrp) {
- kill_pgrp(tty_pgrp, SIGHUP, on_exit);
- if (!on_exit)
- kill_pgrp(tty_pgrp, SIGCONT, on_exit);
- put_pid(tty_pgrp);
- }
- }
- tty_kref_put(tty);
-
- } else if (on_exit) {
- struct pid *old_pgrp;
- spin_lock_irq(&current->sighand->siglock);
- old_pgrp = current->signal->tty_old_pgrp;
- current->signal->tty_old_pgrp = NULL;
- spin_unlock_irq(&current->sighand->siglock);
- if (old_pgrp) {
- kill_pgrp(old_pgrp, SIGHUP, on_exit);
- kill_pgrp(old_pgrp, SIGCONT, on_exit);
- put_pid(old_pgrp);
- }
- return;
- }
-
- spin_lock_irq(&current->sighand->siglock);
- put_pid(current->signal->tty_old_pgrp);
- current->signal->tty_old_pgrp = NULL;
-
- tty = tty_kref_get(current->signal->tty);
- if (tty) {
- unsigned long flags;
- spin_lock_irqsave(&tty->ctrl_lock, flags);
- put_pid(tty->session);
- put_pid(tty->pgrp);
- tty->session = NULL;
- tty->pgrp = NULL;
- spin_unlock_irqrestore(&tty->ctrl_lock, flags);
- tty_kref_put(tty);
- } else
- tty_debug_hangup(tty, "no current tty\n");
-
- spin_unlock_irq(&current->sighand->siglock);
- /* Now clear signal->tty under the lock */
- read_lock(&tasklist_lock);
- session_clear_tty(task_session(current));
- read_unlock(&tasklist_lock);
-}
-
-/**
- *
- * no_tty - Ensure the current process does not have a controlling tty
- */
-void no_tty(void)
-{
- /* FIXME: Review locking here. The tty_lock never covered any race
- between a new association and proc_clear_tty but possible we need
- to protect against this anyway */
- struct task_struct *tsk = current;
- disassociate_ctty(0);
- proc_clear_tty(tsk);
-}
-
-
-/**
* stop_tty - propagate flow control
* @tty: tty to stop
*
@@ -1520,7 +1234,7 @@ struct tty_struct *tty_init_dev(struct tty_driver *driver, int idx)
* This code guarantees that either everything succeeds and the
* TTY is ready for operation, or else the table slots are vacated
* and the allocated memory released. (Except that the termios
- * and locked termios may be retained.)
+ * may be retained.)
*/
if (!try_module_get(driver->owner))
@@ -2163,38 +1877,13 @@ retry_open:
}
clear_bit(TTY_HUPPED, &tty->flags);
-
- read_lock(&tasklist_lock);
- spin_lock_irq(&current->sighand->siglock);
noctty = (filp->f_flags & O_NOCTTY) ||
- (IS_ENABLED(CONFIG_VT) && device == MKDEV(TTY_MAJOR, 0)) ||
- device == MKDEV(TTYAUX_MAJOR, 1) ||
- (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
- tty->driver->subtype == PTY_TYPE_MASTER);
-
- if (!noctty &&
- current->signal->leader &&
- !current->signal->tty &&
- tty->session == NULL) {
- /*
- * Don't let a process that only has write access to the tty
- * obtain the privileges associated with having a tty as
- * controlling terminal (being able to reopen it with full
- * access through /dev/tty, being able to perform pushback).
- * Many distributions set the group of all ttys to "tty" and
- * grant write-only access to all terminals for setgid tty
- * binaries, which should not imply full privileges on all ttys.
- *
- * This could theoretically break old code that performs open()
- * on a write-only file descriptor. In that case, it might be
- * necessary to also permit this if
- * inode_permission(inode, MAY_READ) == 0.
- */
- if (filp->f_mode & FMODE_READ)
- __proc_set_tty(tty);
- }
- spin_unlock_irq(&current->sighand->siglock);
- read_unlock(&tasklist_lock);
+ (IS_ENABLED(CONFIG_VT) && device == MKDEV(TTY_MAJOR, 0)) ||
+ device == MKDEV(TTYAUX_MAJOR, 1) ||
+ (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
+ tty->driver->subtype == PTY_TYPE_MASTER);
+ if (!noctty)
+ tty_open_proc_set_tty(filp, tty);
tty_unlock(tty);
return 0;
}
@@ -2457,211 +2146,6 @@ static int fionbio(struct file *file, int __user *p)
}
/**
- * tiocsctty - set controlling tty
- * @tty: tty structure
- * @arg: user argument
- *
- * This ioctl is used to manage job control. It permits a session
- * leader to set this tty as the controlling tty for the session.
- *
- * Locking:
- * Takes tty_lock() to serialize proc_set_tty() for this tty
- * Takes tasklist_lock internally to walk sessions
- * Takes ->siglock() when updating signal->tty
- */
-
-static int tiocsctty(struct tty_struct *tty, struct file *file, int arg)
-{
- int ret = 0;
-
- tty_lock(tty);
- read_lock(&tasklist_lock);
-
- if (current->signal->leader && (task_session(current) == tty->session))
- goto unlock;
-
- /*
- * The process must be a session leader and
- * not have a controlling tty already.
- */
- if (!current->signal->leader || current->signal->tty) {
- ret = -EPERM;
- goto unlock;
- }
-
- if (tty->session) {
- /*
- * This tty is already the controlling
- * tty for another session group!
- */
- if (arg == 1 && capable(CAP_SYS_ADMIN)) {
- /*
- * Steal it away
- */
- session_clear_tty(tty->session);
- } else {
- ret = -EPERM;
- goto unlock;
- }
- }
-
- /* See the comment in tty_open(). */
- if ((file->f_mode & FMODE_READ) == 0 && !capable(CAP_SYS_ADMIN)) {
- ret = -EPERM;
- goto unlock;
- }
-
- proc_set_tty(tty);
-unlock:
- read_unlock(&tasklist_lock);
- tty_unlock(tty);
- return ret;
-}
-
-/**
- * tty_get_pgrp - return a ref counted pgrp pid
- * @tty: tty to read
- *
- * Returns a refcounted instance of the pid struct for the process
- * group controlling the tty.
- */
-
-struct pid *tty_get_pgrp(struct tty_struct *tty)
-{
- unsigned long flags;
- struct pid *pgrp;
-
- spin_lock_irqsave(&tty->ctrl_lock, flags);
- pgrp = get_pid(tty->pgrp);
- spin_unlock_irqrestore(&tty->ctrl_lock, flags);
-
- return pgrp;
-}
-EXPORT_SYMBOL_GPL(tty_get_pgrp);
-
-/*
- * This checks not only the pgrp, but falls back on the pid if no
- * satisfactory pgrp is found. I dunno - gdb doesn't work correctly
- * without this...
- *
- * The caller must hold rcu lock or the tasklist lock.
- */
-static struct pid *session_of_pgrp(struct pid *pgrp)
-{
- struct task_struct *p;
- struct pid *sid = NULL;
-
- p = pid_task(pgrp, PIDTYPE_PGID);
- if (p == NULL)
- p = pid_task(pgrp, PIDTYPE_PID);
- if (p != NULL)
- sid = task_session(p);
-
- return sid;
-}
-
-/**
- * tiocgpgrp - get process group
- * @tty: tty passed by user
- * @real_tty: tty side of the tty passed by the user if a pty else the tty
- * @p: returned pid
- *
- * Obtain the process group of the tty. If there is no process group
- * return an error.
- *
- * Locking: none. Reference to current->signal->tty is safe.
- */
-
-static int tiocgpgrp(struct tty_struct *tty, struct tty_struct *real_tty, pid_t __user *p)
-{
- struct pid *pid;
- int ret;
- /*
- * (tty == real_tty) is a cheap way of
- * testing if the tty is NOT a master pty.
- */
- if (tty == real_tty && current->signal->tty != real_tty)
- return -ENOTTY;
- pid = tty_get_pgrp(real_tty);
- ret = put_user(pid_vnr(pid), p);
- put_pid(pid);
- return ret;
-}
-
-/**
- * tiocspgrp - attempt to set process group
- * @tty: tty passed by user
- * @real_tty: tty side device matching tty passed by user
- * @p: pid pointer
- *
- * Set the process group of the tty to the session passed. Only
- * permitted where the tty session is our session.
- *
- * Locking: RCU, ctrl lock
- */
-
-static int tiocspgrp(struct tty_struct *tty, struct tty_struct *real_tty, pid_t __user *p)
-{
- struct pid *pgrp;
- pid_t pgrp_nr;
- int retval = tty_check_change(real_tty);
-
- if (retval == -EIO)
- return -ENOTTY;
- if (retval)
- return retval;
- if (!current->signal->tty ||
- (current->signal->tty != real_tty) ||
- (real_tty->session != task_session(current)))
- return -ENOTTY;
- if (get_user(pgrp_nr, p))
- return -EFAULT;
- if (pgrp_nr < 0)
- return -EINVAL;
- rcu_read_lock();
- pgrp = find_vpid(pgrp_nr);
- retval = -ESRCH;
- if (!pgrp)
- goto out_unlock;
- retval = -EPERM;
- if (session_of_pgrp(pgrp) != task_session(current))
- goto out_unlock;
- retval = 0;
- spin_lock_irq(&tty->ctrl_lock);
- put_pid(real_tty->pgrp);
- real_tty->pgrp = get_pid(pgrp);
- spin_unlock_irq(&tty->ctrl_lock);
-out_unlock:
- rcu_read_unlock();
- return retval;
-}
-
-/**
- * tiocgsid - get session id
- * @tty: tty passed by user
- * @real_tty: tty side of the tty passed by the user if a pty else the tty
- * @p: pointer to returned session id
- *
- * Obtain the session id of the tty. If there is no session
- * return an error.
- *
- * Locking: none. Reference to current->signal->tty is safe.
- */
-
-static int tiocgsid(struct tty_struct *tty, struct tty_struct *real_tty, pid_t __user *p)
-{
- /*
- * (tty == real_tty) is a cheap way of
- * testing if the tty is NOT a master pty.
- */
- if (tty == real_tty && current->signal->tty != real_tty)
- return -ENOTTY;
- if (!real_tty->session)
- return -ENOTTY;
- return put_user(pid_vnr(real_tty->session), p);
-}
-
-/**
* tiocsetd - set line discipline
* @tty: tty device
* @p: pointer to user data
@@ -2843,8 +2327,8 @@ static void tty_warn_deprecated_flags(struct serial_struct __user *ss)
flags &= ASYNC_DEPRECATED;
if (flags && __ratelimit(&depr_flags))
- pr_warning("%s: '%s' is using deprecated serial flags (with no effect): %.8x\n",
- __func__, get_task_comm(comm, current), flags);
+ pr_warn("%s: '%s' is using deprecated serial flags (with no effect): %.8x\n",
+ __func__, get_task_comm(comm, current), flags);
}
/*
@@ -2920,19 +2404,6 @@ long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
int excl = test_bit(TTY_EXCLUSIVE, &tty->flags);
return put_user(excl, (int __user *)p);
}
- case TIOCNOTTY:
- if (current->signal->tty != tty)
- return -ENOTTY;
- no_tty();
- return 0;
- case TIOCSCTTY:
- return tiocsctty(real_tty, file, arg);
- case TIOCGPGRP:
- return tiocgpgrp(tty, real_tty, p);
- case TIOCSPGRP:
- return tiocspgrp(tty, real_tty, p);
- case TIOCGSID:
- return tiocgsid(tty, real_tty, p);
case TIOCGETD:
return tiocgetd(tty, p);
case TIOCSETD:
@@ -2993,6 +2464,10 @@ long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
case TIOCSSERIAL:
tty_warn_deprecated_flags(p);
break;
+ default:
+ retval = tty_jobctrl_ioctl(tty, real_tty, file, cmd, arg);
+ if (retval != -ENOIOCTLCMD)
+ return retval;
}
if (tty->ops->ioctl) {
retval = tty->ops->ioctl(tty, cmd, arg);
@@ -3293,9 +2768,9 @@ struct device *tty_register_device_attr(struct tty_driver *driver,
{
char name[64];
dev_t devt = MKDEV(driver->major, driver->minor_start) + index;
- struct device *dev = NULL;
- int retval = -ENODEV;
- bool cdev = false;
+ struct ktermios *tp;
+ struct device *dev;
+ int retval;
if (index >= driver->num) {
pr_err("%s: Attempt to register invalid tty line number (%d)\n",
@@ -3308,18 +2783,9 @@ struct device *tty_register_device_attr(struct tty_driver *driver,
else
tty_line_name(driver, index, name);
- if (!(driver->flags & TTY_DRIVER_DYNAMIC_ALLOC)) {
- retval = tty_cdev_add(driver, devt, index, 1);
- if (retval)
- goto error;
- cdev = true;
- }
-
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (!dev) {
- retval = -ENOMEM;
- goto error;
- }
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
dev->devt = devt;
dev->class = tty_class;
@@ -3329,18 +2795,38 @@ struct device *tty_register_device_attr(struct tty_driver *driver,
dev->groups = attr_grp;
dev_set_drvdata(dev, drvdata);
+ dev_set_uevent_suppress(dev, 1);
+
retval = device_register(dev);
if (retval)
- goto error;
+ goto err_put;
+
+ if (!(driver->flags & TTY_DRIVER_DYNAMIC_ALLOC)) {
+ /*
+ * Free any saved termios data so that the termios state is
+ * reset when reusing a minor number.
+ */
+ tp = driver->termios[index];
+ if (tp) {
+ driver->termios[index] = NULL;
+ kfree(tp);
+ }
+
+ retval = tty_cdev_add(driver, devt, index, 1);
+ if (retval)
+ goto err_del;
+ }
+
+ dev_set_uevent_suppress(dev, 0);
+ kobject_uevent(&dev->kobj, KOBJ_ADD);
return dev;
-error:
+err_del:
+ device_del(dev);
+err_put:
put_device(dev);
- if (cdev) {
- cdev_del(driver->cdevs[index]);
- driver->cdevs[index] = NULL;
- }
+
return ERR_PTR(retval);
}
EXPORT_SYMBOL_GPL(tty_register_device_attr);
@@ -3441,11 +2927,6 @@ static void destruct_tty_driver(struct kref *kref)
struct ktermios *tp;
if (driver->flags & TTY_DRIVER_INSTALLED) {
- /*
- * Free the termios and termios_locked structures because
- * we don't want to get memory leaks when modular tty
- * drivers are removed from the kernel.
- */
for (i = 0; i < driver->num; i++) {
tp = driver->termios[i];
if (tp) {
@@ -3578,30 +3059,6 @@ void tty_default_fops(struct file_operations *fops)
*fops = tty_fops;
}
-/*
- * Initialize the console device. This is called *early*, so
- * we can't necessarily depend on lots of kernel help here.
- * Just do some early initializations, and do the complex setup
- * later.
- */
-void __init console_init(void)
-{
- initcall_t *call;
-
- /* Setup the default TTY line discipline. */
- n_tty_init();
-
- /*
- * set up the console device so that later boot sequences can
- * inform about problems etc..
- */
- call = __con_initcall_start;
- while (call < __con_initcall_end) {
- (*call)();
- call++;
- }
-}
-
static char *tty_devnode(struct device *dev, umode_t *mode)
{
if (!mode)
diff --git a/drivers/tty/tty_ioctl.c b/drivers/tty/tty_ioctl.c
index a9a978731c5b..efa96e6c4c1b 100644
--- a/drivers/tty/tty_ioctl.c
+++ b/drivers/tty/tty_ioctl.c
@@ -258,228 +258,6 @@ static void unset_locked_termios(struct tty_struct *tty, struct ktermios *old)
/* FIXME: What should we do for i/ospeed */
}
-/*
- * Routine which returns the baud rate of the tty
- *
- * Note that the baud_table needs to be kept in sync with the
- * include/asm/termbits.h file.
- */
-static const speed_t baud_table[] = {
- 0, 50, 75, 110, 134, 150, 200, 300, 600, 1200, 1800, 2400, 4800,
- 9600, 19200, 38400, 57600, 115200, 230400, 460800,
-#ifdef __sparc__
- 76800, 153600, 307200, 614400, 921600
-#else
- 500000, 576000, 921600, 1000000, 1152000, 1500000, 2000000,
- 2500000, 3000000, 3500000, 4000000
-#endif
-};
-
-#ifndef __sparc__
-static const tcflag_t baud_bits[] = {
- B0, B50, B75, B110, B134, B150, B200, B300, B600,
- B1200, B1800, B2400, B4800, B9600, B19200, B38400,
- B57600, B115200, B230400, B460800, B500000, B576000,
- B921600, B1000000, B1152000, B1500000, B2000000, B2500000,
- B3000000, B3500000, B4000000
-};
-#else
-static const tcflag_t baud_bits[] = {
- B0, B50, B75, B110, B134, B150, B200, B300, B600,
- B1200, B1800, B2400, B4800, B9600, B19200, B38400,
- B57600, B115200, B230400, B460800, B76800, B153600,
- B307200, B614400, B921600
-};
-#endif
-
-static int n_baud_table = ARRAY_SIZE(baud_table);
-
-/**
- * tty_termios_baud_rate
- * @termios: termios structure
- *
- * Convert termios baud rate data into a speed. This should be called
- * with the termios lock held if this termios is a terminal termios
- * structure. May change the termios data. Device drivers can call this
- * function but should use ->c_[io]speed directly as they are updated.
- *
- * Locking: none
- */
-
-speed_t tty_termios_baud_rate(struct ktermios *termios)
-{
- unsigned int cbaud;
-
- cbaud = termios->c_cflag & CBAUD;
-
-#ifdef BOTHER
- /* Magic token for arbitrary speed via c_ispeed/c_ospeed */
- if (cbaud == BOTHER)
- return termios->c_ospeed;
-#endif
- if (cbaud & CBAUDEX) {
- cbaud &= ~CBAUDEX;
-
- if (cbaud < 1 || cbaud + 15 > n_baud_table)
- termios->c_cflag &= ~CBAUDEX;
- else
- cbaud += 15;
- }
- return baud_table[cbaud];
-}
-EXPORT_SYMBOL(tty_termios_baud_rate);
-
-/**
- * tty_termios_input_baud_rate
- * @termios: termios structure
- *
- * Convert termios baud rate data into a speed. This should be called
- * with the termios lock held if this termios is a terminal termios
- * structure. May change the termios data. Device drivers can call this
- * function but should use ->c_[io]speed directly as they are updated.
- *
- * Locking: none
- */
-
-speed_t tty_termios_input_baud_rate(struct ktermios *termios)
-{
-#ifdef IBSHIFT
- unsigned int cbaud = (termios->c_cflag >> IBSHIFT) & CBAUD;
-
- if (cbaud == B0)
- return tty_termios_baud_rate(termios);
-
- /* Magic token for arbitrary speed via c_ispeed*/
- if (cbaud == BOTHER)
- return termios->c_ispeed;
-
- if (cbaud & CBAUDEX) {
- cbaud &= ~CBAUDEX;
-
- if (cbaud < 1 || cbaud + 15 > n_baud_table)
- termios->c_cflag &= ~(CBAUDEX << IBSHIFT);
- else
- cbaud += 15;
- }
- return baud_table[cbaud];
-#else
- return tty_termios_baud_rate(termios);
-#endif
-}
-EXPORT_SYMBOL(tty_termios_input_baud_rate);
-
-/**
- * tty_termios_encode_baud_rate
- * @termios: ktermios structure holding user requested state
- * @ispeed: input speed
- * @ospeed: output speed
- *
- * Encode the speeds set into the passed termios structure. This is
- * used as a library helper for drivers so that they can report back
- * the actual speed selected when it differs from the speed requested
- *
- * For maximal back compatibility with legacy SYS5/POSIX *nix behaviour
- * we need to carefully set the bits when the user does not get the
- * desired speed. We allow small margins and preserve as much of possible
- * of the input intent to keep compatibility.
- *
- * Locking: Caller should hold termios lock. This is already held
- * when calling this function from the driver termios handler.
- *
- * The ifdefs deal with platforms whose owners have yet to update them
- * and will all go away once this is done.
- */
-
-void tty_termios_encode_baud_rate(struct ktermios *termios,
- speed_t ibaud, speed_t obaud)
-{
- int i = 0;
- int ifound = -1, ofound = -1;
- int iclose = ibaud/50, oclose = obaud/50;
- int ibinput = 0;
-
- if (obaud == 0) /* CD dropped */
- ibaud = 0; /* Clear ibaud to be sure */
-
- termios->c_ispeed = ibaud;
- termios->c_ospeed = obaud;
-
-#ifdef BOTHER
- /* If the user asked for a precise weird speed give a precise weird
- answer. If they asked for a Bfoo speed they may have problems
- digesting non-exact replies so fuzz a bit */
-
- if ((termios->c_cflag & CBAUD) == BOTHER)
- oclose = 0;
- if (((termios->c_cflag >> IBSHIFT) & CBAUD) == BOTHER)
- iclose = 0;
- if ((termios->c_cflag >> IBSHIFT) & CBAUD)
- ibinput = 1; /* An input speed was specified */
-#endif
- termios->c_cflag &= ~CBAUD;
-
- /*
- * Our goal is to find a close match to the standard baud rate
- * returned. Walk the baud rate table and if we get a very close
- * match then report back the speed as a POSIX Bxxxx value by
- * preference
- */
-
- do {
- if (obaud - oclose <= baud_table[i] &&
- obaud + oclose >= baud_table[i]) {
- termios->c_cflag |= baud_bits[i];
- ofound = i;
- }
- if (ibaud - iclose <= baud_table[i] &&
- ibaud + iclose >= baud_table[i]) {
- /* For the case input == output don't set IBAUD bits
- if the user didn't do so */
- if (ofound == i && !ibinput)
- ifound = i;
-#ifdef IBSHIFT
- else {
- ifound = i;
- termios->c_cflag |= (baud_bits[i] << IBSHIFT);
- }
-#endif
- }
- } while (++i < n_baud_table);
-
- /*
- * If we found no match then use BOTHER if provided or warn
- * the user their platform maintainer needs to wake up if not.
- */
-#ifdef BOTHER
- if (ofound == -1)
- termios->c_cflag |= BOTHER;
- /* Set exact input bits only if the input and output differ or the
- user already did */
- if (ifound == -1 && (ibaud != obaud || ibinput))
- termios->c_cflag |= (BOTHER << IBSHIFT);
-#else
- if (ifound == -1 || ofound == -1)
- pr_warn_once("tty: Unable to return correct speed data as your architecture needs updating.\n");
-#endif
-}
-EXPORT_SYMBOL_GPL(tty_termios_encode_baud_rate);
-
-/**
- * tty_encode_baud_rate - set baud rate of the tty
- * @ibaud: input baud rate
- * @obad: output baud rate
- *
- * Update the current termios data for the tty with the new speed
- * settings. The caller must hold the termios_rwsem for the tty in
- * question.
- */
-
-void tty_encode_baud_rate(struct tty_struct *tty, speed_t ibaud, speed_t obaud)
-{
- tty_termios_encode_baud_rate(&tty->termios, ibaud, obaud);
-}
-EXPORT_SYMBOL_GPL(tty_encode_baud_rate);
-
/**
* tty_termios_copy_hw - copy hardware settings
* @new: New termios
diff --git a/drivers/tty/tty_jobctrl.c b/drivers/tty/tty_jobctrl.c
new file mode 100644
index 000000000000..e7032309ee87
--- /dev/null
+++ b/drivers/tty/tty_jobctrl.c
@@ -0,0 +1,554 @@
+/*
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ */
+
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/signal.h>
+#include <linux/sched/signal.h>
+#include <linux/sched/task.h>
+#include <linux/tty.h>
+#include <linux/fcntl.h>
+#include <linux/uaccess.h>
+
+static int is_ignored(int sig)
+{
+ return (sigismember(&current->blocked, sig) ||
+ current->sighand->action[sig-1].sa.sa_handler == SIG_IGN);
+}
+
+/**
+ * tty_check_change - check for POSIX terminal changes
+ * @tty: tty to check
+ *
+ * If we try to write to, or set the state of, a terminal and we're
+ * not in the foreground, send a SIGTTOU. If the signal is blocked or
+ * ignored, go ahead and perform the operation. (POSIX 7.2)
+ *
+ * Locking: ctrl_lock
+ */
+int __tty_check_change(struct tty_struct *tty, int sig)
+{
+ unsigned long flags;
+ struct pid *pgrp, *tty_pgrp;
+ int ret = 0;
+
+ if (current->signal->tty != tty)
+ return 0;
+
+ rcu_read_lock();
+ pgrp = task_pgrp(current);
+
+ spin_lock_irqsave(&tty->ctrl_lock, flags);
+ tty_pgrp = tty->pgrp;
+ spin_unlock_irqrestore(&tty->ctrl_lock, flags);
+
+ if (tty_pgrp && pgrp != tty->pgrp) {
+ if (is_ignored(sig)) {
+ if (sig == SIGTTIN)
+ ret = -EIO;
+ } else if (is_current_pgrp_orphaned())
+ ret = -EIO;
+ else {
+ kill_pgrp(pgrp, sig, 1);
+ set_thread_flag(TIF_SIGPENDING);
+ ret = -ERESTARTSYS;
+ }
+ }
+ rcu_read_unlock();
+
+ if (!tty_pgrp)
+ tty_warn(tty, "sig=%d, tty->pgrp == NULL!\n", sig);
+
+ return ret;
+}
+
+int tty_check_change(struct tty_struct *tty)
+{
+ return __tty_check_change(tty, SIGTTOU);
+}
+EXPORT_SYMBOL(tty_check_change);
+
+void proc_clear_tty(struct task_struct *p)
+{
+ unsigned long flags;
+ struct tty_struct *tty;
+ spin_lock_irqsave(&p->sighand->siglock, flags);
+ tty = p->signal->tty;
+ p->signal->tty = NULL;
+ spin_unlock_irqrestore(&p->sighand->siglock, flags);
+ tty_kref_put(tty);
+}
+
+/**
+ * proc_set_tty - set the controlling terminal
+ *
+ * Only callable by the session leader and only if it does not already have
+ * a controlling terminal.
+ *
+ * Caller must hold: tty_lock()
+ * a readlock on tasklist_lock
+ * sighand lock
+ */
+static void __proc_set_tty(struct tty_struct *tty)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&tty->ctrl_lock, flags);
+ /*
+ * The session and fg pgrp references will be non-NULL if
+ * tiocsctty() is stealing the controlling tty
+ */
+ put_pid(tty->session);
+ put_pid(tty->pgrp);
+ tty->pgrp = get_pid(task_pgrp(current));
+ spin_unlock_irqrestore(&tty->ctrl_lock, flags);
+ tty->session = get_pid(task_session(current));
+ if (current->signal->tty) {
+ tty_debug(tty, "current tty %s not NULL!!\n",
+ current->signal->tty->name);
+ tty_kref_put(current->signal->tty);
+ }
+ put_pid(current->signal->tty_old_pgrp);
+ current->signal->tty = tty_kref_get(tty);
+ current->signal->tty_old_pgrp = NULL;
+}
+
+static void proc_set_tty(struct tty_struct *tty)
+{
+ spin_lock_irq(&current->sighand->siglock);
+ __proc_set_tty(tty);
+ spin_unlock_irq(&current->sighand->siglock);
+}
+
+/*
+ * Called by tty_open() to set the controlling tty if applicable.
+ */
+void tty_open_proc_set_tty(struct file *filp, struct tty_struct *tty)
+{
+ read_lock(&tasklist_lock);
+ spin_lock_irq(&current->sighand->siglock);
+ if (current->signal->leader &&
+ !current->signal->tty &&
+ tty->session == NULL) {
+ /*
+ * Don't let a process that only has write access to the tty
+ * obtain the privileges associated with having a tty as
+ * controlling terminal (being able to reopen it with full
+ * access through /dev/tty, being able to perform pushback).
+ * Many distributions set the group of all ttys to "tty" and
+ * grant write-only access to all terminals for setgid tty
+ * binaries, which should not imply full privileges on all ttys.
+ *
+ * This could theoretically break old code that performs open()
+ * on a write-only file descriptor. In that case, it might be
+ * necessary to also permit this if
+ * inode_permission(inode, MAY_READ) == 0.
+ */
+ if (filp->f_mode & FMODE_READ)
+ __proc_set_tty(tty);
+ }
+ spin_unlock_irq(&current->sighand->siglock);
+ read_unlock(&tasklist_lock);
+}
+
+struct tty_struct *get_current_tty(void)
+{
+ struct tty_struct *tty;
+ unsigned long flags;
+
+ spin_lock_irqsave(&current->sighand->siglock, flags);
+ tty = tty_kref_get(current->signal->tty);
+ spin_unlock_irqrestore(&current->sighand->siglock, flags);
+ return tty;
+}
+EXPORT_SYMBOL_GPL(get_current_tty);
+
+/*
+ * Called from tty_release().
+ */
+void session_clear_tty(struct pid *session)
+{
+ struct task_struct *p;
+ do_each_pid_task(session, PIDTYPE_SID, p) {
+ proc_clear_tty(p);
+ } while_each_pid_task(session, PIDTYPE_SID, p);
+}
+
+/**
+ * tty_signal_session_leader - sends SIGHUP to session leader
+ * @tty: controlling tty
+ * @exit_session: if non-zero, signal all foreground group processes
+ *
+ * Send SIGHUP and SIGCONT to the session leader and its process group.
+ * Optionally, signal all processes in the foreground process group.
+ *
+ * Returns the number of processes in the session with this tty
+ * as their controlling terminal. This value is used to drop
+ * tty references for those processes.
+ */
+int tty_signal_session_leader(struct tty_struct *tty, int exit_session)
+{
+ struct task_struct *p;
+ int refs = 0;
+ struct pid *tty_pgrp = NULL;
+
+ read_lock(&tasklist_lock);
+ if (tty->session) {
+ do_each_pid_task(tty->session, PIDTYPE_SID, p) {
+ spin_lock_irq(&p->sighand->siglock);
+ if (p->signal->tty == tty) {
+ p->signal->tty = NULL;
+ /* We defer the dereferences outside of
+ the tasklist lock */
+ refs++;
+ }
+ if (!p->signal->leader) {
+ spin_unlock_irq(&p->sighand->siglock);
+ continue;
+ }
+ __group_send_sig_info(SIGHUP, SEND_SIG_PRIV, p);
+ __group_send_sig_info(SIGCONT, SEND_SIG_PRIV, p);
+ put_pid(p->signal->tty_old_pgrp); /* A noop */
+ spin_lock(&tty->ctrl_lock);
+ tty_pgrp = get_pid(tty->pgrp);
+ if (tty->pgrp)
+ p->signal->tty_old_pgrp = get_pid(tty->pgrp);
+ spin_unlock(&tty->ctrl_lock);
+ spin_unlock_irq(&p->sighand->siglock);
+ } while_each_pid_task(tty->session, PIDTYPE_SID, p);
+ }
+ read_unlock(&tasklist_lock);
+
+ if (tty_pgrp) {
+ if (exit_session)
+ kill_pgrp(tty_pgrp, SIGHUP, exit_session);
+ put_pid(tty_pgrp);
+ }
+
+ return refs;
+}
+
+/**
+ * disassociate_ctty - disconnect controlling tty
+ * @on_exit: true if exiting so need to "hang up" the session
+ *
+ * This function is typically called only by the session leader, when
+ * it wants to disassociate itself from its controlling tty.
+ *
+ * It performs the following functions:
+ * (1) Sends a SIGHUP and SIGCONT to the foreground process group
+ * (2) Clears the tty from being controlling the session
+ * (3) Clears the controlling tty for all processes in the
+ * session group.
+ *
+ * The argument on_exit is set to 1 if called when a process is
+ * exiting; it is 0 if called by the ioctl TIOCNOTTY.
+ *
+ * Locking:
+ * BTM is taken for hysterical raisons, and held when
+ * called from no_tty().
+ * tty_mutex is taken to protect tty
+ * ->siglock is taken to protect ->signal/->sighand
+ * tasklist_lock is taken to walk process list for sessions
+ * ->siglock is taken to protect ->signal/->sighand
+ */
+void disassociate_ctty(int on_exit)
+{
+ struct tty_struct *tty;
+
+ if (!current->signal->leader)
+ return;
+
+ tty = get_current_tty();
+ if (tty) {
+ if (on_exit && tty->driver->type != TTY_DRIVER_TYPE_PTY) {
+ tty_vhangup_session(tty);
+ } else {
+ struct pid *tty_pgrp = tty_get_pgrp(tty);
+ if (tty_pgrp) {
+ kill_pgrp(tty_pgrp, SIGHUP, on_exit);
+ if (!on_exit)
+ kill_pgrp(tty_pgrp, SIGCONT, on_exit);
+ put_pid(tty_pgrp);
+ }
+ }
+ tty_kref_put(tty);
+
+ } else if (on_exit) {
+ struct pid *old_pgrp;
+ spin_lock_irq(&current->sighand->siglock);
+ old_pgrp = current->signal->tty_old_pgrp;
+ current->signal->tty_old_pgrp = NULL;
+ spin_unlock_irq(&current->sighand->siglock);
+ if (old_pgrp) {
+ kill_pgrp(old_pgrp, SIGHUP, on_exit);
+ kill_pgrp(old_pgrp, SIGCONT, on_exit);
+ put_pid(old_pgrp);
+ }
+ return;
+ }
+
+ spin_lock_irq(&current->sighand->siglock);
+ put_pid(current->signal->tty_old_pgrp);
+ current->signal->tty_old_pgrp = NULL;
+
+ tty = tty_kref_get(current->signal->tty);
+ if (tty) {
+ unsigned long flags;
+ spin_lock_irqsave(&tty->ctrl_lock, flags);
+ put_pid(tty->session);
+ put_pid(tty->pgrp);
+ tty->session = NULL;
+ tty->pgrp = NULL;
+ spin_unlock_irqrestore(&tty->ctrl_lock, flags);
+ tty_kref_put(tty);
+ }
+
+ spin_unlock_irq(&current->sighand->siglock);
+ /* Now clear signal->tty under the lock */
+ read_lock(&tasklist_lock);
+ session_clear_tty(task_session(current));
+ read_unlock(&tasklist_lock);
+}
+
+/**
+ * no_tty - Ensure the current process does not have a controlling tty
+ */
+void no_tty(void)
+{
+ /* FIXME: Review locking here. The tty_lock never covered any race
+ between a new association and proc_clear_tty, but possibly we need
+ to protect against this anyway */
+ struct task_struct *tsk = current;
+ disassociate_ctty(0);
+ proc_clear_tty(tsk);
+}
+
+/**
+ * tiocsctty - set controlling tty
+ * @tty: tty structure
+ * @arg: user argument
+ *
+ * This ioctl is used to manage job control. It permits a session
+ * leader to set this tty as the controlling tty for the session.
+ *
+ * Locking:
+ * Takes tty_lock() to serialize proc_set_tty() for this tty
+ * Takes tasklist_lock internally to walk sessions
+ * Takes ->siglock() when updating signal->tty
+ */
+static int tiocsctty(struct tty_struct *tty, struct file *file, int arg)
+{
+ int ret = 0;
+
+ tty_lock(tty);
+ read_lock(&tasklist_lock);
+
+ if (current->signal->leader && (task_session(current) == tty->session))
+ goto unlock;
+
+ /*
+ * The process must be a session leader and
+ * not have a controlling tty already.
+ */
+ if (!current->signal->leader || current->signal->tty) {
+ ret = -EPERM;
+ goto unlock;
+ }
+
+ if (tty->session) {
+ /*
+ * This tty is already the controlling
+ * tty for another session group!
+ */
+ if (arg == 1 && capable(CAP_SYS_ADMIN)) {
+ /*
+ * Steal it away
+ */
+ session_clear_tty(tty->session);
+ } else {
+ ret = -EPERM;
+ goto unlock;
+ }
+ }
+
+ /* See the comment in tty_open_proc_set_tty(). */
+ if ((file->f_mode & FMODE_READ) == 0 && !capable(CAP_SYS_ADMIN)) {
+ ret = -EPERM;
+ goto unlock;
+ }
+
+ proc_set_tty(tty);
+unlock:
+ read_unlock(&tasklist_lock);
+ tty_unlock(tty);
+ return ret;
+}
+
+/**
+ * tty_get_pgrp - return a ref counted pgrp pid
+ * @tty: tty to read
+ *
+ * Returns a refcounted instance of the pid struct for the process
+ * group controlling the tty.
+ */
+struct pid *tty_get_pgrp(struct tty_struct *tty)
+{
+ unsigned long flags;
+ struct pid *pgrp;
+
+ spin_lock_irqsave(&tty->ctrl_lock, flags);
+ pgrp = get_pid(tty->pgrp);
+ spin_unlock_irqrestore(&tty->ctrl_lock, flags);
+
+ return pgrp;
+}
+EXPORT_SYMBOL_GPL(tty_get_pgrp);
+
+/*
+ * This checks not only the pgrp, but falls back on the pid if no
+ * satisfactory pgrp is found. I dunno - gdb doesn't work correctly
+ * without this...
+ *
+ * The caller must hold rcu lock or the tasklist lock.
+ */
+static struct pid *session_of_pgrp(struct pid *pgrp)
+{
+ struct task_struct *p;
+ struct pid *sid = NULL;
+
+ p = pid_task(pgrp, PIDTYPE_PGID);
+ if (p == NULL)
+ p = pid_task(pgrp, PIDTYPE_PID);
+ if (p != NULL)
+ sid = task_session(p);
+
+ return sid;
+}
+
+/**
+ * tiocgpgrp - get process group
+ * @tty: tty passed by user
+ * @real_tty: tty side of the tty passed by the user if a pty else the tty
+ * @p: returned pid
+ *
+ * Obtain the process group of the tty. If there is no process group
+ * return an error.
+ *
+ * Locking: none. Reference to current->signal->tty is safe.
+ */
+static int tiocgpgrp(struct tty_struct *tty, struct tty_struct *real_tty, pid_t __user *p)
+{
+ struct pid *pid;
+ int ret;
+ /*
+ * (tty == real_tty) is a cheap way of
+ * testing if the tty is NOT a master pty.
+ */
+ if (tty == real_tty && current->signal->tty != real_tty)
+ return -ENOTTY;
+ pid = tty_get_pgrp(real_tty);
+ ret = put_user(pid_vnr(pid), p);
+ put_pid(pid);
+ return ret;
+}
+
+/**
+ * tiocspgrp - attempt to set process group
+ * @tty: tty passed by user
+ * @real_tty: tty side device matching tty passed by user
+ * @p: pid pointer
+ *
+ * Set the process group of the tty to the session passed. Only
+ * permitted where the tty session is our session.
+ *
+ * Locking: RCU, ctrl lock
+ */
+static int tiocspgrp(struct tty_struct *tty, struct tty_struct *real_tty, pid_t __user *p)
+{
+ struct pid *pgrp;
+ pid_t pgrp_nr;
+ int retval = tty_check_change(real_tty);
+
+ if (retval == -EIO)
+ return -ENOTTY;
+ if (retval)
+ return retval;
+ if (!current->signal->tty ||
+ (current->signal->tty != real_tty) ||
+ (real_tty->session != task_session(current)))
+ return -ENOTTY;
+ if (get_user(pgrp_nr, p))
+ return -EFAULT;
+ if (pgrp_nr < 0)
+ return -EINVAL;
+ rcu_read_lock();
+ pgrp = find_vpid(pgrp_nr);
+ retval = -ESRCH;
+ if (!pgrp)
+ goto out_unlock;
+ retval = -EPERM;
+ if (session_of_pgrp(pgrp) != task_session(current))
+ goto out_unlock;
+ retval = 0;
+ spin_lock_irq(&tty->ctrl_lock);
+ put_pid(real_tty->pgrp);
+ real_tty->pgrp = get_pid(pgrp);
+ spin_unlock_irq(&tty->ctrl_lock);
+out_unlock:
+ rcu_read_unlock();
+ return retval;
+}
+
+/**
+ * tiocgsid - get session id
+ * @tty: tty passed by user
+ * @real_tty: tty side of the tty passed by the user if a pty else the tty
+ * @p: pointer to returned session id
+ *
+ * Obtain the session id of the tty. If there is no session
+ * return an error.
+ *
+ * Locking: none. Reference to current->signal->tty is safe.
+ */
+static int tiocgsid(struct tty_struct *tty, struct tty_struct *real_tty, pid_t __user *p)
+{
+ /*
+ * (tty == real_tty) is a cheap way of
+ * testing if the tty is NOT a master pty.
+ */
+ if (tty == real_tty && current->signal->tty != real_tty)
+ return -ENOTTY;
+ if (!real_tty->session)
+ return -ENOTTY;
+ return put_user(pid_vnr(real_tty->session), p);
+}
+
+/*
+ * Called from tty_ioctl(). If tty is a pty then real_tty is the slave side,
+ * if not then tty == real_tty.
+ */
+long tty_jobctrl_ioctl(struct tty_struct *tty, struct tty_struct *real_tty,
+ struct file *file, unsigned int cmd, unsigned long arg)
+{
+ void __user *p = (void __user *)arg;
+
+ switch (cmd) {
+ case TIOCNOTTY:
+ if (current->signal->tty != tty)
+ return -ENOTTY;
+ no_tty();
+ return 0;
+ case TIOCSCTTY:
+ return tiocsctty(real_tty, file, arg);
+ case TIOCGPGRP:
+ return tiocgpgrp(tty, real_tty, p);
+ case TIOCSPGRP:
+ return tiocspgrp(tty, real_tty, p);
+ case TIOCGSID:
+ return tiocgsid(tty, real_tty, p);
+ }
+ return -ENOIOCTLCMD;
+}
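
For context, a minimal userspace sketch of the job-control ioctls this new file now serves (my own illustration, not part of the patch); the device path and the lack of real error handling are purely illustrative:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/types.h>
#include <unistd.h>

int main(void)
{
	pid_t pgrp, sid;
	int fd;

	setsid();					/* become a session leader first */
	fd = open("/dev/ttyS0", O_RDWR | O_NOCTTY);	/* device path is illustrative */
	if (fd < 0)
		return 1;
	if (ioctl(fd, TIOCSCTTY, 0) < 0)		/* handled by tiocsctty() above */
		perror("TIOCSCTTY");
	if (ioctl(fd, TIOCGPGRP, &pgrp) == 0 && ioctl(fd, TIOCGSID, &sid) == 0)
		printf("pgrp=%d sid=%d\n", (int)pgrp, (int)sid);
	ioctl(fd, TIOCNOTTY);				/* drop the controlling tty again */
	close(fd);
	return 0;
}
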
diff --git a/drivers/tty/vt/selection.c b/drivers/tty/vt/selection.c
index 36e1b8c7680f..accbd1257bc4 100644
--- a/drivers/tty/vt/selection.c
+++ b/drivers/tty/vt/selection.c
@@ -80,21 +80,17 @@ void clear_selection(void)
/*
* User settable table: what characters are to be considered alphabetic?
- * 256 bits. Locked by the console lock.
+ * 128 bits. Locked by the console lock.
*/
-static u32 inwordLut[8]={
+static u32 inwordLut[]={
0x00000000, /* control chars */
- 0x03FF0000, /* digits */
+ 0x03FFE000, /* digits and "-./" */
0x87FFFFFE, /* uppercase and '_' */
0x07FFFFFE, /* lowercase */
- 0x00000000,
- 0x00000000,
- 0xFF7FFFFF, /* latin-1 accented letters, not multiplication sign */
- 0xFF7FFFFF /* latin-1 accented letters, not division sign */
};
static inline int inword(const u16 c) {
- return c > 0xff || (( inwordLut[c>>5] >> (c & 0x1F) ) & 1);
+ return c > 0x7f || (( inwordLut[c>>5] >> (c & 0x1F) ) & 1);
}
/**
@@ -106,10 +102,10 @@ static inline int inword(const u16 c) {
*/
int sel_loadlut(char __user *p)
{
- u32 tmplut[8];
- if (copy_from_user(tmplut, (u32 __user *)(p+4), 32))
+ u32 tmplut[ARRAY_SIZE(inwordLut)];
+ if (copy_from_user(tmplut, (u32 __user *)(p+4), sizeof(inwordLut)))
return -EFAULT;
- memcpy(inwordLut, tmplut, 32);
+ memcpy(inwordLut, tmplut, sizeof(inwordLut));
return 0;
}
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index 5c4933bb4b53..9c9945284bcf 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -181,7 +181,7 @@ int console_blanked;
static int vesa_blank_mode; /* 0:none 1:suspendV 2:suspendH 3:powerdown */
static int vesa_off_interval;
-static int blankinterval = 10*60;
+static int blankinterval;
core_param(consoleblank, blankinterval, int, 0444);
static DECLARE_WORK(console_work, console_callback);
diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
index 60ce7fd54e89..1c196f87e9d9 100644
--- a/drivers/uio/uio.c
+++ b/drivers/uio/uio.c
@@ -66,7 +66,7 @@ static ssize_t map_size_show(struct uio_mem *mem, char *buf)
static ssize_t map_offset_show(struct uio_mem *mem, char *buf)
{
- return sprintf(buf, "0x%llx\n", (unsigned long long)mem->addr & ~PAGE_MASK);
+ return sprintf(buf, "0x%llx\n", (unsigned long long)mem->offs);
}
struct map_sysfs_entry {
diff --git a/drivers/uio/uio_mf624.c b/drivers/uio/uio_mf624.c
index d1f95a1567bb..35187c052e8c 100644
--- a/drivers/uio/uio_mf624.c
+++ b/drivers/uio/uio_mf624.c
@@ -127,6 +127,24 @@ static int mf624_irqcontrol(struct uio_info *info, s32 irq_on)
return 0;
}
+static int mf624_setup_mem(struct pci_dev *dev, int bar, struct uio_mem *mem, const char *name)
+{
+ resource_size_t start = pci_resource_start(dev, bar);
+ resource_size_t len = pci_resource_len(dev, bar);
+
+ mem->name = name;
+ mem->addr = start & PAGE_MASK;
+ mem->offs = start & ~PAGE_MASK;
+ if (!mem->addr)
+ return -ENODEV;
+ mem->size = ((start & ~PAGE_MASK) + len + PAGE_SIZE - 1) & PAGE_MASK;
+ mem->memtype = UIO_MEM_PHYS;
+ mem->internal_addr = pci_ioremap_bar(dev, bar);
+ if (!mem->internal_addr)
+ return -ENODEV;
+ return 0;
+}
+
static int mf624_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
struct uio_info *info;
@@ -147,37 +165,15 @@ static int mf624_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
/* Note: Datasheet says device uses BAR0, BAR1, BAR2 -- do not trust it */
/* BAR0 */
- info->mem[0].name = "PCI chipset, interrupts, status "
- "bits, special functions";
- info->mem[0].addr = pci_resource_start(dev, 0);
- if (!info->mem[0].addr)
+ if (mf624_setup_mem(dev, 0, &info->mem[0], "PCI chipset, interrupts, status "
+ "bits, special functions"))
goto out_release;
- info->mem[0].size = pci_resource_len(dev, 0);
- info->mem[0].memtype = UIO_MEM_PHYS;
- info->mem[0].internal_addr = pci_ioremap_bar(dev, 0);
- if (!info->mem[0].internal_addr)
- goto out_release;
-
/* BAR2 */
- info->mem[1].name = "ADC, DAC, DIO";
- info->mem[1].addr = pci_resource_start(dev, 2);
- if (!info->mem[1].addr)
- goto out_unmap0;
- info->mem[1].size = pci_resource_len(dev, 2);
- info->mem[1].memtype = UIO_MEM_PHYS;
- info->mem[1].internal_addr = pci_ioremap_bar(dev, 2);
- if (!info->mem[1].internal_addr)
+ if (mf624_setup_mem(dev, 2, &info->mem[1], "ADC, DAC, DIO"))
goto out_unmap0;
/* BAR4 */
- info->mem[2].name = "Counter/timer chip";
- info->mem[2].addr = pci_resource_start(dev, 4);
- if (!info->mem[2].addr)
- goto out_unmap1;
- info->mem[2].size = pci_resource_len(dev, 4);
- info->mem[2].memtype = UIO_MEM_PHYS;
- info->mem[2].internal_addr = pci_ioremap_bar(dev, 4);
- if (!info->mem[2].internal_addr)
+ if (mf624_setup_mem(dev, 4, &info->mem[2], "Counter/timer chip"))
goto out_unmap1;
info->irq = dev->irq;
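
A worked example of the alignment arithmetic mf624_setup_mem() performs, assuming 4 KiB pages; the EX_* macros and the BAR address are made up and merely stand in for the kernel's PAGE_SIZE/PAGE_MASK:

#include <stdio.h>

#define EX_PAGE_SIZE 0x1000UL			/* assumes 4 KiB pages */
#define EX_PAGE_MASK (~(EX_PAGE_SIZE - 1))

int main(void)
{
	unsigned long start = 0xfebf1080UL, len = 0x20UL;	/* hypothetical BAR */
	unsigned long addr = start & EX_PAGE_MASK;		/* 0xfebf1000 */
	unsigned long offs = start & ~EX_PAGE_MASK;		/* 0x80 */
	unsigned long size = (offs + len + EX_PAGE_SIZE - 1) & EX_PAGE_MASK;	/* 0x1000 */

	printf("addr=%#lx offs=%#lx size=%#lx\n", addr, offs, size);
	return 0;
}
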
diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig
index fbe493d44e81..939a63bca82f 100644
--- a/drivers/usb/Kconfig
+++ b/drivers/usb/Kconfig
@@ -35,7 +35,6 @@ config USB_COMMON
config USB_ARCH_HAS_HCD
def_bool y
-# ARM SA1111 chips have a non-PCI based "OHCI-compatible" USB host interface.
config USB
tristate "Support for Host-side USB"
depends on USB_ARCH_HAS_HCD
@@ -73,6 +72,17 @@ config USB
To compile this driver as a module, choose M here: the
module will be called usbcore.
+config USB_PCI
+ bool "PCI based USB host interface"
+ depends on PCI
+ default y
+ ---help---
+ Many embedded SoCs (e.g. the Freescale T2080) have both PCI and
+ USB modules, but the USB module is controlled by registers
+ directly and has no relationship with the PCI module.
+
+ If you say N here, the PCI-related code in the USB drivers will
+ not be built.
+
if USB
source "drivers/usb/core/Kconfig"
@@ -152,6 +162,8 @@ source "drivers/usb/phy/Kconfig"
source "drivers/usb/gadget/Kconfig"
+source "drivers/usb/typec/Kconfig"
+
config USB_LED_TRIG
bool "USB LED Triggers"
depends on LEDS_CLASS && LEDS_TRIGGERS
diff --git a/drivers/usb/Makefile b/drivers/usb/Makefile
index 53d1356bbd06..9650b351c26c 100644
--- a/drivers/usb/Makefile
+++ b/drivers/usb/Makefile
@@ -14,7 +14,7 @@ obj-$(CONFIG_USB_ISP1760) += isp1760/
obj-$(CONFIG_USB_MON) += mon/
obj-$(CONFIG_USB_MTU3) += mtu3/
-obj-$(CONFIG_PCI) += host/
+obj-$(CONFIG_USB_PCI) += host/
obj-$(CONFIG_USB_EHCI_HCD) += host/
obj-$(CONFIG_USB_ISP116X_HCD) += host/
obj-$(CONFIG_USB_OHCI_HCD) += host/
@@ -62,3 +62,5 @@ obj-$(CONFIG_USB_GADGET) += gadget/
obj-$(CONFIG_USB_COMMON) += common/
obj-$(CONFIG_USBIP_CORE) += usbip/
+
+obj-$(CONFIG_TYPEC) += typec/
diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
index f9fe86b6f7b5..d65a64c29b85 100644
--- a/drivers/usb/atm/cxacru.c
+++ b/drivers/usb/atm/cxacru.c
@@ -474,7 +474,7 @@ static ssize_t cxacru_sysfs_store_adsl_config(struct device *dev,
ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
if (ret < 2)
return -EINVAL;
- if (index < 0 || index > 0x7f)
+ if (index > 0x7f)
return -EINVAL;
if (tmp < 0 || tmp > len - pos)
return -EINVAL;
diff --git a/drivers/usb/chipidea/Kconfig b/drivers/usb/chipidea/Kconfig
index fc96f5cdcb5c..51f4157bbecf 100644
--- a/drivers/usb/chipidea/Kconfig
+++ b/drivers/usb/chipidea/Kconfig
@@ -20,7 +20,7 @@ config USB_CHIPIDEA_OF
config USB_CHIPIDEA_PCI
tristate
- depends on PCI
+ depends on USB_PCI
depends on NOP_USB_XCEIV
default USB_CHIPIDEA
diff --git a/drivers/usb/chipidea/ci.h b/drivers/usb/chipidea/ci.h
index 59e22389c10b..6743f85b1b7a 100644
--- a/drivers/usb/chipidea/ci.h
+++ b/drivers/usb/chipidea/ci.h
@@ -177,6 +177,7 @@ struct hw_bank {
* @td_pool: allocation pool for transfer descriptors
* @gadget: device side representation for peripheral controller
* @driver: gadget driver
+ * @resume_state: gadget state saved at suspend, restored on resume
* @hw_ep_max: total number of endpoints supported by hardware
* @ci_hw_ep: array of endpoints
* @ep0_dir: ep0 direction
@@ -227,6 +228,7 @@ struct ci_hdrc {
struct usb_gadget gadget;
struct usb_gadget_driver *driver;
+ enum usb_device_state resume_state;
unsigned hw_ep_max;
struct ci_hw_ep ci_hw_ep[ENDPT_MAX];
u32 ep0_dir;
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
index 79ad8e91632e..9e217b1361ea 100644
--- a/drivers/usb/chipidea/core.c
+++ b/drivers/usb/chipidea/core.c
@@ -783,9 +783,6 @@ struct platform_device *ci_hdrc_add_device(struct device *dev,
}
pdev->dev.parent = dev;
- pdev->dev.dma_mask = dev->dma_mask;
- pdev->dev.dma_parms = dev->dma_parms;
- dma_set_coherent_mask(&pdev->dev, dev->coherent_dma_mask);
ret = platform_device_add_resources(pdev, res, nres);
if (ret)
@@ -841,6 +838,56 @@ static void ci_get_otg_capable(struct ci_hdrc *ci)
}
}
+static ssize_t ci_role_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct ci_hdrc *ci = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%s\n", ci_role(ci)->name);
+}
+
+static ssize_t ci_role_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t n)
+{
+ struct ci_hdrc *ci = dev_get_drvdata(dev);
+ enum ci_role role;
+ int ret;
+
+ if (!(ci->roles[CI_ROLE_HOST] && ci->roles[CI_ROLE_GADGET])) {
+ dev_warn(dev, "Current configuration is not dual-role, quit\n");
+ return -EPERM;
+ }
+
+ for (role = CI_ROLE_HOST; role < CI_ROLE_END; role++)
+ if (!strncmp(buf, ci->roles[role]->name,
+ strlen(ci->roles[role]->name)))
+ break;
+
+ if (role == CI_ROLE_END || role == ci->role)
+ return -EINVAL;
+
+ pm_runtime_get_sync(dev);
+ disable_irq(ci->irq);
+ ci_role_stop(ci);
+ ret = ci_role_start(ci, role);
+ if (!ret && ci->role == CI_ROLE_GADGET)
+ ci_handle_vbus_change(ci);
+ enable_irq(ci->irq);
+ pm_runtime_put_sync(dev);
+
+ return (ret == 0) ? n : ret;
+}
+static DEVICE_ATTR(role, 0644, ci_role_show, ci_role_store);
+
+static struct attribute *ci_attrs[] = {
+ &dev_attr_role.attr,
+ NULL,
+};
+
+static struct attribute_group ci_attr_group = {
+ .attrs = ci_attrs,
+};
+
static int ci_hdrc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -1007,11 +1054,18 @@ static int ci_hdrc_probe(struct platform_device *pdev)
ci_hdrc_otg_fsm_start(ci);
device_set_wakeup_capable(&pdev->dev, true);
-
ret = dbg_create_files(ci);
- if (!ret)
- return 0;
+ if (ret)
+ goto stop;
+
+ ret = sysfs_create_group(&dev->kobj, &ci_attr_group);
+ if (ret)
+ goto remove_debug;
+
+ return 0;
+remove_debug:
+ dbg_remove_files(ci);
stop:
ci_role_destroy(ci);
deinit_phy:
@@ -1033,6 +1087,7 @@ static int ci_hdrc_remove(struct platform_device *pdev)
}
dbg_remove_files(ci);
+ sysfs_remove_group(&ci->dev->kobj, &ci_attr_group);
ci_role_destroy(ci);
ci_hdrc_enter_lpm(ci, true);
ci_usb_phy_exit(ci);
diff --git a/drivers/usb/chipidea/host.c b/drivers/usb/chipidea/host.c
index 915f3e91586e..18cb8e46262d 100644
--- a/drivers/usb/chipidea/host.c
+++ b/drivers/usb/chipidea/host.c
@@ -123,7 +123,8 @@ static int host_start(struct ci_hdrc *ci)
if (usb_disabled())
return -ENODEV;
- hcd = usb_create_hcd(&ci_ehci_hc_driver, ci->dev, dev_name(ci->dev));
+ hcd = __usb_create_hcd(&ci_ehci_hc_driver, ci->dev->parent,
+ ci->dev, dev_name(ci->dev), NULL);
if (!hcd)
return -ENOMEM;
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
index f88e9157fad0..56d2d3213076 100644
--- a/drivers/usb/chipidea/udc.c
+++ b/drivers/usb/chipidea/udc.c
@@ -423,7 +423,8 @@ static int _hardware_enqueue(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq)
hwreq->req.status = -EALREADY;
- ret = usb_gadget_map_request(&ci->gadget, &hwreq->req, hwep->dir);
+ ret = usb_gadget_map_request_by_dev(ci->dev->parent,
+ &hwreq->req, hwep->dir);
if (ret)
return ret;
@@ -603,7 +604,8 @@ static int _hardware_dequeue(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq)
list_del_init(&node->td);
}
- usb_gadget_unmap_request(&hwep->ci->gadget, &hwreq->req, hwep->dir);
+ usb_gadget_unmap_request_by_dev(hwep->ci->dev->parent,
+ &hwreq->req, hwep->dir);
hwreq->req.actual += actual;
@@ -1845,27 +1847,32 @@ static irqreturn_t udc_irq(struct ci_hdrc *ci)
if (USBi_PCI & intr) {
ci->gadget.speed = hw_port_is_high_speed(ci) ?
USB_SPEED_HIGH : USB_SPEED_FULL;
- if (ci->suspended && ci->driver->resume) {
- spin_unlock(&ci->lock);
- ci->driver->resume(&ci->gadget);
- spin_lock(&ci->lock);
+ if (ci->suspended) {
+ if (ci->driver->resume) {
+ spin_unlock(&ci->lock);
+ ci->driver->resume(&ci->gadget);
+ spin_lock(&ci->lock);
+ }
ci->suspended = 0;
+ usb_gadget_set_state(&ci->gadget,
+ ci->resume_state);
}
}
if (USBi_UI & intr)
isr_tr_complete_handler(ci);
- if (USBi_SLI & intr) {
+ if ((USBi_SLI & intr) && !(ci->suspended)) {
+ ci->suspended = 1;
+ ci->resume_state = ci->gadget.state;
if (ci->gadget.speed != USB_SPEED_UNKNOWN &&
ci->driver->suspend) {
- ci->suspended = 1;
spin_unlock(&ci->lock);
ci->driver->suspend(&ci->gadget);
- usb_gadget_set_state(&ci->gadget,
- USB_STATE_SUSPENDED);
spin_lock(&ci->lock);
}
+ usb_gadget_set_state(&ci->gadget,
+ USB_STATE_SUSPENDED);
}
retval = IRQ_HANDLED;
} else {
@@ -1899,13 +1906,13 @@ static int udc_start(struct ci_hdrc *ci)
INIT_LIST_HEAD(&ci->gadget.ep_list);
/* alloc resources */
- ci->qh_pool = dma_pool_create("ci_hw_qh", dev,
+ ci->qh_pool = dma_pool_create("ci_hw_qh", dev->parent,
sizeof(struct ci_hw_qh),
64, CI_HDRC_PAGE_SIZE);
if (ci->qh_pool == NULL)
return -ENOMEM;
- ci->td_pool = dma_pool_create("ci_hw_td", dev,
+ ci->td_pool = dma_pool_create("ci_hw_td", dev->parent,
sizeof(struct ci_hw_td),
64, CI_HDRC_PAGE_SIZE);
if (ci->td_pool == NULL) {
@@ -1973,6 +1980,8 @@ static void udc_id_switch_for_host(struct ci_hdrc *ci)
*/
if (ci->is_otg)
hw_write_otgsc(ci, OTGSC_BSVIE | OTGSC_BSVIS, OTGSC_BSVIS);
+
+ ci->vbus_active = 0;
}
/**
diff --git a/drivers/usb/class/Kconfig b/drivers/usb/class/Kconfig
index bb8b73682a70..971385fe9abc 100644
--- a/drivers/usb/class/Kconfig
+++ b/drivers/usb/class/Kconfig
@@ -12,7 +12,7 @@ config USB_ACM
Please read <file:Documentation/usb/acm.txt> for details.
If your modem only reports "Cls=ff(vend.)" in the descriptors in
- /proc/bus/usb/devices, then your modem will not work with this
+ /sys/kernel/debug/usb/devices, then your modem will not work with this
driver.
To compile this driver as a module, choose M here: the
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index d5388938bc7a..5357d83bbda2 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -36,6 +36,7 @@
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
+#include <linux/log2.h>
#include <linux/tty.h>
#include <linux/serial.h>
#include <linux/tty_driver.h>
@@ -283,39 +284,13 @@ static DEVICE_ATTR(iCountryCodeRelDate, S_IRUGO, show_country_rel_date, NULL);
* Interrupt handlers for various ACM device responses
*/
-/* control interface reports status changes with "interrupt" transfers */
-static void acm_ctrl_irq(struct urb *urb)
+static void acm_process_notification(struct acm *acm, unsigned char *buf)
{
- struct acm *acm = urb->context;
- struct usb_cdc_notification *dr = urb->transfer_buffer;
- unsigned char *data;
int newctrl;
int difference;
- int retval;
- int status = urb->status;
+ struct usb_cdc_notification *dr = (struct usb_cdc_notification *)buf;
+ unsigned char *data = buf + sizeof(struct usb_cdc_notification);
- switch (status) {
- case 0:
- /* success */
- break;
- case -ECONNRESET:
- case -ENOENT:
- case -ESHUTDOWN:
- /* this urb is terminated, clean up */
- dev_dbg(&acm->control->dev,
- "%s - urb shutting down with status: %d\n",
- __func__, status);
- return;
- default:
- dev_dbg(&acm->control->dev,
- "%s - nonzero urb status received: %d\n",
- __func__, status);
- goto exit;
- }
-
- usb_mark_last_busy(acm->dev);
-
- data = (unsigned char *)(dr + 1);
switch (dr->bNotificationType) {
case USB_CDC_NOTIFY_NETWORK_CONNECTION:
dev_dbg(&acm->control->dev,
@@ -323,7 +298,15 @@ static void acm_ctrl_irq(struct urb *urb)
break;
case USB_CDC_NOTIFY_SERIAL_STATE:
+ if (le16_to_cpu(dr->wLength) != 2) {
+ dev_dbg(&acm->control->dev,
+ "%s - malformed serial state\n", __func__);
+ break;
+ }
+
newctrl = get_unaligned_le16(data);
+ dev_dbg(&acm->control->dev,
+ "%s - serial state: 0x%x\n", __func__, newctrl);
if (!acm->clocal && (acm->ctrlin & ~newctrl & ACM_CTRL_DCD)) {
dev_dbg(&acm->control->dev,
@@ -359,13 +342,86 @@ static void acm_ctrl_irq(struct urb *urb)
default:
dev_dbg(&acm->control->dev,
- "%s - unknown notification %d received: index %d "
- "len %d data0 %d data1 %d\n",
+ "%s - unknown notification %d received: index %d len %d\n",
__func__,
- dr->bNotificationType, dr->wIndex,
- dr->wLength, data[0], data[1]);
+ dr->bNotificationType, dr->wIndex, dr->wLength);
+ }
+}
+
+/* control interface reports status changes with "interrupt" transfers */
+static void acm_ctrl_irq(struct urb *urb)
+{
+ struct acm *acm = urb->context;
+ struct usb_cdc_notification *dr = urb->transfer_buffer;
+ unsigned int current_size = urb->actual_length;
+ unsigned int expected_size, copy_size, alloc_size;
+ int retval;
+ int status = urb->status;
+
+ switch (status) {
+ case 0:
+ /* success */
break;
+ case -ECONNRESET:
+ case -ENOENT:
+ case -ESHUTDOWN:
+ /* this urb is terminated, clean up */
+ acm->nb_index = 0;
+ dev_dbg(&acm->control->dev,
+ "%s - urb shutting down with status: %d\n",
+ __func__, status);
+ return;
+ default:
+ dev_dbg(&acm->control->dev,
+ "%s - nonzero urb status received: %d\n",
+ __func__, status);
+ goto exit;
+ }
+
+ usb_mark_last_busy(acm->dev);
+
+ if (acm->nb_index)
+ dr = (struct usb_cdc_notification *)acm->notification_buffer;
+
+ /* size = notification-header + (optional) data */
+ expected_size = sizeof(struct usb_cdc_notification) +
+ le16_to_cpu(dr->wLength);
+
+ if (current_size < expected_size) {
+ /* notification is transmitted fragmented, reassemble */
+ if (acm->nb_size < expected_size) {
+ if (acm->nb_size) {
+ kfree(acm->notification_buffer);
+ acm->nb_size = 0;
+ }
+ alloc_size = roundup_pow_of_two(expected_size);
+ /*
+ * kmalloc a fresh notification_buffer after the kfree above so it
+ * remains valid when the previous allocation was too small. The
+ * final free is done on disconnect.
+ */
+ acm->notification_buffer =
+ kmalloc(alloc_size, GFP_ATOMIC);
+ if (!acm->notification_buffer)
+ goto exit;
+ acm->nb_size = alloc_size;
+ }
+
+ copy_size = min(current_size,
+ expected_size - acm->nb_index);
+
+ memcpy(&acm->notification_buffer[acm->nb_index],
+ urb->transfer_buffer, copy_size);
+ acm->nb_index += copy_size;
+ current_size = acm->nb_index;
+ }
+
+ if (current_size >= expected_size) {
+ /* notification complete */
+ acm_process_notification(acm, (unsigned char *)dr);
+ acm->nb_index = 0;
}
+
exit:
retval = usb_submit_urb(urb, GFP_ATOMIC);
if (retval && retval != -EPERM)
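
A worked example of the reassembly bookkeeping above, with hypothetical sizes: a SERIAL_STATE notification is an 8-byte header plus wLength == 2 bytes of data (expected_size == 10), so with an 8-byte interrupt packet the first URB leaves nb_index at 8 (the buffer is rounded up to 16 bytes) and the second URB completes it:

#include <stdio.h>

int main(void)
{
	unsigned int expected_size = 8 + 2;	/* header + wLength */
	unsigned int urbs[] = { 8, 2 };		/* hypothetical wMaxPacketSize = 8 */
	unsigned int nb_index = 0;

	for (unsigned int i = 0; i < 2; i++) {
		nb_index += urbs[i];
		printf("after URB %u: %u/%u%s\n", i + 1, nb_index, expected_size,
		       nb_index >= expected_size ? " -> process notification" : "");
	}
	return 0;
}
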
@@ -1174,6 +1230,7 @@ static int acm_probe(struct usb_interface *intf,
int combined_interfaces = 0;
struct device *tty_dev;
int rv = -ENOMEM;
+ int res;
/* normal quirks */
quirks = (unsigned long)id->driver_info;
@@ -1274,23 +1331,12 @@ static int acm_probe(struct usb_interface *intf,
return -EINVAL;
}
look_for_collapsed_interface:
- for (i = 0; i < 3; i++) {
- struct usb_endpoint_descriptor *ep;
- ep = &data_interface->cur_altsetting->endpoint[i].desc;
-
- if (usb_endpoint_is_int_in(ep))
- epctrl = ep;
- else if (usb_endpoint_is_bulk_out(ep))
- epwrite = ep;
- else if (usb_endpoint_is_bulk_in(ep))
- epread = ep;
- else
- return -EINVAL;
- }
- if (!epctrl || !epread || !epwrite)
- return -ENODEV;
- else
- goto made_compressed_probe;
+ res = usb_find_common_endpoints(data_interface->cur_altsetting,
+ &epread, &epwrite, &epctrl, NULL);
+ if (res)
+ return res;
+
+ goto made_compressed_probe;
}
skip_normal_probe:
@@ -1488,6 +1534,9 @@ skip_countries:
epctrl->bInterval ? epctrl->bInterval : 16);
acm->ctrlurb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
acm->ctrlurb->transfer_dma = acm->ctrl_dma;
+ acm->notification_buffer = NULL;
+ acm->nb_index = 0;
+ acm->nb_size = 0;
dev_info(&intf->dev, "ttyACM%d: USB ACM device\n", minor);
@@ -1580,6 +1629,8 @@ static void acm_disconnect(struct usb_interface *intf)
usb_free_coherent(acm->dev, acm->ctrlsize, acm->ctrl_buffer, acm->ctrl_dma);
acm_read_buffers_free(acm);
+ kfree(acm->notification_buffer);
+
if (!acm->combined_interfaces)
usb_driver_release_interface(&acm_driver, intf == acm->control ?
acm->data : acm->control);
diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h
index c980f11cdf56..7a2b3deafc90 100644
--- a/drivers/usb/class/cdc-acm.h
+++ b/drivers/usb/class/cdc-acm.h
@@ -98,7 +98,9 @@ struct acm {
struct acm_wb *putbuffer; /* for acm_tty_put_char() */
int rx_buflimit;
spinlock_t read_lock;
- int write_used; /* number of non-empty write buffers */
+ u8 *notification_buffer; /* to reassemble fragmented notifications */
+ unsigned int nb_index;
+ unsigned int nb_size;
int transmitting;
spinlock_t write_lock;
struct mutex mutex;
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index 8fda45a45bd3..08669fee6d7f 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -58,7 +58,6 @@ MODULE_DEVICE_TABLE (usb, wdm_ids);
#define WDM_SUSPENDING 8
#define WDM_RESETTING 9
#define WDM_OVERFLOW 10
-#define WDM_DRAIN_ON_OPEN 11
#define WDM_MAX 16
@@ -182,7 +181,7 @@ static void wdm_in_callback(struct urb *urb)
"nonzero urb status received: -ESHUTDOWN\n");
goto skip_error;
case -EPIPE:
- dev_dbg(&desc->intf->dev,
+ dev_err(&desc->intf->dev,
"nonzero urb status received: -EPIPE\n");
break;
default:
@@ -210,25 +209,6 @@ static void wdm_in_callback(struct urb *urb)
desc->reslength = length;
}
}
-
- /*
- * Handling devices with the WDM_DRAIN_ON_OPEN flag set:
- * If desc->resp_count is unset, then the urb was submitted
- * without a prior notification. If the device returned any
- * data, then this implies that it had messages queued without
- * notifying us. Continue reading until that queue is flushed.
- */
- if (!desc->resp_count) {
- if (!length) {
- /* do not propagate the expected -EPIPE */
- desc->rerr = 0;
- goto unlock;
- }
- dev_dbg(&desc->intf->dev, "got %d bytes without notification\n", length);
- set_bit(WDM_RESPONDING, &desc->flags);
- usb_submit_urb(desc->response, GFP_ATOMIC);
- }
-
skip_error:
set_bit(WDM_READ, &desc->flags);
wake_up(&desc->wait);
@@ -243,7 +223,6 @@ skip_error:
service_outstanding_interrupt(desc);
}
-unlock:
spin_unlock(&desc->iuspin);
}
@@ -686,17 +665,6 @@ static int wdm_open(struct inode *inode, struct file *file)
dev_err(&desc->intf->dev,
"Error submitting int urb - %d\n", rv);
rv = usb_translate_errors(rv);
- } else if (test_bit(WDM_DRAIN_ON_OPEN, &desc->flags)) {
- /*
- * Some devices keep pending messages queued
- * without resending notifications. We must
- * flush the message queue before we can
- * assume a one-to-one relationship between
- * notifications and messages in the queue
- */
- dev_dbg(&desc->intf->dev, "draining queued data\n");
- set_bit(WDM_RESPONDING, &desc->flags);
- rv = usb_submit_urb(desc->response, GFP_KERNEL);
}
} else {
rv = 0;
@@ -803,8 +771,7 @@ static void wdm_rxwork(struct work_struct *work)
/* --- hotplug --- */
static int wdm_create(struct usb_interface *intf, struct usb_endpoint_descriptor *ep,
- u16 bufsize, int (*manage_power)(struct usb_interface *, int),
- bool drain_on_open)
+ u16 bufsize, int (*manage_power)(struct usb_interface *, int))
{
int rv = -ENOMEM;
struct wdm_device *desc;
@@ -891,68 +858,6 @@ static int wdm_create(struct usb_interface *intf, struct usb_endpoint_descriptor
desc->manage_power = manage_power;
- /*
- * "drain_on_open" enables a hack to work around a firmware
- * issue observed on network functions, in particular MBIM
- * functions.
- *
- * Quoting section 7 of the CDC-WMC r1.1 specification:
- *
- * "The firmware shall interpret GetEncapsulatedResponse as a
- * request to read response bytes. The firmware shall send
- * the next wLength bytes from the response. The firmware
- * shall allow the host to retrieve data using any number of
- * GetEncapsulatedResponse requests. The firmware shall
- * return a zero- length reply if there are no data bytes
- * available.
- *
- * The firmware shall send ResponseAvailable notifications
- * periodically, using any appropriate algorithm, to inform
- * the host that there is data available in the reply
- * buffer. The firmware is allowed to send ResponseAvailable
- * notifications even if there is no data available, but
- * this will obviously reduce overall performance."
- *
- * These requirements, although they make equally sense, are
- * often not implemented by network functions. Some firmwares
- * will queue data indefinitely, without ever resending a
- * notification. The result is that the driver and firmware
- * loses "syncronization" if the driver ever fails to respond
- * to a single notification, something which easily can happen
- * on release(). When this happens, the driver will appear to
- * never receive notifications for the most current data. Each
- * notification will only cause a single read, which returns
- * the oldest data in the firmware's queue.
- *
- * The "drain_on_open" hack resolves the situation by draining
- * data from the firmware until none is returned, without a
- * prior notification.
- *
- * This will inevitably race with the firmware, risking that
- * we read data from the device before handling the associated
- * notification. To make things worse, some of the devices
- * needing the hack do not implement the "return zero if no
- * data is available" requirement either. Instead they return
- * an error on the subsequent read in this case. This means
- * that "winning" the race can cause an unexpected EIO to
- * userspace.
- *
- * "winning" the race is more likely on resume() than on
- * open(), and the unexpected error is more harmful in the
- * middle of an open session. The hack is therefore only
- * applied on open(), and not on resume() where it logically
- * would be equally necessary. So we define open() as the only
- * driver <-> device "syncronization point". Should we happen
- * to lose a notification after open(), then syncronization
- * will be lost until release()
- *
- * The hack should not be enabled for CDC WDM devices
- * conforming to the CDC-WMC r1.1 specification. This is
- * ensured by setting drain_on_open to false in wdm_probe().
- */
- if (drain_on_open)
- set_bit(WDM_DRAIN_ON_OPEN, &desc->flags);
-
spin_lock(&wdm_device_list_lock);
list_add(&desc->device_list, &wdm_device_list);
spin_unlock(&wdm_device_list_lock);
@@ -1006,7 +911,7 @@ static int wdm_probe(struct usb_interface *intf, const struct usb_device_id *id)
goto err;
ep = &iface->endpoint[0].desc;
- rv = wdm_create(intf, ep, maxcom, &wdm_manage_power, false);
+ rv = wdm_create(intf, ep, maxcom, &wdm_manage_power);
err:
return rv;
@@ -1038,7 +943,7 @@ struct usb_driver *usb_cdc_wdm_register(struct usb_interface *intf,
{
int rv = -EINVAL;
- rv = wdm_create(intf, ep, bufsize, manage_power, true);
+ rv = wdm_create(intf, ep, bufsize, manage_power);
if (rv < 0)
goto err;
diff --git a/drivers/usb/class/usblp.c b/drivers/usb/class/usblp.c
index cc61055fb9be..fb87c17ed6fa 100644
--- a/drivers/usb/class/usblp.c
+++ b/drivers/usb/class/usblp.c
@@ -294,7 +294,7 @@ static int usblp_ctrl_msg(struct usblp *usblp, int request, int type, int dir, i
/*
* See the description for usblp_select_alts() below for the usage
- * explanation. Look into your /proc/bus/usb/devices and dmesg in
+ * explanation. Look into your /sys/kernel/debug/usb/devices and dmesg in
* case of any trouble.
*/
static int proto_bias = -1;
@@ -1239,8 +1239,9 @@ static int usblp_select_alts(struct usblp *usblp)
{
struct usb_interface *if_alt;
struct usb_host_interface *ifd;
- struct usb_endpoint_descriptor *epd, *epwrite, *epread;
- int p, i, e;
+ struct usb_endpoint_descriptor *epwrite, *epread;
+ int p, i;
+ int res;
if_alt = usblp->intf;
@@ -1260,31 +1261,21 @@ static int usblp_select_alts(struct usblp *usblp)
ifd->desc.bInterfaceProtocol > USBLP_LAST_PROTOCOL)
continue;
- /* Look for bulk OUT and IN endpoints. */
- epwrite = epread = NULL;
- for (e = 0; e < ifd->desc.bNumEndpoints; e++) {
- epd = &ifd->endpoint[e].desc;
-
- if (usb_endpoint_is_bulk_out(epd))
- if (!epwrite)
- epwrite = epd;
-
- if (usb_endpoint_is_bulk_in(epd))
- if (!epread)
- epread = epd;
+ /* Look for the expected bulk endpoints. */
+ if (ifd->desc.bInterfaceProtocol > 1) {
+ res = usb_find_common_endpoints(ifd,
+ &epread, &epwrite, NULL, NULL);
+ } else {
+ epread = NULL;
+ res = usb_find_bulk_out_endpoint(ifd, &epwrite);
}
/* Ignore buggy hardware without the right endpoints. */
- if (!epwrite || (ifd->desc.bInterfaceProtocol > 1 && !epread))
+ if (res)
continue;
- /*
- * Turn off reads for USB_CLASS_PRINTER/1/1 (unidirectional)
- * interfaces and buggy bidirectional printers.
- */
- if (ifd->desc.bInterfaceProtocol == 1) {
- epread = NULL;
- } else if (usblp->quirks & USBLP_QUIRK_BIDIR) {
+ /* Turn off reads for buggy bidirectional printers. */
+ if (usblp->quirks & USBLP_QUIRK_BIDIR) {
printk(KERN_INFO "usblp%d: Disabling reads from "
"problematic bidirectional printer\n",
usblp->minor);
diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c
index 8fb309a0ff6b..578f424decc2 100644
--- a/drivers/usb/class/usbtmc.c
+++ b/drivers/usb/class/usbtmc.c
@@ -1375,7 +1375,7 @@ static int usbtmc_probe(struct usb_interface *intf,
{
struct usbtmc_device_data *data;
struct usb_host_interface *iface_desc;
- struct usb_endpoint_descriptor *endpoint;
+ struct usb_endpoint_descriptor *bulk_in, *bulk_out, *int_in;
int n;
int retcode;
@@ -1421,49 +1421,29 @@ static int usbtmc_probe(struct usb_interface *intf,
iface_desc = data->intf->cur_altsetting;
data->ifnum = iface_desc->desc.bInterfaceNumber;
- /* Find bulk in endpoint */
- for (n = 0; n < iface_desc->desc.bNumEndpoints; n++) {
- endpoint = &iface_desc->endpoint[n].desc;
-
- if (usb_endpoint_is_bulk_in(endpoint)) {
- data->bulk_in = endpoint->bEndpointAddress;
- dev_dbg(&intf->dev, "Found bulk in endpoint at %u\n",
- data->bulk_in);
- break;
- }
- }
-
- /* Find bulk out endpoint */
- for (n = 0; n < iface_desc->desc.bNumEndpoints; n++) {
- endpoint = &iface_desc->endpoint[n].desc;
-
- if (usb_endpoint_is_bulk_out(endpoint)) {
- data->bulk_out = endpoint->bEndpointAddress;
- dev_dbg(&intf->dev, "Found Bulk out endpoint at %u\n",
- data->bulk_out);
- break;
- }
- }
-
- if (!data->bulk_out || !data->bulk_in) {
+ /* Find bulk endpoints */
+ retcode = usb_find_common_endpoints(iface_desc,
+ &bulk_in, &bulk_out, NULL, NULL);
+ if (retcode) {
dev_err(&intf->dev, "bulk endpoints not found\n");
- retcode = -ENODEV;
goto err_put;
}
+ data->bulk_in = bulk_in->bEndpointAddress;
+ dev_dbg(&intf->dev, "Found bulk in endpoint at %u\n", data->bulk_in);
+
+ data->bulk_out = bulk_out->bEndpointAddress;
+ dev_dbg(&intf->dev, "Found Bulk out endpoint at %u\n", data->bulk_out);
+
/* Find int endpoint */
- for (n = 0; n < iface_desc->desc.bNumEndpoints; n++) {
- endpoint = &iface_desc->endpoint[n].desc;
-
- if (usb_endpoint_is_int_in(endpoint)) {
- data->iin_ep_present = 1;
- data->iin_ep = endpoint->bEndpointAddress;
- data->iin_wMaxPacketSize = usb_endpoint_maxp(endpoint);
- data->iin_interval = endpoint->bInterval;
- dev_dbg(&intf->dev, "Found Int in endpoint at %u\n",
+ retcode = usb_find_int_in_endpoint(iface_desc, &int_in);
+ if (!retcode) {
+ data->iin_ep_present = 1;
+ data->iin_ep = int_in->bEndpointAddress;
+ data->iin_wMaxPacketSize = usb_endpoint_maxp(int_in);
+ data->iin_interval = int_in->bInterval;
+ dev_dbg(&intf->dev, "Found Int in endpoint at %u\n",
data->iin_ep);
- break;
- }
}
retcode = get_capabilities(data);
diff --git a/drivers/usb/common/ulpi.c b/drivers/usb/common/ulpi.c
index c9480d77810c..930e8f35f8df 100644
--- a/drivers/usb/common/ulpi.c
+++ b/drivers/usb/common/ulpi.c
@@ -107,7 +107,7 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
int len;
struct ulpi *ulpi = to_ulpi_dev(dev);
- len = of_device_get_modalias(dev, buf, PAGE_SIZE - 1);
+ len = of_device_modalias(dev, buf, PAGE_SIZE);
if (len != -ENODEV)
return len;
diff --git a/drivers/usb/common/usb-otg-fsm.c b/drivers/usb/common/usb-otg-fsm.c
index 2f537bbdda09..b8fe31e409a5 100644
--- a/drivers/usb/common/usb-otg-fsm.c
+++ b/drivers/usb/common/usb-otg-fsm.c
@@ -31,6 +31,13 @@
#include <linux/usb/otg.h>
#include <linux/usb/otg-fsm.h>
+#ifdef VERBOSE
+#define VDBG(fmt, args...) pr_debug("[%s] " fmt, \
+ __func__, ## args)
+#else
+#define VDBG(stuff...) do {} while (0)
+#endif
+
/* Change USB protocol when there is a protocol change */
static int otg_set_protocol(struct otg_fsm *fsm, int protocol)
{
diff --git a/drivers/usb/core/Makefile b/drivers/usb/core/Makefile
index b99b871c4b9d..250ec1d662d9 100644
--- a/drivers/usb/core/Makefile
+++ b/drivers/usb/core/Makefile
@@ -8,7 +8,7 @@ usbcore-y += devio.o notify.o generic.o quirks.o devices.o
usbcore-y += port.o
usbcore-$(CONFIG_OF) += of.o
-usbcore-$(CONFIG_PCI) += hcd-pci.o
+usbcore-$(CONFIG_USB_PCI) += hcd-pci.o
usbcore-$(CONFIG_ACPI) += usb-acpi.o
obj-$(CONFIG_USB) += usbcore.o
diff --git a/drivers/usb/core/buffer.c b/drivers/usb/core/buffer.c
index b9bf6e2eb6fe..b64568cf572c 100644
--- a/drivers/usb/core/buffer.c
+++ b/drivers/usb/core/buffer.c
@@ -66,7 +66,7 @@ int hcd_buffer_create(struct usb_hcd *hcd)
int i, size;
if (!IS_ENABLED(CONFIG_HAS_DMA) ||
- (!hcd->self.controller->dma_mask &&
+ (!is_device_dma_capable(hcd->self.sysdev) &&
!(hcd->driver->flags & HCD_LOCAL_MEM)))
return 0;
@@ -75,7 +75,7 @@ int hcd_buffer_create(struct usb_hcd *hcd)
if (!size)
continue;
snprintf(name, sizeof(name), "buffer-%d", size);
- hcd->pool[i] = dma_pool_create(name, hcd->self.controller,
+ hcd->pool[i] = dma_pool_create(name, hcd->self.sysdev,
size, size, 0);
if (!hcd->pool[i]) {
hcd_buffer_destroy(hcd);
@@ -130,7 +130,7 @@ void *hcd_buffer_alloc(
/* some USB hosts just use PIO */
if (!IS_ENABLED(CONFIG_HAS_DMA) ||
- (!bus->controller->dma_mask &&
+ (!is_device_dma_capable(bus->sysdev) &&
!(hcd->driver->flags & HCD_LOCAL_MEM))) {
*dma = ~(dma_addr_t) 0;
return kmalloc(size, mem_flags);
@@ -140,7 +140,7 @@ void *hcd_buffer_alloc(
if (size <= pool_max[i])
return dma_pool_alloc(hcd->pool[i], mem_flags, dma);
}
- return dma_alloc_coherent(hcd->self.controller, size, dma, mem_flags);
+ return dma_alloc_coherent(hcd->self.sysdev, size, dma, mem_flags);
}
void hcd_buffer_free(
@@ -157,7 +157,7 @@ void hcd_buffer_free(
return;
if (!IS_ENABLED(CONFIG_HAS_DMA) ||
- (!bus->controller->dma_mask &&
+ (!is_device_dma_capable(bus->sysdev) &&
!(hcd->driver->flags & HCD_LOCAL_MEM))) {
kfree(addr);
return;
@@ -169,5 +169,5 @@ void hcd_buffer_free(
return;
}
}
- dma_free_coherent(hcd->self.controller, size, addr, dma);
+ dma_free_coherent(hcd->self.sysdev, size, addr, dma);
}
diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
index f2987ddb1cde..55dea2e7828f 100644
--- a/drivers/usb/core/devices.c
+++ b/drivers/usb/core/devices.c
@@ -24,7 +24,7 @@
* <mountpoint>/devices contains USB topology, device, config, class,
* interface, & endpoint data.
*
- * I considered using /proc/bus/usb/devices/device# for each device
+ * I considered using /dev/bus/usb/device# for each device
* as it is attached or detached, but I didn't like this for some
* reason -- maybe it's just too deep of a directory structure.
* I also don't like looking in multiple places to gather and view
@@ -40,7 +40,7 @@
* Converted the whole proc stuff to real
* read methods. Now not the whole device list needs to fit
* into one page, only the device list for one bus.
- * Added a poll method to /proc/bus/usb/devices, to wake
+ * Added a poll method to /sys/kernel/debug/usb/devices, to wake
* up an eventual usbd
* 2000-01-04: Thomas Sailer <sailer@ife.ee.ethz.ch>
* Turned into its own filesystem
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index cdee5130638b..eb87a259d55c 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -1331,6 +1331,24 @@ static int usb_suspend_both(struct usb_device *udev, pm_message_t msg)
*/
if (udev->parent && !PMSG_IS_AUTO(msg))
status = 0;
+
+ /*
+ * If the device is inaccessible, don't try to resume
+ * suspended interfaces and just return the error.
+ */
+ if (status && status != -EBUSY) {
+ int err;
+ u16 devstat;
+
+ err = usb_get_status(udev, USB_RECIP_DEVICE, 0,
+ &devstat);
+ if (err) {
+ dev_err(&udev->dev,
+ "Failed to suspend device, error %d\n",
+ status);
+ goto done;
+ }
+ }
}
/* If the suspend failed, resume interfaces that did get suspended */
@@ -1763,6 +1781,9 @@ static int autosuspend_check(struct usb_device *udev)
int w, i;
struct usb_interface *intf;
+ if (udev->state == USB_STATE_NOTATTACHED)
+ return -ENODEV;
+
/* Fail if autosuspend is disabled, or any interfaces are in use, or
* any interface drivers require remote wakeup but it isn't available.
*/
diff --git a/drivers/usb/core/file.c b/drivers/usb/core/file.c
index e26bd5e773ad..87ad6b6bfee8 100644
--- a/drivers/usb/core/file.c
+++ b/drivers/usb/core/file.c
@@ -29,6 +29,7 @@
#define MAX_USB_MINORS 256
static const struct file_operations *usb_minors[MAX_USB_MINORS];
static DECLARE_RWSEM(minor_rwsem);
+static DEFINE_MUTEX(init_usb_class_mutex);
static int usb_open(struct inode *inode, struct file *file)
{
@@ -111,8 +112,9 @@ static void release_usb_class(struct kref *kref)
static void destroy_usb_class(void)
{
- if (usb_class)
- kref_put(&usb_class->kref, release_usb_class);
+ mutex_lock(&init_usb_class_mutex);
+ kref_put(&usb_class->kref, release_usb_class);
+ mutex_unlock(&init_usb_class_mutex);
}
int usb_major_init(void)
@@ -173,7 +175,10 @@ int usb_register_dev(struct usb_interface *intf,
if (intf->minor >= 0)
return -EADDRINUSE;
+ mutex_lock(&init_usb_class_mutex);
retval = init_usb_class();
+ mutex_unlock(&init_usb_class_mutex);
+
if (retval)
return retval;
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 79bdca5cb9c7..49550790a3cb 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1076,6 +1076,7 @@ static void usb_deregister_bus (struct usb_bus *bus)
static int register_root_hub(struct usb_hcd *hcd)
{
struct device *parent_dev = hcd->self.controller;
+ struct device *sysdev = hcd->self.sysdev;
struct usb_device *usb_dev = hcd->self.root_hub;
const int devnum = 1;
int retval;
@@ -1122,7 +1123,7 @@ static int register_root_hub(struct usb_hcd *hcd)
/* Did the HC die before the root hub was registered? */
if (HCD_DEAD(hcd))
usb_hc_died (hcd); /* This time clean up */
- usb_dev->dev.of_node = parent_dev->of_node;
+ usb_dev->dev.of_node = sysdev->of_node;
}
mutex_unlock(&usb_bus_idr_lock);
@@ -1435,7 +1436,7 @@ void usb_hcd_unmap_urb_setup_for_dma(struct usb_hcd *hcd, struct urb *urb)
{
if (IS_ENABLED(CONFIG_HAS_DMA) &&
(urb->transfer_flags & URB_SETUP_MAP_SINGLE))
- dma_unmap_single(hcd->self.controller,
+ dma_unmap_single(hcd->self.sysdev,
urb->setup_dma,
sizeof(struct usb_ctrlrequest),
DMA_TO_DEVICE);
@@ -1468,19 +1469,19 @@ void usb_hcd_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
if (IS_ENABLED(CONFIG_HAS_DMA) &&
(urb->transfer_flags & URB_DMA_MAP_SG))
- dma_unmap_sg(hcd->self.controller,
+ dma_unmap_sg(hcd->self.sysdev,
urb->sg,
urb->num_sgs,
dir);
else if (IS_ENABLED(CONFIG_HAS_DMA) &&
(urb->transfer_flags & URB_DMA_MAP_PAGE))
- dma_unmap_page(hcd->self.controller,
+ dma_unmap_page(hcd->self.sysdev,
urb->transfer_dma,
urb->transfer_buffer_length,
dir);
else if (IS_ENABLED(CONFIG_HAS_DMA) &&
(urb->transfer_flags & URB_DMA_MAP_SINGLE))
- dma_unmap_single(hcd->self.controller,
+ dma_unmap_single(hcd->self.sysdev,
urb->transfer_dma,
urb->transfer_buffer_length,
dir);
@@ -1523,11 +1524,11 @@ int usb_hcd_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
return ret;
if (IS_ENABLED(CONFIG_HAS_DMA) && hcd->self.uses_dma) {
urb->setup_dma = dma_map_single(
- hcd->self.controller,
+ hcd->self.sysdev,
urb->setup_packet,
sizeof(struct usb_ctrlrequest),
DMA_TO_DEVICE);
- if (dma_mapping_error(hcd->self.controller,
+ if (dma_mapping_error(hcd->self.sysdev,
urb->setup_dma))
return -EAGAIN;
urb->transfer_flags |= URB_SETUP_MAP_SINGLE;
@@ -1558,7 +1559,7 @@ int usb_hcd_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
}
n = dma_map_sg(
- hcd->self.controller,
+ hcd->self.sysdev,
urb->sg,
urb->num_sgs,
dir);
@@ -1573,12 +1574,12 @@ int usb_hcd_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
} else if (urb->sg) {
struct scatterlist *sg = urb->sg;
urb->transfer_dma = dma_map_page(
- hcd->self.controller,
+ hcd->self.sysdev,
sg_page(sg),
sg->offset,
urb->transfer_buffer_length,
dir);
- if (dma_mapping_error(hcd->self.controller,
+ if (dma_mapping_error(hcd->self.sysdev,
urb->transfer_dma))
ret = -EAGAIN;
else
@@ -1588,11 +1589,11 @@ int usb_hcd_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
ret = -EAGAIN;
} else {
urb->transfer_dma = dma_map_single(
- hcd->self.controller,
+ hcd->self.sysdev,
urb->transfer_buffer,
urb->transfer_buffer_length,
dir);
- if (dma_mapping_error(hcd->self.controller,
+ if (dma_mapping_error(hcd->self.sysdev,
urb->transfer_dma))
ret = -EAGAIN;
else
@@ -2498,24 +2499,8 @@ static void init_giveback_urb_bh(struct giveback_urb_bh *bh)
tasklet_init(&bh->bh, usb_giveback_urb_bh, (unsigned long)bh);
}
-/**
- * usb_create_shared_hcd - create and initialize an HCD structure
- * @driver: HC driver that will use this hcd
- * @dev: device for this HC, stored in hcd->self.controller
- * @bus_name: value to store in hcd->self.bus_name
- * @primary_hcd: a pointer to the usb_hcd structure that is sharing the
- * PCI device. Only allocate certain resources for the primary HCD
- * Context: !in_interrupt()
- *
- * Allocate a struct usb_hcd, with extra space at the end for the
- * HC driver's private data. Initialize the generic members of the
- * hcd structure.
- *
- * Return: On success, a pointer to the created and initialized HCD structure.
- * On failure (e.g. if memory is unavailable), %NULL.
- */
-struct usb_hcd *usb_create_shared_hcd(const struct hc_driver *driver,
- struct device *dev, const char *bus_name,
+struct usb_hcd *__usb_create_hcd(const struct hc_driver *driver,
+ struct device *sysdev, struct device *dev, const char *bus_name,
struct usb_hcd *primary_hcd)
{
struct usb_hcd *hcd;
@@ -2556,8 +2541,9 @@ struct usb_hcd *usb_create_shared_hcd(const struct hc_driver *driver,
usb_bus_init(&hcd->self);
hcd->self.controller = dev;
+ hcd->self.sysdev = sysdev;
hcd->self.bus_name = bus_name;
- hcd->self.uses_dma = (dev->dma_mask != NULL);
+ hcd->self.uses_dma = (sysdev->dma_mask != NULL);
init_timer(&hcd->rh_timer);
hcd->rh_timer.function = rh_timer_func;
@@ -2572,6 +2558,30 @@ struct usb_hcd *usb_create_shared_hcd(const struct hc_driver *driver,
"USB Host Controller";
return hcd;
}
+EXPORT_SYMBOL_GPL(__usb_create_hcd);
+
+/**
+ * usb_create_shared_hcd - create and initialize an HCD structure
+ * @driver: HC driver that will use this hcd
+ * @dev: device for this HC, stored in hcd->self.controller
+ * @bus_name: value to store in hcd->self.bus_name
+ * @primary_hcd: a pointer to the usb_hcd structure that is sharing the
+ * PCI device. Only allocate certain resources for the primary HCD
+ * Context: !in_interrupt()
+ *
+ * Allocate a struct usb_hcd, with extra space at the end for the
+ * HC driver's private data. Initialize the generic members of the
+ * hcd structure.
+ *
+ * Return: On success, a pointer to the created and initialized HCD structure.
+ * On failure (e.g. if memory is unavailable), %NULL.
+ */
+struct usb_hcd *usb_create_shared_hcd(const struct hc_driver *driver,
+ struct device *dev, const char *bus_name,
+ struct usb_hcd *primary_hcd)
+{
+ return __usb_create_hcd(driver, dev, dev, bus_name, primary_hcd);
+}
EXPORT_SYMBOL_GPL(usb_create_shared_hcd);
/**
@@ -2591,7 +2601,7 @@ EXPORT_SYMBOL_GPL(usb_create_shared_hcd);
struct usb_hcd *usb_create_hcd(const struct hc_driver *driver,
struct device *dev, const char *bus_name)
{
- return usb_create_shared_hcd(driver, dev, bus_name, NULL);
+ return __usb_create_hcd(driver, dev, dev, bus_name, NULL);
}
EXPORT_SYMBOL_GPL(usb_create_hcd);
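As a minimal sketch of the new split between hcd->self.controller and hcd->self.sysdev introduced above (assuming a hypothetical platform glue driver whose DMA-capable parent should be used for mappings; my_hc_driver and my_glue_probe are not real symbols):

#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>

static const struct hc_driver my_hc_driver;	/* hypothetical HC driver */

static int my_glue_probe(struct platform_device *pdev)
{
	struct device *sysdev = pdev->dev.parent;	/* does the DMA */
	struct usb_hcd *hcd;
	int irq;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	/* self.controller stays the glue device (the sysfs parent), while
	 * self.sysdev is used for DMA mappings and buffer pools. */
	hcd = __usb_create_hcd(&my_hc_driver, sysdev, &pdev->dev,
			       dev_name(&pdev->dev), NULL);
	if (!hcd)
		return -ENOMEM;

	return usb_add_hcd(hcd, irq, IRQF_SHARED);
}

Existing callers are unaffected: usb_create_hcd() and usb_create_shared_hcd() simply pass the same device as both sysdev and controller.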
@@ -2718,7 +2728,7 @@ int usb_add_hcd(struct usb_hcd *hcd,
struct usb_device *rhdev;
if (IS_ENABLED(CONFIG_USB_PHY) && !hcd->usb_phy) {
- struct usb_phy *phy = usb_get_phy_dev(hcd->self.controller, 0);
+ struct usb_phy *phy = usb_get_phy_dev(hcd->self.sysdev, 0);
if (IS_ERR(phy)) {
retval = PTR_ERR(phy);
@@ -2736,7 +2746,7 @@ int usb_add_hcd(struct usb_hcd *hcd,
}
if (IS_ENABLED(CONFIG_GENERIC_PHY) && !hcd->phy) {
- struct phy *phy = phy_get(hcd->self.controller, "usb");
+ struct phy *phy = phy_get(hcd->self.sysdev, "usb");
if (IS_ERR(phy)) {
retval = PTR_ERR(phy);
@@ -2784,7 +2794,7 @@ int usb_add_hcd(struct usb_hcd *hcd,
*/
retval = hcd_buffer_create(hcd);
if (retval != 0) {
- dev_dbg(hcd->self.controller, "pool alloc failed\n");
+ dev_dbg(hcd->self.sysdev, "pool alloc failed\n");
goto err_create_buf;
}
@@ -2794,7 +2804,7 @@ int usb_add_hcd(struct usb_hcd *hcd,
rhdev = usb_alloc_dev(NULL, &hcd->self, 0);
if (rhdev == NULL) {
- dev_err(hcd->self.controller, "unable to allocate root hub\n");
+ dev_err(hcd->self.sysdev, "unable to allocate root hub\n");
retval = -ENOMEM;
goto err_allocate_root_hub;
}
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 5286bf67869a..9dca59ef18b3 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -1066,6 +1066,9 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
portstatus = portchange = 0;
status = hub_port_status(hub, port1, &portstatus, &portchange);
+ if (status)
+ goto abort;
+
if (udev || (portstatus & USB_PORT_STAT_CONNECTION))
dev_dbg(&port_dev->dev, "status %04x change %04x\n",
portstatus, portchange);
@@ -1198,7 +1201,7 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
/* Scan all ports that need attention */
kick_hub_wq(hub);
-
+ abort:
if (type == HUB_INIT2 || type == HUB_INIT3) {
/* Allow autosuspend if it was suppressed */
disconnected:
@@ -2084,6 +2087,12 @@ void usb_disconnect(struct usb_device **pdev)
dev_info(&udev->dev, "USB disconnect, device number %d\n",
udev->devnum);
+ /*
+ * Ensure that the pm runtime code knows that the USB device
+ * is in the process of being disconnected.
+ */
+ pm_runtime_barrier(&udev->dev);
+
usb_lock_device(udev);
hub_disconnect_children(udev);
diff --git a/drivers/usb/core/of.c b/drivers/usb/core/of.c
index 3de4f8873984..d787f195a9a6 100644
--- a/drivers/usb/core/of.c
+++ b/drivers/usb/core/of.c
@@ -18,6 +18,7 @@
*/
#include <linux/of.h>
+#include <linux/of_platform.h>
#include <linux/usb/of.h>
/**
@@ -46,3 +47,25 @@ struct device_node *usb_of_get_child_node(struct device_node *parent,
}
EXPORT_SYMBOL_GPL(usb_of_get_child_node);
+/**
+ * usb_of_get_companion_dev - Find the companion device
+ * @dev: the device pointer to find a companion
+ *
+ * Find the companion device from platform bus.
+ *
+ * Return: On success, a pointer to the companion device, %NULL on failure.
+ */
+struct device *usb_of_get_companion_dev(struct device *dev)
+{
+ struct device_node *node;
+ struct platform_device *pdev = NULL;
+
+ node = of_parse_phandle(dev->of_node, "companion", 0);
+ if (node)
+ pdev = of_find_device_by_node(node);
+
+ of_node_put(node);
+
+ return pdev ? &pdev->dev : NULL;
+}
+EXPORT_SYMBOL_GPL(usb_of_get_companion_dev);
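A hedged usage sketch for the new helper (assuming an EHCI-style host controller whose device-tree node references its OHCI companion through a "companion" phandle; my_handoff_companion is hypothetical):

#include <linux/device.h>
#include <linux/usb/of.h>

static void my_handoff_companion(struct device *hcd_dev)
{
	struct device *companion = usb_of_get_companion_dev(hcd_dev);

	if (!companion)
		return;

	dev_dbg(hcd_dev, "companion controller: %s\n", dev_name(companion));

	/* of_find_device_by_node() took a reference on the platform
	 * device, so drop it once the companion is no longer needed. */
	put_device(companion);
}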
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index a2ccc69fb45c..28b053cacc90 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -74,6 +74,140 @@ MODULE_PARM_DESC(autosuspend, "default autosuspend delay");
#define usb_autosuspend_delay 0
#endif
+static bool match_endpoint(struct usb_endpoint_descriptor *epd,
+ struct usb_endpoint_descriptor **bulk_in,
+ struct usb_endpoint_descriptor **bulk_out,
+ struct usb_endpoint_descriptor **int_in,
+ struct usb_endpoint_descriptor **int_out)
+{
+ switch (usb_endpoint_type(epd)) {
+ case USB_ENDPOINT_XFER_BULK:
+ if (usb_endpoint_dir_in(epd)) {
+ if (bulk_in && !*bulk_in) {
+ *bulk_in = epd;
+ break;
+ }
+ } else {
+ if (bulk_out && !*bulk_out) {
+ *bulk_out = epd;
+ break;
+ }
+ }
+
+ return false;
+ case USB_ENDPOINT_XFER_INT:
+ if (usb_endpoint_dir_in(epd)) {
+ if (int_in && !*int_in) {
+ *int_in = epd;
+ break;
+ }
+ } else {
+ if (int_out && !*int_out) {
+ *int_out = epd;
+ break;
+ }
+ }
+
+ return false;
+ default:
+ return false;
+ }
+
+ return (!bulk_in || *bulk_in) && (!bulk_out || *bulk_out) &&
+ (!int_in || *int_in) && (!int_out || *int_out);
+}
+
+/**
+ * usb_find_common_endpoints() -- look up common endpoint descriptors
+ * @alt: alternate setting to search
+ * @bulk_in: pointer to descriptor pointer, or NULL
+ * @bulk_out: pointer to descriptor pointer, or NULL
+ * @int_in: pointer to descriptor pointer, or NULL
+ * @int_out: pointer to descriptor pointer, or NULL
+ *
+ * Search the alternate setting's endpoint descriptors for the first bulk-in,
+ * bulk-out, interrupt-in and interrupt-out endpoints and return them in the
+ * provided pointers (unless they are NULL).
+ *
+ * If a requested endpoint is not found, the corresponding pointer is set to
+ * NULL.
+ *
+ * Return: Zero if all requested descriptors were found, or -ENXIO otherwise.
+ */
+int usb_find_common_endpoints(struct usb_host_interface *alt,
+ struct usb_endpoint_descriptor **bulk_in,
+ struct usb_endpoint_descriptor **bulk_out,
+ struct usb_endpoint_descriptor **int_in,
+ struct usb_endpoint_descriptor **int_out)
+{
+ struct usb_endpoint_descriptor *epd;
+ int i;
+
+ if (bulk_in)
+ *bulk_in = NULL;
+ if (bulk_out)
+ *bulk_out = NULL;
+ if (int_in)
+ *int_in = NULL;
+ if (int_out)
+ *int_out = NULL;
+
+ for (i = 0; i < alt->desc.bNumEndpoints; ++i) {
+ epd = &alt->endpoint[i].desc;
+
+ if (match_endpoint(epd, bulk_in, bulk_out, int_in, int_out))
+ return 0;
+ }
+
+ return -ENXIO;
+}
+EXPORT_SYMBOL_GPL(usb_find_common_endpoints);
+
+/**
+ * usb_find_common_endpoints_reverse() -- look up common endpoint descriptors
+ * @alt: alternate setting to search
+ * @bulk_in: pointer to descriptor pointer, or NULL
+ * @bulk_out: pointer to descriptor pointer, or NULL
+ * @int_in: pointer to descriptor pointer, or NULL
+ * @int_out: pointer to descriptor pointer, or NULL
+ *
+ * Search the alternate setting's endpoint descriptors for the last bulk-in,
+ * bulk-out, interrupt-in and interrupt-out endpoints and return them in the
+ * provided pointers (unless they are NULL).
+ *
+ * If a requested endpoint is not found, the corresponding pointer is set to
+ * NULL.
+ *
+ * Return: Zero if all requested descriptors were found, or -ENXIO otherwise.
+ */
+int usb_find_common_endpoints_reverse(struct usb_host_interface *alt,
+ struct usb_endpoint_descriptor **bulk_in,
+ struct usb_endpoint_descriptor **bulk_out,
+ struct usb_endpoint_descriptor **int_in,
+ struct usb_endpoint_descriptor **int_out)
+{
+ struct usb_endpoint_descriptor *epd;
+ int i;
+
+ if (bulk_in)
+ *bulk_in = NULL;
+ if (bulk_out)
+ *bulk_out = NULL;
+ if (int_in)
+ *int_in = NULL;
+ if (int_out)
+ *int_out = NULL;
+
+ for (i = alt->desc.bNumEndpoints - 1; i >= 0; --i) {
+ epd = &alt->endpoint[i].desc;
+
+ if (match_endpoint(epd, bulk_in, bulk_out, int_in, int_out))
+ return 0;
+ }
+
+ return -ENXIO;
+}
+EXPORT_SYMBOL_GPL(usb_find_common_endpoints_reverse);
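As a probe-time usage sketch of the helpers added above, mirroring the usbtmc conversion earlier in this patch (my_probe is hypothetical; only the descriptor pointers a driver cares about need to be passed):

#include <linux/usb.h>

static int my_probe(struct usb_interface *intf,
		    const struct usb_device_id *id)
{
	struct usb_host_interface *alt = intf->cur_altsetting;
	struct usb_endpoint_descriptor *bulk_in, *bulk_out, *int_in;
	int ret;

	/* Fails with -ENXIO unless all requested endpoints are found. */
	ret = usb_find_common_endpoints(alt, &bulk_in, &bulk_out,
					&int_in, NULL);
	if (ret)
		return ret;

	dev_dbg(&intf->dev, "bulk in %02x, bulk out %02x, int in %02x\n",
		bulk_in->bEndpointAddress, bulk_out->bEndpointAddress,
		int_in->bEndpointAddress);

	return 0;
}

Drivers that need the last matching descriptors instead of the first can call usb_find_common_endpoints_reverse() with the same arguments.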
/**
* usb_find_alt_setting() - Given a configuration, find the alternate setting
@@ -453,9 +587,9 @@ struct usb_device *usb_alloc_dev(struct usb_device *parent,
* Note: calling dma_set_mask() on a USB device would set the
* mask for the entire HCD, so don't do that.
*/
- dev->dev.dma_mask = bus->controller->dma_mask;
- dev->dev.dma_pfn_offset = bus->controller->dma_pfn_offset;
- set_dev_node(&dev->dev, dev_to_node(bus->controller));
+ dev->dev.dma_mask = bus->sysdev->dma_mask;
+ dev->dev.dma_pfn_offset = bus->sysdev->dma_pfn_offset;
+ set_dev_node(&dev->dev, dev_to_node(bus->sysdev));
dev->state = USB_STATE_ATTACHED;
dev->lpm_disable_count = 1;
atomic_set(&dev->urbnum, 0);
@@ -803,7 +937,7 @@ struct urb *usb_buffer_map(struct urb *urb)
if (!urb
|| !urb->dev
|| !(bus = urb->dev->bus)
- || !(controller = bus->controller))
+ || !(controller = bus->sysdev))
return NULL;
if (controller->dma_mask) {
@@ -841,7 +975,7 @@ void usb_buffer_dmasync(struct urb *urb)
|| !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
|| !urb->dev
|| !(bus = urb->dev->bus)
- || !(controller = bus->controller))
+ || !(controller = bus->sysdev))
return;
if (controller->dma_mask) {
@@ -875,7 +1009,7 @@ void usb_buffer_unmap(struct urb *urb)
|| !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
|| !urb->dev
|| !(bus = urb->dev->bus)
- || !(controller = bus->controller))
+ || !(controller = bus->sysdev))
return;
if (controller->dma_mask) {
@@ -925,7 +1059,7 @@ int usb_buffer_map_sg(const struct usb_device *dev, int is_in,
if (!dev
|| !(bus = dev->bus)
- || !(controller = bus->controller)
+ || !(controller = bus->sysdev)
|| !controller->dma_mask)
return -EINVAL;
@@ -961,7 +1095,7 @@ void usb_buffer_dmasync_sg(const struct usb_device *dev, int is_in,
if (!dev
|| !(bus = dev->bus)
- || !(controller = bus->controller)
+ || !(controller = bus->sysdev)
|| !controller->dma_mask)
return;
@@ -989,7 +1123,7 @@ void usb_buffer_unmap_sg(const struct usb_device *dev, int is_in,
if (!dev
|| !(bus = dev->bus)
- || !(controller = bus->controller)
+ || !(controller = bus->sysdev)
|| !controller->dma_mask)
return;
diff --git a/drivers/usb/dwc2/Kconfig b/drivers/usb/dwc2/Kconfig
index e838701d6dd5..b6a495e98fd8 100644
--- a/drivers/usb/dwc2/Kconfig
+++ b/drivers/usb/dwc2/Kconfig
@@ -54,7 +54,7 @@ endchoice
config USB_DWC2_PCI
tristate "DWC2 PCI"
- depends on PCI
+ depends on USB_PCI
depends on USB_GADGET || !USB_GADGET
default n
select NOP_USB_XCEIV
diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h
index 1a7e83005082..8367d4f985c1 100644
--- a/drivers/usb/dwc2/core.h
+++ b/drivers/usb/dwc2/core.h
@@ -423,6 +423,10 @@ enum dwc2_ep0_state {
* needed.
* 0 - No (default)
* 1 - Yes
+ * @activate_stm_fs_transceiver: Activate internal transceiver using GGPIO
+ * register.
+ * 0 - Deactivate the transceiver (default)
+ * 1 - Activate the transceiver
* @g_dma: Enables gadget dma usage (default: autodetect).
* @g_dma_desc: Enables gadget descriptor DMA (default: autodetect).
* @g_rx_fifo_size: The periodic rx fifo size for the device, in
@@ -477,6 +481,7 @@ struct dwc2_core_params {
bool uframe_sched;
bool external_id_pin_ctl;
bool hibernation;
+ bool activate_stm_fs_transceiver;
u16 max_packet_count;
u32 max_transfer_size;
u32 ahbcfg;
diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
index a73722e27d07..740c7e86d31b 100644
--- a/drivers/usb/dwc2/hcd.c
+++ b/drivers/usb/dwc2/hcd.c
@@ -121,7 +121,7 @@ static void dwc2_init_fs_ls_pclk_sel(struct dwc2_hsotg *hsotg)
static int dwc2_fs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
{
- u32 usbcfg, i2cctl;
+ u32 usbcfg, ggpio, i2cctl;
int retval = 0;
/*
@@ -145,6 +145,19 @@ static int dwc2_fs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
return retval;
}
}
+
+ if (hsotg->params.activate_stm_fs_transceiver) {
+ ggpio = dwc2_readl(hsotg->regs + GGPIO);
+ if (!(ggpio & GGPIO_STM32_OTG_GCCFG_PWRDWN)) {
+ dev_dbg(hsotg->dev, "Activating transceiver\n");
+ /*
+ * STM32F4x9 uses the GGPIO register as general
+ * core configuration register.
+ */
+ ggpio |= GGPIO_STM32_OTG_GCCFG_PWRDWN;
+ dwc2_writel(ggpio, hsotg->regs + GGPIO);
+ }
+ }
}
/*
@@ -3264,6 +3277,7 @@ static void dwc2_conn_id_status_change(struct work_struct *work)
dwc2_core_init(hsotg, false);
dwc2_enable_global_interrupts(hsotg);
spin_lock_irqsave(&hsotg->lock, flags);
+ dwc2_hsotg_disconnect(hsotg);
dwc2_hsotg_core_init_disconnected(hsotg, false);
spin_unlock_irqrestore(&hsotg->lock, flags);
dwc2_hsotg_core_connect(hsotg);
diff --git a/drivers/usb/dwc2/hw.h b/drivers/usb/dwc2/hw.h
index bde72489ae66..4592012c4743 100644
--- a/drivers/usb/dwc2/hw.h
+++ b/drivers/usb/dwc2/hw.h
@@ -225,6 +225,8 @@
#define GPVNDCTL HSOTG_REG(0x0034)
#define GGPIO HSOTG_REG(0x0038)
+#define GGPIO_STM32_OTG_GCCFG_PWRDWN BIT(16)
+
#define GUID HSOTG_REG(0x003c)
#define GSNPSID HSOTG_REG(0x0040)
#define GHWCFG1 HSOTG_REG(0x0044)
diff --git a/drivers/usb/dwc2/params.c b/drivers/usb/dwc2/params.c
index 2990c347289f..9cd8722f24f6 100644
--- a/drivers/usb/dwc2/params.c
+++ b/drivers/usb/dwc2/params.c
@@ -120,6 +120,22 @@ static void dwc2_set_amcc_params(struct dwc2_hsotg *hsotg)
p->ahbcfg = GAHBCFG_HBSTLEN_INCR16 << GAHBCFG_HBSTLEN_SHIFT;
}
+static void dwc2_set_stm32f4x9_fsotg_params(struct dwc2_hsotg *hsotg)
+{
+ struct dwc2_core_params *p = &hsotg->params;
+
+ p->otg_cap = DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE;
+ p->speed = DWC2_SPEED_PARAM_FULL;
+ p->host_rx_fifo_size = 128;
+ p->host_nperio_tx_fifo_size = 96;
+ p->host_perio_tx_fifo_size = 96;
+ p->max_packet_count = 256;
+ p->phy_type = DWC2_PHY_TYPE_PARAM_FS;
+ p->i2c_enable = false;
+ p->uframe_sched = false;
+ p->activate_stm_fs_transceiver = true;
+}
+
const struct of_device_id dwc2_of_match_table[] = {
{ .compatible = "brcm,bcm2835-usb", .data = dwc2_set_bcm_params },
{ .compatible = "hisilicon,hi6220-usb", .data = dwc2_set_his_params },
@@ -133,6 +149,9 @@ const struct of_device_id dwc2_of_match_table[] = {
{ .compatible = "amlogic,meson-gxbb-usb",
.data = dwc2_set_amlogic_params },
{ .compatible = "amcc,dwc-otg", .data = dwc2_set_amcc_params },
+ { .compatible = "st,stm32f4x9-fsotg",
+ .data = dwc2_set_stm32f4x9_fsotg_params },
+ { .compatible = "st,stm32f4x9-hsotg" },
{},
};
MODULE_DEVICE_TABLE(of, dwc2_of_match_table);
diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c
index 9564bc76c56f..daf0d37acb37 100644
--- a/drivers/usb/dwc2/platform.c
+++ b/drivers/usb/dwc2/platform.c
@@ -214,20 +214,11 @@ static int dwc2_lowlevel_hw_init(struct dwc2_hsotg *hsotg)
hsotg->reset = devm_reset_control_get_optional(hsotg->dev, "dwc2");
if (IS_ERR(hsotg->reset)) {
ret = PTR_ERR(hsotg->reset);
- switch (ret) {
- case -ENOENT:
- case -ENOTSUPP:
- hsotg->reset = NULL;
- break;
- default:
- dev_err(hsotg->dev, "error getting reset control %d\n",
- ret);
- return ret;
- }
+ dev_err(hsotg->dev, "error getting reset control %d\n", ret);
+ return ret;
}
- if (hsotg->reset)
- reset_control_deassert(hsotg->reset);
+ reset_control_deassert(hsotg->reset);
/* Set default UTMI width */
hsotg->phyif = GUSBCFG_PHYIF16;
@@ -326,8 +317,7 @@ static int dwc2_driver_remove(struct platform_device *dev)
if (hsotg->ll_hw_enabled)
dwc2_lowlevel_hw_disable(hsotg);
- if (hsotg->reset)
- reset_control_assert(hsotg->reset);
+ reset_control_assert(hsotg->reset);
return 0;
}
diff --git a/drivers/usb/dwc3/Kconfig b/drivers/usb/dwc3/Kconfig
index c5aa235863e8..ab8c0e0d3b60 100644
--- a/drivers/usb/dwc3/Kconfig
+++ b/drivers/usb/dwc3/Kconfig
@@ -41,6 +41,7 @@ config USB_DWC3_GADGET
config USB_DWC3_DUAL_ROLE
bool "Dual Role mode"
depends on ((USB=y || USB=USB_DWC3) && (USB_GADGET=y || USB_GADGET=USB_DWC3))
+ depends on (EXTCON=y || EXTCON=USB_DWC3)
help
This is the default mode of working of DWC3 controller where
both host and gadget features are enabled.
@@ -70,7 +71,7 @@ config USB_DWC3_EXYNOS
config USB_DWC3_PCI
tristate "PCIe-based Platforms"
- depends on PCI && ACPI
+ depends on USB_PCI && ACPI
default USB_DWC3
help
If you're using the DesignWare Core IP with a PCIe, please say
diff --git a/drivers/usb/dwc3/Makefile b/drivers/usb/dwc3/Makefile
index ffca34029b21..f15fabbd1e59 100644
--- a/drivers/usb/dwc3/Makefile
+++ b/drivers/usb/dwc3/Makefile
@@ -17,6 +17,10 @@ ifneq ($(filter y,$(CONFIG_USB_DWC3_GADGET) $(CONFIG_USB_DWC3_DUAL_ROLE)),)
dwc3-y += gadget.o ep0.o
endif
+ifneq ($(CONFIG_USB_DWC3_DUAL_ROLE),)
+ dwc3-y += drd.o
+endif
+
ifneq ($(CONFIG_USB_DWC3_ULPI),)
dwc3-y += ulpi.o
endif
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 369bab16a824..455d89a1cd6d 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -100,7 +100,10 @@ static int dwc3_get_dr_mode(struct dwc3 *dwc)
return 0;
}
-void dwc3_set_mode(struct dwc3 *dwc, u32 mode)
+static void dwc3_event_buffers_cleanup(struct dwc3 *dwc);
+static int dwc3_event_buffers_setup(struct dwc3 *dwc);
+
+static void dwc3_set_prtcap(struct dwc3 *dwc, u32 mode)
{
u32 reg;
@@ -110,6 +113,69 @@ void dwc3_set_mode(struct dwc3 *dwc, u32 mode)
dwc3_writel(dwc->regs, DWC3_GCTL, reg);
}
+static void __dwc3_set_mode(struct work_struct *work)
+{
+ struct dwc3 *dwc = work_to_dwc(work);
+ unsigned long flags;
+ int ret;
+
+ if (!dwc->desired_dr_role)
+ return;
+
+ if (dwc->desired_dr_role == dwc->current_dr_role)
+ return;
+
+ if (dwc->dr_mode != USB_DR_MODE_OTG)
+ return;
+
+ switch (dwc->current_dr_role) {
+ case DWC3_GCTL_PRTCAP_HOST:
+ dwc3_host_exit(dwc);
+ break;
+ case DWC3_GCTL_PRTCAP_DEVICE:
+ dwc3_gadget_exit(dwc);
+ dwc3_event_buffers_cleanup(dwc);
+ break;
+ default:
+ break;
+ }
+
+ spin_lock_irqsave(&dwc->lock, flags);
+
+ dwc3_set_prtcap(dwc, dwc->desired_dr_role);
+
+ dwc->current_dr_role = dwc->desired_dr_role;
+
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
+ switch (dwc->desired_dr_role) {
+ case DWC3_GCTL_PRTCAP_HOST:
+ ret = dwc3_host_init(dwc);
+ if (ret)
+ dev_err(dwc->dev, "failed to initialize host\n");
+ break;
+ case DWC3_GCTL_PRTCAP_DEVICE:
+ dwc3_event_buffers_setup(dwc);
+ ret = dwc3_gadget_init(dwc);
+ if (ret)
+ dev_err(dwc->dev, "failed to initialize peripheral\n");
+ break;
+ default:
+ break;
+ }
+}
+
+void dwc3_set_mode(struct dwc3 *dwc, u32 mode)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dwc->lock, flags);
+ dwc->desired_dr_role = mode;
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
+ queue_work(system_power_efficient_wq, &dwc->drd_work);
+}
+
u32 dwc3_core_fifo_space(struct dwc3_ep *dep, u8 type)
{
struct dwc3 *dwc = dep->dwc;
@@ -397,8 +463,7 @@ static void dwc3_core_num_eps(struct dwc3 *dwc)
{
struct dwc3_hwparams *parms = &dwc->hwparams;
- dwc->num_in_eps = DWC3_NUM_IN_EPS(parms);
- dwc->num_out_eps = DWC3_NUM_EPS(parms) - dwc->num_in_eps;
+ dwc->num_eps = DWC3_NUM_EPS(parms);
}
static void dwc3_cache_hwparams(struct dwc3 *dwc)
@@ -432,6 +497,12 @@ static int dwc3_phy_setup(struct dwc3 *dwc)
reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));
/*
+ * Make sure UX_EXIT_PX is cleared as that causes issues with some
+ * PHYs. Also, this bit is not supposed to be used in normal operation.
+ */
+ reg &= ~DWC3_GUSB3PIPECTL_UX_EXIT_PX;
+
+ /*
* Above 1.94a, it is recommended to set DWC3_GUSB3PIPECTL_SUSPHY
* to '0' during coreConsultant configuration. So default value
* will be '0' when the core is reset. Application needs to set it
@@ -714,21 +785,6 @@ static int dwc3_core_init(struct dwc3 *dwc)
goto err4;
}
- switch (dwc->dr_mode) {
- case USB_DR_MODE_PERIPHERAL:
- dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_DEVICE);
- break;
- case USB_DR_MODE_HOST:
- dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_HOST);
- break;
- case USB_DR_MODE_OTG:
- dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_OTG);
- break;
- default:
- dev_warn(dwc->dev, "Unsupported mode %d\n", dwc->dr_mode);
- break;
- }
-
/*
* ENDXFER polling is available on version 3.10a and later of
* the DWC_usb3 controller. It is NOT available in the
@@ -846,6 +902,7 @@ static int dwc3_core_init_mode(struct dwc3 *dwc)
switch (dwc->dr_mode) {
case USB_DR_MODE_PERIPHERAL:
+ dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE);
ret = dwc3_gadget_init(dwc);
if (ret) {
if (ret != -EPROBE_DEFER)
@@ -854,6 +911,7 @@ static int dwc3_core_init_mode(struct dwc3 *dwc)
}
break;
case USB_DR_MODE_HOST:
+ dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_HOST);
ret = dwc3_host_init(dwc);
if (ret) {
if (ret != -EPROBE_DEFER)
@@ -862,17 +920,11 @@ static int dwc3_core_init_mode(struct dwc3 *dwc)
}
break;
case USB_DR_MODE_OTG:
- ret = dwc3_host_init(dwc);
- if (ret) {
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "failed to initialize host\n");
- return ret;
- }
-
- ret = dwc3_gadget_init(dwc);
+ INIT_WORK(&dwc->drd_work, __dwc3_set_mode);
+ ret = dwc3_drd_init(dwc);
if (ret) {
if (ret != -EPROBE_DEFER)
- dev_err(dev, "failed to initialize gadget\n");
+ dev_err(dev, "failed to initialize dual-role\n");
return ret;
}
break;
@@ -894,8 +946,7 @@ static void dwc3_core_exit_mode(struct dwc3 *dwc)
dwc3_host_exit(dwc);
break;
case USB_DR_MODE_OTG:
- dwc3_host_exit(dwc);
- dwc3_gadget_exit(dwc);
+ dwc3_drd_exit(dwc);
break;
default:
/* do nothing */
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index 2b9e4ca3c932..981c77f5628e 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -23,10 +23,12 @@
#include <linux/spinlock.h>
#include <linux/ioport.h>
#include <linux/list.h>
+#include <linux/bitops.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/debugfs.h>
#include <linux/wait.h>
+#include <linux/workqueue.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
@@ -39,9 +41,8 @@
/* Global constants */
#define DWC3_PULL_UP_TIMEOUT 500 /* ms */
-#define DWC3_ZLP_BUF_SIZE 1024 /* size of a superspeed bulk */
#define DWC3_BOUNCE_SIZE 1024 /* size of a superspeed bulk */
-#define DWC3_EP0_BOUNCE_SIZE 512
+#define DWC3_EP0_SETUP_SIZE 512
#define DWC3_ENDPOINTS_NUM 32
#define DWC3_XHCI_RESOURCES_NUM 2
@@ -66,7 +67,7 @@
#define DWC3_DEVICE_EVENT_OVERFLOW 11
#define DWC3_GEVNTCOUNT_MASK 0xfffc
-#define DWC3_GEVNTCOUNT_EHB (1 << 31)
+#define DWC3_GEVNTCOUNT_EHB BIT(31)
#define DWC3_GSNPSID_MASK 0xffff0000
#define DWC3_GSNPSREV_MASK 0xffff
@@ -116,20 +117,20 @@
#define DWC3_VER_NUMBER 0xc1a0
#define DWC3_VER_TYPE 0xc1a4
-#define DWC3_GUSB2PHYCFG(n) (0xc200 + (n * 0x04))
-#define DWC3_GUSB2I2CCTL(n) (0xc240 + (n * 0x04))
+#define DWC3_GUSB2PHYCFG(n) (0xc200 + ((n) * 0x04))
+#define DWC3_GUSB2I2CCTL(n) (0xc240 + ((n) * 0x04))
-#define DWC3_GUSB2PHYACC(n) (0xc280 + (n * 0x04))
+#define DWC3_GUSB2PHYACC(n) (0xc280 + ((n) * 0x04))
-#define DWC3_GUSB3PIPECTL(n) (0xc2c0 + (n * 0x04))
+#define DWC3_GUSB3PIPECTL(n) (0xc2c0 + ((n) * 0x04))
-#define DWC3_GTXFIFOSIZ(n) (0xc300 + (n * 0x04))
-#define DWC3_GRXFIFOSIZ(n) (0xc380 + (n * 0x04))
+#define DWC3_GTXFIFOSIZ(n) (0xc300 + ((n) * 0x04))
+#define DWC3_GRXFIFOSIZ(n) (0xc380 + ((n) * 0x04))
-#define DWC3_GEVNTADRLO(n) (0xc400 + (n * 0x10))
-#define DWC3_GEVNTADRHI(n) (0xc404 + (n * 0x10))
-#define DWC3_GEVNTSIZ(n) (0xc408 + (n * 0x10))
-#define DWC3_GEVNTCOUNT(n) (0xc40c + (n * 0x10))
+#define DWC3_GEVNTADRLO(n) (0xc400 + ((n) * 0x10))
+#define DWC3_GEVNTADRHI(n) (0xc404 + ((n) * 0x10))
+#define DWC3_GEVNTSIZ(n) (0xc408 + ((n) * 0x10))
+#define DWC3_GEVNTCOUNT(n) (0xc40c + ((n) * 0x10))
#define DWC3_GHWPARAMS8 0xc600
#define DWC3_GFLADJ 0xc630
@@ -143,13 +144,13 @@
#define DWC3_DGCMD 0xc714
#define DWC3_DALEPENA 0xc720
-#define DWC3_DEP_BASE(n) (0xc800 + (n * 0x10))
+#define DWC3_DEP_BASE(n) (0xc800 + ((n) * 0x10))
#define DWC3_DEPCMDPAR2 0x00
#define DWC3_DEPCMDPAR1 0x04
#define DWC3_DEPCMDPAR0 0x08
#define DWC3_DEPCMD 0x0c
-#define DWC3_DEV_IMOD(n) (0xca00 + (n * 0x4))
+#define DWC3_DEV_IMOD(n) (0xca00 + ((n) * 0x4))
/* OTG Registers */
#define DWC3_OCFG 0xcc00
@@ -176,11 +177,11 @@
/* Global RX Threshold Configuration Register */
#define DWC3_GRXTHRCFG_MAXRXBURSTSIZE(n) (((n) & 0x1f) << 19)
#define DWC3_GRXTHRCFG_RXPKTCNT(n) (((n) & 0xf) << 24)
-#define DWC3_GRXTHRCFG_PKTCNTSEL (1 << 29)
+#define DWC3_GRXTHRCFG_PKTCNTSEL BIT(29)
/* Global Configuration Register */
#define DWC3_GCTL_PWRDNSCALE(n) ((n) << 19)
-#define DWC3_GCTL_U2RSTECN (1 << 16)
+#define DWC3_GCTL_U2RSTECN BIT(16)
#define DWC3_GCTL_RAMCLKSEL(x) (((x) & DWC3_GCTL_CLK_MASK) << 6)
#define DWC3_GCTL_CLK_BUS (0)
#define DWC3_GCTL_CLK_PIPE (1)
@@ -193,24 +194,24 @@
#define DWC3_GCTL_PRTCAP_DEVICE 2
#define DWC3_GCTL_PRTCAP_OTG 3
-#define DWC3_GCTL_CORESOFTRESET (1 << 11)
-#define DWC3_GCTL_SOFITPSYNC (1 << 10)
+#define DWC3_GCTL_CORESOFTRESET BIT(11)
+#define DWC3_GCTL_SOFITPSYNC BIT(10)
#define DWC3_GCTL_SCALEDOWN(n) ((n) << 4)
#define DWC3_GCTL_SCALEDOWN_MASK DWC3_GCTL_SCALEDOWN(3)
-#define DWC3_GCTL_DISSCRAMBLE (1 << 3)
-#define DWC3_GCTL_U2EXIT_LFPS (1 << 2)
-#define DWC3_GCTL_GBLHIBERNATIONEN (1 << 1)
-#define DWC3_GCTL_DSBLCLKGTNG (1 << 0)
+#define DWC3_GCTL_DISSCRAMBLE BIT(3)
+#define DWC3_GCTL_U2EXIT_LFPS BIT(2)
+#define DWC3_GCTL_GBLHIBERNATIONEN BIT(1)
+#define DWC3_GCTL_DSBLCLKGTNG BIT(0)
/* Global User Control 1 Register */
-#define DWC3_GUCTL1_DEV_L1_EXIT_BY_HW (1 << 24)
+#define DWC3_GUCTL1_DEV_L1_EXIT_BY_HW BIT(24)
/* Global USB2 PHY Configuration Register */
-#define DWC3_GUSB2PHYCFG_PHYSOFTRST (1 << 31)
-#define DWC3_GUSB2PHYCFG_U2_FREECLK_EXISTS (1 << 30)
-#define DWC3_GUSB2PHYCFG_SUSPHY (1 << 6)
-#define DWC3_GUSB2PHYCFG_ULPI_UTMI (1 << 4)
-#define DWC3_GUSB2PHYCFG_ENBLSLPM (1 << 8)
+#define DWC3_GUSB2PHYCFG_PHYSOFTRST BIT(31)
+#define DWC3_GUSB2PHYCFG_U2_FREECLK_EXISTS BIT(30)
+#define DWC3_GUSB2PHYCFG_SUSPHY BIT(6)
+#define DWC3_GUSB2PHYCFG_ULPI_UTMI BIT(4)
+#define DWC3_GUSB2PHYCFG_ENBLSLPM BIT(8)
#define DWC3_GUSB2PHYCFG_PHYIF(n) (n << 3)
#define DWC3_GUSB2PHYCFG_PHYIF_MASK DWC3_GUSB2PHYCFG_PHYIF(1)
#define DWC3_GUSB2PHYCFG_USBTRDTIM(n) (n << 10)
@@ -221,25 +222,26 @@
#define UTMI_PHYIF_8_BIT 0
/* Global USB2 PHY Vendor Control Register */
-#define DWC3_GUSB2PHYACC_NEWREGREQ (1 << 25)
-#define DWC3_GUSB2PHYACC_BUSY (1 << 23)
-#define DWC3_GUSB2PHYACC_WRITE (1 << 22)
+#define DWC3_GUSB2PHYACC_NEWREGREQ BIT(25)
+#define DWC3_GUSB2PHYACC_BUSY BIT(23)
+#define DWC3_GUSB2PHYACC_WRITE BIT(22)
#define DWC3_GUSB2PHYACC_ADDR(n) (n << 16)
#define DWC3_GUSB2PHYACC_EXTEND_ADDR(n) (n << 8)
#define DWC3_GUSB2PHYACC_DATA(n) (n & 0xff)
/* Global USB3 PIPE Control Register */
-#define DWC3_GUSB3PIPECTL_PHYSOFTRST (1 << 31)
-#define DWC3_GUSB3PIPECTL_U2SSINP3OK (1 << 29)
-#define DWC3_GUSB3PIPECTL_DISRXDETINP3 (1 << 28)
-#define DWC3_GUSB3PIPECTL_REQP1P2P3 (1 << 24)
+#define DWC3_GUSB3PIPECTL_PHYSOFTRST BIT(31)
+#define DWC3_GUSB3PIPECTL_U2SSINP3OK BIT(29)
+#define DWC3_GUSB3PIPECTL_DISRXDETINP3 BIT(28)
+#define DWC3_GUSB3PIPECTL_UX_EXIT_PX BIT(27)
+#define DWC3_GUSB3PIPECTL_REQP1P2P3 BIT(24)
#define DWC3_GUSB3PIPECTL_DEP1P2P3(n) ((n) << 19)
#define DWC3_GUSB3PIPECTL_DEP1P2P3_MASK DWC3_GUSB3PIPECTL_DEP1P2P3(7)
#define DWC3_GUSB3PIPECTL_DEP1P2P3_EN DWC3_GUSB3PIPECTL_DEP1P2P3(1)
-#define DWC3_GUSB3PIPECTL_DEPOCHANGE (1 << 18)
-#define DWC3_GUSB3PIPECTL_SUSPHY (1 << 17)
-#define DWC3_GUSB3PIPECTL_LFPSFILT (1 << 9)
-#define DWC3_GUSB3PIPECTL_RX_DETOPOLL (1 << 8)
+#define DWC3_GUSB3PIPECTL_DEPOCHANGE BIT(18)
+#define DWC3_GUSB3PIPECTL_SUSPHY BIT(17)
+#define DWC3_GUSB3PIPECTL_LFPSFILT BIT(9)
+#define DWC3_GUSB3PIPECTL_RX_DETOPOLL BIT(8)
#define DWC3_GUSB3PIPECTL_TX_DEEPH_MASK DWC3_GUSB3PIPECTL_TX_DEEPH(3)
#define DWC3_GUSB3PIPECTL_TX_DEEPH(n) ((n) << 1)
@@ -248,7 +250,7 @@
#define DWC3_GTXFIFOSIZ_TXFSTADDR(n) ((n) & 0xffff0000)
/* Global Event Size Registers */
-#define DWC3_GEVNTSIZ_INTMASK (1 << 31)
+#define DWC3_GEVNTSIZ_INTMASK BIT(31)
#define DWC3_GEVNTSIZ_SIZE(n) ((n) & 0xffff)
/* Global HWPARAMS0 Register */
@@ -289,18 +291,18 @@
#define DWC3_MAX_HIBER_SCRATCHBUFS 15
/* Global HWPARAMS6 Register */
-#define DWC3_GHWPARAMS6_EN_FPGA (1 << 7)
+#define DWC3_GHWPARAMS6_EN_FPGA BIT(7)
/* Global HWPARAMS7 Register */
#define DWC3_GHWPARAMS7_RAM1_DEPTH(n) ((n) & 0xffff)
#define DWC3_GHWPARAMS7_RAM2_DEPTH(n) (((n) >> 16) & 0xffff)
/* Global Frame Length Adjustment Register */
-#define DWC3_GFLADJ_30MHZ_SDBND_SEL (1 << 7)
+#define DWC3_GFLADJ_30MHZ_SDBND_SEL BIT(7)
#define DWC3_GFLADJ_30MHZ_MASK 0x3f
/* Global User Control Register 2 */
-#define DWC3_GUCTL2_RST_ACTBITLATER (1 << 14)
+#define DWC3_GUCTL2_RST_ACTBITLATER BIT(14)
/* Device Configuration Register */
#define DWC3_DCFG_DEVADDR(addr) ((addr) << 3)
@@ -310,23 +312,23 @@
#define DWC3_DCFG_SUPERSPEED_PLUS (5 << 0) /* DWC_usb31 only */
#define DWC3_DCFG_SUPERSPEED (4 << 0)
#define DWC3_DCFG_HIGHSPEED (0 << 0)
-#define DWC3_DCFG_FULLSPEED (1 << 0)
+#define DWC3_DCFG_FULLSPEED BIT(0)
#define DWC3_DCFG_LOWSPEED (2 << 0)
#define DWC3_DCFG_NUMP_SHIFT 17
#define DWC3_DCFG_NUMP(n) (((n) >> DWC3_DCFG_NUMP_SHIFT) & 0x1f)
#define DWC3_DCFG_NUMP_MASK (0x1f << DWC3_DCFG_NUMP_SHIFT)
-#define DWC3_DCFG_LPM_CAP (1 << 22)
+#define DWC3_DCFG_LPM_CAP BIT(22)
/* Device Control Register */
-#define DWC3_DCTL_RUN_STOP (1 << 31)
-#define DWC3_DCTL_CSFTRST (1 << 30)
-#define DWC3_DCTL_LSFTRST (1 << 29)
+#define DWC3_DCTL_RUN_STOP BIT(31)
+#define DWC3_DCTL_CSFTRST BIT(30)
+#define DWC3_DCTL_LSFTRST BIT(29)
#define DWC3_DCTL_HIRD_THRES_MASK (0x1f << 24)
#define DWC3_DCTL_HIRD_THRES(n) ((n) << 24)
-#define DWC3_DCTL_APPL1RES (1 << 23)
+#define DWC3_DCTL_APPL1RES BIT(23)
/* These apply for core versions 1.87a and earlier */
#define DWC3_DCTL_TRGTULST_MASK (0x0f << 17)
@@ -341,15 +343,15 @@
#define DWC3_DCTL_LPM_ERRATA_MASK DWC3_DCTL_LPM_ERRATA(0xf)
#define DWC3_DCTL_LPM_ERRATA(n) ((n) << 20)
-#define DWC3_DCTL_KEEP_CONNECT (1 << 19)
-#define DWC3_DCTL_L1_HIBER_EN (1 << 18)
-#define DWC3_DCTL_CRS (1 << 17)
-#define DWC3_DCTL_CSS (1 << 16)
+#define DWC3_DCTL_KEEP_CONNECT BIT(19)
+#define DWC3_DCTL_L1_HIBER_EN BIT(18)
+#define DWC3_DCTL_CRS BIT(17)
+#define DWC3_DCTL_CSS BIT(16)
-#define DWC3_DCTL_INITU2ENA (1 << 12)
-#define DWC3_DCTL_ACCEPTU2ENA (1 << 11)
-#define DWC3_DCTL_INITU1ENA (1 << 10)
-#define DWC3_DCTL_ACCEPTU1ENA (1 << 9)
+#define DWC3_DCTL_INITU2ENA BIT(12)
+#define DWC3_DCTL_ACCEPTU2ENA BIT(11)
+#define DWC3_DCTL_INITU1ENA BIT(10)
+#define DWC3_DCTL_ACCEPTU1ENA BIT(9)
#define DWC3_DCTL_TSTCTRL_MASK (0xf << 1)
#define DWC3_DCTL_ULSTCHNGREQ_MASK (0x0f << 5)
@@ -364,36 +366,36 @@
#define DWC3_DCTL_ULSTCHNG_LOOPBACK (DWC3_DCTL_ULSTCHNGREQ(11))
/* Device Event Enable Register */
-#define DWC3_DEVTEN_VNDRDEVTSTRCVEDEN (1 << 12)
-#define DWC3_DEVTEN_EVNTOVERFLOWEN (1 << 11)
-#define DWC3_DEVTEN_CMDCMPLTEN (1 << 10)
-#define DWC3_DEVTEN_ERRTICERREN (1 << 9)
-#define DWC3_DEVTEN_SOFEN (1 << 7)
-#define DWC3_DEVTEN_EOPFEN (1 << 6)
-#define DWC3_DEVTEN_HIBERNATIONREQEVTEN (1 << 5)
-#define DWC3_DEVTEN_WKUPEVTEN (1 << 4)
-#define DWC3_DEVTEN_ULSTCNGEN (1 << 3)
-#define DWC3_DEVTEN_CONNECTDONEEN (1 << 2)
-#define DWC3_DEVTEN_USBRSTEN (1 << 1)
-#define DWC3_DEVTEN_DISCONNEVTEN (1 << 0)
+#define DWC3_DEVTEN_VNDRDEVTSTRCVEDEN BIT(12)
+#define DWC3_DEVTEN_EVNTOVERFLOWEN BIT(11)
+#define DWC3_DEVTEN_CMDCMPLTEN BIT(10)
+#define DWC3_DEVTEN_ERRTICERREN BIT(9)
+#define DWC3_DEVTEN_SOFEN BIT(7)
+#define DWC3_DEVTEN_EOPFEN BIT(6)
+#define DWC3_DEVTEN_HIBERNATIONREQEVTEN BIT(5)
+#define DWC3_DEVTEN_WKUPEVTEN BIT(4)
+#define DWC3_DEVTEN_ULSTCNGEN BIT(3)
+#define DWC3_DEVTEN_CONNECTDONEEN BIT(2)
+#define DWC3_DEVTEN_USBRSTEN BIT(1)
+#define DWC3_DEVTEN_DISCONNEVTEN BIT(0)
/* Device Status Register */
-#define DWC3_DSTS_DCNRD (1 << 29)
+#define DWC3_DSTS_DCNRD BIT(29)
/* This applies for core versions 1.87a and earlier */
-#define DWC3_DSTS_PWRUPREQ (1 << 24)
+#define DWC3_DSTS_PWRUPREQ BIT(24)
/* These apply for core versions 1.94a and later */
-#define DWC3_DSTS_RSS (1 << 25)
-#define DWC3_DSTS_SSS (1 << 24)
+#define DWC3_DSTS_RSS BIT(25)
+#define DWC3_DSTS_SSS BIT(24)
-#define DWC3_DSTS_COREIDLE (1 << 23)
-#define DWC3_DSTS_DEVCTRLHLT (1 << 22)
+#define DWC3_DSTS_COREIDLE BIT(23)
+#define DWC3_DSTS_DEVCTRLHLT BIT(22)
#define DWC3_DSTS_USBLNKST_MASK (0x0f << 18)
#define DWC3_DSTS_USBLNKST(n) (((n) & DWC3_DSTS_USBLNKST_MASK) >> 18)
-#define DWC3_DSTS_RXFIFOEMPTY (1 << 17)
+#define DWC3_DSTS_RXFIFOEMPTY BIT(17)
#define DWC3_DSTS_SOFFN_MASK (0x3fff << 3)
#define DWC3_DSTS_SOFFN(n) (((n) & DWC3_DSTS_SOFFN_MASK) >> 3)
@@ -403,7 +405,7 @@
#define DWC3_DSTS_SUPERSPEED_PLUS (5 << 0) /* DWC_usb31 only */
#define DWC3_DSTS_SUPERSPEED (4 << 0)
#define DWC3_DSTS_HIGHSPEED (0 << 0)
-#define DWC3_DSTS_FULLSPEED (1 << 0)
+#define DWC3_DSTS_FULLSPEED BIT(0)
#define DWC3_DSTS_LOWSPEED (2 << 0)
/* Device Generic Command Register */
@@ -421,26 +423,26 @@
#define DWC3_DGCMD_RUN_SOC_BUS_LOOPBACK 0x10
#define DWC3_DGCMD_STATUS(n) (((n) >> 12) & 0x0F)
-#define DWC3_DGCMD_CMDACT (1 << 10)
-#define DWC3_DGCMD_CMDIOC (1 << 8)
+#define DWC3_DGCMD_CMDACT BIT(10)
+#define DWC3_DGCMD_CMDIOC BIT(8)
/* Device Generic Command Parameter Register */
-#define DWC3_DGCMDPAR_FORCE_LINKPM_ACCEPT (1 << 0)
+#define DWC3_DGCMDPAR_FORCE_LINKPM_ACCEPT BIT(0)
#define DWC3_DGCMDPAR_FIFO_NUM(n) ((n) << 0)
#define DWC3_DGCMDPAR_RX_FIFO (0 << 5)
-#define DWC3_DGCMDPAR_TX_FIFO (1 << 5)
+#define DWC3_DGCMDPAR_TX_FIFO BIT(5)
#define DWC3_DGCMDPAR_LOOPBACK_DIS (0 << 0)
-#define DWC3_DGCMDPAR_LOOPBACK_ENA (1 << 0)
+#define DWC3_DGCMDPAR_LOOPBACK_ENA BIT(0)
/* Device Endpoint Command Register */
#define DWC3_DEPCMD_PARAM_SHIFT 16
#define DWC3_DEPCMD_PARAM(x) ((x) << DWC3_DEPCMD_PARAM_SHIFT)
#define DWC3_DEPCMD_GET_RSC_IDX(x) (((x) >> DWC3_DEPCMD_PARAM_SHIFT) & 0x7f)
#define DWC3_DEPCMD_STATUS(x) (((x) >> 12) & 0x0F)
-#define DWC3_DEPCMD_HIPRI_FORCERM (1 << 11)
-#define DWC3_DEPCMD_CLEARPENDIN (1 << 11)
-#define DWC3_DEPCMD_CMDACT (1 << 10)
-#define DWC3_DEPCMD_CMDIOC (1 << 8)
+#define DWC3_DEPCMD_HIPRI_FORCERM BIT(11)
+#define DWC3_DEPCMD_CLEARPENDIN BIT(11)
+#define DWC3_DEPCMD_CMDACT BIT(10)
+#define DWC3_DEPCMD_CMDIOC BIT(8)
#define DWC3_DEPCMD_DEPSTARTCFG (0x09 << 0)
#define DWC3_DEPCMD_ENDTRANSFER (0x08 << 0)
@@ -458,7 +460,7 @@
#define DWC3_DEPCMD_CMD(x) ((x) & 0xf)
/* The EP number goes 0..31 so ep0 is always out and ep1 is always in */
-#define DWC3_DALEPENA_EP(n) (1 << n)
+#define DWC3_DALEPENA_EP(n) BIT(n)
#define DWC3_DEPCMD_TYPE_CONTROL 0
#define DWC3_DEPCMD_TYPE_ISOC 1
@@ -500,8 +502,8 @@ struct dwc3_event_buffer {
struct dwc3 *dwc;
};
-#define DWC3_EP_FLAG_STALLED (1 << 0)
-#define DWC3_EP_FLAG_WEDGED (1 << 1)
+#define DWC3_EP_FLAG_STALLED BIT(0)
+#define DWC3_EP_FLAG_WEDGED BIT(1)
#define DWC3_EP_DIRECTION_TX true
#define DWC3_EP_DIRECTION_RX false
@@ -550,17 +552,17 @@ struct dwc3_ep {
u32 saved_state;
unsigned flags;
-#define DWC3_EP_ENABLED (1 << 0)
-#define DWC3_EP_STALL (1 << 1)
-#define DWC3_EP_WEDGE (1 << 2)
-#define DWC3_EP_BUSY (1 << 4)
-#define DWC3_EP_PENDING_REQUEST (1 << 5)
-#define DWC3_EP_MISSED_ISOC (1 << 6)
-#define DWC3_EP_END_TRANSFER_PENDING (1 << 7)
-#define DWC3_EP_TRANSFER_STARTED (1 << 8)
+#define DWC3_EP_ENABLED BIT(0)
+#define DWC3_EP_STALL BIT(1)
+#define DWC3_EP_WEDGE BIT(2)
+#define DWC3_EP_BUSY BIT(4)
+#define DWC3_EP_PENDING_REQUEST BIT(5)
+#define DWC3_EP_MISSED_ISOC BIT(6)
+#define DWC3_EP_END_TRANSFER_PENDING BIT(7)
+#define DWC3_EP_TRANSFER_STARTED BIT(8)
/* This last one is specific to EP0 */
-#define DWC3_EP0_DIR_IN (1 << 31)
+#define DWC3_EP0_DIR_IN BIT(31)
/*
* IMPORTANT: we *know* we have 256 TRBs in our @trb_pool, so we will
@@ -638,13 +640,13 @@ enum dwc3_link_state {
#define DWC3_TRB_STS_XFER_IN_PROG 4
/* TRB Control */
-#define DWC3_TRB_CTRL_HWO (1 << 0)
-#define DWC3_TRB_CTRL_LST (1 << 1)
-#define DWC3_TRB_CTRL_CHN (1 << 2)
-#define DWC3_TRB_CTRL_CSP (1 << 3)
+#define DWC3_TRB_CTRL_HWO BIT(0)
+#define DWC3_TRB_CTRL_LST BIT(1)
+#define DWC3_TRB_CTRL_CHN BIT(2)
+#define DWC3_TRB_CTRL_CSP BIT(3)
#define DWC3_TRB_CTRL_TRBCTL(n) (((n) & 0x3f) << 4)
-#define DWC3_TRB_CTRL_ISP_IMI (1 << 10)
-#define DWC3_TRB_CTRL_IOC (1 << 11)
+#define DWC3_TRB_CTRL_ISP_IMI BIT(10)
+#define DWC3_TRB_CTRL_IOC BIT(11)
#define DWC3_TRB_CTRL_SID_SOFN(n) (((n) & 0xffff) << 14)
#define DWC3_TRBCTL_TYPE(n) ((n) & (0x3f << 4))
@@ -746,6 +748,7 @@ struct dwc3_request {
unsigned direction:1;
unsigned mapped:1;
unsigned started:1;
+ unsigned zero:1;
};
/*
@@ -758,15 +761,11 @@ struct dwc3_scratchpad_array {
/**
* struct dwc3 - representation of our controller
- * @ctrl_req: usb control request which is used for ep0
+ * @drd_work: workqueue used for role swapping
* @ep0_trb: trb which is used for the ctrl_req
- * @ep0_bounce: bounce buffer for ep0
- * @zlp_buf: used when request->zero is set
* @setup_buf: used while processing STD USB requests
- * @ctrl_req_addr: dma address of ctrl_req
* @ep0_trb: dma address of ep0_trb
* @ep0_usb_req: dummy req used while handling STD USB requests
- * @ep0_bounce_addr: dma address of ep0_bounce
* @scratch_addr: dma address of scratchbuf
* @ep0_in_setup: one control transfer has completed and the setup phase is entered
* @lock: for synchronizing
@@ -784,6 +783,10 @@ struct dwc3_scratchpad_array {
* @maximum_speed: maximum speed requested (mainly for testing purposes)
* @revision: revision register contents
* @dr_mode: requested mode of operation
+ * @current_dr_role: current role of operation when in dual-role mode
+ * @desired_dr_role: desired role of operation when in dual-role mode
+ * @edev: extcon handle
+ * @edev_nb: extcon notifier
* @hsphy_mode: UTMI phy mode, one of following:
* - USBPHY_INTERFACE_MODE_UTMI
* - USBPHY_INTERFACE_MODE_UTMIW
@@ -799,8 +802,7 @@ struct dwc3_scratchpad_array {
* @u2pel: parameter from Set SEL request.
* @u1sel: parameter from Set SEL request.
* @u1pel: parameter from Set SEL request.
- * @num_out_eps: number of out endpoints
- * @num_in_eps: number of in endpoints
+ * @num_eps: number of endpoints
* @ep0_next_event: hold the next expected event
* @ep0state: state of endpoint zero
* @link_state: link state
@@ -858,17 +860,13 @@ struct dwc3_scratchpad_array {
* increments or 0 to disable.
*/
struct dwc3 {
- struct usb_ctrlrequest *ctrl_req;
+ struct work_struct drd_work;
struct dwc3_trb *ep0_trb;
void *bounce;
- void *ep0_bounce;
- void *zlp_buf;
void *scratchbuf;
u8 *setup_buf;
- dma_addr_t ctrl_req_addr;
dma_addr_t ep0_trb_addr;
dma_addr_t bounce_addr;
- dma_addr_t ep0_bounce_addr;
dma_addr_t scratch_addr;
struct dwc3_request ep0_usb_req;
struct completion ep0_in_setup;
@@ -900,6 +898,10 @@ struct dwc3 {
size_t regs_size;
enum usb_dr_mode dr_mode;
+ u32 current_dr_role;
+ u32 desired_dr_role;
+ struct extcon_dev *edev;
+ struct notifier_block edev_nb;
enum usb_phy_interface hsphy_mode;
u32 fladj;
@@ -960,8 +962,7 @@ struct dwc3 {
u8 speed;
- u8 num_out_eps;
- u8 num_in_eps;
+ u8 num_eps;
struct dwc3_hwparams hwparams;
struct dentry *root;
@@ -1010,7 +1011,7 @@ struct dwc3 {
u16 imod_interval;
};
-/* -------------------------------------------------------------------------- */
+#define work_to_dwc(w) (container_of((w), struct dwc3, drd_work))
/* -------------------------------------------------------------------------- */
@@ -1054,13 +1055,13 @@ struct dwc3_event_depevt {
u32 status:4;
/* Within XferNotReady */
-#define DEPEVT_STATUS_TRANSFER_ACTIVE (1 << 3)
+#define DEPEVT_STATUS_TRANSFER_ACTIVE BIT(3)
/* Within XferComplete */
-#define DEPEVT_STATUS_BUSERR (1 << 0)
-#define DEPEVT_STATUS_SHORT (1 << 1)
-#define DEPEVT_STATUS_IOC (1 << 2)
-#define DEPEVT_STATUS_LST (1 << 3)
+#define DEPEVT_STATUS_BUSERR BIT(0)
+#define DEPEVT_STATUS_SHORT BIT(1)
+#define DEPEVT_STATUS_IOC BIT(2)
+#define DEPEVT_STATUS_LST BIT(3)
/* Stream event only */
#define DEPEVT_STREAMEVT_FOUND 1
@@ -1221,6 +1222,16 @@ static inline int dwc3_send_gadget_generic_command(struct dwc3 *dwc,
{ return 0; }
#endif
+#if IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE)
+int dwc3_drd_init(struct dwc3 *dwc);
+void dwc3_drd_exit(struct dwc3 *dwc);
+#else
+static inline int dwc3_drd_init(struct dwc3 *dwc)
+{ return 0; }
+static inline void dwc3_drd_exit(struct dwc3 *dwc)
+{ }
+#endif
+
/* power management interface */
#if !IS_ENABLED(CONFIG_USB_DWC3_HOST)
int dwc3_gadget_suspend(struct dwc3 *dwc);
diff --git a/drivers/usb/dwc3/debug.h b/drivers/usb/dwc3/debug.h
index eeed4ffd8131..cb2d8d3f7f3d 100644
--- a/drivers/usb/dwc3/debug.h
+++ b/drivers/usb/dwc3/debug.h
@@ -124,6 +124,34 @@ dwc3_gadget_link_string(enum dwc3_link_state link_state)
}
}
+/**
+ * dwc3_trb_type_string - returns TRB type as a string
+ * @type: the type of the TRB
+ */
+static inline const char *dwc3_trb_type_string(unsigned int type)
+{
+ switch (type) {
+ case DWC3_TRBCTL_NORMAL:
+ return "normal";
+ case DWC3_TRBCTL_CONTROL_SETUP:
+ return "setup";
+ case DWC3_TRBCTL_CONTROL_STATUS2:
+ return "status2";
+ case DWC3_TRBCTL_CONTROL_STATUS3:
+ return "status3";
+ case DWC3_TRBCTL_CONTROL_DATA:
+ return "data";
+ case DWC3_TRBCTL_ISOCHRONOUS_FIRST:
+ return "isoc-first";
+ case DWC3_TRBCTL_ISOCHRONOUS:
+ return "isoc";
+ case DWC3_TRBCTL_LINK_TRB:
+ return "link";
+ default:
+ return "UNKNOWN";
+ }
+}
+
static inline const char *dwc3_ep0_state_string(enum dwc3_ep0_state state)
{
switch (state) {
diff --git a/drivers/usb/dwc3/debugfs.c b/drivers/usb/dwc3/debugfs.c
index 31926dda43c9..7be963dd8e3b 100644
--- a/drivers/usb/dwc3/debugfs.c
+++ b/drivers/usb/dwc3/debugfs.c
@@ -300,7 +300,7 @@ static int dwc3_mode_show(struct seq_file *s, void *unused)
seq_printf(s, "device\n");
break;
case DWC3_GCTL_PRTCAP_OTG:
- seq_printf(s, "OTG\n");
+ seq_printf(s, "otg\n");
break;
default:
seq_printf(s, "UNKNOWN %08x\n", DWC3_GCTL_PRTCAP(reg));
@@ -319,7 +319,6 @@ static ssize_t dwc3_mode_write(struct file *file,
{
struct seq_file *s = file->private_data;
struct dwc3 *dwc = s->private;
- unsigned long flags;
u32 mode = 0;
char buf[32];
@@ -327,19 +326,16 @@ static ssize_t dwc3_mode_write(struct file *file,
return -EFAULT;
if (!strncmp(buf, "host", 4))
- mode |= DWC3_GCTL_PRTCAP_HOST;
+ mode = DWC3_GCTL_PRTCAP_HOST;
if (!strncmp(buf, "device", 6))
- mode |= DWC3_GCTL_PRTCAP_DEVICE;
+ mode = DWC3_GCTL_PRTCAP_DEVICE;
if (!strncmp(buf, "otg", 3))
- mode |= DWC3_GCTL_PRTCAP_OTG;
+ mode = DWC3_GCTL_PRTCAP_OTG;
+
+ dwc3_set_mode(dwc, mode);
- if (mode) {
- spin_lock_irqsave(&dwc->lock, flags);
- dwc3_set_mode(dwc, mode);
- spin_unlock_irqrestore(&dwc->lock, flags);
- }
return count;
}
@@ -446,52 +442,7 @@ static int dwc3_link_state_show(struct seq_file *s, void *unused)
state = DWC3_DSTS_USBLNKST(reg);
spin_unlock_irqrestore(&dwc->lock, flags);
- switch (state) {
- case DWC3_LINK_STATE_U0:
- seq_printf(s, "U0\n");
- break;
- case DWC3_LINK_STATE_U1:
- seq_printf(s, "U1\n");
- break;
- case DWC3_LINK_STATE_U2:
- seq_printf(s, "U2\n");
- break;
- case DWC3_LINK_STATE_U3:
- seq_printf(s, "U3\n");
- break;
- case DWC3_LINK_STATE_SS_DIS:
- seq_printf(s, "SS.Disabled\n");
- break;
- case DWC3_LINK_STATE_RX_DET:
- seq_printf(s, "Rx.Detect\n");
- break;
- case DWC3_LINK_STATE_SS_INACT:
- seq_printf(s, "SS.Inactive\n");
- break;
- case DWC3_LINK_STATE_POLL:
- seq_printf(s, "Poll\n");
- break;
- case DWC3_LINK_STATE_RECOV:
- seq_printf(s, "Recovery\n");
- break;
- case DWC3_LINK_STATE_HRESET:
- seq_printf(s, "HRESET\n");
- break;
- case DWC3_LINK_STATE_CMPLY:
- seq_printf(s, "Compliance\n");
- break;
- case DWC3_LINK_STATE_LPBK:
- seq_printf(s, "Loopback\n");
- break;
- case DWC3_LINK_STATE_RESET:
- seq_printf(s, "Reset\n");
- break;
- case DWC3_LINK_STATE_RESUME:
- seq_printf(s, "Resume\n");
- break;
- default:
- seq_printf(s, "UNKNOWN %d\n", state);
- }
+ seq_printf(s, "%s\n", dwc3_gadget_link_string(state));
return 0;
}
@@ -689,30 +640,6 @@ out:
return 0;
}
-static inline const char *dwc3_trb_type_string(struct dwc3_trb *trb)
-{
- switch (DWC3_TRBCTL_TYPE(trb->ctrl)) {
- case DWC3_TRBCTL_NORMAL:
- return "normal";
- case DWC3_TRBCTL_CONTROL_SETUP:
- return "control-setup";
- case DWC3_TRBCTL_CONTROL_STATUS2:
- return "control-status2";
- case DWC3_TRBCTL_CONTROL_STATUS3:
- return "control-status3";
- case DWC3_TRBCTL_CONTROL_DATA:
- return "control-data";
- case DWC3_TRBCTL_ISOCHRONOUS_FIRST:
- return "isoc-first";
- case DWC3_TRBCTL_ISOCHRONOUS:
- return "isoc";
- case DWC3_TRBCTL_LINK_TRB:
- return "link";
- default:
- return "UNKNOWN";
- }
-}
-
static int dwc3_ep_trb_ring_show(struct seq_file *s, void *unused)
{
struct dwc3_ep *dep = s->private;
@@ -733,10 +660,11 @@ static int dwc3_ep_trb_ring_show(struct seq_file *s, void *unused)
for (i = 0; i < DWC3_TRB_NUM; i++) {
struct dwc3_trb *trb = &dep->trb_pool[i];
+ unsigned int type = DWC3_TRBCTL_TYPE(trb->ctrl);
seq_printf(s, "%08x%08x,%d,%s,%d,%d,%d,%d,%d,%d\n",
trb->bph, trb->bpl, trb->size,
- dwc3_trb_type_string(trb),
+ dwc3_trb_type_string(type),
!!(trb->ctrl & DWC3_TRB_CTRL_IOC),
!!(trb->ctrl & DWC3_TRB_CTRL_ISP_IMI),
!!(trb->ctrl & DWC3_TRB_CTRL_CSP),
@@ -822,19 +750,8 @@ static void dwc3_debugfs_create_endpoint_dirs(struct dwc3 *dwc,
{
int i;
- for (i = 0; i < dwc->num_in_eps; i++) {
- u8 epnum = (i << 1) | 1;
- struct dwc3_ep *dep = dwc->eps[epnum];
-
- if (!dep)
- continue;
-
- dwc3_debugfs_create_endpoint_dir(dep, parent);
- }
-
- for (i = 0; i < dwc->num_out_eps; i++) {
- u8 epnum = (i << 1);
- struct dwc3_ep *dep = dwc->eps[epnum];
+ for (i = 0; i < dwc->num_eps; i++) {
+ struct dwc3_ep *dep = dwc->eps[i];
if (!dep)
continue;
diff --git a/drivers/usb/dwc3/drd.c b/drivers/usb/dwc3/drd.c
new file mode 100644
index 000000000000..2765c51c7ef5
--- /dev/null
+++ b/drivers/usb/dwc3/drd.c
@@ -0,0 +1,85 @@
+/**
+ * drd.c - DesignWare USB3 DRD Controller Dual-role support
+ *
+ * Copyright (C) 2017 Texas Instruments Incorporated - http://www.ti.com
+ *
+ * Authors: Roger Quadros <rogerq@ti.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 of
+ * the License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/extcon.h>
+
+#include "debug.h"
+#include "core.h"
+#include "gadget.h"
+
+static void dwc3_drd_update(struct dwc3 *dwc)
+{
+ int id;
+
+ id = extcon_get_state(dwc->edev, EXTCON_USB_HOST);
+ if (id < 0)
+ id = 0;
+
+ dwc3_set_mode(dwc, id ?
+ DWC3_GCTL_PRTCAP_HOST :
+ DWC3_GCTL_PRTCAP_DEVICE);
+}
+
+static int dwc3_drd_notifier(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct dwc3 *dwc = container_of(nb, struct dwc3, edev_nb);
+
+ dwc3_set_mode(dwc, event ?
+ DWC3_GCTL_PRTCAP_HOST :
+ DWC3_GCTL_PRTCAP_DEVICE);
+
+ return NOTIFY_DONE;
+}
+
+int dwc3_drd_init(struct dwc3 *dwc)
+{
+ int ret;
+
+ if (dwc->dev->of_node) {
+ if (of_property_read_bool(dwc->dev->of_node, "extcon"))
+ dwc->edev = extcon_get_edev_by_phandle(dwc->dev, 0);
+
+ if (IS_ERR(dwc->edev))
+ return PTR_ERR(dwc->edev);
+
+ dwc->edev_nb.notifier_call = dwc3_drd_notifier;
+ ret = extcon_register_notifier(dwc->edev, EXTCON_USB_HOST,
+ &dwc->edev_nb);
+ if (ret < 0) {
+ dev_err(dwc->dev, "couldn't register cable notifier\n");
+ return ret;
+ }
+ }
+
+ dwc3_drd_update(dwc);
+
+ return 0;
+}
+
+void dwc3_drd_exit(struct dwc3 *dwc)
+{
+ extcon_unregister_notifier(dwc->edev, EXTCON_USB_HOST,
+ &dwc->edev_nb);
+
+ dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_DEVICE);
+ flush_work(&dwc->drd_work);
+ dwc3_gadget_exit(dwc);
+}
diff --git a/drivers/usb/dwc3/dwc3-exynos.c b/drivers/usb/dwc3/dwc3-exynos.c
index 1515d45ebcec..98f74ff66120 100644
--- a/drivers/usb/dwc3/dwc3-exynos.c
+++ b/drivers/usb/dwc3/dwc3-exynos.c
@@ -147,53 +147,53 @@ static int dwc3_exynos_probe(struct platform_device *pdev)
exynos->vdd33 = devm_regulator_get(dev, "vdd33");
if (IS_ERR(exynos->vdd33)) {
ret = PTR_ERR(exynos->vdd33);
- goto err2;
+ goto vdd33_err;
}
ret = regulator_enable(exynos->vdd33);
if (ret) {
dev_err(dev, "Failed to enable VDD33 supply\n");
- goto err2;
+ goto vdd33_err;
}
exynos->vdd10 = devm_regulator_get(dev, "vdd10");
if (IS_ERR(exynos->vdd10)) {
ret = PTR_ERR(exynos->vdd10);
- goto err3;
+ goto vdd10_err;
}
ret = regulator_enable(exynos->vdd10);
if (ret) {
dev_err(dev, "Failed to enable VDD10 supply\n");
- goto err3;
+ goto vdd10_err;
}
ret = dwc3_exynos_register_phys(exynos);
if (ret) {
dev_err(dev, "couldn't register PHYs\n");
- goto err4;
+ goto phys_err;
}
if (node) {
ret = of_platform_populate(node, NULL, NULL, dev);
if (ret) {
dev_err(dev, "failed to add dwc3 core\n");
- goto err5;
+ goto populate_err;
}
} else {
dev_err(dev, "no device node, failed to add dwc3 core\n");
ret = -ENODEV;
- goto err5;
+ goto populate_err;
}
return 0;
-err5:
+populate_err:
platform_device_unregister(exynos->usb2_phy);
platform_device_unregister(exynos->usb3_phy);
-err4:
+phys_err:
regulator_disable(exynos->vdd10);
-err3:
+vdd10_err:
regulator_disable(exynos->vdd33);
-err2:
+vdd33_err:
clk_disable_unprepare(exynos->axius_clk);
axius_clk_err:
clk_disable_unprepare(exynos->susp_clk);
diff --git a/drivers/usb/dwc3/dwc3-omap.c b/drivers/usb/dwc3/dwc3-omap.c
index f8d0747810e7..98926504b55b 100644
--- a/drivers/usb/dwc3/dwc3-omap.c
+++ b/drivers/usb/dwc3/dwc3-omap.c
@@ -79,40 +79,40 @@
#define USBOTGSS_DEBUG_OFFSET 0x0600
/* SYSCONFIG REGISTER */
-#define USBOTGSS_SYSCONFIG_DMADISABLE (1 << 16)
+#define USBOTGSS_SYSCONFIG_DMADISABLE BIT(16)
/* IRQ_EOI REGISTER */
-#define USBOTGSS_IRQ_EOI_LINE_NUMBER (1 << 0)
+#define USBOTGSS_IRQ_EOI_LINE_NUMBER BIT(0)
/* IRQS0 BITS */
-#define USBOTGSS_IRQO_COREIRQ_ST (1 << 0)
+#define USBOTGSS_IRQO_COREIRQ_ST BIT(0)
/* IRQMISC BITS */
-#define USBOTGSS_IRQMISC_DMADISABLECLR (1 << 17)
-#define USBOTGSS_IRQMISC_OEVT (1 << 16)
-#define USBOTGSS_IRQMISC_DRVVBUS_RISE (1 << 13)
-#define USBOTGSS_IRQMISC_CHRGVBUS_RISE (1 << 12)
-#define USBOTGSS_IRQMISC_DISCHRGVBUS_RISE (1 << 11)
-#define USBOTGSS_IRQMISC_IDPULLUP_RISE (1 << 8)
-#define USBOTGSS_IRQMISC_DRVVBUS_FALL (1 << 5)
-#define USBOTGSS_IRQMISC_CHRGVBUS_FALL (1 << 4)
-#define USBOTGSS_IRQMISC_DISCHRGVBUS_FALL (1 << 3)
-#define USBOTGSS_IRQMISC_IDPULLUP_FALL (1 << 0)
+#define USBOTGSS_IRQMISC_DMADISABLECLR BIT(17)
+#define USBOTGSS_IRQMISC_OEVT BIT(16)
+#define USBOTGSS_IRQMISC_DRVVBUS_RISE BIT(13)
+#define USBOTGSS_IRQMISC_CHRGVBUS_RISE BIT(12)
+#define USBOTGSS_IRQMISC_DISCHRGVBUS_RISE BIT(11)
+#define USBOTGSS_IRQMISC_IDPULLUP_RISE BIT(8)
+#define USBOTGSS_IRQMISC_DRVVBUS_FALL BIT(5)
+#define USBOTGSS_IRQMISC_CHRGVBUS_FALL BIT(4)
+#define USBOTGSS_IRQMISC_DISCHRGVBUS_FALL BIT(3)
+#define USBOTGSS_IRQMISC_IDPULLUP_FALL BIT(0)
/* UTMI_OTG_STATUS REGISTER */
-#define USBOTGSS_UTMI_OTG_STATUS_DRVVBUS (1 << 5)
-#define USBOTGSS_UTMI_OTG_STATUS_CHRGVBUS (1 << 4)
-#define USBOTGSS_UTMI_OTG_STATUS_DISCHRGVBUS (1 << 3)
-#define USBOTGSS_UTMI_OTG_STATUS_IDPULLUP (1 << 0)
+#define USBOTGSS_UTMI_OTG_STATUS_DRVVBUS BIT(5)
+#define USBOTGSS_UTMI_OTG_STATUS_CHRGVBUS BIT(4)
+#define USBOTGSS_UTMI_OTG_STATUS_DISCHRGVBUS BIT(3)
+#define USBOTGSS_UTMI_OTG_STATUS_IDPULLUP BIT(0)
/* UTMI_OTG_CTRL REGISTER */
-#define USBOTGSS_UTMI_OTG_CTRL_SW_MODE (1 << 31)
-#define USBOTGSS_UTMI_OTG_CTRL_POWERPRESENT (1 << 9)
-#define USBOTGSS_UTMI_OTG_CTRL_TXBITSTUFFENABLE (1 << 8)
-#define USBOTGSS_UTMI_OTG_CTRL_IDDIG (1 << 4)
-#define USBOTGSS_UTMI_OTG_CTRL_SESSEND (1 << 3)
-#define USBOTGSS_UTMI_OTG_CTRL_SESSVALID (1 << 2)
-#define USBOTGSS_UTMI_OTG_CTRL_VBUSVALID (1 << 1)
+#define USBOTGSS_UTMI_OTG_CTRL_SW_MODE BIT(31)
+#define USBOTGSS_UTMI_OTG_CTRL_POWERPRESENT BIT(9)
+#define USBOTGSS_UTMI_OTG_CTRL_TXBITSTUFFENABLE BIT(8)
+#define USBOTGSS_UTMI_OTG_CTRL_IDDIG BIT(4)
+#define USBOTGSS_UTMI_OTG_CTRL_SESSEND BIT(3)
+#define USBOTGSS_UTMI_OTG_CTRL_SESSVALID BIT(2)
+#define USBOTGSS_UTMI_OTG_CTRL_VBUSVALID BIT(1)
struct dwc3_omap {
struct device *dev;
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index cce0a220b6b0..a15ec71d0423 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -125,8 +125,10 @@ static int dwc3_pci_quirks(struct dwc3_pci *dwc)
if (pdev->device == PCI_DEVICE_ID_INTEL_BYT) {
struct gpio_desc *gpio;
- acpi_dev_add_driver_gpios(ACPI_COMPANION(&pdev->dev),
+ ret = devm_acpi_dev_add_driver_gpios(&pdev->dev,
acpi_dwc3_byt_gpios);
+ if (ret)
+ dev_dbg(&pdev->dev, "failed to add mapping table\n");
/*
* These GPIOs will turn on the USB2 PHY. Note that we have to
@@ -242,7 +244,6 @@ static void dwc3_pci_remove(struct pci_dev *pci)
device_init_wakeup(&pci->dev, false);
pm_runtime_get(&pci->dev);
- acpi_dev_remove_driver_gpios(ACPI_COMPANION(&pci->dev));
platform_device_unregister(dwc->dwc3);
}
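The PCI glue above switches to devm_acpi_dev_add_driver_gpios(), so the matching acpi_dev_remove_driver_gpios() call in dwc3_pci_remove() can simply be dropped: the devres core releases the mapping when the driver unbinds. A generic sketch of the pattern, with an illustrative mapping table that is not taken from this patch:

	#include <linux/acpi.h>

	static const struct acpi_gpio_params example_gpio = { 0, 0, false };

	static const struct acpi_gpio_mapping example_acpi_gpios[] = {
		{ "reset-gpios", &example_gpio, 1 },
		{ }
	};

	static int example_probe(struct device *dev)
	{
		int ret;

		/* mapping is released automatically when the driver unbinds */
		ret = devm_acpi_dev_add_driver_gpios(dev, example_acpi_gpios);
		if (ret)
			dev_dbg(dev, "failed to add GPIO mapping table\n");

		return 0;
	}
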
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
index e689cede9b0e..a78c78e7a8c3 100644
--- a/drivers/usb/dwc3/ep0.c
+++ b/drivers/usb/dwc3/ep0.c
@@ -39,14 +39,13 @@ static void __dwc3_ep0_do_control_status(struct dwc3 *dwc, struct dwc3_ep *dep);
static void __dwc3_ep0_do_control_data(struct dwc3 *dwc,
struct dwc3_ep *dep, struct dwc3_request *req);
-static void dwc3_ep0_prepare_one_trb(struct dwc3 *dwc, u8 epnum,
+static void dwc3_ep0_prepare_one_trb(struct dwc3_ep *dep,
dma_addr_t buf_dma, u32 len, u32 type, bool chain)
{
struct dwc3_trb *trb;
- struct dwc3_ep *dep;
-
- dep = dwc->eps[epnum];
+ struct dwc3 *dwc;
+ dwc = dep->dwc;
trb = &dwc->ep0_trb[dep->trb_enqueue];
if (chain)
@@ -69,16 +68,17 @@ static void dwc3_ep0_prepare_one_trb(struct dwc3 *dwc, u8 epnum,
trace_dwc3_prepare_trb(dep, trb);
}
-static int dwc3_ep0_start_trans(struct dwc3 *dwc, u8 epnum)
+static int dwc3_ep0_start_trans(struct dwc3_ep *dep)
{
struct dwc3_gadget_ep_cmd_params params;
- struct dwc3_ep *dep;
+ struct dwc3 *dwc;
int ret;
- dep = dwc->eps[epnum];
if (dep->flags & DWC3_EP_BUSY)
return 0;
+ dwc = dep->dwc;
+
memset(&params, 0, sizeof(params));
params.param0 = upper_32_bits(dwc->ep0_trb_addr);
params.param1 = lower_32_bits(dwc->ep0_trb_addr);
@@ -279,13 +279,15 @@ int dwc3_gadget_ep0_set_halt(struct usb_ep *ep, int value)
void dwc3_ep0_out_start(struct dwc3 *dwc)
{
+ struct dwc3_ep *dep;
int ret;
complete(&dwc->ep0_in_setup);
- dwc3_ep0_prepare_one_trb(dwc, 0, dwc->ctrl_req_addr, 8,
+ dep = dwc->eps[0];
+ dwc3_ep0_prepare_one_trb(dep, dwc->ep0_trb_addr, 8,
DWC3_TRBCTL_CONTROL_SETUP, false);
- ret = dwc3_ep0_start_trans(dwc, 0);
+ ret = dwc3_ep0_start_trans(dep);
WARN_ON(ret < 0);
}
@@ -794,7 +796,7 @@ static int dwc3_ep0_std_request(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
static void dwc3_ep0_inspect_setup(struct dwc3 *dwc,
const struct dwc3_event_depevt *event)
{
- struct usb_ctrlrequest *ctrl = dwc->ctrl_req;
+ struct usb_ctrlrequest *ctrl = (void *) dwc->ep0_trb;
int ret = -EINVAL;
u32 len;
@@ -834,7 +836,6 @@ static void dwc3_ep0_complete_data(struct dwc3 *dwc,
struct usb_request *ur;
struct dwc3_trb *trb;
struct dwc3_ep *ep0;
- unsigned transfer_size = 0;
unsigned maxp;
unsigned remaining_ur_length;
void *buf;
@@ -847,9 +848,7 @@ static void dwc3_ep0_complete_data(struct dwc3 *dwc,
ep0 = dwc->eps[0];
dwc->ep0_next_event = DWC3_EP0_NRDY_STATUS;
-
trb = dwc->ep0_trb;
-
trace_dwc3_complete_trb(ep0, trb);
r = next_request(&ep0->pending_list);
@@ -870,58 +869,23 @@ static void dwc3_ep0_complete_data(struct dwc3 *dwc,
remaining_ur_length = ur->length;
length = trb->size & DWC3_TRB_SIZE_MASK;
-
maxp = ep0->endpoint.maxpacket;
-
- if (dwc->ep0_bounced) {
- /*
- * Handle the first TRB before handling the bounce buffer if
- * the request length is greater than the bounce buffer size
- */
- if (ur->length > DWC3_EP0_BOUNCE_SIZE) {
- transfer_size = ALIGN(ur->length - maxp, maxp);
- transferred = transfer_size - length;
- buf = (u8 *)buf + transferred;
- ur->actual += transferred;
- remaining_ur_length -= transferred;
-
- trb++;
- length = trb->size & DWC3_TRB_SIZE_MASK;
-
- ep0->trb_enqueue = 0;
- }
-
- transfer_size = roundup((ur->length - transfer_size),
- maxp);
-
- transferred = min_t(u32, remaining_ur_length,
- transfer_size - length);
- memcpy(buf, dwc->ep0_bounce, transferred);
- } else {
- transferred = ur->length - length;
- }
-
+ transferred = ur->length - length;
ur->actual += transferred;
- if ((epnum & 1) && ur->actual < ur->length) {
- /* for some reason we did not get everything out */
+ if ((IS_ALIGNED(ur->length, ep0->endpoint.maxpacket) &&
+ ur->length && ur->zero) || dwc->ep0_bounced) {
+ trb++;
+ trb->ctrl &= ~DWC3_TRB_CTRL_HWO;
+ trace_dwc3_complete_trb(ep0, trb);
+ ep0->trb_enqueue = 0;
+ dwc->ep0_bounced = false;
+ }
+ if ((epnum & 1) && ur->actual < ur->length)
dwc3_ep0_stall_and_restart(dwc);
- } else {
+ else
dwc3_gadget_giveback(ep0, r, 0);
-
- if (IS_ALIGNED(ur->length, ep0->endpoint.maxpacket) &&
- ur->length && ur->zero) {
- int ret;
-
- dwc->ep0_next_event = DWC3_EP0_COMPLETE;
-
- dwc3_ep0_prepare_one_trb(dwc, epnum, dwc->ctrl_req_addr,
- 0, DWC3_TRBCTL_CONTROL_DATA, false);
- ret = dwc3_ep0_start_trans(dwc, epnum);
- WARN_ON(ret < 0);
- }
- }
}
static void dwc3_ep0_complete_status(struct dwc3 *dwc,
@@ -997,14 +961,13 @@ static void __dwc3_ep0_do_control_data(struct dwc3 *dwc,
req->direction = !!dep->number;
if (req->request.length == 0) {
- dwc3_ep0_prepare_one_trb(dwc, dep->number,
- dwc->ctrl_req_addr, 0,
+ dwc3_ep0_prepare_one_trb(dep, dwc->ep0_trb_addr, 0,
DWC3_TRBCTL_CONTROL_DATA, false);
- ret = dwc3_ep0_start_trans(dwc, dep->number);
+ ret = dwc3_ep0_start_trans(dep);
} else if (!IS_ALIGNED(req->request.length, dep->endpoint.maxpacket)
&& (dep->number == 0)) {
- u32 transfer_size = 0;
u32 maxpacket;
+ u32 rem;
ret = usb_gadget_map_request_by_dev(dwc->sysdev,
&req->request, dep->number);
@@ -1012,36 +975,55 @@ static void __dwc3_ep0_do_control_data(struct dwc3 *dwc,
return;
maxpacket = dep->endpoint.maxpacket;
+ rem = req->request.length % maxpacket;
+ dwc->ep0_bounced = true;
- if (req->request.length > DWC3_EP0_BOUNCE_SIZE) {
- transfer_size = ALIGN(req->request.length - maxpacket,
- maxpacket);
- dwc3_ep0_prepare_one_trb(dwc, dep->number,
- req->request.dma,
- transfer_size,
- DWC3_TRBCTL_CONTROL_DATA,
- true);
- }
-
- transfer_size = roundup((req->request.length - transfer_size),
- maxpacket);
+ /* prepare normal TRB */
+ dwc3_ep0_prepare_one_trb(dep, req->request.dma,
+ req->request.length,
+ DWC3_TRBCTL_CONTROL_DATA,
+ true);
+
+ /* Now prepare one extra TRB to align transfer size */
+ dwc3_ep0_prepare_one_trb(dep, dwc->bounce_addr,
+ maxpacket - rem,
+ DWC3_TRBCTL_CONTROL_DATA,
+ false);
+ ret = dwc3_ep0_start_trans(dep);
+ } else if (IS_ALIGNED(req->request.length, dep->endpoint.maxpacket) &&
+ req->request.length && req->request.zero) {
+ u32 maxpacket;
+ u32 rem;
- dwc->ep0_bounced = true;
+ ret = usb_gadget_map_request_by_dev(dwc->sysdev,
+ &req->request, dep->number);
+ if (ret)
+ return;
- dwc3_ep0_prepare_one_trb(dwc, dep->number,
- dwc->ep0_bounce_addr, transfer_size,
- DWC3_TRBCTL_CONTROL_DATA, false);
- ret = dwc3_ep0_start_trans(dwc, dep->number);
+ maxpacket = dep->endpoint.maxpacket;
+ rem = req->request.length % maxpacket;
+
+ /* prepare normal TRB */
+ dwc3_ep0_prepare_one_trb(dep, req->request.dma,
+ req->request.length,
+ DWC3_TRBCTL_CONTROL_DATA,
+ true);
+
+ /* Now prepare one extra TRB to align transfer size */
+ dwc3_ep0_prepare_one_trb(dep, dwc->bounce_addr,
+ 0, DWC3_TRBCTL_CONTROL_DATA,
+ false);
+ ret = dwc3_ep0_start_trans(dep);
} else {
ret = usb_gadget_map_request_by_dev(dwc->sysdev,
&req->request, dep->number);
if (ret)
return;
- dwc3_ep0_prepare_one_trb(dwc, dep->number, req->request.dma,
+ dwc3_ep0_prepare_one_trb(dep, req->request.dma,
req->request.length, DWC3_TRBCTL_CONTROL_DATA,
false);
- ret = dwc3_ep0_start_trans(dwc, dep->number);
+ ret = dwc3_ep0_start_trans(dep);
}
WARN_ON(ret < 0);
@@ -1055,9 +1037,8 @@ static int dwc3_ep0_start_control_status(struct dwc3_ep *dep)
type = dwc->three_stage_setup ? DWC3_TRBCTL_CONTROL_STATUS3
: DWC3_TRBCTL_CONTROL_STATUS2;
- dwc3_ep0_prepare_one_trb(dwc, dep->number,
- dwc->ctrl_req_addr, 0, type, false);
- return dwc3_ep0_start_trans(dwc, dep->number);
+ dwc3_ep0_prepare_one_trb(dep, dwc->ep0_trb_addr, 0, type, false);
+ return dwc3_ep0_start_trans(dep);
}
static void __dwc3_ep0_do_control_status(struct dwc3 *dwc, struct dwc3_ep *dep)
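In the rewritten __dwc3_ep0_do_control_data() above, an ep0 OUT request whose length is not a multiple of wMaxPacketSize is mapped as one chained TRB for the request buffer, followed by a second TRB of (maxpacket - rem) bytes taken from the bounce buffer, so the controller always completes a whole number of packets; the aligned-with-zero-flag case appends a zero-length TRB instead. A standalone, illustrative-only sketch of that padding arithmetic:

	#include <stdio.h>

	/* bytes of bounce-buffer padding needed to reach a packet boundary */
	static unsigned int ep0_pad_bytes(unsigned int length, unsigned int maxpacket)
	{
		unsigned int rem = length % maxpacket;

		return rem ? maxpacket - rem : 0;
	}

	int main(void)
	{
		/* a 100-byte control OUT on a 64-byte ep0 needs a 28-byte pad */
		printf("%u\n", ep0_pad_bytes(100, 64));
		return 0;
	}
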
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 79e7a3480d51..6f6f0b3be3ad 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -171,7 +171,6 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
int status)
{
struct dwc3 *dwc = dep->dwc;
- unsigned int unmap_after_complete = false;
req->started = false;
list_del(&req->list);
@@ -181,19 +180,8 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
if (req->request.status == -EINPROGRESS)
req->request.status = status;
- /*
- * NOTICE we don't want to unmap before calling ->complete() if we're
- * dealing with a bounced ep0 request. If we unmap it here, we would end
- * up overwritting the contents of req->buf and this could confuse the
- * gadget driver.
- */
- if (dwc->ep0_bounced && dep->number <= 1) {
- dwc->ep0_bounced = false;
- unmap_after_complete = true;
- } else {
- usb_gadget_unmap_request_by_dev(dwc->sysdev,
- &req->request, req->direction);
- }
+ usb_gadget_unmap_request_by_dev(dwc->sysdev,
+ &req->request, req->direction);
trace_dwc3_gadget_giveback(req);
@@ -201,10 +189,6 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
usb_gadget_giveback_request(&dep->endpoint, &req->request);
spin_lock(&dwc->lock);
- if (unmap_after_complete)
- usb_gadget_unmap_request_by_dev(dwc->sysdev,
- &req->request, req->direction);
-
if (dep->number > 1)
pm_runtime_put(dwc->dev);
}
@@ -1060,6 +1044,22 @@ static void dwc3_prepare_one_trb_linear(struct dwc3_ep *dep,
false, 0, req->request.stream_id,
req->request.short_not_ok,
req->request.no_interrupt);
+ } else if (req->request.zero && req->request.length &&
+ (IS_ALIGNED(req->request.length,dep->endpoint.maxpacket))) {
+ struct dwc3 *dwc = dep->dwc;
+ struct dwc3_trb *trb;
+
+ req->zero = true;
+
+ /* prepare normal TRB */
+ dwc3_prepare_one_trb(dep, req, true, 0);
+
+ /* Now prepare one extra TRB to handle ZLP */
+ trb = &dep->trb_pool[dep->trb_enqueue];
+ __dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, 0,
+ false, 0, req->request.stream_id,
+ req->request.short_not_ok,
+ req->request.no_interrupt);
} else {
dwc3_prepare_one_trb(dep, req, false, 0);
}
@@ -1184,8 +1184,11 @@ static void __dwc3_gadget_start_isoc(struct dwc3 *dwc,
return;
}
- /* 4 micro frames in the future */
- uf = cur_uf + dep->interval * 4;
+ /*
+ * Schedule the first trb for one interval in the future or at
+ * least 4 microframes.
+ */
+ uf = cur_uf + max_t(u32, 4, dep->interval);
__dwc3_gadget_kick_transfer(dep, uf);
}
@@ -1272,31 +1275,6 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
return ret;
}
-static void __dwc3_gadget_ep_zlp_complete(struct usb_ep *ep,
- struct usb_request *request)
-{
- dwc3_gadget_ep_free_request(ep, request);
-}
-
-static int __dwc3_gadget_ep_queue_zlp(struct dwc3 *dwc, struct dwc3_ep *dep)
-{
- struct dwc3_request *req;
- struct usb_request *request;
- struct usb_ep *ep = &dep->endpoint;
-
- request = dwc3_gadget_ep_alloc_request(ep, GFP_ATOMIC);
- if (!request)
- return -ENOMEM;
-
- request->length = 0;
- request->buf = dwc->zlp_buf;
- request->complete = __dwc3_gadget_ep_zlp_complete;
-
- req = to_dwc3_request(request);
-
- return __dwc3_gadget_ep_queue(dep, req);
-}
-
static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request,
gfp_t gfp_flags)
{
@@ -1310,17 +1288,6 @@ static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request,
spin_lock_irqsave(&dwc->lock, flags);
ret = __dwc3_gadget_ep_queue(dep, req);
-
- /*
- * Okay, here's the thing, if gadget driver has requested for a ZLP by
- * setting request->zero, instead of doing magic, we will just queue an
- * extra usb_request ourselves so that it gets handled the same way as
- * any other request.
- */
- if (ret == 0 && request->zero && request->length &&
- (request->length % ep->maxpacket == 0))
- ret = __dwc3_gadget_ep_queue_zlp(dwc, dep);
-
spin_unlock_irqrestore(&dwc->lock, flags);
return ret;
@@ -1400,7 +1367,7 @@ static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
dwc3_ep_inc_deq(dep);
}
- if (r->unaligned) {
+ if (r->unaligned || r->zero) {
trb = r->trb + r->num_pending_sgs + 1;
trb->ctrl &= ~DWC3_TRB_CTRL_HWO;
dwc3_ep_inc_deq(dep);
@@ -1411,7 +1378,7 @@ static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
trb->ctrl &= ~DWC3_TRB_CTRL_HWO;
dwc3_ep_inc_deq(dep);
- if (r->unaligned) {
+ if (r->unaligned || r->zero) {
trb = r->trb + 1;
trb->ctrl &= ~DWC3_TRB_CTRL_HWO;
dwc3_ep_inc_deq(dep);
@@ -2006,14 +1973,15 @@ static const struct usb_gadget_ops dwc3_gadget_ops = {
/* -------------------------------------------------------------------------- */
-static int dwc3_gadget_init_hw_endpoints(struct dwc3 *dwc,
- u8 num, u32 direction)
+static int dwc3_gadget_init_endpoints(struct dwc3 *dwc, u8 num)
{
struct dwc3_ep *dep;
- u8 i;
+ u8 epnum;
+
+ INIT_LIST_HEAD(&dwc->gadget.ep_list);
- for (i = 0; i < num; i++) {
- u8 epnum = (i << 1) | (direction ? 1 : 0);
+ for (epnum = 0; epnum < num; epnum++) {
+ bool direction = epnum & 1;
dep = kzalloc(sizeof(*dep), GFP_KERNEL);
if (!dep)
@@ -2021,12 +1989,12 @@ static int dwc3_gadget_init_hw_endpoints(struct dwc3 *dwc,
dep->dwc = dwc;
dep->number = epnum;
- dep->direction = !!direction;
+ dep->direction = direction;
dep->regs = dwc->regs + DWC3_DEP_BASE(epnum);
dwc->eps[epnum] = dep;
snprintf(dep->name, sizeof(dep->name), "ep%d%s", epnum >> 1,
- (epnum & 1) ? "in" : "out");
+ direction ? "in" : "out");
dep->endpoint.name = dep->name;
@@ -2053,7 +2021,7 @@ static int dwc3_gadget_init_hw_endpoints(struct dwc3 *dwc,
/* MDWIDTH is represented in bits, we need it in bytes */
mdwidth /= 8;
- size = dwc3_readl(dwc->regs, DWC3_GTXFIFOSIZ(i));
+ size = dwc3_readl(dwc->regs, DWC3_GTXFIFOSIZ(epnum >> 1));
size = DWC3_GTXFIFOSIZ_TXFDEF(size);
/* FIFO Depth is in MDWDITH bytes. Multiply */
@@ -2103,7 +2071,7 @@ static int dwc3_gadget_init_hw_endpoints(struct dwc3 *dwc,
dep->endpoint.caps.type_int = true;
}
- dep->endpoint.caps.dir_in = !!direction;
+ dep->endpoint.caps.dir_in = direction;
dep->endpoint.caps.dir_out = !direction;
INIT_LIST_HEAD(&dep->pending_list);
@@ -2113,27 +2081,6 @@ static int dwc3_gadget_init_hw_endpoints(struct dwc3 *dwc,
return 0;
}
-static int dwc3_gadget_init_endpoints(struct dwc3 *dwc)
-{
- int ret;
-
- INIT_LIST_HEAD(&dwc->gadget.ep_list);
-
- ret = dwc3_gadget_init_hw_endpoints(dwc, dwc->num_out_eps, 0);
- if (ret < 0) {
- dev_err(dwc->dev, "failed to initialize OUT endpoints\n");
- return ret;
- }
-
- ret = dwc3_gadget_init_hw_endpoints(dwc, dwc->num_in_eps, 1);
- if (ret < 0) {
- dev_err(dwc->dev, "failed to initialize IN endpoints\n");
- return ret;
- }
-
- return 0;
-}
-
static void dwc3_gadget_free_endpoints(struct dwc3 *dwc)
{
struct dwc3_ep *dep;
@@ -2197,7 +2144,7 @@ static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep,
* with one TRB pending in the ring. We need to manually clear HWO bit
* from that TRB.
*/
- if (req->unaligned && (trb->ctrl & DWC3_TRB_CTRL_HWO)) {
+ if ((req->zero || req->unaligned) && (trb->ctrl & DWC3_TRB_CTRL_HWO)) {
trb->ctrl &= ~DWC3_TRB_CTRL_HWO;
return 1;
}
@@ -2291,11 +2238,12 @@ static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep,
event, status, chain);
}
- if (req->unaligned) {
+ if (req->unaligned || req->zero) {
trb = &dep->trb_pool[dep->trb_dequeue];
ret = __dwc3_cleanup_done_trbs(dwc, dep, req, trb,
event, status, false);
req->unaligned = false;
+ req->zero = false;
}
req->request.actual = length - req->remaining;
@@ -3161,49 +3109,26 @@ int dwc3_gadget_init(struct dwc3 *dwc)
dwc->irq_gadget = irq;
- dwc->ctrl_req = dma_alloc_coherent(dwc->sysdev, sizeof(*dwc->ctrl_req),
- &dwc->ctrl_req_addr, GFP_KERNEL);
- if (!dwc->ctrl_req) {
- dev_err(dwc->dev, "failed to allocate ctrl request\n");
- ret = -ENOMEM;
- goto err0;
- }
-
dwc->ep0_trb = dma_alloc_coherent(dwc->sysdev,
sizeof(*dwc->ep0_trb) * 2,
&dwc->ep0_trb_addr, GFP_KERNEL);
if (!dwc->ep0_trb) {
dev_err(dwc->dev, "failed to allocate ep0 trb\n");
ret = -ENOMEM;
- goto err1;
+ goto err0;
}
- dwc->setup_buf = kzalloc(DWC3_EP0_BOUNCE_SIZE, GFP_KERNEL);
+ dwc->setup_buf = kzalloc(DWC3_EP0_SETUP_SIZE, GFP_KERNEL);
if (!dwc->setup_buf) {
ret = -ENOMEM;
- goto err2;
- }
-
- dwc->ep0_bounce = dma_alloc_coherent(dwc->sysdev,
- DWC3_EP0_BOUNCE_SIZE, &dwc->ep0_bounce_addr,
- GFP_KERNEL);
- if (!dwc->ep0_bounce) {
- dev_err(dwc->dev, "failed to allocate ep0 bounce buffer\n");
- ret = -ENOMEM;
- goto err3;
- }
-
- dwc->zlp_buf = kzalloc(DWC3_ZLP_BUF_SIZE, GFP_KERNEL);
- if (!dwc->zlp_buf) {
- ret = -ENOMEM;
- goto err4;
+ goto err1;
}
dwc->bounce = dma_alloc_coherent(dwc->sysdev, DWC3_BOUNCE_SIZE,
&dwc->bounce_addr, GFP_KERNEL);
if (!dwc->bounce) {
ret = -ENOMEM;
- goto err5;
+ goto err2;
}
init_completion(&dwc->ep0_in_setup);
@@ -3241,39 +3166,31 @@ int dwc3_gadget_init(struct dwc3 *dwc)
* sure we're starting from a well known location.
*/
- ret = dwc3_gadget_init_endpoints(dwc);
+ ret = dwc3_gadget_init_endpoints(dwc, dwc->num_eps);
if (ret)
- goto err6;
+ goto err3;
ret = usb_add_gadget_udc(dwc->dev, &dwc->gadget);
if (ret) {
dev_err(dwc->dev, "failed to register udc\n");
- goto err6;
+ goto err4;
}
return 0;
-err6:
- dma_free_coherent(dwc->sysdev, DWC3_BOUNCE_SIZE, dwc->bounce,
- dwc->bounce_addr);
-
-err5:
- kfree(dwc->zlp_buf);
err4:
dwc3_gadget_free_endpoints(dwc);
- dma_free_coherent(dwc->sysdev, DWC3_EP0_BOUNCE_SIZE,
- dwc->ep0_bounce, dwc->ep0_bounce_addr);
err3:
- kfree(dwc->setup_buf);
+ dma_free_coherent(dwc->sysdev, DWC3_BOUNCE_SIZE, dwc->bounce,
+ dwc->bounce_addr);
err2:
- dma_free_coherent(dwc->sysdev, sizeof(*dwc->ep0_trb) * 2,
- dwc->ep0_trb, dwc->ep0_trb_addr);
+ kfree(dwc->setup_buf);
err1:
- dma_free_coherent(dwc->sysdev, sizeof(*dwc->ctrl_req),
- dwc->ctrl_req, dwc->ctrl_req_addr);
+ dma_free_coherent(dwc->sysdev, sizeof(*dwc->ep0_trb) * 2,
+ dwc->ep0_trb, dwc->ep0_trb_addr);
err0:
return ret;
@@ -3284,22 +3201,12 @@ err0:
void dwc3_gadget_exit(struct dwc3 *dwc)
{
usb_del_gadget_udc(&dwc->gadget);
-
dwc3_gadget_free_endpoints(dwc);
-
dma_free_coherent(dwc->sysdev, DWC3_BOUNCE_SIZE, dwc->bounce,
- dwc->bounce_addr);
- dma_free_coherent(dwc->sysdev, DWC3_EP0_BOUNCE_SIZE,
- dwc->ep0_bounce, dwc->ep0_bounce_addr);
-
+ dwc->bounce_addr);
kfree(dwc->setup_buf);
- kfree(dwc->zlp_buf);
-
dma_free_coherent(dwc->sysdev, sizeof(*dwc->ep0_trb) * 2,
- dwc->ep0_trb, dwc->ep0_trb_addr);
-
- dma_free_coherent(dwc->sysdev, sizeof(*dwc->ctrl_req),
- dwc->ctrl_req, dwc->ctrl_req_addr);
+ dwc->ep0_trb, dwc->ep0_trb_addr);
}
int dwc3_gadget_suspend(struct dwc3 *dwc)
diff --git a/drivers/usb/dwc3/gadget.h b/drivers/usb/dwc3/gadget.h
index 265e223ab645..e4602d0e515b 100644
--- a/drivers/usb/dwc3/gadget.h
+++ b/drivers/usb/dwc3/gadget.h
@@ -29,16 +29,16 @@ struct dwc3;
/* DEPCFG parameter 1 */
#define DWC3_DEPCFG_INT_NUM(n) (((n) & 0x1f) << 0)
-#define DWC3_DEPCFG_XFER_COMPLETE_EN (1 << 8)
-#define DWC3_DEPCFG_XFER_IN_PROGRESS_EN (1 << 9)
-#define DWC3_DEPCFG_XFER_NOT_READY_EN (1 << 10)
-#define DWC3_DEPCFG_FIFO_ERROR_EN (1 << 11)
-#define DWC3_DEPCFG_STREAM_EVENT_EN (1 << 13)
+#define DWC3_DEPCFG_XFER_COMPLETE_EN BIT(8)
+#define DWC3_DEPCFG_XFER_IN_PROGRESS_EN BIT(9)
+#define DWC3_DEPCFG_XFER_NOT_READY_EN BIT(10)
+#define DWC3_DEPCFG_FIFO_ERROR_EN BIT(11)
+#define DWC3_DEPCFG_STREAM_EVENT_EN BIT(12)
#define DWC3_DEPCFG_BINTERVAL_M1(n) (((n) & 0xff) << 16)
-#define DWC3_DEPCFG_STREAM_CAPABLE (1 << 24)
+#define DWC3_DEPCFG_STREAM_CAPABLE BIT(24)
#define DWC3_DEPCFG_EP_NUMBER(n) (((n) & 0x1f) << 25)
-#define DWC3_DEPCFG_BULK_BASED (1 << 30)
-#define DWC3_DEPCFG_FIFO_BASED (1 << 31)
+#define DWC3_DEPCFG_BULK_BASED BIT(30)
+#define DWC3_DEPCFG_FIFO_BASED BIT(31)
/* DEPCFG parameter 0 */
#define DWC3_DEPCFG_EP_TYPE(n) (((n) & 0x3) << 1)
@@ -47,10 +47,10 @@ struct dwc3;
#define DWC3_DEPCFG_BURST_SIZE(n) (((n) & 0xf) << 22)
#define DWC3_DEPCFG_DATA_SEQ_NUM(n) ((n) << 26)
/* This applies for core versions earlier than 1.94a */
-#define DWC3_DEPCFG_IGN_SEQ_NUM (1 << 31)
+#define DWC3_DEPCFG_IGN_SEQ_NUM BIT(31)
/* These apply for core versions 1.94a and later */
#define DWC3_DEPCFG_ACTION_INIT (0 << 30)
-#define DWC3_DEPCFG_ACTION_RESTORE (1 << 30)
+#define DWC3_DEPCFG_ACTION_RESTORE BIT(30)
#define DWC3_DEPCFG_ACTION_MODIFY (2 << 30)
/* DEPXFERCFG parameter 0 */
diff --git a/drivers/usb/dwc3/trace.h b/drivers/usb/dwc3/trace.h
index 2b124f94d858..f1bd444d22a3 100644
--- a/drivers/usb/dwc3/trace.h
+++ b/drivers/usb/dwc3/trace.h
@@ -27,31 +27,6 @@
#include "core.h"
#include "debug.h"
-DECLARE_EVENT_CLASS(dwc3_log_msg,
- TP_PROTO(struct va_format *vaf),
- TP_ARGS(vaf),
- TP_STRUCT__entry(__dynamic_array(char, msg, DWC3_MSG_MAX)),
- TP_fast_assign(
- vsnprintf(__get_str(msg), DWC3_MSG_MAX, vaf->fmt, *vaf->va);
- ),
- TP_printk("%s", __get_str(msg))
-);
-
-DEFINE_EVENT(dwc3_log_msg, dwc3_gadget,
- TP_PROTO(struct va_format *vaf),
- TP_ARGS(vaf)
-);
-
-DEFINE_EVENT(dwc3_log_msg, dwc3_core,
- TP_PROTO(struct va_format *vaf),
- TP_ARGS(vaf)
-);
-
-DEFINE_EVENT(dwc3_log_msg, dwc3_ep0,
- TP_PROTO(struct va_format *vaf),
- TP_ARGS(vaf)
-);
-
DECLARE_EVENT_CLASS(dwc3_log_io,
TP_PROTO(void *base, u32 offset, u32 value),
TP_ARGS(base, offset, value),
@@ -198,7 +173,7 @@ DECLARE_EVENT_CLASS(dwc3_log_generic_cmd,
__entry->param = param;
__entry->status = status;
),
- TP_printk("cmd '%s' [%d] param %08x --> status: %s",
+ TP_printk("cmd '%s' [%x] param %08x --> status: %s",
dwc3_gadget_generic_cmd_string(__entry->cmd),
__entry->cmd, __entry->param,
dwc3_gadget_generic_cmd_status_string(__entry->status)
@@ -298,36 +273,7 @@ DECLARE_EVENT_CLASS(dwc3_log_trb,
__entry->ctrl & DWC3_TRB_CTRL_CSP ? 'S' : 's',
__entry->ctrl & DWC3_TRB_CTRL_ISP_IMI ? 'S' : 's',
__entry->ctrl & DWC3_TRB_CTRL_IOC ? 'C' : 'c',
- ({char *s;
- switch (__entry->ctrl & 0x3f0) {
- case DWC3_TRBCTL_NORMAL:
- s = "normal";
- break;
- case DWC3_TRBCTL_CONTROL_SETUP:
- s = "setup";
- break;
- case DWC3_TRBCTL_CONTROL_STATUS2:
- s = "status2";
- break;
- case DWC3_TRBCTL_CONTROL_STATUS3:
- s = "status3";
- break;
- case DWC3_TRBCTL_CONTROL_DATA:
- s = "data";
- break;
- case DWC3_TRBCTL_ISOCHRONOUS_FIRST:
- s = "isoc-first";
- break;
- case DWC3_TRBCTL_ISOCHRONOUS:
- s = "isoc";
- break;
- case DWC3_TRBCTL_LINK_TRB:
- s = "link";
- break;
- default:
- s = "UNKNOWN";
- break;
- } s; })
+ dwc3_trb_type_string(DWC3_TRBCTL_TYPE(__entry->ctrl))
)
);
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index 8ad203296079..c164d6b788c3 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -212,7 +212,7 @@ config USB_F_TCM
# this first set of drivers all depend on bulk-capable hardware.
config USB_CONFIGFS
- tristate "USB functions configurable through configfs"
+ tristate "USB Gadget functions configurable through configfs"
select USB_LIBCOMPOSITE
help
A Linux USB "gadget" can be set up through configfs.
@@ -458,8 +458,9 @@ config USB_CONFIGFS_F_TCM
UAS utilizes the USB 3.0 feature called streams support.
choice
- tristate "USB Gadget Drivers"
+ tristate "USB Gadget precomposed configurations"
default USB_ETH
+ optional
help
A Linux "Gadget Driver" talks to the USB Peripheral Controller
driver through the abstract "gadget" API. Some other operating
@@ -476,6 +477,12 @@ choice
not be able work with that controller, or might need to implement
a less common variant of a device class protocol.
+ The available choices each represent a single precomposed USB
+ gadget configuration. In the device model, each option contains
+ both the device instantiation as a child for a USB gadget
+ controller, and the relevant drivers for each function declared
+ by the device.
+
source "drivers/usb/gadget/legacy/Kconfig"
endchoice
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index a0085571824d..71dd27c0d7f2 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -246,7 +246,6 @@ EXPORT_SYMBOL_GPL(ffs_lock);
static struct ffs_dev *_ffs_find_dev(const char *name);
static struct ffs_dev *_ffs_alloc_dev(void);
-static int _ffs_name_dev(struct ffs_dev *dev, const char *name);
static void _ffs_free_dev(struct ffs_dev *dev);
static void *ffs_acquire_dev(const char *dev_name);
static void ffs_release_dev(struct ffs_data *ffs_data);
@@ -1571,14 +1570,14 @@ static void ffs_data_get(struct ffs_data *ffs)
{
ENTER();
- atomic_inc(&ffs->ref);
+ refcount_inc(&ffs->ref);
}
static void ffs_data_opened(struct ffs_data *ffs)
{
ENTER();
- atomic_inc(&ffs->ref);
+ refcount_inc(&ffs->ref);
if (atomic_add_return(1, &ffs->opened) == 1 &&
ffs->state == FFS_DEACTIVATED) {
ffs->state = FFS_CLOSING;
@@ -1590,7 +1589,7 @@ static void ffs_data_put(struct ffs_data *ffs)
{
ENTER();
- if (unlikely(atomic_dec_and_test(&ffs->ref))) {
+ if (unlikely(refcount_dec_and_test(&ffs->ref))) {
pr_info("%s(): freeing\n", __func__);
ffs_data_clear(ffs);
BUG_ON(waitqueue_active(&ffs->ev.waitq) ||
@@ -1635,7 +1634,7 @@ static struct ffs_data *ffs_data_new(void)
ENTER();
- atomic_set(&ffs->ref, 1);
+ refcount_set(&ffs->ref, 1);
atomic_set(&ffs->opened, 0);
ffs->state = FFS_READ_DESCRIPTORS;
mutex_init(&ffs->mutex);
@@ -3302,9 +3301,10 @@ static struct ffs_dev *_ffs_do_find_dev(const char *name)
{
struct ffs_dev *dev;
+ if (!name)
+ return NULL;
+
list_for_each_entry(dev, &ffs_devices, entry) {
- if (!dev->name || !name)
- continue;
if (strcmp(dev->name, name) == 0)
return dev;
}
@@ -3380,42 +3380,11 @@ static void ffs_free_inst(struct usb_function_instance *f)
kfree(opts);
}
-#define MAX_INST_NAME_LEN 40
-
static int ffs_set_inst_name(struct usb_function_instance *fi, const char *name)
{
- struct f_fs_opts *opts;
- char *ptr;
- const char *tmp;
- int name_len, ret;
-
- name_len = strlen(name) + 1;
- if (name_len > MAX_INST_NAME_LEN)
+ if (strlen(name) >= FIELD_SIZEOF(struct ffs_dev, name))
return -ENAMETOOLONG;
-
- ptr = kstrndup(name, name_len, GFP_KERNEL);
- if (!ptr)
- return -ENOMEM;
-
- opts = to_f_fs_opts(fi);
- tmp = NULL;
-
- ffs_dev_lock();
-
- tmp = opts->dev->name_allocated ? opts->dev->name : NULL;
- ret = _ffs_name_dev(opts->dev, ptr);
- if (ret) {
- kfree(ptr);
- ffs_dev_unlock();
- return ret;
- }
- opts->dev->name_allocated = true;
-
- ffs_dev_unlock();
-
- kfree(tmp);
-
- return 0;
+ return ffs_name_dev(to_f_fs_opts(fi)->dev, name);
}
static struct usb_function_instance *ffs_alloc_inst(void)
@@ -3545,32 +3514,19 @@ static struct ffs_dev *_ffs_alloc_dev(void)
return dev;
}
-/*
- * ffs_lock must be taken by the caller of this function
- * The caller is responsible for "name" being available whenever f_fs needs it
- */
-static int _ffs_name_dev(struct ffs_dev *dev, const char *name)
+int ffs_name_dev(struct ffs_dev *dev, const char *name)
{
struct ffs_dev *existing;
+ int ret = 0;
- existing = _ffs_do_find_dev(name);
- if (existing)
- return -EBUSY;
-
- dev->name = name;
-
- return 0;
-}
+ ffs_dev_lock();
-/*
- * The caller is responsible for "name" being available whenever f_fs needs it
- */
-int ffs_name_dev(struct ffs_dev *dev, const char *name)
-{
- int ret;
+ existing = _ffs_do_find_dev(name);
+ if (!existing)
+ strlcpy(dev->name, name, ARRAY_SIZE(dev->name));
+ else if (existing != dev)
+ ret = -EBUSY;
- ffs_dev_lock();
- ret = _ffs_name_dev(dev, name);
ffs_dev_unlock();
return ret;
@@ -3600,8 +3556,6 @@ EXPORT_SYMBOL_GPL(ffs_single_dev);
static void _ffs_free_dev(struct ffs_dev *dev)
{
list_del(&dev->entry);
- if (dev->name_allocated)
- kfree(dev->name);
/* Clear the private_data pointer to stop incorrect dev access */
if (dev->ffs_data)
diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c
index c3cab77181d4..a8b40d07e927 100644
--- a/drivers/usb/gadget/function/u_ether.c
+++ b/drivers/usb/gadget/function/u_ether.c
@@ -178,6 +178,7 @@ static void rx_complete(struct usb_ep *ep, struct usb_request *req);
static int
rx_submit(struct eth_dev *dev, struct usb_request *req, gfp_t gfp_flags)
{
+ struct usb_gadget *g = dev->gadget;
struct sk_buff *skb;
int retval = -ENOMEM;
size_t size = 0;
@@ -209,8 +210,11 @@ rx_submit(struct eth_dev *dev, struct usb_request *req, gfp_t gfp_flags)
*/
size += sizeof(struct ethhdr) + dev->net->mtu + RX_EXTRA;
size += dev->port_usb->header_len;
- size += out->maxpacket - 1;
- size -= size % out->maxpacket;
+
+ if (g->quirk_ep_out_aligned_size) {
+ size += out->maxpacket - 1;
+ size -= size % out->maxpacket;
+ }
if (dev->port_usb->is_fixed)
size = max_t(size_t, size, dev->port_usb->fixed_out_len);
@@ -401,13 +405,12 @@ done:
static void rx_fill(struct eth_dev *dev, gfp_t gfp_flags)
{
struct usb_request *req;
+ struct usb_request *tmp;
unsigned long flags;
/* fill unused rxq slots with some skb */
spin_lock_irqsave(&dev->req_lock, flags);
- while (!list_empty(&dev->rx_reqs)) {
- req = container_of(dev->rx_reqs.next,
- struct usb_request, list);
+ list_for_each_entry_safe(req, tmp, &dev->rx_reqs, list) {
list_del_init(&req->list);
spin_unlock_irqrestore(&dev->req_lock, flags);
@@ -527,7 +530,7 @@ static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
return NETDEV_TX_BUSY;
}
- req = container_of(dev->tx_reqs.next, struct usb_request, list);
+ req = list_first_entry(&dev->tx_reqs, struct usb_request, list);
list_del(&req->list);
/* temporarily stop TX queue when the freelist empties */
@@ -1122,6 +1125,7 @@ void gether_disconnect(struct gether *link)
{
struct eth_dev *dev = link->ioport;
struct usb_request *req;
+ struct usb_request *tmp;
WARN_ON(!dev);
if (!dev)
@@ -1138,9 +1142,7 @@ void gether_disconnect(struct gether *link)
*/
usb_ep_disable(link->in_ep);
spin_lock(&dev->req_lock);
- while (!list_empty(&dev->tx_reqs)) {
- req = container_of(dev->tx_reqs.next,
- struct usb_request, list);
+ list_for_each_entry_safe(req, tmp, &dev->tx_reqs, list) {
list_del(&req->list);
spin_unlock(&dev->req_lock);
@@ -1152,9 +1154,7 @@ void gether_disconnect(struct gether *link)
usb_ep_disable(link->out_ep);
spin_lock(&dev->req_lock);
- while (!list_empty(&dev->rx_reqs)) {
- req = container_of(dev->rx_reqs.next,
- struct usb_request, list);
+ list_for_each_entry_safe(req, tmp, &dev->rx_reqs, list) {
list_del(&req->list);
spin_unlock(&dev->req_lock);
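The u_ether changes above replace the open-coded "while (!list_empty()) + container_of(list->next, ...)" loops with list_for_each_entry_safe(), which stashes a pointer to the following entry so the current one can be unlinked inside the loop body. A minimal kernel-style sketch of the idiom, using an illustrative struct rather than anything from this patch:

	#include <linux/list.h>
	#include <linux/slab.h>

	struct example_req {
		struct list_head list;
		/* ... payload ... */
	};

	static void example_drain(struct list_head *queue)
	{
		struct example_req *req, *tmp;

		/* 'tmp' caches the next node, so deleting 'req' is safe */
		list_for_each_entry_safe(req, tmp, queue, list) {
			list_del(&req->list);
			kfree(req);
		}
	}
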
diff --git a/drivers/usb/gadget/function/u_fs.h b/drivers/usb/gadget/function/u_fs.h
index 4b6969451cdc..4378cc2fcac3 100644
--- a/drivers/usb/gadget/function/u_fs.h
+++ b/drivers/usb/gadget/function/u_fs.h
@@ -20,6 +20,7 @@
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
+#include <linux/refcount.h>
#ifdef VERBOSE_DEBUG
#ifndef pr_vdebug
@@ -39,15 +40,16 @@
struct f_fs_opts;
struct ffs_dev {
- const char *name;
- bool name_allocated;
- bool mounted;
- bool desc_ready;
- bool single;
struct ffs_data *ffs_data;
struct f_fs_opts *opts;
struct list_head entry;
+ char name[41];
+
+ bool mounted;
+ bool desc_ready;
+ bool single;
+
int (*ffs_ready_callback)(struct ffs_data *ffs);
void (*ffs_closed_callback)(struct ffs_data *ffs);
void *(*ffs_acquire_dev_callback)(struct ffs_dev *dev);
@@ -177,7 +179,7 @@ struct ffs_data {
struct completion ep0req_completion; /* P: mutex */
/* reference counter */
- atomic_t ref;
+ refcount_t ref;
/* how many files are opened (EP0 and others) */
atomic_t opened;
diff --git a/drivers/usb/gadget/function/uvc_configfs.c b/drivers/usb/gadget/function/uvc_configfs.c
index 4e037d2a7a60..844cb738bafd 100644
--- a/drivers/usb/gadget/function/uvc_configfs.c
+++ b/drivers/usb/gadget/function/uvc_configfs.c
@@ -2125,7 +2125,7 @@ static struct configfs_item_operations uvc_item_ops = {
.release = uvc_attr_release,
};
-#define UVCG_OPTS_ATTR(cname, conv, str2u, uxx, vnoc, limit) \
+#define UVCG_OPTS_ATTR(cname, aname, conv, str2u, uxx, vnoc, limit) \
static ssize_t f_uvc_opts_##cname##_show( \
struct config_item *item, char *page) \
{ \
@@ -2168,16 +2168,16 @@ end: \
return ret; \
} \
\
-UVC_ATTR(f_uvc_opts_, cname, aname)
+UVC_ATTR(f_uvc_opts_, cname, cname)
#define identity_conv(x) (x)
-UVCG_OPTS_ATTR(streaming_interval, identity_conv, kstrtou8, u8, identity_conv,
- 16);
-UVCG_OPTS_ATTR(streaming_maxpacket, le16_to_cpu, kstrtou16, u16, le16_to_cpu,
- 3072);
-UVCG_OPTS_ATTR(streaming_maxburst, identity_conv, kstrtou8, u8, identity_conv,
- 15);
+UVCG_OPTS_ATTR(streaming_interval, streaming_interval, identity_conv,
+ kstrtou8, u8, identity_conv, 16);
+UVCG_OPTS_ATTR(streaming_maxpacket, streaming_maxpacket, le16_to_cpu,
+ kstrtou16, u16, le16_to_cpu, 3072);
+UVCG_OPTS_ATTR(streaming_maxburst, streaming_maxburst, identity_conv,
+ kstrtou8, u8, identity_conv, 15);
#undef identity_conv
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
index a2c916869293..b9ca0a26cbd9 100644
--- a/drivers/usb/gadget/legacy/inode.c
+++ b/drivers/usb/gadget/legacy/inode.c
@@ -27,6 +27,7 @@
#include <linux/mmu_context.h>
#include <linux/aio.h>
#include <linux/uio.h>
+#include <linux/refcount.h>
#include <linux/device.h>
#include <linux/moduleparam.h>
@@ -114,7 +115,7 @@ enum ep0_state {
struct dev_data {
spinlock_t lock;
- atomic_t count;
+ refcount_t count;
enum ep0_state state; /* P: lock */
struct usb_gadgetfs_event event [N_EVENT];
unsigned ev_next;
@@ -150,12 +151,12 @@ struct dev_data {
static inline void get_dev (struct dev_data *data)
{
- atomic_inc (&data->count);
+ refcount_inc (&data->count);
}
static void put_dev (struct dev_data *data)
{
- if (likely (!atomic_dec_and_test (&data->count)))
+ if (likely (!refcount_dec_and_test (&data->count)))
return;
/* needs no more cleanup */
BUG_ON (waitqueue_active (&data->wait));
@@ -170,7 +171,7 @@ static struct dev_data *dev_new (void)
if (!dev)
return NULL;
dev->state = STATE_DEV_DISABLED;
- atomic_set (&dev->count, 1);
+ refcount_set (&dev->count, 1);
spin_lock_init (&dev->lock);
INIT_LIST_HEAD (&dev->epfiles);
init_waitqueue_head (&dev->wait);
@@ -190,7 +191,7 @@ enum ep_state {
struct ep_data {
struct mutex lock;
enum ep_state state;
- atomic_t count;
+ refcount_t count;
struct dev_data *dev;
/* must hold dev->lock before accessing ep or req */
struct usb_ep *ep;
@@ -205,12 +206,12 @@ struct ep_data {
static inline void get_ep (struct ep_data *data)
{
- atomic_inc (&data->count);
+ refcount_inc (&data->count);
}
static void put_ep (struct ep_data *data)
{
- if (likely (!atomic_dec_and_test (&data->count)))
+ if (likely (!refcount_dec_and_test (&data->count)))
return;
put_dev (data->dev);
/* needs no more cleanup */
@@ -1561,7 +1562,7 @@ static int activate_ep_files (struct dev_data *dev)
init_waitqueue_head (&data->wait);
strncpy (data->name, ep->name, sizeof (data->name) - 1);
- atomic_set (&data->count, 1);
+ refcount_set (&data->count, 1);
data->dev = dev;
get_dev (dev);
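Both f_fs and gadgetfs above move their object reference counters from atomic_t to refcount_t. The get/put pattern is unchanged, but refcount_t saturates rather than wrapping on overflow and warns on misuse, which turns a class of use-after-free bugs into a diagnosable splat. A minimal sketch of the pattern with an illustrative object type:

	#include <linux/refcount.h>
	#include <linux/slab.h>

	struct example_obj {
		refcount_t ref;
		/* ... */
	};

	static struct example_obj *example_new(void)
	{
		struct example_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

		if (obj)
			refcount_set(&obj->ref, 1);	/* initial reference */
		return obj;
	}

	static void example_get(struct example_obj *obj)
	{
		refcount_inc(&obj->ref);	/* saturates instead of wrapping */
	}

	static void example_put(struct example_obj *obj)
	{
		if (refcount_dec_and_test(&obj->ref))	/* last reference dropped */
			kfree(obj);
	}
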
diff --git a/drivers/usb/gadget/udc/Kconfig b/drivers/usb/gadget/udc/Kconfig
index 4b69f28a9af9..1c14c283cc47 100644
--- a/drivers/usb/gadget/udc/Kconfig
+++ b/drivers/usb/gadget/udc/Kconfig
@@ -62,8 +62,9 @@ config USB_ATMEL_USBA
The fifo_mode parameter is used to select endpoint allocation mode.
fifo_mode = 0 is used to let the driver autoconfigure the endpoints.
- In this case 2 banks are allocated for isochronous endpoints and
- only one bank is allocated for the rest of the endpoints.
+ In this case, for ep1 2 banks are allocated if it works in isochronous
+ mode and only 1 bank otherwise. For the rest of the endpoints
+ only 1 bank is allocated.
fifo_mode = 1 is a generic maximum fifo size (1024 bytes) configuration
allowing the usage of ep1 - ep6
@@ -191,6 +192,7 @@ config USB_RENESAS_USBHS_UDC
config USB_RENESAS_USB3
tristate 'Renesas USB3.0 Peripheral controller'
depends on ARCH_RENESAS || COMPILE_TEST
+ depends on EXTCON
help
Renesas USB3.0 Peripheral controller is a USB peripheral controller
that supports super, high, and full speed USB 3.0 data transfers.
@@ -253,6 +255,20 @@ config USB_MV_U3D
MARVELL PXA2128 Processor series include a super speed USB3.0 device
controller, which support super speed USB peripheral.
+config USB_SNP_CORE
+ depends on USB_AMD5536UDC
+ tristate
+ help
+ This enables core driver support for Synopsys USB 2.0 Device
+ controller.
+
+ This will be enabled when PCI or Platform driver for this UDC is
+ selected. Currently, this will be enabled by USB_SNP_UDC_PLAT or
+ USB_AMD5536UDC options.
+
+ This IP is different to the High Speed OTG IP that can be enabled
+ by selecting USB_DWC2 or USB_DWC3 options.
+
#
# Controllers available in both integrated and discrete versions
#
@@ -277,7 +293,8 @@ source "drivers/usb/gadget/udc/bdc/Kconfig"
config USB_AMD5536UDC
tristate "AMD5536 UDC"
- depends on PCI
+ depends on USB_PCI
+ select USB_SNP_CORE
help
The AMD5536 UDC is part of the AMD Geode CS5536, an x86 southbridge.
It is a USB Highspeed DMA capable USB device controller. Beside ep0
@@ -285,6 +302,9 @@ config USB_AMD5536UDC
The UDC port supports OTG operation, and may be used as a host port
if it's not being used to implement peripheral or OTG roles.
+ This UDC is based on Synopsys USB device controller IP and selects
+ CONFIG_USB_SNP_CORE option to build the core driver.
+
Say "y" to link the driver statically, or "m" to build a
dynamically linked module called "amd5536udc" and force all
gadget drivers to also be dynamically linked.
@@ -327,7 +347,7 @@ config USB_NET2272_DMA
config USB_NET2280
tristate "NetChip NET228x / PLX USB3x8x"
- depends on PCI
+ depends on USB_PCI
help
NetChip 2280 / 2282 is a PCI based USB peripheral controller which
supports both full and high speed USB 2.0 data transfers.
@@ -352,7 +372,7 @@ config USB_NET2280
config USB_GOKU
tristate "Toshiba TC86C001 'Goku-S'"
- depends on PCI
+ depends on USB_PCI
help
The Toshiba TC86C001 is a PCI device which includes controllers
for full speed USB devices, IDE, I2C, SIO, plus a USB host (OHCI).
@@ -366,7 +386,7 @@ config USB_GOKU
config USB_EG20T
tristate "Intel QUARK X1000/EG20T PCH/LAPIS Semiconductor IOH(ML7213/ML7831) UDC"
- depends on PCI
+ depends on USB_PCI
help
This is a USB device driver for EG20T PCH.
EG20T PCH is the platform controller hub that is used in Intel's
diff --git a/drivers/usb/gadget/udc/Makefile b/drivers/usb/gadget/udc/Makefile
index 98e74ed9f555..626e1f1c62da 100644
--- a/drivers/usb/gadget/udc/Makefile
+++ b/drivers/usb/gadget/udc/Makefile
@@ -10,7 +10,8 @@ obj-$(CONFIG_USB_GADGET) += udc-core.o
obj-$(CONFIG_USB_DUMMY_HCD) += dummy_hcd.o
obj-$(CONFIG_USB_NET2272) += net2272.o
obj-$(CONFIG_USB_NET2280) += net2280.o
-obj-$(CONFIG_USB_AMD5536UDC) += amd5536udc.o
+obj-$(CONFIG_USB_SNP_CORE) += amd5536udc.o
+obj-$(CONFIG_USB_AMD5536UDC) += amd5536udc_pci.o
obj-$(CONFIG_USB_PXA25X) += pxa25x_udc.o
obj-$(CONFIG_USB_PXA27X) += pxa27x_udc.o
obj-$(CONFIG_USB_GOKU) += goku_udc.o
diff --git a/drivers/usb/gadget/udc/amd5536udc.c b/drivers/usb/gadget/udc/amd5536udc.c
index ea03ca7ae29a..4ecd2f20ea48 100644
--- a/drivers/usb/gadget/udc/amd5536udc.c
+++ b/drivers/usb/gadget/udc/amd5536udc.c
@@ -11,27 +11,15 @@
*/
/*
- * The AMD5536 UDC is part of the x86 southbridge AMD Geode CS5536.
- * It is a USB Highspeed DMA capable USB device controller. Beside ep0 it
- * provides 4 IN and 4 OUT endpoints (bulk or interrupt type).
- *
- * Make sure that UDC is assigned to port 4 by BIOS settings (port can also
- * be used as host port) and UOC bits PAD_EN and APU are set (should be done
- * by BIOS init).
- *
- * UDC DMA requires 32-bit aligned buffers so DMA with gadget ether does not
- * work without updating NET_IP_ALIGN. Or PIO mode (module param "use_dma=0")
- * can be used with gadget ether.
+ * This file does the core driver implementation for the UDC that is based
+ * on Synopsys device controller IP (different than HS OTG IP) that is either
+ * connected through PCI bus or integrated to SoC platforms.
*/
-/* debug control */
-/* #define UDC_VERBOSE */
-
/* Driver strings */
-#define UDC_MOD_DESCRIPTION "AMD 5536 UDC - USB Device Controller"
+#define UDC_MOD_DESCRIPTION "Synopsys USB Device Controller"
#define UDC_DRIVER_VERSION_STRING "01.00.0206"
-/* system */
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/kernel.h>
@@ -46,23 +34,12 @@
#include <linux/ioctl.h>
#include <linux/fs.h>
#include <linux/dmapool.h>
-#include <linux/moduleparam.h>
-#include <linux/device.h>
-#include <linux/io.h>
-#include <linux/irq.h>
#include <linux/prefetch.h>
-
+#include <linux/moduleparam.h>
#include <asm/byteorder.h>
#include <asm/unaligned.h>
-
-/* gadget stack */
-#include <linux/usb/ch9.h>
-#include <linux/usb/gadget.h>
-
-/* udc specific */
#include "amd5536udc.h"
-
static void udc_tasklet_disconnect(unsigned long);
static void empty_req_queue(struct udc_ep *);
static void udc_setup_endpoints(struct udc *dev);
@@ -72,7 +49,7 @@ static void udc_free_request(struct usb_ep *usbep, struct usb_request *usbreq);
/* description */
static const char mod_desc[] = UDC_MOD_DESCRIPTION;
-static const char name[] = "amd5536udc";
+static const char name[] = "udc";
/* structure to hold endpoint function pointers */
static const struct usb_ep_ops udc_ep_ops;
@@ -208,30 +185,11 @@ static const struct {
#undef EP_INFO
};
-/* DMA usage flag */
-static bool use_dma = 1;
-/* packet per buffer dma */
-static bool use_dma_ppb = 1;
-/* with per descr. update */
-static bool use_dma_ppb_du;
/* buffer fill mode */
static int use_dma_bufferfill_mode;
-/* full speed only mode */
-static bool use_fullspeed;
/* tx buffer size for high speed */
static unsigned long hs_tx_buf = UDC_EPIN_BUFF_SIZE;
-/* module parameters */
-module_param(use_dma, bool, S_IRUGO);
-MODULE_PARM_DESC(use_dma, "true for DMA");
-module_param(use_dma_ppb, bool, S_IRUGO);
-MODULE_PARM_DESC(use_dma_ppb, "true for DMA in packet per buffer mode");
-module_param(use_dma_ppb_du, bool, S_IRUGO);
-MODULE_PARM_DESC(use_dma_ppb_du,
- "true for DMA in packet per buffer mode with descriptor update");
-module_param(use_fullspeed, bool, S_IRUGO);
-MODULE_PARM_DESC(use_fullspeed, "true for fullspeed only");
-
/*---------------------------------------------------------------------------*/
/* Prints UDC device registers and endpoint irq registers */
static void print_regs(struct udc *dev)
@@ -267,7 +225,7 @@ static void print_regs(struct udc *dev)
}
/* Masks unused interrupts */
-static int udc_mask_unused_interrupts(struct udc *dev)
+int udc_mask_unused_interrupts(struct udc *dev)
{
u32 tmp;
@@ -287,6 +245,7 @@ static int udc_mask_unused_interrupts(struct udc *dev)
return 0;
}
+EXPORT_SYMBOL_GPL(udc_mask_unused_interrupts);
/* Enables endpoint 0 interrupts */
static int udc_enable_ep0_interrupts(struct udc *dev)
@@ -306,7 +265,7 @@ static int udc_enable_ep0_interrupts(struct udc *dev)
}
/* Enables device interrupts for SET_INTF and SET_CONFIG */
-static int udc_enable_dev_setup_interrupts(struct udc *dev)
+int udc_enable_dev_setup_interrupts(struct udc *dev)
{
u32 tmp;
@@ -325,6 +284,7 @@ static int udc_enable_dev_setup_interrupts(struct udc *dev)
return 0;
}
+EXPORT_SYMBOL_GPL(udc_enable_dev_setup_interrupts);
/* Calculates fifo start of endpoint based on preceding endpoints */
static int udc_set_txfifo_addr(struct udc_ep *ep)
@@ -583,7 +543,7 @@ udc_alloc_request(struct usb_ep *usbep, gfp_t gfp)
if (ep->dma) {
/* ep0 in requests are allocated from data pool here */
- dma_desc = pci_pool_alloc(ep->dev->data_requests, gfp,
+ dma_desc = dma_pool_alloc(ep->dev->data_requests, gfp,
&req->td_phys);
if (!dma_desc) {
kfree(req);
@@ -608,27 +568,23 @@ udc_alloc_request(struct usb_ep *usbep, gfp_t gfp)
}
/* frees pci pool descriptors of a DMA chain */
-static int udc_free_dma_chain(struct udc *dev, struct udc_request *req)
+static void udc_free_dma_chain(struct udc *dev, struct udc_request *req)
{
- int ret_val = 0;
- struct udc_data_dma *td;
- struct udc_data_dma *td_last = NULL;
+ struct udc_data_dma *td = req->td_data;
unsigned int i;
+ dma_addr_t addr_next = 0x00;
+ dma_addr_t addr = (dma_addr_t)td->next;
+
DBG(dev, "free chain req = %p\n", req);
/* do not free first desc., will be done by free for request */
- td_last = req->td_data;
- td = phys_to_virt(td_last->next);
-
for (i = 1; i < req->chain_len; i++) {
- pci_pool_free(dev->data_requests, td,
- (dma_addr_t)td_last->next);
- td_last = td;
- td = phys_to_virt(td_last->next);
+ td = phys_to_virt(addr);
+ addr_next = (dma_addr_t)td->next;
+ dma_pool_free(dev->data_requests, td, addr);
+ addr = addr_next;
}
-
- return ret_val;
}
/* Frees request packet, called by gadget driver */
@@ -652,7 +608,7 @@ udc_free_request(struct usb_ep *usbep, struct usb_request *usbreq)
if (req->chain_len > 1)
udc_free_dma_chain(ep->dev, req);
- pci_pool_free(ep->dev->data_requests, req->td_data,
+ dma_pool_free(ep->dev->data_requests, req->td_data,
req->td_phys);
}
kfree(req);
@@ -847,7 +803,7 @@ static int udc_create_dma_chain(
for (i = buf_len; i < bytes; i += buf_len) {
/* create or determine next desc. */
if (create_new_chain) {
- td = pci_pool_alloc(ep->dev->data_requests,
+ td = dma_pool_alloc(ep->dev->data_requests,
gfp_flags, &dma_addr);
if (!td)
return -ENOMEM;
@@ -1507,7 +1463,7 @@ static void make_ep_lists(struct udc *dev)
}
/* Inits UDC context */
-static void udc_basic_init(struct udc *dev)
+void udc_basic_init(struct udc *dev)
{
u32 tmp;
@@ -1543,6 +1499,7 @@ static void udc_basic_init(struct udc *dev)
dev->data_ep_enabled = 0;
dev->data_ep_queued = 0;
}
+EXPORT_SYMBOL_GPL(udc_basic_init);
/* init registers at driver load time */
static int startup_registers(struct udc *dev)
@@ -3031,7 +2988,7 @@ __acquires(dev->lock)
}
/* Interrupt Service Routine, see Linux Kernel Doc for parameters */
-static irqreturn_t udc_irq(int irq, void *pdev)
+irqreturn_t udc_irq(int irq, void *pdev)
{
struct udc *dev = pdev;
u32 reg;
@@ -3083,16 +3040,18 @@ static irqreturn_t udc_irq(int irq, void *pdev)
spin_unlock(&dev->lock);
return ret_val;
}
+EXPORT_SYMBOL_GPL(udc_irq);
/* Tears down device */
-static void gadget_release(struct device *pdev)
+void gadget_release(struct device *pdev)
{
struct amd5536udc *dev = dev_get_drvdata(pdev);
kfree(dev);
}
+EXPORT_SYMBOL_GPL(gadget_release);
/* Cleanup on device remove */
-static void udc_remove(struct udc *dev)
+void udc_remove(struct udc *dev)
{
/* remove timer */
stop_timer++;
@@ -3108,9 +3067,10 @@ static void udc_remove(struct udc *dev)
del_timer_sync(&udc_pollstall_timer);
udc = NULL;
}
+EXPORT_SYMBOL_GPL(udc_remove);
/* free all the dma pools */
-static void free_dma_pools(struct udc *dev)
+void free_dma_pools(struct udc *dev)
{
dma_pool_free(dev->stp_requests, dev->ep[UDC_EP0OUT_IX].td,
dev->ep[UDC_EP0OUT_IX].td_phys);
@@ -3119,35 +3079,10 @@ static void free_dma_pools(struct udc *dev)
dma_pool_destroy(dev->stp_requests);
dma_pool_destroy(dev->data_requests);
}
-
-/* Reset all pci context */
-static void udc_pci_remove(struct pci_dev *pdev)
-{
- struct udc *dev;
-
- dev = pci_get_drvdata(pdev);
-
- usb_del_gadget_udc(&udc->gadget);
- /* gadget driver must not be registered */
- if (WARN_ON(dev->driver))
- return;
-
- /* dma pool cleanup */
- free_dma_pools(dev);
-
- /* reset controller */
- writel(AMD_BIT(UDC_DEVCFG_SOFTRESET), &dev->regs->cfg);
- free_irq(pdev->irq, dev);
- iounmap(dev->virt_addr);
- release_mem_region(pci_resource_start(pdev, 0),
- pci_resource_len(pdev, 0));
- pci_disable_device(pdev);
-
- udc_remove(dev);
-}
+EXPORT_SYMBOL_GPL(free_dma_pools);
/* create dma pools on init */
-static int init_dma_pools(struct udc *dev)
+int init_dma_pools(struct udc *dev)
{
struct udc_stp_dma *td_stp;
struct udc_data_dma *td_data;
@@ -3210,9 +3145,10 @@ err_create_dma_pool:
dev->data_requests = NULL;
return retval;
}
+EXPORT_SYMBOL_GPL(init_dma_pools);
/* general probe */
-static int udc_probe(struct udc *dev)
+int udc_probe(struct udc *dev)
{
char tmp[128];
u32 reg;
@@ -3276,137 +3212,7 @@ static int udc_probe(struct udc *dev)
finished:
return retval;
}
-
-/* Called by pci bus driver to init pci context */
-static int udc_pci_probe(
- struct pci_dev *pdev,
- const struct pci_device_id *id
-)
-{
- struct udc *dev;
- unsigned long resource;
- unsigned long len;
- int retval = 0;
-
- /* one udc only */
- if (udc) {
- dev_dbg(&pdev->dev, "already probed\n");
- return -EBUSY;
- }
-
- /* init */
- dev = kzalloc(sizeof(struct udc), GFP_KERNEL);
- if (!dev)
- return -ENOMEM;
-
- /* pci setup */
- if (pci_enable_device(pdev) < 0) {
- retval = -ENODEV;
- goto err_pcidev;
- }
-
- /* PCI resource allocation */
- resource = pci_resource_start(pdev, 0);
- len = pci_resource_len(pdev, 0);
-
- if (!request_mem_region(resource, len, name)) {
- dev_dbg(&pdev->dev, "pci device used already\n");
- retval = -EBUSY;
- goto err_memreg;
- }
-
- dev->virt_addr = ioremap_nocache(resource, len);
- if (!dev->virt_addr) {
- dev_dbg(&pdev->dev, "start address cannot be mapped\n");
- retval = -EFAULT;
- goto err_ioremap;
- }
-
- if (!pdev->irq) {
- dev_err(&pdev->dev, "irq not set\n");
- retval = -ENODEV;
- goto err_irq;
- }
-
- spin_lock_init(&dev->lock);
- /* udc csr registers base */
- dev->csr = dev->virt_addr + UDC_CSR_ADDR;
- /* dev registers base */
- dev->regs = dev->virt_addr + UDC_DEVCFG_ADDR;
- /* ep registers base */
- dev->ep_regs = dev->virt_addr + UDC_EPREGS_ADDR;
- /* fifo's base */
- dev->rxfifo = (u32 __iomem *)(dev->virt_addr + UDC_RXFIFO_ADDR);
- dev->txfifo = (u32 __iomem *)(dev->virt_addr + UDC_TXFIFO_ADDR);
-
- if (request_irq(pdev->irq, udc_irq, IRQF_SHARED, name, dev) != 0) {
- dev_dbg(&pdev->dev, "request_irq(%d) fail\n", pdev->irq);
- retval = -EBUSY;
- goto err_irq;
- }
-
- pci_set_drvdata(pdev, dev);
-
- /* chip revision for Hs AMD5536 */
- dev->chiprev = pdev->revision;
-
- pci_set_master(pdev);
- pci_try_set_mwi(pdev);
-
- /* init dma pools */
- if (use_dma) {
- retval = init_dma_pools(dev);
- if (retval != 0)
- goto err_dma;
- }
-
- dev->phys_addr = resource;
- dev->irq = pdev->irq;
- dev->pdev = pdev;
-
- /* general probing */
- if (udc_probe(dev)) {
- retval = -ENODEV;
- goto err_probe;
- }
- return 0;
-
-err_probe:
- if (use_dma)
- free_dma_pools(dev);
-err_dma:
- free_irq(pdev->irq, dev);
-err_irq:
- iounmap(dev->virt_addr);
-err_ioremap:
- release_mem_region(resource, len);
-err_memreg:
- pci_disable_device(pdev);
-err_pcidev:
- kfree(dev);
- return retval;
-}
-
-/* PCI device parameters */
-static const struct pci_device_id pci_id[] = {
- {
- PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x2096),
- .class = PCI_CLASS_SERIAL_USB_DEVICE,
- .class_mask = 0xffffffff,
- },
- {},
-};
-MODULE_DEVICE_TABLE(pci, pci_id);
-
-/* PCI functions */
-static struct pci_driver udc_pci_driver = {
- .name = (char *) name,
- .id_table = pci_id,
- .probe = udc_pci_probe,
- .remove = udc_pci_remove,
-};
-
-module_pci_driver(udc_pci_driver);
+EXPORT_SYMBOL_GPL(udc_probe);
MODULE_DESCRIPTION(UDC_MOD_DESCRIPTION);
MODULE_AUTHOR("Thomas Dahlmann");
diff --git a/drivers/usb/gadget/udc/amd5536udc.h b/drivers/usb/gadget/udc/amd5536udc.h
index 4638d707f169..fae49bf3833e 100644
--- a/drivers/usb/gadget/udc/amd5536udc.h
+++ b/drivers/usb/gadget/udc/amd5536udc.h
@@ -13,6 +13,12 @@
#ifndef AMD5536UDC_H
#define AMD5536UDC_H
+/* debug control */
+/* #define UDC_VERBOSE */
+
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+
/* various constants */
#define UDC_RDE_TIMER_SECONDS 1
#define UDC_RDE_TIMER_DIV 10
@@ -545,8 +551,8 @@ struct udc {
u32 __iomem *txfifo;
/* DMA desc pools */
- struct pci_pool *data_requests;
- struct pci_pool *stp_requests;
+ struct dma_pool *data_requests;
+ struct dma_pool *stp_requests;
/* device data */
unsigned long phys_addr;
@@ -567,6 +573,36 @@ union udc_setup_data {
struct usb_ctrlrequest request;
};
+/* Function declarations */
+int udc_enable_dev_setup_interrupts(struct udc *dev);
+int udc_mask_unused_interrupts(struct udc *dev);
+irqreturn_t udc_irq(int irq, void *pdev);
+void gadget_release(struct device *pdev);
+void udc_basic_init(struct udc *dev);
+void free_dma_pools(struct udc *dev);
+int init_dma_pools(struct udc *dev);
+void udc_remove(struct udc *dev);
+int udc_probe(struct udc *dev);
+
+/* DMA usage flag */
+static bool use_dma = 1;
+/* packet per buffer dma */
+static bool use_dma_ppb = 1;
+/* with per descr. update */
+static bool use_dma_ppb_du;
+/* full speed only mode */
+static bool use_fullspeed;
+
+/* module parameters */
+module_param(use_dma, bool, S_IRUGO);
+MODULE_PARM_DESC(use_dma, "true for DMA");
+module_param(use_dma_ppb, bool, S_IRUGO);
+MODULE_PARM_DESC(use_dma_ppb, "true for DMA in packet per buffer mode");
+module_param(use_dma_ppb_du, bool, S_IRUGO);
+MODULE_PARM_DESC(use_dma_ppb_du,
+ "true for DMA in packet per buffer mode with descriptor update");
+module_param(use_fullspeed, bool, S_IRUGO);
+MODULE_PARM_DESC(use_fullspeed, "true for fullspeed only");
/*
*---------------------------------------------------------------------------
* SET and GET bitfields in u32 values
diff --git a/drivers/usb/gadget/udc/amd5536udc_pci.c b/drivers/usb/gadget/udc/amd5536udc_pci.c
new file mode 100644
index 000000000000..2a2d0a96fe24
--- /dev/null
+++ b/drivers/usb/gadget/udc/amd5536udc_pci.c
@@ -0,0 +1,217 @@
+/*
+ * amd5536udc_pci.c -- AMD 5536 UDC high/full speed USB device controller
+ *
+ * Copyright (C) 2005-2007 AMD (http://www.amd.com)
+ * Author: Thomas Dahlmann
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+/*
+ * The AMD5536 UDC is part of the x86 southbridge AMD Geode CS5536.
+ * It is a USB high-speed, DMA-capable USB device controller. Besides ep0, it
+ * provides 4 IN and 4 OUT endpoints (bulk or interrupt type).
+ *
+ * Make sure that UDC is assigned to port 4 by BIOS settings (port can also
+ * be used as host port) and UOC bits PAD_EN and APU are set (should be done
+ * by BIOS init).
+ *
+ * UDC DMA requires 32-bit aligned buffers, so DMA with gadget ether does not
+ * work without updating NET_IP_ALIGN; alternatively, PIO mode (module param
+ * "use_dma=0") can be used with gadget ether.
+ *
+ * This file handles PCI device registration; the core driver implementation
+ * lives in amd5536udc.c.
+ *
+ * The driver is split so that the core UDC driver, which is based on the
+ * Synopsys device controller IP (different from the HS OTG IP), can be
+ * reused by UDCs integrated into SoC platforms.
+ *
+ */
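
Since the comment above explains that the split exists so the same core can serve UDCs integrated into SoCs, here is a hedged sketch of what a platform-bus glue built on the newly exported entry points (`udc_probe()`, `udc_irq()`, ...) might look like. The driver name, resource layout, and simplified lifetime handling are illustrative assumptions, not part of this patch; register-base setup, DMA-pool creation and the remove path are elided.

```c
/*
 * Hypothetical platform glue reusing the exported core entry points.
 * Probe path only; csr/regs/ep_regs setup, DMA pools and the remove
 * callback are omitted for brevity, and memory lifetime is simplified.
 */
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include "amd5536udc.h"

static int snps_udc_plat_probe(struct platform_device *pdev)
{
	struct resource *res;
	struct udc *dev;
	int irq, ret;

	dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	spin_lock_init(&dev->lock);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	dev->virt_addr = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(dev->virt_addr))
		return PTR_ERR(dev->virt_addr);
	dev->phys_addr = res->start;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;
	dev->irq = irq;

	ret = devm_request_irq(&pdev->dev, irq, udc_irq, IRQF_SHARED,
			       dev_name(&pdev->dev), dev);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, dev);

	return udc_probe(dev);	/* exported by the core driver above */
}
```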
+
+/* Driver strings */
+#define UDC_MOD_DESCRIPTION "AMD 5536 UDC - USB Device Controller"
+
+/* system */
+#include <linux/device.h>
+#include <linux/dmapool.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/prefetch.h>
+#include <linux/pci.h>
+
+/* udc specific */
+#include "amd5536udc.h"
+
+/* pointer to device object */
+static struct udc *udc;
+
+/* description */
+static const char mod_desc[] = UDC_MOD_DESCRIPTION;
+static const char name[] = "amd5536udc-pci";
+
+/* Reset all pci context */
+static void udc_pci_remove(struct pci_dev *pdev)
+{
+ struct udc *dev;
+
+ dev = pci_get_drvdata(pdev);
+
+ usb_del_gadget_udc(&udc->gadget);
+ /* gadget driver must not be registered */
+ if (WARN_ON(dev->driver))
+ return;
+
+ /* dma pool cleanup */
+ free_dma_pools(dev);
+
+ /* reset controller */
+ writel(AMD_BIT(UDC_DEVCFG_SOFTRESET), &dev->regs->cfg);
+ free_irq(pdev->irq, dev);
+ iounmap(dev->virt_addr);
+ release_mem_region(pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0));
+ pci_disable_device(pdev);
+
+ udc_remove(dev);
+}
+
+/* Called by pci bus driver to init pci context */
+static int udc_pci_probe(
+ struct pci_dev *pdev,
+ const struct pci_device_id *id
+)
+{
+ struct udc *dev;
+ unsigned long resource;
+ unsigned long len;
+ int retval = 0;
+
+ /* one udc only */
+ if (udc) {
+ dev_dbg(&pdev->dev, "already probed\n");
+ return -EBUSY;
+ }
+
+ /* init */
+ dev = kzalloc(sizeof(struct udc), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ /* pci setup */
+ if (pci_enable_device(pdev) < 0) {
+ retval = -ENODEV;
+ goto err_pcidev;
+ }
+
+ /* PCI resource allocation */
+ resource = pci_resource_start(pdev, 0);
+ len = pci_resource_len(pdev, 0);
+
+ if (!request_mem_region(resource, len, name)) {
+ dev_dbg(&pdev->dev, "pci device used already\n");
+ retval = -EBUSY;
+ goto err_memreg;
+ }
+
+ dev->virt_addr = ioremap_nocache(resource, len);
+ if (!dev->virt_addr) {
+ dev_dbg(&pdev->dev, "start address cannot be mapped\n");
+ retval = -EFAULT;
+ goto err_ioremap;
+ }
+
+ if (!pdev->irq) {
+ dev_err(&pdev->dev, "irq not set\n");
+ retval = -ENODEV;
+ goto err_irq;
+ }
+
+ spin_lock_init(&dev->lock);
+ /* udc csr registers base */
+ dev->csr = dev->virt_addr + UDC_CSR_ADDR;
+ /* dev registers base */
+ dev->regs = dev->virt_addr + UDC_DEVCFG_ADDR;
+ /* ep registers base */
+ dev->ep_regs = dev->virt_addr + UDC_EPREGS_ADDR;
+ /* fifo's base */
+ dev->rxfifo = (u32 __iomem *)(dev->virt_addr + UDC_RXFIFO_ADDR);
+ dev->txfifo = (u32 __iomem *)(dev->virt_addr + UDC_TXFIFO_ADDR);
+
+ if (request_irq(pdev->irq, udc_irq, IRQF_SHARED, name, dev) != 0) {
+ dev_dbg(&pdev->dev, "request_irq(%d) fail\n", pdev->irq);
+ retval = -EBUSY;
+ goto err_irq;
+ }
+
+ pci_set_drvdata(pdev, dev);
+
+ /* chip revision for Hs AMD5536 */
+ dev->chiprev = pdev->revision;
+
+ pci_set_master(pdev);
+ pci_try_set_mwi(pdev);
+
+ /* init dma pools */
+ if (use_dma) {
+ retval = init_dma_pools(dev);
+ if (retval != 0)
+ goto err_dma;
+ }
+
+ dev->phys_addr = resource;
+ dev->irq = pdev->irq;
+ dev->pdev = pdev;
+
+ /* general probing */
+ if (udc_probe(dev)) {
+ retval = -ENODEV;
+ goto err_probe;
+ }
+ return 0;
+
+err_probe:
+ if (use_dma)
+ free_dma_pools(dev);
+err_dma:
+ free_irq(pdev->irq, dev);
+err_irq:
+ iounmap(dev->virt_addr);
+err_ioremap:
+ release_mem_region(resource, len);
+err_memreg:
+ pci_disable_device(pdev);
+err_pcidev:
+ kfree(dev);
+ return retval;
+}
+
+/* PCI device parameters */
+static const struct pci_device_id pci_id[] = {
+ {
+ PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x2096),
+ .class = PCI_CLASS_SERIAL_USB_DEVICE,
+ .class_mask = 0xffffffff,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(pci, pci_id);
+
+/* PCI functions */
+static struct pci_driver udc_pci_driver = {
+ .name = (char *) name,
+ .id_table = pci_id,
+ .probe = udc_pci_probe,
+ .remove = udc_pci_remove,
+};
+module_pci_driver(udc_pci_driver);
+
+MODULE_DESCRIPTION(UDC_MOD_DESCRIPTION);
+MODULE_AUTHOR("Thomas Dahlmann");
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c
index 2035906b8ced..3ccc34176a5a 100644
--- a/drivers/usb/gadget/udc/atmel_usba_udc.c
+++ b/drivers/usb/gadget/udc/atmel_usba_udc.c
@@ -321,7 +321,6 @@ static inline void usba_cleanup_debugfs(struct usba_udc *udc)
static ushort fifo_mode;
-/* "modprobe ... fifo_mode=1" etc */
module_param(fifo_mode, ushort, 0x0);
MODULE_PARM_DESC(fifo_mode, "Endpoint configuration mode");
@@ -371,7 +370,7 @@ static struct usba_fifo_cfg mode_4_cfg[] = {
};
/* Add additional configurations here */
-int usba_config_fifo_table(struct usba_udc *udc)
+static int usba_config_fifo_table(struct usba_udc *udc)
{
int n;
@@ -1076,11 +1075,9 @@ static int atmel_usba_start(struct usb_gadget *gadget,
struct usb_gadget_driver *driver);
static int atmel_usba_stop(struct usb_gadget *gadget);
-static struct usb_ep *atmel_usba_match_ep(
- struct usb_gadget *gadget,
- struct usb_endpoint_descriptor *desc,
- struct usb_ss_ep_comp_descriptor *ep_comp
-)
+static struct usb_ep *atmel_usba_match_ep(struct usb_gadget *gadget,
+ struct usb_endpoint_descriptor *desc,
+ struct usb_ss_ep_comp_descriptor *ep_comp)
{
struct usb_ep *_ep;
struct usba_ep *ep;
@@ -1100,7 +1097,6 @@ found_ep:
ep = to_usba_ep(_ep);
switch (usb_endpoint_type(desc)) {
-
case USB_ENDPOINT_XFER_CONTROL:
break;
@@ -1141,7 +1137,7 @@ found_ep:
ep->udc->configured_ep++;
}
-return _ep;
+ return _ep;
}
static const struct usb_gadget_ops usba_udc_ops = {
@@ -1855,8 +1851,8 @@ static irqreturn_t usba_udc_irq(int irq, void *devid)
* but it's clearly harmless...
*/
if (!(usba_ep_readl(ep0, CFG) & USBA_EPT_MAPPED))
- dev_dbg(&udc->pdev->dev,
- "ODD: EP0 configuration is invalid!\n");
+ dev_err(&udc->pdev->dev,
+ "ODD: EP0 configuration is invalid!\n");
/* Preallocate other endpoints */
n = fifo_mode ? udc->num_ep : udc->configured_ep;
@@ -1864,8 +1860,8 @@ static irqreturn_t usba_udc_irq(int irq, void *devid)
ep = &udc->usba_ep[i];
usba_ep_writel(ep, CFG, ep->ept_cfg);
if (!(usba_ep_readl(ep, CFG) & USBA_EPT_MAPPED))
- dev_dbg(&udc->pdev->dev,
- "ODD: EP%d configuration is invalid!\n", i);
+ dev_err(&udc->pdev->dev,
+ "ODD: EP%d configuration is invalid!\n", i);
}
}
@@ -2089,8 +2085,9 @@ static struct usba_ep * atmel_udc_of_init(struct platform_device *pdev,
while ((pp = of_get_next_child(np, pp)))
udc->num_ep++;
udc->configured_ep = 1;
- } else
+ } else {
udc->num_ep = usba_config_fifo_table(udc);
+ }
eps = devm_kzalloc(&pdev->dev, sizeof(struct usba_ep) * udc->num_ep,
GFP_KERNEL);
@@ -2118,14 +2115,34 @@ static struct usba_ep * atmel_udc_of_init(struct platform_device *pdev,
dev_err(&pdev->dev, "of_probe: fifo-size error(%d)\n", ret);
goto err;
}
- ep->fifo_size = fifo_mode ? udc->fifo_cfg[i].fifo_size : val;
+ if (fifo_mode) {
+ if (val < udc->fifo_cfg[i].fifo_size) {
+ dev_warn(&pdev->dev,
+ "Using max fifo-size value from DT\n");
+ ep->fifo_size = val;
+ } else {
+ ep->fifo_size = udc->fifo_cfg[i].fifo_size;
+ }
+ } else {
+ ep->fifo_size = val;
+ }
ret = of_property_read_u32(pp, "atmel,nb-banks", &val);
if (ret) {
dev_err(&pdev->dev, "of_probe: nb-banks error(%d)\n", ret);
goto err;
}
- ep->nr_banks = fifo_mode ? udc->fifo_cfg[i].nr_banks : val;
+ if (fifo_mode) {
+ if (val < udc->fifo_cfg[i].nr_banks) {
+ dev_warn(&pdev->dev,
+ "Using max nb-banks value from DT\n");
+ ep->nr_banks = val;
+ } else {
+ ep->nr_banks = udc->fifo_cfg[i].nr_banks;
+ }
+ } else {
+ ep->nr_banks = val;
+ }
ep->can_dma = of_property_read_bool(pp, "atmel,can-dma");
ep->can_isoc = of_property_read_bool(pp, "atmel,can-isoc");
diff --git a/drivers/usb/gadget/udc/bdc/Kconfig b/drivers/usb/gadget/udc/bdc/Kconfig
index 0d7b8c9f72fd..eb8b55392360 100644
--- a/drivers/usb/gadget/udc/bdc/Kconfig
+++ b/drivers/usb/gadget/udc/bdc/Kconfig
@@ -14,7 +14,7 @@ if USB_BDC_UDC
comment "Platform Support"
config USB_BDC_PCI
tristate "BDC support for PCIe based platforms"
- depends on PCI
+ depends on USB_PCI
default USB_BDC_UDC
help
Enable support for platforms which have BDC connected through PCIe, such as Lego3 FPGA platform.
diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
index d685d82dcf48..efce68e9a8e0 100644
--- a/drivers/usb/gadget/udc/core.c
+++ b/drivers/usb/gadget/udc/core.c
@@ -1273,6 +1273,7 @@ void usb_del_gadget_udc(struct usb_gadget *gadget)
flush_work(&gadget->work);
device_unregister(&udc->dev);
device_unregister(&gadget->dev);
+ memset(&gadget->dev, 0x00, sizeof(gadget->dev));
}
EXPORT_SYMBOL_GPL(usb_del_gadget_udc);
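
The added memset() presumably exists so that a gadget structure that was just unregistered can go through usb_add_gadget_udc() again without tripping over stale struct device/kobject state; that is an inference from the code, not stated in this hunk. A hedged illustration of the unbind/rebind cycle it would enable (the helper is hypothetical):

```c
#include <linux/usb/gadget.h>

/*
 * Hypothetical helper: tear a gadget down and register it again on the
 * same parent.  With gadget->dev cleared by usb_del_gadget_udc(), the
 * second usb_add_gadget_udc() starts from a pristine struct device.
 */
static int rebind_gadget(struct device *parent, struct usb_gadget *gadget)
{
	usb_del_gadget_udc(gadget);

	return usb_add_gadget_udc(parent, gadget);
}
```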
diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c
index 8cabc5944d5f..c79081952ea0 100644
--- a/drivers/usb/gadget/udc/dummy_hcd.c
+++ b/drivers/usb/gadget/udc/dummy_hcd.c
@@ -2062,16 +2062,13 @@ static int dummy_hub_control(
}
break;
case USB_PORT_FEAT_POWER:
- if (hcd->speed == HCD_USB3) {
- if (dum_hcd->port_status & USB_PORT_STAT_POWER)
- dev_dbg(dummy_dev(dum_hcd),
- "power-off\n");
- } else
- if (dum_hcd->port_status &
- USB_SS_PORT_STAT_POWER)
- dev_dbg(dummy_dev(dum_hcd),
- "power-off\n");
- /* FALLS THROUGH */
+ dev_dbg(dummy_dev(dum_hcd), "power-off\n");
+ if (hcd->speed == HCD_USB3)
+ dum_hcd->port_status &= ~USB_SS_PORT_STAT_POWER;
+ else
+ dum_hcd->port_status &= ~USB_PORT_STAT_POWER;
+ set_link_state(dum_hcd);
+ break;
default:
dum_hcd->port_status &= ~(1 << wValue);
set_link_state(dum_hcd);
@@ -2242,14 +2239,13 @@ static int dummy_hub_control(
if ((dum_hcd->port_status &
USB_SS_PORT_STAT_POWER) != 0) {
dum_hcd->port_status |= (1 << wValue);
- set_link_state(dum_hcd);
}
} else
if ((dum_hcd->port_status &
USB_PORT_STAT_POWER) != 0) {
dum_hcd->port_status |= (1 << wValue);
- set_link_state(dum_hcd);
}
+ set_link_state(dum_hcd);
}
break;
case GetPortErrorCount:
diff --git a/drivers/usb/gadget/udc/fsl_udc_core.c b/drivers/usb/gadget/udc/fsl_udc_core.c
index b76fcdb763a0..6f2f71c054be 100644
--- a/drivers/usb/gadget/udc/fsl_udc_core.c
+++ b/drivers/usb/gadget/udc/fsl_udc_core.c
@@ -2676,6 +2676,8 @@ static const struct platform_device_id fsl_udc_devtype[] = {
}, {
.name = "imx-udc-mx51",
}, {
+ .name = "fsl-usb2-udc",
+ }, {
/* sentinel */
}
};
diff --git a/drivers/usb/gadget/udc/mv_u3d_core.c b/drivers/usb/gadget/udc/mv_u3d_core.c
index d365449a295a..772049afe166 100644
--- a/drivers/usb/gadget/udc/mv_u3d_core.c
+++ b/drivers/usb/gadget/udc/mv_u3d_core.c
@@ -1835,13 +1835,18 @@ static int mv_u3d_probe(struct platform_device *dev)
}
/* we will access controller register, so enable the u3d controller */
- clk_enable(u3d->clk);
+ retval = clk_enable(u3d->clk);
+ if (retval) {
+ dev_err(&dev->dev, "clk_enable error %d\n", retval);
+ goto err_u3d_enable;
+ }
if (pdata->phy_init) {
retval = pdata->phy_init(u3d->phy_regs);
if (retval) {
dev_err(&dev->dev, "init phy error %d\n", retval);
- goto err_u3d_enable;
+ clk_disable(u3d->clk);
+ goto err_phy_init;
}
}
@@ -1974,15 +1979,13 @@ err_alloc_trb_pool:
dma_free_coherent(&dev->dev, u3d->ep_context_size,
u3d->ep_context, u3d->ep_context_dma);
err_alloc_ep_context:
- if (pdata->phy_deinit)
- pdata->phy_deinit(u3d->phy_regs);
- clk_disable(u3d->clk);
+err_phy_init:
err_u3d_enable:
iounmap(u3d->cap_regs);
err_map_cap_regs:
err_get_cap_regs:
-err_get_clk:
clk_put(u3d->clk);
+err_get_clk:
kfree(u3d);
err_alloc_private:
err_pdata:
diff --git a/drivers/usb/gadget/udc/mv_udc_core.c b/drivers/usb/gadget/udc/mv_udc_core.c
index 27ebb0d5449d..76f56c5762f9 100644
--- a/drivers/usb/gadget/udc/mv_udc_core.c
+++ b/drivers/usb/gadget/udc/mv_udc_core.c
@@ -445,7 +445,8 @@ static int mv_ep_enable(struct usb_ep *_ep,
struct mv_dqh *dqh;
u16 max = 0;
u32 bit_pos, epctrlx, direction;
- unsigned char zlt = 0, ios = 0, mult = 0;
+ const unsigned char zlt = 1;
+ unsigned char ios, mult;
unsigned long flags;
ep = container_of(_ep, struct mv_ep, ep);
@@ -465,8 +466,6 @@ static int mv_ep_enable(struct usb_ep *_ep,
* disable HW zero length termination select
* driver handles zero length packet through req->req.zero
*/
- zlt = 1;
-
bit_pos = 1 << ((direction == EP_DIR_OUT ? 0 : 16) + ep->ep_num);
/* Check if the Endpoint is Primed */
@@ -481,16 +480,16 @@ static int mv_ep_enable(struct usb_ep *_ep,
(unsigned)bit_pos);
goto en_done;
}
+
/* Set the max packet length, interrupt on Setup and Mult fields */
+ ios = 0;
+ mult = 0;
switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
case USB_ENDPOINT_XFER_BULK:
- zlt = 1;
- mult = 0;
+ case USB_ENDPOINT_XFER_INT:
break;
case USB_ENDPOINT_XFER_CONTROL:
ios = 1;
- case USB_ENDPOINT_XFER_INT:
- mult = 0;
break;
case USB_ENDPOINT_XFER_ISOC:
/* Calculate transactions needed for high bandwidth iso */
diff --git a/drivers/usb/gadget/udc/net2272.c b/drivers/usb/gadget/udc/net2272.c
index 7dc0102abdfe..8f85a51bd2b3 100644
--- a/drivers/usb/gadget/udc/net2272.c
+++ b/drivers/usb/gadget/udc/net2272.c
@@ -653,7 +653,7 @@ net2272_request_dma(struct net2272 *dev, unsigned ep, u32 buf,
dev->dma_busy = 1;
/* initialize platform's dma */
-#ifdef CONFIG_PCI
+#ifdef CONFIG_USB_PCI
/* NET2272 addr, buffer addr, length, etc. */
switch (dev->dev_id) {
case PCI_DEVICE_ID_RDK1:
@@ -701,7 +701,7 @@ static void
net2272_start_dma(struct net2272 *dev)
{
/* start platform's dma controller */
-#ifdef CONFIG_PCI
+#ifdef CONFIG_USB_PCI
switch (dev->dev_id) {
case PCI_DEVICE_ID_RDK1:
writeb((1 << CHANNEL_ENABLE) | (1 << CHANNEL_START),
@@ -797,7 +797,7 @@ net2272_kick_dma(struct net2272_ep *ep, struct net2272_request *req)
static void net2272_cancel_dma(struct net2272 *dev)
{
-#ifdef CONFIG_PCI
+#ifdef CONFIG_USB_PCI
switch (dev->dev_id) {
case PCI_DEVICE_ID_RDK1:
writeb(0, dev->rdk1.plx9054_base_addr + DMACSR0);
@@ -2306,7 +2306,7 @@ err_add_udc:
return ret;
}
-#ifdef CONFIG_PCI
+#ifdef CONFIG_USB_PCI
/*
* wrap this driver around the specified device, but
diff --git a/drivers/usb/gadget/udc/net2272.h b/drivers/usb/gadget/udc/net2272.h
index 127ab03fcde3..69bc9c3c6ce4 100644
--- a/drivers/usb/gadget/udc/net2272.h
+++ b/drivers/usb/gadget/udc/net2272.h
@@ -472,7 +472,7 @@ struct net2272 {
unsigned int base_shift;
u16 __iomem *base_addr;
union {
-#ifdef CONFIG_PCI
+#ifdef CONFIG_USB_PCI
struct {
void __iomem *plx9054_base_addr;
void __iomem *epld_base_addr;
diff --git a/drivers/usb/gadget/udc/net2280.c b/drivers/usb/gadget/udc/net2280.c
index 3828c2ec8623..6cf07857eaca 100644
--- a/drivers/usb/gadget/udc/net2280.c
+++ b/drivers/usb/gadget/udc/net2280.c
@@ -569,7 +569,7 @@ static struct usb_request
if (ep->dma) {
struct net2280_dma *td;
- td = pci_pool_alloc(ep->dev->requests, gfp_flags,
+ td = dma_pool_alloc(ep->dev->requests, gfp_flags,
&req->td_dma);
if (!td) {
kfree(req);
@@ -597,7 +597,7 @@ static void net2280_free_request(struct usb_ep *_ep, struct usb_request *_req)
req = container_of(_req, struct net2280_request, req);
WARN_ON(!list_empty(&req->queue));
if (req->td)
- pci_pool_free(ep->dev->requests, req->td, req->td_dma);
+ dma_pool_free(ep->dev->requests, req->td, req->td_dma);
kfree(req);
}
@@ -3579,10 +3579,10 @@ static void net2280_remove(struct pci_dev *pdev)
for (i = 1; i < 5; i++) {
if (!dev->ep[i].dummy)
continue;
- pci_pool_free(dev->requests, dev->ep[i].dummy,
+ dma_pool_free(dev->requests, dev->ep[i].dummy,
dev->ep[i].td_dma);
}
- pci_pool_destroy(dev->requests);
+ dma_pool_destroy(dev->requests);
}
if (dev->got_irq)
free_irq(pdev->irq, dev);
@@ -3724,7 +3724,7 @@ static int net2280_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* DMA setup */
/* NOTE: we know only the 32 LSBs of dma addresses may be nonzero */
- dev->requests = pci_pool_create("requests", pdev,
+ dev->requests = dma_pool_create("requests", &pdev->dev,
sizeof(struct net2280_dma),
0 /* no alignment requirements */,
0 /* or page-crossing issues */);
@@ -3736,7 +3736,7 @@ static int net2280_probe(struct pci_dev *pdev, const struct pci_device_id *id)
for (i = 1; i < 5; i++) {
struct net2280_dma *td;
- td = pci_pool_alloc(dev->requests, GFP_KERNEL,
+ td = dma_pool_alloc(dev->requests, GFP_KERNEL,
&dev->ep[i].td_dma);
if (!td) {
ep_dbg(dev, "can't get dummy %d\n", i);
diff --git a/drivers/usb/gadget/udc/net2280.h b/drivers/usb/gadget/udc/net2280.h
index 2736a95751c3..1088c3745999 100644
--- a/drivers/usb/gadget/udc/net2280.h
+++ b/drivers/usb/gadget/udc/net2280.h
@@ -187,7 +187,7 @@ struct net2280 {
struct usb338x_ll_chi_regs __iomem *ll_chicken_reg;
struct usb338x_pl_regs __iomem *plregs;
- struct pci_pool *requests;
+ struct dma_pool *requests;
/* statistics...*/
};
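
The net2280 hunks above (and the pch_udc ones that follow) are the same mechanical conversion seen earlier in the file: the deprecated pci_pool_* wrappers become direct dma_pool_* calls, with pools created against &pdev->dev instead of the pci_dev. A minimal, hedged lifecycle sketch follows; the pool name, descriptor size, and function name are illustrative only.

```c
#include <linux/dmapool.h>
#include <linux/pci.h>

/* Illustrative only: one descriptor allocated from a per-device pool. */
static int example_pool_lifecycle(struct pci_dev *pdev)
{
	struct dma_pool *pool;
	dma_addr_t dma;
	void *desc;

	/* was: pci_pool_create("requests", pdev, 64, 0, 0); */
	pool = dma_pool_create("requests", &pdev->dev, 64, 0, 0);
	if (!pool)
		return -ENOMEM;

	/* was: pci_pool_alloc(pool, GFP_KERNEL, &dma); */
	desc = dma_pool_alloc(pool, GFP_KERNEL, &dma);
	if (!desc) {
		dma_pool_destroy(pool);
		return -ENOMEM;
	}

	/* ... hand 'dma' to the hardware, use 'desc' from the CPU ... */

	dma_pool_free(pool, desc, dma);
	/* NULL-safe, which is why the pch_udc hunk drops its if() check. */
	dma_pool_destroy(pool);
	return 0;
}
```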
diff --git a/drivers/usb/gadget/udc/pch_udc.c b/drivers/usb/gadget/udc/pch_udc.c
index 8a365aad66fe..84dcbcd756f0 100644
--- a/drivers/usb/gadget/udc/pch_udc.c
+++ b/drivers/usb/gadget/udc/pch_udc.c
@@ -355,8 +355,8 @@ struct pch_udc_dev {
vbus_session:1,
set_cfg_not_acked:1,
waiting_zlp_ack:1;
- struct pci_pool *data_requests;
- struct pci_pool *stp_requests;
+ struct dma_pool *data_requests;
+ struct dma_pool *stp_requests;
dma_addr_t dma_addr;
struct usb_ctrlrequest setup_data;
void __iomem *base_addr;
@@ -1522,7 +1522,8 @@ static void pch_udc_free_dma_chain(struct pch_udc_dev *dev,
/* do not free first desc., will be done by free for request */
td = phys_to_virt(addr);
addr2 = (dma_addr_t)td->next;
- pci_pool_free(dev->data_requests, td, addr);
+ dma_pool_free(dev->data_requests, td, addr);
+ td->next = 0x00;
addr = addr2;
}
req->chain_len = 1;
@@ -1538,7 +1539,7 @@ static void pch_udc_free_dma_chain(struct pch_udc_dev *dev,
*
* Return codes:
* 0: success,
- * -ENOMEM: pci_pool_alloc invocation fails
+ * -ENOMEM: dma_pool_alloc invocation fails
*/
static int pch_udc_create_dma_chain(struct pch_udc_ep *ep,
struct pch_udc_request *req,
@@ -1564,7 +1565,7 @@ static int pch_udc_create_dma_chain(struct pch_udc_ep *ep,
if (bytes <= buf_len)
break;
last = td;
- td = pci_pool_alloc(ep->dev->data_requests, gfp_flags,
+ td = dma_pool_alloc(ep->dev->data_requests, gfp_flags,
&dma_addr);
if (!td)
goto nomem;
@@ -1769,7 +1770,7 @@ static struct usb_request *pch_udc_alloc_request(struct usb_ep *usbep,
if (!ep->dev->dma_addr)
return &req->req;
/* ep0 in requests are allocated from data pool here */
- dma_desc = pci_pool_alloc(ep->dev->data_requests, gfp,
+ dma_desc = dma_pool_alloc(ep->dev->data_requests, gfp,
&req->td_data_phys);
if (NULL == dma_desc) {
kfree(req);
@@ -1808,7 +1809,7 @@ static void pch_udc_free_request(struct usb_ep *usbep,
if (req->td_data != NULL) {
if (req->chain_len > 1)
pch_udc_free_dma_chain(ep->dev, req);
- pci_pool_free(ep->dev->data_requests, req->td_data,
+ dma_pool_free(ep->dev->data_requests, req->td_data,
req->td_data_phys);
}
kfree(req);
@@ -2913,7 +2914,7 @@ static int init_dma_pools(struct pch_udc_dev *dev)
void *ep0out_buf;
/* DMA setup */
- dev->data_requests = pci_pool_create("data_requests", dev->pdev,
+ dev->data_requests = dma_pool_create("data_requests", &dev->pdev->dev,
sizeof(struct pch_udc_data_dma_desc), 0, 0);
if (!dev->data_requests) {
dev_err(&dev->pdev->dev, "%s: can't get request data pool\n",
@@ -2922,7 +2923,7 @@ static int init_dma_pools(struct pch_udc_dev *dev)
}
/* dma desc for setup data */
- dev->stp_requests = pci_pool_create("setup requests", dev->pdev,
+ dev->stp_requests = dma_pool_create("setup requests", &dev->pdev->dev,
sizeof(struct pch_udc_stp_dma_desc), 0, 0);
if (!dev->stp_requests) {
dev_err(&dev->pdev->dev, "%s: can't get setup request pool\n",
@@ -2930,7 +2931,7 @@ static int init_dma_pools(struct pch_udc_dev *dev)
return -ENOMEM;
}
/* setup */
- td_stp = pci_pool_alloc(dev->stp_requests, GFP_KERNEL,
+ td_stp = dma_pool_alloc(dev->stp_requests, GFP_KERNEL,
&dev->ep[UDC_EP0OUT_IDX].td_stp_phys);
if (!td_stp) {
dev_err(&dev->pdev->dev,
@@ -2940,7 +2941,7 @@ static int init_dma_pools(struct pch_udc_dev *dev)
dev->ep[UDC_EP0OUT_IDX].td_stp = td_stp;
/* data: 0 packets !? */
- td_data = pci_pool_alloc(dev->data_requests, GFP_KERNEL,
+ td_data = dma_pool_alloc(dev->data_requests, GFP_KERNEL,
&dev->ep[UDC_EP0OUT_IDX].td_data_phys);
if (!td_data) {
dev_err(&dev->pdev->dev,
@@ -3020,22 +3021,21 @@ static void pch_udc_remove(struct pci_dev *pdev)
dev_err(&pdev->dev,
"%s: gadget driver still bound!!!\n", __func__);
/* dma pool cleanup */
- if (dev->data_requests)
- pci_pool_destroy(dev->data_requests);
+ dma_pool_destroy(dev->data_requests);
if (dev->stp_requests) {
/* cleanup DMA desc's for ep0in */
if (dev->ep[UDC_EP0OUT_IDX].td_stp) {
- pci_pool_free(dev->stp_requests,
+ dma_pool_free(dev->stp_requests,
dev->ep[UDC_EP0OUT_IDX].td_stp,
dev->ep[UDC_EP0OUT_IDX].td_stp_phys);
}
if (dev->ep[UDC_EP0OUT_IDX].td_data) {
- pci_pool_free(dev->stp_requests,
+ dma_pool_free(dev->stp_requests,
dev->ep[UDC_EP0OUT_IDX].td_data,
dev->ep[UDC_EP0OUT_IDX].td_data_phys);
}
- pci_pool_destroy(dev->stp_requests);
+ dma_pool_destroy(dev->stp_requests);
}
if (dev->dma_addr)
diff --git a/drivers/usb/gadget/udc/pxa27x_udc.c b/drivers/usb/gadget/udc/pxa27x_udc.c
index 832c4fdbe985..d48e239660c3 100644
--- a/drivers/usb/gadget/udc/pxa27x_udc.c
+++ b/drivers/usb/gadget/udc/pxa27x_udc.c
@@ -1608,9 +1608,6 @@ static int pxa_udc_pullup(struct usb_gadget *_gadget, int is_active)
return 0;
}
-static void udc_enable(struct pxa_udc *udc);
-static void udc_disable(struct pxa_udc *udc);
-
/**
* pxa_udc_vbus_session - Called by external transceiver to enable/disable udc
* @_gadget: usb gadget
diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c
index 2218f91e92a6..5a2d845fb1a6 100644
--- a/drivers/usb/gadget/udc/renesas_usb3.c
+++ b/drivers/usb/gadget/udc/renesas_usb3.c
@@ -10,6 +10,7 @@
#include <linux/delay.h>
#include <linux/err.h>
+#include <linux/extcon.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
@@ -37,6 +38,9 @@
#define USB3_USB_INT_ENA_2 0x22c
#define USB3_STUP_DAT_0 0x230
#define USB3_STUP_DAT_1 0x234
+#define USB3_USB_OTG_STA 0x268
+#define USB3_USB_OTG_INT_STA 0x26c
+#define USB3_USB_OTG_INT_ENA 0x270
#define USB3_P0_MOD 0x280
#define USB3_P0_CON 0x288
#define USB3_P0_STA 0x28c
@@ -124,6 +128,9 @@
/* USB_INT_ENA_2 and USB_INT_STA_2 */
#define USB_INT_2_PIPE(n) BIT(n)
+/* USB_OTG_STA, USB_OTG_INT_STA and USB_OTG_INT_ENA */
+#define USB_OTG_IDMON BIT(4)
+
/* P0_MOD */
#define P0_MOD_DIR BIT(6)
@@ -257,6 +264,8 @@ struct renesas_usb3 {
struct usb_gadget gadget;
struct usb_gadget_driver *driver;
+ struct extcon_dev *extcon;
+ struct work_struct extcon_work;
struct renesas_usb3_ep *usb3_ep;
int num_usb3_eps;
@@ -269,6 +278,8 @@ struct renesas_usb3 {
u8 ep0_buf[USB3_EP0_BUF_SIZE];
bool softconnect;
bool workaround_for_vbus;
+ bool extcon_host; /* check id and set EXTCON_USB_HOST */
+ bool extcon_usb; /* check vbus and set EXTCON_USB */
};
#define gadget_to_renesas_usb3(_gadget) \
@@ -332,6 +343,15 @@ static int usb3_wait(struct renesas_usb3 *usb3, u32 reg, u32 mask,
return -EBUSY;
}
+static void renesas_usb3_extcon_work(struct work_struct *work)
+{
+ struct renesas_usb3 *usb3 = container_of(work, struct renesas_usb3,
+ extcon_work);
+
+ extcon_set_state_sync(usb3->extcon, EXTCON_USB_HOST, usb3->extcon_host);
+ extcon_set_state_sync(usb3->extcon, EXTCON_USB, usb3->extcon_usb);
+}
+
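
renesas_usb3_extcon_work() pushes the latest cable snapshot to extcon from process context; the work item keeps the extcon updates (which end up in sysfs/uevent code) out of hard-IRQ context, so the interrupt paths below only update extcon_host/extcon_usb and schedule this work. For context, a hedged sketch of how some other driver might consume the reported EXTCON_USB_HOST state; the notifier and registration helper are hypothetical consumer code, not part of this patch.

```c
#include <linux/device.h>
#include <linux/extcon.h>
#include <linux/notifier.h>
#include <linux/printk.h>

/* Hypothetical consumer reacting to the EXTCON_USB_HOST state. */
static int my_host_cable_notifier(struct notifier_block *nb,
				  unsigned long state, void *ptr)
{
	if (state)
		pr_info("ID pin reports A-device: switch to host mode\n");
	else
		pr_info("B-device: stay in peripheral mode\n");
	return NOTIFY_OK;
}

static struct notifier_block my_nb = {
	.notifier_call = my_host_cable_notifier,
};

/*
 * Typically called from the consumer's probe(), with edev looked up via
 * extcon_get_edev_by_phandle() or a similar helper.
 */
static int my_register(struct device *dev, struct extcon_dev *edev)
{
	return devm_extcon_register_notifier(dev, edev, EXTCON_USB_HOST,
					     &my_nb);
}
```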
static void usb3_enable_irq_1(struct renesas_usb3 *usb3, u32 bits)
{
usb3_set_bit(usb3, bits, USB3_USB_INT_ENA_1);
@@ -352,6 +372,11 @@ static void usb3_disable_pipe_irq(struct renesas_usb3 *usb3, int num)
usb3_clear_bit(usb3, USB_INT_2_PIPE(num), USB3_USB_INT_ENA_2);
}
+static bool usb3_is_host(struct renesas_usb3 *usb3)
+{
+ return !(usb3_read(usb3, USB3_DRD_CON) & DRD_CON_PERI_CON);
+}
+
static void usb3_init_axi_bridge(struct renesas_usb3 *usb3)
{
/* Set AXI_INT */
@@ -362,10 +387,6 @@ static void usb3_init_axi_bridge(struct renesas_usb3 *usb3)
static void usb3_init_epc_registers(struct renesas_usb3 *usb3)
{
- /* FIXME: How to change host / peripheral mode as well? */
- usb3_set_bit(usb3, DRD_CON_PERI_CON, USB3_DRD_CON);
- usb3_clear_bit(usb3, DRD_CON_VBOUT, USB3_DRD_CON);
-
usb3_write(usb3, ~0, USB3_USB_INT_STA_1);
usb3_enable_irq_1(usb3, USB_INT_1_VBUS_CNG);
}
@@ -531,18 +552,70 @@ static void usb3_check_vbus(struct renesas_usb3 *usb3)
if (usb3->workaround_for_vbus) {
usb3_connect(usb3);
} else {
- if (usb3_read(usb3, USB3_USB_STA) & USB_STA_VBUS_STA)
+ usb3->extcon_usb = !!(usb3_read(usb3, USB3_USB_STA) &
+ USB_STA_VBUS_STA);
+ if (usb3->extcon_usb)
usb3_connect(usb3);
else
usb3_disconnect(usb3);
+
+ schedule_work(&usb3->extcon_work);
}
}
+static void usb3_set_mode(struct renesas_usb3 *usb3, bool host)
+{
+ if (host)
+ usb3_clear_bit(usb3, DRD_CON_PERI_CON, USB3_DRD_CON);
+ else
+ usb3_set_bit(usb3, DRD_CON_PERI_CON, USB3_DRD_CON);
+}
+
+static void usb3_vbus_out(struct renesas_usb3 *usb3, bool enable)
+{
+ if (enable)
+ usb3_set_bit(usb3, DRD_CON_VBOUT, USB3_DRD_CON);
+ else
+ usb3_clear_bit(usb3, DRD_CON_VBOUT, USB3_DRD_CON);
+}
+
+static void usb3_mode_config(struct renesas_usb3 *usb3, bool host, bool a_dev)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&usb3->lock, flags);
+ usb3_set_mode(usb3, host);
+ usb3_vbus_out(usb3, a_dev);
+ if (!host && a_dev) /* for A-Peripheral */
+ usb3_connect(usb3);
+ spin_unlock_irqrestore(&usb3->lock, flags);
+}
+
+static bool usb3_is_a_device(struct renesas_usb3 *usb3)
+{
+ return !(usb3_read(usb3, USB3_USB_OTG_STA) & USB_OTG_IDMON);
+}
+
+static void usb3_check_id(struct renesas_usb3 *usb3)
+{
+ usb3->extcon_host = usb3_is_a_device(usb3);
+
+ if (usb3->extcon_host)
+ usb3_mode_config(usb3, true, true);
+ else
+ usb3_mode_config(usb3, false, false);
+
+ schedule_work(&usb3->extcon_work);
+}
+
static void renesas_usb3_init_controller(struct renesas_usb3 *usb3)
{
usb3_init_axi_bridge(usb3);
usb3_init_epc_registers(usb3);
+ usb3_write(usb3, USB_OTG_IDMON, USB3_USB_OTG_INT_STA);
+ usb3_write(usb3, USB_OTG_IDMON, USB3_USB_OTG_INT_ENA);
+ usb3_check_id(usb3);
usb3_check_vbus(usb3);
}
@@ -551,6 +624,7 @@ static void renesas_usb3_stop_controller(struct renesas_usb3 *usb3)
usb3_disconnect(usb3);
usb3_write(usb3, 0, USB3_P0_INT_ENA);
usb3_write(usb3, 0, USB3_PN_INT_ENA);
+ usb3_write(usb3, 0, USB3_USB_OTG_INT_ENA);
usb3_write(usb3, 0, USB3_USB_INT_ENA_1);
usb3_write(usb3, 0, USB3_USB_INT_ENA_2);
usb3_write(usb3, 0, USB3_AXI_INT_ENA);
@@ -1474,10 +1548,22 @@ static void usb3_irq_epc_int_2(struct renesas_usb3 *usb3, u32 int_sta_2)
}
}
+static void usb3_irq_idmon_change(struct renesas_usb3 *usb3)
+{
+ usb3_check_id(usb3);
+}
+
+static void usb3_irq_otg_int(struct renesas_usb3 *usb3, u32 otg_int_sta)
+{
+ if (otg_int_sta & USB_OTG_IDMON)
+ usb3_irq_idmon_change(usb3);
+}
+
static void usb3_irq_epc(struct renesas_usb3 *usb3)
{
u32 int_sta_1 = usb3_read(usb3, USB3_USB_INT_STA_1);
u32 int_sta_2 = usb3_read(usb3, USB3_USB_INT_STA_2);
+ u32 otg_int_sta = usb3_read(usb3, USB3_USB_OTG_INT_STA);
int_sta_1 &= usb3_read(usb3, USB3_USB_INT_ENA_1);
if (int_sta_1) {
@@ -1488,6 +1574,12 @@ static void usb3_irq_epc(struct renesas_usb3 *usb3)
int_sta_2 &= usb3_read(usb3, USB3_USB_INT_ENA_2);
if (int_sta_2)
usb3_irq_epc_int_2(usb3, int_sta_2);
+
+ otg_int_sta &= usb3_read(usb3, USB3_USB_OTG_INT_ENA);
+ if (otg_int_sta) {
+ usb3_write(usb3, otg_int_sta, USB3_USB_OTG_INT_STA);
+ usb3_irq_otg_int(usb3, otg_int_sta);
+ }
}
static irqreturn_t renesas_usb3_irq(int irq, void *_usb3)
@@ -1756,11 +1848,49 @@ static const struct usb_gadget_ops renesas_usb3_gadget_ops = {
.set_selfpowered = renesas_usb3_set_selfpowered,
};
+static ssize_t role_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct renesas_usb3 *usb3 = dev_get_drvdata(dev);
+ bool new_mode_is_host;
+
+ if (!usb3->driver)
+ return -ENODEV;
+
+ if (!strncmp(buf, "host", strlen("host")))
+ new_mode_is_host = true;
+ else if (!strncmp(buf, "peripheral", strlen("peripheral")))
+ new_mode_is_host = false;
+ else
+ return -EINVAL;
+
+ if (new_mode_is_host == usb3_is_host(usb3))
+ return -EINVAL;
+
+ usb3_mode_config(usb3, new_mode_is_host, usb3_is_a_device(usb3));
+
+ return count;
+}
+
+static ssize_t role_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct renesas_usb3 *usb3 = dev_get_drvdata(dev);
+
+ if (!usb3->driver)
+ return -ENODEV;
+
+ return sprintf(buf, "%s\n", usb3_is_host(usb3) ? "host" : "peripheral");
+}
+static DEVICE_ATTR_RW(role);
+
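
The new "role" attribute lets userspace read the current role and force "host" or "peripheral" while a gadget driver is bound; writes that do not change the current role are rejected with -EINVAL, and writes with no gadget bound fail with -ENODEV. A hedged userspace sketch of driving it; the sysfs path is illustrative and depends on the board's platform-device name.

```c
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Illustrative path; the actual platform-device name is board specific. */
#define ROLE_ATTR "/sys/devices/platform/soc/ee020000.usb/role"

int main(void)
{
	char cur[16] = "";
	int fd = open(ROLE_ATTR, O_RDWR);

	if (fd < 0) {
		perror("open " ROLE_ATTR);
		return 1;
	}

	if (read(fd, cur, sizeof(cur) - 1) > 0)
		printf("current role: %s", cur);	/* "host\n" or "peripheral\n" */

	/* Force host mode; ENODEV if no gadget bound, EINVAL if unchanged. */
	lseek(fd, 0, SEEK_SET);
	if (write(fd, "host", strlen("host")) < 0)
		perror("write");

	close(fd);
	return 0;
}
```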
/*------- platform_driver ------------------------------------------------*/
static int renesas_usb3_remove(struct platform_device *pdev)
{
struct renesas_usb3 *usb3 = platform_get_drvdata(pdev);
+ device_remove_file(&pdev->dev, &dev_attr_role);
+
pm_runtime_put(&pdev->dev);
pm_runtime_disable(&pdev->dev);
@@ -1894,6 +2024,12 @@ static const struct of_device_id usb3_of_match[] = {
};
MODULE_DEVICE_TABLE(of, usb3_of_match);
+static const unsigned int renesas_usb3_cable[] = {
+ EXTCON_USB,
+ EXTCON_USB_HOST,
+ EXTCON_NONE,
+};
+
static int renesas_usb3_probe(struct platform_device *pdev)
{
struct renesas_usb3 *usb3;
@@ -1937,6 +2073,17 @@ static int renesas_usb3_probe(struct platform_device *pdev)
if (ret < 0)
return ret;
+ INIT_WORK(&usb3->extcon_work, renesas_usb3_extcon_work);
+ usb3->extcon = devm_extcon_dev_allocate(&pdev->dev, renesas_usb3_cable);
+ if (IS_ERR(usb3->extcon))
+ return PTR_ERR(usb3->extcon);
+
+ ret = devm_extcon_dev_register(&pdev->dev, usb3->extcon);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to register extcon\n");
+ return ret;
+ }
+
/* for ep0 handling */
usb3->ep0_req = __renesas_usb3_ep_alloc_request(GFP_KERNEL);
if (!usb3->ep0_req)
@@ -1946,6 +2093,10 @@ static int renesas_usb3_probe(struct platform_device *pdev)
if (ret < 0)
goto err_add_udc;
+ ret = device_create_file(&pdev->dev, &dev_attr_role);
+ if (ret < 0)
+ goto err_dev_create;
+
usb3->workaround_for_vbus = priv->workaround_for_vbus;
pm_runtime_enable(&pdev->dev);
@@ -1955,6 +2106,9 @@ static int renesas_usb3_probe(struct platform_device *pdev)
return 0;
+err_dev_create:
+ usb_del_gadget_udc(&usb3->gadget);
+
err_add_udc:
__renesas_usb3_ep_free_request(usb3->ep0_req);
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index 407d947b34ea..ababb91d654a 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -30,7 +30,7 @@ if USB_XHCI_HCD
config USB_XHCI_PCI
tristate
- depends on PCI
+ depends on USB_PCI
default y
config USB_XHCI_PLATFORM
@@ -139,7 +139,7 @@ if USB_EHCI_HCD
config USB_EHCI_PCI
tristate
- depends on PCI
+ depends on USB_PCI
default y
config USB_EHCI_HCD_PMC_MSP
@@ -188,7 +188,7 @@ config USB_EHCI_HCD_OMAP
config USB_EHCI_HCD_ORION
tristate "Support for Marvell EBU on-chip EHCI USB controller"
- depends on USB_EHCI_HCD && PLAT_ORION
+ depends on USB_EHCI_HCD && (PLAT_ORION || ARCH_MVEBU)
default y
---help---
Enables support for the on-chip EHCI controller on Marvell's
@@ -525,7 +525,7 @@ config USB_OHCI_HCD_PPC_OF
config USB_OHCI_HCD_PCI
tristate "OHCI support for PCI-bus USB controllers"
- depends on PCI
+ depends on USB_PCI
default y
select USB_OHCI_LITTLE_ENDIAN
---help---
@@ -606,7 +606,7 @@ endif # USB_OHCI_HCD
config USB_UHCI_HCD
tristate "UHCI HCD (most Intel and VIA) support"
- depends on PCI || USB_UHCI_SUPPORT_NON_PCI_HC
+ depends on USB_PCI || USB_UHCI_SUPPORT_NON_PCI_HC
---help---
The Universal Host Controller Interface is a standard by Intel for
accessing the USB hardware in the PC (which is also called the USB
@@ -739,7 +739,7 @@ config USB_RENESAS_USBHS_HCD
config USB_WHCI_HCD
tristate "Wireless USB Host Controller Interface (WHCI) driver"
- depends on PCI && USB && UWB
+ depends on USB_PCI && USB && UWB
select USB_WUSB
select UWB_WHCI
help
diff --git a/drivers/usb/host/Makefile b/drivers/usb/host/Makefile
index 2644537b7bcf..c77b0a38557b 100644
--- a/drivers/usb/host/Makefile
+++ b/drivers/usb/host/Makefile
@@ -27,9 +27,7 @@ endif
obj-$(CONFIG_USB_WHCI_HCD) += whci/
-ifneq ($(CONFIG_USB), )
- obj-$(CONFIG_PCI) += pci-quirks.o
-endif
+obj-$(CONFIG_USB_PCI) += pci-quirks.o
obj-$(CONFIG_USB_EHCI_HCD) += ehci-hcd.o
obj-$(CONFIG_USB_EHCI_PCI) += ehci-pci.o
diff --git a/drivers/usb/host/ehci-dbg.c b/drivers/usb/host/ehci-dbg.c
index 1a2614aae42c..cbb9b8e12c3c 100644
--- a/drivers/usb/host/ehci-dbg.c
+++ b/drivers/usb/host/ehci-dbg.c
@@ -803,7 +803,7 @@ static ssize_t fill_registers_buffer(struct debug_buffer *buf)
size -= temp;
next += temp;
-#ifdef CONFIG_PCI
+#ifdef CONFIG_USB_PCI
/* EHCI 0.96 and later may have "extended capabilities" */
if (dev_is_pci(hcd->self.controller)) {
struct pci_dev *pdev;
diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c
index 3733aab46efe..4a08b70c81aa 100644
--- a/drivers/usb/host/ehci-fsl.c
+++ b/drivers/usb/host/ehci-fsl.c
@@ -96,8 +96,8 @@ static int fsl_ehci_drv_probe(struct platform_device *pdev)
}
irq = res->start;
- hcd = usb_create_hcd(&fsl_ehci_hc_driver, &pdev->dev,
- dev_name(&pdev->dev));
+ hcd = __usb_create_hcd(&fsl_ehci_hc_driver, pdev->dev.parent,
+ &pdev->dev, dev_name(&pdev->dev), NULL);
if (!hcd) {
retval = -ENOMEM;
goto err1;
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index ac2c4eab478d..6e834b83a104 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -597,7 +597,7 @@ static int ehci_run (struct usb_hcd *hcd)
/*
* hcc_params controls whether ehci->regs->segment must (!!!)
* be used; it constrains QH/ITD/SITD and QTD locations.
- * pci_pool consistent memory always uses segment zero.
+ * dma_pool consistent memory always uses segment zero.
* streaming mappings for I/O buffers, like pci_map_single(),
* can return segments above 4GB, if the device allows.
*
diff --git a/drivers/usb/host/ehci-mem.c b/drivers/usb/host/ehci-mem.c
index 4de43011df23..9b7e63977215 100644
--- a/drivers/usb/host/ehci-mem.c
+++ b/drivers/usb/host/ehci-mem.c
@@ -138,7 +138,7 @@ static void ehci_mem_cleanup (struct ehci_hcd *ehci)
ehci->sitd_pool = NULL;
if (ehci->periodic)
- dma_free_coherent (ehci_to_hcd(ehci)->self.controller,
+ dma_free_coherent(ehci_to_hcd(ehci)->self.sysdev,
ehci->periodic_size * sizeof (u32),
ehci->periodic, ehci->periodic_dma);
ehci->periodic = NULL;
@@ -155,7 +155,7 @@ static int ehci_mem_init (struct ehci_hcd *ehci, gfp_t flags)
/* QTDs for control/bulk/intr transfers */
ehci->qtd_pool = dma_pool_create ("ehci_qtd",
- ehci_to_hcd(ehci)->self.controller,
+ ehci_to_hcd(ehci)->self.sysdev,
sizeof (struct ehci_qtd),
32 /* byte alignment (for hw parts) */,
4096 /* can't cross 4K */);
@@ -165,7 +165,7 @@ static int ehci_mem_init (struct ehci_hcd *ehci, gfp_t flags)
/* QHs for control/bulk/intr transfers */
ehci->qh_pool = dma_pool_create ("ehci_qh",
- ehci_to_hcd(ehci)->self.controller,
+ ehci_to_hcd(ehci)->self.sysdev,
sizeof(struct ehci_qh_hw),
32 /* byte alignment (for hw parts) */,
4096 /* can't cross 4K */);
@@ -179,7 +179,7 @@ static int ehci_mem_init (struct ehci_hcd *ehci, gfp_t flags)
/* ITD for high speed ISO transfers */
ehci->itd_pool = dma_pool_create ("ehci_itd",
- ehci_to_hcd(ehci)->self.controller,
+ ehci_to_hcd(ehci)->self.sysdev,
sizeof (struct ehci_itd),
32 /* byte alignment (for hw parts) */,
4096 /* can't cross 4K */);
@@ -189,7 +189,7 @@ static int ehci_mem_init (struct ehci_hcd *ehci, gfp_t flags)
/* SITD for full/low speed split ISO transfers */
ehci->sitd_pool = dma_pool_create ("ehci_sitd",
- ehci_to_hcd(ehci)->self.controller,
+ ehci_to_hcd(ehci)->self.sysdev,
sizeof (struct ehci_sitd),
32 /* byte alignment (for hw parts) */,
4096 /* can't cross 4K */);
@@ -199,7 +199,7 @@ static int ehci_mem_init (struct ehci_hcd *ehci, gfp_t flags)
/* Hardware periodic table */
ehci->periodic = (__le32 *)
- dma_alloc_coherent (ehci_to_hcd(ehci)->self.controller,
+ dma_alloc_coherent(ehci_to_hcd(ehci)->self.sysdev,
ehci->periodic_size * sizeof(__le32),
&ehci->periodic_dma, flags);
if (ehci->periodic == NULL) {
diff --git a/drivers/usb/host/ehci-orion.c b/drivers/usb/host/ehci-orion.c
index ee8d5faa0194..1aec87ec68df 100644
--- a/drivers/usb/host/ehci-orion.c
+++ b/drivers/usb/host/ehci-orion.c
@@ -47,6 +47,18 @@
#define USB_PHY_IVREF_CTRL 0x440
#define USB_PHY_TST_GRP_CTRL 0x450
+#define USB_SBUSCFG 0x90
+
+/* BAWR = BARD = 3 : Align read/write bursts packets larger than 128 bytes */
+#define USB_SBUSCFG_BAWR_ALIGN_128B (0x3 << 6)
+#define USB_SBUSCFG_BARD_ALIGN_128B (0x3 << 3)
+/* AHBBRST = 3 : Align AHB Burst to INCR16 (64 bytes) */
+#define USB_SBUSCFG_AHBBRST_INCR16 (0x3 << 0)
+
+#define USB_SBUSCFG_DEF_VAL (USB_SBUSCFG_BAWR_ALIGN_128B \
+ | USB_SBUSCFG_BARD_ALIGN_128B \
+ | USB_SBUSCFG_AHBBRST_INCR16)
+
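
For quick reference, the three fields above combine to 0xdb, which is the value written to USB_SBUSCFG for the Armada 3700 case below. A tiny standalone check of that arithmetic (illustrative only, not part of the patch):

```c
/* BAWR = 0x3 << 6 = 0xc0, BARD = 0x3 << 3 = 0x18, AHBBRST = 0x3 */
_Static_assert(((0x3 << 6) | (0x3 << 3) | 0x3) == 0xdb,
	       "unexpected USB_SBUSCFG default");
```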
#define DRIVER_DESC "EHCI orion driver"
#define hcd_to_orion_priv(h) ((struct orion_ehci_hcd *)hcd_to_ehci(h)->priv)
@@ -151,8 +163,31 @@ ehci_orion_conf_mbus_windows(struct usb_hcd *hcd,
}
}
+static int ehci_orion_drv_reset(struct usb_hcd *hcd)
+{
+ struct device *dev = hcd->self.controller;
+ int ret;
+
+ ret = ehci_setup(hcd);
+ if (ret)
+ return ret;
+
+ /*
+ * For SoCs without hlock, the sbuscfg value needs to be programmed to
+ * guarantee that the AHB master's bursts do not overrun or underrun
+ * the FIFO.
+ *
+ * The sbuscfg register has to be set after the USB controller reset,
+ * otherwise its value would be overridden to 0.
+ */
+ if (of_device_is_compatible(dev->of_node, "marvell,armada-3700-ehci"))
+ wrl(USB_SBUSCFG, USB_SBUSCFG_DEF_VAL);
+
+ return ret;
+}
+
static const struct ehci_driver_overrides orion_overrides __initconst = {
.extra_priv_size = sizeof(struct orion_ehci_hcd),
+ .reset = ehci_orion_drv_reset,
};
static int ehci_orion_drv_probe(struct platform_device *pdev)
@@ -310,6 +345,7 @@ static int ehci_orion_drv_remove(struct platform_device *pdev)
static const struct of_device_id ehci_orion_dt_ids[] = {
{ .compatible = "marvell,orion-ehci", },
+ { .compatible = "marvell,armada-3700-ehci", },
{},
};
MODULE_DEVICE_TABLE(of, ehci_orion_dt_ids);
diff --git a/drivers/usb/host/ehci-platform.c b/drivers/usb/host/ehci-platform.c
index a268d9e8d6cf..bc7b9be12f54 100644
--- a/drivers/usb/host/ehci-platform.c
+++ b/drivers/usb/host/ehci-platform.c
@@ -34,6 +34,7 @@
#include <linux/usb.h>
#include <linux/usb/hcd.h>
#include <linux/usb/ehci_pdriver.h>
+#include <linux/usb/of.h>
#include "ehci.h"
@@ -220,6 +221,9 @@ static int ehci_platform_probe(struct platform_device *dev)
if (IS_ERR(priv->phys[phy_num])) {
err = PTR_ERR(priv->phys[phy_num]);
goto err_put_hcd;
+ } else if (!hcd->phy) {
+ /* Avoiding phy_get() in usb_add_hcd() */
+ hcd->phy = priv->phys[phy_num];
}
}
@@ -297,6 +301,7 @@ static int ehci_platform_probe(struct platform_device *dev)
goto err_power;
device_wakeup_enable(hcd->self.controller);
+ device_enable_async_suspend(hcd->self.controller);
platform_set_drvdata(dev, hcd);
return err;
@@ -370,6 +375,7 @@ static int ehci_platform_resume(struct device *dev)
struct usb_ehci_pdata *pdata = dev_get_platdata(dev);
struct platform_device *pdev = to_platform_device(dev);
struct ehci_platform_priv *priv = hcd_to_ehci_priv(hcd);
+ struct device *companion_dev;
if (pdata->power_on) {
int err = pdata->power_on(pdev);
@@ -377,6 +383,10 @@ static int ehci_platform_resume(struct device *dev)
return err;
}
+ companion_dev = usb_of_get_companion_dev(hcd->self.controller);
+ if (companion_dev)
+ device_pm_wait_for_dev(hcd->self.controller, companion_dev);
+
ehci_resume(hcd, priv->reset_on_resume);
return 0;
}
diff --git a/drivers/usb/host/fotg210-hcd.c b/drivers/usb/host/fotg210-hcd.c
index 1c5b34b74860..ced08dc229ad 100644
--- a/drivers/usb/host/fotg210-hcd.c
+++ b/drivers/usb/host/fotg210-hcd.c
@@ -5047,7 +5047,7 @@ static int fotg210_run(struct usb_hcd *hcd)
/*
* hcc_params controls whether fotg210->regs->segment must (!!!)
* be used; it constrains QH/ITD/SITD and QTD locations.
- * pci_pool consistent memory always uses segment zero.
+ * dma_pool consistent memory always uses segment zero.
* streaming mappings for I/O buffers, like pci_map_single(),
* can return segments above 4GB, if the device allows.
*
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index b6daf2e69989..44924824fa41 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -231,7 +231,8 @@ static int ohci_urb_enqueue (
/* Start up the I/O watchdog timer, if it's not running */
if (!timer_pending(&ohci->io_watchdog) &&
- list_empty(&ohci->eds_in_use)) {
+ list_empty(&ohci->eds_in_use) &&
+ !(ohci->flags & OHCI_QUIRK_QEMU)) {
ohci->prev_frame_no = ohci_frame_no(ohci);
mod_timer(&ohci->io_watchdog,
jiffies + IO_WATCHDOG_DELAY);
@@ -994,7 +995,7 @@ static void ohci_stop (struct usb_hcd *hcd)
/*-------------------------------------------------------------------------*/
-#if defined(CONFIG_PM) || defined(CONFIG_PCI)
+#if defined(CONFIG_PM) || defined(CONFIG_USB_PCI)
/* must not be called from interrupt context */
int ohci_restart(struct ohci_hcd *ohci)
diff --git a/drivers/usb/host/ohci-pci.c b/drivers/usb/host/ohci-pci.c
index bb1509675727..a84aebe9b0a9 100644
--- a/drivers/usb/host/ohci-pci.c
+++ b/drivers/usb/host/ohci-pci.c
@@ -164,6 +164,15 @@ static int ohci_quirk_amd700(struct usb_hcd *hcd)
return 0;
}
+static int ohci_quirk_qemu(struct usb_hcd *hcd)
+{
+ struct ohci_hcd *ohci = hcd_to_ohci(hcd);
+
+ ohci->flags |= OHCI_QUIRK_QEMU;
+ ohci_dbg(ohci, "enabled qemu quirk\n");
+ return 0;
+}
+
/* List of quirks for OHCI */
static const struct pci_device_id ohci_pci_quirks[] = {
{
@@ -214,6 +223,13 @@ static const struct pci_device_id ohci_pci_quirks[] = {
PCI_DEVICE(PCI_VENDOR_ID_ATI, 0x4399),
.driver_data = (unsigned long)ohci_quirk_amd700,
},
+ {
+ .vendor = PCI_VENDOR_ID_APPLE,
+ .device = 0x003f,
+ .subvendor = PCI_SUBVENDOR_ID_REDHAT_QUMRANET,
+ .subdevice = PCI_SUBDEVICE_ID_QEMU,
+ .driver_data = (unsigned long)ohci_quirk_qemu,
+ },
/* FIXME for some of the early AMD 760 southbridges, OHCI
* won't work at all. blacklist them.
diff --git a/drivers/usb/host/ohci-platform.c b/drivers/usb/host/ohci-platform.c
index 898b74086c12..6368fce43197 100644
--- a/drivers/usb/host/ohci-platform.c
+++ b/drivers/usb/host/ohci-platform.c
@@ -183,6 +183,9 @@ static int ohci_platform_probe(struct platform_device *dev)
if (IS_ERR(priv->phys[phy_num])) {
err = PTR_ERR(priv->phys[phy_num]);
goto err_put_hcd;
+ } else if (!hcd->phy) {
+ /* Avoiding phy_get() in usb_add_hcd() */
+ hcd->phy = priv->phys[phy_num];
}
}
diff --git a/drivers/usb/host/ohci.h b/drivers/usb/host/ohci.h
index 37f1725e7a46..12742d002d2d 100644
--- a/drivers/usb/host/ohci.h
+++ b/drivers/usb/host/ohci.h
@@ -418,6 +418,7 @@ struct ohci_hcd {
#define OHCI_QUIRK_AMD_PLL 0x200 /* AMD PLL quirk*/
#define OHCI_QUIRK_AMD_PREFETCH 0x400 /* pre-fetch for ISO transfer */
#define OHCI_QUIRK_GLOBAL_SUSPEND 0x800 /* must suspend ports */
+#define OHCI_QUIRK_QEMU 0x1000 /* relax timing expectations */
// there are also chip quirks/bugs in init logic
@@ -438,7 +439,7 @@ struct ohci_hcd {
};
-#ifdef CONFIG_PCI
+#ifdef CONFIG_USB_PCI
static inline int quirk_nec(struct ohci_hcd *ohci)
{
return ohci->flags & OHCI_QUIRK_NEC;
diff --git a/drivers/usb/host/oxu210hp-hcd.c b/drivers/usb/host/oxu210hp-hcd.c
index bcf531c44c70..ed20fb34c897 100644
--- a/drivers/usb/host/oxu210hp-hcd.c
+++ b/drivers/usb/host/oxu210hp-hcd.c
@@ -2708,7 +2708,7 @@ static int oxu_run(struct usb_hcd *hcd)
/* hcc_params controls whether oxu->regs->segment must (!!!)
* be used; it constrains QH/ITD/SITD and QTD locations.
- * pci_pool consistent memory always uses segment zero.
+ * dma_pool consistent memory always uses segment zero.
* streaming mappings for I/O buffers, like pci_map_single(),
* can return segments above 4GB, if the device allows.
*
diff --git a/drivers/usb/host/pci-quirks.h b/drivers/usb/host/pci-quirks.h
index c622ddf21c94..0222195bd5b0 100644
--- a/drivers/usb/host/pci-quirks.h
+++ b/drivers/usb/host/pci-quirks.h
@@ -1,7 +1,7 @@
#ifndef __LINUX_USB_PCI_QUIRKS_H
#define __LINUX_USB_PCI_QUIRKS_H
-#ifdef CONFIG_PCI
+#ifdef CONFIG_USB_PCI
void uhci_reset_hc(struct pci_dev *pdev, unsigned long base);
int uhci_check_and_reset_hc(struct pci_dev *pdev, unsigned long base);
int usb_amd_find_chipset_info(void);
@@ -21,6 +21,6 @@ static inline void usb_amd_quirk_pll_enable(void) {}
static inline void usb_amd_dev_put(void) {}
static inline void usb_disable_xhci_ports(struct pci_dev *xhci_pdev) {}
static inline void sb800_prefetch(struct device *dev, int on) {}
-#endif /* CONFIG_PCI */
+#endif /* CONFIG_USB_PCI */
#endif /* __LINUX_USB_PCI_QUIRKS_H */
diff --git a/drivers/usb/host/uhci-hcd.c b/drivers/usb/host/uhci-hcd.c
index 683098afa93e..94b150196d4f 100644
--- a/drivers/usb/host/uhci-hcd.c
+++ b/drivers/usb/host/uhci-hcd.c
@@ -837,7 +837,7 @@ static int uhci_count_ports(struct usb_hcd *hcd)
static const char hcd_name[] = "uhci_hcd";
-#ifdef CONFIG_PCI
+#ifdef CONFIG_USB_PCI
#include "uhci-pci.c"
#define PCI_DRIVER uhci_pci_driver
#endif
diff --git a/drivers/usb/host/uhci-hcd.h b/drivers/usb/host/uhci-hcd.h
index 6f986d82472d..7fa318a3091d 100644
--- a/drivers/usb/host/uhci-hcd.h
+++ b/drivers/usb/host/uhci-hcd.h
@@ -530,7 +530,7 @@ static inline void uhci_writeb(const struct uhci_hcd *uhci, u8 val, int reg)
#else
/* Support non-PCI host controllers */
-#ifdef CONFIG_PCI
+#ifdef CONFIG_USB_PCI
/* Support PCI and non-PCI host controllers */
#define uhci_has_pci_registers(u) ((u)->io_addr != 0)
#else
diff --git a/drivers/usb/host/xhci-dbg.c b/drivers/usb/host/xhci-dbg.c
index 2b4a00fa735d..2c83b37ae8f2 100644
--- a/drivers/usb/host/xhci-dbg.c
+++ b/drivers/usb/host/xhci-dbg.c
@@ -254,157 +254,6 @@ void xhci_print_registers(struct xhci_hcd *xhci)
xhci_print_ports(xhci);
}
-void xhci_print_trb_offsets(struct xhci_hcd *xhci, union xhci_trb *trb)
-{
- int i;
- for (i = 0; i < 4; i++)
- xhci_dbg(xhci, "Offset 0x%x = 0x%x\n",
- i*4, trb->generic.field[i]);
-}
-
-/**
- * Debug a transfer request block (TRB).
- */
-void xhci_debug_trb(struct xhci_hcd *xhci, union xhci_trb *trb)
-{
- u64 address;
- u32 type = le32_to_cpu(trb->link.control) & TRB_TYPE_BITMASK;
-
- switch (type) {
- case TRB_TYPE(TRB_LINK):
- xhci_dbg(xhci, "Link TRB:\n");
- xhci_print_trb_offsets(xhci, trb);
-
- address = le64_to_cpu(trb->link.segment_ptr);
- xhci_dbg(xhci, "Next ring segment DMA address = 0x%llx\n", address);
-
- xhci_dbg(xhci, "Interrupter target = 0x%x\n",
- GET_INTR_TARGET(le32_to_cpu(trb->link.intr_target)));
- xhci_dbg(xhci, "Cycle bit = %u\n",
- le32_to_cpu(trb->link.control) & TRB_CYCLE);
- xhci_dbg(xhci, "Toggle cycle bit = %u\n",
- le32_to_cpu(trb->link.control) & LINK_TOGGLE);
- xhci_dbg(xhci, "No Snoop bit = %u\n",
- le32_to_cpu(trb->link.control) & TRB_NO_SNOOP);
- break;
- case TRB_TYPE(TRB_TRANSFER):
- address = le64_to_cpu(trb->trans_event.buffer);
- /*
- * FIXME: look at flags to figure out if it's an address or if
- * the data is directly in the buffer field.
- */
- xhci_dbg(xhci, "DMA address or buffer contents= %llu\n", address);
- break;
- case TRB_TYPE(TRB_COMPLETION):
- address = le64_to_cpu(trb->event_cmd.cmd_trb);
- xhci_dbg(xhci, "Command TRB pointer = %llu\n", address);
- xhci_dbg(xhci, "Completion status = %u\n",
- GET_COMP_CODE(le32_to_cpu(trb->event_cmd.status)));
- xhci_dbg(xhci, "Flags = 0x%x\n",
- le32_to_cpu(trb->event_cmd.flags));
- break;
- default:
- xhci_dbg(xhci, "Unknown TRB with TRB type ID %u\n",
- (unsigned int) type>>10);
- xhci_print_trb_offsets(xhci, trb);
- break;
- }
-}
-
-/**
- * Debug a segment with an xHCI ring.
- *
- * @return The Link TRB of the segment, or NULL if there is no Link TRB
- * (which is a bug, since all segments must have a Link TRB).
- *
- * Prints out all TRBs in the segment, even those after the Link TRB.
- *
- * XXX: should we print out TRBs that the HC owns? As long as we don't
- * write, that should be fine... We shouldn't expect that the memory pointed to
- * by the TRB is valid at all. Do we care about ones the HC owns? Probably,
- * for HC debugging.
- */
-void xhci_debug_segment(struct xhci_hcd *xhci, struct xhci_segment *seg)
-{
- int i;
- u64 addr = seg->dma;
- union xhci_trb *trb = seg->trbs;
-
- for (i = 0; i < TRBS_PER_SEGMENT; i++) {
- trb = &seg->trbs[i];
- xhci_dbg(xhci, "@%016llx %08x %08x %08x %08x\n", addr,
- lower_32_bits(le64_to_cpu(trb->link.segment_ptr)),
- upper_32_bits(le64_to_cpu(trb->link.segment_ptr)),
- le32_to_cpu(trb->link.intr_target),
- le32_to_cpu(trb->link.control));
- addr += sizeof(*trb);
- }
-}
-
-void xhci_dbg_ring_ptrs(struct xhci_hcd *xhci, struct xhci_ring *ring)
-{
- xhci_dbg(xhci, "Ring deq = %p (virt), 0x%llx (dma)\n",
- ring->dequeue,
- (unsigned long long)xhci_trb_virt_to_dma(ring->deq_seg,
- ring->dequeue));
- xhci_dbg(xhci, "Ring deq updated %u times\n",
- ring->deq_updates);
- xhci_dbg(xhci, "Ring enq = %p (virt), 0x%llx (dma)\n",
- ring->enqueue,
- (unsigned long long)xhci_trb_virt_to_dma(ring->enq_seg,
- ring->enqueue));
- xhci_dbg(xhci, "Ring enq updated %u times\n",
- ring->enq_updates);
-}
-
-/**
- * Debugging for an xHCI ring, which is a queue broken into multiple segments.
- *
- * Print out each segment in the ring. Check that the DMA address in
- * each link segment actually matches the segment's stored DMA address.
- * Check that the link end bit is only set at the end of the ring.
- * Check that the dequeue and enqueue pointers point to real data in this ring
- * (not some other ring).
- */
-void xhci_debug_ring(struct xhci_hcd *xhci, struct xhci_ring *ring)
-{
- /* FIXME: Throw an error if any segment doesn't have a Link TRB */
- struct xhci_segment *seg;
- struct xhci_segment *first_seg = ring->first_seg;
- xhci_debug_segment(xhci, first_seg);
-
- if (!ring->enq_updates && !ring->deq_updates) {
- xhci_dbg(xhci, " Ring has not been updated\n");
- return;
- }
- for (seg = first_seg->next; seg != first_seg; seg = seg->next)
- xhci_debug_segment(xhci, seg);
-}
-
-void xhci_dbg_ep_rings(struct xhci_hcd *xhci,
- unsigned int slot_id, unsigned int ep_index,
- struct xhci_virt_ep *ep)
-{
- int i;
- struct xhci_ring *ring;
-
- if (ep->ep_state & EP_HAS_STREAMS) {
- for (i = 1; i < ep->stream_info->num_streams; i++) {
- ring = ep->stream_info->stream_rings[i];
- xhci_dbg(xhci, "Dev %d endpoint %d stream ID %d:\n",
- slot_id, ep_index, i);
- xhci_debug_segment(xhci, ring->deq_seg);
- }
- } else {
- ring = ep->ring;
- if (!ring)
- return;
- xhci_dbg(xhci, "Dev %d endpoint ring %d:\n",
- slot_id, ep_index);
- xhci_debug_segment(xhci, ring->deq_seg);
- }
-}
-
void xhci_dbg_erst(struct xhci_hcd *xhci, struct xhci_erst *erst)
{
u64 addr = erst->erst_dma_addr;
@@ -434,166 +283,13 @@ void xhci_dbg_cmd_ptrs(struct xhci_hcd *xhci)
upper_32_bits(val));
}
-/* Print the last 32 bytes for 64-byte contexts */
-static void dbg_rsvd64(struct xhci_hcd *xhci, u64 *ctx, dma_addr_t dma)
-{
- int i;
- for (i = 0; i < 4; i++) {
- xhci_dbg(xhci, "@%p (virt) @%08llx "
- "(dma) %#08llx - rsvd64[%d]\n",
- &ctx[4 + i], (unsigned long long)dma,
- ctx[4 + i], i);
- dma += 8;
- }
-}
-
char *xhci_get_slot_state(struct xhci_hcd *xhci,
struct xhci_container_ctx *ctx)
{
struct xhci_slot_ctx *slot_ctx = xhci_get_slot_ctx(xhci, ctx);
+ int state = GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state));
- switch (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state))) {
- case SLOT_STATE_ENABLED:
- return "enabled/disabled";
- case SLOT_STATE_DEFAULT:
- return "default";
- case SLOT_STATE_ADDRESSED:
- return "addressed";
- case SLOT_STATE_CONFIGURED:
- return "configured";
- default:
- return "reserved";
- }
-}
-
-static void xhci_dbg_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx)
-{
- /* Fields are 32 bits wide, DMA addresses are in bytes */
- int field_size = 32 / 8;
- int i;
-
- struct xhci_slot_ctx *slot_ctx = xhci_get_slot_ctx(xhci, ctx);
- dma_addr_t dma = ctx->dma +
- ((unsigned long)slot_ctx - (unsigned long)ctx->bytes);
- int csz = HCC_64BYTE_CONTEXT(xhci->hcc_params);
-
- xhci_dbg(xhci, "Slot Context:\n");
- xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - dev_info\n",
- &slot_ctx->dev_info,
- (unsigned long long)dma, slot_ctx->dev_info);
- dma += field_size;
- xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - dev_info2\n",
- &slot_ctx->dev_info2,
- (unsigned long long)dma, slot_ctx->dev_info2);
- dma += field_size;
- xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - tt_info\n",
- &slot_ctx->tt_info,
- (unsigned long long)dma, slot_ctx->tt_info);
- dma += field_size;
- xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - dev_state\n",
- &slot_ctx->dev_state,
- (unsigned long long)dma, slot_ctx->dev_state);
- dma += field_size;
- for (i = 0; i < 4; i++) {
- xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd[%d]\n",
- &slot_ctx->reserved[i], (unsigned long long)dma,
- slot_ctx->reserved[i], i);
- dma += field_size;
- }
-
- if (csz)
- dbg_rsvd64(xhci, (u64 *)slot_ctx, dma);
-}
-
-static void xhci_dbg_ep_ctx(struct xhci_hcd *xhci,
- struct xhci_container_ctx *ctx,
- unsigned int last_ep)
-{
- int i, j;
- int last_ep_ctx = 31;
- /* Fields are 32 bits wide, DMA addresses are in bytes */
- int field_size = 32 / 8;
- int csz = HCC_64BYTE_CONTEXT(xhci->hcc_params);
-
- if (last_ep < 31)
- last_ep_ctx = last_ep + 1;
- for (i = 0; i < last_ep_ctx; i++) {
- unsigned int epaddr = xhci_get_endpoint_address(i);
- struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, ctx, i);
- dma_addr_t dma = ctx->dma +
- ((unsigned long)ep_ctx - (unsigned long)ctx->bytes);
-
- xhci_dbg(xhci, "%s Endpoint %02d Context (ep_index %02d):\n",
- usb_endpoint_out(epaddr) ? "OUT" : "IN",
- epaddr & USB_ENDPOINT_NUMBER_MASK, i);
- xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - ep_info\n",
- &ep_ctx->ep_info,
- (unsigned long long)dma, ep_ctx->ep_info);
- dma += field_size;
- xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - ep_info2\n",
- &ep_ctx->ep_info2,
- (unsigned long long)dma, ep_ctx->ep_info2);
- dma += field_size;
- xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08llx - deq\n",
- &ep_ctx->deq,
- (unsigned long long)dma, ep_ctx->deq);
- dma += 2*field_size;
- xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - tx_info\n",
- &ep_ctx->tx_info,
- (unsigned long long)dma, ep_ctx->tx_info);
- dma += field_size;
- for (j = 0; j < 3; j++) {
- xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd[%d]\n",
- &ep_ctx->reserved[j],
- (unsigned long long)dma,
- ep_ctx->reserved[j], j);
- dma += field_size;
- }
-
- if (csz)
- dbg_rsvd64(xhci, (u64 *)ep_ctx, dma);
- }
-}
-
-void xhci_dbg_ctx(struct xhci_hcd *xhci,
- struct xhci_container_ctx *ctx,
- unsigned int last_ep)
-{
- int i;
- /* Fields are 32 bits wide, DMA addresses are in bytes */
- int field_size = 32 / 8;
- dma_addr_t dma = ctx->dma;
- int csz = HCC_64BYTE_CONTEXT(xhci->hcc_params);
-
- if (ctx->type == XHCI_CTX_TYPE_INPUT) {
- struct xhci_input_control_ctx *ctrl_ctx =
- xhci_get_input_control_ctx(ctx);
- if (!ctrl_ctx) {
- xhci_warn(xhci, "Could not get input context, bad type.\n");
- return;
- }
-
- xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - drop flags\n",
- &ctrl_ctx->drop_flags, (unsigned long long)dma,
- ctrl_ctx->drop_flags);
- dma += field_size;
- xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - add flags\n",
- &ctrl_ctx->add_flags, (unsigned long long)dma,
- ctrl_ctx->add_flags);
- dma += field_size;
- for (i = 0; i < 6; i++) {
- xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd2[%d]\n",
- &ctrl_ctx->rsvd2[i], (unsigned long long)dma,
- ctrl_ctx->rsvd2[i], i);
- dma += field_size;
- }
-
- if (csz)
- dbg_rsvd64(xhci, (u64 *)ctrl_ctx, dma);
- }
-
- xhci_dbg_slot_ctx(xhci, ctx);
- xhci_dbg_ep_ctx(xhci, ctx, last_ep);
+ return xhci_slot_state_string(state);
}
void xhci_dbg_trace(struct xhci_hcd *xhci, void (*trace)(struct va_format *),
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 3bddeaa1e2d7..5e3e9d4c6956 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -392,10 +392,8 @@ static int xhci_stop_device(struct xhci_hcd *xhci, int slot_id, int suspend)
trace_xhci_stop_device(virt_dev);
cmd = xhci_alloc_command(xhci, false, true, GFP_NOIO);
- if (!cmd) {
- xhci_dbg(xhci, "Couldn't allocate command structure.\n");
+ if (!cmd)
return -ENOMEM;
- }
spin_lock_irqsave(&xhci->lock, flags);
for (i = LAST_EP_INDEX; i > 0; i--) {
@@ -540,6 +538,119 @@ static int xhci_get_ports(struct usb_hcd *hcd, __le32 __iomem ***port_array)
return max_ports;
}
+static __le32 __iomem *xhci_get_port_io_addr(struct usb_hcd *hcd, int index)
+{
+ __le32 __iomem **port_array;
+
+ xhci_get_ports(hcd, &port_array);
+ return port_array[index];
+}
+
+/*
+ * xhci_set_port_power() must be called with xhci->lock held.
+ * It will release and re-acquire the lock while calling the ACPI
+ * method.
+ */
+static void xhci_set_port_power(struct xhci_hcd *xhci, struct usb_hcd *hcd,
+ u16 index, bool on, unsigned long *flags)
+{
+ __le32 __iomem *addr;
+ u32 temp;
+
+ addr = xhci_get_port_io_addr(hcd, index);
+ temp = readl(addr);
+ temp = xhci_port_state_to_neutral(temp);
+ if (on) {
+ /* Power on */
+ writel(temp | PORT_POWER, addr);
+ temp = readl(addr);
+ xhci_dbg(xhci, "set port power, actual port %d status = 0x%x\n",
+ index, temp);
+ } else {
+ /* Power off */
+ writel(temp & ~PORT_POWER, addr);
+ }
+
+ spin_unlock_irqrestore(&xhci->lock, *flags);
+ temp = usb_acpi_power_manageable(hcd->self.root_hub,
+ index);
+ if (temp)
+ usb_acpi_set_power_state(hcd->self.root_hub,
+ index, on);
+ spin_lock_irqsave(&xhci->lock, *flags);
+}
+
+static void xhci_port_set_test_mode(struct xhci_hcd *xhci,
+ u16 test_mode, u16 wIndex)
+{
+ u32 temp;
+ __le32 __iomem *addr;
+
+ /* xhci only supports test mode for usb2 ports, i.e. xhci->main_hcd */
+ addr = xhci_get_port_io_addr(xhci->main_hcd, wIndex);
+ temp = readl(addr + PORTPMSC);
+ temp |= test_mode << PORT_TEST_MODE_SHIFT;
+ writel(temp, addr + PORTPMSC);
+ xhci->test_mode = test_mode;
+ if (test_mode == TEST_FORCE_EN)
+ xhci_start(xhci);
+}
+
+static int xhci_enter_test_mode(struct xhci_hcd *xhci,
+ u16 test_mode, u16 wIndex, unsigned long *flags)
+{
+ int i, retval;
+
+ /* Disable all Device Slots */
+ xhci_dbg(xhci, "Disable all slots\n");
+ for (i = 1; i <= HCS_MAX_SLOTS(xhci->hcs_params1); i++) {
+ retval = xhci_disable_slot(xhci, NULL, i);
+ if (retval)
+ xhci_err(xhci, "Failed to disable slot %d, %d. Enter test mode anyway\n",
+ i, retval);
+ }
+ /* Put all ports to the Disable state by clear PP */
+ xhci_dbg(xhci, "Disable all ports (PP = 0)\n");
+ /* Power off USB3 ports */
+ for (i = 0; i < xhci->num_usb3_ports; i++)
+ xhci_set_port_power(xhci, xhci->shared_hcd, i, false, flags);
+ /* Power off USB2 ports */
+ for (i = 0; i < xhci->num_usb2_ports; i++)
+ xhci_set_port_power(xhci, xhci->main_hcd, i, false, flags);
+ /* Stop the controller */
+ xhci_dbg(xhci, "Stop controller\n");
+ retval = xhci_halt(xhci);
+ if (retval)
+ return retval;
+ /* Disable runtime PM for test mode */
+ pm_runtime_forbid(xhci_to_hcd(xhci)->self.controller);
+ /* Set PORTPMSC.PTC field to enter selected test mode */
+ /* Port is selected by wIndex. port_id = wIndex + 1 */
+ xhci_dbg(xhci, "Enter Test Mode: %d, Port_id=%d\n",
+ test_mode, wIndex + 1);
+ xhci_port_set_test_mode(xhci, test_mode, wIndex);
+ return retval;
+}
+
+static int xhci_exit_test_mode(struct xhci_hcd *xhci)
+{
+ int retval;
+
+ if (!xhci->test_mode) {
+ xhci_err(xhci, "Not in test mode, do nothing.\n");
+ return 0;
+ }
+ if (xhci->test_mode == TEST_FORCE_EN &&
+ !(xhci->xhc_state & XHCI_STATE_HALTED)) {
+ retval = xhci_halt(xhci);
+ if (retval)
+ return retval;
+ }
+ pm_runtime_allow(xhci_to_hcd(xhci)->self.controller);
+ xhci->test_mode = 0;
+ return xhci_reset(xhci);
+}
+
void xhci_set_link_state(struct xhci_hcd *xhci, __le32 __iomem **port_array,
int port_id, u32 link_state)
{
@@ -895,6 +1006,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
u16 link_state = 0;
u16 wake_mask = 0;
u16 timeout = 0;
+ u16 test_mode = 0;
max_ports = xhci_get_ports(hcd, &port_array);
bus_state = &xhci->bus_state[hcd_index(hcd)];
@@ -935,7 +1047,8 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
goto error;
wIndex--;
temp = readl(port_array[wIndex]);
- if (temp == 0xffffffff) {
+ if (temp == ~(u32)0) {
+ xhci_hc_died(xhci);
retval = -ENODEV;
break;
}
@@ -968,6 +1081,8 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
link_state = (wIndex & 0xff00) >> 3;
if (wValue == USB_PORT_FEAT_REMOTE_WAKE_MASK)
wake_mask = wIndex & 0xff00;
+ if (wValue == USB_PORT_FEAT_TEST)
+ test_mode = (wIndex & 0xff00) >> 8;
/* The MSB of wIndex is the U1/U2 timeout */
timeout = (wIndex & 0xff00) >> 8;
wIndex &= 0xff;
@@ -975,7 +1090,8 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
goto error;
wIndex--;
temp = readl(port_array[wIndex]);
- if (temp == 0xffffffff) {
+ if (temp == ~(u32)0) {
+ xhci_hc_died(xhci);
retval = -ENODEV;
break;
}
@@ -1092,18 +1208,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
* However, hub_wq will ignore the roothub events until
* the roothub is registered.
*/
- writel(temp | PORT_POWER, port_array[wIndex]);
-
- temp = readl(port_array[wIndex]);
- xhci_dbg(xhci, "set port power, actual port %d status = 0x%x\n", wIndex, temp);
-
- spin_unlock_irqrestore(&xhci->lock, flags);
- temp = usb_acpi_power_manageable(hcd->self.root_hub,
- wIndex);
- if (temp)
- usb_acpi_set_power_state(hcd->self.root_hub,
- wIndex, true);
- spin_lock_irqsave(&xhci->lock, flags);
+ xhci_set_port_power(xhci, hcd, wIndex, true, &flags);
break;
case USB_PORT_FEAT_RESET:
temp = (temp | PORT_RESET);
@@ -1142,6 +1247,15 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
temp |= PORT_U2_TIMEOUT(timeout);
writel(temp, port_array[wIndex] + PORTPMSC);
break;
+ case USB_PORT_FEAT_TEST:
+ /* 4.19.6 Port Test Modes (USB2 Test Mode) */
+ if (hcd->speed != HCD_USB2)
+ goto error;
+ if (test_mode > TEST_FORCE_EN || test_mode < TEST_J)
+ goto error;
+ retval = xhci_enter_test_mode(xhci, test_mode, wIndex,
+ &flags);
+ break;
default:
goto error;
}
@@ -1153,7 +1267,8 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
goto error;
wIndex--;
temp = readl(port_array[wIndex]);
- if (temp == 0xffffffff) {
+ if (temp == ~(u32)0) {
+ xhci_hc_died(xhci);
retval = -ENODEV;
break;
}
@@ -1207,15 +1322,10 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
port_array[wIndex], temp);
break;
case USB_PORT_FEAT_POWER:
- writel(temp & ~PORT_POWER, port_array[wIndex]);
-
- spin_unlock_irqrestore(&xhci->lock, flags);
- temp = usb_acpi_power_manageable(hcd->self.root_hub,
- wIndex);
- if (temp)
- usb_acpi_set_power_state(hcd->self.root_hub,
- wIndex, false);
- spin_lock_irqsave(&xhci->lock, flags);
+ xhci_set_port_power(xhci, hcd, wIndex, false, &flags);
+ break;
+ case USB_PORT_FEAT_TEST:
+ retval = xhci_exit_test_mode(xhci);
break;
default:
goto error;
@@ -1269,7 +1379,8 @@ int xhci_hub_status_data(struct usb_hcd *hcd, char *buf)
/* For each port, did anything change? If so, set that bit in buf. */
for (i = 0; i < max_ports; i++) {
temp = readl(port_array[i]);
- if (temp == 0xffffffff) {
+ if (temp == ~(u32)0) {
+ xhci_hc_died(xhci);
retval = -ENODEV;
break;
}
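
Aside on the hub changes above: a memory-mapped register on a controller that has died or been surprise-removed reads back as all ones, which is why the checks were switched from the literal 0xffffffff to ~(u32)0 and now escalate through xhci_hc_died() before returning -ENODEV. A minimal sketch of that pattern, using a hypothetical helper name (example_read_portsc is not part of the patch):

/* Hypothetical illustration of the "all ones means dead host" check. */
static int example_read_portsc(struct xhci_hcd *xhci, __le32 __iomem *addr,
			       u32 *portsc)
{
	u32 temp = readl(addr);

	if (temp == ~(u32)0) {		/* register space is gone */
		xhci_hc_died(xhci);	/* abort commands, give back URBs */
		return -ENODEV;
	}

	*portsc = temp;
	return 0;
}
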
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index ba1853f4e407..bbe22bcc550a 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -288,6 +288,8 @@ void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring)
if (!ring)
return;
+ trace_xhci_ring_free(ring);
+
if (ring->first_seg) {
if (ring->type == TYPE_STREAM)
xhci_remove_stream_mapping(ring);
@@ -313,9 +315,6 @@ static void xhci_initialize_ring_info(struct xhci_ring *ring,
* handling ring expansion, set the cycle state equal to the old ring.
*/
ring->cycle_state = cycle_state;
- /* Not necessary for new rings, but needed for re-initialized rings */
- ring->enq_updates = 0;
- ring->deq_updates = 0;
/*
* Each segment has a link TRB, and leave an extra TRB for SW
@@ -400,6 +399,7 @@ static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
cpu_to_le32(LINK_TOGGLE);
}
xhci_initialize_ring_info(ring, cycle_state);
+ trace_xhci_ring_alloc(ring);
return ring;
fail:
@@ -504,6 +504,7 @@ int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring,
}
xhci_link_rings(xhci, ring, first, last, num_segs);
+ trace_xhci_ring_expansion(ring);
xhci_dbg_trace(xhci, trace_xhci_dbg_ring_expansion,
"ring expansion succeed, now has %d segments",
ring->num_segs);
@@ -586,7 +587,7 @@ static void xhci_free_stream_ctx(struct xhci_hcd *xhci,
unsigned int num_stream_ctxs,
struct xhci_stream_ctx *stream_ctx, dma_addr_t dma)
{
- struct device *dev = xhci_to_hcd(xhci)->self.controller;
+ struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
size_t size = sizeof(struct xhci_stream_ctx) * num_stream_ctxs;
if (size > MEDIUM_STREAM_ARRAY_SIZE)
@@ -614,7 +615,7 @@ static struct xhci_stream_ctx *xhci_alloc_stream_ctx(struct xhci_hcd *xhci,
unsigned int num_stream_ctxs, dma_addr_t *dma,
gfp_t mem_flags)
{
- struct device *dev = xhci_to_hcd(xhci)->self.controller;
+ struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
size_t size = sizeof(struct xhci_stream_ctx) * num_stream_ctxs;
if (size > MEDIUM_STREAM_ARRAY_SIZE)
@@ -1502,6 +1503,17 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
*/
max_esit_payload = xhci_get_max_esit_payload(udev, ep);
interval = xhci_get_endpoint_interval(udev, ep);
+
+ /* Periodic endpoint bInterval limit quirk */
+ if (usb_endpoint_xfer_int(&ep->desc) ||
+ usb_endpoint_xfer_isoc(&ep->desc)) {
+ if ((xhci->quirks & XHCI_LIMIT_ENDPOINT_INTERVAL_7) &&
+ udev->speed >= USB_SPEED_HIGH &&
+ interval >= 7) {
+ interval = 6;
+ }
+ }
+
mult = xhci_get_endpoint_mult(udev, ep);
max_packet = usb_endpoint_maxp(&ep->desc);
max_burst = xhci_get_endpoint_max_burst(udev, ep);
@@ -1686,7 +1698,7 @@ void xhci_slot_copy(struct xhci_hcd *xhci,
static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags)
{
int i;
- struct device *dev = xhci_to_hcd(xhci)->self.controller;
+ struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
int num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
@@ -1709,36 +1721,27 @@ static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags)
if (!xhci->scratchpad->sp_buffers)
goto fail_sp3;
- xhci->scratchpad->sp_dma_buffers =
- kzalloc(sizeof(dma_addr_t) * num_sp, flags);
-
- if (!xhci->scratchpad->sp_dma_buffers)
- goto fail_sp4;
-
xhci->dcbaa->dev_context_ptrs[0] = cpu_to_le64(xhci->scratchpad->sp_dma);
for (i = 0; i < num_sp; i++) {
dma_addr_t dma;
void *buf = dma_alloc_coherent(dev, xhci->page_size, &dma,
flags);
if (!buf)
- goto fail_sp5;
+ goto fail_sp4;
xhci->scratchpad->sp_array[i] = dma;
xhci->scratchpad->sp_buffers[i] = buf;
- xhci->scratchpad->sp_dma_buffers[i] = dma;
}
return 0;
- fail_sp5:
+ fail_sp4:
for (i = i - 1; i >= 0; i--) {
dma_free_coherent(dev, xhci->page_size,
xhci->scratchpad->sp_buffers[i],
- xhci->scratchpad->sp_dma_buffers[i]);
+ xhci->scratchpad->sp_array[i]);
}
- kfree(xhci->scratchpad->sp_dma_buffers);
- fail_sp4:
kfree(xhci->scratchpad->sp_buffers);
fail_sp3:
@@ -1758,7 +1761,7 @@ static void scratchpad_free(struct xhci_hcd *xhci)
{
int num_sp;
int i;
- struct device *dev = xhci_to_hcd(xhci)->self.controller;
+ struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
if (!xhci->scratchpad)
return;
@@ -1768,9 +1771,8 @@ static void scratchpad_free(struct xhci_hcd *xhci)
for (i = 0; i < num_sp; i++) {
dma_free_coherent(dev, xhci->page_size,
xhci->scratchpad->sp_buffers[i],
- xhci->scratchpad->sp_dma_buffers[i]);
+ xhci->scratchpad->sp_array[i]);
}
- kfree(xhci->scratchpad->sp_dma_buffers);
kfree(xhci->scratchpad->sp_buffers);
dma_free_coherent(dev, num_sp * sizeof(u64),
xhci->scratchpad->sp_array,
@@ -1831,7 +1833,7 @@ void xhci_free_command(struct xhci_hcd *xhci,
void xhci_mem_cleanup(struct xhci_hcd *xhci)
{
- struct device *dev = xhci_to_hcd(xhci)->self.controller;
+ struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
int size;
int i, j, num_ports;
@@ -2373,7 +2375,7 @@ static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
{
dma_addr_t dma;
- struct device *dev = xhci_to_hcd(xhci)->self.controller;
+ struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
unsigned int val, val2;
u64 val_64;
struct xhci_segment *seg;
@@ -2419,7 +2421,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
writel(val, &xhci->op_regs->config_reg);
/*
- * Section 5.4.8 - doorbell array must be
+ * xHCI section 5.4.6 - doorbell array must be
* "physically contiguous and 64-byte (cache line) aligned".
*/
xhci->dcbaa = dma_alloc_coherent(dev, sizeof(*xhci->dcbaa), &dma,
@@ -2480,7 +2482,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
(xhci->cmd_ring->first_seg->dma & (u64) ~CMD_RING_RSVD_BITS) |
xhci->cmd_ring->cycle_state;
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
- "// Setting command ring address to 0x%x", val);
+ "// Setting command ring address to 0x%016llx", val_64);
xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
xhci_dbg_cmd_ptrs(xhci);
@@ -2601,7 +2603,6 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
return 0;
fail:
- xhci_warn(xhci, "Couldn't initialize memory\n");
xhci_halt(xhci);
xhci_reset(xhci);
xhci_mem_cleanup(xhci);
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index fc99f51d12e1..7b86508ac8cf 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -199,6 +199,9 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
pdev->device == 0x1042)
xhci->quirks |= XHCI_BROKEN_STREAMS;
+ if (pdev->vendor == PCI_VENDOR_ID_TI && pdev->device == 0x8241)
+ xhci->quirks |= XHCI_LIMIT_ENDPOINT_INTERVAL_7;
+
if (xhci->quirks & XHCI_RESET_ON_RESUME)
xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
"QUIRK: Resetting on resume");
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index 6ed468fa7d5e..7c2a9e7c8e0f 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -14,6 +14,7 @@
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
+#include <linux/pci.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/usb/phy.h>
@@ -54,6 +55,16 @@ static int xhci_priv_init_quirk(struct usb_hcd *hcd)
return priv->init_quirk(hcd);
}
+static int xhci_priv_resume_quirk(struct usb_hcd *hcd)
+{
+ struct xhci_plat_priv *priv = hcd_to_xhci_priv(hcd);
+
+ if (!priv->resume_quirk)
+ return 0;
+
+ return priv->resume_quirk(hcd);
+}
+
static void xhci_plat_quirks(struct device *dev, struct xhci_hcd *xhci)
{
/*
@@ -92,18 +103,21 @@ static const struct xhci_plat_priv xhci_plat_renesas_rcar_gen2 = {
.firmware_name = XHCI_RCAR_FIRMWARE_NAME_V1,
.init_quirk = xhci_rcar_init_quirk,
.plat_start = xhci_rcar_start,
+ .resume_quirk = xhci_rcar_resume_quirk,
};
static const struct xhci_plat_priv xhci_plat_renesas_rcar_gen3 = {
.firmware_name = XHCI_RCAR_FIRMWARE_NAME_V2,
.init_quirk = xhci_rcar_init_quirk,
.plat_start = xhci_rcar_start,
+ .resume_quirk = xhci_rcar_resume_quirk,
};
static const struct xhci_plat_priv xhci_plat_renesas_rcar_r8a7796 = {
.firmware_name = XHCI_RCAR_FIRMWARE_NAME_V3,
.init_quirk = xhci_rcar_init_quirk,
.plat_start = xhci_rcar_start,
+ .resume_quirk = xhci_rcar_resume_quirk,
};
static const struct of_device_id usb_xhci_of_match[] = {
@@ -148,6 +162,7 @@ static int xhci_plat_probe(struct platform_device *pdev)
{
const struct of_device_id *match;
const struct hc_driver *driver;
+ struct device *sysdev;
struct xhci_hcd *xhci;
struct resource *res;
struct usb_hcd *hcd;
@@ -164,24 +179,47 @@ static int xhci_plat_probe(struct platform_device *pdev)
if (irq < 0)
return -ENODEV;
+ /*
+ * sysdev must point to a device that is known to the system firmware
+ * or PCI hardware. We handle these three cases here:
+ * 1. xhci_plat comes from firmware
+ * 2. xhci_plat is child of a device from firmware (dwc3-plat)
+ * 3. xhci_plat is grandchild of a pci device (dwc3-pci)
+ */
+ sysdev = &pdev->dev;
+ if (sysdev->parent && !sysdev->of_node && sysdev->parent->of_node)
+ sysdev = sysdev->parent;
+#ifdef CONFIG_PCI
+ else if (sysdev->parent && sysdev->parent->parent &&
+ sysdev->parent->parent->bus == &pci_bus_type)
+ sysdev = sysdev->parent->parent;
+#endif
+
/* Try to set 64-bit DMA first */
- if (!pdev->dev.dma_mask)
+ if (WARN_ON(!sysdev->dma_mask))
/* Platform did not initialize dma_mask */
- ret = dma_coerce_mask_and_coherent(&pdev->dev,
+ ret = dma_coerce_mask_and_coherent(sysdev,
DMA_BIT_MASK(64));
else
- ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ ret = dma_set_mask_and_coherent(sysdev, DMA_BIT_MASK(64));
/* If setting 64-bit DMA mask fails, fall back to 32-bit DMA mask */
if (ret) {
- ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ ret = dma_set_mask_and_coherent(sysdev, DMA_BIT_MASK(32));
if (ret)
return ret;
}
- hcd = usb_create_hcd(driver, &pdev->dev, dev_name(&pdev->dev));
- if (!hcd)
- return -ENOMEM;
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_get_noresume(&pdev->dev);
+
+ hcd = __usb_create_hcd(driver, sysdev, &pdev->dev,
+ dev_name(&pdev->dev), NULL);
+ if (!hcd) {
+ ret = -ENOMEM;
+ goto disable_runtime;
+ }
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
hcd->regs = devm_ioremap_resource(&pdev->dev, res);
@@ -222,20 +260,20 @@ static int xhci_plat_probe(struct platform_device *pdev)
xhci->clk = clk;
xhci->main_hcd = hcd;
- xhci->shared_hcd = usb_create_shared_hcd(driver, &pdev->dev,
+ xhci->shared_hcd = __usb_create_hcd(driver, sysdev, &pdev->dev,
dev_name(&pdev->dev), hcd);
if (!xhci->shared_hcd) {
ret = -ENOMEM;
goto disable_clk;
}
- if (device_property_read_bool(&pdev->dev, "usb3-lpm-capable"))
+ if (device_property_read_bool(sysdev, "usb3-lpm-capable"))
xhci->quirks |= XHCI_LPM_SUPPORT;
if (device_property_read_bool(&pdev->dev, "quirk-broken-port-ped"))
xhci->quirks |= XHCI_BROKEN_PORT_PED;
- hcd->usb_phy = devm_usb_get_phy_by_phandle(&pdev->dev, "usb-phy", 0);
+ hcd->usb_phy = devm_usb_get_phy_by_phandle(sysdev, "usb-phy", 0);
if (IS_ERR(hcd->usb_phy)) {
ret = PTR_ERR(hcd->usb_phy);
if (ret == -EPROBE_DEFER)
@@ -258,6 +296,15 @@ static int xhci_plat_probe(struct platform_device *pdev)
if (ret)
goto dealloc_usb2_hcd;
+ device_enable_async_suspend(&pdev->dev);
+ pm_runtime_put_noidle(&pdev->dev);
+
+ /*
+ * Prevent runtime PM from being on by default; users should enable
+ * runtime PM via power/control in sysfs.
+ */
+ pm_runtime_forbid(&pdev->dev);
+
return 0;
@@ -277,6 +324,10 @@ disable_clk:
put_hcd:
usb_put_hcd(hcd);
+disable_runtime:
+ pm_runtime_put_noidle(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
return ret;
}
@@ -298,14 +349,17 @@ static int xhci_plat_remove(struct platform_device *dev)
clk_disable_unprepare(clk);
usb_put_hcd(hcd);
+ pm_runtime_set_suspended(&dev->dev);
+ pm_runtime_disable(&dev->dev);
+
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int xhci_plat_suspend(struct device *dev)
+static int __maybe_unused xhci_plat_suspend(struct device *dev)
{
struct usb_hcd *hcd = dev_get_drvdata(dev);
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ int ret;
/*
* xhci_suspend() needs `do_wakeup` to know whether host is allowed
@@ -315,24 +369,53 @@ static int xhci_plat_suspend(struct device *dev)
* reconsider this when xhci_plat_suspend enlarges its scope, e.g.,
* also applies to runtime suspend.
*/
- return xhci_suspend(xhci, device_may_wakeup(dev));
+ ret = xhci_suspend(xhci, device_may_wakeup(dev));
+
+ if (!device_may_wakeup(dev) && !IS_ERR(xhci->clk))
+ clk_disable_unprepare(xhci->clk);
+
+ return ret;
}
-static int xhci_plat_resume(struct device *dev)
+static int __maybe_unused xhci_plat_resume(struct device *dev)
{
struct usb_hcd *hcd = dev_get_drvdata(dev);
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ int ret;
+
+ if (!device_may_wakeup(dev) && !IS_ERR(xhci->clk))
+ clk_prepare_enable(xhci->clk);
+
+ ret = xhci_priv_resume_quirk(hcd);
+ if (ret)
+ return ret;
+
+ return xhci_resume(xhci, 0);
+}
+
+static int __maybe_unused xhci_plat_runtime_suspend(struct device *dev)
+{
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+
+ return xhci_suspend(xhci, true);
+}
+
+static int __maybe_unused xhci_plat_runtime_resume(struct device *dev)
+{
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
return xhci_resume(xhci, 0);
}
static const struct dev_pm_ops xhci_plat_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(xhci_plat_suspend, xhci_plat_resume)
+
+ SET_RUNTIME_PM_OPS(xhci_plat_runtime_suspend,
+ xhci_plat_runtime_resume,
+ NULL)
};
-#define DEV_PM_OPS (&xhci_plat_pm_ops)
-#else
-#define DEV_PM_OPS NULL
-#endif /* CONFIG_PM */
static const struct acpi_device_id usb_xhci_acpi_match[] = {
/* XHCI-compliant USB Controller */
@@ -347,7 +430,7 @@ static struct platform_driver usb_xhci_driver = {
.shutdown = usb_hcd_platform_shutdown,
.driver = {
.name = "xhci-hcd",
- .pm = DEV_PM_OPS,
+ .pm = &xhci_plat_pm_ops,
.of_match_table = of_match_ptr(usb_xhci_of_match),
.acpi_match_table = ACPI_PTR(usb_xhci_acpi_match),
},
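
A note on the PM rework in xhci-plat.c above: with the callbacks marked __maybe_unused and both macro families used in one dev_pm_ops, the old #ifdef CONFIG_PM_SLEEP / DEV_PM_OPS indirection is no longer needed, because the macros expand to nothing when power management is configured out. A generic sketch of the pattern (the foo_* names are placeholders, not xhci symbols):

static int __maybe_unused foo_suspend(struct device *dev) { return 0; }
static int __maybe_unused foo_resume(struct device *dev) { return 0; }
static int __maybe_unused foo_runtime_suspend(struct device *dev) { return 0; }
static int __maybe_unused foo_runtime_resume(struct device *dev) { return 0; }

static const struct dev_pm_ops foo_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
	SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
};
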
diff --git a/drivers/usb/host/xhci-plat.h b/drivers/usb/host/xhci-plat.h
index 9af0cb48053f..29b227895b07 100644
--- a/drivers/usb/host/xhci-plat.h
+++ b/drivers/usb/host/xhci-plat.h
@@ -17,6 +17,7 @@ struct xhci_plat_priv {
const char *firmware_name;
void (*plat_start)(struct usb_hcd *);
int (*init_quirk)(struct usb_hcd *);
+ int (*resume_quirk)(struct usb_hcd *);
};
#define hcd_to_xhci_priv(h) ((struct xhci_plat_priv *)hcd_to_xhci(h)->priv)
diff --git a/drivers/usb/host/xhci-rcar.c b/drivers/usb/host/xhci-rcar.c
index d28df386e780..07278228214b 100644
--- a/drivers/usb/host/xhci-rcar.c
+++ b/drivers/usb/host/xhci-rcar.c
@@ -198,3 +198,14 @@ int xhci_rcar_init_quirk(struct usb_hcd *hcd)
return xhci_rcar_download_firmware(hcd);
}
+
+int xhci_rcar_resume_quirk(struct usb_hcd *hcd)
+{
+ int ret;
+
+ ret = xhci_rcar_download_firmware(hcd);
+ if (!ret)
+ xhci_rcar_start(hcd);
+
+ return ret;
+}
diff --git a/drivers/usb/host/xhci-rcar.h b/drivers/usb/host/xhci-rcar.h
index d2ffe20401cf..d247951147a1 100644
--- a/drivers/usb/host/xhci-rcar.h
+++ b/drivers/usb/host/xhci-rcar.h
@@ -18,6 +18,7 @@
#if IS_ENABLED(CONFIG_USB_XHCI_RCAR)
void xhci_rcar_start(struct usb_hcd *hcd);
int xhci_rcar_init_quirk(struct usb_hcd *hcd);
+int xhci_rcar_resume_quirk(struct usb_hcd *hcd);
#else
static inline void xhci_rcar_start(struct usb_hcd *hcd)
{
@@ -27,5 +28,10 @@ static inline int xhci_rcar_init_quirk(struct usb_hcd *hcd)
{
return 0;
}
+
+static inline int xhci_rcar_resume_quirk(struct usb_hcd *hcd)
+{
+ return 0;
+}
#endif
#endif /* _XHCI_RCAR_H */
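
The xhci-rcar.h hunk above follows the usual stub-header pattern: when the Kconfig option is disabled, callers such as xhci-plat still compile because the hook collapses to an inline that returns 0. A generic sketch of the idea (CONFIG_FOO_QUIRK and foo_resume_quirk are placeholders):

#if IS_ENABLED(CONFIG_FOO_QUIRK)
int foo_resume_quirk(struct usb_hcd *hcd);
#else
static inline int foo_resume_quirk(struct usb_hcd *hcd)
{
	return 0;	/* quirk compiled out: behave as a no-op */
}
#endif
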
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index a3309aa02993..74bf5c60a260 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -167,8 +167,6 @@ static void next_trb(struct xhci_hcd *xhci,
*/
static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
- ring->deq_updates++;
-
/* event ring doesn't have link trbs, check for last trb */
if (ring->type == TYPE_EVENT) {
if (!last_trb_on_seg(ring->deq_seg, ring->dequeue)) {
@@ -191,6 +189,9 @@ static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring)
ring->deq_seg = ring->deq_seg->next;
ring->dequeue = ring->deq_seg->trbs;
}
+
+ trace_xhci_inc_deq(ring);
+
return;
}
@@ -223,7 +224,6 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
ring->num_trbs_free--;
next = ++(ring->enqueue);
- ring->enq_updates++;
/* Update the dequeue pointer further if that was a link TRB */
while (trb_is_link(next)) {
@@ -259,6 +259,8 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
ring->enqueue = ring->enq_seg->trbs;
next = ring->enqueue;
}
+
+ trace_xhci_inc_enq(ring);
}
/*
@@ -359,21 +361,19 @@ static int xhci_abort_cmd_ring(struct xhci_hcd *xhci, unsigned long flags)
xhci_write_64(xhci, temp_64 | CMD_RING_ABORT,
&xhci->op_regs->cmd_ring);
- /* Section 4.6.1.2 of xHCI 1.0 spec says software should
- * time the completion od all xHCI commands, including
- * the Command Abort operation. If software doesn't see
- * CRR negated in a timely manner (e.g. longer than 5
- * seconds), then it should assume that the there are
- * larger problems with the xHC and assert HCRST.
+ /* Section 4.6.1.2 of xHCI 1.0 spec says software should also time the
+ * completion of the Command Abort operation. If CRR is not negated in 5
+ * seconds then driver handles it as if host died (-ENODEV).
+ * In the future we should distinguish between -ENODEV and -ETIMEDOUT
+ * and try to recover a -ETIMEDOUT with a host controller reset.
*/
ret = xhci_handshake(&xhci->op_regs->cmd_ring,
CMD_RING_RUNNING, 0, 5 * 1000 * 1000);
if (ret < 0) {
- xhci_err(xhci,
- "Stop command ring failed, maybe the host is dead\n");
- xhci->xhc_state |= XHCI_STATE_DYING;
+ xhci_err(xhci, "Abort failed to stop command ring: %d\n", ret);
xhci_halt(xhci);
- return -ESHUTDOWN;
+ xhci_hc_died(xhci);
+ return ret;
}
/*
* Writing the CMD_RING_ABORT bit should cause a cmd completion event,
@@ -689,6 +689,8 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
struct xhci_virt_ep *ep;
struct xhci_td *cur_td = NULL;
struct xhci_td *last_unlinked_td;
+ struct xhci_ep_ctx *ep_ctx;
+ struct xhci_virt_device *vdev;
struct xhci_dequeue_state deq_state;
@@ -702,6 +704,11 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
memset(&deq_state, 0, sizeof(deq_state));
ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
+
+ vdev = xhci->devs[slot_id];
+ ep_ctx = xhci_get_ep_ctx(xhci, vdev->out_ctx, ep_index);
+ trace_xhci_handle_cmd_stop_ep(ep_ctx);
+
ep = &xhci->devs[slot_id]->eps[ep_index];
last_unlinked_td = list_last_entry(&ep->cancelled_td_list,
struct xhci_td, cancelled_td_list);
@@ -866,6 +873,40 @@ static void xhci_kill_endpoint_urbs(struct xhci_hcd *xhci,
}
}
+/*
+ * host controller died, register read returns 0xffffffff
+ * Complete pending commands, mark them ABORTED.
+ * URBs need to be given back as usb core might be waiting with device locks
+ * held for the URBs to finish during device disconnect, blocking host remove.
+ *
+ * Call with xhci->lock held.
+ * The lock is released and re-acquired while giving back URBs.
+ */
+void xhci_hc_died(struct xhci_hcd *xhci)
+{
+ int i, j;
+
+ if (xhci->xhc_state & XHCI_STATE_DYING)
+ return;
+
+ xhci_err(xhci, "xHCI host controller not responding, assume dead\n");
+ xhci->xhc_state |= XHCI_STATE_DYING;
+
+ xhci_cleanup_command_queue(xhci);
+
+ /* return any pending urbs, remove may be waiting for them */
+ for (i = 0; i <= HCS_MAX_SLOTS(xhci->hcs_params1); i++) {
+ if (!xhci->devs[i])
+ continue;
+ for (j = 0; j < 31; j++)
+ xhci_kill_endpoint_urbs(xhci, i, j);
+ }
+
+ /* inform usb core hc died if PCI remove isn't already handling it */
+ if (!(xhci->xhc_state & XHCI_STATE_REMOVING))
+ usb_hc_died(xhci_to_hcd(xhci));
+}
+
/* Watchdog timer function for when a stop endpoint command fails to complete.
* In this case, we assume the host controller is broken or dying or dead. The
* host may still be completing some other events, so we have to be careful to
@@ -887,7 +928,6 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg)
{
struct xhci_hcd *xhci;
struct xhci_virt_ep *ep;
- int ret, i, j;
unsigned long flags;
ep = (struct xhci_virt_ep *) arg;
@@ -904,52 +944,22 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg)
}
xhci_warn(xhci, "xHCI host not responding to stop endpoint command.\n");
- xhci_warn(xhci, "Assuming host is dying, halting host.\n");
- /* Oops, HC is dead or dying or at least not responding to the stop
- * endpoint command.
- */
-
- xhci->xhc_state |= XHCI_STATE_DYING;
ep->ep_state &= ~EP_STOP_CMD_PENDING;
- /* Disable interrupts from the host controller and start halting it */
- xhci_quiesce(xhci);
- spin_unlock_irqrestore(&xhci->lock, flags);
+ xhci_halt(xhci);
- ret = xhci_halt(xhci);
+ /*
+ * handle a stop endpoint cmd timeout as if host died (-ENODEV).
+ * In the future we could distinguish between -ENODEV and -ETIMEDOUT
+ * and try to recover a -ETIMEDOUT with a host controller reset
+ */
+ xhci_hc_died(xhci);
- spin_lock_irqsave(&xhci->lock, flags);
- if (ret < 0) {
- /* This is bad; the host is not responding to commands and it's
- * not allowing itself to be halted. At least interrupts are
- * disabled. If we call usb_hc_died(), it will attempt to
- * disconnect all device drivers under this host. Those
- * disconnect() methods will wait for all URBs to be unlinked,
- * so we must complete them.
- */
- xhci_warn(xhci, "Non-responsive xHCI host is not halting.\n");
- xhci_warn(xhci, "Completing active URBs anyway.\n");
- /* We could turn all TDs on the rings to no-ops. This won't
- * help if the host has cached part of the ring, and is slow if
- * we want to preserve the cycle bit. Skip it and hope the host
- * doesn't touch the memory.
- */
- }
- for (i = 0; i < MAX_HC_SLOTS; i++) {
- if (!xhci->devs[i])
- continue;
- for (j = 0; j < 31; j++)
- xhci_kill_endpoint_urbs(xhci, i, j);
- }
spin_unlock_irqrestore(&xhci->lock, flags);
xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
- "Calling usb_hc_died()");
- usb_hc_died(xhci_to_hcd(xhci));
- xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
"xHCI host controller is dead.");
}
-
static void update_ring_for_set_deq_completion(struct xhci_hcd *xhci,
struct xhci_virt_device *dev,
struct xhci_ring *ep_ring,
@@ -1029,6 +1039,8 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
slot_ctx = xhci_get_slot_ctx(xhci, dev->out_ctx);
+ trace_xhci_handle_cmd_set_deq(slot_ctx);
+ trace_xhci_handle_cmd_set_deq_ep(ep_ctx);
if (cmd_comp_code != COMP_SUCCESS) {
unsigned int ep_state;
@@ -1099,9 +1111,15 @@ cleanup:
static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id,
union xhci_trb *trb, u32 cmd_comp_code)
{
+ struct xhci_virt_device *vdev;
+ struct xhci_ep_ctx *ep_ctx;
unsigned int ep_index;
ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
+ vdev = xhci->devs[slot_id];
+ ep_ctx = xhci_get_ep_ctx(xhci, vdev->out_ctx, ep_index);
+ trace_xhci_handle_cmd_reset_ep(ep_ctx);
+
/* This command will only fail if the endpoint wasn't halted,
* but we don't care.
*/
@@ -1114,11 +1132,11 @@ static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id,
*/
if (xhci->quirks & XHCI_RESET_EP_QUIRK) {
struct xhci_command *command;
+
command = xhci_alloc_command(xhci, false, false, GFP_ATOMIC);
- if (!command) {
- xhci_warn(xhci, "WARN Cannot submit cfg ep: ENOMEM\n");
+ if (!command)
return;
- }
+
xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
"Queueing configure endpoint command");
xhci_queue_configure_endpoint(xhci, command,
@@ -1143,10 +1161,15 @@ static void xhci_handle_cmd_enable_slot(struct xhci_hcd *xhci, int slot_id,
static void xhci_handle_cmd_disable_slot(struct xhci_hcd *xhci, int slot_id)
{
struct xhci_virt_device *virt_dev;
+ struct xhci_slot_ctx *slot_ctx;
virt_dev = xhci->devs[slot_id];
if (!virt_dev)
return;
+
+ slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
+ trace_xhci_handle_cmd_disable_slot(slot_ctx);
+
if (xhci->quirks & XHCI_EP_LIMIT_QUIRK)
/* Delete default control endpoint resources */
xhci_free_device_endpoint_resources(xhci, virt_dev, true);
@@ -1158,6 +1181,7 @@ static void xhci_handle_cmd_config_ep(struct xhci_hcd *xhci, int slot_id,
{
struct xhci_virt_device *virt_dev;
struct xhci_input_control_ctx *ctrl_ctx;
+ struct xhci_ep_ctx *ep_ctx;
unsigned int ep_index;
unsigned int ep_state;
u32 add_flags, drop_flags;
@@ -1182,6 +1206,9 @@ static void xhci_handle_cmd_config_ep(struct xhci_hcd *xhci, int slot_id,
/* Input ctx add_flags are the endpoint index plus one */
ep_index = xhci_last_valid_endpoint(add_flags) - 1;
+ ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->out_ctx, ep_index);
+ trace_xhci_handle_cmd_config_ep(ep_ctx);
+
/* A usb_set_interface() call directly after clearing a halted
* condition may race on this quirky hardware. Not worth
* worrying about, since this is prototype hardware. Not sure
@@ -1206,9 +1233,26 @@ static void xhci_handle_cmd_config_ep(struct xhci_hcd *xhci, int slot_id,
return;
}
+static void xhci_handle_cmd_addr_dev(struct xhci_hcd *xhci, int slot_id)
+{
+ struct xhci_virt_device *vdev;
+ struct xhci_slot_ctx *slot_ctx;
+
+ vdev = xhci->devs[slot_id];
+ slot_ctx = xhci_get_slot_ctx(xhci, vdev->out_ctx);
+ trace_xhci_handle_cmd_addr_dev(slot_ctx);
+}
+
static void xhci_handle_cmd_reset_dev(struct xhci_hcd *xhci, int slot_id,
struct xhci_event_cmd *event)
{
+ struct xhci_virt_device *vdev;
+ struct xhci_slot_ctx *slot_ctx;
+
+ vdev = xhci->devs[slot_id];
+ slot_ctx = xhci_get_slot_ctx(xhci, vdev->out_ctx);
+ trace_xhci_handle_cmd_reset_dev(slot_ctx);
+
xhci_dbg(xhci, "Completed reset device command.\n");
if (!xhci->devs[slot_id])
xhci_warn(xhci, "Reset device command completion "
@@ -1250,7 +1294,6 @@ void xhci_cleanup_command_queue(struct xhci_hcd *xhci)
void xhci_handle_command_timeout(struct work_struct *work)
{
struct xhci_hcd *xhci;
- int ret;
unsigned long flags;
u64 hw_ring_state;
@@ -1271,22 +1314,17 @@ void xhci_handle_command_timeout(struct work_struct *work)
/* Make sure command ring is running before aborting it */
hw_ring_state = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
+ if (hw_ring_state == ~(u64)0) {
+ xhci_hc_died(xhci);
+ goto time_out_completed;
+ }
+
if ((xhci->cmd_ring_state & CMD_RING_STATE_RUNNING) &&
(hw_ring_state & CMD_RING_RUNNING)) {
/* Prevent new doorbell, and start command abort */
xhci->cmd_ring_state = CMD_RING_STATE_ABORTED;
xhci_dbg(xhci, "Command timeout\n");
- ret = xhci_abort_cmd_ring(xhci, flags);
- if (unlikely(ret == -ESHUTDOWN)) {
- xhci_err(xhci, "Abort command ring failed\n");
- xhci_cleanup_command_queue(xhci);
- spin_unlock_irqrestore(&xhci->lock, flags);
- usb_hc_died(xhci_to_hcd(xhci)->primary_hcd);
- xhci_dbg(xhci, "xHCI host controller is dead.\n");
-
- return;
- }
-
+ xhci_abort_cmd_ring(xhci, flags);
goto time_out_completed;
}
@@ -1384,6 +1422,7 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
case TRB_EVAL_CONTEXT:
break;
case TRB_ADDR_DEV:
+ xhci_handle_cmd_addr_dev(xhci, slot_id);
break;
case TRB_STOP_RING:
WARN_ON(slot_id != TRB_TO_SLOT_ID(
@@ -2243,7 +2282,8 @@ static int handle_tx_event(struct xhci_hcd *xhci,
slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
xdev = xhci->devs[slot_id];
if (!xdev) {
- xhci_err(xhci, "ERROR Transfer event pointed to bad slot\n");
+ xhci_err(xhci, "ERROR Transfer event pointed to bad slot %u\n",
+ slot_id);
xhci_err(xhci, "@%016llx %08x %08x %08x %08x\n",
(unsigned long long) xhci_trb_virt_to_dma(
xhci->event_ring->deq_seg,
@@ -2252,8 +2292,6 @@ static int handle_tx_event(struct xhci_hcd *xhci,
upper_32_bits(le64_to_cpu(event->buffer)),
le32_to_cpu(event->transfer_len),
le32_to_cpu(event->flags));
- xhci_dbg(xhci, "Event ring:\n");
- xhci_debug_segment(xhci, xhci->event_ring->deq_seg);
return -ENODEV;
}
@@ -2263,8 +2301,9 @@ static int handle_tx_event(struct xhci_hcd *xhci,
ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
if (!ep_ring || GET_EP_CTX_STATE(ep_ctx) == EP_STATE_DISABLED) {
- xhci_err(xhci, "ERROR Transfer event for disabled endpoint "
- "or incorrect stream ring\n");
+ xhci_err(xhci,
+ "ERROR Transfer event for disabled endpoint slot %u ep %u or incorrect stream ring\n",
+ slot_id, ep_index);
xhci_err(xhci, "@%016llx %08x %08x %08x %08x\n",
(unsigned long long) xhci_trb_virt_to_dma(
xhci->event_ring->deq_seg,
@@ -2273,8 +2312,6 @@ static int handle_tx_event(struct xhci_hcd *xhci,
upper_32_bits(le64_to_cpu(event->buffer)),
le32_to_cpu(event->transfer_len),
le32_to_cpu(event->flags));
- xhci_dbg(xhci, "Event ring:\n");
- xhci_debug_segment(xhci, xhci->event_ring->deq_seg);
return -ENODEV;
}
@@ -2298,45 +2335,62 @@ static int handle_tx_event(struct xhci_hcd *xhci,
trb_comp_code = COMP_SHORT_PACKET;
else
xhci_warn_ratelimited(xhci,
- "WARN Successful completion on short TX: needs XHCI_TRUST_TX_LENGTH quirk?\n");
+ "WARN Successful completion on short TX for slot %u ep %u: needs XHCI_TRUST_TX_LENGTH quirk?\n",
+ slot_id, ep_index);
case COMP_SHORT_PACKET:
break;
case COMP_STOPPED:
- xhci_dbg(xhci, "Stopped on Transfer TRB\n");
+ xhci_dbg(xhci, "Stopped on Transfer TRB for slot %u ep %u\n",
+ slot_id, ep_index);
break;
case COMP_STOPPED_LENGTH_INVALID:
- xhci_dbg(xhci, "Stopped on No-op or Link TRB\n");
+ xhci_dbg(xhci,
+ "Stopped on No-op or Link TRB for slot %u ep %u\n",
+ slot_id, ep_index);
break;
case COMP_STOPPED_SHORT_PACKET:
- xhci_dbg(xhci, "Stopped with short packet transfer detected\n");
+ xhci_dbg(xhci,
+ "Stopped with short packet transfer detected for slot %u ep %u\n",
+ slot_id, ep_index);
break;
case COMP_STALL_ERROR:
- xhci_dbg(xhci, "Stalled endpoint\n");
+ xhci_dbg(xhci, "Stalled endpoint for slot %u ep %u\n", slot_id,
+ ep_index);
ep->ep_state |= EP_HALTED;
status = -EPIPE;
break;
case COMP_TRB_ERROR:
- xhci_warn(xhci, "WARN: TRB error on endpoint\n");
+ xhci_warn(xhci,
+ "WARN: TRB error for slot %u ep %u on endpoint\n",
+ slot_id, ep_index);
status = -EILSEQ;
break;
case COMP_SPLIT_TRANSACTION_ERROR:
case COMP_USB_TRANSACTION_ERROR:
- xhci_dbg(xhci, "Transfer error on endpoint\n");
+ xhci_dbg(xhci, "Transfer error for slot %u ep %u on endpoint\n",
+ slot_id, ep_index);
status = -EPROTO;
break;
case COMP_BABBLE_DETECTED_ERROR:
- xhci_dbg(xhci, "Babble error on endpoint\n");
+ xhci_dbg(xhci, "Babble error for slot %u ep %u on endpoint\n",
+ slot_id, ep_index);
status = -EOVERFLOW;
break;
case COMP_DATA_BUFFER_ERROR:
- xhci_warn(xhci, "WARN: HC couldn't access mem fast enough\n");
+ xhci_warn(xhci,
+ "WARN: HC couldn't access mem fast enough for slot %u ep %u\n",
+ slot_id, ep_index);
status = -ENOSR;
break;
case COMP_BANDWIDTH_OVERRUN_ERROR:
- xhci_warn(xhci, "WARN: bandwidth overrun event on endpoint\n");
+ xhci_warn(xhci,
+ "WARN: bandwidth overrun event for slot %u ep %u on endpoint\n",
+ slot_id, ep_index);
break;
case COMP_ISOCH_BUFFER_OVERRUN:
- xhci_warn(xhci, "WARN: buffer overrun event on endpoint\n");
+ xhci_warn(xhci,
+ "WARN: buffer overrun event for slot %u ep %u on endpoint",
+ slot_id, ep_index);
break;
case COMP_RING_UNDERRUN:
/*
@@ -2360,7 +2414,9 @@ static int handle_tx_event(struct xhci_hcd *xhci,
ep_index);
goto cleanup;
case COMP_INCOMPATIBLE_DEVICE_ERROR:
- xhci_warn(xhci, "WARN: detect an incompatible device");
+ xhci_warn(xhci,
+ "WARN: detect an incompatible device for slot %u ep %u",
+ slot_id, ep_index);
status = -EPROTO;
break;
case COMP_MISSED_SERVICE_ERROR:
@@ -2371,19 +2427,24 @@ static int handle_tx_event(struct xhci_hcd *xhci,
* short transfer when process the ep_ring next time.
*/
ep->skip = true;
- xhci_dbg(xhci, "Miss service interval error, set skip flag\n");
+ xhci_dbg(xhci,
+ "Miss service interval error for slot %u ep %u, set skip flag\n",
+ slot_id, ep_index);
goto cleanup;
case COMP_NO_PING_RESPONSE_ERROR:
ep->skip = true;
- xhci_dbg(xhci, "No Ping response error, Skip one Isoc TD\n");
+ xhci_dbg(xhci,
+ "No Ping response error for slot %u ep %u, Skip one Isoc TD\n",
+ slot_id, ep_index);
goto cleanup;
default:
if (xhci_is_vendor_info_code(xhci, trb_comp_code)) {
status = 0;
break;
}
- xhci_warn(xhci, "ERROR Unknown event condition %u, HC probably busted\n",
- trb_comp_code);
+ xhci_warn(xhci,
+ "ERROR Unknown event condition %u for slot %u ep %u , HC probably busted\n",
+ trb_comp_code, slot_id, ep_index);
goto cleanup;
}
@@ -2402,15 +2463,11 @@ static int handle_tx_event(struct xhci_hcd *xhci,
xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n",
TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
ep_index);
- xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
- (le32_to_cpu(event->flags) &
- TRB_TYPE_BITMASK)>>10);
- xhci_print_trb_offsets(xhci, (union xhci_trb *) event);
}
if (ep->skip) {
ep->skip = false;
- xhci_dbg(xhci, "td_list is empty while skip "
- "flag set. Clear skip flag.\n");
+ xhci_dbg(xhci, "td_list is empty while skip flag set. Clear skip flag for slot %u ep %u.\n",
+ slot_id, ep_index);
}
goto cleanup;
}
@@ -2418,8 +2475,8 @@ static int handle_tx_event(struct xhci_hcd *xhci,
/* We've skipped all the TDs on the ep ring when ep->skip set */
if (ep->skip && td_num == 0) {
ep->skip = false;
- xhci_dbg(xhci, "All tds on the ep_ring skipped. "
- "Clear skip flag.\n");
+ xhci_dbg(xhci, "All tds on the ep_ring skipped. Clear skip flag for slot %u ep %u.\n",
+ slot_id, ep_index);
goto cleanup;
}
@@ -2478,7 +2535,9 @@ static int handle_tx_event(struct xhci_hcd *xhci,
ep_ring->last_td_was_short = false;
if (ep->skip) {
- xhci_dbg(xhci, "Found td. Clear skip flag.\n");
+ xhci_dbg(xhci,
+ "Found td. Clear skip flag for slot %u ep %u.\n",
+ slot_id, ep_index);
ep->skip = false;
}
@@ -2495,7 +2554,9 @@ static int handle_tx_event(struct xhci_hcd *xhci,
* the TD.
*/
if (trb_is_noop(ep_trb)) {
- xhci_dbg(xhci, "ep_trb is a no-op TRB. Skip it\n");
+ xhci_dbg(xhci,
+ "ep_trb is a no-op TRB. Skip it for slot %u ep %u\n",
+ slot_id, ep_index);
goto cleanup;
}
@@ -2623,7 +2684,8 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd)
spin_lock(&xhci->lock);
/* Check if the xHC generated the interrupt, or the irq is shared */
status = readl(&xhci->op_regs->status);
- if (status == 0xffffffff) {
+ if (status == ~(u32)0) {
+ xhci_hc_died(xhci);
ret = IRQ_HANDLED;
goto out;
}
@@ -3942,10 +4004,8 @@ void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
/* This function gets called from contexts where it cannot sleep */
cmd = xhci_alloc_command(xhci, false, false, GFP_ATOMIC);
- if (!cmd) {
- xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr: ENOMEM\n");
+ if (!cmd)
return;
- }
ep->queued_deq_seg = deq_state->new_deq_seg;
ep->queued_deq_ptr = deq_state->new_deq_ptr;
diff --git a/drivers/usb/host/xhci-trace.h b/drivers/usb/host/xhci-trace.h
index 1ac2cdf8eece..8ce96de10e8a 100644
--- a/drivers/usb/host/xhci-trace.h
+++ b/drivers/usb/host/xhci-trace.h
@@ -231,6 +231,7 @@ DECLARE_EVENT_CLASS(xhci_log_urb,
__field(int, epnum)
__field(int, dir_in)
__field(int, type)
+ __field(int, slot_id)
),
TP_fast_assign(
__entry->urb = urb;
@@ -245,8 +246,9 @@ DECLARE_EVENT_CLASS(xhci_log_urb,
__entry->epnum = usb_endpoint_num(&urb->ep->desc);
__entry->dir_in = usb_endpoint_dir_in(&urb->ep->desc);
__entry->type = usb_endpoint_type(&urb->ep->desc);
+ __entry->slot_id = urb->dev->slot_id;
),
- TP_printk("ep%d%s-%s: urb %p pipe %u length %d/%d sgs %d/%d stream %d flags %08x",
+ TP_printk("ep%d%s-%s: urb %p pipe %u slot %d length %d/%d sgs %d/%d stream %d flags %08x",
__entry->epnum, __entry->dir_in ? "in" : "out",
({ char *s;
switch (__entry->type) {
@@ -264,8 +266,8 @@ DECLARE_EVENT_CLASS(xhci_log_urb,
break;
default:
s = "UNKNOWN";
- } s; }), __entry->urb, __entry->pipe, __entry->actual,
- __entry->length, __entry->num_mapped_sgs,
+ } s; }), __entry->urb, __entry->pipe, __entry->slot_id,
+ __entry->actual, __entry->length, __entry->num_mapped_sgs,
__entry->num_sgs, __entry->stream, __entry->flags
)
);
@@ -285,6 +287,172 @@ DEFINE_EVENT(xhci_log_urb, xhci_urb_dequeue,
TP_ARGS(urb)
);
+DECLARE_EVENT_CLASS(xhci_log_ep_ctx,
+ TP_PROTO(struct xhci_ep_ctx *ctx),
+ TP_ARGS(ctx),
+ TP_STRUCT__entry(
+ __field(u32, info)
+ __field(u32, info2)
+ __field(u64, deq)
+ __field(u32, tx_info)
+ ),
+ TP_fast_assign(
+ __entry->info = le32_to_cpu(ctx->ep_info);
+ __entry->info2 = le32_to_cpu(ctx->ep_info2);
+ __entry->deq = le64_to_cpu(ctx->deq);
+ __entry->tx_info = le32_to_cpu(ctx->tx_info);
+ ),
+ TP_printk("%s", xhci_decode_ep_context(__entry->info,
+ __entry->info2, __entry->deq, __entry->tx_info)
+ )
+);
+
+DEFINE_EVENT(xhci_log_ep_ctx, xhci_handle_cmd_stop_ep,
+ TP_PROTO(struct xhci_ep_ctx *ctx),
+ TP_ARGS(ctx)
+);
+
+DEFINE_EVENT(xhci_log_ep_ctx, xhci_handle_cmd_set_deq_ep,
+ TP_PROTO(struct xhci_ep_ctx *ctx),
+ TP_ARGS(ctx)
+);
+
+DEFINE_EVENT(xhci_log_ep_ctx, xhci_handle_cmd_reset_ep,
+ TP_PROTO(struct xhci_ep_ctx *ctx),
+ TP_ARGS(ctx)
+);
+
+DEFINE_EVENT(xhci_log_ep_ctx, xhci_handle_cmd_config_ep,
+ TP_PROTO(struct xhci_ep_ctx *ctx),
+ TP_ARGS(ctx)
+);
+
+DECLARE_EVENT_CLASS(xhci_log_slot_ctx,
+ TP_PROTO(struct xhci_slot_ctx *ctx),
+ TP_ARGS(ctx),
+ TP_STRUCT__entry(
+ __field(u32, info)
+ __field(u32, info2)
+ __field(u32, tt_info)
+ __field(u32, state)
+ ),
+ TP_fast_assign(
+ __entry->info = le32_to_cpu(ctx->dev_info);
+ __entry->info2 = le32_to_cpu(ctx->dev_info2);
+ __entry->tt_info = le64_to_cpu(ctx->tt_info);
+ __entry->state = le32_to_cpu(ctx->dev_state);
+ ),
+ TP_printk("%s", xhci_decode_slot_context(__entry->info,
+ __entry->info2, __entry->tt_info,
+ __entry->state)
+ )
+);
+
+DEFINE_EVENT(xhci_log_slot_ctx, xhci_alloc_dev,
+ TP_PROTO(struct xhci_slot_ctx *ctx),
+ TP_ARGS(ctx)
+);
+
+DEFINE_EVENT(xhci_log_slot_ctx, xhci_free_dev,
+ TP_PROTO(struct xhci_slot_ctx *ctx),
+ TP_ARGS(ctx)
+);
+
+DEFINE_EVENT(xhci_log_slot_ctx, xhci_handle_cmd_disable_slot,
+ TP_PROTO(struct xhci_slot_ctx *ctx),
+ TP_ARGS(ctx)
+);
+
+DEFINE_EVENT(xhci_log_slot_ctx, xhci_discover_or_reset_device,
+ TP_PROTO(struct xhci_slot_ctx *ctx),
+ TP_ARGS(ctx)
+);
+
+DEFINE_EVENT(xhci_log_slot_ctx, xhci_setup_device_slot,
+ TP_PROTO(struct xhci_slot_ctx *ctx),
+ TP_ARGS(ctx)
+);
+
+DEFINE_EVENT(xhci_log_slot_ctx, xhci_handle_cmd_addr_dev,
+ TP_PROTO(struct xhci_slot_ctx *ctx),
+ TP_ARGS(ctx)
+);
+
+DEFINE_EVENT(xhci_log_slot_ctx, xhci_handle_cmd_reset_dev,
+ TP_PROTO(struct xhci_slot_ctx *ctx),
+ TP_ARGS(ctx)
+);
+
+DEFINE_EVENT(xhci_log_slot_ctx, xhci_handle_cmd_set_deq,
+ TP_PROTO(struct xhci_slot_ctx *ctx),
+ TP_ARGS(ctx)
+);
+
+DECLARE_EVENT_CLASS(xhci_log_ring,
+ TP_PROTO(struct xhci_ring *ring),
+ TP_ARGS(ring),
+ TP_STRUCT__entry(
+ __field(u32, type)
+ __field(void *, ring)
+ __field(dma_addr_t, enq)
+ __field(dma_addr_t, deq)
+ __field(dma_addr_t, enq_seg)
+ __field(dma_addr_t, deq_seg)
+ __field(unsigned int, num_segs)
+ __field(unsigned int, stream_id)
+ __field(unsigned int, cycle_state)
+ __field(unsigned int, num_trbs_free)
+ __field(unsigned int, bounce_buf_len)
+ ),
+ TP_fast_assign(
+ __entry->ring = ring;
+ __entry->type = ring->type;
+ __entry->num_segs = ring->num_segs;
+ __entry->stream_id = ring->stream_id;
+ __entry->enq_seg = ring->enq_seg->dma;
+ __entry->deq_seg = ring->deq_seg->dma;
+ __entry->cycle_state = ring->cycle_state;
+ __entry->num_trbs_free = ring->num_trbs_free;
+ __entry->bounce_buf_len = ring->bounce_buf_len;
+ __entry->enq = xhci_trb_virt_to_dma(ring->enq_seg, ring->enqueue);
+ __entry->deq = xhci_trb_virt_to_dma(ring->deq_seg, ring->dequeue);
+ ),
+ TP_printk("%s %p: enq %pad(%pad) deq %pad(%pad) segs %d stream %d free_trbs %d bounce %d cycle %d",
+ xhci_ring_type_string(__entry->type), __entry->ring,
+ &__entry->enq, &__entry->enq_seg,
+ &__entry->deq, &__entry->deq_seg,
+ __entry->num_segs,
+ __entry->stream_id,
+ __entry->num_trbs_free,
+ __entry->bounce_buf_len,
+ __entry->cycle_state
+ )
+);
+
+DEFINE_EVENT(xhci_log_ring, xhci_ring_alloc,
+ TP_PROTO(struct xhci_ring *ring),
+ TP_ARGS(ring)
+);
+
+DEFINE_EVENT(xhci_log_ring, xhci_ring_free,
+ TP_PROTO(struct xhci_ring *ring),
+ TP_ARGS(ring)
+);
+
+DEFINE_EVENT(xhci_log_ring, xhci_ring_expansion,
+ TP_PROTO(struct xhci_ring *ring),
+ TP_ARGS(ring)
+);
+
+DEFINE_EVENT(xhci_log_ring, xhci_inc_enq,
+ TP_PROTO(struct xhci_ring *ring),
+ TP_ARGS(ring)
+);
+
+DEFINE_EVENT(xhci_log_ring, xhci_inc_deq,
+ TP_PROTO(struct xhci_ring *ring),
+ TP_ARGS(ring)
+);
#endif /* __XHCI_TRACE_H */
/* this part must be outside header guard */
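
On the tracepoints added above: every DEFINE_EVENT(class, name, ...) generates a trace_name() inline that the driver calls with the corresponding structure; TP_fast_assign captures the raw fields when the event is enabled and TP_printk formats them only when the trace buffer is read. Purely as an illustration, the ring events map to call sites like these (example_trace_ring is a made-up wrapper; the trace_* calls are the real ones added in xhci-mem.c and xhci-ring.c):

static inline void example_trace_ring(struct xhci_ring *ring)
{
	trace_xhci_ring_alloc(ring);	/* after xhci_initialize_ring_info() */
	trace_xhci_inc_enq(ring);	/* at the tail of inc_enq() */
	trace_xhci_inc_deq(ring);	/* at the tail of inc_deq() */
}
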
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 953fd8f62df0..2d1310220832 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -125,7 +125,7 @@ int xhci_halt(struct xhci_hcd *xhci)
/*
* Set the run bit and wait for the host to be running.
*/
-static int xhci_start(struct xhci_hcd *xhci)
+int xhci_start(struct xhci_hcd *xhci)
{
u32 temp;
int ret;
@@ -216,31 +216,21 @@ int xhci_reset(struct xhci_hcd *xhci)
return ret;
}
-#ifdef CONFIG_PCI
-static int xhci_free_msi(struct xhci_hcd *xhci)
-{
- int i;
-
- if (!xhci->msix_entries)
- return -EINVAL;
-
- for (i = 0; i < xhci->msix_count; i++)
- if (xhci->msix_entries[i].vector)
- free_irq(xhci->msix_entries[i].vector,
- xhci_to_hcd(xhci));
- return 0;
-}
+#ifdef CONFIG_USB_PCI
/*
* Set up MSI
*/
static int xhci_setup_msi(struct xhci_hcd *xhci)
{
int ret;
+ /*
+ * TODO: Check with MSI SoC for sysdev
+ */
struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
- ret = pci_enable_msi(pdev);
- if (ret) {
+ ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
+ if (ret < 0) {
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"failed to allocate MSI entry");
return ret;
@@ -251,35 +241,13 @@ static int xhci_setup_msi(struct xhci_hcd *xhci)
if (ret) {
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"disable MSI interrupt");
- pci_disable_msi(pdev);
+ pci_free_irq_vectors(pdev);
}
return ret;
}
/*
- * Free IRQs
- * free all IRQs request
- */
-static void xhci_free_irq(struct xhci_hcd *xhci)
-{
- struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
- int ret;
-
- /* return if using legacy interrupt */
- if (xhci_to_hcd(xhci)->irq > 0)
- return;
-
- ret = xhci_free_msi(xhci);
- if (!ret)
- return;
- if (pdev->irq > 0)
- free_irq(pdev->irq, xhci_to_hcd(xhci));
-
- return;
-}
-
-/*
* Set up MSI-X
*/
static int xhci_setup_msix(struct xhci_hcd *xhci)
@@ -298,28 +266,17 @@ static int xhci_setup_msix(struct xhci_hcd *xhci)
xhci->msix_count = min(num_online_cpus() + 1,
HCS_MAX_INTRS(xhci->hcs_params1));
- xhci->msix_entries =
- kmalloc((sizeof(struct msix_entry))*xhci->msix_count,
- GFP_KERNEL);
- if (!xhci->msix_entries)
- return -ENOMEM;
-
- for (i = 0; i < xhci->msix_count; i++) {
- xhci->msix_entries[i].entry = i;
- xhci->msix_entries[i].vector = 0;
- }
-
- ret = pci_enable_msix_exact(pdev, xhci->msix_entries, xhci->msix_count);
- if (ret) {
+ ret = pci_alloc_irq_vectors(pdev, xhci->msix_count, xhci->msix_count,
+ PCI_IRQ_MSIX);
+ if (ret < 0) {
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"Failed to enable MSI-X");
- goto free_entries;
+ return ret;
}
for (i = 0; i < xhci->msix_count; i++) {
- ret = request_irq(xhci->msix_entries[i].vector,
- xhci_msi_irq,
- 0, "xhci_hcd", xhci_to_hcd(xhci));
+ ret = request_irq(pci_irq_vector(pdev, i), xhci_msi_irq, 0,
+ "xhci_hcd", xhci_to_hcd(xhci));
if (ret)
goto disable_msix;
}
@@ -329,11 +286,9 @@ static int xhci_setup_msix(struct xhci_hcd *xhci)
disable_msix:
xhci_dbg_trace(xhci, trace_xhci_dbg_init, "disable MSI-X interrupt");
- xhci_free_irq(xhci);
- pci_disable_msix(pdev);
-free_entries:
- kfree(xhci->msix_entries);
- xhci->msix_entries = NULL;
+ while (--i >= 0)
+ free_irq(pci_irq_vector(pdev, i), xhci_to_hcd(xhci));
+ pci_free_irq_vectors(pdev);
return ret;
}
@@ -346,27 +301,33 @@ static void xhci_cleanup_msix(struct xhci_hcd *xhci)
if (xhci->quirks & XHCI_PLAT)
return;
- xhci_free_irq(xhci);
+ /* return if using legacy interrupt */
+ if (hcd->irq > 0)
+ return;
+
+ if (hcd->msix_enabled) {
+ int i;
- if (xhci->msix_entries) {
- pci_disable_msix(pdev);
- kfree(xhci->msix_entries);
- xhci->msix_entries = NULL;
+ for (i = 0; i < xhci->msix_count; i++)
+ free_irq(pci_irq_vector(pdev, i), xhci_to_hcd(xhci));
} else {
- pci_disable_msi(pdev);
+ free_irq(pci_irq_vector(pdev, 0), xhci_to_hcd(xhci));
}
+ pci_free_irq_vectors(pdev);
hcd->msix_enabled = 0;
- return;
}
static void __maybe_unused xhci_msix_sync_irqs(struct xhci_hcd *xhci)
{
- int i;
+ struct usb_hcd *hcd = xhci_to_hcd(xhci);
+
+ if (hcd->msix_enabled) {
+ struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
+ int i;
- if (xhci->msix_entries) {
for (i = 0; i < xhci->msix_count; i++)
- synchronize_irq(xhci->msix_entries[i].vector);
+ synchronize_irq(pci_irq_vector(pdev, i));
}
}
@@ -539,7 +500,7 @@ static int xhci_all_ports_seen_u0(struct xhci_hcd *xhci)
* device contexts (?), set up a command ring segment (or two?), create event
* ring (one for now).
*/
-int xhci_init(struct usb_hcd *hcd)
+static int xhci_init(struct usb_hcd *hcd)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
int retval = 0;
@@ -619,16 +580,10 @@ int xhci_run(struct usb_hcd *hcd)
if (ret)
return ret;
- xhci_dbg(xhci, "Command ring memory map follows:\n");
- xhci_debug_ring(xhci, xhci->cmd_ring);
- xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
xhci_dbg_cmd_ptrs(xhci);
xhci_dbg(xhci, "ERST memory map follows:\n");
xhci_dbg_erst(xhci, &xhci->erst);
- xhci_dbg(xhci, "Event ring:\n");
- xhci_debug_ring(xhci, xhci->event_ring);
- xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
temp_64 &= ~ERST_PTR_MASK;
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
@@ -661,9 +616,11 @@ int xhci_run(struct usb_hcd *hcd)
if (xhci->quirks & XHCI_NEC_HOST) {
struct xhci_command *command;
+
command = xhci_alloc_command(xhci, false, false, GFP_KERNEL);
if (!command)
return -ENOMEM;
+
xhci_queue_vendor_command(xhci, command, 0, 0, 0,
TRB_TYPE(TRB_NEC_GET_FW));
}
@@ -682,28 +639,28 @@ EXPORT_SYMBOL_GPL(xhci_run);
* Disable device contexts, disable IRQs, and quiesce the HC.
* Reset the HC, finish any completed transactions, and cleanup memory.
*/
-void xhci_stop(struct usb_hcd *hcd)
+static void xhci_stop(struct usb_hcd *hcd)
{
u32 temp;
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
mutex_lock(&xhci->mutex);
- if (!(xhci->xhc_state & XHCI_STATE_HALTED)) {
- spin_lock_irq(&xhci->lock);
-
- xhci->xhc_state |= XHCI_STATE_HALTED;
- xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
- xhci_halt(xhci);
- xhci_reset(xhci);
- spin_unlock_irq(&xhci->lock);
- }
-
+ /* Only halt host and free memory after both hcds are removed */
if (!usb_hcd_is_primary_hcd(hcd)) {
+ /* usb core will free this hcd shortly, unset pointer */
+ xhci->shared_hcd = NULL;
mutex_unlock(&xhci->mutex);
return;
}
+ spin_lock_irq(&xhci->lock);
+ xhci->xhc_state |= XHCI_STATE_HALTED;
+ xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
+ xhci_halt(xhci);
+ xhci_reset(xhci);
+ spin_unlock_irq(&xhci->lock);
+
xhci_cleanup_msix(xhci);
/* Deleting Compliance Mode Recovery Timer */
@@ -721,7 +678,7 @@ void xhci_stop(struct usb_hcd *hcd)
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"// Disabling event ring interrupts");
temp = readl(&xhci->op_regs->status);
- writel(temp & ~STS_EINT, &xhci->op_regs->status);
+ writel((temp & ~0x1fff) | STS_EINT, &xhci->op_regs->status);
temp = readl(&xhci->ir_set->irq_pending);
writel(ER_IRQ_DISABLE(temp), &xhci->ir_set->irq_pending);
xhci_print_ir_set(xhci, 0);
@@ -743,12 +700,12 @@ void xhci_stop(struct usb_hcd *hcd)
*
* This will only ever be called with the main usb_hcd (the USB3 roothub).
*/
-void xhci_shutdown(struct usb_hcd *hcd)
+static void xhci_shutdown(struct usb_hcd *hcd)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
if (xhci->quirks & XHCI_SPURIOUS_REBOOT)
- usb_disable_xhci_ports(to_pci_dev(hcd->self.controller));
+ usb_disable_xhci_ports(to_pci_dev(hcd->self.sysdev));
spin_lock_irq(&xhci->lock);
xhci_halt(xhci);
@@ -765,7 +722,7 @@ void xhci_shutdown(struct usb_hcd *hcd)
/* Yet another workaround for spurious wakeups at shutdown with HSW */
if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
- pci_set_power_state(to_pci_dev(hcd->self.controller), PCI_D3hot);
+ pci_set_power_state(to_pci_dev(hcd->self.sysdev), PCI_D3hot);
}
#ifdef CONFIG_PM
@@ -1054,7 +1011,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
xhci_dbg(xhci, "// Disabling event ring interrupts\n");
temp = readl(&xhci->op_regs->status);
- writel(temp & ~STS_EINT, &xhci->op_regs->status);
+ writel((temp & ~0x1fff) | STS_EINT, &xhci->op_regs->status);
temp = readl(&xhci->ir_set->irq_pending);
writel(ER_IRQ_DISABLE(temp), &xhci->ir_set->irq_pending);
xhci_print_ir_set(xhci, 0);
@@ -1176,7 +1133,7 @@ unsigned int xhci_get_endpoint_address(unsigned int ep_index)
* endpoint index to create a bitmask. The slot context is bit 0, endpoint 0 is
* bit 1, etc.
*/
-unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc)
+static unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc)
{
return 1 << (xhci_get_endpoint_index(desc) + 1);
}
@@ -1185,7 +1142,7 @@ unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc)
* endpoint index to create a bitmask. The slot context is bit 0, endpoint 0 is
* bit 1, etc.
*/
-unsigned int xhci_get_endpoint_flag_from_index(unsigned int ep_index)
+static unsigned int xhci_get_endpoint_flag_from_index(unsigned int ep_index)
{
return 1 << (ep_index + 1);
}
@@ -1306,11 +1263,6 @@ static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
ctrl_ctx->add_flags = cpu_to_le32(EP0_FLAG);
ctrl_ctx->drop_flags = 0;
- xhci_dbg(xhci, "Slot %d input context\n", slot_id);
- xhci_dbg_ctx(xhci, command->in_ctx, ep_index);
- xhci_dbg(xhci, "Slot %d output context\n", slot_id);
- xhci_dbg_ctx(xhci, out_ctx, ep_index);
-
ret = xhci_configure_endpoint(xhci, urb->dev, command,
true, false);
@@ -1329,7 +1281,7 @@ command_cleanup:
* non-error returns are a promise to giveback() the urb later
* we drop ownership so next owner (or urb unlink) can get it
*/
-int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
+static int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
unsigned long flags;
@@ -1465,7 +1417,7 @@ free_priv:
* Note that this function can be called in any context, or so says
* usb_hcd_unlink_urb()
*/
-int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
+static int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
unsigned long flags;
int ret, i;
@@ -1501,10 +1453,16 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
if (!ep || !ep_ring)
goto err_giveback;
+ /* If xHC is dead take it down and return ALL URBs in xhci_hc_died() */
temp = readl(&xhci->op_regs->status);
- if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_HALTED)) {
+ if (temp == ~(u32)0 || xhci->xhc_state & XHCI_STATE_DYING) {
+ xhci_hc_died(xhci);
+ goto done;
+ }
+
+ if (xhci->xhc_state & XHCI_STATE_HALTED) {
xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
- "HW died, freeing TD.");
+ "HC halted, freeing TD manually.");
for (i = urb_priv->num_tds_done;
i < urb_priv->num_tds;
i++) {
@@ -1576,7 +1534,7 @@ err_giveback:
* disabled, so there's no need for mutual exclusion to protect
* the xhci->devs[slot_id] structure.
*/
-int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
+static int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
struct usb_host_endpoint *ep)
{
struct xhci_hcd *xhci;
@@ -1659,7 +1617,7 @@ int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
* configuration or alt setting is installed in the device, so there's no need
* for mutual exclusion to protect the xhci->devs[slot_id] structure.
*/
-int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
+static int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
struct usb_host_endpoint *ep)
{
struct xhci_hcd *xhci;
@@ -1852,7 +1810,6 @@ static int xhci_evaluate_context_result(struct xhci_hcd *xhci,
struct usb_device *udev, u32 *cmd_status)
{
int ret;
- struct xhci_virt_device *virt_dev = xhci->devs[udev->slot_id];
switch (*cmd_status) {
case COMP_COMMAND_ABORTED:
@@ -1873,7 +1830,6 @@ static int xhci_evaluate_context_result(struct xhci_hcd *xhci,
case COMP_CONTEXT_STATE_ERROR:
dev_warn(&udev->dev,
"WARN: invalid context state for evaluate context command.\n");
- xhci_dbg_ctx(xhci, virt_dev->out_ctx, 1);
ret = -EINVAL;
break;
case COMP_INCOMPATIBLE_DEVICE_ERROR:
@@ -2330,7 +2286,7 @@ static unsigned int xhci_get_ss_bw_consumed(struct xhci_bw_info *ep_bw)
}
-void xhci_drop_ep_from_interval_table(struct xhci_hcd *xhci,
+static void xhci_drop_ep_from_interval_table(struct xhci_hcd *xhci,
struct xhci_bw_info *ep_bw,
struct xhci_interval_bw_table *bw_table,
struct usb_device *udev,
@@ -2595,6 +2551,12 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
return -EINVAL;
spin_lock_irqsave(&xhci->lock, flags);
+
+ if (xhci->xhc_state & XHCI_STATE_DYING) {
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ return -ESHUTDOWN;
+ }
+
virt_dev = xhci->devs[udev->slot_id];
ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
@@ -2689,7 +2651,7 @@ static void xhci_check_bw_drop_ep_streams(struct xhci_hcd *xhci,
* else should be touching the xhci->devs[slot_id] structure, so we
* don't need to take the xhci->lock for manipulating that.
*/
-int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
+static int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
{
int i;
int ret = 0;
@@ -2746,9 +2708,6 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
break;
}
}
- xhci_dbg(xhci, "New Input Control Context:\n");
- xhci_dbg_ctx(xhci, virt_dev->in_ctx,
- LAST_CTX_TO_EP_NUM(le32_to_cpu(slot_ctx->dev_info)));
ret = xhci_configure_endpoint(xhci, udev, command,
false, false);
@@ -2756,10 +2715,6 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
/* Callee should call reset_bandwidth() */
goto command_cleanup;
- xhci_dbg(xhci, "Output context after successful config ep cmd:\n");
- xhci_dbg_ctx(xhci, virt_dev->out_ctx,
- LAST_CTX_TO_EP_NUM(le32_to_cpu(slot_ctx->dev_info)));
-
/* Free any rings that were dropped, but not changed. */
for (i = 1; i < 31; i++) {
if ((le32_to_cpu(ctrl_ctx->drop_flags) & (1 << (i + 1))) &&
@@ -2793,7 +2748,7 @@ command_cleanup:
return ret;
}
-void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
+static void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
{
struct xhci_hcd *xhci;
struct xhci_virt_device *virt_dev;
@@ -2826,9 +2781,6 @@ static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci,
ctrl_ctx->drop_flags = cpu_to_le32(drop_flags);
xhci_slot_copy(xhci, in_ctx, out_ctx);
ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
-
- xhci_dbg(xhci, "Input Context:\n");
- xhci_dbg_ctx(xhci, in_ctx, xhci_last_valid_endpoint(add_flags));
}
static void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci,
@@ -2919,7 +2871,7 @@ void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
* Context: in_interrupt
*/
-void xhci_endpoint_reset(struct usb_hcd *hcd,
+static void xhci_endpoint_reset(struct usb_hcd *hcd,
struct usb_host_endpoint *ep)
{
struct xhci_hcd *xhci;
@@ -3095,7 +3047,7 @@ static u32 xhci_calculate_no_streams_bitmask(struct xhci_hcd *xhci,
* hardware or endpoints claim they can't support the number of requested
* stream IDs.
*/
-int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev,
+static int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev,
struct usb_host_endpoint **eps, unsigned int num_eps,
unsigned int num_streams, gfp_t mem_flags)
{
@@ -3129,10 +3081,9 @@ int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev,
}
config_cmd = xhci_alloc_command(xhci, true, true, mem_flags);
- if (!config_cmd) {
- xhci_dbg(xhci, "Could not allocate xHCI command structure.\n");
+ if (!config_cmd)
return -ENOMEM;
- }
+
ctrl_ctx = xhci_get_input_control_ctx(config_cmd->in_ctx);
if (!ctrl_ctx) {
xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
@@ -3259,7 +3210,7 @@ cleanup:
* Modify the endpoint context state, submit a configure endpoint command,
* and free all endpoint rings for streams if that completes successfully.
*/
-int xhci_free_streams(struct usb_hcd *hcd, struct usb_device *udev,
+static int xhci_free_streams(struct usb_hcd *hcd, struct usb_device *udev,
struct usb_host_endpoint **eps, unsigned int num_eps,
gfp_t mem_flags)
{
@@ -3391,7 +3342,8 @@ void xhci_free_device_endpoint_resources(struct xhci_hcd *xhci,
* re-initialization during S3/S4. In this case, call xhci_alloc_dev() to
* re-allocate the device.
*/
-int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
+static int xhci_discover_or_reset_device(struct usb_hcd *hcd,
+ struct usb_device *udev)
{
int ret, i;
unsigned long flags;
@@ -3443,6 +3395,8 @@ int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
SLOT_STATE_DISABLED)
return 0;
+ trace_xhci_discover_or_reset_device(slot_ctx);
+
xhci_dbg(xhci, "Resetting device with slot ID %u\n", slot_id);
/* Allocate the command structure that holds the struct completion.
* Assume we're in process context, since the normal device reset
@@ -3539,9 +3493,6 @@ int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
}
/* If necessary, update the number of active TTs on this root port */
xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps);
-
- xhci_dbg(xhci, "Output context after successful reset device cmd:\n");
- xhci_dbg_ctx(xhci, virt_dev->out_ctx, last_freed_endpoint);
ret = 0;
command_cleanup:
@@ -3554,12 +3505,11 @@ command_cleanup:
* disconnected, and all traffic has been stopped and the endpoints have been
* disabled. Free any HC data structures associated with that device.
*/
-void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
+static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
struct xhci_virt_device *virt_dev;
- unsigned long flags;
- u32 state;
+ struct xhci_slot_ctx *slot_ctx;
int i, ret;
struct xhci_command *command;
@@ -3587,6 +3537,8 @@ void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
}
virt_dev = xhci->devs[udev->slot_id];
+ slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
+ trace_xhci_free_dev(slot_ctx);
/* Stop any wayward timer functions (which may grab the lock) */
for (i = 0; i < 31; i++) {
@@ -3594,30 +3546,50 @@ void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
del_timer_sync(&virt_dev->eps[i].stop_cmd_timer);
}
+ xhci_disable_slot(xhci, command, udev->slot_id);
+ /*
+ * Event command completion handler will free any data structures
+ * associated with the slot. XXX Can free sleep?
+ */
+}
+
+int xhci_disable_slot(struct xhci_hcd *xhci, struct xhci_command *command,
+ u32 slot_id)
+{
+ unsigned long flags;
+ u32 state;
+ int ret = 0;
+ struct xhci_virt_device *virt_dev;
+
+ virt_dev = xhci->devs[slot_id];
+ if (!virt_dev)
+ return -EINVAL;
+ if (!command)
+ command = xhci_alloc_command(xhci, false, false, GFP_KERNEL);
+ if (!command)
+ return -ENOMEM;
+
spin_lock_irqsave(&xhci->lock, flags);
/* Don't disable the slot if the host controller is dead. */
state = readl(&xhci->op_regs->status);
if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) ||
(xhci->xhc_state & XHCI_STATE_HALTED)) {
- xhci_free_virt_device(xhci, udev->slot_id);
+ xhci_free_virt_device(xhci, slot_id);
spin_unlock_irqrestore(&xhci->lock, flags);
kfree(command);
- return;
+ return ret;
}
- if (xhci_queue_slot_control(xhci, command, TRB_DISABLE_SLOT,
- udev->slot_id)) {
+ ret = xhci_queue_slot_control(xhci, command, TRB_DISABLE_SLOT,
+ slot_id);
+ if (ret) {
spin_unlock_irqrestore(&xhci->lock, flags);
xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
- return;
+ return ret;
}
xhci_ring_cmd_db(xhci);
spin_unlock_irqrestore(&xhci->lock, flags);
-
- /*
- * Event command completion handler will free any data structures
- * associated with the slot. XXX Can free sleep?
- */
+ return ret;
}
/*
@@ -3650,6 +3622,8 @@ static int xhci_reserve_host_control_ep_resources(struct xhci_hcd *xhci)
int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ struct xhci_virt_device *vdev;
+ struct xhci_slot_ctx *slot_ctx;
unsigned long flags;
int ret, slot_id;
struct xhci_command *command;
@@ -3705,6 +3679,10 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n");
goto disable_slot;
}
+ vdev = xhci->devs[slot_id];
+ slot_ctx = xhci_get_slot_ctx(xhci, vdev->out_ctx);
+ trace_xhci_alloc_dev(slot_ctx);
+
udev->slot_id = slot_id;
#ifndef CONFIG_USB_DEFAULT_PERSIST
@@ -3724,15 +3702,10 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
disable_slot:
/* Disable slot, if we can do it without mem alloc */
- spin_lock_irqsave(&xhci->lock, flags);
kfree(command->completion);
command->completion = NULL;
command->status = 0;
- if (!xhci_queue_slot_control(xhci, command, TRB_DISABLE_SLOT,
- udev->slot_id))
- xhci_ring_cmd_db(xhci);
- spin_unlock_irqrestore(&xhci->lock, flags);
- return 0;
+ return xhci_disable_slot(xhci, command, udev->slot_id);
}
/*
@@ -3779,9 +3752,10 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
ret = -EINVAL;
goto out;
}
+ slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
+ trace_xhci_setup_device_slot(slot_ctx);
if (setup == SETUP_CONTEXT_ONLY) {
- slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) ==
SLOT_STATE_DEFAULT) {
xhci_dbg(xhci, "Slot already in default state\n");
@@ -3818,8 +3792,6 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG | EP0_FLAG);
ctrl_ctx->drop_flags = 0;
- xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
- xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);
trace_xhci_address_ctx(xhci, virt_dev->in_ctx,
le32_to_cpu(slot_ctx->dev_info) >> 27);
@@ -3872,8 +3844,6 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
xhci_err(xhci,
"ERROR: unexpected setup %s command completion code 0x%x.\n",
act, command->status);
- xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id);
- xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2);
trace_xhci_address_ctx(xhci, virt_dev->out_ctx, 1);
ret = -EINVAL;
break;
@@ -3892,17 +3862,12 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
xhci_dbg_trace(xhci, trace_xhci_dbg_address,
"Output Context DMA address = %#08llx",
(unsigned long long)virt_dev->out_ctx->dma);
- xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
- xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);
trace_xhci_address_ctx(xhci, virt_dev->in_ctx,
le32_to_cpu(slot_ctx->dev_info) >> 27);
- xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id);
- xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2);
/*
* USB core uses address 1 for the roothubs, so we add one to the
* address given back to us by the HC.
*/
- slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
trace_xhci_address_ctx(xhci, virt_dev->out_ctx,
le32_to_cpu(slot_ctx->dev_info) >> 27);
/* Zero the input context control for later use */
@@ -3921,12 +3886,12 @@ out:
return ret;
}
-int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
+static int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
{
return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ADDRESS);
}
-int xhci_enable_device(struct usb_hcd *hcd, struct usb_device *udev)
+static int xhci_enable_device(struct usb_hcd *hcd, struct usb_device *udev)
{
return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ONLY);
}
@@ -4003,14 +3968,10 @@ static int __maybe_unused xhci_change_max_exit_latency(struct xhci_hcd *xhci,
xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
"Set up evaluate context for LPM MEL change.");
- xhci_dbg(xhci, "Slot %u Input Context:\n", udev->slot_id);
- xhci_dbg_ctx(xhci, command->in_ctx, 0);
/* Issue and wait for the evaluate context command. */
ret = xhci_configure_endpoint(xhci, udev, command,
true, true);
- xhci_dbg(xhci, "Slot %u Output Context:\n", udev->slot_id);
- xhci_dbg_ctx(xhci, virt_dev->out_ctx, 0);
if (!ret) {
spin_lock_irqsave(&xhci->lock, flags);
@@ -4083,7 +4044,7 @@ static int xhci_calculate_usb2_hw_lpm_params(struct usb_device *udev)
return PORT_BESLD(besld) | PORT_L1_TIMEOUT(l1) | PORT_HIRDM(hirdm);
}
-int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
+static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
struct usb_device *udev, int enable)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
@@ -4207,7 +4168,7 @@ static int xhci_check_usb2_port_capability(struct xhci_hcd *xhci, int port,
return 0;
}
-int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
+static int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
int portnum = udev->portnum - 1;
@@ -4616,7 +4577,7 @@ static int calculate_max_exit_latency(struct usb_device *udev,
}
/* Returns the USB3 hub-encoded value for the U1/U2 timeout. */
-int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd,
+static int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd,
struct usb_device *udev, enum usb3_link_state state)
{
struct xhci_hcd *xhci;
@@ -4647,7 +4608,7 @@ int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd,
return hub_encoded_timeout;
}
-int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd,
+static int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd,
struct usb_device *udev, enum usb3_link_state state)
{
struct xhci_hcd *xhci;
@@ -4663,24 +4624,24 @@ int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd,
}
#else /* CONFIG_PM */
-int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
+static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
struct usb_device *udev, int enable)
{
return 0;
}
-int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
+static int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
{
return 0;
}
-int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd,
+static int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd,
struct usb_device *udev, enum usb3_link_state state)
{
return USB3_LPM_DISABLED;
}
-int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd,
+static int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd,
struct usb_device *udev, enum usb3_link_state state)
{
return 0;
@@ -4692,7 +4653,7 @@ int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd,
/* Once a hub descriptor is fetched for a device, we need to update the xHC's
* internal data structures for the device.
*/
-int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
+static int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
struct usb_tt *tt, gfp_t mem_flags)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
@@ -4713,11 +4674,11 @@ int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
xhci_warn(xhci, "Cannot update hub desc for unknown device.\n");
return -EINVAL;
}
+
config_cmd = xhci_alloc_command(xhci, true, true, mem_flags);
- if (!config_cmd) {
- xhci_dbg(xhci, "Could not allocate xHCI command structure.\n");
+ if (!config_cmd)
return -ENOMEM;
- }
+
ctrl_ctx = xhci_get_input_control_ctx(config_cmd->in_ctx);
if (!ctrl_ctx) {
xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
@@ -4778,8 +4739,6 @@ int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
xhci_dbg(xhci, "Set up %s for hub device.\n",
(xhci->hci_version > 0x95) ?
"configure endpoint" : "evaluate context");
- xhci_dbg(xhci, "Slot %u Input Context:\n", hdev->slot_id);
- xhci_dbg_ctx(xhci, config_cmd->in_ctx, 0);
/* Issue and wait for the configure endpoint or
* evaluate context command.
@@ -4791,14 +4750,11 @@ int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
true, false);
- xhci_dbg(xhci, "Slot %u Output Context:\n", hdev->slot_id);
- xhci_dbg_ctx(xhci, vdev->out_ctx, 0);
-
xhci_free_command(xhci, config_cmd);
return ret;
}
-int xhci_get_frame(struct usb_hcd *hcd)
+static int xhci_get_frame(struct usb_hcd *hcd)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
/* EHCI mods by the periodic size. Why? */
@@ -4808,7 +4764,11 @@ int xhci_get_frame(struct usb_hcd *hcd)
int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
{
struct xhci_hcd *xhci;
- struct device *dev = hcd->self.controller;
+ /*
+ * TODO: Check with DWC3 clients for sysdev according to
+ * quirks
+ */
+ struct device *dev = hcd->self.sysdev;
int retval;
/* Accept arbitrarily long scatter-gather lists */
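Note: the MSI/MSI-X rework in the hunks above drops the driver-private msix_entries array in favour of pci_alloc_irq_vectors()/pci_irq_vector(). A minimal sketch of that pattern for a generic PCI driver; my_irq_handler, my_data and nvec_wanted are illustrative names, not from the patch:

        int i, nvec, ret;

        /* ask for up to nvec_wanted MSI-X vectors, at least one */
        nvec = pci_alloc_irq_vectors(pdev, 1, nvec_wanted, PCI_IRQ_MSIX);
        if (nvec < 0)
                return nvec;

        for (i = 0; i < nvec; i++) {
                ret = request_irq(pci_irq_vector(pdev, i), my_irq_handler,
                                  0, "my-driver", my_data);
                if (ret)
                        goto err_free;
        }
        return 0;

err_free:
        while (--i >= 0)
                free_irq(pci_irq_vector(pdev, i), my_data);
        pci_free_irq_vectors(pdev);
        return ret;

Teardown mirrors xhci_cleanup_msix(): free each per-vector IRQ with free_irq(pci_irq_vector(...)), then release the vectors with pci_free_irq_vectors().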
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index da3eb695fe54..73a28a986d5e 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -425,6 +425,7 @@ struct xhci_op_regs {
#define PORT_L1DS_MASK (0xff << 8)
#define PORT_L1DS(p) (((p) & 0xff) << 8)
#define PORT_HLE (1 << 16)
+#define PORT_TEST_MODE_SHIFT 28
/* USB3 Protocol PORTLI Port Link Information */
#define PORT_RX_LANES(p) (((p) >> 16) & 0xf)
@@ -617,6 +618,7 @@ struct xhci_slot_ctx {
#define ROUTE_STRING_MASK (0xfffff)
/* Device speed - values defined by PORTSC Device Speed field - 20:23 */
#define DEV_SPEED (0xf << 20)
+#define GET_DEV_SPEED(n) (((n) & DEV_SPEED) >> 20)
/* bit 24 reserved */
/* Is this LS/FS device connected through a HS hub? - bit 25 */
#define DEV_MTT (0x1 << 25)
@@ -637,6 +639,7 @@ struct xhci_slot_ctx {
#define DEVINFO_TO_ROOT_HUB_PORT(p) (((p) >> 16) & 0xff)
/* Maximum number of ports under a hub device */
#define XHCI_MAX_PORTS(p) (((p) & 0xff) << 24)
+#define DEVINFO_TO_MAX_PORTS(p) (((p) & (0xff << 24)) >> 24)
/* tt_info bitmasks */
/*
@@ -651,6 +654,7 @@ struct xhci_slot_ctx {
*/
#define TT_PORT (0xff << 8)
#define TT_THINK_TIME(p) (((p) & 0x3) << 16)
+#define GET_TT_THINK_TIME(p) (((p) & (0x3 << 16)) >> 16)
/* dev_state bitmasks */
/* USB device address - assigned by the HC */
@@ -1562,10 +1566,8 @@ struct xhci_ring {
struct xhci_segment *last_seg;
union xhci_trb *enqueue;
struct xhci_segment *enq_seg;
- unsigned int enq_updates;
union xhci_trb *dequeue;
struct xhci_segment *deq_seg;
- unsigned int deq_updates;
struct list_head td_list;
/*
* Write the cycle state into the TRB cycle field to give ownership of
@@ -1604,7 +1606,6 @@ struct xhci_scratchpad {
u64 *sp_array;
dma_addr_t sp_dma;
void **sp_buffers;
- dma_addr_t *sp_dma_buffers;
};
struct urb_priv {
@@ -1722,7 +1723,6 @@ struct xhci_hcd {
int page_shift;
/* msi-x vectors */
int msix_count;
- struct msix_entry *msix_entries;
/* optional clock */
struct clk *clk;
/* data structures */
@@ -1818,6 +1818,7 @@ struct xhci_hcd {
#define XHCI_MISSING_CAS (1 << 24)
/* For controller with a broken Port Disable implementation */
#define XHCI_BROKEN_PORT_PED (1 << 25)
+#define XHCI_LIMIT_ENDPOINT_INTERVAL_7 (1 << 26)
unsigned int num_active_eps;
unsigned int limit_active_eps;
@@ -1843,6 +1844,7 @@ struct xhci_hcd {
/* Compliance Mode Recovery Data */
struct timer_list comp_mode_recovery_timer;
u32 port_status_u0;
+ u16 test_mode;
/* Compliance Mode Timer Triggered every 2 seconds */
#define COMP_MODE_RCVRY_MSECS 2000
@@ -1918,19 +1920,10 @@ void xhci_print_ir_set(struct xhci_hcd *xhci, int set_num);
void xhci_print_registers(struct xhci_hcd *xhci);
void xhci_dbg_regs(struct xhci_hcd *xhci);
void xhci_print_run_regs(struct xhci_hcd *xhci);
-void xhci_print_trb_offsets(struct xhci_hcd *xhci, union xhci_trb *trb);
-void xhci_debug_trb(struct xhci_hcd *xhci, union xhci_trb *trb);
-void xhci_debug_segment(struct xhci_hcd *xhci, struct xhci_segment *seg);
-void xhci_debug_ring(struct xhci_hcd *xhci, struct xhci_ring *ring);
void xhci_dbg_erst(struct xhci_hcd *xhci, struct xhci_erst *erst);
void xhci_dbg_cmd_ptrs(struct xhci_hcd *xhci);
-void xhci_dbg_ring_ptrs(struct xhci_hcd *xhci, struct xhci_ring *ring);
-void xhci_dbg_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx, unsigned int last_ep);
char *xhci_get_slot_state(struct xhci_hcd *xhci,
struct xhci_container_ctx *ctx);
-void xhci_dbg_ep_rings(struct xhci_hcd *xhci,
- unsigned int slot_id, unsigned int ep_index,
- struct xhci_virt_ep *ep);
void xhci_dbg_trace(struct xhci_hcd *xhci, void (*trace)(struct va_format *),
const char *fmt, ...);
@@ -1944,16 +1937,8 @@ void xhci_copy_ep0_dequeue_into_input_ctx(struct xhci_hcd *xhci,
struct usb_device *udev);
unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc);
unsigned int xhci_get_endpoint_address(unsigned int ep_index);
-unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc);
-unsigned int xhci_get_endpoint_flag_from_index(unsigned int ep_index);
unsigned int xhci_last_valid_endpoint(u32 added_ctxs);
void xhci_endpoint_zero(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev, struct usb_host_endpoint *ep);
-void xhci_drop_ep_from_interval_table(struct xhci_hcd *xhci,
- struct xhci_bw_info *ep_bw,
- struct xhci_interval_bw_table *bw_table,
- struct usb_device *udev,
- struct xhci_virt_ep *virt_ep,
- struct xhci_tt_bw_info *tt_info);
void xhci_update_tt_active_eps(struct xhci_hcd *xhci,
struct xhci_virt_device *virt_dev,
int old_active_eps);
@@ -2010,53 +1995,25 @@ typedef void (*xhci_get_quirks_t)(struct device *, struct xhci_hcd *);
int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, int usec);
void xhci_quiesce(struct xhci_hcd *xhci);
int xhci_halt(struct xhci_hcd *xhci);
+int xhci_start(struct xhci_hcd *xhci);
int xhci_reset(struct xhci_hcd *xhci);
-int xhci_init(struct usb_hcd *hcd);
int xhci_run(struct usb_hcd *hcd);
-void xhci_stop(struct usb_hcd *hcd);
-void xhci_shutdown(struct usb_hcd *hcd);
int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks);
void xhci_init_driver(struct hc_driver *drv,
const struct xhci_driver_overrides *over);
+int xhci_disable_slot(struct xhci_hcd *xhci,
+ struct xhci_command *command, u32 slot_id);
-#ifdef CONFIG_PM
int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup);
int xhci_resume(struct xhci_hcd *xhci, bool hibernated);
-#else
-#define xhci_suspend NULL
-#define xhci_resume NULL
-#endif
-int xhci_get_frame(struct usb_hcd *hcd);
irqreturn_t xhci_irq(struct usb_hcd *hcd);
irqreturn_t xhci_msi_irq(int irq, void *hcd);
int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev);
-void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev);
int xhci_alloc_tt_info(struct xhci_hcd *xhci,
struct xhci_virt_device *virt_dev,
struct usb_device *hdev,
struct usb_tt *tt, gfp_t mem_flags);
-int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev,
- struct usb_host_endpoint **eps, unsigned int num_eps,
- unsigned int num_streams, gfp_t mem_flags);
-int xhci_free_streams(struct usb_hcd *hcd, struct usb_device *udev,
- struct usb_host_endpoint **eps, unsigned int num_eps,
- gfp_t mem_flags);
-int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev);
-int xhci_enable_device(struct usb_hcd *hcd, struct usb_device *udev);
-int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev);
-int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
- struct usb_device *udev, int enable);
-int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
- struct usb_tt *tt, gfp_t mem_flags);
-int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags);
-int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status);
-int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev, struct usb_host_endpoint *ep);
-int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev, struct usb_host_endpoint *ep);
-void xhci_endpoint_reset(struct usb_hcd *hcd, struct usb_host_endpoint *ep);
-int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev);
-int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);
-void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);
/* xHCI ring, segment, TRB, and TD functions */
dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg, union xhci_trb *trb);
@@ -2100,9 +2057,6 @@ void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
struct xhci_dequeue_state *deq_state);
void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
unsigned int ep_index, struct xhci_td *td);
-void xhci_queue_config_ep_quirk(struct xhci_hcd *xhci,
- unsigned int slot_id, unsigned int ep_index,
- struct xhci_dequeue_state *deq_state);
void xhci_stop_endpoint_command_watchdog(unsigned long arg);
void xhci_handle_command_timeout(struct work_struct *work);
@@ -2113,16 +2067,13 @@ void xhci_cleanup_command_queue(struct xhci_hcd *xhci);
/* xHCI roothub code */
void xhci_set_link_state(struct xhci_hcd *xhci, __le32 __iomem **port_array,
int port_id, u32 link_state);
-int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd,
- struct usb_device *udev, enum usb3_link_state state);
-int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd,
- struct usb_device *udev, enum usb3_link_state state);
void xhci_test_and_clear_bit(struct xhci_hcd *xhci, __le32 __iomem **port_array,
int port_id, u32 port_bit);
int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex,
char *buf, u16 wLength);
int xhci_hub_status_data(struct usb_hcd *hcd, char *buf);
int xhci_find_raw_port_number(struct usb_hcd *hcd, int port1);
+void xhci_hc_died(struct xhci_hcd *xhci);
#ifdef CONFIG_PM
int xhci_bus_suspend(struct usb_hcd *hcd);
@@ -2153,6 +2104,22 @@ static inline struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci,
urb->stream_id);
}
+static inline char *xhci_slot_state_string(u32 state)
+{
+ switch (state) {
+ case SLOT_STATE_ENABLED:
+ return "enabled/disabled";
+ case SLOT_STATE_DEFAULT:
+ return "default";
+ case SLOT_STATE_ADDRESSED:
+ return "addressed";
+ case SLOT_STATE_CONFIGURED:
+ return "configured";
+ default:
+ return "reserved";
+ }
+}
+
static inline const char *xhci_decode_trb(u32 field0, u32 field1, u32 field2,
u32 field3)
{
@@ -2162,14 +2129,12 @@ static inline const char *xhci_decode_trb(u32 field0, u32 field1, u32 field2,
switch (type) {
case TRB_LINK:
sprintf(str,
- "TRB %08x%08x status '%s' len %d slot %d ep %d type '%s' flags %c:%c",
- field1, field0,
- xhci_trb_comp_code_string(GET_COMP_CODE(field2)),
- EVENT_TRB_LEN(field2), TRB_TO_SLOT_ID(field3),
- /* Macro decrements 1, maybe it shouldn't?!? */
- TRB_TO_EP_INDEX(field3) + 1,
- xhci_trb_type_string(TRB_FIELD_TO_TYPE(field3)),
- field3 & EVENT_DATA ? 'E' : 'e',
+ "LINK %08x%08x intr %d type '%s' flags %c:%c:%c:%c",
+ field1, field0, GET_INTR_TARGET(field2),
+ xhci_trb_type_string(type),
+ field3 & TRB_IOC ? 'I' : 'i',
+ field3 & TRB_CHAIN ? 'C' : 'c',
+ field3 & TRB_TC ? 'T' : 't',
field3 & TRB_CYCLE ? 'C' : 'c');
break;
case TRB_TRANSFER:
@@ -2187,37 +2152,52 @@ static inline const char *xhci_decode_trb(u32 field0, u32 field1, u32 field2,
EVENT_TRB_LEN(field2), TRB_TO_SLOT_ID(field3),
/* Macro decrements 1, maybe it shouldn't?!? */
TRB_TO_EP_INDEX(field3) + 1,
- xhci_trb_type_string(TRB_FIELD_TO_TYPE(field3)),
+ xhci_trb_type_string(type),
field3 & EVENT_DATA ? 'E' : 'e',
field3 & TRB_CYCLE ? 'C' : 'c');
break;
case TRB_SETUP:
- sprintf(str,
- "bRequestType %02x bRequest %02x wValue %02x%02x wIndex %02x%02x wLength %d length %d TD size %d intr %d type '%s' flags %c:%c:%c:%c:%c:%c:%c:%c",
- field0 & 0xff,
- (field0 & 0xff00) >> 8,
- (field0 & 0xff000000) >> 24,
- (field0 & 0xff0000) >> 16,
- (field1 & 0xff00) >> 8,
- field1 & 0xff,
- (field1 & 0xff000000) >> 16 |
- (field1 & 0xff0000) >> 16,
- TRB_LEN(field2), GET_TD_SIZE(field2),
- GET_INTR_TARGET(field2),
- xhci_trb_type_string(TRB_FIELD_TO_TYPE(field3)),
- field3 & TRB_BEI ? 'B' : 'b',
- field3 & TRB_IDT ? 'I' : 'i',
- field3 & TRB_IOC ? 'I' : 'i',
- field3 & TRB_CHAIN ? 'C' : 'c',
- field3 & TRB_NO_SNOOP ? 'S' : 's',
- field3 & TRB_ISP ? 'I' : 'i',
- field3 & TRB_ENT ? 'E' : 'e',
- field3 & TRB_CYCLE ? 'C' : 'c');
+ sprintf(str, "bRequestType %02x bRequest %02x wValue %02x%02x wIndex %02x%02x wLength %d length %d TD size %d intr %d type '%s' flags %c:%c:%c",
+ field0 & 0xff,
+ (field0 & 0xff00) >> 8,
+ (field0 & 0xff000000) >> 24,
+ (field0 & 0xff0000) >> 16,
+ (field1 & 0xff00) >> 8,
+ field1 & 0xff,
+ (field1 & 0xff000000) >> 16 |
+ (field1 & 0xff0000) >> 16,
+ TRB_LEN(field2), GET_TD_SIZE(field2),
+ GET_INTR_TARGET(field2),
+ xhci_trb_type_string(type),
+ field3 & TRB_IDT ? 'I' : 'i',
+ field3 & TRB_IOC ? 'I' : 'i',
+ field3 & TRB_CYCLE ? 'C' : 'c');
break;
- case TRB_NORMAL:
case TRB_DATA:
+ sprintf(str, "Buffer %08x%08x length %d TD size %d intr %d type '%s' flags %c:%c:%c:%c:%c:%c:%c",
+ field1, field0, TRB_LEN(field2), GET_TD_SIZE(field2),
+ GET_INTR_TARGET(field2),
+ xhci_trb_type_string(type),
+ field3 & TRB_IDT ? 'I' : 'i',
+ field3 & TRB_IOC ? 'I' : 'i',
+ field3 & TRB_CHAIN ? 'C' : 'c',
+ field3 & TRB_NO_SNOOP ? 'S' : 's',
+ field3 & TRB_ISP ? 'I' : 'i',
+ field3 & TRB_ENT ? 'E' : 'e',
+ field3 & TRB_CYCLE ? 'C' : 'c');
+ break;
case TRB_STATUS:
+ sprintf(str, "Buffer %08x%08x length %d TD size %d intr %d type '%s' flags %c:%c:%c:%c",
+ field1, field0, TRB_LEN(field2), GET_TD_SIZE(field2),
+ GET_INTR_TARGET(field2),
+ xhci_trb_type_string(type),
+ field3 & TRB_IOC ? 'I' : 'i',
+ field3 & TRB_CHAIN ? 'C' : 'c',
+ field3 & TRB_ENT ? 'E' : 'e',
+ field3 & TRB_CYCLE ? 'C' : 'c');
+ break;
+ case TRB_NORMAL:
case TRB_ISOC:
case TRB_EVENT_DATA:
case TRB_TR_NOOP:
@@ -2225,7 +2205,7 @@ static inline const char *xhci_decode_trb(u32 field0, u32 field1, u32 field2,
"Buffer %08x%08x length %d TD size %d intr %d type '%s' flags %c:%c:%c:%c:%c:%c:%c:%c",
field1, field0, TRB_LEN(field2), GET_TD_SIZE(field2),
GET_INTR_TARGET(field2),
- xhci_trb_type_string(TRB_FIELD_TO_TYPE(field3)),
+ xhci_trb_type_string(type),
field3 & TRB_BEI ? 'B' : 'b',
field3 & TRB_IDT ? 'I' : 'i',
field3 & TRB_IOC ? 'I' : 'i',
@@ -2240,21 +2220,21 @@ static inline const char *xhci_decode_trb(u32 field0, u32 field1, u32 field2,
case TRB_ENABLE_SLOT:
sprintf(str,
"%s: flags %c",
- xhci_trb_type_string(TRB_FIELD_TO_TYPE(field3)),
+ xhci_trb_type_string(type),
field3 & TRB_CYCLE ? 'C' : 'c');
break;
case TRB_DISABLE_SLOT:
case TRB_NEG_BANDWIDTH:
sprintf(str,
"%s: slot %d flags %c",
- xhci_trb_type_string(TRB_FIELD_TO_TYPE(field3)),
+ xhci_trb_type_string(type),
TRB_TO_SLOT_ID(field3),
field3 & TRB_CYCLE ? 'C' : 'c');
break;
case TRB_ADDR_DEV:
sprintf(str,
"%s: ctx %08x%08x slot %d flags %c:%c",
- xhci_trb_type_string(TRB_FIELD_TO_TYPE(field3)),
+ xhci_trb_type_string(type),
field1, field0,
TRB_TO_SLOT_ID(field3),
field3 & TRB_BSR ? 'B' : 'b',
@@ -2263,7 +2243,7 @@ static inline const char *xhci_decode_trb(u32 field0, u32 field1, u32 field2,
case TRB_CONFIG_EP:
sprintf(str,
"%s: ctx %08x%08x slot %d flags %c:%c",
- xhci_trb_type_string(TRB_FIELD_TO_TYPE(field3)),
+ xhci_trb_type_string(type),
field1, field0,
TRB_TO_SLOT_ID(field3),
field3 & TRB_DC ? 'D' : 'd',
@@ -2272,7 +2252,7 @@ static inline const char *xhci_decode_trb(u32 field0, u32 field1, u32 field2,
case TRB_EVAL_CONTEXT:
sprintf(str,
"%s: ctx %08x%08x slot %d flags %c",
- xhci_trb_type_string(TRB_FIELD_TO_TYPE(field3)),
+ xhci_trb_type_string(type),
field1, field0,
TRB_TO_SLOT_ID(field3),
field3 & TRB_CYCLE ? 'C' : 'c');
@@ -2280,7 +2260,7 @@ static inline const char *xhci_decode_trb(u32 field0, u32 field1, u32 field2,
case TRB_RESET_EP:
sprintf(str,
"%s: ctx %08x%08x slot %d ep %d flags %c",
- xhci_trb_type_string(TRB_FIELD_TO_TYPE(field3)),
+ xhci_trb_type_string(type),
field1, field0,
TRB_TO_SLOT_ID(field3),
/* Macro decrements 1, maybe it shouldn't?!? */
@@ -2290,7 +2270,7 @@ static inline const char *xhci_decode_trb(u32 field0, u32 field1, u32 field2,
case TRB_STOP_RING:
sprintf(str,
"%s: slot %d sp %d ep %d flags %c",
- xhci_trb_type_string(TRB_FIELD_TO_TYPE(field3)),
+ xhci_trb_type_string(type),
TRB_TO_SLOT_ID(field3),
TRB_TO_SUSPEND_PORT(field3),
/* Macro decrements 1, maybe it shouldn't?!? */
@@ -2300,7 +2280,7 @@ static inline const char *xhci_decode_trb(u32 field0, u32 field1, u32 field2,
case TRB_SET_DEQ:
sprintf(str,
"%s: deq %08x%08x stream %d slot %d ep %d flags %c",
- xhci_trb_type_string(TRB_FIELD_TO_TYPE(field3)),
+ xhci_trb_type_string(type),
field1, field0,
TRB_TO_STREAM_ID(field2),
TRB_TO_SLOT_ID(field3),
@@ -2311,14 +2291,14 @@ static inline const char *xhci_decode_trb(u32 field0, u32 field1, u32 field2,
case TRB_RESET_DEV:
sprintf(str,
"%s: slot %d flags %c",
- xhci_trb_type_string(TRB_FIELD_TO_TYPE(field3)),
+ xhci_trb_type_string(type),
TRB_TO_SLOT_ID(field3),
field3 & TRB_CYCLE ? 'C' : 'c');
break;
case TRB_FORCE_EVENT:
sprintf(str,
"%s: event %08x%08x vf intr %d vf id %d flags %c",
- xhci_trb_type_string(TRB_FIELD_TO_TYPE(field3)),
+ xhci_trb_type_string(type),
field1, field0,
TRB_TO_VF_INTR_TARGET(field2),
TRB_TO_VF_ID(field3),
@@ -2327,14 +2307,14 @@ static inline const char *xhci_decode_trb(u32 field0, u32 field1, u32 field2,
case TRB_SET_LT:
sprintf(str,
"%s: belt %d flags %c",
- xhci_trb_type_string(TRB_FIELD_TO_TYPE(field3)),
+ xhci_trb_type_string(type),
TRB_TO_BELT(field3),
field3 & TRB_CYCLE ? 'C' : 'c');
break;
case TRB_GET_BW:
sprintf(str,
"%s: ctx %08x%08x slot %d speed %d flags %c",
- xhci_trb_type_string(TRB_FIELD_TO_TYPE(field3)),
+ xhci_trb_type_string(type),
field1, field0,
TRB_TO_SLOT_ID(field3),
TRB_TO_DEV_SPEED(field3),
@@ -2343,7 +2323,7 @@ static inline const char *xhci_decode_trb(u32 field0, u32 field1, u32 field2,
case TRB_FORCE_HEADER:
sprintf(str,
"%s: info %08x%08x%08x pkt type %d roothub port %d flags %c",
- xhci_trb_type_string(TRB_FIELD_TO_TYPE(field3)),
+ xhci_trb_type_string(type),
field2, field1, field0 & 0xffffffe0,
TRB_TO_PACKET_TYPE(field0),
TRB_TO_ROOTHUB_PORT(field3),
@@ -2352,12 +2332,155 @@ static inline const char *xhci_decode_trb(u32 field0, u32 field1, u32 field2,
default:
sprintf(str,
"type '%s' -> raw %08x %08x %08x %08x",
- xhci_trb_type_string(TRB_FIELD_TO_TYPE(field3)),
+ xhci_trb_type_string(type),
field0, field1, field2, field3);
}
return str;
}
+static inline const char *xhci_decode_slot_context(u32 info, u32 info2,
+ u32 tt_info, u32 state)
+{
+ static char str[1024];
+ u32 speed;
+ u32 hub;
+ u32 mtt;
+ int ret = 0;
+
+ speed = info & DEV_SPEED;
+ hub = info & DEV_HUB;
+ mtt = info & DEV_MTT;
+
+ ret = sprintf(str, "RS %05x %s%s%s Ctx Entries %d MEL %d us Port# %d/%d",
+ info & ROUTE_STRING_MASK,
+ ({ char *s;
+ switch (speed) {
+ case SLOT_SPEED_FS:
+ s = "full-speed";
+ break;
+ case SLOT_SPEED_LS:
+ s = "low-speed";
+ break;
+ case SLOT_SPEED_HS:
+ s = "high-speed";
+ break;
+ case SLOT_SPEED_SS:
+ s = "super-speed";
+ break;
+ case SLOT_SPEED_SSP:
+ s = "super-speed plus";
+ break;
+ default:
+ s = "UNKNOWN speed";
+ } s; }),
+ mtt ? " multi-TT" : "",
+ hub ? " Hub" : "",
+ (info & LAST_CTX_MASK) >> 27,
+ info2 & MAX_EXIT,
+ DEVINFO_TO_ROOT_HUB_PORT(info2),
+ DEVINFO_TO_MAX_PORTS(info2));
+
+ ret += sprintf(str + ret, " [TT Slot %d Port# %d TTT %d Intr %d] Addr %d State %s",
+ tt_info & TT_SLOT, (tt_info & TT_PORT) >> 8,
+ GET_TT_THINK_TIME(tt_info), GET_INTR_TARGET(tt_info),
+ state & DEV_ADDR_MASK,
+ xhci_slot_state_string(GET_SLOT_STATE(state)));
+
+ return str;
+}
+
+static inline const char *xhci_ep_state_string(u8 state)
+{
+ switch (state) {
+ case EP_STATE_DISABLED:
+ return "disabled";
+ case EP_STATE_RUNNING:
+ return "running";
+ case EP_STATE_HALTED:
+ return "halted";
+ case EP_STATE_STOPPED:
+ return "stopped";
+ case EP_STATE_ERROR:
+ return "error";
+ default:
+ return "INVALID";
+ }
+}
+
+static inline const char *xhci_ep_type_string(u8 type)
+{
+ switch (type) {
+ case ISOC_OUT_EP:
+ return "Isoc OUT";
+ case BULK_OUT_EP:
+ return "Bulk OUT";
+ case INT_OUT_EP:
+ return "Int OUT";
+ case CTRL_EP:
+ return "Ctrl";
+ case ISOC_IN_EP:
+ return "Isoc IN";
+ case BULK_IN_EP:
+ return "Bulk IN";
+ case INT_IN_EP:
+ return "Int IN";
+ default:
+ return "INVALID";
+ }
+}
+
+static inline const char *xhci_decode_ep_context(u32 info, u32 info2, u64 deq,
+ u32 tx_info)
+{
+ static char str[1024];
+ int ret;
+
+ u32 esit;
+ u16 maxp;
+ u16 avg;
+
+ u8 max_pstr;
+ u8 ep_state;
+ u8 interval;
+ u8 ep_type;
+ u8 burst;
+ u8 cerr;
+ u8 mult;
+ u8 lsa;
+ u8 hid;
+
+ esit = EP_MAX_ESIT_PAYLOAD_HI(info) << 16 |
+ EP_MAX_ESIT_PAYLOAD_LO(tx_info);
+
+ ep_state = info & EP_STATE_MASK;
+ max_pstr = info & EP_MAXPSTREAMS_MASK;
+ interval = CTX_TO_EP_INTERVAL(info);
+ mult = CTX_TO_EP_MULT(info) + 1;
+ lsa = info & EP_HAS_LSA;
+
+ cerr = (info2 & (3 << 1)) >> 1;
+ ep_type = CTX_TO_EP_TYPE(info2);
+ hid = info2 & (1 << 7);
+ burst = CTX_TO_MAX_BURST(info2);
+ maxp = MAX_PACKET_DECODED(info2);
+
+ avg = EP_AVG_TRB_LENGTH(tx_info);
+
+ ret = sprintf(str, "State %s mult %d max P. Streams %d %s",
+ xhci_ep_state_string(ep_state), mult,
+ max_pstr, lsa ? "LSA " : "");
+
+ ret += sprintf(str + ret, "interval %d us max ESIT payload %d CErr %d ",
+ (1 << interval) * 125, esit, cerr);
+
+ ret += sprintf(str + ret, "Type %s %sburst %d maxp %d deq %016llx ",
+ xhci_ep_type_string(ep_type), hid ? "HID" : "",
+ burst, maxp, deq);
+
+ ret += sprintf(str + ret, "avg trb len %d", avg);
+
+ return str;
+}
#endif /* __LINUX_XHCI_HCD_H */
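Note: the new xhci_decode_slot_context() and xhci_decode_ep_context() helpers above format a context into a static buffer and return a pointer to it, so they are meant for one-shot debug or trace output rather than concurrent callers. A hedged example of a possible call site (this usage is an assumption, not part of the patch):

        /* hypothetical debug print of a device's output slot context */
        struct xhci_slot_ctx *slot_ctx = xhci_get_slot_ctx(xhci, vdev->out_ctx);

        xhci_dbg(xhci, "%s\n",
                 xhci_decode_slot_context(le32_to_cpu(slot_ctx->dev_info),
                                          le32_to_cpu(slot_ctx->dev_info2),
                                          le32_to_cpu(slot_ctx->tt_info),
                                          le32_to_cpu(slot_ctx->dev_state)));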
diff --git a/drivers/usb/isp1760/isp1760-if.c b/drivers/usb/isp1760/isp1760-if.c
index 79205b31e4a9..bc68bbab7fa1 100644
--- a/drivers/usb/isp1760/isp1760-if.c
+++ b/drivers/usb/isp1760/isp1760-if.c
@@ -21,11 +21,11 @@
#include "isp1760-core.h"
#include "isp1760-regs.h"
-#ifdef CONFIG_PCI
+#ifdef CONFIG_USB_PCI
#include <linux/pci.h>
#endif
-#ifdef CONFIG_PCI
+#ifdef CONFIG_USB_PCI
static int isp1761_pci_init(struct pci_dev *dev)
{
resource_size_t mem_start;
@@ -286,7 +286,7 @@ static int __init isp1760_init(void)
ret = platform_driver_register(&isp1760_plat_driver);
if (!ret)
any_ret = 0;
-#ifdef CONFIG_PCI
+#ifdef CONFIG_USB_PCI
ret = pci_register_driver(&isp1761_pci_driver);
if (!ret)
any_ret = 0;
@@ -301,7 +301,7 @@ module_init(isp1760_init);
static void __exit isp1760_exit(void)
{
platform_driver_unregister(&isp1760_plat_driver);
-#ifdef CONFIG_PCI
+#ifdef CONFIG_USB_PCI
pci_unregister_driver(&isp1761_pci_driver);
#endif
isp1760_deinit_kmem_cache();
diff --git a/drivers/usb/misc/adutux.c b/drivers/usb/misc/adutux.c
index db9a9e6ff6be..dfd54ea4808f 100644
--- a/drivers/usb/misc/adutux.c
+++ b/drivers/usb/misc/adutux.c
@@ -655,24 +655,15 @@ static int adu_probe(struct usb_interface *interface,
{
struct usb_device *udev = interface_to_usbdev(interface);
struct adu_device *dev = NULL;
- struct usb_host_interface *iface_desc;
- struct usb_endpoint_descriptor *endpoint;
- int retval = -ENODEV;
+ int retval = -ENOMEM;
int in_end_size;
int out_end_size;
- int i;
-
- if (udev == NULL) {
- dev_err(&interface->dev, "udev is NULL.\n");
- goto exit;
- }
+ int res;
/* allocate memory for our device state and initialize it */
dev = kzalloc(sizeof(struct adu_device), GFP_KERNEL);
- if (!dev) {
- retval = -ENOMEM;
- goto exit;
- }
+ if (!dev)
+ return -ENOMEM;
mutex_init(&dev->mtx);
spin_lock_init(&dev->buflock);
@@ -680,24 +671,13 @@ static int adu_probe(struct usb_interface *interface,
init_waitqueue_head(&dev->read_wait);
init_waitqueue_head(&dev->write_wait);
- iface_desc = &interface->altsetting[0];
-
- /* set up the endpoint information */
- for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
- endpoint = &iface_desc->endpoint[i].desc;
-
- if (usb_endpoint_is_int_in(endpoint))
- dev->interrupt_in_endpoint = endpoint;
-
- if (usb_endpoint_is_int_out(endpoint))
- dev->interrupt_out_endpoint = endpoint;
- }
- if (dev->interrupt_in_endpoint == NULL) {
- dev_err(&interface->dev, "interrupt in endpoint not found\n");
- goto error;
- }
- if (dev->interrupt_out_endpoint == NULL) {
- dev_err(&interface->dev, "interrupt out endpoint not found\n");
+ res = usb_find_common_endpoints_reverse(&interface->altsetting[0],
+ NULL, NULL,
+ &dev->interrupt_in_endpoint,
+ &dev->interrupt_out_endpoint);
+ if (res) {
+ dev_err(&interface->dev, "interrupt endpoints not found\n");
+ retval = res;
goto error;
}
@@ -705,10 +685,8 @@ static int adu_probe(struct usb_interface *interface,
out_end_size = usb_endpoint_maxp(dev->interrupt_out_endpoint);
dev->read_buffer_primary = kmalloc((4 * in_end_size), GFP_KERNEL);
- if (!dev->read_buffer_primary) {
- retval = -ENOMEM;
+ if (!dev->read_buffer_primary)
goto error;
- }
/* debug code prime the buffer */
memset(dev->read_buffer_primary, 'a', in_end_size);
@@ -717,10 +695,8 @@ static int adu_probe(struct usb_interface *interface,
memset(dev->read_buffer_primary + (3 * in_end_size), 'd', in_end_size);
dev->read_buffer_secondary = kmalloc((4 * in_end_size), GFP_KERNEL);
- if (!dev->read_buffer_secondary) {
- retval = -ENOMEM;
+ if (!dev->read_buffer_secondary)
goto error;
- }
/* debug code prime the buffer */
memset(dev->read_buffer_secondary, 'e', in_end_size);
@@ -748,6 +724,7 @@ static int adu_probe(struct usb_interface *interface,
if (!usb_string(udev, udev->descriptor.iSerialNumber, dev->serial_number,
sizeof(dev->serial_number))) {
dev_err(&interface->dev, "Could not retrieve serial number\n");
+ retval = -EIO;
goto error;
}
dev_dbg(&interface->dev,"serial_number=%s", dev->serial_number);
@@ -770,8 +747,8 @@ static int adu_probe(struct usb_interface *interface,
dev_info(&interface->dev, "ADU%d %s now attached to /dev/usb/adutux%d\n",
le16_to_cpu(udev->descriptor.idProduct), dev->serial_number,
(dev->minor - ADU_MINOR_BASE));
-exit:
- return retval;
+
+ return 0;
error:
adu_delete(dev);
diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c
index da5ff401a354..8efdc500e790 100644
--- a/drivers/usb/misc/appledisplay.c
+++ b/drivers/usb/misc/appledisplay.c
@@ -212,28 +212,21 @@ static int appledisplay_probe(struct usb_interface *iface,
struct backlight_properties props;
struct appledisplay *pdata;
struct usb_device *udev = interface_to_usbdev(iface);
- struct usb_host_interface *iface_desc;
struct usb_endpoint_descriptor *endpoint;
int int_in_endpointAddr = 0;
- int i, retval = -ENOMEM, brightness;
+ int retval, brightness;
char bl_name[20];
/* set up the endpoint information */
/* use only the first interrupt-in endpoint */
- iface_desc = iface->cur_altsetting;
- for (i = 0; i < iface_desc->desc.bNumEndpoints; i++) {
- endpoint = &iface_desc->endpoint[i].desc;
- if (!int_in_endpointAddr && usb_endpoint_is_int_in(endpoint)) {
- /* we found an interrupt in endpoint */
- int_in_endpointAddr = endpoint->bEndpointAddress;
- break;
- }
- }
- if (!int_in_endpointAddr) {
+ retval = usb_find_int_in_endpoint(iface->cur_altsetting, &endpoint);
+ if (retval) {
dev_err(&iface->dev, "Could not find int-in endpoint\n");
- return -EIO;
+ return retval;
}
+ int_in_endpointAddr = endpoint->bEndpointAddress;
+
/* allocate memory for our device state and initialize it */
pdata = kzalloc(sizeof(struct appledisplay), GFP_KERNEL);
if (!pdata) {
diff --git a/drivers/usb/misc/chaoskey.c b/drivers/usb/misc/chaoskey.c
index aa350dc9eb25..e9cae4d82af2 100644
--- a/drivers/usb/misc/chaoskey.c
+++ b/drivers/usb/misc/chaoskey.c
@@ -117,28 +117,26 @@ static int chaoskey_probe(struct usb_interface *interface,
{
struct usb_device *udev = interface_to_usbdev(interface);
struct usb_host_interface *altsetting = interface->cur_altsetting;
- int i;
- int in_ep = -1;
+ struct usb_endpoint_descriptor *epd;
+ int in_ep;
struct chaoskey *dev;
int result = -ENOMEM;
int size;
+ int res;
usb_dbg(interface, "probe %s-%s", udev->product, udev->serial);
/* Find the first bulk IN endpoint and its packet size */
- for (i = 0; i < altsetting->desc.bNumEndpoints; i++) {
- if (usb_endpoint_is_bulk_in(&altsetting->endpoint[i].desc)) {
- in_ep = usb_endpoint_num(&altsetting->endpoint[i].desc);
- size = usb_endpoint_maxp(&altsetting->endpoint[i].desc);
- break;
- }
+ res = usb_find_bulk_in_endpoint(altsetting, &epd);
+ if (res) {
+ usb_dbg(interface, "no IN endpoint found");
+ return res;
}
+ in_ep = usb_endpoint_num(epd);
+ size = usb_endpoint_maxp(epd);
+
/* Validate endpoint and size */
- if (in_ep == -1) {
- usb_dbg(interface, "no IN endpoint found");
- return -ENODEV;
- }
if (size <= 0) {
usb_dbg(interface, "invalid size (%d)", size);
return -ENODEV;
diff --git a/drivers/usb/misc/ftdi-elan.c b/drivers/usb/misc/ftdi-elan.c
index 01a9373b7e18..8291499d0581 100644
--- a/drivers/usb/misc/ftdi-elan.c
+++ b/drivers/usb/misc/ftdi-elan.c
@@ -2700,10 +2700,8 @@ static int ftdi_elan_probe(struct usb_interface *interface,
const struct usb_device_id *id)
{
struct usb_host_interface *iface_desc;
- struct usb_endpoint_descriptor *endpoint;
- size_t buffer_size;
- int i;
- int retval = -ENOMEM;
+ struct usb_endpoint_descriptor *bulk_in, *bulk_out;
+ int retval;
struct usb_ftdi *ftdi;
ftdi = kzalloc(sizeof(struct usb_ftdi), GFP_KERNEL);
@@ -2720,31 +2718,25 @@ static int ftdi_elan_probe(struct usb_interface *interface,
ftdi->interface = interface;
mutex_init(&ftdi->u132_lock);
ftdi->expected = 4;
+
iface_desc = interface->cur_altsetting;
- for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
- endpoint = &iface_desc->endpoint[i].desc;
- if (!ftdi->bulk_in_endpointAddr &&
- usb_endpoint_is_bulk_in(endpoint)) {
- buffer_size = usb_endpoint_maxp(endpoint);
- ftdi->bulk_in_size = buffer_size;
- ftdi->bulk_in_endpointAddr = endpoint->bEndpointAddress;
- ftdi->bulk_in_buffer = kmalloc(buffer_size, GFP_KERNEL);
- if (!ftdi->bulk_in_buffer) {
- retval = -ENOMEM;
- goto error;
- }
- }
- if (!ftdi->bulk_out_endpointAddr &&
- usb_endpoint_is_bulk_out(endpoint)) {
- ftdi->bulk_out_endpointAddr =
- endpoint->bEndpointAddress;
- }
- }
- if (!(ftdi->bulk_in_endpointAddr && ftdi->bulk_out_endpointAddr)) {
+ retval = usb_find_common_endpoints(iface_desc,
+ &bulk_in, &bulk_out, NULL, NULL);
+ if (retval) {
dev_err(&ftdi->udev->dev, "Could not find both bulk-in and bulk-out endpoints\n");
- retval = -ENODEV;
goto error;
}
+
+ ftdi->bulk_in_size = usb_endpoint_maxp(bulk_in);
+ ftdi->bulk_in_endpointAddr = bulk_in->bEndpointAddress;
+ ftdi->bulk_in_buffer = kmalloc(ftdi->bulk_in_size, GFP_KERNEL);
+ if (!ftdi->bulk_in_buffer) {
+ retval = -ENOMEM;
+ goto error;
+ }
+
+ ftdi->bulk_out_endpointAddr = bulk_out->bEndpointAddress;
+
dev_info(&ftdi->udev->dev, "interface %d has I=%02X O=%02X\n",
iface_desc->desc.bInterfaceNumber, ftdi->bulk_in_endpointAddr,
ftdi->bulk_out_endpointAddr);
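Note: the misc-driver conversions above and below replace open-coded endpoint loops with the usb_find_common_endpoints() helper family. A minimal sketch of the pattern for a probe routine that only needs bulk endpoints; intf, pipe_size and ep_addr are illustrative names:

        struct usb_endpoint_descriptor *bulk_in, *bulk_out;
        u16 pipe_size;
        u8 ep_addr;
        int ret;

        /* pass NULL for the endpoint kinds the driver does not need */
        ret = usb_find_common_endpoints(intf->cur_altsetting,
                                        &bulk_in, &bulk_out, NULL, NULL);
        if (ret) {
                dev_err(&intf->dev, "required bulk endpoints not found\n");
                return ret;
        }

        pipe_size = usb_endpoint_maxp(bulk_in);
        ep_addr = bulk_out->bEndpointAddress;

The *_reverse and usb_find_last_* variants used in adutux and iowarrior pick the last matching endpoint instead of the first, preserving the behaviour of the loops they replace.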
diff --git a/drivers/usb/misc/idmouse.c b/drivers/usb/misc/idmouse.c
index 502bfe30a077..81fcbf024c65 100644
--- a/drivers/usb/misc/idmouse.c
+++ b/drivers/usb/misc/idmouse.c
@@ -360,26 +360,22 @@ static int idmouse_probe(struct usb_interface *interface,
dev->interface = interface;
/* set up the endpoint information - use only the first bulk-in endpoint */
- endpoint = &iface_desc->endpoint[0].desc;
- if (!dev->bulk_in_endpointAddr && usb_endpoint_is_bulk_in(endpoint)) {
- /* we found a bulk in endpoint */
- dev->orig_bi_size = usb_endpoint_maxp(endpoint);
- dev->bulk_in_size = 0x200; /* works _much_ faster */
- dev->bulk_in_endpointAddr = endpoint->bEndpointAddress;
- dev->bulk_in_buffer =
- kmalloc(IMGSIZE + dev->bulk_in_size, GFP_KERNEL);
-
- if (!dev->bulk_in_buffer) {
- idmouse_delete(dev);
- return -ENOMEM;
- }
+ result = usb_find_bulk_in_endpoint(iface_desc, &endpoint);
+ if (result) {
+ dev_err(&interface->dev, "Unable to find bulk-in endpoint.\n");
+ idmouse_delete(dev);
+ return result;
}
- if (!(dev->bulk_in_endpointAddr)) {
- dev_err(&interface->dev, "Unable to find bulk-in endpoint.\n");
+ dev->orig_bi_size = usb_endpoint_maxp(endpoint);
+ dev->bulk_in_size = 0x200; /* works _much_ faster */
+ dev->bulk_in_endpointAddr = endpoint->bEndpointAddress;
+ dev->bulk_in_buffer = kmalloc(IMGSIZE + dev->bulk_in_size, GFP_KERNEL);
+ if (!dev->bulk_in_buffer) {
idmouse_delete(dev);
- return -ENODEV;
+ return -ENOMEM;
}
+
/* allow device read, write and ioctl */
dev->present = 1;
diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c
index 37c63cb39714..77569531b78a 100644
--- a/drivers/usb/misc/iowarrior.c
+++ b/drivers/usb/misc/iowarrior.c
@@ -756,9 +756,8 @@ static int iowarrior_probe(struct usb_interface *interface,
struct usb_device *udev = interface_to_usbdev(interface);
struct iowarrior *dev = NULL;
struct usb_host_interface *iface_desc;
- struct usb_endpoint_descriptor *endpoint;
- int i;
int retval = -ENOMEM;
+ int res;
/* allocate memory for our device state and initialize it */
dev = kzalloc(sizeof(struct iowarrior), GFP_KERNEL);
@@ -781,27 +780,19 @@ static int iowarrior_probe(struct usb_interface *interface,
iface_desc = interface->cur_altsetting;
dev->product_id = le16_to_cpu(udev->descriptor.idProduct);
- /* set up the endpoint information */
- for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
- endpoint = &iface_desc->endpoint[i].desc;
-
- if (usb_endpoint_is_int_in(endpoint))
- dev->int_in_endpoint = endpoint;
- if (usb_endpoint_is_int_out(endpoint))
- /* this one will match for the IOWarrior56 only */
- dev->int_out_endpoint = endpoint;
- }
-
- if (!dev->int_in_endpoint) {
+ res = usb_find_last_int_in_endpoint(iface_desc, &dev->int_in_endpoint);
+ if (res) {
dev_err(&interface->dev, "no interrupt-in endpoint found\n");
- retval = -ENODEV;
+ retval = res;
goto error;
}
if (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW56) {
- if (!dev->int_out_endpoint) {
+ res = usb_find_last_int_out_endpoint(iface_desc,
+ &dev->int_out_endpoint);
+ if (res) {
dev_err(&interface->dev, "no interrupt-out endpoint found\n");
- retval = -ENODEV;
+ retval = res;
goto error;
}
}
diff --git a/drivers/usb/misc/ldusb.c b/drivers/usb/misc/ldusb.c
index 3bc5356832db..9d9487c66f87 100644
--- a/drivers/usb/misc/ldusb.c
+++ b/drivers/usb/misc/ldusb.c
@@ -122,13 +122,13 @@ MODULE_SUPPORTED_DEVICE("LD USB Devices");
* avoid racing conditions and get better performance of the driver.
*/
static int ring_buffer_size = 128;
-module_param(ring_buffer_size, int, 0);
+module_param(ring_buffer_size, int, 0000);
MODULE_PARM_DESC(ring_buffer_size, "Read ring buffer size in reports");
/* The write_buffer can contain more than one interrupt out transfer.
*/
static int write_buffer_size = 10;
-module_param(write_buffer_size, int, 0);
+module_param(write_buffer_size, int, 0000);
MODULE_PARM_DESC(write_buffer_size, "Write buffer size in reports");
/* As of kernel version 2.6.4 ehci-hcd uses an
@@ -141,30 +141,30 @@ MODULE_PARM_DESC(write_buffer_size, "Write buffer size in reports");
* or set to 1 to use the standard interval from the endpoint descriptors.
*/
static int min_interrupt_in_interval = 2;
-module_param(min_interrupt_in_interval, int, 0);
+module_param(min_interrupt_in_interval, int, 0000);
MODULE_PARM_DESC(min_interrupt_in_interval, "Minimum interrupt in interval in ms");
static int min_interrupt_out_interval = 2;
-module_param(min_interrupt_out_interval, int, 0);
+module_param(min_interrupt_out_interval, int, 0000);
MODULE_PARM_DESC(min_interrupt_out_interval, "Minimum interrupt out interval in ms");
/* Structure to hold all of our device specific stuff */
struct ld_usb {
struct mutex mutex; /* locks this structure */
- struct usb_interface* intf; /* save off the usb interface pointer */
+ struct usb_interface *intf; /* save off the usb interface pointer */
int open_count; /* number of times this port has been opened */
- char* ring_buffer;
+ char *ring_buffer;
unsigned int ring_head;
unsigned int ring_tail;
wait_queue_head_t read_wait;
wait_queue_head_t write_wait;
- char* interrupt_in_buffer;
- struct usb_endpoint_descriptor* interrupt_in_endpoint;
- struct urb* interrupt_in_urb;
+ char *interrupt_in_buffer;
+ struct usb_endpoint_descriptor *interrupt_in_endpoint;
+ struct urb *interrupt_in_urb;
int interrupt_in_interval;
size_t interrupt_in_endpoint_size;
int interrupt_in_running;
@@ -172,9 +172,9 @@ struct ld_usb {
int buffer_overflow;
spinlock_t rbsl;
- char* interrupt_out_buffer;
- struct usb_endpoint_descriptor* interrupt_out_endpoint;
- struct urb* interrupt_out_urb;
+ char *interrupt_out_buffer;
+ struct usb_endpoint_descriptor *interrupt_out_endpoint;
+ struct urb *interrupt_out_urb;
int interrupt_out_interval;
size_t interrupt_out_endpoint_size;
int interrupt_out_busy;
@@ -244,7 +244,7 @@ static void ld_usb_interrupt_in_callback(struct urb *urb)
if (urb->actual_length > 0) {
next_ring_head = (dev->ring_head+1) % ring_buffer_size;
if (next_ring_head != dev->ring_tail) {
- actual_buffer = (size_t*)(dev->ring_buffer + dev->ring_head*(sizeof(size_t)+dev->interrupt_in_endpoint_size));
+ actual_buffer = (size_t *)(dev->ring_buffer + dev->ring_head * (sizeof(size_t)+dev->interrupt_in_endpoint_size));
/* actual_buffer gets urb->actual_length + interrupt_in_buffer */
*actual_buffer = urb->actual_length;
memcpy(actual_buffer+1, dev->interrupt_in_buffer, urb->actual_length);
@@ -483,7 +483,7 @@ static ssize_t ld_usb_read(struct file *file, char __user *buffer, size_t count,
}
/* actual_buffer contains actual_length + interrupt_in_buffer */
- actual_buffer = (size_t*)(dev->ring_buffer + dev->ring_tail*(sizeof(size_t)+dev->interrupt_in_endpoint_size));
+ actual_buffer = (size_t *)(dev->ring_buffer + dev->ring_tail * (sizeof(size_t)+dev->interrupt_in_endpoint_size));
bytes_to_read = min(count, *actual_buffer);
if (bytes_to_read < *actual_buffer)
dev_warn(&dev->intf->dev, "Read buffer overflow, %zd bytes dropped\n",
@@ -561,7 +561,7 @@ static ssize_t ld_usb_write(struct file *file, const char __user *buffer,
/* write the data into interrupt_out_buffer from userspace */
bytes_to_write = min(count, write_buffer_size*dev->interrupt_out_endpoint_size);
if (bytes_to_write < count)
- dev_warn(&dev->intf->dev, "Write buffer overflow, %zd bytes dropped\n",count-bytes_to_write);
+ dev_warn(&dev->intf->dev, "Write buffer overflow, %zd bytes dropped\n", count-bytes_to_write);
dev_dbg(&dev->intf->dev, "%s: count = %zd, bytes_to_write = %zd\n",
__func__, count, bytes_to_write);
@@ -650,10 +650,9 @@ static int ld_usb_probe(struct usb_interface *intf, const struct usb_device_id *
struct usb_device *udev = interface_to_usbdev(intf);
struct ld_usb *dev = NULL;
struct usb_host_interface *iface_desc;
- struct usb_endpoint_descriptor *endpoint;
char *buffer;
- int i;
int retval = -ENOMEM;
+ int res;
/* allocate memory for our device state and initialize it */
@@ -681,21 +680,17 @@ static int ld_usb_probe(struct usb_interface *intf, const struct usb_device_id *
iface_desc = intf->cur_altsetting;
- /* set up the endpoint information */
- for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
- endpoint = &iface_desc->endpoint[i].desc;
-
- if (usb_endpoint_is_int_in(endpoint))
- dev->interrupt_in_endpoint = endpoint;
-
- if (usb_endpoint_is_int_out(endpoint))
- dev->interrupt_out_endpoint = endpoint;
- }
- if (dev->interrupt_in_endpoint == NULL) {
+ res = usb_find_last_int_in_endpoint(iface_desc,
+ &dev->interrupt_in_endpoint);
+ if (res) {
dev_err(&intf->dev, "Interrupt in endpoint not found\n");
+ retval = res;
goto error;
}
- if (dev->interrupt_out_endpoint == NULL)
+
+ res = usb_find_last_int_out_endpoint(iface_desc,
+ &dev->interrupt_out_endpoint);
+ if (res)
dev_warn(&intf->dev, "Interrupt out endpoint not found (using control endpoint instead)\n");
dev->interrupt_in_endpoint_size = usb_endpoint_maxp(dev->interrupt_in_endpoint);
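The ldusb module_param() hunks above only change the permission argument from 0 to the four-digit octal 0000 for readability; either spelling means the parameter is settable at load time but gets no sysfs node. A minimal sketch:

#include <linux/module.h>
#include <linux/moduleparam.h>

/*
 * Mode 0000: the parameter can be set when the module is loaded
 * (e.g. modprobe ldusb ring_buffer_size=256) but no entry is created
 * under /sys/module/<module>/parameters/.
 */
static int ring_buffer_size = 128;
module_param(ring_buffer_size, int, 0000);
MODULE_PARM_DESC(ring_buffer_size, "Read ring buffer size in reports");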
diff --git a/drivers/usb/misc/legousbtower.c b/drivers/usb/misc/legousbtower.c
index 322a042d6e59..aa3c280fdf8d 100644
--- a/drivers/usb/misc/legousbtower.c
+++ b/drivers/usb/misc/legousbtower.c
@@ -317,9 +317,16 @@ static int tower_open (struct inode *inode, struct file *file)
int subminor;
int retval = 0;
struct usb_interface *interface;
- struct tower_reset_reply reset_reply;
+ struct tower_reset_reply *reset_reply;
int result;
+ reset_reply = kmalloc(sizeof(*reset_reply), GFP_KERNEL);
+
+ if (!reset_reply) {
+ retval = -ENOMEM;
+ goto exit;
+ }
+
nonseekable_open(inode, file);
subminor = iminor(inode);
@@ -364,8 +371,8 @@ static int tower_open (struct inode *inode, struct file *file)
USB_TYPE_VENDOR | USB_DIR_IN | USB_RECIP_DEVICE,
0,
0,
- &reset_reply,
- sizeof(reset_reply),
+ reset_reply,
+ sizeof(*reset_reply),
1000);
if (result < 0) {
dev_err(&dev->udev->dev,
@@ -406,6 +413,7 @@ unlock_exit:
mutex_unlock(&dev->lock);
exit:
+ kfree(reset_reply);
return retval;
}
@@ -806,10 +814,7 @@ static int tower_probe (struct usb_interface *interface, const struct usb_device
struct device *idev = &interface->dev;
struct usb_device *udev = interface_to_usbdev(interface);
struct lego_usb_tower *dev = NULL;
- struct usb_host_interface *iface_desc;
- struct usb_endpoint_descriptor* endpoint;
- struct tower_get_version_reply get_version_reply;
- int i;
+ struct tower_get_version_reply *get_version_reply = NULL;
int retval = -ENOMEM;
int result;
@@ -846,25 +851,13 @@ static int tower_probe (struct usb_interface *interface, const struct usb_device
dev->interrupt_out_urb = NULL;
dev->interrupt_out_busy = 0;
- iface_desc = interface->cur_altsetting;
-
- /* set up the endpoint information */
- for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
- endpoint = &iface_desc->endpoint[i].desc;
-
- if (usb_endpoint_xfer_int(endpoint)) {
- if (usb_endpoint_dir_in(endpoint))
- dev->interrupt_in_endpoint = endpoint;
- else
- dev->interrupt_out_endpoint = endpoint;
- }
- }
- if(dev->interrupt_in_endpoint == NULL) {
- dev_err(idev, "interrupt in endpoint not found\n");
- goto error;
- }
- if (dev->interrupt_out_endpoint == NULL) {
- dev_err(idev, "interrupt out endpoint not found\n");
+ result = usb_find_common_endpoints_reverse(interface->cur_altsetting,
+ NULL, NULL,
+ &dev->interrupt_in_endpoint,
+ &dev->interrupt_out_endpoint);
+ if (result) {
+ dev_err(idev, "interrupt endpoints not found\n");
+ retval = result;
goto error;
}
@@ -886,6 +879,13 @@ static int tower_probe (struct usb_interface *interface, const struct usb_device
dev->interrupt_in_interval = interrupt_in_interval ? interrupt_in_interval : dev->interrupt_in_endpoint->bInterval;
dev->interrupt_out_interval = interrupt_out_interval ? interrupt_out_interval : dev->interrupt_out_endpoint->bInterval;
+ get_version_reply = kmalloc(sizeof(*get_version_reply), GFP_KERNEL);
+
+ if (!get_version_reply) {
+ retval = -ENOMEM;
+ goto error;
+ }
+
/* get the firmware version and log it */
result = usb_control_msg (udev,
usb_rcvctrlpipe(udev, 0),
@@ -893,18 +893,19 @@ static int tower_probe (struct usb_interface *interface, const struct usb_device
USB_TYPE_VENDOR | USB_DIR_IN | USB_RECIP_DEVICE,
0,
0,
- &get_version_reply,
- sizeof(get_version_reply),
+ get_version_reply,
+ sizeof(*get_version_reply),
1000);
if (result < 0) {
dev_err(idev, "LEGO USB Tower get version control request failed\n");
retval = result;
goto error;
}
- dev_info(&interface->dev, "LEGO USB Tower firmware version is %d.%d "
- "build %d\n", get_version_reply.major,
- get_version_reply.minor,
- le16_to_cpu(get_version_reply.build_no));
+ dev_info(&interface->dev,
+ "LEGO USB Tower firmware version is %d.%d build %d\n",
+ get_version_reply->major,
+ get_version_reply->minor,
+ le16_to_cpu(get_version_reply->build_no));
/* we can register the device now, as it is ready */
usb_set_intfdata (interface, dev);
@@ -928,6 +929,7 @@ exit:
return retval;
error:
+ kfree(get_version_reply);
tower_delete(dev);
return retval;
}
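The legousbtower changes above move the reset and get-version replies off the stack because USB transfer buffers must be DMA-capable heap memory, never stack variables. A minimal sketch of the pattern with a made-up vendor request:

#include <linux/slab.h>
#include <linux/usb.h>

/* Hypothetical vendor request, for illustration only. */
#define MY_REQ_GET_STATUS       0x01

static int my_get_status(struct usb_device *udev, u8 *status)
{
        u8 *buf;
        int ret;

        /* Never pass stack memory as a USB transfer buffer. */
        buf = kmalloc(1, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
                              MY_REQ_GET_STATUS,
                              USB_TYPE_VENDOR | USB_DIR_IN | USB_RECIP_DEVICE,
                              0, 0, buf, 1, 1000);
        if (ret == 1) {
                *status = buf[0];
                ret = 0;
        } else if (ret >= 0) {
                ret = -EIO;
        }

        kfree(buf);
        return ret;
}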
diff --git a/drivers/usb/misc/lvstest.c b/drivers/usb/misc/lvstest.c
index d3d124753266..2142132a1f82 100644
--- a/drivers/usb/misc/lvstest.c
+++ b/drivers/usb/misc/lvstest.c
@@ -193,7 +193,7 @@ static ssize_t u2_timeout_store(struct device *dev,
return ret;
}
- if (val < 0 || val > 127)
+ if (val > 127)
return -EINVAL;
ret = lvs_rh_set_port_feature(hdev, lvs->portnum | (val << 8),
@@ -222,7 +222,7 @@ static ssize_t u1_timeout_store(struct device *dev,
return ret;
}
- if (val < 0 || val > 127)
+ if (val > 127)
return -EINVAL;
ret = lvs_rh_set_port_feature(hdev, lvs->portnum | (val << 8),
@@ -367,10 +367,9 @@ static int lvs_rh_probe(struct usb_interface *intf,
hdev = interface_to_usbdev(intf);
desc = intf->cur_altsetting;
- if (desc->desc.bNumEndpoints < 1)
- return -ENODEV;
-
- endpoint = &desc->endpoint[0].desc;
+ ret = usb_find_int_in_endpoint(desc, &endpoint);
+ if (ret)
+ return ret;
/* valid only for SS root hub */
if (hdev->descriptor.bDeviceProtocol != USB_HUB_PR_SS || hdev->parent) {
@@ -433,6 +432,7 @@ static void lvs_rh_disconnect(struct usb_interface *intf)
struct lvs_rh *lvs = usb_get_intfdata(intf);
sysfs_remove_group(&intf->dev.kobj, &lvs_attr_group);
+ usb_poison_urb(lvs->urb); /* used in scheduled work */
flush_work(&lvs->rh_work);
usb_free_urb(lvs->urb);
}
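The lvstest disconnect hunk above poisons the interrupt URB before flushing the work item that may resubmit it. A disconnect-path sketch of that ordering, with a hypothetical device structure:

#include <linux/slab.h>
#include <linux/usb.h>
#include <linux/workqueue.h>

/* Hypothetical device state, for illustration only. */
struct my_dev {
        struct urb *urb;
        struct work_struct work;        /* may resubmit 'urb' */
};

static void my_disconnect(struct usb_interface *intf)
{
        struct my_dev *dev = usb_get_intfdata(intf);

        /*
         * Poison first so a concurrently running work item cannot
         * resubmit the URB, then wait for the work to finish.
         */
        usb_poison_urb(dev->urb);
        flush_work(&dev->work);

        usb_free_urb(dev->urb);
        kfree(dev);
}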
diff --git a/drivers/usb/misc/sisusbvga/sisusb_con.c b/drivers/usb/misc/sisusbvga/sisusb_con.c
index 4b5777ec1501..3c6948af726a 100644
--- a/drivers/usb/misc/sisusbvga/sisusb_con.c
+++ b/drivers/usb/misc/sisusbvga/sisusb_con.c
@@ -816,7 +816,7 @@ sisusbcon_scroll_area(struct vc_data *c, struct sisusb_usb_data *sisusb,
mutex_unlock(&sisusb->lock);
- return 1;
+ return true;
}
/* Interface routine */
diff --git a/drivers/usb/misc/usblcd.c b/drivers/usb/misc/usblcd.c
index 9f48419abc46..0f5ad896c7e3 100644
--- a/drivers/usb/misc/usblcd.c
+++ b/drivers/usb/misc/usblcd.c
@@ -313,16 +313,15 @@ static int lcd_probe(struct usb_interface *interface,
const struct usb_device_id *id)
{
struct usb_lcd *dev = NULL;
- struct usb_host_interface *iface_desc;
- struct usb_endpoint_descriptor *endpoint;
- size_t buffer_size;
+ struct usb_endpoint_descriptor *bulk_in, *bulk_out;
int i;
- int retval = -ENOMEM;
+ int retval;
/* allocate memory for our device state and initialize it */
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev)
- goto error;
+ return -ENOMEM;
+
kref_init(&dev->kref);
sema_init(&dev->limit_sem, USB_LCD_CONCURRENT_WRITES);
init_usb_anchor(&dev->submitted);
@@ -338,33 +337,24 @@ static int lcd_probe(struct usb_interface *interface,
/* set up the endpoint information */
/* use only the first bulk-in and bulk-out endpoints */
- iface_desc = interface->cur_altsetting;
- for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
- endpoint = &iface_desc->endpoint[i].desc;
-
- if (!dev->bulk_in_endpointAddr &&
- usb_endpoint_is_bulk_in(endpoint)) {
- /* we found a bulk in endpoint */
- buffer_size = usb_endpoint_maxp(endpoint);
- dev->bulk_in_size = buffer_size;
- dev->bulk_in_endpointAddr = endpoint->bEndpointAddress;
- dev->bulk_in_buffer = kmalloc(buffer_size, GFP_KERNEL);
- if (!dev->bulk_in_buffer)
- goto error;
- }
-
- if (!dev->bulk_out_endpointAddr &&
- usb_endpoint_is_bulk_out(endpoint)) {
- /* we found a bulk out endpoint */
- dev->bulk_out_endpointAddr = endpoint->bEndpointAddress;
- }
- }
- if (!(dev->bulk_in_endpointAddr && dev->bulk_out_endpointAddr)) {
+ retval = usb_find_common_endpoints(interface->cur_altsetting,
+ &bulk_in, &bulk_out, NULL, NULL);
+ if (retval) {
dev_err(&interface->dev,
"Could not find both bulk-in and bulk-out endpoints\n");
goto error;
}
+ dev->bulk_in_size = usb_endpoint_maxp(bulk_in);
+ dev->bulk_in_endpointAddr = bulk_in->bEndpointAddress;
+ dev->bulk_in_buffer = kmalloc(dev->bulk_in_size, GFP_KERNEL);
+ if (!dev->bulk_in_buffer) {
+ retval = -ENOMEM;
+ goto error;
+ }
+
+ dev->bulk_out_endpointAddr = bulk_out->bEndpointAddress;
+
/* save our data pointer in this interface device */
usb_set_intfdata(interface, dev);
@@ -390,8 +380,7 @@ static int lcd_probe(struct usb_interface *interface,
return 0;
error:
- if (dev)
- kref_put(&dev->kref, lcd_delete);
+ kref_put(&dev->kref, lcd_delete);
return retval;
}
diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c
index 17c081068257..eee82ca55b7b 100644
--- a/drivers/usb/misc/usbtest.c
+++ b/drivers/usb/misc/usbtest.c
@@ -124,6 +124,20 @@ static struct usb_device *testdev_to_usbdev(struct usbtest_dev *test)
/*-------------------------------------------------------------------------*/
+static inline void endpoint_update(int edi,
+ struct usb_host_endpoint **in,
+ struct usb_host_endpoint **out,
+ struct usb_host_endpoint *e)
+{
+ if (edi) {
+ if (!*in)
+ *in = e;
+ } else {
+ if (!*out)
+ *out = e;
+ }
+}
+
static int
get_endpoints(struct usbtest_dev *dev, struct usb_interface *intf)
{
@@ -151,46 +165,26 @@ get_endpoints(struct usbtest_dev *dev, struct usb_interface *intf)
*/
for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) {
struct usb_host_endpoint *e;
+ int edi;
e = alt->endpoint + ep;
+ edi = usb_endpoint_dir_in(&e->desc);
+
switch (usb_endpoint_type(&e->desc)) {
case USB_ENDPOINT_XFER_BULK:
- break;
+ endpoint_update(edi, &in, &out, e);
+ continue;
case USB_ENDPOINT_XFER_INT:
if (dev->info->intr)
- goto try_intr;
+ endpoint_update(edi, &int_in, &int_out, e);
+ continue;
case USB_ENDPOINT_XFER_ISOC:
if (dev->info->iso)
- goto try_iso;
+ endpoint_update(edi, &iso_in, &iso_out, e);
/* FALLTHROUGH */
default:
continue;
}
- if (usb_endpoint_dir_in(&e->desc)) {
- if (!in)
- in = e;
- } else {
- if (!out)
- out = e;
- }
- continue;
-try_intr:
- if (usb_endpoint_dir_in(&e->desc)) {
- if (!int_in)
- int_in = e;
- } else {
- if (!int_out)
- int_out = e;
- }
- continue;
-try_iso:
- if (usb_endpoint_dir_in(&e->desc)) {
- if (!iso_in)
- iso_in = e;
- } else {
- if (!iso_out)
- iso_out = e;
- }
}
if ((in && out) || iso_in || iso_out || int_in || int_out)
goto found;
diff --git a/drivers/usb/misc/uss720.c b/drivers/usb/misc/uss720.c
index 07014cad6dbe..5947373700a1 100644
--- a/drivers/usb/misc/uss720.c
+++ b/drivers/usb/misc/uss720.c
@@ -689,7 +689,7 @@ static int uss720_probe(struct usb_interface *intf,
{
struct usb_device *usbdev = usb_get_dev(interface_to_usbdev(intf));
struct usb_host_interface *interface;
- struct usb_host_endpoint *endpoint;
+ struct usb_endpoint_descriptor *epd;
struct parport_uss720_private *priv;
struct parport *pp;
unsigned char reg;
@@ -745,9 +745,11 @@ static int uss720_probe(struct usb_interface *intf,
get_1284_register(pp, 0, &reg, GFP_KERNEL);
dev_dbg(&intf->dev, "reg: %7ph\n", priv->reg);
- endpoint = &interface->endpoint[2];
- dev_dbg(&intf->dev, "epaddr %d interval %d\n",
- endpoint->desc.bEndpointAddress, endpoint->desc.bInterval);
+ i = usb_find_last_int_in_endpoint(interface, &epd);
+ if (!i) {
+ dev_dbg(&intf->dev, "epaddr %d interval %d\n",
+ epd->bEndpointAddress, epd->bInterval);
+ }
parport_announce_port(pp);
usb_set_intfdata(intf, pp);
diff --git a/drivers/usb/misc/yurex.c b/drivers/usb/misc/yurex.c
index 54e53ac4c08f..58abdf28620a 100644
--- a/drivers/usb/misc/yurex.c
+++ b/drivers/usb/misc/yurex.c
@@ -195,8 +195,8 @@ static int yurex_probe(struct usb_interface *interface, const struct usb_device_
struct usb_host_interface *iface_desc;
struct usb_endpoint_descriptor *endpoint;
int retval = -ENOMEM;
- int i;
DEFINE_WAIT(wait);
+ int res;
/* allocate memory for our device state and initialize it */
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
@@ -212,20 +212,14 @@ static int yurex_probe(struct usb_interface *interface, const struct usb_device_
/* set up the endpoint information */
iface_desc = interface->cur_altsetting;
- for (i = 0; i < iface_desc->desc.bNumEndpoints; i++) {
- endpoint = &iface_desc->endpoint[i].desc;
-
- if (usb_endpoint_is_int_in(endpoint)) {
- dev->int_in_endpointAddr = endpoint->bEndpointAddress;
- break;
- }
- }
- if (!dev->int_in_endpointAddr) {
- retval = -ENODEV;
+ res = usb_find_int_in_endpoint(iface_desc, &endpoint);
+ if (res) {
dev_err(&interface->dev, "Could not find endpoints\n");
+ retval = res;
goto error;
}
+ dev->int_in_endpointAddr = endpoint->bEndpointAddress;
/* allocate control URB */
dev->cntl_urb = usb_alloc_urb(0, GFP_KERNEL);
diff --git a/drivers/usb/mtu3/mtu3_dr.c b/drivers/usb/mtu3/mtu3_dr.c
index 1a8987e7c5b0..11a0d3b84c5e 100644
--- a/drivers/usb/mtu3/mtu3_dr.c
+++ b/drivers/usb/mtu3/mtu3_dr.c
@@ -223,25 +223,25 @@ static int ssusb_extcon_register(struct otg_switch_mtk *otg_sx)
return 0;
otg_sx->vbus_nb.notifier_call = ssusb_vbus_notifier;
- ret = extcon_register_notifier(edev, EXTCON_USB,
+ ret = devm_extcon_register_notifier(ssusb->dev, edev, EXTCON_USB,
&otg_sx->vbus_nb);
if (ret < 0)
dev_err(ssusb->dev, "failed to register notifier for USB\n");
otg_sx->id_nb.notifier_call = ssusb_id_notifier;
- ret = extcon_register_notifier(edev, EXTCON_USB_HOST,
+ ret = devm_extcon_register_notifier(ssusb->dev, edev, EXTCON_USB_HOST,
&otg_sx->id_nb);
if (ret < 0)
dev_err(ssusb->dev, "failed to register notifier for USB-HOST\n");
dev_dbg(ssusb->dev, "EXTCON_USB: %d, EXTCON_USB_HOST: %d\n",
- extcon_get_cable_state_(edev, EXTCON_USB),
- extcon_get_cable_state_(edev, EXTCON_USB_HOST));
+ extcon_get_state(edev, EXTCON_USB),
+ extcon_get_state(edev, EXTCON_USB_HOST));
/* default as host, switch to device mode if needed */
- if (extcon_get_cable_state_(edev, EXTCON_USB_HOST) == false)
+ if (extcon_get_state(edev, EXTCON_USB_HOST) == false)
ssusb_set_mailbox(otg_sx, MTU3_ID_FLOAT);
- if (extcon_get_cable_state_(edev, EXTCON_USB) == true)
+ if (extcon_get_state(edev, EXTCON_USB) == true)
ssusb_set_mailbox(otg_sx, MTU3_VBUS_VALID);
return 0;
@@ -367,13 +367,6 @@ void ssusb_otg_switch_exit(struct ssusb_mtk *ssusb)
cancel_delayed_work(&otg_sx->extcon_reg_dwork);
- if (otg_sx->edev) {
- extcon_unregister_notifier(otg_sx->edev,
- EXTCON_USB, &otg_sx->vbus_nb);
- extcon_unregister_notifier(otg_sx->edev,
- EXTCON_USB_HOST, &otg_sx->id_nb);
- }
-
if (otg_sx->manual_drd_enabled)
ssusb_debugfs_exit(ssusb);
}
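The mtu3 hunks switch to the device-managed extcon notifier registration, which is why the explicit unregister calls in ssusb_otg_switch_exit() can be dropped. A minimal sketch of the managed variant, with hypothetical names:

#include <linux/extcon.h>
#include <linux/notifier.h>

/* Hypothetical OTG state, for illustration only. */
struct my_otg {
        struct notifier_block vbus_nb;
};

static int my_vbus_notifier(struct notifier_block *nb,
                            unsigned long event, void *data)
{
        /* react to VBUS attach/detach here */
        return NOTIFY_DONE;
}

static int my_register_vbus_notifier(struct device *dev,
                                     struct extcon_dev *edev,
                                     struct my_otg *otg)
{
        otg->vbus_nb.notifier_call = my_vbus_notifier;

        /* Automatically unregistered when 'dev' is unbound. */
        return devm_extcon_register_notifier(dev, edev, EXTCON_USB,
                                             &otg->vbus_nb);
}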
diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig
index 72a2a5040848..5506a9c03c1f 100644
--- a/drivers/usb/musb/Kconfig
+++ b/drivers/usb/musb/Kconfig
@@ -160,8 +160,8 @@ config USB_TI_CPPI_DMA
Enable DMA transfers when TI CPPI DMA is available.
config USB_TI_CPPI41_DMA
- bool 'TI CPPI 4.1 (AM335x)'
- depends on ARCH_OMAP && DMADEVICES
+ bool 'TI CPPI 4.1'
+ depends on (ARCH_OMAP || ARCH_DAVINCI_DA8XX) && DMADEVICES
select TI_CPPI41
config USB_TUSB_OMAP_DMA
diff --git a/drivers/usb/musb/cppi_dma.c b/drivers/usb/musb/cppi_dma.c
index c4fabe952ca6..a13bd3625043 100644
--- a/drivers/usb/musb/cppi_dma.c
+++ b/drivers/usb/musb/cppi_dma.c
@@ -582,9 +582,10 @@ cppi_next_tx_segment(struct musb *musb, struct cppi_channel *tx)
maxpacket = length;
n_bds = 1;
} else {
- n_bds = length / maxpacket;
- if (!length || (length % maxpacket))
- n_bds++;
+ if (length)
+ n_bds = DIV_ROUND_UP(length, maxpacket);
+ else
+ n_bds = 1;
n_bds = min(n_bds, (unsigned) NUM_TXCHAN_BD);
length = min(n_bds * maxpacket, length);
}
@@ -790,9 +791,7 @@ cppi_next_rx_segment(struct musb *musb, struct cppi_channel *rx, int onepacket)
n_bds = 0xffff / maxpacket;
length = n_bds * maxpacket;
} else {
- n_bds = length / maxpacket;
- if (length % maxpacket)
- n_bds++;
+ n_bds = DIV_ROUND_UP(length, maxpacket);
}
if (n_bds == 1)
onepacket = 1;
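Both cppi_dma hunks above replace a divide-plus-remainder sequence with DIV_ROUND_UP(); the TX path additionally keeps the zero-length case at one descriptor via the explicit if (length) branch. For non-zero lengths the two forms compute the same count:

#include <linux/kernel.h>

static unsigned int n_bds_open_coded(size_t length, unsigned int maxpacket)
{
        unsigned int n = length / maxpacket;

        if (length % maxpacket)
                n++;
        return n;
}

static unsigned int n_bds_helper(size_t length, unsigned int maxpacket)
{
        /* Expands to (length + maxpacket - 1) / maxpacket. */
        return DIV_ROUND_UP(length, maxpacket);
}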
diff --git a/drivers/usb/musb/da8xx.c b/drivers/usb/musb/da8xx.c
index d79c288ccbf2..df88123274ca 100644
--- a/drivers/usb/musb/da8xx.c
+++ b/drivers/usb/musb/da8xx.c
@@ -33,6 +33,7 @@
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
+#include <linux/of_platform.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
@@ -456,12 +457,41 @@ static inline u8 get_vbus_power(struct device *dev)
return current_uA / 1000 / 2;
}
+#ifdef CONFIG_USB_TI_CPPI41_DMA
+static void da8xx_dma_controller_callback(struct dma_controller *c)
+{
+ struct musb *musb = c->musb;
+ void __iomem *reg_base = musb->ctrl_base;
+
+ musb_writel(reg_base, DA8XX_USB_END_OF_INTR_REG, 0);
+}
+
+static struct dma_controller *
+da8xx_dma_controller_create(struct musb *musb, void __iomem *base)
+{
+ struct dma_controller *controller;
+
+ controller = cppi41_dma_controller_create(musb, base);
+ if (IS_ERR_OR_NULL(controller))
+ return controller;
+
+ controller->dma_callback = da8xx_dma_controller_callback;
+
+ return controller;
+}
+#endif
+
static const struct musb_platform_ops da8xx_ops = {
- .quirks = MUSB_INDEXED_EP | MUSB_PRESERVE_SESSION,
+ .quirks = MUSB_INDEXED_EP | MUSB_PRESERVE_SESSION |
+ MUSB_DMA_CPPI41 | MUSB_DA8XX,
.init = da8xx_musb_init,
.exit = da8xx_musb_exit,
.fifo_mode = 2,
+#ifdef CONFIG_USB_TI_CPPI41_DMA
+ .dma_init = da8xx_dma_controller_create,
+ .dma_exit = cppi41_dma_controller_destroy,
+#endif
.enable = da8xx_musb_enable,
.disable = da8xx_musb_disable,
@@ -483,6 +513,12 @@ static const struct musb_hdrc_config da8xx_config = {
.multipoint = 1,
};
+static struct of_dev_auxdata da8xx_auxdata_lookup[] = {
+ OF_DEV_AUXDATA("ti,da830-cppi41", 0x01e01000, "cppi41-dmaengine",
+ NULL),
+ {}
+};
+
static int da8xx_probe(struct platform_device *pdev)
{
struct resource musb_resources[2];
@@ -533,6 +569,11 @@ static int da8xx_probe(struct platform_device *pdev)
}
platform_set_drvdata(pdev, glue);
+ ret = of_platform_populate(pdev->dev.of_node, NULL,
+ da8xx_auxdata_lookup, &pdev->dev);
+ if (ret)
+ return ret;
+
memset(musb_resources, 0x00, sizeof(*musb_resources) *
ARRAY_SIZE(musb_resources));
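The da8xx glue now calls of_platform_populate() with an auxdata table so its CPPI 4.1 child node is created as a platform device with the fixed name "cppi41-dmaengine" instead of an address-derived one. A condensed sketch of the mechanism; the compatible string, unit address and name are taken from the hunk above, while the surrounding function is made up:

#include <linux/of_platform.h>
#include <linux/platform_device.h>

static struct of_dev_auxdata my_auxdata[] = {
        OF_DEV_AUXDATA("ti,da830-cppi41", 0x01e01000, "cppi41-dmaengine",
                       NULL),
        { /* sentinel */ }
};

static int my_populate_children(struct platform_device *pdev)
{
        /* Creates platform devices for the child DT nodes of pdev. */
        return of_platform_populate(pdev->dev.of_node, NULL,
                                    my_auxdata, &pdev->dev);
}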
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 0c3664ab705e..870da18f5077 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -2332,7 +2332,7 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
setup_timer(&musb->otg_timer, musb_otg_timer_func, (unsigned long) musb);
/* attach to the IRQ */
- if (request_irq(nIrq, musb->isr, 0, dev_name(dev), musb)) {
+ if (request_irq(nIrq, musb->isr, IRQF_SHARED, dev_name(dev), musb)) {
dev_err(dev, "request_irq %d failed!\n", nIrq);
status = -ENODEV;
goto fail3;
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
index 5b708be6d1d1..3e98d4268a64 100644
--- a/drivers/usb/musb/musb_core.h
+++ b/drivers/usb/musb/musb_core.h
@@ -172,6 +172,7 @@ struct musb_io;
*/
struct musb_platform_ops {
+#define MUSB_DA8XX BIT(8)
#define MUSB_PRESERVE_SESSION BIT(7)
#define MUSB_DMA_UX500 BIT(6)
#define MUSB_DMA_CPPI41 BIT(5)
diff --git a/drivers/usb/musb/musb_cppi41.c b/drivers/usb/musb/musb_cppi41.c
index 355655f8a3fb..e7c8b1b8bf22 100644
--- a/drivers/usb/musb/musb_cppi41.c
+++ b/drivers/usb/musb/musb_cppi41.c
@@ -571,6 +571,10 @@ static int cppi41_dma_channel_abort(struct dma_channel *channel)
}
}
+	/* DA8xx Advisory 2.3.27: wait 250 ms before starting the teardown */
+ if (musb->io.quirks & MUSB_DA8XX)
+ mdelay(250);
+
tdbit = 1 << cppi41_channel->port_num;
if (is_tx)
tdbit <<= 16;
diff --git a/drivers/usb/phy/Kconfig b/drivers/usb/phy/Kconfig
index 61cef7511a50..3006f569c068 100644
--- a/drivers/usb/phy/Kconfig
+++ b/drivers/usb/phy/Kconfig
@@ -74,13 +74,6 @@ config AM335X_PHY_USB
This driver provides PHY support for that phy which part for the
AM335x SoC.
-config SAMSUNG_USBPHY
- tristate
- help
- Enable this to support Samsung USB phy helper driver for Samsung SoCs.
- This driver provides common interface to interact, for Samsung USB 2.0 PHY
- driver and later for Samsung USB 3.0 PHY driver.
-
config TWL6030_USB
tristate "TWL6030 USB Transceiver Driver"
depends on TWL4030_CORE && OMAP_USB2 && USB_MUSB_OMAP2PLUS
diff --git a/drivers/usb/phy/Makefile b/drivers/usb/phy/Makefile
index b433e5d89be4..e7c9ca8cafb0 100644
--- a/drivers/usb/phy/Makefile
+++ b/drivers/usb/phy/Makefile
@@ -14,7 +14,6 @@ obj-$(CONFIG_TAHVO_USB) += phy-tahvo.o
obj-$(CONFIG_AM335X_CONTROL_USB) += phy-am335x-control.o
obj-$(CONFIG_AM335X_PHY_USB) += phy-am335x.o
obj-$(CONFIG_OMAP_OTG) += phy-omap-otg.o
-obj-$(CONFIG_SAMSUNG_USBPHY) += phy-samsung-usb.o
obj-$(CONFIG_TWL6030_USB) += phy-twl6030-usb.o
obj-$(CONFIG_USB_EHCI_TEGRA) += phy-tegra-usb.o
obj-$(CONFIG_USB_GPIO_VBUS) += phy-gpio-vbus-usb.o
diff --git a/drivers/usb/phy/phy-fsl-usb.c b/drivers/usb/phy/phy-fsl-usb.c
index 392ab422163c..cf8f40ae6e01 100644
--- a/drivers/usb/phy/phy-fsl-usb.c
+++ b/drivers/usb/phy/phy-fsl-usb.c
@@ -44,6 +44,13 @@
#include "phy-fsl-usb.h"
+#ifdef VERBOSE
+#define VDBG(fmt, args...) pr_debug("[%s] " fmt, \
+ __func__, ## args)
+#else
+#define VDBG(stuff...) do {} while (0)
+#endif
+
#define DRIVER_VERSION "Rev. 1.55"
#define DRIVER_AUTHOR "Jerry Huang/Li Yang"
#define DRIVER_DESC "Freescale USB OTG Transceiver Driver"
diff --git a/drivers/usb/serial/aircable.c b/drivers/usb/serial/aircable.c
index 80a9845cd93f..569c2200ba42 100644
--- a/drivers/usb/serial/aircable.c
+++ b/drivers/usb/serial/aircable.c
@@ -29,12 +29,6 @@
* is any other control code, I will simply check for the first
* one.
*
- * The driver registers himself with the USB-serial core and the USB Core. I had
- * to implement a probe function against USB-serial, because other way, the
- * driver was attaching himself to both interfaces. I have tried with different
- * configurations of usb_serial_driver with out exit, only the probe function
- * could handle this correctly.
- *
* I have taken some info from a Greg Kroah-Hartman article:
* http://www.linuxjournal.com/article/6573
* And from Linux Device Driver Kit CD, which is a great work, the authors taken
@@ -93,30 +87,17 @@ static int aircable_prepare_write_buffer(struct usb_serial_port *port,
return count + HCI_HEADER_LENGTH;
}
-static int aircable_probe(struct usb_serial *serial,
- const struct usb_device_id *id)
+static int aircable_calc_num_ports(struct usb_serial *serial,
+ struct usb_serial_endpoints *epds)
{
- struct usb_host_interface *iface_desc = serial->interface->
- cur_altsetting;
- struct usb_endpoint_descriptor *endpoint;
- int num_bulk_out = 0;
- int i;
-
- for (i = 0; i < iface_desc->desc.bNumEndpoints; i++) {
- endpoint = &iface_desc->endpoint[i].desc;
- if (usb_endpoint_is_bulk_out(endpoint)) {
- dev_dbg(&serial->dev->dev,
- "found bulk out on endpoint %d\n", i);
- ++num_bulk_out;
- }
- }
-
- if (num_bulk_out == 0) {
- dev_dbg(&serial->dev->dev, "Invalid interface, discarding\n");
+ /* Ignore the first interface, which has no bulk endpoints. */
+ if (epds->num_bulk_out == 0) {
+ dev_dbg(&serial->interface->dev,
+ "ignoring interface with no bulk-out endpoints\n");
return -ENODEV;
}
- return 0;
+ return 1;
}
static int aircable_process_packet(struct usb_serial_port *port,
@@ -164,9 +145,8 @@ static struct usb_serial_driver aircable_device = {
.name = "aircable",
},
.id_table = id_table,
- .num_ports = 1,
.bulk_out_size = HCI_COMPLETE_FRAME,
- .probe = aircable_probe,
+ .calc_num_ports = aircable_calc_num_ports,
.process_read_urb = aircable_process_read_urb,
.prepare_write_buffer = aircable_prepare_write_buffer,
.throttle = usb_serial_generic_throttle,
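aircable is the first of several drivers in this patch converted from a probe() endpoint scan to the new calc_num_ports() prototype, which receives the endpoints the usb-serial core already found and either rejects the interface or returns the port count. A minimal sketch with hypothetical names, using the struct usb_serial_endpoints fields visible in the hunks above:

#include <linux/usb.h>
#include <linux/usb/serial.h>

static int my_calc_num_ports(struct usb_serial *serial,
                             struct usb_serial_endpoints *epds)
{
        /* Reject interfaces without a bulk-out endpoint. */
        if (epds->num_bulk_out == 0) {
                dev_dbg(&serial->interface->dev,
                        "ignoring interface with no bulk-out endpoints\n");
                return -ENODEV;
        }

        return 1;       /* create a single port */
}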
diff --git a/drivers/usb/serial/ark3116.c b/drivers/usb/serial/ark3116.c
index 2779e59c30f1..0adbd38b4eea 100644
--- a/drivers/usb/serial/ark3116.c
+++ b/drivers/usb/serial/ark3116.c
@@ -122,19 +122,6 @@ static inline int calc_divisor(int bps)
return (12000000 + 2*bps) / (4*bps);
}
-static int ark3116_attach(struct usb_serial *serial)
-{
- /* make sure we have our end-points */
- if (serial->num_bulk_in == 0 ||
- serial->num_bulk_out == 0 ||
- serial->num_interrupt_in == 0) {
- dev_err(&serial->interface->dev, "missing endpoint\n");
- return -ENODEV;
- }
-
- return 0;
-}
-
static int ark3116_port_probe(struct usb_serial_port *port)
{
struct usb_serial *serial = port->serial;
@@ -671,7 +658,9 @@ static struct usb_serial_driver ark3116_device = {
},
.id_table = id_table,
.num_ports = 1,
- .attach = ark3116_attach,
+ .num_bulk_in = 1,
+ .num_bulk_out = 1,
+ .num_interrupt_in = 1,
.port_probe = ark3116_port_probe,
.port_remove = ark3116_port_remove,
.set_termios = ark3116_set_termios,
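ark3116 and the cyberjack, digi_acceleport, iuu_phoenix, keyspan_pda and kobil_sct changes below all drop hand-rolled attach() checks in favour of the num_bulk_in/num_bulk_out/num_interrupt_* fields, which the usb-serial core checks before binding. A sketch of the declarative form with made-up IDs:

#include <linux/module.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>

/* Made-up VID/PID, for illustration only. */
static const struct usb_device_id my_id_table[] = {
        { USB_DEVICE(0x1234, 0x5678) },
        { }
};

static struct usb_serial_driver my_serial_device = {
        .driver = {
                .owner  = THIS_MODULE,
                .name   = "my-serial",
        },
        .id_table         = my_id_table,
        .num_ports        = 1,
        /* Core refuses to bind unless these endpoints are present. */
        .num_bulk_in      = 1,
        .num_bulk_out     = 1,
        .num_interrupt_in = 1,
};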
diff --git a/drivers/usb/serial/cyberjack.c b/drivers/usb/serial/cyberjack.c
index 80260b08398b..47fbd9f0c0c7 100644
--- a/drivers/usb/serial/cyberjack.c
+++ b/drivers/usb/serial/cyberjack.c
@@ -50,7 +50,6 @@
#define CYBERJACK_PRODUCT_ID 0x0100
/* Function prototypes */
-static int cyberjack_attach(struct usb_serial *serial);
static int cyberjack_port_probe(struct usb_serial_port *port);
static int cyberjack_port_remove(struct usb_serial_port *port);
static int cyberjack_open(struct tty_struct *tty,
@@ -78,7 +77,7 @@ static struct usb_serial_driver cyberjack_device = {
.description = "Reiner SCT Cyberjack USB card reader",
.id_table = id_table,
.num_ports = 1,
- .attach = cyberjack_attach,
+ .num_bulk_out = 1,
.port_probe = cyberjack_port_probe,
.port_remove = cyberjack_port_remove,
.open = cyberjack_open,
@@ -102,14 +101,6 @@ struct cyberjack_private {
short wrsent; /* Data already sent */
};
-static int cyberjack_attach(struct usb_serial *serial)
-{
- if (serial->num_bulk_out < serial->num_ports)
- return -ENODEV;
-
- return 0;
-}
-
static int cyberjack_port_probe(struct usb_serial_port *port)
{
struct cyberjack_private *priv;
diff --git a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c
index 6537d3ca2797..2ce39af32cfa 100644
--- a/drivers/usb/serial/digi_acceleport.c
+++ b/drivers/usb/serial/digi_acceleport.c
@@ -273,6 +273,8 @@ static struct usb_serial_driver digi_acceleport_2_device = {
.description = "Digi 2 port USB adapter",
.id_table = id_table_2,
.num_ports = 3,
+ .num_bulk_in = 4,
+ .num_bulk_out = 4,
.open = digi_open,
.close = digi_close,
.dtr_rts = digi_dtr_rts,
@@ -302,6 +304,8 @@ static struct usb_serial_driver digi_acceleport_4_device = {
.description = "Digi 4 port USB adapter",
.id_table = id_table_4,
.num_ports = 4,
+ .num_bulk_in = 5,
+ .num_bulk_out = 5,
.open = digi_open,
.close = digi_close,
.write = digi_write,
@@ -1251,27 +1255,8 @@ static int digi_port_init(struct usb_serial_port *port, unsigned port_num)
static int digi_startup(struct usb_serial *serial)
{
- struct device *dev = &serial->interface->dev;
struct digi_serial *serial_priv;
int ret;
- int i;
-
- /* check whether the device has the expected number of endpoints */
- if (serial->num_port_pointers < serial->type->num_ports + 1) {
- dev_err(dev, "OOB endpoints missing\n");
- return -ENODEV;
- }
-
- for (i = 0; i < serial->type->num_ports + 1 ; i++) {
- if (!serial->port[i]->read_urb) {
- dev_err(dev, "bulk-in endpoint missing\n");
- return -ENODEV;
- }
- if (!serial->port[i]->write_urb) {
- dev_err(dev, "bulk-out endpoint missing\n");
- return -ENODEV;
- }
- }
serial_priv = kzalloc(sizeof(*serial_priv), GFP_KERNEL);
if (!serial_priv)
diff --git a/drivers/usb/serial/f81534.c b/drivers/usb/serial/f81534.c
index 22f23a429a95..3d616a2a9f96 100644
--- a/drivers/usb/serial/f81534.c
+++ b/drivers/usb/serial/f81534.c
@@ -611,20 +611,30 @@ static int f81534_find_config_idx(struct usb_serial *serial, u8 *index)
* The f81534_calc_num_ports() will run to "new style" with checking
* F81534_PORT_UNAVAILABLE section.
*/
-static int f81534_calc_num_ports(struct usb_serial *serial)
+static int f81534_calc_num_ports(struct usb_serial *serial,
+ struct usb_serial_endpoints *epds)
{
+ struct device *dev = &serial->interface->dev;
+ int size_bulk_in = usb_endpoint_maxp(epds->bulk_in[0]);
+ int size_bulk_out = usb_endpoint_maxp(epds->bulk_out[0]);
u8 setting[F81534_CUSTOM_DATA_SIZE];
u8 setting_idx;
u8 num_port = 0;
int status;
size_t i;
+ if (size_bulk_out != F81534_WRITE_BUFFER_SIZE ||
+ size_bulk_in != F81534_MAX_RECEIVE_BLOCK_SIZE) {
+ dev_err(dev, "unsupported endpoint max packet size\n");
+ return -ENODEV;
+ }
+
/* Check had custom setting */
status = f81534_find_config_idx(serial, &setting_idx);
if (status) {
dev_err(&serial->interface->dev, "%s: find idx failed: %d\n",
__func__, status);
- return 0;
+ return status;
}
/*
@@ -640,7 +650,7 @@ static int f81534_calc_num_ports(struct usb_serial *serial)
dev_err(&serial->interface->dev,
"%s: get custom data failed: %d\n",
__func__, status);
- return 0;
+ return status;
}
dev_dbg(&serial->interface->dev,
@@ -656,7 +666,7 @@ static int f81534_calc_num_ports(struct usb_serial *serial)
dev_err(&serial->interface->dev,
"%s: read failed: %d\n", __func__,
status);
- return 0;
+ return status;
}
dev_dbg(&serial->interface->dev, "%s: read default config\n",
@@ -671,12 +681,24 @@ static int f81534_calc_num_ports(struct usb_serial *serial)
++num_port;
}
- if (num_port)
- return num_port;
+ if (!num_port) {
+ dev_warn(&serial->interface->dev,
+ "no config found, assuming 4 ports\n");
+ num_port = 4; /* Nothing found, oldest version IC */
+ }
+
+ /*
+	 * Set up bulk-out endpoint multiplexing. All ports share the same
+ * bulk-out endpoint.
+ */
+ BUILD_BUG_ON(ARRAY_SIZE(epds->bulk_out) < F81534_NUM_PORT);
+
+ for (i = 1; i < num_port; ++i)
+ epds->bulk_out[i] = epds->bulk_out[0];
- dev_warn(&serial->interface->dev, "%s: Read Failed. default 4 ports\n",
- __func__);
- return 4; /* Nothing found, oldest version IC */
+ epds->num_bulk_out = num_port;
+
+ return num_port;
}
static void f81534_set_termios(struct tty_struct *tty,
@@ -1067,96 +1089,6 @@ static void f81534_write_usb_callback(struct urb *urb)
}
}
-static int f81534_setup_ports(struct usb_serial *serial)
-{
- struct usb_serial_port *port;
- u8 port0_out_address;
- int buffer_size;
- size_t i;
-
- /*
- * In our system architecture, we had 2 or 4 serial ports,
- * but only get 1 set of bulk in/out endpoints.
- *
- * The usb-serial subsystem will generate port 0 data,
- * but port 1/2/3 will not. It's will generate write URB and buffer
- * by following code and use the port0 read URB for read operation.
- */
- for (i = 1; i < serial->num_ports; ++i) {
- port0_out_address = serial->port[0]->bulk_out_endpointAddress;
- buffer_size = serial->port[0]->bulk_out_size;
- port = serial->port[i];
-
- if (kfifo_alloc(&port->write_fifo, PAGE_SIZE, GFP_KERNEL))
- return -ENOMEM;
-
- port->bulk_out_size = buffer_size;
- port->bulk_out_endpointAddress = port0_out_address;
-
- port->write_urbs[0] = usb_alloc_urb(0, GFP_KERNEL);
- if (!port->write_urbs[0])
- return -ENOMEM;
-
- port->bulk_out_buffers[0] = kzalloc(buffer_size, GFP_KERNEL);
- if (!port->bulk_out_buffers[0])
- return -ENOMEM;
-
- usb_fill_bulk_urb(port->write_urbs[0], serial->dev,
- usb_sndbulkpipe(serial->dev,
- port0_out_address),
- port->bulk_out_buffers[0], buffer_size,
- serial->type->write_bulk_callback, port);
-
- port->write_urb = port->write_urbs[0];
- port->bulk_out_buffer = port->bulk_out_buffers[0];
- }
-
- return 0;
-}
-
-static int f81534_probe(struct usb_serial *serial,
- const struct usb_device_id *id)
-{
- struct usb_endpoint_descriptor *endpoint;
- struct usb_host_interface *iface_desc;
- struct device *dev;
- int num_bulk_in = 0;
- int num_bulk_out = 0;
- int size_bulk_in = 0;
- int size_bulk_out = 0;
- int i;
-
- dev = &serial->interface->dev;
- iface_desc = serial->interface->cur_altsetting;
-
- for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
- endpoint = &iface_desc->endpoint[i].desc;
-
- if (usb_endpoint_is_bulk_in(endpoint)) {
- ++num_bulk_in;
- size_bulk_in = usb_endpoint_maxp(endpoint);
- }
-
- if (usb_endpoint_is_bulk_out(endpoint)) {
- ++num_bulk_out;
- size_bulk_out = usb_endpoint_maxp(endpoint);
- }
- }
-
- if (num_bulk_in != 1 || num_bulk_out != 1) {
- dev_err(dev, "expected endpoints not found\n");
- return -ENODEV;
- }
-
- if (size_bulk_out != F81534_WRITE_BUFFER_SIZE ||
- size_bulk_in != F81534_MAX_RECEIVE_BLOCK_SIZE) {
- dev_err(dev, "unsupported endpoint max packet size\n");
- return -ENODEV;
- }
-
- return 0;
-}
-
static int f81534_attach(struct usb_serial *serial)
{
struct f81534_serial_private *serial_priv;
@@ -1173,10 +1105,6 @@ static int f81534_attach(struct usb_serial *serial)
mutex_init(&serial_priv->urb_mutex);
- status = f81534_setup_ports(serial);
- if (status)
- return status;
-
/* Check had custom setting */
status = f81534_find_config_idx(serial, &serial_priv->setting_idx);
if (status) {
@@ -1380,12 +1308,13 @@ static struct usb_serial_driver f81534_device = {
},
.description = DRIVER_DESC,
.id_table = f81534_id_table,
+ .num_bulk_in = 1,
+ .num_bulk_out = 1,
.open = f81534_open,
.close = f81534_close,
.write = f81534_write,
.tx_empty = f81534_tx_empty,
.calc_num_ports = f81534_calc_num_ports,
- .probe = f81534_probe,
.attach = f81534_attach,
.port_probe = f81534_port_probe,
.dtr_rts = f81534_dtr_rts,
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index c540de15aad2..d38780fa8788 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -873,6 +873,7 @@ static const struct usb_device_id id_table_combined[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(MICROCHIP_VID, MICROCHIP_USB_BOARD_PID,
USB_CLASS_VENDOR_SPEC,
USB_SUBCLASS_VENDOR_SPEC, 0x00) },
+ { USB_DEVICE_INTERFACE_NUMBER(ACTEL_VID, MICROSEMI_ARROW_SF2PLUS_BOARD_PID, 2) },
{ USB_DEVICE(JETI_VID, JETI_SPC1201_PID) },
{ USB_DEVICE(MARVELL_VID, MARVELL_SHEEVAPLUG_PID),
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
@@ -1406,6 +1407,9 @@ static int write_latency_timer(struct usb_serial_port *port)
int rv;
int l = priv->latency;
+ if (priv->chip_type == SIO || priv->chip_type == FT8U232AM)
+ return -EINVAL;
+
if (priv->flags & ASYNC_LOW_LATENCY)
l = 1;
@@ -1422,7 +1426,7 @@ static int write_latency_timer(struct usb_serial_port *port)
return rv;
}
-static int read_latency_timer(struct usb_serial_port *port)
+static int _read_latency_timer(struct usb_serial_port *port)
{
struct ftdi_private *priv = usb_get_serial_port_data(port);
struct usb_device *udev = port->serial->dev;
@@ -1440,11 +1444,10 @@ static int read_latency_timer(struct usb_serial_port *port)
0, priv->interface,
buf, 1, WDR_TIMEOUT);
if (rv < 1) {
- dev_err(&port->dev, "Unable to read latency timer: %i\n", rv);
if (rv >= 0)
rv = -EIO;
} else {
- priv->latency = buf[0];
+ rv = buf[0];
}
kfree(buf);
@@ -1452,6 +1455,25 @@ static int read_latency_timer(struct usb_serial_port *port)
return rv;
}
+static int read_latency_timer(struct usb_serial_port *port)
+{
+ struct ftdi_private *priv = usb_get_serial_port_data(port);
+ int rv;
+
+ if (priv->chip_type == SIO || priv->chip_type == FT8U232AM)
+ return -EINVAL;
+
+ rv = _read_latency_timer(port);
+ if (rv < 0) {
+ dev_err(&port->dev, "Unable to read latency timer: %i\n", rv);
+ return rv;
+ }
+
+ priv->latency = rv;
+
+ return 0;
+}
+
static int get_serial_info(struct usb_serial_port *port,
struct serial_struct __user *retinfo)
{
@@ -1603,9 +1625,19 @@ static void ftdi_determine_type(struct usb_serial_port *port)
priv->baud_base = 12000000 / 16;
} else if (version < 0x400) {
/* Assume it's an FT8U232AM (or FT8U245AM) */
- /* (It might be a BM because of the iSerialNumber bug,
- * but it will still work as an AM device.) */
priv->chip_type = FT8U232AM;
+ /*
+ * It might be a BM type because of the iSerialNumber bug.
+ * If iSerialNumber==0 and the latency timer is readable,
+ * assume it is BM type.
+ */
+ if (udev->descriptor.iSerialNumber == 0 &&
+ _read_latency_timer(port) >= 0) {
+ dev_dbg(&port->dev,
+ "%s: has latency timer so not an AM type\n",
+ __func__);
+ priv->chip_type = FT232BM;
+ }
} else if (version < 0x600) {
/* Assume it's an FT232BM (or FT245BM) */
priv->chip_type = FT232BM;
@@ -1685,9 +1717,12 @@ static ssize_t latency_timer_store(struct device *dev,
{
struct usb_serial_port *port = to_usb_serial_port(dev);
struct ftdi_private *priv = usb_get_serial_port_data(port);
- int v = simple_strtoul(valbuf, NULL, 10);
+ u8 v;
int rv;
+ if (kstrtou8(valbuf, 10, &v))
+ return -EINVAL;
+
priv->latency = v;
rv = write_latency_timer(port);
if (rv < 0)
@@ -1704,10 +1739,13 @@ static ssize_t store_event_char(struct device *dev,
struct usb_serial_port *port = to_usb_serial_port(dev);
struct ftdi_private *priv = usb_get_serial_port_data(port);
struct usb_device *udev = port->serial->dev;
- int v = simple_strtoul(valbuf, NULL, 10);
+ unsigned int v;
int rv;
- dev_dbg(&port->dev, "%s: setting event char = %i\n", __func__, v);
+ if (kstrtouint(valbuf, 0, &v) || v >= 0x200)
+ return -EINVAL;
+
+ dev_dbg(&port->dev, "%s: setting event char = 0x%03x\n", __func__, v);
rv = usb_control_msg(udev,
usb_sndctrlpipe(udev, 0),
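The ftdi_sio sysfs hunks replace simple_strtoul(), which silently ignores overflow and trailing garbage, with kstrtou8()/kstrtouint() so invalid input is rejected with -EINVAL. A sketch of a store callback in that style; my_apply_latency() is a hypothetical stand-in for the driver-specific register write:

#include <linux/device.h>
#include <linux/kernel.h>

/* Hypothetical helper standing in for the device-specific write-out. */
static int my_apply_latency(struct device *dev, u8 val)
{
        dev_dbg(dev, "latency set to %u\n", val);
        return 0;
}

static ssize_t latency_timer_store(struct device *dev,
                                   struct device_attribute *attr,
                                   const char *buf, size_t count)
{
        u8 val;
        int ret;

        /* Reject non-numeric or out-of-range input outright. */
        if (kstrtou8(buf, 10, &val))
                return -EINVAL;

        ret = my_apply_latency(dev, val);
        if (ret < 0)
                return ret;

        return count;
}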
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 48ee04c94a75..71fb9e59db71 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -873,6 +873,12 @@
#define FIC_VID 0x1457
#define FIC_NEO1973_DEBUG_PID 0x5118
+/*
+ * Actel / Microsemi
+ */
+#define ACTEL_VID 0x1514
+#define MICROSEMI_ARROW_SF2PLUS_BOARD_PID 0x2008
+
/* Olimex */
#define OLIMEX_VID 0x15BA
#define OLIMEX_ARM_USB_OCD_PID 0x0003
diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c
index 49ce2be90fa0..35cb8c0e584f 100644
--- a/drivers/usb/serial/generic.c
+++ b/drivers/usb/serial/generic.c
@@ -37,13 +37,41 @@ MODULE_PARM_DESC(product, "User specified USB idProduct");
static struct usb_device_id generic_device_ids[2]; /* Initially all zeroes. */
-struct usb_serial_driver usb_serial_generic_device = {
+static int usb_serial_generic_probe(struct usb_serial *serial,
+ const struct usb_device_id *id)
+{
+ struct device *dev = &serial->interface->dev;
+
+ dev_info(dev, "The \"generic\" usb-serial driver is only for testing and one-off prototypes.\n");
+ dev_info(dev, "Tell linux-usb@vger.kernel.org to add your device to a proper driver.\n");
+
+ return 0;
+}
+
+static int usb_serial_generic_calc_num_ports(struct usb_serial *serial,
+ struct usb_serial_endpoints *epds)
+{
+ struct device *dev = &serial->interface->dev;
+ int num_ports;
+
+ num_ports = max(epds->num_bulk_in, epds->num_bulk_out);
+
+ if (num_ports == 0) {
+ dev_err(dev, "device has no bulk endpoints\n");
+ return -ENODEV;
+ }
+
+ return num_ports;
+}
+
+static struct usb_serial_driver usb_serial_generic_device = {
.driver = {
.owner = THIS_MODULE,
.name = "generic",
},
.id_table = generic_device_ids,
- .num_ports = 1,
+ .probe = usb_serial_generic_probe,
+ .calc_num_ports = usb_serial_generic_calc_num_ports,
.throttle = usb_serial_generic_throttle,
.unthrottle = usb_serial_generic_unthrottle,
.resume = usb_serial_generic_resume,
diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c
index bb7673e80a57..bdf8bd814a9a 100644
--- a/drivers/usb/serial/io_edgeport.c
+++ b/drivers/usb/serial/io_edgeport.c
@@ -1544,11 +1544,6 @@ static void edge_set_termios(struct tty_struct *tty,
struct usb_serial_port *port, struct ktermios *old_termios)
{
struct edgeport_port *edge_port = usb_get_serial_port_data(port);
- unsigned int cflag;
-
- cflag = tty->termios.c_cflag;
- dev_dbg(&port->dev, "%s - clfag %08x iflag %08x\n", __func__, tty->termios.c_cflag, tty->termios.c_iflag);
- dev_dbg(&port->dev, "%s - old clfag %08x old iflag %08x\n", __func__, old_termios->c_cflag, old_termios->c_iflag);
if (edge_port == NULL)
return;
@@ -2844,14 +2839,9 @@ static int edge_startup(struct usb_serial *serial)
bool interrupt_in_found;
bool bulk_in_found;
bool bulk_out_found;
- static __u32 descriptor[3] = { EDGE_COMPATIBILITY_MASK0,
- EDGE_COMPATIBILITY_MASK1,
- EDGE_COMPATIBILITY_MASK2 };
-
- if (serial->num_bulk_in < 1 || serial->num_interrupt_in < 1) {
- dev_err(&serial->interface->dev, "missing endpoints\n");
- return -ENODEV;
- }
+ static const __u32 descriptor[3] = { EDGE_COMPATIBILITY_MASK0,
+ EDGE_COMPATIBILITY_MASK1,
+ EDGE_COMPATIBILITY_MASK2 };
dev = serial->dev;
@@ -3120,6 +3110,9 @@ static struct usb_serial_driver edgeport_2port_device = {
.description = "Edgeport 2 port adapter",
.id_table = edgeport_2port_id_table,
.num_ports = 2,
+ .num_bulk_in = 1,
+ .num_bulk_out = 1,
+ .num_interrupt_in = 1,
.open = edge_open,
.close = edge_close,
.throttle = edge_throttle,
@@ -3152,6 +3145,9 @@ static struct usb_serial_driver edgeport_4port_device = {
.description = "Edgeport 4 port adapter",
.id_table = edgeport_4port_id_table,
.num_ports = 4,
+ .num_bulk_in = 1,
+ .num_bulk_out = 1,
+ .num_interrupt_in = 1,
.open = edge_open,
.close = edge_close,
.throttle = edge_throttle,
@@ -3184,6 +3180,9 @@ static struct usb_serial_driver edgeport_8port_device = {
.description = "Edgeport 8 port adapter",
.id_table = edgeport_8port_id_table,
.num_ports = 8,
+ .num_bulk_in = 1,
+ .num_bulk_out = 1,
+ .num_interrupt_in = 1,
.open = edge_open,
.close = edge_close,
.throttle = edge_throttle,
@@ -3216,6 +3215,9 @@ static struct usb_serial_driver epic_device = {
.description = "EPiC device",
.id_table = Epic_port_id_table,
.num_ports = 1,
+ .num_bulk_in = 1,
+ .num_bulk_out = 1,
+ .num_interrupt_in = 1,
.open = edge_open,
.close = edge_close,
.throttle = edge_throttle,
diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c
index a76b95d32157..87798e625d6c 100644
--- a/drivers/usb/serial/io_ti.c
+++ b/drivers/usb/serial/io_ti.c
@@ -1933,13 +1933,6 @@ static int edge_open(struct tty_struct *tty, struct usb_serial_port *port)
if (edge_serial->num_ports_open == 0) {
/* we are the first port to open, post the interrupt urb */
urb = edge_serial->serial->port[0]->interrupt_in_urb;
- if (!urb) {
- dev_err(&port->dev,
- "%s - no interrupt urb present, exiting\n",
- __func__);
- status = -EINVAL;
- goto release_es_lock;
- }
urb->context = edge_serial;
status = usb_submit_urb(urb, GFP_KERNEL);
if (status) {
@@ -1959,12 +1952,6 @@ static int edge_open(struct tty_struct *tty, struct usb_serial_port *port)
/* start up our bulk read urb */
urb = port->read_urb;
- if (!urb) {
- dev_err(&port->dev, "%s - no read urb present, exiting\n",
- __func__);
- status = -EINVAL;
- goto unlink_int_urb;
- }
edge_port->ep_read_urb_state = EDGE_READ_URB_RUNNING;
urb->context = edge_port;
status = usb_submit_urb(urb, GFP_KERNEL);
@@ -2385,14 +2372,6 @@ static void edge_set_termios(struct tty_struct *tty,
struct usb_serial_port *port, struct ktermios *old_termios)
{
struct edgeport_port *edge_port = usb_get_serial_port_data(port);
- unsigned int cflag;
-
- cflag = tty->termios.c_cflag;
-
- dev_dbg(&port->dev, "%s - clfag %08x iflag %08x\n", __func__,
- tty->termios.c_cflag, tty->termios.c_iflag);
- dev_dbg(&port->dev, "%s - old clfag %08x old iflag %08x\n", __func__,
- old_termios->c_cflag, old_termios->c_iflag);
if (edge_port == NULL)
return;
@@ -2544,19 +2523,31 @@ static void edge_heartbeat_work(struct work_struct *work)
edge_heartbeat_schedule(serial);
}
-static int edge_startup(struct usb_serial *serial)
+static int edge_calc_num_ports(struct usb_serial *serial,
+ struct usb_serial_endpoints *epds)
{
- struct edgeport_serial *edge_serial;
- int status;
- u16 product_id;
+ struct device *dev = &serial->interface->dev;
+ unsigned char num_ports = serial->type->num_ports;
/* Make sure we have the required endpoints when in download mode. */
if (serial->interface->cur_altsetting->desc.bNumEndpoints > 1) {
- if (serial->num_bulk_in < serial->num_ports ||
- serial->num_bulk_out < serial->num_ports)
+ if (epds->num_bulk_in < num_ports ||
+ epds->num_bulk_out < num_ports ||
+ epds->num_interrupt_in < 1) {
+ dev_err(dev, "required endpoints missing\n");
return -ENODEV;
+ }
}
+ return num_ports;
+}
+
+static int edge_startup(struct usb_serial *serial)
+{
+ struct edgeport_serial *edge_serial;
+ int status;
+ u16 product_id;
+
/* create our private serial structure */
edge_serial = kzalloc(sizeof(struct edgeport_serial), GFP_KERNEL);
if (!edge_serial)
@@ -2736,11 +2727,13 @@ static struct usb_serial_driver edgeport_1port_device = {
.description = "Edgeport TI 1 port adapter",
.id_table = edgeport_1port_id_table,
.num_ports = 1,
+ .num_bulk_out = 1,
.open = edge_open,
.close = edge_close,
.throttle = edge_throttle,
.unthrottle = edge_unthrottle,
.attach = edge_startup,
+ .calc_num_ports = edge_calc_num_ports,
.disconnect = edge_disconnect,
.release = edge_release,
.port_probe = edge_port_probe,
@@ -2773,11 +2766,13 @@ static struct usb_serial_driver edgeport_2port_device = {
.description = "Edgeport TI 2 port adapter",
.id_table = edgeport_2port_id_table,
.num_ports = 2,
+ .num_bulk_out = 1,
.open = edge_open,
.close = edge_close,
.throttle = edge_throttle,
.unthrottle = edge_unthrottle,
.attach = edge_startup,
+ .calc_num_ports = edge_calc_num_ports,
.disconnect = edge_disconnect,
.release = edge_release,
.port_probe = edge_port_probe,
diff --git a/drivers/usb/serial/ipaq.c b/drivers/usb/serial/ipaq.c
index ec1b8f2c1183..cde0dcdce9c4 100644
--- a/drivers/usb/serial/ipaq.c
+++ b/drivers/usb/serial/ipaq.c
@@ -33,7 +33,8 @@ static int initial_wait;
/* Function prototypes for an ipaq */
static int ipaq_open(struct tty_struct *tty,
struct usb_serial_port *port);
-static int ipaq_calc_num_ports(struct usb_serial *serial);
+static int ipaq_calc_num_ports(struct usb_serial *serial,
+ struct usb_serial_endpoints *epds);
static int ipaq_startup(struct usb_serial *serial);
static const struct usb_device_id ipaq_id_table[] = {
@@ -550,42 +551,38 @@ static int ipaq_open(struct tty_struct *tty,
return usb_serial_generic_open(tty, port);
}
-static int ipaq_calc_num_ports(struct usb_serial *serial)
+static int ipaq_calc_num_ports(struct usb_serial *serial,
+ struct usb_serial_endpoints *epds)
{
/*
- * some devices have 3 endpoints, the 3rd of which
- * must be ignored as it would make the core
- * create a second port which oopses when used
+ * Some of the devices in ipaq_id_table[] are composite, and we
+ * shouldn't bind to all the interfaces. This test will rule out
+ * some obviously invalid possibilities.
*/
- int ipaq_num_ports = 1;
-
- dev_dbg(&serial->dev->dev, "%s - numberofendpoints: %d\n", __func__,
- (int)serial->interface->cur_altsetting->desc.bNumEndpoints);
+ if (epds->num_bulk_in == 0 || epds->num_bulk_out == 0)
+ return -ENODEV;
/*
- * a few devices have 4 endpoints, seemingly Yakuma devices,
- * and we need the second pair, so let them have 2 ports
- *
- * TODO: can we drop port 1 ?
+ * A few devices have four endpoints, seemingly Yakuma devices, and
+ * we need the second pair.
*/
- if (serial->interface->cur_altsetting->desc.bNumEndpoints > 3) {
- ipaq_num_ports = 2;
+ if (epds->num_bulk_in > 1 && epds->num_bulk_out > 1) {
+ epds->bulk_in[0] = epds->bulk_in[1];
+ epds->bulk_out[0] = epds->bulk_out[1];
}
- return ipaq_num_ports;
-}
+ /*
+ * Other devices have 3 endpoints, but we only use the first bulk in
+ * and out endpoints.
+ */
+ epds->num_bulk_in = 1;
+ epds->num_bulk_out = 1;
+ return 1;
+}
static int ipaq_startup(struct usb_serial *serial)
{
- /* Some of the devices in ipaq_id_table[] are composite, and we
- * shouldn't bind to all the interfaces. This test will rule out
- * some obviously invalid possibilities.
- */
- if (serial->num_bulk_in < serial->num_ports ||
- serial->num_bulk_out < serial->num_ports)
- return -ENODEV;
-
if (serial->dev->actconfig->desc.bConfigurationValue != 1) {
/*
* FIXME: HP iPaq rx3715, possibly others, have 1 config that
@@ -597,10 +594,6 @@ static int ipaq_startup(struct usb_serial *serial)
return -ENODEV;
}
- dev_dbg(&serial->dev->dev,
- "%s - iPAQ module configured for %d ports\n", __func__,
- serial->num_ports);
-
return usb_reset_configuration(serial->dev);
}
diff --git a/drivers/usb/serial/iuu_phoenix.c b/drivers/usb/serial/iuu_phoenix.c
index 030390f37b0a..18fc992a245f 100644
--- a/drivers/usb/serial/iuu_phoenix.c
+++ b/drivers/usb/serial/iuu_phoenix.c
@@ -68,16 +68,6 @@ struct iuu_private {
u32 clk;
};
-static int iuu_attach(struct usb_serial *serial)
-{
- unsigned char num_ports = serial->num_ports;
-
- if (serial->num_bulk_in < num_ports || serial->num_bulk_out < num_ports)
- return -ENODEV;
-
- return 0;
-}
-
static int iuu_port_probe(struct usb_serial_port *port)
{
struct iuu_private *priv;
@@ -598,9 +588,8 @@ static void read_buf_callback(struct urb *urb)
}
dev_dbg(&port->dev, "%s - %i chars to write\n", __func__, urb->actual_length);
- if (data == NULL)
- dev_dbg(&port->dev, "%s - data is NULL !!!\n", __func__);
- if (urb->actual_length && data) {
+
+ if (urb->actual_length) {
tty_insert_flip_string(&port->port, data, urb->actual_length);
tty_flip_buffer_push(&port->port);
}
@@ -665,10 +654,8 @@ static void iuu_uart_read_callback(struct urb *urb)
/* error stop all */
return;
}
- if (data == NULL)
- dev_dbg(&port->dev, "%s - data is NULL !!!\n", __func__);
- if (urb->actual_length == 1 && data != NULL)
+ if (urb->actual_length == 1)
len = (int) data[0];
if (urb->actual_length > 1) {
@@ -1183,6 +1170,8 @@ static struct usb_serial_driver iuu_device = {
},
.id_table = id_table,
.num_ports = 1,
+ .num_bulk_in = 1,
+ .num_bulk_out = 1,
.bulk_in_size = 512,
.bulk_out_size = 512,
.open = iuu_open,
@@ -1193,7 +1182,6 @@ static struct usb_serial_driver iuu_device = {
.tiocmset = iuu_tiocmset,
.set_termios = iuu_set_termios,
.init_termios = iuu_init_termios,
- .attach = iuu_attach,
.port_probe = iuu_port_probe,
.port_remove = iuu_port_remove,
};
diff --git a/drivers/usb/serial/keyspan_pda.c b/drivers/usb/serial/keyspan_pda.c
index d2dab2a341b8..196908dd25a1 100644
--- a/drivers/usb/serial/keyspan_pda.c
+++ b/drivers/usb/serial/keyspan_pda.c
@@ -708,19 +708,6 @@ MODULE_FIRMWARE("keyspan_pda/keyspan_pda.fw");
MODULE_FIRMWARE("keyspan_pda/xircom_pgs.fw");
#endif
-static int keyspan_pda_attach(struct usb_serial *serial)
-{
- unsigned char num_ports = serial->num_ports;
-
- if (serial->num_bulk_out < num_ports ||
- serial->num_interrupt_in < num_ports) {
- dev_err(&serial->interface->dev, "missing endpoints\n");
- return -ENODEV;
- }
-
- return 0;
-}
-
static int keyspan_pda_port_probe(struct usb_serial_port *port)
{
@@ -784,6 +771,8 @@ static struct usb_serial_driver keyspan_pda_device = {
.description = "Keyspan PDA",
.id_table = id_table_std,
.num_ports = 1,
+ .num_bulk_out = 1,
+ .num_interrupt_in = 1,
.dtr_rts = keyspan_pda_dtr_rts,
.open = keyspan_pda_open,
.close = keyspan_pda_close,
@@ -798,7 +787,6 @@ static struct usb_serial_driver keyspan_pda_device = {
.break_ctl = keyspan_pda_break_ctl,
.tiocmget = keyspan_pda_tiocmget,
.tiocmset = keyspan_pda_tiocmset,
- .attach = keyspan_pda_attach,
.port_probe = keyspan_pda_port_probe,
.port_remove = keyspan_pda_port_remove,
};
diff --git a/drivers/usb/serial/kobil_sct.c b/drivers/usb/serial/kobil_sct.c
index 813035f51fe7..3024b9b25360 100644
--- a/drivers/usb/serial/kobil_sct.c
+++ b/drivers/usb/serial/kobil_sct.c
@@ -51,7 +51,6 @@
/* Function prototypes */
-static int kobil_attach(struct usb_serial *serial);
static int kobil_port_probe(struct usb_serial_port *probe);
static int kobil_port_remove(struct usb_serial_port *probe);
static int kobil_open(struct tty_struct *tty, struct usb_serial_port *port);
@@ -87,7 +86,7 @@ static struct usb_serial_driver kobil_device = {
.description = "KOBIL USB smart card terminal",
.id_table = id_table,
.num_ports = 1,
- .attach = kobil_attach,
+ .num_interrupt_out = 1,
.port_probe = kobil_port_probe,
.port_remove = kobil_port_remove,
.ioctl = kobil_ioctl,
@@ -115,16 +114,6 @@ struct kobil_private {
};
-static int kobil_attach(struct usb_serial *serial)
-{
- if (serial->num_interrupt_out < serial->num_ports) {
- dev_err(&serial->interface->dev, "missing interrupt-out endpoint\n");
- return -ENODEV;
- }
-
- return 0;
-}
-
static int kobil_port_probe(struct usb_serial_port *port)
{
struct usb_serial *serial = port->serial;
diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
index f075121c6e32..a453965f9e9a 100644
--- a/drivers/usb/serial/mos7720.c
+++ b/drivers/usb/serial/mos7720.c
@@ -973,11 +973,24 @@ static void mos7720_bulk_out_data_callback(struct urb *urb)
tty_port_tty_wakeup(&mos7720_port->port->port);
}
-static int mos77xx_calc_num_ports(struct usb_serial *serial)
+static int mos77xx_calc_num_ports(struct usb_serial *serial,
+ struct usb_serial_endpoints *epds)
{
u16 product = le16_to_cpu(serial->dev->descriptor.idProduct);
- if (product == MOSCHIP_DEVICE_ID_7715)
+
+ if (product == MOSCHIP_DEVICE_ID_7715) {
+ /*
+ * The 7715 uses the first bulk in/out endpoint pair for the
+ * parallel port, and the second for the serial port. We swap
+ * the endpoint descriptors here so that the first and
+ * only registered port structure uses the serial-port
+ * endpoints.
+ */
+ swap(epds->bulk_in[0], epds->bulk_in[1]);
+ swap(epds->bulk_out[0], epds->bulk_out[1]);
+
return 1;
+ }
return 2;
}
@@ -1395,7 +1408,7 @@ struct divisor_table_entry {
/* Define table of divisors for moschip 7720 hardware *
* These assume a 3.6864MHz crystal, the standard /16, and *
* MCR.7 = 0. */
-static struct divisor_table_entry divisor_table[] = {
+static const struct divisor_table_entry divisor_table[] = {
{ 50, 2304},
{ 110, 1047}, /* 2094.545455 => 230450 => .0217 % over */
{ 134, 857}, /* 1713.011152 => 230398.5 => .00065% under */
@@ -1675,7 +1688,6 @@ static void mos7720_set_termios(struct tty_struct *tty,
struct usb_serial_port *port, struct ktermios *old_termios)
{
int status;
- unsigned int cflag;
struct usb_serial *serial;
struct moschip_port *mos7720_port;
@@ -1691,16 +1703,6 @@ static void mos7720_set_termios(struct tty_struct *tty,
return;
}
- dev_dbg(&port->dev, "setting termios - ASPIRE\n");
-
- cflag = tty->termios.c_cflag;
-
- dev_dbg(&port->dev, "%s - cflag %08x iflag %08x\n", __func__,
- tty->termios.c_cflag, RELEVANT_IFLAG(tty->termios.c_iflag));
-
- dev_dbg(&port->dev, "%s - old cflag %08x old iflag %08x\n", __func__,
- old_termios->c_cflag, RELEVANT_IFLAG(old_termios->c_iflag));
-
/* change the port settings to the new ones specified */
change_port_settings(tty, mos7720_port, old_termios);
@@ -1900,54 +1902,24 @@ static int mos7720_startup(struct usb_serial *serial)
u16 product;
int ret_val;
- if (serial->num_bulk_in < 2 || serial->num_bulk_out < 2) {
- dev_err(&serial->interface->dev, "missing bulk endpoints\n");
- return -ENODEV;
- }
-
product = le16_to_cpu(serial->dev->descriptor.idProduct);
dev = serial->dev;
- /*
- * The 7715 uses the first bulk in/out endpoint pair for the parallel
- * port, and the second for the serial port. Because the usbserial core
- * assumes both pairs are serial ports, we must engage in a bit of
- * subterfuge and swap the pointers for ports 0 and 1 in order to make
- * port 0 point to the serial port. However, both moschip devices use a
- * single interrupt-in endpoint for both ports (as mentioned a little
- * further down), and this endpoint was assigned to port 0. So after
- * the swap, we must copy the interrupt endpoint elements from port 1
- * (as newly assigned) to port 0, and null out port 1 pointers.
- */
- if (product == MOSCHIP_DEVICE_ID_7715) {
- struct usb_serial_port *tmp = serial->port[0];
- serial->port[0] = serial->port[1];
- serial->port[1] = tmp;
- serial->port[0]->interrupt_in_urb = tmp->interrupt_in_urb;
- serial->port[0]->interrupt_in_buffer = tmp->interrupt_in_buffer;
- serial->port[0]->interrupt_in_endpointAddress =
- tmp->interrupt_in_endpointAddress;
- serial->port[1]->interrupt_in_urb = NULL;
- serial->port[1]->interrupt_in_buffer = NULL;
-
- if (serial->port[0]->interrupt_in_urb) {
- struct urb *urb = serial->port[0]->interrupt_in_urb;
-
- urb->complete = mos7715_interrupt_callback;
- }
- }
-
/* setting configuration feature to one */
usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
(__u8)0x03, 0x00, 0x01, 0x00, NULL, 0x00, 5000);
-#ifdef CONFIG_USB_SERIAL_MOS7715_PARPORT
if (product == MOSCHIP_DEVICE_ID_7715) {
+ struct urb *urb = serial->port[0]->interrupt_in_urb;
+
+ urb->complete = mos7715_interrupt_callback;
+
+#ifdef CONFIG_USB_SERIAL_MOS7715_PARPORT
ret_val = mos7715_parport_init(serial);
if (ret_val < 0)
return ret_val;
- }
#endif
+ }
/* start the interrupt urb */
ret_val = usb_submit_urb(serial->port[0]->interrupt_in_urb, GFP_KERNEL);
if (ret_val) {
@@ -2039,6 +2011,9 @@ static struct usb_serial_driver moschip7720_2port_driver = {
},
.description = "Moschip 2 port adapter",
.id_table = id_table,
+ .num_bulk_in = 2,
+ .num_bulk_out = 2,
+ .num_interrupt_in = 1,
.calc_num_ports = mos77xx_calc_num_ports,
.open = mos7720_open,
.close = mos7720_close,
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index 3821c53fcee9..e8669aae14b3 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -1868,7 +1868,6 @@ static void mos7840_set_termios(struct tty_struct *tty,
struct ktermios *old_termios)
{
int status;
- unsigned int cflag;
struct usb_serial *serial;
struct moschip_port *mos7840_port;
@@ -1890,15 +1889,6 @@ static void mos7840_set_termios(struct tty_struct *tty,
return;
}
- dev_dbg(&port->dev, "%s", "setting termios - \n");
-
- cflag = tty->termios.c_cflag;
-
- dev_dbg(&port->dev, "%s - clfag %08x iflag %08x\n", __func__,
- tty->termios.c_cflag, RELEVANT_IFLAG(tty->termios.c_iflag));
- dev_dbg(&port->dev, "%s - old clfag %08x old iflag %08x\n", __func__,
- old_termios->c_cflag, RELEVANT_IFLAG(old_termios->c_iflag));
-
/* change the port settings to the new ones specified */
mos7840_change_port_settings(tty, mos7840_port, old_termios);
@@ -2104,26 +2094,27 @@ out:
return 0;
}
-static int mos7840_calc_num_ports(struct usb_serial *serial)
+static int mos7840_calc_num_ports(struct usb_serial *serial,
+ struct usb_serial_endpoints *epds)
{
int device_type = (unsigned long)usb_get_serial_data(serial);
- int mos7840_num_ports;
+ int num_ports;
- mos7840_num_ports = (device_type >> 4) & 0x000F;
+ num_ports = (device_type >> 4) & 0x000F;
- return mos7840_num_ports;
-}
+ /*
+ * num_ports is currently never zero as device_type is one of
+ * MOSCHIP_DEVICE_ID_78{1,2,4}0.
+ */
+ if (num_ports == 0)
+ return -ENODEV;
-static int mos7840_attach(struct usb_serial *serial)
-{
- if (serial->num_bulk_in < serial->num_ports ||
- serial->num_bulk_out < serial->num_ports ||
- serial->num_interrupt_in < 1) {
+ if (epds->num_bulk_in < num_ports || epds->num_bulk_out < num_ports) {
dev_err(&serial->interface->dev, "missing endpoints\n");
return -ENODEV;
}
- return 0;
+ return num_ports;
}
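
As a worked example of the derivation above, assuming the usual Moschip product IDs 0x7810, 0x7820 and 0x7840 for MOSCHIP_DEVICE_ID_78{1,2,4}0:

	/* (device_type >> 4) & 0x000F              */
	/*   0x7810 >> 4 = 0x0781, & 0x000F -> 1 port  */
	/*   0x7820 >> 4 = 0x0782, & 0x000F -> 2 ports */
	/*   0x7840 >> 4 = 0x0784, & 0x000F -> 4 ports */

so num_ports is never zero for the supported device types, and the -ENODEV branch is purely defensive.
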
static int mos7840_port_probe(struct usb_serial_port *port)
@@ -2384,7 +2375,7 @@ static struct usb_serial_driver moschip7840_4port_device = {
},
.description = DRIVER_DESC,
.id_table = id_table,
- .num_ports = 4,
+ .num_interrupt_in = 1,
.open = mos7840_open,
.close = mos7840_close,
.write = mos7840_write,
@@ -2401,7 +2392,6 @@ static struct usb_serial_driver moschip7840_4port_device = {
.tiocmset = mos7840_tiocmset,
.tiocmiwait = usb_serial_generic_tiocmiwait,
.get_icount = usb_serial_generic_get_icount,
- .attach = mos7840_attach,
.port_probe = mos7840_port_probe,
.port_remove = mos7840_port_remove,
.read_bulk_callback = mos7840_bulk_in_callback,
diff --git a/drivers/usb/serial/mxuport.c b/drivers/usb/serial/mxuport.c
index c88215a0fa3d..3aef091fe88b 100644
--- a/drivers/usb/serial/mxuport.c
+++ b/drivers/usb/serial/mxuport.c
@@ -946,20 +946,39 @@ out:
* Determine how many ports this device has dynamically. It will be
* called after the probe() callback is called, but before attach().
*/
-static int mxuport_calc_num_ports(struct usb_serial *serial)
+static int mxuport_calc_num_ports(struct usb_serial *serial,
+ struct usb_serial_endpoints *epds)
{
unsigned long features = (unsigned long)usb_get_serial_data(serial);
+ int num_ports;
+ int i;
- if (features & MX_UPORT_2_PORT)
- return 2;
- if (features & MX_UPORT_4_PORT)
- return 4;
- if (features & MX_UPORT_8_PORT)
- return 8;
- if (features & MX_UPORT_16_PORT)
- return 16;
+ if (features & MX_UPORT_2_PORT) {
+ num_ports = 2;
+ } else if (features & MX_UPORT_4_PORT) {
+ num_ports = 4;
+ } else if (features & MX_UPORT_8_PORT) {
+ num_ports = 8;
+ } else if (features & MX_UPORT_16_PORT) {
+ num_ports = 16;
+ } else {
+ dev_warn(&serial->interface->dev,
+ "unknown device, assuming two ports\n");
+ num_ports = 2;
+ }
- return 0;
+ /*
+ * Setup bulk-out endpoint multiplexing. All ports share the same
+ * bulk-out endpoint.
+ */
+ BUILD_BUG_ON(ARRAY_SIZE(epds->bulk_out) < 16);
+
+ for (i = 1; i < num_ports; ++i)
+ epds->bulk_out[i] = epds->bulk_out[0];
+
+ epds->num_bulk_out = num_ports;
+
+ return num_ports;
}
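
To make the multiplexing explicit: after the loop above, every used slot of epds->bulk_out refers to the same descriptor, so each registered port gets its write URBs aimed at the one shared bulk-out endpoint, and the header added to each packet identifies the destination port. A sketch of the resulting layout for a hypothetical four-port device:

	/*
	 * epds->bulk_out[0] == bulk_out[1] == bulk_out[2] == bulk_out[3]
	 * epds->num_bulk_out = 4
	 */
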
/* Get the version of the firmware currently running. */
@@ -1142,102 +1161,11 @@ static int mxuport_port_probe(struct usb_serial_port *port)
port->port_number);
}
-static int mxuport_alloc_write_urb(struct usb_serial *serial,
- struct usb_serial_port *port,
- struct usb_serial_port *port0,
- int j)
-{
- struct usb_device *dev = interface_to_usbdev(serial->interface);
-
- set_bit(j, &port->write_urbs_free);
- port->write_urbs[j] = usb_alloc_urb(0, GFP_KERNEL);
- if (!port->write_urbs[j])
- return -ENOMEM;
-
- port->bulk_out_buffers[j] = kmalloc(port0->bulk_out_size, GFP_KERNEL);
- if (!port->bulk_out_buffers[j])
- return -ENOMEM;
-
- usb_fill_bulk_urb(port->write_urbs[j], dev,
- usb_sndbulkpipe(dev, port->bulk_out_endpointAddress),
- port->bulk_out_buffers[j],
- port->bulk_out_size,
- serial->type->write_bulk_callback,
- port);
- return 0;
-}
-
-
-static int mxuport_alloc_write_urbs(struct usb_serial *serial,
- struct usb_serial_port *port,
- struct usb_serial_port *port0)
-{
- int j;
- int ret;
-
- for (j = 0; j < ARRAY_SIZE(port->write_urbs); ++j) {
- ret = mxuport_alloc_write_urb(serial, port, port0, j);
- if (ret)
- return ret;
- }
- return 0;
-}
-
-
static int mxuport_attach(struct usb_serial *serial)
{
struct usb_serial_port *port0 = serial->port[0];
struct usb_serial_port *port1 = serial->port[1];
- struct usb_serial_port *port;
int err;
- int i;
- int j;
-
- /*
- * Throw away all but the first allocated write URBs so we can
- * set them up again to fit the multiplexing scheme.
- */
- for (i = 1; i < serial->num_bulk_out; ++i) {
- port = serial->port[i];
- for (j = 0; j < ARRAY_SIZE(port->write_urbs); ++j) {
- usb_free_urb(port->write_urbs[j]);
- kfree(port->bulk_out_buffers[j]);
- port->write_urbs[j] = NULL;
- port->bulk_out_buffers[j] = NULL;
- }
- port->write_urbs_free = 0;
- }
-
- /*
- * All write data is sent over the first bulk out endpoint,
- * with an added header to indicate the port. Allocate URBs
- * for each port to the first bulk out endpoint.
- */
- for (i = 1; i < serial->num_ports; ++i) {
- port = serial->port[i];
- port->bulk_out_size = port0->bulk_out_size;
- port->bulk_out_endpointAddress =
- port0->bulk_out_endpointAddress;
-
- err = mxuport_alloc_write_urbs(serial, port, port0);
- if (err)
- return err;
-
- port->write_urb = port->write_urbs[0];
- port->bulk_out_buffer = port->bulk_out_buffers[0];
-
- /*
- * Ensure each port has a fifo. The framework only
- * allocates a fifo to ports with a bulk out endpoint,
- * where as we need one for every port.
- */
- if (!kfifo_initialized(&port->write_fifo)) {
- err = kfifo_alloc(&port->write_fifo, PAGE_SIZE,
- GFP_KERNEL);
- if (err)
- return err;
- }
- }
/*
* All data from the ports is received on the first bulk in
@@ -1366,7 +1294,8 @@ static struct usb_serial_driver mxuport_device = {
},
.description = "MOXA UPort",
.id_table = mxuport_idtable,
- .num_ports = 0,
+ .num_bulk_in = 2,
+ .num_bulk_out = 1,
.probe = mxuport_probe,
.port_probe = mxuport_port_probe,
.attach = mxuport_attach,
diff --git a/drivers/usb/serial/omninet.c b/drivers/usb/serial/omninet.c
index dd706953b466..efcd7feed6f4 100644
--- a/drivers/usb/serial/omninet.c
+++ b/drivers/usb/serial/omninet.c
@@ -1,6 +1,8 @@
/*
* USB ZyXEL omni.net LCD PLUS driver
*
+ * Copyright (C) 2013,2017 Johan Hovold <johan@kernel.org>
+ *
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version
* 2 as published by the Free Software Foundation.
@@ -32,12 +34,10 @@
/* function prototypes */
static void omninet_process_read_urb(struct urb *urb);
-static void omninet_write_bulk_callback(struct urb *urb);
-static int omninet_write(struct tty_struct *tty, struct usb_serial_port *port,
- const unsigned char *buf, int count);
-static int omninet_write_room(struct tty_struct *tty);
-static void omninet_disconnect(struct usb_serial *serial);
-static int omninet_attach(struct usb_serial *serial);
+static int omninet_prepare_write_buffer(struct usb_serial_port *port,
+ void *buf, size_t count);
+static int omninet_calc_num_ports(struct usb_serial *serial,
+ struct usb_serial_endpoints *epds);
static int omninet_port_probe(struct usb_serial_port *port);
static int omninet_port_remove(struct usb_serial_port *port);
@@ -55,15 +55,12 @@ static struct usb_serial_driver zyxel_omninet_device = {
},
.description = "ZyXEL - omni.net lcd plus usb",
.id_table = id_table,
- .num_ports = 1,
- .attach = omninet_attach,
+ .num_bulk_out = 2,
+ .calc_num_ports = omninet_calc_num_ports,
.port_probe = omninet_port_probe,
.port_remove = omninet_port_remove,
- .write = omninet_write,
- .write_room = omninet_write_room,
- .write_bulk_callback = omninet_write_bulk_callback,
.process_read_urb = omninet_process_read_urb,
- .disconnect = omninet_disconnect,
+ .prepare_write_buffer = omninet_prepare_write_buffer,
};
static struct usb_serial_driver * const serial_drivers[] = {
@@ -104,15 +101,14 @@ struct omninet_data {
__u8 od_outseq; /* Sequence number for bulk_out URBs */
};
-static int omninet_attach(struct usb_serial *serial)
+static int omninet_calc_num_ports(struct usb_serial *serial,
+ struct usb_serial_endpoints *epds)
{
- /* The second bulk-out endpoint is used for writing. */
- if (serial->num_bulk_out < 2) {
- dev_err(&serial->interface->dev, "missing endpoints\n");
- return -ENODEV;
- }
+ /* We need only the second bulk-out for our single-port device. */
+ epds->bulk_out[0] = epds->bulk_out[1];
+ epds->num_bulk_out = 1;
- return 0;
+ return 1;
}
static int omninet_port_probe(struct usb_serial_port *port)
@@ -159,96 +155,24 @@ static void omninet_process_read_urb(struct urb *urb)
tty_flip_buffer_push(&port->port);
}
-static int omninet_write(struct tty_struct *tty, struct usb_serial_port *port,
- const unsigned char *buf, int count)
+static int omninet_prepare_write_buffer(struct usb_serial_port *port,
+ void *buf, size_t count)
{
- struct usb_serial *serial = port->serial;
- struct usb_serial_port *wport = serial->port[1];
-
struct omninet_data *od = usb_get_serial_port_data(port);
- struct omninet_header *header = (struct omninet_header *)
- wport->write_urb->transfer_buffer;
-
- int result;
-
- if (count == 0) {
- dev_dbg(&port->dev, "%s - write request of 0 bytes\n", __func__);
- return 0;
- }
-
- if (!test_and_clear_bit(0, &port->write_urbs_free)) {
- dev_dbg(&port->dev, "%s - already writing\n", __func__);
- return 0;
- }
-
- count = (count > OMNINET_PAYLOADSIZE) ? OMNINET_PAYLOADSIZE : count;
-
- memcpy(wport->write_urb->transfer_buffer + OMNINET_HEADERLEN,
- buf, count);
-
- usb_serial_debug_data(&port->dev, __func__, count,
- wport->write_urb->transfer_buffer);
-
- header->oh_seq = od->od_outseq++;
- header->oh_len = count;
- header->oh_xxx = 0x03;
- header->oh_pad = 0x00;
-
- /* send the data out the bulk port, always 64 bytes */
- wport->write_urb->transfer_buffer_length = OMNINET_BULKOUTSIZE;
-
- result = usb_submit_urb(wport->write_urb, GFP_ATOMIC);
- if (result) {
- set_bit(0, &wport->write_urbs_free);
- dev_err_console(port,
- "%s - failed submitting write urb, error %d\n",
- __func__, result);
- } else
- result = count;
-
- return result;
-}
+ struct omninet_header *header = buf;
+ count = min_t(size_t, count, OMNINET_PAYLOADSIZE);
-static int omninet_write_room(struct tty_struct *tty)
-{
- struct usb_serial_port *port = tty->driver_data;
- struct usb_serial *serial = port->serial;
- struct usb_serial_port *wport = serial->port[1];
-
- int room = 0; /* Default: no room */
-
- if (test_bit(0, &wport->write_urbs_free))
- room = wport->bulk_out_size - OMNINET_HEADERLEN;
-
- dev_dbg(&port->dev, "%s - returns %d\n", __func__, room);
-
- return room;
-}
-
-static void omninet_write_bulk_callback(struct urb *urb)
-{
-/* struct omninet_header *header = (struct omninet_header *)
- urb->transfer_buffer; */
- struct usb_serial_port *port = urb->context;
- int status = urb->status;
-
- set_bit(0, &port->write_urbs_free);
- if (status) {
- dev_dbg(&port->dev, "%s - nonzero write bulk status received: %d\n",
- __func__, status);
- return;
- }
+ count = kfifo_out_locked(&port->write_fifo, buf + OMNINET_HEADERLEN,
+ count, &port->lock);
- usb_serial_port_softint(port);
-}
-
-
-static void omninet_disconnect(struct usb_serial *serial)
-{
- struct usb_serial_port *wport = serial->port[1];
+ header->oh_seq = od->od_outseq++;
+ header->oh_len = count;
+ header->oh_xxx = 0x03;
+ header->oh_pad = 0x00;
- usb_kill_urb(wport->write_urb);
+ /* always 64 bytes */
+ return OMNINET_BULKOUTSIZE;
}
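
With the driver-specific write path removed, omninet now uses the generic usb-serial write implementation: data from the tty layer is queued in port->write_fifo and prepare_write_buffer() is called to fill each outgoing URB, here prepending the omninet header and always reporting the fixed OMNINET_BULKOUTSIZE transfer length. For comparison, a header-less prepare_write_buffer() for a hypothetical driver would reduce to draining the fifo (a sketch, not part of this patch):

static int example_prepare_write_buffer(struct usb_serial_port *port,
					void *buf, size_t count)
{
	/* Copy up to count queued bytes straight into the URB buffer. */
	return kfifo_out_locked(&port->write_fifo, buf, count, &port->lock);
}
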
module_usb_serial_driver(serial_drivers, id_table);
diff --git a/drivers/usb/serial/opticon.c b/drivers/usb/serial/opticon.c
index 3937b9c3cc69..58657d64678b 100644
--- a/drivers/usb/serial/opticon.c
+++ b/drivers/usb/serial/opticon.c
@@ -367,16 +367,6 @@ static int opticon_ioctl(struct tty_struct *tty,
return -ENOIOCTLCMD;
}
-static int opticon_startup(struct usb_serial *serial)
-{
- if (!serial->num_bulk_in) {
- dev_err(&serial->dev->dev, "no bulk in endpoint\n");
- return -ENODEV;
- }
-
- return 0;
-}
-
static int opticon_port_probe(struct usb_serial_port *port)
{
struct opticon_private *priv;
@@ -408,8 +398,8 @@ static struct usb_serial_driver opticon_device = {
},
.id_table = id_table,
.num_ports = 1,
+ .num_bulk_in = 1,
.bulk_in_size = 256,
- .attach = opticon_startup,
.port_probe = opticon_port_probe,
.port_remove = opticon_port_remove,
.open = opticon_open,
diff --git a/drivers/usb/serial/oti6858.c b/drivers/usb/serial/oti6858.c
index b8bf52bf7a94..b11eead469ee 100644
--- a/drivers/usb/serial/oti6858.c
+++ b/drivers/usb/serial/oti6858.c
@@ -134,7 +134,6 @@ static int oti6858_chars_in_buffer(struct tty_struct *tty);
static int oti6858_tiocmget(struct tty_struct *tty);
static int oti6858_tiocmset(struct tty_struct *tty,
unsigned int set, unsigned int clear);
-static int oti6858_attach(struct usb_serial *serial);
static int oti6858_port_probe(struct usb_serial_port *port);
static int oti6858_port_remove(struct usb_serial_port *port);
@@ -146,6 +145,9 @@ static struct usb_serial_driver oti6858_device = {
},
.id_table = id_table,
.num_ports = 1,
+ .num_bulk_in = 1,
+ .num_bulk_out = 1,
+ .num_interrupt_in = 1,
.open = oti6858_open,
.close = oti6858_close,
.write = oti6858_write,
@@ -159,7 +161,6 @@ static struct usb_serial_driver oti6858_device = {
.write_bulk_callback = oti6858_write_bulk_callback,
.write_room = oti6858_write_room,
.chars_in_buffer = oti6858_chars_in_buffer,
- .attach = oti6858_attach,
.port_probe = oti6858_port_probe,
.port_remove = oti6858_port_remove,
};
@@ -326,20 +327,6 @@ static void send_data(struct work_struct *work)
usb_serial_port_softint(port);
}
-static int oti6858_attach(struct usb_serial *serial)
-{
- unsigned char num_ports = serial->num_ports;
-
- if (serial->num_bulk_in < num_ports ||
- serial->num_bulk_out < num_ports ||
- serial->num_interrupt_in < num_ports) {
- dev_err(&serial->interface->dev, "missing endpoints\n");
- return -ENODEV;
- }
-
- return 0;
-}
-
static int oti6858_port_probe(struct usb_serial_port *port)
{
struct oti6858_private *priv;
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index ca69eb42071b..c9ebefd8f35f 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -33,9 +33,11 @@
#define PL2303_QUIRK_UART_STATE_IDX0 BIT(0)
#define PL2303_QUIRK_LEGACY BIT(1)
+#define PL2303_QUIRK_ENDPOINT_HACK BIT(2)
static const struct usb_device_id id_table[] = {
- { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID) },
+ { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID),
+ .driver_info = PL2303_QUIRK_ENDPOINT_HACK },
{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_RSAQ2) },
{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_DCU11) },
{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_RSAQ3) },
@@ -48,7 +50,8 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_ZTEK) },
{ USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID) },
{ USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID_RSAQ5) },
- { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID) },
+ { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID),
+ .driver_info = PL2303_QUIRK_ENDPOINT_HACK },
{ USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID2) },
{ USB_DEVICE(ATEN_VENDOR_ID2, ATEN_PRODUCT_ID) },
{ USB_DEVICE(ELCOM_VENDOR_ID, ELCOM_PRODUCT_ID) },
@@ -68,7 +71,8 @@ static const struct usb_device_id id_table[] = {
.driver_info = PL2303_QUIRK_UART_STATE_IDX0 },
{ USB_DEVICE(SIEMENS_VENDOR_ID, SIEMENS_PRODUCT_ID_X75),
.driver_info = PL2303_QUIRK_UART_STATE_IDX0 },
- { USB_DEVICE(SIEMENS_VENDOR_ID, SIEMENS_PRODUCT_ID_EF81) },
+ { USB_DEVICE(SIEMENS_VENDOR_ID, SIEMENS_PRODUCT_ID_EF81),
+ .driver_info = PL2303_QUIRK_ENDPOINT_HACK },
{ USB_DEVICE(BENQ_VENDOR_ID, BENQ_PRODUCT_ID_S81) }, /* Benq/Siemens S81 */
{ USB_DEVICE(SYNTECH_VENDOR_ID, SYNTECH_PRODUCT_ID) },
{ USB_DEVICE(NOKIA_CA42_VENDOR_ID, NOKIA_CA42_PRODUCT_ID) },
@@ -78,7 +82,8 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(SPEEDDRAGON_VENDOR_ID, SPEEDDRAGON_PRODUCT_ID) },
{ USB_DEVICE(DATAPILOT_U2_VENDOR_ID, DATAPILOT_U2_PRODUCT_ID) },
{ USB_DEVICE(BELKIN_VENDOR_ID, BELKIN_PRODUCT_ID) },
- { USB_DEVICE(ALCOR_VENDOR_ID, ALCOR_PRODUCT_ID) },
+ { USB_DEVICE(ALCOR_VENDOR_ID, ALCOR_PRODUCT_ID),
+ .driver_info = PL2303_QUIRK_ENDPOINT_HACK },
{ USB_DEVICE(WS002IN_VENDOR_ID, WS002IN_PRODUCT_ID) },
{ USB_DEVICE(COREGA_VENDOR_ID, COREGA_PRODUCT_ID) },
{ USB_DEVICE(YCCABLE_VENDOR_ID, YCCABLE_PRODUCT_ID) },
@@ -218,20 +223,68 @@ static int pl2303_probe(struct usb_serial *serial,
return 0;
}
+/*
+ * Use interrupt endpoint from first interface if available.
+ *
+ * This is needed due to the looney way its endpoints are set up.
+ */
+static int pl2303_endpoint_hack(struct usb_serial *serial,
+ struct usb_serial_endpoints *epds)
+{
+ struct usb_interface *interface = serial->interface;
+ struct usb_device *dev = serial->dev;
+ struct device *ddev = &interface->dev;
+ struct usb_host_interface *iface_desc;
+ struct usb_endpoint_descriptor *endpoint;
+ unsigned int i;
+
+ if (interface == dev->actconfig->interface[0])
+ return 0;
+
+ /* check out the endpoints of the other interface */
+ iface_desc = dev->actconfig->interface[0]->cur_altsetting;
+
+ for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
+ endpoint = &iface_desc->endpoint[i].desc;
+
+ if (!usb_endpoint_is_int_in(endpoint))
+ continue;
+
+ dev_dbg(ddev, "found interrupt in on separate interface\n");
+ if (epds->num_interrupt_in < ARRAY_SIZE(epds->interrupt_in))
+ epds->interrupt_in[epds->num_interrupt_in++] = endpoint;
+ }
+
+ return 0;
+}
+
+static int pl2303_calc_num_ports(struct usb_serial *serial,
+ struct usb_serial_endpoints *epds)
+{
+ unsigned long quirks = (unsigned long)usb_get_serial_data(serial);
+ struct device *dev = &serial->interface->dev;
+ int ret;
+
+ if (quirks & PL2303_QUIRK_ENDPOINT_HACK) {
+ ret = pl2303_endpoint_hack(serial, epds);
+ if (ret)
+ return ret;
+ }
+
+ if (epds->num_interrupt_in < 1) {
+ dev_err(dev, "required interrupt-in endpoint missing\n");
+ return -ENODEV;
+ }
+
+ return 1;
+}
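
The quirks value consumed above is the driver_info word of the matching id_table entry; the retrieval via usb_get_serial_data() implies that the probe callback stores it there. A condensed sketch of that (assumed, not shown in this hunk) probe side:

static int example_probe(struct usb_serial *serial,
			 const struct usb_device_id *id)
{
	usb_set_serial_data(serial, (void *)id->driver_info);

	return 0;
}
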
+
static int pl2303_startup(struct usb_serial *serial)
{
struct pl2303_serial_private *spriv;
- unsigned char num_ports = serial->num_ports;
enum pl2303_type type = TYPE_01;
unsigned char *buf;
- if (serial->num_bulk_in < num_ports ||
- serial->num_bulk_out < num_ports ||
- serial->num_interrupt_in < num_ports) {
- dev_err(&serial->interface->dev, "missing endpoints\n");
- return -ENODEV;
- }
-
spriv = kzalloc(sizeof(*spriv), GFP_KERNEL);
if (!spriv)
return -ENOMEM;
@@ -938,7 +991,9 @@ static struct usb_serial_driver pl2303_device = {
.name = "pl2303",
},
.id_table = id_table,
- .num_ports = 1,
+ .num_bulk_in = 1,
+ .num_bulk_out = 1,
+ .num_interrupt_in = 0, /* see pl2303_calc_num_ports */
.bulk_in_size = 256,
.bulk_out_size = 256,
.open = pl2303_open,
@@ -954,6 +1009,7 @@ static struct usb_serial_driver pl2303_device = {
.process_read_urb = pl2303_process_read_urb,
.read_int_callback = pl2303_read_int_callback,
.probe = pl2303_probe,
+ .calc_num_ports = pl2303_calc_num_ports,
.attach = pl2303_startup,
.release = pl2303_release,
.port_probe = pl2303_port_probe,
diff --git a/drivers/usb/serial/quatech2.c b/drivers/usb/serial/quatech2.c
index fdbb904d153f..60e17d1444c3 100644
--- a/drivers/usb/serial/quatech2.c
+++ b/drivers/usb/serial/quatech2.c
@@ -246,7 +246,8 @@ static inline int update_mctrl(struct qt2_port_private *port_priv,
return status;
}
-static int qt2_calc_num_ports(struct usb_serial *serial)
+static int qt2_calc_num_ports(struct usb_serial *serial,
+ struct usb_serial_endpoints *epds)
{
struct qt2_device_detail d;
int i;
@@ -600,7 +601,6 @@ static void qt2_process_read_urb(struct urb *urb)
escapeflag = true;
break;
case QT2_CONTROL_ESCAPE:
- tty_buffer_request_room(&port->port, 2);
tty_insert_flip_string(&port->port, ch, 2);
i += 2;
escapeflag = true;
@@ -615,8 +615,7 @@ static void qt2_process_read_urb(struct urb *urb)
continue;
}
- tty_buffer_request_room(&port->port, 1);
- tty_insert_flip_string(&port->port, ch, 1);
+ tty_insert_flip_char(&port->port, *ch, TTY_NORMAL);
}
tty_flip_buffer_push(&port->port);
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
index 465e851b2815..4c4ac4705ac0 100644
--- a/drivers/usb/serial/sierra.c
+++ b/drivers/usb/serial/sierra.c
@@ -85,7 +85,8 @@ static int sierra_vsc_set_nmea(struct usb_device *udev, __u16 enable)
USB_CTRL_SET_TIMEOUT); /* int timeout */
}
-static int sierra_calc_num_ports(struct usb_serial *serial)
+static int sierra_calc_num_ports(struct usb_serial *serial,
+ struct usb_serial_endpoints *epds)
{
int num_ports = 0;
u8 ifnum, numendpoints;
diff --git a/drivers/usb/serial/spcp8x5.c b/drivers/usb/serial/spcp8x5.c
index ddfd787c461c..5167b6564c8b 100644
--- a/drivers/usb/serial/spcp8x5.c
+++ b/drivers/usb/serial/spcp8x5.c
@@ -154,19 +154,6 @@ static int spcp8x5_probe(struct usb_serial *serial,
return 0;
}
-static int spcp8x5_attach(struct usb_serial *serial)
-{
- unsigned char num_ports = serial->num_ports;
-
- if (serial->num_bulk_in < num_ports ||
- serial->num_bulk_out < num_ports) {
- dev_err(&serial->interface->dev, "missing endpoints\n");
- return -ENODEV;
- }
-
- return 0;
-}
-
static int spcp8x5_port_probe(struct usb_serial_port *port)
{
const struct usb_device_id *id = usb_get_serial_data(port->serial);
@@ -488,6 +475,8 @@ static struct usb_serial_driver spcp8x5_device = {
},
.id_table = id_table,
.num_ports = 1,
+ .num_bulk_in = 1,
+ .num_bulk_out = 1,
.open = spcp8x5_open,
.dtr_rts = spcp8x5_dtr_rts,
.carrier_raised = spcp8x5_carrier_raised,
@@ -496,7 +485,6 @@ static struct usb_serial_driver spcp8x5_device = {
.tiocmget = spcp8x5_tiocmget,
.tiocmset = spcp8x5_tiocmset,
.probe = spcp8x5_probe,
- .attach = spcp8x5_attach,
.port_probe = spcp8x5_port_probe,
.port_remove = spcp8x5_port_remove,
};
diff --git a/drivers/usb/serial/symbolserial.c b/drivers/usb/serial/symbolserial.c
index 37f3ad15ed06..0d1727232d0c 100644
--- a/drivers/usb/serial/symbolserial.c
+++ b/drivers/usb/serial/symbolserial.c
@@ -147,16 +147,6 @@ static void symbol_unthrottle(struct tty_struct *tty)
}
}
-static int symbol_startup(struct usb_serial *serial)
-{
- if (!serial->num_interrupt_in) {
- dev_err(&serial->dev->dev, "no interrupt-in endpoint\n");
- return -ENODEV;
- }
-
- return 0;
-}
-
static int symbol_port_probe(struct usb_serial_port *port)
{
struct symbol_private *priv;
@@ -188,7 +178,7 @@ static struct usb_serial_driver symbol_device = {
},
.id_table = id_table,
.num_ports = 1,
- .attach = symbol_startup,
+ .num_interrupt_in = 1,
.port_probe = symbol_port_probe,
.port_remove = symbol_port_remove,
.open = symbol_open,
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
index 3107bf5d1c96..8fc3854e5e69 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/drivers/usb/serial/ti_usb_3410_5052.c
@@ -427,6 +427,7 @@ static struct usb_serial_driver ti_1port_device = {
.description = "TI USB 3410 1 port adapter",
.id_table = ti_id_table_3410,
.num_ports = 1,
+ .num_bulk_out = 1,
.attach = ti_startup,
.release = ti_release,
.port_probe = ti_port_probe,
@@ -459,6 +460,7 @@ static struct usb_serial_driver ti_2port_device = {
.description = "TI USB 5052 2 port adapter",
.id_table = ti_id_table_5052,
.num_ports = 2,
+ .num_bulk_out = 1,
.attach = ti_startup,
.release = ti_release,
.port_probe = ti_port_probe,
@@ -927,7 +929,6 @@ static void ti_set_termios(struct tty_struct *tty,
{
struct ti_port *tport = usb_get_serial_port_data(port);
struct ti_uart_config *config;
- tcflag_t cflag, iflag;
int baud;
int status;
int port_number = port->port_number;
@@ -935,13 +936,6 @@ static void ti_set_termios(struct tty_struct *tty,
u16 wbaudrate;
u16 wflags = 0;
- cflag = tty->termios.c_cflag;
- iflag = tty->termios.c_iflag;
-
- dev_dbg(&port->dev, "%s - cflag %08x, iflag %08x\n", __func__, cflag, iflag);
- dev_dbg(&port->dev, "%s - old clfag %08x, old iflag %08x\n", __func__,
- old_termios->c_cflag, old_termios->c_iflag);
-
config = kmalloc(sizeof(*config), GFP_KERNEL);
if (!config)
return;
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index 4a037b4a79cf..c7ca95f64edc 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -38,7 +38,6 @@
#include <linux/usb/serial.h>
#include <linux/kfifo.h>
#include <linux/idr.h>
-#include "pl2303.h"
#define DRIVER_AUTHOR "Greg Kroah-Hartman <gregkh@linuxfoundation.org>"
#define DRIVER_DESC "USB Serial Driver core"
@@ -710,6 +709,39 @@ static const struct tty_port_operations serial_port_ops = {
.shutdown = serial_port_shutdown,
};
+static void find_endpoints(struct usb_serial *serial,
+ struct usb_serial_endpoints *epds)
+{
+ struct device *dev = &serial->interface->dev;
+ struct usb_host_interface *iface_desc;
+ struct usb_endpoint_descriptor *epd;
+ unsigned int i;
+
+ BUILD_BUG_ON(ARRAY_SIZE(epds->bulk_in) < USB_MAXENDPOINTS / 2);
+ BUILD_BUG_ON(ARRAY_SIZE(epds->bulk_out) < USB_MAXENDPOINTS / 2);
+ BUILD_BUG_ON(ARRAY_SIZE(epds->interrupt_in) < USB_MAXENDPOINTS / 2);
+ BUILD_BUG_ON(ARRAY_SIZE(epds->interrupt_out) < USB_MAXENDPOINTS / 2);
+
+ iface_desc = serial->interface->cur_altsetting;
+ for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
+ epd = &iface_desc->endpoint[i].desc;
+
+ if (usb_endpoint_is_bulk_in(epd)) {
+ dev_dbg(dev, "found bulk in on endpoint %u\n", i);
+ epds->bulk_in[epds->num_bulk_in++] = epd;
+ } else if (usb_endpoint_is_bulk_out(epd)) {
+ dev_dbg(dev, "found bulk out on endpoint %u\n", i);
+ epds->bulk_out[epds->num_bulk_out++] = epd;
+ } else if (usb_endpoint_is_int_in(epd)) {
+ dev_dbg(dev, "found interrupt in on endpoint %u\n", i);
+ epds->interrupt_in[epds->num_interrupt_in++] = epd;
+ } else if (usb_endpoint_is_int_out(epd)) {
+ dev_dbg(dev, "found interrupt out on endpoint %u\n", i);
+ epds->interrupt_out[epds->num_interrupt_out++] = epd;
+ }
+ }
+}
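
Together with the checks added below, probing now follows a fixed sequence: collect the interface's endpoints into epds, reject the interface if the driver's declared endpoint minimums are not met, then let an optional calc_num_ports() callback rearrange the descriptors and report the port count, falling back to the static num_ports field. A condensed outline (error paths and the MAX_NUM_PORTS clamp omitted):

	find_endpoints(serial, epds);

	if (epds->num_bulk_in < type->num_bulk_in ||
	    epds->num_bulk_out < type->num_bulk_out ||
	    epds->num_interrupt_in < type->num_interrupt_in ||
	    epds->num_interrupt_out < type->num_interrupt_out)
		return -ENODEV;		/* required endpoints missing */

	if (type->calc_num_ports)
		num_ports = type->calc_num_ports(serial, epds);
	if (!num_ports)
		num_ports = type->num_ports;
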
+
static int usb_serial_probe(struct usb_interface *interface,
const struct usb_device_id *id)
{
@@ -717,23 +749,15 @@ static int usb_serial_probe(struct usb_interface *interface,
struct usb_device *dev = interface_to_usbdev(interface);
struct usb_serial *serial = NULL;
struct usb_serial_port *port;
- struct usb_host_interface *iface_desc;
struct usb_endpoint_descriptor *endpoint;
- struct usb_endpoint_descriptor *interrupt_in_endpoint[MAX_NUM_PORTS];
- struct usb_endpoint_descriptor *interrupt_out_endpoint[MAX_NUM_PORTS];
- struct usb_endpoint_descriptor *bulk_in_endpoint[MAX_NUM_PORTS];
- struct usb_endpoint_descriptor *bulk_out_endpoint[MAX_NUM_PORTS];
+ struct usb_serial_endpoints *epds;
struct usb_serial_driver *type = NULL;
int retval;
int buffer_size;
int i;
int j;
- int num_interrupt_in = 0;
- int num_interrupt_out = 0;
- int num_bulk_in = 0;
- int num_bulk_out = 0;
int num_ports = 0;
- int max_endpoints;
+ unsigned char max_endpoints;
mutex_lock(&table_lock);
type = search_serial_device(interface);
@@ -752,8 +776,8 @@ static int usb_serial_probe(struct usb_interface *interface,
serial = create_serial(dev, interface, type);
if (!serial) {
- module_put(type->driver.owner);
- return -ENOMEM;
+ retval = -ENOMEM;
+ goto err_put_module;
}
/* if this device type has a probe function, call it */
@@ -765,129 +789,48 @@ static int usb_serial_probe(struct usb_interface *interface,
if (retval) {
dev_dbg(ddev, "sub driver rejected device\n");
- usb_serial_put(serial);
- module_put(type->driver.owner);
- return retval;
+ goto err_put_serial;
}
}
/* descriptor matches, let's find the endpoints needed */
- /* check out the endpoints */
- iface_desc = interface->cur_altsetting;
- for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
- endpoint = &iface_desc->endpoint[i].desc;
-
- if (usb_endpoint_is_bulk_in(endpoint)) {
- /* we found a bulk in endpoint */
- dev_dbg(ddev, "found bulk in on endpoint %d\n", i);
- if (num_bulk_in < MAX_NUM_PORTS) {
- bulk_in_endpoint[num_bulk_in] = endpoint;
- ++num_bulk_in;
- }
- }
-
- if (usb_endpoint_is_bulk_out(endpoint)) {
- /* we found a bulk out endpoint */
- dev_dbg(ddev, "found bulk out on endpoint %d\n", i);
- if (num_bulk_out < MAX_NUM_PORTS) {
- bulk_out_endpoint[num_bulk_out] = endpoint;
- ++num_bulk_out;
- }
- }
-
- if (usb_endpoint_is_int_in(endpoint)) {
- /* we found a interrupt in endpoint */
- dev_dbg(ddev, "found interrupt in on endpoint %d\n", i);
- if (num_interrupt_in < MAX_NUM_PORTS) {
- interrupt_in_endpoint[num_interrupt_in] =
- endpoint;
- ++num_interrupt_in;
- }
- }
-
- if (usb_endpoint_is_int_out(endpoint)) {
- /* we found an interrupt out endpoint */
- dev_dbg(ddev, "found interrupt out on endpoint %d\n", i);
- if (num_interrupt_out < MAX_NUM_PORTS) {
- interrupt_out_endpoint[num_interrupt_out] =
- endpoint;
- ++num_interrupt_out;
- }
- }
+ epds = kzalloc(sizeof(*epds), GFP_KERNEL);
+ if (!epds) {
+ retval = -ENOMEM;
+ goto err_put_serial;
}
-#if IS_ENABLED(CONFIG_USB_SERIAL_PL2303)
- /* BEGIN HORRIBLE HACK FOR PL2303 */
- /* this is needed due to the looney way its endpoints are set up */
- if (((le16_to_cpu(dev->descriptor.idVendor) == PL2303_VENDOR_ID) &&
- (le16_to_cpu(dev->descriptor.idProduct) == PL2303_PRODUCT_ID)) ||
- ((le16_to_cpu(dev->descriptor.idVendor) == ATEN_VENDOR_ID) &&
- (le16_to_cpu(dev->descriptor.idProduct) == ATEN_PRODUCT_ID)) ||
- ((le16_to_cpu(dev->descriptor.idVendor) == ALCOR_VENDOR_ID) &&
- (le16_to_cpu(dev->descriptor.idProduct) == ALCOR_PRODUCT_ID)) ||
- ((le16_to_cpu(dev->descriptor.idVendor) == SIEMENS_VENDOR_ID) &&
- (le16_to_cpu(dev->descriptor.idProduct) == SIEMENS_PRODUCT_ID_EF81))) {
- if (interface != dev->actconfig->interface[0]) {
- /* check out the endpoints of the other interface*/
- iface_desc = dev->actconfig->interface[0]->cur_altsetting;
- for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
- endpoint = &iface_desc->endpoint[i].desc;
- if (usb_endpoint_is_int_in(endpoint)) {
- /* we found a interrupt in endpoint */
- dev_dbg(ddev, "found interrupt in for Prolific device on separate interface\n");
- if (num_interrupt_in < MAX_NUM_PORTS) {
- interrupt_in_endpoint[num_interrupt_in] = endpoint;
- ++num_interrupt_in;
- }
- }
- }
- }
+ find_endpoints(serial, epds);
- /* Now make sure the PL-2303 is configured correctly.
- * If not, give up now and hope this hack will work
- * properly during a later invocation of usb_serial_probe
- */
- if (num_bulk_in == 0 || num_bulk_out == 0) {
- dev_info(ddev, "PL-2303 hack: descriptors matched but endpoints did not\n");
- usb_serial_put(serial);
- module_put(type->driver.owner);
- return -ENODEV;
- }
- }
- /* END HORRIBLE HACK FOR PL2303 */
-#endif
-
-#ifdef CONFIG_USB_SERIAL_GENERIC
- if (type == &usb_serial_generic_device) {
- num_ports = num_bulk_out;
- if (num_ports == 0) {
- dev_err(ddev, "Generic device with no bulk out, not allowed.\n");
- usb_serial_put(serial);
- module_put(type->driver.owner);
- return -EIO;
- }
- dev_info(ddev, "The \"generic\" usb-serial driver is only for testing and one-off prototypes.\n");
- dev_info(ddev, "Tell linux-usb@vger.kernel.org to add your device to a proper driver.\n");
+ if (epds->num_bulk_in < type->num_bulk_in ||
+ epds->num_bulk_out < type->num_bulk_out ||
+ epds->num_interrupt_in < type->num_interrupt_in ||
+ epds->num_interrupt_out < type->num_interrupt_out) {
+ dev_err(ddev, "required endpoints missing\n");
+ retval = -ENODEV;
+ goto err_free_epds;
}
-#endif
- if (!num_ports) {
- /* if this device type has a calc_num_ports function, call it */
- if (type->calc_num_ports)
- num_ports = type->calc_num_ports(serial);
- if (!num_ports)
- num_ports = type->num_ports;
+
+ if (type->calc_num_ports) {
+ retval = type->calc_num_ports(serial, epds);
+ if (retval < 0)
+ goto err_free_epds;
+ num_ports = retval;
}
+ if (!num_ports)
+ num_ports = type->num_ports;
+
if (num_ports > MAX_NUM_PORTS) {
dev_warn(ddev, "too many ports requested: %d\n", num_ports);
num_ports = MAX_NUM_PORTS;
}
- serial->num_ports = num_ports;
- serial->num_bulk_in = num_bulk_in;
- serial->num_bulk_out = num_bulk_out;
- serial->num_interrupt_in = num_interrupt_in;
- serial->num_interrupt_out = num_interrupt_out;
+ serial->num_ports = (unsigned char)num_ports;
+ serial->num_bulk_in = epds->num_bulk_in;
+ serial->num_bulk_out = epds->num_bulk_out;
+ serial->num_interrupt_in = epds->num_interrupt_in;
+ serial->num_interrupt_out = epds->num_interrupt_out;
/* found all that we need */
dev_info(ddev, "%s converter detected\n", type->description);
@@ -895,10 +838,10 @@ static int usb_serial_probe(struct usb_interface *interface,
/* create our ports, we need as many as the max endpoints */
/* we don't use num_ports here because some devices have more
endpoint pairs than ports */
- max_endpoints = max(num_bulk_in, num_bulk_out);
- max_endpoints = max(max_endpoints, num_interrupt_in);
- max_endpoints = max(max_endpoints, num_interrupt_out);
- max_endpoints = max(max_endpoints, (int)serial->num_ports);
+ max_endpoints = max(epds->num_bulk_in, epds->num_bulk_out);
+ max_endpoints = max(max_endpoints, epds->num_interrupt_in);
+ max_endpoints = max(max_endpoints, epds->num_interrupt_out);
+ max_endpoints = max(max_endpoints, serial->num_ports);
serial->num_port_pointers = max_endpoints;
dev_dbg(ddev, "setting up %d port structure(s)\n", max_endpoints);
@@ -923,8 +866,8 @@ static int usb_serial_probe(struct usb_interface *interface,
}
/* set up the endpoint information */
- for (i = 0; i < num_bulk_in; ++i) {
- endpoint = bulk_in_endpoint[i];
+ for (i = 0; i < epds->num_bulk_in; ++i) {
+ endpoint = epds->bulk_in[i];
port = serial->port[i];
buffer_size = max_t(int, serial->type->bulk_in_size,
usb_endpoint_maxp(endpoint));
@@ -952,8 +895,8 @@ static int usb_serial_probe(struct usb_interface *interface,
port->bulk_in_buffer = port->bulk_in_buffers[0];
}
- for (i = 0; i < num_bulk_out; ++i) {
- endpoint = bulk_out_endpoint[i];
+ for (i = 0; i < epds->num_bulk_out; ++i) {
+ endpoint = epds->bulk_out[i];
port = serial->port[i];
if (kfifo_alloc(&port->write_fifo, PAGE_SIZE, GFP_KERNEL))
goto probe_error;
@@ -985,8 +928,8 @@ static int usb_serial_probe(struct usb_interface *interface,
}
if (serial->type->read_int_callback) {
- for (i = 0; i < num_interrupt_in; ++i) {
- endpoint = interrupt_in_endpoint[i];
+ for (i = 0; i < epds->num_interrupt_in; ++i) {
+ endpoint = epds->interrupt_in[i];
port = serial->port[i];
port->interrupt_in_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!port->interrupt_in_urb)
@@ -1005,13 +948,13 @@ static int usb_serial_probe(struct usb_interface *interface,
serial->type->read_int_callback, port,
endpoint->bInterval);
}
- } else if (num_interrupt_in) {
+ } else if (epds->num_interrupt_in) {
dev_dbg(ddev, "The device claims to support interrupt in transfers, but read_int_callback is not defined\n");
}
if (serial->type->write_int_callback) {
- for (i = 0; i < num_interrupt_out; ++i) {
- endpoint = interrupt_out_endpoint[i];
+ for (i = 0; i < epds->num_interrupt_out; ++i) {
+ endpoint = epds->interrupt_out[i];
port = serial->port[i];
port->interrupt_out_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!port->interrupt_out_urb)
@@ -1031,7 +974,7 @@ static int usb_serial_probe(struct usb_interface *interface,
serial->type->write_int_callback, port,
endpoint->bInterval);
}
- } else if (num_interrupt_out) {
+ } else if (epds->num_interrupt_out) {
dev_dbg(ddev, "The device claims to support interrupt out transfers, but write_int_callback is not defined\n");
}
@@ -1053,12 +996,6 @@ static int usb_serial_probe(struct usb_interface *interface,
serial->attached = 1;
}
- /* Avoid race with tty_open and serial_install by setting the
- * disconnected flag and not clearing it until all ports have been
- * registered.
- */
- serial->disconnected = 1;
-
if (allocate_minors(serial, num_ports)) {
dev_err(ddev, "No more free serial minor numbers\n");
goto probe_error;
@@ -1076,18 +1013,23 @@ static int usb_serial_probe(struct usb_interface *interface,
dev_err(ddev, "Error registering port device, continuing\n");
}
- serial->disconnected = 0;
-
if (num_ports > 0)
usb_serial_console_init(serial->port[0]->minor);
exit:
+ kfree(epds);
module_put(type->driver.owner);
return 0;
probe_error:
+ retval = -EIO;
+err_free_epds:
+ kfree(epds);
+err_put_serial:
usb_serial_put(serial);
+err_put_module:
module_put(type->driver.owner);
- return -EIO;
+
+ return retval;
}
static void usb_serial_disconnect(struct usb_interface *interface)
diff --git a/drivers/usb/serial/usb_debug.c b/drivers/usb/serial/usb_debug.c
index 92f7e5c21162..12f4c5a91e62 100644
--- a/drivers/usb/serial/usb_debug.c
+++ b/drivers/usb/serial/usb_debug.c
@@ -17,7 +17,7 @@
#define USB_DEBUG_MAX_PACKET_SIZE 8
#define USB_DEBUG_BRK_SIZE 8
-static char USB_DEBUG_BRK[USB_DEBUG_BRK_SIZE] = {
+static const char USB_DEBUG_BRK[USB_DEBUG_BRK_SIZE] = {
0x00,
0xff,
0x01,
diff --git a/drivers/usb/serial/visor.c b/drivers/usb/serial/visor.c
index 337a0be89fcf..9f3317a940ef 100644
--- a/drivers/usb/serial/visor.c
+++ b/drivers/usb/serial/visor.c
@@ -40,11 +40,12 @@ static int visor_open(struct tty_struct *tty, struct usb_serial_port *port);
static void visor_close(struct usb_serial_port *port);
static int visor_probe(struct usb_serial *serial,
const struct usb_device_id *id);
-static int visor_calc_num_ports(struct usb_serial *serial);
+static int visor_calc_num_ports(struct usb_serial *serial,
+ struct usb_serial_endpoints *epds);
+static int clie_5_calc_num_ports(struct usb_serial *serial,
+ struct usb_serial_endpoints *epds);
static void visor_read_int_callback(struct urb *urb);
static int clie_3_5_startup(struct usb_serial *serial);
-static int treo_attach(struct usb_serial *serial);
-static int clie_5_attach(struct usb_serial *serial);
static int palm_os_3_probe(struct usb_serial *serial,
const struct usb_device_id *id);
static int palm_os_4_probe(struct usb_serial *serial,
@@ -174,7 +175,6 @@ static struct usb_serial_driver handspring_device = {
.close = visor_close,
.throttle = usb_serial_generic_throttle,
.unthrottle = usb_serial_generic_unthrottle,
- .attach = treo_attach,
.probe = visor_probe,
.calc_num_ports = visor_calc_num_ports,
.read_int_callback = visor_read_int_callback,
@@ -189,14 +189,14 @@ static struct usb_serial_driver clie_5_device = {
.description = "Sony Clie 5.0",
.id_table = clie_id_5_table,
.num_ports = 2,
+ .num_bulk_out = 2,
.bulk_out_size = 256,
.open = visor_open,
.close = visor_close,
.throttle = usb_serial_generic_throttle,
.unthrottle = usb_serial_generic_unthrottle,
- .attach = clie_5_attach,
.probe = visor_probe,
- .calc_num_ports = visor_calc_num_ports,
+ .calc_num_ports = clie_5_calc_num_ports,
.read_int_callback = visor_read_int_callback,
};
@@ -466,16 +466,60 @@ static int visor_probe(struct usb_serial *serial,
return retval;
}
-static int visor_calc_num_ports(struct usb_serial *serial)
+static int visor_calc_num_ports(struct usb_serial *serial,
+ struct usb_serial_endpoints *epds)
{
+ unsigned int vid = le16_to_cpu(serial->dev->descriptor.idVendor);
int num_ports = (int)(long)(usb_get_serial_data(serial));
if (num_ports)
usb_set_serial_data(serial, NULL);
+ /*
+ * Only swap the bulk-in and interrupt-in endpoints for the Handspring
+ * devices with interrupt-in endpoints, which for now are the Treo devices.
+ */
+ if (!(vid == HANDSPRING_VENDOR_ID || vid == KYOCERA_VENDOR_ID) ||
+ epds->num_interrupt_in == 0)
+ goto out;
+
+ if (epds->num_bulk_in < 2 || epds->num_interrupt_in < 2) {
+ dev_err(&serial->interface->dev, "missing endpoints\n");
+ return -ENODEV;
+ }
+
+ /*
+ * It appears that Treos and Kyoceras want to use the
+ * 1st bulk in endpoint to communicate with the 2nd bulk out endpoint,
+ * so let's swap the 1st and 2nd bulk in and interrupt endpoints.
+ * Note that swapping the bulk out endpoints would break lots of
+ * apps that want to communicate on the second port.
+ */
+ swap(epds->bulk_in[0], epds->bulk_in[1]);
+ swap(epds->interrupt_in[0], epds->interrupt_in[1]);
+out:
return num_ports;
}
+static int clie_5_calc_num_ports(struct usb_serial *serial,
+ struct usb_serial_endpoints *epds)
+{
+ /*
+ * TH55 registers 2 ports.
+ * Communication in from the UX50/TH55 uses the first bulk-in
+ * endpoint, while communication out to the UX50/TH55 uses the second
+ * bulk-out endpoint.
+ */
+
+ /*
+ * FIXME: Should we swap the descriptors instead of using the same
+ * bulk-out endpoint for both ports?
+ */
+ epds->bulk_out[0] = epds->bulk_out[1];
+
+ return serial->type->num_ports;
+}
+
static int clie_3_5_startup(struct usb_serial *serial)
{
struct device *dev = &serial->dev->dev;
@@ -531,94 +575,6 @@ out:
return result;
}
-static int treo_attach(struct usb_serial *serial)
-{
- struct usb_serial_port *swap_port;
-
- /* Only do this endpoint hack for the Handspring devices with
- * interrupt in endpoints, which for now are the Treo devices. */
- if (!((le16_to_cpu(serial->dev->descriptor.idVendor)
- == HANDSPRING_VENDOR_ID) ||
- (le16_to_cpu(serial->dev->descriptor.idVendor)
- == KYOCERA_VENDOR_ID)) ||
- (serial->num_interrupt_in == 0))
- return 0;
-
- if (serial->num_bulk_in < 2 || serial->num_interrupt_in < 2) {
- dev_err(&serial->interface->dev, "missing endpoints\n");
- return -ENODEV;
- }
-
- /*
- * It appears that Treos and Kyoceras want to use the
- * 1st bulk in endpoint to communicate with the 2nd bulk out endpoint,
- * so let's swap the 1st and 2nd bulk in and interrupt endpoints.
- * Note that swapping the bulk out endpoints would break lots of
- * apps that want to communicate on the second port.
- */
-#define COPY_PORT(dest, src) \
- do { \
- int i; \
- \
- for (i = 0; i < ARRAY_SIZE(src->read_urbs); ++i) { \
- dest->read_urbs[i] = src->read_urbs[i]; \
- dest->read_urbs[i]->context = dest; \
- dest->bulk_in_buffers[i] = src->bulk_in_buffers[i]; \
- } \
- dest->read_urb = src->read_urb; \
- dest->bulk_in_endpointAddress = src->bulk_in_endpointAddress;\
- dest->bulk_in_buffer = src->bulk_in_buffer; \
- dest->bulk_in_size = src->bulk_in_size; \
- dest->interrupt_in_urb = src->interrupt_in_urb; \
- dest->interrupt_in_urb->context = dest; \
- dest->interrupt_in_endpointAddress = \
- src->interrupt_in_endpointAddress;\
- dest->interrupt_in_buffer = src->interrupt_in_buffer; \
- } while (0);
-
- swap_port = kmalloc(sizeof(*swap_port), GFP_KERNEL);
- if (!swap_port)
- return -ENOMEM;
- COPY_PORT(swap_port, serial->port[0]);
- COPY_PORT(serial->port[0], serial->port[1]);
- COPY_PORT(serial->port[1], swap_port);
- kfree(swap_port);
-
- return 0;
-}
-
-static int clie_5_attach(struct usb_serial *serial)
-{
- struct usb_serial_port *port;
- unsigned int pipe;
- int j;
-
- /* TH55 registers 2 ports.
- Communication in from the UX50/TH55 uses bulk_in_endpointAddress
- from port 0. Communication out to the UX50/TH55 uses
- bulk_out_endpointAddress from port 1
-
- Lets do a quick and dirty mapping
- */
-
- /* some sanity check */
- if (serial->num_bulk_out < 2) {
- dev_err(&serial->interface->dev, "missing bulk out endpoints\n");
- return -ENODEV;
- }
-
- /* port 0 now uses the modified endpoint Address */
- port = serial->port[0];
- port->bulk_out_endpointAddress =
- serial->port[1]->bulk_out_endpointAddress;
-
- pipe = usb_sndbulkpipe(serial->dev, port->bulk_out_endpointAddress);
- for (j = 0; j < ARRAY_SIZE(port->write_urbs); ++j)
- port->write_urbs[j]->pipe = pipe;
-
- return 0;
-}
-
module_usb_serial_driver(serial_drivers, id_table_combined);
MODULE_AUTHOR(DRIVER_AUTHOR);
diff --git a/drivers/usb/serial/whiteheat.c b/drivers/usb/serial/whiteheat.c
index 5ab65eb1dacc..55cebc1e6fec 100644
--- a/drivers/usb/serial/whiteheat.c
+++ b/drivers/usb/serial/whiteheat.c
@@ -80,8 +80,6 @@ static int whiteheat_firmware_download(struct usb_serial *serial,
static int whiteheat_firmware_attach(struct usb_serial *serial);
/* function prototypes for the Connect Tech WhiteHEAT serial converter */
-static int whiteheat_probe(struct usb_serial *serial,
- const struct usb_device_id *id);
static int whiteheat_attach(struct usb_serial *serial);
static void whiteheat_release(struct usb_serial *serial);
static int whiteheat_port_probe(struct usb_serial_port *port);
@@ -118,7 +116,8 @@ static struct usb_serial_driver whiteheat_device = {
.description = "Connect Tech - WhiteHEAT",
.id_table = id_table_std,
.num_ports = 4,
- .probe = whiteheat_probe,
+ .num_bulk_in = 5,
+ .num_bulk_out = 5,
.attach = whiteheat_attach,
.release = whiteheat_release,
.port_probe = whiteheat_port_probe,
@@ -221,33 +220,6 @@ static int whiteheat_firmware_attach(struct usb_serial *serial)
* Connect Tech's White Heat serial driver functions
*****************************************************************************/
-static int whiteheat_probe(struct usb_serial *serial,
- const struct usb_device_id *id)
-{
- struct usb_host_interface *iface_desc;
- struct usb_endpoint_descriptor *endpoint;
- size_t num_bulk_in = 0;
- size_t num_bulk_out = 0;
- size_t min_num_bulk;
- unsigned int i;
-
- iface_desc = serial->interface->cur_altsetting;
-
- for (i = 0; i < iface_desc->desc.bNumEndpoints; i++) {
- endpoint = &iface_desc->endpoint[i].desc;
- if (usb_endpoint_is_bulk_in(endpoint))
- ++num_bulk_in;
- if (usb_endpoint_is_bulk_out(endpoint))
- ++num_bulk_out;
- }
-
- min_num_bulk = COMMAND_PORT + 1;
- if (num_bulk_in < min_num_bulk || num_bulk_out < min_num_bulk)
- return -ENODEV;
-
- return 0;
-}
-
static int whiteheat_attach(struct usb_serial *serial)
{
struct usb_serial_port *command_port;
diff --git a/drivers/usb/storage/karma.c b/drivers/usb/storage/karma.c
index f9d407f0b508..b05ba4929f00 100644
--- a/drivers/usb/storage/karma.c
+++ b/drivers/usb/storage/karma.c
@@ -105,7 +105,7 @@ static struct us_unusual_dev karma_unusual_dev_list[] = {
*/
static int rio_karma_send_command(char cmd, struct us_data *us)
{
- int result, partial;
+ int result;
unsigned long timeout;
static unsigned char seq = 1;
struct karma_data *data = (struct karma_data *) us->extra;
@@ -119,12 +119,12 @@ static int rio_karma_send_command(char cmd, struct us_data *us)
timeout = jiffies + msecs_to_jiffies(6000);
for (;;) {
result = usb_stor_bulk_transfer_buf(us, us->send_bulk_pipe,
- us->iobuf, RIO_SEND_LEN, &partial);
+ us->iobuf, RIO_SEND_LEN, NULL);
if (result != USB_STOR_XFER_GOOD)
goto err;
result = usb_stor_bulk_transfer_buf(us, us->recv_bulk_pipe,
- data->recv, RIO_RECV_LEN, &partial);
+ data->recv, RIO_RECV_LEN, NULL);
if (result != USB_STOR_XFER_GOOD)
goto err;
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 9129f6cb8230..5a70c33ef0e0 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -42,7 +42,7 @@
* - a patch that adds the entry for your device, including your
* email address right above the entry (plus maybe a brief
* explanation of the reason for the entry),
- * - a copy of /proc/bus/usb/devices with your device plugged in
+ * - a copy of /sys/kernel/debug/usb/devices with your device plugged in
* running with this patch.
* Send your submission to either Phil Dibowitz <phil@ipom.com> or
* Alan Stern <stern@rowland.harvard.edu>, and don't forget to CC: the
@@ -176,7 +176,7 @@ UNUSUAL_DEV( 0x0420, 0x0001, 0x0100, 0x0100,
/*
* Reported by Andrew Nayenko <relan@bk.ru>
- * Updated for new firmware by Phillip Potter <phillipinda@hotmail.com>
+ * Updated for new firmware by Phillip Potter <phil@philpotter.co.uk>
*/
UNUSUAL_DEV( 0x0421, 0x0019, 0x0592, 0x0610,
"Nokia",
diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
index 615bea08ec0a..06615934fed1 100644
--- a/drivers/usb/storage/usb.c
+++ b/drivers/usb/storage/usb.c
@@ -737,13 +737,11 @@ static void get_protocol(struct us_data *us)
/* Get the pipe settings */
static int get_pipes(struct us_data *us)
{
- struct usb_host_interface *altsetting =
- us->pusb_intf->cur_altsetting;
- int i;
- struct usb_endpoint_descriptor *ep;
- struct usb_endpoint_descriptor *ep_in = NULL;
- struct usb_endpoint_descriptor *ep_out = NULL;
- struct usb_endpoint_descriptor *ep_int = NULL;
+ struct usb_host_interface *alt = us->pusb_intf->cur_altsetting;
+ struct usb_endpoint_descriptor *ep_in;
+ struct usb_endpoint_descriptor *ep_out;
+ struct usb_endpoint_descriptor *ep_int;
+ int res;
/*
* Find the first endpoint of each type we need.
@@ -751,28 +749,16 @@ static int get_pipes(struct us_data *us)
* An optional interrupt-in is OK (necessary for CBI protocol).
* We will ignore any others.
*/
- for (i = 0; i < altsetting->desc.bNumEndpoints; i++) {
- ep = &altsetting->endpoint[i].desc;
-
- if (usb_endpoint_xfer_bulk(ep)) {
- if (usb_endpoint_dir_in(ep)) {
- if (!ep_in)
- ep_in = ep;
- } else {
- if (!ep_out)
- ep_out = ep;
- }
- }
-
- else if (usb_endpoint_is_int_in(ep)) {
- if (!ep_int)
- ep_int = ep;
- }
+ res = usb_find_common_endpoints(alt, &ep_in, &ep_out, NULL, NULL);
+ if (res) {
+ usb_stor_dbg(us, "bulk endpoints not found\n");
+ return res;
}
- if (!ep_in || !ep_out || (us->protocol == USB_PR_CBI && !ep_int)) {
- usb_stor_dbg(us, "Endpoint sanity check failed! Rejecting dev.\n");
- return -EIO;
+ res = usb_find_int_in_endpoint(alt, &ep_int);
+ if (res && us->protocol == USB_PR_CBI) {
+ usb_stor_dbg(us, "interrupt endpoint not found\n");
+ return res;
}
/* Calculate and store the pipe values */
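
The helpers used in the new get_pipes() replace the open-coded descriptor walk: usb_find_common_endpoints() succeeds only if it finds every endpoint type that was requested (unwanted types are passed as NULL), and usb_find_int_in_endpoint() looks up a single interrupt-in descriptor. A minimal usage sketch, assuming alt points at the interface's current altsetting as above:

	struct usb_endpoint_descriptor *bulk_in, *bulk_out, *int_in;
	int res;

	/* Require one bulk-in and one bulk-out endpoint. */
	res = usb_find_common_endpoints(alt, &bulk_in, &bulk_out, NULL, NULL);
	if (res)
		return res;

	/* An interrupt-in endpoint is optional in this sketch. */
	if (usb_find_int_in_endpoint(alt, &int_in))
		int_in = NULL;
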
diff --git a/drivers/usb/typec/Kconfig b/drivers/usb/typec/Kconfig
new file mode 100644
index 000000000000..dfcfe459b7cf
--- /dev/null
+++ b/drivers/usb/typec/Kconfig
@@ -0,0 +1,22 @@
+
+menu "USB Power Delivery and Type-C drivers"
+
+config TYPEC
+ tristate
+
+config TYPEC_WCOVE
+ tristate "Intel WhiskeyCove PMIC USB Type-C PHY driver"
+ depends on ACPI
+ depends on INTEL_SOC_PMIC
+ depends on INTEL_PMC_IPC
+ depends on BXT_WC_PMIC_OPREGION
+ select TYPEC
+ help
+ This driver adds support for USB Type-C detection on Intel Broxton
+ platforms that have an Intel Whiskey Cove PMIC. The driver can detect the
+ role and cable orientation.
+
+ To compile this driver as a module, choose M here: the module will be
+ called typec_wcove.
+
+endmenu
diff --git a/drivers/usb/typec/Makefile b/drivers/usb/typec/Makefile
new file mode 100644
index 000000000000..b9cb862221af
--- /dev/null
+++ b/drivers/usb/typec/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_TYPEC) += typec.o
+obj-$(CONFIG_TYPEC_WCOVE) += typec_wcove.o
diff --git a/drivers/usb/typec/typec.c b/drivers/usb/typec/typec.c
new file mode 100644
index 000000000000..89e540bb7ff3
--- /dev/null
+++ b/drivers/usb/typec/typec.c
@@ -0,0 +1,1262 @@
+/*
+ * USB Type-C Connector Class
+ *
+ * Copyright (C) 2017, Intel Corporation
+ * Author: Heikki Krogerus <heikki.krogerus@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/usb/typec.h>
+
+struct typec_mode {
+ int index;
+ u32 vdo;
+ char *desc;
+ enum typec_port_type roles;
+
+ struct typec_altmode *alt_mode;
+
+ unsigned int active:1;
+
+ char group_name[6];
+ struct attribute_group group;
+ struct attribute *attrs[5];
+ struct device_attribute vdo_attr;
+ struct device_attribute desc_attr;
+ struct device_attribute active_attr;
+ struct device_attribute roles_attr;
+};
+
+struct typec_altmode {
+ struct device dev;
+ u16 svid;
+ int n_modes;
+ struct typec_mode modes[ALTMODE_MAX_MODES];
+ const struct attribute_group *mode_groups[ALTMODE_MAX_MODES];
+};
+
+struct typec_plug {
+ struct device dev;
+ enum typec_plug_index index;
+};
+
+struct typec_cable {
+ struct device dev;
+ enum typec_plug_type type;
+ struct usb_pd_identity *identity;
+ unsigned int active:1;
+};
+
+struct typec_partner {
+ struct device dev;
+ unsigned int usb_pd:1;
+ struct usb_pd_identity *identity;
+ enum typec_accessory accessory;
+};
+
+struct typec_port {
+ unsigned int id;
+ struct device dev;
+
+ int prefer_role;
+ enum typec_data_role data_role;
+ enum typec_role pwr_role;
+ enum typec_role vconn_role;
+ enum typec_pwr_opmode pwr_opmode;
+
+ const struct typec_capability *cap;
+};
+
+#define to_typec_port(_dev_) container_of(_dev_, struct typec_port, dev)
+#define to_typec_plug(_dev_) container_of(_dev_, struct typec_plug, dev)
+#define to_typec_cable(_dev_) container_of(_dev_, struct typec_cable, dev)
+#define to_typec_partner(_dev_) container_of(_dev_, struct typec_partner, dev)
+#define to_altmode(_dev_) container_of(_dev_, struct typec_altmode, dev)
+
+static const struct device_type typec_partner_dev_type;
+static const struct device_type typec_cable_dev_type;
+static const struct device_type typec_plug_dev_type;
+static const struct device_type typec_port_dev_type;
+
+#define is_typec_partner(_dev_) (_dev_->type == &typec_partner_dev_type)
+#define is_typec_cable(_dev_) (_dev_->type == &typec_cable_dev_type)
+#define is_typec_plug(_dev_) (_dev_->type == &typec_plug_dev_type)
+#define is_typec_port(_dev_) (_dev_->type == &typec_port_dev_type)
+
+static DEFINE_IDA(typec_index_ida);
+static struct class *typec_class;
+
+/* Common attributes */
+
+static const char * const typec_accessory_modes[] = {
+ [TYPEC_ACCESSORY_NONE] = "none",
+ [TYPEC_ACCESSORY_AUDIO] = "analog_audio",
+ [TYPEC_ACCESSORY_DEBUG] = "debug",
+};
+
+static struct usb_pd_identity *get_pd_identity(struct device *dev)
+{
+ if (is_typec_partner(dev)) {
+ struct typec_partner *partner = to_typec_partner(dev);
+
+ return partner->identity;
+ } else if (is_typec_cable(dev)) {
+ struct typec_cable *cable = to_typec_cable(dev);
+
+ return cable->identity;
+ }
+ return NULL;
+}
+
+static ssize_t id_header_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct usb_pd_identity *id = get_pd_identity(dev);
+
+ return sprintf(buf, "0x%08x\n", id->id_header);
+}
+static DEVICE_ATTR_RO(id_header);
+
+static ssize_t cert_stat_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct usb_pd_identity *id = get_pd_identity(dev);
+
+ return sprintf(buf, "0x%08x\n", id->cert_stat);
+}
+static DEVICE_ATTR_RO(cert_stat);
+
+static ssize_t product_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct usb_pd_identity *id = get_pd_identity(dev);
+
+ return sprintf(buf, "0x%08x\n", id->product);
+}
+static DEVICE_ATTR_RO(product);
+
+static struct attribute *usb_pd_id_attrs[] = {
+ &dev_attr_id_header.attr,
+ &dev_attr_cert_stat.attr,
+ &dev_attr_product.attr,
+ NULL
+};
+
+static const struct attribute_group usb_pd_id_group = {
+ .name = "identity",
+ .attrs = usb_pd_id_attrs,
+};
+
+static const struct attribute_group *usb_pd_id_groups[] = {
+ &usb_pd_id_group,
+ NULL,
+};
+
+static void typec_report_identity(struct device *dev)
+{
+ sysfs_notify(&dev->kobj, "identity", "id_header");
+ sysfs_notify(&dev->kobj, "identity", "cert_stat");
+ sysfs_notify(&dev->kobj, "identity", "product");
+}
+
+/* ------------------------------------------------------------------------- */
+/* Alternate Modes */
+
+/**
+ * typec_altmode_update_active - Report Enter/Exit mode
+ * @alt: Handle to the alternate mode
+ * @mode: Mode index
+ * @active: True when the mode has been entered
+ *
+ * If a partner or cable plug successfully executes an Enter/Exit Mode command,
+ * the drivers use this routine to report the updated state of the mode.
+ */
+void typec_altmode_update_active(struct typec_altmode *alt, int mode,
+ bool active)
+{
+ struct typec_mode *m = &alt->modes[mode];
+ char dir[6];
+
+ if (m->active == active)
+ return;
+
+ m->active = active;
+ snprintf(dir, sizeof(dir), "mode%d", mode);
+ sysfs_notify(&alt->dev.kobj, dir, "active");
+ kobject_uevent(&alt->dev.kobj, KOBJ_CHANGE);
+}
+EXPORT_SYMBOL_GPL(typec_altmode_update_active);
+
+/**
+ * typec_altmode2port - Alternate Mode to USB Type-C port
+ * @alt: The Alternate Mode
+ *
+ * Returns handle to the port that a cable plug or partner with @alt is
+ * connected to.
+ */
+struct typec_port *typec_altmode2port(struct typec_altmode *alt)
+{
+ if (is_typec_plug(alt->dev.parent))
+ return to_typec_port(alt->dev.parent->parent->parent);
+ if (is_typec_partner(alt->dev.parent))
+ return to_typec_port(alt->dev.parent->parent);
+ if (is_typec_port(alt->dev.parent))
+ return to_typec_port(alt->dev.parent);
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(typec_altmode2port);
+
+static ssize_t
+typec_altmode_vdo_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct typec_mode *mode = container_of(attr, struct typec_mode,
+ vdo_attr);
+
+ return sprintf(buf, "0x%08x\n", mode->vdo);
+}
+
+static ssize_t
+typec_altmode_desc_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct typec_mode *mode = container_of(attr, struct typec_mode,
+ desc_attr);
+
+ return sprintf(buf, "%s\n", mode->desc ? mode->desc : "");
+}
+
+static ssize_t
+typec_altmode_active_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct typec_mode *mode = container_of(attr, struct typec_mode,
+ active_attr);
+
+ return sprintf(buf, "%s\n", mode->active ? "yes" : "no");
+}
+
+static ssize_t
+typec_altmode_active_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct typec_mode *mode = container_of(attr, struct typec_mode,
+ active_attr);
+ struct typec_port *port = typec_altmode2port(mode->alt_mode);
+ bool activate;
+ int ret;
+
+ if (!port->cap->activate_mode)
+ return -EOPNOTSUPP;
+
+ ret = kstrtobool(buf, &activate);
+ if (ret)
+ return ret;
+
+ ret = port->cap->activate_mode(port->cap, mode->index, activate);
+ if (ret)
+ return ret;
+
+ return size;
+}
+
+static ssize_t
+typec_altmode_roles_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct typec_mode *mode = container_of(attr, struct typec_mode,
+ roles_attr);
+ ssize_t ret;
+
+ switch (mode->roles) {
+ case TYPEC_PORT_DFP:
+ ret = sprintf(buf, "source\n");
+ break;
+ case TYPEC_PORT_UFP:
+ ret = sprintf(buf, "sink\n");
+ break;
+ case TYPEC_PORT_DRP:
+ default:
+ ret = sprintf(buf, "source sink\n");
+ break;
+ }
+ return ret;
+}
+
+static void typec_init_modes(struct typec_altmode *alt,
+ struct typec_mode_desc *desc, bool is_port)
+{
+ int i;
+
+ for (i = 0; i < alt->n_modes; i++, desc++) {
+ struct typec_mode *mode = &alt->modes[i];
+
+ /* The human readable description is not considered critical */
+ mode->desc = kstrdup(desc->desc, GFP_KERNEL);
+ if (desc->desc && !mode->desc)
+ dev_err(&alt->dev, "failed to copy mode%d desc\n", i);
+
+ mode->alt_mode = alt;
+ mode->vdo = desc->vdo;
+ mode->roles = desc->roles;
+ mode->index = desc->index;
+ sprintf(mode->group_name, "mode%d", desc->index);
+
+ sysfs_attr_init(&mode->vdo_attr.attr);
+ mode->vdo_attr.attr.name = "vdo";
+ mode->vdo_attr.attr.mode = 0444;
+ mode->vdo_attr.show = typec_altmode_vdo_show;
+
+ sysfs_attr_init(&mode->desc_attr.attr);
+ mode->desc_attr.attr.name = "description";
+ mode->desc_attr.attr.mode = 0444;
+ mode->desc_attr.show = typec_altmode_desc_show;
+
+ sysfs_attr_init(&mode->active_attr.attr);
+ mode->active_attr.attr.name = "active";
+ mode->active_attr.attr.mode = 0644;
+ mode->active_attr.show = typec_altmode_active_show;
+ mode->active_attr.store = typec_altmode_active_store;
+
+ mode->attrs[0] = &mode->vdo_attr.attr;
+ mode->attrs[1] = &mode->desc_attr.attr;
+ mode->attrs[2] = &mode->active_attr.attr;
+
+ /* With ports, list the roles that the mode is supported with */
+ if (is_port) {
+ sysfs_attr_init(&mode->roles_attr.attr);
+ mode->roles_attr.attr.name = "supported_roles";
+ mode->roles_attr.attr.mode = 0444;
+ mode->roles_attr.show = typec_altmode_roles_show;
+
+ mode->attrs[3] = &mode->roles_attr.attr;
+ }
+
+ mode->group.attrs = mode->attrs;
+ mode->group.name = mode->group_name;
+
+ alt->mode_groups[i] = &mode->group;
+ }
+}
+
+static ssize_t svid_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct typec_altmode *alt = to_altmode(dev);
+
+ return sprintf(buf, "%04x\n", alt->svid);
+}
+static DEVICE_ATTR_RO(svid);
+
+static struct attribute *typec_altmode_attrs[] = {
+ &dev_attr_svid.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(typec_altmode);
+
+static void typec_altmode_release(struct device *dev)
+{
+ struct typec_altmode *alt = to_altmode(dev);
+ int i;
+
+ for (i = 0; i < alt->n_modes; i++)
+ kfree(alt->modes[i].desc);
+ kfree(alt);
+}
+
+static const struct device_type typec_altmode_dev_type = {
+ .name = "typec_alternate_mode",
+ .groups = typec_altmode_groups,
+ .release = typec_altmode_release,
+};
+
+static struct typec_altmode *
+typec_register_altmode(struct device *parent, struct typec_altmode_desc *desc)
+{
+ struct typec_altmode *alt;
+ int ret;
+
+ alt = kzalloc(sizeof(*alt), GFP_KERNEL);
+ if (!alt)
+ return NULL;
+
+ alt->svid = desc->svid;
+ alt->n_modes = desc->n_modes;
+ typec_init_modes(alt, desc->modes, is_typec_port(parent));
+
+ alt->dev.parent = parent;
+ alt->dev.groups = alt->mode_groups;
+ alt->dev.type = &typec_altmode_dev_type;
+ dev_set_name(&alt->dev, "svid-%04x", alt->svid);
+
+ ret = device_register(&alt->dev);
+ if (ret) {
+ dev_err(parent, "failed to register alternate mode (%d)\n",
+ ret);
+ put_device(&alt->dev);
+ return NULL;
+ }
+
+ return alt;
+}
+
+/**
+ * typec_unregister_altmode - Unregister Alternate Mode
+ * @alt: The alternate mode to be unregistered
+ *
+ * Unregister device created with typec_partner_register_altmode(),
+ * typec_plug_register_altmode() or typec_port_register_altmode().
+ */
+void typec_unregister_altmode(struct typec_altmode *alt)
+{
+ if (alt)
+ device_unregister(&alt->dev);
+}
+EXPORT_SYMBOL_GPL(typec_unregister_altmode);
+
+/* ------------------------------------------------------------------------- */
+/* Type-C Partners */
+
+static ssize_t accessory_mode_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct typec_partner *p = to_typec_partner(dev);
+
+ return sprintf(buf, "%s\n", typec_accessory_modes[p->accessory]);
+}
+static DEVICE_ATTR_RO(accessory_mode);
+
+static ssize_t supports_usb_power_delivery_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct typec_partner *p = to_typec_partner(dev);
+
+ return sprintf(buf, "%s\n", p->usb_pd ? "yes" : "no");
+}
+static DEVICE_ATTR_RO(supports_usb_power_delivery);
+
+static struct attribute *typec_partner_attrs[] = {
+ &dev_attr_accessory_mode.attr,
+ &dev_attr_supports_usb_power_delivery.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(typec_partner);
+
+static void typec_partner_release(struct device *dev)
+{
+ struct typec_partner *partner = to_typec_partner(dev);
+
+ kfree(partner);
+}
+
+static const struct device_type typec_partner_dev_type = {
+ .name = "typec_partner",
+ .groups = typec_partner_groups,
+ .release = typec_partner_release,
+};
+
+/**
+ * typec_partner_set_identity - Report result from Discover Identity command
+ * @partner: The partner with updated identity values
+ *
+ * This routine is used to report that the result of the Discover Identity USB
+ * Power Delivery command has become available.
+ */
+int typec_partner_set_identity(struct typec_partner *partner)
+{
+ if (!partner->identity)
+ return -EINVAL;
+
+ typec_report_identity(&partner->dev);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(typec_partner_set_identity);
+
+/**
+ * typec_partner_register_altmode - Register USB Type-C Partner Alternate Mode
+ * @partner: USB Type-C Partner that supports the alternate mode
+ * @desc: Description of the alternate mode
+ *
+ * This routine is used to individually register each alternate mode that
+ * @partner has listed in response to the Discover SVIDs command. The modes for
+ * a SVID listed in response to the Discover Modes command need to be listed in
+ * an array in @desc.
+ *
+ * Returns handle to the alternate mode on success or NULL on failure.
+ */
+struct typec_altmode *
+typec_partner_register_altmode(struct typec_partner *partner,
+ struct typec_altmode_desc *desc)
+{
+ return typec_register_altmode(&partner->dev, desc);
+}
+EXPORT_SYMBOL_GPL(typec_partner_register_altmode);
+
+/**
+ * typec_register_partner - Register a USB Type-C Partner
+ * @port: The USB Type-C Port the partner is connected to
+ * @desc: Description of the partner
+ *
+ * Registers a device for USB Type-C Partner described in @desc.
+ *
+ * Returns handle to the partner on success or NULL on failure.
+ */
+struct typec_partner *typec_register_partner(struct typec_port *port,
+ struct typec_partner_desc *desc)
+{
+ struct typec_partner *partner;
+ int ret;
+
+ partner = kzalloc(sizeof(*partner), GFP_KERNEL);
+ if (!partner)
+ return NULL;
+
+ partner->usb_pd = desc->usb_pd;
+ partner->accessory = desc->accessory;
+
+ if (desc->identity) {
+ /*
+ * Create the identity directory only if the driver is able to
+ * provide data to it.
+ */
+ partner->dev.groups = usb_pd_id_groups;
+ partner->identity = desc->identity;
+ }
+
+ partner->dev.class = typec_class;
+ partner->dev.parent = &port->dev;
+ partner->dev.type = &typec_partner_dev_type;
+ dev_set_name(&partner->dev, "%s-partner", dev_name(&port->dev));
+
+ ret = device_register(&partner->dev);
+ if (ret) {
+ dev_err(&port->dev, "failed to register partner (%d)\n", ret);
+ put_device(&partner->dev);
+ return NULL;
+ }
+
+ return partner;
+}
+EXPORT_SYMBOL_GPL(typec_register_partner);
+
+/**
+ * typec_unregister_partner - Unregister a USB Type-C Partner
+ * @partner: The partner to be unregistered
+ *
+ * Unregister device created with typec_register_partner().
+ */
+void typec_unregister_partner(struct typec_partner *partner)
+{
+ if (partner)
+ device_unregister(&partner->dev);
+}
+EXPORT_SYMBOL_GPL(typec_unregister_partner);
+
+/* ------------------------------------------------------------------------- */
+/* Type-C Cable Plugs */
+
+static void typec_plug_release(struct device *dev)
+{
+ struct typec_plug *plug = to_typec_plug(dev);
+
+ kfree(plug);
+}
+
+static const struct device_type typec_plug_dev_type = {
+ .name = "typec_plug",
+ .release = typec_plug_release,
+};
+
+/**
+ * typec_plug_register_altmode - Register USB Type-C Cable Plug Alternate Mode
+ * @plug: USB Type-C Cable Plug that supports the alternate mode
+ * @desc: Description of the alternate mode
+ *
+ * This routine is used to individually register each alternate mode that @plug
+ * has listed in response to the Discover SVIDs command. The modes for a SVID
+ * that the plug lists in response to the Discover Modes command need to be
+ * listed in an array in @desc.
+ *
+ * Returns handle to the alternate mode on success or NULL on failure.
+ */
+struct typec_altmode *
+typec_plug_register_altmode(struct typec_plug *plug,
+ struct typec_altmode_desc *desc)
+{
+ return typec_register_altmode(&plug->dev, desc);
+}
+EXPORT_SYMBOL_GPL(typec_plug_register_altmode);
+
+/**
+ * typec_register_plug - Register a USB Type-C Cable Plug
+ * @cable: USB Type-C Cable with the plug
+ * @desc: Description of the cable plug
+ *
+ * Registers a device for USB Type-C Cable Plug described in @desc. A USB Type-C
+ * Cable Plug represents a plug with electronics in it that can respond to USB
+ * Power Delivery SOP Prime or SOP Double Prime packets.
+ *
+ * Returns handle to the cable plug on success or NULL on failure.
+ */
+struct typec_plug *typec_register_plug(struct typec_cable *cable,
+ struct typec_plug_desc *desc)
+{
+ struct typec_plug *plug;
+ char name[8];
+ int ret;
+
+ plug = kzalloc(sizeof(*plug), GFP_KERNEL);
+ if (!plug)
+ return NULL;
+
+ sprintf(name, "plug%d", desc->index);
+
+ plug->index = desc->index;
+ plug->dev.class = typec_class;
+ plug->dev.parent = &cable->dev;
+ plug->dev.type = &typec_plug_dev_type;
+ dev_set_name(&plug->dev, "%s-%s", dev_name(cable->dev.parent), name);
+
+ ret = device_register(&plug->dev);
+ if (ret) {
+ dev_err(&cable->dev, "failed to register plug (%d)\n", ret);
+ put_device(&plug->dev);
+ return NULL;
+ }
+
+ return plug;
+}
+EXPORT_SYMBOL_GPL(typec_register_plug);
+
+/**
+ * typec_unregister_plug - Unregister a USB Type-C Cable Plug
+ * @plug: The cable plug to be unregistered
+ *
+ * Unregister device created with typec_register_plug().
+ */
+void typec_unregister_plug(struct typec_plug *plug)
+{
+ if (plug)
+ device_unregister(&plug->dev);
+}
+EXPORT_SYMBOL_GPL(typec_unregister_plug);
+
+/* Type-C Cables */
+
+static ssize_t
+type_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct typec_cable *cable = to_typec_cable(dev);
+
+ return sprintf(buf, "%s\n", cable->active ? "active" : "passive");
+}
+static DEVICE_ATTR_RO(type);
+
+static const char * const typec_plug_types[] = {
+ [USB_PLUG_NONE] = "unknown",
+ [USB_PLUG_TYPE_A] = "type-a",
+ [USB_PLUG_TYPE_B] = "type-b",
+ [USB_PLUG_TYPE_C] = "type-c",
+ [USB_PLUG_CAPTIVE] = "captive",
+};
+
+static ssize_t plug_type_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct typec_cable *cable = to_typec_cable(dev);
+
+ return sprintf(buf, "%s\n", typec_plug_types[cable->type]);
+}
+static DEVICE_ATTR_RO(plug_type);
+
+static struct attribute *typec_cable_attrs[] = {
+ &dev_attr_type.attr,
+ &dev_attr_plug_type.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(typec_cable);
+
+static void typec_cable_release(struct device *dev)
+{
+ struct typec_cable *cable = to_typec_cable(dev);
+
+ kfree(cable);
+}
+
+static const struct device_type typec_cable_dev_type = {
+ .name = "typec_cable",
+ .groups = typec_cable_groups,
+ .release = typec_cable_release,
+};
+
+/**
+ * typec_cable_set_identity - Report result from Discover Identity command
+ * @cable: The cable with updated identity values
+ *
+ * This routine is used to report that the result of the Discover Identity USB
+ * Power Delivery command has become available.
+ */
+int typec_cable_set_identity(struct typec_cable *cable)
+{
+ if (!cable->identity)
+ return -EINVAL;
+
+ typec_report_identity(&cable->dev);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(typec_cable_set_identity);
+
+/**
+ * typec_register_cable - Register a USB Type-C Cable
+ * @port: The USB Type-C Port the cable is connected to
+ * @desc: Description of the cable
+ *
+ * Registers a device for USB Type-C Cable described in @desc. The cable will be
+ * the parent for the optional cable plug devices.
+ *
+ * Returns handle to the cable on success or NULL on failure.
+ */
+struct typec_cable *typec_register_cable(struct typec_port *port,
+ struct typec_cable_desc *desc)
+{
+ struct typec_cable *cable;
+ int ret;
+
+ cable = kzalloc(sizeof(*cable), GFP_KERNEL);
+ if (!cable)
+ return NULL;
+
+ cable->type = desc->type;
+ cable->active = desc->active;
+
+ if (desc->identity) {
+ /*
+ * Create the identity directory only if the driver is able to
+ * provide data to it.
+ */
+ cable->dev.groups = usb_pd_id_groups;
+ cable->identity = desc->identity;
+ }
+
+ cable->dev.class = typec_class;
+ cable->dev.parent = &port->dev;
+ cable->dev.type = &typec_cable_dev_type;
+ dev_set_name(&cable->dev, "%s-cable", dev_name(&port->dev));
+
+ ret = device_register(&cable->dev);
+ if (ret) {
+ dev_err(&port->dev, "failed to register cable (%d)\n", ret);
+ put_device(&cable->dev);
+ return NULL;
+ }
+
+ return cable;
+}
+EXPORT_SYMBOL_GPL(typec_register_cable);
+
+/**
+ * typec_unregister_cable - Unregister a USB Type-C Cable
+ * @cable: The cable to be unregistered
+ *
+ * Unregister device created with typec_register_cable().
+ */
+void typec_unregister_cable(struct typec_cable *cable)
+{
+ if (cable)
+ device_unregister(&cable->dev);
+}
+EXPORT_SYMBOL_GPL(typec_unregister_cable);
+
+/* ------------------------------------------------------------------------- */
+/* USB Type-C ports */
+
+static const char * const typec_roles[] = {
+ [TYPEC_SINK] = "sink",
+ [TYPEC_SOURCE] = "source",
+};
+
+static const char * const typec_data_roles[] = {
+ [TYPEC_DEVICE] = "device",
+ [TYPEC_HOST] = "host",
+};
+
+static ssize_t
+preferred_role_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct typec_port *port = to_typec_port(dev);
+ int role;
+ int ret;
+
+ if (port->cap->type != TYPEC_PORT_DRP) {
+ dev_dbg(dev, "Preferred role only supported with DRP ports\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (!port->cap->try_role) {
+ dev_dbg(dev, "Setting preferred role not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ role = sysfs_match_string(typec_roles, buf);
+ if (role < 0) {
+ if (sysfs_streq(buf, "none"))
+ role = TYPEC_NO_PREFERRED_ROLE;
+ else
+ return -EINVAL;
+ }
+
+ ret = port->cap->try_role(port->cap, role);
+ if (ret)
+ return ret;
+
+ port->prefer_role = role;
+ return size;
+}
+
+static ssize_t
+preferred_role_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct typec_port *port = to_typec_port(dev);
+
+ if (port->cap->type != TYPEC_PORT_DRP)
+ return 0;
+
+ if (port->prefer_role < 0)
+ return 0;
+
+ return sprintf(buf, "%s\n", typec_roles[port->prefer_role]);
+}
+static DEVICE_ATTR_RW(preferred_role);
+
+static ssize_t data_role_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct typec_port *port = to_typec_port(dev);
+ int ret;
+
+ if (port->cap->type != TYPEC_PORT_DRP) {
+ dev_dbg(dev, "data role swap only supported with DRP ports\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (!port->cap->dr_set) {
+ dev_dbg(dev, "data role swapping not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ ret = sysfs_match_string(typec_data_roles, buf);
+ if (ret < 0)
+ return ret;
+
+ ret = port->cap->dr_set(port->cap, ret);
+ if (ret)
+ return ret;
+
+ return size;
+}
+
+static ssize_t data_role_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct typec_port *port = to_typec_port(dev);
+
+ if (port->cap->type == TYPEC_PORT_DRP)
+ return sprintf(buf, "%s\n", port->data_role == TYPEC_HOST ?
+ "[host] device" : "host [device]");
+
+ return sprintf(buf, "[%s]\n", typec_data_roles[port->data_role]);
+}
+static DEVICE_ATTR_RW(data_role);
+
+static ssize_t power_role_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct typec_port *port = to_typec_port(dev);
+ int ret = size;
+
+ if (!port->cap->pd_revision) {
+ dev_dbg(dev, "USB Power Delivery not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (!port->cap->pr_set) {
+ dev_dbg(dev, "power role swapping not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (port->pwr_opmode != TYPEC_PWR_MODE_PD) {
+ dev_dbg(dev, "partner unable to swap power role\n");
+ return -EIO;
+ }
+
+ ret = sysfs_match_string(typec_roles, buf);
+ if (ret < 0)
+ return ret;
+
+ ret = port->cap->pr_set(port->cap, ret);
+ if (ret)
+ return ret;
+
+ return size;
+}
+
+static ssize_t power_role_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct typec_port *port = to_typec_port(dev);
+
+ if (port->cap->type == TYPEC_PORT_DRP)
+ return sprintf(buf, "%s\n", port->pwr_role == TYPEC_SOURCE ?
+ "[source] sink" : "source [sink]");
+
+ return sprintf(buf, "[%s]\n", typec_roles[port->pwr_role]);
+}
+static DEVICE_ATTR_RW(power_role);
+
+static const char * const typec_pwr_opmodes[] = {
+ [TYPEC_PWR_MODE_USB] = "default",
+ [TYPEC_PWR_MODE_1_5A] = "1.5A",
+ [TYPEC_PWR_MODE_3_0A] = "3.0A",
+ [TYPEC_PWR_MODE_PD] = "usb_power_delivery",
+};
+
+static ssize_t power_operation_mode_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct typec_port *port = to_typec_port(dev);
+
+ return sprintf(buf, "%s\n", typec_pwr_opmodes[port->pwr_opmode]);
+}
+static DEVICE_ATTR_RO(power_operation_mode);
+
+static ssize_t vconn_source_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct typec_port *port = to_typec_port(dev);
+ bool source;
+ int ret;
+
+ if (!port->cap->pd_revision) {
+ dev_dbg(dev, "VCONN swap depends on USB Power Delivery\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (!port->cap->vconn_set) {
+ dev_dbg(dev, "VCONN swapping not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ ret = kstrtobool(buf, &source);
+ if (ret)
+ return ret;
+
+ ret = port->cap->vconn_set(port->cap, (enum typec_role)source);
+ if (ret)
+ return ret;
+
+ return size;
+}
+
+static ssize_t vconn_source_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct typec_port *port = to_typec_port(dev);
+
+ return sprintf(buf, "%s\n",
+ port->vconn_role == TYPEC_SOURCE ? "yes" : "no");
+}
+static DEVICE_ATTR_RW(vconn_source);
+
+static ssize_t supported_accessory_modes_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct typec_port *port = to_typec_port(dev);
+ ssize_t ret = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(port->cap->accessory); i++) {
+ if (port->cap->accessory[i])
+ ret += sprintf(buf + ret, "%s ",
+ typec_accessory_modes[port->cap->accessory[i]]);
+ }
+
+ if (!ret)
+ return sprintf(buf, "none\n");
+
+ buf[ret - 1] = '\n';
+
+ return ret;
+}
+static DEVICE_ATTR_RO(supported_accessory_modes);
+
+static ssize_t usb_typec_revision_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct typec_port *port = to_typec_port(dev);
+ u16 rev = port->cap->revision;
+
+ return sprintf(buf, "%d.%d\n", (rev >> 8) & 0xff, (rev >> 4) & 0xf);
+}
+static DEVICE_ATTR_RO(usb_typec_revision);
+
+static ssize_t usb_power_delivery_revision_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct typec_port *p = to_typec_port(dev);
+
+ return sprintf(buf, "%d\n", (p->cap->pd_revision >> 8) & 0xff);
+}
+static DEVICE_ATTR_RO(usb_power_delivery_revision);
+
+static struct attribute *typec_attrs[] = {
+ &dev_attr_data_role.attr,
+ &dev_attr_power_operation_mode.attr,
+ &dev_attr_power_role.attr,
+ &dev_attr_preferred_role.attr,
+ &dev_attr_supported_accessory_modes.attr,
+ &dev_attr_usb_power_delivery_revision.attr,
+ &dev_attr_usb_typec_revision.attr,
+ &dev_attr_vconn_source.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(typec);
+
+static int typec_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ int ret;
+
+ ret = add_uevent_var(env, "TYPEC_PORT=%s", dev_name(dev));
+ if (ret)
+ dev_err(dev, "failed to add uevent TYPEC_PORT\n");
+
+ return ret;
+}
+
+static void typec_release(struct device *dev)
+{
+ struct typec_port *port = to_typec_port(dev);
+
+ ida_simple_remove(&typec_index_ida, port->id);
+ kfree(port);
+}
+
+static const struct device_type typec_port_dev_type = {
+ .name = "typec_port",
+ .groups = typec_groups,
+ .uevent = typec_uevent,
+ .release = typec_release,
+};
+
+/* --------------------------------------- */
+/* Driver callbacks to report role updates */
+
+/**
+ * typec_set_data_role - Report data role change
+ * @port: The USB Type-C Port where the role was changed
+ * @role: The new data role
+ *
+ * This routine is used by the port drivers to report data role changes.
+ */
+void typec_set_data_role(struct typec_port *port, enum typec_data_role role)
+{
+ if (port->data_role == role)
+ return;
+
+ port->data_role = role;
+ sysfs_notify(&port->dev.kobj, NULL, "data_role");
+ kobject_uevent(&port->dev.kobj, KOBJ_CHANGE);
+}
+EXPORT_SYMBOL_GPL(typec_set_data_role);
+
+/**
+ * typec_set_pwr_role - Report power role change
+ * @port: The USB Type-C Port where the role was changed
+ * @role: The new power role
+ *
+ * This routine is used by the port drivers to report power role changes.
+ */
+void typec_set_pwr_role(struct typec_port *port, enum typec_role role)
+{
+ if (port->pwr_role == role)
+ return;
+
+ port->pwr_role = role;
+ sysfs_notify(&port->dev.kobj, NULL, "power_role");
+ kobject_uevent(&port->dev.kobj, KOBJ_CHANGE);
+}
+EXPORT_SYMBOL_GPL(typec_set_pwr_role);
+
+/**
+ * typec_set_vconn_role - Report VCONN source change
+ * @port: The USB Type-C Port whose VCONN role changed
+ * @role: Source when @port is sourcing VCONN, or Sink when it's not
+ *
+ * This routine is used by the port drivers to report when the VCONN source
+ * changes.
+ */
+void typec_set_vconn_role(struct typec_port *port, enum typec_role role)
+{
+ if (port->vconn_role == role)
+ return;
+
+ port->vconn_role = role;
+ sysfs_notify(&port->dev.kobj, NULL, "vconn_source");
+ kobject_uevent(&port->dev.kobj, KOBJ_CHANGE);
+}
+EXPORT_SYMBOL_GPL(typec_set_vconn_role);
+
+/**
+ * typec_set_pwr_opmode - Report changed power operation mode
+ * @port: The USB Type-C Port where the mode was changed
+ * @opmode: New power operation mode
+ *
+ * This routine is used by the port drivers to report changed power operation
+ * mode in @port. The modes are USB (default), 1.5A and 3.0A as defined in the
+ * USB Type-C specification, and "USB Power Delivery" when the power levels are
+ * negotiated with the methods defined in the USB Power Delivery specification.
+ */
+void typec_set_pwr_opmode(struct typec_port *port,
+ enum typec_pwr_opmode opmode)
+{
+ if (port->pwr_opmode == opmode)
+ return;
+
+ port->pwr_opmode = opmode;
+ sysfs_notify(&port->dev.kobj, NULL, "power_operation_mode");
+ kobject_uevent(&port->dev.kobj, KOBJ_CHANGE);
+}
+EXPORT_SYMBOL_GPL(typec_set_pwr_opmode);
+
+/* --------------------------------------- */
+
+/**
+ * typec_port_register_altmode - Register USB Type-C Port Alternate Mode
+ * @port: USB Type-C Port that supports the alternate mode
+ * @desc: Description of the alternate mode
+ *
+ * This routine is used to register an alternate mode that @port is capable of
+ * supporting.
+ *
+ * Returns handle to the alternate mode on success or NULL on failure.
+ */
+struct typec_altmode *
+typec_port_register_altmode(struct typec_port *port,
+ struct typec_altmode_desc *desc)
+{
+ return typec_register_altmode(&port->dev, desc);
+}
+EXPORT_SYMBOL_GPL(typec_port_register_altmode);
+
+/**
+ * typec_register_port - Register a USB Type-C Port
+ * @parent: Parent device
+ * @cap: Description of the port
+ *
+ * Registers a device for USB Type-C Port described in @cap.
+ *
+ * Returns handle to the port on success or NULL on failure.
+ */
+struct typec_port *typec_register_port(struct device *parent,
+ const struct typec_capability *cap)
+{
+ struct typec_port *port;
+ int role;
+ int ret;
+ int id;
+
+ port = kzalloc(sizeof(*port), GFP_KERNEL);
+ if (!port)
+ return NULL;
+
+ id = ida_simple_get(&typec_index_ida, 0, 0, GFP_KERNEL);
+ if (id < 0) {
+ kfree(port);
+ return NULL;
+ }
+
+ if (cap->type == TYPEC_PORT_DFP)
+ role = TYPEC_SOURCE;
+ else if (cap->type == TYPEC_PORT_UFP)
+ role = TYPEC_SINK;
+ else
+ role = cap->prefer_role;
+
+ if (role == TYPEC_SOURCE) {
+ port->data_role = TYPEC_HOST;
+ port->pwr_role = TYPEC_SOURCE;
+ port->vconn_role = TYPEC_SOURCE;
+ } else {
+ port->data_role = TYPEC_DEVICE;
+ port->pwr_role = TYPEC_SINK;
+ port->vconn_role = TYPEC_SINK;
+ }
+
+ port->id = id;
+ port->cap = cap;
+ port->prefer_role = cap->prefer_role;
+
+ port->dev.class = typec_class;
+ port->dev.parent = parent;
+ port->dev.fwnode = cap->fwnode;
+ port->dev.type = &typec_port_dev_type;
+ dev_set_name(&port->dev, "port%d", id);
+
+ ret = device_register(&port->dev);
+ if (ret) {
+ dev_err(parent, "failed to register port (%d)\n", ret);
+ put_device(&port->dev);
+ return NULL;
+ }
+
+ return port;
+}
+EXPORT_SYMBOL_GPL(typec_register_port);
+
+/**
+ * typec_unregister_port - Unregister a USB Type-C Port
+ * @port: The port to be unregistered
+ *
+ * Unregister device created with typec_register_port().
+ */
+void typec_unregister_port(struct typec_port *port)
+{
+ if (port)
+ device_unregister(&port->dev);
+}
+EXPORT_SYMBOL_GPL(typec_unregister_port);
+
+static int __init typec_init(void)
+{
+ typec_class = class_create(THIS_MODULE, "typec");
+ return PTR_ERR_OR_ZERO(typec_class);
+}
+subsys_initcall(typec_init);
+
+static void __exit typec_exit(void)
+{
+ class_destroy(typec_class);
+ ida_destroy(&typec_index_ida);
+}
+module_exit(typec_exit);
+
+MODULE_AUTHOR("Heikki Krogerus <heikki.krogerus@linux.intel.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("USB Type-C Connector Class");
diff --git a/drivers/usb/typec/typec_wcove.c b/drivers/usb/typec/typec_wcove.c
new file mode 100644
index 000000000000..d5a7b21fa3f1
--- /dev/null
+++ b/drivers/usb/typec/typec_wcove.c
@@ -0,0 +1,377 @@
+/**
+ * typec_wcove.c - WhiskeyCove PMIC USB Type-C PHY driver
+ *
+ * Copyright (C) 2017 Intel Corporation
+ * Author: Heikki Krogerus <heikki.krogerus@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/acpi.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/usb/typec.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/intel_soc_pmic.h>
+
+/* Register offsets */
+#define WCOVE_CHGRIRQ0 0x4e09
+#define WCOVE_PHYCTRL 0x5e07
+
+#define USBC_CONTROL1 0x7001
+#define USBC_CONTROL2 0x7002
+#define USBC_CONTROL3 0x7003
+#define USBC_CC1_CTRL 0x7004
+#define USBC_CC2_CTRL 0x7005
+#define USBC_STATUS1 0x7007
+#define USBC_STATUS2 0x7008
+#define USBC_STATUS3 0x7009
+#define USBC_IRQ1 0x7015
+#define USBC_IRQ2 0x7016
+#define USBC_IRQMASK1 0x7017
+#define USBC_IRQMASK2 0x7018
+
+/* Register bits */
+
+#define USBC_CONTROL1_MODE_DRP(r) (((r) & ~0x7) | 4)
+
+#define USBC_CONTROL2_UNATT_SNK BIT(0)
+#define USBC_CONTROL2_UNATT_SRC BIT(1)
+#define USBC_CONTROL2_DIS_ST BIT(2)
+
+#define USBC_CONTROL3_PD_DIS BIT(1)
+
+#define USBC_CC_CTRL_VCONN_EN BIT(1)
+
+#define USBC_STATUS1_DET_ONGOING BIT(6)
+#define USBC_STATUS1_RSLT(r) ((r) & 0xf)
+#define USBC_RSLT_NOTHING 0
+#define USBC_RSLT_SRC_DEFAULT 1
+#define USBC_RSLT_SRC_1_5A 2
+#define USBC_RSLT_SRC_3_0A 3
+#define USBC_RSLT_SNK 4
+#define USBC_RSLT_DEBUG_ACC 5
+#define USBC_RSLT_AUDIO_ACC 6
+#define USBC_RSLT_UNDEF 15
+#define USBC_STATUS1_ORIENT(r) (((r) >> 4) & 0x3)
+#define USBC_ORIENT_NORMAL 1
+#define USBC_ORIENT_REVERSE 2
+
+#define USBC_STATUS2_VBUS_REQ BIT(5)
+
+#define USBC_IRQ1_ADCDONE1 BIT(2)
+#define USBC_IRQ1_OVERTEMP BIT(1)
+#define USBC_IRQ1_SHORT BIT(0)
+
+#define USBC_IRQ2_CC_CHANGE BIT(7)
+#define USBC_IRQ2_RX_PD BIT(6)
+#define USBC_IRQ2_RX_HR BIT(5)
+#define USBC_IRQ2_RX_CR BIT(4)
+#define USBC_IRQ2_TX_SUCCESS BIT(3)
+#define USBC_IRQ2_TX_FAIL BIT(2)
+
+#define USBC_IRQMASK1_ALL (USBC_IRQ1_ADCDONE1 | USBC_IRQ1_OVERTEMP | \
+ USBC_IRQ1_SHORT)
+
+#define USBC_IRQMASK2_ALL (USBC_IRQ2_CC_CHANGE | USBC_IRQ2_RX_PD | \
+ USBC_IRQ2_RX_HR | USBC_IRQ2_RX_CR | \
+ USBC_IRQ2_TX_SUCCESS | USBC_IRQ2_TX_FAIL)
+
+struct wcove_typec {
+ struct mutex lock; /* device lock */
+ struct device *dev;
+ struct regmap *regmap;
+ struct typec_port *port;
+ struct typec_capability cap;
+ struct typec_partner *partner;
+};
+
+enum wcove_typec_func {
+ WCOVE_FUNC_DRIVE_VBUS = 1,
+ WCOVE_FUNC_ORIENTATION,
+ WCOVE_FUNC_ROLE,
+ WCOVE_FUNC_DRIVE_VCONN,
+};
+
+enum wcove_typec_orientation {
+ WCOVE_ORIENTATION_NORMAL,
+ WCOVE_ORIENTATION_REVERSE,
+};
+
+enum wcove_typec_role {
+ WCOVE_ROLE_HOST,
+ WCOVE_ROLE_DEVICE,
+};
+
+static uuid_le uuid = UUID_LE(0x482383f0, 0x2876, 0x4e49,
+ 0x86, 0x85, 0xdb, 0x66, 0x21, 0x1a, 0xf0, 0x37);
+
+static int wcove_typec_func(struct wcove_typec *wcove,
+ enum wcove_typec_func func, int param)
+{
+ union acpi_object *obj;
+ union acpi_object tmp;
+ union acpi_object argv4 = ACPI_INIT_DSM_ARGV4(1, &tmp);
+
+ tmp.type = ACPI_TYPE_INTEGER;
+ tmp.integer.value = param;
+
+ obj = acpi_evaluate_dsm(ACPI_HANDLE(wcove->dev), uuid.b, 1, func,
+ &argv4);
+ if (!obj) {
+ dev_err(wcove->dev, "%s: failed to evaluate _DSM\n", __func__);
+ return -EIO;
+ }
+
+ ACPI_FREE(obj);
+ return 0;
+}
+
+static irqreturn_t wcove_typec_irq(int irq, void *data)
+{
+ enum typec_role role = TYPEC_SINK;
+ struct typec_partner_desc partner;
+ struct wcove_typec *wcove = data;
+ unsigned int cc1_ctrl;
+ unsigned int cc2_ctrl;
+ unsigned int cc_irq1;
+ unsigned int cc_irq2;
+ unsigned int status1;
+ unsigned int status2;
+ int ret;
+
+ mutex_lock(&wcove->lock);
+
+ ret = regmap_read(wcove->regmap, USBC_IRQ1, &cc_irq1);
+ if (ret)
+ goto err;
+
+ ret = regmap_read(wcove->regmap, USBC_IRQ2, &cc_irq2);
+ if (ret)
+ goto err;
+
+ ret = regmap_read(wcove->regmap, USBC_STATUS1, &status1);
+ if (ret)
+ goto err;
+
+ ret = regmap_read(wcove->regmap, USBC_STATUS2, &status2);
+ if (ret)
+ goto err;
+
+ ret = regmap_read(wcove->regmap, USBC_CC1_CTRL, &cc1_ctrl);
+ if (ret)
+ goto err;
+
+ ret = regmap_read(wcove->regmap, USBC_CC2_CTRL, &cc2_ctrl);
+ if (ret)
+ goto err;
+
+ if (cc_irq1) {
+ if (cc_irq1 & USBC_IRQ1_OVERTEMP)
+ dev_err(wcove->dev, "VCONN Switch Over Temperature!\n");
+ if (cc_irq1 & USBC_IRQ1_SHORT)
+ dev_err(wcove->dev, "VCONN Switch Short Circuit!\n");
+ ret = regmap_write(wcove->regmap, USBC_IRQ1, cc_irq1);
+ if (ret)
+ goto err;
+ }
+
+ if (cc_irq2) {
+ ret = regmap_write(wcove->regmap, USBC_IRQ2, cc_irq2);
+ if (ret)
+ goto err;
+ /*
+ * Ignoring any PD communication interrupts until the PD support
+ * is available
+ */
+ if (cc_irq2 & ~USBC_IRQ2_CC_CHANGE) {
+ dev_WARN(wcove->dev, "USB PD handling missing\n");
+ goto err;
+ }
+ }
+
+ if (status1 & USBC_STATUS1_DET_ONGOING)
+ goto out;
+
+ if (USBC_STATUS1_RSLT(status1) == USBC_RSLT_NOTHING) {
+ if (wcove->partner) {
+ typec_unregister_partner(wcove->partner);
+ wcove->partner = NULL;
+ }
+
+ wcove_typec_func(wcove, WCOVE_FUNC_ORIENTATION,
+ WCOVE_ORIENTATION_NORMAL);
+
+ /* This makes sure the device controller is disconnected */
+ wcove_typec_func(wcove, WCOVE_FUNC_ROLE, WCOVE_ROLE_HOST);
+
+ /* Port to default role */
+ typec_set_data_role(wcove->port, TYPEC_DEVICE);
+ typec_set_pwr_role(wcove->port, TYPEC_SINK);
+ typec_set_pwr_opmode(wcove->port, TYPEC_PWR_MODE_USB);
+
+ goto out;
+ }
+
+ if (wcove->partner)
+ goto out;
+
+ switch (USBC_STATUS1_ORIENT(status1)) {
+ case USBC_ORIENT_NORMAL:
+ wcove_typec_func(wcove, WCOVE_FUNC_ORIENTATION,
+ WCOVE_ORIENTATION_NORMAL);
+ break;
+ case USBC_ORIENT_REVERSE:
+ wcove_typec_func(wcove, WCOVE_FUNC_ORIENTATION,
+ WCOVE_ORIENTATION_REVERSE);
+ default:
+ break;
+ }
+
+ memset(&partner, 0, sizeof(partner));
+
+ switch (USBC_STATUS1_RSLT(status1)) {
+ case USBC_RSLT_SRC_DEFAULT:
+ typec_set_pwr_opmode(wcove->port, TYPEC_PWR_MODE_USB);
+ break;
+ case USBC_RSLT_SRC_1_5A:
+ typec_set_pwr_opmode(wcove->port, TYPEC_PWR_MODE_1_5A);
+ break;
+ case USBC_RSLT_SRC_3_0A:
+ typec_set_pwr_opmode(wcove->port, TYPEC_PWR_MODE_3_0A);
+ break;
+ case USBC_RSLT_SNK:
+ role = TYPEC_SOURCE;
+ break;
+ case USBC_RSLT_DEBUG_ACC:
+ partner.accessory = TYPEC_ACCESSORY_DEBUG;
+ break;
+ case USBC_RSLT_AUDIO_ACC:
+ partner.accessory = TYPEC_ACCESSORY_AUDIO;
+ break;
+ default:
+ dev_WARN(wcove->dev, "%s Undefined result\n", __func__);
+ goto err;
+ }
+
+ if (role == TYPEC_SINK) {
+ wcove_typec_func(wcove, WCOVE_FUNC_ROLE, WCOVE_ROLE_DEVICE);
+ typec_set_data_role(wcove->port, TYPEC_DEVICE);
+ typec_set_pwr_role(wcove->port, TYPEC_SINK);
+ } else {
+ wcove_typec_func(wcove, WCOVE_FUNC_ROLE, WCOVE_ROLE_HOST);
+ typec_set_pwr_role(wcove->port, TYPEC_SOURCE);
+ typec_set_data_role(wcove->port, TYPEC_HOST);
+ }
+
+ wcove->partner = typec_register_partner(wcove->port, &partner);
+ if (!wcove->partner)
+ dev_err(wcove->dev, "failed register partner\n");
+out:
+ /* If either CC pin is requesting VCONN, we turn it on */
+ if ((cc1_ctrl & USBC_CC_CTRL_VCONN_EN) ||
+ (cc2_ctrl & USBC_CC_CTRL_VCONN_EN))
+ wcove_typec_func(wcove, WCOVE_FUNC_DRIVE_VCONN, true);
+ else
+ wcove_typec_func(wcove, WCOVE_FUNC_DRIVE_VCONN, false);
+
+ /* Relying on the FSM to know when we need to drive VBUS. */
+ wcove_typec_func(wcove, WCOVE_FUNC_DRIVE_VBUS,
+ !!(status2 & USBC_STATUS2_VBUS_REQ));
+err:
+ /* REVISIT: Clear WhiskeyCove CHGR Type-C interrupt */
+ regmap_write(wcove->regmap, WCOVE_CHGRIRQ0, BIT(5));
+
+ mutex_unlock(&wcove->lock);
+ return IRQ_HANDLED;
+}
+
+static int wcove_typec_probe(struct platform_device *pdev)
+{
+ struct intel_soc_pmic *pmic = dev_get_drvdata(pdev->dev.parent);
+ struct wcove_typec *wcove;
+ unsigned int val;
+ int ret;
+
+ wcove = devm_kzalloc(&pdev->dev, sizeof(*wcove), GFP_KERNEL);
+ if (!wcove)
+ return -ENOMEM;
+
+ mutex_init(&wcove->lock);
+ wcove->dev = &pdev->dev;
+ wcove->regmap = pmic->regmap;
+
+ ret = regmap_irq_get_virq(pmic->irq_chip_data_level2,
+ platform_get_irq(pdev, 0));
+ if (ret < 0)
+ return ret;
+
+ ret = devm_request_threaded_irq(&pdev->dev, ret, NULL,
+ wcove_typec_irq, IRQF_ONESHOT,
+ "wcove_typec", wcove);
+ if (ret)
+ return ret;
+
+ if (!acpi_check_dsm(ACPI_HANDLE(&pdev->dev), uuid.b, 0, 0x1f)) {
+ dev_err(&pdev->dev, "Missing _DSM functions\n");
+ return -ENODEV;
+ }
+
+ wcove->cap.type = TYPEC_PORT_DRP;
+ wcove->cap.revision = USB_TYPEC_REV_1_1;
+ wcove->cap.prefer_role = TYPEC_NO_PREFERRED_ROLE;
+
+ /* Make sure the PD PHY is disabled until USB PD is available */
+ regmap_read(wcove->regmap, USBC_CONTROL3, &val);
+ regmap_write(wcove->regmap, USBC_CONTROL3, val | USBC_CONTROL3_PD_DIS);
+
+ /* DRP mode without accessory support */
+ regmap_read(wcove->regmap, USBC_CONTROL1, &val);
+ regmap_write(wcove->regmap, USBC_CONTROL1, USBC_CONTROL1_MODE_DRP(val));
+
+ wcove->port = typec_register_port(&pdev->dev, &wcove->cap);
+ if (!wcove->port)
+ return -ENODEV;
+
+ /* Unmask everything */
+ regmap_read(wcove->regmap, USBC_IRQMASK1, &val);
+ regmap_write(wcove->regmap, USBC_IRQMASK1, val & ~USBC_IRQMASK1_ALL);
+ regmap_read(wcove->regmap, USBC_IRQMASK2, &val);
+ regmap_write(wcove->regmap, USBC_IRQMASK2, val & ~USBC_IRQMASK2_ALL);
+
+ platform_set_drvdata(pdev, wcove);
+ return 0;
+}
+
+static int wcove_typec_remove(struct platform_device *pdev)
+{
+ struct wcove_typec *wcove = platform_get_drvdata(pdev);
+ unsigned int val;
+
+ /* Mask everything */
+ regmap_read(wcove->regmap, USBC_IRQMASK1, &val);
+ regmap_write(wcove->regmap, USBC_IRQMASK1, val | USBC_IRQMASK1_ALL);
+ regmap_read(wcove->regmap, USBC_IRQMASK2, &val);
+ regmap_write(wcove->regmap, USBC_IRQMASK2, val | USBC_IRQMASK2_ALL);
+
+ typec_unregister_partner(wcove->partner);
+ typec_unregister_port(wcove->port);
+ return 0;
+}
+
+static struct platform_driver wcove_typec_driver = {
+ .driver = {
+ .name = "bxt_wcove_usbc",
+ },
+ .probe = wcove_typec_probe,
+ .remove = wcove_typec_remove,
+};
+
+module_platform_driver(wcove_typec_driver);
+
+MODULE_AUTHOR("Intel Corporation");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("WhiskeyCove PMIC USB Type-C PHY driver");
+MODULE_ALIAS("platform:bxt_wcove_usbc");
diff --git a/drivers/usb/usb-skeleton.c b/drivers/usb/usb-skeleton.c
index 5133a0792eb0..bb0bd732e29a 100644
--- a/drivers/usb/usb-skeleton.c
+++ b/drivers/usb/usb-skeleton.c
@@ -491,16 +491,14 @@ static int skel_probe(struct usb_interface *interface,
const struct usb_device_id *id)
{
struct usb_skel *dev;
- struct usb_host_interface *iface_desc;
- struct usb_endpoint_descriptor *endpoint;
- size_t buffer_size;
- int i;
- int retval = -ENOMEM;
+ struct usb_endpoint_descriptor *bulk_in, *bulk_out;
+ int retval;
/* allocate memory for our device state and initialize it */
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev)
- goto error;
+ return -ENOMEM;
+
kref_init(&dev->kref);
sema_init(&dev->limit_sem, WRITES_IN_FLIGHT);
mutex_init(&dev->io_mutex);
@@ -513,36 +511,29 @@ static int skel_probe(struct usb_interface *interface,
/* set up the endpoint information */
/* use only the first bulk-in and bulk-out endpoints */
- iface_desc = interface->cur_altsetting;
- for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
- endpoint = &iface_desc->endpoint[i].desc;
-
- if (!dev->bulk_in_endpointAddr &&
- usb_endpoint_is_bulk_in(endpoint)) {
- /* we found a bulk in endpoint */
- buffer_size = usb_endpoint_maxp(endpoint);
- dev->bulk_in_size = buffer_size;
- dev->bulk_in_endpointAddr = endpoint->bEndpointAddress;
- dev->bulk_in_buffer = kmalloc(buffer_size, GFP_KERNEL);
- if (!dev->bulk_in_buffer)
- goto error;
- dev->bulk_in_urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!dev->bulk_in_urb)
- goto error;
- }
-
- if (!dev->bulk_out_endpointAddr &&
- usb_endpoint_is_bulk_out(endpoint)) {
- /* we found a bulk out endpoint */
- dev->bulk_out_endpointAddr = endpoint->bEndpointAddress;
- }
- }
- if (!(dev->bulk_in_endpointAddr && dev->bulk_out_endpointAddr)) {
+ retval = usb_find_common_endpoints(interface->cur_altsetting,
+ &bulk_in, &bulk_out, NULL, NULL);
+ if (retval) {
dev_err(&interface->dev,
"Could not find both bulk-in and bulk-out endpoints\n");
goto error;
}
+ dev->bulk_in_size = usb_endpoint_maxp(bulk_in);
+ dev->bulk_in_endpointAddr = bulk_in->bEndpointAddress;
+ dev->bulk_in_buffer = kmalloc(dev->bulk_in_size, GFP_KERNEL);
+ if (!dev->bulk_in_buffer) {
+ retval = -ENOMEM;
+ goto error;
+ }
+ dev->bulk_in_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!dev->bulk_in_urb) {
+ retval = -ENOMEM;
+ goto error;
+ }
+
+ dev->bulk_out_endpointAddr = bulk_out->bEndpointAddress;
+
/* save our data pointer in this interface device */
usb_set_intfdata(interface, dev);
@@ -563,9 +554,9 @@ static int skel_probe(struct usb_interface *interface,
return 0;
error:
- if (dev)
- /* this frees allocated memory */
- kref_put(&dev->kref, skel_delete);
+ /* this frees allocated memory */
+ kref_put(&dev->kref, skel_delete);
+
return retval;
}
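
Both the usb-storage and usb-skeleton conversions above switch to the new endpoint lookup helpers. A minimal, hypothetical probe fragment using the same helpers might look like this; my_probe() and the decision to treat the interrupt endpoint as optional are assumptions, not taken from the patch.

#include <linux/usb.h>

static int my_probe(struct usb_interface *intf,
		    const struct usb_device_id *id)
{
	struct usb_host_interface *alt = intf->cur_altsetting;
	struct usb_endpoint_descriptor *bulk_in, *bulk_out, *int_in;
	int ret;

	/* One call replaces the open-coded walk over alt->endpoint[]. */
	ret = usb_find_common_endpoints(alt, &bulk_in, &bulk_out, NULL, NULL);
	if (ret) {
		dev_err(&intf->dev, "required bulk endpoints not found\n");
		return ret;
	}

	/* Optional endpoints can be looked up separately. */
	ret = usb_find_int_in_endpoint(alt, &int_in);
	if (ret)
		int_in = NULL;	/* treated as optional in this sketch */

	dev_info(&intf->dev, "bulk-in maxp %d\n", usb_endpoint_maxp(bulk_in));
	return 0;
}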
diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c
index e4cb9f0625e8..5d8b2c261940 100644
--- a/drivers/usb/usbip/vhci_hcd.c
+++ b/drivers/usb/usbip/vhci_hcd.c
@@ -430,36 +430,8 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
return retval;
}
-static struct vhci_device *get_vdev(struct usb_device *udev)
+static void vhci_tx_urb(struct urb *urb, struct vhci_device *vdev)
{
- struct platform_device *pdev;
- struct usb_hcd *hcd;
- struct vhci_hcd *vhci;
- int pdev_nr, rhport;
-
- if (!udev)
- return NULL;
-
- for (pdev_nr = 0; pdev_nr < vhci_num_controllers; pdev_nr++) {
- pdev = *(vhci_pdevs + pdev_nr);
- if (pdev == NULL)
- continue;
- hcd = platform_get_drvdata(pdev);
- if (hcd == NULL)
- continue;
- vhci = hcd_to_vhci(hcd);
- for (rhport = 0; rhport < VHCI_HC_PORTS; rhport++) {
- if (vhci->vdev[rhport].udev == udev)
- return &vhci->vdev[rhport];
- }
- }
-
- return NULL;
-}
-
-static void vhci_tx_urb(struct urb *urb)
-{
- struct vhci_device *vdev = get_vdev(urb->dev);
struct vhci_priv *priv;
struct vhci_hcd *vhci;
unsigned long flags;
@@ -601,7 +573,7 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
}
out:
- vhci_tx_urb(urb);
+ vhci_tx_urb(urb, vdev);
spin_unlock_irqrestore(&vhci->lock, flags);
return 0;
diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c
index cf3de91fbfe7..63112c36ab2d 100644
--- a/drivers/vfio/vfio_iommu_spapr_tce.c
+++ b/drivers/vfio/vfio_iommu_spapr_tce.c
@@ -198,6 +198,11 @@ static long tce_iommu_register_pages(struct tce_container *container,
return ret;
tcemem = kzalloc(sizeof(*tcemem), GFP_KERNEL);
+ if (!tcemem) {
+ mm_iommu_put(container->mm, mem);
+ return -ENOMEM;
+ }
+
tcemem->mem = mem;
list_add(&tcemem->next, &container->prereg_list);
@@ -680,7 +685,7 @@ static void tce_iommu_free_table(struct tce_container *container,
unsigned long pages = tbl->it_allocated_size >> PAGE_SHIFT;
tce_iommu_userspace_view_free(tbl, container->mm);
- tbl->it_ops->free(tbl);
+ iommu_tce_table_put(tbl);
decrement_locked_vm(container->mm, pages);
}
@@ -1335,8 +1340,16 @@ static int tce_iommu_attach_group(void *iommu_data,
if (!table_group->ops || !table_group->ops->take_ownership ||
!table_group->ops->release_ownership) {
+ if (container->v2) {
+ ret = -EPERM;
+ goto unlock_exit;
+ }
ret = tce_iommu_take_ownership(container, table_group);
} else {
+ if (!container->v2) {
+ ret = -EPERM;
+ goto unlock_exit;
+ }
ret = tce_iommu_take_ownership_ddw(container, table_group);
if (!tce_groups_attached(container) && !container->tables[0])
container->def_window_pending = true;
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 32d2633092a3..8549cb111627 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -246,69 +246,46 @@ static int vfio_iova_put_vfio_pfn(struct vfio_dma *dma, struct vfio_pfn *vpfn)
return ret;
}
-struct vwork {
- struct mm_struct *mm;
- long npage;
- struct work_struct work;
-};
-
-/* delayed decrement/increment for locked_vm */
-static void vfio_lock_acct_bg(struct work_struct *work)
+static int vfio_lock_acct(struct task_struct *task, long npage, bool *lock_cap)
{
- struct vwork *vwork = container_of(work, struct vwork, work);
- struct mm_struct *mm;
-
- mm = vwork->mm;
- down_write(&mm->mmap_sem);
- mm->locked_vm += vwork->npage;
- up_write(&mm->mmap_sem);
- mmput(mm);
- kfree(vwork);
-}
-
-static void vfio_lock_acct(struct task_struct *task, long npage)
-{
- struct vwork *vwork;
struct mm_struct *mm;
bool is_current;
+ int ret;
if (!npage)
- return;
+ return 0;
is_current = (task->mm == current->mm);
mm = is_current ? task->mm : get_task_mm(task);
if (!mm)
- return; /* process exited */
+ return -ESRCH; /* process exited */
- if (down_write_trylock(&mm->mmap_sem)) {
- mm->locked_vm += npage;
- up_write(&mm->mmap_sem);
- if (!is_current)
- mmput(mm);
- return;
- }
+ ret = down_write_killable(&mm->mmap_sem);
+ if (!ret) {
+ if (npage > 0) {
+ if (lock_cap ? !*lock_cap :
+ !has_capability(task, CAP_IPC_LOCK)) {
+ unsigned long limit;
+
+ limit = task_rlimit(task,
+ RLIMIT_MEMLOCK) >> PAGE_SHIFT;
+
+ if (mm->locked_vm + npage > limit)
+ ret = -ENOMEM;
+ }
+ }
+
+ if (!ret)
+ mm->locked_vm += npage;
- if (is_current) {
- mm = get_task_mm(task);
- if (!mm)
- return;
+ up_write(&mm->mmap_sem);
}
- /*
- * Couldn't get mmap_sem lock, so must setup to update
- * mm->locked_vm later. If locked_vm were atomic, we
- * wouldn't need this silliness
- */
- vwork = kmalloc(sizeof(struct vwork), GFP_KERNEL);
- if (WARN_ON(!vwork)) {
+ if (!is_current)
mmput(mm);
- return;
- }
- INIT_WORK(&vwork->work, vfio_lock_acct_bg);
- vwork->mm = mm;
- vwork->npage = npage;
- schedule_work(&vwork->work);
+
+ return ret;
}
/*
@@ -403,10 +380,10 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,
* first page and all consecutive pages with the same locking.
*/
static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr,
- long npage, unsigned long *pfn_base)
+ long npage, unsigned long *pfn_base,
+ bool lock_cap, unsigned long limit)
{
- unsigned long limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
- bool lock_cap = capable(CAP_IPC_LOCK);
+ unsigned long pfn = 0;
long ret, pinned = 0, lock_acct = 0;
bool rsvd;
dma_addr_t iova = vaddr - dma->vaddr + dma->iova;
@@ -442,8 +419,6 @@ static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr,
/* Lock all the consecutive pages from pfn_base */
for (vaddr += PAGE_SIZE, iova += PAGE_SIZE; pinned < npage;
pinned++, vaddr += PAGE_SIZE, iova += PAGE_SIZE) {
- unsigned long pfn = 0;
-
ret = vaddr_get_pfn(current->mm, vaddr, dma->prot, &pfn);
if (ret)
break;
@@ -460,14 +435,25 @@ static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr,
put_pfn(pfn, dma->prot);
pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n",
__func__, limit << PAGE_SHIFT);
- break;
+ ret = -ENOMEM;
+ goto unpin_out;
}
lock_acct++;
}
}
out:
- vfio_lock_acct(current, lock_acct);
+ ret = vfio_lock_acct(current, lock_acct, &lock_cap);
+
+unpin_out:
+ if (ret) {
+ if (!rsvd) {
+ for (pfn = *pfn_base ; pinned ; pfn++, pinned--)
+ put_pfn(pfn, dma->prot);
+ }
+
+ return ret;
+ }
return pinned;
}
@@ -488,7 +474,7 @@ static long vfio_unpin_pages_remote(struct vfio_dma *dma, dma_addr_t iova,
}
if (do_accounting)
- vfio_lock_acct(dma->task, locked - unlocked);
+ vfio_lock_acct(dma->task, locked - unlocked, NULL);
return unlocked;
}
@@ -496,37 +482,26 @@ static long vfio_unpin_pages_remote(struct vfio_dma *dma, dma_addr_t iova,
static int vfio_pin_page_external(struct vfio_dma *dma, unsigned long vaddr,
unsigned long *pfn_base, bool do_accounting)
{
- unsigned long limit;
- bool lock_cap = has_capability(dma->task, CAP_IPC_LOCK);
struct mm_struct *mm;
int ret;
- bool rsvd;
mm = get_task_mm(dma->task);
if (!mm)
return -ENODEV;
ret = vaddr_get_pfn(mm, vaddr, dma->prot, pfn_base);
- if (ret)
- goto pin_page_exit;
-
- rsvd = is_invalid_reserved_pfn(*pfn_base);
- limit = task_rlimit(dma->task, RLIMIT_MEMLOCK) >> PAGE_SHIFT;
-
- if (!rsvd && !lock_cap && mm->locked_vm + 1 > limit) {
- put_pfn(*pfn_base, dma->prot);
- pr_warn("%s: Task %s (%d) RLIMIT_MEMLOCK (%ld) exceeded\n",
- __func__, dma->task->comm, task_pid_nr(dma->task),
- limit << PAGE_SHIFT);
- ret = -ENOMEM;
- goto pin_page_exit;
+ if (!ret && do_accounting && !is_invalid_reserved_pfn(*pfn_base)) {
+ ret = vfio_lock_acct(dma->task, 1, NULL);
+ if (ret) {
+ put_pfn(*pfn_base, dma->prot);
+ if (ret == -ENOMEM)
+ pr_warn("%s: Task %s (%d) RLIMIT_MEMLOCK "
+ "(%ld) exceeded\n", __func__,
+ dma->task->comm, task_pid_nr(dma->task),
+ task_rlimit(dma->task, RLIMIT_MEMLOCK));
+ }
}
- if (!rsvd && do_accounting)
- vfio_lock_acct(dma->task, 1);
- ret = 1;
-
-pin_page_exit:
mmput(mm);
return ret;
}
@@ -543,7 +518,7 @@ static int vfio_unpin_page_external(struct vfio_dma *dma, dma_addr_t iova,
unlocked = vfio_iova_put_vfio_pfn(dma, vpfn);
if (do_accounting)
- vfio_lock_acct(dma->task, -unlocked);
+ vfio_lock_acct(dma->task, -unlocked, NULL);
return unlocked;
}
@@ -606,10 +581,8 @@ static int vfio_iommu_type1_pin_pages(void *iommu_data,
remote_vaddr = dma->vaddr + iova - dma->iova;
ret = vfio_pin_page_external(dma, remote_vaddr, &phys_pfn[i],
do_accounting);
- if (ret <= 0) {
- WARN_ON(!ret);
+ if (ret)
goto pin_unwind;
- }
ret = vfio_add_to_pfn_list(dma, iova, phys_pfn[i]);
if (ret) {
@@ -740,7 +713,7 @@ static long vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma,
dma->iommu_mapped = false;
if (do_accounting) {
- vfio_lock_acct(dma->task, -unlocked);
+ vfio_lock_acct(dma->task, -unlocked, NULL);
return 0;
}
return unlocked;
@@ -951,13 +924,15 @@ static int vfio_pin_map_dma(struct vfio_iommu *iommu, struct vfio_dma *dma,
unsigned long vaddr = dma->vaddr;
size_t size = map_size;
long npage;
- unsigned long pfn;
+ unsigned long pfn, limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
+ bool lock_cap = capable(CAP_IPC_LOCK);
int ret = 0;
while (size) {
/* Pin a contiguous chunk of memory */
npage = vfio_pin_pages_remote(dma, vaddr + dma->size,
- size >> PAGE_SHIFT, &pfn);
+ size >> PAGE_SHIFT, &pfn,
+ lock_cap, limit);
if (npage <= 0) {
WARN_ON(!npage);
ret = (int)npage;
@@ -1067,6 +1042,8 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu,
{
struct vfio_domain *d;
struct rb_node *n;
+ unsigned long limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
+ bool lock_cap = capable(CAP_IPC_LOCK);
int ret;
/* Arbitrarily pick the first domain in the list for lookups */
@@ -1113,7 +1090,8 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu,
npage = vfio_pin_pages_remote(dma, vaddr,
n >> PAGE_SHIFT,
- &pfn);
+ &pfn, lock_cap,
+ limit);
if (npage <= 0) {
WARN_ON(!npage);
ret = (int)npage;
@@ -1382,7 +1360,7 @@ static void vfio_iommu_unmap_unpin_reaccount(struct vfio_iommu *iommu)
if (!is_invalid_reserved_pfn(vpfn->pfn))
locked++;
}
- vfio_lock_acct(dma->task, locked - unlocked);
+ vfio_lock_acct(dma->task, locked - unlocked, NULL);
}
}
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 9b519897cc17..f61f852d6cfd 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -817,12 +817,9 @@ static int vhost_net_open(struct inode *inode, struct file *f)
struct vhost_virtqueue **vqs;
int i;
- n = kmalloc(sizeof *n, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
- if (!n) {
- n = vmalloc(sizeof *n);
- if (!n)
- return -ENOMEM;
- }
+ n = kvmalloc(sizeof *n, GFP_KERNEL | __GFP_REPEAT);
+ if (!n)
+ return -ENOMEM;
vqs = kmalloc(VHOST_NET_VQ_MAX * sizeof(*vqs), GFP_KERNEL);
if (!vqs) {
kvfree(n);
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index f0ba362d4c10..042030e5a035 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -534,18 +534,9 @@ err_mm:
}
EXPORT_SYMBOL_GPL(vhost_dev_set_owner);
-static void *vhost_kvzalloc(unsigned long size)
-{
- void *n = kzalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
-
- if (!n)
- n = vzalloc(size);
- return n;
-}
-
struct vhost_umem *vhost_dev_reset_owner_prepare(void)
{
- return vhost_kvzalloc(sizeof(struct vhost_umem));
+ return kvzalloc(sizeof(struct vhost_umem), GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(vhost_dev_reset_owner_prepare);
@@ -1276,7 +1267,7 @@ EXPORT_SYMBOL_GPL(vhost_vq_access_ok);
static struct vhost_umem *vhost_umem_alloc(void)
{
- struct vhost_umem *umem = vhost_kvzalloc(sizeof(*umem));
+ struct vhost_umem *umem = kvzalloc(sizeof(*umem), GFP_KERNEL);
if (!umem)
return NULL;
@@ -1302,7 +1293,7 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
return -EOPNOTSUPP;
if (mem.nregions > max_mem_regions)
return -E2BIG;
- newmem = vhost_kvzalloc(size + mem.nregions * sizeof(*m->regions));
+ newmem = kvzalloc(size + mem.nregions * sizeof(*m->regions), GFP_KERNEL);
if (!newmem)
return -ENOMEM;
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index d939ac1a4997..3acef3c5d8ed 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -508,12 +508,9 @@ static int vhost_vsock_dev_open(struct inode *inode, struct file *file)
/* This struct is large and allocation could fail, fall back to vmalloc
* if there is no other way.
*/
- vsock = kzalloc(sizeof(*vsock), GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
- if (!vsock) {
- vsock = vmalloc(sizeof(*vsock));
- if (!vsock)
- return -ENOMEM;
- }
+ vsock = kvmalloc(sizeof(*vsock), GFP_KERNEL | __GFP_REPEAT);
+ if (!vsock)
+ return -ENOMEM;
vqs = kmalloc_array(ARRAY_SIZE(vsock->vqs), sizeof(*vqs), GFP_KERNEL);
if (!vqs) {
diff --git a/drivers/video/fbdev/intelfb/intelfbdrv.c b/drivers/video/fbdev/intelfb/intelfbdrv.c
index ff2a5d2023e1..6b444400a86c 100644
--- a/drivers/video/fbdev/intelfb/intelfbdrv.c
+++ b/drivers/video/fbdev/intelfb/intelfbdrv.c
@@ -934,7 +934,7 @@ static __inline__ int var_to_refresh(const struct fb_var_screeninfo *var)
}
/***************************************************************
- * Various intialisation functions *
+ * Various initialisation functions *
***************************************************************/
static void get_initial_mode(struct intelfb_info *dinfo)
diff --git a/drivers/video/fbdev/vermilion/vermilion.c b/drivers/video/fbdev/vermilion/vermilion.c
index 1c1e95a0b8fa..ce4c4729a5e8 100644
--- a/drivers/video/fbdev/vermilion/vermilion.c
+++ b/drivers/video/fbdev/vermilion/vermilion.c
@@ -37,7 +37,7 @@
#include <linux/mm.h>
#include <linux/fb.h>
#include <linux/pci.h>
-#include <asm/cacheflush.h>
+#include <asm/set_memory.h>
#include <asm/tlbflush.h>
#include <linux/mmzone.h>
diff --git a/drivers/virt/fsl_hypervisor.c b/drivers/virt/fsl_hypervisor.c
index 150ce2abf6c8..d3eca879a0a8 100644
--- a/drivers/virt/fsl_hypervisor.c
+++ b/drivers/virt/fsl_hypervisor.c
@@ -243,11 +243,8 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p)
sg_list = PTR_ALIGN(sg_list_unaligned, sizeof(struct fh_sg_list));
/* Get the physical addresses of the source buffer */
- down_read(&current->mm->mmap_sem);
- num_pinned = get_user_pages(param.local_vaddr - lb_offset,
- num_pages, (param.source == -1) ? 0 : FOLL_WRITE,
- pages, NULL);
- up_read(&current->mm->mmap_sem);
+ num_pinned = get_user_pages_unlocked(param.local_vaddr - lb_offset,
+ num_pages, pages, (param.source == -1) ? 0 : FOLL_WRITE);
if (num_pinned != num_pages) {
/* get_user_pages() failed */
diff --git a/drivers/vme/vme.c b/drivers/vme/vme.c
index 0035cf79760a..6a3ead42aba8 100644
--- a/drivers/vme/vme.c
+++ b/drivers/vme/vme.c
@@ -76,9 +76,16 @@ static struct vme_bridge *find_bridge(struct vme_resource *resource)
}
}
-/*
+/**
+ * vme_alloc_consistent - Allocate contiguous memory.
+ * @resource: Pointer to VME resource.
+ * @size: Size of allocation required.
+ * @dma: Pointer to variable to store physical address of allocation.
+ *
* Allocate a contiguous block of memory for use by the driver. This is used to
* create the buffers for the slave windows.
+ *
+ * Return: Virtual address of allocation on success, NULL on failure.
*/
void *vme_alloc_consistent(struct vme_resource *resource, size_t size,
dma_addr_t *dma)
@@ -111,8 +118,14 @@ void *vme_alloc_consistent(struct vme_resource *resource, size_t size,
}
EXPORT_SYMBOL(vme_alloc_consistent);
-/*
- * Free previously allocated contiguous block of memory.
+/**
+ * vme_free_consistent - Free previously allocated memory.
+ * @resource: Pointer to VME resource.
+ * @size: Size of allocation to free.
+ * @vaddr: Virtual address of allocation.
+ * @dma: Physical address of allocation.
+ *
+ * Free previously allocated block of contiguous memory.
*/
void vme_free_consistent(struct vme_resource *resource, size_t size,
void *vaddr, dma_addr_t dma)
@@ -145,6 +158,16 @@ void vme_free_consistent(struct vme_resource *resource, size_t size,
}
EXPORT_SYMBOL(vme_free_consistent);
+/**
+ * vme_get_size - Helper function returning size of a VME window
+ * @resource: Pointer to VME slave or master resource.
+ *
+ * Determine the size of the VME window provided. This is a helper
+ * function, wrapping the call to vme_master_get or vme_slave_get
+ * depending on the type of window resource handed to it.
+ *
+ * Return: Size of the window on success, zero on failure.
+ */
size_t vme_get_size(struct vme_resource *resource)
{
int enabled, retval;
@@ -259,9 +282,16 @@ static u32 vme_get_aspace(int am)
return 0;
}
-/*
- * Request a slave image with specific attributes, return some unique
- * identifier.
+/**
+ * vme_slave_request - Request a VME slave window resource.
+ * @vdev: Pointer to VME device struct vme_dev assigned to driver instance.
+ * @address: Required VME address space.
+ * @cycle: Required VME data transfer cycle type.
+ *
+ * Request use of a VME window resource capable of being set for the requested
+ * address space and data transfer cycle.
+ *
+ * Return: Pointer to VME resource on success, NULL on failure.
*/
struct vme_resource *vme_slave_request(struct vme_dev *vdev, u32 address,
u32 cycle)
@@ -327,6 +357,23 @@ err_bus:
}
EXPORT_SYMBOL(vme_slave_request);
+/**
+ * vme_slave_set - Set VME slave window configuration.
+ * @resource: Pointer to VME slave resource.
+ * @enabled: State to which the window should be configured.
+ * @vme_base: Base address for the window.
+ * @size: Size of the VME window.
+ * @buf_base: Base address of buffer used to provide VME slave window storage.
+ * @aspace: VME address space for the VME window.
+ * @cycle: VME data transfer cycle type for the VME window.
+ *
+ * Set configuration for provided VME slave window.
+ *
+ * Return: Zero on success, -EINVAL if operation is not supported on this
+ * device, if an invalid resource has been provided or invalid
+ * attributes are provided. Hardware specific errors may also be
+ * returned.
+ */
int vme_slave_set(struct vme_resource *resource, int enabled,
unsigned long long vme_base, unsigned long long size,
dma_addr_t buf_base, u32 aspace, u32 cycle)
@@ -362,6 +409,21 @@ int vme_slave_set(struct vme_resource *resource, int enabled,
}
EXPORT_SYMBOL(vme_slave_set);
+/**
+ * vme_slave_get - Retrieve VME slave window configuration.
+ * @resource: Pointer to VME slave resource.
+ * @enabled: Pointer to variable for storing state.
+ * @vme_base: Pointer to variable for storing window base address.
+ * @size: Pointer to variable for storing window size.
+ * @buf_base: Pointer to variable for storing slave buffer base address.
+ * @aspace: Pointer to variable for storing VME address space.
+ * @cycle: Pointer to variable for storing VME data transfer cycle type.
+ *
+ * Return configuration for provided VME slave window.
+ *
+ * Return: Zero on success, -EINVAL if operation is not supported on this
+ * device or if an invalid resource has been provided.
+ */
int vme_slave_get(struct vme_resource *resource, int *enabled,
unsigned long long *vme_base, unsigned long long *size,
dma_addr_t *buf_base, u32 *aspace, u32 *cycle)
@@ -386,6 +448,12 @@ int vme_slave_get(struct vme_resource *resource, int *enabled,
}
EXPORT_SYMBOL(vme_slave_get);
+/**
+ * vme_slave_free - Free VME slave window
+ * @resource: Pointer to VME slave resource.
+ *
+ * Free the provided slave resource so that it may be reallocated.
+ */
void vme_slave_free(struct vme_resource *resource)
{
struct vme_slave_resource *slave_image;
@@ -415,9 +483,17 @@ void vme_slave_free(struct vme_resource *resource)
}
EXPORT_SYMBOL(vme_slave_free);
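For context, a minimal usage sketch of the slave-window API documented above (not part of this patch): it assumes a driver holds a struct vme_dev *vdev from its probe callback, and that the VME_A24/VME_SCT/VME_USER/VME_DATA flags and the 0x100000 base address, taken as typical <linux/vme.h> values, suit the hardware; error handling is trimmed.

#include <linux/vme.h>	/* assumed to declare the vme_* API and flag macros */

/* Hypothetical helper, not part of this patch: back and enable a 64 KiB A24 slave window. */
static int demo_slave_window(struct vme_dev *vdev)
{
	struct vme_resource *res;
	dma_addr_t buf_dma;
	void *buf;
	int ret;

	res = vme_slave_request(vdev, VME_A24, VME_SCT | VME_USER | VME_DATA);
	if (!res)
		return -ENODEV;

	/* Backing storage for the window, as described for vme_alloc_consistent(). */
	buf = vme_alloc_consistent(res, 0x10000, &buf_dma);
	if (!buf) {
		vme_slave_free(res);
		return -ENOMEM;
	}

	/* Enable the window at VME address 0x100000. */
	ret = vme_slave_set(res, 1, 0x100000, 0x10000, buf_dma,
			    VME_A24, VME_SCT | VME_USER | VME_DATA);
	if (ret) {
		vme_free_consistent(res, 0x10000, buf, buf_dma);
		vme_slave_free(res);
	}
	return ret;
}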
-/*
- * Request a master image with specific attributes, return some unique
- * identifier.
+/**
+ * vme_master_request - Request a VME master window resource.
+ * @vdev: Pointer to VME device struct vme_dev assigned to driver instance.
+ * @address: Required VME address space.
+ * @cycle: Required VME data transfer cycle type.
+ * @dwidth: Required VME data transfer width.
+ *
+ * Request use of a VME window resource capable of being set for the requested
+ * address space, data transfer cycle and width.
+ *
+ * Return: Pointer to VME resource on success, NULL on failure.
*/
struct vme_resource *vme_master_request(struct vme_dev *vdev, u32 address,
u32 cycle, u32 dwidth)
@@ -486,6 +562,23 @@ err_bus:
}
EXPORT_SYMBOL(vme_master_request);
+/**
+ * vme_master_set - Set VME master window configuration.
+ * @resource: Pointer to VME master resource.
+ * @enabled: State to which the window should be configured.
+ * @vme_base: Base address for the window.
+ * @size: Size of the VME window.
+ * @aspace: VME address space for the VME window.
+ * @cycle: VME data transfer cycle type for the VME window.
+ * @dwidth: VME data transfer width for the VME window.
+ *
+ * Set configuration for provided VME master window.
+ *
+ * Return: Zero on success, -EINVAL if operation is not supported on this
+ * device, if an invalid resource has been provided or invalid
+ * attributes are provided. Hardware specific errors may also be
+ * returned.
+ */
int vme_master_set(struct vme_resource *resource, int enabled,
unsigned long long vme_base, unsigned long long size, u32 aspace,
u32 cycle, u32 dwidth)
@@ -522,6 +615,21 @@ int vme_master_set(struct vme_resource *resource, int enabled,
}
EXPORT_SYMBOL(vme_master_set);
+/**
+ * vme_master_get - Retrieve VME master window configuration.
+ * @resource: Pointer to VME master resource.
+ * @enabled: Pointer to variable for storing state.
+ * @vme_base: Pointer to variable for storing window base address.
+ * @size: Pointer to variable for storing window size.
+ * @aspace: Pointer to variable for storing VME address space.
+ * @cycle: Pointer to variable for storing VME data transfer cycle type.
+ * @dwidth: Pointer to variable for storing VME data transfer width.
+ *
+ * Return configuration for provided VME master window.
+ *
+ * Return: Zero on success, -EINVAL if operation is not supported on this
+ * device or if an invalid resource has been provided.
+ */
int vme_master_get(struct vme_resource *resource, int *enabled,
unsigned long long *vme_base, unsigned long long *size, u32 *aspace,
u32 *cycle, u32 *dwidth)
@@ -546,8 +654,20 @@ int vme_master_get(struct vme_resource *resource, int *enabled,
}
EXPORT_SYMBOL(vme_master_get);
-/*
- * Read data out of VME space into a buffer.
+/**
+ * vme_master_read - Read data from VME space into a buffer.
+ * @resource: Pointer to VME master resource.
+ * @buf: Pointer to buffer where data should be transferred.
+ * @count: Number of bytes to transfer.
+ * @offset: Offset into VME master window at which to start transfer.
+ *
+ * Perform read of count bytes of data from location on VME bus which maps into
+ * the VME master window at offset to buf.
+ *
+ * Return: Number of bytes read, -EINVAL if resource is not a VME master
+ * resource or read operation is not supported. -EFAULT returned if
+ * invalid offset is provided. Hardware specific errors may also be
+ * returned.
*/
ssize_t vme_master_read(struct vme_resource *resource, void *buf, size_t count,
loff_t offset)
@@ -583,8 +703,20 @@ ssize_t vme_master_read(struct vme_resource *resource, void *buf, size_t count,
}
EXPORT_SYMBOL(vme_master_read);
-/*
- * Write data out to VME space from a buffer.
+/**
+ * vme_master_write - Write data out to VME space from a buffer.
+ * @resource: Pointer to VME master resource.
+ * @buf: Pointer to buffer holding data to transfer.
+ * @count: Number of bytes to transfer.
+ * @offset: Offset into VME master window at which to start transfer.
+ *
+ * Perform write of count bytes of data from buf to location on VME bus which
+ * maps into the VME master window at offset.
+ *
+ * Return: Number of bytes written, -EINVAL if resource is not a VME master
+ * resource or write operation is not supported. -EFAULT returned if
+ * invalid offset is provided. Hardware specific errors may also be
+ * returned.
*/
ssize_t vme_master_write(struct vme_resource *resource, void *buf,
size_t count, loff_t offset)
@@ -619,8 +751,24 @@ ssize_t vme_master_write(struct vme_resource *resource, void *buf,
}
EXPORT_SYMBOL(vme_master_write);
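Similarly, a hedged sketch of the master-window read/write path described above; it is not part of the patch, the address space, base address and data width are illustrative, and the probe-supplied vdev is assumed as before.

/* Hypothetical helper: write then read back one 32-bit word through a master window. */
static int demo_master_io(struct vme_dev *vdev)
{
	struct vme_resource *res;
	u32 val = 0xdeadbeef;
	int ret;

	res = vme_master_request(vdev, VME_A32, VME_SCT | VME_USER | VME_DATA,
				 VME_D32);
	if (!res)
		return -ENODEV;

	ret = vme_master_set(res, 1, 0x80000000ULL, 0x10000, VME_A32,
			     VME_SCT | VME_USER | VME_DATA, VME_D32);
	if (!ret) {
		/* Offsets are relative to the start of the master window. */
		if (vme_master_write(res, &val, sizeof(val), 0) == sizeof(val))
			vme_master_read(res, &val, sizeof(val), 0);
	}

	vme_master_free(res);
	return ret;
}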
-/*
- * Perform RMW cycle to provided location.
+/**
+ * vme_master_rmw - Perform read-modify-write cycle.
+ * @resource: Pointer to VME master resource.
+ * @mask: Bits to be compared and swapped in operation.
+ * @compare: Bits to be compared with data read from offset.
+ * @swap: Bits to be swapped in data read from offset.
+ * @offset: Offset into VME master window at which to perform operation.
+ *
+ * Perform read-modify-write cycle on provided location:
+ * - Location on VME bus is read.
+ * - Bits selected by mask are compared with compare.
+ * - Where a selected bit matches that in compare and is selected in swap,
+ * the bit is swapped.
+ * - Result written back to location on VME bus.
+ *
+ * Return: Bytes written on success, -EINVAL if resource is not a VME master
+ * resource or RMW operation is not supported. Hardware specific
+ * errors may also be returned.
*/
unsigned int vme_master_rmw(struct vme_resource *resource, unsigned int mask,
unsigned int compare, unsigned int swap, loff_t offset)
@@ -644,6 +792,17 @@ unsigned int vme_master_rmw(struct vme_resource *resource, unsigned int mask,
}
EXPORT_SYMBOL(vme_master_rmw);
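Putting the mask/compare/swap rules above together, a driver might use the call for an atomic test-and-set of a single bit. The sketch below is illustrative only; the mask, compare, swap and offset values are assumptions, not values taken from this patch.

/* Illustrative only: atomically set bit 0 at offset 0x20 of a master window
 * if it currently reads back as 0, per the mask/compare/swap rules above. */
static void demo_rmw(struct vme_resource *res)
{
	unsigned int mask = 0x1;	/* operate on bit 0 only */
	unsigned int compare = 0x0;	/* proceed when bit 0 is currently clear */
	unsigned int swap = 0x1;	/* flip bit 0 when the compare matches */

	vme_master_rmw(res, mask, compare, swap, 0x20);
}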
+/**
+ * vme_master_mmap - Mmap region of VME master window.
+ * @resource: Pointer to VME master resource.
+ * @vma: Pointer to definition of user mapping.
+ *
+ * Memory map a region of the VME master window into user space.
+ *
+ * Return: Zero on success, -EINVAL if resource is not a VME master
+ * resource or -EFAULT if map exceeds window size. Other generic mmap
+ * errors may also be returned.
+ */
int vme_master_mmap(struct vme_resource *resource, struct vm_area_struct *vma)
{
struct vme_master_resource *image;
@@ -670,6 +829,12 @@ int vme_master_mmap(struct vme_resource *resource, struct vm_area_struct *vma)
}
EXPORT_SYMBOL(vme_master_mmap);
+/**
+ * vme_master_free - Free VME master window
+ * @resource: Pointer to VME master resource.
+ *
+ * Free the provided master resource so that it may be reallocated.
+ */
void vme_master_free(struct vme_resource *resource)
{
struct vme_master_resource *master_image;
@@ -699,9 +864,15 @@ void vme_master_free(struct vme_resource *resource)
}
EXPORT_SYMBOL(vme_master_free);
-/*
- * Request a DMA controller with specific attributes, return some unique
- * identifier.
+/**
+ * vme_dma_request - Request a DMA controller.
+ * @vdev: Pointer to VME device struct vme_dev assigned to driver instance.
+ * @route: Required src/destination combination.
+ *
+ * Request a VME DMA controller with capability to perform transfers between
+ * requested source/destination combination.
+ *
+ * Return: Pointer to VME DMA resource on success, NULL on failure.
*/
struct vme_resource *vme_dma_request(struct vme_dev *vdev, u32 route)
{
@@ -768,8 +939,15 @@ err_bus:
}
EXPORT_SYMBOL(vme_dma_request);
-/*
- * Start new list
+/**
+ * vme_new_dma_list - Create new VME DMA list.
+ * @resource: Pointer to VME DMA resource.
+ *
+ * Create a new VME DMA list. It is the responsibility of the user to free
+ * the list once it is no longer required with vme_dma_list_free().
+ *
+ * Return: Pointer to new VME DMA list, NULL on allocation failure or invalid
+ * VME DMA resource.
*/
struct vme_dma_list *vme_new_dma_list(struct vme_resource *resource)
{
@@ -796,8 +974,16 @@ struct vme_dma_list *vme_new_dma_list(struct vme_resource *resource)
}
EXPORT_SYMBOL(vme_new_dma_list);
-/*
- * Create "Pattern" type attributes
+/**
+ * vme_dma_pattern_attribute - Create "Pattern" type VME DMA list attribute.
+ * @pattern: Value to be used as the pattern.
+ * @type: Type of pattern to be written.
+ *
+ * Create VME DMA list attribute for pattern generation. It is the
+ * responsibility of the user to free used attributes using
+ * vme_dma_free_attribute().
+ *
+ * Return: Pointer to VME DMA attribute, NULL on failure.
*/
struct vme_dma_attr *vme_dma_pattern_attribute(u32 pattern, u32 type)
{
@@ -831,8 +1017,15 @@ err_attr:
}
EXPORT_SYMBOL(vme_dma_pattern_attribute);
-/*
- * Create "PCI" type attributes
+/**
+ * vme_dma_pci_attribute - Create "PCI" type VME DMA list attribute.
+ * @address: PCI base address for DMA transfer.
+ *
+ * Create VME DMA list attribute pointing to a location on PCI for DMA
+ * transfers. It is the responsibility of the user to free used attributes
+ * using vme_dma_free_attribute().
+ *
+ * Return: Pointer to VME DMA attribute, NULL on failure.
*/
struct vme_dma_attr *vme_dma_pci_attribute(dma_addr_t address)
{
@@ -869,8 +1062,18 @@ err_attr:
}
EXPORT_SYMBOL(vme_dma_pci_attribute);
-/*
- * Create "VME" type attributes
+/**
+ * vme_dma_vme_attribute - Create "VME" type VME DMA list attribute.
+ * @address: VME base address for DMA transfer.
+ * @aspace: VME address space to use for DMA transfer.
+ * @cycle: VME bus cycle to use for DMA transfer.
+ * @dwidth: VME data width to use for DMA transfer.
+ *
+ * Create VME DMA list attribute pointing to a location on the VME bus for DMA
+ * transfers. It is the responsibility of the user to free used attributes
+ * using vme_dma_free_attribute().
+ *
+ * Return: Pointer to VME DMA attribute, NULL on failure.
*/
struct vme_dma_attr *vme_dma_vme_attribute(unsigned long long address,
u32 aspace, u32 cycle, u32 dwidth)
@@ -908,8 +1111,12 @@ err_attr:
}
EXPORT_SYMBOL(vme_dma_vme_attribute);
-/*
- * Free attribute
+/**
+ * vme_dma_free_attribute - Free DMA list attribute.
+ * @attributes: Pointer to DMA list attribute.
+ *
+ * Free VME DMA list attribute. VME DMA list attributes can be safely freed
+ * once vme_dma_list_add() has returned.
*/
void vme_dma_free_attribute(struct vme_dma_attr *attributes)
{
@@ -918,6 +1125,23 @@ void vme_dma_free_attribute(struct vme_dma_attr *attributes)
}
EXPORT_SYMBOL(vme_dma_free_attribute);
+/**
+ * vme_dma_list_add - Add entry to a VME DMA list.
+ * @list: Pointer to VME list.
+ * @src: Pointer to DMA list attribute to use as source.
+ * @dest: Pointer to DMA list attribute to use as destination.
+ * @count: Number of bytes to transfer.
+ *
+ * Add an entry to the provided VME DMA list. Entry requires pointers to source
+ * and destination DMA attributes and a count.
+ *
+ * Please note, the attributes supported as source and destinations for
+ * transfers are hardware dependent.
+ *
+ * Return: Zero on success, -EINVAL if operation is not supported on this
+ * device or if the link list has already been submitted for execution.
+ * Hardware specific errors also possible.
+ */
int vme_dma_list_add(struct vme_dma_list *list, struct vme_dma_attr *src,
struct vme_dma_attr *dest, size_t count)
{
@@ -942,6 +1166,16 @@ int vme_dma_list_add(struct vme_dma_list *list, struct vme_dma_attr *src,
}
EXPORT_SYMBOL(vme_dma_list_add);
+/**
+ * vme_dma_list_exec - Queue a VME DMA list for execution.
+ * @list: Pointer to VME list.
+ *
+ * Queue the provided VME DMA list for execution. The call will return once the
+ * list has been executed.
+ *
+ * Return: Zero on success, -EINVAL if operation is not supported on this
+ * device. Hardware specific errors also possible.
+ */
int vme_dma_list_exec(struct vme_dma_list *list)
{
struct vme_bridge *bridge = list->parent->parent;
@@ -962,6 +1196,15 @@ int vme_dma_list_exec(struct vme_dma_list *list)
}
EXPORT_SYMBOL(vme_dma_list_exec);
+/**
+ * vme_dma_list_free - Free a VME DMA list.
+ * @list: Pointer to VME list.
+ *
+ * Free the provided DMA list and all its entries.
+ *
+ * Return: Zero on success, -EINVAL on invalid VME resource, -EBUSY if resource
+ * is still in use. Hardware specific errors also possible.
+ */
int vme_dma_list_free(struct vme_dma_list *list)
{
struct vme_bridge *bridge = list->parent->parent;
@@ -994,6 +1237,15 @@ int vme_dma_list_free(struct vme_dma_list *list)
}
EXPORT_SYMBOL(vme_dma_list_free);
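The DMA list calls documented above compose into a request/build/execute/free sequence. A sketch follows, under the assumption that VME_DMA_MEM_TO_VME and the VME attribute flags from <linux/vme.h> match the intended transfer; the addresses and sizes are placeholders, not values from this patch.

/* Hypothetical helper: copy 4 KiB from a PCI buffer to VME A24 address 0x100000. */
static int demo_dma_copy(struct vme_dev *vdev, dma_addr_t pci_addr)
{
	struct vme_resource *res;
	struct vme_dma_list *list = NULL;
	struct vme_dma_attr *src = NULL, *dest = NULL;
	int ret = -ENOMEM;

	res = vme_dma_request(vdev, VME_DMA_MEM_TO_VME);
	if (!res)
		return -ENODEV;

	list = vme_new_dma_list(res);
	src = vme_dma_pci_attribute(pci_addr);
	dest = vme_dma_vme_attribute(0x100000, VME_A24,
				     VME_SCT | VME_USER | VME_DATA, VME_D32);
	if (!list || !src || !dest)
		goto out;

	ret = vme_dma_list_add(list, src, dest, 4096);
	if (!ret)
		ret = vme_dma_list_exec(list);	/* returns once the list has run */
out:
	/* Attributes may be freed as soon as vme_dma_list_add() has returned. */
	if (src)
		vme_dma_free_attribute(src);
	if (dest)
		vme_dma_free_attribute(dest);
	if (list)
		vme_dma_list_free(list);
	vme_dma_free(res);
	return ret;
}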
+/**
+ * vme_dma_free - Free a VME DMA resource.
+ * @resource: Pointer to VME DMA resource.
+ *
+ * Free the provided DMA resource so that it may be reallocated.
+ *
+ * Return: Zero on success, -EINVAL on invalid VME resource, -EBUSY if resource
+ * is still active.
+ */
int vme_dma_free(struct vme_resource *resource)
{
struct vme_dma_resource *ctrlr;
@@ -1099,6 +1351,22 @@ void vme_irq_handler(struct vme_bridge *bridge, int level, int statid)
}
EXPORT_SYMBOL(vme_irq_handler);
+/**
+ * vme_irq_request - Request a specific VME interrupt.
+ * @vdev: Pointer to VME device struct vme_dev assigned to driver instance.
+ * @level: Interrupt priority being requested.
+ * @statid: Interrupt vector being requested.
+ * @callback: Pointer to callback function called when VME interrupt/vector
+ * received.
+ * @priv_data: Generic pointer that will be passed to the callback function.
+ *
+ * Request that callback be attached as a handler for VME interrupts with the
+ * provided level and statid.
+ *
+ * Return: Zero on success, -EINVAL on invalid vme device, level or if the
+ * function is not supported, -EBUSY if the level/statid combination is
+ * already in use. Hardware specific errors also possible.
+ */
int vme_irq_request(struct vme_dev *vdev, int level, int statid,
void (*callback)(int, int, void *),
void *priv_data)
@@ -1142,6 +1410,14 @@ int vme_irq_request(struct vme_dev *vdev, int level, int statid,
}
EXPORT_SYMBOL(vme_irq_request);
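As a sketch of the callback contract spelled out above: the handler takes the level, statid and the priv_data pointer supplied at registration. Level 3 and vector 0x20 are arbitrary example values and the handler body is deliberately empty; none of this is part of the patch.

static void demo_vme_isr(int level, int statid, void *priv_data)
{
	/* Runs when the bridge receives the level/statid registered below. */
}

static int demo_attach_irq(struct vme_dev *vdev, void *priv_data)
{
	return vme_irq_request(vdev, 3, 0x20, demo_vme_isr, priv_data);
}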
+/**
+ * vme_irq_free - Free a VME interrupt.
+ * @vdev: Pointer to VME device struct vme_dev assigned to driver instance.
+ * @level: Interrupt priority of interrupt being freed.
+ * @statid: Interrupt vector of interrupt being freed.
+ *
+ * Remove previously attached callback from VME interrupt priority/vector.
+ */
void vme_irq_free(struct vme_dev *vdev, int level, int statid)
{
struct vme_bridge *bridge;
@@ -1177,6 +1453,18 @@ void vme_irq_free(struct vme_dev *vdev, int level, int statid)
}
EXPORT_SYMBOL(vme_irq_free);
+/**
+ * vme_irq_generate - Generate VME interrupt.
+ * @vdev: Pointer to VME device struct vme_dev assigned to driver instance.
+ * @level: Interrupt priority at which to assert the interrupt.
+ * @statid: Interrupt vector to associate with the interrupt.
+ *
+ * Generate a VME interrupt of the provided level and with the provided
+ * statid.
+ *
+ * Return: Zero on success, -EINVAL on invalid vme device, level or if the
+ * function is not supported. Hardware specific errors also possible.
+ */
int vme_irq_generate(struct vme_dev *vdev, int level, int statid)
{
struct vme_bridge *bridge;
@@ -1201,8 +1489,15 @@ int vme_irq_generate(struct vme_dev *vdev, int level, int statid)
}
EXPORT_SYMBOL(vme_irq_generate);
-/*
- * Request the location monitor, return resource or NULL
+/**
+ * vme_lm_request - Request a VME location monitor
+ * @vdev: Pointer to VME device struct vme_dev assigned to driver instance.
+ *
+ * Allocate a location monitor resource to the driver. A location monitor
+ * allows the driver to monitor accesses to a contiguous number of
+ * addresses on the VME bus.
+ *
+ * Return: Pointer to a VME resource on success or NULL on failure.
*/
struct vme_resource *vme_lm_request(struct vme_dev *vdev)
{
@@ -1218,7 +1513,7 @@ struct vme_resource *vme_lm_request(struct vme_dev *vdev)
goto err_bus;
}
- /* Loop through DMA resources */
+ /* Loop through LM resources */
list_for_each(lm_pos, &bridge->lm_resources) {
lm = list_entry(lm_pos,
struct vme_lm_resource, list);
@@ -1264,6 +1559,17 @@ err_bus:
}
EXPORT_SYMBOL(vme_lm_request);
+/**
+ * vme_lm_count - Determine number of VME Addresses monitored
+ * @resource: Pointer to VME location monitor resource.
+ *
+ * The number of contiguous addresses monitored is hardware dependent.
+ * Return the number of contiguous addresses monitored by the
+ * location monitor.
+ *
+ * Return: Count of addresses monitored or -EINVAL when provided with an
+ * invalid location monitor resource.
+ */
int vme_lm_count(struct vme_resource *resource)
{
struct vme_lm_resource *lm;
@@ -1279,6 +1585,20 @@ int vme_lm_count(struct vme_resource *resource)
}
EXPORT_SYMBOL(vme_lm_count);
+/**
+ * vme_lm_set - Configure location monitor
+ * @resource: Pointer to VME location monitor resource.
+ * @lm_base: Base address to monitor.
+ * @aspace: VME address space to monitor.
+ * @cycle: VME bus cycle type to monitor.
+ *
+ * Set the base address, address space and cycle type of accesses to be
+ * monitored by the location monitor.
+ *
+ * Return: Zero on success, -EINVAL when provided with an invalid location
+ * monitor resource or function is not supported. Hardware specific
+ * errors may also be returned.
+ */
int vme_lm_set(struct vme_resource *resource, unsigned long long lm_base,
u32 aspace, u32 cycle)
{
@@ -1301,6 +1621,20 @@ int vme_lm_set(struct vme_resource *resource, unsigned long long lm_base,
}
EXPORT_SYMBOL(vme_lm_set);
+/**
+ * vme_lm_get - Retrieve location monitor settings
+ * @resource: Pointer to VME location monitor resource.
+ * @lm_base: Pointer used to output the base address monitored.
+ * @aspace: Pointer used to output the address space monitored.
+ * @cycle: Pointer used to output the VME bus cycle type monitored.
+ *
+ * Retrieve the base address, address space and cycle type of accesses to
+ * be monitored by the location monitor.
+ *
+ * Return: Zero on success, -EINVAL when provided with an invalid location
+ * monitor resource or function is not supported. Hardware specific
+ * errors may also be returned.
+ */
int vme_lm_get(struct vme_resource *resource, unsigned long long *lm_base,
u32 *aspace, u32 *cycle)
{
@@ -1323,6 +1657,21 @@ int vme_lm_get(struct vme_resource *resource, unsigned long long *lm_base,
}
EXPORT_SYMBOL(vme_lm_get);
+/**
+ * vme_lm_attach - Provide callback for location monitor address
+ * @resource: Pointer to VME location monitor resource.
+ * @monitor: Offset to which callback should be attached.
+ * @callback: Pointer to callback function called when triggered.
+ * @data: Generic pointer that will be passed to the callback function.
+ *
+ * Attach a callback to the specified offset into the location monitor's
+ * monitored addresses. A generic pointer is provided to allow data to be
+ * passed to the callback when called.
+ *
+ * Return: Zero on success, -EINVAL when provided with an invalid location
+ * monitor resource or function is not supported. Hardware specific
+ * errors may also be returned.
+ */
int vme_lm_attach(struct vme_resource *resource, int monitor,
void (*callback)(void *), void *data)
{
@@ -1345,6 +1694,18 @@ int vme_lm_attach(struct vme_resource *resource, int monitor,
}
EXPORT_SYMBOL(vme_lm_attach);
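Finally, the location-monitor calls above combine roughly as follows. The monitored base address and flags are illustrative assumptions, and a real driver would call vme_lm_detach() for each attached offset before vme_lm_free(), per the warning documented for that function.

/* Hypothetical helper: monitor the bridge's contiguous LM range starting at 0x100000. */
static void demo_lm_hit(void *data)
{
	/* Invoked when one of the monitored VME addresses is accessed. */
}

static int demo_setup_lm(struct vme_dev *vdev)
{
	struct vme_resource *res;
	int count, i, ret;

	res = vme_lm_request(vdev);
	if (!res)
		return -ENODEV;

	/* Number of contiguous addresses this bridge can monitor. */
	count = vme_lm_count(res);
	if (count <= 0) {
		ret = -EINVAL;
		goto err;
	}

	ret = vme_lm_set(res, 0x100000, VME_A24, VME_SCT | VME_USER | VME_DATA);
	if (ret)
		goto err;

	for (i = 0; i < count; i++)
		vme_lm_attach(res, i, demo_lm_hit, NULL);
	return 0;
err:
	vme_lm_free(res);
	return ret;
}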
+/**
+ * vme_lm_detach - Remove callback for location monitor address
+ * @resource: Pointer to VME location monitor resource.
+ * @monitor: Offset from which the callback should be removed.
+ *
+ * Remove the callback associated with the specified offset into the
+ * location monitor's monitored addresses.
+ *
+ * Return: Zero on success, -EINVAL when provided with an invalid location
+ * monitor resource or function is not supported. Hardware specific
+ * errors may also be returned.
+ */
int vme_lm_detach(struct vme_resource *resource, int monitor)
{
struct vme_bridge *bridge = find_bridge(resource);
@@ -1366,6 +1727,18 @@ int vme_lm_detach(struct vme_resource *resource, int monitor)
}
EXPORT_SYMBOL(vme_lm_detach);
+/**
+ * vme_lm_free - Free allocated VME location monitor
+ * @resource: Pointer to VME location monitor resource.
+ *
+ * Free allocation of a VME location monitor.
+ *
+ * WARNING: This function currently expects that any callbacks that have
+ * been attached to the location monitor have been removed.
+ *
+ * Return: Zero on success, -EINVAL when provided with an invalid location
+ * monitor resource.
+ */
void vme_lm_free(struct vme_resource *resource)
{
struct vme_lm_resource *lm;
@@ -1392,6 +1765,16 @@ void vme_lm_free(struct vme_resource *resource)
}
EXPORT_SYMBOL(vme_lm_free);
+/**
+ * vme_slot_num - Retrieve slot ID
+ * @vdev: Pointer to VME device struct vme_dev assigned to driver instance.
+ *
+ * Retrieve the slot ID associated with the provided VME device.
+ *
+ * Return: The slot ID on success, -EINVAL if VME bridge cannot be determined
+ * or the function is not supported. Hardware specific errors may also
+ * be returned.
+ */
int vme_slot_num(struct vme_dev *vdev)
{
struct vme_bridge *bridge;
@@ -1411,6 +1794,15 @@ int vme_slot_num(struct vme_dev *vdev)
}
EXPORT_SYMBOL(vme_slot_num);
+/**
+ * vme_bus_num - Retrieve bus number
+ * @vdev: Pointer to VME device struct vme_dev assigned to driver instance.
+ *
+ * Retrieve the bus enumeration associated with the provided VME device.
+ *
+ * Return: The bus number on success, -EINVAL if VME bridge cannot be
+ * determined.
+ */
int vme_bus_num(struct vme_dev *vdev)
{
struct vme_bridge *bridge;
@@ -1556,6 +1948,15 @@ static int __vme_register_driver(struct vme_driver *drv, unsigned int ndevs)
return err;
}
+/**
+ * vme_register_driver - Register a VME driver
+ * @drv: Pointer to VME driver structure to register.
+ * @ndevs: Maximum number of devices to allow to be enumerated.
+ *
+ * Register a VME device driver with the VME subsystem.
+ *
+ * Return: Zero on success, error value on registration failure.
+ */
int vme_register_driver(struct vme_driver *drv, unsigned int ndevs)
{
int err;
@@ -1576,6 +1977,12 @@ int vme_register_driver(struct vme_driver *drv, unsigned int ndevs)
}
EXPORT_SYMBOL(vme_register_driver);
+/**
+ * vme_unregister_driver - Unregister a VME driver
+ * @drv: Pointer to VME driver structure to unregister.
+ *
+ * Unregister a VME device driver from the VME subsystem.
+ */
void vme_unregister_driver(struct vme_driver *drv)
{
struct vme_dev *dev, *dev_tmp;
diff --git a/drivers/w1/masters/matrox_w1.c b/drivers/w1/masters/matrox_w1.c
index 3749db8b4396..97a676bf5989 100644
--- a/drivers/w1/masters/matrox_w1.c
+++ b/drivers/w1/masters/matrox_w1.c
@@ -36,7 +36,6 @@
#include "../w1.h"
#include "../w1_int.h"
-#include "../w1_log.h"
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>");
@@ -157,9 +156,6 @@ static int matrox_w1_probe(struct pci_dev *pdev, const struct pci_device_id *ent
struct matrox_device *dev;
int err;
- assert(pdev != NULL);
- assert(ent != NULL);
-
if (pdev->vendor != PCI_VENDOR_ID_MATROX || pdev->device != PCI_DEVICE_ID_MATROX_G400)
return -ENODEV;
@@ -224,8 +220,6 @@ static void matrox_w1_remove(struct pci_dev *pdev)
{
struct matrox_device *dev = pci_get_drvdata(pdev);
- assert(dev != NULL);
-
if (dev->found) {
w1_remove_master_device(dev->bus_master);
iounmap(dev->virt_addr);
diff --git a/drivers/w1/slaves/Kconfig b/drivers/w1/slaves/Kconfig
index 0ef9f2663dbd..fb68465908f2 100644
--- a/drivers/w1/slaves/Kconfig
+++ b/drivers/w1/slaves/Kconfig
@@ -86,6 +86,12 @@ config W1_SLAVE_DS2433_CRC
Each block has 30 bytes of data and a two byte CRC16.
Full block writes are only allowed if the CRC is valid.
+config W1_SLAVE_DS2438
+ tristate "DS2438 Smart Battery Monitor 0x26 family support"
+ help
+ Say Y here if you want support for the 1-wire
+ DS2438 Smart Battery Monitor device.
+
config W1_SLAVE_DS2760
tristate "Dallas 2760 battery monitor chip (HP iPAQ & others)"
help
diff --git a/drivers/w1/slaves/Makefile b/drivers/w1/slaves/Makefile
index b4a358955ef9..54c63e420302 100644
--- a/drivers/w1/slaves/Makefile
+++ b/drivers/w1/slaves/Makefile
@@ -11,6 +11,7 @@ obj-$(CONFIG_W1_SLAVE_DS2406) += w1_ds2406.o
obj-$(CONFIG_W1_SLAVE_DS2423) += w1_ds2423.o
obj-$(CONFIG_W1_SLAVE_DS2431) += w1_ds2431.o
obj-$(CONFIG_W1_SLAVE_DS2433) += w1_ds2433.o
+obj-$(CONFIG_W1_SLAVE_DS2438) += w1_ds2438.o
obj-$(CONFIG_W1_SLAVE_DS2760) += w1_ds2760.o
obj-$(CONFIG_W1_SLAVE_DS2780) += w1_ds2780.o
obj-$(CONFIG_W1_SLAVE_DS2781) += w1_ds2781.o
diff --git a/drivers/w1/slaves/w1_ds2438.c b/drivers/w1/slaves/w1_ds2438.c
new file mode 100644
index 000000000000..5ededb4965e1
--- /dev/null
+++ b/drivers/w1/slaves/w1_ds2438.c
@@ -0,0 +1,390 @@
+/*
+ * 1-Wire implementation for the ds2438 chip
+ *
+ * Copyright (c) 2017 Mariusz Bialonczyk <manio@skyboo.net>
+ *
+ * This source code is licensed under the GNU General Public License,
+ * Version 2. See the file COPYING for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+
+#include "../w1.h"
+#include "../w1_family.h"
+
+#define W1_DS2438_RETRIES 3
+
+/* Memory commands */
+#define W1_DS2438_READ_SCRATCH 0xBE
+#define W1_DS2438_WRITE_SCRATCH 0x4E
+#define W1_DS2438_COPY_SCRATCH 0x48
+#define W1_DS2438_RECALL_MEMORY 0xB8
+/* Register commands */
+#define W1_DS2438_CONVERT_TEMP 0x44
+#define W1_DS2438_CONVERT_VOLTAGE 0xB4
+
+#define DS2438_PAGE_SIZE 8
+#define DS2438_ADC_INPUT_VAD 0
+#define DS2438_ADC_INPUT_VDD 1
+#define DS2438_MAX_CONVERSION_TIME 10 /* ms */
+
+/* Page #0 definitions */
+#define DS2438_STATUS_REG 0x00 /* Status/Configuration Register */
+#define DS2438_STATUS_IAD (1 << 0) /* Current A/D Control Bit */
+#define DS2438_STATUS_CA (1 << 1) /* Current Accumulator Configuration */
+#define DS2438_STATUS_EE (1 << 2) /* Current Accumulator Shadow Selector bit */
+#define DS2438_STATUS_AD (1 << 3) /* Voltage A/D Input Select Bit */
+#define DS2438_STATUS_TB (1 << 4) /* Temperature Busy Flag */
+#define DS2438_STATUS_NVB (1 << 5) /* Nonvolatile Memory Busy Flag */
+#define DS2438_STATUS_ADB (1 << 6) /* A/D Converter Busy Flag */
+
+#define DS2438_TEMP_LSB 0x01
+#define DS2438_TEMP_MSB 0x02
+#define DS2438_VOLTAGE_LSB 0x03
+#define DS2438_VOLTAGE_MSB 0x04
+#define DS2438_CURRENT_LSB 0x05
+#define DS2438_CURRENT_MSB 0x06
+#define DS2438_THRESHOLD 0x07
+
+int w1_ds2438_get_page(struct w1_slave *sl, int pageno, u8 *buf)
+{
+ unsigned int retries = W1_DS2438_RETRIES;
+ u8 w1_buf[2];
+ u8 crc;
+ size_t count;
+
+ while (retries--) {
+ crc = 0;
+
+ if (w1_reset_select_slave(sl))
+ continue;
+ w1_buf[0] = W1_DS2438_RECALL_MEMORY;
+ w1_buf[1] = 0x00;
+ w1_write_block(sl->master, w1_buf, 2);
+
+ if (w1_reset_select_slave(sl))
+ continue;
+ w1_buf[0] = W1_DS2438_READ_SCRATCH;
+ w1_buf[1] = 0x00;
+ w1_write_block(sl->master, w1_buf, 2);
+
+ count = w1_read_block(sl->master, buf, DS2438_PAGE_SIZE + 1);
+ if (count == DS2438_PAGE_SIZE + 1) {
+ crc = w1_calc_crc8(buf, DS2438_PAGE_SIZE);
+
+ /* check for correct CRC */
+ if ((u8)buf[DS2438_PAGE_SIZE] == crc)
+ return 0;
+ }
+ }
+ return -1;
+}
+
+int w1_ds2438_get_temperature(struct w1_slave *sl, int16_t *temperature)
+{
+ unsigned int retries = W1_DS2438_RETRIES;
+ u8 w1_buf[DS2438_PAGE_SIZE + 1 /*for CRC*/];
+ unsigned int tm = DS2438_MAX_CONVERSION_TIME;
+ unsigned long sleep_rem;
+ int ret;
+
+ mutex_lock(&sl->master->bus_mutex);
+
+ while (retries--) {
+ if (w1_reset_select_slave(sl))
+ continue;
+ w1_write_8(sl->master, W1_DS2438_CONVERT_TEMP);
+
+ mutex_unlock(&sl->master->bus_mutex);
+ sleep_rem = msleep_interruptible(tm);
+ if (sleep_rem != 0) {
+ ret = -1;
+ goto post_unlock;
+ }
+
+ if (mutex_lock_interruptible(&sl->master->bus_mutex) != 0) {
+ ret = -1;
+ goto post_unlock;
+ }
+
+ break;
+ }
+
+ if (w1_ds2438_get_page(sl, 0, w1_buf) == 0) {
+ *temperature = (((int16_t) w1_buf[DS2438_TEMP_MSB]) << 8) | ((uint16_t) w1_buf[DS2438_TEMP_LSB]);
+ ret = 0;
+ } else
+ ret = -1;
+
+ mutex_unlock(&sl->master->bus_mutex);
+
+post_unlock:
+ return ret;
+}
+
+int w1_ds2438_change_config_bit(struct w1_slave *sl, u8 mask, u8 value)
+{
+ unsigned int retries = W1_DS2438_RETRIES;
+ u8 w1_buf[3];
+ u8 status;
+ int perform_write = 0;
+
+ while (retries--) {
+ if (w1_reset_select_slave(sl))
+ continue;
+ w1_buf[0] = W1_DS2438_RECALL_MEMORY;
+ w1_buf[1] = 0x00;
+ w1_write_block(sl->master, w1_buf, 2);
+
+ if (w1_reset_select_slave(sl))
+ continue;
+ w1_buf[0] = W1_DS2438_READ_SCRATCH;
+ w1_buf[1] = 0x00;
+ w1_write_block(sl->master, w1_buf, 2);
+
+ /* reading one byte of result */
+ status = w1_read_8(sl->master);
+
+ /* if bit0=1, set a value to a mask for easy compare */
+ if (value)
+ value = mask;
+
+ if ((status & mask) == value)
+ return 0; /* already set as requested */
+ else {
+ /* changing bit */
+ status ^= mask;
+ perform_write = 1;
+ }
+ break;
+ }
+
+ if (perform_write) {
+ retries = W1_DS2438_RETRIES;
+ while (retries--) {
+ if (w1_reset_select_slave(sl))
+ continue;
+ w1_buf[0] = W1_DS2438_WRITE_SCRATCH;
+ w1_buf[1] = 0x00;
+ w1_buf[2] = status;
+ w1_write_block(sl->master, w1_buf, 3);
+
+ if (w1_reset_select_slave(sl))
+ continue;
+ w1_buf[0] = W1_DS2438_COPY_SCRATCH;
+ w1_buf[1] = 0x00;
+ w1_write_block(sl->master, w1_buf, 2);
+
+ return 0;
+ }
+ }
+ return -1;
+}
+
+int w1_ds2438_get_voltage(struct w1_slave *sl, int adc_input, uint16_t *voltage)
+{
+ unsigned int retries = W1_DS2438_RETRIES;
+ u8 w1_buf[DS2438_PAGE_SIZE + 1 /*for CRC*/];
+ unsigned int tm = DS2438_MAX_CONVERSION_TIME;
+ unsigned long sleep_rem;
+ int ret;
+
+ mutex_lock(&sl->master->bus_mutex);
+
+ if (w1_ds2438_change_config_bit(sl, DS2438_STATUS_AD, adc_input)) {
+ ret = -1;
+ goto pre_unlock;
+ }
+
+ while (retries--) {
+ if (w1_reset_select_slave(sl))
+ continue;
+ w1_write_8(sl->master, W1_DS2438_CONVERT_VOLTAGE);
+
+ mutex_unlock(&sl->master->bus_mutex);
+ sleep_rem = msleep_interruptible(tm);
+ if (sleep_rem != 0) {
+ ret = -1;
+ goto post_unlock;
+ }
+
+ if (mutex_lock_interruptible(&sl->master->bus_mutex) != 0) {
+ ret = -1;
+ goto post_unlock;
+ }
+
+ break;
+ }
+
+ if (w1_ds2438_get_page(sl, 0, w1_buf) == 0) {
+ *voltage = (((uint16_t) w1_buf[DS2438_VOLTAGE_MSB]) << 8) | ((uint16_t) w1_buf[DS2438_VOLTAGE_LSB]);
+ ret = 0;
+ } else
+ ret = -1;
+
+pre_unlock:
+ mutex_unlock(&sl->master->bus_mutex);
+
+post_unlock:
+ return ret;
+}
+
+static ssize_t iad_write(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf,
+ loff_t off, size_t count)
+{
+ struct w1_slave *sl = kobj_to_w1_slave(kobj);
+ int ret;
+
+ if (count != 1 || off != 0)
+ return -EFAULT;
+
+ mutex_lock(&sl->master->bus_mutex);
+
+ if (w1_ds2438_change_config_bit(sl, DS2438_STATUS_IAD, *buf & 0x01) == 0)
+ ret = 1;
+ else
+ ret = -EIO;
+
+ mutex_unlock(&sl->master->bus_mutex);
+
+ return ret;
+}
+
+static ssize_t page0_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf,
+ loff_t off, size_t count)
+{
+ struct w1_slave *sl = kobj_to_w1_slave(kobj);
+ int ret;
+ u8 w1_buf[DS2438_PAGE_SIZE + 1 /*for CRC*/];
+
+ if (off != 0)
+ return 0;
+ if (!buf)
+ return -EINVAL;
+
+ mutex_lock(&sl->master->bus_mutex);
+
+ if (w1_ds2438_get_page(sl, 0, w1_buf) == 0) {
+ memcpy(buf, &w1_buf, DS2438_PAGE_SIZE);
+ ret = DS2438_PAGE_SIZE;
+ } else
+ ret = -EIO;
+
+ mutex_unlock(&sl->master->bus_mutex);
+
+ return ret;
+}
+
+static ssize_t temperature_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf,
+ loff_t off, size_t count)
+{
+ struct w1_slave *sl = kobj_to_w1_slave(kobj);
+ int ret;
+ ssize_t c = PAGE_SIZE;
+ int16_t temp;
+
+ if (off != 0)
+ return 0;
+ if (!buf)
+ return -EINVAL;
+
+ if (w1_ds2438_get_temperature(sl, &temp) == 0) {
+ c -= snprintf(buf + PAGE_SIZE - c, c, "%d\n", temp);
+ ret = PAGE_SIZE - c;
+ } else
+ ret = -EIO;
+
+ return ret;
+}
+
+static ssize_t vad_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf,
+ loff_t off, size_t count)
+{
+ struct w1_slave *sl = kobj_to_w1_slave(kobj);
+ int ret;
+ ssize_t c = PAGE_SIZE;
+ uint16_t voltage;
+
+ if (off != 0)
+ return 0;
+ if (!buf)
+ return -EINVAL;
+
+ if (w1_ds2438_get_voltage(sl, DS2438_ADC_INPUT_VAD, &voltage) == 0) {
+ c -= snprintf(buf + PAGE_SIZE - c, c, "%d\n", voltage);
+ ret = PAGE_SIZE - c;
+ } else
+ ret = -EIO;
+
+ return ret;
+}
+
+static ssize_t vdd_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf,
+ loff_t off, size_t count)
+{
+ struct w1_slave *sl = kobj_to_w1_slave(kobj);
+ int ret;
+ ssize_t c = PAGE_SIZE;
+ uint16_t voltage;
+
+ if (off != 0)
+ return 0;
+ if (!buf)
+ return -EINVAL;
+
+ if (w1_ds2438_get_voltage(sl, DS2438_ADC_INPUT_VDD, &voltage) == 0) {
+ c -= snprintf(buf + PAGE_SIZE - c, c, "%d\n", voltage);
+ ret = PAGE_SIZE - c;
+ } else
+ ret = -EIO;
+
+ return ret;
+}
+
+static BIN_ATTR(iad, S_IRUGO | S_IWUSR | S_IWGRP, NULL, iad_write, 1);
+static BIN_ATTR_RO(page0, DS2438_PAGE_SIZE);
+static BIN_ATTR_RO(temperature, 0/* real length varies */);
+static BIN_ATTR_RO(vad, 0/* real length varies */);
+static BIN_ATTR_RO(vdd, 0/* real length varies */);
+
+static struct bin_attribute *w1_ds2438_bin_attrs[] = {
+ &bin_attr_iad,
+ &bin_attr_page0,
+ &bin_attr_temperature,
+ &bin_attr_vad,
+ &bin_attr_vdd,
+ NULL,
+};
+
+static const struct attribute_group w1_ds2438_group = {
+ .bin_attrs = w1_ds2438_bin_attrs,
+};
+
+static const struct attribute_group *w1_ds2438_groups[] = {
+ &w1_ds2438_group,
+ NULL,
+};
+
+static struct w1_family_ops w1_ds2438_fops = {
+ .groups = w1_ds2438_groups,
+};
+
+static struct w1_family w1_ds2438_family = {
+ .fid = W1_FAMILY_DS2438,
+ .fops = &w1_ds2438_fops,
+};
+module_w1_family(w1_ds2438_family);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mariusz Bialonczyk <manio@skyboo.net>");
+MODULE_DESCRIPTION("1-wire driver for Maxim/Dallas DS2438 Smart Battery Monitor");
+MODULE_ALIAS("w1-family-" __stringify(W1_FAMILY_DS2438));
diff --git a/drivers/w1/slaves/w1_ds2760.h b/drivers/w1/slaves/w1_ds2760.h
index 58e774141568..24168c94eeae 100644
--- a/drivers/w1/slaves/w1_ds2760.h
+++ b/drivers/w1/slaves/w1_ds2760.h
@@ -24,11 +24,13 @@
#define DS2760_DATA_SIZE 0x40
#define DS2760_PROTECTION_REG 0x00
+
#define DS2760_STATUS_REG 0x01
- #define DS2760_STATUS_IE (1 << 2)
- #define DS2760_STATUS_SWEN (1 << 3)
- #define DS2760_STATUS_RNAOP (1 << 4)
- #define DS2760_STATUS_PMOD (1 << 5)
+#define DS2760_STATUS_IE (1 << 2)
+#define DS2760_STATUS_SWEN (1 << 3)
+#define DS2760_STATUS_RNAOP (1 << 4)
+#define DS2760_STATUS_PMOD (1 << 5)
+
#define DS2760_EEPROM_REG 0x07
#define DS2760_SPECIAL_FEATURE_REG 0x08
#define DS2760_VOLTAGE_MSB 0x0c
diff --git a/drivers/w1/w1.c b/drivers/w1/w1.c
index 90a3d9338fd2..8511d1685db9 100644
--- a/drivers/w1/w1.c
+++ b/drivers/w1/w1.c
@@ -29,7 +29,6 @@
#include <linux/atomic.h>
#include "w1.h"
-#include "w1_log.h"
#include "w1_int.h"
#include "w1_family.h"
#include "w1_netlink.h"
diff --git a/drivers/w1/w1_family.h b/drivers/w1/w1_family.h
index c4a6b257a367..869a3ff87d29 100644
--- a/drivers/w1/w1_family.h
+++ b/drivers/w1/w1_family.h
@@ -29,6 +29,7 @@
#define W1_COUNTER_DS2423 0x1D
#define W1_THERM_DS1822 0x22
#define W1_EEPROM_DS2433 0x23
+#define W1_FAMILY_DS2438 0x26
#define W1_THERM_DS18B20 0x28
#define W1_FAMILY_DS2408 0x29
#define W1_EEPROM_DS2431 0x2D
diff --git a/drivers/w1/w1_int.c b/drivers/w1/w1_int.c
index 2cae7b29bb5f..1072a2e620bb 100644
--- a/drivers/w1/w1_int.c
+++ b/drivers/w1/w1_int.c
@@ -22,7 +22,6 @@
#include <linux/moduleparam.h>
#include "w1.h"
-#include "w1_log.h"
#include "w1_netlink.h"
#include "w1_int.h"
diff --git a/drivers/w1/w1_io.c b/drivers/w1/w1_io.c
index de8bebc27896..1134e6b1eb02 100644
--- a/drivers/w1/w1_io.c
+++ b/drivers/w1/w1_io.c
@@ -19,7 +19,6 @@
#include <linux/module.h>
#include "w1.h"
-#include "w1_log.h"
static int w1_delay_parm = 1;
module_param_named(delay_coef, w1_delay_parm, int, 0);
diff --git a/drivers/w1/w1_log.h b/drivers/w1/w1_log.h
deleted file mode 100644
index dd1422b6afbb..000000000000
--- a/drivers/w1/w1_log.h
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * Copyright (c) 2004 Evgeniy Polyakov <zbr@ioremap.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef __W1_LOG_H
-#define __W1_LOG_H
-
-#define DEBUG
-
-#ifdef W1_DEBUG
-# define assert(expr) do {} while (0)
-#else
-# define assert(expr) \
- if(unlikely(!(expr))) { \
- pr_err("Assertion failed! %s,%s,%s,line=%d\n", \
- #expr, __FILE__, __func__, __LINE__); \
- }
-#endif
-
-#endif /* __W1_LOG_H */
-
diff --git a/drivers/w1/w1_netlink.c b/drivers/w1/w1_netlink.c
index 49e520ca79c5..027950f997d1 100644
--- a/drivers/w1/w1_netlink.c
+++ b/drivers/w1/w1_netlink.c
@@ -18,13 +18,10 @@
#include <linux/connector.h>
#include "w1.h"
-#include "w1_log.h"
#include "w1_netlink.h"
#if defined(CONFIG_W1_CON) && (defined(CONFIG_CONNECTOR) || (defined(CONFIG_CONNECTOR_MODULE) && defined(CONFIG_W1_MODULE)))
-#define MIN(a, b) (((a) < (b)) ? (a) : (b))
-
/* Bundle together everything required to process a request in one memory
* allocation.
*/
@@ -598,7 +595,7 @@ static void w1_cn_callback(struct cn_msg *cn, struct netlink_skb_parms *nsp)
sizeof(struct w1_netlink_msg) +
sizeof(struct w1_netlink_cmd));
}
- reply_size = MIN(CONNECTOR_MAX_MSG_SIZE, reply_size);
+ reply_size = min(CONNECTOR_MAX_MSG_SIZE, reply_size);
/* allocate space for the block, a copy of the original message,
* one node per cmd to point into the original message,
diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c
index 70c7194e2810..67fbe35ce7cf 100644
--- a/drivers/watchdog/hpwdt.c
+++ b/drivers/watchdog/hpwdt.c
@@ -34,7 +34,7 @@
#include <linux/nmi.h>
#include <linux/kdebug.h>
#include <linux/notifier.h>
-#include <asm/cacheflush.h>
+#include <asm/set_memory.h>
#endif /* CONFIG_HPWDT_NMI_DECODING */
#include <asm/nmi.h>
#include <asm/frame.h>
diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c
index 3d0abc0d59b4..347f0389b089 100644
--- a/drivers/watchdog/iTCO_wdt.c
+++ b/drivers/watchdog/iTCO_wdt.c
@@ -106,6 +106,10 @@ struct iTCO_wdt_private {
struct pci_dev *pci_dev;
/* whether or not the watchdog has been suspended */
bool suspended;
+ /* no reboot API private data */
+ void *no_reboot_priv;
+ /* no reboot update function pointer */
+ int (*update_no_reboot_bit)(void *p, bool set);
};
/* module parameters */
@@ -170,46 +174,68 @@ static inline u32 no_reboot_bit(struct iTCO_wdt_private *p)
return enable_bit;
}
-static void iTCO_wdt_set_NO_REBOOT_bit(struct iTCO_wdt_private *p)
+static int update_no_reboot_bit_def(void *priv, bool set)
{
- u32 val32;
+ return 0;
+}
- /* Set the NO_REBOOT bit: this disables reboots */
- if (p->iTCO_version >= 2) {
- val32 = readl(p->gcs_pmc);
- val32 |= no_reboot_bit(p);
- writel(val32, p->gcs_pmc);
- } else if (p->iTCO_version == 1) {
- pci_read_config_dword(p->pci_dev, 0xd4, &val32);
+static int update_no_reboot_bit_pci(void *priv, bool set)
+{
+ struct iTCO_wdt_private *p = priv;
+ u32 val32 = 0, newval32 = 0;
+
+ pci_read_config_dword(p->pci_dev, 0xd4, &val32);
+ if (set)
val32 |= no_reboot_bit(p);
- pci_write_config_dword(p->pci_dev, 0xd4, val32);
- }
+ else
+ val32 &= ~no_reboot_bit(p);
+ pci_write_config_dword(p->pci_dev, 0xd4, val32);
+ pci_read_config_dword(p->pci_dev, 0xd4, &newval32);
+
+ /* make sure the update is successful */
+ if (val32 != newval32)
+ return -EIO;
+
+ return 0;
}
-static int iTCO_wdt_unset_NO_REBOOT_bit(struct iTCO_wdt_private *p)
+static int update_no_reboot_bit_mem(void *priv, bool set)
{
- u32 enable_bit = no_reboot_bit(p);
- u32 val32 = 0;
+ struct iTCO_wdt_private *p = priv;
+ u32 val32 = 0, newval32 = 0;
- /* Unset the NO_REBOOT bit: this enables reboots */
- if (p->iTCO_version >= 2) {
- val32 = readl(p->gcs_pmc);
- val32 &= ~enable_bit;
- writel(val32, p->gcs_pmc);
+ val32 = readl(p->gcs_pmc);
+ if (set)
+ val32 |= no_reboot_bit(p);
+ else
+ val32 &= ~no_reboot_bit(p);
+ writel(val32, p->gcs_pmc);
+ newval32 = readl(p->gcs_pmc);
- val32 = readl(p->gcs_pmc);
- } else if (p->iTCO_version == 1) {
- pci_read_config_dword(p->pci_dev, 0xd4, &val32);
- val32 &= ~enable_bit;
- pci_write_config_dword(p->pci_dev, 0xd4, val32);
+ /* make sure the update is successful */
+ if (val32 != newval32)
+ return -EIO;
- pci_read_config_dword(p->pci_dev, 0xd4, &val32);
+ return 0;
+}
+
+static void iTCO_wdt_no_reboot_bit_setup(struct iTCO_wdt_private *p,
+ struct itco_wdt_platform_data *pdata)
+{
+ if (pdata->update_no_reboot_bit) {
+ p->update_no_reboot_bit = pdata->update_no_reboot_bit;
+ p->no_reboot_priv = pdata->no_reboot_priv;
+ return;
}
- if (val32 & enable_bit)
- return -EIO;
+ if (p->iTCO_version >= 2)
+ p->update_no_reboot_bit = update_no_reboot_bit_mem;
+ else if (p->iTCO_version == 1)
+ p->update_no_reboot_bit = update_no_reboot_bit_pci;
+ else
+ p->update_no_reboot_bit = update_no_reboot_bit_def;
- return 0;
+ p->no_reboot_priv = p;
}
static int iTCO_wdt_start(struct watchdog_device *wd_dev)
@@ -222,7 +248,7 @@ static int iTCO_wdt_start(struct watchdog_device *wd_dev)
iTCO_vendor_pre_start(p->smi_res, wd_dev->timeout);
/* disable chipset's NO_REBOOT bit */
- if (iTCO_wdt_unset_NO_REBOOT_bit(p)) {
+ if (p->update_no_reboot_bit(p->no_reboot_priv, false)) {
spin_unlock(&p->io_lock);
pr_err("failed to reset NO_REBOOT flag, reboot disabled by hardware/BIOS\n");
return -EIO;
@@ -263,7 +289,7 @@ static int iTCO_wdt_stop(struct watchdog_device *wd_dev)
val = inw(TCO1_CNT(p));
/* Set the NO_REBOOT bit to prevent later reboots, just for sure */
- iTCO_wdt_set_NO_REBOOT_bit(p);
+ p->update_no_reboot_bit(p->no_reboot_priv, true);
spin_unlock(&p->io_lock);
@@ -428,11 +454,13 @@ static int iTCO_wdt_probe(struct platform_device *pdev)
p->iTCO_version = pdata->version;
p->pci_dev = to_pci_dev(dev->parent);
+ iTCO_wdt_no_reboot_bit_setup(p, pdata);
+
/*
* Get the Memory-Mapped GCS or PMC register, we need it for the
* NO_REBOOT flag (TCO v2 and v3).
*/
- if (p->iTCO_version >= 2) {
+ if (p->iTCO_version >= 2 && !pdata->update_no_reboot_bit) {
p->gcs_pmc_res = platform_get_resource(pdev,
IORESOURCE_MEM,
ICH_RES_MEM_GCS_PMC);
@@ -442,14 +470,14 @@ static int iTCO_wdt_probe(struct platform_device *pdev)
}
/* Check chipset's NO_REBOOT bit */
- if (iTCO_wdt_unset_NO_REBOOT_bit(p) &&
+ if (p->update_no_reboot_bit(p->no_reboot_priv, false) &&
iTCO_vendor_check_noreboot_on()) {
pr_info("unable to reset NO_REBOOT flag, device disabled by hardware/BIOS\n");
return -ENODEV; /* Cannot reset NO_REBOOT bit */
}
/* Set the NO_REBOOT bit to prevent later reboots, just for sure */
- iTCO_wdt_set_NO_REBOOT_bit(p);
+ p->update_no_reboot_bit(p->no_reboot_priv, true);
/* The TCO logic uses the TCO_EN bit in the SMI_EN register */
if (!devm_request_region(dev, p->smi_res->start,
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index a6d4378eb8d9..50dcb68d8070 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -709,6 +709,7 @@ void free_xenballooned_pages(int nr_pages, struct page **pages)
}
EXPORT_SYMBOL(free_xenballooned_pages);
+#ifdef CONFIG_XEN_PV
static void __init balloon_add_region(unsigned long start_pfn,
unsigned long pages)
{
@@ -732,19 +733,22 @@ static void __init balloon_add_region(unsigned long start_pfn,
balloon_stats.total_pages += extra_pfn_end - start_pfn;
}
+#endif
static int __init balloon_init(void)
{
- int i;
-
if (!xen_domain())
return -ENODEV;
pr_info("Initialising balloon driver\n");
+#ifdef CONFIG_XEN_PV
balloon_stats.current_pages = xen_pv_domain()
? min(xen_start_info->nr_pages - xen_released_pages, max_pfn)
: get_num_physpages();
+#else
+ balloon_stats.current_pages = get_num_physpages();
+#endif
balloon_stats.target_pages = balloon_stats.current_pages;
balloon_stats.balloon_low = 0;
balloon_stats.balloon_high = 0;
@@ -761,14 +765,20 @@ static int __init balloon_init(void)
register_sysctl_table(xen_root);
#endif
- /*
- * Initialize the balloon with pages from the extra memory
- * regions (see arch/x86/xen/setup.c).
- */
- for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++)
- if (xen_extra_mem[i].n_pfns)
- balloon_add_region(xen_extra_mem[i].start_pfn,
- xen_extra_mem[i].n_pfns);
+#ifdef CONFIG_XEN_PV
+ {
+ int i;
+
+ /*
+ * Initialize the balloon with pages from the extra memory
+ * regions (see arch/x86/xen/setup.c).
+ */
+ for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++)
+ if (xen_extra_mem[i].n_pfns)
+ balloon_add_region(xen_extra_mem[i].start_pfn,
+ xen_extra_mem[i].n_pfns);
+ }
+#endif
return 0;
}
diff --git a/drivers/xen/efi.c b/drivers/xen/efi.c
index 22f71ffd3406..9243a9051078 100644
--- a/drivers/xen/efi.c
+++ b/drivers/xen/efi.c
@@ -26,6 +26,7 @@
#include <xen/interface/xen.h>
#include <xen/interface/platform.h>
#include <xen/xen.h>
+#include <xen/xen-ops.h>
#include <asm/page.h>
@@ -263,3 +264,20 @@ efi_status_t xen_efi_query_capsule_caps(efi_capsule_header_t **capsules,
return efi_data(op).status;
}
EXPORT_SYMBOL_GPL(xen_efi_query_capsule_caps);
+
+void xen_efi_reset_system(int reset_type, efi_status_t status,
+ unsigned long data_size, efi_char16_t *data)
+{
+ switch (reset_type) {
+ case EFI_RESET_COLD:
+ case EFI_RESET_WARM:
+ xen_reboot(SHUTDOWN_reboot);
+ break;
+ case EFI_RESET_SHUTDOWN:
+ xen_reboot(SHUTDOWN_poweroff);
+ break;
+ default:
+ BUG();
+ }
+}
+EXPORT_SYMBOL_GPL(xen_efi_reset_system);
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index 6a53577772c9..b52852f38cff 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -1312,6 +1312,9 @@ static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
if (!VALID_EVTCHN(evtchn))
return -1;
+ if (!xen_support_evtchn_rebind())
+ return -1;
+
/* Send future instances of this interrupt to other vcpu. */
bind_vcpu.port = evtchn;
bind_vcpu.vcpu = xen_vcpu_nr(tcpu);
@@ -1646,14 +1649,20 @@ void xen_callback_vector(void)
int rc;
uint64_t callback_via;
- callback_via = HVM_CALLBACK_VECTOR(HYPERVISOR_CALLBACK_VECTOR);
- rc = xen_set_callback_via(callback_via);
- BUG_ON(rc);
- pr_info("Xen HVM callback vector for event delivery is enabled\n");
- /* in the restore case the vector has already been allocated */
- if (!test_bit(HYPERVISOR_CALLBACK_VECTOR, used_vectors))
- alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR,
- xen_hvm_callback_vector);
+ if (xen_have_vector_callback) {
+ callback_via = HVM_CALLBACK_VECTOR(HYPERVISOR_CALLBACK_VECTOR);
+ rc = xen_set_callback_via(callback_via);
+ if (rc) {
+ pr_err("Request for Xen HVM callback vector failed\n");
+ xen_have_vector_callback = 0;
+ return;
+ }
+ pr_info("Xen HVM callback vector for event delivery is enabled\n");
+ /* in the restore case the vector has already been allocated */
+ if (!test_bit(HYPERVISOR_CALLBACK_VECTOR, used_vectors))
+ alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR,
+ xen_hvm_callback_vector);
+ }
}
#else
void xen_callback_vector(void) {}
diff --git a/drivers/xen/evtchn.c b/drivers/xen/evtchn.c
index 6890897a6f30..10f1ef582659 100644
--- a/drivers/xen/evtchn.c
+++ b/drivers/xen/evtchn.c
@@ -87,18 +87,6 @@ struct user_evtchn {
bool enabled;
};
-static evtchn_port_t *evtchn_alloc_ring(unsigned int size)
-{
- evtchn_port_t *ring;
- size_t s = size * sizeof(*ring);
-
- ring = kmalloc(s, GFP_KERNEL);
- if (!ring)
- ring = vmalloc(s);
-
- return ring;
-}
-
static void evtchn_free_ring(evtchn_port_t *ring)
{
kvfree(ring);
@@ -334,7 +322,7 @@ static int evtchn_resize_ring(struct per_user_data *u)
else
new_size = 2 * u->ring_size;
- new_ring = evtchn_alloc_ring(new_size);
+ new_ring = kvmalloc(new_size * sizeof(*new_ring), GFP_KERNEL);
if (!new_ring)
return -ENOMEM;
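
Note: the hunk above drops the driver's private kmalloc-then-vmalloc fallback in favour of kvmalloc(), which performs the same fallback internally and pairs with kvfree(); btrfs_compare_trees() further down in this series gets the same treatment. A minimal sketch of the pattern, using an illustrative ring type rather than the evtchn one:

	#include <linux/mm.h>		/* kvmalloc(), kvfree() */
	#include <linux/slab.h>

	static u32 *example_alloc_ring(unsigned int nr_entries)
	{
		/* Tries kmalloc() first, falls back to vmalloc() for large sizes. */
		return kvmalloc(nr_entries * sizeof(u32), GFP_KERNEL);
	}

	static void example_free_ring(u32 *ring)
	{
		/* kvfree() copes with both kmalloc'ed and vmalloc'ed pointers. */
		kvfree(ring);
	}
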
diff --git a/drivers/xen/platform-pci.c b/drivers/xen/platform-pci.c
index 2a165cc8a43c..1275df83070f 100644
--- a/drivers/xen/platform-pci.c
+++ b/drivers/xen/platform-pci.c
@@ -90,8 +90,10 @@ static int xen_allocate_irq(struct pci_dev *pdev)
static int platform_pci_resume(struct pci_dev *pdev)
{
int err;
- if (!xen_pv_domain())
+
+ if (xen_have_vector_callback)
return 0;
+
err = xen_set_callback_via(callback_via);
if (err) {
dev_err(&pdev->dev, "platform_pci_resume failure!\n");
@@ -137,15 +139,7 @@ static int platform_pci_probe(struct pci_dev *pdev,
platform_mmio = mmio_addr;
platform_mmiolen = mmio_len;
-
- /*
- * Xen HVM guests always use the vector callback mechanism.
- * L1 Dom0 in a nested Xen environment is a PV guest inside in an
- * HVM environment. It needs the platform-pci driver to get
- * notifications from L0 Xen, but it cannot use the vector callback
- * as it is not exported by L1 Xen.
- */
- if (xen_pv_domain()) {
+ if (!xen_have_vector_callback) {
ret = xen_allocate_irq(pdev);
if (ret) {
dev_warn(&pdev->dev, "request_irq failed err=%d\n", ret);
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index e8cef1ad0fe3..8dab0d3dc172 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -693,8 +693,8 @@ xen_swiotlb_dma_mmap(struct device *dev, struct vm_area_struct *vma,
unsigned long attrs)
{
#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
- if (__generic_dma_ops(dev)->mmap)
- return __generic_dma_ops(dev)->mmap(dev, vma, cpu_addr,
+ if (xen_get_dma_ops(dev)->mmap)
+ return xen_get_dma_ops(dev)->mmap(dev, vma, cpu_addr,
dma_addr, size, attrs);
#endif
return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
@@ -711,7 +711,7 @@ xen_swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt,
unsigned long attrs)
{
#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
- if (__generic_dma_ops(dev)->get_sgtable) {
+ if (xen_get_dma_ops(dev)->get_sgtable) {
#if 0
/*
* This check verifies that the page belongs to the current domain and
@@ -721,7 +721,7 @@ xen_swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt,
unsigned long bfn = PHYS_PFN(dma_to_phys(dev, handle));
BUG_ON (!page_is_ram(bfn));
#endif
- return __generic_dma_ops(dev)->get_sgtable(dev, sgt, cpu_addr,
+ return xen_get_dma_ops(dev)->get_sgtable(dev, sgt, cpu_addr,
handle, size, attrs);
}
#endif
diff --git a/drivers/xen/xenfs/super.c b/drivers/xen/xenfs/super.c
index 328c3987b112..967f069385d0 100644
--- a/drivers/xen/xenfs/super.c
+++ b/drivers/xen/xenfs/super.c
@@ -44,14 +44,14 @@ static const struct file_operations capabilities_file_ops = {
static int xenfs_fill_super(struct super_block *sb, void *data, int silent)
{
- static struct tree_descr xenfs_files[] = {
+ static const struct tree_descr xenfs_files[] = {
[2] = { "xenbus", &xen_xenbus_fops, S_IRUSR|S_IWUSR },
{ "capabilities", &capabilities_file_ops, S_IRUGO },
{ "privcmd", &xen_privcmd_fops, S_IRUSR|S_IWUSR },
{""},
};
- static struct tree_descr xenfs_init_files[] = {
+ static const struct tree_descr xenfs_init_files[] = {
[2] = { "xenbus", &xen_xenbus_fops, S_IRUSR|S_IWUSR },
{ "capabilities", &capabilities_file_ops, S_IRUGO },
{ "privcmd", &xen_privcmd_fops, S_IRUSR|S_IWUSR },
diff --git a/drivers/zorro/zorro-driver.c b/drivers/zorro/zorro-driver.c
index eacae1434b73..fa23b7366b98 100644
--- a/drivers/zorro/zorro-driver.c
+++ b/drivers/zorro/zorro-driver.c
@@ -14,6 +14,8 @@
#include <linux/module.h>
#include <linux/zorro.h>
+#include "zorro.h"
+
/**
* zorro_match_device - Tell if a Zorro device structure has a matching
@@ -161,12 +163,13 @@ static int zorro_uevent(struct device *dev, struct kobj_uevent_env *env)
}
struct bus_type zorro_bus_type = {
- .name = "zorro",
- .dev_name = "zorro",
- .match = zorro_bus_match,
- .uevent = zorro_uevent,
- .probe = zorro_device_probe,
- .remove = zorro_device_remove,
+ .name = "zorro",
+ .dev_name = "zorro",
+ .dev_groups = zorro_device_attribute_groups,
+ .match = zorro_bus_match,
+ .uevent = zorro_uevent,
+ .probe = zorro_device_probe,
+ .remove = zorro_device_remove,
};
EXPORT_SYMBOL(zorro_bus_type);
diff --git a/drivers/zorro/zorro-sysfs.c b/drivers/zorro/zorro-sysfs.c
index 9282dbf5abdb..3d34dba9bb2d 100644
--- a/drivers/zorro/zorro-sysfs.c
+++ b/drivers/zorro/zorro-sysfs.c
@@ -23,33 +23,33 @@
/* show configuration fields */
#define zorro_config_attr(name, field, format_string) \
-static ssize_t \
-show_##name(struct device *dev, struct device_attribute *attr, char *buf) \
+static ssize_t name##_show(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
{ \
struct zorro_dev *z; \
\
z = to_zorro_dev(dev); \
return sprintf(buf, format_string, z->field); \
} \
-static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL);
+static DEVICE_ATTR_RO(name);
zorro_config_attr(id, id, "0x%08x\n");
zorro_config_attr(type, rom.er_Type, "0x%02x\n");
zorro_config_attr(slotaddr, slotaddr, "0x%04x\n");
zorro_config_attr(slotsize, slotsize, "0x%04x\n");
-static ssize_t
-show_serial(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t serial_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct zorro_dev *z;
z = to_zorro_dev(dev);
return sprintf(buf, "0x%08x\n", be32_to_cpu(z->rom.er_SerialNumber));
}
+static DEVICE_ATTR_RO(serial);
-static DEVICE_ATTR(serial, S_IRUGO, show_serial, NULL);
-
-static ssize_t zorro_show_resource(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t resource_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct zorro_dev *z = to_zorro_dev(dev);
@@ -58,8 +58,27 @@ static ssize_t zorro_show_resource(struct device *dev, struct device_attribute *
(unsigned long)zorro_resource_end(z),
zorro_resource_flags(z));
}
+static DEVICE_ATTR_RO(resource);
+
+static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct zorro_dev *z = to_zorro_dev(dev);
-static DEVICE_ATTR(resource, S_IRUGO, zorro_show_resource, NULL);
+ return sprintf(buf, ZORRO_DEVICE_MODALIAS_FMT "\n", z->id);
+}
+static DEVICE_ATTR_RO(modalias);
+
+static struct attribute *zorro_device_attrs[] = {
+ &dev_attr_id.attr,
+ &dev_attr_type.attr,
+ &dev_attr_serial.attr,
+ &dev_attr_slotaddr.attr,
+ &dev_attr_slotsize.attr,
+ &dev_attr_resource.attr,
+ &dev_attr_modalias.attr,
+ NULL
+};
static ssize_t zorro_read_config(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
@@ -88,32 +107,17 @@ static struct bin_attribute zorro_config_attr = {
.read = zorro_read_config,
};
-static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct zorro_dev *z = to_zorro_dev(dev);
-
- return sprintf(buf, ZORRO_DEVICE_MODALIAS_FMT "\n", z->id);
-}
-
-static DEVICE_ATTR(modalias, S_IRUGO, modalias_show, NULL);
+static struct bin_attribute *zorro_device_bin_attrs[] = {
+ &zorro_config_attr,
+ NULL
+};
-int zorro_create_sysfs_dev_files(struct zorro_dev *z)
-{
- struct device *dev = &z->dev;
- int error;
-
- /* current configuration's attributes */
- if ((error = device_create_file(dev, &dev_attr_id)) ||
- (error = device_create_file(dev, &dev_attr_type)) ||
- (error = device_create_file(dev, &dev_attr_serial)) ||
- (error = device_create_file(dev, &dev_attr_slotaddr)) ||
- (error = device_create_file(dev, &dev_attr_slotsize)) ||
- (error = device_create_file(dev, &dev_attr_resource)) ||
- (error = device_create_file(dev, &dev_attr_modalias)) ||
- (error = sysfs_create_bin_file(&dev->kobj, &zorro_config_attr)))
- return error;
-
- return 0;
-}
+static const struct attribute_group zorro_device_attr_group = {
+ .attrs = zorro_device_attrs,
+ .bin_attrs = zorro_device_bin_attrs,
+};
+const struct attribute_group *zorro_device_attribute_groups[] = {
+ &zorro_device_attr_group,
+ NULL
+};
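
Note: the conversion above follows the standard sysfs idiom: each show() routine is named <attr>_show() so DEVICE_ATTR_RO() can reference it, the attributes (plus the binary config attribute) are collected into an attribute_group, and the bus points .dev_groups at the group list so the driver core creates and removes the files at device registration time, replacing the hand-rolled device_create_file() error chains. A generic sketch of the read-only half of that idiom (names are illustrative, not taken from the zorro code):

	#include <linux/device.h>
	#include <linux/sysfs.h>

	static ssize_t revision_show(struct device *dev, struct device_attribute *attr,
				     char *buf)
	{
		/* DEVICE_ATTR_RO(revision) expects this exact function name. */
		return sprintf(buf, "0x%02x\n", 0x42);
	}
	static DEVICE_ATTR_RO(revision);

	static struct attribute *example_dev_attrs[] = {
		&dev_attr_revision.attr,
		NULL
	};
	ATTRIBUTE_GROUPS(example_dev);	/* generates example_dev_groups[] */

	/*
	 * A bus_type (or device) then sets .dev_groups = example_dev_groups and
	 * the core handles file creation/removal; zorro builds its group by hand
	 * instead because it also carries a bin_attrs array for the raw config
	 * record.
	 */
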
diff --git a/drivers/zorro/zorro.c b/drivers/zorro/zorro.c
index d295d9878dff..cc1b1ac57d61 100644
--- a/drivers/zorro/zorro.c
+++ b/drivers/zorro/zorro.c
@@ -197,9 +197,6 @@ static int __init amiga_zorro_probe(struct platform_device *pdev)
put_device(&z->dev);
continue;
}
- error = zorro_create_sysfs_dev_files(z);
- if (error)
- dev_err(&z->dev, "Error creating sysfs files\n");
}
/* Mark all available Zorro II memory */
diff --git a/drivers/zorro/zorro.h b/drivers/zorro/zorro.h
index 34119fb4e560..4f805c01cfbc 100644
--- a/drivers/zorro/zorro.h
+++ b/drivers/zorro/zorro.h
@@ -5,5 +5,4 @@ extern void zorro_name_device(struct zorro_dev *z);
static inline void zorro_name_device(struct zorro_dev *dev) { }
#endif
-extern int zorro_create_sysfs_dev_files(struct zorro_dev *z);
-
+extern const struct attribute_group *zorro_device_attribute_groups[];
diff --git a/firmware/Makefile b/firmware/Makefile
index e297e1b52636..fa3e81c2a97b 100644
--- a/firmware/Makefile
+++ b/firmware/Makefile
@@ -176,7 +176,8 @@ quiet_cmd_fwbin = MK_FW $@
wordsize_deps := $(wildcard include/config/64bit.h include/config/32bit.h \
include/config/ppc32.h include/config/ppc64.h \
include/config/superh32.h include/config/superh64.h \
- include/config/x86_32.h include/config/x86_64.h)
+ include/config/x86_32.h include/config/x86_64.h \
+ firmware/Makefile)
$(patsubst %,$(obj)/%.gen.S, $(fw-shipped-y)): %: $(wordsize_deps)
$(call cmd,fwbin,$(patsubst %.gen.S,%,$@))
diff --git a/fs/affs/affs.h b/fs/affs/affs.h
index 2f8bab390d13..773749be8290 100644
--- a/fs/affs/affs.h
+++ b/fs/affs/affs.h
@@ -7,7 +7,7 @@
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/buffer_head.h>
-#include <linux/amigaffs.h>
+#include "amigaffs.h"
#include <linux/mutex.h>
#include <linux/workqueue.h>
@@ -173,7 +173,7 @@ extern int affs_link(struct dentry *olddentry, struct inode *dir,
struct dentry *dentry);
extern int affs_symlink(struct inode *dir, struct dentry *dentry,
const char *symname);
-extern int affs_rename(struct inode *old_dir, struct dentry *old_dentry,
+extern int affs_rename2(struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry,
unsigned int flags);
diff --git a/include/linux/amigaffs.h b/fs/affs/amigaffs.h
index 43b41c06aa37..43b41c06aa37 100644
--- a/include/linux/amigaffs.h
+++ b/fs/affs/amigaffs.h
diff --git a/fs/affs/dir.c b/fs/affs/dir.c
index f1e7294381c5..591ecd7f3063 100644
--- a/fs/affs/dir.c
+++ b/fs/affs/dir.c
@@ -35,7 +35,7 @@ const struct inode_operations affs_dir_inode_operations = {
.symlink = affs_symlink,
.mkdir = affs_mkdir,
.rmdir = affs_rmdir,
- .rename = affs_rename,
+ .rename = affs_rename2,
.setattr = affs_notify_change,
};
diff --git a/fs/affs/file.c b/fs/affs/file.c
index 0deec9cc2362..196ee7f6fdc4 100644
--- a/fs/affs/file.c
+++ b/fs/affs/file.c
@@ -499,7 +499,7 @@ affs_getemptyblk_ino(struct inode *inode, int block)
}
static int
-affs_do_readpage_ofs(struct page *page, unsigned to)
+affs_do_readpage_ofs(struct page *page, unsigned to, int create)
{
struct inode *inode = page->mapping->host;
struct super_block *sb = inode->i_sb;
@@ -518,7 +518,7 @@ affs_do_readpage_ofs(struct page *page, unsigned to)
boff = tmp % bsize;
while (pos < to) {
- bh = affs_bread_ino(inode, bidx, 0);
+ bh = affs_bread_ino(inode, bidx, create);
if (IS_ERR(bh))
return PTR_ERR(bh);
tmp = min(bsize - boff, to - pos);
@@ -620,7 +620,7 @@ affs_readpage_ofs(struct file *file, struct page *page)
memset(page_address(page) + to, 0, PAGE_SIZE - to);
}
- err = affs_do_readpage_ofs(page, to);
+ err = affs_do_readpage_ofs(page, to, 0);
if (!err)
SetPageUptodate(page);
unlock_page(page);
@@ -657,7 +657,7 @@ static int affs_write_begin_ofs(struct file *file, struct address_space *mapping
return 0;
/* XXX: inefficient but safe in the face of short writes */
- err = affs_do_readpage_ofs(page, PAGE_SIZE);
+ err = affs_do_readpage_ofs(page, PAGE_SIZE, 1);
if (err) {
unlock_page(page);
put_page(page);
@@ -679,7 +679,7 @@ static int affs_write_end_ofs(struct file *file, struct address_space *mapping,
int written;
from = pos & (PAGE_SIZE - 1);
- to = pos + len;
+ to = from + len;
/*
* XXX: not sure if this can handle short copies (len < copied), but
* we don't have to, because the page should always be uptodate here,
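
Note: the one-line change in affs_write_end_ofs() above fixes an offset bug: pos is an absolute file position, so the old to = pos + len could point far beyond the current page, while the in-page end must be derived from the in-page start. As a worked example with PAGE_SIZE = 4096, a 100-byte write at file offset 8200 gives from = 8200 & 4095 = 8 and to = from + len = 108, whereas the old expression would have produced to = 8300.
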
diff --git a/fs/affs/inode.c b/fs/affs/inode.c
index abcc59899229..fd4ef3c40e40 100644
--- a/fs/affs/inode.c
+++ b/fs/affs/inode.c
@@ -140,6 +140,7 @@ struct inode *affs_iget(struct super_block *sb, unsigned long ino)
inode->i_fop = &affs_file_operations;
break;
case ST_SOFTLINK:
+ inode->i_size = strlen((char *)AFFS_HEAD(bh)->table);
inode->i_mode |= S_IFLNK;
inode_nohighmem(inode);
inode->i_op = &affs_symlink_inode_operations;
diff --git a/fs/affs/namei.c b/fs/affs/namei.c
index 96dd1d09a273..46d3ace6761d 100644
--- a/fs/affs/namei.c
+++ b/fs/affs/namei.c
@@ -365,6 +365,7 @@ affs_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
symname++;
}
*p = 0;
+ inode->i_size = i + 1;
mark_buffer_dirty_inode(bh, inode);
affs_brelse(bh);
mark_inode_dirty(inode);
@@ -393,21 +394,14 @@ affs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
return affs_add_entry(dir, inode, dentry, ST_LINKFILE);
}
-int
+static int
affs_rename(struct inode *old_dir, struct dentry *old_dentry,
- struct inode *new_dir, struct dentry *new_dentry,
- unsigned int flags)
+ struct inode *new_dir, struct dentry *new_dentry)
{
struct super_block *sb = old_dir->i_sb;
struct buffer_head *bh = NULL;
int retval;
- if (flags & ~RENAME_NOREPLACE)
- return -EINVAL;
-
- pr_debug("%s(old=%lu,\"%pd\" to new=%lu,\"%pd\")\n", __func__,
- old_dir->i_ino, old_dentry, new_dir->i_ino, new_dentry);
-
retval = affs_check_name(new_dentry->d_name.name,
new_dentry->d_name.len,
affs_nofilenametruncate(old_dentry));
@@ -447,6 +441,76 @@ done:
return retval;
}
+static int
+affs_xrename(struct inode *old_dir, struct dentry *old_dentry,
+ struct inode *new_dir, struct dentry *new_dentry)
+{
+
+ struct super_block *sb = old_dir->i_sb;
+ struct buffer_head *bh_old = NULL;
+ struct buffer_head *bh_new = NULL;
+ int retval;
+
+ bh_old = affs_bread(sb, d_inode(old_dentry)->i_ino);
+ if (!bh_old)
+ return -EIO;
+
+ bh_new = affs_bread(sb, d_inode(new_dentry)->i_ino);
+ if (!bh_new)
+ return -EIO;
+
+ /* Remove old header from its parent directory. */
+ affs_lock_dir(old_dir);
+ retval = affs_remove_hash(old_dir, bh_old);
+ affs_unlock_dir(old_dir);
+ if (retval)
+ goto done;
+
+ /* Remove new header from its parent directory. */
+ affs_lock_dir(new_dir);
+ retval = affs_remove_hash(new_dir, bh_new);
+ affs_unlock_dir(new_dir);
+ if (retval)
+ goto done;
+
+ /* Insert old into the new directory with the new name. */
+ affs_copy_name(AFFS_TAIL(sb, bh_old)->name, new_dentry);
+ affs_fix_checksum(sb, bh_old);
+ affs_lock_dir(new_dir);
+ retval = affs_insert_hash(new_dir, bh_old);
+ affs_unlock_dir(new_dir);
+
+ /* Insert new into the old directory with the old name. */
+ affs_copy_name(AFFS_TAIL(sb, bh_new)->name, old_dentry);
+ affs_fix_checksum(sb, bh_new);
+ affs_lock_dir(old_dir);
+ retval = affs_insert_hash(old_dir, bh_new);
+ affs_unlock_dir(old_dir);
+done:
+ mark_buffer_dirty_inode(bh_old, new_dir);
+ mark_buffer_dirty_inode(bh_new, old_dir);
+ affs_brelse(bh_old);
+ affs_brelse(bh_new);
+ return retval;
+}
+
+int affs_rename2(struct inode *old_dir, struct dentry *old_dentry,
+ struct inode *new_dir, struct dentry *new_dentry,
+ unsigned int flags)
+{
+
+ if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE))
+ return -EINVAL;
+
+ pr_debug("%s(old=%lu,\"%pd\" to new=%lu,\"%pd\")\n", __func__,
+ old_dir->i_ino, old_dentry, new_dir->i_ino, new_dentry);
+
+ if (flags & RENAME_EXCHANGE)
+ return affs_xrename(old_dir, old_dentry, new_dir, new_dentry);
+
+ return affs_rename(old_dir, old_dentry, new_dir, new_dentry);
+}
+
static struct dentry *affs_get_parent(struct dentry *child)
{
struct inode *parent;
@@ -477,11 +541,6 @@ static struct inode *affs_nfs_get_inode(struct super_block *sb, u64 ino,
if (IS_ERR(inode))
return ERR_CAST(inode);
- if (generation && inode->i_generation != generation) {
- iput(inode);
- return ERR_PTR(-ESTALE);
- }
-
return inode;
}
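
Note: the new affs_rename2() above is the flag-aware ->rename entry point: it rejects unsupported flags, routes RENAME_EXCHANGE to affs_xrename() (which swaps the two directory headers between their parents), and falls back to the classic rename otherwise. From user space the exchange path is reached through renameat2(); a small test sketch (the raw syscall is used since a libc wrapper may not be available, and the file names are of course placeholders):

	#define _GNU_SOURCE
	#include <fcntl.h>		/* AT_FDCWD */
	#include <stdio.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	#ifndef RENAME_EXCHANGE
	#define RENAME_EXCHANGE (1 << 1)	/* from include/uapi/linux/fs.h */
	#endif

	int main(void)
	{
		/* Atomically swap the two names on an AFFS mount. */
		if (syscall(SYS_renameat2, AT_FDCWD, "a.txt",
			    AT_FDCWD, "b.txt", RENAME_EXCHANGE) != 0) {
			perror("renameat2");
			return 1;
		}
		return 0;
	}
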
diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
index c500e954debb..63e7c4760bfb 100644
--- a/fs/befs/linuxvfs.c
+++ b/fs/befs/linuxvfs.c
@@ -58,6 +58,7 @@ static struct dentry *befs_fh_to_dentry(struct super_block *sb,
struct fid *fid, int fh_len, int fh_type);
static struct dentry *befs_fh_to_parent(struct super_block *sb,
struct fid *fid, int fh_len, int fh_type);
+static struct dentry *befs_get_parent(struct dentry *child);
static const struct super_operations befs_sops = {
.alloc_inode = befs_alloc_inode, /* allocate a new inode */
@@ -93,6 +94,7 @@ static const struct address_space_operations befs_symlink_aops = {
static const struct export_operations befs_export_operations = {
.fh_to_dentry = befs_fh_to_dentry,
.fh_to_parent = befs_fh_to_parent,
+ .get_parent = befs_get_parent,
};
/*
@@ -667,6 +669,19 @@ static struct dentry *befs_fh_to_parent(struct super_block *sb,
befs_nfs_get_inode);
}
+static struct dentry *befs_get_parent(struct dentry *child)
+{
+ struct inode *parent;
+ struct befs_inode_info *befs_ino = BEFS_I(d_inode(child));
+
+ parent = befs_iget(child->d_sb,
+ (unsigned long)befs_ino->i_parent.start);
+ if (IS_ERR(parent))
+ return ERR_CAST(parent);
+
+ return d_obtain_alias(parent);
+}
+
enum {
Opt_uid, Opt_gid, Opt_charset, Opt_debug, Opt_err,
};
diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c
index bee1a36bc2ec..f4718098ac31 100644
--- a/fs/binfmt_misc.c
+++ b/fs/binfmt_misc.c
@@ -818,7 +818,7 @@ static const struct super_operations s_ops = {
static int bm_fill_super(struct super_block *sb, void *data, int silent)
{
int err;
- static struct tree_descr bm_files[] = {
+ static const struct tree_descr bm_files[] = {
[2] = {"status", &bm_status_operations, S_IWUSR|S_IRUGO},
[3] = {"register", &bm_register_operations, S_IWUSR},
/* last one */ {""}
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 0d435c794d76..2a305c1a2d88 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -18,6 +18,7 @@
#include <linux/module.h>
#include <linux/blkpg.h>
#include <linux/magic.h>
+#include <linux/dax.h>
#include <linux/buffer_head.h>
#include <linux/swap.h>
#include <linux/pagevec.h>
@@ -716,50 +717,18 @@ int bdev_write_page(struct block_device *bdev, sector_t sector,
}
EXPORT_SYMBOL_GPL(bdev_write_page);
-/**
- * bdev_direct_access() - Get the address for directly-accessibly memory
- * @bdev: The device containing the memory
- * @dax: control and output parameters for ->direct_access
- *
- * If a block device is made up of directly addressable memory, this function
- * will tell the caller the PFN and the address of the memory. The address
- * may be directly dereferenced within the kernel without the need to call
- * ioremap(), kmap() or similar. The PFN is suitable for inserting into
- * page tables.
- *
- * Return: negative errno if an error occurs, otherwise the number of bytes
- * accessible at this address.
- */
-long bdev_direct_access(struct block_device *bdev, struct blk_dax_ctl *dax)
+int bdev_dax_pgoff(struct block_device *bdev, sector_t sector, size_t size,
+ pgoff_t *pgoff)
{
- sector_t sector = dax->sector;
- long avail, size = dax->size;
- const struct block_device_operations *ops = bdev->bd_disk->fops;
+ phys_addr_t phys_off = (get_start_sect(bdev) + sector) * 512;
- /*
- * The device driver is allowed to sleep, in order to make the
- * memory directly accessible.
- */
- might_sleep();
-
- if (size < 0)
- return size;
- if (!blk_queue_dax(bdev_get_queue(bdev)) || !ops->direct_access)
- return -EOPNOTSUPP;
- if ((sector + DIV_ROUND_UP(size, 512)) >
- part_nr_sects_read(bdev->bd_part))
- return -ERANGE;
- sector += get_start_sect(bdev);
- if (sector % (PAGE_SIZE / 512))
+ if (pgoff)
+ *pgoff = PHYS_PFN(phys_off);
+ if (phys_off % PAGE_SIZE || size % PAGE_SIZE)
return -EINVAL;
- avail = ops->direct_access(bdev, sector, &dax->addr, &dax->pfn, size);
- if (!avail)
- return -ERANGE;
- if (avail > 0 && avail & ~PAGE_MASK)
- return -ENXIO;
- return min(avail, size);
+ return 0;
}
-EXPORT_SYMBOL_GPL(bdev_direct_access);
+EXPORT_SYMBOL(bdev_dax_pgoff);
/**
* bdev_dax_supported() - Check if the device supports dax for filesystem
@@ -773,62 +742,46 @@ EXPORT_SYMBOL_GPL(bdev_direct_access);
*/
int bdev_dax_supported(struct super_block *sb, int blocksize)
{
- struct blk_dax_ctl dax = {
- .sector = 0,
- .size = PAGE_SIZE,
- };
- int err;
+ struct block_device *bdev = sb->s_bdev;
+ struct dax_device *dax_dev;
+ pgoff_t pgoff;
+ int err, id;
+ void *kaddr;
+ pfn_t pfn;
+ long len;
if (blocksize != PAGE_SIZE) {
vfs_msg(sb, KERN_ERR, "error: unsupported blocksize for dax");
return -EINVAL;
}
- err = bdev_direct_access(sb->s_bdev, &dax);
- if (err < 0) {
- switch (err) {
- case -EOPNOTSUPP:
- vfs_msg(sb, KERN_ERR,
- "error: device does not support dax");
- break;
- case -EINVAL:
- vfs_msg(sb, KERN_ERR,
- "error: unaligned partition for dax");
- break;
- default:
- vfs_msg(sb, KERN_ERR,
- "error: dax access failed (%d)", err);
- }
+ err = bdev_dax_pgoff(bdev, 0, PAGE_SIZE, &pgoff);
+ if (err) {
+ vfs_msg(sb, KERN_ERR, "error: unaligned partition for dax");
return err;
}
- return 0;
-}
-EXPORT_SYMBOL_GPL(bdev_dax_supported);
-
-/**
- * bdev_dax_capable() - Return if the raw device is capable for dax
- * @bdev: The device for raw block device access
- */
-bool bdev_dax_capable(struct block_device *bdev)
-{
- struct blk_dax_ctl dax = {
- .size = PAGE_SIZE,
- };
+ dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
+ if (!dax_dev) {
+ vfs_msg(sb, KERN_ERR, "error: device does not support dax");
+ return -EOPNOTSUPP;
+ }
- if (!IS_ENABLED(CONFIG_FS_DAX))
- return false;
+ id = dax_read_lock();
+ len = dax_direct_access(dax_dev, pgoff, 1, &kaddr, &pfn);
+ dax_read_unlock(id);
- dax.sector = 0;
- if (bdev_direct_access(bdev, &dax) < 0)
- return false;
+ put_dax(dax_dev);
- dax.sector = bdev->bd_part->nr_sects - (PAGE_SIZE / 512);
- if (bdev_direct_access(bdev, &dax) < 0)
- return false;
+ if (len < 1) {
+ vfs_msg(sb, KERN_ERR,
+ "error: dax access failed (%ld)", len);
+ return len < 0 ? len : -EIO;
+ }
- return true;
+ return 0;
}
+EXPORT_SYMBOL_GPL(bdev_dax_supported);
/*
* pseudo-fs
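
Note: the rewritten bdev_dax_supported() above replaces the bdev_direct_access() probe with the new dax_device API: translate sector 0 into a page offset with bdev_dax_pgoff(), look the dax_device up by the gendisk name, and attempt a one-page dax_direct_access() under dax_read_lock(). The same sequence, condensed out of the +/- noise into a plain helper as a reading aid (error messages dropped; not a replacement for the hunk above):

	#include <linux/blkdev.h>
	#include <linux/dax.h>

	static int example_dax_probe_one_page(struct block_device *bdev)
	{
		struct dax_device *dax_dev;
		pgoff_t pgoff;
		void *kaddr;
		pfn_t pfn;
		long len;
		int id;

		/* Fails if the partition start or size is not page aligned. */
		if (bdev_dax_pgoff(bdev, 0, PAGE_SIZE, &pgoff))
			return -EINVAL;

		dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
		if (!dax_dev)
			return -EOPNOTSUPP;	/* no dax_device registered */

		id = dax_read_lock();		/* keeps the dax_device live */
		len = dax_direct_access(dax_dev, pgoff, 1, &kaddr, &pfn);
		dax_read_unlock(id);
		put_dax(dax_dev);

		return len < 1 ? (len < 0 ? len : -EIO) : 0;
	}
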
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index 7699e16784d3..24865da63d8f 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -26,6 +26,11 @@
#include "delayed-ref.h"
#include "locking.h"
+enum merge_mode {
+ MERGE_IDENTICAL_KEYS = 1,
+ MERGE_IDENTICAL_PARENTS,
+};
+
/* Just an arbitrary number so we can be sure this happened */
#define BACKREF_FOUND_SHARED 6
@@ -533,7 +538,7 @@ static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
* slot==nritems. In that case, go to the next leaf before we continue.
*/
if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
- if (time_seq == (u64)-1)
+ if (time_seq == SEQ_LAST)
ret = btrfs_next_leaf(root, path);
else
ret = btrfs_next_old_leaf(root, path, time_seq);
@@ -577,7 +582,7 @@ static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
eie = NULL;
}
next:
- if (time_seq == (u64)-1)
+ if (time_seq == SEQ_LAST)
ret = btrfs_next_item(root, path);
else
ret = btrfs_next_old_item(root, path, time_seq);
@@ -629,7 +634,7 @@ static int __resolve_indirect_ref(struct btrfs_fs_info *fs_info,
if (path->search_commit_root)
root_level = btrfs_header_level(root->commit_root);
- else if (time_seq == (u64)-1)
+ else if (time_seq == SEQ_LAST)
root_level = btrfs_header_level(root->node);
else
root_level = btrfs_old_root_level(root, time_seq);
@@ -640,7 +645,7 @@ static int __resolve_indirect_ref(struct btrfs_fs_info *fs_info,
}
path->lowest_level = level;
- if (time_seq == (u64)-1)
+ if (time_seq == SEQ_LAST)
ret = btrfs_search_slot(NULL, root, &ref->key_for_search, path,
0, 0);
else
@@ -809,14 +814,12 @@ static int __add_missing_keys(struct btrfs_fs_info *fs_info,
/*
* merge backrefs and adjust counts accordingly
*
- * mode = 1: merge identical keys, if key is set
- * FIXME: if we add more keys in __add_prelim_ref, we can merge more here.
- * additionally, we could even add a key range for the blocks we
- * looked into to merge even more (-> replace unresolved refs by those
- * having a parent).
- * mode = 2: merge identical parents
+ * FIXME: For MERGE_IDENTICAL_KEYS, if we add more keys in __add_prelim_ref
+ * then we can merge more here. Additionally, we could even add a key
+ * range for the blocks we looked into to merge even more (-> replace
+ * unresolved refs by those having a parent).
*/
-static void __merge_refs(struct list_head *head, int mode)
+static void __merge_refs(struct list_head *head, enum merge_mode mode)
{
struct __prelim_ref *pos1;
@@ -829,7 +832,7 @@ static void __merge_refs(struct list_head *head, int mode)
if (!ref_for_same_block(ref1, ref2))
continue;
- if (mode == 1) {
+ if (mode == MERGE_IDENTICAL_KEYS) {
if (!ref1->parent && ref2->parent)
swap(ref1, ref2);
} else {
@@ -1196,7 +1199,7 @@ static int __add_keyed_refs(struct btrfs_fs_info *fs_info,
*
* NOTE: This can return values > 0
*
- * If time_seq is set to (u64)-1, it will not search delayed_refs, and behave
+ * If time_seq is set to SEQ_LAST, it will not search delayed_refs, and behave
* much like trans == NULL case, the difference only lies in it will not
* commit root.
* The special case is for qgroup to search roots in commit_transaction().
@@ -1243,7 +1246,7 @@ static int find_parent_nodes(struct btrfs_trans_handle *trans,
path->skip_locking = 1;
}
- if (time_seq == (u64)-1)
+ if (time_seq == SEQ_LAST)
path->skip_locking = 1;
/*
@@ -1273,9 +1276,9 @@ again:
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
if (trans && likely(trans->type != __TRANS_DUMMY) &&
- time_seq != (u64)-1) {
+ time_seq != SEQ_LAST) {
#else
- if (trans && time_seq != (u64)-1) {
+ if (trans && time_seq != SEQ_LAST) {
#endif
/*
* look if there are updates for this ref queued and lock the
@@ -1286,7 +1289,7 @@ again:
head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
if (head) {
if (!mutex_trylock(&head->mutex)) {
- atomic_inc(&head->node.refs);
+ refcount_inc(&head->node.refs);
spin_unlock(&delayed_refs->lock);
btrfs_release_path(path);
@@ -1374,7 +1377,7 @@ again:
if (ret)
goto out;
- __merge_refs(&prefs, 1);
+ __merge_refs(&prefs, MERGE_IDENTICAL_KEYS);
ret = __resolve_indirect_refs(fs_info, path, time_seq, &prefs,
extent_item_pos, total_refs,
@@ -1382,7 +1385,7 @@ again:
if (ret)
goto out;
- __merge_refs(&prefs, 2);
+ __merge_refs(&prefs, MERGE_IDENTICAL_PARENTS);
while (!list_empty(&prefs)) {
ref = list_first_entry(&prefs, struct __prelim_ref, list);
diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
index 0c6baaba0651..b8622e4d1744 100644
--- a/fs/btrfs/btrfs_inode.h
+++ b/fs/btrfs/btrfs_inode.h
@@ -125,6 +125,13 @@ struct btrfs_inode {
u64 delalloc_bytes;
/*
+ * Total number of bytes pending delalloc that fall within a file
+ * range that is either a hole or beyond EOF (and no prealloc extent
+ * exists in the range). This is always <= delalloc_bytes.
+ */
+ u64 new_delalloc_bytes;
+
+ /*
* total number of bytes pending defrag, used by stat to check whether
* it needs COW.
*/
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index c7721a6aa3bb..10e6b282d09d 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -44,7 +44,7 @@
struct compressed_bio {
/* number of bios pending for this compressed extent */
- atomic_t pending_bios;
+ refcount_t pending_bios;
/* the pages with the compressed data on them */
struct page **compressed_pages;
@@ -161,7 +161,7 @@ static void end_compressed_bio_read(struct bio *bio)
/* if there are more bios still pending for this compressed
* extent, just exit
*/
- if (!atomic_dec_and_test(&cb->pending_bios))
+ if (!refcount_dec_and_test(&cb->pending_bios))
goto out;
inode = cb->inode;
@@ -274,7 +274,7 @@ static void end_compressed_bio_write(struct bio *bio)
/* if there are more bios still pending for this compressed
* extent, just exit
*/
- if (!atomic_dec_and_test(&cb->pending_bios))
+ if (!refcount_dec_and_test(&cb->pending_bios))
goto out;
/* ok, we're the last bio for this extent, step one is to
@@ -342,7 +342,7 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
if (!cb)
return -ENOMEM;
- atomic_set(&cb->pending_bios, 0);
+ refcount_set(&cb->pending_bios, 0);
cb->errors = 0;
cb->inode = inode;
cb->start = start;
@@ -363,7 +363,7 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
bio->bi_private = cb;
bio->bi_end_io = end_compressed_bio_write;
- atomic_inc(&cb->pending_bios);
+ refcount_set(&cb->pending_bios, 1);
/* create and submit bios for the compressed pages */
bytes_left = compressed_len;
@@ -388,7 +388,7 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
* we inc the count. Otherwise, the cb might get
* freed before we're done setting it up
*/
- atomic_inc(&cb->pending_bios);
+ refcount_inc(&cb->pending_bios);
ret = btrfs_bio_wq_end_io(fs_info, bio,
BTRFS_WQ_ENDIO_DATA);
BUG_ON(ret); /* -ENOMEM */
@@ -607,7 +607,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
if (!cb)
goto out;
- atomic_set(&cb->pending_bios, 0);
+ refcount_set(&cb->pending_bios, 0);
cb->errors = 0;
cb->inode = inode;
cb->mirror_num = mirror_num;
@@ -656,7 +656,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
bio_set_op_attrs (comp_bio, REQ_OP_READ, 0);
comp_bio->bi_private = cb;
comp_bio->bi_end_io = end_compressed_bio_read;
- atomic_inc(&cb->pending_bios);
+ refcount_set(&cb->pending_bios, 1);
for (pg_index = 0; pg_index < nr_pages; pg_index++) {
page = cb->compressed_pages[pg_index];
@@ -685,7 +685,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
* we inc the count. Otherwise, the cb might get
* freed before we're done setting it up
*/
- atomic_inc(&cb->pending_bios);
+ refcount_inc(&cb->pending_bios);
if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
ret = btrfs_lookup_bio_sums(inode, comp_bio,
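
Note: the compression.c changes above are one instance of a conversion applied throughout this btrfs series (delayed nodes, delayed refs, extent states, transactions, roots): lifetime reference counts move from atomic_t to refcount_t, using refcount_set() for initialisation and refcount_inc()/refcount_dec_and_test() for the take/release pairs, so over- and underflows saturate and warn instead of silently wrapping. A self-contained sketch of the idiom with an illustrative object type:

	#include <linux/refcount.h>
	#include <linux/slab.h>

	struct example_obj {
		refcount_t refs;
		/* ... payload ... */
	};

	static struct example_obj *example_alloc(void)
	{
		struct example_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

		if (obj)
			refcount_set(&obj->refs, 1);	/* the initial reference */
		return obj;
	}

	static void example_get(struct example_obj *obj)
	{
		refcount_inc(&obj->refs);	/* warns and saturates on overflow */
	}

	static void example_put(struct example_obj *obj)
	{
		if (refcount_dec_and_test(&obj->refs))
			kfree(obj);
	}
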
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 7dc8844037e0..a3a75f1de002 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -567,7 +567,7 @@ tree_mod_log_insert_key(struct btrfs_fs_info *fs_info,
static noinline int
tree_mod_log_insert_move(struct btrfs_fs_info *fs_info,
struct extent_buffer *eb, int dst_slot, int src_slot,
- int nr_items, gfp_t flags)
+ int nr_items)
{
struct tree_mod_elem *tm = NULL;
struct tree_mod_elem **tm_list = NULL;
@@ -578,11 +578,11 @@ tree_mod_log_insert_move(struct btrfs_fs_info *fs_info,
if (!tree_mod_need_log(fs_info, eb))
return 0;
- tm_list = kcalloc(nr_items, sizeof(struct tree_mod_elem *), flags);
+ tm_list = kcalloc(nr_items, sizeof(struct tree_mod_elem *), GFP_NOFS);
if (!tm_list)
return -ENOMEM;
- tm = kzalloc(sizeof(*tm), flags);
+ tm = kzalloc(sizeof(*tm), GFP_NOFS);
if (!tm) {
ret = -ENOMEM;
goto free_tms;
@@ -596,7 +596,7 @@ tree_mod_log_insert_move(struct btrfs_fs_info *fs_info,
for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
tm_list[i] = alloc_tree_mod_elem(eb, i + dst_slot,
- MOD_LOG_KEY_REMOVE_WHILE_MOVING, flags);
+ MOD_LOG_KEY_REMOVE_WHILE_MOVING, GFP_NOFS);
if (!tm_list[i]) {
ret = -ENOMEM;
goto free_tms;
@@ -663,7 +663,7 @@ __tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
static noinline int
tree_mod_log_insert_root(struct btrfs_fs_info *fs_info,
struct extent_buffer *old_root,
- struct extent_buffer *new_root, gfp_t flags,
+ struct extent_buffer *new_root,
int log_removal)
{
struct tree_mod_elem *tm = NULL;
@@ -678,14 +678,14 @@ tree_mod_log_insert_root(struct btrfs_fs_info *fs_info,
if (log_removal && btrfs_header_level(old_root) > 0) {
nritems = btrfs_header_nritems(old_root);
tm_list = kcalloc(nritems, sizeof(struct tree_mod_elem *),
- flags);
+ GFP_NOFS);
if (!tm_list) {
ret = -ENOMEM;
goto free_tms;
}
for (i = 0; i < nritems; i++) {
tm_list[i] = alloc_tree_mod_elem(old_root, i,
- MOD_LOG_KEY_REMOVE_WHILE_FREEING, flags);
+ MOD_LOG_KEY_REMOVE_WHILE_FREEING, GFP_NOFS);
if (!tm_list[i]) {
ret = -ENOMEM;
goto free_tms;
@@ -693,7 +693,7 @@ tree_mod_log_insert_root(struct btrfs_fs_info *fs_info,
}
}
- tm = kzalloc(sizeof(*tm), flags);
+ tm = kzalloc(sizeof(*tm), GFP_NOFS);
if (!tm) {
ret = -ENOMEM;
goto free_tms;
@@ -873,7 +873,7 @@ tree_mod_log_eb_move(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
{
int ret;
ret = tree_mod_log_insert_move(fs_info, dst, dst_offset, src_offset,
- nr_items, GFP_NOFS);
+ nr_items);
BUG_ON(ret < 0);
}
@@ -943,7 +943,7 @@ tree_mod_log_set_root_pointer(struct btrfs_root *root,
{
int ret;
ret = tree_mod_log_insert_root(root->fs_info, root->node,
- new_root_node, GFP_NOFS, log_removal);
+ new_root_node, log_removal);
BUG_ON(ret < 0);
}
@@ -5392,13 +5392,10 @@ int btrfs_compare_trees(struct btrfs_root *left_root,
goto out;
}
- tmp_buf = kmalloc(fs_info->nodesize, GFP_KERNEL | __GFP_NOWARN);
+ tmp_buf = kvmalloc(fs_info->nodesize, GFP_KERNEL);
if (!tmp_buf) {
- tmp_buf = vmalloc(fs_info->nodesize);
- if (!tmp_buf) {
- ret = -ENOMEM;
- goto out;
- }
+ ret = -ENOMEM;
+ goto out;
}
left_path->search_commit_root = 1;
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 3e21211e99c3..643c70d2b2e6 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -39,6 +39,7 @@
#include <linux/security.h>
#include <linux/sizes.h>
#include <linux/dynamic_debug.h>
+#include <linux/refcount.h>
#include "extent_io.h"
#include "extent_map.h"
#include "async-thread.h"
@@ -518,7 +519,7 @@ struct btrfs_caching_control {
struct btrfs_work work;
struct btrfs_block_group_cache *block_group;
u64 progress;
- atomic_t count;
+ refcount_t count;
};
/* Once caching_thread() finds this much free space, it will wake up waiters. */
@@ -538,6 +539,14 @@ struct btrfs_io_ctl {
unsigned check_crcs:1;
};
+/*
+ * Tree to record all locked full stripes of a RAID5/6 block group
+ */
+struct btrfs_full_stripe_locks_tree {
+ struct rb_root root;
+ struct mutex lock;
+};
+
struct btrfs_block_group_cache {
struct btrfs_key key;
struct btrfs_block_group_item item;
@@ -648,6 +657,9 @@ struct btrfs_block_group_cache {
* Protected by free_space_lock.
*/
int needs_free_space;
+
+ /* Record locked full stripes for RAID5/6 block group */
+ struct btrfs_full_stripe_locks_tree full_stripe_locks_root;
};
/* delayed seq elem */
@@ -658,6 +670,8 @@ struct seq_list {
#define SEQ_LIST_INIT(name) { .list = LIST_HEAD_INIT((name).list), .seq = 0 }
+#define SEQ_LAST ((u64)-1)
+
enum btrfs_orphan_cleanup_state {
ORPHAN_CLEANUP_STARTED = 1,
ORPHAN_CLEANUP_DONE = 2,
@@ -702,6 +716,11 @@ struct btrfs_delayed_root;
#define BTRFS_FS_BTREE_ERR 11
#define BTRFS_FS_LOG1_ERR 12
#define BTRFS_FS_LOG2_ERR 13
+/*
+ * Indicate that a whole-filesystem exclusive operation is running
+ * (device replace, resize, device add/delete, balance)
+ */
+#define BTRFS_FS_EXCL_OP 14
struct btrfs_fs_info {
u8 fsid[BTRFS_FSID_SIZE];
@@ -1066,8 +1085,6 @@ struct btrfs_fs_info {
/* device replace state */
struct btrfs_dev_replace dev_replace;
- atomic_t mutually_exclusive_operation_running;
-
struct percpu_counter bio_counter;
wait_queue_head_t replace_wait;
@@ -1220,7 +1237,7 @@ struct btrfs_root {
dev_t anon_dev;
spinlock_t root_item_lock;
- atomic_t refs;
+ refcount_t refs;
struct mutex delalloc_mutex;
spinlock_t delalloc_lock;
@@ -3646,6 +3663,12 @@ int btrfs_scrub_cancel_dev(struct btrfs_fs_info *info,
struct btrfs_device *dev);
int btrfs_scrub_progress(struct btrfs_fs_info *fs_info, u64 devid,
struct btrfs_scrub_progress *progress);
+static inline void btrfs_init_full_stripe_locks_tree(
+ struct btrfs_full_stripe_locks_tree *locks_root)
+{
+ locks_root->root = RB_ROOT;
+ mutex_init(&locks_root->lock);
+}
/* dev-replace.c */
void btrfs_bio_counter_inc_blocked(struct btrfs_fs_info *fs_info);
@@ -3670,8 +3693,7 @@ struct reada_control *btrfs_reada_add(struct btrfs_root *root,
struct btrfs_key *start, struct btrfs_key *end);
int btrfs_reada_wait(void *handle);
void btrfs_reada_detach(void *handle);
-int btree_readahead_hook(struct btrfs_fs_info *fs_info,
- struct extent_buffer *eb, int err);
+int btree_readahead_hook(struct extent_buffer *eb, int err);
static inline int is_fstree(u64 rootid)
{
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
index 1aff676f0e5b..8ae409b5a61d 100644
--- a/fs/btrfs/delayed-inode.c
+++ b/fs/btrfs/delayed-inode.c
@@ -52,7 +52,7 @@ static inline void btrfs_init_delayed_node(
{
delayed_node->root = root;
delayed_node->inode_id = inode_id;
- atomic_set(&delayed_node->refs, 0);
+ refcount_set(&delayed_node->refs, 0);
delayed_node->ins_root = RB_ROOT;
delayed_node->del_root = RB_ROOT;
mutex_init(&delayed_node->mutex);
@@ -81,7 +81,7 @@ static struct btrfs_delayed_node *btrfs_get_delayed_node(
node = READ_ONCE(btrfs_inode->delayed_node);
if (node) {
- atomic_inc(&node->refs);
+ refcount_inc(&node->refs);
return node;
}
@@ -89,14 +89,14 @@ static struct btrfs_delayed_node *btrfs_get_delayed_node(
node = radix_tree_lookup(&root->delayed_nodes_tree, ino);
if (node) {
if (btrfs_inode->delayed_node) {
- atomic_inc(&node->refs); /* can be accessed */
+ refcount_inc(&node->refs); /* can be accessed */
BUG_ON(btrfs_inode->delayed_node != node);
spin_unlock(&root->inode_lock);
return node;
}
btrfs_inode->delayed_node = node;
/* can be accessed and cached in the inode */
- atomic_add(2, &node->refs);
+ refcount_add(2, &node->refs);
spin_unlock(&root->inode_lock);
return node;
}
@@ -125,7 +125,7 @@ again:
btrfs_init_delayed_node(node, root, ino);
/* cached in the btrfs inode and can be accessed */
- atomic_add(2, &node->refs);
+ refcount_set(&node->refs, 2);
ret = radix_tree_preload(GFP_NOFS);
if (ret) {
@@ -166,7 +166,7 @@ static void btrfs_queue_delayed_node(struct btrfs_delayed_root *root,
} else {
list_add_tail(&node->n_list, &root->node_list);
list_add_tail(&node->p_list, &root->prepare_list);
- atomic_inc(&node->refs); /* inserted into list */
+ refcount_inc(&node->refs); /* inserted into list */
root->nodes++;
set_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
}
@@ -180,7 +180,7 @@ static void btrfs_dequeue_delayed_node(struct btrfs_delayed_root *root,
spin_lock(&root->lock);
if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
root->nodes--;
- atomic_dec(&node->refs); /* not in the list */
+ refcount_dec(&node->refs); /* not in the list */
list_del_init(&node->n_list);
if (!list_empty(&node->p_list))
list_del_init(&node->p_list);
@@ -201,7 +201,7 @@ static struct btrfs_delayed_node *btrfs_first_delayed_node(
p = delayed_root->node_list.next;
node = list_entry(p, struct btrfs_delayed_node, n_list);
- atomic_inc(&node->refs);
+ refcount_inc(&node->refs);
out:
spin_unlock(&delayed_root->lock);
@@ -228,7 +228,7 @@ static struct btrfs_delayed_node *btrfs_next_delayed_node(
p = node->n_list.next;
next = list_entry(p, struct btrfs_delayed_node, n_list);
- atomic_inc(&next->refs);
+ refcount_inc(&next->refs);
out:
spin_unlock(&delayed_root->lock);
@@ -253,11 +253,11 @@ static void __btrfs_release_delayed_node(
btrfs_dequeue_delayed_node(delayed_root, delayed_node);
mutex_unlock(&delayed_node->mutex);
- if (atomic_dec_and_test(&delayed_node->refs)) {
+ if (refcount_dec_and_test(&delayed_node->refs)) {
bool free = false;
struct btrfs_root *root = delayed_node->root;
spin_lock(&root->inode_lock);
- if (atomic_read(&delayed_node->refs) == 0) {
+ if (refcount_read(&delayed_node->refs) == 0) {
radix_tree_delete(&root->delayed_nodes_tree,
delayed_node->inode_id);
free = true;
@@ -286,7 +286,7 @@ static struct btrfs_delayed_node *btrfs_first_prepared_delayed_node(
p = delayed_root->prepare_list.next;
list_del_init(p);
node = list_entry(p, struct btrfs_delayed_node, p_list);
- atomic_inc(&node->refs);
+ refcount_inc(&node->refs);
out:
spin_unlock(&delayed_root->lock);
@@ -308,7 +308,7 @@ static struct btrfs_delayed_item *btrfs_alloc_delayed_item(u32 data_len)
item->ins_or_del = 0;
item->bytes_reserved = 0;
item->delayed_node = NULL;
- atomic_set(&item->refs, 1);
+ refcount_set(&item->refs, 1);
}
return item;
}
@@ -483,7 +483,7 @@ static void btrfs_release_delayed_item(struct btrfs_delayed_item *item)
{
if (item) {
__btrfs_remove_delayed_item(item);
- if (atomic_dec_and_test(&item->refs))
+ if (refcount_dec_and_test(&item->refs))
kfree(item);
}
}
@@ -1600,14 +1600,14 @@ bool btrfs_readdir_get_delayed_items(struct inode *inode,
mutex_lock(&delayed_node->mutex);
item = __btrfs_first_delayed_insertion_item(delayed_node);
while (item) {
- atomic_inc(&item->refs);
+ refcount_inc(&item->refs);
list_add_tail(&item->readdir_list, ins_list);
item = __btrfs_next_delayed_item(item);
}
item = __btrfs_first_delayed_deletion_item(delayed_node);
while (item) {
- atomic_inc(&item->refs);
+ refcount_inc(&item->refs);
list_add_tail(&item->readdir_list, del_list);
item = __btrfs_next_delayed_item(item);
}
@@ -1621,7 +1621,7 @@ bool btrfs_readdir_get_delayed_items(struct inode *inode,
* insert/delete delayed items in this period. So we also needn't
* requeue or dequeue this delayed node.
*/
- atomic_dec(&delayed_node->refs);
+ refcount_dec(&delayed_node->refs);
return true;
}
@@ -1634,13 +1634,13 @@ void btrfs_readdir_put_delayed_items(struct inode *inode,
list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
list_del(&curr->readdir_list);
- if (atomic_dec_and_test(&curr->refs))
+ if (refcount_dec_and_test(&curr->refs))
kfree(curr);
}
list_for_each_entry_safe(curr, next, del_list, readdir_list) {
list_del(&curr->readdir_list);
- if (atomic_dec_and_test(&curr->refs))
+ if (refcount_dec_and_test(&curr->refs))
kfree(curr);
}
@@ -1667,7 +1667,7 @@ int btrfs_should_delete_dir_index(struct list_head *del_list,
list_del(&curr->readdir_list);
ret = (curr->key.offset == index);
- if (atomic_dec_and_test(&curr->refs))
+ if (refcount_dec_and_test(&curr->refs))
kfree(curr);
if (ret)
@@ -1705,7 +1705,7 @@ int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
list_del(&curr->readdir_list);
if (curr->key.offset < ctx->pos) {
- if (atomic_dec_and_test(&curr->refs))
+ if (refcount_dec_and_test(&curr->refs))
kfree(curr);
continue;
}
@@ -1722,7 +1722,7 @@ int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
over = !dir_emit(ctx, name, name_len,
location.objectid, d_type);
- if (atomic_dec_and_test(&curr->refs))
+ if (refcount_dec_and_test(&curr->refs))
kfree(curr);
if (over)
@@ -1963,7 +1963,7 @@ void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
inode_id = delayed_nodes[n - 1]->inode_id + 1;
for (i = 0; i < n; i++)
- atomic_inc(&delayed_nodes[i]->refs);
+ refcount_inc(&delayed_nodes[i]->refs);
spin_unlock(&root->inode_lock);
for (i = 0; i < n; i++) {
diff --git a/fs/btrfs/delayed-inode.h b/fs/btrfs/delayed-inode.h
index 40327cc3b99a..c4189d495934 100644
--- a/fs/btrfs/delayed-inode.h
+++ b/fs/btrfs/delayed-inode.h
@@ -26,7 +26,7 @@
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/atomic.h>
-
+#include <linux/refcount.h>
#include "ctree.h"
/* types of the delayed item */
@@ -67,7 +67,7 @@ struct btrfs_delayed_node {
struct rb_root del_root;
struct mutex mutex;
struct btrfs_inode_item inode_item;
- atomic_t refs;
+ refcount_t refs;
u64 index_cnt;
unsigned long flags;
int count;
@@ -80,7 +80,7 @@ struct btrfs_delayed_item {
struct list_head readdir_list; /* used for readdir items */
u64 bytes_reserved;
struct btrfs_delayed_node *delayed_node;
- atomic_t refs;
+ refcount_t refs;
int ins_or_del;
u32 data_len;
char data[0];
diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
index 6eb80952efb3..be70d90dfee5 100644
--- a/fs/btrfs/delayed-ref.c
+++ b/fs/btrfs/delayed-ref.c
@@ -164,7 +164,7 @@ int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
if (mutex_trylock(&head->mutex))
return 0;
- atomic_inc(&head->node.refs);
+ refcount_inc(&head->node.refs);
spin_unlock(&delayed_refs->lock);
mutex_lock(&head->mutex);
@@ -590,7 +590,7 @@ add_delayed_ref_head(struct btrfs_fs_info *fs_info,
delayed_refs = &trans->transaction->delayed_refs;
/* first set the basic ref node struct up */
- atomic_set(&ref->refs, 1);
+ refcount_set(&ref->refs, 1);
ref->bytenr = bytenr;
ref->num_bytes = num_bytes;
ref->ref_mod = count_mod;
@@ -682,7 +682,7 @@ add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
delayed_refs = &trans->transaction->delayed_refs;
/* first set the basic ref node struct up */
- atomic_set(&ref->refs, 1);
+ refcount_set(&ref->refs, 1);
ref->bytenr = bytenr;
ref->num_bytes = num_bytes;
ref->ref_mod = 1;
@@ -739,7 +739,7 @@ add_delayed_data_ref(struct btrfs_fs_info *fs_info,
seq = atomic64_read(&fs_info->tree_mod_seq);
/* first set the basic ref node struct up */
- atomic_set(&ref->refs, 1);
+ refcount_set(&ref->refs, 1);
ref->bytenr = bytenr;
ref->num_bytes = num_bytes;
ref->ref_mod = 1;
diff --git a/fs/btrfs/delayed-ref.h b/fs/btrfs/delayed-ref.h
index 0e537f98f1a1..c0264ff01b53 100644
--- a/fs/btrfs/delayed-ref.h
+++ b/fs/btrfs/delayed-ref.h
@@ -18,6 +18,8 @@
#ifndef __DELAYED_REF__
#define __DELAYED_REF__
+#include <linux/refcount.h>
+
/* these are the possible values of struct btrfs_delayed_ref_node->action */
#define BTRFS_ADD_DELAYED_REF 1 /* add one backref to the tree */
#define BTRFS_DROP_DELAYED_REF 2 /* delete one backref from the tree */
@@ -53,7 +55,7 @@ struct btrfs_delayed_ref_node {
u64 seq;
/* ref count on this data structure */
- atomic_t refs;
+ refcount_t refs;
/*
* how many refs is this entry adding or deleting. For
@@ -220,8 +222,8 @@ btrfs_free_delayed_extent_op(struct btrfs_delayed_extent_op *op)
static inline void btrfs_put_delayed_ref(struct btrfs_delayed_ref_node *ref)
{
- WARN_ON(atomic_read(&ref->refs) == 0);
- if (atomic_dec_and_test(&ref->refs)) {
+ WARN_ON(refcount_read(&ref->refs) == 0);
+ if (refcount_dec_and_test(&ref->refs)) {
WARN_ON(ref->in_tree);
switch (ref->type) {
case BTRFS_TREE_BLOCK_REF_KEY:
diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
index e653921f05d9..5fe1ca8abc70 100644
--- a/fs/btrfs/dev-replace.c
+++ b/fs/btrfs/dev-replace.c
@@ -546,8 +546,10 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
mutex_unlock(&fs_info->chunk_mutex);
mutex_unlock(&fs_info->fs_devices->device_list_mutex);
mutex_unlock(&uuid_mutex);
+ btrfs_rm_dev_replace_blocked(fs_info);
if (tgt_device)
btrfs_destroy_dev_replace_tgtdev(fs_info, tgt_device);
+ btrfs_rm_dev_replace_unblocked(fs_info);
mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);
return scrub_ret;
@@ -665,7 +667,7 @@ void btrfs_dev_replace_status(struct btrfs_fs_info *fs_info,
case BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED:
case BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED:
srcdev = dev_replace->srcdev;
- args->status.progress_1000 = div_u64(dev_replace->cursor_left,
+ args->status.progress_1000 = div64_u64(dev_replace->cursor_left,
div_u64(btrfs_device_get_total_bytes(srcdev), 1000));
break;
}
@@ -784,8 +786,7 @@ int btrfs_resume_dev_replace_async(struct btrfs_fs_info *fs_info)
}
btrfs_dev_replace_unlock(dev_replace, 1);
- WARN_ON(atomic_xchg(
- &fs_info->mutually_exclusive_operation_running, 1));
+ WARN_ON(test_and_set_bit(BTRFS_FS_EXCL_OP, &fs_info->flags));
task = kthread_run(btrfs_dev_replace_kthread, fs_info, "btrfs-devrepl");
return PTR_ERR_OR_ZERO(task);
}
@@ -814,7 +815,7 @@ static int btrfs_dev_replace_kthread(void *data)
(unsigned int)progress);
}
btrfs_dev_replace_continue_on_mount(fs_info);
- atomic_set(&fs_info->mutually_exclusive_operation_running, 0);
+ clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags);
return 0;
}
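
Note: dropping fs_info->mutually_exclusive_operation_running in favour of the BTRFS_FS_EXCL_OP bit means the whole-filesystem exclusivity (balance, resize, device add/remove/replace) is claimed with an atomic test_and_set_bit() and released with clear_bit(). A generic sketch of that claim/release shape (struct and flag name are illustrative):

	#include <linux/bitops.h>
	#include <linux/errno.h>

	#define EXAMPLE_FLAG_EXCL_OP	0	/* bit number inside ->flags */

	struct example_fs {
		unsigned long flags;
	};

	static int example_start_exclusive_op(struct example_fs *fs)
	{
		/* test_and_set_bit() returns the old value: non-zero means busy. */
		if (test_and_set_bit(EXAMPLE_FLAG_EXCL_OP, &fs->flags))
			return -EBUSY;
		return 0;
	}

	static void example_end_exclusive_op(struct example_fs *fs)
	{
		clear_bit(EXAMPLE_FLAG_EXCL_OP, &fs->flags);
	}
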
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 061c1d1f774f..8685d67185d0 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -762,7 +762,7 @@ static int btree_readpage_end_io_hook(struct btrfs_io_bio *io_bio,
err:
if (reads_done &&
test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags))
- btree_readahead_hook(fs_info, eb, ret);
+ btree_readahead_hook(eb, ret);
if (ret) {
/*
@@ -787,7 +787,7 @@ static int btree_io_failed_hook(struct page *page, int failed_mirror)
eb->read_mirror = failed_mirror;
atomic_dec(&eb->io_pages);
if (test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags))
- btree_readahead_hook(eb->fs_info, eb, -EIO);
+ btree_readahead_hook(eb, -EIO);
return -EIO; /* we fixed nothing */
}
@@ -1340,7 +1340,7 @@ static void __setup_root(struct btrfs_root *root, struct btrfs_fs_info *fs_info,
atomic_set(&root->log_writers, 0);
atomic_set(&root->log_batch, 0);
atomic_set(&root->orphan_inodes, 0);
- atomic_set(&root->refs, 1);
+ refcount_set(&root->refs, 1);
atomic_set(&root->will_be_snapshoted, 0);
atomic64_set(&root->qgroup_meta_rsv, 0);
root->log_transid = 0;
@@ -3497,10 +3497,11 @@ static void btrfs_end_empty_barrier(struct bio *bio)
*/
static int write_dev_flush(struct btrfs_device *device, int wait)
{
+ struct request_queue *q = bdev_get_queue(device->bdev);
struct bio *bio;
int ret = 0;
- if (device->nobarriers)
+ if (!test_bit(QUEUE_FLAG_WC, &q->queue_flags))
return 0;
if (wait) {
@@ -4321,7 +4322,7 @@ static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
head = rb_entry(node, struct btrfs_delayed_ref_head,
href_node);
if (!mutex_trylock(&head->mutex)) {
- atomic_inc(&head->node.refs);
+ refcount_inc(&head->node.refs);
spin_unlock(&delayed_refs->lock);
mutex_lock(&head->mutex);
@@ -4593,7 +4594,7 @@ static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info)
t = list_first_entry(&fs_info->trans_list,
struct btrfs_transaction, list);
if (t->state >= TRANS_STATE_COMMIT_START) {
- atomic_inc(&t->use_count);
+ refcount_inc(&t->use_count);
spin_unlock(&fs_info->trans_lock);
btrfs_wait_for_commit(fs_info, t->transid);
btrfs_put_transaction(t);
diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h
index 2e0ec29bfd69..21f1ceb85b76 100644
--- a/fs/btrfs/disk-io.h
+++ b/fs/btrfs/disk-io.h
@@ -101,14 +101,14 @@ struct btrfs_root *btrfs_alloc_dummy_root(struct btrfs_fs_info *fs_info);
*/
static inline struct btrfs_root *btrfs_grab_fs_root(struct btrfs_root *root)
{
- if (atomic_inc_not_zero(&root->refs))
+ if (refcount_inc_not_zero(&root->refs))
return root;
return NULL;
}
static inline void btrfs_put_fs_root(struct btrfs_root *root)
{
- if (atomic_dec_and_test(&root->refs))
+ if (refcount_dec_and_test(&root->refs))
kfree(root);
}
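
Note: btrfs_grab_fs_root()/btrfs_put_fs_root() above also switch to refcount_t, and the grab side shows the companion idiom to the plain get/put pair: refcount_inc_not_zero() only takes a reference if the count has not already dropped to zero, which is what lookup paths need when the object may be dying concurrently. A short sketch of that idiom:

	#include <linux/refcount.h>

	struct example_node {
		refcount_t refs;
	};

	/*
	 * Called from a lookup path, typically under whatever lock protects the
	 * lookup structure so the object cannot be freed underneath us.
	 */
	static struct example_node *example_grab(struct example_node *node)
	{
		if (node && refcount_inc_not_zero(&node->refs))
			return node;	/* reference taken; caller must drop it */
		return NULL;		/* already past its last put */
	}
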
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index be5477676cc8..e390451c72e6 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -131,6 +131,16 @@ void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
if (atomic_dec_and_test(&cache->count)) {
WARN_ON(cache->pinned > 0);
WARN_ON(cache->reserved > 0);
+
+ /*
+ * If not empty, someone is still holding the mutex of
+ * full_stripe_lock, which can only be released by its caller.
+ * That will cause a use-after-free when the caller later tries
+ * to release the full stripe lock.
+ *
+ * There is no better way to resolve this, so just warn.
+ */
+ WARN_ON(!RB_EMPTY_ROOT(&cache->full_stripe_locks_root.root));
kfree(cache->free_space_ctl);
kfree(cache);
}
@@ -316,14 +326,14 @@ get_caching_control(struct btrfs_block_group_cache *cache)
}
ctl = cache->caching_ctl;
- atomic_inc(&ctl->count);
+ refcount_inc(&ctl->count);
spin_unlock(&cache->lock);
return ctl;
}
static void put_caching_control(struct btrfs_caching_control *ctl)
{
- if (atomic_dec_and_test(&ctl->count))
+ if (refcount_dec_and_test(&ctl->count))
kfree(ctl);
}
@@ -599,7 +609,7 @@ static int cache_block_group(struct btrfs_block_group_cache *cache,
init_waitqueue_head(&caching_ctl->wait);
caching_ctl->block_group = cache;
caching_ctl->progress = cache->key.objectid;
- atomic_set(&caching_ctl->count, 1);
+ refcount_set(&caching_ctl->count, 1);
btrfs_init_work(&caching_ctl->work, btrfs_cache_helper,
caching_thread, NULL, NULL);
@@ -620,7 +630,7 @@ static int cache_block_group(struct btrfs_block_group_cache *cache,
struct btrfs_caching_control *ctl;
ctl = cache->caching_ctl;
- atomic_inc(&ctl->count);
+ refcount_inc(&ctl->count);
prepare_to_wait(&ctl->wait, &wait, TASK_UNINTERRUPTIBLE);
spin_unlock(&cache->lock);
@@ -707,7 +717,7 @@ static int cache_block_group(struct btrfs_block_group_cache *cache,
}
down_write(&fs_info->commit_root_sem);
- atomic_inc(&caching_ctl->count);
+ refcount_inc(&caching_ctl->count);
list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
up_write(&fs_info->commit_root_sem);
@@ -892,7 +902,7 @@ search_again:
head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
if (head) {
if (!mutex_trylock(&head->mutex)) {
- atomic_inc(&head->node.refs);
+ refcount_inc(&head->node.refs);
spin_unlock(&delayed_refs->lock);
btrfs_release_path(path);
@@ -2980,7 +2990,7 @@ again:
struct btrfs_delayed_ref_node *ref;
ref = &head->node;
- atomic_inc(&ref->refs);
+ refcount_inc(&ref->refs);
spin_unlock(&delayed_refs->lock);
/*
@@ -3003,7 +3013,6 @@ again:
goto again;
}
out:
- assert_qgroups_uptodate(trans);
trans->can_flush_pending_bgs = can_flush_pending_bgs;
return 0;
}
@@ -3057,7 +3066,7 @@ static noinline int check_delayed_ref(struct btrfs_root *root,
}
if (!mutex_trylock(&head->mutex)) {
- atomic_inc(&head->node.refs);
+ refcount_inc(&head->node.refs);
spin_unlock(&delayed_refs->lock);
btrfs_release_path(path);
@@ -3443,7 +3452,8 @@ again:
/*
* don't bother trying to write stuff out _if_
* a) we're not cached,
- * b) we're with nospace_cache mount option.
+ * b) we're with nospace_cache mount option,
+ * c) we're with v2 space_cache (FREE_SPACE_TREE).
*/
dcs = BTRFS_DC_WRITTEN;
spin_unlock(&block_group->lock);
@@ -9917,6 +9927,7 @@ btrfs_create_block_group_cache(struct btrfs_fs_info *fs_info,
btrfs_init_free_space_ctl(cache);
atomic_set(&cache->trimming, 0);
mutex_init(&cache->free_space_lock);
+ btrfs_init_full_stripe_locks_tree(&cache->full_stripe_locks_root);
return cache;
}
@@ -10416,7 +10427,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
&fs_info->caching_block_groups, list)
if (ctl->block_group == block_group) {
caching_ctl = ctl;
- atomic_inc(&caching_ctl->count);
+ refcount_inc(&caching_ctl->count);
break;
}
}
@@ -10850,7 +10861,7 @@ static int btrfs_trim_free_extents(struct btrfs_device *device,
spin_lock(&fs_info->trans_lock);
trans = fs_info->running_transaction;
if (trans)
- atomic_inc(&trans->use_count);
+ refcount_inc(&trans->use_count);
spin_unlock(&fs_info->trans_lock);
ret = find_free_dev_extent_start(trans, device, minlen, start,
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 27fdb250b446..d8da3edf2ac3 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -68,7 +68,7 @@ void btrfs_leak_debug_check(void)
pr_err("BTRFS: state leak: start %llu end %llu state %u in tree %d refs %d\n",
state->start, state->end, state->state,
extent_state_in_tree(state),
- atomic_read(&state->refs));
+ refcount_read(&state->refs));
list_del(&state->leak_list);
kmem_cache_free(extent_state_cache, state);
}
@@ -238,7 +238,7 @@ static struct extent_state *alloc_extent_state(gfp_t mask)
state->failrec = NULL;
RB_CLEAR_NODE(&state->rb_node);
btrfs_leak_debug_add(&state->leak_list, &states);
- atomic_set(&state->refs, 1);
+ refcount_set(&state->refs, 1);
init_waitqueue_head(&state->wq);
trace_alloc_extent_state(state, mask, _RET_IP_);
return state;
@@ -248,7 +248,7 @@ void free_extent_state(struct extent_state *state)
{
if (!state)
return;
- if (atomic_dec_and_test(&state->refs)) {
+ if (refcount_dec_and_test(&state->refs)) {
WARN_ON(extent_state_in_tree(state));
btrfs_leak_debug_del(&state->leak_list);
trace_free_extent_state(state, _RET_IP_);
@@ -641,7 +641,7 @@ again:
if (cached && extent_state_in_tree(cached) &&
cached->start <= start && cached->end > start) {
if (clear)
- atomic_dec(&cached->refs);
+ refcount_dec(&cached->refs);
state = cached;
goto hit_next;
}
@@ -793,7 +793,7 @@ process_node:
if (state->state & bits) {
start = state->start;
- atomic_inc(&state->refs);
+ refcount_inc(&state->refs);
wait_on_state(tree, state);
free_extent_state(state);
goto again;
@@ -834,7 +834,7 @@ static void cache_state_if_flags(struct extent_state *state,
if (cached_ptr && !(*cached_ptr)) {
if (!flags || (state->state & flags)) {
*cached_ptr = state;
- atomic_inc(&state->refs);
+ refcount_inc(&state->refs);
}
}
}
@@ -1538,7 +1538,7 @@ static noinline u64 find_delalloc_range(struct extent_io_tree *tree,
if (!found) {
*start = state->start;
*cached_state = state;
- atomic_inc(&state->refs);
+ refcount_inc(&state->refs);
}
found++;
*end = state->end;
@@ -2004,16 +2004,11 @@ int repair_io_failure(struct btrfs_inode *inode, u64 start, u64 length,
u64 map_length = 0;
u64 sector;
struct btrfs_bio *bbio = NULL;
- struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
int ret;
ASSERT(!(fs_info->sb->s_flags & MS_RDONLY));
BUG_ON(!mirror_num);
- /* we can't repair anything in raid56 yet */
- if (btrfs_is_parity_mirror(map_tree, logical, length, mirror_num))
- return 0;
-
bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
if (!bio)
return -EIO;
@@ -2026,17 +2021,35 @@ int repair_io_failure(struct btrfs_inode *inode, u64 start, u64 length,
* read repair operation.
*/
btrfs_bio_counter_inc_blocked(fs_info);
- ret = btrfs_map_block(fs_info, BTRFS_MAP_WRITE, logical,
- &map_length, &bbio, mirror_num);
- if (ret) {
- btrfs_bio_counter_dec(fs_info);
- bio_put(bio);
- return -EIO;
+ if (btrfs_is_parity_mirror(fs_info, logical, length, mirror_num)) {
+ /*
+ * Note that we don't use BTRFS_MAP_WRITE because it's supposed
+ * to update all raid stripes, but here we just want to correct
+ * the bad stripe, thus BTRFS_MAP_READ is abused to only get the bad
+ * stripe's dev and sector.
+ */
+ ret = btrfs_map_block(fs_info, BTRFS_MAP_READ, logical,
+ &map_length, &bbio, 0);
+ if (ret) {
+ btrfs_bio_counter_dec(fs_info);
+ bio_put(bio);
+ return -EIO;
+ }
+ ASSERT(bbio->mirror_num == 1);
+ } else {
+ ret = btrfs_map_block(fs_info, BTRFS_MAP_WRITE, logical,
+ &map_length, &bbio, mirror_num);
+ if (ret) {
+ btrfs_bio_counter_dec(fs_info);
+ bio_put(bio);
+ return -EIO;
+ }
+ BUG_ON(mirror_num != bbio->mirror_num);
}
- BUG_ON(mirror_num != bbio->mirror_num);
- sector = bbio->stripes[mirror_num-1].physical >> 9;
+
+ sector = bbio->stripes[bbio->mirror_num - 1].physical >> 9;
bio->bi_iter.bi_sector = sector;
- dev = bbio->stripes[mirror_num-1].dev;
+ dev = bbio->stripes[bbio->mirror_num - 1].dev;
btrfs_put_bbio(bbio);
if (!dev || !dev->bdev || !dev->writeable) {
btrfs_bio_counter_dec(fs_info);
@@ -2859,7 +2872,7 @@ __get_extent_map(struct inode *inode, struct page *page, size_t pg_offset,
em = *em_cached;
if (extent_map_in_tree(em) && start >= em->start &&
start < extent_map_end(em)) {
- atomic_inc(&em->refs);
+ refcount_inc(&em->refs);
return em;
}
@@ -2870,7 +2883,7 @@ __get_extent_map(struct inode *inode, struct page *page, size_t pg_offset,
em = get_extent(BTRFS_I(inode), page, pg_offset, start, len, 0);
if (em_cached && !IS_ERR_OR_NULL(em)) {
BUG_ON(*em_cached);
- atomic_inc(&em->refs);
+ refcount_inc(&em->refs);
*em_cached = em;
}
return em;
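
The extent_io.c hunks above, and similar hunks in the files that follow, apply the same mechanical conversion of object reference counts from atomic_t to refcount_t. A minimal sketch of that pattern, using a made-up struct rather than any btrfs type and assuming only <linux/refcount.h> and <linux/slab.h>:

#include <linux/refcount.h>
#include <linux/slab.h>

struct demo_obj {				/* hypothetical object, for illustration only */
	refcount_t refs;			/* was: atomic_t refs */
};

static struct demo_obj *demo_alloc(void)
{
	struct demo_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (obj)
		refcount_set(&obj->refs, 1);	/* was: atomic_set(&obj->refs, 1) */
	return obj;
}

static void demo_get(struct demo_obj *obj)
{
	refcount_inc(&obj->refs);		/* warns if the count is already zero */
}

static void demo_put(struct demo_obj *obj)
{
	if (refcount_dec_and_test(&obj->refs))	/* was: atomic_dec_and_test() */
		kfree(obj);
}

Unlike atomic_t, refcount_inc() warns when incrementing a zero count and refcount_dec_and_test() saturates instead of underflowing, catching use-after-free style bugs that a plain atomic counter would silently accept.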
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index 3e4fad4a909d..1eafa2f0ede3 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -2,6 +2,7 @@
#define __EXTENTIO__
#include <linux/rbtree.h>
+#include <linux/refcount.h>
#include "ulist.h"
/* bits for the extent state */
@@ -14,14 +15,17 @@
#define EXTENT_DEFRAG (1U << 6)
#define EXTENT_BOUNDARY (1U << 9)
#define EXTENT_NODATASUM (1U << 10)
-#define EXTENT_DO_ACCOUNTING (1U << 11)
+#define EXTENT_CLEAR_META_RESV (1U << 11)
#define EXTENT_FIRST_DELALLOC (1U << 12)
#define EXTENT_NEED_WAIT (1U << 13)
#define EXTENT_DAMAGED (1U << 14)
#define EXTENT_NORESERVE (1U << 15)
#define EXTENT_QGROUP_RESERVED (1U << 16)
#define EXTENT_CLEAR_DATA_RESV (1U << 17)
+#define EXTENT_DELALLOC_NEW (1U << 18)
#define EXTENT_IOBITS (EXTENT_LOCKED | EXTENT_WRITEBACK)
+#define EXTENT_DO_ACCOUNTING (EXTENT_CLEAR_META_RESV | \
+ EXTENT_CLEAR_DATA_RESV)
#define EXTENT_CTLBITS (EXTENT_DO_ACCOUNTING | EXTENT_FIRST_DELALLOC)
/*
@@ -143,7 +147,7 @@ struct extent_state {
/* ADD NEW ELEMENTS AFTER THIS */
wait_queue_head_t wq;
- atomic_t refs;
+ refcount_t refs;
unsigned state;
struct io_failure_record *failrec;
diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
index 26f9ac719d20..69850155870c 100644
--- a/fs/btrfs/extent_map.c
+++ b/fs/btrfs/extent_map.c
@@ -55,7 +55,7 @@ struct extent_map *alloc_extent_map(void)
em->flags = 0;
em->compress_type = BTRFS_COMPRESS_NONE;
em->generation = 0;
- atomic_set(&em->refs, 1);
+ refcount_set(&em->refs, 1);
INIT_LIST_HEAD(&em->list);
return em;
}
@@ -71,8 +71,8 @@ void free_extent_map(struct extent_map *em)
{
if (!em)
return;
- WARN_ON(atomic_read(&em->refs) == 0);
- if (atomic_dec_and_test(&em->refs)) {
+ WARN_ON(refcount_read(&em->refs) == 0);
+ if (refcount_dec_and_test(&em->refs)) {
WARN_ON(extent_map_in_tree(em));
WARN_ON(!list_empty(&em->list));
if (test_bit(EXTENT_FLAG_FS_MAPPING, &em->flags))
@@ -322,7 +322,7 @@ static inline void setup_extent_mapping(struct extent_map_tree *tree,
struct extent_map *em,
int modified)
{
- atomic_inc(&em->refs);
+ refcount_inc(&em->refs);
em->mod_start = em->start;
em->mod_len = em->len;
@@ -381,7 +381,7 @@ __lookup_extent_mapping(struct extent_map_tree *tree,
if (strict && !(end > em->start && start < extent_map_end(em)))
return NULL;
- atomic_inc(&em->refs);
+ refcount_inc(&em->refs);
return em;
}
diff --git a/fs/btrfs/extent_map.h b/fs/btrfs/extent_map.h
index eb8b8fae036b..a67b2def5413 100644
--- a/fs/btrfs/extent_map.h
+++ b/fs/btrfs/extent_map.h
@@ -2,6 +2,7 @@
#define __EXTENTMAP__
#include <linux/rbtree.h>
+#include <linux/refcount.h>
#define EXTENT_MAP_LAST_BYTE ((u64)-4)
#define EXTENT_MAP_HOLE ((u64)-3)
@@ -41,7 +42,7 @@ struct extent_map {
*/
struct map_lookup *map_lookup;
};
- atomic_t refs;
+ refcount_t refs;
unsigned int compress_type;
struct list_head list;
};
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 520cb7230b2d..da1096eb1a40 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -1404,6 +1404,47 @@ fail:
}
+static int btrfs_find_new_delalloc_bytes(struct btrfs_inode *inode,
+ const u64 start,
+ const u64 len,
+ struct extent_state **cached_state)
+{
+ u64 search_start = start;
+ const u64 end = start + len - 1;
+
+ while (search_start < end) {
+ const u64 search_len = end - search_start + 1;
+ struct extent_map *em;
+ u64 em_len;
+ int ret = 0;
+
+ em = btrfs_get_extent(inode, NULL, 0, search_start,
+ search_len, 0);
+ if (IS_ERR(em))
+ return PTR_ERR(em);
+
+ if (em->block_start != EXTENT_MAP_HOLE)
+ goto next;
+
+ em_len = em->len;
+ if (em->start < search_start)
+ em_len -= search_start - em->start;
+ if (em_len > search_len)
+ em_len = search_len;
+
+ ret = set_extent_bit(&inode->io_tree, search_start,
+ search_start + em_len - 1,
+ EXTENT_DELALLOC_NEW,
+ NULL, cached_state, GFP_NOFS);
+next:
+ search_start = extent_map_end(em);
+ free_extent_map(em);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
/*
* This function locks the extent and properly waits for data=ordered extents
* to finish before allowing the pages to be modified if need.
@@ -1432,8 +1473,11 @@ lock_and_cleanup_extent_if_need(struct btrfs_inode *inode, struct page **pages,
+ round_up(pos + write_bytes - start_pos,
fs_info->sectorsize) - 1;
- if (start_pos < inode->vfs_inode.i_size) {
+ if (start_pos < inode->vfs_inode.i_size ||
+ (inode->flags & BTRFS_INODE_PREALLOC)) {
struct btrfs_ordered_extent *ordered;
+ unsigned int clear_bits;
+
lock_extent_bits(&inode->io_tree, start_pos, last_pos,
cached_state);
ordered = btrfs_lookup_ordered_range(inode, start_pos,
@@ -1454,11 +1498,19 @@ lock_and_cleanup_extent_if_need(struct btrfs_inode *inode, struct page **pages,
}
if (ordered)
btrfs_put_ordered_extent(ordered);
-
+ ret = btrfs_find_new_delalloc_bytes(inode, start_pos,
+ last_pos - start_pos + 1,
+ cached_state);
+ clear_bits = EXTENT_DIRTY | EXTENT_DELALLOC |
+ EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG;
+ if (ret)
+ clear_bits |= EXTENT_DELALLOC_NEW | EXTENT_LOCKED;
clear_extent_bit(&inode->io_tree, start_pos,
- last_pos, EXTENT_DIRTY | EXTENT_DELALLOC |
- EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
- 0, 0, cached_state, GFP_NOFS);
+ last_pos, clear_bits,
+ (clear_bits & EXTENT_LOCKED) ? 1 : 0,
+ 0, cached_state, GFP_NOFS);
+ if (ret)
+ return ret;
*lockstart = start_pos;
*lockend = last_pos;
ret = 1;
@@ -2342,13 +2394,8 @@ static int find_first_non_hole(struct inode *inode, u64 *start, u64 *len)
int ret = 0;
em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, *start, *len, 0);
- if (IS_ERR_OR_NULL(em)) {
- if (!em)
- ret = -ENOMEM;
- else
- ret = PTR_ERR(em);
- return ret;
- }
+ if (IS_ERR(em))
+ return PTR_ERR(em);
/* Hole or vacuum extent (only exists in no-hole mode) */
if (em->block_start == EXTENT_MAP_HOLE) {
@@ -2835,11 +2882,8 @@ static long btrfs_fallocate(struct file *file, int mode,
while (1) {
em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, cur_offset,
alloc_end - cur_offset, 0);
- if (IS_ERR_OR_NULL(em)) {
- if (!em)
- ret = -ENOMEM;
- else
- ret = PTR_ERR(em);
+ if (IS_ERR(em)) {
+ ret = PTR_ERR(em);
break;
}
last_byte = min(extent_map_end(em), alloc_end);
@@ -2856,8 +2900,10 @@ static long btrfs_fallocate(struct file *file, int mode,
}
ret = btrfs_qgroup_reserve_data(inode, cur_offset,
last_byte - cur_offset);
- if (ret < 0)
+ if (ret < 0) {
+ free_extent_map(em);
break;
+ }
} else {
/*
* Do not need to reserve unwritten extent for this
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index da6841efac26..c5e6180cdb8c 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -355,7 +355,7 @@ static void io_ctl_map_page(struct btrfs_io_ctl *io_ctl, int clear)
io_ctl->orig = io_ctl->cur;
io_ctl->size = PAGE_SIZE;
if (clear)
- memset(io_ctl->cur, 0, PAGE_SIZE);
+ clear_page(io_ctl->cur);
}
static void io_ctl_drop_pages(struct btrfs_io_ctl *io_ctl)
diff --git a/fs/btrfs/free-space-tree.c b/fs/btrfs/free-space-tree.c
index dd7fb22a955a..fc0bd8406758 100644
--- a/fs/btrfs/free-space-tree.c
+++ b/fs/btrfs/free-space-tree.c
@@ -167,8 +167,7 @@ static u8 *alloc_bitmap(u32 bitmap_size)
if (mem)
return mem;
- return __vmalloc(bitmap_size, GFP_NOFS | __GFP_HIGHMEM | __GFP_ZERO,
- PAGE_KERNEL);
+ return __vmalloc(bitmap_size, GFP_NOFS | __GFP_ZERO, PAGE_KERNEL);
}
int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans,
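
For context, the alloc_bitmap() code changed above is an instance of the common "try kmalloc, fall back to vmalloc" idiom. A stripped-down sketch of that idiom with a hypothetical helper name (nothing here comes from this patch); kvmalloc(), which an ioctl.c hunk further below switches to, packages the same logic in one call:

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

static void *demo_alloc_big(size_t size)
{
	void *mem;

	/* Try physically contiguous memory first, without a failure warning. */
	mem = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);
	if (mem)
		return mem;

	/* Fall back to virtually contiguous memory; kvfree() handles either case. */
	return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL);
}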
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 5e71f1ea3391..17cbe9306faf 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -115,6 +115,31 @@ static struct extent_map *create_io_em(struct inode *inode, u64 start, u64 len,
u64 ram_bytes, int compress_type,
int type);
+static void __endio_write_update_ordered(struct inode *inode,
+ const u64 offset, const u64 bytes,
+ const bool uptodate);
+
+/*
+ * Clean up all submitted ordered extents in the specified range to handle
+ * errors from the fill_delalloc() callback.
+ *
+ * NOTE: the caller must ensure that when an error happens, it does not call
+ * extent_clear_unlock_delalloc() to clear both the bits EXTENT_DO_ACCOUNTING
+ * and EXTENT_DELALLOC simultaneously, because that causes the reserved metadata
+ * to be released, which we want to happen only when finishing the ordered
+ * extent (btrfs_finish_ordered_io()). Also note that the caller of the
+ * fill_delalloc() callback already does proper cleanup for the first page of
+ * the range, that is, it invokes the callback writepage_end_io_hook() for the
+ * range of the first page.
+ */
+static inline void btrfs_cleanup_ordered_extents(struct inode *inode,
+ const u64 offset,
+ const u64 bytes)
+{
+ return __endio_write_update_ordered(inode, offset + PAGE_SIZE,
+ bytes - PAGE_SIZE, false);
+}
+
static int btrfs_dirty_inode(struct inode *inode);
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
@@ -547,7 +572,7 @@ cont:
}
if (ret <= 0) {
unsigned long clear_flags = EXTENT_DELALLOC |
- EXTENT_DEFRAG;
+ EXTENT_DELALLOC_NEW | EXTENT_DEFRAG;
unsigned long page_error_op;
clear_flags |= (ret < 0) ? EXTENT_DO_ACCOUNTING : 0;
@@ -565,8 +590,10 @@ cont:
PAGE_SET_WRITEBACK |
page_error_op |
PAGE_END_WRITEBACK);
- btrfs_free_reserved_data_space_noquota(inode, start,
- end - start + 1);
+ if (ret == 0)
+ btrfs_free_reserved_data_space_noquota(inode,
+ start,
+ end - start + 1);
goto free_pages_out;
}
}
@@ -852,6 +879,7 @@ out_free:
async_extent->start +
async_extent->ram_size - 1,
NULL, EXTENT_LOCKED | EXTENT_DELALLOC |
+ EXTENT_DELALLOC_NEW |
EXTENT_DEFRAG | EXTENT_DO_ACCOUNTING,
PAGE_UNLOCK | PAGE_CLEAR_DIRTY |
PAGE_SET_WRITEBACK | PAGE_END_WRITEBACK |
@@ -918,10 +946,13 @@ static noinline int cow_file_range(struct inode *inode,
u64 num_bytes;
unsigned long ram_size;
u64 disk_num_bytes;
- u64 cur_alloc_size;
+ u64 cur_alloc_size = 0;
u64 blocksize = fs_info->sectorsize;
struct btrfs_key ins;
struct extent_map *em;
+ unsigned clear_bits;
+ unsigned long page_ops;
+ bool extent_reserved = false;
int ret = 0;
if (btrfs_is_free_space_inode(BTRFS_I(inode))) {
@@ -944,6 +975,7 @@ static noinline int cow_file_range(struct inode *inode,
extent_clear_unlock_delalloc(inode, start, end,
delalloc_end, NULL,
EXTENT_LOCKED | EXTENT_DELALLOC |
+ EXTENT_DELALLOC_NEW |
EXTENT_DEFRAG, PAGE_UNLOCK |
PAGE_CLEAR_DIRTY | PAGE_SET_WRITEBACK |
PAGE_END_WRITEBACK);
@@ -966,14 +998,14 @@ static noinline int cow_file_range(struct inode *inode,
start + num_bytes - 1, 0);
while (disk_num_bytes > 0) {
- unsigned long op;
-
cur_alloc_size = disk_num_bytes;
ret = btrfs_reserve_extent(root, cur_alloc_size, cur_alloc_size,
fs_info->sectorsize, 0, alloc_hint,
&ins, 1, 1);
if (ret < 0)
goto out_unlock;
+ cur_alloc_size = ins.offset;
+ extent_reserved = true;
ram_size = ins.offset;
em = create_io_em(inode, start, ins.offset, /* len */
@@ -988,7 +1020,6 @@ static noinline int cow_file_range(struct inode *inode,
goto out_reserve;
free_extent_map(em);
- cur_alloc_size = ins.offset;
ret = btrfs_add_ordered_extent(inode, start, ins.objectid,
ram_size, cur_alloc_size, 0);
if (ret)
@@ -998,15 +1029,24 @@ static noinline int cow_file_range(struct inode *inode,
BTRFS_DATA_RELOC_TREE_OBJECTID) {
ret = btrfs_reloc_clone_csums(inode, start,
cur_alloc_size);
+ /*
+ * Only drop cache here, and process as normal.
+ *
+ * We must not allow extent_clear_unlock_delalloc()
+ * at out_unlock label to free meta of this ordered
+ * extent, as its meta should be freed by
+ * btrfs_finish_ordered_io().
+ *
+ * So we must continue until @start is increased to
+ * skip current ordered extent.
+ */
if (ret)
- goto out_drop_extent_cache;
+ btrfs_drop_extent_cache(BTRFS_I(inode), start,
+ start + ram_size - 1, 0);
}
btrfs_dec_block_group_reservations(fs_info, ins.objectid);
- if (disk_num_bytes < cur_alloc_size)
- break;
-
/* we're not doing compressed IO, don't unlock the first
* page (which the caller expects to stay locked), don't
* clear any dirty bits and don't set any writeback bits
@@ -1014,18 +1054,30 @@ static noinline int cow_file_range(struct inode *inode,
* Do set the Private2 bit so we know this page was properly
* setup for writepage
*/
- op = unlock ? PAGE_UNLOCK : 0;
- op |= PAGE_SET_PRIVATE2;
+ page_ops = unlock ? PAGE_UNLOCK : 0;
+ page_ops |= PAGE_SET_PRIVATE2;
extent_clear_unlock_delalloc(inode, start,
start + ram_size - 1,
delalloc_end, locked_page,
EXTENT_LOCKED | EXTENT_DELALLOC,
- op);
- disk_num_bytes -= cur_alloc_size;
+ page_ops);
+ if (disk_num_bytes < cur_alloc_size)
+ disk_num_bytes = 0;
+ else
+ disk_num_bytes -= cur_alloc_size;
num_bytes -= cur_alloc_size;
alloc_hint = ins.objectid + ins.offset;
start += cur_alloc_size;
+ extent_reserved = false;
+
+ /*
+ * On btrfs_reloc_clone_csums() error: since @start has been increased,
+ * extent_clear_unlock_delalloc() at the out_unlock label won't free the
+ * metadata of the current ordered extent, so we're OK to exit.
+ */
+ if (ret)
+ goto out_unlock;
}
out:
return ret;
@@ -1036,12 +1088,35 @@ out_reserve:
btrfs_dec_block_group_reservations(fs_info, ins.objectid);
btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1);
out_unlock:
+ clear_bits = EXTENT_LOCKED | EXTENT_DELALLOC | EXTENT_DELALLOC_NEW |
+ EXTENT_DEFRAG | EXTENT_CLEAR_META_RESV;
+ page_ops = PAGE_UNLOCK | PAGE_CLEAR_DIRTY | PAGE_SET_WRITEBACK |
+ PAGE_END_WRITEBACK;
+ /*
+ * If we reserved an extent for our delalloc range (or a subrange) and
+ * failed to create the respective ordered extent, then it means that
+ * when we reserved the extent we decremented the extent's size from
+ * the data space_info's bytes_may_use counter and incremented the
+ * space_info's bytes_reserved counter by the same amount. We must make
+ * sure extent_clear_unlock_delalloc() does not try to decrement again
+ * the data space_info's bytes_may_use counter, therefore we do not pass
+ * it the flag EXTENT_CLEAR_DATA_RESV.
+ */
+ if (extent_reserved) {
+ extent_clear_unlock_delalloc(inode, start,
+ start + cur_alloc_size,
+ start + cur_alloc_size,
+ locked_page,
+ clear_bits,
+ page_ops);
+ start += cur_alloc_size;
+ if (start >= end)
+ goto out;
+ }
extent_clear_unlock_delalloc(inode, start, end, delalloc_end,
locked_page,
- EXTENT_LOCKED | EXTENT_DO_ACCOUNTING |
- EXTENT_DELALLOC | EXTENT_DEFRAG,
- PAGE_UNLOCK | PAGE_CLEAR_DIRTY |
- PAGE_SET_WRITEBACK | PAGE_END_WRITEBACK);
+ clear_bits | EXTENT_CLEAR_DATA_RESV,
+ page_ops);
goto out;
}
@@ -1414,15 +1489,14 @@ out_check:
BUG_ON(ret); /* -ENOMEM */
if (root->root_key.objectid ==
- BTRFS_DATA_RELOC_TREE_OBJECTID) {
+ BTRFS_DATA_RELOC_TREE_OBJECTID)
+ /*
+ * Error handled later, as we must prevent
+ * extent_clear_unlock_delalloc() in error handler
+ * from freeing metadata of created ordered extent.
+ */
ret = btrfs_reloc_clone_csums(inode, cur_offset,
num_bytes);
- if (ret) {
- if (!nolock && nocow)
- btrfs_end_write_no_snapshoting(root);
- goto error;
- }
- }
extent_clear_unlock_delalloc(inode, cur_offset,
cur_offset + num_bytes - 1, end,
@@ -1434,6 +1508,14 @@ out_check:
if (!nolock && nocow)
btrfs_end_write_no_snapshoting(root);
cur_offset = extent_end;
+
+ /*
+ * On btrfs_reloc_clone_csums() error we're now OK to call the error
+ * handler, as the metadata for the created ordered extent will only
+ * be freed by btrfs_finish_ordered_io().
+ */
+ if (ret)
+ goto error;
if (cur_offset > end)
break;
}
@@ -1509,6 +1591,8 @@ static int run_delalloc_range(struct inode *inode, struct page *locked_page,
ret = cow_file_range_async(inode, locked_page, start, end,
page_started, nr_written);
}
+ if (ret)
+ btrfs_cleanup_ordered_extents(inode, start, end - start + 1);
return ret;
}
@@ -1693,6 +1777,14 @@ static void btrfs_set_bit_hook(struct inode *inode,
btrfs_add_delalloc_inodes(root, inode);
spin_unlock(&BTRFS_I(inode)->lock);
}
+
+ if (!(state->state & EXTENT_DELALLOC_NEW) &&
+ (*bits & EXTENT_DELALLOC_NEW)) {
+ spin_lock(&BTRFS_I(inode)->lock);
+ BTRFS_I(inode)->new_delalloc_bytes += state->end + 1 -
+ state->start;
+ spin_unlock(&BTRFS_I(inode)->lock);
+ }
}
/*
@@ -1722,7 +1814,7 @@ static void btrfs_clear_bit_hook(struct btrfs_inode *inode,
if (*bits & EXTENT_FIRST_DELALLOC) {
*bits &= ~EXTENT_FIRST_DELALLOC;
- } else if (!(*bits & EXTENT_DO_ACCOUNTING)) {
+ } else if (!(*bits & EXTENT_CLEAR_META_RESV)) {
spin_lock(&inode->lock);
inode->outstanding_extents -= num_extents;
spin_unlock(&inode->lock);
@@ -1733,7 +1825,7 @@ static void btrfs_clear_bit_hook(struct btrfs_inode *inode,
* don't need to call delalloc_release_metadata if there is an
* error.
*/
- if (*bits & EXTENT_DO_ACCOUNTING &&
+ if (*bits & EXTENT_CLEAR_META_RESV &&
root != fs_info->tree_root)
btrfs_delalloc_release_metadata(inode, len);
@@ -1741,10 +1833,9 @@ static void btrfs_clear_bit_hook(struct btrfs_inode *inode,
if (btrfs_is_testing(fs_info))
return;
- if (root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID
- && do_list && !(state->state & EXTENT_NORESERVE)
- && (*bits & (EXTENT_DO_ACCOUNTING |
- EXTENT_CLEAR_DATA_RESV)))
+ if (root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID &&
+ do_list && !(state->state & EXTENT_NORESERVE) &&
+ (*bits & EXTENT_CLEAR_DATA_RESV))
btrfs_free_reserved_data_space_noquota(
&inode->vfs_inode,
state->start, len);
@@ -1759,6 +1850,14 @@ static void btrfs_clear_bit_hook(struct btrfs_inode *inode,
btrfs_del_delalloc_inode(root, inode);
spin_unlock(&inode->lock);
}
+
+ if ((state->state & EXTENT_DELALLOC_NEW) &&
+ (*bits & EXTENT_DELALLOC_NEW)) {
+ spin_lock(&inode->lock);
+ ASSERT(inode->new_delalloc_bytes >= len);
+ inode->new_delalloc_bytes -= len;
+ spin_unlock(&inode->lock);
+ }
}
/*
@@ -2791,6 +2890,13 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
u64 logical_len = ordered_extent->len;
bool nolock;
bool truncated = false;
+ bool range_locked = false;
+ bool clear_new_delalloc_bytes = false;
+
+ if (!test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) &&
+ !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags) &&
+ !test_bit(BTRFS_ORDERED_DIRECT, &ordered_extent->flags))
+ clear_new_delalloc_bytes = true;
nolock = btrfs_is_free_space_inode(BTRFS_I(inode));
@@ -2839,6 +2945,7 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
goto out;
}
+ range_locked = true;
lock_extent_bits(io_tree, ordered_extent->file_offset,
ordered_extent->file_offset + ordered_extent->len - 1,
&cached_state);
@@ -2864,7 +2971,7 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
trans = NULL;
- goto out_unlock;
+ goto out;
}
trans->block_rsv = &fs_info->delalloc_block_rsv;
@@ -2896,7 +3003,7 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
trans->transid);
if (ret < 0) {
btrfs_abort_transaction(trans, ret);
- goto out_unlock;
+ goto out;
}
add_pending_csums(trans, inode, &ordered_extent->list);
@@ -2905,14 +3012,26 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
ret = btrfs_update_inode_fallback(trans, root, inode);
if (ret) { /* -ENOMEM or corruption */
btrfs_abort_transaction(trans, ret);
- goto out_unlock;
+ goto out;
}
ret = 0;
-out_unlock:
- unlock_extent_cached(io_tree, ordered_extent->file_offset,
- ordered_extent->file_offset +
- ordered_extent->len - 1, &cached_state, GFP_NOFS);
out:
+ if (range_locked || clear_new_delalloc_bytes) {
+ unsigned int clear_bits = 0;
+
+ if (range_locked)
+ clear_bits |= EXTENT_LOCKED;
+ if (clear_new_delalloc_bytes)
+ clear_bits |= EXTENT_DELALLOC_NEW;
+ clear_extent_bit(&BTRFS_I(inode)->io_tree,
+ ordered_extent->file_offset,
+ ordered_extent->file_offset +
+ ordered_extent->len - 1,
+ clear_bits,
+ (clear_bits & EXTENT_LOCKED) ? 1 : 0,
+ 0, &cached_state, GFP_NOFS);
+ }
+
if (root != fs_info->tree_root)
btrfs_delalloc_release_metadata(BTRFS_I(inode),
ordered_extent->len);
@@ -4401,9 +4520,17 @@ search_again:
if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
item_end +=
btrfs_file_extent_num_bytes(leaf, fi);
+
+ trace_btrfs_truncate_show_fi_regular(
+ BTRFS_I(inode), leaf, fi,
+ found_key.offset);
} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
item_end += btrfs_file_extent_inline_len(leaf,
path->slots[0], fi);
+
+ trace_btrfs_truncate_show_fi_inline(
+ BTRFS_I(inode), leaf, fi, path->slots[0],
+ found_key.offset);
}
item_end--;
}
@@ -4603,13 +4730,6 @@ error:
btrfs_free_path(path);
- if (err == 0) {
- /* only inline file may have last_size != new_size */
- if (new_size >= fs_info->sectorsize ||
- new_size > fs_info->max_inline)
- ASSERT(last_size == new_size);
- }
-
if (be_nice && bytes_deleted > SZ_32M) {
unsigned long updates = trans->delayed_ref_updates;
if (updates) {
@@ -6735,7 +6855,6 @@ static noinline int uncompress_inline(struct btrfs_path *path,
*
* This also copies inline extents directly into the page.
*/
-
struct extent_map *btrfs_get_extent(struct btrfs_inode *inode,
struct page *page,
size_t pg_offset, u64 start, u64 len,
@@ -6835,11 +6954,18 @@ again:
found_type == BTRFS_FILE_EXTENT_PREALLOC) {
extent_end = extent_start +
btrfs_file_extent_num_bytes(leaf, item);
+
+ trace_btrfs_get_extent_show_fi_regular(inode, leaf, item,
+ extent_start);
} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
size_t size;
size = btrfs_file_extent_inline_len(leaf, path->slots[0], item);
extent_end = ALIGN(extent_start + size,
fs_info->sectorsize);
+
+ trace_btrfs_get_extent_show_fi_inline(inode, leaf, item,
+ path->slots[0],
+ extent_start);
}
next:
if (start >= extent_end) {
@@ -7037,19 +7163,17 @@ struct extent_map *btrfs_get_extent_fiemap(struct btrfs_inode *inode,
em = btrfs_get_extent(inode, page, pg_offset, start, len, create);
if (IS_ERR(em))
return em;
- if (em) {
- /*
- * if our em maps to
- * - a hole or
- * - a pre-alloc extent,
- * there might actually be delalloc bytes behind it.
- */
- if (em->block_start != EXTENT_MAP_HOLE &&
- !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
- return em;
- else
- hole_em = em;
- }
+ /*
+ * If our em maps to:
+ * - a hole or
+ * - a pre-alloc extent,
+ * there might actually be delalloc bytes behind it.
+ */
+ if (em->block_start != EXTENT_MAP_HOLE &&
+ !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
+ return em;
+ else
+ hole_em = em;
/* check to see if we've wrapped (len == -1 or similar) */
end = start + len;
@@ -8127,17 +8251,26 @@ static void btrfs_endio_direct_read(struct bio *bio)
bio_put(bio);
}
-static void btrfs_endio_direct_write_update_ordered(struct inode *inode,
- const u64 offset,
- const u64 bytes,
- const int uptodate)
+static void __endio_write_update_ordered(struct inode *inode,
+ const u64 offset, const u64 bytes,
+ const bool uptodate)
{
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_ordered_extent *ordered = NULL;
+ struct btrfs_workqueue *wq;
+ btrfs_work_func_t func;
u64 ordered_offset = offset;
u64 ordered_bytes = bytes;
int ret;
+ if (btrfs_is_free_space_inode(BTRFS_I(inode))) {
+ wq = fs_info->endio_freespace_worker;
+ func = btrfs_freespace_write_helper;
+ } else {
+ wq = fs_info->endio_write_workers;
+ func = btrfs_endio_write_helper;
+ }
+
again:
ret = btrfs_dec_test_first_ordered_pending(inode, &ordered,
&ordered_offset,
@@ -8146,9 +8279,8 @@ again:
if (!ret)
goto out_test;
- btrfs_init_work(&ordered->work, btrfs_endio_write_helper,
- finish_ordered_fn, NULL, NULL);
- btrfs_queue_work(fs_info->endio_write_workers, &ordered->work);
+ btrfs_init_work(&ordered->work, func, finish_ordered_fn, NULL, NULL);
+ btrfs_queue_work(wq, &ordered->work);
out_test:
/*
* our bio might span multiple ordered extents. If we haven't
@@ -8166,10 +8298,8 @@ static void btrfs_endio_direct_write(struct bio *bio)
struct btrfs_dio_private *dip = bio->bi_private;
struct bio *dio_bio = dip->dio_bio;
- btrfs_endio_direct_write_update_ordered(dip->inode,
- dip->logical_offset,
- dip->bytes,
- !bio->bi_error);
+ __endio_write_update_ordered(dip->inode, dip->logical_offset,
+ dip->bytes, !bio->bi_error);
kfree(dip);
@@ -8530,10 +8660,10 @@ free_ordered:
io_bio = NULL;
} else {
if (write)
- btrfs_endio_direct_write_update_ordered(inode,
+ __endio_write_update_ordered(inode,
file_offset,
dio_bio->bi_iter.bi_size,
- 0);
+ false);
else
unlock_extent(&BTRFS_I(inode)->io_tree, file_offset,
file_offset + dio_bio->bi_iter.bi_size - 1);
@@ -8668,11 +8798,11 @@ static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
*/
if (dio_data.unsubmitted_oe_range_start <
dio_data.unsubmitted_oe_range_end)
- btrfs_endio_direct_write_update_ordered(inode,
+ __endio_write_update_ordered(inode,
dio_data.unsubmitted_oe_range_start,
dio_data.unsubmitted_oe_range_end -
dio_data.unsubmitted_oe_range_start,
- 0);
+ false);
} else if (ret >= 0 && (size_t)ret < count)
btrfs_delalloc_release_space(inode, offset,
count - (size_t)ret);
@@ -8819,6 +8949,7 @@ again:
if (!inode_evicting)
clear_extent_bit(tree, start, end,
EXTENT_DIRTY | EXTENT_DELALLOC |
+ EXTENT_DELALLOC_NEW |
EXTENT_LOCKED | EXTENT_DO_ACCOUNTING |
EXTENT_DEFRAG, 1, 0, &cached_state,
GFP_NOFS);
@@ -8876,8 +9007,8 @@ again:
if (!inode_evicting) {
clear_extent_bit(tree, page_start, page_end,
EXTENT_LOCKED | EXTENT_DIRTY |
- EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING |
- EXTENT_DEFRAG, 1, 1,
+ EXTENT_DELALLOC | EXTENT_DELALLOC_NEW |
+ EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, 1, 1,
&cached_state, GFP_NOFS);
__btrfs_releasepage(page, GFP_NOFS);
@@ -9248,6 +9379,7 @@ struct inode *btrfs_alloc_inode(struct super_block *sb)
ei->last_sub_trans = 0;
ei->logged_trans = 0;
ei->delalloc_bytes = 0;
+ ei->new_delalloc_bytes = 0;
ei->defrag_bytes = 0;
ei->disk_i_size = 0;
ei->flags = 0;
@@ -9313,6 +9445,7 @@ void btrfs_destroy_inode(struct inode *inode)
WARN_ON(BTRFS_I(inode)->outstanding_extents);
WARN_ON(BTRFS_I(inode)->reserved_extents);
WARN_ON(BTRFS_I(inode)->delalloc_bytes);
+ WARN_ON(BTRFS_I(inode)->new_delalloc_bytes);
WARN_ON(BTRFS_I(inode)->csum_bytes);
WARN_ON(BTRFS_I(inode)->defrag_bytes);
@@ -9436,7 +9569,7 @@ static int btrfs_getattr(const struct path *path, struct kstat *stat,
stat->dev = BTRFS_I(inode)->root->anon_dev;
spin_lock(&BTRFS_I(inode)->lock);
- delalloc_bytes = BTRFS_I(inode)->delalloc_bytes;
+ delalloc_bytes = BTRFS_I(inode)->new_delalloc_bytes;
spin_unlock(&BTRFS_I(inode)->lock);
stat->blocks = (ALIGN(inode_get_bytes(inode), blocksize) +
ALIGN(delalloc_bytes, blocksize)) >> 9;
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index dabfc7ac48a6..e176375f374f 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -1504,7 +1504,7 @@ static noinline int btrfs_ioctl_resize(struct file *file,
if (ret)
return ret;
- if (atomic_xchg(&fs_info->mutually_exclusive_operation_running, 1)) {
+ if (test_and_set_bit(BTRFS_FS_EXCL_OP, &fs_info->flags)) {
mnt_drop_write_file(file);
return BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
}
@@ -1619,7 +1619,7 @@ out_free:
kfree(vol_args);
out:
mutex_unlock(&fs_info->volume_mutex);
- atomic_set(&fs_info->mutually_exclusive_operation_running, 0);
+ clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags);
mnt_drop_write_file(file);
return ret;
}
@@ -2661,7 +2661,7 @@ static long btrfs_ioctl_add_dev(struct btrfs_fs_info *fs_info, void __user *arg)
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- if (atomic_xchg(&fs_info->mutually_exclusive_operation_running, 1))
+ if (test_and_set_bit(BTRFS_FS_EXCL_OP, &fs_info->flags))
return BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
mutex_lock(&fs_info->volume_mutex);
@@ -2680,7 +2680,7 @@ static long btrfs_ioctl_add_dev(struct btrfs_fs_info *fs_info, void __user *arg)
kfree(vol_args);
out:
mutex_unlock(&fs_info->volume_mutex);
- atomic_set(&fs_info->mutually_exclusive_operation_running, 0);
+ clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags);
return ret;
}
@@ -2708,7 +2708,7 @@ static long btrfs_ioctl_rm_dev_v2(struct file *file, void __user *arg)
if (vol_args->flags & ~BTRFS_VOL_ARG_V2_FLAGS_SUPPORTED)
return -EOPNOTSUPP;
- if (atomic_xchg(&fs_info->mutually_exclusive_operation_running, 1)) {
+ if (test_and_set_bit(BTRFS_FS_EXCL_OP, &fs_info->flags)) {
ret = BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
goto out;
}
@@ -2721,7 +2721,7 @@ static long btrfs_ioctl_rm_dev_v2(struct file *file, void __user *arg)
ret = btrfs_rm_device(fs_info, vol_args->name, 0);
}
mutex_unlock(&fs_info->volume_mutex);
- atomic_set(&fs_info->mutually_exclusive_operation_running, 0);
+ clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags);
if (!ret) {
if (vol_args->flags & BTRFS_DEVICE_SPEC_BY_ID)
@@ -2752,7 +2752,7 @@ static long btrfs_ioctl_rm_dev(struct file *file, void __user *arg)
if (ret)
return ret;
- if (atomic_xchg(&fs_info->mutually_exclusive_operation_running, 1)) {
+ if (test_and_set_bit(BTRFS_FS_EXCL_OP, &fs_info->flags)) {
ret = BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
goto out_drop_write;
}
@@ -2772,7 +2772,7 @@ static long btrfs_ioctl_rm_dev(struct file *file, void __user *arg)
btrfs_info(fs_info, "disk deleted %s", vol_args->name);
kfree(vol_args);
out:
- atomic_set(&fs_info->mutually_exclusive_operation_running, 0);
+ clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags);
out_drop_write:
mnt_drop_write_file(file);
@@ -3539,12 +3539,9 @@ static int btrfs_clone(struct inode *src, struct inode *inode,
u64 last_dest_end = destoff;
ret = -ENOMEM;
- buf = kmalloc(fs_info->nodesize, GFP_KERNEL | __GFP_NOWARN);
- if (!buf) {
- buf = vmalloc(fs_info->nodesize);
- if (!buf)
- return ret;
- }
+ buf = kvmalloc(fs_info->nodesize, GFP_KERNEL);
+ if (!buf)
+ return ret;
path = btrfs_alloc_path();
if (!path) {
@@ -4442,13 +4439,11 @@ static long btrfs_ioctl_dev_replace(struct btrfs_fs_info *fs_info,
ret = -EROFS;
goto out;
}
- if (atomic_xchg(
- &fs_info->mutually_exclusive_operation_running, 1)) {
+ if (test_and_set_bit(BTRFS_FS_EXCL_OP, &fs_info->flags)) {
ret = BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
} else {
ret = btrfs_dev_replace_by_ioctl(fs_info, p);
- atomic_set(
- &fs_info->mutually_exclusive_operation_running, 0);
+ clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags);
}
break;
case BTRFS_IOCTL_DEV_REPLACE_CMD_STATUS:
@@ -4643,7 +4638,7 @@ static long btrfs_ioctl_balance(struct file *file, void __user *arg)
return ret;
again:
- if (!atomic_xchg(&fs_info->mutually_exclusive_operation_running, 1)) {
+ if (!test_and_set_bit(BTRFS_FS_EXCL_OP, &fs_info->flags)) {
mutex_lock(&fs_info->volume_mutex);
mutex_lock(&fs_info->balance_mutex);
need_unlock = true;
@@ -4689,7 +4684,7 @@ again:
}
locked:
- BUG_ON(!atomic_read(&fs_info->mutually_exclusive_operation_running));
+ BUG_ON(!test_bit(BTRFS_FS_EXCL_OP, &fs_info->flags));
if (arg) {
bargs = memdup_user(arg, sizeof(*bargs));
@@ -4745,11 +4740,10 @@ locked:
do_balance:
/*
- * Ownership of bctl and mutually_exclusive_operation_running
+ * Ownership of bctl and filesystem flag BTRFS_FS_EXCL_OP
* goes to btrfs_balance. bctl is freed in __cancel_balance,
* or, if restriper was paused all the way until unmount, in
- * free_fs_info. mutually_exclusive_operation_running is
- * cleared in __cancel_balance.
+ * free_fs_info. The flag is cleared in __cancel_balance.
*/
need_unlock = false;
@@ -4769,7 +4763,7 @@ out_unlock:
mutex_unlock(&fs_info->balance_mutex);
mutex_unlock(&fs_info->volume_mutex);
if (need_unlock)
- atomic_set(&fs_info->mutually_exclusive_operation_running, 0);
+ clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags);
out:
mnt_drop_write_file(file);
return ret;
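
The ioctl.c changes above all rely on the same bit-flag idiom that replaces the mutually_exclusive_operation_running atomic: test_and_set_bit() either claims the flag or reports that it is already held, and clear_bit() releases it. A rough sketch of the idiom with a hypothetical flags word and bit number (only BTRFS_FS_EXCL_OP and fs_info->flags are the real names):

#include <linux/bitops.h>
#include <linux/errno.h>

#define DEMO_EXCL_OP	0		/* hypothetical bit, stands in for BTRFS_FS_EXCL_OP */

static unsigned long demo_flags;	/* stands in for fs_info->flags */

static int demo_try_exclusive_op(void)
{
	/* Atomic test-and-set: a non-zero return means the bit was already set. */
	if (test_and_set_bit(DEMO_EXCL_OP, &demo_flags))
		return -EBUSY;		/* another exclusive operation is running */
	return 0;
}

static void demo_end_exclusive_op(void)
{
	clear_bit(DEMO_EXCL_OP, &demo_flags);
}

A single bit in the existing fs_info->flags word is cheaper than a dedicated atomic_t and keeps the exclusive-operation state alongside the other BTRFS_FS_* flags.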
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index 9a46878ba60f..7b40e2e7292a 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -212,7 +212,7 @@ static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
set_bit(BTRFS_ORDERED_DIRECT, &entry->flags);
/* one ref for the tree */
- atomic_set(&entry->refs, 1);
+ refcount_set(&entry->refs, 1);
init_waitqueue_head(&entry->wait);
INIT_LIST_HEAD(&entry->list);
INIT_LIST_HEAD(&entry->root_extent_list);
@@ -358,7 +358,7 @@ int btrfs_dec_test_first_ordered_pending(struct inode *inode,
out:
if (!ret && cached && entry) {
*cached = entry;
- atomic_inc(&entry->refs);
+ refcount_inc(&entry->refs);
}
spin_unlock_irqrestore(&tree->lock, flags);
return ret == 0;
@@ -425,7 +425,7 @@ have_entry:
out:
if (!ret && cached && entry) {
*cached = entry;
- atomic_inc(&entry->refs);
+ refcount_inc(&entry->refs);
}
spin_unlock_irqrestore(&tree->lock, flags);
return ret == 0;
@@ -456,7 +456,7 @@ void btrfs_get_logged_extents(struct btrfs_inode *inode,
if (test_and_set_bit(BTRFS_ORDERED_LOGGED, &ordered->flags))
continue;
list_add(&ordered->log_list, logged_list);
- atomic_inc(&ordered->refs);
+ refcount_inc(&ordered->refs);
}
spin_unlock_irq(&tree->lock);
}
@@ -565,7 +565,7 @@ void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
trace_btrfs_ordered_extent_put(entry->inode, entry);
- if (atomic_dec_and_test(&entry->refs)) {
+ if (refcount_dec_and_test(&entry->refs)) {
ASSERT(list_empty(&entry->log_list));
ASSERT(list_empty(&entry->trans_list));
ASSERT(list_empty(&entry->root_extent_list));
@@ -623,7 +623,7 @@ void btrfs_remove_ordered_extent(struct inode *inode,
spin_lock(&fs_info->trans_lock);
trans = fs_info->running_transaction;
if (trans)
- atomic_inc(&trans->use_count);
+ refcount_inc(&trans->use_count);
spin_unlock(&fs_info->trans_lock);
ASSERT(trans);
@@ -690,7 +690,7 @@ int btrfs_wait_ordered_extents(struct btrfs_root *root, int nr,
list_move_tail(&ordered->root_extent_list,
&root->ordered_extents);
- atomic_inc(&ordered->refs);
+ refcount_inc(&ordered->refs);
spin_unlock(&root->ordered_extent_lock);
btrfs_init_work(&ordered->flush_work,
@@ -870,7 +870,7 @@ struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct inode *inode,
if (!offset_in_entry(entry, file_offset))
entry = NULL;
if (entry)
- atomic_inc(&entry->refs);
+ refcount_inc(&entry->refs);
out:
spin_unlock_irq(&tree->lock);
return entry;
@@ -911,7 +911,7 @@ struct btrfs_ordered_extent *btrfs_lookup_ordered_range(
}
out:
if (entry)
- atomic_inc(&entry->refs);
+ refcount_inc(&entry->refs);
spin_unlock_irq(&tree->lock);
return entry;
}
@@ -948,7 +948,7 @@ btrfs_lookup_first_ordered_extent(struct inode *inode, u64 file_offset)
goto out;
entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
- atomic_inc(&entry->refs);
+ refcount_inc(&entry->refs);
out:
spin_unlock_irq(&tree->lock);
return entry;
diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h
index 195c93b67fe0..e0c1d5b8d859 100644
--- a/fs/btrfs/ordered-data.h
+++ b/fs/btrfs/ordered-data.h
@@ -113,7 +113,7 @@ struct btrfs_ordered_extent {
int compress_type;
/* reference count */
- atomic_t refs;
+ refcount_t refs;
/* the inode we belong to */
struct inode *inode;
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index afbea61d957e..deffbeb74a0b 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -47,50 +47,6 @@
* - check all ioctl parameters
*/
-/*
- * one struct for each qgroup, organized in fs_info->qgroup_tree.
- */
-struct btrfs_qgroup {
- u64 qgroupid;
-
- /*
- * state
- */
- u64 rfer; /* referenced */
- u64 rfer_cmpr; /* referenced compressed */
- u64 excl; /* exclusive */
- u64 excl_cmpr; /* exclusive compressed */
-
- /*
- * limits
- */
- u64 lim_flags; /* which limits are set */
- u64 max_rfer;
- u64 max_excl;
- u64 rsv_rfer;
- u64 rsv_excl;
-
- /*
- * reservation tracking
- */
- u64 reserved;
-
- /*
- * lists
- */
- struct list_head groups; /* groups this group is member of */
- struct list_head members; /* groups that are members of this group */
- struct list_head dirty; /* dirty groups */
- struct rb_node node; /* tree of qgroups */
-
- /*
- * temp variables for accounting operations
- * Refer to qgroup_shared_accounting() for details.
- */
- u64 old_refcnt;
- u64 new_refcnt;
-};
-
static void btrfs_qgroup_update_old_refcnt(struct btrfs_qgroup *qg, u64 seq,
int mod)
{
@@ -1078,6 +1034,7 @@ static int __qgroup_excl_accounting(struct btrfs_fs_info *fs_info,
qgroup->excl += sign * num_bytes;
qgroup->excl_cmpr += sign * num_bytes;
if (sign > 0) {
+ trace_qgroup_update_reserve(fs_info, qgroup, -(s64)num_bytes);
if (qgroup->reserved < num_bytes)
report_reserved_underflow(fs_info, qgroup, num_bytes);
else
@@ -1103,6 +1060,8 @@ static int __qgroup_excl_accounting(struct btrfs_fs_info *fs_info,
WARN_ON(sign < 0 && qgroup->excl < num_bytes);
qgroup->excl += sign * num_bytes;
if (sign > 0) {
+ trace_qgroup_update_reserve(fs_info, qgroup,
+ -(s64)num_bytes);
if (qgroup->reserved < num_bytes)
report_reserved_underflow(fs_info, qgroup,
num_bytes);
@@ -2058,12 +2017,12 @@ int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans,
if (!ret) {
/*
- * Use (u64)-1 as time_seq to do special search, which
+ * Use SEQ_LAST as time_seq to do special search, which
* doesn't lock tree or delayed_refs and search current
* root. It's safe inside commit_transaction().
*/
ret = btrfs_find_all_roots(trans, fs_info,
- record->bytenr, (u64)-1, &new_roots);
+ record->bytenr, SEQ_LAST, &new_roots);
if (ret < 0)
goto cleanup;
if (qgroup_to_skip)
@@ -2370,6 +2329,7 @@ static int qgroup_reserve(struct btrfs_root *root, u64 num_bytes, bool enforce)
struct btrfs_fs_info *fs_info = root->fs_info;
u64 ref_root = root->root_key.objectid;
int ret = 0;
+ int retried = 0;
struct ulist_node *unode;
struct ulist_iterator uiter;
@@ -2378,7 +2338,7 @@ static int qgroup_reserve(struct btrfs_root *root, u64 num_bytes, bool enforce)
if (num_bytes == 0)
return 0;
-
+retry:
spin_lock(&fs_info->qgroup_lock);
quota_root = fs_info->quota_root;
if (!quota_root)
@@ -2405,6 +2365,27 @@ static int qgroup_reserve(struct btrfs_root *root, u64 num_bytes, bool enforce)
qg = unode_aux_to_qgroup(unode);
if (enforce && !qgroup_check_limits(qg, num_bytes)) {
+ /*
+ * Commit the tree and retry, since we may have
+ * deletions which would free up space.
+ */
+ if (!retried && qg->reserved > 0) {
+ struct btrfs_trans_handle *trans;
+
+ spin_unlock(&fs_info->qgroup_lock);
+ ret = btrfs_start_delalloc_inodes(root, 0);
+ if (ret)
+ return ret;
+ btrfs_wait_ordered_extents(root, -1, 0, (u64)-1);
+ trans = btrfs_join_transaction(root);
+ if (IS_ERR(trans))
+ return PTR_ERR(trans);
+ ret = btrfs_commit_transaction(trans);
+ if (ret)
+ return ret;
+ retried++;
+ goto retry;
+ }
ret = -EDQUOT;
goto out;
}
@@ -2427,6 +2408,7 @@ static int qgroup_reserve(struct btrfs_root *root, u64 num_bytes, bool enforce)
qg = unode_aux_to_qgroup(unode);
+ trace_qgroup_update_reserve(fs_info, qg, num_bytes);
qg->reserved += num_bytes;
}
@@ -2472,6 +2454,7 @@ void btrfs_qgroup_free_refroot(struct btrfs_fs_info *fs_info,
qg = unode_aux_to_qgroup(unode);
+ trace_qgroup_update_reserve(fs_info, qg, -(s64)num_bytes);
if (qg->reserved < num_bytes)
report_reserved_underflow(fs_info, qg, num_bytes);
else
@@ -2490,18 +2473,6 @@ out:
spin_unlock(&fs_info->qgroup_lock);
}
-void assert_qgroups_uptodate(struct btrfs_trans_handle *trans)
-{
- if (list_empty(&trans->qgroup_ref_list) && !trans->delayed_ref_elem.seq)
- return;
- btrfs_err(trans->fs_info,
- "qgroups not uptodate in trans handle %p: list is%s empty, seq is %#x.%x",
- trans, list_empty(&trans->qgroup_ref_list) ? "" : " not",
- (u32)(trans->delayed_ref_elem.seq >> 32),
- (u32)trans->delayed_ref_elem.seq);
- BUG();
-}
-
/*
* returns < 0 on error, 0 when more leafs are to be scanned.
* returns 1 when done.
@@ -2889,14 +2860,14 @@ static int __btrfs_qgroup_release_data(struct inode *inode, u64 start, u64 len,
if (ret < 0)
goto out;
- if (free) {
- btrfs_qgroup_free_refroot(BTRFS_I(inode)->root->fs_info,
- BTRFS_I(inode)->root->objectid,
- changeset.bytes_changed);
+ if (free)
trace_op = QGROUP_FREE;
- }
trace_btrfs_qgroup_release_data(inode, start, len,
changeset.bytes_changed, trace_op);
+ if (free)
+ btrfs_qgroup_free_refroot(BTRFS_I(inode)->root->fs_info,
+ BTRFS_I(inode)->root->objectid,
+ changeset.bytes_changed);
out:
ulist_release(&changeset.range_changed);
return ret;
@@ -2948,6 +2919,7 @@ int btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
return 0;
BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize));
+ trace_qgroup_meta_reserve(root, (s64)num_bytes);
ret = qgroup_reserve(root, num_bytes, enforce);
if (ret < 0)
return ret;
@@ -2967,6 +2939,7 @@ void btrfs_qgroup_free_meta_all(struct btrfs_root *root)
reserved = atomic64_xchg(&root->qgroup_meta_rsv, 0);
if (reserved == 0)
return;
+ trace_qgroup_meta_reserve(root, -(s64)reserved);
btrfs_qgroup_free_refroot(fs_info, root->objectid, reserved);
}
@@ -2981,6 +2954,7 @@ void btrfs_qgroup_free_meta(struct btrfs_root *root, int num_bytes)
BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize));
WARN_ON(atomic64_read(&root->qgroup_meta_rsv) < num_bytes);
atomic64_sub(num_bytes, &root->qgroup_meta_rsv);
+ trace_qgroup_meta_reserve(root, -(s64)num_bytes);
btrfs_qgroup_free_refroot(fs_info, root->objectid, num_bytes);
}
diff --git a/fs/btrfs/qgroup.h b/fs/btrfs/qgroup.h
index 26932a8a1993..fe04d3f295c6 100644
--- a/fs/btrfs/qgroup.h
+++ b/fs/btrfs/qgroup.h
@@ -62,6 +62,50 @@ struct btrfs_qgroup_extent_record {
};
/*
+ * one struct for each qgroup, organized in fs_info->qgroup_tree.
+ */
+struct btrfs_qgroup {
+ u64 qgroupid;
+
+ /*
+ * state
+ */
+ u64 rfer; /* referenced */
+ u64 rfer_cmpr; /* referenced compressed */
+ u64 excl; /* exclusive */
+ u64 excl_cmpr; /* exclusive compressed */
+
+ /*
+ * limits
+ */
+ u64 lim_flags; /* which limits are set */
+ u64 max_rfer;
+ u64 max_excl;
+ u64 rsv_rfer;
+ u64 rsv_excl;
+
+ /*
+ * reservation tracking
+ */
+ u64 reserved;
+
+ /*
+ * lists
+ */
+ struct list_head groups; /* groups this group is member of */
+ struct list_head members; /* groups that are members of this group */
+ struct list_head dirty; /* dirty groups */
+ struct rb_node node; /* tree of qgroups */
+
+ /*
+ * temp variables for accounting operations
+ * Refer to qgroup_shared_accounting() for details.
+ */
+ u64 old_refcnt;
+ u64 new_refcnt;
+};
+
+/*
* For qgroup event trace points only
*/
#define QGROUP_RESERVE (1<<0)
@@ -186,17 +230,12 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
struct btrfs_qgroup_inherit *inherit);
void btrfs_qgroup_free_refroot(struct btrfs_fs_info *fs_info,
u64 ref_root, u64 num_bytes);
-/*
- * TODO: Add proper trace point for it, as btrfs_qgroup_free() is
- * called by everywhere, can't provide good trace for delayed ref case.
- */
static inline void btrfs_qgroup_free_delayed_ref(struct btrfs_fs_info *fs_info,
u64 ref_root, u64 num_bytes)
{
- btrfs_qgroup_free_refroot(fs_info, ref_root, num_bytes);
trace_btrfs_qgroup_free_delayed_ref(fs_info, ref_root, num_bytes);
+ btrfs_qgroup_free_refroot(fs_info, ref_root, num_bytes);
}
-void assert_qgroups_uptodate(struct btrfs_trans_handle *trans);
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
int btrfs_verify_qgroup_counts(struct btrfs_fs_info *fs_info, u64 qgroupid,
diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c
index 1571bf26dc07..d8ea0eb76325 100644
--- a/fs/btrfs/raid56.c
+++ b/fs/btrfs/raid56.c
@@ -149,7 +149,7 @@ struct btrfs_raid_bio {
int generic_bio_cnt;
- atomic_t refs;
+ refcount_t refs;
atomic_t stripes_pending;
@@ -389,7 +389,7 @@ static void __remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
if (bio_list_empty(&rbio->bio_list)) {
if (!list_empty(&rbio->hash_list)) {
list_del_init(&rbio->hash_list);
- atomic_dec(&rbio->refs);
+ refcount_dec(&rbio->refs);
BUG_ON(!list_empty(&rbio->plug_list));
}
}
@@ -480,7 +480,7 @@ static void cache_rbio(struct btrfs_raid_bio *rbio)
/* bump our ref if we were not in the list before */
if (!test_and_set_bit(RBIO_CACHE_BIT, &rbio->flags))
- atomic_inc(&rbio->refs);
+ refcount_inc(&rbio->refs);
if (!list_empty(&rbio->stripe_cache)){
list_move(&rbio->stripe_cache, &table->stripe_cache);
@@ -689,7 +689,7 @@ static noinline int lock_stripe_add(struct btrfs_raid_bio *rbio)
test_bit(RBIO_CACHE_BIT, &cur->flags) &&
!test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags)) {
list_del_init(&cur->hash_list);
- atomic_dec(&cur->refs);
+ refcount_dec(&cur->refs);
steal_rbio(cur, rbio);
cache_drop = cur;
@@ -738,7 +738,7 @@ static noinline int lock_stripe_add(struct btrfs_raid_bio *rbio)
}
}
lockit:
- atomic_inc(&rbio->refs);
+ refcount_inc(&rbio->refs);
list_add(&rbio->hash_list, &h->hash_list);
out:
spin_unlock_irqrestore(&h->lock, flags);
@@ -784,7 +784,7 @@ static noinline void unlock_stripe(struct btrfs_raid_bio *rbio)
}
list_del_init(&rbio->hash_list);
- atomic_dec(&rbio->refs);
+ refcount_dec(&rbio->refs);
/*
* we use the plug list to hold all the rbios
@@ -801,7 +801,7 @@ static noinline void unlock_stripe(struct btrfs_raid_bio *rbio)
list_del_init(&rbio->plug_list);
list_add(&next->hash_list, &h->hash_list);
- atomic_inc(&next->refs);
+ refcount_inc(&next->refs);
spin_unlock(&rbio->bio_list_lock);
spin_unlock_irqrestore(&h->lock, flags);
@@ -843,8 +843,7 @@ static void __free_raid_bio(struct btrfs_raid_bio *rbio)
{
int i;
- WARN_ON(atomic_read(&rbio->refs) < 0);
- if (!atomic_dec_and_test(&rbio->refs))
+ if (!refcount_dec_and_test(&rbio->refs))
return;
WARN_ON(!list_empty(&rbio->stripe_cache));
@@ -997,7 +996,7 @@ static struct btrfs_raid_bio *alloc_rbio(struct btrfs_fs_info *fs_info,
rbio->stripe_npages = stripe_npages;
rbio->faila = -1;
rbio->failb = -1;
- atomic_set(&rbio->refs, 1);
+ refcount_set(&rbio->refs, 1);
atomic_set(&rbio->error, 0);
atomic_set(&rbio->stripes_pending, 0);
@@ -2118,6 +2117,11 @@ int raid56_parity_recover(struct btrfs_fs_info *fs_info, struct bio *bio,
struct btrfs_raid_bio *rbio;
int ret;
+ if (generic_io) {
+ ASSERT(bbio->mirror_num == mirror_num);
+ btrfs_io_bio(bio)->mirror_num = mirror_num;
+ }
+
rbio = alloc_rbio(fs_info, bbio, stripe_len);
if (IS_ERR(rbio)) {
if (generic_io)
@@ -2194,6 +2198,8 @@ static void read_rebuild_work(struct btrfs_work *work)
/*
* The following code is used to scrub/replace the parity stripe
*
+ * Caller must have already increased bio_counter for getting @bbio.
+ *
* Note: We need to make sure all the pages that are added into the scrub/replace
* raid bio are correct and not changed during the scrub/replace. That
* is, those pages just hold metadata or file data with checksum.
@@ -2231,6 +2237,12 @@ raid56_parity_alloc_scrub_rbio(struct btrfs_fs_info *fs_info, struct bio *bio,
ASSERT(rbio->stripe_npages == stripe_nsectors);
bitmap_copy(rbio->dbitmap, dbitmap, stripe_nsectors);
+ /*
+ * We have already increased bio_counter when getting bbio, record it
+ * so we can free it at rbio_orig_end_io().
+ */
+ rbio->generic_bio_cnt = 1;
+
return rbio;
}
@@ -2673,6 +2685,12 @@ raid56_alloc_missing_rbio(struct btrfs_fs_info *fs_info, struct bio *bio,
return NULL;
}
+ /*
+ * When we get bbio, we have already increased bio_counter, record it
+ * so we can free it at rbio_orig_end_io()
+ */
+ rbio->generic_bio_cnt = 1;
+
return rbio;
}
diff --git a/fs/btrfs/reada.c b/fs/btrfs/reada.c
index e88bca87f5d2..a17e775a4a89 100644
--- a/fs/btrfs/reada.c
+++ b/fs/btrfs/reada.c
@@ -209,9 +209,9 @@ cleanup:
return;
}
-int btree_readahead_hook(struct btrfs_fs_info *fs_info,
- struct extent_buffer *eb, int err)
+int btree_readahead_hook(struct extent_buffer *eb, int err)
{
+ struct btrfs_fs_info *fs_info = eb->fs_info;
int ret = 0;
struct reada_extent *re;
@@ -235,10 +235,10 @@ start_machine:
return ret;
}
-static struct reada_zone *reada_find_zone(struct btrfs_fs_info *fs_info,
- struct btrfs_device *dev, u64 logical,
+static struct reada_zone *reada_find_zone(struct btrfs_device *dev, u64 logical,
struct btrfs_bio *bbio)
{
+ struct btrfs_fs_info *fs_info = dev->fs_info;
int ret;
struct reada_zone *zone;
struct btrfs_block_group_cache *cache = NULL;
@@ -270,6 +270,12 @@ static struct reada_zone *reada_find_zone(struct btrfs_fs_info *fs_info,
if (!zone)
return NULL;
+ ret = radix_tree_preload(GFP_KERNEL);
+ if (ret) {
+ kfree(zone);
+ return NULL;
+ }
+
zone->start = start;
zone->end = end;
INIT_LIST_HEAD(&zone->list);
@@ -299,6 +305,7 @@ static struct reada_zone *reada_find_zone(struct btrfs_fs_info *fs_info,
zone = NULL;
}
spin_unlock(&fs_info->reada_lock);
+ radix_tree_preload_end();
return zone;
}
@@ -313,7 +320,6 @@ static struct reada_extent *reada_find_extent(struct btrfs_fs_info *fs_info,
struct btrfs_bio *bbio = NULL;
struct btrfs_device *dev;
struct btrfs_device *prev_dev;
- u32 blocksize;
u64 length;
int real_stripes;
int nzones = 0;
@@ -334,7 +340,6 @@ static struct reada_extent *reada_find_extent(struct btrfs_fs_info *fs_info,
if (!re)
return NULL;
- blocksize = fs_info->nodesize;
re->logical = logical;
re->top = *top;
INIT_LIST_HEAD(&re->extctl);
@@ -344,10 +349,10 @@ static struct reada_extent *reada_find_extent(struct btrfs_fs_info *fs_info,
/*
* map block
*/
- length = blocksize;
+ length = fs_info->nodesize;
ret = btrfs_map_block(fs_info, BTRFS_MAP_GET_READ_MIRRORS, logical,
&length, &bbio, 0);
- if (ret || !bbio || length < blocksize)
+ if (ret || !bbio || length < fs_info->nodesize)
goto error;
if (bbio->num_stripes > BTRFS_MAX_MIRRORS) {
@@ -367,7 +372,7 @@ static struct reada_extent *reada_find_extent(struct btrfs_fs_info *fs_info,
if (!dev->bdev)
continue;
- zone = reada_find_zone(fs_info, dev, logical, bbio);
+ zone = reada_find_zone(dev, logical, bbio);
if (!zone)
continue;
@@ -386,6 +391,10 @@ static struct reada_extent *reada_find_extent(struct btrfs_fs_info *fs_info,
goto error;
}
+ ret = radix_tree_preload(GFP_KERNEL);
+ if (ret)
+ goto error;
+
/* insert extent in reada_tree + all per-device trees, all or nothing */
btrfs_dev_replace_lock(&fs_info->dev_replace, 0);
spin_lock(&fs_info->reada_lock);
@@ -395,13 +404,16 @@ static struct reada_extent *reada_find_extent(struct btrfs_fs_info *fs_info,
re_exist->refcnt++;
spin_unlock(&fs_info->reada_lock);
btrfs_dev_replace_unlock(&fs_info->dev_replace, 0);
+ radix_tree_preload_end();
goto error;
}
if (ret) {
spin_unlock(&fs_info->reada_lock);
btrfs_dev_replace_unlock(&fs_info->dev_replace, 0);
+ radix_tree_preload_end();
goto error;
}
+ radix_tree_preload_end();
prev_dev = NULL;
dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(
&fs_info->dev_replace);
@@ -639,9 +651,9 @@ static int reada_pick_zone(struct btrfs_device *dev)
return 1;
}
-static int reada_start_machine_dev(struct btrfs_fs_info *fs_info,
- struct btrfs_device *dev)
+static int reada_start_machine_dev(struct btrfs_device *dev)
{
+ struct btrfs_fs_info *fs_info = dev->fs_info;
struct reada_extent *re = NULL;
int mirror_num = 0;
struct extent_buffer *eb = NULL;
@@ -754,8 +766,7 @@ static void __reada_start_machine(struct btrfs_fs_info *fs_info)
list_for_each_entry(device, &fs_devices->devices, dev_list) {
if (atomic_read(&device->reada_in_flight) <
MAX_IN_FLIGHT)
- enqueued += reada_start_machine_dev(fs_info,
- device);
+ enqueued += reada_start_machine_dev(device);
}
mutex_unlock(&fs_devices->device_list_mutex);
total += enqueued;
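
The reada.c hunks above wrap the radix-tree insertions, which happen under fs_info->reada_lock, in radix_tree_preload()/radix_tree_preload_end(). A minimal sketch of that pattern with a hypothetical tree and lock (not the btrfs readahead structures):

#include <linux/radix-tree.h>
#include <linux/spinlock.h>

static RADIX_TREE(demo_tree, GFP_ATOMIC);	/* hypothetical tree */
static DEFINE_SPINLOCK(demo_lock);

static int demo_insert(unsigned long index, void *item)
{
	int ret;

	/* Preallocate tree nodes while we may still sleep ... */
	ret = radix_tree_preload(GFP_KERNEL);
	if (ret)
		return ret;

	/* ... so the insertion under the spinlock cannot fail on allocation. */
	spin_lock(&demo_lock);
	ret = radix_tree_insert(&demo_tree, index, item);
	spin_unlock(&demo_lock);

	radix_tree_preload_end();
	return ret;
}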
diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c
index a08224eab8b4..7d6bc308bf43 100644
--- a/fs/btrfs/root-tree.c
+++ b/fs/btrfs/root-tree.c
@@ -501,8 +501,9 @@ void btrfs_update_root_times(struct btrfs_trans_handle *trans,
struct btrfs_root *root)
{
struct btrfs_root_item *item = &root->root_item;
- struct timespec ct = current_fs_time(root->fs_info->sb);
+ struct timespec ct;
+ ktime_get_real_ts(&ct);
spin_lock(&root->root_item_lock);
btrfs_set_root_ctransid(item, trans->transid);
btrfs_set_stack_timespec_sec(&item->ctime, ct.tv_sec);
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index b0251eb1239f..c7b45eb2403d 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -64,7 +64,7 @@ struct scrub_ctx;
#define SCRUB_MAX_PAGES_PER_BLOCK 16 /* 64k per node/leaf/sector */
struct scrub_recover {
- atomic_t refs;
+ refcount_t refs;
struct btrfs_bio *bbio;
u64 map_length;
};
@@ -112,7 +112,7 @@ struct scrub_block {
struct scrub_page *pagev[SCRUB_MAX_PAGES_PER_BLOCK];
int page_count;
atomic_t outstanding_pages;
- atomic_t refs; /* free mem on transition to zero */
+ refcount_t refs; /* free mem on transition to zero */
struct scrub_ctx *sctx;
struct scrub_parity *sparity;
struct {
@@ -140,9 +140,9 @@ struct scrub_parity {
int nsectors;
- int stripe_len;
+ u64 stripe_len;
- atomic_t refs;
+ refcount_t refs;
struct list_head spages;
@@ -202,7 +202,7 @@ struct scrub_ctx {
* doesn't free the scrub context before or while the workers are
* doing the wakeup() call.
*/
- atomic_t refs;
+ refcount_t refs;
};
struct scrub_fixup_nodatasum {
@@ -240,6 +240,13 @@ struct scrub_warning {
struct btrfs_device *dev;
};
+struct full_stripe_lock {
+ struct rb_node node;
+ u64 logical;
+ u64 refs;
+ struct mutex mutex;
+};
+
static void scrub_pending_bio_inc(struct scrub_ctx *sctx);
static void scrub_pending_bio_dec(struct scrub_ctx *sctx);
static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx);
@@ -305,7 +312,7 @@ static void scrub_put_ctx(struct scrub_ctx *sctx);
static void scrub_pending_bio_inc(struct scrub_ctx *sctx)
{
- atomic_inc(&sctx->refs);
+ refcount_inc(&sctx->refs);
atomic_inc(&sctx->bios_in_flight);
}
@@ -349,6 +356,222 @@ static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
}
/*
+ * Insert new full stripe lock into full stripe locks tree
+ *
+ * Return pointer to existing or newly inserted full_stripe_lock structure if
+ * everything works well.
+ * Return ERR_PTR(-ENOMEM) if we failed to allocate memory
+ *
+ * NOTE: caller must hold full_stripe_locks_root->lock before calling this
+ * function
+ */
+static struct full_stripe_lock *insert_full_stripe_lock(
+ struct btrfs_full_stripe_locks_tree *locks_root,
+ u64 fstripe_logical)
+{
+ struct rb_node **p;
+ struct rb_node *parent = NULL;
+ struct full_stripe_lock *entry;
+ struct full_stripe_lock *ret;
+
+ WARN_ON(!mutex_is_locked(&locks_root->lock));
+
+ p = &locks_root->root.rb_node;
+ while (*p) {
+ parent = *p;
+ entry = rb_entry(parent, struct full_stripe_lock, node);
+ if (fstripe_logical < entry->logical) {
+ p = &(*p)->rb_left;
+ } else if (fstripe_logical > entry->logical) {
+ p = &(*p)->rb_right;
+ } else {
+ entry->refs++;
+ return entry;
+ }
+ }
+
+ /* Insert new lock */
+ ret = kmalloc(sizeof(*ret), GFP_KERNEL);
+ if (!ret)
+ return ERR_PTR(-ENOMEM);
+ ret->logical = fstripe_logical;
+ ret->refs = 1;
+ mutex_init(&ret->mutex);
+
+ rb_link_node(&ret->node, parent, p);
+ rb_insert_color(&ret->node, &locks_root->root);
+ return ret;
+}
+
+/*
+ * Search for a full stripe lock of a block group
+ *
+ * Return pointer to existing full stripe lock if found
+ * Return NULL if not found
+ */
+static struct full_stripe_lock *search_full_stripe_lock(
+ struct btrfs_full_stripe_locks_tree *locks_root,
+ u64 fstripe_logical)
+{
+ struct rb_node *node;
+ struct full_stripe_lock *entry;
+
+ WARN_ON(!mutex_is_locked(&locks_root->lock));
+
+ node = locks_root->root.rb_node;
+ while (node) {
+ entry = rb_entry(node, struct full_stripe_lock, node);
+ if (fstripe_logical < entry->logical)
+ node = node->rb_left;
+ else if (fstripe_logical > entry->logical)
+ node = node->rb_right;
+ else
+ return entry;
+ }
+ return NULL;
+}
+
+/*
+ * Helper to get full stripe logical from a normal bytenr.
+ *
+ * Caller must ensure @cache is a RAID56 block group.
+ */
+static u64 get_full_stripe_logical(struct btrfs_block_group_cache *cache,
+ u64 bytenr)
+{
+ u64 ret;
+
+ /*
+ * Due to chunk item size limit, full stripe length should not be
+ * larger than U32_MAX. Just a sanity check here.
+ */
+ WARN_ON_ONCE(cache->full_stripe_len >= U32_MAX);
+
+ /*
+ * round_down() can only handle power of 2, while RAID56 full
+ * stripe length can be 64KiB * n, so we need to manually round down.
+ */
+ ret = div64_u64(bytenr - cache->key.objectid, cache->full_stripe_len) *
+ cache->full_stripe_len + cache->key.objectid;
+ return ret;
+}
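
The manual rounding above is plain 64-bit division followed by multiplication, because round_down() only handles power-of-two alignments while a RAID5/6 full stripe is 64KiB times the number of data stripes. A minimal userspace sketch of the same arithmetic, with made-up chunk values (not taken from this patch):

#include <stdint.h>
#include <stdio.h>

/* Same rounding as get_full_stripe_logical(), in plain C. */
static uint64_t full_stripe_start(uint64_t chunk_start,
                                  uint64_t full_stripe_len, uint64_t bytenr)
{
        /* div64_u64() in the kernel is simply 64-bit division. */
        return (bytenr - chunk_start) / full_stripe_len * full_stripe_len +
               chunk_start;
}

int main(void)
{
        /* RAID5, 3 data stripes of 64KiB -> 192KiB full stripe (not 2^n). */
        uint64_t chunk_start = 1ULL << 30;      /* hypothetical chunk start */
        uint64_t full_stripe_len = 3 * 64 * 1024;
        uint64_t bytenr = chunk_start + 500 * 1024;

        /* Prints chunk_start + 384KiB: 500KiB rounds down to 2 full stripes. */
        printf("%llu\n",
               (unsigned long long)full_stripe_start(chunk_start,
                                                     full_stripe_len, bytenr));
        return 0;
}
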
+
+/*
+ * Lock a full stripe to avoid concurrency between recovery and read
+ *
+ * It's only used for profiles with parity (RAID5/6); for other profiles it
+ * does nothing.
+ *
+ * Return 0 if we locked the full stripe covering @bytenr, with a mutex held.
+ * The caller must then call unlock_full_stripe() from the same context.
+ *
+ * Return <0 if we encounter an error.
+ */
+static int lock_full_stripe(struct btrfs_fs_info *fs_info, u64 bytenr,
+ bool *locked_ret)
+{
+ struct btrfs_block_group_cache *bg_cache;
+ struct btrfs_full_stripe_locks_tree *locks_root;
+ struct full_stripe_lock *existing;
+ u64 fstripe_start;
+ int ret = 0;
+
+ *locked_ret = false;
+ bg_cache = btrfs_lookup_block_group(fs_info, bytenr);
+ if (!bg_cache) {
+ ASSERT(0);
+ return -ENOENT;
+ }
+
+ /* Profiles not based on parity don't need full stripe lock */
+ if (!(bg_cache->flags & BTRFS_BLOCK_GROUP_RAID56_MASK))
+ goto out;
+ locks_root = &bg_cache->full_stripe_locks_root;
+
+ fstripe_start = get_full_stripe_logical(bg_cache, bytenr);
+
+ /* Now insert the full stripe lock */
+ mutex_lock(&locks_root->lock);
+ existing = insert_full_stripe_lock(locks_root, fstripe_start);
+ mutex_unlock(&locks_root->lock);
+ if (IS_ERR(existing)) {
+ ret = PTR_ERR(existing);
+ goto out;
+ }
+ mutex_lock(&existing->mutex);
+ *locked_ret = true;
+out:
+ btrfs_put_block_group(bg_cache);
+ return ret;
+}
+
+/*
+ * Unlock a full stripe.
+ *
+ * NOTE: The caller must ensure this is called from the same context as the
+ * corresponding lock_full_stripe().
+ *
+ * Return 0 if we unlock the full stripe without problem.
+ * Return <0 on error.
+ */
+static int unlock_full_stripe(struct btrfs_fs_info *fs_info, u64 bytenr,
+ bool locked)
+{
+ struct btrfs_block_group_cache *bg_cache;
+ struct btrfs_full_stripe_locks_tree *locks_root;
+ struct full_stripe_lock *fstripe_lock;
+ u64 fstripe_start;
+ bool freeit = false;
+ int ret = 0;
+
+ /* If we didn't acquire full stripe lock, no need to continue */
+ if (!locked)
+ return 0;
+
+ bg_cache = btrfs_lookup_block_group(fs_info, bytenr);
+ if (!bg_cache) {
+ ASSERT(0);
+ return -ENOENT;
+ }
+ if (!(bg_cache->flags & BTRFS_BLOCK_GROUP_RAID56_MASK))
+ goto out;
+
+ locks_root = &bg_cache->full_stripe_locks_root;
+ fstripe_start = get_full_stripe_logical(bg_cache, bytenr);
+
+ mutex_lock(&locks_root->lock);
+ fstripe_lock = search_full_stripe_lock(locks_root, fstripe_start);
+ /* Unpaired unlock_full_stripe() detected */
+ if (!fstripe_lock) {
+ WARN_ON(1);
+ ret = -ENOENT;
+ mutex_unlock(&locks_root->lock);
+ goto out;
+ }
+
+ if (fstripe_lock->refs == 0) {
+ WARN_ON(1);
+ btrfs_warn(fs_info, "full stripe lock at %llu refcount underflow",
+ fstripe_lock->logical);
+ } else {
+ fstripe_lock->refs--;
+ }
+
+ if (fstripe_lock->refs == 0) {
+ rb_erase(&fstripe_lock->node, &locks_root->root);
+ freeit = true;
+ }
+ mutex_unlock(&locks_root->lock);
+
+ mutex_unlock(&fstripe_lock->mutex);
+ if (freeit)
+ kfree(fstripe_lock);
+out:
+ btrfs_put_block_group(bg_cache);
+ return ret;
+}
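
lock_full_stripe() and unlock_full_stripe() are meant to be used as a strict pair within one function, which is how scrub_handle_errored_block() uses them further down. A condensed sketch of that calling pattern; repair_one_block() is a placeholder, not a helper from this patch:

/*
 * Condensed sketch of the intended pairing, modeled on the change to
 * scrub_handle_errored_block() below.  repair_one_block() is a placeholder.
 */
static int scrub_repair_sketch(struct btrfs_fs_info *fs_info, u64 logical)
{
        bool locked = false;
        int ret, ret2;

        /* No-op for non-RAID56 block groups, per-full-stripe mutex otherwise. */
        ret = lock_full_stripe(fs_info, logical, &locked);
        if (ret < 0)
                return ret;

        ret = repair_one_block(fs_info, logical);       /* placeholder */

        /* Same context as the lock; only unlocks if we actually locked. */
        ret2 = unlock_full_stripe(fs_info, logical, locked);
        if (ret >= 0 && ret2 < 0)
                ret = ret2;
        return ret;
}
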
+
+/*
* used for workers that require transaction commits (i.e., for the
* NOCOW case)
*/
@@ -356,7 +579,7 @@ static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx)
{
struct btrfs_fs_info *fs_info = sctx->fs_info;
- atomic_inc(&sctx->refs);
+ refcount_inc(&sctx->refs);
/*
* increment scrubs_running to prevent cancel requests from
* completing as long as a worker is running. we must also
@@ -447,7 +670,7 @@ static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx)
static void scrub_put_ctx(struct scrub_ctx *sctx)
{
- if (atomic_dec_and_test(&sctx->refs))
+ if (refcount_dec_and_test(&sctx->refs))
scrub_free_ctx(sctx);
}
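
The atomic_t to refcount_t conversions in this series keep the same get/put behaviour for correct code, while refcount_t additionally saturates and warns on increment-from-zero and on overflow. A small sketch of the pattern these call sites follow; the structure and function names are illustrative, not from the patch:

#include <linux/refcount.h>
#include <linux/slab.h>

/* Illustrative object; not a structure from this patch. */
struct demo_obj {
        refcount_t refs;
};

static struct demo_obj *demo_alloc(void)
{
        struct demo_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

        if (obj)
                refcount_set(&obj->refs, 1);    /* caller owns the first ref */
        return obj;
}

static void demo_get(struct demo_obj *obj)
{
        /* WARNs and saturates instead of silently resurrecting a freed object. */
        refcount_inc(&obj->refs);
}

static void demo_put(struct demo_obj *obj)
{
        /* Free exactly once, when the last reference is dropped. */
        if (refcount_dec_and_test(&obj->refs))
                kfree(obj);
}
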
@@ -462,7 +685,7 @@ struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace)
sctx = kzalloc(sizeof(*sctx), GFP_KERNEL);
if (!sctx)
goto nomem;
- atomic_set(&sctx->refs, 1);
+ refcount_set(&sctx->refs, 1);
sctx->is_dev_replace = is_dev_replace;
sctx->pages_per_rd_bio = SCRUB_PAGES_PER_RD_BIO;
sctx->curr = -1;
@@ -857,12 +1080,14 @@ out:
static inline void scrub_get_recover(struct scrub_recover *recover)
{
- atomic_inc(&recover->refs);
+ refcount_inc(&recover->refs);
}
-static inline void scrub_put_recover(struct scrub_recover *recover)
+static inline void scrub_put_recover(struct btrfs_fs_info *fs_info,
+ struct scrub_recover *recover)
{
- if (atomic_dec_and_test(&recover->refs)) {
+ if (refcount_dec_and_test(&recover->refs)) {
+ btrfs_bio_counter_dec(fs_info);
btrfs_put_bbio(recover->bbio);
kfree(recover);
}
@@ -892,6 +1117,7 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
int mirror_index;
int page_num;
int success;
+ bool full_stripe_locked;
static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL,
DEFAULT_RATELIMIT_BURST);
@@ -917,6 +1143,24 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
have_csum = sblock_to_check->pagev[0]->have_csum;
dev = sblock_to_check->pagev[0]->dev;
+ /*
+ * For RAID5/6, a race can happen with a scrub thread on a different
+ * device. On data corruption, the parity and data threads will both
+ * try to recover the data.
+ * The race can lead to doubly-added csum errors, or even an
+ * unrecoverable error.
+ */
+ ret = lock_full_stripe(fs_info, logical, &full_stripe_locked);
+ if (ret < 0) {
+ spin_lock(&sctx->stat_lock);
+ if (ret == -ENOMEM)
+ sctx->stat.malloc_errors++;
+ sctx->stat.read_errors++;
+ sctx->stat.uncorrectable_errors++;
+ spin_unlock(&sctx->stat_lock);
+ return ret;
+ }
+
if (sctx->is_dev_replace && !is_metadata && !have_csum) {
sblocks_for_recheck = NULL;
goto nodatasum_case;
@@ -1241,7 +1485,7 @@ out:
sblock->pagev[page_index]->sblock = NULL;
recover = sblock->pagev[page_index]->recover;
if (recover) {
- scrub_put_recover(recover);
+ scrub_put_recover(fs_info, recover);
sblock->pagev[page_index]->recover =
NULL;
}
@@ -1251,6 +1495,9 @@ out:
kfree(sblocks_for_recheck);
}
+ ret = unlock_full_stripe(fs_info, logical, full_stripe_locked);
+ if (ret < 0)
+ return ret;
return 0;
}
@@ -1330,20 +1577,23 @@ static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
* with a length of PAGE_SIZE, each returned stripe
* represents one mirror
*/
+ btrfs_bio_counter_inc_blocked(fs_info);
ret = btrfs_map_sblock(fs_info, BTRFS_MAP_GET_READ_MIRRORS,
- logical, &mapped_length, &bbio, 0, 1);
+ logical, &mapped_length, &bbio);
if (ret || !bbio || mapped_length < sublen) {
btrfs_put_bbio(bbio);
+ btrfs_bio_counter_dec(fs_info);
return -EIO;
}
recover = kzalloc(sizeof(struct scrub_recover), GFP_NOFS);
if (!recover) {
btrfs_put_bbio(bbio);
+ btrfs_bio_counter_dec(fs_info);
return -ENOMEM;
}
- atomic_set(&recover->refs, 1);
+ refcount_set(&recover->refs, 1);
recover->bbio = bbio;
recover->map_length = mapped_length;
@@ -1365,7 +1615,7 @@ leave_nomem:
spin_lock(&sctx->stat_lock);
sctx->stat.malloc_errors++;
spin_unlock(&sctx->stat_lock);
- scrub_put_recover(recover);
+ scrub_put_recover(fs_info, recover);
return -ENOMEM;
}
scrub_page_get(page);
@@ -1407,7 +1657,7 @@ leave_nomem:
scrub_get_recover(recover);
page->recover = recover;
}
- scrub_put_recover(recover);
+ scrub_put_recover(fs_info, recover);
length -= sublen;
logical += sublen;
page_index++;
@@ -1497,14 +1747,18 @@ static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
bio_add_page(bio, page->page, PAGE_SIZE, 0);
if (!retry_failed_mirror && scrub_is_page_on_raid56(page)) {
- if (scrub_submit_raid56_bio_wait(fs_info, bio, page))
+ if (scrub_submit_raid56_bio_wait(fs_info, bio, page)) {
+ page->io_error = 1;
sblock->no_io_error_seen = 0;
+ }
} else {
bio->bi_iter.bi_sector = page->physical >> 9;
bio_set_op_attrs(bio, REQ_OP_READ, 0);
- if (btrfsic_submit_bio_wait(bio))
+ if (btrfsic_submit_bio_wait(bio)) {
+ page->io_error = 1;
sblock->no_io_error_seen = 0;
+ }
}
bio_put(bio);
@@ -1634,7 +1888,7 @@ static int scrub_write_page_to_dev_replace(struct scrub_block *sblock,
if (spage->io_error) {
void *mapped_buffer = kmap_atomic(spage->page);
- memset(mapped_buffer, 0, PAGE_SIZE);
+ clear_page(mapped_buffer);
flush_dcache_page(spage->page);
kunmap_atomic(mapped_buffer);
}
@@ -1998,12 +2252,12 @@ static int scrub_checksum_super(struct scrub_block *sblock)
static void scrub_block_get(struct scrub_block *sblock)
{
- atomic_inc(&sblock->refs);
+ refcount_inc(&sblock->refs);
}
static void scrub_block_put(struct scrub_block *sblock)
{
- if (atomic_dec_and_test(&sblock->refs)) {
+ if (refcount_dec_and_test(&sblock->refs)) {
int i;
if (sblock->sparity)
@@ -2187,8 +2441,9 @@ static void scrub_missing_raid56_pages(struct scrub_block *sblock)
int ret;
int i;
+ btrfs_bio_counter_inc_blocked(fs_info);
ret = btrfs_map_sblock(fs_info, BTRFS_MAP_GET_READ_MIRRORS, logical,
- &length, &bbio, 0, 1);
+ &length, &bbio);
if (ret || !bbio || !bbio->raid_map)
goto bbio_out;
@@ -2231,6 +2486,7 @@ static void scrub_missing_raid56_pages(struct scrub_block *sblock)
rbio_out:
bio_put(bio);
bbio_out:
+ btrfs_bio_counter_dec(fs_info);
btrfs_put_bbio(bbio);
spin_lock(&sctx->stat_lock);
sctx->stat.malloc_errors++;
@@ -2255,7 +2511,7 @@ static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
/* one ref inside this function, plus one for each page added to
* a bio later on */
- atomic_set(&sblock->refs, 1);
+ refcount_set(&sblock->refs, 1);
sblock->sctx = sctx;
sblock->no_io_error_seen = 1;
@@ -2385,7 +2641,7 @@ static inline void __scrub_mark_bitmap(struct scrub_parity *sparity,
unsigned long *bitmap,
u64 start, u64 len)
{
- u32 offset;
+ u64 offset;
int nsectors;
int sectorsize = sparity->sctx->fs_info->sectorsize;
@@ -2395,8 +2651,8 @@ static inline void __scrub_mark_bitmap(struct scrub_parity *sparity,
}
start -= sparity->logic_start;
- start = div_u64_rem(start, sparity->stripe_len, &offset);
- offset /= sectorsize;
+ start = div64_u64_rem(start, sparity->stripe_len, &offset);
+ offset = div_u64(offset, sectorsize);
nsectors = (int)len / sectorsize;
if (offset + nsectors <= sparity->nsectors) {
@@ -2555,7 +2811,7 @@ static int scrub_pages_for_parity(struct scrub_parity *sparity,
/* one ref inside this function, plus one for each page added to
* a bio later on */
- atomic_set(&sblock->refs, 1);
+ refcount_set(&sblock->refs, 1);
sblock->sctx = sctx;
sblock->no_io_error_seen = 1;
sblock->sparity = sparity;
@@ -2694,7 +2950,7 @@ static int get_raid56_logic_offset(u64 physical, int num,
for (i = 0; i < nr_data_stripes(map); i++) {
*offset = last_offset + i * map->stripe_len;
- stripe_nr = div_u64(*offset, map->stripe_len);
+ stripe_nr = div64_u64(*offset, map->stripe_len);
stripe_nr = div_u64(stripe_nr, nr_data_stripes(map));
/* Work out the disk rotation on this stripe-set */
@@ -2765,7 +3021,6 @@ static void scrub_parity_check_and_repair(struct scrub_parity *sparity)
struct btrfs_fs_info *fs_info = sctx->fs_info;
struct bio *bio;
struct btrfs_raid_bio *rbio;
- struct scrub_page *spage;
struct btrfs_bio *bbio = NULL;
u64 length;
int ret;
@@ -2775,8 +3030,10 @@ static void scrub_parity_check_and_repair(struct scrub_parity *sparity)
goto out;
length = sparity->logic_end - sparity->logic_start;
+
+ btrfs_bio_counter_inc_blocked(fs_info);
ret = btrfs_map_sblock(fs_info, BTRFS_MAP_WRITE, sparity->logic_start,
- &length, &bbio, 0, 1);
+ &length, &bbio);
if (ret || !bbio || !bbio->raid_map)
goto bbio_out;
@@ -2795,9 +3052,6 @@ static void scrub_parity_check_and_repair(struct scrub_parity *sparity)
if (!rbio)
goto rbio_out;
- list_for_each_entry(spage, &sparity->spages, list)
- raid56_add_scrub_pages(rbio, spage->page, spage->logical);
-
scrub_pending_bio_inc(sctx);
raid56_parity_submit_scrub_rbio(rbio);
return;
@@ -2805,6 +3059,7 @@ static void scrub_parity_check_and_repair(struct scrub_parity *sparity)
rbio_out:
bio_put(bio);
bbio_out:
+ btrfs_bio_counter_dec(fs_info);
btrfs_put_bbio(bbio);
bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap,
sparity->nsectors);
@@ -2822,12 +3077,12 @@ static inline int scrub_calc_parity_bitmap_len(int nsectors)
static void scrub_parity_get(struct scrub_parity *sparity)
{
- atomic_inc(&sparity->refs);
+ refcount_inc(&sparity->refs);
}
static void scrub_parity_put(struct scrub_parity *sparity)
{
- if (!atomic_dec_and_test(&sparity->refs))
+ if (!refcount_dec_and_test(&sparity->refs))
return;
scrub_parity_check_and_repair(sparity);
@@ -2879,7 +3134,7 @@ static noinline_for_stack int scrub_raid56_parity(struct scrub_ctx *sctx,
sparity->scrub_dev = sdev;
sparity->logic_start = logic_start;
sparity->logic_end = logic_end;
- atomic_set(&sparity->refs, 1);
+ refcount_set(&sparity->refs, 1);
INIT_LIST_HEAD(&sparity->spages);
sparity->dbitmap = sparity->bitmap;
sparity->ebitmap = (void *)sparity->bitmap + bitmap_len;
@@ -3098,7 +3353,7 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
physical = map->stripes[num].physical;
offset = 0;
- nstripes = div_u64(length, map->stripe_len);
+ nstripes = div64_u64(length, map->stripe_len);
if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
offset = map->stripe_len * num;
increment = map->stripe_len * map->num_stripes;
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index a60d5bfb8a49..fc496a6f842a 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -5184,13 +5184,19 @@ static int is_extent_unchanged(struct send_ctx *sctx,
while (key.offset < ekey->offset + left_len) {
ei = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
right_type = btrfs_file_extent_type(eb, ei);
- if (right_type != BTRFS_FILE_EXTENT_REG) {
+ if (right_type != BTRFS_FILE_EXTENT_REG &&
+ right_type != BTRFS_FILE_EXTENT_INLINE) {
ret = 0;
goto out;
}
right_disknr = btrfs_file_extent_disk_bytenr(eb, ei);
- right_len = btrfs_file_extent_num_bytes(eb, ei);
+ if (right_type == BTRFS_FILE_EXTENT_INLINE) {
+ right_len = btrfs_file_extent_inline_len(eb, slot, ei);
+ right_len = PAGE_ALIGN(right_len);
+ } else {
+ right_len = btrfs_file_extent_num_bytes(eb, ei);
+ }
right_offset = btrfs_file_extent_offset(eb, ei);
right_gen = btrfs_file_extent_generation(eb, ei);
@@ -5204,6 +5210,19 @@ static int is_extent_unchanged(struct send_ctx *sctx,
goto out;
}
+ /*
+ * We just wanted to see if, when we have an inline extent, what
+ * follows it is a regular extent (i.e. to check the above
+ * condition for inline extents too). This should normally not
+ * happen, but it's possible, for example, when we have an inline
+ * compressed extent representing data with a size matching
+ * the page size (currently the same as the sector size).
+ */
+ if (right_type == BTRFS_FILE_EXTENT_INLINE) {
+ ret = 0;
+ goto out;
+ }
+
left_offset_fixed = left_offset;
if (key.offset < ekey->offset) {
/* Fix the right offset for 2a and 7. */
@@ -6360,22 +6379,16 @@ long btrfs_ioctl_send(struct file *mnt_file, void __user *arg_)
sctx->clone_roots_cnt = arg->clone_sources_count;
sctx->send_max_size = BTRFS_SEND_BUF_SIZE;
- sctx->send_buf = kmalloc(sctx->send_max_size, GFP_KERNEL | __GFP_NOWARN);
+ sctx->send_buf = kvmalloc(sctx->send_max_size, GFP_KERNEL);
if (!sctx->send_buf) {
- sctx->send_buf = vmalloc(sctx->send_max_size);
- if (!sctx->send_buf) {
- ret = -ENOMEM;
- goto out;
- }
+ ret = -ENOMEM;
+ goto out;
}
- sctx->read_buf = kmalloc(BTRFS_SEND_READ_SIZE, GFP_KERNEL | __GFP_NOWARN);
+ sctx->read_buf = kvmalloc(BTRFS_SEND_READ_SIZE, GFP_KERNEL);
if (!sctx->read_buf) {
- sctx->read_buf = vmalloc(BTRFS_SEND_READ_SIZE);
- if (!sctx->read_buf) {
- ret = -ENOMEM;
- goto out;
- }
+ ret = -ENOMEM;
+ goto out;
}
sctx->pending_dir_moves = RB_ROOT;
@@ -6396,13 +6409,10 @@ long btrfs_ioctl_send(struct file *mnt_file, void __user *arg_)
alloc_size = arg->clone_sources_count * sizeof(*arg->clone_sources);
if (arg->clone_sources_count) {
- clone_sources_tmp = kmalloc(alloc_size, GFP_KERNEL | __GFP_NOWARN);
+ clone_sources_tmp = kvmalloc(alloc_size, GFP_KERNEL);
if (!clone_sources_tmp) {
- clone_sources_tmp = vmalloc(alloc_size);
- if (!clone_sources_tmp) {
- ret = -ENOMEM;
- goto out;
- }
+ ret = -ENOMEM;
+ goto out;
}
ret = copy_from_user(clone_sources_tmp, arg->clone_sources,
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 72a053c9a7f0..4f1cdd5058f1 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -1795,8 +1795,7 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
}
if (fs_info->fs_devices->missing_devices >
- fs_info->num_tolerated_disk_barrier_failures &&
- !(*flags & MS_RDONLY)) {
+ fs_info->num_tolerated_disk_barrier_failures) {
btrfs_warn(fs_info,
"too many missing devices, writeable remount is not allowed");
ret = -EACCES;
diff --git a/fs/btrfs/tests/btrfs-tests.c b/fs/btrfs/tests/btrfs-tests.c
index ea272432c930..b18ab8f327a5 100644
--- a/fs/btrfs/tests/btrfs-tests.c
+++ b/fs/btrfs/tests/btrfs-tests.c
@@ -237,7 +237,6 @@ void btrfs_init_dummy_trans(struct btrfs_trans_handle *trans)
{
memset(trans, 0, sizeof(*trans));
trans->transid = 1;
- INIT_LIST_HEAD(&trans->qgroup_ref_list);
trans->type = __TRANS_DUMMY;
}
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 61b807de3e16..2168654c90a1 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -60,8 +60,8 @@ static const unsigned int btrfs_blocked_trans_types[TRANS_STATE_MAX] = {
void btrfs_put_transaction(struct btrfs_transaction *transaction)
{
- WARN_ON(atomic_read(&transaction->use_count) == 0);
- if (atomic_dec_and_test(&transaction->use_count)) {
+ WARN_ON(refcount_read(&transaction->use_count) == 0);
+ if (refcount_dec_and_test(&transaction->use_count)) {
BUG_ON(!list_empty(&transaction->list));
WARN_ON(!RB_EMPTY_ROOT(&transaction->delayed_refs.href_root));
if (transaction->delayed_refs.pending_csums)
@@ -207,7 +207,7 @@ loop:
spin_unlock(&fs_info->trans_lock);
return -EBUSY;
}
- atomic_inc(&cur_trans->use_count);
+ refcount_inc(&cur_trans->use_count);
atomic_inc(&cur_trans->num_writers);
extwriter_counter_inc(cur_trans, type);
spin_unlock(&fs_info->trans_lock);
@@ -257,7 +257,7 @@ loop:
* One for this trans handle, one so it will live on until we
* commit the transaction.
*/
- atomic_set(&cur_trans->use_count, 2);
+ refcount_set(&cur_trans->use_count, 2);
atomic_set(&cur_trans->pending_ordered, 0);
cur_trans->flags = 0;
cur_trans->start_time = get_seconds();
@@ -432,7 +432,7 @@ static void wait_current_trans(struct btrfs_fs_info *fs_info)
spin_lock(&fs_info->trans_lock);
cur_trans = fs_info->running_transaction;
if (cur_trans && is_transaction_blocked(cur_trans)) {
- atomic_inc(&cur_trans->use_count);
+ refcount_inc(&cur_trans->use_count);
spin_unlock(&fs_info->trans_lock);
wait_event(fs_info->transaction_wait,
@@ -572,7 +572,6 @@ again:
h->type = type;
h->can_flush_pending_bgs = true;
- INIT_LIST_HEAD(&h->qgroup_ref_list);
INIT_LIST_HEAD(&h->new_bgs);
smp_mb();
@@ -744,7 +743,7 @@ int btrfs_wait_for_commit(struct btrfs_fs_info *fs_info, u64 transid)
list_for_each_entry(t, &fs_info->trans_list, list) {
if (t->transid == transid) {
cur_trans = t;
- atomic_inc(&cur_trans->use_count);
+ refcount_inc(&cur_trans->use_count);
ret = 0;
break;
}
@@ -773,7 +772,7 @@ int btrfs_wait_for_commit(struct btrfs_fs_info *fs_info, u64 transid)
if (t->state == TRANS_STATE_COMPLETED)
break;
cur_trans = t;
- atomic_inc(&cur_trans->use_count);
+ refcount_inc(&cur_trans->use_count);
break;
}
}
@@ -917,7 +916,6 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
wake_up_process(info->transaction_kthread);
err = -EIO;
}
- assert_qgroups_uptodate(trans);
kmem_cache_free(btrfs_trans_handle_cachep, trans);
if (must_run_delayed_refs) {
@@ -1839,7 +1837,7 @@ int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
/* take transaction reference */
cur_trans = trans->transaction;
- atomic_inc(&cur_trans->use_count);
+ refcount_inc(&cur_trans->use_count);
btrfs_end_transaction(trans);
@@ -2015,7 +2013,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
spin_lock(&fs_info->trans_lock);
if (cur_trans->state >= TRANS_STATE_COMMIT_START) {
spin_unlock(&fs_info->trans_lock);
- atomic_inc(&cur_trans->use_count);
+ refcount_inc(&cur_trans->use_count);
ret = btrfs_end_transaction(trans);
wait_for_commit(cur_trans);
@@ -2035,7 +2033,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
prev_trans = list_entry(cur_trans->list.prev,
struct btrfs_transaction, list);
if (prev_trans->state != TRANS_STATE_COMPLETED) {
- atomic_inc(&prev_trans->use_count);
+ refcount_inc(&prev_trans->use_count);
spin_unlock(&fs_info->trans_lock);
wait_for_commit(prev_trans);
@@ -2130,13 +2128,6 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
goto scrub_continue;
}
- /* Reocrd old roots for later qgroup accounting */
- ret = btrfs_qgroup_prepare_account_extents(trans, fs_info);
- if (ret) {
- mutex_unlock(&fs_info->reloc_mutex);
- goto scrub_continue;
- }
-
/*
* make sure none of the code above managed to slip in a
* delayed item
@@ -2179,6 +2170,24 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
btrfs_free_log_root_tree(trans, fs_info);
/*
+ * commit_fs_roots() can call btrfs_save_ino_cache(), which generates
+ * new delayed refs. Must handle them or qgroup can be wrong.
+ */
+ ret = btrfs_run_delayed_refs(trans, fs_info, (unsigned long)-1);
+ if (ret) {
+ mutex_unlock(&fs_info->tree_log_mutex);
+ mutex_unlock(&fs_info->reloc_mutex);
+ goto scrub_continue;
+ }
+
+ ret = btrfs_qgroup_prepare_account_extents(trans, fs_info);
+ if (ret) {
+ mutex_unlock(&fs_info->tree_log_mutex);
+ mutex_unlock(&fs_info->reloc_mutex);
+ goto scrub_continue;
+ }
+
+ /*
* Since fs roots are all committed, we can get a quite accurate
* new_roots. So let's do quota accounting.
*/
@@ -2223,7 +2232,6 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
switch_commit_roots(cur_trans, fs_info);
- assert_qgroups_uptodate(trans);
ASSERT(list_empty(&cur_trans->dirty_bgs));
ASSERT(list_empty(&cur_trans->io_bgs));
update_super_roots(fs_info);
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
index 5dfb5590fff6..c55e44560103 100644
--- a/fs/btrfs/transaction.h
+++ b/fs/btrfs/transaction.h
@@ -18,6 +18,8 @@
#ifndef __BTRFS_TRANSACTION__
#define __BTRFS_TRANSACTION__
+
+#include <linux/refcount.h>
#include "btrfs_inode.h"
#include "delayed-ref.h"
#include "ctree.h"
@@ -49,7 +51,7 @@ struct btrfs_transaction {
* transaction can end
*/
atomic_t num_writers;
- atomic_t use_count;
+ refcount_t use_count;
atomic_t pending_ordered;
unsigned long flags;
@@ -125,8 +127,6 @@ struct btrfs_trans_handle {
unsigned int type;
struct btrfs_root *root;
struct btrfs_fs_info *fs_info;
- struct seq_list delayed_ref_elem;
- struct list_head qgroup_ref_list;
struct list_head new_bgs;
};
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index a59674c3e69e..ccfe9fe7754a 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -4196,7 +4196,7 @@ static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans,
if (em->generation <= test_gen)
continue;
/* Need a ref to keep it from getting evicted from cache */
- atomic_inc(&em->refs);
+ refcount_inc(&em->refs);
set_bit(EXTENT_FLAG_LOGGING, &em->flags);
list_add_tail(&em->list, &extents);
num++;
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index ab8a66d852f9..017b67daa3bb 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -139,6 +139,11 @@ static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info);
static void __btrfs_reset_dev_stats(struct btrfs_device *dev);
static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev);
static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);
+static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
+ enum btrfs_map_op op,
+ u64 logical, u64 *length,
+ struct btrfs_bio **bbio_ret,
+ int mirror_num, int need_raid_map);
DEFINE_MUTEX(uuid_mutex);
static LIST_HEAD(fs_uuids);
@@ -1008,14 +1013,13 @@ static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
q = bdev_get_queue(bdev);
if (blk_queue_discard(q))
device->can_discard = 1;
+ if (!blk_queue_nonrot(q))
+ fs_devices->rotating = 1;
device->bdev = bdev;
device->in_fs_metadata = 0;
device->mode = flags;
- if (!blk_queue_nonrot(bdev_get_queue(bdev)))
- fs_devices->rotating = 1;
-
fs_devices->open_devices++;
if (device->writeable &&
device->devid != BTRFS_DEV_REPLACE_DEVID) {
@@ -2417,7 +2421,7 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path
fs_info->free_chunk_space += device->total_bytes;
spin_unlock(&fs_info->free_chunk_lock);
- if (!blk_queue_nonrot(bdev_get_queue(bdev)))
+ if (!blk_queue_nonrot(q))
fs_info->fs_devices->rotating = 1;
tmp = btrfs_super_total_bytes(fs_info->super_copy);
@@ -2795,10 +2799,38 @@ static int btrfs_del_sys_chunk(struct btrfs_fs_info *fs_info,
return ret;
}
+static struct extent_map *get_chunk_map(struct btrfs_fs_info *fs_info,
+ u64 logical, u64 length)
+{
+ struct extent_map_tree *em_tree;
+ struct extent_map *em;
+
+ em_tree = &fs_info->mapping_tree.map_tree;
+ read_lock(&em_tree->lock);
+ em = lookup_extent_mapping(em_tree, logical, length);
+ read_unlock(&em_tree->lock);
+
+ if (!em) {
+ btrfs_crit(fs_info, "unable to find logical %llu length %llu",
+ logical, length);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (em->start > logical || em->start + em->len < logical) {
+ btrfs_crit(fs_info,
+ "found a bad mapping, wanted %llu-%llu, found %llu-%llu",
+ logical, length, em->start, em->start + em->len);
+ free_extent_map(em);
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* callers are responsible for dropping em's ref. */
+ return em;
+}
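
get_chunk_map() centralizes the extent-map lookup and the sanity checks that several callers below used to open-code, returning ERR_PTR() on failure and a referenced extent_map on success. A short sketch of the calling convention; demo_use_chunk_map() is illustrative, not part of the patch:

/* Sketch of the calling convention; chunk_offset is a caller-supplied value. */
static int demo_use_chunk_map(struct btrfs_fs_info *fs_info, u64 chunk_offset)
{
        struct extent_map *em;
        struct map_lookup *map;

        em = get_chunk_map(fs_info, chunk_offset, 1);
        if (IS_ERR(em))
                return PTR_ERR(em);     /* no extent map to drop on error */

        map = em->map_lookup;
        /* ... use map->num_stripes, map->stripe_len, etc. ... */

        free_extent_map(em);            /* drop the reference the helper took */
        return 0;
}
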
+
int btrfs_remove_chunk(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info, u64 chunk_offset)
{
- struct extent_map_tree *em_tree;
struct extent_map *em;
struct map_lookup *map;
u64 dev_extent_len = 0;
@@ -2806,23 +2838,15 @@ int btrfs_remove_chunk(struct btrfs_trans_handle *trans,
int i, ret = 0;
struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
- em_tree = &fs_info->mapping_tree.map_tree;
-
- read_lock(&em_tree->lock);
- em = lookup_extent_mapping(em_tree, chunk_offset, 1);
- read_unlock(&em_tree->lock);
-
- if (!em || em->start > chunk_offset ||
- em->start + em->len < chunk_offset) {
+ em = get_chunk_map(fs_info, chunk_offset, 1);
+ if (IS_ERR(em)) {
/*
* This is a logic error, but we don't want to just rely on the
* user having built with ASSERT enabled, so if ASSERT doesn't
* do anything we still error out.
*/
ASSERT(0);
- if (em)
- free_extent_map(em);
- return -EINVAL;
+ return PTR_ERR(em);
}
map = em->map_lookup;
mutex_lock(&fs_info->chunk_mutex);
@@ -3736,7 +3760,7 @@ static void __cancel_balance(struct btrfs_fs_info *fs_info)
if (ret)
btrfs_handle_fs_error(fs_info, ret, NULL);
- atomic_set(&fs_info->mutually_exclusive_operation_running, 0);
+ clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags);
}
/* Non-zero return value signifies invalidity */
@@ -3755,6 +3779,7 @@ int btrfs_balance(struct btrfs_balance_control *bctl,
struct btrfs_ioctl_balance_args *bargs)
{
struct btrfs_fs_info *fs_info = bctl->fs_info;
+ u64 meta_target, data_target;
u64 allowed;
int mixed = 0;
int ret;
@@ -3851,11 +3876,16 @@ int btrfs_balance(struct btrfs_balance_control *bctl,
}
} while (read_seqretry(&fs_info->profiles_lock, seq));
- if (btrfs_get_num_tolerated_disk_barrier_failures(bctl->meta.target) <
- btrfs_get_num_tolerated_disk_barrier_failures(bctl->data.target)) {
+ /* if we're not converting, the target field is uninitialized */
+ meta_target = (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) ?
+ bctl->meta.target : fs_info->avail_metadata_alloc_bits;
+ data_target = (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) ?
+ bctl->data.target : fs_info->avail_data_alloc_bits;
+ if (btrfs_get_num_tolerated_disk_barrier_failures(meta_target) <
+ btrfs_get_num_tolerated_disk_barrier_failures(data_target)) {
btrfs_warn(fs_info,
"metadata profile 0x%llx has lower redundancy than data profile 0x%llx",
- bctl->meta.target, bctl->data.target);
+ meta_target, data_target);
}
if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
@@ -3910,7 +3940,7 @@ out:
__cancel_balance(fs_info);
else {
kfree(bctl);
- atomic_set(&fs_info->mutually_exclusive_operation_running, 0);
+ clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags);
}
return ret;
}
@@ -4000,7 +4030,7 @@ int btrfs_recover_balance(struct btrfs_fs_info *fs_info)
btrfs_balance_sys(leaf, item, &disk_bargs);
btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs);
- WARN_ON(atomic_xchg(&fs_info->mutually_exclusive_operation_running, 1));
+ WARN_ON(test_and_set_bit(BTRFS_FS_EXCL_OP, &fs_info->flags));
mutex_lock(&fs_info->volume_mutex);
mutex_lock(&fs_info->balance_mutex);
@@ -4785,7 +4815,7 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
stripe_size = div_u64(stripe_size, dev_stripes);
/* align to BTRFS_STRIPE_LEN */
- stripe_size = div_u64(stripe_size, raid_stripe_len);
+ stripe_size = div64_u64(stripe_size, raid_stripe_len);
stripe_size *= raid_stripe_len;
map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
@@ -4833,7 +4863,7 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
ret = add_extent_mapping(em_tree, em, 0);
if (!ret) {
list_add_tail(&em->list, &trans->transaction->pending_chunks);
- atomic_inc(&em->refs);
+ refcount_inc(&em->refs);
}
write_unlock(&em_tree->lock);
if (ret) {
@@ -4888,7 +4918,6 @@ int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans,
struct btrfs_device *device;
struct btrfs_chunk *chunk;
struct btrfs_stripe *stripe;
- struct extent_map_tree *em_tree;
struct extent_map *em;
struct map_lookup *map;
size_t item_size;
@@ -4897,24 +4926,9 @@ int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans,
int i = 0;
int ret = 0;
- em_tree = &fs_info->mapping_tree.map_tree;
- read_lock(&em_tree->lock);
- em = lookup_extent_mapping(em_tree, chunk_offset, chunk_size);
- read_unlock(&em_tree->lock);
-
- if (!em) {
- btrfs_crit(fs_info, "unable to find logical %Lu len %Lu",
- chunk_offset, chunk_size);
- return -EINVAL;
- }
-
- if (em->start != chunk_offset || em->len != chunk_size) {
- btrfs_crit(fs_info,
- "found a bad mapping, wanted %Lu-%Lu, found %Lu-%Lu",
- chunk_offset, chunk_size, em->start, em->len);
- free_extent_map(em);
- return -EINVAL;
- }
+ em = get_chunk_map(fs_info, chunk_offset, chunk_size);
+ if (IS_ERR(em))
+ return PTR_ERR(em);
map = em->map_lookup;
item_size = btrfs_chunk_item_size(map->num_stripes);
@@ -5055,15 +5069,12 @@ int btrfs_chunk_readonly(struct btrfs_fs_info *fs_info, u64 chunk_offset)
{
struct extent_map *em;
struct map_lookup *map;
- struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
int readonly = 0;
int miss_ndevs = 0;
int i;
- read_lock(&map_tree->map_tree.lock);
- em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
- read_unlock(&map_tree->map_tree.lock);
- if (!em)
+ em = get_chunk_map(fs_info, chunk_offset, 1);
+ if (IS_ERR(em))
return 1;
map = em->map_lookup;
@@ -5117,34 +5128,19 @@ void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
{
- struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
struct extent_map *em;
struct map_lookup *map;
- struct extent_map_tree *em_tree = &map_tree->map_tree;
int ret;
- read_lock(&em_tree->lock);
- em = lookup_extent_mapping(em_tree, logical, len);
- read_unlock(&em_tree->lock);
-
- /*
- * We could return errors for these cases, but that could get ugly and
- * we'd probably do the same thing which is just not do anything else
- * and exit, so return 1 so the callers don't try to use other copies.
- */
- if (!em) {
- btrfs_crit(fs_info, "No mapping for %Lu-%Lu", logical,
- logical+len);
- return 1;
- }
-
- if (em->start > logical || em->start + em->len < logical) {
- btrfs_crit(fs_info, "Invalid mapping for %Lu-%Lu, got %Lu-%Lu",
- logical, logical+len, em->start,
- em->start + em->len);
- free_extent_map(em);
+ em = get_chunk_map(fs_info, logical, len);
+ if (IS_ERR(em))
+ /*
+ * We could return errors for these cases, but that could get
+ * ugly and we'd probably do the same thing anyway, which is to
+ * just exit without doing anything else, so return 1 so the
+ * callers don't try to use other copies.
+ */
return 1;
- }
map = em->map_lookup;
if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
@@ -5160,7 +5156,8 @@ int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
free_extent_map(em);
btrfs_dev_replace_lock(&fs_info->dev_replace, 0);
- if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))
+ if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace) &&
+ fs_info->dev_replace.tgtdev)
ret++;
btrfs_dev_replace_unlock(&fs_info->dev_replace, 0);
@@ -5173,15 +5170,11 @@ unsigned long btrfs_full_stripe_len(struct btrfs_fs_info *fs_info,
{
struct extent_map *em;
struct map_lookup *map;
- struct extent_map_tree *em_tree = &map_tree->map_tree;
unsigned long len = fs_info->sectorsize;
- read_lock(&em_tree->lock);
- em = lookup_extent_mapping(em_tree, logical, len);
- read_unlock(&em_tree->lock);
- BUG_ON(!em);
+ em = get_chunk_map(fs_info, logical, len);
+ WARN_ON(IS_ERR(em));
- BUG_ON(em->start > logical || em->start + em->len < logical);
map = em->map_lookup;
if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
len = map->stripe_len * nr_data_stripes(map);
@@ -5189,20 +5182,16 @@ unsigned long btrfs_full_stripe_len(struct btrfs_fs_info *fs_info,
return len;
}
-int btrfs_is_parity_mirror(struct btrfs_mapping_tree *map_tree,
+int btrfs_is_parity_mirror(struct btrfs_fs_info *fs_info,
u64 logical, u64 len, int mirror_num)
{
struct extent_map *em;
struct map_lookup *map;
- struct extent_map_tree *em_tree = &map_tree->map_tree;
int ret = 0;
- read_lock(&em_tree->lock);
- em = lookup_extent_mapping(em_tree, logical, len);
- read_unlock(&em_tree->lock);
- BUG_ON(!em);
+ em = get_chunk_map(fs_info, logical, len);
+ WARN_ON(IS_ERR(em));
- BUG_ON(em->start > logical || em->start + em->len < logical);
map = em->map_lookup;
if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
ret = 1;
@@ -5295,25 +5284,353 @@ static struct btrfs_bio *alloc_btrfs_bio(int total_stripes, int real_stripes)
GFP_NOFS|__GFP_NOFAIL);
atomic_set(&bbio->error, 0);
- atomic_set(&bbio->refs, 1);
+ refcount_set(&bbio->refs, 1);
return bbio;
}
void btrfs_get_bbio(struct btrfs_bio *bbio)
{
- WARN_ON(!atomic_read(&bbio->refs));
- atomic_inc(&bbio->refs);
+ WARN_ON(!refcount_read(&bbio->refs));
+ refcount_inc(&bbio->refs);
}
void btrfs_put_bbio(struct btrfs_bio *bbio)
{
if (!bbio)
return;
- if (atomic_dec_and_test(&bbio->refs))
+ if (refcount_dec_and_test(&bbio->refs))
kfree(bbio);
}
+/*
+ * Can REQ_OP_DISCARD be sent with other REQ like REQ_OP_WRITE?
+ *
+ * Please note that discard won't be sent to the target device of device
+ * replace.
+ */
+static int __btrfs_map_block_for_discard(struct btrfs_fs_info *fs_info,
+ u64 logical, u64 length,
+ struct btrfs_bio **bbio_ret)
+{
+ struct extent_map *em;
+ struct map_lookup *map;
+ struct btrfs_bio *bbio;
+ u64 offset;
+ u64 stripe_nr;
+ u64 stripe_nr_end;
+ u64 stripe_end_offset;
+ u64 stripe_cnt;
+ u64 stripe_len;
+ u64 stripe_offset;
+ u64 num_stripes;
+ u32 stripe_index;
+ u32 factor = 0;
+ u32 sub_stripes = 0;
+ u64 stripes_per_dev = 0;
+ u32 remaining_stripes = 0;
+ u32 last_stripe = 0;
+ int ret = 0;
+ int i;
+
+ /* discard always returns a bbio */
+ ASSERT(bbio_ret);
+
+ em = get_chunk_map(fs_info, logical, length);
+ if (IS_ERR(em))
+ return PTR_ERR(em);
+
+ map = em->map_lookup;
+ /* we don't discard raid56 yet */
+ if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+ offset = logical - em->start;
+ length = min_t(u64, em->len - offset, length);
+
+ stripe_len = map->stripe_len;
+ /*
+ * stripe_nr counts the total number of stripes we have to stride
+ * to get to this block
+ */
+ stripe_nr = div64_u64(offset, stripe_len);
+
+ /* stripe_offset is the offset of this block in its stripe */
+ stripe_offset = offset - stripe_nr * stripe_len;
+
+ stripe_nr_end = round_up(offset + length, map->stripe_len);
+ stripe_nr_end = div64_u64(stripe_nr_end, map->stripe_len);
+ stripe_cnt = stripe_nr_end - stripe_nr;
+ stripe_end_offset = stripe_nr_end * map->stripe_len -
+ (offset + length);
+ /*
+ * after this, stripe_nr is the number of stripes on this
+ * device we have to walk to find the data, and stripe_index is
+ * the number of our device in the stripe array
+ */
+ num_stripes = 1;
+ stripe_index = 0;
+ if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
+ BTRFS_BLOCK_GROUP_RAID10)) {
+ if (map->type & BTRFS_BLOCK_GROUP_RAID0)
+ sub_stripes = 1;
+ else
+ sub_stripes = map->sub_stripes;
+
+ factor = map->num_stripes / sub_stripes;
+ num_stripes = min_t(u64, map->num_stripes,
+ sub_stripes * stripe_cnt);
+ stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index);
+ stripe_index *= sub_stripes;
+ stripes_per_dev = div_u64_rem(stripe_cnt, factor,
+ &remaining_stripes);
+ div_u64_rem(stripe_nr_end - 1, factor, &last_stripe);
+ last_stripe *= sub_stripes;
+ } else if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
+ BTRFS_BLOCK_GROUP_DUP)) {
+ num_stripes = map->num_stripes;
+ } else {
+ stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
+ &stripe_index);
+ }
+
+ bbio = alloc_btrfs_bio(num_stripes, 0);
+ if (!bbio) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ for (i = 0; i < num_stripes; i++) {
+ bbio->stripes[i].physical =
+ map->stripes[stripe_index].physical +
+ stripe_offset + stripe_nr * map->stripe_len;
+ bbio->stripes[i].dev = map->stripes[stripe_index].dev;
+
+ if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
+ BTRFS_BLOCK_GROUP_RAID10)) {
+ bbio->stripes[i].length = stripes_per_dev *
+ map->stripe_len;
+
+ if (i / sub_stripes < remaining_stripes)
+ bbio->stripes[i].length +=
+ map->stripe_len;
+
+ /*
+ * Special for the first stripe and
+ * the last stripe:
+ *
+ * |-------|...|-------|
+ * |----------|
+ * off end_off
+ */
+ if (i < sub_stripes)
+ bbio->stripes[i].length -=
+ stripe_offset;
+
+ if (stripe_index >= last_stripe &&
+ stripe_index <= (last_stripe +
+ sub_stripes - 1))
+ bbio->stripes[i].length -=
+ stripe_end_offset;
+
+ if (i == sub_stripes - 1)
+ stripe_offset = 0;
+ } else {
+ bbio->stripes[i].length = length;
+ }
+
+ stripe_index++;
+ if (stripe_index == map->num_stripes) {
+ stripe_index = 0;
+ stripe_nr++;
+ }
+ }
+
+ *bbio_ret = bbio;
+ bbio->map_type = map->type;
+ bbio->num_stripes = num_stripes;
+out:
+ free_extent_map(em);
+ return ret;
+}
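
The index arithmetic above is easier to follow with concrete numbers. Assuming a 64KiB stripe_len and a discard of 200KiB starting 150KiB into the chunk (example values, not from the patch), stripe_nr is 2, stripe_offset is 22KiB, stripe_nr_end is 6, stripe_cnt is 4 and stripe_end_offset is 34KiB; a userspace sketch of the same computation:

#include <stdint.h>
#include <stdio.h>

#define STRIPE_LEN      (64 * 1024ULL)  /* example stripe length */

/* Same index arithmetic as __btrfs_map_block_for_discard(), in userspace. */
int main(void)
{
        uint64_t offset = 150 * 1024;   /* discard start, relative to chunk */
        uint64_t length = 200 * 1024;   /* discard length */

        uint64_t stripe_nr = offset / STRIPE_LEN;                       /* 2 */
        uint64_t stripe_offset = offset - stripe_nr * STRIPE_LEN;       /* 22KiB */
        uint64_t stripe_nr_end =
                (offset + length + STRIPE_LEN - 1) / STRIPE_LEN;        /* 6 */
        uint64_t stripe_cnt = stripe_nr_end - stripe_nr;                /* 4 */
        uint64_t stripe_end_offset =
                stripe_nr_end * STRIPE_LEN - (offset + length);         /* 34KiB */

        printf("%llu %llu %llu %llu %llu\n",
               (unsigned long long)stripe_nr,
               (unsigned long long)stripe_offset,
               (unsigned long long)stripe_nr_end,
               (unsigned long long)stripe_cnt,
               (unsigned long long)stripe_end_offset);
        return 0;
}
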
+
+/*
+ * In the dev-replace case, for the repair case (the only case where the
+ * mirror is selected explicitly when calling btrfs_map_block), blocks left of
+ * the left cursor can also be read from the target drive.
+ *
+ * For REQ_GET_READ_MIRRORS, the target drive is added as the last one to the
+ * array of stripes.
+ * For READ, it also needs to be supported using the same mirror number.
+ *
+ * If the requested block is not left of the left cursor, EIO is returned. This
+ * can happen because btrfs_num_copies() returns one more in the dev-replace
+ * case.
+ */
+static int get_extra_mirror_from_replace(struct btrfs_fs_info *fs_info,
+ u64 logical, u64 length,
+ u64 srcdev_devid, int *mirror_num,
+ u64 *physical)
+{
+ struct btrfs_bio *bbio = NULL;
+ int num_stripes;
+ int index_srcdev = 0;
+ int found = 0;
+ u64 physical_of_found = 0;
+ int i;
+ int ret = 0;
+
+ ret = __btrfs_map_block(fs_info, BTRFS_MAP_GET_READ_MIRRORS,
+ logical, &length, &bbio, 0, 0);
+ if (ret) {
+ ASSERT(bbio == NULL);
+ return ret;
+ }
+
+ num_stripes = bbio->num_stripes;
+ if (*mirror_num > num_stripes) {
+ /*
+ * BTRFS_MAP_GET_READ_MIRRORS does not contain this mirror,
+ * which means that the requested area is not left of the left
+ * cursor.
+ */
+ btrfs_put_bbio(bbio);
+ return -EIO;
+ }
+
+ /*
+ * Process the rest of the function using the mirror_num of the source
+ * drive. Therefore look it up first. At the end, patch the device
+ * pointer to that of the target drive.
+ */
+ for (i = 0; i < num_stripes; i++) {
+ if (bbio->stripes[i].dev->devid != srcdev_devid)
+ continue;
+
+ /*
+ * In case of DUP, in order to keep it simple, only add the
+ * mirror with the lowest physical address
+ */
+ if (found &&
+ physical_of_found <= bbio->stripes[i].physical)
+ continue;
+
+ index_srcdev = i;
+ found = 1;
+ physical_of_found = bbio->stripes[i].physical;
+ }
+
+ btrfs_put_bbio(bbio);
+
+ ASSERT(found);
+ if (!found)
+ return -EIO;
+
+ *mirror_num = index_srcdev + 1;
+ *physical = physical_of_found;
+ return ret;
+}
+
+static void handle_ops_on_dev_replace(enum btrfs_map_op op,
+ struct btrfs_bio **bbio_ret,
+ struct btrfs_dev_replace *dev_replace,
+ int *num_stripes_ret, int *max_errors_ret)
+{
+ struct btrfs_bio *bbio = *bbio_ret;
+ u64 srcdev_devid = dev_replace->srcdev->devid;
+ int tgtdev_indexes = 0;
+ int num_stripes = *num_stripes_ret;
+ int max_errors = *max_errors_ret;
+ int i;
+
+ if (op == BTRFS_MAP_WRITE) {
+ int index_where_to_add;
+
+ /*
+ * duplicate the write operations while the dev replace
+ * procedure is running. Since the copying of the old disk to
+ * the new disk takes place at run time while the filesystem is
+ * mounted writable, the regular write operations to the old
+ * disk have to be duplicated to go to the new disk as well.
+ *
+ * Note that device->missing is handled by the caller, and that
+ * the write to the old disk is already set up in the stripes
+ * array.
+ */
+ index_where_to_add = num_stripes;
+ for (i = 0; i < num_stripes; i++) {
+ if (bbio->stripes[i].dev->devid == srcdev_devid) {
+ /* write to new disk, too */
+ struct btrfs_bio_stripe *new =
+ bbio->stripes + index_where_to_add;
+ struct btrfs_bio_stripe *old =
+ bbio->stripes + i;
+
+ new->physical = old->physical;
+ new->length = old->length;
+ new->dev = dev_replace->tgtdev;
+ bbio->tgtdev_map[i] = index_where_to_add;
+ index_where_to_add++;
+ max_errors++;
+ tgtdev_indexes++;
+ }
+ }
+ num_stripes = index_where_to_add;
+ } else if (op == BTRFS_MAP_GET_READ_MIRRORS) {
+ int index_srcdev = 0;
+ int found = 0;
+ u64 physical_of_found = 0;
+
+ /*
+ * During the dev-replace procedure, the target drive can also
+ * be used to read data in case it is needed to repair a corrupt
+ * block elsewhere. This is possible if the requested area is
+ * left of the left cursor. In this area, the target drive is a
+ * full copy of the source drive.
+ */
+ for (i = 0; i < num_stripes; i++) {
+ if (bbio->stripes[i].dev->devid == srcdev_devid) {
+ /*
+ * In case of DUP, in order to keep it simple,
+ * only add the mirror with the lowest physical
+ * address
+ */
+ if (found &&
+ physical_of_found <=
+ bbio->stripes[i].physical)
+ continue;
+ index_srcdev = i;
+ found = 1;
+ physical_of_found = bbio->stripes[i].physical;
+ }
+ }
+ if (found) {
+ struct btrfs_bio_stripe *tgtdev_stripe =
+ bbio->stripes + num_stripes;
+
+ tgtdev_stripe->physical = physical_of_found;
+ tgtdev_stripe->length =
+ bbio->stripes[index_srcdev].length;
+ tgtdev_stripe->dev = dev_replace->tgtdev;
+ bbio->tgtdev_map[index_srcdev] = num_stripes;
+
+ tgtdev_indexes++;
+ num_stripes++;
+ }
+ }
+
+ *num_stripes_ret = num_stripes;
+ *max_errors_ret = max_errors;
+ bbio->num_tgtdevs = tgtdev_indexes;
+ *bbio_ret = bbio;
+}
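
For BTRFS_MAP_WRITE the loop above appends, for every stripe that targets the source device, a duplicate stripe pointing at the replacement device, and records its index in tgtdev_map[]. A toy userspace illustration of that duplication; device IDs, offsets and counts are made-up example values:

#include <stdio.h>

struct toy_stripe { unsigned long long devid, physical; };

int main(void)
{
        struct toy_stripe stripes[8] = {
                { 1, 0x1000 }, { 2, 0x2000 }, { 1, 0x3000 },    /* devid 1 = srcdev */
        };
        int tgtdev_map[8] = { 0 };
        int num_stripes = 3, i, add = num_stripes;
        unsigned long long srcdev = 1, tgtdev = 9;

        for (i = 0; i < num_stripes; i++) {
                if (stripes[i].devid != srcdev)
                        continue;
                stripes[add] = stripes[i];      /* same physical offset ... */
                stripes[add].devid = tgtdev;    /* ... on the new disk */
                tgtdev_map[i] = add++;          /* remember where the copy went */
        }
        num_stripes = add;                      /* 5 stripes, 2 of them duplicates */

        for (i = 0; i < num_stripes; i++)
                printf("stripe %d: dev %llu phys 0x%llx\n",
                       i, stripes[i].devid, stripes[i].physical);
        return 0;
}
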
+
+static bool need_full_stripe(enum btrfs_map_op op)
+{
+ return (op == BTRFS_MAP_WRITE || op == BTRFS_MAP_GET_READ_MIRRORS);
+}
+
static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
enum btrfs_map_op op,
u64 logical, u64 *length,
@@ -5322,14 +5639,9 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
{
struct extent_map *em;
struct map_lookup *map;
- struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
- struct extent_map_tree *em_tree = &map_tree->map_tree;
u64 offset;
u64 stripe_offset;
- u64 stripe_end_offset;
u64 stripe_nr;
- u64 stripe_nr_orig;
- u64 stripe_nr_end;
u64 stripe_len;
u32 stripe_index;
int i;
@@ -5345,23 +5657,13 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
u64 physical_to_patch_in_first_stripe = 0;
u64 raid56_full_stripe_start = (u64)-1;
- read_lock(&em_tree->lock);
- em = lookup_extent_mapping(em_tree, logical, *length);
- read_unlock(&em_tree->lock);
-
- if (!em) {
- btrfs_crit(fs_info, "unable to find logical %llu len %llu",
- logical, *length);
- return -EINVAL;
- }
+ if (op == BTRFS_MAP_DISCARD)
+ return __btrfs_map_block_for_discard(fs_info, logical,
+ *length, bbio_ret);
- if (em->start > logical || em->start + em->len < logical) {
- btrfs_crit(fs_info,
- "found a bad mapping, wanted %Lu, found %Lu-%Lu",
- logical, em->start, em->start + em->len);
- free_extent_map(em);
- return -EINVAL;
- }
+ em = get_chunk_map(fs_info, logical, *length);
+ if (IS_ERR(em))
+ return PTR_ERR(em);
map = em->map_lookup;
offset = logical - em->start;
@@ -5400,14 +5702,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
raid56_full_stripe_start *= full_stripe_len;
}
- if (op == BTRFS_MAP_DISCARD) {
- /* we don't discard raid56 yet */
- if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
- ret = -EOPNOTSUPP;
- goto out;
- }
- *length = min_t(u64, em->len - offset, *length);
- } else if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
+ if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
u64 max_len;
/* For writes to RAID[56], allow a full stripeset across all disks.
For other RAID types and for RAID[56] reads, just allow a single
@@ -5438,105 +5733,28 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
btrfs_dev_replace_set_lock_blocking(dev_replace);
if (dev_replace_is_ongoing && mirror_num == map->num_stripes + 1 &&
- op != BTRFS_MAP_WRITE && op != BTRFS_MAP_DISCARD &&
- op != BTRFS_MAP_GET_READ_MIRRORS && dev_replace->tgtdev != NULL) {
- /*
- * in dev-replace case, for repair case (that's the only
- * case where the mirror is selected explicitly when
- * calling btrfs_map_block), blocks left of the left cursor
- * can also be read from the target drive.
- * For REQ_GET_READ_MIRRORS, the target drive is added as
- * the last one to the array of stripes. For READ, it also
- * needs to be supported using the same mirror number.
- * If the requested block is not left of the left cursor,
- * EIO is returned. This can happen because btrfs_num_copies()
- * returns one more in the dev-replace case.
- */
- u64 tmp_length = *length;
- struct btrfs_bio *tmp_bbio = NULL;
- int tmp_num_stripes;
- u64 srcdev_devid = dev_replace->srcdev->devid;
- int index_srcdev = 0;
- int found = 0;
- u64 physical_of_found = 0;
-
- ret = __btrfs_map_block(fs_info, BTRFS_MAP_GET_READ_MIRRORS,
- logical, &tmp_length, &tmp_bbio, 0, 0);
- if (ret) {
- WARN_ON(tmp_bbio != NULL);
- goto out;
- }
-
- tmp_num_stripes = tmp_bbio->num_stripes;
- if (mirror_num > tmp_num_stripes) {
- /*
- * BTRFS_MAP_GET_READ_MIRRORS does not contain this
- * mirror, that means that the requested area
- * is not left of the left cursor
- */
- ret = -EIO;
- btrfs_put_bbio(tmp_bbio);
- goto out;
- }
-
- /*
- * process the rest of the function using the mirror_num
- * of the source drive. Therefore look it up first.
- * At the end, patch the device pointer to the one of the
- * target drive.
- */
- for (i = 0; i < tmp_num_stripes; i++) {
- if (tmp_bbio->stripes[i].dev->devid != srcdev_devid)
- continue;
-
- /*
- * In case of DUP, in order to keep it simple, only add
- * the mirror with the lowest physical address
- */
- if (found &&
- physical_of_found <= tmp_bbio->stripes[i].physical)
- continue;
-
- index_srcdev = i;
- found = 1;
- physical_of_found = tmp_bbio->stripes[i].physical;
- }
-
- btrfs_put_bbio(tmp_bbio);
-
- if (!found) {
- WARN_ON(1);
- ret = -EIO;
+ !need_full_stripe(op) && dev_replace->tgtdev != NULL) {
+ ret = get_extra_mirror_from_replace(fs_info, logical, *length,
+ dev_replace->srcdev->devid,
+ &mirror_num,
+ &physical_to_patch_in_first_stripe);
+ if (ret)
goto out;
- }
-
- mirror_num = index_srcdev + 1;
- patch_the_first_stripe_for_dev_replace = 1;
- physical_to_patch_in_first_stripe = physical_of_found;
+ else
+ patch_the_first_stripe_for_dev_replace = 1;
} else if (mirror_num > map->num_stripes) {
mirror_num = 0;
}
num_stripes = 1;
stripe_index = 0;
- stripe_nr_orig = stripe_nr;
- stripe_nr_end = ALIGN(offset + *length, map->stripe_len);
- stripe_nr_end = div_u64(stripe_nr_end, map->stripe_len);
- stripe_end_offset = stripe_nr_end * map->stripe_len -
- (offset + *length);
-
if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
- if (op == BTRFS_MAP_DISCARD)
- num_stripes = min_t(u64, map->num_stripes,
- stripe_nr_end - stripe_nr_orig);
stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
&stripe_index);
- if (op != BTRFS_MAP_WRITE && op != BTRFS_MAP_DISCARD &&
- op != BTRFS_MAP_GET_READ_MIRRORS)
+ if (op != BTRFS_MAP_WRITE && op != BTRFS_MAP_GET_READ_MIRRORS)
mirror_num = 1;
} else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
- if (op == BTRFS_MAP_WRITE || op == BTRFS_MAP_DISCARD ||
- op == BTRFS_MAP_GET_READ_MIRRORS)
+ if (op == BTRFS_MAP_WRITE || op == BTRFS_MAP_GET_READ_MIRRORS)
num_stripes = map->num_stripes;
else if (mirror_num)
stripe_index = mirror_num - 1;
@@ -5549,8 +5767,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
}
} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
- if (op == BTRFS_MAP_WRITE || op == BTRFS_MAP_DISCARD ||
- op == BTRFS_MAP_GET_READ_MIRRORS) {
+ if (op == BTRFS_MAP_WRITE || op == BTRFS_MAP_GET_READ_MIRRORS) {
num_stripes = map->num_stripes;
} else if (mirror_num) {
stripe_index = mirror_num - 1;
@@ -5566,10 +5783,6 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
if (op == BTRFS_MAP_WRITE || op == BTRFS_MAP_GET_READ_MIRRORS)
num_stripes = map->sub_stripes;
- else if (op == BTRFS_MAP_DISCARD)
- num_stripes = min_t(u64, map->sub_stripes *
- (stripe_nr_end - stripe_nr_orig),
- map->num_stripes);
else if (mirror_num)
stripe_index += mirror_num - 1;
else {
@@ -5587,7 +5800,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
(op == BTRFS_MAP_WRITE || op == BTRFS_MAP_GET_READ_MIRRORS ||
mirror_num > 1)) {
/* push stripe_nr back to the start of the full stripe */
- stripe_nr = div_u64(raid56_full_stripe_start,
+ stripe_nr = div64_u64(raid56_full_stripe_start,
stripe_len * nr_data_stripes(map));
/* RAID[56] write or recovery. Return all stripes */
@@ -5612,8 +5825,9 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
/* We distribute the parity blocks across stripes */
div_u64_rem(stripe_nr + stripe_index, map->num_stripes,
&stripe_index);
- if ((op != BTRFS_MAP_WRITE && op != BTRFS_MAP_DISCARD &&
- op != BTRFS_MAP_GET_READ_MIRRORS) && mirror_num <= 1)
+ if ((op != BTRFS_MAP_WRITE &&
+ op != BTRFS_MAP_GET_READ_MIRRORS) &&
+ mirror_num <= 1)
mirror_num = 1;
}
} else {
@@ -5635,8 +5849,8 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
}
num_alloc_stripes = num_stripes;
- if (dev_replace_is_ongoing) {
- if (op == BTRFS_MAP_WRITE || op == BTRFS_MAP_DISCARD)
+ if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL) {
+ if (op == BTRFS_MAP_WRITE)
num_alloc_stripes <<= 1;
if (op == BTRFS_MAP_GET_READ_MIRRORS)
num_alloc_stripes++;
@@ -5648,14 +5862,12 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
ret = -ENOMEM;
goto out;
}
- if (dev_replace_is_ongoing)
+ if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL)
bbio->tgtdev_map = (int *)(bbio->stripes + num_alloc_stripes);
/* build raid_map */
- if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK &&
- need_raid_map &&
- ((op == BTRFS_MAP_WRITE || op == BTRFS_MAP_GET_READ_MIRRORS) ||
- mirror_num > 1)) {
+ if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK && need_raid_map &&
+ (need_full_stripe(op) || mirror_num > 1)) {
u64 tmp;
unsigned rot;
@@ -5679,173 +5891,27 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
RAID6_Q_STRIPE;
}
- if (op == BTRFS_MAP_DISCARD) {
- u32 factor = 0;
- u32 sub_stripes = 0;
- u64 stripes_per_dev = 0;
- u32 remaining_stripes = 0;
- u32 last_stripe = 0;
- if (map->type &
- (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10)) {
- if (map->type & BTRFS_BLOCK_GROUP_RAID0)
- sub_stripes = 1;
- else
- sub_stripes = map->sub_stripes;
-
- factor = map->num_stripes / sub_stripes;
- stripes_per_dev = div_u64_rem(stripe_nr_end -
- stripe_nr_orig,
- factor,
- &remaining_stripes);
- div_u64_rem(stripe_nr_end - 1, factor, &last_stripe);
- last_stripe *= sub_stripes;
- }
-
- for (i = 0; i < num_stripes; i++) {
- bbio->stripes[i].physical =
- map->stripes[stripe_index].physical +
- stripe_offset + stripe_nr * map->stripe_len;
- bbio->stripes[i].dev = map->stripes[stripe_index].dev;
-
- if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
- BTRFS_BLOCK_GROUP_RAID10)) {
- bbio->stripes[i].length = stripes_per_dev *
- map->stripe_len;
-
- if (i / sub_stripes < remaining_stripes)
- bbio->stripes[i].length +=
- map->stripe_len;
-
- /*
- * Special for the first stripe and
- * the last stripe:
- *
- * |-------|...|-------|
- * |----------|
- * off end_off
- */
- if (i < sub_stripes)
- bbio->stripes[i].length -=
- stripe_offset;
-
- if (stripe_index >= last_stripe &&
- stripe_index <= (last_stripe +
- sub_stripes - 1))
- bbio->stripes[i].length -=
- stripe_end_offset;
-
- if (i == sub_stripes - 1)
- stripe_offset = 0;
- } else
- bbio->stripes[i].length = *length;
-
- stripe_index++;
- if (stripe_index == map->num_stripes) {
- /* This could only happen for RAID0/10 */
- stripe_index = 0;
- stripe_nr++;
- }
- }
- } else {
- for (i = 0; i < num_stripes; i++) {
- bbio->stripes[i].physical =
- map->stripes[stripe_index].physical +
- stripe_offset +
- stripe_nr * map->stripe_len;
- bbio->stripes[i].dev =
- map->stripes[stripe_index].dev;
- stripe_index++;
- }
+ for (i = 0; i < num_stripes; i++) {
+ bbio->stripes[i].physical =
+ map->stripes[stripe_index].physical +
+ stripe_offset +
+ stripe_nr * map->stripe_len;
+ bbio->stripes[i].dev =
+ map->stripes[stripe_index].dev;
+ stripe_index++;
}
- if (op == BTRFS_MAP_WRITE || op == BTRFS_MAP_GET_READ_MIRRORS)
+ if (need_full_stripe(op))
max_errors = btrfs_chunk_max_errors(map);
if (bbio->raid_map)
sort_parity_stripes(bbio, num_stripes);
- tgtdev_indexes = 0;
- if (dev_replace_is_ongoing &&
- (op == BTRFS_MAP_WRITE || op == BTRFS_MAP_DISCARD) &&
- dev_replace->tgtdev != NULL) {
- int index_where_to_add;
- u64 srcdev_devid = dev_replace->srcdev->devid;
-
- /*
- * duplicate the write operations while the dev replace
- * procedure is running. Since the copying of the old disk
- * to the new disk takes place at run time while the
- * filesystem is mounted writable, the regular write
- * operations to the old disk have to be duplicated to go
- * to the new disk as well.
- * Note that device->missing is handled by the caller, and
- * that the write to the old disk is already set up in the
- * stripes array.
- */
- index_where_to_add = num_stripes;
- for (i = 0; i < num_stripes; i++) {
- if (bbio->stripes[i].dev->devid == srcdev_devid) {
- /* write to new disk, too */
- struct btrfs_bio_stripe *new =
- bbio->stripes + index_where_to_add;
- struct btrfs_bio_stripe *old =
- bbio->stripes + i;
-
- new->physical = old->physical;
- new->length = old->length;
- new->dev = dev_replace->tgtdev;
- bbio->tgtdev_map[i] = index_where_to_add;
- index_where_to_add++;
- max_errors++;
- tgtdev_indexes++;
- }
- }
- num_stripes = index_where_to_add;
- } else if (dev_replace_is_ongoing &&
- op == BTRFS_MAP_GET_READ_MIRRORS &&
- dev_replace->tgtdev != NULL) {
- u64 srcdev_devid = dev_replace->srcdev->devid;
- int index_srcdev = 0;
- int found = 0;
- u64 physical_of_found = 0;
-
- /*
- * During the dev-replace procedure, the target drive can
- * also be used to read data in case it is needed to repair
- * a corrupt block elsewhere. This is possible if the
- * requested area is left of the left cursor. In this area,
- * the target drive is a full copy of the source drive.
- */
- for (i = 0; i < num_stripes; i++) {
- if (bbio->stripes[i].dev->devid == srcdev_devid) {
- /*
- * In case of DUP, in order to keep it
- * simple, only add the mirror with the
- * lowest physical address
- */
- if (found &&
- physical_of_found <=
- bbio->stripes[i].physical)
- continue;
- index_srcdev = i;
- found = 1;
- physical_of_found = bbio->stripes[i].physical;
- }
- }
- if (found) {
- struct btrfs_bio_stripe *tgtdev_stripe =
- bbio->stripes + num_stripes;
-
- tgtdev_stripe->physical = physical_of_found;
- tgtdev_stripe->length =
- bbio->stripes[index_srcdev].length;
- tgtdev_stripe->dev = dev_replace->tgtdev;
- bbio->tgtdev_map[index_srcdev] = num_stripes;
-
- tgtdev_indexes++;
- num_stripes++;
- }
+ if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL &&
+ need_full_stripe(op)) {
+ handle_ops_on_dev_replace(op, &bbio, dev_replace, &num_stripes,
+ &max_errors);
}
*bbio_ret = bbio;
@@ -5853,7 +5919,6 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
bbio->num_stripes = num_stripes;
bbio->max_errors = max_errors;
bbio->mirror_num = mirror_num;
- bbio->num_tgtdevs = tgtdev_indexes;
/*
* this is the case that REQ_READ && dev_replace_is_ongoing &&
@@ -5886,19 +5951,15 @@ int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
/* For Scrub/replace */
int btrfs_map_sblock(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
u64 logical, u64 *length,
- struct btrfs_bio **bbio_ret, int mirror_num,
- int need_raid_map)
+ struct btrfs_bio **bbio_ret)
{
- return __btrfs_map_block(fs_info, op, logical, length, bbio_ret,
- mirror_num, need_raid_map);
+ return __btrfs_map_block(fs_info, op, logical, length, bbio_ret, 0, 1);
}
int btrfs_rmap_block(struct btrfs_fs_info *fs_info,
u64 chunk_start, u64 physical, u64 devid,
u64 **logical, int *naddrs, int *stripe_len)
{
- struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
- struct extent_map_tree *em_tree = &map_tree->map_tree;
struct extent_map *em;
struct map_lookup *map;
u64 *buf;
@@ -5908,24 +5969,11 @@ int btrfs_rmap_block(struct btrfs_fs_info *fs_info,
u64 rmap_len;
int i, j, nr = 0;
- read_lock(&em_tree->lock);
- em = lookup_extent_mapping(em_tree, chunk_start, 1);
- read_unlock(&em_tree->lock);
-
- if (!em) {
- btrfs_err(fs_info, "couldn't find em for chunk %Lu",
- chunk_start);
+ em = get_chunk_map(fs_info, chunk_start, 1);
+ if (IS_ERR(em))
return -EIO;
- }
- if (em->start != chunk_start) {
- btrfs_err(fs_info, "bad chunk start, em=%Lu, wanted=%Lu",
- em->start, chunk_start);
- free_extent_map(em);
- return -EIO;
- }
map = em->map_lookup;
-
length = em->len;
rmap_len = map->stripe_len;
@@ -5949,7 +5997,7 @@ int btrfs_rmap_block(struct btrfs_fs_info *fs_info,
continue;
stripe_nr = physical - map->stripes[i].physical;
- stripe_nr = div_u64(stripe_nr, map->stripe_len);
+ stripe_nr = div64_u64(stripe_nr, map->stripe_len);
if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
stripe_nr = stripe_nr * map->num_stripes + i;
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
index 59be81206dd7..c7d0fbc915ca 100644
--- a/fs/btrfs/volumes.h
+++ b/fs/btrfs/volumes.h
@@ -123,7 +123,6 @@ struct btrfs_device {
struct list_head resized_list;
/* for sending down flush barriers */
- int nobarriers;
struct bio *flush_bio;
struct completion flush_wait;
@@ -298,7 +297,7 @@ struct btrfs_bio;
typedef void (btrfs_bio_end_io_t) (struct btrfs_bio *bio, int err);
struct btrfs_bio {
- atomic_t refs;
+ refcount_t refs;
atomic_t stripes_pending;
struct btrfs_fs_info *fs_info;
u64 map_type; /* get from map_lookup->type */
@@ -400,8 +399,7 @@ int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
struct btrfs_bio **bbio_ret, int mirror_num);
int btrfs_map_sblock(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
u64 logical, u64 *length,
- struct btrfs_bio **bbio_ret, int mirror_num,
- int need_raid_map);
+ struct btrfs_bio **bbio_ret);
int btrfs_rmap_block(struct btrfs_fs_info *fs_info,
u64 chunk_start, u64 physical, u64 devid,
u64 **logical, int *naddrs, int *stripe_len);
@@ -475,7 +473,7 @@ void btrfs_destroy_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
void btrfs_init_dev_replace_tgtdev_for_resume(struct btrfs_fs_info *fs_info,
struct btrfs_device *tgtdev);
void btrfs_scratch_superblocks(struct block_device *bdev, const char *device_path);
-int btrfs_is_parity_mirror(struct btrfs_mapping_tree *map_tree,
+int btrfs_is_parity_mirror(struct btrfs_fs_info *fs_info,
u64 logical, u64 len, int mirror_num);
unsigned long btrfs_full_stripe_len(struct btrfs_fs_info *fs_info,
struct btrfs_mapping_tree *map_tree,
diff --git a/fs/buffer.c b/fs/buffer.c
index 9196f2a270da..161be58c5cb0 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -49,7 +49,6 @@
static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh,
- unsigned long bio_flags,
struct writeback_control *wbc);
#define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
@@ -1830,7 +1829,7 @@ int __block_write_full_page(struct inode *inode, struct page *page,
do {
struct buffer_head *next = bh->b_this_page;
if (buffer_async_write(bh)) {
- submit_bh_wbc(REQ_OP_WRITE, write_flags, bh, 0, wbc);
+ submit_bh_wbc(REQ_OP_WRITE, write_flags, bh, wbc);
nr_underway++;
}
bh = next;
@@ -1884,7 +1883,7 @@ recover:
struct buffer_head *next = bh->b_this_page;
if (buffer_async_write(bh)) {
clear_buffer_dirty(bh);
- submit_bh_wbc(REQ_OP_WRITE, write_flags, bh, 0, wbc);
+ submit_bh_wbc(REQ_OP_WRITE, write_flags, bh, wbc);
nr_underway++;
}
bh = next;
@@ -2379,8 +2378,7 @@ int generic_cont_expand_simple(struct inode *inode, loff_t size)
goto out;
err = pagecache_write_begin(NULL, mapping, size, 0,
- AOP_FLAG_UNINTERRUPTIBLE|AOP_FLAG_CONT_EXPAND,
- &page, &fsdata);
+ AOP_FLAG_CONT_EXPAND, &page, &fsdata);
if (err)
goto out;
@@ -2415,9 +2413,8 @@ static int cont_expand_zero(struct file *file, struct address_space *mapping,
}
len = PAGE_SIZE - zerofrom;
- err = pagecache_write_begin(file, mapping, curpos, len,
- AOP_FLAG_UNINTERRUPTIBLE,
- &page, &fsdata);
+ err = pagecache_write_begin(file, mapping, curpos, len, 0,
+ &page, &fsdata);
if (err)
goto out;
zero_user(page, zerofrom, len);
@@ -2449,9 +2446,8 @@ static int cont_expand_zero(struct file *file, struct address_space *mapping,
}
len = offset - zerofrom;
- err = pagecache_write_begin(file, mapping, curpos, len,
- AOP_FLAG_UNINTERRUPTIBLE,
- &page, &fsdata);
+ err = pagecache_write_begin(file, mapping, curpos, len, 0,
+ &page, &fsdata);
if (err)
goto out;
zero_user(page, zerofrom, len);
@@ -3095,7 +3091,7 @@ void guard_bio_eod(int op, struct bio *bio)
}
static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh,
- unsigned long bio_flags, struct writeback_control *wbc)
+ struct writeback_control *wbc)
{
struct bio *bio;
@@ -3130,7 +3126,6 @@ static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh,
bio->bi_end_io = end_bio_bh_io_sync;
bio->bi_private = bh;
- bio->bi_flags |= bio_flags;
/* Take care of bh's that straddle the end of the device */
guard_bio_eod(op, bio);
@@ -3145,16 +3140,9 @@ static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh,
return 0;
}
-int _submit_bh(int op, int op_flags, struct buffer_head *bh,
- unsigned long bio_flags)
+int submit_bh(int op, int op_flags, struct buffer_head *bh)
{
- return submit_bh_wbc(op, op_flags, bh, bio_flags, NULL);
-}
-EXPORT_SYMBOL_GPL(_submit_bh);
-
-int submit_bh(int op, int op_flags, struct buffer_head *bh)
-{
- return submit_bh_wbc(op, op_flags, bh, 0, NULL);
+ return submit_bh_wbc(op, op_flags, bh, NULL);
}
EXPORT_SYMBOL(submit_bh);
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index 9ecb2fd348cb..1e71e6ca5ddf 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -670,8 +670,12 @@ static void writepages_finish(struct ceph_osd_request *req)
bool remove_page;
dout("writepages_finish %p rc %d\n", inode, rc);
- if (rc < 0)
+ if (rc < 0) {
mapping_set_error(mapping, rc);
+ ceph_set_error_write(ci);
+ } else {
+ ceph_clear_error_write(ci);
+ }
/*
* We lost the cache cap, need to truncate the page before
@@ -703,9 +707,6 @@ static void writepages_finish(struct ceph_osd_request *req)
clear_bdi_congested(inode_to_bdi(inode),
BLK_RW_ASYNC);
- if (rc < 0)
- SetPageError(page);
-
ceph_put_snap_context(page_snap_context(page));
page->private = 0;
ClearPagePrivate(page);
@@ -1892,6 +1893,7 @@ static int __ceph_pool_perm_get(struct ceph_inode_info *ci,
err = ceph_osdc_start_request(&fsc->client->osdc, rd_req, false);
wr_req->r_mtime = ci->vfs_inode.i_mtime;
+ wr_req->r_abort_on_full = true;
err2 = ceph_osdc_start_request(&fsc->client->osdc, wr_req, false);
if (!err)
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index 68c78be19d5b..a3ebb632294e 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -1015,6 +1015,7 @@ static int send_cap_msg(struct cap_msg_args *arg)
void *p;
size_t extra_len;
struct timespec zerotime = {0};
+ struct ceph_osd_client *osdc = &arg->session->s_mdsc->fsc->client->osdc;
dout("send_cap_msg %s %llx %llx caps %s wanted %s dirty %s"
" seq %u/%u tid %llu/%llu mseq %u follows %lld size %llu/%llu"
@@ -1076,8 +1077,12 @@ static int send_cap_msg(struct cap_msg_args *arg)
ceph_encode_64(&p, arg->inline_data ? 0 : CEPH_INLINE_NONE);
/* inline data size */
ceph_encode_32(&p, 0);
- /* osd_epoch_barrier (version 5) */
- ceph_encode_32(&p, 0);
+ /*
+ * osd_epoch_barrier (version 5)
+	 * The epoch_barrier is protected by osdc->lock, so READ_ONCE here in
+	 * case it was recently changed
+ */
+ ceph_encode_32(&p, READ_ONCE(osdc->epoch_barrier));
/* oldest_flush_tid (version 6) */
ceph_encode_64(&p, arg->oldest_flush_tid);
@@ -1389,7 +1394,7 @@ static void __ceph_flush_snaps(struct ceph_inode_info *ci,
first_tid = cf->tid + 1;
capsnap = container_of(cf, struct ceph_cap_snap, cap_flush);
- atomic_inc(&capsnap->nref);
+ refcount_inc(&capsnap->nref);
spin_unlock(&ci->i_ceph_lock);
dout("__flush_snaps %p capsnap %p tid %llu %s\n",
@@ -2202,7 +2207,7 @@ static void __kick_flushing_caps(struct ceph_mds_client *mdsc,
inode, capsnap, cf->tid,
ceph_cap_string(capsnap->dirty));
- atomic_inc(&capsnap->nref);
+ refcount_inc(&capsnap->nref);
spin_unlock(&ci->i_ceph_lock);
ret = __send_flush_snap(inode, session, capsnap, cap->mseq,
@@ -3633,13 +3638,19 @@ void ceph_handle_caps(struct ceph_mds_session *session,
p += inline_len;
}
+ if (le16_to_cpu(msg->hdr.version) >= 5) {
+ struct ceph_osd_client *osdc = &mdsc->fsc->client->osdc;
+ u32 epoch_barrier;
+
+ ceph_decode_32_safe(&p, end, epoch_barrier, bad);
+ ceph_osdc_update_epoch_barrier(osdc, epoch_barrier);
+ }
+
if (le16_to_cpu(msg->hdr.version) >= 8) {
u64 flush_tid;
u32 caller_uid, caller_gid;
- u32 osd_epoch_barrier;
u32 pool_ns_len;
- /* version >= 5 */
- ceph_decode_32_safe(&p, end, osd_epoch_barrier, bad);
+
/* version >= 6 */
ceph_decode_64_safe(&p, end, flush_tid, bad);
/* version >= 7 */
diff --git a/fs/ceph/debugfs.c b/fs/ceph/debugfs.c
index 3ef11bc8d728..4e2d112c982f 100644
--- a/fs/ceph/debugfs.c
+++ b/fs/ceph/debugfs.c
@@ -22,20 +22,19 @@ static int mdsmap_show(struct seq_file *s, void *p)
{
int i;
struct ceph_fs_client *fsc = s->private;
+ struct ceph_mdsmap *mdsmap;
if (fsc->mdsc == NULL || fsc->mdsc->mdsmap == NULL)
return 0;
- seq_printf(s, "epoch %d\n", fsc->mdsc->mdsmap->m_epoch);
- seq_printf(s, "root %d\n", fsc->mdsc->mdsmap->m_root);
- seq_printf(s, "session_timeout %d\n",
- fsc->mdsc->mdsmap->m_session_timeout);
- seq_printf(s, "session_autoclose %d\n",
- fsc->mdsc->mdsmap->m_session_autoclose);
- for (i = 0; i < fsc->mdsc->mdsmap->m_max_mds; i++) {
- struct ceph_entity_addr *addr =
- &fsc->mdsc->mdsmap->m_info[i].addr;
- int state = fsc->mdsc->mdsmap->m_info[i].state;
-
+ mdsmap = fsc->mdsc->mdsmap;
+ seq_printf(s, "epoch %d\n", mdsmap->m_epoch);
+ seq_printf(s, "root %d\n", mdsmap->m_root);
+ seq_printf(s, "max_mds %d\n", mdsmap->m_max_mds);
+ seq_printf(s, "session_timeout %d\n", mdsmap->m_session_timeout);
+ seq_printf(s, "session_autoclose %d\n", mdsmap->m_session_autoclose);
+ for (i = 0; i < mdsmap->m_num_mds; i++) {
+ struct ceph_entity_addr *addr = &mdsmap->m_info[i].addr;
+ int state = mdsmap->m_info[i].state;
seq_printf(s, "\tmds%d\t%s\t(%s)\n", i,
ceph_pr_addr(&addr->in_addr),
ceph_mds_state_name(state));
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index 3e9ad501addf..e071d23f6148 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -294,7 +294,7 @@ static int ceph_readdir(struct file *file, struct dir_context *ctx)
struct ceph_mds_client *mdsc = fsc->mdsc;
int i;
int err;
- u32 ftype;
+ unsigned frag = -1;
struct ceph_mds_reply_info_parsed *rinfo;
dout("readdir %p file %p pos %llx\n", inode, file, ctx->pos);
@@ -341,7 +341,6 @@ more:
/* do we have the correct frag content buffered? */
if (need_send_readdir(fi, ctx->pos)) {
struct ceph_mds_request *req;
- unsigned frag;
int op = ceph_snap(inode) == CEPH_SNAPDIR ?
CEPH_MDS_OP_LSSNAP : CEPH_MDS_OP_READDIR;
@@ -352,8 +351,11 @@ more:
}
if (is_hash_order(ctx->pos)) {
- frag = ceph_choose_frag(ci, fpos_hash(ctx->pos),
- NULL, NULL);
+ /* fragtree isn't always accurate. choose frag
+ * based on previous reply when possible. */
+ if (frag == (unsigned)-1)
+ frag = ceph_choose_frag(ci, fpos_hash(ctx->pos),
+ NULL, NULL);
} else {
frag = fpos_frag(ctx->pos);
}
@@ -378,7 +380,11 @@ more:
ceph_mdsc_put_request(req);
return -ENOMEM;
}
+ } else if (is_hash_order(ctx->pos)) {
+ req->r_args.readdir.offset_hash =
+ cpu_to_le32(fpos_hash(ctx->pos));
}
+
req->r_dir_release_cnt = fi->dir_release_count;
req->r_dir_ordered_cnt = fi->dir_ordered_count;
req->r_readdir_cache_idx = fi->readdir_cache_idx;
@@ -476,6 +482,7 @@ more:
struct ceph_mds_reply_dir_entry *rde = rinfo->dir_entries + i;
struct ceph_vino vino;
ino_t ino;
+ u32 ftype;
BUG_ON(rde->offset < ctx->pos);
@@ -498,15 +505,17 @@ more:
ctx->pos++;
}
+ ceph_mdsc_put_request(fi->last_readdir);
+ fi->last_readdir = NULL;
+
if (fi->next_offset > 2) {
- ceph_mdsc_put_request(fi->last_readdir);
- fi->last_readdir = NULL;
+ frag = fi->frag;
goto more;
}
/* more frags? */
if (!ceph_frag_is_rightmost(fi->frag)) {
- unsigned frag = ceph_frag_next(fi->frag);
+ frag = ceph_frag_next(fi->frag);
if (is_hash_order(ctx->pos)) {
loff_t new_pos = ceph_make_fpos(ceph_frag_value(frag),
fi->next_offset, true);
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index 26cc95421cca..3fdde0b283c9 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -13,6 +13,38 @@
#include "mds_client.h"
#include "cache.h"
+static __le32 ceph_flags_sys2wire(u32 flags)
+{
+ u32 wire_flags = 0;
+
+ switch (flags & O_ACCMODE) {
+ case O_RDONLY:
+ wire_flags |= CEPH_O_RDONLY;
+ break;
+ case O_WRONLY:
+ wire_flags |= CEPH_O_WRONLY;
+ break;
+ case O_RDWR:
+ wire_flags |= CEPH_O_RDWR;
+ break;
+ }
+
+#define ceph_sys2wire(a) if (flags & a) { wire_flags |= CEPH_##a; flags &= ~a; }
+
+ ceph_sys2wire(O_CREAT);
+ ceph_sys2wire(O_EXCL);
+ ceph_sys2wire(O_TRUNC);
+ ceph_sys2wire(O_DIRECTORY);
+ ceph_sys2wire(O_NOFOLLOW);
+
+#undef ceph_sys2wire
+
+ if (flags)
+ dout("unused open flags: %x", flags);
+
+ return cpu_to_le32(wire_flags);
+}
+
/*
* Ceph file operations
*
@@ -74,12 +106,9 @@ dio_get_pages_alloc(const struct iov_iter *it, size_t nbytes,
align = (unsigned long)(it->iov->iov_base + it->iov_offset) &
(PAGE_SIZE - 1);
npages = calc_pages_for(align, nbytes);
- pages = kmalloc(sizeof(*pages) * npages, GFP_KERNEL);
- if (!pages) {
- pages = vmalloc(sizeof(*pages) * npages);
- if (!pages)
- return ERR_PTR(-ENOMEM);
- }
+ pages = kvmalloc(sizeof(*pages) * npages, GFP_KERNEL);
+ if (!pages)
+ return ERR_PTR(-ENOMEM);
for (idx = 0; idx < npages; ) {
size_t start;
@@ -123,7 +152,7 @@ prepare_open_request(struct super_block *sb, int flags, int create_mode)
if (IS_ERR(req))
goto out;
req->r_fmode = ceph_flags_to_mode(flags);
- req->r_args.open.flags = cpu_to_le32(flags);
+ req->r_args.open.flags = ceph_flags_sys2wire(flags);
req->r_args.open.mode = cpu_to_le32(create_mode);
out:
return req;
@@ -192,7 +221,7 @@ int ceph_renew_caps(struct inode *inode)
spin_lock(&ci->i_ceph_lock);
wanted = __ceph_caps_file_wanted(ci);
if (__ceph_is_any_real_caps(ci) &&
- (!(wanted & CEPH_CAP_ANY_WR) == 0 || ci->i_auth_cap)) {
+ (!(wanted & CEPH_CAP_ANY_WR) || ci->i_auth_cap)) {
int issued = __ceph_caps_issued(ci, NULL);
spin_unlock(&ci->i_ceph_lock);
dout("renew caps %p want %s issued %s updating mds_wanted\n",
@@ -781,6 +810,7 @@ static void ceph_aio_retry_work(struct work_struct *work)
req->r_callback = ceph_aio_complete_req;
req->r_inode = inode;
req->r_priv = aio_req;
+ req->r_abort_on_full = true;
ret = ceph_osdc_start_request(req->r_osdc, req, false);
out:
@@ -1088,19 +1118,22 @@ ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
out:
ceph_osdc_put_request(req);
- if (ret == 0) {
- pos += len;
- written += len;
-
- if (pos > i_size_read(inode)) {
- check_caps = ceph_inode_set_size(inode, pos);
- if (check_caps)
- ceph_check_caps(ceph_inode(inode),
- CHECK_CAPS_AUTHONLY,
- NULL);
- }
- } else
+ if (ret != 0) {
+ ceph_set_error_write(ci);
break;
+ }
+
+ ceph_clear_error_write(ci);
+ pos += len;
+ written += len;
+ if (pos > i_size_read(inode)) {
+ check_caps = ceph_inode_set_size(inode, pos);
+ if (check_caps)
+ ceph_check_caps(ceph_inode(inode),
+ CHECK_CAPS_AUTHONLY,
+ NULL);
+ }
+
}
if (ret != -EOLDSNAPC && written > 0) {
@@ -1306,6 +1339,7 @@ static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
}
retry_snap:
+ /* FIXME: not complete since it doesn't account for being at quota */
if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL)) {
err = -ENOSPC;
goto out;
@@ -1327,7 +1361,8 @@ retry_snap:
inode, ceph_vinop(inode), pos, count, ceph_cap_string(got));
if ((got & (CEPH_CAP_FILE_BUFFER|CEPH_CAP_FILE_LAZYIO)) == 0 ||
- (iocb->ki_flags & IOCB_DIRECT) || (fi->flags & CEPH_F_SYNC)) {
+ (iocb->ki_flags & IOCB_DIRECT) || (fi->flags & CEPH_F_SYNC) ||
+ (ci->i_ceph_flags & CEPH_I_ERROR_WRITE)) {
struct ceph_snap_context *snapc;
struct iov_iter data;
inode_unlock(inode);
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index d3119fe3ab45..dcce79b84406 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -1482,10 +1482,17 @@ int ceph_readdir_prepopulate(struct ceph_mds_request *req,
if (test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags))
return readdir_prepopulate_inodes_only(req, session);
- if (rinfo->hash_order && req->r_path2) {
- last_hash = ceph_str_hash(ci->i_dir_layout.dl_dir_hash,
- req->r_path2, strlen(req->r_path2));
- last_hash = ceph_frag_value(last_hash);
+ if (rinfo->hash_order) {
+ if (req->r_path2) {
+ last_hash = ceph_str_hash(ci->i_dir_layout.dl_dir_hash,
+ req->r_path2,
+ strlen(req->r_path2));
+ last_hash = ceph_frag_value(last_hash);
+ } else if (rinfo->offset_hash) {
+ /* mds understands offset_hash */
+ WARN_ON_ONCE(req->r_readdir_offset != 2);
+ last_hash = le32_to_cpu(rhead->args.readdir.offset_hash);
+ }
}
if (rinfo->dir_dir &&
@@ -1510,7 +1517,7 @@ int ceph_readdir_prepopulate(struct ceph_mds_request *req,
}
if (ceph_frag_is_leftmost(frag) && req->r_readdir_offset == 2 &&
- !(rinfo->hash_order && req->r_path2)) {
+ !(rinfo->hash_order && last_hash)) {
/* note dir version at start of readdir so we can tell
* if any dentries get dropped */
req->r_dir_release_cnt = atomic64_read(&ci->i_release_count);
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index c681762d76e6..f38e56fa9712 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -189,6 +189,7 @@ static int parse_reply_info_dir(void **p, void *end,
info->dir_end = !!(flags & CEPH_READDIR_FRAG_END);
info->dir_complete = !!(flags & CEPH_READDIR_FRAG_COMPLETE);
info->hash_order = !!(flags & CEPH_READDIR_HASH_ORDER);
+ info->offset_hash = !!(flags & CEPH_READDIR_OFFSET_HASH);
}
if (num == 0)
goto done;
@@ -378,9 +379,9 @@ const char *ceph_session_state_name(int s)
static struct ceph_mds_session *get_session(struct ceph_mds_session *s)
{
- if (atomic_inc_not_zero(&s->s_ref)) {
+ if (refcount_inc_not_zero(&s->s_ref)) {
dout("mdsc get_session %p %d -> %d\n", s,
- atomic_read(&s->s_ref)-1, atomic_read(&s->s_ref));
+ refcount_read(&s->s_ref)-1, refcount_read(&s->s_ref));
return s;
} else {
dout("mdsc get_session %p 0 -- FAIL", s);
@@ -391,8 +392,8 @@ static struct ceph_mds_session *get_session(struct ceph_mds_session *s)
void ceph_put_mds_session(struct ceph_mds_session *s)
{
dout("mdsc put_session %p %d -> %d\n", s,
- atomic_read(&s->s_ref), atomic_read(&s->s_ref)-1);
- if (atomic_dec_and_test(&s->s_ref)) {
+ refcount_read(&s->s_ref), refcount_read(&s->s_ref)-1);
+ if (refcount_dec_and_test(&s->s_ref)) {
if (s->s_auth.authorizer)
ceph_auth_destroy_authorizer(s->s_auth.authorizer);
kfree(s);
@@ -411,7 +412,7 @@ struct ceph_mds_session *__ceph_lookup_mds_session(struct ceph_mds_client *mdsc,
return NULL;
session = mdsc->sessions[mds];
dout("lookup_mds_session %p %d\n", session,
- atomic_read(&session->s_ref));
+ refcount_read(&session->s_ref));
get_session(session);
return session;
}
@@ -441,7 +442,7 @@ static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc,
{
struct ceph_mds_session *s;
- if (mds >= mdsc->mdsmap->m_max_mds)
+ if (mds >= mdsc->mdsmap->m_num_mds)
return ERR_PTR(-EINVAL);
s = kzalloc(sizeof(*s), GFP_NOFS);
@@ -466,7 +467,7 @@ static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc,
INIT_LIST_HEAD(&s->s_caps);
s->s_nr_caps = 0;
s->s_trim_caps = 0;
- atomic_set(&s->s_ref, 1);
+ refcount_set(&s->s_ref, 1);
INIT_LIST_HEAD(&s->s_waiting);
INIT_LIST_HEAD(&s->s_unsafe);
s->s_num_cap_releases = 0;
@@ -494,7 +495,7 @@ static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc,
}
mdsc->sessions[mds] = s;
atomic_inc(&mdsc->num_sessions);
- atomic_inc(&s->s_ref); /* one ref to sessions[], one to caller */
+ refcount_inc(&s->s_ref); /* one ref to sessions[], one to caller */
ceph_con_open(&s->s_con, CEPH_ENTITY_TYPE_MDS, mds,
ceph_mdsmap_get_addr(mdsc->mdsmap, mds));
@@ -1004,7 +1005,7 @@ static void __open_export_target_sessions(struct ceph_mds_client *mdsc,
struct ceph_mds_session *ts;
int i, mds = session->s_mds;
- if (mds >= mdsc->mdsmap->m_max_mds)
+ if (mds >= mdsc->mdsmap->m_num_mds)
return;
mi = &mdsc->mdsmap->m_info[mds];
@@ -1551,9 +1552,15 @@ void ceph_send_cap_releases(struct ceph_mds_client *mdsc,
struct ceph_msg *msg = NULL;
struct ceph_mds_cap_release *head;
struct ceph_mds_cap_item *item;
+ struct ceph_osd_client *osdc = &mdsc->fsc->client->osdc;
struct ceph_cap *cap;
LIST_HEAD(tmp_list);
int num_cap_releases;
+ __le32 barrier, *cap_barrier;
+
+ down_read(&osdc->lock);
+ barrier = cpu_to_le32(osdc->epoch_barrier);
+ up_read(&osdc->lock);
spin_lock(&session->s_cap_lock);
again:
@@ -1571,7 +1578,11 @@ again:
head = msg->front.iov_base;
head->num = cpu_to_le32(0);
msg->front.iov_len = sizeof(*head);
+
+ msg->hdr.version = cpu_to_le16(2);
+ msg->hdr.compat_version = cpu_to_le16(1);
}
+
cap = list_first_entry(&tmp_list, struct ceph_cap,
session_caps);
list_del(&cap->session_caps);
@@ -1589,6 +1600,11 @@ again:
ceph_put_cap(mdsc, cap);
if (le32_to_cpu(head->num) == CEPH_CAPS_PER_RELEASE) {
+ // Append cap_barrier field
+ cap_barrier = msg->front.iov_base + msg->front.iov_len;
+ *cap_barrier = barrier;
+ msg->front.iov_len += sizeof(*cap_barrier);
+
msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
dout("send_cap_releases mds%d %p\n", session->s_mds, msg);
ceph_con_send(&session->s_con, msg);
@@ -1604,6 +1620,11 @@ again:
spin_unlock(&session->s_cap_lock);
if (msg) {
+ // Append cap_barrier field
+ cap_barrier = msg->front.iov_base + msg->front.iov_len;
+ *cap_barrier = barrier;
+ msg->front.iov_len += sizeof(*cap_barrier);
+
msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
dout("send_cap_releases mds%d %p\n", session->s_mds, msg);
ceph_con_send(&session->s_con, msg);
@@ -1666,6 +1687,7 @@ struct ceph_mds_request *
ceph_mdsc_create_request(struct ceph_mds_client *mdsc, int op, int mode)
{
struct ceph_mds_request *req = kzalloc(sizeof(*req), GFP_NOFS);
+ struct timespec ts;
if (!req)
return ERR_PTR(-ENOMEM);
@@ -1684,7 +1706,8 @@ ceph_mdsc_create_request(struct ceph_mds_client *mdsc, int op, int mode)
init_completion(&req->r_safe_completion);
INIT_LIST_HEAD(&req->r_unsafe_item);
- req->r_stamp = current_fs_time(mdsc->fsc->sb);
+ ktime_get_real_ts(&ts);
+ req->r_stamp = timespec_trunc(ts, mdsc->fsc->sb->s_time_gran);
req->r_op = op;
req->r_direct_mode = mode;
@@ -1991,7 +2014,7 @@ static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc,
if (req->r_pagelist) {
struct ceph_pagelist *pagelist = req->r_pagelist;
- atomic_inc(&pagelist->refcnt);
+ refcount_inc(&pagelist->refcnt);
ceph_msg_data_add_pagelist(msg, pagelist);
msg->hdr.data_len = cpu_to_le32(pagelist->length);
} else {
@@ -2638,8 +2661,10 @@ static void handle_session(struct ceph_mds_session *session,
seq = le64_to_cpu(h->seq);
mutex_lock(&mdsc->mutex);
- if (op == CEPH_SESSION_CLOSE)
+ if (op == CEPH_SESSION_CLOSE) {
+ get_session(session);
__unregister_session(mdsc, session);
+ }
/* FIXME: this ttl calculation is generous */
session->s_ttl = jiffies + HZ*mdsc->mdsmap->m_session_autoclose;
mutex_unlock(&mdsc->mutex);
@@ -2728,6 +2753,8 @@ static void handle_session(struct ceph_mds_session *session,
kick_requests(mdsc, mds);
mutex_unlock(&mdsc->mutex);
}
+ if (op == CEPH_SESSION_CLOSE)
+ ceph_put_mds_session(session);
return;
bad:
@@ -3107,7 +3134,7 @@ static void check_new_map(struct ceph_mds_client *mdsc,
dout("check_new_map new %u old %u\n",
newmap->m_epoch, oldmap->m_epoch);
- for (i = 0; i < oldmap->m_max_mds && i < mdsc->max_sessions; i++) {
+ for (i = 0; i < oldmap->m_num_mds && i < mdsc->max_sessions; i++) {
if (mdsc->sessions[i] == NULL)
continue;
s = mdsc->sessions[i];
@@ -3121,15 +3148,33 @@ static void check_new_map(struct ceph_mds_client *mdsc,
ceph_mdsmap_is_laggy(newmap, i) ? " (laggy)" : "",
ceph_session_state_name(s->s_state));
- if (i >= newmap->m_max_mds ||
+ if (i >= newmap->m_num_mds ||
memcmp(ceph_mdsmap_get_addr(oldmap, i),
ceph_mdsmap_get_addr(newmap, i),
sizeof(struct ceph_entity_addr))) {
if (s->s_state == CEPH_MDS_SESSION_OPENING) {
/* the session never opened, just close it
* out now */
+ get_session(s);
+ __unregister_session(mdsc, s);
__wake_requests(mdsc, &s->s_waiting);
+ ceph_put_mds_session(s);
+ } else if (i >= newmap->m_num_mds) {
+ /* force close session for stopped mds */
+ get_session(s);
__unregister_session(mdsc, s);
+ __wake_requests(mdsc, &s->s_waiting);
+ kick_requests(mdsc, i);
+ mutex_unlock(&mdsc->mutex);
+
+ mutex_lock(&s->s_mutex);
+ cleanup_session_requests(mdsc, s);
+ remove_session_caps(s);
+ mutex_unlock(&s->s_mutex);
+
+ ceph_put_mds_session(s);
+
+ mutex_lock(&mdsc->mutex);
} else {
/* just close it */
mutex_unlock(&mdsc->mutex);
@@ -3167,7 +3212,7 @@ static void check_new_map(struct ceph_mds_client *mdsc,
}
}
- for (i = 0; i < newmap->m_max_mds && i < mdsc->max_sessions; i++) {
+ for (i = 0; i < newmap->m_num_mds && i < mdsc->max_sessions; i++) {
s = mdsc->sessions[i];
if (!s)
continue;
@@ -3881,7 +3926,7 @@ static struct ceph_connection *con_get(struct ceph_connection *con)
struct ceph_mds_session *s = con->private;
if (get_session(s)) {
- dout("mdsc con_get %p ok (%d)\n", s, atomic_read(&s->s_ref));
+ dout("mdsc con_get %p ok (%d)\n", s, refcount_read(&s->s_ref));
return con;
}
dout("mdsc con_get %p FAIL\n", s);
@@ -3892,7 +3937,7 @@ static void con_put(struct ceph_connection *con)
{
struct ceph_mds_session *s = con->private;
- dout("mdsc con_put %p (%d)\n", s, atomic_read(&s->s_ref) - 1);
+ dout("mdsc con_put %p (%d)\n", s, refcount_read(&s->s_ref) - 1);
ceph_put_mds_session(s);
}
diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h
index ac0475a2daa7..db57ae98ed34 100644
--- a/fs/ceph/mds_client.h
+++ b/fs/ceph/mds_client.h
@@ -7,6 +7,7 @@
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
+#include <linux/refcount.h>
#include <linux/ceph/types.h>
#include <linux/ceph/messenger.h>
@@ -82,9 +83,10 @@ struct ceph_mds_reply_info_parsed {
struct ceph_mds_reply_dirfrag *dir_dir;
size_t dir_buf_size;
int dir_nr;
- bool dir_complete;
bool dir_end;
+ bool dir_complete;
bool hash_order;
+ bool offset_hash;
struct ceph_mds_reply_dir_entry *dir_entries;
};
@@ -104,10 +106,13 @@ struct ceph_mds_reply_info_parsed {
/*
* cap releases are batched and sent to the MDS en masse.
+ *
+ * Account for per-message overhead of mds_cap_release header
+ * and __le32 for osd epoch barrier trailing field.
*/
-#define CEPH_CAPS_PER_RELEASE ((PAGE_SIZE - \
+#define CEPH_CAPS_PER_RELEASE ((PAGE_SIZE - sizeof(u32) - \
sizeof(struct ceph_mds_cap_release)) / \
- sizeof(struct ceph_mds_cap_item))
+ sizeof(struct ceph_mds_cap_item))
/*
@@ -156,7 +161,7 @@ struct ceph_mds_session {
unsigned long s_renew_requested; /* last time we sent a renew req */
u64 s_renew_seq;
- atomic_t s_ref;
+ refcount_t s_ref;
struct list_head s_waiting; /* waiting requests */
struct list_head s_unsafe; /* unsafe requests */
};
@@ -373,7 +378,7 @@ __ceph_lookup_mds_session(struct ceph_mds_client *, int mds);
static inline struct ceph_mds_session *
ceph_get_mds_session(struct ceph_mds_session *s)
{
- atomic_inc(&s->s_ref);
+ refcount_inc(&s->s_ref);
return s;
}
diff --git a/fs/ceph/mdsmap.c b/fs/ceph/mdsmap.c
index 5454e2327a5f..1a748cf88535 100644
--- a/fs/ceph/mdsmap.c
+++ b/fs/ceph/mdsmap.c
@@ -22,11 +22,11 @@ int ceph_mdsmap_get_random_mds(struct ceph_mdsmap *m)
int i;
/* special case for one mds */
- if (1 == m->m_max_mds && m->m_info[0].state > 0)
+ if (1 == m->m_num_mds && m->m_info[0].state > 0)
return 0;
/* count */
- for (i = 0; i < m->m_max_mds; i++)
+ for (i = 0; i < m->m_num_mds; i++)
if (m->m_info[i].state > 0)
n++;
if (n == 0)
@@ -135,8 +135,9 @@ struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end)
m->m_session_autoclose = ceph_decode_32(p);
m->m_max_file_size = ceph_decode_64(p);
m->m_max_mds = ceph_decode_32(p);
+ m->m_num_mds = m->m_max_mds;
- m->m_info = kcalloc(m->m_max_mds, sizeof(*m->m_info), GFP_NOFS);
+ m->m_info = kcalloc(m->m_num_mds, sizeof(*m->m_info), GFP_NOFS);
if (m->m_info == NULL)
goto nomem;
@@ -207,9 +208,20 @@ struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end)
ceph_pr_addr(&addr.in_addr),
ceph_mds_state_name(state));
- if (mds < 0 || mds >= m->m_max_mds || state <= 0)
+ if (mds < 0 || state <= 0)
continue;
+ if (mds >= m->m_num_mds) {
+ int new_num = max(mds + 1, m->m_num_mds * 2);
+ void *new_m_info = krealloc(m->m_info,
+ new_num * sizeof(*m->m_info),
+ GFP_NOFS | __GFP_ZERO);
+ if (!new_m_info)
+ goto nomem;
+ m->m_info = new_m_info;
+ m->m_num_mds = new_num;
+ }
+
info = &m->m_info[mds];
info->global_id = global_id;
info->state = state;
@@ -229,6 +241,14 @@ struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end)
info->export_targets = NULL;
}
}
+ if (m->m_num_mds > m->m_max_mds) {
+ /* find max up mds */
+ for (i = m->m_num_mds; i >= m->m_max_mds; i--) {
+ if (i == 0 || m->m_info[i-1].state > 0)
+ break;
+ }
+ m->m_num_mds = i;
+ }
/* pg_pools */
ceph_decode_32_safe(p, end, n, bad);
@@ -270,12 +290,22 @@ struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end)
for (i = 0; i < n; i++) {
s32 mds = ceph_decode_32(p);
- if (mds >= 0 && mds < m->m_max_mds) {
+ if (mds >= 0 && mds < m->m_num_mds) {
if (m->m_info[mds].laggy)
num_laggy++;
}
}
m->m_num_laggy = num_laggy;
+
+ if (n > m->m_num_mds) {
+ void *new_m_info = krealloc(m->m_info,
+ n * sizeof(*m->m_info),
+ GFP_NOFS | __GFP_ZERO);
+ if (!new_m_info)
+ goto nomem;
+ m->m_info = new_m_info;
+ }
+ m->m_num_mds = n;
}
/* inc */
@@ -341,7 +371,7 @@ void ceph_mdsmap_destroy(struct ceph_mdsmap *m)
{
int i;
- for (i = 0; i < m->m_max_mds; i++)
+ for (i = 0; i < m->m_num_mds; i++)
kfree(m->m_info[i].export_targets);
kfree(m->m_info);
kfree(m->m_data_pg_pools);
@@ -357,7 +387,7 @@ bool ceph_mdsmap_is_cluster_available(struct ceph_mdsmap *m)
return false;
if (m->m_num_laggy > 0)
return false;
- for (i = 0; i < m->m_max_mds; i++) {
+ for (i = 0; i < m->m_num_mds; i++) {
if (m->m_info[i].state == CEPH_MDS_STATE_ACTIVE)
nr_active++;
}
diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
index 8f8b41c2ef0f..dab5d6732345 100644
--- a/fs/ceph/snap.c
+++ b/fs/ceph/snap.c
@@ -519,7 +519,7 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
capsnap->need_flush ? "" : "no_flush");
ihold(inode);
- atomic_set(&capsnap->nref, 1);
+ refcount_set(&capsnap->nref, 1);
INIT_LIST_HEAD(&capsnap->ci_item);
capsnap->follows = old_snapc->seq;
diff --git a/fs/ceph/super.c b/fs/ceph/super.c
index a8c81b2052ca..8d7918ce694a 100644
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -544,10 +544,6 @@ static struct ceph_fs_client *create_fs_client(struct ceph_mount_options *fsopt,
struct ceph_options *opt)
{
struct ceph_fs_client *fsc;
- const u64 supported_features =
- CEPH_FEATURE_FLOCK | CEPH_FEATURE_DIRLAYOUTHASH |
- CEPH_FEATURE_MDSENC | CEPH_FEATURE_MDS_INLINE_DATA;
- const u64 required_features = 0;
int page_count;
size_t size;
int err = -ENOMEM;
@@ -556,8 +552,7 @@ static struct ceph_fs_client *create_fs_client(struct ceph_mount_options *fsopt,
if (!fsc)
return ERR_PTR(-ENOMEM);
- fsc->client = ceph_create_client(opt, fsc, supported_features,
- required_features);
+ fsc->client = ceph_create_client(opt, fsc);
if (IS_ERR(fsc->client)) {
err = PTR_ERR(fsc->client);
goto fail;
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index 176186b12457..a973acd8beaf 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -14,6 +14,7 @@
#include <linux/writeback.h>
#include <linux/slab.h>
#include <linux/posix_acl.h>
+#include <linux/refcount.h>
#include <linux/ceph/libceph.h>
@@ -160,7 +161,7 @@ struct ceph_cap_flush {
* data before flushing the snapped state (tracked here) back to the MDS.
*/
struct ceph_cap_snap {
- atomic_t nref;
+ refcount_t nref;
struct list_head ci_item;
struct ceph_cap_flush cap_flush;
@@ -189,7 +190,7 @@ struct ceph_cap_snap {
static inline void ceph_put_cap_snap(struct ceph_cap_snap *capsnap)
{
- if (atomic_dec_and_test(&capsnap->nref)) {
+ if (refcount_dec_and_test(&capsnap->nref)) {
if (capsnap->xattr_blob)
ceph_buffer_put(capsnap->xattr_blob);
kfree(capsnap);
@@ -471,6 +472,32 @@ static inline struct inode *ceph_find_inode(struct super_block *sb,
#define CEPH_I_CAP_DROPPED (1 << 8) /* caps were forcibly dropped */
#define CEPH_I_KICK_FLUSH (1 << 9) /* kick flushing caps */
#define CEPH_I_FLUSH_SNAPS (1 << 10) /* need flush snapss */
+#define CEPH_I_ERROR_WRITE (1 << 11) /* have seen write errors */
+
+/*
+ * We set the ERROR_WRITE bit when we start seeing write errors on an inode
+ * and then clear it when they start succeeding. Note that we do a lockless
+ * check first, and only take the lock if it looks like it needs to be changed.
+ * The write submission code just takes this as a hint, so we're not too
+ * worried if a few slip through in either direction.
+ */
+static inline void ceph_set_error_write(struct ceph_inode_info *ci)
+{
+ if (!(READ_ONCE(ci->i_ceph_flags) & CEPH_I_ERROR_WRITE)) {
+ spin_lock(&ci->i_ceph_lock);
+ ci->i_ceph_flags |= CEPH_I_ERROR_WRITE;
+ spin_unlock(&ci->i_ceph_lock);
+ }
+}
+
+static inline void ceph_clear_error_write(struct ceph_inode_info *ci)
+{
+ if (READ_ONCE(ci->i_ceph_flags) & CEPH_I_ERROR_WRITE) {
+ spin_lock(&ci->i_ceph_lock);
+ ci->i_ceph_flags &= ~CEPH_I_ERROR_WRITE;
+ spin_unlock(&ci->i_ceph_lock);
+ }
+}
static inline void __ceph_dir_set_complete(struct ceph_inode_info *ci,
long long release_count,
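
[Editor's note] The CEPH_I_ERROR_WRITE helpers added above follow a common pattern: readers treat the bit as a lockless hint, and the lock is taken only when the bit actually has to change. As a stand-alone illustration of that pattern (plain C11 with pthreads, not ceph code; struct obj, OBJ_ERROR_WRITE and the function names are made-up), it looks roughly like this:

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

struct obj {
	pthread_mutex_t lock;	/* serializes flag updates (and other state) */
	atomic_uint flags;	/* read locklessly as a hint */
};

#define OBJ_ERROR_WRITE	(1u << 0)

/* Set the error bit, taking the lock only if it looks unset. */
static void obj_set_error_write(struct obj *o)
{
	if (!(atomic_load_explicit(&o->flags, memory_order_relaxed) &
	      OBJ_ERROR_WRITE)) {
		pthread_mutex_lock(&o->lock);
		atomic_fetch_or_explicit(&o->flags, OBJ_ERROR_WRITE,
					 memory_order_relaxed);
		pthread_mutex_unlock(&o->lock);
	}
}

/* Clear it the same way once writes start succeeding again. */
static void obj_clear_error_write(struct obj *o)
{
	if (atomic_load_explicit(&o->flags, memory_order_relaxed) &
	    OBJ_ERROR_WRITE) {
		pthread_mutex_lock(&o->lock);
		atomic_fetch_and_explicit(&o->flags, ~OBJ_ERROR_WRITE,
					  memory_order_relaxed);
		pthread_mutex_unlock(&o->lock);
	}
}

/* Writers only consult the hint, e.g. to fall back to a synchronous path. */
static bool obj_prefer_sync_write(const struct obj *o)
{
	return atomic_load_explicit(&o->flags, memory_order_relaxed) &
	       OBJ_ERROR_WRITE;
}

In the ceph case the spinlock is still required because i_ceph_flags carries other bits that are updated under i_ceph_lock; the sketch keeps its mutex for the same reason.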
diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c
index febc28f9e2c2..75267cdd5dfd 100644
--- a/fs/ceph/xattr.c
+++ b/fs/ceph/xattr.c
@@ -392,6 +392,7 @@ static int __set_xattr(struct ceph_inode_info *ci,
if (update_xattr) {
int err = 0;
+
if (xattr && (flags & XATTR_CREATE))
err = -EEXIST;
else if (!xattr && (flags & XATTR_REPLACE))
@@ -399,12 +400,14 @@ static int __set_xattr(struct ceph_inode_info *ci,
if (err) {
kfree(name);
kfree(val);
+ kfree(*newxattr);
return err;
}
if (update_xattr < 0) {
if (xattr)
__remove_xattr(ci, xattr);
kfree(name);
+ kfree(*newxattr);
return 0;
}
}
diff --git a/fs/char_dev.c b/fs/char_dev.c
index 44a240c4bb65..fb8507f521b2 100644
--- a/fs/char_dev.c
+++ b/fs/char_dev.c
@@ -471,6 +471,85 @@ int cdev_add(struct cdev *p, dev_t dev, unsigned count)
return 0;
}
+/**
+ * cdev_set_parent() - set the parent kobject for a char device
+ * @p: the cdev structure
+ * @kobj: the kobject to take a reference to
+ *
+ * cdev_set_parent() sets a parent kobject which will be referenced
+ * appropriately so the parent is not freed before the cdev. This
+ * should be called before cdev_add.
+ */
+void cdev_set_parent(struct cdev *p, struct kobject *kobj)
+{
+ WARN_ON(!kobj->state_initialized);
+ p->kobj.parent = kobj;
+}
+
+/**
+ * cdev_device_add() - add a char device and its corresponding
+ *	struct device, linking them
+ * @dev: the device structure
+ * @cdev: the cdev structure
+ *
+ * cdev_device_add() adds the char device represented by @cdev to the system,
+ * just as cdev_add does. It then adds @dev to the system using device_add().
+ * The dev_t for the char device will be taken from the struct device which
+ * needs to be initialized first. This helper function correctly takes a
+ * reference to the parent device so the parent will not get released until
+ * all references to the cdev are released.
+ *
+ * This helper uses dev->devt for the device number. If it is not set,
+ * the cdev will not be added and the call is equivalent to device_add.
+ *
+ * This function should be used whenever the struct cdev and the
+ * struct device are members of the same structure whose lifetime is
+ * managed by the struct device.
+ *
+ * NOTE: Callers must assume that userspace was able to open the cdev and
+ * can call cdev fops callbacks at any time, even if this function fails.
+ */
+int cdev_device_add(struct cdev *cdev, struct device *dev)
+{
+ int rc = 0;
+
+ if (dev->devt) {
+ cdev_set_parent(cdev, &dev->kobj);
+
+ rc = cdev_add(cdev, dev->devt, 1);
+ if (rc)
+ return rc;
+ }
+
+ rc = device_add(dev);
+ if (rc)
+ cdev_del(cdev);
+
+ return rc;
+}
+
+/**
+ * cdev_device_del() - inverse of cdev_device_add
+ * @dev: the device structure
+ * @cdev: the cdev structure
+ *
+ * cdev_device_del() is a helper function to call cdev_del and device_del.
+ * It should be used whenever cdev_device_add is used.
+ *
+ * If dev->devt is not set, the cdev will not be removed and the call is
+ * equivalent to device_del.
+ *
+ * NOTE: This guarantees that associated sysfs callbacks are not running
+ * or runnable; however, any cdevs already open will remain and their fops
+ * will still be callable even after this function returns.
+ */
+void cdev_device_del(struct cdev *cdev, struct device *dev)
+{
+ device_del(dev);
+ if (dev->devt)
+ cdev_del(cdev);
+}
+
static void cdev_unmap(dev_t dev, unsigned count)
{
kobj_unmap(cdev_map, dev, count);
@@ -482,6 +561,10 @@ static void cdev_unmap(dev_t dev, unsigned count)
*
* cdev_del() removes @p from the system, possibly freeing the structure
* itself.
+ *
+ * NOTE: This guarantees that the cdev device will no longer be able to be
+ * opened; however, any cdevs already open will remain and their fops will
+ * still be callable even after cdev_del returns.
*/
void cdev_del(struct cdev *p)
{
@@ -570,5 +653,8 @@ EXPORT_SYMBOL(cdev_init);
EXPORT_SYMBOL(cdev_alloc);
EXPORT_SYMBOL(cdev_del);
EXPORT_SYMBOL(cdev_add);
+EXPORT_SYMBOL(cdev_set_parent);
+EXPORT_SYMBOL(cdev_device_add);
+EXPORT_SYMBOL(cdev_device_del);
EXPORT_SYMBOL(__register_chrdev);
EXPORT_SYMBOL(__unregister_chrdev);
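
[Editor's note] The cdev_set_parent()/cdev_device_add()/cdev_device_del() helpers added above are aimed at drivers that embed both a struct cdev and a struct device in one object. A minimal, hypothetical sketch of that pattern (my_dev, my_fops, my_add and my_remove are made-up names, and error handling is trimmed):

#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/module.h>

struct my_dev {
	struct cdev cdev;
	struct device dev;	/* lifetime of the whole object is tied to this */
};

static const struct file_operations my_fops = {
	.owner = THIS_MODULE,
};

static int my_add(struct my_dev *md, dev_t devt)
{
	int rc;

	device_initialize(&md->dev);
	md->dev.devt = devt;		/* cdev_device_add() uses dev->devt */
	dev_set_name(&md->dev, "mydev%d", MINOR(devt));

	cdev_init(&md->cdev, &my_fops);
	md->cdev.owner = THIS_MODULE;

	/* adds the cdev (parent kobject referenced) and then the device */
	rc = cdev_device_add(&md->cdev, &md->dev);
	if (rc)
		put_device(&md->dev);
	return rc;
}

static void my_remove(struct my_dev *md)
{
	cdev_device_del(&md->cdev, &md->dev);	/* device_del() + cdev_del() */
	put_device(&md->dev);			/* drop the remaining reference */
}

A real driver would also supply a dev.release callback so that the final put_device() frees struct my_dev once all cdev and device references are gone.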
diff --git a/fs/cifs/cifs_unicode.c b/fs/cifs/cifs_unicode.c
index 02b071bf3732..a0b3e7d1be48 100644
--- a/fs/cifs/cifs_unicode.c
+++ b/fs/cifs/cifs_unicode.c
@@ -83,6 +83,9 @@ convert_sfm_char(const __u16 src_char, char *target)
case SFM_COLON:
*target = ':';
break;
+ case SFM_DOUBLEQUOTE:
+ *target = '"';
+ break;
case SFM_ASTERISK:
*target = '*';
break;
@@ -418,6 +421,9 @@ static __le16 convert_to_sfm_char(char src_char, bool end_of_string)
case ':':
dest_char = cpu_to_le16(SFM_COLON);
break;
+ case '"':
+ dest_char = cpu_to_le16(SFM_DOUBLEQUOTE);
+ break;
case '*':
dest_char = cpu_to_le16(SFM_ASTERISK);
break;
diff --git a/fs/cifs/cifs_unicode.h b/fs/cifs/cifs_unicode.h
index 3d7298cc0aeb..8a79a34e66b8 100644
--- a/fs/cifs/cifs_unicode.h
+++ b/fs/cifs/cifs_unicode.h
@@ -57,6 +57,7 @@
* not conflict (although almost does) with the mapping above.
*/
+#define SFM_DOUBLEQUOTE ((__u16) 0xF020)
#define SFM_ASTERISK ((__u16) 0xF021)
#define SFM_QUESTION ((__u16) 0xF025)
#define SFM_COLON ((__u16) 0xF022)
@@ -64,8 +65,8 @@
#define SFM_LESSTHAN ((__u16) 0xF023)
#define SFM_PIPE ((__u16) 0xF027)
#define SFM_SLASH ((__u16) 0xF026)
-#define SFM_PERIOD ((__u16) 0xF028)
-#define SFM_SPACE ((__u16) 0xF029)
+#define SFM_SPACE ((__u16) 0xF028)
+#define SFM_PERIOD ((__u16) 0xF029)
/*
* Mapping mechanism to use when one of the seven reserved characters is
diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
index 058ac9b36f04..68abbb0db608 100644
--- a/fs/cifs/cifsencrypt.c
+++ b/fs/cifs/cifsencrypt.c
@@ -478,6 +478,7 @@ find_timestamp(struct cifs_ses *ses)
unsigned char *blobptr;
unsigned char *blobend;
struct ntlmssp2_name *attrptr;
+ struct timespec ts;
if (!ses->auth_key.len || !ses->auth_key.response)
return 0;
@@ -502,7 +503,8 @@ find_timestamp(struct cifs_ses *ses)
blobptr += attrsize; /* advance attr value */
}
- return cpu_to_le64(cifs_UnixTimeToNT(CURRENT_TIME));
+ ktime_get_real_ts(&ts);
+ return cpu_to_le64(cifs_UnixTimeToNT(ts));
}
static int calc_ntlmv2_hash(struct cifs_ses *ses, char *ntlmv2_hash,
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index d0d11b73b2af..9a1667e0e8d6 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -88,6 +88,7 @@ extern mempool_t *cifs_req_poolp;
extern mempool_t *cifs_mid_poolp;
struct workqueue_struct *cifsiod_wq;
+struct workqueue_struct *cifsoplockd_wq;
__u32 cifs_lock_secret;
/*
@@ -1375,9 +1376,16 @@ init_cifs(void)
goto out_clean_proc;
}
+ cifsoplockd_wq = alloc_workqueue("cifsoplockd",
+ WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
+ if (!cifsoplockd_wq) {
+ rc = -ENOMEM;
+ goto out_destroy_cifsiod_wq;
+ }
+
rc = cifs_fscache_register();
if (rc)
- goto out_destroy_wq;
+ goto out_destroy_cifsoplockd_wq;
rc = cifs_init_inodecache();
if (rc)
@@ -1425,7 +1433,9 @@ out_destroy_inodecache:
cifs_destroy_inodecache();
out_unreg_fscache:
cifs_fscache_unregister();
-out_destroy_wq:
+out_destroy_cifsoplockd_wq:
+ destroy_workqueue(cifsoplockd_wq);
+out_destroy_cifsiod_wq:
destroy_workqueue(cifsiod_wq);
out_clean_proc:
cifs_proc_clean();
@@ -1448,6 +1458,7 @@ exit_cifs(void)
cifs_destroy_mids();
cifs_destroy_inodecache();
cifs_fscache_unregister();
+ destroy_workqueue(cifsoplockd_wq);
destroy_workqueue(cifsiod_wq);
cifs_proc_clean();
}
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 37f5a41cc50c..8be55be70faf 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -1115,6 +1115,23 @@ struct cifs_io_parms {
struct cifs_tcon *tcon;
};
+struct cifs_aio_ctx {
+ struct kref refcount;
+ struct list_head list;
+ struct mutex aio_mutex;
+ struct completion done;
+ struct iov_iter iter;
+ struct kiocb *iocb;
+ struct cifsFileInfo *cfile;
+ struct bio_vec *bv;
+ loff_t pos;
+ unsigned int npages;
+ ssize_t rc;
+ unsigned int len;
+ unsigned int total_len;
+ bool should_dirty;
+};
+
struct cifs_readdata;
/* asynchronous read support */
@@ -1124,6 +1141,7 @@ struct cifs_readdata {
struct completion done;
struct cifsFileInfo *cfile;
struct address_space *mapping;
+ struct cifs_aio_ctx *ctx;
__u64 offset;
unsigned int bytes;
unsigned int got_bytes;
@@ -1154,6 +1172,7 @@ struct cifs_writedata {
enum writeback_sync_modes sync_mode;
struct work_struct work;
struct cifsFileInfo *cfile;
+ struct cifs_aio_ctx *ctx;
__u64 offset;
pid_t pid;
unsigned int bytes;
@@ -1683,6 +1702,7 @@ void cifs_oplock_break(struct work_struct *work);
extern const struct slow_work_ops cifs_oplock_break_ops;
extern struct workqueue_struct *cifsiod_wq;
+extern struct workqueue_struct *cifsoplockd_wq;
extern __u32 cifs_lock_secret;
extern mempool_t *cifs_mid_poolp;
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index 97e5d236d265..e49958c3f8bb 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -535,4 +535,7 @@ int __cifs_calc_signature(struct smb_rqst *rqst,
struct shash_desc *shash);
enum securityEnum cifs_select_sectype(struct TCP_Server_Info *,
enum securityEnum);
+struct cifs_aio_ctx *cifs_aio_ctx_alloc(void);
+void cifs_aio_ctx_release(struct kref *refcount);
+int setup_aio_ctx_iter(struct cifs_aio_ctx *ctx, struct iov_iter *iter, int rw);
#endif /* _CIFSPROTO_H */
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 5d21f00ae341..4c01b3f9abf0 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -478,14 +478,14 @@ decode_lanman_negprot_rsp(struct TCP_Server_Info *server, NEGOTIATE_RSP *pSMBr)
* this requirement.
*/
int val, seconds, remain, result;
- struct timespec ts, utc;
- utc = CURRENT_TIME;
+ struct timespec ts;
+ unsigned long utc = ktime_get_real_seconds();
ts = cnvrtDosUnixTm(rsp->SrvTime.Date,
rsp->SrvTime.Time, 0);
cifs_dbg(FYI, "SrvTime %d sec since 1970 (utc: %d) diff: %d\n",
- (int)ts.tv_sec, (int)utc.tv_sec,
- (int)(utc.tv_sec - ts.tv_sec));
- val = (int)(utc.tv_sec - ts.tv_sec);
+ (int)ts.tv_sec, (int)utc,
+ (int)(utc - ts.tv_sec));
+ val = (int)(utc - ts.tv_sec);
seconds = abs(val);
result = (seconds / MIN_TZ_ADJ) * MIN_TZ_ADJ;
remain = seconds % MIN_TZ_ADJ;
@@ -718,6 +718,9 @@ CIFSSMBEcho(struct TCP_Server_Info *server)
if (rc)
return rc;
+ if (server->capabilities & CAP_UNICODE)
+ smb->hdr.Flags2 |= SMBFLG2_UNICODE;
+
/* set up echo request */
smb->hdr.Tid = 0xffff;
smb->hdr.WordCount = 1;
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 9bc0b4d6d065..9365c0cf77ad 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -1946,9 +1946,14 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
}
if (!got_ip) {
+ int len;
+ const char *slash;
+
/* No ip= option specified? Try to get it from UNC */
- if (!cifs_convert_address(dstaddr, &vol->UNC[2],
- strlen(&vol->UNC[2]))) {
+ /* Use the address part of the UNC. */
+ slash = strchr(&vol->UNC[2], '\\');
+ len = slash - &vol->UNC[2];
+ if (!cifs_convert_address(dstaddr, &vol->UNC[2], len)) {
pr_err("Unable to determine destination address.\n");
goto cifs_parse_mount_err;
}
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 21d404535739..6ef78ad838e6 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -2458,11 +2458,14 @@ cifs_uncached_writedata_release(struct kref *refcount)
struct cifs_writedata *wdata = container_of(refcount,
struct cifs_writedata, refcount);
+ kref_put(&wdata->ctx->refcount, cifs_aio_ctx_release);
for (i = 0; i < wdata->nr_pages; i++)
put_page(wdata->pages[i]);
cifs_writedata_release(refcount);
}
+static void collect_uncached_write_data(struct cifs_aio_ctx *ctx);
+
static void
cifs_uncached_writev_complete(struct work_struct *work)
{
@@ -2478,7 +2481,8 @@ cifs_uncached_writev_complete(struct work_struct *work)
spin_unlock(&inode->i_lock);
complete(&wdata->done);
-
+ collect_uncached_write_data(wdata->ctx);
+ /* the below call can possibly free the last ref to aio ctx */
kref_put(&wdata->refcount, cifs_uncached_writedata_release);
}
@@ -2527,7 +2531,8 @@ wdata_fill_from_iovec(struct cifs_writedata *wdata, struct iov_iter *from,
static int
cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from,
struct cifsFileInfo *open_file,
- struct cifs_sb_info *cifs_sb, struct list_head *wdata_list)
+ struct cifs_sb_info *cifs_sb, struct list_head *wdata_list,
+ struct cifs_aio_ctx *ctx)
{
int rc = 0;
size_t cur_len;
@@ -2595,6 +2600,8 @@ cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from,
wdata->pagesz = PAGE_SIZE;
wdata->tailsz = cur_len - ((nr_pages - 1) * PAGE_SIZE);
wdata->credits = credits;
+ wdata->ctx = ctx;
+ kref_get(&ctx->refcount);
if (!wdata->cfile->invalidHandle ||
!(rc = cifs_reopen_file(wdata->cfile, false)))
@@ -2620,81 +2627,61 @@ cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from,
return rc;
}
-ssize_t cifs_user_writev(struct kiocb *iocb, struct iov_iter *from)
+static void collect_uncached_write_data(struct cifs_aio_ctx *ctx)
{
- struct file *file = iocb->ki_filp;
- ssize_t total_written = 0;
- struct cifsFileInfo *open_file;
+ struct cifs_writedata *wdata, *tmp;
struct cifs_tcon *tcon;
struct cifs_sb_info *cifs_sb;
- struct cifs_writedata *wdata, *tmp;
- struct list_head wdata_list;
- struct iov_iter saved_from = *from;
+ struct dentry *dentry = ctx->cfile->dentry;
+ unsigned int i;
int rc;
- /*
- * BB - optimize the way when signing is disabled. We can drop this
- * extra memory-to-memory copying and use iovec buffers for constructing
- * write request.
- */
-
- rc = generic_write_checks(iocb, from);
- if (rc <= 0)
- return rc;
-
- INIT_LIST_HEAD(&wdata_list);
- cifs_sb = CIFS_FILE_SB(file);
- open_file = file->private_data;
- tcon = tlink_tcon(open_file->tlink);
-
- if (!tcon->ses->server->ops->async_writev)
- return -ENOSYS;
+ tcon = tlink_tcon(ctx->cfile->tlink);
+ cifs_sb = CIFS_SB(dentry->d_sb);
- rc = cifs_write_from_iter(iocb->ki_pos, iov_iter_count(from), from,
- open_file, cifs_sb, &wdata_list);
+ mutex_lock(&ctx->aio_mutex);
- /*
- * If at least one write was successfully sent, then discard any rc
- * value from the later writes. If the other write succeeds, then
- * we'll end up returning whatever was written. If it fails, then
- * we'll get a new rc value from that.
- */
- if (!list_empty(&wdata_list))
- rc = 0;
+ if (list_empty(&ctx->list)) {
+ mutex_unlock(&ctx->aio_mutex);
+ return;
+ }
+ rc = ctx->rc;
/*
* Wait for and collect replies for any successful sends in order of
- * increasing offset. Once an error is hit or we get a fatal signal
- * while waiting, then return without waiting for any more replies.
+ * increasing offset. Once an error is hit, then return without waiting
+ * for any more replies.
*/
restart_loop:
- list_for_each_entry_safe(wdata, tmp, &wdata_list, list) {
+ list_for_each_entry_safe(wdata, tmp, &ctx->list, list) {
if (!rc) {
- /* FIXME: freezable too? */
- rc = wait_for_completion_killable(&wdata->done);
- if (rc)
- rc = -EINTR;
- else if (wdata->result)
+ if (!try_wait_for_completion(&wdata->done)) {
+ mutex_unlock(&ctx->aio_mutex);
+ return;
+ }
+
+ if (wdata->result)
rc = wdata->result;
else
- total_written += wdata->bytes;
+ ctx->total_len += wdata->bytes;
/* resend call if it's a retryable error */
if (rc == -EAGAIN) {
struct list_head tmp_list;
- struct iov_iter tmp_from = saved_from;
+ struct iov_iter tmp_from = ctx->iter;
INIT_LIST_HEAD(&tmp_list);
list_del_init(&wdata->list);
iov_iter_advance(&tmp_from,
- wdata->offset - iocb->ki_pos);
+ wdata->offset - ctx->pos);
rc = cifs_write_from_iter(wdata->offset,
wdata->bytes, &tmp_from,
- open_file, cifs_sb, &tmp_list);
+ ctx->cfile, cifs_sb, &tmp_list,
+ ctx);
- list_splice(&tmp_list, &wdata_list);
+ list_splice(&tmp_list, &ctx->list);
kref_put(&wdata->refcount,
cifs_uncached_writedata_release);
@@ -2705,12 +2692,111 @@ restart_loop:
kref_put(&wdata->refcount, cifs_uncached_writedata_release);
}
+ for (i = 0; i < ctx->npages; i++)
+ put_page(ctx->bv[i].bv_page);
+
+ cifs_stats_bytes_written(tcon, ctx->total_len);
+ set_bit(CIFS_INO_INVALID_MAPPING, &CIFS_I(dentry->d_inode)->flags);
+
+ ctx->rc = (rc == 0) ? ctx->total_len : rc;
+
+ mutex_unlock(&ctx->aio_mutex);
+
+ if (ctx->iocb && ctx->iocb->ki_complete)
+ ctx->iocb->ki_complete(ctx->iocb, ctx->rc, 0);
+ else
+ complete(&ctx->done);
+}
+
+ssize_t cifs_user_writev(struct kiocb *iocb, struct iov_iter *from)
+{
+ struct file *file = iocb->ki_filp;
+ ssize_t total_written = 0;
+ struct cifsFileInfo *cfile;
+ struct cifs_tcon *tcon;
+ struct cifs_sb_info *cifs_sb;
+ struct cifs_aio_ctx *ctx;
+ struct iov_iter saved_from = *from;
+ int rc;
+
+ /*
+ * BB - optimize the way when signing is disabled. We can drop this
+ * extra memory-to-memory copying and use iovec buffers for constructing
+ * write request.
+ */
+
+ rc = generic_write_checks(iocb, from);
+ if (rc <= 0)
+ return rc;
+
+ cifs_sb = CIFS_FILE_SB(file);
+ cfile = file->private_data;
+ tcon = tlink_tcon(cfile->tlink);
+
+ if (!tcon->ses->server->ops->async_writev)
+ return -ENOSYS;
+
+ ctx = cifs_aio_ctx_alloc();
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->cfile = cifsFileInfo_get(cfile);
+
+ if (!is_sync_kiocb(iocb))
+ ctx->iocb = iocb;
+
+ ctx->pos = iocb->ki_pos;
+
+ rc = setup_aio_ctx_iter(ctx, from, WRITE);
+ if (rc) {
+ kref_put(&ctx->refcount, cifs_aio_ctx_release);
+ return rc;
+ }
+
+	/* grab a lock here because response handlers can access ctx */
+ mutex_lock(&ctx->aio_mutex);
+
+ rc = cifs_write_from_iter(iocb->ki_pos, ctx->len, &saved_from,
+ cfile, cifs_sb, &ctx->list, ctx);
+
+ /*
+ * If at least one write was successfully sent, then discard any rc
+ * value from the later writes. If the other write succeeds, then
+ * we'll end up returning whatever was written. If it fails, then
+ * we'll get a new rc value from that.
+ */
+ if (!list_empty(&ctx->list))
+ rc = 0;
+
+ mutex_unlock(&ctx->aio_mutex);
+
+ if (rc) {
+ kref_put(&ctx->refcount, cifs_aio_ctx_release);
+ return rc;
+ }
+
+ if (!is_sync_kiocb(iocb)) {
+ kref_put(&ctx->refcount, cifs_aio_ctx_release);
+ return -EIOCBQUEUED;
+ }
+
+ rc = wait_for_completion_killable(&ctx->done);
+ if (rc) {
+ mutex_lock(&ctx->aio_mutex);
+ ctx->rc = rc = -EINTR;
+ total_written = ctx->total_len;
+ mutex_unlock(&ctx->aio_mutex);
+ } else {
+ rc = ctx->rc;
+ total_written = ctx->total_len;
+ }
+
+ kref_put(&ctx->refcount, cifs_aio_ctx_release);
+
if (unlikely(!total_written))
return rc;
iocb->ki_pos += total_written;
- set_bit(CIFS_INO_INVALID_MAPPING, &CIFS_I(file_inode(file))->flags);
- cifs_stats_bytes_written(tcon, total_written);
return total_written;
}
@@ -2859,6 +2945,7 @@ cifs_uncached_readdata_release(struct kref *refcount)
struct cifs_readdata, refcount);
unsigned int i;
+ kref_put(&rdata->ctx->refcount, cifs_aio_ctx_release);
for (i = 0; i < rdata->nr_pages; i++) {
put_page(rdata->pages[i]);
rdata->pages[i] = NULL;
@@ -2900,6 +2987,8 @@ cifs_readdata_to_iov(struct cifs_readdata *rdata, struct iov_iter *iter)
return remaining ? -EFAULT : 0;
}
+static void collect_uncached_read_data(struct cifs_aio_ctx *ctx);
+
static void
cifs_uncached_readv_complete(struct work_struct *work)
{
@@ -2907,6 +2996,8 @@ cifs_uncached_readv_complete(struct work_struct *work)
struct cifs_readdata, work);
complete(&rdata->done);
+ collect_uncached_read_data(rdata->ctx);
+ /* the below call can possibly free the last ref to aio ctx */
kref_put(&rdata->refcount, cifs_uncached_readdata_release);
}
@@ -2973,7 +3064,8 @@ cifs_uncached_copy_into_pages(struct TCP_Server_Info *server,
static int
cifs_send_async_read(loff_t offset, size_t len, struct cifsFileInfo *open_file,
- struct cifs_sb_info *cifs_sb, struct list_head *rdata_list)
+ struct cifs_sb_info *cifs_sb, struct list_head *rdata_list,
+ struct cifs_aio_ctx *ctx)
{
struct cifs_readdata *rdata;
unsigned int npages, rsize, credits;
@@ -3020,6 +3112,8 @@ cifs_send_async_read(loff_t offset, size_t len, struct cifsFileInfo *open_file,
rdata->read_into_pages = cifs_uncached_read_into_pages;
rdata->copy_into_pages = cifs_uncached_copy_into_pages;
rdata->credits = credits;
+ rdata->ctx = ctx;
+ kref_get(&ctx->refcount);
if (!rdata->cfile->invalidHandle ||
!(rc = cifs_reopen_file(rdata->cfile, true)))
@@ -3042,50 +3136,37 @@ error:
return rc;
}
-ssize_t cifs_user_readv(struct kiocb *iocb, struct iov_iter *to)
+static void
+collect_uncached_read_data(struct cifs_aio_ctx *ctx)
{
- struct file *file = iocb->ki_filp;
- ssize_t rc;
- size_t len;
- ssize_t total_read = 0;
- loff_t offset = iocb->ki_pos;
+ struct cifs_readdata *rdata, *tmp;
+ struct iov_iter *to = &ctx->iter;
struct cifs_sb_info *cifs_sb;
struct cifs_tcon *tcon;
- struct cifsFileInfo *open_file;
- struct cifs_readdata *rdata, *tmp;
- struct list_head rdata_list;
-
- len = iov_iter_count(to);
- if (!len)
- return 0;
-
- INIT_LIST_HEAD(&rdata_list);
- cifs_sb = CIFS_FILE_SB(file);
- open_file = file->private_data;
- tcon = tlink_tcon(open_file->tlink);
-
- if (!tcon->ses->server->ops->async_readv)
- return -ENOSYS;
+ unsigned int i;
+ int rc;
- if ((file->f_flags & O_ACCMODE) == O_WRONLY)
- cifs_dbg(FYI, "attempting read on write only file instance\n");
+ tcon = tlink_tcon(ctx->cfile->tlink);
+ cifs_sb = CIFS_SB(ctx->cfile->dentry->d_sb);
- rc = cifs_send_async_read(offset, len, open_file, cifs_sb, &rdata_list);
+ mutex_lock(&ctx->aio_mutex);
- /* if at least one read request send succeeded, then reset rc */
- if (!list_empty(&rdata_list))
- rc = 0;
+ if (list_empty(&ctx->list)) {
+ mutex_unlock(&ctx->aio_mutex);
+ return;
+ }
- len = iov_iter_count(to);
+ rc = ctx->rc;
/* the loop below should proceed in the order of increasing offsets */
again:
- list_for_each_entry_safe(rdata, tmp, &rdata_list, list) {
+ list_for_each_entry_safe(rdata, tmp, &ctx->list, list) {
if (!rc) {
- /* FIXME: freezable sleep too? */
- rc = wait_for_completion_killable(&rdata->done);
- if (rc)
- rc = -EINTR;
- else if (rdata->result == -EAGAIN) {
+ if (!try_wait_for_completion(&rdata->done)) {
+ mutex_unlock(&ctx->aio_mutex);
+ return;
+ }
+
+ if (rdata->result == -EAGAIN) {
/* resend call if it's a retryable error */
struct list_head tmp_list;
unsigned int got_bytes = rdata->got_bytes;
@@ -3111,9 +3192,9 @@ again:
rdata->offset + got_bytes,
rdata->bytes - got_bytes,
rdata->cfile, cifs_sb,
- &tmp_list);
+ &tmp_list, ctx);
- list_splice(&tmp_list, &rdata_list);
+ list_splice(&tmp_list, &ctx->list);
kref_put(&rdata->refcount,
cifs_uncached_readdata_release);
@@ -3131,14 +3212,110 @@ again:
kref_put(&rdata->refcount, cifs_uncached_readdata_release);
}
- total_read = len - iov_iter_count(to);
+ for (i = 0; i < ctx->npages; i++) {
+ if (ctx->should_dirty)
+ set_page_dirty(ctx->bv[i].bv_page);
+ put_page(ctx->bv[i].bv_page);
+ }
+
+ ctx->total_len = ctx->len - iov_iter_count(to);
- cifs_stats_bytes_read(tcon, total_read);
+ cifs_stats_bytes_read(tcon, ctx->total_len);
/* mask nodata case */
if (rc == -ENODATA)
rc = 0;
+ ctx->rc = (rc == 0) ? ctx->total_len : rc;
+
+ mutex_unlock(&ctx->aio_mutex);
+
+ if (ctx->iocb && ctx->iocb->ki_complete)
+ ctx->iocb->ki_complete(ctx->iocb, ctx->rc, 0);
+ else
+ complete(&ctx->done);
+}
+
+ssize_t cifs_user_readv(struct kiocb *iocb, struct iov_iter *to)
+{
+ struct file *file = iocb->ki_filp;
+ ssize_t rc;
+ size_t len;
+ ssize_t total_read = 0;
+ loff_t offset = iocb->ki_pos;
+ struct cifs_sb_info *cifs_sb;
+ struct cifs_tcon *tcon;
+ struct cifsFileInfo *cfile;
+ struct cifs_aio_ctx *ctx;
+
+ len = iov_iter_count(to);
+ if (!len)
+ return 0;
+
+ cifs_sb = CIFS_FILE_SB(file);
+ cfile = file->private_data;
+ tcon = tlink_tcon(cfile->tlink);
+
+ if (!tcon->ses->server->ops->async_readv)
+ return -ENOSYS;
+
+ if ((file->f_flags & O_ACCMODE) == O_WRONLY)
+ cifs_dbg(FYI, "attempting read on write only file instance\n");
+
+ ctx = cifs_aio_ctx_alloc();
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->cfile = cifsFileInfo_get(cfile);
+
+ if (!is_sync_kiocb(iocb))
+ ctx->iocb = iocb;
+
+ if (to->type & ITER_IOVEC)
+ ctx->should_dirty = true;
+
+ rc = setup_aio_ctx_iter(ctx, to, READ);
+ if (rc) {
+ kref_put(&ctx->refcount, cifs_aio_ctx_release);
+ return rc;
+ }
+
+ len = ctx->len;
+
+ /* grab a lock here because read response handlers can access ctx */
+ mutex_lock(&ctx->aio_mutex);
+
+ rc = cifs_send_async_read(offset, len, cfile, cifs_sb, &ctx->list, ctx);
+
+ /* if sending at least one read request succeeded, then reset rc */
+ if (!list_empty(&ctx->list))
+ rc = 0;
+
+ mutex_unlock(&ctx->aio_mutex);
+
+ if (rc) {
+ kref_put(&ctx->refcount, cifs_aio_ctx_release);
+ return rc;
+ }
+
+ if (!is_sync_kiocb(iocb)) {
+ kref_put(&ctx->refcount, cifs_aio_ctx_release);
+ return -EIOCBQUEUED;
+ }
+
+ rc = wait_for_completion_killable(&ctx->done);
+ if (rc) {
+ mutex_lock(&ctx->aio_mutex);
+ ctx->rc = rc = -EINTR;
+ total_read = ctx->total_len;
+ mutex_unlock(&ctx->aio_mutex);
+ } else {
+ rc = ctx->rc;
+ total_read = ctx->total_len;
+ }
+
+ kref_put(&ctx->refcount, cifs_aio_ctx_release);
+
if (total_read) {
iocb->ki_pos += total_read;
return total_read;
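The rewritten cifs_user_readv() above queues the per-chunk reads against a reference-counted cifs_aio_ctx and, for a non-synchronous kiocb, returns -EIOCBQUEUED right away; the result is then delivered through ->ki_complete() from collect_uncached_read_data(). Below is a minimal user-space sketch of a consumer of that path, assuming libaio is available (link with -laio) and using a hypothetical file on an uncached CIFS mount:

/* Illustrative consumer of the kernel AIO read path that ends in ->ki_complete().
 * /mnt/cifs/testfile is a hypothetical path; error handling is minimal.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <libaio.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
        io_context_t ctx = 0;
        struct iocb cb, *cbs[1] = { &cb };
        struct io_event ev;
        void *buf;
        int fd, rc;

        fd = open("/mnt/cifs/testfile", O_RDONLY | O_DIRECT);
        if (fd < 0) { perror("open"); return 1; }

        if (posix_memalign(&buf, 4096, 65536)) return 1;

        rc = io_setup(1, &ctx);                 /* one in-flight request is enough here */
        if (rc < 0) { fprintf(stderr, "io_setup: %d\n", rc); return 1; }

        io_prep_pread(&cb, fd, buf, 65536, 0);  /* 64K read from offset 0 */

        rc = io_submit(ctx, 1, cbs);            /* returns once the request is queued */
        if (rc != 1) { fprintf(stderr, "io_submit: %d\n", rc); return 1; }

        /* The event shows up once all server responses have been collected. */
        rc = io_getevents(ctx, 1, 1, &ev, NULL);
        if (rc == 1)
                printf("read completed, result %lld\n", (long long)ev.res);

        io_destroy(ctx);
        free(buf);
        return 0;
}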
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index b261db34103c..c3b2fa0b2ec8 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -322,9 +322,9 @@ cifs_create_dfs_fattr(struct cifs_fattr *fattr, struct super_block *sb)
fattr->cf_mode = S_IFDIR | S_IXUGO | S_IRWXU;
fattr->cf_uid = cifs_sb->mnt_uid;
fattr->cf_gid = cifs_sb->mnt_gid;
- fattr->cf_atime = CURRENT_TIME;
- fattr->cf_ctime = CURRENT_TIME;
- fattr->cf_mtime = CURRENT_TIME;
+ ktime_get_real_ts(&fattr->cf_mtime);
+ fattr->cf_mtime = timespec_trunc(fattr->cf_mtime, sb->s_time_gran);
+ fattr->cf_atime = fattr->cf_ctime = fattr->cf_mtime;
fattr->cf_nlink = 2;
fattr->cf_flags |= CIFS_FATTR_DFS_REFERRAL;
}
@@ -586,9 +586,10 @@ static int cifs_sfu_mode(struct cifs_fattr *fattr, const unsigned char *path,
/* Fill a cifs_fattr struct with info from FILE_ALL_INFO */
static void
cifs_all_info_to_fattr(struct cifs_fattr *fattr, FILE_ALL_INFO *info,
- struct cifs_sb_info *cifs_sb, bool adjust_tz,
+ struct super_block *sb, bool adjust_tz,
bool symlink)
{
+ struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
memset(fattr, 0, sizeof(*fattr));
@@ -598,8 +599,10 @@ cifs_all_info_to_fattr(struct cifs_fattr *fattr, FILE_ALL_INFO *info,
if (info->LastAccessTime)
fattr->cf_atime = cifs_NTtimeToUnix(info->LastAccessTime);
- else
- fattr->cf_atime = CURRENT_TIME;
+ else {
+ ktime_get_real_ts(&fattr->cf_atime);
+ fattr->cf_atime = timespec_trunc(fattr->cf_atime, sb->s_time_gran);
+ }
fattr->cf_ctime = cifs_NTtimeToUnix(info->ChangeTime);
fattr->cf_mtime = cifs_NTtimeToUnix(info->LastWriteTime);
@@ -659,7 +662,6 @@ cifs_get_file_info(struct file *filp)
FILE_ALL_INFO find_data;
struct cifs_fattr fattr;
struct inode *inode = file_inode(filp);
- struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
struct cifsFileInfo *cfile = filp->private_data;
struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
struct TCP_Server_Info *server = tcon->ses->server;
@@ -671,7 +673,7 @@ cifs_get_file_info(struct file *filp)
rc = server->ops->query_file_info(xid, tcon, &cfile->fid, &find_data);
switch (rc) {
case 0:
- cifs_all_info_to_fattr(&fattr, &find_data, cifs_sb, false,
+ cifs_all_info_to_fattr(&fattr, &find_data, inode->i_sb, false,
false);
break;
case -EREMOTE:
@@ -753,7 +755,7 @@ cifs_get_inode_info(struct inode **inode, const char *full_path,
}
if (!rc) {
- cifs_all_info_to_fattr(&fattr, data, cifs_sb, adjust_tz,
+ cifs_all_info_to_fattr(&fattr, data, sb, adjust_tz,
symlink);
} else if (rc == -EREMOTE) {
cifs_create_dfs_fattr(&fattr, sb);
@@ -1363,9 +1365,9 @@ out_reval:
cifs_inode = CIFS_I(inode);
cifs_inode->time = 0; /* will force revalidate to get info
when needed */
- inode->i_ctime = current_fs_time(sb);
+ inode->i_ctime = current_time(inode);
}
- dir->i_ctime = dir->i_mtime = current_fs_time(sb);
+ dir->i_ctime = dir->i_mtime = current_time(dir);
cifs_inode = CIFS_I(dir);
CIFS_I(dir)->time = 0; /* force revalidate of dir as well */
unlink_out:
@@ -1633,7 +1635,7 @@ int cifs_rmdir(struct inode *inode, struct dentry *direntry)
cifsInode->time = 0;
d_inode(direntry)->i_ctime = inode->i_ctime = inode->i_mtime =
- current_fs_time(inode->i_sb);
+ current_time(inode);
rmdir_exit:
kfree(full_path);
@@ -1806,7 +1808,7 @@ unlink_target:
CIFS_I(source_dir)->time = CIFS_I(target_dir)->time = 0;
source_dir->i_ctime = source_dir->i_mtime = target_dir->i_ctime =
- target_dir->i_mtime = current_fs_time(source_dir->i_sb);
+ target_dir->i_mtime = current_time(source_dir);
cifs_rename_exit:
kfree(info_buf_source);
diff --git a/fs/cifs/ioctl.c b/fs/cifs/ioctl.c
index 7f4bba574930..76fb0917dc8c 100644
--- a/fs/cifs/ioctl.c
+++ b/fs/cifs/ioctl.c
@@ -209,10 +209,14 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
rc = -EOPNOTSUPP;
break;
case CIFS_IOC_GET_MNT_INFO:
+ if (pSMBFile == NULL)
+ break;
tcon = tlink_tcon(pSMBFile->tlink);
rc = smb_mnt_get_fsinfo(xid, tcon, (void __user *)arg);
break;
case CIFS_ENUMERATE_SNAPSHOTS:
+ if (pSMBFile == NULL)
+ break;
if (arg == 0) {
rc = -EINVAL;
goto cifs_ioc_exit;
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index 843787850435..b08531977daa 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -22,6 +22,7 @@
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/mempool.h>
+#include <linux/vmalloc.h>
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
@@ -488,7 +489,7 @@ is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv)
CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
&pCifsInode->flags);
- queue_work(cifsiod_wq,
+ queue_work(cifsoplockd_wq,
&netfile->oplock_break);
netfile->oplock_break_cancelled = false;
@@ -741,3 +742,122 @@ parse_DFS_referrals_exit:
}
return rc;
}
+
+struct cifs_aio_ctx *
+cifs_aio_ctx_alloc(void)
+{
+ struct cifs_aio_ctx *ctx;
+
+ ctx = kzalloc(sizeof(struct cifs_aio_ctx), GFP_KERNEL);
+ if (!ctx)
+ return NULL;
+
+ INIT_LIST_HEAD(&ctx->list);
+ mutex_init(&ctx->aio_mutex);
+ init_completion(&ctx->done);
+ kref_init(&ctx->refcount);
+ return ctx;
+}
+
+void
+cifs_aio_ctx_release(struct kref *refcount)
+{
+ struct cifs_aio_ctx *ctx = container_of(refcount,
+ struct cifs_aio_ctx, refcount);
+
+ cifsFileInfo_put(ctx->cfile);
+ kvfree(ctx->bv);
+ kfree(ctx);
+}
+
+#define CIFS_AIO_KMALLOC_LIMIT (1024 * 1024)
+
+int
+setup_aio_ctx_iter(struct cifs_aio_ctx *ctx, struct iov_iter *iter, int rw)
+{
+ ssize_t rc;
+ unsigned int cur_npages;
+ unsigned int npages = 0;
+ unsigned int i;
+ size_t len;
+ size_t count = iov_iter_count(iter);
+ unsigned int saved_len;
+ size_t start;
+ unsigned int max_pages = iov_iter_npages(iter, INT_MAX);
+ struct page **pages = NULL;
+ struct bio_vec *bv = NULL;
+
+ if (iter->type & ITER_KVEC) {
+ memcpy(&ctx->iter, iter, sizeof(struct iov_iter));
+ ctx->len = count;
+ iov_iter_advance(iter, count);
+ return 0;
+ }
+
+ if (max_pages * sizeof(struct bio_vec) <= CIFS_AIO_KMALLOC_LIMIT)
+ bv = kmalloc_array(max_pages, sizeof(struct bio_vec),
+ GFP_KERNEL);
+
+ if (!bv) {
+ bv = vmalloc(max_pages * sizeof(struct bio_vec));
+ if (!bv)
+ return -ENOMEM;
+ }
+
+ if (max_pages * sizeof(struct page *) <= CIFS_AIO_KMALLOC_LIMIT)
+ pages = kmalloc_array(max_pages, sizeof(struct page *),
+ GFP_KERNEL);
+
+ if (!pages) {
+ pages = vmalloc(max_pages * sizeof(struct page *));
+ if (!pages) {
+ kvfree(bv);
+ return -ENOMEM;
+ }
+ }
+
+ saved_len = count;
+
+ while (count && npages < max_pages) {
+ rc = iov_iter_get_pages(iter, pages, count, max_pages, &start);
+ if (rc < 0) {
+ cifs_dbg(VFS, "couldn't get user pages (rc=%zd)\n", rc);
+ break;
+ }
+
+ if (rc > count) {
+ cifs_dbg(VFS, "get pages rc=%zd more than %zu\n", rc,
+ count);
+ break;
+ }
+
+ iov_iter_advance(iter, rc);
+ count -= rc;
+ rc += start;
+ cur_npages = DIV_ROUND_UP(rc, PAGE_SIZE);
+
+ if (npages + cur_npages > max_pages) {
+ cifs_dbg(VFS, "out of vec array capacity (%u vs %u)\n",
+ npages + cur_npages, max_pages);
+ break;
+ }
+
+ for (i = 0; i < cur_npages; i++) {
+ len = rc > PAGE_SIZE ? PAGE_SIZE : rc;
+ bv[npages + i].bv_page = pages[i];
+ bv[npages + i].bv_offset = start;
+ bv[npages + i].bv_len = len - start;
+ rc -= len;
+ start = 0;
+ }
+
+ npages += cur_npages;
+ }
+
+ kvfree(pages);
+ ctx->bv = bv;
+ ctx->len = saved_len - count;
+ ctx->npages = npages;
+ iov_iter_bvec(&ctx->iter, ITER_BVEC | rw, ctx->bv, npages, ctx->len);
+ return 0;
+}
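setup_aio_ctx_iter() pins the user buffer's pages and rebuilds them into a bio_vec-backed iterator, falling back from kmalloc_array() to vmalloc() once either array would exceed CIFS_AIO_KMALLOC_LIMIT. The per-page bookkeeping in the inner loop, where only the first page carries a non-zero bv_offset, is easy to check in isolation; the standalone sketch below uses made-up byte counts:

/* Standalone check of the bv_offset/bv_len bookkeeping in the loop above.
 * PAGE_SIZE, the byte count and the starting offset are made-up inputs.
 */
#include <stdio.h>

#define PAGE_SIZE 4096UL

int main(void)
{
        unsigned long got = 10000;  /* bytes reported by iov_iter_get_pages() */
        unsigned long start = 300;  /* offset of the data within the first page */
        unsigned long rc = got + start;
        unsigned long npages = (rc + PAGE_SIZE - 1) / PAGE_SIZE;  /* DIV_ROUND_UP */

        for (unsigned long i = 0; i < npages; i++) {
                unsigned long len = rc > PAGE_SIZE ? PAGE_SIZE : rc;

                printf("page %lu: bv_offset=%lu bv_len=%lu\n", i, start, len - start);
                rc -= len;
                start = 0;  /* only the first page starts mid-page */
        }
        return 0;  /* the three bv_len values add up to the 10000 bytes pinned */
}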
diff --git a/fs/cifs/netmisc.c b/fs/cifs/netmisc.c
index abae6dd2c6b9..cc88f4f0325e 100644
--- a/fs/cifs/netmisc.c
+++ b/fs/cifs/netmisc.c
@@ -980,10 +980,10 @@ struct timespec cnvrtDosUnixTm(__le16 le_date, __le16 le_time, int offset)
cifs_dbg(VFS, "illegal hours %d\n", st->Hours);
days = sd->Day;
month = sd->Month;
- if ((days > 31) || (month > 12)) {
+ if (days < 1 || days > 31 || month < 1 || month > 12) {
cifs_dbg(VFS, "illegal date, month %d day: %d\n", month, days);
- if (month > 12)
- month = 12;
+ days = clamp(days, 1, 31);
+ month = clamp(month, 1, 12);
}
month -= 1;
days += total_days_of_prev_months[month];
diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c
index 1a04b3a5beb1..7b08a1446a7f 100644
--- a/fs/cifs/smb2misc.c
+++ b/fs/cifs/smb2misc.c
@@ -499,7 +499,7 @@ smb2_tcon_has_lease(struct cifs_tcon *tcon, struct smb2_lease_break *rsp,
else
cfile->oplock_break_cancelled = true;
- queue_work(cifsiod_wq, &cfile->oplock_break);
+ queue_work(cifsoplockd_wq, &cfile->oplock_break);
kfree(lw);
return true;
}
@@ -643,7 +643,8 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server)
CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
&cinode->flags);
spin_unlock(&cfile->file_info_lock);
- queue_work(cifsiod_wq, &cfile->oplock_break);
+ queue_work(cifsoplockd_wq,
+ &cfile->oplock_break);
spin_unlock(&tcon->open_file_lock);
spin_unlock(&cifs_tcp_ses_lock);
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index 152e37f2ad92..c58691834eb2 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -942,6 +942,7 @@ smb3_enum_snapshots(const unsigned int xid, struct cifs_tcon *tcon,
}
if (snapshot_in.snapshot_array_size < sizeof(struct smb_snapshot_array)) {
rc = -ERANGE;
+ kfree(retbuf);
return rc;
}
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index fb0da096c2ce..48ff7703b919 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -633,8 +633,12 @@ int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon)
}
if (rsplen != sizeof(struct validate_negotiate_info_rsp)) {
- cifs_dbg(VFS, "invalid size of protocol negotiate response\n");
- return -EIO;
+ cifs_dbg(VFS, "invalid protocol negotiate response size: %d\n",
+ rsplen);
+
+ /* relax check since Mac returns max bufsize allowed on ioctl */
+ if (rsplen > CIFSMaxBufSize)
+ return -EIO;
}
/* check validate negotiate info response matches what we got earlier */
@@ -1854,8 +1858,12 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
* than one credit. Windows typically sets this smaller, but for some
* ioctls it may be useful to allow server to send more. No point
* limiting what the server can send as long as fits in one credit
+ * Unfortunately, we cannot handle more than CIFS_MAX_MSG_SIZE
+ * (by default; note that it can be overridden to make the max larger)
+ * in responses, except for read responses, which can be bigger.
+ * We may want to bump this limit up.
*/
- req->MaxOutputResponse = cpu_to_le32(0xFF00); /* < 64K uses 1 credit */
+ req->MaxOutputResponse = cpu_to_le32(CIFSMaxBufSize);
if (is_fsctl)
req->Flags = cpu_to_le32(SMB2_0_IOCTL_IS_FSCTL);
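The comment above refers to SMB3 crediting: a request or response is charged one credit per 64K, or fraction thereof, which is why the previous 0xFF00 cap stayed within a single credit. A small illustration of that arithmetic follows; the sizes are arbitrary examples and the charge formula is quoted from the general SMB3 crediting rules rather than from this patch:

/* Rough illustration of SMB3 credit charging: one credit per 64K (or
 * fraction thereof) of payload.
 */
#include <stdio.h>

static unsigned int smb3_credit_charge(unsigned int bytes)
{
        return bytes ? (bytes - 1) / 65536 + 1 : 1;
}

int main(void)
{
        printf("0xFF00 bytes  -> %u credit(s)\n", smb3_credit_charge(0xFF00));   /* 1 */
        printf("64K bytes     -> %u credit(s)\n", smb3_credit_charge(65536));    /* 1 */
        printf("128K response -> %u credit(s)\n", smb3_credit_charge(131072));   /* 2 */
        return 0;
}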
diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
index 11d087b2b28e..6116d5275a3e 100644
--- a/fs/compat_ioctl.c
+++ b/fs/compat_ioctl.c
@@ -833,7 +833,7 @@ static int compat_ioctl_preallocate(struct file *file,
*/
#define XFORM(i) (((i) ^ ((i) << 27) ^ ((i) << 17)) & 0xffffffff)
-#define COMPATIBLE_IOCTL(cmd) XFORM(cmd),
+#define COMPATIBLE_IOCTL(cmd) XFORM((u32)cmd),
/* ioctl should not be warned about even if it's not implemented.
Valid reasons to use this:
- It is implemented with ->compat_ioctl on some device, but programs
diff --git a/fs/crypto/fname.c b/fs/crypto/fname.c
index 37b49894c762..d1bb02b1ee58 100644
--- a/fs/crypto/fname.c
+++ b/fs/crypto/fname.c
@@ -159,6 +159,8 @@ static int fname_decrypt(struct inode *inode,
static const char *lookup_table =
"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+,";
+#define BASE64_CHARS(nbytes) DIV_ROUND_UP((nbytes) * 4, 3)
+
/**
* digest_encode() -
*
@@ -230,11 +232,14 @@ EXPORT_SYMBOL(fscrypt_fname_encrypted_size);
int fscrypt_fname_alloc_buffer(const struct inode *inode,
u32 ilen, struct fscrypt_str *crypto_str)
{
- unsigned int olen = fscrypt_fname_encrypted_size(inode, ilen);
+ u32 olen = fscrypt_fname_encrypted_size(inode, ilen);
+ const u32 max_encoded_len =
+ max_t(u32, BASE64_CHARS(FSCRYPT_FNAME_MAX_UNDIGESTED_SIZE),
+ 1 + BASE64_CHARS(sizeof(struct fscrypt_digested_name)));
crypto_str->len = olen;
- if (olen < FS_FNAME_CRYPTO_DIGEST_SIZE * 2)
- olen = FS_FNAME_CRYPTO_DIGEST_SIZE * 2;
+ olen = max(olen, max_encoded_len);
+
/*
* Allocated buffer can hold one more character to null-terminate the
* string
@@ -266,6 +271,10 @@ EXPORT_SYMBOL(fscrypt_fname_free_buffer);
*
* The caller must have allocated sufficient memory for the @oname string.
*
+ * If the key is available, we'll decrypt the disk name; otherwise, we'll encode
+ * it for presentation. Short names are directly base64-encoded, while long
+ * names are encoded in fscrypt_digested_name format.
+ *
* Return: 0 on success, -errno on failure
*/
int fscrypt_fname_disk_to_usr(struct inode *inode,
@@ -274,7 +283,7 @@ int fscrypt_fname_disk_to_usr(struct inode *inode,
struct fscrypt_str *oname)
{
const struct qstr qname = FSTR_TO_QSTR(iname);
- char buf[24];
+ struct fscrypt_digested_name digested_name;
if (fscrypt_is_dot_dotdot(&qname)) {
oname->name[0] = '.';
@@ -289,20 +298,24 @@ int fscrypt_fname_disk_to_usr(struct inode *inode,
if (inode->i_crypt_info)
return fname_decrypt(inode, iname, oname);
- if (iname->len <= FS_FNAME_CRYPTO_DIGEST_SIZE) {
+ if (iname->len <= FSCRYPT_FNAME_MAX_UNDIGESTED_SIZE) {
oname->len = digest_encode(iname->name, iname->len,
oname->name);
return 0;
}
if (hash) {
- memcpy(buf, &hash, 4);
- memcpy(buf + 4, &minor_hash, 4);
+ digested_name.hash = hash;
+ digested_name.minor_hash = minor_hash;
} else {
- memset(buf, 0, 8);
+ digested_name.hash = 0;
+ digested_name.minor_hash = 0;
}
- memcpy(buf + 8, iname->name + iname->len - 16, 16);
+ memcpy(digested_name.digest,
+ FSCRYPT_FNAME_DIGEST(iname->name, iname->len),
+ FSCRYPT_FNAME_DIGEST_SIZE);
oname->name[0] = '_';
- oname->len = 1 + digest_encode(buf, 24, oname->name + 1);
+ oname->len = 1 + digest_encode((const char *)&digested_name,
+ sizeof(digested_name), oname->name + 1);
return 0;
}
EXPORT_SYMBOL(fscrypt_fname_disk_to_usr);
@@ -336,10 +349,35 @@ int fscrypt_fname_usr_to_disk(struct inode *inode,
}
EXPORT_SYMBOL(fscrypt_fname_usr_to_disk);
+/**
+ * fscrypt_setup_filename() - prepare to search a possibly encrypted directory
+ * @dir: the directory that will be searched
+ * @iname: the user-provided filename being searched for
+ * @lookup: 1 if we're allowed to proceed without the key because it's
+ * ->lookup() or we're finding the dir_entry for deletion; 0 if we cannot
+ * proceed without the key because we're going to create the dir_entry.
+ * @fname: the filename information to be filled in
+ *
+ * Given a user-provided filename @iname, this function sets @fname->disk_name
+ * to the name that would be stored in the on-disk directory entry, if possible.
+ * If the directory is unencrypted this is simply @iname. Else, if we have the
+ * directory's encryption key, then @iname is the plaintext, so we encrypt it to
+ * get the disk_name.
+ *
+ * Else, for keyless @lookup operations, @iname is the presented ciphertext, so
+ * we decode it to get either the ciphertext disk_name (for short names) or the
+ * fscrypt_digested_name (for long names). Non-@lookup operations will be
+ * impossible in this case, so we fail them with ENOKEY.
+ *
+ * If successful, fscrypt_free_filename() must be called later to clean up.
+ *
+ * Return: 0 on success, -errno on failure
+ */
int fscrypt_setup_filename(struct inode *dir, const struct qstr *iname,
int lookup, struct fscrypt_name *fname)
{
- int ret = 0, bigname = 0;
+ int ret;
+ int digested;
memset(fname, 0, sizeof(struct fscrypt_name));
fname->usr_fname = iname;
@@ -373,25 +411,37 @@ int fscrypt_setup_filename(struct inode *dir, const struct qstr *iname,
* We don't have the key and we are doing a lookup; decode the
* user-supplied name
*/
- if (iname->name[0] == '_')
- bigname = 1;
- if ((bigname && (iname->len != 33)) || (!bigname && (iname->len > 43)))
- return -ENOENT;
+ if (iname->name[0] == '_') {
+ if (iname->len !=
+ 1 + BASE64_CHARS(sizeof(struct fscrypt_digested_name)))
+ return -ENOENT;
+ digested = 1;
+ } else {
+ if (iname->len >
+ BASE64_CHARS(FSCRYPT_FNAME_MAX_UNDIGESTED_SIZE))
+ return -ENOENT;
+ digested = 0;
+ }
- fname->crypto_buf.name = kmalloc(32, GFP_KERNEL);
+ fname->crypto_buf.name =
+ kmalloc(max_t(size_t, FSCRYPT_FNAME_MAX_UNDIGESTED_SIZE,
+ sizeof(struct fscrypt_digested_name)),
+ GFP_KERNEL);
if (fname->crypto_buf.name == NULL)
return -ENOMEM;
- ret = digest_decode(iname->name + bigname, iname->len - bigname,
+ ret = digest_decode(iname->name + digested, iname->len - digested,
fname->crypto_buf.name);
if (ret < 0) {
ret = -ENOENT;
goto errout;
}
fname->crypto_buf.len = ret;
- if (bigname) {
- memcpy(&fname->hash, fname->crypto_buf.name, 4);
- memcpy(&fname->minor_hash, fname->crypto_buf.name + 4, 4);
+ if (digested) {
+ const struct fscrypt_digested_name *n =
+ (const void *)fname->crypto_buf.name;
+ fname->hash = n->hash;
+ fname->minor_hash = n->minor_hash;
} else {
fname->disk_name.name = fname->crypto_buf.name;
fname->disk_name.len = fname->crypto_buf.len;
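The length checks above replace the old literal 33/43 limits with BASE64_CHARS() over the relevant structure sizes: a keyless lookup name is either a short ciphertext encoded directly, or '_' followed by the encoded struct fscrypt_digested_name (a 4-byte hash, 4-byte minor_hash and 16-byte digest, per the memcpy earlier in this diff). The worked example below reproduces the arithmetic; the sizes are read off the code shown here and should be treated as illustrative:

/* Worked example of the BASE64_CHARS() length arithmetic. The structure sizes
 * mirror the code above (4 + 4 + 16 bytes for a digested name, 32 bytes for
 * the largest undigested name) and are illustrative, not authoritative.
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)   (((n) + (d) - 1) / (d))
#define BASE64_CHARS(nbytes) DIV_ROUND_UP((nbytes) * 4, 3)

int main(void)
{
        unsigned int undigested_max = 32;          /* max short ciphertext name */
        unsigned int digested_size  = 4 + 4 + 16;  /* hash + minor_hash + digest */

        printf("short names encode to at most %u chars\n",
               BASE64_CHARS(undigested_max));                  /* 43 */
        printf("long names encode to exactly 1 + %u = %u chars\n",
               BASE64_CHARS(digested_size),
               1 + BASE64_CHARS(digested_size));               /* '_' + 32 = 33 */
        return 0;
}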
diff --git a/fs/crypto/fscrypt_private.h b/fs/crypto/fscrypt_private.h
index e39696e64494..1e1f8a361b75 100644
--- a/fs/crypto/fscrypt_private.h
+++ b/fs/crypto/fscrypt_private.h
@@ -13,8 +13,6 @@
#include <linux/fscrypt_supp.h>
-#define FS_FNAME_CRYPTO_DIGEST_SIZE 32
-
/* Encryption parameters */
#define FS_XTS_TWEAK_SIZE 16
#define FS_AES_128_ECB_KEY_SIZE 16
@@ -22,10 +20,6 @@
#define FS_AES_256_CBC_KEY_SIZE 32
#define FS_AES_256_CTS_KEY_SIZE 32
#define FS_AES_256_XTS_KEY_SIZE 64
-#define FS_MAX_KEY_SIZE 64
-
-#define FS_KEY_DESC_PREFIX "fscrypt:"
-#define FS_KEY_DESC_PREFIX_SIZE 8
#define FS_KEY_DERIVATION_NONCE_SIZE 16
@@ -51,13 +45,6 @@ struct fscrypt_context {
#define FS_ENCRYPTION_CONTEXT_FORMAT_V1 1
-/* This is passed in from userspace into the kernel keyring */
-struct fscrypt_key {
- u32 mode;
- u8 raw[FS_MAX_KEY_SIZE];
- u32 size;
-} __packed;
-
/*
* A pointer to this structure is stored in the file system's in-core
* representation of an inode.
diff --git a/fs/crypto/keyinfo.c b/fs/crypto/keyinfo.c
index 8cdfddce2b34..179e578b875b 100644
--- a/fs/crypto/keyinfo.c
+++ b/fs/crypto/keyinfo.c
@@ -183,9 +183,6 @@ int fscrypt_get_encryption_info(struct inode *inode)
if (res)
return res;
- if (!inode->i_sb->s_cop->get_context)
- return -EOPNOTSUPP;
-
res = inode->i_sb->s_cop->get_context(inode, &ctx, sizeof(ctx));
if (res < 0) {
if (!fscrypt_dummy_context_enabled(inode) ||
diff --git a/fs/crypto/policy.c b/fs/crypto/policy.c
index 4908906d54d5..210976e7a269 100644
--- a/fs/crypto/policy.c
+++ b/fs/crypto/policy.c
@@ -34,9 +34,6 @@ static int create_encryption_context_from_policy(struct inode *inode,
{
struct fscrypt_context ctx;
- if (!inode->i_sb->s_cop->set_context)
- return -EOPNOTSUPP;
-
ctx.format = FS_ENCRYPTION_CONTEXT_FORMAT_V1;
memcpy(ctx.master_key_descriptor, policy->master_key_descriptor,
FS_KEY_DESCRIPTOR_SIZE);
@@ -87,8 +84,6 @@ int fscrypt_ioctl_set_policy(struct file *filp, const void __user *arg)
if (ret == -ENODATA) {
if (!S_ISDIR(inode->i_mode))
ret = -ENOTDIR;
- else if (!inode->i_sb->s_cop->empty_dir)
- ret = -EOPNOTSUPP;
else if (!inode->i_sb->s_cop->empty_dir(inode))
ret = -ENOTEMPTY;
else
@@ -118,8 +113,7 @@ int fscrypt_ioctl_get_policy(struct file *filp, void __user *arg)
struct fscrypt_policy policy;
int res;
- if (!inode->i_sb->s_cop->get_context ||
- !inode->i_sb->s_cop->is_encrypted(inode))
+ if (!inode->i_sb->s_cop->is_encrypted(inode))
return -ENODATA;
res = inode->i_sb->s_cop->get_context(inode, &ctx, sizeof(ctx));
@@ -143,27 +137,61 @@ int fscrypt_ioctl_get_policy(struct file *filp, void __user *arg)
}
EXPORT_SYMBOL(fscrypt_ioctl_get_policy);
+/**
+ * fscrypt_has_permitted_context() - is a file's encryption policy permitted
+ * within its directory?
+ *
+ * @parent: inode for parent directory
+ * @child: inode for file being looked up, opened, or linked into @parent
+ *
+ * Filesystems must call this before permitting access to an inode in a
+ * situation where the parent directory is encrypted (either before allowing
+ * ->lookup() to succeed, or for a regular file before allowing it to be opened)
+ * and before any operation that involves linking an inode into an encrypted
+ * directory, including link, rename, and cross rename. It enforces the
+ * constraint that within a given encrypted directory tree, all files use the
+ * same encryption policy. The pre-access check is needed to detect potentially
+ * malicious offline violations of this constraint, while the link and rename
+ * checks are needed to prevent online violations of this constraint.
+ *
+ * Return: 1 if permitted, 0 if forbidden. If forbidden, the caller must fail
+ * the filesystem operation with EPERM.
+ */
int fscrypt_has_permitted_context(struct inode *parent, struct inode *child)
{
- struct fscrypt_info *parent_ci, *child_ci;
+ const struct fscrypt_operations *cops = parent->i_sb->s_cop;
+ const struct fscrypt_info *parent_ci, *child_ci;
+ struct fscrypt_context parent_ctx, child_ctx;
int res;
- if ((parent == NULL) || (child == NULL)) {
- printk(KERN_ERR "parent %p child %p\n", parent, child);
- BUG_ON(1);
- }
-
/* No restrictions on file types which are never encrypted */
if (!S_ISREG(child->i_mode) && !S_ISDIR(child->i_mode) &&
!S_ISLNK(child->i_mode))
return 1;
- /* no restrictions if the parent directory is not encrypted */
- if (!parent->i_sb->s_cop->is_encrypted(parent))
+ /* No restrictions if the parent directory is unencrypted */
+ if (!cops->is_encrypted(parent))
return 1;
- /* if the child directory is not encrypted, this is always a problem */
- if (!parent->i_sb->s_cop->is_encrypted(child))
+
+ /* Encrypted directories must not contain unencrypted files */
+ if (!cops->is_encrypted(child))
return 0;
+
+ /*
+ * Both parent and child are encrypted, so verify they use the same
+ * encryption policy. Compare the fscrypt_info structs if the keys are
+ * available, otherwise retrieve and compare the fscrypt_contexts.
+ *
+ * Note that the fscrypt_context retrieval will be required frequently
+ * when accessing an encrypted directory tree without the key.
+ * Performance-wise this is not a big deal because we already don't
+ * really optimize for file access without the key (to the extent that
+ * such access is even possible), given that any attempted access
+ * already causes a fscrypt_context retrieval and keyring search.
+ *
+ * In any case, if an unexpected error occurs, fall back to "forbidden".
+ */
+
res = fscrypt_get_encryption_info(parent);
if (res)
return 0;
@@ -172,17 +200,32 @@ int fscrypt_has_permitted_context(struct inode *parent, struct inode *child)
return 0;
parent_ci = parent->i_crypt_info;
child_ci = child->i_crypt_info;
- if (!parent_ci && !child_ci)
- return 1;
- if (!parent_ci || !child_ci)
+
+ if (parent_ci && child_ci) {
+ return memcmp(parent_ci->ci_master_key, child_ci->ci_master_key,
+ FS_KEY_DESCRIPTOR_SIZE) == 0 &&
+ (parent_ci->ci_data_mode == child_ci->ci_data_mode) &&
+ (parent_ci->ci_filename_mode ==
+ child_ci->ci_filename_mode) &&
+ (parent_ci->ci_flags == child_ci->ci_flags);
+ }
+
+ res = cops->get_context(parent, &parent_ctx, sizeof(parent_ctx));
+ if (res != sizeof(parent_ctx))
return 0;
- return (memcmp(parent_ci->ci_master_key,
- child_ci->ci_master_key,
- FS_KEY_DESCRIPTOR_SIZE) == 0 &&
- (parent_ci->ci_data_mode == child_ci->ci_data_mode) &&
- (parent_ci->ci_filename_mode == child_ci->ci_filename_mode) &&
- (parent_ci->ci_flags == child_ci->ci_flags));
+ res = cops->get_context(child, &child_ctx, sizeof(child_ctx));
+ if (res != sizeof(child_ctx))
+ return 0;
+
+ return memcmp(parent_ctx.master_key_descriptor,
+ child_ctx.master_key_descriptor,
+ FS_KEY_DESCRIPTOR_SIZE) == 0 &&
+ (parent_ctx.contents_encryption_mode ==
+ child_ctx.contents_encryption_mode) &&
+ (parent_ctx.filenames_encryption_mode ==
+ child_ctx.filenames_encryption_mode) &&
+ (parent_ctx.flags == child_ctx.flags);
}
EXPORT_SYMBOL(fscrypt_has_permitted_context);
@@ -202,9 +245,6 @@ int fscrypt_inherit_context(struct inode *parent, struct inode *child,
struct fscrypt_info *ci;
int res;
- if (!parent->i_sb->s_cop->set_context)
- return -EOPNOTSUPP;
-
res = fscrypt_get_encryption_info(parent);
if (res < 0)
return res;
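fscrypt_has_permitted_context() now applies the same "identical policy" rule whether it compares in-core fscrypt_info structs or on-disk fscrypt_contexts, so the check also works when the key is absent. A minimal stand-alone sketch of that rule follows, using a stand-in struct rather than the kernel's fscrypt_context:

/* Minimal sketch of the "same policy" rule enforced above: files may share an
 * encrypted tree only if the key descriptor, both encryption modes and the
 * flags all match. The struct here is a stand-in, not the kernel type.
 */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define KEY_DESC_SIZE 8

struct policy {
        unsigned char master_key_descriptor[KEY_DESC_SIZE];
        unsigned char contents_encryption_mode;
        unsigned char filenames_encryption_mode;
        unsigned char flags;
};

static bool same_policy(const struct policy *a, const struct policy *b)
{
        return memcmp(a->master_key_descriptor, b->master_key_descriptor,
                      KEY_DESC_SIZE) == 0 &&
               a->contents_encryption_mode == b->contents_encryption_mode &&
               a->filenames_encryption_mode == b->filenames_encryption_mode &&
               a->flags == b->flags;
}

int main(void)
{
        struct policy parent = { "descr01", 1, 4, 0 };
        struct policy child = parent;

        printf("same tree allowed: %d\n", same_policy(&parent, &child));  /* 1 */
        child.flags = 2;
        printf("after flag change: %d\n", same_policy(&parent, &child));  /* 0 */
        return 0;
}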
diff --git a/fs/dax.c b/fs/dax.c
index 6433650be833..66d79067eedf 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -55,32 +55,6 @@ static int __init init_dax_wait_table(void)
}
fs_initcall(init_dax_wait_table);
-static long dax_map_atomic(struct block_device *bdev, struct blk_dax_ctl *dax)
-{
- struct request_queue *q = bdev->bd_queue;
- long rc = -EIO;
-
- dax->addr = ERR_PTR(-EIO);
- if (blk_queue_enter(q, true) != 0)
- return rc;
-
- rc = bdev_direct_access(bdev, dax);
- if (rc < 0) {
- dax->addr = ERR_PTR(rc);
- blk_queue_exit(q);
- return rc;
- }
- return rc;
-}
-
-static void dax_unmap_atomic(struct block_device *bdev,
- const struct blk_dax_ctl *dax)
-{
- if (IS_ERR(dax->addr))
- return;
- blk_queue_exit(bdev->bd_queue);
-}
-
static int dax_is_pmd_entry(void *entry)
{
return (unsigned long)entry & RADIX_DAX_PMD;
@@ -101,26 +75,6 @@ static int dax_is_empty_entry(void *entry)
return (unsigned long)entry & RADIX_DAX_EMPTY;
}
-struct page *read_dax_sector(struct block_device *bdev, sector_t n)
-{
- struct page *page = alloc_pages(GFP_KERNEL, 0);
- struct blk_dax_ctl dax = {
- .size = PAGE_SIZE,
- .sector = n & ~((((int) PAGE_SIZE) / 512) - 1),
- };
- long rc;
-
- if (!page)
- return ERR_PTR(-ENOMEM);
-
- rc = dax_map_atomic(bdev, &dax);
- if (rc < 0)
- return ERR_PTR(rc);
- memcpy_from_pmem(page_address(page), dax.addr, PAGE_SIZE);
- dax_unmap_atomic(bdev, &dax);
- return page;
-}
-
/*
* DAX radix tree locking
*/
@@ -555,21 +509,25 @@ int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
static int dax_load_hole(struct address_space *mapping, void **entry,
struct vm_fault *vmf)
{
+ struct inode *inode = mapping->host;
struct page *page;
int ret;
/* Hole page already exists? Return it... */
if (!radix_tree_exceptional_entry(*entry)) {
page = *entry;
- goto out;
+ goto finish_fault;
}
/* This will replace locked radix tree entry with a hole page */
page = find_or_create_page(mapping, vmf->pgoff,
vmf->gfp_mask | __GFP_ZERO);
- if (!page)
- return VM_FAULT_OOM;
- out:
+ if (!page) {
+ ret = VM_FAULT_OOM;
+ goto out;
+ }
+
+finish_fault:
vmf->page = page;
ret = finish_fault(vmf);
vmf->page = NULL;
@@ -577,26 +535,37 @@ static int dax_load_hole(struct address_space *mapping, void **entry,
if (!ret) {
/* Grab reference for PTE that is now referencing the page */
get_page(page);
- return VM_FAULT_NOPAGE;
+ ret = VM_FAULT_NOPAGE;
}
+out:
+ trace_dax_load_hole(inode, vmf, ret);
return ret;
}
-static int copy_user_dax(struct block_device *bdev, sector_t sector, size_t size,
- struct page *to, unsigned long vaddr)
+static int copy_user_dax(struct block_device *bdev, struct dax_device *dax_dev,
+ sector_t sector, size_t size, struct page *to,
+ unsigned long vaddr)
{
- struct blk_dax_ctl dax = {
- .sector = sector,
- .size = size,
- };
- void *vto;
-
- if (dax_map_atomic(bdev, &dax) < 0)
- return PTR_ERR(dax.addr);
+ void *vto, *kaddr;
+ pgoff_t pgoff;
+ pfn_t pfn;
+ long rc;
+ int id;
+
+ rc = bdev_dax_pgoff(bdev, sector, size, &pgoff);
+ if (rc)
+ return rc;
+
+ id = dax_read_lock();
+ rc = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), &kaddr, &pfn);
+ if (rc < 0) {
+ dax_read_unlock(id);
+ return rc;
+ }
vto = kmap_atomic(to);
- copy_user_page(vto, (void __force *)dax.addr, vaddr, to);
+ copy_user_page(vto, (void __force *)kaddr, vaddr, to);
kunmap_atomic(vto);
- dax_unmap_atomic(bdev, &dax);
+ dax_read_unlock(id);
return 0;
}
@@ -764,12 +733,16 @@ unlock_pte:
}
static int dax_writeback_one(struct block_device *bdev,
- struct address_space *mapping, pgoff_t index, void *entry)
+ struct dax_device *dax_dev, struct address_space *mapping,
+ pgoff_t index, void *entry)
{
struct radix_tree_root *page_tree = &mapping->page_tree;
- struct blk_dax_ctl dax;
- void *entry2, **slot;
- int ret = 0;
+ void *entry2, **slot, *kaddr;
+ long ret = 0, id;
+ sector_t sector;
+ pgoff_t pgoff;
+ size_t size;
+ pfn_t pfn;
/*
* A page got tagged dirty in DAX mapping? Something is seriously
@@ -818,26 +791,29 @@ static int dax_writeback_one(struct block_device *bdev,
* 'entry'. This allows us to flush for PMD_SIZE and not have to
* worry about partial PMD writebacks.
*/
- dax.sector = dax_radix_sector(entry);
- dax.size = PAGE_SIZE << dax_radix_order(entry);
+ sector = dax_radix_sector(entry);
+ size = PAGE_SIZE << dax_radix_order(entry);
+
+ id = dax_read_lock();
+ ret = bdev_dax_pgoff(bdev, sector, size, &pgoff);
+ if (ret)
+ goto dax_unlock;
/*
- * We cannot hold tree_lock while calling dax_map_atomic() because it
- * eventually calls cond_resched().
+ * dax_direct_access() may sleep, so cannot hold tree_lock over
+ * its invocation.
*/
- ret = dax_map_atomic(bdev, &dax);
- if (ret < 0) {
- put_locked_mapping_entry(mapping, index, entry);
- return ret;
- }
+ ret = dax_direct_access(dax_dev, pgoff, size / PAGE_SIZE, &kaddr, &pfn);
+ if (ret < 0)
+ goto dax_unlock;
- if (WARN_ON_ONCE(ret < dax.size)) {
+ if (WARN_ON_ONCE(ret < size / PAGE_SIZE)) {
ret = -EIO;
- goto unmap;
+ goto dax_unlock;
}
- dax_mapping_entry_mkclean(mapping, index, pfn_t_to_pfn(dax.pfn));
- wb_cache_pmem(dax.addr, dax.size);
+ dax_mapping_entry_mkclean(mapping, index, pfn_t_to_pfn(pfn));
+ wb_cache_pmem(kaddr, size);
/*
* After we have flushed the cache, we can clear the dirty tag. There
* cannot be new dirty data in the pfn after the flush has completed as
@@ -847,8 +823,9 @@ static int dax_writeback_one(struct block_device *bdev,
spin_lock_irq(&mapping->tree_lock);
radix_tree_tag_clear(page_tree, index, PAGECACHE_TAG_DIRTY);
spin_unlock_irq(&mapping->tree_lock);
- unmap:
- dax_unmap_atomic(bdev, &dax);
+ trace_dax_writeback_one(mapping->host, index, size >> PAGE_SHIFT);
+ dax_unlock:
+ dax_read_unlock(id);
put_locked_mapping_entry(mapping, index, entry);
return ret;
@@ -869,6 +846,7 @@ int dax_writeback_mapping_range(struct address_space *mapping,
struct inode *inode = mapping->host;
pgoff_t start_index, end_index;
pgoff_t indices[PAGEVEC_SIZE];
+ struct dax_device *dax_dev;
struct pagevec pvec;
bool done = false;
int i, ret = 0;
@@ -879,9 +857,15 @@ int dax_writeback_mapping_range(struct address_space *mapping,
if (!mapping->nrexceptional || wbc->sync_mode != WB_SYNC_ALL)
return 0;
+ dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
+ if (!dax_dev)
+ return -EIO;
+
start_index = wbc->range_start >> PAGE_SHIFT;
end_index = wbc->range_end >> PAGE_SHIFT;
+ trace_dax_writeback_range(inode, start_index, end_index);
+
tag_pages_for_writeback(mapping, start_index, end_index);
pagevec_init(&pvec, 0);
@@ -899,38 +883,50 @@ int dax_writeback_mapping_range(struct address_space *mapping,
break;
}
- ret = dax_writeback_one(bdev, mapping, indices[i],
- pvec.pages[i]);
+ ret = dax_writeback_one(bdev, dax_dev, mapping,
+ indices[i], pvec.pages[i]);
if (ret < 0)
- return ret;
+ goto out;
}
}
- return 0;
+out:
+ put_dax(dax_dev);
+ trace_dax_writeback_range_done(inode, start_index, end_index);
+ return (ret < 0 ? ret : 0);
}
EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);
static int dax_insert_mapping(struct address_space *mapping,
- struct block_device *bdev, sector_t sector, size_t size,
- void **entryp, struct vm_area_struct *vma, struct vm_fault *vmf)
+ struct block_device *bdev, struct dax_device *dax_dev,
+ sector_t sector, size_t size, void **entryp,
+ struct vm_area_struct *vma, struct vm_fault *vmf)
{
unsigned long vaddr = vmf->address;
- struct blk_dax_ctl dax = {
- .sector = sector,
- .size = size,
- };
- void *ret;
void *entry = *entryp;
+ void *ret, *kaddr;
+ pgoff_t pgoff;
+ int id, rc;
+ pfn_t pfn;
- if (dax_map_atomic(bdev, &dax) < 0)
- return PTR_ERR(dax.addr);
- dax_unmap_atomic(bdev, &dax);
+ rc = bdev_dax_pgoff(bdev, sector, size, &pgoff);
+ if (rc)
+ return rc;
+
+ id = dax_read_lock();
+ rc = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), &kaddr, &pfn);
+ if (rc < 0) {
+ dax_read_unlock(id);
+ return rc;
+ }
+ dax_read_unlock(id);
- ret = dax_insert_mapping_entry(mapping, vmf, entry, dax.sector, 0);
+ ret = dax_insert_mapping_entry(mapping, vmf, entry, sector, 0);
if (IS_ERR(ret))
return PTR_ERR(ret);
*entryp = ret;
- return vm_insert_mixed(vma, vaddr, dax.pfn);
+ trace_dax_insert_mapping(mapping->host, vmf, ret);
+ return vm_insert_mixed(vma, vaddr, pfn);
}
/**
@@ -941,6 +937,7 @@ int dax_pfn_mkwrite(struct vm_fault *vmf)
{
struct file *file = vmf->vma->vm_file;
struct address_space *mapping = file->f_mapping;
+ struct inode *inode = mapping->host;
void *entry, **slot;
pgoff_t index = vmf->pgoff;
@@ -950,6 +947,7 @@ int dax_pfn_mkwrite(struct vm_fault *vmf)
if (entry)
put_unlocked_mapping_entry(mapping, index, entry);
spin_unlock_irq(&mapping->tree_lock);
+ trace_dax_pfn_mkwrite_no_entry(inode, vmf, VM_FAULT_NOPAGE);
return VM_FAULT_NOPAGE;
}
radix_tree_tag_set(&mapping->page_tree, index, PAGECACHE_TAG_DIRTY);
@@ -962,6 +960,7 @@ int dax_pfn_mkwrite(struct vm_fault *vmf)
*/
finish_mkwrite_fault(vmf);
put_locked_mapping_entry(mapping, index, entry);
+ trace_dax_pfn_mkwrite(inode, vmf, VM_FAULT_NOPAGE);
return VM_FAULT_NOPAGE;
}
EXPORT_SYMBOL_GPL(dax_pfn_mkwrite);
@@ -979,24 +978,34 @@ static bool dax_range_is_aligned(struct block_device *bdev,
return true;
}
-int __dax_zero_page_range(struct block_device *bdev, sector_t sector,
- unsigned int offset, unsigned int length)
+int __dax_zero_page_range(struct block_device *bdev,
+ struct dax_device *dax_dev, sector_t sector,
+ unsigned int offset, unsigned int size)
{
- struct blk_dax_ctl dax = {
- .sector = sector,
- .size = PAGE_SIZE,
- };
-
- if (dax_range_is_aligned(bdev, offset, length)) {
- sector_t start_sector = dax.sector + (offset >> 9);
+ if (dax_range_is_aligned(bdev, offset, size)) {
+ sector_t start_sector = sector + (offset >> 9);
return blkdev_issue_zeroout(bdev, start_sector,
- length >> 9, GFP_NOFS, 0);
+ size >> 9, GFP_NOFS, 0);
} else {
- if (dax_map_atomic(bdev, &dax) < 0)
- return PTR_ERR(dax.addr);
- clear_pmem(dax.addr + offset, length);
- dax_unmap_atomic(bdev, &dax);
+ pgoff_t pgoff;
+ long rc, id;
+ void *kaddr;
+ pfn_t pfn;
+
+ rc = bdev_dax_pgoff(bdev, sector, size, &pgoff);
+ if (rc)
+ return rc;
+
+ id = dax_read_lock();
+ rc = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), &kaddr,
+ &pfn);
+ if (rc < 0) {
+ dax_read_unlock(id);
+ return rc;
+ }
+ clear_pmem(kaddr + offset, size);
+ dax_read_unlock(id);
}
return 0;
}
@@ -1011,9 +1020,12 @@ static loff_t
dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
struct iomap *iomap)
{
+ struct block_device *bdev = iomap->bdev;
+ struct dax_device *dax_dev = iomap->dax_dev;
struct iov_iter *iter = data;
loff_t end = pos + length, done = 0;
ssize_t ret = 0;
+ int id;
if (iov_iter_rw(iter) == READ) {
end = min(end, i_size_read(inode));
@@ -1038,34 +1050,42 @@ dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
(end - 1) >> PAGE_SHIFT);
}
+ id = dax_read_lock();
while (pos < end) {
unsigned offset = pos & (PAGE_SIZE - 1);
- struct blk_dax_ctl dax = { 0 };
+ const size_t size = ALIGN(length + offset, PAGE_SIZE);
+ const sector_t sector = dax_iomap_sector(iomap, pos);
ssize_t map_len;
+ pgoff_t pgoff;
+ void *kaddr;
+ pfn_t pfn;
if (fatal_signal_pending(current)) {
ret = -EINTR;
break;
}
- dax.sector = dax_iomap_sector(iomap, pos);
- dax.size = (length + offset + PAGE_SIZE - 1) & PAGE_MASK;
- map_len = dax_map_atomic(iomap->bdev, &dax);
+ ret = bdev_dax_pgoff(bdev, sector, size, &pgoff);
+ if (ret)
+ break;
+
+ map_len = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size),
+ &kaddr, &pfn);
if (map_len < 0) {
ret = map_len;
break;
}
- dax.addr += offset;
+ map_len = PFN_PHYS(map_len);
+ kaddr += offset;
map_len -= offset;
if (map_len > end - pos)
map_len = end - pos;
if (iov_iter_rw(iter) == WRITE)
- map_len = copy_from_iter_pmem(dax.addr, map_len, iter);
+ map_len = copy_from_iter_pmem(kaddr, map_len, iter);
else
- map_len = copy_to_iter(dax.addr, map_len, iter);
- dax_unmap_atomic(iomap->bdev, &dax);
+ map_len = copy_to_iter(kaddr, map_len, iter);
if (map_len <= 0) {
ret = map_len ? map_len : -EFAULT;
break;
@@ -1075,6 +1095,7 @@ dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
length -= map_len;
done += map_len;
}
+ dax_read_unlock(id);
return done ? done : ret;
}
@@ -1142,13 +1163,16 @@ static int dax_iomap_pte_fault(struct vm_fault *vmf,
int vmf_ret = 0;
void *entry;
+ trace_dax_pte_fault(inode, vmf, vmf_ret);
/*
* Check whether offset isn't beyond end of file now. Caller is supposed
* to hold locks serializing us with truncate / punch hole so this is
* a reliable test.
*/
- if (pos >= i_size_read(inode))
- return VM_FAULT_SIGBUS;
+ if (pos >= i_size_read(inode)) {
+ vmf_ret = VM_FAULT_SIGBUS;
+ goto out;
+ }
if ((vmf->flags & FAULT_FLAG_WRITE) && !vmf->cow_page)
flags |= IOMAP_WRITE;
@@ -1159,8 +1183,10 @@ static int dax_iomap_pte_fault(struct vm_fault *vmf,
* that we never have to deal with more than a single extent here.
*/
error = ops->iomap_begin(inode, pos, PAGE_SIZE, flags, &iomap);
- if (error)
- return dax_fault_return(error);
+ if (error) {
+ vmf_ret = dax_fault_return(error);
+ goto out;
+ }
if (WARN_ON_ONCE(iomap.offset + iomap.length < pos + PAGE_SIZE)) {
vmf_ret = dax_fault_return(-EIO); /* fs corruption? */
goto finish_iomap;
@@ -1181,8 +1207,8 @@ static int dax_iomap_pte_fault(struct vm_fault *vmf,
clear_user_highpage(vmf->cow_page, vaddr);
break;
case IOMAP_MAPPED:
- error = copy_user_dax(iomap.bdev, sector, PAGE_SIZE,
- vmf->cow_page, vaddr);
+ error = copy_user_dax(iomap.bdev, iomap.dax_dev,
+ sector, PAGE_SIZE, vmf->cow_page, vaddr);
break;
default:
WARN_ON_ONCE(1);
@@ -1207,8 +1233,8 @@ static int dax_iomap_pte_fault(struct vm_fault *vmf,
mem_cgroup_count_vm_event(vmf->vma->vm_mm, PGMAJFAULT);
major = VM_FAULT_MAJOR;
}
- error = dax_insert_mapping(mapping, iomap.bdev, sector,
- PAGE_SIZE, &entry, vmf->vma, vmf);
+ error = dax_insert_mapping(mapping, iomap.bdev, iomap.dax_dev,
+ sector, PAGE_SIZE, &entry, vmf->vma, vmf);
/* -EBUSY is fine, somebody else faulted on the same PTE */
if (error == -EBUSY)
error = 0;
@@ -1244,6 +1270,8 @@ static int dax_iomap_pte_fault(struct vm_fault *vmf,
*/
ops->iomap_end(inode, pos, PAGE_SIZE, copied, flags, &iomap);
}
+out:
+ trace_dax_pte_fault_done(inode, vmf, vmf_ret);
return vmf_ret;
}
@@ -1258,41 +1286,48 @@ static int dax_pmd_insert_mapping(struct vm_fault *vmf, struct iomap *iomap,
loff_t pos, void **entryp)
{
struct address_space *mapping = vmf->vma->vm_file->f_mapping;
+ const sector_t sector = dax_iomap_sector(iomap, pos);
+ struct dax_device *dax_dev = iomap->dax_dev;
struct block_device *bdev = iomap->bdev;
struct inode *inode = mapping->host;
- struct blk_dax_ctl dax = {
- .sector = dax_iomap_sector(iomap, pos),
- .size = PMD_SIZE,
- };
- long length = dax_map_atomic(bdev, &dax);
- void *ret = NULL;
-
- if (length < 0) /* dax_map_atomic() failed */
+ const size_t size = PMD_SIZE;
+ void *ret = NULL, *kaddr;
+ long length = 0;
+ pgoff_t pgoff;
+ pfn_t pfn;
+ int id;
+
+ if (bdev_dax_pgoff(bdev, sector, size, &pgoff) != 0)
goto fallback;
- if (length < PMD_SIZE)
- goto unmap_fallback;
- if (pfn_t_to_pfn(dax.pfn) & PG_PMD_COLOUR)
- goto unmap_fallback;
- if (!pfn_t_devmap(dax.pfn))
- goto unmap_fallback;
-
- dax_unmap_atomic(bdev, &dax);
- ret = dax_insert_mapping_entry(mapping, vmf, *entryp, dax.sector,
+ id = dax_read_lock();
+ length = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), &kaddr, &pfn);
+ if (length < 0)
+ goto unlock_fallback;
+ length = PFN_PHYS(length);
+
+ if (length < size)
+ goto unlock_fallback;
+ if (pfn_t_to_pfn(pfn) & PG_PMD_COLOUR)
+ goto unlock_fallback;
+ if (!pfn_t_devmap(pfn))
+ goto unlock_fallback;
+ dax_read_unlock(id);
+
+ ret = dax_insert_mapping_entry(mapping, vmf, *entryp, sector,
RADIX_DAX_PMD);
if (IS_ERR(ret))
goto fallback;
*entryp = ret;
- trace_dax_pmd_insert_mapping(inode, vmf, length, dax.pfn, ret);
+ trace_dax_pmd_insert_mapping(inode, vmf, length, pfn, ret);
return vmf_insert_pfn_pmd(vmf->vma, vmf->address, vmf->pmd,
- dax.pfn, vmf->flags & FAULT_FLAG_WRITE);
+ pfn, vmf->flags & FAULT_FLAG_WRITE);
- unmap_fallback:
- dax_unmap_atomic(bdev, &dax);
+unlock_fallback:
+ dax_read_unlock(id);
fallback:
- trace_dax_pmd_insert_mapping_fallback(inode, vmf, length,
- dax.pfn, ret);
+ trace_dax_pmd_insert_mapping_fallback(inode, vmf, length, pfn, ret);
return VM_FAULT_FALLBACK;
}
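Throughout fs/dax.c the blk_dax_ctl/dax_map_atomic() pairing is replaced by bdev_dax_pgoff() plus dax_direct_access(), bracketed by dax_read_lock()/dax_read_unlock(). bdev_dax_pgoff() essentially turns a 512-byte sector (plus the partition start) into a page offset into the dax_device, failing when the result is not page aligned; the back-of-the-envelope sketch below uses made-up partition numbers and ignores the size check the real helper also performs:

/* Rough version of the sector -> pgoff translation performed by
 * bdev_dax_pgoff() before dax_direct_access() is called.
 */
#include <stdio.h>

#define SECTOR_SIZE 512ULL
#define PAGE_SIZE   4096ULL

int main(void)
{
        unsigned long long part_start_sector = 2048;  /* hypothetical partition offset */
        unsigned long long sector = 16384;            /* sector within the partition */
        unsigned long long phys = (part_start_sector + sector) * SECTOR_SIZE;

        if (phys % PAGE_SIZE) {
                fprintf(stderr, "not page aligned, cannot map for DAX\n");
                return 1;
        }
        printf("pgoff into the dax_device: %llu\n", phys / PAGE_SIZE);  /* 2304 */
        return 0;
}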
diff --git a/fs/dcache.c b/fs/dcache.c
index 95d71eda8142..cddf39777835 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -419,6 +419,8 @@ static void dentry_lru_add(struct dentry *dentry)
{
if (unlikely(!(dentry->d_flags & DCACHE_LRU_LIST)))
d_lru_add(dentry);
+ else if (unlikely(!(dentry->d_flags & DCACHE_REFERENCED)))
+ dentry->d_flags |= DCACHE_REFERENCED;
}
/**
@@ -779,8 +781,6 @@ repeat:
goto kill_it;
}
- if (!(dentry->d_flags & DCACHE_REFERENCED))
- dentry->d_flags |= DCACHE_REFERENCED;
dentry_lru_add(dentry);
dentry->d_lockref.count--;
diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
index 7fd4ec4bb214..e892ae7d89f8 100644
--- a/fs/debugfs/inode.c
+++ b/fs/debugfs/inode.c
@@ -199,7 +199,7 @@ static const struct dentry_operations debugfs_dops = {
static int debug_fill_super(struct super_block *sb, void *data, int silent)
{
- static struct tree_descr debug_files[] = {{""}};
+ static const struct tree_descr debug_files[] = {{""}};
struct debugfs_fs_info *fsi;
int err;
diff --git a/fs/exofs/dir.c b/fs/exofs/dir.c
index 42f9a0a0c4ca..8eeb694332fe 100644
--- a/fs/exofs/dir.c
+++ b/fs/exofs/dir.c
@@ -405,8 +405,7 @@ int exofs_set_link(struct inode *dir, struct exofs_dir_entry *de,
int err;
lock_page(page);
- err = exofs_write_begin(NULL, page->mapping, pos, len,
- AOP_FLAG_UNINTERRUPTIBLE, &page, NULL);
+ err = exofs_write_begin(NULL, page->mapping, pos, len, 0, &page, NULL);
if (err)
EXOFS_ERR("exofs_set_link: exofs_write_begin FAILED => %d\n",
err);
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index 3a38c1b84e3c..26d77f9f8c12 100644
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -799,6 +799,7 @@ int ext2_get_block(struct inode *inode, sector_t iblock,
static int ext2_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
unsigned flags, struct iomap *iomap)
{
+ struct block_device *bdev;
unsigned int blkbits = inode->i_blkbits;
unsigned long first_block = offset >> blkbits;
unsigned long max_blocks = (length + (1 << blkbits) - 1) >> blkbits;
@@ -812,8 +813,13 @@ static int ext2_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
return ret;
iomap->flags = 0;
- iomap->bdev = inode->i_sb->s_bdev;
+ bdev = inode->i_sb->s_bdev;
+ iomap->bdev = bdev;
iomap->offset = (u64)first_block << blkbits;
+ if (blk_queue_dax(bdev->bd_queue))
+ iomap->dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
+ else
+ iomap->dax_dev = NULL;
if (ret == 0) {
iomap->type = IOMAP_HOLE;
@@ -835,6 +841,7 @@ static int
ext2_iomap_end(struct inode *inode, loff_t offset, loff_t length,
ssize_t written, unsigned flags, struct iomap *iomap)
{
+ put_dax(iomap->dax_dev);
if (iomap->type == IOMAP_MAPPED &&
written < length &&
(flags & IOMAP_WRITE))
diff --git a/fs/ext4/Makefile b/fs/ext4/Makefile
index 354103f3490c..d9beca1653c5 100644
--- a/fs/ext4/Makefile
+++ b/fs/ext4/Makefile
@@ -4,11 +4,11 @@
obj-$(CONFIG_EXT4_FS) += ext4.o
-ext4-y := balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o page-io.o \
- ioctl.o namei.o super.o symlink.o hash.o resize.o extents.o \
- ext4_jbd2.o migrate.o mballoc.o block_validity.o move_extent.o \
- mmp.o indirect.o extents_status.o xattr.o xattr_user.o \
- xattr_trusted.o inline.o readpage.o sysfs.o
+ext4-y := balloc.o bitmap.o block_validity.o dir.o ext4_jbd2.o extents.o \
+ extents_status.o file.o fsmap.o fsync.o hash.o ialloc.o \
+ indirect.o inline.o inode.o ioctl.o mballoc.o migrate.o \
+ mmp.o move_extent.o namei.o page-io.o readpage.o resize.o \
+ super.o symlink.o sysfs.o xattr.o xattr_trusted.o xattr_user.o
ext4-$(CONFIG_EXT4_FS_POSIX_ACL) += acl.o
ext4-$(CONFIG_EXT4_FS_SECURITY) += xattr_security.o
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index f7b465b4fb69..8e8046104f4d 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -2356,17 +2356,16 @@ extern int ext4_find_dest_de(struct inode *dir, struct inode *inode,
void *buf, int buf_size,
struct ext4_filename *fname,
struct ext4_dir_entry_2 **dest_de);
-int ext4_insert_dentry(struct inode *dir,
- struct inode *inode,
- struct ext4_dir_entry_2 *de,
- int buf_size,
- struct ext4_filename *fname);
+void ext4_insert_dentry(struct inode *inode,
+ struct ext4_dir_entry_2 *de,
+ int buf_size,
+ struct ext4_filename *fname);
static inline void ext4_update_dx_flag(struct inode *inode)
{
if (!ext4_has_feature_dir_index(inode->i_sb))
ext4_clear_inode_flag(inode, EXT4_INODE_INDEX);
}
-static unsigned char ext4_filetype_table[] = {
+static const unsigned char ext4_filetype_table[] = {
DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
};
@@ -3050,7 +3049,7 @@ extern int ext4_handle_dirty_dirent_node(handle_t *handle,
struct inode *inode,
struct buffer_head *bh);
#define S_SHIFT 12
-static unsigned char ext4_type_by_mode[S_IFMT >> S_SHIFT] = {
+static const unsigned char ext4_type_by_mode[S_IFMT >> S_SHIFT] = {
[S_IFREG >> S_SHIFT] = EXT4_FT_REG_FILE,
[S_IFDIR >> S_SHIFT] = EXT4_FT_DIR,
[S_IFCHR >> S_SHIFT] = EXT4_FT_CHRDEV,
diff --git a/fs/ext4/fsmap.c b/fs/ext4/fsmap.c
new file mode 100644
index 000000000000..b19436098837
--- /dev/null
+++ b/fs/ext4/fsmap.c
@@ -0,0 +1,722 @@
+/*
+ * Copyright (C) 2017 Oracle. All Rights Reserved.
+ *
+ * Author: Darrick J. Wong <darrick.wong@oracle.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include "ext4.h"
+#include <linux/fsmap.h>
+#include "fsmap.h"
+#include "mballoc.h"
+#include <linux/sort.h>
+#include <linux/list_sort.h>
+#include <trace/events/ext4.h>
+
+/* Convert an ext4_fsmap to an fsmap. */
+void ext4_fsmap_from_internal(struct super_block *sb, struct fsmap *dest,
+ struct ext4_fsmap *src)
+{
+ dest->fmr_device = src->fmr_device;
+ dest->fmr_flags = src->fmr_flags;
+ dest->fmr_physical = src->fmr_physical << sb->s_blocksize_bits;
+ dest->fmr_owner = src->fmr_owner;
+ dest->fmr_offset = 0;
+ dest->fmr_length = src->fmr_length << sb->s_blocksize_bits;
+ dest->fmr_reserved[0] = 0;
+ dest->fmr_reserved[1] = 0;
+ dest->fmr_reserved[2] = 0;
+}
+
+/* Convert an fsmap to an ext4_fsmap. */
+void ext4_fsmap_to_internal(struct super_block *sb, struct ext4_fsmap *dest,
+ struct fsmap *src)
+{
+ dest->fmr_device = src->fmr_device;
+ dest->fmr_flags = src->fmr_flags;
+ dest->fmr_physical = src->fmr_physical >> sb->s_blocksize_bits;
+ dest->fmr_owner = src->fmr_owner;
+ dest->fmr_length = src->fmr_length >> sb->s_blocksize_bits;
+}
+
+/* getfsmap query state */
+struct ext4_getfsmap_info {
+ struct ext4_fsmap_head *gfi_head;
+ ext4_fsmap_format_t gfi_formatter; /* formatting fn */
+ void *gfi_format_arg;/* format buffer */
+ ext4_fsblk_t gfi_next_fsblk; /* next fsblock we expect */
+ u32 gfi_dev; /* device id */
+ ext4_group_t gfi_agno; /* bg number, if applicable */
+ struct ext4_fsmap gfi_low; /* low rmap key */
+ struct ext4_fsmap gfi_high; /* high rmap key */
+ struct ext4_fsmap gfi_lastfree; /* free ext at end of last bg */
+ struct list_head gfi_meta_list; /* fixed metadata list */
+ bool gfi_last; /* last extent? */
+};
+
+/* Associate a device with a getfsmap handler. */
+struct ext4_getfsmap_dev {
+ int (*gfd_fn)(struct super_block *sb,
+ struct ext4_fsmap *keys,
+ struct ext4_getfsmap_info *info);
+ u32 gfd_dev;
+};
+
+/* Compare two getfsmap device handlers. */
+static int ext4_getfsmap_dev_compare(const void *p1, const void *p2)
+{
+ const struct ext4_getfsmap_dev *d1 = p1;
+ const struct ext4_getfsmap_dev *d2 = p2;
+
+ return d1->gfd_dev - d2->gfd_dev;
+}
+
+/* Compare a record against our starting point */
+static bool ext4_getfsmap_rec_before_low_key(struct ext4_getfsmap_info *info,
+ struct ext4_fsmap *rec)
+{
+ return rec->fmr_physical < info->gfi_low.fmr_physical;
+}
+
+/*
+ * Format a reverse mapping for getfsmap, having translated rm_startblock
+ * into the appropriate daddr units.
+ */
+static int ext4_getfsmap_helper(struct super_block *sb,
+ struct ext4_getfsmap_info *info,
+ struct ext4_fsmap *rec)
+{
+ struct ext4_fsmap fmr;
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+ ext4_fsblk_t rec_fsblk = rec->fmr_physical;
+ ext4_group_t agno;
+ ext4_grpblk_t cno;
+ int error;
+
+ if (fatal_signal_pending(current))
+ return -EINTR;
+
+ /*
+ * Filter out records that start before our startpoint, if the
+ * caller requested that.
+ */
+ if (ext4_getfsmap_rec_before_low_key(info, rec)) {
+ rec_fsblk += rec->fmr_length;
+ if (info->gfi_next_fsblk < rec_fsblk)
+ info->gfi_next_fsblk = rec_fsblk;
+ return EXT4_QUERY_RANGE_CONTINUE;
+ }
+
+ /* Are we just counting mappings? */
+ if (info->gfi_head->fmh_count == 0) {
+ if (rec_fsblk > info->gfi_next_fsblk)
+ info->gfi_head->fmh_entries++;
+
+ if (info->gfi_last)
+ return EXT4_QUERY_RANGE_CONTINUE;
+
+ info->gfi_head->fmh_entries++;
+
+ rec_fsblk += rec->fmr_length;
+ if (info->gfi_next_fsblk < rec_fsblk)
+ info->gfi_next_fsblk = rec_fsblk;
+ return EXT4_QUERY_RANGE_CONTINUE;
+ }
+
+ /*
+ * If the record starts past the last physical block we saw,
+ * then we've found a gap. Report the gap as being owned by
+ * whatever the caller specified is the missing owner.
+ */
+ if (rec_fsblk > info->gfi_next_fsblk) {
+ if (info->gfi_head->fmh_entries >= info->gfi_head->fmh_count)
+ return EXT4_QUERY_RANGE_ABORT;
+
+ ext4_get_group_no_and_offset(sb, info->gfi_next_fsblk,
+ &agno, &cno);
+ trace_ext4_fsmap_mapping(sb, info->gfi_dev, agno,
+ EXT4_C2B(sbi, cno),
+ rec_fsblk - info->gfi_next_fsblk,
+ EXT4_FMR_OWN_UNKNOWN);
+
+ fmr.fmr_device = info->gfi_dev;
+ fmr.fmr_physical = info->gfi_next_fsblk;
+ fmr.fmr_owner = EXT4_FMR_OWN_UNKNOWN;
+ fmr.fmr_length = rec_fsblk - info->gfi_next_fsblk;
+ fmr.fmr_flags = FMR_OF_SPECIAL_OWNER;
+ error = info->gfi_formatter(&fmr, info->gfi_format_arg);
+ if (error)
+ return error;
+ info->gfi_head->fmh_entries++;
+ }
+
+ if (info->gfi_last)
+ goto out;
+
+ /* Fill out the extent we found */
+ if (info->gfi_head->fmh_entries >= info->gfi_head->fmh_count)
+ return EXT4_QUERY_RANGE_ABORT;
+
+ ext4_get_group_no_and_offset(sb, rec_fsblk, &agno, &cno);
+ trace_ext4_fsmap_mapping(sb, info->gfi_dev, agno, EXT4_C2B(sbi, cno),
+ rec->fmr_length, rec->fmr_owner);
+
+ fmr.fmr_device = info->gfi_dev;
+ fmr.fmr_physical = rec_fsblk;
+ fmr.fmr_owner = rec->fmr_owner;
+ fmr.fmr_flags = FMR_OF_SPECIAL_OWNER;
+ fmr.fmr_length = rec->fmr_length;
+ error = info->gfi_formatter(&fmr, info->gfi_format_arg);
+ if (error)
+ return error;
+ info->gfi_head->fmh_entries++;
+
+out:
+ rec_fsblk += rec->fmr_length;
+ if (info->gfi_next_fsblk < rec_fsblk)
+ info->gfi_next_fsblk = rec_fsblk;
+ return EXT4_QUERY_RANGE_CONTINUE;
+}
+
+static inline ext4_fsblk_t ext4_fsmap_next_pblk(struct ext4_fsmap *fmr)
+{
+ return fmr->fmr_physical + fmr->fmr_length;
+}
+
+/* Transform a blockgroup's free record into a fsmap */
+static int ext4_getfsmap_datadev_helper(struct super_block *sb,
+ ext4_group_t agno, ext4_grpblk_t start,
+ ext4_grpblk_t len, void *priv)
+{
+ struct ext4_fsmap irec;
+ struct ext4_getfsmap_info *info = priv;
+ struct ext4_fsmap *p;
+ struct ext4_fsmap *tmp;
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+ ext4_fsblk_t fsb;
+ ext4_fsblk_t fslen;
+ int error;
+
+ fsb = (EXT4_C2B(sbi, start) + ext4_group_first_block_no(sb, agno));
+ fslen = EXT4_C2B(sbi, len);
+
+ /* If the retained free extent record is set... */
+ if (info->gfi_lastfree.fmr_owner) {
+ /* ...and abuts this one, lengthen it and return. */
+ if (ext4_fsmap_next_pblk(&info->gfi_lastfree) == fsb) {
+ info->gfi_lastfree.fmr_length += fslen;
+ return 0;
+ }
+
+ /*
+ * There's a gap between the two free extents; emit the
+ * retained extent prior to merging the meta_list.
+ */
+ error = ext4_getfsmap_helper(sb, info, &info->gfi_lastfree);
+ if (error)
+ return error;
+ info->gfi_lastfree.fmr_owner = 0;
+ }
+
+ /* Merge in any relevant extents from the meta_list */
+ list_for_each_entry_safe(p, tmp, &info->gfi_meta_list, fmr_list) {
+ if (p->fmr_physical + p->fmr_length <= info->gfi_next_fsblk) {
+ list_del(&p->fmr_list);
+ kfree(p);
+ } else if (p->fmr_physical < fsb) {
+ error = ext4_getfsmap_helper(sb, info, p);
+ if (error)
+ return error;
+
+ list_del(&p->fmr_list);
+ kfree(p);
+ }
+ }
+
+ irec.fmr_device = 0;
+ irec.fmr_physical = fsb;
+ irec.fmr_length = fslen;
+ irec.fmr_owner = EXT4_FMR_OWN_FREE;
+ irec.fmr_flags = 0;
+
+ /* If this is a free extent at the end of a bg, buffer it. */
+ if (ext4_fsmap_next_pblk(&irec) ==
+ ext4_group_first_block_no(sb, agno + 1)) {
+ info->gfi_lastfree = irec;
+ return 0;
+ }
+
+ /* Otherwise, emit it */
+ return ext4_getfsmap_helper(sb, info, &irec);
+}
+
+/* Execute a getfsmap query against the log device. */
+static int ext4_getfsmap_logdev(struct super_block *sb, struct ext4_fsmap *keys,
+ struct ext4_getfsmap_info *info)
+{
+ journal_t *journal = EXT4_SB(sb)->s_journal;
+ struct ext4_fsmap irec;
+
+ /* Set up search keys */
+ info->gfi_low = keys[0];
+ info->gfi_low.fmr_length = 0;
+
+ memset(&info->gfi_high, 0xFF, sizeof(info->gfi_high));
+
+ trace_ext4_fsmap_low_key(sb, info->gfi_dev, 0,
+ info->gfi_low.fmr_physical,
+ info->gfi_low.fmr_length,
+ info->gfi_low.fmr_owner);
+
+ trace_ext4_fsmap_high_key(sb, info->gfi_dev, 0,
+ info->gfi_high.fmr_physical,
+ info->gfi_high.fmr_length,
+ info->gfi_high.fmr_owner);
+
+ if (keys[0].fmr_physical > 0)
+ return 0;
+
+ /* Fabricate an rmap entry for the external log device. */
+ irec.fmr_physical = journal->j_blk_offset;
+ irec.fmr_length = journal->j_maxlen;
+ irec.fmr_owner = EXT4_FMR_OWN_LOG;
+ irec.fmr_flags = 0;
+
+ return ext4_getfsmap_helper(sb, info, &irec);
+}
+
+/* Helper to fill out an ext4_fsmap. */
+static inline int ext4_getfsmap_fill(struct list_head *meta_list,
+ ext4_fsblk_t fsb, ext4_fsblk_t len,
+ uint64_t owner)
+{
+ struct ext4_fsmap *fsm;
+
+ fsm = kmalloc(sizeof(*fsm), GFP_NOFS);
+ if (!fsm)
+ return -ENOMEM;
+ fsm->fmr_device = 0;
+ fsm->fmr_flags = 0;
+ fsm->fmr_physical = fsb;
+ fsm->fmr_owner = owner;
+ fsm->fmr_length = len;
+ list_add_tail(&fsm->fmr_list, meta_list);
+
+ return 0;
+}
+
+/*
+ * This function records the file system metadata blocks found at the
+ * beginning of a block group, including the reserved gdt blocks.
+ */
+static unsigned int ext4_getfsmap_find_sb(struct super_block *sb,
+ ext4_group_t agno,
+ struct list_head *meta_list)
+{
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+ ext4_fsblk_t fsb = ext4_group_first_block_no(sb, agno);
+ ext4_fsblk_t len;
+ unsigned long first_meta_bg = le32_to_cpu(sbi->s_es->s_first_meta_bg);
+ unsigned long metagroup = agno / EXT4_DESC_PER_BLOCK(sb);
+ int error;
+
+ /* Record the superblock. */
+ if (ext4_bg_has_super(sb, agno)) {
+ error = ext4_getfsmap_fill(meta_list, fsb, 1, EXT4_FMR_OWN_FS);
+ if (error)
+ return error;
+ fsb++;
+ }
+
+ /* Record the group descriptors. */
+ len = ext4_bg_num_gdb(sb, agno);
+ if (!len)
+ return 0;
+ error = ext4_getfsmap_fill(meta_list, fsb, len,
+ EXT4_FMR_OWN_GDT);
+ if (error)
+ return error;
+ fsb += len;
+
+ /* Reserved GDT blocks */
+ if (!ext4_has_feature_meta_bg(sb) || metagroup < first_meta_bg) {
+ len = le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks);
+ error = ext4_getfsmap_fill(meta_list, fsb, len,
+ EXT4_FMR_OWN_RESV_GDT);
+ if (error)
+ return error;
+ }
+
+ return 0;
+}
+
+/* Compare two fsmap items. */
+static int ext4_getfsmap_compare(void *priv,
+ struct list_head *a,
+ struct list_head *b)
+{
+ struct ext4_fsmap *fa;
+ struct ext4_fsmap *fb;
+
+ fa = container_of(a, struct ext4_fsmap, fmr_list);
+ fb = container_of(b, struct ext4_fsmap, fmr_list);
+ if (fa->fmr_physical < fb->fmr_physical)
+ return -1;
+ else if (fa->fmr_physical > fb->fmr_physical)
+ return 1;
+ return 0;
+}
+
+/* Merge adjacent extents of fixed metadata. */
+static void ext4_getfsmap_merge_fixed_metadata(struct list_head *meta_list)
+{
+ struct ext4_fsmap *p;
+ struct ext4_fsmap *prev = NULL;
+ struct ext4_fsmap *tmp;
+
+ list_for_each_entry_safe(p, tmp, meta_list, fmr_list) {
+ if (!prev) {
+ prev = p;
+ continue;
+ }
+
+ if (prev->fmr_owner == p->fmr_owner &&
+ prev->fmr_physical + prev->fmr_length == p->fmr_physical) {
+ prev->fmr_length += p->fmr_length;
+ list_del(&p->fmr_list);
+ kfree(p);
+ } else
+ prev = p;
+ }
+}
+
+/* Free a list of fixed metadata. */
+static void ext4_getfsmap_free_fixed_metadata(struct list_head *meta_list)
+{
+ struct ext4_fsmap *p;
+ struct ext4_fsmap *tmp;
+
+ list_for_each_entry_safe(p, tmp, meta_list, fmr_list) {
+ list_del(&p->fmr_list);
+ kfree(p);
+ }
+}
+
+/* Find all the fixed metadata in the filesystem. */
+int ext4_getfsmap_find_fixed_metadata(struct super_block *sb,
+ struct list_head *meta_list)
+{
+ struct ext4_group_desc *gdp;
+ ext4_group_t agno;
+ int error;
+
+ INIT_LIST_HEAD(meta_list);
+
+ /* Collect everything. */
+ for (agno = 0; agno < EXT4_SB(sb)->s_groups_count; agno++) {
+ gdp = ext4_get_group_desc(sb, agno, NULL);
+ if (!gdp) {
+ error = -EFSCORRUPTED;
+ goto err;
+ }
+
+ /* Superblock & GDT */
+ error = ext4_getfsmap_find_sb(sb, agno, meta_list);
+ if (error)
+ goto err;
+
+ /* Block bitmap */
+ error = ext4_getfsmap_fill(meta_list,
+ ext4_block_bitmap(sb, gdp), 1,
+ EXT4_FMR_OWN_BLKBM);
+ if (error)
+ goto err;
+
+ /* Inode bitmap */
+ error = ext4_getfsmap_fill(meta_list,
+ ext4_inode_bitmap(sb, gdp), 1,
+ EXT4_FMR_OWN_INOBM);
+ if (error)
+ goto err;
+
+ /* Inodes */
+ error = ext4_getfsmap_fill(meta_list,
+ ext4_inode_table(sb, gdp),
+ EXT4_SB(sb)->s_itb_per_group,
+ EXT4_FMR_OWN_INODES);
+ if (error)
+ goto err;
+ }
+
+ /* Sort the list */
+ list_sort(NULL, meta_list, ext4_getfsmap_compare);
+
+ /* Merge adjacent extents */
+ ext4_getfsmap_merge_fixed_metadata(meta_list);
+
+ return 0;
+err:
+ ext4_getfsmap_free_fixed_metadata(meta_list);
+ return error;
+}
+
+/* Execute a getfsmap query against the buddy bitmaps */
+static int ext4_getfsmap_datadev(struct super_block *sb,
+ struct ext4_fsmap *keys,
+ struct ext4_getfsmap_info *info)
+{
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+ ext4_fsblk_t start_fsb;
+ ext4_fsblk_t end_fsb;
+ ext4_fsblk_t eofs;
+ ext4_group_t start_ag;
+ ext4_group_t end_ag;
+ ext4_grpblk_t first_cluster;
+ ext4_grpblk_t last_cluster;
+ int error = 0;
+
+ eofs = ext4_blocks_count(sbi->s_es);
+ if (keys[0].fmr_physical >= eofs)
+ return 0;
+ if (keys[1].fmr_physical >= eofs)
+ keys[1].fmr_physical = eofs - 1;
+ start_fsb = keys[0].fmr_physical;
+ end_fsb = keys[1].fmr_physical;
+
+ /* Determine first and last group to examine based on start and end */
+ ext4_get_group_no_and_offset(sb, start_fsb, &start_ag, &first_cluster);
+ ext4_get_group_no_and_offset(sb, end_fsb, &end_ag, &last_cluster);
+
+ /*
+ * Convert the fsmap low/high keys to bg based keys. Initialize
+ * low to the fsmap low key and max out the high key to the end
+ * of the bg.
+ */
+ info->gfi_low = keys[0];
+ info->gfi_low.fmr_physical = EXT4_C2B(sbi, first_cluster);
+ info->gfi_low.fmr_length = 0;
+
+ memset(&info->gfi_high, 0xFF, sizeof(info->gfi_high));
+
+ /* Assemble a list of all the fixed-location metadata. */
+ error = ext4_getfsmap_find_fixed_metadata(sb, &info->gfi_meta_list);
+ if (error)
+ goto err;
+
+ /* Query each bg */
+ for (info->gfi_agno = start_ag;
+ info->gfi_agno <= end_ag;
+ info->gfi_agno++) {
+ /*
+ * Set the bg high key from the fsmap high key if this
+ * is the last bg that we're querying.
+ */
+ if (info->gfi_agno == end_ag) {
+ info->gfi_high = keys[1];
+ info->gfi_high.fmr_physical = EXT4_C2B(sbi,
+ last_cluster);
+ info->gfi_high.fmr_length = 0;
+ }
+
+ trace_ext4_fsmap_low_key(sb, info->gfi_dev, info->gfi_agno,
+ info->gfi_low.fmr_physical,
+ info->gfi_low.fmr_length,
+ info->gfi_low.fmr_owner);
+
+ trace_ext4_fsmap_high_key(sb, info->gfi_dev, info->gfi_agno,
+ info->gfi_high.fmr_physical,
+ info->gfi_high.fmr_length,
+ info->gfi_high.fmr_owner);
+
+ error = ext4_mballoc_query_range(sb, info->gfi_agno,
+ EXT4_B2C(sbi, info->gfi_low.fmr_physical),
+ EXT4_B2C(sbi, info->gfi_high.fmr_physical),
+ ext4_getfsmap_datadev_helper, info);
+ if (error)
+ goto err;
+
+ /*
+ * Set the bg low key to the start of the bg prior to
+ * moving on to the next bg.
+ */
+ if (info->gfi_agno == start_ag)
+ memset(&info->gfi_low, 0, sizeof(info->gfi_low));
+ }
+
+ /* Do we have a retained free extent? */
+ if (info->gfi_lastfree.fmr_owner) {
+ error = ext4_getfsmap_helper(sb, info, &info->gfi_lastfree);
+ if (error)
+ goto err;
+ }
+
+ /* Report any gaps at the end of the bg */
+ info->gfi_last = true;
+ error = ext4_getfsmap_datadev_helper(sb, end_ag, last_cluster, 0, info);
+ if (error)
+ goto err;
+
+err:
+ ext4_getfsmap_free_fixed_metadata(&info->gfi_meta_list);
+ return error;
+}
+
+/* Do we recognize the device? */
+static bool ext4_getfsmap_is_valid_device(struct super_block *sb,
+ struct ext4_fsmap *fm)
+{
+ if (fm->fmr_device == 0 || fm->fmr_device == UINT_MAX ||
+ fm->fmr_device == new_encode_dev(sb->s_bdev->bd_dev))
+ return true;
+ if (EXT4_SB(sb)->journal_bdev &&
+ fm->fmr_device == new_encode_dev(EXT4_SB(sb)->journal_bdev->bd_dev))
+ return true;
+ return false;
+}
+
+/* Ensure that the low key is less than the high key. */
+static bool ext4_getfsmap_check_keys(struct ext4_fsmap *low_key,
+ struct ext4_fsmap *high_key)
+{
+ if (low_key->fmr_device > high_key->fmr_device)
+ return false;
+ if (low_key->fmr_device < high_key->fmr_device)
+ return true;
+
+ if (low_key->fmr_physical > high_key->fmr_physical)
+ return false;
+ if (low_key->fmr_physical < high_key->fmr_physical)
+ return true;
+
+ if (low_key->fmr_owner > high_key->fmr_owner)
+ return false;
+ if (low_key->fmr_owner < high_key->fmr_owner)
+ return true;
+
+ return false;
+}
+
+#define EXT4_GETFSMAP_DEVS 2
+/*
+ * Get filesystem's extents as described in head, and format for
+ * output. Calls formatter to fill the user's buffer until all
+ * extents are mapped, until the passed-in head->fmh_count slots have
+ * been filled, or until the formatter short-circuits the loop, if it
+ * is tracking filled-in extents on its own.
+ *
+ * Key to Confusion
+ * ----------------
+ * There are multiple levels of keys and counters at work here:
+ * _fsmap_head.fmh_keys -- low and high fsmap keys passed in;
+ * these reflect fs-wide block addrs.
+ * dkeys -- fmh_keys used to query each device;
+ * these are fmh_keys but w/ the low key
+ * bumped up by fmr_length.
+ * _getfsmap_info.gfi_next_fsblk-- next fs block we expect to see; this
+ * is how we detect gaps in the fsmap
+ * records and report them.
+ * _getfsmap_info.gfi_low/high -- per-bg low/high keys computed from
+ * dkeys; used to query the free space.
+ */
+int ext4_getfsmap(struct super_block *sb, struct ext4_fsmap_head *head,
+ ext4_fsmap_format_t formatter, void *arg)
+{
+ struct ext4_fsmap dkeys[2]; /* per-dev keys */
+ struct ext4_getfsmap_dev handlers[EXT4_GETFSMAP_DEVS];
+ struct ext4_getfsmap_info info = {0};
+ int i;
+ int error = 0;
+
+ if (head->fmh_iflags & ~FMH_IF_VALID)
+ return -EINVAL;
+ if (!ext4_getfsmap_is_valid_device(sb, &head->fmh_keys[0]) ||
+ !ext4_getfsmap_is_valid_device(sb, &head->fmh_keys[1]))
+ return -EINVAL;
+
+ head->fmh_entries = 0;
+
+ /* Set up our device handlers. */
+ memset(handlers, 0, sizeof(handlers));
+ handlers[0].gfd_dev = new_encode_dev(sb->s_bdev->bd_dev);
+ handlers[0].gfd_fn = ext4_getfsmap_datadev;
+ if (EXT4_SB(sb)->journal_bdev) {
+ handlers[1].gfd_dev = new_encode_dev(
+ EXT4_SB(sb)->journal_bdev->bd_dev);
+ handlers[1].gfd_fn = ext4_getfsmap_logdev;
+ }
+
+ sort(handlers, EXT4_GETFSMAP_DEVS, sizeof(struct ext4_getfsmap_dev),
+ ext4_getfsmap_dev_compare, NULL);
+
+ /*
+ * To continue where we left off, we allow userspace to use the
+ * last mapping from a previous call as the low key of the next.
+ * This is identified by a non-zero length in the low key. We
+ * have to increment the low key in this scenario to ensure we
+ * don't return the same mapping again, and instead return the
+ * very next mapping.
+ *
+ * Bump the physical offset as there can be no other mapping for
+ * the same physical block range.
+ */
+ dkeys[0] = head->fmh_keys[0];
+ dkeys[0].fmr_physical += dkeys[0].fmr_length;
+ dkeys[0].fmr_owner = 0;
+ dkeys[0].fmr_length = 0;
+ memset(&dkeys[1], 0xFF, sizeof(struct ext4_fsmap));
+
+ if (!ext4_getfsmap_check_keys(dkeys, &head->fmh_keys[1]))
+ return -EINVAL;
+
+ info.gfi_next_fsblk = head->fmh_keys[0].fmr_physical +
+ head->fmh_keys[0].fmr_length;
+ info.gfi_formatter = formatter;
+ info.gfi_format_arg = arg;
+ info.gfi_head = head;
+
+ /* For each device we support... */
+ for (i = 0; i < EXT4_GETFSMAP_DEVS; i++) {
+ /* Is this device within the range the user asked for? */
+ if (!handlers[i].gfd_fn)
+ continue;
+ if (head->fmh_keys[0].fmr_device > handlers[i].gfd_dev)
+ continue;
+ if (head->fmh_keys[1].fmr_device < handlers[i].gfd_dev)
+ break;
+
+ /*
+ * If this device number matches the high key, we have
+ * to pass the high key to the handler to limit the
+ * query results. If the device number exceeds the
+ * low key, zero out the low key so that we get
+ * everything from the beginning.
+ */
+ if (handlers[i].gfd_dev == head->fmh_keys[1].fmr_device)
+ dkeys[1] = head->fmh_keys[1];
+ if (handlers[i].gfd_dev > head->fmh_keys[0].fmr_device)
+ memset(&dkeys[0], 0, sizeof(struct ext4_fsmap));
+
+ info.gfi_dev = handlers[i].gfd_dev;
+ info.gfi_last = false;
+ info.gfi_agno = -1;
+ error = handlers[i].gfd_fn(sb, dkeys, &info);
+ if (error)
+ break;
+ info.gfi_next_fsblk = 0;
+ }
+
+ head->fmh_oflags = FMH_OF_DEV_T;
+ return error;
+}
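For context, the continuation rule spelled out in the comment above is what a
userspace caller of the new FS_IOC_GETFSMAP ioctl is expected to follow: feed
the last record of one call back in as the low key of the next. Below is a
minimal, illustrative sketch of such a caller (not part of this patch); it
assumes the uapi <linux/fsmap.h> helpers fsmap_sizeof() and fsmap_advance()
and the FMR_OF_LAST output flag.

    #include <stdio.h>
    #include <stdlib.h>
    #include <limits.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <sys/sysmacros.h>
    #include <linux/fsmap.h>

    #define NR_RECS 128

    int main(int argc, char **argv)
    {
        struct fsmap_head *head;
        unsigned int i;
        int fd;

        if (argc != 2) {
            fprintf(stderr, "usage: %s <path on ext4>\n", argv[0]);
            return 1;
        }
        fd = open(argv[1], O_RDONLY);
        if (fd < 0) {
            perror("open");
            return 1;
        }

        /* calloc() keeps all the reserved fields zeroed, as required. */
        head = calloc(1, fsmap_sizeof(NR_RECS));
        if (!head)
            return 1;
        head->fmh_count = NR_RECS;
        /* Low key stays zero; high key covers the whole filesystem. */
        head->fmh_keys[1].fmr_device = UINT_MAX;
        head->fmh_keys[1].fmr_physical = ULLONG_MAX;
        head->fmh_keys[1].fmr_owner = ULLONG_MAX;
        head->fmh_keys[1].fmr_offset = ULLONG_MAX;

        for (;;) {
            if (ioctl(fd, FS_IOC_GETFSMAP, head) < 0) {
                perror("FS_IOC_GETFSMAP");
                break;
            }
            if (!head->fmh_entries)
                break;
            for (i = 0; i < head->fmh_entries; i++) {
                struct fsmap *r = &head->fmh_recs[i];

                printf("dev %u:%u phys %llu len %llu owner 0x%llx flags 0x%x\n",
                       major(r->fmr_device), minor(r->fmr_device),
                       (unsigned long long)r->fmr_physical,
                       (unsigned long long)r->fmr_length,
                       (unsigned long long)r->fmr_owner,
                       r->fmr_flags);
            }
            /* The kernel marks the final record of the full query. */
            if (head->fmh_recs[head->fmh_entries - 1].fmr_flags & FMR_OF_LAST)
                break;
            /* Resume right after the last mapping we just saw. */
            fsmap_advance(head);
        }

        free(head);
        close(fd);
        return 0;
    }
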
diff --git a/fs/ext4/fsmap.h b/fs/ext4/fsmap.h
new file mode 100644
index 000000000000..9a2cd367cc66
--- /dev/null
+++ b/fs/ext4/fsmap.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (C) 2017 Oracle. All Rights Reserved.
+ *
+ * Author: Darrick J. Wong <darrick.wong@oracle.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#ifndef __EXT4_FSMAP_H__
+#define __EXT4_FSMAP_H__
+
+struct fsmap;
+
+/* internal fsmap representation */
+struct ext4_fsmap {
+ struct list_head fmr_list;
+ dev_t fmr_device; /* device id */
+ uint32_t fmr_flags; /* mapping flags */
+ uint64_t fmr_physical; /* device offset of segment */
+ uint64_t fmr_owner; /* owner id */
+ uint64_t fmr_length; /* length of segment, blocks */
+};
+
+struct ext4_fsmap_head {
+ uint32_t fmh_iflags; /* control flags */
+ uint32_t fmh_oflags; /* output flags */
+ unsigned int fmh_count; /* # of entries in array incl. input */
+ unsigned int fmh_entries; /* # of entries filled in (output). */
+
+ struct ext4_fsmap fmh_keys[2]; /* low and high keys */
+};
+
+void ext4_fsmap_from_internal(struct super_block *sb, struct fsmap *dest,
+ struct ext4_fsmap *src);
+void ext4_fsmap_to_internal(struct super_block *sb, struct ext4_fsmap *dest,
+ struct fsmap *src);
+
+/* fsmap to userspace formatter - copy to user & advance pointer */
+typedef int (*ext4_fsmap_format_t)(struct ext4_fsmap *, void *);
+
+int ext4_getfsmap(struct super_block *sb, struct ext4_fsmap_head *head,
+ ext4_fsmap_format_t formatter, void *arg);
+
+#define EXT4_QUERY_RANGE_ABORT 1
+#define EXT4_QUERY_RANGE_CONTINUE 0
+
+/* fmr_owner special values for FS_IOC_GETFSMAP; some share w/ XFS */
+#define EXT4_FMR_OWN_FREE FMR_OWN_FREE /* free space */
+#define EXT4_FMR_OWN_UNKNOWN FMR_OWN_UNKNOWN /* unknown owner */
+#define EXT4_FMR_OWN_FS FMR_OWNER('X', 1) /* static fs metadata */
+#define EXT4_FMR_OWN_LOG FMR_OWNER('X', 2) /* journalling log */
+#define EXT4_FMR_OWN_INODES FMR_OWNER('X', 5) /* inodes */
+#define EXT4_FMR_OWN_GDT FMR_OWNER('f', 1) /* group descriptors */
+#define EXT4_FMR_OWN_RESV_GDT FMR_OWNER('f', 2) /* reserved gdt blocks */
+#define EXT4_FMR_OWN_BLKBM FMR_OWNER('f', 3) /* block bitmap */
+#define EXT4_FMR_OWN_INOBM FMR_OWNER('f', 4) /* inode bitmap */
+
+#endif /* __EXT4_FSMAP_H__ */
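Assuming these owner values are passed through to userspace unchanged in
fmr_owner (ext4 has no reverse mapping, so data blocks are never attributed
to individual inodes), a small hypothetical decoder for pretty-printing
records returned on ext4 could look like the sketch below. FMR_OWNER(),
FMR_OWN_FREE and FMR_OWN_UNKNOWN come from the uapi <linux/fsmap.h>; the
labels are made up here.

    #include <stdint.h>
    #include <linux/fsmap.h>

    /* Map an ext4 fmr_owner value to a label, mirroring the table above. */
    static const char *ext4_fsmap_owner_name(uint64_t owner)
    {
        if (owner == FMR_OWN_FREE)
            return "free space";
        if (owner == FMR_OWN_UNKNOWN)
            return "unknown/in use";
        if (owner == FMR_OWNER('X', 1))
            return "static fs metadata";
        if (owner == FMR_OWNER('X', 2))
            return "journal";
        if (owner == FMR_OWNER('X', 5))
            return "inode blocks";
        if (owner == FMR_OWNER('f', 1))
            return "group descriptors";
        if (owner == FMR_OWNER('f', 2))
            return "reserved gdt blocks";
        if (owner == FMR_OWNER('f', 3))
            return "block bitmap";
        if (owner == FMR_OWNER('f', 4))
            return "inode bitmap";
        return "unrecognized owner code";
    }
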
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index 17bc043308f3..98ac2f1f23b3 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -1098,6 +1098,17 @@ got:
if (err)
goto fail_drop;
+ /*
+ * Since the encryption xattr will always be unique, create it first so
+ * that it's less likely to end up in an external xattr block, where it
+ * would prevent that block from being deduplicated.
+ */
+ if (encrypt) {
+ err = fscrypt_inherit_context(dir, inode, handle, true);
+ if (err)
+ goto fail_free_drop;
+ }
+
err = ext4_init_acl(handle, inode, dir);
if (err)
goto fail_free_drop;
@@ -1119,12 +1130,6 @@ got:
ei->i_datasync_tid = handle->h_transaction->t_tid;
}
- if (encrypt) {
- err = fscrypt_inherit_context(dir, inode, handle, true);
- if (err)
- goto fail_free_drop;
- }
-
err = ext4_mark_inode_dirty(handle, inode);
if (err) {
ext4_std_error(sb, err);
diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
index 375fb1c05d49..d5dea4c293ef 100644
--- a/fs/ext4/inline.c
+++ b/fs/ext4/inline.c
@@ -1034,7 +1034,7 @@ static int ext4_add_dirent_to_inline(handle_t *handle,
err = ext4_journal_get_write_access(handle, iloc->bh);
if (err)
return err;
- ext4_insert_dentry(dir, inode, de, inline_size, fname);
+ ext4_insert_dentry(inode, de, inline_size, fname);
ext4_show_inline_dir(dir, iloc->bh, inline_start, inline_size);
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 10b574ab354b..5834c4d76be8 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -1643,6 +1643,7 @@ struct mpage_da_data {
*/
struct ext4_map_blocks map;
struct ext4_io_submit io_submit; /* IO submission data */
+ unsigned int do_map:1;
};
static void mpage_release_unused_pages(struct mpage_da_data *mpd,
@@ -2179,6 +2180,9 @@ static bool mpage_add_bh_to_extent(struct mpage_da_data *mpd, ext4_lblk_t lblk,
/* First block in the extent? */
if (map->m_len == 0) {
+ /* We cannot map unless handle is started... */
+ if (!mpd->do_map)
+ return false;
map->m_lblk = lblk;
map->m_len = 1;
map->m_flags = bh->b_state & BH_FLAGS;
@@ -2231,6 +2235,9 @@ static int mpage_process_page_bufs(struct mpage_da_data *mpd,
/* Found extent to map? */
if (mpd->map.m_len)
return 0;
+ /* Buffer needs mapping and handle is not started? */
+ if (!mpd->do_map)
+ return 0;
/* Everything mapped so far and we hit EOF */
break;
}
@@ -2747,6 +2754,29 @@ retry:
tag_pages_for_writeback(mapping, mpd.first_page, mpd.last_page);
done = false;
blk_start_plug(&plug);
+
+ /*
+ * First writeback pages that don't need mapping - we can avoid
+ * starting a transaction unnecessarily and also avoid being blocked
+ * in the block layer on device congestion while a transaction is
+ * started.
+ */
+ mpd.do_map = 0;
+ mpd.io_submit.io_end = ext4_init_io_end(inode, GFP_KERNEL);
+ if (!mpd.io_submit.io_end) {
+ ret = -ENOMEM;
+ goto unplug;
+ }
+ ret = mpage_prepare_extent_to_map(&mpd);
+ /* Submit prepared bio */
+ ext4_io_submit(&mpd.io_submit);
+ ext4_put_io_end_defer(mpd.io_submit.io_end);
+ mpd.io_submit.io_end = NULL;
+ /* Unlock pages we didn't use */
+ mpage_release_unused_pages(&mpd, false);
+ if (ret < 0)
+ goto unplug;
+
while (!done && mpd.first_page <= mpd.last_page) {
/* For each extent of pages we use new io_end */
mpd.io_submit.io_end = ext4_init_io_end(inode, GFP_KERNEL);
@@ -2775,8 +2805,10 @@ retry:
wbc->nr_to_write, inode->i_ino, ret);
/* Release allocated io_end */
ext4_put_io_end(mpd.io_submit.io_end);
+ mpd.io_submit.io_end = NULL;
break;
}
+ mpd.do_map = 1;
trace_ext4_da_write_pages(inode, mpd.first_page, mpd.wbc);
ret = mpage_prepare_extent_to_map(&mpd);
@@ -2807,6 +2839,7 @@ retry:
if (!ext4_handle_valid(handle) || handle->h_sync == 0) {
ext4_journal_stop(handle);
handle = NULL;
+ mpd.do_map = 0;
}
/* Submit prepared bio */
ext4_io_submit(&mpd.io_submit);
@@ -2824,6 +2857,7 @@ retry:
ext4_journal_stop(handle);
} else
ext4_put_io_end(mpd.io_submit.io_end);
+ mpd.io_submit.io_end = NULL;
if (ret == -ENOSPC && sbi->s_journal) {
/*
@@ -2839,6 +2873,7 @@ retry:
if (ret)
break;
}
+unplug:
blk_finish_plug(&plug);
if (!ret && !cycled && wbc->nr_to_write > 0) {
cycled = 1;
@@ -3305,6 +3340,7 @@ static int ext4_releasepage(struct page *page, gfp_t wait)
static int ext4_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
unsigned flags, struct iomap *iomap)
{
+ struct block_device *bdev;
unsigned int blkbits = inode->i_blkbits;
unsigned long first_block = offset >> blkbits;
unsigned long last_block = (offset + length - 1) >> blkbits;
@@ -3373,7 +3409,12 @@ retry:
}
iomap->flags = 0;
- iomap->bdev = inode->i_sb->s_bdev;
+ bdev = inode->i_sb->s_bdev;
+ iomap->bdev = bdev;
+ if (blk_queue_dax(bdev->bd_queue))
+ iomap->dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
+ else
+ iomap->dax_dev = NULL;
iomap->offset = first_block << blkbits;
if (ret == 0) {
@@ -3406,6 +3447,7 @@ static int ext4_iomap_end(struct inode *inode, loff_t offset, loff_t length,
int blkbits = inode->i_blkbits;
bool truncate = false;
+ put_dax(iomap->dax_dev);
if (!(flags & IOMAP_WRITE) || (flags & IOMAP_FAULT))
return 0;
@@ -5848,6 +5890,11 @@ int ext4_page_mkwrite(struct vm_fault *vmf)
file_update_time(vma->vm_file);
down_read(&EXT4_I(inode)->i_mmap_sem);
+
+ ret = ext4_convert_inline_data(inode);
+ if (ret)
+ goto out_ret;
+
/* Delalloc case is easy... */
if (test_opt(inode->i_sb, DELALLOC) &&
!ext4_should_journal_data(inode) &&
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index 184e74eb3004..0c21e22acd74 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -19,6 +19,9 @@
#include <linux/delay.h>
#include "ext4_jbd2.h"
#include "ext4.h"
+#include <linux/fsmap.h>
+#include "fsmap.h"
+#include <trace/events/ext4.h>
/**
* Swap memory between @a and @b for @len bytes.
@@ -443,7 +446,7 @@ static inline unsigned long ext4_xflags_to_iflags(__u32 xflags)
return iflags;
}
-int ext4_shutdown(struct super_block *sb, unsigned long arg)
+static int ext4_shutdown(struct super_block *sb, unsigned long arg)
{
struct ext4_sb_info *sbi = EXT4_SB(sb);
__u32 flags;
@@ -489,6 +492,90 @@ int ext4_shutdown(struct super_block *sb, unsigned long arg)
return 0;
}
+struct getfsmap_info {
+ struct super_block *gi_sb;
+ struct fsmap_head __user *gi_data;
+ unsigned int gi_idx;
+ __u32 gi_last_flags;
+};
+
+static int ext4_getfsmap_format(struct ext4_fsmap *xfm, void *priv)
+{
+ struct getfsmap_info *info = priv;
+ struct fsmap fm;
+
+ trace_ext4_getfsmap_mapping(info->gi_sb, xfm);
+
+ info->gi_last_flags = xfm->fmr_flags;
+ ext4_fsmap_from_internal(info->gi_sb, &fm, xfm);
+ if (copy_to_user(&info->gi_data->fmh_recs[info->gi_idx++], &fm,
+ sizeof(struct fsmap)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int ext4_ioc_getfsmap(struct super_block *sb,
+ struct fsmap_head __user *arg)
+{
+ struct getfsmap_info info = {0};
+ struct ext4_fsmap_head xhead = {0};
+ struct fsmap_head head;
+ bool aborted = false;
+ int error;
+
+ if (copy_from_user(&head, arg, sizeof(struct fsmap_head)))
+ return -EFAULT;
+ if (memchr_inv(head.fmh_reserved, 0, sizeof(head.fmh_reserved)) ||
+ memchr_inv(head.fmh_keys[0].fmr_reserved, 0,
+ sizeof(head.fmh_keys[0].fmr_reserved)) ||
+ memchr_inv(head.fmh_keys[1].fmr_reserved, 0,
+ sizeof(head.fmh_keys[1].fmr_reserved)))
+ return -EINVAL;
+ /*
+ * ext4 doesn't report file extents at all, so the only valid
+ * file offsets are the magic ones (all zeroes or all ones).
+ */
+ if (head.fmh_keys[0].fmr_offset ||
+ (head.fmh_keys[1].fmr_offset != 0 &&
+ head.fmh_keys[1].fmr_offset != -1ULL))
+ return -EINVAL;
+
+ xhead.fmh_iflags = head.fmh_iflags;
+ xhead.fmh_count = head.fmh_count;
+ ext4_fsmap_to_internal(sb, &xhead.fmh_keys[0], &head.fmh_keys[0]);
+ ext4_fsmap_to_internal(sb, &xhead.fmh_keys[1], &head.fmh_keys[1]);
+
+ trace_ext4_getfsmap_low_key(sb, &xhead.fmh_keys[0]);
+ trace_ext4_getfsmap_high_key(sb, &xhead.fmh_keys[1]);
+
+ info.gi_sb = sb;
+ info.gi_data = arg;
+ error = ext4_getfsmap(sb, &xhead, ext4_getfsmap_format, &info);
+ if (error == EXT4_QUERY_RANGE_ABORT) {
+ error = 0;
+ aborted = true;
+ } else if (error)
+ return error;
+
+ /* If we didn't abort, set the "last" flag in the last fmx */
+ if (!aborted && info.gi_idx) {
+ info.gi_last_flags |= FMR_OF_LAST;
+ if (copy_to_user(&info.gi_data->fmh_recs[info.gi_idx - 1].fmr_flags,
+ &info.gi_last_flags,
+ sizeof(info.gi_last_flags)))
+ return -EFAULT;
+ }
+
+ /* copy back header */
+ head.fmh_entries = xhead.fmh_entries;
+ head.fmh_oflags = xhead.fmh_oflags;
+ if (copy_to_user(arg, &head, sizeof(struct fsmap_head)))
+ return -EFAULT;
+
+ return 0;
+}
+
long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
struct inode *inode = file_inode(filp);
@@ -499,6 +586,8 @@ long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
ext4_debug("cmd = %u, arg = %lu\n", cmd, arg);
switch (cmd) {
+ case FS_IOC_GETFSMAP:
+ return ext4_ioc_getfsmap(sb, (void __user *)arg);
case EXT4_IOC_GETFLAGS:
flags = ei->i_flags & EXT4_FL_USER_VISIBLE;
return put_user(flags, (int __user *) arg);
@@ -1007,6 +1096,7 @@ long ext4_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
case EXT4_IOC_GET_ENCRYPTION_PWSALT:
case EXT4_IOC_GET_ENCRYPTION_POLICY:
case EXT4_IOC_SHUTDOWN:
+ case FS_IOC_GETFSMAP:
break;
default:
return -ENOIOCTLCMD;
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 354dc1a894c2..5083bce20ac4 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -357,7 +357,7 @@ static struct kmem_cache *ext4_free_data_cachep;
#define NR_GRPINFO_CACHES 8
static struct kmem_cache *ext4_groupinfo_caches[NR_GRPINFO_CACHES];
-static const char *ext4_groupinfo_slab_names[NR_GRPINFO_CACHES] = {
+static const char * const ext4_groupinfo_slab_names[NR_GRPINFO_CACHES] = {
"ext4_groupinfo_1k", "ext4_groupinfo_2k", "ext4_groupinfo_4k",
"ext4_groupinfo_8k", "ext4_groupinfo_16k", "ext4_groupinfo_32k",
"ext4_groupinfo_64k", "ext4_groupinfo_128k"
@@ -2393,7 +2393,7 @@ int ext4_mb_alloc_groupinfo(struct super_block *sb, ext4_group_t ngroups)
return 0;
size = roundup_pow_of_two(sizeof(*sbi->s_group_info) * size);
- new_groupinfo = ext4_kvzalloc(size, GFP_KERNEL);
+ new_groupinfo = kvzalloc(size, GFP_KERNEL);
if (!new_groupinfo) {
ext4_msg(sb, KERN_ERR, "can't allocate buddy meta group");
return -ENOMEM;
@@ -5277,3 +5277,52 @@ out:
range->len = EXT4_C2B(EXT4_SB(sb), trimmed) << sb->s_blocksize_bits;
return ret;
}
+
+/* Iterate all the free extents in the group. */
+int
+ext4_mballoc_query_range(
+ struct super_block *sb,
+ ext4_group_t group,
+ ext4_grpblk_t start,
+ ext4_grpblk_t end,
+ ext4_mballoc_query_range_fn formatter,
+ void *priv)
+{
+ void *bitmap;
+ ext4_grpblk_t next;
+ struct ext4_buddy e4b;
+ int error;
+
+ error = ext4_mb_load_buddy(sb, group, &e4b);
+ if (error)
+ return error;
+ bitmap = e4b.bd_bitmap;
+
+ ext4_lock_group(sb, group);
+
+ start = (e4b.bd_info->bb_first_free > start) ?
+ e4b.bd_info->bb_first_free : start;
+ if (end >= EXT4_CLUSTERS_PER_GROUP(sb))
+ end = EXT4_CLUSTERS_PER_GROUP(sb) - 1;
+
+ while (start <= end) {
+ start = mb_find_next_zero_bit(bitmap, end + 1, start);
+ if (start > end)
+ break;
+ next = mb_find_next_bit(bitmap, end + 1, start);
+
+ ext4_unlock_group(sb, group);
+ error = formatter(sb, group, start, next - start, priv);
+ if (error)
+ goto out_unload;
+ ext4_lock_group(sb, group);
+
+ start = next + 1;
+ }
+
+ ext4_unlock_group(sb, group);
+out_unload:
+ ext4_mb_unload_buddy(&e4b);
+
+ return error;
+}
diff --git a/fs/ext4/mballoc.h b/fs/ext4/mballoc.h
index 1aba469f8220..2bed62084a8c 100644
--- a/fs/ext4/mballoc.h
+++ b/fs/ext4/mballoc.h
@@ -199,4 +199,21 @@ static inline ext4_fsblk_t ext4_grp_offs_to_block(struct super_block *sb,
return ext4_group_first_block_no(sb, fex->fe_group) +
(fex->fe_start << EXT4_SB(sb)->s_cluster_bits);
}
+
+typedef int (*ext4_mballoc_query_range_fn)(
+ struct super_block *sb,
+ ext4_group_t agno,
+ ext4_grpblk_t start,
+ ext4_grpblk_t len,
+ void *priv);
+
+int
+ext4_mballoc_query_range(
+ struct super_block *sb,
+ ext4_group_t agno,
+ ext4_grpblk_t start,
+ ext4_grpblk_t end,
+ ext4_mballoc_query_range_fn formatter,
+ void *priv);
+
#endif
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index 07e5e1405771..b81f7d46f344 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -1237,37 +1237,24 @@ static void dx_insert_block(struct dx_frame *frame, u32 hash, ext4_lblk_t block)
}
/*
- * NOTE! unlike strncmp, ext4_match returns 1 for success, 0 for failure.
+ * Test whether a directory entry matches the filename being searched for.
*
- * `len <= EXT4_NAME_LEN' is guaranteed by caller.
- * `de != NULL' is guaranteed by caller.
+ * Return: %true if the directory entry matches, otherwise %false.
*/
-static inline int ext4_match(struct ext4_filename *fname,
- struct ext4_dir_entry_2 *de)
+static inline bool ext4_match(const struct ext4_filename *fname,
+ const struct ext4_dir_entry_2 *de)
{
- const void *name = fname_name(fname);
- u32 len = fname_len(fname);
+ struct fscrypt_name f;
if (!de->inode)
- return 0;
+ return false;
+ f.usr_fname = fname->usr_fname;
+ f.disk_name = fname->disk_name;
#ifdef CONFIG_EXT4_FS_ENCRYPTION
- if (unlikely(!name)) {
- if (fname->usr_fname->name[0] == '_') {
- int ret;
- if (de->name_len < 16)
- return 0;
- ret = memcmp(de->name + de->name_len - 16,
- fname->crypto_buf.name + 8, 16);
- return (ret == 0) ? 1 : 0;
- }
- name = fname->crypto_buf.name;
- len = fname->crypto_buf.len;
- }
+ f.crypto_buf = fname->crypto_buf;
#endif
- if (de->name_len != len)
- return 0;
- return (memcmp(de->name, name, len) == 0) ? 1 : 0;
+ return fscrypt_match_name(&f, de->name, de->name_len);
}
/*
@@ -1281,48 +1268,31 @@ int ext4_search_dir(struct buffer_head *bh, char *search_buf, int buf_size,
struct ext4_dir_entry_2 * de;
char * dlimit;
int de_len;
- int res;
de = (struct ext4_dir_entry_2 *)search_buf;
dlimit = search_buf + buf_size;
while ((char *) de < dlimit) {
/* this code is executed quadratically often */
/* do minimal checking `by hand' */
- if ((char *) de + de->name_len <= dlimit) {
- res = ext4_match(fname, de);
- if (res < 0) {
- res = -1;
- goto return_result;
- }
- if (res > 0) {
- /* found a match - just to be sure, do
- * a full check */
- if (ext4_check_dir_entry(dir, NULL, de, bh,
- bh->b_data,
- bh->b_size, offset)) {
- res = -1;
- goto return_result;
- }
- *res_dir = de;
- res = 1;
- goto return_result;
- }
-
+ if ((char *) de + de->name_len <= dlimit &&
+ ext4_match(fname, de)) {
+ /* found a match - just to be sure, do
+ * a full check */
+ if (ext4_check_dir_entry(dir, NULL, de, bh, bh->b_data,
+ bh->b_size, offset))
+ return -1;
+ *res_dir = de;
+ return 1;
}
/* prevent looping on a bad block */
de_len = ext4_rec_len_from_disk(de->rec_len,
dir->i_sb->s_blocksize);
- if (de_len <= 0) {
- res = -1;
- goto return_result;
- }
+ if (de_len <= 0)
+ return -1;
offset += de_len;
de = (struct ext4_dir_entry_2 *) ((char *) de + de_len);
}
-
- res = 0;
-return_result:
- return res;
+ return 0;
}
static int is_dx_internal_node(struct inode *dir, ext4_lblk_t block,
@@ -1616,16 +1586,9 @@ static struct dentry *ext4_lookup(struct inode *dir, struct dentry *dentry, unsi
if (!IS_ERR(inode) && ext4_encrypted_inode(dir) &&
(S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) &&
!fscrypt_has_permitted_context(dir, inode)) {
- int nokey = ext4_encrypted_inode(inode) &&
- !fscrypt_has_encryption_key(inode);
- if (nokey) {
- iput(inode);
- return ERR_PTR(-ENOKEY);
- }
ext4_warning(inode->i_sb,
"Inconsistent encryption contexts: %lu/%lu",
- (unsigned long) dir->i_ino,
- (unsigned long) inode->i_ino);
+ dir->i_ino, inode->i_ino);
iput(inode);
return ERR_PTR(-EPERM);
}
@@ -1833,24 +1796,15 @@ int ext4_find_dest_de(struct inode *dir, struct inode *inode,
int nlen, rlen;
unsigned int offset = 0;
char *top;
- int res;
de = (struct ext4_dir_entry_2 *)buf;
top = buf + buf_size - reclen;
while ((char *) de <= top) {
if (ext4_check_dir_entry(dir, NULL, de, bh,
- buf, buf_size, offset)) {
- res = -EFSCORRUPTED;
- goto return_result;
- }
- /* Provide crypto context and crypto buffer to ext4 match */
- res = ext4_match(fname, de);
- if (res < 0)
- goto return_result;
- if (res > 0) {
- res = -EEXIST;
- goto return_result;
- }
+ buf, buf_size, offset))
+ return -EFSCORRUPTED;
+ if (ext4_match(fname, de))
+ return -EEXIST;
nlen = EXT4_DIR_REC_LEN(de->name_len);
rlen = ext4_rec_len_from_disk(de->rec_len, buf_size);
if ((de->inode ? rlen - nlen : rlen) >= reclen)
@@ -1858,22 +1812,17 @@ int ext4_find_dest_de(struct inode *dir, struct inode *inode,
de = (struct ext4_dir_entry_2 *)((char *)de + rlen);
offset += rlen;
}
-
if ((char *) de > top)
- res = -ENOSPC;
- else {
- *dest_de = de;
- res = 0;
- }
-return_result:
- return res;
+ return -ENOSPC;
+
+ *dest_de = de;
+ return 0;
}
-int ext4_insert_dentry(struct inode *dir,
- struct inode *inode,
- struct ext4_dir_entry_2 *de,
- int buf_size,
- struct ext4_filename *fname)
+void ext4_insert_dentry(struct inode *inode,
+ struct ext4_dir_entry_2 *de,
+ int buf_size,
+ struct ext4_filename *fname)
{
int nlen, rlen;
@@ -1892,7 +1841,6 @@ int ext4_insert_dentry(struct inode *dir,
ext4_set_de_type(inode->i_sb, de, inode->i_mode);
de->name_len = fname_len(fname);
memcpy(de->name, fname_name(fname), fname_len(fname));
- return 0;
}
/*
@@ -1928,11 +1876,8 @@ static int add_dirent_to_buf(handle_t *handle, struct ext4_filename *fname,
return err;
}
- /* By now the buffer is marked for journaling. Due to crypto operations,
- * the following function call may fail */
- err = ext4_insert_dentry(dir, inode, de, blocksize, fname);
- if (err < 0)
- return err;
+ /* By now the buffer is marked for journaling */
+ ext4_insert_dentry(inode, de, blocksize, fname);
/*
* XXX shouldn't update any times until successful
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index 208241b06662..1a82138ba739 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -297,8 +297,17 @@ static void ext4_end_bio(struct bio *bio)
{
ext4_io_end_t *io_end = bio->bi_private;
sector_t bi_sector = bio->bi_iter.bi_sector;
+ char b[BDEVNAME_SIZE];
- BUG_ON(!io_end);
+ if (WARN_ONCE(!io_end, "io_end is NULL: %s: sector %Lu len %u err %d\n",
+ bdevname(bio->bi_bdev, b),
+ (long long) bio->bi_iter.bi_sector,
+ (unsigned) bio_sectors(bio),
+ bio->bi_error)) {
+ ext4_finish_bio(bio);
+ bio_put(bio);
+ return;
+ }
bio->bi_end_io = NULL;
if (bio->bi_error) {
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index a9c72e39a4ee..c90edf09b0c3 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -49,6 +49,7 @@
#include "xattr.h"
#include "acl.h"
#include "mballoc.h"
+#include "fsmap.h"
#define CREATE_TRACE_POINTS
#include <trace/events/ext4.h>
@@ -1230,7 +1231,7 @@ static const struct fscrypt_operations ext4_cryptops = {
#endif
#ifdef CONFIG_QUOTA
-static char *quotatypes[] = INITQFNAMES;
+static const char * const quotatypes[] = INITQFNAMES;
#define QTYPE2NAME(t) (quotatypes[t])
static int ext4_write_dquot(struct dquot *dquot);
@@ -1443,7 +1444,8 @@ static ext4_fsblk_t get_sb_block(void **data)
}
#define DEFAULT_JOURNAL_IOPRIO (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 3))
-static char deprecated_msg[] = "Mount option \"%s\" will be removed by %s\n"
+static const char deprecated_msg[] =
+ "Mount option \"%s\" will be removed by %s\n"
"Contact linux-ext4@vger.kernel.org if you think we should keep it.\n";
#ifdef CONFIG_QUOTA
@@ -2153,7 +2155,7 @@ int ext4_alloc_flex_bg_array(struct super_block *sb, ext4_group_t ngroup)
return 0;
size = roundup_pow_of_two(size * sizeof(struct flex_groups));
- new_groups = ext4_kvzalloc(size, GFP_KERNEL);
+ new_groups = kvzalloc(size, GFP_KERNEL);
if (!new_groups) {
ext4_msg(sb, KERN_ERR, "not enough memory for %d flex groups",
size / (int) sizeof(struct flex_groups));
@@ -3887,7 +3889,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
goto failed_mount;
}
}
- sbi->s_group_desc = ext4_kvmalloc(db_count *
+ sbi->s_group_desc = kvmalloc(db_count *
sizeof(struct buffer_head *),
GFP_KERNEL);
if (sbi->s_group_desc == NULL) {
@@ -3898,6 +3900,12 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
bgl_lock_init(sbi->s_blockgroup_lock);
+ /* Pre-read the descriptors into the buffer cache */
+ for (i = 0; i < db_count; i++) {
+ block = descriptor_loc(sb, logical_sb_block, i);
+ sb_breadahead(sb, block);
+ }
+
for (i = 0; i < db_count; i++) {
block = descriptor_loc(sb, logical_sb_block, i);
sbi->s_group_desc[i] = sb_bread_unmovable(sb, block);
@@ -4650,7 +4658,7 @@ static int ext4_commit_super(struct super_block *sb, int sync)
if (sync) {
unlock_buffer(sbh);
error = __sync_dirty_buffer(sbh,
- test_opt(sb, BARRIER) ? REQ_FUA : REQ_SYNC);
+ REQ_SYNC | (test_opt(sb, BARRIER) ? REQ_FUA : 0));
if (error)
return error;
diff --git a/fs/ext4/sysfs.c b/fs/ext4/sysfs.c
index 42145be5c6b4..d74dc5f81a04 100644
--- a/fs/ext4/sysfs.c
+++ b/fs/ext4/sysfs.c
@@ -34,7 +34,7 @@ typedef enum {
ptr_ext4_super_block_offset,
} attr_ptr_t;
-static const char *proc_dirname = "fs/ext4";
+static const char proc_dirname[] = "fs/ext4";
static struct proc_dir_entry *ext4_proc_root;
struct ext4_attr {
@@ -375,7 +375,7 @@ static const struct file_operations ext4_seq_##name##_fops = { \
PROC_FILE_SHOW_DEFN(es_shrinker_info);
PROC_FILE_SHOW_DEFN(options);
-static struct ext4_proc_files {
+static const struct ext4_proc_files {
const char *name;
const struct file_operations *fops;
} proc_files[] = {
@@ -388,7 +388,7 @@ static struct ext4_proc_files {
int ext4_register_sysfs(struct super_block *sb)
{
struct ext4_sb_info *sbi = EXT4_SB(sb);
- struct ext4_proc_files *p;
+ const struct ext4_proc_files *p;
int err;
sbi->s_kobj.kset = &ext4_kset;
@@ -412,7 +412,7 @@ int ext4_register_sysfs(struct super_block *sb)
void ext4_unregister_sysfs(struct super_block *sb)
{
struct ext4_sb_info *sbi = EXT4_SB(sb);
- struct ext4_proc_files *p;
+ const struct ext4_proc_files *p;
if (sbi->s_proc) {
for (p = proc_files; p->name; p++)
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index 996e7900d4c8..8fb7ce14e6eb 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -78,10 +78,8 @@ static struct buffer_head *ext4_xattr_cache_find(struct inode *,
struct mb_cache_entry **);
static void ext4_xattr_rehash(struct ext4_xattr_header *,
struct ext4_xattr_entry *);
-static int ext4_xattr_list(struct dentry *dentry, char *buffer,
- size_t buffer_size);
-static const struct xattr_handler *ext4_xattr_handler_map[] = {
+static const struct xattr_handler * const ext4_xattr_handler_map[] = {
[EXT4_XATTR_INDEX_USER] = &ext4_xattr_user_handler,
#ifdef CONFIG_EXT4_FS_POSIX_ACL
[EXT4_XATTR_INDEX_POSIX_ACL_ACCESS] = &posix_acl_access_xattr_handler,
@@ -163,20 +161,9 @@ ext4_xattr_handler(int name_index)
return handler;
}
-/*
- * Inode operation listxattr()
- *
- * d_inode(dentry)->i_mutex: don't care
- */
-ssize_t
-ext4_listxattr(struct dentry *dentry, char *buffer, size_t size)
-{
- return ext4_xattr_list(dentry, buffer, size);
-}
-
static int
-ext4_xattr_check_names(struct ext4_xattr_entry *entry, void *end,
- void *value_start)
+ext4_xattr_check_entries(struct ext4_xattr_entry *entry, void *end,
+ void *value_start)
{
struct ext4_xattr_entry *e = entry;
@@ -230,8 +217,8 @@ ext4_xattr_check_block(struct inode *inode, struct buffer_head *bh)
return -EFSCORRUPTED;
if (!ext4_xattr_block_csum_verify(inode, bh))
return -EFSBADCRC;
- error = ext4_xattr_check_names(BFIRST(bh), bh->b_data + bh->b_size,
- bh->b_data);
+ error = ext4_xattr_check_entries(BFIRST(bh), bh->b_data + bh->b_size,
+ bh->b_data);
if (!error)
set_buffer_verified(bh);
return error;
@@ -246,7 +233,7 @@ __xattr_check_inode(struct inode *inode, struct ext4_xattr_ibody_header *header,
if (end - (void *)header < sizeof(*header) + sizeof(u32) ||
(header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)))
goto errout;
- error = ext4_xattr_check_names(IFIRST(header), end, IFIRST(header));
+ error = ext4_xattr_check_entries(IFIRST(header), end, IFIRST(header));
errout:
if (error)
__ext4_error_inode(inode, function, line, 0,
@@ -257,20 +244,9 @@ errout:
#define xattr_check_inode(inode, header, end) \
__xattr_check_inode((inode), (header), (end), __func__, __LINE__)
-static inline int
-ext4_xattr_check_entry(struct ext4_xattr_entry *entry, size_t size)
-{
- size_t value_size = le32_to_cpu(entry->e_value_size);
-
- if (entry->e_value_block != 0 || value_size > size ||
- le16_to_cpu(entry->e_value_offs) + value_size > size)
- return -EFSCORRUPTED;
- return 0;
-}
-
static int
ext4_xattr_find_entry(struct ext4_xattr_entry **pentry, int name_index,
- const char *name, size_t size, int sorted)
+ const char *name, int sorted)
{
struct ext4_xattr_entry *entry;
size_t name_len;
@@ -290,8 +266,6 @@ ext4_xattr_find_entry(struct ext4_xattr_entry **pentry, int name_index,
break;
}
*pentry = entry;
- if (!cmp && ext4_xattr_check_entry(entry, size))
- return -EFSCORRUPTED;
return cmp ? -ENODATA : 0;
}
@@ -319,7 +293,6 @@ ext4_xattr_block_get(struct inode *inode, int name_index, const char *name,
ea_bdebug(bh, "b_count=%d, refcount=%d",
atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount));
if (ext4_xattr_check_block(inode, bh)) {
-bad_block:
EXT4_ERROR_INODE(inode, "bad block %llu",
EXT4_I(inode)->i_file_acl);
error = -EFSCORRUPTED;
@@ -327,9 +300,7 @@ bad_block:
}
ext4_xattr_cache_insert(ext4_mb_cache, bh);
entry = BFIRST(bh);
- error = ext4_xattr_find_entry(&entry, name_index, name, bh->b_size, 1);
- if (error == -EFSCORRUPTED)
- goto bad_block;
+ error = ext4_xattr_find_entry(&entry, name_index, name, 1);
if (error)
goto cleanup;
size = le32_to_cpu(entry->e_value_size);
@@ -366,13 +337,12 @@ ext4_xattr_ibody_get(struct inode *inode, int name_index, const char *name,
return error;
raw_inode = ext4_raw_inode(&iloc);
header = IHDR(inode, raw_inode);
- entry = IFIRST(header);
end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
error = xattr_check_inode(inode, header, end);
if (error)
goto cleanup;
- error = ext4_xattr_find_entry(&entry, name_index, name,
- end - (void *)entry, 0);
+ entry = IFIRST(header);
+ error = ext4_xattr_find_entry(&entry, name_index, name, 0);
if (error)
goto cleanup;
size = le32_to_cpu(entry->e_value_size);
@@ -519,7 +489,9 @@ cleanup:
}
/*
- * ext4_xattr_list()
+ * Inode operation listxattr()
+ *
+ * d_inode(dentry)->i_rwsem: don't care
*
* Copy a list of attribute names into the buffer
* provided, or compute the buffer size required.
@@ -528,8 +500,8 @@ cleanup:
* Returns a negative error number on failure, or the number of bytes
* used / required on success.
*/
-static int
-ext4_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size)
+ssize_t
+ext4_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size)
{
int ret, ret2;
@@ -804,7 +776,7 @@ ext4_xattr_block_find(struct inode *inode, struct ext4_xattr_info *i,
bs->s.end = bs->bh->b_data + bs->bh->b_size;
bs->s.here = bs->s.first;
error = ext4_xattr_find_entry(&bs->s.here, i->name_index,
- i->name, bs->bh->b_size, 1);
+ i->name, 1);
if (error && error != -ENODATA)
goto cleanup;
bs->s.not_found = error;
@@ -1076,8 +1048,7 @@ int ext4_xattr_ibody_find(struct inode *inode, struct ext4_xattr_info *i,
return error;
/* Find the named attribute. */
error = ext4_xattr_find_entry(&is->s.here, i->name_index,
- i->name, is->s.end -
- (void *)is->s.base, 0);
+ i->name, 0);
if (error && error != -ENODATA)
return error;
is->s.not_found = error;
diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
index 0339daf4ca02..ea9c317b5916 100644
--- a/fs/f2fs/checkpoint.c
+++ b/fs/f2fs/checkpoint.c
@@ -275,10 +275,11 @@ static int f2fs_write_meta_pages(struct address_space *mapping,
get_pages(sbi, F2FS_DIRTY_META) < nr_pages_to_skip(sbi, META))
goto skip_write;
- trace_f2fs_writepages(mapping->host, wbc, META);
+ /* if trylock fails, cp will flush dirty pages instead */
+ if (!mutex_trylock(&sbi->cp_mutex))
+ goto skip_write;
- /* if mounting is failed, skip writing node pages */
- mutex_lock(&sbi->cp_mutex);
+ trace_f2fs_writepages(mapping->host, wbc, META);
diff = nr_pages_to_write(sbi, META, wbc);
written = sync_meta_pages(sbi, META, wbc->nr_to_write);
mutex_unlock(&sbi->cp_mutex);
@@ -567,7 +568,7 @@ static int recover_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
if (ni.blk_addr != NULL_ADDR) {
set_sbi_flag(sbi, SBI_NEED_FSCK);
f2fs_msg(sbi->sb, KERN_WARNING,
- "%s: orphan failed (ino=%x), run fsck to fix.",
+ "%s: orphan failed (ino=%x) by kernel, retry mount.",
__func__, ino);
return -EIO;
}
@@ -677,7 +678,7 @@ static int get_checkpoint_version(struct f2fs_sb_info *sbi, block_t cp_addr,
*cp_block = (struct f2fs_checkpoint *)page_address(*cp_page);
crc_offset = le32_to_cpu((*cp_block)->checksum_offset);
- if (crc_offset >= blk_size) {
+ if (crc_offset > (blk_size - sizeof(__le32))) {
f2fs_msg(sbi->sb, KERN_WARNING,
"invalid crc_offset: %zu", crc_offset);
return -EINVAL;
@@ -816,7 +817,9 @@ static void __add_dirty_inode(struct inode *inode, enum inode_type type)
return;
set_inode_flag(inode, flag);
- list_add_tail(&F2FS_I(inode)->dirty_list, &sbi->inode_list[type]);
+ if (!f2fs_is_volatile_file(inode))
+ list_add_tail(&F2FS_I(inode)->dirty_list,
+ &sbi->inode_list[type]);
stat_inc_dirty_inode(sbi, type);
}
@@ -941,6 +944,19 @@ int f2fs_sync_inode_meta(struct f2fs_sb_info *sbi)
return 0;
}
+static void __prepare_cp_block(struct f2fs_sb_info *sbi)
+{
+ struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
+ struct f2fs_nm_info *nm_i = NM_I(sbi);
+ nid_t last_nid = nm_i->next_scan_nid;
+
+ next_free_nid(sbi, &last_nid);
+ ckpt->valid_block_count = cpu_to_le64(valid_user_blocks(sbi));
+ ckpt->valid_node_count = cpu_to_le32(valid_node_count(sbi));
+ ckpt->valid_inode_count = cpu_to_le32(valid_inode_count(sbi));
+ ckpt->next_free_nid = cpu_to_le32(last_nid);
+}
+
/*
* Freeze all the FS-operations for checkpoint.
*/
@@ -964,21 +980,26 @@ retry_flush_dents:
err = sync_dirty_inodes(sbi, DIR_INODE);
if (err)
goto out;
+ cond_resched();
goto retry_flush_dents;
}
+ /*
+ * POR: we should ensure that there are no dirty node pages
+ * until finishing nat/sit flush. inode->i_blocks can be updated.
+ */
+ down_write(&sbi->node_change);
+
if (get_pages(sbi, F2FS_DIRTY_IMETA)) {
+ up_write(&sbi->node_change);
f2fs_unlock_all(sbi);
err = f2fs_sync_inode_meta(sbi);
if (err)
goto out;
+ cond_resched();
goto retry_flush_dents;
}
- /*
- * POR: we should ensure that there are no dirty node pages
- * until finishing nat/sit flush.
- */
retry_flush_nodes:
down_write(&sbi->node_write);
@@ -986,11 +1007,20 @@ retry_flush_nodes:
up_write(&sbi->node_write);
err = sync_node_pages(sbi, &wbc);
if (err) {
+ up_write(&sbi->node_change);
f2fs_unlock_all(sbi);
goto out;
}
+ cond_resched();
goto retry_flush_nodes;
}
+
+ /*
+ * sbi->node_change is used only for the AIO write_begin path, which can
+ * produce dirty node blocks and change some checkpoint values via
+ * block allocation.
+ */
+ __prepare_cp_block(sbi);
+ up_write(&sbi->node_change);
out:
blk_finish_plug(&plug);
return err;
@@ -1024,16 +1054,20 @@ static void update_ckpt_flags(struct f2fs_sb_info *sbi, struct cp_control *cpc)
spin_lock(&sbi->cp_lock);
- if (cpc->reason == CP_UMOUNT && ckpt->cp_pack_total_block_count >
+ if ((cpc->reason & CP_UMOUNT) &&
+ le32_to_cpu(ckpt->cp_pack_total_block_count) >
sbi->blocks_per_seg - NM_I(sbi)->nat_bits_blocks)
disable_nat_bits(sbi, false);
- if (cpc->reason == CP_UMOUNT)
+ if (cpc->reason & CP_TRIMMED)
+ __set_ckpt_flags(ckpt, CP_TRIMMED_FLAG);
+
+ if (cpc->reason & CP_UMOUNT)
__set_ckpt_flags(ckpt, CP_UMOUNT_FLAG);
else
__clear_ckpt_flags(ckpt, CP_UMOUNT_FLAG);
- if (cpc->reason == CP_FASTBOOT)
+ if (cpc->reason & CP_FASTBOOT)
__set_ckpt_flags(ckpt, CP_FASTBOOT_FLAG);
else
__clear_ckpt_flags(ckpt, CP_FASTBOOT_FLAG);
@@ -1057,7 +1091,6 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
struct f2fs_nm_info *nm_i = NM_I(sbi);
unsigned long orphan_num = sbi->im[ORPHAN_INO].ino_num;
- nid_t last_nid = nm_i->next_scan_nid;
block_t start_blk;
unsigned int data_sum_blocks, orphan_blocks;
__u32 crc32 = 0;
@@ -1074,14 +1107,11 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
return -EIO;
}
- next_free_nid(sbi, &last_nid);
-
/*
* modify checkpoint
* version number is already updated
*/
ckpt->elapsed_time = cpu_to_le64(get_mtime(sbi));
- ckpt->valid_block_count = cpu_to_le64(valid_user_blocks(sbi));
ckpt->free_segment_count = cpu_to_le32(free_segments(sbi));
for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
ckpt->cur_node_segno[i] =
@@ -1100,10 +1130,6 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
curseg_alloc_type(sbi, i + CURSEG_HOT_DATA);
}
- ckpt->valid_node_count = cpu_to_le32(valid_node_count(sbi));
- ckpt->valid_inode_count = cpu_to_le32(valid_inode_count(sbi));
- ckpt->next_free_nid = cpu_to_le32(last_nid);
-
/* 2 cp + n data seg summary + orphan inode blocks */
data_sum_blocks = npages_for_summary_flush(sbi, false);
spin_lock(&sbi->cp_lock);
@@ -1143,7 +1169,6 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
/* write nat bits */
if (enabled_nat_bits(sbi, cpc)) {
__u64 cp_ver = cur_cp_version(ckpt);
- unsigned int i;
block_t blk;
cp_ver |= ((__u64)crc32 << 32);
@@ -1250,8 +1275,8 @@ int write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
mutex_lock(&sbi->cp_mutex);
if (!is_sbi_flag_set(sbi, SBI_IS_DIRTY) &&
- (cpc->reason == CP_FASTBOOT || cpc->reason == CP_SYNC ||
- (cpc->reason == CP_DISCARD && !sbi->discard_blks)))
+ ((cpc->reason & CP_FASTBOOT) || (cpc->reason & CP_SYNC) ||
+ ((cpc->reason & CP_DISCARD) && !sbi->discard_blks)))
goto out;
if (unlikely(f2fs_cp_error(sbi))) {
err = -EIO;
@@ -1273,7 +1298,7 @@ int write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
f2fs_flush_merged_bios(sbi);
/* this is the case of multiple fstrims without any changes */
- if (cpc->reason == CP_DISCARD) {
+ if (cpc->reason & CP_DISCARD) {
if (!exist_trim_candidates(sbi, cpc)) {
unblock_operations(sbi);
goto out;
@@ -1311,7 +1336,7 @@ int write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
unblock_operations(sbi);
stat_inc_cp_count(sbi->stat_info);
- if (cpc->reason == CP_RECOVERY)
+ if (cpc->reason & CP_RECOVERY)
f2fs_msg(sbi->sb, KERN_NOTICE,
"checkpoint: version = %llx", ckpt_ver);
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 1602b4bccae6..7c0f6bdf817d 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -309,7 +309,7 @@ static void __f2fs_submit_merged_bio(struct f2fs_sb_info *sbi,
if (type >= META_FLUSH) {
io->fio.type = META_FLUSH;
io->fio.op = REQ_OP_WRITE;
- io->fio.op_flags = REQ_META | REQ_PRIO;
+ io->fio.op_flags = REQ_META | REQ_PRIO | REQ_SYNC;
if (!test_opt(sbi, NOBARRIER))
io->fio.op_flags |= REQ_PREFLUSH | REQ_FUA;
}
@@ -341,7 +341,7 @@ void f2fs_flush_merged_bios(struct f2fs_sb_info *sbi)
/*
* Fill the locked page with data located in the block address.
- * Return unlocked page.
+ * A caller needs to unlock the page on failure.
*/
int f2fs_submit_page_bio(struct f2fs_io_info *fio)
{
@@ -362,6 +362,9 @@ int f2fs_submit_page_bio(struct f2fs_io_info *fio)
bio_set_op_attrs(bio, fio->op, fio->op_flags);
__submit_bio(fio->sbi, bio, fio->type);
+
+ if (!is_read_io(fio->op))
+ inc_page_count(fio->sbi, WB_DATA_TYPE(fio->page));
return 0;
}
@@ -787,6 +790,21 @@ int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from)
return err;
}
+static inline void __do_map_lock(struct f2fs_sb_info *sbi, int flag, bool lock)
+{
+ if (flag == F2FS_GET_BLOCK_PRE_AIO) {
+ if (lock)
+ down_read(&sbi->node_change);
+ else
+ up_read(&sbi->node_change);
+ } else {
+ if (lock)
+ f2fs_lock_op(sbi);
+ else
+ f2fs_unlock_op(sbi);
+ }
+}
+
/*
* f2fs_map_blocks() now supported readahead/bmap/rw direct_IO with
* f2fs_map_blocks structure.
@@ -829,7 +847,7 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
next_dnode:
if (create)
- f2fs_lock_op(sbi);
+ __do_map_lock(sbi, flag, true);
/* When reading holes, we need its node page */
set_new_dnode(&dn, inode, NULL, NULL, 0);
@@ -939,7 +957,7 @@ skip:
f2fs_put_dnode(&dn);
if (create) {
- f2fs_unlock_op(sbi);
+ __do_map_lock(sbi, flag, false);
f2fs_balance_fs(sbi, dn.node_changed);
}
goto next_dnode;
@@ -948,7 +966,7 @@ sync_out:
f2fs_put_dnode(&dn);
unlock_out:
if (create) {
- f2fs_unlock_op(sbi);
+ __do_map_lock(sbi, flag, false);
f2fs_balance_fs(sbi, dn.node_changed);
}
out:
@@ -1151,9 +1169,10 @@ static int f2fs_mpage_readpages(struct address_space *mapping,
for (page_idx = 0; nr_pages; page_idx++, nr_pages--) {
- prefetchw(&page->flags);
if (pages) {
page = list_last_entry(pages, struct page, lru);
+
+ prefetchw(&page->flags);
list_del(&page->lru);
if (add_to_page_cache_lru(page, mapping,
page->index,
@@ -1283,17 +1302,83 @@ static int f2fs_read_data_pages(struct file *file,
return f2fs_mpage_readpages(mapping, pages, NULL, nr_pages);
}
+static int encrypt_one_page(struct f2fs_io_info *fio)
+{
+ struct inode *inode = fio->page->mapping->host;
+ gfp_t gfp_flags = GFP_NOFS;
+
+ if (!f2fs_encrypted_inode(inode) || !S_ISREG(inode->i_mode))
+ return 0;
+
+ /* wait for GCed encrypted page writeback */
+ f2fs_wait_on_encrypted_page_writeback(fio->sbi, fio->old_blkaddr);
+
+retry_encrypt:
+ fio->encrypted_page = fscrypt_encrypt_page(inode, fio->page,
+ PAGE_SIZE, 0, fio->page->index, gfp_flags);
+ if (!IS_ERR(fio->encrypted_page))
+ return 0;
+
+ /* flush pending IOs and wait for a while in the ENOMEM case */
+ if (PTR_ERR(fio->encrypted_page) == -ENOMEM) {
+ f2fs_flush_merged_bios(fio->sbi);
+ congestion_wait(BLK_RW_ASYNC, HZ/50);
+ gfp_flags |= __GFP_NOFAIL;
+ goto retry_encrypt;
+ }
+ return PTR_ERR(fio->encrypted_page);
+}
+
+static inline bool need_inplace_update(struct f2fs_io_info *fio)
+{
+ struct inode *inode = fio->page->mapping->host;
+
+ if (S_ISDIR(inode->i_mode) || f2fs_is_atomic_file(inode))
+ return false;
+ if (is_cold_data(fio->page))
+ return false;
+ if (IS_ATOMIC_WRITTEN_PAGE(fio->page))
+ return false;
+
+ return need_inplace_update_policy(inode, fio);
+}
+
+static inline bool valid_ipu_blkaddr(struct f2fs_io_info *fio)
+{
+ if (fio->old_blkaddr == NEW_ADDR)
+ return false;
+ if (fio->old_blkaddr == NULL_ADDR)
+ return false;
+ return true;
+}
+
int do_write_data_page(struct f2fs_io_info *fio)
{
struct page *page = fio->page;
struct inode *inode = page->mapping->host;
struct dnode_of_data dn;
+ struct extent_info ei = {0,0,0};
+ bool ipu_force = false;
int err = 0;
set_new_dnode(&dn, inode, NULL, NULL, 0);
+ if (need_inplace_update(fio) &&
+ f2fs_lookup_extent_cache(inode, page->index, &ei)) {
+ fio->old_blkaddr = ei.blk + page->index - ei.fofs;
+
+ if (valid_ipu_blkaddr(fio)) {
+ ipu_force = true;
+ fio->need_lock = false;
+ goto got_it;
+ }
+ }
+
+ if (fio->need_lock)
+ f2fs_lock_op(fio->sbi);
+
err = get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
if (err)
- return err;
+ goto out;
fio->old_blkaddr = dn.data_blkaddr;
@@ -1302,31 +1387,10 @@ int do_write_data_page(struct f2fs_io_info *fio)
ClearPageUptodate(page);
goto out_writepage;
}
-
- if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
- gfp_t gfp_flags = GFP_NOFS;
-
- /* wait for GCed encrypted page writeback */
- f2fs_wait_on_encrypted_page_writeback(F2FS_I_SB(inode),
- fio->old_blkaddr);
-retry_encrypt:
- fio->encrypted_page = fscrypt_encrypt_page(inode, fio->page,
- PAGE_SIZE, 0,
- fio->page->index,
- gfp_flags);
- if (IS_ERR(fio->encrypted_page)) {
- err = PTR_ERR(fio->encrypted_page);
- if (err == -ENOMEM) {
- /* flush pending ios and wait for a while */
- f2fs_flush_merged_bios(F2FS_I_SB(inode));
- congestion_wait(BLK_RW_ASYNC, HZ/50);
- gfp_flags |= __GFP_NOFAIL;
- err = 0;
- goto retry_encrypt;
- }
- goto out_writepage;
- }
- }
+got_it:
+ err = encrypt_one_page(fio);
+ if (err)
+ goto out_writepage;
set_page_writeback(page);
@@ -1334,22 +1398,27 @@ retry_encrypt:
* If current allocation needs SSR,
* it had better in-place writes for updated data.
*/
- if (unlikely(fio->old_blkaddr != NEW_ADDR &&
- !is_cold_data(page) &&
- !IS_ATOMIC_WRITTEN_PAGE(page) &&
- need_inplace_update(inode))) {
- rewrite_data_page(fio);
+ if (ipu_force || (valid_ipu_blkaddr(fio) && need_inplace_update(fio))) {
+ f2fs_put_dnode(&dn);
+ if (fio->need_lock)
+ f2fs_unlock_op(fio->sbi);
+ err = rewrite_data_page(fio);
+ trace_f2fs_do_write_data_page(fio->page, IPU);
set_inode_flag(inode, FI_UPDATE_WRITE);
- trace_f2fs_do_write_data_page(page, IPU);
- } else {
- write_data_page(&dn, fio);
- trace_f2fs_do_write_data_page(page, OPU);
- set_inode_flag(inode, FI_APPEND_WRITE);
- if (page->index == 0)
- set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
+ return err;
}
+
+ /* LFS mode write path */
+ write_data_page(&dn, fio);
+ trace_f2fs_do_write_data_page(page, OPU);
+ set_inode_flag(inode, FI_APPEND_WRITE);
+ if (page->index == 0)
+ set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
out_writepage:
f2fs_put_dnode(&dn);
+out:
+ if (fio->need_lock)
+ f2fs_unlock_op(fio->sbi);
return err;
}
@@ -1370,9 +1439,11 @@ static int __write_data_page(struct page *page, bool *submitted,
.type = DATA,
.op = REQ_OP_WRITE,
.op_flags = wbc_to_write_flags(wbc),
+ .old_blkaddr = NULL_ADDR,
.page = page,
.encrypted_page = NULL,
.submitted = false,
+ .need_lock = true,
};
trace_f2fs_writepage(page, DATA);
@@ -1408,6 +1479,7 @@ write:
/* Dentry blocks are controlled by checkpoint */
if (S_ISDIR(inode->i_mode)) {
+ fio.need_lock = false;
err = do_write_data_page(&fio);
goto done;
}
@@ -1416,6 +1488,8 @@ write:
need_balance_fs = true;
else if (has_not_enough_free_secs(sbi, 0, 0))
goto redirty_out;
+ else
+ set_inode_flag(inode, FI_HOT_DATA);
err = -EAGAIN;
if (f2fs_has_inline_data(inode)) {
@@ -1423,12 +1497,12 @@ write:
if (!err)
goto out;
}
- f2fs_lock_op(sbi);
+
if (err == -EAGAIN)
err = do_write_data_page(&fio);
if (F2FS_I(inode)->last_disk_size < psize)
F2FS_I(inode)->last_disk_size = psize;
- f2fs_unlock_op(sbi);
+
done:
if (err && err != -ENOENT)
goto redirty_out;
@@ -1441,12 +1515,14 @@ out:
if (wbc->for_reclaim) {
f2fs_submit_merged_bio_cond(sbi, inode, 0, page->index,
DATA, WRITE);
+ clear_inode_flag(inode, FI_HOT_DATA);
remove_dirty_inode(inode);
submitted = NULL;
}
unlock_page(page);
- f2fs_balance_fs(sbi, need_balance_fs);
+ if (!S_ISDIR(inode->i_mode))
+ f2fs_balance_fs(sbi, need_balance_fs);
if (unlikely(f2fs_cp_error(sbi))) {
f2fs_submit_merged_bio(sbi, DATA, WRITE);
@@ -1495,6 +1571,12 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
pagevec_init(&pvec, 0);
+ if (get_dirty_pages(mapping->host) <=
+ SM_I(F2FS_M_SB(mapping))->min_hot_blocks)
+ set_inode_flag(mapping->host, FI_HOT_DATA);
+ else
+ clear_inode_flag(mapping->host, FI_HOT_DATA);
+
if (wbc->range_cyclic) {
writeback_index = mapping->writeback_index; /* prev offset */
index = writeback_index;
@@ -1580,8 +1662,10 @@ continue_unlock:
last_idx = page->index;
}
- if (--wbc->nr_to_write <= 0 &&
- wbc->sync_mode == WB_SYNC_NONE) {
+ /* give a priority to WB_SYNC threads */
+ if ((atomic_read(&F2FS_M_SB(mapping)->wb_sync_req) ||
+ --wbc->nr_to_write <= 0) &&
+ wbc->sync_mode == WB_SYNC_NONE) {
done = 1;
break;
}
@@ -1637,9 +1721,18 @@ static int f2fs_write_data_pages(struct address_space *mapping,
trace_f2fs_writepages(mapping->host, wbc, DATA);
+	/* to avoid splitting IOs due to mixed WB_SYNC_ALL and WB_SYNC_NONE */
+ if (wbc->sync_mode == WB_SYNC_ALL)
+ atomic_inc(&sbi->wb_sync_req);
+ else if (atomic_read(&sbi->wb_sync_req))
+ goto skip_write;
+
blk_start_plug(&plug);
ret = f2fs_write_cache_pages(mapping, wbc);
blk_finish_plug(&plug);
+
+ if (wbc->sync_mode == WB_SYNC_ALL)
+ atomic_dec(&sbi->wb_sync_req);
/*
* if some pages were truncated, we cannot guarantee its mapping->host
* to detect pending bios.
@@ -1687,7 +1780,7 @@ static int prepare_write_begin(struct f2fs_sb_info *sbi,
if (f2fs_has_inline_data(inode) ||
(pos & PAGE_MASK) >= i_size_read(inode)) {
- f2fs_lock_op(sbi);
+ __do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);
locked = true;
}
restart:
@@ -1723,7 +1816,8 @@ restart:
err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
if (err || dn.data_blkaddr == NULL_ADDR) {
f2fs_put_dnode(&dn);
- f2fs_lock_op(sbi);
+ __do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO,
+ true);
locked = true;
goto restart;
}
@@ -1737,7 +1831,7 @@ out:
f2fs_put_dnode(&dn);
unlock_out:
if (locked)
- f2fs_unlock_op(sbi);
+ __do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
return err;
}
@@ -1951,7 +2045,7 @@ void f2fs_invalidate_page(struct page *page, unsigned int offset,
/* This is atomic written page, keep Private */
if (IS_ATOMIC_WRITTEN_PAGE(page))
- return;
+ return drop_inmem_page(inode, page);
set_page_private(page, 0);
ClearPagePrivate(page);
diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c
index ee2d0a485fc3..87f449845f5f 100644
--- a/fs/f2fs/debug.c
+++ b/fs/f2fs/debug.c
@@ -51,15 +51,26 @@ static void update_general_status(struct f2fs_sb_info *sbi)
si->ndirty_all = sbi->ndirty_inode[DIRTY_META];
si->inmem_pages = get_pages(sbi, F2FS_INMEM_PAGES);
si->aw_cnt = atomic_read(&sbi->aw_cnt);
+ si->vw_cnt = atomic_read(&sbi->vw_cnt);
si->max_aw_cnt = atomic_read(&sbi->max_aw_cnt);
+ si->max_vw_cnt = atomic_read(&sbi->max_vw_cnt);
si->nr_wb_cp_data = get_pages(sbi, F2FS_WB_CP_DATA);
si->nr_wb_data = get_pages(sbi, F2FS_WB_DATA);
- if (SM_I(sbi) && SM_I(sbi)->fcc_info)
- si->nr_flush =
- atomic_read(&SM_I(sbi)->fcc_info->submit_flush);
- if (SM_I(sbi) && SM_I(sbi)->dcc_info)
- si->nr_discard =
- atomic_read(&SM_I(sbi)->dcc_info->submit_discard);
+ if (SM_I(sbi) && SM_I(sbi)->fcc_info) {
+ si->nr_flushed =
+ atomic_read(&SM_I(sbi)->fcc_info->issued_flush);
+ si->nr_flushing =
+ atomic_read(&SM_I(sbi)->fcc_info->issing_flush);
+ }
+ if (SM_I(sbi) && SM_I(sbi)->dcc_info) {
+ si->nr_discarded =
+ atomic_read(&SM_I(sbi)->dcc_info->issued_discard);
+ si->nr_discarding =
+ atomic_read(&SM_I(sbi)->dcc_info->issing_discard);
+ si->nr_discard_cmd =
+ atomic_read(&SM_I(sbi)->dcc_info->discard_cmd_cnt);
+ si->undiscard_blks = SM_I(sbi)->dcc_info->undiscard_blks;
+ }
si->total_count = (int)sbi->user_block_count / sbi->blocks_per_seg;
si->rsvd_segs = reserved_segments(sbi);
si->overp_segs = overprovision_segments(sbi);
@@ -86,6 +97,7 @@ static void update_general_status(struct f2fs_sb_info *sbi)
si->sits = MAIN_SEGS(sbi);
si->dirty_sits = SIT_I(sbi)->dirty_sentries;
si->free_nids = NM_I(sbi)->nid_cnt[FREE_NID_LIST];
+ si->avail_nids = NM_I(sbi)->available_nids;
si->alloc_nids = NM_I(sbi)->nid_cnt[ALLOC_NID_LIST];
si->bg_gc = sbi->bg_gc;
si->util_free = (int)(free_user_blocks(sbi) >> sbi->log_blocks_per_seg)
@@ -99,8 +111,8 @@ static void update_general_status(struct f2fs_sb_info *sbi)
for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_NODE; i++) {
struct curseg_info *curseg = CURSEG_I(sbi, i);
si->curseg[i] = curseg->segno;
- si->cursec[i] = curseg->segno / sbi->segs_per_sec;
- si->curzone[i] = si->cursec[i] / sbi->secs_per_zone;
+ si->cursec[i] = GET_SEC_FROM_SEG(sbi, curseg->segno);
+ si->curzone[i] = GET_ZONE_FROM_SEC(sbi, si->cursec[i]);
}
for (i = 0; i < 2; i++) {
@@ -124,10 +136,10 @@ static void update_sit_info(struct f2fs_sb_info *sbi)
bimodal = 0;
total_vblocks = 0;
- blks_per_sec = sbi->segs_per_sec * sbi->blocks_per_seg;
+ blks_per_sec = BLKS_PER_SEC(sbi);
hblks_per_sec = blks_per_sec / 2;
for (segno = 0; segno < MAIN_SEGS(sbi); segno += sbi->segs_per_sec) {
- vblocks = get_valid_blocks(sbi, segno, sbi->segs_per_sec);
+ vblocks = get_valid_blocks(sbi, segno, true);
dist = abs(vblocks - hblks_per_sec);
bimodal += dist * dist;
@@ -156,7 +168,11 @@ static void update_mem_info(struct f2fs_sb_info *sbi)
if (si->base_mem)
goto get_cache;
- si->base_mem = sizeof(struct f2fs_sb_info) + sbi->sb->s_blocksize;
+ /* build stat */
+ si->base_mem = sizeof(struct f2fs_stat_info);
+
+ /* build superblock */
+ si->base_mem += sizeof(struct f2fs_sb_info) + sbi->sb->s_blocksize;
si->base_mem += 2 * sizeof(struct f2fs_inode_info);
si->base_mem += sizeof(*sbi->ckpt);
si->base_mem += sizeof(struct percpu_counter) * NR_COUNT_TYPE;
@@ -208,8 +224,11 @@ get_cache:
/* build merge flush thread */
if (SM_I(sbi)->fcc_info)
si->cache_mem += sizeof(struct flush_cmd_control);
- if (SM_I(sbi)->dcc_info)
+ if (SM_I(sbi)->dcc_info) {
si->cache_mem += sizeof(struct discard_cmd_control);
+ si->cache_mem += sizeof(struct discard_cmd) *
+ atomic_read(&SM_I(sbi)->dcc_info->discard_cmd_cnt);
+ }
/* free nids */
si->cache_mem += (NM_I(sbi)->nid_cnt[FREE_NID_LIST] +
@@ -330,11 +349,16 @@ static int stat_show(struct seq_file *s, void *v)
seq_printf(s, " - Inner Struct Count: tree: %d(%d), node: %d\n",
si->ext_tree, si->zombie_tree, si->ext_node);
seq_puts(s, "\nBalancing F2FS Async:\n");
- seq_printf(s, " - IO (CP: %4d, Data: %4d, Flush: %4d, Discard: %4d)\n",
+ seq_printf(s, " - IO (CP: %4d, Data: %4d, Flush: (%4d %4d), "
+ "Discard: (%4d %4d)) cmd: %4d undiscard:%4u\n",
si->nr_wb_cp_data, si->nr_wb_data,
- si->nr_flush, si->nr_discard);
- seq_printf(s, " - inmem: %4d, atomic IO: %4d (Max. %4d)\n",
- si->inmem_pages, si->aw_cnt, si->max_aw_cnt);
+ si->nr_flushing, si->nr_flushed,
+ si->nr_discarding, si->nr_discarded,
+ si->nr_discard_cmd, si->undiscard_blks);
+ seq_printf(s, " - inmem: %4d, atomic IO: %4d (Max. %4d), "
+ "volatile IO: %4d (Max. %4d)\n",
+ si->inmem_pages, si->aw_cnt, si->max_aw_cnt,
+ si->vw_cnt, si->max_vw_cnt);
seq_printf(s, " - nodes: %4d in %4d\n",
si->ndirty_node, si->node_pages);
seq_printf(s, " - dents: %4d in dirs:%4d (%4d)\n",
@@ -347,8 +371,8 @@ static int stat_show(struct seq_file *s, void *v)
si->ndirty_imeta);
seq_printf(s, " - NATs: %9d/%9d\n - SITs: %9d/%9d\n",
si->dirty_nats, si->nats, si->dirty_sits, si->sits);
- seq_printf(s, " - free_nids: %9d, alloc_nids: %9d\n",
- si->free_nids, si->alloc_nids);
+ seq_printf(s, " - free_nids: %9d/%9d\n - alloc_nids: %9d\n",
+ si->free_nids, si->avail_nids, si->alloc_nids);
seq_puts(s, "\nDistribution of User Blocks:");
seq_puts(s, " [ valid | invalid | free ]\n");
seq_puts(s, " [");
@@ -434,7 +458,9 @@ int f2fs_build_stats(struct f2fs_sb_info *sbi)
atomic_set(&sbi->inplace_count, 0);
atomic_set(&sbi->aw_cnt, 0);
+ atomic_set(&sbi->vw_cnt, 0);
atomic_set(&sbi->max_aw_cnt, 0);
+ atomic_set(&sbi->max_vw_cnt, 0);
mutex_lock(&f2fs_stat_mutex);
list_add_tail(&si->stat_list, &f2fs_stat_list);
diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
index 8d5c62b07b28..94756f55a97e 100644
--- a/fs/f2fs/dir.c
+++ b/fs/f2fs/dir.c
@@ -94,7 +94,7 @@ static struct f2fs_dir_entry *find_in_block(struct page *dentry_page,
dentry_blk = (struct f2fs_dentry_block *)kmap(dentry_page);
- make_dentry_ptr(NULL, &d, (void *)dentry_blk, 1);
+ make_dentry_ptr_block(NULL, &d, dentry_blk);
de = find_target_dentry(fname, namehash, max_slots, &d);
if (de)
*res_page = dentry_page;
@@ -111,8 +111,6 @@ struct f2fs_dir_entry *find_target_dentry(struct fscrypt_name *fname,
struct f2fs_dir_entry *de;
unsigned long bit_pos = 0;
int max_len = 0;
- struct fscrypt_str de_name = FSTR_INIT(NULL, 0);
- struct fscrypt_str *name = &fname->disk_name;
if (max_slots)
*max_slots = 0;
@@ -130,17 +128,9 @@ struct f2fs_dir_entry *find_target_dentry(struct fscrypt_name *fname,
continue;
}
- /* encrypted case */
- de_name.name = d->filename[bit_pos];
- de_name.len = le16_to_cpu(de->name_len);
-
- /* show encrypted name */
- if (fname->hash) {
- if (de->hash_code == cpu_to_le32(fname->hash))
- goto found;
- } else if (de_name.len == name->len &&
- de->hash_code == namehash &&
- !memcmp(de_name.name, name->name, name->len))
+ if (de->hash_code == namehash &&
+ fscrypt_match_name(fname, d->filename[bit_pos],
+ le16_to_cpu(de->name_len)))
goto found;
if (max_slots && max_len > *max_slots)
@@ -170,12 +160,7 @@ static struct f2fs_dir_entry *find_in_level(struct inode *dir,
struct f2fs_dir_entry *de = NULL;
bool room = false;
int max_slots;
- f2fs_hash_t namehash;
-
- if(fname->hash)
- namehash = cpu_to_le32(fname->hash);
- else
- namehash = f2fs_dentry_hash(&name);
+ f2fs_hash_t namehash = f2fs_dentry_hash(&name, fname);
nbucket = dir_buckets(level, F2FS_I(dir)->i_dir_level);
nblock = bucket_blocks(level);
@@ -207,13 +192,9 @@ static struct f2fs_dir_entry *find_in_level(struct inode *dir,
f2fs_put_page(dentry_page, 0);
}
- /* This is to increase the speed of f2fs_create */
- if (!de && room) {
- F2FS_I(dir)->task = current;
- if (F2FS_I(dir)->chash != namehash) {
- F2FS_I(dir)->chash = namehash;
- F2FS_I(dir)->clevel = level;
- }
+ if (!de && room && F2FS_I(dir)->chash != namehash) {
+ F2FS_I(dir)->chash = namehash;
+ F2FS_I(dir)->clevel = level;
}
return de;
@@ -254,6 +235,9 @@ struct f2fs_dir_entry *__f2fs_find_entry(struct inode *dir,
break;
}
out:
+ /* This is to increase the speed of f2fs_create */
+ if (!de)
+ F2FS_I(dir)->task = current;
return de;
}
@@ -337,24 +321,6 @@ static void init_dent_inode(const struct qstr *name, struct page *ipage)
set_page_dirty(ipage);
}
-int update_dent_inode(struct inode *inode, struct inode *to,
- const struct qstr *name)
-{
- struct page *page;
-
- if (file_enc_name(to))
- return 0;
-
- page = get_node_page(F2FS_I_SB(inode), inode->i_ino);
- if (IS_ERR(page))
- return PTR_ERR(page);
-
- init_dent_inode(name, page);
- f2fs_put_page(page, 1);
-
- return 0;
-}
-
void do_make_empty_dir(struct inode *inode, struct inode *parent,
struct f2fs_dentry_ptr *d)
{
@@ -384,7 +350,7 @@ static int make_empty_dir(struct inode *inode,
dentry_blk = kmap_atomic(dentry_page);
- make_dentry_ptr(NULL, &d, (void *)dentry_blk, 1);
+ make_dentry_ptr_block(NULL, &d, dentry_blk);
do_make_empty_dir(inode, parent, &d);
kunmap_atomic(dentry_blk);
@@ -438,8 +404,11 @@ struct page *init_inode_metadata(struct inode *inode, struct inode *dir,
set_cold_node(inode, page);
}
- if (new_name)
+ if (new_name) {
init_dent_inode(new_name, page);
+ if (f2fs_encrypted_inode(dir))
+ file_set_enc_name(inode);
+ }
/*
* This file should be checkpointed during fsync.
@@ -542,7 +511,7 @@ int f2fs_add_regular_entry(struct inode *dir, const struct qstr *new_name,
level = 0;
slots = GET_DENTRY_SLOTS(new_name->len);
- dentry_hash = f2fs_dentry_hash(new_name);
+ dentry_hash = f2fs_dentry_hash(new_name, NULL);
current_depth = F2FS_I(dir)->i_current_depth;
if (F2FS_I(dir)->chash == dentry_hash) {
@@ -599,11 +568,9 @@ add_dentry:
err = PTR_ERR(page);
goto fail;
}
- if (f2fs_encrypted_inode(dir))
- file_set_enc_name(inode);
}
- make_dentry_ptr(NULL, &d, (void *)dentry_blk, 1);
+ make_dentry_ptr_block(NULL, &d, dentry_blk);
f2fs_update_dentry(ino, mode, &d, new_name, dentry_hash, bit_pos);
set_page_dirty(dentry_page);
@@ -911,7 +878,7 @@ static int f2fs_readdir(struct file *file, struct dir_context *ctx)
dentry_blk = kmap(dentry_page);
- make_dentry_ptr(inode, &d, (void *)dentry_blk, 1);
+ make_dentry_ptr_block(inode, &d, dentry_blk);
err = f2fs_fill_dentries(ctx, &d,
n * NR_DENTRY_IN_BLOCK, &fstr);
diff --git a/fs/f2fs/extent_cache.c b/fs/f2fs/extent_cache.c
index c6934f014e0f..2f98d7039701 100644
--- a/fs/f2fs/extent_cache.c
+++ b/fs/f2fs/extent_cache.c
@@ -18,6 +18,179 @@
#include "node.h"
#include <trace/events/f2fs.h>
+static struct rb_entry *__lookup_rb_tree_fast(struct rb_entry *cached_re,
+ unsigned int ofs)
+{
+ if (cached_re) {
+ if (cached_re->ofs <= ofs &&
+ cached_re->ofs + cached_re->len > ofs) {
+ return cached_re;
+ }
+ }
+ return NULL;
+}
+
+static struct rb_entry *__lookup_rb_tree_slow(struct rb_root *root,
+ unsigned int ofs)
+{
+ struct rb_node *node = root->rb_node;
+ struct rb_entry *re;
+
+ while (node) {
+ re = rb_entry(node, struct rb_entry, rb_node);
+
+ if (ofs < re->ofs)
+ node = node->rb_left;
+ else if (ofs >= re->ofs + re->len)
+ node = node->rb_right;
+ else
+ return re;
+ }
+ return NULL;
+}
+
+struct rb_entry *__lookup_rb_tree(struct rb_root *root,
+ struct rb_entry *cached_re, unsigned int ofs)
+{
+ struct rb_entry *re;
+
+ re = __lookup_rb_tree_fast(cached_re, ofs);
+ if (!re)
+ return __lookup_rb_tree_slow(root, ofs);
+
+ return re;
+}
+
+struct rb_node **__lookup_rb_tree_for_insert(struct f2fs_sb_info *sbi,
+ struct rb_root *root, struct rb_node **parent,
+ unsigned int ofs)
+{
+ struct rb_node **p = &root->rb_node;
+ struct rb_entry *re;
+
+ while (*p) {
+ *parent = *p;
+ re = rb_entry(*parent, struct rb_entry, rb_node);
+
+ if (ofs < re->ofs)
+ p = &(*p)->rb_left;
+ else if (ofs >= re->ofs + re->len)
+ p = &(*p)->rb_right;
+ else
+ f2fs_bug_on(sbi, 1);
+ }
+
+ return p;
+}
+
+/*
+ * look up the rb entry at position @ofs in the rb-tree;
+ * if hit, return the entry, otherwise return NULL.
+ * @prev_entry: entry before @ofs
+ * @next_entry: entry after @ofs
+ * @insert_p: insert point for a new entry at @ofs,
+ * in order to simplify the insertion afterwards.
+ * The tree must stay unchanged between lookup and insertion.
+ */
+struct rb_entry *__lookup_rb_tree_ret(struct rb_root *root,
+ struct rb_entry *cached_re,
+ unsigned int ofs,
+ struct rb_entry **prev_entry,
+ struct rb_entry **next_entry,
+ struct rb_node ***insert_p,
+ struct rb_node **insert_parent,
+ bool force)
+{
+ struct rb_node **pnode = &root->rb_node;
+ struct rb_node *parent = NULL, *tmp_node;
+ struct rb_entry *re = cached_re;
+
+ *insert_p = NULL;
+ *insert_parent = NULL;
+ *prev_entry = NULL;
+ *next_entry = NULL;
+
+ if (RB_EMPTY_ROOT(root))
+ return NULL;
+
+ if (re) {
+ if (re->ofs <= ofs && re->ofs + re->len > ofs)
+ goto lookup_neighbors;
+ }
+
+ while (*pnode) {
+ parent = *pnode;
+ re = rb_entry(*pnode, struct rb_entry, rb_node);
+
+ if (ofs < re->ofs)
+ pnode = &(*pnode)->rb_left;
+ else if (ofs >= re->ofs + re->len)
+ pnode = &(*pnode)->rb_right;
+ else
+ goto lookup_neighbors;
+ }
+
+ *insert_p = pnode;
+ *insert_parent = parent;
+
+ re = rb_entry(parent, struct rb_entry, rb_node);
+ tmp_node = parent;
+ if (parent && ofs > re->ofs)
+ tmp_node = rb_next(parent);
+ *next_entry = rb_entry_safe(tmp_node, struct rb_entry, rb_node);
+
+ tmp_node = parent;
+ if (parent && ofs < re->ofs)
+ tmp_node = rb_prev(parent);
+ *prev_entry = rb_entry_safe(tmp_node, struct rb_entry, rb_node);
+ return NULL;
+
+lookup_neighbors:
+ if (ofs == re->ofs || force) {
+ /* lookup prev node for merging backward later */
+ tmp_node = rb_prev(&re->rb_node);
+ *prev_entry = rb_entry_safe(tmp_node, struct rb_entry, rb_node);
+ }
+ if (ofs == re->ofs + re->len - 1 || force) {
+ /* lookup next node for merging frontward later */
+ tmp_node = rb_next(&re->rb_node);
+ *next_entry = rb_entry_safe(tmp_node, struct rb_entry, rb_node);
+ }
+ return re;
+}
+
+bool __check_rb_tree_consistence(struct f2fs_sb_info *sbi,
+ struct rb_root *root)
+{
+#ifdef CONFIG_F2FS_CHECK_FS
+ struct rb_node *cur = rb_first(root), *next;
+ struct rb_entry *cur_re, *next_re;
+
+ if (!cur)
+ return true;
+
+ while (cur) {
+ next = rb_next(cur);
+ if (!next)
+ return true;
+
+ cur_re = rb_entry(cur, struct rb_entry, rb_node);
+ next_re = rb_entry(next, struct rb_entry, rb_node);
+
+ if (cur_re->ofs + cur_re->len > next_re->ofs) {
+ f2fs_msg(sbi->sb, KERN_INFO, "inconsistent rbtree, "
+ "cur(%u, %u) next(%u, %u)",
+ cur_re->ofs, cur_re->len,
+ next_re->ofs, next_re->len);
+ return false;
+ }
+
+ cur = next;
+ }
+#endif
+ return true;
+}
+
static struct kmem_cache *extent_tree_slab;
static struct kmem_cache *extent_node_slab;
@@ -102,36 +275,6 @@ static struct extent_tree *__grab_extent_tree(struct inode *inode)
return et;
}
-static struct extent_node *__lookup_extent_tree(struct f2fs_sb_info *sbi,
- struct extent_tree *et, unsigned int fofs)
-{
- struct rb_node *node = et->root.rb_node;
- struct extent_node *en = et->cached_en;
-
- if (en) {
- struct extent_info *cei = &en->ei;
-
- if (cei->fofs <= fofs && cei->fofs + cei->len > fofs) {
- stat_inc_cached_node_hit(sbi);
- return en;
- }
- }
-
- while (node) {
- en = rb_entry(node, struct extent_node, rb_node);
-
- if (fofs < en->ei.fofs) {
- node = node->rb_left;
- } else if (fofs >= en->ei.fofs + en->ei.len) {
- node = node->rb_right;
- } else {
- stat_inc_rbtree_node_hit(sbi);
- return en;
- }
- }
- return NULL;
-}
-
static struct extent_node *__init_extent_tree(struct f2fs_sb_info *sbi,
struct extent_tree *et, struct extent_info *ei)
{
@@ -237,17 +380,24 @@ static bool f2fs_lookup_extent_tree(struct inode *inode, pgoff_t pgofs,
goto out;
}
- en = __lookup_extent_tree(sbi, et, pgofs);
- if (en) {
- *ei = en->ei;
- spin_lock(&sbi->extent_lock);
- if (!list_empty(&en->list)) {
- list_move_tail(&en->list, &sbi->extent_list);
- et->cached_en = en;
- }
- spin_unlock(&sbi->extent_lock);
- ret = true;
+ en = (struct extent_node *)__lookup_rb_tree(&et->root,
+ (struct rb_entry *)et->cached_en, pgofs);
+ if (!en)
+ goto out;
+
+ if (en == et->cached_en)
+ stat_inc_cached_node_hit(sbi);
+ else
+ stat_inc_rbtree_node_hit(sbi);
+
+ *ei = en->ei;
+ spin_lock(&sbi->extent_lock);
+ if (!list_empty(&en->list)) {
+ list_move_tail(&en->list, &sbi->extent_list);
+ et->cached_en = en;
}
+ spin_unlock(&sbi->extent_lock);
+ ret = true;
out:
stat_inc_total_hit(sbi);
read_unlock(&et->lock);
@@ -256,83 +406,6 @@ out:
return ret;
}
-
-/*
- * lookup extent at @fofs, if hit, return the extent
- * if not, return NULL and
- * @prev_ex: extent before fofs
- * @next_ex: extent after fofs
- * @insert_p: insert point for new extent at fofs
- * in order to simpfy the insertion after.
- * tree must stay unchanged between lookup and insertion.
- */
-static struct extent_node *__lookup_extent_tree_ret(struct extent_tree *et,
- unsigned int fofs,
- struct extent_node **prev_ex,
- struct extent_node **next_ex,
- struct rb_node ***insert_p,
- struct rb_node **insert_parent)
-{
- struct rb_node **pnode = &et->root.rb_node;
- struct rb_node *parent = NULL, *tmp_node;
- struct extent_node *en = et->cached_en;
-
- *insert_p = NULL;
- *insert_parent = NULL;
- *prev_ex = NULL;
- *next_ex = NULL;
-
- if (RB_EMPTY_ROOT(&et->root))
- return NULL;
-
- if (en) {
- struct extent_info *cei = &en->ei;
-
- if (cei->fofs <= fofs && cei->fofs + cei->len > fofs)
- goto lookup_neighbors;
- }
-
- while (*pnode) {
- parent = *pnode;
- en = rb_entry(*pnode, struct extent_node, rb_node);
-
- if (fofs < en->ei.fofs)
- pnode = &(*pnode)->rb_left;
- else if (fofs >= en->ei.fofs + en->ei.len)
- pnode = &(*pnode)->rb_right;
- else
- goto lookup_neighbors;
- }
-
- *insert_p = pnode;
- *insert_parent = parent;
-
- en = rb_entry(parent, struct extent_node, rb_node);
- tmp_node = parent;
- if (parent && fofs > en->ei.fofs)
- tmp_node = rb_next(parent);
- *next_ex = rb_entry_safe(tmp_node, struct extent_node, rb_node);
-
- tmp_node = parent;
- if (parent && fofs < en->ei.fofs)
- tmp_node = rb_prev(parent);
- *prev_ex = rb_entry_safe(tmp_node, struct extent_node, rb_node);
- return NULL;
-
-lookup_neighbors:
- if (fofs == en->ei.fofs) {
- /* lookup prev node for merging backward later */
- tmp_node = rb_prev(&en->rb_node);
- *prev_ex = rb_entry_safe(tmp_node, struct extent_node, rb_node);
- }
- if (fofs == en->ei.fofs + en->ei.len - 1) {
- /* lookup next node for merging frontward later */
- tmp_node = rb_next(&en->rb_node);
- *next_ex = rb_entry_safe(tmp_node, struct extent_node, rb_node);
- }
- return en;
-}
-
static struct extent_node *__try_merge_extent_node(struct inode *inode,
struct extent_tree *et, struct extent_info *ei,
struct extent_node *prev_ex,
@@ -387,17 +460,7 @@ static struct extent_node *__insert_extent_tree(struct inode *inode,
goto do_insert;
}
- while (*p) {
- parent = *p;
- en = rb_entry(parent, struct extent_node, rb_node);
-
- if (ei->fofs < en->ei.fofs)
- p = &(*p)->rb_left;
- else if (ei->fofs >= en->ei.fofs + en->ei.len)
- p = &(*p)->rb_right;
- else
- f2fs_bug_on(sbi, 1);
- }
+ p = __lookup_rb_tree_for_insert(sbi, &et->root, &parent, ei->fofs);
do_insert:
en = __attach_extent_node(sbi, et, ei, parent, p);
if (!en)
@@ -447,8 +510,11 @@ static void f2fs_update_extent_tree_range(struct inode *inode,
__drop_largest_extent(inode, fofs, len);
/* 1. lookup first extent node in range [fofs, fofs + len - 1] */
- en = __lookup_extent_tree_ret(et, fofs, &prev_en, &next_en,
- &insert_p, &insert_parent);
+ en = (struct extent_node *)__lookup_rb_tree_ret(&et->root,
+ (struct rb_entry *)et->cached_en, fofs,
+ (struct rb_entry **)&prev_en,
+ (struct rb_entry **)&next_en,
+ &insert_p, &insert_parent, false);
if (!en)
en = next_en;
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 0a6e115562f6..2185c7a040a1 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -50,6 +50,7 @@ enum {
FAULT_BLOCK,
FAULT_DIR_DEPTH,
FAULT_EVICT_INODE,
+ FAULT_TRUNCATE,
FAULT_IO,
FAULT_CHECKPOINT,
FAULT_MAX,
@@ -62,7 +63,7 @@ struct f2fs_fault_info {
};
extern char *fault_name[FAULT_MAX];
-#define IS_FAULT_SET(fi, type) (fi->inject_type & (1 << (type)))
+#define IS_FAULT_SET(fi, type) ((fi)->inject_type & (1 << (type)))
#endif
/*
@@ -88,9 +89,9 @@ extern char *fault_name[FAULT_MAX];
#define F2FS_MOUNT_ADAPTIVE 0x00020000
#define F2FS_MOUNT_LFS 0x00040000
-#define clear_opt(sbi, option) (sbi->mount_opt.opt &= ~F2FS_MOUNT_##option)
-#define set_opt(sbi, option) (sbi->mount_opt.opt |= F2FS_MOUNT_##option)
-#define test_opt(sbi, option) (sbi->mount_opt.opt & F2FS_MOUNT_##option)
+#define clear_opt(sbi, option) ((sbi)->mount_opt.opt &= ~F2FS_MOUNT_##option)
+#define set_opt(sbi, option) ((sbi)->mount_opt.opt |= F2FS_MOUNT_##option)
+#define test_opt(sbi, option) ((sbi)->mount_opt.opt & F2FS_MOUNT_##option)
#define ver_after(a, b) (typecheck(unsigned long long, a) && \
typecheck(unsigned long long, b) && \
@@ -124,22 +125,20 @@ enum {
SIT_BITMAP
};
-enum {
- CP_UMOUNT,
- CP_FASTBOOT,
- CP_SYNC,
- CP_RECOVERY,
- CP_DISCARD,
-};
+#define CP_UMOUNT 0x00000001
+#define CP_FASTBOOT 0x00000002
+#define CP_SYNC 0x00000004
+#define CP_RECOVERY 0x00000008
+#define CP_DISCARD 0x00000010
+#define CP_TRIMMED 0x00000020
#define DEF_BATCHED_TRIM_SECTIONS 2048
#define BATCHED_TRIM_SEGMENTS(sbi) \
- (SM_I(sbi)->trim_sections * (sbi)->segs_per_sec)
+ (GET_SEG_FROM_SEC(sbi, SM_I(sbi)->trim_sections))
#define BATCHED_TRIM_BLOCKS(sbi) \
(BATCHED_TRIM_SEGMENTS(sbi) << (sbi)->log_blocks_per_seg)
-#define MAX_DISCARD_BLOCKS(sbi) \
- ((1 << (sbi)->log_blocks_per_seg) * (sbi)->segs_per_sec)
-#define DISCARD_ISSUE_RATE 8
+#define MAX_DISCARD_BLOCKS(sbi) BLKS_PER_SEC(sbi)
+#define DISCARD_ISSUE_RATE 8
#define DEF_CP_INTERVAL 60 /* 60 secs */
#define DEF_IDLE_INTERVAL 5 /* 5 secs */
@@ -181,37 +180,63 @@ struct inode_entry {
struct inode *inode; /* vfs inode pointer */
};
-/* for the list of blockaddresses to be discarded */
+/* for the bitmap indicating blocks to be discarded */
struct discard_entry {
struct list_head list; /* list head */
- block_t blkaddr; /* block address to be discarded */
- int len; /* # of consecutive blocks of the discard */
+ block_t start_blkaddr; /* start blockaddr of current segment */
+ unsigned char discard_map[SIT_VBLOCK_MAP_SIZE]; /* segment discard bitmap */
};
+/* max discard pend list number */
+#define MAX_PLIST_NUM 512
+#define plist_idx(blk_num) ((blk_num) >= MAX_PLIST_NUM ? \
+ (MAX_PLIST_NUM - 1) : (blk_num - 1))
+
enum {
D_PREP,
D_SUBMIT,
D_DONE,
};
+struct discard_info {
+ block_t lstart; /* logical start address */
+ block_t len; /* length */
+ block_t start; /* actual start address in dev */
+};
+
struct discard_cmd {
+ struct rb_node rb_node; /* rb node located in rb-tree */
+ union {
+ struct {
+ block_t lstart; /* logical start address */
+ block_t len; /* length */
+ block_t start; /* actual start address in dev */
+ };
+ struct discard_info di; /* discard info */
+
+ };
struct list_head list; /* command list */
 	struct completion wait;		/* completion */
- block_t lstart; /* logical start address */
- block_t len; /* length */
- struct bio *bio; /* bio */
- int state; /* state */
+ struct block_device *bdev; /* bdev */
+ unsigned short ref; /* reference count */
+ unsigned char state; /* state */
+ int error; /* bio error */
};
struct discard_cmd_control {
struct task_struct *f2fs_issue_discard; /* discard thread */
- struct list_head discard_entry_list; /* 4KB discard entry list */
- int nr_discards; /* # of discards in the list */
- struct list_head discard_cmd_list; /* discard cmd list */
+ struct list_head entry_list; /* 4KB discard entry list */
+ struct list_head pend_list[MAX_PLIST_NUM];/* store pending entries */
+ struct list_head wait_list; /* store on-flushing entries */
wait_queue_head_t discard_wait_queue; /* waiting queue for wake-up */
struct mutex cmd_lock;
- int max_discards; /* max. discards to be issued */
- atomic_t submit_discard; /* # of issued discard */
+ unsigned int nr_discards; /* # of discards in the list */
+ unsigned int max_discards; /* max. discards to be issued */
+ unsigned int undiscard_blks; /* # of undiscard blocks */
+ atomic_t issued_discard; /* # of issued discard */
+ atomic_t issing_discard; /* # of issing discard */
+ atomic_t discard_cmd_cnt; /* # of cached cmd count */
+ struct rb_root root; /* root of discard rb-tree */
};
/* for the list of fsync inodes, used only during recovery */
@@ -222,13 +247,13 @@ struct fsync_inode_entry {
block_t last_dentry; /* block address locating the last dentry */
};
-#define nats_in_cursum(jnl) (le16_to_cpu(jnl->n_nats))
-#define sits_in_cursum(jnl) (le16_to_cpu(jnl->n_sits))
+#define nats_in_cursum(jnl) (le16_to_cpu((jnl)->n_nats))
+#define sits_in_cursum(jnl) (le16_to_cpu((jnl)->n_sits))
-#define nat_in_journal(jnl, i) (jnl->nat_j.entries[i].ne)
-#define nid_in_journal(jnl, i) (jnl->nat_j.entries[i].nid)
-#define sit_in_journal(jnl, i) (jnl->sit_j.entries[i].se)
-#define segno_in_journal(jnl, i) (jnl->sit_j.entries[i].segno)
+#define nat_in_journal(jnl, i) ((jnl)->nat_j.entries[i].ne)
+#define nid_in_journal(jnl, i) ((jnl)->nat_j.entries[i].nid)
+#define sit_in_journal(jnl, i) ((jnl)->sit_j.entries[i].se)
+#define segno_in_journal(jnl, i) ((jnl)->sit_j.entries[i].segno)
#define MAX_NAT_JENTRIES(jnl) (NAT_JOURNAL_ENTRIES - nats_in_cursum(jnl))
#define MAX_SIT_JENTRIES(jnl) (SIT_JOURNAL_ENTRIES - sits_in_cursum(jnl))
@@ -270,11 +295,14 @@ static inline bool __has_cursum_space(struct f2fs_journal *journal,
#define F2FS_IOC_START_VOLATILE_WRITE _IO(F2FS_IOCTL_MAGIC, 3)
#define F2FS_IOC_RELEASE_VOLATILE_WRITE _IO(F2FS_IOCTL_MAGIC, 4)
#define F2FS_IOC_ABORT_VOLATILE_WRITE _IO(F2FS_IOCTL_MAGIC, 5)
-#define F2FS_IOC_GARBAGE_COLLECT _IO(F2FS_IOCTL_MAGIC, 6)
+#define F2FS_IOC_GARBAGE_COLLECT _IOW(F2FS_IOCTL_MAGIC, 6, __u32)
#define F2FS_IOC_WRITE_CHECKPOINT _IO(F2FS_IOCTL_MAGIC, 7)
-#define F2FS_IOC_DEFRAGMENT _IO(F2FS_IOCTL_MAGIC, 8)
+#define F2FS_IOC_DEFRAGMENT _IOWR(F2FS_IOCTL_MAGIC, 8, \
+ struct f2fs_defragment)
#define F2FS_IOC_MOVE_RANGE _IOWR(F2FS_IOCTL_MAGIC, 9, \
struct f2fs_move_range)
+#define F2FS_IOC_FLUSH_DEVICE _IOW(F2FS_IOCTL_MAGIC, 10, \
+ struct f2fs_flush_device)
#define F2FS_IOC_SET_ENCRYPTION_POLICY FS_IOC_SET_ENCRYPTION_POLICY
#define F2FS_IOC_GET_ENCRYPTION_POLICY FS_IOC_GET_ENCRYPTION_POLICY
@@ -311,6 +339,11 @@ struct f2fs_move_range {
u64 len; /* size to move */
};
+struct f2fs_flush_device {
+ u32 dev_num; /* device number to flush */
+ u32 segments; /* # of segments to flush */
+};
+
/*
* For INODE and NODE manager
*/
@@ -323,26 +356,24 @@ struct f2fs_dentry_ptr {
int max;
};
-static inline void make_dentry_ptr(struct inode *inode,
- struct f2fs_dentry_ptr *d, void *src, int type)
+static inline void make_dentry_ptr_block(struct inode *inode,
+ struct f2fs_dentry_ptr *d, struct f2fs_dentry_block *t)
{
d->inode = inode;
+ d->max = NR_DENTRY_IN_BLOCK;
+ d->bitmap = &t->dentry_bitmap;
+ d->dentry = t->dentry;
+ d->filename = t->filename;
+}
- if (type == 1) {
- struct f2fs_dentry_block *t = (struct f2fs_dentry_block *)src;
-
- d->max = NR_DENTRY_IN_BLOCK;
- d->bitmap = &t->dentry_bitmap;
- d->dentry = t->dentry;
- d->filename = t->filename;
- } else {
- struct f2fs_inline_dentry *t = (struct f2fs_inline_dentry *)src;
-
- d->max = NR_INLINE_DENTRY;
- d->bitmap = &t->dentry_bitmap;
- d->dentry = t->dentry;
- d->filename = t->filename;
- }
+static inline void make_dentry_ptr_inline(struct inode *inode,
+ struct f2fs_dentry_ptr *d, struct f2fs_inline_dentry *t)
+{
+ d->inode = inode;
+ d->max = NR_INLINE_DENTRY;
+ d->bitmap = &t->dentry_bitmap;
+ d->dentry = t->dentry;
+ d->filename = t->filename;
}
/*
@@ -374,16 +405,30 @@ enum {
/* number of extent info in extent cache we try to shrink */
#define EXTENT_CACHE_SHRINK_NUMBER 128
+struct rb_entry {
+ struct rb_node rb_node; /* rb node located in rb-tree */
+ unsigned int ofs; /* start offset of the entry */
+ unsigned int len; /* length of the entry */
+};
+
struct extent_info {
unsigned int fofs; /* start offset in a file */
- u32 blk; /* start block address of the extent */
unsigned int len; /* length of the extent */
+ u32 blk; /* start block address of the extent */
};
struct extent_node {
- struct rb_node rb_node; /* rb node located in rb-tree */
+ struct rb_node rb_node;
+ union {
+ struct {
+ unsigned int fofs;
+ unsigned int len;
+ u32 blk;
+ };
+ struct extent_info ei; /* extent info */
+
+ };
struct list_head list; /* node in global extent list of sbi */
- struct extent_info ei; /* extent info */
struct extent_tree *et; /* extent tree pointer */
};
@@ -500,6 +545,24 @@ static inline void set_extent_info(struct extent_info *ei, unsigned int fofs,
ei->len = len;
}
+static inline bool __is_discard_mergeable(struct discard_info *back,
+ struct discard_info *front)
+{
+ return back->lstart + back->len == front->lstart;
+}
+
+static inline bool __is_discard_back_mergeable(struct discard_info *cur,
+ struct discard_info *back)
+{
+ return __is_discard_mergeable(back, cur);
+}
+
+static inline bool __is_discard_front_mergeable(struct discard_info *cur,
+ struct discard_info *front)
+{
+ return __is_discard_mergeable(cur, front);
+}
+
static inline bool __is_extent_mergeable(struct extent_info *back,
struct extent_info *front)
{
@@ -562,7 +625,6 @@ struct f2fs_nm_info {
unsigned char (*free_nid_bitmap)[NAT_ENTRY_BITMAP_SIZE];
unsigned char *nat_block_bitmap;
unsigned short *free_nid_count; /* free nid count of NAT block */
- spinlock_t free_nid_lock; /* protect updating of nid count */
/* for checkpoint */
char *nat_bitmap; /* NAT bitmap pointer */
@@ -641,7 +703,8 @@ struct flush_cmd {
struct flush_cmd_control {
struct task_struct *f2fs_issue_flush; /* flush thread */
wait_queue_head_t flush_wait_queue; /* waiting queue for wake-up */
- atomic_t submit_flush; /* # of issued flushes */
+ atomic_t issued_flush; /* # of issued flushes */
+ atomic_t issing_flush; /* # of issing flushes */
struct llist_head issue_list; /* list for command issue */
struct llist_node *dispatch_list; /* list for command dispatch */
};
@@ -672,6 +735,7 @@ struct f2fs_sm_info {
unsigned int ipu_policy; /* in-place-update policy */
unsigned int min_ipu_util; /* in-place-update threshold */
unsigned int min_fsync_blocks; /* threshold for fsync */
+ unsigned int min_hot_blocks; /* threshold for hot block allocation */
/* for flush command control */
struct flush_cmd_control *fcc_info;
@@ -722,6 +786,7 @@ enum page_type {
META_FLUSH,
INMEM, /* the below types are used by tracepoints only. */
INMEM_DROP,
+ INMEM_INVALIDATE,
INMEM_REVOKE,
IPU,
OPU,
@@ -737,9 +802,10 @@ struct f2fs_io_info {
struct page *page; /* page to be written */
struct page *encrypted_page; /* encrypted page */
bool submitted; /* indicate IO submission */
+ bool need_lock; /* indicate we need to lock cp_rwsem */
};
-#define is_read_io(rw) (rw == READ)
+#define is_read_io(rw) ((rw) == READ)
struct f2fs_bio_info {
struct f2fs_sb_info *sbi; /* f2fs superblock */
struct bio *bio; /* bios to merge */
@@ -827,6 +893,7 @@ struct f2fs_sb_info {
struct mutex cp_mutex; /* checkpoint procedure lock */
struct rw_semaphore cp_rwsem; /* blocking FS operations */
struct rw_semaphore node_write; /* locking node writes */
+ struct rw_semaphore node_change; /* locking node change */
wait_queue_head_t cp_wait;
unsigned long last_time[MAX_TIME]; /* to store time in jiffies */
long interval_time[MAX_TIME]; /* to store thresholds */
@@ -879,6 +946,9 @@ struct f2fs_sb_info {
/* # of allocated blocks */
struct percpu_counter alloc_valid_block_count;
+ /* writeback control */
+ atomic_t wb_sync_req; /* count # of WB_SYNC threads */
+
/* valid inode count */
struct percpu_counter total_valid_inode_count;
@@ -912,11 +982,12 @@ struct f2fs_sb_info {
atomic_t inline_inode; /* # of inline_data inodes */
atomic_t inline_dir; /* # of inline_dentry inodes */
atomic_t aw_cnt; /* # of atomic writes */
+ atomic_t vw_cnt; /* # of volatile writes */
atomic_t max_aw_cnt; /* max # of atomic writes */
+ atomic_t max_vw_cnt; /* max # of volatile writes */
int bg_gc; /* background gc calls */
unsigned int ndirty_inode[NR_INODE_TYPE]; /* # of dirty inodes */
#endif
- unsigned int last_victim[2]; /* last victim segment # */
spinlock_t stat_lock; /* lock for stat operations */
 	/* For sysfs support */
@@ -971,8 +1042,8 @@ static inline bool time_to_inject(struct f2fs_sb_info *sbi, int type)
* and the return value is in kbytes. s is of struct f2fs_sb_info.
*/
#define BD_PART_WRITTEN(s) \
-(((u64)part_stat_read(s->sb->s_bdev->bd_part, sectors[1]) - \
- s->sectors_written_start) >> 1)
+(((u64)part_stat_read((s)->sb->s_bdev->bd_part, sectors[1]) - \
+ (s)->sectors_written_start) >> 1)
static inline void f2fs_update_time(struct f2fs_sb_info *sbi, int type)
{
@@ -1193,7 +1264,7 @@ static inline bool enabled_nat_bits(struct f2fs_sb_info *sbi,
{
bool set = is_set_ckpt_flags(sbi, CP_NAT_BITS_FLAG);
- return (cpc) ? (cpc->reason == CP_UMOUNT) && set : set;
+ return (cpc) ? (cpc->reason & CP_UMOUNT) && set : set;
}
static inline void f2fs_lock_op(struct f2fs_sb_info *sbi)
@@ -1229,7 +1300,7 @@ static inline int __get_cp_reason(struct f2fs_sb_info *sbi)
static inline bool __remain_node_summaries(int reason)
{
- return (reason == CP_UMOUNT || reason == CP_FASTBOOT);
+ return (reason & (CP_UMOUNT | CP_FASTBOOT));
}
static inline bool __exist_node_summaries(struct f2fs_sb_info *sbi)
@@ -1707,6 +1778,7 @@ enum {
FI_DO_DEFRAG, /* indicate defragment is running */
FI_DIRTY_FILE, /* indicate regular/symlink has dirty pages */
FI_NO_PREALLOC, /* indicate skipped preallocated blocks */
+ FI_HOT_DATA, /* indicate file is hot */
};
static inline void __mark_inode_dirty_flag(struct inode *inode,
@@ -1869,12 +1941,6 @@ static inline int f2fs_has_inline_data(struct inode *inode)
return is_inode_flag_set(inode, FI_INLINE_DATA);
}
-static inline void f2fs_clear_inline_inode(struct inode *inode)
-{
- clear_inode_flag(inode, FI_INLINE_DATA);
- clear_inode_flag(inode, FI_DATA_EXIST);
-}
-
static inline int f2fs_exist_data(struct inode *inode)
{
return is_inode_flag_set(inode, FI_DATA_EXIST);
@@ -2005,36 +2071,10 @@ static inline void *f2fs_kmalloc(struct f2fs_sb_info *sbi,
return kmalloc(size, flags);
}
-static inline void *f2fs_kvmalloc(size_t size, gfp_t flags)
-{
- void *ret;
-
- ret = kmalloc(size, flags | __GFP_NOWARN);
- if (!ret)
- ret = __vmalloc(size, flags, PAGE_KERNEL);
- return ret;
-}
-
-static inline void *f2fs_kvzalloc(size_t size, gfp_t flags)
-{
- void *ret;
-
- ret = kzalloc(size, flags | __GFP_NOWARN);
- if (!ret)
- ret = __vmalloc(size, flags | __GFP_ZERO, PAGE_KERNEL);
- return ret;
-}
-
#define get_inode_mode(i) \
((is_inode_flag_set(i, FI_ACL_MODE)) ? \
(F2FS_I(i)->i_acl_mode) : ((i)->i_mode))
-/* get offset of first page in next direct node */
-#define PGOFS_OF_NEXT_DNODE(pgofs, inode) \
- ((pgofs < ADDRS_PER_INODE(inode)) ? ADDRS_PER_INODE(inode) : \
- (pgofs - ADDRS_PER_INODE(inode) + ADDRS_PER_BLOCK) / \
- ADDRS_PER_BLOCK * ADDRS_PER_BLOCK + ADDRS_PER_INODE(inode))
-
/*
* file.c
*/
@@ -2096,8 +2136,6 @@ ino_t f2fs_inode_by_name(struct inode *dir, const struct qstr *qstr,
struct page **page);
void f2fs_set_link(struct inode *dir, struct f2fs_dir_entry *de,
struct page *page, struct inode *inode);
-int update_dent_inode(struct inode *inode, struct inode *to,
- const struct qstr *name);
void f2fs_update_dentry(nid_t ino, umode_t mode, struct f2fs_dentry_ptr *d,
const struct qstr *name, f2fs_hash_t name_hash,
unsigned int bit_pos);
@@ -2133,7 +2171,8 @@ int sanity_check_ckpt(struct f2fs_sb_info *sbi);
/*
* hash.c
*/
-f2fs_hash_t f2fs_dentry_hash(const struct qstr *name_info);
+f2fs_hash_t f2fs_dentry_hash(const struct qstr *name_info,
+ struct fscrypt_name *fname);
/*
* node.c
@@ -2184,6 +2223,7 @@ void destroy_node_manager_caches(void);
*/
void register_inmem_page(struct inode *inode, struct page *page);
void drop_inmem_pages(struct inode *inode);
+void drop_inmem_page(struct inode *inode, struct page *page);
int commit_inmem_pages(struct inode *inode);
void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need);
void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi);
@@ -2193,7 +2233,7 @@ void destroy_flush_cmd_control(struct f2fs_sb_info *sbi, bool free);
void invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr);
bool is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr);
void refresh_sit_entry(struct f2fs_sb_info *sbi, block_t old, block_t new);
-void f2fs_wait_discard_bio(struct f2fs_sb_info *sbi, block_t blkaddr);
+void f2fs_wait_discard_bios(struct f2fs_sb_info *sbi);
void clear_prefree_segments(struct f2fs_sb_info *sbi, struct cp_control *cpc);
void release_discard_addrs(struct f2fs_sb_info *sbi);
int npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra);
@@ -2205,7 +2245,7 @@ void update_meta_page(struct f2fs_sb_info *sbi, void *src, block_t blk_addr);
void write_meta_page(struct f2fs_sb_info *sbi, struct page *page);
void write_node_page(unsigned int nid, struct f2fs_io_info *fio);
void write_data_page(struct dnode_of_data *dn, struct f2fs_io_info *fio);
-void rewrite_data_page(struct f2fs_io_info *fio);
+int rewrite_data_page(struct f2fs_io_info *fio);
void __f2fs_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
block_t old_blkaddr, block_t new_blkaddr,
bool recover_curseg, bool recover_newaddr);
@@ -2310,7 +2350,8 @@ int f2fs_migrate_page(struct address_space *mapping, struct page *newpage,
int start_gc_thread(struct f2fs_sb_info *sbi);
void stop_gc_thread(struct f2fs_sb_info *sbi);
block_t start_bidx_of_node(unsigned int node_ofs, struct inode *inode);
-int f2fs_gc(struct f2fs_sb_info *sbi, bool sync, bool background);
+int f2fs_gc(struct f2fs_sb_info *sbi, bool sync, bool background,
+ unsigned int segno);
void build_gc_manager(struct f2fs_sb_info *sbi);
/*
@@ -2334,11 +2375,15 @@ struct f2fs_stat_info {
int ndirty_node, ndirty_dent, ndirty_meta, ndirty_data, ndirty_imeta;
int inmem_pages;
unsigned int ndirty_dirs, ndirty_files, ndirty_all;
- int nats, dirty_nats, sits, dirty_sits, free_nids, alloc_nids;
+ int nats, dirty_nats, sits, dirty_sits;
+ int free_nids, avail_nids, alloc_nids;
int total_count, utilization;
- int bg_gc, nr_wb_cp_data, nr_wb_data, nr_flush, nr_discard;
+ int bg_gc, nr_wb_cp_data, nr_wb_data;
+ int nr_flushing, nr_flushed, nr_discarding, nr_discarded;
+ int nr_discard_cmd;
+ unsigned int undiscard_blks;
int inline_xattr, inline_inode, inline_dir, append, update, orphans;
- int aw_cnt, max_aw_cnt;
+ int aw_cnt, max_aw_cnt, vw_cnt, max_vw_cnt;
unsigned int valid_count, valid_node_count, valid_inode_count, discard_blks;
unsigned int bimodal, avg_vblocks;
int util_free, util_valid, util_invalid;
@@ -2421,11 +2466,22 @@ static inline struct f2fs_stat_info *F2FS_STAT(struct f2fs_sb_info *sbi)
if (cur > max) \
atomic_set(&F2FS_I_SB(inode)->max_aw_cnt, cur); \
} while (0)
+#define stat_inc_volatile_write(inode) \
+ (atomic_inc(&F2FS_I_SB(inode)->vw_cnt))
+#define stat_dec_volatile_write(inode) \
+ (atomic_dec(&F2FS_I_SB(inode)->vw_cnt))
+#define stat_update_max_volatile_write(inode) \
+ do { \
+ int cur = atomic_read(&F2FS_I_SB(inode)->vw_cnt); \
+ int max = atomic_read(&F2FS_I_SB(inode)->max_vw_cnt); \
+ if (cur > max) \
+ atomic_set(&F2FS_I_SB(inode)->max_vw_cnt, cur); \
+ } while (0)
#define stat_inc_seg_count(sbi, type, gc_type) \
do { \
struct f2fs_stat_info *si = F2FS_STAT(sbi); \
- (si)->tot_segs++; \
- if (type == SUM_TYPE_DATA) { \
+ si->tot_segs++; \
+ if ((type) == SUM_TYPE_DATA) { \
si->data_segs++; \
si->bg_data_segs += (gc_type == BG_GC) ? 1 : 0; \
} else { \
@@ -2435,14 +2491,14 @@ static inline struct f2fs_stat_info *F2FS_STAT(struct f2fs_sb_info *sbi)
} while (0)
#define stat_inc_tot_blk_count(si, blks) \
- (si->tot_blks += (blks))
+ ((si)->tot_blks += (blks))
#define stat_inc_data_blk_count(sbi, blks, gc_type) \
do { \
struct f2fs_stat_info *si = F2FS_STAT(sbi); \
stat_inc_tot_blk_count(si, blks); \
si->data_blks += (blks); \
- si->bg_data_blks += (gc_type == BG_GC) ? (blks) : 0; \
+ si->bg_data_blks += ((gc_type) == BG_GC) ? (blks) : 0; \
} while (0)
#define stat_inc_node_blk_count(sbi, blks, gc_type) \
@@ -2450,7 +2506,7 @@ static inline struct f2fs_stat_info *F2FS_STAT(struct f2fs_sb_info *sbi)
struct f2fs_stat_info *si = F2FS_STAT(sbi); \
stat_inc_tot_blk_count(si, blks); \
si->node_blks += (blks); \
- si->bg_node_blks += (gc_type == BG_GC) ? (blks) : 0; \
+ si->bg_node_blks += ((gc_type) == BG_GC) ? (blks) : 0; \
} while (0)
int f2fs_build_stats(struct f2fs_sb_info *sbi);
@@ -2458,32 +2514,35 @@ void f2fs_destroy_stats(struct f2fs_sb_info *sbi);
int __init f2fs_create_root_stats(void);
void f2fs_destroy_root_stats(void);
#else
-#define stat_inc_cp_count(si)
-#define stat_inc_bg_cp_count(si)
-#define stat_inc_call_count(si)
-#define stat_inc_bggc_count(si)
-#define stat_inc_dirty_inode(sbi, type)
-#define stat_dec_dirty_inode(sbi, type)
-#define stat_inc_total_hit(sb)
-#define stat_inc_rbtree_node_hit(sb)
-#define stat_inc_largest_node_hit(sbi)
-#define stat_inc_cached_node_hit(sbi)
-#define stat_inc_inline_xattr(inode)
-#define stat_dec_inline_xattr(inode)
-#define stat_inc_inline_inode(inode)
-#define stat_dec_inline_inode(inode)
-#define stat_inc_inline_dir(inode)
-#define stat_dec_inline_dir(inode)
-#define stat_inc_atomic_write(inode)
-#define stat_dec_atomic_write(inode)
-#define stat_update_max_atomic_write(inode)
-#define stat_inc_seg_type(sbi, curseg)
-#define stat_inc_block_count(sbi, curseg)
-#define stat_inc_inplace_blocks(sbi)
-#define stat_inc_seg_count(sbi, type, gc_type)
-#define stat_inc_tot_blk_count(si, blks)
-#define stat_inc_data_blk_count(sbi, blks, gc_type)
-#define stat_inc_node_blk_count(sbi, blks, gc_type)
+#define stat_inc_cp_count(si) do { } while (0)
+#define stat_inc_bg_cp_count(si) do { } while (0)
+#define stat_inc_call_count(si) do { } while (0)
+#define stat_inc_bggc_count(si) do { } while (0)
+#define stat_inc_dirty_inode(sbi, type) do { } while (0)
+#define stat_dec_dirty_inode(sbi, type) do { } while (0)
+#define stat_inc_total_hit(sb) do { } while (0)
+#define stat_inc_rbtree_node_hit(sb) do { } while (0)
+#define stat_inc_largest_node_hit(sbi) do { } while (0)
+#define stat_inc_cached_node_hit(sbi) do { } while (0)
+#define stat_inc_inline_xattr(inode) do { } while (0)
+#define stat_dec_inline_xattr(inode) do { } while (0)
+#define stat_inc_inline_inode(inode) do { } while (0)
+#define stat_dec_inline_inode(inode) do { } while (0)
+#define stat_inc_inline_dir(inode) do { } while (0)
+#define stat_dec_inline_dir(inode) do { } while (0)
+#define stat_inc_atomic_write(inode) do { } while (0)
+#define stat_dec_atomic_write(inode) do { } while (0)
+#define stat_update_max_atomic_write(inode) do { } while (0)
+#define stat_inc_volatile_write(inode) do { } while (0)
+#define stat_dec_volatile_write(inode) do { } while (0)
+#define stat_update_max_volatile_write(inode) do { } while (0)
+#define stat_inc_seg_type(sbi, curseg) do { } while (0)
+#define stat_inc_block_count(sbi, curseg) do { } while (0)
+#define stat_inc_inplace_blocks(sbi) do { } while (0)
+#define stat_inc_seg_count(sbi, type, gc_type) do { } while (0)
+#define stat_inc_tot_blk_count(si, blks) do { } while (0)
+#define stat_inc_data_blk_count(sbi, blks, gc_type) do { } while (0)
+#define stat_inc_node_blk_count(sbi, blks, gc_type) do { } while (0)
static inline int f2fs_build_stats(struct f2fs_sb_info *sbi) { return 0; }
static inline void f2fs_destroy_stats(struct f2fs_sb_info *sbi) { }
@@ -2509,7 +2568,7 @@ extern struct kmem_cache *inode_entry_slab;
bool f2fs_may_inline_data(struct inode *inode);
bool f2fs_may_inline_dentry(struct inode *inode);
void read_inline_data(struct page *page, struct page *ipage);
-bool truncate_inline_inode(struct page *ipage, u64 from);
+void truncate_inline_inode(struct inode *inode, struct page *ipage, u64 from);
int f2fs_read_inline_data(struct inode *inode, struct page *page);
int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page);
int f2fs_convert_inline_inode(struct inode *inode);
@@ -2544,6 +2603,18 @@ void f2fs_leave_shrinker(struct f2fs_sb_info *sbi);
/*
* extent_cache.c
*/
+struct rb_entry *__lookup_rb_tree(struct rb_root *root,
+ struct rb_entry *cached_re, unsigned int ofs);
+struct rb_node **__lookup_rb_tree_for_insert(struct f2fs_sb_info *sbi,
+ struct rb_root *root, struct rb_node **parent,
+ unsigned int ofs);
+struct rb_entry *__lookup_rb_tree_ret(struct rb_root *root,
+ struct rb_entry *cached_re, unsigned int ofs,
+ struct rb_entry **prev_entry, struct rb_entry **next_entry,
+ struct rb_node ***insert_p, struct rb_node **insert_parent,
+ bool force);
+bool __check_rb_tree_consistence(struct f2fs_sb_info *sbi,
+ struct rb_root *root);
unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink);
bool f2fs_init_extent_tree(struct inode *inode, struct f2fs_extent *i_ext);
void f2fs_drop_extent_tree(struct inode *inode);
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index 5f7317875a67..61af721329fa 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -116,11 +116,6 @@ static int get_parent_ino(struct inode *inode, nid_t *pino)
if (!dentry)
return 0;
- if (update_dent_inode(inode, inode, &dentry->d_name)) {
- dput(dentry);
- return 0;
- }
-
*pino = parent_ino(dentry);
dput(dentry);
return 1;
@@ -528,7 +523,7 @@ static int truncate_partial_data_page(struct inode *inode, u64 from,
page = get_lock_data_page(inode, index, true);
if (IS_ERR(page))
- return 0;
+ return PTR_ERR(page) == -ENOENT ? 0 : PTR_ERR(page);
truncate_out:
f2fs_wait_on_page_writeback(page, DATA, true);
zero_user(page, offset, PAGE_SIZE - offset);
@@ -566,9 +561,7 @@ int truncate_blocks(struct inode *inode, u64 from, bool lock)
}
if (f2fs_has_inline_data(inode)) {
- truncate_inline_inode(ipage, from);
- if (from == 0)
- clear_inode_flag(inode, FI_DATA_EXIST);
+ truncate_inline_inode(inode, ipage, from);
f2fs_put_page(ipage, 1);
truncate_page = true;
goto out;
@@ -617,6 +610,12 @@ int f2fs_truncate(struct inode *inode)
trace_f2fs_truncate(inode);
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+ if (time_to_inject(F2FS_I_SB(inode), FAULT_TRUNCATE)) {
+ f2fs_show_injection_info(FAULT_TRUNCATE);
+ return -EIO;
+ }
+#endif
/* we should check inline_data size */
if (!f2fs_may_inline_data(inode)) {
err = f2fs_convert_inline_inode(inode);
@@ -1012,11 +1011,11 @@ static int __exchange_data_block(struct inode *src_inode,
while (len) {
olen = min((pgoff_t)4 * ADDRS_PER_BLOCK, len);
- src_blkaddr = f2fs_kvzalloc(sizeof(block_t) * olen, GFP_KERNEL);
+ src_blkaddr = kvzalloc(sizeof(block_t) * olen, GFP_KERNEL);
if (!src_blkaddr)
return -ENOMEM;
- do_replace = f2fs_kvzalloc(sizeof(int) * olen, GFP_KERNEL);
+ do_replace = kvzalloc(sizeof(int) * olen, GFP_KERNEL);
if (!do_replace) {
kvfree(src_blkaddr);
return -ENOMEM;
@@ -1188,8 +1187,6 @@ static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
if (ret)
return ret;
- if (offset + len > new_size)
- new_size = offset + len;
new_size = max_t(loff_t, new_size, offset + len);
} else {
if (off_start) {
@@ -1257,8 +1254,9 @@ static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
int ret = 0;
new_size = i_size_read(inode) + len;
- if (new_size > inode->i_sb->s_maxbytes)
- return -EFBIG;
+ ret = inode_newsize_ok(inode, new_size);
+ if (ret)
+ return ret;
if (offset >= i_size_read(inode))
return -EINVAL;
@@ -1428,6 +1426,7 @@ static int f2fs_release_file(struct inode *inode, struct file *filp)
drop_inmem_pages(inode);
if (f2fs_is_volatile_file(inode)) {
clear_inode_flag(inode, FI_VOLATILE_FILE);
+ stat_dec_volatile_write(inode);
set_inode_flag(inode, FI_DROP_CACHE);
filemap_fdatawrite(inode->i_mapping);
clear_inode_flag(inode, FI_DROP_CACHE);
@@ -1474,10 +1473,10 @@ static int f2fs_ioc_setflags(struct file *filp, unsigned long arg)
if (ret)
return ret;
- flags = f2fs_mask_flags(inode->i_mode, flags);
-
inode_lock(inode);
+ flags = f2fs_mask_flags(inode->i_mode, flags);
+
oldflags = fi->i_flags;
if ((flags ^ oldflags) & (FS_APPEND_FL | FS_IMMUTABLE_FL)) {
@@ -1491,10 +1490,11 @@ static int f2fs_ioc_setflags(struct file *filp, unsigned long arg)
flags = flags & FS_FL_USER_MODIFIABLE;
flags |= oldflags & ~FS_FL_USER_MODIFIABLE;
fi->i_flags = flags;
- inode_unlock(inode);
inode->i_ctime = current_time(inode);
f2fs_set_inode_flags(inode);
+
+ inode_unlock(inode);
out:
mnt_drop_write_file(filp);
return ret;
@@ -1515,6 +1515,9 @@ static int f2fs_ioc_start_atomic_write(struct file *filp)
if (!inode_owner_or_capable(inode))
return -EACCES;
+ if (!S_ISREG(inode->i_mode))
+ return -EINVAL;
+
ret = mnt_want_write_file(filp);
if (ret)
return ret;
@@ -1529,20 +1532,25 @@ static int f2fs_ioc_start_atomic_write(struct file *filp)
goto out;
set_inode_flag(inode, FI_ATOMIC_FILE);
+ set_inode_flag(inode, FI_HOT_DATA);
f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
if (!get_dirty_pages(inode))
- goto out;
+ goto inc_stat;
f2fs_msg(F2FS_I_SB(inode)->sb, KERN_WARNING,
"Unexpected flush for atomic writes: ino=%lu, npages=%u",
inode->i_ino, get_dirty_pages(inode));
ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
- if (ret)
+ if (ret) {
clear_inode_flag(inode, FI_ATOMIC_FILE);
-out:
+ goto out;
+ }
+
+inc_stat:
stat_inc_atomic_write(inode);
stat_update_max_atomic_write(inode);
+out:
inode_unlock(inode);
mnt_drop_write_file(filp);
return ret;
@@ -1592,6 +1600,9 @@ static int f2fs_ioc_start_volatile_write(struct file *filp)
if (!inode_owner_or_capable(inode))
return -EACCES;
+ if (!S_ISREG(inode->i_mode))
+ return -EINVAL;
+
ret = mnt_want_write_file(filp);
if (ret)
return ret;
@@ -1605,6 +1616,9 @@ static int f2fs_ioc_start_volatile_write(struct file *filp)
if (ret)
goto out;
+ stat_inc_volatile_write(inode);
+ stat_update_max_volatile_write(inode);
+
set_inode_flag(inode, FI_VOLATILE_FILE);
f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
out:
@@ -1660,6 +1674,7 @@ static int f2fs_ioc_abort_volatile_write(struct file *filp)
drop_inmem_pages(inode);
if (f2fs_is_volatile_file(inode)) {
clear_inode_flag(inode, FI_VOLATILE_FILE);
+ stat_dec_volatile_write(inode);
ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
}
@@ -1841,7 +1856,7 @@ static int f2fs_ioc_gc(struct file *filp, unsigned long arg)
mutex_lock(&sbi->gc_mutex);
}
- ret = f2fs_gc(sbi, sync, true);
+ ret = f2fs_gc(sbi, sync, true, NULL_SEGNO);
out:
mnt_drop_write_file(filp);
return ret;
@@ -1879,13 +1894,12 @@ static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
pgoff_t pg_start, pg_end;
unsigned int blk_per_seg = sbi->blocks_per_seg;
unsigned int total = 0, sec_num;
- unsigned int pages_per_sec = sbi->segs_per_sec * blk_per_seg;
block_t blk_end = 0;
bool fragmented = false;
int err;
/* if in-place-update policy is enabled, don't waste time here */
- if (need_inplace_update(inode))
+ if (need_inplace_update_policy(inode, NULL))
return -EINVAL;
pg_start = range->start >> PAGE_SHIFT;
@@ -1943,7 +1957,7 @@ static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
map.m_lblk = pg_start;
map.m_len = pg_end - pg_start;
- sec_num = (map.m_len + pages_per_sec - 1) / pages_per_sec;
+ sec_num = (map.m_len + BLKS_PER_SEC(sbi) - 1) / BLKS_PER_SEC(sbi);
/*
* make sure there are enough free section for LFS allocation, this can
@@ -2020,42 +2034,40 @@ static int f2fs_ioc_defragment(struct file *filp, unsigned long arg)
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- if (!S_ISREG(inode->i_mode))
+ if (!S_ISREG(inode->i_mode) || f2fs_is_atomic_file(inode))
return -EINVAL;
- err = mnt_want_write_file(filp);
- if (err)
- return err;
-
- if (f2fs_readonly(sbi->sb)) {
- err = -EROFS;
- goto out;
- }
+ if (f2fs_readonly(sbi->sb))
+ return -EROFS;
if (copy_from_user(&range, (struct f2fs_defragment __user *)arg,
- sizeof(range))) {
- err = -EFAULT;
- goto out;
- }
+ sizeof(range)))
+ return -EFAULT;
/* verify alignment of offset & size */
- if (range.start & (F2FS_BLKSIZE - 1) ||
- range.len & (F2FS_BLKSIZE - 1)) {
- err = -EINVAL;
- goto out;
- }
+ if (range.start & (F2FS_BLKSIZE - 1) || range.len & (F2FS_BLKSIZE - 1))
+ return -EINVAL;
+
+ if (unlikely((range.start + range.len) >> PAGE_SHIFT >
+ sbi->max_file_blocks))
+ return -EINVAL;
+
+ err = mnt_want_write_file(filp);
+ if (err)
+ return err;
err = f2fs_defragment_range(sbi, filp, &range);
+ mnt_drop_write_file(filp);
+
f2fs_update_time(sbi, REQ_TIME);
if (err < 0)
- goto out;
+ return err;
if (copy_to_user((struct f2fs_defragment __user *)arg, &range,
sizeof(range)))
- err = -EFAULT;
-out:
- mnt_drop_write_file(filp);
- return err;
+ return -EFAULT;
+
+ return 0;
}
static int f2fs_move_file_range(struct file *file_in, loff_t pos_in,
@@ -2189,6 +2201,8 @@ static int f2fs_ioc_move_range(struct file *filp, unsigned long arg)
range.pos_out, range.len);
mnt_drop_write_file(filp);
+ if (err)
+ goto err_out;
if (copy_to_user((struct f2fs_move_range __user *)arg,
&range, sizeof(range)))
@@ -2198,6 +2212,69 @@ err_out:
return err;
}
+static int f2fs_ioc_flush_device(struct file *filp, unsigned long arg)
+{
+ struct inode *inode = file_inode(filp);
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ struct sit_info *sm = SIT_I(sbi);
+ unsigned int start_segno = 0, end_segno = 0;
+ unsigned int dev_start_segno = 0, dev_end_segno = 0;
+ struct f2fs_flush_device range;
+ int ret;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if (f2fs_readonly(sbi->sb))
+ return -EROFS;
+
+ if (copy_from_user(&range, (struct f2fs_flush_device __user *)arg,
+ sizeof(range)))
+ return -EFAULT;
+
+ if (sbi->s_ndevs <= 1 || sbi->s_ndevs - 1 <= range.dev_num ||
+ sbi->segs_per_sec != 1) {
+ f2fs_msg(sbi->sb, KERN_WARNING,
+ "Can't flush %u in %d for segs_per_sec %u != 1\n",
+ range.dev_num, sbi->s_ndevs,
+ sbi->segs_per_sec);
+ return -EINVAL;
+ }
+
+ ret = mnt_want_write_file(filp);
+ if (ret)
+ return ret;
+
+ if (range.dev_num != 0)
+ dev_start_segno = GET_SEGNO(sbi, FDEV(range.dev_num).start_blk);
+ dev_end_segno = GET_SEGNO(sbi, FDEV(range.dev_num).end_blk);
+
+ start_segno = sm->last_victim[FLUSH_DEVICE];
+ if (start_segno < dev_start_segno || start_segno >= dev_end_segno)
+ start_segno = dev_start_segno;
+ end_segno = min(start_segno + range.segments, dev_end_segno);
+
+ while (start_segno < end_segno) {
+ if (!mutex_trylock(&sbi->gc_mutex)) {
+ ret = -EBUSY;
+ goto out;
+ }
+ sm->last_victim[GC_CB] = end_segno + 1;
+ sm->last_victim[GC_GREEDY] = end_segno + 1;
+ sm->last_victim[ALLOC_NEXT] = end_segno + 1;
+ ret = f2fs_gc(sbi, true, true, start_segno);
+ if (ret == -EAGAIN)
+ ret = 0;
+ else if (ret < 0)
+ break;
+ start_segno++;
+ }
+out:
+ mnt_drop_write_file(filp);
+ return ret;
+}
+
+
long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
switch (cmd) {
@@ -2235,6 +2312,8 @@ long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
return f2fs_ioc_defragment(filp, arg);
case F2FS_IOC_MOVE_RANGE:
return f2fs_ioc_move_range(filp, arg);
+ case F2FS_IOC_FLUSH_DEVICE:
+ return f2fs_ioc_flush_device(filp, arg);
default:
return -ENOTTY;
}
@@ -2302,8 +2381,8 @@ long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
case F2FS_IOC_GARBAGE_COLLECT:
case F2FS_IOC_WRITE_CHECKPOINT:
case F2FS_IOC_DEFRAGMENT:
- break;
case F2FS_IOC_MOVE_RANGE:
+ case F2FS_IOC_FLUSH_DEVICE:
break;
default:
return -ENOIOCTLCMD;
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index 418fd9881646..026522107ca3 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -84,7 +84,7 @@ static int gc_thread_func(void *data)
stat_inc_bggc_count(sbi);
/* if return value is not zero, no victim was selected */
- if (f2fs_gc(sbi, test_opt(sbi, FORCE_FG_GC), true))
+ if (f2fs_gc(sbi, test_opt(sbi, FORCE_FG_GC), true, NULL_SEGNO))
wait_ms = gc_th->no_gc_sleep_time;
trace_f2fs_background_gc(sbi->sb, wait_ms,
@@ -172,7 +172,11 @@ static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
if (gc_type != FG_GC && p->max_search > sbi->max_victim_search)
p->max_search = sbi->max_victim_search;
- p->offset = sbi->last_victim[p->gc_mode];
+ /* let's select beginning hot/small space first */
+ if (type == CURSEG_HOT_DATA || IS_NODESEG(type))
+ p->offset = 0;
+ else
+ p->offset = SIT_I(sbi)->last_victim[p->gc_mode];
}
static unsigned int get_max_cost(struct f2fs_sb_info *sbi,
@@ -182,7 +186,7 @@ static unsigned int get_max_cost(struct f2fs_sb_info *sbi,
if (p->alloc_mode == SSR)
return sbi->blocks_per_seg;
if (p->gc_mode == GC_GREEDY)
- return sbi->blocks_per_seg * p->ofs_unit;
+ return 2 * sbi->blocks_per_seg * p->ofs_unit;
else if (p->gc_mode == GC_CB)
return UINT_MAX;
else /* No other gc_mode */
@@ -207,7 +211,7 @@ static unsigned int check_bg_victims(struct f2fs_sb_info *sbi)
continue;
clear_bit(secno, dirty_i->victim_secmap);
- return secno * sbi->segs_per_sec;
+ return GET_SEG_FROM_SEC(sbi, secno);
}
return NULL_SEGNO;
}
@@ -215,8 +219,8 @@ static unsigned int check_bg_victims(struct f2fs_sb_info *sbi)
static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno)
{
struct sit_info *sit_i = SIT_I(sbi);
- unsigned int secno = GET_SECNO(sbi, segno);
- unsigned int start = secno * sbi->segs_per_sec;
+ unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
+ unsigned int start = GET_SEG_FROM_SEC(sbi, secno);
unsigned long long mtime = 0;
unsigned int vblocks;
unsigned char age = 0;
@@ -225,7 +229,7 @@ static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno)
for (i = 0; i < sbi->segs_per_sec; i++)
mtime += get_seg_entry(sbi, start + i)->mtime;
- vblocks = get_valid_blocks(sbi, segno, sbi->segs_per_sec);
+ vblocks = get_valid_blocks(sbi, segno, true);
mtime = div_u64(mtime, sbi->segs_per_sec);
vblocks = div_u64(vblocks, sbi->segs_per_sec);
@@ -248,7 +252,7 @@ static unsigned int get_greedy_cost(struct f2fs_sb_info *sbi,
unsigned int segno)
{
unsigned int valid_blocks =
- get_valid_blocks(sbi, segno, sbi->segs_per_sec);
+ get_valid_blocks(sbi, segno, true);
return IS_DATASEG(get_seg_entry(sbi, segno)->type) ?
valid_blocks * 2 : valid_blocks;
@@ -291,6 +295,7 @@ static int get_victim_by_default(struct f2fs_sb_info *sbi,
unsigned int *result, int gc_type, int type, char alloc_mode)
{
struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
+ struct sit_info *sm = SIT_I(sbi);
struct victim_sel_policy p;
unsigned int secno, last_victim;
unsigned int last_segment = MAIN_SEGS(sbi);
@@ -304,10 +309,18 @@ static int get_victim_by_default(struct f2fs_sb_info *sbi,
p.min_segno = NULL_SEGNO;
p.min_cost = get_max_cost(sbi, &p);
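+	/* use the caller-pinned victim directly if it is still a data segment with valid blocks in an unused section */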
+ if (*result != NULL_SEGNO) {
+ if (IS_DATASEG(get_seg_entry(sbi, *result)->type) &&
+ get_valid_blocks(sbi, *result, false) &&
+ !sec_usage_check(sbi, GET_SEC_FROM_SEG(sbi, *result)))
+ p.min_segno = *result;
+ goto out;
+ }
+
if (p.max_search == 0)
goto out;
- last_victim = sbi->last_victim[p.gc_mode];
+ last_victim = sm->last_victim[p.gc_mode];
if (p.alloc_mode == LFS && gc_type == FG_GC) {
p.min_segno = check_bg_victims(sbi);
if (p.min_segno != NULL_SEGNO)
@@ -320,9 +333,10 @@ static int get_victim_by_default(struct f2fs_sb_info *sbi,
segno = find_next_bit(p.dirty_segmap, last_segment, p.offset);
if (segno >= last_segment) {
- if (sbi->last_victim[p.gc_mode]) {
- last_segment = sbi->last_victim[p.gc_mode];
- sbi->last_victim[p.gc_mode] = 0;
+ if (sm->last_victim[p.gc_mode]) {
+ last_segment =
+ sm->last_victim[p.gc_mode];
+ sm->last_victim[p.gc_mode] = 0;
p.offset = 0;
continue;
}
@@ -339,7 +353,7 @@ static int get_victim_by_default(struct f2fs_sb_info *sbi,
nsearched++;
}
- secno = GET_SECNO(sbi, segno);
+ secno = GET_SEC_FROM_SEG(sbi, segno);
if (sec_usage_check(sbi, secno))
goto next;
@@ -357,17 +371,18 @@ static int get_victim_by_default(struct f2fs_sb_info *sbi,
}
next:
if (nsearched >= p.max_search) {
- if (!sbi->last_victim[p.gc_mode] && segno <= last_victim)
- sbi->last_victim[p.gc_mode] = last_victim + 1;
+ if (!sm->last_victim[p.gc_mode] && segno <= last_victim)
+ sm->last_victim[p.gc_mode] = last_victim + 1;
else
- sbi->last_victim[p.gc_mode] = segno + 1;
+ sm->last_victim[p.gc_mode] = segno + 1;
+ sm->last_victim[p.gc_mode] %= MAIN_SEGS(sbi);
break;
}
}
if (p.min_segno != NULL_SEGNO) {
got_it:
if (p.alloc_mode == LFS) {
- secno = GET_SECNO(sbi, p.min_segno);
+ secno = GET_SEC_FROM_SEG(sbi, p.min_segno);
if (gc_type == FG_GC)
sbi->cur_victim_sec = secno;
else
@@ -550,8 +565,10 @@ static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
get_node_info(sbi, nid, dni);
if (sum->version != dni->version) {
- f2fs_put_page(node_page, 1);
- return false;
+ f2fs_msg(sbi->sb, KERN_WARNING,
+ "%s: valid data with mismatched node version.",
+ __func__);
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
}
*nofs = ofs_of_node(node_page);
@@ -697,8 +714,10 @@ static void move_data_page(struct inode *inode, block_t bidx, int gc_type,
.type = DATA,
.op = REQ_OP_WRITE,
.op_flags = REQ_SYNC,
+ .old_blkaddr = NULL_ADDR,
.page = page,
.encrypted_page = NULL,
+ .need_lock = true,
};
bool is_dirty = PageDirty(page);
int err;
@@ -890,7 +909,7 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi,
GET_SUM_BLOCK(sbi, segno));
f2fs_put_page(sum_page, 0);
- if (get_valid_blocks(sbi, segno, 1) == 0 ||
+ if (get_valid_blocks(sbi, segno, false) == 0 ||
!PageUptodate(sum_page) ||
unlikely(f2fs_cp_error(sbi)))
goto next;
@@ -905,7 +924,6 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi,
* - mutex_lock(sentry_lock) - change_curseg()
* - lock_page(sum_page)
*/
-
if (type == SUM_TYPE_NODE)
gc_node_segment(sbi, sum->entries, segno, gc_type);
else
@@ -924,7 +942,7 @@ next:
blk_finish_plug(&plug);
if (gc_type == FG_GC &&
- get_valid_blocks(sbi, start_segno, sbi->segs_per_sec) == 0)
+ get_valid_blocks(sbi, start_segno, true) == 0)
sec_freed = 1;
stat_inc_call_count(sbi->stat_info);
@@ -932,13 +950,14 @@ next:
return sec_freed;
}
-int f2fs_gc(struct f2fs_sb_info *sbi, bool sync, bool background)
+int f2fs_gc(struct f2fs_sb_info *sbi, bool sync,
+ bool background, unsigned int segno)
{
- unsigned int segno;
int gc_type = sync ? FG_GC : BG_GC;
int sec_freed = 0;
int ret = -EINVAL;
struct cp_control cpc;
+ unsigned int init_segno = segno;
struct gc_inode_list gc_list = {
.ilist = LIST_HEAD_INIT(gc_list.ilist),
.iroot = RADIX_TREE_INIT(GFP_NOFS),
@@ -959,9 +978,11 @@ gc_more:
* threshold, we can make them free by checkpoint. Then, we
* secure free segments which doesn't need fggc any more.
*/
- ret = write_checkpoint(sbi, &cpc);
- if (ret)
- goto stop;
+ if (prefree_segments(sbi)) {
+ ret = write_checkpoint(sbi, &cpc);
+ if (ret)
+ goto stop;
+ }
if (has_not_enough_free_secs(sbi, 0, 0))
gc_type = FG_GC;
}
@@ -981,13 +1002,17 @@ gc_more:
sbi->cur_victim_sec = NULL_SEGNO;
if (!sync) {
- if (has_not_enough_free_secs(sbi, sec_freed, 0))
+ if (has_not_enough_free_secs(sbi, sec_freed, 0)) {
+ segno = NULL_SEGNO;
goto gc_more;
+ }
if (gc_type == FG_GC)
ret = write_checkpoint(sbi, &cpc);
}
stop:
+ SIT_I(sbi)->last_victim[ALLOC_NEXT] = 0;
+ SIT_I(sbi)->last_victim[FLUSH_DEVICE] = init_segno;
mutex_unlock(&sbi->gc_mutex);
put_gc_inode(&gc_list);
@@ -999,7 +1024,7 @@ stop:
void build_gc_manager(struct f2fs_sb_info *sbi)
{
- u64 main_count, resv_count, ovp_count, blocks_per_sec;
+ u64 main_count, resv_count, ovp_count;
DIRTY_I(sbi)->v_ops = &default_v_ops;
@@ -1007,8 +1032,12 @@ void build_gc_manager(struct f2fs_sb_info *sbi)
main_count = SM_I(sbi)->main_segments << sbi->log_blocks_per_seg;
resv_count = SM_I(sbi)->reserved_segments << sbi->log_blocks_per_seg;
ovp_count = SM_I(sbi)->ovp_segments << sbi->log_blocks_per_seg;
- blocks_per_sec = sbi->blocks_per_seg * sbi->segs_per_sec;
- sbi->fggc_threshold = div64_u64((main_count - ovp_count) * blocks_per_sec,
- (main_count - resv_count));
+ sbi->fggc_threshold = div64_u64((main_count - ovp_count) *
+ BLKS_PER_SEC(sbi), (main_count - resv_count));
+
+ /* give warm/cold data area from slower device */
+ if (sbi->s_ndevs && sbi->segs_per_sec == 1)
+ SIT_I(sbi)->last_victim[ALLOC_NEXT] =
+ GET_SEGNO(sbi, FDEV(0).end_blk) + 1;
}
diff --git a/fs/f2fs/hash.c b/fs/f2fs/hash.c
index 71b7206c431e..eb2e031ea887 100644
--- a/fs/f2fs/hash.c
+++ b/fs/f2fs/hash.c
@@ -70,7 +70,8 @@ static void str2hashbuf(const unsigned char *msg, size_t len,
*buf++ = pad;
}
-f2fs_hash_t f2fs_dentry_hash(const struct qstr *name_info)
+f2fs_hash_t f2fs_dentry_hash(const struct qstr *name_info,
+ struct fscrypt_name *fname)
{
__u32 hash;
f2fs_hash_t f2fs_hash;
@@ -79,6 +80,10 @@ f2fs_hash_t f2fs_dentry_hash(const struct qstr *name_info)
const unsigned char *name = name_info->name;
size_t len = name_info->len;
+ /* encrypted bigname case */
+ if (fname && !fname->disk_name.name)
+ return cpu_to_le32(fname->hash);
+
if (is_dot_dotdot(name_info))
return 0;
diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
index e32a9e527968..e4c527c4e7d0 100644
--- a/fs/f2fs/inline.c
+++ b/fs/f2fs/inline.c
@@ -63,19 +63,21 @@ void read_inline_data(struct page *page, struct page *ipage)
SetPageUptodate(page);
}
-bool truncate_inline_inode(struct page *ipage, u64 from)
+void truncate_inline_inode(struct inode *inode, struct page *ipage, u64 from)
{
void *addr;
if (from >= MAX_INLINE_DATA)
- return false;
+ return;
addr = inline_data_addr(ipage);
f2fs_wait_on_page_writeback(ipage, NODE, true);
memset(addr + from, 0, MAX_INLINE_DATA - from);
set_page_dirty(ipage);
- return true;
+
+ if (from == 0)
+ clear_inode_flag(inode, FI_DATA_EXIST);
}
int f2fs_read_inline_data(struct inode *inode, struct page *page)
@@ -135,6 +137,7 @@ int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page)
/* write data page to try to make data consistent */
set_page_writeback(page);
fio.old_blkaddr = dn->data_blkaddr;
+ set_inode_flag(dn->inode, FI_HOT_DATA);
write_data_page(dn, &fio);
f2fs_wait_on_page_writeback(page, DATA, true);
if (dirty) {
@@ -146,11 +149,11 @@ int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page)
set_inode_flag(dn->inode, FI_APPEND_WRITE);
/* clear inline data and flag after data writeback */
- truncate_inline_inode(dn->inode_page, 0);
+ truncate_inline_inode(dn->inode, dn->inode_page, 0);
clear_inline_node(dn->inode_page);
clear_out:
stat_dec_inline_inode(dn->inode);
- f2fs_clear_inline_inode(dn->inode);
+ clear_inode_flag(dn->inode, FI_INLINE_DATA);
f2fs_put_dnode(dn);
return 0;
}
@@ -267,9 +270,8 @@ process_inline:
if (f2fs_has_inline_data(inode)) {
ipage = get_node_page(sbi, inode->i_ino);
f2fs_bug_on(sbi, IS_ERR(ipage));
- if (!truncate_inline_inode(ipage, 0))
- return false;
- f2fs_clear_inline_inode(inode);
+ truncate_inline_inode(inode, ipage, 0);
+ clear_inode_flag(inode, FI_INLINE_DATA);
f2fs_put_page(ipage, 1);
} else if (ri && (ri->i_inline & F2FS_INLINE_DATA)) {
if (truncate_blocks(inode, 0, false))
@@ -296,11 +298,11 @@ struct f2fs_dir_entry *find_in_inline_dir(struct inode *dir,
return NULL;
}
- namehash = f2fs_dentry_hash(&name);
+ namehash = f2fs_dentry_hash(&name, fname);
inline_dentry = inline_data_addr(ipage);
- make_dentry_ptr(NULL, &d, (void *)inline_dentry, 2);
+ make_dentry_ptr_inline(NULL, &d, inline_dentry);
de = find_target_dentry(fname, namehash, NULL, &d);
unlock_page(ipage);
if (de)
@@ -319,7 +321,7 @@ int make_empty_inline_dir(struct inode *inode, struct inode *parent,
dentry_blk = inline_data_addr(ipage);
- make_dentry_ptr(NULL, &d, (void *)dentry_blk, 2);
+ make_dentry_ptr_inline(NULL, &d, dentry_blk);
do_make_empty_dir(inode, parent, &d);
set_page_dirty(ipage);
@@ -380,7 +382,7 @@ static int f2fs_move_inline_dirents(struct inode *dir, struct page *ipage,
set_page_dirty(page);
/* clear inline dir and flag after data writeback */
- truncate_inline_inode(ipage, 0);
+ truncate_inline_inode(dir, ipage, 0);
stat_dec_inline_dir(dir);
clear_inode_flag(dir, FI_INLINE_DENTRY);
@@ -400,7 +402,7 @@ static int f2fs_add_inline_entries(struct inode *dir,
unsigned long bit_pos = 0;
int err = 0;
- make_dentry_ptr(NULL, &d, (void *)inline_dentry, 2);
+ make_dentry_ptr_inline(NULL, &d, inline_dentry);
while (bit_pos < d.max) {
struct f2fs_dir_entry *de;
@@ -455,7 +457,7 @@ static int f2fs_move_rehashed_dirents(struct inode *dir, struct page *ipage,
}
memcpy(backup_dentry, inline_dentry, MAX_INLINE_DATA);
- truncate_inline_inode(ipage, 0);
+ truncate_inline_inode(dir, ipage, 0);
unlock_page(ipage);
@@ -527,14 +529,12 @@ int f2fs_add_inline_entry(struct inode *dir, const struct qstr *new_name,
err = PTR_ERR(page);
goto fail;
}
- if (f2fs_encrypted_inode(dir))
- file_set_enc_name(inode);
}
f2fs_wait_on_page_writeback(ipage, NODE, true);
- name_hash = f2fs_dentry_hash(new_name);
- make_dentry_ptr(NULL, &d, (void *)dentry_blk, 2);
+ name_hash = f2fs_dentry_hash(new_name, NULL);
+ make_dentry_ptr_inline(NULL, &d, dentry_blk);
f2fs_update_dentry(ino, mode, &d, new_name, name_hash, bit_pos);
set_page_dirty(ipage);
@@ -623,7 +623,7 @@ int f2fs_read_inline_dir(struct file *file, struct dir_context *ctx,
inline_dentry = inline_data_addr(ipage);
- make_dentry_ptr(inode, &d, (void *)inline_dentry, 2);
+ make_dentry_ptr_inline(inode, &d, inline_dentry);
err = f2fs_fill_dentries(ctx, &d, 0, fstr);
if (!err)
diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
index 24bb8213d974..518f49643092 100644
--- a/fs/f2fs/inode.c
+++ b/fs/f2fs/inode.c
@@ -316,7 +316,6 @@ retry:
} else if (err != -ENOENT) {
f2fs_stop_checkpoint(sbi, false);
}
- f2fs_inode_synced(inode);
return 0;
}
ret = update_inode(inode, node_page);
@@ -339,7 +338,8 @@ int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc)
* We need to balance fs here to prevent from producing dirty node pages
* during the urgent cleaning time when running out of free sections.
*/
- if (update_inode_page(inode) && wbc && wbc->nr_to_write)
+ update_inode_page(inode);
+ if (wbc && wbc->nr_to_write)
f2fs_balance_fs(sbi, true);
return 0;
}
@@ -372,13 +372,6 @@ void f2fs_evict_inode(struct inode *inode)
if (inode->i_nlink || is_bad_inode(inode))
goto no_delete;
-#ifdef CONFIG_F2FS_FAULT_INJECTION
- if (time_to_inject(sbi, FAULT_EVICT_INODE)) {
- f2fs_show_injection_info(FAULT_EVICT_INODE);
- goto no_delete;
- }
-#endif
-
remove_ino_entry(sbi, inode->i_ino, APPEND_INO);
remove_ino_entry(sbi, inode->i_ino, UPDATE_INO);
@@ -389,6 +382,12 @@ retry:
if (F2FS_HAS_BLOCKS(inode))
err = f2fs_truncate(inode);
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+ if (time_to_inject(sbi, FAULT_EVICT_INODE)) {
+ f2fs_show_injection_info(FAULT_EVICT_INODE);
+ err = -EIO;
+ }
+#endif
if (!err) {
f2fs_lock_op(sbi);
err = remove_inode_page(inode);
@@ -411,7 +410,10 @@ no_delete:
stat_dec_inline_dir(inode);
stat_dec_inline_inode(inode);
- invalidate_mapping_pages(NODE_MAPPING(sbi), inode->i_ino, inode->i_ino);
+	/* ino == 0, if f2fs_new_inode() failed */
+ if (inode->i_ino)
+ invalidate_mapping_pages(NODE_MAPPING(sbi), inode->i_ino,
+ inode->i_ino);
if (xnid)
invalidate_mapping_pages(NODE_MAPPING(sbi), xnid, xnid);
if (inode->i_nlink) {
@@ -448,6 +450,7 @@ void handle_failed_inode(struct inode *inode)
* in a panic when flushing dirty inodes in gdirty_list.
*/
update_inode_page(inode);
+ f2fs_inode_synced(inode);
/* don't make bad inode, since it becomes a regular file. */
unlock_new_inode(inode);
diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
index 98f00a3a7f50..c31b40e5f9cf 100644
--- a/fs/f2fs/namei.c
+++ b/fs/f2fs/namei.c
@@ -148,8 +148,6 @@ static int f2fs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
inode->i_mapping->a_ops = &f2fs_dblock_aops;
ino = inode->i_ino;
- f2fs_balance_fs(sbi, true);
-
f2fs_lock_op(sbi);
err = f2fs_add_link(dentry, inode);
if (err)
@@ -163,6 +161,8 @@ static int f2fs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
if (IS_DIRSYNC(dir))
f2fs_sync_fs(sbi->sb, 1);
+
+ f2fs_balance_fs(sbi, true);
return 0;
out:
handle_failed_inode(inode);
@@ -324,9 +324,10 @@ static struct dentry *f2fs_lookup(struct inode *dir, struct dentry *dentry,
if (f2fs_encrypted_inode(dir) &&
(S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) &&
!fscrypt_has_permitted_context(dir, inode)) {
- bool nokey = f2fs_encrypted_inode(inode) &&
- !fscrypt_has_encryption_key(inode);
- err = nokey ? -ENOKEY : -EPERM;
+ f2fs_msg(inode->i_sb, KERN_WARNING,
+ "Inconsistent encryption contexts: %lu/%lu",
+ dir->i_ino, inode->i_ino);
+ err = -EPERM;
goto err_out;
}
return d_splice_alias(inode, dentry);
@@ -423,8 +424,6 @@ static int f2fs_symlink(struct inode *dir, struct dentry *dentry,
inode_nohighmem(inode);
inode->i_mapping->a_ops = &f2fs_dblock_aops;
- f2fs_balance_fs(sbi, true);
-
f2fs_lock_op(sbi);
err = f2fs_add_link(dentry, inode);
if (err)
@@ -487,6 +486,8 @@ err_out:
}
kfree(sd);
+
+ f2fs_balance_fs(sbi, true);
return err;
out:
handle_failed_inode(inode);
@@ -508,8 +509,6 @@ static int f2fs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
inode->i_mapping->a_ops = &f2fs_dblock_aops;
mapping_set_gfp_mask(inode->i_mapping, GFP_F2FS_HIGH_ZERO);
- f2fs_balance_fs(sbi, true);
-
set_inode_flag(inode, FI_INC_LINK);
f2fs_lock_op(sbi);
err = f2fs_add_link(dentry, inode);
@@ -524,6 +523,8 @@ static int f2fs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
if (IS_DIRSYNC(dir))
f2fs_sync_fs(sbi->sb, 1);
+
+ f2fs_balance_fs(sbi, true);
return 0;
out_fail:
@@ -554,8 +555,6 @@ static int f2fs_mknod(struct inode *dir, struct dentry *dentry,
init_special_inode(inode, inode->i_mode, rdev);
inode->i_op = &f2fs_special_inode_operations;
- f2fs_balance_fs(sbi, true);
-
f2fs_lock_op(sbi);
err = f2fs_add_link(dentry, inode);
if (err)
@@ -569,6 +568,8 @@ static int f2fs_mknod(struct inode *dir, struct dentry *dentry,
if (IS_DIRSYNC(dir))
f2fs_sync_fs(sbi->sb, 1);
+
+ f2fs_balance_fs(sbi, true);
return 0;
out:
handle_failed_inode(inode);
@@ -595,8 +596,6 @@ static int __f2fs_tmpfile(struct inode *dir, struct dentry *dentry,
inode->i_mapping->a_ops = &f2fs_dblock_aops;
}
- f2fs_balance_fs(sbi, true);
-
f2fs_lock_op(sbi);
err = acquire_orphan_inode(sbi);
if (err)
@@ -622,6 +621,8 @@ static int __f2fs_tmpfile(struct inode *dir, struct dentry *dentry,
/* link_count was changed by d_tmpfile as well. */
f2fs_unlock_op(sbi);
unlock_new_inode(inode);
+
+ f2fs_balance_fs(sbi, true);
return 0;
release_out:
@@ -720,13 +721,6 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
if (err)
goto put_out_dir;
- err = update_dent_inode(old_inode, new_inode,
- &new_dentry->d_name);
- if (err) {
- release_orphan_inode(sbi);
- goto put_out_dir;
- }
-
f2fs_set_link(new_dir, new_entry, new_page, old_inode);
new_inode->i_ctime = current_time(new_inode);
@@ -779,8 +773,6 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
down_write(&F2FS_I(old_inode)->i_sem);
file_lost_pino(old_inode);
- if (new_inode && file_enc_name(new_inode))
- file_set_enc_name(old_inode);
up_write(&F2FS_I(old_inode)->i_sem);
old_inode->i_ctime = current_time(old_inode);
@@ -908,8 +900,8 @@ static int f2fs_cross_rename(struct inode *old_dir, struct dentry *old_dentry,
old_nlink = old_dir_entry ? -1 : 1;
new_nlink = -old_nlink;
err = -EMLINK;
- if ((old_nlink > 0 && old_inode->i_nlink >= F2FS_LINK_MAX) ||
- (new_nlink > 0 && new_inode->i_nlink >= F2FS_LINK_MAX))
+ if ((old_nlink > 0 && old_dir->i_nlink >= F2FS_LINK_MAX) ||
+ (new_nlink > 0 && new_dir->i_nlink >= F2FS_LINK_MAX))
goto out_new_dir;
}
@@ -917,18 +909,6 @@ static int f2fs_cross_rename(struct inode *old_dir, struct dentry *old_dentry,
f2fs_lock_op(sbi);
- err = update_dent_inode(old_inode, new_inode, &new_dentry->d_name);
- if (err)
- goto out_unlock;
- if (file_enc_name(new_inode))
- file_set_enc_name(old_inode);
-
- err = update_dent_inode(new_inode, old_inode, &old_dentry->d_name);
- if (err)
- goto out_undo;
- if (file_enc_name(old_inode))
- file_set_enc_name(new_inode);
-
/* update ".." directory entry info of old dentry */
if (old_dir_entry)
f2fs_set_link(old_inode, old_dir_entry, old_dir_page, new_dir);
@@ -972,14 +952,6 @@ static int f2fs_cross_rename(struct inode *old_dir, struct dentry *old_dentry,
if (IS_DIRSYNC(old_dir) || IS_DIRSYNC(new_dir))
f2fs_sync_fs(sbi->sb, 1);
return 0;
-out_undo:
- /*
- * Still we may fail to recover name info of f2fs_inode here
- * Drop it, once its name is set as encrypted
- */
- update_dent_inode(old_inode, old_inode, &old_dentry->d_name);
-out_unlock:
- f2fs_unlock_op(sbi);
out_new_dir:
if (new_dir_entry) {
f2fs_dentry_kunmap(new_inode, new_dir_page);
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index 481aa8dc79f4..4547c5c5cd98 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -22,7 +22,7 @@
#include "trace.h"
#include <trace/events/f2fs.h>
-#define on_build_free_nids(nmi) mutex_is_locked(&nm_i->build_lock)
+#define on_build_free_nids(nmi) mutex_is_locked(&(nm_i)->build_lock)
static struct kmem_cache *nat_entry_slab;
static struct kmem_cache *free_nid_slab;
@@ -63,8 +63,9 @@ bool available_free_memory(struct f2fs_sb_info *sbi, int type)
int i;
for (i = 0; i <= UPDATE_INO; i++)
- mem_size += (sbi->im[i].ino_num *
- sizeof(struct ino_entry)) >> PAGE_SHIFT;
+ mem_size += sbi->im[i].ino_num *
+ sizeof(struct ino_entry);
+ mem_size >>= PAGE_SHIFT;
res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
} else if (type == EXTENT_CACHE) {
mem_size = (atomic_read(&sbi->total_ext_tree) *
@@ -177,18 +178,12 @@ static void __set_nat_cache_dirty(struct f2fs_nm_info *nm_i,
}
static void __clear_nat_cache_dirty(struct f2fs_nm_info *nm_i,
- struct nat_entry *ne)
+ struct nat_entry_set *set, struct nat_entry *ne)
{
- nid_t set = NAT_BLOCK_OFFSET(ne->ni.nid);
- struct nat_entry_set *head;
-
- head = radix_tree_lookup(&nm_i->nat_set_root, set);
- if (head) {
- list_move_tail(&ne->list, &nm_i->nat_entries);
- set_nat_flag(ne, IS_DIRTY, false);
- head->entry_cnt--;
- nm_i->dirty_nat_cnt--;
- }
+ list_move_tail(&ne->list, &nm_i->nat_entries);
+ set_nat_flag(ne, IS_DIRTY, false);
+ set->entry_cnt--;
+ nm_i->dirty_nat_cnt--;
}
static unsigned int __gang_lookup_nat_set(struct f2fs_nm_info *nm_i,
@@ -381,6 +376,7 @@ void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni)
struct page *page = NULL;
struct f2fs_nat_entry ne;
struct nat_entry *e;
+ pgoff_t index;
int i;
ni->nid = nid;
@@ -406,17 +402,21 @@ void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni)
node_info_from_raw_nat(ni, &ne);
}
up_read(&curseg->journal_rwsem);
- if (i >= 0)
+ if (i >= 0) {
+ up_read(&nm_i->nat_tree_lock);
goto cache;
+ }
/* Fill node_info from nat page */
- page = get_current_nat_page(sbi, start_nid);
+ index = current_nat_addr(sbi, nid);
+ up_read(&nm_i->nat_tree_lock);
+
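+	/* the NAT block is read from the meta area after dropping nat_tree_lock */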
+ page = get_meta_page(sbi, index);
nat_blk = (struct f2fs_nat_block *)page_address(page);
ne = nat_blk->entries[nid - start_nid];
node_info_from_raw_nat(ni, &ne);
f2fs_put_page(page, 1);
cache:
- up_read(&nm_i->nat_tree_lock);
/* cache nat entry */
down_write(&nm_i->nat_tree_lock);
cache_nat_entry(sbi, nid, &ne);
@@ -1463,6 +1463,9 @@ continue_unlock:
f2fs_wait_on_page_writeback(page, NODE, true);
BUG_ON(PageWriteback(page));
+ set_fsync_mark(page, 0);
+ set_dentry_mark(page, 0);
+
if (!atomic || page == last_page) {
set_fsync_mark(page, 1);
if (IS_INODE(page)) {
@@ -1766,40 +1769,67 @@ static void __remove_nid_from_list(struct f2fs_sb_info *sbi,
static bool add_free_nid(struct f2fs_sb_info *sbi, nid_t nid, bool build)
{
struct f2fs_nm_info *nm_i = NM_I(sbi);
- struct free_nid *i;
+ struct free_nid *i, *e;
struct nat_entry *ne;
- int err;
+ int err = -EINVAL;
+ bool ret = false;
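+	/* ret reports whether @nid ends up tracked as a free nid */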
/* 0 nid should not be used */
if (unlikely(nid == 0))
return false;
- if (build) {
- /* do not add allocated nids */
- ne = __lookup_nat_cache(nm_i, nid);
- if (ne && (!get_nat_flag(ne, IS_CHECKPOINTED) ||
- nat_get_blkaddr(ne) != NULL_ADDR))
- return false;
- }
-
i = f2fs_kmem_cache_alloc(free_nid_slab, GFP_NOFS);
i->nid = nid;
i->state = NID_NEW;
- if (radix_tree_preload(GFP_NOFS)) {
- kmem_cache_free(free_nid_slab, i);
- return true;
- }
+ if (radix_tree_preload(GFP_NOFS))
+ goto err;
spin_lock(&nm_i->nid_list_lock);
+
+ if (build) {
+ /*
+ * Thread A Thread B
+ * - f2fs_create
+ * - f2fs_new_inode
+ * - alloc_nid
+ * - __insert_nid_to_list(ALLOC_NID_LIST)
+ * - f2fs_balance_fs_bg
+ * - build_free_nids
+ * - __build_free_nids
+ * - scan_nat_page
+ * - add_free_nid
+ * - __lookup_nat_cache
+ * - f2fs_add_link
+ * - init_inode_metadata
+ * - new_inode_page
+ * - new_node_page
+ * - set_node_addr
+ * - alloc_nid_done
+ * - __remove_nid_from_list(ALLOC_NID_LIST)
+ * - __insert_nid_to_list(FREE_NID_LIST)
+ */
+ ne = __lookup_nat_cache(nm_i, nid);
+ if (ne && (!get_nat_flag(ne, IS_CHECKPOINTED) ||
+ nat_get_blkaddr(ne) != NULL_ADDR))
+ goto err_out;
+
+ e = __lookup_free_nid_list(nm_i, nid);
+ if (e) {
+ if (e->state == NID_NEW)
+ ret = true;
+ goto err_out;
+ }
+ }
+ ret = true;
err = __insert_nid_to_list(sbi, i, FREE_NID_LIST, true);
+err_out:
spin_unlock(&nm_i->nid_list_lock);
radix_tree_preload_end();
- if (err) {
+err:
+ if (err)
kmem_cache_free(free_nid_slab, i);
- return true;
- }
- return true;
+ return ret;
}
static void remove_free_nid(struct f2fs_sb_info *sbi, nid_t nid)
@@ -1821,7 +1851,7 @@ static void remove_free_nid(struct f2fs_sb_info *sbi, nid_t nid)
}
static void update_free_nid_bitmap(struct f2fs_sb_info *sbi, nid_t nid,
- bool set, bool build, bool locked)
+ bool set, bool build)
{
struct f2fs_nm_info *nm_i = NM_I(sbi);
unsigned int nat_ofs = NAT_BLOCK_OFFSET(nid);
@@ -1835,14 +1865,10 @@ static void update_free_nid_bitmap(struct f2fs_sb_info *sbi, nid_t nid,
else
__clear_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]);
- if (!locked)
- spin_lock(&nm_i->free_nid_lock);
if (set)
nm_i->free_nid_count[nat_ofs]++;
else if (!build)
nm_i->free_nid_count[nat_ofs]--;
- if (!locked)
- spin_unlock(&nm_i->free_nid_lock);
}
static void scan_nat_page(struct f2fs_sb_info *sbi,
@@ -1871,7 +1897,9 @@ static void scan_nat_page(struct f2fs_sb_info *sbi,
f2fs_bug_on(sbi, blk_addr == NEW_ADDR);
if (blk_addr == NULL_ADDR)
freed = add_free_nid(sbi, start_nid, true);
- update_free_nid_bitmap(sbi, start_nid, freed, true, false);
+ spin_lock(&NM_I(sbi)->nid_list_lock);
+ update_free_nid_bitmap(sbi, start_nid, freed, true);
+ spin_unlock(&NM_I(sbi)->nid_list_lock);
}
}
@@ -1927,6 +1955,9 @@ static void __build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount)
int i = 0;
nid_t nid = nm_i->next_scan_nid;
+ if (unlikely(nid >= nm_i->max_nid))
+ nid = 0;
+
/* Enough entries */
if (nm_i->nid_cnt[FREE_NID_LIST] >= NAT_ENTRY_PER_BLOCK)
return;
@@ -2026,7 +2057,7 @@ retry:
__insert_nid_to_list(sbi, i, ALLOC_NID_LIST, false);
nm_i->available_nids--;
- update_free_nid_bitmap(sbi, *nid, false, false, false);
+ update_free_nid_bitmap(sbi, *nid, false, false);
spin_unlock(&nm_i->nid_list_lock);
return true;
@@ -2082,7 +2113,7 @@ void alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid)
nm_i->available_nids++;
- update_free_nid_bitmap(sbi, nid, true, false, false);
+ update_free_nid_bitmap(sbi, nid, true, false);
spin_unlock(&nm_i->nid_list_lock);
@@ -2407,16 +2438,16 @@ static void __flush_nat_entry_set(struct f2fs_sb_info *sbi,
}
raw_nat_from_node_info(raw_ne, &ne->ni);
nat_reset_flag(ne);
- __clear_nat_cache_dirty(NM_I(sbi), ne);
+ __clear_nat_cache_dirty(NM_I(sbi), set, ne);
if (nat_get_blkaddr(ne) == NULL_ADDR) {
add_free_nid(sbi, nid, false);
spin_lock(&NM_I(sbi)->nid_list_lock);
NM_I(sbi)->available_nids++;
- update_free_nid_bitmap(sbi, nid, true, false, false);
+ update_free_nid_bitmap(sbi, nid, true, false);
spin_unlock(&NM_I(sbi)->nid_list_lock);
} else {
spin_lock(&NM_I(sbi)->nid_list_lock);
- update_free_nid_bitmap(sbi, nid, false, false, false);
+ update_free_nid_bitmap(sbi, nid, false, false);
spin_unlock(&NM_I(sbi)->nid_list_lock);
}
}
@@ -2428,10 +2459,11 @@ static void __flush_nat_entry_set(struct f2fs_sb_info *sbi,
f2fs_put_page(page, 1);
}
- f2fs_bug_on(sbi, set->entry_cnt);
-
- radix_tree_delete(&NM_I(sbi)->nat_set_root, set->set);
- kmem_cache_free(nat_entry_set_slab, set);
+ /* Allow dirty nats by node block allocation in write_begin */
+ if (!set->entry_cnt) {
+ radix_tree_delete(&NM_I(sbi)->nat_set_root, set->set);
+ kmem_cache_free(nat_entry_set_slab, set);
+ }
}
/*
@@ -2476,8 +2508,7 @@ void flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
__flush_nat_entry_set(sbi, set, cpc);
up_write(&nm_i->nat_tree_lock);
-
- f2fs_bug_on(sbi, nm_i->dirty_nat_cnt);
+ /* Allow dirty nats by node block allocation in write_begin */
}
static int __get_nat_bitmaps(struct f2fs_sb_info *sbi)
@@ -2541,10 +2572,10 @@ inline void load_free_nid_bitmap(struct f2fs_sb_info *sbi)
nid = i * NAT_ENTRY_PER_BLOCK;
last_nid = (i + 1) * NAT_ENTRY_PER_BLOCK;
- spin_lock(&nm_i->free_nid_lock);
+ spin_lock(&NM_I(sbi)->nid_list_lock);
for (; nid < last_nid; nid++)
- update_free_nid_bitmap(sbi, nid, true, true, true);
- spin_unlock(&nm_i->free_nid_lock);
+ update_free_nid_bitmap(sbi, nid, true, true);
+ spin_unlock(&NM_I(sbi)->nid_list_lock);
}
for (i = 0; i < nm_i->nat_blocks; i++) {
@@ -2621,23 +2652,20 @@ static int init_free_nid_cache(struct f2fs_sb_info *sbi)
{
struct f2fs_nm_info *nm_i = NM_I(sbi);
- nm_i->free_nid_bitmap = f2fs_kvzalloc(nm_i->nat_blocks *
+ nm_i->free_nid_bitmap = kvzalloc(nm_i->nat_blocks *
NAT_ENTRY_BITMAP_SIZE, GFP_KERNEL);
if (!nm_i->free_nid_bitmap)
return -ENOMEM;
- nm_i->nat_block_bitmap = f2fs_kvzalloc(nm_i->nat_blocks / 8,
+ nm_i->nat_block_bitmap = kvzalloc(nm_i->nat_blocks / 8,
GFP_KERNEL);
if (!nm_i->nat_block_bitmap)
return -ENOMEM;
- nm_i->free_nid_count = f2fs_kvzalloc(nm_i->nat_blocks *
+ nm_i->free_nid_count = kvzalloc(nm_i->nat_blocks *
sizeof(unsigned short), GFP_KERNEL);
if (!nm_i->free_nid_count)
return -ENOMEM;
-
- spin_lock_init(&nm_i->free_nid_lock);
-
return 0;
}
diff --git a/fs/f2fs/node.h b/fs/f2fs/node.h
index 2f9603fa85a5..558048e33cf9 100644
--- a/fs/f2fs/node.h
+++ b/fs/f2fs/node.h
@@ -9,10 +9,10 @@
* published by the Free Software Foundation.
*/
/* start node id of a node block dedicated to the given node id */
-#define START_NID(nid) ((nid / NAT_ENTRY_PER_BLOCK) * NAT_ENTRY_PER_BLOCK)
+#define START_NID(nid) (((nid) / NAT_ENTRY_PER_BLOCK) * NAT_ENTRY_PER_BLOCK)
/* node block offset on the NAT area dedicated to the given start node id */
-#define NAT_BLOCK_OFFSET(start_nid) (start_nid / NAT_ENTRY_PER_BLOCK)
+#define NAT_BLOCK_OFFSET(start_nid) ((start_nid) / NAT_ENTRY_PER_BLOCK)
/* # of pages to perform synchronous readahead before building free nids */
#define FREE_NID_PAGES 8
@@ -62,16 +62,16 @@ struct nat_entry {
struct node_info ni; /* in-memory node information */
};
-#define nat_get_nid(nat) (nat->ni.nid)
-#define nat_set_nid(nat, n) (nat->ni.nid = n)
-#define nat_get_blkaddr(nat) (nat->ni.blk_addr)
-#define nat_set_blkaddr(nat, b) (nat->ni.blk_addr = b)
-#define nat_get_ino(nat) (nat->ni.ino)
-#define nat_set_ino(nat, i) (nat->ni.ino = i)
-#define nat_get_version(nat) (nat->ni.version)
-#define nat_set_version(nat, v) (nat->ni.version = v)
+#define nat_get_nid(nat) ((nat)->ni.nid)
+#define nat_set_nid(nat, n) ((nat)->ni.nid = (n))
+#define nat_get_blkaddr(nat) ((nat)->ni.blk_addr)
+#define nat_set_blkaddr(nat, b) ((nat)->ni.blk_addr = (b))
+#define nat_get_ino(nat) ((nat)->ni.ino)
+#define nat_set_ino(nat, i) ((nat)->ni.ino = (i))
+#define nat_get_version(nat) ((nat)->ni.version)
+#define nat_set_version(nat, v) ((nat)->ni.version = (v))
-#define inc_node_version(version) (++version)
+#define inc_node_version(version) (++(version))
static inline void copy_node_info(struct node_info *dst,
struct node_info *src)
@@ -200,13 +200,16 @@ static inline pgoff_t current_nat_addr(struct f2fs_sb_info *sbi, nid_t start)
struct f2fs_nm_info *nm_i = NM_I(sbi);
pgoff_t block_off;
pgoff_t block_addr;
- int seg_off;
+ /*
+ * block_off = segment_off * 512 + off_in_segment
+ * OLD = (segment_off * 512) * 2 + off_in_segment
+ * NEW = 2 * (segment_off * 512 + off_in_segment) - off_in_segment
+ */
block_off = NAT_BLOCK_OFFSET(start);
- seg_off = block_off >> sbi->log_blocks_per_seg;
block_addr = (pgoff_t)(nm_i->nat_blkaddr +
- (seg_off << sbi->log_blocks_per_seg << 1) +
+ (block_off << 1) -
(block_off & (sbi->blocks_per_seg - 1)));
if (f2fs_test_bit(block_off, nm_i->nat_bitmap))
diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c
index d025aa83fb5b..907d6b7dde6a 100644
--- a/fs/f2fs/recovery.c
+++ b/fs/f2fs/recovery.c
@@ -198,7 +198,8 @@ static void recover_inode(struct inode *inode, struct page *page)
ino_of_node(page), name);
}
-static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head)
+static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head,
+ bool check_only)
{
struct curseg_info *curseg;
struct page *page = NULL;
@@ -225,7 +226,8 @@ static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head)
entry = get_fsync_inode(head, ino_of_node(page));
if (!entry) {
- if (IS_INODE(page) && is_dent_dnode(page)) {
+ if (!check_only &&
+ IS_INODE(page) && is_dent_dnode(page)) {
err = recover_inode_page(sbi, page);
if (err)
break;
@@ -569,7 +571,7 @@ int recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only)
mutex_lock(&sbi->cp_mutex);
/* step #1: find fsynced inode numbers */
- err = find_fsync_dnodes(sbi, &inode_list);
+ err = find_fsync_dnodes(sbi, &inode_list, check_only);
if (err || list_empty(&inode_list))
goto out;
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index 29ef7088c558..96845854e7ee 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -250,6 +250,36 @@ void drop_inmem_pages(struct inode *inode)
stat_dec_atomic_write(inode);
}
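+/* remove a single page from the inode's atomic (in-memory) page list */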
+void drop_inmem_page(struct inode *inode, struct page *page)
+{
+ struct f2fs_inode_info *fi = F2FS_I(inode);
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ struct list_head *head = &fi->inmem_pages;
+ struct inmem_pages *cur = NULL;
+
+ f2fs_bug_on(sbi, !IS_ATOMIC_WRITTEN_PAGE(page));
+
+ mutex_lock(&fi->inmem_lock);
+ list_for_each_entry(cur, head, list) {
+ if (cur->page == page)
+ break;
+ }
+
+ f2fs_bug_on(sbi, !cur || cur->page != page);
+ list_del(&cur->list);
+ mutex_unlock(&fi->inmem_lock);
+
+ dec_page_count(sbi, F2FS_INMEM_PAGES);
+ kmem_cache_free(inmem_entry_slab, cur);
+
+ ClearPageUptodate(page);
+ set_page_private(page, 0);
+ ClearPagePrivate(page);
+ f2fs_put_page(page, 0);
+
+ trace_f2fs_commit_inmem_page(page, INMEM_INVALIDATE);
+}
+
static int __commit_inmem_pages(struct inode *inode,
struct list_head *revoke_list)
{
@@ -261,7 +291,6 @@ static int __commit_inmem_pages(struct inode *inode,
.type = DATA,
.op = REQ_OP_WRITE,
.op_flags = REQ_SYNC | REQ_PRIO,
- .encrypted_page = NULL,
};
pgoff_t last_idx = ULONG_MAX;
int err = 0;
@@ -281,6 +310,9 @@ static int __commit_inmem_pages(struct inode *inode,
}
fio.page = page;
+ fio.old_blkaddr = NULL_ADDR;
+ fio.encrypted_page = NULL;
+		fio.need_lock = false;
err = do_write_data_page(&fio);
if (err) {
unlock_page(page);
@@ -358,11 +390,8 @@ void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need)
}
#endif
- if (!need)
- return;
-
/* balance_fs_bg is able to be pending */
- if (excess_cached_nats(sbi))
+ if (need && excess_cached_nats(sbi))
f2fs_balance_fs_bg(sbi);
/*
@@ -371,7 +400,7 @@ void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need)
*/
if (has_not_enough_free_secs(sbi, 0, 0)) {
mutex_lock(&sbi->gc_mutex);
- f2fs_gc(sbi, false, false);
+ f2fs_gc(sbi, false, false, NULL_SEGNO);
}
}
@@ -390,7 +419,7 @@ void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi)
else
build_free_nids(sbi, false, false);
- if (!is_idle(sbi))
+ if (!is_idle(sbi) && !excess_dirty_nats(sbi))
return;
/* checkpoint is the only way to shrink partial cached entries */
@@ -411,32 +440,34 @@ void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi)
}
}
-static int __submit_flush_wait(struct block_device *bdev)
+static int __submit_flush_wait(struct f2fs_sb_info *sbi,
+ struct block_device *bdev)
{
struct bio *bio = f2fs_bio_alloc(0);
int ret;
- bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
+ bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH;
bio->bi_bdev = bdev;
ret = submit_bio_wait(bio);
bio_put(bio);
+
+ trace_f2fs_issue_flush(bdev, test_opt(sbi, NOBARRIER),
+ test_opt(sbi, FLUSH_MERGE), ret);
return ret;
}
static int submit_flush_wait(struct f2fs_sb_info *sbi)
{
- int ret = __submit_flush_wait(sbi->sb->s_bdev);
+ int ret = __submit_flush_wait(sbi, sbi->sb->s_bdev);
int i;
- if (sbi->s_ndevs && !ret) {
- for (i = 1; i < sbi->s_ndevs; i++) {
- trace_f2fs_issue_flush(FDEV(i).bdev,
- test_opt(sbi, NOBARRIER),
- test_opt(sbi, FLUSH_MERGE));
- ret = __submit_flush_wait(FDEV(i).bdev);
- if (ret)
- break;
- }
+ if (!sbi->s_ndevs || ret)
+ return ret;
+
+ for (i = 1; i < sbi->s_ndevs; i++) {
+ ret = __submit_flush_wait(sbi, FDEV(i).bdev);
+ if (ret)
+ break;
}
return ret;
}
@@ -458,6 +489,8 @@ repeat:
fcc->dispatch_list = llist_reverse_order(fcc->dispatch_list);
ret = submit_flush_wait(sbi);
+ atomic_inc(&fcc->issued_flush);
+
llist_for_each_entry_safe(cmd, next,
fcc->dispatch_list, llnode) {
cmd->ret = ret;
@@ -475,25 +508,29 @@ int f2fs_issue_flush(struct f2fs_sb_info *sbi)
{
struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info;
struct flush_cmd cmd;
+ int ret;
if (test_opt(sbi, NOBARRIER))
return 0;
- if (!test_opt(sbi, FLUSH_MERGE))
- return submit_flush_wait(sbi);
-
- if (!atomic_read(&fcc->submit_flush)) {
- int ret;
+ if (!test_opt(sbi, FLUSH_MERGE)) {
+ ret = submit_flush_wait(sbi);
+ atomic_inc(&fcc->issued_flush);
+ return ret;
+ }
- atomic_inc(&fcc->submit_flush);
+ if (!atomic_read(&fcc->issing_flush)) {
+ atomic_inc(&fcc->issing_flush);
ret = submit_flush_wait(sbi);
- atomic_dec(&fcc->submit_flush);
+ atomic_dec(&fcc->issing_flush);
+
+ atomic_inc(&fcc->issued_flush);
return ret;
}
init_completion(&cmd.wait);
- atomic_inc(&fcc->submit_flush);
+ atomic_inc(&fcc->issing_flush);
llist_add(&cmd.llnode, &fcc->issue_list);
if (!fcc->dispatch_list)
@@ -501,10 +538,10 @@ int f2fs_issue_flush(struct f2fs_sb_info *sbi)
if (fcc->f2fs_issue_flush) {
wait_for_completion(&cmd.wait);
- atomic_dec(&fcc->submit_flush);
+ atomic_dec(&fcc->issing_flush);
} else {
llist_del_all(&fcc->issue_list);
- atomic_set(&fcc->submit_flush, 0);
+ atomic_set(&fcc->issing_flush, 0);
}
return cmd.ret;
@@ -524,7 +561,8 @@ int create_flush_cmd_control(struct f2fs_sb_info *sbi)
fcc = kzalloc(sizeof(struct flush_cmd_control), GFP_KERNEL);
if (!fcc)
return -ENOMEM;
- atomic_set(&fcc->submit_flush, 0);
+ atomic_set(&fcc->issued_flush, 0);
+ atomic_set(&fcc->issing_flush, 0);
init_waitqueue_head(&fcc->flush_wait_queue);
init_llist_head(&fcc->issue_list);
SM_I(sbi)->fcc_info = fcc;
@@ -597,8 +635,8 @@ static void __remove_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
if (test_and_clear_bit(segno, dirty_i->dirty_segmap[t]))
dirty_i->nr_dirty[t]--;
- if (get_valid_blocks(sbi, segno, sbi->segs_per_sec) == 0)
- clear_bit(GET_SECNO(sbi, segno),
+ if (get_valid_blocks(sbi, segno, true) == 0)
+ clear_bit(GET_SEC_FROM_SEG(sbi, segno),
dirty_i->victim_secmap);
}
}
@@ -618,7 +656,7 @@ static void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno)
mutex_lock(&dirty_i->seglist_lock);
- valid_blocks = get_valid_blocks(sbi, segno, 0);
+ valid_blocks = get_valid_blocks(sbi, segno, false);
if (valid_blocks == 0) {
__locate_dirty_segment(sbi, segno, PRE);
@@ -633,162 +671,407 @@ static void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno)
mutex_unlock(&dirty_i->seglist_lock);
}
-static void __add_discard_cmd(struct f2fs_sb_info *sbi,
- struct bio *bio, block_t lstart, block_t len)
+static struct discard_cmd *__create_discard_cmd(struct f2fs_sb_info *sbi,
+ struct block_device *bdev, block_t lstart,
+ block_t start, block_t len)
{
struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
- struct list_head *cmd_list = &(dcc->discard_cmd_list);
+ struct list_head *pend_list;
struct discard_cmd *dc;
+ f2fs_bug_on(sbi, !len);
+
+ pend_list = &dcc->pend_list[plist_idx(len)];
+
dc = f2fs_kmem_cache_alloc(discard_cmd_slab, GFP_NOFS);
INIT_LIST_HEAD(&dc->list);
- dc->bio = bio;
- bio->bi_private = dc;
+ dc->bdev = bdev;
dc->lstart = lstart;
+ dc->start = start;
dc->len = len;
+ dc->ref = 0;
dc->state = D_PREP;
+ dc->error = 0;
init_completion(&dc->wait);
+ list_add_tail(&dc->list, pend_list);
+ atomic_inc(&dcc->discard_cmd_cnt);
+ dcc->undiscard_blks += len;
- mutex_lock(&dcc->cmd_lock);
- list_add_tail(&dc->list, cmd_list);
- mutex_unlock(&dcc->cmd_lock);
+ return dc;
}
-static void __remove_discard_cmd(struct f2fs_sb_info *sbi, struct discard_cmd *dc)
+static struct discard_cmd *__attach_discard_cmd(struct f2fs_sb_info *sbi,
+ struct block_device *bdev, block_t lstart,
+ block_t start, block_t len,
+ struct rb_node *parent, struct rb_node **p)
{
- int err = dc->bio->bi_error;
+ struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
+ struct discard_cmd *dc;
- if (dc->state == D_DONE)
- atomic_dec(&(SM_I(sbi)->dcc_info->submit_discard));
+ dc = __create_discard_cmd(sbi, bdev, lstart, start, len);
- if (err == -EOPNOTSUPP)
- err = 0;
+ rb_link_node(&dc->rb_node, parent, p);
+ rb_insert_color(&dc->rb_node, &dcc->root);
+
+ return dc;
+}
+
+static void __detach_discard_cmd(struct discard_cmd_control *dcc,
+ struct discard_cmd *dc)
+{
+ if (dc->state == D_DONE)
+ atomic_dec(&dcc->issing_discard);
- if (err)
- f2fs_msg(sbi->sb, KERN_INFO,
- "Issue discard failed, ret: %d", err);
- bio_put(dc->bio);
list_del(&dc->list);
+ rb_erase(&dc->rb_node, &dcc->root);
+ dcc->undiscard_blks -= dc->len;
+
kmem_cache_free(discard_cmd_slab, dc);
+
+ atomic_dec(&dcc->discard_cmd_cnt);
}
-/* This should be covered by global mutex, &sit_i->sentry_lock */
-void f2fs_wait_discard_bio(struct f2fs_sb_info *sbi, block_t blkaddr)
+static void __remove_discard_cmd(struct f2fs_sb_info *sbi,
+ struct discard_cmd *dc)
{
struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
- struct list_head *wait_list = &(dcc->discard_cmd_list);
- struct discard_cmd *dc, *tmp;
- struct blk_plug plug;
- mutex_lock(&dcc->cmd_lock);
+ if (dc->error == -EOPNOTSUPP)
+ dc->error = 0;
- blk_start_plug(&plug);
+ if (dc->error)
+ f2fs_msg(sbi->sb, KERN_INFO,
+ "Issue discard failed, ret: %d", dc->error);
+ __detach_discard_cmd(dcc, dc);
+}
- list_for_each_entry_safe(dc, tmp, wait_list, list) {
+static void f2fs_submit_discard_endio(struct bio *bio)
+{
+ struct discard_cmd *dc = (struct discard_cmd *)bio->bi_private;
- if (blkaddr == NULL_ADDR) {
- if (dc->state == D_PREP) {
- dc->state = D_SUBMIT;
- submit_bio(dc->bio);
- atomic_inc(&dcc->submit_discard);
- }
- continue;
+ dc->error = bio->bi_error;
+ dc->state = D_DONE;
+ complete(&dc->wait);
+ bio_put(bio);
+}
+
+/* this function is copied from blkdev_issue_discard from block/blk-lib.c */
+static void __submit_discard_cmd(struct f2fs_sb_info *sbi,
+ struct discard_cmd *dc)
+{
+ struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
+ struct bio *bio = NULL;
+
+ if (dc->state != D_PREP)
+ return;
+
+ trace_f2fs_issue_discard(dc->bdev, dc->start, dc->len);
+
+ dc->error = __blkdev_issue_discard(dc->bdev,
+ SECTOR_FROM_BLOCK(dc->start),
+ SECTOR_FROM_BLOCK(dc->len),
+ GFP_NOFS, 0, &bio);
+ if (!dc->error) {
+ /* should keep before submission to avoid D_DONE right away */
+ dc->state = D_SUBMIT;
+ atomic_inc(&dcc->issued_discard);
+ atomic_inc(&dcc->issing_discard);
+ if (bio) {
+ bio->bi_private = dc;
+ bio->bi_end_io = f2fs_submit_discard_endio;
+ bio->bi_opf |= REQ_SYNC;
+ submit_bio(bio);
+ list_move_tail(&dc->list, &dcc->wait_list);
}
+ } else {
+ __remove_discard_cmd(sbi, dc);
+ }
+}
- if (dc->lstart <= blkaddr && blkaddr < dc->lstart + dc->len) {
- if (dc->state == D_SUBMIT)
- wait_for_completion_io(&dc->wait);
- else
- __remove_discard_cmd(sbi, dc);
+static struct discard_cmd *__insert_discard_tree(struct f2fs_sb_info *sbi,
+ struct block_device *bdev, block_t lstart,
+ block_t start, block_t len,
+ struct rb_node **insert_p,
+ struct rb_node *insert_parent)
+{
+ struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
+ struct rb_node **p = &dcc->root.rb_node;
+ struct rb_node *parent = NULL;
+ struct discard_cmd *dc = NULL;
+
+ if (insert_p && insert_parent) {
+ parent = insert_parent;
+ p = insert_p;
+ goto do_insert;
+ }
+
+ p = __lookup_rb_tree_for_insert(sbi, &dcc->root, &parent, lstart);
+do_insert:
+ dc = __attach_discard_cmd(sbi, bdev, lstart, start, len, parent, p);
+ if (!dc)
+ return NULL;
+
+ return dc;
+}
+
+static void __relocate_discard_cmd(struct discard_cmd_control *dcc,
+ struct discard_cmd *dc)
+{
+ list_move_tail(&dc->list, &dcc->pend_list[plist_idx(dc->len)]);
+}
+
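+/* drop, shrink or split a pending discard command so it no longer covers blkaddr */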
+static void __punch_discard_cmd(struct f2fs_sb_info *sbi,
+ struct discard_cmd *dc, block_t blkaddr)
+{
+ struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
+ struct discard_info di = dc->di;
+ bool modified = false;
+
+ if (dc->state == D_DONE || dc->len == 1) {
+ __remove_discard_cmd(sbi, dc);
+ return;
+ }
+
+ dcc->undiscard_blks -= di.len;
+
+ if (blkaddr > di.lstart) {
+ dc->len = blkaddr - dc->lstart;
+ dcc->undiscard_blks += dc->len;
+ __relocate_discard_cmd(dcc, dc);
+ f2fs_bug_on(sbi, !__check_rb_tree_consistence(sbi, &dcc->root));
+ modified = true;
+ }
+
+ if (blkaddr < di.lstart + di.len - 1) {
+ if (modified) {
+ __insert_discard_tree(sbi, dc->bdev, blkaddr + 1,
+ di.start + blkaddr + 1 - di.lstart,
+ di.lstart + di.len - 1 - blkaddr,
+ NULL, NULL);
+ f2fs_bug_on(sbi,
+ !__check_rb_tree_consistence(sbi, &dcc->root));
+ } else {
+ dc->lstart++;
+ dc->len--;
+ dc->start++;
+ dcc->undiscard_blks += dc->len;
+ __relocate_discard_cmd(dcc, dc);
+ f2fs_bug_on(sbi,
+ !__check_rb_tree_consistence(sbi, &dcc->root));
}
}
- blk_finish_plug(&plug);
+}
- /* this comes from f2fs_put_super */
- if (blkaddr == NULL_ADDR) {
- list_for_each_entry_safe(dc, tmp, wait_list, list) {
- wait_for_completion_io(&dc->wait);
- __remove_discard_cmd(sbi, dc);
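+/* record a discard extent in the pending rb-tree, merging with adjacent prepared commands on the same device where possible */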
+static void __update_discard_tree_range(struct f2fs_sb_info *sbi,
+ struct block_device *bdev, block_t lstart,
+ block_t start, block_t len)
+{
+ struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
+ struct discard_cmd *prev_dc = NULL, *next_dc = NULL;
+ struct discard_cmd *dc;
+ struct discard_info di = {0};
+ struct rb_node **insert_p = NULL, *insert_parent = NULL;
+ block_t end = lstart + len;
+
+ mutex_lock(&dcc->cmd_lock);
+
+ dc = (struct discard_cmd *)__lookup_rb_tree_ret(&dcc->root,
+ NULL, lstart,
+ (struct rb_entry **)&prev_dc,
+ (struct rb_entry **)&next_dc,
+ &insert_p, &insert_parent, true);
+ if (dc)
+ prev_dc = dc;
+
+ if (!prev_dc) {
+ di.lstart = lstart;
+ di.len = next_dc ? next_dc->lstart - lstart : len;
+ di.len = min(di.len, len);
+ di.start = start;
+ }
+
+ while (1) {
+ struct rb_node *node;
+ bool merged = false;
+ struct discard_cmd *tdc = NULL;
+
+ if (prev_dc) {
+ di.lstart = prev_dc->lstart + prev_dc->len;
+ if (di.lstart < lstart)
+ di.lstart = lstart;
+ if (di.lstart >= end)
+ break;
+
+ if (!next_dc || next_dc->lstart > end)
+ di.len = end - di.lstart;
+ else
+ di.len = next_dc->lstart - di.lstart;
+ di.start = start + di.lstart - lstart;
+ }
+
+ if (!di.len)
+ goto next;
+
+ if (prev_dc && prev_dc->state == D_PREP &&
+ prev_dc->bdev == bdev &&
+ __is_discard_back_mergeable(&di, &prev_dc->di)) {
+ prev_dc->di.len += di.len;
+ dcc->undiscard_blks += di.len;
+ __relocate_discard_cmd(dcc, prev_dc);
+ f2fs_bug_on(sbi,
+ !__check_rb_tree_consistence(sbi, &dcc->root));
+ di = prev_dc->di;
+ tdc = prev_dc;
+ merged = true;
+ }
+
+ if (next_dc && next_dc->state == D_PREP &&
+ next_dc->bdev == bdev &&
+ __is_discard_front_mergeable(&di, &next_dc->di)) {
+ next_dc->di.lstart = di.lstart;
+ next_dc->di.len += di.len;
+ next_dc->di.start = di.start;
+ dcc->undiscard_blks += di.len;
+ __relocate_discard_cmd(dcc, next_dc);
+ if (tdc)
+ __remove_discard_cmd(sbi, tdc);
+ f2fs_bug_on(sbi,
+ !__check_rb_tree_consistence(sbi, &dcc->root));
+ merged = true;
+ }
+
+ if (!merged) {
+ __insert_discard_tree(sbi, bdev, di.lstart, di.start,
+ di.len, NULL, NULL);
+ f2fs_bug_on(sbi,
+ !__check_rb_tree_consistence(sbi, &dcc->root));
}
+ next:
+ prev_dc = next_dc;
+ if (!prev_dc)
+ break;
+
+ node = rb_next(&prev_dc->rb_node);
+ next_dc = rb_entry_safe(node, struct discard_cmd, rb_node);
}
+
mutex_unlock(&dcc->cmd_lock);
}
-static void f2fs_submit_discard_endio(struct bio *bio)
+static int __queue_discard_cmd(struct f2fs_sb_info *sbi,
+ struct block_device *bdev, block_t blkstart, block_t blklen)
{
- struct discard_cmd *dc = (struct discard_cmd *)bio->bi_private;
+ block_t lblkstart = blkstart;
- complete(&dc->wait);
- dc->state = D_DONE;
+ trace_f2fs_queue_discard(bdev, blkstart, blklen);
+
+ if (sbi->s_ndevs) {
+ int devi = f2fs_target_device_index(sbi, blkstart);
+
+ blkstart -= FDEV(devi).start_blk;
+ }
+ __update_discard_tree_range(sbi, bdev, lblkstart, blkstart, blklen);
+ return 0;
}
-static int issue_discard_thread(void *data)
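+/* issue prepared discards, largest extents first; issue_cond restricts this to idle time and a bounded batch per pass */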
+static void __issue_discard_cmd(struct f2fs_sb_info *sbi, bool issue_cond)
{
- struct f2fs_sb_info *sbi = data;
struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
- wait_queue_head_t *q = &dcc->discard_wait_queue;
- struct list_head *cmd_list = &dcc->discard_cmd_list;
+ struct list_head *pend_list;
struct discard_cmd *dc, *tmp;
struct blk_plug plug;
- int iter = 0;
-repeat:
- if (kthread_should_stop())
- return 0;
+ int i, iter = 0;
+ mutex_lock(&dcc->cmd_lock);
blk_start_plug(&plug);
+ for (i = MAX_PLIST_NUM - 1; i >= 0; i--) {
+ pend_list = &dcc->pend_list[i];
+ list_for_each_entry_safe(dc, tmp, pend_list, list) {
+ f2fs_bug_on(sbi, dc->state != D_PREP);
+
+ if (!issue_cond || is_idle(sbi))
+ __submit_discard_cmd(sbi, dc);
+ if (issue_cond && iter++ > DISCARD_ISSUE_RATE)
+ goto out;
+ }
+ }
+out:
+ blk_finish_plug(&plug);
+ mutex_unlock(&dcc->cmd_lock);
+}
+
+static void __wait_discard_cmd(struct f2fs_sb_info *sbi, bool wait_cond)
+{
+ struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
+ struct list_head *wait_list = &(dcc->wait_list);
+ struct discard_cmd *dc, *tmp;
mutex_lock(&dcc->cmd_lock);
- list_for_each_entry_safe(dc, tmp, cmd_list, list) {
- if (dc->state == D_PREP) {
- dc->state = D_SUBMIT;
- submit_bio(dc->bio);
- atomic_inc(&dcc->submit_discard);
- if (iter++ > DISCARD_ISSUE_RATE)
- break;
- } else if (dc->state == D_DONE) {
+ list_for_each_entry_safe(dc, tmp, wait_list, list) {
+ if (!wait_cond || dc->state == D_DONE) {
+ if (dc->ref)
+ continue;
+ wait_for_completion_io(&dc->wait);
__remove_discard_cmd(sbi, dc);
}
}
mutex_unlock(&dcc->cmd_lock);
+}
- blk_finish_plug(&plug);
+/* This should be covered by global mutex, &sit_i->sentry_lock */
+void f2fs_wait_discard_bio(struct f2fs_sb_info *sbi, block_t blkaddr)
+{
+ struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
+ struct discard_cmd *dc;
+ bool need_wait = false;
- iter = 0;
- congestion_wait(BLK_RW_SYNC, HZ/50);
+ mutex_lock(&dcc->cmd_lock);
+ dc = (struct discard_cmd *)__lookup_rb_tree(&dcc->root, NULL, blkaddr);
+ if (dc) {
+ if (dc->state == D_PREP) {
+ __punch_discard_cmd(sbi, dc, blkaddr);
+ } else {
+ dc->ref++;
+ need_wait = true;
+ }
+ }
+ mutex_unlock(&dcc->cmd_lock);
- wait_event_interruptible(*q,
- kthread_should_stop() || !list_empty(&dcc->discard_cmd_list));
- goto repeat;
+ if (need_wait) {
+ wait_for_completion_io(&dc->wait);
+ mutex_lock(&dcc->cmd_lock);
+ f2fs_bug_on(sbi, dc->state != D_DONE);
+ dc->ref--;
+ if (!dc->ref)
+ __remove_discard_cmd(sbi, dc);
+ mutex_unlock(&dcc->cmd_lock);
+ }
}
-
-/* this function is copied from blkdev_issue_discard from block/blk-lib.c */
-static int __f2fs_issue_discard_async(struct f2fs_sb_info *sbi,
- struct block_device *bdev, block_t blkstart, block_t blklen)
+/* This comes from f2fs_put_super */
+void f2fs_wait_discard_bios(struct f2fs_sb_info *sbi)
{
- struct bio *bio = NULL;
- block_t lblkstart = blkstart;
- int err;
+ __issue_discard_cmd(sbi, false);
+ __wait_discard_cmd(sbi, false);
+}
- trace_f2fs_issue_discard(bdev, blkstart, blklen);
+static int issue_discard_thread(void *data)
+{
+ struct f2fs_sb_info *sbi = data;
+ struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
+ wait_queue_head_t *q = &dcc->discard_wait_queue;
+repeat:
+ if (kthread_should_stop())
+ return 0;
- if (sbi->s_ndevs) {
- int devi = f2fs_target_device_index(sbi, blkstart);
+ __issue_discard_cmd(sbi, true);
+ __wait_discard_cmd(sbi, true);
- blkstart -= FDEV(devi).start_blk;
- }
- err = __blkdev_issue_discard(bdev,
- SECTOR_FROM_BLOCK(blkstart),
- SECTOR_FROM_BLOCK(blklen),
- GFP_NOFS, 0, &bio);
- if (!err && bio) {
- bio->bi_end_io = f2fs_submit_discard_endio;
- bio->bi_opf |= REQ_SYNC;
+ congestion_wait(BLK_RW_SYNC, HZ/50);
- __add_discard_cmd(sbi, bio, lblkstart, blklen);
- wake_up(&SM_I(sbi)->dcc_info->discard_wait_queue);
- }
- return err;
+ wait_event_interruptible(*q, kthread_should_stop() ||
+ atomic_read(&dcc->discard_cmd_cnt));
+ goto repeat;
}
#ifdef CONFIG_BLK_DEV_ZONED
@@ -796,6 +1079,7 @@ static int __f2fs_issue_discard_zone(struct f2fs_sb_info *sbi,
struct block_device *bdev, block_t blkstart, block_t blklen)
{
sector_t sector, nr_sects;
+ block_t lblkstart = blkstart;
int devi = 0;
if (sbi->s_ndevs) {
@@ -813,7 +1097,7 @@ static int __f2fs_issue_discard_zone(struct f2fs_sb_info *sbi,
case BLK_ZONE_TYPE_CONVENTIONAL:
if (!blk_queue_discard(bdev_get_queue(bdev)))
return 0;
- return __f2fs_issue_discard_async(sbi, bdev, blkstart, blklen);
+ return __queue_discard_cmd(sbi, bdev, lblkstart, blklen);
case BLK_ZONE_TYPE_SEQWRITE_REQ:
case BLK_ZONE_TYPE_SEQWRITE_PREF:
sector = SECTOR_FROM_BLOCK(blkstart);
@@ -845,7 +1129,7 @@ static int __issue_discard_async(struct f2fs_sb_info *sbi,
bdev_zoned_model(bdev) != BLK_ZONED_NONE)
return __f2fs_issue_discard_zone(sbi, bdev, blkstart, blklen);
#endif
- return __f2fs_issue_discard_async(sbi, bdev, blkstart, blklen);
+ return __queue_discard_cmd(sbi, bdev, blkstart, blklen);
}
static int f2fs_issue_discard(struct f2fs_sb_info *sbi,
@@ -888,32 +1172,6 @@ static int f2fs_issue_discard(struct f2fs_sb_info *sbi,
return err;
}
-static void __add_discard_entry(struct f2fs_sb_info *sbi,
- struct cp_control *cpc, struct seg_entry *se,
- unsigned int start, unsigned int end)
-{
- struct list_head *head = &SM_I(sbi)->dcc_info->discard_entry_list;
- struct discard_entry *new, *last;
-
- if (!list_empty(head)) {
- last = list_last_entry(head, struct discard_entry, list);
- if (START_BLOCK(sbi, cpc->trim_start) + start ==
- last->blkaddr + last->len &&
- last->len < MAX_DISCARD_BLOCKS(sbi)) {
- last->len += end - start;
- goto done;
- }
- }
-
- new = f2fs_kmem_cache_alloc(discard_entry_slab, GFP_NOFS);
- INIT_LIST_HEAD(&new->list);
- new->blkaddr = START_BLOCK(sbi, cpc->trim_start) + start;
- new->len = end - start;
- list_add_tail(&new->list, head);
-done:
- SM_I(sbi)->dcc_info->nr_discards += end - start;
-}
-
static bool add_discard_addrs(struct f2fs_sb_info *sbi, struct cp_control *cpc,
bool check_only)
{
@@ -925,7 +1183,9 @@ static bool add_discard_addrs(struct f2fs_sb_info *sbi, struct cp_control *cpc,
unsigned long *discard_map = (unsigned long *)se->discard_map;
unsigned long *dmap = SIT_I(sbi)->tmp_map;
unsigned int start = 0, end = -1;
- bool force = (cpc->reason == CP_DISCARD);
+ bool force = (cpc->reason & CP_DISCARD);
+ struct discard_entry *de = NULL;
+ struct list_head *head = &SM_I(sbi)->dcc_info->entry_list;
int i;
if (se->valid_blocks == max_blocks || !f2fs_discard_en(sbi))
@@ -957,14 +1217,24 @@ static bool add_discard_addrs(struct f2fs_sb_info *sbi, struct cp_control *cpc,
if (check_only)
return true;
- __add_discard_entry(sbi, cpc, se, start, end);
+ if (!de) {
+ de = f2fs_kmem_cache_alloc(discard_entry_slab,
+ GFP_F2FS_ZERO);
+ de->start_blkaddr = START_BLOCK(sbi, cpc->trim_start);
+ list_add_tail(&de->list, head);
+ }
+
+ for (i = start; i < end; i++)
+ __set_bit_le(i, (void *)de->discard_map);
+
+ SM_I(sbi)->dcc_info->nr_discards += end - start;
}
return false;
}
void release_discard_addrs(struct f2fs_sb_info *sbi)
{
- struct list_head *head = &(SM_I(sbi)->dcc_info->discard_entry_list);
+ struct list_head *head = &(SM_I(sbi)->dcc_info->entry_list);
struct discard_entry *entry, *this;
/* drop caches */
@@ -990,13 +1260,13 @@ static void set_prefree_as_free_segments(struct f2fs_sb_info *sbi)
void clear_prefree_segments(struct f2fs_sb_info *sbi, struct cp_control *cpc)
{
- struct list_head *head = &(SM_I(sbi)->dcc_info->discard_entry_list);
+ struct list_head *head = &(SM_I(sbi)->dcc_info->entry_list);
struct discard_entry *entry, *this;
struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
unsigned long *prefree_map = dirty_i->dirty_segmap[PRE];
unsigned int start = 0, end = -1;
unsigned int secno, start_segno;
- bool force = (cpc->reason == CP_DISCARD);
+ bool force = (cpc->reason & CP_DISCARD);
mutex_lock(&dirty_i->seglist_lock);
@@ -1026,10 +1296,10 @@ void clear_prefree_segments(struct f2fs_sb_info *sbi, struct cp_control *cpc)
continue;
}
next:
- secno = GET_SECNO(sbi, start);
- start_segno = secno * sbi->segs_per_sec;
+ secno = GET_SEC_FROM_SEG(sbi, start);
+ start_segno = GET_SEG_FROM_SEC(sbi, secno);
if (!IS_CURSEC(sbi, secno) &&
- !get_valid_blocks(sbi, start, sbi->segs_per_sec))
+ !get_valid_blocks(sbi, start, true))
f2fs_issue_discard(sbi, START_BLOCK(sbi, start_segno),
sbi->segs_per_sec << sbi->log_blocks_per_seg);
@@ -1043,22 +1313,46 @@ next:
/* send small discards */
list_for_each_entry_safe(entry, this, head, list) {
- if (force && entry->len < cpc->trim_minlen)
- goto skip;
- f2fs_issue_discard(sbi, entry->blkaddr, entry->len);
- cpc->trimmed += entry->len;
+ unsigned int cur_pos = 0, next_pos, len, total_len = 0;
+ bool is_valid = test_bit_le(0, entry->discard_map);
+
+find_next:
+ if (is_valid) {
+ next_pos = find_next_zero_bit_le(entry->discard_map,
+ sbi->blocks_per_seg, cur_pos);
+ len = next_pos - cur_pos;
+
+ if (force && len < cpc->trim_minlen)
+ goto skip;
+
+ f2fs_issue_discard(sbi, entry->start_blkaddr + cur_pos,
+ len);
+ cpc->trimmed += len;
+ total_len += len;
+ } else {
+ next_pos = find_next_bit_le(entry->discard_map,
+ sbi->blocks_per_seg, cur_pos);
+ }
skip:
+ cur_pos = next_pos;
+ is_valid = !is_valid;
+
+ if (cur_pos < sbi->blocks_per_seg)
+ goto find_next;
+
list_del(&entry->list);
- SM_I(sbi)->dcc_info->nr_discards -= entry->len;
+ SM_I(sbi)->dcc_info->nr_discards -= total_len;
kmem_cache_free(discard_entry_slab, entry);
}
+
+ wake_up(&SM_I(sbi)->dcc_info->discard_wait_queue);
}
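
clear_prefree_segments() consumes each discard_map as alternating runs of set and clear bits and issues one discard per run of set bits (subject to cpc->trim_minlen when trimming). Below is a standalone sketch of that walk; the naive scanner and the map contents are invented stand-ins for find_next_bit_le()/find_next_zero_bit_le() and a real segment bitmap.

#include <stdio.h>

#define NBITS 64

static int test_bit(const unsigned char *map, unsigned int i)
{
        return (map[i / 8] >> (i % 8)) & 1;
}

/* naive stand-in for find_next_bit_le()/find_next_zero_bit_le() */
static unsigned int find_next(const unsigned char *map, unsigned int size,
                              unsigned int off, int want)
{
        while (off < size && test_bit(map, off) != want)
                off++;
        return off;
}

int main(void)
{
        unsigned char map[NBITS / 8] = { 0xff, 0x0f, 0x00, 0xf0 };
        unsigned int cur = 0, next, min_len = 4;
        int is_valid = test_bit(map, 0);

        while (cur < NBITS) {
                next = find_next(map, NBITS, cur, !is_valid);
                if (is_valid && next - cur >= min_len)
                        printf("discard blocks [%u, %u)\n", cur, next);
                cur = next;
                is_valid = !is_valid;
        }
        return 0;       /* prints [0, 12) and [28, 32) */
}
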
static int create_discard_cmd_control(struct f2fs_sb_info *sbi)
{
dev_t dev = sbi->sb->s_bdev->bd_dev;
struct discard_cmd_control *dcc;
- int err = 0;
+ int err = 0, i;
if (SM_I(sbi)->dcc_info) {
dcc = SM_I(sbi)->dcc_info;
@@ -1069,12 +1363,18 @@ static int create_discard_cmd_control(struct f2fs_sb_info *sbi)
if (!dcc)
return -ENOMEM;
- INIT_LIST_HEAD(&dcc->discard_entry_list);
- INIT_LIST_HEAD(&dcc->discard_cmd_list);
+ INIT_LIST_HEAD(&dcc->entry_list);
+ for (i = 0; i < MAX_PLIST_NUM; i++)
+ INIT_LIST_HEAD(&dcc->pend_list[i]);
+ INIT_LIST_HEAD(&dcc->wait_list);
mutex_init(&dcc->cmd_lock);
- atomic_set(&dcc->submit_discard, 0);
+ atomic_set(&dcc->issued_discard, 0);
+ atomic_set(&dcc->issing_discard, 0);
+ atomic_set(&dcc->discard_cmd_cnt, 0);
dcc->nr_discards = 0;
- dcc->max_discards = 0;
+ dcc->max_discards = MAIN_SEGS(sbi) << sbi->log_blocks_per_seg;
+ dcc->undiscard_blks = 0;
+ dcc->root = RB_ROOT;
init_waitqueue_head(&dcc->discard_wait_queue);
SM_I(sbi)->dcc_info = dcc;
@@ -1091,20 +1391,22 @@ init_thread:
return err;
}
-static void destroy_discard_cmd_control(struct f2fs_sb_info *sbi, bool free)
+static void destroy_discard_cmd_control(struct f2fs_sb_info *sbi)
{
struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
- if (dcc && dcc->f2fs_issue_discard) {
+ if (!dcc)
+ return;
+
+ if (dcc->f2fs_issue_discard) {
struct task_struct *discard_thread = dcc->f2fs_issue_discard;
dcc->f2fs_issue_discard = NULL;
kthread_stop(discard_thread);
}
- if (free) {
- kfree(dcc);
- SM_I(sbi)->dcc_info = NULL;
- }
+
+ kfree(dcc);
+ SM_I(sbi)->dcc_info = NULL;
}
static bool __mark_sit_entry_dirty(struct f2fs_sb_info *sbi, unsigned int segno)
@@ -1345,6 +1647,17 @@ static void write_current_sum_page(struct f2fs_sb_info *sbi,
f2fs_put_page(page, 1);
}
+static int is_next_segment_free(struct f2fs_sb_info *sbi, int type)
+{
+ struct curseg_info *curseg = CURSEG_I(sbi, type);
+ unsigned int segno = curseg->segno + 1;
+ struct free_segmap_info *free_i = FREE_I(sbi);
+
+ if (segno < MAIN_SEGS(sbi) && segno % sbi->segs_per_sec)
+ return !test_bit(segno, free_i->free_segmap);
+ return 0;
+}
+
/*
* Find a new segment in the free segment bitmap, in the right order.
* This function must always succeed; otherwise it is a BUG.
@@ -1355,8 +1668,8 @@ static void get_new_segment(struct f2fs_sb_info *sbi,
struct free_segmap_info *free_i = FREE_I(sbi);
unsigned int segno, secno, zoneno;
unsigned int total_zones = MAIN_SECS(sbi) / sbi->secs_per_zone;
- unsigned int hint = *newseg / sbi->segs_per_sec;
- unsigned int old_zoneno = GET_ZONENO_FROM_SEGNO(sbi, *newseg);
+ unsigned int hint = GET_SEC_FROM_SEG(sbi, *newseg);
+ unsigned int old_zoneno = GET_ZONE_FROM_SEG(sbi, *newseg);
unsigned int left_start = hint;
bool init = true;
int go_left = 0;
@@ -1366,8 +1679,8 @@ static void get_new_segment(struct f2fs_sb_info *sbi,
if (!new_sec && ((*newseg + 1) % sbi->segs_per_sec)) {
segno = find_next_zero_bit(free_i->free_segmap,
- (hint + 1) * sbi->segs_per_sec, *newseg + 1);
- if (segno < (hint + 1) * sbi->segs_per_sec)
+ GET_SEG_FROM_SEC(sbi, hint + 1), *newseg + 1);
+ if (segno < GET_SEG_FROM_SEC(sbi, hint + 1))
goto got_it;
}
find_other_zone:
@@ -1398,8 +1711,8 @@ find_other_zone:
secno = left_start;
skip_left:
hint = secno;
- segno = secno * sbi->segs_per_sec;
- zoneno = secno / sbi->secs_per_zone;
+ segno = GET_SEG_FROM_SEC(sbi, secno);
+ zoneno = GET_ZONE_FROM_SEC(sbi, secno);
/* give up on finding another zone */
if (!init)
@@ -1443,7 +1756,7 @@ static void reset_curseg(struct f2fs_sb_info *sbi, int type, int modified)
struct summary_footer *sum_footer;
curseg->segno = curseg->next_segno;
- curseg->zone = GET_ZONENO_FROM_SEGNO(sbi, curseg->segno);
+ curseg->zone = GET_ZONE_FROM_SEG(sbi, curseg->segno);
curseg->next_blkoff = 0;
curseg->next_segno = NULL_SEGNO;
@@ -1456,6 +1769,20 @@ static void reset_curseg(struct f2fs_sb_info *sbi, int type, int modified)
__set_sit_entry_type(sbi, type, curseg->segno, modified);
}
+static unsigned int __get_next_segno(struct f2fs_sb_info *sbi, int type)
+{
+ /* if segs_per_sec is larger than 1, we need to keep the original policy. */
+ if (sbi->segs_per_sec != 1)
+ return CURSEG_I(sbi, type)->segno;
+
+ if (type == CURSEG_HOT_DATA || IS_NODESEG(type))
+ return 0;
+
+ if (SIT_I(sbi)->last_victim[ALLOC_NEXT])
+ return SIT_I(sbi)->last_victim[ALLOC_NEXT];
+ return CURSEG_I(sbi, type)->segno;
+}
+
/*
* Allocate a current working segment.
* This function always allocates a free segment in LFS manner.
@@ -1474,6 +1801,7 @@ static void new_curseg(struct f2fs_sb_info *sbi, int type, bool new_sec)
if (test_opt(sbi, NOHEAP))
dir = ALLOC_RIGHT;
+ segno = __get_next_segno(sbi, type);
get_new_segment(sbi, &segno, new_sec, dir);
curseg->next_segno = segno;
reset_curseg(sbi, type, 1);
@@ -1549,12 +1877,15 @@ static int get_ssr_segment(struct f2fs_sb_info *sbi, int type)
{
struct curseg_info *curseg = CURSEG_I(sbi, type);
const struct victim_selection *v_ops = DIRTY_I(sbi)->v_ops;
+ unsigned segno = NULL_SEGNO;
int i, cnt;
bool reversed = false;
/* need_SSR() already forces to do this */
- if (v_ops->get_victim(sbi, &(curseg)->next_segno, BG_GC, type, SSR))
+ if (v_ops->get_victim(sbi, &segno, BG_GC, type, SSR)) {
+ curseg->next_segno = segno;
return 1;
+ }
/* For node segments, let's do SSR more intensively */
if (IS_NODESEG(type)) {
@@ -1578,9 +1909,10 @@ static int get_ssr_segment(struct f2fs_sb_info *sbi, int type)
for (; cnt-- > 0; reversed ? i-- : i++) {
if (i == type)
continue;
- if (v_ops->get_victim(sbi, &(curseg)->next_segno,
- BG_GC, i, SSR))
+ if (v_ops->get_victim(sbi, &segno, BG_GC, i, SSR)) {
+ curseg->next_segno = segno;
return 1;
+ }
}
return 0;
}
@@ -1592,17 +1924,21 @@ static int get_ssr_segment(struct f2fs_sb_info *sbi, int type)
static void allocate_segment_by_default(struct f2fs_sb_info *sbi,
int type, bool force)
{
+ struct curseg_info *curseg = CURSEG_I(sbi, type);
+
if (force)
new_curseg(sbi, type, true);
else if (!is_set_ckpt_flags(sbi, CP_CRC_RECOVERY_FLAG) &&
type == CURSEG_WARM_NODE)
new_curseg(sbi, type, false);
+ else if (curseg->alloc_type == LFS && is_next_segment_free(sbi, type))
+ new_curseg(sbi, type, false);
else if (need_SSR(sbi) && get_ssr_segment(sbi, type))
change_curseg(sbi, type, true);
else
new_curseg(sbi, type, false);
- stat_inc_seg_type(sbi, CURSEG_I(sbi, type));
+ stat_inc_seg_type(sbi, curseg);
}
void allocate_new_segments(struct f2fs_sb_info *sbi)
@@ -1734,18 +2070,16 @@ static int __get_segment_type_6(struct page *page, enum page_type p_type)
if (p_type == DATA) {
struct inode *inode = page->mapping->host;
- if (S_ISDIR(inode->i_mode))
- return CURSEG_HOT_DATA;
- else if (is_cold_data(page) || file_is_cold(inode))
+ if (is_cold_data(page) || file_is_cold(inode))
return CURSEG_COLD_DATA;
- else
- return CURSEG_WARM_DATA;
+ if (is_inode_flag_set(inode, FI_HOT_DATA))
+ return CURSEG_HOT_DATA;
+ return CURSEG_WARM_DATA;
} else {
if (IS_DNODE(page))
return is_cold_node(page) ? CURSEG_WARM_NODE :
CURSEG_HOT_NODE;
- else
- return CURSEG_COLD_NODE;
+ return CURSEG_COLD_NODE;
}
}
@@ -1788,15 +2122,14 @@ void allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
stat_inc_block_count(sbi, curseg);
+ if (!__has_curseg_space(sbi, type))
+ sit_i->s_ops->allocate_segment(sbi, type, false);
/*
- * SIT information should be updated before segment allocation,
- * since SSR needs latest valid block information.
+ * SIT information should be updated after segment allocation,
+ * since we need to keep dirty segments precisely under SSR.
*/
refresh_sit_entry(sbi, old_blkaddr, *new_blkaddr);
- if (!__has_curseg_space(sbi, type))
- sit_i->s_ops->allocate_segment(sbi, type, false);
-
mutex_unlock(&sit_i->sentry_lock);
if (page && IS_NODESEG(type))
@@ -1868,11 +2201,11 @@ void write_data_page(struct dnode_of_data *dn, struct f2fs_io_info *fio)
f2fs_update_data_blkaddr(dn, fio->new_blkaddr);
}
-void rewrite_data_page(struct f2fs_io_info *fio)
+int rewrite_data_page(struct f2fs_io_info *fio)
{
fio->new_blkaddr = fio->old_blkaddr;
stat_inc_inplace_blocks(fio->sbi);
- f2fs_submit_page_mbio(fio);
+ return f2fs_submit_page_bio(fio);
}
void __f2fs_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
@@ -2437,7 +2770,7 @@ void flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
se = get_seg_entry(sbi, segno);
/* add discard candidates */
- if (cpc->reason != CP_DISCARD) {
+ if (!(cpc->reason & CP_DISCARD)) {
cpc->trim_start = segno;
add_discard_addrs(sbi, cpc, false);
}
@@ -2473,7 +2806,7 @@ void flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
f2fs_bug_on(sbi, !list_empty(head));
f2fs_bug_on(sbi, sit_i->dirty_sentries);
out:
- if (cpc->reason == CP_DISCARD) {
+ if (cpc->reason & CP_DISCARD) {
__u64 trim_start = cpc->trim_start;
for (; cpc->trim_start <= cpc->trim_end; cpc->trim_start++)
@@ -2501,13 +2834,13 @@ static int build_sit_info(struct f2fs_sb_info *sbi)
SM_I(sbi)->sit_info = sit_i;
- sit_i->sentries = f2fs_kvzalloc(MAIN_SEGS(sbi) *
+ sit_i->sentries = kvzalloc(MAIN_SEGS(sbi) *
sizeof(struct seg_entry), GFP_KERNEL);
if (!sit_i->sentries)
return -ENOMEM;
bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
- sit_i->dirty_sentries_bitmap = f2fs_kvzalloc(bitmap_size, GFP_KERNEL);
+ sit_i->dirty_sentries_bitmap = kvzalloc(bitmap_size, GFP_KERNEL);
if (!sit_i->dirty_sentries_bitmap)
return -ENOMEM;
@@ -2540,7 +2873,7 @@ static int build_sit_info(struct f2fs_sb_info *sbi)
return -ENOMEM;
if (sbi->segs_per_sec > 1) {
- sit_i->sec_entries = f2fs_kvzalloc(MAIN_SECS(sbi) *
+ sit_i->sec_entries = kvzalloc(MAIN_SECS(sbi) *
sizeof(struct sec_entry), GFP_KERNEL);
if (!sit_i->sec_entries)
return -ENOMEM;
@@ -2573,7 +2906,7 @@ static int build_sit_info(struct f2fs_sb_info *sbi)
sit_i->dirty_sentries = 0;
sit_i->sents_per_block = SIT_ENTRY_PER_BLOCK;
sit_i->elapsed_time = le64_to_cpu(sbi->ckpt->elapsed_time);
- sit_i->mounted_time = CURRENT_TIME_SEC.tv_sec;
+ sit_i->mounted_time = ktime_get_real_seconds();
mutex_init(&sit_i->sentry_lock);
return 0;
}
@@ -2591,12 +2924,12 @@ static int build_free_segmap(struct f2fs_sb_info *sbi)
SM_I(sbi)->free_info = free_i;
bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
- free_i->free_segmap = f2fs_kvmalloc(bitmap_size, GFP_KERNEL);
+ free_i->free_segmap = kvmalloc(bitmap_size, GFP_KERNEL);
if (!free_i->free_segmap)
return -ENOMEM;
sec_bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));
- free_i->free_secmap = f2fs_kvmalloc(sec_bitmap_size, GFP_KERNEL);
+ free_i->free_secmap = kvmalloc(sec_bitmap_size, GFP_KERNEL);
if (!free_i->free_secmap)
return -ENOMEM;
@@ -2672,10 +3005,17 @@ static void build_sit_entries(struct f2fs_sb_info *sbi)
/* build discard map only one time */
if (f2fs_discard_en(sbi)) {
- memcpy(se->discard_map, se->cur_valid_map,
- SIT_VBLOCK_MAP_SIZE);
- sbi->discard_blks += sbi->blocks_per_seg -
- se->valid_blocks;
+ if (is_set_ckpt_flags(sbi, CP_TRIMMED_FLAG)) {
+ memset(se->discard_map, 0xff,
+ SIT_VBLOCK_MAP_SIZE);
+ } else {
+ memcpy(se->discard_map,
+ se->cur_valid_map,
+ SIT_VBLOCK_MAP_SIZE);
+ sbi->discard_blks +=
+ sbi->blocks_per_seg -
+ se->valid_blocks;
+ }
}
if (sbi->segs_per_sec > 1)
@@ -2699,10 +3039,15 @@ static void build_sit_entries(struct f2fs_sb_info *sbi)
seg_info_from_raw_sit(se, &sit);
if (f2fs_discard_en(sbi)) {
- memcpy(se->discard_map, se->cur_valid_map,
- SIT_VBLOCK_MAP_SIZE);
- sbi->discard_blks += old_valid_blocks -
- se->valid_blocks;
+ if (is_set_ckpt_flags(sbi, CP_TRIMMED_FLAG)) {
+ memset(se->discard_map, 0xff,
+ SIT_VBLOCK_MAP_SIZE);
+ } else {
+ memcpy(se->discard_map, se->cur_valid_map,
+ SIT_VBLOCK_MAP_SIZE);
+ sbi->discard_blks += old_valid_blocks -
+ se->valid_blocks;
+ }
}
if (sbi->segs_per_sec > 1)
@@ -2746,7 +3091,7 @@ static void init_dirty_segmap(struct f2fs_sb_info *sbi)
if (segno >= MAIN_SEGS(sbi))
break;
offset = segno + 1;
- valid_blocks = get_valid_blocks(sbi, segno, 0);
+ valid_blocks = get_valid_blocks(sbi, segno, false);
if (valid_blocks == sbi->blocks_per_seg || !valid_blocks)
continue;
if (valid_blocks > sbi->blocks_per_seg) {
@@ -2764,7 +3109,7 @@ static int init_victim_secmap(struct f2fs_sb_info *sbi)
struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
unsigned int bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));
- dirty_i->victim_secmap = f2fs_kvzalloc(bitmap_size, GFP_KERNEL);
+ dirty_i->victim_secmap = kvzalloc(bitmap_size, GFP_KERNEL);
if (!dirty_i->victim_secmap)
return -ENOMEM;
return 0;
@@ -2786,7 +3131,7 @@ static int build_dirty_segmap(struct f2fs_sb_info *sbi)
bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
for (i = 0; i < NR_DIRTY_TYPE; i++) {
- dirty_i->dirty_segmap[i] = f2fs_kvzalloc(bitmap_size, GFP_KERNEL);
+ dirty_i->dirty_segmap[i] = kvzalloc(bitmap_size, GFP_KERNEL);
if (!dirty_i->dirty_segmap[i])
return -ENOMEM;
}
@@ -2852,6 +3197,7 @@ int build_segment_manager(struct f2fs_sb_info *sbi)
sm_info->ipu_policy = 1 << F2FS_IPU_FSYNC;
sm_info->min_ipu_util = DEF_MIN_IPU_UTIL;
sm_info->min_fsync_blocks = DEF_MIN_FSYNC_BLOCKS;
+ sm_info->min_hot_blocks = DEF_MIN_HOT_BLOCKS;
sm_info->trim_sections = DEF_BATCHED_TRIM_SECTIONS;
@@ -2988,7 +3334,7 @@ void destroy_segment_manager(struct f2fs_sb_info *sbi)
if (!sm_info)
return;
destroy_flush_cmd_control(sbi, true);
- destroy_discard_cmd_control(sbi, true);
+ destroy_discard_cmd_control(sbi);
destroy_dirty_segmap(sbi);
destroy_curseg(sbi);
destroy_free_segmap(sbi);
diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
index 5e8ad4280a50..010f336a7573 100644
--- a/fs/f2fs/segment.h
+++ b/fs/f2fs/segment.h
@@ -21,78 +21,84 @@
#define F2FS_MIN_SEGMENTS 9 /* SB + 2 (CP + SIT + NAT) + SSA + MAIN */
/* L: Logical segment # in volume, R: Relative segment # in main area */
-#define GET_L2R_SEGNO(free_i, segno) (segno - free_i->start_segno)
-#define GET_R2L_SEGNO(free_i, segno) (segno + free_i->start_segno)
+#define GET_L2R_SEGNO(free_i, segno) ((segno) - (free_i)->start_segno)
+#define GET_R2L_SEGNO(free_i, segno) ((segno) + (free_i)->start_segno)
-#define IS_DATASEG(t) (t <= CURSEG_COLD_DATA)
-#define IS_NODESEG(t) (t >= CURSEG_HOT_NODE)
+#define IS_DATASEG(t) ((t) <= CURSEG_COLD_DATA)
+#define IS_NODESEG(t) ((t) >= CURSEG_HOT_NODE)
#define IS_CURSEG(sbi, seg) \
- ((seg == CURSEG_I(sbi, CURSEG_HOT_DATA)->segno) || \
- (seg == CURSEG_I(sbi, CURSEG_WARM_DATA)->segno) || \
- (seg == CURSEG_I(sbi, CURSEG_COLD_DATA)->segno) || \
- (seg == CURSEG_I(sbi, CURSEG_HOT_NODE)->segno) || \
- (seg == CURSEG_I(sbi, CURSEG_WARM_NODE)->segno) || \
- (seg == CURSEG_I(sbi, CURSEG_COLD_NODE)->segno))
+ (((seg) == CURSEG_I(sbi, CURSEG_HOT_DATA)->segno) || \
+ ((seg) == CURSEG_I(sbi, CURSEG_WARM_DATA)->segno) || \
+ ((seg) == CURSEG_I(sbi, CURSEG_COLD_DATA)->segno) || \
+ ((seg) == CURSEG_I(sbi, CURSEG_HOT_NODE)->segno) || \
+ ((seg) == CURSEG_I(sbi, CURSEG_WARM_NODE)->segno) || \
+ ((seg) == CURSEG_I(sbi, CURSEG_COLD_NODE)->segno))
#define IS_CURSEC(sbi, secno) \
- ((secno == CURSEG_I(sbi, CURSEG_HOT_DATA)->segno / \
- sbi->segs_per_sec) || \
- (secno == CURSEG_I(sbi, CURSEG_WARM_DATA)->segno / \
- sbi->segs_per_sec) || \
- (secno == CURSEG_I(sbi, CURSEG_COLD_DATA)->segno / \
- sbi->segs_per_sec) || \
- (secno == CURSEG_I(sbi, CURSEG_HOT_NODE)->segno / \
- sbi->segs_per_sec) || \
- (secno == CURSEG_I(sbi, CURSEG_WARM_NODE)->segno / \
- sbi->segs_per_sec) || \
- (secno == CURSEG_I(sbi, CURSEG_COLD_NODE)->segno / \
- sbi->segs_per_sec)) \
+ (((secno) == CURSEG_I(sbi, CURSEG_HOT_DATA)->segno / \
+ (sbi)->segs_per_sec) || \
+ ((secno) == CURSEG_I(sbi, CURSEG_WARM_DATA)->segno / \
+ (sbi)->segs_per_sec) || \
+ ((secno) == CURSEG_I(sbi, CURSEG_COLD_DATA)->segno / \
+ (sbi)->segs_per_sec) || \
+ ((secno) == CURSEG_I(sbi, CURSEG_HOT_NODE)->segno / \
+ (sbi)->segs_per_sec) || \
+ ((secno) == CURSEG_I(sbi, CURSEG_WARM_NODE)->segno / \
+ (sbi)->segs_per_sec) || \
+ ((secno) == CURSEG_I(sbi, CURSEG_COLD_NODE)->segno / \
+ (sbi)->segs_per_sec)) \
#define MAIN_BLKADDR(sbi) (SM_I(sbi)->main_blkaddr)
#define SEG0_BLKADDR(sbi) (SM_I(sbi)->seg0_blkaddr)
#define MAIN_SEGS(sbi) (SM_I(sbi)->main_segments)
-#define MAIN_SECS(sbi) (sbi->total_sections)
+#define MAIN_SECS(sbi) ((sbi)->total_sections)
#define TOTAL_SEGS(sbi) (SM_I(sbi)->segment_count)
-#define TOTAL_BLKS(sbi) (TOTAL_SEGS(sbi) << sbi->log_blocks_per_seg)
+#define TOTAL_BLKS(sbi) (TOTAL_SEGS(sbi) << (sbi)->log_blocks_per_seg)
#define MAX_BLKADDR(sbi) (SEG0_BLKADDR(sbi) + TOTAL_BLKS(sbi))
-#define SEGMENT_SIZE(sbi) (1ULL << (sbi->log_blocksize + \
- sbi->log_blocks_per_seg))
+#define SEGMENT_SIZE(sbi) (1ULL << ((sbi)->log_blocksize + \
+ (sbi)->log_blocks_per_seg))
#define START_BLOCK(sbi, segno) (SEG0_BLKADDR(sbi) + \
- (GET_R2L_SEGNO(FREE_I(sbi), segno) << sbi->log_blocks_per_seg))
+ (GET_R2L_SEGNO(FREE_I(sbi), segno) << (sbi)->log_blocks_per_seg))
#define NEXT_FREE_BLKADDR(sbi, curseg) \
- (START_BLOCK(sbi, curseg->segno) + curseg->next_blkoff)
+ (START_BLOCK(sbi, (curseg)->segno) + (curseg)->next_blkoff)
#define GET_SEGOFF_FROM_SEG0(sbi, blk_addr) ((blk_addr) - SEG0_BLKADDR(sbi))
#define GET_SEGNO_FROM_SEG0(sbi, blk_addr) \
- (GET_SEGOFF_FROM_SEG0(sbi, blk_addr) >> sbi->log_blocks_per_seg)
+ (GET_SEGOFF_FROM_SEG0(sbi, blk_addr) >> (sbi)->log_blocks_per_seg)
#define GET_BLKOFF_FROM_SEG0(sbi, blk_addr) \
- (GET_SEGOFF_FROM_SEG0(sbi, blk_addr) & (sbi->blocks_per_seg - 1))
+ (GET_SEGOFF_FROM_SEG0(sbi, blk_addr) & ((sbi)->blocks_per_seg - 1))
#define GET_SEGNO(sbi, blk_addr) \
- (((blk_addr == NULL_ADDR) || (blk_addr == NEW_ADDR)) ? \
+ ((((blk_addr) == NULL_ADDR) || ((blk_addr) == NEW_ADDR)) ? \
NULL_SEGNO : GET_L2R_SEGNO(FREE_I(sbi), \
GET_SEGNO_FROM_SEG0(sbi, blk_addr)))
-#define GET_SECNO(sbi, segno) \
- ((segno) / sbi->segs_per_sec)
-#define GET_ZONENO_FROM_SEGNO(sbi, segno) \
- ((segno / sbi->segs_per_sec) / sbi->secs_per_zone)
+#define BLKS_PER_SEC(sbi) \
+ ((sbi)->segs_per_sec * (sbi)->blocks_per_seg)
+#define GET_SEC_FROM_SEG(sbi, segno) \
+ ((segno) / (sbi)->segs_per_sec)
+#define GET_SEG_FROM_SEC(sbi, secno) \
+ ((secno) * (sbi)->segs_per_sec)
+#define GET_ZONE_FROM_SEC(sbi, secno) \
+ ((secno) / (sbi)->secs_per_zone)
+#define GET_ZONE_FROM_SEG(sbi, segno) \
+ GET_ZONE_FROM_SEC(sbi, GET_SEC_FROM_SEG(sbi, segno))
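
The new GET_SEC_FROM_SEG/GET_SEG_FROM_SEC/GET_ZONE_FROM_* helpers are plain integer arithmetic over the volume geometry. A small standalone sketch, assuming a made-up layout of 4 segments per section and 2 sections per zone:

#include <stdio.h>

#define SEGS_PER_SEC  4                 /* invented geometry */
#define SECS_PER_ZONE 2

static unsigned int sec_from_seg(unsigned int segno)
{
        return segno / SEGS_PER_SEC;    /* GET_SEC_FROM_SEG */
}

static unsigned int seg_from_sec(unsigned int secno)
{
        return secno * SEGS_PER_SEC;    /* GET_SEG_FROM_SEC */
}

static unsigned int zone_from_sec(unsigned int secno)
{
        return secno / SECS_PER_ZONE;   /* GET_ZONE_FROM_SEC */
}

int main(void)
{
        unsigned int segno = 13;
        unsigned int secno = sec_from_seg(segno);

        printf("segno %u -> secno %u, first seg of that sec %u, zone %u\n",
               segno, secno, seg_from_sec(secno), zone_from_sec(secno));
        return 0;       /* segno 13 -> secno 3, first seg 12, zone 1 */
}
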
#define GET_SUM_BLOCK(sbi, segno) \
- ((sbi->sm_info->ssa_blkaddr) + segno)
+ ((sbi)->sm_info->ssa_blkaddr + (segno))
#define GET_SUM_TYPE(footer) ((footer)->entry_type)
-#define SET_SUM_TYPE(footer, type) ((footer)->entry_type = type)
+#define SET_SUM_TYPE(footer, type) ((footer)->entry_type = (type))
#define SIT_ENTRY_OFFSET(sit_i, segno) \
- (segno % sit_i->sents_per_block)
+ ((segno) % (sit_i)->sents_per_block)
#define SIT_BLOCK_OFFSET(segno) \
- (segno / SIT_ENTRY_PER_BLOCK)
+ ((segno) / SIT_ENTRY_PER_BLOCK)
#define START_SEGNO(segno) \
(SIT_BLOCK_OFFSET(segno) * SIT_ENTRY_PER_BLOCK)
#define SIT_BLK_CNT(sbi) \
@@ -103,7 +109,7 @@
#define SECTOR_FROM_BLOCK(blk_addr) \
(((sector_t)blk_addr) << F2FS_LOG_SECTORS_PER_BLOCK)
#define SECTOR_TO_BLOCK(sectors) \
- (sectors >> F2FS_LOG_SECTORS_PER_BLOCK)
+ ((sectors) >> F2FS_LOG_SECTORS_PER_BLOCK)
/*
* indicate a block allocation direction: RIGHT and LEFT.
@@ -132,7 +138,10 @@ enum {
*/
enum {
GC_CB = 0,
- GC_GREEDY
+ GC_GREEDY,
+ ALLOC_NEXT,
+ FLUSH_DEVICE,
+ MAX_GC_POLICY,
};
/*
@@ -227,6 +236,8 @@ struct sit_info {
unsigned long long mounted_time; /* mount time */
unsigned long long min_mtime; /* min. modification time */
unsigned long long max_mtime; /* max. modification time */
+
+ unsigned int last_victim[MAX_GC_POLICY]; /* last victim segment # */
};
struct free_segmap_info {
@@ -303,17 +314,17 @@ static inline struct sec_entry *get_sec_entry(struct f2fs_sb_info *sbi,
unsigned int segno)
{
struct sit_info *sit_i = SIT_I(sbi);
- return &sit_i->sec_entries[GET_SECNO(sbi, segno)];
+ return &sit_i->sec_entries[GET_SEC_FROM_SEG(sbi, segno)];
}
static inline unsigned int get_valid_blocks(struct f2fs_sb_info *sbi,
- unsigned int segno, int section)
+ unsigned int segno, bool use_section)
{
/*
* In order to get # of valid blocks in a section instantly from many
* segments, f2fs manages two counting structures separately.
*/
- if (section > 1)
+ if (use_section && sbi->segs_per_sec > 1)
return get_sec_entry(sbi, segno)->valid_blocks;
else
return get_seg_entry(sbi, segno)->valid_blocks;
@@ -358,8 +369,8 @@ static inline unsigned int find_next_inuse(struct free_segmap_info *free_i,
static inline void __set_free(struct f2fs_sb_info *sbi, unsigned int segno)
{
struct free_segmap_info *free_i = FREE_I(sbi);
- unsigned int secno = segno / sbi->segs_per_sec;
- unsigned int start_segno = secno * sbi->segs_per_sec;
+ unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
+ unsigned int start_segno = GET_SEG_FROM_SEC(sbi, secno);
unsigned int next;
spin_lock(&free_i->segmap_lock);
@@ -379,7 +390,8 @@ static inline void __set_inuse(struct f2fs_sb_info *sbi,
unsigned int segno)
{
struct free_segmap_info *free_i = FREE_I(sbi);
- unsigned int secno = segno / sbi->segs_per_sec;
+ unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
+
set_bit(segno, free_i->free_segmap);
free_i->free_segments--;
if (!test_and_set_bit(secno, free_i->free_secmap))
@@ -390,8 +402,8 @@ static inline void __set_test_and_free(struct f2fs_sb_info *sbi,
unsigned int segno)
{
struct free_segmap_info *free_i = FREE_I(sbi);
- unsigned int secno = segno / sbi->segs_per_sec;
- unsigned int start_segno = secno * sbi->segs_per_sec;
+ unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
+ unsigned int start_segno = GET_SEG_FROM_SEC(sbi, secno);
unsigned int next;
spin_lock(&free_i->segmap_lock);
@@ -412,7 +424,8 @@ static inline void __set_test_and_inuse(struct f2fs_sb_info *sbi,
unsigned int segno)
{
struct free_segmap_info *free_i = FREE_I(sbi);
- unsigned int secno = segno / sbi->segs_per_sec;
+ unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
+
spin_lock(&free_i->segmap_lock);
if (!test_and_set_bit(segno, free_i->free_segmap)) {
free_i->free_segments--;
@@ -477,12 +490,12 @@ static inline int overprovision_segments(struct f2fs_sb_info *sbi)
static inline int overprovision_sections(struct f2fs_sb_info *sbi)
{
- return ((unsigned int) overprovision_segments(sbi)) / sbi->segs_per_sec;
+ return GET_SEC_FROM_SEG(sbi, (unsigned int)overprovision_segments(sbi));
}
static inline int reserved_sections(struct f2fs_sb_info *sbi)
{
- return ((unsigned int) reserved_segments(sbi)) / sbi->segs_per_sec;
+ return GET_SEC_FROM_SEG(sbi, (unsigned int)reserved_segments(sbi));
}
static inline bool need_SSR(struct f2fs_sb_info *sbi)
@@ -495,7 +508,7 @@ static inline bool need_SSR(struct f2fs_sb_info *sbi)
return false;
return free_sections(sbi) <= (node_secs + 2 * dent_secs + imeta_secs +
- reserved_sections(sbi) + 1);
+ 2 * reserved_sections(sbi));
}
static inline bool has_not_enough_free_secs(struct f2fs_sb_info *sbi,
@@ -540,6 +553,7 @@ static inline int utilization(struct f2fs_sb_info *sbi)
*/
#define DEF_MIN_IPU_UTIL 70
#define DEF_MIN_FSYNC_BLOCKS 8
+#define DEF_MIN_HOT_BLOCKS 16
enum {
F2FS_IPU_FORCE,
@@ -547,17 +561,15 @@ enum {
F2FS_IPU_UTIL,
F2FS_IPU_SSR_UTIL,
F2FS_IPU_FSYNC,
+ F2FS_IPU_ASYNC,
};
-static inline bool need_inplace_update(struct inode *inode)
+static inline bool need_inplace_update_policy(struct inode *inode,
+ struct f2fs_io_info *fio)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
unsigned int policy = SM_I(sbi)->ipu_policy;
- /* IPU can be done only for the user data */
- if (S_ISDIR(inode->i_mode) || f2fs_is_atomic_file(inode))
- return false;
-
if (test_opt(sbi, LFS))
return false;
@@ -572,6 +584,15 @@ static inline bool need_inplace_update(struct inode *inode)
utilization(sbi) > SM_I(sbi)->min_ipu_util)
return true;
+ /*
+ * IPU for rewrite async pages
+ */
+ if (policy & (0x1 << F2FS_IPU_ASYNC) &&
+ fio && fio->op == REQ_OP_WRITE &&
+ !(fio->op_flags & REQ_SYNC) &&
+ !f2fs_encrypted_inode(inode))
+ return true;
+
/* this is only set during fdatasync */
if (policy & (0x1 << F2FS_IPU_FSYNC) &&
is_inode_flag_set(inode, FI_NEED_IPU))
@@ -691,8 +712,9 @@ static inline void set_to_next_sit(struct sit_info *sit_i, unsigned int start)
static inline unsigned long long get_mtime(struct f2fs_sb_info *sbi)
{
struct sit_info *sit_i = SIT_I(sbi);
- return sit_i->elapsed_time + CURRENT_TIME_SEC.tv_sec -
- sit_i->mounted_time;
+ time64_t now = ktime_get_real_seconds();
+
+ return sit_i->elapsed_time + now - sit_i->mounted_time;
}
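
get_mtime() now derives the filesystem age from the on-disk elapsed_time plus the wall-clock seconds since mount, where mounted_time is sampled with ktime_get_real_seconds() at mount. A trivial userspace analogue using time(2); the starting value is invented.

#include <stdio.h>
#include <time.h>

int main(void)
{
        long long elapsed_at_mount = 1000;      /* from the checkpoint (invented) */
        time_t mounted_time = time(NULL);       /* sampled once at "mount" */

        /* ... later ... */
        long long age = elapsed_at_mount + (long long)(time(NULL) - mounted_time);
        printf("fs age: %lld seconds\n", age);
        return 0;
}
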
static inline void set_summary(struct f2fs_summary *sum, nid_t nid,
@@ -719,7 +741,7 @@ static inline block_t sum_blk_addr(struct f2fs_sb_info *sbi, int base, int type)
static inline bool no_fggc_candidate(struct f2fs_sb_info *sbi,
unsigned int secno)
{
- if (get_valid_blocks(sbi, secno, sbi->segs_per_sec) >=
+ if (get_valid_blocks(sbi, GET_SEG_FROM_SEC(sbi, secno), true) >=
sbi->fggc_threshold)
return true;
return false;
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index 96fe8ed73100..83355ec4a92c 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -49,6 +49,7 @@ char *fault_name[FAULT_MAX] = {
[FAULT_BLOCK] = "no more block",
[FAULT_DIR_DEPTH] = "too big dir depth",
[FAULT_EVICT_INODE] = "evict_inode fail",
+ [FAULT_TRUNCATE] = "truncate fail",
[FAULT_IO] = "IO error",
[FAULT_CHECKPOINT] = "checkpoint error",
};
@@ -82,6 +83,7 @@ enum {
Opt_discard,
Opt_nodiscard,
Opt_noheap,
+ Opt_heap,
Opt_user_xattr,
Opt_nouser_xattr,
Opt_acl,
@@ -116,6 +118,7 @@ static match_table_t f2fs_tokens = {
{Opt_discard, "discard"},
{Opt_nodiscard, "nodiscard"},
{Opt_noheap, "no_heap"},
+ {Opt_heap, "heap"},
{Opt_user_xattr, "user_xattr"},
{Opt_nouser_xattr, "nouser_xattr"},
{Opt_acl, "acl"},
@@ -293,6 +296,7 @@ F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, batched_trim_sections, trim_sections);
F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, ipu_policy, ipu_policy);
F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, min_ipu_util, min_ipu_util);
F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, min_fsync_blocks, min_fsync_blocks);
+F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, min_hot_blocks, min_hot_blocks);
F2FS_RW_ATTR(NM_INFO, f2fs_nm_info, ram_thresh, ram_thresh);
F2FS_RW_ATTR(NM_INFO, f2fs_nm_info, ra_nid_pages, ra_nid_pages);
F2FS_RW_ATTR(NM_INFO, f2fs_nm_info, dirty_nats_ratio, dirty_nats_ratio);
@@ -318,6 +322,7 @@ static struct attribute *f2fs_attrs[] = {
ATTR_LIST(ipu_policy),
ATTR_LIST(min_ipu_util),
ATTR_LIST(min_fsync_blocks),
+ ATTR_LIST(min_hot_blocks),
ATTR_LIST(max_victim_search),
ATTR_LIST(dir_level),
ATTR_LIST(ram_thresh),
@@ -436,6 +441,9 @@ static int parse_options(struct super_block *sb, char *options)
case Opt_noheap:
set_opt(sbi, NOHEAP);
break;
+ case Opt_heap:
+ clear_opt(sbi, NOHEAP);
+ break;
#ifdef CONFIG_F2FS_FS_XATTR
case Opt_user_xattr:
set_opt(sbi, XATTR_USER);
@@ -787,7 +795,14 @@ static void f2fs_put_super(struct super_block *sb)
}
/* be sure to wait for any on-going discard commands */
- f2fs_wait_discard_bio(sbi, NULL_ADDR);
+ f2fs_wait_discard_bios(sbi);
+
+ if (!sbi->discard_blks) {
+ struct cp_control cpc = {
+ .reason = CP_UMOUNT | CP_TRIMMED,
+ };
+ write_checkpoint(sbi, &cpc);
+ }
/* write_checkpoint can update stat information */
f2fs_destroy_stats(sbi);
@@ -913,7 +928,9 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
if (test_opt(sbi, DISCARD))
seq_puts(seq, ",discard");
if (test_opt(sbi, NOHEAP))
- seq_puts(seq, ",no_heap_alloc");
+ seq_puts(seq, ",no_heap");
+ else
+ seq_puts(seq, ",heap");
#ifdef CONFIG_F2FS_FS_XATTR
if (test_opt(sbi, XATTR_USER))
seq_puts(seq, ",user_xattr");
@@ -986,7 +1003,7 @@ static int segment_info_seq_show(struct seq_file *seq, void *offset)
if ((i % 10) == 0)
seq_printf(seq, "%-10d", i);
seq_printf(seq, "%d|%-3u", se->type,
- get_valid_blocks(sbi, i, 1));
+ get_valid_blocks(sbi, i, false));
if ((i % 10) == 9 || i == (total_segs - 1))
seq_putc(seq, '\n');
else
@@ -1012,7 +1029,7 @@ static int segment_bits_seq_show(struct seq_file *seq, void *offset)
seq_printf(seq, "%-10d", i);
seq_printf(seq, "%d|%-3u|", se->type,
- get_valid_blocks(sbi, i, 1));
+ get_valid_blocks(sbi, i, false));
for (j = 0; j < SIT_VBLOCK_MAP_SIZE; j++)
seq_printf(seq, " %.2x", se->cur_valid_map[j]);
seq_putc(seq, '\n');
@@ -1046,6 +1063,7 @@ static void default_options(struct f2fs_sb_info *sbi)
set_opt(sbi, INLINE_DATA);
set_opt(sbi, INLINE_DENTRY);
set_opt(sbi, EXTENT_CACHE);
+ set_opt(sbi, NOHEAP);
sbi->sb->s_flags |= MS_LAZYTIME;
set_opt(sbi, FLUSH_MERGE);
if (f2fs_sb_mounted_blkzoned(sbi->sb)) {
@@ -1307,7 +1325,7 @@ static int __f2fs_commit_super(struct buffer_head *bh,
unlock_buffer(bh);
/* it's a rare case, we can do fua all the time */
- return __sync_dirty_buffer(bh, REQ_PREFLUSH | REQ_FUA);
+ return __sync_dirty_buffer(bh, REQ_SYNC | REQ_PREFLUSH | REQ_FUA);
}
static inline bool sanity_check_area_boundary(struct f2fs_sb_info *sbi,
@@ -1483,6 +1501,13 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
return 1;
}
+ if (le32_to_cpu(raw_super->segment_count) > F2FS_MAX_SEGMENT) {
+ f2fs_msg(sb, KERN_INFO,
+ "Invalid segment count (%u)",
+ le32_to_cpu(raw_super->segment_count));
+ return 1;
+ }
+
/* check CP/SIT/NAT/SSA/MAIN_AREA area boundary */
if (sanity_check_area_boundary(sbi, bh))
return 1;
@@ -1555,6 +1580,8 @@ static void init_sb_info(struct f2fs_sb_info *sbi)
for (i = 0; i < NR_COUNT_TYPE; i++)
atomic_set(&sbi->nr_pages[i], 0);
+ atomic_set(&sbi->wb_sync_req, 0);
+
INIT_LIST_HEAD(&sbi->s_list);
mutex_init(&sbi->umount_mutex);
mutex_init(&sbi->wio_mutex[NODE]);
@@ -1917,6 +1944,7 @@ try_onemore:
mutex_init(&sbi->gc_mutex);
mutex_init(&sbi->cp_mutex);
init_rwsem(&sbi->node_write);
+ init_rwsem(&sbi->node_change);
/* disallow all the data/node/meta page writes */
set_sbi_flag(sbi, SBI_POR_DOING);
@@ -2022,6 +2050,10 @@ try_onemore:
f2fs_join_shrinker(sbi);
+ err = f2fs_build_stats(sbi);
+ if (err)
+ goto free_nm;
+
/* if there are any orphan nodes, free them */
err = recover_orphan_inodes(sbi);
if (err)
@@ -2046,10 +2078,6 @@ try_onemore:
goto free_root_inode;
}
- err = f2fs_build_stats(sbi);
- if (err)
- goto free_root_inode;
-
if (f2fs_proc_root)
sbi->s_proc = proc_mkdir(sb->s_id, f2fs_proc_root);
@@ -2143,7 +2171,6 @@ free_proc:
remove_proc_entry("segment_bits", sbi->s_proc);
remove_proc_entry(sb->s_id, f2fs_proc_root);
}
- f2fs_destroy_stats(sbi);
free_root_inode:
dput(sb->s_root);
sb->s_root = NULL;
@@ -2161,6 +2188,7 @@ free_node_inode:
truncate_inode_pages_final(META_MAPPING(sbi));
iput(sbi->node_inode);
mutex_unlock(&sbi->umount_mutex);
+ f2fs_destroy_stats(sbi);
free_nm:
destroy_node_manager(sbi);
free_sm:
diff --git a/fs/f2fs/trace.c b/fs/f2fs/trace.c
index 73b4e1d1912a..bccbbf2616d2 100644
--- a/fs/f2fs/trace.c
+++ b/fs/f2fs/trace.c
@@ -59,7 +59,7 @@ void f2fs_trace_pid(struct page *page)
pid_t pid = task_pid_nr(current);
void *p;
- page->private = pid;
+ set_page_private(page, (unsigned long)pid);
if (radix_tree_preload(GFP_NOFS))
return;
@@ -138,7 +138,7 @@ static unsigned int gang_lookup_pids(pid_t *results, unsigned long first_index,
radix_tree_for_each_slot(slot, &pids, &iter, first_index) {
results[ret] = iter.index;
- if (++ret == PIDVEC_SIZE)
+ if (++ret == max_items)
break;
}
return ret;
diff --git a/fs/f2fs/xattr.c b/fs/f2fs/xattr.c
index 7298a4488f7f..832c5110abab 100644
--- a/fs/f2fs/xattr.c
+++ b/fs/f2fs/xattr.c
@@ -250,15 +250,13 @@ static int lookup_all_xattrs(struct inode *inode, struct page *ipage,
void *cur_addr, *txattr_addr, *last_addr = NULL;
nid_t xnid = F2FS_I(inode)->i_xattr_nid;
unsigned int size = xnid ? VALID_XATTR_BLOCK_SIZE : 0;
- unsigned int inline_size = 0;
+ unsigned int inline_size = inline_xattr_size(inode);
int err = 0;
- inline_size = inline_xattr_size(inode);
-
if (!size && !inline_size)
return -ENODATA;
- txattr_addr = kzalloc(inline_size + size + sizeof(__u32),
+ txattr_addr = kzalloc(inline_size + size + XATTR_PADDING_SIZE,
GFP_F2FS_ZERO);
if (!txattr_addr)
return -ENOMEM;
@@ -328,13 +326,14 @@ static int read_all_xattrs(struct inode *inode, struct page *ipage,
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct f2fs_xattr_header *header;
- size_t size = PAGE_SIZE, inline_size = 0;
+ nid_t xnid = F2FS_I(inode)->i_xattr_nid;
+ unsigned int size = VALID_XATTR_BLOCK_SIZE;
+ unsigned int inline_size = inline_xattr_size(inode);
void *txattr_addr;
int err;
- inline_size = inline_xattr_size(inode);
-
- txattr_addr = kzalloc(inline_size + size, GFP_F2FS_ZERO);
+ txattr_addr = kzalloc(inline_size + size + XATTR_PADDING_SIZE,
+ GFP_F2FS_ZERO);
if (!txattr_addr)
return -ENOMEM;
@@ -358,19 +357,19 @@ static int read_all_xattrs(struct inode *inode, struct page *ipage,
}
/* read from xattr node block */
- if (F2FS_I(inode)->i_xattr_nid) {
+ if (xnid) {
struct page *xpage;
void *xattr_addr;
/* The inode already has an extended attribute block. */
- xpage = get_node_page(sbi, F2FS_I(inode)->i_xattr_nid);
+ xpage = get_node_page(sbi, xnid);
if (IS_ERR(xpage)) {
err = PTR_ERR(xpage);
goto fail;
}
xattr_addr = page_address(xpage);
- memcpy(txattr_addr + inline_size, xattr_addr, PAGE_SIZE);
+ memcpy(txattr_addr + inline_size, xattr_addr, size);
f2fs_put_page(xpage, 1);
}
@@ -392,14 +391,12 @@ static inline int write_all_xattrs(struct inode *inode, __u32 hsize,
void *txattr_addr, struct page *ipage)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- size_t inline_size = 0;
+ size_t inline_size = inline_xattr_size(inode);
void *xattr_addr;
struct page *xpage;
nid_t new_nid = 0;
int err;
- inline_size = inline_xattr_size(inode);
-
if (hsize > inline_size && !F2FS_I(inode)->i_xattr_nid)
if (!alloc_nid(sbi, &new_nid))
return -ENOSPC;
@@ -454,7 +451,7 @@ static inline int write_all_xattrs(struct inode *inode, __u32 hsize,
}
xattr_addr = page_address(xpage);
- memcpy(xattr_addr, txattr_addr + inline_size, MAX_XATTR_BLOCK_SIZE);
+ memcpy(xattr_addr, txattr_addr + inline_size, VALID_XATTR_BLOCK_SIZE);
set_page_dirty(xpage);
f2fs_put_page(xpage, 1);
@@ -546,7 +543,9 @@ static bool f2fs_xattr_value_same(struct f2fs_xattr_entry *entry,
const void *value, size_t size)
{
void *pval = entry->e_name + entry->e_name_len;
- return (entry->e_value_size == size) && !memcmp(pval, value, size);
+
+ return (le16_to_cpu(entry->e_value_size) == size) &&
+ !memcmp(pval, value, size);
}
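
f2fs_xattr_value_same() now decodes the on-disk __le16 length with le16_to_cpu() before comparing it to a host-order size; on a big-endian host the raw field would read back byte-swapped. A userspace illustration, with a portable decoder standing in for the kernel macro:

#include <stdio.h>
#include <stdint.h>

/* stand-in for le16_to_cpu(): the on-disk value is stored LSB first */
static uint16_t decode_le16(const uint8_t b[2])
{
        return (uint16_t)(b[0] | (b[1] << 8));
}

int main(void)
{
        uint8_t on_disk[2] = { 0x04, 0x00 };    /* length 4, little-endian */
        size_t want = 4;

        printf("same size? %s\n", decode_le16(on_disk) == want ? "yes" : "no");
        return 0;
}
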
static int __f2fs_setxattr(struct inode *inode, int index,
diff --git a/fs/f2fs/xattr.h b/fs/f2fs/xattr.h
index d5a94928c116..dbcd1d16e669 100644
--- a/fs/f2fs/xattr.h
+++ b/fs/f2fs/xattr.h
@@ -58,10 +58,10 @@ struct f2fs_xattr_entry {
#define XATTR_FIRST_ENTRY(ptr) (XATTR_ENTRY(XATTR_HDR(ptr) + 1))
#define XATTR_ROUND (3)
-#define XATTR_ALIGN(size) ((size + XATTR_ROUND) & ~XATTR_ROUND)
+#define XATTR_ALIGN(size) (((size) + XATTR_ROUND) & ~XATTR_ROUND)
#define ENTRY_SIZE(entry) (XATTR_ALIGN(sizeof(struct f2fs_xattr_entry) + \
- entry->e_name_len + le16_to_cpu(entry->e_value_size)))
+ (entry)->e_name_len + le16_to_cpu((entry)->e_value_size)))
#define XATTR_NEXT_ENTRY(entry) ((struct f2fs_xattr_entry *)((char *)(entry) +\
ENTRY_SIZE(entry)))
@@ -72,8 +72,8 @@ struct f2fs_xattr_entry {
for (entry = XATTR_FIRST_ENTRY(addr);\
!IS_XATTR_LAST_ENTRY(entry);\
entry = XATTR_NEXT_ENTRY(entry))
-#define MAX_XATTR_BLOCK_SIZE (PAGE_SIZE - sizeof(struct node_footer))
-#define VALID_XATTR_BLOCK_SIZE (MAX_XATTR_BLOCK_SIZE - sizeof(__u32))
+#define VALID_XATTR_BLOCK_SIZE (PAGE_SIZE - sizeof(struct node_footer))
+#define XATTR_PADDING_SIZE (sizeof(__u32))
#define MIN_OFFSET(i) XATTR_ALIGN(inline_xattr_size(i) + \
VALID_XATTR_BLOCK_SIZE)
diff --git a/fs/fcntl.c b/fs/fcntl.c
index 8bd81c2e89b2..f4e7267d117f 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -899,16 +899,10 @@ static int __init fcntl_init(void)
* Exceptions: O_NONBLOCK is a two bit define on parisc; O_NDELAY
* is defined as O_NONBLOCK on some platforms and not on others.
*/
- BUILD_BUG_ON(21 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32(
- O_RDONLY | O_WRONLY | O_RDWR |
- O_CREAT | O_EXCL | O_NOCTTY |
- O_TRUNC | O_APPEND | /* O_NONBLOCK | */
- __O_SYNC | O_DSYNC | FASYNC |
- O_DIRECT | O_LARGEFILE | O_DIRECTORY |
- O_NOFOLLOW | O_NOATIME | O_CLOEXEC |
- __FMODE_EXEC | O_PATH | __O_TMPFILE |
- __FMODE_NONOTIFY
- ));
+ BUILD_BUG_ON(21 - 1 /* for O_RDONLY being 0 */ !=
+ HWEIGHT32(
+ (VALID_OPEN_FLAGS & ~(O_NONBLOCK | O_NDELAY)) |
+ __FMODE_EXEC | __FMODE_NONOTIFY));
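
The rewritten BUILD_BUG_ON keeps the same invariant: every open flag it covers must occupy its own bit, so the population count of their OR equals the expected flag count. A userspace sketch of that check, with invented flag values and GCC/Clang's __builtin_popcount() in place of HWEIGHT32():

#include <stdio.h>

#define FLAG_A 0x01                     /* invented flags */
#define FLAG_B 0x02
#define FLAG_C 0x08

int main(void)
{
        unsigned int all = FLAG_A | FLAG_B | FLAG_C;
        int nflags = 3;

        if (__builtin_popcount(all) != nflags)
                printf("two flags share a bit!\n");
        else
                printf("all %d flags occupy distinct bits\n", nflags);
        return 0;
}
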
fasync_cache = kmem_cache_create("fasync_cache",
sizeof(struct fasync_struct), 0, SLAB_PANIC, NULL);
diff --git a/fs/file.c b/fs/file.c
index ad6f094f2eff..1c2972e3a405 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -42,7 +42,7 @@ static void *alloc_fdmem(size_t size)
if (data != NULL)
return data;
}
- return __vmalloc(size, GFP_KERNEL_ACCOUNT | __GFP_HIGHMEM, PAGE_KERNEL);
+ return __vmalloc(size, GFP_KERNEL_ACCOUNT, PAGE_KERNEL);
}
static void __free_fdtable(struct fdtable *fdt)
diff --git a/fs/fuse/control.c b/fs/fuse/control.c
index 6e22748b0704..b9ea99c5b5b3 100644
--- a/fs/fuse/control.c
+++ b/fs/fuse/control.c
@@ -292,7 +292,7 @@ void fuse_ctl_remove_conn(struct fuse_conn *fc)
static int fuse_ctl_fill_super(struct super_block *sb, void *data, int silent)
{
- struct tree_descr empty_descr = {""};
+ static const struct tree_descr empty_descr = {""};
struct fuse_conn *fc;
int err;
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index c2d7f3a92679..c16d00e53264 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -20,6 +20,7 @@
#include <linux/pipe_fs_i.h>
#include <linux/swap.h>
#include <linux/splice.h>
+#include <linux/sched.h>
MODULE_ALIAS_MISCDEV(FUSE_MINOR);
MODULE_ALIAS("devname:fuse");
@@ -45,7 +46,7 @@ static void fuse_request_init(struct fuse_req *req, struct page **pages,
INIT_LIST_HEAD(&req->list);
INIT_LIST_HEAD(&req->intr_entry);
init_waitqueue_head(&req->waitq);
- atomic_set(&req->count, 1);
+ refcount_set(&req->count, 1);
req->pages = pages;
req->page_descs = page_descs;
req->max_pages = npages;
@@ -102,21 +103,20 @@ void fuse_request_free(struct fuse_req *req)
void __fuse_get_request(struct fuse_req *req)
{
- atomic_inc(&req->count);
+ refcount_inc(&req->count);
}
/* Must be called with > 1 refcount */
static void __fuse_put_request(struct fuse_req *req)
{
- BUG_ON(atomic_read(&req->count) < 2);
- atomic_dec(&req->count);
+ refcount_dec(&req->count);
}
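
The surrounding hunks convert the fuse_req, fuse_file and fuse_conn counters from atomic_t to refcount_t; the get/put pattern itself is unchanged. A userspace sketch of that pattern with C11 atomics (refcount_t additionally saturates and warns on overflow/underflow, which this sketch does not attempt):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
        atomic_int count;
        int payload;
};

static struct obj *obj_get(struct obj *o)
{
        atomic_fetch_add(&o->count, 1);            /* like refcount_inc() */
        return o;
}

static void obj_put(struct obj *o)
{
        /* like refcount_dec_and_test(): free when the last ref drops */
        if (atomic_fetch_sub(&o->count, 1) == 1) {
                printf("last reference dropped, freeing\n");
                free(o);
        }
}

int main(void)
{
        struct obj *o = malloc(sizeof(*o));

        if (!o)
                return 1;
        atomic_init(&o->count, 1);                 /* like refcount_set(..., 1) */
        obj_get(o);
        obj_put(o);
        obj_put(o);                                /* frees here */
        return 0;
}
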
-static void fuse_req_init_context(struct fuse_req *req)
+static void fuse_req_init_context(struct fuse_conn *fc, struct fuse_req *req)
{
req->in.h.uid = from_kuid_munged(&init_user_ns, current_fsuid());
req->in.h.gid = from_kgid_munged(&init_user_ns, current_fsgid());
- req->in.h.pid = current->pid;
+ req->in.h.pid = pid_nr_ns(task_pid(current), fc->pid_ns);
}
void fuse_set_initialized(struct fuse_conn *fc)
@@ -163,7 +163,7 @@ static struct fuse_req *__fuse_get_req(struct fuse_conn *fc, unsigned npages,
goto out;
}
- fuse_req_init_context(req);
+ fuse_req_init_context(fc, req);
__set_bit(FR_WAITING, &req->flags);
if (for_background)
__set_bit(FR_BACKGROUND, &req->flags);
@@ -256,7 +256,7 @@ struct fuse_req *fuse_get_req_nofail_nopages(struct fuse_conn *fc,
if (!req)
req = get_reserved_req(fc, file);
- fuse_req_init_context(req);
+ fuse_req_init_context(fc, req);
__set_bit(FR_WAITING, &req->flags);
__clear_bit(FR_BACKGROUND, &req->flags);
return req;
@@ -264,7 +264,7 @@ struct fuse_req *fuse_get_req_nofail_nopages(struct fuse_conn *fc,
void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
{
- if (atomic_dec_and_test(&req->count)) {
+ if (refcount_dec_and_test(&req->count)) {
if (test_bit(FR_BACKGROUND, &req->flags)) {
/*
* We get here in the unlikely case that a background
@@ -1222,6 +1222,9 @@ static ssize_t fuse_dev_do_read(struct fuse_dev *fud, struct file *file,
struct fuse_in *in;
unsigned reqsize;
+ if (task_active_pid_ns(current) != fc->pid_ns)
+ return -EIO;
+
restart:
spin_lock(&fiq->waitq.lock);
err = -EAGAIN;
@@ -1820,6 +1823,9 @@ static ssize_t fuse_dev_do_write(struct fuse_dev *fud,
struct fuse_req *req;
struct fuse_out_header oh;
+ if (task_active_pid_ns(current) != fc->pid_ns)
+ return -EIO;
+
if (nbytes < sizeof(struct fuse_out_header))
return -EINVAL;
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index ec238fb5a584..aa93f09ae6e6 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -58,7 +58,7 @@ struct fuse_file *fuse_file_alloc(struct fuse_conn *fc)
}
INIT_LIST_HEAD(&ff->write_entry);
- atomic_set(&ff->count, 1);
+ refcount_set(&ff->count, 1);
RB_CLEAR_NODE(&ff->polled_node);
init_waitqueue_head(&ff->poll_wait);
@@ -77,7 +77,7 @@ void fuse_file_free(struct fuse_file *ff)
static struct fuse_file *fuse_file_get(struct fuse_file *ff)
{
- atomic_inc(&ff->count);
+ refcount_inc(&ff->count);
return ff;
}
@@ -88,7 +88,7 @@ static void fuse_release_end(struct fuse_conn *fc, struct fuse_req *req)
static void fuse_file_put(struct fuse_file *ff, bool sync)
{
- if (atomic_dec_and_test(&ff->count)) {
+ if (refcount_dec_and_test(&ff->count)) {
struct fuse_req *req = ff->reserved_req;
if (ff->fc->no_open) {
@@ -293,7 +293,7 @@ static int fuse_release(struct inode *inode, struct file *file)
void fuse_sync_release(struct fuse_file *ff, int flags)
{
- WARN_ON(atomic_read(&ff->count) != 1);
+ WARN_ON(refcount_read(&ff->count) > 1);
fuse_prepare_release(ff, flags, FUSE_RELEASE);
/*
* iput(NULL) is a no-op and since the refcount is 1 and everything's
@@ -2083,7 +2083,8 @@ static int fuse_direct_mmap(struct file *file, struct vm_area_struct *vma)
return generic_file_mmap(file, vma);
}
-static int convert_fuse_file_lock(const struct fuse_file_lock *ffl,
+static int convert_fuse_file_lock(struct fuse_conn *fc,
+ const struct fuse_file_lock *ffl,
struct file_lock *fl)
{
switch (ffl->type) {
@@ -2098,7 +2099,14 @@ static int convert_fuse_file_lock(const struct fuse_file_lock *ffl,
fl->fl_start = ffl->start;
fl->fl_end = ffl->end;
- fl->fl_pid = ffl->pid;
+
+ /*
+ * Convert pid into the caller's pid namespace. If the pid
+ * does not map into the namespace fl_pid will get set to 0.
+ */
+ rcu_read_lock();
+ fl->fl_pid = pid_vnr(find_pid_ns(ffl->pid, fc->pid_ns));
+ rcu_read_unlock();
break;
default:
@@ -2147,7 +2155,7 @@ static int fuse_getlk(struct file *file, struct file_lock *fl)
args.out.args[0].value = &outarg;
err = fuse_simple_request(fc, &args);
if (!err)
- err = convert_fuse_file_lock(&outarg.lk, fl);
+ err = convert_fuse_file_lock(fc, &outarg.lk, fl);
return err;
}
@@ -2159,7 +2167,8 @@ static int fuse_setlk(struct file *file, struct file_lock *fl, int flock)
FUSE_ARGS(args);
struct fuse_lk_in inarg;
int opcode = (fl->fl_flags & FL_SLEEP) ? FUSE_SETLKW : FUSE_SETLK;
- pid_t pid = fl->fl_type != F_UNLCK ? current->tgid : 0;
+ struct pid *pid = fl->fl_type != F_UNLCK ? task_tgid(current) : NULL;
+ pid_t pid_nr = pid_nr_ns(pid, fc->pid_ns);
int err;
if (fl->fl_lmops && fl->fl_lmops->lm_grant) {
@@ -2171,7 +2180,10 @@ static int fuse_setlk(struct file *file, struct file_lock *fl, int flock)
if (fl->fl_flags & FL_CLOSE)
return 0;
- fuse_lk_fill(&args, file, fl, opcode, pid, flock, &inarg);
+ if (pid && pid_nr == 0)
+ return -EOVERFLOW;
+
+ fuse_lk_fill(&args, file, fl, opcode, pid_nr, flock, &inarg);
err = fuse_simple_request(fc, &args);
/* locking is restartable */
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index f33341d9501a..1bd7ffdad593 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -24,6 +24,8 @@
#include <linux/workqueue.h>
#include <linux/kref.h>
#include <linux/xattr.h>
+#include <linux/pid_namespace.h>
+#include <linux/refcount.h>
/** Max number of pages that can be used in a single read request */
#define FUSE_MAX_PAGES_PER_REQ 32
@@ -137,7 +139,7 @@ struct fuse_file {
u64 nodeid;
/** Refcount */
- atomic_t count;
+ refcount_t count;
/** FOPEN_* flags returned by open */
u32 open_flags;
@@ -306,7 +308,7 @@ struct fuse_req {
struct list_head intr_entry;
/** refcount */
- atomic_t count;
+ refcount_t count;
/** Unique ID for the interrupt request */
u64 intr_unique;
@@ -448,7 +450,7 @@ struct fuse_conn {
spinlock_t lock;
/** Refcount */
- atomic_t count;
+ refcount_t count;
/** Number of fuse_dev's */
atomic_t dev_count;
@@ -461,6 +463,9 @@ struct fuse_conn {
/** The group id for this mount */
kgid_t group_id;
+ /** The pid namespace for this mount */
+ struct pid_namespace *pid_ns;
+
/** Maximum read size */
unsigned max_read;
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 73cf05135252..5a1b58f8fef4 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -21,6 +21,7 @@
#include <linux/sched.h>
#include <linux/exportfs.h>
#include <linux/posix_acl.h>
+#include <linux/pid_namespace.h>
MODULE_AUTHOR("Miklos Szeredi <miklos@szeredi.hu>");
MODULE_DESCRIPTION("Filesystem in Userspace");
@@ -601,7 +602,7 @@ void fuse_conn_init(struct fuse_conn *fc)
memset(fc, 0, sizeof(*fc));
spin_lock_init(&fc->lock);
init_rwsem(&fc->killsb);
- atomic_set(&fc->count, 1);
+ refcount_set(&fc->count, 1);
atomic_set(&fc->dev_count, 1);
init_waitqueue_head(&fc->blocked_waitq);
init_waitqueue_head(&fc->reserved_req_waitq);
@@ -619,14 +620,16 @@ void fuse_conn_init(struct fuse_conn *fc)
fc->connected = 1;
fc->attr_version = 1;
get_random_bytes(&fc->scramble_key, sizeof(fc->scramble_key));
+ fc->pid_ns = get_pid_ns(task_active_pid_ns(current));
}
EXPORT_SYMBOL_GPL(fuse_conn_init);
void fuse_conn_put(struct fuse_conn *fc)
{
- if (atomic_dec_and_test(&fc->count)) {
+ if (refcount_dec_and_test(&fc->count)) {
if (fc->destroy_req)
fuse_request_free(fc->destroy_req);
+ put_pid_ns(fc->pid_ns);
fc->release(fc);
}
}
@@ -634,7 +637,7 @@ EXPORT_SYMBOL_GPL(fuse_conn_put);
struct fuse_conn *fuse_conn_get(struct fuse_conn *fc)
{
- atomic_inc(&fc->count);
+ refcount_inc(&fc->count);
return fc;
}
EXPORT_SYMBOL_GPL(fuse_conn_get);
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index 01b97c012c6e..4d810be532dd 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -38,11 +38,6 @@ struct metapath {
__u16 mp_list[GFS2_MAX_META_HEIGHT];
};
-struct strip_mine {
- int sm_first;
- unsigned int sm_height;
-};
-
/**
* gfs2_unstuffer_page - unstuff a stuffed inode into a block cached by a page
* @ip: the inode
@@ -253,6 +248,19 @@ static inline unsigned int metapath_branch_start(const struct metapath *mp)
}
/**
+ * metaptr1 - Return the first possible metadata pointer in a metaath buffer
+ * @height: The metadata height (0 = dinode)
+ * @mp: The metapath
+ */
+static inline __be64 *metaptr1(unsigned int height, const struct metapath *mp)
+{
+ struct buffer_head *bh = mp->mp_bh[height];
+ if (height == 0)
+ return ((__be64 *)(bh->b_data + sizeof(struct gfs2_dinode)));
+ return ((__be64 *)(bh->b_data + sizeof(struct gfs2_meta_header)));
+}
+
+/**
* metapointer - Return pointer to start of metadata in a buffer
* @height: The metadata height (0 = dinode)
* @mp: The metapath
@@ -264,10 +272,8 @@ static inline unsigned int metapath_branch_start(const struct metapath *mp)
static inline __be64 *metapointer(unsigned int height, const struct metapath *mp)
{
- struct buffer_head *bh = mp->mp_bh[height];
- unsigned int head_size = (height > 0) ?
- sizeof(struct gfs2_meta_header) : sizeof(struct gfs2_dinode);
- return ((__be64 *)(bh->b_data + head_size)) + mp->mp_list[height];
+ __be64 *p = metaptr1(height, mp);
+ return p + mp->mp_list[height];
}
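
metaptr1()/metapointer() are pointer arithmetic only: skip the block's header (dinode header at height 0, metadata header otherwise) and index into the array of 64-bit block pointers. A sketch with stand-in header sizes, not the real gfs2 structure sizes:

#include <stdio.h>
#include <stdint.h>

#define DINODE_HDR 232                  /* stand-ins for the real header sizes */
#define META_HDR    24

static uint64_t *first_ptr(unsigned char *block, unsigned int height)
{
        size_t hdr = height ? META_HDR : DINODE_HDR;

        return (uint64_t *)(block + hdr);       /* like metaptr1() */
}

int main(void)
{
        static uint64_t storage[512];           /* one 4 KiB "block" */
        unsigned char *block = (unsigned char *)storage;
        unsigned int height = 1, idx = 5;
        uint64_t *p = first_ptr(block, height) + idx;   /* like metapointer() */

        printf("pointer %u lives at byte offset %td\n",
               idx, (unsigned char *)p - block);
        return 0;                               /* offset 24 + 5 * 8 = 64 */
}
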
static void gfs2_metapath_ra(struct gfs2_glock *gl,
@@ -296,6 +302,23 @@ static void gfs2_metapath_ra(struct gfs2_glock *gl,
}
/**
+ * lookup_mp_height - helper function for lookup_metapath
+ * @ip: the inode
+ * @mp: the metapath
+ * @h: the height which needs looking up
+ */
+static int lookup_mp_height(struct gfs2_inode *ip, struct metapath *mp, int h)
+{
+ __be64 *ptr = metapointer(h, mp);
+ u64 dblock = be64_to_cpu(*ptr);
+
+ if (!dblock)
+ return h + 1;
+
+ return gfs2_meta_indirect_buffer(ip, h + 1, dblock, &mp->mp_bh[h + 1]);
+}
+
+/**
* lookup_metapath - Walk the metadata tree to a specific point
* @ip: The inode
* @mp: The metapath
@@ -316,17 +339,10 @@ static int lookup_metapath(struct gfs2_inode *ip, struct metapath *mp)
{
unsigned int end_of_metadata = ip->i_height - 1;
unsigned int x;
- __be64 *ptr;
- u64 dblock;
int ret;
for (x = 0; x < end_of_metadata; x++) {
- ptr = metapointer(x, mp);
- dblock = be64_to_cpu(*ptr);
- if (!dblock)
- return x + 1;
-
- ret = gfs2_meta_indirect_buffer(ip, x+1, dblock, &mp->mp_bh[x+1]);
+ ret = lookup_mp_height(ip, mp, x);
if (ret)
return ret;
}
@@ -334,6 +350,35 @@ static int lookup_metapath(struct gfs2_inode *ip, struct metapath *mp)
return ip->i_height;
}
+/**
+ * fillup_metapath - fill up buffers for the metadata path to a specific height
+ * @ip: The inode
+ * @mp: The metapath
+ * @h: The height to which it should be mapped
+ *
+ * Similar to lookup_metapath, but does lookups for a range of heights
+ *
+ * Returns: error or height of metadata tree
+ */
+
+static int fillup_metapath(struct gfs2_inode *ip, struct metapath *mp, int h)
+{
+ unsigned int start_h = h - 1;
+ int ret;
+
+ if (h) {
+ /* find the first buffer we need to look up. */
+ while (start_h > 0 && mp->mp_bh[start_h] == NULL)
+ start_h--;
+ for (; start_h < h; start_h++) {
+ ret = lookup_mp_height(ip, mp, start_h);
+ if (ret)
+ return ret;
+ }
+ }
+ return ip->i_height;
+}
+
static inline void release_metapath(struct metapath *mp)
{
int i;
@@ -422,6 +467,13 @@ enum alloc_state {
/* ALLOC_UNSTUFF = 3, TBD and rather complicated */
};
+static inline unsigned int hptrs(struct gfs2_sbd *sdp, const unsigned int hgt)
+{
+ if (hgt)
+ return sdp->sd_inptrs;
+ return sdp->sd_diptrs;
+}
+
/**
* gfs2_bmap_alloc - Build a metadata tree of the requested height
* @inode: The GFS2 inode
@@ -620,7 +672,7 @@ int gfs2_block_map(struct inode *inode, sector_t lblock,
BUG_ON(maxlen == 0);
- memset(mp.mp_bh, 0, sizeof(mp.mp_bh));
+ memset(&mp, 0, sizeof(mp));
bmap_lock(ip, create);
clear_buffer_mapped(bh_map);
clear_buffer_new(bh_map);
@@ -702,252 +754,6 @@ int gfs2_extent_map(struct inode *inode, u64 lblock, int *new, u64 *dblock, unsi
}
/**
- * do_strip - Look for a particular layer of the file and strip it off
- * @ip: the inode
- * @dibh: the dinode buffer
- * @bh: A buffer of pointers
- * @top: The first pointer in the buffer
- * @bottom: One more than the last pointer
- * @height: the height this buffer is at
- * @sm: a pointer to a struct strip_mine
- *
- * Returns: errno
- */
-
-static int do_strip(struct gfs2_inode *ip, struct buffer_head *dibh,
- struct buffer_head *bh, __be64 *top, __be64 *bottom,
- unsigned int height, struct strip_mine *sm)
-{
- struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
- struct gfs2_rgrp_list rlist;
- struct gfs2_trans *tr;
- u64 bn, bstart;
- u32 blen, btotal;
- __be64 *p;
- unsigned int rg_blocks = 0;
- int metadata;
- unsigned int revokes = 0;
- int x;
- int error;
- int jblocks_rqsted;
-
- error = gfs2_rindex_update(sdp);
- if (error)
- return error;
-
- if (!*top)
- sm->sm_first = 0;
-
- if (height != sm->sm_height)
- return 0;
-
- if (sm->sm_first) {
- top++;
- sm->sm_first = 0;
- }
-
- metadata = (height != ip->i_height - 1);
- if (metadata)
- revokes = (height) ? sdp->sd_inptrs : sdp->sd_diptrs;
- else if (ip->i_depth)
- revokes = sdp->sd_inptrs;
-
- memset(&rlist, 0, sizeof(struct gfs2_rgrp_list));
- bstart = 0;
- blen = 0;
-
- for (p = top; p < bottom; p++) {
- if (!*p)
- continue;
-
- bn = be64_to_cpu(*p);
-
- if (bstart + blen == bn)
- blen++;
- else {
- if (bstart)
- gfs2_rlist_add(ip, &rlist, bstart);
-
- bstart = bn;
- blen = 1;
- }
- }
-
- if (bstart)
- gfs2_rlist_add(ip, &rlist, bstart);
- else
- goto out; /* Nothing to do */
-
- gfs2_rlist_alloc(&rlist, LM_ST_EXCLUSIVE);
-
- for (x = 0; x < rlist.rl_rgrps; x++) {
- struct gfs2_rgrpd *rgd;
- rgd = rlist.rl_ghs[x].gh_gl->gl_object;
- rg_blocks += rgd->rd_length;
- }
-
- error = gfs2_glock_nq_m(rlist.rl_rgrps, rlist.rl_ghs);
- if (error)
- goto out_rlist;
-
- if (gfs2_rs_active(&ip->i_res)) /* needs to be done with the rgrp glock held */
- gfs2_rs_deltree(&ip->i_res);
-
-restart:
- jblocks_rqsted = rg_blocks + RES_DINODE +
- RES_INDIRECT + RES_STATFS + RES_QUOTA +
- gfs2_struct2blk(sdp, revokes, sizeof(u64));
- if (jblocks_rqsted > atomic_read(&sdp->sd_log_thresh2))
- jblocks_rqsted = atomic_read(&sdp->sd_log_thresh2);
- error = gfs2_trans_begin(sdp, jblocks_rqsted, revokes);
- if (error)
- goto out_rg_gunlock;
-
- tr = current->journal_info;
- down_write(&ip->i_rw_mutex);
-
- gfs2_trans_add_meta(ip->i_gl, dibh);
- gfs2_trans_add_meta(ip->i_gl, bh);
-
- bstart = 0;
- blen = 0;
- btotal = 0;
-
- for (p = top; p < bottom; p++) {
- if (!*p)
- continue;
-
- /* check for max reasonable journal transaction blocks */
- if (tr->tr_num_buf_new + RES_STATFS +
- RES_QUOTA >= atomic_read(&sdp->sd_log_thresh2)) {
- if (rg_blocks >= tr->tr_num_buf_new)
- rg_blocks -= tr->tr_num_buf_new;
- else
- rg_blocks = 0;
- break;
- }
-
- bn = be64_to_cpu(*p);
-
- if (bstart + blen == bn)
- blen++;
- else {
- if (bstart) {
- __gfs2_free_blocks(ip, bstart, blen, metadata);
- btotal += blen;
- }
-
- bstart = bn;
- blen = 1;
- }
-
- *p = 0;
- gfs2_add_inode_blocks(&ip->i_inode, -1);
- }
- if (p == bottom)
- rg_blocks = 0;
-
- if (bstart) {
- __gfs2_free_blocks(ip, bstart, blen, metadata);
- btotal += blen;
- }
-
- gfs2_statfs_change(sdp, 0, +btotal, 0);
- gfs2_quota_change(ip, -(s64)btotal, ip->i_inode.i_uid,
- ip->i_inode.i_gid);
-
- ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode);
-
- gfs2_dinode_out(ip, dibh->b_data);
-
- up_write(&ip->i_rw_mutex);
-
- gfs2_trans_end(sdp);
-
- if (rg_blocks)
- goto restart;
-
-out_rg_gunlock:
- gfs2_glock_dq_m(rlist.rl_rgrps, rlist.rl_ghs);
-out_rlist:
- gfs2_rlist_free(&rlist);
-out:
- return error;
-}
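
Both the removed do_strip() and its replacement sweep_bh_for_rgrps() coalesce consecutive block numbers into extents (bstart/blen) before freeing them, so one call covers a whole run. A userspace sketch of that coalescing loop; the pointer array is invented and a printf stands in for the free call:

#include <stdio.h>
#include <stdint.h>

static void free_extent(uint64_t start, uint64_t len)
{
        printf("free %llu blocks starting at %llu\n",
               (unsigned long long)len, (unsigned long long)start);
}

int main(void)
{
        uint64_t ptrs[] = { 100, 101, 102, 0, 250, 251, 0, 0, 300 };
        uint64_t bstart = 0, blen = 0;

        for (size_t i = 0; i < sizeof(ptrs) / sizeof(ptrs[0]); i++) {
                uint64_t bn = ptrs[i];

                if (!bn)                        /* hole: nothing allocated */
                        continue;
                if (bstart + blen == bn) {
                        blen++;                 /* extend the current extent */
                } else {
                        if (bstart)
                                free_extent(bstart, blen);
                        bstart = bn;            /* start a new extent */
                        blen = 1;
                }
        }
        if (bstart)
                free_extent(bstart, blen);      /* flush the last extent */
        return 0;
}
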
-
-/**
- * recursive_scan - recursively scan through the end of a file
- * @ip: the inode
- * @dibh: the dinode buffer
- * @mp: the path through the metadata to the point to start
- * @height: the height the recursion is at
- * @block: the indirect block to look at
- * @first: 1 if this is the first block
- * @sm: data opaque to this function to pass to @bc
- *
- * When this is first called @height and @block should be zero and
- * @first should be 1.
- *
- * Returns: errno
- */
-
-static int recursive_scan(struct gfs2_inode *ip, struct buffer_head *dibh,
- struct metapath *mp, unsigned int height,
- u64 block, int first, struct strip_mine *sm)
-{
- struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
- struct buffer_head *bh = NULL;
- __be64 *top, *bottom;
- u64 bn;
- int error;
- int mh_size = sizeof(struct gfs2_meta_header);
-
- if (!height) {
- error = gfs2_meta_inode_buffer(ip, &bh);
- if (error)
- return error;
- dibh = bh;
-
- top = (__be64 *)(bh->b_data + sizeof(struct gfs2_dinode)) + mp->mp_list[0];
- bottom = (__be64 *)(bh->b_data + sizeof(struct gfs2_dinode)) + sdp->sd_diptrs;
- } else {
- error = gfs2_meta_indirect_buffer(ip, height, block, &bh);
- if (error)
- return error;
-
- top = (__be64 *)(bh->b_data + mh_size) +
- (first ? mp->mp_list[height] : 0);
-
- bottom = (__be64 *)(bh->b_data + mh_size) + sdp->sd_inptrs;
- }
-
- error = do_strip(ip, dibh, bh, top, bottom, height, sm);
- if (error)
- goto out;
-
- if (height < ip->i_height - 1) {
-
- gfs2_metapath_ra(ip->i_gl, bh, top);
-
- for (; top < bottom; top++, first = 0) {
- if (!*top)
- continue;
-
- bn = be64_to_cpu(*top);
-
- error = recursive_scan(ip, dibh, mp, height + 1, bn,
- first, sm);
- if (error)
- break;
- }
- }
-out:
- brelse(bh);
- return error;
-}
-
-
-/**
* gfs2_block_truncate_page - Deal with zeroing out data for truncate
*
* This is partly borrowed from ext3.
@@ -1106,41 +912,406 @@ out:
return error;
}
-static int trunc_dealloc(struct gfs2_inode *ip, u64 size)
+/**
+ * sweep_bh_for_rgrps - find an rgrp in a meta buffer and free blocks therein
+ * @ip: inode
+ * @rg_gh: holder of resource group glock
+ * @mp: current metapath fully populated with buffers
+ * @btotal: place to keep count of total blocks freed
+ * @hgt: height we're processing
+ * @first: true if this is the first call to this function for this height
+ *
+ * We sweep a metadata buffer (provided by the metapath) for blocks we need to
+ * free, and free them all. However, we do it one rgrp at a time. If this
+ * block has references to multiple rgrps, we break it into individual
+ * transactions. This allows other processes to use the rgrps while we're
+ * focused on a single one, for better concurrency / performance.
+ * At every transaction boundary, we rewrite the inode into the journal.
+ * That way the bitmaps are kept consistent with the inode and we can recover
+ * if we're interrupted by power outages.
+ *
+ * Returns: 0 on success or an error code on failure.
+ *          *btotal has the total number of blocks freed
+ */
+static int sweep_bh_for_rgrps(struct gfs2_inode *ip, struct gfs2_holder *rd_gh,
+ const struct metapath *mp, u32 *btotal, int hgt,
+ bool preserve1)
{
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
- unsigned int height = ip->i_height;
- u64 lblock;
- struct metapath mp;
- int error;
+ struct gfs2_rgrpd *rgd;
+ struct gfs2_trans *tr;
+ struct buffer_head *bh = mp->mp_bh[hgt];
+ __be64 *top, *bottom, *p;
+ int blks_outside_rgrp;
+ u64 bn, bstart, isize_blks;
+ s64 blen; /* needs to be s64 or gfs2_add_inode_blocks breaks */
+ int meta = ((hgt != ip->i_height - 1) ? 1 : 0);
+ int ret = 0;
+ bool buf_in_tr = false; /* buffer was added to transaction */
+
+ if (gfs2_metatype_check(sdp, bh,
+ (hgt ? GFS2_METATYPE_IN : GFS2_METATYPE_DI)))
+ return -EIO;
+
+more_rgrps:
+ blks_outside_rgrp = 0;
+ bstart = 0;
+ blen = 0;
+ top = metapointer(hgt, mp); /* first ptr from metapath */
+ /* If we're keeping some data at the truncation point, we've got to
+ preserve the metadata tree by adding 1 to the starting metapath. */
+ if (preserve1)
+ top++;
+
+ bottom = (__be64 *)(bh->b_data + bh->b_size);
+
+ for (p = top; p < bottom; p++) {
+ if (!*p)
+ continue;
+ bn = be64_to_cpu(*p);
+ if (gfs2_holder_initialized(rd_gh)) {
+ rgd = (struct gfs2_rgrpd *)rd_gh->gh_gl->gl_object;
+ gfs2_assert_withdraw(sdp,
+ gfs2_glock_is_locked_by_me(rd_gh->gh_gl));
+ } else {
+ rgd = gfs2_blk2rgrpd(sdp, bn, false);
+ ret = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE,
+ 0, rd_gh);
+ if (ret)
+ goto out;
+
+ /* Must be done with the rgrp glock held: */
+ if (gfs2_rs_active(&ip->i_res) &&
+ rgd == ip->i_res.rs_rbm.rgd)
+ gfs2_rs_deltree(&ip->i_res);
+ }
+
+ if (!rgrp_contains_block(rgd, bn)) {
+ blks_outside_rgrp++;
+ continue;
+ }
+
+ /* The size of our transactions will be unknown until we
+ actually process all the metadata blocks that relate to
+ the rgrp. So we estimate. We know it can't be more than
+ the dinode's i_blocks and we don't want to exceed the
+ journal flush threshold, sd_log_thresh2. */
+ if (current->journal_info == NULL) {
+ unsigned int jblocks_rqsted, revokes;
+
+ jblocks_rqsted = rgd->rd_length + RES_DINODE +
+ RES_INDIRECT;
+ isize_blks = gfs2_get_inode_blocks(&ip->i_inode);
+ if (isize_blks > atomic_read(&sdp->sd_log_thresh2))
+ jblocks_rqsted +=
+ atomic_read(&sdp->sd_log_thresh2);
+ else
+ jblocks_rqsted += isize_blks;
+ revokes = jblocks_rqsted;
+ if (meta)
+ revokes += hptrs(sdp, hgt);
+ else if (ip->i_depth)
+ revokes += sdp->sd_inptrs;
+ ret = gfs2_trans_begin(sdp, jblocks_rqsted, revokes);
+ if (ret)
+ goto out_unlock;
+ down_write(&ip->i_rw_mutex);
+ }
+ /* check if we will exceed the transaction blocks requested */
+ tr = current->journal_info;
+ if (tr->tr_num_buf_new + RES_STATFS +
+ RES_QUOTA >= atomic_read(&sdp->sd_log_thresh2)) {
+ /* We set blks_outside_rgrp to ensure the loop will
+ be repeated for the same rgrp, but with a new
+ transaction. */
+ blks_outside_rgrp++;
+ /* This next part is tricky. If the buffer was added
+ to the transaction, we've already set some block
+ pointers to 0, so we better follow through and free
+ them, or we will introduce corruption (so break).
+ This may be impossible, or at least rare, but I
+ decided to cover the case regardless.
+
+ If the buffer was not added to the transaction
+ (this call), doing so would exceed our transaction
+ size, so we need to end the transaction and start a
+ new one (so goto). */
+
+ if (buf_in_tr)
+ break;
+ goto out_unlock;
+ }
+
+ gfs2_trans_add_meta(ip->i_gl, bh);
+ buf_in_tr = true;
+ *p = 0;
+ if (bstart + blen == bn) {
+ blen++;
+ continue;
+ }
+ if (bstart) {
+ __gfs2_free_blocks(ip, bstart, (u32)blen, meta);
+ (*btotal) += blen;
+ gfs2_add_inode_blocks(&ip->i_inode, -blen);
+ }
+ bstart = bn;
+ blen = 1;
+ }
+ if (bstart) {
+ __gfs2_free_blocks(ip, bstart, (u32)blen, meta);
+ (*btotal) += blen;
+ gfs2_add_inode_blocks(&ip->i_inode, -blen);
+ }
+out_unlock:
+ if (!ret && blks_outside_rgrp) { /* If buffer still has non-zero blocks
+ outside the rgrp we just processed,
+ do it all over again. */
+ if (current->journal_info) {
+ struct buffer_head *dibh = mp->mp_bh[0];
+
+ /* Every transaction boundary, we rewrite the dinode
+ to keep its di_blocks current in case of failure. */
+ ip->i_inode.i_mtime = ip->i_inode.i_ctime =
+ current_time(&ip->i_inode);
+ gfs2_trans_add_meta(ip->i_gl, dibh);
+ gfs2_dinode_out(ip, dibh->b_data);
+ up_write(&ip->i_rw_mutex);
+ gfs2_trans_end(sdp);
+ }
+ gfs2_glock_dq_uninit(rd_gh);
+ cond_resched();
+ goto more_rgrps;
+ }
+out:
+ return ret;
+}
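
The coalescing of adjacent block numbers into (bstart, blen) extents above is
easy to gloss over, so here is a minimal, self-contained userspace sketch of
just that pattern. free_extent() is a hypothetical stand-in for
__gfs2_free_blocks() and the pointer array is made up for illustration; this
is not GFS2 code, only the run-merging idea.

#include <stdio.h>
#include <stdint.h>

static void free_extent(uint64_t bstart, uint64_t blen, uint64_t *btotal)
{
	printf("free %llu blocks starting at %llu\n",
	       (unsigned long long)blen, (unsigned long long)bstart);
	*btotal += blen;
}

int main(void)
{
	/* Zero entries stand for unallocated pointers and are skipped. */
	uint64_t ptrs[] = { 100, 101, 102, 0, 200, 201, 0, 0, 300 };
	uint64_t bstart = 0, blen = 0, btotal = 0;
	size_t i;

	for (i = 0; i < sizeof(ptrs) / sizeof(ptrs[0]); i++) {
		uint64_t bn = ptrs[i];

		if (!bn)
			continue;
		if (bstart + blen == bn) {	/* bn extends the current run */
			blen++;
			continue;
		}
		if (bstart)			/* flush the previous run */
			free_extent(bstart, blen, &btotal);
		bstart = bn;
		blen = 1;
	}
	if (bstart)				/* flush the final run */
		free_extent(bstart, blen, &btotal);
	printf("total freed: %llu\n", (unsigned long long)btotal);
	return 0;
}

Freeing three extents (100-102, 200-201, 300) instead of six single blocks is
the same trade the sweep makes: fewer calls into the resource-group code for
the common case of contiguously allocated files.
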
+
+/**
+ * find_nonnull_ptr - find a non-null pointer given a metapath and height
+ * @sdp: The GFS2 superblock
+ * @mp: starting metapath
+ * @h: desired height to search
+ *
+ * Assumes the metapath is valid (with buffers) out to height h.
+ *
+ * Returns: true if a non-null pointer was found in the metapath buffer
+ * false if all remaining pointers are NULL in the buffer
+ */
+static bool find_nonnull_ptr(struct gfs2_sbd *sdp, struct metapath *mp,
+ unsigned int h)
+{
+ __be64 *ptr;
+ unsigned int ptrs = hptrs(sdp, h) - 1;
+
+ while (true) {
+ ptr = metapointer(h, mp);
+ if (*ptr) /* if we have a non-null pointer */
+ return true;
+
+ if (mp->mp_list[h] < ptrs)
+ mp->mp_list[h]++;
+ else
+ return false; /* no more pointers in this buffer */
+ }
+}
+
+enum dealloc_states {
+ DEALLOC_MP_FULL = 0, /* Strip a metapath with all buffers read in */
+ DEALLOC_MP_LOWER = 1, /* lower the metapath strip height */
+ DEALLOC_FILL_MP = 2, /* Fill in the metapath to the given height. */
+ DEALLOC_DONE = 3, /* process complete */
+};
- if (!size)
+/**
+ * trunc_dealloc - truncate a file down to a desired size
+ * @ip: inode to truncate
+ * @newsize: The desired size of the file
+ *
+ * This function truncates a file to newsize. It works from the
+ * bottom up, and from the right to the left. In other words, it strips off
+ * the highest layer (data) before stripping any of the metadata. Doing it
+ * this way is best in case the operation is interrupted by power failure, etc.
+ * The dinode is rewritten in every transaction to guarantee integrity.
+ */
+static int trunc_dealloc(struct gfs2_inode *ip, u64 newsize)
+{
+ struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
+ struct metapath mp;
+ struct buffer_head *dibh, *bh;
+ struct gfs2_holder rd_gh;
+ u64 lblock;
+ __u16 nbof[GFS2_MAX_META_HEIGHT]; /* new beginning of truncation */
+ unsigned int strip_h = ip->i_height - 1;
+ u32 btotal = 0;
+ int ret, state;
+ int mp_h; /* metapath buffers are read in to this height */
+ sector_t last_ra = 0;
+ u64 prev_bnr = 0;
+ bool preserve1; /* need to preserve the first meta pointer? */
+
+ if (!newsize)
lblock = 0;
else
- lblock = (size - 1) >> sdp->sd_sb.sb_bsize_shift;
+ lblock = (newsize - 1) >> sdp->sd_sb.sb_bsize_shift;
+ memset(&mp, 0, sizeof(mp));
find_metapath(sdp, lblock, &mp, ip->i_height);
- error = gfs2_rindex_update(sdp);
- if (error)
- return error;
- error = gfs2_quota_hold(ip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE);
- if (error)
- return error;
+ memcpy(&nbof, &mp.mp_list, sizeof(nbof));
+
+ ret = gfs2_meta_inode_buffer(ip, &dibh);
+ if (ret)
+ return ret;
- while (height--) {
- struct strip_mine sm;
- sm.sm_first = !!size;
- sm.sm_height = height;
+ mp.mp_bh[0] = dibh;
+ ret = lookup_metapath(ip, &mp);
+ if (ret == ip->i_height)
+ state = DEALLOC_MP_FULL; /* We have a complete metapath */
+ else
+ state = DEALLOC_FILL_MP; /* deal with partial metapath */
- error = recursive_scan(ip, NULL, &mp, 0, 0, 1, &sm);
- if (error)
+ ret = gfs2_rindex_update(sdp);
+ if (ret)
+ goto out_metapath;
+
+ ret = gfs2_quota_hold(ip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE);
+ if (ret)
+ goto out_metapath;
+ gfs2_holder_mark_uninitialized(&rd_gh);
+
+ mp_h = strip_h;
+
+ while (state != DEALLOC_DONE) {
+ switch (state) {
+ /* Truncate a full metapath at the given strip height.
+ * Note that strip_h == mp_h in order to be in this state. */
+ case DEALLOC_MP_FULL:
+ if (mp_h > 0) { /* issue read-ahead on metadata */
+ __be64 *top;
+
+ bh = mp.mp_bh[mp_h - 1];
+ if (bh->b_blocknr != last_ra) {
+ last_ra = bh->b_blocknr;
+ top = metaptr1(mp_h - 1, &mp);
+ gfs2_metapath_ra(ip->i_gl, bh, top);
+ }
+ }
+ /* If we're truncating to a non-zero size and the mp is
+ at the beginning of the file for the strip height, we
+ need to preserve the first metadata pointer. */
+ preserve1 = (newsize &&
+ (mp.mp_list[mp_h] == nbof[mp_h]));
+ bh = mp.mp_bh[mp_h];
+ gfs2_assert_withdraw(sdp, bh);
+ if (gfs2_assert_withdraw(sdp,
+ prev_bnr != bh->b_blocknr)) {
+ printk(KERN_EMERG "GFS2: fsid=%s:inode %llu, "
+ "block:%llu, i_h:%u, s_h:%u, mp_h:%u\n",
+ sdp->sd_fsname,
+ (unsigned long long)ip->i_no_addr,
+ prev_bnr, ip->i_height, strip_h, mp_h);
+ }
+ prev_bnr = bh->b_blocknr;
+ ret = sweep_bh_for_rgrps(ip, &rd_gh, &mp, &btotal,
+ mp_h, preserve1);
+ /* If we hit an error or have just swept the dinode
+ buffer, exit. */
+ if (ret || !mp_h) {
+ state = DEALLOC_DONE;
+ break;
+ }
+ state = DEALLOC_MP_LOWER;
+ break;
+
+ /* lower the metapath strip height */
+ case DEALLOC_MP_LOWER:
+ /* We're done with the current buffer, so release it,
+ unless it's the dinode buffer. Then back up to the
+ previous pointer. */
+ if (mp_h) {
+ brelse(mp.mp_bh[mp_h]);
+ mp.mp_bh[mp_h] = NULL;
+ }
+ /* If we can't get any lower in height, we've stripped
+ off all we can. Next step is to back up and start
+ stripping the previous level of metadata. */
+ if (mp_h == 0) {
+ strip_h--;
+ memcpy(&mp.mp_list, &nbof, sizeof(nbof));
+ mp_h = strip_h;
+ state = DEALLOC_FILL_MP;
+ break;
+ }
+ mp.mp_list[mp_h] = 0;
+ mp_h--; /* search one metadata height down */
+ if (mp.mp_list[mp_h] >= hptrs(sdp, mp_h) - 1)
+ break; /* loop around in the same state */
+ mp.mp_list[mp_h]++;
+ /* Here we've found a part of the metapath that is not
+ * allocated. We need to search at that height for the
+ * next non-null pointer. */
+ if (find_nonnull_ptr(sdp, &mp, mp_h)) {
+ state = DEALLOC_FILL_MP;
+ mp_h++;
+ }
+ /* No more non-null pointers at this height. Back up
+ to the previous height and try again. */
+ break; /* loop around in the same state */
+
+ /* Fill the metapath with buffers to the given height. */
+ case DEALLOC_FILL_MP:
+ /* Fill the buffers out to the current height. */
+ ret = fillup_metapath(ip, &mp, mp_h);
+ if (ret < 0)
+ goto out;
+
+ /* If buffers found for the entire strip height */
+ if ((ret == ip->i_height) && (mp_h == strip_h)) {
+ state = DEALLOC_MP_FULL;
+ break;
+ }
+ if (ret < ip->i_height) /* We have a partial height */
+ mp_h = ret - 1;
+
+ /* If we find a non-null block pointer, crawl a bit
+ higher up in the metapath and try again, otherwise
+ we need to look lower for a new starting point. */
+ if (find_nonnull_ptr(sdp, &mp, mp_h))
+ mp_h++;
+ else
+ state = DEALLOC_MP_LOWER;
break;
+ }
}
- gfs2_quota_unhold(ip);
+ if (btotal) {
+ if (current->journal_info == NULL) {
+ ret = gfs2_trans_begin(sdp, RES_DINODE + RES_STATFS +
+ RES_QUOTA, 0);
+ if (ret)
+ goto out;
+ down_write(&ip->i_rw_mutex);
+ }
+ gfs2_statfs_change(sdp, 0, +btotal, 0);
+ gfs2_quota_change(ip, -(s64)btotal, ip->i_inode.i_uid,
+ ip->i_inode.i_gid);
+ ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode);
+ gfs2_trans_add_meta(ip->i_gl, dibh);
+ gfs2_dinode_out(ip, dibh->b_data);
+ up_write(&ip->i_rw_mutex);
+ gfs2_trans_end(sdp);
+ }
- return error;
+out:
+ if (gfs2_holder_initialized(&rd_gh))
+ gfs2_glock_dq_uninit(&rd_gh);
+ if (current->journal_info) {
+ up_write(&ip->i_rw_mutex);
+ gfs2_trans_end(sdp);
+ cond_resched();
+ }
+ gfs2_quota_unhold(ip);
+out_metapath:
+ release_metapath(&mp);
+ return ret;
}
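
A small aside on the lblock computation near the top of trunc_dealloc(): the
last byte that survives the truncation is newsize - 1, so shifting that by the
block-size shift gives the last logical block that must be kept. A throwaway
userspace check (the 4096-byte block size is illustrative, not read from a
real superblock):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int bsize_shift = 12;	/* 4096-byte blocks, illustrative */
	uint64_t sizes[] = { 1, 4096, 4097, 8192 };
	size_t i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		uint64_t newsize = sizes[i];
		uint64_t lblock = newsize ? (newsize - 1) >> bsize_shift : 0;

		printf("newsize %5llu -> last kept block %llu\n",
		       (unsigned long long)newsize,
		       (unsigned long long)lblock);
	}
	return 0;
}

Truncating to exactly 4096 bytes keeps only block 0, while 4097 keeps block 1
as well; using newsize instead of newsize - 1 would strip one block too few.
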
static int trunc_end(struct gfs2_inode *ip)
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
index 6fe2a59c6a9a..c2062a108d19 100644
--- a/fs/gfs2/file.c
+++ b/fs/gfs2/file.c
@@ -911,11 +911,15 @@ out_qunlock:
static long gfs2_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
{
struct inode *inode = file_inode(file);
+ struct gfs2_sbd *sdp = GFS2_SB(inode);
struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_holder gh;
int ret;
- if ((mode & ~FALLOC_FL_KEEP_SIZE) || gfs2_is_jdata(ip))
+ if (mode & ~FALLOC_FL_KEEP_SIZE)
+ return -EOPNOTSUPP;
+ /* fallocate is needed by gfs2_grow to reserve space in the rindex */
+ if (gfs2_is_jdata(ip) && inode != sdp->sd_rindex)
return -EOPNOTSUPP;
inode_lock(inode);
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index ec0848fcca02..959a19ced4d5 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -73,7 +73,7 @@ static DEFINE_SPINLOCK(lru_lock);
static struct rhashtable_params ht_parms = {
.nelem_hint = GFS2_GL_HASH_SIZE * 3 / 4,
- .key_len = sizeof(struct lm_lockname),
+ .key_len = offsetofend(struct lm_lockname, ln_type),
.key_offset = offsetof(struct gfs2_glock, gl_name),
.head_offset = offsetof(struct gfs2_glock, gl_node),
};
@@ -449,6 +449,9 @@ __acquires(&gl->gl_lockref.lock)
unsigned int lck_flags = (unsigned int)(gh ? gh->gh_flags : 0);
int ret;
+ if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)) &&
+ target != LM_ST_UNLOCKED)
+ return;
lck_flags &= (LM_FLAG_TRY | LM_FLAG_TRY_1CB | LM_FLAG_NOEXP |
LM_FLAG_PRIORITY);
GLOCK_BUG_ON(gl, gl->gl_state == target);
@@ -484,7 +487,8 @@ __acquires(&gl->gl_lockref.lock)
}
else if (ret) {
pr_err("lm_lock ret %d\n", ret);
- GLOCK_BUG_ON(gl, 1);
+ GLOCK_BUG_ON(gl, !test_bit(SDF_SHUTDOWN,
+ &sdp->sd_flags));
}
} else { /* lock_nolock */
finish_xmote(gl, target);
@@ -653,10 +657,10 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
struct lm_lockname name = { .ln_number = number,
.ln_type = glops->go_type,
.ln_sbd = sdp };
- struct gfs2_glock *gl, *tmp = NULL;
+ struct gfs2_glock *gl, *tmp;
struct address_space *mapping;
struct kmem_cache *cachep;
- int ret, tries = 0;
+ int ret = 0;
rcu_read_lock();
gl = rhashtable_lookup_fast(&gl_hash_table, &name, ht_parms);
@@ -721,35 +725,32 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
}
again:
- ret = rhashtable_lookup_insert_fast(&gl_hash_table, &gl->gl_node,
- ht_parms);
- if (ret == 0) {
+ rcu_read_lock();
+ tmp = rhashtable_lookup_get_insert_fast(&gl_hash_table, &gl->gl_node,
+ ht_parms);
+ if (!tmp) {
*glp = gl;
- return 0;
+ goto out;
}
-
- if (ret == -EEXIST) {
- ret = 0;
- rcu_read_lock();
- tmp = rhashtable_lookup_fast(&gl_hash_table, &name, ht_parms);
- if (tmp == NULL || !lockref_get_not_dead(&tmp->gl_lockref)) {
- if (++tries < 100) {
- rcu_read_unlock();
- cond_resched();
- goto again;
- }
- tmp = NULL;
- ret = -ENOMEM;
- }
- rcu_read_unlock();
- } else {
- WARN_ON_ONCE(ret);
+ if (IS_ERR(tmp)) {
+ ret = PTR_ERR(tmp);
+ goto out_free;
}
+ if (lockref_get_not_dead(&tmp->gl_lockref)) {
+ *glp = tmp;
+ goto out_free;
+ }
+ rcu_read_unlock();
+ cond_resched();
+ goto again;
+
+out_free:
kfree(gl->gl_lksb.sb_lvbptr);
kmem_cache_free(cachep, gl);
atomic_dec(&sdp->sd_glock_disposal);
- *glp = tmp;
+out:
+ rcu_read_unlock();
return ret;
}
@@ -1918,10 +1919,10 @@ static const struct seq_operations gfs2_sbstats_seq_ops = {
#define GFS2_SEQ_GOODSIZE min(PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER, 65536UL)
-static int gfs2_glocks_open(struct inode *inode, struct file *file)
+static int __gfs2_glocks_open(struct inode *inode, struct file *file,
+ const struct seq_operations *ops)
{
- int ret = seq_open_private(file, &gfs2_glock_seq_ops,
- sizeof(struct gfs2_glock_iter));
+ int ret = seq_open_private(file, ops, sizeof(struct gfs2_glock_iter));
if (ret == 0) {
struct seq_file *seq = file->private_data;
struct gfs2_glock_iter *gi = seq->private;
@@ -1932,11 +1933,16 @@ static int gfs2_glocks_open(struct inode *inode, struct file *file)
if (seq->buf)
seq->size = GFS2_SEQ_GOODSIZE;
gi->gl = NULL;
- ret = rhashtable_walk_init(&gl_hash_table, &gi->hti, GFP_KERNEL);
+ rhashtable_walk_enter(&gl_hash_table, &gi->hti);
}
return ret;
}
+static int gfs2_glocks_open(struct inode *inode, struct file *file)
+{
+ return __gfs2_glocks_open(inode, file, &gfs2_glock_seq_ops);
+}
+
static int gfs2_glocks_release(struct inode *inode, struct file *file)
{
struct seq_file *seq = file->private_data;
@@ -1949,20 +1955,7 @@ static int gfs2_glocks_release(struct inode *inode, struct file *file)
static int gfs2_glstats_open(struct inode *inode, struct file *file)
{
- int ret = seq_open_private(file, &gfs2_glstats_seq_ops,
- sizeof(struct gfs2_glock_iter));
- if (ret == 0) {
- struct seq_file *seq = file->private_data;
- struct gfs2_glock_iter *gi = seq->private;
- gi->sdp = inode->i_private;
- gi->last_pos = 0;
- seq->buf = kmalloc(GFS2_SEQ_GOODSIZE, GFP_KERNEL | __GFP_NOWARN);
- if (seq->buf)
- seq->size = GFS2_SEQ_GOODSIZE;
- gi->gl = NULL;
- ret = rhashtable_walk_init(&gl_hash_table, &gi->hti, GFP_KERNEL);
- }
- return ret;
+ return __gfs2_glocks_open(inode, file, &gfs2_glstats_seq_ops);
}
static int gfs2_sbstats_open(struct inode *inode, struct file *file)
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index 511e1ed7e2de..b7cf65d13561 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -203,11 +203,15 @@ enum {
DFL_DLM_RECOVERY = 6,
};
+/*
+ * We are using struct lm_lockname as an rhashtable key. Avoid holes within
+ * the struct; padding at the end is fine.
+ */
struct lm_lockname {
- struct gfs2_sbd *ln_sbd;
u64 ln_number;
+ struct gfs2_sbd *ln_sbd;
unsigned int ln_type;
-} __packed __aligned(sizeof(int));
+};
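
Why the key length in glock.c switches from sizeof() to offsetofend() once the
struct is no longer __packed: sizeof() would drag the uninitialised trailing
padding into the rhashtable key comparison. A minimal userspace sketch (the
struct is a stand-in with the same member order, and offsetofend is defined
locally to mirror the kernel macro):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define offsetofend(TYPE, MEMBER) \
	(offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER))

struct sbd;				/* opaque, like struct gfs2_sbd */

struct lockname {			/* same member order as lm_lockname */
	uint64_t ln_number;
	struct sbd *ln_sbd;
	unsigned int ln_type;
};

int main(void)
{
	/*
	 * On LP64, sizeof() is 24 because of 4 bytes of trailing padding
	 * that nothing ever initialises; offsetofend() of the last member
	 * is 20 and stops right after the real key data.
	 */
	printf("sizeof      = %zu\n", sizeof(struct lockname));
	printf("offsetofend = %zu\n", offsetofend(struct lockname, ln_type));
	return 0;
}

Keeping the widest member first is what lets the comment above promise no
holes within the struct; only trailing padding remains, and offsetofend()
excludes it from the hash key.
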
#define lm_name_equal(name1, name2) \
(((name1)->ln_number == (name2)->ln_number) && \
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index e279c3ce27be..9f605ea4810c 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -202,8 +202,7 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
fail_refresh:
ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
ip->i_iopen_gh.gh_gl->gl_object = NULL;
- gfs2_glock_dq_wait(&ip->i_iopen_gh);
- gfs2_holder_uninit(&ip->i_iopen_gh);
+ gfs2_glock_dq_uninit(&ip->i_iopen_gh);
fail_put:
if (io_gl)
gfs2_glock_put(io_gl);
@@ -667,6 +666,7 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
ip->i_height = 0;
ip->i_depth = 0;
ip->i_entries = 0;
+ ip->i_no_addr = 0; /* Temporarily zero until real addr is assigned */
switch(mode & S_IFMT) {
case S_IFREG:
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index 86ccc0159393..83c9909ff14a 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -483,13 +483,6 @@ void gfs2_rgrp_verify(struct gfs2_rgrpd *rgd)
}
}
-static inline int rgrp_contains_block(struct gfs2_rgrpd *rgd, u64 block)
-{
- u64 first = rgd->rd_data0;
- u64 last = first + rgd->rd_data;
- return first <= block && block < last;
-}
-
/**
* gfs2_blk2rgrpd - Find resource group for a given data/meta block number
* @sdp: The GFS2 superblock
diff --git a/fs/gfs2/rgrp.h b/fs/gfs2/rgrp.h
index 66b51cf66dfa..e90478e2f545 100644
--- a/fs/gfs2/rgrp.h
+++ b/fs/gfs2/rgrp.h
@@ -83,5 +83,12 @@ static inline bool gfs2_rs_active(const struct gfs2_blkreserv *rs)
return rs && !RB_EMPTY_NODE(&rs->rs_node);
}
+static inline int rgrp_contains_block(struct gfs2_rgrpd *rgd, u64 block)
+{
+ u64 first = rgd->rd_data0;
+ u64 last = first + rgd->rd_data;
+ return first <= block && block < last;
+}
+
extern void check_and_update_goal(struct gfs2_inode *ip);
#endif /* __RGRP_DOT_H__ */
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index 361796a84fce..29b0473f6e74 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -793,7 +793,8 @@ static void gfs2_dirty_inode(struct inode *inode, int flags)
if (!(flags & (I_DIRTY_DATASYNC|I_DIRTY_SYNC)))
return;
-
+ if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
+ return;
if (!gfs2_glock_is_locked_by_me(ip->i_gl)) {
ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
if (ret) {
@@ -1538,8 +1539,7 @@ static void gfs2_evict_inode(struct inode *inode)
error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_SKIP, &gh);
if (unlikely(error)) {
ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
- gfs2_glock_dq_wait(&ip->i_iopen_gh);
- gfs2_holder_uninit(&ip->i_iopen_gh);
+ gfs2_glock_dq_uninit(&ip->i_iopen_gh);
goto out;
}
@@ -1617,7 +1617,7 @@ out_unlock:
if (gfs2_holder_initialized(&ip->i_iopen_gh)) {
if (test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags)) {
ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
- gfs2_glock_dq_wait(&ip->i_iopen_gh);
+ gfs2_glock_dq(&ip->i_iopen_gh);
}
gfs2_holder_uninit(&ip->i_iopen_gh);
}
@@ -1639,8 +1639,7 @@ out:
if (gfs2_holder_initialized(&ip->i_iopen_gh)) {
ip->i_iopen_gh.gh_gl->gl_object = NULL;
ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
- gfs2_glock_dq_wait(&ip->i_iopen_gh);
- gfs2_holder_uninit(&ip->i_iopen_gh);
+ gfs2_glock_dq_uninit(&ip->i_iopen_gh);
}
}
diff --git a/fs/hfs/extent.c b/fs/hfs/extent.c
index e33a0d36a93e..5d0182654580 100644
--- a/fs/hfs/extent.c
+++ b/fs/hfs/extent.c
@@ -485,8 +485,8 @@ void hfs_file_truncate(struct inode *inode)
/* XXX: Can use generic_cont_expand? */
size = inode->i_size - 1;
- res = pagecache_write_begin(NULL, mapping, size+1, 0,
- AOP_FLAG_UNINTERRUPTIBLE, &page, &fsdata);
+ res = pagecache_write_begin(NULL, mapping, size+1, 0, 0,
+ &page, &fsdata);
if (!res) {
res = pagecache_write_end(NULL, mapping, size+1, 0, 0,
page, fsdata);
diff --git a/fs/hfsplus/extents.c b/fs/hfsplus/extents.c
index feca524ce2a5..a3eb640b4f8f 100644
--- a/fs/hfsplus/extents.c
+++ b/fs/hfsplus/extents.c
@@ -545,9 +545,8 @@ void hfsplus_file_truncate(struct inode *inode)
void *fsdata;
loff_t size = inode->i_size;
- res = pagecache_write_begin(NULL, mapping, size, 0,
- AOP_FLAG_UNINTERRUPTIBLE,
- &page, &fsdata);
+ res = pagecache_write_begin(NULL, mapping, size, 0, 0,
+ &page, &fsdata);
if (res)
return;
res = pagecache_write_end(NULL, mapping, size,
diff --git a/fs/inode.c b/fs/inode.c
index 131b2bcebc48..db5914783a71 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -119,7 +119,7 @@ static int no_open(struct inode *inode, struct file *file)
}
/**
- * inode_init_always - perform inode structure intialisation
+ * inode_init_always - perform inode structure initialisation
* @sb: superblock inode belongs to
* @inode: inode to initialise
*
@@ -402,6 +402,8 @@ static void inode_lru_list_add(struct inode *inode)
{
if (list_lru_add(&inode->i_sb->s_inode_lru, &inode->i_lru))
this_cpu_inc(nr_unused);
+ else
+ inode->i_state |= I_REFERENCED;
}
/*
@@ -1489,7 +1491,6 @@ static void iput_final(struct inode *inode)
drop = generic_drop_inode(inode);
if (!drop && (sb->s_flags & MS_ACTIVE)) {
- inode->i_state |= I_REFERENCED;
inode_add_lru(inode);
spin_unlock(&inode->i_lock);
return;
diff --git a/fs/internal.h b/fs/internal.h
index 076751d90ba2..9676fe11c093 100644
--- a/fs/internal.h
+++ b/fs/internal.h
@@ -126,8 +126,6 @@ static inline bool atime_needs_update_rcu(const struct path *path,
return __atime_needs_update(path, inode, true);
}
-extern bool atime_needs_update_rcu(const struct path *, struct inode *);
-
/*
* fs-writeback.c
*/
diff --git a/fs/iomap.c b/fs/iomap.c
index 1c25ae30500e..4b10892967a5 100644
--- a/fs/iomap.c
+++ b/fs/iomap.c
@@ -158,12 +158,6 @@ iomap_write_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
ssize_t written = 0;
unsigned int flags = AOP_FLAG_NOFS;
- /*
- * Copies from kernel address space cannot fail (NFSD is a big user).
- */
- if (!iter_is_iovec(i))
- flags |= AOP_FLAG_UNINTERRUPTIBLE;
-
do {
struct page *page;
unsigned long offset; /* Offset into pagecache page */
@@ -291,8 +285,7 @@ iomap_dirty_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
return PTR_ERR(rpage);
status = iomap_write_begin(inode, pos, bytes,
- AOP_FLAG_NOFS | AOP_FLAG_UNINTERRUPTIBLE,
- &page, iomap);
+ AOP_FLAG_NOFS, &page, iomap);
put_page(rpage);
if (unlikely(status))
return status;
@@ -343,8 +336,8 @@ static int iomap_zero(struct inode *inode, loff_t pos, unsigned offset,
struct page *page;
int status;
- status = iomap_write_begin(inode, pos, bytes,
- AOP_FLAG_UNINTERRUPTIBLE | AOP_FLAG_NOFS, &page, iomap);
+ status = iomap_write_begin(inode, pos, bytes, AOP_FLAG_NOFS, &page,
+ iomap);
if (status)
return status;
@@ -360,7 +353,8 @@ static int iomap_dax_zero(loff_t pos, unsigned offset, unsigned bytes,
sector_t sector = iomap->blkno +
(((pos & ~(PAGE_SIZE - 1)) - iomap->offset) >> 9);
- return __dax_zero_page_range(iomap->bdev, sector, offset, bytes);
+ return __dax_zero_page_range(iomap->bdev, iomap->dax_dev, sector,
+ offset, bytes);
}
static loff_t
@@ -909,6 +903,9 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
break;
}
pos += ret;
+
+ if (iov_iter_rw(iter) == READ && pos >= dio->i_size)
+ break;
} while ((count = iov_iter_count(iter)) > 0);
blk_finish_plug(&plug);
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index c43fe83ee708..ebad34266bcf 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -700,8 +700,21 @@ int jbd2_log_wait_commit(journal_t *journal, tid_t tid)
{
int err = 0;
- jbd2_might_wait_for_commit(journal);
read_lock(&journal->j_state_lock);
+#ifdef CONFIG_PROVE_LOCKING
+ /*
+ * Some callers make sure the transaction is already committing and in
+ * that case we cannot block on open handles anymore. So don't warn in
+ * that case.
+ */
+ if (tid_gt(tid, journal->j_commit_sequence) &&
+ (!journal->j_committing_transaction ||
+ journal->j_committing_transaction->t_tid != tid)) {
+ read_unlock(&journal->j_state_lock);
+ jbd2_might_wait_for_commit(journal);
+ read_lock(&journal->j_state_lock);
+ }
+#endif
#ifdef CONFIG_JBD2_DEBUG
if (!tid_geq(journal->j_commit_request, tid)) {
printk(KERN_ERR
@@ -922,7 +935,8 @@ int __jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block)
* space and if we lose sb update during power failure we'd replay
* old transaction with possibly newly overwritten data.
*/
- ret = jbd2_journal_update_sb_log_tail(journal, tid, block, REQ_FUA);
+ ret = jbd2_journal_update_sb_log_tail(journal, tid, block,
+ REQ_SYNC | REQ_FUA);
if (ret)
goto out;
@@ -1323,7 +1337,7 @@ static int journal_reset(journal_t *journal)
jbd2_journal_update_sb_log_tail(journal,
journal->j_tail_sequence,
journal->j_tail,
- REQ_FUA);
+ REQ_SYNC | REQ_FUA);
mutex_unlock(&journal->j_checkpoint_mutex);
}
return jbd2_journal_start_thread(journal);
@@ -1463,7 +1477,7 @@ void jbd2_journal_update_sb_errno(journal_t *journal)
sb->s_errno = cpu_to_be32(journal->j_errno);
read_unlock(&journal->j_state_lock);
- jbd2_write_superblock(journal, REQ_FUA);
+ jbd2_write_superblock(journal, REQ_SYNC | REQ_FUA);
}
EXPORT_SYMBOL(jbd2_journal_update_sb_errno);
@@ -1730,7 +1744,7 @@ int jbd2_journal_destroy(journal_t *journal)
write_unlock(&journal->j_state_lock);
jbd2_mark_journal_empty(journal,
- REQ_PREFLUSH | REQ_FUA);
+ REQ_SYNC | REQ_PREFLUSH | REQ_FUA);
mutex_unlock(&journal->j_checkpoint_mutex);
} else
err = -EIO;
@@ -1989,7 +2003,7 @@ int jbd2_journal_flush(journal_t *journal)
* the magic code for a fully-recovered superblock. Any future
* commits of data to the journal will restore the current
* s_start value. */
- jbd2_mark_journal_empty(journal, REQ_FUA);
+ jbd2_mark_journal_empty(journal, REQ_SYNC | REQ_FUA);
mutex_unlock(&journal->j_checkpoint_mutex);
write_lock(&journal->j_state_lock);
J_ASSERT(!journal->j_running_transaction);
@@ -2035,7 +2049,7 @@ int jbd2_journal_wipe(journal_t *journal, int write)
if (write) {
/* Lock to make assertions happy... */
mutex_lock(&journal->j_checkpoint_mutex);
- jbd2_mark_journal_empty(journal, REQ_FUA);
+ jbd2_mark_journal_empty(journal, REQ_SYNC | REQ_FUA);
mutex_unlock(&journal->j_checkpoint_mutex);
}
@@ -2349,7 +2363,7 @@ static int jbd2_journal_init_journal_head_cache(void)
jbd2_journal_head_cache = kmem_cache_create("jbd2_journal_head",
sizeof(struct journal_head),
0, /* offset */
- SLAB_TEMPORARY | SLAB_DESTROY_BY_RCU,
+ SLAB_TEMPORARY | SLAB_TYPESAFE_BY_RCU,
NULL); /* ctor */
retval = 0;
if (!jbd2_journal_head_cache) {
diff --git a/fs/libfs.c b/fs/libfs.c
index a8b62e5d43a9..a04395334bb1 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -507,7 +507,7 @@ EXPORT_SYMBOL(simple_write_end);
* to pass it an appropriate max_reserved value to avoid collisions.
*/
int simple_fill_super(struct super_block *s, unsigned long magic,
- struct tree_descr *files)
+ const struct tree_descr *files)
{
struct inode *inode;
struct dentry *root;
diff --git a/fs/namei.c b/fs/namei.c
index 9a7f8bd748d8..7286f87ce863 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -4766,7 +4766,7 @@ int __page_symlink(struct inode *inode, const char *symname, int len, int nofs)
struct page *page;
void *fsdata;
int err;
- unsigned int flags = AOP_FLAG_UNINTERRUPTIBLE;
+ unsigned int flags = 0;
if (nofs)
flags |= AOP_FLAG_NOFS;
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index 9dc65d7ae754..7b38fedb7e03 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -139,7 +139,7 @@ struct nfs_mount_request {
};
struct nfs_mount_info {
- int (*fill_super)(struct super_block *, struct nfs_mount_info *);
+ void (*fill_super)(struct super_block *, struct nfs_mount_info *);
int (*set_security)(struct super_block *, struct dentry *, struct nfs_mount_info *);
struct nfs_parsed_mount_data *parsed;
struct nfs_clone_mount *cloned;
@@ -407,7 +407,7 @@ struct dentry *nfs_fs_mount(struct file_system_type *, int, const char *, void *
struct dentry * nfs_xdev_mount_common(struct file_system_type *, int,
const char *, struct nfs_mount_info *);
void nfs_kill_super(struct super_block *);
-int nfs_fill_super(struct super_block *, struct nfs_mount_info *);
+void nfs_fill_super(struct super_block *, struct nfs_mount_info *);
extern struct rpc_stat nfs_rpcstat;
@@ -458,7 +458,7 @@ extern void nfs_read_prepare(struct rpc_task *task, void *calldata);
extern void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio);
/* super.c */
-int nfs_clone_super(struct super_block *, struct nfs_mount_info *);
+void nfs_clone_super(struct super_block *, struct nfs_mount_info *);
void nfs_umount_begin(struct super_block *);
int nfs_statfs(struct dentry *, struct kstatfs *);
int nfs_show_options(struct seq_file *, struct dentry *);
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index dc69314d455e..2f3822a4a7d5 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -2321,11 +2321,10 @@ inline void nfs_initialise_sb(struct super_block *sb)
/*
* Finish setting up an NFS2/3 superblock
*/
-int nfs_fill_super(struct super_block *sb, struct nfs_mount_info *mount_info)
+void nfs_fill_super(struct super_block *sb, struct nfs_mount_info *mount_info)
{
struct nfs_parsed_mount_data *data = mount_info->parsed;
struct nfs_server *server = NFS_SB(sb);
- int ret;
sb->s_blocksize_bits = 0;
sb->s_blocksize = 0;
@@ -2343,21 +2342,13 @@ int nfs_fill_super(struct super_block *sb, struct nfs_mount_info *mount_info)
}
nfs_initialise_sb(sb);
-
- ret = super_setup_bdi_name(sb, "%u:%u", MAJOR(server->s_dev),
- MINOR(server->s_dev));
- if (ret)
- return ret;
- sb->s_bdi->ra_pages = server->rpages * NFS_MAX_READAHEAD;
- return 0;
-
}
EXPORT_SYMBOL_GPL(nfs_fill_super);
/*
* Finish setting up a cloned NFS2/3/4 superblock
*/
-int nfs_clone_super(struct super_block *sb, struct nfs_mount_info *mount_info)
+void nfs_clone_super(struct super_block *sb, struct nfs_mount_info *mount_info)
{
const struct super_block *old_sb = mount_info->cloned->sb;
struct nfs_server *server = NFS_SB(sb);
@@ -2377,10 +2368,6 @@ int nfs_clone_super(struct super_block *sb, struct nfs_mount_info *mount_info)
}
nfs_initialise_sb(sb);
-
- sb->s_bdi = bdi_get(old_sb->s_bdi);
-
- return 0;
}
static int nfs_compare_mount_options(const struct super_block *s, const struct nfs_server *b, int flags)
@@ -2600,14 +2587,19 @@ struct dentry *nfs_fs_mount_common(struct nfs_server *server,
nfs_free_server(server);
server = NULL;
} else {
+ error = super_setup_bdi_name(s, "%u:%u", MAJOR(server->s_dev),
+ MINOR(server->s_dev));
+ if (error) {
+ mntroot = ERR_PTR(error);
+ goto error_splat_super;
+ }
+ s->s_bdi->ra_pages = server->rpages * NFS_MAX_READAHEAD;
server->super = s;
}
if (!s->s_root) {
/* initial superblock/root creation */
- error = mount_info->fill_super(s, mount_info);
- if (error)
- goto error_splat_super;
+ mount_info->fill_super(s, mount_info);
nfs_get_cache_cookie(s, mount_info->parsed, mount_info->cloned);
}
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index 8bf8f667a8cf..6493df6b1bd5 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -1146,7 +1146,7 @@ static ssize_t write_v4_end_grace(struct file *file, char *buf, size_t size)
static int nfsd_fill_super(struct super_block * sb, void * data, int silent)
{
- static struct tree_descr nfsd_files[] = {
+ static const struct tree_descr nfsd_files[] = {
[NFSD_List] = {"exports", &exports_nfsd_operations, S_IRUGO},
[NFSD_Export_features] = {"export_features",
&export_features_operations, S_IRUGO},
diff --git a/fs/nsfs.c b/fs/nsfs.c
index 323f492e0822..f3db56e83dd2 100644
--- a/fs/nsfs.c
+++ b/fs/nsfs.c
@@ -196,9 +196,11 @@ int ns_get_name(char *buf, size_t size, struct task_struct *task,
{
struct ns_common *ns;
int res = -ENOENT;
+ const char *name;
ns = ns_ops->get(task);
if (ns) {
- res = snprintf(buf, size, "%s:[%u]", ns_ops->name, ns->inum);
+ name = ns_ops->real_ns_name ? : ns_ops->name;
+ res = snprintf(buf, size, "%s:[%u]", name, ns->inum);
ns_ops->put(ns);
}
return res;
diff --git a/fs/open.c b/fs/open.c
index 4d23f729dcc6..6d2d2b33ac54 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -193,7 +193,8 @@ static long do_sys_ftruncate(unsigned int fd, loff_t length, int small)
goto out_putf;
error = -EPERM;
- if (IS_APPEND(inode))
+ /* Check IS_APPEND on real upper inode */
+ if (IS_APPEND(file_inode(f.file)))
goto out_putf;
sb_start_write(inode->i_sb);
@@ -900,6 +901,12 @@ static inline int build_open_flags(int flags, umode_t mode, struct open_flags *o
int lookup_flags = 0;
int acc_mode = ACC_MODE(flags);
+ /*
+ * Clear out all open flags we don't know about so that we don't report
+ * them in fcntl(F_GETFD) or similar interfaces.
+ */
+ flags &= VALID_OPEN_FLAGS;
+
if (flags & (O_CREAT | __O_TMPFILE))
op->mode = (mode & S_IALLUGO) | S_IFREG;
else
diff --git a/fs/orangefs/devorangefs-req.c b/fs/orangefs/devorangefs-req.c
index e1534c9bab16..c19f0787c9c6 100644
--- a/fs/orangefs/devorangefs-req.c
+++ b/fs/orangefs/devorangefs-req.c
@@ -180,6 +180,10 @@ static ssize_t orangefs_devreq_read(struct file *file,
return -EINVAL;
}
+ /* Check for an empty list before locking. */
+ if (list_empty(&orangefs_request_list))
+ return -EAGAIN;
+
restart:
/* Get next op (if any) from top of list. */
spin_lock(&orangefs_request_list_lock);
diff --git a/fs/orangefs/dir.c b/fs/orangefs/dir.c
index 284373a57a08..d327cbd17756 100644
--- a/fs/orangefs/dir.c
+++ b/fs/orangefs/dir.c
@@ -1,396 +1,404 @@
/*
- * (C) 2001 Clemson University and The University of Chicago
- *
- * See COPYING in top-level directory.
+ * Copyright 2017 Omnibond Systems, L.L.C.
*/
#include "protocol.h"
#include "orangefs-kernel.h"
#include "orangefs-bufmap.h"
+struct orangefs_dir_part {
+ struct orangefs_dir_part *next;
+ size_t len;
+};
+
+struct orangefs_dir {
+ __u64 token;
+ struct orangefs_dir_part *part;
+ loff_t end;
+ int error;
+};
+
+#define PART_SHIFT (24)
+#define PART_SIZE (1<<24)
+#define PART_MASK (~(PART_SIZE - 1))
+
/*
- * decode routine used by kmod to deal with the blob sent from
- * userspace for readdirs. The blob contains zero or more of these
- * sub-blobs:
- * __u32 - represents length of the character string that follows.
- * string - between 1 and ORANGEFS_NAME_MAX bytes long.
- * padding - (if needed) to cause the __u32 plus the string to be
- * eight byte aligned.
- * khandle - sizeof(khandle) bytes.
+ * There can be up to 512 directory entries. Each entry is encoded as
+ * follows:
+ * 4 bytes: string size (n)
+ * n bytes: string
+ * 1 byte: trailing zero
+ * padding to 8 bytes
+ * 16 bytes: khandle
+ * padding to 8 bytes
+ *
+ * The trailer_buf starts with a struct orangefs_readdir_response_s
+ * which must be skipped to get to the directory data.
+ *
+ * The data which is received from the userspace daemon is termed a
+ * part and is stored in a linked list in case more than one part is
+ * needed for a large directory.
+ *
+ * The position pointer (ctx->pos) encodes the part and offset on which
+ * to begin reading at. Bits above PART_SHIFT encode the part and bits
+ * below PART_SHIFT encode the offset. Parts are stored in a linked
+ * list which grows as data is received from the server. The overhead
+ * associated with managing the list is presumed to be small compared to
+ * the overhead of communicating with the server.
+ *
+ * As data is received from the server, it is placed at the end of the
+ * part list. Data is parsed from the current position as it is needed.
+ * When data is determined to be corrupt, it is either because the
+ * userspace component has sent back corrupt data or because the file
+ * pointer has been moved to an invalid location. Since the two cannot
+ * be differentiated, return EIO.
+ *
+ * Part zero is synthesized to contain `.' and `..'. Part one is the
+ * first part of the part list.
*/
-static long decode_dirents(char *ptr, size_t size,
- struct orangefs_readdir_response_s *readdir)
+
+static int do_readdir(struct orangefs_inode_s *oi,
+ struct orangefs_dir *od, struct dentry *dentry,
+ struct orangefs_kernel_op_s *op)
{
- int i;
- struct orangefs_readdir_response_s *rd =
- (struct orangefs_readdir_response_s *) ptr;
- char *buf = ptr;
- int khandle_size = sizeof(struct orangefs_khandle);
- size_t offset = offsetof(struct orangefs_readdir_response_s,
- dirent_array);
- /* 8 reflects eight byte alignment */
- int smallest_blob = khandle_size + 8;
- __u32 len;
- int aligned_len;
- int sizeof_u32 = sizeof(__u32);
- long ret;
-
- gossip_debug(GOSSIP_DIR_DEBUG, "%s: size:%zu:\n", __func__, size);
-
- /* size is = offset on empty dirs, > offset on non-empty dirs... */
- if (size < offset) {
- gossip_err("%s: size:%zu: offset:%zu:\n",
- __func__,
- size,
- offset);
- ret = -EINVAL;
- goto out;
- }
+ struct orangefs_readdir_response_s *resp;
+ int bufi, r;
- if ((size == offset) && (readdir->orangefs_dirent_outcount != 0)) {
- gossip_err("%s: size:%zu: dirent_outcount:%d:\n",
- __func__,
- size,
- readdir->orangefs_dirent_outcount);
- ret = -EINVAL;
- goto out;
- }
+ /*
+ * Despite the badly named field, readdir does not use shared
+ * memory. However, there are a limited number of readdir
+ * slots, which must be allocated here. This flag simply tells
+ * the op scheduler to return the op here for retry.
+ */
+ op->uses_shared_memory = 1;
+ op->upcall.req.readdir.refn = oi->refn;
+ op->upcall.req.readdir.token = od->token;
+ op->upcall.req.readdir.max_dirent_count =
+ ORANGEFS_MAX_DIRENT_COUNT_READDIR;
- readdir->token = rd->token;
- readdir->orangefs_dirent_outcount = rd->orangefs_dirent_outcount;
- readdir->dirent_array = kcalloc(readdir->orangefs_dirent_outcount,
- sizeof(*readdir->dirent_array),
- GFP_KERNEL);
- if (readdir->dirent_array == NULL) {
- gossip_err("%s: kcalloc failed.\n", __func__);
- ret = -ENOMEM;
- goto out;
+again:
+ bufi = orangefs_readdir_index_get();
+ if (bufi < 0) {
+ od->error = bufi;
+ return bufi;
}
- buf += offset;
- size -= offset;
-
- for (i = 0; i < readdir->orangefs_dirent_outcount; i++) {
- if (size < smallest_blob) {
- gossip_err("%s: size:%zu: smallest_blob:%d:\n",
- __func__,
- size,
- smallest_blob);
- ret = -EINVAL;
- goto free;
- }
+ op->upcall.req.readdir.buf_index = bufi;
- len = *(__u32 *)buf;
- if ((len < 1) || (len > ORANGEFS_NAME_MAX)) {
- gossip_err("%s: len:%d:\n", __func__, len);
- ret = -EINVAL;
- goto free;
- }
+ r = service_operation(op, "orangefs_readdir",
+ get_interruptible_flag(dentry->d_inode));
- gossip_debug(GOSSIP_DIR_DEBUG,
- "%s: size:%zu: len:%d:\n",
- __func__,
- size,
- len);
+ orangefs_readdir_index_put(bufi);
- readdir->dirent_array[i].d_name = buf + sizeof_u32;
- readdir->dirent_array[i].d_length = len;
+ if (op_state_purged(op)) {
+ if (r == -EAGAIN) {
+ vfree(op->downcall.trailer_buf);
+ goto again;
+ } else if (r == -EIO) {
+ vfree(op->downcall.trailer_buf);
+ od->error = r;
+ return r;
+ }
+ }
- /*
- * Calculate "aligned" length of this string and its
- * associated __u32 descriptor.
- */
- aligned_len = ((sizeof_u32 + len + 1) + 7) & ~7;
- gossip_debug(GOSSIP_DIR_DEBUG,
- "%s: aligned_len:%d:\n",
- __func__,
- aligned_len);
+ if (r < 0) {
+ vfree(op->downcall.trailer_buf);
+ od->error = r;
+ return r;
+ } else if (op->downcall.status) {
+ vfree(op->downcall.trailer_buf);
+ od->error = op->downcall.status;
+ return op->downcall.status;
+ }
- /*
- * The end of the blob should coincide with the end
- * of the last sub-blob.
- */
- if (size < aligned_len + khandle_size) {
- gossip_err("%s: ran off the end of the blob.\n",
- __func__);
- ret = -EINVAL;
- goto free;
- }
- size -= aligned_len + khandle_size;
+ /*
+ * The maximum size is the per-entry size times the 512 entries plus
+ * the header. This is well under the limit.
+ */
+ if (op->downcall.trailer_size > PART_SIZE) {
+ vfree(op->downcall.trailer_buf);
+ od->error = -EIO;
+ return -EIO;
+ }
- buf += aligned_len;
+ resp = (struct orangefs_readdir_response_s *)
+ op->downcall.trailer_buf;
+ od->token = resp->token;
+ return 0;
+}
- readdir->dirent_array[i].khandle =
- *(struct orangefs_khandle *) buf;
- buf += khandle_size;
+static int parse_readdir(struct orangefs_dir *od,
+ struct orangefs_kernel_op_s *op)
+{
+ struct orangefs_dir_part *part, *new;
+ size_t count;
+
+ count = 1;
+ part = od->part;
+ while (part) {
+ count++;
+ if (part->next)
+ part = part->next;
+ else
+ break;
}
- ret = buf - ptr;
- gossip_debug(GOSSIP_DIR_DEBUG, "%s: returning:%ld:\n", __func__, ret);
- goto out;
-free:
- kfree(readdir->dirent_array);
- readdir->dirent_array = NULL;
+ new = (void *)op->downcall.trailer_buf;
+ new->next = NULL;
+ new->len = op->downcall.trailer_size -
+ sizeof(struct orangefs_readdir_response_s);
+ if (!od->part)
+ od->part = new;
+ else
+ part->next = new;
+ count++;
+ od->end = count << PART_SHIFT;
-out:
- return ret;
+ return 0;
}
-/*
- * Read directory entries from an instance of an open directory.
- */
-static int orangefs_readdir(struct file *file, struct dir_context *ctx)
+static int orangefs_dir_more(struct orangefs_inode_s *oi,
+ struct orangefs_dir *od, struct dentry *dentry)
{
- int ret = 0;
- int buffer_index;
- /*
- * ptoken supports Orangefs' distributed directory logic, added
- * in 2.9.2.
- */
- __u64 *ptoken = file->private_data;
- __u64 pos = 0;
- ino_t ino = 0;
- struct dentry *dentry = file->f_path.dentry;
- struct orangefs_kernel_op_s *new_op = NULL;
- struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(dentry->d_inode);
- struct orangefs_readdir_response_s readdir_response;
- void *dents_buf;
- int i = 0;
- int len = 0;
- ino_t current_ino = 0;
- char *current_entry = NULL;
- long bytes_decoded;
-
- gossip_debug(GOSSIP_DIR_DEBUG,
- "%s: ctx->pos:%lld, ptoken = %llu\n",
- __func__,
- lld(ctx->pos),
- llu(*ptoken));
-
- pos = (__u64) ctx->pos;
-
- /* are we done? */
- if (pos == ORANGEFS_READDIR_END) {
- gossip_debug(GOSSIP_DIR_DEBUG,
- "Skipping to termination path\n");
- return 0;
+ struct orangefs_kernel_op_s *op;
+ int r;
+
+ op = op_alloc(ORANGEFS_VFS_OP_READDIR);
+ if (!op) {
+ od->error = -ENOMEM;
+ return -ENOMEM;
+ }
+ r = do_readdir(oi, od, dentry, op);
+ if (r) {
+ od->error = r;
+ goto out;
+ }
+ r = parse_readdir(od, op);
+ if (r) {
+ od->error = r;
+ goto out;
}
- gossip_debug(GOSSIP_DIR_DEBUG,
- "orangefs_readdir called on %pd (pos=%llu)\n",
- dentry, llu(pos));
+ od->error = 0;
+out:
+ op_release(op);
+ return od->error;
+}
- memset(&readdir_response, 0, sizeof(readdir_response));
+static int fill_from_part(struct orangefs_dir_part *part,
+ struct dir_context *ctx)
+{
+ const int offset = sizeof(struct orangefs_readdir_response_s);
+ struct orangefs_khandle *khandle;
+ __u32 *len, padlen;
+ loff_t i;
+ char *s;
+ i = ctx->pos & ~PART_MASK;
- new_op = op_alloc(ORANGEFS_VFS_OP_READDIR);
- if (!new_op)
- return -ENOMEM;
+ /* The file offset from userspace is too large. */
+ if (i > part->len)
+ return 1;
/*
- * Only the indices are shared. No memory is actually shared, but the
- * mechanism is used.
+ * If the seek pointer is positioned just before an entry it
+ * should find the next entry.
*/
- new_op->uses_shared_memory = 1;
- new_op->upcall.req.readdir.refn = orangefs_inode->refn;
- new_op->upcall.req.readdir.max_dirent_count =
- ORANGEFS_MAX_DIRENT_COUNT_READDIR;
-
- gossip_debug(GOSSIP_DIR_DEBUG,
- "%s: upcall.req.readdir.refn.khandle: %pU\n",
- __func__,
- &new_op->upcall.req.readdir.refn.khandle);
+ if (i % 8)
+ i = i + (8 - i%8)%8;
- new_op->upcall.req.readdir.token = *ptoken;
-
-get_new_buffer_index:
- buffer_index = orangefs_readdir_index_get();
- if (buffer_index < 0) {
- ret = buffer_index;
- gossip_lerr("orangefs_readdir: orangefs_readdir_index_get() failure (%d)\n",
- ret);
- goto out_free_op;
+ while (i < part->len) {
+ if (part->len < i + sizeof *len)
+ break;
+ len = (void *)part + offset + i;
+ /*
+ * len is the size of the string itself. padlen is the
+ * total size of the encoded string.
+ */
+ padlen = (sizeof *len + *len + 1) +
+ (8 - (sizeof *len + *len + 1)%8)%8;
+ if (part->len < i + padlen + sizeof *khandle)
+ goto next;
+ s = (void *)part + offset + i + sizeof *len;
+ if (s[*len] != 0)
+ goto next;
+ khandle = (void *)part + offset + i + padlen;
+ if (!dir_emit(ctx, s, *len,
+ orangefs_khandle_to_ino(khandle),
+ DT_UNKNOWN))
+ return 0;
+ i += padlen + sizeof *khandle;
+ i = i + (8 - i%8)%8;
+ BUG_ON(i > part->len);
+ ctx->pos = (ctx->pos & PART_MASK) | i;
+ continue;
+next:
+ i += 8;
}
- new_op->upcall.req.readdir.buf_index = buffer_index;
-
- ret = service_operation(new_op,
- "orangefs_readdir",
- get_interruptible_flag(dentry->d_inode));
+ return 1;
+}
- gossip_debug(GOSSIP_DIR_DEBUG,
- "Readdir downcall status is %d. ret:%d\n",
- new_op->downcall.status,
- ret);
+static int orangefs_dir_fill(struct orangefs_inode_s *oi,
+ struct orangefs_dir *od, struct dentry *dentry,
+ struct dir_context *ctx)
+{
+ struct orangefs_dir_part *part;
+ size_t count;
- orangefs_readdir_index_put(buffer_index);
+ count = ((ctx->pos & PART_MASK) >> PART_SHIFT) - 1;
- if (ret == -EAGAIN && op_state_purged(new_op)) {
- /* Client-core indices are invalid after it restarted. */
- gossip_debug(GOSSIP_DIR_DEBUG,
- "%s: Getting new buffer_index for retry of readdir..\n",
- __func__);
- goto get_new_buffer_index;
+ part = od->part;
+ while (part->next && count) {
+ count--;
+ part = part->next;
}
-
- if (ret == -EIO && op_state_purged(new_op)) {
- gossip_err("%s: Client is down. Aborting readdir call.\n",
- __func__);
- goto out_free_op;
+ /* This means the userspace file offset is invalid. */
+ if (count) {
+ od->error = -EIO;
+ return -EIO;
}
- if (ret < 0 || new_op->downcall.status != 0) {
- gossip_debug(GOSSIP_DIR_DEBUG,
- "Readdir request failed. Status:%d\n",
- new_op->downcall.status);
- if (ret >= 0)
- ret = new_op->downcall.status;
- goto out_free_op;
+ while (part && part->len) {
+ int r;
+ r = fill_from_part(part, ctx);
+ if (r < 0) {
+ od->error = r;
+ return r;
+ } else if (r == 0) {
+ /* Userspace buffer is full. */
+ break;
+ } else {
+ /*
+ * The part ran out of data. Move to the next
+ * part.
+ */
+ ctx->pos = (ctx->pos & PART_MASK) +
+ (1 << PART_SHIFT);
+ part = part->next;
+ }
}
+ return 0;
+}
- dents_buf = new_op->downcall.trailer_buf;
- if (dents_buf == NULL) {
- gossip_err("Invalid NULL buffer in readdir response\n");
- ret = -ENOMEM;
- goto out_free_op;
+static loff_t orangefs_dir_llseek(struct file *file, loff_t offset,
+ int whence)
+{
+ struct orangefs_dir *od = file->private_data;
+ /*
+ * Delete the stored data so userspace sees new directory
+ * entries.
+ */
+ if (!whence && offset < od->end) {
+ struct orangefs_dir_part *part = od->part;
+ while (part) {
+ struct orangefs_dir_part *next = part->next;
+ vfree(part);
+ part = next;
+ }
+ od->token = ORANGEFS_ITERATE_START;
+ od->part = NULL;
+ od->end = 1 << PART_SHIFT;
}
+ return default_llseek(file, offset, whence);
+}
- bytes_decoded = decode_dirents(dents_buf, new_op->downcall.trailer_size,
- &readdir_response);
- if (bytes_decoded < 0) {
- ret = bytes_decoded;
- gossip_err("Could not decode readdir from buffer %d\n", ret);
- goto out_vfree;
- }
+static int orangefs_dir_iterate(struct file *file,
+ struct dir_context *ctx)
+{
+ struct orangefs_inode_s *oi;
+ struct orangefs_dir *od;
+ struct dentry *dentry;
+ int r;
- if (bytes_decoded != new_op->downcall.trailer_size) {
- gossip_err("orangefs_readdir: # bytes decoded (%ld) "
- "!= trailer size (%ld)\n",
- bytes_decoded,
- (long)new_op->downcall.trailer_size);
- ret = -EINVAL;
- goto out_destroy_handle;
- }
+ dentry = file->f_path.dentry;
+ oi = ORANGEFS_I(dentry->d_inode);
+ od = file->private_data;
- /*
- * orangefs doesn't actually store dot and dot-dot, but
- * we need to have them represented.
- */
- if (pos == 0) {
- ino = get_ino_from_khandle(dentry->d_inode);
- gossip_debug(GOSSIP_DIR_DEBUG,
- "%s: calling dir_emit of \".\" with pos = %llu\n",
- __func__,
- llu(pos));
- ret = dir_emit(ctx, ".", 1, ino, DT_DIR);
- pos += 1;
- }
+ if (od->error)
+ return od->error;
- if (pos == 1) {
- ino = get_parent_ino_from_dentry(dentry);
- gossip_debug(GOSSIP_DIR_DEBUG,
- "%s: calling dir_emit of \"..\" with pos = %llu\n",
- __func__,
- llu(pos));
- ret = dir_emit(ctx, "..", 2, ino, DT_DIR);
- pos += 1;
+ if (ctx->pos == 0) {
+ if (!dir_emit_dot(file, ctx))
+ return 0;
+ ctx->pos++;
+ }
+ if (ctx->pos == 1) {
+ if (!dir_emit_dotdot(file, ctx))
+ return 0;
+ ctx->pos = 1 << PART_SHIFT;
}
/*
- * we stored ORANGEFS_ITERATE_NEXT in ctx->pos last time around
- * to prevent "finding" dot and dot-dot on any iteration
- * other than the first.
+ * The seek position is in the first synthesized part but is not
+ * valid.
*/
- if (ctx->pos == ORANGEFS_ITERATE_NEXT)
- ctx->pos = 0;
-
- gossip_debug(GOSSIP_DIR_DEBUG,
- "%s: dirent_outcount:%d:\n",
- __func__,
- readdir_response.orangefs_dirent_outcount);
- for (i = ctx->pos;
- i < readdir_response.orangefs_dirent_outcount;
- i++) {
- len = readdir_response.dirent_array[i].d_length;
- current_entry = readdir_response.dirent_array[i].d_name;
- current_ino = orangefs_khandle_to_ino(
- &readdir_response.dirent_array[i].khandle);
-
- gossip_debug(GOSSIP_DIR_DEBUG,
- "calling dir_emit for %s with len %d"
- ", ctx->pos %ld\n",
- current_entry,
- len,
- (unsigned long)ctx->pos);
- /*
- * type is unknown. We don't return object type
- * in the dirent_array. This leaves getdents
- * clueless about type.
- */
- ret =
- dir_emit(ctx, current_entry, len, current_ino, DT_UNKNOWN);
- if (!ret)
- break;
- ctx->pos++;
- gossip_debug(GOSSIP_DIR_DEBUG,
- "%s: ctx->pos:%lld\n",
- __func__,
- lld(ctx->pos));
+ if ((ctx->pos & PART_MASK) == 0)
+ return -EIO;
- }
+ r = 0;
/*
- * we ran all the way through the last batch, set up for
- * getting another batch...
+ * Must read more if the user has sought past what has been read
+ * so far. Stop a user who has sought past the end.
*/
- if (ret) {
- *ptoken = readdir_response.token;
- ctx->pos = ORANGEFS_ITERATE_NEXT;
+ while (od->token != ORANGEFS_ITERATE_END &&
+ ctx->pos > od->end) {
+ r = orangefs_dir_more(oi, od, dentry);
+ if (r)
+ return r;
+ }
+ if (od->token == ORANGEFS_ITERATE_END && ctx->pos > od->end)
+ return -EIO;
+
+ /* Then try to fill if there's any left in the buffer. */
+ if (ctx->pos < od->end) {
+ r = orangefs_dir_fill(oi, od, dentry, ctx);
+ if (r)
+ return r;
}
- /*
- * Did we hit the end of the directory?
- */
- if (readdir_response.token == ORANGEFS_READDIR_END) {
- gossip_debug(GOSSIP_DIR_DEBUG,
- "End of dir detected; setting ctx->pos to ORANGEFS_READDIR_END.\n");
- ctx->pos = ORANGEFS_READDIR_END;
+ /* Finally get some more and try to fill. */
+ if (od->token != ORANGEFS_ITERATE_END) {
+ r = orangefs_dir_more(oi, od, dentry);
+ if (r)
+ return r;
+ r = orangefs_dir_fill(oi, od, dentry, ctx);
}
-out_destroy_handle:
- /* kfree(NULL) is safe */
- kfree(readdir_response.dirent_array);
-out_vfree:
- gossip_debug(GOSSIP_DIR_DEBUG, "vfree %p\n", dents_buf);
- vfree(dents_buf);
-out_free_op:
- op_release(new_op);
- gossip_debug(GOSSIP_DIR_DEBUG, "orangefs_readdir returning %d\n", ret);
- return ret;
+ return r;
}
static int orangefs_dir_open(struct inode *inode, struct file *file)
{
- __u64 *ptoken;
-
- file->private_data = kmalloc(sizeof(__u64), GFP_KERNEL);
+ struct orangefs_dir *od;
+ file->private_data = kmalloc(sizeof(struct orangefs_dir),
+ GFP_KERNEL);
if (!file->private_data)
return -ENOMEM;
-
- ptoken = file->private_data;
- *ptoken = ORANGEFS_READDIR_START;
+ od = file->private_data;
+ od->token = ORANGEFS_ITERATE_START;
+ od->part = NULL;
+ od->end = 1 << PART_SHIFT;
+ od->error = 0;
return 0;
}
static int orangefs_dir_release(struct inode *inode, struct file *file)
{
+ struct orangefs_dir *od = file->private_data;
+ struct orangefs_dir_part *part = od->part;
orangefs_flush_inode(inode);
- kfree(file->private_data);
+ while (part) {
+ struct orangefs_dir_part *next = part->next;
+ vfree(part);
+ part = next;
+ }
+ kfree(od);
return 0;
}
-/** ORANGEFS implementation of VFS directory operations */
const struct file_operations orangefs_dir_operations = {
+ .llseek = orangefs_dir_llseek,
.read = generic_read_dir,
- .iterate = orangefs_readdir,
+ .iterate = orangefs_dir_iterate,
.open = orangefs_dir_open,
- .release = orangefs_dir_release,
+ .release = orangefs_dir_release
};
diff --git a/fs/orangefs/downcall.h b/fs/orangefs/downcall.h
index 3b8923f8bf21..163001c95501 100644
--- a/fs/orangefs/downcall.h
+++ b/fs/orangefs/downcall.h
@@ -40,16 +40,6 @@ struct orangefs_mkdir_response {
struct orangefs_object_kref refn;
};
-/*
- * duplication of some system interface structures so that I don't have
- * to allocate extra memory
- */
-struct orangefs_dirent {
- char *d_name;
- int d_length;
- struct orangefs_khandle khandle;
-};
-
struct orangefs_statfs_response {
__s64 block_size;
__s64 blocks_total;
@@ -131,12 +121,16 @@ struct orangefs_downcall_s {
} resp;
};
+/*
+ * The readdir response comes in the trailer. It is followed by the
+ * directory entries as described in dir.c.
+ */
+
struct orangefs_readdir_response_s {
__u64 token;
__u64 directory_version;
__u32 __pad2;
__u32 orangefs_dirent_outcount;
- struct orangefs_dirent *dirent_array;
};
#endif /* __DOWNCALL_H */
diff --git a/fs/orangefs/file.c b/fs/orangefs/file.c
index 1cd37ebc4f25..28f38d813ad2 100644
--- a/fs/orangefs/file.c
+++ b/fs/orangefs/file.c
@@ -474,7 +474,8 @@ static ssize_t orangefs_file_write_iter(struct kiocb *iocb, struct iov_iter *ite
/* Make sure generic_write_checks sees an up to date inode size. */
if (file->f_flags & O_APPEND) {
- rc = orangefs_inode_getattr(file->f_mapping->host, 0, 1);
+ rc = orangefs_inode_getattr(file->f_mapping->host, 0, 1,
+ STATX_SIZE);
if (rc == -ESTALE)
rc = -EIO;
if (rc) {
@@ -692,7 +693,8 @@ static loff_t orangefs_file_llseek(struct file *file, loff_t offset, int origin)
* NOTE: We are only interested in file size here,
* so we set mask accordingly.
*/
- ret = orangefs_inode_getattr(file->f_mapping->host, 0, 1);
+ ret = orangefs_inode_getattr(file->f_mapping->host, 0, 1,
+ STATX_SIZE);
if (ret == -ESTALE)
ret = -EIO;
if (ret) {
diff --git a/fs/orangefs/inode.c b/fs/orangefs/inode.c
index a304bf34b212..9428ea0aac16 100644
--- a/fs/orangefs/inode.c
+++ b/fs/orangefs/inode.c
@@ -161,7 +161,7 @@ static int orangefs_setattr_size(struct inode *inode, struct iattr *iattr)
iattr->ia_size);
/* Ensure that we have a up to date size, so we know if it changed. */
- ret = orangefs_inode_getattr(inode, 0, 1);
+ ret = orangefs_inode_getattr(inode, 0, 1, STATX_SIZE);
if (ret == -ESTALE)
ret = -EIO;
if (ret) {
@@ -218,8 +218,7 @@ int orangefs_setattr(struct dentry *dentry, struct iattr *iattr)
if (ret)
goto out;
- if ((iattr->ia_valid & ATTR_SIZE) &&
- iattr->ia_size != i_size_read(inode)) {
+ if (iattr->ia_valid & ATTR_SIZE) {
ret = orangefs_setattr_size(inode, iattr);
if (ret)
goto out;
@@ -256,13 +255,19 @@ int orangefs_getattr(const struct path *path, struct kstat *stat,
"orangefs_getattr: called on %pd\n",
path->dentry);
- ret = orangefs_inode_getattr(inode, 0, 0);
+ ret = orangefs_inode_getattr(inode, 0, 0, request_mask);
if (ret == 0) {
generic_fillattr(inode, stat);
/* override block size reported to stat */
orangefs_inode = ORANGEFS_I(inode);
stat->blksize = orangefs_inode->blksize;
+
+ if (request_mask & STATX_SIZE)
+ stat->result_mask = STATX_BASIC_STATS;
+ else
+ stat->result_mask = STATX_BASIC_STATS &
+ ~STATX_SIZE;
}
return ret;
}
@@ -277,7 +282,7 @@ int orangefs_permission(struct inode *inode, int mask)
gossip_debug(GOSSIP_INODE_DEBUG, "%s: refreshing\n", __func__);
/* Make sure the permission (and other common attrs) are up to date. */
- ret = orangefs_inode_getattr(inode, 0, 0);
+ ret = orangefs_inode_getattr(inode, 0, 0, STATX_MODE);
if (ret < 0)
return ret;
@@ -375,7 +380,7 @@ struct inode *orangefs_iget(struct super_block *sb, struct orangefs_object_kref
if (!inode || !(inode->i_state & I_NEW))
return inode;
- error = orangefs_inode_getattr(inode, 1, 1);
+ error = orangefs_inode_getattr(inode, 1, 1, STATX_ALL);
if (error) {
iget_failed(inode);
return ERR_PTR(error);
@@ -420,7 +425,7 @@ struct inode *orangefs_new_inode(struct super_block *sb, struct inode *dir,
orangefs_set_inode(inode, ref);
inode->i_ino = hash; /* needed for stat etc */
- error = orangefs_inode_getattr(inode, 1, 1);
+ error = orangefs_inode_getattr(inode, 1, 1, STATX_ALL);
if (error)
goto out_iput;
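
These inode.c hunks thread the caller's statx request mask down to the OrangeFS getattr path and report what was actually filled in. For reference, a minimal userspace sketch of the statx() mask/result_mask contract they build on (glibc 2.28+ assumed; not OrangeFS-specific code):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>

int main(int argc, char **argv)
{
	const char *path = argc > 1 ? argv[1] : ".";
	struct statx stx;

	/* Request the basic stats except the (potentially expensive) size. */
	if (statx(AT_FDCWD, path, 0, STATX_BASIC_STATS & ~STATX_SIZE, &stx)) {
		perror("statx");
		return 1;
	}
	/* stx_mask reports which fields the filesystem really filled in. */
	printf("STATX_SIZE %s\n",
	       (stx.stx_mask & STATX_SIZE) ? "returned anyway" : "omitted");
	return 0;
}
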
diff --git a/fs/orangefs/namei.c b/fs/orangefs/namei.c
index a290ff6ec756..478e88bd7f9d 100644
--- a/fs/orangefs/namei.c
+++ b/fs/orangefs/namei.c
@@ -74,6 +74,7 @@ static int orangefs_create(struct inode *dir,
unlock_new_inode(inode);
orangefs_set_timeout(dentry);
ORANGEFS_I(inode)->getattr_time = jiffies - 1;
+ ORANGEFS_I(inode)->getattr_mask = STATX_BASIC_STATS;
gossip_debug(GOSSIP_NAME_DEBUG,
"%s: dentry instantiated for %pd\n",
@@ -193,8 +194,6 @@ static struct dentry *orangefs_lookup(struct inode *dir, struct dentry *dentry,
goto out;
}
- ORANGEFS_I(inode)->getattr_time = jiffies - 1;
-
gossip_debug(GOSSIP_NAME_DEBUG,
"%s:%s:%d "
"Found good inode [%lu] with count [%d]\n",
@@ -324,6 +323,7 @@ static int orangefs_symlink(struct inode *dir,
unlock_new_inode(inode);
orangefs_set_timeout(dentry);
ORANGEFS_I(inode)->getattr_time = jiffies - 1;
+ ORANGEFS_I(inode)->getattr_mask = STATX_BASIC_STATS;
gossip_debug(GOSSIP_NAME_DEBUG,
"Inode (Symlink) %pU -> %pd\n",
@@ -388,6 +388,7 @@ static int orangefs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode
unlock_new_inode(inode);
orangefs_set_timeout(dentry);
ORANGEFS_I(inode)->getattr_time = jiffies - 1;
+ ORANGEFS_I(inode)->getattr_mask = STATX_BASIC_STATS;
gossip_debug(GOSSIP_NAME_DEBUG,
"Inode (Directory) %pU -> %pd\n",
diff --git a/fs/orangefs/orangefs-debugfs.c b/fs/orangefs/orangefs-debugfs.c
index 791912da97d7..716ed337f166 100644
--- a/fs/orangefs/orangefs-debugfs.c
+++ b/fs/orangefs/orangefs-debugfs.c
@@ -440,6 +440,9 @@ static ssize_t orangefs_debug_write(struct file *file,
"orangefs_debug_write: %pD\n",
file);
+ if (count == 0)
+ return 0;
+
/*
* Thwart users who try to jam a ridiculous number
* of bytes into the debug file...
diff --git a/fs/orangefs/orangefs-dev-proto.h b/fs/orangefs/orangefs-dev-proto.h
index f380f9ed1b28..efe08c763e56 100644
--- a/fs/orangefs/orangefs-dev-proto.h
+++ b/fs/orangefs/orangefs-dev-proto.h
@@ -52,12 +52,7 @@
*/
#define ORANGEFS_MAX_DEBUG_STRING_LEN 0x00000800
-/*
- * The maximum number of directory entries in a single request is 96.
- * XXX: Why can this not be higher. The client-side code can handle up to 512.
- * XXX: What happens if we expect more than the client can return?
- */
-#define ORANGEFS_MAX_DIRENT_COUNT_READDIR 96
+#define ORANGEFS_MAX_DIRENT_COUNT_READDIR 512
#include "upcall.h"
#include "downcall.h"
diff --git a/fs/orangefs/orangefs-kernel.h b/fs/orangefs/orangefs-kernel.h
index 8afac46fcc87..ea0ce507a6ab 100644
--- a/fs/orangefs/orangefs-kernel.h
+++ b/fs/orangefs/orangefs-kernel.h
@@ -215,6 +215,7 @@ struct orangefs_inode_s {
unsigned long pinode_flags;
unsigned long getattr_time;
+ u32 getattr_mask;
};
#define P_ATIME_FLAG 0
@@ -340,11 +341,6 @@ static inline struct orangefs_khandle *get_khandle_from_ino(struct inode *inode)
return &(ORANGEFS_I(inode)->refn.khandle);
}
-static inline __s32 get_fsid_from_ino(struct inode *inode)
-{
- return ORANGEFS_I(inode)->refn.fs_id;
-}
-
static inline ino_t get_ino_from_khandle(struct inode *inode)
{
struct orangefs_khandle *khandle;
@@ -500,7 +496,8 @@ int orangefs_inode_setxattr(struct inode *inode,
size_t size,
int flags);
-int orangefs_inode_getattr(struct inode *inode, int new, int bypass);
+int orangefs_inode_getattr(struct inode *inode, int new, int bypass,
+ u32 request_mask);
int orangefs_inode_check_changed(struct inode *inode);
diff --git a/fs/orangefs/orangefs-utils.c b/fs/orangefs/orangefs-utils.c
index 9b96b99539d6..aab6f1842963 100644
--- a/fs/orangefs/orangefs-utils.c
+++ b/fs/orangefs/orangefs-utils.c
@@ -251,7 +251,8 @@ static int orangefs_inode_is_stale(struct inode *inode, int new,
return 0;
}
-int orangefs_inode_getattr(struct inode *inode, int new, int bypass)
+int orangefs_inode_getattr(struct inode *inode, int new, int bypass,
+ u32 request_mask)
{
struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode);
struct orangefs_kernel_op_s *new_op;
@@ -262,7 +263,13 @@ int orangefs_inode_getattr(struct inode *inode, int new, int bypass)
get_khandle_from_ino(inode));
if (!new && !bypass) {
- if (time_before(jiffies, orangefs_inode->getattr_time))
+ /*
+ * Must have all the attributes in the mask and be within cache
+ * time.
+ */
+ if ((request_mask & orangefs_inode->getattr_mask) ==
+ request_mask &&
+ time_before(jiffies, orangefs_inode->getattr_time))
return 0;
}
@@ -270,7 +277,15 @@ int orangefs_inode_getattr(struct inode *inode, int new, int bypass)
if (!new_op)
return -ENOMEM;
new_op->upcall.req.getattr.refn = orangefs_inode->refn;
- new_op->upcall.req.getattr.mask = ORANGEFS_ATTR_SYS_ALL_NOHINT;
+ /*
+ * Size is the hardest attribute to get. The incremental cost of any
+ * other attribute is essentially zero.
+ */
+ if (request_mask & STATX_SIZE || new)
+ new_op->upcall.req.getattr.mask = ORANGEFS_ATTR_SYS_ALL_NOHINT;
+ else
+ new_op->upcall.req.getattr.mask =
+ ORANGEFS_ATTR_SYS_ALL_NOHINT & ~ORANGEFS_ATTR_SYS_SIZE;
ret = service_operation(new_op, __func__,
get_interruptible_flag(inode));
@@ -291,25 +306,29 @@ int orangefs_inode_getattr(struct inode *inode, int new, int bypass)
case S_IFREG:
inode->i_flags = orangefs_inode_flags(&new_op->
downcall.resp.getattr.attributes);
- inode_size = (loff_t)new_op->
- downcall.resp.getattr.attributes.size;
- rounded_up_size =
- (inode_size + (4096 - (inode_size % 4096)));
- inode->i_size = inode_size;
- orangefs_inode->blksize =
- new_op->downcall.resp.getattr.attributes.blksize;
- spin_lock(&inode->i_lock);
- inode->i_bytes = inode_size;
- inode->i_blocks =
- (unsigned long)(rounded_up_size / 512);
- spin_unlock(&inode->i_lock);
+ if (request_mask & STATX_SIZE || new) {
+ inode_size = (loff_t)new_op->
+ downcall.resp.getattr.attributes.size;
+ rounded_up_size =
+ (inode_size + (4096 - (inode_size % 4096)));
+ inode->i_size = inode_size;
+ orangefs_inode->blksize =
+ new_op->downcall.resp.getattr.attributes.blksize;
+ spin_lock(&inode->i_lock);
+ inode->i_bytes = inode_size;
+ inode->i_blocks =
+ (unsigned long)(rounded_up_size / 512);
+ spin_unlock(&inode->i_lock);
+ }
break;
case S_IFDIR:
- inode->i_size = PAGE_SIZE;
- orangefs_inode->blksize = i_blocksize(inode);
- spin_lock(&inode->i_lock);
- inode_set_bytes(inode, inode->i_size);
- spin_unlock(&inode->i_lock);
+ if (request_mask & STATX_SIZE || new) {
+ inode->i_size = PAGE_SIZE;
+ orangefs_inode->blksize = i_blocksize(inode);
+ spin_lock(&inode->i_lock);
+ inode_set_bytes(inode, inode->i_size);
+ spin_unlock(&inode->i_lock);
+ }
set_nlink(inode, 1);
break;
case S_IFLNK:
@@ -349,6 +368,10 @@ int orangefs_inode_getattr(struct inode *inode, int new, int bypass)
orangefs_inode->getattr_time = jiffies +
orangefs_getattr_timeout_msecs*HZ/1000;
+ if (request_mask & STATX_SIZE || new)
+ orangefs_inode->getattr_mask = STATX_BASIC_STATS;
+ else
+ orangefs_inode->getattr_mask = STATX_BASIC_STATS & ~STATX_SIZE;
ret = 0;
out:
op_release(new_op);
@@ -500,41 +523,6 @@ int orangefs_flush_inode(struct inode *inode)
return ret;
}
-int orangefs_unmount_sb(struct super_block *sb)
-{
- int ret = -EINVAL;
- struct orangefs_kernel_op_s *new_op = NULL;
-
- gossip_debug(GOSSIP_UTILS_DEBUG,
- "orangefs_unmount_sb called on sb %p\n",
- sb);
-
- new_op = op_alloc(ORANGEFS_VFS_OP_FS_UMOUNT);
- if (!new_op)
- return -ENOMEM;
- new_op->upcall.req.fs_umount.id = ORANGEFS_SB(sb)->id;
- new_op->upcall.req.fs_umount.fs_id = ORANGEFS_SB(sb)->fs_id;
- strncpy(new_op->upcall.req.fs_umount.orangefs_config_server,
- ORANGEFS_SB(sb)->devname,
- ORANGEFS_MAX_SERVER_ADDR_LEN);
-
- gossip_debug(GOSSIP_UTILS_DEBUG,
- "Attempting ORANGEFS Unmount via host %s\n",
- new_op->upcall.req.fs_umount.orangefs_config_server);
-
- ret = service_operation(new_op, "orangefs_fs_umount", 0);
-
- gossip_debug(GOSSIP_UTILS_DEBUG,
- "orangefs_unmount: got return value of %d\n", ret);
- if (ret)
- sb = ERR_PTR(ret);
- else
- ORANGEFS_SB(sb)->mount_pending = 1;
-
- op_release(new_op);
- return ret;
-}
-
void orangefs_make_bad_inode(struct inode *inode)
{
if (is_root_handle(inode)) {
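
The cache test added to orangefs_inode_getattr() reduces to: the cached attributes must cover every requested bit and must not have expired. A freestanding C sketch of that predicate, with made-up names (attr_cache, DEMO_MODE, DEMO_SIZE) rather than kernel symbols:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define DEMO_MODE (1u << 1)	/* stands in for STATX_MODE */
#define DEMO_SIZE (1u << 9)	/* stands in for STATX_SIZE */

/* Illustrative stand-in for the inode's getattr_mask/getattr_time pair. */
struct attr_cache {
	uint32_t cached_mask;	/* attributes known to be valid */
	time_t expires;		/* when the cached attributes go stale */
};

static bool attr_cache_usable(const struct attr_cache *c, uint32_t request_mask)
{
	/* Every requested attribute must already be cached... */
	if ((request_mask & c->cached_mask) != request_mask)
		return false;
	/* ...and the cache must still be within its lifetime. */
	return time(NULL) < c->expires;
}

int main(void)
{
	struct attr_cache c = {
		.cached_mask = DEMO_MODE,	/* size was deliberately not fetched */
		.expires = time(NULL) + 50,
	};

	printf("mode request served from cache: %d\n", attr_cache_usable(&c, DEMO_MODE));
	printf("size request served from cache: %d\n", attr_cache_usable(&c, DEMO_SIZE));
	return 0;
}
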
diff --git a/fs/orangefs/protocol.h b/fs/orangefs/protocol.h
index 971307ad69be..48bcc1bbe415 100644
--- a/fs/orangefs/protocol.h
+++ b/fs/orangefs/protocol.h
@@ -138,13 +138,8 @@ typedef __s64 ORANGEFS_offset;
#define ORANGEFS_G_SGID (1 << 10)
#define ORANGEFS_U_SUID (1 << 11)
-/* definition taken from stdint.h */
-#define INT32_MAX (2147483647)
-#define ORANGEFS_ITERATE_START (INT32_MAX - 1)
-#define ORANGEFS_ITERATE_END (INT32_MAX - 2)
-#define ORANGEFS_ITERATE_NEXT (INT32_MAX - 3)
-#define ORANGEFS_READDIR_START ORANGEFS_ITERATE_START
-#define ORANGEFS_READDIR_END ORANGEFS_ITERATE_END
+#define ORANGEFS_ITERATE_START 2147483646
+#define ORANGEFS_ITERATE_END 2147483645
#define ORANGEFS_IMMUTABLE_FL FS_IMMUTABLE_FL
#define ORANGEFS_APPEND_FL FS_APPEND_FL
#define ORANGEFS_NOATIME_FL FS_NOATIME_FL
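
The new literal iterate tokens above are simply INT32_MAX minus one and minus two; a compile-time check of that equivalence, for reference:

#include <stdint.h>

/* The open-coded iterate tokens equal the old INT32_MAX-based expressions. */
_Static_assert(2147483646 == INT32_MAX - 1, "ITERATE_START value");
_Static_assert(2147483645 == INT32_MAX - 2, "ITERATE_END value");
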
diff --git a/fs/orangefs/super.c b/fs/orangefs/super.c
index 629d8c917fa6..5c7c273e17ec 100644
--- a/fs/orangefs/super.c
+++ b/fs/orangefs/super.c
@@ -376,6 +376,25 @@ static const struct export_operations orangefs_export_ops = {
.fh_to_dentry = orangefs_fh_to_dentry,
};
+static int orangefs_unmount(int id, __s32 fs_id, const char *devname)
+{
+ struct orangefs_kernel_op_s *op;
+ int r;
+ op = op_alloc(ORANGEFS_VFS_OP_FS_UMOUNT);
+ if (!op)
+ return -ENOMEM;
+ op->upcall.req.fs_umount.id = id;
+ op->upcall.req.fs_umount.fs_id = fs_id;
+ strncpy(op->upcall.req.fs_umount.orangefs_config_server,
+ devname, ORANGEFS_MAX_SERVER_ADDR_LEN);
+ r = service_operation(op, "orangefs_fs_umount", 0);
+ /* Not much to do about an error here. */
+ if (r)
+ gossip_err("orangefs_unmount: service_operation %d\n", r);
+ op_release(op);
+ return r;
+}
+
static int orangefs_fill_sb(struct super_block *sb,
struct orangefs_fs_mount_response *fs_mount,
void *data, int silent)
@@ -484,6 +503,8 @@ struct dentry *orangefs_mount(struct file_system_type *fst,
if (IS_ERR(sb)) {
d = ERR_CAST(sb);
+ orangefs_unmount(new_op->downcall.resp.fs_mount.id,
+ new_op->downcall.resp.fs_mount.fs_id, devname);
goto free_op;
}
@@ -539,6 +560,7 @@ struct dentry *orangefs_mount(struct file_system_type *fst,
free_sb_and_op:
/* Will call orangefs_kill_sb with sb not in list. */
ORANGEFS_SB(sb)->no_list = 1;
+ /* ORANGEFS_VFS_OP_FS_UMOUNT is done by orangefs_kill_sb. */
deactivate_locked_super(sb);
free_op:
gossip_err("orangefs_mount: mount request failed with %d\n", ret);
@@ -554,6 +576,7 @@ free_op:
void orangefs_kill_sb(struct super_block *sb)
{
+ int r;
gossip_debug(GOSSIP_SUPER_DEBUG, "orangefs_kill_sb: called\n");
/* provided sb cleanup */
@@ -563,7 +586,10 @@ void orangefs_kill_sb(struct super_block *sb)
* issue the unmount to userspace to tell it to remove the
* dynamic mount info it has for this superblock
*/
- orangefs_unmount_sb(sb);
+ r = orangefs_unmount(ORANGEFS_SB(sb)->id, ORANGEFS_SB(sb)->fs_id,
+ ORANGEFS_SB(sb)->devname);
+ if (!r)
+ ORANGEFS_SB(sb)->mount_pending = 1;
if (!ORANGEFS_SB(sb)->no_list) {
/* remove the sb from our list of orangefs specific sb's */
diff --git a/fs/orangefs/waitqueue.c b/fs/orangefs/waitqueue.c
index abcfa3fa9992..61e2ca7fec55 100644
--- a/fs/orangefs/waitqueue.c
+++ b/fs/orangefs/waitqueue.c
@@ -124,7 +124,14 @@ retry_servicing:
gossip_debug(GOSSIP_WAIT_DEBUG,
"%s:client core is NOT in service.\n",
__func__);
- timeout = op_timeout_secs * HZ;
+ /*
+ * Don't wait for the userspace component to return if
+ * the filesystem is being umounted anyway.
+ */
+ if (op->upcall.type == ORANGEFS_VFS_OP_FS_UMOUNT)
+ timeout = 0;
+ else
+ timeout = op_timeout_secs * HZ;
}
spin_unlock(&orangefs_request_list_lock);
diff --git a/fs/orangefs/xattr.c b/fs/orangefs/xattr.c
index 74a81b1daaac..237c9c04dc3b 100644
--- a/fs/orangefs/xattr.c
+++ b/fs/orangefs/xattr.c
@@ -76,11 +76,8 @@ ssize_t orangefs_inode_getxattr(struct inode *inode, const char *name,
if (S_ISLNK(inode->i_mode))
return -EOPNOTSUPP;
- if (strlen(name) >= ORANGEFS_MAX_XATTR_NAMELEN) {
- gossip_err("Invalid key length (%d)\n",
- (int)strlen(name));
+ if (strlen(name) > ORANGEFS_MAX_XATTR_NAMELEN)
return -EINVAL;
- }
fsuid = from_kuid(&init_user_ns, current_fsuid());
fsgid = from_kgid(&init_user_ns, current_fsgid());
@@ -172,6 +169,9 @@ static int orangefs_inode_removexattr(struct inode *inode, const char *name,
struct orangefs_kernel_op_s *new_op = NULL;
int ret = -ENOMEM;
+ if (strlen(name) > ORANGEFS_MAX_XATTR_NAMELEN)
+ return -EINVAL;
+
down_write(&orangefs_inode->xattr_sem);
new_op = op_alloc(ORANGEFS_VFS_OP_REMOVEXATTR);
if (!new_op)
@@ -231,23 +231,13 @@ int orangefs_inode_setxattr(struct inode *inode, const char *name,
"%s: name %s, buffer_size %zd\n",
__func__, name, size);
- if (size >= ORANGEFS_MAX_XATTR_VALUELEN ||
- flags < 0) {
- gossip_err("orangefs_inode_setxattr: bogus values of size(%d), flags(%d)\n",
- (int)size,
- flags);
+ if (size > ORANGEFS_MAX_XATTR_VALUELEN)
+ return -EINVAL;
+ if (strlen(name) > ORANGEFS_MAX_XATTR_NAMELEN)
return -EINVAL;
- }
internal_flag = convert_to_internal_xattr_flags(flags);
- if (strlen(name) >= ORANGEFS_MAX_XATTR_NAMELEN) {
- gossip_err
- ("orangefs_inode_setxattr: bogus key size (%d)\n",
- (int)(strlen(name)));
- return -EINVAL;
- }
-
/* This is equivalent to a removexattr */
if (size == 0 && value == NULL) {
gossip_debug(GOSSIP_XATTR_DEBUG,
@@ -358,7 +348,7 @@ try_again:
returned_count = new_op->downcall.resp.listxattr.returned_count;
if (returned_count < 0 ||
- returned_count >= ORANGEFS_MAX_XATTR_LISTLEN) {
+ returned_count > ORANGEFS_MAX_XATTR_LISTLEN) {
gossip_err("%s: impossible value for returned_count:%d:\n",
__func__,
returned_count);
diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
index 906ea6c93260..9008ab9fbd2e 100644
--- a/fs/overlayfs/copy_up.c
+++ b/fs/overlayfs/copy_up.c
@@ -20,6 +20,7 @@
#include <linux/namei.h>
#include <linux/fdtable.h>
#include <linux/ratelimit.h>
+#include <linux/exportfs.h>
#include "overlayfs.h"
#include "ovl_entry.h"
@@ -232,6 +233,79 @@ int ovl_set_attr(struct dentry *upperdentry, struct kstat *stat)
return err;
}
+static struct ovl_fh *ovl_encode_fh(struct dentry *lower, uuid_be *uuid)
+{
+ struct ovl_fh *fh;
+ int fh_type, fh_len, dwords;
+ void *buf;
+ int buflen = MAX_HANDLE_SZ;
+
+ buf = kmalloc(buflen, GFP_TEMPORARY);
+ if (!buf)
+ return ERR_PTR(-ENOMEM);
+
+ /*
+ * We encode a non-connectable file handle for non-dir, because we
+ * only need to find the lower inode number and we don't want to pay
+ * the price of reconnecting the dentry.
+ */
+ dwords = buflen >> 2;
+ fh_type = exportfs_encode_fh(lower, buf, &dwords, 0);
+ buflen = (dwords << 2);
+
+ fh = ERR_PTR(-EIO);
+ if (WARN_ON(fh_type < 0) ||
+ WARN_ON(buflen > MAX_HANDLE_SZ) ||
+ WARN_ON(fh_type == FILEID_INVALID))
+ goto out;
+
+ BUILD_BUG_ON(MAX_HANDLE_SZ + offsetof(struct ovl_fh, fid) > 255);
+ fh_len = offsetof(struct ovl_fh, fid) + buflen;
+ fh = kmalloc(fh_len, GFP_KERNEL);
+ if (!fh) {
+ fh = ERR_PTR(-ENOMEM);
+ goto out;
+ }
+
+ fh->version = OVL_FH_VERSION;
+ fh->magic = OVL_FH_MAGIC;
+ fh->type = fh_type;
+ fh->flags = OVL_FH_FLAG_CPU_ENDIAN;
+ fh->len = fh_len;
+ fh->uuid = *uuid;
+ memcpy(fh->fid, buf, buflen);
+
+out:
+ kfree(buf);
+ return fh;
+}
+
+static int ovl_set_origin(struct dentry *dentry, struct dentry *lower,
+ struct dentry *upper)
+{
+ struct super_block *sb = lower->d_sb;
+ uuid_be *uuid = (uuid_be *) &sb->s_uuid;
+ const struct ovl_fh *fh = NULL;
+ int err;
+
+ /*
+ * When the lower layer doesn't support export operations, store a 'null' fh,
+ * so we can use the overlay.origin xattr to distinguish between a copy
+ * up and a pure upper inode.
+ */
+ if (sb->s_export_op && sb->s_export_op->fh_to_dentry &&
+ uuid_be_cmp(*uuid, NULL_UUID_BE)) {
+ fh = ovl_encode_fh(lower, uuid);
+ if (IS_ERR(fh))
+ return PTR_ERR(fh);
+ }
+
+ err = ovl_do_setxattr(upper, OVL_XATTR_ORIGIN, fh, fh ? fh->len : 0, 0);
+ kfree(fh);
+
+ return err;
+}
+
static int ovl_copy_up_locked(struct dentry *workdir, struct dentry *upperdir,
struct dentry *dentry, struct path *lowerpath,
struct kstat *stat, const char *link,
@@ -316,6 +390,14 @@ static int ovl_copy_up_locked(struct dentry *workdir, struct dentry *upperdir,
if (err)
goto out_cleanup;
+ /*
+ * Store identifier of lower inode in upper inode xattr to
+ * allow lookup of the copy up origin inode.
+ */
+ err = ovl_set_origin(dentry, lowerpath->dentry, temp);
+ if (err)
+ goto out_cleanup;
+
if (tmpfile)
err = ovl_do_link(temp, udir, upper, true);
else
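
ovl_encode_fh() above prefixes the opaque exportfs handle with a small header and stores the total length in a single byte, which is why header plus MAX_HANDLE_SZ is checked against 255. A simplified userspace sketch of that layout; fh_blob and fh_blob_build are illustrative names, and the real structure also carries flags and a uuid:

#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* Simplified stand-in for the blob ovl_encode_fh() builds. */
struct fh_blob {
	uint8_t version;	/* 0 */
	uint8_t magic;		/* 0xfb */
	uint8_t len;		/* header plus fid bytes; must fit in one byte */
	uint8_t type;		/* fid type returned by exportfs */
	uint8_t fid[];		/* opaque file handle bytes */
};

struct fh_blob *fh_blob_build(uint8_t type, const void *fid, size_t fid_len)
{
	size_t total = offsetof(struct fh_blob, fid) + fid_len;
	struct fh_blob *fh;

	if (total > 255)	/* the limit the BUILD_BUG_ON() above enforces */
		return NULL;

	fh = malloc(total);
	if (!fh)
		return NULL;
	fh->version = 0;
	fh->magic = 0xfb;
	fh->len = (uint8_t)total;
	fh->type = type;
	memcpy(fh->fid, fid, fid_len);
	return fh;
}
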
diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c
index 6515796460df..723b98b90698 100644
--- a/fs/overlayfs/dir.c
+++ b/fs/overlayfs/dir.c
@@ -138,36 +138,6 @@ static int ovl_set_opaque(struct dentry *dentry, struct dentry *upperdentry)
return err;
}
-static int ovl_dir_getattr(const struct path *path, struct kstat *stat,
- u32 request_mask, unsigned int flags)
-{
- struct dentry *dentry = path->dentry;
- int err;
- enum ovl_path_type type;
- struct path realpath;
- const struct cred *old_cred;
-
- type = ovl_path_real(dentry, &realpath);
- old_cred = ovl_override_creds(dentry->d_sb);
- err = vfs_getattr(&realpath, stat, request_mask, flags);
- revert_creds(old_cred);
- if (err)
- return err;
-
- stat->dev = dentry->d_sb->s_dev;
- stat->ino = dentry->d_inode->i_ino;
-
- /*
- * It's probably not worth it to count subdirs to get the
- * correct link count. nlink=1 seems to pacify 'find' and
- * other utilities.
- */
- if (OVL_TYPE_MERGE(type))
- stat->nlink = 1;
-
- return 0;
-}
-
/* Common operations required to be done after creation of file on upper */
static void ovl_instantiate(struct dentry *dentry, struct inode *inode,
struct dentry *newdentry, bool hardlink)
@@ -182,6 +152,9 @@ static void ovl_instantiate(struct dentry *dentry, struct inode *inode,
inc_nlink(inode);
}
d_instantiate(dentry, inode);
+ /* Force lookup of new upper hardlink to find its lower */
+ if (hardlink)
+ d_drop(dentry);
}
static bool ovl_type_merge(struct dentry *dentry)
@@ -210,7 +183,7 @@ static int ovl_create_upper(struct dentry *dentry, struct inode *inode,
if (err)
goto out_dput;
- if (ovl_type_merge(dentry->d_parent)) {
+ if (ovl_type_merge(dentry->d_parent) && d_is_dir(newdentry)) {
/* Setting opaque here is just an optimization, allow to fail */
ovl_set_opaque(dentry, newdentry);
}
@@ -1070,7 +1043,7 @@ const struct inode_operations ovl_dir_inode_operations = {
.create = ovl_create,
.mknod = ovl_mknod,
.permission = ovl_permission,
- .getattr = ovl_dir_getattr,
+ .getattr = ovl_getattr,
.listxattr = ovl_listxattr,
.get_acl = ovl_get_acl,
.update_time = ovl_update_time,
diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
index f8fe6bf2036d..ad9547f82da5 100644
--- a/fs/overlayfs/inode.c
+++ b/fs/overlayfs/inode.c
@@ -57,18 +57,78 @@ out:
return err;
}
-static int ovl_getattr(const struct path *path, struct kstat *stat,
- u32 request_mask, unsigned int flags)
+int ovl_getattr(const struct path *path, struct kstat *stat,
+ u32 request_mask, unsigned int flags)
{
struct dentry *dentry = path->dentry;
+ enum ovl_path_type type;
struct path realpath;
const struct cred *old_cred;
+ bool is_dir = S_ISDIR(dentry->d_inode->i_mode);
int err;
- ovl_path_real(dentry, &realpath);
+ type = ovl_path_real(dentry, &realpath);
old_cred = ovl_override_creds(dentry->d_sb);
err = vfs_getattr(&realpath, stat, request_mask, flags);
+ if (err)
+ goto out;
+
+ /*
+ * When all layers are on the same fs, all real inode numbers are
+ * unique, so we use the overlay st_dev, which is friendly to du -x.
+ *
+ * We also use the st_ino of the copy up origin, if we know it.
+ * This guarantees constant st_dev/st_ino across copy up.
+ *
+ * If the filesystem supports NFS export ops, this also guarantees
+ * persistent st_ino across a mount cycle.
+ */
+ if (ovl_same_sb(dentry->d_sb)) {
+ if (OVL_TYPE_ORIGIN(type)) {
+ struct kstat lowerstat;
+ u32 lowermask = STATX_INO | (!is_dir ? STATX_NLINK : 0);
+
+ ovl_path_lower(dentry, &realpath);
+ err = vfs_getattr(&realpath, &lowerstat,
+ lowermask, flags);
+ if (err)
+ goto out;
+
+ WARN_ON_ONCE(stat->dev != lowerstat.dev);
+ /*
+ * Lower hardlinks are broken on copy up to different
+ * upper files, so we cannot use the lower origin st_ino
+ * for those different files, even for the same fs case.
+ */
+ if (is_dir || lowerstat.nlink == 1)
+ stat->ino = lowerstat.ino;
+ }
+ stat->dev = dentry->d_sb->s_dev;
+ } else if (is_dir) {
+ /*
+ * If not all layers are on the same fs the pair {real st_ino;
+ * overlay st_dev} is not unique, so use the non persistent
+ * overlay st_ino.
+ *
+ * Always use the overlay st_dev for directories, so 'find
+ * -xdev' will scan the entire overlay mount and won't cross the
+ * overlay mount boundaries.
+ */
+ stat->dev = dentry->d_sb->s_dev;
+ stat->ino = dentry->d_inode->i_ino;
+ }
+
+ /*
+ * It's probably not worth it to count subdirs to get the
+ * correct link count. nlink=1 seems to pacify 'find' and
+ * other utilities.
+ */
+ if (is_dir && OVL_TYPE_MERGE(type))
+ stat->nlink = 1;
+
+out:
revert_creds(old_cred);
+
return err;
}
@@ -303,6 +363,41 @@ static const struct inode_operations ovl_symlink_inode_operations = {
.update_time = ovl_update_time,
};
+/*
+ * It is possible to stack an overlayfs instance on top of another
+ * overlayfs instance as a lower layer. We need to annotate the
+ * stackable i_mutex locks according to the stack level of the super
+ * block instance. An overlayfs instance can never be at stack
+ * depth 0 (there is always a real fs below it). An overlayfs
+ * inode lock will use the lockdep annotation ovl_i_mutex_key[depth].
+ *
+ * For example, here is a snippet from /proc/lockdep_chains after
+ * dir_iterate of a nested overlayfs:
+ *
+ * [...] &ovl_i_mutex_dir_key[depth] (stack_depth=2)
+ * [...] &ovl_i_mutex_dir_key[depth]#2 (stack_depth=1)
+ * [...] &type->i_mutex_dir_key (stack_depth=0)
+ */
+#define OVL_MAX_NESTING FILESYSTEM_MAX_STACK_DEPTH
+
+static inline void ovl_lockdep_annotate_inode_mutex_key(struct inode *inode)
+{
+#ifdef CONFIG_LOCKDEP
+ static struct lock_class_key ovl_i_mutex_key[OVL_MAX_NESTING];
+ static struct lock_class_key ovl_i_mutex_dir_key[OVL_MAX_NESTING];
+
+ int depth = inode->i_sb->s_stack_depth - 1;
+
+ if (WARN_ON_ONCE(depth < 0 || depth >= OVL_MAX_NESTING))
+ depth = 0;
+
+ if (S_ISDIR(inode->i_mode))
+ lockdep_set_class(&inode->i_rwsem, &ovl_i_mutex_dir_key[depth]);
+ else
+ lockdep_set_class(&inode->i_rwsem, &ovl_i_mutex_key[depth]);
+#endif
+}
+
static void ovl_fill_inode(struct inode *inode, umode_t mode, dev_t rdev)
{
inode->i_ino = get_next_ino();
@@ -312,6 +407,8 @@ static void ovl_fill_inode(struct inode *inode, umode_t mode, dev_t rdev)
inode->i_acl = inode->i_default_acl = ACL_DONT_CACHE;
#endif
+ ovl_lockdep_annotate_inode_mutex_key(inode);
+
switch (mode & S_IFMT) {
case S_IFREG:
inode->i_op = &ovl_file_inode_operations;
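
The reworked ovl_getattr() above chooses which inode number to report. The decision, distilled into a freestanding helper with made-up names (stat_view, choose_ino):

#include <stdbool.h>
#include <stdint.h>

/* Inputs distilled from the ovl_getattr() logic above; all names are invented. */
struct stat_view {
	uint64_t real_ino;	/* ino of the real upper/lower inode */
	uint64_t origin_ino;	/* ino of the copy-up origin, if any */
	uint32_t origin_nlink;	/* link count of that origin */
	bool has_origin;
	bool is_dir;
	bool same_sb;		/* all layers on one filesystem */
};

uint64_t choose_ino(const struct stat_view *v)
{
	/*
	 * Report the origin's inode number only when all layers share a
	 * super block and the file cannot be a broken-up lower hardlink.
	 */
	if (v->same_sb && v->has_origin &&
	    (v->is_dir || v->origin_nlink == 1))
		return v->origin_ino;
	return v->real_ino;
}
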
diff --git a/fs/overlayfs/namei.c b/fs/overlayfs/namei.c
index b8b077821fb0..bad0f665a635 100644
--- a/fs/overlayfs/namei.c
+++ b/fs/overlayfs/namei.c
@@ -12,6 +12,8 @@
#include <linux/namei.h>
#include <linux/xattr.h>
#include <linux/ratelimit.h>
+#include <linux/mount.h>
+#include <linux/exportfs.h>
#include "overlayfs.h"
#include "ovl_entry.h"
@@ -81,6 +83,90 @@ invalid:
goto err_free;
}
+static int ovl_acceptable(void *ctx, struct dentry *dentry)
+{
+ return 1;
+}
+
+static struct dentry *ovl_get_origin(struct dentry *dentry,
+ struct vfsmount *mnt)
+{
+ int res;
+ struct ovl_fh *fh = NULL;
+ struct dentry *origin = NULL;
+ int bytes;
+
+ res = vfs_getxattr(dentry, OVL_XATTR_ORIGIN, NULL, 0);
+ if (res < 0) {
+ if (res == -ENODATA || res == -EOPNOTSUPP)
+ return NULL;
+ goto fail;
+ }
+ /* Zero size value means "copied up but origin unknown" */
+ if (res == 0)
+ return NULL;
+
+ fh = kzalloc(res, GFP_TEMPORARY);
+ if (!fh)
+ return ERR_PTR(-ENOMEM);
+
+ res = vfs_getxattr(dentry, OVL_XATTR_ORIGIN, fh, res);
+ if (res < 0)
+ goto fail;
+
+ if (res < sizeof(struct ovl_fh) || res < fh->len)
+ goto invalid;
+
+ if (fh->magic != OVL_FH_MAGIC)
+ goto invalid;
+
+ /* Treat larger version and unknown flags as "origin unknown" */
+ if (fh->version > OVL_FH_VERSION || fh->flags & ~OVL_FH_FLAG_ALL)
+ goto out;
+
+ /* Treat endianness mismatch as "origin unknown" */
+ if (!(fh->flags & OVL_FH_FLAG_ANY_ENDIAN) &&
+ (fh->flags & OVL_FH_FLAG_BIG_ENDIAN) != OVL_FH_FLAG_CPU_ENDIAN)
+ goto out;
+
+ bytes = (fh->len - offsetof(struct ovl_fh, fid));
+
+ /*
+ * Make sure that the stored uuid matches the uuid of the lower
+ * layer where the file handle will be decoded.
+ */
+ if (uuid_be_cmp(fh->uuid, *(uuid_be *) &mnt->mnt_sb->s_uuid))
+ goto out;
+
+ origin = exportfs_decode_fh(mnt, (struct fid *)fh->fid,
+ bytes >> 2, (int)fh->type,
+ ovl_acceptable, NULL);
+ if (IS_ERR(origin)) {
+ /* Treat stale file handle as "origin unknown" */
+ if (origin == ERR_PTR(-ESTALE))
+ origin = NULL;
+ goto out;
+ }
+
+ if (ovl_dentry_weird(origin) ||
+ ((d_inode(origin)->i_mode ^ d_inode(dentry)->i_mode) & S_IFMT)) {
+ dput(origin);
+ origin = NULL;
+ goto invalid;
+ }
+
+out:
+ kfree(fh);
+ return origin;
+
+fail:
+ pr_warn_ratelimited("overlayfs: failed to get origin (%i)\n", res);
+ goto out;
+invalid:
+ pr_warn_ratelimited("overlayfs: invalid origin (%*phN)\n", res, fh);
+ goto out;
+}
+
static bool ovl_is_opaquedir(struct dentry *dentry)
{
int res;
@@ -192,6 +278,45 @@ static int ovl_lookup_layer(struct dentry *base, struct ovl_lookup_data *d,
return 0;
}
+
+static int ovl_check_origin(struct dentry *dentry, struct dentry *upperdentry,
+ struct path **stackp, unsigned int *ctrp)
+{
+ struct super_block *same_sb = ovl_same_sb(dentry->d_sb);
+ struct ovl_entry *roe = dentry->d_sb->s_root->d_fsdata;
+ struct vfsmount *mnt;
+ struct dentry *origin;
+
+ if (!same_sb || !roe->numlower)
+ return 0;
+
+ /*
+ * Since all layers are on the same fs, we use the first layer for
+ * decoding the file handle. We may get a disconnected dentry,
+ * which is fine, because we only need to hold the origin inode in
+ * cache and use its inode number. We may even get a connected dentry
+ * that is not under the first layer's root. That is also fine for
+ * using its inode number - it's the same as if we held a reference
+ * to a dentry in the first layer that was moved under us.
+ */
+ mnt = roe->lowerstack[0].mnt;
+
+ origin = ovl_get_origin(upperdentry, mnt);
+ if (IS_ERR_OR_NULL(origin))
+ return PTR_ERR(origin);
+
+ BUG_ON(*stackp || *ctrp);
+ *stackp = kmalloc(sizeof(struct path), GFP_TEMPORARY);
+ if (!*stackp) {
+ dput(origin);
+ return -ENOMEM;
+ }
+ **stackp = (struct path) { .dentry = origin, .mnt = mnt };
+ *ctrp = 1;
+
+ return 0;
+}
+
/*
* Returns next layer in stack starting from top.
* Returns -1 if this is the last layer.
@@ -220,6 +345,7 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
const struct cred *old_cred;
struct ovl_fs *ofs = dentry->d_sb->s_fs_info;
struct ovl_entry *poe = dentry->d_parent->d_fsdata;
+ struct ovl_entry *roe = dentry->d_sb->s_root->d_fsdata;
struct path *stack = NULL;
struct dentry *upperdir, *upperdentry = NULL;
unsigned int ctr = 0;
@@ -253,13 +379,20 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
err = -EREMOTE;
goto out;
}
+ if (upperdentry && !d.is_dir) {
+ BUG_ON(!d.stop || d.redirect);
+ err = ovl_check_origin(dentry, upperdentry,
+ &stack, &ctr);
+ if (err)
+ goto out;
+ }
if (d.redirect) {
upperredirect = kstrdup(d.redirect, GFP_KERNEL);
if (!upperredirect)
goto out_put_upper;
if (d.redirect[0] == '/')
- poe = dentry->d_sb->s_root->d_fsdata;
+ poe = roe;
}
upperopaque = d.opaque;
}
@@ -290,10 +423,8 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
if (d.stop)
break;
- if (d.redirect &&
- d.redirect[0] == '/' &&
- poe != dentry->d_sb->s_root->d_fsdata) {
- poe = dentry->d_sb->s_root->d_fsdata;
+ if (d.redirect && d.redirect[0] == '/' && poe != roe) {
+ poe = roe;
/* Find the current layer on the root dentry */
for (i = 0; i < poe->numlower; i++)
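
ovl_get_origin() above only decodes a stored handle it can trust. A compressed sketch of those checks; the real code distinguishes "invalid" from "origin unknown", while this sketch folds both into one boolean and uses invented names:

#include <stdbool.h>
#include <stdint.h>

#define FH_MAGIC		0xfb
#define FH_VERSION		0
#define FH_FLAG_BIG_ENDIAN	(1u << 0)
#define FH_FLAG_ANY_ENDIAN	(1u << 1)
#define FH_FLAG_ALL		(FH_FLAG_BIG_ENDIAN | FH_FLAG_ANY_ENDIAN)

/* Detect the host byte order at run time instead of at compile time. */
static uint8_t cpu_endian_flag(void)
{
	const uint16_t probe = 1;

	return *(const uint8_t *)&probe ? 0 : FH_FLAG_BIG_ENDIAN;
}

/*
 * A stored handle is decodable only if it carries the expected magic, a
 * version we understand, no unknown flags, and a fid that is either
 * endian-neutral or written in this CPU's byte order.
 */
bool fh_decodable(uint8_t magic, uint8_t version, uint8_t flags)
{
	if (magic != FH_MAGIC || version > FH_VERSION)
		return false;
	if (flags & ~FH_FLAG_ALL)
		return false;
	if (flags & FH_FLAG_ANY_ENDIAN)
		return true;
	return (flags & FH_FLAG_BIG_ENDIAN) == cpu_endian_flag();
}
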
diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h
index 741dc0b6931f..caa36cb9c46d 100644
--- a/fs/overlayfs/overlayfs.h
+++ b/fs/overlayfs/overlayfs.h
@@ -8,18 +8,56 @@
*/
#include <linux/kernel.h>
+#include <linux/uuid.h>
enum ovl_path_type {
__OVL_PATH_UPPER = (1 << 0),
__OVL_PATH_MERGE = (1 << 1),
+ __OVL_PATH_ORIGIN = (1 << 2),
};
#define OVL_TYPE_UPPER(type) ((type) & __OVL_PATH_UPPER)
#define OVL_TYPE_MERGE(type) ((type) & __OVL_PATH_MERGE)
+#define OVL_TYPE_ORIGIN(type) ((type) & __OVL_PATH_ORIGIN)
#define OVL_XATTR_PREFIX XATTR_TRUSTED_PREFIX "overlay."
#define OVL_XATTR_OPAQUE OVL_XATTR_PREFIX "opaque"
#define OVL_XATTR_REDIRECT OVL_XATTR_PREFIX "redirect"
+#define OVL_XATTR_ORIGIN OVL_XATTR_PREFIX "origin"
+
+/*
+ * The tuple (fh,uuid) is a universally unique identifier for a copy up origin,
+ * where:
+ * origin.fh - exported file handle of the lower file
+ * origin.uuid - uuid of the lower filesystem
+ */
+#define OVL_FH_VERSION 0
+#define OVL_FH_MAGIC 0xfb
+
+/* CPU byte order required for fid decoding: */
+#define OVL_FH_FLAG_BIG_ENDIAN (1 << 0)
+#define OVL_FH_FLAG_ANY_ENDIAN (1 << 1)
+
+#define OVL_FH_FLAG_ALL (OVL_FH_FLAG_BIG_ENDIAN | OVL_FH_FLAG_ANY_ENDIAN)
+
+#if defined(__LITTLE_ENDIAN)
+#define OVL_FH_FLAG_CPU_ENDIAN 0
+#elif defined(__BIG_ENDIAN)
+#define OVL_FH_FLAG_CPU_ENDIAN OVL_FH_FLAG_BIG_ENDIAN
+#else
+#error Endianness not defined
+#endif
+
+/* On-disk and in-memory format for redirect by file handle */
+struct ovl_fh {
+ u8 version; /* 0 */
+ u8 magic; /* 0xfb */
+ u8 len; /* size of this header + size of fid */
+ u8 flags; /* OVL_FH_FLAG_* */
+ u8 type; /* fid_type of fid */
+ uuid_be uuid; /* uuid of filesystem */
+ u8 fid[0]; /* file identifier */
+} __packed;
#define OVL_ISUPPER_MASK 1UL
@@ -151,6 +189,7 @@ int ovl_want_write(struct dentry *dentry);
void ovl_drop_write(struct dentry *dentry);
struct dentry *ovl_workdir(struct dentry *dentry);
const struct cred *ovl_override_creds(struct super_block *sb);
+struct super_block *ovl_same_sb(struct super_block *sb);
struct ovl_entry *ovl_alloc_entry(unsigned int numlower);
bool ovl_dentry_remote(struct dentry *dentry);
bool ovl_dentry_weird(struct dentry *dentry);
@@ -197,6 +236,8 @@ void ovl_workdir_cleanup(struct inode *dir, struct vfsmount *mnt,
/* inode.c */
int ovl_setattr(struct dentry *dentry, struct iattr *attr);
+int ovl_getattr(const struct path *path, struct kstat *stat,
+ u32 request_mask, unsigned int flags);
int ovl_permission(struct inode *inode, int mask);
int ovl_xattr_set(struct dentry *dentry, const char *name, const void *value,
size_t size, int flags);
diff --git a/fs/overlayfs/ovl_entry.h b/fs/overlayfs/ovl_entry.h
index 59614faa14c3..b2023ddb8532 100644
--- a/fs/overlayfs/ovl_entry.h
+++ b/fs/overlayfs/ovl_entry.h
@@ -29,6 +29,8 @@ struct ovl_fs {
const struct cred *creator_cred;
bool tmpfile;
wait_queue_head_t copyup_wq;
+ /* sb common to all layers */
+ struct super_block *same_sb;
};
/* private information held for every overlayfs dentry */
diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
index c9e70d39c1ea..9828b7de8999 100644
--- a/fs/overlayfs/super.c
+++ b/fs/overlayfs/super.c
@@ -49,11 +49,28 @@ static void ovl_dentry_release(struct dentry *dentry)
}
}
+static int ovl_check_append_only(struct inode *inode, int flag)
+{
+ /*
+ * This test was moot in vfs may_open() because overlay inode does
+ * not have the S_APPEND flag, so re-check on real upper inode
+ */
+ if (IS_APPEND(inode)) {
+ if ((flag & O_ACCMODE) != O_RDONLY && !(flag & O_APPEND))
+ return -EPERM;
+ if (flag & O_TRUNC)
+ return -EPERM;
+ }
+
+ return 0;
+}
+
static struct dentry *ovl_d_real(struct dentry *dentry,
const struct inode *inode,
unsigned int open_flags)
{
struct dentry *real;
+ int err;
if (!d_is_reg(dentry)) {
if (!inode || inode == d_inode(dentry))
@@ -65,15 +82,20 @@ static struct dentry *ovl_d_real(struct dentry *dentry,
return dentry;
if (open_flags) {
- int err = ovl_open_maybe_copy_up(dentry, open_flags);
-
+ err = ovl_open_maybe_copy_up(dentry, open_flags);
if (err)
return ERR_PTR(err);
}
real = ovl_dentry_upper(dentry);
- if (real && (!inode || inode == d_inode(real)))
+ if (real && (!inode || inode == d_inode(real))) {
+ if (!inode) {
+ err = ovl_check_append_only(d_inode(real), open_flags);
+ if (err)
+ return ERR_PTR(err);
+ }
return real;
+ }
real = ovl_dentry_lower(dentry);
if (!real)
@@ -709,8 +731,8 @@ static const struct xattr_handler *ovl_xattr_handlers[] = {
static int ovl_fill_super(struct super_block *sb, void *data, int silent)
{
- struct path upperpath = { NULL, NULL };
- struct path workpath = { NULL, NULL };
+ struct path upperpath = { };
+ struct path workpath = { };
struct dentry *root_dentry;
struct inode *realinode;
struct ovl_entry *oe;
@@ -892,11 +914,19 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
ufs->lower_mnt[ufs->numlower] = mnt;
ufs->numlower++;
+
+ /* Check if all lower layers are on the same sb */
+ if (i == 0)
+ ufs->same_sb = mnt->mnt_sb;
+ else if (ufs->same_sb != mnt->mnt_sb)
+ ufs->same_sb = NULL;
}
/* If the upper fs is nonexistent, we mark overlayfs r/o too */
if (!ufs->upper_mnt)
sb->s_flags |= MS_RDONLY;
+ else if (ufs->upper_mnt->mnt_sb != ufs->same_sb)
+ ufs->same_sb = NULL;
if (remote)
sb->s_d_op = &ovl_reval_dentry_operations;
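
The same_sb bookkeeping added to ovl_fill_super() is a fold over the layers: remember the first super block, drop to NULL on the first mismatch. The same reduction as a freestanding sketch (common_sb is an invented name):

#include <stddef.h>

/*
 * Fold an array of per-layer super block pointers down to the one they all
 * share, or NULL if any layer differs -- the same reduction the mount-time
 * loop above performs incrementally. Illustrative only.
 */
const void *common_sb(const void *const *sbs, size_t n)
{
	const void *same = NULL;
	size_t i;

	for (i = 0; i < n; i++) {
		if (i == 0)
			same = sbs[i];
		else if (sbs[i] != same)
			same = NULL;	/* layers span more than one fs */
	}
	return same;
}
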
diff --git a/fs/overlayfs/util.c b/fs/overlayfs/util.c
index 6e610a205e15..cfdea47313a1 100644
--- a/fs/overlayfs/util.c
+++ b/fs/overlayfs/util.c
@@ -40,6 +40,13 @@ const struct cred *ovl_override_creds(struct super_block *sb)
return override_creds(ofs->creator_cred);
}
+struct super_block *ovl_same_sb(struct super_block *sb)
+{
+ struct ovl_fs *ofs = sb->s_fs_info;
+
+ return ofs->same_sb;
+}
+
struct ovl_entry *ovl_alloc_entry(unsigned int numlower)
{
size_t size = offsetof(struct ovl_entry, lowerstack[numlower]);
@@ -75,11 +82,13 @@ enum ovl_path_type ovl_path_type(struct dentry *dentry)
type = __OVL_PATH_UPPER;
/*
- * Non-dir dentry can hold lower dentry from previous
- * location.
+ * Non-dir dentry can hold lower dentry of its copy up origin.
*/
- if (oe->numlower && d_is_dir(dentry))
- type |= __OVL_PATH_MERGE;
+ if (oe->numlower) {
+ type |= __OVL_PATH_ORIGIN;
+ if (d_is_dir(dentry))
+ type |= __OVL_PATH_MERGE;
+ }
} else {
if (oe->numlower > 1)
type |= __OVL_PATH_MERGE;
@@ -100,7 +109,7 @@ void ovl_path_lower(struct dentry *dentry, struct path *path)
{
struct ovl_entry *oe = dentry->d_fsdata;
- *path = oe->numlower ? oe->lowerstack[0] : (struct path) { NULL, NULL };
+ *path = oe->numlower ? oe->lowerstack[0] : (struct path) { };
}
enum ovl_path_type ovl_path_real(struct dentry *dentry, struct path *path)
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 9e3ac5c11780..45f6bf68fff3 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -821,10 +821,7 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
if (!mmget_not_zero(mm))
goto free;
- /* Maybe we should limit FOLL_FORCE to actual ptrace users? */
- flags = FOLL_FORCE;
- if (write)
- flags |= FOLL_WRITE;
+ flags = write ? FOLL_WRITE : 0;
while (count > 0) {
int this_len = min_t(int, count, PAGE_SIZE);
diff --git a/fs/proc/generic.c b/fs/proc/generic.c
index ee27feb34cf4..9425c0d97262 100644
--- a/fs/proc/generic.c
+++ b/fs/proc/generic.c
@@ -472,6 +472,7 @@ struct proc_dir_entry *proc_create_mount_point(const char *name)
ent->data = NULL;
ent->proc_fops = NULL;
ent->proc_iops = NULL;
+ parent->nlink++;
if (proc_register(parent, ent) < 0) {
kfree(ent);
parent->nlink--;
diff --git a/fs/proc/inode.c b/fs/proc/inode.c
index 2cc7a8030275..e250910cffc8 100644
--- a/fs/proc/inode.c
+++ b/fs/proc/inode.c
@@ -58,7 +58,7 @@ static struct inode *proc_alloc_inode(struct super_block *sb)
struct proc_inode *ei;
struct inode *inode;
- ei = (struct proc_inode *)kmem_cache_alloc(proc_inode_cachep, GFP_KERNEL);
+ ei = kmem_cache_alloc(proc_inode_cachep, GFP_KERNEL);
if (!ei)
return NULL;
ei->pid = NULL;
diff --git a/fs/proc/namespaces.c b/fs/proc/namespaces.c
index 766f0c637ad1..3803b24ca220 100644
--- a/fs/proc/namespaces.c
+++ b/fs/proc/namespaces.c
@@ -23,6 +23,7 @@ static const struct proc_ns_operations *ns_entries[] = {
#endif
#ifdef CONFIG_PID_NS
&pidns_operations,
+ &pidns_for_children_operations,
#endif
#ifdef CONFIG_USER_NS
&userns_operations,
diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
index d04ea4349909..67985a7233c2 100644
--- a/fs/proc/proc_sysctl.c
+++ b/fs/proc/proc_sysctl.c
@@ -408,10 +408,6 @@ static void next_entry(struct ctl_table_header **phead, struct ctl_table **pentr
*pentry = entry;
}
-void register_sysctl_root(struct ctl_table_root *root)
-{
-}
-
/*
* sysctl_perm does NOT grant the superuser all rights automatically, because
* some sysctl variables are readonly even to root.
diff --git a/fs/reiserfs/item_ops.c b/fs/reiserfs/item_ops.c
index aca73dd73906..e3c558d1b78c 100644
--- a/fs/reiserfs/item_ops.c
+++ b/fs/reiserfs/item_ops.c
@@ -724,18 +724,18 @@ static void errcatch_print_vi(struct virtual_item *vi)
}
static struct item_operations errcatch_ops = {
- errcatch_bytes_number,
- errcatch_decrement_key,
- errcatch_is_left_mergeable,
- errcatch_print_item,
- errcatch_check_item,
-
- errcatch_create_vi,
- errcatch_check_left,
- errcatch_check_right,
- errcatch_part_size,
- errcatch_unit_num,
- errcatch_print_vi
+ .bytes_number = errcatch_bytes_number,
+ .decrement_key = errcatch_decrement_key,
+ .is_left_mergeable = errcatch_is_left_mergeable,
+ .print_item = errcatch_print_item,
+ .check_item = errcatch_check_item,
+
+ .create_vi = errcatch_create_vi,
+ .check_left = errcatch_check_left,
+ .check_right = errcatch_check_right,
+ .part_size = errcatch_part_size,
+ .unit_num = errcatch_unit_num,
+ .print_vi = errcatch_print_vi
};
#if ! (TYPE_STAT_DATA == 0 && TYPE_INDIRECT == 1 && TYPE_DIRECT == 2 && TYPE_DIRENTRY == 3)
diff --git a/fs/select.c b/fs/select.c
index bd4b2ccfd346..d6c652a31e99 100644
--- a/fs/select.c
+++ b/fs/select.c
@@ -633,10 +633,7 @@ int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
goto out_nofds;
alloc_size = 6 * size;
- bits = kmalloc(alloc_size, GFP_KERNEL|__GFP_NOWARN);
- if (!bits && alloc_size > PAGE_SIZE)
- bits = vmalloc(alloc_size);
-
+ bits = kvmalloc(alloc_size, GFP_KERNEL);
if (!bits)
goto out_nofds;
}
diff --git a/fs/seq_file.c b/fs/seq_file.c
index ca69fb99e41a..dc7c2be963ed 100644
--- a/fs/seq_file.c
+++ b/fs/seq_file.c
@@ -25,21 +25,7 @@ static void seq_set_overflow(struct seq_file *m)
static void *seq_buf_alloc(unsigned long size)
{
- void *buf;
- gfp_t gfp = GFP_KERNEL;
-
- /*
- * For high order allocations, use __GFP_NORETRY to avoid oom-killing -
- * it's better to fall back to vmalloc() than to kill things. For small
- * allocations, just use GFP_KERNEL which will oom kill, thus no need
- * for vmalloc fallback.
- */
- if (size > PAGE_SIZE)
- gfp |= __GFP_NORETRY | __GFP_NOWARN;
- buf = kmalloc(size, gfp);
- if (!buf && size > PAGE_SIZE)
- buf = vmalloc(size);
- return buf;
+ return kvmalloc(size, GFP_KERNEL);
}
/**
diff --git a/fs/signalfd.c b/fs/signalfd.c
index 270221fcef42..7e3d71109f51 100644
--- a/fs/signalfd.c
+++ b/fs/signalfd.c
@@ -38,7 +38,7 @@ void signalfd_cleanup(struct sighand_struct *sighand)
/*
* The lockless check can race with remove_wait_queue() in progress,
* but in this case its caller should run under rcu_read_lock() and
- * sighand_cachep is SLAB_DESTROY_BY_RCU, we can safely return.
+ * sighand_cachep is SLAB_TYPESAFE_BY_RCU, we can safely return.
*/
if (likely(!waitqueue_active(wqh)))
return;
diff --git a/fs/tracefs/inode.c b/fs/tracefs/inode.c
index 21d36d284735..328e89c2cf83 100644
--- a/fs/tracefs/inode.c
+++ b/fs/tracefs/inode.c
@@ -266,7 +266,7 @@ static const struct super_operations tracefs_super_operations = {
static int trace_fill_super(struct super_block *sb, void *data, int silent)
{
- static struct tree_descr trace_files[] = {{""}};
+ static const struct tree_descr trace_files[] = {{""}};
struct tracefs_fs_info *fsi;
int err;
diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c
index b777bddaa1dd..566079d9b402 100644
--- a/fs/ubifs/dir.c
+++ b/fs/ubifs/dir.c
@@ -121,7 +121,7 @@ struct inode *ubifs_new_inode(struct ubifs_info *c, struct inode *dir,
inode_init_owner(inode, dir, mode);
inode->i_mtime = inode->i_atime = inode->i_ctime =
- ubifs_current_time(inode);
+ current_time(inode);
inode->i_mapping->nrpages = 0;
switch (mode & S_IFMT) {
@@ -285,6 +285,15 @@ static struct dentry *ubifs_lookup(struct inode *dir, struct dentry *dentry,
goto out_dent;
}
+ if (ubifs_crypt_is_encrypted(dir) &&
+ (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) &&
+ !fscrypt_has_permitted_context(dir, inode)) {
+ ubifs_warn(c, "Inconsistent encryption contexts: %lu/%lu",
+ dir->i_ino, inode->i_ino);
+ err = -EPERM;
+ goto out_inode;
+ }
+
done:
kfree(dent);
fscrypt_free_filename(&nm);
@@ -295,6 +304,8 @@ done:
d_add(dentry, inode);
return NULL;
+out_inode:
+ iput(inode);
out_dent:
kfree(dent);
out_fname:
@@ -755,7 +766,7 @@ static int ubifs_link(struct dentry *old_dentry, struct inode *dir,
inc_nlink(inode);
ihold(inode);
- inode->i_ctime = ubifs_current_time(inode);
+ inode->i_ctime = current_time(inode);
dir->i_size += sz_change;
dir_ui->ui_size = dir->i_size;
dir->i_mtime = dir->i_ctime = inode->i_ctime;
@@ -830,7 +841,7 @@ static int ubifs_unlink(struct inode *dir, struct dentry *dentry)
}
lock_2_inodes(dir, inode);
- inode->i_ctime = ubifs_current_time(dir);
+ inode->i_ctime = current_time(dir);
drop_nlink(inode);
dir->i_size -= sz_change;
dir_ui->ui_size = dir->i_size;
@@ -934,7 +945,7 @@ static int ubifs_rmdir(struct inode *dir, struct dentry *dentry)
}
lock_2_inodes(dir, inode);
- inode->i_ctime = ubifs_current_time(dir);
+ inode->i_ctime = current_time(dir);
clear_nlink(inode);
drop_nlink(dir);
dir->i_size -= sz_change;
@@ -1411,7 +1422,7 @@ static int do_rename(struct inode *old_dir, struct dentry *old_dentry,
* Like most other Unix systems, set the @i_ctime for inodes on a
* rename.
*/
- time = ubifs_current_time(old_dir);
+ time = current_time(old_dir);
old_inode->i_ctime = time;
/* We must adjust parent link count when renaming directories */
@@ -1584,7 +1595,7 @@ static int ubifs_xrename(struct inode *old_dir, struct dentry *old_dentry,
lock_4_inodes(old_dir, new_dir, NULL, NULL);
- time = ubifs_current_time(old_dir);
+ time = current_time(old_dir);
fst_inode->i_ctime = time;
snd_inode->i_ctime = time;
old_dir->i_mtime = old_dir->i_ctime = time;
diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
index d9ae86f96df7..2cda3d67e2d0 100644
--- a/fs/ubifs/file.c
+++ b/fs/ubifs/file.c
@@ -1196,7 +1196,7 @@ static int do_truncation(struct ubifs_info *c, struct inode *inode,
mutex_lock(&ui->ui_mutex);
ui->ui_size = inode->i_size;
/* Truncation changes inode [mc]time */
- inode->i_mtime = inode->i_ctime = ubifs_current_time(inode);
+ inode->i_mtime = inode->i_ctime = current_time(inode);
/* Other attributes may be changed at the same time as well */
do_attr_changes(inode, attr);
err = ubifs_jnl_truncate(c, inode, old_size, new_size);
@@ -1243,7 +1243,7 @@ static int do_setattr(struct ubifs_info *c, struct inode *inode,
mutex_lock(&ui->ui_mutex);
if (attr->ia_valid & ATTR_SIZE) {
/* Truncation changes inode [mc]time */
- inode->i_mtime = inode->i_ctime = ubifs_current_time(inode);
+ inode->i_mtime = inode->i_ctime = current_time(inode);
/* 'truncate_setsize()' changed @i_size, update @ui_size */
ui->ui_size = inode->i_size;
}
@@ -1420,7 +1420,7 @@ int ubifs_update_time(struct inode *inode, struct timespec *time,
*/
static int update_mctime(struct inode *inode)
{
- struct timespec now = ubifs_current_time(inode);
+ struct timespec now = current_time(inode);
struct ubifs_inode *ui = ubifs_inode(inode);
struct ubifs_info *c = inode->i_sb->s_fs_info;
@@ -1434,7 +1434,7 @@ static int update_mctime(struct inode *inode)
return err;
mutex_lock(&ui->ui_mutex);
- inode->i_mtime = inode->i_ctime = ubifs_current_time(inode);
+ inode->i_mtime = inode->i_ctime = current_time(inode);
release = ui->dirty;
mark_inode_dirty_sync(inode);
mutex_unlock(&ui->ui_mutex);
@@ -1511,7 +1511,7 @@ static int ubifs_vm_page_mkwrite(struct vm_fault *vmf)
struct page *page = vmf->page;
struct inode *inode = file_inode(vmf->vma->vm_file);
struct ubifs_info *c = inode->i_sb->s_fs_info;
- struct timespec now = ubifs_current_time(inode);
+ struct timespec now = current_time(inode);
struct ubifs_budget_req req = { .new_page = 1 };
int err, update_time;
@@ -1579,7 +1579,7 @@ static int ubifs_vm_page_mkwrite(struct vm_fault *vmf)
struct ubifs_inode *ui = ubifs_inode(inode);
mutex_lock(&ui->ui_mutex);
- inode->i_mtime = inode->i_ctime = ubifs_current_time(inode);
+ inode->i_mtime = inode->i_ctime = current_time(inode);
release = ui->dirty;
mark_inode_dirty_sync(inode);
mutex_unlock(&ui->ui_mutex);
diff --git a/fs/ubifs/ioctl.c b/fs/ubifs/ioctl.c
index da519ba205f6..12b9eb5005ff 100644
--- a/fs/ubifs/ioctl.c
+++ b/fs/ubifs/ioctl.c
@@ -126,7 +126,7 @@ static int setflags(struct inode *inode, int flags)
ui->flags = ioctl2ubifs(flags);
ubifs_set_inode_flags(inode);
- inode->i_ctime = ubifs_current_time(inode);
+ inode->i_ctime = current_time(inode);
release = ui->dirty;
mark_inode_dirty_sync(inode);
mutex_unlock(&ui->ui_mutex);
diff --git a/fs/ubifs/misc.h b/fs/ubifs/misc.h
index 8ece6ca58c0b..caf83d68fb38 100644
--- a/fs/ubifs/misc.h
+++ b/fs/ubifs/misc.h
@@ -225,16 +225,6 @@ static inline void *ubifs_idx_key(const struct ubifs_info *c,
}
/**
- * ubifs_current_time - round current time to time granularity.
- * @inode: inode
- */
-static inline struct timespec ubifs_current_time(struct inode *inode)
-{
- return (inode->i_sb->s_time_gran < NSEC_PER_SEC) ?
- current_fs_time(inode->i_sb) : CURRENT_TIME_SEC;
-}
-
-/**
* ubifs_tnc_lookup - look up a file-system node.
* @c: UBIFS file-system description object
* @key: node key to lookup
diff --git a/fs/ubifs/sb.c b/fs/ubifs/sb.c
index 7f1ead29e727..8c25081a5109 100644
--- a/fs/ubifs/sb.c
+++ b/fs/ubifs/sb.c
@@ -84,6 +84,8 @@ static int create_default_filesystem(struct ubifs_info *c)
int min_leb_cnt = UBIFS_MIN_LEB_CNT;
long long tmp64, main_bytes;
__le64 tmp_le64;
+ __le32 tmp_le32;
+ struct timespec ts;
/* Some functions called from here depend on the @c->key_len filed */
c->key_len = UBIFS_SK_LEN;
@@ -298,13 +300,17 @@ static int create_default_filesystem(struct ubifs_info *c)
ino->ch.node_type = UBIFS_INO_NODE;
ino->creat_sqnum = cpu_to_le64(++c->max_sqnum);
ino->nlink = cpu_to_le32(2);
- tmp_le64 = cpu_to_le64(CURRENT_TIME_SEC.tv_sec);
+
+ ktime_get_real_ts(&ts);
+ ts = timespec_trunc(ts, DEFAULT_TIME_GRAN);
+ tmp_le64 = cpu_to_le64(ts.tv_sec);
ino->atime_sec = tmp_le64;
ino->ctime_sec = tmp_le64;
ino->mtime_sec = tmp_le64;
- ino->atime_nsec = 0;
- ino->ctime_nsec = 0;
- ino->mtime_nsec = 0;
+ tmp_le32 = cpu_to_le32(ts.tv_nsec);
+ ino->atime_nsec = tmp_le32;
+ ino->ctime_nsec = tmp_le32;
+ ino->mtime_nsec = tmp_le32;
ino->mode = cpu_to_le32(S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO);
ino->size = cpu_to_le64(UBIFS_INO_NODE_SZ);
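
The sb.c hunk above replaces CURRENT_TIME_SEC with the current time truncated to the default granularity. A userspace approximation of that truncation step, in the spirit of the kernel's timespec_trunc():

#define _POSIX_C_SOURCE 199309L
#include <stdio.h>
#include <time.h>

/* Round a timestamp down to a time granularity given in nanoseconds. */
struct timespec trunc_to_gran(struct timespec ts, long gran)
{
	if (gran == 1)
		return ts;			/* full nanosecond resolution */
	if (gran == 1000000000L)
		ts.tv_nsec = 0;			/* whole-second resolution */
	else
		ts.tv_nsec -= ts.tv_nsec % gran; /* anything in between */
	return ts;
}

int main(void)
{
	struct timespec now;

	clock_gettime(CLOCK_REALTIME, &now);
	now = trunc_to_gran(now, 1000);		/* e.g. microsecond granularity */
	printf("%lld.%09ld\n", (long long)now.tv_sec, now.tv_nsec);
	return 0;
}
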
diff --git a/fs/ubifs/xattr.c b/fs/ubifs/xattr.c
index efe00fcb8b75..3e53fdbf7997 100644
--- a/fs/ubifs/xattr.c
+++ b/fs/ubifs/xattr.c
@@ -152,7 +152,7 @@ static int create_xattr(struct ubifs_info *c, struct inode *host,
ui->data_len = size;
mutex_lock(&host_ui->ui_mutex);
- host->i_ctime = ubifs_current_time(host);
+ host->i_ctime = current_time(host);
host_ui->xattr_cnt += 1;
host_ui->xattr_size += CALC_DENT_SIZE(fname_len(nm));
host_ui->xattr_size += CALC_XATTR_BYTES(size);
@@ -234,7 +234,7 @@ static int change_xattr(struct ubifs_info *c, struct inode *host,
mutex_unlock(&ui->ui_mutex);
mutex_lock(&host_ui->ui_mutex);
- host->i_ctime = ubifs_current_time(host);
+ host->i_ctime = current_time(host);
host_ui->xattr_size -= CALC_XATTR_BYTES(old_size);
host_ui->xattr_size += CALC_XATTR_BYTES(size);
@@ -488,7 +488,7 @@ static int remove_xattr(struct ubifs_info *c, struct inode *host,
return err;
mutex_lock(&host_ui->ui_mutex);
- host->i_ctime = ubifs_current_time(host);
+ host->i_ctime = current_time(host);
host_ui->xattr_cnt -= 1;
host_ui->xattr_size -= CALC_DENT_SIZE(fname_len(nm));
host_ui->xattr_size -= CALC_XATTR_BYTES(ui->data_len);
diff --git a/fs/ufs/ialloc.c b/fs/ufs/ialloc.c
index 9774555b3721..d1dd8cc33179 100644
--- a/fs/ufs/ialloc.c
+++ b/fs/ufs/ialloc.c
@@ -176,6 +176,7 @@ struct inode *ufs_new_inode(struct inode *dir, umode_t mode)
struct ufs_cg_private_info * ucpi;
struct ufs_cylinder_group * ucg;
struct inode * inode;
+ struct timespec64 ts;
unsigned cg, bit, i, j, start;
struct ufs_inode_info *ufsi;
int err = -ENOSPC;
@@ -323,8 +324,9 @@ cg_found:
lock_buffer(bh);
ufs2_inode = (struct ufs2_inode *)bh->b_data;
ufs2_inode += ufs_inotofsbo(inode->i_ino);
- ufs2_inode->ui_birthtime = cpu_to_fs64(sb, CURRENT_TIME.tv_sec);
- ufs2_inode->ui_birthnsec = cpu_to_fs32(sb, CURRENT_TIME.tv_nsec);
+ ktime_get_real_ts64(&ts);
+ ufs2_inode->ui_birthtime = cpu_to_fs64(sb, ts.tv_sec);
+ ufs2_inode->ui_birthnsec = cpu_to_fs32(sb, ts.tv_nsec);
mark_buffer_dirty(bh);
unlock_buffer(bh);
if (sb->s_flags & MS_SYNCHRONOUS)
diff --git a/fs/xattr.c b/fs/xattr.c
index 7e3317cf4045..464c94bf65f9 100644
--- a/fs/xattr.c
+++ b/fs/xattr.c
@@ -431,12 +431,9 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
if (size) {
if (size > XATTR_SIZE_MAX)
return -E2BIG;
- kvalue = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);
- if (!kvalue) {
- kvalue = vmalloc(size);
- if (!kvalue)
- return -ENOMEM;
- }
+ kvalue = kvmalloc(size, GFP_KERNEL);
+ if (!kvalue)
+ return -ENOMEM;
if (copy_from_user(kvalue, value, size)) {
error = -EFAULT;
goto out;
@@ -528,12 +525,9 @@ getxattr(struct dentry *d, const char __user *name, void __user *value,
if (size) {
if (size > XATTR_SIZE_MAX)
size = XATTR_SIZE_MAX;
- kvalue = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
- if (!kvalue) {
- kvalue = vmalloc(size);
- if (!kvalue)
- return -ENOMEM;
- }
+ kvalue = kvzalloc(size, GFP_KERNEL);
+ if (!kvalue)
+ return -ENOMEM;
}
error = vfs_getxattr(d, kname, kvalue, size);
@@ -611,12 +605,9 @@ listxattr(struct dentry *d, char __user *list, size_t size)
if (size) {
if (size > XATTR_LIST_MAX)
size = XATTR_LIST_MAX;
- klist = kmalloc(size, __GFP_NOWARN | GFP_KERNEL);
- if (!klist) {
- klist = vmalloc(size);
- if (!klist)
- return -ENOMEM;
- }
+ klist = kvmalloc(size, GFP_KERNEL);
+ if (!klist)
+ return -ENOMEM;
}
error = vfs_listxattr(d, klist, size);
diff --git a/fs/xfs/Makefile b/fs/xfs/Makefile
index 26ef1958b65b..5c90f82b8f6b 100644
--- a/fs/xfs/Makefile
+++ b/fs/xfs/Makefile
@@ -79,6 +79,7 @@ xfs-y += xfs_aops.o \
xfs_extent_busy.o \
xfs_file.o \
xfs_filestream.o \
+ xfs_fsmap.o \
xfs_fsops.o \
xfs_globals.o \
xfs_icache.o \
diff --git a/fs/xfs/kmem.c b/fs/xfs/kmem.c
index 780fc8986dab..393b6849aeb3 100644
--- a/fs/xfs/kmem.c
+++ b/fs/xfs/kmem.c
@@ -67,7 +67,7 @@ kmem_zalloc_large(size_t size, xfs_km_flags_t flags)
nofs_flag = memalloc_nofs_save();
lflags = kmem_flags_convert(flags);
- ptr = __vmalloc(size, lflags | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
+ ptr = __vmalloc(size, lflags | __GFP_ZERO, PAGE_KERNEL);
if (flags & KM_NOFS)
memalloc_nofs_restore(nofs_flag);
diff --git a/fs/xfs/libxfs/xfs_alloc.c b/fs/xfs/libxfs/xfs_alloc.c
index 369adcc18c02..7486401ccbd3 100644
--- a/fs/xfs/libxfs/xfs_alloc.c
+++ b/fs/xfs/libxfs/xfs_alloc.c
@@ -2868,3 +2868,60 @@ err:
xfs_trans_brelse(tp, agbp);
return error;
}
+
+struct xfs_alloc_query_range_info {
+ xfs_alloc_query_range_fn fn;
+ void *priv;
+};
+
+/* Format btree record and pass to our callback. */
+STATIC int
+xfs_alloc_query_range_helper(
+ struct xfs_btree_cur *cur,
+ union xfs_btree_rec *rec,
+ void *priv)
+{
+ struct xfs_alloc_query_range_info *query = priv;
+ struct xfs_alloc_rec_incore irec;
+
+ irec.ar_startblock = be32_to_cpu(rec->alloc.ar_startblock);
+ irec.ar_blockcount = be32_to_cpu(rec->alloc.ar_blockcount);
+ return query->fn(cur, &irec, query->priv);
+}
+
+/* Find all free space within a given range of blocks. */
+int
+xfs_alloc_query_range(
+ struct xfs_btree_cur *cur,
+ struct xfs_alloc_rec_incore *low_rec,
+ struct xfs_alloc_rec_incore *high_rec,
+ xfs_alloc_query_range_fn fn,
+ void *priv)
+{
+ union xfs_btree_irec low_brec;
+ union xfs_btree_irec high_brec;
+ struct xfs_alloc_query_range_info query;
+
+ ASSERT(cur->bc_btnum == XFS_BTNUM_BNO);
+ low_brec.a = *low_rec;
+ high_brec.a = *high_rec;
+ query.priv = priv;
+ query.fn = fn;
+ return xfs_btree_query_range(cur, &low_brec, &high_brec,
+ xfs_alloc_query_range_helper, &query);
+}
+
+/* Find all free space records. */
+int
+xfs_alloc_query_all(
+ struct xfs_btree_cur *cur,
+ xfs_alloc_query_range_fn fn,
+ void *priv)
+{
+ struct xfs_alloc_query_range_info query;
+
+ ASSERT(cur->bc_btnum == XFS_BTNUM_BNO);
+ query.priv = priv;
+ query.fn = fn;
+ return xfs_btree_query_all(cur, xfs_alloc_query_range_helper, &query);
+}
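
xfs_alloc_query_range() above layers a typed callback on the generic btree range query. The overall shape, reduced to a start-sorted array instead of a btree, with invented names (free_extent, query_range):

#include <stddef.h>
#include <stdint.h>

struct free_extent {
	uint32_t start;		/* first block of the free extent */
	uint32_t len;		/* number of blocks, at least 1 */
};

typedef int (*extent_fn)(const struct free_extent *rec, void *priv);

/*
 * Hand every record overlapping [low, high] in a start-sorted array to the
 * caller's callback, stopping early on a non-zero return -- the same shape
 * as the btree range query the new xfs_alloc helpers wrap.
 */
int query_range(const struct free_extent *recs, size_t n,
		uint32_t low, uint32_t high, extent_fn fn, void *priv)
{
	size_t i;
	int ret;

	for (i = 0; i < n; i++) {
		if (recs[i].start + recs[i].len - 1 < low)
			continue;	/* ends before the window */
		if (recs[i].start > high)
			break;		/* starts after the window */
		ret = fn(&recs[i], priv);
		if (ret)
			return ret;
	}
	return 0;
}
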
diff --git a/fs/xfs/libxfs/xfs_alloc.h b/fs/xfs/libxfs/xfs_alloc.h
index 2a8d0fa6fbbe..77d9c27330ab 100644
--- a/fs/xfs/libxfs/xfs_alloc.h
+++ b/fs/xfs/libxfs/xfs_alloc.h
@@ -219,4 +219,16 @@ int xfs_free_extent_fix_freelist(struct xfs_trans *tp, xfs_agnumber_t agno,
xfs_extlen_t xfs_prealloc_blocks(struct xfs_mount *mp);
+typedef int (*xfs_alloc_query_range_fn)(
+ struct xfs_btree_cur *cur,
+ struct xfs_alloc_rec_incore *rec,
+ void *priv);
+
+int xfs_alloc_query_range(struct xfs_btree_cur *cur,
+ struct xfs_alloc_rec_incore *low_rec,
+ struct xfs_alloc_rec_incore *high_rec,
+ xfs_alloc_query_range_fn fn, void *priv);
+int xfs_alloc_query_all(struct xfs_btree_cur *cur, xfs_alloc_query_range_fn fn,
+ void *priv);
+
#endif /* __XFS_ALLOC_H__ */
diff --git a/fs/xfs/libxfs/xfs_alloc_btree.c b/fs/xfs/libxfs/xfs_alloc_btree.c
index efb467b10a71..e1fcfe7f0a9a 100644
--- a/fs/xfs/libxfs/xfs_alloc_btree.c
+++ b/fs/xfs/libxfs/xfs_alloc_btree.c
@@ -205,19 +205,37 @@ xfs_allocbt_init_key_from_rec(
union xfs_btree_key *key,
union xfs_btree_rec *rec)
{
- ASSERT(rec->alloc.ar_startblock != 0);
-
key->alloc.ar_startblock = rec->alloc.ar_startblock;
key->alloc.ar_blockcount = rec->alloc.ar_blockcount;
}
STATIC void
+xfs_bnobt_init_high_key_from_rec(
+ union xfs_btree_key *key,
+ union xfs_btree_rec *rec)
+{
+ __u32 x;
+
+ x = be32_to_cpu(rec->alloc.ar_startblock);
+ x += be32_to_cpu(rec->alloc.ar_blockcount) - 1;
+ key->alloc.ar_startblock = cpu_to_be32(x);
+ key->alloc.ar_blockcount = 0;
+}
+
+STATIC void
+xfs_cntbt_init_high_key_from_rec(
+ union xfs_btree_key *key,
+ union xfs_btree_rec *rec)
+{
+ key->alloc.ar_blockcount = rec->alloc.ar_blockcount;
+ key->alloc.ar_startblock = 0;
+}
+
+STATIC void
xfs_allocbt_init_rec_from_cur(
struct xfs_btree_cur *cur,
union xfs_btree_rec *rec)
{
- ASSERT(cur->bc_rec.a.ar_startblock != 0);
-
rec->alloc.ar_startblock = cpu_to_be32(cur->bc_rec.a.ar_startblock);
rec->alloc.ar_blockcount = cpu_to_be32(cur->bc_rec.a.ar_blockcount);
}
@@ -236,18 +254,24 @@ xfs_allocbt_init_ptr_from_cur(
}
STATIC __int64_t
-xfs_allocbt_key_diff(
+xfs_bnobt_key_diff(
struct xfs_btree_cur *cur,
union xfs_btree_key *key)
{
xfs_alloc_rec_incore_t *rec = &cur->bc_rec.a;
xfs_alloc_key_t *kp = &key->alloc;
- __int64_t diff;
- if (cur->bc_btnum == XFS_BTNUM_BNO) {
- return (__int64_t)be32_to_cpu(kp->ar_startblock) -
- rec->ar_startblock;
- }
+ return (__int64_t)be32_to_cpu(kp->ar_startblock) - rec->ar_startblock;
+}
+
+STATIC __int64_t
+xfs_cntbt_key_diff(
+ struct xfs_btree_cur *cur,
+ union xfs_btree_key *key)
+{
+ xfs_alloc_rec_incore_t *rec = &cur->bc_rec.a;
+ xfs_alloc_key_t *kp = &key->alloc;
+ __int64_t diff;
diff = (__int64_t)be32_to_cpu(kp->ar_blockcount) - rec->ar_blockcount;
if (diff)
@@ -256,6 +280,33 @@ xfs_allocbt_key_diff(
return (__int64_t)be32_to_cpu(kp->ar_startblock) - rec->ar_startblock;
}
+STATIC __int64_t
+xfs_bnobt_diff_two_keys(
+ struct xfs_btree_cur *cur,
+ union xfs_btree_key *k1,
+ union xfs_btree_key *k2)
+{
+ return (__int64_t)be32_to_cpu(k1->alloc.ar_startblock) -
+ be32_to_cpu(k2->alloc.ar_startblock);
+}
+
+STATIC __int64_t
+xfs_cntbt_diff_two_keys(
+ struct xfs_btree_cur *cur,
+ union xfs_btree_key *k1,
+ union xfs_btree_key *k2)
+{
+ __int64_t diff;
+
+ diff = be32_to_cpu(k1->alloc.ar_blockcount) -
+ be32_to_cpu(k2->alloc.ar_blockcount);
+ if (diff)
+ return diff;
+
+ return be32_to_cpu(k1->alloc.ar_startblock) -
+ be32_to_cpu(k2->alloc.ar_startblock);
+}
+
static bool
xfs_allocbt_verify(
struct xfs_buf *bp)
@@ -346,44 +397,54 @@ const struct xfs_buf_ops xfs_allocbt_buf_ops = {
#if defined(DEBUG) || defined(XFS_WARN)
STATIC int
-xfs_allocbt_keys_inorder(
+xfs_bnobt_keys_inorder(
struct xfs_btree_cur *cur,
union xfs_btree_key *k1,
union xfs_btree_key *k2)
{
- if (cur->bc_btnum == XFS_BTNUM_BNO) {
- return be32_to_cpu(k1->alloc.ar_startblock) <
- be32_to_cpu(k2->alloc.ar_startblock);
- } else {
- return be32_to_cpu(k1->alloc.ar_blockcount) <
- be32_to_cpu(k2->alloc.ar_blockcount) ||
- (k1->alloc.ar_blockcount == k2->alloc.ar_blockcount &&
- be32_to_cpu(k1->alloc.ar_startblock) <
- be32_to_cpu(k2->alloc.ar_startblock));
- }
+ return be32_to_cpu(k1->alloc.ar_startblock) <
+ be32_to_cpu(k2->alloc.ar_startblock);
}
STATIC int
-xfs_allocbt_recs_inorder(
+xfs_bnobt_recs_inorder(
struct xfs_btree_cur *cur,
union xfs_btree_rec *r1,
union xfs_btree_rec *r2)
{
- if (cur->bc_btnum == XFS_BTNUM_BNO) {
- return be32_to_cpu(r1->alloc.ar_startblock) +
- be32_to_cpu(r1->alloc.ar_blockcount) <=
- be32_to_cpu(r2->alloc.ar_startblock);
- } else {
- return be32_to_cpu(r1->alloc.ar_blockcount) <
- be32_to_cpu(r2->alloc.ar_blockcount) ||
- (r1->alloc.ar_blockcount == r2->alloc.ar_blockcount &&
- be32_to_cpu(r1->alloc.ar_startblock) <
- be32_to_cpu(r2->alloc.ar_startblock));
- }
+ return be32_to_cpu(r1->alloc.ar_startblock) +
+ be32_to_cpu(r1->alloc.ar_blockcount) <=
+ be32_to_cpu(r2->alloc.ar_startblock);
+}
+
+STATIC int
+xfs_cntbt_keys_inorder(
+ struct xfs_btree_cur *cur,
+ union xfs_btree_key *k1,
+ union xfs_btree_key *k2)
+{
+ return be32_to_cpu(k1->alloc.ar_blockcount) <
+ be32_to_cpu(k2->alloc.ar_blockcount) ||
+ (k1->alloc.ar_blockcount == k2->alloc.ar_blockcount &&
+ be32_to_cpu(k1->alloc.ar_startblock) <
+ be32_to_cpu(k2->alloc.ar_startblock));
}
-#endif /* DEBUG */
-static const struct xfs_btree_ops xfs_allocbt_ops = {
+STATIC int
+xfs_cntbt_recs_inorder(
+ struct xfs_btree_cur *cur,
+ union xfs_btree_rec *r1,
+ union xfs_btree_rec *r2)
+{
+ return be32_to_cpu(r1->alloc.ar_blockcount) <
+ be32_to_cpu(r2->alloc.ar_blockcount) ||
+ (r1->alloc.ar_blockcount == r2->alloc.ar_blockcount &&
+ be32_to_cpu(r1->alloc.ar_startblock) <
+ be32_to_cpu(r2->alloc.ar_startblock));
+}
+#endif /* DEBUG */
+
+static const struct xfs_btree_ops xfs_bnobt_ops = {
.rec_len = sizeof(xfs_alloc_rec_t),
.key_len = sizeof(xfs_alloc_key_t),
@@ -395,13 +456,39 @@ static const struct xfs_btree_ops xfs_allocbt_ops = {
.get_minrecs = xfs_allocbt_get_minrecs,
.get_maxrecs = xfs_allocbt_get_maxrecs,
.init_key_from_rec = xfs_allocbt_init_key_from_rec,
+ .init_high_key_from_rec = xfs_bnobt_init_high_key_from_rec,
.init_rec_from_cur = xfs_allocbt_init_rec_from_cur,
.init_ptr_from_cur = xfs_allocbt_init_ptr_from_cur,
- .key_diff = xfs_allocbt_key_diff,
+ .key_diff = xfs_bnobt_key_diff,
.buf_ops = &xfs_allocbt_buf_ops,
+ .diff_two_keys = xfs_bnobt_diff_two_keys,
#if defined(DEBUG) || defined(XFS_WARN)
- .keys_inorder = xfs_allocbt_keys_inorder,
- .recs_inorder = xfs_allocbt_recs_inorder,
+ .keys_inorder = xfs_bnobt_keys_inorder,
+ .recs_inorder = xfs_bnobt_recs_inorder,
+#endif
+};
+
+static const struct xfs_btree_ops xfs_cntbt_ops = {
+ .rec_len = sizeof(xfs_alloc_rec_t),
+ .key_len = sizeof(xfs_alloc_key_t),
+
+ .dup_cursor = xfs_allocbt_dup_cursor,
+ .set_root = xfs_allocbt_set_root,
+ .alloc_block = xfs_allocbt_alloc_block,
+ .free_block = xfs_allocbt_free_block,
+ .update_lastrec = xfs_allocbt_update_lastrec,
+ .get_minrecs = xfs_allocbt_get_minrecs,
+ .get_maxrecs = xfs_allocbt_get_maxrecs,
+ .init_key_from_rec = xfs_allocbt_init_key_from_rec,
+ .init_high_key_from_rec = xfs_cntbt_init_high_key_from_rec,
+ .init_rec_from_cur = xfs_allocbt_init_rec_from_cur,
+ .init_ptr_from_cur = xfs_allocbt_init_ptr_from_cur,
+ .key_diff = xfs_cntbt_key_diff,
+ .buf_ops = &xfs_allocbt_buf_ops,
+ .diff_two_keys = xfs_cntbt_diff_two_keys,
+#if defined(DEBUG) || defined(XFS_WARN)
+ .keys_inorder = xfs_cntbt_keys_inorder,
+ .recs_inorder = xfs_cntbt_recs_inorder,
#endif
};
@@ -427,16 +514,15 @@ xfs_allocbt_init_cursor(
cur->bc_mp = mp;
cur->bc_btnum = btnum;
cur->bc_blocklog = mp->m_sb.sb_blocklog;
- cur->bc_ops = &xfs_allocbt_ops;
- if (btnum == XFS_BTNUM_BNO)
- cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_abtb_2);
- else
- cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_abtc_2);
if (btnum == XFS_BTNUM_CNT) {
+ cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_abtc_2);
+ cur->bc_ops = &xfs_cntbt_ops;
cur->bc_nlevels = be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]);
cur->bc_flags = XFS_BTREE_LASTREC_UPDATE;
} else {
+ cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_abtb_2);
+ cur->bc_ops = &xfs_bnobt_ops;
cur->bc_nlevels = be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]);
}
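Editor's note, not part of the patch: the split above replaces one runtime-branching ops table with two. The by-block (bnobt) tree orders records by startblock alone, while the by-size (cntbt) tree orders by blockcount and breaks ties on startblock. A minimal user-space sketch of those two orderings, using simplified stand-in types rather than the on-disk record layout:

#include <stdint.h>

struct rec {
	uint32_t	startblock;	/* stand-in for ar_startblock */
	uint32_t	blockcount;	/* stand-in for ar_blockcount */
};

/* bnobt ordering: startblock only. */
static int64_t bnobt_cmp(const struct rec *a, const struct rec *b)
{
	return (int64_t)a->startblock - (int64_t)b->startblock;
}

/* cntbt ordering: blockcount first, startblock as the tie-breaker. */
static int64_t cntbt_cmp(const struct rec *a, const struct rec *b)
{
	int64_t diff = (int64_t)a->blockcount - (int64_t)b->blockcount;

	return diff ? diff : (int64_t)a->startblock - (int64_t)b->startblock;
}

A negative return means a sorts before b, mirroring the sign convention of the key_diff/diff_two_keys helpers in the hunks above.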
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index 9bd104f32908..f02eb7673392 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -764,7 +764,6 @@ xfs_bmap_extents_to_btree(
args.fsbno = XFS_INO_TO_FSB(mp, ip->i_ino);
} else if (dfops->dop_low) {
args.type = XFS_ALLOCTYPE_START_BNO;
-try_another_ag:
args.fsbno = *firstblock;
} else {
args.type = XFS_ALLOCTYPE_NEAR_BNO;
@@ -779,20 +778,6 @@ try_another_ag:
return error;
}
- /*
- * During a CoW operation, the allocation and bmbt updates occur in
- * different transactions. The mapping code tries to put new bmbt
- * blocks near extents being mapped, but the only way to guarantee this
- * is if the alloc and the mapping happen in a single transaction that
- * has a block reservation. That isn't the case here, so if we run out
- * of space we'll try again with another AG.
- */
- if (xfs_sb_version_hasreflink(&cur->bc_mp->m_sb) &&
- args.fsbno == NULLFSBLOCK &&
- args.type == XFS_ALLOCTYPE_NEAR_BNO) {
- args.type = XFS_ALLOCTYPE_FIRST_AG;
- goto try_another_ag;
- }
if (WARN_ON_ONCE(args.fsbno == NULLFSBLOCK)) {
xfs_iroot_realloc(ip, -1, whichfork);
xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
@@ -925,7 +910,6 @@ xfs_bmap_local_to_extents(
* file currently fits in an inode.
*/
if (*firstblock == NULLFSBLOCK) {
-try_another_ag:
args.fsbno = XFS_INO_TO_FSB(args.mp, ip->i_ino);
args.type = XFS_ALLOCTYPE_START_BNO;
} else {
@@ -938,19 +922,6 @@ try_another_ag:
if (error)
goto done;
- /*
- * During a CoW operation, the allocation and bmbt updates occur in
- * different transactions. The mapping code tries to put new bmbt
- * blocks near extents being mapped, but the only way to guarantee this
- * is if the alloc and the mapping happen in a single transaction that
- * has a block reservation. That isn't the case here, so if we run out
- * of space we'll try again with another AG.
- */
- if (xfs_sb_version_hasreflink(&ip->i_mount->m_sb) &&
- args.fsbno == NULLFSBLOCK &&
- args.type == XFS_ALLOCTYPE_NEAR_BNO) {
- goto try_another_ag;
- }
/* Can't fail, the space was reserved. */
ASSERT(args.fsbno != NULLFSBLOCK);
ASSERT(args.len == 1);
@@ -1260,7 +1231,6 @@ xfs_bmap_read_extents(
xfs_fsblock_t bno; /* block # of "block" */
xfs_buf_t *bp; /* buffer for "block" */
int error; /* error return value */
- xfs_exntfmt_t exntf; /* XFS_EXTFMT_NOSTATE, if checking */
xfs_extnum_t i, j; /* index into the extents list */
xfs_ifork_t *ifp; /* fork structure */
int level; /* btree level, for checking */
@@ -1271,8 +1241,6 @@ xfs_bmap_read_extents(
mp = ip->i_mount;
ifp = XFS_IFORK_PTR(ip, whichfork);
- exntf = (whichfork != XFS_DATA_FORK) ? XFS_EXTFMT_NOSTATE :
- XFS_EXTFMT_INODE(ip);
block = ifp->if_broot;
/*
* Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
@@ -1340,18 +1308,9 @@ xfs_bmap_read_extents(
xfs_bmbt_rec_host_t *trp = xfs_iext_get_ext(ifp, i);
trp->l0 = be64_to_cpu(frp->l0);
trp->l1 = be64_to_cpu(frp->l1);
- }
- if (exntf == XFS_EXTFMT_NOSTATE) {
- /*
- * Check all attribute bmap btree records and
- * any "older" data bmap btree records for a
- * set bit in the "extent flag" position.
- */
- if (unlikely(xfs_check_nostate_extents(ifp,
- start, num_recs))) {
+ if (!xfs_bmbt_validate_extent(mp, whichfork, trp)) {
XFS_ERROR_REPORT("xfs_bmap_read_extents(2)",
- XFS_ERRLEVEL_LOW,
- ip->i_mount);
+ XFS_ERRLEVEL_LOW, mp);
goto error0;
}
}
@@ -2879,27 +2838,30 @@ xfs_bmap_add_extent_hole_delay(
*/
STATIC int /* error */
xfs_bmap_add_extent_hole_real(
- struct xfs_bmalloca *bma,
- int whichfork)
+ struct xfs_trans *tp,
+ struct xfs_inode *ip,
+ int whichfork,
+ xfs_extnum_t *idx,
+ struct xfs_btree_cur **curp,
+ struct xfs_bmbt_irec *new,
+ xfs_fsblock_t *first,
+ struct xfs_defer_ops *dfops,
+ int *logflagsp)
{
- struct xfs_bmbt_irec *new = &bma->got;
+ struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
+ struct xfs_mount *mp = ip->i_mount;
+ struct xfs_btree_cur *cur = *curp;
int error; /* error return value */
int i; /* temp state */
- xfs_ifork_t *ifp; /* inode fork pointer */
xfs_bmbt_irec_t left; /* left neighbor extent entry */
xfs_bmbt_irec_t right; /* right neighbor extent entry */
int rval=0; /* return value (logging flags) */
int state; /* state bits, accessed thru macros */
- struct xfs_mount *mp;
- mp = bma->ip->i_mount;
- ifp = XFS_IFORK_PTR(bma->ip, whichfork);
-
- ASSERT(bma->idx >= 0);
- ASSERT(bma->idx <= xfs_iext_count(ifp));
+ ASSERT(*idx >= 0);
+ ASSERT(*idx <= xfs_iext_count(ifp));
ASSERT(!isnullstartblock(new->br_startblock));
- ASSERT(!bma->cur ||
- !(bma->cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL));
+ ASSERT(!cur || !(cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL));
XFS_STATS_INC(mp, xs_add_exlist);
@@ -2912,9 +2874,9 @@ xfs_bmap_add_extent_hole_real(
/*
* Check and set flags if this segment has a left neighbor.
*/
- if (bma->idx > 0) {
+ if (*idx > 0) {
state |= BMAP_LEFT_VALID;
- xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx - 1), &left);
+ xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx - 1), &left);
if (isnullstartblock(left.br_startblock))
state |= BMAP_LEFT_DELAY;
}
@@ -2923,9 +2885,9 @@ xfs_bmap_add_extent_hole_real(
* Check and set flags if this segment has a current value.
* Not true if we're inserting into the "hole" at eof.
*/
- if (bma->idx < xfs_iext_count(ifp)) {
+ if (*idx < xfs_iext_count(ifp)) {
state |= BMAP_RIGHT_VALID;
- xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx), &right);
+ xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx), &right);
if (isnullstartblock(right.br_startblock))
state |= BMAP_RIGHT_DELAY;
}
@@ -2962,36 +2924,36 @@ xfs_bmap_add_extent_hole_real(
* left and on the right.
* Merge all three into a single extent record.
*/
- --bma->idx;
- trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
- xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, bma->idx),
+ --*idx;
+ trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
+ xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx),
left.br_blockcount + new->br_blockcount +
right.br_blockcount);
- trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
+ trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
- xfs_iext_remove(bma->ip, bma->idx + 1, 1, state);
+ xfs_iext_remove(ip, *idx + 1, 1, state);
- XFS_IFORK_NEXT_SET(bma->ip, whichfork,
- XFS_IFORK_NEXTENTS(bma->ip, whichfork) - 1);
- if (bma->cur == NULL) {
+ XFS_IFORK_NEXT_SET(ip, whichfork,
+ XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
+ if (cur == NULL) {
rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
} else {
rval = XFS_ILOG_CORE;
- error = xfs_bmbt_lookup_eq(bma->cur, right.br_startoff,
+ error = xfs_bmbt_lookup_eq(cur, right.br_startoff,
right.br_startblock, right.br_blockcount,
&i);
if (error)
goto done;
XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
- error = xfs_btree_delete(bma->cur, &i);
+ error = xfs_btree_delete(cur, &i);
if (error)
goto done;
XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
- error = xfs_btree_decrement(bma->cur, 0, &i);
+ error = xfs_btree_decrement(cur, 0, &i);
if (error)
goto done;
XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
- error = xfs_bmbt_update(bma->cur, left.br_startoff,
+ error = xfs_bmbt_update(cur, left.br_startoff,
left.br_startblock,
left.br_blockcount +
new->br_blockcount +
@@ -3008,23 +2970,23 @@ xfs_bmap_add_extent_hole_real(
* on the left.
* Merge the new allocation with the left neighbor.
*/
- --bma->idx;
- trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
- xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, bma->idx),
+ --*idx;
+ trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
+ xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx),
left.br_blockcount + new->br_blockcount);
- trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
+ trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
- if (bma->cur == NULL) {
+ if (cur == NULL) {
rval = xfs_ilog_fext(whichfork);
} else {
rval = 0;
- error = xfs_bmbt_lookup_eq(bma->cur, left.br_startoff,
+ error = xfs_bmbt_lookup_eq(cur, left.br_startoff,
left.br_startblock, left.br_blockcount,
&i);
if (error)
goto done;
XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
- error = xfs_bmbt_update(bma->cur, left.br_startoff,
+ error = xfs_bmbt_update(cur, left.br_startoff,
left.br_startblock,
left.br_blockcount +
new->br_blockcount,
@@ -3040,25 +3002,25 @@ xfs_bmap_add_extent_hole_real(
* on the right.
* Merge the new allocation with the right neighbor.
*/
- trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
- xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, bma->idx),
+ trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
+ xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, *idx),
new->br_startoff, new->br_startblock,
new->br_blockcount + right.br_blockcount,
right.br_state);
- trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
+ trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
- if (bma->cur == NULL) {
+ if (cur == NULL) {
rval = xfs_ilog_fext(whichfork);
} else {
rval = 0;
- error = xfs_bmbt_lookup_eq(bma->cur,
+ error = xfs_bmbt_lookup_eq(cur,
right.br_startoff,
right.br_startblock,
right.br_blockcount, &i);
if (error)
goto done;
XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
- error = xfs_bmbt_update(bma->cur, new->br_startoff,
+ error = xfs_bmbt_update(cur, new->br_startoff,
new->br_startblock,
new->br_blockcount +
right.br_blockcount,
@@ -3074,22 +3036,22 @@ xfs_bmap_add_extent_hole_real(
* real allocation.
* Insert a new entry.
*/
- xfs_iext_insert(bma->ip, bma->idx, 1, new, state);
- XFS_IFORK_NEXT_SET(bma->ip, whichfork,
- XFS_IFORK_NEXTENTS(bma->ip, whichfork) + 1);
- if (bma->cur == NULL) {
+ xfs_iext_insert(ip, *idx, 1, new, state);
+ XFS_IFORK_NEXT_SET(ip, whichfork,
+ XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
+ if (cur == NULL) {
rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
} else {
rval = XFS_ILOG_CORE;
- error = xfs_bmbt_lookup_eq(bma->cur,
+ error = xfs_bmbt_lookup_eq(cur,
new->br_startoff,
new->br_startblock,
new->br_blockcount, &i);
if (error)
goto done;
XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
- bma->cur->bc_rec.b.br_state = new->br_state;
- error = xfs_btree_insert(bma->cur, &i);
+ cur->bc_rec.b.br_state = new->br_state;
+ error = xfs_btree_insert(cur, &i);
if (error)
goto done;
XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
@@ -3098,30 +3060,30 @@ xfs_bmap_add_extent_hole_real(
}
/* add reverse mapping */
- error = xfs_rmap_map_extent(mp, bma->dfops, bma->ip, whichfork, new);
+ error = xfs_rmap_map_extent(mp, dfops, ip, whichfork, new);
if (error)
goto done;
/* convert to a btree if necessary */
- if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
+ if (xfs_bmap_needs_btree(ip, whichfork)) {
int tmp_logflags; /* partial log flag return val */
- ASSERT(bma->cur == NULL);
- error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
- bma->firstblock, bma->dfops, &bma->cur,
+ ASSERT(cur == NULL);
+ error = xfs_bmap_extents_to_btree(tp, ip, first, dfops, curp,
0, &tmp_logflags, whichfork);
- bma->logflags |= tmp_logflags;
+ *logflagsp |= tmp_logflags;
+ cur = *curp;
if (error)
goto done;
}
/* clear out the allocated field, done with it now in any case. */
- if (bma->cur)
- bma->cur->bc_private.b.allocated = 0;
+ if (cur)
+ cur->bc_private.b.allocated = 0;
- xfs_bmap_check_leaf_extents(bma->cur, bma->ip, whichfork);
+ xfs_bmap_check_leaf_extents(cur, ip, whichfork);
done:
- bma->logflags |= rval;
+ *logflagsp |= rval;
return error;
}
@@ -3853,60 +3815,6 @@ xfs_bmap_btalloc(
}
/*
- * For a remap operation, just "allocate" an extent at the address that the
- * caller passed in, and ensure that the AGFL is the right size. The caller
- * will then map the "allocated" extent into the file somewhere.
- */
-STATIC int
-xfs_bmap_remap_alloc(
- struct xfs_bmalloca *ap)
-{
- struct xfs_trans *tp = ap->tp;
- struct xfs_mount *mp = tp->t_mountp;
- xfs_agblock_t bno;
- struct xfs_alloc_arg args;
- int error;
-
- /*
- * validate that the block number is legal - this enables us to detect
- * and handle a silent filesystem corruption rather than crashing.
- */
- memset(&args, 0, sizeof(struct xfs_alloc_arg));
- args.tp = ap->tp;
- args.mp = ap->tp->t_mountp;
- bno = *ap->firstblock;
- args.agno = XFS_FSB_TO_AGNO(mp, bno);
- args.agbno = XFS_FSB_TO_AGBNO(mp, bno);
- if (args.agno >= mp->m_sb.sb_agcount ||
- args.agbno >= mp->m_sb.sb_agblocks)
- return -EFSCORRUPTED;
-
- /* "Allocate" the extent from the range we passed in. */
- trace_xfs_bmap_remap_alloc(ap->ip, *ap->firstblock, ap->length);
- ap->blkno = bno;
- ap->ip->i_d.di_nblocks += ap->length;
- xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
-
- /* Fix the freelist, like a real allocator does. */
- args.datatype = ap->datatype;
- args.pag = xfs_perag_get(args.mp, args.agno);
- ASSERT(args.pag);
-
- /*
- * The freelist fixing code will decline the allocation if
- * the size and shape of the free space doesn't allow for
- * allocating the extent and updating all the metadata that
- * happens during an allocation. We're remapping, not
- * allocating, so skip that check by pretending to be freeing.
- */
- error = xfs_alloc_fix_freelist(&args, XFS_ALLOC_FLAG_FREEING);
- xfs_perag_put(args.pag);
- if (error)
- trace_xfs_bmap_remap_alloc_error(ap->ip, error, _RET_IP_);
- return error;
-}
-
-/*
* xfs_bmap_alloc is called by xfs_bmapi to allocate an extent for a file.
* It figures out where to ask the underlying allocator to put the new extent.
*/
@@ -3914,8 +3822,6 @@ STATIC int
xfs_bmap_alloc(
struct xfs_bmalloca *ap) /* bmap alloc argument struct */
{
- if (ap->flags & XFS_BMAPI_REMAP)
- return xfs_bmap_remap_alloc(ap);
if (XFS_IS_REALTIME_INODE(ap->ip) &&
xfs_alloc_is_userdata(ap->datatype))
return xfs_bmap_rtalloc(ap);
@@ -4386,7 +4292,9 @@ xfs_bmapi_allocate(
if (bma->wasdel)
error = xfs_bmap_add_extent_delay_real(bma, whichfork);
else
- error = xfs_bmap_add_extent_hole_real(bma, whichfork);
+ error = xfs_bmap_add_extent_hole_real(bma->tp, bma->ip,
+ whichfork, &bma->idx, &bma->cur, &bma->got,
+ bma->firstblock, bma->dfops, &bma->logflags);
bma->logflags |= tmp_logflags;
if (error)
@@ -4549,9 +4457,7 @@ xfs_bmapi_write(
ASSERT(len > 0);
ASSERT(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL);
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
- ASSERT(!(flags & XFS_BMAPI_REMAP) || whichfork == XFS_DATA_FORK);
- ASSERT(!(flags & XFS_BMAPI_PREALLOC) || !(flags & XFS_BMAPI_REMAP));
- ASSERT(!(flags & XFS_BMAPI_CONVERT) || !(flags & XFS_BMAPI_REMAP));
+ ASSERT(!(flags & XFS_BMAPI_REMAP));
/* zeroing is for currently only for data extents, not metadata */
ASSERT((flags & (XFS_BMAPI_METADATA | XFS_BMAPI_ZERO)) !=
@@ -4635,13 +4541,8 @@ xfs_bmapi_write(
} else {
need_alloc = true;
}
- } else {
- /*
- * Make sure we only reflink into a hole.
- */
- ASSERT(!(flags & XFS_BMAPI_REMAP));
- if (isnullstartblock(bma.got.br_startblock))
- wasdelay = true;
+ } else if (isnullstartblock(bma.got.br_startblock)) {
+ wasdelay = true;
}
/*
@@ -4770,6 +4671,93 @@ error0:
return error;
}
+static int
+xfs_bmapi_remap(
+ struct xfs_trans *tp,
+ struct xfs_inode *ip,
+ xfs_fileoff_t bno,
+ xfs_filblks_t len,
+ xfs_fsblock_t startblock,
+ struct xfs_defer_ops *dfops)
+{
+ struct xfs_mount *mp = ip->i_mount;
+ struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
+ struct xfs_btree_cur *cur = NULL;
+ xfs_fsblock_t firstblock = NULLFSBLOCK;
+ struct xfs_bmbt_irec got;
+ xfs_extnum_t idx;
+ int logflags = 0, error;
+
+ ASSERT(len > 0);
+ ASSERT(len <= (xfs_filblks_t)MAXEXTLEN);
+ ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
+
+ if (unlikely(XFS_TEST_ERROR(
+ (XFS_IFORK_FORMAT(ip, XFS_DATA_FORK) != XFS_DINODE_FMT_EXTENTS &&
+ XFS_IFORK_FORMAT(ip, XFS_DATA_FORK) != XFS_DINODE_FMT_BTREE),
+ mp, XFS_ERRTAG_BMAPIFORMAT, XFS_RANDOM_BMAPIFORMAT))) {
+ XFS_ERROR_REPORT("xfs_bmapi_remap", XFS_ERRLEVEL_LOW, mp);
+ return -EFSCORRUPTED;
+ }
+
+ if (XFS_FORCED_SHUTDOWN(mp))
+ return -EIO;
+
+ if (!(ifp->if_flags & XFS_IFEXTENTS)) {
+ error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK);
+ if (error)
+ return error;
+ }
+
+ if (xfs_iext_lookup_extent(ip, ifp, bno, &idx, &got)) {
+ /* make sure we only reflink into a hole. */
+ ASSERT(got.br_startoff > bno);
+ ASSERT(got.br_startoff - bno >= len);
+ }
+
+ ip->i_d.di_nblocks += len;
+ xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
+
+ if (ifp->if_flags & XFS_IFBROOT) {
+ cur = xfs_bmbt_init_cursor(mp, tp, ip, XFS_DATA_FORK);
+ cur->bc_private.b.firstblock = firstblock;
+ cur->bc_private.b.dfops = dfops;
+ cur->bc_private.b.flags = 0;
+ }
+
+ got.br_startoff = bno;
+ got.br_startblock = startblock;
+ got.br_blockcount = len;
+ got.br_state = XFS_EXT_NORM;
+
+ error = xfs_bmap_add_extent_hole_real(tp, ip, XFS_DATA_FORK, &idx, &cur,
+ &got, &firstblock, dfops, &logflags);
+ if (error)
+ goto error0;
+
+ if (xfs_bmap_wants_extents(ip, XFS_DATA_FORK)) {
+ int tmp_logflags = 0;
+
+ error = xfs_bmap_btree_to_extents(tp, ip, cur,
+ &tmp_logflags, XFS_DATA_FORK);
+ logflags |= tmp_logflags;
+ }
+
+error0:
+ if (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS)
+ logflags &= ~XFS_ILOG_DEXT;
+ else if (ip->i_d.di_format != XFS_DINODE_FMT_BTREE)
+ logflags &= ~XFS_ILOG_DBROOT;
+
+ if (logflags)
+ xfs_trans_log_inode(tp, ip, logflags);
+ if (cur) {
+ xfs_btree_del_cursor(cur,
+ error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
+ }
+ return error;
+}
+
/*
* When a delalloc extent is split (e.g., due to a hole punch), the original
* indlen reservation must be shared across the two new extents that are left
@@ -4887,7 +4875,7 @@ xfs_bmap_del_extent_delay(
ASSERT(got_endoff >= del_endoff);
if (isrt) {
- int64_t rtexts = XFS_FSB_TO_B(mp, del->br_blockcount);
+ uint64_t rtexts = XFS_FSB_TO_B(mp, del->br_blockcount);
do_div(rtexts, mp->m_sb.sb_rextsize);
xfs_mod_frextents(mp, rtexts);
@@ -6488,27 +6476,15 @@ xfs_bmap_finish_one(
xfs_filblks_t blockcount,
xfs_exntst_t state)
{
- struct xfs_bmbt_irec bmap;
- int nimaps = 1;
- xfs_fsblock_t firstfsb;
- int flags = XFS_BMAPI_REMAP;
- int done;
- int error = 0;
-
- bmap.br_startblock = startblock;
- bmap.br_startoff = startoff;
- bmap.br_blockcount = blockcount;
- bmap.br_state = state;
+ int error = 0, done;
trace_xfs_bmap_deferred(tp->t_mountp,
XFS_FSB_TO_AGNO(tp->t_mountp, startblock), type,
XFS_FSB_TO_AGBNO(tp->t_mountp, startblock),
ip->i_ino, whichfork, startoff, blockcount, state);
- if (whichfork != XFS_DATA_FORK && whichfork != XFS_ATTR_FORK)
+ if (WARN_ON_ONCE(whichfork != XFS_DATA_FORK))
return -EFSCORRUPTED;
- if (whichfork == XFS_ATTR_FORK)
- flags |= XFS_BMAPI_ATTRFORK;
if (XFS_TEST_ERROR(false, tp->t_mountp,
XFS_ERRTAG_BMAP_FINISH_ONE,
@@ -6517,16 +6493,12 @@ xfs_bmap_finish_one(
switch (type) {
case XFS_BMAP_MAP:
- firstfsb = bmap.br_startblock;
- error = xfs_bmapi_write(tp, ip, bmap.br_startoff,
- bmap.br_blockcount, flags, &firstfsb,
- bmap.br_blockcount, &bmap, &nimaps,
- dfops);
+ error = xfs_bmapi_remap(tp, ip, startoff, blockcount,
+ startblock, dfops);
break;
case XFS_BMAP_UNMAP:
- error = xfs_bunmapi(tp, ip, bmap.br_startoff,
- bmap.br_blockcount, flags, 1, &firstfsb,
- dfops, &done);
+ error = xfs_bunmapi(tp, ip, startoff, blockcount,
+ XFS_BMAPI_REMAP, 1, &startblock, dfops, &done);
ASSERT(done);
break;
default:
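Editor's note, not part of the patch: xfs_bmapi_remap() above asserts that the destination range is a hole — if an extent lookup finds a mapping at or after bno, it must start beyond the end of the range being remapped. A hedged stand-alone restatement of that invariant (names here are illustrative, not kernel API):

#include <stdbool.h>
#include <stdint.h>

/*
 * Returns true if remapping [bno, bno + len) cannot overlap an existing
 * mapping.  have_next/next_startoff model the result of the extent
 * lookup: the first mapping at or after bno, if any.
 */
static bool remap_target_is_hole(uint64_t bno, uint64_t len,
				 bool have_next, uint64_t next_startoff)
{
	if (!have_next)
		return true;		/* nothing mapped at or after bno */
	return next_startoff > bno && next_startoff - bno >= len;
}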
diff --git a/fs/xfs/libxfs/xfs_bmap.h b/fs/xfs/libxfs/xfs_bmap.h
index cdef87db5262..c35a14fa1527 100644
--- a/fs/xfs/libxfs/xfs_bmap.h
+++ b/fs/xfs/libxfs/xfs_bmap.h
@@ -172,6 +172,18 @@ static inline int xfs_bmapi_whichfork(int bmapi_flags)
/*
+ * Return true if the extent is a real, allocated extent, or false if it is a
+ * delayed allocation, and unwritten extent or a hole.
+ */
+static inline bool xfs_bmap_is_real_extent(struct xfs_bmbt_irec *irec)
+{
+ return irec->br_state != XFS_EXT_UNWRITTEN &&
+ irec->br_startblock != HOLESTARTBLOCK &&
+ irec->br_startblock != DELAYSTARTBLOCK &&
+ !isnullstartblock(irec->br_startblock);
+}
+
+/*
* This macro is used to determine how many extents will be shifted
* in one write transaction. We could require two splits,
* an extent move on the first and an extent merge on the second,
@@ -232,8 +244,6 @@ int xfs_bmap_del_extent_delay(struct xfs_inode *ip, int whichfork,
struct xfs_bmbt_irec *del);
void xfs_bmap_del_extent_cow(struct xfs_inode *ip, xfs_extnum_t *idx,
struct xfs_bmbt_irec *got, struct xfs_bmbt_irec *del);
-int xfs_check_nostate_extents(struct xfs_ifork *ifp, xfs_extnum_t idx,
- xfs_extnum_t num);
uint xfs_default_attroffset(struct xfs_inode *ip);
int xfs_bmap_shift_extents(struct xfs_trans *tp, struct xfs_inode *ip,
xfs_fileoff_t *next_fsb, xfs_fileoff_t offset_shift_fsb,
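Editor's note, not part of the patch: the new xfs_bmap_is_real_extent() helper lets later hunks (xfs_aops.c and xfs_bmap_util.c below) use one predicate in place of the open-coded hole/delalloc/unwritten checks. A simplified user-space sketch of the same filtering logic, with stand-in sentinels instead of HOLESTARTBLOCK/DELAYSTARTBLOCK and without the additional isnullstartblock() encoding check:

#include <stdbool.h>
#include <stdint.h>

enum ext_state { EXT_NORM, EXT_UNWRITTEN };

struct irec {
	uint64_t	startblock;
	enum ext_state	state;
};

#define HOLESTART	((uint64_t)-1)	/* illustrative sentinel values */
#define DELAYSTART	((uint64_t)-2)

/* Only a written, allocated mapping counts as "real". */
static bool is_real_extent(const struct irec *irec)
{
	return irec->state != EXT_UNWRITTEN &&
	       irec->startblock != HOLESTART &&
	       irec->startblock != DELAYSTART;
}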
diff --git a/fs/xfs/libxfs/xfs_bmap_btree.c b/fs/xfs/libxfs/xfs_bmap_btree.c
index fd55db479385..6cba69aff077 100644
--- a/fs/xfs/libxfs/xfs_bmap_btree.c
+++ b/fs/xfs/libxfs/xfs_bmap_btree.c
@@ -366,32 +366,6 @@ xfs_bmbt_to_bmdr(
memcpy(tpp, fpp, sizeof(*fpp) * dmxr);
}
-/*
- * Check extent records, which have just been read, for
- * any bit in the extent flag field. ASSERT on debug
- * kernels, as this condition should not occur.
- * Return an error condition (1) if any flags found,
- * otherwise return 0.
- */
-
-int
-xfs_check_nostate_extents(
- xfs_ifork_t *ifp,
- xfs_extnum_t idx,
- xfs_extnum_t num)
-{
- for (; num > 0; num--, idx++) {
- xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, idx);
- if ((ep->l0 >>
- (64 - BMBT_EXNTFLAG_BITLEN)) != 0) {
- ASSERT(0);
- return 1;
- }
- }
- return 0;
-}
-
-
STATIC struct xfs_btree_cur *
xfs_bmbt_dup_cursor(
struct xfs_btree_cur *cur)
@@ -448,7 +422,6 @@ xfs_bmbt_alloc_block(
if (args.fsbno == NULLFSBLOCK) {
args.fsbno = be64_to_cpu(start->l);
args.type = XFS_ALLOCTYPE_START_BNO;
-try_another_ag:
/*
* Make sure there is sufficient room left in the AG to
* complete a full tree split for an extent insert. If
@@ -477,22 +450,6 @@ try_another_ag:
if (error)
goto error0;
- /*
- * During a CoW operation, the allocation and bmbt updates occur in
- * different transactions. The mapping code tries to put new bmbt
- * blocks near extents being mapped, but the only way to guarantee this
- * is if the alloc and the mapping happen in a single transaction that
- * has a block reservation. That isn't the case here, so if we run out
- * of space we'll try again with another AG.
- */
- if (xfs_sb_version_hasreflink(&cur->bc_mp->m_sb) &&
- args.fsbno == NULLFSBLOCK &&
- args.type == XFS_ALLOCTYPE_NEAR_BNO) {
- args.fsbno = cur->bc_private.b.firstblock;
- args.type = XFS_ALLOCTYPE_FIRST_AG;
- goto try_another_ag;
- }
-
if (args.fsbno == NULLFSBLOCK && args.minleft) {
/*
* Could not find an AG with enough free space to satisfy
diff --git a/fs/xfs/libxfs/xfs_bmap_btree.h b/fs/xfs/libxfs/xfs_bmap_btree.h
index 819a8a4dee95..9da5a8d4f184 100644
--- a/fs/xfs/libxfs/xfs_bmap_btree.h
+++ b/fs/xfs/libxfs/xfs_bmap_btree.h
@@ -25,14 +25,6 @@ struct xfs_inode;
struct xfs_trans;
/*
- * Extent state and extent format macros.
- */
-#define XFS_EXTFMT_INODE(x) \
- (xfs_sb_version_hasextflgbit(&((x)->i_mount->m_sb)) ? \
- XFS_EXTFMT_HASSTATE : XFS_EXTFMT_NOSTATE)
-#define ISUNWRITTEN(x) ((x)->br_state == XFS_EXT_UNWRITTEN)
-
-/*
* Btree block header size depends on a superblock flag.
*/
#define XFS_BMBT_BLOCK_LEN(mp) \
@@ -140,4 +132,18 @@ extern int xfs_bmbt_change_owner(struct xfs_trans *tp, struct xfs_inode *ip,
extern struct xfs_btree_cur *xfs_bmbt_init_cursor(struct xfs_mount *,
struct xfs_trans *, struct xfs_inode *, int);
+/*
+ * Check that the extent does not contain an invalid unwritten extent flag.
+ */
+static inline bool xfs_bmbt_validate_extent(struct xfs_mount *mp, int whichfork,
+ struct xfs_bmbt_rec_host *ep)
+{
+ if (ep->l0 >> (64 - BMBT_EXNTFLAG_BITLEN) == 0)
+ return true;
+ if (whichfork == XFS_DATA_FORK &&
+ xfs_sb_version_hasextflgbit(&mp->m_sb))
+ return true;
+ return false;
+}
+
#endif /* __XFS_BMAP_BTREE_H__ */
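Editor's note, not part of the patch: xfs_bmbt_validate_extent() above keys off the extent-flag field packed into the top of the record's first 64-bit word (BMBT_EXNTFLAG_BITLEN is 1 in these headers, so this is a single-bit test). A tiny stand-alone sketch of just that bit extraction:

#include <stdbool.h>
#include <stdint.h>

#define EXNTFLAG_BITLEN	1	/* mirrors BMBT_EXNTFLAG_BITLEN */

/* True if the unwritten/extent-flag bit at the top of l0 is set. */
static bool extent_flag_set(uint64_t l0)
{
	return (l0 >> (64 - EXNTFLAG_BITLEN)) != 0;
}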
diff --git a/fs/xfs/libxfs/xfs_btree.c b/fs/xfs/libxfs/xfs_btree.c
index 3059a3ec7ecb..5392674bf893 100644
--- a/fs/xfs/libxfs/xfs_btree.c
+++ b/fs/xfs/libxfs/xfs_btree.c
@@ -4842,6 +4842,21 @@ xfs_btree_query_range(
fn, priv);
}
+/* Query a btree for all records. */
+int
+xfs_btree_query_all(
+ struct xfs_btree_cur *cur,
+ xfs_btree_query_range_fn fn,
+ void *priv)
+{
+ union xfs_btree_irec low_rec;
+ union xfs_btree_irec high_rec;
+
+ memset(&low_rec, 0, sizeof(low_rec));
+ memset(&high_rec, 0xFF, sizeof(high_rec));
+ return xfs_btree_query_range(cur, &low_rec, &high_rec, fn, priv);
+}
+
/*
* Calculate the number of blocks needed to store a given number of records
* in a short-format (per-AG metadata) btree.
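Editor's note, not part of the patch: xfs_btree_query_all() simply runs xfs_btree_query_range() from an all-zeroes low key to an all-ones high key, so any existing query_range callback works unchanged. A hedged sketch of a caller; count_helper and its counter are illustrative, only xfs_btree_query_all() and XFS_BTREE_QUERY_RANGE_CONTINUE come from this patch and xfs_btree.h:

STATIC int
count_helper(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*rec,
	void			*priv)
{
	(*(uint64_t *)priv)++;
	return XFS_BTREE_QUERY_RANGE_CONTINUE;
}

/* ...with a cursor already set up elsewhere: */
	uint64_t		nrecs = 0;
	int			error;

	error = xfs_btree_query_all(cur, count_helper, &nrecs);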
diff --git a/fs/xfs/libxfs/xfs_btree.h b/fs/xfs/libxfs/xfs_btree.h
index 4bb62580a7fd..27bed08261c5 100644
--- a/fs/xfs/libxfs/xfs_btree.h
+++ b/fs/xfs/libxfs/xfs_btree.h
@@ -496,6 +496,8 @@ typedef int (*xfs_btree_query_range_fn)(struct xfs_btree_cur *cur,
int xfs_btree_query_range(struct xfs_btree_cur *cur,
union xfs_btree_irec *low_rec, union xfs_btree_irec *high_rec,
xfs_btree_query_range_fn fn, void *priv);
+int xfs_btree_query_all(struct xfs_btree_cur *cur, xfs_btree_query_range_fn fn,
+ void *priv);
typedef int (*xfs_btree_visit_blocks_fn)(struct xfs_btree_cur *cur, int level,
void *data);
diff --git a/fs/xfs/libxfs/xfs_dquot_buf.c b/fs/xfs/libxfs/xfs_dquot_buf.c
index ac9a003dd29a..747085b4ef44 100644
--- a/fs/xfs/libxfs/xfs_dquot_buf.c
+++ b/fs/xfs/libxfs/xfs_dquot_buf.c
@@ -35,13 +35,8 @@ int
xfs_calc_dquots_per_chunk(
unsigned int nbblks) /* basic block units */
{
- unsigned int ndquots;
-
ASSERT(nbblks > 0);
- ndquots = BBTOB(nbblks);
- do_div(ndquots, sizeof(xfs_dqblk_t));
-
- return ndquots;
+ return BBTOB(nbblks) / sizeof(xfs_dqblk_t);
}
/*
diff --git a/fs/xfs/libxfs/xfs_format.h b/fs/xfs/libxfs/xfs_format.h
index 6b7579e7b60a..a1dccd8d96bc 100644
--- a/fs/xfs/libxfs/xfs_format.h
+++ b/fs/xfs/libxfs/xfs_format.h
@@ -930,10 +930,8 @@ static inline uint xfs_dinode_size(int version)
/*
* The 32 bit link count in the inode theoretically maxes out at UINT_MAX.
* Since the pathconf interface is signed, we use 2^31 - 1 instead.
- * The old inode format had a 16 bit link count, so its maximum is USHRT_MAX.
*/
#define XFS_MAXLINK ((1U << 31) - 1U)
-#define XFS_MAXLINK_1 65535U
/*
* Values for di_format
@@ -1578,19 +1576,10 @@ static inline xfs_filblks_t startblockval(xfs_fsblock_t x)
}
/*
- * Possible extent formats.
- */
-typedef enum {
- XFS_EXTFMT_NOSTATE = 0,
- XFS_EXTFMT_HASSTATE
-} xfs_exntfmt_t;
-
-/*
* Possible extent states.
*/
typedef enum {
XFS_EXT_NORM, XFS_EXT_UNWRITTEN,
- XFS_EXT_DMAPI_OFFLINE, XFS_EXT_INVALID
} xfs_exntst_t;
/*
diff --git a/fs/xfs/libxfs/xfs_fs.h b/fs/xfs/libxfs/xfs_fs.h
index b72dc821d78b..095bdf049a3f 100644
--- a/fs/xfs/libxfs/xfs_fs.h
+++ b/fs/xfs/libxfs/xfs_fs.h
@@ -92,6 +92,18 @@ struct getbmapx {
#define BMV_OF_LAST 0x4 /* segment is the last in the file */
#define BMV_OF_SHARED 0x8 /* segment shared with another file */
+/* fmr_owner special values for FS_IOC_GETFSMAP */
+#define XFS_FMR_OWN_FREE FMR_OWN_FREE /* free space */
+#define XFS_FMR_OWN_UNKNOWN FMR_OWN_UNKNOWN /* unknown owner */
+#define XFS_FMR_OWN_FS FMR_OWNER('X', 1) /* static fs metadata */
+#define XFS_FMR_OWN_LOG FMR_OWNER('X', 2) /* journalling log */
+#define XFS_FMR_OWN_AG FMR_OWNER('X', 3) /* per-AG metadata */
+#define XFS_FMR_OWN_INOBT FMR_OWNER('X', 4) /* inode btree blocks */
+#define XFS_FMR_OWN_INODES FMR_OWNER('X', 5) /* inodes */
+#define XFS_FMR_OWN_REFC FMR_OWNER('X', 6) /* refcount tree */
+#define XFS_FMR_OWN_COW FMR_OWNER('X', 7) /* cow staging */
+#define XFS_FMR_OWN_DEFECTIVE FMR_OWNER('X', 8) /* bad blocks */
+
/*
* Structure for XFS_IOC_FSSETDM.
* For use by backup and restore programs to set the XFS on-disk inode
@@ -502,6 +514,7 @@ typedef struct xfs_swapext
#define XFS_IOC_GETBMAPX _IOWR('X', 56, struct getbmap)
#define XFS_IOC_ZERO_RANGE _IOW ('X', 57, struct xfs_flock64)
#define XFS_IOC_FREE_EOFBLOCKS _IOR ('X', 58, struct xfs_fs_eofblocks)
+/* XFS_IOC_GETFSMAP ------ hoisted 59 */
/*
* ioctl commands that replace IRIX syssgi()'s
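Editor's note, not part of the patch: the XFS_FMR_OWN_* values above are built with FMR_OWNER() from the new linux/fsmap.h uapi header. As a rough sketch (check the uapi header for the authoritative definition), the macro is assumed to pack an owner "type" character into the upper 32 bits and a per-type code into the lower 32:

#include <stdint.h>

/* Illustrative stand-in for FMR_OWNER(); not the uapi definition itself. */
static uint64_t fmr_owner(uint32_t type, uint32_t code)
{
	return ((uint64_t)type << 32) | code;
}

/* e.g. XFS_FMR_OWN_LOG would then correspond to fmr_owner('X', 2). */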
diff --git a/fs/xfs/libxfs/xfs_inode_buf.c b/fs/xfs/libxfs/xfs_inode_buf.c
index d93f9d918cfc..09c3d1aecef2 100644
--- a/fs/xfs/libxfs/xfs_inode_buf.c
+++ b/fs/xfs/libxfs/xfs_inode_buf.c
@@ -508,7 +508,7 @@ xfs_iread(
/* even unallocated inodes are verified */
if (!xfs_dinode_verify(mp, ip->i_ino, dip)) {
- xfs_alert(mp, "%s: validation failed for inode %lld failed",
+ xfs_alert(mp, "%s: validation failed for inode %lld",
__func__, ip->i_ino);
XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, dip);
diff --git a/fs/xfs/libxfs/xfs_inode_fork.c b/fs/xfs/libxfs/xfs_inode_fork.c
index 8a37efe04de3..0e80f34fe97c 100644
--- a/fs/xfs/libxfs/xfs_inode_fork.c
+++ b/fs/xfs/libxfs/xfs_inode_fork.c
@@ -42,35 +42,6 @@ STATIC int xfs_iformat_local(xfs_inode_t *, xfs_dinode_t *, int, int);
STATIC int xfs_iformat_extents(xfs_inode_t *, xfs_dinode_t *, int);
STATIC int xfs_iformat_btree(xfs_inode_t *, xfs_dinode_t *, int);
-#ifdef DEBUG
-/*
- * Make sure that the extents in the given memory buffer
- * are valid.
- */
-void
-xfs_validate_extents(
- xfs_ifork_t *ifp,
- int nrecs,
- xfs_exntfmt_t fmt)
-{
- xfs_bmbt_irec_t irec;
- xfs_bmbt_rec_host_t rec;
- int i;
-
- for (i = 0; i < nrecs; i++) {
- xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, i);
- rec.l0 = get_unaligned(&ep->l0);
- rec.l1 = get_unaligned(&ep->l1);
- xfs_bmbt_get_all(&rec, &irec);
- if (fmt == XFS_EXTFMT_NOSTATE)
- ASSERT(irec.br_state == XFS_EXT_NORM);
- }
-}
-#else /* DEBUG */
-#define xfs_validate_extents(ifp, nrecs, fmt)
-#endif /* DEBUG */
-
-
/*
* Move inode type and inode format specific information from the
* on-disk inode to the in-core inode. For fifos, devs, and sockets
@@ -352,40 +323,33 @@ xfs_iformat_local(
}
/*
- * The file consists of a set of extents all
- * of which fit into the on-disk inode.
- * If there are few enough extents to fit into
- * the if_inline_ext, then copy them there.
- * Otherwise allocate a buffer for them and copy
- * them into it. Either way, set if_extents
- * to point at the extents.
+ * The file consists of a set of extents all of which fit into the on-disk
+ * inode. If there are few enough extents to fit into the if_inline_ext, then
+ * copy them there. Otherwise allocate a buffer for them and copy them into it.
+ * Either way, set if_extents to point at the extents.
*/
STATIC int
xfs_iformat_extents(
- xfs_inode_t *ip,
- xfs_dinode_t *dip,
- int whichfork)
+ struct xfs_inode *ip,
+ struct xfs_dinode *dip,
+ int whichfork)
{
- xfs_bmbt_rec_t *dp;
- xfs_ifork_t *ifp;
- int nex;
- int size;
- int i;
-
- ifp = XFS_IFORK_PTR(ip, whichfork);
- nex = XFS_DFORK_NEXTENTS(dip, whichfork);
- size = nex * (uint)sizeof(xfs_bmbt_rec_t);
+ struct xfs_mount *mp = ip->i_mount;
+ struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
+ int nex = XFS_DFORK_NEXTENTS(dip, whichfork);
+ int size = nex * sizeof(xfs_bmbt_rec_t);
+ struct xfs_bmbt_rec *dp;
+ int i;
/*
- * If the number of extents is unreasonable, then something
- * is wrong and we just bail out rather than crash in
- * kmem_alloc() or memcpy() below.
+ * If the number of extents is unreasonable, then something is wrong and
+ * we just bail out rather than crash in kmem_alloc() or memcpy() below.
*/
- if (unlikely(size < 0 || size > XFS_DFORK_SIZE(dip, ip->i_mount, whichfork))) {
+ if (unlikely(size < 0 || size > XFS_DFORK_SIZE(dip, mp, whichfork))) {
xfs_warn(ip->i_mount, "corrupt inode %Lu ((a)extents = %d).",
(unsigned long long) ip->i_ino, nex);
XFS_CORRUPTION_ERROR("xfs_iformat_extents(1)", XFS_ERRLEVEL_LOW,
- ip->i_mount, dip);
+ mp, dip);
return -EFSCORRUPTED;
}
@@ -400,22 +364,17 @@ xfs_iformat_extents(
ifp->if_bytes = size;
if (size) {
dp = (xfs_bmbt_rec_t *) XFS_DFORK_PTR(dip, whichfork);
- xfs_validate_extents(ifp, nex, XFS_EXTFMT_INODE(ip));
for (i = 0; i < nex; i++, dp++) {
xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, i);
ep->l0 = get_unaligned_be64(&dp->l0);
ep->l1 = get_unaligned_be64(&dp->l1);
+ if (!xfs_bmbt_validate_extent(mp, whichfork, ep)) {
+ XFS_ERROR_REPORT("xfs_iformat_extents(2)",
+ XFS_ERRLEVEL_LOW, mp);
+ return -EFSCORRUPTED;
+ }
}
XFS_BMAP_TRACE_EXLIST(ip, nex, whichfork);
- if (whichfork != XFS_DATA_FORK ||
- XFS_EXTFMT_INODE(ip) == XFS_EXTFMT_NOSTATE)
- if (unlikely(xfs_check_nostate_extents(
- ifp, 0, nex))) {
- XFS_ERROR_REPORT("xfs_iformat_extents(2)",
- XFS_ERRLEVEL_LOW,
- ip->i_mount);
- return -EFSCORRUPTED;
- }
}
ifp->if_flags |= XFS_IFEXTENTS;
return 0;
@@ -518,7 +477,6 @@ xfs_iread_extents(
xfs_iext_destroy(ifp);
return error;
}
- xfs_validate_extents(ifp, nextents, XFS_EXTFMT_INODE(ip));
ifp->if_flags |= XFS_IFEXTENTS;
return 0;
}
@@ -837,6 +795,9 @@ xfs_iextents_copy(
copied = 0;
for (i = 0; i < nrecs; i++) {
xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, i);
+
+ ASSERT(xfs_bmbt_validate_extent(ip->i_mount, whichfork, ep));
+
start_block = xfs_bmbt_get_startblock(ep);
if (isnullstartblock(start_block)) {
/*
@@ -852,7 +813,6 @@ xfs_iextents_copy(
copied++;
}
ASSERT(copied != 0);
- xfs_validate_extents(ifp, copied, XFS_EXTFMT_INODE(ip));
return (copied * (uint)sizeof(xfs_bmbt_rec_t));
}
diff --git a/fs/xfs/libxfs/xfs_rmap.c b/fs/xfs/libxfs/xfs_rmap.c
index 3a8cc7139912..06cfb93c2ef9 100644
--- a/fs/xfs/libxfs/xfs_rmap.c
+++ b/fs/xfs/libxfs/xfs_rmap.c
@@ -2001,14 +2001,14 @@ xfs_rmap_query_range_helper(
/* Find all rmaps between two keys. */
int
xfs_rmap_query_range(
- struct xfs_btree_cur *cur,
- struct xfs_rmap_irec *low_rec,
- struct xfs_rmap_irec *high_rec,
- xfs_rmap_query_range_fn fn,
- void *priv)
+ struct xfs_btree_cur *cur,
+ struct xfs_rmap_irec *low_rec,
+ struct xfs_rmap_irec *high_rec,
+ xfs_rmap_query_range_fn fn,
+ void *priv)
{
- union xfs_btree_irec low_brec;
- union xfs_btree_irec high_brec;
+ union xfs_btree_irec low_brec;
+ union xfs_btree_irec high_brec;
struct xfs_rmap_query_range_info query;
low_brec.r = *low_rec;
@@ -2019,6 +2019,20 @@ xfs_rmap_query_range(
xfs_rmap_query_range_helper, &query);
}
+/* Find all rmaps. */
+int
+xfs_rmap_query_all(
+ struct xfs_btree_cur *cur,
+ xfs_rmap_query_range_fn fn,
+ void *priv)
+{
+ struct xfs_rmap_query_range_info query;
+
+ query.priv = priv;
+ query.fn = fn;
+ return xfs_btree_query_all(cur, xfs_rmap_query_range_helper, &query);
+}
+
/* Clean up after calling xfs_rmap_finish_one. */
void
xfs_rmap_finish_one_cleanup(
@@ -2291,3 +2305,31 @@ xfs_rmap_free_extent(
return __xfs_rmap_add(mp, dfops, XFS_RMAP_FREE, owner,
XFS_DATA_FORK, &bmap);
}
+
+/* Compare rmap records. Returns -1 if a < b, 1 if a > b, and 0 if equal. */
+int
+xfs_rmap_compare(
+ const struct xfs_rmap_irec *a,
+ const struct xfs_rmap_irec *b)
+{
+ __u64 oa;
+ __u64 ob;
+
+ oa = xfs_rmap_irec_offset_pack(a);
+ ob = xfs_rmap_irec_offset_pack(b);
+
+ if (a->rm_startblock < b->rm_startblock)
+ return -1;
+ else if (a->rm_startblock > b->rm_startblock)
+ return 1;
+ else if (a->rm_owner < b->rm_owner)
+ return -1;
+ else if (a->rm_owner > b->rm_owner)
+ return 1;
+ else if (oa < ob)
+ return -1;
+ else if (oa > ob)
+ return 1;
+ else
+ return 0;
+}
diff --git a/fs/xfs/libxfs/xfs_rmap.h b/fs/xfs/libxfs/xfs_rmap.h
index 789930599339..98f908fea103 100644
--- a/fs/xfs/libxfs/xfs_rmap.h
+++ b/fs/xfs/libxfs/xfs_rmap.h
@@ -162,6 +162,8 @@ typedef int (*xfs_rmap_query_range_fn)(
int xfs_rmap_query_range(struct xfs_btree_cur *cur,
struct xfs_rmap_irec *low_rec, struct xfs_rmap_irec *high_rec,
xfs_rmap_query_range_fn fn, void *priv);
+int xfs_rmap_query_all(struct xfs_btree_cur *cur, xfs_rmap_query_range_fn fn,
+ void *priv);
enum xfs_rmap_intent_type {
XFS_RMAP_MAP,
@@ -212,5 +214,7 @@ int xfs_rmap_find_left_neighbor(struct xfs_btree_cur *cur, xfs_agblock_t bno,
int xfs_rmap_lookup_le_range(struct xfs_btree_cur *cur, xfs_agblock_t bno,
uint64_t owner, uint64_t offset, unsigned int flags,
struct xfs_rmap_irec *irec, int *stat);
+int xfs_rmap_compare(const struct xfs_rmap_irec *a,
+ const struct xfs_rmap_irec *b);
#endif /* __XFS_RMAP_H__ */
diff --git a/fs/xfs/libxfs/xfs_rtbitmap.c b/fs/xfs/libxfs/xfs_rtbitmap.c
index ea45584a9913..e47b99e59f60 100644
--- a/fs/xfs/libxfs/xfs_rtbitmap.c
+++ b/fs/xfs/libxfs/xfs_rtbitmap.c
@@ -1016,3 +1016,73 @@ xfs_rtfree_extent(
}
return 0;
}
+
+/* Find all the free records within a given range. */
+int
+xfs_rtalloc_query_range(
+ struct xfs_trans *tp,
+ struct xfs_rtalloc_rec *low_rec,
+ struct xfs_rtalloc_rec *high_rec,
+ xfs_rtalloc_query_range_fn fn,
+ void *priv)
+{
+ struct xfs_rtalloc_rec rec;
+ struct xfs_mount *mp = tp->t_mountp;
+ xfs_rtblock_t rtstart;
+ xfs_rtblock_t rtend;
+ xfs_rtblock_t rem;
+ int is_free;
+ int error = 0;
+
+ if (low_rec->ar_startblock > high_rec->ar_startblock)
+ return -EINVAL;
+ else if (low_rec->ar_startblock == high_rec->ar_startblock)
+ return 0;
+
+ /* Iterate the bitmap, reporting each free extent we find. */
+ rtstart = low_rec->ar_startblock;
+ rem = high_rec->ar_startblock - rtstart;
+ while (rem) {
+ /* Is the first block free? */
+ error = xfs_rtcheck_range(mp, tp, rtstart, 1, 1, &rtend,
+ &is_free);
+ if (error)
+ break;
+
+ /* How long does the extent go for? */
+ error = xfs_rtfind_forw(mp, tp, rtstart,
+ high_rec->ar_startblock - 1, &rtend);
+ if (error)
+ break;
+
+ if (is_free) {
+ rec.ar_startblock = rtstart;
+ rec.ar_blockcount = rtend - rtstart + 1;
+
+ error = fn(tp, &rec, priv);
+ if (error)
+ break;
+ }
+
+ rem -= rtend - rtstart + 1;
+ rtstart = rtend + 1;
+ }
+
+ return error;
+}
+
+/* Find all the free records. */
+int
+xfs_rtalloc_query_all(
+ struct xfs_trans *tp,
+ xfs_rtalloc_query_range_fn fn,
+ void *priv)
+{
+ struct xfs_rtalloc_rec keys[2];
+
+ keys[0].ar_startblock = 0;
+ keys[1].ar_startblock = tp->t_mountp->m_sb.sb_rblocks;
+ keys[0].ar_blockcount = keys[1].ar_blockcount = 0;
+
+ return xfs_rtalloc_query_range(tp, &keys[0], &keys[1], fn, priv);
+}
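Editor's note, not part of the patch: xfs_rtalloc_query_range() above walks the realtime bitmap and invokes the callback once per free run it finds. The same run-detection shape, reduced to a self-contained user-space loop over a boolean "free" array (purely illustrative, no XFS types):

#include <stdbool.h>
#include <stdio.h>

/* Report each maximal run of free blocks in [start, end). */
static void report_free_runs(const bool *is_free, unsigned start, unsigned end)
{
	unsigned b = start;

	while (b < end) {
		unsigned run_end = b;

		/* Find how far the current state extends. */
		while (run_end + 1 < end && is_free[run_end + 1] == is_free[b])
			run_end++;

		if (is_free[b])
			printf("free run: start %u, length %u\n",
			       b, run_end - b + 1);

		b = run_end + 1;
	}
}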
diff --git a/fs/xfs/libxfs/xfs_trans_space.h b/fs/xfs/libxfs/xfs_trans_space.h
index 7917f6e44286..d787c677d2a3 100644
--- a/fs/xfs/libxfs/xfs_trans_space.h
+++ b/fs/xfs/libxfs/xfs_trans_space.h
@@ -21,8 +21,20 @@
/*
* Components of space reservations.
*/
+
+/* Worst case number of rmaps that can be held in a block. */
#define XFS_MAX_CONTIG_RMAPS_PER_BLOCK(mp) \
(((mp)->m_rmap_mxr[0]) - ((mp)->m_rmap_mnr[0]))
+
+/* Adding one rmap could split every level up to the top of the tree. */
+#define XFS_RMAPADD_SPACE_RES(mp) ((mp)->m_rmap_maxlevels)
+
+/* Blocks we might need to add "b" rmaps to a tree. */
+#define XFS_NRMAPADD_SPACE_RES(mp, b)\
+ (((b + XFS_MAX_CONTIG_RMAPS_PER_BLOCK(mp) - 1) / \
+ XFS_MAX_CONTIG_RMAPS_PER_BLOCK(mp)) * \
+ XFS_RMAPADD_SPACE_RES(mp))
+
#define XFS_MAX_CONTIG_EXTENTS_PER_BLOCK(mp) \
(((mp)->m_alloc_mxr[0]) - ((mp)->m_alloc_mnr[0]))
#define XFS_EXTENTADD_SPACE_RES(mp,w) (XFS_BM_MAXLEVELS(mp,w) - 1)
@@ -30,13 +42,12 @@
(((b + XFS_MAX_CONTIG_EXTENTS_PER_BLOCK(mp) - 1) / \
XFS_MAX_CONTIG_EXTENTS_PER_BLOCK(mp)) * \
XFS_EXTENTADD_SPACE_RES(mp,w))
+
+/* Blocks we might need to add "b" mappings & rmappings to a file. */
#define XFS_SWAP_RMAP_SPACE_RES(mp,b,w)\
- (((b + XFS_MAX_CONTIG_EXTENTS_PER_BLOCK(mp) - 1) / \
- XFS_MAX_CONTIG_EXTENTS_PER_BLOCK(mp)) * \
- XFS_EXTENTADD_SPACE_RES(mp,w) + \
- ((b + XFS_MAX_CONTIG_RMAPS_PER_BLOCK(mp) - 1) / \
- XFS_MAX_CONTIG_RMAPS_PER_BLOCK(mp)) * \
- (mp)->m_rmap_maxlevels)
+ (XFS_NEXTENTADD_SPACE_RES((mp), (b), (w)) + \
+ XFS_NRMAPADD_SPACE_RES((mp), (b)))
+
#define XFS_DAENTER_1B(mp,w) \
((w) == XFS_DATA_FORK ? (mp)->m_dir_geo->fsbcount : 1)
#define XFS_DAENTER_DBS(mp,w) \
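Editor's note, not part of the patch: the new XFS_NRMAPADD_SPACE_RES() follows the same shape as the extent-add reservation — round the number of rmaps up to whole "worst-case contiguous rmaps per block" chunks, then allow every chunk to split the full height of the rmap btree. A worked stand-alone example with made-up numbers:

#include <stdio.h>

/* ceil(b / per_block) chunks, each charged maxlevels blocks. */
static unsigned long nrmapadd_res(unsigned long b, unsigned long per_block,
				  unsigned long maxlevels)
{
	return ((b + per_block - 1) / per_block) * maxlevels;
}

int main(void)
{
	/* e.g. 100 new rmaps, 84 per block, 5-level tree: 2 chunks * 5 = 10 */
	printf("%lu blocks reserved\n", nrmapadd_res(100, 84, 5));
	return 0;
}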
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index 05eca126c688..09af0f7cd55e 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -111,11 +111,11 @@ xfs_finish_page_writeback(
bsize = bh->b_size;
do {
+ if (off > end)
+ break;
next = bh->b_this_page;
if (off < bvec->bv_offset)
goto next_bh;
- if (off > end)
- break;
bh->b_end_io(bh, !error);
next_bh:
off += bsize;
@@ -1261,8 +1261,8 @@ xfs_get_blocks(
if (nimaps) {
trace_xfs_get_blocks_found(ip, offset, size,
- ISUNWRITTEN(&imap) ? XFS_IO_UNWRITTEN
- : XFS_IO_OVERWRITE, &imap);
+ imap.br_state == XFS_EXT_UNWRITTEN ?
+ XFS_IO_UNWRITTEN : XFS_IO_OVERWRITE, &imap);
xfs_iunlock(ip, lockmode);
} else {
trace_xfs_get_blocks_notfound(ip, offset, size);
@@ -1276,9 +1276,7 @@ xfs_get_blocks(
* For unwritten extents do not report a disk address in the buffered
* read case (treat as if we're reading into a hole).
*/
- if (imap.br_startblock != HOLESTARTBLOCK &&
- imap.br_startblock != DELAYSTARTBLOCK &&
- !ISUNWRITTEN(&imap))
+ if (xfs_bmap_is_real_extent(&imap))
xfs_map_buffer(inode, bh_result, &imap, offset);
/*
diff --git a/fs/xfs/xfs_bmap_item.c b/fs/xfs/xfs_bmap_item.c
index 9bf57c76623b..d419d23fa214 100644
--- a/fs/xfs/xfs_bmap_item.c
+++ b/fs/xfs/xfs_bmap_item.c
@@ -34,6 +34,8 @@
#include "xfs_bmap.h"
#include "xfs_icache.h"
#include "xfs_trace.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_trans_space.h"
kmem_zone_t *xfs_bui_zone;
@@ -215,6 +217,7 @@ void
xfs_bui_release(
struct xfs_bui_log_item *buip)
{
+ ASSERT(atomic_read(&buip->bui_refcount) > 0);
if (atomic_dec_and_test(&buip->bui_refcount)) {
xfs_trans_ail_remove(&buip->bui_item, SHUTDOWN_LOG_IO_ERROR);
xfs_bui_item_free(buip);
@@ -446,7 +449,8 @@ xfs_bui_recover(
return -EIO;
}
- error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
+ error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate,
+ XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK), 0, 0, &tp);
if (error)
return error;
budp = xfs_trans_get_bud(tp, buip);
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index 8795e9cd867c..2b954308a1d6 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -448,10 +448,9 @@ xfs_getbmap_adjust_shared(
next_map->br_blockcount = 0;
/* Only written data blocks can be shared. */
- if (!xfs_is_reflink_inode(ip) || whichfork != XFS_DATA_FORK ||
- map->br_startblock == DELAYSTARTBLOCK ||
- map->br_startblock == HOLESTARTBLOCK ||
- ISUNWRITTEN(map))
+ if (!xfs_is_reflink_inode(ip) ||
+ whichfork != XFS_DATA_FORK ||
+ !xfs_bmap_is_real_extent(map))
return 0;
agno = XFS_FSB_TO_AGNO(mp, map->br_startblock);
@@ -904,9 +903,9 @@ xfs_can_free_eofblocks(struct xfs_inode *ip, bool force)
}
/*
- * This is called by xfs_inactive to free any blocks beyond eof
- * when the link count isn't zero and by xfs_dm_punch_hole() when
- * punching a hole to EOF.
+ * This is called to free any blocks beyond eof. The caller must hold
+ * IOLOCK_EXCL unless we are in the inode reclaim path and have the only
+ * reference to the inode.
*/
int
xfs_free_eofblocks(
@@ -921,8 +920,6 @@ xfs_free_eofblocks(
struct xfs_bmbt_irec imap;
struct xfs_mount *mp = ip->i_mount;
- ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
-
/*
* Figure out if there are any blocks beyond the end
* of the file. If not, then there is nothing to do.
@@ -1209,11 +1206,8 @@ xfs_adjust_extent_unmap_boundaries(
return error;
if (nimap && imap.br_startblock != HOLESTARTBLOCK) {
- xfs_daddr_t block;
-
ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
- block = imap.br_startblock;
- mod = do_div(block, mp->m_sb.sb_rextsize);
+ mod = do_mod(imap.br_startblock, mp->m_sb.sb_rextsize);
if (mod)
*startoffset_fsb += mp->m_sb.sb_rextsize - mod;
}
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index ca09061369cb..62fa39276a24 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -1079,6 +1079,8 @@ void
xfs_buf_unlock(
struct xfs_buf *bp)
{
+ ASSERT(xfs_buf_islocked(bp));
+
XB_CLEAR_OWNER(bp);
up(&bp->b_sema);
@@ -1815,6 +1817,28 @@ error:
}
/*
+ * Cancel a delayed write list.
+ *
+ * Remove each buffer from the list, clear the delwri queue flag and drop the
+ * associated buffer reference.
+ */
+void
+xfs_buf_delwri_cancel(
+ struct list_head *list)
+{
+ struct xfs_buf *bp;
+
+ while (!list_empty(list)) {
+ bp = list_first_entry(list, struct xfs_buf, b_list);
+
+ xfs_buf_lock(bp);
+ bp->b_flags &= ~_XBF_DELWRI_Q;
+ list_del_init(&bp->b_list);
+ xfs_buf_relse(bp);
+ }
+}
+
+/*
* Add a buffer to the delayed write list.
*
* This queues a buffer for writeout if it hasn't already been. Note that
diff --git a/fs/xfs/xfs_buf.h b/fs/xfs/xfs_buf.h
index 3c867e5a63e1..8d1d44f87ce9 100644
--- a/fs/xfs/xfs_buf.h
+++ b/fs/xfs/xfs_buf.h
@@ -291,7 +291,6 @@ xfs_buf_readahead(
return xfs_buf_readahead_map(target, &map, 1, ops);
}
-struct xfs_buf *xfs_buf_get_empty(struct xfs_buftarg *target, size_t numblks);
void xfs_buf_set_empty(struct xfs_buf *bp, size_t numblks);
int xfs_buf_associate_memory(struct xfs_buf *bp, void *mem, size_t length);
@@ -330,6 +329,7 @@ extern void *xfs_buf_offset(struct xfs_buf *, size_t);
extern void xfs_buf_stale(struct xfs_buf *bp);
/* Delayed Write Buffer Routines */
+extern void xfs_buf_delwri_cancel(struct list_head *);
extern bool xfs_buf_delwri_queue(struct xfs_buf *, struct list_head *);
extern int xfs_buf_delwri_submit(struct list_head *);
extern int xfs_buf_delwri_submit_nowait(struct list_head *);
diff --git a/fs/xfs/xfs_dir2_readdir.c b/fs/xfs/xfs_dir2_readdir.c
index ad9396e516f6..20b7a5c6eb2f 100644
--- a/fs/xfs/xfs_dir2_readdir.c
+++ b/fs/xfs/xfs_dir2_readdir.c
@@ -394,6 +394,7 @@ xfs_dir2_leaf_readbuf(
/*
* Do we need more readahead?
+ * Each loop tries to process one full dir block; the last may be partial.
*/
blk_start_plug(&plug);
for (mip->ra_index = mip->ra_offset = i = 0;
@@ -404,7 +405,8 @@ xfs_dir2_leaf_readbuf(
* Read-ahead a contiguous directory block.
*/
if (i > mip->ra_current &&
- map[mip->ra_index].br_blockcount >= geo->fsbcount) {
+ (map[mip->ra_index].br_blockcount - mip->ra_offset) >=
+ geo->fsbcount) {
xfs_dir3_data_readahead(dp,
map[mip->ra_index].br_startoff + mip->ra_offset,
XFS_FSB_TO_DADDR(dp->i_mount,
@@ -425,14 +427,19 @@ xfs_dir2_leaf_readbuf(
}
/*
- * Advance offset through the mapping table.
+ * Advance offset through the mapping table, processing a full
+ * dir block even if it is fragmented into several extents.
+ * But stop if we have consumed all valid mappings, even if
+ * it's not yet a full directory block.
*/
- for (j = 0; j < geo->fsbcount; j += length ) {
+ for (j = 0;
+ j < geo->fsbcount && mip->ra_index < mip->map_valid;
+ j += length ) {
/*
* The rest of this extent but not more than a dir
* block.
*/
- length = min_t(int, geo->fsbcount,
+ length = min_t(int, geo->fsbcount - j,
map[mip->ra_index].br_blockcount -
mip->ra_offset);
mip->ra_offset += length;
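Editor's note, not part of the patch: the min_t() change above matters when one directory block spans several extents — with geo->fsbcount = 8 and a first extent contributing 5 fsbs, the second pass must take at most 8 - 5 = 3 more, not another full 8. A small stand-alone loop showing the corrected accumulation (illustrative names only):

#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
	int fsbcount = 8;			/* fsbs per dir block */
	int ext_remaining[] = { 5, 12 };	/* fsbs left in each extent */
	int nextents = 2;
	int i = 0, j = 0;

	while (j < fsbcount && i < nextents) {
		/* Clamp to what's left of this dir block, then this extent. */
		int length = MIN(fsbcount - j, ext_remaining[i]);

		printf("take %d fsbs from extent %d\n", length, i);
		ext_remaining[i] -= length;
		if (ext_remaining[i] == 0)
			i++;
		j += length;
	}
	return 0;
}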
diff --git a/fs/xfs/xfs_discard.c b/fs/xfs/xfs_discard.c
index d796ffac7296..6a05d278da64 100644
--- a/fs/xfs/xfs_discard.c
+++ b/fs/xfs/xfs_discard.c
@@ -132,6 +132,11 @@ next_extent:
error = xfs_btree_decrement(cur, 0, &i);
if (error)
goto out_del_cursor;
+
+ if (fatal_signal_pending(current)) {
+ error = -ERESTARTSYS;
+ goto out_del_cursor;
+ }
}
out_del_cursor:
@@ -196,8 +201,11 @@ xfs_ioc_trim(
for (agno = start_agno; agno <= end_agno; agno++) {
error = xfs_trim_extents(mp, agno, start, end, minlen,
&blocks_trimmed);
- if (error)
+ if (error) {
last_error = error;
+ if (error == -ERESTARTSYS)
+ break;
+ }
}
if (last_error)
diff --git a/fs/xfs/xfs_extfree_item.c b/fs/xfs/xfs_extfree_item.c
index d7bc14906af8..44f8c5451210 100644
--- a/fs/xfs/xfs_extfree_item.c
+++ b/fs/xfs/xfs_extfree_item.c
@@ -290,6 +290,7 @@ void
xfs_efi_release(
struct xfs_efi_log_item *efip)
{
+ ASSERT(atomic_read(&efip->efi_refcount) > 0);
if (atomic_dec_and_test(&efip->efi_refcount)) {
xfs_trans_ail_remove(&efip->efi_item, SHUTDOWN_LOG_IO_ERROR);
xfs_efi_item_free(efip);
diff --git a/fs/xfs/xfs_fsmap.c b/fs/xfs/xfs_fsmap.c
new file mode 100644
index 000000000000..3683819887a5
--- /dev/null
+++ b/fs/xfs/xfs_fsmap.c
@@ -0,0 +1,940 @@
+/*
+ * Copyright (C) 2017 Oracle. All Rights Reserved.
+ *
+ * Author: Darrick J. Wong <darrick.wong@oracle.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include "xfs.h"
+#include "xfs_fs.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
+#include "xfs_sb.h"
+#include "xfs_mount.h"
+#include "xfs_defer.h"
+#include "xfs_inode.h"
+#include "xfs_trans.h"
+#include "xfs_error.h"
+#include "xfs_btree.h"
+#include "xfs_rmap_btree.h"
+#include "xfs_trace.h"
+#include "xfs_log.h"
+#include "xfs_rmap.h"
+#include "xfs_alloc.h"
+#include "xfs_bit.h"
+#include <linux/fsmap.h>
+#include "xfs_fsmap.h"
+#include "xfs_refcount.h"
+#include "xfs_refcount_btree.h"
+#include "xfs_alloc_btree.h"
+#include "xfs_rtalloc.h"
+
+/* Convert an xfs_fsmap to an fsmap. */
+void
+xfs_fsmap_from_internal(
+ struct fsmap *dest,
+ struct xfs_fsmap *src)
+{
+ dest->fmr_device = src->fmr_device;
+ dest->fmr_flags = src->fmr_flags;
+ dest->fmr_physical = BBTOB(src->fmr_physical);
+ dest->fmr_owner = src->fmr_owner;
+ dest->fmr_offset = BBTOB(src->fmr_offset);
+ dest->fmr_length = BBTOB(src->fmr_length);
+ dest->fmr_reserved[0] = 0;
+ dest->fmr_reserved[1] = 0;
+ dest->fmr_reserved[2] = 0;
+}
+
+/* Convert an fsmap to an xfs_fsmap. */
+void
+xfs_fsmap_to_internal(
+ struct xfs_fsmap *dest,
+ struct fsmap *src)
+{
+ dest->fmr_device = src->fmr_device;
+ dest->fmr_flags = src->fmr_flags;
+ dest->fmr_physical = BTOBBT(src->fmr_physical);
+ dest->fmr_owner = src->fmr_owner;
+ dest->fmr_offset = BTOBBT(src->fmr_offset);
+ dest->fmr_length = BTOBBT(src->fmr_length);
+}
+
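Editor's note, not part of the patch: the two converters above change only units — the fsmap ioctl ABI carries byte values, while struct xfs_fsmap holds 512-byte basic blocks, so BBTOB()/BTOBBT() shift by the basic-block size in each direction. A minimal stand-alone equivalent (BB_SHIFT here is an assumption standing in for the kernel's BBSHIFT of 9):

#include <stdint.h>

#define BB_SHIFT 9	/* 512-byte basic blocks */

static uint64_t bb_to_bytes(uint64_t bb)	{ return bb << BB_SHIFT; }
static uint64_t bytes_to_bb(uint64_t bytes)	{ return bytes >> BB_SHIFT; }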
+/* Convert an fsmap owner into an rmapbt owner. */
+static int
+xfs_fsmap_owner_to_rmap(
+ struct xfs_rmap_irec *dest,
+ struct xfs_fsmap *src)
+{
+ if (!(src->fmr_flags & FMR_OF_SPECIAL_OWNER)) {
+ dest->rm_owner = src->fmr_owner;
+ return 0;
+ }
+
+ switch (src->fmr_owner) {
+ case 0: /* "lowest owner id possible" */
+ case -1ULL: /* "highest owner id possible" */
+ dest->rm_owner = 0;
+ break;
+ case XFS_FMR_OWN_FREE:
+ dest->rm_owner = XFS_RMAP_OWN_NULL;
+ break;
+ case XFS_FMR_OWN_UNKNOWN:
+ dest->rm_owner = XFS_RMAP_OWN_UNKNOWN;
+ break;
+ case XFS_FMR_OWN_FS:
+ dest->rm_owner = XFS_RMAP_OWN_FS;
+ break;
+ case XFS_FMR_OWN_LOG:
+ dest->rm_owner = XFS_RMAP_OWN_LOG;
+ break;
+ case XFS_FMR_OWN_AG:
+ dest->rm_owner = XFS_RMAP_OWN_AG;
+ break;
+ case XFS_FMR_OWN_INOBT:
+ dest->rm_owner = XFS_RMAP_OWN_INOBT;
+ break;
+ case XFS_FMR_OWN_INODES:
+ dest->rm_owner = XFS_RMAP_OWN_INODES;
+ break;
+ case XFS_FMR_OWN_REFC:
+ dest->rm_owner = XFS_RMAP_OWN_REFC;
+ break;
+ case XFS_FMR_OWN_COW:
+ dest->rm_owner = XFS_RMAP_OWN_COW;
+ break;
+ case XFS_FMR_OWN_DEFECTIVE: /* not implemented */
+ /* fall through */
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/* Convert an rmapbt owner into an fsmap owner. */
+static int
+xfs_fsmap_owner_from_rmap(
+ struct xfs_fsmap *dest,
+ struct xfs_rmap_irec *src)
+{
+ dest->fmr_flags = 0;
+ if (!XFS_RMAP_NON_INODE_OWNER(src->rm_owner)) {
+ dest->fmr_owner = src->rm_owner;
+ return 0;
+ }
+ dest->fmr_flags |= FMR_OF_SPECIAL_OWNER;
+
+ switch (src->rm_owner) {
+ case XFS_RMAP_OWN_FS:
+ dest->fmr_owner = XFS_FMR_OWN_FS;
+ break;
+ case XFS_RMAP_OWN_LOG:
+ dest->fmr_owner = XFS_FMR_OWN_LOG;
+ break;
+ case XFS_RMAP_OWN_AG:
+ dest->fmr_owner = XFS_FMR_OWN_AG;
+ break;
+ case XFS_RMAP_OWN_INOBT:
+ dest->fmr_owner = XFS_FMR_OWN_INOBT;
+ break;
+ case XFS_RMAP_OWN_INODES:
+ dest->fmr_owner = XFS_FMR_OWN_INODES;
+ break;
+ case XFS_RMAP_OWN_REFC:
+ dest->fmr_owner = XFS_FMR_OWN_REFC;
+ break;
+ case XFS_RMAP_OWN_COW:
+ dest->fmr_owner = XFS_FMR_OWN_COW;
+ break;
+ case XFS_RMAP_OWN_NULL: /* "free" */
+ dest->fmr_owner = XFS_FMR_OWN_FREE;
+ break;
+ default:
+ return -EFSCORRUPTED;
+ }
+ return 0;
+}
+
+/* getfsmap query state */
+struct xfs_getfsmap_info {
+ struct xfs_fsmap_head *head;
+ xfs_fsmap_format_t formatter; /* formatting fn */
+ void *format_arg; /* format buffer */
+ struct xfs_buf *agf_bp; /* AGF, for refcount queries */
+ xfs_daddr_t next_daddr; /* next daddr we expect */
+ u64 missing_owner; /* owner of holes */
+ u32 dev; /* device id */
+ xfs_agnumber_t agno; /* AG number, if applicable */
+ struct xfs_rmap_irec low; /* low rmap key */
+ struct xfs_rmap_irec high; /* high rmap key */
+ bool last; /* last extent? */
+};
+
+/* Associate a device with a getfsmap handler. */
+struct xfs_getfsmap_dev {
+ u32 dev;
+ int (*fn)(struct xfs_trans *tp,
+ struct xfs_fsmap *keys,
+ struct xfs_getfsmap_info *info);
+};
+
+/* Compare two getfsmap device handlers. */
+static int
+xfs_getfsmap_dev_compare(
+ const void *p1,
+ const void *p2)
+{
+ const struct xfs_getfsmap_dev *d1 = p1;
+ const struct xfs_getfsmap_dev *d2 = p2;
+
+ return d1->dev - d2->dev;
+}
+
+/* Decide if this mapping is shared. */
+STATIC int
+xfs_getfsmap_is_shared(
+ struct xfs_trans *tp,
+ struct xfs_getfsmap_info *info,
+ struct xfs_rmap_irec *rec,
+ bool *stat)
+{
+ struct xfs_mount *mp = tp->t_mountp;
+ struct xfs_btree_cur *cur;
+ xfs_agblock_t fbno;
+ xfs_extlen_t flen;
+ int error;
+
+ *stat = false;
+ if (!xfs_sb_version_hasreflink(&mp->m_sb))
+ return 0;
+ /* rt files will have agno set to NULLAGNUMBER */
+ if (info->agno == NULLAGNUMBER)
+ return 0;
+
+ /* Are there any shared blocks here? */
+ flen = 0;
+ cur = xfs_refcountbt_init_cursor(mp, tp, info->agf_bp,
+ info->agno, NULL);
+
+ error = xfs_refcount_find_shared(cur, rec->rm_startblock,
+ rec->rm_blockcount, &fbno, &flen, false);
+
+ xfs_btree_del_cursor(cur, error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
+ if (error)
+ return error;
+
+ *stat = flen > 0;
+ return 0;
+}
+
+/*
+ * Format a reverse mapping for getfsmap, having translated rm_startblock
+ * into the appropriate daddr units.
+ */
+STATIC int
+xfs_getfsmap_helper(
+ struct xfs_trans *tp,
+ struct xfs_getfsmap_info *info,
+ struct xfs_rmap_irec *rec,
+ xfs_daddr_t rec_daddr)
+{
+ struct xfs_fsmap fmr;
+ struct xfs_mount *mp = tp->t_mountp;
+ bool shared;
+ int error;
+
+ if (fatal_signal_pending(current))
+ return -EINTR;
+
+ /*
+ * Filter out records that start before our startpoint, if the
+ * caller requested that.
+ */
+ if (xfs_rmap_compare(rec, &info->low) < 0) {
+ rec_daddr += XFS_FSB_TO_BB(mp, rec->rm_blockcount);
+ if (info->next_daddr < rec_daddr)
+ info->next_daddr = rec_daddr;
+ return XFS_BTREE_QUERY_RANGE_CONTINUE;
+ }
+
+ /* Are we just counting mappings? */
+ if (info->head->fmh_count == 0) {
+ if (rec_daddr > info->next_daddr)
+ info->head->fmh_entries++;
+
+ if (info->last)
+ return XFS_BTREE_QUERY_RANGE_CONTINUE;
+
+ info->head->fmh_entries++;
+
+ rec_daddr += XFS_FSB_TO_BB(mp, rec->rm_blockcount);
+ if (info->next_daddr < rec_daddr)
+ info->next_daddr = rec_daddr;
+ return XFS_BTREE_QUERY_RANGE_CONTINUE;
+ }
+
+ /*
+ * If the record starts past the last physical block we saw,
+ * then we've found a gap. Report the gap as being owned by
+ * whatever the caller specified is the missing owner.
+ */
+ if (rec_daddr > info->next_daddr) {
+ if (info->head->fmh_entries >= info->head->fmh_count)
+ return XFS_BTREE_QUERY_RANGE_ABORT;
+
+ fmr.fmr_device = info->dev;
+ fmr.fmr_physical = info->next_daddr;
+ fmr.fmr_owner = info->missing_owner;
+ fmr.fmr_offset = 0;
+ fmr.fmr_length = rec_daddr - info->next_daddr;
+ fmr.fmr_flags = FMR_OF_SPECIAL_OWNER;
+ error = info->formatter(&fmr, info->format_arg);
+ if (error)
+ return error;
+ info->head->fmh_entries++;
+ }
+
+ if (info->last)
+ goto out;
+
+ /* Fill out the extent we found */
+ if (info->head->fmh_entries >= info->head->fmh_count)
+ return XFS_BTREE_QUERY_RANGE_ABORT;
+
+ trace_xfs_fsmap_mapping(mp, info->dev, info->agno, rec);
+
+ fmr.fmr_device = info->dev;
+ fmr.fmr_physical = rec_daddr;
+ error = xfs_fsmap_owner_from_rmap(&fmr, rec);
+ if (error)
+ return error;
+ fmr.fmr_offset = XFS_FSB_TO_BB(mp, rec->rm_offset);
+ fmr.fmr_length = XFS_FSB_TO_BB(mp, rec->rm_blockcount);
+ if (rec->rm_flags & XFS_RMAP_UNWRITTEN)
+ fmr.fmr_flags |= FMR_OF_PREALLOC;
+ if (rec->rm_flags & XFS_RMAP_ATTR_FORK)
+ fmr.fmr_flags |= FMR_OF_ATTR_FORK;
+ if (rec->rm_flags & XFS_RMAP_BMBT_BLOCK)
+ fmr.fmr_flags |= FMR_OF_EXTENT_MAP;
+ if (fmr.fmr_flags == 0) {
+ error = xfs_getfsmap_is_shared(tp, info, rec, &shared);
+ if (error)
+ return error;
+ if (shared)
+ fmr.fmr_flags |= FMR_OF_SHARED;
+ }
+ error = info->formatter(&fmr, info->format_arg);
+ if (error)
+ return error;
+ info->head->fmh_entries++;
+
+out:
+ rec_daddr += XFS_FSB_TO_BB(mp, rec->rm_blockcount);
+ if (info->next_daddr < rec_daddr)
+ info->next_daddr = rec_daddr;
+ return XFS_BTREE_QUERY_RANGE_CONTINUE;
+}
+
+/* Transform a rmapbt irec into a fsmap */
+STATIC int
+xfs_getfsmap_datadev_helper(
+ struct xfs_btree_cur *cur,
+ struct xfs_rmap_irec *rec,
+ void *priv)
+{
+ struct xfs_mount *mp = cur->bc_mp;
+ struct xfs_getfsmap_info *info = priv;
+ xfs_fsblock_t fsb;
+ xfs_daddr_t rec_daddr;
+
+ fsb = XFS_AGB_TO_FSB(mp, cur->bc_private.a.agno, rec->rm_startblock);
+ rec_daddr = XFS_FSB_TO_DADDR(mp, fsb);
+
+ return xfs_getfsmap_helper(cur->bc_tp, info, rec, rec_daddr);
+}
+
+/* Transform a rtbitmap "record" into a fsmap */
+STATIC int
+xfs_getfsmap_rtdev_rtbitmap_helper(
+ struct xfs_trans *tp,
+ struct xfs_rtalloc_rec *rec,
+ void *priv)
+{
+ struct xfs_mount *mp = tp->t_mountp;
+ struct xfs_getfsmap_info *info = priv;
+ struct xfs_rmap_irec irec;
+ xfs_daddr_t rec_daddr;
+
+ rec_daddr = XFS_FSB_TO_BB(mp, rec->ar_startblock);
+
+ irec.rm_startblock = rec->ar_startblock;
+ irec.rm_blockcount = rec->ar_blockcount;
+ irec.rm_owner = XFS_RMAP_OWN_NULL; /* "free" */
+ irec.rm_offset = 0;
+ irec.rm_flags = 0;
+
+ return xfs_getfsmap_helper(tp, info, &irec, rec_daddr);
+}
+
+/* Transform a bnobt irec into a fsmap */
+STATIC int
+xfs_getfsmap_datadev_bnobt_helper(
+ struct xfs_btree_cur *cur,
+ struct xfs_alloc_rec_incore *rec,
+ void *priv)
+{
+ struct xfs_mount *mp = cur->bc_mp;
+ struct xfs_getfsmap_info *info = priv;
+ struct xfs_rmap_irec irec;
+ xfs_daddr_t rec_daddr;
+
+ rec_daddr = XFS_AGB_TO_DADDR(mp, cur->bc_private.a.agno,
+ rec->ar_startblock);
+
+ irec.rm_startblock = rec->ar_startblock;
+ irec.rm_blockcount = rec->ar_blockcount;
+ irec.rm_owner = XFS_RMAP_OWN_NULL; /* "free" */
+ irec.rm_offset = 0;
+ irec.rm_flags = 0;
+
+ return xfs_getfsmap_helper(cur->bc_tp, info, &irec, rec_daddr);
+}
+
+/* Set rmap flags based on the getfsmap flags */
+static void
+xfs_getfsmap_set_irec_flags(
+ struct xfs_rmap_irec *irec,
+ struct xfs_fsmap *fmr)
+{
+ irec->rm_flags = 0;
+ if (fmr->fmr_flags & FMR_OF_ATTR_FORK)
+ irec->rm_flags |= XFS_RMAP_ATTR_FORK;
+ if (fmr->fmr_flags & FMR_OF_EXTENT_MAP)
+ irec->rm_flags |= XFS_RMAP_BMBT_BLOCK;
+ if (fmr->fmr_flags & FMR_OF_PREALLOC)
+ irec->rm_flags |= XFS_RMAP_UNWRITTEN;
+}
+
+/* Execute a getfsmap query against the log device. */
+STATIC int
+xfs_getfsmap_logdev(
+ struct xfs_trans *tp,
+ struct xfs_fsmap *keys,
+ struct xfs_getfsmap_info *info)
+{
+ struct xfs_mount *mp = tp->t_mountp;
+ struct xfs_rmap_irec rmap;
+ int error;
+
+ /* Set up search keys */
+ info->low.rm_startblock = XFS_BB_TO_FSBT(mp, keys[0].fmr_physical);
+ info->low.rm_offset = XFS_BB_TO_FSBT(mp, keys[0].fmr_offset);
+ error = xfs_fsmap_owner_to_rmap(&info->low, keys);
+ if (error)
+ return error;
+ info->low.rm_blockcount = 0;
+ xfs_getfsmap_set_irec_flags(&info->low, &keys[0]);
+
+ error = xfs_fsmap_owner_to_rmap(&info->high, keys + 1);
+ if (error)
+ return error;
+ info->high.rm_startblock = -1U;
+ info->high.rm_owner = ULLONG_MAX;
+ info->high.rm_offset = ULLONG_MAX;
+ info->high.rm_blockcount = 0;
+ info->high.rm_flags = XFS_RMAP_KEY_FLAGS | XFS_RMAP_REC_FLAGS;
+ info->missing_owner = XFS_FMR_OWN_FREE;
+
+ trace_xfs_fsmap_low_key(mp, info->dev, info->agno, &info->low);
+ trace_xfs_fsmap_high_key(mp, info->dev, info->agno, &info->high);
+
+ if (keys[0].fmr_physical > 0)
+ return 0;
+
+ /* Fabricate an rmap entry for the external log device. */
+ rmap.rm_startblock = 0;
+ rmap.rm_blockcount = mp->m_sb.sb_logblocks;
+ rmap.rm_owner = XFS_RMAP_OWN_LOG;
+ rmap.rm_offset = 0;
+ rmap.rm_flags = 0;
+
+ return xfs_getfsmap_helper(tp, info, &rmap, 0);
+}
+
+/* Execute a getfsmap query against the realtime device. */
+STATIC int
+__xfs_getfsmap_rtdev(
+ struct xfs_trans *tp,
+ struct xfs_fsmap *keys,
+ int (*query_fn)(struct xfs_trans *,
+ struct xfs_getfsmap_info *),
+ struct xfs_getfsmap_info *info)
+{
+ struct xfs_mount *mp = tp->t_mountp;
+ xfs_fsblock_t start_fsb;
+ xfs_fsblock_t end_fsb;
+ xfs_daddr_t eofs;
+ int error = 0;
+
+ eofs = XFS_FSB_TO_BB(mp, mp->m_sb.sb_rblocks);
+ if (keys[0].fmr_physical >= eofs)
+ return 0;
+ if (keys[1].fmr_physical >= eofs)
+ keys[1].fmr_physical = eofs - 1;
+ start_fsb = XFS_BB_TO_FSBT(mp, keys[0].fmr_physical);
+ end_fsb = XFS_BB_TO_FSB(mp, keys[1].fmr_physical);
+
+ /* Set up search keys */
+ info->low.rm_startblock = start_fsb;
+ error = xfs_fsmap_owner_to_rmap(&info->low, &keys[0]);
+ if (error)
+ return error;
+ info->low.rm_offset = XFS_BB_TO_FSBT(mp, keys[0].fmr_offset);
+ info->low.rm_blockcount = 0;
+ xfs_getfsmap_set_irec_flags(&info->low, &keys[0]);
+
+ info->high.rm_startblock = end_fsb;
+ error = xfs_fsmap_owner_to_rmap(&info->high, &keys[1]);
+ if (error)
+ return error;
+ info->high.rm_offset = XFS_BB_TO_FSBT(mp, keys[1].fmr_offset);
+ info->high.rm_blockcount = 0;
+ xfs_getfsmap_set_irec_flags(&info->high, &keys[1]);
+
+ trace_xfs_fsmap_low_key(mp, info->dev, info->agno, &info->low);
+ trace_xfs_fsmap_high_key(mp, info->dev, info->agno, &info->high);
+
+ return query_fn(tp, info);
+}
+
+/* Actually query the realtime bitmap. */
+STATIC int
+xfs_getfsmap_rtdev_rtbitmap_query(
+ struct xfs_trans *tp,
+ struct xfs_getfsmap_info *info)
+{
+ struct xfs_rtalloc_rec alow;
+ struct xfs_rtalloc_rec ahigh;
+ int error;
+
+ xfs_ilock(tp->t_mountp->m_rbmip, XFS_ILOCK_SHARED);
+
+ alow.ar_startblock = info->low.rm_startblock;
+ ahigh.ar_startblock = info->high.rm_startblock;
+ error = xfs_rtalloc_query_range(tp, &alow, &ahigh,
+ xfs_getfsmap_rtdev_rtbitmap_helper, info);
+ if (error)
+ goto err;
+
+ /* Report any gaps at the end of the rtbitmap */
+ info->last = true;
+ error = xfs_getfsmap_rtdev_rtbitmap_helper(tp, &ahigh, info);
+ if (error)
+ goto err;
+err:
+ xfs_iunlock(tp->t_mountp->m_rbmip, XFS_ILOCK_SHARED);
+ return error;
+}
+
+/* Execute a getfsmap query against the realtime device rtbitmap. */
+STATIC int
+xfs_getfsmap_rtdev_rtbitmap(
+ struct xfs_trans *tp,
+ struct xfs_fsmap *keys,
+ struct xfs_getfsmap_info *info)
+{
+ info->missing_owner = XFS_FMR_OWN_UNKNOWN;
+ return __xfs_getfsmap_rtdev(tp, keys, xfs_getfsmap_rtdev_rtbitmap_query,
+ info);
+}
+
+/* Execute a getfsmap query against the regular data device. */
+STATIC int
+__xfs_getfsmap_datadev(
+ struct xfs_trans *tp,
+ struct xfs_fsmap *keys,
+ struct xfs_getfsmap_info *info,
+ int (*query_fn)(struct xfs_trans *,
+ struct xfs_getfsmap_info *,
+ struct xfs_btree_cur **,
+ void *),
+ void *priv)
+{
+ struct xfs_mount *mp = tp->t_mountp;
+ struct xfs_btree_cur *bt_cur = NULL;
+ xfs_fsblock_t start_fsb;
+ xfs_fsblock_t end_fsb;
+ xfs_agnumber_t start_ag;
+ xfs_agnumber_t end_ag;
+ xfs_daddr_t eofs;
+ int error = 0;
+
+ eofs = XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks);
+ if (keys[0].fmr_physical >= eofs)
+ return 0;
+ if (keys[1].fmr_physical >= eofs)
+ keys[1].fmr_physical = eofs - 1;
+ start_fsb = XFS_DADDR_TO_FSB(mp, keys[0].fmr_physical);
+ end_fsb = XFS_DADDR_TO_FSB(mp, keys[1].fmr_physical);
+
+ /*
+ * Convert the fsmap low/high keys to AG based keys. Initialize
+ * low to the fsmap low key and max out the high key to the end
+ * of the AG.
+ */
+ info->low.rm_startblock = XFS_FSB_TO_AGBNO(mp, start_fsb);
+ info->low.rm_offset = XFS_BB_TO_FSBT(mp, keys[0].fmr_offset);
+ error = xfs_fsmap_owner_to_rmap(&info->low, &keys[0]);
+ if (error)
+ return error;
+ info->low.rm_blockcount = 0;
+ xfs_getfsmap_set_irec_flags(&info->low, &keys[0]);
+
+ info->high.rm_startblock = -1U;
+ info->high.rm_owner = ULLONG_MAX;
+ info->high.rm_offset = ULLONG_MAX;
+ info->high.rm_blockcount = 0;
+ info->high.rm_flags = XFS_RMAP_KEY_FLAGS | XFS_RMAP_REC_FLAGS;
+
+ start_ag = XFS_FSB_TO_AGNO(mp, start_fsb);
+ end_ag = XFS_FSB_TO_AGNO(mp, end_fsb);
+
+ /* Query each AG */
+ for (info->agno = start_ag; info->agno <= end_ag; info->agno++) {
+ /*
+ * Set the AG high key from the fsmap high key if this
+ * is the last AG that we're querying.
+ */
+ if (info->agno == end_ag) {
+ info->high.rm_startblock = XFS_FSB_TO_AGBNO(mp,
+ end_fsb);
+ info->high.rm_offset = XFS_BB_TO_FSBT(mp,
+ keys[1].fmr_offset);
+ error = xfs_fsmap_owner_to_rmap(&info->high, &keys[1]);
+ if (error)
+ goto err;
+ xfs_getfsmap_set_irec_flags(&info->high, &keys[1]);
+ }
+
+ if (bt_cur) {
+ xfs_btree_del_cursor(bt_cur, XFS_BTREE_NOERROR);
+ bt_cur = NULL;
+ xfs_trans_brelse(tp, info->agf_bp);
+ info->agf_bp = NULL;
+ }
+
+ error = xfs_alloc_read_agf(mp, tp, info->agno, 0,
+ &info->agf_bp);
+ if (error)
+ goto err;
+
+ trace_xfs_fsmap_low_key(mp, info->dev, info->agno, &info->low);
+ trace_xfs_fsmap_high_key(mp, info->dev, info->agno,
+ &info->high);
+
+ error = query_fn(tp, info, &bt_cur, priv);
+ if (error)
+ goto err;
+
+ /*
+ * Set the AG low key to the start of the AG prior to
+ * moving on to the next AG.
+ */
+ if (info->agno == start_ag) {
+ info->low.rm_startblock = 0;
+ info->low.rm_owner = 0;
+ info->low.rm_offset = 0;
+ info->low.rm_flags = 0;
+ }
+ }
+
+ /* Report any gap at the end of the AG */
+ info->last = true;
+ error = query_fn(tp, info, &bt_cur, priv);
+ if (error)
+ goto err;
+
+err:
+ if (bt_cur)
+ xfs_btree_del_cursor(bt_cur, error < 0 ? XFS_BTREE_ERROR :
+ XFS_BTREE_NOERROR);
+ if (info->agf_bp) {
+ xfs_trans_brelse(tp, info->agf_bp);
+ info->agf_bp = NULL;
+ }
+
+ return error;
+}
+
+/* Actually query the rmap btree. */
+STATIC int
+xfs_getfsmap_datadev_rmapbt_query(
+ struct xfs_trans *tp,
+ struct xfs_getfsmap_info *info,
+ struct xfs_btree_cur **curpp,
+ void *priv)
+{
+ /* Report any gap at the end of the last AG. */
+ if (info->last)
+ return xfs_getfsmap_datadev_helper(*curpp, &info->high, info);
+
+ /* Allocate cursor for this AG and query_range it. */
+ *curpp = xfs_rmapbt_init_cursor(tp->t_mountp, tp, info->agf_bp,
+ info->agno);
+ return xfs_rmap_query_range(*curpp, &info->low, &info->high,
+ xfs_getfsmap_datadev_helper, info);
+}
+
+/* Execute a getfsmap query against the regular data device rmapbt. */
+STATIC int
+xfs_getfsmap_datadev_rmapbt(
+ struct xfs_trans *tp,
+ struct xfs_fsmap *keys,
+ struct xfs_getfsmap_info *info)
+{
+ info->missing_owner = XFS_FMR_OWN_FREE;
+ return __xfs_getfsmap_datadev(tp, keys, info,
+ xfs_getfsmap_datadev_rmapbt_query, NULL);
+}
+
+/* Actually query the bno btree. */
+STATIC int
+xfs_getfsmap_datadev_bnobt_query(
+ struct xfs_trans *tp,
+ struct xfs_getfsmap_info *info,
+ struct xfs_btree_cur **curpp,
+ void *priv)
+{
+ struct xfs_alloc_rec_incore *key = priv;
+
+ /* Report any gap at the end of the last AG. */
+ if (info->last)
+ return xfs_getfsmap_datadev_bnobt_helper(*curpp, &key[1], info);
+
+ /* Allocate cursor for this AG and query_range it. */
+ *curpp = xfs_allocbt_init_cursor(tp->t_mountp, tp, info->agf_bp,
+ info->agno, XFS_BTNUM_BNO);
+ key->ar_startblock = info->low.rm_startblock;
+ key[1].ar_startblock = info->high.rm_startblock;
+ return xfs_alloc_query_range(*curpp, key, &key[1],
+ xfs_getfsmap_datadev_bnobt_helper, info);
+}
+
+/* Execute a getfsmap query against the regular data device's bnobt. */
+STATIC int
+xfs_getfsmap_datadev_bnobt(
+ struct xfs_trans *tp,
+ struct xfs_fsmap *keys,
+ struct xfs_getfsmap_info *info)
+{
+ struct xfs_alloc_rec_incore akeys[2];
+
+ info->missing_owner = XFS_FMR_OWN_UNKNOWN;
+ return __xfs_getfsmap_datadev(tp, keys, info,
+ xfs_getfsmap_datadev_bnobt_query, &akeys[0]);
+}
+
+/* Do we recognize the device? */
+STATIC bool
+xfs_getfsmap_is_valid_device(
+ struct xfs_mount *mp,
+ struct xfs_fsmap *fm)
+{
+ if (fm->fmr_device == 0 || fm->fmr_device == UINT_MAX ||
+ fm->fmr_device == new_encode_dev(mp->m_ddev_targp->bt_dev))
+ return true;
+ if (mp->m_logdev_targp &&
+ fm->fmr_device == new_encode_dev(mp->m_logdev_targp->bt_dev))
+ return true;
+ if (mp->m_rtdev_targp &&
+ fm->fmr_device == new_encode_dev(mp->m_rtdev_targp->bt_dev))
+ return true;
+ return false;
+}
+
+/* Ensure that the low key is less than the high key. */
+STATIC bool
+xfs_getfsmap_check_keys(
+ struct xfs_fsmap *low_key,
+ struct xfs_fsmap *high_key)
+{
+ if (low_key->fmr_device > high_key->fmr_device)
+ return false;
+ if (low_key->fmr_device < high_key->fmr_device)
+ return true;
+
+ if (low_key->fmr_physical > high_key->fmr_physical)
+ return false;
+ if (low_key->fmr_physical < high_key->fmr_physical)
+ return true;
+
+ if (low_key->fmr_owner > high_key->fmr_owner)
+ return false;
+ if (low_key->fmr_owner < high_key->fmr_owner)
+ return true;
+
+ if (low_key->fmr_offset > high_key->fmr_offset)
+ return false;
+ if (low_key->fmr_offset < high_key->fmr_offset)
+ return true;
+
+ return false;
+}
+
+#define XFS_GETFSMAP_DEVS 3
+/*
+ * Get filesystem's extents as described in head, and format for
+ * output. Calls formatter to fill the user's buffer until all
+ * extents are mapped, until the passed-in head->fmh_count slots have
+ * been filled, or until the formatter short-circuits the loop, if it
+ * is tracking filled-in extents on its own.
+ *
+ * Key to Confusion
+ * ----------------
+ * There are multiple levels of keys and counters at work here:
+ * xfs_fsmap_head.fmh_keys -- low and high fsmap keys passed in;
+ * these reflect fs-wide sector addrs.
+ * dkeys -- fmh_keys used to query each device;
+ * these are fmh_keys but w/ the low key
+ * bumped up by fmr_length.
+ * xfs_getfsmap_info.next_daddr -- next disk addr we expect to see; this
+ * is how we detect gaps in the fsmap
+ * records and report them.
+ * xfs_getfsmap_info.low/high -- per-AG low/high keys computed from
+ * dkeys; used to query the metadata.
+ */
+int
+xfs_getfsmap(
+ struct xfs_mount *mp,
+ struct xfs_fsmap_head *head,
+ xfs_fsmap_format_t formatter,
+ void *arg)
+{
+ struct xfs_trans *tp = NULL;
+ struct xfs_fsmap dkeys[2]; /* per-dev keys */
+ struct xfs_getfsmap_dev handlers[XFS_GETFSMAP_DEVS];
+ struct xfs_getfsmap_info info = { NULL };
+ int i;
+ int error = 0;
+
+ if (head->fmh_iflags & ~FMH_IF_VALID)
+ return -EINVAL;
+ if (!xfs_getfsmap_is_valid_device(mp, &head->fmh_keys[0]) ||
+ !xfs_getfsmap_is_valid_device(mp, &head->fmh_keys[1]))
+ return -EINVAL;
+
+ head->fmh_entries = 0;
+
+ /* Set up our device handlers. */
+ memset(handlers, 0, sizeof(handlers));
+ handlers[0].dev = new_encode_dev(mp->m_ddev_targp->bt_dev);
+ if (xfs_sb_version_hasrmapbt(&mp->m_sb))
+ handlers[0].fn = xfs_getfsmap_datadev_rmapbt;
+ else
+ handlers[0].fn = xfs_getfsmap_datadev_bnobt;
+ if (mp->m_logdev_targp != mp->m_ddev_targp) {
+ handlers[1].dev = new_encode_dev(mp->m_logdev_targp->bt_dev);
+ handlers[1].fn = xfs_getfsmap_logdev;
+ }
+ if (mp->m_rtdev_targp) {
+ handlers[2].dev = new_encode_dev(mp->m_rtdev_targp->bt_dev);
+ handlers[2].fn = xfs_getfsmap_rtdev_rtbitmap;
+ }
+
+ xfs_sort(handlers, XFS_GETFSMAP_DEVS, sizeof(struct xfs_getfsmap_dev),
+ xfs_getfsmap_dev_compare);
+
+ /*
+ * To continue where we left off, we allow userspace to use the
+ * last mapping from a previous call as the low key of the next.
+ * This is identified by a non-zero length in the low key. We
+ * have to increment the low key in this scenario to ensure we
+ * don't return the same mapping again, and instead return the
+ * very next mapping.
+ *
+ * If the low key mapping refers to file data, the same physical
+ * blocks could be mapped to several other files/offsets.
+ * According to rmapbt record ordering, the minimal next
+ * possible record for the block range is the next starting
+ * offset in the same inode. Therefore, bump the file offset to
+ * continue the search appropriately. For all other low key
+ * mapping types (attr blocks, metadata), bump the physical
+ * offset as there can be no other mapping for the same physical
+ * block range.
+ */
+ dkeys[0] = head->fmh_keys[0];
+ if (dkeys[0].fmr_flags & (FMR_OF_SPECIAL_OWNER | FMR_OF_EXTENT_MAP)) {
+ dkeys[0].fmr_physical += dkeys[0].fmr_length;
+ dkeys[0].fmr_owner = 0;
+ if (dkeys[0].fmr_offset)
+ return -EINVAL;
+ } else
+ dkeys[0].fmr_offset += dkeys[0].fmr_length;
+ dkeys[0].fmr_length = 0;
+ memset(&dkeys[1], 0xFF, sizeof(struct xfs_fsmap));
+
+ if (!xfs_getfsmap_check_keys(dkeys, &head->fmh_keys[1]))
+ return -EINVAL;
+
+ info.next_daddr = head->fmh_keys[0].fmr_physical +
+ head->fmh_keys[0].fmr_length;
+ info.formatter = formatter;
+ info.format_arg = arg;
+ info.head = head;
+
+ /* For each device we support... */
+ for (i = 0; i < XFS_GETFSMAP_DEVS; i++) {
+ /* Is this device within the range the user asked for? */
+ if (!handlers[i].fn)
+ continue;
+ if (head->fmh_keys[0].fmr_device > handlers[i].dev)
+ continue;
+ if (head->fmh_keys[1].fmr_device < handlers[i].dev)
+ break;
+
+ /*
+ * If this device number matches the high key, we have
+ * to pass the high key to the handler to limit the
+ * query results. If the device number exceeds the
+ * low key, zero out the low key so that we get
+ * everything from the beginning.
+ */
+ if (handlers[i].dev == head->fmh_keys[1].fmr_device)
+ dkeys[1] = head->fmh_keys[1];
+ if (handlers[i].dev > head->fmh_keys[0].fmr_device)
+ memset(&dkeys[0], 0, sizeof(struct xfs_fsmap));
+
+ error = xfs_trans_alloc_empty(mp, &tp);
+ if (error)
+ break;
+
+ info.dev = handlers[i].dev;
+ info.last = false;
+ info.agno = NULLAGNUMBER;
+ error = handlers[i].fn(tp, dkeys, &info);
+ if (error)
+ break;
+ xfs_trans_cancel(tp);
+ tp = NULL;
+ info.next_daddr = 0;
+ }
+
+ if (tp)
+ xfs_trans_cancel(tp);
+ head->fmh_oflags = FMH_OF_DEV_T;
+ return error;
+}
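
For context, here is a minimal userspace sketch (not part of the patch) of how a caller is expected to drive this ioctl, following the continuation scheme described in the comment above: copy the last returned record into the low key and stop once FMR_OF_LAST appears. Structure, flag, and ioctl names are taken from the UAPI <linux/fsmap.h> added alongside this series; the loop itself is illustrative, roughly the pattern userspace tools such as xfs_io's fsmap command follow.

#include <linux/fsmap.h>
#include <sys/ioctl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NR_RECS	128

/* Walk every mapping the kernel will report for the fs backing @fd. */
static int dump_fsmap(int fd)
{
	struct fsmap_head *head;
	struct fsmap *last;
	unsigned int i;

	head = calloc(1, sizeof(*head) + NR_RECS * sizeof(struct fsmap));
	if (!head)
		return -1;
	head->fmh_count = NR_RECS;

	/* High key: everything, out to the end of the last device. */
	head->fmh_keys[1].fmr_device = UINT32_MAX;
	head->fmh_keys[1].fmr_physical = UINT64_MAX;
	head->fmh_keys[1].fmr_owner = UINT64_MAX;
	head->fmh_keys[1].fmr_offset = UINT64_MAX;
	head->fmh_keys[1].fmr_flags = UINT32_MAX;

	for (;;) {
		if (ioctl(fd, FS_IOC_GETFSMAP, head) < 0) {
			free(head);
			return -1;
		}
		if (!head->fmh_entries)
			break;
		for (i = 0; i < head->fmh_entries; i++) {
			struct fsmap *r = &head->fmh_recs[i];

			printf("dev %u phys %llu len %llu owner %llu\n",
			       r->fmr_device,
			       (unsigned long long)r->fmr_physical,
			       (unsigned long long)r->fmr_length,
			       (unsigned long long)r->fmr_owner);
		}

		/* The final record of the whole query carries FMR_OF_LAST. */
		last = &head->fmh_recs[head->fmh_entries - 1];
		if (last->fmr_flags & FMR_OF_LAST)
			break;

		/*
		 * Resume after the last mapping we saw: a non-zero
		 * fmr_length in the low key tells xfs_getfsmap() to bump
		 * the key past it, as described in the comment above.
		 */
		head->fmh_keys[0] = *last;
		memset(head->fmh_keys[0].fmr_reserved, 0,
		       sizeof(head->fmh_keys[0].fmr_reserved));
	}

	free(head);
	return 0;
}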
diff --git a/fs/xfs/xfs_fsmap.h b/fs/xfs/xfs_fsmap.h
new file mode 100644
index 000000000000..0b9bf822595c
--- /dev/null
+++ b/fs/xfs/xfs_fsmap.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2017 Oracle. All Rights Reserved.
+ *
+ * Author: Darrick J. Wong <darrick.wong@oracle.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#ifndef __XFS_FSMAP_H__
+#define __XFS_FSMAP_H__
+
+struct fsmap;
+
+/* internal fsmap representation */
+struct xfs_fsmap {
+ dev_t fmr_device; /* device id */
+ uint32_t fmr_flags; /* mapping flags */
+ uint64_t fmr_physical; /* device offset of segment */
+ uint64_t fmr_owner; /* owner id */
+ xfs_fileoff_t fmr_offset; /* file offset of segment */
+ xfs_filblks_t fmr_length; /* length of segment, blocks */
+};
+
+struct xfs_fsmap_head {
+ uint32_t fmh_iflags; /* control flags */
+ uint32_t fmh_oflags; /* output flags */
+ unsigned int fmh_count; /* # of entries in array incl. input */
+ unsigned int fmh_entries; /* # of entries filled in (output). */
+
+ struct xfs_fsmap fmh_keys[2]; /* low and high keys */
+};
+
+void xfs_fsmap_from_internal(struct fsmap *dest, struct xfs_fsmap *src);
+void xfs_fsmap_to_internal(struct xfs_fsmap *dest, struct fsmap *src);
+
+/* fsmap to userspace formatter - copy to user & advance pointer */
+typedef int (*xfs_fsmap_format_t)(struct xfs_fsmap *, void *);
+
+int xfs_getfsmap(struct xfs_mount *mp, struct xfs_fsmap_head *head,
+ xfs_fsmap_format_t formatter, void *arg);
+
+#endif /* __XFS_FSMAP_H__ */
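
The xfs_fsmap_format_t hook declared above is how xfs_getfsmap() hands each translated record back to its caller; returning a non-zero (negative errno) value aborts the query. A minimal illustrative callback (a sketch only -- the real one is xfs_getfsmap_format() in xfs_ioctl.c later in this patch) might look like:

/* Illustrative formatter: tally records into a caller-supplied counter. */
static int
xfs_fsmap_count_fn(
	struct xfs_fsmap	*fmr,
	void			*priv)
{
	unsigned long long	*nr = priv;

	(*nr)++;
	return 0;	/* a negative errno here would abort the query */
}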
diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
index 3531f8f72fa5..f61c84f8e31a 100644
--- a/fs/xfs/xfs_icache.c
+++ b/fs/xfs/xfs_icache.c
@@ -262,6 +262,22 @@ xfs_inode_clear_reclaim_tag(
xfs_perag_clear_reclaim_tag(pag);
}
+static void
+xfs_inew_wait(
+ struct xfs_inode *ip)
+{
+ wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_INEW_BIT);
+ DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_INEW_BIT);
+
+ do {
+ prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
+ if (!xfs_iflags_test(ip, XFS_INEW))
+ break;
+ schedule();
+ } while (true);
+ finish_wait(wq, &wait.wait);
+}
+
/*
* When we recycle a reclaimable inode, we need to re-initialise the VFS inode
* part of the structure. This is made more complex by the fact we store
@@ -366,14 +382,17 @@ xfs_iget_cache_hit(
error = xfs_reinit_inode(mp, inode);
if (error) {
+ bool wake;
/*
* Re-initializing the inode failed, and we are in deep
* trouble. Try to re-add it to the reclaim list.
*/
rcu_read_lock();
spin_lock(&ip->i_flags_lock);
-
+ wake = !!__xfs_iflags_test(ip, XFS_INEW);
ip->i_flags &= ~(XFS_INEW | XFS_IRECLAIM);
+ if (wake)
+ wake_up_bit(&ip->i_flags, __XFS_INEW_BIT);
ASSERT(ip->i_flags & XFS_IRECLAIMABLE);
trace_xfs_iget_reclaim_fail(ip);
goto out_error;
@@ -623,9 +642,11 @@ out_error_or_again:
STATIC int
xfs_inode_ag_walk_grab(
- struct xfs_inode *ip)
+ struct xfs_inode *ip,
+ int flags)
{
struct inode *inode = VFS_I(ip);
+ bool newinos = !!(flags & XFS_AGITER_INEW_WAIT);
ASSERT(rcu_read_lock_held());
@@ -643,7 +664,8 @@ xfs_inode_ag_walk_grab(
goto out_unlock_noent;
/* avoid new or reclaimable inodes. Leave for reclaim code to flush */
- if (__xfs_iflags_test(ip, XFS_INEW | XFS_IRECLAIMABLE | XFS_IRECLAIM))
+ if ((!newinos && __xfs_iflags_test(ip, XFS_INEW)) ||
+ __xfs_iflags_test(ip, XFS_IRECLAIMABLE | XFS_IRECLAIM))
goto out_unlock_noent;
spin_unlock(&ip->i_flags_lock);
@@ -671,7 +693,8 @@ xfs_inode_ag_walk(
void *args),
int flags,
void *args,
- int tag)
+ int tag,
+ int iter_flags)
{
uint32_t first_index;
int last_error = 0;
@@ -713,7 +736,7 @@ restart:
for (i = 0; i < nr_found; i++) {
struct xfs_inode *ip = batch[i];
- if (done || xfs_inode_ag_walk_grab(ip))
+ if (done || xfs_inode_ag_walk_grab(ip, iter_flags))
batch[i] = NULL;
/*
@@ -741,6 +764,9 @@ restart:
for (i = 0; i < nr_found; i++) {
if (!batch[i])
continue;
+ if ((iter_flags & XFS_AGITER_INEW_WAIT) &&
+ xfs_iflags_test(batch[i], XFS_INEW))
+ xfs_inew_wait(batch[i]);
error = execute(batch[i], flags, args);
IRELE(batch[i]);
if (error == -EAGAIN) {
@@ -820,12 +846,13 @@ xfs_cowblocks_worker(
}
int
-xfs_inode_ag_iterator(
+xfs_inode_ag_iterator_flags(
struct xfs_mount *mp,
int (*execute)(struct xfs_inode *ip, int flags,
void *args),
int flags,
- void *args)
+ void *args,
+ int iter_flags)
{
struct xfs_perag *pag;
int error = 0;
@@ -835,7 +862,8 @@ xfs_inode_ag_iterator(
ag = 0;
while ((pag = xfs_perag_get(mp, ag))) {
ag = pag->pag_agno + 1;
- error = xfs_inode_ag_walk(mp, pag, execute, flags, args, -1);
+ error = xfs_inode_ag_walk(mp, pag, execute, flags, args, -1,
+ iter_flags);
xfs_perag_put(pag);
if (error) {
last_error = error;
@@ -847,6 +875,17 @@ xfs_inode_ag_iterator(
}
int
+xfs_inode_ag_iterator(
+ struct xfs_mount *mp,
+ int (*execute)(struct xfs_inode *ip, int flags,
+ void *args),
+ int flags,
+ void *args)
+{
+ return xfs_inode_ag_iterator_flags(mp, execute, flags, args, 0);
+}
+
+int
xfs_inode_ag_iterator_tag(
struct xfs_mount *mp,
int (*execute)(struct xfs_inode *ip, int flags,
@@ -863,7 +902,8 @@ xfs_inode_ag_iterator_tag(
ag = 0;
while ((pag = xfs_perag_get_tag(mp, ag, tag))) {
ag = pag->pag_agno + 1;
- error = xfs_inode_ag_walk(mp, pag, execute, flags, args, tag);
+ error = xfs_inode_ag_walk(mp, pag, execute, flags, args, tag,
+ 0);
xfs_perag_put(pag);
if (error) {
last_error = error;
diff --git a/fs/xfs/xfs_icache.h b/fs/xfs/xfs_icache.h
index 8a7c849b4dea..9183f77958ef 100644
--- a/fs/xfs/xfs_icache.h
+++ b/fs/xfs/xfs_icache.h
@@ -48,6 +48,11 @@ struct xfs_eofblocks {
#define XFS_IGET_UNTRUSTED 0x2
#define XFS_IGET_DONTCACHE 0x4
+/*
+ * flags for AG inode iterator
+ */
+#define XFS_AGITER_INEW_WAIT 0x1 /* wait on new inodes */
+
int xfs_iget(struct xfs_mount *mp, struct xfs_trans *tp, xfs_ino_t ino,
uint flags, uint lock_flags, xfs_inode_t **ipp);
@@ -79,6 +84,9 @@ void xfs_cowblocks_worker(struct work_struct *);
int xfs_inode_ag_iterator(struct xfs_mount *mp,
int (*execute)(struct xfs_inode *ip, int flags, void *args),
int flags, void *args);
+int xfs_inode_ag_iterator_flags(struct xfs_mount *mp,
+ int (*execute)(struct xfs_inode *ip, int flags, void *args),
+ int flags, void *args, int iter_flags);
int xfs_inode_ag_iterator_tag(struct xfs_mount *mp,
int (*execute)(struct xfs_inode *ip, int flags, void *args),
int flags, void *args, int tag);
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index 7605d8396596..ec9826c56500 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -1906,12 +1906,13 @@ xfs_inactive(
* force is true because we are evicting an inode from the
* cache. Post-eof blocks must be freed, lest we end up with
* broken free space accounting.
+ *
+ * Note: don't bother with iolock here since lockdep complains
+ * about acquiring it in reclaim context. We have the only
+ * reference to the inode at this point anyways.
*/
- if (xfs_can_free_eofblocks(ip, true)) {
- xfs_ilock(ip, XFS_IOLOCK_EXCL);
+ if (xfs_can_free_eofblocks(ip, true))
xfs_free_eofblocks(ip);
- xfs_iunlock(ip, XFS_IOLOCK_EXCL);
- }
return;
}
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index 10dcf27b4c85..10e89fcb49d7 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -216,7 +216,8 @@ static inline bool xfs_is_reflink_inode(struct xfs_inode *ip)
#define XFS_IRECLAIM (1 << 0) /* started reclaiming this inode */
#define XFS_ISTALE (1 << 1) /* inode has been staled */
#define XFS_IRECLAIMABLE (1 << 2) /* inode can be reclaimed */
-#define XFS_INEW (1 << 3) /* inode has just been allocated */
+#define __XFS_INEW_BIT 3 /* inode has just been allocated */
+#define XFS_INEW (1 << __XFS_INEW_BIT)
#define XFS_ITRUNCATED (1 << 5) /* truncated down so flush-on-close */
#define XFS_IDIRTY_RELEASE (1 << 6) /* dirty release already seen */
#define __XFS_IFLOCK_BIT 7 /* inode is being flushed right now */
@@ -464,6 +465,7 @@ static inline void xfs_finish_inode_setup(struct xfs_inode *ip)
xfs_iflags_clear(ip, XFS_INEW);
barrier();
unlock_new_inode(VFS_I(ip));
+ wake_up_bit(&ip->i_flags, __XFS_INEW_BIT);
}
static inline void xfs_setup_existing_inode(struct xfs_inode *ip)
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c
index d90e7811ccdd..08cb7d1a4a3a 100644
--- a/fs/xfs/xfs_inode_item.c
+++ b/fs/xfs/xfs_inode_item.c
@@ -731,22 +731,27 @@ xfs_iflush_done(
* holding the lock before removing the inode from the AIL.
*/
if (need_ail) {
- struct xfs_log_item *log_items[need_ail];
- int i = 0;
+ bool mlip_changed = false;
+
+ /* this is an opencoded batch version of xfs_trans_ail_delete */
spin_lock(&ailp->xa_lock);
for (blip = lip; blip; blip = blip->li_bio_list) {
- iip = INODE_ITEM(blip);
- if (iip->ili_logged &&
- blip->li_lsn == iip->ili_flush_lsn) {
- log_items[i++] = blip;
- }
- ASSERT(i <= need_ail);
+ if (INODE_ITEM(blip)->ili_logged &&
+ blip->li_lsn == INODE_ITEM(blip)->ili_flush_lsn)
+ mlip_changed |= xfs_ail_delete_one(ailp, blip);
}
- /* xfs_trans_ail_delete_bulk() drops the AIL lock. */
- xfs_trans_ail_delete_bulk(ailp, log_items, i,
- SHUTDOWN_CORRUPT_INCORE);
- }
+ if (mlip_changed) {
+ if (!XFS_FORCED_SHUTDOWN(ailp->xa_mount))
+ xlog_assign_tail_lsn_locked(ailp->xa_mount);
+ if (list_empty(&ailp->xa_ail))
+ wake_up_all(&ailp->xa_empty);
+ }
+ spin_unlock(&ailp->xa_lock);
+
+ if (mlip_changed)
+ xfs_log_space_wake(ailp->xa_mount);
+ }
/*
* clean up and unlock the flush lock now we are done. We can clear the
diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
index 2fd7fdf5438f..6190697603c9 100644
--- a/fs/xfs/xfs_ioctl.c
+++ b/fs/xfs/xfs_ioctl.c
@@ -41,6 +41,9 @@
#include "xfs_trans.h"
#include "xfs_pnfs.h"
#include "xfs_acl.h"
+#include "xfs_btree.h"
+#include <linux/fsmap.h>
+#include "xfs_fsmap.h"
#include <linux/capability.h>
#include <linux/cred.h>
@@ -1543,10 +1546,11 @@ xfs_ioc_getbmap(
unsigned int cmd,
void __user *arg)
{
- struct getbmapx bmx;
+ struct getbmapx bmx = { 0 };
int error;
- if (copy_from_user(&bmx, arg, sizeof(struct getbmapx)))
+ /* struct getbmap is a strict subset of struct getbmapx. */
+ if (copy_from_user(&bmx, arg, offsetof(struct getbmapx, bmv_iflags)))
return -EFAULT;
if (bmx.bmv_count < 2)
@@ -1608,6 +1612,84 @@ xfs_ioc_getbmapx(
return 0;
}
+struct getfsmap_info {
+ struct xfs_mount *mp;
+ struct fsmap_head __user *data;
+ unsigned int idx;
+ __u32 last_flags;
+};
+
+STATIC int
+xfs_getfsmap_format(struct xfs_fsmap *xfm, void *priv)
+{
+ struct getfsmap_info *info = priv;
+ struct fsmap fm;
+
+ trace_xfs_getfsmap_mapping(info->mp, xfm);
+
+ info->last_flags = xfm->fmr_flags;
+ xfs_fsmap_from_internal(&fm, xfm);
+ if (copy_to_user(&info->data->fmh_recs[info->idx++], &fm,
+ sizeof(struct fsmap)))
+ return -EFAULT;
+
+ return 0;
+}
+
+STATIC int
+xfs_ioc_getfsmap(
+ struct xfs_inode *ip,
+ struct fsmap_head __user *arg)
+{
+ struct getfsmap_info info = { NULL };
+ struct xfs_fsmap_head xhead = {0};
+ struct fsmap_head head;
+ bool aborted = false;
+ int error;
+
+ if (copy_from_user(&head, arg, sizeof(struct fsmap_head)))
+ return -EFAULT;
+ if (memchr_inv(head.fmh_reserved, 0, sizeof(head.fmh_reserved)) ||
+ memchr_inv(head.fmh_keys[0].fmr_reserved, 0,
+ sizeof(head.fmh_keys[0].fmr_reserved)) ||
+ memchr_inv(head.fmh_keys[1].fmr_reserved, 0,
+ sizeof(head.fmh_keys[1].fmr_reserved)))
+ return -EINVAL;
+
+ xhead.fmh_iflags = head.fmh_iflags;
+ xhead.fmh_count = head.fmh_count;
+ xfs_fsmap_to_internal(&xhead.fmh_keys[0], &head.fmh_keys[0]);
+ xfs_fsmap_to_internal(&xhead.fmh_keys[1], &head.fmh_keys[1]);
+
+ trace_xfs_getfsmap_low_key(ip->i_mount, &xhead.fmh_keys[0]);
+ trace_xfs_getfsmap_high_key(ip->i_mount, &xhead.fmh_keys[1]);
+
+ info.mp = ip->i_mount;
+ info.data = arg;
+ error = xfs_getfsmap(ip->i_mount, &xhead, xfs_getfsmap_format, &info);
+ if (error == XFS_BTREE_QUERY_RANGE_ABORT) {
+ error = 0;
+ aborted = true;
+ } else if (error)
+ return error;
+
+ /* If we didn't abort, set the "last" flag in the last fmx */
+ if (!aborted && info.idx) {
+ info.last_flags |= FMR_OF_LAST;
+ if (copy_to_user(&info.data->fmh_recs[info.idx - 1].fmr_flags,
+ &info.last_flags, sizeof(info.last_flags)))
+ return -EFAULT;
+ }
+
+ /* copy back header */
+ head.fmh_entries = xhead.fmh_entries;
+ head.fmh_oflags = xhead.fmh_oflags;
+ if (copy_to_user(arg, &head, sizeof(struct fsmap_head)))
+ return -EFAULT;
+
+ return 0;
+}
+
int
xfs_ioc_swapext(
xfs_swapext_t *sxp)
@@ -1788,6 +1870,9 @@ xfs_file_ioctl(
case XFS_IOC_GETBMAPX:
return xfs_ioc_getbmapx(ip, arg);
+ case FS_IOC_GETFSMAP:
+ return xfs_ioc_getfsmap(ip, arg);
+
case XFS_IOC_FD_TO_HANDLE:
case XFS_IOC_PATH_TO_HANDLE:
case XFS_IOC_PATH_TO_FSHANDLE: {
diff --git a/fs/xfs/xfs_ioctl32.c b/fs/xfs/xfs_ioctl32.c
index 7c49938c5aed..fa0bc4d46065 100644
--- a/fs/xfs/xfs_ioctl32.c
+++ b/fs/xfs/xfs_ioctl32.c
@@ -20,6 +20,7 @@
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
+#include <linux/fsmap.h>
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
@@ -554,6 +555,7 @@ xfs_file_compat_ioctl(
case XFS_IOC_GOINGDOWN:
case XFS_IOC_ERROR_INJECTION:
case XFS_IOC_ERROR_CLEARALL:
+ case FS_IOC_GETFSMAP:
return xfs_file_ioctl(filp, cmd, p);
#ifndef BROKEN_X86_ALIGNMENT
/* These are handled fine if no alignment issues */
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index 288ee5b840d7..a63f61c256bd 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -240,7 +240,7 @@ xfs_iomap_write_direct(
*/
if (IS_DAX(VFS_I(ip))) {
bmapi_flags = XFS_BMAPI_CONVERT | XFS_BMAPI_ZERO;
- if (ISUNWRITTEN(imap)) {
+ if (imap->br_state == XFS_EXT_UNWRITTEN) {
tflags |= XFS_TRANS_RESERVE;
resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0) << 1;
}
@@ -945,7 +945,7 @@ static inline bool imap_needs_alloc(struct inode *inode,
return !nimaps ||
imap->br_startblock == HOLESTARTBLOCK ||
imap->br_startblock == DELAYSTARTBLOCK ||
- (IS_DAX(inode) && ISUNWRITTEN(imap));
+ (IS_DAX(inode) && imap->br_state == XFS_EXT_UNWRITTEN);
}
static inline bool need_excl_ilock(struct xfs_inode *ip, unsigned flags)
@@ -976,6 +976,7 @@ xfs_file_iomap_begin(
int nimaps = 1, error = 0;
bool shared = false, trimmed = false;
unsigned lockmode;
+ struct block_device *bdev;
if (XFS_FORCED_SHUTDOWN(mp))
return -EIO;
@@ -1063,6 +1064,14 @@ xfs_file_iomap_begin(
}
xfs_bmbt_to_iomap(ip, iomap, &imap);
+
+ /* optionally associate a dax device with the iomap bdev */
+ bdev = iomap->bdev;
+ if (blk_queue_dax(bdev->bd_queue))
+ iomap->dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
+ else
+ iomap->dax_dev = NULL;
+
if (shared)
iomap->flags |= IOMAP_F_SHARED;
return 0;
@@ -1140,6 +1149,7 @@ xfs_file_iomap_end(
unsigned flags,
struct iomap *iomap)
{
+ put_dax(iomap->dax_dev);
if ((flags & IOMAP_WRITE) && iomap->type == IOMAP_DELALLOC)
return xfs_file_iomap_end_delalloc(XFS_I(inode), offset,
length, written, iomap);
@@ -1170,10 +1180,10 @@ xfs_xattr_iomap_begin(
if (XFS_FORCED_SHUTDOWN(mp))
return -EIO;
- lockmode = xfs_ilock_data_map_shared(ip);
+ lockmode = xfs_ilock_attr_map_shared(ip);
/* if there are no attribute fork or extents, return ENOENT */
- if (XFS_IFORK_Q(ip) || !ip->i_d.di_anextents) {
+ if (!XFS_IFORK_Q(ip) || !ip->i_d.di_anextents) {
error = -ENOENT;
goto out_unlock;
}
diff --git a/fs/xfs/xfs_linux.h b/fs/xfs/xfs_linux.h
index 592fdf7111cb..044fb0e15390 100644
--- a/fs/xfs/xfs_linux.h
+++ b/fs/xfs/xfs_linux.h
@@ -212,88 +212,6 @@ static inline kgid_t xfs_gid_to_kgid(__uint32_t gid)
#define xfs_sort(a,n,s,fn) sort(a,n,s,fn,NULL)
#define xfs_stack_trace() dump_stack()
-
-/* Move the kernel do_div definition off to one side */
-
-#if defined __i386__
-/* For ia32 we need to pull some tricks to get past various versions
- * of the compiler which do not like us using do_div in the middle
- * of large functions.
- */
-static inline __u32 xfs_do_div(void *a, __u32 b, int n)
-{
- __u32 mod;
-
- switch (n) {
- case 4:
- mod = *(__u32 *)a % b;
- *(__u32 *)a = *(__u32 *)a / b;
- return mod;
- case 8:
- {
- unsigned long __upper, __low, __high, __mod;
- __u64 c = *(__u64 *)a;
- __upper = __high = c >> 32;
- __low = c;
- if (__high) {
- __upper = __high % (b);
- __high = __high / (b);
- }
- asm("divl %2":"=a" (__low), "=d" (__mod):"rm" (b), "0" (__low), "1" (__upper));
- asm("":"=A" (c):"a" (__low),"d" (__high));
- *(__u64 *)a = c;
- return __mod;
- }
- }
-
- /* NOTREACHED */
- return 0;
-}
-
-/* Side effect free 64 bit mod operation */
-static inline __u32 xfs_do_mod(void *a, __u32 b, int n)
-{
- switch (n) {
- case 4:
- return *(__u32 *)a % b;
- case 8:
- {
- unsigned long __upper, __low, __high, __mod;
- __u64 c = *(__u64 *)a;
- __upper = __high = c >> 32;
- __low = c;
- if (__high) {
- __upper = __high % (b);
- __high = __high / (b);
- }
- asm("divl %2":"=a" (__low), "=d" (__mod):"rm" (b), "0" (__low), "1" (__upper));
- asm("":"=A" (c):"a" (__low),"d" (__high));
- return __mod;
- }
- }
-
- /* NOTREACHED */
- return 0;
-}
-#else
-static inline __u32 xfs_do_div(void *a, __u32 b, int n)
-{
- __u32 mod;
-
- switch (n) {
- case 4:
- mod = *(__u32 *)a % b;
- *(__u32 *)a = *(__u32 *)a / b;
- return mod;
- case 8:
- mod = do_div(*(__u64 *)a, b);
- return mod;
- }
-
- /* NOTREACHED */
- return 0;
-}
-
/* Side effect free 64 bit mod operation */
static inline __u32 xfs_do_mod(void *a, __u32 b, int n)
{
@@ -310,10 +228,7 @@ static inline __u32 xfs_do_mod(void *a, __u32 b, int n)
/* NOTREACHED */
return 0;
}
-#endif
-#undef do_div
-#define do_div(a, b) xfs_do_div(&(a), (b), sizeof(a))
#define do_mod(a, b) xfs_do_mod(&(a), (b), sizeof(a))
static inline __uint64_t roundup_64(__uint64_t x, __uint32_t y)
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index b1469f0a91a6..3731f13f63e9 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -1293,7 +1293,7 @@ void
xfs_log_work_queue(
struct xfs_mount *mp)
{
- queue_delayed_work(mp->m_log_workqueue, &mp->m_log->l_work,
+ queue_delayed_work(mp->m_sync_workqueue, &mp->m_log->l_work,
msecs_to_jiffies(xfs_syncd_centisecs * 10));
}
@@ -1852,7 +1852,7 @@ xlog_sync(
*/
if (log->l_badcrc_factor &&
(prandom_u32() % log->l_badcrc_factor == 0)) {
- iclog->ic_header.h_crc &= 0xAAAAAAAA;
+ iclog->ic_header.h_crc &= cpu_to_le32(0xAAAAAAAA);
iclog->ic_state |= XLOG_STATE_IOABORT;
xfs_warn(log->l_mp,
"Intentionally corrupted log record at LSN 0x%llx. Shutdown imminent.",
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index 4a98762ec8b4..cd0b077deb35 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -3796,7 +3796,7 @@ xlog_recover_bud_pass2(
* This routine is called when an inode create format structure is found in a
* committed transaction in the log. It's purpose is to initialise the inodes
* being allocated on disk. This requires us to get inode cluster buffers that
- * match the range to be intialised, stamped with inode templates and written
+ * match the range to be initialised, stamped with inode templates and written
* by delayed write so that subsequent modifications will hit the cached buffer
* and only need writing out at the end of recovery.
*/
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index 688ebff1f663..2eaf81859166 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -73,6 +73,10 @@ xfs_uuid_mount(
uuid_t *uuid = &mp->m_sb.sb_uuid;
int hole, i;
+ /* Publish UUID in struct super_block */
+ BUILD_BUG_ON(sizeof(mp->m_super->s_uuid) != sizeof(uuid_t));
+ memcpy(&mp->m_super->s_uuid, uuid, sizeof(uuid_t));
+
if (mp->m_flags & XFS_MOUNT_NOUUID)
return 0;
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index 6db6fd6b82b0..9fa312a41c93 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -183,6 +183,7 @@ typedef struct xfs_mount {
struct workqueue_struct *m_reclaim_workqueue;
struct workqueue_struct *m_log_workqueue;
struct workqueue_struct *m_eofblocks_workqueue;
+ struct workqueue_struct *m_sync_workqueue;
/*
* Generation of the filesysyem layout. This is incremented by each
@@ -312,7 +313,7 @@ void xfs_do_force_shutdown(struct xfs_mount *mp, int flags, char *fname,
static inline xfs_agnumber_t
xfs_daddr_to_agno(struct xfs_mount *mp, xfs_daddr_t d)
{
- xfs_daddr_t ld = XFS_BB_TO_FSBT(mp, d);
+ xfs_rfsblock_t ld = XFS_BB_TO_FSBT(mp, d);
do_div(ld, mp->m_sb.sb_agblocks);
return (xfs_agnumber_t) ld;
}
@@ -320,7 +321,7 @@ xfs_daddr_to_agno(struct xfs_mount *mp, xfs_daddr_t d)
static inline xfs_agblock_t
xfs_daddr_to_agbno(struct xfs_mount *mp, xfs_daddr_t d)
{
- xfs_daddr_t ld = XFS_BB_TO_FSBT(mp, d);
+ xfs_rfsblock_t ld = XFS_BB_TO_FSBT(mp, d);
return (xfs_agblock_t) do_div(ld, mp->m_sb.sb_agblocks);
}
diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c
index b669b123287b..5fe6e70b88ef 100644
--- a/fs/xfs/xfs_qm.c
+++ b/fs/xfs/xfs_qm.c
@@ -851,8 +851,8 @@ xfs_qm_reset_dqcounts(
* started afresh by xfs_qm_quotacheck.
*/
#ifdef DEBUG
- j = XFS_FSB_TO_B(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
- do_div(j, sizeof(xfs_dqblk_t));
+ j = (int)XFS_FSB_TO_B(mp, XFS_DQUOT_CLUSTER_SIZE_FSB) /
+ sizeof(xfs_dqblk_t);
ASSERT(mp->m_quotainfo->qi_dqperchunk == j);
#endif
dqb = bp->b_addr;
@@ -1384,12 +1384,7 @@ xfs_qm_quotacheck(
mp->m_qflags |= flags;
error_return:
- while (!list_empty(&buffer_list)) {
- struct xfs_buf *bp =
- list_first_entry(&buffer_list, struct xfs_buf, b_list);
- list_del_init(&bp->b_list);
- xfs_buf_relse(bp);
- }
+ xfs_buf_delwri_cancel(&buffer_list);
if (error) {
xfs_warn(mp,
diff --git a/fs/xfs/xfs_qm_syscalls.c b/fs/xfs/xfs_qm_syscalls.c
index 475a3882a81f..9cb5c381b01c 100644
--- a/fs/xfs/xfs_qm_syscalls.c
+++ b/fs/xfs/xfs_qm_syscalls.c
@@ -759,5 +759,6 @@ xfs_qm_dqrele_all_inodes(
uint flags)
{
ASSERT(mp->m_quotainfo);
- xfs_inode_ag_iterator(mp, xfs_dqrele_inode, flags, NULL);
+ xfs_inode_ag_iterator_flags(mp, xfs_dqrele_inode, flags, NULL,
+ XFS_AGITER_INEW_WAIT);
}
diff --git a/fs/xfs/xfs_refcount_item.c b/fs/xfs/xfs_refcount_item.c
index 6e4c7446c3d4..96fe209b5eb6 100644
--- a/fs/xfs/xfs_refcount_item.c
+++ b/fs/xfs/xfs_refcount_item.c
@@ -221,6 +221,7 @@ void
xfs_cui_release(
struct xfs_cui_log_item *cuip)
{
+ ASSERT(atomic_read(&cuip->cui_refcount) > 0);
if (atomic_dec_and_test(&cuip->cui_refcount)) {
xfs_trans_ail_remove(&cuip->cui_item, SHUTDOWN_LOG_IO_ERROR);
xfs_cui_item_free(cuip);
diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c
index 4a84c5ea266d..ffe6fe7a7eb5 100644
--- a/fs/xfs/xfs_reflink.c
+++ b/fs/xfs/xfs_reflink.c
@@ -206,11 +206,7 @@ xfs_reflink_trim_around_shared(
int error = 0;
/* Holes, unwritten, and delalloc extents cannot be shared */
- if (!xfs_is_reflink_inode(ip) ||
- ISUNWRITTEN(irec) ||
- irec->br_startblock == HOLESTARTBLOCK ||
- irec->br_startblock == DELAYSTARTBLOCK ||
- isnullstartblock(irec->br_startblock)) {
+ if (!xfs_is_reflink_inode(ip) || !xfs_bmap_is_real_extent(irec)) {
*shared = false;
return 0;
}
@@ -709,8 +705,22 @@ xfs_reflink_end_cow(
offset_fsb = XFS_B_TO_FSBT(ip->i_mount, offset);
end_fsb = XFS_B_TO_FSB(ip->i_mount, offset + count);
- /* Start a rolling transaction to switch the mappings */
- resblks = XFS_EXTENTADD_SPACE_RES(ip->i_mount, XFS_DATA_FORK);
+ /*
+ * Start a rolling transaction to switch the mappings. We're
+ * unlikely ever to have to remap 16T worth of single-block
+ * extents, so just cap the worst case extent count to 2^32-1.
+ * Stick a warning in just in case, and avoid 64-bit division.
+ */
+ BUILD_BUG_ON(MAX_RW_COUNT > UINT_MAX);
+ if (end_fsb - offset_fsb > UINT_MAX) {
+ error = -EFSCORRUPTED;
+ xfs_force_shutdown(ip->i_mount, SHUTDOWN_CORRUPT_INCORE);
+ ASSERT(0);
+ goto out;
+ }
+ resblks = XFS_NEXTENTADD_SPACE_RES(ip->i_mount,
+ (unsigned int)(end_fsb - offset_fsb),
+ XFS_DATA_FORK);
error = xfs_trans_alloc(ip->i_mount, &M_RES(ip->i_mount)->tr_write,
resblks, 0, 0, &tp);
if (error)
@@ -1045,12 +1055,12 @@ xfs_reflink_remap_extent(
xfs_off_t new_isize)
{
struct xfs_mount *mp = ip->i_mount;
+ bool real_extent = xfs_bmap_is_real_extent(irec);
struct xfs_trans *tp;
xfs_fsblock_t firstfsb;
unsigned int resblks;
struct xfs_defer_ops dfops;
struct xfs_bmbt_irec uirec;
- bool real_extent;
xfs_filblks_t rlen;
xfs_filblks_t unmap_len;
xfs_off_t newlen;
@@ -1059,11 +1069,6 @@ xfs_reflink_remap_extent(
unmap_len = irec->br_startoff + irec->br_blockcount - destoff;
trace_xfs_reflink_punch_range(ip, destoff, unmap_len);
- /* Only remap normal extents. */
- real_extent = (irec->br_startblock != HOLESTARTBLOCK &&
- irec->br_startblock != DELAYSTARTBLOCK &&
- !ISUNWRITTEN(irec));
-
/* No reflinking if we're low on space */
if (real_extent) {
error = xfs_reflink_ag_has_free_space(mp,
@@ -1359,9 +1364,7 @@ xfs_reflink_dirty_extents(
goto out;
if (nmaps == 0)
break;
- if (map[0].br_startblock == HOLESTARTBLOCK ||
- map[0].br_startblock == DELAYSTARTBLOCK ||
- ISUNWRITTEN(&map[0]))
+ if (!xfs_bmap_is_real_extent(&map[0]))
goto next;
map[1] = map[0];
@@ -1435,9 +1438,7 @@ xfs_reflink_clear_inode_flag(
return error;
if (nmaps == 0)
break;
- if (map.br_startblock == HOLESTARTBLOCK ||
- map.br_startblock == DELAYSTARTBLOCK ||
- ISUNWRITTEN(&map))
+ if (!xfs_bmap_is_real_extent(&map))
goto next;
agno = XFS_FSB_TO_AGNO(mp, map.br_startblock);
diff --git a/fs/xfs/xfs_rmap_item.c b/fs/xfs/xfs_rmap_item.c
index 73c827831551..f3b139c9aa16 100644
--- a/fs/xfs/xfs_rmap_item.c
+++ b/fs/xfs/xfs_rmap_item.c
@@ -243,6 +243,7 @@ void
xfs_rui_release(
struct xfs_rui_log_item *ruip)
{
+ ASSERT(atomic_read(&ruip->rui_refcount) > 0);
if (atomic_dec_and_test(&ruip->rui_refcount)) {
xfs_trans_ail_remove(&ruip->rui_item, SHUTDOWN_LOG_IO_ERROR);
xfs_rui_item_free(ruip);
diff --git a/fs/xfs/xfs_rtalloc.h b/fs/xfs/xfs_rtalloc.h
index 51dd3c726608..f13133e6f19f 100644
--- a/fs/xfs/xfs_rtalloc.h
+++ b/fs/xfs/xfs_rtalloc.h
@@ -23,6 +23,16 @@
struct xfs_mount;
struct xfs_trans;
+struct xfs_rtalloc_rec {
+ xfs_rtblock_t ar_startblock;
+ xfs_rtblock_t ar_blockcount;
+};
+
+typedef int (*xfs_rtalloc_query_range_fn)(
+ struct xfs_trans *tp,
+ struct xfs_rtalloc_rec *rec,
+ void *priv);
+
#ifdef CONFIG_XFS_RT
/*
* Function prototypes for exported functions.
@@ -118,13 +128,21 @@ int xfs_rtmodify_summary(struct xfs_mount *mp, struct xfs_trans *tp, int log,
int xfs_rtfree_range(struct xfs_mount *mp, struct xfs_trans *tp,
xfs_rtblock_t start, xfs_extlen_t len,
struct xfs_buf **rbpp, xfs_fsblock_t *rsb);
-
-
+int xfs_rtalloc_query_range(struct xfs_trans *tp,
+ struct xfs_rtalloc_rec *low_rec,
+ struct xfs_rtalloc_rec *high_rec,
+ xfs_rtalloc_query_range_fn fn,
+ void *priv);
+int xfs_rtalloc_query_all(struct xfs_trans *tp,
+ xfs_rtalloc_query_range_fn fn,
+ void *priv);
#else
# define xfs_rtallocate_extent(t,b,min,max,l,f,p,rb) (ENOSYS)
# define xfs_rtfree_extent(t,b,l) (ENOSYS)
# define xfs_rtpick_extent(m,t,l,rb) (ENOSYS)
# define xfs_growfs_rt(mp,in) (ENOSYS)
+# define xfs_rtalloc_query_range(t,l,h,f,p) (ENOSYS)
+# define xfs_rtalloc_query_all(t,f,p) (ENOSYS)
static inline int /* error */
xfs_rtmount_init(
xfs_mount_t *mp) /* file system mount structure */
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index 685c042a120f..47d239dcf3f4 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -877,8 +877,15 @@ xfs_init_mount_workqueues(
if (!mp->m_eofblocks_workqueue)
goto out_destroy_log;
+ mp->m_sync_workqueue = alloc_workqueue("xfs-sync/%s", WQ_FREEZABLE, 0,
+ mp->m_fsname);
+ if (!mp->m_sync_workqueue)
+ goto out_destroy_eofb;
+
return 0;
+out_destroy_eofb:
+ destroy_workqueue(mp->m_eofblocks_workqueue);
out_destroy_log:
destroy_workqueue(mp->m_log_workqueue);
out_destroy_reclaim:
@@ -899,6 +906,7 @@ STATIC void
xfs_destroy_mount_workqueues(
struct xfs_mount *mp)
{
+ destroy_workqueue(mp->m_sync_workqueue);
destroy_workqueue(mp->m_eofblocks_workqueue);
destroy_workqueue(mp->m_log_workqueue);
destroy_workqueue(mp->m_reclaim_workqueue);
diff --git a/fs/xfs/xfs_trace.c b/fs/xfs/xfs_trace.c
index 7f17ae6d709a..5d95fe348294 100644
--- a/fs/xfs/xfs_trace.c
+++ b/fs/xfs/xfs_trace.c
@@ -47,6 +47,7 @@
#include "xfs_inode_item.h"
#include "xfs_bmap_btree.h"
#include "xfs_filestream.h"
+#include "xfs_fsmap.h"
/*
* We include this last to have the helpers above available for the trace
diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h
index 383ac227ce2c..7c5a16528d8b 100644
--- a/fs/xfs/xfs_trace.h
+++ b/fs/xfs/xfs_trace.h
@@ -40,6 +40,8 @@ struct xfs_inode_log_format;
struct xfs_bmbt_irec;
struct xfs_btree_cur;
struct xfs_refcount_irec;
+struct xfs_fsmap;
+struct xfs_rmap_irec;
DECLARE_EVENT_CLASS(xfs_attr_list_class,
TP_PROTO(struct xfs_attr_list_context *ctx),
@@ -2190,7 +2192,7 @@ DECLARE_EVENT_CLASS(xfs_discard_class,
__entry->agbno = agbno;
__entry->len = len;
),
- TP_printk("dev %d:%d agno %u agbno %u len %u\n",
+ TP_printk("dev %d:%d agno %u agbno %u len %u",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->agno,
__entry->agbno,
@@ -2253,8 +2255,8 @@ DECLARE_EVENT_CLASS(xfs_defer_class,
TP_STRUCT__entry(
__field(dev_t, dev)
__field(void *, dop)
- __field(bool, committed)
- __field(bool, low)
+ __field(char, committed)
+ __field(char, low)
),
TP_fast_assign(
__entry->dev = mp ? mp->m_super->s_dev : 0;
@@ -2262,7 +2264,7 @@ DECLARE_EVENT_CLASS(xfs_defer_class,
__entry->committed = dop->dop_committed;
__entry->low = dop->dop_low;
),
- TP_printk("dev %d:%d ops %p committed %d low %d\n",
+ TP_printk("dev %d:%d ops %p committed %d low %d",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->dop,
__entry->committed,
@@ -2279,8 +2281,8 @@ DECLARE_EVENT_CLASS(xfs_defer_error_class,
TP_STRUCT__entry(
__field(dev_t, dev)
__field(void *, dop)
- __field(bool, committed)
- __field(bool, low)
+ __field(char, committed)
+ __field(char, low)
__field(int, error)
),
TP_fast_assign(
@@ -2290,7 +2292,7 @@ DECLARE_EVENT_CLASS(xfs_defer_error_class,
__entry->low = dop->dop_low;
__entry->error = error;
),
- TP_printk("dev %d:%d ops %p committed %d low %d err %d\n",
+ TP_printk("dev %d:%d ops %p committed %d low %d err %d",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->dop,
__entry->committed,
@@ -2309,7 +2311,7 @@ DECLARE_EVENT_CLASS(xfs_defer_pending_class,
__field(dev_t, dev)
__field(int, type)
__field(void *, intent)
- __field(bool, committed)
+ __field(char, committed)
__field(int, nr)
),
TP_fast_assign(
@@ -2319,7 +2321,7 @@ DECLARE_EVENT_CLASS(xfs_defer_pending_class,
__entry->committed = dfp->dfp_done != NULL;
__entry->nr = dfp->dfp_count;
),
- TP_printk("dev %d:%d optype %d intent %p committed %d nr %d\n",
+ TP_printk("dev %d:%d optype %d intent %p committed %d nr %d",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->type,
__entry->intent,
@@ -2614,7 +2616,8 @@ DECLARE_EVENT_CLASS(xfs_ag_resv_class,
__entry->asked = r ? r->ar_asked : 0;
__entry->len = len;
),
- TP_printk("dev %d:%d agno %u resv %d freeblks %u flcount %u resv %u ask %u len %u\n",
+ TP_printk("dev %d:%d agno %u resv %d freeblks %u flcount %u "
+ "resv %u ask %u len %u",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->agno,
__entry->resv,
@@ -2667,7 +2670,7 @@ DECLARE_EVENT_CLASS(xfs_ag_btree_lookup_class,
__entry->agbno = agbno;
__entry->dir = dir;
),
- TP_printk("dev %d:%d agno %u agbno %u cmp %s(%d)\n",
+ TP_printk("dev %d:%d agno %u agbno %u cmp %s(%d)",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->agno,
__entry->agbno,
@@ -2700,7 +2703,7 @@ DECLARE_EVENT_CLASS(xfs_refcount_extent_class,
__entry->blockcount = irec->rc_blockcount;
__entry->refcount = irec->rc_refcount;
),
- TP_printk("dev %d:%d agno %u agbno %u len %u refcount %u\n",
+ TP_printk("dev %d:%d agno %u agbno %u len %u refcount %u",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->agno,
__entry->startblock,
@@ -2735,7 +2738,7 @@ DECLARE_EVENT_CLASS(xfs_refcount_extent_at_class,
__entry->refcount = irec->rc_refcount;
__entry->agbno = agbno;
),
- TP_printk("dev %d:%d agno %u agbno %u len %u refcount %u @ agbno %u\n",
+ TP_printk("dev %d:%d agno %u agbno %u len %u refcount %u @ agbno %u",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->agno,
__entry->startblock,
@@ -2776,7 +2779,7 @@ DECLARE_EVENT_CLASS(xfs_refcount_double_extent_class,
__entry->i2_refcount = i2->rc_refcount;
),
TP_printk("dev %d:%d agno %u agbno %u len %u refcount %u -- "
- "agbno %u len %u refcount %u\n",
+ "agbno %u len %u refcount %u",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->agno,
__entry->i1_startblock,
@@ -2822,7 +2825,7 @@ DECLARE_EVENT_CLASS(xfs_refcount_double_extent_at_class,
__entry->agbno = agbno;
),
TP_printk("dev %d:%d agno %u agbno %u len %u refcount %u -- "
- "agbno %u len %u refcount %u @ agbno %u\n",
+ "agbno %u len %u refcount %u @ agbno %u",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->agno,
__entry->i1_startblock,
@@ -2875,7 +2878,7 @@ DECLARE_EVENT_CLASS(xfs_refcount_triple_extent_class,
),
TP_printk("dev %d:%d agno %u agbno %u len %u refcount %u -- "
"agbno %u len %u refcount %u -- "
- "agbno %u len %u refcount %u\n",
+ "agbno %u len %u refcount %u",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->agno,
__entry->i1_startblock,
@@ -3001,31 +3004,6 @@ DEFINE_EVENT(xfs_inode_error_class, name, \
unsigned long caller_ip), \
TP_ARGS(ip, error, caller_ip))
-/* reflink allocator */
-TRACE_EVENT(xfs_bmap_remap_alloc,
- TP_PROTO(struct xfs_inode *ip, xfs_fsblock_t fsbno,
- xfs_extlen_t len),
- TP_ARGS(ip, fsbno, len),
- TP_STRUCT__entry(
- __field(dev_t, dev)
- __field(xfs_ino_t, ino)
- __field(xfs_fsblock_t, fsbno)
- __field(xfs_extlen_t, len)
- ),
- TP_fast_assign(
- __entry->dev = VFS_I(ip)->i_sb->s_dev;
- __entry->ino = ip->i_ino;
- __entry->fsbno = fsbno;
- __entry->len = len;
- ),
- TP_printk("dev %d:%d ino 0x%llx fsbno 0x%llx len %x",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- __entry->ino,
- __entry->fsbno,
- __entry->len)
-);
-DEFINE_INODE_ERROR_EVENT(xfs_bmap_remap_alloc_error);
-
/* reflink tracepoint classes */
/* two-file io tracepoint class */
@@ -3227,7 +3205,7 @@ TRACE_EVENT(xfs_ioctl_clone,
),
TP_printk("dev %d:%d "
"ino 0x%lx isize 0x%llx -> "
- "ino 0x%lx isize 0x%llx\n",
+ "ino 0x%lx isize 0x%llx",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->src_ino,
__entry->src_isize,
@@ -3267,6 +3245,88 @@ DEFINE_INODE_IREC_EVENT(xfs_swap_extent_rmap_remap);
DEFINE_INODE_IREC_EVENT(xfs_swap_extent_rmap_remap_piece);
DEFINE_INODE_ERROR_EVENT(xfs_swap_extent_rmap_error);
+/* fsmap traces */
+DECLARE_EVENT_CLASS(xfs_fsmap_class,
+ TP_PROTO(struct xfs_mount *mp, u32 keydev, xfs_agnumber_t agno,
+ struct xfs_rmap_irec *rmap),
+ TP_ARGS(mp, keydev, agno, rmap),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(dev_t, keydev)
+ __field(xfs_agnumber_t, agno)
+ __field(xfs_fsblock_t, bno)
+ __field(xfs_filblks_t, len)
+ __field(__uint64_t, owner)
+ __field(__uint64_t, offset)
+ __field(unsigned int, flags)
+ ),
+ TP_fast_assign(
+ __entry->dev = mp->m_super->s_dev;
+ __entry->keydev = new_decode_dev(keydev);
+ __entry->agno = agno;
+ __entry->bno = rmap->rm_startblock;
+ __entry->len = rmap->rm_blockcount;
+ __entry->owner = rmap->rm_owner;
+ __entry->offset = rmap->rm_offset;
+ __entry->flags = rmap->rm_flags;
+ ),
+ TP_printk("dev %d:%d keydev %d:%d agno %u bno %llu len %llu owner %lld offset %llu flags 0x%x",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ MAJOR(__entry->keydev), MINOR(__entry->keydev),
+ __entry->agno,
+ __entry->bno,
+ __entry->len,
+ __entry->owner,
+ __entry->offset,
+ __entry->flags)
+)
+#define DEFINE_FSMAP_EVENT(name) \
+DEFINE_EVENT(xfs_fsmap_class, name, \
+ TP_PROTO(struct xfs_mount *mp, u32 keydev, xfs_agnumber_t agno, \
+ struct xfs_rmap_irec *rmap), \
+ TP_ARGS(mp, keydev, agno, rmap))
+DEFINE_FSMAP_EVENT(xfs_fsmap_low_key);
+DEFINE_FSMAP_EVENT(xfs_fsmap_high_key);
+DEFINE_FSMAP_EVENT(xfs_fsmap_mapping);
+
+DECLARE_EVENT_CLASS(xfs_getfsmap_class,
+ TP_PROTO(struct xfs_mount *mp, struct xfs_fsmap *fsmap),
+ TP_ARGS(mp, fsmap),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(dev_t, keydev)
+ __field(xfs_daddr_t, block)
+ __field(xfs_daddr_t, len)
+ __field(__uint64_t, owner)
+ __field(__uint64_t, offset)
+ __field(__uint64_t, flags)
+ ),
+ TP_fast_assign(
+ __entry->dev = mp->m_super->s_dev;
+ __entry->keydev = new_decode_dev(fsmap->fmr_device);
+ __entry->block = fsmap->fmr_physical;
+ __entry->len = fsmap->fmr_length;
+ __entry->owner = fsmap->fmr_owner;
+ __entry->offset = fsmap->fmr_offset;
+ __entry->flags = fsmap->fmr_flags;
+ ),
+ TP_printk("dev %d:%d keydev %d:%d block %llu len %llu owner %lld offset %llu flags 0x%llx",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ MAJOR(__entry->keydev), MINOR(__entry->keydev),
+ __entry->block,
+ __entry->len,
+ __entry->owner,
+ __entry->offset,
+ __entry->flags)
+)
+#define DEFINE_GETFSMAP_EVENT(name) \
+DEFINE_EVENT(xfs_getfsmap_class, name, \
+ TP_PROTO(struct xfs_mount *mp, struct xfs_fsmap *fsmap), \
+ TP_ARGS(mp, fsmap))
+DEFINE_GETFSMAP_EVENT(xfs_getfsmap_low_key);
+DEFINE_GETFSMAP_EVENT(xfs_getfsmap_high_key);
+DEFINE_GETFSMAP_EVENT(xfs_getfsmap_mapping);
+
#endif /* _TRACE_XFS_H */
#undef TRACE_INCLUDE_PATH
diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
index f5969c8274fc..2011620008de 100644
--- a/fs/xfs/xfs_trans.c
+++ b/fs/xfs/xfs_trans.c
@@ -263,6 +263,28 @@ xfs_trans_alloc(
}
/*
+ * Create an empty transaction with no reservation. This is a defensive
+ * mechanism for routines that query metadata without actually modifying
+ * them -- if the metadata being queried is somehow cross-linked (think a
+ * btree block pointer that points higher in the tree), we risk deadlock.
+ * However, blocks grabbed as part of a transaction can be re-grabbed.
+ * The verifiers will notice the corrupt block and the operation will fail
+ * back to userspace without deadlocking.
+ *
+ * Note the zero-length reservation; this transaction MUST be cancelled
+ * without any dirty data.
+ */
+int
+xfs_trans_alloc_empty(
+ struct xfs_mount *mp,
+ struct xfs_trans **tpp)
+{
+ struct xfs_trans_res resv = {0};
+
+ return xfs_trans_alloc(mp, &resv, 0, 0, XFS_TRANS_NO_WRITECOUNT, tpp);
+}
+
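A minimal caller sketch for the new helper (illustrative only, not part of this patch; assumes an xfs_mount *mp in scope):

	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc_empty(mp, &tp);
	if (error)
		return error;

	/* ... read-only metadata queries, re-grabbing blocks under tp ... */

	xfs_trans_cancel(tp);	/* must still be clean: no reservation, no dirty data */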
+/*
* Record the indicated change to the given field for application
* to the file system's superblock when the transaction commits.
* For now, just store the change in the transaction structure.
@@ -1012,17 +1034,14 @@ xfs_trans_cancel(
* chunk we've been working on and get a new transaction to continue.
*/
int
-__xfs_trans_roll(
+xfs_trans_roll(
struct xfs_trans **tpp,
- struct xfs_inode *dp,
- int *committed)
+ struct xfs_inode *dp)
{
struct xfs_trans *trans;
struct xfs_trans_res tres;
int error;
- *committed = 0;
-
/*
* Ensure that the inode is always logged.
*/
@@ -1048,7 +1067,6 @@ __xfs_trans_roll(
if (error)
return error;
- *committed = 1;
trans = *tpp;
/*
@@ -1071,12 +1089,3 @@ __xfs_trans_roll(
xfs_trans_ijoin(trans, dp, 0);
return 0;
}
-
-int
-xfs_trans_roll(
- struct xfs_trans **tpp,
- struct xfs_inode *dp)
-{
- int committed;
- return __xfs_trans_roll(tpp, dp, &committed);
-}
diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h
index 1646f659b60f..a07acbf0bd8a 100644
--- a/fs/xfs/xfs_trans.h
+++ b/fs/xfs/xfs_trans.h
@@ -158,6 +158,8 @@ typedef struct xfs_trans {
int xfs_trans_alloc(struct xfs_mount *mp, struct xfs_trans_res *resp,
uint blocks, uint rtextents, uint flags,
struct xfs_trans **tpp);
+int xfs_trans_alloc_empty(struct xfs_mount *mp,
+ struct xfs_trans **tpp);
void xfs_trans_mod_sb(xfs_trans_t *, uint, int64_t);
struct xfs_buf *xfs_trans_get_buf_map(struct xfs_trans *tp,
@@ -226,7 +228,6 @@ int xfs_trans_free_extent(struct xfs_trans *,
struct xfs_efd_log_item *, xfs_fsblock_t,
xfs_extlen_t, struct xfs_owner_info *);
int xfs_trans_commit(struct xfs_trans *);
-int __xfs_trans_roll(struct xfs_trans **, struct xfs_inode *, int *);
int xfs_trans_roll(struct xfs_trans **, struct xfs_inode *);
void xfs_trans_cancel(xfs_trans_t *);
int xfs_trans_ail_init(struct xfs_mount *);
diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c
index d6c9c3e9e02b..9056c0f34a3c 100644
--- a/fs/xfs/xfs_trans_ail.c
+++ b/fs/xfs/xfs_trans_ail.c
@@ -684,8 +684,23 @@ xfs_trans_ail_update_bulk(
}
}
-/*
- * xfs_trans_ail_delete_bulk - remove multiple log items from the AIL
+bool
+xfs_ail_delete_one(
+ struct xfs_ail *ailp,
+ struct xfs_log_item *lip)
+{
+ struct xfs_log_item *mlip = xfs_ail_min(ailp);
+
+ trace_xfs_ail_delete(lip, mlip->li_lsn, lip->li_lsn);
+ xfs_ail_delete(ailp, lip);
+ lip->li_flags &= ~XFS_LI_IN_AIL;
+ lip->li_lsn = 0;
+
+ return mlip == lip;
+}
+
+/**
+ * Remove a log item from the AIL
*
 * @xfs_trans_ail_delete_bulk takes an array of log items that all need to be
 * removed from the AIL. The caller is already holding the AIL lock, and has done
@@ -706,52 +721,36 @@ xfs_trans_ail_update_bulk(
* before returning.
*/
void
-xfs_trans_ail_delete_bulk(
+xfs_trans_ail_delete(
struct xfs_ail *ailp,
- struct xfs_log_item **log_items,
- int nr_items,
+ struct xfs_log_item *lip,
int shutdown_type) __releases(ailp->xa_lock)
{
- xfs_log_item_t *mlip;
- int mlip_changed = 0;
- int i;
+ struct xfs_mount *mp = ailp->xa_mount;
+ bool mlip_changed;
- mlip = xfs_ail_min(ailp);
-
- for (i = 0; i < nr_items; i++) {
- struct xfs_log_item *lip = log_items[i];
- if (!(lip->li_flags & XFS_LI_IN_AIL)) {
- struct xfs_mount *mp = ailp->xa_mount;
-
- spin_unlock(&ailp->xa_lock);
- if (!XFS_FORCED_SHUTDOWN(mp)) {
- xfs_alert_tag(mp, XFS_PTAG_AILDELETE,
- "%s: attempting to delete a log item that is not in the AIL",
- __func__);
- xfs_force_shutdown(mp, shutdown_type);
- }
- return;
+ if (!(lip->li_flags & XFS_LI_IN_AIL)) {
+ spin_unlock(&ailp->xa_lock);
+ if (!XFS_FORCED_SHUTDOWN(mp)) {
+ xfs_alert_tag(mp, XFS_PTAG_AILDELETE,
+ "%s: attempting to delete a log item that is not in the AIL",
+ __func__);
+ xfs_force_shutdown(mp, shutdown_type);
}
-
- trace_xfs_ail_delete(lip, mlip->li_lsn, lip->li_lsn);
- xfs_ail_delete(ailp, lip);
- lip->li_flags &= ~XFS_LI_IN_AIL;
- lip->li_lsn = 0;
- if (mlip == lip)
- mlip_changed = 1;
+ return;
}
+ mlip_changed = xfs_ail_delete_one(ailp, lip);
if (mlip_changed) {
- if (!XFS_FORCED_SHUTDOWN(ailp->xa_mount))
- xlog_assign_tail_lsn_locked(ailp->xa_mount);
+ if (!XFS_FORCED_SHUTDOWN(mp))
+ xlog_assign_tail_lsn_locked(mp);
if (list_empty(&ailp->xa_ail))
wake_up_all(&ailp->xa_empty);
- spin_unlock(&ailp->xa_lock);
+ }
+ spin_unlock(&ailp->xa_lock);
+ if (mlip_changed)
xfs_log_space_wake(ailp->xa_mount);
- } else {
- spin_unlock(&ailp->xa_lock);
- }
}
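With the bulk interface folded away, a caller removing one item now follows roughly this pattern (sketch only; SHUTDOWN_CORRUPT_INCORE stands in for whatever shutdown type the caller previously passed to the bulk variant):

	spin_lock(&ailp->xa_lock);
	/* xfs_trans_ail_delete() drops xa_lock before returning */
	xfs_trans_ail_delete(ailp, lip, SHUTDOWN_CORRUPT_INCORE);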
int
diff --git a/fs/xfs/xfs_trans_priv.h b/fs/xfs/xfs_trans_priv.h
index 49931b72da8a..d91706c56c63 100644
--- a/fs/xfs/xfs_trans_priv.h
+++ b/fs/xfs/xfs_trans_priv.h
@@ -106,18 +106,9 @@ xfs_trans_ail_update(
xfs_trans_ail_update_bulk(ailp, NULL, &lip, 1, lsn);
}
-void xfs_trans_ail_delete_bulk(struct xfs_ail *ailp,
- struct xfs_log_item **log_items, int nr_items,
- int shutdown_type)
- __releases(ailp->xa_lock);
-static inline void
-xfs_trans_ail_delete(
- struct xfs_ail *ailp,
- xfs_log_item_t *lip,
- int shutdown_type) __releases(ailp->xa_lock)
-{
- xfs_trans_ail_delete_bulk(ailp, &lip, 1, shutdown_type);
-}
+bool xfs_ail_delete_one(struct xfs_ail *ailp, struct xfs_log_item *lip);
+void xfs_trans_ail_delete(struct xfs_ail *ailp, struct xfs_log_item *lip,
+ int shutdown_type) __releases(ailp->xa_lock);
static inline void
xfs_trans_ail_remove(
diff --git a/include/acpi/acconfig.h b/include/acpi/acconfig.h
index 07740072da55..6db3b4668b1a 100644
--- a/include/acpi/acconfig.h
+++ b/include/acpi/acconfig.h
@@ -78,6 +78,7 @@
#define ACPI_MAX_EXTPARSE_CACHE_DEPTH 96 /* Parse tree objects */
#define ACPI_MAX_OBJECT_CACHE_DEPTH 96 /* Interpreter operand objects */
#define ACPI_MAX_NAMESPACE_CACHE_DEPTH 96 /* Namespace objects */
+#define ACPI_MAX_COMMENT_CACHE_DEPTH 96 /* Comments for the -ca option */
/*
* Should the subsystem abort the loading of an ACPI table if the
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index 2fc678e08d8d..197f3fffc9a7 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -577,7 +577,7 @@ struct acpi_pci_root {
bool acpi_dma_supported(struct acpi_device *adev);
enum dev_dma_attr acpi_get_dma_attr(struct acpi_device *adev);
-void acpi_dma_configure(struct device *dev, enum dev_dma_attr attr);
+int acpi_dma_configure(struct device *dev, enum dev_dma_attr attr);
void acpi_dma_deconfigure(struct device *dev);
struct acpi_device *acpi_find_child_device(struct acpi_device *parent,
@@ -588,6 +588,15 @@ struct acpi_pci_root *acpi_pci_find_root(acpi_handle handle);
int acpi_enable_wakeup_device_power(struct acpi_device *dev, int state);
int acpi_disable_wakeup_device_power(struct acpi_device *dev);
+#ifdef CONFIG_X86
+bool acpi_device_always_present(struct acpi_device *adev);
+#else
+static inline bool acpi_device_always_present(struct acpi_device *adev)
+{
+ return false;
+}
+#endif
+
#ifdef CONFIG_PM
acpi_status acpi_add_pm_notifier(struct acpi_device *adev, struct device *dev,
void (*work_func)(struct work_struct *work));
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index 3795386ea706..15c86ce4df53 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -46,7 +46,7 @@
/* Current ACPICA subsystem version in YYYYMMDD format */
-#define ACPI_CA_VERSION 0x20170119
+#define ACPI_CA_VERSION 0x20170303
#include <acpi/acconfig.h>
#include <acpi/actypes.h>
diff --git a/include/acpi/actbl2.h b/include/acpi/actbl2.h
index 0ff3c64ce924..faa9f2c0d5de 100644
--- a/include/acpi/actbl2.h
+++ b/include/acpi/actbl2.h
@@ -87,6 +87,7 @@
#define ACPI_SIG_WDAT "WDAT" /* Watchdog Action Table */
#define ACPI_SIG_WDDT "WDDT" /* Watchdog Timer Description Table */
#define ACPI_SIG_WDRT "WDRT" /* Watchdog Resource Table */
+#define ACPI_SIG_XXXX "XXXX" /* Intermediate AML header for ASL/ASL+ converter */
#ifdef ACPI_UNDEFINED_TABLES
/*
@@ -783,6 +784,15 @@ struct acpi_iort_smmu {
#define ACPI_IORT_SMMU_DVM_SUPPORTED (1)
#define ACPI_IORT_SMMU_COHERENT_WALK (1<<1)
+/* Global interrupt format */
+
+struct acpi_iort_smmu_gsi {
+ u32 nsg_irpt;
+ u32 nsg_irpt_flags;
+ u32 nsg_cfg_irpt;
+ u32 nsg_cfg_irpt_flags;
+};
+
struct acpi_iort_smmu_v3 {
u64 base_address; /* SMMUv3 base address */
u32 flags;
diff --git a/include/asm-generic/set_memory.h b/include/asm-generic/set_memory.h
new file mode 100644
index 000000000000..83e81f8996b2
--- /dev/null
+++ b/include/asm-generic/set_memory.h
@@ -0,0 +1,12 @@
+#ifndef __ASM_SET_MEMORY_H
+#define __ASM_SET_MEMORY_H
+
+/*
+ * Functions to change memory attributes.
+ */
+int set_memory_ro(unsigned long addr, int numpages);
+int set_memory_rw(unsigned long addr, int numpages);
+int set_memory_x(unsigned long addr, int numpages);
+int set_memory_nx(unsigned long addr, int numpages);
+
+#endif
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 3558f4eb1a86..314a0b9219c6 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -566,7 +566,6 @@
IRQCHIP_OF_MATCH_TABLE() \
ACPI_PROBE_TABLE(irqchip) \
ACPI_PROBE_TABLE(clksrc) \
- ACPI_PROBE_TABLE(iort) \
EARLYCON_TABLE()
#define INIT_TEXT \
diff --git a/include/drm/drm_mem_util.h b/include/drm/drm_mem_util.h
index 70d4e221a3ad..d0f6cf2e5324 100644
--- a/include/drm/drm_mem_util.h
+++ b/include/drm/drm_mem_util.h
@@ -37,8 +37,7 @@ static __inline__ void *drm_calloc_large(size_t nmemb, size_t size)
if (size * nmemb <= PAGE_SIZE)
return kcalloc(nmemb, size, GFP_KERNEL);
- return __vmalloc(size * nmemb,
- GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
+ return vzalloc(size * nmemb);
}
/* Modeled after cairo's malloc_ab, it's like calloc but without the zeroing. */
@@ -50,8 +49,7 @@ static __inline__ void *drm_malloc_ab(size_t nmemb, size_t size)
if (size * nmemb <= PAGE_SIZE)
return kmalloc(nmemb * size, GFP_KERNEL);
- return __vmalloc(size * nmemb,
- GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
+ return vmalloc(size * nmemb);
}
static __inline__ void *drm_malloc_gfp(size_t nmemb, size_t size, gfp_t gfp)
@@ -69,8 +67,7 @@ static __inline__ void *drm_malloc_gfp(size_t nmemb, size_t size, gfp_t gfp)
return ptr;
}
- return __vmalloc(size * nmemb,
- gfp | __GFP_HIGHMEM, PAGE_KERNEL);
+ return __vmalloc(size * nmemb, gfp, PAGE_KERNEL);
}
static __inline void drm_free_large(void *ptr)
diff --git a/include/dt-bindings/clock/gxbb-clkc.h b/include/dt-bindings/clock/gxbb-clkc.h
index 63f4c2c44a1f..3190e30b9398 100644
--- a/include/dt-bindings/clock/gxbb-clkc.h
+++ b/include/dt-bindings/clock/gxbb-clkc.h
@@ -10,6 +10,7 @@
#define CLKID_FCLK_DIV2 4
#define CLKID_FCLK_DIV3 5
#define CLKID_FCLK_DIV4 6
+#define CLKID_GP0_PLL 9
#define CLKID_CLK81 12
#define CLKID_MPLL2 15
#define CLKID_I2C 22
@@ -17,6 +18,10 @@
#define CLKID_RNG0 25
#define CLKID_SPI 34
#define CLKID_ETH 36
+#define CLKID_AIU_GLUE 38
+#define CLKID_I2S_OUT 40
+#define CLKID_MIXER_IFACE 44
+#define CLKID_AIU 47
#define CLKID_USB0 50
#define CLKID_USB1 51
#define CLKID_USB 55
@@ -25,11 +30,17 @@
#define CLKID_USB0_DDR_BRIDGE 65
#define CLKID_SANA 69
#define CLKID_GCLK_VENCI_INT0 77
+#define CLKID_AOCLK_GATE 80
#define CLKID_AO_I2C 93
#define CLKID_SD_EMMC_A 94
#define CLKID_SD_EMMC_B 95
#define CLKID_SD_EMMC_C 96
#define CLKID_SAR_ADC_CLK 97
#define CLKID_SAR_ADC_SEL 98
+#define CLKID_MALI_0_SEL 100
+#define CLKID_MALI_0 102
+#define CLKID_MALI_1_SEL 103
+#define CLKID_MALI_1 105
+#define CLKID_MALI 106
#endif /* __GXBB_CLKC_H */
diff --git a/include/dt-bindings/clock/r7s72100-clock.h b/include/dt-bindings/clock/r7s72100-clock.h
index ce09915c298f..bc256d31099a 100644
--- a/include/dt-bindings/clock/r7s72100-clock.h
+++ b/include/dt-bindings/clock/r7s72100-clock.h
@@ -29,6 +29,9 @@
#define R7S72100_CLK_OSTM0 1
#define R7S72100_CLK_OSTM1 0
+/* MSTP6 */
+#define R7S72100_CLK_RTC 0
+
/* MSTP7 */
#define R7S72100_CLK_ETHER 4
@@ -49,7 +52,9 @@
#define R7S72100_CLK_SPI4 3
/* MSTP12 */
-#define R7S72100_CLK_SDHI0 3
-#define R7S72100_CLK_SDHI1 2
+#define R7S72100_CLK_SDHI00 3
+#define R7S72100_CLK_SDHI01 2
+#define R7S72100_CLK_SDHI10 1
+#define R7S72100_CLK_SDHI11 0
#endif /* __DT_BINDINGS_CLOCK_R7S72100_H__ */
diff --git a/include/dt-bindings/clock/r8a73a4-clock.h b/include/dt-bindings/clock/r8a73a4-clock.h
index dd11ecdf837e..4b3668157257 100644
--- a/include/dt-bindings/clock/r8a73a4-clock.h
+++ b/include/dt-bindings/clock/r8a73a4-clock.h
@@ -54,6 +54,7 @@
#define R8A73A4_CLK_IIC3 11
#define R8A73A4_CLK_IIC4 10
#define R8A73A4_CLK_IIC5 9
+#define R8A73A4_CLK_INTC_SYS 8
#define R8A73A4_CLK_IRQC 7
/* MSTP5 */
diff --git a/include/dt-bindings/clock/r8a7790-clock.h b/include/dt-bindings/clock/r8a7790-clock.h
index fa5e8da809f2..20641fa68e73 100644
--- a/include/dt-bindings/clock/r8a7790-clock.h
+++ b/include/dt-bindings/clock/r8a7790-clock.h
@@ -82,6 +82,7 @@
/* MSTP4 */
#define R8A7790_CLK_IRQC 7
+#define R8A7790_CLK_INTC_SYS 8
/* MSTP5 */
#define R8A7790_CLK_AUDIO_DMAC1 1
diff --git a/include/dt-bindings/clock/r8a7791-clock.h b/include/dt-bindings/clock/r8a7791-clock.h
index ffa11379b3f0..adc50dc31ab3 100644
--- a/include/dt-bindings/clock/r8a7791-clock.h
+++ b/include/dt-bindings/clock/r8a7791-clock.h
@@ -72,6 +72,7 @@
/* MSTP4 */
#define R8A7791_CLK_IRQC 7
+#define R8A7791_CLK_INTC_SYS 8
/* MSTP5 */
#define R8A7791_CLK_AUDIO_DMAC1 1
diff --git a/include/dt-bindings/clock/r8a7792-clock.h b/include/dt-bindings/clock/r8a7792-clock.h
index 9a8b392ceb00..5be90bc23bd7 100644
--- a/include/dt-bindings/clock/r8a7792-clock.h
+++ b/include/dt-bindings/clock/r8a7792-clock.h
@@ -17,7 +17,6 @@
#define R8A7792_CLK_PLL3 3
#define R8A7792_CLK_LB 4
#define R8A7792_CLK_QSPI 5
-#define R8A7792_CLK_Z 6
/* MSTP0 */
#define R8A7792_CLK_MSIOF0 0
@@ -45,6 +44,7 @@
/* MSTP4 */
#define R8A7792_CLK_IRQC 7
+#define R8A7792_CLK_INTC_SYS 8
/* MSTP5 */
#define R8A7792_CLK_AUDIO_DMAC0 2
diff --git a/include/dt-bindings/clock/r8a7793-clock.h b/include/dt-bindings/clock/r8a7793-clock.h
index efcbc594fe82..7318d45d4e7e 100644
--- a/include/dt-bindings/clock/r8a7793-clock.h
+++ b/include/dt-bindings/clock/r8a7793-clock.h
@@ -77,10 +77,11 @@
/* MSTP4 */
#define R8A7793_CLK_IRQC 7
+#define R8A7793_CLK_INTC_SYS 8
/* MSTP5 */
-#define R8A7793_CLK_AUDIO_DMAC1 1
-#define R8A7793_CLK_AUDIO_DMAC0 2
+#define R8A7793_CLK_AUDIO_DMAC1 1
+#define R8A7793_CLK_AUDIO_DMAC0 2
#define R8A7793_CLK_ADSP_MOD 6
#define R8A7793_CLK_THERMAL 22
#define R8A7793_CLK_PWM 23
diff --git a/include/dt-bindings/clock/r8a7794-clock.h b/include/dt-bindings/clock/r8a7794-clock.h
index 88e64846cf37..93e99c3ffc8d 100644
--- a/include/dt-bindings/clock/r8a7794-clock.h
+++ b/include/dt-bindings/clock/r8a7794-clock.h
@@ -64,6 +64,7 @@
/* MSTP4 */
#define R8A7794_CLK_IRQC 7
+#define R8A7794_CLK_INTC_SYS 8
/* MSTP5 */
#define R8A7794_CLK_AUDIO_DMAC0 2
@@ -81,6 +82,7 @@
#define R8A7794_CLK_SCIF2 19
#define R8A7794_CLK_SCIF1 20
#define R8A7794_CLK_SCIF0 21
+#define R8A7794_CLK_DU1 23
#define R8A7794_CLK_DU0 24
/* MSTP8 */
diff --git a/include/dt-bindings/genpd/k2g.h b/include/dt-bindings/genpd/k2g.h
new file mode 100644
index 000000000000..1f31f17e19eb
--- /dev/null
+++ b/include/dt-bindings/genpd/k2g.h
@@ -0,0 +1,90 @@
+/*
+ * TI K2G SoC Device definitions
+ *
+ * Copyright (C) 2015-2017 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DT_BINDINGS_GENPD_K2G_H
+#define _DT_BINDINGS_GENPD_K2G_H
+
+/* Documented in http://processors.wiki.ti.com/index.php/TISCI */
+
+#define K2G_DEV_PMMC0 0x0000
+#define K2G_DEV_MLB0 0x0001
+#define K2G_DEV_DSS0 0x0002
+#define K2G_DEV_MCBSP0 0x0003
+#define K2G_DEV_MCASP0 0x0004
+#define K2G_DEV_MCASP1 0x0005
+#define K2G_DEV_MCASP2 0x0006
+#define K2G_DEV_DCAN0 0x0008
+#define K2G_DEV_DCAN1 0x0009
+#define K2G_DEV_EMIF0 0x000a
+#define K2G_DEV_MMCHS0 0x000b
+#define K2G_DEV_MMCHS1 0x000c
+#define K2G_DEV_GPMC0 0x000d
+#define K2G_DEV_ELM0 0x000e
+#define K2G_DEV_SPI0 0x0010
+#define K2G_DEV_SPI1 0x0011
+#define K2G_DEV_SPI2 0x0012
+#define K2G_DEV_SPI3 0x0013
+#define K2G_DEV_ICSS0 0x0014
+#define K2G_DEV_ICSS1 0x0015
+#define K2G_DEV_USB0 0x0016
+#define K2G_DEV_USB1 0x0017
+#define K2G_DEV_NSS0 0x0018
+#define K2G_DEV_PCIE0 0x0019
+#define K2G_DEV_GPIO0 0x001b
+#define K2G_DEV_GPIO1 0x001c
+#define K2G_DEV_TIMER64_0 0x001d
+#define K2G_DEV_TIMER64_1 0x001e
+#define K2G_DEV_TIMER64_2 0x001f
+#define K2G_DEV_TIMER64_3 0x0020
+#define K2G_DEV_TIMER64_4 0x0021
+#define K2G_DEV_TIMER64_5 0x0022
+#define K2G_DEV_TIMER64_6 0x0023
+#define K2G_DEV_MSGMGR0 0x0025
+#define K2G_DEV_BOOTCFG0 0x0026
+#define K2G_DEV_ARM_BOOTROM0 0x0027
+#define K2G_DEV_DSP_BOOTROM0 0x0029
+#define K2G_DEV_DEBUGSS0 0x002b
+#define K2G_DEV_UART0 0x002c
+#define K2G_DEV_UART1 0x002d
+#define K2G_DEV_UART2 0x002e
+#define K2G_DEV_EHRPWM0 0x002f
+#define K2G_DEV_EHRPWM1 0x0030
+#define K2G_DEV_EHRPWM2 0x0031
+#define K2G_DEV_EHRPWM3 0x0032
+#define K2G_DEV_EHRPWM4 0x0033
+#define K2G_DEV_EHRPWM5 0x0034
+#define K2G_DEV_EQEP0 0x0035
+#define K2G_DEV_EQEP1 0x0036
+#define K2G_DEV_EQEP2 0x0037
+#define K2G_DEV_ECAP0 0x0038
+#define K2G_DEV_ECAP1 0x0039
+#define K2G_DEV_I2C0 0x003a
+#define K2G_DEV_I2C1 0x003b
+#define K2G_DEV_I2C2 0x003c
+#define K2G_DEV_EDMA0 0x003f
+#define K2G_DEV_SEMAPHORE0 0x0040
+#define K2G_DEV_INTC0 0x0041
+#define K2G_DEV_GIC0 0x0042
+#define K2G_DEV_QSPI0 0x0043
+#define K2G_DEV_ARM_64B_COUNTER0 0x0044
+#define K2G_DEV_TETRIS0 0x0045
+#define K2G_DEV_CGEM0 0x0046
+#define K2G_DEV_MSMC0 0x0047
+#define K2G_DEV_CBASS0 0x0049
+#define K2G_DEV_BOARD0 0x004c
+#define K2G_DEV_EDMA1 0x004f
+
+#endif
diff --git a/include/dt-bindings/gpio/gpio.h b/include/dt-bindings/gpio/gpio.h
index c673d2c87c60..b4f54da694eb 100644
--- a/include/dt-bindings/gpio/gpio.h
+++ b/include/dt-bindings/gpio/gpio.h
@@ -17,11 +17,15 @@
#define GPIO_PUSH_PULL 0
#define GPIO_SINGLE_ENDED 2
+/* Bit 2 expresses open drain or open source */
+#define GPIO_LINE_OPEN_SOURCE 0
+#define GPIO_LINE_OPEN_DRAIN 4
+
/*
- * Open Drain/Collector is the combination of single-ended active low,
- * Open Source/Emitter is the combination of single-ended active high.
+ * Open Drain/Collector is the combination of single-ended and open drain.
+ * Open Source/Emitter is the combination of single-ended and open source.
*/
-#define GPIO_OPEN_DRAIN (GPIO_SINGLE_ENDED | GPIO_ACTIVE_LOW)
-#define GPIO_OPEN_SOURCE (GPIO_SINGLE_ENDED | GPIO_ACTIVE_HIGH)
+#define GPIO_OPEN_DRAIN (GPIO_SINGLE_ENDED | GPIO_LINE_OPEN_DRAIN)
+#define GPIO_OPEN_SOURCE (GPIO_SINGLE_ENDED | GPIO_LINE_OPEN_SOURCE)
#endif
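Worked values for the combined flags (for reference, not part of the header):

	GPIO_OPEN_DRAIN  = GPIO_SINGLE_ENDED | GPIO_LINE_OPEN_DRAIN  = 2 | 4 = 6
	GPIO_OPEN_SOURCE = GPIO_SINGLE_ENDED | GPIO_LINE_OPEN_SOURCE = 2 | 0 = 2
	/* the active level is now carried independently, e.g. GPIO_ACTIVE_LOW | GPIO_OPEN_DRAIN = 1 | 6 = 7 */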
diff --git a/include/dt-bindings/mfd/stm32f7-rcc.h b/include/dt-bindings/mfd/stm32f7-rcc.h
new file mode 100644
index 000000000000..e36cc69959c7
--- /dev/null
+++ b/include/dt-bindings/mfd/stm32f7-rcc.h
@@ -0,0 +1,112 @@
+/*
+ * This header provides constants for the STM32F7 RCC IP
+ */
+
+#ifndef _DT_BINDINGS_MFD_STM32F7_RCC_H
+#define _DT_BINDINGS_MFD_STM32F7_RCC_H
+
+/* AHB1 */
+#define STM32F7_RCC_AHB1_GPIOA 0
+#define STM32F7_RCC_AHB1_GPIOB 1
+#define STM32F7_RCC_AHB1_GPIOC 2
+#define STM32F7_RCC_AHB1_GPIOD 3
+#define STM32F7_RCC_AHB1_GPIOE 4
+#define STM32F7_RCC_AHB1_GPIOF 5
+#define STM32F7_RCC_AHB1_GPIOG 6
+#define STM32F7_RCC_AHB1_GPIOH 7
+#define STM32F7_RCC_AHB1_GPIOI 8
+#define STM32F7_RCC_AHB1_GPIOJ 9
+#define STM32F7_RCC_AHB1_GPIOK 10
+#define STM32F7_RCC_AHB1_CRC 12
+#define STM32F7_RCC_AHB1_BKPSRAM 18
+#define STM32F7_RCC_AHB1_DTCMRAM 20
+#define STM32F7_RCC_AHB1_DMA1 21
+#define STM32F7_RCC_AHB1_DMA2 22
+#define STM32F7_RCC_AHB1_DMA2D 23
+#define STM32F7_RCC_AHB1_ETHMAC 25
+#define STM32F7_RCC_AHB1_ETHMACTX 26
+#define STM32F7_RCC_AHB1_ETHMACRX 27
+#define STM32F7_RCC_AHB1_ETHMACPTP	28
+#define STM32F7_RCC_AHB1_OTGHS 29
+#define STM32F7_RCC_AHB1_OTGHSULPI 30
+
+#define STM32F7_AHB1_RESET(bit) (STM32F7_RCC_AHB1_##bit + (0x10 * 8))
+#define STM32F7_AHB1_CLOCK(bit) (STM32F7_RCC_AHB1_##bit)
+
+
+/* AHB2 */
+#define STM32F7_RCC_AHB2_DCMI 0
+#define STM32F7_RCC_AHB2_CRYP 4
+#define STM32F7_RCC_AHB2_HASH 5
+#define STM32F7_RCC_AHB2_RNG 6
+#define STM32F7_RCC_AHB2_OTGFS 7
+
+#define STM32F7_AHB2_RESET(bit) (STM32F7_RCC_AHB2_##bit + (0x14 * 8))
+#define STM32F7_AHB2_CLOCK(bit) (STM32F7_RCC_AHB2_##bit + 0x20)
+
+/* AHB3 */
+#define STM32F7_RCC_AHB3_FMC 0
+#define STM32F7_RCC_AHB3_QSPI 1
+
+#define STM32F7_AHB3_RESET(bit) (STM32F7_RCC_AHB3_##bit + (0x18 * 8))
+#define STM32F7_AHB3_CLOCK(bit) (STM32F7_RCC_AHB3_##bit + 0x40)
+
+/* APB1 */
+#define STM32F7_RCC_APB1_TIM2 0
+#define STM32F7_RCC_APB1_TIM3 1
+#define STM32F7_RCC_APB1_TIM4 2
+#define STM32F7_RCC_APB1_TIM5 3
+#define STM32F7_RCC_APB1_TIM6 4
+#define STM32F7_RCC_APB1_TIM7 5
+#define STM32F7_RCC_APB1_TIM12 6
+#define STM32F7_RCC_APB1_TIM13 7
+#define STM32F7_RCC_APB1_TIM14 8
+#define STM32F7_RCC_APB1_LPTIM1 9
+#define STM32F7_RCC_APB1_WWDG 11
+#define STM32F7_RCC_APB1_SPI2 14
+#define STM32F7_RCC_APB1_SPI3 15
+#define STM32F7_RCC_APB1_SPDIFRX 16
+#define STM32F7_RCC_APB1_UART2 17
+#define STM32F7_RCC_APB1_UART3 18
+#define STM32F7_RCC_APB1_UART4 19
+#define STM32F7_RCC_APB1_UART5 20
+#define STM32F7_RCC_APB1_I2C1 21
+#define STM32F7_RCC_APB1_I2C2 22
+#define STM32F7_RCC_APB1_I2C3 23
+#define STM32F7_RCC_APB1_I2C4 24
+#define STM32F7_RCC_APB1_CAN1 25
+#define STM32F7_RCC_APB1_CAN2 26
+#define STM32F7_RCC_APB1_CEC 27
+#define STM32F7_RCC_APB1_PWR 28
+#define STM32F7_RCC_APB1_DAC 29
+#define STM32F7_RCC_APB1_UART7 30
+#define STM32F7_RCC_APB1_UART8 31
+
+#define STM32F7_APB1_RESET(bit) (STM32F7_RCC_APB1_##bit + (0x20 * 8))
+#define STM32F7_APB1_CLOCK(bit) (STM32F7_RCC_APB1_##bit + 0x80)
+
+/* APB2 */
+#define STM32F7_RCC_APB2_TIM1 0
+#define STM32F7_RCC_APB2_TIM8 1
+#define STM32F7_RCC_APB2_USART1 4
+#define STM32F7_RCC_APB2_USART6 5
+#define STM32F7_RCC_APB2_ADC1 8
+#define STM32F7_RCC_APB2_ADC2 9
+#define STM32F7_RCC_APB2_ADC3 10
+#define STM32F7_RCC_APB2_SDMMC1 11
+#define STM32F7_RCC_APB2_SPI1 12
+#define STM32F7_RCC_APB2_SPI4 13
+#define STM32F7_RCC_APB2_SYSCFG 14
+#define STM32F7_RCC_APB2_TIM9 16
+#define STM32F7_RCC_APB2_TIM10 17
+#define STM32F7_RCC_APB2_TIM11 18
+#define STM32F7_RCC_APB2_SPI5 20
+#define STM32F7_RCC_APB2_SPI6 21
+#define STM32F7_RCC_APB2_SAI1 22
+#define STM32F7_RCC_APB2_SAI2 23
+#define STM32F7_RCC_APB2_LTDC 26
+
+#define STM32F7_APB2_RESET(bit) (STM32F7_RCC_APB2_##bit + (0x24 * 8))
+#define STM32F7_APB2_CLOCK(bit) (STM32F7_RCC_APB2_##bit + 0xA0)
+
+#endif /* _DT_BINDINGS_MFD_STM32F7_RCC_H */
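The RESET() helpers fold the reset register's byte offset (times 8 bits) onto the per-peripheral bit position, while the CLOCK() helpers add a fixed per-bus base; two worked expansions for illustration (not part of the header):

	STM32F7_AHB1_RESET(GPIOA) ->  0 + (0x10 * 8) = 128
	STM32F7_APB1_CLOCK(UART2) -> 17 + 0x80       = 145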
diff --git a/include/dt-bindings/pinctrl/hisi.h b/include/dt-bindings/pinctrl/hisi.h
index 38f1ea879ea1..0359bfdc9119 100644
--- a/include/dt-bindings/pinctrl/hisi.h
+++ b/include/dt-bindings/pinctrl/hisi.h
@@ -56,4 +56,19 @@
#define DRIVE4_08MA (4 << 4)
#define DRIVE4_10MA (6 << 4)
+/* drive strength definition for hi3660 */
+#define DRIVE6_MASK (15 << 4)
+#define DRIVE6_04MA (0 << 4)
+#define DRIVE6_12MA (4 << 4)
+#define DRIVE6_19MA (8 << 4)
+#define DRIVE6_27MA (10 << 4)
+#define DRIVE6_32MA (15 << 4)
+#define DRIVE7_02MA (0 << 4)
+#define DRIVE7_04MA (1 << 4)
+#define DRIVE7_06MA (2 << 4)
+#define DRIVE7_08MA (3 << 4)
+#define DRIVE7_10MA (4 << 4)
+#define DRIVE7_12MA (5 << 4)
+#define DRIVE7_14MA (6 << 4)
+#define DRIVE7_16MA (7 << 4)
#endif
diff --git a/include/dt-bindings/power/imx7-power.h b/include/dt-bindings/power/imx7-power.h
new file mode 100644
index 000000000000..3a181e410517
--- /dev/null
+++ b/include/dt-bindings/power/imx7-power.h
@@ -0,0 +1,16 @@
+/*
+ * Copyright (C) 2017 Impinj
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __DT_BINDINGS_IMX7_POWER_H__
+#define __DT_BINDINGS_IMX7_POWER_H__
+
+#define IMX7_POWER_DOMAIN_MIPI_PHY 0
+#define IMX7_POWER_DOMAIN_PCIE_PHY 1
+#define IMX7_POWER_DOMAIN_USB_HSIC_PHY 2
+
+#endif
diff --git a/include/dt-bindings/power/r8a7795-sysc.h b/include/dt-bindings/power/r8a7795-sysc.h
index ee2e26ba605e..ad679eeda137 100644
--- a/include/dt-bindings/power/r8a7795-sysc.h
+++ b/include/dt-bindings/power/r8a7795-sysc.h
@@ -33,7 +33,7 @@
#define R8A7795_PD_CA53_SCU 21
#define R8A7795_PD_3DG_E 22
#define R8A7795_PD_A3IR 24
-#define R8A7795_PD_A2VC0 25
+#define R8A7795_PD_A2VC0 25 /* ES1.x only */
#define R8A7795_PD_A2VC1 26
/* Always-on power area */
diff --git a/include/dt-bindings/reset/altr,rst-mgr-a10sr.h b/include/dt-bindings/reset/altr,rst-mgr-a10sr.h
new file mode 100644
index 000000000000..9855925e5256
--- /dev/null
+++ b/include/dt-bindings/reset/altr,rst-mgr-a10sr.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright Intel Corporation (C) 2017. All Rights Reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * Reset binding definitions for Altera Arria10 MAX5 System Resource Chip
+ *
+ * Adapted from altr,rst-mgr-a10.h
+ */
+
+#ifndef _DT_BINDINGS_RESET_ALTR_RST_MGR_A10SR_H
+#define _DT_BINDINGS_RESET_ALTR_RST_MGR_A10SR_H
+
+/* Peripheral PHY resets */
+#define A10SR_RESET_ENET_HPS 0
+#define A10SR_RESET_PCIE 1
+#define A10SR_RESET_FILE 2
+#define A10SR_RESET_BQSPI 3
+#define A10SR_RESET_USB 4
+
+#define A10SR_RESET_NUM 5
+
+#endif
diff --git a/include/dt-bindings/reset/imx7-reset.h b/include/dt-bindings/reset/imx7-reset.h
new file mode 100644
index 000000000000..63948170c7b2
--- /dev/null
+++ b/include/dt-bindings/reset/imx7-reset.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (C) 2017 Impinj, Inc.
+ *
+ * Author: Andrey Smirnov <andrew.smirnov@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef DT_BINDING_RESET_IMX7_H
+#define DT_BINDING_RESET_IMX7_H
+
+#define IMX7_RESET_A7_CORE_POR_RESET0 0
+#define IMX7_RESET_A7_CORE_POR_RESET1 1
+#define IMX7_RESET_A7_CORE_RESET0 2
+#define IMX7_RESET_A7_CORE_RESET1 3
+#define IMX7_RESET_A7_DBG_RESET0 4
+#define IMX7_RESET_A7_DBG_RESET1 5
+#define IMX7_RESET_A7_ETM_RESET0 6
+#define IMX7_RESET_A7_ETM_RESET1 7
+#define IMX7_RESET_A7_SOC_DBG_RESET 8
+#define IMX7_RESET_A7_L2RESET 9
+#define IMX7_RESET_SW_M4C_RST 10
+#define IMX7_RESET_SW_M4P_RST 11
+#define IMX7_RESET_EIM_RST 12
+#define IMX7_RESET_HSICPHY_PORT_RST 13
+#define IMX7_RESET_USBPHY1_POR 14
+#define IMX7_RESET_USBPHY1_PORT_RST 15
+#define IMX7_RESET_USBPHY2_POR 16
+#define IMX7_RESET_USBPHY2_PORT_RST 17
+#define IMX7_RESET_MIPI_PHY_MRST 18
+#define IMX7_RESET_MIPI_PHY_SRST 19
+
+/*
+ * IMX7_RESET_PCIEPHY is a logical reset line combining PCIEPHY_BTN
+ * and PCIEPHY_G_RST
+ */
+#define IMX7_RESET_PCIEPHY 20
+#define IMX7_RESET_PCIEPHY_PERST 21
+
+/*
+ * IMX7_RESET_PCIE_CTRL_APPS_EN is not strictly a reset line, but it
+ * can be used to inhibit the PCIe LTSSM, so, in a way, it can be thought
+ * of as one
+ */
+#define IMX7_RESET_PCIE_CTRL_APPS_EN 22
+#define IMX7_RESET_DDRC_PRST 23
+#define IMX7_RESET_DDRC_CORE_RST 24
+
+#define IMX7_RESET_NUM 25
+
+#endif
+
diff --git a/include/kvm/arm_arch_timer.h b/include/kvm/arm_arch_timer.h
index fe797d6ef89d..295584f31a4e 100644
--- a/include/kvm/arm_arch_timer.h
+++ b/include/kvm/arm_arch_timer.h
@@ -63,6 +63,8 @@ int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu,
void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu);
void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu);
+bool kvm_timer_should_notify_user(struct kvm_vcpu *vcpu);
+void kvm_timer_update_run(struct kvm_vcpu *vcpu);
void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu);
u64 kvm_arm_timer_get_reg(struct kvm_vcpu *, u64 regid);
diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h
index 92e7e97ca8ff..1ab4633adf4f 100644
--- a/include/kvm/arm_pmu.h
+++ b/include/kvm/arm_pmu.h
@@ -50,6 +50,8 @@ void kvm_pmu_enable_counter(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_overflow_set(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu);
void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu);
+bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu);
+void kvm_pmu_update_run(struct kvm_vcpu *vcpu);
void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
@@ -85,6 +87,11 @@ static inline void kvm_pmu_enable_counter(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_overflow_set(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu) {}
+static inline bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu)
+{
+ return false;
+}
+static inline void kvm_pmu_update_run(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu,
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index c0b3d999c266..581a59ea7e34 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -225,8 +225,6 @@ struct vgic_dist {
struct vgic_v2_cpu_if {
u32 vgic_hcr;
u32 vgic_vmcr;
- u32 vgic_misr; /* Saved only */
- u64 vgic_eisr; /* Saved only */
u64 vgic_elrsr; /* Saved only */
u32 vgic_apr;
u32 vgic_lr[VGIC_V2_MAX_LRS];
@@ -236,8 +234,6 @@ struct vgic_v3_cpu_if {
u32 vgic_hcr;
u32 vgic_vmcr;
u32 vgic_sre; /* Restored only, change ignored */
- u32 vgic_misr; /* Saved only */
- u32 vgic_eisr; /* Saved only */
u32 vgic_elrsr; /* Saved only */
u32 vgic_ap0r[4];
u32 vgic_ap1r[4];
@@ -264,8 +260,6 @@ struct vgic_cpu {
*/
struct list_head ap_list_head;
- u64 live_lrs;
-
/*
* Members below are used with GICv3 emulation only and represent
* parts of the redistributor.
@@ -307,6 +301,9 @@ bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int virt_irq);
int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu);
+void kvm_vgic_load(struct kvm_vcpu *vcpu);
+void kvm_vgic_put(struct kvm_vcpu *vcpu);
+
#define irqchip_in_kernel(k) (!!((k)->arch.vgic.in_kernel))
#define vgic_initialized(k) ((k)->arch.vgic.initialized)
#define vgic_ready(k) ((k)->arch.vgic.ready)
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index f729adaac18b..137e4a3d89c5 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -770,8 +770,11 @@ static inline enum dev_dma_attr acpi_get_dma_attr(struct acpi_device *adev)
return DEV_DMA_NOT_SUPPORTED;
}
-static inline void acpi_dma_configure(struct device *dev,
- enum dev_dma_attr attr) { }
+static inline int acpi_dma_configure(struct device *dev,
+ enum dev_dma_attr attr)
+{
+ return 0;
+}
static inline void acpi_dma_deconfigure(struct device *dev) { }
@@ -957,6 +960,10 @@ static inline void acpi_dev_remove_driver_gpios(struct acpi_device *adev)
adev->driver_gpios = NULL;
}
+int devm_acpi_dev_add_driver_gpios(struct device *dev,
+ const struct acpi_gpio_mapping *gpios);
+void devm_acpi_dev_remove_driver_gpios(struct device *dev);
+
int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index);
#else
static inline int acpi_dev_add_driver_gpios(struct acpi_device *adev,
@@ -966,6 +973,13 @@ static inline int acpi_dev_add_driver_gpios(struct acpi_device *adev,
}
static inline void acpi_dev_remove_driver_gpios(struct acpi_device *adev) {}
+static inline int devm_acpi_dev_add_driver_gpios(struct device *dev,
+ const struct acpi_gpio_mapping *gpios)
+{
+ return -ENXIO;
+}
+static inline void devm_acpi_dev_remove_driver_gpios(struct device *dev) {}
+
static inline int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index)
{
return -ENXIO;
diff --git a/include/linux/acpi_iort.h b/include/linux/acpi_iort.h
index 77e08099e554..3ff9acea8616 100644
--- a/include/linux/acpi_iort.h
+++ b/include/linux/acpi_iort.h
@@ -34,6 +34,8 @@ void acpi_iort_init(void);
bool iort_node_match(u8 type);
u32 iort_msi_map_rid(struct device *dev, u32 req_id);
struct irq_domain *iort_get_device_domain(struct device *dev, u32 req_id);
+void acpi_configure_pmsi_domain(struct device *dev);
+int iort_pmsi_get_dev_id(struct device *dev, u32 *dev_id);
/* IOMMU interface */
void iort_set_dma_mask(struct device *dev);
const struct iommu_ops *iort_iommu_configure(struct device *dev);
@@ -45,6 +47,7 @@ static inline u32 iort_msi_map_rid(struct device *dev, u32 req_id)
static inline struct irq_domain *iort_get_device_domain(struct device *dev,
u32 req_id)
{ return NULL; }
+static inline void acpi_configure_pmsi_domain(struct device *dev) { }
/* IOMMU interface */
static inline void iort_set_dma_mask(struct device *dev) { }
static inline
@@ -52,7 +55,4 @@ const struct iommu_ops *iort_iommu_configure(struct device *dev)
{ return NULL; }
#endif
-#define IORT_ACPI_DECLARE(name, table_id, fn) \
- ACPI_DECLARE_PROBE_ENTRY(iort, name, table_id, 0, NULL, 0, fn)
-
#endif /* __ACPI_IORT_H__ */
diff --git a/include/linux/amba/pl080.h b/include/linux/amba/pl080.h
index 91b84a7f0539..580b5323a717 100644
--- a/include/linux/amba/pl080.h
+++ b/include/linux/amba/pl080.h
@@ -38,24 +38,16 @@
#define PL080_SOFT_LSREQ (0x2C)
#define PL080_CONFIG (0x30)
-#define PL080_CONFIG_M2_BE (1 << 2)
-#define PL080_CONFIG_M1_BE (1 << 1)
-#define PL080_CONFIG_ENABLE (1 << 0)
+#define PL080_CONFIG_M2_BE BIT(2)
+#define PL080_CONFIG_M1_BE BIT(1)
+#define PL080_CONFIG_ENABLE BIT(0)
#define PL080_SYNC (0x34)
/* Per channel configuration registers */
-#define PL080_Cx_STRIDE (0x20)
+/* Per channel configuration registers */
#define PL080_Cx_BASE(x) ((0x100 + (x * 0x20)))
-#define PL080_Cx_SRC_ADDR(x) ((0x100 + (x * 0x20)))
-#define PL080_Cx_DST_ADDR(x) ((0x104 + (x * 0x20)))
-#define PL080_Cx_LLI(x) ((0x108 + (x * 0x20)))
-#define PL080_Cx_CONTROL(x) ((0x10C + (x * 0x20)))
-#define PL080_Cx_CONFIG(x) ((0x110 + (x * 0x20)))
-#define PL080S_Cx_CONTROL2(x) ((0x110 + (x * 0x20)))
-#define PL080S_Cx_CONFIG(x) ((0x114 + (x * 0x20)))
-
#define PL080_CH_SRC_ADDR (0x00)
#define PL080_CH_DST_ADDR (0x04)
#define PL080_CH_LLI (0x08)
@@ -66,18 +58,18 @@
#define PL080_LLI_ADDR_MASK (0x3fffffff << 2)
#define PL080_LLI_ADDR_SHIFT (2)
-#define PL080_LLI_LM_AHB2 (1 << 0)
+#define PL080_LLI_LM_AHB2 BIT(0)
-#define PL080_CONTROL_TC_IRQ_EN (1 << 31)
+#define PL080_CONTROL_TC_IRQ_EN BIT(31)
#define PL080_CONTROL_PROT_MASK (0x7 << 28)
#define PL080_CONTROL_PROT_SHIFT (28)
-#define PL080_CONTROL_PROT_CACHE (1 << 30)
-#define PL080_CONTROL_PROT_BUFF (1 << 29)
-#define PL080_CONTROL_PROT_SYS (1 << 28)
-#define PL080_CONTROL_DST_INCR (1 << 27)
-#define PL080_CONTROL_SRC_INCR (1 << 26)
-#define PL080_CONTROL_DST_AHB2 (1 << 25)
-#define PL080_CONTROL_SRC_AHB2 (1 << 24)
+#define PL080_CONTROL_PROT_CACHE BIT(30)
+#define PL080_CONTROL_PROT_BUFF BIT(29)
+#define PL080_CONTROL_PROT_SYS BIT(28)
+#define PL080_CONTROL_DST_INCR BIT(27)
+#define PL080_CONTROL_SRC_INCR BIT(26)
+#define PL080_CONTROL_DST_AHB2 BIT(25)
+#define PL080_CONTROL_SRC_AHB2 BIT(24)
#define PL080_CONTROL_DWIDTH_MASK (0x7 << 21)
#define PL080_CONTROL_DWIDTH_SHIFT (21)
#define PL080_CONTROL_SWIDTH_MASK (0x7 << 18)
@@ -103,20 +95,20 @@
#define PL080_WIDTH_16BIT (0x1)
#define PL080_WIDTH_32BIT (0x2)
-#define PL080N_CONFIG_ITPROT (1 << 20)
-#define PL080N_CONFIG_SECPROT (1 << 19)
-#define PL080_CONFIG_HALT (1 << 18)
-#define PL080_CONFIG_ACTIVE (1 << 17) /* RO */
-#define PL080_CONFIG_LOCK (1 << 16)
-#define PL080_CONFIG_TC_IRQ_MASK (1 << 15)
-#define PL080_CONFIG_ERR_IRQ_MASK (1 << 14)
+#define PL080N_CONFIG_ITPROT BIT(20)
+#define PL080N_CONFIG_SECPROT BIT(19)
+#define PL080_CONFIG_HALT BIT(18)
+#define PL080_CONFIG_ACTIVE BIT(17) /* RO */
+#define PL080_CONFIG_LOCK BIT(16)
+#define PL080_CONFIG_TC_IRQ_MASK BIT(15)
+#define PL080_CONFIG_ERR_IRQ_MASK BIT(14)
#define PL080_CONFIG_FLOW_CONTROL_MASK (0x7 << 11)
#define PL080_CONFIG_FLOW_CONTROL_SHIFT (11)
#define PL080_CONFIG_DST_SEL_MASK (0xf << 6)
#define PL080_CONFIG_DST_SEL_SHIFT (6)
#define PL080_CONFIG_SRC_SEL_MASK (0xf << 1)
#define PL080_CONFIG_SRC_SEL_SHIFT (1)
-#define PL080_CONFIG_ENABLE (1 << 0)
+#define PL080_CONFIG_ENABLE BIT(0)
#define PL080_FLOW_MEM2MEM (0x0)
#define PL080_FLOW_MEM2PER (0x1)
diff --git a/include/linux/amba/pl330.h b/include/linux/amba/pl330.h
deleted file mode 100644
index fe93758e8403..000000000000
--- a/include/linux/amba/pl330.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/* linux/include/linux/amba/pl330.h
- *
- * Copyright (C) 2010 Samsung Electronics Co. Ltd.
- * Jaswinder Singh <jassi.brar@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#ifndef __AMBA_PL330_H_
-#define __AMBA_PL330_H_
-
-#include <linux/dmaengine.h>
-
-struct dma_pl330_platdata {
- /*
- * Number of valid peripherals connected to DMAC.
- * This may be different from the value read from
- * CR0, as the PL330 implementation might have 'holes'
- * in the peri list or the peri could also be reached
- * from another DMAC which the platform prefers.
- */
- u8 nr_valid_peri;
- /* Array of valid peripherals */
- u8 *peri_id;
- /* Operational capabilities */
- dma_cap_mask_t cap_mask;
- /* Bytes to allocate for MC buffer */
- unsigned mcbuf_sz;
-};
-
-extern bool pl330_filter(struct dma_chan *chan, void *param);
-#endif /* __AMBA_PL330_H_ */
diff --git a/include/linux/bcma/bcma_driver_pci.h b/include/linux/bcma/bcma_driver_pci.h
index 9657f11d48a7..bca6a5e4ca3d 100644
--- a/include/linux/bcma/bcma_driver_pci.h
+++ b/include/linux/bcma/bcma_driver_pci.h
@@ -80,7 +80,7 @@ struct pci_dev;
#define BCMA_CORE_PCI_MDIODATA_DEV_TX 0x1e /* SERDES TX Dev */
#define BCMA_CORE_PCI_MDIODATA_DEV_RX 0x1f /* SERDES RX Dev */
#define BCMA_CORE_PCI_PCIEIND_ADDR 0x0130 /* indirect access to the internal register */
-#define BCMA_CORE_PCI_PCIEIND_DATA 0x0134 /* Data to/from the internal regsiter */
+#define BCMA_CORE_PCI_PCIEIND_DATA 0x0134 /* Data to/from the internal register */
#define BCMA_CORE_PCI_CLKREQENCTRL 0x0138 /* >= rev 6, Clkreq rdma control */
#define BCMA_CORE_PCI_PCICFG0 0x0400 /* PCI config space 0 (rev >= 8) */
#define BCMA_CORE_PCI_PCICFG1 0x0500 /* PCI config space 1 (rev >= 8) */
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index f3e5e1de1bdb..c47aa248c640 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -57,6 +57,11 @@ struct blk_mq_hw_ctx {
unsigned long poll_considered;
unsigned long poll_invoked;
unsigned long poll_success;
+
+#ifdef CONFIG_BLK_DEBUG_FS
+ struct dentry *debugfs_dir;
+ struct dentry *sched_debugfs_dir;
+#endif
};
struct blk_mq_tag_set {
@@ -86,9 +91,9 @@ typedef int (queue_rq_fn)(struct blk_mq_hw_ctx *, const struct blk_mq_queue_data
typedef enum blk_eh_timer_return (timeout_fn)(struct request *, bool);
typedef int (init_hctx_fn)(struct blk_mq_hw_ctx *, void *, unsigned int);
typedef void (exit_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int);
-typedef int (init_request_fn)(void *, struct request *, unsigned int,
+typedef int (init_request_fn)(struct blk_mq_tag_set *set, struct request *,
unsigned int, unsigned int);
-typedef void (exit_request_fn)(void *, struct request *, unsigned int,
+typedef void (exit_request_fn)(struct blk_mq_tag_set *set, struct request *,
unsigned int);
typedef int (reinit_request_fn)(void *, struct request *);
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 83d28623645f..b5d1e27631ee 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -579,7 +579,7 @@ struct request_queue {
#ifdef CONFIG_BLK_DEBUG_FS
struct dentry *debugfs_dir;
- struct dentry *mq_debugfs_dir;
+ struct dentry *sched_debugfs_dir;
#endif
bool mq_sysfs_init_done;
@@ -1923,28 +1923,12 @@ static inline bool integrity_req_gap_front_merge(struct request *req,
#endif /* CONFIG_BLK_DEV_INTEGRITY */
-/**
- * struct blk_dax_ctl - control and output parameters for ->direct_access
- * @sector: (input) offset relative to a block_device
- * @addr: (output) kernel virtual address for @sector populated by driver
- * @pfn: (output) page frame number for @addr populated by driver
- * @size: (input) number of bytes requested
- */
-struct blk_dax_ctl {
- sector_t sector;
- void *addr;
- long size;
- pfn_t pfn;
-};
-
struct block_device_operations {
int (*open) (struct block_device *, fmode_t);
void (*release) (struct gendisk *, fmode_t);
int (*rw_page)(struct block_device *, sector_t, struct page *, bool);
int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
- long (*direct_access)(struct block_device *, sector_t, void **, pfn_t *,
- long);
unsigned int (*check_events) (struct gendisk *disk,
unsigned int clearing);
/* ->media_changed() is DEPRECATED, use ->check_events() instead */
@@ -1963,9 +1947,8 @@ extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
extern int bdev_read_page(struct block_device *, sector_t, struct page *);
extern int bdev_write_page(struct block_device *, sector_t, struct page *,
struct writeback_control *);
-extern long bdev_direct_access(struct block_device *, struct blk_dax_ctl *);
extern int bdev_dax_supported(struct super_block *, int);
-extern bool bdev_dax_capable(struct block_device *);
+int bdev_dax_pgoff(struct block_device *, sector_t, size_t, pgoff_t *pgoff);
#else /* CONFIG_BLOCK */
struct block_device;
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index 79591c3660cc..bd029e52ef5e 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -196,8 +196,6 @@ void ll_rw_block(int, int, int, struct buffer_head * bh[]);
int sync_dirty_buffer(struct buffer_head *bh);
int __sync_dirty_buffer(struct buffer_head *bh, int op_flags);
void write_dirty_buffer(struct buffer_head *bh, int op_flags);
-int _submit_bh(int op, int op_flags, struct buffer_head *bh,
- unsigned long bio_flags);
int submit_bh(int, int, struct buffer_head *);
void write_boundary_block(struct block_device *bdev,
sector_t bblock, unsigned blocksize);
diff --git a/include/linux/cdev.h b/include/linux/cdev.h
index f8763615a5f2..408bc09ce497 100644
--- a/include/linux/cdev.h
+++ b/include/linux/cdev.h
@@ -4,6 +4,7 @@
#include <linux/kobject.h>
#include <linux/kdev_t.h>
#include <linux/list.h>
+#include <linux/device.h>
struct file_operations;
struct inode;
@@ -26,6 +27,10 @@ void cdev_put(struct cdev *p);
int cdev_add(struct cdev *, dev_t, unsigned);
+void cdev_set_parent(struct cdev *p, struct kobject *kobj);
+int cdev_device_add(struct cdev *cdev, struct device *dev);
+void cdev_device_del(struct cdev *cdev, struct device *dev);
+
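A plausible usage sketch for the new combined helpers (hypothetical driver names; assumes the struct device has already been initialized and its devt assigned before the call):

	cdev_init(&foo->cdev, &foo_fops);
	foo->dev.devt = MKDEV(foo_major, minor);
	err = cdev_device_add(&foo->cdev, &foo->dev);
	if (err)
		goto out_put;
	/* ... later, on teardown ... */
	cdev_device_del(&foo->cdev, &foo->dev);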
void cdev_del(struct cdev *);
void cd_forget(struct inode *);
diff --git a/include/linux/ceph/ceph_features.h b/include/linux/ceph/ceph_features.h
index ae2f66833762..fd8b2953c78f 100644
--- a/include/linux/ceph/ceph_features.h
+++ b/include/linux/ceph/ceph_features.h
@@ -105,8 +105,10 @@ static inline u64 ceph_sanitize_features(u64 features)
*/
#define CEPH_FEATURES_SUPPORTED_DEFAULT \
(CEPH_FEATURE_NOSRCADDR | \
+ CEPH_FEATURE_FLOCK | \
CEPH_FEATURE_SUBSCRIBE2 | \
CEPH_FEATURE_RECONNECT_SEQ | \
+ CEPH_FEATURE_DIRLAYOUTHASH | \
CEPH_FEATURE_PGID64 | \
CEPH_FEATURE_PGPOOL3 | \
CEPH_FEATURE_OSDENC | \
@@ -114,11 +116,13 @@ static inline u64 ceph_sanitize_features(u64 features)
CEPH_FEATURE_MSG_AUTH | \
CEPH_FEATURE_CRUSH_TUNABLES2 | \
CEPH_FEATURE_REPLY_CREATE_INODE | \
+ CEPH_FEATURE_MDSENC | \
CEPH_FEATURE_OSDHASHPSPOOL | \
CEPH_FEATURE_OSD_CACHEPOOL | \
CEPH_FEATURE_CRUSH_V2 | \
CEPH_FEATURE_EXPORT_PEER | \
CEPH_FEATURE_OSDMAP_ENC | \
+ CEPH_FEATURE_MDS_INLINE_DATA | \
CEPH_FEATURE_CRUSH_TUNABLES3 | \
CEPH_FEATURE_OSD_PRIMARY_AFFINITY | \
CEPH_FEATURE_MSGR_KEEPALIVE2 | \
diff --git a/include/linux/ceph/ceph_fs.h b/include/linux/ceph/ceph_fs.h
index f4b2ee18f38c..ad078ebe25d6 100644
--- a/include/linux/ceph/ceph_fs.h
+++ b/include/linux/ceph/ceph_fs.h
@@ -365,6 +365,19 @@ extern const char *ceph_mds_op_name(int op);
#define CEPH_READDIR_FRAG_END (1<<0)
#define CEPH_READDIR_FRAG_COMPLETE (1<<8)
#define CEPH_READDIR_HASH_ORDER (1<<9)
+#define CEPH_READDIR_OFFSET_HASH (1<<10)
+
+/*
+ * open request flags
+ */
+#define CEPH_O_RDONLY 00000000
+#define CEPH_O_WRONLY 00000001
+#define CEPH_O_RDWR 00000002
+#define CEPH_O_CREAT 00000100
+#define CEPH_O_EXCL 00000200
+#define CEPH_O_TRUNC 00001000
+#define CEPH_O_DIRECTORY 00200000
+#define CEPH_O_NOFOLLOW 00400000
union ceph_mds_request_args {
struct {
@@ -384,6 +397,7 @@ union ceph_mds_request_args {
__le32 max_entries; /* how many dentries to grab */
__le32 max_bytes;
__le16 flags;
+ __le32 offset_hash;
} __attribute__ ((packed)) readdir;
struct {
__le32 mode;
diff --git a/include/linux/ceph/cls_lock_client.h b/include/linux/ceph/cls_lock_client.h
index 84884d8d4710..0594d3bba774 100644
--- a/include/linux/ceph/cls_lock_client.h
+++ b/include/linux/ceph/cls_lock_client.h
@@ -37,6 +37,11 @@ int ceph_cls_break_lock(struct ceph_osd_client *osdc,
struct ceph_object_locator *oloc,
char *lock_name, char *cookie,
struct ceph_entity_name *locker);
+int ceph_cls_set_cookie(struct ceph_osd_client *osdc,
+ struct ceph_object_id *oid,
+ struct ceph_object_locator *oloc,
+ char *lock_name, u8 type, char *old_cookie,
+ char *tag, char *new_cookie);
void ceph_free_lockers(struct ceph_locker *lockers, u32 num_lockers);
diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h
index 88cd5dc8e238..3229ae6c7846 100644
--- a/include/linux/ceph/libceph.h
+++ b/include/linux/ceph/libceph.h
@@ -14,6 +14,7 @@
#include <linux/wait.h>
#include <linux/writeback.h>
#include <linux/slab.h>
+#include <linux/refcount.h>
#include <linux/ceph/types.h>
#include <linux/ceph/messenger.h>
@@ -161,7 +162,7 @@ struct ceph_client {
* dirtied.
*/
struct ceph_snap_context {
- atomic_t nref;
+ refcount_t nref;
u64 seq;
u32 num_snaps;
u64 snaps[];
@@ -262,10 +263,7 @@ int ceph_print_client_options(struct seq_file *m, struct ceph_client *client);
extern void ceph_destroy_options(struct ceph_options *opt);
extern int ceph_compare_options(struct ceph_options *new_opt,
struct ceph_client *client);
-extern struct ceph_client *ceph_create_client(struct ceph_options *opt,
- void *private,
- u64 supported_features,
- u64 required_features);
+struct ceph_client *ceph_create_client(struct ceph_options *opt, void *private);
struct ceph_entity_addr *ceph_client_addr(struct ceph_client *client);
u64 ceph_client_gid(struct ceph_client *client);
extern void ceph_destroy_client(struct ceph_client *client);
diff --git a/include/linux/ceph/mdsmap.h b/include/linux/ceph/mdsmap.h
index 8ed5dc505fbb..d5f783f3226a 100644
--- a/include/linux/ceph/mdsmap.h
+++ b/include/linux/ceph/mdsmap.h
@@ -25,6 +25,7 @@ struct ceph_mdsmap {
u32 m_session_autoclose; /* seconds */
u64 m_max_file_size;
u32 m_max_mds; /* size of m_addr, m_state arrays */
+ int m_num_mds;
struct ceph_mds_info *m_info;
/* which object pools file data can be stored in */
@@ -40,7 +41,7 @@ struct ceph_mdsmap {
static inline struct ceph_entity_addr *
ceph_mdsmap_get_addr(struct ceph_mdsmap *m, int w)
{
- if (w >= m->m_max_mds)
+ if (w >= m->m_num_mds)
return NULL;
return &m->m_info[w].addr;
}
@@ -48,14 +49,14 @@ ceph_mdsmap_get_addr(struct ceph_mdsmap *m, int w)
static inline int ceph_mdsmap_get_state(struct ceph_mdsmap *m, int w)
{
BUG_ON(w < 0);
- if (w >= m->m_max_mds)
+ if (w >= m->m_num_mds)
return CEPH_MDS_STATE_DNE;
return m->m_info[w].state;
}
static inline bool ceph_mdsmap_is_laggy(struct ceph_mdsmap *m, int w)
{
- if (w >= 0 && w < m->m_max_mds)
+ if (w >= 0 && w < m->m_num_mds)
return m->m_info[w].laggy;
return false;
}
diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
index c125b5d9e13c..85650b415e73 100644
--- a/include/linux/ceph/osd_client.h
+++ b/include/linux/ceph/osd_client.h
@@ -5,6 +5,7 @@
#include <linux/kref.h>
#include <linux/mempool.h>
#include <linux/rbtree.h>
+#include <linux/refcount.h>
#include <linux/ceph/types.h>
#include <linux/ceph/osdmap.h>
@@ -27,7 +28,7 @@ typedef void (*ceph_osdc_callback_t)(struct ceph_osd_request *);
/* a given osd we're communicating with */
struct ceph_osd {
- atomic_t o_ref;
+ refcount_t o_ref;
struct ceph_osd_client *o_osdc;
int o_osd;
int o_incarnation;
@@ -186,12 +187,12 @@ struct ceph_osd_request {
struct timespec r_mtime; /* ditto */
u64 r_data_offset; /* ditto */
bool r_linger; /* don't resend on failure */
+ bool r_abort_on_full; /* return ENOSPC when full */
/* internal */
unsigned long r_stamp; /* jiffies, send or check time */
unsigned long r_start_stamp; /* jiffies */
int r_attempts;
- struct ceph_eversion r_replay_version; /* aka reassert_version */
u32 r_last_force_resend;
u32 r_map_dne_bound;
@@ -266,6 +267,7 @@ struct ceph_osd_client {
struct rb_root osds; /* osds */
struct list_head osd_lru; /* idle osds */
spinlock_t osd_lru_lock;
+ u32 epoch_barrier;
struct ceph_osd homeless_osd;
atomic64_t last_tid; /* tid of last request */
u64 last_linger_id;
@@ -304,6 +306,7 @@ extern void ceph_osdc_handle_reply(struct ceph_osd_client *osdc,
struct ceph_msg *msg);
extern void ceph_osdc_handle_map(struct ceph_osd_client *osdc,
struct ceph_msg *msg);
+void ceph_osdc_update_epoch_barrier(struct ceph_osd_client *osdc, u32 eb);
extern void osd_req_op_init(struct ceph_osd_request *osd_req,
unsigned int which, u16 opcode, u32 flags);
diff --git a/include/linux/ceph/pagelist.h b/include/linux/ceph/pagelist.h
index 13d71fe18b0c..75a7db21457d 100644
--- a/include/linux/ceph/pagelist.h
+++ b/include/linux/ceph/pagelist.h
@@ -2,7 +2,7 @@
#define __FS_CEPH_PAGELIST_H
#include <asm/byteorder.h>
-#include <linux/atomic.h>
+#include <linux/refcount.h>
#include <linux/list.h>
#include <linux/types.h>
@@ -13,7 +13,7 @@ struct ceph_pagelist {
size_t room;
struct list_head free_list;
size_t num_pages_free;
- atomic_t refcnt;
+ refcount_t refcnt;
};
struct ceph_pagelist_cursor {
@@ -30,7 +30,7 @@ static inline void ceph_pagelist_init(struct ceph_pagelist *pl)
pl->room = 0;
INIT_LIST_HEAD(&pl->free_list);
pl->num_pages_free = 0;
- atomic_set(&pl->refcnt, 1);
+ refcount_set(&pl->refcnt, 1);
}
extern void ceph_pagelist_release(struct ceph_pagelist *pl);
diff --git a/include/linux/cma.h b/include/linux/cma.h
index 03f32d0bd1d8..3e8fbf5a5c73 100644
--- a/include/linux/cma.h
+++ b/include/linux/cma.h
@@ -21,15 +21,19 @@ struct cma;
extern unsigned long totalcma_pages;
extern phys_addr_t cma_get_base(const struct cma *cma);
extern unsigned long cma_get_size(const struct cma *cma);
+extern const char *cma_get_name(const struct cma *cma);
extern int __init cma_declare_contiguous(phys_addr_t base,
phys_addr_t size, phys_addr_t limit,
phys_addr_t alignment, unsigned int order_per_bit,
- bool fixed, struct cma **res_cma);
+ bool fixed, const char *name, struct cma **res_cma);
extern int cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
unsigned int order_per_bit,
+ const char *name,
struct cma **res_cma);
extern struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
gfp_t gfp_mask);
extern bool cma_release(struct cma *cma, const struct page *pages, unsigned int count);
+
+extern int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data);
#endif
diff --git a/include/linux/console.h b/include/linux/console.h
index 5949d1855589..b8920a031a3e 100644
--- a/include/linux/console.h
+++ b/include/linux/console.h
@@ -212,4 +212,6 @@ extern bool vgacon_text_force(void);
static inline bool vgacon_text_force(void) { return false; }
#endif
+extern void console_init(void);
+
#endif /* _LINUX_CONSOLE_H */
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index 62d240e962f0..0f2a80377520 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -94,6 +94,7 @@ enum cpuhp_state {
CPUHP_AP_ARM_VFP_STARTING,
CPUHP_AP_ARM64_DEBUG_MONITORS_STARTING,
CPUHP_AP_PERF_ARM_HW_BREAKPOINT_STARTING,
+ CPUHP_AP_PERF_ARM_ACPI_STARTING,
CPUHP_AP_PERF_ARM_STARTING,
CPUHP_AP_ARM_L2X0_STARTING,
CPUHP_AP_ARM_ARCH_TIMER_STARTING,
@@ -137,6 +138,7 @@ enum cpuhp_state {
CPUHP_AP_PERF_ARM_CCN_ONLINE,
CPUHP_AP_PERF_ARM_L2X0_ONLINE,
CPUHP_AP_PERF_ARM_QCOM_L2_ONLINE,
+ CPUHP_AP_PERF_ARM_QCOM_L3_ONLINE,
CPUHP_AP_WORKQUEUE_ONLINE,
CPUHP_AP_RCUTREE_ONLINE,
CPUHP_AP_ONLINE_DYN,
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index 1a675604b17d..2404ad238c0b 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -40,9 +40,9 @@ extern int nr_cpu_ids;
#ifdef CONFIG_CPUMASK_OFFSTACK
/* Assuming NR_CPUS is huge, a runtime limit is more efficient. Also,
* not all bits may be allocated. */
-#define nr_cpumask_bits nr_cpu_ids
+#define nr_cpumask_bits ((unsigned int)nr_cpu_ids)
#else
-#define nr_cpumask_bits NR_CPUS
+#define nr_cpumask_bits ((unsigned int)NR_CPUS)
#endif
/*
diff --git a/include/linux/crash_core.h b/include/linux/crash_core.h
new file mode 100644
index 000000000000..541a197ba4a2
--- /dev/null
+++ b/include/linux/crash_core.h
@@ -0,0 +1,69 @@
+#ifndef LINUX_CRASH_CORE_H
+#define LINUX_CRASH_CORE_H
+
+#include <linux/linkage.h>
+#include <linux/elfcore.h>
+#include <linux/elf.h>
+
+#define CRASH_CORE_NOTE_NAME "CORE"
+#define CRASH_CORE_NOTE_HEAD_BYTES ALIGN(sizeof(struct elf_note), 4)
+#define CRASH_CORE_NOTE_NAME_BYTES ALIGN(sizeof(CRASH_CORE_NOTE_NAME), 4)
+#define CRASH_CORE_NOTE_DESC_BYTES ALIGN(sizeof(struct elf_prstatus), 4)
+
+#define CRASH_CORE_NOTE_BYTES ((CRASH_CORE_NOTE_HEAD_BYTES * 2) + \
+ CRASH_CORE_NOTE_NAME_BYTES + \
+ CRASH_CORE_NOTE_DESC_BYTES)
+
+#define VMCOREINFO_BYTES (4096)
+#define VMCOREINFO_NOTE_NAME "VMCOREINFO"
+#define VMCOREINFO_NOTE_NAME_BYTES ALIGN(sizeof(VMCOREINFO_NOTE_NAME), 4)
+#define VMCOREINFO_NOTE_SIZE ((CRASH_CORE_NOTE_HEAD_BYTES * 2) + \
+ VMCOREINFO_NOTE_NAME_BYTES + \
+ VMCOREINFO_BYTES)
+
+typedef u32 note_buf_t[CRASH_CORE_NOTE_BYTES/4];
+
+void crash_save_vmcoreinfo(void);
+void arch_crash_save_vmcoreinfo(void);
+__printf(1, 2)
+void vmcoreinfo_append_str(const char *fmt, ...);
+phys_addr_t paddr_vmcoreinfo_note(void);
+
+#define VMCOREINFO_OSRELEASE(value) \
+ vmcoreinfo_append_str("OSRELEASE=%s\n", value)
+#define VMCOREINFO_PAGESIZE(value) \
+ vmcoreinfo_append_str("PAGESIZE=%ld\n", value)
+#define VMCOREINFO_SYMBOL(name) \
+ vmcoreinfo_append_str("SYMBOL(%s)=%lx\n", #name, (unsigned long)&name)
+#define VMCOREINFO_SIZE(name) \
+ vmcoreinfo_append_str("SIZE(%s)=%lu\n", #name, \
+ (unsigned long)sizeof(name))
+#define VMCOREINFO_STRUCT_SIZE(name) \
+ vmcoreinfo_append_str("SIZE(%s)=%lu\n", #name, \
+ (unsigned long)sizeof(struct name))
+#define VMCOREINFO_OFFSET(name, field) \
+ vmcoreinfo_append_str("OFFSET(%s.%s)=%lu\n", #name, #field, \
+ (unsigned long)offsetof(struct name, field))
+#define VMCOREINFO_LENGTH(name, value) \
+ vmcoreinfo_append_str("LENGTH(%s)=%lu\n", #name, (unsigned long)value)
+#define VMCOREINFO_NUMBER(name) \
+ vmcoreinfo_append_str("NUMBER(%s)=%ld\n", #name, (long)name)
+#define VMCOREINFO_CONFIG(name) \
+ vmcoreinfo_append_str("CONFIG_%s=y\n", #name)
+
+extern u32 vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4];
+extern size_t vmcoreinfo_size;
+extern size_t vmcoreinfo_max_size;
+
+Elf_Word *append_elf_note(Elf_Word *buf, char *name, unsigned int type,
+ void *data, size_t data_len);
+void final_note(Elf_Word *buf);
+
+int __init parse_crashkernel(char *cmdline, unsigned long long system_ram,
+ unsigned long long *crash_size, unsigned long long *crash_base);
+int parse_crashkernel_high(char *cmdline, unsigned long long system_ram,
+ unsigned long long *crash_size, unsigned long long *crash_base);
+int parse_crashkernel_low(char *cmdline, unsigned long long system_ram,
+ unsigned long long *crash_size, unsigned long long *crash_base);
+
+#endif /* LINUX_CRASH_CORE_H */
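Each VMCOREINFO_* helper appends one "KEY=value" record to the vmcoreinfo note that crash/kdump tooling parses after a crash. A hedged sketch of how an architecture hook might use them; the symbol and constant names below are assumptions chosen only for illustration:

void arch_crash_save_vmcoreinfo(void)
{
	/* Hypothetical examples -- substitute the architecture's real symbols. */
	VMCOREINFO_SYMBOL(node_data);		/* assumed symbol */
	VMCOREINFO_NUMBER(MAX_PHYSMEM_BITS);	/* assumed constant */
	vmcoreinfo_append_str("KERNELOFFSET=%lx\n", 0UL);
}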
diff --git a/include/linux/dax.h b/include/linux/dax.h
index d8a3dc042e1c..d3158e74a59e 100644
--- a/include/linux/dax.h
+++ b/include/linux/dax.h
@@ -7,6 +7,28 @@
#include <asm/pgtable.h>
struct iomap_ops;
+struct dax_device;
+struct dax_operations {
+ /*
+ * direct_access: translate a device-relative
+ * logical-page-offset into an absolute physical pfn. Return the
+ * number of pages available for DAX at that pfn.
+ */
+ long (*direct_access)(struct dax_device *, pgoff_t, long,
+ void **, pfn_t *);
+};
+
+int dax_read_lock(void);
+void dax_read_unlock(int id);
+struct dax_device *dax_get_by_host(const char *host);
+struct dax_device *alloc_dax(void *private, const char *host,
+ const struct dax_operations *ops);
+void put_dax(struct dax_device *dax_dev);
+bool dax_alive(struct dax_device *dax_dev);
+void kill_dax(struct dax_device *dax_dev);
+void *dax_get_private(struct dax_device *dax_dev);
+long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
+ void **kaddr, pfn_t *pfn);
/*
* We use lowest available bit in exceptional entry for locking, one bit for
@@ -48,17 +70,13 @@ void dax_wake_mapping_entry_waiter(struct address_space *mapping,
pgoff_t index, void *entry, bool wake_all);
#ifdef CONFIG_FS_DAX
-struct page *read_dax_sector(struct block_device *bdev, sector_t n);
-int __dax_zero_page_range(struct block_device *bdev, sector_t sector,
+int __dax_zero_page_range(struct block_device *bdev,
+ struct dax_device *dax_dev, sector_t sector,
unsigned int offset, unsigned int length);
#else
-static inline struct page *read_dax_sector(struct block_device *bdev,
- sector_t n)
-{
- return ERR_PTR(-ENXIO);
-}
static inline int __dax_zero_page_range(struct block_device *bdev,
- sector_t sector, unsigned int offset, unsigned int length)
+ struct dax_device *dax_dev, sector_t sector,
+ unsigned int offset, unsigned int length)
{
return -ENXIO;
}
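dax_direct_access() is only valid while the device is known to be alive, which is what the dax_read_lock()/dax_read_unlock() pair guarantees. A minimal sketch of the calling convention, assuming the surrounding driver already has dax_dev, pgoff and nr_pages in scope:

	void *kaddr;
	pfn_t pfn;
	long avail;
	int id;

	id = dax_read_lock();
	avail = dax_direct_access(dax_dev, pgoff, nr_pages, &kaddr, &pfn);
	dax_read_unlock(id);
	if (avail < 0)
		return avail;	/* error propagated from ->direct_access() */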
diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h
index 7dff776e6d16..9174b0d28582 100644
--- a/include/linux/debugfs.h
+++ b/include/linux/debugfs.h
@@ -74,7 +74,7 @@ static const struct file_operations __fops = { \
.release = simple_attr_release, \
.read = debugfs_attr_read, \
.write = debugfs_attr_write, \
- .llseek = generic_file_llseek, \
+ .llseek = no_llseek, \
}
#if defined(CONFIG_DEBUG_FS)
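With .llseek switched to no_llseek, files built from DEFINE_DEBUGFS_ATTRIBUTE() are no longer seekable. A hedged sketch of a typical attribute; the value, names and the parent dentry are illustrative, and such files are registered with debugfs_create_file_unsafe():

static u64 my_value;

static int my_value_get(void *data, u64 *val)
{
	*val = *(u64 *)data;
	return 0;
}

static int my_value_set(void *data, u64 val)
{
	*(u64 *)data = val;
	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(fops_my_value, my_value_get, my_value_set, "%llu\n");

/* in probe/init code, with 'parent' an existing debugfs directory */
debugfs_create_file_unsafe("my_value", 0644, parent, &my_value, &fops_my_value);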
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index 78ad0624cdae..f4c639c0c362 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -130,13 +130,15 @@ typedef int (*dm_busy_fn) (struct dm_target *ti);
* < 0 : error
* >= 0 : the number of bytes accessible at the address
*/
-typedef long (*dm_direct_access_fn) (struct dm_target *ti, sector_t sector,
- void **kaddr, pfn_t *pfn, long size);
+typedef long (*dm_dax_direct_access_fn) (struct dm_target *ti, pgoff_t pgoff,
+ long nr_pages, void **kaddr, pfn_t *pfn);
+#define PAGE_SECTORS (PAGE_SIZE / 512)
void dm_error(const char *message);
struct dm_dev {
struct block_device *bdev;
+ struct dax_device *dax_dev;
fmode_t mode;
char name[16];
};
@@ -178,7 +180,7 @@ struct target_type {
dm_busy_fn busy;
dm_iterate_devices_fn iterate_devices;
dm_io_hints_fn io_hints;
- dm_direct_access_fn direct_access;
+ dm_dax_direct_access_fn direct_access;
/* For internal device-mapper use. */
struct list_head list;
diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h
index 6048fa404e57..a5195a7d6f77 100644
--- a/include/linux/dma-fence.h
+++ b/include/linux/dma-fence.h
@@ -229,7 +229,7 @@ static inline struct dma_fence *dma_fence_get_rcu(struct dma_fence *fence)
*
* Function returns NULL if no refcount could be obtained, or the fence.
* This function handles acquiring a reference to a fence that may be
- * reallocated within the RCU grace period (such as with SLAB_DESTROY_BY_RCU),
+ * reallocated within the RCU grace period (such as with SLAB_TYPESAFE_BY_RCU),
* so long as the caller is using RCU on the pointer to the fence.
*
* An alternative mechanism is to employ a seqlock to protect a bunch of
@@ -257,7 +257,7 @@ dma_fence_get_rcu_safe(struct dma_fence * __rcu *fencep)
* have successfully acquire a reference to it. If it no
* longer matches, we are holding a reference to some other
* reallocated pointer. This is possible if the allocator
- * is using a freelist like SLAB_DESTROY_BY_RCU where the
+ * is using a freelist like SLAB_TYPESAFE_BY_RCU where the
* fence remains valid for the RCU grace period, but it
* may be reallocated. When using such allocators, we are
* responsible for ensuring the reference we get is to
diff --git a/include/linux/dma-iommu.h b/include/linux/dma-iommu.h
index 5725c94b1f12..4eac2670bfa1 100644
--- a/include/linux/dma-iommu.h
+++ b/include/linux/dma-iommu.h
@@ -20,6 +20,7 @@
#include <asm/errno.h>
#ifdef CONFIG_IOMMU_DMA
+#include <linux/dma-mapping.h>
#include <linux/iommu.h>
#include <linux/msi.h>
@@ -71,6 +72,7 @@ int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
/* The DMA API isn't _quite_ the whole story, though... */
void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg);
+void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list);
#else
@@ -100,6 +102,10 @@ static inline void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg)
{
}
+static inline void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
+{
+}
+
#endif /* CONFIG_IOMMU_DMA */
#endif /* __KERNEL__ */
#endif /* __DMA_IOMMU_H */
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 0977317c6835..4f3eecedca2d 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -728,6 +728,18 @@ dma_mark_declared_memory_occupied(struct device *dev,
}
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
+#ifdef CONFIG_HAS_DMA
+int dma_configure(struct device *dev);
+void dma_deconfigure(struct device *dev);
+#else
+static inline int dma_configure(struct device *dev)
+{
+ return 0;
+}
+
+static inline void dma_deconfigure(struct device *dev) {}
+#endif
+
/*
* Managed DMA API
*/
diff --git a/include/linux/dma_remapping.h b/include/linux/dma_remapping.h
index 187c10299722..90884072fa73 100644
--- a/include/linux/dma_remapping.h
+++ b/include/linux/dma_remapping.h
@@ -39,6 +39,7 @@ extern int iommu_calculate_agaw(struct intel_iommu *iommu);
extern int iommu_calculate_max_sagaw(struct intel_iommu *iommu);
extern int dmar_disabled;
extern int intel_iommu_enabled;
+extern int intel_iommu_tboot_noforce;
#else
static inline int iommu_calculate_agaw(struct intel_iommu *iommu)
{
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index 3a216318ae73..9ec5e22846e0 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -8,6 +8,9 @@
struct io_cq;
struct elevator_type;
+#ifdef CONFIG_BLK_DEBUG_FS
+struct blk_mq_debugfs_attr;
+#endif
/*
* Return values from elevator merger
@@ -144,6 +147,10 @@ struct elevator_type
char elevator_name[ELV_NAME_MAX];
struct module *elevator_owner;
bool uses_mq;
+#ifdef CONFIG_BLK_DEBUG_FS
+ const struct blk_mq_debugfs_attr *queue_debugfs_attrs;
+ const struct blk_mq_debugfs_attr *hctx_debugfs_attrs;
+#endif
/* managed by elevator core */
char icq_cache_name[ELV_NAME_MAX + 5]; /* elvname + "_io_cq" */
@@ -214,7 +221,6 @@ extern ssize_t elv_iosched_store(struct request_queue *, const char *, size_t);
extern int elevator_init(struct request_queue *, char *);
extern void elevator_exit(struct request_queue *, struct elevator_queue *);
-extern int elevator_change(struct request_queue *, const char *);
extern bool elv_bio_merge_ok(struct request *, struct bio *);
extern struct elevator_queue *elevator_alloc(struct request_queue *,
struct elevator_type *);
diff --git a/include/linux/elf.h b/include/linux/elf.h
index 20fa8d8ae313..ba069e8f4f78 100644
--- a/include/linux/elf.h
+++ b/include/linux/elf.h
@@ -29,6 +29,7 @@ extern Elf32_Dyn _DYNAMIC [];
#define elf_note elf32_note
#define elf_addr_t Elf32_Off
#define Elf_Half Elf32_Half
+#define Elf_Word Elf32_Word
#else
@@ -39,6 +40,7 @@ extern Elf64_Dyn _DYNAMIC [];
#define elf_note elf64_note
#define elf_addr_t Elf64_Off
#define Elf_Half Elf64_Half
+#define Elf_Word Elf64_Word
#endif
diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
index e2d239ed4c60..b6feed6547ce 100644
--- a/include/linux/f2fs_fs.h
+++ b/include/linux/f2fs_fs.h
@@ -32,9 +32,9 @@
/* 0, 1(node nid), 2(meta nid) are reserved node id */
#define F2FS_RESERVED_NODE_NUM 3
-#define F2FS_ROOT_INO(sbi) (sbi->root_ino_num)
-#define F2FS_NODE_INO(sbi) (sbi->node_ino_num)
-#define F2FS_META_INO(sbi) (sbi->meta_ino_num)
+#define F2FS_ROOT_INO(sbi) ((sbi)->root_ino_num)
+#define F2FS_NODE_INO(sbi) ((sbi)->node_ino_num)
+#define F2FS_META_INO(sbi) ((sbi)->meta_ino_num)
#define F2FS_IO_SIZE(sbi) (1 << (sbi)->write_io_size_bits) /* Blocks */
#define F2FS_IO_SIZE_KB(sbi) (1 << ((sbi)->write_io_size_bits + 2)) /* KB */
@@ -114,6 +114,7 @@ struct f2fs_super_block {
/*
* For checkpoint
*/
+#define CP_TRIMMED_FLAG 0x00000100
#define CP_NAT_BITS_FLAG 0x00000080
#define CP_CRC_RECOVERY_FLAG 0x00000040
#define CP_FASTBOOT_FLAG 0x00000020
@@ -161,7 +162,7 @@ struct f2fs_checkpoint {
*/
#define F2FS_ORPHANS_PER_BLOCK 1020
-#define GET_ORPHAN_BLOCKS(n) ((n + F2FS_ORPHANS_PER_BLOCK - 1) / \
+#define GET_ORPHAN_BLOCKS(n) (((n) + F2FS_ORPHANS_PER_BLOCK - 1) / \
F2FS_ORPHANS_PER_BLOCK)
struct f2fs_orphan_block {
@@ -302,6 +303,12 @@ struct f2fs_nat_block {
#define SIT_ENTRY_PER_BLOCK (PAGE_SIZE / sizeof(struct f2fs_sit_entry))
/*
+ * F2FS uses 4 bytes to represent a block address. As a result, the supported
+ * disk size is 16 TB, which equals 16 * 1024 * 1024 / 2 segments.
+ */
+#define F2FS_MAX_SEGMENT ((16 * 1024 * 1024) / 2)
+
+/*
* Note that f2fs_sit_entry->vblocks has the following bit-field information.
* [15:10] : allocation type such as CURSEG_XXXX_TYPE
* [9:0] : valid block count
@@ -449,7 +456,7 @@ typedef __le32 f2fs_hash_t;
#define F2FS_SLOT_LEN 8
#define F2FS_SLOT_LEN_BITS 3
-#define GET_DENTRY_SLOTS(x) ((x + F2FS_SLOT_LEN - 1) >> F2FS_SLOT_LEN_BITS)
+#define GET_DENTRY_SLOTS(x) (((x) + F2FS_SLOT_LEN - 1) >> F2FS_SLOT_LEN_BITS)
/* MAX level for dir lookup */
#define MAX_DIR_HASH_DEPTH 63
diff --git a/include/linux/fcntl.h b/include/linux/fcntl.h
index 76ce329e656d..1b48d9c9a561 100644
--- a/include/linux/fcntl.h
+++ b/include/linux/fcntl.h
@@ -3,6 +3,12 @@
#include <uapi/linux/fcntl.h>
+/* list of all valid flags for the open/openat flags argument: */
+#define VALID_OPEN_FLAGS \
+ (O_RDONLY | O_WRONLY | O_RDWR | O_CREAT | O_EXCL | O_NOCTTY | O_TRUNC | \
+ O_APPEND | O_NDELAY | O_NONBLOCK | O_NDELAY | __O_SYNC | O_DSYNC | \
+ FASYNC | O_DIRECT | O_LARGEFILE | O_DIRECTORY | O_NOFOLLOW | \
+ O_NOATIME | O_CLOEXEC | O_PATH | __O_TMPFILE)
#ifndef force_o_largefile
#define force_o_largefile() (BITS_PER_LONG != 32)
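VALID_OPEN_FLAGS collects every flag bit the open(2)/openat(2) flags argument may legitimately carry, so a caller can reject unknown bits with a single mask test. A hedged sketch of such a check; the helper name is illustrative:

static int validate_open_flags(int flags)
{
	if (flags & ~VALID_OPEN_FLAGS)
		return -EINVAL;	/* undefined flag bit requested */
	return 0;
}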
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 9a7786db14fa..56197f82af45 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -19,7 +19,9 @@
#include <net/sch_generic.h>
-#include <asm/cacheflush.h>
+#ifdef CONFIG_ARCH_HAS_SET_MEMORY
+#include <asm/set_memory.h>
+#endif
#include <uapi/linux/filter.h>
#include <uapi/linux/bpf.h>
diff --git a/include/linux/firmware/meson/meson_sm.h b/include/linux/firmware/meson/meson_sm.h
index 8e953c6f394a..37a5eaea69dd 100644
--- a/include/linux/firmware/meson/meson_sm.h
+++ b/include/linux/firmware/meson/meson_sm.h
@@ -25,7 +25,7 @@ int meson_sm_call(unsigned int cmd_index, u32 *ret, u32 arg0, u32 arg1,
u32 arg2, u32 arg3, u32 arg4);
int meson_sm_call_write(void *buffer, unsigned int b_size, unsigned int cmd_index,
u32 arg0, u32 arg1, u32 arg2, u32 arg3, u32 arg4);
-int meson_sm_call_read(void *buffer, unsigned int cmd_index, u32 arg0, u32 arg1,
- u32 arg2, u32 arg3, u32 arg4);
+int meson_sm_call_read(void *buffer, unsigned int bsize, unsigned int cmd_index,
+ u32 arg0, u32 arg1, u32 arg2, u32 arg3, u32 arg4);
#endif /* _MESON_SM_FW_H_ */
diff --git a/include/linux/fpga/altera-pr-ip-core.h b/include/linux/fpga/altera-pr-ip-core.h
new file mode 100644
index 000000000000..3810a9033f49
--- /dev/null
+++ b/include/linux/fpga/altera-pr-ip-core.h
@@ -0,0 +1,29 @@
+/*
+ * Driver for Altera Partial Reconfiguration IP Core
+ *
+ * Copyright (C) 2016 Intel Corporation
+ *
+ * Based on socfpga-a10.c Copyright (C) 2015-2016 Altera Corporation
+ * by Alan Tull <atull@opensource.altera.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _ALT_PR_IP_CORE_H
+#define _ALT_PR_IP_CORE_H
+#include <linux/io.h>
+
+int alt_pr_register(struct device *dev, void __iomem *reg_base);
+int alt_pr_unregister(struct device *dev);
+
+#endif /* _ALT_PR_IP_CORE_H */
diff --git a/include/linux/fpga/fpga-mgr.h b/include/linux/fpga/fpga-mgr.h
index 57beb5d09bfc..b4ac24c4411d 100644
--- a/include/linux/fpga/fpga-mgr.h
+++ b/include/linux/fpga/fpga-mgr.h
@@ -70,17 +70,21 @@ enum fpga_mgr_states {
*/
#define FPGA_MGR_PARTIAL_RECONFIG BIT(0)
#define FPGA_MGR_EXTERNAL_CONFIG BIT(1)
+#define FPGA_MGR_ENCRYPTED_BITSTREAM BIT(2)
/**
* struct fpga_image_info - information specific to a FPGA image
* @flags: boolean flags as defined above
* @enable_timeout_us: maximum time to enable traffic through bridge (uSec)
* @disable_timeout_us: maximum time to disable traffic through bridge (uSec)
+ * @config_complete_timeout_us: maximum time for FPGA to switch to operating
+ * status in the write_complete op.
*/
struct fpga_image_info {
u32 flags;
u32 enable_timeout_us;
u32 disable_timeout_us;
+ u32 config_complete_timeout_us;
};
/**
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 5d62d2c47939..26488b419965 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -29,7 +29,6 @@
#include <linux/lockdep.h>
#include <linux/percpu-rwsem.h>
#include <linux/workqueue.h>
-#include <linux/percpu-rwsem.h>
#include <linux/delayed_call.h>
#include <asm/byteorder.h>
@@ -250,9 +249,8 @@ enum positive_aop_returns {
AOP_TRUNCATED_PAGE = 0x80001,
};
-#define AOP_FLAG_UNINTERRUPTIBLE 0x0001 /* will not do a short write */
-#define AOP_FLAG_CONT_EXPAND 0x0002 /* called from cont_expand */
-#define AOP_FLAG_NOFS 0x0004 /* used by filesystem to direct
+#define AOP_FLAG_CONT_EXPAND 0x0001 /* called from cont_expand */
+#define AOP_FLAG_NOFS 0x0002 /* used by filesystem to direct
* helper code (eg buffer layer)
* to clear GFP_FS from alloc */
@@ -2926,17 +2924,19 @@ extern int vfs_statx_fd(unsigned int, struct kstat *, u32, unsigned int);
static inline int vfs_stat(const char __user *filename, struct kstat *stat)
{
- return vfs_statx(AT_FDCWD, filename, 0, stat, STATX_BASIC_STATS);
+ return vfs_statx(AT_FDCWD, filename, AT_NO_AUTOMOUNT,
+ stat, STATX_BASIC_STATS);
}
static inline int vfs_lstat(const char __user *name, struct kstat *stat)
{
- return vfs_statx(AT_FDCWD, name, AT_SYMLINK_NOFOLLOW,
+ return vfs_statx(AT_FDCWD, name, AT_SYMLINK_NOFOLLOW | AT_NO_AUTOMOUNT,
stat, STATX_BASIC_STATS);
}
static inline int vfs_fstatat(int dfd, const char __user *filename,
struct kstat *stat, int flags)
{
- return vfs_statx(dfd, filename, flags, stat, STATX_BASIC_STATS);
+ return vfs_statx(dfd, filename, flags | AT_NO_AUTOMOUNT,
+ stat, STATX_BASIC_STATS);
}
static inline int vfs_fstat(int fd, struct kstat *stat)
{
@@ -3001,9 +3001,10 @@ extern const struct file_operations simple_dir_operations;
extern const struct inode_operations simple_dir_inode_operations;
extern void make_empty_dir_inode(struct inode *inode);
extern bool is_empty_dir_inode(struct inode *inode);
-struct tree_descr { char *name; const struct file_operations *ops; int mode; };
+struct tree_descr { const char *name; const struct file_operations *ops; int mode; };
struct dentry *d_alloc_name(struct dentry *, const char *);
-extern int simple_fill_super(struct super_block *, unsigned long, struct tree_descr *);
+extern int simple_fill_super(struct super_block *, unsigned long,
+ const struct tree_descr *);
extern int simple_pin_fs(struct file_system_type *, struct vfsmount **mount, int *count);
extern void simple_release_fs(struct vfsmount **mount, int *count);
diff --git a/include/linux/fscrypt_common.h b/include/linux/fscrypt_common.h
index 10c1abfbac6c..0a30c106c1e5 100644
--- a/include/linux/fscrypt_common.h
+++ b/include/linux/fscrypt_common.h
@@ -46,17 +46,6 @@ struct fscrypt_symlink_data {
char encrypted_path[1];
} __packed;
-/**
- * This function is used to calculate the disk space required to
- * store a filename of length l in encrypted symlink format.
- */
-static inline u32 fscrypt_symlink_data_len(u32 l)
-{
- if (l < FS_CRYPTO_BLOCK_SIZE)
- l = FS_CRYPTO_BLOCK_SIZE;
- return (l + sizeof(struct fscrypt_symlink_data) - 1);
-}
-
struct fscrypt_str {
unsigned char *name;
u32 len;
diff --git a/include/linux/fscrypt_notsupp.h b/include/linux/fscrypt_notsupp.h
index 3511ca798804..ec406aed2f2f 100644
--- a/include/linux/fscrypt_notsupp.h
+++ b/include/linux/fscrypt_notsupp.h
@@ -147,6 +147,15 @@ static inline int fscrypt_fname_usr_to_disk(struct inode *inode,
return -EOPNOTSUPP;
}
+static inline bool fscrypt_match_name(const struct fscrypt_name *fname,
+ const u8 *de_name, u32 de_name_len)
+{
+ /* Encryption support disabled; use standard comparison */
+ if (de_name_len != fname->disk_name.len)
+ return false;
+ return !memcmp(de_name, fname->disk_name.name, fname->disk_name.len);
+}
+
/* bio.c */
static inline void fscrypt_decrypt_bio_pages(struct fscrypt_ctx *ctx,
struct bio *bio)
diff --git a/include/linux/fscrypt_supp.h b/include/linux/fscrypt_supp.h
index a140f47e9b27..cd4e82c17304 100644
--- a/include/linux/fscrypt_supp.h
+++ b/include/linux/fscrypt_supp.h
@@ -57,6 +57,80 @@ extern int fscrypt_fname_disk_to_usr(struct inode *, u32, u32,
extern int fscrypt_fname_usr_to_disk(struct inode *, const struct qstr *,
struct fscrypt_str *);
+#define FSCRYPT_FNAME_MAX_UNDIGESTED_SIZE 32
+
+/* Extracts the second-to-last ciphertext block; see explanation below */
+#define FSCRYPT_FNAME_DIGEST(name, len) \
+ ((name) + round_down((len) - FS_CRYPTO_BLOCK_SIZE - 1, \
+ FS_CRYPTO_BLOCK_SIZE))
+
+#define FSCRYPT_FNAME_DIGEST_SIZE FS_CRYPTO_BLOCK_SIZE
+
+/**
+ * fscrypt_digested_name - alternate identifier for an on-disk filename
+ *
+ * When userspace lists an encrypted directory without access to the key,
+ * filenames whose ciphertext is longer than FSCRYPT_FNAME_MAX_UNDIGESTED_SIZE
+ * bytes are shown in this abbreviated form (base64-encoded) rather than as the
+ * full ciphertext (base64-encoded). This is necessary to allow supporting
+ * filenames up to NAME_MAX bytes, since base64 encoding expands the length.
+ *
+ * To make it possible for filesystems to still find the correct directory entry
+ * despite not knowing the full on-disk name, we encode any filesystem-specific
+ * 'hash' and/or 'minor_hash' which the filesystem may need for its lookups,
+ * followed by the second-to-last ciphertext block of the filename. Due to the
+ * use of the CBC-CTS encryption mode, the second-to-last ciphertext block
+ * depends on the full plaintext. (Note that ciphertext stealing causes the
+ * last two blocks to appear "flipped".) This makes accidental collisions very
+ * unlikely: just a 1 in 2^128 chance for two filenames to collide even if they
+ * share the same filesystem-specific hashes.
+ *
+ * However, this scheme isn't immune to intentional collisions, which can be
+ * created by anyone able to create arbitrary plaintext filenames and view them
+ * without the key. Making the "digest" be a real cryptographic hash like
+ * SHA-256 over the full ciphertext would prevent this, although it would be
+ * less efficient and harder to implement, especially since the filesystem would
+ * need to calculate it for each directory entry examined during a search.
+ */
+struct fscrypt_digested_name {
+ u32 hash;
+ u32 minor_hash;
+ u8 digest[FSCRYPT_FNAME_DIGEST_SIZE];
+};
+
+/**
+ * fscrypt_match_name() - test whether the given name matches a directory entry
+ * @fname: the name being searched for
+ * @de_name: the name from the directory entry
+ * @de_name_len: the length of @de_name in bytes
+ *
+ * Normally @fname->disk_name will be set, and in that case we simply compare
+ * that to the name stored in the directory entry. The only exception is that
+ * if we don't have the key for an encrypted directory and a filename in it is
+ * very long, then we won't have the full disk_name and we'll instead need to
+ * match against the fscrypt_digested_name.
+ *
+ * Return: %true if the name matches, otherwise %false.
+ */
+static inline bool fscrypt_match_name(const struct fscrypt_name *fname,
+ const u8 *de_name, u32 de_name_len)
+{
+ if (unlikely(!fname->disk_name.name)) {
+ const struct fscrypt_digested_name *n =
+ (const void *)fname->crypto_buf.name;
+ if (WARN_ON_ONCE(fname->usr_fname->name[0] != '_'))
+ return false;
+ if (de_name_len <= FSCRYPT_FNAME_MAX_UNDIGESTED_SIZE)
+ return false;
+ return !memcmp(FSCRYPT_FNAME_DIGEST(de_name, de_name_len),
+ n->digest, FSCRYPT_FNAME_DIGEST_SIZE);
+ }
+
+ if (de_name_len != fname->disk_name.len)
+ return false;
+ return !memcmp(de_name, fname->disk_name.name, fname->disk_name.len);
+}
+
/* bio.c */
extern void fscrypt_decrypt_bio_pages(struct fscrypt_ctx *, struct bio *);
extern void fscrypt_pullback_bio_page(struct page **, bool);
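fscrypt_match_name() gives directory searches one comparison helper covering both the normal case and the keyless long-name (digested) case. A hedged fragment of a lookup loop; the dirent layout (first_entry, next_entry(), de->name, de->name_len) is filesystem specific and purely illustrative here:

	for (de = first_entry; de; de = next_entry(de)) {
		if (fscrypt_match_name(fname, de->name, de->name_len))
			return de;	/* matching directory entry found */
	}
	return NULL;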
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 6d2a63e4ea52..473f088aabea 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -72,7 +72,7 @@ ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);
* CONTROL, SAVE_REGS, SAVE_REGS_IF_SUPPORTED, RECURSION_SAFE, STUB and
* IPMODIFY are a kind of attribute flags which can be set only before
* registering the ftrace_ops, and can not be modified while registered.
- * Changing those attribute flags after regsitering ftrace_ops will
+ * Changing those attribute flags after registering ftrace_ops will
* cause unexpected results.
*
* ENABLED - set/unset when ftrace_ops is registered/unregistered
diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h
index 933d93656605..8f702fcbe485 100644
--- a/include/linux/gpio/consumer.h
+++ b/include/linux/gpio/consumer.h
@@ -170,14 +170,14 @@ static inline struct gpio_desc *__must_check
gpiod_get_optional(struct device *dev, const char *con_id,
enum gpiod_flags flags)
{
- return ERR_PTR(-ENOSYS);
+ return NULL;
}
static inline struct gpio_desc *__must_check
gpiod_get_index_optional(struct device *dev, const char *con_id,
unsigned int index, enum gpiod_flags flags)
{
- return ERR_PTR(-ENOSYS);
+ return NULL;
}
static inline struct gpio_descs *__must_check
@@ -191,7 +191,7 @@ static inline struct gpio_descs *__must_check
gpiod_get_array_optional(struct device *dev, const char *con_id,
enum gpiod_flags flags)
{
- return ERR_PTR(-ENOSYS);
+ return NULL;
}
static inline void gpiod_put(struct gpio_desc *desc)
@@ -231,14 +231,14 @@ static inline struct gpio_desc *__must_check
devm_gpiod_get_optional(struct device *dev, const char *con_id,
enum gpiod_flags flags)
{
- return ERR_PTR(-ENOSYS);
+ return NULL;
}
static inline struct gpio_desc *__must_check
devm_gpiod_get_index_optional(struct device *dev, const char *con_id,
unsigned int index, enum gpiod_flags flags)
{
- return ERR_PTR(-ENOSYS);
+ return NULL;
}
static inline struct gpio_descs *__must_check
@@ -252,7 +252,7 @@ static inline struct gpio_descs *__must_check
devm_gpiod_get_array_optional(struct device *dev, const char *con_id,
enum gpiod_flags flags)
{
- return ERR_PTR(-ENOSYS);
+ return NULL;
}
static inline void devm_gpiod_put(struct device *dev, struct gpio_desc *desc)
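Returning NULL from the !GPIOLIB stubs mirrors what the real *_optional() getters return when the line is simply absent, so the usual consumer pattern keeps working unchanged when gpiolib is compiled out. A hedged sketch of that pattern; the "reset" con_id is illustrative:

	struct gpio_desc *reset;

	reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
	if (IS_ERR(reset))
		return PTR_ERR(reset);		/* real failure, e.g. -EPROBE_DEFER */
	if (reset)				/* NULL just means "not wired up" */
		gpiod_set_value_cansleep(reset, 1);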
diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h
index 846f3b989480..393582867afd 100644
--- a/include/linux/gpio/driver.h
+++ b/include/linux/gpio/driver.h
@@ -168,7 +168,7 @@ struct gpio_chip {
unsigned int irq_base;
irq_flow_handler_t irq_handler;
unsigned int irq_default_type;
- int irq_chained_parent;
+ unsigned int irq_chained_parent;
bool irq_nested;
bool irq_need_valid_mask;
unsigned long *irq_valid_mask;
@@ -244,12 +244,12 @@ int bgpio_init(struct gpio_chip *gc, struct device *dev,
void gpiochip_set_chained_irqchip(struct gpio_chip *gpiochip,
struct irq_chip *irqchip,
- int parent_irq,
+ unsigned int parent_irq,
irq_flow_handler_t parent_handler);
void gpiochip_set_nested_irqchip(struct gpio_chip *gpiochip,
struct irq_chip *irqchip,
- int parent_irq);
+ unsigned int parent_irq);
int gpiochip_irqchip_add_key(struct gpio_chip *gpiochip,
struct irq_chip *irqchip,
diff --git a/include/linux/gpio/gpio-reg.h b/include/linux/gpio/gpio-reg.h
new file mode 100644
index 000000000000..90e0b9060e6d
--- /dev/null
+++ b/include/linux/gpio/gpio-reg.h
@@ -0,0 +1,13 @@
+#ifndef GPIO_REG_H
+#define GPIO_REG_H
+
+struct device;
+struct irq_domain;
+
+struct gpio_chip *gpio_reg_init(struct device *dev, void __iomem *reg,
+ int base, int num, const char *label, u32 direction, u32 def_out,
+ const char *const *names, struct irq_domain *irqdom, const int *irqs);
+
+int gpio_reg_resume(struct gpio_chip *gc);
+
+#endif
diff --git a/include/linux/hid-sensor-hub.h b/include/linux/hid-sensor-hub.h
index 7ef111d3ecc5..f32d7c392c1e 100644
--- a/include/linux/hid-sensor-hub.h
+++ b/include/linux/hid-sensor-hub.h
@@ -231,6 +231,8 @@ struct hid_sensor_common {
unsigned usage_id;
atomic_t data_ready;
atomic_t user_requested_state;
+ int poll_interval;
+ int raw_hystersis;
struct iio_trigger *trigger;
int timestamp_ns_scale;
struct hid_sensor_hub_attribute_info poll;
diff --git a/include/linux/hid-sensor-ids.h b/include/linux/hid-sensor-ids.h
index 30c7dc45e45f..761f86242473 100644
--- a/include/linux/hid-sensor-ids.h
+++ b/include/linux/hid-sensor-ids.h
@@ -45,6 +45,14 @@
#define HID_USAGE_SENSOR_DATA_ATMOSPHERIC_PRESSURE 0x200430
#define HID_USAGE_SENSOR_ATMOSPHERIC_PRESSURE 0x200431
+/* Temperature (200033) */
+#define HID_USAGE_SENSOR_TEMPERATURE 0x200033
+#define HID_USAGE_SENSOR_DATA_ENVIRONMENTAL_TEMPERATURE 0x200434
+
+/* humidity */
+#define HID_USAGE_SENSOR_HUMIDITY 0x200032
+#define HID_USAGE_SENSOR_ATMOSPHERIC_HUMIDITY 0x200433
+
/* Gyro 3D: (200076) */
#define HID_USAGE_SENSOR_GYRO_3D 0x200076
#define HID_USAGE_SENSOR_DATA_ANGL_VELOCITY 0x200456
diff --git a/include/linux/host1x.h b/include/linux/host1x.h
index 1ffbf2a8cb99..3d04aa1dc83e 100644
--- a/include/linux/host1x.h
+++ b/include/linux/host1x.h
@@ -26,6 +26,7 @@ enum host1x_class {
HOST1X_CLASS_HOST1X = 0x1,
HOST1X_CLASS_GR2D = 0x51,
HOST1X_CLASS_GR2D_SB = 0x52,
+ HOST1X_CLASS_VIC = 0x5D,
HOST1X_CLASS_GR3D = 0x60,
};
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index 0c170a3f0d8b..e09fc8290c2f 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -491,6 +491,12 @@ struct vmbus_channel_rescind_offer {
u32 child_relid;
} __packed;
+static inline u32
+hv_ringbuffer_pending_size(const struct hv_ring_buffer_info *rbi)
+{
+ return rbi->ring_buffer->pending_send_sz;
+}
+
/*
* Request Offer -- no parameters, SynIC message contains the partition ID
* Set Snoop -- no parameters, SynIC message contains the partition ID
@@ -524,10 +530,10 @@ struct vmbus_channel_open_channel {
u32 target_vp;
/*
- * The upstream ring buffer begins at offset zero in the memory
- * described by RingBufferGpadlHandle. The downstream ring buffer
- * follows it at this offset (in pages).
- */
+ * The upstream ring buffer begins at offset zero in the memory
+ * described by RingBufferGpadlHandle. The downstream ring buffer
+ * follows it at this offset (in pages).
+ */
u32 downstream_ringbuffer_pageoffset;
/* User-specific data to be passed along to the server endpoint. */
@@ -1013,7 +1019,7 @@ extern int vmbus_open(struct vmbus_channel *channel,
u32 recv_ringbuffersize,
void *userdata,
u32 userdatalen,
- void(*onchannel_callback)(void *context),
+ void (*onchannel_callback)(void *context),
void *context);
extern void vmbus_close(struct vmbus_channel *channel);
@@ -1155,6 +1161,17 @@ static inline void *hv_get_drvdata(struct hv_device *dev)
return dev_get_drvdata(&dev->device);
}
+struct hv_ring_buffer_debug_info {
+ u32 current_interrupt_mask;
+ u32 current_read_index;
+ u32 current_write_index;
+ u32 bytes_avail_toread;
+ u32 bytes_avail_towrite;
+};
+
+void hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info,
+ struct hv_ring_buffer_debug_info *debug_info);
+
/* Vmbus interface */
#define vmbus_driver_register(driver) \
__vmbus_driver_register(driver, THIS_MODULE, KBUILD_MODNAME)
@@ -1428,7 +1445,7 @@ struct hyperv_service_callback {
char *log_msg;
uuid_le data;
struct vmbus_channel *channel;
- void (*callback) (void *context);
+ void (*callback)(void *context);
};
#define MAX_SRV_VER 0x7ffffff
@@ -1504,8 +1521,6 @@ static inline void hv_signal_on_read(struct vmbus_channel *channel)
cached_write_sz = hv_get_cached_bytes_to_write(rbi);
if (cached_write_sz < pending_sz)
vmbus_setevent(channel);
-
- return;
}
/*
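hv_ringbuffer_get_debuginfo() snapshots the ring-buffer counters into the caller-supplied struct hv_ring_buffer_debug_info. A hedged sketch, assuming a struct vmbus_channel *channel whose inbound ring is already set up:

	struct hv_ring_buffer_debug_info dbg;

	hv_ringbuffer_get_debuginfo(&channel->inbound, &dbg);
	pr_debug("inbound: %u bytes readable, %u bytes writable\n",
		 dbg.bytes_avail_toread, dbg.bytes_avail_towrite);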
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index c573a52ae440..485a5b48f038 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -30,6 +30,8 @@
#include <linux/mmu_notifier.h>
#include <linux/list.h>
#include <linux/iommu.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+
#include <asm/cacheflush.h>
#include <asm/iommu.h>
@@ -72,24 +74,8 @@
#define OFFSET_STRIDE (9)
-#ifdef CONFIG_64BIT
#define dmar_readq(a) readq(a)
#define dmar_writeq(a,v) writeq(v,a)
-#else
-static inline u64 dmar_readq(void __iomem *addr)
-{
- u32 lo, hi;
- lo = readl(addr);
- hi = readl(addr + 4);
- return (((u64) hi) << 32) + lo;
-}
-
-static inline void dmar_writeq(void __iomem *addr, u64 val)
-{
- writel((u32)val, addr);
- writel((u32)(val >> 32), addr + 4);
-}
-#endif
#define DMAR_VER_MAJOR(v) (((v) & 0xf0) >> 4)
#define DMAR_VER_MINOR(v) ((v) & 0x0f)
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 53144e78a369..a6fba4804672 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -155,7 +155,7 @@ extern int __must_check
request_percpu_irq(unsigned int irq, irq_handler_t handler,
const char *devname, void __percpu *percpu_dev_id);
-extern void free_irq(unsigned int, void *);
+extern const void *free_irq(unsigned int, void *);
extern void free_percpu_irq(unsigned int, void __percpu *);
struct device;
diff --git a/include/linux/io.h b/include/linux/io.h
index 82ef36eac8a1..2195d9ea4aaa 100644
--- a/include/linux/io.h
+++ b/include/linux/io.h
@@ -90,6 +90,27 @@ void devm_memunmap(struct device *dev, void *addr);
void *__devm_memremap_pages(struct device *dev, struct resource *res);
+#ifdef CONFIG_PCI
+/*
+ * The PCI specifications (Rev 3.0, 3.2.5 "Transaction Ordering and
+ * Posting") mandate non-posted configuration transactions. There is
+ * no ioremap API in the kernel that can guarantee non-posted write
+ * semantics across arches so provide a default implementation for
+ * mapping PCI config space that defaults to ioremap_nocache(); arches
+ * should override it if they have memory mapping implementations that
+ * guarantee non-posted write semantics to make the memory mapping
+ * compliant with the PCI specification.
+ */
+#ifndef pci_remap_cfgspace
+#define pci_remap_cfgspace pci_remap_cfgspace
+static inline void __iomem *pci_remap_cfgspace(phys_addr_t offset,
+ size_t size)
+{
+ return ioremap_nocache(offset, size);
+}
+#endif
+#endif
+
/*
* Some systems do not have legacy ISA devices.
* /dev/port is not a valid interface on these systems.
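PCI host controller drivers are meant to map configuration space through this helper rather than plain ioremap(), so architectures that can enforce non-posted writes get the chance to do so. A minimal sketch, assuming a struct resource *res that describes the config window:

	void __iomem *cfg_base;

	cfg_base = pci_remap_cfgspace(res->start, resource_size(res));
	if (!cfg_base)
		return -ENOMEM;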
diff --git a/include/linux/iomap.h b/include/linux/iomap.h
index 7291810067eb..f753e788da31 100644
--- a/include/linux/iomap.h
+++ b/include/linux/iomap.h
@@ -41,6 +41,7 @@ struct iomap {
u16 type; /* type of mapping */
u16 flags; /* flags for mapping */
struct block_device *bdev; /* block device for I/O */
+ struct dax_device *dax_dev; /* dax_dev for dax operations */
};
/*
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 2e4de0deee53..2cb54adc4a33 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -19,12 +19,12 @@
#ifndef __LINUX_IOMMU_H
#define __LINUX_IOMMU_H
+#include <linux/scatterlist.h>
+#include <linux/device.h>
+#include <linux/types.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/of.h>
-#include <linux/types.h>
-#include <linux/scatterlist.h>
-#include <trace/events/iommu.h>
#define IOMMU_READ (1 << 0)
#define IOMMU_WRITE (1 << 1)
@@ -32,10 +32,13 @@
#define IOMMU_NOEXEC (1 << 3)
#define IOMMU_MMIO (1 << 4) /* e.g. things like MSI doorbells */
/*
- * This is to make the IOMMU API setup privileged
- * mapppings accessible by the master only at higher
- * privileged execution level and inaccessible at
- * less privileged levels.
+ * Where the bus hardware includes a privilege level as part of its access type
+ * markings, and certain devices are capable of issuing transactions marked as
+ * either 'supervisor' or 'user', the IOMMU_PRIV flag requests that the other
+ * given permission flags only apply to accesses at the higher privilege level,
+ * and that unprivileged transactions should have as little access as possible.
+ * This would usually imply the same permissions as kernel mappings on the CPU,
+ * if the IOMMU page table format is equivalent.
*/
#define IOMMU_PRIV (1 << 5)
@@ -336,46 +339,9 @@ extern int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr,
phys_addr_t offset, u64 size,
int prot);
extern void iommu_domain_window_disable(struct iommu_domain *domain, u32 wnd_nr);
-/**
- * report_iommu_fault() - report about an IOMMU fault to the IOMMU framework
- * @domain: the iommu domain where the fault has happened
- * @dev: the device where the fault has happened
- * @iova: the faulting address
- * @flags: mmu fault flags (e.g. IOMMU_FAULT_READ/IOMMU_FAULT_WRITE/...)
- *
- * This function should be called by the low-level IOMMU implementations
- * whenever IOMMU faults happen, to allow high-level users, that are
- * interested in such events, to know about them.
- *
- * This event may be useful for several possible use cases:
- * - mere logging of the event
- * - dynamic TLB/PTE loading
- * - if restarting of the faulting device is required
- *
- * Returns 0 on success and an appropriate error code otherwise (if dynamic
- * PTE/TLB loading will one day be supported, implementations will be able
- * to tell whether it succeeded or not according to this return value).
- *
- * Specifically, -ENOSYS is returned if a fault handler isn't installed
- * (though fault handlers can also return -ENOSYS, in case they want to
- * elicit the default behavior of the IOMMU drivers).
- */
-static inline int report_iommu_fault(struct iommu_domain *domain,
- struct device *dev, unsigned long iova, int flags)
-{
- int ret = -ENOSYS;
-
- /*
- * if upper layers showed interest and installed a fault handler,
- * invoke it.
- */
- if (domain->handler)
- ret = domain->handler(domain, dev, iova, flags,
- domain->handler_token);
- trace_io_page_fault(dev, iova, flags);
- return ret;
-}
+extern int report_iommu_fault(struct iommu_domain *domain, struct device *dev,
+ unsigned long iova, int flags);
static inline size_t iommu_map_sg(struct iommu_domain *domain,
unsigned long iova, struct scatterlist *sg,
diff --git a/include/linux/iova.h b/include/linux/iova.h
index f27bb2c62fca..e0a892ae45c0 100644
--- a/include/linux/iova.h
+++ b/include/linux/iova.h
@@ -82,6 +82,7 @@ static inline unsigned long iova_pfn(struct iova_domain *iovad, dma_addr_t iova)
return iova >> iova_shift(iovad);
}
+#if IS_ENABLED(CONFIG_IOMMU_IOVA)
int iova_cache_get(void);
void iova_cache_put(void);
@@ -106,5 +107,95 @@ void put_iova_domain(struct iova_domain *iovad);
struct iova *split_and_remove_iova(struct iova_domain *iovad,
struct iova *iova, unsigned long pfn_lo, unsigned long pfn_hi);
void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad);
+#else
+static inline int iova_cache_get(void)
+{
+ return -ENOTSUPP;
+}
+
+static inline void iova_cache_put(void)
+{
+}
+
+static inline struct iova *alloc_iova_mem(void)
+{
+ return NULL;
+}
+
+static inline void free_iova_mem(struct iova *iova)
+{
+}
+
+static inline void free_iova(struct iova_domain *iovad, unsigned long pfn)
+{
+}
+
+static inline void __free_iova(struct iova_domain *iovad, struct iova *iova)
+{
+}
+
+static inline struct iova *alloc_iova(struct iova_domain *iovad,
+ unsigned long size,
+ unsigned long limit_pfn,
+ bool size_aligned)
+{
+ return NULL;
+}
+
+static inline void free_iova_fast(struct iova_domain *iovad,
+ unsigned long pfn,
+ unsigned long size)
+{
+}
+
+static inline unsigned long alloc_iova_fast(struct iova_domain *iovad,
+ unsigned long size,
+ unsigned long limit_pfn)
+{
+ return 0;
+}
+
+static inline struct iova *reserve_iova(struct iova_domain *iovad,
+ unsigned long pfn_lo,
+ unsigned long pfn_hi)
+{
+ return NULL;
+}
+
+static inline void copy_reserved_iova(struct iova_domain *from,
+ struct iova_domain *to)
+{
+}
+
+static inline void init_iova_domain(struct iova_domain *iovad,
+ unsigned long granule,
+ unsigned long start_pfn,
+ unsigned long pfn_32bit)
+{
+}
+
+static inline struct iova *find_iova(struct iova_domain *iovad,
+ unsigned long pfn)
+{
+ return NULL;
+}
+
+static inline void put_iova_domain(struct iova_domain *iovad)
+{
+}
+
+static inline struct iova *split_and_remove_iova(struct iova_domain *iovad,
+ struct iova *iova,
+ unsigned long pfn_lo,
+ unsigned long pfn_hi)
+{
+ return NULL;
+}
+
+static inline void free_cpu_cached_iovas(unsigned int cpu,
+ struct iova_domain *iovad)
+{
+}
+#endif
#endif
diff --git a/include/linux/ipc.h b/include/linux/ipc.h
index 9d84942ae2e5..71fd92d81b26 100644
--- a/include/linux/ipc.h
+++ b/include/linux/ipc.h
@@ -8,8 +8,7 @@
#define IPCMNI 32768 /* <= MAX_INT limit for ipc arrays (including sysctl changes) */
/* used by in-kernel data structures */
-struct kern_ipc_perm
-{
+struct kern_ipc_perm {
spinlock_t lock;
bool deleted;
int id;
@@ -18,9 +17,9 @@ struct kern_ipc_perm
kgid_t gid;
kuid_t cuid;
kgid_t cgid;
- umode_t mode;
+ umode_t mode;
unsigned long seq;
void *security;
-};
+} ____cacheline_aligned_in_smp;
#endif /* _LINUX_IPC_H */
diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
index 624215cebee5..36872fbb815d 100644
--- a/include/linux/jiffies.h
+++ b/include/linux/jiffies.h
@@ -1,6 +1,7 @@
#ifndef _LINUX_JIFFIES_H
#define _LINUX_JIFFIES_H
+#include <linux/cache.h>
#include <linux/math64.h>
#include <linux/kernel.h>
#include <linux/types.h>
@@ -63,19 +64,13 @@ extern int register_refined_jiffies(long clock_tick_rate);
/* TICK_USEC is the time between ticks in usec assuming fake USER_HZ */
#define TICK_USEC ((1000000UL + USER_HZ/2) / USER_HZ)
-/* some arch's have a small-data section that can be accessed register-relative
- * but that can only take up to, say, 4-byte variables. jiffies being part of
- * an 8-byte variable may not be correctly accessed unless we force the issue
- */
-#define __jiffy_data __attribute__((section(".data")))
-
/*
* The 64-bit value is not atomic - you MUST NOT read it
* without sampling the sequence number in jiffies_lock.
* get_jiffies_64() will do this for you as appropriate.
*/
-extern u64 __jiffy_data jiffies_64;
-extern unsigned long volatile __jiffy_data jiffies;
+extern u64 __cacheline_aligned_in_smp jiffies_64;
+extern unsigned long volatile __cacheline_aligned_in_smp jiffies;
#if (BITS_PER_LONG < 64)
u64 get_jiffies_64(void);
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index d419d0e51fe5..c9481ebcbc0c 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -14,17 +14,15 @@
#if !defined(__ASSEMBLY__)
+#include <linux/crash_core.h>
#include <asm/io.h>
#include <uapi/linux/kexec.h>
#ifdef CONFIG_KEXEC_CORE
#include <linux/list.h>
-#include <linux/linkage.h>
#include <linux/compat.h>
#include <linux/ioport.h>
-#include <linux/elfcore.h>
-#include <linux/elf.h>
#include <linux/module.h>
#include <asm/kexec.h>
@@ -62,19 +60,15 @@
#define KEXEC_CRASH_MEM_ALIGN PAGE_SIZE
#endif
-#define KEXEC_NOTE_HEAD_BYTES ALIGN(sizeof(struct elf_note), 4)
-#define KEXEC_CORE_NOTE_NAME "CORE"
-#define KEXEC_CORE_NOTE_NAME_BYTES ALIGN(sizeof(KEXEC_CORE_NOTE_NAME), 4)
-#define KEXEC_CORE_NOTE_DESC_BYTES ALIGN(sizeof(struct elf_prstatus), 4)
+#define KEXEC_CORE_NOTE_NAME CRASH_CORE_NOTE_NAME
+
/*
* The per-cpu notes area is a list of notes terminated by a "NULL"
* note header. For kdump, the code in vmcore.c runs in the context
* of the second kernel to combine them into one note.
*/
#ifndef KEXEC_NOTE_BYTES
-#define KEXEC_NOTE_BYTES ( (KEXEC_NOTE_HEAD_BYTES * 2) + \
- KEXEC_CORE_NOTE_NAME_BYTES + \
- KEXEC_CORE_NOTE_DESC_BYTES )
+#define KEXEC_NOTE_BYTES CRASH_CORE_NOTE_BYTES
#endif
/*
@@ -256,33 +250,6 @@ extern void crash_kexec(struct pt_regs *);
int kexec_should_crash(struct task_struct *);
int kexec_crash_loaded(void);
void crash_save_cpu(struct pt_regs *regs, int cpu);
-void crash_save_vmcoreinfo(void);
-void arch_crash_save_vmcoreinfo(void);
-__printf(1, 2)
-void vmcoreinfo_append_str(const char *fmt, ...);
-phys_addr_t paddr_vmcoreinfo_note(void);
-
-#define VMCOREINFO_OSRELEASE(value) \
- vmcoreinfo_append_str("OSRELEASE=%s\n", value)
-#define VMCOREINFO_PAGESIZE(value) \
- vmcoreinfo_append_str("PAGESIZE=%ld\n", value)
-#define VMCOREINFO_SYMBOL(name) \
- vmcoreinfo_append_str("SYMBOL(%s)=%lx\n", #name, (unsigned long)&name)
-#define VMCOREINFO_SIZE(name) \
- vmcoreinfo_append_str("SIZE(%s)=%lu\n", #name, \
- (unsigned long)sizeof(name))
-#define VMCOREINFO_STRUCT_SIZE(name) \
- vmcoreinfo_append_str("SIZE(%s)=%lu\n", #name, \
- (unsigned long)sizeof(struct name))
-#define VMCOREINFO_OFFSET(name, field) \
- vmcoreinfo_append_str("OFFSET(%s.%s)=%lu\n", #name, #field, \
- (unsigned long)offsetof(struct name, field))
-#define VMCOREINFO_LENGTH(name, value) \
- vmcoreinfo_append_str("LENGTH(%s)=%lu\n", #name, (unsigned long)value)
-#define VMCOREINFO_NUMBER(name) \
- vmcoreinfo_append_str("NUMBER(%s)=%ld\n", #name, (long)name)
-#define VMCOREINFO_CONFIG(name) \
- vmcoreinfo_append_str("CONFIG_%s=y\n", #name)
extern struct kimage *kexec_image;
extern struct kimage *kexec_crash_image;
@@ -303,31 +270,15 @@ extern int kexec_load_disabled;
#define KEXEC_FILE_FLAGS (KEXEC_FILE_UNLOAD | KEXEC_FILE_ON_CRASH | \
KEXEC_FILE_NO_INITRAMFS)
-#define VMCOREINFO_BYTES (4096)
-#define VMCOREINFO_NOTE_NAME "VMCOREINFO"
-#define VMCOREINFO_NOTE_NAME_BYTES ALIGN(sizeof(VMCOREINFO_NOTE_NAME), 4)
-#define VMCOREINFO_NOTE_SIZE (KEXEC_NOTE_HEAD_BYTES*2 + VMCOREINFO_BYTES \
- + VMCOREINFO_NOTE_NAME_BYTES)
-
/* Location of a reserved region to hold the crash kernel.
*/
extern struct resource crashk_res;
extern struct resource crashk_low_res;
-typedef u32 note_buf_t[KEXEC_NOTE_BYTES/4];
extern note_buf_t __percpu *crash_notes;
-extern u32 vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4];
-extern size_t vmcoreinfo_size;
-extern size_t vmcoreinfo_max_size;
/* flag to track if kexec reboot is in progress */
extern bool kexec_in_progress;
-int __init parse_crashkernel(char *cmdline, unsigned long long system_ram,
- unsigned long long *crash_size, unsigned long long *crash_base);
-int parse_crashkernel_high(char *cmdline, unsigned long long system_ram,
- unsigned long long *crash_size, unsigned long long *crash_base);
-int parse_crashkernel_low(char *cmdline, unsigned long long system_ram,
- unsigned long long *crash_size, unsigned long long *crash_base);
int crash_shrink_memory(unsigned long new_size);
size_t crash_get_memory_size(void);
void crash_free_reserved_phys_range(unsigned long begin, unsigned long end);
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index 47e4da5b4fa2..30f90c1a0aaf 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -381,6 +381,7 @@ static inline struct kprobe_ctlblk *get_kprobe_ctlblk(void)
return this_cpu_ptr(&kprobe_ctlblk);
}
+kprobe_opcode_t *kprobe_lookup_name(const char *name, unsigned int offset);
int register_kprobe(struct kprobe *p);
void unregister_kprobe(struct kprobe *p);
int register_kprobes(struct kprobe **kps, int num);
diff --git a/include/linux/kref.h b/include/linux/kref.h
index f4156f88f557..29220724bf1c 100644
--- a/include/linux/kref.h
+++ b/include/linux/kref.h
@@ -66,8 +66,6 @@ static inline void kref_get(struct kref *kref)
*/
static inline int kref_put(struct kref *kref, void (*release)(struct kref *kref))
{
- WARN_ON(release == NULL);
-
if (refcount_dec_and_test(&kref->refcount)) {
release(kref);
return 1;
@@ -79,8 +77,6 @@ static inline int kref_put_mutex(struct kref *kref,
void (*release)(struct kref *kref),
struct mutex *lock)
{
- WARN_ON(release == NULL);
-
if (refcount_dec_and_mutex_lock(&kref->refcount, lock)) {
release(kref);
return 1;
@@ -92,8 +88,6 @@ static inline int kref_put_lock(struct kref *kref,
void (*release)(struct kref *kref),
spinlock_t *lock)
{
- WARN_ON(release == NULL);
-
if (refcount_dec_and_lock(&kref->refcount, lock)) {
release(kref);
return 1;
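With the WARN_ON(release == NULL) checks dropped, supplying a valid release callback is entirely the caller's contract; a NULL release now faults rather than warns. The conventional pattern looks like the following sketch, with an illustrative struct:

struct foo {
	struct kref refcount;
	/* payload ... */
};

static void foo_release(struct kref *kref)
{
	struct foo *f = container_of(kref, struct foo, refcount);

	kfree(f);
}

static void foo_put(struct foo *f)
{
	/* foo_release() runs when the count reaches zero */
	kref_put(&f->refcount, foo_release);
}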
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index d0250744507a..2b12b2683359 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -115,14 +115,17 @@ static inline bool is_error_page(struct page *page)
return IS_ERR(page);
}
+#define KVM_REQUEST_MASK GENMASK(7,0)
+#define KVM_REQUEST_NO_WAKEUP BIT(8)
+#define KVM_REQUEST_WAIT BIT(9)
/*
* Architecture-independent vcpu->requests bit members
* Bits 4-7 are reserved for more arch-independent bits.
*/
-#define KVM_REQ_TLB_FLUSH 0
-#define KVM_REQ_MMU_RELOAD 1
-#define KVM_REQ_PENDING_TIMER 2
-#define KVM_REQ_UNHALT 3
+#define KVM_REQ_TLB_FLUSH (0 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
+#define KVM_REQ_MMU_RELOAD (1 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
+#define KVM_REQ_PENDING_TIMER 2
+#define KVM_REQ_UNHALT 3
#define KVM_USERSPACE_IRQ_SOURCE_ID 0
#define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID 1
@@ -268,6 +271,12 @@ struct kvm_vcpu {
static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu)
{
+ /*
+ * The memory barrier ensures a previous write to vcpu->requests cannot
+ * be reordered with the read of vcpu->mode. It pairs with the general
+ * memory barrier following the write of vcpu->mode in VCPU RUN.
+ */
+ smp_mb__before_atomic();
return cmpxchg(&vcpu->mode, IN_GUEST_MODE, EXITING_GUEST_MODE);
}
@@ -375,8 +384,6 @@ struct kvm {
struct mutex slots_lock;
struct mm_struct *mm; /* userspace tied to this vm */
struct kvm_memslots *memslots[KVM_ADDRESS_SPACE_NUM];
- struct srcu_struct srcu;
- struct srcu_struct irq_srcu;
struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
/*
@@ -403,7 +410,7 @@ struct kvm {
struct kvm_vm_stat stat;
struct kvm_arch arch;
refcount_t users_count;
-#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
+#ifdef CONFIG_KVM_MMIO
struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
spinlock_t ring_lock;
struct list_head coalesced_zones;
@@ -429,6 +436,8 @@ struct kvm {
struct list_head devices;
struct dentry *debugfs_dentry;
struct kvm_stat_data **debugfs_stat_data;
+ struct srcu_struct srcu;
+ struct srcu_struct irq_srcu;
};
#define kvm_err(fmt, ...) \
@@ -502,10 +511,10 @@ int __must_check vcpu_load(struct kvm_vcpu *vcpu);
void vcpu_put(struct kvm_vcpu *vcpu);
#ifdef __KVM_HAVE_IOAPIC
-void kvm_vcpu_request_scan_ioapic(struct kvm *kvm);
+void kvm_arch_post_irq_ack_notifier_list_update(struct kvm *kvm);
void kvm_arch_post_irq_routing_update(struct kvm *kvm);
#else
-static inline void kvm_vcpu_request_scan_ioapic(struct kvm *kvm)
+static inline void kvm_arch_post_irq_ack_notifier_list_update(struct kvm *kvm)
{
}
static inline void kvm_arch_post_irq_routing_update(struct kvm *kvm)
@@ -641,18 +650,18 @@ int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
unsigned long len);
int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
-int kvm_vcpu_read_guest_cached(struct kvm_vcpu *vcpu, struct gfn_to_hva_cache *ghc,
- void *data, unsigned long len);
+int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
+ void *data, unsigned long len);
int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
int offset, int len);
int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
unsigned long len);
-int kvm_vcpu_write_guest_cached(struct kvm_vcpu *v, struct gfn_to_hva_cache *ghc,
- void *data, unsigned long len);
-int kvm_vcpu_write_guest_offset_cached(struct kvm_vcpu *v, struct gfn_to_hva_cache *ghc,
- void *data, int offset, unsigned long len);
-int kvm_vcpu_gfn_to_hva_cache_init(struct kvm_vcpu *v, struct gfn_to_hva_cache *ghc,
- gpa_t gpa, unsigned long len);
+int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
+ void *data, unsigned long len);
+int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
+ void *data, int offset, unsigned long len);
+int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
+ gpa_t gpa, unsigned long len);
int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
@@ -682,7 +691,7 @@ void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn);
void kvm_vcpu_block(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu);
-void kvm_vcpu_wake_up(struct kvm_vcpu *vcpu);
+bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu);
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
int kvm_vcpu_yield_to(struct kvm_vcpu *target);
void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu);
@@ -767,8 +776,6 @@ void kvm_arch_check_processor_compat(void *rtn);
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);
-void *kvm_kvzalloc(unsigned long size);
-
#ifndef __KVM_HAVE_ARCH_VM_ALLOC
static inline struct kvm *kvm_arch_alloc_vm(void)
{
@@ -877,22 +884,6 @@ void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
int kvm_request_irq_source_id(struct kvm *kvm);
void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);
-#ifdef CONFIG_KVM_DEVICE_ASSIGNMENT
-int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
-void kvm_iommu_unmap_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
-#else
-static inline int kvm_iommu_map_pages(struct kvm *kvm,
- struct kvm_memory_slot *slot)
-{
- return 0;
-}
-
-static inline void kvm_iommu_unmap_pages(struct kvm *kvm,
- struct kvm_memory_slot *slot)
-{
-}
-#endif
-
/*
* search_memslots() and __gfn_to_memslot() are here because they are
* used in non-modular code in arch/powerpc/kvm/book3s_hv_rm_mmu.c.
@@ -1025,6 +1016,7 @@ static inline int mmu_notifier_retry(struct kvm *kvm, unsigned long mmu_seq)
#define KVM_MAX_IRQ_ROUTES 1024
#endif
+bool kvm_arch_can_set_irq_routing(struct kvm *kvm);
int kvm_set_irq_routing(struct kvm *kvm,
const struct kvm_irq_routing_entry *entries,
unsigned nr,
@@ -1092,13 +1084,23 @@ static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
* caller. Paired with the smp_mb__after_atomic in kvm_check_request.
*/
smp_wmb();
- set_bit(req, &vcpu->requests);
+ set_bit(req & KVM_REQUEST_MASK, &vcpu->requests);
+}
+
+static inline bool kvm_test_request(int req, struct kvm_vcpu *vcpu)
+{
+ return test_bit(req & KVM_REQUEST_MASK, &vcpu->requests);
+}
+
+static inline void kvm_clear_request(int req, struct kvm_vcpu *vcpu)
+{
+ clear_bit(req & KVM_REQUEST_MASK, &vcpu->requests);
}
static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
{
- if (test_bit(req, &vcpu->requests)) {
- clear_bit(req, &vcpu->requests);
+ if (kvm_test_request(req, vcpu)) {
+ kvm_clear_request(req, vcpu);
/*
* Ensure the rest of the request is visible to kvm_check_request's
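Request numbers can now carry KVM_REQUEST_WAIT/KVM_REQUEST_NO_WAKEUP in their upper bits; the accessors strip them with KVM_REQUEST_MASK, so existing call sites stay unchanged. A hedged sketch of the producer/consumer pairing; the handler name is illustrative:

	/* producer: ask a vCPU to flush its TLB (flag bits are masked off) */
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);

	/* consumer, on the vCPU entry path: test-and-clear the request */
	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
		flush_guest_tlb(vcpu);	/* illustrative handler */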
diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h
index 77e7af32543f..6c807017128d 100644
--- a/include/linux/libnvdimm.h
+++ b/include/linux/libnvdimm.h
@@ -20,9 +20,11 @@
enum {
/* when a dimm supports both PMEM and BLK access a label is required */
- NDD_ALIASING = 1 << 0,
+ NDD_ALIASING = 0,
/* unarmed memory devices may not persist writes */
- NDD_UNARMED = 1 << 1,
+ NDD_UNARMED = 1,
+ /* locked memory devices should not be accessed */
+ NDD_LOCKED = 2,
/* need to set a limit somewhere, but yes, this is likely overkill */
ND_IOCTL_MAX_BUFLEN = SZ_4M,
@@ -120,7 +122,7 @@ static inline struct nd_blk_region_desc *to_blk_region_desc(
}
int nvdimm_bus_add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length);
-void nvdimm_clear_from_poison_list(struct nvdimm_bus *nvdimm_bus,
+void nvdimm_forget_poison(struct nvdimm_bus *nvdimm_bus,
phys_addr_t start, unsigned int len);
struct nvdimm_bus *nvdimm_bus_register(struct device *parent,
struct nvdimm_bus_descriptor *nfit_desc);
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index bdfc65af4152..4ce24a376262 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -93,6 +93,7 @@ int memblock_mark_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_clear_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_mark_mirror(phys_addr_t base, phys_addr_t size);
int memblock_mark_nomap(phys_addr_t base, phys_addr_t size);
+int memblock_clear_nomap(phys_addr_t base, phys_addr_t size);
ulong choose_memblock_flags(void);
/* Low level functions */
@@ -335,6 +336,7 @@ phys_addr_t memblock_mem_size(unsigned long limit_pfn);
phys_addr_t memblock_start_of_DRAM(void);
phys_addr_t memblock_end_of_DRAM(void);
void memblock_enforce_memory_limit(phys_addr_t memory_limit);
+void memblock_cap_memory_range(phys_addr_t base, phys_addr_t size);
void memblock_mem_limit_remove_map(phys_addr_t limit);
bool memblock_is_memory(phys_addr_t addr);
int memblock_is_map_memory(phys_addr_t addr);
diff --git a/include/linux/mfd/stm32-timers.h b/include/linux/mfd/stm32-timers.h
index d0300045f04a..4a0abbc10ef6 100644
--- a/include/linux/mfd/stm32-timers.h
+++ b/include/linux/mfd/stm32-timers.h
@@ -21,6 +21,7 @@
#define TIM_CCMR1 0x18 /* Capt/Comp 1 Mode Reg */
#define TIM_CCMR2 0x1C /* Capt/Comp 2 Mode Reg */
#define TIM_CCER 0x20 /* Capt/Comp Enable Reg */
+#define TIM_CNT 0x24 /* Counter */
#define TIM_PSC 0x28 /* Prescaler */
#define TIM_ARR 0x2c /* Auto-Reload Register */
#define TIM_CCR1 0x34 /* Capt/Comp Register 1 */
@@ -30,6 +31,7 @@
#define TIM_BDTR 0x44 /* Break and Dead-Time Reg */
#define TIM_CR1_CEN BIT(0) /* Counter Enable */
+#define TIM_CR1_DIR BIT(4) /* Counter Direction */
#define TIM_CR1_ARPE BIT(7) /* Auto-reload Preload Ena */
#define TIM_CR2_MMS (BIT(4) | BIT(5) | BIT(6)) /* Master mode selection */
#define TIM_SMCR_SMS (BIT(0) | BIT(1) | BIT(2)) /* Slave mode selection */
diff --git a/include/linux/mfd/sun4i-gpadc.h b/include/linux/mfd/sun4i-gpadc.h
index d7a29f246d64..139872c2e0fe 100644
--- a/include/linux/mfd/sun4i-gpadc.h
+++ b/include/linux/mfd/sun4i-gpadc.h
@@ -28,6 +28,7 @@
#define SUN4I_GPADC_CTRL1_TP_MODE_EN BIT(4)
#define SUN4I_GPADC_CTRL1_TP_ADC_SELECT BIT(3)
#define SUN4I_GPADC_CTRL1_ADC_CHAN_SELECT(x) (GENMASK(2, 0) & (x))
+#define SUN4I_GPADC_CTRL1_ADC_CHAN_MASK GENMASK(2, 0)
/* TP_CTRL1 bits for sun6i SOCs */
#define SUN6I_GPADC_CTRL1_TOUCH_PAN_CALI_EN BIT(7)
@@ -35,6 +36,11 @@
#define SUN6I_GPADC_CTRL1_TP_MODE_EN BIT(5)
#define SUN6I_GPADC_CTRL1_TP_ADC_SELECT BIT(4)
#define SUN6I_GPADC_CTRL1_ADC_CHAN_SELECT(x) (GENMASK(3, 0) & BIT(x))
+#define SUN6I_GPADC_CTRL1_ADC_CHAN_MASK GENMASK(3, 0)
+
+/* TP_CTRL1 bits for sun8i SoCs */
+#define SUN8I_GPADC_CTRL1_CHOP_TEMP_EN BIT(8)
+#define SUN8I_GPADC_CTRL1_GPADC_CALI_EN BIT(7)
#define SUN4I_GPADC_CTRL2 0x08
diff --git a/include/linux/mfd/syscon/exynos5-pmu.h b/include/linux/mfd/syscon/exynos5-pmu.h
index 0622ae86f9db..b4942a32b81d 100644
--- a/include/linux/mfd/syscon/exynos5-pmu.h
+++ b/include/linux/mfd/syscon/exynos5-pmu.h
@@ -12,36 +12,6 @@
#ifndef _LINUX_MFD_SYSCON_PMU_EXYNOS5_H_
#define _LINUX_MFD_SYSCON_PMU_EXYNOS5_H_
-/* Exynos5 PMU register definitions */
-#define EXYNOS5_HDMI_PHY_CONTROL (0x700)
-#define EXYNOS5_USBDRD_PHY_CONTROL (0x704)
-
-/* Exynos5250 specific register definitions */
-#define EXYNOS5_USBHOST_PHY_CONTROL (0x708)
-#define EXYNOS5_EFNAND_PHY_CONTROL (0x70c)
-#define EXYNOS5_MIPI_PHY0_CONTROL (0x710)
-#define EXYNOS5_MIPI_PHY1_CONTROL (0x714)
-#define EXYNOS5_ADC_PHY_CONTROL (0x718)
-#define EXYNOS5_MTCADC_PHY_CONTROL (0x71c)
-#define EXYNOS5_DPTX_PHY_CONTROL (0x720)
-#define EXYNOS5_SATA_PHY_CONTROL (0x724)
-
-/* Exynos5420 specific register definitions */
-#define EXYNOS5420_USBDRD1_PHY_CONTROL (0x708)
-#define EXYNOS5420_USBHOST_PHY_CONTROL (0x70c)
-#define EXYNOS5420_MIPI_PHY0_CONTROL (0x714)
-#define EXYNOS5420_MIPI_PHY1_CONTROL (0x718)
-#define EXYNOS5420_MIPI_PHY2_CONTROL (0x71c)
-#define EXYNOS5420_ADC_PHY_CONTROL (0x720)
-#define EXYNOS5420_MTCADC_PHY_CONTROL (0x724)
-#define EXYNOS5420_DPTX_PHY_CONTROL (0x728)
-
-/* Exynos5433 specific register definitions */
-#define EXYNOS5433_USBHOST30_PHY_CONTROL (0x728)
-#define EXYNOS5433_MIPI_PHY0_CONTROL (0x710)
-#define EXYNOS5433_MIPI_PHY1_CONTROL (0x714)
-#define EXYNOS5433_MIPI_PHY2_CONTROL (0x718)
-
#define EXYNOS5_PHY_ENABLE BIT(0)
#define EXYNOS5_MIPI_PHY_S_RESETN BIT(1)
#define EXYNOS5_MIPI_PHY_M_RESETN BIT(2)
diff --git a/include/linux/mfd/syscon/imx7-iomuxc-gpr.h b/include/linux/mfd/syscon/imx7-iomuxc-gpr.h
index 4585d6105d68..abbd52466573 100644
--- a/include/linux/mfd/syscon/imx7-iomuxc-gpr.h
+++ b/include/linux/mfd/syscon/imx7-iomuxc-gpr.h
@@ -44,4 +44,8 @@
#define IMX7D_GPR5_CSI_MUX_CONTROL_MIPI (0x1 << 4)
+#define IMX7D_GPR12_PCIE_PHY_REFCLK_SEL BIT(5)
+
+#define IMX7D_GPR22_PCIE_PHY_PLL_LOCKED BIT(31)
+
#endif /* __LINUX_IMX7_IOMUXC_GPR_H */
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 74b765ce48ab..d5bed0875d30 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -108,7 +108,7 @@ enum {
MLX4_MFUNC_EQE_MASK = (MLX4_MFUNC_MAX_EQES - 1)
};
-/* Driver supports 3 diffrent device methods to manage traffic steering:
+/* Driver supports 3 different device methods to manage traffic steering:
* -device managed - High level API for ib and eth flow steering. FW is
* managing flow steering tables.
* - B0 steering mode - Common low level API for ib and (if supported) eth.
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 3fece51dcf13..bcdf739ee41a 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -892,12 +892,7 @@ static inline u16 cmdif_rev(struct mlx5_core_dev *dev)
static inline void *mlx5_vzalloc(unsigned long size)
{
- void *rtn;
-
- rtn = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
- if (!rtn)
- rtn = vzalloc(size);
- return rtn;
+ return kvzalloc(size, GFP_KERNEL);
}
static inline u32 mlx5_base_mkey(const u32 key)
@@ -1102,6 +1097,25 @@ struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev);
struct mlx5_uars_page *mlx5_get_uars_page(struct mlx5_core_dev *mdev);
void mlx5_put_uars_page(struct mlx5_core_dev *mdev, struct mlx5_uars_page *up);
+#ifndef CONFIG_MLX5_CORE_IPOIB
+static inline
+struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev,
+ struct ib_device *ibdev,
+ const char *name,
+ void (*setup)(struct net_device *))
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline void mlx5_rdma_netdev_free(struct net_device *netdev) {}
+#else
+struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev,
+ struct ib_device *ibdev,
+ const char *name,
+ void (*setup)(struct net_device *));
+void mlx5_rdma_netdev_free(struct net_device *netdev);
+#endif /* CONFIG_MLX5_CORE_IPOIB */
+
struct mlx5_profile {
u64 mask;
u8 log_max_qp;
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 5d22e69f51ea..7cb17c6b97de 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -518,6 +518,28 @@ static inline int is_vmalloc_or_module_addr(const void *x)
}
#endif
+extern void *kvmalloc_node(size_t size, gfp_t flags, int node);
+static inline void *kvmalloc(size_t size, gfp_t flags)
+{
+ return kvmalloc_node(size, flags, NUMA_NO_NODE);
+}
+static inline void *kvzalloc_node(size_t size, gfp_t flags, int node)
+{
+ return kvmalloc_node(size, flags | __GFP_ZERO, node);
+}
+static inline void *kvzalloc(size_t size, gfp_t flags)
+{
+ return kvmalloc(size, flags | __GFP_ZERO);
+}
+
+static inline void *kvmalloc_array(size_t n, size_t size, gfp_t flags)
+{
+ if (size != 0 && n > SIZE_MAX / size)
+ return NULL;
+
+ return kvmalloc(n * size, flags);
+}
+
extern void kvfree(const void *addr);
static inline atomic_t *compound_mapcount_ptr(struct page *page)
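For context, a minimal sketch of the intended pairing of the new kvmalloc() family with the existing kvfree(); struct foo_entry and the foo_* helpers are invented for illustration.

#include <linux/mm.h>
#include <linux/types.h>

struct foo_entry {
        u64 key;
        u64 val;
};

static struct foo_entry *foo_alloc_table(size_t nr_entries)
{
        /*
         * Overflow-checked array allocation that first tries kmalloc and
         * transparently falls back to vmalloc for large sizes.
         */
        return kvmalloc_array(nr_entries, sizeof(struct foo_entry),
                              GFP_KERNEL | __GFP_ZERO);
}

static void foo_free_table(struct foo_entry *table)
{
        kvfree(table);  /* correct for either backing allocator */
}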
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index e0c3c5e3d8a0..ebaccd4e7d8c 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -74,6 +74,11 @@ extern char * const migratetype_names[MIGRATE_TYPES];
# define is_migrate_cma_page(_page) false
#endif
+static inline bool is_migrate_movable(int mt)
+{
+ return is_migrate_cma(mt) || mt == MIGRATE_MOVABLE;
+}
+
#define for_each_migratetype_order(order, type) \
for (order = 0; order < MAX_ORDER; order++) \
for (type = 0; type < MIGRATE_TYPES; type++)
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index 8850fcaf50db..566fda587fcf 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -428,6 +428,16 @@ struct i2c_device_id {
kernel_ulong_t driver_data; /* Data private to the driver */
};
+/* pci_epf */
+
+#define PCI_EPF_NAME_SIZE 20
+#define PCI_EPF_MODULE_PREFIX "pci_epf:"
+
+struct pci_epf_device_id {
+ char name[PCI_EPF_NAME_SIZE];
+ kernel_ulong_t driver_data;
+};
+
/* spi */
#define SPI_NAME_SIZE 32
diff --git a/include/linux/module.h b/include/linux/module.h
index 9ad68561d8c2..21f56393602f 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -583,7 +583,7 @@ extern bool try_module_get(struct module *module);
extern void module_put(struct module *module);
#else /*!CONFIG_MODULE_UNLOAD*/
-static inline int try_module_get(struct module *module)
+static inline bool try_module_get(struct module *module)
{
return !module || module_is_live(module);
}
@@ -680,9 +680,9 @@ static inline void __module_get(struct module *module)
{
}
-static inline int try_module_get(struct module *module)
+static inline bool try_module_get(struct module *module)
{
- return 1;
+ return true;
}
static inline void module_put(struct module *module)
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index c20395edf2de..5fff5ba5964e 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -86,19 +86,16 @@ struct netlink_ext_ack {
* Currently string formatting is not supported (due
* to the lack of an output buffer.)
*/
-#define NL_SET_ERR_MSG(extack, msg) do { \
- static const char _msg[] = (msg); \
- \
- (extack)->_msg = _msg; \
+#define NL_SET_ERR_MSG(extack, msg) do { \
+ static const char __msg[] = (msg); \
+ struct netlink_ext_ack *__extack = (extack); \
+ \
+ if (__extack) \
+ __extack->_msg = __msg; \
} while (0)
-#define NL_MOD_TRY_SET_ERR_MSG(extack, msg) do { \
- static const char _msg[] = KBUILD_MODNAME ": " msg; \
- struct netlink_ext_ack *_extack = (extack); \
- \
- if (_extack) \
- _extack->_msg = _msg; \
-} while (0)
+#define NL_SET_ERR_MSG_MOD(extack, msg) \
+ NL_SET_ERR_MSG((extack), KBUILD_MODNAME ": " msg)
extern void netlink_kernel_release(struct sock *sk);
extern int __netlink_change_ngroups(struct sock *sk, unsigned int groups);
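For context, a minimal sketch of how a netlink handler would use the now NULL-safe NL_SET_ERR_MSG() and the renamed NL_SET_ERR_MSG_MOD(); foo_validate() and its limits are invented for illustration.

#include <linux/errno.h>
#include <linux/netlink.h>

static int foo_validate(int value, struct netlink_ext_ack *extack)
{
        if (value < 0) {
                /* Safe even if the caller passed extack == NULL. */
                NL_SET_ERR_MSG(extack, "value must be non-negative");
                return -EINVAL;
        }
        if (value > 255) {
                /* Same, with a KBUILD_MODNAME ": " prefix added. */
                NL_SET_ERR_MSG_MOD(extack, "value exceeds 8-bit range");
                return -ERANGE;
        }
        return 0;
}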
diff --git a/include/linux/of.h b/include/linux/of.h
index e5d4225fda35..50fcdb54087f 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -294,6 +294,9 @@ extern int of_property_count_elems_of_size(const struct device_node *np,
extern int of_property_read_u32_index(const struct device_node *np,
const char *propname,
u32 index, u32 *out_value);
+extern int of_property_read_u64_index(const struct device_node *np,
+ const char *propname,
+ u32 index, u64 *out_value);
extern int of_property_read_variable_u8_array(const struct device_node *np,
const char *propname, u8 *out_values,
size_t sz_min, size_t sz_max);
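For context, a minimal sketch of the new indexed u64 accessor; the "foo-ranges" property name is invented for illustration.

#include <linux/of.h>
#include <linux/types.h>

static int foo_parse_base(struct device_node *np, u64 *base)
{
        /* Read the second u64 value of a hypothetical property. */
        return of_property_read_u64_index(np, "foo-ranges", 1, base);
}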
diff --git a/include/linux/of_device.h b/include/linux/of_device.h
index c12dace043f3..b4ad8b4f8506 100644
--- a/include/linux/of_device.h
+++ b/include/linux/of_device.h
@@ -34,8 +34,7 @@ extern void of_device_unregister(struct platform_device *ofdev);
extern const void *of_device_get_match_data(const struct device *dev);
-extern ssize_t of_device_get_modalias(struct device *dev,
- char *str, ssize_t len);
+extern ssize_t of_device_modalias(struct device *dev, char *str, ssize_t len);
extern int of_device_request_module(struct device *dev);
extern void of_device_uevent(struct device *dev, struct kobj_uevent_env *env);
@@ -55,7 +54,8 @@ static inline struct device_node *of_cpu_device_node_get(int cpu)
return of_node_get(cpu_dev->of_node);
}
-void of_dma_configure(struct device *dev, struct device_node *np);
+int of_dma_configure(struct device *dev, struct device_node *np);
+void of_dma_deconfigure(struct device *dev);
#else /* CONFIG_OF */
static inline int of_driver_match_device(struct device *dev,
@@ -72,8 +72,8 @@ static inline const void *of_device_get_match_data(const struct device *dev)
return NULL;
}
-static inline int of_device_get_modalias(struct device *dev,
- char *str, ssize_t len)
+static inline int of_device_modalias(struct device *dev,
+ char *str, ssize_t len)
{
return -ENODEV;
}
@@ -103,7 +103,12 @@ static inline struct device_node *of_cpu_device_node_get(int cpu)
{
return NULL;
}
-static inline void of_dma_configure(struct device *dev, struct device_node *np)
+
+static inline int of_dma_configure(struct device *dev, struct device_node *np)
+{
+ return 0;
+}
+static inline void of_dma_deconfigure(struct device *dev)
{}
#endif /* CONFIG_OF */
diff --git a/include/linux/of_gpio.h b/include/linux/of_gpio.h
index 3f87ea5b8bee..1e089d5a182b 100644
--- a/include/linux/of_gpio.h
+++ b/include/linux/of_gpio.h
@@ -30,6 +30,7 @@ struct device_node;
enum of_gpio_flags {
OF_GPIO_ACTIVE_LOW = 0x1,
OF_GPIO_SINGLE_ENDED = 0x2,
+ OF_GPIO_OPEN_DRAIN = 0x4,
};
#ifdef CONFIG_OF_GPIO
diff --git a/include/linux/of_irq.h b/include/linux/of_irq.h
index 1e0deb8e8494..ec6b11deb773 100644
--- a/include/linux/of_irq.h
+++ b/include/linux/of_irq.h
@@ -8,7 +8,7 @@
#include <linux/ioport.h>
#include <linux/of.h>
-typedef int (*of_irq_init_cb_t)(struct device_node *, struct device_node *);
+typedef int const (*of_irq_init_cb_t)(struct device_node *, struct device_node *);
/*
* Workarounds only applied to 32bit powermac machines
diff --git a/include/linux/of_pci.h b/include/linux/of_pci.h
index 0e0974eceb80..518c8d20647a 100644
--- a/include/linux/of_pci.h
+++ b/include/linux/of_pci.h
@@ -85,15 +85,4 @@ static inline int of_pci_get_host_bridge_resources(struct device_node *dev,
}
#endif
-#if defined(CONFIG_OF) && defined(CONFIG_PCI_MSI)
-int of_pci_msi_chip_add(struct msi_controller *chip);
-void of_pci_msi_chip_remove(struct msi_controller *chip);
-struct msi_controller *of_pci_find_msi_chip_by_node(struct device_node *of_node);
-#else
-static inline int of_pci_msi_chip_add(struct msi_controller *chip) { return -EINVAL; }
-static inline void of_pci_msi_chip_remove(struct msi_controller *chip) { }
-static inline struct msi_controller *
-of_pci_find_msi_chip_by_node(struct device_node *of_node) { return NULL; }
-#endif
-
#endif
diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h
index 047d64706f2a..d4cd2014fa6f 100644
--- a/include/linux/page-isolation.h
+++ b/include/linux/page-isolation.h
@@ -33,10 +33,7 @@ bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
bool skip_hwpoisoned_pages);
void set_pageblock_migratetype(struct page *page, int migratetype);
int move_freepages_block(struct zone *zone, struct page *page,
- int migratetype);
-int move_freepages(struct zone *zone,
- struct page *start_page, struct page *end_page,
- int migratetype);
+ int migratetype, int *num_movable);
/*
* Changes migrate type in [start_pfn, end_pfn) to be MIGRATE_ISOLATE.
diff --git a/include/linux/pci-ecam.h b/include/linux/pci-ecam.h
index f0d2b9451270..809c2f1873ac 100644
--- a/include/linux/pci-ecam.h
+++ b/include/linux/pci-ecam.h
@@ -16,6 +16,7 @@
#ifndef DRIVERS_PCI_ECAM_H
#define DRIVERS_PCI_ECAM_H
+#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
@@ -68,7 +69,7 @@ extern struct pci_ecam_ops xgene_v1_pcie_ecam_ops; /* APM X-Gene PCIe v1 */
extern struct pci_ecam_ops xgene_v2_pcie_ecam_ops; /* APM X-Gene PCIe v2.x */
#endif
-#ifdef CONFIG_PCI_HOST_GENERIC
+#ifdef CONFIG_PCI_HOST_COMMON
/* for DT-based PCI controllers that support ECAM */
int pci_host_common_probe(struct platform_device *pdev,
struct pci_ecam_ops *ops);
diff --git a/include/linux/pci-ep-cfs.h b/include/linux/pci-ep-cfs.h
new file mode 100644
index 000000000000..263b89ea5705
--- /dev/null
+++ b/include/linux/pci-ep-cfs.h
@@ -0,0 +1,41 @@
+/**
+ * PCI Endpoint ConfigFS header file
+ *
+ * Copyright (C) 2017 Texas Instruments
+ * Author: Kishon Vijay Abraham I <kishon@ti.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 of
+ * the License as published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_PCI_EP_CFS_H
+#define __LINUX_PCI_EP_CFS_H
+
+#include <linux/configfs.h>
+
+#ifdef CONFIG_PCI_ENDPOINT_CONFIGFS
+struct config_group *pci_ep_cfs_add_epc_group(const char *name);
+void pci_ep_cfs_remove_epc_group(struct config_group *group);
+struct config_group *pci_ep_cfs_add_epf_group(const char *name);
+void pci_ep_cfs_remove_epf_group(struct config_group *group);
+#else
+static inline struct config_group *pci_ep_cfs_add_epc_group(const char *name)
+{
+ return 0;
+}
+
+static inline void pci_ep_cfs_remove_epc_group(struct config_group *group)
+{
+}
+
+static inline struct config_group *pci_ep_cfs_add_epf_group(const char *name)
+{
+ return 0;
+}
+
+static inline void pci_ep_cfs_remove_epf_group(struct config_group *group)
+{
+}
+#endif
+#endif /* __LINUX_PCI_EP_CFS_H */
diff --git a/include/linux/pci-epc.h b/include/linux/pci-epc.h
new file mode 100644
index 000000000000..af5edbf3eea3
--- /dev/null
+++ b/include/linux/pci-epc.h
@@ -0,0 +1,144 @@
+/**
+ * PCI Endpoint *Controller* (EPC) header file
+ *
+ * Copyright (C) 2017 Texas Instruments
+ * Author: Kishon Vijay Abraham I <kishon@ti.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 of
+ * the License as published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_PCI_EPC_H
+#define __LINUX_PCI_EPC_H
+
+#include <linux/pci-epf.h>
+
+struct pci_epc;
+
+enum pci_epc_irq_type {
+ PCI_EPC_IRQ_UNKNOWN,
+ PCI_EPC_IRQ_LEGACY,
+ PCI_EPC_IRQ_MSI,
+};
+
+/**
+ * struct pci_epc_ops - set of function pointers for performing EPC operations
+ * @write_header: ops to populate configuration space header
+ * @set_bar: ops to configure the BAR
+ * @clear_bar: ops to reset the BAR
+ * @map_addr: ops to map CPU address to PCI address
+ * @unmap_addr: ops to unmap CPU address and PCI address
+ * @set_msi: ops to set the requested number of MSI interrupts in the MSI
+ * capability register
+ * @get_msi: ops to get the number of MSI interrupts allocated by the RC from
+ * the MSI capability register
+ * @raise_irq: ops to raise a legacy or MSI interrupt
+ * @start: ops to start the PCI link
+ * @stop: ops to stop the PCI link
+ * @owner: the module owner containing the ops
+ */
+struct pci_epc_ops {
+ int (*write_header)(struct pci_epc *pci_epc,
+ struct pci_epf_header *hdr);
+ int (*set_bar)(struct pci_epc *epc, enum pci_barno bar,
+ dma_addr_t bar_phys, size_t size, int flags);
+ void (*clear_bar)(struct pci_epc *epc, enum pci_barno bar);
+ int (*map_addr)(struct pci_epc *epc, phys_addr_t addr,
+ u64 pci_addr, size_t size);
+ void (*unmap_addr)(struct pci_epc *epc, phys_addr_t addr);
+ int (*set_msi)(struct pci_epc *epc, u8 interrupts);
+ int (*get_msi)(struct pci_epc *epc);
+ int (*raise_irq)(struct pci_epc *pci_epc,
+ enum pci_epc_irq_type type, u8 interrupt_num);
+ int (*start)(struct pci_epc *epc);
+ void (*stop)(struct pci_epc *epc);
+ struct module *owner;
+};
+
+/**
+ * struct pci_epc_mem - address space of the endpoint controller
+ * @phys_base: physical base address of the PCI address space
+ * @size: the size of the PCI address space
+ * @bitmap: bitmap to manage the PCI address space
+ * @pages: number of bits representing the address region
+ */
+struct pci_epc_mem {
+ phys_addr_t phys_base;
+ size_t size;
+ unsigned long *bitmap;
+ int pages;
+};
+
+/**
+ * struct pci_epc - represents the PCI EPC device
+ * @dev: PCI EPC device
+ * @pci_epf: list of endpoint functions present in this EPC device
+ * @ops: function pointers for performing endpoint operations
+ * @mem: address space of the endpoint controller
+ * @max_functions: max number of functions that can be configured in this EPC
+ * @group: configfs group representing the PCI EPC device
+ * @lock: spinlock to protect pci_epc ops
+ */
+struct pci_epc {
+ struct device dev;
+ struct list_head pci_epf;
+ const struct pci_epc_ops *ops;
+ struct pci_epc_mem *mem;
+ u8 max_functions;
+ struct config_group *group;
+ /* spinlock to protect against concurrent access of EP controller */
+ spinlock_t lock;
+};
+
+#define to_pci_epc(device) container_of((device), struct pci_epc, dev)
+
+#define pci_epc_create(dev, ops) \
+ __pci_epc_create((dev), (ops), THIS_MODULE)
+#define devm_pci_epc_create(dev, ops) \
+ __devm_pci_epc_create((dev), (ops), THIS_MODULE)
+
+static inline void epc_set_drvdata(struct pci_epc *epc, void *data)
+{
+ dev_set_drvdata(&epc->dev, data);
+}
+
+static inline void *epc_get_drvdata(struct pci_epc *epc)
+{
+ return dev_get_drvdata(&epc->dev);
+}
+
+struct pci_epc *
+__devm_pci_epc_create(struct device *dev, const struct pci_epc_ops *ops,
+ struct module *owner);
+struct pci_epc *
+__pci_epc_create(struct device *dev, const struct pci_epc_ops *ops,
+ struct module *owner);
+void devm_pci_epc_destroy(struct device *dev, struct pci_epc *epc);
+void pci_epc_destroy(struct pci_epc *epc);
+int pci_epc_add_epf(struct pci_epc *epc, struct pci_epf *epf);
+void pci_epc_linkup(struct pci_epc *epc);
+void pci_epc_remove_epf(struct pci_epc *epc, struct pci_epf *epf);
+int pci_epc_write_header(struct pci_epc *epc, struct pci_epf_header *hdr);
+int pci_epc_set_bar(struct pci_epc *epc, enum pci_barno bar,
+ dma_addr_t bar_phys, size_t size, int flags);
+void pci_epc_clear_bar(struct pci_epc *epc, int bar);
+int pci_epc_map_addr(struct pci_epc *epc, phys_addr_t phys_addr,
+ u64 pci_addr, size_t size);
+void pci_epc_unmap_addr(struct pci_epc *epc, phys_addr_t phys_addr);
+int pci_epc_set_msi(struct pci_epc *epc, u8 interrupts);
+int pci_epc_get_msi(struct pci_epc *epc);
+int pci_epc_raise_irq(struct pci_epc *epc, enum pci_epc_irq_type type,
+ u8 interrupt_num);
+int pci_epc_start(struct pci_epc *epc);
+void pci_epc_stop(struct pci_epc *epc);
+struct pci_epc *pci_epc_get(const char *epc_name);
+void pci_epc_put(struct pci_epc *epc);
+
+int pci_epc_mem_init(struct pci_epc *epc, phys_addr_t phys_addr, size_t size);
+void pci_epc_mem_exit(struct pci_epc *epc);
+void __iomem *pci_epc_mem_alloc_addr(struct pci_epc *epc,
+ phys_addr_t *phys_addr, size_t size);
+void pci_epc_mem_free_addr(struct pci_epc *epc, phys_addr_t phys_addr,
+ void __iomem *virt_addr, size_t size);
+#endif /* __LINUX_PCI_EPC_H */
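For context, a minimal sketch (not a complete controller driver) of how an endpoint-controller driver plugs into this API: it fills in pci_epc_ops, creates the EPC device and hands its outbound window to the core. All foo_* names, the register behaviour and the address/size constants are invented for illustration.

#include <linux/err.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/pci-epc.h>

static int foo_epc_write_header(struct pci_epc *epc, struct pci_epf_header *hdr)
{
        /* Program vendor/device IDs etc. into controller registers here. */
        return 0;
}

static int foo_epc_start(struct pci_epc *epc)
{
        /* Start link training in the controller. */
        return 0;
}

static const struct pci_epc_ops foo_epc_ops = {
        .write_header   = foo_epc_write_header,
        .start          = foo_epc_start,
        .owner          = THIS_MODULE,
};

static int foo_epc_probe(struct platform_device *pdev)
{
        struct pci_epc *epc;

        epc = devm_pci_epc_create(&pdev->dev, &foo_epc_ops);
        if (IS_ERR(epc))
                return PTR_ERR(epc);

        epc->max_functions = 1;

        /* Placeholder outbound window, managed by the EPC mem allocator. */
        return pci_epc_mem_init(epc, 0x20000000, SZ_64M);
}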
diff --git a/include/linux/pci-epf.h b/include/linux/pci-epf.h
new file mode 100644
index 000000000000..0d529cb90143
--- /dev/null
+++ b/include/linux/pci-epf.h
@@ -0,0 +1,162 @@
+/**
+ * PCI Endpoint *Function* (EPF) header file
+ *
+ * Copyright (C) 2017 Texas Instruments
+ * Author: Kishon Vijay Abraham I <kishon@ti.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 of
+ * the License as published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_PCI_EPF_H
+#define __LINUX_PCI_EPF_H
+
+#include <linux/device.h>
+#include <linux/mod_devicetable.h>
+
+struct pci_epf;
+
+enum pci_interrupt_pin {
+ PCI_INTERRUPT_UNKNOWN,
+ PCI_INTERRUPT_INTA,
+ PCI_INTERRUPT_INTB,
+ PCI_INTERRUPT_INTC,
+ PCI_INTERRUPT_INTD,
+};
+
+enum pci_barno {
+ BAR_0,
+ BAR_1,
+ BAR_2,
+ BAR_3,
+ BAR_4,
+ BAR_5,
+};
+
+/**
+ * struct pci_epf_header - represents standard configuration header
+ * @vendorid: identifies device manufacturer
+ * @deviceid: identifies a particular device
+ * @revid: specifies a device-specific revision identifier
+ * @progif_code: identifies a specific register-level programming interface
+ * @subclass_code: identifies more specifically the function of the device
+ * @baseclass_code: broadly classifies the type of function the device performs
+ * @cache_line_size: specifies the system cacheline size in units of DWORDs
+ * @subsys_vendor_id: vendor of the add-in card or subsystem
+ * @subsys_id: id specific to vendor
+ * @interrupt_pin: interrupt pin the device (or device function) uses
+ */
+struct pci_epf_header {
+ u16 vendorid;
+ u16 deviceid;
+ u8 revid;
+ u8 progif_code;
+ u8 subclass_code;
+ u8 baseclass_code;
+ u8 cache_line_size;
+ u16 subsys_vendor_id;
+ u16 subsys_id;
+ enum pci_interrupt_pin interrupt_pin;
+};
+
+/**
+ * struct pci_epf_ops - set of function pointers for performing EPF operations
+ * @bind: ops to perform when a EPC device has been bound to EPF device
+ * @unbind: ops to perform when a binding has been lost between a EPC device
+ * and EPF device
+ * @linkup: ops to perform when the EPC device has established a connection with
+ * a host system
+ */
+struct pci_epf_ops {
+ int (*bind)(struct pci_epf *epf);
+ void (*unbind)(struct pci_epf *epf);
+ void (*linkup)(struct pci_epf *epf);
+};
+
+/**
+ * struct pci_epf_driver - represents the PCI EPF driver
+ * @probe: ops to perform when a new EPF device has been bound to the EPF driver
+ * @remove: ops to perform when the binding between the EPF device and EPF
+ * driver is broken
+ * @driver: PCI EPF driver
+ * @ops: set of function pointers for performing EPF operations
+ * @owner: the owner of the module that registers the PCI EPF driver
+ * @group: configfs group corresponding to the PCI EPF driver
+ * @id_table: identifies EPF devices for probing
+ */
+struct pci_epf_driver {
+ int (*probe)(struct pci_epf *epf);
+ int (*remove)(struct pci_epf *epf);
+
+ struct device_driver driver;
+ struct pci_epf_ops *ops;
+ struct module *owner;
+ struct config_group *group;
+ const struct pci_epf_device_id *id_table;
+};
+
+#define to_pci_epf_driver(drv) (container_of((drv), struct pci_epf_driver, \
+ driver))
+
+/**
+ * struct pci_epf_bar - represents the BAR of EPF device
+ * @phys_addr: physical address that should be mapped to the BAR
+ * @size: the size of the address space present in BAR
+ */
+struct pci_epf_bar {
+ dma_addr_t phys_addr;
+ size_t size;
+};
+
+/**
+ * struct pci_epf - represents the PCI EPF device
+ * @dev: the PCI EPF device
+ * @name: the name of the PCI EPF device
+ * @header: represents standard configuration header
+ * @bar: represents the BAR of EPF device
+ * @msi_interrupts: number of MSI interrupts required by this function
+ * @func_no: unique function number within this endpoint device
+ * @epc: the EPC device to which this EPF device is bound
+ * @driver: the EPF driver to which this EPF device is bound
+ * @list: to add pci_epf as a list of PCI endpoint functions to pci_epc
+ */
+struct pci_epf {
+ struct device dev;
+ const char *name;
+ struct pci_epf_header *header;
+ struct pci_epf_bar bar[6];
+ u8 msi_interrupts;
+ u8 func_no;
+
+ struct pci_epc *epc;
+ struct pci_epf_driver *driver;
+ struct list_head list;
+};
+
+#define to_pci_epf(epf_dev) container_of((epf_dev), struct pci_epf, dev)
+
+#define pci_epf_register_driver(driver) \
+ __pci_epf_register_driver((driver), THIS_MODULE)
+
+static inline void epf_set_drvdata(struct pci_epf *epf, void *data)
+{
+ dev_set_drvdata(&epf->dev, data);
+}
+
+static inline void *epf_get_drvdata(struct pci_epf *epf)
+{
+ return dev_get_drvdata(&epf->dev);
+}
+
+struct pci_epf *pci_epf_create(const char *name);
+void pci_epf_destroy(struct pci_epf *epf);
+int __pci_epf_register_driver(struct pci_epf_driver *driver,
+ struct module *owner);
+void pci_epf_unregister_driver(struct pci_epf_driver *driver);
+void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar);
+void pci_epf_free_space(struct pci_epf *epf, void *addr, enum pci_barno bar);
+int pci_epf_bind(struct pci_epf *epf);
+void pci_epf_unbind(struct pci_epf *epf);
+void pci_epf_linkup(struct pci_epf *epf);
+#endif /* __LINUX_PCI_EPF_H */
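For context, a minimal sketch (not a complete function driver) showing the shape of an EPF driver built on these interfaces: a config-space header, bind/unbind ops and driver registration. The foo_* names and the vendor/device IDs are placeholders invented for illustration.

#include <linux/module.h>
#include <linux/pci-epc.h>
#include <linux/pci-epf.h>

static struct pci_epf_header foo_epf_header = {
        .vendorid       = 0x104c,       /* placeholder IDs */
        .deviceid       = 0xb500,
        .interrupt_pin  = PCI_INTERRUPT_INTA,
};

static int foo_epf_probe(struct pci_epf *epf)
{
        epf->header = &foo_epf_header;
        return 0;
}

static int foo_epf_bind(struct pci_epf *epf)
{
        /* Push the standard configuration header to the controller. */
        return pci_epc_write_header(epf->epc, epf->header);
}

static void foo_epf_unbind(struct pci_epf *epf)
{
        pci_epc_stop(epf->epc);
}

static struct pci_epf_ops foo_epf_ops = {
        .bind   = foo_epf_bind,
        .unbind = foo_epf_unbind,
};

static const struct pci_epf_device_id foo_epf_ids[] = {
        { .name = "pci_epf_foo" },
        { },
};

static struct pci_epf_driver foo_epf_driver = {
        .driver         = { .name = "pci_epf_foo" },
        .probe          = foo_epf_probe,
        .ops            = &foo_epf_ops,
        .id_table       = foo_epf_ids,
        .owner          = THIS_MODULE,
};

static int __init foo_epf_init(void)
{
        return pci_epf_register_driver(&foo_epf_driver);
}
module_init(foo_epf_init);

static void __exit foo_epf_exit(void)
{
        pci_epf_unregister_driver(&foo_epf_driver);
}
module_exit(foo_epf_exit);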
diff --git a/include/linux/pci.h b/include/linux/pci.h
index f27be8432e82..33c2b0b77429 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -28,6 +28,7 @@
#include <linux/kobject.h>
#include <linux/atomic.h>
#include <linux/device.h>
+#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/resource_ext.h>
#include <uapi/linux/pci.h>
@@ -178,6 +179,10 @@ enum pci_dev_flags {
PCI_DEV_FLAGS_NO_PM_RESET = (__force pci_dev_flags_t) (1 << 7),
/* Get VPD from function 0 VPD */
PCI_DEV_FLAGS_VPD_REF_F0 = (__force pci_dev_flags_t) (1 << 8),
+ /* a non-root bridge where translation occurs, stop alias search here */
+ PCI_DEV_FLAGS_BRIDGE_XLATE_ROOT = (__force pci_dev_flags_t) (1 << 9),
+ /* Do not use FLR even if device advertises PCI_AF_CAP */
+ PCI_DEV_FLAGS_NO_FLR_RESET = (__force pci_dev_flags_t) (1 << 10),
};
enum pci_irq_reroute_variant {
@@ -397,6 +402,8 @@ struct pci_dev {
phys_addr_t rom; /* Physical address of ROM if it's not from the BAR */
size_t romlen; /* Length of ROM if it's not from the BAR */
char *driver_override; /* Driver name to force a match */
+
+ unsigned long priv_flags; /* Private flags for the pci driver */
};
static inline struct pci_dev *pci_physfn(struct pci_dev *dev)
@@ -941,32 +948,12 @@ int pci_generic_config_write32(struct pci_bus *bus, unsigned int devfn,
struct pci_ops *pci_bus_set_ops(struct pci_bus *bus, struct pci_ops *ops);
-static inline int pci_read_config_byte(const struct pci_dev *dev, int where, u8 *val)
-{
- return pci_bus_read_config_byte(dev->bus, dev->devfn, where, val);
-}
-static inline int pci_read_config_word(const struct pci_dev *dev, int where, u16 *val)
-{
- return pci_bus_read_config_word(dev->bus, dev->devfn, where, val);
-}
-static inline int pci_read_config_dword(const struct pci_dev *dev, int where,
- u32 *val)
-{
- return pci_bus_read_config_dword(dev->bus, dev->devfn, where, val);
-}
-static inline int pci_write_config_byte(const struct pci_dev *dev, int where, u8 val)
-{
- return pci_bus_write_config_byte(dev->bus, dev->devfn, where, val);
-}
-static inline int pci_write_config_word(const struct pci_dev *dev, int where, u16 val)
-{
- return pci_bus_write_config_word(dev->bus, dev->devfn, where, val);
-}
-static inline int pci_write_config_dword(const struct pci_dev *dev, int where,
- u32 val)
-{
- return pci_bus_write_config_dword(dev->bus, dev->devfn, where, val);
-}
+int pci_read_config_byte(const struct pci_dev *dev, int where, u8 *val);
+int pci_read_config_word(const struct pci_dev *dev, int where, u16 *val);
+int pci_read_config_dword(const struct pci_dev *dev, int where, u32 *val);
+int pci_write_config_byte(const struct pci_dev *dev, int where, u8 val);
+int pci_write_config_word(const struct pci_dev *dev, int where, u16 val);
+int pci_write_config_dword(const struct pci_dev *dev, int where, u32 val);
int pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val);
int pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val);
@@ -1053,6 +1040,7 @@ int pcie_get_mps(struct pci_dev *dev);
int pcie_set_mps(struct pci_dev *dev, int mps);
int pcie_get_minimum_link(struct pci_dev *dev, enum pci_bus_speed *speed,
enum pcie_link_width *width);
+void pcie_flr(struct pci_dev *dev);
int __pci_reset_function(struct pci_dev *dev);
int __pci_reset_function_locked(struct pci_dev *dev);
int pci_reset_function(struct pci_dev *dev);
@@ -1073,6 +1061,11 @@ int pci_select_bars(struct pci_dev *dev, unsigned long flags);
bool pci_device_is_present(struct pci_dev *pdev);
void pci_ignore_hotplug(struct pci_dev *dev);
+int __printf(6, 7) pci_request_irq(struct pci_dev *dev, unsigned int nr,
+ irq_handler_t handler, irq_handler_t thread_fn, void *dev_id,
+ const char *fmt, ...);
+void pci_free_irq(struct pci_dev *dev, unsigned int nr, void *dev_id);
+
/* ROM control related routines */
int pci_enable_rom(struct pci_dev *pdev);
void pci_disable_rom(struct pci_dev *pdev);
@@ -1200,6 +1193,11 @@ unsigned long pci_address_to_pio(phys_addr_t addr);
phys_addr_t pci_pio_to_address(unsigned long pio);
int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr);
void pci_unmap_iospace(struct resource *res);
+void __iomem *devm_pci_remap_cfgspace(struct device *dev,
+ resource_size_t offset,
+ resource_size_t size);
+void __iomem *devm_pci_remap_cfg_resource(struct device *dev,
+ struct resource *res);
static inline pci_bus_addr_t pci_bus_address(struct pci_dev *pdev, int bar)
{
@@ -1298,10 +1296,8 @@ struct msix_entry {
#ifdef CONFIG_PCI_MSI
int pci_msi_vec_count(struct pci_dev *dev);
-void pci_msi_shutdown(struct pci_dev *dev);
void pci_disable_msi(struct pci_dev *dev);
int pci_msix_vec_count(struct pci_dev *dev);
-void pci_msix_shutdown(struct pci_dev *dev);
void pci_disable_msix(struct pci_dev *dev);
void pci_restore_msi_state(struct pci_dev *dev);
int pci_msi_enabled(void);
@@ -1327,10 +1323,8 @@ int pci_irq_get_node(struct pci_dev *pdev, int vec);
#else
static inline int pci_msi_vec_count(struct pci_dev *dev) { return -ENOSYS; }
-static inline void pci_msi_shutdown(struct pci_dev *dev) { }
static inline void pci_disable_msi(struct pci_dev *dev) { }
static inline int pci_msix_vec_count(struct pci_dev *dev) { return -ENOSYS; }
-static inline void pci_msix_shutdown(struct pci_dev *dev) { }
static inline void pci_disable_msix(struct pci_dev *dev) { }
static inline void pci_restore_msi_state(struct pci_dev *dev) { }
static inline int pci_msi_enabled(void) { return 0; }
@@ -1623,6 +1617,36 @@ static inline int pci_get_new_domain_nr(void) { return -ENOSYS; }
#include <asm/pci.h>
+/* These two functions provide almost identical functionality. Depending
+ * on the architecture, one will be implemented as a wrapper around the
+ * other (in drivers/pci/mmap.c).
+ *
+ * pci_mmap_resource_range() maps a specific BAR, and vm->vm_pgoff
+ * is expected to be an offset within that region.
+ *
+ * pci_mmap_page_range() is the legacy architecture-specific interface,
+ * which accepts a "user visible" resource address converted by
+ * pci_resource_to_user(), as used in the legacy mmap() interface in
+ * /proc/bus/pci/.
+ */
+int pci_mmap_resource_range(struct pci_dev *dev, int bar,
+ struct vm_area_struct *vma,
+ enum pci_mmap_state mmap_state, int write_combine);
+int pci_mmap_page_range(struct pci_dev *pdev, int bar,
+ struct vm_area_struct *vma,
+ enum pci_mmap_state mmap_state, int write_combine);
+
+#ifndef arch_can_pci_mmap_wc
+#define arch_can_pci_mmap_wc() 0
+#endif
+
+#ifndef arch_can_pci_mmap_io
+#define arch_can_pci_mmap_io() 0
+#define pci_iobar_pfn(pdev, bar, vma) (-EINVAL)
+#else
+int pci_iobar_pfn(struct pci_dev *pdev, int bar, struct vm_area_struct *vma);
+#endif
+
#ifndef pci_root_bus_fwnode
#define pci_root_bus_fwnode(bus) NULL
#endif
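For context, a minimal sketch of a driver-side mmap handler delegating to the new interface; foo_mmap_bar0() is invented for illustration and maps BAR 0 uncached.

#include <linux/mm.h>
#include <linux/pci.h>

static int foo_mmap_bar0(struct pci_dev *pdev, struct vm_area_struct *vma)
{
        /*
         * With pci_mmap_resource_range(), vma->vm_pgoff is an offset into
         * the chosen BAR rather than a "user visible" resource address.
         */
        return pci_mmap_resource_range(pdev, 0, vma, pci_mmap_mem, 0);
}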
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index a4f77feecbb0..5f6b71d15393 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -862,6 +862,8 @@
#define PCI_DEVICE_ID_TI_X620 0xac8d
#define PCI_DEVICE_ID_TI_X420 0xac8e
#define PCI_DEVICE_ID_TI_XX20_FM 0xac8f
+#define PCI_DEVICE_ID_TI_DRA74x 0xb500
+#define PCI_DEVICE_ID_TI_DRA72x 0xb501
#define PCI_VENDOR_ID_SONY 0x104d
diff --git a/include/linux/pe.h b/include/linux/pe.h
index e170b95e763b..143ce75be5f0 100644
--- a/include/linux/pe.h
+++ b/include/linux/pe.h
@@ -23,34 +23,6 @@
#define MZ_MAGIC 0x5a4d /* "MZ" */
-struct mz_hdr {
- uint16_t magic; /* MZ_MAGIC */
- uint16_t lbsize; /* size of last used block */
- uint16_t blocks; /* pages in file, 0x3 */
- uint16_t relocs; /* relocations */
- uint16_t hdrsize; /* header size in "paragraphs" */
- uint16_t min_extra_pps; /* .bss */
- uint16_t max_extra_pps; /* runtime limit for the arena size */
- uint16_t ss; /* relative stack segment */
- uint16_t sp; /* initial %sp register */
- uint16_t checksum; /* word checksum */
- uint16_t ip; /* initial %ip register */
- uint16_t cs; /* initial %cs relative to load segment */
- uint16_t reloc_table_offset; /* offset of the first relocation */
- uint16_t overlay_num; /* overlay number. set to 0. */
- uint16_t reserved0[4]; /* reserved */
- uint16_t oem_id; /* oem identifier */
- uint16_t oem_info; /* oem specific */
- uint16_t reserved1[10]; /* reserved */
- uint32_t peaddr; /* address of pe header */
- char message[64]; /* message to print */
-};
-
-struct mz_reloc {
- uint16_t offset;
- uint16_t segment;
-};
-
#define PE_MAGIC 0x00004550 /* "PE\0\0" */
#define PE_OPT_MAGIC_PE32 0x010b
#define PE_OPT_MAGIC_PE32_ROM 0x0107
@@ -62,6 +34,7 @@ struct mz_reloc {
#define IMAGE_FILE_MACHINE_AMD64 0x8664
#define IMAGE_FILE_MACHINE_ARM 0x01c0
#define IMAGE_FILE_MACHINE_ARMV7 0x01c4
+#define IMAGE_FILE_MACHINE_ARM64 0xaa64
#define IMAGE_FILE_MACHINE_EBC 0x0ebc
#define IMAGE_FILE_MACHINE_I386 0x014c
#define IMAGE_FILE_MACHINE_IA64 0x0200
@@ -98,17 +71,6 @@ struct mz_reloc {
#define IMAGE_FILE_UP_SYSTEM_ONLY 0x4000
#define IMAGE_FILE_BYTES_REVERSED_HI 0x8000
-struct pe_hdr {
- uint32_t magic; /* PE magic */
- uint16_t machine; /* machine type */
- uint16_t sections; /* number of sections */
- uint32_t timestamp; /* time_t */
- uint32_t symbol_table; /* symbol table offset */
- uint32_t symbols; /* number of symbols */
- uint16_t opt_hdr_size; /* size of optional header */
- uint16_t flags; /* flags */
-};
-
#define IMAGE_FILE_OPT_ROM_MAGIC 0x107
#define IMAGE_FILE_OPT_PE32_MAGIC 0x10b
#define IMAGE_FILE_OPT_PE32_PLUS_MAGIC 0x20b
@@ -134,6 +96,95 @@ struct pe_hdr {
#define IMAGE_DLLCHARACTERISTICS_WDM_DRIVER 0x2000
#define IMAGE_DLLCHARACTERISTICS_TERMINAL_SERVER_AWARE 0x8000
+/* they actually defined 0x00000000 as well, but I think we'll skip that one. */
+#define IMAGE_SCN_RESERVED_0 0x00000001
+#define IMAGE_SCN_RESERVED_1 0x00000002
+#define IMAGE_SCN_RESERVED_2 0x00000004
+#define IMAGE_SCN_TYPE_NO_PAD 0x00000008 /* don't pad - obsolete */
+#define IMAGE_SCN_RESERVED_3 0x00000010
+#define IMAGE_SCN_CNT_CODE 0x00000020 /* .text */
+#define IMAGE_SCN_CNT_INITIALIZED_DATA 0x00000040 /* .data */
+#define IMAGE_SCN_CNT_UNINITIALIZED_DATA 0x00000080 /* .bss */
+#define IMAGE_SCN_LNK_OTHER 0x00000100 /* reserved */
+#define IMAGE_SCN_LNK_INFO 0x00000200 /* .drectve comments */
+#define IMAGE_SCN_RESERVED_4 0x00000400
+#define IMAGE_SCN_LNK_REMOVE 0x00000800 /* .o only - scn to be rm'd*/
+#define IMAGE_SCN_LNK_COMDAT 0x00001000 /* .o only - COMDAT data */
+#define IMAGE_SCN_RESERVED_5 0x00002000 /* spec omits this */
+#define IMAGE_SCN_RESERVED_6 0x00004000 /* spec omits this */
+#define IMAGE_SCN_GPREL 0x00008000 /* global pointer referenced data */
+/* spec lists 0x20000 twice, I suspect they meant 0x10000 for one of them */
+#define IMAGE_SCN_MEM_PURGEABLE 0x00010000 /* reserved for "future" use */
+#define IMAGE_SCN_16BIT 0x00020000 /* reserved for "future" use */
+#define IMAGE_SCN_LOCKED 0x00040000 /* reserved for "future" use */
+#define IMAGE_SCN_PRELOAD 0x00080000 /* reserved for "future" use */
+/* and here they just stuck a 1-byte integer in the middle of a bitfield */
+#define IMAGE_SCN_ALIGN_1BYTES 0x00100000 /* it does what it says on the box */
+#define IMAGE_SCN_ALIGN_2BYTES 0x00200000
+#define IMAGE_SCN_ALIGN_4BYTES 0x00300000
+#define IMAGE_SCN_ALIGN_8BYTES 0x00400000
+#define IMAGE_SCN_ALIGN_16BYTES 0x00500000
+#define IMAGE_SCN_ALIGN_32BYTES 0x00600000
+#define IMAGE_SCN_ALIGN_64BYTES 0x00700000
+#define IMAGE_SCN_ALIGN_128BYTES 0x00800000
+#define IMAGE_SCN_ALIGN_256BYTES 0x00900000
+#define IMAGE_SCN_ALIGN_512BYTES 0x00a00000
+#define IMAGE_SCN_ALIGN_1024BYTES 0x00b00000
+#define IMAGE_SCN_ALIGN_2048BYTES 0x00c00000
+#define IMAGE_SCN_ALIGN_4096BYTES 0x00d00000
+#define IMAGE_SCN_ALIGN_8192BYTES 0x00e00000
+#define IMAGE_SCN_LNK_NRELOC_OVFL 0x01000000 /* extended relocations */
+#define IMAGE_SCN_MEM_DISCARDABLE 0x02000000 /* scn can be discarded */
+#define IMAGE_SCN_MEM_NOT_CACHED 0x04000000 /* cannot be cached */
+#define IMAGE_SCN_MEM_NOT_PAGED 0x08000000 /* not pageable */
+#define IMAGE_SCN_MEM_SHARED 0x10000000 /* can be shared */
+#define IMAGE_SCN_MEM_EXECUTE 0x20000000 /* can be executed as code */
+#define IMAGE_SCN_MEM_READ 0x40000000 /* readable */
+#define IMAGE_SCN_MEM_WRITE 0x80000000 /* writeable */
+
+#define IMAGE_DEBUG_TYPE_CODEVIEW 2
+
+#ifndef __ASSEMBLY__
+
+struct mz_hdr {
+ uint16_t magic; /* MZ_MAGIC */
+ uint16_t lbsize; /* size of last used block */
+ uint16_t blocks; /* pages in file, 0x3 */
+ uint16_t relocs; /* relocations */
+ uint16_t hdrsize; /* header size in "paragraphs" */
+ uint16_t min_extra_pps; /* .bss */
+ uint16_t max_extra_pps; /* runtime limit for the arena size */
+ uint16_t ss; /* relative stack segment */
+ uint16_t sp; /* initial %sp register */
+ uint16_t checksum; /* word checksum */
+ uint16_t ip; /* initial %ip register */
+ uint16_t cs; /* initial %cs relative to load segment */
+ uint16_t reloc_table_offset; /* offset of the first relocation */
+ uint16_t overlay_num; /* overlay number. set to 0. */
+ uint16_t reserved0[4]; /* reserved */
+ uint16_t oem_id; /* oem identifier */
+ uint16_t oem_info; /* oem specific */
+ uint16_t reserved1[10]; /* reserved */
+ uint32_t peaddr; /* address of pe header */
+ char message[64]; /* message to print */
+};
+
+struct mz_reloc {
+ uint16_t offset;
+ uint16_t segment;
+};
+
+struct pe_hdr {
+ uint32_t magic; /* PE magic */
+ uint16_t machine; /* machine type */
+ uint16_t sections; /* number of sections */
+ uint32_t timestamp; /* time_t */
+ uint32_t symbol_table; /* symbol table offset */
+ uint32_t symbols; /* number of symbols */
+ uint16_t opt_hdr_size; /* size of optional header */
+ uint16_t flags; /* flags */
+};
+
/* the fact that pe32 isn't padded where pe32+ is 64-bit means union won't
* work right. vomit. */
struct pe32_opt_hdr {
@@ -243,52 +294,6 @@ struct section_header {
uint32_t flags;
};
-/* they actually defined 0x00000000 as well, but I think we'll skip that one. */
-#define IMAGE_SCN_RESERVED_0 0x00000001
-#define IMAGE_SCN_RESERVED_1 0x00000002
-#define IMAGE_SCN_RESERVED_2 0x00000004
-#define IMAGE_SCN_TYPE_NO_PAD 0x00000008 /* don't pad - obsolete */
-#define IMAGE_SCN_RESERVED_3 0x00000010
-#define IMAGE_SCN_CNT_CODE 0x00000020 /* .text */
-#define IMAGE_SCN_CNT_INITIALIZED_DATA 0x00000040 /* .data */
-#define IMAGE_SCN_CNT_UNINITIALIZED_DATA 0x00000080 /* .bss */
-#define IMAGE_SCN_LNK_OTHER 0x00000100 /* reserved */
-#define IMAGE_SCN_LNK_INFO 0x00000200 /* .drectve comments */
-#define IMAGE_SCN_RESERVED_4 0x00000400
-#define IMAGE_SCN_LNK_REMOVE 0x00000800 /* .o only - scn to be rm'd*/
-#define IMAGE_SCN_LNK_COMDAT 0x00001000 /* .o only - COMDAT data */
-#define IMAGE_SCN_RESERVED_5 0x00002000 /* spec omits this */
-#define IMAGE_SCN_RESERVED_6 0x00004000 /* spec omits this */
-#define IMAGE_SCN_GPREL 0x00008000 /* global pointer referenced data */
-/* spec lists 0x20000 twice, I suspect they meant 0x10000 for one of them */
-#define IMAGE_SCN_MEM_PURGEABLE 0x00010000 /* reserved for "future" use */
-#define IMAGE_SCN_16BIT 0x00020000 /* reserved for "future" use */
-#define IMAGE_SCN_LOCKED 0x00040000 /* reserved for "future" use */
-#define IMAGE_SCN_PRELOAD 0x00080000 /* reserved for "future" use */
-/* and here they just stuck a 1-byte integer in the middle of a bitfield */
-#define IMAGE_SCN_ALIGN_1BYTES 0x00100000 /* it does what it says on the box */
-#define IMAGE_SCN_ALIGN_2BYTES 0x00200000
-#define IMAGE_SCN_ALIGN_4BYTES 0x00300000
-#define IMAGE_SCN_ALIGN_8BYTES 0x00400000
-#define IMAGE_SCN_ALIGN_16BYTES 0x00500000
-#define IMAGE_SCN_ALIGN_32BYTES 0x00600000
-#define IMAGE_SCN_ALIGN_64BYTES 0x00700000
-#define IMAGE_SCN_ALIGN_128BYTES 0x00800000
-#define IMAGE_SCN_ALIGN_256BYTES 0x00900000
-#define IMAGE_SCN_ALIGN_512BYTES 0x00a00000
-#define IMAGE_SCN_ALIGN_1024BYTES 0x00b00000
-#define IMAGE_SCN_ALIGN_2048BYTES 0x00c00000
-#define IMAGE_SCN_ALIGN_4096BYTES 0x00d00000
-#define IMAGE_SCN_ALIGN_8192BYTES 0x00e00000
-#define IMAGE_SCN_LNK_NRELOC_OVFL 0x01000000 /* extended relocations */
-#define IMAGE_SCN_MEM_DISCARDABLE 0x02000000 /* scn can be discarded */
-#define IMAGE_SCN_MEM_NOT_CACHED 0x04000000 /* cannot be cached */
-#define IMAGE_SCN_MEM_NOT_PAGED 0x08000000 /* not pageable */
-#define IMAGE_SCN_MEM_SHARED 0x10000000 /* can be shared */
-#define IMAGE_SCN_MEM_EXECUTE 0x20000000 /* can be executed as code */
-#define IMAGE_SCN_MEM_READ 0x40000000 /* readable */
-#define IMAGE_SCN_MEM_WRITE 0x80000000 /* writeable */
-
enum x64_coff_reloc_type {
IMAGE_REL_AMD64_ABSOLUTE = 0,
IMAGE_REL_AMD64_ADDR64,
@@ -445,4 +450,6 @@ struct win_certificate {
uint16_t cert_type;
};
+#endif /* !__ASSEMBLY__ */
+
#endif /* __LINUX_PE_H */
diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h
index 8462da266089..1360dd6d5e61 100644
--- a/include/linux/perf/arm_pmu.h
+++ b/include/linux/perf/arm_pmu.h
@@ -75,6 +75,8 @@ struct pmu_hw_events {
* already have to allocate this struct per cpu.
*/
struct arm_pmu *percpu_pmu;
+
+ int irq;
};
enum armpmu_attr_groups {
@@ -88,7 +90,6 @@ struct arm_pmu {
struct pmu pmu;
cpumask_t active_irqs;
cpumask_t supported_cpus;
- int *irq_affinity;
char *name;
irqreturn_t (*handle_irq)(int irq_num, void *dev);
void (*enable)(struct perf_event *event);
@@ -104,12 +105,8 @@ struct arm_pmu {
void (*start)(struct arm_pmu *);
void (*stop)(struct arm_pmu *);
void (*reset)(void *);
- int (*request_irq)(struct arm_pmu *, irq_handler_t handler);
- void (*free_irq)(struct arm_pmu *);
int (*map_event)(struct perf_event *event);
int num_events;
- atomic_t active_events;
- struct mutex reserve_mutex;
u64 max_period;
bool secure_access; /* 32-bit ARM only */
#define ARMV8_PMUV3_MAX_COMMON_EVENTS 0x40
@@ -120,6 +117,9 @@ struct arm_pmu {
struct notifier_block cpu_pm_nb;
/* the attr_groups array must be NULL-terminated */
const struct attribute_group *attr_groups[ARMPMU_NR_ATTR_GROUPS + 1];
+
+ /* Only to be used by ACPI probing code */
+ unsigned long acpi_cpuid;
};
#define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu))
@@ -135,10 +135,12 @@ int armpmu_map_event(struct perf_event *event,
[PERF_COUNT_HW_CACHE_RESULT_MAX],
u32 raw_event_mask);
+typedef int (*armpmu_init_fn)(struct arm_pmu *);
+
struct pmu_probe_info {
unsigned int cpuid;
unsigned int mask;
- int (*init)(struct arm_pmu *);
+ armpmu_init_fn init;
};
#define PMU_PROBE(_cpuid, _mask, _fn) \
@@ -160,6 +162,21 @@ int arm_pmu_device_probe(struct platform_device *pdev,
const struct of_device_id *of_table,
const struct pmu_probe_info *probe_table);
+#ifdef CONFIG_ACPI
+int arm_pmu_acpi_probe(armpmu_init_fn init_fn);
+#else
+static inline int arm_pmu_acpi_probe(armpmu_init_fn init_fn) { return 0; }
+#endif
+
+/* Internal functions only for core arm_pmu code */
+struct arm_pmu *armpmu_alloc(void);
+void armpmu_free(struct arm_pmu *pmu);
+int armpmu_register(struct arm_pmu *pmu);
+int armpmu_request_irqs(struct arm_pmu *armpmu);
+void armpmu_free_irqs(struct arm_pmu *armpmu);
+int armpmu_request_irq(struct arm_pmu *armpmu, int cpu);
+void armpmu_free_irq(struct arm_pmu *armpmu, int cpu);
+
#define ARMV8_PMU_PDEV_NAME "armv8-pmu"
#endif /* CONFIG_ARM_PMU */
diff --git a/include/linux/platform_data/iommu-omap.h b/include/linux/platform_data/iommu-omap.h
index 0496d171700a..e8b12dbf6170 100644
--- a/include/linux/platform_data/iommu-omap.h
+++ b/include/linux/platform_data/iommu-omap.h
@@ -12,28 +12,8 @@
#include <linux/platform_device.h>
-#define MMU_REG_SIZE 256
-
-/**
- * struct iommu_arch_data - omap iommu private data
- * @name: name of the iommu device
- * @iommu_dev: handle of the iommu device
- *
- * This is an omap iommu private data object, which binds an iommu user
- * to its iommu device. This object should be placed at the iommu user's
- * dev_archdata so generic IOMMU API can be used without having to
- * utilize omap-specific plumbing anymore.
- */
-struct omap_iommu_arch_data {
- const char *name;
- struct omap_iommu *iommu_dev;
-};
-
struct iommu_platform_data {
- const char *name;
const char *reset_name;
- int nr_tlb_entries;
-
int (*assert_reset)(struct platform_device *pdev, const char *name);
int (*deassert_reset)(struct platform_device *pdev, const char *name);
};
diff --git a/include/linux/platform_data/itco_wdt.h b/include/linux/platform_data/itco_wdt.h
index f16542c77ff7..0e95527edf25 100644
--- a/include/linux/platform_data/itco_wdt.h
+++ b/include/linux/platform_data/itco_wdt.h
@@ -14,6 +14,10 @@
struct itco_wdt_platform_data {
char name[32];
unsigned int version;
+ /* private data to be passed to update_no_reboot_bit API */
+ void *no_reboot_priv;
+ /* pointer for platform specific no reboot update function */
+ int (*update_no_reboot_bit)(void *priv, bool set);
};
#endif /* _ITCO_WDT_H_ */
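For context, a minimal sketch of platform code filling in the new fields; the foo_pmc register layout, bit and offset are invented for illustration.

#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/platform_data/itco_wdt.h>

struct foo_pmc {
        void __iomem *regs;
};

static int foo_update_no_reboot_bit(void *priv, bool set)
{
        struct foo_pmc *pmc = priv;
        u32 reg = readl(pmc->regs + 0x18);      /* placeholder offset */

        if (set)
                reg |= BIT(4);                  /* placeholder bit */
        else
                reg &= ~BIT(4);
        writel(reg, pmc->regs + 0x18);
        return 0;
}

static struct itco_wdt_platform_data foo_wdt_pdata = {
        .name                   = "foo_wdt",
        .version                = 4,
        .update_no_reboot_bit   = foo_update_no_reboot_bit,
        /* .no_reboot_priv is pointed at a struct foo_pmc at probe time. */
};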
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
index 9b6abe632587..b7803a251044 100644
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -118,6 +118,7 @@ struct generic_pm_domain_data {
struct pm_domain_data base;
struct gpd_timing_data td;
struct notifier_block nb;
+ void *data;
};
#ifdef CONFIG_PM_GENERIC_DOMAINS
diff --git a/include/linux/pm_wakeup.h b/include/linux/pm_wakeup.h
index a3447932df1f..4c2cba7ec1d4 100644
--- a/include/linux/pm_wakeup.h
+++ b/include/linux/pm_wakeup.h
@@ -106,8 +106,8 @@ extern void __pm_stay_awake(struct wakeup_source *ws);
extern void pm_stay_awake(struct device *dev);
extern void __pm_relax(struct wakeup_source *ws);
extern void pm_relax(struct device *dev);
-extern void __pm_wakeup_event(struct wakeup_source *ws, unsigned int msec);
-extern void pm_wakeup_event(struct device *dev, unsigned int msec);
+extern void pm_wakeup_ws_event(struct wakeup_source *ws, unsigned int msec, bool hard);
+extern void pm_wakeup_dev_event(struct device *dev, unsigned int msec, bool hard);
#else /* !CONFIG_PM_SLEEP */
@@ -182,9 +182,11 @@ static inline void __pm_relax(struct wakeup_source *ws) {}
static inline void pm_relax(struct device *dev) {}
-static inline void __pm_wakeup_event(struct wakeup_source *ws, unsigned int msec) {}
+static inline void pm_wakeup_ws_event(struct wakeup_source *ws,
+ unsigned int msec, bool hard) {}
-static inline void pm_wakeup_event(struct device *dev, unsigned int msec) {}
+static inline void pm_wakeup_dev_event(struct device *dev, unsigned int msec,
+ bool hard) {}
#endif /* !CONFIG_PM_SLEEP */
@@ -201,4 +203,19 @@ static inline void wakeup_source_trash(struct wakeup_source *ws)
wakeup_source_drop(ws);
}
+static inline void __pm_wakeup_event(struct wakeup_source *ws, unsigned int msec)
+{
+ return pm_wakeup_ws_event(ws, msec, false);
+}
+
+static inline void pm_wakeup_event(struct device *dev, unsigned int msec)
+{
+ return pm_wakeup_dev_event(dev, msec, false);
+}
+
+static inline void pm_wakeup_hard_event(struct device *dev)
+{
+ return pm_wakeup_dev_event(dev, 0, true);
+}
+
#endif /* _LINUX_PM_WAKEUP_H */
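For context, a minimal sketch of the distinction the new wrappers draw: pm_wakeup_event() remains the "soft" report, while pm_wakeup_hard_event() additionally flags the event so a system suspend already in progress can be aborted. The handler is invented for illustration and shows both variants side by side.

#include <linux/interrupt.h>
#include <linux/pm_wakeup.h>

static irqreturn_t foo_wake_irq(int irq, void *data)
{
        struct device *dev = data;

        /* Soft report: arms a 100 ms wakeup-source timeout, as before. */
        pm_wakeup_event(dev, 100);

        /*
         * Hard report: also requests that an in-progress system suspend
         * be aborted on behalf of this device.
         */
        pm_wakeup_hard_event(dev);

        return IRQ_HANDLED;
}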
diff --git a/include/linux/pmem.h b/include/linux/pmem.h
index e856c2cb0fe8..71ecf3d46aac 100644
--- a/include/linux/pmem.h
+++ b/include/linux/pmem.h
@@ -31,12 +31,6 @@ static inline void arch_memcpy_to_pmem(void *dst, const void *src, size_t n)
BUG();
}
-static inline int arch_memcpy_from_pmem(void *dst, const void *src, size_t n)
-{
- BUG();
- return -EFAULT;
-}
-
static inline size_t arch_copy_from_iter_pmem(void *addr, size_t bytes,
struct iov_iter *i)
{
@@ -65,23 +59,6 @@ static inline bool arch_has_pmem_api(void)
return IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API);
}
-/*
- * memcpy_from_pmem - read from persistent memory with error handling
- * @dst: destination buffer
- * @src: source buffer
- * @size: transfer length
- *
- * Returns 0 on success negative error code on failure.
- */
-static inline int memcpy_from_pmem(void *dst, void const *src, size_t size)
-{
- if (arch_has_pmem_api())
- return arch_memcpy_from_pmem(dst, src, size);
- else
- memcpy(dst, src, size);
- return 0;
-}
-
/**
* memcpy_to_pmem - copy data to persistent memory
* @dst: destination buffer for the copy
diff --git a/include/linux/printk.h b/include/linux/printk.h
index 571257e0f53d..e10f27468322 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -198,7 +198,7 @@ extern void wake_up_klogd(void);
char *log_buf_addr_get(void);
u32 log_buf_len_get(void);
-void log_buf_kexec_setup(void);
+void log_buf_vmcoreinfo_setup(void);
void __init setup_log_buf(int early);
__printf(1, 2) void dump_stack_set_arch_desc(const char *fmt, ...);
void dump_stack_print_info(const char *log_lvl);
@@ -246,7 +246,7 @@ static inline u32 log_buf_len_get(void)
return 0;
}
-static inline void log_buf_kexec_setup(void)
+static inline void log_buf_vmcoreinfo_setup(void)
{
}
diff --git a/include/linux/proc_ns.h b/include/linux/proc_ns.h
index 12cb8bd81d2d..58ab28d81fc2 100644
--- a/include/linux/proc_ns.h
+++ b/include/linux/proc_ns.h
@@ -14,6 +14,7 @@ struct inode;
struct proc_ns_operations {
const char *name;
+ const char *real_ns_name;
int type;
struct ns_common *(*get)(struct task_struct *task);
void (*put)(struct ns_common *ns);
@@ -26,6 +27,7 @@ extern const struct proc_ns_operations netns_operations;
extern const struct proc_ns_operations utsns_operations;
extern const struct proc_ns_operations ipcns_operations;
extern const struct proc_ns_operations pidns_operations;
+extern const struct proc_ns_operations pidns_for_children_operations;
extern const struct proc_ns_operations userns_operations;
extern const struct proc_ns_operations mntns_operations;
extern const struct proc_ns_operations cgroupns_operations;
diff --git a/include/linux/qcom_scm.h b/include/linux/qcom_scm.h
index d32f6f1a5225..e5380471c2cd 100644
--- a/include/linux/qcom_scm.h
+++ b/include/linux/qcom_scm.h
@@ -40,6 +40,9 @@ extern int qcom_scm_pas_shutdown(u32 peripheral);
extern void qcom_scm_cpu_power_down(u32 flags);
extern u32 qcom_scm_get_version(void);
extern int qcom_scm_set_remote_state(u32 state, u32 id);
+extern int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare);
+extern int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size);
+extern int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare);
#else
static inline
int qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus)
@@ -67,5 +70,8 @@ static inline void qcom_scm_cpu_power_down(u32 flags) {}
static inline u32 qcom_scm_get_version(void) { return 0; }
static inline u32
qcom_scm_set_remote_state(u32 state,u32 id) { return -ENODEV; }
+static inline int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare) { return -ENODEV; }
+static inline int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size) { return -ENODEV; }
+static inline int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare) { return -ENODEV; }
#endif
#endif
diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h
index 5544d7b2f2bb..c70ac13a97e6 100644
--- a/include/linux/qed/qed_if.h
+++ b/include/linux/qed/qed_if.h
@@ -635,7 +635,7 @@ struct qed_common_ops {
* @return 0 on success, error otherwise.
*/
int (*set_coalesce)(struct qed_dev *cdev, u16 rx_coal, u16 tx_coal,
- u8 qid, u16 sb_id);
+ u16 qid, u16 sb_id);
/**
* @brief set_led - Configure LED mode
diff --git a/include/linux/rcu_node_tree.h b/include/linux/rcu_node_tree.h
new file mode 100644
index 000000000000..4b766b61e1a0
--- /dev/null
+++ b/include/linux/rcu_node_tree.h
@@ -0,0 +1,99 @@
+/*
+ * RCU node combining tree definitions. These are used to compute
+ * global attributes while avoiding common-case global contention. A key
+ * property that these computations rely on is a tournament-style approach
+ * where only one of the tasks contending a lower level in the tree need
+ * advance to the next higher level. If properly configured, this allows
+ * unlimited scalability while maintaining a constant level of contention
+ * on the root node.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * Copyright IBM Corporation, 2017
+ *
+ * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+ */
+
+#ifndef __LINUX_RCU_NODE_TREE_H
+#define __LINUX_RCU_NODE_TREE_H
+
+/*
+ * Define shape of hierarchy based on NR_CPUS, CONFIG_RCU_FANOUT, and
+ * CONFIG_RCU_FANOUT_LEAF.
+ * In theory, it should be possible to add more levels straightforwardly.
+ * In practice, this did work well going from three levels to four.
+ * Of course, your mileage may vary.
+ */
+
+#ifdef CONFIG_RCU_FANOUT
+#define RCU_FANOUT CONFIG_RCU_FANOUT
+#else /* #ifdef CONFIG_RCU_FANOUT */
+# ifdef CONFIG_64BIT
+# define RCU_FANOUT 64
+# else
+# define RCU_FANOUT 32
+# endif
+#endif /* #else #ifdef CONFIG_RCU_FANOUT */
+
+#ifdef CONFIG_RCU_FANOUT_LEAF
+#define RCU_FANOUT_LEAF CONFIG_RCU_FANOUT_LEAF
+#else /* #ifdef CONFIG_RCU_FANOUT_LEAF */
+#define RCU_FANOUT_LEAF 16
+#endif /* #else #ifdef CONFIG_RCU_FANOUT_LEAF */
+
+#define RCU_FANOUT_1 (RCU_FANOUT_LEAF)
+#define RCU_FANOUT_2 (RCU_FANOUT_1 * RCU_FANOUT)
+#define RCU_FANOUT_3 (RCU_FANOUT_2 * RCU_FANOUT)
+#define RCU_FANOUT_4 (RCU_FANOUT_3 * RCU_FANOUT)
+
+#if NR_CPUS <= RCU_FANOUT_1
+# define RCU_NUM_LVLS 1
+# define NUM_RCU_LVL_0 1
+# define NUM_RCU_NODES NUM_RCU_LVL_0
+# define NUM_RCU_LVL_INIT { NUM_RCU_LVL_0 }
+# define RCU_NODE_NAME_INIT { "rcu_node_0" }
+# define RCU_FQS_NAME_INIT { "rcu_node_fqs_0" }
+#elif NR_CPUS <= RCU_FANOUT_2
+# define RCU_NUM_LVLS 2
+# define NUM_RCU_LVL_0 1
+# define NUM_RCU_LVL_1 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1)
+# define NUM_RCU_NODES (NUM_RCU_LVL_0 + NUM_RCU_LVL_1)
+# define NUM_RCU_LVL_INIT { NUM_RCU_LVL_0, NUM_RCU_LVL_1 }
+# define RCU_NODE_NAME_INIT { "rcu_node_0", "rcu_node_1" }
+# define RCU_FQS_NAME_INIT { "rcu_node_fqs_0", "rcu_node_fqs_1" }
+#elif NR_CPUS <= RCU_FANOUT_3
+# define RCU_NUM_LVLS 3
+# define NUM_RCU_LVL_0 1
+# define NUM_RCU_LVL_1 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_2)
+# define NUM_RCU_LVL_2 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1)
+# define NUM_RCU_NODES (NUM_RCU_LVL_0 + NUM_RCU_LVL_1 + NUM_RCU_LVL_2)
+# define NUM_RCU_LVL_INIT { NUM_RCU_LVL_0, NUM_RCU_LVL_1, NUM_RCU_LVL_2 }
+# define RCU_NODE_NAME_INIT { "rcu_node_0", "rcu_node_1", "rcu_node_2" }
+# define RCU_FQS_NAME_INIT { "rcu_node_fqs_0", "rcu_node_fqs_1", "rcu_node_fqs_2" }
+#elif NR_CPUS <= RCU_FANOUT_4
+# define RCU_NUM_LVLS 4
+# define NUM_RCU_LVL_0 1
+# define NUM_RCU_LVL_1 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_3)
+# define NUM_RCU_LVL_2 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_2)
+# define NUM_RCU_LVL_3 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1)
+# define NUM_RCU_NODES (NUM_RCU_LVL_0 + NUM_RCU_LVL_1 + NUM_RCU_LVL_2 + NUM_RCU_LVL_3)
+# define NUM_RCU_LVL_INIT { NUM_RCU_LVL_0, NUM_RCU_LVL_1, NUM_RCU_LVL_2, NUM_RCU_LVL_3 }
+# define RCU_NODE_NAME_INIT { "rcu_node_0", "rcu_node_1", "rcu_node_2", "rcu_node_3" }
+# define RCU_FQS_NAME_INIT { "rcu_node_fqs_0", "rcu_node_fqs_1", "rcu_node_fqs_2", "rcu_node_fqs_3" }
+#else
+# error "CONFIG_RCU_FANOUT insufficient for NR_CPUS"
+#endif /* #if (NR_CPUS) <= RCU_FANOUT_1 */
+
+#endif /* __LINUX_RCU_NODE_TREE_H */
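The level-selection arithmetic above is easiest to follow with concrete numbers. Below is a minimal user-space sketch (the values NR_CPUS=256, RCU_FANOUT=64, RCU_FANOUT_LEAF=16 are illustrative assumptions, not taken from this patch) mirroring the one- and two-level cases:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	int nr_cpus = 256;                 /* assumed NR_CPUS */
	int fanout = 64;                   /* assumed RCU_FANOUT (64-bit default) */
	int fanout_leaf = 16;              /* assumed RCU_FANOUT_LEAF */
	int fanout_1 = fanout_leaf;        /* RCU_FANOUT_1 = 16   */
	int fanout_2 = fanout_1 * fanout;  /* RCU_FANOUT_2 = 1024 */

	if (nr_cpus <= fanout_1) {
		printf("1 level, 1 rcu_node\n");
	} else if (nr_cpus <= fanout_2) {
		int leaves = DIV_ROUND_UP(nr_cpus, fanout_1);  /* 16 */

		printf("2 levels, %d rcu_node structures (1 root + %d leaves)\n",
		       1 + leaves, leaves);
	}
	return 0;
}

With these numbers, NR_CPUS=256 selects the RCU_FANOUT_2 branch, giving NUM_RCU_LVL_1 = 16 and NUM_RCU_NODES = 17.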
diff --git a/include/linux/rcu_segcblist.h b/include/linux/rcu_segcblist.h
new file mode 100644
index 000000000000..ba4d2621d9ca
--- /dev/null
+++ b/include/linux/rcu_segcblist.h
@@ -0,0 +1,90 @@
+/*
+ * RCU segmented callback lists
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * Copyright IBM Corporation, 2017
+ *
+ * Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+ */
+
+#ifndef __INCLUDE_LINUX_RCU_SEGCBLIST_H
+#define __INCLUDE_LINUX_RCU_SEGCBLIST_H
+
+/* Simple unsegmented callback lists. */
+struct rcu_cblist {
+ struct rcu_head *head;
+ struct rcu_head **tail;
+ long len;
+ long len_lazy;
+};
+
+#define RCU_CBLIST_INITIALIZER(n) { .head = NULL, .tail = &n.head }
+
+/* Complicated segmented callback lists. ;-) */
+
+/*
+ * Index values for segments in rcu_segcblist structure.
+ *
+ * The segments are as follows:
+ *
+ * [head, *tails[RCU_DONE_TAIL]):
+ * Callbacks whose grace period has elapsed, and thus can be invoked.
+ * [*tails[RCU_DONE_TAIL], *tails[RCU_WAIT_TAIL]):
+ * Callbacks waiting for the current GP from the current CPU's viewpoint.
+ * [*tails[RCU_WAIT_TAIL], *tails[RCU_NEXT_READY_TAIL]):
+ * Callbacks that arrived before the next GP started, again from
+ * the current CPU's viewpoint. These can be handled by the next GP.
+ * [*tails[RCU_NEXT_READY_TAIL], *tails[RCU_NEXT_TAIL]):
+ * Callbacks that might have arrived after the next GP started.
+ * There is some uncertainty as to when a given GP starts and
+ * ends, but a CPU knows the exact times if it is the one starting
+ * or ending the GP. Other CPUs know that the previous GP ends
+ * before the next one starts.
+ *
+ * Note that RCU_WAIT_TAIL cannot be empty unless RCU_NEXT_READY_TAIL is also
+ * empty.
+ *
+ * The ->gp_seq[] array contains the grace-period number at which the
+ * corresponding segment of callbacks will be ready to invoke. A given
+ * element of this array is meaningful only when the corresponding segment
+ * is non-empty, and it is never valid for RCU_DONE_TAIL (whose callbacks
+ * are already ready to invoke) or for RCU_NEXT_TAIL (whose callbacks have
+ * not yet been assigned a grace-period number).
+ */
+#define RCU_DONE_TAIL 0 /* Also RCU_WAIT head. */
+#define RCU_WAIT_TAIL 1 /* Also RCU_NEXT_READY head. */
+#define RCU_NEXT_READY_TAIL 2 /* Also RCU_NEXT head. */
+#define RCU_NEXT_TAIL 3
+#define RCU_CBLIST_NSEGS 4
+
+struct rcu_segcblist {
+ struct rcu_head *head;
+ struct rcu_head **tails[RCU_CBLIST_NSEGS];
+ unsigned long gp_seq[RCU_CBLIST_NSEGS];
+ long len;
+ long len_lazy;
+};
+
+#define RCU_SEGCBLIST_INITIALIZER(n) \
+{ \
+ .head = NULL, \
+ .tails[RCU_DONE_TAIL] = &n.head, \
+ .tails[RCU_WAIT_TAIL] = &n.head, \
+ .tails[RCU_NEXT_READY_TAIL] = &n.head, \
+ .tails[RCU_NEXT_TAIL] = &n.head, \
+}
+
+#endif /* __INCLUDE_LINUX_RCU_SEGCBLIST_H */
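As a quick illustration of the segment layout documented above: a statically initialized list starts with all four tail pointers aimed at &list.head, so every segment is empty. The hedged sketch below (names are hypothetical, not part of this patch) shows how segment emptiness follows directly from that layout:

/* Hypothetical static instance. */
static struct rcu_segcblist example_cbs =
	RCU_SEGCBLIST_INITIALIZER(example_cbs);

/* Segment RCU_DONE_TAIL spans [head, *tails[RCU_DONE_TAIL]); later
 * segments span [*tails[seg - 1], *tails[seg]). */
static inline bool example_segcblist_segempty(struct rcu_segcblist *rsclp,
					      int seg)
{
	if (seg == RCU_DONE_TAIL)
		return &rsclp->head == rsclp->tails[RCU_DONE_TAIL];
	return rsclp->tails[seg - 1] == rsclp->tails[seg];
}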
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index 4f7a9561b8c4..b1fd8bf85fdc 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -509,7 +509,8 @@ static inline void hlist_add_tail_rcu(struct hlist_node *n,
{
struct hlist_node *i, *last = NULL;
- for (i = hlist_first_rcu(h); i; i = hlist_next_rcu(i))
+ /* Note: write side code, so rcu accessors are not needed. */
+ for (i = h->first; i; i = i->next)
last = i;
if (last) {
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index dea8f17b2fe3..e1e5d002fdb9 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -368,15 +368,20 @@ static inline void rcu_init_nohz(void)
#ifdef CONFIG_TASKS_RCU
#define TASKS_RCU(x) x
extern struct srcu_struct tasks_rcu_exit_srcu;
-#define rcu_note_voluntary_context_switch(t) \
+#define rcu_note_voluntary_context_switch_lite(t) \
do { \
- rcu_all_qs(); \
if (READ_ONCE((t)->rcu_tasks_holdout)) \
WRITE_ONCE((t)->rcu_tasks_holdout, false); \
} while (0)
+#define rcu_note_voluntary_context_switch(t) \
+ do { \
+ rcu_all_qs(); \
+ rcu_note_voluntary_context_switch_lite(t); \
+ } while (0)
#else /* #ifdef CONFIG_TASKS_RCU */
#define TASKS_RCU(x) do { } while (0)
-#define rcu_note_voluntary_context_switch(t) rcu_all_qs()
+#define rcu_note_voluntary_context_switch_lite(t) do { } while (0)
+#define rcu_note_voluntary_context_switch(t) rcu_all_qs()
#endif /* #else #ifdef CONFIG_TASKS_RCU */
/**
@@ -1132,11 +1137,11 @@ do { \
* if the UNLOCK and LOCK are executed by the same CPU or if the
* UNLOCK and LOCK operate on the same lock variable.
*/
-#ifdef CONFIG_PPC
+#ifdef CONFIG_ARCH_WEAK_RELEASE_ACQUIRE
#define smp_mb__after_unlock_lock() smp_mb() /* Full ordering for lock. */
-#else /* #ifdef CONFIG_PPC */
+#else /* #ifdef CONFIG_ARCH_WEAK_RELEASE_ACQUIRE */
#define smp_mb__after_unlock_lock() do { } while (0)
-#endif /* #else #ifdef CONFIG_PPC */
+#endif /* #else #ifdef CONFIG_ARCH_WEAK_RELEASE_ACQUIRE */
#endif /* __LINUX_RCUPDATE_H */
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index b452953e21c8..74d9c3a1feee 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -33,6 +33,11 @@ static inline int rcu_dynticks_snap(struct rcu_dynticks *rdtp)
return 0;
}
+static inline bool rcu_eqs_special_set(int cpu)
+{
+ return false; /* Never flag non-existent other CPUs! */
+}
+
static inline unsigned long get_state_synchronize_rcu(void)
{
return 0;
@@ -87,10 +92,11 @@ static inline void kfree_call_rcu(struct rcu_head *head,
call_rcu(head, func);
}
-static inline void rcu_note_context_switch(void)
-{
- rcu_sched_qs();
-}
+#define rcu_note_context_switch(preempt) \
+ do { \
+ rcu_sched_qs(); \
+ rcu_note_voluntary_context_switch_lite(current); \
+ } while (0)
/*
* Take advantage of the fact that there is only one CPU, which
@@ -212,14 +218,14 @@ static inline void exit_rcu(void)
{
}
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
+#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_SRCU)
extern int rcu_scheduler_active __read_mostly;
void rcu_scheduler_starting(void);
-#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+#else /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_SRCU) */
static inline void rcu_scheduler_starting(void)
{
}
-#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+#endif /* #else #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_SRCU) */
#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE)
@@ -237,6 +243,10 @@ static inline bool rcu_is_watching(void)
#endif /* #else defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */
+static inline void rcu_request_urgent_qs_task(struct task_struct *t)
+{
+}
+
static inline void rcu_all_qs(void)
{
barrier(); /* Avoid RCU read-side critical sections leaking across. */
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 63a4e4cf40a5..0bacb6b2af69 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -30,7 +30,7 @@
#ifndef __LINUX_RCUTREE_H
#define __LINUX_RCUTREE_H
-void rcu_note_context_switch(void);
+void rcu_note_context_switch(bool preempt);
int rcu_needs_cpu(u64 basem, u64 *nextevt);
void rcu_cpu_stall_reset(void);
@@ -41,7 +41,7 @@ void rcu_cpu_stall_reset(void);
*/
static inline void rcu_virt_note_context_switch(int cpu)
{
- rcu_note_context_switch();
+ rcu_note_context_switch(false);
}
void synchronize_rcu_bh(void);
@@ -108,6 +108,7 @@ void rcu_scheduler_starting(void);
extern int rcu_scheduler_active __read_mostly;
bool rcu_is_watching(void);
+void rcu_request_urgent_qs_task(struct task_struct *t);
void rcu_all_qs(void);
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 993e7e25a3a5..2b69fc650201 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1269,7 +1269,6 @@ extern struct pid *cad_pid;
#define PFA_NO_NEW_PRIVS 0 /* May not gain new privileges. */
#define PFA_SPREAD_PAGE 1 /* Spread page cache over cpuset */
#define PFA_SPREAD_SLAB 2 /* Spread some slab caches over cpuset */
-#define PFA_LMK_WAITING 3 /* Lowmemorykiller is waiting */
#define TASK_PFA_TEST(name, func) \
@@ -1295,9 +1294,6 @@ TASK_PFA_TEST(SPREAD_SLAB, spread_slab)
TASK_PFA_SET(SPREAD_SLAB, spread_slab)
TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab)
-TASK_PFA_TEST(LMK_WAITING, lmk_waiting)
-TASK_PFA_SET(LMK_WAITING, lmk_waiting)
-
static inline void
current_restore_flags(unsigned long orig_flags, unsigned long flags)
{
diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
index 9daabe138c99..2b24a6974847 100644
--- a/include/linux/sched/mm.h
+++ b/include/linux/sched/mm.h
@@ -191,4 +191,16 @@ static inline void memalloc_nofs_restore(unsigned int flags)
current->flags = (current->flags & ~PF_MEMALLOC_NOFS) | flags;
}
+static inline unsigned int memalloc_noreclaim_save(void)
+{
+ unsigned int flags = current->flags & PF_MEMALLOC;
+ current->flags |= PF_MEMALLOC;
+ return flags;
+}
+
+static inline void memalloc_noreclaim_restore(unsigned int flags)
+{
+ current->flags = (current->flags & ~PF_MEMALLOC) | flags;
+}
+
#endif /* _LINUX_SCHED_MM_H */
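A hedged usage sketch of the new PF_MEMALLOC scope helpers added above (the surrounding function is hypothetical): allocations between save and restore will not recurse into direct reclaim.

static void example_emergency_path(void)
{
	unsigned int noreclaim_flag;

	noreclaim_flag = memalloc_noreclaim_save();
	/* Allocations here run with PF_MEMALLOC set, so they will not
	 * re-enter reclaim and risk deadlocking the reclaim path itself. */
	memalloc_noreclaim_restore(noreclaim_flag);
}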
diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h
index 2cf446704cd4..c06d63b3a583 100644
--- a/include/linux/sched/signal.h
+++ b/include/linux/sched/signal.h
@@ -293,7 +293,6 @@ extern int kill_pid_info_as_cred(int, struct siginfo *, struct pid *,
const struct cred *, u32);
extern int kill_pgrp(struct pid *pid, int sig, int priv);
extern int kill_pid(struct pid *pid, int sig, int priv);
-extern int kill_proc_info(int, struct siginfo *, pid_t);
extern __must_check bool do_notify_parent(struct task_struct *, int);
extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent);
extern void force_sig(int, struct task_struct *);
diff --git a/include/linux/sem.h b/include/linux/sem.h
index 4fc222f8755d..9edec926e9d9 100644
--- a/include/linux/sem.h
+++ b/include/linux/sem.h
@@ -10,8 +10,7 @@ struct task_struct;
/* One sem_array data structure for each set of semaphores in the system. */
struct sem_array {
- struct kern_ipc_perm ____cacheline_aligned_in_smp
- sem_perm; /* permissions .. see ipc.h */
+ struct kern_ipc_perm sem_perm; /* permissions .. see ipc.h */
time_t sem_ctime; /* last change time */
struct sem *sem_base; /* ptr to first semaphore in array */
struct list_head pending_alter; /* pending operations */
diff --git a/include/linux/serdev.h b/include/linux/serdev.h
index 37395b8eb8f1..cda76c6506ca 100644
--- a/include/linux/serdev.h
+++ b/include/linux/serdev.h
@@ -41,12 +41,16 @@ struct serdev_device_ops {
* @nr: Device number on serdev bus.
* @ctrl: serdev controller managing this device.
* @ops: Device operations.
+ * @write_comp:	Completion used by serdev_device_write() internally
+ * @write_lock:	Lock to serialize access when writing data
*/
struct serdev_device {
struct device dev;
int nr;
struct serdev_controller *ctrl;
const struct serdev_device_ops *ops;
+ struct completion write_comp;
+ struct mutex write_lock;
};
static inline struct serdev_device *to_serdev_device(struct device *d)
@@ -170,7 +174,7 @@ static inline void serdev_controller_write_wakeup(struct serdev_controller *ctrl
if (!serdev || !serdev->ops->write_wakeup)
return;
- serdev->ops->write_wakeup(ctrl->serdev);
+ serdev->ops->write_wakeup(serdev);
}
static inline int serdev_controller_receive_buf(struct serdev_controller *ctrl,
@@ -182,7 +186,7 @@ static inline int serdev_controller_receive_buf(struct serdev_controller *ctrl,
if (!serdev || !serdev->ops->receive_buf)
return -EINVAL;
- return serdev->ops->receive_buf(ctrl->serdev, data, count);
+ return serdev->ops->receive_buf(serdev, data, count);
}
#if IS_ENABLED(CONFIG_SERIAL_DEV_BUS)
@@ -194,7 +198,8 @@ void serdev_device_set_flow_control(struct serdev_device *, bool);
void serdev_device_wait_until_sent(struct serdev_device *, long);
int serdev_device_get_tiocm(struct serdev_device *);
int serdev_device_set_tiocm(struct serdev_device *, int, int);
-int serdev_device_write_buf(struct serdev_device *, const unsigned char *, size_t);
+void serdev_device_write_wakeup(struct serdev_device *);
+int serdev_device_write(struct serdev_device *, const unsigned char *, size_t, unsigned long);
void serdev_device_write_flush(struct serdev_device *);
int serdev_device_write_room(struct serdev_device *);
@@ -240,7 +245,8 @@ static inline int serdev_device_set_tiocm(struct serdev_device *serdev, int set,
{
return -ENOTSUPP;
}
-static inline int serdev_device_write_buf(struct serdev_device *sdev, const unsigned char *buf, size_t count)
+static inline int serdev_device_write(struct serdev_device *sdev, const unsigned char *buf,
+ size_t count, unsigned long timeout)
{
return -ENODEV;
}
@@ -306,4 +312,11 @@ static inline struct device *serdev_tty_port_register(struct tty_port *port,
static inline void serdev_tty_port_unregister(struct tty_port *port) {}
#endif /* CONFIG_SERIAL_DEV_CTRL_TTYPORT */
+static inline int serdev_device_write_buf(struct serdev_device *serdev,
+ const unsigned char *data,
+ size_t count)
+{
+ return serdev_device_write(serdev, data, count, 0);
+}
+
#endif /*_LINUX_SERDEV_H */
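For clarity, a hedged sketch of how a client driver might use the reworked write API above: serdev_device_write() takes a timeout in jiffies to wait for the data to go out, while the serdev_device_write_buf() wrapper keeps the old non-waiting behaviour by passing a timeout of zero. The helper below is hypothetical and trims error handling.

static void example_send(struct serdev_device *serdev,
			 const unsigned char *msg, size_t len)
{
	/* Old-style call, does not wait (timeout == 0): */
	serdev_device_write_buf(serdev, msg, len);

	/* New call, waits up to one second for the write to complete: */
	serdev_device_write(serdev, msg, len, msecs_to_jiffies(1000));
}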
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index 58484fb35cc8..64d892f1e5cd 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -247,6 +247,7 @@ struct uart_port {
unsigned char suspended;
unsigned char irq_wake;
unsigned char unused[2];
+ const char *name; /* port name */
struct attribute_group *attr_group; /* port specific attributes */
const struct attribute_group **tty_groups; /* all attributes (serial core use only) */
struct serial_rs485 rs485;
diff --git a/include/linux/signal.h b/include/linux/signal.h
index 94ad6eea9550..1f5a16620693 100644
--- a/include/linux/signal.h
+++ b/include/linux/signal.h
@@ -390,10 +390,6 @@ int unhandled_signal(struct task_struct *tsk, int sig);
#define sig_kernel_ignore(sig) siginmask(sig, SIG_KERNEL_IGNORE_MASK)
#define sig_kernel_stop(sig) siginmask(sig, SIG_KERNEL_STOP_MASK)
-#define sig_user_defined(t, signr) \
- (((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_DFL) && \
- ((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_IGN))
-
#define sig_fatal(t, signr) \
(!siginmask(signr, SIG_KERNEL_IGNORE_MASK|SIG_KERNEL_STOP_MASK) && \
(t)->sighand->action[(signr)-1].sa.sa_handler == SIG_DFL)
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 3c37a8c51921..04a7f7993e67 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -28,7 +28,7 @@
#define SLAB_STORE_USER 0x00010000UL /* DEBUG: Store the last owner for bug hunting */
#define SLAB_PANIC 0x00040000UL /* Panic if kmem_cache_create() fails */
/*
- * SLAB_DESTROY_BY_RCU - **WARNING** READ THIS!
+ * SLAB_TYPESAFE_BY_RCU - **WARNING** READ THIS!
*
* This delays freeing the SLAB page by a grace period, it does _NOT_
* delay object freeing. This means that if you do kmem_cache_free()
@@ -61,8 +61,10 @@
*
* rcu_read_lock before reading the address, then rcu_read_unlock after
* taking the spinlock within the structure expected at that address.
+ *
+ * Note that SLAB_TYPESAFE_BY_RCU was originally named SLAB_DESTROY_BY_RCU.
*/
-#define SLAB_DESTROY_BY_RCU 0x00080000UL /* Defer freeing slabs to RCU */
+#define SLAB_TYPESAFE_BY_RCU 0x00080000UL /* Defer freeing slabs to RCU */
#define SLAB_MEM_SPREAD 0x00100000UL /* Spread some memory over cpuset */
#define SLAB_TRACE 0x00200000UL /* Trace allocations and frees */
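A hedged sketch of the renamed flag in use (the structure and cache names are hypothetical); existing callers of SLAB_DESTROY_BY_RCU simply switch to the new spelling:

struct example_obj {
	spinlock_t lock;
	int key;
};

static struct kmem_cache *example_cache;

static int __init example_cache_init(void)
{
	example_cache = kmem_cache_create("example_objs",
					  sizeof(struct example_obj), 0,
					  SLAB_TYPESAFE_BY_RCU, NULL);
	return example_cache ? 0 : -ENOMEM;
}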
diff --git a/include/linux/soc/samsung/exynos-regs-pmu.h b/include/linux/soc/samsung/exynos-regs-pmu.h
index 49df0a01a2cc..bebdde5dccd6 100644
--- a/include/linux/soc/samsung/exynos-regs-pmu.h
+++ b/include/linux/soc/samsung/exynos-regs-pmu.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2010-2012 Samsung Electronics Co., Ltd.
+ * Copyright (c) 2010-2015 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
* EXYNOS - Power management unit definition
@@ -50,6 +50,14 @@
#define S5P_WAKEUP_MASK 0x0608
#define S5P_WAKEUP_MASK2 0x0614
+/* MIPI_PHYn_CONTROL, valid for Exynos3250, Exynos4, Exynos5250 and Exynos5433 */
+#define EXYNOS4_MIPI_PHY_CONTROL(n) (0x0710 + (n) * 4)
+/* Phy enable bit, common for all phy registers, not only MIPI */
+#define EXYNOS4_PHY_ENABLE (1 << 0)
+#define EXYNOS4_MIPI_PHY_SRESETN (1 << 1)
+#define EXYNOS4_MIPI_PHY_MRESETN (1 << 2)
+#define EXYNOS4_MIPI_PHY_RESET_MASK (3 << 1)
+
#define S5P_INFORM0 0x0800
#define S5P_INFORM1 0x0804
#define S5P_INFORM5 0x0814
@@ -342,6 +350,8 @@
#define EXYNOS5_AUTO_WDTRESET_DISABLE 0x0408
#define EXYNOS5_MASK_WDTRESET_REQUEST 0x040C
+#define EXYNOS5_USBDRD_PHY_CONTROL 0x0704
+#define EXYNOS5_DPTX_PHY_CONTROL 0x0720
#define EXYNOS5_USE_RETENTION BIT(4)
#define EXYNOS5_SYS_WDTRESET (1 << 20)
@@ -495,6 +505,9 @@
#define EXYNOS5420_KFC_CORE_RESET(_nr) \
((EXYNOS5420_KFC_CORE_RESET0 | EXYNOS5420_KFC_ETM_RESET0) << (_nr))
+#define EXYNOS5420_USBDRD1_PHY_CONTROL 0x0708
+#define EXYNOS5420_MIPI_PHY_CONTROL(n) (0x0714 + (n) * 4)
+#define EXYNOS5420_DPTX_PHY_CONTROL 0x0728
#define EXYNOS5420_ARM_CORE2_SYS_PWR_REG 0x1020
#define EXYNOS5420_DIS_IRQ_ARM_CORE2_LOCAL_SYS_PWR_REG 0x1024
#define EXYNOS5420_DIS_IRQ_ARM_CORE2_CENTRAL_SYS_PWR_REG 0x1028
@@ -632,6 +645,7 @@
| EXYNOS5420_KFC_USE_STANDBY_WFI3)
/* For EXYNOS5433 */
+#define EXYNOS5433_USBHOST30_PHY_CONTROL (0x0728)
#define EXYNOS5433_PAD_RETENTION_AUD_OPTION (0x3028)
#define EXYNOS5433_PAD_RETENTION_MMC2_OPTION (0x30C8)
#define EXYNOS5433_PAD_RETENTION_TOP_OPTION (0x3108)
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index a598cf3ac70c..167ad8831aaf 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -22,7 +22,7 @@
* Lai Jiangshan <laijs@cn.fujitsu.com>
*
* For detailed explanation of Read-Copy Update mechanism see -
- * Documentation/RCU/ *.txt
+ * Documentation/RCU/ *.txt
*
*/
@@ -32,35 +32,9 @@
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>
+#include <linux/rcu_segcblist.h>
-struct srcu_array {
- unsigned long lock_count[2];
- unsigned long unlock_count[2];
-};
-
-struct rcu_batch {
- struct rcu_head *head, **tail;
-};
-
-#define RCU_BATCH_INIT(name) { NULL, &(name.head) }
-
-struct srcu_struct {
- unsigned long completed;
- struct srcu_array __percpu *per_cpu_ref;
- spinlock_t queue_lock; /* protect ->batch_queue, ->running */
- bool running;
- /* callbacks just queued */
- struct rcu_batch batch_queue;
- /* callbacks try to do the first check_zero */
- struct rcu_batch batch_check0;
- /* callbacks done with the first check_zero and the flip */
- struct rcu_batch batch_check1;
- struct rcu_batch batch_done;
- struct delayed_work work;
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
- struct lockdep_map dep_map;
-#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
-};
+struct srcu_struct;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
@@ -82,46 +56,15 @@ int init_srcu_struct(struct srcu_struct *sp);
#define __SRCU_DEP_MAP_INIT(srcu_name)
#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
-void process_srcu(struct work_struct *work);
-
-#define __SRCU_STRUCT_INIT(name) \
- { \
- .completed = -300, \
- .per_cpu_ref = &name##_srcu_array, \
- .queue_lock = __SPIN_LOCK_UNLOCKED(name.queue_lock), \
- .running = false, \
- .batch_queue = RCU_BATCH_INIT(name.batch_queue), \
- .batch_check0 = RCU_BATCH_INIT(name.batch_check0), \
- .batch_check1 = RCU_BATCH_INIT(name.batch_check1), \
- .batch_done = RCU_BATCH_INIT(name.batch_done), \
- .work = __DELAYED_WORK_INITIALIZER(name.work, process_srcu, 0),\
- __SRCU_DEP_MAP_INIT(name) \
- }
-
-/*
- * Define and initialize a srcu struct at build time.
- * Do -not- call init_srcu_struct() nor cleanup_srcu_struct() on it.
- *
- * Note that although DEFINE_STATIC_SRCU() hides the name from other
- * files, the per-CPU variable rules nevertheless require that the
- * chosen name be globally unique. These rules also prohibit use of
- * DEFINE_STATIC_SRCU() within a function. If these rules are too
- * restrictive, declare the srcu_struct manually. For example, in
- * each file:
- *
- * static struct srcu_struct my_srcu;
- *
- * Then, before the first use of each my_srcu, manually initialize it:
- *
- * init_srcu_struct(&my_srcu);
- *
- * See include/linux/percpu-defs.h for the rules on per-CPU variables.
- */
-#define __DEFINE_SRCU(name, is_static) \
- static DEFINE_PER_CPU(struct srcu_array, name##_srcu_array);\
- is_static struct srcu_struct name = __SRCU_STRUCT_INIT(name)
-#define DEFINE_SRCU(name) __DEFINE_SRCU(name, /* not static */)
-#define DEFINE_STATIC_SRCU(name) __DEFINE_SRCU(name, static)
+#ifdef CONFIG_TINY_SRCU
+#include <linux/srcutiny.h>
+#elif defined(CONFIG_TREE_SRCU)
+#include <linux/srcutree.h>
+#elif defined(CONFIG_CLASSIC_SRCU)
+#include <linux/srcuclassic.h>
+#else
+#error "Unknown SRCU implementation specified to kernel configuration"
+#endif
/**
* call_srcu() - Queue a callback for invocation after an SRCU grace period
@@ -147,9 +90,6 @@ void cleanup_srcu_struct(struct srcu_struct *sp);
int __srcu_read_lock(struct srcu_struct *sp) __acquires(sp);
void __srcu_read_unlock(struct srcu_struct *sp, int idx) __releases(sp);
void synchronize_srcu(struct srcu_struct *sp);
-void synchronize_srcu_expedited(struct srcu_struct *sp);
-unsigned long srcu_batches_completed(struct srcu_struct *sp);
-void srcu_barrier(struct srcu_struct *sp);
#ifdef CONFIG_DEBUG_LOCK_ALLOC
diff --git a/include/linux/srcuclassic.h b/include/linux/srcuclassic.h
new file mode 100644
index 000000000000..5753f7322262
--- /dev/null
+++ b/include/linux/srcuclassic.h
@@ -0,0 +1,115 @@
+/*
+ * Sleepable Read-Copy Update mechanism for mutual exclusion,
+ * classic v4.11 variant.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * Copyright (C) IBM Corporation, 2017
+ *
+ * Author: Paul McKenney <paulmck@us.ibm.com>
+ */
+
+#ifndef _LINUX_SRCU_CLASSIC_H
+#define _LINUX_SRCU_CLASSIC_H
+
+struct srcu_array {
+ unsigned long lock_count[2];
+ unsigned long unlock_count[2];
+};
+
+struct rcu_batch {
+ struct rcu_head *head, **tail;
+};
+
+#define RCU_BATCH_INIT(name) { NULL, &(name.head) }
+
+struct srcu_struct {
+ unsigned long completed;
+ struct srcu_array __percpu *per_cpu_ref;
+ spinlock_t queue_lock; /* protect ->batch_queue, ->running */
+ bool running;
+ /* callbacks just queued */
+ struct rcu_batch batch_queue;
+ /* callbacks try to do the first check_zero */
+ struct rcu_batch batch_check0;
+ /* callbacks done with the first check_zero and the flip */
+ struct rcu_batch batch_check1;
+ struct rcu_batch batch_done;
+ struct delayed_work work;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+};
+
+void process_srcu(struct work_struct *work);
+
+#define __SRCU_STRUCT_INIT(name) \
+ { \
+ .completed = -300, \
+ .per_cpu_ref = &name##_srcu_array, \
+ .queue_lock = __SPIN_LOCK_UNLOCKED(name.queue_lock), \
+ .running = false, \
+ .batch_queue = RCU_BATCH_INIT(name.batch_queue), \
+ .batch_check0 = RCU_BATCH_INIT(name.batch_check0), \
+ .batch_check1 = RCU_BATCH_INIT(name.batch_check1), \
+ .batch_done = RCU_BATCH_INIT(name.batch_done), \
+ .work = __DELAYED_WORK_INITIALIZER(name.work, process_srcu, 0),\
+ __SRCU_DEP_MAP_INIT(name) \
+ }
+
+/*
+ * Define and initialize a srcu struct at build time.
+ * Do -not- call init_srcu_struct() nor cleanup_srcu_struct() on it.
+ *
+ * Note that although DEFINE_STATIC_SRCU() hides the name from other
+ * files, the per-CPU variable rules nevertheless require that the
+ * chosen name be globally unique. These rules also prohibit use of
+ * DEFINE_STATIC_SRCU() within a function. If these rules are too
+ * restrictive, declare the srcu_struct manually. For example, in
+ * each file:
+ *
+ * static struct srcu_struct my_srcu;
+ *
+ * Then, before the first use of each my_srcu, manually initialize it:
+ *
+ * init_srcu_struct(&my_srcu);
+ *
+ * See include/linux/percpu-defs.h for the rules on per-CPU variables.
+ */
+#define __DEFINE_SRCU(name, is_static) \
+ static DEFINE_PER_CPU(struct srcu_array, name##_srcu_array);\
+ is_static struct srcu_struct name = __SRCU_STRUCT_INIT(name)
+#define DEFINE_SRCU(name) __DEFINE_SRCU(name, /* not static */)
+#define DEFINE_STATIC_SRCU(name) __DEFINE_SRCU(name, static)
+
+void synchronize_srcu_expedited(struct srcu_struct *sp);
+void srcu_barrier(struct srcu_struct *sp);
+unsigned long srcu_batches_completed(struct srcu_struct *sp);
+
+static inline void srcutorture_get_gp_data(enum rcutorture_type test_type,
+ struct srcu_struct *sp, int *flags,
+ unsigned long *gpnum,
+ unsigned long *completed)
+{
+ if (test_type != SRCU_FLAVOR)
+ return;
+ *flags = 0;
+ *completed = sp->completed;
+ *gpnum = *completed;
+	if (sp->batch_queue.head || sp->batch_check0.head || sp->batch_check1.head)
+ (*gpnum)++;
+}
+
+#endif
diff --git a/include/linux/srcutiny.h b/include/linux/srcutiny.h
new file mode 100644
index 000000000000..42311ee0334f
--- /dev/null
+++ b/include/linux/srcutiny.h
@@ -0,0 +1,93 @@
+/*
+ * Sleepable Read-Copy Update mechanism for mutual exclusion,
+ * tiny variant.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * Copyright (C) IBM Corporation, 2017
+ *
+ * Author: Paul McKenney <paulmck@us.ibm.com>
+ */
+
+#ifndef _LINUX_SRCU_TINY_H
+#define _LINUX_SRCU_TINY_H
+
+#include <linux/swait.h>
+
+struct srcu_struct {
+ int srcu_lock_nesting[2]; /* srcu_read_lock() nesting depth. */
+ struct swait_queue_head srcu_wq;
+ /* Last srcu_read_unlock() wakes GP. */
+ unsigned long srcu_gp_seq; /* GP seq # for callback tagging. */
+ struct rcu_segcblist srcu_cblist;
+ /* Pending SRCU callbacks. */
+ int srcu_idx; /* Current reader array element. */
+ bool srcu_gp_running; /* GP workqueue running? */
+ bool srcu_gp_waiting; /* GP waiting for readers? */
+ struct work_struct srcu_work; /* For driving grace periods. */
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+};
+
+void srcu_drive_gp(struct work_struct *wp);
+
+#define __SRCU_STRUCT_INIT(name) \
+{ \
+ .srcu_wq = __SWAIT_QUEUE_HEAD_INITIALIZER(name.srcu_wq), \
+ .srcu_cblist = RCU_SEGCBLIST_INITIALIZER(name.srcu_cblist), \
+ .srcu_work = __WORK_INITIALIZER(name.srcu_work, srcu_drive_gp), \
+ __SRCU_DEP_MAP_INIT(name) \
+}
+
+/*
+ * This odd _STATIC_ arrangement is needed for API compatibility with
+ * Tree SRCU, which needs some per-CPU data.
+ */
+#define DEFINE_SRCU(name) \
+ struct srcu_struct name = __SRCU_STRUCT_INIT(name)
+#define DEFINE_STATIC_SRCU(name) \
+ static struct srcu_struct name = __SRCU_STRUCT_INIT(name)
+
+void synchronize_srcu(struct srcu_struct *sp);
+
+static inline void synchronize_srcu_expedited(struct srcu_struct *sp)
+{
+ synchronize_srcu(sp);
+}
+
+static inline void srcu_barrier(struct srcu_struct *sp)
+{
+ synchronize_srcu(sp);
+}
+
+static inline unsigned long srcu_batches_completed(struct srcu_struct *sp)
+{
+ return 0;
+}
+
+static inline void srcutorture_get_gp_data(enum rcutorture_type test_type,
+ struct srcu_struct *sp, int *flags,
+ unsigned long *gpnum,
+ unsigned long *completed)
+{
+ if (test_type != SRCU_FLAVOR)
+ return;
+ *flags = 0;
+ *completed = sp->srcu_gp_seq;
+ *gpnum = *completed;
+}
+
+#endif
diff --git a/include/linux/srcutree.h b/include/linux/srcutree.h
new file mode 100644
index 000000000000..32e86d85fd11
--- /dev/null
+++ b/include/linux/srcutree.h
@@ -0,0 +1,150 @@
+/*
+ * Sleepable Read-Copy Update mechanism for mutual exclusion,
+ * tree variant.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * Copyright (C) IBM Corporation, 2017
+ *
+ * Author: Paul McKenney <paulmck@us.ibm.com>
+ */
+
+#ifndef _LINUX_SRCU_TREE_H
+#define _LINUX_SRCU_TREE_H
+
+#include <linux/rcu_node_tree.h>
+#include <linux/completion.h>
+
+struct srcu_node;
+struct srcu_struct;
+
+/*
+ * Per-CPU structure feeding into leaf srcu_node, similar in function
+ * to rcu_node.
+ */
+struct srcu_data {
+ /* Read-side state. */
+ unsigned long srcu_lock_count[2]; /* Locks per CPU. */
+ unsigned long srcu_unlock_count[2]; /* Unlocks per CPU. */
+
+ /* Update-side state. */
+ spinlock_t lock ____cacheline_internodealigned_in_smp;
+ struct rcu_segcblist srcu_cblist; /* List of callbacks.*/
+ unsigned long srcu_gp_seq_needed; /* Furthest future GP needed. */
+ unsigned long srcu_gp_seq_needed_exp; /* Furthest future exp GP. */
+ bool srcu_cblist_invoking; /* Invoking these CBs? */
+ struct delayed_work work; /* Context for CB invoking. */
+ struct rcu_head srcu_barrier_head; /* For srcu_barrier() use. */
+ struct srcu_node *mynode; /* Leaf srcu_node. */
+ unsigned long grpmask; /* Mask for leaf srcu_node */
+ /* ->srcu_data_have_cbs[]. */
+ int cpu;
+ struct srcu_struct *sp;
+};
+
+/*
+ * Node in SRCU combining tree, similar in function to rcu_data.
+ */
+struct srcu_node {
+ spinlock_t lock;
+ unsigned long srcu_have_cbs[4]; /* GP seq for children */
+ /* having CBs, but only */
+						/*  if > ->srcu_gp_seq. */
+ unsigned long srcu_data_have_cbs[4]; /* Which srcu_data structs */
+ /* have CBs for given GP? */
+ unsigned long srcu_gp_seq_needed_exp; /* Furthest future exp GP. */
+ struct srcu_node *srcu_parent; /* Next up in tree. */
+ int grplo; /* Least CPU for node. */
+ int grphi; /* Biggest CPU for node. */
+};
+
+/*
+ * Per-SRCU-domain structure, similar in function to rcu_state.
+ */
+struct srcu_struct {
+ struct srcu_node node[NUM_RCU_NODES]; /* Combining tree. */
+ struct srcu_node *level[RCU_NUM_LVLS + 1];
+ /* First node at each level. */
+ struct mutex srcu_cb_mutex; /* Serialize CB preparation. */
+ spinlock_t gp_lock; /* protect ->srcu_cblist */
+ struct mutex srcu_gp_mutex; /* Serialize GP work. */
+ unsigned int srcu_idx; /* Current rdr array element. */
+ unsigned long srcu_gp_seq; /* Grace-period seq #. */
+ unsigned long srcu_gp_seq_needed; /* Latest gp_seq needed. */
+ unsigned long srcu_gp_seq_needed_exp; /* Furthest future exp GP. */
+ unsigned long srcu_last_gp_end; /* Last GP end timestamp (ns) */
+ struct srcu_data __percpu *sda; /* Per-CPU srcu_data array. */
+ unsigned long srcu_barrier_seq; /* srcu_barrier seq #. */
+ struct mutex srcu_barrier_mutex; /* Serialize barrier ops. */
+ struct completion srcu_barrier_completion;
+ /* Awaken barrier rq at end. */
+ atomic_t srcu_barrier_cpu_cnt; /* # CPUs not yet posting a */
+ /* callback for the barrier */
+ /* operation. */
+ struct delayed_work work;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+};
+
+/* Values for state variable (bottom bits of ->srcu_gp_seq). */
+#define SRCU_STATE_IDLE 0
+#define SRCU_STATE_SCAN1 1
+#define SRCU_STATE_SCAN2 2
+
+void process_srcu(struct work_struct *work);
+
+#define __SRCU_STRUCT_INIT(name) \
+ { \
+ .sda = &name##_srcu_data, \
+ .gp_lock = __SPIN_LOCK_UNLOCKED(name.gp_lock), \
+ .srcu_gp_seq_needed = 0 - 1, \
+ __SRCU_DEP_MAP_INIT(name) \
+ }
+
+/*
+ * Define and initialize a srcu struct at build time.
+ * Do -not- call init_srcu_struct() nor cleanup_srcu_struct() on it.
+ *
+ * Note that although DEFINE_STATIC_SRCU() hides the name from other
+ * files, the per-CPU variable rules nevertheless require that the
+ * chosen name be globally unique. These rules also prohibit use of
+ * DEFINE_STATIC_SRCU() within a function. If these rules are too
+ * restrictive, declare the srcu_struct manually. For example, in
+ * each file:
+ *
+ * static struct srcu_struct my_srcu;
+ *
+ * Then, before the first use of each my_srcu, manually initialize it:
+ *
+ * init_srcu_struct(&my_srcu);
+ *
+ * See include/linux/percpu-defs.h for the rules on per-CPU variables.
+ */
+#define __DEFINE_SRCU(name, is_static) \
+ static DEFINE_PER_CPU(struct srcu_data, name##_srcu_data);\
+ is_static struct srcu_struct name = __SRCU_STRUCT_INIT(name)
+#define DEFINE_SRCU(name) __DEFINE_SRCU(name, /* not static */)
+#define DEFINE_STATIC_SRCU(name) __DEFINE_SRCU(name, static)
+
+void synchronize_srcu_expedited(struct srcu_struct *sp);
+void srcu_barrier(struct srcu_struct *sp);
+unsigned long srcu_batches_completed(struct srcu_struct *sp);
+
+void srcutorture_get_gp_data(enum rcutorture_type test_type,
+ struct srcu_struct *sp, int *flags,
+ unsigned long *gpnum, unsigned long *completed);
+
+#endif
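Whichever of the three SRCU implementations above is selected by Kconfig, the caller-visible pattern stays the same. A hedged sketch (the domain and functions are hypothetical):

DEFINE_STATIC_SRCU(example_srcu);

static void example_reader(void)
{
	int idx;

	idx = srcu_read_lock(&example_srcu);
	/* ... dereference example_srcu-protected pointers here ... */
	srcu_read_unlock(&example_srcu, idx);
}

static void example_updater(void)
{
	/* ... unpublish the old data ... */
	synchronize_srcu(&example_srcu);
	/* ... no reader can still hold a reference; safe to free ... */
}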
diff --git a/include/linux/string.h b/include/linux/string.h
index 26b6f6a66f83..537918f8a98e 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -114,6 +114,14 @@ extern int memcmp(const void *,const void *,__kernel_size_t);
#ifndef __HAVE_ARCH_MEMCHR
extern void * memchr(const void *,int,__kernel_size_t);
#endif
+#ifndef __HAVE_ARCH_MEMCPY_MCSAFE
+static inline __must_check int memcpy_mcsafe(void *dst, const void *src,
+ size_t cnt)
+{
+ memcpy(dst, src, cnt);
+ return 0;
+}
+#endif
void *memchr_inv(const void *s, int c, size_t n);
char *strreplace(char *s, char old, char new);
@@ -135,6 +143,16 @@ static inline int strtobool(const char *s, bool *res)
}
int match_string(const char * const *array, size_t n, const char *string);
+int __sysfs_match_string(const char * const *array, size_t n, const char *s);
+
+/**
+ * sysfs_match_string - matches given string in an array
+ * @_a: array of strings
+ * @_s: string to match with
+ *
+ * Helper for __sysfs_match_string(). Calculates the size of @_a automatically.
+ */
+#define sysfs_match_string(_a, _s) __sysfs_match_string(_a, ARRAY_SIZE(_a), _s)
#ifdef CONFIG_BINARY_PRINTF
int vbin_printf(u32 *bin_buf, size_t size, const char *fmt, va_list args);
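Two hedged usage sketches for the helpers added to this header (the device attribute and string array are illustrative): memcpy_mcsafe() lets callers treat a nonzero return as a read that could not complete safely, and sysfs_match_string() maps a sysfs store buffer onto an index into a fixed string array.

static int example_copy(void *dst, const void *src, size_t len)
{
	if (memcpy_mcsafe(dst, src, len))
		return -EIO;	/* source could not be read safely */
	return 0;
}

static const char * const example_modes[] = { "off", "auto", "manual" };

static ssize_t mode_store(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t count)
{
	int idx = sysfs_match_string(example_modes, buf);

	if (idx < 0)
		return idx;	/* no match: propagate the error */
	/* ... apply example_modes[idx] ... */
	return count;
}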
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index d9718378a8be..0b1cf32edfd7 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -189,6 +189,8 @@ struct platform_suspend_ops {
struct platform_freeze_ops {
int (*begin)(void);
int (*prepare)(void);
+ void (*wake)(void);
+ void (*sync)(void);
void (*restore)(void);
void (*end)(void);
};
@@ -428,7 +430,8 @@ extern unsigned int pm_wakeup_irq;
extern bool pm_wakeup_pending(void);
extern void pm_system_wakeup(void);
-extern void pm_wakeup_clear(void);
+extern void pm_system_cancel_wakeup(void);
+extern void pm_wakeup_clear(bool reset);
extern void pm_system_irq_wakeup(unsigned int irq_number);
extern bool pm_get_wakeup_count(unsigned int *count, bool block);
extern bool pm_save_wakeup_count(unsigned int count);
@@ -478,7 +481,7 @@ static inline int unregister_pm_notifier(struct notifier_block *nb)
static inline bool pm_wakeup_pending(void) { return false; }
static inline void pm_system_wakeup(void) {}
-static inline void pm_wakeup_clear(void) {}
+static inline void pm_wakeup_clear(bool reset) {}
static inline void pm_system_irq_wakeup(unsigned int irq_number) {}
static inline void lock_system_sleep(void) {}
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
index b7e82049fec7..80d07816def0 100644
--- a/include/linux/sysctl.h
+++ b/include/linux/sysctl.h
@@ -180,7 +180,6 @@ extern void setup_sysctl_set(struct ctl_table_set *p,
int (*is_seen)(struct ctl_table_set *));
extern void retire_sysctl_set(struct ctl_table_set *set);
-void register_sysctl_root(struct ctl_table_root *root);
struct ctl_table_header *__register_sysctl_table(
struct ctl_table_set *set,
const char *path, struct ctl_table *table);
diff --git a/include/linux/tee_drv.h b/include/linux/tee_drv.h
new file mode 100644
index 000000000000..0f175b8f6456
--- /dev/null
+++ b/include/linux/tee_drv.h
@@ -0,0 +1,277 @@
+/*
+ * Copyright (c) 2015-2016, Linaro Limited
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __TEE_DRV_H
+#define __TEE_DRV_H
+
+#include <linux/types.h>
+#include <linux/idr.h>
+#include <linux/list.h>
+#include <linux/tee.h>
+
+/*
+ * The file describes the API provided by the generic TEE driver to the
+ * specific TEE driver.
+ */
+
+#define TEE_SHM_MAPPED 0x1 /* Memory mapped by the kernel */
+#define TEE_SHM_DMA_BUF 0x2 /* Memory with dma-buf handle */
+
+struct tee_device;
+struct tee_shm;
+struct tee_shm_pool;
+
+/**
+ * struct tee_context - driver specific context on file pointer data
+ * @teedev:	pointer to this driver's struct tee_device
+ * @list_shm:	List of shared memory objects owned by this context
+ * @data: driver specific context data, managed by the driver
+ */
+struct tee_context {
+ struct tee_device *teedev;
+ struct list_head list_shm;
+ void *data;
+};
+
+struct tee_param_memref {
+ size_t shm_offs;
+ size_t size;
+ struct tee_shm *shm;
+};
+
+struct tee_param_value {
+ u64 a;
+ u64 b;
+ u64 c;
+};
+
+struct tee_param {
+ u64 attr;
+ union {
+ struct tee_param_memref memref;
+ struct tee_param_value value;
+ } u;
+};
+
+/**
+ * struct tee_driver_ops - driver operations vtable
+ * @get_version: returns version of driver
+ * @open: called when the device file is opened
+ * @release: release this open file
+ * @open_session: open a new session
+ * @close_session: close a session
+ * @invoke_func: invoke a trusted function
+ * @cancel_req: request cancel of an ongoing invoke or open
+ * @supp_revc: called for supplicant to get a command
+ * @supp_send: called for supplicant to send a response
+ */
+struct tee_driver_ops {
+ void (*get_version)(struct tee_device *teedev,
+ struct tee_ioctl_version_data *vers);
+ int (*open)(struct tee_context *ctx);
+ void (*release)(struct tee_context *ctx);
+ int (*open_session)(struct tee_context *ctx,
+ struct tee_ioctl_open_session_arg *arg,
+ struct tee_param *param);
+ int (*close_session)(struct tee_context *ctx, u32 session);
+ int (*invoke_func)(struct tee_context *ctx,
+ struct tee_ioctl_invoke_arg *arg,
+ struct tee_param *param);
+ int (*cancel_req)(struct tee_context *ctx, u32 cancel_id, u32 session);
+ int (*supp_recv)(struct tee_context *ctx, u32 *func, u32 *num_params,
+ struct tee_param *param);
+ int (*supp_send)(struct tee_context *ctx, u32 ret, u32 num_params,
+ struct tee_param *param);
+};
+
+/**
+ * struct tee_desc - Describes the TEE driver to the subsystem
+ * @name: name of driver
+ * @ops: driver operations vtable
+ * @owner: module providing the driver
+ * @flags: Extra properties of driver, defined by TEE_DESC_* below
+ */
+#define TEE_DESC_PRIVILEGED 0x1
+struct tee_desc {
+ const char *name;
+ const struct tee_driver_ops *ops;
+ struct module *owner;
+ u32 flags;
+};
+
+/**
+ * tee_device_alloc() - Allocate a new struct tee_device instance
+ * @teedesc: Descriptor for this driver
+ * @dev: Parent device for this device
+ * @pool: Shared memory pool, NULL if not used
+ * @driver_data: Private driver data for this device
+ *
+ * Allocates a new struct tee_device instance. The device is
+ * removed by tee_device_unregister().
+ *
+ * @returns a pointer to a 'struct tee_device' or an ERR_PTR on failure
+ */
+struct tee_device *tee_device_alloc(const struct tee_desc *teedesc,
+ struct device *dev,
+ struct tee_shm_pool *pool,
+ void *driver_data);
+
+/**
+ * tee_device_register() - Registers a TEE device
+ * @teedev: Device to register
+ *
+ * tee_device_unregister() needs to be called to remove the @teedev if
+ * this function fails.
+ *
+ * @returns < 0 on failure
+ */
+int tee_device_register(struct tee_device *teedev);
+
+/**
+ * tee_device_unregister() - Removes a TEE device
+ * @teedev: Device to unregister
+ *
+ * This function should be called to remove the @teedev even if
+ * tee_device_register() hasn't been called yet. Does nothing if
+ * @teedev is NULL.
+ */
+void tee_device_unregister(struct tee_device *teedev);
+
+/**
+ * struct tee_shm_pool_mem_info - holds information needed to create a shared
+ * memory pool
+ * @vaddr: Virtual address of start of pool
+ * @paddr: Physical address of start of pool
+ * @size: Size in bytes of the pool
+ */
+struct tee_shm_pool_mem_info {
+ unsigned long vaddr;
+ phys_addr_t paddr;
+ size_t size;
+};
+
+/**
+ * tee_shm_pool_alloc_res_mem() - Create a shared memory pool from reserved
+ * memory range
+ * @priv_info: Information for driver private shared memory pool
+ * @dmabuf_info: Information for dma-buf shared memory pool
+ *
+ * Start and end of pools must be page aligned.
+ *
+ * Allocations with the TEE_SHM_DMA_BUF flag set will use the range supplied
+ * in @dmabuf_info; others will use the range provided by @priv_info.
+ *
+ * @returns pointer to a 'struct tee_shm_pool' or an ERR_PTR on failure.
+ */
+struct tee_shm_pool *
+tee_shm_pool_alloc_res_mem(struct tee_shm_pool_mem_info *priv_info,
+ struct tee_shm_pool_mem_info *dmabuf_info);
+
+/**
+ * tee_shm_pool_free() - Free a shared memory pool
+ * @pool: The shared memory pool to free
+ *
+ * There must be no remaining shared memory allocated from this pool when
+ * this function is called.
+ */
+void tee_shm_pool_free(struct tee_shm_pool *pool);
+
+/**
+ * tee_get_drvdata() - Return driver_data pointer
+ * @returns the driver_data pointer supplied to tee_device_alloc().
+ */
+void *tee_get_drvdata(struct tee_device *teedev);
+
+/**
+ * tee_shm_alloc() - Allocate shared memory
+ * @ctx: Context that allocates the shared memory
+ * @size: Requested size of shared memory
+ * @flags: Flags setting properties for the requested shared memory.
+ *
+ * Memory allocated as global shared memory is automatically freed when the
+ * TEE file pointer is closed. The @flags field uses the bits defined by
+ * TEE_SHM_* above. TEE_SHM_MAPPED must currently always be set. If
+ * TEE_SHM_DMA_BUF global shared memory will be allocated and associated
+ * with a dma-buf handle, else driver private memory.
+ *
+ * @returns a pointer to 'struct tee_shm'
+ */
+struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags);
+
+/**
+ * tee_shm_free() - Free shared memory
+ * @shm: Handle to shared memory to free
+ */
+void tee_shm_free(struct tee_shm *shm);
+
+/**
+ * tee_shm_put() - Decrease reference count on a shared memory handle
+ * @shm: Shared memory handle
+ */
+void tee_shm_put(struct tee_shm *shm);
+
+/**
+ * tee_shm_va2pa() - Get physical address of a virtual address
+ * @shm: Shared memory handle
+ * @va:		Virtual address to translate
+ * @pa: Returned physical address
+ * @returns 0 on success and < 0 on failure
+ */
+int tee_shm_va2pa(struct tee_shm *shm, void *va, phys_addr_t *pa);
+
+/**
+ * tee_shm_pa2va() - Get virtual address of a physical address
+ * @shm: Shared memory handle
+ * @pa:		Physical address to translate
+ * @va: Returned virtual address
+ * @returns 0 on success and < 0 on failure
+ */
+int tee_shm_pa2va(struct tee_shm *shm, phys_addr_t pa, void **va);
+
+/**
+ * tee_shm_get_va() - Get virtual address of a shared memory plus an offset
+ * @shm: Shared memory handle
+ * @offs: Offset from start of this shared memory
+ * @returns virtual address of the shared memory + offs if offs is within
+ * the bounds of this shared memory, else an ERR_PTR
+ */
+void *tee_shm_get_va(struct tee_shm *shm, size_t offs);
+
+/**
+ * tee_shm_get_pa() - Get physical address of a shared memory plus an offset
+ * @shm: Shared memory handle
+ * @offs: Offset from start of this shared memory
+ * @pa: Physical address to return
+ * @returns 0 if offs is within the bounds of this shared memory, else an
+ * error code.
+ */
+int tee_shm_get_pa(struct tee_shm *shm, size_t offs, phys_addr_t *pa);
+
+/**
+ * tee_shm_get_id() - Get id of a shared memory object
+ * @shm: Shared memory handle
+ * @returns id
+ */
+int tee_shm_get_id(struct tee_shm *shm);
+
+/**
+ * tee_shm_get_from_id() - Find shared memory object and increase reference
+ * count
+ * @ctx: Context owning the shared memory
+ * @id: Id of shared memory object
+ * @returns a pointer to 'struct tee_shm' on success or an ERR_PTR on failure
+ */
+struct tee_shm *tee_shm_get_from_id(struct tee_context *ctx, int id);
+
+#endif /*__TEE_DRV_H*/
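To tie the shared-memory calls above together, here is a hedged sketch of a client pattern (the tee_context is assumed to have been obtained elsewhere; the function name is hypothetical):

static int example_share_buffer(struct tee_context *ctx, size_t len)
{
	struct tee_shm *shm;
	void *va;

	shm = tee_shm_alloc(ctx, len, TEE_SHM_MAPPED | TEE_SHM_DMA_BUF);
	if (IS_ERR(shm))
		return PTR_ERR(shm);

	va = tee_shm_get_va(shm, 0);
	if (IS_ERR(va)) {
		tee_shm_free(shm);
		return PTR_ERR(va);
	}

	/* ... fill va and hand tee_shm_get_id(shm) to the TEE ... */

	tee_shm_free(shm);
	return 0;
}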
diff --git a/include/linux/time.h b/include/linux/time.h
index 23f0f5ce3090..c0543f5f25de 100644
--- a/include/linux/time.h
+++ b/include/linux/time.h
@@ -151,9 +151,6 @@ static inline bool timespec_inject_offset_valid(const struct timespec *ts)
return true;
}
-#define CURRENT_TIME (current_kernel_time())
-#define CURRENT_TIME_SEC ((struct timespec) { get_seconds(), 0 })
-
/* Some architectures do not supply their own clocksource.
* This is mainly the case in architectures that get their
* inter-tick times by reading the counter on their interval
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 1017e904c0a3..d07cd2105a6c 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -390,7 +390,6 @@ static inline bool tty_throttled(struct tty_struct *tty)
}
#ifdef CONFIG_TTY
-extern void console_init(void);
extern void tty_kref_put(struct tty_struct *tty);
extern struct pid *tty_get_pgrp(struct tty_struct *tty);
extern void tty_vhangup_self(void);
@@ -402,8 +401,6 @@ extern struct tty_struct *get_current_tty(void);
extern int __init tty_init(void);
extern const char *tty_name(const struct tty_struct *tty);
#else
-static inline void console_init(void)
-{ }
static inline void tty_kref_put(struct tty_struct *tty)
{ }
static inline struct pid *tty_get_pgrp(struct tty_struct *tty)
@@ -478,9 +475,13 @@ extern int tty_do_resize(struct tty_struct *tty, struct winsize *ws);
extern int is_current_pgrp_orphaned(void);
extern void tty_hangup(struct tty_struct *tty);
extern void tty_vhangup(struct tty_struct *tty);
+extern void tty_vhangup_session(struct tty_struct *tty);
extern int tty_hung_up_p(struct file *filp);
extern void do_SAK(struct tty_struct *tty);
extern void __do_SAK(struct tty_struct *tty);
+extern void tty_open_proc_set_tty(struct file *filp, struct tty_struct *tty);
+extern int tty_signal_session_leader(struct tty_struct *tty, int exit_session);
+extern void session_clear_tty(struct pid *session);
extern void no_tty(void);
extern void tty_buffer_free_all(struct tty_port *port);
extern void tty_buffer_flush(struct tty_struct *tty, struct tty_ldisc *ld);
@@ -528,6 +529,8 @@ extern void tty_ldisc_flush(struct tty_struct *tty);
extern long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
extern int tty_mode_ioctl(struct tty_struct *tty, struct file *file,
unsigned int cmd, unsigned long arg);
+extern long tty_jobctrl_ioctl(struct tty_struct *tty, struct tty_struct *real_tty,
+ struct file *file, unsigned int cmd, unsigned long arg);
extern int tty_perform_flush(struct tty_struct *tty, unsigned long arg);
extern void tty_default_fops(struct file_operations *fops);
extern struct tty_struct *alloc_tty_struct(struct tty_driver *driver, int idx);
@@ -669,7 +672,11 @@ extern int tty_ldisc_receive_buf(struct tty_ldisc *ld, const unsigned char *p,
/* n_tty.c */
extern void n_tty_inherit_ops(struct tty_ldisc_ops *ops);
+#ifdef CONFIG_TTY
extern void __init n_tty_init(void);
+#else
+static inline void n_tty_init(void) { }
+#endif
/* tty_audit.c */
#ifdef CONFIG_AUDIT
diff --git a/include/linux/types.h b/include/linux/types.h
index 1e7bd24848fc..258099a4ed82 100644
--- a/include/linux/types.h
+++ b/include/linux/types.h
@@ -209,7 +209,7 @@ struct ustat {
* naturally due to ABI requirements, but some architectures (like CRIS) have
* weird ABI and we need to ask it explicitly.
*
- * The alignment is required to guarantee that bits 0 and 1 of @next will be
+ * The alignment is required to guarantee that bit 0 of @next will be
* clear under normal conditions -- as long as we use call_rcu(),
* call_rcu_bh(), call_rcu_sched(), or call_srcu() to queue callback.
*
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index e0cbfb09e60f..201418d5e15c 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -203,7 +203,6 @@ static __always_inline void pagefault_disabled_inc(void)
static __always_inline void pagefault_disabled_dec(void)
{
current->pagefault_disabled--;
- WARN_ON(current->pagefault_disabled < 0);
}
/*
diff --git a/include/linux/uio_driver.h b/include/linux/uio_driver.h
index 32c0e83d6239..3c85c81b0027 100644
--- a/include/linux/uio_driver.h
+++ b/include/linux/uio_driver.h
@@ -23,11 +23,13 @@ struct uio_map;
/**
* struct uio_mem - description of a UIO memory region
* @name: name of the memory region for identification
- * @addr: address of the device's memory (phys_addr is used since
- * addr can be logical, virtual, or physical & phys_addr_t
- * should always be large enough to handle any of the
- * address types)
- * @size: size of IO
+ * @addr: address of the device's memory rounded to page
+ * size (phys_addr is used since addr can be
+ * logical, virtual, or physical & phys_addr_t
+ * should always be large enough to handle any of
+ * the address types)
+ * @offs: offset of device memory within the page
+ * @size: size of IO (multiple of page size)
* @memtype: type of memory addr points to
* @internal_addr: ioremap-ped version of addr, for driver internal use
* @map: for use by the UIO core only.
@@ -35,6 +37,7 @@ struct uio_map;
struct uio_mem {
const char *name;
phys_addr_t addr;
+ unsigned long offs;
resource_size_t size;
int memtype;
void __iomem *internal_addr;
diff --git a/include/linux/usb.h b/include/linux/usb.h
index 7e68259360de..cb9fbd54386e 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -99,6 +99,76 @@ enum usb_interface_condition {
USB_INTERFACE_UNBINDING,
};
+int __must_check
+usb_find_common_endpoints(struct usb_host_interface *alt,
+ struct usb_endpoint_descriptor **bulk_in,
+ struct usb_endpoint_descriptor **bulk_out,
+ struct usb_endpoint_descriptor **int_in,
+ struct usb_endpoint_descriptor **int_out);
+
+int __must_check
+usb_find_common_endpoints_reverse(struct usb_host_interface *alt,
+ struct usb_endpoint_descriptor **bulk_in,
+ struct usb_endpoint_descriptor **bulk_out,
+ struct usb_endpoint_descriptor **int_in,
+ struct usb_endpoint_descriptor **int_out);
+
+static inline int __must_check
+usb_find_bulk_in_endpoint(struct usb_host_interface *alt,
+ struct usb_endpoint_descriptor **bulk_in)
+{
+ return usb_find_common_endpoints(alt, bulk_in, NULL, NULL, NULL);
+}
+
+static inline int __must_check
+usb_find_bulk_out_endpoint(struct usb_host_interface *alt,
+ struct usb_endpoint_descriptor **bulk_out)
+{
+ return usb_find_common_endpoints(alt, NULL, bulk_out, NULL, NULL);
+}
+
+static inline int __must_check
+usb_find_int_in_endpoint(struct usb_host_interface *alt,
+ struct usb_endpoint_descriptor **int_in)
+{
+ return usb_find_common_endpoints(alt, NULL, NULL, int_in, NULL);
+}
+
+static inline int __must_check
+usb_find_int_out_endpoint(struct usb_host_interface *alt,
+ struct usb_endpoint_descriptor **int_out)
+{
+ return usb_find_common_endpoints(alt, NULL, NULL, NULL, int_out);
+}
+
+static inline int __must_check
+usb_find_last_bulk_in_endpoint(struct usb_host_interface *alt,
+ struct usb_endpoint_descriptor **bulk_in)
+{
+ return usb_find_common_endpoints_reverse(alt, bulk_in, NULL, NULL, NULL);
+}
+
+static inline int __must_check
+usb_find_last_bulk_out_endpoint(struct usb_host_interface *alt,
+ struct usb_endpoint_descriptor **bulk_out)
+{
+ return usb_find_common_endpoints_reverse(alt, NULL, bulk_out, NULL, NULL);
+}
+
+static inline int __must_check
+usb_find_last_int_in_endpoint(struct usb_host_interface *alt,
+ struct usb_endpoint_descriptor **int_in)
+{
+ return usb_find_common_endpoints_reverse(alt, NULL, NULL, int_in, NULL);
+}
+
+static inline int __must_check
+usb_find_last_int_out_endpoint(struct usb_host_interface *alt,
+ struct usb_endpoint_descriptor **int_out)
+{
+ return usb_find_common_endpoints_reverse(alt, NULL, NULL, NULL, int_out);
+}
+
/**
* struct usb_interface - what usb device drivers talk to
* @altsetting: array of interface structures, one for each alternate
@@ -248,7 +318,7 @@ void usb_put_intf(struct usb_interface *intf);
* struct usb_interface (which persists only as long as its configuration
* is installed). The altsetting arrays can be accessed through these
* structures at any time, permitting comparison of configurations and
- * providing support for the /proc/bus/usb/devices pseudo-file.
+ * providing support for the /sys/kernel/debug/usb/devices pseudo-file.
*/
struct usb_interface_cache {
unsigned num_altsetting; /* number of alternate settings */
@@ -354,6 +424,7 @@ struct usb_devmap {
*/
struct usb_bus {
struct device *controller; /* host/master side hardware */
+ struct device *sysdev; /* as seen from firmware or bus */
int busnum; /* Bus number (in order of reg) */
const char *bus_name; /* stable id (PCI slot_name etc) */
u8 uses_dma; /* Does the host controller use DMA? */
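A hedged sketch of how a driver probe might use the endpoint-lookup helpers added earlier in this header (the driver and variable names are illustrative):

static int example_probe(struct usb_interface *intf,
			 const struct usb_device_id *id)
{
	struct usb_endpoint_descriptor *bulk_in, *bulk_out;
	int ret;

	ret = usb_find_common_endpoints(intf->cur_altsetting,
					&bulk_in, &bulk_out, NULL, NULL);
	if (ret)
		return ret;	/* a requested endpoint type was not found */

	/* bulk_in->bEndpointAddress and friends can now be used for URBs. */
	return 0;
}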
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
index 40edf6a8533e..a469999a106d 100644
--- a/include/linux/usb/hcd.h
+++ b/include/linux/usb/hcd.h
@@ -437,6 +437,9 @@ extern int usb_hcd_alloc_bandwidth(struct usb_device *udev,
struct usb_host_interface *new_alt);
extern int usb_hcd_get_frame_number(struct usb_device *udev);
+struct usb_hcd *__usb_create_hcd(const struct hc_driver *driver,
+ struct device *sysdev, struct device *dev, const char *bus_name,
+ struct usb_hcd *primary_hcd);
extern struct usb_hcd *usb_create_hcd(const struct hc_driver *driver,
struct device *dev, const char *bus_name);
extern struct usb_hcd *usb_create_shared_hcd(const struct hc_driver *driver,
@@ -453,7 +456,7 @@ extern int usb_hcd_find_raw_port_number(struct usb_hcd *hcd, int port1);
struct platform_device;
extern void usb_hcd_platform_shutdown(struct platform_device *dev);
-#ifdef CONFIG_PCI
+#ifdef CONFIG_USB_PCI
struct pci_dev;
struct pci_device_id;
extern int usb_hcd_pci_probe(struct pci_dev *dev,
@@ -466,7 +469,7 @@ extern int usb_hcd_amd_remote_wakeup_quirk(struct pci_dev *dev);
#ifdef CONFIG_PM
extern const struct dev_pm_ops usb_hcd_pci_pm_ops;
#endif
-#endif /* CONFIG_PCI */
+#endif /* CONFIG_USB_PCI */
/* pci-ish (pdev null is ok) buffer alloc/mapping support */
void usb_init_pool_max(void);
diff --git a/include/linux/usb/of.h b/include/linux/usb/of.h
index 5ff9032ee1b4..4031f47629ec 100644
--- a/include/linux/usb/of.h
+++ b/include/linux/usb/of.h
@@ -18,6 +18,7 @@ int of_usb_update_otg_caps(struct device_node *np,
struct usb_otg_caps *otg_caps);
struct device_node *usb_of_get_child_node(struct device_node *parent,
int portnum);
+struct device *usb_of_get_companion_dev(struct device *dev);
#else
static inline enum usb_dr_mode
of_usb_get_dr_mode_by_phy(struct device_node *np, int arg0)
@@ -38,6 +39,10 @@ static inline struct device_node *usb_of_get_child_node
{
return NULL;
}
+static inline struct device *usb_of_get_companion_dev(struct device *dev)
+{
+ return NULL;
+}
#endif
#if IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_USB_SUPPORT)
diff --git a/include/linux/usb/otg-fsm.h b/include/linux/usb/otg-fsm.h
index 7a0350535cb1..a0a8f878503c 100644
--- a/include/linux/usb/otg-fsm.h
+++ b/include/linux/usb/otg-fsm.h
@@ -21,21 +21,6 @@
#include <linux/mutex.h>
#include <linux/errno.h>
-#undef VERBOSE
-
-#ifdef VERBOSE
-#define VDBG(fmt, args...) pr_debug("[%s] " fmt , \
- __func__, ## args)
-#else
-#define VDBG(stuff...) do {} while (0)
-#endif
-
-#ifdef VERBOSE
-#define MPC_LOC printk("Current Location [%s]:[%d]\n", __FILE__, __LINE__)
-#else
-#define MPC_LOC do {} while (0)
-#endif
-
#define PROTO_UNDEF (0)
#define PROTO_HOST (1)
#define PROTO_GADGET (2)
diff --git a/include/linux/usb/serial.h b/include/linux/usb/serial.h
index 704a1ab8240c..e2f0ab07eea5 100644
--- a/include/linux/usb/serial.h
+++ b/include/linux/usb/serial.h
@@ -20,7 +20,7 @@
#include <linux/kfifo.h>
/* The maximum number of ports one device can grab at once */
-#define MAX_NUM_PORTS 8
+#define MAX_NUM_PORTS 16
/* parity check flag */
#define RELEVANT_IFLAG(iflag) (iflag & (IGNBRK|BRKINT|IGNPAR|PARMRK|INPCK))
@@ -159,10 +159,10 @@ struct usb_serial {
unsigned char minors_reserved:1;
unsigned char num_ports;
unsigned char num_port_pointers;
- char num_interrupt_in;
- char num_interrupt_out;
- char num_bulk_in;
- char num_bulk_out;
+ unsigned char num_interrupt_in;
+ unsigned char num_interrupt_out;
+ unsigned char num_bulk_in;
+ unsigned char num_bulk_out;
struct usb_serial_port *port[MAX_NUM_PORTS];
struct kref kref;
struct mutex disc_mutex;
@@ -181,6 +181,17 @@ static inline void usb_set_serial_data(struct usb_serial *serial, void *data)
serial->private = data;
}
+struct usb_serial_endpoints {
+ unsigned char num_bulk_in;
+ unsigned char num_bulk_out;
+ unsigned char num_interrupt_in;
+ unsigned char num_interrupt_out;
+ struct usb_endpoint_descriptor *bulk_in[MAX_NUM_PORTS];
+ struct usb_endpoint_descriptor *bulk_out[MAX_NUM_PORTS];
+ struct usb_endpoint_descriptor *interrupt_in[MAX_NUM_PORTS];
+ struct usb_endpoint_descriptor *interrupt_out[MAX_NUM_PORTS];
+};
+
/**
* usb_serial_driver - describes a usb serial driver
* @description: pointer to a string that describes this driver. This string
@@ -188,12 +199,17 @@ static inline void usb_set_serial_data(struct usb_serial *serial, void *data)
* @id_table: pointer to a list of usb_device_id structures that define all
* of the devices this structure can support.
* @num_ports: the number of different ports this device will have.
+ * @num_bulk_in: minimum number of bulk-in endpoints
+ * @num_bulk_out: minimum number of bulk-out endpoints
+ * @num_interrupt_in: minimum number of interrupt-in endpoints
+ * @num_interrupt_out: minimum number of interrupt-out endpoints
* @bulk_in_size: minimum number of bytes to allocate for bulk-in buffer
* (0 = end-point size)
* @bulk_out_size: bytes to allocate for bulk-out buffer (0 = end-point size)
* @calc_num_ports: pointer to a function to determine how many ports this
- * device has dynamically. It will be called after the probe()
- * callback is called, but before attach()
+ * device has dynamically. It can also be used to verify the number of
+ * endpoints or to modify the port-endpoint mapping. It will be called
+ * after the probe() callback is called, but before attach().
* @probe: pointer to the driver's probe function.
* This will be called when the device is inserted into the system,
* but before the device has been fully initialized by the usb_serial
@@ -227,19 +243,26 @@ static inline void usb_set_serial_data(struct usb_serial *serial, void *data)
struct usb_serial_driver {
const char *description;
const struct usb_device_id *id_table;
- char num_ports;
struct list_head driver_list;
struct device_driver driver;
struct usb_driver *usb_driver;
struct usb_dynids dynids;
+ unsigned char num_ports;
+
+ unsigned char num_bulk_in;
+ unsigned char num_bulk_out;
+ unsigned char num_interrupt_in;
+ unsigned char num_interrupt_out;
+
size_t bulk_in_size;
size_t bulk_out_size;
int (*probe)(struct usb_serial *serial, const struct usb_device_id *id);
int (*attach)(struct usb_serial *serial);
- int (*calc_num_ports) (struct usb_serial *serial);
+ int (*calc_num_ports)(struct usb_serial *serial,
+ struct usb_serial_endpoints *epds);
void (*disconnect)(struct usb_serial *serial);
void (*release)(struct usb_serial *serial);
@@ -356,7 +379,6 @@ extern void usb_serial_handle_dcd_change(struct usb_serial_port *usb_port,
extern int usb_serial_bus_register(struct usb_serial_driver *device);
extern void usb_serial_bus_deregister(struct usb_serial_driver *device);
-extern struct usb_serial_driver usb_serial_generic_device;
extern struct bus_type usb_serial_bus_type;
extern struct tty_driver *usb_serial_tty_driver;
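
To show how the reworked usb-serial fields fit together, here is a minimal sketch (driver name, port limit and the exact error convention of calc_num_ports() are assumptions, not taken from this patch): the num_* fields declare the minimum endpoints the core must find, and calc_num_ports() now receives the discovered endpoints and returns the port count.

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>

/* Hypothetical device: one bulk pair per port, at most two ports. */
static int example_calc_num_ports(struct usb_serial *serial,
                                  struct usb_serial_endpoints *epds)
{
        if (!epds->num_bulk_out)
                return -ENODEV;

        return min_t(int, epds->num_bulk_out, 2);
}

static struct usb_serial_driver example_device = {
        .driver = {
                .owner          = THIS_MODULE,
                .name           = "example",    /* invented name */
        },
        .description            = "Example USB serial device",
        .num_bulk_in            = 1,    /* minimum endpoints checked by core */
        .num_bulk_out           = 1,
        .calc_num_ports         = example_calc_num_ports,
};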
diff --git a/include/linux/usb/typec.h b/include/linux/usb/typec.h
new file mode 100644
index 000000000000..ec78204964ab
--- /dev/null
+++ b/include/linux/usb/typec.h
@@ -0,0 +1,243 @@
+
+#ifndef __LINUX_USB_TYPEC_H
+#define __LINUX_USB_TYPEC_H
+
+#include <linux/types.h>
+
+/* XXX: Once we have a header for USB Power Delivery, this belongs there */
+#define ALTMODE_MAX_MODES 6
+
+/* USB Type-C Specification releases */
+#define USB_TYPEC_REV_1_0 0x100 /* 1.0 */
+#define USB_TYPEC_REV_1_1 0x110 /* 1.1 */
+#define USB_TYPEC_REV_1_2 0x120 /* 1.2 */
+
+struct typec_altmode;
+struct typec_partner;
+struct typec_cable;
+struct typec_plug;
+struct typec_port;
+
+struct fwnode_handle;
+
+enum typec_port_type {
+ TYPEC_PORT_DFP,
+ TYPEC_PORT_UFP,
+ TYPEC_PORT_DRP,
+};
+
+enum typec_plug_type {
+ USB_PLUG_NONE,
+ USB_PLUG_TYPE_A,
+ USB_PLUG_TYPE_B,
+ USB_PLUG_TYPE_C,
+ USB_PLUG_CAPTIVE,
+};
+
+enum typec_data_role {
+ TYPEC_DEVICE,
+ TYPEC_HOST,
+};
+
+enum typec_role {
+ TYPEC_SINK,
+ TYPEC_SOURCE,
+};
+
+enum typec_pwr_opmode {
+ TYPEC_PWR_MODE_USB,
+ TYPEC_PWR_MODE_1_5A,
+ TYPEC_PWR_MODE_3_0A,
+ TYPEC_PWR_MODE_PD,
+};
+
+enum typec_accessory {
+ TYPEC_ACCESSORY_NONE,
+ TYPEC_ACCESSORY_AUDIO,
+ TYPEC_ACCESSORY_DEBUG,
+};
+
+#define TYPEC_MAX_ACCESSORY 3
+
+/*
+ * struct usb_pd_identity - USB Power Delivery identity data
+ * @id_header: ID Header VDO
+ * @cert_stat: Cert Stat VDO
+ * @product: Product VDO
+ *
+ * USB power delivery Discover Identity command response data.
+ *
+ * REVISIT: This is USB Power Delivery specific information, so this structure
+ * probably belongs in a USB Power Delivery header file once we have one.
+ */
+struct usb_pd_identity {
+ u32 id_header;
+ u32 cert_stat;
+ u32 product;
+};
+
+int typec_partner_set_identity(struct typec_partner *partner);
+int typec_cable_set_identity(struct typec_cable *cable);
+
+/*
+ * struct typec_mode_desc - Individual Mode of an Alternate Mode
+ * @index: Index of the Mode within the SVID
+ * @vdo: VDO returned by Discover Modes USB PD command
+ * @desc: Optional human readable description of the mode
+ * @roles: Only for ports. DRP if the mode is available in both roles
+ *
+ * Description of a mode of an Alternate Mode which a connector, cable plug or
+ * partner supports. Every mode will have its own sysfs group. The details are
+ * the VDO returned by the Discover Modes command, a description of the mode and
+ * an active flag telling whether the mode has been entered or not.
+ */
+struct typec_mode_desc {
+ int index;
+ u32 vdo;
+ char *desc;
+ /* Only used with ports */
+ enum typec_port_type roles;
+};
+
+/*
+ * struct typec_altmode_desc - USB Type-C Alternate Mode Descriptor
+ * @svid: Standard or Vendor ID
+ * @n_modes: Number of modes
+ * @modes: Array of modes supported by the Alternate Mode
+ *
+ * Representation of an Alternate Mode that has SVID assigned by USB-IF. The
+ * array of modes will list the modes of a particular SVID that are supported by
+ * a connector, partner of a cable plug.
+ */
+struct typec_altmode_desc {
+ u16 svid;
+ int n_modes;
+ struct typec_mode_desc modes[ALTMODE_MAX_MODES];
+};
+
+struct typec_altmode
+*typec_partner_register_altmode(struct typec_partner *partner,
+ struct typec_altmode_desc *desc);
+struct typec_altmode
+*typec_plug_register_altmode(struct typec_plug *plug,
+ struct typec_altmode_desc *desc);
+struct typec_altmode
+*typec_port_register_altmode(struct typec_port *port,
+ struct typec_altmode_desc *desc);
+void typec_unregister_altmode(struct typec_altmode *altmode);
+
+struct typec_port *typec_altmode2port(struct typec_altmode *alt);
+
+void typec_altmode_update_active(struct typec_altmode *alt, int mode,
+ bool active);
+
+enum typec_plug_index {
+ TYPEC_PLUG_SOP_P,
+ TYPEC_PLUG_SOP_PP,
+};
+
+/*
+ * struct typec_plug_desc - USB Type-C Cable Plug Descriptor
+ * @index: SOP Prime for the plug connected to DFP and SOP Double Prime for the
+ * plug connected to UFP
+ *
+ * Represents a USB Type-C cable plug.
+ */
+struct typec_plug_desc {
+ enum typec_plug_index index;
+};
+
+/*
+ * struct typec_cable_desc - USB Type-C Cable Descriptor
+ * @type: The plug type from USB PD Cable VDO
+ * @active: Is the cable active or passive
+ * @identity: Result of Discover Identity command
+ *
+ * Represents a USB Type-C cable attached to a USB Type-C port.
+ */
+struct typec_cable_desc {
+ enum typec_plug_type type;
+ unsigned int active:1;
+ struct usb_pd_identity *identity;
+};
+
+/*
+ * struct typec_partner_desc - USB Type-C Partner Descriptor
+ * @usb_pd: USB Power Delivery support
+ * @accessory: Audio, Debug or none.
+ * @identity: Discover Identity command data
+ *
+ * Details about a partner that is attached to a USB Type-C port. If the
+ * @identity member exists when the partner is registered, a directory named
+ * "identity" is created in sysfs for the partner device.
+ */
+struct typec_partner_desc {
+ unsigned int usb_pd:1;
+ enum typec_accessory accessory;
+ struct usb_pd_identity *identity;
+};
+
+/*
+ * struct typec_capability - USB Type-C Port Capabilities
+ * @type: DFP (Host-only), UFP (Device-only) or DRP (Dual Role)
+ * @revision: USB Type-C Specification release. Binary coded decimal
+ * @pd_revision: USB Power Delivery Specification revision if supported
+ * @prefer_role: Initial role preference
+ * @accessory: Supported Accessory Modes
+ * @fwnode: Optional fwnode of the port
+ * @try_role: Set data role preference for DRP port
+ * @dr_set: Set Data Role
+ * @pr_set: Set Power Role
+ * @vconn_set: Set VCONN Role
+ * @activate_mode: Enter/exit given Alternate Mode
+ *
+ * Static capabilities of a single USB Type-C port.
+ */
+struct typec_capability {
+ enum typec_port_type type;
+ u16 revision; /* 0120H = "1.2" */
+ u16 pd_revision; /* 0300H = "3.0" */
+ int prefer_role;
+ enum typec_accessory accessory[TYPEC_MAX_ACCESSORY];
+
+ struct fwnode_handle *fwnode;
+
+ int (*try_role)(const struct typec_capability *,
+ int role);
+
+ int (*dr_set)(const struct typec_capability *,
+ enum typec_data_role);
+ int (*pr_set)(const struct typec_capability *,
+ enum typec_role);
+ int (*vconn_set)(const struct typec_capability *,
+ enum typec_role);
+
+ int (*activate_mode)(const struct typec_capability *,
+ int mode, int activate);
+};
+
+/* Specific to try_role(). Indicates the user wants to clear the preference. */
+#define TYPEC_NO_PREFERRED_ROLE (-1)
+
+struct typec_port *typec_register_port(struct device *parent,
+ const struct typec_capability *cap);
+void typec_unregister_port(struct typec_port *port);
+
+struct typec_partner *typec_register_partner(struct typec_port *port,
+ struct typec_partner_desc *desc);
+void typec_unregister_partner(struct typec_partner *partner);
+
+struct typec_cable *typec_register_cable(struct typec_port *port,
+ struct typec_cable_desc *desc);
+void typec_unregister_cable(struct typec_cable *cable);
+
+struct typec_plug *typec_register_plug(struct typec_cable *cable,
+ struct typec_plug_desc *desc);
+void typec_unregister_plug(struct typec_plug *plug);
+
+void typec_set_data_role(struct typec_port *port, enum typec_data_role role);
+void typec_set_pwr_role(struct typec_port *port, enum typec_role role);
+void typec_set_vconn_role(struct typec_port *port, enum typec_role role);
+void typec_set_pwr_opmode(struct typec_port *port, enum typec_pwr_opmode mode);
+
+#endif /* __LINUX_USB_TYPEC_H */
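
A minimal sketch of how a port driver might use this new class interface. The capability values, callback body and the error handling (which assumes the usual ERR_PTR convention for typec_register_port()) are illustrative guesses, not taken from this patch:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/usb/typec.h>

/* Hypothetical dual-role port with no initial role preference. */
static int example_try_role(const struct typec_capability *cap, int role)
{
        /* Nudge the port controller towards the requested role here. */
        return 0;
}

static const struct typec_capability example_cap = {
        .type           = TYPEC_PORT_DRP,
        .revision       = USB_TYPEC_REV_1_2,
        .prefer_role    = TYPEC_NO_PREFERRED_ROLE,
        .try_role       = example_try_role,
};

static int example_register_port(struct device *parent,
                                 struct typec_port **port)
{
        *port = typec_register_port(parent, &example_cap);
        if (IS_ERR(*port))      /* assuming ERR_PTR on failure */
                return PTR_ERR(*port);

        /* Report the initial state once the port controller is up. */
        typec_set_data_role(*port, TYPEC_DEVICE);
        typec_set_pwr_role(*port, TYPEC_SINK);
        typec_set_pwr_opmode(*port, TYPEC_PWR_MODE_USB);
        return 0;
}

Partners, cables and plugs would then be registered against the returned port with the typec_register_partner()/typec_register_cable()/typec_register_plug() calls declared above as they are detected.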
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index d68edffbf142..0328ce003992 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -6,6 +6,7 @@
#include <linux/list.h>
#include <linux/llist.h>
#include <asm/page.h> /* pgprot_t */
+#include <asm/pgtable.h> /* PAGE_KERNEL */
#include <linux/rbtree.h>
struct vm_area_struct; /* vma defining user mapping in mm_types.h */
@@ -80,6 +81,25 @@ extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
unsigned long start, unsigned long end, gfp_t gfp_mask,
pgprot_t prot, unsigned long vm_flags, int node,
const void *caller);
+#ifndef CONFIG_MMU
+extern void *__vmalloc_node_flags(unsigned long size, int node, gfp_t flags);
+#else
+extern void *__vmalloc_node(unsigned long size, unsigned long align,
+ gfp_t gfp_mask, pgprot_t prot,
+ int node, const void *caller);
+
+/*
+ * We really want to have this inlined due to caller tracking. This
+ * function is used by the highlevel vmalloc apis and so we want to track
+ * their callers and inlining will achieve that.
+ */
+static inline void *__vmalloc_node_flags(unsigned long size,
+ int node, gfp_t flags)
+{
+ return __vmalloc_node(size, 1, flags, PAGE_KERNEL,
+ node, __builtin_return_address(0));
+}
+#endif
extern void vfree(const void *addr);
extern void vfree_atomic(const void *addr);
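
To make the caller-tracking comment above concrete, here is a small sketch (not from the patch, and only meaningful for in-tree mm code since __vmalloc_node_flags() is not exported): because the helper is inlined, __builtin_return_address(0) is evaluated in the high-level wrapper, so the allocation is attributed to that wrapper's caller rather than to a shared out-of-line function.

#include <linux/gfp.h>
#include <linux/numa.h>
#include <linux/vmalloc.h>

/*
 * Hypothetical high-level helper in the style of vmalloc(): with
 * __vmalloc_node_flags() inlined here, the recorded caller is whoever
 * called example_valloc(), which is what shows up in /proc/vmallocinfo.
 */
void *example_valloc(unsigned long size)
{
        return __vmalloc_node_flags(size, NUMA_NO_NODE, GFP_KERNEL);
}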
diff --git a/include/linux/vme.h b/include/linux/vme.h
index ec5e8bf6118e..25874da3f2e1 100644
--- a/include/linux/vme.h
+++ b/include/linux/vme.h
@@ -92,7 +92,7 @@ extern struct bus_type vme_bus_type;
#define VME_SLOT_ALL -2
/**
- * Structure representing a VME device
+ * struct vme_dev - Structure representing a VME device
* @num: The device number
* @bridge: Pointer to the bridge device this device is on
* @dev: Internal device structure
@@ -107,6 +107,16 @@ struct vme_dev {
struct list_head bridge_list;
};
+/**
+ * struct vme_driver - Structure representing a VME driver
+ * @name: Driver name, should be unique among VME drivers and usually the same
+ * as the module name.
+ * @match: Callback used to determine whether probe should be run.
+ * @probe: Callback for device binding, called when new device is detected.
+ * @remove: Callback, called on device removal.
+ * @driver: Underlying generic device driver structure.
+ * @devices: List of VME devices (struct vme_dev) associated with this driver.
+ */
struct vme_driver {
const char *name;
int (*match)(struct vme_dev *);
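
For context on how these newly documented callbacks are typically wired up, a minimal driver skeleton might look like the following. The driver name, slot test and the vme_register_driver()/vme_unregister_driver() entry points (assumed to take the driver plus a device count) are not part of this patch:

#include <linux/device.h>
#include <linux/module.h>
#include <linux/vme.h>

/* Hypothetical driver that only binds to the first VME device. */
static int example_match(struct vme_dev *vdev)
{
        return vdev->num == 0;
}

static int example_probe(struct vme_dev *vdev)
{
        dev_info(&vdev->dev, "example VME driver bound\n");
        return 0;
}

static int example_remove(struct vme_dev *vdev)
{
        return 0;
}

static struct vme_driver example_driver = {
        .name   = "example_vme",
        .match  = example_match,
        .probe  = example_probe,
        .remove = example_remove,
};

static int __init example_init(void)
{
        /* Second argument: how many devices to create for matching. */
        return vme_register_driver(&example_driver, 1);
}
module_init(example_init);

static void __exit example_exit(void)
{
        vme_unregister_driver(&example_driver);
}
module_exit(example_exit);

MODULE_LICENSE("GPL");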
diff --git a/include/media/cec-edid.h b/include/media/cec-edid.h
deleted file mode 100644
index bdf731ecba1a..000000000000
--- a/include/media/cec-edid.h
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
- * cec-edid - HDMI Consumer Electronics Control & EDID helpers
- *
- * Copyright 2016 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#ifndef _MEDIA_CEC_EDID_H
-#define _MEDIA_CEC_EDID_H
-
-#include <linux/types.h>
-
-#define CEC_PHYS_ADDR_INVALID 0xffff
-#define cec_phys_addr_exp(pa) \
- ((pa) >> 12), ((pa) >> 8) & 0xf, ((pa) >> 4) & 0xf, (pa) & 0xf
-
-/**
- * cec_get_edid_phys_addr() - find and return the physical address
- *
- * @edid: pointer to the EDID data
- * @size: size in bytes of the EDID data
- * @offset: If not %NULL then the location of the physical address
- * bytes in the EDID will be returned here. This is set to 0
- * if there is no physical address found.
- *
- * Return: the physical address or CEC_PHYS_ADDR_INVALID if there is none.
- */
-u16 cec_get_edid_phys_addr(const u8 *edid, unsigned int size,
- unsigned int *offset);
-
-/**
- * cec_set_edid_phys_addr() - find and set the physical address
- *
- * @edid: pointer to the EDID data
- * @size: size in bytes of the EDID data
- * @phys_addr: the new physical address
- *
- * This function finds the location of the physical address in the EDID
- * and fills in the given physical address and updates the checksum
- * at the end of the EDID block. It does nothing if the EDID doesn't
- * contain a physical address.
- */
-void cec_set_edid_phys_addr(u8 *edid, unsigned int size, u16 phys_addr);
-
-/**
- * cec_phys_addr_for_input() - calculate the PA for an input
- *
- * @phys_addr: the physical address of the parent
- * @input: the number of the input port, must be between 1 and 15
- *
- * This function calculates a new physical address based on the input
- * port number. For example:
- *
- * PA = 0.0.0.0 and input = 2 becomes 2.0.0.0
- *
- * PA = 3.0.0.0 and input = 1 becomes 3.1.0.0
- *
- * PA = 3.2.1.0 and input = 5 becomes 3.2.1.5
- *
- * PA = 3.2.1.3 and input = 5 becomes f.f.f.f since it maxed out the depth.
- *
- * Return: the new physical address or CEC_PHYS_ADDR_INVALID.
- */
-u16 cec_phys_addr_for_input(u16 phys_addr, u8 input);
-
-/**
- * cec_phys_addr_validate() - validate a physical address from an EDID
- *
- * @phys_addr: the physical address to validate
- * @parent: if not %NULL, then this is filled with the parents PA.
- * @port: if not %NULL, then this is filled with the input port.
- *
- * This validates a physical address as read from an EDID. If the
- * PA is invalid (such as 1.0.1.0 since '0' is only allowed at the end),
- * then it will return -EINVAL.
- *
- * The parent PA is passed into %parent and the input port is passed into
- * %port. For example:
- *
- * PA = 0.0.0.0: has parent 0.0.0.0 and input port 0.
- *
- * PA = 1.0.0.0: has parent 0.0.0.0 and input port 1.
- *
- * PA = 3.2.0.0: has parent 3.0.0.0 and input port 2.
- *
- * PA = f.f.f.f: has parent f.f.f.f and input port 0.
- *
- * Return: 0 if the PA is valid, -EINVAL if not.
- */
-int cec_phys_addr_validate(u16 phys_addr, u16 *parent, u16 *port);
-
-#endif /* _MEDIA_CEC_EDID_H */
diff --git a/include/media/cec-notifier.h b/include/media/cec-notifier.h
new file mode 100644
index 000000000000..eb50ce54b759
--- /dev/null
+++ b/include/media/cec-notifier.h
@@ -0,0 +1,111 @@
+/*
+ * cec-notifier.h - notify CEC drivers of physical address changes
+ *
+ * Copyright 2016 Russell King <rmk+kernel@arm.linux.org.uk>
+ * Copyright 2016-2017 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef LINUX_CEC_NOTIFIER_H
+#define LINUX_CEC_NOTIFIER_H
+
+#include <linux/types.h>
+#include <media/cec.h>
+
+struct device;
+struct edid;
+struct cec_adapter;
+struct cec_notifier;
+
+#ifdef CONFIG_MEDIA_CEC_NOTIFIER
+
+/**
+ * cec_notifier_get - find or create a new cec_notifier for the given device.
+ * @dev: device that sends the events.
+ *
+ * If a notifier for device @dev already exists, then increase the refcount
+ * and return that notifier.
+ *
+ * If it doesn't exist, then allocate a new notifier struct and return a
+ * pointer to that new struct.
+ *
+ * Return NULL if the memory could not be allocated.
+ */
+struct cec_notifier *cec_notifier_get(struct device *dev);
+
+/**
+ * cec_notifier_put - decrease refcount and delete when the refcount reaches 0.
+ * @n: notifier
+ */
+void cec_notifier_put(struct cec_notifier *n);
+
+/**
+ * cec_notifier_set_phys_addr - set a new physical address.
+ * @n: the CEC notifier
+ * @pa: the CEC physical address
+ *
+ * Set a new CEC physical address.
+ */
+void cec_notifier_set_phys_addr(struct cec_notifier *n, u16 pa);
+
+/**
+ * cec_notifier_set_phys_addr_from_edid - parse the PA from the EDID and set it.
+ * @n: the CEC notifier
+ * @edid: the struct edid pointer
+ *
+ * Parses the EDID to obtain the new CEC physical address and set it.
+ */
+void cec_notifier_set_phys_addr_from_edid(struct cec_notifier *n,
+ const struct edid *edid);
+
+/**
+ * cec_notifier_register - register a callback with the notifier
+ * @n: the CEC notifier
+ * @adap: the CEC adapter, passed as argument to the callback function
+ * @callback: the callback function
+ */
+void cec_notifier_register(struct cec_notifier *n,
+ struct cec_adapter *adap,
+ void (*callback)(struct cec_adapter *adap, u16 pa));
+
+/**
+ * cec_notifier_unregister - unregister the callback from the notifier.
+ * @n: the CEC notifier
+ */
+void cec_notifier_unregister(struct cec_notifier *n);
+
+#else
+static inline struct cec_notifier *cec_notifier_get(struct device *dev)
+{
+ /* A non-NULL pointer is expected on success */
+ return (struct cec_notifier *)0xdeadfeed;
+}
+
+static inline void cec_notifier_put(struct cec_notifier *n)
+{
+}
+
+static inline void cec_notifier_set_phys_addr(struct cec_notifier *n, u16 pa)
+{
+}
+
+static inline void cec_notifier_set_phys_addr_from_edid(struct cec_notifier *n,
+ const struct edid *edid)
+{
+}
+
+#endif
+
+#endif
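
A rough sketch of the intended pairing (the driver structures and function names are invented): the HDMI transmitter driver owns a notifier and feeds it EDID updates, while the CEC adapter driver registers a callback on the same notifier and forwards the address with cec_s_phys_addr() from media/cec.h.

#include <linux/errno.h>
#include <media/cec-notifier.h>

/* HDMI transmitter side (hypothetical driver state). */
struct example_hdmi {
        struct cec_notifier *notifier;
};

static int example_hdmi_bind(struct device *dev, struct example_hdmi *hdmi)
{
        hdmi->notifier = cec_notifier_get(dev);
        if (!hdmi->notifier)
                return -ENOMEM;
        return 0;
}

static void example_hdmi_edid_update(struct example_hdmi *hdmi,
                                     const struct edid *edid)
{
        /* Pushes the parsed physical address to any registered callback. */
        cec_notifier_set_phys_addr_from_edid(hdmi->notifier, edid);
}

/* CEC adapter side: forward the new physical address to the CEC core. */
static void example_pa_changed(struct cec_adapter *adap, u16 pa)
{
        cec_s_phys_addr(adap, pa, false);
}

static void example_cec_bind(struct device *hdmi_dev, struct cec_adapter *adap)
{
        struct cec_notifier *n = cec_notifier_get(hdmi_dev);

        if (n)
                cec_notifier_register(n, adap, example_pa_changed);
}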
diff --git a/include/media/cec.h b/include/media/cec.h
index 96a0aa770d61..b8eb895731d5 100644
--- a/include/media/cec.h
+++ b/include/media/cec.h
@@ -29,7 +29,7 @@
#include <linux/timer.h>
#include <linux/cec-funcs.h>
#include <media/rc-core.h>
-#include <media/cec-edid.h>
+#include <media/cec-notifier.h>
/**
* struct cec_devnode - cec device node
@@ -173,6 +173,10 @@ struct cec_adapter {
bool passthrough;
struct cec_log_addrs log_addrs;
+#ifdef CONFIG_MEDIA_CEC_NOTIFIER
+ struct cec_notifier *notifier;
+#endif
+
struct dentry *cec_dir;
struct dentry *status_file;
@@ -184,6 +188,11 @@ struct cec_adapter {
char input_drv[32];
};
+static inline void *cec_get_drvdata(const struct cec_adapter *adap)
+{
+ return adap->priv;
+}
+
static inline bool cec_has_log_addr(const struct cec_adapter *adap, u8 log_addr)
{
return adap->log_addrs.log_addr_mask & (1 << log_addr);
@@ -194,7 +203,10 @@ static inline bool cec_is_sink(const struct cec_adapter *adap)
return adap->phys_addr == 0;
}
-#if IS_ENABLED(CONFIG_MEDIA_CEC_SUPPORT)
+#define cec_phys_addr_exp(pa) \
+ ((pa) >> 12), ((pa) >> 8) & 0xf, ((pa) >> 4) & 0xf, (pa) & 0xf
+
+#if IS_ENABLED(CONFIG_CEC_CORE)
struct cec_adapter *cec_allocate_adapter(const struct cec_adap_ops *ops,
void *priv, const char *name, u32 caps, u8 available_las);
int cec_register_adapter(struct cec_adapter *adap, struct device *parent);
@@ -213,6 +225,86 @@ void cec_transmit_done(struct cec_adapter *adap, u8 status, u8 arb_lost_cnt,
u8 nack_cnt, u8 low_drive_cnt, u8 error_cnt);
void cec_received_msg(struct cec_adapter *adap, struct cec_msg *msg);
+/**
+ * cec_get_edid_phys_addr() - find and return the physical address
+ *
+ * @edid: pointer to the EDID data
+ * @size: size in bytes of the EDID data
+ * @offset: If not %NULL then the location of the physical address
+ * bytes in the EDID will be returned here. This is set to 0
+ * if there is no physical address found.
+ *
+ * Return: the physical address or CEC_PHYS_ADDR_INVALID if there is none.
+ */
+u16 cec_get_edid_phys_addr(const u8 *edid, unsigned int size,
+ unsigned int *offset);
+
+/**
+ * cec_set_edid_phys_addr() - find and set the physical address
+ *
+ * @edid: pointer to the EDID data
+ * @size: size in bytes of the EDID data
+ * @phys_addr: the new physical address
+ *
+ * This function finds the location of the physical address in the EDID
+ * and fills in the given physical address and updates the checksum
+ * at the end of the EDID block. It does nothing if the EDID doesn't
+ * contain a physical address.
+ */
+void cec_set_edid_phys_addr(u8 *edid, unsigned int size, u16 phys_addr);
+
+/**
+ * cec_phys_addr_for_input() - calculate the PA for an input
+ *
+ * @phys_addr: the physical address of the parent
+ * @input: the number of the input port, must be between 1 and 15
+ *
+ * This function calculates a new physical address based on the input
+ * port number. For example:
+ *
+ * PA = 0.0.0.0 and input = 2 becomes 2.0.0.0
+ *
+ * PA = 3.0.0.0 and input = 1 becomes 3.1.0.0
+ *
+ * PA = 3.2.1.0 and input = 5 becomes 3.2.1.5
+ *
+ * PA = 3.2.1.3 and input = 5 becomes f.f.f.f since it maxed out the depth.
+ *
+ * Return: the new physical address or CEC_PHYS_ADDR_INVALID.
+ */
+u16 cec_phys_addr_for_input(u16 phys_addr, u8 input);
+
+/**
+ * cec_phys_addr_validate() - validate a physical address from an EDID
+ *
+ * @phys_addr: the physical address to validate
+ * @parent: if not %NULL, then this is filled with the parent's PA.
+ * @port: if not %NULL, then this is filled with the input port.
+ *
+ * This validates a physical address as read from an EDID. If the
+ * PA is invalid (such as 1.0.1.0 since '0' is only allowed at the end),
+ * then it will return -EINVAL.
+ *
+ * The parent PA is passed into %parent and the input port is passed into
+ * %port. For example:
+ *
+ * PA = 0.0.0.0: has parent 0.0.0.0 and input port 0.
+ *
+ * PA = 1.0.0.0: has parent 0.0.0.0 and input port 1.
+ *
+ * PA = 3.2.0.0: has parent 3.0.0.0 and input port 2.
+ *
+ * PA = f.f.f.f: has parent f.f.f.f and input port 0.
+ *
+ * Return: 0 if the PA is valid, -EINVAL if not.
+ */
+int cec_phys_addr_validate(u16 phys_addr, u16 *parent, u16 *port);
+
+#ifdef CONFIG_MEDIA_CEC_NOTIFIER
+void cec_register_cec_notifier(struct cec_adapter *adap,
+ struct cec_notifier *notifier);
+#endif
+
#else
static inline int cec_register_adapter(struct cec_adapter *adap,
@@ -234,6 +326,29 @@ static inline void cec_s_phys_addr(struct cec_adapter *adap, u16 phys_addr,
{
}
+static inline u16 cec_get_edid_phys_addr(const u8 *edid, unsigned int size,
+ unsigned int *offset)
+{
+ if (offset)
+ *offset = 0;
+ return CEC_PHYS_ADDR_INVALID;
+}
+
+static inline void cec_set_edid_phys_addr(u8 *edid, unsigned int size,
+ u16 phys_addr)
+{
+}
+
+static inline u16 cec_phys_addr_for_input(u16 phys_addr, u8 input)
+{
+ return CEC_PHYS_ADDR_INVALID;
+}
+
+static inline int cec_phys_addr_validate(u16 phys_addr, u16 *parent, u16 *port)
+{
+ return 0;
+}
+
#endif
#endif /* _MEDIA_CEC_H */
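
To make the physical-address helpers above a little more concrete, a hypothetical HDMI receiver with several inputs could derive the address to report on its second input like this (everything other than the two helpers and CEC_PHYS_ADDR_INVALID is invented):

#include <media/cec.h>

static u16 example_input2_phys_addr(const u8 *edid, unsigned int size)
{
        u16 pa = cec_get_edid_phys_addr(edid, size, NULL);

        if (pa == CEC_PHYS_ADDR_INVALID)
                return CEC_PHYS_ADDR_INVALID;

        /* Per the examples above: 3.0.0.0 with input 2 becomes 3.2.0.0. */
        return cec_phys_addr_for_input(pa, 2);
}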
diff --git a/include/media/davinci/vpif_types.h b/include/media/davinci/vpif_types.h
index c49c306cba61..385597da20dc 100644
--- a/include/media/davinci/vpif_types.h
+++ b/include/media/davinci/vpif_types.h
@@ -53,6 +53,7 @@ struct vpif_display_config {
int (*set_clock)(int, int);
struct vpif_subdev_info *subdevinfo;
int subdev_count;
+ int i2c_adapter_id;
struct vpif_display_chan_config chan_config[VPIF_DISPLAY_MAX_CHANNELS];
const char *card_name;
struct v4l2_async_subdev **asd; /* Flat array, arranged in groups */
diff --git a/include/media/rc-map.h b/include/media/rc-map.h
index a704749280d2..1a815a572fa1 100644
--- a/include/media/rc-map.h
+++ b/include/media/rc-map.h
@@ -27,7 +27,8 @@
* @RC_TYPE_NECX: Extended NEC protocol
* @RC_TYPE_NEC32: NEC 32 bit protocol
* @RC_TYPE_SANYO: Sanyo protocol
- * @RC_TYPE_MCE_KBD: RC6-ish MCE keyboard/mouse
+ * @RC_TYPE_MCIR2_KBD: RC6-ish MCE keyboard
+ * @RC_TYPE_MCIR2_MSE: RC6-ish MCE mouse
* @RC_TYPE_RC6_0: Philips RC6-0-16 protocol
* @RC_TYPE_RC6_6A_20: Philips RC6-6A-20 protocol
* @RC_TYPE_RC6_6A_24: Philips RC6-6A-24 protocol
@@ -51,48 +52,51 @@ enum rc_type {
RC_TYPE_NECX = 10,
RC_TYPE_NEC32 = 11,
RC_TYPE_SANYO = 12,
- RC_TYPE_MCE_KBD = 13,
- RC_TYPE_RC6_0 = 14,
- RC_TYPE_RC6_6A_20 = 15,
- RC_TYPE_RC6_6A_24 = 16,
- RC_TYPE_RC6_6A_32 = 17,
- RC_TYPE_RC6_MCE = 18,
- RC_TYPE_SHARP = 19,
- RC_TYPE_XMP = 20,
- RC_TYPE_CEC = 21,
+ RC_TYPE_MCIR2_KBD = 13,
+ RC_TYPE_MCIR2_MSE = 14,
+ RC_TYPE_RC6_0 = 15,
+ RC_TYPE_RC6_6A_20 = 16,
+ RC_TYPE_RC6_6A_24 = 17,
+ RC_TYPE_RC6_6A_32 = 18,
+ RC_TYPE_RC6_MCE = 19,
+ RC_TYPE_SHARP = 20,
+ RC_TYPE_XMP = 21,
+ RC_TYPE_CEC = 22,
};
#define RC_BIT_NONE 0ULL
-#define RC_BIT_UNKNOWN (1ULL << RC_TYPE_UNKNOWN)
-#define RC_BIT_OTHER (1ULL << RC_TYPE_OTHER)
-#define RC_BIT_RC5 (1ULL << RC_TYPE_RC5)
-#define RC_BIT_RC5X_20 (1ULL << RC_TYPE_RC5X_20)
-#define RC_BIT_RC5_SZ (1ULL << RC_TYPE_RC5_SZ)
-#define RC_BIT_JVC (1ULL << RC_TYPE_JVC)
-#define RC_BIT_SONY12 (1ULL << RC_TYPE_SONY12)
-#define RC_BIT_SONY15 (1ULL << RC_TYPE_SONY15)
-#define RC_BIT_SONY20 (1ULL << RC_TYPE_SONY20)
-#define RC_BIT_NEC (1ULL << RC_TYPE_NEC)
-#define RC_BIT_NECX (1ULL << RC_TYPE_NECX)
-#define RC_BIT_NEC32 (1ULL << RC_TYPE_NEC32)
-#define RC_BIT_SANYO (1ULL << RC_TYPE_SANYO)
-#define RC_BIT_MCE_KBD (1ULL << RC_TYPE_MCE_KBD)
-#define RC_BIT_RC6_0 (1ULL << RC_TYPE_RC6_0)
-#define RC_BIT_RC6_6A_20 (1ULL << RC_TYPE_RC6_6A_20)
-#define RC_BIT_RC6_6A_24 (1ULL << RC_TYPE_RC6_6A_24)
-#define RC_BIT_RC6_6A_32 (1ULL << RC_TYPE_RC6_6A_32)
-#define RC_BIT_RC6_MCE (1ULL << RC_TYPE_RC6_MCE)
-#define RC_BIT_SHARP (1ULL << RC_TYPE_SHARP)
-#define RC_BIT_XMP (1ULL << RC_TYPE_XMP)
-#define RC_BIT_CEC (1ULL << RC_TYPE_CEC)
+#define RC_BIT_UNKNOWN BIT_ULL(RC_TYPE_UNKNOWN)
+#define RC_BIT_OTHER BIT_ULL(RC_TYPE_OTHER)
+#define RC_BIT_RC5 BIT_ULL(RC_TYPE_RC5)
+#define RC_BIT_RC5X_20 BIT_ULL(RC_TYPE_RC5X_20)
+#define RC_BIT_RC5_SZ BIT_ULL(RC_TYPE_RC5_SZ)
+#define RC_BIT_JVC BIT_ULL(RC_TYPE_JVC)
+#define RC_BIT_SONY12 BIT_ULL(RC_TYPE_SONY12)
+#define RC_BIT_SONY15 BIT_ULL(RC_TYPE_SONY15)
+#define RC_BIT_SONY20 BIT_ULL(RC_TYPE_SONY20)
+#define RC_BIT_NEC BIT_ULL(RC_TYPE_NEC)
+#define RC_BIT_NECX BIT_ULL(RC_TYPE_NECX)
+#define RC_BIT_NEC32 BIT_ULL(RC_TYPE_NEC32)
+#define RC_BIT_SANYO BIT_ULL(RC_TYPE_SANYO)
+#define RC_BIT_MCIR2_KBD BIT_ULL(RC_TYPE_MCIR2_KBD)
+#define RC_BIT_MCIR2_MSE BIT_ULL(RC_TYPE_MCIR2_MSE)
+#define RC_BIT_RC6_0 BIT_ULL(RC_TYPE_RC6_0)
+#define RC_BIT_RC6_6A_20 BIT_ULL(RC_TYPE_RC6_6A_20)
+#define RC_BIT_RC6_6A_24 BIT_ULL(RC_TYPE_RC6_6A_24)
+#define RC_BIT_RC6_6A_32 BIT_ULL(RC_TYPE_RC6_6A_32)
+#define RC_BIT_RC6_MCE BIT_ULL(RC_TYPE_RC6_MCE)
+#define RC_BIT_SHARP BIT_ULL(RC_TYPE_SHARP)
+#define RC_BIT_XMP BIT_ULL(RC_TYPE_XMP)
+#define RC_BIT_CEC BIT_ULL(RC_TYPE_CEC)
#define RC_BIT_ALL (RC_BIT_UNKNOWN | RC_BIT_OTHER | \
RC_BIT_RC5 | RC_BIT_RC5X_20 | RC_BIT_RC5_SZ | \
RC_BIT_JVC | \
RC_BIT_SONY12 | RC_BIT_SONY15 | RC_BIT_SONY20 | \
RC_BIT_NEC | RC_BIT_NECX | RC_BIT_NEC32 | \
- RC_BIT_SANYO | RC_BIT_MCE_KBD | RC_BIT_RC6_0 | \
- RC_BIT_RC6_6A_20 | RC_BIT_RC6_6A_24 | \
+ RC_BIT_SANYO | \
+ RC_BIT_MCIR2_KBD | RC_BIT_MCIR2_MSE | \
+ RC_BIT_RC6_0 | RC_BIT_RC6_6A_20 | RC_BIT_RC6_6A_24 | \
RC_BIT_RC6_6A_32 | RC_BIT_RC6_MCE | RC_BIT_SHARP | \
RC_BIT_XMP | RC_BIT_CEC)
/* All rc protocols for which we have decoders */
@@ -101,8 +105,8 @@ enum rc_type {
RC_BIT_JVC | \
RC_BIT_SONY12 | RC_BIT_SONY15 | RC_BIT_SONY20 | \
RC_BIT_NEC | RC_BIT_NECX | RC_BIT_NEC32 | \
- RC_BIT_SANYO | RC_BIT_MCE_KBD | RC_BIT_RC6_0 | \
- RC_BIT_RC6_6A_20 | RC_BIT_RC6_6A_24 | \
+ RC_BIT_SANYO | RC_BIT_MCIR2_KBD | RC_BIT_MCIR2_MSE | \
+ RC_BIT_RC6_0 | RC_BIT_RC6_6A_20 | RC_BIT_RC6_6A_24 | \
RC_BIT_RC6_6A_32 | RC_BIT_RC6_MCE | RC_BIT_SHARP | \
RC_BIT_XMP)
@@ -111,7 +115,7 @@ enum rc_type {
RC_BIT_JVC | \
RC_BIT_SONY12 | RC_BIT_SONY15 | RC_BIT_SONY20 | \
RC_BIT_NEC | RC_BIT_NECX | RC_BIT_NEC32 | \
- RC_BIT_SANYO | \
+ RC_BIT_SANYO | RC_BIT_MCIR2_KBD | RC_BIT_MCIR2_MSE | \
RC_BIT_RC6_0 | RC_BIT_RC6_6A_20 | RC_BIT_RC6_6A_24 | \
RC_BIT_RC6_6A_32 | RC_BIT_RC6_MCE | \
RC_BIT_SHARP)
@@ -255,7 +259,6 @@ struct rc_map *rc_map_get(const char *name);
#define RC_MAP_KWORLD_PC150U "rc-kworld-pc150u"
#define RC_MAP_KWORLD_PLUS_TV_ANALOG "rc-kworld-plus-tv-analog"
#define RC_MAP_LEADTEK_Y04G0051 "rc-leadtek-y04g0051"
-#define RC_MAP_LIRC "rc-lirc"
#define RC_MAP_LME2510 "rc-lme2510"
#define RC_MAP_MANLI "rc-manli"
#define RC_MAP_MEDION_X10 "rc-medion-x10"
diff --git a/include/media/soc_camera.h b/include/media/soc_camera.h
index 1a15c3e4efd3..4d8cb0796bc6 100644
--- a/include/media/soc_camera.h
+++ b/include/media/soc_camera.h
@@ -17,7 +17,6 @@
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/videodev2.h>
-#include <media/videobuf-core.h>
#include <media/videobuf2-v4l2.h>
#include <media/v4l2-async.h>
#include <media/v4l2-ctrls.h>
@@ -55,10 +54,7 @@ struct soc_camera_device {
/* Asynchronous subdevice management */
struct soc_camera_async_client *sasc;
/* video buffer queue */
- union {
- struct videobuf_queue vb_vidq;
- struct vb2_queue vb2_vidq;
- };
+ struct vb2_queue vb2_vidq;
};
/* Host supports programmable stride */
@@ -114,11 +110,8 @@ struct soc_camera_host_ops {
int (*set_liveselection)(struct soc_camera_device *, struct v4l2_selection *);
int (*set_fmt)(struct soc_camera_device *, struct v4l2_format *);
int (*try_fmt)(struct soc_camera_device *, struct v4l2_format *);
- void (*init_videobuf)(struct videobuf_queue *,
- struct soc_camera_device *);
int (*init_videobuf2)(struct vb2_queue *,
struct soc_camera_device *);
- int (*reqbufs)(struct soc_camera_device *, struct v4l2_requestbuffers *);
int (*querycap)(struct soc_camera_host *, struct v4l2_capability *);
int (*set_bus_param)(struct soc_camera_device *);
int (*get_parm)(struct soc_camera_device *, struct v4l2_streamparm *);
@@ -396,11 +389,6 @@ static inline struct soc_camera_device *soc_camera_from_vb2q(const struct vb2_qu
return container_of(vq, struct soc_camera_device, vb2_vidq);
}
-static inline struct soc_camera_device *soc_camera_from_vbq(const struct videobuf_queue *vq)
-{
- return container_of(vq, struct soc_camera_device, vb_vidq);
-}
-
static inline u32 soc_camera_grp_id(const struct soc_camera_device *icd)
{
return (icd->iface << 8) | (icd->devnum + 1);
diff --git a/include/media/tveeprom.h b/include/media/tveeprom.h
index c56501ee0484..630bcf3d8885 100644
--- a/include/media/tveeprom.h
+++ b/include/media/tveeprom.h
@@ -94,13 +94,12 @@ struct tveeprom {
* of the eeprom previously filled at
* @eeprom_data field.
*
- * @c: I2C client struct
* @tvee: Struct to where the eeprom parsed data will be filled;
* @eeprom_data: Array with the contents of the eeprom_data. It should
* contain 256 bytes filled with the contents of the
* eeprom read from the Hauppauge device.
*/
-void tveeprom_hauppauge_analog(struct i2c_client *c, struct tveeprom *tvee,
+void tveeprom_hauppauge_analog(struct tveeprom *tvee,
unsigned char *eeprom_data);
/**
diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
index 6cd94e5ee113..bd5312118013 100644
--- a/include/media/v4l2-ioctl.h
+++ b/include/media/v4l2-ioctl.h
@@ -44,6 +44,9 @@ struct v4l2_fh;
* @vidioc_enum_fmt_sdr_out: pointer to the function that implements
* :ref:`VIDIOC_ENUM_FMT <vidioc_enum_fmt>` ioctl logic
* for Software Defined Radio output
+ * @vidioc_enum_fmt_meta_cap: pointer to the function that implements
+ * :ref:`VIDIOC_ENUM_FMT <vidioc_enum_fmt>` ioctl logic
+ * for metadata capture
* @vidioc_g_fmt_vid_cap: pointer to the function that implements
* :ref:`VIDIOC_G_FMT <vidioc_g_fmt>` ioctl logic for video capture
* in single plane mode
@@ -74,6 +77,8 @@ struct v4l2_fh;
* @vidioc_g_fmt_sdr_out: pointer to the function that implements
* :ref:`VIDIOC_G_FMT <vidioc_g_fmt>` ioctl logic for Software Defined
* Radio output
+ * @vidioc_g_fmt_meta_cap: pointer to the function that implements
+ * :ref:`VIDIOC_G_FMT <vidioc_g_fmt>` ioctl logic for metadata capture
* @vidioc_s_fmt_vid_cap: pointer to the function that implements
* :ref:`VIDIOC_S_FMT <vidioc_g_fmt>` ioctl logic for video capture
* in single plane mode
@@ -104,6 +109,8 @@ struct v4l2_fh;
* @vidioc_s_fmt_sdr_out: pointer to the function that implements
* :ref:`VIDIOC_S_FMT <vidioc_g_fmt>` ioctl logic for Software Defined
* Radio output
+ * @vidioc_s_fmt_meta_cap: pointer to the function that implements
+ * :ref:`VIDIOC_S_FMT <vidioc_g_fmt>` ioctl logic for metadata capture
* @vidioc_try_fmt_vid_cap: pointer to the function that implements
* :ref:`VIDIOC_TRY_FMT <vidioc_g_fmt>` ioctl logic for video capture
* in single plane mode
@@ -136,6 +143,8 @@ struct v4l2_fh;
* @vidioc_try_fmt_sdr_out: pointer to the function that implements
* :ref:`VIDIOC_TRY_FMT <vidioc_g_fmt>` ioctl logic for Software Defined
* Radio output
+ * @vidioc_try_fmt_meta_cap: pointer to the function that implements
+ * :ref:`VIDIOC_TRY_FMT <vidioc_g_fmt>` ioctl logic for metadata capture
* @vidioc_reqbufs: pointer to the function that implements
* :ref:`VIDIOC_REQBUFS <vidioc_reqbufs>` ioctl
* @vidioc_querybuf: pointer to the function that implements
@@ -306,6 +315,8 @@ struct v4l2_ioctl_ops {
struct v4l2_fmtdesc *f);
int (*vidioc_enum_fmt_sdr_out)(struct file *file, void *fh,
struct v4l2_fmtdesc *f);
+ int (*vidioc_enum_fmt_meta_cap)(struct file *file, void *fh,
+ struct v4l2_fmtdesc *f);
/* VIDIOC_G_FMT handlers */
int (*vidioc_g_fmt_vid_cap)(struct file *file, void *fh,
@@ -332,6 +343,8 @@ struct v4l2_ioctl_ops {
struct v4l2_format *f);
int (*vidioc_g_fmt_sdr_out)(struct file *file, void *fh,
struct v4l2_format *f);
+ int (*vidioc_g_fmt_meta_cap)(struct file *file, void *fh,
+ struct v4l2_format *f);
/* VIDIOC_S_FMT handlers */
int (*vidioc_s_fmt_vid_cap)(struct file *file, void *fh,
@@ -358,6 +371,8 @@ struct v4l2_ioctl_ops {
struct v4l2_format *f);
int (*vidioc_s_fmt_sdr_out)(struct file *file, void *fh,
struct v4l2_format *f);
+ int (*vidioc_s_fmt_meta_cap)(struct file *file, void *fh,
+ struct v4l2_format *f);
/* VIDIOC_TRY_FMT handlers */
int (*vidioc_try_fmt_vid_cap)(struct file *file, void *fh,
@@ -384,6 +399,8 @@ struct v4l2_ioctl_ops {
struct v4l2_format *f);
int (*vidioc_try_fmt_sdr_out)(struct file *file, void *fh,
struct v4l2_format *f);
+ int (*vidioc_try_fmt_meta_cap)(struct file *file, void *fh,
+ struct v4l2_format *f);
/* Buffer handlers */
int (*vidioc_reqbufs)(struct file *file, void *fh,
diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h
index ac5898a55fd9..cb97c224be73 100644
--- a/include/media/videobuf2-core.h
+++ b/include/media/videobuf2-core.h
@@ -305,16 +305,16 @@ struct vb2_buffer {
* buffer in \*num_planes, the size of each plane should be
* set in the sizes\[\] array and optional per-plane
* allocator specific device in the alloc_devs\[\] array.
- * When called from VIDIOC_REQBUFS,() \*num_planes == 0,
+ * When called from VIDIOC_REQBUFS(), \*num_planes == 0,
* the driver has to use the currently configured format to
* determine the plane sizes and \*num_buffers is the total
* number of buffers that are being allocated. When called
- * from VIDIOC_CREATE_BUFS,() \*num_planes != 0 and it
+ * from VIDIOC_CREATE_BUFS(), \*num_planes != 0 and it
* describes the requested number of planes and sizes\[\]
- * contains the requested plane sizes. If either
- * \*num_planes or the requested sizes are invalid callback
- * must return %-EINVAL. In this case \*num_buffers are
- * being allocated additionally to q->num_buffers.
+ * contains the requested plane sizes. In this case
+ * \*num_buffers are being allocated additionally to
+ * q->num_buffers. If either \*num_planes or the requested
+ * sizes are invalid callback must return %-EINVAL.
* @wait_prepare: release any locks taken while calling vb2 functions;
* it is called before an ioctl needs to wait for a new
* buffer to arrive; required to avoid a deadlock in
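
As a rough illustration of the clarified queue_setup() contract above (buffer size and plane count invented, and the handler prototype assumed to be the current q/num_buffers/num_planes/sizes[]/alloc_devs[] form): *num_planes distinguishes the VIDIOC_REQBUFS and VIDIOC_CREATE_BUFS paths the documentation describes.

#include <linux/device.h>
#include <linux/errno.h>
#include <media/videobuf2-core.h>

#define EXAMPLE_BUF_SIZE        (1024 * 1024)   /* hypothetical fixed size */

static int example_queue_setup(struct vb2_queue *q,
                               unsigned int *num_buffers,
                               unsigned int *num_planes,
                               unsigned int sizes[],
                               struct device *alloc_devs[])
{
        /* VIDIOC_CREATE_BUFS: validate the caller's requested layout. */
        if (*num_planes)
                return (*num_planes != 1 ||
                        sizes[0] < EXAMPLE_BUF_SIZE) ? -EINVAL : 0;

        /* VIDIOC_REQBUFS: report the layout for the current format. */
        *num_planes = 1;
        sizes[0] = EXAMPLE_BUF_SIZE;
        return 0;
}

A driver would then point its vb2_ops .queue_setup at this handler.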
diff --git a/include/media/videobuf2-memops.h b/include/media/videobuf2-memops.h
index 36565c7acb54..a6ed091b79ce 100644
--- a/include/media/videobuf2-memops.h
+++ b/include/media/videobuf2-memops.h
@@ -16,6 +16,7 @@
#include <media/videobuf2-v4l2.h>
#include <linux/mm.h>
+#include <linux/refcount.h>
/**
* struct vb2_vmarea_handler - common vma refcount tracking handler
@@ -25,7 +26,7 @@
* @arg: argument for @put callback
*/
struct vb2_vmarea_handler {
- atomic_t *refcount;
+ refcount_t *refcount;
void (*put)(void *arg);
void *arg;
};
diff --git a/include/misc/charlcd.h b/include/misc/charlcd.h
new file mode 100644
index 000000000000..23f61850f363
--- /dev/null
+++ b/include/misc/charlcd.h
@@ -0,0 +1,42 @@
+/*
+ * Character LCD driver for Linux
+ *
+ * Copyright (C) 2000-2008, Willy Tarreau <w@1wt.eu>
+ * Copyright (C) 2016-2017 Glider bvba
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+struct charlcd {
+ const struct charlcd_ops *ops;
+ const unsigned char *char_conv; /* Optional */
+
+ int ifwidth; /* 4-bit or 8-bit (default) */
+ int height;
+ int width;
+ int bwidth; /* Default set by charlcd_alloc() */
+ int hwidth; /* Default set by charlcd_alloc() */
+
+ void *drvdata; /* Set by charlcd_alloc() */
+};
+
+struct charlcd_ops {
+ /* Required */
+ void (*write_cmd)(struct charlcd *lcd, int cmd);
+ void (*write_data)(struct charlcd *lcd, int data);
+
+ /* Optional */
+ void (*write_cmd_raw4)(struct charlcd *lcd, int cmd); /* 4-bit only */
+ void (*clear_fast)(struct charlcd *lcd);
+ void (*backlight)(struct charlcd *lcd, int on);
+};
+
+struct charlcd *charlcd_alloc(unsigned int drvdata_size);
+
+int charlcd_register(struct charlcd *lcd);
+int charlcd_unregister(struct charlcd *lcd);
+
+void charlcd_poke(struct charlcd *lcd);
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index 2452e6449532..b43a4eec3cec 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -20,6 +20,8 @@
#define ADDRCONF_TIMER_FUZZ (HZ / 4)
#define ADDRCONF_TIMER_FUZZ_MAX (HZ)
+#define ADDRCONF_NOTIFY_PRIORITY 0
+
#include <linux/in.h>
#include <linux/in6.h>
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 6e90f1a4950f..b083e6cbae8c 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -1013,9 +1013,9 @@ enum rate_info_flags {
* @RATE_INFO_BW_160: 160 MHz bandwidth
*/
enum rate_info_bw {
+ RATE_INFO_BW_20 = 0,
RATE_INFO_BW_5,
RATE_INFO_BW_10,
- RATE_INFO_BW_20,
RATE_INFO_BW_40,
RATE_INFO_BW_80,
RATE_INFO_BW_160,
@@ -1666,7 +1666,7 @@ struct cfg80211_bss_select_adjust {
* (others are filtered out).
* If ommited, all results are passed.
* @n_match_sets: number of match sets
- * @results_wk: worker for processing results notification.
+ * @report_results: indicates that results were reported for this request
* @wiphy: the wiphy this was for
* @dev: the interface
* @scan_start: start time of the scheduled scan
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index 9dc2c182a263..f5e625f53367 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -84,6 +84,7 @@ struct dst_entry *ip6_route_lookup(struct net *net, struct flowi6 *fl6,
struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table,
int ifindex, struct flowi6 *fl6, int flags);
+void ip6_route_init_special_entries(void);
int ip6_route_init(void);
void ip6_route_cleanup(void);
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index 4d05a9443344..76ed24a201eb 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -1141,7 +1141,6 @@ enum mac80211_rx_flags {
* enum mac80211_rx_encoding_flags - MCS & bandwidth flags
*
* @RX_ENC_FLAG_SHORTPRE: Short preamble was used for this frame
- * @RX_ENC_FLAG_40MHZ: HT40 (40 MHz) was used
* @RX_ENC_FLAG_SHORT_GI: Short guard interval was used
* @RX_ENC_FLAG_HT_GF: This frame was received in a HT-greenfield transmission,
* if the driver fills this value it should add
@@ -1153,7 +1152,6 @@ enum mac80211_rx_flags {
*/
enum mac80211_rx_encoding_flags {
RX_ENC_FLAG_SHORTPRE = BIT(0),
- RX_ENC_FLAG_40MHZ = BIT(1),
RX_ENC_FLAG_SHORT_GI = BIT(2),
RX_ENC_FLAG_HT_GF = BIT(3),
RX_ENC_FLAG_STBC_MASK = BIT(4) | BIT(5),
diff --git a/include/net/secure_seq.h b/include/net/secure_seq.h
index fe236b3429f0..b94006f6fbdd 100644
--- a/include/net/secure_seq.h
+++ b/include/net/secure_seq.h
@@ -6,10 +6,12 @@
u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport);
u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
__be16 dport);
-u32 secure_tcp_seq_and_tsoff(__be32 saddr, __be32 daddr,
- __be16 sport, __be16 dport, u32 *tsoff);
-u32 secure_tcpv6_seq_and_tsoff(const __be32 *saddr, const __be32 *daddr,
- __be16 sport, __be16 dport, u32 *tsoff);
+u32 secure_tcp_seq(__be32 saddr, __be32 daddr,
+ __be16 sport, __be16 dport);
+u32 secure_tcp_ts_off(__be32 saddr, __be32 daddr);
+u32 secure_tcpv6_seq(const __be32 *saddr, const __be32 *daddr,
+ __be16 sport, __be16 dport);
+u32 secure_tcpv6_ts_off(const __be32 *saddr, const __be32 *daddr);
u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
__be16 sport, __be16 dport);
u64 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr,
diff --git a/include/net/sock.h b/include/net/sock.h
index 66349e49d468..f33e3d134e0b 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -995,7 +995,7 @@ struct smc_hashinfo;
struct module;
/*
- * caches using SLAB_DESTROY_BY_RCU should let .next pointer from nulls nodes
+ * caches using SLAB_TYPESAFE_BY_RCU should let .next pointer from nulls nodes
* un-modified. Special care is taken when initializing object to zero.
*/
static inline void sk_prot_clear_nulls(struct sock *sk, int size)
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 270e5cc43c99..38a7427ae902 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -470,7 +470,7 @@ void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb);
/* From syncookies.c */
struct sock *tcp_get_cookie_sock(struct sock *sk, struct sk_buff *skb,
struct request_sock *req,
- struct dst_entry *dst);
+ struct dst_entry *dst, u32 tsoff);
int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th,
u32 cookie);
struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb);
@@ -1234,10 +1234,12 @@ void tcp_cwnd_restart(struct sock *sk, s32 delta);
static inline void tcp_slow_start_after_idle_check(struct sock *sk)
{
+ const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
struct tcp_sock *tp = tcp_sk(sk);
s32 delta;
- if (!sysctl_tcp_slow_start_after_idle || tp->packets_out)
+ if (!sysctl_tcp_slow_start_after_idle || tp->packets_out ||
+ ca_ops->cong_control)
return;
delta = tcp_time_stamp - tp->lsndtime;
if (delta > inet_csk(sk)->icsk_rto)
@@ -1822,7 +1824,8 @@ struct tcp_request_sock_ops {
#endif
struct dst_entry *(*route_req)(const struct sock *sk, struct flowi *fl,
const struct request_sock *req);
- __u32 (*init_seq_tsoff)(const struct sk_buff *skb, u32 *tsoff);
+ u32 (*init_seq)(const struct sk_buff *skb);
+ u32 (*init_ts_off)(const struct sk_buff *skb);
int (*send_synack)(const struct sock *sk, struct dst_entry *dst,
struct flowi *fl, struct request_sock *req,
struct tcp_fastopen_cookie *foc,
diff --git a/include/rdma/rdmavt_qp.h b/include/rdma/rdmavt_qp.h
index 1d8141a88d3c..be6472e5b06b 100644
--- a/include/rdma/rdmavt_qp.h
+++ b/include/rdma/rdmavt_qp.h
@@ -324,6 +324,7 @@ struct rvt_qp {
u8 r_state; /* opcode of last packet received */
u8 r_flags;
u8 r_head_ack_queue; /* index into s_ack_queue[] */
+ u8 r_adefered; /* deferred ack count */
struct list_head rspwait; /* link for waiting to respond */
diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
index da5033dd8cbc..2109844be53d 100644
--- a/include/scsi/libfc.h
+++ b/include/scsi/libfc.h
@@ -23,6 +23,7 @@
#include <linux/timer.h>
#include <linux/if.h>
#include <linux/percpu.h>
+#include <linux/refcount.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_fc.h>
@@ -321,7 +322,7 @@ struct fc_seq_els_data {
*/
struct fc_fcp_pkt {
spinlock_t scsi_pkt_lock;
- atomic_t ref_cnt;
+ refcount_t ref_cnt;
/* SCSI command and data transfer information */
u32 data_len;
diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h
index 583875ea136a..c9bd935f4fd1 100644
--- a/include/scsi/libiscsi.h
+++ b/include/scsi/libiscsi.h
@@ -29,6 +29,7 @@
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/kfifo.h>
+#include <linux/refcount.h>
#include <scsi/iscsi_proto.h>
#include <scsi/iscsi_if.h>
#include <scsi/scsi_transport_iscsi.h>
@@ -139,7 +140,7 @@ struct iscsi_task {
/* state set/tested under session->lock */
int state;
- atomic_t refcount;
+ refcount_t refcount;
struct list_head running; /* running cmd list */
void *dd_data; /* driver/transport data */
};
diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h
index dae99d7d2bc0..dd0f72c95abe 100644
--- a/include/scsi/libsas.h
+++ b/include/scsi/libsas.h
@@ -693,7 +693,6 @@ extern int sas_bios_param(struct scsi_device *,
sector_t capacity, int *hsc);
extern struct scsi_transport_template *
sas_domain_attach_transport(struct sas_domain_function_template *);
-extern void sas_domain_release_transport(struct scsi_transport_template *);
int sas_discover_root_expander(struct domain_device *);
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index 080c7ce9bae8..05641aebd181 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -316,7 +316,7 @@ extern int scsi_unregister_device_handler(struct scsi_device_handler *scsi_dh);
void scsi_attach_vpd(struct scsi_device *sdev);
extern struct scsi_device *scsi_device_from_queue(struct request_queue *q);
-extern int scsi_device_get(struct scsi_device *);
+extern int __must_check scsi_device_get(struct scsi_device *);
extern void scsi_device_put(struct scsi_device *);
extern struct scsi_device *scsi_device_lookup(struct Scsi_Host *,
uint, uint, u64);
diff --git a/include/scsi/scsi_driver.h b/include/scsi/scsi_driver.h
index 891a658aa867..a5534ccad859 100644
--- a/include/scsi/scsi_driver.h
+++ b/include/scsi/scsi_driver.h
@@ -16,6 +16,7 @@ struct scsi_driver {
void (*uninit_command)(struct scsi_cmnd *);
int (*done)(struct scsi_cmnd *);
int (*eh_action)(struct scsi_cmnd *, int);
+ void (*eh_reset)(struct scsi_cmnd *);
};
#define to_scsi_driver(drv) \
container_of((drv), struct scsi_driver, gendrv)
diff --git a/include/scsi/scsi_eh.h b/include/scsi/scsi_eh.h
index 98d366b55770..64d30d80dadb 100644
--- a/include/scsi/scsi_eh.h
+++ b/include/scsi/scsi_eh.h
@@ -23,14 +23,15 @@ static inline bool scsi_sense_is_deferred(const struct scsi_sense_hdr *sshdr)
return ((sshdr->response_code >= 0x70) && (sshdr->response_code & 1));
}
-extern int scsi_get_sense_info_fld(const u8 * sense_buffer, int sb_len,
- u64 * info_out);
+extern bool scsi_get_sense_info_fld(const u8 *sense_buffer, int sb_len,
+ u64 *info_out);
extern int scsi_ioctl_reset(struct scsi_device *, int __user *);
struct scsi_eh_save {
/* saved state */
int result;
+ int eh_eflags;
enum dma_data_direction data_direction;
unsigned underflow;
unsigned char cmd_len;
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index 3cd8c3bec638..afb04811b7b9 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -452,11 +452,6 @@ struct scsi_host_template {
unsigned no_write_same:1;
/*
- * True if asynchronous aborts are not supported
- */
- unsigned no_async_abort:1;
-
- /*
* Countdown for host blocking with no commands outstanding.
*/
unsigned int max_host_blocked;
diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
index b21b8aa58c4d..6e208bb32c78 100644
--- a/include/scsi/scsi_transport_fc.h
+++ b/include/scsi/scsi_transport_fc.h
@@ -162,6 +162,7 @@ enum fc_tgtid_binding_type {
#define FC_PORT_ROLE_FCP_TARGET 0x01
#define FC_PORT_ROLE_FCP_INITIATOR 0x02
#define FC_PORT_ROLE_IP_PORT 0x04
+#define FC_PORT_ROLE_FCP_DUMMY_INITIATOR 0x08
/* The following are for compatibility */
#define FC_RPORT_ROLE_UNKNOWN FC_PORT_ROLE_UNKNOWN
diff --git a/include/scsi/sg.h b/include/scsi/sg.h
index 3afec7032448..20bc71c3e0b8 100644
--- a/include/scsi/sg.h
+++ b/include/scsi/sg.h
@@ -197,7 +197,6 @@ typedef struct sg_req_info { /* used by SG_GET_REQUEST_TABLE ioctl() */
#define SG_DEFAULT_RETRIES 0
/* Defaults, commented if they differ from original sg driver */
-#define SG_DEF_FORCE_LOW_DMA 0 /* was 1 -> memory below 16MB on i386 */
#define SG_DEF_FORCE_PACK_ID 0
#define SG_DEF_KEEP_ORPHAN 0
#define SG_DEF_RESERVED_SIZE SG_SCATTER_SZ /* load time option */
diff --git a/include/soc/tegra/flowctrl.h b/include/soc/tegra/flowctrl.h
new file mode 100644
index 000000000000..8f86aea4024b
--- /dev/null
+++ b/include/soc/tegra/flowctrl.h
@@ -0,0 +1,82 @@
+/*
+ * Functions and macros to control the flowcontroller
+ *
+ * Copyright (c) 2010-2012, NVIDIA Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __SOC_TEGRA_FLOWCTRL_H__
+#define __SOC_TEGRA_FLOWCTRL_H__
+
+#define FLOW_CTRL_HALT_CPU0_EVENTS 0x0
+#define FLOW_CTRL_WAITEVENT (2 << 29)
+#define FLOW_CTRL_WAIT_FOR_INTERRUPT (4 << 29)
+#define FLOW_CTRL_JTAG_RESUME (1 << 28)
+#define FLOW_CTRL_SCLK_RESUME (1 << 27)
+#define FLOW_CTRL_HALT_CPU_IRQ (1 << 10)
+#define FLOW_CTRL_HALT_CPU_FIQ (1 << 8)
+#define FLOW_CTRL_HALT_LIC_IRQ (1 << 11)
+#define FLOW_CTRL_HALT_LIC_FIQ (1 << 10)
+#define FLOW_CTRL_HALT_GIC_IRQ (1 << 9)
+#define FLOW_CTRL_HALT_GIC_FIQ (1 << 8)
+#define FLOW_CTRL_CPU0_CSR 0x8
+#define FLOW_CTRL_CSR_INTR_FLAG (1 << 15)
+#define FLOW_CTRL_CSR_EVENT_FLAG (1 << 14)
+#define FLOW_CTRL_CSR_ENABLE_EXT_CRAIL (1 << 13)
+#define FLOW_CTRL_CSR_ENABLE_EXT_NCPU (1 << 12)
+#define FLOW_CTRL_CSR_ENABLE_EXT_MASK ( \
+ FLOW_CTRL_CSR_ENABLE_EXT_NCPU | \
+ FLOW_CTRL_CSR_ENABLE_EXT_CRAIL)
+#define FLOW_CTRL_CSR_ENABLE (1 << 0)
+#define FLOW_CTRL_HALT_CPU1_EVENTS 0x14
+#define FLOW_CTRL_CPU1_CSR 0x18
+
+#define TEGRA20_FLOW_CTRL_CSR_WFE_CPU0 (1 << 4)
+#define TEGRA20_FLOW_CTRL_CSR_WFE_BITMAP (3 << 4)
+#define TEGRA20_FLOW_CTRL_CSR_WFI_BITMAP 0
+
+#define TEGRA30_FLOW_CTRL_CSR_WFI_CPU0 (1 << 8)
+#define TEGRA30_FLOW_CTRL_CSR_WFE_BITMAP (0xF << 4)
+#define TEGRA30_FLOW_CTRL_CSR_WFI_BITMAP (0xF << 8)
+
+#ifndef __ASSEMBLY__
+#ifdef CONFIG_SOC_TEGRA_FLOWCTRL
+u32 flowctrl_read_cpu_csr(unsigned int cpuid);
+void flowctrl_write_cpu_csr(unsigned int cpuid, u32 value);
+void flowctrl_write_cpu_halt(unsigned int cpuid, u32 value);
+
+void flowctrl_cpu_suspend_enter(unsigned int cpuid);
+void flowctrl_cpu_suspend_exit(unsigned int cpuid);
+#else
+static inline u32 flowctrl_read_cpu_csr(unsigned int cpuid)
+{
+ return 0;
+}
+
+static inline void flowctrl_write_cpu_csr(unsigned int cpuid, u32 value)
+{
+}
+
+static inline void flowctrl_write_cpu_halt(unsigned int cpuid, u32 value) {}
+
+static inline void flowctrl_cpu_suspend_enter(unsigned int cpuid)
+{
+}
+
+static inline void flowctrl_cpu_suspend_exit(unsigned int cpuid)
+{
+}
+#endif /* CONFIG_SOC_TEGRA_FLOWCTRL */
+#endif /* __ASSEMBLY__ */
+#endif /* __SOC_TEGRA_FLOWCTRL_H__ */
diff --git a/include/soc/tegra/pmc.h b/include/soc/tegra/pmc.h
index 2f271d1b9cea..1c3982bc558f 100644
--- a/include/soc/tegra/pmc.h
+++ b/include/soc/tegra/pmc.h
@@ -26,12 +26,6 @@
struct clk;
struct reset_control;
-#ifdef CONFIG_PM_SLEEP
-enum tegra_suspend_mode tegra_pmc_get_suspend_mode(void);
-void tegra_pmc_set_suspend_mode(enum tegra_suspend_mode mode);
-void tegra_pmc_enter_suspend_mode(enum tegra_suspend_mode mode);
-#endif /* CONFIG_PM_SLEEP */
-
#ifdef CONFIG_SMP
bool tegra_pmc_cpu_is_powered(unsigned int cpuid);
int tegra_pmc_cpu_power_on(unsigned int cpuid);
@@ -144,7 +138,7 @@ enum tegra_io_pad_voltage {
TEGRA_IO_PAD_3300000UV,
};
-#ifdef CONFIG_ARCH_TEGRA
+#ifdef CONFIG_SOC_TEGRA_PMC
int tegra_powergate_is_powered(unsigned int id);
int tegra_powergate_power_on(unsigned int id);
int tegra_powergate_power_off(unsigned int id);
@@ -163,6 +157,11 @@ int tegra_io_pad_get_voltage(enum tegra_io_pad id);
/* deprecated, use tegra_io_pad_power_{enable,disable}() instead */
int tegra_io_rail_power_on(unsigned int id);
int tegra_io_rail_power_off(unsigned int id);
+
+enum tegra_suspend_mode tegra_pmc_get_suspend_mode(void);
+void tegra_pmc_set_suspend_mode(enum tegra_suspend_mode mode);
+void tegra_pmc_enter_suspend_mode(enum tegra_suspend_mode mode);
+
#else
static inline int tegra_powergate_is_powered(unsigned int id)
{
@@ -221,6 +220,20 @@ static inline int tegra_io_rail_power_off(unsigned int id)
{
return -ENOSYS;
}
-#endif /* CONFIG_ARCH_TEGRA */
+
+static inline enum tegra_suspend_mode tegra_pmc_get_suspend_mode(void)
+{
+ return TEGRA_SUSPEND_NONE;
+}
+
+static inline void tegra_pmc_set_suspend_mode(enum tegra_suspend_mode mode)
+{
+}
+
+static inline void tegra_pmc_enter_suspend_mode(enum tegra_suspend_mode mode)
+{
+}
+
+#endif /* CONFIG_SOC_TEGRA_PMC */
#endif /* __SOC_TEGRA_PMC_H__ */
diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
index a3c3cab643a9..e37973526153 100644
--- a/include/trace/events/btrfs.h
+++ b/include/trace/events/btrfs.h
@@ -12,6 +12,7 @@ struct btrfs_root;
struct btrfs_fs_info;
struct btrfs_inode;
struct extent_map;
+struct btrfs_file_extent_item;
struct btrfs_ordered_extent;
struct btrfs_delayed_ref_node;
struct btrfs_delayed_tree_ref;
@@ -24,6 +25,7 @@ struct extent_buffer;
struct btrfs_work;
struct __btrfs_workqueue;
struct btrfs_qgroup_extent_record;
+struct btrfs_qgroup;
#define show_ref_type(type) \
__print_symbolic(type, \
@@ -54,6 +56,12 @@ struct btrfs_qgroup_extent_record;
(obj >= BTRFS_ROOT_TREE_OBJECTID && \
obj <= BTRFS_QUOTA_TREE_OBJECTID)) ? __show_root_type(obj) : "-"
+#define show_fi_type(type) \
+ __print_symbolic(type, \
+ { BTRFS_FILE_EXTENT_INLINE, "INLINE" }, \
+ { BTRFS_FILE_EXTENT_REG, "REG" }, \
+ { BTRFS_FILE_EXTENT_PREALLOC, "PREALLOC"})
+
#define BTRFS_GROUP_FLAGS \
{ BTRFS_BLOCK_GROUP_DATA, "DATA"}, \
{ BTRFS_BLOCK_GROUP_SYSTEM, "SYSTEM"}, \
@@ -213,7 +221,7 @@ TRACE_EVENT_CONDITION(btrfs_get_extent,
__entry->block_start = map->block_start;
__entry->block_len = map->block_len;
__entry->flags = map->flags;
- __entry->refs = atomic_read(&map->refs);
+ __entry->refs = refcount_read(&map->refs);
__entry->compress_type = map->compress_type;
),
@@ -232,6 +240,138 @@ TRACE_EVENT_CONDITION(btrfs_get_extent,
__entry->refs, __entry->compress_type)
);
+/* file extent item */
+DECLARE_EVENT_CLASS(btrfs__file_extent_item_regular,
+
+ TP_PROTO(struct btrfs_inode *bi, struct extent_buffer *l,
+ struct btrfs_file_extent_item *fi, u64 start),
+
+ TP_ARGS(bi, l, fi, start),
+
+ TP_STRUCT__entry_btrfs(
+ __field( u64, root_obj )
+ __field( u64, ino )
+ __field( loff_t, isize )
+ __field( u64, disk_isize )
+ __field( u64, num_bytes )
+ __field( u64, ram_bytes )
+ __field( u64, disk_bytenr )
+ __field( u64, disk_num_bytes )
+ __field( u64, extent_offset )
+ __field( u8, extent_type )
+ __field( u8, compression )
+ __field( u64, extent_start )
+ __field( u64, extent_end )
+ ),
+
+ TP_fast_assign_btrfs(bi->root->fs_info,
+ __entry->root_obj = bi->root->objectid;
+ __entry->ino = btrfs_ino(bi);
+ __entry->isize = bi->vfs_inode.i_size;
+ __entry->disk_isize = bi->disk_i_size;
+ __entry->num_bytes = btrfs_file_extent_num_bytes(l, fi);
+ __entry->ram_bytes = btrfs_file_extent_ram_bytes(l, fi);
+ __entry->disk_bytenr = btrfs_file_extent_disk_bytenr(l, fi);
+ __entry->disk_num_bytes = btrfs_file_extent_disk_num_bytes(l, fi);
+ __entry->extent_offset = btrfs_file_extent_offset(l, fi);
+ __entry->extent_type = btrfs_file_extent_type(l, fi);
+ __entry->compression = btrfs_file_extent_compression(l, fi);
+ __entry->extent_start = start;
+ __entry->extent_end = (start + __entry->num_bytes);
+ ),
+
+ TP_printk_btrfs(
+ "root=%llu(%s) inode=%llu size=%llu disk_isize=%llu "
+ "file extent range=[%llu %llu] "
+ "(num_bytes=%llu ram_bytes=%llu disk_bytenr=%llu "
+ "disk_num_bytes=%llu extent_offset=%llu type=%s "
+ "compression=%u",
+ show_root_type(__entry->root_obj), __entry->ino,
+ __entry->isize,
+ __entry->disk_isize, __entry->extent_start,
+ __entry->extent_end, __entry->num_bytes, __entry->ram_bytes,
+ __entry->disk_bytenr, __entry->disk_num_bytes,
+ __entry->extent_offset, show_fi_type(__entry->extent_type),
+ __entry->compression)
+);
+
+DECLARE_EVENT_CLASS(
+ btrfs__file_extent_item_inline,
+
+ TP_PROTO(struct btrfs_inode *bi, struct extent_buffer *l,
+ struct btrfs_file_extent_item *fi, int slot, u64 start),
+
+ TP_ARGS(bi, l, fi, slot, start),
+
+ TP_STRUCT__entry_btrfs(
+ __field( u64, root_obj )
+ __field( u64, ino )
+ __field( loff_t, isize )
+ __field( u64, disk_isize )
+ __field( u8, extent_type )
+ __field( u8, compression )
+ __field( u64, extent_start )
+ __field( u64, extent_end )
+ ),
+
+ TP_fast_assign_btrfs(
+ bi->root->fs_info,
+ __entry->root_obj = bi->root->objectid;
+ __entry->ino = btrfs_ino(bi);
+ __entry->isize = bi->vfs_inode.i_size;
+ __entry->disk_isize = bi->disk_i_size;
+ __entry->extent_type = btrfs_file_extent_type(l, fi);
+ __entry->compression = btrfs_file_extent_compression(l, fi);
+ __entry->extent_start = start;
+ __entry->extent_end = (start + btrfs_file_extent_inline_len(l, slot, fi));
+ ),
+
+ TP_printk_btrfs(
+ "root=%llu(%s) inode=%llu size=%llu disk_isize=%llu "
+ "file extent range=[%llu %llu] "
+ "extent_type=%s compression=%u",
+ show_root_type(__entry->root_obj), __entry->ino, __entry->isize,
+ __entry->disk_isize, __entry->extent_start,
+ __entry->extent_end, show_fi_type(__entry->extent_type),
+ __entry->compression)
+);
+
+DEFINE_EVENT(
+ btrfs__file_extent_item_regular, btrfs_get_extent_show_fi_regular,
+
+ TP_PROTO(struct btrfs_inode *bi, struct extent_buffer *l,
+ struct btrfs_file_extent_item *fi, u64 start),
+
+ TP_ARGS(bi, l, fi, start)
+);
+
+DEFINE_EVENT(
+ btrfs__file_extent_item_regular, btrfs_truncate_show_fi_regular,
+
+ TP_PROTO(struct btrfs_inode *bi, struct extent_buffer *l,
+ struct btrfs_file_extent_item *fi, u64 start),
+
+ TP_ARGS(bi, l, fi, start)
+);
+
+DEFINE_EVENT(
+ btrfs__file_extent_item_inline, btrfs_get_extent_show_fi_inline,
+
+ TP_PROTO(struct btrfs_inode *bi, struct extent_buffer *l,
+ struct btrfs_file_extent_item *fi, int slot, u64 start),
+
+ TP_ARGS(bi, l, fi, slot, start)
+);
+
+DEFINE_EVENT(
+ btrfs__file_extent_item_inline, btrfs_truncate_show_fi_inline,
+
+ TP_PROTO(struct btrfs_inode *bi, struct extent_buffer *l,
+ struct btrfs_file_extent_item *fi, int slot, u64 start),
+
+ TP_ARGS(bi, l, fi, slot, start)
+);
+
#define show_ordered_flags(flags) \
__print_flags(flags, "|", \
{ (1 << BTRFS_ORDERED_IO_DONE), "IO_DONE" }, \
@@ -275,7 +415,7 @@ DECLARE_EVENT_CLASS(btrfs__ordered_extent,
__entry->bytes_left = ordered->bytes_left;
__entry->flags = ordered->flags;
__entry->compress_type = ordered->compress_type;
- __entry->refs = atomic_read(&ordered->refs);
+ __entry->refs = refcount_read(&ordered->refs);
__entry->root_objectid =
BTRFS_I(inode)->root->root_key.objectid;
__entry->truncated_len = ordered->truncated_len;
@@ -1475,6 +1615,49 @@ TRACE_EVENT(qgroup_update_counters,
__entry->cur_new_count)
);
+TRACE_EVENT(qgroup_update_reserve,
+
+ TP_PROTO(struct btrfs_fs_info *fs_info, struct btrfs_qgroup *qgroup,
+ s64 diff),
+
+ TP_ARGS(fs_info, qgroup, diff),
+
+ TP_STRUCT__entry_btrfs(
+ __field( u64, qgid )
+ __field( u64, cur_reserved )
+ __field( s64, diff )
+ ),
+
+ TP_fast_assign_btrfs(fs_info,
+ __entry->qgid = qgroup->qgroupid;
+ __entry->cur_reserved = qgroup->reserved;
+ __entry->diff = diff;
+ ),
+
+ TP_printk_btrfs("qgid=%llu cur_reserved=%llu diff=%lld",
+ __entry->qgid, __entry->cur_reserved, __entry->diff)
+);
+
+TRACE_EVENT(qgroup_meta_reserve,
+
+ TP_PROTO(struct btrfs_root *root, s64 diff),
+
+ TP_ARGS(root, diff),
+
+ TP_STRUCT__entry_btrfs(
+ __field( u64, refroot )
+ __field( s64, diff )
+ ),
+
+ TP_fast_assign_btrfs(root->fs_info,
+ __entry->refroot = root->objectid;
+ __entry->diff = diff;
+ ),
+
+ TP_printk_btrfs("refroot=%llu(%s) diff=%lld",
+ show_root_type(__entry->refroot), __entry->diff)
+);
+
#endif /* _TRACE_BTRFS_H */
/* This part must be outside protection */
diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h
index 09c71e9aaebf..dfae175ddebc 100644
--- a/include/trace/events/ext4.h
+++ b/include/trace/events/ext4.h
@@ -15,6 +15,7 @@ struct ext4_inode_info;
struct mpage_da_data;
struct ext4_map_blocks;
struct extent_status;
+struct ext4_fsmap;
#define EXT4_I(inode) (container_of(inode, struct ext4_inode_info, vfs_inode))
@@ -2529,6 +2530,79 @@ TRACE_EVENT(ext4_es_shrink,
__entry->scan_time, __entry->nr_skipped, __entry->retried)
);
+/* fsmap traces */
+DECLARE_EVENT_CLASS(ext4_fsmap_class,
+ TP_PROTO(struct super_block *sb, u32 keydev, u32 agno, u64 bno, u64 len,
+ u64 owner),
+ TP_ARGS(sb, keydev, agno, bno, len, owner),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(dev_t, keydev)
+ __field(u32, agno)
+ __field(u64, bno)
+ __field(u64, len)
+ __field(u64, owner)
+ ),
+ TP_fast_assign(
+ __entry->dev = sb->s_bdev->bd_dev;
+ __entry->keydev = new_decode_dev(keydev);
+ __entry->agno = agno;
+ __entry->bno = bno;
+ __entry->len = len;
+ __entry->owner = owner;
+ ),
+ TP_printk("dev %d:%d keydev %d:%d agno %u bno %llu len %llu owner %lld\n",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ MAJOR(__entry->keydev), MINOR(__entry->keydev),
+ __entry->agno,
+ __entry->bno,
+ __entry->len,
+ __entry->owner)
+)
+#define DEFINE_FSMAP_EVENT(name) \
+DEFINE_EVENT(ext4_fsmap_class, name, \
+ TP_PROTO(struct super_block *sb, u32 keydev, u32 agno, u64 bno, u64 len, \
+ u64 owner), \
+ TP_ARGS(sb, keydev, agno, bno, len, owner))
+DEFINE_FSMAP_EVENT(ext4_fsmap_low_key);
+DEFINE_FSMAP_EVENT(ext4_fsmap_high_key);
+DEFINE_FSMAP_EVENT(ext4_fsmap_mapping);
+
+DECLARE_EVENT_CLASS(ext4_getfsmap_class,
+ TP_PROTO(struct super_block *sb, struct ext4_fsmap *fsmap),
+ TP_ARGS(sb, fsmap),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(dev_t, keydev)
+ __field(u64, block)
+ __field(u64, len)
+ __field(u64, owner)
+ __field(u64, flags)
+ ),
+ TP_fast_assign(
+ __entry->dev = sb->s_bdev->bd_dev;
+ __entry->keydev = new_decode_dev(fsmap->fmr_device);
+ __entry->block = fsmap->fmr_physical;
+ __entry->len = fsmap->fmr_length;
+ __entry->owner = fsmap->fmr_owner;
+ __entry->flags = fsmap->fmr_flags;
+ ),
+ TP_printk("dev %d:%d keydev %d:%d block %llu len %llu owner %lld flags 0x%llx\n",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ MAJOR(__entry->keydev), MINOR(__entry->keydev),
+ __entry->block,
+ __entry->len,
+ __entry->owner,
+ __entry->flags)
+)
+#define DEFINE_GETFSMAP_EVENT(name) \
+DEFINE_EVENT(ext4_getfsmap_class, name, \
+ TP_PROTO(struct super_block *sb, struct ext4_fsmap *fsmap), \
+ TP_ARGS(sb, fsmap))
+DEFINE_GETFSMAP_EVENT(ext4_getfsmap_low_key);
+DEFINE_GETFSMAP_EVENT(ext4_getfsmap_high_key);
+DEFINE_GETFSMAP_EVENT(ext4_getfsmap_mapping);
+
#endif /* _TRACE_EXT4_H */
/* This part must be outside protection */
diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h
index c80fcad0a6c9..15da88c5c3a4 100644
--- a/include/trace/events/f2fs.h
+++ b/include/trace/events/f2fs.h
@@ -15,6 +15,8 @@ TRACE_DEFINE_ENUM(META);
TRACE_DEFINE_ENUM(META_FLUSH);
TRACE_DEFINE_ENUM(INMEM);
TRACE_DEFINE_ENUM(INMEM_DROP);
+TRACE_DEFINE_ENUM(INMEM_INVALIDATE);
+TRACE_DEFINE_ENUM(INMEM_REVOKE);
TRACE_DEFINE_ENUM(IPU);
TRACE_DEFINE_ENUM(OPU);
TRACE_DEFINE_ENUM(CURSEG_HOT_DATA);
@@ -42,6 +44,7 @@ TRACE_DEFINE_ENUM(CP_FASTBOOT);
TRACE_DEFINE_ENUM(CP_SYNC);
TRACE_DEFINE_ENUM(CP_RECOVERY);
TRACE_DEFINE_ENUM(CP_DISCARD);
+TRACE_DEFINE_ENUM(CP_TRIMMED);
#define show_block_type(type) \
__print_symbolic(type, \
@@ -51,12 +54,13 @@ TRACE_DEFINE_ENUM(CP_DISCARD);
{ META_FLUSH, "META_FLUSH" }, \
{ INMEM, "INMEM" }, \
{ INMEM_DROP, "INMEM_DROP" }, \
+ { INMEM_INVALIDATE, "INMEM_INVALIDATE" }, \
{ INMEM_REVOKE, "INMEM_REVOKE" }, \
{ IPU, "IN-PLACE" }, \
{ OPU, "OUT-OF-PLACE" })
-#define F2FS_OP_FLAGS (REQ_RAHEAD | REQ_SYNC | REQ_PREFLUSH | REQ_META |\
- REQ_PRIO)
+#define F2FS_OP_FLAGS (REQ_RAHEAD | REQ_SYNC | REQ_META | REQ_PRIO | \
+ REQ_PREFLUSH | REQ_FUA)
#define F2FS_BIO_FLAG_MASK(t) (t & F2FS_OP_FLAGS)
#define show_bio_type(op,op_flags) show_bio_op(op), \
@@ -75,16 +79,13 @@ TRACE_DEFINE_ENUM(CP_DISCARD);
{ REQ_OP_WRITE_ZEROES, "WRITE_ZEROES" })
#define show_bio_op_flags(flags) \
- __print_symbolic(F2FS_BIO_FLAG_MASK(flags), \
- { REQ_RAHEAD, "(RA)" }, \
- { REQ_SYNC, "(S)" }, \
- { REQ_SYNC | REQ_PRIO, "(SP)" }, \
- { REQ_META, "(M)" }, \
- { REQ_META | REQ_PRIO, "(MP)" }, \
- { REQ_SYNC | REQ_PREFLUSH , "(SF)" }, \
- { REQ_SYNC | REQ_META | REQ_PRIO, "(SMP)" }, \
- { REQ_PREFLUSH | REQ_META | REQ_PRIO, "(FMP)" }, \
- { 0, " \b" })
+ __print_flags(F2FS_BIO_FLAG_MASK(flags), "|", \
+ { REQ_RAHEAD, "R" }, \
+ { REQ_SYNC, "S" }, \
+ { REQ_META, "M" }, \
+ { REQ_PRIO, "P" }, \
+ { REQ_PREFLUSH, "PF" }, \
+ { REQ_FUA, "FUA" })
#define show_data_type(type) \
__print_symbolic(type, \
@@ -117,12 +118,14 @@ TRACE_DEFINE_ENUM(CP_DISCARD);
{ GC_CB, "Cost-Benefit" })
#define show_cpreason(type) \
- __print_symbolic(type, \
+ __print_flags(type, "|", \
{ CP_UMOUNT, "Umount" }, \
{ CP_FASTBOOT, "Fastboot" }, \
{ CP_SYNC, "Sync" }, \
{ CP_RECOVERY, "Recovery" }, \
- { CP_DISCARD, "Discard" })
+ { CP_DISCARD, "Discard" }, \
+ { CP_UMOUNT, "Umount" }, \
+ { CP_TRIMMED, "Trimmed" })
struct victim_sel_policy;
struct f2fs_map_blocks;
@@ -769,7 +772,7 @@ DECLARE_EVENT_CLASS(f2fs__submit_page_bio,
),
TP_printk("dev = (%d,%d), ino = %lu, page_index = 0x%lx, "
- "oldaddr = 0x%llx, newaddr = 0x%llx, rw = %s%s, type = %s",
+ "oldaddr = 0x%llx, newaddr = 0x%llx, rw = %s(%s), type = %s",
show_dev_ino(__entry),
(unsigned long)__entry->index,
(unsigned long long)__entry->old_blkaddr,
@@ -822,7 +825,7 @@ DECLARE_EVENT_CLASS(f2fs__bio,
__entry->size = bio->bi_iter.bi_size;
),
- TP_printk("dev = (%d,%d)/(%d,%d), rw = %s%s, %s, sector = %lld, size = %u",
+ TP_printk("dev = (%d,%d)/(%d,%d), rw = %s(%s), %s, sector = %lld, size = %u",
show_dev(__entry->target),
show_dev(__entry->dev),
show_bio_type(__entry->op, __entry->op_flags),
@@ -1126,7 +1129,7 @@ TRACE_EVENT(f2fs_write_checkpoint,
__entry->msg)
);
-TRACE_EVENT(f2fs_issue_discard,
+DECLARE_EVENT_CLASS(f2fs_discard,
TP_PROTO(struct block_device *dev, block_t blkstart, block_t blklen),
@@ -1150,6 +1153,20 @@ TRACE_EVENT(f2fs_issue_discard,
(unsigned long long)__entry->blklen)
);
+DEFINE_EVENT(f2fs_discard, f2fs_queue_discard,
+
+ TP_PROTO(struct block_device *dev, block_t blkstart, block_t blklen),
+
+ TP_ARGS(dev, blkstart, blklen)
+);
+
+DEFINE_EVENT(f2fs_discard, f2fs_issue_discard,
+
+ TP_PROTO(struct block_device *dev, block_t blkstart, block_t blklen),
+
+ TP_ARGS(dev, blkstart, blklen)
+);
+
TRACE_EVENT(f2fs_issue_reset_zone,
TP_PROTO(struct block_device *dev, block_t blkstart),
@@ -1174,26 +1191,29 @@ TRACE_EVENT(f2fs_issue_reset_zone,
TRACE_EVENT(f2fs_issue_flush,
TP_PROTO(struct block_device *dev, unsigned int nobarrier,
- unsigned int flush_merge),
+ unsigned int flush_merge, int ret),
- TP_ARGS(dev, nobarrier, flush_merge),
+ TP_ARGS(dev, nobarrier, flush_merge, ret),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(unsigned int, nobarrier)
__field(unsigned int, flush_merge)
+ __field(int, ret)
),
TP_fast_assign(
__entry->dev = dev->bd_dev;
__entry->nobarrier = nobarrier;
__entry->flush_merge = flush_merge;
+ __entry->ret = ret;
),
- TP_printk("dev = (%d,%d), %s %s",
+ TP_printk("dev = (%d,%d), %s %s, ret = %d",
show_dev(__entry->dev),
__entry->nobarrier ? "skip (nobarrier)" : "issue",
- __entry->flush_merge ? " with flush_merge" : "")
+ __entry->flush_merge ? " with flush_merge" : "",
+ __entry->ret)
);
TRACE_EVENT(f2fs_lookup_extent_tree_start,
diff --git a/include/trace/events/fs_dax.h b/include/trace/events/fs_dax.h
index c566ddc87f73..08bb3ed18dcc 100644
--- a/include/trace/events/fs_dax.h
+++ b/include/trace/events/fs_dax.h
@@ -150,6 +150,136 @@ DEFINE_EVENT(dax_pmd_insert_mapping_class, name, \
DEFINE_PMD_INSERT_MAPPING_EVENT(dax_pmd_insert_mapping);
DEFINE_PMD_INSERT_MAPPING_EVENT(dax_pmd_insert_mapping_fallback);
+DECLARE_EVENT_CLASS(dax_pte_fault_class,
+ TP_PROTO(struct inode *inode, struct vm_fault *vmf, int result),
+ TP_ARGS(inode, vmf, result),
+ TP_STRUCT__entry(
+ __field(unsigned long, ino)
+ __field(unsigned long, vm_flags)
+ __field(unsigned long, address)
+ __field(pgoff_t, pgoff)
+ __field(dev_t, dev)
+ __field(unsigned int, flags)
+ __field(int, result)
+ ),
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->vm_flags = vmf->vma->vm_flags;
+ __entry->address = vmf->address;
+ __entry->flags = vmf->flags;
+ __entry->pgoff = vmf->pgoff;
+ __entry->result = result;
+ ),
+ TP_printk("dev %d:%d ino %#lx %s %s address %#lx pgoff %#lx %s",
+ MAJOR(__entry->dev),
+ MINOR(__entry->dev),
+ __entry->ino,
+ __entry->vm_flags & VM_SHARED ? "shared" : "private",
+ __print_flags(__entry->flags, "|", FAULT_FLAG_TRACE),
+ __entry->address,
+ __entry->pgoff,
+ __print_flags(__entry->result, "|", VM_FAULT_RESULT_TRACE)
+ )
+)
+
+#define DEFINE_PTE_FAULT_EVENT(name) \
+DEFINE_EVENT(dax_pte_fault_class, name, \
+ TP_PROTO(struct inode *inode, struct vm_fault *vmf, int result), \
+ TP_ARGS(inode, vmf, result))
+
+DEFINE_PTE_FAULT_EVENT(dax_pte_fault);
+DEFINE_PTE_FAULT_EVENT(dax_pte_fault_done);
+DEFINE_PTE_FAULT_EVENT(dax_pfn_mkwrite_no_entry);
+DEFINE_PTE_FAULT_EVENT(dax_pfn_mkwrite);
+DEFINE_PTE_FAULT_EVENT(dax_load_hole);
+
+TRACE_EVENT(dax_insert_mapping,
+ TP_PROTO(struct inode *inode, struct vm_fault *vmf, void *radix_entry),
+ TP_ARGS(inode, vmf, radix_entry),
+ TP_STRUCT__entry(
+ __field(unsigned long, ino)
+ __field(unsigned long, vm_flags)
+ __field(unsigned long, address)
+ __field(void *, radix_entry)
+ __field(dev_t, dev)
+ __field(int, write)
+ ),
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->vm_flags = vmf->vma->vm_flags;
+ __entry->address = vmf->address;
+ __entry->write = vmf->flags & FAULT_FLAG_WRITE;
+ __entry->radix_entry = radix_entry;
+ ),
+ TP_printk("dev %d:%d ino %#lx %s %s address %#lx radix_entry %#lx",
+ MAJOR(__entry->dev),
+ MINOR(__entry->dev),
+ __entry->ino,
+ __entry->vm_flags & VM_SHARED ? "shared" : "private",
+ __entry->write ? "write" : "read",
+ __entry->address,
+ (unsigned long)__entry->radix_entry
+ )
+)
+
+DECLARE_EVENT_CLASS(dax_writeback_range_class,
+ TP_PROTO(struct inode *inode, pgoff_t start_index, pgoff_t end_index),
+ TP_ARGS(inode, start_index, end_index),
+ TP_STRUCT__entry(
+ __field(unsigned long, ino)
+ __field(pgoff_t, start_index)
+ __field(pgoff_t, end_index)
+ __field(dev_t, dev)
+ ),
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->start_index = start_index;
+ __entry->end_index = end_index;
+ ),
+ TP_printk("dev %d:%d ino %#lx pgoff %#lx-%#lx",
+ MAJOR(__entry->dev),
+ MINOR(__entry->dev),
+ __entry->ino,
+ __entry->start_index,
+ __entry->end_index
+ )
+)
+
+#define DEFINE_WRITEBACK_RANGE_EVENT(name) \
+DEFINE_EVENT(dax_writeback_range_class, name, \
+ TP_PROTO(struct inode *inode, pgoff_t start_index, pgoff_t end_index),\
+ TP_ARGS(inode, start_index, end_index))
+
+DEFINE_WRITEBACK_RANGE_EVENT(dax_writeback_range);
+DEFINE_WRITEBACK_RANGE_EVENT(dax_writeback_range_done);
+
+TRACE_EVENT(dax_writeback_one,
+ TP_PROTO(struct inode *inode, pgoff_t pgoff, pgoff_t pglen),
+ TP_ARGS(inode, pgoff, pglen),
+ TP_STRUCT__entry(
+ __field(unsigned long, ino)
+ __field(pgoff_t, pgoff)
+ __field(pgoff_t, pglen)
+ __field(dev_t, dev)
+ ),
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->pgoff = pgoff;
+ __entry->pglen = pglen;
+ ),
+ TP_printk("dev %d:%d ino %#lx pgoff %#lx pglen %#lx",
+ MAJOR(__entry->dev),
+ MINOR(__entry->dev),
+ __entry->ino,
+ __entry->pgoff,
+ __entry->pglen
+ )
+)
+
#endif /* _TRACE_FS_DAX_H */
/* This part must be outside protection */
diff --git a/include/trace/events/iommu.h b/include/trace/events/iommu.h
index 2c7befb10f13..99254ed89212 100644
--- a/include/trace/events/iommu.h
+++ b/include/trace/events/iommu.h
@@ -11,7 +11,6 @@
#define _TRACE_IOMMU_H
#include <linux/tracepoint.h>
-#include <linux/pci.h>
struct device;
diff --git a/include/trace/events/v4l2.h b/include/trace/events/v4l2.h
index ee7754c6e4a1..b3a85b3df53e 100644
--- a/include/trace/events/v4l2.h
+++ b/include/trace/events/v4l2.h
@@ -29,6 +29,7 @@
EM( V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE, "VIDEO_OUTPUT_MPLANE" ) \
EM( V4L2_BUF_TYPE_SDR_CAPTURE, "SDR_CAPTURE" ) \
EM( V4L2_BUF_TYPE_SDR_OUTPUT, "SDR_OUTPUT" ) \
+ EM( V4L2_BUF_TYPE_META_CAPTURE, "META_CAPTURE" ) \
EMe(V4L2_BUF_TYPE_PRIVATE, "PRIVATE" )
SHOW_TYPE
diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h
index 995c8f9c692f..55e301047b3e 100644
--- a/include/uapi/drm/drm_fourcc.h
+++ b/include/uapi/drm/drm_fourcc.h
@@ -306,6 +306,51 @@ extern "C" {
*/
#define DRM_FORMAT_MOD_VIVANTE_SPLIT_SUPER_TILED fourcc_mod_code(VIVANTE, 4)
+
+/* NVIDIA Tegra frame buffer modifiers */
+
+/*
+ * Some modifiers take parameters, for example the number of vertical GOBs in
+ * a block. Reserve the lower 32 bits for parameters
+ */
+#define __fourcc_mod_tegra_mode_shift 32
+#define fourcc_mod_tegra_code(val, params) \
+ fourcc_mod_code(NV, ((((__u64)val) << __fourcc_mod_tegra_mode_shift) | params))
+#define fourcc_mod_tegra_mod(m) \
+ (m & ~((1ULL << __fourcc_mod_tegra_mode_shift) - 1))
+#define fourcc_mod_tegra_param(m) \
+ (m & ((1ULL << __fourcc_mod_tegra_mode_shift) - 1))
+
+/*
+ * Tegra Tiled Layout, used by Tegra 2, 3 and 4.
+ *
+ * Pixels are arranged in simple tiles of 16 x 16 bytes.
+ */
+#define NV_FORMAT_MOD_TEGRA_TILED fourcc_mod_tegra_code(1, 0)
+
+/*
+ * Tegra 16Bx2 Block Linear layout, used by TK1/TX1
+ *
+ * Pixels are arranged in 64x8 Groups Of Bytes (GOBs). GOBs are then stacked
+ * vertically by a power of 2 (1 to 32 GOBs) to form a block.
+ *
+ * Within a GOB, data is ordered as 16B x 2 lines sectors laid in Z-shape.
+ *
+ * Parameter 'v' is the log2 encoding of the number of GOBs stacked vertically.
+ * Valid values are:
+ *
+ * 0 == ONE_GOB
+ * 1 == TWO_GOBS
+ * 2 == FOUR_GOBS
+ * 3 == EIGHT_GOBS
+ * 4 == SIXTEEN_GOBS
+ * 5 == THIRTYTWO_GOBS
+ *
+ * Chapter 20 "Pixel Memory Formats" of the Tegra X1 TRM describes this format
+ * in full detail.
+ */
+#define NV_FORMAT_MOD_TEGRA_16BX2_BLOCK(v) fourcc_mod_tegra_code(2, v)
+
#if defined(__cplusplus)
}
#endif
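
The Tegra modifier helpers above split a 64-bit modifier into a mode (upper
bits) and a 32-bit parameter; for the 16Bx2 block-linear layout the parameter
is the log2 of the GOBs stacked per block. A small hedged sketch of the
encode/decode round trip (userspace C; the include path is an assumption and
depends on whether the kernel uapi or libdrm copy of the header is used):

    #include <stdio.h>
    #include <stdint.h>
    #include <drm/drm_fourcc.h>        /* include path is an assumption */

    int main(void)
    {
            /* four GOBs stacked vertically -> parameter is log2(4) == 2 */
            uint64_t mod = NV_FORMAT_MOD_TEGRA_16BX2_BLOCK(2);

            printf("mode part:  0x%llx\n",
                   (unsigned long long)fourcc_mod_tegra_mod(mod));
            printf("gob param:  %llu (%u GOBs per block)\n",
                   (unsigned long long)fourcc_mod_tegra_param(mod),
                   1u << fourcc_mod_tegra_param(mod));
            return 0;
    }
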
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index 6b0e2758585f..662c592b74dd 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -333,6 +333,7 @@ header-y += parport.h
header-y += patchkey.h
header-y += pci.h
header-y += pci_regs.h
+header-y += pcitest.h
header-y += perf_event.h
header-y += personality.h
header-y += pfkeyv2.h
diff --git a/include/uapi/linux/aspeed-lpc-ctrl.h b/include/uapi/linux/aspeed-lpc-ctrl.h
new file mode 100644
index 000000000000..c328c976c684
--- /dev/null
+++ b/include/uapi/linux/aspeed-lpc-ctrl.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright 2017 IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _UAPI_LINUX_ASPEED_LPC_CTRL_H
+#define _UAPI_LINUX_ASPEED_LPC_CTRL_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+/* Window types */
+#define ASPEED_LPC_CTRL_WINDOW_FLASH 1
+#define ASPEED_LPC_CTRL_WINDOW_MEMORY 2
+
+/*
+ * This driver provides a window for the host to access a BMC resource
+ * across the BMC <-> Host LPC bus.
+ *
+ * window_type: The BMC resource that the host will access through the
+ * window. BMC flash and BMC RAM.
+ *
+ * window_id: For each window type there may be multiple windows,
+ * these are referenced by ID.
+ *
+ * flags: Reserved for future use, this field is expected to be
+ * zeroed.
+ *
+ * addr: Address on the host LPC bus at which the specified window should
+ * be mapped. This address must be power of two aligned.
+ *
+ * offset: Offset into the BMC window that should be mapped to the
+ * host (at addr). This must be a multiple of size.
+ *
+ * size: The size of the mapping. The smallest possible size is 64K.
+ * This must be power of two aligned.
+ *
+ */
+
+struct aspeed_lpc_ctrl_mapping {
+ __u8 window_type;
+ __u8 window_id;
+ __u16 flags;
+ __u32 addr;
+ __u32 offset;
+ __u32 size;
+};
+
+#define __ASPEED_LPC_CTRL_IOCTL_MAGIC 0xb2
+
+#define ASPEED_LPC_CTRL_IOCTL_GET_SIZE _IOWR(__ASPEED_LPC_CTRL_IOCTL_MAGIC, \
+ 0x00, struct aspeed_lpc_ctrl_mapping)
+
+#define ASPEED_LPC_CTRL_IOCTL_MAP _IOW(__ASPEED_LPC_CTRL_IOCTL_MAGIC, \
+ 0x01, struct aspeed_lpc_ctrl_mapping)
+
+#endif /* _UAPI_LINUX_ASPEED_LPC_CTRL_H */
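
To make the field descriptions above concrete, a hedged userspace sketch that
queries the reserved-memory window size and then maps BMC RAM onto the host
LPC firmware space (the /dev/aspeed-lpc-ctrl node name and the example
addresses are assumptions, not part of this patch):

    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/aspeed-lpc-ctrl.h>

    int main(void)
    {
            int fd = open("/dev/aspeed-lpc-ctrl", O_RDWR);  /* assumed node name */
            struct aspeed_lpc_ctrl_mapping query = {
                    .window_type = ASPEED_LPC_CTRL_WINDOW_MEMORY,
                    .window_id   = 0,
            };
            struct aspeed_lpc_ctrl_mapping map = {
                    .window_type = ASPEED_LPC_CTRL_WINDOW_MEMORY,
                    .window_id   = 0,
                    .flags       = 0,            /* must be zero */
                    .addr        = 0x0e000000,   /* example host LPC address */
                    .offset      = 0,            /* multiple of size */
                    .size        = 0x01000000,   /* 16 MiB, power-of-two aligned */
            };

            if (fd < 0)
                    return 1;
            if (ioctl(fd, ASPEED_LPC_CTRL_IOCTL_GET_SIZE, &query) == 0)
                    printf("reserved window: %u bytes\n", query.size);
            if (ioctl(fd, ASPEED_LPC_CTRL_IOCTL_MAP, &map) < 0)
                    perror("ASPEED_LPC_CTRL_IOCTL_MAP");
            close(fd);
            return 0;
    }
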
diff --git a/include/uapi/linux/btrfs.h b/include/uapi/linux/btrfs.h
index dcfc3a5a9cb1..a456e5309238 100644
--- a/include/uapi/linux/btrfs.h
+++ b/include/uapi/linux/btrfs.h
@@ -291,10 +291,10 @@ struct btrfs_ioctl_feature_flags {
struct btrfs_balance_args {
__u64 profiles;
union {
- __le64 usage;
+ __u64 usage;
struct {
- __le32 usage_min;
- __le32 usage_max;
+ __u32 usage_min;
+ __u32 usage_max;
};
};
__u64 devid;
@@ -324,8 +324,8 @@ struct btrfs_balance_args {
* Process chunks that cross stripes_min..stripes_max devices,
* BTRFS_BALANCE_ARGS_STRIPES_RANGE
*/
- __le32 stripes_min;
- __le32 stripes_max;
+ __u32 stripes_min;
+ __u32 stripes_max;
__u64 unused[6];
} __attribute__ ((__packed__));
diff --git a/include/uapi/linux/capability.h b/include/uapi/linux/capability.h
index 49bc06295398..6fe14d001f68 100644
--- a/include/uapi/linux/capability.h
+++ b/include/uapi/linux/capability.h
@@ -205,7 +205,7 @@ struct vfs_cap_data {
#define CAP_SYS_MODULE 16
/* Allow ioperm/iopl access */
-/* Allow sending USB messages to any device via /proc/bus/usb */
+/* Allow sending USB messages to any device via /dev/bus/usb */
#define CAP_SYS_RAWIO 17
diff --git a/include/uapi/linux/cec.h b/include/uapi/linux/cec.h
index 14b6f24b189e..a0dfe27bc6c7 100644
--- a/include/uapi/linux/cec.h
+++ b/include/uapi/linux/cec.h
@@ -223,7 +223,7 @@ static inline int cec_msg_status_is_ok(const struct cec_msg *msg)
#define CEC_LOG_ADDR_BACKUP_2 13
#define CEC_LOG_ADDR_SPECIFIC 14
#define CEC_LOG_ADDR_UNREGISTERED 15 /* as initiator address */
-#define CEC_LOG_ADDR_BROADCAST 15 /* ad destination address */
+#define CEC_LOG_ADDR_BROADCAST 15 /* as destination address */
/* The logical address types that the CEC device wants to claim */
#define CEC_LOG_ADDR_TYPE_TV 0
diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h
index 176b6cb1008d..b5280db9ef6a 100644
--- a/include/uapi/linux/elf.h
+++ b/include/uapi/linux/elf.h
@@ -419,7 +419,7 @@ typedef struct elf64_shdr {
#define NT_METAG_CBUF 0x500 /* Metag catch buffer registers */
#define NT_METAG_RPIPE 0x501 /* Metag read pipeline state */
#define NT_METAG_TLS 0x502 /* Metag TLS pointer */
-
+#define NT_ARC_V2 0x600 /* ARCv2 accumulator/extra registers */
/* Note header in a PT_NOTE section */
typedef struct elf32_note {
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
index 5f4ea28eabe4..d179d7767f51 100644
--- a/include/uapi/linux/ethtool.h
+++ b/include/uapi/linux/ethtool.h
@@ -1494,6 +1494,7 @@ enum ethtool_link_mode_bit_indices {
#define SPEED_2500 2500
#define SPEED_5000 5000
#define SPEED_10000 10000
+#define SPEED_14000 14000
#define SPEED_20000 20000
#define SPEED_25000 25000
#define SPEED_40000 40000
diff --git a/include/uapi/linux/eventpoll.h b/include/uapi/linux/eventpoll.h
index 1c3154913a39..f4d5c998cc2b 100644
--- a/include/uapi/linux/eventpoll.h
+++ b/include/uapi/linux/eventpoll.h
@@ -26,8 +26,21 @@
#define EPOLL_CTL_DEL 2
#define EPOLL_CTL_MOD 3
+/* Epoll event masks */
+#define EPOLLIN 0x00000001
+#define EPOLLPRI 0x00000002
+#define EPOLLOUT 0x00000004
+#define EPOLLERR 0x00000008
+#define EPOLLHUP 0x00000010
+#define EPOLLRDNORM 0x00000040
+#define EPOLLRDBAND 0x00000080
+#define EPOLLWRNORM 0x00000100
+#define EPOLLWRBAND 0x00000200
+#define EPOLLMSG 0x00000400
+#define EPOLLRDHUP 0x00002000
+
/* Set exclusive wakeup mode for the target file descriptor */
-#define EPOLLEXCLUSIVE (1 << 28)
+#define EPOLLEXCLUSIVE (1U << 28)
/*
* Request the handling of system wakeup events so as to prevent system suspends
@@ -39,13 +52,13 @@
*
* Requires CAP_BLOCK_SUSPEND
*/
-#define EPOLLWAKEUP (1 << 29)
+#define EPOLLWAKEUP (1U << 29)
/* Set the One Shot behaviour for the target file descriptor */
-#define EPOLLONESHOT (1 << 30)
+#define EPOLLONESHOT (1U << 30)
/* Set the Edge Triggered behaviour for the target file descriptor */
-#define EPOLLET (1 << 31)
+#define EPOLLET (1U << 31)
/*
* On x86-64 make the 64bit structure have the same alignment as the
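
With the event masks now present in the uapi header itself (and the high bits
defined as unsigned), a plain C program can use them directly; a minimal
hedged sketch watching stdin for edge-triggered readability:

    #include <stdio.h>
    #include <unistd.h>
    #include <sys/epoll.h>

    int main(void)
    {
            int epfd = epoll_create1(0);
            struct epoll_event ev = {
                    .events  = EPOLLIN | EPOLLET,   /* EPOLLET is (1U << 31) */
                    .data.fd = STDIN_FILENO,
            };
            struct epoll_event out;

            if (epfd < 0 || epoll_ctl(epfd, EPOLL_CTL_ADD, STDIN_FILENO, &ev) < 0)
                    return 1;
            if (epoll_wait(epfd, &out, 1, 1000 /* ms */) > 0 &&
                (out.events & EPOLLIN))
                    printf("fd %d is readable\n", out.data.fd);
            close(epfd);
            return 0;
    }
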
diff --git a/include/uapi/linux/fs.h b/include/uapi/linux/fs.h
index 048a85e9f017..24e61a54feaa 100644
--- a/include/uapi/linux/fs.h
+++ b/include/uapi/linux/fs.h
@@ -279,12 +279,25 @@ struct fscrypt_policy {
__u8 filenames_encryption_mode;
__u8 flags;
__u8 master_key_descriptor[FS_KEY_DESCRIPTOR_SIZE];
-} __packed;
+};
#define FS_IOC_SET_ENCRYPTION_POLICY _IOR('f', 19, struct fscrypt_policy)
#define FS_IOC_GET_ENCRYPTION_PWSALT _IOW('f', 20, __u8[16])
#define FS_IOC_GET_ENCRYPTION_POLICY _IOW('f', 21, struct fscrypt_policy)
+/* Parameters for passing an encryption key into the kernel keyring */
+#define FS_KEY_DESC_PREFIX "fscrypt:"
+#define FS_KEY_DESC_PREFIX_SIZE 8
+
+/* Structure that userspace passes to the kernel keyring */
+#define FS_MAX_KEY_SIZE 64
+
+struct fscrypt_key {
+ __u32 mode;
+ __u8 raw[FS_MAX_KEY_SIZE];
+ __u32 size;
+};
+
/*
* Inode flags (FS_IOC_GETFLAGS / FS_IOC_SETFLAGS)
*
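
The fscrypt_key structure added above is the payload userspace hands to the
kernel keyring: a "logon" key whose description is FS_KEY_DESC_PREFIX followed
by the hex key descriptor from the encryption policy. A hedged sketch using
the add_key(2) syscall (the descriptor string and the raw key bytes below are
placeholders only):

    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/fs.h>
    #include <linux/keyctl.h>

    int main(void)
    {
            struct fscrypt_key key = { .mode = 0, .size = FS_MAX_KEY_SIZE };
            char desc[64];
            long serial;

            memset(key.raw, 0xab, FS_MAX_KEY_SIZE);           /* placeholder key */
            snprintf(desc, sizeof(desc), "%s%s",
                     FS_KEY_DESC_PREFIX, "0123456789abcdef");  /* placeholder descriptor */

            serial = syscall(__NR_add_key, "logon", desc, &key, sizeof(key),
                             KEY_SPEC_SESSION_KEYRING);
            if (serial < 0) {
                    perror("add_key");
                    return 1;
            }
            printf("added key, serial %ld\n", serial);
            return 0;
    }
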
diff --git a/include/uapi/linux/fsmap.h b/include/uapi/linux/fsmap.h
new file mode 100644
index 000000000000..7e8e5f0bd6d2
--- /dev/null
+++ b/include/uapi/linux/fsmap.h
@@ -0,0 +1,112 @@
+/*
+ * FS_IOC_GETFSMAP ioctl infrastructure.
+ *
+ * Copyright (C) 2017 Oracle. All Rights Reserved.
+ *
+ * Author: Darrick J. Wong <darrick.wong@oracle.com>
+ */
+#ifndef _LINUX_FSMAP_H
+#define _LINUX_FSMAP_H
+
+#include <linux/types.h>
+
+/*
+ * Structure for FS_IOC_GETFSMAP.
+ *
+ * The memory layout for this call is the scalar values defined in
+ * struct fsmap_head, followed by two struct fsmap that describe
+ * the lower and upper bound of mappings to return, followed by an
+ * array of struct fsmap mappings.
+ *
+ * fmh_iflags control the output of the call, whereas fmh_oflags report
+ * on the overall record output. fmh_count should be set to the
+ * length of the fmh_recs array, and fmh_entries will be set to the
+ * number of entries filled out during each call. If fmh_count is
+ * zero, the number of reverse mappings will be returned in
+ * fmh_entries, though no mappings will be returned. fmh_reserved
+ * must be set to zero.
+ *
+ * The two elements in the fmh_keys array are used to constrain the
+ * output. The first element in the array should represent the
+ * lowest disk mapping ("low key") that the user wants to learn
+ * about. If this value is all zeroes, the filesystem will return
+ * the first entry it knows about. For a subsequent call, the
+ * contents of fsmap_head.fmh_recs[fsmap_head.fmh_entries - 1] should be
+ * copied into fmh_keys[0] to have the kernel start where it left off.
+ *
+ * The second element in the fmh_keys array should represent the
+ * highest disk mapping ("high key") that the user wants to learn
+ * about. If this value is all ones, the filesystem will not stop
+ * until it runs out of mappings to return or runs out of space in
+ * fmh_recs.
+ *
+ * fmr_device can be either a 32-bit cookie representing a device, or
+ * a 32-bit dev_t if the FMH_OF_DEV_T flag is set. fmr_physical,
+ * fmr_offset, and fmr_length are expressed in units of bytes.
+ * fmr_owner is either an inode number, or a special value if
+ * FMR_OF_SPECIAL_OWNER is set in fmr_flags.
+ */
+struct fsmap {
+ __u32 fmr_device; /* device id */
+ __u32 fmr_flags; /* mapping flags */
+ __u64 fmr_physical; /* device offset of segment */
+ __u64 fmr_owner; /* owner id */
+ __u64 fmr_offset; /* file offset of segment */
+ __u64 fmr_length; /* length of segment */
+ __u64 fmr_reserved[3]; /* must be zero */
+};
+
+struct fsmap_head {
+ __u32 fmh_iflags; /* control flags */
+ __u32 fmh_oflags; /* output flags */
+ __u32 fmh_count; /* # of entries in array incl. input */
+ __u32 fmh_entries; /* # of entries filled in (output). */
+ __u64 fmh_reserved[6]; /* must be zero */
+
+ struct fsmap fmh_keys[2]; /* low and high keys for the mapping search */
+ struct fsmap fmh_recs[]; /* returned records */
+};
+
+/* Size of an fsmap_head with room for nr records. */
+static inline size_t
+fsmap_sizeof(
+ unsigned int nr)
+{
+ return sizeof(struct fsmap_head) + nr * sizeof(struct fsmap);
+}
+
+/* Start the next fsmap query at the end of the current query results. */
+static inline void
+fsmap_advance(
+ struct fsmap_head *head)
+{
+ head->fmh_keys[0] = head->fmh_recs[head->fmh_entries - 1];
+}
+
+/* fmh_iflags values - set by FS_IOC_GETFSMAP caller in the header. */
+/* no flags defined yet */
+#define FMH_IF_VALID 0
+
+/* fmh_oflags values - returned in the header segment only. */
+#define FMH_OF_DEV_T 0x1 /* fmr_device values will be dev_t */
+
+/* fmr_flags values - returned for each non-header segment */
+#define FMR_OF_PREALLOC 0x1 /* segment = unwritten pre-allocation */
+#define FMR_OF_ATTR_FORK 0x2 /* segment = attribute fork */
+#define FMR_OF_EXTENT_MAP 0x4 /* segment = extent map */
+#define FMR_OF_SHARED 0x8 /* segment = shared with another file */
+#define FMR_OF_SPECIAL_OWNER 0x10 /* owner is a special value */
+#define FMR_OF_LAST 0x20 /* segment is the last in the FS */
+
+/* Each FS gets to define its own special owner codes. */
+#define FMR_OWNER(type, code) (((__u64)type << 32) | \
+ ((__u64)code & 0xFFFFFFFFULL))
+#define FMR_OWNER_TYPE(owner) ((__u32)((__u64)owner >> 32))
+#define FMR_OWNER_CODE(owner) ((__u32)(((__u64)owner & 0xFFFFFFFFULL)))
+#define FMR_OWN_FREE FMR_OWNER(0, 1) /* free space */
+#define FMR_OWN_UNKNOWN FMR_OWNER(0, 2) /* unknown owner */
+#define FMR_OWN_METADATA FMR_OWNER(0, 3) /* metadata */
+
+#define FS_IOC_GETFSMAP _IOWR('X', 59, struct fsmap_head)
+
+#endif /* _LINUX_FSMAP_H */
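
The comment block above describes an iteration protocol; for illustration, a
hedged userspace sketch of that loop (error handling trimmed; the mount point
is a placeholder, and any fd on the filesystem works):

    #include <stdio.h>
    #include <stdlib.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/fsmap.h>

    #define NR_RECS 128

    int main(void)
    {
            int fd = open("/mnt/test", O_RDONLY);               /* placeholder path */
            struct fsmap_head *head = calloc(1, fsmap_sizeof(NR_RECS));
            unsigned int i;

            if (fd < 0 || !head)
                    return 1;

            head->fmh_count = NR_RECS;
            /* low key stays all zeroes (calloc); high key fields go to all ones,
             * but the reserved fields must remain zero */
            head->fmh_keys[1].fmr_device   = ~0U;
            head->fmh_keys[1].fmr_flags    = ~0U;
            head->fmh_keys[1].fmr_physical = ~0ULL;
            head->fmh_keys[1].fmr_owner    = ~0ULL;
            head->fmh_keys[1].fmr_offset   = ~0ULL;
            head->fmh_keys[1].fmr_length   = ~0ULL;

            while (ioctl(fd, FS_IOC_GETFSMAP, head) == 0 && head->fmh_entries) {
                    for (i = 0; i < head->fmh_entries; i++) {
                            struct fsmap *rec = &head->fmh_recs[i];

                            printf("phys=%llu len=%llu owner=%lld flags=0x%x\n",
                                   (unsigned long long)rec->fmr_physical,
                                   (unsigned long long)rec->fmr_length,
                                   (long long)rec->fmr_owner, rec->fmr_flags);
                            if (rec->fmr_flags & FMR_OF_LAST)
                                    goto done;
                    }
                    fsmap_advance(head);    /* resume at the last returned record */
            }
    done:
            free(head);
            close(fd);
            return 0;
    }
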
diff --git a/include/uapi/linux/ipmi.h b/include/uapi/linux/ipmi.h
index 7b26a62e5707..b9095a27a08a 100644
--- a/include/uapi/linux/ipmi.h
+++ b/include/uapi/linux/ipmi.h
@@ -355,7 +355,7 @@ struct ipmi_cmdspec {
#define IPMICTL_REGISTER_FOR_CMD _IOR(IPMI_IOC_MAGIC, 14, \
struct ipmi_cmdspec)
/*
- * Unregister a regsitered command. error values:
+ * Unregister a registered command. error values:
* - EFAULT - an address supplied was invalid.
* - ENOENT - The netfn/cmd was not found registered for this user.
*/
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index f51d5082a377..577429a95ad8 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -702,6 +702,10 @@ struct kvm_ppc_resize_hpt {
#define KVM_VM_PPC_HV 1
#define KVM_VM_PPC_PR 2
+/* on MIPS, 0 forces trap & emulate, 1 forces VZ ASE */
+#define KVM_VM_MIPS_TE 0
+#define KVM_VM_MIPS_VZ 1
+
#define KVM_S390_SIE_PAGE_OFFSET 1
/*
@@ -883,6 +887,14 @@ struct kvm_ppc_resize_hpt {
#define KVM_CAP_PPC_MMU_RADIX 134
#define KVM_CAP_PPC_MMU_HASH_V3 135
#define KVM_CAP_IMMEDIATE_EXIT 136
+#define KVM_CAP_MIPS_VZ 137
+#define KVM_CAP_MIPS_TE 138
+#define KVM_CAP_MIPS_64BIT 139
+#define KVM_CAP_S390_GS 140
+#define KVM_CAP_S390_AIS 141
+#define KVM_CAP_SPAPR_TCE_VFIO 142
+#define KVM_CAP_X86_GUEST_MWAIT 143
+#define KVM_CAP_ARM_USER_IRQ 144
#ifdef KVM_CAP_IRQ_ROUTING
@@ -1087,6 +1099,7 @@ struct kvm_device_attr {
#define KVM_DEV_VFIO_GROUP 1
#define KVM_DEV_VFIO_GROUP_ADD 1
#define KVM_DEV_VFIO_GROUP_DEL 2
+#define KVM_DEV_VFIO_GROUP_SET_SPAPR_TCE 3
enum kvm_device_type {
KVM_DEV_TYPE_FSL_MPIC_20 = 1,
@@ -1108,6 +1121,11 @@ enum kvm_device_type {
KVM_DEV_TYPE_MAX,
};
+struct kvm_vfio_spapr_tce {
+ __s32 groupfd;
+ __s32 tablefd;
+};
+
/*
* ioctls for VM fds
*/
@@ -1354,4 +1372,11 @@ struct kvm_assigned_msix_entry {
#define KVM_X2APIC_API_USE_32BIT_IDS (1ULL << 0)
#define KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK (1ULL << 1)
+/* Available with KVM_CAP_ARM_USER_IRQ */
+
+/* Bits for run->s.regs.device_irq_level */
+#define KVM_ARM_DEV_EL1_VTIMER (1 << 0)
+#define KVM_ARM_DEV_EL1_PTIMER (1 << 1)
+#define KVM_ARM_DEV_PMU (1 << 2)
+
#endif /* __LINUX_KVM_H */
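
The new KVM_DEV_VFIO_GROUP_SET_SPAPR_TCE attribute takes both file descriptors
through the existing device-attribute path. A hedged sketch of the call shape
(vfio_dev_fd is assumed to come from KVM_CREATE_DEVICE with KVM_DEV_TYPE_VFIO,
and the group/table fds are assumed to be open already):

    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    static int kvm_vfio_set_spapr_tce(int vfio_dev_fd, int groupfd, int tablefd)
    {
            struct kvm_vfio_spapr_tce param = {
                    .groupfd = groupfd,
                    .tablefd = tablefd,
            };
            struct kvm_device_attr attr = {
                    .group = KVM_DEV_VFIO_GROUP,
                    .attr  = KVM_DEV_VFIO_GROUP_SET_SPAPR_TCE,
                    .addr  = (__u64)(unsigned long)&param,
            };

            return ioctl(vfio_dev_fd, KVM_SET_DEVICE_ATTR, &attr);
    }
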
diff --git a/include/uapi/linux/ndctl.h b/include/uapi/linux/ndctl.h
index ede5c6a62164..7ad3863cb88b 100644
--- a/include/uapi/linux/ndctl.h
+++ b/include/uapi/linux/ndctl.h
@@ -169,6 +169,7 @@ enum {
enum {
ND_ARS_VOLATILE = 1,
ND_ARS_PERSISTENT = 2,
+ ND_CONFIG_LOCKED = 1,
};
static inline const char *nvdimm_bus_cmd_name(unsigned cmd)
diff --git a/include/uapi/linux/netfilter/nf_conntrack_common.h b/include/uapi/linux/netfilter/nf_conntrack_common.h
index a8072cc7fa0b..dc947e59d03a 100644
--- a/include/uapi/linux/netfilter/nf_conntrack_common.h
+++ b/include/uapi/linux/netfilter/nf_conntrack_common.h
@@ -84,10 +84,6 @@ enum ip_conntrack_status {
IPS_DYING_BIT = 9,
IPS_DYING = (1 << IPS_DYING_BIT),
- /* Bits that cannot be altered from userland. */
- IPS_UNCHANGEABLE_MASK = (IPS_NAT_DONE_MASK | IPS_NAT_MASK |
- IPS_EXPECTED | IPS_CONFIRMED | IPS_DYING),
-
/* Connection has fixed timeout. */
IPS_FIXED_TIMEOUT_BIT = 10,
IPS_FIXED_TIMEOUT = (1 << IPS_FIXED_TIMEOUT_BIT),
@@ -103,6 +99,15 @@ enum ip_conntrack_status {
/* Conntrack got a helper explicitly attached via CT target. */
IPS_HELPER_BIT = 13,
IPS_HELPER = (1 << IPS_HELPER_BIT),
+
+ /* Be careful here, modifying these bits can make things messy,
+ * so don't let users modify them directly.
+ */
+ IPS_UNCHANGEABLE_MASK = (IPS_NAT_DONE_MASK | IPS_NAT_MASK |
+ IPS_EXPECTED | IPS_CONFIRMED | IPS_DYING |
+ IPS_SEQ_ADJUST | IPS_TEMPLATE),
+
+ __IPS_MAX_BIT = 14,
};
/* Connection tracking event types */
diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h
index 18a26c16bd80..d56bb0051009 100644
--- a/include/uapi/linux/pci_regs.h
+++ b/include/uapi/linux/pci_regs.h
@@ -114,7 +114,7 @@
#define PCI_SUBSYSTEM_ID 0x2e
#define PCI_ROM_ADDRESS 0x30 /* Bits 31..11 are address, 10..1 reserved */
#define PCI_ROM_ADDRESS_ENABLE 0x01
-#define PCI_ROM_ADDRESS_MASK (~0x7ffUL)
+#define PCI_ROM_ADDRESS_MASK (~0x7ffU)
#define PCI_CAPABILITY_LIST 0x34 /* Offset of first capability list entry */
diff --git a/include/uapi/linux/pcitest.h b/include/uapi/linux/pcitest.h
new file mode 100644
index 000000000000..a6aa10c45ad1
--- /dev/null
+++ b/include/uapi/linux/pcitest.h
@@ -0,0 +1,19 @@
+/**
+ * pcitest.h - PCI test uapi defines
+ *
+ * Copyright (C) 2017 Texas Instruments
+ * Author: Kishon Vijay Abraham I <kishon@ti.com>
+ *
+ */
+
+#ifndef __UAPI_LINUX_PCITEST_H
+#define __UAPI_LINUX_PCITEST_H
+
+#define PCITEST_BAR _IO('P', 0x1)
+#define PCITEST_LEGACY_IRQ _IO('P', 0x2)
+#define PCITEST_MSI _IOW('P', 0x3, int)
+#define PCITEST_WRITE _IOW('P', 0x4, unsigned long)
+#define PCITEST_READ _IOW('P', 0x5, unsigned long)
+#define PCITEST_COPY _IOW('P', 0x6, unsigned long)
+
+#endif /* __UAPI_LINUX_PCITEST_H */
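
For illustration, a hedged userspace sketch issuing a few of the new test
ioctls (the /dev/pci-endpoint-test.0 node name is an assumption, and the
interpretation of the return values is left to the host-side test driver):

    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/pcitest.h>

    int main(void)
    {
            int fd = open("/dev/pci-endpoint-test.0", O_RDWR);  /* assumed node */
            int ret;

            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            ret = ioctl(fd, PCITEST_BAR, 0);                /* test BAR 0 */
            printf("PCITEST_BAR(0)   returned %d\n", ret);
            ret = ioctl(fd, PCITEST_MSI, 1);                /* raise MSI vector 1 */
            printf("PCITEST_MSI(1)   returned %d\n", ret);
            ret = ioctl(fd, PCITEST_WRITE, 1024 * 1024);    /* 1 MiB transfer */
            printf("PCITEST_WRITE    returned %d\n", ret);
            close(fd);
            return 0;
    }
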
diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
index d09a9cd021b1..b1c0b187acfe 100644
--- a/include/uapi/linux/perf_event.h
+++ b/include/uapi/linux/perf_event.h
@@ -922,6 +922,7 @@ enum perf_callchain_context {
#define PERF_FLAG_PID_CGROUP (1UL << 2) /* pid=cgroup id, per-cpu mode only */
#define PERF_FLAG_FD_CLOEXEC (1UL << 3) /* O_CLOEXEC */
+#if defined(__LITTLE_ENDIAN_BITFIELD)
union perf_mem_data_src {
__u64 val;
struct {
@@ -933,6 +934,21 @@ union perf_mem_data_src {
mem_rsvd:31;
};
};
+#elif defined(__BIG_ENDIAN_BITFIELD)
+union perf_mem_data_src {
+ __u64 val;
+ struct {
+ __u64 mem_rsvd:31,
+ mem_dtlb:7, /* tlb access */
+ mem_lock:2, /* lock instr */
+ mem_snoop:5, /* snoop mode */
+ mem_lvl:14, /* memory hierarchy level */
+ mem_op:5; /* type of opcode */
+ };
+};
+#else
+#error "Unknown endianness"
+#endif
/* type of opcode (load/store/prefetch,code) */
#define PERF_MEM_OP_NA 0x01 /* not available */
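
The duplicated union keeps the mem_op/mem_lvl/... fields at the same bit
positions within the 64-bit value on both endiannesses, so the bitfield view
and the PERF_MEM_*_SHIFT encodings stay in sync. A small hedged sketch (uses
the PERF_MEM_S() helper defined further down in the same header):

    #include <stdio.h>
    #include <linux/perf_event.h>

    int main(void)
    {
            union perf_mem_data_src src = { .val = 0 };

            /* encode "load that hit in L1" via the shift/mask helpers ... */
            src.val = PERF_MEM_S(OP, LOAD) |
                      PERF_MEM_S(LVL, HIT) |
                      PERF_MEM_S(LVL, L1);

            /* ... and read it back through the endian-correct bitfields */
            printf("mem_op=0x%llx mem_lvl=0x%llx\n",
                   (unsigned long long)src.mem_op,
                   (unsigned long long)src.mem_lvl);
            return 0;
    }
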
diff --git a/include/uapi/linux/pps.h b/include/uapi/linux/pps.h
index a9bb1d93451a..c1cb3825a8bc 100644
--- a/include/uapi/linux/pps.h
+++ b/include/uapi/linux/pps.h
@@ -55,6 +55,12 @@ struct pps_ktime {
__s32 nsec;
__u32 flags;
};
+
+struct pps_ktime_compat {
+ __s64 sec;
+ __s32 nsec;
+ __u32 flags;
+} __attribute__((packed, aligned(4)));
#define PPS_TIME_INVALID (1<<0) /* used to specify timeout==NULL */
struct pps_kinfo {
@@ -65,6 +71,14 @@ struct pps_kinfo {
int current_mode; /* current mode bits */
};
+struct pps_kinfo_compat {
+ __u32 assert_sequence; /* seq. num. of assert event */
+ __u32 clear_sequence; /* seq. num. of clear event */
+ struct pps_ktime_compat assert_tu; /* time of assert event */
+ struct pps_ktime_compat clear_tu; /* time of clear event */
+ int current_mode; /* current mode bits */
+};
+
struct pps_kparams {
int api_version; /* API version # */
int mode; /* mode bits */
@@ -114,6 +128,11 @@ struct pps_fdata {
struct pps_ktime timeout;
};
+struct pps_fdata_compat {
+ struct pps_kinfo_compat info;
+ struct pps_ktime_compat timeout;
+};
+
struct pps_bind_args {
int tsformat; /* format of time stamps */
int edge; /* selected event type */
diff --git a/include/uapi/linux/serio.h b/include/uapi/linux/serio.h
index ccd0ccd00f47..ac217c6f0151 100644
--- a/include/uapi/linux/serio.h
+++ b/include/uapi/linux/serio.h
@@ -80,5 +80,6 @@
#define SERIO_WACOM_IV 0x3e
#define SERIO_EGALAX 0x3f
#define SERIO_PULSE8_CEC 0x40
+#define SERIO_RAINSHADOW_CEC 0x41
#endif /* _UAPI_SERIO_H */
diff --git a/include/uapi/linux/switchtec_ioctl.h b/include/uapi/linux/switchtec_ioctl.h
new file mode 100644
index 000000000000..3e824e1a6495
--- /dev/null
+++ b/include/uapi/linux/switchtec_ioctl.h
@@ -0,0 +1,132 @@
+/*
+ * Microsemi Switchtec PCIe Driver
+ * Copyright (c) 2017, Microsemi Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#ifndef _UAPI_LINUX_SWITCHTEC_IOCTL_H
+#define _UAPI_LINUX_SWITCHTEC_IOCTL_H
+
+#include <linux/types.h>
+
+#define SWITCHTEC_IOCTL_PART_CFG0 0
+#define SWITCHTEC_IOCTL_PART_CFG1 1
+#define SWITCHTEC_IOCTL_PART_IMG0 2
+#define SWITCHTEC_IOCTL_PART_IMG1 3
+#define SWITCHTEC_IOCTL_PART_NVLOG 4
+#define SWITCHTEC_IOCTL_PART_VENDOR0 5
+#define SWITCHTEC_IOCTL_PART_VENDOR1 6
+#define SWITCHTEC_IOCTL_PART_VENDOR2 7
+#define SWITCHTEC_IOCTL_PART_VENDOR3 8
+#define SWITCHTEC_IOCTL_PART_VENDOR4 9
+#define SWITCHTEC_IOCTL_PART_VENDOR5 10
+#define SWITCHTEC_IOCTL_PART_VENDOR6 11
+#define SWITCHTEC_IOCTL_PART_VENDOR7 12
+#define SWITCHTEC_IOCTL_NUM_PARTITIONS 13
+
+struct switchtec_ioctl_flash_info {
+ __u64 flash_length;
+ __u32 num_partitions;
+ __u32 padding;
+};
+
+struct switchtec_ioctl_flash_part_info {
+ __u32 flash_partition;
+ __u32 address;
+ __u32 length;
+ __u32 active;
+};
+
+struct switchtec_ioctl_event_summary {
+ __u64 global;
+ __u64 part_bitmap;
+ __u32 local_part;
+ __u32 padding;
+ __u32 part[48];
+ __u32 pff[48];
+};
+
+#define SWITCHTEC_IOCTL_EVENT_STACK_ERROR 0
+#define SWITCHTEC_IOCTL_EVENT_PPU_ERROR 1
+#define SWITCHTEC_IOCTL_EVENT_ISP_ERROR 2
+#define SWITCHTEC_IOCTL_EVENT_SYS_RESET 3
+#define SWITCHTEC_IOCTL_EVENT_FW_EXC 4
+#define SWITCHTEC_IOCTL_EVENT_FW_NMI 5
+#define SWITCHTEC_IOCTL_EVENT_FW_NON_FATAL 6
+#define SWITCHTEC_IOCTL_EVENT_FW_FATAL 7
+#define SWITCHTEC_IOCTL_EVENT_TWI_MRPC_COMP 8
+#define SWITCHTEC_IOCTL_EVENT_TWI_MRPC_COMP_ASYNC 9
+#define SWITCHTEC_IOCTL_EVENT_CLI_MRPC_COMP 10
+#define SWITCHTEC_IOCTL_EVENT_CLI_MRPC_COMP_ASYNC 11
+#define SWITCHTEC_IOCTL_EVENT_GPIO_INT 12
+#define SWITCHTEC_IOCTL_EVENT_PART_RESET 13
+#define SWITCHTEC_IOCTL_EVENT_MRPC_COMP 14
+#define SWITCHTEC_IOCTL_EVENT_MRPC_COMP_ASYNC 15
+#define SWITCHTEC_IOCTL_EVENT_DYN_PART_BIND_COMP 16
+#define SWITCHTEC_IOCTL_EVENT_AER_IN_P2P 17
+#define SWITCHTEC_IOCTL_EVENT_AER_IN_VEP 18
+#define SWITCHTEC_IOCTL_EVENT_DPC 19
+#define SWITCHTEC_IOCTL_EVENT_CTS 20
+#define SWITCHTEC_IOCTL_EVENT_HOTPLUG 21
+#define SWITCHTEC_IOCTL_EVENT_IER 22
+#define SWITCHTEC_IOCTL_EVENT_THRESH 23
+#define SWITCHTEC_IOCTL_EVENT_POWER_MGMT 24
+#define SWITCHTEC_IOCTL_EVENT_TLP_THROTTLING 25
+#define SWITCHTEC_IOCTL_EVENT_FORCE_SPEED 26
+#define SWITCHTEC_IOCTL_EVENT_CREDIT_TIMEOUT 27
+#define SWITCHTEC_IOCTL_EVENT_LINK_STATE 28
+#define SWITCHTEC_IOCTL_MAX_EVENTS 29
+
+#define SWITCHTEC_IOCTL_EVENT_LOCAL_PART_IDX -1
+#define SWITCHTEC_IOCTL_EVENT_IDX_ALL -2
+
+#define SWITCHTEC_IOCTL_EVENT_FLAG_CLEAR (1 << 0)
+#define SWITCHTEC_IOCTL_EVENT_FLAG_EN_POLL (1 << 1)
+#define SWITCHTEC_IOCTL_EVENT_FLAG_EN_LOG (1 << 2)
+#define SWITCHTEC_IOCTL_EVENT_FLAG_EN_CLI (1 << 3)
+#define SWITCHTEC_IOCTL_EVENT_FLAG_EN_FATAL (1 << 4)
+#define SWITCHTEC_IOCTL_EVENT_FLAG_DIS_POLL (1 << 5)
+#define SWITCHTEC_IOCTL_EVENT_FLAG_DIS_LOG (1 << 6)
+#define SWITCHTEC_IOCTL_EVENT_FLAG_DIS_CLI (1 << 7)
+#define SWITCHTEC_IOCTL_EVENT_FLAG_DIS_FATAL (1 << 8)
+#define SWITCHTEC_IOCTL_EVENT_FLAG_UNUSED (~0x1ff)
+
+struct switchtec_ioctl_event_ctl {
+ __u32 event_id;
+ __s32 index;
+ __u32 flags;
+ __u32 occurred;
+ __u32 count;
+ __u32 data[5];
+};
+
+#define SWITCHTEC_IOCTL_PFF_VEP 100
+struct switchtec_ioctl_pff_port {
+ __u32 pff;
+ __u32 partition;
+ __u32 port;
+};
+
+#define SWITCHTEC_IOCTL_FLASH_INFO \
+ _IOR('W', 0x40, struct switchtec_ioctl_flash_info)
+#define SWITCHTEC_IOCTL_FLASH_PART_INFO \
+ _IOWR('W', 0x41, struct switchtec_ioctl_flash_part_info)
+#define SWITCHTEC_IOCTL_EVENT_SUMMARY \
+ _IOR('W', 0x42, struct switchtec_ioctl_event_summary)
+#define SWITCHTEC_IOCTL_EVENT_CTL \
+ _IOWR('W', 0x43, struct switchtec_ioctl_event_ctl)
+#define SWITCHTEC_IOCTL_PFF_TO_PORT \
+ _IOWR('W', 0x44, struct switchtec_ioctl_pff_port)
+#define SWITCHTEC_IOCTL_PORT_TO_PFF \
+ _IOWR('W', 0x45, struct switchtec_ioctl_pff_port)
+
+#endif
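
A hedged userspace sketch of the flash-information ioctls defined above (the
/dev/switchtec0 node name is an assumption):

    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/switchtec_ioctl.h>

    int main(void)
    {
            int fd = open("/dev/switchtec0", O_RDWR);       /* assumed node */
            struct switchtec_ioctl_flash_info info;
            struct switchtec_ioctl_flash_part_info part = {
                    .flash_partition = SWITCHTEC_IOCTL_PART_IMG0,
            };

            if (fd < 0)
                    return 1;
            if (ioctl(fd, SWITCHTEC_IOCTL_FLASH_INFO, &info) == 0)
                    printf("flash: %llu bytes, %u partitions\n",
                           (unsigned long long)info.flash_length,
                           info.num_partitions);
            if (ioctl(fd, SWITCHTEC_IOCTL_FLASH_PART_INFO, &part) == 0)
                    printf("IMG0: addr=0x%x len=0x%x active=%u\n",
                           part.address, part.length, part.active);
            close(fd);
            return 0;
    }
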
diff --git a/include/uapi/linux/tee.h b/include/uapi/linux/tee.h
new file mode 100644
index 000000000000..370d8845ab21
--- /dev/null
+++ b/include/uapi/linux/tee.h
@@ -0,0 +1,346 @@
+/*
+ * Copyright (c) 2015-2016, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __TEE_H
+#define __TEE_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+/*
+ * This file describes the API provided by a TEE driver to user space.
+ *
+ * Each TEE driver defines a TEE specific protocol which is used for the
+ * data passed back and forth using TEE_IOC_CMD.
+ */
+
+/* Helpers to make the ioctl defines */
+#define TEE_IOC_MAGIC 0xa4
+#define TEE_IOC_BASE 0
+
+/* Flags relating to shared memory */
+#define TEE_IOCTL_SHM_MAPPED 0x1 /* memory mapped in normal world */
+#define TEE_IOCTL_SHM_DMA_BUF 0x2 /* dma-buf handle on shared memory */
+
+#define TEE_MAX_ARG_SIZE 1024
+
+#define TEE_GEN_CAP_GP (1 << 0)/* GlobalPlatform compliant TEE */
+
+/*
+ * TEE Implementation ID
+ */
+#define TEE_IMPL_ID_OPTEE 1
+
+/*
+ * OP-TEE specific capabilities
+ */
+#define TEE_OPTEE_CAP_TZ (1 << 0)
+
+/**
+ * struct tee_ioctl_version_data - TEE version
+ * @impl_id: [out] TEE implementation id
+ * @impl_caps: [out] Implementation specific capabilities
+ * @gen_caps: [out] Generic capabilities, defined by TEE_GEN_CAP_* above
+ *
+ * Identifies the TEE implementation, @impl_id is one of TEE_IMPL_ID_* above.
+ * @impl_caps is implementation specific, for example TEE_OPTEE_CAP_*
+ * is valid when @impl_id == TEE_IMPL_ID_OPTEE.
+ */
+struct tee_ioctl_version_data {
+ __u32 impl_id;
+ __u32 impl_caps;
+ __u32 gen_caps;
+};
+
+/**
+ * TEE_IOC_VERSION - query version of TEE
+ *
+ * Takes a tee_ioctl_version_data struct and returns with the TEE version
+ * data filled in.
+ */
+#define TEE_IOC_VERSION _IOR(TEE_IOC_MAGIC, TEE_IOC_BASE + 0, \
+ struct tee_ioctl_version_data)
+
+/**
+ * struct tee_ioctl_shm_alloc_data - Shared memory allocate argument
+ * @size: [in/out] Size of shared memory to allocate
+ * @flags: [in/out] Flags to/from allocation.
+ * @id: [out] Identifier of the shared memory
+ *
+ * The flags field should currently be zero as input. Updated by the call
+ * with actual flags as defined by TEE_IOCTL_SHM_* above.
+ * This structure is used as argument for TEE_IOC_SHM_ALLOC below.
+ */
+struct tee_ioctl_shm_alloc_data {
+ __u64 size;
+ __u32 flags;
+ __s32 id;
+};
+
+/**
+ * TEE_IOC_SHM_ALLOC - allocate shared memory
+ *
+ * Allocates shared memory between the user space process and secure OS.
+ *
+ * Returns a file descriptor on success or < 0 on failure
+ *
+ * The returned file descriptor is used to map the shared memory into user
+ * space. The shared memory is freed when the descriptor is closed and the
+ * memory is unmapped.
+ */
+#define TEE_IOC_SHM_ALLOC _IOWR(TEE_IOC_MAGIC, TEE_IOC_BASE + 1, \
+ struct tee_ioctl_shm_alloc_data)
+
+/**
+ * struct tee_ioctl_buf_data - Variable sized buffer
+ * @buf_ptr: [in] A __user pointer to a buffer
+ * @buf_len: [in] Length of the buffer above
+ *
+ * Used as argument for TEE_IOC_OPEN_SESSION, TEE_IOC_INVOKE,
+ * TEE_IOC_SUPPL_RECV, and TEE_IOC_SUPPL_SEND below.
+ */
+struct tee_ioctl_buf_data {
+ __u64 buf_ptr;
+ __u64 buf_len;
+};
+
+/*
+ * Attributes for struct tee_ioctl_param, selects field in the union
+ */
+#define TEE_IOCTL_PARAM_ATTR_TYPE_NONE 0 /* parameter not used */
+
+/*
+ * These define value parameters (struct tee_ioctl_param_value)
+ */
+#define TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT 1
+#define TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT 2
+#define TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT 3 /* input and output */
+
+/*
+ * These define shared memory reference parameters (struct
+ * tee_ioctl_param_memref)
+ */
+#define TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT 5
+#define TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT 6
+#define TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT 7 /* input and output */
+
+/*
+ * Mask for the type part of the attribute, leaves room for more types
+ */
+#define TEE_IOCTL_PARAM_ATTR_TYPE_MASK 0xff
+
+/*
+ * Matches TEEC_LOGIN_* in GP TEE Client API
+ * Are only defined for GP compliant TEEs
+ */
+#define TEE_IOCTL_LOGIN_PUBLIC 0
+#define TEE_IOCTL_LOGIN_USER 1
+#define TEE_IOCTL_LOGIN_GROUP 2
+#define TEE_IOCTL_LOGIN_APPLICATION 4
+#define TEE_IOCTL_LOGIN_USER_APPLICATION 5
+#define TEE_IOCTL_LOGIN_GROUP_APPLICATION 6
+
+/**
+ * struct tee_ioctl_param - parameter
+ * @attr: attributes
+ * @a: if a memref, offset into the shared memory object, else a value parameter
+ * @b: if a memref, size of the buffer, else a value parameter
+ * @c: if a memref, shared memory identifier, else a value parameter
+ *
+ * @attr & TEE_PARAM_ATTR_TYPE_MASK indicates if memref or value is used in
+ * the union. TEE_PARAM_ATTR_TYPE_VALUE_* indicates value and
+ * TEE_PARAM_ATTR_TYPE_MEMREF_* indicates memref. TEE_PARAM_ATTR_TYPE_NONE
+ * indicates that none of the members are used.
+ *
+ * Shared memory is allocated with TEE_IOC_SHM_ALLOC which returns an
+ * identifier representing the shared memory object. A memref can reference
+ * a part of a shared memory by specifying an offset (@a) and size (@b) of
+ * the object. To supply the entire shared memory object set the offset
+ * (@a) to 0 and size (@b) to the previously returned size of the object.
+ */
+struct tee_ioctl_param {
+ __u64 attr;
+ __u64 a;
+ __u64 b;
+ __u64 c;
+};
+
+#define TEE_IOCTL_UUID_LEN 16
+
+/**
+ * struct tee_ioctl_open_session_arg - Open session argument
+ * @uuid: [in] UUID of the Trusted Application
+ * @clnt_uuid: [in] UUID of client
+ * @clnt_login: [in] Login class of client, TEE_IOCTL_LOGIN_* above
+ * @cancel_id: [in] Cancellation id, a unique value to identify this request
+ * @session: [out] Session id
+ * @ret: [out] return value
+ * @ret_origin [out] origin of the return value
+ * @num_params [in] number of parameters following this struct
+ */
+struct tee_ioctl_open_session_arg {
+ __u8 uuid[TEE_IOCTL_UUID_LEN];
+ __u8 clnt_uuid[TEE_IOCTL_UUID_LEN];
+ __u32 clnt_login;
+ __u32 cancel_id;
+ __u32 session;
+ __u32 ret;
+ __u32 ret_origin;
+ __u32 num_params;
+ /* num_params tells the actual number of elements in params */
+ struct tee_ioctl_param params[];
+};
+
+/**
+ * TEE_IOC_OPEN_SESSION - opens a session to a Trusted Application
+ *
+ * Takes a struct tee_ioctl_buf_data which contains a struct
+ * tee_ioctl_open_session_arg followed by any array of struct
+ * tee_ioctl_param
+ */
+#define TEE_IOC_OPEN_SESSION _IOR(TEE_IOC_MAGIC, TEE_IOC_BASE + 2, \
+ struct tee_ioctl_buf_data)
+
+/**
+ * struct tee_ioctl_invoke_arg - Invokes a function in a Trusted
+ * Application
+ * @func: [in] Trusted Application function, specific to the TA
+ * @session: [in] Session id
+ * @cancel_id: [in] Cancellation id, a unique value to identify this request
+ * @ret: [out] return value
+ * @ret_origin [out] origin of the return value
+ * @num_params [in] number of parameters following this struct
+ */
+struct tee_ioctl_invoke_arg {
+ __u32 func;
+ __u32 session;
+ __u32 cancel_id;
+ __u32 ret;
+ __u32 ret_origin;
+ __u32 num_params;
+ /* num_params tells the actual number of elements in params */
+ struct tee_ioctl_param params[];
+};
+
+/**
+ * TEE_IOC_INVOKE - Invokes a function in a Trusted Application
+ *
+ * Takes a struct tee_ioctl_buf_data which contains a struct
+ * tee_invoke_func_arg followed by any array of struct tee_param
+ */
+#define TEE_IOC_INVOKE _IOR(TEE_IOC_MAGIC, TEE_IOC_BASE + 3, \
+ struct tee_ioctl_buf_data)
+
+/**
+ * struct tee_ioctl_cancel_arg - Cancels an open session or invoke ioctl
+ * @cancel_id: [in] Cancellation id, a unique value to identify this request
+ * @session: [in] Session id, if the session is opened, else set to 0
+ */
+struct tee_ioctl_cancel_arg {
+ __u32 cancel_id;
+ __u32 session;
+};
+
+/**
+ * TEE_IOC_CANCEL - Cancels an open session or invoke
+ */
+#define TEE_IOC_CANCEL _IOR(TEE_IOC_MAGIC, TEE_IOC_BASE + 4, \
+ struct tee_ioctl_cancel_arg)
+
+/**
+ * struct tee_ioctl_close_session_arg - Closes an open session
+ * @session: [in] Session id
+ */
+struct tee_ioctl_close_session_arg {
+ __u32 session;
+};
+
+/**
+ * TEE_IOC_CLOSE_SESSION - Closes a session
+ */
+#define TEE_IOC_CLOSE_SESSION _IOR(TEE_IOC_MAGIC, TEE_IOC_BASE + 5, \
+ struct tee_ioctl_close_session_arg)
+
+/**
+ * struct tee_iocl_supp_recv_arg - Receive a request for a supplicant function
+ * @func: [in] supplicant function
+ * @num_params [in/out] number of parameters following this struct
+ *
+ * On input, @num_params is the number of params that tee-supplicant has
+ * room to receive; on output, it is the number of params tee-supplicant
+ * actually received.
+ */
+struct tee_iocl_supp_recv_arg {
+ __u32 func;
+ __u32 num_params;
+ /* num_params tells the actual number of elements in params */
+ struct tee_ioctl_param params[];
+};
+
+/**
+ * TEE_IOC_SUPPL_RECV - Receive a request for a supplicant function
+ *
+ * Takes a struct tee_ioctl_buf_data which contains a struct
+ * tee_iocl_supp_recv_arg followed by any array of struct tee_param
+ */
+#define TEE_IOC_SUPPL_RECV _IOR(TEE_IOC_MAGIC, TEE_IOC_BASE + 6, \
+ struct tee_ioctl_buf_data)
+
+/**
+ * struct tee_iocl_supp_send_arg - Send a response to a received request
+ * @ret: [out] return value
+ * @num_params: [in] number of parameters following this struct
+ */
+struct tee_iocl_supp_send_arg {
+ __u32 ret;
+ __u32 num_params;
+ /* num_params tells the actual number of elements in params */
+ struct tee_ioctl_param params[];
+};
+
+/**
+ * TEE_IOC_SUPPL_SEND - Send a response to a received request
+ *
+ * Takes a struct tee_ioctl_buf_data which contains a struct
+ * tee_iocl_supp_send_arg followed by an array of struct tee_ioctl_param
+ */
+#define TEE_IOC_SUPPL_SEND _IOR(TEE_IOC_MAGIC, TEE_IOC_BASE + 7, \
+ struct tee_ioctl_buf_data)
+
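For illustration only, a minimal user-space sketch of the tee-supplicant loop built on TEE_IOC_SUPPL_RECV/TEE_IOC_SUPPL_SEND; it assumes struct tee_ioctl_buf_data (defined earlier in this header) carries a buf_ptr/buf_len pair, and handle_func() is a hypothetical dispatcher:

  #include <stdint.h>
  #include <string.h>
  #include <sys/ioctl.h>
  #include <linux/tee.h>

  /* Hypothetical dispatcher for the supplicant functions (not in this header). */
  extern uint32_t handle_func(uint32_t func, uint32_t num_params,
                              struct tee_ioctl_param *params);

  static void supplicant_loop(int fd)   /* fd: open()ed TEE device node */
  {
      uint8_t buf[4096] __attribute__((aligned(8)));
      struct tee_iocl_supp_recv_arg *recv = (void *)buf;
      struct tee_iocl_supp_send_arg *send = (void *)buf;
      struct tee_ioctl_buf_data data = {
          .buf_ptr = (uintptr_t)buf,
          .buf_len = sizeof(buf),
      };

      for (;;) {
          memset(buf, 0, sizeof(buf));
          recv->num_params = 4;         /* room for up to four parameters */
          if (ioctl(fd, TEE_IOC_SUPPL_RECV, &data))
              break;                    /* returns once a request arrives */
          /* ret/num_params of the send layout overlay func/num_params */
          send->ret = handle_func(recv->func, recv->num_params,
                                  recv->params);
          if (ioctl(fd, TEE_IOC_SUPPL_SEND, &data))
              break;
      }
  }
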
+/*
+ * Five syscalls are used when communicating with the TEE driver.
+ * open(): opens the device associated with the driver
+ * ioctl(): as described above operating on the file descriptor from open()
+ * close(): two cases
+ * - closes the device file descriptor
+ * - closes a file descriptor connected to allocated shared memory
+ * mmap(): maps shared memory into user space using information from struct
+ * tee_ioctl_shm_alloc_data
+ * munmap(): unmaps previously shared memory
+ */
+
+#endif /*__TEE_H*/
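As a companion to the syscall summary above, an illustrative client-side sketch of TEE_IOC_OPEN_SESSION / TEE_IOC_INVOKE / TEE_IOC_CLOSE_SESSION; the device node name is an assumption, struct tee_ioctl_buf_data (buf_ptr/buf_len) is taken from earlier in this header, and no parameters are passed:

  #include <fcntl.h>
  #include <stdint.h>
  #include <string.h>
  #include <sys/ioctl.h>
  #include <unistd.h>
  #include <linux/tee.h>

  /* Open a session to the TA identified by uuid and invoke function 'func'. */
  static int invoke_ta(const uint8_t uuid[TEE_IOCTL_UUID_LEN], uint32_t func)
  {
      struct tee_ioctl_open_session_arg open_arg = { 0 };
      struct tee_ioctl_invoke_arg inv_arg = { 0 };
      struct tee_ioctl_buf_data data;
      int fd, ret;

      fd = open("/dev/tee0", O_RDWR);   /* device node name is an assumption */
      if (fd < 0)
          return -1;

      memcpy(open_arg.uuid, uuid, TEE_IOCTL_UUID_LEN);
      open_arg.clnt_login = TEE_IOCTL_LOGIN_PUBLIC;  /* one of TEE_IOCTL_LOGIN_* */
      data.buf_ptr = (uintptr_t)&open_arg;
      data.buf_len = sizeof(open_arg);  /* num_params == 0, no trailing params[] */
      ret = ioctl(fd, TEE_IOC_OPEN_SESSION, &data);
      if (ret || open_arg.ret) {
          ret = ret ? ret : -1;
          goto out;
      }

      inv_arg.func = func;
      inv_arg.session = open_arg.session;
      data.buf_ptr = (uintptr_t)&inv_arg;
      data.buf_len = sizeof(inv_arg);
      ret = ioctl(fd, TEE_IOC_INVOKE, &data);

      /* TEE_IOC_CLOSE_SESSION takes its argument struct directly. */
      ioctl(fd, TEE_IOC_CLOSE_SESSION, &(struct tee_ioctl_close_session_arg){
            .session = open_arg.session });
  out:
      close(fd);
      return ret;
  }
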
diff --git a/include/uapi/linux/usb/ch9.h b/include/uapi/linux/usb/ch9.h
index 2c5d7c4a69e3..ce1169af39d7 100644
--- a/include/uapi/linux/usb/ch9.h
+++ b/include/uapi/linux/usb/ch9.h
@@ -224,7 +224,8 @@ struct usb_ctrlrequest {
* through the Linux-USB APIs, they are not converted to cpu byte
* order; it is the responsibility of the client code to do this.
* The single exception is when device and configuration descriptors (but
- * not other descriptors) are read from usbfs (i.e. /proc/bus/usb/BBB/DDD);
+ * not other descriptors) are read from character devices
+ * (i.e. /dev/bus/usb/BBB/DDD);
* in this case the fields are converted to host endianness by the kernel.
*/
diff --git a/include/uapi/linux/usb/functionfs.h b/include/uapi/linux/usb/functionfs.h
index b2a31a55a612..062606f02309 100644
--- a/include/uapi/linux/usb/functionfs.h
+++ b/include/uapi/linux/usb/functionfs.h
@@ -158,7 +158,7 @@ struct usb_ext_prop_desc {
* |-----+-----------------------+------+-------------------------------------|
* | 0 | bFirstInterfaceNumber | U8 | index of the interface or of the 1st|
* | | | | interface in an IAD group |
- * | 1 | Reserved | U8 | 0 |
+ * | 1 | Reserved | U8 | 1 |
* | 2 | CompatibleID | U8[8]| compatible ID string |
* | 10 | SubCompatibleID | U8[8]| subcompatible ID string |
* | 18 | Reserved | U8[6]| 0 |
diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
index 45184a2ef66c..2b8feb86d09e 100644
--- a/include/uapi/linux/videodev2.h
+++ b/include/uapi/linux/videodev2.h
@@ -143,6 +143,7 @@ enum v4l2_buf_type {
V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE = 10,
V4L2_BUF_TYPE_SDR_CAPTURE = 11,
V4L2_BUF_TYPE_SDR_OUTPUT = 12,
+ V4L2_BUF_TYPE_META_CAPTURE = 13,
/* Deprecated, do not use */
V4L2_BUF_TYPE_PRIVATE = 0x80,
};
@@ -362,8 +363,7 @@ enum v4l2_quantization {
/*
* The default for R'G'B' quantization is always full range, except
* for the BT2020 colorspace. For Y'CbCr the quantization is always
- * limited range, except for COLORSPACE_JPEG, XV601 or XV709: those
- * are full range.
+ * limited range, except for COLORSPACE_JPEG: this is full range.
*/
V4L2_QUANTIZATION_DEFAULT = 0,
V4L2_QUANTIZATION_FULL_RANGE = 1,
@@ -378,8 +378,7 @@ enum v4l2_quantization {
#define V4L2_MAP_QUANTIZATION_DEFAULT(is_rgb_or_hsv, colsp, ycbcr_enc) \
(((is_rgb_or_hsv) && (colsp) == V4L2_COLORSPACE_BT2020) ? \
V4L2_QUANTIZATION_LIM_RANGE : \
- (((is_rgb_or_hsv) || (ycbcr_enc) == V4L2_YCBCR_ENC_XV601 || \
- (ycbcr_enc) == V4L2_YCBCR_ENC_XV709 || (colsp) == V4L2_COLORSPACE_JPEG) ? \
+ (((is_rgb_or_hsv) || (colsp) == V4L2_COLORSPACE_JPEG) ? \
V4L2_QUANTIZATION_FULL_RANGE : V4L2_QUANTIZATION_LIM_RANGE))
enum v4l2_priority {
@@ -453,6 +452,7 @@ struct v4l2_capability {
#define V4L2_CAP_SDR_CAPTURE 0x00100000 /* Is a SDR capture device */
#define V4L2_CAP_EXT_PIX_FORMAT 0x00200000 /* Supports the extended pixel format */
#define V4L2_CAP_SDR_OUTPUT 0x00400000 /* Is a SDR output device */
+#define V4L2_CAP_META_CAPTURE 0x00800000 /* Is a metadata capture device */
#define V4L2_CAP_READWRITE 0x01000000 /* read/write systemcalls */
#define V4L2_CAP_ASYNCIO 0x02000000 /* async I/O */
@@ -661,6 +661,7 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_Y12I v4l2_fourcc('Y', '1', '2', 'I') /* Greyscale 12-bit L/R interleaved */
#define V4L2_PIX_FMT_Z16 v4l2_fourcc('Z', '1', '6', ' ') /* Depth data 16-bit */
#define V4L2_PIX_FMT_MT21C v4l2_fourcc('M', 'T', '2', '1') /* Mediatek compressed block mode */
+#define V4L2_PIX_FMT_INZI v4l2_fourcc('I', 'N', 'Z', 'I') /* Intel Planar Greyscale 10-bit and Depth 16-bit */
/* SDR formats - used only for Software Defined Radio devices */
#define V4L2_SDR_FMT_CU8 v4l2_fourcc('C', 'U', '0', '8') /* IQ u8 */
@@ -675,6 +676,10 @@ struct v4l2_pix_format {
#define V4L2_TCH_FMT_TU16 v4l2_fourcc('T', 'U', '1', '6') /* 16-bit unsigned touch data */
#define V4L2_TCH_FMT_TU08 v4l2_fourcc('T', 'U', '0', '8') /* 8-bit unsigned touch data */
+/* Meta-data formats */
+#define V4L2_META_FMT_VSP1_HGO v4l2_fourcc('V', 'S', 'P', 'H') /* R-Car VSP1 1-D Histogram */
+#define V4L2_META_FMT_VSP1_HGT v4l2_fourcc('V', 'S', 'P', 'T') /* R-Car VSP1 2-D Histogram */
+
/* priv field value to indicates that subsequent fields are valid. */
#define V4L2_PIX_FMT_PRIV_MAGIC 0xfeedcafe
@@ -1654,6 +1659,7 @@ struct v4l2_querymenu {
#define V4L2_CTRL_FLAG_VOLATILE 0x0080
#define V4L2_CTRL_FLAG_HAS_PAYLOAD 0x0100
#define V4L2_CTRL_FLAG_EXECUTE_ON_WRITE 0x0200
+#define V4L2_CTRL_FLAG_MODIFY_LAYOUT 0x0400
/* Query flags, to be ORed with the control ID */
#define V4L2_CTRL_FLAG_NEXT_CTRL 0x80000000
@@ -2087,6 +2093,16 @@ struct v4l2_sdr_format {
} __attribute__ ((packed));
/**
+ * struct v4l2_meta_format - metadata format definition
+ * @dataformat: little endian four character code (fourcc)
+ * @buffersize: maximum size in bytes required for data
+ */
+struct v4l2_meta_format {
+ __u32 dataformat;
+ __u32 buffersize;
+} __attribute__ ((packed));
+
+/**
* struct v4l2_format - stream data format
* @type: enum v4l2_buf_type; type of the data stream
* @pix: definition of an image format
@@ -2105,6 +2121,7 @@ struct v4l2_format {
struct v4l2_vbi_format vbi; /* V4L2_BUF_TYPE_VBI_CAPTURE */
struct v4l2_sliced_vbi_format sliced; /* V4L2_BUF_TYPE_SLICED_VBI_CAPTURE */
struct v4l2_sdr_format sdr; /* V4L2_BUF_TYPE_SDR_CAPTURE */
+ struct v4l2_meta_format meta; /* V4L2_BUF_TYPE_META_CAPTURE */
__u8 raw_data[200]; /* user-defined */
} fmt;
};
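For the new V4L2_BUF_TYPE_META_CAPTURE type added above, a hedged sketch of how an application could probe a metadata node; the device path is an assumption and the fourcc/buffersize come back from the driver through the standard VIDIOC_QUERYCAP/VIDIOC_G_FMT ioctls:

  #include <fcntl.h>
  #include <stdio.h>
  #include <string.h>
  #include <sys/ioctl.h>
  #include <unistd.h>
  #include <linux/videodev2.h>

  /* Returns an open fd ready for metadata capture, or -1 on error. */
  static int open_meta_node(const char *devnode)  /* e.g. "/dev/video0" (assumed) */
  {
      struct v4l2_capability cap;
      struct v4l2_format fmt;
      int fd = open(devnode, O_RDWR);

      if (fd < 0)
          return -1;

      /* The driver advertises metadata capture via the new capability flag. */
      if (ioctl(fd, VIDIOC_QUERYCAP, &cap) ||
          !(cap.device_caps & V4L2_CAP_META_CAPTURE))
          goto err;

      memset(&fmt, 0, sizeof(fmt));
      fmt.type = V4L2_BUF_TYPE_META_CAPTURE;
      if (ioctl(fd, VIDIOC_G_FMT, &fmt))          /* driver fills fmt.fmt.meta */
          goto err;

      printf("meta fourcc %.4s, buffersize %u bytes\n",
             (char *)&fmt.fmt.meta.dataformat, fmt.fmt.meta.buffersize);
      return fd;
  err:
      close(fd);
      return -1;
  }
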
diff --git a/include/xen/arm/page-coherent.h b/include/xen/arm/page-coherent.h
index 95ce6ac3a971..b1b4ecdf329a 100644
--- a/include/xen/arm/page-coherent.h
+++ b/include/xen/arm/page-coherent.h
@@ -2,8 +2,16 @@
#define _ASM_ARM_XEN_PAGE_COHERENT_H
#include <asm/page.h>
+#include <asm/dma-mapping.h>
#include <linux/dma-mapping.h>
+static inline const struct dma_map_ops *xen_get_dma_ops(struct device *dev)
+{
+ if (dev && dev->archdata.dev_dma_ops)
+ return dev->archdata.dev_dma_ops;
+ return get_arch_dma_ops(NULL);
+}
+
void __xen_dma_map_page(struct device *hwdev, struct page *page,
dma_addr_t dev_addr, unsigned long offset, size_t size,
enum dma_data_direction dir, unsigned long attrs);
@@ -19,13 +27,13 @@ void __xen_dma_sync_single_for_device(struct device *hwdev,
static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
dma_addr_t *dma_handle, gfp_t flags, unsigned long attrs)
{
- return __generic_dma_ops(hwdev)->alloc(hwdev, size, dma_handle, flags, attrs);
+ return xen_get_dma_ops(hwdev)->alloc(hwdev, size, dma_handle, flags, attrs);
}
static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs)
{
- __generic_dma_ops(hwdev)->free(hwdev, size, cpu_addr, dma_handle, attrs);
+ xen_get_dma_ops(hwdev)->free(hwdev, size, cpu_addr, dma_handle, attrs);
}
static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
@@ -49,7 +57,7 @@ static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
* specific function.
*/
if (local)
- __generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
+ xen_get_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
else
__xen_dma_map_page(hwdev, page, dev_addr, offset, size, dir, attrs);
}
@@ -67,8 +75,8 @@ static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
* specific function.
*/
if (pfn_valid(pfn)) {
- if (__generic_dma_ops(hwdev)->unmap_page)
- __generic_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs);
+ if (xen_get_dma_ops(hwdev)->unmap_page)
+ xen_get_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs);
} else
__xen_dma_unmap_page(hwdev, handle, size, dir, attrs);
}
@@ -78,8 +86,8 @@ static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
{
unsigned long pfn = PFN_DOWN(handle);
if (pfn_valid(pfn)) {
- if (__generic_dma_ops(hwdev)->sync_single_for_cpu)
- __generic_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir);
+ if (xen_get_dma_ops(hwdev)->sync_single_for_cpu)
+ xen_get_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir);
} else
__xen_dma_sync_single_for_cpu(hwdev, handle, size, dir);
}
@@ -89,8 +97,8 @@ static inline void xen_dma_sync_single_for_device(struct device *hwdev,
{
unsigned long pfn = PFN_DOWN(handle);
if (pfn_valid(pfn)) {
- if (__generic_dma_ops(hwdev)->sync_single_for_device)
- __generic_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir);
+ if (xen_get_dma_ops(hwdev)->sync_single_for_device)
+ xen_get_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir);
} else
__xen_dma_sync_single_for_device(hwdev, handle, size, dir);
}
diff --git a/include/xen/interface/io/9pfs.h b/include/xen/interface/io/9pfs.h
new file mode 100644
index 000000000000..5b6c19dae5e2
--- /dev/null
+++ b/include/xen/interface/io/9pfs.h
@@ -0,0 +1,36 @@
+/*
+ * 9pfs.h -- Xen 9PFS transport
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Copyright (C) 2017 Stefano Stabellini <stefano@aporeto.com>
+ */
+
+#ifndef __XEN_PUBLIC_IO_9PFS_H__
+#define __XEN_PUBLIC_IO_9PFS_H__
+
+#include "xen/interface/io/ring.h"
+
+/*
+ * See docs/misc/9pfs.markdown in xen.git for the full specification:
+ * https://xenbits.xen.org/docs/unstable/misc/9pfs.html
+ */
+DEFINE_XEN_FLEX_RING_AND_INTF(xen_9pfs);
+
+#endif
diff --git a/include/xen/interface/io/displif.h b/include/xen/interface/io/displif.h
new file mode 100644
index 000000000000..596578d9be3e
--- /dev/null
+++ b/include/xen/interface/io/displif.h
@@ -0,0 +1,854 @@
+/******************************************************************************
+ * displif.h
+ *
+ * Unified display device I/O interface for Xen guest OSes.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Copyright (C) 2016-2017 EPAM Systems Inc.
+ *
+ * Authors: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
+ * Oleksandr Grytsov <oleksandr_grytsov@epam.com>
+ */
+
+#ifndef __XEN_PUBLIC_IO_DISPLIF_H__
+#define __XEN_PUBLIC_IO_DISPLIF_H__
+
+#include "ring.h"
+#include "../grant_table.h"
+
+/*
+ ******************************************************************************
+ * Protocol version
+ ******************************************************************************
+ */
+#define XENDISPL_PROTOCOL_VERSION "1"
+
+/*
+ ******************************************************************************
+ * Main features provided by the protocol
+ ******************************************************************************
+ * This protocol aims to provide a unified protocol which fits more
+ * sophisticated use-cases than a framebuffer device can handle. At the
+ * moment basic functionality is supported with the intention to be extended:
+ * o multiple dynamically allocated/destroyed framebuffers
+ * o buffers of arbitrary sizes
+ * o buffer allocation at either back or front end
+ * o better configuration options including multiple display support
+ *
+ * Note: the existing fbif can be used together with displif running at the
+ * same time, e.g. on Linux one provides a framebuffer and the other DRM/KMS
+ *
+ * Note: display resolution (XenStore's "resolution" property) defines
+ * visible area of the virtual display. At the same time resolution of
+ * the display and frame buffers may differ: buffers can be smaller, equal
+ * or bigger than the visible area. This is to enable use-cases, where backend
+ * may do some post-processing of the display and frame buffers supplied,
+ * e.g. those buffers can be just a part of the final composition.
+ *
+ ******************************************************************************
+ * Direction of improvements
+ ******************************************************************************
+ * Future extensions to the existing protocol may include:
+ * o display/connector cloning
+ * o allocation of objects other than display buffers
+ * o plane/overlay support
+ * o scaling support
+ * o rotation support
+ *
+ ******************************************************************************
+ * Feature and Parameter Negotiation
+ ******************************************************************************
+ *
+ * Front->back notifications: when enqueuing a new request, sending a
+ * notification can be made conditional on xendispl_req (i.e., the generic
+ * hold-off mechanism provided by the ring macros). Backends must set
+ * xendispl_req appropriately (e.g., using RING_FINAL_CHECK_FOR_REQUESTS()).
+ *
+ * Back->front notifications: when enqueuing a new response, sending a
+ * notification can be made conditional on xendispl_resp (i.e., the generic
+ * hold-off mechanism provided by the ring macros). Frontends must set
+ * xendispl_resp appropriately (e.g., using RING_FINAL_CHECK_FOR_RESPONSES()).
+ *
+ * The two halves of a para-virtual display driver utilize nodes within
+ * XenStore to communicate capabilities and to negotiate operating parameters.
+ * This section enumerates these nodes which reside in the respective front and
+ * backend portions of XenStore, following the XenBus convention.
+ *
+ * All data in XenStore is stored as strings. Nodes specifying numeric
+ * values are encoded in decimal. Integer value ranges listed below are
+ * expressed as fixed sized integer types capable of storing the conversion
+ * of a properly formatted node string, without loss of information.
+ *
+ ******************************************************************************
+ * Example configuration
+ ******************************************************************************
+ *
+ * Note: depending on the use-case backend can expose more display connectors
+ * than the underlying HW physically has by employing SW graphics compositors
+ *
+ * This is an example of backend and frontend configuration:
+ *
+ *--------------------------------- Backend -----------------------------------
+ *
+ * /local/domain/0/backend/vdispl/1/0/frontend-id = "1"
+ * /local/domain/0/backend/vdispl/1/0/frontend = "/local/domain/1/device/vdispl/0"
+ * /local/domain/0/backend/vdispl/1/0/state = "4"
+ * /local/domain/0/backend/vdispl/1/0/versions = "1,2"
+ *
+ *--------------------------------- Frontend ----------------------------------
+ *
+ * /local/domain/1/device/vdispl/0/backend-id = "0"
+ * /local/domain/1/device/vdispl/0/backend = "/local/domain/0/backend/vdispl/1/0"
+ * /local/domain/1/device/vdispl/0/state = "4"
+ * /local/domain/1/device/vdispl/0/version = "1"
+ * /local/domain/1/device/vdispl/0/be-alloc = "1"
+ *
+ *-------------------------- Connector 0 configuration ------------------------
+ *
+ * /local/domain/1/device/vdispl/0/0/resolution = "1920x1080"
+ * /local/domain/1/device/vdispl/0/0/req-ring-ref = "2832"
+ * /local/domain/1/device/vdispl/0/0/req-event-channel = "15"
+ * /local/domain/1/device/vdispl/0/0/evt-ring-ref = "387"
+ * /local/domain/1/device/vdispl/0/0/evt-event-channel = "16"
+ *
+ *-------------------------- Connector 1 configuration ------------------------
+ *
+ * /local/domain/1/device/vdispl/0/1/resolution = "800x600"
+ * /local/domain/1/device/vdispl/0/1/req-ring-ref = "2833"
+ * /local/domain/1/device/vdispl/0/1/req-event-channel = "17"
+ * /local/domain/1/device/vdispl/0/1/evt-ring-ref = "388"
+ * /local/domain/1/device/vdispl/0/1/evt-event-channel = "18"
+ *
+ ******************************************************************************
+ * Backend XenBus Nodes
+ ******************************************************************************
+ *
+ *----------------------------- Protocol version ------------------------------
+ *
+ * versions
+ * Values: <string>
+ *
+ * List of XENDISPL_LIST_SEPARATOR separated protocol versions supported
+ * by the backend. For example "1,2,3".
+ *
+ ******************************************************************************
+ * Frontend XenBus Nodes
+ ******************************************************************************
+ *
+ *-------------------------------- Addressing ---------------------------------
+ *
+ * dom-id
+ * Values: <uint16_t>
+ *
+ * Domain identifier.
+ *
+ * dev-id
+ * Values: <uint16_t>
+ *
+ * Device identifier.
+ *
+ * conn-idx
+ * Values: <uint8_t>
+ *
+ * Zero-based contiguous index of the connector.
+ * /local/domain/<dom-id>/device/vdispl/<dev-id>/<conn-idx>/...
+ *
+ *----------------------------- Protocol version ------------------------------
+ *
+ * version
+ * Values: <string>
+ *
+ * Protocol version, chosen among the ones supported by the backend.
+ *
+ *------------------------- Backend buffer allocation -------------------------
+ *
+ * be-alloc
+ * Values: "0", "1"
+ *
+ * If value is set to "1", then backend can be a buffer provider/allocator
+ * for this domain during XENDISPL_OP_DBUF_CREATE operation (see below
+ * for negotiation).
+ * If value is not "1" or omitted frontend must allocate buffers itself.
+ *
+ *----------------------------- Connector settings ----------------------------
+ *
+ * resolution
+ * Values: <width, uint32_t>x<height, uint32_t>
+ *
+ * Width and height of the connector in pixels separated by
+ * XENDISPL_RESOLUTION_SEPARATOR. This defines visible area of the
+ * display.
+ *
+ *------------------ Connector Request Transport Parameters -------------------
+ *
+ * This communication path is used to deliver requests from frontend to backend
+ * and get the corresponding responses from backend to frontend,
+ * set up per connector.
+ *
+ * req-event-channel
+ * Values: <uint32_t>
+ *
+ * The identifier of the Xen connector's control event channel
+ * used to signal activity in the ring buffer.
+ *
+ * req-ring-ref
+ * Values: <uint32_t>
+ *
+ * The Xen grant reference granting permission for the backend to map
+ * a sole page of connector's control ring buffer.
+ *
+ *------------------- Connector Event Transport Parameters --------------------
+ *
+ * This communication path is used to deliver asynchronous events from backend
+ * to frontend, set up per connector.
+ *
+ * evt-event-channel
+ * Values: <uint32_t>
+ *
+ * The identifier of the Xen connector's event channel
+ * used to signal activity in the ring buffer.
+ *
+ * evt-ring-ref
+ * Values: <uint32_t>
+ *
+ * The Xen grant reference granting permission for the backend to map
+ * a sole page of connector's event ring buffer.
+ */
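To make the node layout above concrete, a hedged frontend-side sketch using the generic Linux xenbus helpers; error handling is reduced to a minimum, the connector index is hard-coded to 0, and the XENDISPL_FIELD_*/XENDISPL_* strings are the ones defined later in this header:

  #include <linux/errno.h>
  #include <linux/types.h>
  #include <xen/xenbus.h>
  #include <xen/interface/io/displif.h>

  /* Read connector 0 resolution and publish the protocol version we speak. */
  static int displ_front_read_cfg(struct xenbus_device *xb_dev,
                                  u32 *width, u32 *height)
  {
      int ret;

      /* "resolution" lives under <frontend>/<conn-idx>/, here connector 0. */
      ret = xenbus_scanf(XBT_NIL, xb_dev->nodename,
                         "0/" XENDISPL_FIELD_RESOLUTION,
                         "%u" XENDISPL_RESOLUTION_SEPARATOR "%u",
                         width, height);
      if (ret != 2)
          return -EINVAL;

      /* Advertise the protocol version chosen by this frontend. */
      return xenbus_printf(XBT_NIL, xb_dev->nodename,
                           XENDISPL_FIELD_FE_VERSION, "%s",
                           XENDISPL_PROTOCOL_VERSION);
  }
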
+
+/*
+ ******************************************************************************
+ * STATE DIAGRAMS
+ ******************************************************************************
+ *
+ * Tool stack creates front and back state nodes with initial state
+ * XenbusStateInitialising.
+ * Tool stack creates and sets up frontend display configuration
+ * nodes per domain.
+ *
+ *-------------------------------- Normal flow --------------------------------
+ *
+ * Front Back
+ * ================================= =====================================
+ * XenbusStateInitialising XenbusStateInitialising
+ * o Query backend device identification
+ * data.
+ * o Open and validate backend device.
+ * |
+ * |
+ * V
+ * XenbusStateInitWait
+ *
+ * o Query frontend configuration
+ * o Allocate and initialize
+ * event channels per configured
+ * connector.
+ * o Publish transport parameters
+ * that will be in effect during
+ * this connection.
+ * |
+ * |
+ * V
+ * XenbusStateInitialised
+ *
+ * o Query frontend transport parameters.
+ * o Connect to the event channels.
+ * |
+ * |
+ * V
+ * XenbusStateConnected
+ *
+ * o Create and initialize OS
+ * virtual display connectors
+ * as per configuration.
+ * |
+ * |
+ * V
+ * XenbusStateConnected
+ *
+ * XenbusStateUnknown
+ * XenbusStateClosed
+ * XenbusStateClosing
+ * o Remove virtual display device
+ * o Remove event channels
+ * |
+ * |
+ * V
+ * XenbusStateClosed
+ *
+ *------------------------------- Recovery flow -------------------------------
+ *
+ * In case of frontend unrecoverable errors backend handles that as
+ * if frontend goes into the XenbusStateClosed state.
+ *
+ * In case of backend unrecoverable errors frontend tries removing
+ * the virtualized device. If this is possible at the moment of error,
+ * then frontend goes into the XenbusStateInitialising state and is ready for
+ * new connection with backend. If the virtualized device is still in use and
+ * cannot be removed, then frontend goes into the XenbusStateReconfiguring state
+ * until either the virtualized device is removed or backend initiates a new
+ * connection. On the virtualized device removal frontend goes into the
+ * XenbusStateInitialising state.
+ *
+ * Note on XenbusStateReconfiguring state of the frontend: if backend has
+ * unrecoverable errors then frontend cannot send requests to the backend
+ * and thus cannot provide functionality of the virtualized device anymore.
+ * After backend is back to normal the virtualized device may still hold some
+ * state: configuration in use, allocated buffers, client application state etc.
+ * In most cases, this will require frontend to implement complex recovery
+ * reconnect logic. Instead, by going into XenbusStateReconfiguring state,
+ * frontend will make sure no new clients of the virtualized device are
+ * accepted, allow existing client(s) to exit gracefully by signaling error
+ * state etc.
+ * Once all the clients are gone frontend can reinitialize the virtualized
+ * device and get into XenbusStateInitialising state again signaling the
+ * backend that a new connection can be made.
+ *
+ * There are multiple conditions possible under which frontend will go from
+ * XenbusStateReconfiguring into XenbusStateInitialising, some of them are OS
+ * specific. For example:
+ * 1. The underlying OS framework may provide callbacks to signal that the last
+ * client of the virtualized device has gone and the device can be removed
+ * 2. Frontend can schedule a deferred work (timer/tasklet/workqueue)
+ * to periodically check if this is the right time to re-try removal of
+ * the virtualized device.
+ * 3. By any other means.
+ *
+ ******************************************************************************
+ * REQUEST CODES
+ ******************************************************************************
+ * Request codes [0; 15] are reserved and must not be used
+ */
+
+#define XENDISPL_OP_DBUF_CREATE 0x10
+#define XENDISPL_OP_DBUF_DESTROY 0x11
+#define XENDISPL_OP_FB_ATTACH 0x12
+#define XENDISPL_OP_FB_DETACH 0x13
+#define XENDISPL_OP_SET_CONFIG 0x14
+#define XENDISPL_OP_PG_FLIP 0x15
+
+/*
+ ******************************************************************************
+ * EVENT CODES
+ ******************************************************************************
+ */
+#define XENDISPL_EVT_PG_FLIP 0x00
+
+/*
+ ******************************************************************************
+ * XENSTORE FIELD AND PATH NAME STRINGS, HELPERS
+ ******************************************************************************
+ */
+#define XENDISPL_DRIVER_NAME "vdispl"
+
+#define XENDISPL_LIST_SEPARATOR ","
+#define XENDISPL_RESOLUTION_SEPARATOR "x"
+
+#define XENDISPL_FIELD_BE_VERSIONS "versions"
+#define XENDISPL_FIELD_FE_VERSION "version"
+#define XENDISPL_FIELD_REQ_RING_REF "req-ring-ref"
+#define XENDISPL_FIELD_REQ_CHANNEL "req-event-channel"
+#define XENDISPL_FIELD_EVT_RING_REF "evt-ring-ref"
+#define XENDISPL_FIELD_EVT_CHANNEL "evt-event-channel"
+#define XENDISPL_FIELD_RESOLUTION "resolution"
+#define XENDISPL_FIELD_BE_ALLOC "be-alloc"
+
+/*
+ ******************************************************************************
+ * STATUS RETURN CODES
+ ******************************************************************************
+ *
+ * Status return code is zero on success and -XEN_EXX on failure.
+ *
+ ******************************************************************************
+ * Assumptions
+ ******************************************************************************
+ * o usage of grant reference 0 as invalid grant reference:
+ * grant reference 0 is valid, but never exposed to a PV driver,
+ * because it is already in use/reserved by the PV console.
+ * o all references in this document to page sizes must be treated
+ * as pages of size XEN_PAGE_SIZE unless otherwise noted.
+ *
+ ******************************************************************************
+ * Description of the protocol between frontend and backend driver
+ ******************************************************************************
+ *
+ * The two halves of a Para-virtual display driver communicate with
+ * each other using shared pages and event channels.
+ * Shared page contains a ring with request/response packets.
+ *
+ * All reserved fields in the structures below must be 0.
+ * Display buffer's cookie of value 0 is treated as invalid.
+ * Framebuffer's cookie of value 0 is treated as invalid.
+ *
+ * For all request/response/event packets that use cookies:
+ * dbuf_cookie - uint64_t, unique to guest domain value used by the backend
+ * to map remote display buffer to its local one
+ * fb_cookie - uint64_t, unique to guest domain value used by the backend
+ * to map remote framebuffer to its local one
+ *
+ *---------------------------------- Requests ---------------------------------
+ *
+ * All requests/responses, which are not connector specific, must be sent over
+ * control ring of the connector which has the index value of 0:
+ * /local/domain/<dom-id>/device/vdispl/<dev-id>/0/req-ring-ref
+ *
+ * All request packets have the same length (64 octets)
+ * All request packets have common header:
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | id | operation | reserved | 4
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 8
+ * +----------------+----------------+----------------+----------------+
+ * id - uint16_t, private guest value, echoed in response
+ * operation - uint8_t, operation code, XENDISPL_OP_???
+ *
+ * Request dbuf creation - request creation of a display buffer.
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | id |_OP_DBUF_CREATE | reserved | 4
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 8
+ * +----------------+----------------+----------------+----------------+
+ * | dbuf_cookie low 32-bit | 12
+ * +----------------+----------------+----------------+----------------+
+ * | dbuf_cookie high 32-bit | 16
+ * +----------------+----------------+----------------+----------------+
+ * | width | 20
+ * +----------------+----------------+----------------+----------------+
+ * | height | 24
+ * +----------------+----------------+----------------+----------------+
+ * | bpp | 28
+ * +----------------+----------------+----------------+----------------+
+ * | buffer_sz | 32
+ * +----------------+----------------+----------------+----------------+
+ * | flags | 36
+ * +----------------+----------------+----------------+----------------+
+ * | gref_directory | 40
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 44
+ * +----------------+----------------+----------------+----------------+
+ * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 64
+ * +----------------+----------------+----------------+----------------+
+ *
+ * Must be sent over control ring of the connector which has the index
+ * value of 0:
+ * /local/domain/<dom-id>/device/vdispl/<dev-id>/0/req-ring-ref
+ * All unused bits in flags field must be set to 0.
+ *
+ * An attempt to create multiple display buffers with the same dbuf_cookie is
+ * an error. dbuf_cookie can be re-used after destroying the corresponding
+ * display buffer.
+ *
+ * Width and height of the display buffers can be smaller, equal or bigger
+ * than the connector's resolution. Depth/pixel format of the individual
+ * buffers can differ as well.
+ *
+ * width - uint32_t, width in pixels
+ * height - uint32_t, height in pixels
+ * bpp - uint32_t, bits per pixel
+ * buffer_sz - uint32_t, buffer size to be allocated, octets
+ * flags - uint32_t, flags of the operation
+ * o XENDISPL_DBUF_FLG_REQ_ALLOC - if set, then backend is requested
+ * to allocate the buffer with the parameters provided in this request.
+ * Page directory is handled as follows:
+ * Frontend on request:
+ * o allocates pages for the directory (gref_directory,
+ * gref_dir_next_page(s))
+ * o grants permissions for the pages of the directory to the backend
+ * o sets gref_dir_next_page fields
+ * Backend on response:
+ * o grants permissions for the pages of the buffer allocated to
+ * the frontend
+ * o fills in page directory with grant references
+ * (gref[] in struct xendispl_page_directory)
+ * gref_directory - grant_ref_t, a reference to the first shared page
+ * describing shared buffer references. At least one page exists. If shared
+ * buffer size (buffer_sz) exceeds what can be addressed by this single page,
+ * then reference to the next page must be supplied (see gref_dir_next_page
+ * below)
+ */
+
+#define XENDISPL_DBUF_FLG_REQ_ALLOC (1 << 0)
+
+struct xendispl_dbuf_create_req {
+ uint64_t dbuf_cookie;
+ uint32_t width;
+ uint32_t height;
+ uint32_t bpp;
+ uint32_t buffer_sz;
+ uint32_t flags;
+ grant_ref_t gref_directory;
+};
+
+/*
+ * Shared page for XENDISPL_OP_DBUF_CREATE buffer descriptor (gref_directory in
+ * the request) employs a list of pages, describing all pages of the shared
+ * data buffer:
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | gref_dir_next_page | 4
+ * +----------------+----------------+----------------+----------------+
+ * | gref[0] | 8
+ * +----------------+----------------+----------------+----------------+
+ * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
+ * +----------------+----------------+----------------+----------------+
+ * | gref[i] | i*4+8
+ * +----------------+----------------+----------------+----------------+
+ * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
+ * +----------------+----------------+----------------+----------------+
+ * | gref[N - 1] | (N - 1)*4+8
+ * +----------------+----------------+----------------+----------------+
+ *
+ * gref_dir_next_page - grant_ref_t, reference to the next page describing
+ * page directory. Must be 0 if there are no more pages in the list.
+ * gref[i] - grant_ref_t, reference to a shared page of the buffer
+ * allocated at XENDISPL_OP_DBUF_CREATE
+ *
+ * Number of grant_ref_t entries in the whole page directory is not
+ * passed, but instead can be calculated as:
+ * num_grefs_total = (XENDISPL_OP_DBUF_CREATE.buffer_sz + XEN_PAGE_SIZE - 1) /
+ * XEN_PAGE_SIZE
+ */
+
+struct xendispl_page_directory {
+ grant_ref_t gref_dir_next_page;
+ grant_ref_t gref[1]; /* Variable length */
+};
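A small sketch of the gref arithmetic described above: total data grefs per the formula in the comment, plus the number of directory pages needed to carry them, assuming each directory page holds one gref_dir_next_page followed by gref[] entries filling the rest of a XEN_PAGE_SIZE page:

  #include <linux/types.h>
  #include <xen/page.h>                     /* XEN_PAGE_SIZE */
  #include <xen/interface/io/displif.h>

  /* Total grant references covering buffer_sz, as given in the comment above. */
  static inline u32 dbuf_num_grefs(u32 buffer_sz)
  {
      return (buffer_sz + XEN_PAGE_SIZE - 1) / XEN_PAGE_SIZE;
  }

  /* Directory pages needed to hold those grefs (packing is an assumption). */
  static inline u32 dbuf_num_dir_pages(u32 buffer_sz)
  {
      u32 grefs_per_page = (XEN_PAGE_SIZE - sizeof(grant_ref_t)) /
                           sizeof(grant_ref_t);

      return (dbuf_num_grefs(buffer_sz) + grefs_per_page - 1) / grefs_per_page;
  }
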
+
+/*
+ * Request dbuf destruction - destroy a previously allocated display buffer:
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | id |_OP_DBUF_DESTROY| reserved | 4
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 8
+ * +----------------+----------------+----------------+----------------+
+ * | dbuf_cookie low 32-bit | 12
+ * +----------------+----------------+----------------+----------------+
+ * | dbuf_cookie high 32-bit | 16
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 20
+ * +----------------+----------------+----------------+----------------+
+ * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 64
+ * +----------------+----------------+----------------+----------------+
+ *
+ * Must be sent over control ring of the connector which has the index
+ * value of 0:
+ * /local/domain/<dom-id>/device/vdispl/<dev-id>/0/req-ring-ref
+ */
+
+struct xendispl_dbuf_destroy_req {
+ uint64_t dbuf_cookie;
+};
+
+/*
+ * Request framebuffer attachment - request attachment of a framebuffer to
+ * previously created display buffer.
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | id | _OP_FB_ATTACH | reserved | 4
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 8
+ * +----------------+----------------+----------------+----------------+
+ * | dbuf_cookie low 32-bit | 12
+ * +----------------+----------------+----------------+----------------+
+ * | dbuf_cookie high 32-bit | 16
+ * +----------------+----------------+----------------+----------------+
+ * | fb_cookie low 32-bit | 20
+ * +----------------+----------------+----------------+----------------+
+ * | fb_cookie high 32-bit | 24
+ * +----------------+----------------+----------------+----------------+
+ * | width | 28
+ * +----------------+----------------+----------------+----------------+
+ * | height | 32
+ * +----------------+----------------+----------------+----------------+
+ * | pixel_format | 36
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 40
+ * +----------------+----------------+----------------+----------------+
+ * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 64
+ * +----------------+----------------+----------------+----------------+
+ *
+ * Must be sent over control ring of the connector which has the index
+ * value of 0:
+ * /local/domain/<dom-id>/device/vdispl/<dev-id>/0/req-ring-ref
+ * Width and height can be smaller, equal or bigger than the connector's
+ * resolution.
+ *
+ * An attempt to create multiple frame buffers with the same fb_cookie is
+ * an error. fb_cookie can be re-used after destroying the corresponding
+ * frame buffer.
+ *
+ * width - uint32_t, width in pixels
+ * height - uint32_t, height in pixels
+ * pixel_format - uint32_t, pixel format of the framebuffer, FOURCC code
+ */
+
+struct xendispl_fb_attach_req {
+ uint64_t dbuf_cookie;
+ uint64_t fb_cookie;
+ uint32_t width;
+ uint32_t height;
+ uint32_t pixel_format;
+};
+
+/*
+ * Request framebuffer detach - detach a previously
+ * attached framebuffer from the display buffer in request:
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | id | _OP_FB_DETACH | reserved | 4
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 8
+ * +----------------+----------------+----------------+----------------+
+ * | fb_cookie low 32-bit | 12
+ * +----------------+----------------+----------------+----------------+
+ * | fb_cookie high 32-bit | 16
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 20
+ * +----------------+----------------+----------------+----------------+
+ * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 64
+ * +----------------+----------------+----------------+----------------+
+ *
+ * Must be sent over control ring of the connector which has the index
+ * value of 0:
+ * /local/domain/<dom-id>/device/vdispl/<dev-id>/0/req-ring-ref
+ */
+
+struct xendispl_fb_detach_req {
+ uint64_t fb_cookie;
+};
+
+/*
+ * Request configuration set/reset - request to set or reset
+ * the configuration/mode of the display:
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | id | _OP_SET_CONFIG | reserved | 4
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 8
+ * +----------------+----------------+----------------+----------------+
+ * | fb_cookie low 32-bit | 12
+ * +----------------+----------------+----------------+----------------+
+ * | fb_cookie high 32-bit | 16
+ * +----------------+----------------+----------------+----------------+
+ * | x | 20
+ * +----------------+----------------+----------------+----------------+
+ * | y | 24
+ * +----------------+----------------+----------------+----------------+
+ * | width | 28
+ * +----------------+----------------+----------------+----------------+
+ * | height | 32
+ * +----------------+----------------+----------------+----------------+
+ * | bpp | 36
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 40
+ * +----------------+----------------+----------------+----------------+
+ * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 64
+ * +----------------+----------------+----------------+----------------+
+ *
+ * Pass all zeros to reset, otherwise command is treated as
+ * configuration set.
+ * Framebuffer's cookie defines which framebuffer/dbuf must be
+ * displayed while enabling display (applying configuration).
+ * x, y, width and height are bound by the connector's resolution and must not
+ * exceed it.
+ *
+ * x - uint32_t, starting position in pixels by X axis
+ * y - uint32_t, starting position in pixels by Y axis
+ * width - uint32_t, width in pixels
+ * height - uint32_t, height in pixels
+ * bpp - uint32_t, bits per pixel
+ */
+
+struct xendispl_set_config_req {
+ uint64_t fb_cookie;
+ uint32_t x;
+ uint32_t y;
+ uint32_t width;
+ uint32_t height;
+ uint32_t bpp;
+};
+
+/*
+ * Request page flip - request to flip a page identified by the framebuffer
+ * cookie:
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | id | _OP_PG_FLIP | reserved | 4
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 8
+ * +----------------+----------------+----------------+----------------+
+ * | fb_cookie low 32-bit | 12
+ * +----------------+----------------+----------------+----------------+
+ * | fb_cookie high 32-bit | 16
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 20
+ * +----------------+----------------+----------------+----------------+
+ * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 64
+ * +----------------+----------------+----------------+----------------+
+ */
+
+struct xendispl_page_flip_req {
+ uint64_t fb_cookie;
+};
+
+/*
+ *---------------------------------- Responses --------------------------------
+ *
+ * All response packets have the same length (64 octets)
+ *
+ * All response packets have common header:
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | id | reserved | 4
+ * +----------------+----------------+----------------+----------------+
+ * | status | 8
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 12
+ * +----------------+----------------+----------------+----------------+
+ * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 64
+ * +----------------+----------------+----------------+----------------+
+ *
+ * id - uint16_t, private guest value, echoed from request
+ * status - int32_t, response status, zero on success and -XEN_EXX on failure
+ *
+ *----------------------------------- Events ----------------------------------
+ *
+ * Events are sent via a shared page allocated by the front and propagated by
+ * evt-event-channel/evt-ring-ref XenStore entries
+ * All event packets have the same length (64 octets)
+ * All event packets have common header:
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | id | type | reserved | 4
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 8
+ * +----------------+----------------+----------------+----------------+
+ *
+ * id - uint16_t, event id, may be used by front
+ * type - uint8_t, type of the event
+ *
+ *
+ * Page flip complete event - event from back to front on page flip completed:
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | id | _EVT_PG_FLIP | reserved | 4
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 8
+ * +----------------+----------------+----------------+----------------+
+ * | fb_cookie low 32-bit | 12
+ * +----------------+----------------+----------------+----------------+
+ * | fb_cookie high 32-bit | 16
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 20
+ * +----------------+----------------+----------------+----------------+
+ * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 64
+ * +----------------+----------------+----------------+----------------+
+ */
+
+struct xendispl_pg_flip_evt {
+ uint64_t fb_cookie;
+};
+
+struct xendispl_req {
+ uint16_t id;
+ uint8_t operation;
+ uint8_t reserved[5];
+ union {
+ struct xendispl_dbuf_create_req dbuf_create;
+ struct xendispl_dbuf_destroy_req dbuf_destroy;
+ struct xendispl_fb_attach_req fb_attach;
+ struct xendispl_fb_detach_req fb_detach;
+ struct xendispl_set_config_req set_config;
+ struct xendispl_page_flip_req pg_flip;
+ uint8_t reserved[56];
+ } op;
+};
+
+struct xendispl_resp {
+ uint16_t id;
+ uint8_t operation;
+ uint8_t reserved;
+ int32_t status;
+ uint8_t reserved1[56];
+};
+
+struct xendispl_evt {
+ uint16_t id;
+ uint8_t type;
+ uint8_t reserved[5];
+ union {
+ struct xendispl_pg_flip_evt pg_flip;
+ uint8_t reserved[56];
+ } op;
+};
+
+DEFINE_RING_TYPES(xen_displif, struct xendispl_req, struct xendispl_resp);
+
+/*
+ ******************************************************************************
+ * Back to front events delivery
+ ******************************************************************************
+ * In order to deliver asynchronous events from back to front a shared page is
+ * allocated by front and its granted reference propagated to back via
+ * XenStore entries (evt-ring-ref/evt-event-channel).
+ * This page has a common header used by both front and back to synchronize
+ * access and control the event ring buffer, with back being the producer of
+ * the events and front being the consumer. The rest of the page after the
+ * header is used for event packets.
+ *
+ * Upon reception of one or more events, front may confirm reception of each
+ * event individually, of a group of events, or of none at all.
+ */
+
+struct xendispl_event_page {
+ uint32_t in_cons;
+ uint32_t in_prod;
+ uint8_t reserved[56];
+};
+
+#define XENDISPL_EVENT_PAGE_SIZE XEN_PAGE_SIZE
+#define XENDISPL_IN_RING_OFFS (sizeof(struct xendispl_event_page))
+#define XENDISPL_IN_RING_SIZE (XENDISPL_EVENT_PAGE_SIZE - XENDISPL_IN_RING_OFFS)
+#define XENDISPL_IN_RING_LEN (XENDISPL_IN_RING_SIZE / sizeof(struct xendispl_evt))
+#define XENDISPL_IN_RING(page) \
+ ((struct xendispl_evt *)((char *)(page) + XENDISPL_IN_RING_OFFS))
+#define XENDISPL_IN_RING_REF(page, idx) \
+ (XENDISPL_IN_RING((page))[(idx) % XENDISPL_IN_RING_LEN])
+
+#endif /* __XEN_PUBLIC_IO_DISPLIF_H__ */
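Building on the ring helpers defined just above, an illustrative sketch of how a frontend might drain the event page; handle_event() is a hypothetical callback and the barrier placement follows the usual shared-ring producer/consumer convention:

  #include <linux/types.h>
  #include <asm/barrier.h>
  #include <xen/interface/io/displif.h>

  /* Hypothetical per-event callback supplied by the frontend driver. */
  extern void handle_event(const struct xendispl_evt *evt);

  static void displ_front_drain_events(struct xendispl_event_page *page)
  {
      u32 cons = page->in_cons;
      u32 prod = page->in_prod;

      virt_rmb();                 /* make sure we see events up to 'prod' */
      while (cons != prod) {
          handle_event(&XENDISPL_IN_RING_REF(page, cons));
          cons++;
      }
      page->in_cons = cons;
      virt_mb();                  /* publish the new consumer index */
  }
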
diff --git a/include/xen/interface/io/kbdif.h b/include/xen/interface/io/kbdif.h
index 8066c7849fbe..2a9510ade701 100644
--- a/include/xen/interface/io/kbdif.h
+++ b/include/xen/interface/io/kbdif.h
@@ -26,43 +26,432 @@
#ifndef __XEN_PUBLIC_IO_KBDIF_H__
#define __XEN_PUBLIC_IO_KBDIF_H__
-/* In events (backend -> frontend) */
+/*
+ *****************************************************************************
+ * Feature and Parameter Negotiation
+ *****************************************************************************
+ *
+ * The two halves of a para-virtual driver utilize nodes within
+ * XenStore to communicate capabilities and to negotiate operating parameters.
+ * This section enumerates these nodes which reside in the respective front and
+ * backend portions of XenStore, following XenBus convention.
+ *
+ * All data in XenStore is stored as strings. Nodes specifying numeric
+ * values are encoded in decimal. Integer value ranges listed below are
+ * expressed as fixed sized integer types capable of storing the conversion
+ * of a properly formatted node string, without loss of information.
+ *
+ *****************************************************************************
+ * Backend XenBus Nodes
+ *****************************************************************************
+ *
+ *---------------------------- Features supported ----------------------------
+ *
+ * Capable backend advertises supported features by publishing
+ * corresponding entries in XenStore and puts 1 as the value of the entry.
+ * If a feature is not supported then 0 must be set or feature entry omitted.
+ *
+ * feature-abs-pointer
+ * Values: <uint>
+ *
+ * Backends, which support reporting of absolute coordinates for pointer
+ * device should set this to 1.
+ *
+ * feature-multi-touch
+ * Values: <uint>
+ *
+ * Backends, which support reporting of multi-touch events
+ * should set this to 1.
+ *
+ *------------------------- Pointer Device Parameters ------------------------
+ *
+ * width
+ * Values: <uint>
+ *
+ * Maximum X coordinate (width) to be used by the frontend
+ * while reporting input events, pixels, [0; UINT32_MAX].
+ *
+ * height
+ * Values: <uint>
+ *
+ * Maximum Y coordinate (height) to be used by the frontend
+ * while reporting input events, pixels, [0; UINT32_MAX].
+ *
+ *****************************************************************************
+ * Frontend XenBus Nodes
+ *****************************************************************************
+ *
+ *------------------------------ Feature request -----------------------------
+ *
+ * Capable frontend requests features from backend via setting corresponding
+ * entries to 1 in XenStore. Requests for features not advertised as supported
+ * by the backend have no effect.
+ *
+ * request-abs-pointer
+ * Values: <uint>
+ *
+ * Request backend to report absolute pointer coordinates
+ * (XENKBD_TYPE_POS) instead of relative ones (XENKBD_TYPE_MOTION).
+ *
+ * request-multi-touch
+ * Values: <uint>
+ *
+ * Request backend to report multi-touch events.
+ *
+ *----------------------- Request Transport Parameters -----------------------
+ *
+ * event-channel
+ * Values: <uint>
+ *
+ * The identifier of the Xen event channel used to signal activity
+ * in the ring buffer.
+ *
+ * page-gref
+ * Values: <uint>
+ *
+ * The Xen grant reference granting permission for the backend to map
+ * a sole page in a single page sized event ring buffer.
+ *
+ * page-ref
+ * Values: <uint>
+ *
+ * OBSOLETE, not recommended for use.
+ * PFN of the shared page.
+ *
+ *----------------------- Multi-touch Device Parameters -----------------------
+ *
+ * multi-touch-num-contacts
+ * Values: <uint>
+ *
+ * Number of simultaneous touches reported.
+ *
+ * multi-touch-width
+ * Values: <uint>
+ *
+ * Width of the touch area to be used by the frontend
+ * while reporting input events, pixels, [0; UINT32_MAX].
+ *
+ * multi-touch-height
+ * Values: <uint>
+ *
+ * Height of the touch area to be used by the frontend
+ * while reporting input events, pixels, [0; UINT32_MAX].
+ */
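For the negotiation described above, a hedged frontend-side sketch: the backend's feature-* entries are read from its XenStore directory and matching request-* entries are written under the frontend's own node, using the XENKBD_FIELD_* strings defined later in this header:

  #include <linux/types.h>
  #include <xen/xenbus.h>
  #include <xen/interface/io/kbdif.h>

  /* Ask for absolute coordinates and multi-touch when the backend offers them. */
  static void kbd_front_request_features(struct xenbus_device *xb_dev)
  {
      unsigned int val = 0;

      if (xenbus_scanf(XBT_NIL, xb_dev->otherend,
                       XENKBD_FIELD_FEAT_ABS_POINTER, "%u", &val) == 1 && val)
          xenbus_printf(XBT_NIL, xb_dev->nodename,
                        XENKBD_FIELD_REQ_ABS_POINTER, "%u", 1);

      val = 0;
      if (xenbus_scanf(XBT_NIL, xb_dev->otherend,
                       XENKBD_FIELD_FEAT_MTOUCH, "%u", &val) == 1 && val)
          xenbus_printf(XBT_NIL, xb_dev->nodename,
                        XENKBD_FIELD_REQ_MTOUCH, "%u", 1);
  }
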
/*
- * Frontends should ignore unknown in events.
+ * EVENT CODES.
*/
-/* Pointer movement event */
-#define XENKBD_TYPE_MOTION 1
-/* Event type 2 currently not used */
-/* Key event (includes pointer buttons) */
-#define XENKBD_TYPE_KEY 3
+#define XENKBD_TYPE_MOTION 1
+#define XENKBD_TYPE_RESERVED 2
+#define XENKBD_TYPE_KEY 3
+#define XENKBD_TYPE_POS 4
+#define XENKBD_TYPE_MTOUCH 5
+
+/* Multi-touch event sub-codes */
+
+#define XENKBD_MT_EV_DOWN 0
+#define XENKBD_MT_EV_UP 1
+#define XENKBD_MT_EV_MOTION 2
+#define XENKBD_MT_EV_SYN 3
+#define XENKBD_MT_EV_SHAPE 4
+#define XENKBD_MT_EV_ORIENT 5
+
+/*
+ * CONSTANTS, XENSTORE FIELD AND PATH NAME STRINGS, HELPERS.
+ */
+
+#define XENKBD_DRIVER_NAME "vkbd"
+
+#define XENKBD_FIELD_FEAT_ABS_POINTER "feature-abs-pointer"
+#define XENKBD_FIELD_FEAT_MTOUCH "feature-multi-touch"
+#define XENKBD_FIELD_REQ_ABS_POINTER "request-abs-pointer"
+#define XENKBD_FIELD_REQ_MTOUCH "request-multi-touch"
+#define XENKBD_FIELD_RING_GREF "page-gref"
+#define XENKBD_FIELD_EVT_CHANNEL "event-channel"
+#define XENKBD_FIELD_WIDTH "width"
+#define XENKBD_FIELD_HEIGHT "height"
+#define XENKBD_FIELD_MT_WIDTH "multi-touch-width"
+#define XENKBD_FIELD_MT_HEIGHT "multi-touch-height"
+#define XENKBD_FIELD_MT_NUM_CONTACTS "multi-touch-num-contacts"
+
+/* OBSOLETE, not recommended for use */
+#define XENKBD_FIELD_RING_REF "page-ref"
+
/*
- * Pointer position event
- * Capable backend sets feature-abs-pointer in xenstore.
- * Frontend requests ot instead of XENKBD_TYPE_MOTION by setting
- * request-abs-update in xenstore.
+ *****************************************************************************
+ * Description of the protocol between frontend and backend driver.
+ *****************************************************************************
+ *
+ * The two halves of a Para-virtual driver communicate with
+ * each other using a shared page and an event channel.
+ * Shared page contains a ring with event structures.
+ *
+ * All reserved fields in the structures below must be 0.
+ *
+ *****************************************************************************
+ * Backend to frontend events
+ *****************************************************************************
+ *
+ * Frontends should ignore unknown in events.
+ * All event packets have the same length (40 octets)
+ * All event packets have common header:
+ *
+ * 0 octet
+ * +-----------------+
+ * | type |
+ * +-----------------+
+ * type - uint8_t, event code, XENKBD_TYPE_???
+ *
+ *
+ * Pointer relative movement event
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | _TYPE_MOTION | reserved | 4
+ * +----------------+----------------+----------------+----------------+
+ * | rel_x | 8
+ * +----------------+----------------+----------------+----------------+
+ * | rel_y | 12
+ * +----------------+----------------+----------------+----------------+
+ * | rel_z | 16
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 20
+ * +----------------+----------------+----------------+----------------+
+ * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 40
+ * +----------------+----------------+----------------+----------------+
+ *
+ * rel_x - int32_t, relative X motion
+ * rel_y - int32_t, relative Y motion
+ * rel_z - int32_t, relative Z motion (wheel)
*/
-#define XENKBD_TYPE_POS 4
struct xenkbd_motion {
- uint8_t type; /* XENKBD_TYPE_MOTION */
- int32_t rel_x; /* relative X motion */
- int32_t rel_y; /* relative Y motion */
- int32_t rel_z; /* relative Z motion (wheel) */
+ uint8_t type;
+ int32_t rel_x;
+ int32_t rel_y;
+ int32_t rel_z;
};
+/*
+ * Key event (includes pointer buttons)
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | _TYPE_KEY | pressed | reserved | 4
+ * +----------------+----------------+----------------+----------------+
+ * | keycode | 8
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 12
+ * +----------------+----------------+----------------+----------------+
+ * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 40
+ * +----------------+----------------+----------------+----------------+
+ *
+ * pressed - uint8_t, 1 if pressed; 0 otherwise
+ * keycode - uint32_t, KEY_* from linux/input.h
+ */
+
struct xenkbd_key {
- uint8_t type; /* XENKBD_TYPE_KEY */
- uint8_t pressed; /* 1 if pressed; 0 otherwise */
- uint32_t keycode; /* KEY_* from linux/input.h */
+ uint8_t type;
+ uint8_t pressed;
+ uint32_t keycode;
};
+/*
+ * Pointer absolute position event
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | _TYPE_POS | reserved | 4
+ * +----------------+----------------+----------------+----------------+
+ * | abs_x | 8
+ * +----------------+----------------+----------------+----------------+
+ * | abs_y | 12
+ * +----------------+----------------+----------------+----------------+
+ * | rel_z | 16
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 20
+ * +----------------+----------------+----------------+----------------+
+ * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 40
+ * +----------------+----------------+----------------+----------------+
+ *
+ * abs_x - int32_t, absolute X position (in FB pixels)
+ * abs_y - int32_t, absolute Y position (in FB pixels)
+ * rel_z - int32_t, relative Z motion (wheel)
+ */
+
struct xenkbd_position {
- uint8_t type; /* XENKBD_TYPE_POS */
- int32_t abs_x; /* absolute X position (in FB pixels) */
- int32_t abs_y; /* absolute Y position (in FB pixels) */
- int32_t rel_z; /* relative Z motion (wheel) */
+ uint8_t type;
+ int32_t abs_x;
+ int32_t abs_y;
+ int32_t rel_z;
+};
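To tie the event layouts above together, a compact dispatcher sketch; the 40-octet packet is treated as an opaque buffer whose first octet is the type code, and the report_*() callbacks are hypothetical:

  #include <linux/types.h>
  #include <xen/interface/io/kbdif.h>

  /* Hypothetical frontend callbacks feeding the input subsystem. */
  extern void report_motion(s32 rel_x, s32 rel_y, s32 rel_z);
  extern void report_key(u32 keycode, u8 pressed);
  extern void report_position(s32 abs_x, s32 abs_y, s32 rel_z);

  static void kbd_front_handle_event(const void *packet)
  {
      switch (*(const u8 *)packet) {          /* common header: type */
      case XENKBD_TYPE_MOTION: {
          const struct xenkbd_motion *m = packet;

          report_motion(m->rel_x, m->rel_y, m->rel_z);
          break;
      }
      case XENKBD_TYPE_KEY: {
          const struct xenkbd_key *k = packet;

          report_key(k->keycode, k->pressed);
          break;
      }
      case XENKBD_TYPE_POS: {
          const struct xenkbd_position *p = packet;

          report_position(p->abs_x, p->abs_y, p->rel_z);
          break;
      }
      default:
          break;                              /* ignore unknown in-events */
      }
  }
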
+
+/*
+ * Multi-touch event and its sub-types
+ *
+ * All multi-touch event packets have common header:
+ *
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | _TYPE_MTOUCH | event_type | contact_id | reserved | 4
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 8
+ * +----------------+----------------+----------------+----------------+
+ *
+ * event_type - uint8_t, multi-touch event sub-type, XENKBD_MT_EV_???
+ * contact_id - uint8_t, ID of the contact
+ *
+ * Touch interactions can consist of one or more contacts.
+ * For each contact, a series of events is generated, starting
+ * with a down event, followed by zero or more motion events,
+ * and ending with an up event. Events relating to the same
+ * contact point can be identified by the contact ID of the sequence.
+ * A contact ID may be reused after an XENKBD_MT_EV_UP event and
+ * is in the [0; XENKBD_FIELD_NUM_CONTACTS - 1] range.
+ *
+ * For further information please refer to documentation on Wayland [1],
+ * Linux [2] and Windows [3] multi-touch support.
+ *
+ * [1] https://cgit.freedesktop.org/wayland/wayland/tree/protocol/wayland.xml
+ * [2] https://www.kernel.org/doc/Documentation/input/multi-touch-protocol.txt
+ * [3] https://msdn.microsoft.com/en-us/library/jj151564(v=vs.85).aspx
+ *
+ *
+ * Multi-touch down event - sent when a new touch is made: the touch is
+ * assigned a unique contact ID, sent with this and subsequent events related
+ * to this touch.
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | _TYPE_MTOUCH | _MT_EV_DOWN | contact_id | reserved | 4
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 8
+ * +----------------+----------------+----------------+----------------+
+ * | abs_x | 12
+ * +----------------+----------------+----------------+----------------+
+ * | abs_y | 16
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 20
+ * +----------------+----------------+----------------+----------------+
+ * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 40
+ * +----------------+----------------+----------------+----------------+
+ *
+ * abs_x - int32_t, absolute X position, in pixels
+ * abs_y - int32_t, absolute Y position, in pixels
+ *
+ * Multi-touch contact release event
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | _TYPE_MTOUCH | _MT_EV_UP | contact_id | reserved | 4
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 8
+ * +----------------+----------------+----------------+----------------+
+ * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 40
+ * +----------------+----------------+----------------+----------------+
+ *
+ * Multi-touch motion event
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | _TYPE_MTOUCH | _MT_EV_MOTION | contact_id | reserved | 4
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 8
+ * +----------------+----------------+----------------+----------------+
+ * | abs_x | 12
+ * +----------------+----------------+----------------+----------------+
+ * | abs_y | 16
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 20
+ * +----------------+----------------+----------------+----------------+
+ * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 40
+ * +----------------+----------------+----------------+----------------+
+ *
+ * abs_x - int32_t, absolute X position, in pixels
+ * abs_y - int32_t, absolute Y position, in pixels
+ *
+ * Multi-touch input synchronization event - marks the end of a set of events
+ * which logically belong together.
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | _TYPE_MTOUCH | _MT_EV_SYN | contact_id | reserved | 4
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 8
+ * +----------------+----------------+----------------+----------------+
+ * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 40
+ * +----------------+----------------+----------------+----------------+
+ *
+ * Multi-touch shape event - the shape of the touch point has changed.
+ * Shape is approximated by an ellipse through the major and minor axis
+ * lengths: major is the longer diameter of the ellipse and minor is the
+ * shorter one. Center of the ellipse is reported via
+ * XENKBD_MT_EV_DOWN/XENKBD_MT_EV_MOTION events.
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | _TYPE_MTOUCH | _MT_EV_SHAPE | contact_id | reserved | 4
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 8
+ * +----------------+----------------+----------------+----------------+
+ * | major | 12
+ * +----------------+----------------+----------------+----------------+
+ * | minor | 16
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 20
+ * +----------------+----------------+----------------+----------------+
+ * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 40
+ * +----------------+----------------+----------------+----------------+
+ *
+ * major - uint32_t, length of the major axis, pixels
+ * minor - uint32_t, length of the minor axis, pixels
+ *
+ * Multi-touch orientation event - the orientation of the touch point's shape
+ * has changed: it is reported as a clockwise angle between the major axis
+ * of the ellipse and the positive Y axis, in degrees, [-180; +180].
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | _TYPE_MTOUCH | _MT_EV_ORIENT | contact_id | reserved | 4
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 8
+ * +----------------+----------------+----------------+----------------+
+ * | orientation | reserved | 12
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 16
+ * +----------------+----------------+----------------+----------------+
+ * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 40
+ * +----------------+----------------+----------------+----------------+
+ *
+ * orientation - int16_t, clockwise angle of the major axis
+ */
+
+struct xenkbd_mtouch {
+ uint8_t type; /* XENKBD_TYPE_MTOUCH */
+ uint8_t event_type; /* XENKBD_MT_EV_??? */
+ uint8_t contact_id;
+ uint8_t reserved[5]; /* reserved for future use */
+ union {
+ struct {
+ int32_t abs_x; /* absolute X position, pixels */
+ int32_t abs_y; /* absolute Y position, pixels */
+ } pos;
+ struct {
+ uint32_t major; /* length of the major axis, pixels */
+ uint32_t minor; /* length of the minor axis, pixels */
+ } shape;
+ int16_t orientation; /* clockwise angle of the major axis */
+ } u;
};
#define XENKBD_IN_EVENT_SIZE 40
@@ -72,15 +461,26 @@ union xenkbd_in_event {
struct xenkbd_motion motion;
struct xenkbd_key key;
struct xenkbd_position pos;
+ struct xenkbd_mtouch mtouch;
char pad[XENKBD_IN_EVENT_SIZE];
};
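
A minimal receiver-side sketch of how these in events can be dispatched once
copied out of the shared ring, assuming the XENKBD_TYPE_* constants defined
earlier in this header; the handle_*() helpers are hypothetical placeholders,
and the type byte selects which union member is valid:

static void xenkbd_dispatch_in_event(const union xenkbd_in_event *ev)
{
	switch (ev->type) {
	case XENKBD_TYPE_MOTION:
		handle_motion(ev->motion.rel_x, ev->motion.rel_y,
			      ev->motion.rel_z);
		break;
	case XENKBD_TYPE_KEY:
		handle_key(ev->key.keycode, ev->key.pressed);
		break;
	case XENKBD_TYPE_POS:
		handle_pos(ev->pos.abs_x, ev->pos.abs_y, ev->pos.rel_z);
		break;
	case XENKBD_TYPE_MTOUCH:
		/* event_type selects which member of mtouch.u is valid */
		handle_mtouch(ev->mtouch.event_type, ev->mtouch.contact_id,
			      &ev->mtouch.u);
		break;
	default:
		/* skip event types this receiver does not understand */
		break;
	}
}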
-/* Out events (frontend -> backend) */
-
/*
+ *****************************************************************************
+ * Frontend to backend events
+ *****************************************************************************
+ *
* Out events may be sent only when requested by backend, and receipt
* of an unknown out event is an error.
* No out events currently defined.
+
+ * All event packets have the same length (40 octets)
+ * All event packets have common header:
+ * 0 octet
+ * +-----------------+
+ * | type |
+ * +-----------------+
+ * type - uint8_t, event code
*/
#define XENKBD_OUT_EVENT_SIZE 40
@@ -90,7 +490,11 @@ union xenkbd_out_event {
char pad[XENKBD_OUT_EVENT_SIZE];
};
-/* shared page */
+/*
+ *****************************************************************************
+ * Shared page
+ *****************************************************************************
+ */
#define XENKBD_IN_RING_SIZE 2048
#define XENKBD_IN_RING_LEN (XENKBD_IN_RING_SIZE / XENKBD_IN_EVENT_SIZE)
@@ -113,4 +517,4 @@ struct xenkbd_page {
uint32_t out_cons, out_prod;
};
-#endif
+#endif /* __XEN_PUBLIC_IO_KBDIF_H__ */
diff --git a/include/xen/interface/io/ring.h b/include/xen/interface/io/ring.h
index 21f4fbd55e48..c79456855539 100644
--- a/include/xen/interface/io/ring.h
+++ b/include/xen/interface/io/ring.h
@@ -283,4 +283,147 @@ struct __name##_back_ring { \
(_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \
} while (0)
+
+/*
+ * DEFINE_XEN_FLEX_RING_AND_INTF defines two monodirectional rings and
+ * functions to check if there is data on the ring, and to read and
+ * write to them.
+ *
+ * DEFINE_XEN_FLEX_RING is similar to DEFINE_XEN_FLEX_RING_AND_INTF, but
+ * does not define the indexes page. As different protocols can have
+ * extensions to the basic format, this macro allows them to define their
+ * own struct.
+ *
+ * XEN_FLEX_RING_SIZE
+ * Convenience macro to calculate the size of one of the two rings
+ * from the overall order.
+ *
+ * $NAME_mask
+ * Function to apply the size mask to an index, to reduce the index
+ * to the range [0; size - 1].
+ *
+ * $NAME_read_packet
+ * Function to read data from the ring. The amount of data to read is
+ * specified by the "size" argument.
+ *
+ * $NAME_write_packet
+ * Function to write data to the ring. The amount of data to write is
+ * specified by the "size" argument.
+ *
+ * $NAME_get_ring_ptr
+ * Convenience function that returns a pointer to read/write to the
+ * ring at the right location.
+ *
+ * $NAME_data_intf
+ * Indexes page, shared between frontend and backend. It also
+ * contains the array of grant refs.
+ *
+ * $NAME_queued
+ * Function to calculate how many bytes are currently on the ring,
+ * ready to be read. It can also be used to calculate how much free
+ * space is currently on the ring (XEN_FLEX_RING_SIZE() -
+ * $NAME_queued()).
+ */
+
+#ifndef XEN_PAGE_SHIFT
+/* The PAGE_SIZE for ring protocols and hypercall interfaces is always
+ * 4K, regardless of the architecture and the page granularity chosen by
+ * the operating system.
+ */
+#define XEN_PAGE_SHIFT 12
+#endif
+#define XEN_FLEX_RING_SIZE(order) \
+ (1UL << ((order) + XEN_PAGE_SHIFT - 1))
+
+#define DEFINE_XEN_FLEX_RING(name) \
+static inline RING_IDX name##_mask(RING_IDX idx, RING_IDX ring_size) \
+{ \
+ return idx & (ring_size - 1); \
+} \
+ \
+static inline unsigned char *name##_get_ring_ptr(unsigned char *buf, \
+ RING_IDX idx, \
+ RING_IDX ring_size) \
+{ \
+ return buf + name##_mask(idx, ring_size); \
+} \
+ \
+static inline void name##_read_packet(void *opaque, \
+ const unsigned char *buf, \
+ size_t size, \
+ RING_IDX masked_prod, \
+ RING_IDX *masked_cons, \
+ RING_IDX ring_size) \
+{ \
+ if (*masked_cons < masked_prod || \
+ size <= ring_size - *masked_cons) { \
+ memcpy(opaque, buf + *masked_cons, size); \
+ } else { \
+ memcpy(opaque, buf + *masked_cons, ring_size - *masked_cons); \
+ memcpy((unsigned char *)opaque + ring_size - *masked_cons, buf, \
+ size - (ring_size - *masked_cons)); \
+ } \
+ *masked_cons = name##_mask(*masked_cons + size, ring_size); \
+} \
+ \
+static inline void name##_write_packet(unsigned char *buf, \
+ const void *opaque, \
+ size_t size, \
+ RING_IDX *masked_prod, \
+ RING_IDX masked_cons, \
+ RING_IDX ring_size) \
+{ \
+ if (*masked_prod < masked_cons || \
+ size <= ring_size - *masked_prod) { \
+ memcpy(buf + *masked_prod, opaque, size); \
+ } else { \
+ memcpy(buf + *masked_prod, opaque, ring_size - *masked_prod); \
+ memcpy(buf, (unsigned char *)opaque + (ring_size - *masked_prod), \
+ size - (ring_size - *masked_prod)); \
+ } \
+ *masked_prod = name##_mask(*masked_prod + size, ring_size); \
+} \
+ \
+static inline RING_IDX name##_queued(RING_IDX prod, \
+ RING_IDX cons, \
+ RING_IDX ring_size) \
+{ \
+ RING_IDX size; \
+ \
+ if (prod == cons) \
+ return 0; \
+ \
+ prod = name##_mask(prod, ring_size); \
+ cons = name##_mask(cons, ring_size); \
+ \
+ if (prod == cons) \
+ return ring_size; \
+ \
+ if (prod > cons) \
+ size = prod - cons; \
+ else \
+ size = ring_size - (cons - prod); \
+ return size; \
+} \
+ \
+struct name##_data { \
+ unsigned char *in; /* half of the allocation */ \
+ unsigned char *out; /* half of the allocation */ \
+}
+
+#define DEFINE_XEN_FLEX_RING_AND_INTF(name) \
+struct name##_data_intf { \
+ RING_IDX in_cons, in_prod; \
+ \
+ uint8_t pad1[56]; \
+ \
+ RING_IDX out_cons, out_prod; \
+ \
+ uint8_t pad2[56]; \
+ \
+ RING_IDX ring_order; \
+ grant_ref_t ref[]; \
+}; \
+DEFINE_XEN_FLEX_RING(name)
+
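A brief usage sketch of the helpers generated above, assuming a hypothetical
protocol named "myproto"; the free-space check and the virt_wmb() placement are
illustrative choices of this sketch rather than requirements of the header:

DEFINE_XEN_FLEX_RING_AND_INTF(myproto);

/* Copy one packet into the "out" half of the ring, if it fits. */
static int myproto_send(struct myproto_data *ring,
			struct myproto_data_intf *intf,
			RING_IDX ring_size,
			const void *pkt, size_t len)
{
	RING_IDX masked_prod, masked_cons;

	/* free space is the ring size minus the bytes already queued */
	if (ring_size - myproto_queued(intf->out_prod, intf->out_cons,
				       ring_size) < len)
		return -1;	/* not enough room, retry later */

	masked_prod = myproto_mask(intf->out_prod, ring_size);
	masked_cons = myproto_mask(intf->out_cons, ring_size);
	myproto_write_packet(ring->out, pkt, len, &masked_prod,
			     masked_cons, ring_size);

	/* make the payload visible before publishing the new producer index */
	virt_wmb();
	intf->out_prod += len;
	return 0;
}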
#endif /* __XEN_PUBLIC_IO_RING_H__ */
diff --git a/include/xen/interface/io/sndif.h b/include/xen/interface/io/sndif.h
new file mode 100644
index 000000000000..5c918276835e
--- /dev/null
+++ b/include/xen/interface/io/sndif.h
@@ -0,0 +1,793 @@
+/******************************************************************************
+ * sndif.h
+ *
+ * Unified sound-device I/O interface for Xen guest OSes.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Copyright (C) 2013-2015 GlobalLogic Inc.
+ * Copyright (C) 2016-2017 EPAM Systems Inc.
+ *
+ * Authors: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
+ * Oleksandr Grytsov <oleksandr_grytsov@epam.com>
+ * Oleksandr Dmytryshyn <oleksandr.dmytryshyn@globallogic.com>
+ * Iurii Konovalenko <iurii.konovalenko@globallogic.com>
+ */
+
+#ifndef __XEN_PUBLIC_IO_SNDIF_H__
+#define __XEN_PUBLIC_IO_SNDIF_H__
+
+#include "ring.h"
+#include "../grant_table.h"
+
+/*
+ ******************************************************************************
+ * Feature and Parameter Negotiation
+ ******************************************************************************
+ *
+ * Front->back notifications: when enqueuing a new request, sending a
+ * notification can be made conditional on xensnd_req (i.e., the generic
+ * hold-off mechanism provided by the ring macros). Backends must set
+ * xensnd_req appropriately (e.g., using RING_FINAL_CHECK_FOR_REQUESTS()).
+ *
+ * Back->front notifications: when enqueuing a new response, sending a
+ * notification can be made conditional on xensnd_resp (i.e., the generic
+ * hold-off mechanism provided by the ring macros). Frontends must set
+ * xensnd_resp appropriately (e.g., using RING_FINAL_CHECK_FOR_RESPONSES()).
+ *
+ * The two halves of a para-virtual sound card driver utilize nodes within
+ * XenStore to communicate capabilities and to negotiate operating parameters.
+ * This section enumerates these nodes which reside in the respective front and
+ * backend portions of XenStore, following the XenBus convention.
+ *
+ * All data in XenStore is stored as strings. Nodes specifying numeric
+ * values are encoded in decimal. Integer value ranges listed below are
+ * expressed as fixed-size integer types capable of storing the conversion
+ * of a properly formatted node string, without loss of information.
+ *
+ ******************************************************************************
+ * Example configuration
+ ******************************************************************************
+ *
+ * Note: depending on the use case, the backend can expose more sound cards and
+ * PCM devices/streams than the underlying HW physically has, by employing
+ * SW mixers, configuring virtual sound streams, channels, etc.
+ *
+ * This is an example of backend and frontend configuration:
+ *
+ *--------------------------------- Backend -----------------------------------
+ *
+ * /local/domain/0/backend/vsnd/1/0/frontend-id = "1"
+ * /local/domain/0/backend/vsnd/1/0/frontend = "/local/domain/1/device/vsnd/0"
+ * /local/domain/0/backend/vsnd/1/0/state = "4"
+ * /local/domain/0/backend/vsnd/1/0/versions = "1,2"
+ *
+ *--------------------------------- Frontend ----------------------------------
+ *
+ * /local/domain/1/device/vsnd/0/backend-id = "0"
+ * /local/domain/1/device/vsnd/0/backend = "/local/domain/0/backend/vsnd/1/0"
+ * /local/domain/1/device/vsnd/0/state = "4"
+ * /local/domain/1/device/vsnd/0/version = "1"
+ *
+ *----------------------------- Card configuration ----------------------------
+ *
+ * /local/domain/1/device/vsnd/0/short-name = "Card short name"
+ * /local/domain/1/device/vsnd/0/long-name = "Card long name"
+ * /local/domain/1/device/vsnd/0/sample-rates = "8000,32000,44100,48000,96000"
+ * /local/domain/1/device/vsnd/0/sample-formats = "s8,u8,s16_le,s16_be"
+ * /local/domain/1/device/vsnd/0/buffer-size = "262144"
+ *
+ *------------------------------- PCM device 0 --------------------------------
+ *
+ * /local/domain/1/device/vsnd/0/0/name = "General analog"
+ * /local/domain/1/device/vsnd/0/0/channels-max = "5"
+ *
+ *----------------------------- Stream 0, playback ----------------------------
+ *
+ * /local/domain/1/device/vsnd/0/0/0/type = "p"
+ * /local/domain/1/device/vsnd/0/0/0/sample-formats = "s8,u8"
+ * /local/domain/1/device/vsnd/0/0/0/unique-id = "0"
+ *
+ * /local/domain/1/device/vsnd/0/0/0/ring-ref = "386"
+ * /local/domain/1/device/vsnd/0/0/0/event-channel = "15"
+ *
+ *------------------------------ Stream 1, capture ----------------------------
+ *
+ * /local/domain/1/device/vsnd/0/0/1/type = "c"
+ * /local/domain/1/device/vsnd/0/0/1/channels-max = "2"
+ * /local/domain/1/device/vsnd/0/0/1/unique-id = "1"
+ *
+ * /local/domain/1/device/vsnd/0/0/1/ring-ref = "384"
+ * /local/domain/1/device/vsnd/0/0/1/event-channel = "13"
+ *
+ *------------------------------- PCM device 1 --------------------------------
+ *
+ * /local/domain/1/device/vsnd/0/1/name = "HDMI-0"
+ * /local/domain/1/device/vsnd/0/1/sample-rates = "8000,32000,44100"
+ *
+ *------------------------------ Stream 0, capture ----------------------------
+ *
+ * /local/domain/1/device/vsnd/0/1/0/type = "c"
+ * /local/domain/1/device/vsnd/0/1/0/unique-id = "2"
+ *
+ * /local/domain/1/device/vsnd/0/1/0/ring-ref = "387"
+ * /local/domain/1/device/vsnd/0/1/0/event-channel = "151"
+ *
+ *------------------------------- PCM device 2 --------------------------------
+ *
+ * /local/domain/1/device/vsnd/0/2/name = "SPDIF"
+ *
+ *----------------------------- Stream 0, playback ----------------------------
+ *
+ * /local/domain/1/device/vsnd/0/2/0/type = "p"
+ * /local/domain/1/device/vsnd/0/2/0/unique-id = "3"
+ *
+ * /local/domain/1/device/vsnd/0/2/0/ring-ref = "389"
+ * /local/domain/1/device/vsnd/0/2/0/event-channel = "152"
+ *
+ ******************************************************************************
+ * Backend XenBus Nodes
+ ******************************************************************************
+ *
+ *----------------------------- Protocol version ------------------------------
+ *
+ * versions
+ * Values: <string>
+ *
+ * List of XENSND_LIST_SEPARATOR separated protocol versions supported
+ * by the backend. For example "1,2,3".
+ *
+ ******************************************************************************
+ * Frontend XenBus Nodes
+ ******************************************************************************
+ *
+ *-------------------------------- Addressing ---------------------------------
+ *
+ * dom-id
+ * Values: <uint16_t>
+ *
+ * Domain identifier.
+ *
+ * dev-id
+ * Values: <uint16_t>
+ *
+ * Device identifier.
+ *
+ * pcm-dev-idx
+ * Values: <uint8_t>
+ *
+ * Zero-based contiguous index of the PCM device.
+ *
+ * stream-idx
+ * Values: <uint8_t>
+ *
+ * Zero-based contiguous index of the stream of the PCM device.
+ *
+ * The following pattern is used for addressing:
+ * /local/domain/<dom-id>/device/vsnd/<dev-id>/<pcm-dev-idx>/<stream-idx>/...
+ *
+ *----------------------------- Protocol version ------------------------------
+ *
+ * version
+ * Values: <string>
+ *
+ * Protocol version, chosen among the ones supported by the backend.
+ *
+ *------------------------------- PCM settings --------------------------------
+ *
+ * Every virtualized sound frontend has a set of PCM devices and streams, each
+ * of which can be individually configured. Part of the PCM configuration can
+ * be defined at a higher level of the hierarchy and be fully or partially
+ * re-used by the underlying layers. These configuration values are:
+ * o number of channels (min/max)
+ * o supported sample rates
+ * o supported sample formats.
+ * E.g. one can define these values for the whole card, device or stream.
+ * Every underlying layer in turn can re-define some or all of them to better
+ * fit its needs. For example, a card may define the number of channels to be
+ * in the [1; 8] range, while some particular stream may be limited to [1; 2] only.
+ * The rule is that the underlying layer must be a subset of the upper layer
+ * range.
+ *
+ * channels-min
+ * Values: <uint8_t>
+ *
+ * The minimum number of channels that is supported, [1; channels-max].
+ * Optional; if omitted, a value of 1 is used.
+ *
+ * channels-max
+ * Values: <uint8_t>
+ *
+ * The maximum number of channels that is supported.
+ * Must be at least <channels-min>.
+ *
+ * sample-rates
+ * Values: <list of uint32_t>
+ *
+ * List of supported sample rates separated by XENSND_LIST_SEPARATOR.
+ * Sample rates are expressed as a list of decimal values without any
+ * ordering requirement.
+ *
+ * sample-formats
+ * Values: <list of XENSND_PCM_FORMAT_XXX_STR>
+ *
+ * List of supported sample formats separated by XENSND_LIST_SEPARATOR.
+ * Items must not exceed XENSND_SAMPLE_FORMAT_MAX_LEN in length.
+ *
+ * buffer-size
+ * Values: <uint32_t>
+ *
+ * The maximum size in octets of the buffer to allocate per stream.
+ *
+ *----------------------- Virtual sound card settings -------------------------
+ * short-name
+ * Values: <char[32]>
+ *
+ * Short name of the virtual sound card. Optional.
+ *
+ * long-name
+ * Values: <char[80]>
+ *
+ * Long name of the virtual sound card. Optional.
+ *
+ *----------------------------- Device settings -------------------------------
+ * name
+ * Values: <char[80]>
+ *
+ * Name of the sound device within the virtual sound card. Optional.
+ *
+ *----------------------------- Stream settings -------------------------------
+ *
+ * type
+ * Values: "p", "c"
+ *
+ * Stream type: "p" - playback stream, "c" - capture stream
+ *
+ * If both capture and playback are needed then two streams need to be
+ * defined under the same device.
+ *
+ * unique-id
+ * Values: <uint32_t>
+ *
+ * After stream initialization it is assigned a unique ID (within the front
+ * driver), so every stream of the frontend can be identified by the
+ * backend by this ID. This is not equal to stream-idx, as the latter is
+ * zero based within the device, whereas this index is contiguous within
+ * the driver.
+ *
+ *-------------------- Stream Request Transport Parameters --------------------
+ *
+ * event-channel
+ * Values: <uint32_t>
+ *
+ * The identifier of the Xen event channel used to signal activity
+ * in the ring buffer.
+ *
+ * ring-ref
+ * Values: <uint32_t>
+ *
+ * The Xen grant reference granting permission for the backend to map
+ * the sole page of the single-page-sized ring buffer.
+ *
+ ******************************************************************************
+ * STATE DIAGRAMS
+ ******************************************************************************
+ *
+ * Tool stack creates front and back state nodes with initial state
+ * XenbusStateInitialising.
+ * Tool stack creates and sets up frontend sound configuration nodes per domain.
+ *
+ * Front Back
+ * ================================= =====================================
+ * XenbusStateInitialising XenbusStateInitialising
+ * o Query backend device identification
+ * data.
+ * o Open and validate backend device.
+ * |
+ * |
+ * V
+ * XenbusStateInitWait
+ *
+ * o Query frontend configuration
+ * o Allocate and initialize
+ * event channels per configured
+ * playback/capture stream.
+ * o Publish transport parameters
+ * that will be in effect during
+ * this connection.
+ * |
+ * |
+ * V
+ * XenbusStateInitialised
+ *
+ * o Query frontend transport parameters.
+ * o Connect to the event channels.
+ * |
+ * |
+ * V
+ * XenbusStateConnected
+ *
+ * o Create and initialize OS
+ * virtual sound device instances
+ * as per configuration.
+ * |
+ * |
+ * V
+ * XenbusStateConnected
+ *
+ * XenbusStateUnknown
+ * XenbusStateClosed
+ * XenbusStateClosing
+ * o Remove virtual sound device
+ * o Remove event channels
+ * |
+ * |
+ * V
+ * XenbusStateClosed
+ *
+ *------------------------------- Recovery flow -------------------------------
+ *
+ * In case of unrecoverable frontend errors, the backend handles them as
+ * if the frontend had gone into the XenbusStateClosed state.
+ *
+ * In case of unrecoverable backend errors, the frontend tries to remove
+ * the virtualized device. If this is possible at the moment of error,
+ * then the frontend goes into the XenbusStateInitialising state and is ready
+ * for a new connection with the backend. If the virtualized device is still
+ * in use and cannot be removed, then the frontend goes into the
+ * XenbusStateReconfiguring state until either the virtualized device is
+ * removed or the backend initiates a new connection. On removal of the
+ * virtualized device, the frontend goes into the XenbusStateInitialising
+ * state.
+ *
+ * Note on the XenbusStateReconfiguring state of the frontend: if the backend
+ * has unrecoverable errors, then the frontend cannot send requests to the
+ * backend and thus cannot provide the functionality of the virtualized device
+ * anymore. After the backend is back to normal, the virtualized device may
+ * still hold some state: configuration in use, allocated buffers, client
+ * application state, etc. So, in most cases, this would require the frontend
+ * to implement complex recovery/reconnect logic. Instead, by going into the
+ * XenbusStateReconfiguring state, the frontend makes sure that no new clients
+ * of the virtualized device are accepted, and allows existing client(s) to
+ * exit gracefully by signaling an error state, etc.
+ * Once all the clients are gone, the frontend can reinitialize the virtualized
+ * device and get into the XenbusStateInitialising state again, signaling to
+ * the backend that a new connection can be made.
+ *
+ * There are multiple possible conditions under which the frontend will go from
+ * XenbusStateReconfiguring into XenbusStateInitialising; some of them are OS
+ * specific. For example:
+ * 1. The underlying OS framework may provide callbacks to signal that the last
+ * client of the virtualized device has gone and the device can be removed.
+ * 2. The frontend can schedule deferred work (timer/tasklet/workqueue)
+ * to periodically check if this is the right time to re-try removal of
+ * the virtualized device.
+ * 3. By any other means.
+ *
+ ******************************************************************************
+ * PCM FORMATS
+ ******************************************************************************
+ *
+ * XENSND_PCM_FORMAT_<format>[_<endian>]
+ *
+ * format: <S/U/F><bits> or <name>
+ * S - signed, U - unsigned, F - float
+ * bits - 8, 16, 24, 32
+ * name - MU_LAW, GSM, etc.
+ *
+ * endian: <LE/BE>, may be absent
+ * LE - Little endian, BE - Big endian
+ */
+#define XENSND_PCM_FORMAT_S8 0
+#define XENSND_PCM_FORMAT_U8 1
+#define XENSND_PCM_FORMAT_S16_LE 2
+#define XENSND_PCM_FORMAT_S16_BE 3
+#define XENSND_PCM_FORMAT_U16_LE 4
+#define XENSND_PCM_FORMAT_U16_BE 5
+#define XENSND_PCM_FORMAT_S24_LE 6
+#define XENSND_PCM_FORMAT_S24_BE 7
+#define XENSND_PCM_FORMAT_U24_LE 8
+#define XENSND_PCM_FORMAT_U24_BE 9
+#define XENSND_PCM_FORMAT_S32_LE 10
+#define XENSND_PCM_FORMAT_S32_BE 11
+#define XENSND_PCM_FORMAT_U32_LE 12
+#define XENSND_PCM_FORMAT_U32_BE 13
+#define XENSND_PCM_FORMAT_F32_LE 14 /* 4-byte float, IEEE-754 32-bit, */
+#define XENSND_PCM_FORMAT_F32_BE 15 /* range -1.0 to 1.0 */
+#define XENSND_PCM_FORMAT_F64_LE 16 /* 8-byte float, IEEE-754 64-bit, */
+#define XENSND_PCM_FORMAT_F64_BE 17 /* range -1.0 to 1.0 */
+#define XENSND_PCM_FORMAT_IEC958_SUBFRAME_LE 18
+#define XENSND_PCM_FORMAT_IEC958_SUBFRAME_BE 19
+#define XENSND_PCM_FORMAT_MU_LAW 20
+#define XENSND_PCM_FORMAT_A_LAW 21
+#define XENSND_PCM_FORMAT_IMA_ADPCM 22
+#define XENSND_PCM_FORMAT_MPEG 23
+#define XENSND_PCM_FORMAT_GSM 24
+
+/*
+ ******************************************************************************
+ * REQUEST CODES
+ ******************************************************************************
+ */
+#define XENSND_OP_OPEN 0
+#define XENSND_OP_CLOSE 1
+#define XENSND_OP_READ 2
+#define XENSND_OP_WRITE 3
+#define XENSND_OP_SET_VOLUME 4
+#define XENSND_OP_GET_VOLUME 5
+#define XENSND_OP_MUTE 6
+#define XENSND_OP_UNMUTE 7
+
+/*
+ ******************************************************************************
+ * XENSTORE FIELD AND PATH NAME STRINGS, HELPERS
+ ******************************************************************************
+ */
+#define XENSND_DRIVER_NAME "vsnd"
+
+#define XENSND_LIST_SEPARATOR ","
+/* Field names */
+#define XENSND_FIELD_BE_VERSIONS "versions"
+#define XENSND_FIELD_FE_VERSION "version"
+#define XENSND_FIELD_VCARD_SHORT_NAME "short-name"
+#define XENSND_FIELD_VCARD_LONG_NAME "long-name"
+#define XENSND_FIELD_RING_REF "ring-ref"
+#define XENSND_FIELD_EVT_CHNL "event-channel"
+#define XENSND_FIELD_DEVICE_NAME "name"
+#define XENSND_FIELD_TYPE "type"
+#define XENSND_FIELD_STREAM_UNIQUE_ID "unique-id"
+#define XENSND_FIELD_CHANNELS_MIN "channels-min"
+#define XENSND_FIELD_CHANNELS_MAX "channels-max"
+#define XENSND_FIELD_SAMPLE_RATES "sample-rates"
+#define XENSND_FIELD_SAMPLE_FORMATS "sample-formats"
+#define XENSND_FIELD_BUFFER_SIZE "buffer-size"
+
+/* Stream type field values. */
+#define XENSND_STREAM_TYPE_PLAYBACK "p"
+#define XENSND_STREAM_TYPE_CAPTURE "c"
+/* Sample rate max string length */
+#define XENSND_SAMPLE_RATE_MAX_LEN 11
+/* Sample format field values */
+#define XENSND_SAMPLE_FORMAT_MAX_LEN 24
+
+#define XENSND_PCM_FORMAT_S8_STR "s8"
+#define XENSND_PCM_FORMAT_U8_STR "u8"
+#define XENSND_PCM_FORMAT_S16_LE_STR "s16_le"
+#define XENSND_PCM_FORMAT_S16_BE_STR "s16_be"
+#define XENSND_PCM_FORMAT_U16_LE_STR "u16_le"
+#define XENSND_PCM_FORMAT_U16_BE_STR "u16_be"
+#define XENSND_PCM_FORMAT_S24_LE_STR "s24_le"
+#define XENSND_PCM_FORMAT_S24_BE_STR "s24_be"
+#define XENSND_PCM_FORMAT_U24_LE_STR "u24_le"
+#define XENSND_PCM_FORMAT_U24_BE_STR "u24_be"
+#define XENSND_PCM_FORMAT_S32_LE_STR "s32_le"
+#define XENSND_PCM_FORMAT_S32_BE_STR "s32_be"
+#define XENSND_PCM_FORMAT_U32_LE_STR "u32_le"
+#define XENSND_PCM_FORMAT_U32_BE_STR "u32_be"
+#define XENSND_PCM_FORMAT_F32_LE_STR "float_le"
+#define XENSND_PCM_FORMAT_F32_BE_STR "float_be"
+#define XENSND_PCM_FORMAT_F64_LE_STR "float64_le"
+#define XENSND_PCM_FORMAT_F64_BE_STR "float64_be"
+#define XENSND_PCM_FORMAT_IEC958_SUBFRAME_LE_STR "iec958_subframe_le"
+#define XENSND_PCM_FORMAT_IEC958_SUBFRAME_BE_STR "iec958_subframe_be"
+#define XENSND_PCM_FORMAT_MU_LAW_STR "mu_law"
+#define XENSND_PCM_FORMAT_A_LAW_STR "a_law"
+#define XENSND_PCM_FORMAT_IMA_ADPCM_STR "ima_adpcm"
+#define XENSND_PCM_FORMAT_MPEG_STR "mpeg"
+#define XENSND_PCM_FORMAT_GSM_STR "gsm"
+
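The string values above map one-to-one onto the XENSND_PCM_FORMAT_* codes
defined earlier. A small lookup sketch for translating one "sample-formats"
item read from XenStore; the table and the xensnd_parse_format() helper are
illustrative only, and the usual kernel ARRAY_SIZE()/strcmp() helpers are
assumed to be available:

struct xensnd_fmt_name {
	const char *name;
	uint8_t fmt;
};

static const struct xensnd_fmt_name xensnd_fmt_names[] = {
	{ XENSND_PCM_FORMAT_S8_STR,     XENSND_PCM_FORMAT_S8 },
	{ XENSND_PCM_FORMAT_U8_STR,     XENSND_PCM_FORMAT_U8 },
	{ XENSND_PCM_FORMAT_S16_LE_STR, XENSND_PCM_FORMAT_S16_LE },
	{ XENSND_PCM_FORMAT_S16_BE_STR, XENSND_PCM_FORMAT_S16_BE },
	{ XENSND_PCM_FORMAT_MU_LAW_STR, XENSND_PCM_FORMAT_MU_LAW },
	/* ... the remaining formats follow the same pattern ... */
};

/* Return the numeric format code for a sample-formats list item. */
static int xensnd_parse_format(const char *name, uint8_t *fmt)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(xensnd_fmt_names); i++) {
		if (!strcmp(name, xensnd_fmt_names[i].name)) {
			*fmt = xensnd_fmt_names[i].fmt;
			return 0;
		}
	}
	return -EINVAL;		/* unknown sample format string */
}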
+
+/*
+ ******************************************************************************
+ * STATUS RETURN CODES
+ ******************************************************************************
+ *
+ * Status return code is zero on success and -XEN_EXX on failure.
+ *
+ ******************************************************************************
+ * Assumptions
+ ******************************************************************************
+ * o usage of grant reference 0 as invalid grant reference:
+ * grant reference 0 is valid, but never exposed to a PV driver,
+ * because it is already in use/reserved by the PV console.
+ * o all references in this document to page sizes must be treated
+ * as pages of size XEN_PAGE_SIZE unless otherwise noted.
+ *
+ ******************************************************************************
+ * Description of the protocol between frontend and backend driver
+ ******************************************************************************
+ *
+ * The two halves of a para-virtual sound driver communicate with
+ * each other using shared pages and event channels.
+ * The shared page contains a ring with request/response packets.
+ *
+ * Packets used for input/output operations, e.g. read/write, set/get volume,
+ * etc., provide offset/length fields in order to allow asynchronous protocol
+ * operation with buffer space sharing: part of the buffer allocated at
+ * XENSND_OP_OPEN can be used for audio samples and part, for example,
+ * for volume control.
+ *
+ * All reserved fields in the structures below must be 0.
+ *
+ *---------------------------------- Requests ---------------------------------
+ *
+ * All request packets have the same length (32 octets)
+ * All request packets have common header:
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | id | operation | reserved | 4
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 8
+ * +----------------+----------------+----------------+----------------+
+ * id - uint16_t, private guest value, echoed in response
+ * operation - uint8_t, operation code, XENSND_OP_???
+ *
+ * For all packets which use offset and length:
+ * offset - uint32_t, read or write data offset within the shared buffer,
+ * passed with XENSND_OP_OPEN request, octets,
+ * [0; XENSND_OP_OPEN.buffer_sz - 1].
+ * length - uint32_t, read or write data length, octets
+ *
+ * Request open - open a PCM stream for playback or capture:
+ *
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | id | XENSND_OP_OPEN | reserved | 4
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 8
+ * +----------------+----------------+----------------+----------------+
+ * | pcm_rate | 12
+ * +----------------+----------------+----------------+----------------+
+ * | pcm_format | pcm_channels | reserved | 16
+ * +----------------+----------------+----------------+----------------+
+ * | buffer_sz | 20
+ * +----------------+----------------+----------------+----------------+
+ * | gref_directory | 24
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 28
+ * +----------------+----------------+----------------+----------------+
+ * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 32
+ * +----------------+----------------+----------------+----------------+
+ *
+ * pcm_rate - uint32_t, stream data rate, Hz
+ * pcm_format - uint8_t, XENSND_PCM_FORMAT_XXX value
+ * pcm_channels - uint8_t, number of channels of this stream,
+ * [channels-min; channels-max]
+ * buffer_sz - uint32_t, buffer size to be allocated, octets
+ * gref_directory - grant_ref_t, a reference to the first shared page
+ * describing shared buffer references. At least one page exists. If the shared
+ * buffer size (buffer_sz) exceeds what can be addressed by this single page,
+ * then a reference to the next page must be supplied (see gref_dir_next_page
+ * below).
+ */
+
+struct xensnd_open_req {
+ uint32_t pcm_rate;
+ uint8_t pcm_format;
+ uint8_t pcm_channels;
+ uint16_t reserved;
+ uint32_t buffer_sz;
+ grant_ref_t gref_directory;
+};
+
+/*
+ * Shared page for XENSND_OP_OPEN buffer descriptor (gref_directory in the
+ * request) employs a list of pages, describing all pages of the shared data
+ * buffer:
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | gref_dir_next_page | 4
+ * +----------------+----------------+----------------+----------------+
+ * | gref[0] | 8
+ * +----------------+----------------+----------------+----------------+
+ * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
+ * +----------------+----------------+----------------+----------------+
+ * | gref[i] | i*4+8
+ * +----------------+----------------+----------------+----------------+
+ * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
+ * +----------------+----------------+----------------+----------------+
+ * | gref[N - 1] | N*4+8
+ * +----------------+----------------+----------------+----------------+
+ *
+ * gref_dir_next_page - grant_ref_t, reference to the next page describing
+ * page directory. Must be 0 if there are no more pages in the list.
+ * gref[i] - grant_ref_t, reference to a shared page of the buffer
+ * allocated at XENSND_OP_OPEN
+ *
+ * The number of grant_ref_t entries in the whole page directory is not
+ * passed, but instead can be calculated as:
+ * num_grefs_total = (XENSND_OP_OPEN.buffer_sz + XEN_PAGE_SIZE - 1) /
+ * XEN_PAGE_SIZE
+ */
+
+struct xensnd_page_directory {
+ grant_ref_t gref_dir_next_page;
+ grant_ref_t gref[1]; /* Variable length */
+};
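
A small sketch of the sizing rule above: how many grant references a buffer
needs and how many directory pages are required to hold them.
XENSND_GREFS_PER_DIR_PAGE and xensnd_num_dir_pages() are hypothetical helper
names; XEN_PAGE_SIZE (4096) and DIV_ROUND_UP() are assumed to be available
from the usual kernel headers:

/* grant references that fit in one directory page, after the next-page link */
#define XENSND_GREFS_PER_DIR_PAGE \
	((XEN_PAGE_SIZE - sizeof(grant_ref_t)) / sizeof(grant_ref_t))

static unsigned int xensnd_num_dir_pages(uint32_t buffer_sz)
{
	/* one grant reference per data page of the shared buffer */
	unsigned int num_grefs_total = DIV_ROUND_UP(buffer_sz, XEN_PAGE_SIZE);

	return DIV_ROUND_UP(num_grefs_total, XENSND_GREFS_PER_DIR_PAGE);
}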
+
+/*
+ * Request close - close an opened pcm stream:
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | id | XENSND_OP_CLOSE| reserved | 4
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 8
+ * +----------------+----------------+----------------+----------------+
+ * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 32
+ * +----------------+----------------+----------------+----------------+
+ *
+ * Request read/write - used for read (for capture) or write (for playback):
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | id | operation | reserved | 4
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 8
+ * +----------------+----------------+----------------+----------------+
+ * | offset | 12
+ * +----------------+----------------+----------------+----------------+
+ * | length | 16
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 20
+ * +----------------+----------------+----------------+----------------+
+ * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 32
+ * +----------------+----------------+----------------+----------------+
+ *
+ * operation - XENSND_OP_READ for read or XENSND_OP_WRITE for write
+ */
+
+struct xensnd_rw_req {
+ uint32_t offset;
+ uint32_t length;
+};
+
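A tiny validation sketch for the offset/length rule stated earlier, i.e. that a
read or write must stay within the buffer negotiated at XENSND_OP_OPEN;
xensnd_check_rw() is a hypothetical helper, not part of the protocol:

static int xensnd_check_rw(const struct xensnd_rw_req *rw, uint32_t buffer_sz)
{
	if (rw->offset >= buffer_sz)
		return -EINVAL;
	if (rw->length > buffer_sz - rw->offset)
		return -EINVAL;		/* would run past the shared buffer */
	return 0;
}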
+/*
+ * Request set/get volume - set/get channels' volume of the stream given:
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | id | operation | reserved | 4
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 8
+ * +----------------+----------------+----------------+----------------+
+ * | offset | 12
+ * +----------------+----------------+----------------+----------------+
+ * | length | 16
+ * +----------------+----------------+----------------+----------------+
+ * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 32
+ * +----------------+----------------+----------------+----------------+
+ *
+ * operation - XENSND_OP_SET_VOLUME for volume set
+ * or XENSND_OP_GET_VOLUME for volume get
+ * Buffer passed with XENSND_OP_OPEN is used to exchange volume
+ * values:
+ *
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | channel[0] | 4
+ * +----------------+----------------+----------------+----------------+
+ * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
+ * +----------------+----------------+----------------+----------------+
+ * | channel[i] | i*4
+ * +----------------+----------------+----------------+----------------+
+ * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
+ * +----------------+----------------+----------------+----------------+
+ * | channel[N - 1] | (N-1)*4
+ * +----------------+----------------+----------------+----------------+
+ *
+ * N = XENSND_OP_OPEN.pcm_channels
+ * i - uint8_t, index of a channel
+ * channel[i] - sint32_t, volume of i-th channel
+ * Volume is expressed as a signed value in steps of 0.001 dB,
+ * with 0 being 0 dB.
+ *
+ * Request mute/unmute - mute/unmute stream:
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | id | operation | reserved | 4
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 8
+ * +----------------+----------------+----------------+----------------+
+ * | offset | 12
+ * +----------------+----------------+----------------+----------------+
+ * | length | 16
+ * +----------------+----------------+----------------+----------------+
+ * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 32
+ * +----------------+----------------+----------------+----------------+
+ *
+ * operation - XENSND_OP_MUTE for mute or XENSND_OP_UNMUTE for unmute
+ * Buffer passed with XENSND_OP_OPEN is used to exchange mute/unmute
+ * values:
+ *
+ * 0 octet
+ * +----------------+----------------+----------------+----------------+
+ * | channel[0] | 4
+ * +----------------+----------------+----------------+----------------+
+ * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
+ * +----------------+----------------+----------------+----------------+
+ * | channel[i] | i*4
+ * +----------------+----------------+----------------+----------------+
+ * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
+ * +----------------+----------------+----------------+----------------+
+ * | channel[N - 1] | (N-1)*4
+ * +----------------+----------------+----------------+----------------+
+ *
+ * N = XENSND_OP_OPEN.pcm_channels
+ * i - uint8_t, index of a channel
+ * channel[i] - uint8_t, non-zero if i-th channel needs to be muted/unmuted
+ *
+ *------------------------------------ N.B. -----------------------------------
+ *
+ * The 'struct xensnd_rw_req' is also used for XENSND_OP_SET_VOLUME,
+ * XENSND_OP_GET_VOLUME, XENSND_OP_MUTE, XENSND_OP_UNMUTE.
+ */
+
+/*
+ *---------------------------------- Responses --------------------------------
+ *
+ * All response packets have the same length (32 octets)
+ *
+ * Response for all requests:
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | id | operation | reserved | 4
+ * +----------------+----------------+----------------+----------------+
+ * | status | 8
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 12
+ * +----------------+----------------+----------------+----------------+
+ * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 32
+ * +----------------+----------------+----------------+----------------+
+ *
+ * id - uint16_t, copied from the request
+ * operation - uint8_t, XENSND_OP_* - copied from request
+ * status - int32_t, response status, zero on success and -XEN_EXX on failure
+ */
+
+struct xensnd_req {
+ uint16_t id;
+ uint8_t operation;
+ uint8_t reserved[5];
+ union {
+ struct xensnd_open_req open;
+ struct xensnd_rw_req rw;
+ uint8_t reserved[24];
+ } op;
+};
+
+struct xensnd_resp {
+ uint16_t id;
+ uint8_t operation;
+ uint8_t reserved;
+ int32_t status;
+ uint8_t reserved1[24];
+};
+
+DEFINE_RING_TYPES(xen_sndif, struct xensnd_req, struct xensnd_resp);
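
A frontend-side sketch of submitting an XENSND_OP_OPEN request on the ring
generated above. The ring/event-channel setup, the irq number and the
xensnd_send_open() function itself are assumptions of this example, while
RING_GET_REQUEST(), RING_PUSH_REQUESTS_AND_CHECK_NOTIFY() and
notify_remote_via_irq() are the standard Xen ring/event helpers:

static void xensnd_send_open(struct xen_sndif_front_ring *ring, int irq,
			     uint16_t id, uint32_t rate, uint8_t format,
			     uint8_t channels, uint32_t buffer_sz,
			     grant_ref_t gref_directory)
{
	struct xensnd_req *req;
	int notify;

	req = RING_GET_REQUEST(ring, ring->req_prod_pvt);
	memset(req, 0, sizeof(*req));		/* reserved fields must be 0 */
	req->id = id;				/* echoed back in the response */
	req->operation = XENSND_OP_OPEN;
	req->op.open.pcm_rate = rate;
	req->op.open.pcm_format = format;
	req->op.open.pcm_channels = channels;
	req->op.open.buffer_sz = buffer_sz;
	req->op.open.gref_directory = gref_directory;
	ring->req_prod_pvt++;

	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(ring, notify);
	if (notify)
		notify_remote_via_irq(irq);
}

The backend then answers with a struct xensnd_resp carrying the same id and a
status code as described in the Responses section above.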
+
+#endif /* __XEN_PUBLIC_IO_SNDIF_H__ */
diff --git a/include/xen/xen-ops.h b/include/xen/xen-ops.h
index b5486e648607..c44a2ee8c8f8 100644
--- a/include/xen/xen-ops.h
+++ b/include/xen/xen-ops.h
@@ -22,6 +22,8 @@ void xen_timer_resume(void);
void xen_arch_resume(void);
void xen_arch_suspend(void);
+void xen_reboot(int reason);
+
void xen_resume_notifier_register(struct notifier_block *nb);
void xen_resume_notifier_unregister(struct notifier_block *nb);
@@ -34,11 +36,25 @@ u64 xen_steal_clock(int cpu);
int xen_setup_shutdown_event(void);
extern unsigned long *xen_contiguous_bitmap;
+
+#ifdef CONFIG_XEN_PV
int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
unsigned int address_bits,
dma_addr_t *dma_handle);
void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order);
+#else
+static inline int xen_create_contiguous_region(phys_addr_t pstart,
+ unsigned int order,
+ unsigned int address_bits,
+ dma_addr_t *dma_handle)
+{
+ return 0;
+}
+
+static inline void xen_destroy_contiguous_region(phys_addr_t pstart,
+ unsigned int order) { }
+#endif
struct vm_area_struct;
@@ -120,6 +136,9 @@ efi_status_t xen_efi_update_capsule(efi_capsule_header_t **capsules,
efi_status_t xen_efi_query_capsule_caps(efi_capsule_header_t **capsules,
unsigned long count, u64 *max_size,
int *reset_type);
+void xen_efi_reset_system(int reset_type, efi_status_t status,
+ unsigned long data_size, efi_char16_t *data);
+
#ifdef CONFIG_PREEMPT
diff --git a/init/Kconfig b/init/Kconfig
index a92f27da4a27..1d3475fc9496 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -521,11 +521,41 @@ config RCU_EXPERT
config SRCU
bool
+ default y
help
This option selects the sleepable version of RCU. This version
permits arbitrary sleeping or blocking within RCU read-side critical
sections.
+config CLASSIC_SRCU
+ bool "Use v4.11 classic SRCU implementation"
+ default n
+ depends on RCU_EXPERT && SRCU
+ help
+ This option selects the traditional well-tested classic SRCU
+ implementation from v4.11, as might be desired for enterprise
+ Linux distributions. Without this option, the shiny new
+ Tiny SRCU and Tree SRCU implementations are used instead.
+ At some point, it is hoped that Tiny SRCU and Tree SRCU
+ will accumulate enough test time and confidence to allow
+ Classic SRCU to be dropped entirely.
+
+ Say Y if you need a rock-solid SRCU.
+
+ Say N if you would like to help test Tree SRCU.
+
+config TINY_SRCU
+ bool
+ default y if SRCU && TINY_RCU && !CLASSIC_SRCU
+ help
+ This option selects the single-CPU non-preemptible version of SRCU.
+
+config TREE_SRCU
+ bool
+ default y if SRCU && !TINY_RCU && !CLASSIC_SRCU
+ help
+ This option selects the full-fledged version of SRCU.
+
config TASKS_RCU
bool
default n
@@ -543,6 +573,9 @@ config RCU_STALL_COMMON
the tiny variants to disable RCU CPU stall warnings, while
making these warnings mandatory for the tree variants.
+config RCU_NEED_SEGCBLIST
+ def_bool ( TREE_RCU || PREEMPT_RCU || TINY_SRCU || TREE_SRCU )
+
config CONTEXT_TRACKING
bool
@@ -612,11 +645,17 @@ config RCU_FANOUT_LEAF
initialization. These systems tend to run CPU-bound, and thus
are not helped by synchronized interrupts, and thus tend to
skew them, which reduces lock contention enough that large
- leaf-level fanouts work well.
+ leaf-level fanouts work well. That said, setting leaf-level
+ fanout to a large number will likely cause problematic
+ lock contention on the leaf-level rcu_node structures unless
+ you boot with the skew_tick kernel parameter.
Select a specific number if testing RCU itself.
- Select the maximum permissible value for large systems.
+ Select the maximum permissible value for large systems, but
+ please understand that you may also need to set the skew_tick
+ kernel boot parameter to avoid contention on the rcu_node
+ structure's locks.
Take the default if unsure.
diff --git a/init/do_mounts.h b/init/do_mounts.h
index 067af1d9e8b6..282d65bfd674 100644
--- a/init/do_mounts.h
+++ b/init/do_mounts.h
@@ -19,29 +19,15 @@ static inline int create_dev(char *name, dev_t dev)
return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
}
-#if BITS_PER_LONG == 32
static inline u32 bstat(char *name)
{
- struct stat64 stat;
- if (sys_stat64(name, &stat) != 0)
+ struct kstat stat;
+ if (vfs_stat(name, &stat) != 0)
return 0;
- if (!S_ISBLK(stat.st_mode))
+ if (!S_ISBLK(stat.mode))
return 0;
- if (stat.st_rdev != (u32)stat.st_rdev)
- return 0;
- return stat.st_rdev;
-}
-#else
-static inline u32 bstat(char *name)
-{
- struct stat stat;
- if (sys_newstat(name, &stat) != 0)
- return 0;
- if (!S_ISBLK(stat.st_mode))
- return 0;
- return stat.st_rdev;
+ return stat.rdev;
}
-#endif
#ifdef CONFIG_BLK_DEV_RAM
diff --git a/init/initramfs.c b/init/initramfs.c
index 981f286c1d16..8a532050043f 100644
--- a/init/initramfs.c
+++ b/init/initramfs.c
@@ -312,10 +312,10 @@ static int __init maybe_link(void)
static void __init clean_path(char *path, umode_t fmode)
{
- struct stat st;
+ struct kstat st;
- if (!sys_newlstat(path, &st) && (st.st_mode ^ fmode) & S_IFMT) {
- if (S_ISDIR(st.st_mode))
+ if (!vfs_lstat(path, &st) && (st.mode ^ fmode) & S_IFMT) {
+ if (S_ISDIR(st.mode))
sys_rmdir(path);
else
sys_unlink(path);
@@ -581,13 +581,13 @@ static void __init clean_rootfs(void)
num = sys_getdents64(fd, dirp, BUF_SIZE);
while (num > 0) {
while (num > 0) {
- struct stat st;
+ struct kstat st;
int ret;
- ret = sys_newlstat(dirp->d_name, &st);
+ ret = vfs_lstat(dirp->d_name, &st);
WARN_ON_ONCE(ret);
if (!ret) {
- if (S_ISDIR(st.st_mode))
+ if (S_ISDIR(st.mode))
sys_rmdir(dirp->d_name);
else
sys_unlink(dirp->d_name);
@@ -608,10 +608,12 @@ static void __init clean_rootfs(void)
static int __init populate_rootfs(void)
{
+ /* Load the built-in initramfs */
char *err = unpack_to_rootfs(__initramfs_start, __initramfs_size);
if (err)
panic("%s", err); /* Failed to decompress INTERNAL initramfs */
- if (initrd_start) {
+ /* If available load the bootloader supplied initrd */
+ if (initrd_start && !IS_ENABLED(CONFIG_INITRAMFS_FORCE)) {
#ifdef CONFIG_BLK_DEV_RAM
int fd;
printk(KERN_INFO "Trying to unpack rootfs image as initramfs...\n");
@@ -640,6 +642,7 @@ static int __init populate_rootfs(void)
free_initrd();
}
done:
+ /* empty statement */;
#else
printk(KERN_INFO "Unpacking initramfs...\n");
err = unpack_to_rootfs((char *)initrd_start,
@@ -648,13 +651,14 @@ static int __init populate_rootfs(void)
printk(KERN_EMERG "Initramfs unpacking failed: %s\n", err);
free_initrd();
#endif
- flush_delayed_fput();
- /*
- * Try loading default modules from initramfs. This gives
- * us a chance to load before device_initcalls.
- */
- load_default_modules();
}
+ flush_delayed_fput();
+ /*
+ * Try loading default modules from initramfs. This gives
+ * us a chance to load before device_initcalls.
+ */
+ load_default_modules();
+
return 0;
}
rootfs_initcall(populate_rootfs);
diff --git a/init/main.c b/init/main.c
index cc48053bb39f..f866510472d7 100644
--- a/init/main.c
+++ b/init/main.c
@@ -27,7 +27,7 @@
#include <linux/initrd.h>
#include <linux/bootmem.h>
#include <linux/acpi.h>
-#include <linux/tty.h>
+#include <linux/console.h>
#include <linux/nmi.h>
#include <linux/percpu.h>
#include <linux/kmod.h>
diff --git a/ipc/shm.c b/ipc/shm.c
index 481d2a9c298a..34c4344e8d4b 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -1095,11 +1095,11 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg,
ulong *raddr, unsigned long shmlba)
{
struct shmid_kernel *shp;
- unsigned long addr;
+ unsigned long addr = (unsigned long)shmaddr;
unsigned long size;
struct file *file;
int err;
- unsigned long flags;
+ unsigned long flags = MAP_SHARED;
unsigned long prot;
int acc_mode;
struct ipc_namespace *ns;
@@ -1111,7 +1111,8 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg,
err = -EINVAL;
if (shmid < 0)
goto out;
- else if ((addr = (ulong)shmaddr)) {
+
+ if (addr) {
if (addr & (shmlba - 1)) {
/*
* Round down to the nearest multiple of shmlba.
@@ -1126,13 +1127,10 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg,
#endif
goto out;
}
- flags = MAP_SHARED | MAP_FIXED;
- } else {
- if ((shmflg & SHM_REMAP))
- goto out;
- flags = MAP_SHARED;
- }
+ flags |= MAP_FIXED;
+ } else if ((shmflg & SHM_REMAP))
+ goto out;
if (shmflg & SHM_RDONLY) {
prot = PROT_READ;
diff --git a/ipc/util.c b/ipc/util.c
index 3459a16a9df9..caec7b1bfaa3 100644
--- a/ipc/util.c
+++ b/ipc/util.c
@@ -403,12 +403,7 @@ void ipc_rmid(struct ipc_ids *ids, struct kern_ipc_perm *ipcp)
*/
void *ipc_alloc(int size)
{
- void *out;
- if (size > PAGE_SIZE)
- out = vmalloc(size);
- else
- out = kmalloc(size, GFP_KERNEL);
- return out;
+ return kvmalloc(size, GFP_KERNEL);
}
/**
diff --git a/ipc/util.h b/ipc/util.h
index 51f7ca58ac67..60ddccca464d 100644
--- a/ipc/util.h
+++ b/ipc/util.h
@@ -153,8 +153,6 @@ extern struct msg_msg *load_msg(const void __user *src, size_t len);
extern struct msg_msg *copy_msg(struct msg_msg *src, struct msg_msg *dst);
extern int store_msg(void __user *dest, struct msg_msg *msg, size_t len);
-extern void recompute_msgmni(struct ipc_namespace *);
-
static inline int ipc_buildid(int id, int seq)
{
return SEQ_MULTIPLIER * seq + id;
diff --git a/kernel/Makefile b/kernel/Makefile
index b302b4731d16..72aa080f91f0 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -59,6 +59,7 @@ obj-$(CONFIG_MODULES) += module.o
obj-$(CONFIG_MODULE_SIG) += module_signing.o
obj-$(CONFIG_KALLSYMS) += kallsyms.o
obj-$(CONFIG_BSD_PROCESS_ACCT) += acct.o
+obj-$(CONFIG_CRASH_CORE) += crash_core.o
obj-$(CONFIG_KEXEC_CORE) += kexec_core.o
obj-$(CONFIG_KEXEC) += kexec.o
obj-$(CONFIG_KEXEC_FILE) += kexec_file.o
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 6f81e0f5a0fa..dedf367f59bb 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -76,8 +76,7 @@ void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, uns
struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
{
- gfp_t gfp_flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO |
- gfp_extra_flags;
+ gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
struct bpf_prog_aux *aux;
struct bpf_prog *fp;
@@ -107,8 +106,7 @@ EXPORT_SYMBOL_GPL(bpf_prog_alloc);
struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
gfp_t gfp_extra_flags)
{
- gfp_t gfp_flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO |
- gfp_extra_flags;
+ gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
struct bpf_prog *fp;
u32 pages, delta;
int ret;
@@ -655,8 +653,7 @@ out:
static struct bpf_prog *bpf_prog_clone_create(struct bpf_prog *fp_other,
gfp_t gfp_extra_flags)
{
- gfp_t gfp_flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO |
- gfp_extra_flags;
+ gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
struct bpf_prog *fp;
fp = __vmalloc(fp_other->pages * PAGE_SIZE, gfp_flags, PAGE_KERNEL);
diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c
index fddcae801724..9bbd33497d3d 100644
--- a/kernel/bpf/inode.c
+++ b/kernel/bpf/inode.c
@@ -429,7 +429,7 @@ static int bpf_parse_options(char *data, struct bpf_mount_opts *opts)
static int bpf_fill_super(struct super_block *sb, void *data, int silent)
{
- static struct tree_descr bpf_rfiles[] = { { "" } };
+ static const struct tree_descr bpf_rfiles[] = { { "" } };
struct bpf_mount_opts opts;
struct inode *inode;
int ret;
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 13642c73dca0..fd2411fd6914 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -67,8 +67,7 @@ void *bpf_map_area_alloc(size_t size)
return area;
}
- return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | flags,
- PAGE_KERNEL);
+ return __vmalloc(size, GFP_KERNEL | flags, PAGE_KERNEL);
}
void bpf_map_area_free(void *area)
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index c2ff608c1984..c5b56c92f8e2 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -298,7 +298,8 @@ static const char *const bpf_jmp_string[16] = {
[BPF_EXIT >> 4] = "exit",
};
-static void print_bpf_insn(struct bpf_insn *insn)
+static void print_bpf_insn(const struct bpf_verifier_env *env,
+ const struct bpf_insn *insn)
{
u8 class = BPF_CLASS(insn->code);
@@ -362,9 +363,19 @@ static void print_bpf_insn(struct bpf_insn *insn)
insn->code,
bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
insn->src_reg, insn->imm);
- } else if (BPF_MODE(insn->code) == BPF_IMM) {
- verbose("(%02x) r%d = 0x%x\n",
- insn->code, insn->dst_reg, insn->imm);
+ } else if (BPF_MODE(insn->code) == BPF_IMM &&
+ BPF_SIZE(insn->code) == BPF_DW) {
+ /* At this point, we already made sure that the second
+ * part of the ldimm64 insn is accessible.
+ */
+ u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm;
+ bool map_ptr = insn->src_reg == BPF_PSEUDO_MAP_FD;
+
+ if (map_ptr && !env->allow_ptr_leaks)
+ imm = 0;
+
+ verbose("(%02x) r%d = 0x%llx\n", insn->code,
+ insn->dst_reg, (unsigned long long)imm);
} else {
verbose("BUG_ld_%02x\n", insn->code);
return;
@@ -2853,7 +2864,7 @@ static int do_check(struct bpf_verifier_env *env)
if (log_level) {
verbose("%d: ", insn_idx);
- print_bpf_insn(insn);
+ print_bpf_insn(env, insn);
}
err = ext_analyzer_insn_hook(env, insn_idx, prev_insn_idx);
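The immediate printed above is rebuilt from the two halves of a BPF_LD | BPF_IMM | BPF_DW instruction pair, each half carrying 32 bits in its imm field. A minimal compilable sketch of the same bit assembly (struct insn is a stand-in for struct bpf_insn, which has more fields):

#include <stdint.h>
#include <stdio.h>

struct insn { int32_t imm; };

/* Low 32 bits come from the first slot, high 32 bits from the second. */
static uint64_t rebuild_imm64(const struct insn *insn)
{
	return ((uint64_t)(uint32_t)insn[1].imm << 32) | (uint32_t)insn[0].imm;
}

int main(void)
{
	struct insn pair[2] = { { .imm = 0xdeadbeef }, { .imm = 0x12345678 } };

	printf("0x%llx\n", (unsigned long long)rebuild_imm64(pair));
	return 0;
}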
diff --git a/kernel/crash_core.c b/kernel/crash_core.c
new file mode 100644
index 000000000000..fcbd568f1e95
--- /dev/null
+++ b/kernel/crash_core.c
@@ -0,0 +1,439 @@
+/*
+ * crash_core.c - kernel crash support code.
+ * Copyright (C) 2002-2004 Eric Biederman <ebiederm@xmission.com>
+ *
+ * This source code is licensed under the GNU General Public License,
+ * Version 2. See the file COPYING for more details.
+ */
+
+#include <linux/crash_core.h>
+#include <linux/utsname.h>
+#include <linux/vmalloc.h>
+
+#include <asm/page.h>
+#include <asm/sections.h>
+
+/* vmcoreinfo stuff */
+static unsigned char vmcoreinfo_data[VMCOREINFO_BYTES];
+u32 vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4];
+size_t vmcoreinfo_size;
+size_t vmcoreinfo_max_size = sizeof(vmcoreinfo_data);
+
+/*
+ * parsing the "crashkernel" commandline
+ *
+ * this code is intended to be called from architecture specific code
+ */
+
+
+/*
+ * This function parses command lines in the format
+ *
+ * crashkernel=ramsize-range:size[,...][@offset]
+ *
+ * The function returns 0 on success and -EINVAL on failure.
+ */
+static int __init parse_crashkernel_mem(char *cmdline,
+ unsigned long long system_ram,
+ unsigned long long *crash_size,
+ unsigned long long *crash_base)
+{
+ char *cur = cmdline, *tmp;
+
+ /* for each entry of the comma-separated list */
+ do {
+ unsigned long long start, end = ULLONG_MAX, size;
+
+ /* get the start of the range */
+ start = memparse(cur, &tmp);
+ if (cur == tmp) {
+ pr_warn("crashkernel: Memory value expected\n");
+ return -EINVAL;
+ }
+ cur = tmp;
+ if (*cur != '-') {
+ pr_warn("crashkernel: '-' expected\n");
+ return -EINVAL;
+ }
+ cur++;
+
+		/* if no ':' is here, then we read the end */
+ if (*cur != ':') {
+ end = memparse(cur, &tmp);
+ if (cur == tmp) {
+ pr_warn("crashkernel: Memory value expected\n");
+ return -EINVAL;
+ }
+ cur = tmp;
+ if (end <= start) {
+ pr_warn("crashkernel: end <= start\n");
+ return -EINVAL;
+ }
+ }
+
+ if (*cur != ':') {
+ pr_warn("crashkernel: ':' expected\n");
+ return -EINVAL;
+ }
+ cur++;
+
+ size = memparse(cur, &tmp);
+ if (cur == tmp) {
+ pr_warn("Memory value expected\n");
+ return -EINVAL;
+ }
+ cur = tmp;
+ if (size >= system_ram) {
+ pr_warn("crashkernel: invalid size\n");
+ return -EINVAL;
+ }
+
+ /* match ? */
+ if (system_ram >= start && system_ram < end) {
+ *crash_size = size;
+ break;
+ }
+ } while (*cur++ == ',');
+
+ if (*crash_size > 0) {
+ while (*cur && *cur != ' ' && *cur != '@')
+ cur++;
+ if (*cur == '@') {
+ cur++;
+ *crash_base = memparse(cur, &tmp);
+ if (cur == tmp) {
+ pr_warn("Memory value expected after '@'\n");
+ return -EINVAL;
+ }
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * That function parses "simple" (old) crashkernel command lines like
+ *
+ * crashkernel=size[@offset]
+ *
+ * It returns 0 on success and -EINVAL on failure.
+ */
+static int __init parse_crashkernel_simple(char *cmdline,
+ unsigned long long *crash_size,
+ unsigned long long *crash_base)
+{
+ char *cur = cmdline;
+
+ *crash_size = memparse(cmdline, &cur);
+ if (cmdline == cur) {
+ pr_warn("crashkernel: memory value expected\n");
+ return -EINVAL;
+ }
+
+ if (*cur == '@')
+ *crash_base = memparse(cur+1, &cur);
+ else if (*cur != ' ' && *cur != '\0') {
+ pr_warn("crashkernel: unrecognized char: %c\n", *cur);
+ return -EINVAL;
+ }
+
+ return 0;
+}
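The simple form accepted here is crashkernel=size[@offset]. A standalone sketch of the same parse, with a strtoull-based stand-in for the kernel's memparse() (the real memparse also accepts lowercase and larger suffixes):

#include <stdio.h>
#include <stdlib.h>

/* Illustrative memparse() stand-in: number plus optional K/M/G suffix,
 * returning a pointer just past the parsed text via retptr.
 */
static unsigned long long memparse_demo(const char *s, char **retptr)
{
	unsigned long long v = strtoull(s, retptr, 0);

	switch (**retptr) {
	case 'G': v <<= 10; /* fall through */
	case 'M': v <<= 10; /* fall through */
	case 'K': v <<= 10; (*retptr)++; break;
	}
	return v;
}

int main(void)
{
	const char *cmdline = "256M@64M";	/* sample value, not from the patch */
	unsigned long long size, base = 0;
	char *cur;

	size = memparse_demo(cmdline, &cur);
	if (*cur == '@')
		base = memparse_demo(cur + 1, &cur);
	printf("crash_size=%llu crash_base=%llu\n", size, base);
	return 0;
}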
+
+#define SUFFIX_HIGH 0
+#define SUFFIX_LOW 1
+#define SUFFIX_NULL 2
+static __initdata char *suffix_tbl[] = {
+ [SUFFIX_HIGH] = ",high",
+ [SUFFIX_LOW] = ",low",
+ [SUFFIX_NULL] = NULL,
+};
+
+/*
+ * That function parses "suffix" crashkernel command lines like
+ *
+ * crashkernel=size,[high|low]
+ *
+ * It returns 0 on success and -EINVAL on failure.
+ */
+static int __init parse_crashkernel_suffix(char *cmdline,
+ unsigned long long *crash_size,
+ const char *suffix)
+{
+ char *cur = cmdline;
+
+ *crash_size = memparse(cmdline, &cur);
+ if (cmdline == cur) {
+ pr_warn("crashkernel: memory value expected\n");
+ return -EINVAL;
+ }
+
+ /* check with suffix */
+ if (strncmp(cur, suffix, strlen(suffix))) {
+ pr_warn("crashkernel: unrecognized char: %c\n", *cur);
+ return -EINVAL;
+ }
+ cur += strlen(suffix);
+ if (*cur != ' ' && *cur != '\0') {
+ pr_warn("crashkernel: unrecognized char: %c\n", *cur);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static __init char *get_last_crashkernel(char *cmdline,
+ const char *name,
+ const char *suffix)
+{
+ char *p = cmdline, *ck_cmdline = NULL;
+
+ /* find crashkernel and use the last one if there are more */
+ p = strstr(p, name);
+ while (p) {
+ char *end_p = strchr(p, ' ');
+ char *q;
+
+ if (!end_p)
+ end_p = p + strlen(p);
+
+ if (!suffix) {
+ int i;
+
+ /* skip the one with any known suffix */
+ for (i = 0; suffix_tbl[i]; i++) {
+ q = end_p - strlen(suffix_tbl[i]);
+ if (!strncmp(q, suffix_tbl[i],
+ strlen(suffix_tbl[i])))
+ goto next;
+ }
+ ck_cmdline = p;
+ } else {
+ q = end_p - strlen(suffix);
+ if (!strncmp(q, suffix, strlen(suffix)))
+ ck_cmdline = p;
+ }
+next:
+ p = strstr(p+1, name);
+ }
+
+ if (!ck_cmdline)
+ return NULL;
+
+ return ck_cmdline;
+}
+
+static int __init __parse_crashkernel(char *cmdline,
+ unsigned long long system_ram,
+ unsigned long long *crash_size,
+ unsigned long long *crash_base,
+ const char *name,
+ const char *suffix)
+{
+ char *first_colon, *first_space;
+ char *ck_cmdline;
+
+ BUG_ON(!crash_size || !crash_base);
+ *crash_size = 0;
+ *crash_base = 0;
+
+ ck_cmdline = get_last_crashkernel(cmdline, name, suffix);
+
+ if (!ck_cmdline)
+ return -EINVAL;
+
+ ck_cmdline += strlen(name);
+
+ if (suffix)
+ return parse_crashkernel_suffix(ck_cmdline, crash_size,
+ suffix);
+ /*
+ * if the commandline contains a ':', then that's the extended
+ * syntax -- if not, it must be the classic syntax
+ */
+ first_colon = strchr(ck_cmdline, ':');
+ first_space = strchr(ck_cmdline, ' ');
+ if (first_colon && (!first_space || first_colon < first_space))
+ return parse_crashkernel_mem(ck_cmdline, system_ram,
+ crash_size, crash_base);
+
+ return parse_crashkernel_simple(ck_cmdline, crash_size, crash_base);
+}
+
+/*
+ * That function is the entry point for command line parsing and should be
+ * called from the arch-specific code.
+ */
+int __init parse_crashkernel(char *cmdline,
+ unsigned long long system_ram,
+ unsigned long long *crash_size,
+ unsigned long long *crash_base)
+{
+ return __parse_crashkernel(cmdline, system_ram, crash_size, crash_base,
+ "crashkernel=", NULL);
+}
+
+int __init parse_crashkernel_high(char *cmdline,
+ unsigned long long system_ram,
+ unsigned long long *crash_size,
+ unsigned long long *crash_base)
+{
+ return __parse_crashkernel(cmdline, system_ram, crash_size, crash_base,
+ "crashkernel=", suffix_tbl[SUFFIX_HIGH]);
+}
+
+int __init parse_crashkernel_low(char *cmdline,
+ unsigned long long system_ram,
+ unsigned long long *crash_size,
+ unsigned long long *crash_base)
+{
+ return __parse_crashkernel(cmdline, system_ram, crash_size, crash_base,
+ "crashkernel=", suffix_tbl[SUFFIX_LOW]);
+}
+
+Elf_Word *append_elf_note(Elf_Word *buf, char *name, unsigned int type,
+ void *data, size_t data_len)
+{
+ struct elf_note *note = (struct elf_note *)buf;
+
+ note->n_namesz = strlen(name) + 1;
+ note->n_descsz = data_len;
+ note->n_type = type;
+ buf += DIV_ROUND_UP(sizeof(*note), sizeof(Elf_Word));
+ memcpy(buf, name, note->n_namesz);
+ buf += DIV_ROUND_UP(note->n_namesz, sizeof(Elf_Word));
+ memcpy(buf, data, data_len);
+ buf += DIV_ROUND_UP(data_len, sizeof(Elf_Word));
+
+ return buf;
+}
+
+void final_note(Elf_Word *buf)
+{
+ memset(buf, 0, sizeof(struct elf_note));
+}
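append_elf_note() lays the note out as a three-word header followed by the name and then the payload, each rounded up to an Elf_Word boundary; final_note() terminates the chain with an all-zero header. A compilable userspace sketch of that layout (demo_note and the sample payload are illustrative only):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define WORDS(x) (((x) + 3) / 4)	/* round a byte count up to 4-byte words */

struct demo_note {
	uint32_t n_namesz;
	uint32_t n_descsz;
	uint32_t n_type;
};

static uint32_t *append_note(uint32_t *buf, const char *name,
			     uint32_t type, const void *data, size_t len)
{
	struct demo_note *note = (struct demo_note *)buf;

	note->n_namesz = strlen(name) + 1;
	note->n_descsz = len;
	note->n_type = type;
	buf += WORDS(sizeof(*note));
	memcpy(buf, name, note->n_namesz);
	buf += WORDS(note->n_namesz);
	memcpy(buf, data, len);
	buf += WORDS(len);
	return buf;
}

int main(void)
{
	uint32_t buf[64] = { 0 };
	const char payload[] = "OSRELEASE=4.x\n";	/* sample payload only */
	uint32_t *end = append_note(buf, "VMCOREINFO", 0, payload, sizeof(payload));

	printf("note occupies %zu words\n", (size_t)(end - buf));
	return 0;
}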
+
+static void update_vmcoreinfo_note(void)
+{
+ u32 *buf = vmcoreinfo_note;
+
+ if (!vmcoreinfo_size)
+ return;
+ buf = append_elf_note(buf, VMCOREINFO_NOTE_NAME, 0, vmcoreinfo_data,
+ vmcoreinfo_size);
+ final_note(buf);
+}
+
+void crash_save_vmcoreinfo(void)
+{
+ vmcoreinfo_append_str("CRASHTIME=%ld\n", get_seconds());
+ update_vmcoreinfo_note();
+}
+
+void vmcoreinfo_append_str(const char *fmt, ...)
+{
+ va_list args;
+ char buf[0x50];
+ size_t r;
+
+ va_start(args, fmt);
+ r = vscnprintf(buf, sizeof(buf), fmt, args);
+ va_end(args);
+
+ r = min(r, vmcoreinfo_max_size - vmcoreinfo_size);
+
+ memcpy(&vmcoreinfo_data[vmcoreinfo_size], buf, r);
+
+ vmcoreinfo_size += r;
+}
+
+/*
+ * provide an empty default implementation here -- architecture
+ * code may override this
+ */
+void __weak arch_crash_save_vmcoreinfo(void)
+{}
+
+phys_addr_t __weak paddr_vmcoreinfo_note(void)
+{
+ return __pa_symbol((unsigned long)(char *)&vmcoreinfo_note);
+}
+
+static int __init crash_save_vmcoreinfo_init(void)
+{
+ VMCOREINFO_OSRELEASE(init_uts_ns.name.release);
+ VMCOREINFO_PAGESIZE(PAGE_SIZE);
+
+ VMCOREINFO_SYMBOL(init_uts_ns);
+ VMCOREINFO_SYMBOL(node_online_map);
+#ifdef CONFIG_MMU
+ VMCOREINFO_SYMBOL(swapper_pg_dir);
+#endif
+ VMCOREINFO_SYMBOL(_stext);
+ VMCOREINFO_SYMBOL(vmap_area_list);
+
+#ifndef CONFIG_NEED_MULTIPLE_NODES
+ VMCOREINFO_SYMBOL(mem_map);
+ VMCOREINFO_SYMBOL(contig_page_data);
+#endif
+#ifdef CONFIG_SPARSEMEM
+ VMCOREINFO_SYMBOL(mem_section);
+ VMCOREINFO_LENGTH(mem_section, NR_SECTION_ROOTS);
+ VMCOREINFO_STRUCT_SIZE(mem_section);
+ VMCOREINFO_OFFSET(mem_section, section_mem_map);
+#endif
+ VMCOREINFO_STRUCT_SIZE(page);
+ VMCOREINFO_STRUCT_SIZE(pglist_data);
+ VMCOREINFO_STRUCT_SIZE(zone);
+ VMCOREINFO_STRUCT_SIZE(free_area);
+ VMCOREINFO_STRUCT_SIZE(list_head);
+ VMCOREINFO_SIZE(nodemask_t);
+ VMCOREINFO_OFFSET(page, flags);
+ VMCOREINFO_OFFSET(page, _refcount);
+ VMCOREINFO_OFFSET(page, mapping);
+ VMCOREINFO_OFFSET(page, lru);
+ VMCOREINFO_OFFSET(page, _mapcount);
+ VMCOREINFO_OFFSET(page, private);
+ VMCOREINFO_OFFSET(page, compound_dtor);
+ VMCOREINFO_OFFSET(page, compound_order);
+ VMCOREINFO_OFFSET(page, compound_head);
+ VMCOREINFO_OFFSET(pglist_data, node_zones);
+ VMCOREINFO_OFFSET(pglist_data, nr_zones);
+#ifdef CONFIG_FLAT_NODE_MEM_MAP
+ VMCOREINFO_OFFSET(pglist_data, node_mem_map);
+#endif
+ VMCOREINFO_OFFSET(pglist_data, node_start_pfn);
+ VMCOREINFO_OFFSET(pglist_data, node_spanned_pages);
+ VMCOREINFO_OFFSET(pglist_data, node_id);
+ VMCOREINFO_OFFSET(zone, free_area);
+ VMCOREINFO_OFFSET(zone, vm_stat);
+ VMCOREINFO_OFFSET(zone, spanned_pages);
+ VMCOREINFO_OFFSET(free_area, free_list);
+ VMCOREINFO_OFFSET(list_head, next);
+ VMCOREINFO_OFFSET(list_head, prev);
+ VMCOREINFO_OFFSET(vmap_area, va_start);
+ VMCOREINFO_OFFSET(vmap_area, list);
+ VMCOREINFO_LENGTH(zone.free_area, MAX_ORDER);
+ log_buf_vmcoreinfo_setup();
+ VMCOREINFO_LENGTH(free_area.free_list, MIGRATE_TYPES);
+ VMCOREINFO_NUMBER(NR_FREE_PAGES);
+ VMCOREINFO_NUMBER(PG_lru);
+ VMCOREINFO_NUMBER(PG_private);
+ VMCOREINFO_NUMBER(PG_swapcache);
+ VMCOREINFO_NUMBER(PG_slab);
+#ifdef CONFIG_MEMORY_FAILURE
+ VMCOREINFO_NUMBER(PG_hwpoison);
+#endif
+ VMCOREINFO_NUMBER(PG_head_mask);
+ VMCOREINFO_NUMBER(PAGE_BUDDY_MAPCOUNT_VALUE);
+#ifdef CONFIG_HUGETLB_PAGE
+ VMCOREINFO_NUMBER(HUGETLB_PAGE_DTOR);
+#endif
+
+ arch_crash_save_vmcoreinfo();
+ update_vmcoreinfo_note();
+
+ return 0;
+}
+
+subsys_initcall(crash_save_vmcoreinfo_init);
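The VMCOREINFO_* macros used above expand to vmcoreinfo_append_str() calls, so the note body is plain KEY=value text that tools such as makedumpfile read back later (the exact format strings live in include/linux/crash_core.h in this series; demo_page below is just a stand-in struct). A tiny userspace illustration of the kind of lines produced:

#include <stdio.h>
#include <stddef.h>

struct demo_page { unsigned long flags; void *mapping; };

int main(void)
{
	printf("PAGESIZE=%ld\n", 4096L);
	printf("SIZE(demo_page)=%lu\n", (unsigned long)sizeof(struct demo_page));
	printf("OFFSET(demo_page.mapping)=%lu\n",
	       (unsigned long)offsetof(struct demo_page, mapping));
	return 0;
}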
diff --git a/kernel/fork.c b/kernel/fork.c
index dd5a371c392a..bfd91b180778 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -179,6 +179,24 @@ void __weak arch_release_thread_stack(unsigned long *stack)
*/
#define NR_CACHED_STACKS 2
static DEFINE_PER_CPU(struct vm_struct *, cached_stacks[NR_CACHED_STACKS]);
+
+static int free_vm_stack_cache(unsigned int cpu)
+{
+ struct vm_struct **cached_vm_stacks = per_cpu_ptr(cached_stacks, cpu);
+ int i;
+
+ for (i = 0; i < NR_CACHED_STACKS; i++) {
+ struct vm_struct *vm_stack = cached_vm_stacks[i];
+
+ if (!vm_stack)
+ continue;
+
+ vfree(vm_stack->addr);
+ cached_vm_stacks[i] = NULL;
+ }
+
+ return 0;
+}
#endif
static unsigned long *alloc_thread_stack_node(struct task_struct *tsk, int node)
@@ -203,7 +221,7 @@ static unsigned long *alloc_thread_stack_node(struct task_struct *tsk, int node)
stack = __vmalloc_node_range(THREAD_SIZE, THREAD_SIZE,
VMALLOC_START, VMALLOC_END,
- THREADINFO_GFP | __GFP_HIGHMEM,
+ THREADINFO_GFP,
PAGE_KERNEL,
0, node, __builtin_return_address(0));
@@ -467,6 +485,11 @@ void __init fork_init(void)
for (i = 0; i < UCOUNT_COUNTS; i++) {
init_user_ns.ucount_max[i] = max_threads/2;
}
+
+#ifdef CONFIG_VMAP_STACK
+ cpuhp_setup_state(CPUHP_BP_PREPARE_DYN, "fork:vm_stack_cache",
+ NULL, free_vm_stack_cache);
+#endif
}
int __weak arch_dup_task_struct(struct task_struct *dst,
@@ -1314,7 +1337,7 @@ void __cleanup_sighand(struct sighand_struct *sighand)
if (atomic_dec_and_test(&sighand->count)) {
signalfd_cleanup(sighand);
/*
- * sighand_cachep is SLAB_DESTROY_BY_RCU so we can free it
+ * sighand_cachep is SLAB_TYPESAFE_BY_RCU so we can free it
* without an RCU grace period, see __lock_task_sighand().
*/
kmem_cache_free(sighand_cachep, sighand);
@@ -2153,7 +2176,7 @@ void __init proc_caches_init(void)
{
sighand_cachep = kmem_cache_create("sighand_cache",
sizeof(struct sighand_struct), 0,
- SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_DESTROY_BY_RCU|
+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_TYPESAFE_BY_RCU|
SLAB_NOTRACK|SLAB_ACCOUNT, sighand_ctor);
signal_cachep = kmem_cache_create("signal_cache",
sizeof(struct signal_struct), 0,
diff --git a/kernel/groups.c b/kernel/groups.c
index 8dd7a61b7115..d09727692a2a 100644
--- a/kernel/groups.c
+++ b/kernel/groups.c
@@ -18,7 +18,7 @@ struct group_info *groups_alloc(int gidsetsize)
len = sizeof(struct group_info) + sizeof(kgid_t) * gidsetsize;
gi = kmalloc(len, GFP_KERNEL_ACCOUNT|__GFP_NOWARN|__GFP_NORETRY);
if (!gi)
- gi = __vmalloc(len, GFP_KERNEL_ACCOUNT|__GFP_HIGHMEM, PAGE_KERNEL);
+ gi = __vmalloc(len, GFP_KERNEL_ACCOUNT, PAGE_KERNEL);
if (!gi)
return NULL;
diff --git a/kernel/hung_task.c b/kernel/hung_task.c
index f0f8e2a8496f..751593ed7c0b 100644
--- a/kernel/hung_task.c
+++ b/kernel/hung_task.c
@@ -43,6 +43,7 @@ unsigned long __read_mostly sysctl_hung_task_timeout_secs = CONFIG_DEFAULT_HUNG_
int __read_mostly sysctl_hung_task_warnings = 10;
static int __read_mostly did_panic;
+static bool hung_task_show_lock;
static struct task_struct *watchdog_task;
@@ -120,12 +121,14 @@ static void check_hung_task(struct task_struct *t, unsigned long timeout)
pr_err("\"echo 0 > /proc/sys/kernel/hung_task_timeout_secs\""
" disables this message.\n");
sched_show_task(t);
- debug_show_all_locks();
+ hung_task_show_lock = true;
}
touch_nmi_watchdog();
if (sysctl_hung_task_panic) {
+ if (hung_task_show_lock)
+ debug_show_all_locks();
trigger_all_cpu_backtrace();
panic("hung_task: blocked tasks");
}
@@ -172,6 +175,7 @@ static void check_hung_uninterruptible_tasks(unsigned long timeout)
if (test_taint(TAINT_DIE) || did_panic)
return;
+ hung_task_show_lock = false;
rcu_read_lock();
for_each_process_thread(g, t) {
if (!max_count--)
@@ -187,6 +191,8 @@ static void check_hung_uninterruptible_tasks(unsigned long timeout)
}
unlock:
rcu_read_unlock();
+ if (hung_task_show_lock)
+ debug_show_all_locks();
}
static long hung_timeout_jiffies(unsigned long last_checked,
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index ae1c90f20381..070be980c37a 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -1559,7 +1559,7 @@ void remove_irq(unsigned int irq, struct irqaction *act)
struct irq_desc *desc = irq_to_desc(irq);
if (desc && !WARN_ON(irq_settings_is_per_cpu_devid(desc)))
- __free_irq(irq, act->dev_id);
+ __free_irq(irq, act->dev_id);
}
EXPORT_SYMBOL_GPL(remove_irq);
@@ -1576,20 +1576,27 @@ EXPORT_SYMBOL_GPL(remove_irq);
* have completed.
*
* This function must not be called from interrupt context.
+ *
+ * Returns the devname argument passed to request_irq.
*/
-void free_irq(unsigned int irq, void *dev_id)
+const void *free_irq(unsigned int irq, void *dev_id)
{
struct irq_desc *desc = irq_to_desc(irq);
+ struct irqaction *action;
+ const char *devname;
if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
- return;
+ return NULL;
#ifdef CONFIG_SMP
if (WARN_ON(desc->affinity_notify))
desc->affinity_notify = NULL;
#endif
- kfree(__free_irq(irq, dev_id));
+ action = __free_irq(irq, dev_id);
+ devname = action->name;
+ kfree(action);
+ return devname;
}
EXPORT_SYMBOL(free_irq);
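A hedged usage sketch of the new return value -- not taken from this series -- showing why handing back the devname is convenient when an interrupt must be released and immediately re-requested under the same name:

	/* Illustrative only: keep the original name across free/re-request. */
	const char *devname = free_irq(irq, dev_id);

	if (devname)
		err = request_irq(irq, handler, IRQF_SHARED, devname, dev_id);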
diff --git a/kernel/kcov.c b/kernel/kcov.c
index 85e5546cd791..cd771993f96f 100644
--- a/kernel/kcov.c
+++ b/kernel/kcov.c
@@ -60,15 +60,8 @@ void notrace __sanitizer_cov_trace_pc(void)
/*
* We are interested in code coverage as a function of a syscall inputs,
* so we ignore code executed in interrupts.
- * The checks for whether we are in an interrupt are open-coded, because
- * 1. We can't use in_interrupt() here, since it also returns true
- * when we are inside local_bh_disable() section.
- * 2. We don't want to use (in_irq() | in_serving_softirq() | in_nmi()),
- * since that leads to slower generated code (three separate tests,
- * one for each of the flags).
*/
- if (!t || (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_OFFSET
- | NMI_MASK)))
+ if (!t || !in_task())
return;
mode = READ_ONCE(t->kcov_mode);
if (mode == KCOV_MODE_TRACE) {
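For context, and assuming the usual include/linux/preempt.h definition rather than anything in this patch, in_task() folds the same three preempt_count() masks into a single test, which is why the open-coded check above can be dropped:

#define in_task()	(!(preempt_count() & \
			   (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET)))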
diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c
index bfe62d5b3872..ae1a3ba24df5 100644
--- a/kernel/kexec_core.c
+++ b/kernel/kexec_core.c
@@ -51,12 +51,6 @@ DEFINE_MUTEX(kexec_mutex);
/* Per cpu memory for storing cpu states in case of system crash. */
note_buf_t __percpu *crash_notes;
-/* vmcoreinfo stuff */
-static unsigned char vmcoreinfo_data[VMCOREINFO_BYTES];
-u32 vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4];
-size_t vmcoreinfo_size;
-size_t vmcoreinfo_max_size = sizeof(vmcoreinfo_data);
-
/* Flag to indicate we are going to kexec a new kernel */
bool kexec_in_progress = false;
@@ -996,34 +990,6 @@ unlock:
return ret;
}
-static u32 *append_elf_note(u32 *buf, char *name, unsigned type, void *data,
- size_t data_len)
-{
- struct elf_note note;
-
- note.n_namesz = strlen(name) + 1;
- note.n_descsz = data_len;
- note.n_type = type;
- memcpy(buf, &note, sizeof(note));
- buf += (sizeof(note) + 3)/4;
- memcpy(buf, name, note.n_namesz);
- buf += (note.n_namesz + 3)/4;
- memcpy(buf, data, note.n_descsz);
- buf += (note.n_descsz + 3)/4;
-
- return buf;
-}
-
-static void final_note(u32 *buf)
-{
- struct elf_note note;
-
- note.n_namesz = 0;
- note.n_descsz = 0;
- note.n_type = 0;
- memcpy(buf, &note, sizeof(note));
-}
-
void crash_save_cpu(struct pt_regs *regs, int cpu)
{
struct elf_prstatus prstatus;
@@ -1085,403 +1051,6 @@ subsys_initcall(crash_notes_memory_init);
/*
- * parsing the "crashkernel" commandline
- *
- * this code is intended to be called from architecture specific code
- */
-
-
-/*
- * This function parses command lines in the format
- *
- * crashkernel=ramsize-range:size[,...][@offset]
- *
- * The function returns 0 on success and -EINVAL on failure.
- */
-static int __init parse_crashkernel_mem(char *cmdline,
- unsigned long long system_ram,
- unsigned long long *crash_size,
- unsigned long long *crash_base)
-{
- char *cur = cmdline, *tmp;
-
- /* for each entry of the comma-separated list */
- do {
- unsigned long long start, end = ULLONG_MAX, size;
-
- /* get the start of the range */
- start = memparse(cur, &tmp);
- if (cur == tmp) {
- pr_warn("crashkernel: Memory value expected\n");
- return -EINVAL;
- }
- cur = tmp;
- if (*cur != '-') {
- pr_warn("crashkernel: '-' expected\n");
- return -EINVAL;
- }
- cur++;
-
- /* if no ':' is here, than we read the end */
- if (*cur != ':') {
- end = memparse(cur, &tmp);
- if (cur == tmp) {
- pr_warn("crashkernel: Memory value expected\n");
- return -EINVAL;
- }
- cur = tmp;
- if (end <= start) {
- pr_warn("crashkernel: end <= start\n");
- return -EINVAL;
- }
- }
-
- if (*cur != ':') {
- pr_warn("crashkernel: ':' expected\n");
- return -EINVAL;
- }
- cur++;
-
- size = memparse(cur, &tmp);
- if (cur == tmp) {
- pr_warn("Memory value expected\n");
- return -EINVAL;
- }
- cur = tmp;
- if (size >= system_ram) {
- pr_warn("crashkernel: invalid size\n");
- return -EINVAL;
- }
-
- /* match ? */
- if (system_ram >= start && system_ram < end) {
- *crash_size = size;
- break;
- }
- } while (*cur++ == ',');
-
- if (*crash_size > 0) {
- while (*cur && *cur != ' ' && *cur != '@')
- cur++;
- if (*cur == '@') {
- cur++;
- *crash_base = memparse(cur, &tmp);
- if (cur == tmp) {
- pr_warn("Memory value expected after '@'\n");
- return -EINVAL;
- }
- }
- }
-
- return 0;
-}
-
-/*
- * That function parses "simple" (old) crashkernel command lines like
- *
- * crashkernel=size[@offset]
- *
- * It returns 0 on success and -EINVAL on failure.
- */
-static int __init parse_crashkernel_simple(char *cmdline,
- unsigned long long *crash_size,
- unsigned long long *crash_base)
-{
- char *cur = cmdline;
-
- *crash_size = memparse(cmdline, &cur);
- if (cmdline == cur) {
- pr_warn("crashkernel: memory value expected\n");
- return -EINVAL;
- }
-
- if (*cur == '@')
- *crash_base = memparse(cur+1, &cur);
- else if (*cur != ' ' && *cur != '\0') {
- pr_warn("crashkernel: unrecognized char: %c\n", *cur);
- return -EINVAL;
- }
-
- return 0;
-}
-
-#define SUFFIX_HIGH 0
-#define SUFFIX_LOW 1
-#define SUFFIX_NULL 2
-static __initdata char *suffix_tbl[] = {
- [SUFFIX_HIGH] = ",high",
- [SUFFIX_LOW] = ",low",
- [SUFFIX_NULL] = NULL,
-};
-
-/*
- * That function parses "suffix" crashkernel command lines like
- *
- * crashkernel=size,[high|low]
- *
- * It returns 0 on success and -EINVAL on failure.
- */
-static int __init parse_crashkernel_suffix(char *cmdline,
- unsigned long long *crash_size,
- const char *suffix)
-{
- char *cur = cmdline;
-
- *crash_size = memparse(cmdline, &cur);
- if (cmdline == cur) {
- pr_warn("crashkernel: memory value expected\n");
- return -EINVAL;
- }
-
- /* check with suffix */
- if (strncmp(cur, suffix, strlen(suffix))) {
- pr_warn("crashkernel: unrecognized char: %c\n", *cur);
- return -EINVAL;
- }
- cur += strlen(suffix);
- if (*cur != ' ' && *cur != '\0') {
- pr_warn("crashkernel: unrecognized char: %c\n", *cur);
- return -EINVAL;
- }
-
- return 0;
-}
-
-static __init char *get_last_crashkernel(char *cmdline,
- const char *name,
- const char *suffix)
-{
- char *p = cmdline, *ck_cmdline = NULL;
-
- /* find crashkernel and use the last one if there are more */
- p = strstr(p, name);
- while (p) {
- char *end_p = strchr(p, ' ');
- char *q;
-
- if (!end_p)
- end_p = p + strlen(p);
-
- if (!suffix) {
- int i;
-
- /* skip the one with any known suffix */
- for (i = 0; suffix_tbl[i]; i++) {
- q = end_p - strlen(suffix_tbl[i]);
- if (!strncmp(q, suffix_tbl[i],
- strlen(suffix_tbl[i])))
- goto next;
- }
- ck_cmdline = p;
- } else {
- q = end_p - strlen(suffix);
- if (!strncmp(q, suffix, strlen(suffix)))
- ck_cmdline = p;
- }
-next:
- p = strstr(p+1, name);
- }
-
- if (!ck_cmdline)
- return NULL;
-
- return ck_cmdline;
-}
-
-static int __init __parse_crashkernel(char *cmdline,
- unsigned long long system_ram,
- unsigned long long *crash_size,
- unsigned long long *crash_base,
- const char *name,
- const char *suffix)
-{
- char *first_colon, *first_space;
- char *ck_cmdline;
-
- BUG_ON(!crash_size || !crash_base);
- *crash_size = 0;
- *crash_base = 0;
-
- ck_cmdline = get_last_crashkernel(cmdline, name, suffix);
-
- if (!ck_cmdline)
- return -EINVAL;
-
- ck_cmdline += strlen(name);
-
- if (suffix)
- return parse_crashkernel_suffix(ck_cmdline, crash_size,
- suffix);
- /*
- * if the commandline contains a ':', then that's the extended
- * syntax -- if not, it must be the classic syntax
- */
- first_colon = strchr(ck_cmdline, ':');
- first_space = strchr(ck_cmdline, ' ');
- if (first_colon && (!first_space || first_colon < first_space))
- return parse_crashkernel_mem(ck_cmdline, system_ram,
- crash_size, crash_base);
-
- return parse_crashkernel_simple(ck_cmdline, crash_size, crash_base);
-}
-
-/*
- * That function is the entry point for command line parsing and should be
- * called from the arch-specific code.
- */
-int __init parse_crashkernel(char *cmdline,
- unsigned long long system_ram,
- unsigned long long *crash_size,
- unsigned long long *crash_base)
-{
- return __parse_crashkernel(cmdline, system_ram, crash_size, crash_base,
- "crashkernel=", NULL);
-}
-
-int __init parse_crashkernel_high(char *cmdline,
- unsigned long long system_ram,
- unsigned long long *crash_size,
- unsigned long long *crash_base)
-{
- return __parse_crashkernel(cmdline, system_ram, crash_size, crash_base,
- "crashkernel=", suffix_tbl[SUFFIX_HIGH]);
-}
-
-int __init parse_crashkernel_low(char *cmdline,
- unsigned long long system_ram,
- unsigned long long *crash_size,
- unsigned long long *crash_base)
-{
- return __parse_crashkernel(cmdline, system_ram, crash_size, crash_base,
- "crashkernel=", suffix_tbl[SUFFIX_LOW]);
-}
-
-static void update_vmcoreinfo_note(void)
-{
- u32 *buf = vmcoreinfo_note;
-
- if (!vmcoreinfo_size)
- return;
- buf = append_elf_note(buf, VMCOREINFO_NOTE_NAME, 0, vmcoreinfo_data,
- vmcoreinfo_size);
- final_note(buf);
-}
-
-void crash_save_vmcoreinfo(void)
-{
- vmcoreinfo_append_str("CRASHTIME=%ld\n", get_seconds());
- update_vmcoreinfo_note();
-}
-
-void vmcoreinfo_append_str(const char *fmt, ...)
-{
- va_list args;
- char buf[0x50];
- size_t r;
-
- va_start(args, fmt);
- r = vscnprintf(buf, sizeof(buf), fmt, args);
- va_end(args);
-
- r = min(r, vmcoreinfo_max_size - vmcoreinfo_size);
-
- memcpy(&vmcoreinfo_data[vmcoreinfo_size], buf, r);
-
- vmcoreinfo_size += r;
-}
-
-/*
- * provide an empty default implementation here -- architecture
- * code may override this
- */
-void __weak arch_crash_save_vmcoreinfo(void)
-{}
-
-phys_addr_t __weak paddr_vmcoreinfo_note(void)
-{
- return __pa_symbol((unsigned long)(char *)&vmcoreinfo_note);
-}
-
-static int __init crash_save_vmcoreinfo_init(void)
-{
- VMCOREINFO_OSRELEASE(init_uts_ns.name.release);
- VMCOREINFO_PAGESIZE(PAGE_SIZE);
-
- VMCOREINFO_SYMBOL(init_uts_ns);
- VMCOREINFO_SYMBOL(node_online_map);
-#ifdef CONFIG_MMU
- VMCOREINFO_SYMBOL(swapper_pg_dir);
-#endif
- VMCOREINFO_SYMBOL(_stext);
- VMCOREINFO_SYMBOL(vmap_area_list);
-
-#ifndef CONFIG_NEED_MULTIPLE_NODES
- VMCOREINFO_SYMBOL(mem_map);
- VMCOREINFO_SYMBOL(contig_page_data);
-#endif
-#ifdef CONFIG_SPARSEMEM
- VMCOREINFO_SYMBOL(mem_section);
- VMCOREINFO_LENGTH(mem_section, NR_SECTION_ROOTS);
- VMCOREINFO_STRUCT_SIZE(mem_section);
- VMCOREINFO_OFFSET(mem_section, section_mem_map);
-#endif
- VMCOREINFO_STRUCT_SIZE(page);
- VMCOREINFO_STRUCT_SIZE(pglist_data);
- VMCOREINFO_STRUCT_SIZE(zone);
- VMCOREINFO_STRUCT_SIZE(free_area);
- VMCOREINFO_STRUCT_SIZE(list_head);
- VMCOREINFO_SIZE(nodemask_t);
- VMCOREINFO_OFFSET(page, flags);
- VMCOREINFO_OFFSET(page, _refcount);
- VMCOREINFO_OFFSET(page, mapping);
- VMCOREINFO_OFFSET(page, lru);
- VMCOREINFO_OFFSET(page, _mapcount);
- VMCOREINFO_OFFSET(page, private);
- VMCOREINFO_OFFSET(page, compound_dtor);
- VMCOREINFO_OFFSET(page, compound_order);
- VMCOREINFO_OFFSET(page, compound_head);
- VMCOREINFO_OFFSET(pglist_data, node_zones);
- VMCOREINFO_OFFSET(pglist_data, nr_zones);
-#ifdef CONFIG_FLAT_NODE_MEM_MAP
- VMCOREINFO_OFFSET(pglist_data, node_mem_map);
-#endif
- VMCOREINFO_OFFSET(pglist_data, node_start_pfn);
- VMCOREINFO_OFFSET(pglist_data, node_spanned_pages);
- VMCOREINFO_OFFSET(pglist_data, node_id);
- VMCOREINFO_OFFSET(zone, free_area);
- VMCOREINFO_OFFSET(zone, vm_stat);
- VMCOREINFO_OFFSET(zone, spanned_pages);
- VMCOREINFO_OFFSET(free_area, free_list);
- VMCOREINFO_OFFSET(list_head, next);
- VMCOREINFO_OFFSET(list_head, prev);
- VMCOREINFO_OFFSET(vmap_area, va_start);
- VMCOREINFO_OFFSET(vmap_area, list);
- VMCOREINFO_LENGTH(zone.free_area, MAX_ORDER);
- log_buf_kexec_setup();
- VMCOREINFO_LENGTH(free_area.free_list, MIGRATE_TYPES);
- VMCOREINFO_NUMBER(NR_FREE_PAGES);
- VMCOREINFO_NUMBER(PG_lru);
- VMCOREINFO_NUMBER(PG_private);
- VMCOREINFO_NUMBER(PG_swapcache);
- VMCOREINFO_NUMBER(PG_slab);
-#ifdef CONFIG_MEMORY_FAILURE
- VMCOREINFO_NUMBER(PG_hwpoison);
-#endif
- VMCOREINFO_NUMBER(PG_head_mask);
- VMCOREINFO_NUMBER(PAGE_BUDDY_MAPCOUNT_VALUE);
-#ifdef CONFIG_HUGETLB_PAGE
- VMCOREINFO_NUMBER(HUGETLB_PAGE_DTOR);
-#endif
-
- arch_crash_save_vmcoreinfo();
- update_vmcoreinfo_note();
-
- return 0;
-}
-
-subsys_initcall(crash_save_vmcoreinfo_init);
-
-/*
* Move into place and start executing a preloaded standalone
* executable. If nothing was preloaded return an error.
*/
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index d733479a10ee..7367e0ec6f81 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -58,15 +58,6 @@
#define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)
-/*
- * Some oddball architectures like 64bit powerpc have function descriptors
- * so this must be overridable.
- */
-#ifndef kprobe_lookup_name
-#define kprobe_lookup_name(name, addr) \
- addr = ((kprobe_opcode_t *)(kallsyms_lookup_name(name)))
-#endif
-
static int kprobes_initialized;
static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
@@ -81,6 +72,12 @@ static struct {
raw_spinlock_t lock ____cacheline_aligned_in_smp;
} kretprobe_table_locks[KPROBE_TABLE_SIZE];
+kprobe_opcode_t * __weak kprobe_lookup_name(const char *name,
+ unsigned int __unused)
+{
+ return ((kprobe_opcode_t *)(kallsyms_lookup_name(name)));
+}
+
static raw_spinlock_t *kretprobe_table_lock_ptr(unsigned long hash)
{
return &(kretprobe_table_locks[hash].lock);
@@ -746,13 +743,20 @@ static void kill_optimized_kprobe(struct kprobe *p)
arch_remove_optimized_kprobe(op);
}
+static inline
+void __prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
+{
+ if (!kprobe_ftrace(p))
+ arch_prepare_optimized_kprobe(op, p);
+}
+
/* Try to prepare optimized instructions */
static void prepare_optimized_kprobe(struct kprobe *p)
{
struct optimized_kprobe *op;
op = container_of(p, struct optimized_kprobe, kp);
- arch_prepare_optimized_kprobe(op, p);
+ __prepare_optimized_kprobe(op, p);
}
/* Allocate new optimized_kprobe and try to prepare optimized instructions */
@@ -766,7 +770,7 @@ static struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
INIT_LIST_HEAD(&op->list);
op->kp.addr = p->addr;
- arch_prepare_optimized_kprobe(op, p);
+ __prepare_optimized_kprobe(op, p);
return &op->kp;
}
@@ -1398,7 +1402,7 @@ static kprobe_opcode_t *_kprobe_addr(kprobe_opcode_t *addr,
goto invalid;
if (symbol_name) {
- kprobe_lookup_name(symbol_name, addr);
+ addr = kprobe_lookup_name(symbol_name, offset);
if (!addr)
return ERR_PTR(-ENOENT);
}
@@ -2218,8 +2222,8 @@ static int __init init_kprobes(void)
if (kretprobe_blacklist_size) {
/* lookup the function address from its name */
for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
- kprobe_lookup_name(kretprobe_blacklist[i].name,
- kretprobe_blacklist[i].addr);
+ kretprobe_blacklist[i].addr =
+ kprobe_lookup_name(kretprobe_blacklist[i].name, 0);
if (!kretprobe_blacklist[i].addr)
printk("kretprobe: lookup failed: %s\n",
kretprobe_blacklist[i].name);
diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
index 0999679d6f26..23cd70651238 100644
--- a/kernel/ksysfs.c
+++ b/kernel/ksysfs.c
@@ -125,6 +125,10 @@ static ssize_t kexec_crash_size_store(struct kobject *kobj,
}
KERNEL_ATTR_RW(kexec_crash_size);
+#endif /* CONFIG_KEXEC_CORE */
+
+#ifdef CONFIG_CRASH_CORE
+
static ssize_t vmcoreinfo_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
@@ -134,7 +138,7 @@ static ssize_t vmcoreinfo_show(struct kobject *kobj,
}
KERNEL_ATTR_RO(vmcoreinfo);
-#endif /* CONFIG_KEXEC_CORE */
+#endif /* CONFIG_CRASH_CORE */
/* whether file capabilities are enabled */
static ssize_t fscaps_show(struct kobject *kobj,
@@ -219,6 +223,8 @@ static struct attribute * kernel_attrs[] = {
&kexec_loaded_attr.attr,
&kexec_crash_loaded_attr.attr,
&kexec_crash_size_attr.attr,
+#endif
+#ifdef CONFIG_CRASH_CORE
&vmcoreinfo_attr.attr,
#endif
#ifndef CONFIG_TINY_RCU
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 0a1b3c748478..c0e31bfee25c 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -1158,10 +1158,10 @@ print_circular_bug_header(struct lock_list *entry, unsigned int depth,
return 0;
printk("\n");
- printk("======================================================\n");
- printk("[ INFO: possible circular locking dependency detected ]\n");
+ pr_warn("======================================================\n");
+ pr_warn("WARNING: possible circular locking dependency detected\n");
print_kernel_ident();
- printk("-------------------------------------------------------\n");
+ pr_warn("------------------------------------------------------\n");
printk("%s/%d is trying to acquire lock:\n",
curr->comm, task_pid_nr(curr));
print_lock(check_src);
@@ -1496,11 +1496,11 @@ print_bad_irq_dependency(struct task_struct *curr,
return 0;
printk("\n");
- printk("======================================================\n");
- printk("[ INFO: %s-safe -> %s-unsafe lock order detected ]\n",
+ pr_warn("=====================================================\n");
+ pr_warn("WARNING: %s-safe -> %s-unsafe lock order detected\n",
irqclass, irqclass);
print_kernel_ident();
- printk("------------------------------------------------------\n");
+ pr_warn("-----------------------------------------------------\n");
printk("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] is trying to acquire:\n",
curr->comm, task_pid_nr(curr),
curr->hardirq_context, hardirq_count() >> HARDIRQ_SHIFT,
@@ -1725,10 +1725,10 @@ print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
return 0;
printk("\n");
- printk("=============================================\n");
- printk("[ INFO: possible recursive locking detected ]\n");
+ pr_warn("============================================\n");
+ pr_warn("WARNING: possible recursive locking detected\n");
print_kernel_ident();
- printk("---------------------------------------------\n");
+ pr_warn("--------------------------------------------\n");
printk("%s/%d is trying to acquire lock:\n",
curr->comm, task_pid_nr(curr));
print_lock(next);
@@ -2075,10 +2075,10 @@ static void print_collision(struct task_struct *curr,
struct lock_chain *chain)
{
printk("\n");
- printk("======================\n");
- printk("[chain_key collision ]\n");
+ pr_warn("============================\n");
+ pr_warn("WARNING: chain_key collision\n");
print_kernel_ident();
- printk("----------------------\n");
+ pr_warn("----------------------------\n");
printk("%s/%d: ", current->comm, task_pid_nr(current));
printk("Hash chain already cached but the contents don't match!\n");
@@ -2374,10 +2374,10 @@ print_usage_bug(struct task_struct *curr, struct held_lock *this,
return 0;
printk("\n");
- printk("=================================\n");
- printk("[ INFO: inconsistent lock state ]\n");
+ pr_warn("================================\n");
+ pr_warn("WARNING: inconsistent lock state\n");
print_kernel_ident();
- printk("---------------------------------\n");
+ pr_warn("--------------------------------\n");
printk("inconsistent {%s} -> {%s} usage.\n",
usage_str[prev_bit], usage_str[new_bit]);
@@ -2439,10 +2439,10 @@ print_irq_inversion_bug(struct task_struct *curr,
return 0;
printk("\n");
- printk("=========================================================\n");
- printk("[ INFO: possible irq lock inversion dependency detected ]\n");
+ pr_warn("========================================================\n");
+ pr_warn("WARNING: possible irq lock inversion dependency detected\n");
print_kernel_ident();
- printk("---------------------------------------------------------\n");
+ pr_warn("--------------------------------------------------------\n");
printk("%s/%d just changed the state of lock:\n",
curr->comm, task_pid_nr(curr));
print_lock(this);
@@ -3190,10 +3190,10 @@ print_lock_nested_lock_not_held(struct task_struct *curr,
return 0;
printk("\n");
- printk("==================================\n");
- printk("[ BUG: Nested lock was not taken ]\n");
+ pr_warn("==================================\n");
+ pr_warn("WARNING: Nested lock was not taken\n");
print_kernel_ident();
- printk("----------------------------------\n");
+ pr_warn("----------------------------------\n");
printk("%s/%d is trying to lock:\n", curr->comm, task_pid_nr(curr));
print_lock(hlock);
@@ -3403,10 +3403,10 @@ print_unlock_imbalance_bug(struct task_struct *curr, struct lockdep_map *lock,
return 0;
printk("\n");
- printk("=====================================\n");
- printk("[ BUG: bad unlock balance detected! ]\n");
+ pr_warn("=====================================\n");
+ pr_warn("WARNING: bad unlock balance detected!\n");
print_kernel_ident();
- printk("-------------------------------------\n");
+ pr_warn("-------------------------------------\n");
printk("%s/%d is trying to release lock (",
curr->comm, task_pid_nr(curr));
print_lockdep_cache(lock);
@@ -3975,10 +3975,10 @@ print_lock_contention_bug(struct task_struct *curr, struct lockdep_map *lock,
return 0;
printk("\n");
- printk("=================================\n");
- printk("[ BUG: bad contention detected! ]\n");
+ pr_warn("=================================\n");
+ pr_warn("WARNING: bad contention detected!\n");
print_kernel_ident();
- printk("---------------------------------\n");
+ pr_warn("---------------------------------\n");
printk("%s/%d is trying to contend lock (",
curr->comm, task_pid_nr(curr));
print_lockdep_cache(lock);
@@ -4319,10 +4319,10 @@ print_freed_lock_bug(struct task_struct *curr, const void *mem_from,
return;
printk("\n");
- printk("=========================\n");
- printk("[ BUG: held lock freed! ]\n");
+ pr_warn("=========================\n");
+ pr_warn("WARNING: held lock freed!\n");
print_kernel_ident();
- printk("-------------------------\n");
+ pr_warn("-------------------------\n");
printk("%s/%d is freeing memory %p-%p, with a lock still held there!\n",
curr->comm, task_pid_nr(curr), mem_from, mem_to-1);
print_lock(hlock);
@@ -4377,11 +4377,11 @@ static void print_held_locks_bug(void)
return;
printk("\n");
- printk("=====================================\n");
- printk("[ BUG: %s/%d still has locks held! ]\n",
+ pr_warn("====================================\n");
+ pr_warn("WARNING: %s/%d still has locks held!\n",
current->comm, task_pid_nr(current));
print_kernel_ident();
- printk("-------------------------------------\n");
+ pr_warn("------------------------------------\n");
lockdep_print_held_locks(current);
printk("\nstack backtrace:\n");
dump_stack();
@@ -4446,7 +4446,7 @@ retry:
} while_each_thread(g, p);
printk("\n");
- printk("=============================================\n\n");
+ pr_warn("=============================================\n\n");
if (unlock)
read_unlock(&tasklist_lock);
@@ -4476,10 +4476,10 @@ asmlinkage __visible void lockdep_sys_exit(void)
if (!debug_locks_off())
return;
printk("\n");
- printk("================================================\n");
- printk("[ BUG: lock held when returning to user space! ]\n");
+ pr_warn("================================================\n");
+ pr_warn("WARNING: lock held when returning to user space!\n");
print_kernel_ident();
- printk("------------------------------------------------\n");
+ pr_warn("------------------------------------------------\n");
printk("%s/%d is leaving the kernel with locks still held!\n",
curr->comm, curr->pid);
lockdep_print_held_locks(curr);
@@ -4496,13 +4496,13 @@ void lockdep_rcu_suspicious(const char *file, const int line, const char *s)
#endif /* #ifdef CONFIG_PROVE_RCU_REPEATEDLY */
/* Note: the following can be executed concurrently, so be careful. */
printk("\n");
- pr_err("===============================\n");
- pr_err("[ ERR: suspicious RCU usage. ]\n");
+ pr_warn("=============================\n");
+ pr_warn("WARNING: suspicious RCU usage\n");
print_kernel_ident();
- pr_err("-------------------------------\n");
- pr_err("%s:%d %s!\n", file, line, s);
- pr_err("\nother info that might help us debug this:\n\n");
- pr_err("\n%srcu_scheduler_active = %d, debug_locks = %d\n",
+ pr_warn("-----------------------------\n");
+ printk("%s:%d %s!\n", file, line, s);
+ printk("\nother info that might help us debug this:\n\n");
+ printk("\n%srcu_scheduler_active = %d, debug_locks = %d\n",
!rcu_lockdep_current_cpu_online()
? "RCU used illegally from offline CPU!\n"
: !rcu_is_watching()
diff --git a/kernel/locking/rtmutex-debug.c b/kernel/locking/rtmutex-debug.c
index 32fe775a2eaf..58e366ad36f4 100644
--- a/kernel/locking/rtmutex-debug.c
+++ b/kernel/locking/rtmutex-debug.c
@@ -102,10 +102,11 @@ void debug_rt_mutex_print_deadlock(struct rt_mutex_waiter *waiter)
return;
}
- printk("\n============================================\n");
- printk( "[ BUG: circular locking deadlock detected! ]\n");
- printk("%s\n", print_tainted());
- printk( "--------------------------------------------\n");
+ pr_warn("\n");
+ pr_warn("============================================\n");
+ pr_warn("WARNING: circular locking deadlock detected!\n");
+ pr_warn("%s\n", print_tainted());
+ pr_warn("--------------------------------------------\n");
printk("%s/%d is deadlocking current task %s/%d\n\n",
task->comm, task_pid_nr(task),
current->comm, task_pid_nr(current));
diff --git a/kernel/module.c b/kernel/module.c
index 63321952c71c..4a3665f8f837 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -49,6 +49,9 @@
#include <linux/rculist.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>
+#ifdef CONFIG_STRICT_MODULE_RWX
+#include <asm/set_memory.h>
+#endif
#include <asm/mmu_context.h>
#include <linux/license.h>
#include <asm/sections.h>
@@ -2864,7 +2867,7 @@ static int copy_module_from_user(const void __user *umod, unsigned long len,
/* Suck in entire file: we'll want most of it. */
info->hdr = __vmalloc(info->len,
- GFP_KERNEL | __GFP_HIGHMEM | __GFP_NOWARN, PAGE_KERNEL);
+ GFP_KERNEL | __GFP_NOWARN, PAGE_KERNEL);
if (!info->hdr)
return -ENOMEM;
@@ -4035,7 +4038,7 @@ unsigned long module_kallsyms_lookup_name(const char *name)
/* Don't lock: we're in enough trouble already. */
preempt_disable();
- if ((colon = strchr(name, ':')) != NULL) {
+ if ((colon = strnchr(name, MODULE_NAME_LEN, ':')) != NULL) {
if ((mod = find_module_all(name, colon - name, false)) != NULL)
ret = mod_find_symname(mod, colon+1);
} else {
diff --git a/kernel/pid.c b/kernel/pid.c
index 0143ac0ddceb..fd1cde1e4576 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -321,8 +321,10 @@ struct pid *alloc_pid(struct pid_namespace *ns)
}
if (unlikely(is_child_reaper(pid))) {
- if (pid_ns_prepare_proc(ns))
+ if (pid_ns_prepare_proc(ns)) {
+ disable_pid_allocation(ns);
goto out_free;
+ }
}
get_pid_ns(ns);
diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
index de461aa0bf9a..d1f3e9f558b8 100644
--- a/kernel/pid_namespace.c
+++ b/kernel/pid_namespace.c
@@ -374,6 +374,29 @@ static struct ns_common *pidns_get(struct task_struct *task)
return ns ? &ns->ns : NULL;
}
+static struct ns_common *pidns_for_children_get(struct task_struct *task)
+{
+ struct pid_namespace *ns = NULL;
+
+ task_lock(task);
+ if (task->nsproxy) {
+ ns = task->nsproxy->pid_ns_for_children;
+ get_pid_ns(ns);
+ }
+ task_unlock(task);
+
+ if (ns) {
+ read_lock(&tasklist_lock);
+ if (!ns->child_reaper) {
+ put_pid_ns(ns);
+ ns = NULL;
+ }
+ read_unlock(&tasklist_lock);
+ }
+
+ return ns ? &ns->ns : NULL;
+}
+
static void pidns_put(struct ns_common *ns)
{
put_pid_ns(to_pid_ns(ns));
@@ -443,6 +466,17 @@ const struct proc_ns_operations pidns_operations = {
.get_parent = pidns_get_parent,
};
+const struct proc_ns_operations pidns_for_children_operations = {
+ .name = "pid_for_children",
+ .real_ns_name = "pid",
+ .type = CLONE_NEWPID,
+ .get = pidns_for_children_get,
+ .put = pidns_put,
+ .install = pidns_install,
+ .owner = pidns_owner,
+ .get_parent = pidns_get_parent,
+};
+
static __init int pid_namespaces_init(void)
{
pid_ns_cachep = KMEM_CACHE(pid_namespace, SLAB_PANIC);
diff --git a/kernel/power/process.c b/kernel/power/process.c
index c7209f060eeb..78672d324a6e 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -132,7 +132,7 @@ int freeze_processes(void)
if (!pm_freezing)
atomic_inc(&system_freezing_cnt);
- pm_wakeup_clear();
+ pm_wakeup_clear(true);
pr_info("Freezing user space processes ... ");
pm_freezing = true;
error = try_to_freeze_tasks(true);
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index d79a38de425a..3b1e0f3ad07f 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -36,6 +36,9 @@
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/io.h>
+#ifdef CONFIG_STRICT_KERNEL_RWX
+#include <asm/set_memory.h>
+#endif
#include "power.h"
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index 15e6baef5c73..c0248c74d6d4 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -72,6 +72,8 @@ static void freeze_begin(void)
static void freeze_enter(void)
{
+ trace_suspend_resume(TPS("machine_suspend"), PM_SUSPEND_FREEZE, true);
+
spin_lock_irq(&suspend_freeze_lock);
if (pm_wakeup_pending())
goto out;
@@ -98,6 +100,27 @@ static void freeze_enter(void)
out:
suspend_freeze_state = FREEZE_STATE_NONE;
spin_unlock_irq(&suspend_freeze_lock);
+
+ trace_suspend_resume(TPS("machine_suspend"), PM_SUSPEND_FREEZE, false);
+}
+
+static void s2idle_loop(void)
+{
+ do {
+ freeze_enter();
+
+ if (freeze_ops && freeze_ops->wake)
+ freeze_ops->wake();
+
+ dpm_resume_noirq(PMSG_RESUME);
+ if (freeze_ops && freeze_ops->sync)
+ freeze_ops->sync();
+
+ if (pm_wakeup_pending())
+ break;
+
+ pm_wakeup_clear(false);
+ } while (!dpm_suspend_noirq(PMSG_SUSPEND));
}
void freeze_wake(void)
@@ -371,10 +394,8 @@ static int suspend_enter(suspend_state_t state, bool *wakeup)
* all the devices are suspended.
*/
if (state == PM_SUSPEND_FREEZE) {
- trace_suspend_resume(TPS("machine_suspend"), state, true);
- freeze_enter();
- trace_suspend_resume(TPS("machine_suspend"), state, false);
- goto Platform_wake;
+ s2idle_loop();
+ goto Platform_early_resume;
}
error = disable_nonboot_cpus();
diff --git a/kernel/printk/braille.c b/kernel/printk/braille.c
index d5760c42f042..61d41ca41844 100644
--- a/kernel/printk/braille.c
+++ b/kernel/printk/braille.c
@@ -2,12 +2,13 @@
#include <linux/kernel.h>
#include <linux/console.h>
+#include <linux/errno.h>
#include <linux/string.h>
#include "console_cmdline.h"
#include "braille.h"
-char *_braille_console_setup(char **str, char **brl_options)
+int _braille_console_setup(char **str, char **brl_options)
{
if (!strncmp(*str, "brl,", 4)) {
*brl_options = "";
@@ -15,14 +16,14 @@ char *_braille_console_setup(char **str, char **brl_options)
} else if (!strncmp(*str, "brl=", 4)) {
*brl_options = *str + 4;
*str = strchr(*brl_options, ',');
- if (!*str)
+ if (!*str) {
pr_err("need port name after brl=\n");
- else
- *((*str)++) = 0;
- } else
- return NULL;
+ return -EINVAL;
+ }
+ *((*str)++) = 0;
+ }
- return *str;
+ return 0;
}
int
diff --git a/kernel/printk/braille.h b/kernel/printk/braille.h
index 769d771145c8..749a6756843a 100644
--- a/kernel/printk/braille.h
+++ b/kernel/printk/braille.h
@@ -9,7 +9,14 @@ braille_set_options(struct console_cmdline *c, char *brl_options)
c->brl_options = brl_options;
}
-char *
+/*
+ * Setup console according to braille options.
+ * Return -EINVAL on syntax error, 0 on success (or no braille option was
+ * actually given).
+ * Modifies str to point to the serial options
+ * Sets brl_options to the parsed braille options.
+ */
+int
_braille_console_setup(char **str, char **brl_options);
int
@@ -25,10 +32,10 @@ braille_set_options(struct console_cmdline *c, char *brl_options)
{
}
-static inline char *
+static inline int
_braille_console_setup(char **str, char **brl_options)
{
- return NULL;
+ return 0;
}
static inline int
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index 779479ac9f57..a1aecf44ab07 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -32,7 +32,7 @@
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/syscalls.h>
-#include <linux/kexec.h>
+#include <linux/crash_core.h>
#include <linux/kdb.h>
#include <linux/ratelimit.h>
#include <linux/kmsg_dump.h>
@@ -1002,7 +1002,7 @@ const struct file_operations kmsg_fops = {
.release = devkmsg_release,
};
-#ifdef CONFIG_KEXEC_CORE
+#ifdef CONFIG_CRASH_CORE
/*
* This appends the listed symbols to /proc/vmcore
*
@@ -1011,7 +1011,7 @@ const struct file_operations kmsg_fops = {
* symbols are specifically used so that utilities can access and extract the
* dmesg log from a vmcore file after a crash.
*/
-void log_buf_kexec_setup(void)
+void log_buf_vmcoreinfo_setup(void)
{
VMCOREINFO_SYMBOL(log_buf);
VMCOREINFO_SYMBOL(log_buf_len);
@@ -2642,6 +2642,30 @@ int unregister_console(struct console *console)
EXPORT_SYMBOL(unregister_console);
/*
+ * Initialize the console device. This is called *early*, so
+ * we can't necessarily depend on lots of kernel help here.
+ * Just do some early initializations, and do the complex setup
+ * later.
+ */
+void __init console_init(void)
+{
+ initcall_t *call;
+
+ /* Setup the default TTY line discipline. */
+ n_tty_init();
+
+ /*
+ * set up the console device so that later boot sequences can
+ * inform about problems etc..
+ */
+ call = __con_initcall_start;
+ while (call < __con_initcall_end) {
+ (*call)();
+ call++;
+ }
+}
+
+/*
* Some boot consoles access data that is in the init section and which will
* be discarded after the initcalls have been run. To make sure that no code
* will access this data, unregister the boot consoles in a late initcall.
diff --git a/kernel/rcu/Makefile b/kernel/rcu/Makefile
index 18dfc485225c..23803c7d5180 100644
--- a/kernel/rcu/Makefile
+++ b/kernel/rcu/Makefile
@@ -3,10 +3,13 @@
KCOV_INSTRUMENT := n
obj-y += update.o sync.o
-obj-$(CONFIG_SRCU) += srcu.o
+obj-$(CONFIG_CLASSIC_SRCU) += srcu.o
+obj-$(CONFIG_TREE_SRCU) += srcutree.o
+obj-$(CONFIG_TINY_SRCU) += srcutiny.o
obj-$(CONFIG_RCU_TORTURE_TEST) += rcutorture.o
obj-$(CONFIG_RCU_PERF_TEST) += rcuperf.o
obj-$(CONFIG_TREE_RCU) += tree.o
obj-$(CONFIG_PREEMPT_RCU) += tree.o
obj-$(CONFIG_TREE_RCU_TRACE) += tree_trace.o
obj-$(CONFIG_TINY_RCU) += tiny.o
+obj-$(CONFIG_RCU_NEED_SEGCBLIST) += rcu_segcblist.o
diff --git a/kernel/rcu/rcu.h b/kernel/rcu/rcu.h
index 0d6ff3e471be..73e16ec4054b 100644
--- a/kernel/rcu/rcu.h
+++ b/kernel/rcu/rcu.h
@@ -56,6 +56,83 @@
#define DYNTICK_TASK_EXIT_IDLE (DYNTICK_TASK_NEST_VALUE + \
DYNTICK_TASK_FLAG)
+
+/*
+ * Grace-period counter management.
+ */
+
+#define RCU_SEQ_CTR_SHIFT 2
+#define RCU_SEQ_STATE_MASK ((1 << RCU_SEQ_CTR_SHIFT) - 1)
+
+/*
+ * Return the counter portion of a sequence number previously returned
+ * by rcu_seq_snap() or rcu_seq_current().
+ */
+static inline unsigned long rcu_seq_ctr(unsigned long s)
+{
+ return s >> RCU_SEQ_CTR_SHIFT;
+}
+
+/*
+ * Return the state portion of a sequence number previously returned
+ * by rcu_seq_snap() or rcu_seq_current().
+ */
+static inline int rcu_seq_state(unsigned long s)
+{
+ return s & RCU_SEQ_STATE_MASK;
+}
+
+/*
+ * Set the state portion of the pointed-to sequence number.
+ * The caller is responsible for preventing conflicting updates.
+ */
+static inline void rcu_seq_set_state(unsigned long *sp, int newstate)
+{
+ WARN_ON_ONCE(newstate & ~RCU_SEQ_STATE_MASK);
+ WRITE_ONCE(*sp, (*sp & ~RCU_SEQ_STATE_MASK) + newstate);
+}
+
+/* Adjust sequence number for start of update-side operation. */
+static inline void rcu_seq_start(unsigned long *sp)
+{
+ WRITE_ONCE(*sp, *sp + 1);
+ smp_mb(); /* Ensure update-side operation after counter increment. */
+ WARN_ON_ONCE(rcu_seq_state(*sp) != 1);
+}
+
+/* Adjust sequence number for end of update-side operation. */
+static inline void rcu_seq_end(unsigned long *sp)
+{
+ smp_mb(); /* Ensure update-side operation before counter increment. */
+ WARN_ON_ONCE(!rcu_seq_state(*sp));
+ WRITE_ONCE(*sp, (*sp | RCU_SEQ_STATE_MASK) + 1);
+}
+
+/* Take a snapshot of the update side's sequence number. */
+static inline unsigned long rcu_seq_snap(unsigned long *sp)
+{
+ unsigned long s;
+
+ s = (READ_ONCE(*sp) + 2 * RCU_SEQ_STATE_MASK + 1) & ~RCU_SEQ_STATE_MASK;
+ smp_mb(); /* Above access must not bleed into critical section. */
+ return s;
+}
+
+/* Return the current value of the update side's sequence number, no ordering. */
+static inline unsigned long rcu_seq_current(unsigned long *sp)
+{
+ return READ_ONCE(*sp);
+}
+
+/*
+ * Given a snapshot from rcu_seq_snap(), determine whether or not a
+ * full update-side operation has occurred.
+ */
+static inline bool rcu_seq_done(unsigned long *sp, unsigned long s)
+{
+ return ULONG_CMP_GE(READ_ONCE(*sp), s);
+}
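A single-threaded userspace model of these helpers (memory barriers omitted, plain >= instead of ULONG_CMP_GE) showing that one complete start/end pair satisfies a snapshot taken while the counter was idle:

#include <stdio.h>

#define CTR_SHIFT  2
#define STATE_MASK ((1UL << CTR_SHIFT) - 1)

/* Mirror of rcu_seq_snap(): earliest value that implies a full operation. */
static unsigned long seq_snap(unsigned long s)
{
	return (s + 2 * STATE_MASK + 1) & ~STATE_MASK;
}

int main(void)
{
	unsigned long sp = 0, snap;

	snap = seq_snap(sp);		/* caller needs one full operation */
	sp = sp + 1;			/* rcu_seq_start(): state becomes 1 */
	sp = (sp | STATE_MASK) + 1;	/* rcu_seq_end(): next counter, state 0 */
	printf("ctr=%lu state=%lu done=%d\n",
	       sp >> CTR_SHIFT, sp & STATE_MASK, sp >= snap);
	return 0;
}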
+
/*
* debug_rcu_head_queue()/debug_rcu_head_unqueue() are used internally
* by call_rcu() and rcu callback execution, and are therefore not part of the
@@ -109,12 +186,12 @@ static inline bool __rcu_reclaim(const char *rn, struct rcu_head *head)
rcu_lock_acquire(&rcu_callback_map);
if (__is_kfree_rcu_offset(offset)) {
- RCU_TRACE(trace_rcu_invoke_kfree_callback(rn, head, offset));
+ RCU_TRACE(trace_rcu_invoke_kfree_callback(rn, head, offset);)
kfree((void *)head - offset);
rcu_lock_release(&rcu_callback_map);
return true;
} else {
- RCU_TRACE(trace_rcu_invoke_callback(rn, head));
+ RCU_TRACE(trace_rcu_invoke_callback(rn, head);)
head->func(head);
rcu_lock_release(&rcu_callback_map);
return false;
@@ -144,4 +221,76 @@ void rcu_test_sync_prims(void);
*/
extern void resched_cpu(int cpu);
+#if defined(SRCU) || !defined(TINY_RCU)
+
+#include <linux/rcu_node_tree.h>
+
+extern int rcu_num_lvls;
+extern int num_rcu_lvl[];
+extern int rcu_num_nodes;
+static bool rcu_fanout_exact;
+static int rcu_fanout_leaf;
+
+/*
+ * Compute the per-level fanout, either using the exact fanout specified
+ * or balancing the tree, depending on the rcu_fanout_exact boot parameter.
+ */
+static inline void rcu_init_levelspread(int *levelspread, const int *levelcnt)
+{
+ int i;
+
+ if (rcu_fanout_exact) {
+ levelspread[rcu_num_lvls - 1] = rcu_fanout_leaf;
+ for (i = rcu_num_lvls - 2; i >= 0; i--)
+ levelspread[i] = RCU_FANOUT;
+ } else {
+ int ccur;
+ int cprv;
+
+ cprv = nr_cpu_ids;
+ for (i = rcu_num_lvls - 1; i >= 0; i--) {
+ ccur = levelcnt[i];
+ levelspread[i] = (cprv + ccur - 1) / ccur;
+ cprv = ccur;
+ }
+ }
+}
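A standalone run of the balancing branch, assuming 64 possible CPUs served by four leaf rcu_node structures under a single root (the numbers are illustrative, not a statement about any particular configuration): each level's spread is ceil(units below / nodes at that level), computed bottom-up.

#include <stdio.h>

int main(void)
{
	int levelcnt[] = { 1, 4 };	/* root level, then 4 leaf nodes */
	int levelspread[2];
	int nr_cpu_ids = 64;
	int cprv = nr_cpu_ids, ccur, i;

	for (i = 1; i >= 0; i--) {
		ccur = levelcnt[i];
		levelspread[i] = (cprv + ccur - 1) / ccur;
		cprv = ccur;
	}
	printf("leaf spread=%d, root spread=%d\n", levelspread[1], levelspread[0]);
	return 0;
}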
+
+/*
+ * Do a full breadth-first scan of the rcu_node structures for the
+ * specified rcu_state structure.
+ */
+#define rcu_for_each_node_breadth_first(rsp, rnp) \
+ for ((rnp) = &(rsp)->node[0]; \
+ (rnp) < &(rsp)->node[rcu_num_nodes]; (rnp)++)
+
+/*
+ * Do a breadth-first scan of the non-leaf rcu_node structures for the
+ * specified rcu_state structure. Note that if there is a singleton
+ * rcu_node tree with but one rcu_node structure, this loop is a no-op.
+ */
+#define rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) \
+ for ((rnp) = &(rsp)->node[0]; \
+ (rnp) < (rsp)->level[rcu_num_lvls - 1]; (rnp)++)
+
+/*
+ * Scan the leaves of the rcu_node hierarchy for the specified rcu_state
+ * structure. Note that if there is a singleton rcu_node tree with but
+ * one rcu_node structure, this loop -will- visit the rcu_node structure.
+ * It is still a leaf node, even if it is also the root node.
+ */
+#define rcu_for_each_leaf_node(rsp, rnp) \
+ for ((rnp) = (rsp)->level[rcu_num_lvls - 1]; \
+ (rnp) < &(rsp)->node[rcu_num_nodes]; (rnp)++)
+
+/*
+ * Iterate over all possible CPUs in a leaf RCU node.
+ */
+#define for_each_leaf_node_possible_cpu(rnp, cpu) \
+ for ((cpu) = cpumask_next(rnp->grplo - 1, cpu_possible_mask); \
+ cpu <= rnp->grphi; \
+ cpu = cpumask_next((cpu), cpu_possible_mask))
+
+#endif /* #if defined(SRCU) || !defined(TINY_RCU) */
+
#endif /* __LINUX_RCU_H */
diff --git a/kernel/rcu/rcu_segcblist.c b/kernel/rcu/rcu_segcblist.c
new file mode 100644
index 000000000000..2b62a38b080f
--- /dev/null
+++ b/kernel/rcu/rcu_segcblist.c
@@ -0,0 +1,505 @@
+/*
+ * RCU segmented callback lists, function definitions
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * Copyright IBM Corporation, 2017
+ *
+ * Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+
+#include "rcu_segcblist.h"
+
+/* Initialize simple callback list. */
+void rcu_cblist_init(struct rcu_cblist *rclp)
+{
+ rclp->head = NULL;
+ rclp->tail = &rclp->head;
+ rclp->len = 0;
+ rclp->len_lazy = 0;
+}
+
+/*
+ * Debug function to actually count the number of callbacks.
+ * If the number exceeds the limit specified, return -1.
+ */
+long rcu_cblist_count_cbs(struct rcu_cblist *rclp, long lim)
+{
+ int cnt = 0;
+ struct rcu_head **rhpp = &rclp->head;
+
+ for (;;) {
+ if (!*rhpp)
+ return cnt;
+ if (++cnt > lim)
+ return -1;
+ rhpp = &(*rhpp)->next;
+ }
+}
+
+/*
+ * Dequeue the oldest rcu_head structure from the specified callback
+ * list. This function assumes that the callback is non-lazy, but
+ * the caller can later invoke rcu_cblist_dequeued_lazy() if it
+ * finds otherwise (and if it cares about laziness). This allows
+ * different users to have different ways of determining laziness.
+ */
+struct rcu_head *rcu_cblist_dequeue(struct rcu_cblist *rclp)
+{
+ struct rcu_head *rhp;
+
+ rhp = rclp->head;
+ if (!rhp)
+ return NULL;
+ rclp->len--;
+ rclp->head = rhp->next;
+ if (!rclp->head)
+ rclp->tail = &rclp->head;
+ return rhp;
+}
+
+/*
+ * Initialize an rcu_segcblist structure.
+ */
+void rcu_segcblist_init(struct rcu_segcblist *rsclp)
+{
+ int i;
+
+ BUILD_BUG_ON(RCU_NEXT_TAIL + 1 != ARRAY_SIZE(rsclp->gp_seq));
+ BUILD_BUG_ON(ARRAY_SIZE(rsclp->tails) != ARRAY_SIZE(rsclp->gp_seq));
+ rsclp->head = NULL;
+ for (i = 0; i < RCU_CBLIST_NSEGS; i++)
+ rsclp->tails[i] = &rsclp->head;
+ rsclp->len = 0;
+ rsclp->len_lazy = 0;
+}
+
+/*
+ * Disable the specified rcu_segcblist structure, so that callbacks can
+ * no longer be posted to it. This structure must be empty.
+ */
+void rcu_segcblist_disable(struct rcu_segcblist *rsclp)
+{
+ WARN_ON_ONCE(!rcu_segcblist_empty(rsclp));
+ WARN_ON_ONCE(rcu_segcblist_n_cbs(rsclp));
+ WARN_ON_ONCE(rcu_segcblist_n_lazy_cbs(rsclp));
+ rsclp->tails[RCU_NEXT_TAIL] = NULL;
+}
+
+/*
+ * Is the specified segment of the specified rcu_segcblist structure
+ * empty of callbacks?
+ */
+bool rcu_segcblist_segempty(struct rcu_segcblist *rsclp, int seg)
+{
+ if (seg == RCU_DONE_TAIL)
+ return &rsclp->head == rsclp->tails[RCU_DONE_TAIL];
+ return rsclp->tails[seg - 1] == rsclp->tails[seg];
+}
+
+/*
+ * Does the specified rcu_segcblist structure contain callbacks that
+ * are ready to be invoked?
+ */
+bool rcu_segcblist_ready_cbs(struct rcu_segcblist *rsclp)
+{
+ return rcu_segcblist_is_enabled(rsclp) &&
+ &rsclp->head != rsclp->tails[RCU_DONE_TAIL];
+}
+
+/*
+ * Does the specified rcu_segcblist structure contain callbacks that
+ * are still pending, that is, not yet ready to be invoked?
+ */
+bool rcu_segcblist_pend_cbs(struct rcu_segcblist *rsclp)
+{
+ return rcu_segcblist_is_enabled(rsclp) &&
+ !rcu_segcblist_restempty(rsclp, RCU_DONE_TAIL);
+}
+
+/*
+ * Dequeue and return the first ready-to-invoke callback. If there
+ * are no ready-to-invoke callbacks, return NULL. Disables interrupts
+ * to avoid interference. Does not protect from interference from other
+ * CPUs or tasks.
+ */
+struct rcu_head *rcu_segcblist_dequeue(struct rcu_segcblist *rsclp)
+{
+ unsigned long flags;
+ int i;
+ struct rcu_head *rhp;
+
+ local_irq_save(flags);
+ if (!rcu_segcblist_ready_cbs(rsclp)) {
+ local_irq_restore(flags);
+ return NULL;
+ }
+ rhp = rsclp->head;
+ BUG_ON(!rhp);
+ rsclp->head = rhp->next;
+ for (i = RCU_DONE_TAIL; i < RCU_CBLIST_NSEGS; i++) {
+ if (rsclp->tails[i] != &rhp->next)
+ break;
+ rsclp->tails[i] = &rsclp->head;
+ }
+ smp_mb(); /* Dequeue before decrement for rcu_barrier(). */
+ WRITE_ONCE(rsclp->len, rsclp->len - 1);
+ local_irq_restore(flags);
+ return rhp;
+}
+
+/*
+ * Account for the fact that a previously dequeued callback turned out
+ * to be marked as lazy.
+ */
+void rcu_segcblist_dequeued_lazy(struct rcu_segcblist *rsclp)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ rsclp->len_lazy--;
+ local_irq_restore(flags);
+}
+
+/*
+ * Return a pointer to the first callback in the specified rcu_segcblist
+ * structure. This is useful for diagnostics.
+ */
+struct rcu_head *rcu_segcblist_first_cb(struct rcu_segcblist *rsclp)
+{
+ if (rcu_segcblist_is_enabled(rsclp))
+ return rsclp->head;
+ return NULL;
+}
+
+/*
+ * Return a pointer to the first pending callback in the specified
+ * rcu_segcblist structure. This is useful just after posting a given
+ * callback -- if that callback is the first pending callback, then
+ * you cannot rely on someone else having already started up the required
+ * grace period.
+ */
+struct rcu_head *rcu_segcblist_first_pend_cb(struct rcu_segcblist *rsclp)
+{
+ if (rcu_segcblist_is_enabled(rsclp))
+ return *rsclp->tails[RCU_DONE_TAIL];
+ return NULL;
+}
+
+/*
+ * Does the specified rcu_segcblist structure contain callbacks that
+ * have not yet been processed beyond having been posted, that is,
+ * does it contain callbacks in its last segment?
+ */
+bool rcu_segcblist_new_cbs(struct rcu_segcblist *rsclp)
+{
+ return rcu_segcblist_is_enabled(rsclp) &&
+ !rcu_segcblist_restempty(rsclp, RCU_NEXT_READY_TAIL);
+}
+
+/*
+ * Enqueue the specified callback onto the specified rcu_segcblist
+ * structure, updating accounting as needed. Note that the ->len
+ * field may be accessed locklessly, hence the WRITE_ONCE().
+ * The ->len field is used by rcu_barrier() and friends to determine
+ * if it must post a callback on this structure, and it is OK
+ * for rcu_barrier() to sometimes post callbacks needlessly, but
+ * absolutely not OK for it to ever miss posting a callback.
+ */
+void rcu_segcblist_enqueue(struct rcu_segcblist *rsclp,
+ struct rcu_head *rhp, bool lazy)
+{
+ WRITE_ONCE(rsclp->len, rsclp->len + 1); /* ->len sampled locklessly. */
+ if (lazy)
+ rsclp->len_lazy++;
+ smp_mb(); /* Ensure counts are updated before callback is enqueued. */
+ rhp->next = NULL;
+ *rsclp->tails[RCU_NEXT_TAIL] = rhp;
+ rsclp->tails[RCU_NEXT_TAIL] = &rhp->next;
+}
+
+/*
+ * Entrain the specified callback onto the specified rcu_segcblist at
+ * the end of the last non-empty segment. If the entire rcu_segcblist
+ * is empty, make no change, but return false.
+ *
+ * This is intended for use by rcu_barrier()-like primitives, -not-
+ * for normal grace-period use. IMPORTANT: The callback you enqueue
+ * will wait for all prior callbacks, NOT necessarily for a grace
+ * period. You have been warned.
+ */
+bool rcu_segcblist_entrain(struct rcu_segcblist *rsclp,
+ struct rcu_head *rhp, bool lazy)
+{
+ int i;
+
+ if (rcu_segcblist_n_cbs(rsclp) == 0)
+ return false;
+ WRITE_ONCE(rsclp->len, rsclp->len + 1);
+ if (lazy)
+ rsclp->len_lazy++;
+ smp_mb(); /* Ensure counts are updated before callback is entrained. */
+ rhp->next = NULL;
+ for (i = RCU_NEXT_TAIL; i > RCU_DONE_TAIL; i--)
+ if (rsclp->tails[i] != rsclp->tails[i - 1])
+ break;
+ *rsclp->tails[i] = rhp;
+ for (; i <= RCU_NEXT_TAIL; i++)
+ rsclp->tails[i] = &rhp->next;
+ return true;
+}
+
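The following hedged sketch (not part of the patch) shows the rcu_barrier()-style usage the comment above has in mind: entrain a wakeup callback behind whatever is already queued on one list, completing immediately if the list is empty. The names example_barrier_cb(), example_barrier_done, and the one-barrier-at-a-time assumption are all hypothetical, and the locking a real caller needs is elided.

static DECLARE_COMPLETION(example_barrier_done);

static void example_barrier_cb(struct rcu_head *rhp)
{
	complete(&example_barrier_done);	/* All prior callbacks have run. */
}

static void example_barrier_one_list(struct rcu_segcblist *rsclp)
{
	static struct rcu_head rh;		/* One barrier at a time assumed. */

	rh.func = example_barrier_cb;
	if (!rcu_segcblist_entrain(rsclp, &rh, false))
		complete(&example_barrier_done);	/* List empty: nothing to wait for. */
	wait_for_completion(&example_barrier_done);
}
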
+/*
+ * Extract only the counts from the specified rcu_segcblist structure,
+ * and place them in the specified rcu_cblist structure. This function
+ * supports both callback orphaning and invocation, hence the separation
+ * of counts and callbacks. (Callbacks ready for invocation must be
+ * orphaned and adopted separately from pending callbacks, but counts
+ * apply to all callbacks. Locking must be used to make sure that
+ * both orphaned-callbacks lists are consistent.)
+ */
+void rcu_segcblist_extract_count(struct rcu_segcblist *rsclp,
+ struct rcu_cblist *rclp)
+{
+ rclp->len_lazy += rsclp->len_lazy;
+ rclp->len += rsclp->len;
+ rsclp->len_lazy = 0;
+ WRITE_ONCE(rsclp->len, 0); /* ->len sampled locklessly. */
+}
+
+/*
+ * Extract only those callbacks ready to be invoked from the specified
+ * rcu_segcblist structure and place them in the specified rcu_cblist
+ * structure.
+ */
+void rcu_segcblist_extract_done_cbs(struct rcu_segcblist *rsclp,
+ struct rcu_cblist *rclp)
+{
+ int i;
+
+ if (!rcu_segcblist_ready_cbs(rsclp))
+ return; /* Nothing to do. */
+ *rclp->tail = rsclp->head;
+ rsclp->head = *rsclp->tails[RCU_DONE_TAIL];
+ *rsclp->tails[RCU_DONE_TAIL] = NULL;
+ rclp->tail = rsclp->tails[RCU_DONE_TAIL];
+ for (i = RCU_CBLIST_NSEGS - 1; i >= RCU_DONE_TAIL; i--)
+ if (rsclp->tails[i] == rsclp->tails[RCU_DONE_TAIL])
+ rsclp->tails[i] = &rsclp->head;
+}
+
+/*
+ * Extract only those callbacks still pending (not yet ready to be
+ * invoked) from the specified rcu_segcblist structure and place them in
+ * the specified rcu_cblist structure. Note that this loses information
+ * about any callbacks that might have been partway done waiting for
+ * their grace period. Too bad! They will have to start over.
+ */
+void rcu_segcblist_extract_pend_cbs(struct rcu_segcblist *rsclp,
+ struct rcu_cblist *rclp)
+{
+ int i;
+
+ if (!rcu_segcblist_pend_cbs(rsclp))
+ return; /* Nothing to do. */
+ *rclp->tail = *rsclp->tails[RCU_DONE_TAIL];
+ rclp->tail = rsclp->tails[RCU_NEXT_TAIL];
+ *rsclp->tails[RCU_DONE_TAIL] = NULL;
+ for (i = RCU_DONE_TAIL + 1; i < RCU_CBLIST_NSEGS; i++)
+ rsclp->tails[i] = rsclp->tails[RCU_DONE_TAIL];
+}
+
+/*
+ * Insert counts from the specified rcu_cblist structure in the
+ * specified rcu_segcblist structure.
+ */
+void rcu_segcblist_insert_count(struct rcu_segcblist *rsclp,
+ struct rcu_cblist *rclp)
+{
+ rsclp->len_lazy += rclp->len_lazy;
+ /* ->len sampled locklessly. */
+ WRITE_ONCE(rsclp->len, rsclp->len + rclp->len);
+ rclp->len_lazy = 0;
+ rclp->len = 0;
+}
+
+/*
+ * Move callbacks from the specified rcu_cblist to the beginning of the
+ * done-callbacks segment of the specified rcu_segcblist.
+ */
+void rcu_segcblist_insert_done_cbs(struct rcu_segcblist *rsclp,
+ struct rcu_cblist *rclp)
+{
+ int i;
+
+ if (!rclp->head)
+ return; /* No callbacks to move. */
+ *rclp->tail = rsclp->head;
+ rsclp->head = rclp->head;
+ for (i = RCU_DONE_TAIL; i < RCU_CBLIST_NSEGS; i++)
+ if (&rsclp->head == rsclp->tails[i])
+ rsclp->tails[i] = rclp->tail;
+ else
+ break;
+ rclp->head = NULL;
+ rclp->tail = &rclp->head;
+}
+
+/*
+ * Move callbacks from the specified rcu_cblist to the end of the
+ * new-callbacks segment of the specified rcu_segcblist.
+ */
+void rcu_segcblist_insert_pend_cbs(struct rcu_segcblist *rsclp,
+ struct rcu_cblist *rclp)
+{
+ if (!rclp->head)
+ return; /* Nothing to do. */
+ *rsclp->tails[RCU_NEXT_TAIL] = rclp->head;
+ rsclp->tails[RCU_NEXT_TAIL] = rclp->tail;
+ rclp->head = NULL;
+ rclp->tail = &rclp->head;
+}
+
+/*
+ * Advance the callbacks in the specified rcu_segcblist structure based
+ * on the current value passed in for the grace-period counter.
+ */
+void rcu_segcblist_advance(struct rcu_segcblist *rsclp, unsigned long seq)
+{
+ int i, j;
+
+ WARN_ON_ONCE(!rcu_segcblist_is_enabled(rsclp));
+ if (rcu_segcblist_restempty(rsclp, RCU_DONE_TAIL))
+ return;
+
+ /*
+ * Find all callbacks whose ->gp_seq numbers indicate that they
+ * are ready to invoke, and put them into the RCU_DONE_TAIL segment.
+ */
+ for (i = RCU_WAIT_TAIL; i < RCU_NEXT_TAIL; i++) {
+ if (ULONG_CMP_LT(seq, rsclp->gp_seq[i]))
+ break;
+ rsclp->tails[RCU_DONE_TAIL] = rsclp->tails[i];
+ }
+
+ /* If no callbacks moved, nothing more need be done. */
+ if (i == RCU_WAIT_TAIL)
+ return;
+
+ /* Clean up tail pointers that might have been misordered above. */
+ for (j = RCU_WAIT_TAIL; j < i; j++)
+ rsclp->tails[j] = rsclp->tails[RCU_DONE_TAIL];
+
+ /*
+ * Callbacks moved, so clean up the misordered ->tails[] pointers
+ * that now point into the middle of the list of ready-to-invoke
+ * callbacks. The overall effect is to copy down the later pointers
+ * into the gap that was created by the now-ready segments.
+ */
+ for (j = RCU_WAIT_TAIL; i < RCU_NEXT_TAIL; i++, j++) {
+ if (rsclp->tails[j] == rsclp->tails[RCU_NEXT_TAIL])
+ break; /* No more callbacks. */
+ rsclp->tails[j] = rsclp->tails[i];
+ rsclp->gp_seq[j] = rsclp->gp_seq[i];
+ }
+}
+
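A hypothetical numeric trace of the advance operation (values are illustrative only, not from the patch):

/*
 * Suppose ->gp_seq[RCU_WAIT_TAIL] = 200, ->gp_seq[RCU_NEXT_READY_TAIL] = 204,
 * and rcu_segcblist_advance() is called with seq = 200:
 *   - The RCU_WAIT_TAIL segment's grace period has completed, so its
 *     callbacks are merged into RCU_DONE_TAIL.
 *   - seq < 204, so the loop stops at RCU_NEXT_READY_TAIL; that segment's
 *     tail pointer and ->gp_seq value are then copied down into the
 *     now-vacant RCU_WAIT_TAIL slot.
 * Net effect: one segment became ready to invoke, and the remaining
 * not-yet-ready callbacks slid toward the front of the ->tails[] array.
 */
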
+/*
+ * "Accelerate" callbacks based on more-accurate grace-period information.
+ * The reason for this is that RCU does not synchronize the beginnings and
+ * ends of grace periods, and that callbacks are posted locally. This in
+ * turn means that the callbacks must be labelled conservatively early
+ * on, as getting exact information would degrade both performance and
+ * scalability. When more accurate grace-period information becomes
+ * available, previously posted callbacks can be "accelerated", marking
+ * them to complete at the end of the earlier grace period.
+ *
+ * This function operates on an rcu_segcblist structure, and also the
+ * grace-period sequence number seq at which new callbacks would become
+ * ready to invoke. Returns true if there are callbacks that won't be
+ * ready to invoke until seq, false otherwise.
+ */
+bool rcu_segcblist_accelerate(struct rcu_segcblist *rsclp, unsigned long seq)
+{
+ int i;
+
+ WARN_ON_ONCE(!rcu_segcblist_is_enabled(rsclp));
+ if (rcu_segcblist_restempty(rsclp, RCU_DONE_TAIL))
+ return false;
+
+ /*
+ * Find the segment preceding the oldest segment of callbacks
+ * whose ->gp_seq[] completion is at or after that passed in via
+ * "seq", skipping any empty segments. This oldest segment, along
+ * with any later segments, can be merged in with any newly arrived
+ * callbacks in the RCU_NEXT_TAIL segment, and assigned "seq"
+ * as their ->gp_seq[] grace-period completion sequence number.
+ */
+ for (i = RCU_NEXT_READY_TAIL; i > RCU_DONE_TAIL; i--)
+ if (rsclp->tails[i] != rsclp->tails[i - 1] &&
+ ULONG_CMP_LT(rsclp->gp_seq[i], seq))
+ break;
+
+ /*
+ * If all the segments contain callbacks that correspond to
+ * earlier grace-period sequence numbers than "seq", leave.
+ * Assuming that the rcu_segcblist structure has enough
+ * segments in its arrays, this can only happen if some of
+ * the non-done segments contain callbacks that really are
+ * ready to invoke. This situation will get straightened
+ * out by the next call to rcu_segcblist_advance().
+ *
+ * Also advance to the oldest segment of callbacks whose
+ * ->gp_seq[] completion is at or after that passed in via "seq",
+ * skipping any empty segments.
+ */
+ if (++i >= RCU_NEXT_TAIL)
+ return false;
+
+ /*
+ * Merge all later callbacks, including newly arrived callbacks,
+ * into the segment located by the for-loop above. Assign "seq"
+ * as the ->gp_seq[] value in order to correctly handle the case
+ * where there were no pending callbacks in the rcu_segcblist
+ * structure other than in the RCU_NEXT_TAIL segment.
+ */
+ for (; i < RCU_NEXT_TAIL; i++) {
+ rsclp->tails[i] = rsclp->tails[RCU_NEXT_TAIL];
+ rsclp->gp_seq[i] = seq;
+ }
+ return true;
+}
+
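A matching hypothetical trace for acceleration (again, illustrative numbers only):

/*
 * Suppose only RCU_NEXT_TAIL holds callbacks and the caller passes
 * seq = 208 (the grace period that newly posted callbacks must wait for):
 *   - The scan finds no non-empty segment with an older ->gp_seq[], so
 *     "i" advances to the first slot after RCU_DONE_TAIL.
 *   - The RCU_NEXT_TAIL callbacks are merged into that slot and tagged
 *     with ->gp_seq[] = 208.
 * The function returns true, telling the caller that a grace period
 * ending at 208 is needed before these callbacks can be invoked.
 */
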
+/*
+ * Scan the specified rcu_segcblist structure for callbacks that need
+ * a grace period later than the one specified by "seq". We don't look
+ * at the RCU_DONE_TAIL or RCU_NEXT_TAIL segments because they don't
+ * have a grace-period sequence number.
+ */
+bool rcu_segcblist_future_gp_needed(struct rcu_segcblist *rsclp,
+ unsigned long seq)
+{
+ int i;
+
+ for (i = RCU_WAIT_TAIL; i < RCU_NEXT_TAIL; i++)
+ if (rsclp->tails[i - 1] != rsclp->tails[i] &&
+ ULONG_CMP_LT(seq, rsclp->gp_seq[i]))
+ return true;
+ return false;
+}
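To tie the pieces of rcu_segcblist.c together, here is a hedged, hypothetical sketch of one callback's life cycle through the list. It elides the locking, interrupt disabling, and count re-insertion that real callers (for example srcu_drive_gp() later in this series) must perform, and the helper name is an assumption.

static void example_segcblist_cycle(struct rcu_segcblist *rsclp,
				    struct rcu_head *rhp,
				    unsigned long *gp_seq)
{
	struct rcu_cblist ready;
	struct rcu_head *cb;

	rcu_segcblist_enqueue(rsclp, rhp, false);	/* Lands in RCU_NEXT_TAIL. */
	rcu_segcblist_accelerate(rsclp, rcu_seq_snap(gp_seq));	/* Tag needed GP. */

	/* ... the grace-period machinery advances *gp_seq ... */

	rcu_segcblist_advance(rsclp, rcu_seq_current(gp_seq));	/* Ready segments -> DONE. */
	if (rcu_segcblist_ready_cbs(rsclp)) {
		rcu_cblist_init(&ready);
		rcu_segcblist_extract_done_cbs(rsclp, &ready);
		while ((cb = rcu_cblist_dequeue(&ready)) != NULL)
			cb->func(cb);			/* Invoke the callback. */
	}
}
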
diff --git a/kernel/rcu/rcu_segcblist.h b/kernel/rcu/rcu_segcblist.h
new file mode 100644
index 000000000000..6e36e36478cd
--- /dev/null
+++ b/kernel/rcu/rcu_segcblist.h
@@ -0,0 +1,164 @@
+/*
+ * RCU segmented callback lists, internal-to-rcu header file
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * Copyright IBM Corporation, 2017
+ *
+ * Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+ */
+
+#include <linux/rcu_segcblist.h>
+
+/*
+ * Account for the fact that a previously dequeued callback turned out
+ * to be marked as lazy.
+ */
+static inline void rcu_cblist_dequeued_lazy(struct rcu_cblist *rclp)
+{
+ rclp->len_lazy--;
+}
+
+/*
+ * Interim function to return rcu_cblist head pointer. Longer term, the
+ * rcu_cblist will be used more pervasively, removing the need for this
+ * function.
+ */
+static inline struct rcu_head *rcu_cblist_head(struct rcu_cblist *rclp)
+{
+ return rclp->head;
+}
+
+/*
+ * Interim function to return rcu_cblist tail pointer. Longer term, the
+ * rcu_cblist will be used more pervasively, removing the need for this
+ * function.
+ */
+static inline struct rcu_head **rcu_cblist_tail(struct rcu_cblist *rclp)
+{
+ WARN_ON_ONCE(!rclp->head);
+ return rclp->tail;
+}
+
+void rcu_cblist_init(struct rcu_cblist *rclp);
+long rcu_cblist_count_cbs(struct rcu_cblist *rclp, long lim);
+struct rcu_head *rcu_cblist_dequeue(struct rcu_cblist *rclp);
+
+/*
+ * Is the specified rcu_segcblist structure empty?
+ *
+ * But careful! The fact that the ->head field is NULL does not
+ * necessarily imply that there are no callbacks associated with
+ * this structure. When callbacks are being invoked, they are
+ * removed as a group. If callback invocation must be preempted,
+ * the remaining callbacks will be added back to the list. Either
+ * way, the counts are updated later.
+ *
+ * So it is often the case that rcu_segcblist_n_cbs() should be used
+ * instead.
+ */
+static inline bool rcu_segcblist_empty(struct rcu_segcblist *rsclp)
+{
+ return !rsclp->head;
+}
+
+/* Return number of callbacks in segmented callback list. */
+static inline long rcu_segcblist_n_cbs(struct rcu_segcblist *rsclp)
+{
+ return READ_ONCE(rsclp->len);
+}
+
+/* Return number of lazy callbacks in segmented callback list. */
+static inline long rcu_segcblist_n_lazy_cbs(struct rcu_segcblist *rsclp)
+{
+ return rsclp->len_lazy;
+}
+
+/* Return number of non-lazy callbacks in segmented callback list. */
+static inline long rcu_segcblist_n_nonlazy_cbs(struct rcu_segcblist *rsclp)
+{
+ return rsclp->len - rsclp->len_lazy;
+}
+
+/*
+ * Is the specified rcu_segcblist enabled, for example, not corresponding
+ * to an offline or callback-offloaded CPU?
+ */
+static inline bool rcu_segcblist_is_enabled(struct rcu_segcblist *rsclp)
+{
+ return !!rsclp->tails[RCU_NEXT_TAIL];
+}
+
+/*
+ * Are all segments following the specified segment of the specified
+ * rcu_segcblist structure empty of callbacks? (The specified
+ * segment might well contain callbacks.)
+ */
+static inline bool rcu_segcblist_restempty(struct rcu_segcblist *rsclp, int seg)
+{
+ return !*rsclp->tails[seg];
+}
+
+/*
+ * Interim function to return rcu_segcblist head pointer. Longer term, the
+ * rcu_segcblist will be used more pervasively, removing the need for this
+ * function.
+ */
+static inline struct rcu_head *rcu_segcblist_head(struct rcu_segcblist *rsclp)
+{
+ return rsclp->head;
+}
+
+/*
+ * Interim function to return rcu_segcblist tail pointer. Longer term, the
+ * rcu_segcblist will be used more pervasively, removing the need for this
+ * function.
+ */
+static inline struct rcu_head **rcu_segcblist_tail(struct rcu_segcblist *rsclp)
+{
+ WARN_ON_ONCE(rcu_segcblist_empty(rsclp));
+ return rsclp->tails[RCU_NEXT_TAIL];
+}
+
+void rcu_segcblist_init(struct rcu_segcblist *rsclp);
+void rcu_segcblist_disable(struct rcu_segcblist *rsclp);
+bool rcu_segcblist_segempty(struct rcu_segcblist *rsclp, int seg);
+bool rcu_segcblist_ready_cbs(struct rcu_segcblist *rsclp);
+bool rcu_segcblist_pend_cbs(struct rcu_segcblist *rsclp);
+struct rcu_head *rcu_segcblist_dequeue(struct rcu_segcblist *rsclp);
+void rcu_segcblist_dequeued_lazy(struct rcu_segcblist *rsclp);
+struct rcu_head *rcu_segcblist_first_cb(struct rcu_segcblist *rsclp);
+struct rcu_head *rcu_segcblist_first_pend_cb(struct rcu_segcblist *rsclp);
+bool rcu_segcblist_new_cbs(struct rcu_segcblist *rsclp);
+void rcu_segcblist_enqueue(struct rcu_segcblist *rsclp,
+ struct rcu_head *rhp, bool lazy);
+bool rcu_segcblist_entrain(struct rcu_segcblist *rsclp,
+ struct rcu_head *rhp, bool lazy);
+void rcu_segcblist_extract_count(struct rcu_segcblist *rsclp,
+ struct rcu_cblist *rclp);
+void rcu_segcblist_extract_done_cbs(struct rcu_segcblist *rsclp,
+ struct rcu_cblist *rclp);
+void rcu_segcblist_extract_pend_cbs(struct rcu_segcblist *rsclp,
+ struct rcu_cblist *rclp);
+void rcu_segcblist_insert_count(struct rcu_segcblist *rsclp,
+ struct rcu_cblist *rclp);
+void rcu_segcblist_insert_done_cbs(struct rcu_segcblist *rsclp,
+ struct rcu_cblist *rclp);
+void rcu_segcblist_insert_pend_cbs(struct rcu_segcblist *rsclp,
+ struct rcu_cblist *rclp);
+void rcu_segcblist_advance(struct rcu_segcblist *rsclp, unsigned long seq);
+bool rcu_segcblist_accelerate(struct rcu_segcblist *rsclp, unsigned long seq);
+bool rcu_segcblist_future_gp_needed(struct rcu_segcblist *rsclp,
+ unsigned long seq);
diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
index cccc417a8135..ae6e574d4cf5 100644
--- a/kernel/rcu/rcutorture.c
+++ b/kernel/rcu/rcutorture.c
@@ -559,19 +559,34 @@ static void srcu_torture_barrier(void)
static void srcu_torture_stats(void)
{
- int cpu;
- int idx = srcu_ctlp->completed & 0x1;
+ int __maybe_unused cpu;
+ int idx;
- pr_alert("%s%s per-CPU(idx=%d):",
+#if defined(CONFIG_TREE_SRCU) || defined(CONFIG_CLASSIC_SRCU)
+#ifdef CONFIG_TREE_SRCU
+ idx = srcu_ctlp->srcu_idx & 0x1;
+#else /* #ifdef CONFIG_TREE_SRCU */
+ idx = srcu_ctlp->completed & 0x1;
+#endif /* #else #ifdef CONFIG_TREE_SRCU */
+ pr_alert("%s%s Tree SRCU per-CPU(idx=%d):",
torture_type, TORTURE_FLAG, idx);
for_each_possible_cpu(cpu) {
unsigned long l0, l1;
unsigned long u0, u1;
long c0, c1;
- struct srcu_array *counts = per_cpu_ptr(srcu_ctlp->per_cpu_ref, cpu);
+#ifdef CONFIG_TREE_SRCU
+ struct srcu_data *counts;
+ counts = per_cpu_ptr(srcu_ctlp->sda, cpu);
+ u0 = counts->srcu_unlock_count[!idx];
+ u1 = counts->srcu_unlock_count[idx];
+#else /* #ifdef CONFIG_TREE_SRCU */
+ struct srcu_array *counts;
+
+ counts = per_cpu_ptr(srcu_ctlp->per_cpu_ref, cpu);
u0 = counts->unlock_count[!idx];
u1 = counts->unlock_count[idx];
+#endif /* #else #ifdef CONFIG_TREE_SRCU */
/*
* Make sure that a lock is always counted if the corresponding
@@ -579,14 +594,26 @@ static void srcu_torture_stats(void)
*/
smp_rmb();
+#ifdef CONFIG_TREE_SRCU
+ l0 = counts->srcu_lock_count[!idx];
+ l1 = counts->srcu_lock_count[idx];
+#else /* #ifdef CONFIG_TREE_SRCU */
l0 = counts->lock_count[!idx];
l1 = counts->lock_count[idx];
+#endif /* #else #ifdef CONFIG_TREE_SRCU */
c0 = l0 - u0;
c1 = l1 - u1;
pr_cont(" %d(%ld,%ld)", cpu, c0, c1);
}
pr_cont("\n");
+#elif defined(CONFIG_TINY_SRCU)
+ idx = READ_ONCE(srcu_ctlp->srcu_idx) & 0x1;
+ pr_alert("%s%s Tiny SRCU per-CPU(idx=%d): (%d,%d)\n",
+ torture_type, TORTURE_FLAG, idx,
+ READ_ONCE(srcu_ctlp->srcu_lock_nesting[!idx]),
+ READ_ONCE(srcu_ctlp->srcu_lock_nesting[idx]));
+#endif
}
static void srcu_torture_synchronize_expedited(void)
@@ -1333,12 +1360,14 @@ rcu_torture_stats_print(void)
cur_ops->stats();
if (rtcv_snap == rcu_torture_current_version &&
rcu_torture_current != NULL) {
- int __maybe_unused flags;
- unsigned long __maybe_unused gpnum;
- unsigned long __maybe_unused completed;
+ int __maybe_unused flags = 0;
+ unsigned long __maybe_unused gpnum = 0;
+ unsigned long __maybe_unused completed = 0;
rcutorture_get_gp_data(cur_ops->ttype,
&flags, &gpnum, &completed);
+ srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp,
+ &flags, &gpnum, &completed);
wtp = READ_ONCE(writer_task);
pr_alert("??? Writer stall state %s(%d) g%lu c%lu f%#x ->state %#lx\n",
rcu_torture_writer_state_getname(),
diff --git a/kernel/rcu/srcu.c b/kernel/rcu/srcu.c
index ef3bcfb15b39..584d8a983883 100644
--- a/kernel/rcu/srcu.c
+++ b/kernel/rcu/srcu.c
@@ -22,7 +22,7 @@
* Lai Jiangshan <laijs@cn.fujitsu.com>
*
* For detailed explanation of Read-Copy Update mechanism see -
- * Documentation/RCU/ *.txt
+ * Documentation/RCU/ *.txt
*
*/
@@ -243,8 +243,14 @@ static bool srcu_readers_active(struct srcu_struct *sp)
* cleanup_srcu_struct - deconstruct a sleep-RCU structure
* @sp: structure to clean up.
*
- * Must invoke this after you are finished using a given srcu_struct that
- * was initialized via init_srcu_struct(), else you leak memory.
+ * Must invoke this only after you are finished using a given srcu_struct
+ * that was initialized via init_srcu_struct(). This code does some
+ * probabilistic checking, spotting late uses of srcu_read_lock(),
+ * synchronize_srcu(), synchronize_srcu_expedited(), and call_srcu().
+ * If any such late uses are detected, the per-CPU memory associated with
+ * the srcu_struct is simply leaked and WARN_ON() is invoked. If the
+ * caller frees the srcu_struct itself, a use-after-free crash will likely
+ * ensue, but at least there will be a warning printed.
*/
void cleanup_srcu_struct(struct srcu_struct *sp)
{
diff --git a/kernel/rcu/srcutiny.c b/kernel/rcu/srcutiny.c
new file mode 100644
index 000000000000..36e1f82faed1
--- /dev/null
+++ b/kernel/rcu/srcutiny.c
@@ -0,0 +1,216 @@
+/*
+ * Sleepable Read-Copy Update mechanism for mutual exclusion,
+ * tiny version for non-preemptible single-CPU use.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * Copyright (C) IBM Corporation, 2017
+ *
+ * Author: Paul McKenney <paulmck@us.ibm.com>
+ */
+
+#include <linux/export.h>
+#include <linux/mutex.h>
+#include <linux/preempt.h>
+#include <linux/rcupdate_wait.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/srcu.h>
+
+#include <linux/rcu_node_tree.h>
+#include "rcu_segcblist.h"
+#include "rcu.h"
+
+static int init_srcu_struct_fields(struct srcu_struct *sp)
+{
+ sp->srcu_lock_nesting[0] = 0;
+ sp->srcu_lock_nesting[1] = 0;
+ init_swait_queue_head(&sp->srcu_wq);
+ sp->srcu_gp_seq = 0;
+ rcu_segcblist_init(&sp->srcu_cblist);
+ sp->srcu_gp_running = false;
+ sp->srcu_gp_waiting = false;
+ sp->srcu_idx = 0;
+ INIT_WORK(&sp->srcu_work, srcu_drive_gp);
+ return 0;
+}
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+
+int __init_srcu_struct(struct srcu_struct *sp, const char *name,
+ struct lock_class_key *key)
+{
+ /* Don't re-initialize a lock while it is held. */
+ debug_check_no_locks_freed((void *)sp, sizeof(*sp));
+ lockdep_init_map(&sp->dep_map, name, key, 0);
+ return init_srcu_struct_fields(sp);
+}
+EXPORT_SYMBOL_GPL(__init_srcu_struct);
+
+#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+
+/*
+ * init_srcu_struct - initialize a sleep-RCU structure
+ * @sp: structure to initialize.
+ *
+ * Must invoke this on a given srcu_struct before passing that srcu_struct
+ * to any other function. Each srcu_struct represents a separate domain
+ * of SRCU protection.
+ */
+int init_srcu_struct(struct srcu_struct *sp)
+{
+ return init_srcu_struct_fields(sp);
+}
+EXPORT_SYMBOL_GPL(init_srcu_struct);
+
+#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+
+/*
+ * cleanup_srcu_struct - deconstruct a sleep-RCU structure
+ * @sp: structure to clean up.
+ *
+ * Must invoke this after you are finished using a given srcu_struct that
+ * was initialized via init_srcu_struct(), else you leak memory.
+ */
+void cleanup_srcu_struct(struct srcu_struct *sp)
+{
+ WARN_ON(sp->srcu_lock_nesting[0] || sp->srcu_lock_nesting[1]);
+ flush_work(&sp->srcu_work);
+ WARN_ON(rcu_seq_state(sp->srcu_gp_seq));
+ WARN_ON(sp->srcu_gp_running);
+ WARN_ON(sp->srcu_gp_waiting);
+ WARN_ON(!rcu_segcblist_empty(&sp->srcu_cblist));
+}
+EXPORT_SYMBOL_GPL(cleanup_srcu_struct);
+
+/*
+ * Counts the new reader in the appropriate per-CPU element of the
+ * srcu_struct. Must be called from process context.
+ * Returns an index that must be passed to the matching srcu_read_unlock().
+ */
+int __srcu_read_lock(struct srcu_struct *sp)
+{
+ int idx;
+
+ idx = READ_ONCE(sp->srcu_idx);
+ WRITE_ONCE(sp->srcu_lock_nesting[idx], sp->srcu_lock_nesting[idx] + 1);
+ return idx;
+}
+EXPORT_SYMBOL_GPL(__srcu_read_lock);
+
+/*
+ * Removes the count for the old reader from the appropriate element of
+ * the srcu_struct. Must be called from process context.
+ */
+void __srcu_read_unlock(struct srcu_struct *sp, int idx)
+{
+ int newval = sp->srcu_lock_nesting[idx] - 1;
+
+ WRITE_ONCE(sp->srcu_lock_nesting[idx], newval);
+ if (!newval && READ_ONCE(sp->srcu_gp_waiting))
+ swake_up(&sp->srcu_wq);
+}
+EXPORT_SYMBOL_GPL(__srcu_read_unlock);
+
+/*
+ * Workqueue handler to drive one grace period and invoke any callbacks
+ * that become ready as a result. Single-CPU and !PREEMPT operation
+ * means that we get away with murder on synchronization. ;-)
+ */
+void srcu_drive_gp(struct work_struct *wp)
+{
+ int idx;
+ struct rcu_cblist ready_cbs;
+ struct srcu_struct *sp;
+ struct rcu_head *rhp;
+
+ sp = container_of(wp, struct srcu_struct, srcu_work);
+ if (sp->srcu_gp_running || rcu_segcblist_empty(&sp->srcu_cblist))
+ return; /* Already running or nothing to do. */
+
+ /* Tag recently arrived callbacks and wait for readers. */
+ WRITE_ONCE(sp->srcu_gp_running, true);
+ rcu_segcblist_accelerate(&sp->srcu_cblist,
+ rcu_seq_snap(&sp->srcu_gp_seq));
+ rcu_seq_start(&sp->srcu_gp_seq);
+ idx = sp->srcu_idx;
+ WRITE_ONCE(sp->srcu_idx, !sp->srcu_idx);
+ WRITE_ONCE(sp->srcu_gp_waiting, true); /* srcu_read_unlock() wakes! */
+ swait_event(sp->srcu_wq, !READ_ONCE(sp->srcu_lock_nesting[idx]));
+ WRITE_ONCE(sp->srcu_gp_waiting, false); /* srcu_read_unlock() cheap. */
+ rcu_seq_end(&sp->srcu_gp_seq);
+
+ /* Update callback list based on GP, and invoke ready callbacks. */
+ rcu_segcblist_advance(&sp->srcu_cblist,
+ rcu_seq_current(&sp->srcu_gp_seq));
+ if (rcu_segcblist_ready_cbs(&sp->srcu_cblist)) {
+ rcu_cblist_init(&ready_cbs);
+ local_irq_disable();
+ rcu_segcblist_extract_done_cbs(&sp->srcu_cblist, &ready_cbs);
+ local_irq_enable();
+ rhp = rcu_cblist_dequeue(&ready_cbs);
+ for (; rhp != NULL; rhp = rcu_cblist_dequeue(&ready_cbs)) {
+ local_bh_disable();
+ rhp->func(rhp);
+ local_bh_enable();
+ }
+ local_irq_disable();
+ rcu_segcblist_insert_count(&sp->srcu_cblist, &ready_cbs);
+ local_irq_enable();
+ }
+ WRITE_ONCE(sp->srcu_gp_running, false);
+
+ /*
+ * If more callbacks, reschedule ourselves. This can race with
+ * a call_srcu() at interrupt level, but the ->srcu_gp_running
+ * checks will straighten that out.
+ */
+ if (!rcu_segcblist_empty(&sp->srcu_cblist))
+ schedule_work(&sp->srcu_work);
+}
+EXPORT_SYMBOL_GPL(srcu_drive_gp);
+
+/*
+ * Enqueue an SRCU callback on the specified srcu_struct structure,
+ * initiating grace-period processing if it is not already running.
+ */
+void call_srcu(struct srcu_struct *sp, struct rcu_head *head,
+ rcu_callback_t func)
+{
+ unsigned long flags;
+
+ head->func = func;
+ local_irq_save(flags);
+ rcu_segcblist_enqueue(&sp->srcu_cblist, head, false);
+ local_irq_restore(flags);
+ if (!READ_ONCE(sp->srcu_gp_running))
+ schedule_work(&sp->srcu_work);
+}
+EXPORT_SYMBOL_GPL(call_srcu);
+
+/*
+ * synchronize_srcu - wait for prior SRCU read-side critical-section completion
+ */
+void synchronize_srcu(struct srcu_struct *sp)
+{
+ struct rcu_synchronize rs;
+
+ init_rcu_head_on_stack(&rs.head);
+ init_completion(&rs.completion);
+ call_srcu(sp, &rs.head, wakeme_after_rcu);
+ wait_for_completion(&rs.completion);
+ destroy_rcu_head_on_stack(&rs.head);
+}
+EXPORT_SYMBOL_GPL(synchronize_srcu);
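Both the Tiny implementation above and the Tree implementation below present the same API to users. A hypothetical usage sketch follows (not part of the patch; struct foo, example_ptr, and do_something_with() are assumptions): readers safely dereference a pointer while possibly sleeping, and the updater waits for them before freeing.

DEFINE_SRCU(example_srcu);
static struct foo __rcu *example_ptr;

static void example_reader(void)
{
	int idx;
	struct foo *p;

	idx = srcu_read_lock(&example_srcu);
	p = srcu_dereference(example_ptr, &example_srcu);
	if (p)
		do_something_with(p);		/* May block: this is SRCU. */
	srcu_read_unlock(&example_srcu, idx);
}

static void example_updater(struct foo *newp)
{
	struct foo *oldp;

	oldp = rcu_dereference_protected(example_ptr, 1);
	rcu_assign_pointer(example_ptr, newp);
	synchronize_srcu(&example_srcu);	/* Wait for pre-existing readers. */
	kfree(oldp);
}
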
diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c
new file mode 100644
index 000000000000..3ae8474557df
--- /dev/null
+++ b/kernel/rcu/srcutree.c
@@ -0,0 +1,1155 @@
+/*
+ * Sleepable Read-Copy Update mechanism for mutual exclusion.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * Copyright (C) IBM Corporation, 2006
+ * Copyright (C) Fujitsu, 2012
+ *
+ * Author: Paul McKenney <paulmck@us.ibm.com>
+ * Lai Jiangshan <laijs@cn.fujitsu.com>
+ *
+ * For detailed explanation of Read-Copy Update mechanism see -
+ * Documentation/RCU/ *.txt
+ *
+ */
+
+#include <linux/export.h>
+#include <linux/mutex.h>
+#include <linux/percpu.h>
+#include <linux/preempt.h>
+#include <linux/rcupdate_wait.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/srcu.h>
+
+#include "rcu.h"
+#include "rcu_segcblist.h"
+
+ulong exp_holdoff = 25 * 1000; /* Holdoff (ns) for auto-expediting. */
+module_param(exp_holdoff, ulong, 0444);
+
+static void srcu_invoke_callbacks(struct work_struct *work);
+static void srcu_reschedule(struct srcu_struct *sp, unsigned long delay);
+
+/*
+ * Initialize SRCU combining tree. Note that statically allocated
+ * srcu_struct structures might already have srcu_read_lock() and
+ * srcu_read_unlock() running against them. So if the is_static parameter
+ * is set, don't initialize ->srcu_lock_count[] and ->srcu_unlock_count[].
+ */
+static void init_srcu_struct_nodes(struct srcu_struct *sp, bool is_static)
+{
+ int cpu;
+ int i;
+ int level = 0;
+ int levelspread[RCU_NUM_LVLS];
+ struct srcu_data *sdp;
+ struct srcu_node *snp;
+ struct srcu_node *snp_first;
+
+ /* Work out the overall tree geometry. */
+ sp->level[0] = &sp->node[0];
+ for (i = 1; i < rcu_num_lvls; i++)
+ sp->level[i] = sp->level[i - 1] + num_rcu_lvl[i - 1];
+ rcu_init_levelspread(levelspread, num_rcu_lvl);
+
+ /* Each pass through this loop initializes one srcu_node structure. */
+ rcu_for_each_node_breadth_first(sp, snp) {
+ spin_lock_init(&snp->lock);
+ WARN_ON_ONCE(ARRAY_SIZE(snp->srcu_have_cbs) !=
+ ARRAY_SIZE(snp->srcu_data_have_cbs));
+ for (i = 0; i < ARRAY_SIZE(snp->srcu_have_cbs); i++) {
+ snp->srcu_have_cbs[i] = 0;
+ snp->srcu_data_have_cbs[i] = 0;
+ }
+ snp->srcu_gp_seq_needed_exp = 0;
+ snp->grplo = -1;
+ snp->grphi = -1;
+ if (snp == &sp->node[0]) {
+ /* Root node, special case. */
+ snp->srcu_parent = NULL;
+ continue;
+ }
+
+ /* Non-root node. */
+ if (snp == sp->level[level + 1])
+ level++;
+ snp->srcu_parent = sp->level[level - 1] +
+ (snp - sp->level[level]) /
+ levelspread[level - 1];
+ }
+
+ /*
+ * Initialize the per-CPU srcu_data array, which feeds into the
+ * leaves of the srcu_node tree.
+ */
+ WARN_ON_ONCE(ARRAY_SIZE(sdp->srcu_lock_count) !=
+ ARRAY_SIZE(sdp->srcu_unlock_count));
+ level = rcu_num_lvls - 1;
+ snp_first = sp->level[level];
+ for_each_possible_cpu(cpu) {
+ sdp = per_cpu_ptr(sp->sda, cpu);
+ spin_lock_init(&sdp->lock);
+ rcu_segcblist_init(&sdp->srcu_cblist);
+ sdp->srcu_cblist_invoking = false;
+ sdp->srcu_gp_seq_needed = sp->srcu_gp_seq;
+ sdp->srcu_gp_seq_needed_exp = sp->srcu_gp_seq;
+ sdp->mynode = &snp_first[cpu / levelspread[level]];
+ for (snp = sdp->mynode; snp != NULL; snp = snp->srcu_parent) {
+ if (snp->grplo < 0)
+ snp->grplo = cpu;
+ snp->grphi = cpu;
+ }
+ sdp->cpu = cpu;
+ INIT_DELAYED_WORK(&sdp->work, srcu_invoke_callbacks);
+ sdp->sp = sp;
+ sdp->grpmask = 1 << (cpu - sdp->mynode->grplo);
+ if (is_static)
+ continue;
+
+ /* Dynamically allocated, better be no srcu_read_locks()! */
+ for (i = 0; i < ARRAY_SIZE(sdp->srcu_lock_count); i++) {
+ sdp->srcu_lock_count[i] = 0;
+ sdp->srcu_unlock_count[i] = 0;
+ }
+ }
+}
+
+/*
+ * Initialize non-compile-time initialized fields, including the
+ * associated srcu_node and srcu_data structures. The is_static
+ * parameter is passed through to init_srcu_struct_nodes(), and
+ * also tells us that ->sda has already been wired up to srcu_data.
+ */
+static int init_srcu_struct_fields(struct srcu_struct *sp, bool is_static)
+{
+ mutex_init(&sp->srcu_cb_mutex);
+ mutex_init(&sp->srcu_gp_mutex);
+ sp->srcu_idx = 0;
+ sp->srcu_gp_seq = 0;
+ sp->srcu_barrier_seq = 0;
+ mutex_init(&sp->srcu_barrier_mutex);
+ atomic_set(&sp->srcu_barrier_cpu_cnt, 0);
+ INIT_DELAYED_WORK(&sp->work, process_srcu);
+ if (!is_static)
+ sp->sda = alloc_percpu(struct srcu_data);
+ init_srcu_struct_nodes(sp, is_static);
+ sp->srcu_gp_seq_needed_exp = 0;
+ sp->srcu_last_gp_end = ktime_get_mono_fast_ns();
+ smp_store_release(&sp->srcu_gp_seq_needed, 0); /* Init done. */
+ return sp->sda ? 0 : -ENOMEM;
+}
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+
+int __init_srcu_struct(struct srcu_struct *sp, const char *name,
+ struct lock_class_key *key)
+{
+ /* Don't re-initialize a lock while it is held. */
+ debug_check_no_locks_freed((void *)sp, sizeof(*sp));
+ lockdep_init_map(&sp->dep_map, name, key, 0);
+ spin_lock_init(&sp->gp_lock);
+ return init_srcu_struct_fields(sp, false);
+}
+EXPORT_SYMBOL_GPL(__init_srcu_struct);
+
+#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+
+/**
+ * init_srcu_struct - initialize a sleep-RCU structure
+ * @sp: structure to initialize.
+ *
+ * Must invoke this on a given srcu_struct before passing that srcu_struct
+ * to any other function. Each srcu_struct represents a separate domain
+ * of SRCU protection.
+ */
+int init_srcu_struct(struct srcu_struct *sp)
+{
+ spin_lock_init(&sp->gp_lock);
+ return init_srcu_struct_fields(sp, false);
+}
+EXPORT_SYMBOL_GPL(init_srcu_struct);
+
+#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+
+/*
+ * First-use initialization of statically allocated srcu_struct
+ * structure. Wiring up the combining tree is more than can be
+ * done with compile-time initialization, so this check is added
+ * to each update-side SRCU primitive. Use ->gp_lock, which -is-
+ * compile-time initialized, to resolve races involving multiple
+ * CPUs trying to garner first-use privileges.
+ */
+static void check_init_srcu_struct(struct srcu_struct *sp)
+{
+ unsigned long flags;
+
+ WARN_ON_ONCE(rcu_scheduler_active == RCU_SCHEDULER_INIT);
+ /* The smp_load_acquire() pairs with the smp_store_release(). */
+ if (!rcu_seq_state(smp_load_acquire(&sp->srcu_gp_seq_needed))) /*^^^*/
+ return; /* Already initialized. */
+ spin_lock_irqsave(&sp->gp_lock, flags);
+ if (!rcu_seq_state(sp->srcu_gp_seq_needed)) {
+ spin_unlock_irqrestore(&sp->gp_lock, flags);
+ return;
+ }
+ init_srcu_struct_fields(sp, true);
+ spin_unlock_irqrestore(&sp->gp_lock, flags);
+}
+
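The check above is an instance of the double-checked, lock-protected first-use pattern; the store-release in init_srcu_struct_fields() pairs with the load-acquire here. A generic, hypothetical rendering of the idiom (struct obj, do_full_init(), and the boolean flag are assumptions, not kernel APIs):

static void example_check_init(struct obj *op)
{
	unsigned long flags;

	if (smp_load_acquire(&op->initialized))	/* Pairs with release below. */
		return;				/* Fast path: already set up. */
	spin_lock_irqsave(&op->lock, flags);
	if (!op->initialized) {
		do_full_init(op);
		smp_store_release(&op->initialized, true);
	}
	spin_unlock_irqrestore(&op->lock, flags);
}
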
+/*
+ * Returns approximate total of the readers' ->srcu_lock_count[] values
+ * for the rank of per-CPU counters specified by idx.
+ */
+static unsigned long srcu_readers_lock_idx(struct srcu_struct *sp, int idx)
+{
+ int cpu;
+ unsigned long sum = 0;
+
+ for_each_possible_cpu(cpu) {
+ struct srcu_data *cpuc = per_cpu_ptr(sp->sda, cpu);
+
+ sum += READ_ONCE(cpuc->srcu_lock_count[idx]);
+ }
+ return sum;
+}
+
+/*
+ * Returns approximate total of the readers' ->srcu_unlock_count[] values
+ * for the rank of per-CPU counters specified by idx.
+ */
+static unsigned long srcu_readers_unlock_idx(struct srcu_struct *sp, int idx)
+{
+ int cpu;
+ unsigned long sum = 0;
+
+ for_each_possible_cpu(cpu) {
+ struct srcu_data *cpuc = per_cpu_ptr(sp->sda, cpu);
+
+ sum += READ_ONCE(cpuc->srcu_unlock_count[idx]);
+ }
+ return sum;
+}
+
+/*
+ * Return true if the number of pre-existing readers is determined to
+ * be zero.
+ */
+static bool srcu_readers_active_idx_check(struct srcu_struct *sp, int idx)
+{
+ unsigned long unlocks;
+
+ unlocks = srcu_readers_unlock_idx(sp, idx);
+
+ /*
+ * Make sure that a lock is always counted if the corresponding
+ * unlock is counted. Needs to be a smp_mb() as the read side may
+ * contain a read from a variable that is written to before the
+ * synchronize_srcu() in the write side. In this case smp_mb()s
+ * A and B act like the store buffering pattern.
+ *
+ * This smp_mb() also pairs with smp_mb() C to prevent accesses
+ * after the synchronize_srcu() from being executed before the
+ * grace period ends.
+ */
+ smp_mb(); /* A */
+
+ /*
+ * If the locks are the same as the unlocks, then there must have
+ * been no readers on this index at some time in between. This does
+ * not mean that there are no more readers, as one could have read
+ * the current index but not have incremented the lock counter yet.
+ *
+ * Possible bug: There is no guarantee that there haven't been
+ * ULONG_MAX increments of ->srcu_lock_count[] since the unlocks were
+ * counted, meaning that this could return true even if there are
+ * still active readers. Since there are no memory barriers around
+ * srcu_flip(), the CPU is not required to increment ->srcu_idx
+ * before running srcu_readers_unlock_idx(), which means that there
+ * could be an arbitrarily large number of critical sections that
+ * execute after srcu_readers_unlock_idx() but use the old value
+ * of ->srcu_idx.
+ */
+ return srcu_readers_lock_idx(sp, idx) == unlocks;
+}
+
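For readers unfamiliar with the store-buffering pattern referenced above, here is a generic litmus sketch (not SRCU-specific and not part of the patch):

/*
 *	CPU 0 (reader-side)		CPU 1 (update-side)
 *	WRITE_ONCE(x, 1);		WRITE_ONCE(y, 1);
 *	smp_mb();   // like B		smp_mb();   // like A
 *	r0 = READ_ONCE(y);		r1 = READ_ONCE(x);
 *
 * With both full barriers present, the outcome r0 == 0 && r1 == 0 is
 * forbidden, which is the property the lock/unlock summation relies on.
 */
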
+/**
+ * srcu_readers_active - returns true if there are readers, and false
+ * otherwise
+ * @sp: which srcu_struct to count active readers (holding srcu_read_lock).
+ *
+ * Note that this is not an atomic primitive, and can therefore suffer
+ * severe errors when invoked on an active srcu_struct. That said, it
+ * can be useful as an error check at cleanup time.
+ */
+static bool srcu_readers_active(struct srcu_struct *sp)
+{
+ int cpu;
+ unsigned long sum = 0;
+
+ for_each_possible_cpu(cpu) {
+ struct srcu_data *cpuc = per_cpu_ptr(sp->sda, cpu);
+
+ sum += READ_ONCE(cpuc->srcu_lock_count[0]);
+ sum += READ_ONCE(cpuc->srcu_lock_count[1]);
+ sum -= READ_ONCE(cpuc->srcu_unlock_count[0]);
+ sum -= READ_ONCE(cpuc->srcu_unlock_count[1]);
+ }
+ return sum;
+}
+
+#define SRCU_INTERVAL 1
+
+/*
+ * Return grace-period delay, zero if there are expedited grace
+ * periods pending, SRCU_INTERVAL otherwise.
+ */
+static unsigned long srcu_get_delay(struct srcu_struct *sp)
+{
+ if (ULONG_CMP_LT(READ_ONCE(sp->srcu_gp_seq),
+ READ_ONCE(sp->srcu_gp_seq_needed_exp)))
+ return 0;
+ return SRCU_INTERVAL;
+}
+
+/**
+ * cleanup_srcu_struct - deconstruct a sleep-RCU structure
+ * @sp: structure to clean up.
+ *
+ * Must invoke this after you are finished using a given srcu_struct that
+ * was initialized via init_srcu_struct(), else you leak memory.
+ */
+void cleanup_srcu_struct(struct srcu_struct *sp)
+{
+ int cpu;
+
+ if (WARN_ON(!srcu_get_delay(sp)))
+ return; /* Leakage unless caller handles error. */
+ if (WARN_ON(srcu_readers_active(sp)))
+ return; /* Leakage unless caller handles error. */
+ flush_delayed_work(&sp->work);
+ for_each_possible_cpu(cpu)
+ flush_delayed_work(&per_cpu_ptr(sp->sda, cpu)->work);
+ if (WARN_ON(rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) != SRCU_STATE_IDLE) ||
+ WARN_ON(srcu_readers_active(sp))) {
+ pr_info("cleanup_srcu_struct: Active srcu_struct %p state: %d\n", sp, rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)));
+ return; /* Caller forgot to stop doing call_srcu()? */
+ }
+ free_percpu(sp->sda);
+ sp->sda = NULL;
+}
+EXPORT_SYMBOL_GPL(cleanup_srcu_struct);
+
+/*
+ * Counts the new reader in the appropriate per-CPU element of the
+ * srcu_struct. Must be called from process context.
+ * Returns an index that must be passed to the matching srcu_read_unlock().
+ */
+int __srcu_read_lock(struct srcu_struct *sp)
+{
+ int idx;
+
+ idx = READ_ONCE(sp->srcu_idx) & 0x1;
+ __this_cpu_inc(sp->sda->srcu_lock_count[idx]);
+ smp_mb(); /* B */ /* Avoid leaking the critical section. */
+ return idx;
+}
+EXPORT_SYMBOL_GPL(__srcu_read_lock);
+
+/*
+ * Removes the count for the old reader from the appropriate per-CPU
+ * element of the srcu_struct. Note that this may well be a different
+ * CPU than that which was incremented by the corresponding srcu_read_lock().
+ * Must be called from process context.
+ */
+void __srcu_read_unlock(struct srcu_struct *sp, int idx)
+{
+ smp_mb(); /* C */ /* Avoid leaking the critical section. */
+ this_cpu_inc(sp->sda->srcu_unlock_count[idx]);
+}
+EXPORT_SYMBOL_GPL(__srcu_read_unlock);
+
+/*
+ * We use an adaptive strategy for synchronize_srcu() and especially for
+ * synchronize_srcu_expedited(). We spin for a fixed time period
+ * (defined below) to allow SRCU readers to exit their read-side critical
+ * sections. If there are still some readers after a few microseconds,
+ * we repeatedly block for 1-millisecond time periods.
+ */
+#define SRCU_RETRY_CHECK_DELAY 5
+
+/*
+ * Start an SRCU grace period.
+ */
+static void srcu_gp_start(struct srcu_struct *sp)
+{
+ struct srcu_data *sdp = this_cpu_ptr(sp->sda);
+ int state;
+
+ RCU_LOCKDEP_WARN(!lockdep_is_held(&sp->gp_lock),
+ "Invoked srcu_gp_start() without ->gp_lock!");
+ WARN_ON_ONCE(ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed));
+ rcu_segcblist_advance(&sdp->srcu_cblist,
+ rcu_seq_current(&sp->srcu_gp_seq));
+ (void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
+ rcu_seq_snap(&sp->srcu_gp_seq));
+ smp_mb(); /* Order prior store to ->srcu_gp_seq_needed vs. GP start. */
+ rcu_seq_start(&sp->srcu_gp_seq);
+ state = rcu_seq_state(READ_ONCE(sp->srcu_gp_seq));
+ WARN_ON_ONCE(state != SRCU_STATE_SCAN1);
+}
+
+/*
+ * Track online CPUs to guide callback workqueue placement.
+ */
+DEFINE_PER_CPU(bool, srcu_online);
+
+void srcu_online_cpu(unsigned int cpu)
+{
+ WRITE_ONCE(per_cpu(srcu_online, cpu), true);
+}
+
+void srcu_offline_cpu(unsigned int cpu)
+{
+ WRITE_ONCE(per_cpu(srcu_online, cpu), false);
+}
+
+/*
+ * Place the workqueue handler on the specified CPU if online, otherwise
+ * just run it wherever. This is useful for placing workqueue handlers
+ * that are to invoke the specified CPU's callbacks.
+ */
+static bool srcu_queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
+ struct delayed_work *dwork,
+ unsigned long delay)
+{
+ bool ret;
+
+ preempt_disable();
+ if (READ_ONCE(per_cpu(srcu_online, cpu)))
+ ret = queue_delayed_work_on(cpu, wq, dwork, delay);
+ else
+ ret = queue_delayed_work(wq, dwork, delay);
+ preempt_enable();
+ return ret;
+}
+
+/*
+ * Schedule callback invocation for the specified srcu_data structure,
+ * if possible, on the corresponding CPU.
+ */
+static void srcu_schedule_cbs_sdp(struct srcu_data *sdp, unsigned long delay)
+{
+ srcu_queue_delayed_work_on(sdp->cpu, system_power_efficient_wq,
+ &sdp->work, delay);
+}
+
+/*
+ * Schedule callback invocation for all srcu_data structures associated
+ * with the specified srcu_node structure that have callbacks for the
+ * just-completed grace period, the one corresponding to idx. If possible,
+ * schedule this invocation on the corresponding CPUs.
+ */
+static void srcu_schedule_cbs_snp(struct srcu_struct *sp, struct srcu_node *snp,
+ unsigned long mask, unsigned long delay)
+{
+ int cpu;
+
+ for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
+ if (!(mask & (1 << (cpu - snp->grplo))))
+ continue;
+ srcu_schedule_cbs_sdp(per_cpu_ptr(sp->sda, cpu), delay);
+ }
+}
+
+/*
+ * Note the end of an SRCU grace period. Initiates callback invocation
+ * and starts a new grace period if needed.
+ *
+ * The ->srcu_cb_mutex acquisition does not protect any data, but
+ * instead prevents more than one grace period from starting while we
+ * are initiating callback invocation. This allows the ->srcu_have_cbs[]
+ * array to have a finite number of elements.
+ */
+static void srcu_gp_end(struct srcu_struct *sp)
+{
+ unsigned long cbdelay;
+ bool cbs;
+ unsigned long gpseq;
+ int idx;
+ int idxnext;
+ unsigned long mask;
+ struct srcu_node *snp;
+
+ /* Prevent more than one additional grace period. */
+ mutex_lock(&sp->srcu_cb_mutex);
+
+ /* End the current grace period. */
+ spin_lock_irq(&sp->gp_lock);
+ idx = rcu_seq_state(sp->srcu_gp_seq);
+ WARN_ON_ONCE(idx != SRCU_STATE_SCAN2);
+ cbdelay = srcu_get_delay(sp);
+ sp->srcu_last_gp_end = ktime_get_mono_fast_ns();
+ rcu_seq_end(&sp->srcu_gp_seq);
+ gpseq = rcu_seq_current(&sp->srcu_gp_seq);
+ if (ULONG_CMP_LT(sp->srcu_gp_seq_needed_exp, gpseq))
+ sp->srcu_gp_seq_needed_exp = gpseq;
+ spin_unlock_irq(&sp->gp_lock);
+ mutex_unlock(&sp->srcu_gp_mutex);
+ /* A new grace period can start at this point. But only one. */
+
+ /* Initiate callback invocation as needed. */
+ idx = rcu_seq_ctr(gpseq) % ARRAY_SIZE(snp->srcu_have_cbs);
+ idxnext = (idx + 1) % ARRAY_SIZE(snp->srcu_have_cbs);
+ rcu_for_each_node_breadth_first(sp, snp) {
+ spin_lock_irq(&snp->lock);
+ cbs = false;
+ if (snp >= sp->level[rcu_num_lvls - 1])
+ cbs = snp->srcu_have_cbs[idx] == gpseq;
+ snp->srcu_have_cbs[idx] = gpseq;
+ rcu_seq_set_state(&snp->srcu_have_cbs[idx], 1);
+ if (ULONG_CMP_LT(snp->srcu_gp_seq_needed_exp, gpseq))
+ snp->srcu_gp_seq_needed_exp = gpseq;
+ mask = snp->srcu_data_have_cbs[idx];
+ snp->srcu_data_have_cbs[idx] = 0;
+ spin_unlock_irq(&snp->lock);
+ if (cbs) {
+ smp_mb(); /* GP end before CB invocation. */
+ srcu_schedule_cbs_snp(sp, snp, mask, cbdelay);
+ }
+ }
+
+ /* Callback initiation done, allow grace periods after next. */
+ mutex_unlock(&sp->srcu_cb_mutex);
+
+ /* Start a new grace period if needed. */
+ spin_lock_irq(&sp->gp_lock);
+ gpseq = rcu_seq_current(&sp->srcu_gp_seq);
+ if (!rcu_seq_state(gpseq) &&
+ ULONG_CMP_LT(gpseq, sp->srcu_gp_seq_needed)) {
+ srcu_gp_start(sp);
+ spin_unlock_irq(&sp->gp_lock);
+ /* Throttle expedited grace periods: Should be rare! */
+ srcu_reschedule(sp, rcu_seq_ctr(gpseq) & 0x3ff
+ ? 0 : SRCU_INTERVAL);
+ } else {
+ spin_unlock_irq(&sp->gp_lock);
+ }
+}
+
+/*
+ * Funnel-locking scheme to scalably mediate many concurrent expedited
+ * grace-period requests. This function is invoked for the first known
+ * expedited request for a grace period that has already been requested,
+ * but without expediting. To start a completely new grace period,
+ * whether expedited or not, use srcu_funnel_gp_start() instead.
+ */
+static void srcu_funnel_exp_start(struct srcu_struct *sp, struct srcu_node *snp,
+ unsigned long s)
+{
+ unsigned long flags;
+
+ for (; snp != NULL; snp = snp->srcu_parent) {
+ if (rcu_seq_done(&sp->srcu_gp_seq, s) ||
+ ULONG_CMP_GE(READ_ONCE(snp->srcu_gp_seq_needed_exp), s))
+ return;
+ spin_lock_irqsave(&snp->lock, flags);
+ if (ULONG_CMP_GE(snp->srcu_gp_seq_needed_exp, s)) {
+ spin_unlock_irqrestore(&snp->lock, flags);
+ return;
+ }
+ WRITE_ONCE(snp->srcu_gp_seq_needed_exp, s);
+ spin_unlock_irqrestore(&snp->lock, flags);
+ }
+ spin_lock_irqsave(&sp->gp_lock, flags);
+ if (!ULONG_CMP_LT(sp->srcu_gp_seq_needed_exp, s))
+ sp->srcu_gp_seq_needed_exp = s;
+ spin_unlock_irqrestore(&sp->gp_lock, flags);
+}
+
+/*
+ * Funnel-locking scheme to scalably mediate many concurrent grace-period
+ * requests. The winner has to do the work of actually starting grace
+ * period s. Losers must either ensure that their desired grace-period
+ * number is recorded on at least their leaf srcu_node structure, or they
+ * must take steps to invoke their own callbacks.
+ */
+static void srcu_funnel_gp_start(struct srcu_struct *sp, struct srcu_data *sdp,
+ unsigned long s, bool do_norm)
+{
+ unsigned long flags;
+ int idx = rcu_seq_ctr(s) % ARRAY_SIZE(sdp->mynode->srcu_have_cbs);
+ struct srcu_node *snp = sdp->mynode;
+ unsigned long snp_seq;
+
+ /* Each pass through the loop does one level of the srcu_node tree. */
+ for (; snp != NULL; snp = snp->srcu_parent) {
+ if (rcu_seq_done(&sp->srcu_gp_seq, s) && snp != sdp->mynode)
+ return; /* GP already done and CBs recorded. */
+ spin_lock_irqsave(&snp->lock, flags);
+ if (ULONG_CMP_GE(snp->srcu_have_cbs[idx], s)) {
+ snp_seq = snp->srcu_have_cbs[idx];
+ if (snp == sdp->mynode && snp_seq == s)
+ snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
+ spin_unlock_irqrestore(&snp->lock, flags);
+ if (snp == sdp->mynode && snp_seq != s) {
+ smp_mb(); /* CBs after GP! */
+ srcu_schedule_cbs_sdp(sdp, do_norm
+ ? SRCU_INTERVAL
+ : 0);
+ return;
+ }
+ if (!do_norm)
+ srcu_funnel_exp_start(sp, snp, s);
+ return;
+ }
+ snp->srcu_have_cbs[idx] = s;
+ if (snp == sdp->mynode)
+ snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
+ if (!do_norm && ULONG_CMP_LT(snp->srcu_gp_seq_needed_exp, s))
+ snp->srcu_gp_seq_needed_exp = s;
+ spin_unlock_irqrestore(&snp->lock, flags);
+ }
+
+ /* Top of tree, must ensure the grace period will be started. */
+ spin_lock_irqsave(&sp->gp_lock, flags);
+ if (ULONG_CMP_LT(sp->srcu_gp_seq_needed, s)) {
+ /*
+ * Record need for grace period s. Pair with load
+ * acquire setting up for initialization.
+ */
+ smp_store_release(&sp->srcu_gp_seq_needed, s); /*^^^*/
+ }
+ if (!do_norm && ULONG_CMP_LT(sp->srcu_gp_seq_needed_exp, s))
+ sp->srcu_gp_seq_needed_exp = s;
+
+ /* If grace period not already done and none in progress, start it. */
+ if (!rcu_seq_done(&sp->srcu_gp_seq, s) &&
+ rcu_seq_state(sp->srcu_gp_seq) == SRCU_STATE_IDLE) {
+ WARN_ON_ONCE(ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed));
+ srcu_gp_start(sp);
+ queue_delayed_work(system_power_efficient_wq, &sp->work,
+ srcu_get_delay(sp));
+ }
+ spin_unlock_irqrestore(&sp->gp_lock, flags);
+}
+
+/*
+ * Wait until all readers counted by array index idx complete, but
+ * loop an additional time if there is an expedited grace period pending.
+ * The caller must ensure that ->srcu_idx is not changed while checking.
+ */
+static bool try_check_zero(struct srcu_struct *sp, int idx, int trycount)
+{
+ for (;;) {
+ if (srcu_readers_active_idx_check(sp, idx))
+ return true;
+ if (--trycount + !srcu_get_delay(sp) <= 0)
+ return false;
+ udelay(SRCU_RETRY_CHECK_DELAY);
+ }
+}
+
+/*
+ * Increment the ->srcu_idx counter so that future SRCU readers will
+ * use the other rank of the ->srcu_(un)lock_count[] arrays. This allows
+ * us to wait for pre-existing readers in a starvation-free manner.
+ */
+static void srcu_flip(struct srcu_struct *sp)
+{
+ WRITE_ONCE(sp->srcu_idx, sp->srcu_idx + 1);
+
+ /*
+ * Ensure that if the updater misses an __srcu_read_unlock()
+ * increment, that task's next __srcu_read_lock() will see the
+ * above counter update. Note that both this memory barrier
+ * and the one in srcu_readers_active_idx_check() provide the
+ * guarantee for __srcu_read_lock().
+ */
+ smp_mb(); /* D */ /* Pairs with C. */
+}
+
+/*
+ * If SRCU is likely idle, return true, otherwise return false.
+ *
+ * Note that it is OK for several concurrent from-idle requests for a new
+ * grace period to specify expediting, because they will all end up
+ * requesting the same grace period anyhow. So no loss.
+ *
+ * Note also that if any CPU (including the current one) is still invoking
+ * callbacks, this function will nevertheless say "idle". This is not
+ * ideal, but the overhead of checking all CPUs' callback lists is even
+ * less ideal, especially on large systems. Furthermore, the wakeup
+ * can happen before the callback is fully removed, so we have no choice
+ * but to accept this type of error.
+ *
+ * This function is also subject to counter-wrap errors, but let's face
+ * it, if this function was preempted for enough time for the counters
+ * to wrap, it really doesn't matter whether or not we expedite the grace
+ * period. The extra overhead of a needlessly expedited grace period is
+ * negligible when amortized over that time period, and the extra latency
+ * of a needlessly non-expedited grace period is similarly negligible.
+ */
+static bool srcu_might_be_idle(struct srcu_struct *sp)
+{
+ unsigned long curseq;
+ unsigned long flags;
+ struct srcu_data *sdp;
+ unsigned long t;
+
+ /* If the local srcu_data structure has callbacks, not idle. */
+ local_irq_save(flags);
+ sdp = this_cpu_ptr(sp->sda);
+ if (rcu_segcblist_pend_cbs(&sdp->srcu_cblist)) {
+ local_irq_restore(flags);
+ return false; /* Callbacks already present, so not idle. */
+ }
+ local_irq_restore(flags);
+
+ /*
+ * No local callbacks, so probabilistically probe global state.
+ * Exact information would require acquiring locks, which would
+ * kill scalability, hence the probabilistic nature of the probe.
+ */
+
+ /* First, see if enough time has passed since the last GP. */
+ t = ktime_get_mono_fast_ns();
+ if (exp_holdoff == 0 ||
+ time_in_range_open(t, sp->srcu_last_gp_end,
+ sp->srcu_last_gp_end + exp_holdoff))
+ return false; /* Too soon after last GP. */
+
+ /* Next, check for probable idleness. */
+ curseq = rcu_seq_current(&sp->srcu_gp_seq);
+ smp_mb(); /* Order ->srcu_gp_seq with ->srcu_gp_seq_needed. */
+ if (ULONG_CMP_LT(curseq, READ_ONCE(sp->srcu_gp_seq_needed)))
+ return false; /* Grace period in progress, so not idle. */
+ smp_mb(); /* Order ->srcu_gp_seq with prior access. */
+ if (curseq != rcu_seq_current(&sp->srcu_gp_seq))
+ return false; /* GP # changed, so not idle. */
+ return true; /* With reasonable probability, idle! */
+}
+
+/*
+ * Enqueue an SRCU callback on the srcu_data structure associated with
+ * the current CPU and the specified srcu_struct structure, initiating
+ * grace-period processing if it is not already running.
+ *
+ * Note that all CPUs must agree that the grace period extended beyond
+ * all pre-existing SRCU read-side critical sections. On systems with
+ * more than one CPU, this means that when "func()" is invoked, each CPU
+ * is guaranteed to have executed a full memory barrier since the end of
+ * its last corresponding SRCU read-side critical section whose beginning
+ * preceded the call to call_srcu(). It also means that each CPU executing
+ * an SRCU read-side critical section that continues beyond the start of
+ * "func()" must have executed a memory barrier after the call_srcu()
+ * but before the beginning of that SRCU read-side critical section.
+ * Note that these guarantees include CPUs that are offline, idle, or
+ * executing in user mode, as well as CPUs that are executing in the kernel.
+ *
+ * Furthermore, if CPU A invoked call_srcu() and CPU B invoked the
+ * resulting SRCU callback function "func()", then both CPU A and CPU
+ * B are guaranteed to execute a full memory barrier during the time
+ * interval between the call to call_srcu() and the invocation of "func()".
+ * This guarantee applies even if CPU A and CPU B are the same CPU (but
+ * again only if the system has more than one CPU).
+ *
+ * Of course, these guarantees apply only for invocations of call_srcu(),
+ * srcu_read_lock(), and srcu_read_unlock() that are all passed the same
+ * srcu_struct structure.
+ */
+void __call_srcu(struct srcu_struct *sp, struct rcu_head *rhp,
+ rcu_callback_t func, bool do_norm)
+{
+ unsigned long flags;
+ bool needexp = false;
+ bool needgp = false;
+ unsigned long s;
+ struct srcu_data *sdp;
+
+ check_init_srcu_struct(sp);
+ rhp->func = func;
+ local_irq_save(flags);
+ sdp = this_cpu_ptr(sp->sda);
+ spin_lock(&sdp->lock);
+ rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp, false);
+ rcu_segcblist_advance(&sdp->srcu_cblist,
+ rcu_seq_current(&sp->srcu_gp_seq));
+ s = rcu_seq_snap(&sp->srcu_gp_seq);
+ (void)rcu_segcblist_accelerate(&sdp->srcu_cblist, s);
+ if (ULONG_CMP_LT(sdp->srcu_gp_seq_needed, s)) {
+ sdp->srcu_gp_seq_needed = s;
+ needgp = true;
+ }
+ if (!do_norm && ULONG_CMP_LT(sdp->srcu_gp_seq_needed_exp, s)) {
+ sdp->srcu_gp_seq_needed_exp = s;
+ needexp = true;
+ }
+ spin_unlock_irqrestore(&sdp->lock, flags);
+ if (needgp)
+ srcu_funnel_gp_start(sp, sdp, s, do_norm);
+ else if (needexp)
+ srcu_funnel_exp_start(sp, sdp->mynode, s);
+}
+
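+/**
+ * call_srcu() - Queue a callback for invocation after an SRCU grace period
+ * @sp: srcu_struct on which to queue the callback.
+ * @rhp: structure to be used for queueing the SRCU callback.
+ * @func: function to be invoked after the SRCU grace period.
+ *
+ * Thin wrapper around __call_srcu() that always requests a normal
+ * (non-expedited) grace period; see the header comment above
+ * __call_srcu() for the memory-ordering guarantees.
+ */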
+void call_srcu(struct srcu_struct *sp, struct rcu_head *rhp,
+ rcu_callback_t func)
+{
+ __call_srcu(sp, rhp, func, true);
+}
+EXPORT_SYMBOL_GPL(call_srcu);
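+
+/*
+ * Purely illustrative usage sketch for call_srcu(); "struct foo",
+ * "foo_srcu", foo_reclaim(), and "victim" are hypothetical names
+ * rather than anything defined in this file:
+ *
+ *	struct foo {
+ *		struct rcu_head rh;
+ *		int data;
+ *	};
+ *
+ *	static void foo_reclaim(struct rcu_head *rhp)
+ *	{
+ *		kfree(container_of(rhp, struct foo, rh));
+ *	}
+ *
+ *	call_srcu(&foo_srcu, &victim->rh, foo_reclaim);
+ *
+ * foo_reclaim() will then be invoked from process context once all
+ * SRCU readers of foo_srcu that might still reference "victim" have
+ * completed.
+ */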
+
+/*
+ * Helper function for synchronize_srcu() and synchronize_srcu_expedited().
+ */
+static void __synchronize_srcu(struct srcu_struct *sp, bool do_norm)
+{
+ struct rcu_synchronize rcu;
+
+ RCU_LOCKDEP_WARN(lock_is_held(&sp->dep_map) ||
+ lock_is_held(&rcu_bh_lock_map) ||
+ lock_is_held(&rcu_lock_map) ||
+ lock_is_held(&rcu_sched_lock_map),
+ "Illegal synchronize_srcu() in same-type SRCU (or in RCU) read-side critical section");
+
+ if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
+ return;
+ might_sleep();
+ check_init_srcu_struct(sp);
+ init_completion(&rcu.completion);
+ init_rcu_head_on_stack(&rcu.head);
+ __call_srcu(sp, &rcu.head, wakeme_after_rcu, do_norm);
+ wait_for_completion(&rcu.completion);
+ destroy_rcu_head_on_stack(&rcu.head);
+}
+
+/**
+ * synchronize_srcu_expedited - Brute-force SRCU grace period
+ * @sp: srcu_struct with which to synchronize.
+ *
+ * Wait for an SRCU grace period to elapse, but be more aggressive about
+ * spinning rather than blocking when waiting.
+ *
+ * Note that synchronize_srcu_expedited() has the same deadlock and
+ * memory-ordering properties as does synchronize_srcu().
+ */
+void synchronize_srcu_expedited(struct srcu_struct *sp)
+{
+ __synchronize_srcu(sp, rcu_gp_is_normal());
+}
+EXPORT_SYMBOL_GPL(synchronize_srcu_expedited);
+
+/**
+ * synchronize_srcu - wait for prior SRCU read-side critical-section completion
+ * @sp: srcu_struct with which to synchronize.
+ *
+ * Wait for the counts of both index values to drain to zero. To avoid
+ * possible starvation of synchronize_srcu(), it first waits for the
+ * count of index ((->srcu_idx & 1) ^ 1) to drain to zero, and then
+ * flips ->srcu_idx and waits for the count of the other index.
+ *
+ * Can block; must be called from process context.
+ *
+ * Note that it is illegal to call synchronize_srcu() from the corresponding
+ * SRCU read-side critical section; doing so will result in deadlock.
+ * However, it is perfectly legal to call synchronize_srcu() on one
+ * srcu_struct from some other srcu_struct's read-side critical section,
+ * as long as the resulting graph of srcu_structs is acyclic.
+ *
+ * There are memory-ordering constraints implied by synchronize_srcu().
+ * On systems with more than one CPU, when synchronize_srcu() returns,
+ * each CPU is guaranteed to have executed a full memory barrier since
+ * the end of its last corresponding SRCU read-side critical section
+ * whose beginning preceded the call to synchronize_srcu(). In addition,
+ * each CPU having an SRCU read-side critical section that extends beyond
+ * the return from synchronize_srcu() is guaranteed to have executed a
+ * full memory barrier after the beginning of synchronize_srcu() and before
+ * the beginning of that SRCU read-side critical section. Note that these
+ * guarantees include CPUs that are offline, idle, or executing in user mode,
+ * as well as CPUs that are executing in the kernel.
+ *
+ * Furthermore, if CPU A invoked synchronize_srcu(), which returned
+ * to its caller on CPU B, then both CPU A and CPU B are guaranteed
+ * to have executed a full memory barrier during the execution of
+ * synchronize_srcu(). This guarantee applies even if CPU A and CPU B
+ * are the same CPU, but again only if the system has more than one CPU.
+ *
+ * Of course, these memory-ordering guarantees apply only when
+ * synchronize_srcu(), srcu_read_lock(), and srcu_read_unlock() are
+ * passed the same srcu_struct structure.
+ *
+ * If SRCU is likely idle, expedite the first request. This semantic
+ * was provided by Classic SRCU, and is relied upon by its users, so TREE
+ * SRCU must also provide it. Note that detecting idleness is heuristic
+ * and subject to both false positives and negatives.
+ */
+void synchronize_srcu(struct srcu_struct *sp)
+{
+ if (srcu_might_be_idle(sp) || rcu_gp_is_expedited())
+ synchronize_srcu_expedited(sp);
+ else
+ __synchronize_srcu(sp, true);
+}
+EXPORT_SYMBOL_GPL(synchronize_srcu);
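+
+/*
+ * Purely illustrative reader/updater sketch for synchronize_srcu();
+ * "my_srcu" and "gp" are hypothetical, assuming DEFINE_SRCU(my_srcu)
+ * and an SRCU-protected pointer gp, with updaters serialized by the
+ * caller:
+ *
+ *	Reader:
+ *		idx = srcu_read_lock(&my_srcu);
+ *		p = srcu_dereference(gp, &my_srcu);
+ *		do_something_with(p);
+ *		srcu_read_unlock(&my_srcu, idx);
+ *
+ *	Updater:
+ *		old = gp;
+ *		rcu_assign_pointer(gp, new);
+ *		synchronize_srcu(&my_srcu);
+ *		kfree(old);  (no reader can still hold a reference to old)
+ */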
+
+/*
+ * Callback function for srcu_barrier() use.
+ */
+static void srcu_barrier_cb(struct rcu_head *rhp)
+{
+ struct srcu_data *sdp;
+ struct srcu_struct *sp;
+
+ sdp = container_of(rhp, struct srcu_data, srcu_barrier_head);
+ sp = sdp->sp;
+ if (atomic_dec_and_test(&sp->srcu_barrier_cpu_cnt))
+ complete(&sp->srcu_barrier_completion);
+}
+
+/**
+ * srcu_barrier - Wait until all in-flight call_srcu() callbacks complete.
+ * @sp: srcu_struct on which to wait for in-flight callbacks.
+ */
+void srcu_barrier(struct srcu_struct *sp)
+{
+ int cpu;
+ struct srcu_data *sdp;
+ unsigned long s = rcu_seq_snap(&sp->srcu_barrier_seq);
+
+ check_init_srcu_struct(sp);
+ mutex_lock(&sp->srcu_barrier_mutex);
+ if (rcu_seq_done(&sp->srcu_barrier_seq, s)) {
+ smp_mb(); /* Force ordering following return. */
+ mutex_unlock(&sp->srcu_barrier_mutex);
+ return; /* Someone else did our work for us. */
+ }
+ rcu_seq_start(&sp->srcu_barrier_seq);
+ init_completion(&sp->srcu_barrier_completion);
+
+ /* Initial count prevents reaching zero until all CBs are posted. */
+ atomic_set(&sp->srcu_barrier_cpu_cnt, 1);
+
+ /*
+ * Each pass through this loop enqueues a callback, but only
+ * on CPUs already having callbacks enqueued. Note that if
+ * a CPU already has callbacks enqueued, it must have already
+ * registered the need for a future grace period, so all we
+ * need do is enqueue a callback that will use the same
+ * grace period as the last callback already in the queue.
+ */
+ for_each_possible_cpu(cpu) {
+ sdp = per_cpu_ptr(sp->sda, cpu);
+ spin_lock_irq(&sdp->lock);
+ atomic_inc(&sp->srcu_barrier_cpu_cnt);
+ sdp->srcu_barrier_head.func = srcu_barrier_cb;
+ if (!rcu_segcblist_entrain(&sdp->srcu_cblist,
+ &sdp->srcu_barrier_head, 0))
+ atomic_dec(&sp->srcu_barrier_cpu_cnt);
+ spin_unlock_irq(&sdp->lock);
+ }
+
+ /* Remove the initial count, at which point reaching zero can happen. */
+ if (atomic_dec_and_test(&sp->srcu_barrier_cpu_cnt))
+ complete(&sp->srcu_barrier_completion);
+ wait_for_completion(&sp->srcu_barrier_completion);
+
+ rcu_seq_end(&sp->srcu_barrier_seq);
+ mutex_unlock(&sp->srcu_barrier_mutex);
+}
+EXPORT_SYMBOL_GPL(srcu_barrier);
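+
+/*
+ * Illustrative (hypothetical) teardown sketch: a user that has posted
+ * callbacks with call_srcu() typically invokes srcu_barrier() before
+ * freeing the srcu_struct or unloading the module that provides the
+ * callback functions:
+ *
+ *	call_srcu(&my_srcu, &p->rh, my_cb);	(possibly many such calls)
+ *	...
+ *	srcu_barrier(&my_srcu);			(all my_cb invocations done)
+ *	cleanup_srcu_struct(&my_srcu);
+ */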
+
+/**
+ * srcu_batches_completed - return batches completed.
+ * @sp: srcu_struct on which to report batch completion.
+ *
+ * Report the number of batches, correlated with, but not necessarily
+ * precisely the same as, the number of grace periods that have elapsed.
+ */
+unsigned long srcu_batches_completed(struct srcu_struct *sp)
+{
+ return sp->srcu_idx;
+}
+EXPORT_SYMBOL_GPL(srcu_batches_completed);
+
+/*
+ * Core SRCU state machine. Push state bits of ->srcu_gp_seq
+ * to SRCU_STATE_SCAN2, and invoke srcu_gp_end() when scan has
+ * completed in that state.
+ */
+static void srcu_advance_state(struct srcu_struct *sp)
+{
+ int idx;
+
+ mutex_lock(&sp->srcu_gp_mutex);
+
+ /*
+ * Because readers might be delayed for an extended period after
+ * fetching ->srcu_idx for their index, at any point in time there
+ * might well be readers using both idx=0 and idx=1. We therefore
+ * need to wait for readers to clear from both index values before
+ * invoking a callback.
+ *
+ * The load-acquire ensures that we see the accesses performed
+ * by the prior grace period.
+ */
+ idx = rcu_seq_state(smp_load_acquire(&sp->srcu_gp_seq)); /* ^^^ */
+ if (idx == SRCU_STATE_IDLE) {
+ spin_lock_irq(&sp->gp_lock);
+ if (ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed)) {
+ WARN_ON_ONCE(rcu_seq_state(sp->srcu_gp_seq));
+ spin_unlock_irq(&sp->gp_lock);
+ mutex_unlock(&sp->srcu_gp_mutex);
+ return;
+ }
+ idx = rcu_seq_state(READ_ONCE(sp->srcu_gp_seq));
+ if (idx == SRCU_STATE_IDLE)
+ srcu_gp_start(sp);
+ spin_unlock_irq(&sp->gp_lock);
+ if (idx != SRCU_STATE_IDLE) {
+ mutex_unlock(&sp->srcu_gp_mutex);
+ return; /* Someone else started the grace period. */
+ }
+ }
+
+ if (rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) == SRCU_STATE_SCAN1) {
+ idx = 1 ^ (sp->srcu_idx & 1);
+ if (!try_check_zero(sp, idx, 1)) {
+ mutex_unlock(&sp->srcu_gp_mutex);
+ return; /* readers present, retry later. */
+ }
+ srcu_flip(sp);
+ rcu_seq_set_state(&sp->srcu_gp_seq, SRCU_STATE_SCAN2);
+ }
+
+ if (rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) == SRCU_STATE_SCAN2) {
+
+ /*
+ * SRCU read-side critical sections are normally short,
+ * so check at least twice in quick succession after a flip.
+ */
+ idx = 1 ^ (sp->srcu_idx & 1);
+ if (!try_check_zero(sp, idx, 2)) {
+ mutex_unlock(&sp->srcu_gp_mutex);
+ return; /* readers present, retry later. */
+ }
+ srcu_gp_end(sp); /* Releases ->srcu_gp_mutex. */
+ }
+}
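+
+/*
+ * Informal sketch of the state flow driven above; each transition can
+ * require several invocations of srcu_advance_state() if readers persist:
+ *
+ *	SRCU_STATE_IDLE:  start a new grace period via srcu_gp_start().
+ *	SRCU_STATE_SCAN1: wait for readers of one index to drain, then
+ *			  srcu_flip() and advance to SRCU_STATE_SCAN2.
+ *	SRCU_STATE_SCAN2: wait for readers of the other index to drain,
+ *			  then srcu_gp_end() completes the grace period.
+ */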
+
+/*
+ * Invoke a limited number of SRCU callbacks that have passed through
+ * their grace period. If there are more to do, SRCU will reschedule
+ * the workqueue. Note that needed memory barriers have been executed
+ * in this task's context by srcu_readers_active_idx_check().
+ */
+static void srcu_invoke_callbacks(struct work_struct *work)
+{
+ bool more;
+ struct rcu_cblist ready_cbs;
+ struct rcu_head *rhp;
+ struct srcu_data *sdp;
+ struct srcu_struct *sp;
+
+ sdp = container_of(work, struct srcu_data, work.work);
+ sp = sdp->sp;
+ rcu_cblist_init(&ready_cbs);
+ spin_lock_irq(&sdp->lock);
+ smp_mb(); /* Old grace periods before callback invocation! */
+ rcu_segcblist_advance(&sdp->srcu_cblist,
+ rcu_seq_current(&sp->srcu_gp_seq));
+ if (sdp->srcu_cblist_invoking ||
+ !rcu_segcblist_ready_cbs(&sdp->srcu_cblist)) {
+ spin_unlock_irq(&sdp->lock);
+ return; /* Someone else on the job or nothing to do. */
+ }
+
+ /* We are on the job! Extract and invoke ready callbacks. */
+ sdp->srcu_cblist_invoking = true;
+ rcu_segcblist_extract_done_cbs(&sdp->srcu_cblist, &ready_cbs);
+ spin_unlock_irq(&sdp->lock);
+ rhp = rcu_cblist_dequeue(&ready_cbs);
+ for (; rhp != NULL; rhp = rcu_cblist_dequeue(&ready_cbs)) {
+ local_bh_disable();
+ rhp->func(rhp);
+ local_bh_enable();
+ }
+
+ /*
+ * Update counts, accelerate new callbacks, and if needed,
+ * schedule another round of callback invocation.
+ */
+ spin_lock_irq(&sdp->lock);
+ rcu_segcblist_insert_count(&sdp->srcu_cblist, &ready_cbs);
+ (void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
+ rcu_seq_snap(&sp->srcu_gp_seq));
+ sdp->srcu_cblist_invoking = false;
+ more = rcu_segcblist_ready_cbs(&sdp->srcu_cblist);
+ spin_unlock_irq(&sdp->lock);
+ if (more)
+ srcu_schedule_cbs_sdp(sdp, 0);
+}
+
+/*
+ * Finished one round of SRCU grace-period processing. Start another if there are
+ * more SRCU callbacks queued, otherwise put SRCU into not-running state.
+ */
+static void srcu_reschedule(struct srcu_struct *sp, unsigned long delay)
+{
+ bool pushgp = true;
+
+ spin_lock_irq(&sp->gp_lock);
+ if (ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed)) {
+ if (!WARN_ON_ONCE(rcu_seq_state(sp->srcu_gp_seq))) {
+ /* All requests fulfilled, time to go idle. */
+ pushgp = false;
+ }
+ } else if (!rcu_seq_state(sp->srcu_gp_seq)) {
+ /* Outstanding request and no GP. Start one. */
+ srcu_gp_start(sp);
+ }
+ spin_unlock_irq(&sp->gp_lock);
+
+ if (pushgp)
+ queue_delayed_work(system_power_efficient_wq, &sp->work, delay);
+}
+
+/*
+ * This is the work-queue function that handles SRCU grace periods.
+ */
+void process_srcu(struct work_struct *work)
+{
+ struct srcu_struct *sp;
+
+ sp = container_of(work, struct srcu_struct, work.work);
+
+ srcu_advance_state(sp);
+ srcu_reschedule(sp, srcu_get_delay(sp));
+}
+EXPORT_SYMBOL_GPL(process_srcu);
+
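+/*
+ * Report SRCU grace-period sequence information to the rcutorture
+ * framework: the counter portion of ->srcu_gp_seq as "completed" and
+ * that of ->srcu_gp_seq_needed as "gpnum". Requests for other RCU
+ * flavors are ignored.
+ */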
+void srcutorture_get_gp_data(enum rcutorture_type test_type,
+ struct srcu_struct *sp, int *flags,
+ unsigned long *gpnum, unsigned long *completed)
+{
+ if (test_type != SRCU_FLAVOR)
+ return;
+ *flags = 0;
+ *completed = rcu_seq_ctr(sp->srcu_gp_seq);
+ *gpnum = rcu_seq_ctr(sp->srcu_gp_seq_needed);
+}
+EXPORT_SYMBOL_GPL(srcutorture_get_gp_data);
diff --git a/kernel/rcu/tiny.c b/kernel/rcu/tiny.c
index 6ad330dbbae2..e5385731e391 100644
--- a/kernel/rcu/tiny.c
+++ b/kernel/rcu/tiny.c
@@ -79,7 +79,7 @@ EXPORT_SYMBOL(__rcu_is_watching);
*/
static int rcu_qsctr_help(struct rcu_ctrlblk *rcp)
{
- RCU_TRACE(reset_cpu_stall_ticks(rcp));
+ RCU_TRACE(reset_cpu_stall_ticks(rcp);)
if (rcp->donetail != rcp->curtail) {
rcp->donetail = rcp->curtail;
return 1;
@@ -125,7 +125,7 @@ void rcu_bh_qs(void)
*/
void rcu_check_callbacks(int user)
{
- RCU_TRACE(check_cpu_stalls());
+ RCU_TRACE(check_cpu_stalls();)
if (user)
rcu_sched_qs();
else if (!in_softirq())
@@ -143,7 +143,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
const char *rn = NULL;
struct rcu_head *next, *list;
unsigned long flags;
- RCU_TRACE(int cb_count = 0);
+ RCU_TRACE(int cb_count = 0;)
/* Move the ready-to-invoke callbacks to a local list. */
local_irq_save(flags);
@@ -152,7 +152,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
local_irq_restore(flags);
return;
}
- RCU_TRACE(trace_rcu_batch_start(rcp->name, 0, rcp->qlen, -1));
+ RCU_TRACE(trace_rcu_batch_start(rcp->name, 0, rcp->qlen, -1);)
list = rcp->rcucblist;
rcp->rcucblist = *rcp->donetail;
*rcp->donetail = NULL;
@@ -162,7 +162,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
local_irq_restore(flags);
/* Invoke the callbacks on the local list. */
- RCU_TRACE(rn = rcp->name);
+ RCU_TRACE(rn = rcp->name;)
while (list) {
next = list->next;
prefetch(next);
@@ -171,9 +171,9 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
__rcu_reclaim(rn, list);
local_bh_enable();
list = next;
- RCU_TRACE(cb_count++);
+ RCU_TRACE(cb_count++;)
}
- RCU_TRACE(rcu_trace_sub_qlen(rcp, cb_count));
+ RCU_TRACE(rcu_trace_sub_qlen(rcp, cb_count);)
RCU_TRACE(trace_rcu_batch_end(rcp->name,
cb_count, 0, need_resched(),
is_idle_task(current),
@@ -221,7 +221,7 @@ static void __call_rcu(struct rcu_head *head,
local_irq_save(flags);
*rcp->curtail = head;
rcp->curtail = &head->next;
- RCU_TRACE(rcp->qlen++);
+ RCU_TRACE(rcp->qlen++;)
local_irq_restore(flags);
if (unlikely(is_idle_task(current))) {
@@ -254,8 +254,8 @@ EXPORT_SYMBOL_GPL(call_rcu_bh);
void __init rcu_init(void)
{
open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
- RCU_TRACE(reset_cpu_stall_ticks(&rcu_sched_ctrlblk));
- RCU_TRACE(reset_cpu_stall_ticks(&rcu_bh_ctrlblk));
+ RCU_TRACE(reset_cpu_stall_ticks(&rcu_sched_ctrlblk);)
+ RCU_TRACE(reset_cpu_stall_ticks(&rcu_bh_ctrlblk);)
rcu_early_boot_tests();
}
diff --git a/kernel/rcu/tiny_plugin.h b/kernel/rcu/tiny_plugin.h
index c64b827ecbca..371034e77f87 100644
--- a/kernel/rcu/tiny_plugin.h
+++ b/kernel/rcu/tiny_plugin.h
@@ -52,7 +52,7 @@ static struct rcu_ctrlblk rcu_bh_ctrlblk = {
RCU_TRACE(.name = "rcu_bh")
};
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
+#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_SRCU)
#include <linux/kernel_stat.h>
int rcu_scheduler_active __read_mostly;
@@ -65,15 +65,16 @@ EXPORT_SYMBOL_GPL(rcu_scheduler_active);
* to RCU_SCHEDULER_RUNNING, skipping the RCU_SCHEDULER_INIT stage.
* The reason for this is that Tiny RCU does not need kthreads, so does
* not have to care about the fact that the scheduler is half-initialized
- * at a certain phase of the boot process.
+ * at a certain phase of the boot process. Unless SRCU is in the mix.
*/
void __init rcu_scheduler_starting(void)
{
WARN_ON(nr_context_switches() > 0);
- rcu_scheduler_active = RCU_SCHEDULER_RUNNING;
+ rcu_scheduler_active = IS_ENABLED(CONFIG_SRCU)
+ ? RCU_SCHEDULER_INIT : RCU_SCHEDULER_RUNNING;
}
-#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+#endif /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_SRCU) */
#ifdef CONFIG_RCU_TRACE
@@ -162,8 +163,8 @@ static void reset_cpu_stall_ticks(struct rcu_ctrlblk *rcp)
static void check_cpu_stalls(void)
{
- RCU_TRACE(check_cpu_stall(&rcu_bh_ctrlblk));
- RCU_TRACE(check_cpu_stall(&rcu_sched_ctrlblk));
+ RCU_TRACE(check_cpu_stall(&rcu_bh_ctrlblk);)
+ RCU_TRACE(check_cpu_stall(&rcu_sched_ctrlblk);)
}
#endif /* #ifdef CONFIG_RCU_TRACE */
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index a6dcf3bd244f..e354e475e645 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -98,8 +98,8 @@ struct rcu_state sname##_state = { \
.gpnum = 0UL - 300UL, \
.completed = 0UL - 300UL, \
.orphan_lock = __RAW_SPIN_LOCK_UNLOCKED(&sname##_state.orphan_lock), \
- .orphan_nxttail = &sname##_state.orphan_nxtlist, \
- .orphan_donetail = &sname##_state.orphan_donelist, \
+ .orphan_pend = RCU_CBLIST_INITIALIZER(sname##_state.orphan_pend), \
+ .orphan_done = RCU_CBLIST_INITIALIZER(sname##_state.orphan_done), \
.barrier_mutex = __MUTEX_INITIALIZER(sname##_state.barrier_mutex), \
.name = RCU_STATE_NAME(sname), \
.abbr = sabbr, \
@@ -124,7 +124,7 @@ static int rcu_fanout_leaf = RCU_FANOUT_LEAF;
module_param(rcu_fanout_leaf, int, 0444);
int rcu_num_lvls __read_mostly = RCU_NUM_LVLS;
/* Number of rcu_nodes at specified level. */
-static int num_rcu_lvl[] = NUM_RCU_LVL_INIT;
+int num_rcu_lvl[] = NUM_RCU_LVL_INIT;
int rcu_num_nodes __read_mostly = NUM_RCU_NODES; /* Total # rcu_nodes in use. */
/* panic() on RCU Stall sysctl. */
int sysctl_panic_on_rcu_stall __read_mostly;
@@ -200,7 +200,7 @@ static const int gp_cleanup_delay;
/*
* Number of grace periods between delays, normalized by the duration of
- * the delay. The longer the the delay, the more the grace periods between
+ * the delay. The longer the delay, the more the grace periods between
* each delay. The reason for this normalization is that it means that,
* for non-zero delays, the overall slowdown of grace periods is constant
* regardless of the duration of the delay. This arrangement balances
@@ -273,11 +273,19 @@ void rcu_bh_qs(void)
}
}
-static DEFINE_PER_CPU(int, rcu_sched_qs_mask);
+/*
+ * Steal a bit from the bottom of ->dynticks for idle entry/exit
+ * control. Initially this is for TLB flushing.
+ */
+#define RCU_DYNTICK_CTRL_MASK 0x1
+#define RCU_DYNTICK_CTRL_CTR (RCU_DYNTICK_CTRL_MASK + 1)
+#ifndef rcu_eqs_special_exit
+#define rcu_eqs_special_exit() do { } while (0)
+#endif
static DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
.dynticks_nesting = DYNTICK_TASK_EXIT_IDLE,
- .dynticks = ATOMIC_INIT(1),
+ .dynticks = ATOMIC_INIT(RCU_DYNTICK_CTRL_CTR),
#ifdef CONFIG_NO_HZ_FULL_SYSIDLE
.dynticks_idle_nesting = DYNTICK_TASK_NEST_VALUE,
.dynticks_idle = ATOMIC_INIT(1),
@@ -305,15 +313,20 @@ bool rcu_irq_enter_disabled(void)
static void rcu_dynticks_eqs_enter(void)
{
struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
- int special;
+ int seq;
/*
- * CPUs seeing atomic_inc_return() must see prior RCU read-side
+ * CPUs seeing atomic_add_return() must see prior RCU read-side
* critical sections, and we also must force ordering with the
* next idle sojourn.
*/
- special = atomic_inc_return(&rdtp->dynticks);
- WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && special & 0x1);
+ seq = atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdtp->dynticks);
+ /* Better be in an extended quiescent state! */
+ WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
+ (seq & RCU_DYNTICK_CTRL_CTR));
+ /* Better not have special action (TLB flush) pending! */
+ WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
+ (seq & RCU_DYNTICK_CTRL_MASK));
}
/*
@@ -323,15 +336,22 @@ static void rcu_dynticks_eqs_enter(void)
static void rcu_dynticks_eqs_exit(void)
{
struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
- int special;
+ int seq;
/*
- * CPUs seeing atomic_inc_return() must see prior idle sojourns,
+ * CPUs seeing atomic_add_return() must see prior idle sojourns,
* and we also must force ordering with the next RCU read-side
* critical section.
*/
- special = atomic_inc_return(&rdtp->dynticks);
- WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !(special & 0x1));
+ seq = atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdtp->dynticks);
+ WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
+ !(seq & RCU_DYNTICK_CTRL_CTR));
+ if (seq & RCU_DYNTICK_CTRL_MASK) {
+ atomic_andnot(RCU_DYNTICK_CTRL_MASK, &rdtp->dynticks);
+ smp_mb__after_atomic(); /* _exit after clearing mask. */
+ /* Prefer duplicate flushes to losing a flush. */
+ rcu_eqs_special_exit();
+ }
}
/*
@@ -348,9 +368,9 @@ static void rcu_dynticks_eqs_online(void)
{
struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
- if (atomic_read(&rdtp->dynticks) & 0x1)
+ if (atomic_read(&rdtp->dynticks) & RCU_DYNTICK_CTRL_CTR)
return;
- atomic_add(0x1, &rdtp->dynticks);
+ atomic_add(RCU_DYNTICK_CTRL_CTR, &rdtp->dynticks);
}
/*
@@ -362,7 +382,7 @@ bool rcu_dynticks_curr_cpu_in_eqs(void)
{
struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
- return !(atomic_read(&rdtp->dynticks) & 0x1);
+ return !(atomic_read(&rdtp->dynticks) & RCU_DYNTICK_CTRL_CTR);
}
/*
@@ -373,7 +393,7 @@ int rcu_dynticks_snap(struct rcu_dynticks *rdtp)
{
int snap = atomic_add_return(0, &rdtp->dynticks);
- return snap;
+ return snap & ~RCU_DYNTICK_CTRL_MASK;
}
/*
@@ -382,7 +402,7 @@ int rcu_dynticks_snap(struct rcu_dynticks *rdtp)
*/
static bool rcu_dynticks_in_eqs(int snap)
{
- return !(snap & 0x1);
+ return !(snap & RCU_DYNTICK_CTRL_CTR);
}
/*
@@ -402,14 +422,34 @@ static bool rcu_dynticks_in_eqs_since(struct rcu_dynticks *rdtp, int snap)
static void rcu_dynticks_momentary_idle(void)
{
struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
- int special = atomic_add_return(2, &rdtp->dynticks);
+ int special = atomic_add_return(2 * RCU_DYNTICK_CTRL_CTR,
+ &rdtp->dynticks);
/* It is illegal to call this from idle state. */
- WARN_ON_ONCE(!(special & 0x1));
+ WARN_ON_ONCE(!(special & RCU_DYNTICK_CTRL_CTR));
}
-DEFINE_PER_CPU_SHARED_ALIGNED(unsigned long, rcu_qs_ctr);
-EXPORT_PER_CPU_SYMBOL_GPL(rcu_qs_ctr);
+/*
+ * Set the special (bottom) bit of the specified CPU so that it
+ * will take special action (such as flushing its TLB) on the
+ * next exit from an extended quiescent state. Returns true if
+ * the bit was successfully set, or false if the CPU was not in
+ * an extended quiescent state.
+ */
+bool rcu_eqs_special_set(int cpu)
+{
+ int old;
+ int new;
+ struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
+
+ do {
+ old = atomic_read(&rdtp->dynticks);
+ if (old & RCU_DYNTICK_CTRL_CTR)
+ return false;
+ new = old | RCU_DYNTICK_CTRL_MASK;
+ } while (atomic_cmpxchg(&rdtp->dynticks, old, new) != old);
+ return true;
+}
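+
+/*
+ * Illustrative (hypothetical) use from a cross-CPU maintenance path such
+ * as a remote TLB flush, assuming the architecture overrides the default
+ * no-op rcu_eqs_special_exit() to perform the flush; flush_fn is a name
+ * invented for this sketch:
+ *
+ *	if (!rcu_eqs_special_set(cpu))
+ *		smp_call_function_single(cpu, flush_fn, NULL, 1);
+ *
+ * If the CPU is in an extended quiescent state, the work is deferred to
+ * its next rcu_dynticks_eqs_exit(); otherwise the caller falls back to
+ * an IPI.
+ */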
/*
* Let the RCU core know that this CPU has gone through the scheduler,
@@ -418,44 +458,14 @@ EXPORT_PER_CPU_SYMBOL_GPL(rcu_qs_ctr);
* memory barriers to let the RCU core know about it, regardless of what
* this CPU might (or might not) do in the near future.
*
- * We inform the RCU core by emulating a zero-duration dyntick-idle
- * period, which we in turn do by incrementing the ->dynticks counter
- * by two.
+ * We inform the RCU core by emulating a zero-duration dyntick-idle period.
*
* The caller must have disabled interrupts.
*/
static void rcu_momentary_dyntick_idle(void)
{
- struct rcu_data *rdp;
- int resched_mask;
- struct rcu_state *rsp;
-
- /*
- * Yes, we can lose flag-setting operations. This is OK, because
- * the flag will be set again after some delay.
- */
- resched_mask = raw_cpu_read(rcu_sched_qs_mask);
- raw_cpu_write(rcu_sched_qs_mask, 0);
-
- /* Find the flavor that needs a quiescent state. */
- for_each_rcu_flavor(rsp) {
- rdp = raw_cpu_ptr(rsp->rda);
- if (!(resched_mask & rsp->flavor_mask))
- continue;
- smp_mb(); /* rcu_sched_qs_mask before cond_resched_completed. */
- if (READ_ONCE(rdp->mynode->completed) !=
- READ_ONCE(rdp->cond_resched_completed))
- continue;
-
- /*
- * Pretend to be momentarily idle for the quiescent state.
- * This allows the grace-period kthread to record the
- * quiescent state, with no need for this CPU to do anything
- * further.
- */
- rcu_dynticks_momentary_idle();
- break;
- }
+ raw_cpu_write(rcu_dynticks.rcu_need_heavy_qs, false);
+ rcu_dynticks_momentary_idle();
}
/*
@@ -463,14 +473,22 @@ static void rcu_momentary_dyntick_idle(void)
* and requires special handling for preemptible RCU.
* The caller must have disabled interrupts.
*/
-void rcu_note_context_switch(void)
+void rcu_note_context_switch(bool preempt)
{
barrier(); /* Avoid RCU read-side critical sections leaking down. */
trace_rcu_utilization(TPS("Start context switch"));
rcu_sched_qs();
rcu_preempt_note_context_switch();
- if (unlikely(raw_cpu_read(rcu_sched_qs_mask)))
+ /* Load rcu_urgent_qs before other flags. */
+ if (!smp_load_acquire(this_cpu_ptr(&rcu_dynticks.rcu_urgent_qs)))
+ goto out;
+ this_cpu_write(rcu_dynticks.rcu_urgent_qs, false);
+ if (unlikely(raw_cpu_read(rcu_dynticks.rcu_need_heavy_qs)))
rcu_momentary_dyntick_idle();
+ this_cpu_inc(rcu_dynticks.rcu_qs_ctr);
+ if (!preempt)
+ rcu_note_voluntary_context_switch_lite(current);
+out:
trace_rcu_utilization(TPS("End context switch"));
barrier(); /* Avoid RCU read-side critical sections leaking up. */
}
@@ -493,29 +511,26 @@ void rcu_all_qs(void)
{
unsigned long flags;
+ if (!raw_cpu_read(rcu_dynticks.rcu_urgent_qs))
+ return;
+ preempt_disable();
+ /* Load rcu_urgent_qs before other flags. */
+ if (!smp_load_acquire(this_cpu_ptr(&rcu_dynticks.rcu_urgent_qs))) {
+ preempt_enable();
+ return;
+ }
+ this_cpu_write(rcu_dynticks.rcu_urgent_qs, false);
barrier(); /* Avoid RCU read-side critical sections leaking down. */
- if (unlikely(raw_cpu_read(rcu_sched_qs_mask))) {
+ if (unlikely(raw_cpu_read(rcu_dynticks.rcu_need_heavy_qs))) {
local_irq_save(flags);
rcu_momentary_dyntick_idle();
local_irq_restore(flags);
}
- if (unlikely(raw_cpu_read(rcu_sched_data.cpu_no_qs.b.exp))) {
- /*
- * Yes, we just checked a per-CPU variable with preemption
- * enabled, so we might be migrated to some other CPU at
- * this point. That is OK because in that case, the
- * migration will supply the needed quiescent state.
- * We might end up needlessly disabling preemption and
- * invoking rcu_sched_qs() on the destination CPU, but
- * the probability and cost are both quite low, so this
- * should not be a problem in practice.
- */
- preempt_disable();
+ if (unlikely(raw_cpu_read(rcu_sched_data.cpu_no_qs.b.exp)))
rcu_sched_qs();
- preempt_enable();
- }
- this_cpu_inc(rcu_qs_ctr);
+ this_cpu_inc(rcu_dynticks.rcu_qs_ctr);
barrier(); /* Avoid RCU read-side critical sections leaking up. */
+ preempt_enable();
}
EXPORT_SYMBOL_GPL(rcu_all_qs);
@@ -704,15 +719,11 @@ void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
default:
break;
}
- if (rsp != NULL) {
- *flags = READ_ONCE(rsp->gp_flags);
- *gpnum = READ_ONCE(rsp->gpnum);
- *completed = READ_ONCE(rsp->completed);
+ if (rsp == NULL)
return;
- }
- *flags = 0;
- *gpnum = 0;
- *completed = 0;
+ *flags = READ_ONCE(rsp->gp_flags);
+ *gpnum = READ_ONCE(rsp->gpnum);
+ *completed = READ_ONCE(rsp->completed);
}
EXPORT_SYMBOL_GPL(rcutorture_get_gp_data);
@@ -728,16 +739,6 @@ void rcutorture_record_progress(unsigned long vernum)
EXPORT_SYMBOL_GPL(rcutorture_record_progress);
/*
- * Does the CPU have callbacks ready to be invoked?
- */
-static int
-cpu_has_callbacks_ready_to_invoke(struct rcu_data *rdp)
-{
- return &rdp->nxtlist != rdp->nxttail[RCU_DONE_TAIL] &&
- rdp->nxttail[RCU_NEXT_TAIL] != NULL;
-}
-
-/*
* Return the root node of the specified rcu_state structure.
*/
static struct rcu_node *rcu_get_root(struct rcu_state *rsp)
@@ -767,21 +768,17 @@ static int rcu_future_needs_gp(struct rcu_state *rsp)
static bool
cpu_needs_another_gp(struct rcu_state *rsp, struct rcu_data *rdp)
{
- int i;
-
if (rcu_gp_in_progress(rsp))
return false; /* No, a grace period is already in progress. */
if (rcu_future_needs_gp(rsp))
return true; /* Yes, a no-CBs CPU needs one. */
- if (!rdp->nxttail[RCU_NEXT_TAIL])
+ if (!rcu_segcblist_is_enabled(&rdp->cblist))
return false; /* No, this is a no-CBs (or offline) CPU. */
- if (*rdp->nxttail[RCU_NEXT_READY_TAIL])
+ if (!rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL))
return true; /* Yes, CPU has newly registered callbacks. */
- for (i = RCU_WAIT_TAIL; i < RCU_NEXT_TAIL; i++)
- if (rdp->nxttail[i - 1] != rdp->nxttail[i] &&
- ULONG_CMP_LT(READ_ONCE(rsp->completed),
- rdp->nxtcompleted[i]))
- return true; /* Yes, CBs for future grace period. */
+ if (rcu_segcblist_future_gp_needed(&rdp->cblist,
+ READ_ONCE(rsp->completed)))
+ return true; /* Yes, CBs for future grace period. */
return false; /* No grace period needed. */
}
@@ -1162,6 +1159,24 @@ bool notrace rcu_is_watching(void)
}
EXPORT_SYMBOL_GPL(rcu_is_watching);
+/*
+ * If a holdout task is actually running, request an urgent quiescent
+ * state from its CPU. This is unsynchronized, so migrations can cause
+ * the request to go to the wrong CPU. Which is OK, all that will happen
+ * is that the CPU's next context switch will be a bit slower and next
+ * time around this task will generate another request.
+ */
+void rcu_request_urgent_qs_task(struct task_struct *t)
+{
+ int cpu;
+
+ barrier();
+ cpu = task_cpu(t);
+ if (!task_curr(t))
+ return; /* This task is not running on that CPU. */
+ smp_store_release(per_cpu_ptr(&rcu_dynticks.rcu_urgent_qs, cpu), true);
+}
+
#if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU)
/*
@@ -1247,7 +1262,8 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
bool *isidle, unsigned long *maxj)
{
unsigned long jtsq;
- int *rcrmp;
+ bool *rnhqp;
+ bool *ruqp;
unsigned long rjtsc;
struct rcu_node *rnp;
@@ -1283,11 +1299,15 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
* might not be the case for nohz_full CPUs looping in the kernel.
*/
rnp = rdp->mynode;
+ ruqp = per_cpu_ptr(&rcu_dynticks.rcu_urgent_qs, rdp->cpu);
if (time_after(jiffies, rdp->rsp->gp_start + jtsq) &&
- READ_ONCE(rdp->rcu_qs_ctr_snap) != per_cpu(rcu_qs_ctr, rdp->cpu) &&
+ READ_ONCE(rdp->rcu_qs_ctr_snap) != per_cpu(rcu_dynticks.rcu_qs_ctr, rdp->cpu) &&
READ_ONCE(rdp->gpnum) == rnp->gpnum && !rdp->gpwrap) {
trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("rqc"));
return 1;
+ } else {
+ /* Load rcu_qs_ctr before store to rcu_urgent_qs. */
+ smp_store_release(ruqp, true);
}
/* Check for the CPU being offline. */
@@ -1304,7 +1324,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
* in-kernel CPU-bound tasks cannot advance grace periods.
* So if the grace period is old enough, make the CPU pay attention.
* Note that the unsynchronized assignments to the per-CPU
- * rcu_sched_qs_mask variable are safe. Yes, setting of
+ * rcu_need_heavy_qs variable are safe. Yes, setting of
* bits can be lost, but they will be set again on the next
* force-quiescent-state pass. So lost bit sets do not result
* in incorrect behavior, merely in a grace period lasting
@@ -1318,16 +1338,13 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
* is set too high, we override with half of the RCU CPU stall
* warning delay.
*/
- rcrmp = &per_cpu(rcu_sched_qs_mask, rdp->cpu);
- if (time_after(jiffies, rdp->rsp->gp_start + jtsq) ||
- time_after(jiffies, rdp->rsp->jiffies_resched)) {
- if (!(READ_ONCE(*rcrmp) & rdp->rsp->flavor_mask)) {
- WRITE_ONCE(rdp->cond_resched_completed,
- READ_ONCE(rdp->mynode->completed));
- smp_mb(); /* ->cond_resched_completed before *rcrmp. */
- WRITE_ONCE(*rcrmp,
- READ_ONCE(*rcrmp) + rdp->rsp->flavor_mask);
- }
+ rnhqp = &per_cpu(rcu_dynticks.rcu_need_heavy_qs, rdp->cpu);
+ if (!READ_ONCE(*rnhqp) &&
+ (time_after(jiffies, rdp->rsp->gp_start + jtsq) ||
+ time_after(jiffies, rdp->rsp->jiffies_resched))) {
+ WRITE_ONCE(*rnhqp, true);
+ /* Store rcu_need_heavy_qs before rcu_urgent_qs. */
+ smp_store_release(ruqp, true);
rdp->rsp->jiffies_resched += 5; /* Re-enable beating. */
}
@@ -1487,7 +1504,8 @@ static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gpnum)
print_cpu_stall_info_end();
for_each_possible_cpu(cpu)
- totqlen += per_cpu_ptr(rsp->rda, cpu)->qlen;
+ totqlen += rcu_segcblist_n_cbs(&per_cpu_ptr(rsp->rda,
+ cpu)->cblist);
pr_cont("(detected by %d, t=%ld jiffies, g=%ld, c=%ld, q=%lu)\n",
smp_processor_id(), (long)(jiffies - rsp->gp_start),
(long)rsp->gpnum, (long)rsp->completed, totqlen);
@@ -1541,7 +1559,8 @@ static void print_cpu_stall(struct rcu_state *rsp)
print_cpu_stall_info(rsp, smp_processor_id());
print_cpu_stall_info_end();
for_each_possible_cpu(cpu)
- totqlen += per_cpu_ptr(rsp->rda, cpu)->qlen;
+ totqlen += rcu_segcblist_n_cbs(&per_cpu_ptr(rsp->rda,
+ cpu)->cblist);
pr_cont(" (t=%lu jiffies g=%ld c=%ld q=%lu)\n",
jiffies - rsp->gp_start,
(long)rsp->gpnum, (long)rsp->completed, totqlen);
@@ -1644,30 +1663,6 @@ void rcu_cpu_stall_reset(void)
}
/*
- * Initialize the specified rcu_data structure's default callback list
- * to empty. The default callback list is the one that is not used by
- * no-callbacks CPUs.
- */
-static void init_default_callback_list(struct rcu_data *rdp)
-{
- int i;
-
- rdp->nxtlist = NULL;
- for (i = 0; i < RCU_NEXT_SIZE; i++)
- rdp->nxttail[i] = &rdp->nxtlist;
-}
-
-/*
- * Initialize the specified rcu_data structure's callback list to empty.
- */
-static void init_callback_list(struct rcu_data *rdp)
-{
- if (init_nocb_callback_list(rdp))
- return;
- init_default_callback_list(rdp);
-}
-
-/*
* Determine the value that ->completed will have at the end of the
* next subsequent grace period. This is used to tag callbacks so that
* a CPU can invoke callbacks in a timely fashion even if that CPU has
@@ -1721,7 +1716,6 @@ rcu_start_future_gp(struct rcu_node *rnp, struct rcu_data *rdp,
unsigned long *c_out)
{
unsigned long c;
- int i;
bool ret = false;
struct rcu_node *rnp_root = rcu_get_root(rdp->rsp);
@@ -1767,13 +1761,11 @@ rcu_start_future_gp(struct rcu_node *rnp, struct rcu_data *rdp,
/*
* Get a new grace-period number. If there really is no grace
* period in progress, it will be smaller than the one we obtained
- * earlier. Adjust callbacks as needed. Note that even no-CBs
- * CPUs have a ->nxtcompleted[] array, so no no-CBs checks needed.
+ * earlier. Adjust callbacks as needed.
*/
c = rcu_cbs_completed(rdp->rsp, rnp_root);
- for (i = RCU_DONE_TAIL; i < RCU_NEXT_TAIL; i++)
- if (ULONG_CMP_LT(c, rdp->nxtcompleted[i]))
- rdp->nxtcompleted[i] = c;
+ if (!rcu_is_nocb_cpu(rdp->cpu))
+ (void)rcu_segcblist_accelerate(&rdp->cblist, c);
/*
 * If the need for the required grace period is already
@@ -1805,9 +1797,7 @@ out:
/*
* Clean up any old requests for the just-ended grace period. Also return
- * whether any additional grace periods have been requested. Also invoke
- * rcu_nocb_gp_cleanup() in order to wake up any no-callbacks kthreads
- * waiting for this grace period to complete.
+ * whether any additional grace periods have been requested.
*/
static int rcu_future_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
{
@@ -1853,57 +1843,27 @@ static void rcu_gp_kthread_wake(struct rcu_state *rsp)
static bool rcu_accelerate_cbs(struct rcu_state *rsp, struct rcu_node *rnp,
struct rcu_data *rdp)
{
- unsigned long c;
- int i;
- bool ret;
-
- /* If the CPU has no callbacks, nothing to do. */
- if (!rdp->nxttail[RCU_NEXT_TAIL] || !*rdp->nxttail[RCU_DONE_TAIL])
- return false;
-
- /*
- * Starting from the sublist containing the callbacks most
- * recently assigned a ->completed number and working down, find the
- * first sublist that is not assignable to an upcoming grace period.
- * Such a sublist has something in it (first two tests) and has
- * a ->completed number assigned that will complete sooner than
- * the ->completed number for newly arrived callbacks (last test).
- *
- * The key point is that any later sublist can be assigned the
- * same ->completed number as the newly arrived callbacks, which
- * means that the callbacks in any of these later sublist can be
- * grouped into a single sublist, whether or not they have already
- * been assigned a ->completed number.
- */
- c = rcu_cbs_completed(rsp, rnp);
- for (i = RCU_NEXT_TAIL - 1; i > RCU_DONE_TAIL; i--)
- if (rdp->nxttail[i] != rdp->nxttail[i - 1] &&
- !ULONG_CMP_GE(rdp->nxtcompleted[i], c))
- break;
+ bool ret = false;
- /*
- * If there are no sublist for unassigned callbacks, leave.
- * At the same time, advance "i" one sublist, so that "i" will
- * index into the sublist where all the remaining callbacks should
- * be grouped into.
- */
- if (++i >= RCU_NEXT_TAIL)
+ /* If no pending (not yet ready to invoke) callbacks, nothing to do. */
+ if (!rcu_segcblist_pend_cbs(&rdp->cblist))
return false;
/*
- * Assign all subsequent callbacks' ->completed number to the next
- * full grace period and group them all in the sublist initially
- * indexed by "i".
+ * Callbacks are often registered with incomplete grace-period
+ * information. Something about the fact that getting exact
+ * information requires acquiring a global lock... RCU therefore
+ * makes a conservative estimate of the grace period number at which
+ * a given callback will become ready to invoke. The following
+ * code checks this estimate and improves it when possible, thus
+ * accelerating callback invocation to an earlier grace-period
+ * number.
*/
- for (; i <= RCU_NEXT_TAIL; i++) {
- rdp->nxttail[i] = rdp->nxttail[RCU_NEXT_TAIL];
- rdp->nxtcompleted[i] = c;
- }
- /* Record any needed additional grace periods. */
- ret = rcu_start_future_gp(rnp, rdp, NULL);
+ if (rcu_segcblist_accelerate(&rdp->cblist, rcu_cbs_completed(rsp, rnp)))
+ ret = rcu_start_future_gp(rnp, rdp, NULL);
/* Trace depending on how much we were able to accelerate. */
- if (!*rdp->nxttail[RCU_WAIT_TAIL])
+ if (rcu_segcblist_restempty(&rdp->cblist, RCU_WAIT_TAIL))
trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("AccWaitCB"));
else
trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("AccReadyCB"));
@@ -1923,32 +1883,15 @@ static bool rcu_accelerate_cbs(struct rcu_state *rsp, struct rcu_node *rnp,
static bool rcu_advance_cbs(struct rcu_state *rsp, struct rcu_node *rnp,
struct rcu_data *rdp)
{
- int i, j;
-
- /* If the CPU has no callbacks, nothing to do. */
- if (!rdp->nxttail[RCU_NEXT_TAIL] || !*rdp->nxttail[RCU_DONE_TAIL])
+ /* If no pending (not yet ready to invoke) callbacks, nothing to do. */
+ if (!rcu_segcblist_pend_cbs(&rdp->cblist))
return false;
/*
* Find all callbacks whose ->completed numbers indicate that they
* are ready to invoke, and put them into the RCU_DONE_TAIL sublist.
*/
- for (i = RCU_WAIT_TAIL; i < RCU_NEXT_TAIL; i++) {
- if (ULONG_CMP_LT(rnp->completed, rdp->nxtcompleted[i]))
- break;
- rdp->nxttail[RCU_DONE_TAIL] = rdp->nxttail[i];
- }
- /* Clean up any sublist tail pointers that were misordered above. */
- for (j = RCU_WAIT_TAIL; j < i; j++)
- rdp->nxttail[j] = rdp->nxttail[RCU_DONE_TAIL];
-
- /* Copy down callbacks to fill in empty sublists. */
- for (j = RCU_WAIT_TAIL; i < RCU_NEXT_TAIL; i++, j++) {
- if (rdp->nxttail[j] == rdp->nxttail[RCU_NEXT_TAIL])
- break;
- rdp->nxttail[j] = rdp->nxttail[i];
- rdp->nxtcompleted[j] = rdp->nxtcompleted[i];
- }
+ rcu_segcblist_advance(&rdp->cblist, rnp->completed);
/* Classify any remaining callbacks. */
return rcu_accelerate_cbs(rsp, rnp, rdp);
@@ -1993,7 +1936,7 @@ static bool __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp,
trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpustart"));
need_gp = !!(rnp->qsmask & rdp->grpmask);
rdp->cpu_no_qs.b.norm = need_gp;
- rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_qs_ctr);
+ rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_dynticks.rcu_qs_ctr);
rdp->core_needs_qs = need_gp;
zero_cpu_stall_ticks(rdp);
WRITE_ONCE(rdp->gpwrap, false);
@@ -2591,7 +2534,7 @@ rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp)
* within the current grace period.
*/
rdp->cpu_no_qs.b.norm = true; /* need qs for new gp. */
- rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_qs_ctr);
+ rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_dynticks.rcu_qs_ctr);
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
return;
}
@@ -2665,13 +2608,8 @@ rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
* because _rcu_barrier() excludes CPU-hotplug operations, so it
* cannot be running now. Thus no memory barrier is required.
*/
- if (rdp->nxtlist != NULL) {
- rsp->qlen_lazy += rdp->qlen_lazy;
- rsp->qlen += rdp->qlen;
- rdp->n_cbs_orphaned += rdp->qlen;
- rdp->qlen_lazy = 0;
- WRITE_ONCE(rdp->qlen, 0);
- }
+ rdp->n_cbs_orphaned += rcu_segcblist_n_cbs(&rdp->cblist);
+ rcu_segcblist_extract_count(&rdp->cblist, &rsp->orphan_done);
/*
* Next, move those callbacks still needing a grace period to
@@ -2679,31 +2617,18 @@ rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
* Some of the callbacks might have gone partway through a grace
* period, but that is too bad. They get to start over because we
* cannot assume that grace periods are synchronized across CPUs.
- * We don't bother updating the ->nxttail[] array yet, instead
- * we just reset the whole thing later on.
*/
- if (*rdp->nxttail[RCU_DONE_TAIL] != NULL) {
- *rsp->orphan_nxttail = *rdp->nxttail[RCU_DONE_TAIL];
- rsp->orphan_nxttail = rdp->nxttail[RCU_NEXT_TAIL];
- *rdp->nxttail[RCU_DONE_TAIL] = NULL;
- }
+ rcu_segcblist_extract_pend_cbs(&rdp->cblist, &rsp->orphan_pend);
/*
* Then move the ready-to-invoke callbacks to the orphanage,
* where some other CPU will pick them up. These will not be
* required to pass though another grace period: They are done.
*/
- if (rdp->nxtlist != NULL) {
- *rsp->orphan_donetail = rdp->nxtlist;
- rsp->orphan_donetail = rdp->nxttail[RCU_DONE_TAIL];
- }
+ rcu_segcblist_extract_done_cbs(&rdp->cblist, &rsp->orphan_done);
- /*
- * Finally, initialize the rcu_data structure's list to empty and
- * disallow further callbacks on this CPU.
- */
- init_callback_list(rdp);
- rdp->nxttail[RCU_NEXT_TAIL] = NULL;
+ /* Finally, disallow further callbacks on this CPU. */
+ rcu_segcblist_disable(&rdp->cblist);
}
/*
@@ -2712,7 +2637,6 @@ rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
*/
static void rcu_adopt_orphan_cbs(struct rcu_state *rsp, unsigned long flags)
{
- int i;
struct rcu_data *rdp = raw_cpu_ptr(rsp->rda);
/* No-CBs CPUs are handled specially. */
@@ -2721,13 +2645,10 @@ static void rcu_adopt_orphan_cbs(struct rcu_state *rsp, unsigned long flags)
return;
/* Do the accounting first. */
- rdp->qlen_lazy += rsp->qlen_lazy;
- rdp->qlen += rsp->qlen;
- rdp->n_cbs_adopted += rsp->qlen;
- if (rsp->qlen_lazy != rsp->qlen)
+ rdp->n_cbs_adopted += rsp->orphan_done.len;
+ if (rsp->orphan_done.len_lazy != rsp->orphan_done.len)
rcu_idle_count_callbacks_posted();
- rsp->qlen_lazy = 0;
- rsp->qlen = 0;
+ rcu_segcblist_insert_count(&rdp->cblist, &rsp->orphan_done);
/*
* We do not need a memory barrier here because the only way we
@@ -2735,24 +2656,13 @@ static void rcu_adopt_orphan_cbs(struct rcu_state *rsp, unsigned long flags)
* we are the task doing the rcu_barrier().
*/
- /* First adopt the ready-to-invoke callbacks. */
- if (rsp->orphan_donelist != NULL) {
- *rsp->orphan_donetail = *rdp->nxttail[RCU_DONE_TAIL];
- *rdp->nxttail[RCU_DONE_TAIL] = rsp->orphan_donelist;
- for (i = RCU_NEXT_SIZE - 1; i >= RCU_DONE_TAIL; i--)
- if (rdp->nxttail[i] == rdp->nxttail[RCU_DONE_TAIL])
- rdp->nxttail[i] = rsp->orphan_donetail;
- rsp->orphan_donelist = NULL;
- rsp->orphan_donetail = &rsp->orphan_donelist;
- }
-
- /* And then adopt the callbacks that still need a grace period. */
- if (rsp->orphan_nxtlist != NULL) {
- *rdp->nxttail[RCU_NEXT_TAIL] = rsp->orphan_nxtlist;
- rdp->nxttail[RCU_NEXT_TAIL] = rsp->orphan_nxttail;
- rsp->orphan_nxtlist = NULL;
- rsp->orphan_nxttail = &rsp->orphan_nxtlist;
- }
+ /* First adopt the ready-to-invoke callbacks, then the done ones. */
+ rcu_segcblist_insert_done_cbs(&rdp->cblist, &rsp->orphan_done);
+ WARN_ON_ONCE(rsp->orphan_done.head);
+ rcu_segcblist_insert_pend_cbs(&rdp->cblist, &rsp->orphan_pend);
+ WARN_ON_ONCE(rsp->orphan_pend.head);
+ WARN_ON_ONCE(rcu_segcblist_empty(&rdp->cblist) !=
+ !rcu_segcblist_n_cbs(&rdp->cblist));
}
/*
@@ -2760,14 +2670,14 @@ static void rcu_adopt_orphan_cbs(struct rcu_state *rsp, unsigned long flags)
*/
static void rcu_cleanup_dying_cpu(struct rcu_state *rsp)
{
- RCU_TRACE(unsigned long mask);
- RCU_TRACE(struct rcu_data *rdp = this_cpu_ptr(rsp->rda));
- RCU_TRACE(struct rcu_node *rnp = rdp->mynode);
+ RCU_TRACE(unsigned long mask;)
+ RCU_TRACE(struct rcu_data *rdp = this_cpu_ptr(rsp->rda);)
+ RCU_TRACE(struct rcu_node *rnp = rdp->mynode;)
if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
return;
- RCU_TRACE(mask = rdp->grpmask);
+ RCU_TRACE(mask = rdp->grpmask;)
trace_rcu_grace_period(rsp->name,
rnp->gpnum + 1 - !!(rnp->qsmask & mask),
TPS("cpuofl"));
@@ -2840,9 +2750,11 @@ static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
rcu_adopt_orphan_cbs(rsp, flags);
raw_spin_unlock_irqrestore(&rsp->orphan_lock, flags);
- WARN_ONCE(rdp->qlen != 0 || rdp->nxtlist != NULL,
- "rcu_cleanup_dead_cpu: Callbacks on offline CPU %d: qlen=%lu, nxtlist=%p\n",
- cpu, rdp->qlen, rdp->nxtlist);
+ WARN_ONCE(rcu_segcblist_n_cbs(&rdp->cblist) != 0 ||
+ !rcu_segcblist_empty(&rdp->cblist),
+ "rcu_cleanup_dead_cpu: Callbacks on offline CPU %d: qlen=%lu, 1stCB=%p\n",
+ cpu, rcu_segcblist_n_cbs(&rdp->cblist),
+ rcu_segcblist_first_cb(&rdp->cblist));
}
/*
@@ -2852,14 +2764,17 @@ static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
{
unsigned long flags;
- struct rcu_head *next, *list, **tail;
- long bl, count, count_lazy;
- int i;
+ struct rcu_head *rhp;
+ struct rcu_cblist rcl = RCU_CBLIST_INITIALIZER(rcl);
+ long bl, count;
/* If no callbacks are ready, just return. */
- if (!cpu_has_callbacks_ready_to_invoke(rdp)) {
- trace_rcu_batch_start(rsp->name, rdp->qlen_lazy, rdp->qlen, 0);
- trace_rcu_batch_end(rsp->name, 0, !!READ_ONCE(rdp->nxtlist),
+ if (!rcu_segcblist_ready_cbs(&rdp->cblist)) {
+ trace_rcu_batch_start(rsp->name,
+ rcu_segcblist_n_lazy_cbs(&rdp->cblist),
+ rcu_segcblist_n_cbs(&rdp->cblist), 0);
+ trace_rcu_batch_end(rsp->name, 0,
+ !rcu_segcblist_empty(&rdp->cblist),
need_resched(), is_idle_task(current),
rcu_is_callbacks_kthread());
return;
@@ -2867,73 +2782,61 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
/*
* Extract the list of ready callbacks, disabling to prevent
- * races with call_rcu() from interrupt handlers.
+ * races with call_rcu() from interrupt handlers. Leave the
+ * callback counts, as rcu_barrier() needs to be conservative.
*/
local_irq_save(flags);
WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));
bl = rdp->blimit;
- trace_rcu_batch_start(rsp->name, rdp->qlen_lazy, rdp->qlen, bl);
- list = rdp->nxtlist;
- rdp->nxtlist = *rdp->nxttail[RCU_DONE_TAIL];
- *rdp->nxttail[RCU_DONE_TAIL] = NULL;
- tail = rdp->nxttail[RCU_DONE_TAIL];
- for (i = RCU_NEXT_SIZE - 1; i >= 0; i--)
- if (rdp->nxttail[i] == rdp->nxttail[RCU_DONE_TAIL])
- rdp->nxttail[i] = &rdp->nxtlist;
+ trace_rcu_batch_start(rsp->name, rcu_segcblist_n_lazy_cbs(&rdp->cblist),
+ rcu_segcblist_n_cbs(&rdp->cblist), bl);
+ rcu_segcblist_extract_done_cbs(&rdp->cblist, &rcl);
local_irq_restore(flags);
/* Invoke callbacks. */
- count = count_lazy = 0;
- while (list) {
- next = list->next;
- prefetch(next);
- debug_rcu_head_unqueue(list);
- if (__rcu_reclaim(rsp->name, list))
- count_lazy++;
- list = next;
- /* Stop only if limit reached and CPU has something to do. */
- if (++count >= bl &&
+ rhp = rcu_cblist_dequeue(&rcl);
+ for (; rhp; rhp = rcu_cblist_dequeue(&rcl)) {
+ debug_rcu_head_unqueue(rhp);
+ if (__rcu_reclaim(rsp->name, rhp))
+ rcu_cblist_dequeued_lazy(&rcl);
+ /*
+ * Stop only if limit reached and CPU has something to do.
+ * Note: The rcl structure counts down from zero.
+ */
+ if (-rcl.len >= bl &&
(need_resched() ||
(!is_idle_task(current) && !rcu_is_callbacks_kthread())))
break;
}
local_irq_save(flags);
- trace_rcu_batch_end(rsp->name, count, !!list, need_resched(),
- is_idle_task(current),
- rcu_is_callbacks_kthread());
-
- /* Update count, and requeue any remaining callbacks. */
- if (list != NULL) {
- *tail = rdp->nxtlist;
- rdp->nxtlist = list;
- for (i = 0; i < RCU_NEXT_SIZE; i++)
- if (&rdp->nxtlist == rdp->nxttail[i])
- rdp->nxttail[i] = tail;
- else
- break;
- }
+ count = -rcl.len;
+ trace_rcu_batch_end(rsp->name, count, !!rcl.head, need_resched(),
+ is_idle_task(current), rcu_is_callbacks_kthread());
+
+ /* Update counts and requeue any remaining callbacks. */
+ rcu_segcblist_insert_done_cbs(&rdp->cblist, &rcl);
smp_mb(); /* List handling before counting for rcu_barrier(). */
- rdp->qlen_lazy -= count_lazy;
- WRITE_ONCE(rdp->qlen, rdp->qlen - count);
rdp->n_cbs_invoked += count;
+ rcu_segcblist_insert_count(&rdp->cblist, &rcl);
/* Reinstate batch limit if we have worked down the excess. */
- if (rdp->blimit == LONG_MAX && rdp->qlen <= qlowmark)
+ count = rcu_segcblist_n_cbs(&rdp->cblist);
+ if (rdp->blimit == LONG_MAX && count <= qlowmark)
rdp->blimit = blimit;
/* Reset ->qlen_last_fqs_check trigger if enough CBs have drained. */
- if (rdp->qlen == 0 && rdp->qlen_last_fqs_check != 0) {
+ if (count == 0 && rdp->qlen_last_fqs_check != 0) {
rdp->qlen_last_fqs_check = 0;
rdp->n_force_qs_snap = rsp->n_force_qs;
- } else if (rdp->qlen < rdp->qlen_last_fqs_check - qhimark)
- rdp->qlen_last_fqs_check = rdp->qlen;
- WARN_ON_ONCE((rdp->nxtlist == NULL) != (rdp->qlen == 0));
+ } else if (count < rdp->qlen_last_fqs_check - qhimark)
+ rdp->qlen_last_fqs_check = count;
+ WARN_ON_ONCE(rcu_segcblist_empty(&rdp->cblist) != (count == 0));
local_irq_restore(flags);
/* Re-invoke RCU core processing if there are callbacks remaining. */
- if (cpu_has_callbacks_ready_to_invoke(rdp))
+ if (rcu_segcblist_ready_cbs(&rdp->cblist))
invoke_rcu_core();
}
@@ -3099,7 +3002,7 @@ __rcu_process_callbacks(struct rcu_state *rsp)
bool needwake;
struct rcu_data *rdp = raw_cpu_ptr(rsp->rda);
- WARN_ON_ONCE(rdp->beenonline == 0);
+ WARN_ON_ONCE(!rdp->beenonline);
/* Update RCU state based on any recent quiescent states. */
rcu_check_quiescent_state(rsp, rdp);
@@ -3117,7 +3020,7 @@ __rcu_process_callbacks(struct rcu_state *rsp)
}
/* If there are callbacks ready, invoke them. */
- if (cpu_has_callbacks_ready_to_invoke(rdp))
+ if (rcu_segcblist_ready_cbs(&rdp->cblist))
invoke_rcu_callbacks(rsp, rdp);
/* Do any needed deferred wakeups of rcuo kthreads. */
@@ -3189,7 +3092,8 @@ static void __call_rcu_core(struct rcu_state *rsp, struct rcu_data *rdp,
* invoking force_quiescent_state() if the newly enqueued callback
* is the only one waiting for a grace period to complete.
*/
- if (unlikely(rdp->qlen > rdp->qlen_last_fqs_check + qhimark)) {
+ if (unlikely(rcu_segcblist_n_cbs(&rdp->cblist) >
+ rdp->qlen_last_fqs_check + qhimark)) {
/* Are we ignoring a completed grace period? */
note_gp_changes(rsp, rdp);
@@ -3207,10 +3111,10 @@ static void __call_rcu_core(struct rcu_state *rsp, struct rcu_data *rdp,
/* Give the grace period a kick. */
rdp->blimit = LONG_MAX;
if (rsp->n_force_qs == rdp->n_force_qs_snap &&
- *rdp->nxttail[RCU_DONE_TAIL] != head)
+ rcu_segcblist_first_pend_cb(&rdp->cblist) != head)
force_quiescent_state(rsp);
rdp->n_force_qs_snap = rsp->n_force_qs;
- rdp->qlen_last_fqs_check = rdp->qlen;
+ rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist);
}
}
}
@@ -3250,7 +3154,7 @@ __call_rcu(struct rcu_head *head, rcu_callback_t func,
rdp = this_cpu_ptr(rsp->rda);
/* Add the callback to our list. */
- if (unlikely(rdp->nxttail[RCU_NEXT_TAIL] == NULL) || cpu != -1) {
+ if (unlikely(!rcu_segcblist_is_enabled(&rdp->cblist)) || cpu != -1) {
int offline;
if (cpu != -1)
@@ -3269,23 +3173,21 @@ __call_rcu(struct rcu_head *head, rcu_callback_t func,
*/
BUG_ON(cpu != -1);
WARN_ON_ONCE(!rcu_is_watching());
- if (!likely(rdp->nxtlist))
- init_default_callback_list(rdp);
+ if (rcu_segcblist_empty(&rdp->cblist))
+ rcu_segcblist_init(&rdp->cblist);
}
- WRITE_ONCE(rdp->qlen, rdp->qlen + 1);
- if (lazy)
- rdp->qlen_lazy++;
- else
+ rcu_segcblist_enqueue(&rdp->cblist, head, lazy);
+ if (!lazy)
rcu_idle_count_callbacks_posted();
- smp_mb(); /* Count before adding callback for rcu_barrier(). */
- *rdp->nxttail[RCU_NEXT_TAIL] = head;
- rdp->nxttail[RCU_NEXT_TAIL] = &head->next;
if (__is_kfree_rcu_offset((unsigned long)func))
trace_rcu_kfree_callback(rsp->name, head, (unsigned long)func,
- rdp->qlen_lazy, rdp->qlen);
+ rcu_segcblist_n_lazy_cbs(&rdp->cblist),
+ rcu_segcblist_n_cbs(&rdp->cblist));
else
- trace_rcu_callback(rsp->name, head, rdp->qlen_lazy, rdp->qlen);
+ trace_rcu_callback(rsp->name, head,
+ rcu_segcblist_n_lazy_cbs(&rdp->cblist),
+ rcu_segcblist_n_cbs(&rdp->cblist));
/* Go handle any RCU core processing required. */
__call_rcu_core(rsp, rdp, head, flags);
@@ -3531,41 +3433,6 @@ void cond_synchronize_sched(unsigned long oldstate)
}
EXPORT_SYMBOL_GPL(cond_synchronize_sched);
-/* Adjust sequence number for start of update-side operation. */
-static void rcu_seq_start(unsigned long *sp)
-{
- WRITE_ONCE(*sp, *sp + 1);
- smp_mb(); /* Ensure update-side operation after counter increment. */
- WARN_ON_ONCE(!(*sp & 0x1));
-}
-
-/* Adjust sequence number for end of update-side operation. */
-static void rcu_seq_end(unsigned long *sp)
-{
- smp_mb(); /* Ensure update-side operation before counter increment. */
- WRITE_ONCE(*sp, *sp + 1);
- WARN_ON_ONCE(*sp & 0x1);
-}
-
-/* Take a snapshot of the update side's sequence number. */
-static unsigned long rcu_seq_snap(unsigned long *sp)
-{
- unsigned long s;
-
- s = (READ_ONCE(*sp) + 3) & ~0x1;
- smp_mb(); /* Above access must not bleed into critical section. */
- return s;
-}
-
-/*
- * Given a snapshot from rcu_seq_snap(), determine whether or not a
- * full update-side operation has occurred.
- */
-static bool rcu_seq_done(unsigned long *sp, unsigned long s)
-{
- return ULONG_CMP_GE(READ_ONCE(*sp), s);
-}
-
/*
* Check to see if there is any immediate RCU-related work to be done
* by the current CPU, for the specified type of RCU, returning 1 if so.
@@ -3589,7 +3456,7 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
/* Is the RCU core waiting for a quiescent state from this CPU? */
if (rcu_scheduler_fully_active &&
rdp->core_needs_qs && rdp->cpu_no_qs.b.norm &&
- rdp->rcu_qs_ctr_snap == __this_cpu_read(rcu_qs_ctr)) {
+ rdp->rcu_qs_ctr_snap == __this_cpu_read(rcu_dynticks.rcu_qs_ctr)) {
rdp->n_rp_core_needs_qs++;
} else if (rdp->core_needs_qs && !rdp->cpu_no_qs.b.norm) {
rdp->n_rp_report_qs++;
@@ -3597,7 +3464,7 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
}
/* Does this CPU have callbacks ready to invoke? */
- if (cpu_has_callbacks_ready_to_invoke(rdp)) {
+ if (rcu_segcblist_ready_cbs(&rdp->cblist)) {
rdp->n_rp_cb_ready++;
return 1;
}
@@ -3661,10 +3528,10 @@ static bool __maybe_unused rcu_cpu_has_callbacks(bool *all_lazy)
for_each_rcu_flavor(rsp) {
rdp = this_cpu_ptr(rsp->rda);
- if (!rdp->nxtlist)
+ if (rcu_segcblist_empty(&rdp->cblist))
continue;
hc = true;
- if (rdp->qlen != rdp->qlen_lazy || !all_lazy) {
+ if (rcu_segcblist_n_nonlazy_cbs(&rdp->cblist) || !all_lazy) {
al = false;
break;
}
@@ -3773,7 +3640,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
__call_rcu(&rdp->barrier_head,
rcu_barrier_callback, rsp, cpu, 0);
}
- } else if (READ_ONCE(rdp->qlen)) {
+ } else if (rcu_segcblist_n_cbs(&rdp->cblist)) {
_rcu_barrier_trace(rsp, "OnlineQ", cpu,
rsp->barrier_sequence);
smp_call_function_single(cpu, rcu_barrier_func, rsp, 1);
@@ -3882,8 +3749,9 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
rdp->qlen_last_fqs_check = 0;
rdp->n_force_qs_snap = rsp->n_force_qs;
rdp->blimit = blimit;
- if (!rdp->nxtlist)
- init_callback_list(rdp); /* Re-enable callbacks on this CPU. */
+ if (rcu_segcblist_empty(&rdp->cblist) && /* No early-boot CBs? */
+ !init_nocb_callback_list(rdp))
+ rcu_segcblist_init(&rdp->cblist); /* Re-enable callbacks. */
rdp->dynticks->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
rcu_sysidle_init_percpu_data(rdp->dynticks);
rcu_dynticks_eqs_online();
@@ -3902,12 +3770,16 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
rdp->gpnum = rnp->completed; /* Make CPU later note any new GP. */
rdp->completed = rnp->completed;
rdp->cpu_no_qs.b.norm = true;
- rdp->rcu_qs_ctr_snap = per_cpu(rcu_qs_ctr, cpu);
+ rdp->rcu_qs_ctr_snap = per_cpu(rcu_dynticks.rcu_qs_ctr, cpu);
rdp->core_needs_qs = false;
trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuonl"));
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
}
+/*
+ * Invoked early in the CPU-online process, when pretty much all
+ * services are available. The incoming CPU is not present.
+ */
int rcutree_prepare_cpu(unsigned int cpu)
{
struct rcu_state *rsp;
@@ -3921,6 +3793,9 @@ int rcutree_prepare_cpu(unsigned int cpu)
return 0;
}
+/*
+ * Update RCU priority boost kthread affinity for CPU-hotplug changes.
+ */
static void rcutree_affinity_setting(unsigned int cpu, int outgoing)
{
struct rcu_data *rdp = per_cpu_ptr(rcu_state_p->rda, cpu);
@@ -3928,20 +3803,34 @@ static void rcutree_affinity_setting(unsigned int cpu, int outgoing)
rcu_boost_kthread_setaffinity(rdp->mynode, outgoing);
}
+/*
+ * Near the end of the CPU-online process. Pretty much all services
+ * enabled, and the CPU is now very much alive.
+ */
int rcutree_online_cpu(unsigned int cpu)
{
sync_sched_exp_online_cleanup(cpu);
rcutree_affinity_setting(cpu, -1);
+ if (IS_ENABLED(CONFIG_TREE_SRCU))
+ srcu_online_cpu(cpu);
return 0;
}
+/*
+ * Near the beginning of the CPU-offline process. The CPU is still very
+ * much alive with pretty much all services enabled.
+ */
int rcutree_offline_cpu(unsigned int cpu)
{
rcutree_affinity_setting(cpu, cpu);
+ if (IS_ENABLED(CONFIG_TREE_SRCU))
+ srcu_offline_cpu(cpu);
return 0;
}
-
+/*
+ * Near the end of the offline process. We do only tracing here.
+ */
int rcutree_dying_cpu(unsigned int cpu)
{
struct rcu_state *rsp;
@@ -3951,6 +3840,9 @@ int rcutree_dying_cpu(unsigned int cpu)
return 0;
}
+/*
+ * The outgoing CPU is gone and we are running elsewhere.
+ */
int rcutree_dead_cpu(unsigned int cpu)
{
struct rcu_state *rsp;
@@ -3968,6 +3860,10 @@ int rcutree_dead_cpu(unsigned int cpu)
* incoming CPUs are not allowed to use RCU read-side critical sections
* until this function is called. Failing to observe this restriction
* will result in lockdep splats.
+ *
+ * Note that this function is special in that it is invoked directly
+ * from the incoming CPU rather than from the cpuhp_step mechanism.
+ * This is because this function must be invoked at a precise location.
*/
void rcu_cpu_starting(unsigned int cpu)
{
@@ -3993,9 +3889,6 @@ void rcu_cpu_starting(unsigned int cpu)
* The CPU is exiting the idle loop into the arch_cpu_idle_dead()
* function. We now remove it from the rcu_node tree's ->qsmaskinit
* bit masks.
- * The CPU is exiting the idle loop into the arch_cpu_idle_dead()
- * function. We now remove it from the rcu_node tree's ->qsmaskinit
- * bit masks.
*/
static void rcu_cleanup_dying_idle_cpu(int cpu, struct rcu_state *rsp)
{
@@ -4011,6 +3904,14 @@ static void rcu_cleanup_dying_idle_cpu(int cpu, struct rcu_state *rsp)
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
}
+/*
+ * The outgoing CPU has no further need of RCU, so remove it from
+ * the list of CPUs that RCU must track.
+ *
+ * Note that this function is special in that it is invoked directly
+ * from the outgoing CPU rather than from the cpuhp_step mechanism.
+ * This is because this function must be invoked at a precise location.
+ */
void rcu_report_dead(unsigned int cpu)
{
struct rcu_state *rsp;
@@ -4025,6 +3926,10 @@ void rcu_report_dead(unsigned int cpu)
}
#endif
+/*
+ * On non-huge systems, use expedited RCU grace periods to make suspend
+ * and hibernation run faster.
+ */
static int rcu_pm_notify(struct notifier_block *self,
unsigned long action, void *hcpu)
{
@@ -4095,7 +4000,7 @@ early_initcall(rcu_spawn_gp_kthread);
* task is booting the system, and such primitives are no-ops). After this
* function is called, any synchronous grace-period primitives are run as
* expedited, with the requesting task driving the grace period forward.
- * A later core_initcall() rcu_exp_runtime_mode() will switch to full
+ * A later core_initcall() rcu_set_runtime_mode() will switch to full
* runtime RCU functionality.
*/
void rcu_scheduler_starting(void)
@@ -4108,31 +4013,6 @@ void rcu_scheduler_starting(void)
}
/*
- * Compute the per-level fanout, either using the exact fanout specified
- * or balancing the tree, depending on the rcu_fanout_exact boot parameter.
- */
-static void __init rcu_init_levelspread(int *levelspread, const int *levelcnt)
-{
- int i;
-
- if (rcu_fanout_exact) {
- levelspread[rcu_num_lvls - 1] = rcu_fanout_leaf;
- for (i = rcu_num_lvls - 2; i >= 0; i--)
- levelspread[i] = RCU_FANOUT;
- } else {
- int ccur;
- int cprv;
-
- cprv = nr_cpu_ids;
- for (i = rcu_num_lvls - 1; i >= 0; i--) {
- ccur = levelcnt[i];
- levelspread[i] = (cprv + ccur - 1) / ccur;
- cprv = ccur;
- }
- }
-}
-
-/*
* Helper function for rcu_init() that initializes one rcu_state structure.
*/
static void __init rcu_init_one(struct rcu_state *rsp)
@@ -4141,9 +4021,7 @@ static void __init rcu_init_one(struct rcu_state *rsp)
static const char * const fqs[] = RCU_FQS_NAME_INIT;
static struct lock_class_key rcu_node_class[RCU_NUM_LVLS];
static struct lock_class_key rcu_fqs_class[RCU_NUM_LVLS];
- static u8 fl_mask = 0x1;
- int levelcnt[RCU_NUM_LVLS]; /* # nodes in each level. */
int levelspread[RCU_NUM_LVLS]; /* kids/node in each level. */
int cpustride = 1;
int i;
@@ -4158,20 +4036,16 @@ static void __init rcu_init_one(struct rcu_state *rsp)
/* Initialize the level-tracking arrays. */
- for (i = 0; i < rcu_num_lvls; i++)
- levelcnt[i] = num_rcu_lvl[i];
for (i = 1; i < rcu_num_lvls; i++)
- rsp->level[i] = rsp->level[i - 1] + levelcnt[i - 1];
- rcu_init_levelspread(levelspread, levelcnt);
- rsp->flavor_mask = fl_mask;
- fl_mask <<= 1;
+ rsp->level[i] = rsp->level[i - 1] + num_rcu_lvl[i - 1];
+ rcu_init_levelspread(levelspread, num_rcu_lvl);
/* Initialize the elements themselves, starting from the leaves. */
for (i = rcu_num_lvls - 1; i >= 0; i--) {
cpustride *= levelspread[i];
rnp = rsp->level[i];
- for (j = 0; j < levelcnt[i]; j++, rnp++) {
+ for (j = 0; j < num_rcu_lvl[i]; j++, rnp++) {
raw_spin_lock_init(&ACCESS_PRIVATE(rnp, lock));
lockdep_set_class_and_name(&ACCESS_PRIVATE(rnp, lock),
&rcu_node_class[i], buf[i]);
@@ -4344,6 +4218,8 @@ void __init rcu_init(void)
for_each_online_cpu(cpu) {
rcutree_prepare_cpu(cpu);
rcu_cpu_starting(cpu);
+ if (IS_ENABLED(CONFIG_TREE_SRCU))
+ srcu_online_cpu(cpu);
}
}
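The tree.c hunks above drop this file's private copies of rcu_seq_start()/rcu_seq_end()/rcu_seq_snap()/rcu_seq_done() in favor of shared helpers. As a reminder of what the even/odd scheme does, here is a stand-alone userspace sketch of the idea only; the names are illustrative, and the memory barriers and the wraparound-safe ULONG_CMP_GE() comparison of the kernel helpers are deliberately omitted.

/*
 * Toy model of an even/odd update-side sequence counter: the low bit
 * means "update in progress", and a snapshot is the counter value that
 * must be reached before a full update is known to have completed.
 */
#include <assert.h>
#include <stdio.h>

static unsigned long seq;

static void seq_start(void) { seq++; assert(seq & 0x1); }
static void seq_end(void)   { seq++; assert(!(seq & 0x1)); }

/* "+3, clear low bit" rounds past any update already in progress. */
static unsigned long seq_snap(void) { return (seq + 3) & ~0x1UL; }
static int seq_done(unsigned long s) { return seq >= s; }

int main(void)
{
	unsigned long s = seq_snap();	/* seq == 0, so s == 2 */

	seq_start();			/* update begins: seq == 1 */
	assert(!seq_done(s));
	seq_end();			/* update ends:   seq == 2 */
	assert(seq_done(s));		/* one full update has elapsed */

	seq_start();			/* seq == 3: an update is in flight... */
	s = seq_snap();			/* ...so s == 6: a fresh full update is needed */
	seq_end();			/* seq == 4, still not enough */
	assert(!seq_done(s));
	seq_start();
	seq_end();			/* seq == 6 */
	assert(seq_done(s));
	printf("final seq=%lu\n", seq);
	return 0;
}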
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index ec62a05bfdb3..ba38262c3554 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -30,80 +30,9 @@
#include <linux/seqlock.h>
#include <linux/swait.h>
#include <linux/stop_machine.h>
+#include <linux/rcu_node_tree.h>
-/*
- * Define shape of hierarchy based on NR_CPUS, CONFIG_RCU_FANOUT, and
- * CONFIG_RCU_FANOUT_LEAF.
- * In theory, it should be possible to add more levels straightforwardly.
- * In practice, this did work well going from three levels to four.
- * Of course, your mileage may vary.
- */
-
-#ifdef CONFIG_RCU_FANOUT
-#define RCU_FANOUT CONFIG_RCU_FANOUT
-#else /* #ifdef CONFIG_RCU_FANOUT */
-# ifdef CONFIG_64BIT
-# define RCU_FANOUT 64
-# else
-# define RCU_FANOUT 32
-# endif
-#endif /* #else #ifdef CONFIG_RCU_FANOUT */
-
-#ifdef CONFIG_RCU_FANOUT_LEAF
-#define RCU_FANOUT_LEAF CONFIG_RCU_FANOUT_LEAF
-#else /* #ifdef CONFIG_RCU_FANOUT_LEAF */
-# ifdef CONFIG_64BIT
-# define RCU_FANOUT_LEAF 64
-# else
-# define RCU_FANOUT_LEAF 32
-# endif
-#endif /* #else #ifdef CONFIG_RCU_FANOUT_LEAF */
-
-#define RCU_FANOUT_1 (RCU_FANOUT_LEAF)
-#define RCU_FANOUT_2 (RCU_FANOUT_1 * RCU_FANOUT)
-#define RCU_FANOUT_3 (RCU_FANOUT_2 * RCU_FANOUT)
-#define RCU_FANOUT_4 (RCU_FANOUT_3 * RCU_FANOUT)
-
-#if NR_CPUS <= RCU_FANOUT_1
-# define RCU_NUM_LVLS 1
-# define NUM_RCU_LVL_0 1
-# define NUM_RCU_NODES NUM_RCU_LVL_0
-# define NUM_RCU_LVL_INIT { NUM_RCU_LVL_0 }
-# define RCU_NODE_NAME_INIT { "rcu_node_0" }
-# define RCU_FQS_NAME_INIT { "rcu_node_fqs_0" }
-#elif NR_CPUS <= RCU_FANOUT_2
-# define RCU_NUM_LVLS 2
-# define NUM_RCU_LVL_0 1
-# define NUM_RCU_LVL_1 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1)
-# define NUM_RCU_NODES (NUM_RCU_LVL_0 + NUM_RCU_LVL_1)
-# define NUM_RCU_LVL_INIT { NUM_RCU_LVL_0, NUM_RCU_LVL_1 }
-# define RCU_NODE_NAME_INIT { "rcu_node_0", "rcu_node_1" }
-# define RCU_FQS_NAME_INIT { "rcu_node_fqs_0", "rcu_node_fqs_1" }
-#elif NR_CPUS <= RCU_FANOUT_3
-# define RCU_NUM_LVLS 3
-# define NUM_RCU_LVL_0 1
-# define NUM_RCU_LVL_1 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_2)
-# define NUM_RCU_LVL_2 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1)
-# define NUM_RCU_NODES (NUM_RCU_LVL_0 + NUM_RCU_LVL_1 + NUM_RCU_LVL_2)
-# define NUM_RCU_LVL_INIT { NUM_RCU_LVL_0, NUM_RCU_LVL_1, NUM_RCU_LVL_2 }
-# define RCU_NODE_NAME_INIT { "rcu_node_0", "rcu_node_1", "rcu_node_2" }
-# define RCU_FQS_NAME_INIT { "rcu_node_fqs_0", "rcu_node_fqs_1", "rcu_node_fqs_2" }
-#elif NR_CPUS <= RCU_FANOUT_4
-# define RCU_NUM_LVLS 4
-# define NUM_RCU_LVL_0 1
-# define NUM_RCU_LVL_1 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_3)
-# define NUM_RCU_LVL_2 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_2)
-# define NUM_RCU_LVL_3 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1)
-# define NUM_RCU_NODES (NUM_RCU_LVL_0 + NUM_RCU_LVL_1 + NUM_RCU_LVL_2 + NUM_RCU_LVL_3)
-# define NUM_RCU_LVL_INIT { NUM_RCU_LVL_0, NUM_RCU_LVL_1, NUM_RCU_LVL_2, NUM_RCU_LVL_3 }
-# define RCU_NODE_NAME_INIT { "rcu_node_0", "rcu_node_1", "rcu_node_2", "rcu_node_3" }
-# define RCU_FQS_NAME_INIT { "rcu_node_fqs_0", "rcu_node_fqs_1", "rcu_node_fqs_2", "rcu_node_fqs_3" }
-#else
-# error "CONFIG_RCU_FANOUT insufficient for NR_CPUS"
-#endif /* #if (NR_CPUS) <= RCU_FANOUT_1 */
-
-extern int rcu_num_lvls;
-extern int rcu_num_nodes;
+#include "rcu_segcblist.h"
/*
* Dynticks per-CPU state.
@@ -113,6 +42,9 @@ struct rcu_dynticks {
/* Process level is worth LLONG_MAX/2. */
int dynticks_nmi_nesting; /* Track NMI nesting level. */
atomic_t dynticks; /* Even value for idle, else odd. */
+ bool rcu_need_heavy_qs; /* GP old, need heavy quiescent state. */
+ unsigned long rcu_qs_ctr; /* Light universal quiescent state ctr. */
+ bool rcu_urgent_qs; /* GP old need light quiescent state. */
#ifdef CONFIG_NO_HZ_FULL_SYSIDLE
long long dynticks_idle_nesting;
/* irq/process nesting level from idle. */
@@ -262,41 +194,6 @@ struct rcu_node {
#define leaf_node_cpu_bit(rnp, cpu) (1UL << ((cpu) - (rnp)->grplo))
/*
- * Do a full breadth-first scan of the rcu_node structures for the
- * specified rcu_state structure.
- */
-#define rcu_for_each_node_breadth_first(rsp, rnp) \
- for ((rnp) = &(rsp)->node[0]; \
- (rnp) < &(rsp)->node[rcu_num_nodes]; (rnp)++)
-
-/*
- * Do a breadth-first scan of the non-leaf rcu_node structures for the
- * specified rcu_state structure. Note that if there is a singleton
- * rcu_node tree with but one rcu_node structure, this loop is a no-op.
- */
-#define rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) \
- for ((rnp) = &(rsp)->node[0]; \
- (rnp) < (rsp)->level[rcu_num_lvls - 1]; (rnp)++)
-
-/*
- * Scan the leaves of the rcu_node hierarchy for the specified rcu_state
- * structure. Note that if there is a singleton rcu_node tree with but
- * one rcu_node structure, this loop -will- visit the rcu_node structure.
- * It is still a leaf node, even if it is also the root node.
- */
-#define rcu_for_each_leaf_node(rsp, rnp) \
- for ((rnp) = (rsp)->level[rcu_num_lvls - 1]; \
- (rnp) < &(rsp)->node[rcu_num_nodes]; (rnp)++)
-
-/*
- * Iterate over all possible CPUs in a leaf RCU node.
- */
-#define for_each_leaf_node_possible_cpu(rnp, cpu) \
- for ((cpu) = cpumask_next(rnp->grplo - 1, cpu_possible_mask); \
- cpu <= rnp->grphi; \
- cpu = cpumask_next((cpu), cpu_possible_mask))
-
-/*
* Union to allow "aggregate OR" operation on the need for a quiescent
* state by the normal and expedited grace periods.
*/
@@ -336,34 +233,9 @@ struct rcu_data {
/* period it is aware of. */
/* 2) batch handling */
- /*
- * If nxtlist is not NULL, it is partitioned as follows.
- * Any of the partitions might be empty, in which case the
- * pointer to that partition will be equal to the pointer for
- * the following partition. When the list is empty, all of
- * the nxttail elements point to the ->nxtlist pointer itself,
- * which in that case is NULL.
- *
- * [nxtlist, *nxttail[RCU_DONE_TAIL]):
- * Entries that batch # <= ->completed
- * The grace period for these entries has completed, and
- * the other grace-period-completed entries may be moved
- * here temporarily in rcu_process_callbacks().
- * [*nxttail[RCU_DONE_TAIL], *nxttail[RCU_WAIT_TAIL]):
- * Entries that batch # <= ->completed - 1: waiting for current GP
- * [*nxttail[RCU_WAIT_TAIL], *nxttail[RCU_NEXT_READY_TAIL]):
- * Entries known to have arrived before current GP ended
- * [*nxttail[RCU_NEXT_READY_TAIL], *nxttail[RCU_NEXT_TAIL]):
- * Entries that might have arrived after current GP ended
- * Note that the value of *nxttail[RCU_NEXT_TAIL] will
- * always be NULL, as this is the end of the list.
- */
- struct rcu_head *nxtlist;
- struct rcu_head **nxttail[RCU_NEXT_SIZE];
- unsigned long nxtcompleted[RCU_NEXT_SIZE];
- /* grace periods for sublists. */
- long qlen_lazy; /* # of lazy queued callbacks */
- long qlen; /* # of queued callbacks, incl lazy */
+ struct rcu_segcblist cblist; /* Segmented callback list, with */
+ /* different callbacks waiting for */
+ /* different grace periods. */
long qlen_last_fqs_check;
/* qlen at last check for QS forcing */
unsigned long n_cbs_invoked; /* count of RCU cbs invoked. */
@@ -482,7 +354,6 @@ struct rcu_state {
struct rcu_node *level[RCU_NUM_LVLS + 1];
/* Hierarchy levels (+1 to */
/* shut bogus gcc warning) */
- u8 flavor_mask; /* bit in flavor mask. */
struct rcu_data __percpu *rda; /* pointer of percu rcu_data. */
call_rcu_func_t call; /* call_rcu() flavor. */
int ncpus; /* # CPUs seen so far. */
@@ -502,14 +373,11 @@ struct rcu_state {
raw_spinlock_t orphan_lock ____cacheline_internodealigned_in_smp;
/* Protect following fields. */
- struct rcu_head *orphan_nxtlist; /* Orphaned callbacks that */
+ struct rcu_cblist orphan_pend; /* Orphaned callbacks that */
/* need a grace period. */
- struct rcu_head **orphan_nxttail; /* Tail of above. */
- struct rcu_head *orphan_donelist; /* Orphaned callbacks that */
+ struct rcu_cblist orphan_done; /* Orphaned callbacks that */
/* are ready to invoke. */
- struct rcu_head **orphan_donetail; /* Tail of above. */
- long qlen_lazy; /* Number of lazy callbacks. */
- long qlen; /* Total number of callbacks. */
+ /* (Contains counts.) */
/* End of fields guarded by orphan_lock. */
struct mutex barrier_mutex; /* Guards barrier fields. */
@@ -596,6 +464,7 @@ extern struct rcu_state rcu_preempt_state;
#endif /* #ifdef CONFIG_PREEMPT_RCU */
int rcu_dynticks_snap(struct rcu_dynticks *rdtp);
+bool rcu_eqs_special_set(int cpu);
#ifdef CONFIG_RCU_BOOST
DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
@@ -673,6 +542,14 @@ static bool rcu_nohz_full_cpu(struct rcu_state *rsp);
static void rcu_dynticks_task_enter(void);
static void rcu_dynticks_task_exit(void);
+#ifdef CONFIG_SRCU
+void srcu_online_cpu(unsigned int cpu);
+void srcu_offline_cpu(unsigned int cpu);
+#else /* #ifdef CONFIG_SRCU */
+void srcu_online_cpu(unsigned int cpu) { }
+void srcu_offline_cpu(unsigned int cpu) { }
+#endif /* #else #ifdef CONFIG_SRCU */
+
#endif /* #ifndef RCU_TREE_NONCORE */
#ifdef CONFIG_RCU_TRACE
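The tree.h hunk above replaces the open-coded ->nxtlist/->nxttail[] arrays with struct rcu_segcblist. The idiom both versions rely on is keeping a pointer to the last ->next pointer, so enqueue is O(1) and an empty list is recognized by tail == &head. Below is a minimal userspace sketch of just that idiom; segment tracking, lazy/non-lazy counts and locking are left out, and the names are illustrative rather than the kernel API.

#include <stdio.h>

struct cb {
	struct cb *next;
	int id;
};

struct cblist {
	struct cb *head;
	struct cb **tail;	/* points at the last ->next, or at head */
	long len;
};

static void cblist_init(struct cblist *l)
{
	l->head = NULL;
	l->tail = &l->head;
	l->len = 0;
}

static void cblist_enqueue(struct cblist *l, struct cb *c)
{
	c->next = NULL;
	*l->tail = c;		/* link after the current last element */
	l->tail = &c->next;	/* remember the new "last ->next" slot */
	l->len++;
}

int main(void)
{
	struct cblist l;
	struct cb a = { .id = 1 }, b = { .id = 2 };
	struct cb *p;

	cblist_init(&l);
	printf("empty after init: %d\n", l.tail == &l.head);	/* 1 */
	cblist_enqueue(&l, &a);
	cblist_enqueue(&l, &b);
	for (p = l.head; p; p = p->next)
		printf("cb %d\n", p->id);
	printf("len=%ld empty=%d\n", l.len, l.tail == &l.head);	/* 2, 0 */
	return 0;
}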
diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h
index a7b639ccd46e..e513b4ab1197 100644
--- a/kernel/rcu/tree_exp.h
+++ b/kernel/rcu/tree_exp.h
@@ -292,7 +292,7 @@ static bool exp_funnel_lock(struct rcu_state *rsp, unsigned long s)
trace_rcu_exp_funnel_lock(rsp->name, rnp->level,
rnp->grplo, rnp->grphi,
TPS("wait"));
- wait_event(rnp->exp_wq[(s >> 1) & 0x3],
+ wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3],
sync_exp_work_done(rsp,
&rdp->exp_workdone2, s));
return true;
@@ -331,6 +331,8 @@ static void sync_sched_exp_handler(void *data)
return;
}
__this_cpu_write(rcu_sched_data.cpu_no_qs.b.exp, true);
+ /* Store .exp before .rcu_urgent_qs. */
+ smp_store_release(this_cpu_ptr(&rcu_dynticks.rcu_urgent_qs), true);
resched_cpu(smp_processor_id());
}
@@ -531,7 +533,8 @@ static void rcu_exp_wait_wake(struct rcu_state *rsp, unsigned long s)
rnp->exp_seq_rq = s;
spin_unlock(&rnp->exp_lock);
}
- wake_up_all(&rnp->exp_wq[(rsp->expedited_sequence >> 1) & 0x3]);
+ smp_mb(); /* All above changes before wakeup. */
+ wake_up_all(&rnp->exp_wq[rcu_seq_ctr(rsp->expedited_sequence) & 0x3]);
}
trace_rcu_exp_grace_period(rsp->name, s, TPS("endwake"));
mutex_unlock(&rsp->exp_wake_mutex);
@@ -609,9 +612,9 @@ static void _synchronize_rcu_expedited(struct rcu_state *rsp,
/* Wait for expedited grace period to complete. */
rdp = per_cpu_ptr(rsp->rda, raw_smp_processor_id());
rnp = rcu_get_root(rsp);
- wait_event(rnp->exp_wq[(s >> 1) & 0x3],
- sync_exp_work_done(rsp,
- &rdp->exp_workdone0, s));
+ wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3],
+ sync_exp_work_done(rsp, &rdp->exp_workdone0, s));
+ smp_mb(); /* Workqueue actions happen before return. */
/* Let the next expedited grace period start. */
mutex_unlock(&rsp->exp_mutex);
@@ -735,15 +738,3 @@ void synchronize_rcu_expedited(void)
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
-
-/*
- * Switch to run-time mode once Tree RCU has fully initialized.
- */
-static int __init rcu_exp_runtime_mode(void)
-{
- rcu_test_sync_prims();
- rcu_scheduler_active = RCU_SCHEDULER_RUNNING;
- rcu_test_sync_prims();
- return 0;
-}
-core_initcall(rcu_exp_runtime_mode);
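The tree_exp.h changes above replace the open-coded "(s >> 1) & 0x3" with rcu_seq_ctr(s) & 0x3 when picking a wait queue. The grace-period number lives in the counter's upper bits, so shifting out the in-progress bit and masking spreads consecutive expedited grace periods across four wait buckets. A toy illustration of that indexing only, not the kernel API:

#include <stdio.h>

#define SEQ_CTR_SHIFT 1	/* low bit is the "in progress" flag */

static unsigned long seq_ctr(unsigned long s)
{
	return s >> SEQ_CTR_SHIFT;
}

int main(void)
{
	/* Snapshots for four consecutive grace periods: 2, 4, 6, 8. */
	for (unsigned long s = 2; s <= 8; s += 2)
		printf("snapshot %lu -> wait bucket %lu\n", s, seq_ctr(s) & 0x3);
	return 0;
}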
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 0a62a8f1caac..c9a48657512a 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -1350,10 +1350,10 @@ static bool __maybe_unused rcu_try_advance_all_cbs(void)
*/
if ((rdp->completed != rnp->completed ||
unlikely(READ_ONCE(rdp->gpwrap))) &&
- rdp->nxttail[RCU_DONE_TAIL] != rdp->nxttail[RCU_NEXT_TAIL])
+ rcu_segcblist_pend_cbs(&rdp->cblist))
note_gp_changes(rsp, rdp);
- if (cpu_has_callbacks_ready_to_invoke(rdp))
+ if (rcu_segcblist_ready_cbs(&rdp->cblist))
cbs_ready = true;
}
return cbs_ready;
@@ -1461,7 +1461,7 @@ static void rcu_prepare_for_idle(void)
rdtp->last_accelerate = jiffies;
for_each_rcu_flavor(rsp) {
rdp = this_cpu_ptr(rsp->rda);
- if (!*rdp->nxttail[RCU_DONE_TAIL])
+ if (rcu_segcblist_pend_cbs(&rdp->cblist))
continue;
rnp = rdp->mynode;
raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
@@ -1529,7 +1529,7 @@ static void rcu_oom_notify_cpu(void *unused)
for_each_rcu_flavor(rsp) {
rdp = raw_cpu_ptr(rsp->rda);
- if (rdp->qlen_lazy != 0) {
+ if (rcu_segcblist_n_lazy_cbs(&rdp->cblist)) {
atomic_inc(&oom_callback_count);
rsp->call(&rdp->oom_head, rcu_oom_callback);
}
@@ -1709,7 +1709,7 @@ __setup("rcu_nocbs=", rcu_nocb_setup);
static int __init parse_rcu_nocb_poll(char *arg)
{
- rcu_nocb_poll = 1;
+ rcu_nocb_poll = true;
return 0;
}
early_param("rcu_nocb_poll", parse_rcu_nocb_poll);
@@ -1860,7 +1860,9 @@ static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
TPS("WakeEmpty"));
} else {
- rdp->nocb_defer_wakeup = RCU_NOGP_WAKE;
+ WRITE_ONCE(rdp->nocb_defer_wakeup, RCU_NOGP_WAKE);
+ /* Store ->nocb_defer_wakeup before ->rcu_urgent_qs. */
+ smp_store_release(this_cpu_ptr(&rcu_dynticks.rcu_urgent_qs), true);
trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
TPS("WakeEmptyIsDeferred"));
}
@@ -1872,7 +1874,9 @@ static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
TPS("WakeOvf"));
} else {
- rdp->nocb_defer_wakeup = RCU_NOGP_WAKE_FORCE;
+ WRITE_ONCE(rdp->nocb_defer_wakeup, RCU_NOGP_WAKE_FORCE);
+ /* Store ->nocb_defer_wakeup before ->rcu_urgent_qs. */
+ smp_store_release(this_cpu_ptr(&rcu_dynticks.rcu_urgent_qs), true);
trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
TPS("WakeOvfIsDeferred"));
}
@@ -1930,30 +1934,26 @@ static bool __maybe_unused rcu_nocb_adopt_orphan_cbs(struct rcu_state *rsp,
struct rcu_data *rdp,
unsigned long flags)
{
- long ql = rsp->qlen;
- long qll = rsp->qlen_lazy;
+ long ql = rsp->orphan_done.len;
+ long qll = rsp->orphan_done.len_lazy;
/* If this is not a no-CBs CPU, tell the caller to do it the old way. */
if (!rcu_is_nocb_cpu(smp_processor_id()))
return false;
- rsp->qlen = 0;
- rsp->qlen_lazy = 0;
/* First, enqueue the donelist, if any. This preserves CB ordering. */
- if (rsp->orphan_donelist != NULL) {
- __call_rcu_nocb_enqueue(rdp, rsp->orphan_donelist,
- rsp->orphan_donetail, ql, qll, flags);
- ql = qll = 0;
- rsp->orphan_donelist = NULL;
- rsp->orphan_donetail = &rsp->orphan_donelist;
+ if (rsp->orphan_done.head) {
+ __call_rcu_nocb_enqueue(rdp, rcu_cblist_head(&rsp->orphan_done),
+ rcu_cblist_tail(&rsp->orphan_done),
+ ql, qll, flags);
}
- if (rsp->orphan_nxtlist != NULL) {
- __call_rcu_nocb_enqueue(rdp, rsp->orphan_nxtlist,
- rsp->orphan_nxttail, ql, qll, flags);
- ql = qll = 0;
- rsp->orphan_nxtlist = NULL;
- rsp->orphan_nxttail = &rsp->orphan_nxtlist;
+ if (rsp->orphan_pend.head) {
+ __call_rcu_nocb_enqueue(rdp, rcu_cblist_head(&rsp->orphan_pend),
+ rcu_cblist_tail(&rsp->orphan_pend),
+ ql, qll, flags);
}
+ rcu_cblist_init(&rsp->orphan_done);
+ rcu_cblist_init(&rsp->orphan_pend);
return true;
}
@@ -2395,16 +2395,16 @@ static bool init_nocb_callback_list(struct rcu_data *rdp)
return false;
/* If there are early-boot callbacks, move them to nocb lists. */
- if (rdp->nxtlist) {
- rdp->nocb_head = rdp->nxtlist;
- rdp->nocb_tail = rdp->nxttail[RCU_NEXT_TAIL];
- atomic_long_set(&rdp->nocb_q_count, rdp->qlen);
- atomic_long_set(&rdp->nocb_q_count_lazy, rdp->qlen_lazy);
- rdp->nxtlist = NULL;
- rdp->qlen = 0;
- rdp->qlen_lazy = 0;
+ if (!rcu_segcblist_empty(&rdp->cblist)) {
+ rdp->nocb_head = rcu_segcblist_head(&rdp->cblist);
+ rdp->nocb_tail = rcu_segcblist_tail(&rdp->cblist);
+ atomic_long_set(&rdp->nocb_q_count,
+ rcu_segcblist_n_cbs(&rdp->cblist));
+ atomic_long_set(&rdp->nocb_q_count_lazy,
+ rcu_segcblist_n_lazy_cbs(&rdp->cblist));
+ rcu_segcblist_init(&rdp->cblist);
}
- rdp->nxttail[RCU_NEXT_TAIL] = NULL;
+ rcu_segcblist_disable(&rdp->cblist);
return true;
}
diff --git a/kernel/rcu/tree_trace.c b/kernel/rcu/tree_trace.c
index 8751a748499a..6cea17a1ea30 100644
--- a/kernel/rcu/tree_trace.c
+++ b/kernel/rcu/tree_trace.c
@@ -41,11 +41,11 @@
#include <linux/mutex.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
+#include <linux/prefetch.h>
#define RCU_TREE_NONCORE
#include "tree.h"
-
-DECLARE_PER_CPU_SHARED_ALIGNED(unsigned long, rcu_qs_ctr);
+#include "rcu.h"
static int r_open(struct inode *inode, struct file *file,
const struct seq_operations *op)
@@ -121,7 +121,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
cpu_is_offline(rdp->cpu) ? '!' : ' ',
ulong2long(rdp->completed), ulong2long(rdp->gpnum),
rdp->cpu_no_qs.b.norm,
- rdp->rcu_qs_ctr_snap == per_cpu(rcu_qs_ctr, rdp->cpu),
+ rdp->rcu_qs_ctr_snap == per_cpu(rdp->dynticks->rcu_qs_ctr, rdp->cpu),
rdp->core_needs_qs);
seq_printf(m, " dt=%d/%llx/%d df=%lu",
rcu_dynticks_snap(rdp->dynticks),
@@ -130,17 +130,15 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
rdp->dynticks_fqs);
seq_printf(m, " of=%lu", rdp->offline_fqs);
rcu_nocb_q_lengths(rdp, &ql, &qll);
- qll += rdp->qlen_lazy;
- ql += rdp->qlen;
+ qll += rcu_segcblist_n_lazy_cbs(&rdp->cblist);
+ ql += rcu_segcblist_n_cbs(&rdp->cblist);
seq_printf(m, " ql=%ld/%ld qs=%c%c%c%c",
qll, ql,
- ".N"[rdp->nxttail[RCU_NEXT_READY_TAIL] !=
- rdp->nxttail[RCU_NEXT_TAIL]],
- ".R"[rdp->nxttail[RCU_WAIT_TAIL] !=
- rdp->nxttail[RCU_NEXT_READY_TAIL]],
- ".W"[rdp->nxttail[RCU_DONE_TAIL] !=
- rdp->nxttail[RCU_WAIT_TAIL]],
- ".D"[&rdp->nxtlist != rdp->nxttail[RCU_DONE_TAIL]]);
+ ".N"[!rcu_segcblist_segempty(&rdp->cblist, RCU_NEXT_TAIL)],
+ ".R"[!rcu_segcblist_segempty(&rdp->cblist,
+ RCU_NEXT_READY_TAIL)],
+ ".W"[!rcu_segcblist_segempty(&rdp->cblist, RCU_WAIT_TAIL)],
+ ".D"[!rcu_segcblist_segempty(&rdp->cblist, RCU_DONE_TAIL)]);
#ifdef CONFIG_RCU_BOOST
seq_printf(m, " kt=%d/%c ktl=%x",
per_cpu(rcu_cpu_has_work, rdp->cpu),
@@ -278,7 +276,9 @@ static void print_one_rcu_state(struct seq_file *m, struct rcu_state *rsp)
seq_printf(m, "nfqs=%lu/nfqsng=%lu(%lu) fqlh=%lu oqlen=%ld/%ld\n",
rsp->n_force_qs, rsp->n_force_qs_ngp,
rsp->n_force_qs - rsp->n_force_qs_ngp,
- READ_ONCE(rsp->n_force_qs_lh), rsp->qlen_lazy, rsp->qlen);
+ READ_ONCE(rsp->n_force_qs_lh),
+ rsp->orphan_done.len_lazy,
+ rsp->orphan_done.len);
for (rnp = &rsp->node[0]; rnp - &rsp->node[0] < rcu_num_nodes; rnp++) {
if (rnp->level != level) {
seq_puts(m, "\n");
diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
index 55c8530316c7..273e869ca21d 100644
--- a/kernel/rcu/update.c
+++ b/kernel/rcu/update.c
@@ -124,7 +124,7 @@ EXPORT_SYMBOL(rcu_read_lock_sched_held);
* non-expedited counterparts? Intended for use within RCU. Note
* that if the user specifies both rcu_expedited and rcu_normal, then
* rcu_normal wins. (Except during the time period during boot from
- * when the first task is spawned until the rcu_exp_runtime_mode()
+ * when the first task is spawned until the rcu_set_runtime_mode()
* core_initcall() is invoked, at which point everything is expedited.)
*/
bool rcu_gp_is_normal(void)
@@ -190,6 +190,39 @@ void rcu_end_inkernel_boot(void)
#endif /* #ifndef CONFIG_TINY_RCU */
+/*
+ * Test each non-SRCU synchronous grace-period wait API. This is
+ * useful just after a change in mode for these primitives, and
+ * during early boot.
+ */
+void rcu_test_sync_prims(void)
+{
+ if (!IS_ENABLED(CONFIG_PROVE_RCU))
+ return;
+ synchronize_rcu();
+ synchronize_rcu_bh();
+ synchronize_sched();
+ synchronize_rcu_expedited();
+ synchronize_rcu_bh_expedited();
+ synchronize_sched_expedited();
+}
+
+#if !defined(CONFIG_TINY_RCU) || defined(CONFIG_SRCU)
+
+/*
+ * Switch to run-time mode once RCU has fully initialized.
+ */
+static int __init rcu_set_runtime_mode(void)
+{
+ rcu_test_sync_prims();
+ rcu_scheduler_active = RCU_SCHEDULER_RUNNING;
+ rcu_test_sync_prims();
+ return 0;
+}
+core_initcall(rcu_set_runtime_mode);
+
+#endif /* #if !defined(CONFIG_TINY_RCU) || defined(CONFIG_SRCU) */
+
#ifdef CONFIG_PREEMPT_RCU
/*
@@ -632,6 +665,7 @@ static void check_holdout_task(struct task_struct *t,
put_task_struct(t);
return;
}
+ rcu_request_urgent_qs_task(t);
if (!needreport)
return;
if (*firstreport) {
@@ -817,23 +851,6 @@ static void rcu_spawn_tasks_kthread(void)
#endif /* #ifdef CONFIG_TASKS_RCU */
-/*
- * Test each non-SRCU synchronous grace-period wait API. This is
- * useful just after a change in mode for these primitives, and
- * during early boot.
- */
-void rcu_test_sync_prims(void)
-{
- if (!IS_ENABLED(CONFIG_PROVE_RCU))
- return;
- synchronize_rcu();
- synchronize_rcu_bh();
- synchronize_sched();
- synchronize_rcu_expedited();
- synchronize_rcu_bh_expedited();
- synchronize_sched_expedited();
-}
-
#ifdef CONFIG_PROVE_RCU
/*
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index c51147a1204c..759f4bd52cd6 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3382,7 +3382,7 @@ static void __sched notrace __schedule(bool preempt)
hrtick_clear(rq);
local_irq_disable();
- rcu_note_context_switch();
+ rcu_note_context_switch(preempt);
/*
* Make sure that signal_pending_state()->signal_pending() below
diff --git a/kernel/signal.c b/kernel/signal.c
index 7e59ebc2c25e..ca92bcfeb322 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -1237,7 +1237,7 @@ struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
}
/*
* This sighand can be already freed and even reused, but
- * we rely on SLAB_DESTROY_BY_RCU and sighand_ctor() which
+ * we rely on SLAB_TYPESAFE_BY_RCU and sighand_ctor() which
* initializes ->siglock: this slab can't go away, it has
* the same object type, ->siglock can't be reinitialized.
*
@@ -1318,7 +1318,7 @@ int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
}
}
-int kill_proc_info(int sig, struct siginfo *info, pid_t pid)
+static int kill_proc_info(int sig, struct siginfo *info, pid_t pid)
{
int error;
rcu_read_lock();
diff --git a/kernel/sys.c b/kernel/sys.c
index 196c7134bee6..8a94b4eabcaa 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -1396,8 +1396,7 @@ int do_prlimit(struct task_struct *tsk, unsigned int resource,
!capable(CAP_SYS_RESOURCE))
retval = -EPERM;
if (!retval)
- retval = security_task_setrlimit(tsk->group_leader,
- resource, new_rlim);
+ retval = security_task_setrlimit(tsk, resource, new_rlim);
if (resource == RLIMIT_CPU && new_rlim->rlim_cur == 0) {
/*
* The caller is asking for an immediate RLIMIT_CPU
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 21343d110296..4dfba1a76cc3 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -2576,7 +2576,7 @@ static int do_proc_dointvec_jiffies_conv(bool *negp, unsigned long *lvalp,
int write, void *data)
{
if (write) {
- if (*lvalp > LONG_MAX / HZ)
+ if (*lvalp > INT_MAX / HZ)
return 1;
*valp = *negp ? -(*lvalp*HZ) : (*lvalp*HZ);
} else {
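The sysctl.c hunk tightens the bound from LONG_MAX/HZ to INT_MAX/HZ because the destination *valp is an int: a value that passes the looser check can still overflow once it is multiplied by HZ and stored. A userspace demonstration of the failure mode, assuming a 64-bit long, a 32-bit int and HZ of 1000:

#include <limits.h>
#include <stdio.h>

#define HZ 1000

int main(void)
{
	unsigned long lval = (unsigned long)INT_MAX / HZ + 1;	/* passes a LONG_MAX/HZ check */
	long product = (long)(lval * HZ);	/* 2147484000: fits in a 64-bit long */
	int stored = (int)product;		/* but no longer fits in an int */

	printf("lval=%lu product=%ld stored_as_int=%d\n", lval, product, stored);
	printf("rejected by the INT_MAX/HZ bound: %d\n", lval > INT_MAX / HZ);
	return 0;
}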
diff --git a/kernel/taskstats.c b/kernel/taskstats.c
index 8a5e44236f78..4559e914452b 100644
--- a/kernel/taskstats.c
+++ b/kernel/taskstats.c
@@ -30,6 +30,7 @@
#include <linux/pid_namespace.h>
#include <net/genetlink.h>
#include <linux/atomic.h>
+#include <linux/sched/cputime.h>
/*
* Maximum length of a cpumask that can be specified in
@@ -210,6 +211,8 @@ static int fill_stats_for_tgid(pid_t tgid, struct taskstats *stats)
struct task_struct *tsk, *first;
unsigned long flags;
int rc = -ESRCH;
+ u64 delta, utime, stime;
+ u64 start_time;
/*
* Add additional stats from live tasks except zombie thread group
@@ -227,6 +230,7 @@ static int fill_stats_for_tgid(pid_t tgid, struct taskstats *stats)
memset(stats, 0, sizeof(*stats));
tsk = first;
+ start_time = ktime_get_ns();
do {
if (tsk->exit_state)
continue;
@@ -238,6 +242,16 @@ static int fill_stats_for_tgid(pid_t tgid, struct taskstats *stats)
*/
delayacct_add_tsk(stats, tsk);
+ /* calculate task elapsed time in nsec */
+ delta = start_time - tsk->start_time;
+ /* Convert to micro seconds */
+ do_div(delta, NSEC_PER_USEC);
+ stats->ac_etime += delta;
+
+ task_cputime(tsk, &utime, &stime);
+ stats->ac_utime += div_u64(utime, NSEC_PER_USEC);
+ stats->ac_stime += div_u64(stime, NSEC_PER_USEC);
+
stats->nvcsw += tsk->nvcsw;
stats->nivcsw += tsk->nivcsw;
} while_each_thread(first, tsk);
diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
index 949e434d3536..1370f067fb51 100644
--- a/kernel/time/posix-cpu-timers.c
+++ b/kernel/time/posix-cpu-timers.c
@@ -1318,7 +1318,7 @@ static int posix_cpu_nsleep(const clockid_t which_clock, int flags,
*/
if (CPUCLOCK_PERTHREAD(which_clock) &&
(CPUCLOCK_PID(which_clock) == 0 ||
- CPUCLOCK_PID(which_clock) == current->pid))
+ CPUCLOCK_PID(which_clock) == task_pid_vnr(current)))
return -EINVAL;
error = do_cpu_nanosleep(which_clock, flags, rqtp, &it);
diff --git a/kernel/trace/trace_entries.h b/kernel/trace/trace_entries.h
index c203ac4df791..adcdbbeae010 100644
--- a/kernel/trace/trace_entries.h
+++ b/kernel/trace/trace_entries.h
@@ -348,14 +348,14 @@ FTRACE_ENTRY(hwlat, hwlat_entry,
__field( u64, duration )
__field( u64, outer_duration )
__field( u64, nmi_total_ts )
- __field_struct( struct timespec, timestamp )
- __field_desc( long, timestamp, tv_sec )
+ __field_struct( struct timespec64, timestamp )
+ __field_desc( s64, timestamp, tv_sec )
__field_desc( long, timestamp, tv_nsec )
__field( unsigned int, nmi_count )
__field( unsigned int, seqnum )
),
- F_printk("cnt:%u\tts:%010lu.%010lu\tinner:%llu\touter:%llunmi-ts:%llu\tnmi-count:%u\n",
+ F_printk("cnt:%u\tts:%010llu.%010lu\tinner:%llu\touter:%llunmi-ts:%llu\tnmi-count:%u\n",
__entry->seqnum,
__entry->tv_sec,
__entry->tv_nsec,
diff --git a/kernel/trace/trace_hwlat.c b/kernel/trace/trace_hwlat.c
index 21ea6ae77d93..d7c8e4ec3d9d 100644
--- a/kernel/trace/trace_hwlat.c
+++ b/kernel/trace/trace_hwlat.c
@@ -79,12 +79,12 @@ static u64 last_tracing_thresh = DEFAULT_LAT_THRESHOLD * NSEC_PER_USEC;
/* Individual latency samples are stored here when detected. */
struct hwlat_sample {
- u64 seqnum; /* unique sequence */
- u64 duration; /* delta */
- u64 outer_duration; /* delta (outer loop) */
- u64 nmi_total_ts; /* Total time spent in NMIs */
- struct timespec timestamp; /* wall time */
- int nmi_count; /* # NMIs during this sample */
+ u64 seqnum; /* unique sequence */
+ u64 duration; /* delta */
+ u64 outer_duration; /* delta (outer loop) */
+ u64 nmi_total_ts; /* Total time spent in NMIs */
+ struct timespec64 timestamp; /* wall time */
+ int nmi_count; /* # NMIs during this sample */
};
/* keep the global state somewhere. */
@@ -250,7 +250,7 @@ static int get_sample(void)
s.seqnum = hwlat_data.count;
s.duration = sample;
s.outer_duration = outer_sample;
- s.timestamp = CURRENT_TIME;
+ ktime_get_real_ts64(&s.timestamp);
s.nmi_total_ts = nmi_total_ts;
s.nmi_count = nmi_count;
trace_hwlat_sample(&s);
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index 02a4aeb22c47..08f9bab8089e 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -4,7 +4,6 @@
* Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
*
*/
-
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/ftrace.h>
@@ -1161,11 +1160,11 @@ trace_hwlat_print(struct trace_iterator *iter, int flags,
trace_assign_type(field, entry);
- trace_seq_printf(s, "#%-5u inner/outer(us): %4llu/%-5llu ts:%ld.%09ld",
+ trace_seq_printf(s, "#%-5u inner/outer(us): %4llu/%-5llu ts:%lld.%09ld",
field->seqnum,
field->duration,
field->outer_duration,
- field->timestamp.tv_sec,
+ (long long)field->timestamp.tv_sec,
field->timestamp.tv_nsec);
if (field->nmi_count) {
@@ -1195,10 +1194,10 @@ trace_hwlat_raw(struct trace_iterator *iter, int flags,
trace_assign_type(field, iter->ent);
- trace_seq_printf(s, "%llu %lld %ld %09ld %u\n",
+ trace_seq_printf(s, "%llu %lld %lld %09ld %u\n",
field->duration,
field->outer_duration,
- field->timestamp.tv_sec,
+ (long long)field->timestamp.tv_sec,
field->timestamp.tv_nsec,
field->seqnum);
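The trace_hwlat/trace_output changes above move the sample timestamp to timespec64 and print tv_sec with %lld plus an explicit cast, because the 64-bit seconds field is not guaranteed to be plain long on every configuration. The same format-string pattern in ordinary userspace C, shown only to illustrate the reasoning behind the cast:

#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_REALTIME, &ts);
	printf("ts:%lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec);
	return 0;
}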
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index e2a617e09ab7..e4587ebe52c7 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1719,19 +1719,21 @@ config LKDTM
Documentation/fault-injection/provoke-crashes.txt
config TEST_LIST_SORT
- bool "Linked list sorting test"
- depends on DEBUG_KERNEL
+ tristate "Linked list sorting test"
+ depends on DEBUG_KERNEL || m
help
Enable this to turn on 'list_sort()' function test. This test is
- executed only once during system boot, so affects only boot time.
+ executed only once during system boot (so affects only boot time),
+ or at module load time.
If unsure, say N.
config TEST_SORT
- bool "Array-based sort test"
- depends on DEBUG_KERNEL
+ tristate "Array-based sort test"
+ depends on DEBUG_KERNEL || m
help
- This option enables the self-test function of 'sort()' at boot.
+ This option enables the self-test function of 'sort()' at boot,
+ or at module load time.
If unsure, say N.
diff --git a/lib/Makefile b/lib/Makefile
index a155c73e3437..0166fbc0fa81 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -52,6 +52,7 @@ obj-$(CONFIG_TEST_FIRMWARE) += test_firmware.o
obj-$(CONFIG_TEST_HASH) += test_hash.o test_siphash.o
obj-$(CONFIG_TEST_KASAN) += test_kasan.o
obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o
+obj-$(CONFIG_TEST_LIST_SORT) += test_list_sort.o
obj-$(CONFIG_TEST_LKM) += test_module.o
obj-$(CONFIG_TEST_RHASHTABLE) += test_rhashtable.o
obj-$(CONFIG_TEST_SORT) += test_sort.o
diff --git a/lib/devres.c b/lib/devres.c
index cb1464c411a2..78eca713b1d9 100644
--- a/lib/devres.c
+++ b/lib/devres.c
@@ -17,7 +17,7 @@ static int devm_ioremap_match(struct device *dev, void *res, void *match_data)
/**
* devm_ioremap - Managed ioremap()
* @dev: Generic device to remap IO address for
- * @offset: BUS offset to map
+ * @offset: Resource address to map
* @size: Size of map
*
* Managed ioremap(). Map is automatically unmapped on driver detach.
@@ -45,7 +45,7 @@ EXPORT_SYMBOL(devm_ioremap);
/**
* devm_ioremap_nocache - Managed ioremap_nocache()
* @dev: Generic device to remap IO address for
- * @offset: BUS offset to map
+ * @offset: Resource address to map
* @size: Size of map
*
* Managed ioremap_nocache(). Map is automatically unmapped on driver
@@ -74,7 +74,7 @@ EXPORT_SYMBOL(devm_ioremap_nocache);
/**
* devm_ioremap_wc - Managed ioremap_wc()
* @dev: Generic device to remap IO address for
- * @offset: BUS offset to map
+ * @offset: Resource address to map
* @size: Size of map
*
* Managed ioremap_wc(). Map is automatically unmapped on driver detach.
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index fe4d50c992df..ea4cc3dde4f1 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -1498,7 +1498,7 @@ void debug_dma_alloc_coherent(struct device *dev, size_t size,
entry->type = dma_debug_coherent;
entry->dev = dev;
entry->pfn = page_to_pfn(virt_to_page(virt));
- entry->offset = (size_t) virt & ~PAGE_MASK;
+ entry->offset = offset_in_page(virt);
entry->size = size;
entry->dev_addr = dma_addr;
entry->direction = DMA_BIDIRECTIONAL;
@@ -1514,7 +1514,7 @@ void debug_dma_free_coherent(struct device *dev, size_t size,
.type = dma_debug_coherent,
.dev = dev,
.pfn = page_to_pfn(virt_to_page(virt)),
- .offset = (size_t) virt & ~PAGE_MASK,
+ .offset = offset_in_page(virt),
.dev_addr = addr,
.size = size,
.direction = DMA_BIDIRECTIONAL,
diff --git a/lib/fault-inject.c b/lib/fault-inject.c
index 6a823a53e357..4ff157159a0d 100644
--- a/lib/fault-inject.c
+++ b/lib/fault-inject.c
@@ -56,7 +56,7 @@ static void fail_dump(struct fault_attr *attr)
static bool fail_task(struct fault_attr *attr, struct task_struct *task)
{
- return !in_interrupt() && task->make_it_fail;
+ return in_task() && task->make_it_fail;
}
#define MAX_STACK_TRACE_DEPTH 32
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index 4952311422c1..f835964c9485 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -790,6 +790,8 @@ void iov_iter_revert(struct iov_iter *i, size_t unroll)
{
if (!unroll)
return;
+ if (WARN_ON(unroll > MAX_RW_COUNT))
+ return;
i->count += unroll;
if (unlikely(i->type & ITER_PIPE)) {
struct pipe_inode_info *pipe = i->pipe;
@@ -1028,10 +1030,7 @@ EXPORT_SYMBOL(iov_iter_get_pages);
static struct page **get_pages_array(size_t n)
{
- struct page **p = kmalloc(n * sizeof(struct page *), GFP_KERNEL);
- if (!p)
- p = vmalloc(n * sizeof(struct page *));
- return p;
+ return kvmalloc_array(n, sizeof(struct page *), GFP_KERNEL);
}
static ssize_t pipe_get_pages_alloc(struct iov_iter *i,
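The iov_iter.c hunk above collapses the kmalloc-then-vmalloc fallback into a single kvmalloc_array() call, which also checks the count-times-size multiplication for overflow before allocating. A minimal userspace stand-in for that overflow check only; the contiguous-versus-vmalloc fallback decision itself is not modeled:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Overflow-checked n * size allocation, in the spirit of *_array() helpers. */
static void *alloc_array(size_t n, size_t size)
{
	if (size != 0 && n > SIZE_MAX / size)
		return NULL;			/* n * size would wrap around */
	return malloc(n * size);
}

int main(void)
{
	void *ok  = alloc_array(16, sizeof(void *));
	void *bad = alloc_array(SIZE_MAX / 2, sizeof(void *));	/* rejected */

	printf("ok=%p bad=%p\n", ok, bad);
	free(ok);
	return 0;
}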
diff --git a/lib/list_sort.c b/lib/list_sort.c
index 3fe401067e20..9e9acc37652f 100644
--- a/lib/list_sort.c
+++ b/lib/list_sort.c
@@ -1,6 +1,3 @@
-
-#define pr_fmt(fmt) "list_sort_test: " fmt
-
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/compiler.h>
@@ -145,149 +142,3 @@ void list_sort(void *priv, struct list_head *head,
merge_and_restore_back_links(priv, cmp, head, part[max_lev], list);
}
EXPORT_SYMBOL(list_sort);
-
-#ifdef CONFIG_TEST_LIST_SORT
-
-#include <linux/slab.h>
-#include <linux/random.h>
-
-/*
- * The pattern of set bits in the list length determines which cases
- * are hit in list_sort().
- */
-#define TEST_LIST_LEN (512+128+2) /* not including head */
-
-#define TEST_POISON1 0xDEADBEEF
-#define TEST_POISON2 0xA324354C
-
-struct debug_el {
- unsigned int poison1;
- struct list_head list;
- unsigned int poison2;
- int value;
- unsigned serial;
-};
-
-/* Array, containing pointers to all elements in the test list */
-static struct debug_el **elts __initdata;
-
-static int __init check(struct debug_el *ela, struct debug_el *elb)
-{
- if (ela->serial >= TEST_LIST_LEN) {
- pr_err("error: incorrect serial %d\n", ela->serial);
- return -EINVAL;
- }
- if (elb->serial >= TEST_LIST_LEN) {
- pr_err("error: incorrect serial %d\n", elb->serial);
- return -EINVAL;
- }
- if (elts[ela->serial] != ela || elts[elb->serial] != elb) {
- pr_err("error: phantom element\n");
- return -EINVAL;
- }
- if (ela->poison1 != TEST_POISON1 || ela->poison2 != TEST_POISON2) {
- pr_err("error: bad poison: %#x/%#x\n",
- ela->poison1, ela->poison2);
- return -EINVAL;
- }
- if (elb->poison1 != TEST_POISON1 || elb->poison2 != TEST_POISON2) {
- pr_err("error: bad poison: %#x/%#x\n",
- elb->poison1, elb->poison2);
- return -EINVAL;
- }
- return 0;
-}
-
-static int __init cmp(void *priv, struct list_head *a, struct list_head *b)
-{
- struct debug_el *ela, *elb;
-
- ela = container_of(a, struct debug_el, list);
- elb = container_of(b, struct debug_el, list);
-
- check(ela, elb);
- return ela->value - elb->value;
-}
-
-static int __init list_sort_test(void)
-{
- int i, count = 1, err = -ENOMEM;
- struct debug_el *el;
- struct list_head *cur;
- LIST_HEAD(head);
-
- pr_debug("start testing list_sort()\n");
-
- elts = kcalloc(TEST_LIST_LEN, sizeof(*elts), GFP_KERNEL);
- if (!elts) {
- pr_err("error: cannot allocate memory\n");
- return err;
- }
-
- for (i = 0; i < TEST_LIST_LEN; i++) {
- el = kmalloc(sizeof(*el), GFP_KERNEL);
- if (!el) {
- pr_err("error: cannot allocate memory\n");
- goto exit;
- }
- /* force some equivalencies */
- el->value = prandom_u32() % (TEST_LIST_LEN / 3);
- el->serial = i;
- el->poison1 = TEST_POISON1;
- el->poison2 = TEST_POISON2;
- elts[i] = el;
- list_add_tail(&el->list, &head);
- }
-
- list_sort(NULL, &head, cmp);
-
- err = -EINVAL;
- for (cur = head.next; cur->next != &head; cur = cur->next) {
- struct debug_el *el1;
- int cmp_result;
-
- if (cur->next->prev != cur) {
- pr_err("error: list is corrupted\n");
- goto exit;
- }
-
- cmp_result = cmp(NULL, cur, cur->next);
- if (cmp_result > 0) {
- pr_err("error: list is not sorted\n");
- goto exit;
- }
-
- el = container_of(cur, struct debug_el, list);
- el1 = container_of(cur->next, struct debug_el, list);
- if (cmp_result == 0 && el->serial >= el1->serial) {
- pr_err("error: order of equivalent elements not "
- "preserved\n");
- goto exit;
- }
-
- if (check(el, el1)) {
- pr_err("error: element check failed\n");
- goto exit;
- }
- count++;
- }
- if (head.prev != cur) {
- pr_err("error: list is corrupted\n");
- goto exit;
- }
-
-
- if (count != TEST_LIST_LEN) {
- pr_err("error: bad list length %d", count);
- goto exit;
- }
-
- err = 0;
-exit:
- for (i = 0; i < TEST_LIST_LEN; i++)
- kfree(elts[i]);
- kfree(elts);
- return err;
-}
-late_initcall(list_sort_test);
-#endif /* CONFIG_TEST_LIST_SORT */
diff --git a/lib/refcount.c b/lib/refcount.c
index f42124ccf295..9f906783987e 100644
--- a/lib/refcount.c
+++ b/lib/refcount.c
@@ -76,7 +76,7 @@ bool refcount_add_not_zero(unsigned int i, refcount_t *r)
return true;
}
-EXPORT_SYMBOL_GPL(refcount_add_not_zero);
+EXPORT_SYMBOL(refcount_add_not_zero);
/**
* refcount_add - add a value to a refcount
@@ -98,7 +98,7 @@ void refcount_add(unsigned int i, refcount_t *r)
{
WARN_ONCE(!refcount_add_not_zero(i, r), "refcount_t: addition on 0; use-after-free.\n");
}
-EXPORT_SYMBOL_GPL(refcount_add);
+EXPORT_SYMBOL(refcount_add);
/**
* refcount_inc_not_zero - increment a refcount unless it is 0
@@ -131,7 +131,7 @@ bool refcount_inc_not_zero(refcount_t *r)
return true;
}
-EXPORT_SYMBOL_GPL(refcount_inc_not_zero);
+EXPORT_SYMBOL(refcount_inc_not_zero);
/**
* refcount_inc - increment a refcount
@@ -149,7 +149,7 @@ void refcount_inc(refcount_t *r)
{
WARN_ONCE(!refcount_inc_not_zero(r), "refcount_t: increment on 0; use-after-free.\n");
}
-EXPORT_SYMBOL_GPL(refcount_inc);
+EXPORT_SYMBOL(refcount_inc);
/**
* refcount_sub_and_test - subtract from a refcount and test if it is 0
@@ -189,7 +189,7 @@ bool refcount_sub_and_test(unsigned int i, refcount_t *r)
return !new;
}
-EXPORT_SYMBOL_GPL(refcount_sub_and_test);
+EXPORT_SYMBOL(refcount_sub_and_test);
/**
* refcount_dec_and_test - decrement a refcount and test if it is 0
@@ -208,7 +208,7 @@ bool refcount_dec_and_test(refcount_t *r)
{
return refcount_sub_and_test(1, r);
}
-EXPORT_SYMBOL_GPL(refcount_dec_and_test);
+EXPORT_SYMBOL(refcount_dec_and_test);
/**
* refcount_dec - decrement a refcount
@@ -224,7 +224,7 @@ void refcount_dec(refcount_t *r)
{
WARN_ONCE(refcount_dec_and_test(r), "refcount_t: decrement hit 0; leaking memory.\n");
}
-EXPORT_SYMBOL_GPL(refcount_dec);
+EXPORT_SYMBOL(refcount_dec);
/**
* refcount_dec_if_one - decrement a refcount if it is 1
@@ -248,7 +248,7 @@ bool refcount_dec_if_one(refcount_t *r)
return atomic_try_cmpxchg_release(&r->refs, &val, 0);
}
-EXPORT_SYMBOL_GPL(refcount_dec_if_one);
+EXPORT_SYMBOL(refcount_dec_if_one);
/**
* refcount_dec_not_one - decrement a refcount if it is not 1
@@ -282,7 +282,7 @@ bool refcount_dec_not_one(refcount_t *r)
return true;
}
-EXPORT_SYMBOL_GPL(refcount_dec_not_one);
+EXPORT_SYMBOL(refcount_dec_not_one);
/**
* refcount_dec_and_mutex_lock - return holding mutex if able to decrement
@@ -313,7 +313,7 @@ bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock)
return true;
}
-EXPORT_SYMBOL_GPL(refcount_dec_and_mutex_lock);
+EXPORT_SYMBOL(refcount_dec_and_mutex_lock);
/**
* refcount_dec_and_lock - return holding spinlock if able to decrement
@@ -344,5 +344,5 @@ bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock)
return true;
}
-EXPORT_SYMBOL_GPL(refcount_dec_and_lock);
+EXPORT_SYMBOL(refcount_dec_and_lock);
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index a930e436db5d..d9e7274a04cd 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -86,16 +86,9 @@ static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl,
size = min(size, 1U << tbl->nest);
if (sizeof(spinlock_t) != 0) {
- tbl->locks = NULL;
-#ifdef CONFIG_NUMA
- if (size * sizeof(spinlock_t) > PAGE_SIZE &&
- gfp == GFP_KERNEL)
- tbl->locks = vmalloc(size * sizeof(spinlock_t));
-#endif
- if (gfp != GFP_KERNEL)
- gfp |= __GFP_NOWARN | __GFP_NORETRY;
-
- if (!tbl->locks)
+ if (gfpflags_allow_blocking(gfp))
+ tbl->locks = kvmalloc(size * sizeof(spinlock_t), gfp);
+ else
tbl->locks = kmalloc_array(size, sizeof(spinlock_t),
gfp);
if (!tbl->locks)
diff --git a/lib/string.c b/lib/string.c
index b5c9a1168d3a..1c1fc9187b05 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -656,6 +656,32 @@ int match_string(const char * const *array, size_t n, const char *string)
}
EXPORT_SYMBOL(match_string);
+/**
+ * __sysfs_match_string - matches given string in an array
+ * @array: array of strings
+ * @n: number of strings in the array or -1 for NULL terminated arrays
+ * @str: string to match with
+ *
+ * Returns index of @str in the @array or -EINVAL, just like match_string().
+ * Uses sysfs_streq instead of strcmp for matching.
+ */
+int __sysfs_match_string(const char * const *array, size_t n, const char *str)
+{
+ const char *item;
+ int index;
+
+ for (index = 0; index < n; index++) {
+ item = array[index];
+ if (!item)
+ break;
+ if (sysfs_streq(item, str))
+ return index;
+ }
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL(__sysfs_match_string);
+
#ifndef __HAVE_ARCH_MEMSET
/**
* memset - Fill a region of memory with the given value
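The lib/string.c hunk adds __sysfs_match_string(), documented above as a match_string() variant that compares with sysfs_streq() so that sysfs input carrying a trailing newline still matches. A userspace model of that behaviour; streq_newline() here is only a rough stand-in for sysfs_streq(), and the -1 return approximates the kernel's -EINVAL:

#include <stdio.h>
#include <string.h>

/* Rough stand-in for sysfs_streq(): equal, ignoring one trailing '\n'. */
static int streq_newline(const char *a, const char *b)
{
	size_t la = strlen(a), lb = strlen(b);

	if (la && a[la - 1] == '\n') la--;
	if (lb && b[lb - 1] == '\n') lb--;
	return la == lb && !strncmp(a, b, la);
}

/* Sketch of the lookup: n < 0 means the array is NULL-terminated. */
static int match_string_sysfs(const char *const *array, int n, const char *str)
{
	for (int i = 0; n < 0 || i < n; i++) {
		if (!array[i])
			break;
		if (streq_newline(array[i], str))
			return i;
	}
	return -1;
}

int main(void)
{
	static const char *const modes[] = { "off", "auto", "on", NULL };

	printf("%d\n", match_string_sysfs(modes, -1, "auto\n"));	/* 1 */
	printf("%d\n", match_string_sysfs(modes, -1, "blink"));		/* -1 */
	return 0;
}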
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index a0f66280ea50..889bc31785be 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -4769,8 +4769,8 @@ static struct bpf_test tests[] = {
BPF_LD_IMM64(R1, 3),
BPF_LD_IMM64(R2, 2),
BPF_JMP_REG(BPF_JGE, R1, R2, 2),
- BPF_LD_IMM64(R0, 0xffffffffffffffffUL),
- BPF_LD_IMM64(R0, 0xeeeeeeeeeeeeeeeeUL),
+ BPF_LD_IMM64(R0, 0xffffffffffffffffULL),
+ BPF_LD_IMM64(R0, 0xeeeeeeeeeeeeeeeeULL),
BPF_EXIT_INSN(),
},
INTERNAL,
@@ -4784,7 +4784,7 @@ static struct bpf_test tests[] = {
BPF_LD_IMM64(R1, 3),
BPF_LD_IMM64(R2, 2),
BPF_JMP_REG(BPF_JGE, R1, R2, 0),
- BPF_LD_IMM64(R0, 0xffffffffffffffffUL),
+ BPF_LD_IMM64(R0, 0xffffffffffffffffULL),
BPF_EXIT_INSN(),
},
INTERNAL,
@@ -4798,8 +4798,8 @@ static struct bpf_test tests[] = {
BPF_LD_IMM64(R1, 3),
BPF_LD_IMM64(R2, 2),
BPF_JMP_REG(BPF_JGE, R1, R2, 4),
- BPF_LD_IMM64(R0, 0xffffffffffffffffUL),
- BPF_LD_IMM64(R0, 0xeeeeeeeeeeeeeeeeUL),
+ BPF_LD_IMM64(R0, 0xffffffffffffffffULL),
+ BPF_LD_IMM64(R0, 0xeeeeeeeeeeeeeeeeULL),
BPF_EXIT_INSN(),
},
INTERNAL,
diff --git a/lib/test_list_sort.c b/lib/test_list_sort.c
new file mode 100644
index 000000000000..28e817387b04
--- /dev/null
+++ b/lib/test_list_sort.c
@@ -0,0 +1,150 @@
+#define pr_fmt(fmt) "list_sort_test: " fmt
+
+#include <linux/kernel.h>
+#include <linux/list_sort.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/printk.h>
+#include <linux/slab.h>
+#include <linux/random.h>
+
+/*
+ * The pattern of set bits in the list length determines which cases
+ * are hit in list_sort().
+ */
+#define TEST_LIST_LEN (512+128+2) /* not including head */
+
+#define TEST_POISON1 0xDEADBEEF
+#define TEST_POISON2 0xA324354C
+
+struct debug_el {
+ unsigned int poison1;
+ struct list_head list;
+ unsigned int poison2;
+ int value;
+ unsigned serial;
+};
+
+/* Array, containing pointers to all elements in the test list */
+static struct debug_el **elts __initdata;
+
+static int __init check(struct debug_el *ela, struct debug_el *elb)
+{
+ if (ela->serial >= TEST_LIST_LEN) {
+ pr_err("error: incorrect serial %d\n", ela->serial);
+ return -EINVAL;
+ }
+ if (elb->serial >= TEST_LIST_LEN) {
+ pr_err("error: incorrect serial %d\n", elb->serial);
+ return -EINVAL;
+ }
+ if (elts[ela->serial] != ela || elts[elb->serial] != elb) {
+ pr_err("error: phantom element\n");
+ return -EINVAL;
+ }
+ if (ela->poison1 != TEST_POISON1 || ela->poison2 != TEST_POISON2) {
+ pr_err("error: bad poison: %#x/%#x\n",
+ ela->poison1, ela->poison2);
+ return -EINVAL;
+ }
+ if (elb->poison1 != TEST_POISON1 || elb->poison2 != TEST_POISON2) {
+ pr_err("error: bad poison: %#x/%#x\n",
+ elb->poison1, elb->poison2);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int __init cmp(void *priv, struct list_head *a, struct list_head *b)
+{
+ struct debug_el *ela, *elb;
+
+ ela = container_of(a, struct debug_el, list);
+ elb = container_of(b, struct debug_el, list);
+
+ check(ela, elb);
+ return ela->value - elb->value;
+}
+
+static int __init list_sort_test(void)
+{
+ int i, count = 1, err = -ENOMEM;
+ struct debug_el *el;
+ struct list_head *cur;
+ LIST_HEAD(head);
+
+ pr_debug("start testing list_sort()\n");
+
+ elts = kcalloc(TEST_LIST_LEN, sizeof(*elts), GFP_KERNEL);
+ if (!elts) {
+ pr_err("error: cannot allocate memory\n");
+ return err;
+ }
+
+ for (i = 0; i < TEST_LIST_LEN; i++) {
+ el = kmalloc(sizeof(*el), GFP_KERNEL);
+ if (!el) {
+ pr_err("error: cannot allocate memory\n");
+ goto exit;
+ }
+ /* force some equivalencies */
+ el->value = prandom_u32() % (TEST_LIST_LEN / 3);
+ el->serial = i;
+ el->poison1 = TEST_POISON1;
+ el->poison2 = TEST_POISON2;
+ elts[i] = el;
+ list_add_tail(&el->list, &head);
+ }
+
+ list_sort(NULL, &head, cmp);
+
+ err = -EINVAL;
+ for (cur = head.next; cur->next != &head; cur = cur->next) {
+ struct debug_el *el1;
+ int cmp_result;
+
+ if (cur->next->prev != cur) {
+ pr_err("error: list is corrupted\n");
+ goto exit;
+ }
+
+ cmp_result = cmp(NULL, cur, cur->next);
+ if (cmp_result > 0) {
+ pr_err("error: list is not sorted\n");
+ goto exit;
+ }
+
+ el = container_of(cur, struct debug_el, list);
+ el1 = container_of(cur->next, struct debug_el, list);
+ if (cmp_result == 0 && el->serial >= el1->serial) {
+ pr_err("error: order of equivalent elements not "
+ "preserved\n");
+ goto exit;
+ }
+
+ if (check(el, el1)) {
+ pr_err("error: element check failed\n");
+ goto exit;
+ }
+ count++;
+ }
+ if (head.prev != cur) {
+ pr_err("error: list is corrupted\n");
+ goto exit;
+ }
+
+
+ if (count != TEST_LIST_LEN) {
+ pr_err("error: bad list length %d", count);
+ goto exit;
+ }
+
+ err = 0;
+exit:
+ for (i = 0; i < TEST_LIST_LEN; i++)
+ kfree(elts[i]);
+ kfree(elts);
+ return err;
+}
+module_init(list_sort_test);
+MODULE_LICENSE("GPL");
diff --git a/lib/test_sort.c b/lib/test_sort.c
index 4db3911db50a..d389c1cc2f6c 100644
--- a/lib/test_sort.c
+++ b/lib/test_sort.c
@@ -1,11 +1,8 @@
#include <linux/sort.h>
#include <linux/slab.h>
-#include <linux/init.h>
+#include <linux/module.h>
-/*
- * A simple boot-time regression test
- * License: GPL
- */
+/* a simple boot-time regression test */
#define TEST_LEN 1000
@@ -41,4 +38,6 @@ exit:
kfree(a);
return err;
}
-subsys_initcall(test_sort_init);
+
+module_init(test_sort_init);
+MODULE_LICENSE("GPL");
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 176641cc549d..2d41de3f98a1 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -1477,6 +1477,9 @@ int kptr_restrict __read_mostly;
* by an extra set of alphanumeric characters that are extended format
* specifiers.
*
+ * Please update scripts/checkpatch.pl when adding/removing conversion
+ * characters. (Search for "check for vsprintf extension").
+ *
* Right now we handle:
*
* - 'F' For symbolic function descriptor pointers with offset
diff --git a/lib/zlib_inflate/inftrees.c b/lib/zlib_inflate/inftrees.c
index 3fe6ce5b53e5..028943052926 100644
--- a/lib/zlib_inflate/inftrees.c
+++ b/lib/zlib_inflate/inftrees.c
@@ -109,7 +109,7 @@ int zlib_inflate_table(codetype type, unsigned short *lens, unsigned codes,
*bits = 1;
return 0; /* no symbols, but wait for decoding to report error */
}
- for (min = 1; min <= MAXBITS; min++)
+ for (min = 1; min < MAXBITS; min++)
if (count[min] != 0) break;
if (root < min) root = min;
diff --git a/mm/cma.c b/mm/cma.c
index a6033e344430..978b4a1441ef 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -53,6 +53,11 @@ unsigned long cma_get_size(const struct cma *cma)
return cma->count << PAGE_SHIFT;
}
+const char *cma_get_name(const struct cma *cma)
+{
+ return cma->name ? cma->name : "(undefined)";
+}
+
static unsigned long cma_bitmap_aligned_mask(const struct cma *cma,
int align_order)
{
@@ -168,6 +173,7 @@ core_initcall(cma_init_reserved_areas);
*/
int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
unsigned int order_per_bit,
+ const char *name,
struct cma **res_cma)
{
struct cma *cma;
@@ -198,6 +204,13 @@ int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
* subsystems (like slab allocator) are available.
*/
cma = &cma_areas[cma_area_count];
+ if (name) {
+ cma->name = name;
+ } else {
+ cma->name = kasprintf(GFP_KERNEL, "cma%d\n", cma_area_count);
+ if (!cma->name)
+ return -ENOMEM;
+ }
cma->base_pfn = PFN_DOWN(base);
cma->count = size >> PAGE_SHIFT;
cma->order_per_bit = order_per_bit;
@@ -229,7 +242,7 @@ int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
int __init cma_declare_contiguous(phys_addr_t base,
phys_addr_t size, phys_addr_t limit,
phys_addr_t alignment, unsigned int order_per_bit,
- bool fixed, struct cma **res_cma)
+ bool fixed, const char *name, struct cma **res_cma)
{
phys_addr_t memblock_end = memblock_end_of_DRAM();
phys_addr_t highmem_start;
@@ -335,7 +348,7 @@ int __init cma_declare_contiguous(phys_addr_t base,
base = addr;
}
- ret = cma_init_reserved_mem(base, size, order_per_bit, res_cma);
+ ret = cma_init_reserved_mem(base, size, order_per_bit, name, res_cma);
if (ret)
goto err;
@@ -491,3 +504,17 @@ bool cma_release(struct cma *cma, const struct page *pages, unsigned int count)
return true;
}
+
+int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data)
+{
+ int i;
+
+ for (i = 0; i < cma_area_count; i++) {
+ int ret = it(&cma_areas[i], data);
+
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
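cma_for_each_area() walks every registered CMA area and stops early if the callback returns non-zero; together with cma_get_name() it lets drivers and debug code report areas by name. A hedged usage sketch (the accounting is illustrative only):

    #include <linux/cma.h>
    #include <linux/printk.h>

    static int cma_report_one(struct cma *cma, void *data)
    {
        unsigned long *total = data;

        *total += cma_get_size(cma);
        pr_info("cma area %s: %lu bytes\n",
                cma_get_name(cma), cma_get_size(cma));
        return 0;   /* returning non-zero would abort the walk */
    }

    /* usage: unsigned long total = 0; cma_for_each_area(cma_report_one, &total); */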
diff --git a/mm/cma.h b/mm/cma.h
index 17c75a4246c8..49861286279d 100644
--- a/mm/cma.h
+++ b/mm/cma.h
@@ -11,6 +11,7 @@ struct cma {
struct hlist_head mem_head;
spinlock_t mem_head_lock;
#endif
+ const char *name;
};
extern struct cma cma_areas[MAX_CMA_AREAS];
diff --git a/mm/cma_debug.c b/mm/cma_debug.c
index ffc0c3d0ae64..595b757bef72 100644
--- a/mm/cma_debug.c
+++ b/mm/cma_debug.c
@@ -167,7 +167,7 @@ static void cma_debugfs_add_one(struct cma *cma, int idx)
char name[16];
int u32s;
- sprintf(name, "cma-%d", idx);
+ sprintf(name, "cma-%s", cma->name);
tmp = debugfs_create_dir(name, cma_debugfs_root);
diff --git a/mm/compaction.c b/mm/compaction.c
index 09c5282ebdd2..613c59e928cb 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -89,11 +89,6 @@ static void map_pages(struct list_head *list)
list_splice(&tmp_list, list);
}
-static inline bool migrate_async_suitable(int migratetype)
-{
- return is_migrate_cma(migratetype) || migratetype == MIGRATE_MOVABLE;
-}
-
#ifdef CONFIG_COMPACTION
int PageMovable(struct page *page)
@@ -988,6 +983,22 @@ isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn,
#endif /* CONFIG_COMPACTION || CONFIG_CMA */
#ifdef CONFIG_COMPACTION
+static bool suitable_migration_source(struct compact_control *cc,
+ struct page *page)
+{
+ int block_mt;
+
+ if ((cc->mode != MIGRATE_ASYNC) || !cc->direct_compaction)
+ return true;
+
+ block_mt = get_pageblock_migratetype(page);
+
+ if (cc->migratetype == MIGRATE_MOVABLE)
+ return is_migrate_movable(block_mt);
+ else
+ return block_mt == cc->migratetype;
+}
+
/* Returns true if the page is within a block suitable for migration to */
static bool suitable_migration_target(struct compact_control *cc,
struct page *page)
@@ -1007,7 +1018,7 @@ static bool suitable_migration_target(struct compact_control *cc,
return true;
/* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */
- if (migrate_async_suitable(get_pageblock_migratetype(page)))
+ if (is_migrate_movable(get_pageblock_migratetype(page)))
return true;
/* Otherwise skip the block */
@@ -1242,8 +1253,7 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
* Async compaction is optimistic to see if the minimum amount
* of work satisfies the allocation.
*/
- if (cc->mode == MIGRATE_ASYNC &&
- !migrate_async_suitable(get_pageblock_migratetype(page)))
+ if (!suitable_migration_source(cc, page))
continue;
/* Perform the isolation */
@@ -1276,11 +1286,11 @@ static inline bool is_via_compact_memory(int order)
return order == -1;
}
-static enum compact_result __compact_finished(struct zone *zone, struct compact_control *cc,
- const int migratetype)
+static enum compact_result __compact_finished(struct zone *zone,
+ struct compact_control *cc)
{
unsigned int order;
- unsigned long watermark;
+ const int migratetype = cc->migratetype;
if (cc->contended || fatal_signal_pending(current))
return COMPACT_CONTENDED;
@@ -1308,12 +1318,16 @@ static enum compact_result __compact_finished(struct zone *zone, struct compact_
if (is_via_compact_memory(cc->order))
return COMPACT_CONTINUE;
- /* Compaction run is not finished if the watermark is not met */
- watermark = zone->watermark[cc->alloc_flags & ALLOC_WMARK_MASK];
-
- if (!zone_watermark_ok(zone, cc->order, watermark, cc->classzone_idx,
- cc->alloc_flags))
- return COMPACT_CONTINUE;
+ if (cc->finishing_block) {
+ /*
+ * We have finished the pageblock, but better check again that
+ * we really succeeded.
+ */
+ if (IS_ALIGNED(cc->migrate_pfn, pageblock_nr_pages))
+ cc->finishing_block = false;
+ else
+ return COMPACT_CONTINUE;
+ }
/* Direct compactor: Is a suitable page free? */
for (order = cc->order; order < MAX_ORDER; order++) {
@@ -1335,20 +1349,40 @@ static enum compact_result __compact_finished(struct zone *zone, struct compact_
* other migratetype buddy lists.
*/
if (find_suitable_fallback(area, order, migratetype,
- true, &can_steal) != -1)
- return COMPACT_SUCCESS;
+ true, &can_steal) != -1) {
+
+ /* movable pages are OK in any pageblock */
+ if (migratetype == MIGRATE_MOVABLE)
+ return COMPACT_SUCCESS;
+
+ /*
+ * We are stealing for a non-movable allocation. Make
+ * sure we finish compacting the current pageblock
+ * first so it is as free as possible and we won't
+ * have to steal another one soon. This only applies
+ * to sync compaction, as async compaction operates
+ * on pageblocks of the same migratetype.
+ */
+ if (cc->mode == MIGRATE_ASYNC ||
+ IS_ALIGNED(cc->migrate_pfn,
+ pageblock_nr_pages)) {
+ return COMPACT_SUCCESS;
+ }
+
+ cc->finishing_block = true;
+ return COMPACT_CONTINUE;
+ }
}
return COMPACT_NO_SUITABLE_PAGE;
}
static enum compact_result compact_finished(struct zone *zone,
- struct compact_control *cc,
- const int migratetype)
+ struct compact_control *cc)
{
int ret;
- ret = __compact_finished(zone, cc, migratetype);
+ ret = __compact_finished(zone, cc);
trace_mm_compaction_finished(zone, cc->order, ret);
if (ret == COMPACT_NO_SUITABLE_PAGE)
ret = COMPACT_CONTINUE;
@@ -1481,9 +1515,9 @@ static enum compact_result compact_zone(struct zone *zone, struct compact_contro
enum compact_result ret;
unsigned long start_pfn = zone->zone_start_pfn;
unsigned long end_pfn = zone_end_pfn(zone);
- const int migratetype = gfpflags_to_migratetype(cc->gfp_mask);
const bool sync = cc->mode != MIGRATE_ASYNC;
+ cc->migratetype = gfpflags_to_migratetype(cc->gfp_mask);
ret = compaction_suitable(zone, cc->order, cc->alloc_flags,
cc->classzone_idx);
/* Compaction is likely to fail */
@@ -1533,8 +1567,7 @@ static enum compact_result compact_zone(struct zone *zone, struct compact_contro
migrate_prep_local();
- while ((ret = compact_finished(zone, cc, migratetype)) ==
- COMPACT_CONTINUE) {
+ while ((ret = compact_finished(zone, cc)) == COMPACT_CONTINUE) {
int err;
switch (isolate_migratepages(zone, cc)) {
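With the hunks above, async direct compaction only scans source pageblocks of the requested migratetype, and a sync, non-movable direct compaction does not report success until the current pageblock has been fully migrated (the new finishing_block flag). The following userspace model of that termination rule is only an illustration of the heuristic, not kernel code, and assumes 512 pages per pageblock:

    #include <stdbool.h>
    #include <stdio.h>

    #define PAGEBLOCK_NR_PAGES 512UL

    enum mode { ASYNC, SYNC };

    static bool report_success(enum mode mode, bool movable_alloc,
                               unsigned long migrate_pfn)
    {
        if (movable_alloc)
            return true;        /* movable pages fit in any pageblock */
        if (mode == ASYNC)
            return true;        /* async never crosses migratetypes anyway */
        /* sync: keep going until the current pageblock is finished */
        return migrate_pfn % PAGEBLOCK_NR_PAGES == 0;
    }

    int main(void)
    {
        printf("%d\n", report_success(SYNC, false, 1000)); /* 0: continue */
        printf("%d\n", report_success(SYNC, false, 1024)); /* 1: success  */
        return 0;
    }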
diff --git a/mm/filemap.c b/mm/filemap.c
index 681da61080bc..6f1be573a5e6 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2050,7 +2050,7 @@ generic_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
iocb->ki_pos += retval;
count -= retval;
}
- iov_iter_revert(iter, iov_iter_count(iter) - count);
+ iov_iter_revert(iter, count - iov_iter_count(iter));
/*
* Btrfs can have a short DIO read if we encounter
@@ -2791,12 +2791,6 @@ ssize_t generic_perform_write(struct file *file,
ssize_t written = 0;
unsigned int flags = 0;
- /*
- * Copies from kernel address space cannot fail (NFSD is a big user).
- */
- if (!iter_is_iovec(i))
- flags |= AOP_FLAG_UNINTERRUPTIBLE;
-
do {
struct page *page;
unsigned long offset; /* Offset into pagecache page */
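The iov_iter_revert() fix above winds the iterator back by however many bytes it advanced beyond what was actually consumed. A worked example with hypothetical numbers: an 8192-byte direct-I/O read that exhausts the iterator but returns only 4096 leaves count = 8192 - 4096 = 4096 and iov_iter_count(iter) = 0, so the correct revert is 4096 - 0 = 4096 bytes before the buffered fallback runs; the old expression computed 0 - 4096 and underflowed. A tiny standalone model of that arithmetic:

    #include <stdio.h>

    int main(void)
    {
        size_t requested = 8192;                 /* initial iov_iter_count() */
        size_t retval    = 4096;                 /* short direct-I/O read    */
        size_t count     = requested - retval;   /* bytes still owed: 4096   */
        size_t remaining = 0;                    /* iterator fully consumed  */

        printf("revert by %zu bytes\n", count - remaining);
        return 0;
    }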
diff --git a/mm/frame_vector.c b/mm/frame_vector.c
index db77dcb38afd..72ebec18629c 100644
--- a/mm/frame_vector.c
+++ b/mm/frame_vector.c
@@ -200,10 +200,7 @@ struct frame_vector *frame_vector_create(unsigned int nr_frames)
* Avoid higher order allocations, use vmalloc instead. It should
* be rare anyway.
*/
- if (size <= PAGE_SIZE)
- vec = kmalloc(size, GFP_KERNEL);
- else
- vec = vmalloc(size);
+ vec = kvmalloc(size, GFP_KERNEL);
if (!vec)
return NULL;
vec->nr_allocated = nr_frames;
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index b787c4cfda0e..a84909cf20d3 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -715,7 +715,8 @@ int do_huge_pmd_anonymous_page(struct vm_fault *vmf)
}
static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
- pmd_t *pmd, pfn_t pfn, pgprot_t prot, bool write)
+ pmd_t *pmd, pfn_t pfn, pgprot_t prot, bool write,
+ pgtable_t pgtable)
{
struct mm_struct *mm = vma->vm_mm;
pmd_t entry;
@@ -729,6 +730,12 @@ static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
entry = pmd_mkyoung(pmd_mkdirty(entry));
entry = maybe_pmd_mkwrite(entry, vma);
}
+
+ if (pgtable) {
+ pgtable_trans_huge_deposit(mm, pmd, pgtable);
+ atomic_long_inc(&mm->nr_ptes);
+ }
+
set_pmd_at(mm, addr, pmd, entry);
update_mmu_cache_pmd(vma, addr, pmd);
spin_unlock(ptl);
@@ -738,6 +745,7 @@ int vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
pmd_t *pmd, pfn_t pfn, bool write)
{
pgprot_t pgprot = vma->vm_page_prot;
+ pgtable_t pgtable = NULL;
/*
* If we had pmd_special, we could avoid all these restrictions,
* but we need to be consistent with PTEs and architectures that
@@ -752,9 +760,15 @@ int vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
if (addr < vma->vm_start || addr >= vma->vm_end)
return VM_FAULT_SIGBUS;
+ if (arch_needs_pgtable_deposit()) {
+ pgtable = pte_alloc_one(vma->vm_mm, addr);
+ if (!pgtable)
+ return VM_FAULT_OOM;
+ }
+
track_pfn_insert(vma, &pgprot, pfn);
- insert_pfn_pmd(vma, addr, pmd, pfn, pgprot, write);
+ insert_pfn_pmd(vma, addr, pmd, pfn, pgprot, write, pgtable);
return VM_FAULT_NOPAGE;
}
EXPORT_SYMBOL_GPL(vmf_insert_pfn_pmd);
@@ -1611,12 +1625,13 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
tlb->fullmm);
tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
if (vma_is_dax(vma)) {
+ if (arch_needs_pgtable_deposit())
+ zap_deposited_table(tlb->mm, pmd);
spin_unlock(ptl);
if (is_huge_zero_pmd(orig_pmd))
tlb_remove_page_size(tlb, pmd_page(orig_pmd), HPAGE_PMD_SIZE);
} else if (is_huge_zero_pmd(orig_pmd)) {
- pte_free(tlb->mm, pgtable_trans_huge_withdraw(tlb->mm, pmd));
- atomic_long_dec(&tlb->mm->nr_ptes);
+ zap_deposited_table(tlb->mm, pmd);
spin_unlock(ptl);
tlb_remove_page_size(tlb, pmd_page(orig_pmd), HPAGE_PMD_SIZE);
} else {
@@ -1625,10 +1640,7 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
VM_BUG_ON_PAGE(page_mapcount(page) < 0, page);
VM_BUG_ON_PAGE(!PageHead(page), page);
if (PageAnon(page)) {
- pgtable_t pgtable;
- pgtable = pgtable_trans_huge_withdraw(tlb->mm, pmd);
- pte_free(tlb->mm, pgtable);
- atomic_long_dec(&tlb->mm->nr_ptes);
+ zap_deposited_table(tlb->mm, pmd);
add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
} else {
if (arch_needs_pgtable_deposit())
diff --git a/mm/internal.h b/mm/internal.h
index 04d08ef91224..0e4f558412fb 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -183,6 +183,7 @@ extern int user_min_free_kbytes;
struct compact_control {
struct list_head freepages; /* List of free pages to migrate to */
struct list_head migratepages; /* List of pages being migrated */
+ struct zone *zone;
unsigned long nr_freepages; /* Number of isolated free pages */
unsigned long nr_migratepages; /* Number of pages to migrate */
unsigned long total_migrate_scanned;
@@ -190,17 +191,18 @@ struct compact_control {
unsigned long free_pfn; /* isolate_freepages search base */
unsigned long migrate_pfn; /* isolate_migratepages search base */
unsigned long last_migrated_pfn;/* Not yet flushed page being freed */
+ const gfp_t gfp_mask; /* gfp mask of a direct compactor */
+ int order; /* order a direct compactor needs */
+ int migratetype; /* migratetype of direct compactor */
+ const unsigned int alloc_flags; /* alloc flags of a direct compactor */
+ const int classzone_idx; /* zone index of a direct compactor */
enum migrate_mode mode; /* Async or sync migration mode */
bool ignore_skip_hint; /* Scan blocks even if marked skip */
bool ignore_block_suitable; /* Scan blocks considered unsuitable */
bool direct_compaction; /* False from kcompactd or /proc/... */
bool whole_zone; /* Whole zone should/has been scanned */
- int order; /* order a direct compactor needs */
- const gfp_t gfp_mask; /* gfp mask of a direct compactor */
- const unsigned int alloc_flags; /* alloc flags of a direct compactor */
- const int classzone_idx; /* zone index of a direct compactor */
- struct zone *zone;
bool contended; /* Signal lock or sched contention */
+ bool finishing_block; /* Finishing current pageblock */
};
unsigned long
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c
index 9348d27088c1..c81549d5c833 100644
--- a/mm/kasan/kasan.c
+++ b/mm/kasan/kasan.c
@@ -413,7 +413,7 @@ void kasan_cache_create(struct kmem_cache *cache, size_t *size,
*size += sizeof(struct kasan_alloc_meta);
/* Add free meta. */
- if (cache->flags & SLAB_DESTROY_BY_RCU || cache->ctor ||
+ if (cache->flags & SLAB_TYPESAFE_BY_RCU || cache->ctor ||
cache->object_size < sizeof(struct kasan_free_meta)) {
cache->kasan_info.free_meta_offset = *size;
*size += sizeof(struct kasan_free_meta);
@@ -561,7 +561,7 @@ static void kasan_poison_slab_free(struct kmem_cache *cache, void *object)
unsigned long rounded_up_size = round_up(size, KASAN_SHADOW_SCALE_SIZE);
/* RCU slabs could be legally used after free within the RCU period */
- if (unlikely(cache->flags & SLAB_DESTROY_BY_RCU))
+ if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU))
return;
kasan_poison_shadow(object, rounded_up_size, KASAN_KMALLOC_FREE);
@@ -572,7 +572,7 @@ bool kasan_slab_free(struct kmem_cache *cache, void *object)
s8 shadow_byte;
/* RCU slabs could be legally used after free within the RCU period */
- if (unlikely(cache->flags & SLAB_DESTROY_BY_RCU))
+ if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU))
return false;
shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(object));
@@ -691,7 +691,7 @@ int kasan_module_alloc(void *addr, size_t size)
ret = __vmalloc_node_range(shadow_size, 1, shadow_start,
shadow_start + shadow_size,
- GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
+ GFP_KERNEL | __GFP_ZERO,
PAGE_KERNEL, VM_NO_GUARD, NUMA_NO_NODE,
__builtin_return_address(0));
diff --git a/mm/kmemcheck.c b/mm/kmemcheck.c
index 5bf191756a4a..2d5959c5f7c5 100644
--- a/mm/kmemcheck.c
+++ b/mm/kmemcheck.c
@@ -95,7 +95,7 @@ void kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object,
void kmemcheck_slab_free(struct kmem_cache *s, void *object, size_t size)
{
/* TODO: RCU freeing is unsupported for now; hide false positives. */
- if (!s->ctor && !(s->flags & SLAB_DESTROY_BY_RCU))
+ if (!s->ctor && !(s->flags & SLAB_TYPESAFE_BY_RCU))
kmemcheck_mark_freed(object, size);
}
diff --git a/mm/memblock.c b/mm/memblock.c
index 696f06d17c4e..b049c9b2dba8 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -805,6 +805,18 @@ int __init_memblock memblock_mark_nomap(phys_addr_t base, phys_addr_t size)
}
/**
+ * memblock_clear_nomap - Clear flag MEMBLOCK_NOMAP for a specified region.
+ * @base: the base phys addr of the region
+ * @size: the size of the region
+ *
+ * Return 0 on success, -errno on failure.
+ */
+int __init_memblock memblock_clear_nomap(phys_addr_t base, phys_addr_t size)
+{
+ return memblock_setclr_flag(base, size, 0, MEMBLOCK_NOMAP);
+}
+
+/**
* __next_reserved_mem_region - next function for for_each_reserved_region()
* @idx: pointer to u64 loop variable
* @out_start: ptr to phys_addr_t for start address of the region, can be %NULL
@@ -1531,11 +1543,37 @@ void __init memblock_enforce_memory_limit(phys_addr_t limit)
(phys_addr_t)ULLONG_MAX);
}
+void __init memblock_cap_memory_range(phys_addr_t base, phys_addr_t size)
+{
+ int start_rgn, end_rgn;
+ int i, ret;
+
+ if (!size)
+ return;
+
+ ret = memblock_isolate_range(&memblock.memory, base, size,
+ &start_rgn, &end_rgn);
+ if (ret)
+ return;
+
+ /* remove all the MAP regions */
+ for (i = memblock.memory.cnt - 1; i >= end_rgn; i--)
+ if (!memblock_is_nomap(&memblock.memory.regions[i]))
+ memblock_remove_region(&memblock.memory, i);
+
+ for (i = start_rgn - 1; i >= 0; i--)
+ if (!memblock_is_nomap(&memblock.memory.regions[i]))
+ memblock_remove_region(&memblock.memory, i);
+
+ /* truncate the reserved regions */
+ memblock_remove_range(&memblock.reserved, 0, base);
+ memblock_remove_range(&memblock.reserved,
+ base + size, (phys_addr_t)ULLONG_MAX);
+}
+
void __init memblock_mem_limit_remove_map(phys_addr_t limit)
{
- struct memblock_type *type = &memblock.memory;
phys_addr_t max_addr;
- int i, ret, start_rgn, end_rgn;
if (!limit)
return;
@@ -1546,19 +1584,7 @@ void __init memblock_mem_limit_remove_map(phys_addr_t limit)
if (max_addr == (phys_addr_t)ULLONG_MAX)
return;
- ret = memblock_isolate_range(type, max_addr, (phys_addr_t)ULLONG_MAX,
- &start_rgn, &end_rgn);
- if (ret)
- return;
-
- /* remove all the MAP regions above the limit */
- for (i = end_rgn - 1; i >= start_rgn; i--) {
- if (!memblock_is_nomap(&type->regions[i]))
- memblock_remove_region(type, i);
- }
- /* truncate the reserved regions */
- memblock_remove_range(&memblock.reserved, max_addr,
- (phys_addr_t)ULLONG_MAX);
+ memblock_cap_memory_range(0, max_addr);
}
static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr)
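memblock_cap_memory_range() generalizes the old limit handling: everything outside [base, base + size) is dropped except NOMAP regions, and the reserved array is trimmed to the same window. A hedged sketch of how early platform code might use the two new helpers (the wrapper names and call sites are illustrative, not taken from this diff):

    #include <linux/memblock.h>

    /* e.g. a crash-dump capture kernel restricting itself to its window */
    static void __init cap_usable_memory(phys_addr_t base, phys_addr_t size)
    {
        memblock_cap_memory_range(base, size);
    }

    /* ...and later making a firmware NOMAP region mappable again */
    static void __init release_fw_region(phys_addr_t base, phys_addr_t size)
    {
        memblock_clear_nomap(base, size);
    }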
diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c
index a7652acd2ab9..54ca54562928 100644
--- a/mm/mmu_notifier.c
+++ b/mm/mmu_notifier.c
@@ -21,7 +21,7 @@
#include <linux/slab.h>
/* global SRCU for all MMs */
-static struct srcu_struct srcu;
+DEFINE_STATIC_SRCU(srcu);
/*
* This function allows mmu_notifier::release callback to delay a call to
@@ -252,12 +252,6 @@ static int do_mmu_notifier_register(struct mmu_notifier *mn,
BUG_ON(atomic_read(&mm->mm_users) <= 0);
- /*
- * Verify that mmu_notifier_init() already run and the global srcu is
- * initialized.
- */
- BUG_ON(!srcu.per_cpu_ref);
-
ret = -ENOMEM;
mmu_notifier_mm = kmalloc(sizeof(struct mmu_notifier_mm), GFP_KERNEL);
if (unlikely(!mmu_notifier_mm))
@@ -406,9 +400,3 @@ void mmu_notifier_unregister_no_release(struct mmu_notifier *mn,
mmdrop(mm);
}
EXPORT_SYMBOL_GPL(mmu_notifier_unregister_no_release);
-
-static int __init mmu_notifier_init(void)
-{
- return init_srcu_struct(&srcu);
-}
-subsys_initcall(mmu_notifier_init);
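DEFINE_STATIC_SRCU() gives a compile-time-initialized srcu_struct, which is why the subsys_initcall and the per_cpu_ref sanity check can go away. A minimal hedged sketch of the general pattern (names are illustrative):

    #include <linux/srcu.h>

    DEFINE_STATIC_SRCU(my_srcu);        /* no runtime init_srcu_struct() needed */

    static void reader(void)
    {
        int idx = srcu_read_lock(&my_srcu);
        /* ... dereference SRCU-protected state ... */
        srcu_read_unlock(&my_srcu, idx);
    }

    static void updater_teardown(void)
    {
        synchronize_srcu(&my_srcu);     /* wait out current readers */
    }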
diff --git a/mm/nommu.c b/mm/nommu.c
index 2d131b97a851..fc184f597d59 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -237,12 +237,16 @@ void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
}
EXPORT_SYMBOL(__vmalloc);
+void *__vmalloc_node_flags(unsigned long size, int node, gfp_t flags)
+{
+ return __vmalloc(size, flags, PAGE_KERNEL);
+}
+
void *vmalloc_user(unsigned long size)
{
void *ret;
- ret = __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
- PAGE_KERNEL);
+ ret = __vmalloc(size, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL);
if (ret) {
struct vm_area_struct *vma;
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 2359608d2568..143c1c25d680 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2352,10 +2352,16 @@ int do_writepages(struct address_space *mapping, struct writeback_control *wbc)
if (wbc->nr_to_write <= 0)
return 0;
- if (mapping->a_ops->writepages)
- ret = mapping->a_ops->writepages(mapping, wbc);
- else
- ret = generic_writepages(mapping, wbc);
+ while (1) {
+ if (mapping->a_ops->writepages)
+ ret = mapping->a_ops->writepages(mapping, wbc);
+ else
+ ret = generic_writepages(mapping, wbc);
+ if ((ret != -ENOMEM) || (wbc->sync_mode != WB_SYNC_ALL))
+ break;
+ cond_resched();
+ congestion_wait(BLK_RW_ASYNC, HZ/50);
+ }
return ret;
}
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 2c25de46c58f..f9e450c6b6e4 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1832,9 +1832,9 @@ static inline struct page *__rmqueue_cma_fallback(struct zone *zone,
* Note that start_page and end_pages are not aligned on a pageblock
* boundary. If alignment is required, use move_freepages_block()
*/
-int move_freepages(struct zone *zone,
+static int move_freepages(struct zone *zone,
struct page *start_page, struct page *end_page,
- int migratetype)
+ int migratetype, int *num_movable)
{
struct page *page;
unsigned int order;
@@ -1851,6 +1851,9 @@ int move_freepages(struct zone *zone,
VM_BUG_ON(page_zone(start_page) != page_zone(end_page));
#endif
+ if (num_movable)
+ *num_movable = 0;
+
for (page = start_page; page <= end_page;) {
if (!pfn_valid_within(page_to_pfn(page))) {
page++;
@@ -1861,6 +1864,15 @@ int move_freepages(struct zone *zone,
VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
if (!PageBuddy(page)) {
+ /*
+ * We assume that pages that could be isolated for
+ * migration are movable. But we don't actually try
+ * isolating, as that would be expensive.
+ */
+ if (num_movable &&
+ (PageLRU(page) || __PageMovable(page)))
+ (*num_movable)++;
+
page++;
continue;
}
@@ -1876,7 +1888,7 @@ int move_freepages(struct zone *zone,
}
int move_freepages_block(struct zone *zone, struct page *page,
- int migratetype)
+ int migratetype, int *num_movable)
{
unsigned long start_pfn, end_pfn;
struct page *start_page, *end_page;
@@ -1893,7 +1905,8 @@ int move_freepages_block(struct zone *zone, struct page *page,
if (!zone_spans_pfn(zone, end_pfn))
return 0;
- return move_freepages(zone, start_page, end_page, migratetype);
+ return move_freepages(zone, start_page, end_page, migratetype,
+ num_movable);
}
static void change_pageblock_range(struct page *pageblock_page,
@@ -1943,28 +1956,79 @@ static bool can_steal_fallback(unsigned int order, int start_mt)
/*
* This function implements actual steal behaviour. If order is large enough,
* we can steal whole pageblock. If not, we first move freepages in this
- * pageblock and check whether half of pages are moved or not. If half of
- * pages are moved, we can change migratetype of pageblock and permanently
- * use it's pages as requested migratetype in the future.
+ * pageblock to our migratetype and determine how many already-allocated pages
+ * are there in the pageblock with a compatible migratetype. If at least half
+ * of pages are free or compatible, we can change migratetype of the pageblock
+ * itself, so pages freed in the future will be put on the correct free list.
*/
static void steal_suitable_fallback(struct zone *zone, struct page *page,
- int start_type)
+ int start_type, bool whole_block)
{
unsigned int current_order = page_order(page);
- int pages;
+ struct free_area *area;
+ int free_pages, movable_pages, alike_pages;
+ int old_block_type;
+
+ old_block_type = get_pageblock_migratetype(page);
+
+ /*
+ * This can happen due to races and we want to prevent broken
+ * highatomic accounting.
+ */
+ if (is_migrate_highatomic(old_block_type))
+ goto single_page;
/* Take ownership for orders >= pageblock_order */
if (current_order >= pageblock_order) {
change_pageblock_range(page, current_order, start_type);
- return;
+ goto single_page;
+ }
+
+ /* We are not allowed to try stealing from the whole block */
+ if (!whole_block)
+ goto single_page;
+
+ free_pages = move_freepages_block(zone, page, start_type,
+ &movable_pages);
+ /*
+ * Determine how many pages are compatible with our allocation.
+ * For movable allocation, it's the number of movable pages which
+ * we just obtained. For other types it's a bit more tricky.
+ */
+ if (start_type == MIGRATE_MOVABLE) {
+ alike_pages = movable_pages;
+ } else {
+ /*
+ * If we are falling back a RECLAIMABLE or UNMOVABLE allocation
+ * to MOVABLE pageblock, consider all non-movable pages as
+ * compatible. If it's UNMOVABLE falling back to RECLAIMABLE or
+ * vice versa, be conservative since we can't distinguish the
+ * exact migratetype of non-movable pages.
+ */
+ if (old_block_type == MIGRATE_MOVABLE)
+ alike_pages = pageblock_nr_pages
+ - (free_pages + movable_pages);
+ else
+ alike_pages = 0;
}
- pages = move_freepages_block(zone, page, start_type);
+ /* moving whole block can fail due to zone boundary conditions */
+ if (!free_pages)
+ goto single_page;
- /* Claim the whole block if over half of it is free */
- if (pages >= (1 << (pageblock_order-1)) ||
+ /*
+ * If a sufficient number of pages in the block are either free or of
+ * comparable migratability as our allocation, claim the whole block.
+ */
+ if (free_pages + alike_pages >= (1 << (pageblock_order-1)) ||
page_group_by_mobility_disabled)
set_pageblock_migratetype(page, start_type);
+
+ return;
+
+single_page:
+ area = &zone->free_area[current_order];
+ list_move(&page->lru, &area->free_list[start_type]);
}
/*
@@ -2034,7 +2098,7 @@ static void reserve_highatomic_pageblock(struct page *page, struct zone *zone,
&& !is_migrate_cma(mt)) {
zone->nr_reserved_highatomic += pageblock_nr_pages;
set_pageblock_migratetype(page, MIGRATE_HIGHATOMIC);
- move_freepages_block(zone, page, MIGRATE_HIGHATOMIC);
+ move_freepages_block(zone, page, MIGRATE_HIGHATOMIC, NULL);
}
out_unlock:
@@ -2111,7 +2175,8 @@ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
* may increase.
*/
set_pageblock_migratetype(page, ac->migratetype);
- ret = move_freepages_block(zone, page, ac->migratetype);
+ ret = move_freepages_block(zone, page, ac->migratetype,
+ NULL);
if (ret) {
spin_unlock_irqrestore(&zone->lock, flags);
return ret;
@@ -2123,8 +2188,13 @@ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
return false;
}
-/* Remove an element from the buddy allocator from the fallback list */
-static inline struct page *
+/*
+ * Try finding a free buddy page on the fallback list and put it on the free
+ * list of requested migratetype, possibly along with other pages from the same
+ * block, depending on fragmentation avoidance heuristics. Returns true if
+ * fallback was found so that __rmqueue_smallest() can grab it.
+ */
+static inline bool
__rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype)
{
struct free_area *area;
@@ -2145,32 +2215,17 @@ __rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype)
page = list_first_entry(&area->free_list[fallback_mt],
struct page, lru);
- if (can_steal && !is_migrate_highatomic_page(page))
- steal_suitable_fallback(zone, page, start_migratetype);
-
- /* Remove the page from the freelists */
- area->nr_free--;
- list_del(&page->lru);
- rmv_page_order(page);
- expand(zone, page, order, current_order, area,
- start_migratetype);
- /*
- * The pcppage_migratetype may differ from pageblock's
- * migratetype depending on the decisions in
- * find_suitable_fallback(). This is OK as long as it does not
- * differ for MIGRATE_CMA pageblocks. Those can be used as
- * fallback only via special __rmqueue_cma_fallback() function
- */
- set_pcppage_migratetype(page, start_migratetype);
+ steal_suitable_fallback(zone, page, start_migratetype,
+ can_steal);
trace_mm_page_alloc_extfrag(page, order, current_order,
start_migratetype, fallback_mt);
- return page;
+ return true;
}
- return NULL;
+ return false;
}
/*
@@ -2182,13 +2237,14 @@ static struct page *__rmqueue(struct zone *zone, unsigned int order,
{
struct page *page;
+retry:
page = __rmqueue_smallest(zone, order, migratetype);
if (unlikely(!page)) {
if (migratetype == MIGRATE_MOVABLE)
page = __rmqueue_cma_fallback(zone, order);
- if (!page)
- page = __rmqueue_fallback(zone, order, migratetype);
+ if (!page && __rmqueue_fallback(zone, order, migratetype))
+ goto retry;
}
trace_mm_page_alloc_zone_locked(page, order, migratetype);
@@ -3227,14 +3283,15 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
enum compact_priority prio, enum compact_result *compact_result)
{
struct page *page;
+ unsigned int noreclaim_flag;
if (!order)
return NULL;
- current->flags |= PF_MEMALLOC;
+ noreclaim_flag = memalloc_noreclaim_save();
*compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac,
prio);
- current->flags &= ~PF_MEMALLOC;
+ memalloc_noreclaim_restore(noreclaim_flag);
if (*compact_result <= COMPACT_INACTIVE)
return NULL;
@@ -3381,12 +3438,13 @@ __perform_reclaim(gfp_t gfp_mask, unsigned int order,
{
struct reclaim_state reclaim_state;
int progress;
+ unsigned int noreclaim_flag;
cond_resched();
/* We now go into synchronous reclaim */
cpuset_memory_pressure_bump();
- current->flags |= PF_MEMALLOC;
+ noreclaim_flag = memalloc_noreclaim_save();
lockdep_set_current_reclaim_state(gfp_mask);
reclaim_state.reclaimed_slab = 0;
current->reclaim_state = &reclaim_state;
@@ -3396,7 +3454,7 @@ __perform_reclaim(gfp_t gfp_mask, unsigned int order,
current->reclaim_state = NULL;
lockdep_clear_current_reclaim_state();
- current->flags &= ~PF_MEMALLOC;
+ memalloc_noreclaim_restore(noreclaim_flag);
cond_resched();
@@ -3609,6 +3667,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
struct alloc_context *ac)
{
bool can_direct_reclaim = gfp_mask & __GFP_DIRECT_RECLAIM;
+ const bool costly_order = order > PAGE_ALLOC_COSTLY_ORDER;
struct page *page = NULL;
unsigned int alloc_flags;
unsigned long did_some_progress;
@@ -3676,12 +3735,17 @@ retry_cpuset:
/*
* For costly allocations, try direct compaction first, as it's likely
- * that we have enough base pages and don't need to reclaim. Don't try
- * that for allocations that are allowed to ignore watermarks, as the
- * ALLOC_NO_WATERMARKS attempt didn't yet happen.
+ * that we have enough base pages and don't need to reclaim. For non-
+ * movable high-order allocations, do that as well, as compaction will
+ * try to prevent permanent fragmentation by migrating from blocks of the
+ * same migratetype.
+ * Don't try this for allocations that are allowed to ignore
+ * watermarks, as the ALLOC_NO_WATERMARKS attempt didn't yet happen.
*/
- if (can_direct_reclaim && order > PAGE_ALLOC_COSTLY_ORDER &&
- !gfp_pfmemalloc_allowed(gfp_mask)) {
+ if (can_direct_reclaim &&
+ (costly_order ||
+ (order > 0 && ac->migratetype != MIGRATE_MOVABLE))
+ && !gfp_pfmemalloc_allowed(gfp_mask)) {
page = __alloc_pages_direct_compact(gfp_mask, order,
alloc_flags, ac,
INIT_COMPACT_PRIORITY,
@@ -3693,7 +3757,7 @@ retry_cpuset:
* Checks for costly allocations with __GFP_NORETRY, which
* includes THP page fault allocations
*/
- if (gfp_mask & __GFP_NORETRY) {
+ if (costly_order && (gfp_mask & __GFP_NORETRY)) {
/*
* If compaction is deferred for high-order allocations,
* it is because sync compaction recently failed. If
@@ -3774,7 +3838,7 @@ retry:
* Do not retry costly high order allocations unless they are
* __GFP_REPEAT
*/
- if (order > PAGE_ALLOC_COSTLY_ORDER && !(gfp_mask & __GFP_REPEAT))
+ if (costly_order && !(gfp_mask & __GFP_REPEAT))
goto nopage;
if (should_reclaim_retry(gfp_mask, order, ac, alloc_flags,
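The reworked steal_suitable_fallback() above moves the whole pageblock's free pages first, counts how many allocated pages look compatible (movable pages, or everything but movable, depending on the direction of the fallback), and only changes the pageblock's migratetype when free plus compatible pages reach half the block. A userspace model of just that threshold, assuming pageblock_order of 9 (512 pages); this is an illustration, not kernel code:

    #include <stdbool.h>
    #include <stdio.h>

    #define PAGEBLOCK_NR_PAGES 512

    static bool claim_whole_block(int free_pages, int alike_pages)
    {
        return free_pages + alike_pages >= PAGEBLOCK_NR_PAGES / 2;
    }

    int main(void)
    {
        /* 200 free + 80 compatible = 280 >= 256: retype the pageblock */
        printf("%d\n", claim_whole_block(200, 80));
        /* 100 free + 50 compatible = 150 <  256: steal this page only */
        printf("%d\n", claim_whole_block(100, 50));
        return 0;
    }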
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index 7927bbb54a4e..5092e4ef00c8 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -66,7 +66,8 @@ out:
set_pageblock_migratetype(page, MIGRATE_ISOLATE);
zone->nr_isolate_pageblock++;
- nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE);
+ nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE,
+ NULL);
__mod_zone_freepage_state(zone, -nr_pages, migratetype);
}
@@ -120,7 +121,7 @@ static void unset_migratetype_isolate(struct page *page, unsigned migratetype)
* pageblock scanning for freepage moving.
*/
if (!isolated_page) {
- nr_pages = move_freepages_block(zone, page, migratetype);
+ nr_pages = move_freepages_block(zone, page, migratetype, NULL);
__mod_zone_freepage_state(zone, nr_pages, migratetype);
}
set_pageblock_migratetype(page, migratetype);
diff --git a/mm/rmap.c b/mm/rmap.c
index 3ff241f714eb..d405f0e0ee96 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -430,7 +430,7 @@ static void anon_vma_ctor(void *data)
void __init anon_vma_init(void)
{
anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
- 0, SLAB_DESTROY_BY_RCU|SLAB_PANIC|SLAB_ACCOUNT,
+ 0, SLAB_TYPESAFE_BY_RCU|SLAB_PANIC|SLAB_ACCOUNT,
anon_vma_ctor);
anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain,
SLAB_PANIC|SLAB_ACCOUNT);
@@ -481,7 +481,7 @@ struct anon_vma *page_get_anon_vma(struct page *page)
* If this page is still mapped, then its anon_vma cannot have been
* freed. But if it has been unmapped, we have no security against the
* anon_vma structure being freed and reused (for another anon_vma:
- * SLAB_DESTROY_BY_RCU guarantees that - so the atomic_inc_not_zero()
+ * SLAB_TYPESAFE_BY_RCU guarantees that - so the atomic_inc_not_zero()
* above cannot corrupt).
*/
if (!page_mapped(page)) {
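The SLAB_DESTROY_BY_RCU to SLAB_TYPESAFE_BY_RCU rename (here and in the slab, slub, slob and kasan hunks around it) is purely a rename: objects in such a cache may be freed and reused before a grace period ends, but the memory keeps its type, so readers must revalidate after taking a reference, exactly as page_get_anon_vma() does above. A hedged sketch of declaring such a cache (struct obj is invented for the sketch):

    #include <linux/errno.h>
    #include <linux/init.h>
    #include <linux/slab.h>

    struct obj {
        atomic_t refcount;
        unsigned long key;
    };

    static struct kmem_cache *obj_cache;

    static int __init obj_cache_init(void)
    {
        obj_cache = kmem_cache_create("obj", sizeof(struct obj), 0,
                                      SLAB_TYPESAFE_BY_RCU, NULL);
        return obj_cache ? 0 : -ENOMEM;
    }

    /*
     * Lookups under rcu_read_lock() must take a reference with something
     * like atomic_inc_not_zero() and then re-check the object's identity,
     * because the slot may already belong to a different object of the
     * same type.
     */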
diff --git a/mm/slab.c b/mm/slab.c
index 1880d482a0cb..2a31ee3c5814 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1728,7 +1728,7 @@ static void slab_destroy(struct kmem_cache *cachep, struct page *page)
freelist = page->freelist;
slab_destroy_debugcheck(cachep, page);
- if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU))
+ if (unlikely(cachep->flags & SLAB_TYPESAFE_BY_RCU))
call_rcu(&page->rcu_head, kmem_rcu_free);
else
kmem_freepages(cachep, page);
@@ -1924,7 +1924,7 @@ static bool set_objfreelist_slab_cache(struct kmem_cache *cachep,
cachep->num = 0;
- if (cachep->ctor || flags & SLAB_DESTROY_BY_RCU)
+ if (cachep->ctor || flags & SLAB_TYPESAFE_BY_RCU)
return false;
left = calculate_slab_order(cachep, size,
@@ -2030,7 +2030,7 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
if (size < 4096 || fls(size - 1) == fls(size-1 + REDZONE_ALIGN +
2 * sizeof(unsigned long long)))
flags |= SLAB_RED_ZONE | SLAB_STORE_USER;
- if (!(flags & SLAB_DESTROY_BY_RCU))
+ if (!(flags & SLAB_TYPESAFE_BY_RCU))
flags |= SLAB_POISON;
#endif
#endif
diff --git a/mm/slab.h b/mm/slab.h
index 65e7c3fcac72..9cfcf099709c 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -126,7 +126,7 @@ static inline unsigned long kmem_cache_flags(unsigned long object_size,
/* Legal flag mask for kmem_cache_create(), for various configurations */
#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
- SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS )
+ SLAB_TYPESAFE_BY_RCU | SLAB_DEBUG_OBJECTS )
#if defined(CONFIG_DEBUG_SLAB)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
@@ -415,7 +415,7 @@ static inline size_t slab_ksize(const struct kmem_cache *s)
* back there or track user information then we can
* only use the space before that information.
*/
- if (s->flags & (SLAB_DESTROY_BY_RCU | SLAB_STORE_USER))
+ if (s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER))
return s->inuse;
/*
* Else we can use all the padding etc for the allocation
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 09d0e849b07f..01a0fe2eb332 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -39,7 +39,7 @@ static DECLARE_WORK(slab_caches_to_rcu_destroy_work,
* Set of flags that will prevent slab merging
*/
#define SLAB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
- SLAB_TRACE | SLAB_DESTROY_BY_RCU | SLAB_NOLEAKTRACE | \
+ SLAB_TRACE | SLAB_TYPESAFE_BY_RCU | SLAB_NOLEAKTRACE | \
SLAB_FAILSLAB | SLAB_KASAN)
#define SLAB_MERGE_SAME (SLAB_RECLAIM_ACCOUNT | SLAB_CACHE_DMA | \
@@ -500,7 +500,7 @@ static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work)
struct kmem_cache *s, *s2;
/*
- * On destruction, SLAB_DESTROY_BY_RCU kmem_caches are put on the
+ * On destruction, SLAB_TYPESAFE_BY_RCU kmem_caches are put on the
* @slab_caches_to_rcu_destroy list. The slab pages are freed
* through RCU and and the associated kmem_cache are dereferenced
* while freeing the pages, so the kmem_caches should be freed only
@@ -537,7 +537,7 @@ static int shutdown_cache(struct kmem_cache *s)
memcg_unlink_cache(s);
list_del(&s->list);
- if (s->flags & SLAB_DESTROY_BY_RCU) {
+ if (s->flags & SLAB_TYPESAFE_BY_RCU) {
list_add_tail(&s->list, &slab_caches_to_rcu_destroy);
schedule_work(&slab_caches_to_rcu_destroy_work);
} else {
diff --git a/mm/slob.c b/mm/slob.c
index eac04d4357ec..1bae78d71096 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -126,7 +126,7 @@ static inline void clear_slob_page_free(struct page *sp)
/*
* struct slob_rcu is inserted at the tail of allocated slob blocks, which
- * were created with a SLAB_DESTROY_BY_RCU slab. slob_rcu is used to free
+ * were created with a SLAB_TYPESAFE_BY_RCU slab. slob_rcu is used to free
* the block using call_rcu.
*/
struct slob_rcu {
@@ -524,7 +524,7 @@ EXPORT_SYMBOL(ksize);
int __kmem_cache_create(struct kmem_cache *c, unsigned long flags)
{
- if (flags & SLAB_DESTROY_BY_RCU) {
+ if (flags & SLAB_TYPESAFE_BY_RCU) {
/* leave room for rcu footer at the end of object */
c->size += sizeof(struct slob_rcu);
}
@@ -598,7 +598,7 @@ static void kmem_rcu_free(struct rcu_head *head)
void kmem_cache_free(struct kmem_cache *c, void *b)
{
kmemleak_free_recursive(b, c->flags);
- if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
+ if (unlikely(c->flags & SLAB_TYPESAFE_BY_RCU)) {
struct slob_rcu *slob_rcu;
slob_rcu = b + (c->size - sizeof(struct slob_rcu));
slob_rcu->size = c->size;
diff --git a/mm/slub.c b/mm/slub.c
index 7f4bc7027ed5..57e5156f02be 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1687,7 +1687,7 @@ static void rcu_free_slab(struct rcu_head *h)
static void free_slab(struct kmem_cache *s, struct page *page)
{
- if (unlikely(s->flags & SLAB_DESTROY_BY_RCU)) {
+ if (unlikely(s->flags & SLAB_TYPESAFE_BY_RCU)) {
struct rcu_head *head;
if (need_reserve_slab_rcu) {
@@ -2963,7 +2963,7 @@ static __always_inline void slab_free(struct kmem_cache *s, struct page *page,
* slab_free_freelist_hook() could have put the items into quarantine.
* If so, no need to free them.
*/
- if (s->flags & SLAB_KASAN && !(s->flags & SLAB_DESTROY_BY_RCU))
+ if (s->flags & SLAB_KASAN && !(s->flags & SLAB_TYPESAFE_BY_RCU))
return;
do_slab_free(s, page, head, tail, cnt, addr);
}
@@ -3433,7 +3433,7 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
* the slab may touch the object after free or before allocation
* then we should never poison the object itself.
*/
- if ((flags & SLAB_POISON) && !(flags & SLAB_DESTROY_BY_RCU) &&
+ if ((flags & SLAB_POISON) && !(flags & SLAB_TYPESAFE_BY_RCU) &&
!s->ctor)
s->flags |= __OBJECT_POISON;
else
@@ -3455,7 +3455,7 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
*/
s->inuse = size;
- if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) ||
+ if (((flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)) ||
s->ctor)) {
/*
* Relocate free pointer after the object if it is not
@@ -3537,7 +3537,7 @@ static int kmem_cache_open(struct kmem_cache *s, unsigned long flags)
s->flags = kmem_cache_flags(s->size, flags, s->name, s->ctor);
s->reserved = 0;
- if (need_reserve_slab_rcu && (s->flags & SLAB_DESTROY_BY_RCU))
+ if (need_reserve_slab_rcu && (s->flags & SLAB_TYPESAFE_BY_RCU))
s->reserved = sizeof(struct rcu_head);
if (!calculate_sizes(s, -1))
@@ -5042,7 +5042,7 @@ SLAB_ATTR_RO(cache_dma);
static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
{
- return sprintf(buf, "%d\n", !!(s->flags & SLAB_DESTROY_BY_RCU));
+ return sprintf(buf, "%d\n", !!(s->flags & SLAB_TYPESAFE_BY_RCU));
}
SLAB_ATTR_RO(destroy_by_rcu);
diff --git a/mm/swap_slots.c b/mm/swap_slots.c
index aa1c415f4abd..58f6c78f1dad 100644
--- a/mm/swap_slots.c
+++ b/mm/swap_slots.c
@@ -31,6 +31,7 @@
#include <linux/cpumask.h>
#include <linux/vmalloc.h>
#include <linux/mutex.h>
+#include <linux/mm.h>
#ifdef CONFIG_SWAP
@@ -119,16 +120,18 @@ static int alloc_swap_slot_cache(unsigned int cpu)
/*
* Do allocation outside swap_slots_cache_mutex
- * as vzalloc could trigger reclaim and get_swap_page,
+ * as kvzalloc could trigger reclaim and get_swap_page,
* which can lock swap_slots_cache_mutex.
*/
- slots = vzalloc(sizeof(swp_entry_t) * SWAP_SLOTS_CACHE_SIZE);
+ slots = kvzalloc(sizeof(swp_entry_t) * SWAP_SLOTS_CACHE_SIZE,
+ GFP_KERNEL);
if (!slots)
return -ENOMEM;
- slots_ret = vzalloc(sizeof(swp_entry_t) * SWAP_SLOTS_CACHE_SIZE);
+ slots_ret = kvzalloc(sizeof(swp_entry_t) * SWAP_SLOTS_CACHE_SIZE,
+ GFP_KERNEL);
if (!slots_ret) {
- vfree(slots);
+ kvfree(slots);
return -ENOMEM;
}
@@ -152,9 +155,9 @@ static int alloc_swap_slot_cache(unsigned int cpu)
out:
mutex_unlock(&swap_slots_cache_mutex);
if (slots)
- vfree(slots);
+ kvfree(slots);
if (slots_ret)
- vfree(slots_ret);
+ kvfree(slots_ret);
return 0;
}
@@ -171,7 +174,7 @@ static void drain_slots_cache_cpu(unsigned int cpu, unsigned int type,
cache->cur = 0;
cache->nr = 0;
if (free_slots && cache->slots) {
- vfree(cache->slots);
+ kvfree(cache->slots);
cache->slots = NULL;
}
mutex_unlock(&cache->alloc_lock);
@@ -186,7 +189,7 @@ static void drain_slots_cache_cpu(unsigned int cpu, unsigned int type,
}
spin_unlock_irq(&cache->free_lock);
if (slots)
- vfree(slots);
+ kvfree(slots);
}
}
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 7bfb9bd1ca21..539b8885e3d1 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -523,7 +523,7 @@ int init_swap_address_space(unsigned int type, unsigned long nr_pages)
unsigned int i, nr;
nr = DIV_ROUND_UP(nr_pages, SWAP_ADDRESS_SPACE_PAGES);
- spaces = vzalloc(sizeof(struct address_space) * nr);
+ spaces = kvzalloc(sizeof(struct address_space) * nr, GFP_KERNEL);
if (!spaces)
return -ENOMEM;
for (i = 0; i < nr; i++) {
diff --git a/mm/swapfile.c b/mm/swapfile.c
index b86b2aca3fb9..4f6cba1b6632 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -2270,8 +2270,8 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
free_percpu(p->percpu_cluster);
p->percpu_cluster = NULL;
vfree(swap_map);
- vfree(cluster_info);
- vfree(frontswap_map);
+ kvfree(cluster_info);
+ kvfree(frontswap_map);
/* Destroy swap account information */
swap_cgroup_swapoff(p->type);
exit_swap_address_space(p->type);
@@ -2794,7 +2794,8 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
p->cluster_next = 1 + (prandom_u32() % p->highest_bit);
nr_cluster = DIV_ROUND_UP(maxpages, SWAPFILE_CLUSTER);
- cluster_info = vzalloc(nr_cluster * sizeof(*cluster_info));
+ cluster_info = kvzalloc(nr_cluster * sizeof(*cluster_info),
+ GFP_KERNEL);
if (!cluster_info) {
error = -ENOMEM;
goto bad_swap;
@@ -2827,7 +2828,8 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
}
/* frontswap enabled? set up bit-per-page map for frontswap */
if (IS_ENABLED(CONFIG_FRONTSWAP))
- frontswap_map = vzalloc(BITS_TO_LONGS(maxpages) * sizeof(long));
+ frontswap_map = kvzalloc(BITS_TO_LONGS(maxpages) * sizeof(long),
+ GFP_KERNEL);
if (p->bdev &&(swap_flags & SWAP_FLAG_DISCARD) && swap_discardable(p)) {
/*
diff --git a/mm/util.c b/mm/util.c
index 656dc5e37a87..718154debc87 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -329,6 +329,63 @@ unsigned long vm_mmap(struct file *file, unsigned long addr,
}
EXPORT_SYMBOL(vm_mmap);
+/**
+ * kvmalloc_node - attempt to allocate physically contiguous memory, but upon
+ * failure, fall back to non-contiguous (vmalloc) allocation.
+ * @size: size of the request.
+ * @flags: gfp mask for the allocation - must be compatible (superset) with GFP_KERNEL.
+ * @node: numa node to allocate from
+ *
+ * Uses kmalloc to get the memory but if the allocation fails then falls back
+ * to the vmalloc allocator. Use kvfree for freeing the memory.
+ *
+ * Reclaim modifiers - __GFP_NORETRY and __GFP_NOFAIL are not supported. __GFP_REPEAT
+ * is supported only for large (>32kB) allocations, and it should be used only if
+ * kmalloc is preferable to the vmalloc fallback, due to visible performance drawbacks.
+ *
+ * Any use of gfp flags outside of GFP_KERNEL should be consulted with mm people.
+ */
+void *kvmalloc_node(size_t size, gfp_t flags, int node)
+{
+ gfp_t kmalloc_flags = flags;
+ void *ret;
+
+ /*
+ * vmalloc uses GFP_KERNEL for some internal allocations (e.g. page tables)
+ * so the given set of flags has to be compatible.
+ */
+ WARN_ON_ONCE((flags & GFP_KERNEL) != GFP_KERNEL);
+
+ /*
+ * Make sure that larger requests are not too disruptive - no OOM
+ * killer and no allocation failure warnings as we have a fallback
+ */
+ if (size > PAGE_SIZE) {
+ kmalloc_flags |= __GFP_NOWARN;
+
+ /*
+ * We have to override __GFP_REPEAT by __GFP_NORETRY for !costly
+ * requests because there is no other way to tell the allocator
+ * that we want to fail rather than retry endlessly.
+ */
+ if (!(kmalloc_flags & __GFP_REPEAT) ||
+ (size <= PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER))
+ kmalloc_flags |= __GFP_NORETRY;
+ }
+
+ ret = kmalloc_node(size, kmalloc_flags, node);
+
+ /*
+ * It doesn't really make sense to fallback to vmalloc for sub page
+ * requests
+ */
+ if (ret || size <= PAGE_SIZE)
+ return ret;
+
+ return __vmalloc_node_flags(size, node, flags);
+}
+EXPORT_SYMBOL(kvmalloc_node);
+
void kvfree(const void *addr)
{
if (is_vmalloc_addr(addr))
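kvmalloc_node() and the kvmalloc()/kvzalloc() wrappers built on it replace the open-coded "try kmalloc, then fall back to vmalloc" pattern seen in several hunks of this series, and kvfree() (above) frees either kind of memory. A minimal hedged usage sketch (struct foo is illustrative):

    #include <linux/mm.h>
    #include <linux/slab.h>

    struct foo {
        unsigned long val;
    };

    static struct foo *alloc_table(size_t nr)
    {
        /* physically contiguous if kmalloc succeeds, vmalloc-backed otherwise */
        return kvzalloc(nr * sizeof(struct foo), GFP_KERNEL);
    }

    static void free_table(struct foo *table)
    {
        kvfree(table);  /* works for both kmalloc and vmalloc memory */
    }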
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index b52aeed3f58e..1dda6d8a200a 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1649,16 +1649,13 @@ void *vmap(struct page **pages, unsigned int count,
}
EXPORT_SYMBOL(vmap);
-static void *__vmalloc_node(unsigned long size, unsigned long align,
- gfp_t gfp_mask, pgprot_t prot,
- int node, const void *caller);
static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
pgprot_t prot, int node)
{
struct page **pages;
unsigned int nr_pages, array_size, i;
const gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
- const gfp_t alloc_mask = gfp_mask | __GFP_NOWARN;
+ const gfp_t alloc_mask = gfp_mask | __GFP_HIGHMEM | __GFP_NOWARN;
nr_pages = get_vm_area_size(area) >> PAGE_SHIFT;
array_size = (nr_pages * sizeof(struct page *));
@@ -1786,8 +1783,15 @@ fail:
* Allocate enough pages to cover @size from the page level
* allocator with @gfp_mask flags. Map them into contiguous
* kernel virtual space, using a pagetable protection of @prot.
+ *
+ * Reclaim modifiers in @gfp_mask - __GFP_NORETRY, __GFP_REPEAT
+ * and __GFP_NOFAIL are not supported
+ *
+ * Any use of gfp flags outside of GFP_KERNEL should be consulted
+ * with mm people.
+ *
*/
-static void *__vmalloc_node(unsigned long size, unsigned long align,
+void *__vmalloc_node(unsigned long size, unsigned long align,
gfp_t gfp_mask, pgprot_t prot,
int node, const void *caller)
{
@@ -1802,13 +1806,6 @@ void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
}
EXPORT_SYMBOL(__vmalloc);
-static inline void *__vmalloc_node_flags(unsigned long size,
- int node, gfp_t flags)
-{
- return __vmalloc_node(size, 1, flags, PAGE_KERNEL,
- node, __builtin_return_address(0));
-}
-
/**
* vmalloc - allocate virtually contiguous memory
* @size: allocation size
@@ -1821,7 +1818,7 @@ static inline void *__vmalloc_node_flags(unsigned long size,
void *vmalloc(unsigned long size)
{
return __vmalloc_node_flags(size, NUMA_NO_NODE,
- GFP_KERNEL | __GFP_HIGHMEM);
+ GFP_KERNEL);
}
EXPORT_SYMBOL(vmalloc);
@@ -1838,7 +1835,7 @@ EXPORT_SYMBOL(vmalloc);
void *vzalloc(unsigned long size)
{
return __vmalloc_node_flags(size, NUMA_NO_NODE,
- GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
+ GFP_KERNEL | __GFP_ZERO);
}
EXPORT_SYMBOL(vzalloc);
@@ -1855,7 +1852,7 @@ void *vmalloc_user(unsigned long size)
void *ret;
ret = __vmalloc_node(size, SHMLBA,
- GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
+ GFP_KERNEL | __GFP_ZERO,
PAGE_KERNEL, NUMA_NO_NODE,
__builtin_return_address(0));
if (ret) {
@@ -1879,7 +1876,7 @@ EXPORT_SYMBOL(vmalloc_user);
*/
void *vmalloc_node(unsigned long size, int node)
{
- return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
+ return __vmalloc_node(size, 1, GFP_KERNEL, PAGE_KERNEL,
node, __builtin_return_address(0));
}
EXPORT_SYMBOL(vmalloc_node);
@@ -1899,7 +1896,7 @@ EXPORT_SYMBOL(vmalloc_node);
void *vzalloc_node(unsigned long size, int node)
{
return __vmalloc_node_flags(size, node,
- GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
+ GFP_KERNEL | __GFP_ZERO);
}
EXPORT_SYMBOL(vzalloc_node);
@@ -1921,7 +1918,7 @@ EXPORT_SYMBOL(vzalloc_node);
void *vmalloc_exec(unsigned long size)
{
- return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
+ return __vmalloc_node(size, 1, GFP_KERNEL, PAGE_KERNEL_EXEC,
NUMA_NO_NODE, __builtin_return_address(0));
}
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 4e7ed65842af..2f45c0520f43 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -3036,6 +3036,7 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
struct zonelist *zonelist;
unsigned long nr_reclaimed;
int nid;
+ unsigned int noreclaim_flag;
struct scan_control sc = {
.nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX),
.gfp_mask = (current_gfp_context(gfp_mask) & GFP_RECLAIM_MASK) |
@@ -3062,9 +3063,9 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
sc.gfp_mask,
sc.reclaim_idx);
- current->flags |= PF_MEMALLOC;
+ noreclaim_flag = memalloc_noreclaim_save();
nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
- current->flags &= ~PF_MEMALLOC;
+ memalloc_noreclaim_restore(noreclaim_flag);
trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed);
@@ -3589,8 +3590,9 @@ unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask);
struct task_struct *p = current;
unsigned long nr_reclaimed;
+ unsigned int noreclaim_flag;
- p->flags |= PF_MEMALLOC;
+ noreclaim_flag = memalloc_noreclaim_save();
lockdep_set_current_reclaim_state(sc.gfp_mask);
reclaim_state.reclaimed_slab = 0;
p->reclaim_state = &reclaim_state;
@@ -3599,7 +3601,7 @@ unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
p->reclaim_state = NULL;
lockdep_clear_current_reclaim_state();
- p->flags &= ~PF_MEMALLOC;
+ memalloc_noreclaim_restore(noreclaim_flag);
return nr_reclaimed;
}
@@ -3764,6 +3766,7 @@ static int __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned in
struct task_struct *p = current;
struct reclaim_state reclaim_state;
int classzone_idx = gfp_zone(gfp_mask);
+ unsigned int noreclaim_flag;
struct scan_control sc = {
.nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX),
.gfp_mask = (gfp_mask = current_gfp_context(gfp_mask)),
@@ -3781,7 +3784,8 @@ static int __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned in
* and we also need to be able to write out pages for RECLAIM_WRITE
* and RECLAIM_UNMAP.
*/
- p->flags |= PF_MEMALLOC | PF_SWAPWRITE;
+ noreclaim_flag = memalloc_noreclaim_save();
+ p->flags |= PF_SWAPWRITE;
lockdep_set_current_reclaim_state(gfp_mask);
reclaim_state.reclaimed_slab = 0;
p->reclaim_state = &reclaim_state;
@@ -3797,7 +3801,8 @@ static int __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned in
}
p->reclaim_state = NULL;
- current->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE);
+ current->flags &= ~PF_SWAPWRITE;
+ memalloc_noreclaim_restore(noreclaim_flag);
lockdep_clear_current_reclaim_state();
return sc.nr_reclaimed >= nr_pages;
}
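memalloc_noreclaim_save()/memalloc_noreclaim_restore(), introduced alongside these conversions in <linux/sched/mm.h>, replace open-coded PF_MEMALLOC manipulation so that nested sections restore the caller's original state instead of clearing the flag unconditionally. A minimal hedged sketch of the calling pattern:

    #include <linux/sched/mm.h>

    static void reclaim_sensitive_section(void)
    {
        unsigned int noreclaim_flag;

        noreclaim_flag = memalloc_noreclaim_save();   /* sets PF_MEMALLOC */
        /* ... allocations here will not recurse into direct reclaim ... */
        memalloc_noreclaim_restore(noreclaim_flag);   /* restores old state */
    }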
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 9ee5787634e5..953b6728bd00 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -626,11 +626,18 @@ static netdev_features_t vlan_dev_fix_features(struct net_device *dev,
{
struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
netdev_features_t old_features = features;
+ netdev_features_t lower_features;
- features = netdev_intersect_features(features, real_dev->vlan_features);
- features |= NETIF_F_RXCSUM;
- features = netdev_intersect_features(features, real_dev->features);
+ lower_features = netdev_intersect_features((real_dev->vlan_features |
+ NETIF_F_RXCSUM),
+ real_dev->features);
+ /* Add HW_CSUM setting to preserve user ability to control
+ * checksum offload on the vlan device.
+ */
+ if (lower_features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))
+ lower_features |= NETIF_F_HW_CSUM;
+ features = netdev_intersect_features(features, lower_features);
features |= old_features & (NETIF_F_SOFT_FEATURES | NETIF_F_GSO_SOFTWARE);
features |= NETIF_F_LLTX;
diff --git a/net/9p/Kconfig b/net/9p/Kconfig
index a75174a33723..e6014e0e51f7 100644
--- a/net/9p/Kconfig
+++ b/net/9p/Kconfig
@@ -22,6 +22,15 @@ config NET_9P_VIRTIO
This builds support for a transport between
guest partitions and a host partition.
+config NET_9P_XEN
+ depends on XEN
+ select XEN_XENBUS_FRONTEND
+ tristate "9P Xen Transport"
+ help
+ This builds support for a transport for 9pfs between
+ two Xen domains.
+
+
config NET_9P_RDMA
depends on INET && INFINIBAND && INFINIBAND_ADDR_TRANS
tristate "9P RDMA Transport (Experimental)"
diff --git a/net/9p/Makefile b/net/9p/Makefile
index a0874cc1f718..697ea7caf466 100644
--- a/net/9p/Makefile
+++ b/net/9p/Makefile
@@ -1,4 +1,5 @@
obj-$(CONFIG_NET_9P) := 9pnet.o
+obj-$(CONFIG_NET_9P_XEN) += 9pnet_xen.o
obj-$(CONFIG_NET_9P_VIRTIO) += 9pnet_virtio.o
obj-$(CONFIG_NET_9P_RDMA) += 9pnet_rdma.o
@@ -14,5 +15,8 @@ obj-$(CONFIG_NET_9P_RDMA) += 9pnet_rdma.o
9pnet_virtio-objs := \
trans_virtio.o \
+9pnet_xen-objs := \
+ trans_xen.o \
+
9pnet_rdma-objs := \
trans_rdma.o \
diff --git a/net/9p/trans_xen.c b/net/9p/trans_xen.c
new file mode 100644
index 000000000000..71e85643b3f9
--- /dev/null
+++ b/net/9p/trans_xen.c
@@ -0,0 +1,545 @@
+/*
+ * linux/fs/9p/trans_xen
+ *
+ * Xen transport layer.
+ *
+ * Copyright (C) 2017 by Stefano Stabellini <stefano@aporeto.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation; or, when distributed
+ * separately from the Linux kernel or incorporated into other
+ * software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <xen/events.h>
+#include <xen/grant_table.h>
+#include <xen/xen.h>
+#include <xen/xenbus.h>
+#include <xen/interface/io/9pfs.h>
+
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/rwlock.h>
+#include <net/9p/9p.h>
+#include <net/9p/client.h>
+#include <net/9p/transport.h>
+
+#define XEN_9PFS_NUM_RINGS 2
+#define XEN_9PFS_RING_ORDER 6
+#define XEN_9PFS_RING_SIZE XEN_FLEX_RING_SIZE(XEN_9PFS_RING_ORDER)
+
+struct xen_9pfs_header {
+ uint32_t size;
+ uint8_t id;
+ uint16_t tag;
+
+ /* uint8_t sdata[]; */
+} __attribute__((packed));
+
+/* One per ring, more than one per 9pfs share */
+struct xen_9pfs_dataring {
+ struct xen_9pfs_front_priv *priv;
+
+ struct xen_9pfs_data_intf *intf;
+ grant_ref_t ref;
+ int evtchn;
+ int irq;
+ /* protect a ring from concurrent accesses */
+ spinlock_t lock;
+
+ struct xen_9pfs_data data;
+ wait_queue_head_t wq;
+ struct work_struct work;
+};
+
+/* One per 9pfs share */
+struct xen_9pfs_front_priv {
+ struct list_head list;
+ struct xenbus_device *dev;
+ char *tag;
+ struct p9_client *client;
+
+ int num_rings;
+ struct xen_9pfs_dataring *rings;
+};
+
+static LIST_HEAD(xen_9pfs_devs);
+static DEFINE_RWLOCK(xen_9pfs_lock);
+
+/* We don't currently allow canceling of requests */
+static int p9_xen_cancel(struct p9_client *client, struct p9_req_t *req)
+{
+ return 1;
+}
+
+static int p9_xen_create(struct p9_client *client, const char *addr, char *args)
+{
+ struct xen_9pfs_front_priv *priv;
+
+ read_lock(&xen_9pfs_lock);
+ list_for_each_entry(priv, &xen_9pfs_devs, list) {
+ if (!strcmp(priv->tag, addr)) {
+ priv->client = client;
+ read_unlock(&xen_9pfs_lock);
+ return 0;
+ }
+ }
+ read_unlock(&xen_9pfs_lock);
+ return -EINVAL;
+}
+
+static void p9_xen_close(struct p9_client *client)
+{
+ struct xen_9pfs_front_priv *priv;
+
+ read_lock(&xen_9pfs_lock);
+ list_for_each_entry(priv, &xen_9pfs_devs, list) {
+ if (priv->client == client) {
+ priv->client = NULL;
+ read_unlock(&xen_9pfs_lock);
+ return;
+ }
+ }
+ read_unlock(&xen_9pfs_lock);
+}
+
+static bool p9_xen_write_todo(struct xen_9pfs_dataring *ring, RING_IDX size)
+{
+ RING_IDX cons, prod;
+
+ cons = ring->intf->out_cons;
+ prod = ring->intf->out_prod;
+ virt_mb();
+
+ return XEN_9PFS_RING_SIZE -
+ xen_9pfs_queued(prod, cons, XEN_9PFS_RING_SIZE) >= size;
+}
+
+static int p9_xen_request(struct p9_client *client, struct p9_req_t *p9_req)
+{
+ struct xen_9pfs_front_priv *priv = NULL;
+ RING_IDX cons, prod, masked_cons, masked_prod;
+ unsigned long flags;
+ u32 size = p9_req->tc->size;
+ struct xen_9pfs_dataring *ring;
+ int num;
+
+ read_lock(&xen_9pfs_lock);
+ list_for_each_entry(priv, &xen_9pfs_devs, list) {
+ if (priv->client == client)
+ break;
+ }
+ read_unlock(&xen_9pfs_lock);
+ if (!priv || priv->client != client)
+ return -EINVAL;
+
+ num = p9_req->tc->tag % priv->num_rings;
+ ring = &priv->rings[num];
+
+again:
+ while (wait_event_interruptible(ring->wq,
+ p9_xen_write_todo(ring, size)) != 0)
+ ;
+
+ spin_lock_irqsave(&ring->lock, flags);
+ cons = ring->intf->out_cons;
+ prod = ring->intf->out_prod;
+ virt_mb();
+
+ if (XEN_9PFS_RING_SIZE - xen_9pfs_queued(prod, cons,
+ XEN_9PFS_RING_SIZE) < size) {
+ spin_unlock_irqrestore(&ring->lock, flags);
+ goto again;
+ }
+
+ masked_prod = xen_9pfs_mask(prod, XEN_9PFS_RING_SIZE);
+ masked_cons = xen_9pfs_mask(cons, XEN_9PFS_RING_SIZE);
+
+ xen_9pfs_write_packet(ring->data.out, p9_req->tc->sdata, size,
+ &masked_prod, masked_cons, XEN_9PFS_RING_SIZE);
+
+ p9_req->status = REQ_STATUS_SENT;
+ virt_wmb(); /* write ring before updating pointer */
+ prod += size;
+ ring->intf->out_prod = prod;
+ spin_unlock_irqrestore(&ring->lock, flags);
+ notify_remote_via_irq(ring->irq);
+
+ return 0;
+}
+
+static void p9_xen_response(struct work_struct *work)
+{
+ struct xen_9pfs_front_priv *priv;
+ struct xen_9pfs_dataring *ring;
+ RING_IDX cons, prod, masked_cons, masked_prod;
+ struct xen_9pfs_header h;
+ struct p9_req_t *req;
+ int status;
+
+ ring = container_of(work, struct xen_9pfs_dataring, work);
+ priv = ring->priv;
+
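+	/* consume every complete response currently queued on the ring */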
+ while (1) {
+ cons = ring->intf->in_cons;
+ prod = ring->intf->in_prod;
+ virt_rmb();
+
+ if (xen_9pfs_queued(prod, cons, XEN_9PFS_RING_SIZE) <
+ sizeof(h)) {
+ notify_remote_via_irq(ring->irq);
+ return;
+ }
+
+ masked_prod = xen_9pfs_mask(prod, XEN_9PFS_RING_SIZE);
+ masked_cons = xen_9pfs_mask(cons, XEN_9PFS_RING_SIZE);
+
+ /* First, read just the header */
+ xen_9pfs_read_packet(&h, ring->data.in, sizeof(h),
+ masked_prod, &masked_cons,
+ XEN_9PFS_RING_SIZE);
+
+ req = p9_tag_lookup(priv->client, h.tag);
+ if (!req || req->status != REQ_STATUS_SENT) {
+ dev_warn(&priv->dev->dev, "Wrong req tag=%x\n", h.tag);
+ cons += h.size;
+ virt_mb();
+ ring->intf->in_cons = cons;
+ continue;
+ }
+
+ memcpy(req->rc, &h, sizeof(h));
+ req->rc->offset = 0;
+
+ masked_cons = xen_9pfs_mask(cons, XEN_9PFS_RING_SIZE);
+ /* Then, read the whole packet (including the header) */
+ xen_9pfs_read_packet(req->rc->sdata, ring->data.in, h.size,
+ masked_prod, &masked_cons,
+ XEN_9PFS_RING_SIZE);
+
+ virt_mb();
+ cons += h.size;
+ ring->intf->in_cons = cons;
+
+ status = (req->status != REQ_STATUS_ERROR) ?
+ REQ_STATUS_RCVD : REQ_STATUS_ERROR;
+
+ p9_client_cb(priv->client, req, status);
+ }
+}
+
+static irqreturn_t xen_9pfs_front_event_handler(int irq, void *r)
+{
+ struct xen_9pfs_dataring *ring = r;
+
+ if (!ring || !ring->priv->client) {
+ /* ignore spurious interrupt */
+ return IRQ_HANDLED;
+ }
+
+ wake_up_interruptible(&ring->wq);
+ schedule_work(&ring->work);
+
+ return IRQ_HANDLED;
+}
+
+static struct p9_trans_module p9_xen_trans = {
+ .name = "xen",
+ .maxsize = 1 << (XEN_9PFS_RING_ORDER + XEN_PAGE_SHIFT),
+ .def = 1,
+ .create = p9_xen_create,
+ .close = p9_xen_close,
+ .request = p9_xen_request,
+ .cancel = p9_xen_cancel,
+ .owner = THIS_MODULE,
+};
+
+static const struct xenbus_device_id xen_9pfs_front_ids[] = {
+ { "9pfs" },
+ { "" }
+};
+
+static void xen_9pfs_front_free(struct xen_9pfs_front_priv *priv)
+{
+ int i, j;
+
+ write_lock(&xen_9pfs_lock);
+ list_del(&priv->list);
+ write_unlock(&xen_9pfs_lock);
+
+ for (i = 0; i < priv->num_rings; i++) {
+ if (!priv->rings[i].intf)
+ break;
+ if (priv->rings[i].irq > 0)
+ unbind_from_irqhandler(priv->rings[i].irq, priv->dev);
+ if (priv->rings[i].data.in) {
+ for (j = 0; j < (1 << XEN_9PFS_RING_ORDER); j++) {
+ grant_ref_t ref;
+
+ ref = priv->rings[i].intf->ref[j];
+ gnttab_end_foreign_access(ref, 0, 0);
+ }
+ free_pages((unsigned long)priv->rings[i].data.in,
+ XEN_9PFS_RING_ORDER -
+ (PAGE_SHIFT - XEN_PAGE_SHIFT));
+ }
+ gnttab_end_foreign_access(priv->rings[i].ref, 0, 0);
+ free_page((unsigned long)priv->rings[i].intf);
+ }
+ kfree(priv->rings);
+ kfree(priv->tag);
+ kfree(priv);
+}
+
+static int xen_9pfs_front_remove(struct xenbus_device *dev)
+{
+ struct xen_9pfs_front_priv *priv = dev_get_drvdata(&dev->dev);
+
+ dev_set_drvdata(&dev->dev, NULL);
+ xen_9pfs_front_free(priv);
+ return 0;
+}
+
+static int xen_9pfs_front_alloc_dataring(struct xenbus_device *dev,
+ struct xen_9pfs_dataring *ring)
+{
+ int i = 0;
+ int ret = -ENOMEM;
+ void *bytes = NULL;
+
+ init_waitqueue_head(&ring->wq);
+ spin_lock_init(&ring->lock);
+ INIT_WORK(&ring->work, p9_xen_response);
+
+ ring->intf = (struct xen_9pfs_data_intf *)get_zeroed_page(GFP_KERNEL);
+ if (!ring->intf)
+ return ret;
+ ret = gnttab_grant_foreign_access(dev->otherend_id,
+ virt_to_gfn(ring->intf), 0);
+ if (ret < 0)
+ goto out;
+ ring->ref = ret;
+ bytes = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
+ XEN_9PFS_RING_ORDER - (PAGE_SHIFT - XEN_PAGE_SHIFT));
+ if (!bytes) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ for (; i < (1 << XEN_9PFS_RING_ORDER); i++) {
+ ret = gnttab_grant_foreign_access(
+ dev->otherend_id, virt_to_gfn(bytes) + i, 0);
+ if (ret < 0)
+ goto out;
+ ring->intf->ref[i] = ret;
+ }
+ ring->intf->ring_order = XEN_9PFS_RING_ORDER;
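+	/* in (responses) is the first half of the buffer, out (requests) the second */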
+ ring->data.in = bytes;
+ ring->data.out = bytes + XEN_9PFS_RING_SIZE;
+
+ ret = xenbus_alloc_evtchn(dev, &ring->evtchn);
+ if (ret)
+ goto out;
+ ring->irq = bind_evtchn_to_irqhandler(ring->evtchn,
+ xen_9pfs_front_event_handler,
+ 0, "xen_9pfs-frontend", ring);
+ if (ring->irq >= 0)
+ return 0;
+
+ xenbus_free_evtchn(dev, ring->evtchn);
+ ret = ring->irq;
+out:
+ if (bytes) {
+ for (i--; i >= 0; i--)
+ gnttab_end_foreign_access(ring->intf->ref[i], 0, 0);
+ free_pages((unsigned long)bytes,
+ XEN_9PFS_RING_ORDER -
+ (PAGE_SHIFT - XEN_PAGE_SHIFT));
+ }
+ gnttab_end_foreign_access(ring->ref, 0, 0);
+ free_page((unsigned long)ring->intf);
+ return ret;
+}
+
+static int xen_9pfs_front_probe(struct xenbus_device *dev,
+ const struct xenbus_device_id *id)
+{
+ int ret, i;
+ struct xenbus_transaction xbt;
+ struct xen_9pfs_front_priv *priv = NULL;
+ char *versions;
+ unsigned int max_rings, max_ring_order, len = 0;
+
+ versions = xenbus_read(XBT_NIL, dev->otherend, "versions", &len);
+ if (!len)
+ return -EINVAL;
+ if (strcmp(versions, "1")) {
+ kfree(versions);
+ return -EINVAL;
+ }
+ kfree(versions);
+ max_rings = xenbus_read_unsigned(dev->otherend, "max-rings", 0);
+ if (max_rings < XEN_9PFS_NUM_RINGS)
+ return -EINVAL;
+ max_ring_order = xenbus_read_unsigned(dev->otherend,
+ "max-ring-page-order", 0);
+ if (max_ring_order < XEN_9PFS_RING_ORDER)
+ return -EINVAL;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->dev = dev;
+ priv->num_rings = XEN_9PFS_NUM_RINGS;
+ priv->rings = kcalloc(priv->num_rings, sizeof(*priv->rings),
+ GFP_KERNEL);
+ if (!priv->rings) {
+ kfree(priv);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < priv->num_rings; i++) {
+ priv->rings[i].priv = priv;
+ ret = xen_9pfs_front_alloc_dataring(dev, &priv->rings[i]);
+ if (ret < 0)
+ goto error;
+ }
+
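+	/* advertise version, ring refs and event channels to the backend via xenstore */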
+ again:
+ ret = xenbus_transaction_start(&xbt);
+ if (ret) {
+ xenbus_dev_fatal(dev, ret, "starting transaction");
+ goto error;
+ }
+ ret = xenbus_printf(xbt, dev->nodename, "version", "%u", 1);
+ if (ret)
+ goto error_xenbus;
+ ret = xenbus_printf(xbt, dev->nodename, "num-rings", "%u",
+ priv->num_rings);
+ if (ret)
+ goto error_xenbus;
+ for (i = 0; i < priv->num_rings; i++) {
+ char str[16];
+
+ BUILD_BUG_ON(XEN_9PFS_NUM_RINGS > 9);
+ sprintf(str, "ring-ref%u", i);
+ ret = xenbus_printf(xbt, dev->nodename, str, "%d",
+ priv->rings[i].ref);
+ if (ret)
+ goto error_xenbus;
+
+ sprintf(str, "event-channel-%u", i);
+ ret = xenbus_printf(xbt, dev->nodename, str, "%u",
+ priv->rings[i].evtchn);
+ if (ret)
+ goto error_xenbus;
+ }
+ priv->tag = xenbus_read(xbt, dev->nodename, "tag", NULL);
+ if (!priv->tag) {
+ ret = -EINVAL;
+ goto error_xenbus;
+ }
+ ret = xenbus_transaction_end(xbt, 0);
+ if (ret) {
+ if (ret == -EAGAIN)
+ goto again;
+ xenbus_dev_fatal(dev, ret, "completing transaction");
+ goto error;
+ }
+
+ write_lock(&xen_9pfs_lock);
+ list_add_tail(&priv->list, &xen_9pfs_devs);
+ write_unlock(&xen_9pfs_lock);
+ dev_set_drvdata(&dev->dev, priv);
+ xenbus_switch_state(dev, XenbusStateInitialised);
+
+ return 0;
+
+ error_xenbus:
+ xenbus_transaction_end(xbt, 1);
+ xenbus_dev_fatal(dev, ret, "writing xenstore");
+ error:
+ dev_set_drvdata(&dev->dev, NULL);
+ xen_9pfs_front_free(priv);
+ return ret;
+}
+
+static int xen_9pfs_front_resume(struct xenbus_device *dev)
+{
+	dev_warn(&dev->dev, "suspend/resume unsupported\n");
+ return 0;
+}
+
+static void xen_9pfs_front_changed(struct xenbus_device *dev,
+ enum xenbus_state backend_state)
+{
+ switch (backend_state) {
+ case XenbusStateReconfiguring:
+ case XenbusStateReconfigured:
+ case XenbusStateInitialising:
+ case XenbusStateInitialised:
+ case XenbusStateUnknown:
+ break;
+
+ case XenbusStateInitWait:
+ break;
+
+ case XenbusStateConnected:
+ xenbus_switch_state(dev, XenbusStateConnected);
+ break;
+
+ case XenbusStateClosed:
+ if (dev->state == XenbusStateClosed)
+ break;
+ /* Missed the backend's CLOSING state -- fallthrough */
+ case XenbusStateClosing:
+ xenbus_frontend_closed(dev);
+ break;
+ }
+}
+
+static struct xenbus_driver xen_9pfs_front_driver = {
+ .ids = xen_9pfs_front_ids,
+ .probe = xen_9pfs_front_probe,
+ .remove = xen_9pfs_front_remove,
+ .resume = xen_9pfs_front_resume,
+ .otherend_changed = xen_9pfs_front_changed,
+};
+
+int p9_trans_xen_init(void)
+{
+ if (!xen_domain())
+ return -ENODEV;
+
+ pr_info("Initialising Xen transport for 9pfs\n");
+
+ v9fs_register_trans(&p9_xen_trans);
+ return xenbus_register_frontend(&xen_9pfs_front_driver);
+}
+module_init(p9_trans_xen_init);
+
+void p9_trans_xen_exit(void)
+{
+ v9fs_unregister_trans(&p9_xen_trans);
+ return xenbus_unregister_driver(&xen_9pfs_front_driver);
+}
+module_exit(p9_trans_xen_exit);
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index a572db710d4e..c5ce7745b230 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -133,6 +133,8 @@ static inline size_t br_port_info_size(void)
+ nla_total_size(1) /* IFLA_BRPORT_MCAST_TO_UCAST */
+ nla_total_size(1) /* IFLA_BRPORT_LEARNING */
+ nla_total_size(1) /* IFLA_BRPORT_UNICAST_FLOOD */
+ + nla_total_size(1) /* IFLA_BRPORT_MCAST_FLOOD */
+ + nla_total_size(1) /* IFLA_BRPORT_BCAST_FLOOD */
+ nla_total_size(1) /* IFLA_BRPORT_PROXYARP */
+ nla_total_size(1) /* IFLA_BRPORT_PROXYARP_WIFI */
+ nla_total_size(1) /* IFLA_BRPORT_VLAN_TUNNEL */
@@ -633,6 +635,8 @@ static const struct nla_policy br_port_policy[IFLA_BRPORT_MAX + 1] = {
[IFLA_BRPORT_PROXYARP_WIFI] = { .type = NLA_U8 },
[IFLA_BRPORT_MULTICAST_ROUTER] = { .type = NLA_U8 },
[IFLA_BRPORT_MCAST_TO_UCAST] = { .type = NLA_U8 },
+ [IFLA_BRPORT_MCAST_FLOOD] = { .type = NLA_U8 },
+ [IFLA_BRPORT_BCAST_FLOOD] = { .type = NLA_U8 },
};
/* Change the state of the port and notify spanning tree */
diff --git a/net/bridge/netfilter/ebt_dnat.c b/net/bridge/netfilter/ebt_dnat.c
index 4e0b0c359325..e0bb624c3845 100644
--- a/net/bridge/netfilter/ebt_dnat.c
+++ b/net/bridge/netfilter/ebt_dnat.c
@@ -9,6 +9,7 @@
*/
#include <linux/module.h>
#include <net/sock.h>
+#include "../br_private.h"
#include <linux/netfilter.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_bridge/ebtables.h>
@@ -18,11 +19,30 @@ static unsigned int
ebt_dnat_tg(struct sk_buff *skb, const struct xt_action_param *par)
{
const struct ebt_nat_info *info = par->targinfo;
+ struct net_device *dev;
if (!skb_make_writable(skb, 0))
return EBT_DROP;
ether_addr_copy(eth_hdr(skb)->h_dest, info->mac);
+
+ if (is_multicast_ether_addr(info->mac)) {
+ if (is_broadcast_ether_addr(info->mac))
+ skb->pkt_type = PACKET_BROADCAST;
+ else
+ skb->pkt_type = PACKET_MULTICAST;
+ } else {
+ if (xt_hooknum(par) != NF_BR_BROUTING)
+ dev = br_port_get_rcu(xt_in(par))->br->dev;
+ else
+ dev = xt_in(par);
+
+ if (ether_addr_equal(info->mac, dev->dev_addr))
+ skb->pkt_type = PACKET_HOST;
+ else
+ skb->pkt_type = PACKET_OTHERHOST;
+ }
+
return info->target;
}
diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c
index 108533859a53..4fd02831beed 100644
--- a/net/ceph/ceph_common.c
+++ b/net/ceph/ceph_common.c
@@ -45,6 +45,17 @@ bool libceph_compatible(void *data)
}
EXPORT_SYMBOL(libceph_compatible);
+static int param_get_supported_features(char *buffer,
+ const struct kernel_param *kp)
+{
+ return sprintf(buffer, "0x%llx", CEPH_FEATURES_SUPPORTED_DEFAULT);
+}
+static const struct kernel_param_ops param_ops_supported_features = {
+ .get = param_get_supported_features,
+};
+module_param_cb(supported_features, &param_ops_supported_features, NULL,
+ S_IRUGO);
+
/*
* find filename portion of a path (/foo/bar/baz -> baz)
*/
@@ -187,7 +198,7 @@ void *ceph_kvmalloc(size_t size, gfp_t flags)
return ptr;
}
- return __vmalloc(size, flags | __GFP_HIGHMEM, PAGE_KERNEL);
+ return __vmalloc(size, flags, PAGE_KERNEL);
}
@@ -596,9 +607,7 @@ EXPORT_SYMBOL(ceph_client_gid);
/*
* create a fresh client instance
*/
-struct ceph_client *ceph_create_client(struct ceph_options *opt, void *private,
- u64 supported_features,
- u64 required_features)
+struct ceph_client *ceph_create_client(struct ceph_options *opt, void *private)
{
struct ceph_client *client;
struct ceph_entity_addr *myaddr = NULL;
@@ -615,14 +624,12 @@ struct ceph_client *ceph_create_client(struct ceph_options *opt, void *private,
init_waitqueue_head(&client->auth_wq);
client->auth_err = 0;
- if (!ceph_test_opt(client, NOMSGAUTH))
- required_features |= CEPH_FEATURE_MSG_AUTH;
-
client->extra_mon_dispatch = NULL;
- client->supported_features = CEPH_FEATURES_SUPPORTED_DEFAULT |
- supported_features;
- client->required_features = CEPH_FEATURES_REQUIRED_DEFAULT |
- required_features;
+ client->supported_features = CEPH_FEATURES_SUPPORTED_DEFAULT;
+ client->required_features = CEPH_FEATURES_REQUIRED_DEFAULT;
+
+ if (!ceph_test_opt(client, NOMSGAUTH))
+ client->required_features |= CEPH_FEATURE_MSG_AUTH;
/* msgr */
if (ceph_test_opt(client, MYIP))
diff --git a/net/ceph/cls_lock_client.c b/net/ceph/cls_lock_client.c
index b9233b990399..08ada893f01e 100644
--- a/net/ceph/cls_lock_client.c
+++ b/net/ceph/cls_lock_client.c
@@ -179,6 +179,57 @@ int ceph_cls_break_lock(struct ceph_osd_client *osdc,
}
EXPORT_SYMBOL(ceph_cls_break_lock);
+int ceph_cls_set_cookie(struct ceph_osd_client *osdc,
+ struct ceph_object_id *oid,
+ struct ceph_object_locator *oloc,
+ char *lock_name, u8 type, char *old_cookie,
+ char *tag, char *new_cookie)
+{
+ int cookie_op_buf_size;
+ int name_len = strlen(lock_name);
+ int old_cookie_len = strlen(old_cookie);
+ int tag_len = strlen(tag);
+ int new_cookie_len = strlen(new_cookie);
+ void *p, *end;
+ struct page *cookie_op_page;
+ int ret;
+
+ cookie_op_buf_size = name_len + sizeof(__le32) +
+ old_cookie_len + sizeof(__le32) +
+ tag_len + sizeof(__le32) +
+ new_cookie_len + sizeof(__le32) +
+ sizeof(u8) + CEPH_ENCODING_START_BLK_LEN;
+ if (cookie_op_buf_size > PAGE_SIZE)
+ return -E2BIG;
+
+ cookie_op_page = alloc_page(GFP_NOIO);
+ if (!cookie_op_page)
+ return -ENOMEM;
+
+ p = page_address(cookie_op_page);
+ end = p + cookie_op_buf_size;
+
+ /* encode cls_lock_set_cookie_op struct */
+ ceph_start_encoding(&p, 1, 1,
+ cookie_op_buf_size - CEPH_ENCODING_START_BLK_LEN);
+ ceph_encode_string(&p, end, lock_name, name_len);
+ ceph_encode_8(&p, type);
+ ceph_encode_string(&p, end, old_cookie, old_cookie_len);
+ ceph_encode_string(&p, end, tag, tag_len);
+ ceph_encode_string(&p, end, new_cookie, new_cookie_len);
+
+ dout("%s lock_name %s type %d old_cookie %s tag %s new_cookie %s\n",
+ __func__, lock_name, type, old_cookie, tag, new_cookie);
+ ret = ceph_osdc_call(osdc, oid, oloc, "lock", "set_cookie",
+ CEPH_OSD_FLAG_WRITE, cookie_op_page,
+ cookie_op_buf_size, NULL, NULL);
+
+ dout("%s: status %d\n", __func__, ret);
+ __free_page(cookie_op_page);
+ return ret;
+}
+EXPORT_SYMBOL(ceph_cls_set_cookie);
+
void ceph_free_lockers(struct ceph_locker *lockers, u32 num_lockers)
{
int i;
diff --git a/net/ceph/debugfs.c b/net/ceph/debugfs.c
index c62b2b029a6e..71ba13927b3d 100644
--- a/net/ceph/debugfs.c
+++ b/net/ceph/debugfs.c
@@ -62,7 +62,8 @@ static int osdmap_show(struct seq_file *s, void *p)
return 0;
down_read(&osdc->lock);
- seq_printf(s, "epoch %d flags 0x%x\n", map->epoch, map->flags);
+ seq_printf(s, "epoch %u barrier %u flags 0x%x\n", map->epoch,
+ osdc->epoch_barrier, map->flags);
for (n = rb_first(&map->pg_pools); n; n = rb_next(n)) {
struct ceph_pg_pool_info *pi =
@@ -177,9 +178,7 @@ static void dump_request(struct seq_file *s, struct ceph_osd_request *req)
seq_printf(s, "%llu\t", req->r_tid);
dump_target(s, &req->r_t);
- seq_printf(s, "\t%d\t%u'%llu", req->r_attempts,
- le32_to_cpu(req->r_replay_version.epoch),
- le64_to_cpu(req->r_replay_version.version));
+ seq_printf(s, "\t%d", req->r_attempts);
for (i = 0; i < req->r_num_ops; i++) {
struct ceph_osd_req_op *op = &req->r_ops[i];
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index f76bb3332613..5766a6c896c4 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -1386,8 +1386,9 @@ static void prepare_write_keepalive(struct ceph_connection *con)
dout("prepare_write_keepalive %p\n", con);
con_out_kvec_reset(con);
if (con->peer_features & CEPH_FEATURE_MSGR_KEEPALIVE2) {
- struct timespec now = CURRENT_TIME;
+ struct timespec now;
+ ktime_get_real_ts(&now);
con_out_kvec_add(con, sizeof(tag_keepalive2), &tag_keepalive2);
ceph_encode_timespec(&con->out_temp_keepalive2, &now);
con_out_kvec_add(con, sizeof(con->out_temp_keepalive2),
@@ -3176,8 +3177,9 @@ bool ceph_con_keepalive_expired(struct ceph_connection *con,
{
if (interval > 0 &&
(con->peer_features & CEPH_FEATURE_MSGR_KEEPALIVE2)) {
- struct timespec now = CURRENT_TIME;
+ struct timespec now;
struct timespec ts;
+ ktime_get_real_ts(&now);
jiffies_to_timespec(interval, &ts);
ts = timespec_add(con->last_keepalive_ack, ts);
return timespec_compare(&now, &ts) >= 0;
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index e15ea9e4c495..924f07c36ddb 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -961,6 +961,7 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
truncate_size, truncate_seq);
}
+ req->r_abort_on_full = true;
req->r_flags = flags;
req->r_base_oloc.pool = layout->pool_id;
req->r_base_oloc.pool_ns = ceph_try_get_string(layout->pool_ns);
@@ -1005,7 +1006,7 @@ static bool osd_registered(struct ceph_osd *osd)
*/
static void osd_init(struct ceph_osd *osd)
{
- atomic_set(&osd->o_ref, 1);
+ refcount_set(&osd->o_ref, 1);
RB_CLEAR_NODE(&osd->o_node);
osd->o_requests = RB_ROOT;
osd->o_linger_requests = RB_ROOT;
@@ -1050,9 +1051,9 @@ static struct ceph_osd *create_osd(struct ceph_osd_client *osdc, int onum)
static struct ceph_osd *get_osd(struct ceph_osd *osd)
{
- if (atomic_inc_not_zero(&osd->o_ref)) {
- dout("get_osd %p %d -> %d\n", osd, atomic_read(&osd->o_ref)-1,
- atomic_read(&osd->o_ref));
+ if (refcount_inc_not_zero(&osd->o_ref)) {
+ dout("get_osd %p %d -> %d\n", osd, refcount_read(&osd->o_ref)-1,
+ refcount_read(&osd->o_ref));
return osd;
} else {
dout("get_osd %p FAIL\n", osd);
@@ -1062,9 +1063,9 @@ static struct ceph_osd *get_osd(struct ceph_osd *osd)
static void put_osd(struct ceph_osd *osd)
{
- dout("put_osd %p %d -> %d\n", osd, atomic_read(&osd->o_ref),
- atomic_read(&osd->o_ref) - 1);
- if (atomic_dec_and_test(&osd->o_ref)) {
+ dout("put_osd %p %d -> %d\n", osd, refcount_read(&osd->o_ref),
+ refcount_read(&osd->o_ref) - 1);
+ if (refcount_dec_and_test(&osd->o_ref)) {
osd_cleanup(osd);
kfree(osd);
}
@@ -1297,8 +1298,9 @@ static bool target_should_be_paused(struct ceph_osd_client *osdc,
__pool_full(pi);
WARN_ON(pi->id != t->base_oloc.pool);
- return (t->flags & CEPH_OSD_FLAG_READ && pauserd) ||
- (t->flags & CEPH_OSD_FLAG_WRITE && pausewr);
+ return ((t->flags & CEPH_OSD_FLAG_READ) && pauserd) ||
+ ((t->flags & CEPH_OSD_FLAG_WRITE) && pausewr) ||
+ (osdc->osdmap->epoch < osdc->epoch_barrier);
}
enum calc_target_result {
@@ -1503,9 +1505,10 @@ static void encode_request(struct ceph_osd_request *req, struct ceph_msg *msg)
ceph_encode_32(&p, req->r_flags);
ceph_encode_timespec(p, &req->r_mtime);
p += sizeof(struct ceph_timespec);
- /* aka reassert_version */
- memcpy(p, &req->r_replay_version, sizeof(req->r_replay_version));
- p += sizeof(req->r_replay_version);
+
+ /* reassert_version */
+ memset(p, 0, sizeof(struct ceph_eversion));
+ p += sizeof(struct ceph_eversion);
/* oloc */
ceph_start_encoding(&p, 5, 4,
@@ -1626,6 +1629,7 @@ static void maybe_request_map(struct ceph_osd_client *osdc)
ceph_monc_renew_subs(&osdc->client->monc);
}
+static void complete_request(struct ceph_osd_request *req, int err);
static void send_map_check(struct ceph_osd_request *req);
static void __submit_request(struct ceph_osd_request *req, bool wrlocked)
@@ -1635,6 +1639,7 @@ static void __submit_request(struct ceph_osd_request *req, bool wrlocked)
enum calc_target_result ct_res;
bool need_send = false;
bool promoted = false;
+ bool need_abort = false;
WARN_ON(req->r_tid);
dout("%s req %p wrlocked %d\n", __func__, req, wrlocked);
@@ -1650,8 +1655,13 @@ again:
goto promote;
}
- if ((req->r_flags & CEPH_OSD_FLAG_WRITE) &&
- ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR)) {
+ if (osdc->osdmap->epoch < osdc->epoch_barrier) {
+ dout("req %p epoch %u barrier %u\n", req, osdc->osdmap->epoch,
+ osdc->epoch_barrier);
+ req->r_t.paused = true;
+ maybe_request_map(osdc);
+ } else if ((req->r_flags & CEPH_OSD_FLAG_WRITE) &&
+ ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR)) {
dout("req %p pausewr\n", req);
req->r_t.paused = true;
maybe_request_map(osdc);
@@ -1669,6 +1679,8 @@ again:
pr_warn_ratelimited("FULL or reached pool quota\n");
req->r_t.paused = true;
maybe_request_map(osdc);
+ if (req->r_abort_on_full)
+ need_abort = true;
} else if (!osd_homeless(osd)) {
need_send = true;
} else {
@@ -1685,6 +1697,8 @@ again:
link_request(osd, req);
if (need_send)
send_request(req);
+ else if (need_abort)
+ complete_request(req, -ENOSPC);
mutex_unlock(&osd->lock);
if (ct_res == CALC_TARGET_POOL_DNE)
@@ -1799,6 +1813,97 @@ static void abort_request(struct ceph_osd_request *req, int err)
complete_request(req, err);
}
+static void update_epoch_barrier(struct ceph_osd_client *osdc, u32 eb)
+{
+ if (likely(eb > osdc->epoch_barrier)) {
+ dout("updating epoch_barrier from %u to %u\n",
+ osdc->epoch_barrier, eb);
+ osdc->epoch_barrier = eb;
+		/* Request a map if we have not reached the barrier yet */
+ if (eb > osdc->osdmap->epoch)
+ maybe_request_map(osdc);
+ }
+}
+
+void ceph_osdc_update_epoch_barrier(struct ceph_osd_client *osdc, u32 eb)
+{
+ down_read(&osdc->lock);
+ if (unlikely(eb > osdc->epoch_barrier)) {
+ up_read(&osdc->lock);
+ down_write(&osdc->lock);
+ update_epoch_barrier(osdc, eb);
+ up_write(&osdc->lock);
+ } else {
+ up_read(&osdc->lock);
+ }
+}
+EXPORT_SYMBOL(ceph_osdc_update_epoch_barrier);
+
+/*
+ * Drop all pending requests that are stalled waiting on a full condition to
+ * clear, and complete them with ENOSPC as the return code. Set the
+ * osdc->epoch_barrier to the latest map epoch that we've seen if any were
+ * cancelled.
+ */
+static void ceph_osdc_abort_on_full(struct ceph_osd_client *osdc)
+{
+ struct rb_node *n;
+ bool victims = false;
+
+ dout("enter abort_on_full\n");
+
+ if (!ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) && !have_pool_full(osdc))
+ goto out;
+
+ /* Scan list and see if there is anything to abort */
+ for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
+ struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
+ struct rb_node *m;
+
+ m = rb_first(&osd->o_requests);
+ while (m) {
+ struct ceph_osd_request *req = rb_entry(m,
+ struct ceph_osd_request, r_node);
+ m = rb_next(m);
+
+ if (req->r_abort_on_full) {
+ victims = true;
+ break;
+ }
+ }
+ if (victims)
+ break;
+ }
+
+ if (!victims)
+ goto out;
+
+ /*
+	 * Update the barrier to the current epoch if it's behind that point,
+ * since we know we have some calls to be aborted in the tree.
+ */
+ update_epoch_barrier(osdc, osdc->osdmap->epoch);
+
+ for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
+ struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
+ struct rb_node *m;
+
+ m = rb_first(&osd->o_requests);
+ while (m) {
+ struct ceph_osd_request *req = rb_entry(m,
+ struct ceph_osd_request, r_node);
+ m = rb_next(m);
+
+ if (req->r_abort_on_full &&
+ (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
+ pool_full(osdc, req->r_t.target_oloc.pool)))
+ abort_request(req, -ENOSPC);
+ }
+ }
+out:
+ dout("return abort_on_full barrier=%u\n", osdc->epoch_barrier);
+}
+
static void check_pool_dne(struct ceph_osd_request *req)
{
struct ceph_osd_client *osdc = req->r_osdc;
@@ -3252,11 +3357,13 @@ done:
pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
have_pool_full(osdc);
- if (was_pauserd || was_pausewr || pauserd || pausewr)
+ if (was_pauserd || was_pausewr || pauserd || pausewr ||
+ osdc->osdmap->epoch < osdc->epoch_barrier)
maybe_request_map(osdc);
kick_requests(osdc, &need_resend, &need_resend_linger);
+ ceph_osdc_abort_on_full(osdc);
ceph_monc_got_map(&osdc->client->monc, CEPH_SUB_OSDMAP,
osdc->osdmap->epoch);
up_write(&osdc->lock);
@@ -3574,7 +3681,7 @@ ceph_osdc_watch(struct ceph_osd_client *osdc,
ceph_oid_copy(&lreq->t.base_oid, oid);
ceph_oloc_copy(&lreq->t.base_oloc, oloc);
lreq->t.flags = CEPH_OSD_FLAG_WRITE;
- lreq->mtime = CURRENT_TIME;
+ ktime_get_real_ts(&lreq->mtime);
lreq->reg_req = alloc_linger_request(lreq);
if (!lreq->reg_req) {
@@ -3632,7 +3739,7 @@ int ceph_osdc_unwatch(struct ceph_osd_client *osdc,
ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid);
ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc);
req->r_flags = CEPH_OSD_FLAG_WRITE;
- req->r_mtime = CURRENT_TIME;
+ ktime_get_real_ts(&req->r_mtime);
osd_req_op_watch_init(req, 0, lreq->linger_id,
CEPH_OSD_WATCH_OP_UNWATCH);
@@ -4126,7 +4233,7 @@ void ceph_osdc_stop(struct ceph_osd_client *osdc)
close_osd(osd);
}
up_write(&osdc->lock);
- WARN_ON(atomic_read(&osdc->homeless_osd.o_ref) != 1);
+ WARN_ON(refcount_read(&osdc->homeless_osd.o_ref) != 1);
osd_cleanup(&osdc->homeless_osd);
WARN_ON(!list_empty(&osdc->osd_lru));
diff --git a/net/ceph/pagelist.c b/net/ceph/pagelist.c
index 6864007e64fc..ce09f73be759 100644
--- a/net/ceph/pagelist.c
+++ b/net/ceph/pagelist.c
@@ -16,7 +16,7 @@ static void ceph_pagelist_unmap_tail(struct ceph_pagelist *pl)
void ceph_pagelist_release(struct ceph_pagelist *pl)
{
- if (!atomic_dec_and_test(&pl->refcnt))
+ if (!refcount_dec_and_test(&pl->refcnt))
return;
ceph_pagelist_unmap_tail(pl);
while (!list_empty(&pl->head)) {
diff --git a/net/ceph/snapshot.c b/net/ceph/snapshot.c
index 705414e78ae0..e14a5d038656 100644
--- a/net/ceph/snapshot.c
+++ b/net/ceph/snapshot.c
@@ -49,7 +49,7 @@ struct ceph_snap_context *ceph_create_snap_context(u32 snap_count,
if (!snapc)
return NULL;
- atomic_set(&snapc->nref, 1);
+ refcount_set(&snapc->nref, 1);
snapc->num_snaps = snap_count;
return snapc;
@@ -59,7 +59,7 @@ EXPORT_SYMBOL(ceph_create_snap_context);
struct ceph_snap_context *ceph_get_snap_context(struct ceph_snap_context *sc)
{
if (sc)
- atomic_inc(&sc->nref);
+ refcount_inc(&sc->nref);
return sc;
}
EXPORT_SYMBOL(ceph_get_snap_context);
@@ -68,7 +68,7 @@ void ceph_put_snap_context(struct ceph_snap_context *sc)
{
if (!sc)
return;
- if (atomic_dec_and_test(&sc->nref)) {
+ if (refcount_dec_and_test(&sc->nref)) {
/*printk(" deleting snap_context %p\n", sc);*/
kfree(sc);
}
diff --git a/net/core/dev.c b/net/core/dev.c
index d07aa5ffb511..96cf83da0d66 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -81,6 +81,7 @@
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/sched.h>
+#include <linux/sched/mm.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/mm.h>
@@ -4235,7 +4236,7 @@ static int __netif_receive_skb(struct sk_buff *skb)
int ret;
if (sk_memalloc_socks() && skb_pfmemalloc(skb)) {
- unsigned long pflags = current->flags;
+ unsigned int noreclaim_flag;
/*
* PFMEMALLOC skbs are special, they should
@@ -4246,9 +4247,9 @@ static int __netif_receive_skb(struct sk_buff *skb)
* Use PF_MEMALLOC as this saves us from propagating the allocation
* context down to all allocation sites.
*/
- current->flags |= PF_MEMALLOC;
+ noreclaim_flag = memalloc_noreclaim_save();
ret = __netif_receive_skb_core(skb, true);
- current_restore_flags(pflags, PF_MEMALLOC);
+ memalloc_noreclaim_restore(noreclaim_flag);
} else
ret = __netif_receive_skb_core(skb, false);
@@ -7264,12 +7265,10 @@ static int netif_alloc_rx_queues(struct net_device *dev)
BUG_ON(count < 1);
- rx = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
- if (!rx) {
- rx = vzalloc(sz);
- if (!rx)
- return -ENOMEM;
- }
+ rx = kvzalloc(sz, GFP_KERNEL | __GFP_REPEAT);
+ if (!rx)
+ return -ENOMEM;
+
dev->_rx = rx;
for (i = 0; i < count; i++)
@@ -7306,12 +7305,10 @@ static int netif_alloc_netdev_queues(struct net_device *dev)
if (count < 1 || count > 0xffff)
return -EINVAL;
- tx = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
- if (!tx) {
- tx = vzalloc(sz);
- if (!tx)
- return -ENOMEM;
- }
+ tx = kvzalloc(sz, GFP_KERNEL | __GFP_REPEAT);
+ if (!tx)
+ return -ENOMEM;
+
dev->_tx = tx;
netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
@@ -7845,9 +7842,7 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
/* ensure 32-byte alignment of whole construct */
alloc_size += NETDEV_ALIGN - 1;
- p = kzalloc(alloc_size, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
- if (!p)
- p = vzalloc(alloc_size);
+ p = kvzalloc(alloc_size, GFP_KERNEL | __GFP_REPEAT);
if (!p)
return NULL;
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 6e67315ec368..bcb0f610ee42 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1054,7 +1054,7 @@ static int rtnl_phys_port_name_fill(struct sk_buff *skb, struct net_device *dev)
return err;
}
- if (nla_put(skb, IFLA_PHYS_PORT_NAME, strlen(name), name))
+ if (nla_put_string(skb, IFLA_PHYS_PORT_NAME, name))
return -EMSGSIZE;
return 0;
diff --git a/net/core/secure_seq.c b/net/core/secure_seq.c
index 6bd2f8fb0476..ae35cce3a40d 100644
--- a/net/core/secure_seq.c
+++ b/net/core/secure_seq.c
@@ -24,9 +24,13 @@ static siphash_key_t ts_secret __read_mostly;
static __always_inline void net_secret_init(void)
{
- net_get_random_once(&ts_secret, sizeof(ts_secret));
net_get_random_once(&net_secret, sizeof(net_secret));
}
+
+static __always_inline void ts_secret_init(void)
+{
+ net_get_random_once(&ts_secret, sizeof(ts_secret));
+}
#endif
#ifdef CONFIG_INET
@@ -47,7 +51,7 @@ static u32 seq_scale(u32 seq)
#endif
#if IS_ENABLED(CONFIG_IPV6)
-static u32 secure_tcpv6_ts_off(const __be32 *saddr, const __be32 *daddr)
+u32 secure_tcpv6_ts_off(const __be32 *saddr, const __be32 *daddr)
{
const struct {
struct in6_addr saddr;
@@ -60,12 +64,14 @@ static u32 secure_tcpv6_ts_off(const __be32 *saddr, const __be32 *daddr)
if (sysctl_tcp_timestamps != 1)
return 0;
+ ts_secret_init();
return siphash(&combined, offsetofend(typeof(combined), daddr),
&ts_secret);
}
+EXPORT_SYMBOL(secure_tcpv6_ts_off);
-u32 secure_tcpv6_seq_and_tsoff(const __be32 *saddr, const __be32 *daddr,
- __be16 sport, __be16 dport, u32 *tsoff)
+u32 secure_tcpv6_seq(const __be32 *saddr, const __be32 *daddr,
+ __be16 sport, __be16 dport)
{
const struct {
struct in6_addr saddr;
@@ -78,14 +84,14 @@ u32 secure_tcpv6_seq_and_tsoff(const __be32 *saddr, const __be32 *daddr,
.sport = sport,
.dport = dport
};
- u64 hash;
+ u32 hash;
+
net_secret_init();
hash = siphash(&combined, offsetofend(typeof(combined), dport),
&net_secret);
- *tsoff = secure_tcpv6_ts_off(saddr, daddr);
return seq_scale(hash);
}
-EXPORT_SYMBOL(secure_tcpv6_seq_and_tsoff);
+EXPORT_SYMBOL(secure_tcpv6_seq);
u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
__be16 dport)
@@ -107,11 +113,12 @@ EXPORT_SYMBOL(secure_ipv6_port_ephemeral);
#endif
#ifdef CONFIG_INET
-static u32 secure_tcp_ts_off(__be32 saddr, __be32 daddr)
+u32 secure_tcp_ts_off(__be32 saddr, __be32 daddr)
{
if (sysctl_tcp_timestamps != 1)
return 0;
+ ts_secret_init();
return siphash_2u32((__force u32)saddr, (__force u32)daddr,
&ts_secret);
}
@@ -121,15 +128,15 @@ static u32 secure_tcp_ts_off(__be32 saddr, __be32 daddr)
* it would be easy enough to have the former function use siphash_4u32, passing
* the arguments as separate u32.
*/
-u32 secure_tcp_seq_and_tsoff(__be32 saddr, __be32 daddr,
- __be16 sport, __be16 dport, u32 *tsoff)
+u32 secure_tcp_seq(__be32 saddr, __be32 daddr,
+ __be16 sport, __be16 dport)
{
- u64 hash;
+ u32 hash;
+
net_secret_init();
hash = siphash_3u32((__force u32)saddr, (__force u32)daddr,
(__force u32)sport << 16 | (__force u32)dport,
&net_secret);
- *tsoff = secure_tcp_ts_off(saddr, daddr);
return seq_scale(hash);
}
diff --git a/net/core/sock.c b/net/core/sock.c
index b5baeb9cb0fb..79c6aee6af9b 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -102,6 +102,7 @@
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/sched.h>
+#include <linux/sched/mm.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
@@ -372,14 +373,14 @@ EXPORT_SYMBOL_GPL(sk_clear_memalloc);
int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
int ret;
- unsigned long pflags = current->flags;
+ unsigned int noreclaim_flag;
/* these should have been dropped before queueing */
BUG_ON(!sock_flag(sk, SOCK_MEMALLOC));
- current->flags |= PF_MEMALLOC;
+ noreclaim_flag = memalloc_noreclaim_save();
ret = sk->sk_backlog_rcv(sk, skb);
- current_restore_flags(pflags, PF_MEMALLOC);
+ memalloc_noreclaim_restore(noreclaim_flag);
return ret;
}
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index b99168b0fabf..f75482bdee9a 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -951,7 +951,7 @@ static struct proto dccp_v4_prot = {
.orphan_count = &dccp_orphan_count,
.max_header = MAX_DCCP_HEADER,
.obj_size = sizeof(struct dccp_sock),
- .slab_flags = SLAB_DESTROY_BY_RCU,
+ .slab_flags = SLAB_TYPESAFE_BY_RCU,
.rsk_prot = &dccp_request_sock_ops,
.twsk_prot = &dccp_timewait_sock_ops,
.h.hashinfo = &dccp_hashinfo,
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index d9b6a4e403e7..840f14aaa016 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -1014,7 +1014,7 @@ static struct proto dccp_v6_prot = {
.orphan_count = &dccp_orphan_count,
.max_header = MAX_DCCP_HEADER,
.obj_size = sizeof(struct dccp6_sock),
- .slab_flags = SLAB_DESTROY_BY_RCU,
+ .slab_flags = SLAB_TYPESAFE_BY_RCU,
.rsk_prot = &dccp6_request_sock_ops,
.twsk_prot = &dccp6_timewait_sock_ops,
.h.hashinfo = &dccp_hashinfo,
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index 9afa2a5030b2..405483a07efc 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -2361,7 +2361,8 @@ MODULE_AUTHOR("Linux DECnet Project Team");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_DECnet);
-static char banner[] __initdata = KERN_INFO "NET4: DECnet for Linux: V.2.5.68s (C) 1995-2003 Linux DECnet Project Team\n";
+static const char banner[] __initconst = KERN_INFO
+"NET4: DECnet for Linux: V.2.5.68s (C) 1995-2003 Linux DECnet Project Team\n";
static int __init decnet_init(void)
{
diff --git a/net/decnet/dn_neigh.c b/net/decnet/dn_neigh.c
index 482730cd8a56..eeb5fc561f80 100644
--- a/net/decnet/dn_neigh.c
+++ b/net/decnet/dn_neigh.c
@@ -110,7 +110,7 @@ struct neigh_table dn_neigh_table = {
static int dn_neigh_construct(struct neighbour *neigh)
{
struct net_device *dev = neigh->dev;
- struct dn_neigh *dn = (struct dn_neigh *)neigh;
+ struct dn_neigh *dn = container_of(neigh, struct dn_neigh, n);
struct dn_dev *dn_db;
struct neigh_parms *parms;
@@ -339,7 +339,7 @@ int dn_to_neigh_output(struct net *net, struct sock *sk, struct sk_buff *skb)
struct dst_entry *dst = skb_dst(skb);
struct dn_route *rt = (struct dn_route *) dst;
struct neighbour *neigh = rt->n;
- struct dn_neigh *dn = (struct dn_neigh *)neigh;
+ struct dn_neigh *dn = container_of(neigh, struct dn_neigh, n);
struct dn_dev *dn_db;
bool use_long;
@@ -391,7 +391,7 @@ int dn_neigh_router_hello(struct net *net, struct sock *sk, struct sk_buff *skb)
neigh = __neigh_lookup(&dn_neigh_table, &src, skb->dev, 1);
- dn = (struct dn_neigh *)neigh;
+ dn = container_of(neigh, struct dn_neigh, n);
if (neigh) {
write_lock(&neigh->lock);
@@ -451,7 +451,7 @@ int dn_neigh_endnode_hello(struct net *net, struct sock *sk, struct sk_buff *skb
neigh = __neigh_lookup(&dn_neigh_table, &src, skb->dev, 1);
- dn = (struct dn_neigh *)neigh;
+ dn = container_of(neigh, struct dn_neigh, n);
if (neigh) {
write_lock(&neigh->lock);
@@ -510,7 +510,7 @@ static void neigh_elist_cb(struct neighbour *neigh, void *_info)
if (neigh->dev != s->dev)
return;
- dn = (struct dn_neigh *) neigh;
+ dn = container_of(neigh, struct dn_neigh, n);
if (!(dn->flags & (DN_NDFLAG_R1|DN_NDFLAG_R2)))
return;
@@ -549,7 +549,7 @@ int dn_neigh_elist(struct net_device *dev, unsigned char *ptr, int n)
static inline void dn_neigh_format_entry(struct seq_file *seq,
struct neighbour *n)
{
- struct dn_neigh *dn = (struct dn_neigh *) n;
+ struct dn_neigh *dn = container_of(n, struct dn_neigh, n);
char buf[DN_ASCBUF_LEN];
read_lock(&n->lock);
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 5e313c1ac94f..1054d330bf9d 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -794,6 +794,8 @@ struct sock *inet_csk_clone_lock(const struct sock *sk,
/* listeners have SOCK_RCU_FREE, not the children */
sock_reset_flag(newsk, SOCK_RCU_FREE);
+ inet_sk(newsk)->mc_list = NULL;
+
newsk->sk_mark = inet_rsk(req)->ir_mark;
atomic64_set(&newsk->sk_cookie,
atomic64_read(&inet_rsk(req)->ir_cookie));
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 8bea74298173..e9a59d2d91d4 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -678,11 +678,7 @@ int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo)
/* no more locks than number of hash buckets */
nblocks = min(nblocks, hashinfo->ehash_mask + 1);
- hashinfo->ehash_locks = kmalloc_array(nblocks, locksz,
- GFP_KERNEL | __GFP_NOWARN);
- if (!hashinfo->ehash_locks)
- hashinfo->ehash_locks = vmalloc(nblocks * locksz);
-
+ hashinfo->ehash_locks = kvmalloc_array(nblocks, locksz, GFP_KERNEL);
if (!hashinfo->ehash_locks)
return -ENOMEM;
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
index 40977413fd48..4ec9affb2252 100644
--- a/net/ipv4/ip_vti.c
+++ b/net/ipv4/ip_vti.c
@@ -546,12 +546,13 @@ static int vti_fill_info(struct sk_buff *skb, const struct net_device *dev)
struct ip_tunnel *t = netdev_priv(dev);
struct ip_tunnel_parm *p = &t->parms;
- nla_put_u32(skb, IFLA_VTI_LINK, p->link);
- nla_put_be32(skb, IFLA_VTI_IKEY, p->i_key);
- nla_put_be32(skb, IFLA_VTI_OKEY, p->o_key);
- nla_put_in_addr(skb, IFLA_VTI_LOCAL, p->iph.saddr);
- nla_put_in_addr(skb, IFLA_VTI_REMOTE, p->iph.daddr);
- nla_put_u32(skb, IFLA_VTI_FWMARK, t->fwmark);
+ if (nla_put_u32(skb, IFLA_VTI_LINK, p->link) ||
+ nla_put_be32(skb, IFLA_VTI_IKEY, p->i_key) ||
+ nla_put_be32(skb, IFLA_VTI_OKEY, p->o_key) ||
+ nla_put_in_addr(skb, IFLA_VTI_LOCAL, p->iph.saddr) ||
+ nla_put_in_addr(skb, IFLA_VTI_REMOTE, p->iph.daddr) ||
+ nla_put_u32(skb, IFLA_VTI_FWMARK, t->fwmark))
+ return -EMSGSIZE;
return 0;
}
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 9d943974de2b..bdffad875691 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -358,6 +358,9 @@ static int raw_send_hdrinc(struct sock *sk, struct flowi4 *fl4,
rt->dst.dev->mtu);
return -EMSGSIZE;
}
+ if (length < sizeof(struct iphdr))
+ return -EINVAL;
+
if (flags&MSG_PROBE)
goto out;
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index 496b97e17aaf..0257d965f111 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -16,6 +16,7 @@
#include <linux/siphash.h>
#include <linux/kernel.h>
#include <linux/export.h>
+#include <net/secure_seq.h>
#include <net/tcp.h>
#include <net/route.h>
@@ -203,7 +204,7 @@ EXPORT_SYMBOL_GPL(__cookie_v4_check);
struct sock *tcp_get_cookie_sock(struct sock *sk, struct sk_buff *skb,
struct request_sock *req,
- struct dst_entry *dst)
+ struct dst_entry *dst, u32 tsoff)
{
struct inet_connection_sock *icsk = inet_csk(sk);
struct sock *child;
@@ -213,6 +214,7 @@ struct sock *tcp_get_cookie_sock(struct sock *sk, struct sk_buff *skb,
NULL, &own_req);
if (child) {
atomic_set(&req->rsk_refcnt, 1);
+ tcp_sk(child)->tsoffset = tsoff;
sock_rps_save_rxhash(child, skb);
inet_csk_reqsk_queue_add(sk, req, child);
} else {
@@ -292,6 +294,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
struct rtable *rt;
__u8 rcv_wscale;
struct flowi4 fl4;
+ u32 tsoff = 0;
if (!sock_net(sk)->ipv4.sysctl_tcp_syncookies || !th->ack || th->rst)
goto out;
@@ -311,6 +314,11 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
memset(&tcp_opt, 0, sizeof(tcp_opt));
tcp_parse_options(skb, &tcp_opt, 0, NULL);
+ if (tcp_opt.saw_tstamp && tcp_opt.rcv_tsecr) {
+ tsoff = secure_tcp_ts_off(ip_hdr(skb)->daddr, ip_hdr(skb)->saddr);
+ tcp_opt.rcv_tsecr -= tsoff;
+ }
+
if (!cookie_timestamp_decode(&tcp_opt))
goto out;
@@ -381,7 +389,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
ireq->rcv_wscale = rcv_wscale;
ireq->ecn_ok = cookie_ecn_ok(&tcp_opt, sock_net(sk), &rt->dst);
- ret = tcp_get_cookie_sock(sk, skb, req, &rt->dst);
+ ret = tcp_get_cookie_sock(sk, skb, req, &rt->dst, tsoff);
/* ip_queue_xmit() depends on our flow being setup
* Normal sockets get it right from inet_csk_route_child_sock()
*/
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 9739962bfb3f..5a3ad09e2786 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -85,7 +85,6 @@ int sysctl_tcp_dsack __read_mostly = 1;
int sysctl_tcp_app_win __read_mostly = 31;
int sysctl_tcp_adv_win_scale __read_mostly = 1;
EXPORT_SYMBOL(sysctl_tcp_adv_win_scale);
-EXPORT_SYMBOL(sysctl_tcp_timestamps);
/* rfc5961 challenge ack rate limiting */
int sysctl_tcp_challenge_ack_limit = 1000;
@@ -6347,8 +6346,8 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
if (security_inet_conn_request(sk, skb, req))
goto drop_and_free;
- if (isn && tmp_opt.tstamp_ok)
- af_ops->init_seq_tsoff(skb, &tcp_rsk(req)->ts_off);
+ if (tmp_opt.tstamp_ok)
+ tcp_rsk(req)->ts_off = af_ops->init_ts_off(skb);
if (!want_cookie && !isn) {
/* Kill the following clause, if you dislike this way. */
@@ -6368,7 +6367,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
goto drop_and_release;
}
- isn = af_ops->init_seq_tsoff(skb, &tcp_rsk(req)->ts_off);
+ isn = af_ops->init_seq(skb);
}
if (!dst) {
dst = af_ops->route_req(sk, &fl, req);
@@ -6380,7 +6379,6 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
if (want_cookie) {
isn = cookie_init_sequence(af_ops, sk, skb, &req->mss);
- tcp_rsk(req)->ts_off = 0;
req->cookie_ts = tmp_opt.tstamp_ok;
if (!tmp_opt.tstamp_ok)
inet_rsk(req)->ecn_ok = 0;
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index cbbafe546c0f..5ab2aac5ca19 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -94,12 +94,18 @@ static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
struct inet_hashinfo tcp_hashinfo;
EXPORT_SYMBOL(tcp_hashinfo);
-static u32 tcp_v4_init_seq_and_tsoff(const struct sk_buff *skb, u32 *tsoff)
+static u32 tcp_v4_init_seq(const struct sk_buff *skb)
{
- return secure_tcp_seq_and_tsoff(ip_hdr(skb)->daddr,
- ip_hdr(skb)->saddr,
- tcp_hdr(skb)->dest,
- tcp_hdr(skb)->source, tsoff);
+ return secure_tcp_seq(ip_hdr(skb)->daddr,
+ ip_hdr(skb)->saddr,
+ tcp_hdr(skb)->dest,
+ tcp_hdr(skb)->source);
+}
+
+static u32 tcp_v4_init_ts_off(const struct sk_buff *skb)
+{
+ return secure_tcp_ts_off(ip_hdr(skb)->daddr,
+ ip_hdr(skb)->saddr);
}
int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
@@ -145,7 +151,6 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
struct flowi4 *fl4;
struct rtable *rt;
int err;
- u32 seq;
struct ip_options_rcu *inet_opt;
struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;
@@ -232,13 +237,13 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
rt = NULL;
if (likely(!tp->repair)) {
- seq = secure_tcp_seq_and_tsoff(inet->inet_saddr,
- inet->inet_daddr,
- inet->inet_sport,
- usin->sin_port,
- &tp->tsoffset);
if (!tp->write_seq)
- tp->write_seq = seq;
+ tp->write_seq = secure_tcp_seq(inet->inet_saddr,
+ inet->inet_daddr,
+ inet->inet_sport,
+ usin->sin_port);
+ tp->tsoffset = secure_tcp_ts_off(inet->inet_saddr,
+ inet->inet_daddr);
}
inet->inet_id = tp->write_seq ^ jiffies;
@@ -1239,7 +1244,8 @@ static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
.cookie_init_seq = cookie_v4_init_sequence,
#endif
.route_req = tcp_v4_route_req,
- .init_seq_tsoff = tcp_v4_init_seq_and_tsoff,
+ .init_seq = tcp_v4_init_seq,
+ .init_ts_off = tcp_v4_init_ts_off,
.send_synack = tcp_v4_send_synack,
};
@@ -2389,7 +2395,7 @@ struct proto tcp_prot = {
.sysctl_rmem = sysctl_tcp_rmem,
.max_header = MAX_TCP_HEADER,
.obj_size = sizeof(struct tcp_sock),
- .slab_flags = SLAB_DESTROY_BY_RCU,
+ .slab_flags = SLAB_TYPESAFE_BY_RCU,
.twsk_prot = &tcp_timewait_sock_ops,
.rsk_prot = &tcp_request_sock_ops,
.h.hashinfo = &tcp_hashinfo,
diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c
index 9d0d4f39e42b..653bbd67e3a3 100644
--- a/net/ipv4/tcp_metrics.c
+++ b/net/ipv4/tcp_metrics.c
@@ -1011,10 +1011,7 @@ static int __net_init tcp_net_metrics_init(struct net *net)
tcp_metrics_hash_log = order_base_2(slots);
size = sizeof(struct tcpm_hash_bucket) << tcp_metrics_hash_log;
- tcp_metrics_hash = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
- if (!tcp_metrics_hash)
- tcp_metrics_hash = vzalloc(size);
-
+ tcp_metrics_hash = kvzalloc(size, GFP_KERNEL);
if (!tcp_metrics_hash)
return -ENOMEM;
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 8f6373b0cd77..717be4de5324 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -523,6 +523,7 @@ struct sock *tcp_create_openreq_child(const struct sock *sk,
newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
newtp->rx_opt.mss_clamp = req->mss;
tcp_ecn_openreq_child(newtp, req);
+ newtp->fastopen_req = NULL;
newtp->fastopen_rsk = NULL;
newtp->syn_data_acked = 0;
newtp->rack.mstamp.v64 = 0;
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 60111a0fc201..4858e190f6ac 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1514,6 +1514,7 @@ static void tcp_cwnd_application_limited(struct sock *sk)
static void tcp_cwnd_validate(struct sock *sk, bool is_cwnd_limited)
{
+ const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
struct tcp_sock *tp = tcp_sk(sk);
/* Track the maximum number of outstanding packets in each
@@ -1536,7 +1537,8 @@ static void tcp_cwnd_validate(struct sock *sk, bool is_cwnd_limited)
tp->snd_cwnd_used = tp->packets_out;
if (sysctl_tcp_slow_start_after_idle &&
- (s32)(tcp_time_stamp - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto)
+ (s32)(tcp_time_stamp - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto &&
+ !ca_ops->cong_control)
tcp_cwnd_application_limited(sk);
/* The following conditions together indicate the starvation
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index b09ac38d8dc4..8d297a79b568 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -3328,7 +3328,8 @@ static int fixup_permanent_addr(struct inet6_dev *idev,
idev->dev, 0, 0);
}
- addrconf_dad_start(ifp);
+ if (ifp->state == INET6_IFADDR_STATE_PREDAD)
+ addrconf_dad_start(ifp);
return 0;
}
@@ -3547,6 +3548,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
*/
static struct notifier_block ipv6_dev_notf = {
.notifier_call = addrconf_notify,
+ .priority = ADDRCONF_NOTIFY_PRIORITY,
};
static void addrconf_type_change(struct net_device *dev, unsigned long event)
@@ -3683,7 +3685,7 @@ restart:
if (keep) {
/* set state to skip the notifier below */
state = INET6_IFADDR_STATE_DEAD;
- ifa->state = 0;
+ ifa->state = INET6_IFADDR_STATE_PREDAD;
if (!(ifa->flags & IFA_F_NODAD))
ifa->flags |= IFA_F_TENTATIVE;
@@ -6572,6 +6574,8 @@ int __init addrconf_init(void)
goto errlo;
}
+ ip6_route_init_special_entries();
+
for (i = 0; i < IN6_ADDR_HSIZE; i++)
INIT_HLIST_HEAD(&inet6_addr_lst[i]);
diff --git a/net/ipv6/ila/ila_xlat.c b/net/ipv6/ila/ila_xlat.c
index af8f52ee7180..2fd5ca151dcf 100644
--- a/net/ipv6/ila/ila_xlat.c
+++ b/net/ipv6/ila/ila_xlat.c
@@ -41,13 +41,7 @@ static int alloc_ila_locks(struct ila_net *ilan)
size = roundup_pow_of_two(nr_pcpus * LOCKS_PER_CPU);
if (sizeof(spinlock_t) != 0) {
-#ifdef CONFIG_NUMA
- if (size * sizeof(spinlock_t) > PAGE_SIZE)
- ilan->locks = vmalloc(size * sizeof(spinlock_t));
- else
-#endif
- ilan->locks = kmalloc_array(size, sizeof(spinlock_t),
- GFP_KERNEL);
+ ilan->locks = kvmalloc(size * sizeof(spinlock_t), GFP_KERNEL);
if (!ilan->locks)
return -ENOMEM;
for (i = 0; i < size; i++)
diff --git a/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c b/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c
index bf3ad3e7b647..b2b4f031b3a1 100644
--- a/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c
+++ b/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c
@@ -235,7 +235,7 @@ int nf_nat_icmpv6_reply_translation(struct sk_buff *skb,
inside->icmp6.icmp6_cksum =
csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
skb->len - hdrlen, IPPROTO_ICMPV6,
- csum_partial(&inside->icmp6,
+ skb_checksum(skb, hdrlen,
skb->len - hdrlen, 0));
}
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 0da6a12b5472..1f992d9e261d 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -632,6 +632,8 @@ static int rawv6_send_hdrinc(struct sock *sk, struct msghdr *msg, int length,
ipv6_local_error(sk, EMSGSIZE, fl6, rt->dst.dev->mtu);
return -EMSGSIZE;
}
+ if (length < sizeof(struct ipv6hdr))
+ return -EINVAL;
if (flags&MSG_PROBE)
goto out;
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index a1bf426c959b..dc61b0b5e64e 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -3709,7 +3709,10 @@ static int ip6_route_dev_notify(struct notifier_block *this,
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
struct net *net = dev_net(dev);
- if (event == NETDEV_REGISTER && (dev->flags & IFF_LOOPBACK)) {
+ if (!(dev->flags & IFF_LOOPBACK))
+ return NOTIFY_OK;
+
+ if (event == NETDEV_REGISTER) {
net->ipv6.ip6_null_entry->dst.dev = dev;
net->ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(dev);
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
@@ -3718,6 +3721,12 @@ static int ip6_route_dev_notify(struct notifier_block *this,
net->ipv6.ip6_blk_hole_entry->dst.dev = dev;
net->ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(dev);
#endif
+ } else if (event == NETDEV_UNREGISTER) {
+ in6_dev_put(net->ipv6.ip6_null_entry->rt6i_idev);
+#ifdef CONFIG_IPV6_MULTIPLE_TABLES
+ in6_dev_put(net->ipv6.ip6_prohibit_entry->rt6i_idev);
+ in6_dev_put(net->ipv6.ip6_blk_hole_entry->rt6i_idev);
+#endif
}
return NOTIFY_OK;
@@ -4024,9 +4033,24 @@ static struct pernet_operations ip6_route_net_late_ops = {
static struct notifier_block ip6_route_dev_notifier = {
.notifier_call = ip6_route_dev_notify,
- .priority = 0,
+ .priority = ADDRCONF_NOTIFY_PRIORITY - 10,
};
+void __init ip6_route_init_special_entries(void)
+{
+	/* Registration of the loopback device happens before this point, so
+	 * the loopback reference in rt6_info is not taken; take it manually
+	 * for init_net */
+ init_net.ipv6.ip6_null_entry->dst.dev = init_net.loopback_dev;
+ init_net.ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
+ #ifdef CONFIG_IPV6_MULTIPLE_TABLES
+ init_net.ipv6.ip6_prohibit_entry->dst.dev = init_net.loopback_dev;
+ init_net.ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
+ init_net.ipv6.ip6_blk_hole_entry->dst.dev = init_net.loopback_dev;
+ init_net.ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
+ #endif
+}
+
int __init ip6_route_init(void)
{
int ret;
@@ -4053,17 +4077,6 @@ int __init ip6_route_init(void)
ip6_dst_blackhole_ops.kmem_cachep = ip6_dst_ops_template.kmem_cachep;
- /* Registering of the loopback is done before this portion of code,
- * the loopback reference in rt6_info will not be taken, do it
- * manually for init_net */
- init_net.ipv6.ip6_null_entry->dst.dev = init_net.loopback_dev;
- init_net.ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
- #ifdef CONFIG_IPV6_MULTIPLE_TABLES
- init_net.ipv6.ip6_prohibit_entry->dst.dev = init_net.loopback_dev;
- init_net.ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
- init_net.ipv6.ip6_blk_hole_entry->dst.dev = init_net.loopback_dev;
- init_net.ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
- #endif
ret = fib6_init();
if (ret)
goto out_register_subsys;
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index 895ff650db43..5abc3692b901 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -18,6 +18,7 @@
#include <linux/random.h>
#include <linux/siphash.h>
#include <linux/kernel.h>
+#include <net/secure_seq.h>
#include <net/ipv6.h>
#include <net/tcp.h>
@@ -143,6 +144,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
int mss;
struct dst_entry *dst;
__u8 rcv_wscale;
+ u32 tsoff = 0;
if (!sock_net(sk)->ipv4.sysctl_tcp_syncookies || !th->ack || th->rst)
goto out;
@@ -162,6 +164,12 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
memset(&tcp_opt, 0, sizeof(tcp_opt));
tcp_parse_options(skb, &tcp_opt, 0, NULL);
+ if (tcp_opt.saw_tstamp && tcp_opt.rcv_tsecr) {
+ tsoff = secure_tcpv6_ts_off(ipv6_hdr(skb)->daddr.s6_addr32,
+ ipv6_hdr(skb)->saddr.s6_addr32);
+ tcp_opt.rcv_tsecr -= tsoff;
+ }
+
if (!cookie_timestamp_decode(&tcp_opt))
goto out;
@@ -242,7 +250,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
ireq->rcv_wscale = rcv_wscale;
ireq->ecn_ok = cookie_ecn_ok(&tcp_opt, sock_net(sk), dst);
- ret = tcp_get_cookie_sock(sk, skb, req, dst);
+ ret = tcp_get_cookie_sock(sk, skb, req, dst, tsoff);
out:
return ret;
out_free:
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 8e42e8f54b70..7a8237acd210 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -101,12 +101,18 @@ static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
}
}
-static u32 tcp_v6_init_seq_and_tsoff(const struct sk_buff *skb, u32 *tsoff)
+static u32 tcp_v6_init_seq(const struct sk_buff *skb)
{
- return secure_tcpv6_seq_and_tsoff(ipv6_hdr(skb)->daddr.s6_addr32,
- ipv6_hdr(skb)->saddr.s6_addr32,
- tcp_hdr(skb)->dest,
- tcp_hdr(skb)->source, tsoff);
+ return secure_tcpv6_seq(ipv6_hdr(skb)->daddr.s6_addr32,
+ ipv6_hdr(skb)->saddr.s6_addr32,
+ tcp_hdr(skb)->dest,
+ tcp_hdr(skb)->source);
+}
+
+static u32 tcp_v6_init_ts_off(const struct sk_buff *skb)
+{
+ return secure_tcpv6_ts_off(ipv6_hdr(skb)->daddr.s6_addr32,
+ ipv6_hdr(skb)->saddr.s6_addr32);
}
static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
@@ -122,7 +128,6 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
struct flowi6 fl6;
struct dst_entry *dst;
int addr_type;
- u32 seq;
int err;
struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;
@@ -282,13 +287,13 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
sk_set_txhash(sk);
if (likely(!tp->repair)) {
- seq = secure_tcpv6_seq_and_tsoff(np->saddr.s6_addr32,
- sk->sk_v6_daddr.s6_addr32,
- inet->inet_sport,
- inet->inet_dport,
- &tp->tsoffset);
if (!tp->write_seq)
- tp->write_seq = seq;
+ tp->write_seq = secure_tcpv6_seq(np->saddr.s6_addr32,
+ sk->sk_v6_daddr.s6_addr32,
+ inet->inet_sport,
+ inet->inet_dport);
+ tp->tsoffset = secure_tcpv6_ts_off(np->saddr.s6_addr32,
+ sk->sk_v6_daddr.s6_addr32);
}
if (tcp_fastopen_defer_connect(sk, &err))
@@ -749,7 +754,8 @@ static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
.cookie_init_seq = cookie_v6_init_sequence,
#endif
.route_req = tcp_v6_route_req,
- .init_seq_tsoff = tcp_v6_init_seq_and_tsoff,
+ .init_seq = tcp_v6_init_seq,
+ .init_ts_off = tcp_v6_init_ts_off,
.send_synack = tcp_v6_send_synack,
};
@@ -1911,7 +1917,7 @@ struct proto tcpv6_prot = {
.sysctl_rmem = sysctl_tcp_rmem,
.max_header = MAX_TCP_HEADER,
.obj_size = sizeof(struct tcp6_sock),
- .slab_flags = SLAB_DESTROY_BY_RCU,
+ .slab_flags = SLAB_TYPESAFE_BY_RCU,
.twsk_prot = &tcp6_timewait_sock_ops,
.rsk_prot = &tcp6_request_sock_ops,
.h.hashinfo = &tcp_hashinfo,
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index cb4fff785cbf..8364fe5b59e4 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -142,7 +142,7 @@ static struct proto llc_proto = {
.name = "LLC",
.owner = THIS_MODULE,
.obj_size = sizeof(struct llc_sock),
- .slab_flags = SLAB_DESTROY_BY_RCU,
+ .slab_flags = SLAB_TYPESAFE_BY_RCU,
};
/**
diff --git a/net/llc/llc_conn.c b/net/llc/llc_conn.c
index 8bc5a1bd2d45..9b02c13d258b 100644
--- a/net/llc/llc_conn.c
+++ b/net/llc/llc_conn.c
@@ -506,7 +506,7 @@ static struct sock *__llc_lookup_established(struct llc_sap *sap,
again:
sk_nulls_for_each_rcu(rc, node, laddr_hb) {
if (llc_estab_match(sap, daddr, laddr, rc)) {
- /* Extra checks required by SLAB_DESTROY_BY_RCU */
+ /* Extra checks required by SLAB_TYPESAFE_BY_RCU */
if (unlikely(!atomic_inc_not_zero(&rc->sk_refcnt)))
goto again;
if (unlikely(llc_sk(rc)->sap != sap ||
@@ -565,7 +565,7 @@ static struct sock *__llc_lookup_listener(struct llc_sap *sap,
again:
sk_nulls_for_each_rcu(rc, node, laddr_hb) {
if (llc_listener_match(sap, laddr, rc)) {
- /* Extra checks required by SLAB_DESTROY_BY_RCU */
+ /* Extra checks required by SLAB_TYPESAFE_BY_RCU */
if (unlikely(!atomic_inc_not_zero(&rc->sk_refcnt)))
goto again;
if (unlikely(llc_sk(rc)->sap != sap ||
diff --git a/net/llc/llc_sap.c b/net/llc/llc_sap.c
index 5404d0d195cc..63b6ab056370 100644
--- a/net/llc/llc_sap.c
+++ b/net/llc/llc_sap.c
@@ -328,7 +328,7 @@ static struct sock *llc_lookup_dgram(struct llc_sap *sap,
again:
sk_nulls_for_each_rcu(rc, node, laddr_hb) {
if (llc_dgram_match(sap, laddr, rc)) {
- /* Extra checks required by SLAB_DESTROY_BY_RCU */
+ /* Extra checks required by SLAB_TYPESAFE_BY_RCU */
if (unlikely(!atomic_inc_not_zero(&rc->sk_refcnt)))
goto again;
if (unlikely(llc_sk(rc)->sap != sap ||
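
The comments touched in these LLC lookup paths all describe the same discipline: with SLAB_TYPESAFE_BY_RCU (the new name for SLAB_DESTROY_BY_RCU), an object's memory may be recycled for another object of the same type while an RCU reader still holds a pointer to it. A minimal sketch of that lookup pattern, assuming an illustrative struct and a hypothetical obj_put() helper rather than the real LLC code (caller holds rcu_read_lock()):

struct obj {
	atomic_t refcnt;
	int key;
	struct hlist_nulls_node node;
};

static struct obj *obj_lookup(struct hlist_nulls_head *head, int key)
{
	struct obj *o;
	struct hlist_nulls_node *n;

again:
	hlist_nulls_for_each_entry_rcu(o, n, head, node) {
		if (o->key != key)
			continue;
		/* The slot may be mid-reuse: only proceed if the object is
		 * still live, i.e. its refcount has not dropped to zero. */
		if (unlikely(!atomic_inc_not_zero(&o->refcnt)))
			goto again;
		/* Re-check identity after taking the reference; the memory
		 * may now back a different object of the same type. */
		if (unlikely(o->key != key)) {
			obj_put(o);	/* hypothetical release helper */
			goto again;
		}
		return o;
	}
	return NULL;
}
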
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index 6db09fa18269..364d4e137649 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -66,6 +66,8 @@ ieee80211_ibss_build_presp(struct ieee80211_sub_if_data *sdata,
2 + (IEEE80211_MAX_SUPP_RATES - 8) +
2 + sizeof(struct ieee80211_ht_cap) +
2 + sizeof(struct ieee80211_ht_operation) +
+ 2 + sizeof(struct ieee80211_vht_cap) +
+ 2 + sizeof(struct ieee80211_vht_operation) +
ifibss->ie_len;
presp = kzalloc(sizeof(*presp) + frame_len, GFP_KERNEL);
if (!presp)
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 89dff563b1ec..0ea9712bd99e 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -4382,6 +4382,10 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
if (WARN_ON(!ifmgd->auth_data && !ifmgd->assoc_data))
return -EINVAL;
+ /* If a reconfig is happening, bail out */
+ if (local->in_reconfig)
+ return -EBUSY;
+
if (assoc) {
rcu_read_lock();
have_sta = sta_info_get(sdata, cbss->bssid);
diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
index 088e2b459d0f..257ec66009da 100644
--- a/net/mpls/af_mpls.c
+++ b/net/mpls/af_mpls.c
@@ -2005,10 +2005,7 @@ static int resize_platform_label_table(struct net *net, size_t limit)
unsigned index;
if (size) {
- labels = kzalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
- if (!labels)
- labels = vzalloc(size);
-
+ labels = kvzalloc(size, GFP_KERNEL);
if (!labels)
goto nolabels;
}
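
Several hunks in this series make the same conversion as the one above: the open-coded "try kmalloc, fall back to vmalloc" sequence is replaced by the kvmalloc() family, which picks the backing allocator internally and whose result is always released with kvfree(). A sketch of the before/after shape (size and ptr are placeholders):

	/* Before: open-coded fallback */
	ptr = kzalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
	if (!ptr)
		ptr = vzalloc(size);

	/* After: let the helper decide between kmalloc and vmalloc */
	ptr = kvzalloc(size, GFP_KERNEL);

	/* Either way, a single free path works */
	kvfree(ptr);
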
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 668d9643f0cc..1fa3c2307b6e 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -3078,6 +3078,17 @@ nla_put_failure:
return skb->len;
}
+static bool ip_vs_is_af_valid(int af)
+{
+ if (af == AF_INET)
+ return true;
+#ifdef CONFIG_IP_VS_IPV6
+ if (af == AF_INET6 && ipv6_mod_enabled())
+ return true;
+#endif
+ return false;
+}
+
static int ip_vs_genl_parse_service(struct netns_ipvs *ipvs,
struct ip_vs_service_user_kern *usvc,
struct nlattr *nla, int full_entry,
@@ -3105,11 +3116,7 @@ static int ip_vs_genl_parse_service(struct netns_ipvs *ipvs,
memset(usvc, 0, sizeof(*usvc));
usvc->af = nla_get_u16(nla_af);
-#ifdef CONFIG_IP_VS_IPV6
- if (usvc->af != AF_INET && usvc->af != AF_INET6)
-#else
- if (usvc->af != AF_INET)
-#endif
+ if (!ip_vs_is_af_valid(usvc->af))
return -EAFNOSUPPORT;
if (nla_fwmark) {
@@ -3612,6 +3619,11 @@ static int ip_vs_genl_set_cmd(struct sk_buff *skb, struct genl_info *info)
if (udest.af == 0)
udest.af = svc->af;
+ if (!ip_vs_is_af_valid(udest.af)) {
+ ret = -EAFNOSUPPORT;
+ goto out;
+ }
+
if (udest.af != svc->af && cmd != IPVS_CMD_DEL_DEST) {
/* The synchronization protocol is incompatible
* with mixed family services
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index f9245dbfe435..e847dbaa0c6b 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -911,7 +911,7 @@ static unsigned int early_drop_list(struct net *net,
continue;
/* kill only if still in same netns -- might have moved due to
- * SLAB_DESTROY_BY_RCU rules.
+ * SLAB_TYPESAFE_BY_RCU rules.
*
* We steal the timer reference. If that fails timer has
* already fired or someone else deleted it. Just drop ref
@@ -1114,7 +1114,7 @@ __nf_conntrack_alloc(struct net *net,
/*
* Do not use kmem_cache_zalloc(), as this cache uses
- * SLAB_DESTROY_BY_RCU.
+ * SLAB_TYPESAFE_BY_RCU.
*/
ct = kmem_cache_alloc(nf_conntrack_cachep, gfp);
if (ct == NULL)
@@ -1159,7 +1159,7 @@ void nf_conntrack_free(struct nf_conn *ct)
struct net *net = nf_ct_net(ct);
/* A freed object has refcnt == 0, that's
- * the golden rule for SLAB_DESTROY_BY_RCU
+ * the golden rule for SLAB_TYPESAFE_BY_RCU
*/
NF_CT_ASSERT(atomic_read(&ct->ct_general.use) == 0);
@@ -1853,7 +1853,7 @@ EXPORT_SYMBOL_GPL(nf_conntrack_set_hashsize);
module_param_call(hashsize, nf_conntrack_set_hashsize, param_get_uint,
&nf_conntrack_htable_size, 0600);
-static unsigned int total_extension_size(void)
+static __always_inline unsigned int total_extension_size(void)
{
/* remember to add new extensions below */
BUILD_BUG_ON(NF_CT_EXT_NUM > 9);
@@ -1929,7 +1929,7 @@ int nf_conntrack_init_start(void)
nf_conntrack_cachep = kmem_cache_create("nf_conntrack",
sizeof(struct nf_conn),
NFCT_INFOMASK + 1,
- SLAB_DESTROY_BY_RCU | SLAB_HWCACHE_ALIGN, NULL);
+ SLAB_TYPESAFE_BY_RCU | SLAB_HWCACHE_ALIGN, NULL);
if (!nf_conntrack_cachep)
goto err_cachep;
diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
index 4b9dfe3eef62..3a60efa7799b 100644
--- a/net/netfilter/nf_conntrack_helper.c
+++ b/net/netfilter/nf_conntrack_helper.c
@@ -385,7 +385,7 @@ int nf_conntrack_helper_register(struct nf_conntrack_helper *me)
struct nf_conntrack_tuple_mask mask = { .src.u.all = htons(0xFFFF) };
unsigned int h = helper_hash(&me->tuple);
struct nf_conntrack_helper *cur;
- int ret = 0;
+ int ret = 0, i;
BUG_ON(me->expect_policy == NULL);
BUG_ON(me->expect_class_max >= NF_CT_MAX_EXPECT_CLASSES);
@@ -395,10 +395,26 @@ int nf_conntrack_helper_register(struct nf_conntrack_helper *me)
return -EINVAL;
mutex_lock(&nf_ct_helper_mutex);
- hlist_for_each_entry(cur, &nf_ct_helper_hash[h], hnode) {
- if (nf_ct_tuple_src_mask_cmp(&cur->tuple, &me->tuple, &mask)) {
- ret = -EEXIST;
- goto out;
+ for (i = 0; i < nf_ct_helper_hsize; i++) {
+ hlist_for_each_entry(cur, &nf_ct_helper_hash[i], hnode) {
+ if (!strcmp(cur->name, me->name) &&
+ (cur->tuple.src.l3num == NFPROTO_UNSPEC ||
+ cur->tuple.src.l3num == me->tuple.src.l3num) &&
+ cur->tuple.dst.protonum == me->tuple.dst.protonum) {
+ ret = -EEXIST;
+ goto out;
+ }
+ }
+ }
+
+ /* avoid unpredictable behaviour for auto_assign_helper */
+ if (!(me->flags & NF_CT_HELPER_F_USERSPACE)) {
+ hlist_for_each_entry(cur, &nf_ct_helper_hash[h], hnode) {
+ if (nf_ct_tuple_src_mask_cmp(&cur->tuple, &me->tuple,
+ &mask)) {
+ ret = -EEXIST;
+ goto out;
+ }
}
}
hlist_add_head_rcu(&me->hnode, &nf_ct_helper_hash[h]);
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 5f6f2f388928..dcf561b5c97a 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -417,8 +417,7 @@ nla_put_failure:
return -1;
}
-static int ctnetlink_dump_ct_seq_adj(struct sk_buff *skb,
- const struct nf_conn *ct)
+static int ctnetlink_dump_ct_seq_adj(struct sk_buff *skb, struct nf_conn *ct)
{
struct nf_conn_seqadj *seqadj = nfct_seqadj(ct);
struct nf_ct_seqadj *seq;
@@ -426,15 +425,20 @@ static int ctnetlink_dump_ct_seq_adj(struct sk_buff *skb,
if (!(ct->status & IPS_SEQ_ADJUST) || !seqadj)
return 0;
+ spin_lock_bh(&ct->lock);
seq = &seqadj->seq[IP_CT_DIR_ORIGINAL];
if (dump_ct_seq_adj(skb, seq, CTA_SEQ_ADJ_ORIG) == -1)
- return -1;
+ goto err;
seq = &seqadj->seq[IP_CT_DIR_REPLY];
if (dump_ct_seq_adj(skb, seq, CTA_SEQ_ADJ_REPLY) == -1)
- return -1;
+ goto err;
+ spin_unlock_bh(&ct->lock);
return 0;
+err:
+ spin_unlock_bh(&ct->lock);
+ return -1;
}
static int ctnetlink_dump_id(struct sk_buff *skb, const struct nf_conn *ct)
@@ -1417,6 +1421,24 @@ ctnetlink_parse_nat_setup(struct nf_conn *ct,
}
#endif
+static void
+__ctnetlink_change_status(struct nf_conn *ct, unsigned long on,
+ unsigned long off)
+{
+ unsigned int bit;
+
+ /* Ignore these unchangeable bits */
+ on &= ~IPS_UNCHANGEABLE_MASK;
+ off &= ~IPS_UNCHANGEABLE_MASK;
+
+ for (bit = 0; bit < __IPS_MAX_BIT; bit++) {
+ if (on & (1 << bit))
+ set_bit(bit, &ct->status);
+ else if (off & (1 << bit))
+ clear_bit(bit, &ct->status);
+ }
+}
+
static int
ctnetlink_change_status(struct nf_conn *ct, const struct nlattr * const cda[])
{
@@ -1436,10 +1458,7 @@ ctnetlink_change_status(struct nf_conn *ct, const struct nlattr * const cda[])
/* ASSURED bit can only be set */
return -EBUSY;
- /* Be careful here, modifying NAT bits can screw up things,
- * so don't let users modify them directly if they don't pass
- * nf_nat_range. */
- ct->status |= status & ~(IPS_NAT_DONE_MASK | IPS_NAT_MASK);
+ __ctnetlink_change_status(ct, status, 0);
return 0;
}
@@ -1508,23 +1527,11 @@ static int ctnetlink_change_helper(struct nf_conn *ct,
return 0;
}
+ rcu_read_lock();
helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct),
nf_ct_protonum(ct));
if (helper == NULL) {
-#ifdef CONFIG_MODULES
- spin_unlock_bh(&nf_conntrack_expect_lock);
-
- if (request_module("nfct-helper-%s", helpname) < 0) {
- spin_lock_bh(&nf_conntrack_expect_lock);
- return -EOPNOTSUPP;
- }
-
- spin_lock_bh(&nf_conntrack_expect_lock);
- helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct),
- nf_ct_protonum(ct));
- if (helper)
- return -EAGAIN;
-#endif
+ rcu_read_unlock();
return -EOPNOTSUPP;
}
@@ -1533,13 +1540,16 @@ static int ctnetlink_change_helper(struct nf_conn *ct,
/* update private helper data if allowed. */
if (helper->from_nlattr)
helper->from_nlattr(helpinfo, ct);
- return 0;
+ err = 0;
} else
- return -EBUSY;
+ err = -EBUSY;
+ } else {
+ /* we cannot set a helper for an existing conntrack */
+ err = -EOPNOTSUPP;
}
- /* we cannot set a helper for an existing conntrack */
- return -EOPNOTSUPP;
+ rcu_read_unlock();
+ return err;
}
static int ctnetlink_change_timeout(struct nf_conn *ct,
@@ -1630,25 +1640,30 @@ ctnetlink_change_seq_adj(struct nf_conn *ct,
if (!seqadj)
return 0;
+ spin_lock_bh(&ct->lock);
if (cda[CTA_SEQ_ADJ_ORIG]) {
ret = change_seq_adj(&seqadj->seq[IP_CT_DIR_ORIGINAL],
cda[CTA_SEQ_ADJ_ORIG]);
if (ret < 0)
- return ret;
+ goto err;
- ct->status |= IPS_SEQ_ADJUST;
+ set_bit(IPS_SEQ_ADJUST_BIT, &ct->status);
}
if (cda[CTA_SEQ_ADJ_REPLY]) {
ret = change_seq_adj(&seqadj->seq[IP_CT_DIR_REPLY],
cda[CTA_SEQ_ADJ_REPLY]);
if (ret < 0)
- return ret;
+ goto err;
- ct->status |= IPS_SEQ_ADJUST;
+ set_bit(IPS_SEQ_ADJUST_BIT, &ct->status);
}
+ spin_unlock_bh(&ct->lock);
return 0;
+err:
+ spin_unlock_bh(&ct->lock);
+ return ret;
}
static int
@@ -1959,9 +1974,7 @@ static int ctnetlink_new_conntrack(struct net *net, struct sock *ctnl,
err = -EEXIST;
ct = nf_ct_tuplehash_to_ctrack(h);
if (!(nlh->nlmsg_flags & NLM_F_EXCL)) {
- spin_lock_bh(&nf_conntrack_expect_lock);
err = ctnetlink_change_conntrack(ct, cda);
- spin_unlock_bh(&nf_conntrack_expect_lock);
if (err == 0) {
nf_conntrack_eventmask_report((1 << IPCT_REPLY) |
(1 << IPCT_ASSURED) |
@@ -2294,10 +2307,10 @@ ctnetlink_update_status(struct nf_conn *ct, const struct nlattr * const cda[])
/* This check is less strict than ctnetlink_change_status()
* because callers often flip IPS_EXPECTED bits when sending
* an NFQA_CT attribute to the kernel. So ignore the
- * unchangeable bits but do not error out.
+ * unchangeable bits but do not error out. Also user programs
+ * are allowed to clear the bits that they are allowed to change.
*/
- ct->status = (status & ~IPS_UNCHANGEABLE_MASK) |
- (ct->status & IPS_UNCHANGEABLE_MASK);
+ __ctnetlink_change_status(ct, status, ~status);
return 0;
}
@@ -2351,11 +2364,7 @@ ctnetlink_glue_parse(const struct nlattr *attr, struct nf_conn *ct)
if (ret < 0)
return ret;
- spin_lock_bh(&nf_conntrack_expect_lock);
- ret = ctnetlink_glue_parse_ct((const struct nlattr **)cda, ct);
- spin_unlock_bh(&nf_conntrack_expect_lock);
-
- return ret;
+ return ctnetlink_glue_parse_ct((const struct nlattr **)cda, ct);
}
static int ctnetlink_glue_exp_parse(const struct nlattr * const *cda,
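
The new __ctnetlink_change_status() helper above replaces non-atomic read-modify-write updates of ct->status with per-bit set_bit()/clear_bit() calls, so concurrent updates from other CPUs are not lost. The two call sites in this patch use it as follows (a minimal restatement of the hunks above, not new behaviour):

	/* ctnetlink_change_status(): userspace may only set bits */
	__ctnetlink_change_status(ct, status, 0);

	/* ctnetlink_update_status(): set the requested changeable bits and
	 * clear the complementary ones in one pass; IPS_UNCHANGEABLE_MASK
	 * bits are filtered out inside the helper. */
	__ctnetlink_change_status(ct, status, ~status);
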
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 1c6482d2c4dc..559225029740 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -3778,6 +3778,11 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
err = set->ops->insert(ctx->net, set, &elem, &ext2);
if (err) {
if (err == -EEXIST) {
+ if (nft_set_ext_exists(ext, NFT_SET_EXT_DATA) ^
+ nft_set_ext_exists(ext2, NFT_SET_EXT_DATA) ||
+ nft_set_ext_exists(ext, NFT_SET_EXT_OBJREF) ^
+ nft_set_ext_exists(ext2, NFT_SET_EXT_OBJREF))
+ return -EBUSY;
if ((nft_set_ext_exists(ext, NFT_SET_EXT_DATA) &&
nft_set_ext_exists(ext2, NFT_SET_EXT_DATA) &&
memcmp(nft_set_ext_data(ext),
diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c
index 3948da380259..66221ad891a9 100644
--- a/net/netfilter/nft_dynset.c
+++ b/net/netfilter/nft_dynset.c
@@ -82,8 +82,7 @@ static void nft_dynset_eval(const struct nft_expr *expr,
nft_set_ext_exists(ext, NFT_SET_EXT_EXPIRATION)) {
timeout = priv->timeout ? : set->timeout;
*nft_set_ext_expiration(ext) = jiffies + timeout;
- } else if (sexpr == NULL)
- goto out;
+ }
if (sexpr != NULL)
sexpr->ops->eval(sexpr, regs, pkt);
@@ -92,7 +91,7 @@ static void nft_dynset_eval(const struct nft_expr *expr,
regs->verdict.code = NFT_BREAK;
return;
}
-out:
+
if (!priv->invert)
regs->verdict.code = NFT_BREAK;
}
diff --git a/net/netfilter/nft_set_bitmap.c b/net/netfilter/nft_set_bitmap.c
index 8ebbc2940f4c..b988162b5b15 100644
--- a/net/netfilter/nft_set_bitmap.c
+++ b/net/netfilter/nft_set_bitmap.c
@@ -257,6 +257,11 @@ static int nft_bitmap_init(const struct nft_set *set,
static void nft_bitmap_destroy(const struct nft_set *set)
{
+ struct nft_bitmap *priv = nft_set_priv(set);
+ struct nft_bitmap_elem *be, *n;
+
+ list_for_each_entry_safe(be, n, &priv->list, head)
+ nft_set_elem_destroy(set, be, true);
}
static bool nft_bitmap_estimate(const struct nft_set_desc *desc, u32 features,
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index 14857afc9937..8876b7da6884 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -763,17 +763,8 @@ EXPORT_SYMBOL(xt_check_entry_offsets);
*/
unsigned int *xt_alloc_entry_offsets(unsigned int size)
{
- unsigned int *off;
+ return kvmalloc_array(size, sizeof(unsigned int), GFP_KERNEL | __GFP_ZERO);
- off = kcalloc(size, sizeof(unsigned int), GFP_KERNEL | __GFP_NOWARN);
-
- if (off)
- return off;
-
- if (size < (SIZE_MAX / sizeof(unsigned int)))
- off = vmalloc(size * sizeof(unsigned int));
-
- return off;
}
EXPORT_SYMBOL(xt_alloc_entry_offsets);
@@ -1007,8 +998,7 @@ struct xt_table_info *xt_alloc_table_info(unsigned int size)
if (sz <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER))
info = kmalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
if (!info) {
- info = __vmalloc(sz, GFP_KERNEL | __GFP_NOWARN |
- __GFP_NORETRY | __GFP_HIGHMEM,
+ info = __vmalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY,
PAGE_KERNEL);
if (!info)
return NULL;
@@ -1051,8 +1041,10 @@ struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af,
list_for_each_entry(t, &init_net.xt.tables[af], list) {
if (strcmp(t->name, name))
continue;
- if (!try_module_get(t->me))
+ if (!try_module_get(t->me)) {
+ mutex_unlock(&xt[af].mutex);
return NULL;
+ }
mutex_unlock(&xt[af].mutex);
if (t->table_init(net) != 0) {
@@ -1114,7 +1106,7 @@ static int xt_jumpstack_alloc(struct xt_table_info *i)
size = sizeof(void **) * nr_cpu_ids;
if (size > PAGE_SIZE)
- i->jumpstack = vzalloc(size);
+ i->jumpstack = kvzalloc(size, GFP_KERNEL);
else
i->jumpstack = kzalloc(size, GFP_KERNEL);
if (i->jumpstack == NULL)
@@ -1136,12 +1128,8 @@ static int xt_jumpstack_alloc(struct xt_table_info *i)
*/
size = sizeof(void *) * i->stacksize * 2u;
for_each_possible_cpu(cpu) {
- if (size > PAGE_SIZE)
- i->jumpstack[cpu] = vmalloc_node(size,
- cpu_to_node(cpu));
- else
- i->jumpstack[cpu] = kmalloc_node(size,
- GFP_KERNEL, cpu_to_node(cpu));
+ i->jumpstack[cpu] = kvmalloc_node(size, GFP_KERNEL,
+ cpu_to_node(cpu));
if (i->jumpstack[cpu] == NULL)
/*
* Freeing will be done later on by the callers. The
diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c
index 3cbe1bcf6a74..bb7ad82dcd56 100644
--- a/net/netfilter/xt_CT.c
+++ b/net/netfilter/xt_CT.c
@@ -168,8 +168,10 @@ xt_ct_set_timeout(struct nf_conn *ct, const struct xt_tgchk_param *par,
goto err_put_timeout;
}
timeout_ext = nf_ct_timeout_ext_add(ct, timeout, GFP_ATOMIC);
- if (timeout_ext == NULL)
+ if (!timeout_ext) {
ret = -ENOMEM;
+ goto err_put_timeout;
+ }
rcu_read_unlock();
return ret;
@@ -201,6 +203,7 @@ static int xt_ct_tg_check(const struct xt_tgchk_param *par,
struct xt_ct_target_info_v1 *info)
{
struct nf_conntrack_zone zone;
+ struct nf_conn_help *help;
struct nf_conn *ct;
int ret = -EOPNOTSUPP;
@@ -249,7 +252,7 @@ static int xt_ct_tg_check(const struct xt_tgchk_param *par,
if (info->timeout[0]) {
ret = xt_ct_set_timeout(ct, par, info->timeout);
if (ret < 0)
- goto err3;
+ goto err4;
}
__set_bit(IPS_CONFIRMED_BIT, &ct->status);
nf_conntrack_get(&ct->ct_general);
@@ -257,6 +260,10 @@ out:
info->ct = ct;
return 0;
+err4:
+ help = nfct_help(ct);
+ if (help)
+ module_put(help->helper->me);
err3:
nf_ct_tmpl_free(ct);
err2:
diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c
index 37d581a31cff..3f6c4fa78bdb 100644
--- a/net/netfilter/xt_recent.c
+++ b/net/netfilter/xt_recent.c
@@ -388,10 +388,7 @@ static int recent_mt_check(const struct xt_mtchk_param *par,
}
sz = sizeof(*t) + sizeof(t->iphash[0]) * ip_list_hash_size;
- if (sz <= PAGE_SIZE)
- t = kzalloc(sz, GFP_KERNEL);
- else
- t = vzalloc(sz);
+ t = kvzalloc(sz, GFP_KERNEL);
if (t == NULL) {
ret = -ENOMEM;
goto out;
diff --git a/net/netfilter/xt_socket.c b/net/netfilter/xt_socket.c
index 770bbec878f1..e75ef39669c5 100644
--- a/net/netfilter/xt_socket.c
+++ b/net/netfilter/xt_socket.c
@@ -152,7 +152,7 @@ static int socket_mt_enable_defrag(struct net *net, int family)
switch (family) {
case NFPROTO_IPV4:
return nf_defrag_ipv4_enable(net);
-#ifdef XT_SOCKET_HAVE_IPV6
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
case NFPROTO_IPV6:
return nf_defrag_ipv6_enable(net);
#endif
diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
index 42a95919df09..bf602e33c40a 100644
--- a/net/openvswitch/conntrack.c
+++ b/net/openvswitch/conntrack.c
@@ -516,10 +516,38 @@ ovs_ct_expect_find(struct net *net, const struct nf_conntrack_zone *zone,
u16 proto, const struct sk_buff *skb)
{
struct nf_conntrack_tuple tuple;
+ struct nf_conntrack_expect *exp;
if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb), proto, net, &tuple))
return NULL;
- return __nf_ct_expect_find(net, zone, &tuple);
+
+ exp = __nf_ct_expect_find(net, zone, &tuple);
+ if (exp) {
+ struct nf_conntrack_tuple_hash *h;
+
+ /* Delete existing conntrack entry, if it clashes with the
+ * expectation. This can happen since conntrack ALGs do not
+ * check for clashes between (new) expectations and existing
+ * conntrack entries. nf_conntrack_in() will check the
+ * expectations only if a conntrack entry cannot be found,
+ * which can lead to OVS finding the expectation (here) in the
+ * init direction, but which will not be removed by the
+ * nf_conntrack_in() call, if a matching conntrack entry is
+ * found instead. In this case all init direction packets
+ * would be reported as new related packets, while reply
+ * direction packets would be reported as unrelated
+ * established packets.
+ */
+ h = nf_conntrack_find_get(net, zone, &tuple);
+ if (h) {
+ struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
+
+ nf_ct_delete(ct, 0, 0);
+ nf_conntrack_put(&ct->ct_general);
+ }
+ }
+
+ return exp;
}
/* This replicates logic from nf_conntrack_core.c that is not exported. */
diff --git a/net/sched/cls_matchall.c b/net/sched/cls_matchall.c
index 2efb36c08f2a..dee469fed967 100644
--- a/net/sched/cls_matchall.c
+++ b/net/sched/cls_matchall.c
@@ -203,8 +203,7 @@ static int mall_change(struct net *net, struct sk_buff *in_skb,
*arg = (unsigned long) head;
rcu_assign_pointer(tp->root, new);
- if (head)
- call_rcu(&head->rcu, mall_destroy_rcu);
+ call_rcu(&head->rcu, mall_destroy_rcu);
return 0;
err_replace_hw_filter:
diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c
index d00f4c7c2f3a..b30a2c70bd48 100644
--- a/net/sched/sch_choke.c
+++ b/net/sched/sch_choke.c
@@ -376,10 +376,7 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt)
if (mask != q->tab_mask) {
struct sk_buff **ntab;
- ntab = kcalloc(mask + 1, sizeof(struct sk_buff *),
- GFP_KERNEL | __GFP_NOWARN);
- if (!ntab)
- ntab = vzalloc((mask + 1) * sizeof(struct sk_buff *));
+ ntab = kvmalloc_array((mask + 1), sizeof(struct sk_buff *), GFP_KERNEL | __GFP_ZERO);
if (!ntab)
return -ENOMEM;
diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
index da4f67bda0ee..b488721a0059 100644
--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@ -624,16 +624,6 @@ static void fq_rehash(struct fq_sched_data *q,
q->stat_gc_flows += fcnt;
}
-static void *fq_alloc_node(size_t sz, int node)
-{
- void *ptr;
-
- ptr = kmalloc_node(sz, GFP_KERNEL | __GFP_REPEAT | __GFP_NOWARN, node);
- if (!ptr)
- ptr = vmalloc_node(sz, node);
- return ptr;
-}
-
static void fq_free(void *addr)
{
kvfree(addr);
@@ -650,7 +640,7 @@ static int fq_resize(struct Qdisc *sch, u32 log)
return 0;
/* If XPS was setup, we can allocate memory on right NUMA node */
- array = fq_alloc_node(sizeof(struct rb_root) << log,
+ array = kvmalloc_node(sizeof(struct rb_root) << log, GFP_KERNEL | __GFP_REPEAT,
netdev_queue_numa_node_read(sch->dev_queue));
if (!array)
return -ENOMEM;
diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
index 18bbb5476c83..9201abce928c 100644
--- a/net/sched/sch_fq_codel.c
+++ b/net/sched/sch_fq_codel.c
@@ -446,27 +446,13 @@ static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt)
return 0;
}
-static void *fq_codel_zalloc(size_t sz)
-{
- void *ptr = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN);
-
- if (!ptr)
- ptr = vzalloc(sz);
- return ptr;
-}
-
-static void fq_codel_free(void *addr)
-{
- kvfree(addr);
-}
-
static void fq_codel_destroy(struct Qdisc *sch)
{
struct fq_codel_sched_data *q = qdisc_priv(sch);
tcf_destroy_chain(&q->filter_list);
- fq_codel_free(q->backlogs);
- fq_codel_free(q->flows);
+ kvfree(q->backlogs);
+ kvfree(q->flows);
}
static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt)
@@ -493,13 +479,13 @@ static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt)
}
if (!q->flows) {
- q->flows = fq_codel_zalloc(q->flows_cnt *
- sizeof(struct fq_codel_flow));
+ q->flows = kvzalloc(q->flows_cnt *
+ sizeof(struct fq_codel_flow), GFP_KERNEL);
if (!q->flows)
return -ENOMEM;
- q->backlogs = fq_codel_zalloc(q->flows_cnt * sizeof(u32));
+ q->backlogs = kvzalloc(q->flows_cnt * sizeof(u32), GFP_KERNEL);
if (!q->backlogs) {
- fq_codel_free(q->flows);
+ kvfree(q->flows);
return -ENOMEM;
}
for (i = 0; i < q->flows_cnt; i++) {
diff --git a/net/sched/sch_hhf.c b/net/sched/sch_hhf.c
index c19d346e6c5a..51d3ba682af9 100644
--- a/net/sched/sch_hhf.c
+++ b/net/sched/sch_hhf.c
@@ -467,29 +467,14 @@ static void hhf_reset(struct Qdisc *sch)
rtnl_kfree_skbs(skb, skb);
}
-static void *hhf_zalloc(size_t sz)
-{
- void *ptr = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN);
-
- if (!ptr)
- ptr = vzalloc(sz);
-
- return ptr;
-}
-
-static void hhf_free(void *addr)
-{
- kvfree(addr);
-}
-
static void hhf_destroy(struct Qdisc *sch)
{
int i;
struct hhf_sched_data *q = qdisc_priv(sch);
for (i = 0; i < HHF_ARRAYS_CNT; i++) {
- hhf_free(q->hhf_arrays[i]);
- hhf_free(q->hhf_valid_bits[i]);
+ kvfree(q->hhf_arrays[i]);
+ kvfree(q->hhf_valid_bits[i]);
}
for (i = 0; i < HH_FLOWS_CNT; i++) {
@@ -503,7 +488,7 @@ static void hhf_destroy(struct Qdisc *sch)
kfree(flow);
}
}
- hhf_free(q->hh_flows);
+ kvfree(q->hh_flows);
}
static const struct nla_policy hhf_policy[TCA_HHF_MAX + 1] = {
@@ -609,8 +594,8 @@ static int hhf_init(struct Qdisc *sch, struct nlattr *opt)
if (!q->hh_flows) {
/* Initialize heavy-hitter flow table. */
- q->hh_flows = hhf_zalloc(HH_FLOWS_CNT *
- sizeof(struct list_head));
+ q->hh_flows = kvzalloc(HH_FLOWS_CNT *
+ sizeof(struct list_head), GFP_KERNEL);
if (!q->hh_flows)
return -ENOMEM;
for (i = 0; i < HH_FLOWS_CNT; i++)
@@ -624,8 +609,8 @@ static int hhf_init(struct Qdisc *sch, struct nlattr *opt)
/* Initialize heavy-hitter filter arrays. */
for (i = 0; i < HHF_ARRAYS_CNT; i++) {
- q->hhf_arrays[i] = hhf_zalloc(HHF_ARRAYS_LEN *
- sizeof(u32));
+ q->hhf_arrays[i] = kvzalloc(HHF_ARRAYS_LEN *
+ sizeof(u32), GFP_KERNEL);
if (!q->hhf_arrays[i]) {
/* Note: hhf_destroy() will be called
* by our caller.
@@ -637,8 +622,8 @@ static int hhf_init(struct Qdisc *sch, struct nlattr *opt)
/* Initialize valid bits of heavy-hitter filter arrays. */
for (i = 0; i < HHF_ARRAYS_CNT; i++) {
- q->hhf_valid_bits[i] = hhf_zalloc(HHF_ARRAYS_LEN /
- BITS_PER_BYTE);
+ q->hhf_valid_bits[i] = kvzalloc(HHF_ARRAYS_LEN /
+ BITS_PER_BYTE, GFP_KERNEL);
if (!q->hhf_valid_bits[i]) {
/* Note: hhf_destroy() will be called
* by our caller.
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index f0ce4780f395..1b3dd6190e93 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -702,15 +702,11 @@ static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr)
spinlock_t *root_lock;
struct disttable *d;
int i;
- size_t s;
if (n > NETEM_DIST_MAX)
return -EINVAL;
- s = sizeof(struct disttable) + n * sizeof(s16);
- d = kmalloc(s, GFP_KERNEL | __GFP_NOWARN);
- if (!d)
- d = vmalloc(s);
+ d = kvmalloc(sizeof(struct disttable) + n * sizeof(s16), GFP_KERNEL);
if (!d)
return -ENOMEM;
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index b00e02c139de..332d94be6e1c 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -685,11 +685,7 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
static void *sfq_alloc(size_t sz)
{
- void *ptr = kmalloc(sz, GFP_KERNEL | __GFP_NOWARN);
-
- if (!ptr)
- ptr = vmalloc(sz);
- return ptr;
+ return kvmalloc(sz, GFP_KERNEL);
}
static void sfq_free(void *addr)
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index 5b6ee21368a6..6793d7348cc8 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -101,7 +101,7 @@ struct proto smc_proto = {
.unhash = smc_unhash_sk,
.obj_size = sizeof(struct smc_sock),
.h.smc_hash = &smc_v4_hashinfo,
- .slab_flags = SLAB_DESTROY_BY_RCU,
+ .slab_flags = SLAB_TYPESAFE_BY_RCU,
};
EXPORT_SYMBOL_GPL(smc_proto);
diff --git a/net/sysctl_net.c b/net/sysctl_net.c
index 919981324171..9aed6fe1bf1a 100644
--- a/net/sysctl_net.c
+++ b/net/sysctl_net.c
@@ -106,7 +106,6 @@ __init int net_sysctl_init(void)
ret = register_pernet_subsys(&sysctl_pernet_ops);
if (ret)
goto out1;
- register_sysctl_root(&net_sysctl_root);
out:
return ret;
out1:
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 570fc95dc507..c3bc9da30cff 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -2764,8 +2764,8 @@ static int nl80211_parse_mon_options(struct cfg80211_registered_device *rdev,
nla_data(info->attrs[NL80211_ATTR_MU_MIMO_GROUP_DATA]);
/* bits 0 and 63 are reserved and must be zero */
- if ((mumimo_groups[0] & BIT(7)) ||
- (mumimo_groups[VHT_MUMIMO_GROUPS_DATA_LEN - 1] & BIT(0)))
+ if ((mumimo_groups[0] & BIT(0)) ||
+ (mumimo_groups[VHT_MUMIMO_GROUPS_DATA_LEN - 1] & BIT(7)))
return -EINVAL;
params->vht_mumimo_groups = mumimo_groups;
diff --git a/samples/bpf/bpf_load.c b/samples/bpf/bpf_load.c
index 4221dc359453..74456b3eb89a 100644
--- a/samples/bpf/bpf_load.c
+++ b/samples/bpf/bpf_load.c
@@ -39,6 +39,9 @@ int event_fd[MAX_PROGS];
int prog_cnt;
int prog_array_fd = -1;
+struct bpf_map_data map_data[MAX_MAPS];
+int map_data_count = 0;
+
static int populate_prog_array(const char *event, int prog_fd)
{
int ind = atoi(event), err;
@@ -186,42 +189,45 @@ static int load_and_attach(const char *event, struct bpf_insn *prog, int size)
return 0;
}
-static int load_maps(struct bpf_map_def *maps, int nr_maps,
- const char **map_names, fixup_map_cb fixup_map)
+static int load_maps(struct bpf_map_data *maps, int nr_maps,
+ fixup_map_cb fixup_map)
{
int i;
- /*
- * Warning: Using "maps" pointing to ELF data_maps->d_buf as
- * an array of struct bpf_map_def is a wrong assumption about
- * the ELF maps section format.
- */
+
for (i = 0; i < nr_maps; i++) {
- if (fixup_map)
- fixup_map(&maps[i], map_names[i], i);
-
- if (maps[i].type == BPF_MAP_TYPE_ARRAY_OF_MAPS ||
- maps[i].type == BPF_MAP_TYPE_HASH_OF_MAPS) {
- int inner_map_fd = map_fd[maps[i].inner_map_idx];
-
- map_fd[i] = bpf_create_map_in_map(maps[i].type,
- maps[i].key_size,
- inner_map_fd,
- maps[i].max_entries,
- maps[i].map_flags);
+ if (fixup_map) {
+ fixup_map(&maps[i], i);
+ /* Allow userspace to assign map FD prior to creation */
+ if (maps[i].fd != -1) {
+ map_fd[i] = maps[i].fd;
+ continue;
+ }
+ }
+
+ if (maps[i].def.type == BPF_MAP_TYPE_ARRAY_OF_MAPS ||
+ maps[i].def.type == BPF_MAP_TYPE_HASH_OF_MAPS) {
+ int inner_map_fd = map_fd[maps[i].def.inner_map_idx];
+
+ map_fd[i] = bpf_create_map_in_map(maps[i].def.type,
+ maps[i].def.key_size,
+ inner_map_fd,
+ maps[i].def.max_entries,
+ maps[i].def.map_flags);
} else {
- map_fd[i] = bpf_create_map(maps[i].type,
- maps[i].key_size,
- maps[i].value_size,
- maps[i].max_entries,
- maps[i].map_flags);
+ map_fd[i] = bpf_create_map(maps[i].def.type,
+ maps[i].def.key_size,
+ maps[i].def.value_size,
+ maps[i].def.max_entries,
+ maps[i].def.map_flags);
}
if (map_fd[i] < 0) {
printf("failed to create a map: %d %s\n",
errno, strerror(errno));
return 1;
}
+ maps[i].fd = map_fd[i];
- if (maps[i].type == BPF_MAP_TYPE_PROG_ARRAY)
+ if (maps[i].def.type == BPF_MAP_TYPE_PROG_ARRAY)
prog_array_fd = map_fd[i];
}
return 0;
@@ -251,7 +257,8 @@ static int get_sec(Elf *elf, int i, GElf_Ehdr *ehdr, char **shname,
}
static int parse_relo_and_apply(Elf_Data *data, Elf_Data *symbols,
- GElf_Shdr *shdr, struct bpf_insn *insn)
+ GElf_Shdr *shdr, struct bpf_insn *insn,
+ struct bpf_map_data *maps, int nr_maps)
{
int i, nrels;
@@ -261,6 +268,8 @@ static int parse_relo_and_apply(Elf_Data *data, Elf_Data *symbols,
GElf_Sym sym;
GElf_Rel rel;
unsigned int insn_idx;
+ bool match = false;
+ int j, map_idx;
gelf_getrel(data, i, &rel);
@@ -274,11 +283,21 @@ static int parse_relo_and_apply(Elf_Data *data, Elf_Data *symbols,
return 1;
}
insn[insn_idx].src_reg = BPF_PSEUDO_MAP_FD;
- /*
- * Warning: Using sizeof(struct bpf_map_def) here is a
- * wrong assumption about ELF maps section format
- */
- insn[insn_idx].imm = map_fd[sym.st_value / sizeof(struct bpf_map_def)];
+
+ /* Match FD relocation against recorded map_data[] offset */
+ for (map_idx = 0; map_idx < nr_maps; map_idx++) {
+ if (maps[map_idx].elf_offset == sym.st_value) {
+ match = true;
+ break;
+ }
+ }
+ if (match) {
+ insn[insn_idx].imm = maps[map_idx].fd;
+ } else {
+ printf("invalid relo for insn[%d] no map_data match\n",
+ insn_idx);
+ return 1;
+ }
}
return 0;
@@ -297,40 +316,112 @@ static int cmp_symbols(const void *l, const void *r)
return 0;
}
-static int get_sorted_map_names(Elf *elf, Elf_Data *symbols, int maps_shndx,
- int strtabidx, char **map_names)
+static int load_elf_maps_section(struct bpf_map_data *maps, int maps_shndx,
+ Elf *elf, Elf_Data *symbols, int strtabidx)
{
- GElf_Sym map_symbols[MAX_MAPS];
- int i, nr_maps = 0;
+ int map_sz_elf, map_sz_copy;
+ bool validate_zero = false;
+ Elf_Data *data_maps;
+ int i, nr_maps;
+ GElf_Sym *sym;
+ Elf_Scn *scn;
+ int copy_sz;
+
+ if (maps_shndx < 0)
+ return -EINVAL;
+ if (!symbols)
+ return -EINVAL;
+
+ /* Get data for maps section via elf index */
+ scn = elf_getscn(elf, maps_shndx);
+ if (scn)
+ data_maps = elf_getdata(scn, NULL);
+ if (!scn || !data_maps) {
+ printf("Failed to get Elf_Data from maps section %d\n",
+ maps_shndx);
+ return -EINVAL;
+ }
- for (i = 0; i < symbols->d_size / sizeof(GElf_Sym); i++) {
- assert(nr_maps < MAX_MAPS);
- if (!gelf_getsym(symbols, i, &map_symbols[nr_maps]))
+ /* For each map get corresponding symbol table entry */
+ sym = calloc(MAX_MAPS+1, sizeof(GElf_Sym));
+ for (i = 0, nr_maps = 0; i < symbols->d_size / sizeof(GElf_Sym); i++) {
+ assert(nr_maps < MAX_MAPS+1);
+ if (!gelf_getsym(symbols, i, &sym[nr_maps]))
continue;
- if (map_symbols[nr_maps].st_shndx != maps_shndx)
+ if (sym[nr_maps].st_shndx != maps_shndx)
continue;
+ /* Only increment nr_maps if the symbol is in the maps section */
nr_maps++;
}
- qsort(map_symbols, nr_maps, sizeof(GElf_Sym), cmp_symbols);
+ /* Align to map_fd[] order, via sort on offset in sym.st_value */
+ qsort(sym, nr_maps, sizeof(GElf_Sym), cmp_symbols);
+
+ /* Keeping compatible with ELF maps section changes
+ * ------------------------------------------------
+ * The size of struct bpf_map_def is known to the loader
+ * code, but the struct stored in the ELF file can be different.
+ *
+ * Unfortunately sym[i].st_size is zero. To calculate the
+ * struct size stored in the ELF file, assume all structs have
+ * the same size, and simply divide by the number of map
+ * symbols.
+ */
+ map_sz_elf = data_maps->d_size / nr_maps;
+ map_sz_copy = sizeof(struct bpf_map_def);
+ if (map_sz_elf < map_sz_copy) {
+ /*
+ * Backward compat: loading an older ELF file with a
+ * smaller struct, keeping the remaining bytes zero.
+ */
+ map_sz_copy = map_sz_elf;
+ } else if (map_sz_elf > map_sz_copy) {
+ /*
+ * Forward compat: loading a newer ELF file with a larger
+ * struct that has unknown features. Assume zero means a
+ * feature is not used; thus, validate that the rest of the
+ * struct data is zero.
+ */
+ validate_zero = true;
+ }
+ /* Memcpy relevant part of ELF maps data to loader maps */
for (i = 0; i < nr_maps; i++) {
- char *map_name;
-
- map_name = elf_strptr(elf, strtabidx, map_symbols[i].st_name);
- if (!map_name) {
- printf("cannot get map symbol\n");
- return -1;
- }
-
- map_names[i] = strdup(map_name);
- if (!map_names[i]) {
+ unsigned char *addr, *end;
+ struct bpf_map_def *def;
+ const char *map_name;
+ size_t offset;
+
+ map_name = elf_strptr(elf, strtabidx, sym[i].st_name);
+ maps[i].name = strdup(map_name);
+ if (!maps[i].name) {
printf("strdup(%s): %s(%d)\n", map_name,
strerror(errno), errno);
- return -1;
+ free(sym);
+ return -errno;
+ }
+
+ /* Symbol value is offset into ELF maps section data area */
+ offset = sym[i].st_value;
+ def = (struct bpf_map_def *)(data_maps->d_buf + offset);
+ maps[i].elf_offset = offset;
+ memset(&maps[i].def, 0, sizeof(struct bpf_map_def));
+ memcpy(&maps[i].def, def, map_sz_copy);
+
+ /* Verify no newer features were requested */
+ if (validate_zero) {
+ addr = (unsigned char*) def + map_sz_copy;
+ end = (unsigned char*) def + map_sz_elf;
+ for (; addr < end; addr++) {
+ if (*addr != 0) {
+ free(sym);
+ return -EFBIG;
+ }
+ }
}
}
+ free(sym);
return nr_maps;
}
@@ -341,7 +432,8 @@ static int do_load_bpf_file(const char *path, fixup_map_cb fixup_map)
GElf_Ehdr ehdr;
GElf_Shdr shdr, shdr_prog;
Elf_Data *data, *data_prog, *data_maps = NULL, *symbols = NULL;
- char *shname, *shname_prog, *map_names[MAX_MAPS] = { NULL };
+ char *shname, *shname_prog;
+ int nr_maps = 0;
/* reset global variables */
kern_version = 0;
@@ -389,8 +481,12 @@ static int do_load_bpf_file(const char *path, fixup_map_cb fixup_map)
}
memcpy(&kern_version, data->d_buf, sizeof(int));
} else if (strcmp(shname, "maps") == 0) {
+ int j;
+
maps_shndx = i;
data_maps = data;
+ for (j = 0; j < MAX_MAPS; j++)
+ map_data[j].fd = -1;
} else if (shdr.sh_type == SHT_SYMTAB) {
strtabidx = shdr.sh_link;
symbols = data;
@@ -405,27 +501,17 @@ static int do_load_bpf_file(const char *path, fixup_map_cb fixup_map)
}
if (data_maps) {
- int nr_maps;
- int prog_elf_map_sz;
-
- nr_maps = get_sorted_map_names(elf, symbols, maps_shndx,
- strtabidx, map_names);
- if (nr_maps < 0)
- goto done;
-
- /* Deduce map struct size stored in ELF maps section */
- prog_elf_map_sz = data_maps->d_size / nr_maps;
- if (prog_elf_map_sz != sizeof(struct bpf_map_def)) {
- printf("Error: ELF maps sec wrong size (%d/%lu),"
- " old kern.o file?\n",
- prog_elf_map_sz, sizeof(struct bpf_map_def));
+ nr_maps = load_elf_maps_section(map_data, maps_shndx,
+ elf, symbols, strtabidx);
+ if (nr_maps < 0) {
+ printf("Error: Failed loading ELF maps (errno:%d):%s\n",
+ nr_maps, strerror(-nr_maps));
ret = 1;
goto done;
}
-
- if (load_maps(data_maps->d_buf, nr_maps,
- (const char **)map_names, fixup_map))
+ if (load_maps(map_data, nr_maps, fixup_map))
goto done;
+ map_data_count = nr_maps;
processed_sec[maps_shndx] = true;
}
@@ -453,7 +539,8 @@ static int do_load_bpf_file(const char *path, fixup_map_cb fixup_map)
processed_sec[shdr.sh_info] = true;
processed_sec[i] = true;
- if (parse_relo_and_apply(data, symbols, &shdr, insns))
+ if (parse_relo_and_apply(data, symbols, &shdr, insns,
+ map_data, nr_maps))
continue;
if (memcmp(shname_prog, "kprobe/", 7) == 0 ||
@@ -488,8 +575,6 @@ static int do_load_bpf_file(const char *path, fixup_map_cb fixup_map)
ret = 0;
done:
- for (i = 0; i < MAX_MAPS; i++)
- free(map_names[i]);
close(fd);
return ret;
}
diff --git a/samples/bpf/bpf_load.h b/samples/bpf/bpf_load.h
index 05822f83173a..ca0563d04744 100644
--- a/samples/bpf/bpf_load.h
+++ b/samples/bpf/bpf_load.h
@@ -15,15 +15,27 @@ struct bpf_map_def {
unsigned int inner_map_idx;
};
-typedef void (*fixup_map_cb)(struct bpf_map_def *map, const char *map_name,
- int idx);
+struct bpf_map_data {
+ int fd;
+ char *name;
+ size_t elf_offset;
+ struct bpf_map_def def;
+};
+
+typedef void (*fixup_map_cb)(struct bpf_map_data *map, int idx);
-extern int map_fd[MAX_MAPS];
extern int prog_fd[MAX_PROGS];
extern int event_fd[MAX_PROGS];
extern char bpf_log_buf[BPF_LOG_BUF_SIZE];
extern int prog_cnt;
+/* There is a one-to-one mapping between map_fd[] and map_data[].
+ * The map_data[] just contains richer info on the given map.
+ */
+extern int map_fd[MAX_MAPS];
+extern struct bpf_map_data map_data[MAX_MAPS];
+extern int map_data_count;
+
/* parses elf file compiled by llvm .c->.o
* . parses 'maps' section and creates maps via BPF syscall
* . parses 'license' section and passes it to syscall
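
With map_data[] and map_data_count now exported alongside map_fd[], a sample program can locate a map by name instead of relying on its position in the ELF maps section. A hedged sketch of such a helper; find_map_fd_by_name() is hypothetical and not part of this patch:

	static int find_map_fd_by_name(const char *name)
	{
		int i;

		for (i = 0; i < map_data_count; i++)
			if (!strcmp(map_data[i].name, name))
				return map_data[i].fd;	/* same value as map_fd[i] */
		return -1;
	}
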
diff --git a/samples/bpf/map_perf_test_user.c b/samples/bpf/map_perf_test_user.c
index 6ac778153315..1a8894b5ac51 100644
--- a/samples/bpf/map_perf_test_user.c
+++ b/samples/bpf/map_perf_test_user.c
@@ -320,21 +320,21 @@ static void fill_lpm_trie(void)
assert(!r);
}
-static void fixup_map(struct bpf_map_def *map, const char *name, int idx)
+static void fixup_map(struct bpf_map_data *map, int idx)
{
int i;
- if (!strcmp("inner_lru_hash_map", name)) {
+ if (!strcmp("inner_lru_hash_map", map->name)) {
inner_lru_hash_idx = idx;
- inner_lru_hash_size = map->max_entries;
+ inner_lru_hash_size = map->def.max_entries;
}
- if (!strcmp("array_of_lru_hashs", name)) {
+ if (!strcmp("array_of_lru_hashs", map->name)) {
if (inner_lru_hash_idx == -1) {
printf("inner_lru_hash_map must be defined before array_of_lru_hashs\n");
exit(1);
}
- map->inner_map_idx = inner_lru_hash_idx;
+ map->def.inner_map_idx = inner_lru_hash_idx;
array_of_lru_hashs_idx = idx;
}
@@ -345,9 +345,9 @@ static void fixup_map(struct bpf_map_def *map, const char *name, int idx)
/* Only change the max_entries for the enabled test(s) */
for (i = 0; i < NR_TESTS; i++) {
- if (!strcmp(test_map_names[i], name) &&
+ if (!strcmp(test_map_names[i], map->name) &&
(check_test_flags(i))) {
- map->max_entries = num_map_entries;
+ map->def.max_entries = num_map_entries;
}
}
}
diff --git a/samples/bpf/tracex2_user.c b/samples/bpf/tracex2_user.c
index ded9804c5034..7fee0f1ba9a3 100644
--- a/samples/bpf/tracex2_user.c
+++ b/samples/bpf/tracex2_user.c
@@ -4,6 +4,7 @@
#include <signal.h>
#include <linux/bpf.h>
#include <string.h>
+#include <sys/resource.h>
#include "libbpf.h"
#include "bpf_load.h"
@@ -112,6 +113,7 @@ static void int_exit(int sig)
int main(int ac, char **argv)
{
+ struct rlimit r = {1024*1024, RLIM_INFINITY};
char filename[256];
long key, next_key, value;
FILE *f;
@@ -119,6 +121,11 @@ int main(int ac, char **argv)
snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+ if (setrlimit(RLIMIT_MEMLOCK, &r)) {
+ perror("setrlimit(RLIMIT_MEMLOCK)");
+ return 1;
+ }
+
signal(SIGINT, int_exit);
/* start 'ping' in the background to have some kfree_skb events */
diff --git a/samples/bpf/tracex3_user.c b/samples/bpf/tracex3_user.c
index 8f7d199d5945..fe372239d505 100644
--- a/samples/bpf/tracex3_user.c
+++ b/samples/bpf/tracex3_user.c
@@ -11,6 +11,7 @@
#include <stdbool.h>
#include <string.h>
#include <linux/bpf.h>
+#include <sys/resource.h>
#include "libbpf.h"
#include "bpf_load.h"
@@ -112,11 +113,17 @@ static void print_hist(int fd)
int main(int ac, char **argv)
{
+ struct rlimit r = {1024*1024, RLIM_INFINITY};
char filename[256];
int i;
snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+ if (setrlimit(RLIMIT_MEMLOCK, &r)) {
+ perror("setrlimit(RLIMIT_MEMLOCK)");
+ return 1;
+ }
+
if (load_bpf_file(filename)) {
printf("%s", bpf_log_buf);
return 1;
diff --git a/samples/bpf/tracex4_user.c b/samples/bpf/tracex4_user.c
index 03449f773cb1..22c644f1f4c3 100644
--- a/samples/bpf/tracex4_user.c
+++ b/samples/bpf/tracex4_user.c
@@ -12,6 +12,8 @@
#include <string.h>
#include <time.h>
#include <linux/bpf.h>
+#include <sys/resource.h>
+
#include "libbpf.h"
#include "bpf_load.h"
@@ -50,11 +52,17 @@ static void print_old_objects(int fd)
int main(int ac, char **argv)
{
+ struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY};
char filename[256];
int i;
snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+ if (setrlimit(RLIMIT_MEMLOCK, &r)) {
+ perror("setrlimit(RLIMIT_MEMLOCK, RLIM_INFINITY)");
+ return 1;
+ }
+
if (load_bpf_file(filename)) {
printf("%s", bpf_log_buf);
return 1;
diff --git a/samples/mei/TODO b/samples/mei/TODO
deleted file mode 100644
index 6b3625d3058c..000000000000
--- a/samples/mei/TODO
+++ /dev/null
@@ -1,2 +0,0 @@
-TODO:
- - Cleanup and split the timer function
diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib
index 7234e61e7ce3..c9f975ab9840 100644
--- a/scripts/Makefile.lib
+++ b/scripts/Makefile.lib
@@ -280,9 +280,21 @@ DTC ?= $(objtree)/scripts/dtc/dtc
# Disable noisy checks by default
ifeq ($(KBUILD_ENABLE_EXTRA_GCC_CHECKS),)
-DTC_FLAGS += -Wno-unit_address_vs_reg
+DTC_FLAGS += -Wno-unit_address_vs_reg \
+ -Wno-simple_bus_reg \
+ -Wno-unit_address_format \
+ -Wno-pci_bridge \
+ -Wno-pci_device_bus_num \
+ -Wno-pci_device_reg
endif
+ifeq ($(KBUILD_ENABLE_EXTRA_GCC_CHECKS),2)
+DTC_FLAGS += -Wnode_name_chars_strict \
+ -Wproperty_name_chars_strict
+endif
+
+DTC_FLAGS += $(DTC_FLAGS_$(basetarget))
+
# Generate an assembly file to wrap the output of the device tree compiler
quiet_cmd_dt_S_dtb= DTB $@
cmd_dt_S_dtb= \
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index baa3c7be04ad..4b9569fa931b 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -55,6 +55,7 @@ my $spelling_file = "$D/spelling.txt";
my $codespell = 0;
my $codespellfile = "/usr/share/codespell/dictionary.txt";
my $conststructsfile = "$D/const_structs.checkpatch";
+my $typedefsfile = "";
my $color = 1;
my $allow_c99_comments = 1;
@@ -113,6 +114,7 @@ Options:
--codespell Use the codespell dictionary for spelling/typos
(default:/usr/share/codespell/dictionary.txt)
--codespellfile Use this codespell dictionary
+ --typedefsfile Read additional types from this file
--color Use colors when output is STDOUT (default: on)
-h, --help, --version display this help and exit
@@ -208,6 +210,7 @@ GetOptions(
'test-only=s' => \$tst_only,
'codespell!' => \$codespell,
'codespellfile=s' => \$codespellfile,
+ 'typedefsfile=s' => \$typedefsfile,
'color!' => \$color,
'h|help' => \$help,
'version' => \$help
@@ -629,29 +632,44 @@ if ($codespell) {
$misspellings = join("|", sort keys %spelling_fix) if keys %spelling_fix;
-my $const_structs = "";
-if (open(my $conststructs, '<', $conststructsfile)) {
- while (<$conststructs>) {
- my $line = $_;
+sub read_words {
+ my ($wordsRef, $file) = @_;
- $line =~ s/\s*\n?$//g;
- $line =~ s/^\s*//g;
+ if (open(my $words, '<', $file)) {
+ while (<$words>) {
+ my $line = $_;
- next if ($line =~ m/^\s*#/);
- next if ($line =~ m/^\s*$/);
- if ($line =~ /\s/) {
- print("$conststructsfile: '$line' invalid - ignored\n");
- next;
- }
+ $line =~ s/\s*\n?$//g;
+ $line =~ s/^\s*//g;
- $const_structs .= '|' if ($const_structs ne "");
- $const_structs .= $line;
+ next if ($line =~ m/^\s*#/);
+ next if ($line =~ m/^\s*$/);
+ if ($line =~ /\s/) {
+ print("$file: '$line' invalid - ignored\n");
+ next;
+ }
+
+ $$wordsRef .= '|' if ($$wordsRef ne "");
+ $$wordsRef .= $line;
+ }
+ close($file);
+ return 1;
}
- close($conststructsfile);
-} else {
- warn "No structs that should be const will be found - file '$conststructsfile': $!\n";
+
+ return 0;
}
+my $const_structs = "";
+read_words(\$const_structs, $conststructsfile)
+ or warn "No structs that should be const will be found - file '$conststructsfile': $!\n";
+
+my $typeOtherTypedefs = "";
+if (length($typedefsfile)) {
+ read_words(\$typeOtherTypedefs, $typedefsfile)
+ or warn "No additional types will be considered - file '$typedefsfile': $!\n";
+}
+$typeTypedefs .= '|' . $typeOtherTypedefs if ($typeOtherTypedefs ne "");
+
sub build_types {
my $mods = "(?x: \n" . join("|\n ", (@modifierList, @modifierListFile)) . "\n)";
my $all = "(?x: \n" . join("|\n ", (@typeList, @typeListFile)) . "\n)";
@@ -2195,8 +2213,7 @@ sub process {
}
#next;
}
- if ($rawline=~/^\@\@ -\d+(?:,\d+)? \+(\d+)(,(\d+))? \@\@(.*)/) {
- my $context = $4;
+ if ($rawline =~ /^\@\@ -\d+(?:,\d+)? \+(\d+)(,(\d+))? \@\@/) {
$realline=$1-1;
if (defined $2) {
$realcnt=$3+1;
@@ -2205,12 +2222,6 @@ sub process {
}
$in_comment = 0;
- if ($context =~ /\b(\w+)\s*\(/) {
- $context_function = $1;
- } else {
- undef $context_function;
- }
-
# Guestimate if this is a continuing comment. Run
# the context looking for a comment "edge". If this
# edge is a close comment then we must be in a comment
@@ -2281,7 +2292,8 @@ sub process {
#extract the line range in the file after the patch is applied
if (!$in_commit_log &&
- $line =~ /^\@\@ -\d+(?:,\d+)? \+(\d+)(,(\d+))? \@\@/) {
+ $line =~ /^\@\@ -\d+(?:,\d+)? \+(\d+)(,(\d+))? \@\@(.*)/) {
+ my $context = $4;
$is_patch = 1;
$first_line = $linenr + 1;
$realline=$1-1;
@@ -2297,6 +2309,11 @@ sub process {
%suppress_whiletrailers = ();
%suppress_export = ();
$suppress_statement = 0;
+ if ($context =~ /\b(\w+)\s*\(/) {
+ $context_function = $1;
+ } else {
+ undef $context_function;
+ }
next;
# track the line number as we move through the hunk, note that
@@ -2539,6 +2556,7 @@ sub process {
# Check for git id commit length and improperly formed commit descriptions
if ($in_commit_log && !$commit_log_possible_stack_dump &&
$line !~ /^\s*(?:Link|Patchwork|http|https|BugLink):/i &&
+ $line !~ /^This reverts commit [0-9a-f]{7,40}/ &&
($line =~ /\bcommit\s+[0-9a-f]{5,}\b/i ||
($line =~ /(?:\s|^)[0-9a-f]{12,40}(?:[\s"'\(\[]|$)/i &&
$line !~ /[\<\[][0-9a-f]{12,40}[\>\]]/i &&
@@ -2628,8 +2646,8 @@ sub process {
# Check if it's the start of a commit log
# (not a header line and we haven't seen the patch filename)
if ($in_header_lines && $realfile =~ /^$/ &&
- !($rawline =~ /^\s+\S/ ||
- $rawline =~ /^(commit\b|from\b|[\w-]+:).*$/i)) {
+ !($rawline =~ /^\s+(?:\S|$)/ ||
+ $rawline =~ /^(?:commit\b|from\b|[\w-]+:)/i)) {
$in_header_lines = 0;
$in_commit_log = 1;
$has_commit_log = 1;
@@ -2757,13 +2775,6 @@ sub process {
#print "is_start<$is_start> is_end<$is_end> length<$length>\n";
}
-# discourage the addition of CONFIG_EXPERIMENTAL in Kconfig.
- if ($realfile =~ /Kconfig/ &&
- $line =~ /.\s*depends on\s+.*\bEXPERIMENTAL\b/) {
- WARN("CONFIG_EXPERIMENTAL",
- "Use of CONFIG_EXPERIMENTAL is deprecated. For alternatives, see https://lkml.org/lkml/2012/10/23/580\n");
- }
-
# discourage the use of boolean for type definition attributes of Kconfig options
if ($realfile =~ /Kconfig/ &&
$line =~ /^\+\s*\bboolean\b/) {
@@ -3133,6 +3144,17 @@ sub process {
# check we are in a valid C source file if not then ignore this hunk
next if ($realfile !~ /\.(h|c)$/);
+# check if this appears to be the start of a function declaration, save the name
+ if ($sline =~ /^\+\{\s*$/ &&
+ $prevline =~ /^\+(?:(?:(?:$Storage|$Inline)\s*)*\s*$Type\s*)?($Ident)\(/) {
+ $context_function = $1;
+ }
+
+# check if this appears to be the end of a function declaration
+ if ($sline =~ /^\+\}\s*$/) {
+ undef $context_function;
+ }
+
# check indentation of any line with a bare else
# (but not if it is a multiple line "if (foo) return bar; else return baz;")
# if the previous line is a break or return and is indented 1 tab more...
@@ -3157,12 +3179,6 @@ sub process {
}
}
-# discourage the addition of CONFIG_EXPERIMENTAL in #if(def).
- if ($line =~ /^\+\s*\#\s*if.*\bCONFIG_EXPERIMENTAL\b/) {
- WARN("CONFIG_EXPERIMENTAL",
- "Use of CONFIG_EXPERIMENTAL is deprecated. For alternatives, see https://lkml.org/lkml/2012/10/23/580\n");
- }
-
# check for RCS/CVS revision markers
if ($rawline =~ /^\+.*\$(Revision|Log|Id)(?:\$|)/) {
WARN("CVS_KEYWORD",
@@ -3338,7 +3354,7 @@ sub process {
}
# Check relative indent for conditionals and blocks.
- if ($line =~ /\b(?:(?:if|while|for|(?:[a-z_]+|)for_each[a-z_]+)\s*\(|do\b)/ && $line !~ /^.\s*#/ && $line !~ /\}\s*while\s*/) {
+ if ($line =~ /\b(?:(?:if|while|for|(?:[a-z_]+|)for_each[a-z_]+)\s*\(|(?:do|else)\b)/ && $line !~ /^.\s*#/ && $line !~ /\}\s*while\s*/) {
($stat, $cond, $line_nr_next, $remain_next, $off_next) =
ctx_statement_block($linenr, $realcnt, 0)
if (!defined $stat);
@@ -3430,6 +3446,8 @@ sub process {
if ($check && $s ne '' &&
(($sindent % 8) != 0 ||
($sindent < $indent) ||
+ ($sindent == $indent &&
+ ($s !~ /^\s*(?:\}|\{|else\b)/)) ||
($sindent > $indent + 8))) {
WARN("SUSPECT_CODE_INDENT",
"suspect code indent for conditional statements ($indent, $sindent)\n" . $herecurr . "$stat_real\n");
@@ -4851,8 +4869,10 @@ sub process {
$dstat !~ /^\(\{/ && # ({...
$ctx !~ /^.\s*#\s*define\s+TRACE_(?:SYSTEM|INCLUDE_FILE|INCLUDE_PATH)\b/)
{
-
- if ($dstat =~ /;/) {
+ if ($dstat =~ /^\s*if\b/) {
+ ERROR("MULTISTATEMENT_MACRO_USE_DO_WHILE",
+ "Macros starting with if should be enclosed by a do - while loop to avoid possible if/else logic defects\n" . "$herectx");
+ } elsif ($dstat =~ /;/) {
ERROR("MULTISTATEMENT_MACRO_USE_DO_WHILE",
"Macros with multiple statements should be enclosed in a do - while loop\n" . "$herectx");
} else {
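
The new branch above flags macros whose body begins with a bare if, because such a macro can silently capture an else in the caller. A short C illustration of the defect and of the preferred do-while form; the macro name is made up:

	/* Flagged form: a caller's else binds to the macro's if.
	 *	if (ready)
	 *		LOG_IF_BAD(val);
	 *	else
	 *		do_other_work();   <-- pairs with the macro's if, not with if (ready)
	 */
	#define LOG_IF_BAD(x)	if (!(x)) pr_err("bad value\n")

	/* Preferred form: the body behaves as a single statement */
	#undef LOG_IF_BAD
	#define LOG_IF_BAD(x)				\
		do {					\
			if (!(x))			\
				pr_err("bad value\n");	\
		} while (0)
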
@@ -5174,14 +5194,16 @@ sub process {
"break quoted strings at a space character\n" . $hereprev);
}
-#check for an embedded function name in a string when the function is known
-# as part of a diff. This does not work for -f --file checking as it
-#depends on patch context providing the function name
+# check for an embedded function name in a string when the function is known
+# This does not work very well for -f --file checking as it depends on patch
+# context providing the function name or a single line form for in-file
+# function declarations
if ($line =~ /^\+.*$String/ &&
defined($context_function) &&
- get_quoted_string($line, $rawline) =~ /\b$context_function\b/) {
+ get_quoted_string($line, $rawline) =~ /\b$context_function\b/ &&
+ length(get_quoted_string($line, $rawline)) != (length($context_function) + 2)) {
WARN("EMBEDDED_FUNCTION_NAME",
- "Prefer using \"%s\", __func__ to embedded function names\n" . $herecurr);
+ "Prefer using '\"%s...\", __func__' to using '$context_function', this function's name, in a string\n" . $herecurr);
}
# check for spaces before a quoted newline
@@ -5676,6 +5698,32 @@ sub process {
}
}
+ # check for vsprintf extension %p<foo> misuses
+ if ($^V && $^V ge 5.10.0 &&
+ defined $stat &&
+ $stat =~ /^\+(?![^\{]*\{\s*).*\b(\w+)\s*\(.*$String\s*,/s &&
+ $1 !~ /^_*volatile_*$/) {
+ my $bad_extension = "";
+ my $lc = $stat =~ tr@\n@@;
+ $lc = $lc + $linenr;
+ for (my $count = $linenr; $count <= $lc; $count++) {
+ my $fmt = get_quoted_string($lines[$count - 1], raw_line($count, 0));
+ $fmt =~ s/%%//g;
+ if ($fmt =~ /(\%[\*\d\.]*p(?![\WFfSsBKRraEhMmIiUDdgVCbGN]).)/) {
+ $bad_extension = $1;
+ last;
+ }
+ }
+ if ($bad_extension ne "") {
+ my $stat_real = raw_line($linenr, 0);
+ for (my $count = $linenr + 1; $count <= $lc; $count++) {
+ $stat_real = $stat_real . "\n" . raw_line($count, 0);
+ }
+ WARN("VSPRINTF_POINTER_EXTENSION",
+ "Invalid vsprintf pointer extension '$bad_extension'\n" . "$here\n$stat_real\n");
+ }
+ }
+
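
The VSPRINTF_POINTER_EXTENSION check added above rejects %p extensions that the kernel's vsprintf() does not recognise. A small C illustration of what it flags versus what it accepts; the pointer is purely illustrative:

	const void *addr = &addr;	/* any pointer, for illustration only */

	pr_info("addr %pX\n", addr);	/* flagged: 'X' is not a recognised %p extension */
	pr_info("addr %pK\n", addr);	/* accepted: %pK is a real extension (restricted pointers) */
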
# Check for misused memsets
if ($^V && $^V ge 5.10.0 &&
defined $stat &&
@@ -5893,7 +5941,8 @@ sub process {
# check for k[mz]alloc with multiplies that could be kmalloc_array/kcalloc
if ($^V && $^V ge 5.10.0 &&
- $line =~ /\b($Lval)\s*\=\s*(?:$balanced_parens)?\s*(k[mz]alloc)\s*\(\s*($FuncArg)\s*\*\s*($FuncArg)\s*,/) {
+ defined $stat &&
+ $stat =~ /^\+\s*($Lval)\s*\=\s*(?:$balanced_parens)?\s*(k[mz]alloc)\s*\(\s*($FuncArg)\s*\*\s*($FuncArg)\s*,/) {
my $oldfunc = $3;
my $a1 = $4;
my $a2 = $10;
@@ -5907,11 +5956,17 @@ sub process {
}
if ($r1 !~ /^sizeof\b/ && $r2 =~ /^sizeof\s*\S/ &&
!($r1 =~ /^$Constant$/ || $r1 =~ /^[A-Z_][A-Z0-9_]*$/)) {
+ my $ctx = '';
+ my $herectx = $here . "\n";
+ my $cnt = statement_rawlines($stat);
+ for (my $n = 0; $n < $cnt; $n++) {
+ $herectx .= raw_line($linenr, $n) . "\n";
+ }
if (WARN("ALLOC_WITH_MULTIPLY",
- "Prefer $newfunc over $oldfunc with multiply\n" . $herecurr) &&
+ "Prefer $newfunc over $oldfunc with multiply\n" . $herectx) &&
+ $cnt == 1 &&
$fix) {
$fixed[$fixlinenr] =~ s/\b($Lval)\s*\=\s*(?:$balanced_parens)?\s*(k[mz]alloc)\s*\(\s*($FuncArg)\s*\*\s*($FuncArg)/$1 . ' = ' . "$newfunc(" . trim($r1) . ', ' . trim($r2)/e;
-
}
}
}
@@ -6066,11 +6121,11 @@ sub process {
}
# check for various structs that are normally const (ops, kgdb, device_tree)
+# and avoid what seem like struct definitions 'struct foo {'
if ($line !~ /\bconst\b/ &&
- $line =~ /\bstruct\s+($const_structs)\b/) {
+ $line =~ /\bstruct\s+($const_structs)\b(?!\s*\{)/) {
WARN("CONST_STRUCT",
- "struct $1 should normally be const\n" .
- $herecurr);
+ "struct $1 should normally be const\n" . $herecurr);
}
# use of NR_CPUS is usually wrong
diff --git a/scripts/dtc/checks.c b/scripts/dtc/checks.c
index 3d18e45374c8..5adfc8f52b4f 100644
--- a/scripts/dtc/checks.c
+++ b/scripts/dtc/checks.c
@@ -72,17 +72,16 @@ struct check {
#define CHECK(_nm, _fn, _d, ...) \
CHECK_ENTRY(_nm, _fn, _d, false, false, __VA_ARGS__)
-#ifdef __GNUC__
-static inline void check_msg(struct check *c, const char *fmt, ...) __attribute__((format (printf, 2, 3)));
-#endif
-static inline void check_msg(struct check *c, const char *fmt, ...)
+static inline void PRINTF(3, 4) check_msg(struct check *c, struct dt_info *dti,
+ const char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
if ((c->warn && (quiet < 1))
|| (c->error && (quiet < 2))) {
- fprintf(stderr, "%s (%s): ",
+ fprintf(stderr, "%s: %s (%s): ",
+ strcmp(dti->outname, "-") ? dti->outname : "<stdout>",
(c->error) ? "ERROR" : "Warning", c->name);
vfprintf(stderr, fmt, ap);
fprintf(stderr, "\n");
@@ -90,11 +89,11 @@ static inline void check_msg(struct check *c, const char *fmt, ...)
va_end(ap);
}
-#define FAIL(c, ...) \
- do { \
- TRACE((c), "\t\tFAILED at %s:%d", __FILE__, __LINE__); \
- (c)->status = FAILED; \
- check_msg((c), __VA_ARGS__); \
+#define FAIL(c, dti, ...) \
+ do { \
+ TRACE((c), "\t\tFAILED at %s:%d", __FILE__, __LINE__); \
+ (c)->status = FAILED; \
+ check_msg((c), dti, __VA_ARGS__); \
} while (0)
static void check_nodes_props(struct check *c, struct dt_info *dti, struct node *node)
@@ -127,7 +126,7 @@ static bool run_check(struct check *c, struct dt_info *dti)
error = error || run_check(prq, dti);
if (prq->status != PASSED) {
c->status = PREREQ;
- check_msg(c, "Failed prerequisite '%s'",
+ check_msg(c, dti, "Failed prerequisite '%s'",
c->prereq[i]->name);
}
}
@@ -157,7 +156,7 @@ out:
static inline void check_always_fail(struct check *c, struct dt_info *dti,
struct node *node)
{
- FAIL(c, "always_fail check");
+ FAIL(c, dti, "always_fail check");
}
CHECK(always_fail, check_always_fail, NULL);
@@ -172,7 +171,7 @@ static void check_is_string(struct check *c, struct dt_info *dti,
return; /* Not present, assumed ok */
if (!data_is_one_string(prop->val))
- FAIL(c, "\"%s\" property in %s is not a string",
+ FAIL(c, dti, "\"%s\" property in %s is not a string",
propname, node->fullpath);
}
#define WARNING_IF_NOT_STRING(nm, propname) \
@@ -191,7 +190,7 @@ static void check_is_cell(struct check *c, struct dt_info *dti,
return; /* Not present, assumed ok */
if (prop->val.len != sizeof(cell_t))
- FAIL(c, "\"%s\" property in %s is not a single cell",
+ FAIL(c, dti, "\"%s\" property in %s is not a single cell",
propname, node->fullpath);
}
#define WARNING_IF_NOT_CELL(nm, propname) \
@@ -213,7 +212,7 @@ static void check_duplicate_node_names(struct check *c, struct dt_info *dti,
child2;
child2 = child2->next_sibling)
if (streq(child->name, child2->name))
- FAIL(c, "Duplicate node name %s",
+ FAIL(c, dti, "Duplicate node name %s",
child->fullpath);
}
ERROR(duplicate_node_names, check_duplicate_node_names, NULL);
@@ -228,7 +227,7 @@ static void check_duplicate_property_names(struct check *c, struct dt_info *dti,
if (prop2->deleted)
continue;
if (streq(prop->name, prop2->name))
- FAIL(c, "Duplicate property name %s in %s",
+ FAIL(c, dti, "Duplicate property name %s in %s",
prop->name, node->fullpath);
}
}
@@ -239,6 +238,7 @@ ERROR(duplicate_property_names, check_duplicate_property_names, NULL);
#define UPPERCASE "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
#define DIGITS "0123456789"
#define PROPNODECHARS LOWERCASE UPPERCASE DIGITS ",._+*#?-"
+#define PROPNODECHARSSTRICT LOWERCASE UPPERCASE DIGITS ",-"
static void check_node_name_chars(struct check *c, struct dt_info *dti,
struct node *node)
@@ -246,16 +246,27 @@ static void check_node_name_chars(struct check *c, struct dt_info *dti,
int n = strspn(node->name, c->data);
if (n < strlen(node->name))
- FAIL(c, "Bad character '%c' in node %s",
+ FAIL(c, dti, "Bad character '%c' in node %s",
node->name[n], node->fullpath);
}
ERROR(node_name_chars, check_node_name_chars, PROPNODECHARS "@");
+static void check_node_name_chars_strict(struct check *c, struct dt_info *dti,
+ struct node *node)
+{
+ int n = strspn(node->name, c->data);
+
+ if (n < node->basenamelen)
+ FAIL(c, dti, "Character '%c' not recommended in node %s",
+ node->name[n], node->fullpath);
+}
+CHECK(node_name_chars_strict, check_node_name_chars_strict, PROPNODECHARSSTRICT);
+
static void check_node_name_format(struct check *c, struct dt_info *dti,
struct node *node)
{
if (strchr(get_unitname(node), '@'))
- FAIL(c, "Node %s has multiple '@' characters in name",
+ FAIL(c, dti, "Node %s has multiple '@' characters in name",
node->fullpath);
}
ERROR(node_name_format, check_node_name_format, NULL, &node_name_chars);
@@ -274,11 +285,11 @@ static void check_unit_address_vs_reg(struct check *c, struct dt_info *dti,
if (prop) {
if (!unitname[0])
- FAIL(c, "Node %s has a reg or ranges property, but no unit name",
+ FAIL(c, dti, "Node %s has a reg or ranges property, but no unit name",
node->fullpath);
} else {
if (unitname[0])
- FAIL(c, "Node %s has a unit name, but no reg property",
+ FAIL(c, dti, "Node %s has a unit name, but no reg property",
node->fullpath);
}
}
@@ -293,12 +304,44 @@ static void check_property_name_chars(struct check *c, struct dt_info *dti,
int n = strspn(prop->name, c->data);
if (n < strlen(prop->name))
- FAIL(c, "Bad character '%c' in property name \"%s\", node %s",
+ FAIL(c, dti, "Bad character '%c' in property name \"%s\", node %s",
prop->name[n], prop->name, node->fullpath);
}
}
ERROR(property_name_chars, check_property_name_chars, PROPNODECHARS);
+static void check_property_name_chars_strict(struct check *c,
+ struct dt_info *dti,
+ struct node *node)
+{
+ struct property *prop;
+
+ for_each_property(node, prop) {
+ const char *name = prop->name;
+ int n = strspn(name, c->data);
+
+ if (n == strlen(prop->name))
+ continue;
+
+ /* Certain names are whitelisted */
+ if (streq(name, "device_type"))
+ continue;
+
+ /*
+ * # is only allowed at the beginning of property names not counting
+ * the vendor prefix.
+ */
+ if (name[n] == '#' && ((n == 0) || (name[n-1] == ','))) {
+ name += n + 1;
+ n = strspn(name, c->data);
+ }
+ if (n < strlen(name))
+ FAIL(c, dti, "Character '%c' not recommended in property name \"%s\", node %s",
+ name[n], prop->name, node->fullpath);
+ }
+}
+CHECK(property_name_chars_strict, check_property_name_chars_strict, PROPNODECHARSSTRICT);
+
#define DESCLABEL_FMT "%s%s%s%s%s"
#define DESCLABEL_ARGS(node,prop,mark) \
((mark) ? "value of " : ""), \
@@ -327,7 +370,7 @@ static void check_duplicate_label(struct check *c, struct dt_info *dti,
return;
if ((othernode != node) || (otherprop != prop) || (othermark != mark))
- FAIL(c, "Duplicate label '%s' on " DESCLABEL_FMT
+ FAIL(c, dti, "Duplicate label '%s' on " DESCLABEL_FMT
" and " DESCLABEL_FMT,
label, DESCLABEL_ARGS(node, prop, mark),
DESCLABEL_ARGS(othernode, otherprop, othermark));
@@ -367,7 +410,7 @@ static cell_t check_phandle_prop(struct check *c, struct dt_info *dti,
return 0;
if (prop->val.len != sizeof(cell_t)) {
- FAIL(c, "%s has bad length (%d) %s property",
+ FAIL(c, dti, "%s has bad length (%d) %s property",
node->fullpath, prop->val.len, prop->name);
return 0;
}
@@ -379,7 +422,7 @@ static cell_t check_phandle_prop(struct check *c, struct dt_info *dti,
/* "Set this node's phandle equal to some
* other node's phandle". That's nonsensical
* by construction. */ {
- FAIL(c, "%s in %s is a reference to another node",
+ FAIL(c, dti, "%s in %s is a reference to another node",
prop->name, node->fullpath);
}
/* But setting this node's phandle equal to its own
@@ -393,7 +436,7 @@ static cell_t check_phandle_prop(struct check *c, struct dt_info *dti,
phandle = propval_cell(prop);
if ((phandle == 0) || (phandle == -1)) {
- FAIL(c, "%s has bad value (0x%x) in %s property",
+ FAIL(c, dti, "%s has bad value (0x%x) in %s property",
node->fullpath, phandle, prop->name);
return 0;
}
@@ -420,7 +463,7 @@ static void check_explicit_phandles(struct check *c, struct dt_info *dti,
return;
if (linux_phandle && phandle && (phandle != linux_phandle))
- FAIL(c, "%s has mismatching 'phandle' and 'linux,phandle'"
+ FAIL(c, dti, "%s has mismatching 'phandle' and 'linux,phandle'"
" properties", node->fullpath);
if (linux_phandle && !phandle)
@@ -428,7 +471,7 @@ static void check_explicit_phandles(struct check *c, struct dt_info *dti,
other = get_node_by_phandle(root, phandle);
if (other && (other != node)) {
- FAIL(c, "%s has duplicated phandle 0x%x (seen before at %s)",
+ FAIL(c, dti, "%s has duplicated phandle 0x%x (seen before at %s)",
node->fullpath, phandle, other->fullpath);
return;
}
@@ -453,7 +496,7 @@ static void check_name_properties(struct check *c, struct dt_info *dti,
if ((prop->val.len != node->basenamelen+1)
|| (memcmp(prop->val.val, node->name, node->basenamelen) != 0)) {
- FAIL(c, "\"name\" property in %s is incorrect (\"%s\" instead"
+ FAIL(c, dti, "\"name\" property in %s is incorrect (\"%s\" instead"
" of base node name)", node->fullpath, prop->val.val);
} else {
/* The name property is correct, and therefore redundant.
@@ -488,16 +531,16 @@ static void fixup_phandle_references(struct check *c, struct dt_info *dti,
refnode = get_node_by_ref(dt, m->ref);
if (! refnode) {
if (!(dti->dtsflags & DTSF_PLUGIN))
- FAIL(c, "Reference to non-existent node or "
+ FAIL(c, dti, "Reference to non-existent node or "
"label \"%s\"\n", m->ref);
else /* mark the entry as unresolved */
- *((cell_t *)(prop->val.val + m->offset)) =
+ *((fdt32_t *)(prop->val.val + m->offset)) =
cpu_to_fdt32(0xffffffff);
continue;
}
phandle = get_node_phandle(dt, refnode);
- *((cell_t *)(prop->val.val + m->offset)) = cpu_to_fdt32(phandle);
+ *((fdt32_t *)(prop->val.val + m->offset)) = cpu_to_fdt32(phandle);
}
}
}
@@ -520,7 +563,7 @@ static void fixup_path_references(struct check *c, struct dt_info *dti,
refnode = get_node_by_ref(dt, m->ref);
if (!refnode) {
- FAIL(c, "Reference to non-existent node or label \"%s\"\n",
+ FAIL(c, dti, "Reference to non-existent node or label \"%s\"\n",
m->ref);
continue;
}
@@ -579,19 +622,19 @@ static void check_reg_format(struct check *c, struct dt_info *dti,
return; /* No "reg", that's fine */
if (!node->parent) {
- FAIL(c, "Root node has a \"reg\" property");
+ FAIL(c, dti, "Root node has a \"reg\" property");
return;
}
if (prop->val.len == 0)
- FAIL(c, "\"reg\" property in %s is empty", node->fullpath);
+ FAIL(c, dti, "\"reg\" property in %s is empty", node->fullpath);
addr_cells = node_addr_cells(node->parent);
size_cells = node_size_cells(node->parent);
entrylen = (addr_cells + size_cells) * sizeof(cell_t);
if (!entrylen || (prop->val.len % entrylen) != 0)
- FAIL(c, "\"reg\" property in %s has invalid length (%d bytes) "
+ FAIL(c, dti, "\"reg\" property in %s has invalid length (%d bytes) "
"(#address-cells == %d, #size-cells == %d)",
node->fullpath, prop->val.len, addr_cells, size_cells);
}
@@ -608,7 +651,7 @@ static void check_ranges_format(struct check *c, struct dt_info *dti,
return;
if (!node->parent) {
- FAIL(c, "Root node has a \"ranges\" property");
+ FAIL(c, dti, "Root node has a \"ranges\" property");
return;
}
@@ -620,17 +663,17 @@ static void check_ranges_format(struct check *c, struct dt_info *dti,
if (prop->val.len == 0) {
if (p_addr_cells != c_addr_cells)
- FAIL(c, "%s has empty \"ranges\" property but its "
+ FAIL(c, dti, "%s has empty \"ranges\" property but its "
"#address-cells (%d) differs from %s (%d)",
node->fullpath, c_addr_cells, node->parent->fullpath,
p_addr_cells);
if (p_size_cells != c_size_cells)
- FAIL(c, "%s has empty \"ranges\" property but its "
+ FAIL(c, dti, "%s has empty \"ranges\" property but its "
"#size-cells (%d) differs from %s (%d)",
node->fullpath, c_size_cells, node->parent->fullpath,
p_size_cells);
} else if ((prop->val.len % entrylen) != 0) {
- FAIL(c, "\"ranges\" property in %s has invalid length (%d bytes) "
+ FAIL(c, dti, "\"ranges\" property in %s has invalid length (%d bytes) "
"(parent #address-cells == %d, child #address-cells == %d, "
"#size-cells == %d)", node->fullpath, prop->val.len,
p_addr_cells, c_addr_cells, c_size_cells);
@@ -638,6 +681,229 @@ static void check_ranges_format(struct check *c, struct dt_info *dti,
}
WARNING(ranges_format, check_ranges_format, NULL, &addr_size_cells);
+static const struct bus_type pci_bus = {
+ .name = "PCI",
+};
+
+static void check_pci_bridge(struct check *c, struct dt_info *dti, struct node *node)
+{
+ struct property *prop;
+ cell_t *cells;
+
+ prop = get_property(node, "device_type");
+ if (!prop || !streq(prop->val.val, "pci"))
+ return;
+
+ node->bus = &pci_bus;
+
+ if (!strneq(node->name, "pci", node->basenamelen) &&
+ !strneq(node->name, "pcie", node->basenamelen))
+ FAIL(c, dti, "Node %s node name is not \"pci\" or \"pcie\"",
+ node->fullpath);
+
+ prop = get_property(node, "ranges");
+ if (!prop)
+ FAIL(c, dti, "Node %s missing ranges for PCI bridge (or not a bridge)",
+ node->fullpath);
+
+ if (node_addr_cells(node) != 3)
+ FAIL(c, dti, "Node %s incorrect #address-cells for PCI bridge",
+ node->fullpath);
+ if (node_size_cells(node) != 2)
+ FAIL(c, dti, "Node %s incorrect #size-cells for PCI bridge",
+ node->fullpath);
+
+ prop = get_property(node, "bus-range");
+ if (!prop) {
+ FAIL(c, dti, "Node %s missing bus-range for PCI bridge",
+ node->fullpath);
+ return;
+ }
+ if (prop->val.len != (sizeof(cell_t) * 2)) {
+ FAIL(c, dti, "Node %s bus-range must be 2 cells",
+ node->fullpath);
+ return;
+ }
+ cells = (cell_t *)prop->val.val;
+ if (fdt32_to_cpu(cells[0]) > fdt32_to_cpu(cells[1]))
+ FAIL(c, dti, "Node %s bus-range 1st cell must be less than or equal to 2nd cell",
+ node->fullpath);
+ if (fdt32_to_cpu(cells[1]) > 0xff)
+ FAIL(c, dti, "Node %s bus-range maximum bus number must be less than 256",
+ node->fullpath);
+}
+WARNING(pci_bridge, check_pci_bridge, NULL,
+ &device_type_is_string, &addr_size_cells);
+
+static void check_pci_device_bus_num(struct check *c, struct dt_info *dti, struct node *node)
+{
+ struct property *prop;
+ unsigned int bus_num, min_bus, max_bus;
+ cell_t *cells;
+
+ if (!node->parent || (node->parent->bus != &pci_bus))
+ return;
+
+ prop = get_property(node, "reg");
+ if (!prop)
+ return;
+
+ cells = (cell_t *)prop->val.val;
+ bus_num = (fdt32_to_cpu(cells[0]) & 0x00ff0000) >> 16;
+
+ prop = get_property(node->parent, "bus-range");
+ if (!prop) {
+ min_bus = max_bus = 0;
+ } else {
+ cells = (cell_t *)prop->val.val;
+ min_bus = fdt32_to_cpu(cells[0]);
+ max_bus = fdt32_to_cpu(cells[1]);
+ }
+ if ((bus_num < min_bus) || (bus_num > max_bus))
+ FAIL(c, dti, "Node %s PCI bus number %d out of range, expected (%d - %d)",
+ node->fullpath, bus_num, min_bus, max_bus);
+}
+WARNING(pci_device_bus_num, check_pci_device_bus_num, NULL, &reg_format, &pci_bridge);
+
+static void check_pci_device_reg(struct check *c, struct dt_info *dti, struct node *node)
+{
+ struct property *prop;
+ const char *unitname = get_unitname(node);
+ char unit_addr[5];
+ unsigned int dev, func, reg;
+ cell_t *cells;
+
+ if (!node->parent || (node->parent->bus != &pci_bus))
+ return;
+
+ prop = get_property(node, "reg");
+ if (!prop) {
+ FAIL(c, dti, "Node %s missing PCI reg property", node->fullpath);
+ return;
+ }
+
+ cells = (cell_t *)prop->val.val;
+ if (cells[1] || cells[2])
+ FAIL(c, dti, "Node %s PCI reg config space address cells 2 and 3 must be 0",
+ node->fullpath);
+
+ reg = fdt32_to_cpu(cells[0]);
+ dev = (reg & 0xf800) >> 11;
+ func = (reg & 0x700) >> 8;
+
+ if (reg & 0xff000000)
+ FAIL(c, dti, "Node %s PCI reg address is not configuration space",
+ node->fullpath);
+ if (reg & 0x000000ff)
+ FAIL(c, dti, "Node %s PCI reg config space address register number must be 0",
+ node->fullpath);
+
+ if (func == 0) {
+ snprintf(unit_addr, sizeof(unit_addr), "%x", dev);
+ if (streq(unitname, unit_addr))
+ return;
+ }
+
+ snprintf(unit_addr, sizeof(unit_addr), "%x,%x", dev, func);
+ if (streq(unitname, unit_addr))
+ return;
+
+ FAIL(c, dti, "Node %s PCI unit address format error, expected \"%s\"",
+ node->fullpath, unit_addr);
+}
+WARNING(pci_device_reg, check_pci_device_reg, NULL, &reg_format, &pci_bridge);
+
+static const struct bus_type simple_bus = {
+ .name = "simple-bus",
+};
+
+static bool node_is_compatible(struct node *node, const char *compat)
+{
+ struct property *prop;
+ const char *str, *end;
+
+ prop = get_property(node, "compatible");
+ if (!prop)
+ return false;
+
+ for (str = prop->val.val, end = str + prop->val.len; str < end;
+ str += strnlen(str, end - str) + 1) {
+ if (strneq(str, compat, end - str))
+ return true;
+ }
+ return false;
+}
+
+static void check_simple_bus_bridge(struct check *c, struct dt_info *dti, struct node *node)
+{
+ if (node_is_compatible(node, "simple-bus"))
+ node->bus = &simple_bus;
+}
+WARNING(simple_bus_bridge, check_simple_bus_bridge, NULL, &addr_size_cells);
+
+static void check_simple_bus_reg(struct check *c, struct dt_info *dti, struct node *node)
+{
+ struct property *prop;
+ const char *unitname = get_unitname(node);
+ char unit_addr[17];
+ unsigned int size;
+ uint64_t reg = 0;
+ cell_t *cells = NULL;
+
+ if (!node->parent || (node->parent->bus != &simple_bus))
+ return;
+
+ prop = get_property(node, "reg");
+ if (prop)
+ cells = (cell_t *)prop->val.val;
+ else {
+ prop = get_property(node, "ranges");
+ if (prop && prop->val.len)
+ /* skip of child address */
+ cells = ((cell_t *)prop->val.val) + node_addr_cells(node);
+ }
+
+ if (!cells) {
+ if (node->parent->parent && !(node->bus == &simple_bus))
+ FAIL(c, dti, "Node %s missing or empty reg/ranges property", node->fullpath);
+ return;
+ }
+
+ size = node_addr_cells(node->parent);
+ while (size--)
+ reg = (reg << 32) | fdt32_to_cpu(*(cells++));
+
+ snprintf(unit_addr, sizeof(unit_addr), "%lx", reg);
+ if (!streq(unitname, unit_addr))
+ FAIL(c, dti, "Node %s simple-bus unit address format error, expected \"%s\"",
+ node->fullpath, unit_addr);
+}
+WARNING(simple_bus_reg, check_simple_bus_reg, NULL, &reg_format, &simple_bus_bridge);
+
+static void check_unit_address_format(struct check *c, struct dt_info *dti,
+ struct node *node)
+{
+ const char *unitname = get_unitname(node);
+
+ if (node->parent && node->parent->bus)
+ return;
+
+ if (!unitname[0])
+ return;
+
+ if (!strncmp(unitname, "0x", 2)) {
+ FAIL(c, dti, "Node %s unit name should not have leading \"0x\"",
+ node->fullpath);
+ /* skip over 0x for next test */
+ unitname += 2;
+ }
+ if (unitname[0] == '0' && isxdigit(unitname[1]))
+ FAIL(c, dti, "Node %s unit name should not have leading 0s",
+ node->fullpath);
+}
+WARNING(unit_address_format, check_unit_address_format, NULL,
+ &node_name_format, &pci_bridge, &simple_bus_bridge);
+
/*
* Style checks
*/
@@ -656,11 +922,11 @@ static void check_avoid_default_addr_size(struct check *c, struct dt_info *dti,
return;
if (node->parent->addr_cells == -1)
- FAIL(c, "Relying on default #address-cells value for %s",
+ FAIL(c, dti, "Relying on default #address-cells value for %s",
node->fullpath);
if (node->parent->size_cells == -1)
- FAIL(c, "Relying on default #size-cells value for %s",
+ FAIL(c, dti, "Relying on default #size-cells value for %s",
node->fullpath);
}
WARNING(avoid_default_addr_size, check_avoid_default_addr_size, NULL,
@@ -684,7 +950,7 @@ static void check_obsolete_chosen_interrupt_controller(struct check *c,
prop = get_property(chosen, "interrupt-controller");
if (prop)
- FAIL(c, "/chosen has obsolete \"interrupt-controller\" "
+ FAIL(c, dti, "/chosen has obsolete \"interrupt-controller\" "
"property");
}
WARNING(obsolete_chosen_interrupt_controller,
@@ -703,9 +969,20 @@ static struct check *check_table[] = {
&address_cells_is_cell, &size_cells_is_cell, &interrupt_cells_is_cell,
&device_type_is_string, &model_is_string, &status_is_string,
+ &property_name_chars_strict,
+ &node_name_chars_strict,
+
&addr_size_cells, &reg_format, &ranges_format,
&unit_address_vs_reg,
+ &unit_address_format,
+
+ &pci_bridge,
+ &pci_device_reg,
+ &pci_device_bus_num,
+
+ &simple_bus_bridge,
+ &simple_bus_reg,
&avoid_default_addr_size,
&obsolete_chosen_interrupt_controller,
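A minimal standalone sketch of the reg-cell decoding the new PCI checks perform, assuming the IEEE 1275 PCI binding layout that the code above encodes; the phys.hi value is made up:

	#include <stdio.h>
	#include <stdint.h>

	/* Decode the first cell of a PCI "reg" entry (phys.hi) the way the new
	 * pci_device_reg and pci_device_bus_num checks do. */
	int main(void)
	{
		uint32_t phys_hi = 0x0000a800;	/* hypothetical: config space, bus 0, dev 0x15, func 0 */
		unsigned int bus  = (phys_hi & 0x00ff0000) >> 16;
		unsigned int dev  = (phys_hi & 0x0000f800) >> 11;
		unsigned int func = (phys_hi & 0x00000700) >> 8;

		if (phys_hi & 0xff000000)
			printf("not a configuration space address\n");
		if (phys_hi & 0x000000ff)
			printf("register number must be 0\n");

		if (func)
			printf("bus %u: expected unit address \"%x,%x\"\n", bus, dev, func);
		else
			printf("bus %u: expected unit address \"%x\"\n", bus, dev);
		return 0;
	}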
diff --git a/scripts/dtc/data.c b/scripts/dtc/data.c
index 8cae23746882..aa37a16c8891 100644
--- a/scripts/dtc/data.c
+++ b/scripts/dtc/data.c
@@ -171,9 +171,9 @@ struct data data_merge(struct data d1, struct data d2)
struct data data_append_integer(struct data d, uint64_t value, int bits)
{
uint8_t value_8;
- uint16_t value_16;
- uint32_t value_32;
- uint64_t value_64;
+ fdt16_t value_16;
+ fdt32_t value_32;
+ fdt64_t value_64;
switch (bits) {
case 8:
@@ -197,14 +197,14 @@ struct data data_append_integer(struct data d, uint64_t value, int bits)
}
}
-struct data data_append_re(struct data d, const struct fdt_reserve_entry *re)
+struct data data_append_re(struct data d, uint64_t address, uint64_t size)
{
- struct fdt_reserve_entry bere;
+ struct fdt_reserve_entry re;
- bere.address = cpu_to_fdt64(re->address);
- bere.size = cpu_to_fdt64(re->size);
+ re.address = cpu_to_fdt64(address);
+ re.size = cpu_to_fdt64(size);
- return data_append_data(d, &bere, sizeof(bere));
+ return data_append_data(d, &re, sizeof(re));
}
struct data data_append_cell(struct data d, cell_t word)
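A small sketch of the typing convention this change (and the rest of the series) moves to, with htonl() standing in for cpu_to_fdt32() and a plain typedef standing in for the sparse-annotated fdt32_t:

	#include <stdio.h>
	#include <stdint.h>
	#include <string.h>
	#include <arpa/inet.h>		/* htonl(), standing in for cpu_to_fdt32() */

	typedef uint32_t fdt32_t;	/* illustration; libfdt_env.h tags the real type for sparse */

	/* Values already in FDT (big-endian) byte order live in fdt32_t variables,
	 * so a native-endian uint32_t cannot be appended to the blob by mistake. */
	static void append_cell(uint8_t *buf, uint32_t native)
	{
		fdt32_t be = htonl(native);	/* cpu_to_fdt32(native) in dtc proper */

		memcpy(buf, &be, sizeof(be));
	}

	int main(void)
	{
		uint8_t cell[4];

		append_cell(cell, 0x12345678);
		printf("%02x %02x %02x %02x\n", cell[0], cell[1], cell[2], cell[3]);	/* 12 34 56 78 */
		return 0;
	}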
diff --git a/scripts/dtc/dtc-lexer.l b/scripts/dtc/dtc-lexer.l
index c600603044f3..fd825ebba69c 100644
--- a/scripts/dtc/dtc-lexer.l
+++ b/scripts/dtc/dtc-lexer.l
@@ -62,7 +62,8 @@ static int dts_version = 1;
static void push_input_file(const char *filename);
static bool pop_input_file(void);
-static void lexical_error(const char *fmt, ...);
+static void PRINTF(1, 2) lexical_error(const char *fmt, ...);
+
%}
%%
diff --git a/scripts/dtc/dtc-lexer.lex.c_shipped b/scripts/dtc/dtc-lexer.lex.c_shipped
index 2c862bc86ad0..64c243772398 100644
--- a/scripts/dtc/dtc-lexer.lex.c_shipped
+++ b/scripts/dtc/dtc-lexer.lex.c_shipped
@@ -655,8 +655,9 @@ static int dts_version = 1;
static void push_input_file(const char *filename);
static bool pop_input_file(void);
-static void lexical_error(const char *fmt, ...);
-#line 660 "dtc-lexer.lex.c"
+static void PRINTF(1, 2) lexical_error(const char *fmt, ...);
+
+#line 661 "dtc-lexer.lex.c"
#define INITIAL 0
#define BYTESTRING 1
@@ -878,9 +879,9 @@ YY_DECL
}
{
-#line 68 "dtc-lexer.l"
+#line 69 "dtc-lexer.l"
-#line 884 "dtc-lexer.lex.c"
+#line 885 "dtc-lexer.lex.c"
while ( /*CONSTCOND*/1 ) /* loops until end-of-file is reached */
{
@@ -937,7 +938,7 @@ do_action: /* This label is used only to access EOF actions. */
case 1:
/* rule 1 can match eol */
YY_RULE_SETUP
-#line 69 "dtc-lexer.l"
+#line 70 "dtc-lexer.l"
{
char *name = strchr(yytext, '\"') + 1;
yytext[yyleng-1] = '\0';
@@ -947,7 +948,7 @@ YY_RULE_SETUP
case 2:
/* rule 2 can match eol */
YY_RULE_SETUP
-#line 75 "dtc-lexer.l"
+#line 76 "dtc-lexer.l"
{
char *line, *fnstart, *fnend;
struct data fn;
@@ -981,7 +982,7 @@ case YY_STATE_EOF(INITIAL):
case YY_STATE_EOF(BYTESTRING):
case YY_STATE_EOF(PROPNODENAME):
case YY_STATE_EOF(V1):
-#line 104 "dtc-lexer.l"
+#line 105 "dtc-lexer.l"
{
if (!pop_input_file()) {
yyterminate();
@@ -991,7 +992,7 @@ case YY_STATE_EOF(V1):
case 3:
/* rule 3 can match eol */
YY_RULE_SETUP
-#line 110 "dtc-lexer.l"
+#line 111 "dtc-lexer.l"
{
DPRINT("String: %s\n", yytext);
yylval.data = data_copy_escape_string(yytext+1,
@@ -1001,7 +1002,7 @@ YY_RULE_SETUP
YY_BREAK
case 4:
YY_RULE_SETUP
-#line 117 "dtc-lexer.l"
+#line 118 "dtc-lexer.l"
{
DPRINT("Keyword: /dts-v1/\n");
dts_version = 1;
@@ -1011,7 +1012,7 @@ YY_RULE_SETUP
YY_BREAK
case 5:
YY_RULE_SETUP
-#line 124 "dtc-lexer.l"
+#line 125 "dtc-lexer.l"
{
DPRINT("Keyword: /plugin/\n");
return DT_PLUGIN;
@@ -1019,7 +1020,7 @@ YY_RULE_SETUP
YY_BREAK
case 6:
YY_RULE_SETUP
-#line 129 "dtc-lexer.l"
+#line 130 "dtc-lexer.l"
{
DPRINT("Keyword: /memreserve/\n");
BEGIN_DEFAULT();
@@ -1028,7 +1029,7 @@ YY_RULE_SETUP
YY_BREAK
case 7:
YY_RULE_SETUP
-#line 135 "dtc-lexer.l"
+#line 136 "dtc-lexer.l"
{
DPRINT("Keyword: /bits/\n");
BEGIN_DEFAULT();
@@ -1037,7 +1038,7 @@ YY_RULE_SETUP
YY_BREAK
case 8:
YY_RULE_SETUP
-#line 141 "dtc-lexer.l"
+#line 142 "dtc-lexer.l"
{
DPRINT("Keyword: /delete-property/\n");
DPRINT("<PROPNODENAME>\n");
@@ -1047,7 +1048,7 @@ YY_RULE_SETUP
YY_BREAK
case 9:
YY_RULE_SETUP
-#line 148 "dtc-lexer.l"
+#line 149 "dtc-lexer.l"
{
DPRINT("Keyword: /delete-node/\n");
DPRINT("<PROPNODENAME>\n");
@@ -1057,7 +1058,7 @@ YY_RULE_SETUP
YY_BREAK
case 10:
YY_RULE_SETUP
-#line 155 "dtc-lexer.l"
+#line 156 "dtc-lexer.l"
{
DPRINT("Label: %s\n", yytext);
yylval.labelref = xstrdup(yytext);
@@ -1067,7 +1068,7 @@ YY_RULE_SETUP
YY_BREAK
case 11:
YY_RULE_SETUP
-#line 162 "dtc-lexer.l"
+#line 163 "dtc-lexer.l"
{
char *e;
DPRINT("Integer Literal: '%s'\n", yytext);
@@ -1093,7 +1094,7 @@ YY_RULE_SETUP
case 12:
/* rule 12 can match eol */
YY_RULE_SETUP
-#line 184 "dtc-lexer.l"
+#line 185 "dtc-lexer.l"
{
struct data d;
DPRINT("Character literal: %s\n", yytext);
@@ -1117,7 +1118,7 @@ YY_RULE_SETUP
YY_BREAK
case 13:
YY_RULE_SETUP
-#line 205 "dtc-lexer.l"
+#line 206 "dtc-lexer.l"
{ /* label reference */
DPRINT("Ref: %s\n", yytext+1);
yylval.labelref = xstrdup(yytext+1);
@@ -1126,7 +1127,7 @@ YY_RULE_SETUP
YY_BREAK
case 14:
YY_RULE_SETUP
-#line 211 "dtc-lexer.l"
+#line 212 "dtc-lexer.l"
{ /* new-style path reference */
yytext[yyleng-1] = '\0';
DPRINT("Ref: %s\n", yytext+2);
@@ -1136,7 +1137,7 @@ YY_RULE_SETUP
YY_BREAK
case 15:
YY_RULE_SETUP
-#line 218 "dtc-lexer.l"
+#line 219 "dtc-lexer.l"
{
yylval.byte = strtol(yytext, NULL, 16);
DPRINT("Byte: %02x\n", (int)yylval.byte);
@@ -1145,7 +1146,7 @@ YY_RULE_SETUP
YY_BREAK
case 16:
YY_RULE_SETUP
-#line 224 "dtc-lexer.l"
+#line 225 "dtc-lexer.l"
{
DPRINT("/BYTESTRING\n");
BEGIN_DEFAULT();
@@ -1154,7 +1155,7 @@ YY_RULE_SETUP
YY_BREAK
case 17:
YY_RULE_SETUP
-#line 230 "dtc-lexer.l"
+#line 231 "dtc-lexer.l"
{
DPRINT("PropNodeName: %s\n", yytext);
yylval.propnodename = xstrdup((yytext[0] == '\\') ?
@@ -1165,7 +1166,7 @@ YY_RULE_SETUP
YY_BREAK
case 18:
YY_RULE_SETUP
-#line 238 "dtc-lexer.l"
+#line 239 "dtc-lexer.l"
{
DPRINT("Binary Include\n");
return DT_INCBIN;
@@ -1174,64 +1175,64 @@ YY_RULE_SETUP
case 19:
/* rule 19 can match eol */
YY_RULE_SETUP
-#line 243 "dtc-lexer.l"
+#line 244 "dtc-lexer.l"
/* eat whitespace */
YY_BREAK
case 20:
/* rule 20 can match eol */
YY_RULE_SETUP
-#line 244 "dtc-lexer.l"
+#line 245 "dtc-lexer.l"
/* eat C-style comments */
YY_BREAK
case 21:
/* rule 21 can match eol */
YY_RULE_SETUP
-#line 245 "dtc-lexer.l"
+#line 246 "dtc-lexer.l"
/* eat C++-style comments */
YY_BREAK
case 22:
YY_RULE_SETUP
-#line 247 "dtc-lexer.l"
+#line 248 "dtc-lexer.l"
{ return DT_LSHIFT; };
YY_BREAK
case 23:
YY_RULE_SETUP
-#line 248 "dtc-lexer.l"
+#line 249 "dtc-lexer.l"
{ return DT_RSHIFT; };
YY_BREAK
case 24:
YY_RULE_SETUP
-#line 249 "dtc-lexer.l"
+#line 250 "dtc-lexer.l"
{ return DT_LE; };
YY_BREAK
case 25:
YY_RULE_SETUP
-#line 250 "dtc-lexer.l"
+#line 251 "dtc-lexer.l"
{ return DT_GE; };
YY_BREAK
case 26:
YY_RULE_SETUP
-#line 251 "dtc-lexer.l"
+#line 252 "dtc-lexer.l"
{ return DT_EQ; };
YY_BREAK
case 27:
YY_RULE_SETUP
-#line 252 "dtc-lexer.l"
+#line 253 "dtc-lexer.l"
{ return DT_NE; };
YY_BREAK
case 28:
YY_RULE_SETUP
-#line 253 "dtc-lexer.l"
+#line 254 "dtc-lexer.l"
{ return DT_AND; };
YY_BREAK
case 29:
YY_RULE_SETUP
-#line 254 "dtc-lexer.l"
+#line 255 "dtc-lexer.l"
{ return DT_OR; };
YY_BREAK
case 30:
YY_RULE_SETUP
-#line 256 "dtc-lexer.l"
+#line 257 "dtc-lexer.l"
{
DPRINT("Char: %c (\\x%02x)\n", yytext[0],
(unsigned)yytext[0]);
@@ -1249,10 +1250,10 @@ YY_RULE_SETUP
YY_BREAK
case 31:
YY_RULE_SETUP
-#line 271 "dtc-lexer.l"
+#line 272 "dtc-lexer.l"
ECHO;
YY_BREAK
-#line 1256 "dtc-lexer.lex.c"
+#line 1257 "dtc-lexer.lex.c"
case YY_END_OF_BUFFER:
{
@@ -2218,7 +2219,7 @@ void yyfree (void * ptr )
#define YYTABLES_NAME "yytables"
-#line 271 "dtc-lexer.l"
+#line 272 "dtc-lexer.l"
diff --git a/scripts/dtc/dtc-parser.tab.c_shipped b/scripts/dtc/dtc-parser.tab.c_shipped
index 2965227a1b4a..0a7a5ed86f04 100644
--- a/scripts/dtc/dtc-parser.tab.c_shipped
+++ b/scripts/dtc/dtc-parser.tab.c_shipped
@@ -1557,10 +1557,10 @@ yyreduce:
{
struct node *target = get_node_by_ref((yyvsp[-3].node), (yyvsp[-1].labelref));
- add_label(&target->labels, (yyvsp[-2].labelref));
- if (target)
+ if (target) {
+ add_label(&target->labels, (yyvsp[-2].labelref));
merge_nodes(target, (yyvsp[0].node));
- else
+ } else
ERROR(&(yylsp[-1]), "Label or path %s not found", (yyvsp[-1].labelref));
(yyval.node) = (yyvsp[-3].node);
}
diff --git a/scripts/dtc/dtc-parser.y b/scripts/dtc/dtc-parser.y
index b2fd4d155839..ca3f5003427c 100644
--- a/scripts/dtc/dtc-parser.y
+++ b/scripts/dtc/dtc-parser.y
@@ -171,10 +171,10 @@ devicetree:
{
struct node *target = get_node_by_ref($1, $3);
- add_label(&target->labels, $2);
- if (target)
+ if (target) {
+ add_label(&target->labels, $2);
merge_nodes(target, $4);
- else
+ } else
ERROR(&@3, "Label or path %s not found", $3);
$$ = $1;
}
diff --git a/scripts/dtc/dtc.c b/scripts/dtc/dtc.c
index a4edf4c7aebf..f5eed9d72c02 100644
--- a/scripts/dtc/dtc.c
+++ b/scripts/dtc/dtc.c
@@ -138,7 +138,7 @@ static const char *guess_type_by_name(const char *fname, const char *fallback)
static const char *guess_input_format(const char *fname, const char *fallback)
{
struct stat statbuf;
- uint32_t magic;
+ fdt32_t magic;
FILE *f;
if (stat(fname, &statbuf) != 0)
@@ -159,8 +159,7 @@ static const char *guess_input_format(const char *fname, const char *fallback)
}
fclose(f);
- magic = fdt32_to_cpu(magic);
- if (magic == FDT_MAGIC)
+ if (fdt32_to_cpu(magic) == FDT_MAGIC)
return "dtb";
return guess_type_by_name(fname, fallback);
@@ -216,7 +215,7 @@ int main(int argc, char *argv[])
alignsize = strtol(optarg, NULL, 0);
if (!is_power_of_2(alignsize))
die("Invalid argument \"%d\" to -a option\n",
- optarg);
+ alignsize);
break;
case 'f':
force = true;
@@ -309,6 +308,8 @@ int main(int argc, char *argv[])
else
die("Unknown input format \"%s\"\n", inform);
+ dti->outname = outname;
+
if (depfile) {
fputc('\n', depfile);
fclose(depfile);
diff --git a/scripts/dtc/dtc.h b/scripts/dtc/dtc.h
index c6f125c68ba8..fc24e17510fd 100644
--- a/scripts/dtc/dtc.h
+++ b/scripts/dtc/dtc.h
@@ -43,7 +43,6 @@
#define debug(...)
#endif
-
#define DEFAULT_FDT_VERSION 17
/*
@@ -114,7 +113,7 @@ struct data data_insert_at_marker(struct data d, struct marker *m,
struct data data_merge(struct data d1, struct data d2);
struct data data_append_cell(struct data d, cell_t word);
struct data data_append_integer(struct data d, uint64_t word, int bits);
-struct data data_append_re(struct data d, const struct fdt_reserve_entry *re);
+struct data data_append_re(struct data d, uint64_t address, uint64_t size);
struct data data_append_addr(struct data d, uint64_t addr);
struct data data_append_byte(struct data d, uint8_t byte);
struct data data_append_zeroes(struct data d, int len);
@@ -136,6 +135,10 @@ struct label {
struct label *next;
};
+struct bus_type {
+ const char *name;
+};
+
struct property {
bool deleted;
char *name;
@@ -162,6 +165,7 @@ struct node {
int addr_cells, size_cells;
struct label *labels;
+ const struct bus_type *bus;
};
#define for_each_label_withdel(l0, l) \
@@ -227,7 +231,7 @@ uint32_t guess_boot_cpuid(struct node *tree);
/* Boot info (tree plus memreserve information) */
struct reserve_info {
- struct fdt_reserve_entry re;
+ uint64_t address, size;
struct reserve_info *next;
@@ -246,6 +250,7 @@ struct dt_info {
struct reserve_info *reservelist;
uint32_t boot_cpuid_phys;
struct node *dt; /* the device tree */
+ const char *outname; /* filename being written to, "-" for stdout */
};
/* DTS version flags definitions */
diff --git a/scripts/dtc/flattree.c b/scripts/dtc/flattree.c
index ebac548b3fa8..fcf71541d8a7 100644
--- a/scripts/dtc/flattree.c
+++ b/scripts/dtc/flattree.c
@@ -49,7 +49,7 @@ static struct version_info {
struct emitter {
void (*cell)(void *, cell_t);
- void (*string)(void *, char *, int);
+ void (*string)(void *, const char *, int);
void (*align)(void *, int);
void (*data)(void *, struct data);
void (*beginnode)(void *, struct label *labels);
@@ -64,7 +64,7 @@ static void bin_emit_cell(void *e, cell_t val)
*dtbuf = data_append_cell(*dtbuf, val);
}
-static void bin_emit_string(void *e, char *str, int len)
+static void bin_emit_string(void *e, const char *str, int len)
{
struct data *dtbuf = e;
@@ -144,22 +144,14 @@ static void asm_emit_cell(void *e, cell_t val)
(val >> 8) & 0xff, val & 0xff);
}
-static void asm_emit_string(void *e, char *str, int len)
+static void asm_emit_string(void *e, const char *str, int len)
{
FILE *f = e;
- char c = 0;
- if (len != 0) {
- /* XXX: ewww */
- c = str[len];
- str[len] = '\0';
- }
-
- fprintf(f, "\t.string\t\"%s\"\n", str);
-
- if (len != 0) {
- str[len] = c;
- }
+ if (len != 0)
+ fprintf(f, "\t.string\t\"%.*s\"\n", len, str);
+ else
+ fprintf(f, "\t.string\t\"%s\"\n", str);
}
static void asm_emit_align(void *e, int a)
@@ -179,7 +171,7 @@ static void asm_emit_data(void *e, struct data d)
emit_offset_label(f, m->ref, m->offset);
while ((d.len - off) >= sizeof(uint32_t)) {
- asm_emit_cell(e, fdt32_to_cpu(*((uint32_t *)(d.val+off))));
+ asm_emit_cell(e, fdt32_to_cpu(*((fdt32_t *)(d.val+off))));
off += sizeof(uint32_t);
}
@@ -318,17 +310,16 @@ static struct data flatten_reserve_list(struct reserve_info *reservelist,
{
struct reserve_info *re;
struct data d = empty_data;
- static struct fdt_reserve_entry null_re = {0,0};
int j;
for (re = reservelist; re; re = re->next) {
- d = data_append_re(d, &re->re);
+ d = data_append_re(d, re->address, re->size);
}
/*
* Add additional reserved slots if the user asked for them.
*/
for (j = 0; j < reservenum; j++) {
- d = data_append_re(d, &null_re);
+ d = data_append_re(d, 0, 0);
}
return d;
@@ -544,11 +535,11 @@ void dt_to_asm(FILE *f, struct dt_info *dti, int version)
fprintf(f, "\t.globl\t%s\n", l->label);
fprintf(f, "%s:\n", l->label);
}
- ASM_EMIT_BELONG(f, "0x%08x", (unsigned int)(re->re.address >> 32));
+ ASM_EMIT_BELONG(f, "0x%08x", (unsigned int)(re->address >> 32));
ASM_EMIT_BELONG(f, "0x%08x",
- (unsigned int)(re->re.address & 0xffffffff));
- ASM_EMIT_BELONG(f, "0x%08x", (unsigned int)(re->re.size >> 32));
- ASM_EMIT_BELONG(f, "0x%08x", (unsigned int)(re->re.size & 0xffffffff));
+ (unsigned int)(re->address & 0xffffffff));
+ ASM_EMIT_BELONG(f, "0x%08x", (unsigned int)(re->size >> 32));
+ ASM_EMIT_BELONG(f, "0x%08x", (unsigned int)(re->size & 0xffffffff));
}
for (i = 0; i < reservenum; i++) {
fprintf(f, "\t.long\t0, 0\n\t.long\t0, 0\n");
@@ -609,7 +600,7 @@ static void flat_read_chunk(struct inbuf *inb, void *p, int len)
static uint32_t flat_read_word(struct inbuf *inb)
{
- uint32_t val;
+ fdt32_t val;
assert(((inb->ptr - inb->base) % sizeof(val)) == 0);
@@ -718,13 +709,15 @@ static struct reserve_info *flat_read_mem_reserve(struct inbuf *inb)
* First pass, count entries.
*/
while (1) {
+ uint64_t address, size;
+
flat_read_chunk(inb, &re, sizeof(re));
- re.address = fdt64_to_cpu(re.address);
- re.size = fdt64_to_cpu(re.size);
- if (re.size == 0)
+ address = fdt64_to_cpu(re.address);
+ size = fdt64_to_cpu(re.size);
+ if (size == 0)
break;
- new = build_reserve_entry(re.address, re.size);
+ new = build_reserve_entry(address, size);
reservelist = add_reserve_entry(reservelist, new);
}
@@ -817,6 +810,7 @@ static struct node *unflatten_tree(struct inbuf *dtbuf,
struct dt_info *dt_from_blob(const char *fname)
{
FILE *f;
+ fdt32_t magic_buf, totalsize_buf;
uint32_t magic, totalsize, version, size_dt, boot_cpuid_phys;
uint32_t off_dt, off_str, off_mem_rsvmap;
int rc;
@@ -833,7 +827,7 @@ struct dt_info *dt_from_blob(const char *fname)
f = srcfile_relative_open(fname, NULL);
- rc = fread(&magic, sizeof(magic), 1, f);
+ rc = fread(&magic_buf, sizeof(magic_buf), 1, f);
if (ferror(f))
die("Error reading DT blob magic number: %s\n",
strerror(errno));
@@ -844,11 +838,11 @@ struct dt_info *dt_from_blob(const char *fname)
die("Mysterious short read reading magic number\n");
}
- magic = fdt32_to_cpu(magic);
+ magic = fdt32_to_cpu(magic_buf);
if (magic != FDT_MAGIC)
die("Blob has incorrect magic number\n");
- rc = fread(&totalsize, sizeof(totalsize), 1, f);
+ rc = fread(&totalsize_buf, sizeof(totalsize_buf), 1, f);
if (ferror(f))
die("Error reading DT blob size: %s\n", strerror(errno));
if (rc < 1) {
@@ -858,7 +852,7 @@ struct dt_info *dt_from_blob(const char *fname)
die("Mysterious short read reading blob size\n");
}
- totalsize = fdt32_to_cpu(totalsize);
+ totalsize = fdt32_to_cpu(totalsize_buf);
if (totalsize < FDT_V1_SIZE)
die("DT blob size (%d) is too small\n", totalsize);
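The asm_emit_string() rewrite above leans on printf precision; a tiny standalone demonstration of the idiom:

	#include <stdio.h>

	/* "%.*s" prints at most 'len' bytes, so the caller's buffer no longer needs
	 * a temporary NUL terminator patched in and then restored. */
	int main(void)
	{
		const char packed[] = "compatible\0model";	/* two packed strings, as in an FDT strings block */

		printf("\t.string\t\"%.*s\"\n", 10, packed);	/* emits just "compatible" */
		return 0;
	}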
diff --git a/scripts/dtc/libfdt/fdt_rw.c b/scripts/dtc/libfdt/fdt_rw.c
index 2eed4f58387c..3fd5847377c9 100644
--- a/scripts/dtc/libfdt/fdt_rw.c
+++ b/scripts/dtc/libfdt/fdt_rw.c
@@ -283,7 +283,8 @@ int fdt_setprop(void *fdt, int nodeoffset, const char *name,
if (err)
return err;
- memcpy(prop->data, val, len);
+ if (len)
+ memcpy(prop->data, val, len);
return 0;
}
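The intent of the added length check, sketched in isolation: memcpy() requires valid pointers even for a zero length, so skipping the call keeps a (NULL, 0) value well defined:

	#include <stddef.h>
	#include <string.h>

	/* Guarded copy: with len == 0 the source may be NULL, e.g. when creating
	 * an empty (boolean) property. */
	static void copy_value(void *dst, const void *val, size_t len)
	{
		if (len)
			memcpy(dst, val, len);
	}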
diff --git a/scripts/dtc/libfdt/libfdt.h b/scripts/dtc/libfdt/libfdt.h
index b842b156fa17..ba86caa73d01 100644
--- a/scripts/dtc/libfdt/libfdt.h
+++ b/scripts/dtc/libfdt/libfdt.h
@@ -143,7 +143,9 @@
/* Low-level functions (you probably don't need these) */
/**********************************************************************/
+#ifndef SWIG /* This function is not useful in Python */
const void *fdt_offset_ptr(const void *fdt, int offset, unsigned int checklen);
+#endif
static inline void *fdt_offset_ptr_w(void *fdt, int offset, int checklen)
{
return (void *)(uintptr_t)fdt_offset_ptr(fdt, offset, checklen);
@@ -210,7 +212,6 @@ int fdt_next_subnode(const void *fdt, int offset);
/**********************************************************************/
/* General functions */
/**********************************************************************/
-
#define fdt_get_header(fdt, field) \
(fdt32_to_cpu(((const struct fdt_header *)(fdt))->field))
#define fdt_magic(fdt) (fdt_get_header(fdt, magic))
@@ -354,8 +355,10 @@ int fdt_get_mem_rsv(const void *fdt, int n, uint64_t *address, uint64_t *size);
* useful for finding subnodes based on a portion of a larger string,
* such as a full path.
*/
+#ifndef SWIG /* Not available in Python */
int fdt_subnode_offset_namelen(const void *fdt, int parentoffset,
const char *name, int namelen);
+#endif
/**
* fdt_subnode_offset - find a subnode of a given node
* @fdt: pointer to the device tree blob
@@ -391,7 +394,9 @@ int fdt_subnode_offset(const void *fdt, int parentoffset, const char *name);
* Identical to fdt_path_offset(), but only consider the first namelen
* characters of path as the path name.
*/
+#ifndef SWIG /* Not available in Python */
int fdt_path_offset_namelen(const void *fdt, const char *path, int namelen);
+#endif
/**
* fdt_path_offset - find a tree node by its full path
@@ -550,10 +555,12 @@ const struct fdt_property *fdt_get_property_by_offset(const void *fdt,
* Identical to fdt_get_property(), but only examine the first namelen
* characters of name for matching the property name.
*/
+#ifndef SWIG /* Not available in Python */
const struct fdt_property *fdt_get_property_namelen(const void *fdt,
int nodeoffset,
const char *name,
int namelen, int *lenp);
+#endif
/**
* fdt_get_property - find a given property in a given node
@@ -624,8 +631,10 @@ static inline struct fdt_property *fdt_get_property_w(void *fdt, int nodeoffset,
* -FDT_ERR_BADSTRUCTURE,
* -FDT_ERR_TRUNCATED, standard meanings
*/
+#ifndef SWIG /* This function is not useful in Python */
const void *fdt_getprop_by_offset(const void *fdt, int offset,
const char **namep, int *lenp);
+#endif
/**
* fdt_getprop_namelen - get property value based on substring
@@ -638,6 +647,7 @@ const void *fdt_getprop_by_offset(const void *fdt, int offset,
* Identical to fdt_getprop(), but only examine the first namelen
* characters of name for matching the property name.
*/
+#ifndef SWIG /* Not available in Python */
const void *fdt_getprop_namelen(const void *fdt, int nodeoffset,
const char *name, int namelen, int *lenp);
static inline void *fdt_getprop_namelen_w(void *fdt, int nodeoffset,
@@ -647,6 +657,7 @@ static inline void *fdt_getprop_namelen_w(void *fdt, int nodeoffset,
return (void *)(uintptr_t)fdt_getprop_namelen(fdt, nodeoffset, name,
namelen, lenp);
}
+#endif
/**
* fdt_getprop - retrieve the value of a given property
@@ -707,8 +718,10 @@ uint32_t fdt_get_phandle(const void *fdt, int nodeoffset);
* Identical to fdt_get_alias(), but only examine the first namelen
* characters of name for matching the alias name.
*/
+#ifndef SWIG /* Not available in Python */
const char *fdt_get_alias_namelen(const void *fdt,
const char *name, int namelen);
+#endif
/**
* fdt_get_alias - retrieve the path referenced by a given alias
@@ -1106,10 +1119,12 @@ int fdt_size_cells(const void *fdt, int nodeoffset);
* of the name. It is useful when you want to manipulate only one value of
* an array and you have a string that doesn't end with \0.
*/
+#ifndef SWIG /* Not available in Python */
int fdt_setprop_inplace_namelen_partial(void *fdt, int nodeoffset,
const char *name, int namelen,
uint32_t idx, const void *val,
int len);
+#endif
/**
* fdt_setprop_inplace - change a property's value, but not its size
@@ -1139,8 +1154,10 @@ int fdt_setprop_inplace_namelen_partial(void *fdt, int nodeoffset,
* -FDT_ERR_BADSTRUCTURE,
* -FDT_ERR_TRUNCATED, standard meanings
*/
+#ifndef SWIG /* Not available in Python */
int fdt_setprop_inplace(void *fdt, int nodeoffset, const char *name,
const void *val, int len);
+#endif
/**
* fdt_setprop_inplace_u32 - change the value of a 32-bit integer property
@@ -1527,6 +1544,36 @@ static inline int fdt_setprop_cell(void *fdt, int nodeoffset, const char *name,
#define fdt_setprop_string(fdt, nodeoffset, name, str) \
fdt_setprop((fdt), (nodeoffset), (name), (str), strlen(str)+1)
+
+/**
+ * fdt_setprop_empty - set a property to an empty value
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of the node whose property to change
+ * @name: name of the property to change
+ *
+ * fdt_setprop_empty() sets the value of the named property in the
+ * given node to an empty (zero length) value, or creates a new empty
+ * property if it does not already exist.
+ *
+ * This function may insert or delete data from the blob, and will
+ * therefore change the offsets of some existing nodes.
+ *
+ * returns:
+ * 0, on success
+ * -FDT_ERR_NOSPACE, there is insufficient free space in the blob to
+ * contain the new property value
+ * -FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE tag
+ * -FDT_ERR_BADLAYOUT,
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTATE,
+ * -FDT_ERR_BADSTRUCTURE,
+ * -FDT_ERR_BADLAYOUT,
+ * -FDT_ERR_TRUNCATED, standard meanings
+ */
+#define fdt_setprop_empty(fdt, nodeoffset, name) \
+ fdt_setprop((fdt), (nodeoffset), (name), NULL, 0)
+
/**
* fdt_appendprop - append to or create a property
* @fdt: pointer to the device tree blob
@@ -1704,8 +1751,10 @@ int fdt_delprop(void *fdt, int nodeoffset, const char *name);
* creating subnodes based on a portion of a larger string, such as a
* full path.
*/
+#ifndef SWIG /* Not available in Python */
int fdt_add_subnode_namelen(void *fdt, int parentoffset,
const char *name, int namelen);
+#endif
/**
* fdt_add_subnode - creates a new node
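A hypothetical caller of the new fdt_setprop_empty() helper (the property name and error handling are illustrative, not from this series):

	#include <stdio.h>
	#include <libfdt.h>	/* the libfdt in this tree, which now provides fdt_setprop_empty() */

	/* Create a zero-length property such as "interrupt-controller".  'fdt'
	 * must be a writable blob with enough free space. */
	static int mark_interrupt_controller(void *fdt, int nodeoffset)
	{
		int err = fdt_setprop_empty(fdt, nodeoffset, "interrupt-controller");

		if (err < 0)
			fprintf(stderr, "fdt_setprop_empty: %s\n", fdt_strerror(err));
		return err;
	}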
diff --git a/scripts/dtc/libfdt/libfdt_env.h b/scripts/dtc/libfdt/libfdt_env.h
index 99f936dacc60..952056cddf09 100644
--- a/scripts/dtc/libfdt/libfdt_env.h
+++ b/scripts/dtc/libfdt/libfdt_env.h
@@ -58,16 +58,16 @@
#include <string.h>
#ifdef __CHECKER__
-#define __force __attribute__((force))
-#define __bitwise __attribute__((bitwise))
+#define FDT_FORCE __attribute__((force))
+#define FDT_BITWISE __attribute__((bitwise))
#else
-#define __force
-#define __bitwise
+#define FDT_FORCE
+#define FDT_BITWISE
#endif
-typedef uint16_t __bitwise fdt16_t;
-typedef uint32_t __bitwise fdt32_t;
-typedef uint64_t __bitwise fdt64_t;
+typedef uint16_t FDT_BITWISE fdt16_t;
+typedef uint32_t FDT_BITWISE fdt32_t;
+typedef uint64_t FDT_BITWISE fdt64_t;
#define EXTRACT_BYTE(x, n) ((unsigned long long)((uint8_t *)&x)[n])
#define CPU_TO_FDT16(x) ((EXTRACT_BYTE(x, 0) << 8) | EXTRACT_BYTE(x, 1))
@@ -80,29 +80,29 @@ typedef uint64_t __bitwise fdt64_t;
static inline uint16_t fdt16_to_cpu(fdt16_t x)
{
- return (__force uint16_t)CPU_TO_FDT16(x);
+ return (FDT_FORCE uint16_t)CPU_TO_FDT16(x);
}
static inline fdt16_t cpu_to_fdt16(uint16_t x)
{
- return (__force fdt16_t)CPU_TO_FDT16(x);
+ return (FDT_FORCE fdt16_t)CPU_TO_FDT16(x);
}
static inline uint32_t fdt32_to_cpu(fdt32_t x)
{
- return (__force uint32_t)CPU_TO_FDT32(x);
+ return (FDT_FORCE uint32_t)CPU_TO_FDT32(x);
}
static inline fdt32_t cpu_to_fdt32(uint32_t x)
{
- return (__force fdt32_t)CPU_TO_FDT32(x);
+ return (FDT_FORCE fdt32_t)CPU_TO_FDT32(x);
}
static inline uint64_t fdt64_to_cpu(fdt64_t x)
{
- return (__force uint64_t)CPU_TO_FDT64(x);
+ return (FDT_FORCE uint64_t)CPU_TO_FDT64(x);
}
static inline fdt64_t cpu_to_fdt64(uint64_t x)
{
- return (__force fdt64_t)CPU_TO_FDT64(x);
+ return (FDT_FORCE fdt64_t)CPU_TO_FDT64(x);
}
#undef CPU_TO_FDT64
#undef CPU_TO_FDT32
diff --git a/scripts/dtc/livetree.c b/scripts/dtc/livetree.c
index afa2f67b142a..3673de07e4e5 100644
--- a/scripts/dtc/livetree.c
+++ b/scripts/dtc/livetree.c
@@ -242,7 +242,7 @@ void delete_property_by_name(struct node *node, char *name)
struct property *prop = node->proplist;
while (prop) {
- if (!strcmp(prop->name, name)) {
+ if (streq(prop->name, name)) {
delete_property(prop);
return;
}
@@ -275,7 +275,7 @@ void delete_node_by_name(struct node *parent, char *name)
struct node *node = parent->children;
while (node) {
- if (!strcmp(node->name, name)) {
+ if (streq(node->name, name)) {
delete_node(node);
return;
}
@@ -319,8 +319,8 @@ struct reserve_info *build_reserve_entry(uint64_t address, uint64_t size)
memset(new, 0, sizeof(*new));
- new->re.address = address;
- new->re.size = size;
+ new->address = address;
+ new->size = size;
return new;
}
@@ -393,7 +393,7 @@ struct property *get_property(struct node *node, const char *propname)
cell_t propval_cell(struct property *prop)
{
assert(prop->val.len == sizeof(cell_t));
- return fdt32_to_cpu(*((cell_t *)prop->val.val));
+ return fdt32_to_cpu(*((fdt32_t *)prop->val.val));
}
struct property *get_property_by_label(struct node *tree, const char *label,
@@ -599,13 +599,13 @@ static int cmp_reserve_info(const void *ax, const void *bx)
a = *((const struct reserve_info * const *)ax);
b = *((const struct reserve_info * const *)bx);
- if (a->re.address < b->re.address)
+ if (a->address < b->address)
return -1;
- else if (a->re.address > b->re.address)
+ else if (a->address > b->address)
return 1;
- else if (a->re.size < b->re.size)
+ else if (a->size < b->size)
return -1;
- else if (a->re.size > b->re.size)
+ else if (a->size > b->size)
return 1;
else
return 0;
@@ -847,6 +847,8 @@ static void add_fixup_entry(struct dt_info *dti, struct node *fn,
xasprintf(&entry, "%s:%s:%u",
node->fullpath, prop->name, m->offset);
append_to_property(fn, m->ref, entry, strlen(entry) + 1);
+
+ free(entry);
}
static void generate_fixups_tree_internal(struct dt_info *dti,
@@ -900,7 +902,7 @@ static void add_local_fixup_entry(struct dt_info *dti,
struct node *refnode)
{
struct node *wn, *nwn; /* local fixup node, walk node, new */
- uint32_t value_32;
+ fdt32_t value_32;
char **compp;
int i, depth;
diff --git a/scripts/dtc/srcpos.c b/scripts/dtc/srcpos.c
index aa3aad04cec4..9d38459902f3 100644
--- a/scripts/dtc/srcpos.c
+++ b/scripts/dtc/srcpos.c
@@ -252,7 +252,7 @@ srcpos_string(struct srcpos *pos)
const char *fname = "<no-file>";
char *pos_str;
- if (pos)
+ if (pos->file && pos->file->name)
fname = pos->file->name;
diff --git a/scripts/dtc/srcpos.h b/scripts/dtc/srcpos.h
index 2cdfcd82e95e..7caca8257c6d 100644
--- a/scripts/dtc/srcpos.h
+++ b/scripts/dtc/srcpos.h
@@ -22,6 +22,7 @@
#include <stdio.h>
#include <stdbool.h>
+#include "util.h"
struct srcfile_state {
FILE *f;
@@ -106,12 +107,10 @@ extern void srcpos_update(struct srcpos *pos, const char *text, int len);
extern struct srcpos *srcpos_copy(struct srcpos *pos);
extern char *srcpos_string(struct srcpos *pos);
-extern void srcpos_verror(struct srcpos *pos, const char *prefix,
- const char *fmt, va_list va)
- __attribute__((format(printf, 3, 0)));
-extern void srcpos_error(struct srcpos *pos, const char *prefix,
- const char *fmt, ...)
- __attribute__((format(printf, 3, 4)));
+extern void PRINTF(3, 0) srcpos_verror(struct srcpos *pos, const char *prefix,
+ const char *fmt, va_list va);
+extern void PRINTF(3, 4) srcpos_error(struct srcpos *pos, const char *prefix,
+ const char *fmt, ...);
extern void srcpos_set_line(char *f, int l);
diff --git a/scripts/dtc/treesource.c b/scripts/dtc/treesource.c
index c9d8967969f9..2461a3d068a0 100644
--- a/scripts/dtc/treesource.c
+++ b/scripts/dtc/treesource.c
@@ -137,7 +137,7 @@ static void write_propval_string(FILE *f, struct data val)
static void write_propval_cells(FILE *f, struct data val)
{
void *propend = val.val + val.len;
- cell_t *cp = (cell_t *)val.val;
+ fdt32_t *cp = (fdt32_t *)val.val;
struct marker *m = val.markers;
fprintf(f, "<");
@@ -275,8 +275,8 @@ void dt_to_source(FILE *f, struct dt_info *dti)
for_each_label(re->labels, l)
fprintf(f, "%s: ", l->label);
fprintf(f, "/memreserve/\t0x%016llx 0x%016llx;\n",
- (unsigned long long)re->re.address,
- (unsigned long long)re->re.size);
+ (unsigned long long)re->address,
+ (unsigned long long)re->size);
}
write_tree_source_node(f, dti->dt, 0);
diff --git a/scripts/dtc/update-dtc-source.sh b/scripts/dtc/update-dtc-source.sh
index f5cde799db03..b8ebcc6722d2 100755
--- a/scripts/dtc/update-dtc-source.sh
+++ b/scripts/dtc/update-dtc-source.sh
@@ -36,10 +36,19 @@ DTC_SOURCE="checks.c data.c dtc.c dtc.h flattree.c fstree.c livetree.c srcpos.c
DTC_GENERATED="dtc-lexer.lex.c dtc-parser.tab.c dtc-parser.tab.h"
LIBFDT_SOURCE="Makefile.libfdt fdt.c fdt.h fdt_empty_tree.c fdt_ro.c fdt_rw.c fdt_strerror.c fdt_sw.c fdt_wip.c libfdt.h libfdt_env.h libfdt_internal.h"
+get_last_dtc_version() {
+ git log --oneline scripts/dtc/ | grep 'upstream' | head -1 | sed -e 's/^.* \(.*\)/\1/'
+}
+
+last_dtc_ver=$(get_last_dtc_version)
+
# Build DTC
cd $DTC_UPSTREAM_PATH
make clean
make check
+dtc_version=$(git describe HEAD)
+dtc_log=$(git log --oneline ${last_dtc_ver}..)
+
# Copy the files into the Linux tree
cd $DTC_LINUX_PATH
@@ -60,4 +69,13 @@ sed -i -- 's/#include <libfdt_env.h>/#include "libfdt_env.h"/g' ./libfdt/libfdt.
sed -i -- 's/#include <fdt.h>/#include "fdt.h"/g' ./libfdt/libfdt.h
git add ./libfdt/libfdt.h
-git commit -e -v -m "scripts/dtc: Update to upstream version [CHANGEME]"
+commit_msg=$(cat << EOF
+scripts/dtc: Update to upstream version ${dtc_version}
+
+This adds the following commits from upstream:
+
+${dtc_log}
+EOF
+)
+
+git commit -e -v -s -m "${commit_msg}"
diff --git a/scripts/dtc/util.c b/scripts/dtc/util.c
index 3550f86bd6df..9953c32a0244 100644
--- a/scripts/dtc/util.c
+++ b/scripts/dtc/util.c
@@ -396,7 +396,7 @@ void utilfdt_print_data(const char *data, int len)
} while (s < data + len);
} else if ((len % 4) == 0) {
- const uint32_t *cell = (const uint32_t *)data;
+ const fdt32_t *cell = (const fdt32_t *)data;
printf(" = <");
for (i = 0, len /= 4; i < len; i++)
@@ -412,15 +412,16 @@ void utilfdt_print_data(const char *data, int len)
}
}
-void util_version(void)
+void NORETURN util_version(void)
{
printf("Version: %s\n", DTC_VERSION);
exit(0);
}
-void util_usage(const char *errmsg, const char *synopsis,
- const char *short_opts, struct option const long_opts[],
- const char * const opts_help[])
+void NORETURN util_usage(const char *errmsg, const char *synopsis,
+ const char *short_opts,
+ struct option const long_opts[],
+ const char * const opts_help[])
{
FILE *fp = errmsg ? stderr : stdout;
const char a_arg[] = "<arg>";
diff --git a/scripts/dtc/util.h b/scripts/dtc/util.h
index f5c4f1b50d30..ad5f41199edb 100644
--- a/scripts/dtc/util.h
+++ b/scripts/dtc/util.h
@@ -25,9 +25,17 @@
* USA
*/
+#ifdef __GNUC__
+#define PRINTF(i, j) __attribute__((format (printf, i, j)))
+#define NORETURN __attribute__((noreturn))
+#else
+#define PRINTF(i, j)
+#define NORETURN
+#endif
+
#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
-static inline void __attribute__((noreturn)) die(const char *str, ...)
+static inline void NORETURN PRINTF(1, 2) die(const char *str, ...)
{
va_list ap;
@@ -53,13 +61,14 @@ static inline void *xrealloc(void *p, size_t len)
void *new = realloc(p, len);
if (!new)
- die("realloc() failed (len=%d)\n", len);
+ die("realloc() failed (len=%zd)\n", len);
return new;
}
extern char *xstrdup(const char *s);
-extern int xasprintf(char **strp, const char *fmt, ...);
+
+extern int PRINTF(2, 3) xasprintf(char **strp, const char *fmt, ...);
extern char *join_path(const char *path, const char *name);
/**
@@ -188,7 +197,7 @@ void utilfdt_print_data(const char *data, int len);
/**
* Show source version and exit
*/
-void util_version(void) __attribute__((noreturn));
+void NORETURN util_version(void);
/**
* Show usage and exit
@@ -202,9 +211,10 @@ void util_version(void) __attribute__((noreturn));
* @param long_opts The structure of long options
* @param opts_help An array of help strings (should align with long_opts)
*/
-void util_usage(const char *errmsg, const char *synopsis,
- const char *short_opts, struct option const long_opts[],
- const char * const opts_help[]) __attribute__((noreturn));
+void NORETURN util_usage(const char *errmsg, const char *synopsis,
+ const char *short_opts,
+ struct option const long_opts[],
+ const char * const opts_help[]);
/**
* Show usage and exit
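What the PRINTF()/NORETURN wrappers buy in practice, shown with a standalone die()-style function (the function name and message are made up):

	#include <stdarg.h>
	#include <stdio.h>
	#include <stdlib.h>

	#ifdef __GNUC__
	#define PRINTF(i, j)	__attribute__((format(printf, i, j)))
	#define NORETURN	__attribute__((noreturn))
	#else
	#define PRINTF(i, j)
	#define NORETURN
	#endif

	/* With PRINTF(1, 2) the compiler can now warn about mismatched format
	 * strings (e.g. "%d" used for a size_t) instead of silently accepting them. */
	static void NORETURN PRINTF(1, 2) fail(const char *fmt, ...)
	{
		va_list ap;

		va_start(ap, fmt);
		vfprintf(stderr, fmt, ap);
		va_end(ap);
		exit(1);
	}

	int main(void)
	{
		fail("bad value %d\n", 42);
	}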
diff --git a/scripts/dtc/version_gen.h b/scripts/dtc/version_gen.h
index 16c2e53a85e3..1229e07b4912 100644
--- a/scripts/dtc/version_gen.h
+++ b/scripts/dtc/version_gen.h
@@ -1 +1 @@
-#define DTC_VERSION "DTC 1.4.2-g0931cea3"
+#define DTC_VERSION "DTC 1.4.4-g756ffc4f"
diff --git a/scripts/module-common.lds b/scripts/module-common.lds
index 9b6e246a45d0..d61b9e8678e8 100644
--- a/scripts/module-common.lds
+++ b/scripts/module-common.lds
@@ -20,8 +20,7 @@ SECTIONS {
__kcrctab_unused_gpl 0 : { *(SORT(___kcrctab_unused_gpl+*)) }
__kcrctab_gpl_future 0 : { *(SORT(___kcrctab_gpl_future+*)) }
- . = ALIGN(8);
- .init_array 0 : { *(SORT(.init_array.*)) *(.init_array) }
+ .init_array 0 : ALIGN(8) { *(SORT(.init_array.*)) *(.init_array) }
__jump_table 0 : ALIGN(8) { KEEP(*(__jump_table)) }
}
diff --git a/scripts/spelling.txt b/scripts/spelling.txt
index b67e74b22826..eb38f49d4b75 100644
--- a/scripts/spelling.txt
+++ b/scripts/spelling.txt
@@ -179,6 +179,7 @@ bakup||backup
baloon||balloon
baloons||balloons
bandwith||bandwidth
+banlance||balance
batery||battery
beacuse||because
becasue||because
@@ -375,6 +376,8 @@ dictionnary||dictionary
didnt||didn't
diferent||different
differrence||difference
+diffrent||different
+diffrentiate||differentiate
difinition||definition
diplay||display
direectly||directly
@@ -605,6 +608,9 @@ interruptted||interrupted
interupted||interrupted
interupt||interrupt
intial||initial
+intialisation||initialisation
+intialised||initialised
+intialise||initialise
intialization||initialization
intialized||initialized
intialize||initialize
@@ -691,6 +697,7 @@ miximum||maximum
mmnemonic||mnemonic
mnay||many
modulues||modules
+momery||memory
monochorome||monochrome
monochromo||monochrome
monocrome||monochrome
@@ -890,6 +897,7 @@ registerd||registered
registeresd||registered
registes||registers
registraration||registration
+regsiter||register
regster||register
regualar||regular
reguator||regulator
diff --git a/scripts/ver_linux b/scripts/ver_linux
index 430b201f3e25..b51de8a7e2a3 100755
--- a/scripts/ver_linux
+++ b/scripts/ver_linux
@@ -1,4 +1,4 @@
-#!/bin/awk -f
+#!/usr/bin/awk -f
# Before running this script please ensure that your PATH is
# typical as you use for compilation/installation. I use
# /bin /sbin /usr/bin /usr/sbin /usr/local/bin, but it may
diff --git a/security/apparmor/apparmorfs.c b/security/apparmor/apparmorfs.c
index 41073f70eb41..4f6ac9dbc65d 100644
--- a/security/apparmor/apparmorfs.c
+++ b/security/apparmor/apparmorfs.c
@@ -98,7 +98,7 @@ static struct aa_loaddata *aa_simple_write_to_buffer(const char __user *userbuf,
return ERR_PTR(-ESPIPE);
/* freed by caller to simple_write_to_buffer */
- data = kvmalloc(sizeof(*data) + alloc_size);
+ data = kvmalloc(sizeof(*data) + alloc_size, GFP_KERNEL);
if (data == NULL)
return ERR_PTR(-ENOMEM);
kref_init(&data->count);
@@ -1357,7 +1357,7 @@ static int aa_mk_null_file(struct dentry *parent)
inode->i_ino = get_next_ino();
inode->i_mode = S_IFCHR | S_IRUGO | S_IWUGO;
- inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+ inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
init_special_inode(inode, S_IFCHR | S_IRUGO | S_IWUGO,
MKDEV(MEM_MAJOR, 3));
d_instantiate(dentry, inode);
diff --git a/security/apparmor/include/lib.h b/security/apparmor/include/lib.h
index 0291ff3902f9..550a700563b4 100644
--- a/security/apparmor/include/lib.h
+++ b/security/apparmor/include/lib.h
@@ -64,17 +64,6 @@ char *aa_split_fqname(char *args, char **ns_name);
const char *aa_splitn_fqname(const char *fqname, size_t n, const char **ns_name,
size_t *ns_len);
void aa_info_message(const char *str);
-void *__aa_kvmalloc(size_t size, gfp_t flags);
-
-static inline void *kvmalloc(size_t size)
-{
- return __aa_kvmalloc(size, 0);
-}
-
-static inline void *kvzalloc(size_t size)
-{
- return __aa_kvmalloc(size, __GFP_ZERO);
-}
/**
* aa_strneq - compare null terminated @str to a non null terminated substring
diff --git a/security/apparmor/lib.c b/security/apparmor/lib.c
index 32cafc12593e..7cd788a9445b 100644
--- a/security/apparmor/lib.c
+++ b/security/apparmor/lib.c
@@ -129,36 +129,6 @@ void aa_info_message(const char *str)
}
/**
- * __aa_kvmalloc - do allocation preferring kmalloc but falling back to vmalloc
- * @size: how many bytes of memory are required
- * @flags: the type of memory to allocate (see kmalloc).
- *
- * Return: allocated buffer or NULL if failed
- *
- * It is possible that policy being loaded from the user is larger than
- * what can be allocated by kmalloc, in those cases fall back to vmalloc.
- */
-void *__aa_kvmalloc(size_t size, gfp_t flags)
-{
- void *buffer = NULL;
-
- if (size == 0)
- return NULL;
-
- /* do not attempt kmalloc if we need more than 16 pages at once */
- if (size <= (16*PAGE_SIZE))
- buffer = kmalloc(size, flags | GFP_KERNEL | __GFP_NORETRY |
- __GFP_NOWARN);
- if (!buffer) {
- if (flags & __GFP_ZERO)
- buffer = vzalloc(size);
- else
- buffer = vmalloc(size);
- }
- return buffer;
-}
-
-/**
* aa_policy_init - initialize a policy structure
* @policy: policy to initialize (NOT NULL)
* @prefix: prefix name if any is required. (MAYBE NULL)
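A sketch of the allocation pattern the AppArmor conversion moves to, now that the generic kvmalloc() takes gfp flags; the function name and caller contract are invented:

	#include <linux/mm.h>		/* kvmalloc(), kvfree() */
	#include <linux/slab.h>

	/* Let the core helper decide between kmalloc() and vmalloc() instead of
	 * open-coding the fallback. */
	static void *load_policy_blob(size_t size)
	{
		void *buf = kvmalloc(size, GFP_KERNEL);	/* kvzalloc() for zeroed memory */

		/* The caller frees with kvfree(), which handles either backing store. */
		return buf;
	}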
diff --git a/security/apparmor/match.c b/security/apparmor/match.c
index eb0efef746f5..960c913381e2 100644
--- a/security/apparmor/match.c
+++ b/security/apparmor/match.c
@@ -88,7 +88,7 @@ static struct table_header *unpack_table(char *blob, size_t bsize)
if (bsize < tsize)
goto out;
- table = kvzalloc(tsize);
+ table = kvzalloc(tsize, GFP_KERNEL);
if (table) {
table->td_id = th.td_id;
table->td_flags = th.td_flags;
diff --git a/security/apparmor/policy_unpack.c b/security/apparmor/policy_unpack.c
index 2e37c9c26bbd..f3422a91353c 100644
--- a/security/apparmor/policy_unpack.c
+++ b/security/apparmor/policy_unpack.c
@@ -487,7 +487,7 @@ fail:
static void *kvmemdup(const void *src, size_t len)
{
- void *p = kvmalloc(len);
+ void *p = kvmalloc(len, GFP_KERNEL);
if (p)
memcpy(p, src, len);
diff --git a/security/inode.c b/security/inode.c
index 2cb14162ff8d..eccd58ef2ae8 100644
--- a/security/inode.c
+++ b/security/inode.c
@@ -28,7 +28,7 @@ static int mount_count;
static int fill_super(struct super_block *sb, void *data, int silent)
{
- static struct tree_descr files[] = {{""}};
+ static const struct tree_descr files[] = {{""}};
return simple_fill_super(sb, SECURITYFS_MAGIC, files);
}
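A sketch of the constified pattern, assuming the simple_fill_super() prototype updated by this series to take a const table; the magic number and file list are placeholders:

	#include <linux/fs.h>
	#include <linux/magic.h>

	/* With simple_fill_super() taking 'const struct tree_descr *', the table
	 * can live in rodata. */
	static int demo_fill_super(struct super_block *sb, void *data, int silent)
	{
		static const struct tree_descr files[] = {{""}};

		return simple_fill_super(sb, SECURITYFS_MAGIC, files);
	}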
diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
index 82a9e1851108..447a7d5cee0f 100644
--- a/security/keys/keyctl.c
+++ b/security/keys/keyctl.c
@@ -101,14 +101,9 @@ SYSCALL_DEFINE5(add_key, const char __user *, _type,
if (_payload) {
ret = -ENOMEM;
- payload = kmalloc(plen, GFP_KERNEL | __GFP_NOWARN);
- if (!payload) {
- if (plen <= PAGE_SIZE)
- goto error2;
- payload = vmalloc(plen);
- if (!payload)
- goto error2;
- }
+ payload = kvmalloc(plen, GFP_KERNEL);
+ if (!payload)
+ goto error2;
ret = -EFAULT;
if (copy_from_user(payload, _payload, plen) != 0)
@@ -1071,14 +1066,9 @@ long keyctl_instantiate_key_common(key_serial_t id,
if (from) {
ret = -ENOMEM;
- payload = kmalloc(plen, GFP_KERNEL);
- if (!payload) {
- if (plen <= PAGE_SIZE)
- goto error;
- payload = vmalloc(plen);
- if (!payload)
- goto error;
- }
+ payload = kvmalloc(plen, GFP_KERNEL);
+ if (!payload)
+ goto error;
ret = -EFAULT;
if (!copy_from_iter_full(payload, plen, from))
diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c
index ce7171884223..50062e70140d 100644
--- a/security/selinux/selinuxfs.c
+++ b/security/selinux/selinuxfs.c
@@ -1496,7 +1496,7 @@ static const struct file_operations sel_avc_cache_stats_ops = {
static int sel_make_avc_files(struct dentry *dir)
{
int i;
- static struct tree_descr files[] = {
+ static const struct tree_descr files[] = {
{ "cache_threshold",
&sel_avc_cache_threshold_ops, S_IRUGO|S_IWUSR },
{ "hash_stats", &sel_avc_hash_stats_ops, S_IRUGO },
@@ -1805,7 +1805,7 @@ static int sel_fill_super(struct super_block *sb, void *data, int silent)
struct inode *inode;
struct inode_security_struct *isec;
- static struct tree_descr selinux_files[] = {
+ static const struct tree_descr selinux_files[] = {
[SEL_LOAD] = {"load", &sel_load_ops, S_IRUSR|S_IWUSR},
[SEL_ENFORCE] = {"enforce", &sel_enforce_ops, S_IRUGO|S_IWUSR},
[SEL_CONTEXT] = {"context", &transaction_ops, S_IRUGO|S_IWUGO},
diff --git a/security/smack/smackfs.c b/security/smack/smackfs.c
index 366b8356f75b..f6482e53d55a 100644
--- a/security/smack/smackfs.c
+++ b/security/smack/smackfs.c
@@ -2855,7 +2855,7 @@ static int smk_fill_super(struct super_block *sb, void *data, int silent)
int rc;
struct inode *root_inode;
- static struct tree_descr smack_files[] = {
+ static const struct tree_descr smack_files[] = {
[SMK_LOAD] = {
"load", &smk_load_ops, S_IRUGO|S_IWUSR},
[SMK_CIPSO] = {
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index b786fbab029f..1770f085c2a6 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -53,7 +53,7 @@
#ifdef CONFIG_X86
/* for snoop control */
#include <asm/pgtable.h>
-#include <asm/cacheflush.h>
+#include <asm/set_memory.h>
#include <asm/cpufeature.h>
#endif
#include <sound/core.h>
diff --git a/sound/pci/intel8x0.c b/sound/pci/intel8x0.c
index 9720a30dbfff..6d17b171c17b 100644
--- a/sound/pci/intel8x0.c
+++ b/sound/pci/intel8x0.c
@@ -40,7 +40,9 @@
#include <sound/initval.h>
/* for 440MX workaround */
#include <asm/pgtable.h>
-#include <asm/cacheflush.h>
+#ifdef CONFIG_X86
+#include <asm/set_memory.h>
+#endif
MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>");
MODULE_DESCRIPTION("Intel 82801AA,82901AB,i810,i820,i830,i840,i845,MX440; SiS 7012; Ali 5455");
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index 525f2f397b4c..aae099c0e502 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -936,7 +936,7 @@ static struct snd_soc_component *soc_find_component(
*
* @dlc: name of the DAI and optional component info to match
*
- * This function will search all regsitered components and their DAIs to
+ * This function will search all registered components and their DAIs to
* find the DAI of the same name. The component's of_node and name
* should also match if being specified.
*
diff --git a/sound/x86/intel_hdmi_audio.c b/sound/x86/intel_hdmi_audio.c
index c505b019e09c..664b7fe206d6 100644
--- a/sound/x86/intel_hdmi_audio.c
+++ b/sound/x86/intel_hdmi_audio.c
@@ -30,7 +30,7 @@
#include <linux/pm_runtime.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
-#include <asm/cacheflush.h>
+#include <asm/set_memory.h>
#include <sound/core.h>
#include <sound/asoundef.h>
#include <sound/pcm.h>
diff --git a/tools/Makefile b/tools/Makefile
index 00caacd3ed92..c8a90d01dd8e 100644
--- a/tools/Makefile
+++ b/tools/Makefile
@@ -86,10 +86,13 @@ tmon: FORCE
freefall: FORCE
$(call descend,laptop/$@)
+kvm_stat: FORCE
+ $(call descend,kvm/$@)
+
all: acpi cgroup cpupower gpio hv firewire lguest \
perf selftests turbostat usb \
virtio vm net x86_energy_perf_policy \
- tmon freefall objtool
+ tmon freefall objtool kvm_stat
acpi_install:
$(call descend,power/$(@:_install=),install)
diff --git a/tools/arch/s390/include/uapi/asm/kvm.h b/tools/arch/s390/include/uapi/asm/kvm.h
index a2ffec4139ad..7f4fd65e9208 100644
--- a/tools/arch/s390/include/uapi/asm/kvm.h
+++ b/tools/arch/s390/include/uapi/asm/kvm.h
@@ -131,7 +131,8 @@ struct kvm_s390_vm_cpu_subfunc {
__u8 kmo[16]; /* with MSA4 */
__u8 pcc[16]; /* with MSA4 */
__u8 ppno[16]; /* with MSA5 */
- __u8 reserved[1824];
+ __u8 kma[16]; /* with MSA8 */
+ __u8 reserved[1808];
};
/* kvm attributes for crypto */
diff --git a/tools/include/uapi/linux/perf_event.h b/tools/include/uapi/linux/perf_event.h
index d09a9cd021b1..b1c0b187acfe 100644
--- a/tools/include/uapi/linux/perf_event.h
+++ b/tools/include/uapi/linux/perf_event.h
@@ -922,6 +922,7 @@ enum perf_callchain_context {
#define PERF_FLAG_PID_CGROUP (1UL << 2) /* pid=cgroup id, per-cpu mode only */
#define PERF_FLAG_FD_CLOEXEC (1UL << 3) /* O_CLOEXEC */
+#if defined(__LITTLE_ENDIAN_BITFIELD)
union perf_mem_data_src {
__u64 val;
struct {
@@ -933,6 +934,21 @@ union perf_mem_data_src {
mem_rsvd:31;
};
};
+#elif defined(__BIG_ENDIAN_BITFIELD)
+union perf_mem_data_src {
+ __u64 val;
+ struct {
+ __u64 mem_rsvd:31,
+ mem_dtlb:7, /* tlb access */
+ mem_lock:2, /* lock instr */
+ mem_snoop:5, /* snoop mode */
+ mem_lvl:14, /* memory hierarchy level */
+ mem_op:5; /* type of opcode */
+ };
+};
+#else
+#error "Unknown endianness"
+#endif
/* type of opcode (load/store/prefetch,code) */
#define PERF_MEM_OP_NA 0x01 /* not available */
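The little-endian union above places the first-declared bitfield in the least-significant bits, so a raw val can be decoded with plain shifts and masks. A minimal Python sketch of that decoding (field widths taken from the struct above; the sample value is arbitrary):

    # Decode a perf_mem_data_src value per the little-endian layout above:
    # mem_op:5 (bits 0-4), mem_lvl:14 (5-18), mem_snoop:5 (19-23),
    # mem_lock:2 (24-25), mem_dtlb:7 (26-32), mem_rsvd:31 (33-63).
    def decode_mem_data_src(val):
        return {
            'mem_op':    val         & 0x1f,
            'mem_lvl':   (val >> 5)  & 0x3fff,
            'mem_snoop': (val >> 19) & 0x1f,
            'mem_lock':  (val >> 24) & 0x3,
            'mem_dtlb':  (val >> 26) & 0x7f,
        }

    print(decode_mem_data_src(0x5242))  # arbitrary sample value
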
diff --git a/tools/kvm/kvm_stat/kvm_stat b/tools/kvm/kvm_stat/kvm_stat
index 581278c58488..8f74ed8e7237 100755
--- a/tools/kvm/kvm_stat/kvm_stat
+++ b/tools/kvm/kvm_stat/kvm_stat
@@ -30,8 +30,8 @@ import fcntl
import resource
import struct
import re
+import subprocess
from collections import defaultdict
-from time import sleep
VMX_EXIT_REASONS = {
'EXCEPTION_NMI': 0,
@@ -225,6 +225,7 @@ IOCTL_NUMBERS = {
'RESET': 0x00002403,
}
+
class Arch(object):
"""Encapsulates global architecture specific data.
@@ -255,12 +256,14 @@ class Arch(object):
return ArchX86(SVM_EXIT_REASONS)
return
+
class ArchX86(Arch):
def __init__(self, exit_reasons):
self.sc_perf_evt_open = 298
self.ioctl_numbers = IOCTL_NUMBERS
self.exit_reasons = exit_reasons
+
class ArchPPC(Arch):
def __init__(self):
self.sc_perf_evt_open = 319
@@ -275,12 +278,14 @@ class ArchPPC(Arch):
self.ioctl_numbers['SET_FILTER'] = 0x80002406 | char_ptr_size << 16
self.exit_reasons = {}
+
class ArchA64(Arch):
def __init__(self):
self.sc_perf_evt_open = 241
self.ioctl_numbers = IOCTL_NUMBERS
self.exit_reasons = AARCH64_EXIT_REASONS
+
class ArchS390(Arch):
def __init__(self):
self.sc_perf_evt_open = 331
@@ -316,6 +321,61 @@ def parse_int_list(list_string):
return integers
+def get_pid_from_gname(gname):
+ """Fuzzy function to convert guest name to QEMU process pid.
+
+ Returns a list of potential pids, can be empty if no match found.
+ Throws an exception on processing errors.
+
+ """
+ pids = []
+ try:
+ child = subprocess.Popen(['ps', '-A', '--format', 'pid,args'],
+ stdout=subprocess.PIPE)
+ except:
+ raise Exception
+ for line in child.stdout:
+ line = line.lstrip().split(' ', 1)
+ # perform a sanity check before calling the more expensive
+ # function to possibly extract the guest name
+ if ' -name ' in line[1] and gname == get_gname_from_pid(line[0]):
+ pids.append(int(line[0]))
+ child.stdout.close()
+
+ return pids
+
+
+def get_gname_from_pid(pid):
+ """Returns the guest name for a QEMU process pid.
+
+ Extracts the guest name from the QEMU command line by processing the '-name'
+ option. Will also handle names specified out of sequence.
+
+ """
+ name = ''
+ try:
+ line = open('/proc/{}/cmdline'.format(pid), 'rb').read().split('\0')
+ parms = line[line.index('-name') + 1].split(',')
+ while '' in parms:
+ # commas are escaped (i.e. ',,'), hence e.g. 'foo,bar' results in
+ # ['foo', '', 'bar'], which we revert here
+ idx = parms.index('')
+ parms[idx - 1] += ',' + parms[idx + 1]
+ del parms[idx:idx+2]
+ # the '-name' switch allows for two ways to specify the guest name,
+ # where the plain name overrides the name specified via 'guest='
+ for arg in parms:
+ if '=' not in arg:
+ name = arg
+ break
+ if arg[:6] == 'guest=':
+ name = arg[6:]
+ except (ValueError, IOError, IndexError):
+ pass
+
+ return name
+
+
def get_online_cpus():
"""Returns a list of cpu id integers."""
with open('/sys/devices/system/cpu/online') as cpu_list:
@@ -342,6 +402,7 @@ def get_filters():
libc = ctypes.CDLL('libc.so.6', use_errno=True)
syscall = libc.syscall
+
class perf_event_attr(ctypes.Structure):
"""Struct that holds the necessary data to set up a trace event.
@@ -370,6 +431,7 @@ class perf_event_attr(ctypes.Structure):
self.size = ctypes.sizeof(self)
self.read_format = PERF_FORMAT_GROUP
+
def perf_event_open(attr, pid, cpu, group_fd, flags):
"""Wrapper for the sys_perf_evt_open() syscall.
@@ -395,6 +457,7 @@ PERF_FORMAT_GROUP = 1 << 3
PATH_DEBUGFS_TRACING = '/sys/kernel/debug/tracing'
PATH_DEBUGFS_KVM = '/sys/kernel/debug/kvm'
+
class Group(object):
"""Represents a perf event group."""
@@ -427,6 +490,7 @@ class Group(object):
struct.unpack(read_format,
os.read(self.events[0].fd, length))))
+
class Event(object):
"""Represents a performance event and manages its life cycle."""
def __init__(self, name, group, trace_cpu, trace_pid, trace_point,
@@ -510,6 +574,7 @@ class Event(object):
"""Resets the count of the trace event in the kernel."""
fcntl.ioctl(self.fd, ARCH.ioctl_numbers['RESET'], 0)
+
class TracepointProvider(object):
"""Data provider for the stats class.
@@ -551,6 +616,7 @@ class TracepointProvider(object):
def setup_traces(self):
"""Creates all event and group objects needed to be able to retrieve
data."""
+ fields = self.get_available_fields()
if self._pid > 0:
# Fetch list of all threads of the monitored pid, as qemu
# starts a thread for each vcpu.
@@ -561,7 +627,7 @@ class TracepointProvider(object):
# The constant is needed as a buffer for python libs, std
# streams and other files that the script opens.
- newlim = len(groupids) * len(self._fields) + 50
+ newlim = len(groupids) * len(fields) + 50
try:
softlim_, hardlim = resource.getrlimit(resource.RLIMIT_NOFILE)
@@ -577,7 +643,7 @@ class TracepointProvider(object):
for groupid in groupids:
group = Group()
- for name in self._fields:
+ for name in fields:
tracepoint = name
tracefilter = None
match = re.match(r'(.*)\((.*)\)', name)
@@ -650,13 +716,23 @@ class TracepointProvider(object):
ret[name] += val
return ret
+ def reset(self):
+ """Reset all field counters"""
+ for group in self.group_leaders:
+ for event in group.events:
+ event.reset()
+
+
class DebugfsProvider(object):
"""Provides data from the files that KVM creates in the kvm debugfs
folder."""
def __init__(self):
self._fields = self.get_available_fields()
+ self._baseline = {}
self._pid = 0
self.do_read = True
+ self.paths = []
+ self.reset()
def get_available_fields(self):
""""Returns a list of available fields.
@@ -673,6 +749,7 @@ class DebugfsProvider(object):
@fields.setter
def fields(self, fields):
self._fields = fields
+ self.reset()
@property
def pid(self):
@@ -690,10 +767,11 @@ class DebugfsProvider(object):
self.paths = filter(lambda x: "{}-".format(pid) in x, vms)
else:
- self.paths = ['']
+ self.paths = []
self.do_read = True
+ self.reset()
- def read(self):
+ def read(self, reset=0):
"""Returns a dict with format:'file name / field -> current value'."""
results = {}
@@ -701,10 +779,22 @@ class DebugfsProvider(object):
if not self.do_read:
return results
- for path in self.paths:
+ paths = self.paths
+ if self._pid == 0:
+ paths = []
+ for entry in os.walk(PATH_DEBUGFS_KVM):
+ for dir in entry[1]:
+ paths.append(dir)
+ for path in paths:
for field in self._fields:
- results[field] = results.get(field, 0) \
- + self.read_field(field, path)
+ value = self.read_field(field, path)
+ key = path + field
+ if reset:
+ self._baseline[key] = value
+ if self._baseline.get(key, -1) == -1:
+ self._baseline[key] = value
+ results[field] = (results.get(field, 0) + value -
+ self._baseline.get(key, 0))
return results
@@ -718,6 +808,12 @@ class DebugfsProvider(object):
except IOError:
return 0
+ def reset(self):
+ """Reset field counters"""
+ self._baseline = {}
+ self.read(1)
+
+
class Stats(object):
"""Manages the data providers and the data they provide.
@@ -753,14 +849,20 @@ class Stats(object):
for provider in self.providers:
provider.pid = self._pid_filter
+ def reset(self):
+ self.values = {}
+ for provider in self.providers:
+ provider.reset()
+
@property
def fields_filter(self):
return self._fields_filter
@fields_filter.setter
def fields_filter(self, fields_filter):
- self._fields_filter = fields_filter
- self.update_provider_filters()
+ if fields_filter != self._fields_filter:
+ self._fields_filter = fields_filter
+ self.update_provider_filters()
@property
def pid_filter(self):
@@ -768,9 +870,10 @@ class Stats(object):
@pid_filter.setter
def pid_filter(self, pid):
- self._pid_filter = pid
- self.values = {}
- self.update_provider_pid()
+ if pid != self._pid_filter:
+ self._pid_filter = pid
+ self.values = {}
+ self.update_provider_pid()
def get(self):
"""Returns a dict with field -> (value, delta to last value) of all
@@ -778,23 +881,26 @@ class Stats(object):
for provider in self.providers:
new = provider.read()
for key in provider.fields:
- oldval = self.values.get(key, (0, 0))
+ oldval = self.values.get(key, (0, 0))[0]
newval = new.get(key, 0)
- newdelta = None
- if oldval is not None:
- newdelta = newval - oldval[0]
+ newdelta = newval - oldval
self.values[key] = (newval, newdelta)
return self.values
LABEL_WIDTH = 40
NUMBER_WIDTH = 10
+DELAY_INITIAL = 0.25
+DELAY_REGULAR = 3.0
+MAX_GUEST_NAME_LEN = 48
+MAX_REGEX_LEN = 44
+DEFAULT_REGEX = r'^[^\(]*$'
+
class Tui(object):
"""Instruments curses to draw a nice text ui."""
def __init__(self, stats):
self.stats = stats
self.screen = None
- self.drilldown = False
self.update_drilldown()
def __enter__(self):
@@ -809,7 +915,14 @@ class Tui(object):
# return from C start_color() is ignorable.
try:
curses.start_color()
- except:
+ except curses.error:
+ pass
+
+ # Hide cursor in extra statement as some monochrome terminals
+ # might support hiding but not colors.
+ try:
+ curses.curs_set(0)
+ except curses.error:
pass
curses.use_default_colors()
@@ -827,36 +940,60 @@ class Tui(object):
def update_drilldown(self):
"""Sets or removes a filter that only allows fields without braces."""
if not self.stats.fields_filter:
- self.stats.fields_filter = r'^[^\(]*$'
+ self.stats.fields_filter = DEFAULT_REGEX
- elif self.stats.fields_filter == r'^[^\(]*$':
+ elif self.stats.fields_filter == DEFAULT_REGEX:
self.stats.fields_filter = None
def update_pid(self, pid):
"""Propagates pid selection to stats object."""
self.stats.pid_filter = pid
- def refresh(self, sleeptime):
- """Refreshes on-screen data."""
+ def refresh_header(self, pid=None):
+ """Refreshes the header."""
+ if pid is None:
+ pid = self.stats.pid_filter
self.screen.erase()
- if self.stats.pid_filter > 0:
- self.screen.addstr(0, 0, 'kvm statistics - pid {0}'
- .format(self.stats.pid_filter),
- curses.A_BOLD)
+ gname = get_gname_from_pid(pid)
+ if gname:
+ gname = ('({})'.format(gname[:MAX_GUEST_NAME_LEN] + '...'
+ if len(gname) > MAX_GUEST_NAME_LEN
+ else gname))
+ if pid > 0:
+ self.screen.addstr(0, 0, 'kvm statistics - pid {0} {1}'
+ .format(pid, gname), curses.A_BOLD)
else:
self.screen.addstr(0, 0, 'kvm statistics - summary', curses.A_BOLD)
+ if self.stats.fields_filter and self.stats.fields_filter \
+ != DEFAULT_REGEX:
+ regex = self.stats.fields_filter
+ if len(regex) > MAX_REGEX_LEN:
+ regex = regex[:MAX_REGEX_LEN] + '...'
+ self.screen.addstr(1, 17, 'regex filter: {0}'.format(regex))
self.screen.addstr(2, 1, 'Event')
self.screen.addstr(2, 1 + LABEL_WIDTH + NUMBER_WIDTH -
len('Total'), 'Total')
- self.screen.addstr(2, 1 + LABEL_WIDTH + NUMBER_WIDTH + 8 -
+ self.screen.addstr(2, 1 + LABEL_WIDTH + NUMBER_WIDTH + 7 -
+ len('%Total'), '%Total')
+ self.screen.addstr(2, 1 + LABEL_WIDTH + NUMBER_WIDTH + 7 + 8 -
len('Current'), 'Current')
+ self.screen.addstr(4, 1, 'Collecting data...')
+ self.screen.refresh()
+
+ def refresh_body(self, sleeptime):
row = 3
+ self.screen.move(row, 0)
+ self.screen.clrtobot()
stats = self.stats.get()
+
def sortkey(x):
if stats[x][1]:
return (-stats[x][1], -stats[x][0])
else:
return (0, -stats[x][0])
+ total = 0.
+ for val in stats.values():
+ total += val[0]
for key in sorted(stats.keys(), key=sortkey):
if row >= self.screen.getmaxyx()[0]:
@@ -869,6 +1006,8 @@ class Tui(object):
col += LABEL_WIDTH
self.screen.addstr(row, col, '%10d' % (values[0],))
col += NUMBER_WIDTH
+ self.screen.addstr(row, col, '%7.1f' % (values[0] * 100 / total,))
+ col += 7
if values[1] is not None:
self.screen.addstr(row, col, '%8d' % (values[1] / sleeptime,))
row += 1
@@ -893,20 +1032,24 @@ class Tui(object):
regex = self.screen.getstr()
curses.noecho()
if len(regex) == 0:
+ self.stats.fields_filter = DEFAULT_REGEX
+ self.refresh_header()
return
try:
re.compile(regex)
self.stats.fields_filter = regex
+ self.refresh_header()
return
except re.error:
continue
- def show_vm_selection(self):
+ def show_vm_selection_by_pid(self):
"""Draws PID selection mask.
Asks for a pid until a valid pid or 0 has been entered.
"""
+ msg = ''
while True:
self.screen.erase()
self.screen.addstr(0, 0,
@@ -915,6 +1058,7 @@ class Tui(object):
self.screen.addstr(1, 0,
'This might limit the shown data to the trace '
'statistics.')
+ self.screen.addstr(5, 0, msg)
curses.echo()
self.screen.addstr(3, 0, "Pid [0 or pid]: ")
@@ -922,60 +1066,128 @@ class Tui(object):
curses.noecho()
try:
- pid = int(pid)
-
- if pid == 0:
- self.update_pid(pid)
- break
- else:
- if not os.path.isdir(os.path.join('/proc/', str(pid))):
+ if len(pid) > 0:
+ pid = int(pid)
+ if pid != 0 and not os.path.isdir(os.path.join('/proc/',
+ str(pid))):
+ msg = '"' + str(pid) + '": Not a running process'
continue
- else:
- self.update_pid(pid)
- break
+ else:
+ pid = 0
+ self.refresh_header(pid)
+ self.update_pid(pid)
+ break
except ValueError:
+ msg = '"' + str(pid) + '": Not a valid pid'
continue
+ def show_vm_selection_by_guest_name(self):
+ """Draws guest selection mask.
+
+ Asks for a guest name until a valid guest name or '' is entered.
+
+ """
+ msg = ''
+ while True:
+ self.screen.erase()
+ self.screen.addstr(0, 0,
+ 'Show statistics for specific guest.',
+ curses.A_BOLD)
+ self.screen.addstr(1, 0,
+ 'This might limit the shown data to the trace '
+ 'statistics.')
+ self.screen.addstr(5, 0, msg)
+ curses.echo()
+ self.screen.addstr(3, 0, "Guest [ENTER or guest]: ")
+ gname = self.screen.getstr()
+ curses.noecho()
+
+ if not gname:
+ self.refresh_header(0)
+ self.update_pid(0)
+ break
+ else:
+ pids = []
+ try:
+ pids = get_pid_from_gname(gname)
+ except:
+ msg = '"' + gname + '": Internal error while searching, ' \
+ 'use pid filter instead'
+ continue
+ if len(pids) == 0:
+ msg = '"' + gname + '": Not an active guest'
+ continue
+ if len(pids) > 1:
+ msg = '"' + gname + '": Multiple matches found, use pid ' \
+ 'filter instead'
+ continue
+ self.refresh_header(pids[0])
+ self.update_pid(pids[0])
+ break
+
def show_stats(self):
"""Refreshes the screen and processes user input."""
- sleeptime = 0.25
+ sleeptime = DELAY_INITIAL
+ self.refresh_header()
while True:
- self.refresh(sleeptime)
+ self.refresh_body(sleeptime)
curses.halfdelay(int(sleeptime * 10))
- sleeptime = 3
+ sleeptime = DELAY_REGULAR
try:
char = self.screen.getkey()
if char == 'x':
- self.drilldown = not self.drilldown
+ self.refresh_header()
self.update_drilldown()
+ sleeptime = DELAY_INITIAL
if char == 'q':
break
+ if char == 'c':
+ self.stats.fields_filter = DEFAULT_REGEX
+ self.refresh_header(0)
+ self.update_pid(0)
+ sleeptime = DELAY_INITIAL
if char == 'f':
self.show_filter_selection()
+ sleeptime = DELAY_INITIAL
+ if char == 'g':
+ self.show_vm_selection_by_guest_name()
+ sleeptime = DELAY_INITIAL
if char == 'p':
- self.show_vm_selection()
+ self.show_vm_selection_by_pid()
+ sleeptime = DELAY_INITIAL
+ if char == 'r':
+ self.refresh_header()
+ self.stats.reset()
+ sleeptime = DELAY_INITIAL
except KeyboardInterrupt:
break
except curses.error:
continue
+
def batch(stats):
"""Prints statistics in a key, value format."""
- s = stats.get()
- time.sleep(1)
- s = stats.get()
- for key in sorted(s.keys()):
- values = s[key]
- print '%-42s%10d%10d' % (key, values[0], values[1])
+ try:
+ s = stats.get()
+ time.sleep(1)
+ s = stats.get()
+ for key in sorted(s.keys()):
+ values = s[key]
+ print '%-42s%10d%10d' % (key, values[0], values[1])
+ except KeyboardInterrupt:
+ pass
+
def log(stats):
"""Prints statistics as reiterating key block, multiple value blocks."""
keys = sorted(stats.get().iterkeys())
+
def banner():
for k in keys:
print '%s' % k,
print
+
def statline():
s = stats.get()
for k in keys:
@@ -984,11 +1196,15 @@ def log(stats):
line = 0
banner_repeat = 20
while True:
- time.sleep(1)
- if line % banner_repeat == 0:
- banner()
- statline()
- line += 1
+ try:
+ time.sleep(1)
+ if line % banner_repeat == 0:
+ banner()
+ statline()
+ line += 1
+ except KeyboardInterrupt:
+ break
+
def get_options():
"""Returns processed program arguments."""
@@ -1009,6 +1225,16 @@ Requirements:
CAP_SYS_ADMIN and perf events are used.
- CAP_SYS_RESOURCE if the hard limit is not high enough to allow
the large number of files that are possibly opened.
+
+Interactive Commands:
+ c clear filter
+ f filter by regular expression
+ g filter by guest name
+ p filter by PID
+ q quit
+ x toggle reporting of stats for individual child trace events
+ r reset stats
+Press any other key to refresh statistics immediately.
"""
class PlainHelpFormatter(optparse.IndentedHelpFormatter):
@@ -1018,6 +1244,22 @@ Requirements:
else:
return ""
+ def cb_guest_to_pid(option, opt, val, parser):
+ try:
+ pids = get_pid_from_gname(val)
+ except:
+ raise optparse.OptionValueError('Error while searching for guest '
+ '"{}", use "-p" to specify a pid '
+ 'instead'.format(val))
+ if len(pids) == 0:
+ raise optparse.OptionValueError('No guest by the name "{}" '
+ 'found'.format(val))
+ if len(pids) > 1:
+ raise optparse.OptionValueError('Multiple processes found (pids: '
+ '{}) - use "-p" to specify a pid '
+ 'instead'.format(" ".join(pids)))
+ parser.values.pid = pids[0]
+
optparser = optparse.OptionParser(description=description_text,
formatter=PlainHelpFormatter())
optparser.add_option('-1', '--once', '--batch',
@@ -1051,15 +1293,24 @@ Requirements:
help='fields to display (regex)',
)
optparser.add_option('-p', '--pid',
- action='store',
- default=0,
- type=int,
- dest='pid',
- help='restrict statistics to pid',
- )
+ action='store',
+ default=0,
+ type='int',
+ dest='pid',
+ help='restrict statistics to pid',
+ )
+ optparser.add_option('-g', '--guest',
+ action='callback',
+ type='string',
+ dest='pid',
+ metavar='GUEST',
+ help='restrict statistics to guest by name',
+ callback=cb_guest_to_pid,
+ )
(options, _) = optparser.parse_args(sys.argv)
return options
+
def get_providers(options):
"""Returns a list of data providers depending on the passed options."""
providers = []
@@ -1073,6 +1324,7 @@ def get_providers(options):
return providers
+
def check_access(options):
"""Exits if the current user can't access all needed directories."""
if not os.path.exists('/sys/kernel/debug'):
@@ -1086,8 +1338,8 @@ def check_access(options):
"Also ensure, that the kvm modules are loaded.\n")
sys.exit(1)
- if not os.path.exists(PATH_DEBUGFS_TRACING) and (options.tracepoints
- or not options.debugfs):
+ if not os.path.exists(PATH_DEBUGFS_TRACING) and (options.tracepoints or
+ not options.debugfs):
sys.stderr.write("Please enable CONFIG_TRACING in your kernel "
"when using the option -t (default).\n"
"If it is enabled, make {0} readable by the "
@@ -1098,10 +1350,11 @@ def check_access(options):
sys.stderr.write("Falling back to debugfs statistics!\n")
options.debugfs = True
- sleep(5)
+ time.sleep(5)
return options
+
def main():
options = get_options()
options = check_access(options)
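The guest-name lookup added above has to undo QEMU's ',,' escaping when it splits the '-name' argument on commas. A standalone Python sketch of just that unescaping step (the sample argument string is invented):

    # 'foo,,bar' splits into ['foo', '', 'bar']; fold the empty element back
    # into a single 'foo,bar' entry, as the loop in the patch does.
    def unescape_name_parms(argstr):
        parms = argstr.split(',')
        while '' in parms:
            idx = parms.index('')
            parms[idx - 1] += ',' + parms[idx + 1]
            del parms[idx:idx + 2]
        return parms

    print(unescape_name_parms('guest=foo,,bar,debug-threads=on'))
    # ['guest=foo,bar', 'debug-threads=on']

The full helper then prefers a plain name over a 'guest=' value and returns an empty string when anything goes wrong.
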
diff --git a/tools/kvm/kvm_stat/kvm_stat.txt b/tools/kvm/kvm_stat/kvm_stat.txt
index b92a153d7115..109431bdc63c 100644
--- a/tools/kvm/kvm_stat/kvm_stat.txt
+++ b/tools/kvm/kvm_stat/kvm_stat.txt
@@ -18,11 +18,33 @@ state transitions such as guest mode entry and exit.
This tool is useful for observing guest behavior from the host perspective.
Often conclusions about performance or buggy behavior can be drawn from the
output.
+While running in regular mode, use any of the keys listed in section
+'Interactive Commands' below.
+Use batch and logging modes for scripting purposes.
The set of KVM kernel module trace events may be specific to the kernel version
or architecture. It is best to check the KVM kernel module source code for the
meaning of events.
+INTERACTIVE COMMANDS
+--------------------
+[horizontal]
+*c*:: clear filter
+
+*f*:: filter by regular expression
+
+*g*:: filter by guest name
+
+*p*:: filter by PID
+
+*q*:: quit
+
+*r*:: reset stats
+
+*x*:: toggle reporting of stats for child trace events
+
+Press any other key to refresh statistics immediately.
+
OPTIONS
-------
-1::
@@ -46,6 +68,10 @@ OPTIONS
--pid=<pid>::
limit statistics to one virtual machine (pid)
+-g<guest>::
+--guest=<guest_name>::
+ limit statistics to one virtual machine (guest name)
+
-f<fields>::
--fields=<fields>::
fields to display (regex)
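The 'r' (reset) command documented above does not clear any kernel counters; the script snapshots the current debugfs totals as a baseline and reports deltas against it. A minimal Python sketch of that idea, independent of the real DebugfsProvider (the field name is invented):

    # Baseline-based reset: kernel totals keep growing, the displayed value is
    # current - baseline, so a reset only re-snapshots the baseline.
    class Counter(object):
        def __init__(self, source):
            self.source = source          # callable returning {field: total}
            self.baseline = {}
            self.reset()

        def reset(self):
            self.baseline = dict(self.source())

        def read(self):
            return {k: v - self.baseline.get(k, 0)
                    for k, v in self.source().items()}

    totals = {'exits': 0}
    counter = Counter(lambda: dict(totals))
    totals['exits'] += 100
    print(counter.read())   # {'exits': 100}
    counter.reset()
    totals['exits'] += 5
    print(counter.read())   # {'exits': 5}
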
diff --git a/tools/pci/pcitest.c b/tools/pci/pcitest.c
new file mode 100644
index 000000000000..ad54a58d7dda
--- /dev/null
+++ b/tools/pci/pcitest.c
@@ -0,0 +1,186 @@
+/**
+ * Userspace PCI Endpoint Test Module
+ *
+ * Copyright (C) 2017 Texas Instruments
+ * Author: Kishon Vijay Abraham I <kishon@ti.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 of
+ * the License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <errno.h>
+#include <fcntl.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/ioctl.h>
+#include <time.h>
+#include <unistd.h>
+
+#include <linux/pcitest.h>
+
+#define BILLION 1E9
+
+static char *result[] = { "NOT OKAY", "OKAY" };
+
+struct pci_test {
+ char *device;
+ char barnum;
+ bool legacyirq;
+ unsigned int msinum;
+ bool read;
+ bool write;
+ bool copy;
+ unsigned long size;
+};
+
+static int run_test(struct pci_test *test)
+{
+ long ret;
+ int fd;
+ struct timespec start, end;
+ double time;
+
+ fd = open(test->device, O_RDWR);
+ if (fd < 0) {
+ perror("can't open PCI Endpoint Test device");
+ return fd;
+ }
+
+ if (test->barnum >= 0 && test->barnum <= 5) {
+ ret = ioctl(fd, PCITEST_BAR, test->barnum);
+ fprintf(stdout, "BAR%d:\t\t", test->barnum);
+ if (ret < 0)
+ fprintf(stdout, "TEST FAILED\n");
+ else
+ fprintf(stdout, "%s\n", result[ret]);
+ }
+
+ if (test->legacyirq) {
+ ret = ioctl(fd, PCITEST_LEGACY_IRQ, 0);
+ fprintf(stdout, "LEGACY IRQ:\t");
+ if (ret < 0)
+ fprintf(stdout, "TEST FAILED\n");
+ else
+ fprintf(stdout, "%s\n", result[ret]);
+ }
+
+ if (test->msinum > 0 && test->msinum <= 32) {
+ ret = ioctl(fd, PCITEST_MSI, test->msinum);
+ fprintf(stdout, "MSI%d:\t\t", test->msinum);
+ if (ret < 0)
+ fprintf(stdout, "TEST FAILED\n");
+ else
+ fprintf(stdout, "%s\n", result[ret]);
+ }
+
+ if (test->write) {
+ ret = ioctl(fd, PCITEST_WRITE, test->size);
+ fprintf(stdout, "WRITE (%7ld bytes):\t\t", test->size);
+ if (ret < 0)
+ fprintf(stdout, "TEST FAILED\n");
+ else
+ fprintf(stdout, "%s\n", result[ret]);
+ }
+
+ if (test->read) {
+ ret = ioctl(fd, PCITEST_READ, test->size);
+ fprintf(stdout, "READ (%7ld bytes):\t\t", test->size);
+ if (ret < 0)
+ fprintf(stdout, "TEST FAILED\n");
+ else
+ fprintf(stdout, "%s\n", result[ret]);
+ }
+
+ if (test->copy) {
+ ret = ioctl(fd, PCITEST_COPY, test->size);
+ fprintf(stdout, "COPY (%7ld bytes):\t\t", test->size);
+ if (ret < 0)
+ fprintf(stdout, "TEST FAILED\n");
+ else
+ fprintf(stdout, "%s\n", result[ret]);
+ }
+
+ fflush(stdout);
+}
+
+int main(int argc, char **argv)
+{
+ int c;
+ struct pci_test *test;
+
+ test = calloc(1, sizeof(*test));
+ if (!test) {
+ perror("Fail to allocate memory for pci_test\n");
+ return -ENOMEM;
+ }
+
+ /* since '0' is a valid BAR number, initialize it to -1 */
+ test->barnum = -1;
+
+ /* set default size as 100KB */
+ test->size = 0x19000;
+
+ /* set default endpoint device */
+ test->device = "/dev/pci-endpoint-test.0";
+
+ while ((c = getopt(argc, argv, "D:b:m:lrwcs:")) != EOF)
+ switch (c) {
+ case 'D':
+ test->device = optarg;
+ continue;
+ case 'b':
+ test->barnum = atoi(optarg);
+ if (test->barnum < 0 || test->barnum > 5)
+ goto usage;
+ continue;
+ case 'l':
+ test->legacyirq = true;
+ continue;
+ case 'm':
+ test->msinum = atoi(optarg);
+ if (test->msinum < 1 || test->msinum > 32)
+ goto usage;
+ continue;
+ case 'r':
+ test->read = true;
+ continue;
+ case 'w':
+ test->write = true;
+ continue;
+ case 'c':
+ test->copy = true;
+ continue;
+ case 's':
+ test->size = strtoul(optarg, NULL, 0);
+ continue;
+ case '?':
+ case 'h':
+ default:
+usage:
+ fprintf(stderr,
+ "usage: %s [options]\n"
+ "Options:\n"
+ "\t-D <dev> PCI endpoint test device {default: /dev/pci-endpoint-test.0}\n"
+ "\t-b <bar num> BAR test (bar number between 0..5)\n"
+ "\t-m <msi num> MSI test (msi number between 1..32)\n"
+ "\t-r Read buffer test\n"
+ "\t-w Write buffer test\n"
+ "\t-c Copy buffer test\n"
+ "\t-s <size> Size of buffer {default: 100KB}\n",
+ argv[0]);
+ return -EINVAL;
+ }
+
+ run_test(test);
+ return 0;
+}
diff --git a/tools/pci/pcitest.sh b/tools/pci/pcitest.sh
new file mode 100644
index 000000000000..5442bbea4c22
--- /dev/null
+++ b/tools/pci/pcitest.sh
@@ -0,0 +1,56 @@
+#!/bin/sh
+
+echo "BAR tests"
+echo
+
+bar=0
+
+while [ $bar -lt 6 ]
+do
+ pcitest -b $bar
+ bar=`expr $bar + 1`
+done
+echo
+
+echo "Interrupt tests"
+echo
+
+pcitest -l
+msi=1
+
+while [ $msi -lt 33 ]
+do
+ pcitest -m $msi
+ msi=`expr $msi + 1`
+done
+echo
+
+echo "Read Tests"
+echo
+
+pcitest -r -s 1
+pcitest -r -s 1024
+pcitest -r -s 1025
+pcitest -r -s 1024000
+pcitest -r -s 1024001
+echo
+
+echo "Write Tests"
+echo
+
+pcitest -w -s 1
+pcitest -w -s 1024
+pcitest -w -s 1025
+pcitest -w -s 1024000
+pcitest -w -s 1024001
+echo
+
+echo "Copy Tests"
+echo
+
+pcitest -c -s 1
+pcitest -c -s 1024
+pcitest -c -s 1025
+pcitest -c -s 1024000
+pcitest -c -s 1024001
+echo
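
The shell driver above can also be scripted from Python; the sketch below runs the same BAR, interrupt and read sweeps by invoking the pcitest binary with the flags from its usage text (it assumes pcitest is already installed on PATH):

    # Drive the pcitest binary the same way pcitest.sh does.
    import subprocess

    def run(*args):
        # pcitest prints its own OKAY/NOT OKAY lines; just let them through.
        subprocess.call(['pcitest'] + list(args))

    for bar in range(6):               # BARs 0..5, per the -b usage text
        run('-b', str(bar))

    run('-l')                          # legacy interrupt test

    for msi in range(1, 33):           # MSI vectors 1..32
        run('-m', str(msi))

    for size in (1, 1024, 1025, 1024000, 1024001):
        run('-r', '-s', str(size))     # read buffer tests
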
diff --git a/tools/testing/nvdimm/Kbuild b/tools/testing/nvdimm/Kbuild
index 405212be044a..d870520da68b 100644
--- a/tools/testing/nvdimm/Kbuild
+++ b/tools/testing/nvdimm/Kbuild
@@ -28,7 +28,10 @@ obj-$(CONFIG_ND_BTT) += nd_btt.o
obj-$(CONFIG_ND_BLK) += nd_blk.o
obj-$(CONFIG_X86_PMEM_LEGACY) += nd_e820.o
obj-$(CONFIG_ACPI_NFIT) += nfit.o
-obj-$(CONFIG_DEV_DAX) += dax.o
+ifeq ($(CONFIG_DAX),m)
+obj-$(CONFIG_DAX) += dax.o
+endif
+obj-$(CONFIG_DEV_DAX) += device_dax.o
obj-$(CONFIG_DEV_DAX_PMEM) += dax_pmem.o
nfit-y := $(ACPI_SRC)/core.o
@@ -48,9 +51,13 @@ nd_blk-y += config_check.o
nd_e820-y := $(NVDIMM_SRC)/e820.o
nd_e820-y += config_check.o
-dax-y := $(DAX_SRC)/dax.o
+dax-y := $(DAX_SRC)/super.o
dax-y += config_check.o
+device_dax-y := $(DAX_SRC)/device.o
+device_dax-y += dax-dev.o
+device_dax-y += config_check.o
+
dax_pmem-y := $(DAX_SRC)/pmem.o
dax_pmem-y += config_check.o
diff --git a/tools/testing/nvdimm/dax-dev.c b/tools/testing/nvdimm/dax-dev.c
new file mode 100644
index 000000000000..36ee3d8797c3
--- /dev/null
+++ b/tools/testing/nvdimm/dax-dev.c
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2016, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#include "test/nfit_test.h"
+#include <linux/mm.h>
+#include "../../../drivers/dax/dax-private.h"
+
+phys_addr_t dax_pgoff_to_phys(struct dev_dax *dev_dax, pgoff_t pgoff,
+ unsigned long size)
+{
+ struct resource *res;
+ phys_addr_t addr;
+ int i;
+
+ for (i = 0; i < dev_dax->num_resources; i++) {
+ res = &dev_dax->res[i];
+ addr = pgoff * PAGE_SIZE + res->start;
+ if (addr >= res->start && addr <= res->end)
+ break;
+ pgoff -= PHYS_PFN(resource_size(res));
+ }
+
+ if (i < dev_dax->num_resources) {
+ res = &dev_dax->res[i];
+ if (addr + size - 1 <= res->end) {
+ if (get_nfit_res(addr)) {
+ struct page *page;
+
+ if (dev_dax->region->align > PAGE_SIZE)
+ return -1;
+
+ page = vmalloc_to_page((void *)addr);
+ return PFN_PHYS(page_to_pfn(page));
+ } else
+ return addr;
+ }
+ }
+
+ return -1;
+}
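dax_pgoff_to_phys() above walks the device's resources, letting earlier resources consume the page offset before turning the remainder into a physical address. A simplified Python model of that walk (PAGE_SIZE and the resource list are made up; the nfit_test vmalloc translation is left out):

    # Each (start, end) resource covers (end - start + 1) / PAGE_SIZE pages.
    PAGE_SIZE = 4096

    def pgoff_to_phys(resources, pgoff, size):
        for start, end in resources:   # inclusive range, as in struct resource
            addr = pgoff * PAGE_SIZE + start
            if start <= addr <= end:
                if addr + size - 1 <= end:
                    return addr
                return None            # request straddles the resource end
            pgoff -= (end - start + 1) // PAGE_SIZE
        return None

    res = [(0x100000, 0x1FFFFF), (0x400000, 0x4FFFFF)]  # two 1 MiB ranges
    print(hex(pgoff_to_phys(res, 300, PAGE_SIZE)))      # 0x42c000
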
diff --git a/tools/testing/nvdimm/pmem-dax.c b/tools/testing/nvdimm/pmem-dax.c
index c9b8c48f85fc..b53596ad601b 100644
--- a/tools/testing/nvdimm/pmem-dax.c
+++ b/tools/testing/nvdimm/pmem-dax.c
@@ -15,13 +15,13 @@
#include <pmem.h>
#include <nd.h>
-long pmem_direct_access(struct block_device *bdev, sector_t sector,
- void **kaddr, pfn_t *pfn, long size)
+long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,
+ long nr_pages, void **kaddr, pfn_t *pfn)
{
- struct pmem_device *pmem = bdev->bd_queue->queuedata;
- resource_size_t offset = sector * 512 + pmem->data_offset;
+ resource_size_t offset = PFN_PHYS(pgoff) + pmem->data_offset;
- if (unlikely(is_bad_pmem(&pmem->bb, sector, size)))
+ if (unlikely(is_bad_pmem(&pmem->bb, PFN_PHYS(pgoff) / 512,
+ PFN_PHYS(nr_pages))))
return -EIO;
/*
@@ -34,11 +34,10 @@ long pmem_direct_access(struct block_device *bdev, sector_t sector,
*kaddr = pmem->virt_addr + offset;
page = vmalloc_to_page(pmem->virt_addr + offset);
*pfn = page_to_pfn_t(page);
- dev_dbg_ratelimited(disk_to_dev(bdev->bd_disk)->parent,
- "%s: sector: %#llx pfn: %#lx\n", __func__,
- (unsigned long long) sector, page_to_pfn(page));
+ pr_debug_ratelimited("%s: pmem: %p pgoff: %#lx pfn: %#lx\n",
+ __func__, pmem, pgoff, page_to_pfn(page));
- return PAGE_SIZE;
+ return 1;
}
*kaddr = pmem->virt_addr + offset;
@@ -49,6 +48,6 @@ long pmem_direct_access(struct block_device *bdev, sector_t sector,
* requested range.
*/
if (unlikely(pmem->bb.count))
- return size;
- return pmem->size - pmem->pfn_pad - offset;
+ return nr_pages;
+ return PHYS_PFN(pmem->size - pmem->pfn_pad - offset);
}
diff --git a/tools/testing/nvdimm/test/nfit.c b/tools/testing/nvdimm/test/nfit.c
index 798f17655433..c2187178fb13 100644
--- a/tools/testing/nvdimm/test/nfit.c
+++ b/tools/testing/nvdimm/test/nfit.c
@@ -132,6 +132,7 @@ static u32 handle[] = {
[3] = NFIT_DIMM_HANDLE(0, 0, 1, 0, 1),
[4] = NFIT_DIMM_HANDLE(0, 1, 0, 0, 0),
[5] = NFIT_DIMM_HANDLE(1, 0, 0, 0, 0),
+ [6] = NFIT_DIMM_HANDLE(1, 0, 0, 0, 1),
};
static unsigned long dimm_fail_cmd_flags[NUM_DCR];
@@ -728,8 +729,8 @@ static int nfit_test0_alloc(struct nfit_test *t)
static int nfit_test1_alloc(struct nfit_test *t)
{
size_t nfit_size = sizeof(struct acpi_nfit_system_address) * 2
- + sizeof(struct acpi_nfit_memory_map)
- + offsetof(struct acpi_nfit_control_region, window_size);
+ + sizeof(struct acpi_nfit_memory_map) * 2
+ + offsetof(struct acpi_nfit_control_region, window_size) * 2;
int i;
t->nfit_buf = test_alloc(t, nfit_size, &t->nfit_dma);
@@ -906,6 +907,7 @@ static void nfit_test0_setup(struct nfit_test *t)
memdev->address = 0;
memdev->interleave_index = 0;
memdev->interleave_ways = 2;
+ memdev->flags = ACPI_NFIT_MEM_HEALTH_ENABLED;
/* mem-region2 (spa1, dimm0) */
memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 2;
@@ -921,6 +923,7 @@ static void nfit_test0_setup(struct nfit_test *t)
memdev->address = SPA0_SIZE/2;
memdev->interleave_index = 0;
memdev->interleave_ways = 4;
+ memdev->flags = ACPI_NFIT_MEM_HEALTH_ENABLED;
/* mem-region3 (spa1, dimm1) */
memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 3;
@@ -951,6 +954,7 @@ static void nfit_test0_setup(struct nfit_test *t)
memdev->address = SPA0_SIZE/2;
memdev->interleave_index = 0;
memdev->interleave_ways = 4;
+ memdev->flags = ACPI_NFIT_MEM_HEALTH_ENABLED;
/* mem-region5 (spa1, dimm3) */
memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 5;
@@ -1086,6 +1090,7 @@ static void nfit_test0_setup(struct nfit_test *t)
memdev->address = 0;
memdev->interleave_index = 0;
memdev->interleave_ways = 1;
+ memdev->flags = ACPI_NFIT_MEM_HEALTH_ENABLED;
offset = offset + sizeof(struct acpi_nfit_memory_map) * 14;
/* dcr-descriptor0: blk */
@@ -1384,6 +1389,7 @@ static void nfit_test0_setup(struct nfit_test *t)
memdev->address = 0;
memdev->interleave_index = 0;
memdev->interleave_ways = 1;
+ memdev->flags = ACPI_NFIT_MEM_HEALTH_ENABLED;
/* mem-region16 (spa/bdw4, dimm4) */
memdev = nfit_buf + offset +
@@ -1486,6 +1492,34 @@ static void nfit_test1_setup(struct nfit_test *t)
dcr->code = NFIT_FIC_BYTE;
dcr->windows = 0;
+ offset += dcr->header.length;
+ memdev = nfit_buf + offset;
+ memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
+ memdev->header.length = sizeof(*memdev);
+ memdev->device_handle = handle[6];
+ memdev->physical_id = 0;
+ memdev->region_id = 0;
+ memdev->range_index = 0;
+ memdev->region_index = 0+2;
+ memdev->region_size = SPA2_SIZE;
+ memdev->region_offset = 0;
+ memdev->address = 0;
+ memdev->interleave_index = 0;
+ memdev->interleave_ways = 1;
+ memdev->flags = ACPI_NFIT_MEM_MAP_FAILED;
+
+ /* dcr-descriptor1 */
+ offset += sizeof(*memdev);
+ dcr = nfit_buf + offset;
+ dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
+ dcr->header.length = offsetof(struct acpi_nfit_control_region,
+ window_size);
+ dcr->region_index = 0+2;
+ dcr_common_init(dcr);
+ dcr->serial_number = ~handle[6];
+ dcr->code = NFIT_FIC_BYTE;
+ dcr->windows = 0;
+
post_ars_status(&t->ars_state, t->spa_set_dma[0], SPA2_SIZE);
acpi_desc = &t->acpi_desc;
@@ -1817,6 +1851,10 @@ static int nfit_test_probe(struct platform_device *pdev)
if (rc)
return rc;
+ rc = devm_add_action_or_reset(&pdev->dev, acpi_nfit_shutdown, acpi_desc);
+ if (rc)
+ return rc;
+
if (nfit_test->setup != nfit_test0_setup)
return 0;
@@ -1907,7 +1945,7 @@ static __init int nfit_test_init(void)
case 1:
nfit_test->num_pm = 1;
nfit_test->dcr_idx = NUM_DCR;
- nfit_test->num_dcr = 1;
+ nfit_test->num_dcr = 2;
nfit_test->alloc = nfit_test1_alloc;
nfit_test->setup = nfit_test1_setup;
break;
@@ -1924,6 +1962,7 @@ static __init int nfit_test_init(void)
put_device(&pdev->dev);
goto err_register;
}
+ get_device(&pdev->dev);
rc = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (rc)
@@ -1942,6 +1981,10 @@ static __init int nfit_test_init(void)
if (instances[i])
platform_device_unregister(&instances[i]->pdev);
nfit_test_teardown();
+ for (i = 0; i < NUM_NFITS; i++)
+ if (instances[i])
+ put_device(&instances[i]->pdev.dev);
+
return rc;
}
@@ -1949,10 +1992,13 @@ static __exit void nfit_test_exit(void)
{
int i;
- platform_driver_unregister(&nfit_test_driver);
for (i = 0; i < NUM_NFITS; i++)
platform_device_unregister(&instances[i]->pdev);
+ platform_driver_unregister(&nfit_test_driver);
nfit_test_teardown();
+
+ for (i = 0; i < NUM_NFITS; i++)
+ put_device(&instances[i]->pdev.dev);
class_destroy(nfit_test_dimm);
}
diff --git a/tools/testing/selftests/.gitignore b/tools/testing/selftests/.gitignore
index f0600d20ce7d..91750352459d 100644
--- a/tools/testing/selftests/.gitignore
+++ b/tools/testing/selftests/.gitignore
@@ -1 +1,5 @@
kselftest
+gpiogpio-event-mon
+gpiogpio-hammer
+gpioinclude/
+gpiolsgpio
diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
index d8593f1251ec..26ce4f7168be 100644
--- a/tools/testing/selftests/Makefile
+++ b/tools/testing/selftests/Makefile
@@ -39,7 +39,7 @@ TARGETS += x86
TARGETS += zram
#Please keep the TARGETS list alphabetically sorted
# Run "make quicktest=1 run_tests" or
-# "make quicktest=1 kselftest from top level Makefile
+# "make quicktest=1 kselftest" from top level Makefile
TARGETS_HOTPLUG = cpu-hotplug
TARGETS_HOTPLUG += memory-hotplug
@@ -133,4 +133,4 @@ clean:
make OUTPUT=$$BUILD_TARGET -C $$TARGET clean;\
done;
-.PHONY: install
+.PHONY: all run_tests hotplug run_hotplug clean_hotplug run_pstore_crash install clean
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index d8d94b9bd76c..91edd0566237 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -13,7 +13,7 @@ LDLIBS += -lcap -lelf
TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test_progs
-TEST_GEN_FILES = test_pkt_access.o test_xdp.o test_l4lb.o
+TEST_GEN_FILES = test_pkt_access.o test_xdp.o test_l4lb.o test_tcp_estats.o
TEST_PROGS := test_kmod.sh
@@ -34,6 +34,6 @@ $(BPFOBJ): force
CLANG ?= clang
%.o: %.c
- $(CLANG) -I../../../include/uapi -I../../../../samples/bpf/ \
- -D__x86_64__ -Wno-compare-distinct-pointer-types \
+ $(CLANG) -I. -I../../../include/uapi -I../../../../samples/bpf/ \
+ -Wno-compare-distinct-pointer-types \
-O2 -target bpf -c $< -o $@
diff --git a/tools/testing/selftests/bpf/gnu/stubs.h b/tools/testing/selftests/bpf/gnu/stubs.h
new file mode 100644
index 000000000000..719225b16626
--- /dev/null
+++ b/tools/testing/selftests/bpf/gnu/stubs.h
@@ -0,0 +1 @@
+/* dummy .h to trick /usr/include/features.h to work with 'clang -target bpf' */
diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c
index 4ed049a0b14b..b59f5ed4ae40 100644
--- a/tools/testing/selftests/bpf/test_progs.c
+++ b/tools/testing/selftests/bpf/test_progs.c
@@ -268,6 +268,21 @@ out:
bpf_object__close(obj);
}
+static void test_tcp_estats(void)
+{
+ const char *file = "./test_tcp_estats.o";
+ int err, prog_fd;
+ struct bpf_object *obj;
+ __u32 duration = 0;
+
+ err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
+ CHECK(err, "", "err %d errno %d\n", err, errno);
+ if (err)
+ return;
+
+ bpf_object__close(obj);
+}
+
int main(void)
{
struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY };
@@ -277,6 +292,7 @@ int main(void)
test_pkt_access();
test_xdp();
test_l4lb();
+ test_tcp_estats();
printf("Summary: %d PASSED, %d FAILED\n", pass_cnt, error_cnt);
return 0;
diff --git a/tools/testing/selftests/bpf/test_tcp_estats.c b/tools/testing/selftests/bpf/test_tcp_estats.c
new file mode 100644
index 000000000000..bee3bbecc0c4
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_tcp_estats.c
@@ -0,0 +1,258 @@
+/* Copyright (c) 2017 Facebook
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+
+/* This program shows clang/llvm is able to generate code pattern
+ * like:
+ * _tcp_send_active_reset:
+ * 0: bf 16 00 00 00 00 00 00 r6 = r1
+ * ......
+ * 335: b7 01 00 00 0f 00 00 00 r1 = 15
+ * 336: 05 00 48 00 00 00 00 00 goto 72
+ *
+ * LBB0_3:
+ * 337: b7 01 00 00 01 00 00 00 r1 = 1
+ * 338: 63 1a d0 ff 00 00 00 00 *(u32 *)(r10 - 48) = r1
+ * 408: b7 01 00 00 03 00 00 00 r1 = 3
+ *
+ * LBB0_4:
+ * 409: 71 a2 fe ff 00 00 00 00 r2 = *(u8 *)(r10 - 2)
+ * 410: bf a7 00 00 00 00 00 00 r7 = r10
+ * 411: 07 07 00 00 b8 ff ff ff r7 += -72
+ * 412: bf 73 00 00 00 00 00 00 r3 = r7
+ * 413: 0f 13 00 00 00 00 00 00 r3 += r1
+ * 414: 73 23 2d 00 00 00 00 00 *(u8 *)(r3 + 45) = r2
+ *
+ * From the above code snippet, the code generated by the compiler
+ * is reasonable. The "r1" is assigned to different values in basic
+ * blocks "_tcp_send_active_reset" and "LBB0_3", and used in "LBB0_4".
+ * The verifier should be able to handle such code patterns.
+ */
+#include <string.h>
+#include <linux/bpf.h>
+#include <linux/ipv6.h>
+#include <linux/version.h>
+#include <sys/socket.h>
+#include "bpf_helpers.h"
+
+#define _(P) ({typeof(P) val = 0; bpf_probe_read(&val, sizeof(val), &P); val;})
+#define TCP_ESTATS_MAGIC 0xBAADBEEF
+
+/* This test case needs "sock" and "pt_regs" data structure.
+ * Recursively, "sock" needs "sock_common" and "inet_sock".
+ * However, this is a unit test case only for
+ * verifier purpose without bpf program execution.
+ * We can safely mock much simpler data structures, basically
+ * only taking the necessary fields from kernel headers.
+ */
+typedef __u32 __bitwise __portpair;
+typedef __u64 __bitwise __addrpair;
+
+struct sock_common {
+ unsigned short skc_family;
+ union {
+ __addrpair skc_addrpair;
+ struct {
+ __be32 skc_daddr;
+ __be32 skc_rcv_saddr;
+ };
+ };
+ union {
+ __portpair skc_portpair;
+ struct {
+ __be16 skc_dport;
+ __u16 skc_num;
+ };
+ };
+ struct in6_addr skc_v6_daddr;
+ struct in6_addr skc_v6_rcv_saddr;
+};
+
+struct sock {
+ struct sock_common __sk_common;
+#define sk_family __sk_common.skc_family
+#define sk_v6_daddr __sk_common.skc_v6_daddr
+#define sk_v6_rcv_saddr __sk_common.skc_v6_rcv_saddr
+};
+
+struct inet_sock {
+ struct sock sk;
+#define inet_daddr sk.__sk_common.skc_daddr
+#define inet_dport sk.__sk_common.skc_dport
+ __be32 inet_saddr;
+ __be16 inet_sport;
+};
+
+struct pt_regs {
+ long di;
+};
+
+static inline struct inet_sock *inet_sk(const struct sock *sk)
+{
+ return (struct inet_sock *)sk;
+}
+
+/* Define various data structures for state recording.
+ * Some fields are not used due to test simplification.
+ */
+enum tcp_estats_addrtype {
+ TCP_ESTATS_ADDRTYPE_IPV4 = 1,
+ TCP_ESTATS_ADDRTYPE_IPV6 = 2
+};
+
+enum tcp_estats_event_type {
+ TCP_ESTATS_ESTABLISH,
+ TCP_ESTATS_PERIODIC,
+ TCP_ESTATS_TIMEOUT,
+ TCP_ESTATS_RETRANSMIT_TIMEOUT,
+ TCP_ESTATS_RETRANSMIT_OTHER,
+ TCP_ESTATS_SYN_RETRANSMIT,
+ TCP_ESTATS_SYNACK_RETRANSMIT,
+ TCP_ESTATS_TERM,
+ TCP_ESTATS_TX_RESET,
+ TCP_ESTATS_RX_RESET,
+ TCP_ESTATS_WRITE_TIMEOUT,
+ TCP_ESTATS_CONN_TIMEOUT,
+ TCP_ESTATS_ACK_LATENCY,
+ TCP_ESTATS_NEVENTS,
+};
+
+struct tcp_estats_event {
+ int pid;
+ int cpu;
+ unsigned long ts;
+ unsigned int magic;
+ enum tcp_estats_event_type event_type;
+};
+
+/* The below data structure is packed in order for
+ * llvm compiler to generate expected code.
+ */
+struct tcp_estats_conn_id {
+ unsigned int localaddressType;
+ struct {
+ unsigned char data[16];
+ } localaddress;
+ struct {
+ unsigned char data[16];
+ } remaddress;
+ unsigned short localport;
+ unsigned short remport;
+} __attribute__((__packed__));
+
+struct tcp_estats_basic_event {
+ struct tcp_estats_event event;
+ struct tcp_estats_conn_id conn_id;
+};
+
+struct bpf_map_def SEC("maps") ev_record_map = {
+ .type = BPF_MAP_TYPE_HASH,
+ .key_size = sizeof(__u32),
+ .value_size = sizeof(struct tcp_estats_basic_event),
+ .max_entries = 1024,
+};
+
+struct dummy_tracepoint_args {
+ unsigned long long pad;
+ struct sock *sock;
+};
+
+static __always_inline void tcp_estats_ev_init(struct tcp_estats_event *event,
+ enum tcp_estats_event_type type)
+{
+ event->magic = TCP_ESTATS_MAGIC;
+ event->ts = bpf_ktime_get_ns();
+ event->event_type = type;
+}
+
+static __always_inline void unaligned_u32_set(unsigned char *to, __u8 *from)
+{
+ to[0] = _(from[0]);
+ to[1] = _(from[1]);
+ to[2] = _(from[2]);
+ to[3] = _(from[3]);
+}
+
+static __always_inline void conn_id_ipv4_init(struct tcp_estats_conn_id *conn_id,
+ __be32 *saddr, __be32 *daddr)
+{
+ conn_id->localaddressType = TCP_ESTATS_ADDRTYPE_IPV4;
+
+ unaligned_u32_set(conn_id->localaddress.data, (__u8 *)saddr);
+ unaligned_u32_set(conn_id->remaddress.data, (__u8 *)daddr);
+}
+
+static __always_inline void conn_id_ipv6_init(struct tcp_estats_conn_id *conn_id,
+ __be32 *saddr, __be32 *daddr)
+{
+ conn_id->localaddressType = TCP_ESTATS_ADDRTYPE_IPV6;
+
+ unaligned_u32_set(conn_id->localaddress.data, (__u8 *)saddr);
+ unaligned_u32_set(conn_id->localaddress.data + sizeof(__u32),
+ (__u8 *)(saddr + 1));
+ unaligned_u32_set(conn_id->localaddress.data + sizeof(__u32) * 2,
+ (__u8 *)(saddr + 2));
+ unaligned_u32_set(conn_id->localaddress.data + sizeof(__u32) * 3,
+ (__u8 *)(saddr + 3));
+
+ unaligned_u32_set(conn_id->remaddress.data,
+ (__u8 *)(daddr));
+ unaligned_u32_set(conn_id->remaddress.data + sizeof(__u32),
+ (__u8 *)(daddr + 1));
+ unaligned_u32_set(conn_id->remaddress.data + sizeof(__u32) * 2,
+ (__u8 *)(daddr + 2));
+ unaligned_u32_set(conn_id->remaddress.data + sizeof(__u32) * 3,
+ (__u8 *)(daddr + 3));
+}
+
+static __always_inline void tcp_estats_conn_id_init(struct tcp_estats_conn_id *conn_id,
+ struct sock *sk)
+{
+ conn_id->localport = _(inet_sk(sk)->inet_sport);
+ conn_id->remport = _(inet_sk(sk)->inet_dport);
+
+ if (_(sk->sk_family) == AF_INET6)
+ conn_id_ipv6_init(conn_id,
+ sk->sk_v6_rcv_saddr.s6_addr32,
+ sk->sk_v6_daddr.s6_addr32);
+ else
+ conn_id_ipv4_init(conn_id,
+ &inet_sk(sk)->inet_saddr,
+ &inet_sk(sk)->inet_daddr);
+}
+
+static __always_inline void tcp_estats_init(struct sock *sk,
+ struct tcp_estats_event *event,
+ struct tcp_estats_conn_id *conn_id,
+ enum tcp_estats_event_type type)
+{
+ tcp_estats_ev_init(event, type);
+ tcp_estats_conn_id_init(conn_id, sk);
+}
+
+static __always_inline void send_basic_event(struct sock *sk,
+ enum tcp_estats_event_type type)
+{
+ struct tcp_estats_basic_event ev;
+ __u32 key = bpf_get_prandom_u32();
+
+ memset(&ev, 0, sizeof(ev));
+ tcp_estats_init(sk, &ev.event, &ev.conn_id, type);
+ bpf_map_update_elem(&ev_record_map, &key, &ev, BPF_ANY);
+}
+
+SEC("dummy_tracepoint")
+int _dummy_tracepoint(struct dummy_tracepoint_args *arg)
+{
+ if (!arg->sock)
+ return 0;
+
+ send_basic_event(arg->sock, TCP_ESTATS_TX_RESET);
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
+__u32 _version SEC("version") = 1; /* ignored by tracepoints, required by libbpf.a */
diff --git a/tools/testing/selftests/breakpoints/Makefile b/tools/testing/selftests/breakpoints/Makefile
index 72aa103e4141..6b214b7b10fb 100644
--- a/tools/testing/selftests/breakpoints/Makefile
+++ b/tools/testing/selftests/breakpoints/Makefile
@@ -5,7 +5,7 @@ ARCH ?= $(shell echo $(uname_M) | sed -e s/i.86/x86/ -e s/x86_64/x86/)
ifeq ($(ARCH),x86)
TEST_GEN_PROGS := breakpoint_test
endif
-ifeq ($(ARCH),aarch64)
+ifneq (,$(filter $(ARCH),aarch64 arm64))
TEST_GEN_PROGS := breakpoint_test_arm64
endif
diff --git a/tools/testing/selftests/cpufreq/config b/tools/testing/selftests/cpufreq/config
new file mode 100644
index 000000000000..27ff72ebd0f5
--- /dev/null
+++ b/tools/testing/selftests/cpufreq/config
@@ -0,0 +1,15 @@
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_STAT=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=y
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
+CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
+CONFIG_DEBUG_RT_MUTEXES=y
+CONFIG_DEBUG_PI_LIST=y
+CONFIG_DEBUG_SPINLOCK=y
+CONFIG_DEBUG_MUTEXES=y
+CONFIG_DEBUG_LOCK_ALLOC=y
+CONFIG_PROVE_LOCKING=y
+CONFIG_LOCKDEP=y
+CONFIG_DEBUG_ATOMIC_SLEEP=y
diff --git a/tools/testing/selftests/ftrace/config b/tools/testing/selftests/ftrace/config
index ef8214661612..8a1c9f949fe0 100644
--- a/tools/testing/selftests/ftrace/config
+++ b/tools/testing/selftests/ftrace/config
@@ -1 +1,2 @@
+CONFIG_KPROBES=y
CONFIG_FTRACE=y
diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_type.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_type.tc
index 0a78705b43b2..c75faefb4fff 100644
--- a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_type.tc
+++ b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_type.tc
@@ -26,7 +26,7 @@ check_types() {
test $X2 = $X3
test 0x$X3 = $3
- B4=`printf "%x" $4`
+ B4=`printf "%02x" $4`
B3=`echo -n $X3 | tail -c 3 | head -c 2`
test $B3 = $B4
}
diff --git a/tools/testing/selftests/futex/Makefile b/tools/testing/selftests/futex/Makefile
index 653c5cd9e44d..e2fbb890aef9 100644
--- a/tools/testing/selftests/futex/Makefile
+++ b/tools/testing/selftests/futex/Makefile
@@ -8,7 +8,7 @@ include ../lib.mk
all:
for DIR in $(SUBDIRS); do \
- BUILD_TARGET=$$OUTPUT/$$DIR; \
+ BUILD_TARGET=$(OUTPUT)/$$DIR; \
mkdir $$BUILD_TARGET -p; \
make OUTPUT=$$BUILD_TARGET -C $$DIR $@;\
done
@@ -22,7 +22,7 @@ override define INSTALL_RULE
install -t $(INSTALL_PATH) $(TEST_PROGS) $(TEST_PROGS_EXTENDED) $(TEST_FILES)
@for SUBDIR in $(SUBDIRS); do \
- BUILD_TARGET=$$OUTPUT/$$SUBDIR; \
+ BUILD_TARGET=$(OUTPUT)/$$SUBDIR; \
mkdir $$BUILD_TARGET -p; \
$(MAKE) OUTPUT=$$BUILD_TARGET -C $$SUBDIR INSTALL_PATH=$(INSTALL_PATH)/$$SUBDIR install; \
done;
@@ -32,9 +32,10 @@ override define EMIT_TESTS
echo "./run.sh"
endef
-clean:
+override define CLEAN
for DIR in $(SUBDIRS); do \
- BUILD_TARGET=$$OUTPUT/$$DIR; \
+ BUILD_TARGET=$(OUTPUT)/$$DIR; \
mkdir $$BUILD_TARGET -p; \
make OUTPUT=$$BUILD_TARGET -C $$DIR $@;\
done
+endef
diff --git a/tools/testing/selftests/gpio/Makefile b/tools/testing/selftests/gpio/Makefile
index 205e4d10e085..298929df97e6 100644
--- a/tools/testing/selftests/gpio/Makefile
+++ b/tools/testing/selftests/gpio/Makefile
@@ -2,13 +2,20 @@
TEST_PROGS := gpio-mockup.sh
TEST_FILES := gpio-mockup-sysfs.sh $(BINARIES)
BINARIES := gpio-mockup-chardev
+EXTRA_PROGS := ../gpiogpio-event-mon ../gpiogpio-hammer ../gpiolsgpio
+EXTRA_DIRS := ../gpioinclude/
+EXTRA_OBJS := ../gpiogpio-event-mon-in.o ../gpiogpio-event-mon.o
+EXTRA_OBJS += ../gpiogpio-hammer-in.o ../gpiogpio-utils.o ../gpiolsgpio-in.o
+EXTRA_OBJS += ../gpiolsgpio.o
include ../lib.mk
all: $(BINARIES)
-clean:
- $(RM) $(BINARIES)
+override define CLEAN
+ $(RM) $(BINARIES) $(EXTRA_PROGS) $(EXTRA_OBJS)
+ $(RM) -r $(EXTRA_DIRS)
+endef
CFLAGS += -O2 -g -std=gnu99 -Wall -I../../../../usr/include/
LDLIBS += -lmount -I/usr/include/libmount
diff --git a/tools/testing/selftests/gpio/config b/tools/testing/selftests/gpio/config
new file mode 100644
index 000000000000..abaa6902b7b6
--- /dev/null
+++ b/tools/testing/selftests/gpio/config
@@ -0,0 +1,2 @@
+CONFIG_GPIOLIB=y
+CONFIG_GPIO_MOCKUP=m
diff --git a/tools/testing/selftests/lib.mk b/tools/testing/selftests/lib.mk
index 775c589ac3c0..959273c3a52e 100644
--- a/tools/testing/selftests/lib.mk
+++ b/tools/testing/selftests/lib.mk
@@ -51,8 +51,12 @@ endef
emit_tests:
$(EMIT_TESTS)
-clean:
+define CLEAN
$(RM) -r $(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES) $(EXTRA_CLEAN)
+endef
+
+clean:
+ $(CLEAN)
$(OUTPUT)/%:%.c
$(LINK.c) $^ $(LDLIBS) -o $@
diff --git a/tools/testing/selftests/lib/config b/tools/testing/selftests/lib/config
new file mode 100644
index 000000000000..126933bcc950
--- /dev/null
+++ b/tools/testing/selftests/lib/config
@@ -0,0 +1,3 @@
+CONFIG_TEST_PRINTF=m
+CONFIG_TEST_BITMAP=m
+CONFIG_PRIME_NUMBERS=m
diff --git a/tools/testing/selftests/powerpc/Makefile b/tools/testing/selftests/powerpc/Makefile
index 2132ff8eb4e7..72c3ac2323e1 100644
--- a/tools/testing/selftests/powerpc/Makefile
+++ b/tools/testing/selftests/powerpc/Makefile
@@ -14,6 +14,7 @@ export CFLAGS
SUB_DIRS = alignment \
benchmarks \
+ cache_shape \
copyloops \
context_switch \
dscr \
@@ -59,12 +60,13 @@ override define EMIT_TESTS
done;
endef
-clean:
+override define CLEAN
@for TARGET in $(SUB_DIRS); do \
BUILD_TARGET=$(OUTPUT)/$$TARGET; \
$(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET clean; \
done;
rm -f tags
+endef
tags:
find . -name '*.c' -o -name '*.h' | xargs ctags
diff --git a/tools/testing/selftests/powerpc/cache_shape/.gitignore b/tools/testing/selftests/powerpc/cache_shape/.gitignore
new file mode 100644
index 000000000000..ec1848434be5
--- /dev/null
+++ b/tools/testing/selftests/powerpc/cache_shape/.gitignore
@@ -0,0 +1 @@
+cache_shape
diff --git a/tools/testing/selftests/powerpc/cache_shape/Makefile b/tools/testing/selftests/powerpc/cache_shape/Makefile
new file mode 100644
index 000000000000..b24485ab30e2
--- /dev/null
+++ b/tools/testing/selftests/powerpc/cache_shape/Makefile
@@ -0,0 +1,10 @@
+TEST_PROGS := cache_shape
+
+all: $(TEST_PROGS)
+
+$(TEST_PROGS): ../harness.c ../utils.c
+
+include ../../lib.mk
+
+clean:
+ rm -f $(TEST_PROGS) *.o
diff --git a/tools/testing/selftests/powerpc/cache_shape/cache_shape.c b/tools/testing/selftests/powerpc/cache_shape/cache_shape.c
new file mode 100644
index 000000000000..29ec07eba7f9
--- /dev/null
+++ b/tools/testing/selftests/powerpc/cache_shape/cache_shape.c
@@ -0,0 +1,125 @@
+/*
+ * Copyright 2017, Michael Ellerman, IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <elf.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <link.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <unistd.h>
+
+#include "utils.h"
+
+#ifndef AT_L1I_CACHESIZE
+#define AT_L1I_CACHESIZE 40
+#define AT_L1I_CACHEGEOMETRY 41
+#define AT_L1D_CACHESIZE 42
+#define AT_L1D_CACHEGEOMETRY 43
+#define AT_L2_CACHESIZE 44
+#define AT_L2_CACHEGEOMETRY 45
+#define AT_L3_CACHESIZE 46
+#define AT_L3_CACHEGEOMETRY 47
+#endif
+
+static void print_size(const char *label, uint32_t val)
+{
+ printf("%s cache size: %#10x %10dB %10dK\n", label, val, val, val / 1024);
+}
+
+static void print_geo(const char *label, uint32_t val)
+{
+ uint16_t assoc;
+
+ printf("%s line size: %#10x ", label, val & 0xFFFF);
+
+ assoc = val >> 16;
+ if (assoc)
+ printf("%u-way", assoc);
+ else
+ printf("fully");
+
+ printf(" associative\n");
+}
+
+static int test_cache_shape()
+{
+ static char buffer[4096];
+ ElfW(auxv_t) *p;
+ int found;
+
+ FAIL_IF(read_auxv(buffer, sizeof(buffer)));
+
+ found = 0;
+
+ p = find_auxv_entry(AT_L1I_CACHESIZE, buffer);
+ if (p) {
+ found++;
+ print_size("L1I ", (uint32_t)p->a_un.a_val);
+ }
+
+ p = find_auxv_entry(AT_L1I_CACHEGEOMETRY, buffer);
+ if (p) {
+ found++;
+ print_geo("L1I ", (uint32_t)p->a_un.a_val);
+ }
+
+ p = find_auxv_entry(AT_L1D_CACHESIZE, buffer);
+ if (p) {
+ found++;
+ print_size("L1D ", (uint32_t)p->a_un.a_val);
+ }
+
+ p = find_auxv_entry(AT_L1D_CACHEGEOMETRY, buffer);
+ if (p) {
+ found++;
+ print_geo("L1D ", (uint32_t)p->a_un.a_val);
+ }
+
+ p = find_auxv_entry(AT_L2_CACHESIZE, buffer);
+ if (p) {
+ found++;
+ print_size("L2 ", (uint32_t)p->a_un.a_val);
+ }
+
+ p = find_auxv_entry(AT_L2_CACHEGEOMETRY, buffer);
+ if (p) {
+ found++;
+ print_geo("L2 ", (uint32_t)p->a_un.a_val);
+ }
+
+ p = find_auxv_entry(AT_L3_CACHESIZE, buffer);
+ if (p) {
+ found++;
+ print_size("L3 ", (uint32_t)p->a_un.a_val);
+ }
+
+ p = find_auxv_entry(AT_L3_CACHEGEOMETRY, buffer);
+ if (p) {
+ found++;
+ print_geo("L3 ", (uint32_t)p->a_un.a_val);
+ }
+
+ /* If we found none we're probably on a system where they don't exist */
+ SKIP_IF(found == 0);
+
+ /* But if we found any, we expect to find them all */
+ FAIL_IF(found != 8);
+
+ return 0;
+}
+
+int main(void)
+{
+ return test_harness(test_cache_shape, "cache_shape");
+}
diff --git a/tools/testing/selftests/powerpc/include/utils.h b/tools/testing/selftests/powerpc/include/utils.h
index 53405e8a52ab..735815b3ad7f 100644
--- a/tools/testing/selftests/powerpc/include/utils.h
+++ b/tools/testing/selftests/powerpc/include/utils.h
@@ -24,7 +24,11 @@ typedef uint8_t u8;
void test_harness_set_timeout(uint64_t time);
int test_harness(int (test_function)(void), char *name);
-extern void *get_auxv_entry(int type);
+
+int read_auxv(char *buf, ssize_t buf_size);
+void *find_auxv_entry(int type, char *auxv);
+void *get_auxv_entry(int type);
+
int pick_online_cpu(void);
static inline bool have_hwcap(unsigned long ftr)
diff --git a/tools/testing/selftests/powerpc/utils.c b/tools/testing/selftests/powerpc/utils.c
index dcf74184bfd0..d46916867a6f 100644
--- a/tools/testing/selftests/powerpc/utils.c
+++ b/tools/testing/selftests/powerpc/utils.c
@@ -19,45 +19,64 @@
static char auxv[4096];
-void *get_auxv_entry(int type)
+int read_auxv(char *buf, ssize_t buf_size)
{
- ElfW(auxv_t) *p;
- void *result;
ssize_t num;
- int fd;
+ int rc, fd;
fd = open("/proc/self/auxv", O_RDONLY);
if (fd == -1) {
perror("open");
- return NULL;
+ return -errno;
}
- result = NULL;
-
- num = read(fd, auxv, sizeof(auxv));
+ num = read(fd, buf, buf_size);
if (num < 0) {
perror("read");
+ rc = -EIO;
goto out;
}
- if (num > sizeof(auxv)) {
- printf("Overflowed auxv buffer\n");
+ if (num > buf_size) {
+ printf("overflowed auxv buffer\n");
+ rc = -EOVERFLOW;
goto out;
}
+ rc = 0;
+out:
+ close(fd);
+ return rc;
+}
+
+void *find_auxv_entry(int type, char *auxv)
+{
+ ElfW(auxv_t) *p;
+
p = (ElfW(auxv_t) *)auxv;
while (p->a_type != AT_NULL) {
- if (p->a_type == type) {
- result = (void *)p->a_un.a_val;
- break;
- }
+ if (p->a_type == type)
+ return p;
p++;
}
-out:
- close(fd);
- return result;
+
+ return NULL;
+}
+
+void *get_auxv_entry(int type)
+{
+ ElfW(auxv_t) *p;
+
+ if (read_auxv(auxv, sizeof(auxv)))
+ return NULL;
+
+ p = find_auxv_entry(type, auxv);
+ if (p)
+ return (void *)p->a_un.a_val;
+
+ return NULL;
}
int pick_online_cpu(void)
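The utils.c change above splits the old get_auxv_entry() into read_auxv() and find_auxv_entry(), which cache_shape.c consumes. A minimal sketch of the same pattern in a standalone program (assuming only the two new helpers and the standard <elf.h> ElfW macro; not part of the patch):

#include <elf.h>
#include <stdio.h>

#include "utils.h"	/* read_auxv(), find_auxv_entry() */

int main(void)
{
	static char buf[4096];
	ElfW(auxv_t) *entry;

	/* Slurp /proc/self/auxv into a caller-provided buffer */
	if (read_auxv(buf, sizeof(buf)))
		return 1;

	/* Look up a single entry; AT_PAGESZ exists on all architectures */
	entry = find_auxv_entry(AT_PAGESZ, buf);
	if (entry)
		printf("page size: %lu\n", (unsigned long)entry->a_un.a_val);

	return 0;
}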
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh b/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh
index ea6e373edc27..93eede4e8fbe 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh
@@ -170,7 +170,7 @@ qemu_append="`identify_qemu_append "$QEMU"`"
# Pull in Kconfig-fragment boot parameters
boot_args="`configfrag_boot_params "$boot_args" "$config_template"`"
# Generate kernel-version-specific boot parameters
-boot_args="`per_version_boot_params "$boot_args" $builddir/.config $seconds`"
+boot_args="`per_version_boot_params "$boot_args" $resdir/.config $seconds`"
if test -n "$TORTURE_BUILDONLY"
then
diff --git a/tools/testing/selftests/splice/Makefile b/tools/testing/selftests/splice/Makefile
index de51f439d4a6..9fc78e5e5451 100644
--- a/tools/testing/selftests/splice/Makefile
+++ b/tools/testing/selftests/splice/Makefile
@@ -4,5 +4,4 @@ all: $(TEST_PROGS) $(EXTRA)
include ../lib.mk
-clean:
- rm -fr $(TEST_PROGS) $(EXTRA)
+EXTRA_CLEAN := $(EXTRA)
diff --git a/tools/testing/selftests/sync/Makefile b/tools/testing/selftests/sync/Makefile
index 87ac400507c0..4981c6b6d050 100644
--- a/tools/testing/selftests/sync/Makefile
+++ b/tools/testing/selftests/sync/Makefile
@@ -20,5 +20,4 @@ TESTS += sync_stress_merge.o
sync_test: $(OBJS) $(TESTS)
-clean:
- $(RM) sync_test $(OBJS) $(TESTS)
+EXTRA_CLEAN := sync_test $(OBJS) $(TESTS)
diff --git a/tools/testing/selftests/timers/clocksource-switch.c b/tools/testing/selftests/timers/clocksource-switch.c
index fd88e3025bed..5ff165373f8b 100644
--- a/tools/testing/selftests/timers/clocksource-switch.c
+++ b/tools/testing/selftests/timers/clocksource-switch.c
@@ -159,7 +159,7 @@ int main(int argv, char **argc)
}
- printf("Running Asyncrhonous Switching Tests...\n");
+ printf("Running Asynchronous Switching Tests...\n");
pid = fork();
if (!pid)
return run_tests(60);
diff --git a/tools/testing/selftests/vm/Makefile b/tools/testing/selftests/vm/Makefile
index dba889004ea1..cbb29e41ef2b 100644
--- a/tools/testing/selftests/vm/Makefile
+++ b/tools/testing/selftests/vm/Makefile
@@ -16,6 +16,7 @@ TEST_GEN_FILES += thuge-gen
TEST_GEN_FILES += transhuge-stress
TEST_GEN_FILES += userfaultfd
TEST_GEN_FILES += mlock-random-test
+TEST_GEN_FILES += virtual_address_range
TEST_PROGS := run_vmtests
diff --git a/tools/testing/selftests/vm/config b/tools/testing/selftests/vm/config
index 698c7ed28a26..1c0d76cb5adf 100644
--- a/tools/testing/selftests/vm/config
+++ b/tools/testing/selftests/vm/config
@@ -1 +1,2 @@
+CONFIG_SYSVIPC=y
CONFIG_USERFAULTFD=y
diff --git a/tools/testing/selftests/vm/map_hugetlb.c b/tools/testing/selftests/vm/map_hugetlb.c
index addcd6fc1ecc..77687ab59f77 100644
--- a/tools/testing/selftests/vm/map_hugetlb.c
+++ b/tools/testing/selftests/vm/map_hugetlb.c
@@ -62,7 +62,7 @@ int main(void)
void *addr;
int ret;
- addr = mmap(ADDR, LENGTH, PROTECTION, FLAGS, 0, 0);
+ addr = mmap(ADDR, LENGTH, PROTECTION, FLAGS, -1, 0);
if (addr == MAP_FAILED) {
perror("mmap");
exit(1);
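This hunk, like the mlock2-tests, on-fault-limit and thuge-gen hunks below, changes the fd argument of anonymous mappings from 0 to -1: Linux ignores the descriptor when MAP_ANONYMOUS is set, but portable code expects fd == -1. A minimal sketch of the corrected call, with an arbitrary example length (not part of the patch):

#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 2 * 1024 * 1024;	/* example length only */
	void *p;

	/* Anonymous private mapping: fd is -1, offset is 0 */
	p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return EXIT_FAILURE;
	}

	munmap(p, len);
	return 0;
}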
diff --git a/tools/testing/selftests/vm/mlock2-tests.c b/tools/testing/selftests/vm/mlock2-tests.c
index ff0cda2b19c9..e5dbc87b4297 100644
--- a/tools/testing/selftests/vm/mlock2-tests.c
+++ b/tools/testing/selftests/vm/mlock2-tests.c
@@ -293,7 +293,7 @@ static int test_mlock_lock()
unsigned long page_size = getpagesize();
map = mmap(NULL, 2 * page_size, PROT_READ | PROT_WRITE,
- MAP_ANONYMOUS | MAP_PRIVATE, 0, 0);
+ MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
if (map == MAP_FAILED) {
perror("test_mlock_locked mmap");
goto out;
@@ -402,7 +402,7 @@ static int test_mlock_onfault()
unsigned long page_size = getpagesize();
map = mmap(NULL, 2 * page_size, PROT_READ | PROT_WRITE,
- MAP_ANONYMOUS | MAP_PRIVATE, 0, 0);
+ MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
if (map == MAP_FAILED) {
perror("test_mlock_locked mmap");
goto out;
@@ -445,7 +445,7 @@ static int test_lock_onfault_of_present()
uint64_t page1_flags, page2_flags;
map = mmap(NULL, 2 * page_size, PROT_READ | PROT_WRITE,
- MAP_ANONYMOUS | MAP_PRIVATE, 0, 0);
+ MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
if (map == MAP_FAILED) {
perror("test_mlock_locked mmap");
goto out;
@@ -492,7 +492,7 @@ static int test_munlockall()
unsigned long page_size = getpagesize();
map = mmap(NULL, 2 * page_size, PROT_READ | PROT_WRITE,
- MAP_ANONYMOUS | MAP_PRIVATE, 0, 0);
+ MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
if (map == MAP_FAILED) {
perror("test_munlockall mmap");
@@ -518,7 +518,7 @@ static int test_munlockall()
munmap(map, 2 * page_size);
map = mmap(NULL, 2 * page_size, PROT_READ | PROT_WRITE,
- MAP_ANONYMOUS | MAP_PRIVATE, 0, 0);
+ MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
if (map == MAP_FAILED) {
perror("test_munlockall second mmap");
@@ -573,7 +573,7 @@ static int test_vma_management(bool call_mlock)
struct vm_boundaries page3;
map = mmap(NULL, 3 * page_size, PROT_READ | PROT_WRITE,
- MAP_ANONYMOUS | MAP_PRIVATE, 0, 0);
+ MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
if (map == MAP_FAILED) {
perror("mmap()");
return ret;
diff --git a/tools/testing/selftests/vm/on-fault-limit.c b/tools/testing/selftests/vm/on-fault-limit.c
index 0ae458f32fdb..7f96a5c2e292 100644
--- a/tools/testing/selftests/vm/on-fault-limit.c
+++ b/tools/testing/selftests/vm/on-fault-limit.c
@@ -26,7 +26,7 @@ static int test_limit(void)
}
map = mmap(NULL, 2 * lims.rlim_max, PROT_READ | PROT_WRITE,
- MAP_PRIVATE | MAP_ANONYMOUS | MAP_POPULATE, 0, 0);
+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_POPULATE, -1, 0);
if (map != MAP_FAILED)
printf("mmap should have failed, but didn't\n");
else {
diff --git a/tools/testing/selftests/vm/run_vmtests b/tools/testing/selftests/vm/run_vmtests
index 3214a6456d13..07548a1fa901 100755
--- a/tools/testing/selftests/vm/run_vmtests
+++ b/tools/testing/selftests/vm/run_vmtests
@@ -49,9 +49,9 @@ fi
mkdir $mnt
mount -t hugetlbfs none $mnt
-echo "--------------------"
+echo "---------------------"
echo "running hugepage-mmap"
-echo "--------------------"
+echo "---------------------"
./hugepage-mmap
if [ $? -ne 0 ]; then
echo "[FAIL]"
@@ -77,9 +77,9 @@ fi
echo $shmmax > /proc/sys/kernel/shmmax
echo $shmall > /proc/sys/kernel/shmall
-echo "--------------------"
+echo "-------------------"
echo "running map_hugetlb"
-echo "--------------------"
+echo "-------------------"
./map_hugetlb
if [ $? -ne 0 ]; then
echo "[FAIL]"
@@ -92,9 +92,9 @@ echo "NOTE: The above hugetlb tests provide minimal coverage. Use"
echo " https://github.com/libhugetlbfs/libhugetlbfs.git for"
echo " hugetlb regression testing."
-echo "--------------------"
+echo "-------------------"
echo "running userfaultfd"
-echo "--------------------"
+echo "-------------------"
./userfaultfd anon 128 32
if [ $? -ne 0 ]; then
echo "[FAIL]"
@@ -103,10 +103,10 @@ else
echo "[PASS]"
fi
-echo "----------------------------"
+echo "---------------------------"
echo "running userfaultfd_hugetlb"
-echo "----------------------------"
-# 258MB total huge pages == 128MB src and 128MB dst
+echo "---------------------------"
+# 256MB total huge pages == 128MB src and 128MB dst
./userfaultfd hugetlb 128 32 $mnt/ufd_test_file
if [ $? -ne 0 ]; then
echo "[FAIL]"
@@ -116,9 +116,9 @@ else
fi
rm -f $mnt/ufd_test_file
-echo "----------------------------"
+echo "-------------------------"
echo "running userfaultfd_shmem"
-echo "----------------------------"
+echo "-------------------------"
./userfaultfd shmem 128 32
if [ $? -ne 0 ]; then
echo "[FAIL]"
@@ -143,9 +143,9 @@ else
echo "[PASS]"
fi
-echo "--------------------"
+echo "----------------------"
echo "running on-fault-limit"
-echo "--------------------"
+echo "----------------------"
sudo -u nobody ./on-fault-limit
if [ $? -ne 0 ]; then
echo "[FAIL]"
@@ -165,4 +165,15 @@ else
echo "[PASS]"
fi
+echo "-----------------------------"
+echo "running virtual_address_range"
+echo "-----------------------------"
+./virtual_address_range
+if [ $? -ne 0 ]; then
+ echo "[FAIL]"
+ exitcode=1
+else
+ echo "[PASS]"
+fi
+
exit $exitcode
diff --git a/tools/testing/selftests/vm/thuge-gen.c b/tools/testing/selftests/vm/thuge-gen.c
index 0bc737a75150..88a2ab535e01 100644
--- a/tools/testing/selftests/vm/thuge-gen.c
+++ b/tools/testing/selftests/vm/thuge-gen.c
@@ -146,7 +146,7 @@ void test_mmap(unsigned long size, unsigned flags)
before = read_free(size);
map = mmap(NULL, size*NUM_PAGES, PROT_READ|PROT_WRITE,
- MAP_PRIVATE|MAP_ANONYMOUS|MAP_HUGETLB|flags, 0, 0);
+ MAP_PRIVATE|MAP_ANONYMOUS|MAP_HUGETLB|flags, -1, 0);
if (map == (char *)-1) err("mmap");
memset(map, 0xff, size*NUM_PAGES);
diff --git a/tools/testing/selftests/vm/virtual_address_range.c b/tools/testing/selftests/vm/virtual_address_range.c
new file mode 100644
index 000000000000..3b02aa6eb9da
--- /dev/null
+++ b/tools/testing/selftests/vm/virtual_address_range.c
@@ -0,0 +1,122 @@
+/*
+ * Copyright 2017, Anshuman Khandual, IBM Corp.
+ * Licensed under GPLv2.
+ *
+ * Works on architectures which support 128TB virtual
+ * address range and beyond.
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <errno.h>
+#include <numaif.h>
+#include <sys/mman.h>
+#include <sys/time.h>
+
+/*
+ * Maximum address range mapped with a single mmap()
+ * call is a little bit more than 16GB. Hence 16GB is
+ * chosen as the single chunk size for address space
+ * mapping.
+ */
+#define MAP_CHUNK_SIZE 17179869184UL /* 16GB */
+
+/*
+ * Address space up to 128TB is mapped without any hint
+ * and is enabled by default. Address space beyond 128TB
+ * up to 512TB is obtained by passing a hint address as the
+ * first argument to the mmap() system call.
+ *
+ * The process heap address space is divided into two
+ * different areas, one below 128TB and one above 128TB
+ * up to 512TB: one of size 128TB and the
+ * other of size 384TB.
+ */
+#define NR_CHUNKS_128TB 8192UL /* Number of 16GB chunks for 128TB */
+#define NR_CHUNKS_384TB 24576UL /* Number of 16GB chunks for 384TB */
+
+#define ADDR_MARK_128TB (1UL << 47) /* First address beyond 128TB */
+
+static char *hind_addr(void)
+{
+ int bits = 48 + rand() % 15;
+
+ return (char *) (1UL << bits);
+}
+
+static int validate_addr(char *ptr, int high_addr)
+{
+ unsigned long addr = (unsigned long) ptr;
+
+ if (high_addr) {
+ if (addr < ADDR_MARK_128TB) {
+ printf("Bad address %lx\n", addr);
+ return 1;
+ }
+ return 0;
+ }
+
+ if (addr > ADDR_MARK_128TB) {
+ printf("Bad address %lx\n", addr);
+ return 1;
+ }
+ return 0;
+}
+
+static int validate_lower_address_hint(void)
+{
+ char *ptr;
+
+ ptr = mmap((void *) (1UL << 45), MAP_CHUNK_SIZE, PROT_READ |
+ PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+
+ if (ptr == MAP_FAILED)
+ return 0;
+
+ return 1;
+}
+
+int main(int argc, char *argv[])
+{
+ char *ptr[NR_CHUNKS_128TB];
+ char *hptr[NR_CHUNKS_384TB];
+ char *hint;
+ unsigned long i, lchunks, hchunks;
+
+ for (i = 0; i < NR_CHUNKS_128TB; i++) {
+ ptr[i] = mmap(NULL, MAP_CHUNK_SIZE, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+
+ if (ptr[i] == MAP_FAILED) {
+ if (validate_lower_address_hint())
+ return 1;
+ break;
+ }
+
+ if (validate_addr(ptr[i], 0))
+ return 1;
+ }
+ lchunks = i;
+
+ for (i = 0; i < NR_CHUNKS_384TB; i++) {
+ hint = hind_addr();
+ hptr[i] = mmap(hint, MAP_CHUNK_SIZE, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+
+ if (hptr[i] == MAP_FAILED)
+ break;
+
+ if (validate_addr(hptr[i], 1))
+ return 1;
+ }
+ hchunks = i;
+
+ for (i = 0; i < lchunks; i++)
+ munmap(ptr[i], MAP_CHUNK_SIZE);
+
+ for (i = 0; i < hchunks; i++)
+ munmap(hptr[i], MAP_CHUNK_SIZE);
+
+ return 0;
+}
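A quick compile-time check of the constants used above (an editorial sketch assuming a 64-bit unsigned long and C11; not part of the patch): 128TB and 384TB split into 16GB chunks give exactly 8192 and 24576 chunks, and 1UL << 47 is 128TB.

/* Sanity-check the chunk arithmetic used by virtual_address_range.c */
#define TB (1UL << 40)
#define GB (1UL << 30)

_Static_assert(16 * GB == 17179869184UL,      "MAP_CHUNK_SIZE is 16GB");
_Static_assert(128 * TB / (16 * GB) == 8192,  "NR_CHUNKS_128TB");
_Static_assert(384 * TB / (16 * GB) == 24576, "NR_CHUNKS_384TB");
_Static_assert((1UL << 47) == 128 * TB,       "ADDR_MARK_128TB is 128TB");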
diff --git a/tools/testing/selftests/watchdog/watchdog-test.c b/tools/testing/selftests/watchdog/watchdog-test.c
index 6983d05097e2..a74c9d739d07 100644
--- a/tools/testing/selftests/watchdog/watchdog-test.c
+++ b/tools/testing/selftests/watchdog/watchdog-test.c
@@ -24,9 +24,11 @@ const char v = 'V';
static void keep_alive(void)
{
int dummy;
+ int ret;
- printf(".");
- ioctl(fd, WDIOC_KEEPALIVE, &dummy);
+ ret = ioctl(fd, WDIOC_KEEPALIVE, &dummy);
+ if (!ret)
+ printf(".");
}
/*
@@ -51,6 +53,7 @@ int main(int argc, char *argv[])
int flags;
unsigned int ping_rate = 1;
int ret;
+ int i;
setbuf(stdout, NULL);
@@ -61,31 +64,35 @@ int main(int argc, char *argv[])
exit(-1);
}
- if (argc > 1) {
- if (!strncasecmp(argv[1], "-d", 2)) {
- flags = WDIOS_DISABLECARD;
- ioctl(fd, WDIOC_SETOPTIONS, &flags);
- printf("Watchdog card disabled.\n");
- goto end;
- } else if (!strncasecmp(argv[1], "-e", 2)) {
- flags = WDIOS_ENABLECARD;
- ioctl(fd, WDIOC_SETOPTIONS, &flags);
- printf("Watchdog card enabled.\n");
- goto end;
- } else if (!strncasecmp(argv[1], "-t", 2) && argv[2]) {
- flags = atoi(argv[2]);
- ioctl(fd, WDIOC_SETTIMEOUT, &flags);
- printf("Watchdog timeout set to %u seconds.\n", flags);
- goto end;
- } else if (!strncasecmp(argv[1], "-p", 2) && argv[2]) {
- ping_rate = strtoul(argv[2], NULL, 0);
- printf("Watchdog ping rate set to %u seconds.\n", ping_rate);
- } else {
- printf("-d to disable, -e to enable, -t <n> to set " \
- "the timeout,\n-p <n> to set the ping rate, and \n");
- printf("run by itself to tick the card.\n");
- goto end;
- }
+ for (i = 1; i < argc; i++) {
+ if (!strncasecmp(argv[i], "-d", 2)) {
+ flags = WDIOS_DISABLECARD;
+ ret = ioctl(fd, WDIOC_SETOPTIONS, &flags);
+ if (!ret)
+ printf("Watchdog card disabled.\n");
+ } else if (!strncasecmp(argv[i], "-e", 2)) {
+ flags = WDIOS_ENABLECARD;
+ ret = ioctl(fd, WDIOC_SETOPTIONS, &flags);
+ if (!ret)
+ printf("Watchdog card enabled.\n");
+ } else if (!strncasecmp(argv[i], "-t", 2) && argv[2]) {
+ flags = atoi(argv[i + 1]);
+ ret = ioctl(fd, WDIOC_SETTIMEOUT, &flags);
+ if (!ret)
+ printf("Watchdog timeout set to %u seconds.\n", flags);
+ i++;
+ } else if (!strncasecmp(argv[i], "-p", 2) && argv[2]) {
+ ping_rate = strtoul(argv[i + 1], NULL, 0);
+ printf("Watchdog ping rate set to %u seconds.\n", ping_rate);
+ i++;
+ } else {
+ printf("-d to disable, -e to enable, -t <n> to set "
+ "the timeout,\n-p <n> to set the ping rate, and ");
+ printf("run by itself to tick the card.\n");
+ printf("Parameters are parsed left-to-right in real-time.\n");
+ printf("Example: %s -d -t 10 -p 5 -e\n", argv[0]);
+ goto end;
+ }
}
printf("Watchdog Ticking Away!\n");
diff --git a/tools/testing/selftests/x86/.gitignore b/tools/testing/selftests/x86/.gitignore
index 15034fef9698..7757f73ff9a3 100644
--- a/tools/testing/selftests/x86/.gitignore
+++ b/tools/testing/selftests/x86/.gitignore
@@ -1,2 +1,15 @@
*_32
*_64
+single_step_syscall
+sysret_ss_attrs
+syscall_nt
+ptrace_syscall
+test_mremap_vdso
+check_initial_reg_state
+sigreturn
+ldt_gdt
+iopl
+mpx-mini-test
+ioperm
+protection_keys
+test_vdso
diff --git a/tools/testing/selftests/x86/Makefile b/tools/testing/selftests/x86/Makefile
index 38e0a9ca5d71..97f187e2663f 100644
--- a/tools/testing/selftests/x86/Makefile
+++ b/tools/testing/selftests/x86/Makefile
@@ -40,8 +40,7 @@ all_32: $(BINARIES_32)
all_64: $(BINARIES_64)
-clean:
- $(RM) $(BINARIES_32) $(BINARIES_64)
+EXTRA_CLEAN := $(BINARIES_32) $(BINARIES_64)
$(BINARIES_32): $(OUTPUT)/%_32: %.c
$(CC) -m32 -o $@ $(CFLAGS) $(EXTRA_CFLAGS) $^ -lrt -ldl -lm
diff --git a/tools/usb/.gitignore b/tools/usb/.gitignore
new file mode 100644
index 000000000000..1b7448981435
--- /dev/null
+++ b/tools/usb/.gitignore
@@ -0,0 +1,2 @@
+ffs-test
+testusb
diff --git a/tools/usb/usbip/README b/tools/usb/usbip/README
index 5eb2b6c7722b..7844490fc603 100644
--- a/tools/usb/usbip/README
+++ b/tools/usb/usbip/README
@@ -244,7 +244,7 @@ Detach the imported device:
- See 'Debug Tips' on the project wiki.
- http://usbip.wiki.sourceforge.net/how-to-debug-usbip
- usbip-host.ko must be bound to the target device.
- - See /proc/bus/usb/devices and find "Driver=..." lines of the device.
+ - See /sys/kernel/debug/usb/devices and find "Driver=..." lines of the device.
- Target USB gadget must be bound to vudc
(using USB gadget subsys, not usbip bind command)
- Shutdown firewall.
diff --git a/tools/usb/usbip/libsrc/usbip_common.c b/tools/usb/usbip/libsrc/usbip_common.c
index ac73710473de..1517a232ab18 100644
--- a/tools/usb/usbip/libsrc/usbip_common.c
+++ b/tools/usb/usbip/libsrc/usbip_common.c
@@ -215,9 +215,16 @@ int read_usb_interface(struct usbip_usb_device *udev, int i,
struct usbip_usb_interface *uinf)
{
char busid[SYSFS_BUS_ID_SIZE];
+ int size;
struct udev_device *sif;
- sprintf(busid, "%s:%d.%d", udev->busid, udev->bConfigurationValue, i);
+ size = snprintf(busid, sizeof(busid), "%s:%d.%d",
+ udev->busid, udev->bConfigurationValue, i);
+ if (size < 0 || (unsigned int)size >= sizeof(busid)) {
+ err("busid length %i >= %lu or < 0", size,
+ (long unsigned)sizeof(busid));
+ return -1;
+ }
sif = udev_device_new_from_subsystem_sysname(udev_context, "usb", busid);
if (!sif) {
diff --git a/tools/usb/usbip/libsrc/usbip_host_common.c b/tools/usb/usbip/libsrc/usbip_host_common.c
index 9d415228883d..6ff7b601f854 100644
--- a/tools/usb/usbip/libsrc/usbip_host_common.c
+++ b/tools/usb/usbip/libsrc/usbip_host_common.c
@@ -40,13 +40,20 @@ struct udev *udev_context;
static int32_t read_attr_usbip_status(struct usbip_usb_device *udev)
{
char status_attr_path[SYSFS_PATH_MAX];
+ int size;
int fd;
int length;
char status;
int value = 0;
- snprintf(status_attr_path, SYSFS_PATH_MAX, "%s/usbip_status",
- udev->path);
+ size = snprintf(status_attr_path, sizeof(status_attr_path),
+ "%s/usbip_status", udev->path);
+ if (size < 0 || (unsigned int)size >= sizeof(status_attr_path)) {
+ err("usbip_status path length %i >= %lu or < 0", size,
+ (long unsigned)sizeof(status_attr_path));
+ return -1;
+ }
+
fd = open(status_attr_path, O_RDONLY);
if (fd < 0) {
@@ -218,6 +225,7 @@ int usbip_export_device(struct usbip_exported_device *edev, int sockfd)
{
char attr_name[] = "usbip_sockfd";
char sockfd_attr_path[SYSFS_PATH_MAX];
+ int size;
char sockfd_buff[30];
int ret;
@@ -237,10 +245,20 @@ int usbip_export_device(struct usbip_exported_device *edev, int sockfd)
}
/* only the first interface is true */
- snprintf(sockfd_attr_path, sizeof(sockfd_attr_path), "%s/%s",
- edev->udev.path, attr_name);
+ size = snprintf(sockfd_attr_path, sizeof(sockfd_attr_path), "%s/%s",
+ edev->udev.path, attr_name);
+ if (size < 0 || (unsigned int)size >= sizeof(sockfd_attr_path)) {
+ err("exported device path length %i >= %lu or < 0", size,
+ (long unsigned)sizeof(sockfd_attr_path));
+ return -1;
+ }
- snprintf(sockfd_buff, sizeof(sockfd_buff), "%d\n", sockfd);
+ size = snprintf(sockfd_buff, sizeof(sockfd_buff), "%d\n", sockfd);
+ if (size < 0 || (unsigned int)size >= sizeof(sockfd_buff)) {
+ err("socket length %i >= %lu or < 0", size,
+ (long unsigned)sizeof(sockfd_buff));
+ return -1;
+ }
ret = write_sysfs_attribute(sockfd_attr_path, sockfd_buff,
strlen(sockfd_buff));
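The usbip_common.c and usbip_host_common.c hunks above repeat the same snprintf() truncation check. Factored into a helper it might look like the sketch below (build_path() is hypothetical, shown only to illustrate the pattern):

#include <stdio.h>

/* Return 0 on success, -1 if the formatted path did not fit in buf */
static int build_path(char *buf, size_t bufsiz, const char *dir,
		      const char *name)
{
	int size;

	size = snprintf(buf, bufsiz, "%s/%s", dir, name);
	if (size < 0 || (size_t)size >= bufsiz) {
		fprintf(stderr, "path length %d >= %zu or < 0\n",
			size, bufsiz);
		return -1;
	}
	return 0;
}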
diff --git a/tools/usb/usbip/libsrc/vhci_driver.c b/tools/usb/usbip/libsrc/vhci_driver.c
index ad9204773533..f659c146cdc8 100644
--- a/tools/usb/usbip/libsrc/vhci_driver.c
+++ b/tools/usb/usbip/libsrc/vhci_driver.c
@@ -123,33 +123,15 @@ static int refresh_imported_device_list(void)
static int get_nports(void)
{
- char *c;
- int nports = 0;
- const char *attr_status;
+ const char *attr_nports;
- attr_status = udev_device_get_sysattr_value(vhci_driver->hc_device,
- "status");
- if (!attr_status) {
- err("udev_device_get_sysattr_value failed");
+ attr_nports = udev_device_get_sysattr_value(vhci_driver->hc_device, "nports");
+ if (!attr_nports) {
+ err("udev_device_get_sysattr_value nports failed");
return -1;
}
- /* skip a header line */
- c = strchr(attr_status, '\n');
- if (!c)
- return 0;
- c++;
-
- while (*c != '\0') {
- /* go to the next line */
- c = strchr(c, '\n');
- if (!c)
- return nports;
- c++;
- nports += 1;
- }
-
- return nports;
+ return (int)strtoul(attr_nports, NULL, 10);
}
/*
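get_nports() now reads the dedicated nports sysfs attribute and converts it with strtoul(). A slightly stricter variant (an editorial sketch, not what the patch does) would reject malformed or out-of-range values explicitly:

#include <errno.h>
#include <limits.h>
#include <stdlib.h>

/* Parse a decimal sysfs attribute; return -1 on malformed input */
static int parse_nports(const char *attr)
{
	char *end;
	unsigned long val;

	errno = 0;
	val = strtoul(attr, &end, 10);
	if (errno || end == attr || val > INT_MAX)
		return -1;

	return (int)val;
}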
diff --git a/tools/usb/usbip/src/usbip.c b/tools/usb/usbip/src/usbip.c
index d7599d943529..73d8eee8130b 100644
--- a/tools/usb/usbip/src/usbip.c
+++ b/tools/usb/usbip/src/usbip.c
@@ -176,6 +176,8 @@ int main(int argc, char *argv[])
break;
case '?':
printf("usbip: invalid option\n");
+ /* Terminate after printing error */
+ /* FALLTHRU */
default:
usbip_usage();
goto out;
diff --git a/usr/Kconfig b/usr/Kconfig
index 6278f135256d..c0c48507e44e 100644
--- a/usr/Kconfig
+++ b/usr/Kconfig
@@ -21,6 +21,16 @@ config INITRAMFS_SOURCE
If you are not sure, leave it blank.
+config INITRAMFS_FORCE
+ bool "Ignore the initramfs passed by the bootloader"
+ depends on CMDLINE_EXTEND || CMDLINE_FORCE
+ help
+ This option causes the kernel to ignore the initramfs image
+ (or initrd image) passed to it by the bootloader. This is
+ analogous to CMDLINE_FORCE, which is found on some architectures,
+ and is useful if you cannot or don't want to change the image
+ your bootloader passes to the kernel.
+
config INITRAMFS_ROOT_UID
int "User ID to map to 0 (user root)"
depends on INITRAMFS_SOURCE!=""
diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
index 35d7100e0815..5976609ef27c 100644
--- a/virt/kvm/arm/arch_timer.c
+++ b/virt/kvm/arm/arch_timer.c
@@ -184,28 +184,47 @@ bool kvm_timer_should_fire(struct arch_timer_context *timer_ctx)
return cval <= now;
}
+/*
+ * Reflect the timer output level into the kvm_run structure
+ */
+void kvm_timer_update_run(struct kvm_vcpu *vcpu)
+{
+ struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
+ struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
+ struct kvm_sync_regs *regs = &vcpu->run->s.regs;
+
+ /* Populate the device bitmap with the timer states */
+ regs->device_irq_level &= ~(KVM_ARM_DEV_EL1_VTIMER |
+ KVM_ARM_DEV_EL1_PTIMER);
+ if (vtimer->irq.level)
+ regs->device_irq_level |= KVM_ARM_DEV_EL1_VTIMER;
+ if (ptimer->irq.level)
+ regs->device_irq_level |= KVM_ARM_DEV_EL1_PTIMER;
+}
+
static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level,
struct arch_timer_context *timer_ctx)
{
int ret;
- BUG_ON(!vgic_initialized(vcpu->kvm));
-
timer_ctx->active_cleared_last = false;
timer_ctx->irq.level = new_level;
trace_kvm_timer_update_irq(vcpu->vcpu_id, timer_ctx->irq.irq,
timer_ctx->irq.level);
- ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id, timer_ctx->irq.irq,
- timer_ctx->irq.level);
- WARN_ON(ret);
+ if (likely(irqchip_in_kernel(vcpu->kvm))) {
+ ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
+ timer_ctx->irq.irq,
+ timer_ctx->irq.level);
+ WARN_ON(ret);
+ }
}
/*
* Check if there was a change in the timer state (should we raise or lower
* the line level to the GIC).
*/
-static int kvm_timer_update_state(struct kvm_vcpu *vcpu)
+static void kvm_timer_update_state(struct kvm_vcpu *vcpu)
{
struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
@@ -217,16 +236,14 @@ static int kvm_timer_update_state(struct kvm_vcpu *vcpu)
* because the guest would never see the interrupt. Instead wait
* until we call this function from kvm_timer_flush_hwstate.
*/
- if (!vgic_initialized(vcpu->kvm) || !timer->enabled)
- return -ENODEV;
+ if (unlikely(!timer->enabled))
+ return;
if (kvm_timer_should_fire(vtimer) != vtimer->irq.level)
kvm_timer_update_irq(vcpu, !vtimer->irq.level, vtimer);
if (kvm_timer_should_fire(ptimer) != ptimer->irq.level)
kvm_timer_update_irq(vcpu, !ptimer->irq.level, ptimer);
-
- return 0;
}
/* Schedule the background timer for the emulated timer. */
@@ -286,25 +303,12 @@ void kvm_timer_unschedule(struct kvm_vcpu *vcpu)
timer_disarm(timer);
}
-/**
- * kvm_timer_flush_hwstate - prepare to move the virt timer to the cpu
- * @vcpu: The vcpu pointer
- *
- * Check if the virtual timer has expired while we were running in the host,
- * and inject an interrupt if that was the case.
- */
-void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu)
+static void kvm_timer_flush_hwstate_vgic(struct kvm_vcpu *vcpu)
{
struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
bool phys_active;
int ret;
- if (kvm_timer_update_state(vcpu))
- return;
-
- /* Set the background timer for the physical timer emulation. */
- kvm_timer_emulate(vcpu, vcpu_ptimer(vcpu));
-
/*
* If we enter the guest with the virtual input level to the VGIC
* asserted, then we have already told the VGIC what we need to, and
@@ -356,11 +360,72 @@ void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu)
vtimer->active_cleared_last = !phys_active;
}
+bool kvm_timer_should_notify_user(struct kvm_vcpu *vcpu)
+{
+ struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
+ struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
+ struct kvm_sync_regs *sregs = &vcpu->run->s.regs;
+ bool vlevel, plevel;
+
+ if (likely(irqchip_in_kernel(vcpu->kvm)))
+ return false;
+
+ vlevel = sregs->device_irq_level & KVM_ARM_DEV_EL1_VTIMER;
+ plevel = sregs->device_irq_level & KVM_ARM_DEV_EL1_PTIMER;
+
+ return vtimer->irq.level != vlevel ||
+ ptimer->irq.level != plevel;
+}
+
+static void kvm_timer_flush_hwstate_user(struct kvm_vcpu *vcpu)
+{
+ struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
+
+ /*
+ * To prevent continuously exiting from the guest, we mask the
+ * physical interrupt such that the guest can make forward progress.
+ * Once we detect the output level being deasserted, we unmask the
+ * interrupt again so that we exit from the guest when the timer
+ * fires.
+ */
+ if (vtimer->irq.level)
+ disable_percpu_irq(host_vtimer_irq);
+ else
+ enable_percpu_irq(host_vtimer_irq, 0);
+}
+
+/**
+ * kvm_timer_flush_hwstate - prepare timers before running the vcpu
+ * @vcpu: The vcpu pointer
+ *
+ * Check if the virtual timer has expired while we were running in the host,
+ * and inject an interrupt if that was the case, making sure the timer is
+ * masked or disabled on the host so that we keep executing. Also schedule a
+ * software timer for the physical timer if it is enabled.
+ */
+void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu)
+{
+ struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+
+ if (unlikely(!timer->enabled))
+ return;
+
+ kvm_timer_update_state(vcpu);
+
+ /* Set the background timer for the physical timer emulation. */
+ kvm_timer_emulate(vcpu, vcpu_ptimer(vcpu));
+
+ if (unlikely(!irqchip_in_kernel(vcpu->kvm)))
+ kvm_timer_flush_hwstate_user(vcpu);
+ else
+ kvm_timer_flush_hwstate_vgic(vcpu);
+}
+
/**
* kvm_timer_sync_hwstate - sync timer state from cpu
* @vcpu: The vcpu pointer
*
- * Check if the virtual timer has expired while we were running in the guest,
+ * Check if any of the timers have expired while we were running in the guest,
* and inject an interrupt if that was the case.
*/
void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu)
@@ -560,6 +625,13 @@ int kvm_timer_enable(struct kvm_vcpu *vcpu)
if (timer->enabled)
return 0;
+ /* Without a VGIC we do not map virtual IRQs to physical IRQs */
+ if (!irqchip_in_kernel(vcpu->kvm))
+ goto no_vgic;
+
+ if (!vgic_initialized(vcpu->kvm))
+ return -ENODEV;
+
/*
* Find the physical IRQ number corresponding to the host_vtimer_irq
*/
@@ -583,8 +655,8 @@ int kvm_timer_enable(struct kvm_vcpu *vcpu)
if (ret)
return ret;
+no_vgic:
timer->enabled = 1;
-
return 0;
}
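kvm_timer_update_run() and the no-in-kernel-irqchip path above expose the timer output levels to userspace through kvm_run. A hedged sketch of how a VMM might consume them after KVM_RUN returns: the KVM_ARM_DEV_* flags and run->s.regs.device_irq_level come from the arm64 uapi headers, while gic_set_level() and the PPI line numbers are hypothetical placeholders for an emulated interrupt controller.

#include <linux/kvm.h>
#include <stdbool.h>

/* Illustrative PPI numbers for the userspace GIC model (board-specific) */
#define PPI_PTIMER	30
#define PPI_VTIMER	27
#define PPI_PMU		23

static void sync_device_irq_levels(struct kvm_run *run,
				   void (*gic_set_level)(int line, bool level))
{
	__u64 level = run->s.regs.device_irq_level;

	gic_set_level(PPI_VTIMER, level & KVM_ARM_DEV_EL1_VTIMER);
	gic_set_level(PPI_PTIMER, level & KVM_ARM_DEV_EL1_PTIMER);
	gic_set_level(PPI_PMU,    level & KVM_ARM_DEV_PMU);
}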
diff --git a/virt/kvm/arm/hyp/vgic-v2-sr.c b/virt/kvm/arm/hyp/vgic-v2-sr.c
index c8aeb7b91ec8..a3f18d362366 100644
--- a/virt/kvm/arm/hyp/vgic-v2-sr.c
+++ b/virt/kvm/arm/hyp/vgic-v2-sr.c
@@ -22,49 +22,6 @@
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
-static void __hyp_text save_maint_int_state(struct kvm_vcpu *vcpu,
- void __iomem *base)
-{
- struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
- int nr_lr = (kern_hyp_va(&kvm_vgic_global_state))->nr_lr;
- u32 eisr0, eisr1;
- int i;
- bool expect_mi;
-
- expect_mi = !!(cpu_if->vgic_hcr & GICH_HCR_UIE);
-
- for (i = 0; i < nr_lr; i++) {
- if (!(vcpu->arch.vgic_cpu.live_lrs & (1UL << i)))
- continue;
-
- expect_mi |= (!(cpu_if->vgic_lr[i] & GICH_LR_HW) &&
- (cpu_if->vgic_lr[i] & GICH_LR_EOI));
- }
-
- if (expect_mi) {
- cpu_if->vgic_misr = readl_relaxed(base + GICH_MISR);
-
- if (cpu_if->vgic_misr & GICH_MISR_EOI) {
- eisr0 = readl_relaxed(base + GICH_EISR0);
- if (unlikely(nr_lr > 32))
- eisr1 = readl_relaxed(base + GICH_EISR1);
- else
- eisr1 = 0;
- } else {
- eisr0 = eisr1 = 0;
- }
- } else {
- cpu_if->vgic_misr = 0;
- eisr0 = eisr1 = 0;
- }
-
-#ifdef CONFIG_CPU_BIG_ENDIAN
- cpu_if->vgic_eisr = ((u64)eisr0 << 32) | eisr1;
-#else
- cpu_if->vgic_eisr = ((u64)eisr1 << 32) | eisr0;
-#endif
-}
-
static void __hyp_text save_elrsr(struct kvm_vcpu *vcpu, void __iomem *base)
{
struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
@@ -87,13 +44,10 @@ static void __hyp_text save_elrsr(struct kvm_vcpu *vcpu, void __iomem *base)
static void __hyp_text save_lrs(struct kvm_vcpu *vcpu, void __iomem *base)
{
struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
- int nr_lr = (kern_hyp_va(&kvm_vgic_global_state))->nr_lr;
int i;
+ u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;
- for (i = 0; i < nr_lr; i++) {
- if (!(vcpu->arch.vgic_cpu.live_lrs & (1UL << i)))
- continue;
-
+ for (i = 0; i < used_lrs; i++) {
if (cpu_if->vgic_elrsr & (1UL << i))
cpu_if->vgic_lr[i] &= ~GICH_LR_STATE;
else
@@ -110,26 +64,20 @@ void __hyp_text __vgic_v2_save_state(struct kvm_vcpu *vcpu)
struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
struct vgic_dist *vgic = &kvm->arch.vgic;
void __iomem *base = kern_hyp_va(vgic->vctrl_base);
+ u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;
if (!base)
return;
- cpu_if->vgic_vmcr = readl_relaxed(base + GICH_VMCR);
-
- if (vcpu->arch.vgic_cpu.live_lrs) {
+ if (used_lrs) {
cpu_if->vgic_apr = readl_relaxed(base + GICH_APR);
- save_maint_int_state(vcpu, base);
save_elrsr(vcpu, base);
save_lrs(vcpu, base);
writel_relaxed(0, base + GICH_HCR);
-
- vcpu->arch.vgic_cpu.live_lrs = 0;
} else {
- cpu_if->vgic_eisr = 0;
cpu_if->vgic_elrsr = ~0UL;
- cpu_if->vgic_misr = 0;
cpu_if->vgic_apr = 0;
}
}
@@ -141,32 +89,20 @@ void __hyp_text __vgic_v2_restore_state(struct kvm_vcpu *vcpu)
struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
struct vgic_dist *vgic = &kvm->arch.vgic;
void __iomem *base = kern_hyp_va(vgic->vctrl_base);
- int nr_lr = (kern_hyp_va(&kvm_vgic_global_state))->nr_lr;
int i;
- u64 live_lrs = 0;
+ u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;
if (!base)
return;
-
- for (i = 0; i < nr_lr; i++)
- if (cpu_if->vgic_lr[i] & GICH_LR_STATE)
- live_lrs |= 1UL << i;
-
- if (live_lrs) {
+ if (used_lrs) {
writel_relaxed(cpu_if->vgic_hcr, base + GICH_HCR);
writel_relaxed(cpu_if->vgic_apr, base + GICH_APR);
- for (i = 0; i < nr_lr; i++) {
- if (!(live_lrs & (1UL << i)))
- continue;
-
+ for (i = 0; i < used_lrs; i++) {
writel_relaxed(cpu_if->vgic_lr[i],
base + GICH_LR0 + (i * 4));
}
}
-
- writel_relaxed(cpu_if->vgic_vmcr, base + GICH_VMCR);
- vcpu->arch.vgic_cpu.live_lrs = live_lrs;
}
#ifdef CONFIG_ARM64
diff --git a/virt/kvm/arm/hyp/vgic-v3-sr.c b/virt/kvm/arm/hyp/vgic-v3-sr.c
index 3947095cc0a1..bce6037cf01d 100644
--- a/virt/kvm/arm/hyp/vgic-v3-sr.c
+++ b/virt/kvm/arm/hyp/vgic-v3-sr.c
@@ -118,66 +118,32 @@ static void __hyp_text __gic_v3_set_lr(u64 val, int lr)
}
}
-static void __hyp_text save_maint_int_state(struct kvm_vcpu *vcpu, int nr_lr)
-{
- struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
- int i;
- bool expect_mi;
-
- expect_mi = !!(cpu_if->vgic_hcr & ICH_HCR_UIE);
-
- for (i = 0; i < nr_lr; i++) {
- if (!(vcpu->arch.vgic_cpu.live_lrs & (1UL << i)))
- continue;
-
- expect_mi |= (!(cpu_if->vgic_lr[i] & ICH_LR_HW) &&
- (cpu_if->vgic_lr[i] & ICH_LR_EOI));
- }
-
- if (expect_mi) {
- cpu_if->vgic_misr = read_gicreg(ICH_MISR_EL2);
-
- if (cpu_if->vgic_misr & ICH_MISR_EOI)
- cpu_if->vgic_eisr = read_gicreg(ICH_EISR_EL2);
- else
- cpu_if->vgic_eisr = 0;
- } else {
- cpu_if->vgic_misr = 0;
- cpu_if->vgic_eisr = 0;
- }
-}
-
void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
{
struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
+ u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;
u64 val;
/*
* Make sure stores to the GIC via the memory mapped interface
* are now visible to the system register interface.
*/
- if (!cpu_if->vgic_sre)
+ if (!cpu_if->vgic_sre) {
dsb(st);
+ cpu_if->vgic_vmcr = read_gicreg(ICH_VMCR_EL2);
+ }
- cpu_if->vgic_vmcr = read_gicreg(ICH_VMCR_EL2);
-
- if (vcpu->arch.vgic_cpu.live_lrs) {
+ if (used_lrs) {
int i;
- u32 max_lr_idx, nr_pri_bits;
+ u32 nr_pri_bits;
cpu_if->vgic_elrsr = read_gicreg(ICH_ELSR_EL2);
write_gicreg(0, ICH_HCR_EL2);
val = read_gicreg(ICH_VTR_EL2);
- max_lr_idx = vtr_to_max_lr_idx(val);
nr_pri_bits = vtr_to_nr_pri_bits(val);
- save_maint_int_state(vcpu, max_lr_idx + 1);
-
- for (i = 0; i <= max_lr_idx; i++) {
- if (!(vcpu->arch.vgic_cpu.live_lrs & (1UL << i)))
- continue;
-
+ for (i = 0; i < used_lrs; i++) {
if (cpu_if->vgic_elrsr & (1 << i))
cpu_if->vgic_lr[i] &= ~ICH_LR_STATE;
else
@@ -205,11 +171,7 @@ void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
default:
cpu_if->vgic_ap1r[0] = read_gicreg(ICH_AP1R0_EL2);
}
-
- vcpu->arch.vgic_cpu.live_lrs = 0;
} else {
- cpu_if->vgic_misr = 0;
- cpu_if->vgic_eisr = 0;
cpu_if->vgic_elrsr = 0xffff;
cpu_if->vgic_ap0r[0] = 0;
cpu_if->vgic_ap0r[1] = 0;
@@ -234,9 +196,9 @@ void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
{
struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
+ u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;
u64 val;
- u32 max_lr_idx, nr_pri_bits;
- u16 live_lrs = 0;
+ u32 nr_pri_bits;
int i;
/*
@@ -245,25 +207,19 @@ void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
* delivered as a FIQ to the guest, with potentially fatal
* consequences. So we must make sure that ICC_SRE_EL1 has
* been actually programmed with the value we want before
- * starting to mess with the rest of the GIC.
+ * starting to mess with the rest of the GIC, and VMCR_EL2 in
+ * particular.
*/
if (!cpu_if->vgic_sre) {
write_gicreg(0, ICC_SRE_EL1);
isb();
+ write_gicreg(cpu_if->vgic_vmcr, ICH_VMCR_EL2);
}
val = read_gicreg(ICH_VTR_EL2);
- max_lr_idx = vtr_to_max_lr_idx(val);
nr_pri_bits = vtr_to_nr_pri_bits(val);
- for (i = 0; i <= max_lr_idx; i++) {
- if (cpu_if->vgic_lr[i] & ICH_LR_STATE)
- live_lrs |= (1 << i);
- }
-
- write_gicreg(cpu_if->vgic_vmcr, ICH_VMCR_EL2);
-
- if (live_lrs) {
+ if (used_lrs) {
write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2);
switch (nr_pri_bits) {
@@ -286,12 +242,8 @@ void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
write_gicreg(cpu_if->vgic_ap1r[0], ICH_AP1R0_EL2);
}
- for (i = 0; i <= max_lr_idx; i++) {
- if (!(live_lrs & (1 << i)))
- continue;
-
+ for (i = 0; i < used_lrs; i++)
__gic_v3_set_lr(cpu_if->vgic_lr[i], i);
- }
}
/*
@@ -303,7 +255,6 @@ void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
isb();
dsb(sy);
}
- vcpu->arch.vgic_cpu.live_lrs = live_lrs;
/*
* Prevent the guest from touching the GIC system registers if
@@ -326,3 +277,13 @@ u64 __hyp_text __vgic_v3_get_ich_vtr_el2(void)
{
return read_gicreg(ICH_VTR_EL2);
}
+
+u64 __hyp_text __vgic_v3_read_vmcr(void)
+{
+ return read_gicreg(ICH_VMCR_EL2);
+}
+
+void __hyp_text __vgic_v3_write_vmcr(u32 vmcr)
+{
+ write_gicreg(vmcr, ICH_VMCR_EL2);
+}
diff --git a/virt/kvm/arm/pmu.c b/virt/kvm/arm/pmu.c
index 69ccce308458..4b43e7f3b158 100644
--- a/virt/kvm/arm/pmu.c
+++ b/virt/kvm/arm/pmu.c
@@ -230,13 +230,44 @@ static void kvm_pmu_update_state(struct kvm_vcpu *vcpu)
return;
overflow = !!kvm_pmu_overflow_status(vcpu);
- if (pmu->irq_level != overflow) {
- pmu->irq_level = overflow;
- kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
- pmu->irq_num, overflow);
+ if (pmu->irq_level == overflow)
+ return;
+
+ pmu->irq_level = overflow;
+
+ if (likely(irqchip_in_kernel(vcpu->kvm))) {
+ int ret;
+ ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
+ pmu->irq_num, overflow);
+ WARN_ON(ret);
}
}
+bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu)
+{
+ struct kvm_pmu *pmu = &vcpu->arch.pmu;
+ struct kvm_sync_regs *sregs = &vcpu->run->s.regs;
+ bool run_level = sregs->device_irq_level & KVM_ARM_DEV_PMU;
+
+ if (likely(irqchip_in_kernel(vcpu->kvm)))
+ return false;
+
+ return pmu->irq_level != run_level;
+}
+
+/*
+ * Reflect the PMU overflow interrupt output level into the kvm_run structure
+ */
+void kvm_pmu_update_run(struct kvm_vcpu *vcpu)
+{
+ struct kvm_sync_regs *regs = &vcpu->run->s.regs;
+
+ /* Populate the device bitmap with the PMU overflow interrupt state */
+ regs->device_irq_level &= ~KVM_ARM_DEV_PMU;
+ if (vcpu->arch.pmu.irq_level)
+ regs->device_irq_level |= KVM_ARM_DEV_PMU;
+}
+
/**
* kvm_pmu_flush_hwstate - flush pmu state to cpu
* @vcpu: The vcpu pointer
diff --git a/virt/kvm/arm/vgic/vgic-init.c b/virt/kvm/arm/vgic/vgic-init.c
index 702f8108608d..25fd1b942c11 100644
--- a/virt/kvm/arm/vgic/vgic-init.c
+++ b/virt/kvm/arm/vgic/vgic-init.c
@@ -24,7 +24,12 @@
/*
* Initialization rules: there are multiple stages to the vgic
- * initialization, both for the distributor and the CPU interfaces.
+ * initialization, both for the distributor and the CPU interfaces. The basic
+ * idea is that even though the VGIC is not functional or not requested from
+ * user space, the critical path of the run loop can still call VGIC functions
+ * that just won't do anything, without them having to check additional
+ * initialization flags to ensure they don't look at uninitialized data
+ * structures.
*
* Distributor:
*
@@ -39,23 +44,67 @@
*
* CPU Interface:
*
- * - kvm_vgic_cpu_early_init(): initialization of static data that
+ * - kvm_vgic_vcpu_early_init(): initialization of static data that
* doesn't depend on any sizing information or emulation type. No
* allocation is allowed there.
*/
/* EARLY INIT */
-/*
- * Those 2 functions should not be needed anymore but they
- * still are called from arm.c
+/**
+ * kvm_vgic_early_init() - Initialize static VGIC data structures
+ * @kvm: The VM whose VGIC distributor should be initialized
+ *
+ * Only do initialization of static structures that don't require any
+ * allocation or sizing information from userspace. vgic_init() called
+ * kvm_vgic_dist_init() which takes care of the rest.
*/
void kvm_vgic_early_init(struct kvm *kvm)
{
+ struct vgic_dist *dist = &kvm->arch.vgic;
+
+ INIT_LIST_HEAD(&dist->lpi_list_head);
+ spin_lock_init(&dist->lpi_list_lock);
}
+/**
+ * kvm_vgic_vcpu_early_init() - Initialize static VGIC VCPU data structures
+ * @vcpu: The VCPU whose VGIC data structures should be initialized
+ *
+ * Only do initialization, but do not actually enable the VGIC CPU interface
+ * yet.
+ */
void kvm_vgic_vcpu_early_init(struct kvm_vcpu *vcpu)
{
+ struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+ int i;
+
+ INIT_LIST_HEAD(&vgic_cpu->ap_list_head);
+ spin_lock_init(&vgic_cpu->ap_list_lock);
+
+ /*
+ * Enable and configure all SGIs to be edge-triggered and
+ * configure all PPIs as level-triggered.
+ */
+ for (i = 0; i < VGIC_NR_PRIVATE_IRQS; i++) {
+ struct vgic_irq *irq = &vgic_cpu->private_irqs[i];
+
+ INIT_LIST_HEAD(&irq->ap_list);
+ spin_lock_init(&irq->irq_lock);
+ irq->intid = i;
+ irq->vcpu = NULL;
+ irq->target_vcpu = vcpu;
+ irq->targets = 1U << vcpu->vcpu_id;
+ kref_init(&irq->refcount);
+ if (vgic_irq_is_sgi(i)) {
+ /* SGIs */
+ irq->enabled = 1;
+ irq->config = VGIC_CONFIG_EDGE;
+ } else {
+ /* PPIs */
+ irq->config = VGIC_CONFIG_LEVEL;
+ }
+ }
}
/* CREATION */
@@ -148,9 +197,6 @@ static int kvm_vgic_dist_init(struct kvm *kvm, unsigned int nr_spis)
struct kvm_vcpu *vcpu0 = kvm_get_vcpu(kvm, 0);
int i;
- INIT_LIST_HEAD(&dist->lpi_list_head);
- spin_lock_init(&dist->lpi_list_lock);
-
dist->spis = kcalloc(nr_spis, sizeof(struct vgic_irq), GFP_KERNEL);
if (!dist->spis)
return -ENOMEM;
@@ -181,41 +227,11 @@ static int kvm_vgic_dist_init(struct kvm *kvm, unsigned int nr_spis)
}
/**
- * kvm_vgic_vcpu_init: initialize the vcpu data structures and
- * enable the VCPU interface
- * @vcpu: the VCPU which's VGIC should be initialized
+ * kvm_vgic_vcpu_init() - Enable the VCPU interface
+ * @vcpu: the VCPU whose VGIC should be enabled
*/
static void kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
{
- struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
- int i;
-
- INIT_LIST_HEAD(&vgic_cpu->ap_list_head);
- spin_lock_init(&vgic_cpu->ap_list_lock);
-
- /*
- * Enable and configure all SGIs to be edge-triggered and
- * configure all PPIs as level-triggered.
- */
- for (i = 0; i < VGIC_NR_PRIVATE_IRQS; i++) {
- struct vgic_irq *irq = &vgic_cpu->private_irqs[i];
-
- INIT_LIST_HEAD(&irq->ap_list);
- spin_lock_init(&irq->irq_lock);
- irq->intid = i;
- irq->vcpu = NULL;
- irq->target_vcpu = vcpu;
- irq->targets = 1U << vcpu->vcpu_id;
- kref_init(&irq->refcount);
- if (vgic_irq_is_sgi(i)) {
- /* SGIs */
- irq->enabled = 1;
- irq->config = VGIC_CONFIG_EDGE;
- } else {
- /* PPIs */
- irq->config = VGIC_CONFIG_LEVEL;
- }
- }
if (kvm_vgic_global_state.type == VGIC_V2)
vgic_v2_enable(vcpu);
else
@@ -262,6 +278,18 @@ int vgic_init(struct kvm *kvm)
vgic_debug_init(kvm);
dist->initialized = true;
+
+ /*
+ * If we're initializing GICv2 on-demand when first running the VCPU
+ * then we need to load the VGIC state onto the CPU. We can detect
+ * this easily by checking if we are in between vcpu_load and vcpu_put
+ * when we just initialized the VGIC.
+ */
+ preempt_disable();
+ vcpu = kvm_arm_get_running_vcpu();
+ if (vcpu)
+ kvm_vgic_load(vcpu);
+ preempt_enable();
out:
return ret;
}
diff --git a/virt/kvm/arm/vgic/vgic-v2.c b/virt/kvm/arm/vgic/vgic-v2.c
index b637d9c7afe3..a65757aab6d3 100644
--- a/virt/kvm/arm/vgic/vgic-v2.c
+++ b/virt/kvm/arm/vgic/vgic-v2.c
@@ -22,20 +22,6 @@
#include "vgic.h"
-/*
- * Call this function to convert a u64 value to an unsigned long * bitmask
- * in a way that works on both 32-bit and 64-bit LE and BE platforms.
- *
- * Warning: Calling this function may modify *val.
- */
-static unsigned long *u64_to_bitmask(u64 *val)
-{
-#if defined(CONFIG_CPU_BIG_ENDIAN) && BITS_PER_LONG == 32
- *val = (*val >> 32) | (*val << 32);
-#endif
- return (unsigned long *)val;
-}
-
static inline void vgic_v2_write_lr(int lr, u32 val)
{
void __iomem *base = kvm_vgic_global_state.vctrl_base;
@@ -51,45 +37,17 @@ void vgic_v2_init_lrs(void)
vgic_v2_write_lr(i, 0);
}
-void vgic_v2_process_maintenance(struct kvm_vcpu *vcpu)
+void vgic_v2_set_underflow(struct kvm_vcpu *vcpu)
{
struct vgic_v2_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v2;
- if (cpuif->vgic_misr & GICH_MISR_EOI) {
- u64 eisr = cpuif->vgic_eisr;
- unsigned long *eisr_bmap = u64_to_bitmask(&eisr);
- int lr;
-
- for_each_set_bit(lr, eisr_bmap, kvm_vgic_global_state.nr_lr) {
- u32 intid = cpuif->vgic_lr[lr] & GICH_LR_VIRTUALID;
-
- WARN_ON(cpuif->vgic_lr[lr] & GICH_LR_STATE);
-
- /* Only SPIs require notification */
- if (vgic_valid_spi(vcpu->kvm, intid))
- kvm_notify_acked_irq(vcpu->kvm, 0,
- intid - VGIC_NR_PRIVATE_IRQS);
- }
- }
-
- /* check and disable underflow maintenance IRQ */
- cpuif->vgic_hcr &= ~GICH_HCR_UIE;
-
- /*
- * In the next iterations of the vcpu loop, if we sync the
- * vgic state after flushing it, but before entering the guest
- * (this happens for pending signals and vmid rollovers), then
- * make sure we don't pick up any old maintenance interrupts
- * here.
- */
- cpuif->vgic_eisr = 0;
+ cpuif->vgic_hcr |= GICH_HCR_UIE;
}
-void vgic_v2_set_underflow(struct kvm_vcpu *vcpu)
+static bool lr_signals_eoi_mi(u32 lr_val)
{
- struct vgic_v2_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v2;
-
- cpuif->vgic_hcr |= GICH_HCR_UIE;
+ return !(lr_val & GICH_LR_STATE) && (lr_val & GICH_LR_EOI) &&
+ !(lr_val & GICH_LR_HW);
}
/*
@@ -101,14 +59,22 @@ void vgic_v2_set_underflow(struct kvm_vcpu *vcpu)
*/
void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
{
- struct vgic_v2_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v2;
+ struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+ struct vgic_v2_cpu_if *cpuif = &vgic_cpu->vgic_v2;
int lr;
- for (lr = 0; lr < vcpu->arch.vgic_cpu.used_lrs; lr++) {
+ cpuif->vgic_hcr &= ~GICH_HCR_UIE;
+
+ for (lr = 0; lr < vgic_cpu->used_lrs; lr++) {
u32 val = cpuif->vgic_lr[lr];
u32 intid = val & GICH_LR_VIRTUALID;
struct vgic_irq *irq;
+ /* Notify fds when the guest EOI'ed a level-triggered SPI */
+ if (lr_signals_eoi_mi(val) && vgic_valid_spi(vcpu->kvm, intid))
+ kvm_notify_acked_irq(vcpu->kvm, 0,
+ intid - VGIC_NR_PRIVATE_IRQS);
+
irq = vgic_get_irq(vcpu->kvm, vcpu, intid);
spin_lock(&irq->irq_lock);
@@ -141,6 +107,8 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
spin_unlock(&irq->irq_lock);
vgic_put_irq(vcpu->kvm, irq);
}
+
+ vgic_cpu->used_lrs = 0;
}
/*
@@ -199,6 +167,7 @@ void vgic_v2_clear_lr(struct kvm_vcpu *vcpu, int lr)
void vgic_v2_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
{
+ struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
u32 vmcr;
vmcr = (vmcrp->ctlr << GICH_VMCR_CTRL_SHIFT) & GICH_VMCR_CTRL_MASK;
@@ -209,12 +178,15 @@ void vgic_v2_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
vmcr |= ((vmcrp->pmr >> GICV_PMR_PRIORITY_SHIFT) <<
GICH_VMCR_PRIMASK_SHIFT) & GICH_VMCR_PRIMASK_MASK;
- vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr = vmcr;
+ cpu_if->vgic_vmcr = vmcr;
}
void vgic_v2_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
{
- u32 vmcr = vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr;
+ struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
+ u32 vmcr;
+
+ vmcr = cpu_if->vgic_vmcr;
vmcrp->ctlr = (vmcr & GICH_VMCR_CTRL_MASK) >>
GICH_VMCR_CTRL_SHIFT;
@@ -390,3 +362,19 @@ out:
return ret;
}
+
+void vgic_v2_load(struct kvm_vcpu *vcpu)
+{
+ struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
+ struct vgic_dist *vgic = &vcpu->kvm->arch.vgic;
+
+ writel_relaxed(cpu_if->vgic_vmcr, vgic->vctrl_base + GICH_VMCR);
+}
+
+void vgic_v2_put(struct kvm_vcpu *vcpu)
+{
+ struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
+ struct vgic_dist *vgic = &vcpu->kvm->arch.vgic;
+
+ cpu_if->vgic_vmcr = readl_relaxed(vgic->vctrl_base + GICH_VMCR);
+}
diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c
index be0f4c3e0142..df1503650300 100644
--- a/virt/kvm/arm/vgic/vgic-v3.c
+++ b/virt/kvm/arm/vgic/vgic-v3.c
@@ -21,59 +21,29 @@
#include "vgic.h"
-void vgic_v3_process_maintenance(struct kvm_vcpu *vcpu)
+void vgic_v3_set_underflow(struct kvm_vcpu *vcpu)
{
struct vgic_v3_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v3;
- u32 model = vcpu->kvm->arch.vgic.vgic_model;
-
- if (cpuif->vgic_misr & ICH_MISR_EOI) {
- unsigned long eisr_bmap = cpuif->vgic_eisr;
- int lr;
-
- for_each_set_bit(lr, &eisr_bmap, kvm_vgic_global_state.nr_lr) {
- u32 intid;
- u64 val = cpuif->vgic_lr[lr];
-
- if (model == KVM_DEV_TYPE_ARM_VGIC_V3)
- intid = val & ICH_LR_VIRTUAL_ID_MASK;
- else
- intid = val & GICH_LR_VIRTUALID;
-
- WARN_ON(cpuif->vgic_lr[lr] & ICH_LR_STATE);
-
- /* Only SPIs require notification */
- if (vgic_valid_spi(vcpu->kvm, intid))
- kvm_notify_acked_irq(vcpu->kvm, 0,
- intid - VGIC_NR_PRIVATE_IRQS);
- }
-
- /*
- * In the next iterations of the vcpu loop, if we sync
- * the vgic state after flushing it, but before
- * entering the guest (this happens for pending
- * signals and vmid rollovers), then make sure we
- * don't pick up any old maintenance interrupts here.
- */
- cpuif->vgic_eisr = 0;
- }
- cpuif->vgic_hcr &= ~ICH_HCR_UIE;
+ cpuif->vgic_hcr |= ICH_HCR_UIE;
}
-void vgic_v3_set_underflow(struct kvm_vcpu *vcpu)
+static bool lr_signals_eoi_mi(u64 lr_val)
{
- struct vgic_v3_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v3;
-
- cpuif->vgic_hcr |= ICH_HCR_UIE;
+ return !(lr_val & ICH_LR_STATE) && (lr_val & ICH_LR_EOI) &&
+ !(lr_val & ICH_LR_HW);
}
void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
{
- struct vgic_v3_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v3;
+ struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+ struct vgic_v3_cpu_if *cpuif = &vgic_cpu->vgic_v3;
u32 model = vcpu->kvm->arch.vgic.vgic_model;
int lr;
- for (lr = 0; lr < vcpu->arch.vgic_cpu.used_lrs; lr++) {
+ cpuif->vgic_hcr &= ~ICH_HCR_UIE;
+
+ for (lr = 0; lr < vgic_cpu->used_lrs; lr++) {
u64 val = cpuif->vgic_lr[lr];
u32 intid;
struct vgic_irq *irq;
@@ -82,6 +52,12 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
intid = val & ICH_LR_VIRTUAL_ID_MASK;
else
intid = val & GICH_LR_VIRTUALID;
+
+ /* Notify fds when the guest EOI'ed a level-triggered IRQ */
+ if (lr_signals_eoi_mi(val) && vgic_valid_spi(vcpu->kvm, intid))
+ kvm_notify_acked_irq(vcpu->kvm, 0,
+ intid - VGIC_NR_PRIVATE_IRQS);
+
irq = vgic_get_irq(vcpu->kvm, vcpu, intid);
if (!irq) /* An LPI could have been unmapped. */
continue;
@@ -117,6 +93,8 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
spin_unlock(&irq->irq_lock);
vgic_put_irq(vcpu->kvm, irq);
}
+
+ vgic_cpu->used_lrs = 0;
}
/* Requires the irq to be locked already */
@@ -173,6 +151,7 @@ void vgic_v3_clear_lr(struct kvm_vcpu *vcpu, int lr)
void vgic_v3_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
{
+ struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
u32 vmcr;
/*
@@ -188,12 +167,15 @@ void vgic_v3_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
vmcr |= (vmcrp->grpen0 << ICH_VMCR_ENG0_SHIFT) & ICH_VMCR_ENG0_MASK;
vmcr |= (vmcrp->grpen1 << ICH_VMCR_ENG1_SHIFT) & ICH_VMCR_ENG1_MASK;
- vcpu->arch.vgic_cpu.vgic_v3.vgic_vmcr = vmcr;
+ cpu_if->vgic_vmcr = vmcr;
}
void vgic_v3_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
{
- u32 vmcr = vcpu->arch.vgic_cpu.vgic_v3.vgic_vmcr;
+ struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
+ u32 vmcr;
+
+ vmcr = cpu_if->vgic_vmcr;
/*
* Ignore the FIQen bit, because GIC emulation always implies
@@ -386,3 +368,24 @@ int vgic_v3_probe(const struct gic_kvm_info *info)
return 0;
}
+
+void vgic_v3_load(struct kvm_vcpu *vcpu)
+{
+ struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
+
+ /*
+ * If dealing with a GICv2 emulation on GICv3, VMCR_EL2.VFIQen
+ * is dependent on ICC_SRE_EL1.SRE, and we have to perform the
+ * VMCR_EL2 save/restore in the world switch.
+ */
+ if (likely(cpu_if->vgic_sre))
+ kvm_call_hyp(__vgic_v3_write_vmcr, cpu_if->vgic_vmcr);
+}
+
+void vgic_v3_put(struct kvm_vcpu *vcpu)
+{
+ struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
+
+ if (likely(cpu_if->vgic_sre))
+ cpu_if->vgic_vmcr = kvm_call_hyp(__vgic_v3_read_vmcr);
+}
diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c
index 654dfd40e449..4346bc7d08dc 100644
--- a/virt/kvm/arm/vgic/vgic.c
+++ b/virt/kvm/arm/vgic/vgic.c
@@ -29,7 +29,9 @@
#define DEBUG_SPINLOCK_BUG_ON(p)
#endif
-struct vgic_global __section(.hyp.text) kvm_vgic_global_state = {.gicv3_cpuif = STATIC_KEY_FALSE_INIT,};
+struct vgic_global kvm_vgic_global_state __ro_after_init = {
+ .gicv3_cpuif = STATIC_KEY_FALSE_INIT,
+};
/*
* Locking order is always:
@@ -527,14 +529,6 @@ retry:
spin_unlock(&vgic_cpu->ap_list_lock);
}
-static inline void vgic_process_maintenance_interrupt(struct kvm_vcpu *vcpu)
-{
- if (kvm_vgic_global_state.type == VGIC_V2)
- vgic_v2_process_maintenance(vcpu);
- else
- vgic_v3_process_maintenance(vcpu);
-}
-
static inline void vgic_fold_lr_state(struct kvm_vcpu *vcpu)
{
if (kvm_vgic_global_state.type == VGIC_V2)
@@ -601,10 +595,8 @@ static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock));
- if (compute_ap_list_depth(vcpu) > kvm_vgic_global_state.nr_lr) {
- vgic_set_underflow(vcpu);
+ if (compute_ap_list_depth(vcpu) > kvm_vgic_global_state.nr_lr)
vgic_sort_ap_list(vcpu);
- }
list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
spin_lock(&irq->irq_lock);
@@ -623,8 +615,12 @@ static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
next:
spin_unlock(&irq->irq_lock);
- if (count == kvm_vgic_global_state.nr_lr)
+ if (count == kvm_vgic_global_state.nr_lr) {
+ if (!list_is_last(&irq->ap_list,
+ &vgic_cpu->ap_list_head))
+ vgic_set_underflow(vcpu);
break;
+ }
}
vcpu->arch.vgic_cpu.used_lrs = count;
@@ -637,18 +633,30 @@ next:
/* Sync back the hardware VGIC state into our emulation after a guest's run. */
void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
{
- if (unlikely(!vgic_initialized(vcpu->kvm)))
+ struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+
+ /* An empty ap_list_head implies used_lrs == 0 */
+ if (list_empty(&vcpu->arch.vgic_cpu.ap_list_head))
return;
- vgic_process_maintenance_interrupt(vcpu);
- vgic_fold_lr_state(vcpu);
+ if (vgic_cpu->used_lrs)
+ vgic_fold_lr_state(vcpu);
vgic_prune_ap_list(vcpu);
}
/* Flush our emulation state into the GIC hardware before entering the guest. */
void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
{
- if (unlikely(!vgic_initialized(vcpu->kvm)))
+ /*
+ * If there are no virtual interrupts active or pending for this
+ * VCPU, then there is no work to do and we can bail out without
+ * taking any lock. There is a potential race with someone injecting
+ * interrupts to the VCPU, but it is a benign race as the VCPU will
+ * either observe the new interrupt before or after doing this check,
+ * and introducing an additional synchronization mechanism doesn't change
+ * this.
+ */
+ if (list_empty(&vcpu->arch.vgic_cpu.ap_list_head))
return;
spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock);
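
Both the sync and flush paths above now bail out on an unlocked emptiness check of the AP list, and the comment argues the race with a concurrent injector is benign because the new interrupt is simply seen on the next pass. A standalone sketch of that "check, then lock" fast path; the names are stand-ins, and where the kernel relies on list_empty() semantics this version uses an atomic pointer for the unlocked read:

/*
 * Standalone sketch of the lock-free "empty? then bail" fast path used by
 * kvm_vgic_flush_hwstate()/kvm_vgic_sync_hwstate() above.  Hypothetical
 * types; build with -pthread.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct node { struct node *next; };

struct ap_list {
	pthread_mutex_t lock;
	_Atomic(struct node *) head;	/* NULL <=> no pending interrupts */
};

static void flush_state(struct ap_list *l)
{
	/*
	 * Unlocked check: a producer may insert right after we look, which
	 * is benign - the entry is simply handled on the next flush, just
	 * as the patch comment argues for the VCPU case.
	 */
	if (atomic_load(&l->head) == NULL)
		return;

	pthread_mutex_lock(&l->lock);
	/* ... walk the list and program the "hardware" here ... */
	pthread_mutex_unlock(&l->lock);
}

int main(void)
{
	struct ap_list l = { .lock = PTHREAD_MUTEX_INITIALIZER, .head = NULL };

	flush_state(&l);	/* takes the fast path: nothing pending */
	puts("flushed");
	return 0;
}
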
@@ -656,6 +664,28 @@ void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);
}
+void kvm_vgic_load(struct kvm_vcpu *vcpu)
+{
+ if (unlikely(!vgic_initialized(vcpu->kvm)))
+ return;
+
+ if (kvm_vgic_global_state.type == VGIC_V2)
+ vgic_v2_load(vcpu);
+ else
+ vgic_v3_load(vcpu);
+}
+
+void kvm_vgic_put(struct kvm_vcpu *vcpu)
+{
+ if (unlikely(!vgic_initialized(vcpu->kvm)))
+ return;
+
+ if (kvm_vgic_global_state.type == VGIC_V2)
+ vgic_v2_put(vcpu);
+ else
+ vgic_v3_put(vcpu);
+}
+
int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
{
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
diff --git a/virt/kvm/arm/vgic/vgic.h b/virt/kvm/arm/vgic/vgic.h
index 6cf557e9f718..799fd651b260 100644
--- a/virt/kvm/arm/vgic/vgic.h
+++ b/virt/kvm/arm/vgic/vgic.h
@@ -119,7 +119,6 @@ void vgic_kick_vcpus(struct kvm *kvm);
int vgic_check_ioaddr(struct kvm *kvm, phys_addr_t *ioaddr,
phys_addr_t addr, phys_addr_t alignment);
-void vgic_v2_process_maintenance(struct kvm_vcpu *vcpu);
void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu);
void vgic_v2_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr);
void vgic_v2_clear_lr(struct kvm_vcpu *vcpu, int lr);
@@ -138,6 +137,8 @@ int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address,
enum vgic_type);
void vgic_v2_init_lrs(void);
+void vgic_v2_load(struct kvm_vcpu *vcpu);
+void vgic_v2_put(struct kvm_vcpu *vcpu);
static inline void vgic_get_irq_kref(struct vgic_irq *irq)
{
@@ -147,7 +148,6 @@ static inline void vgic_get_irq_kref(struct vgic_irq *irq)
kref_get(&irq->refcount);
}
-void vgic_v3_process_maintenance(struct kvm_vcpu *vcpu);
void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu);
void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr);
void vgic_v3_clear_lr(struct kvm_vcpu *vcpu, int lr);
@@ -159,6 +159,9 @@ int vgic_v3_probe(const struct gic_kvm_info *info);
int vgic_v3_map_resources(struct kvm *kvm);
int vgic_register_redist_iodevs(struct kvm *kvm, gpa_t dist_base_address);
+void vgic_v3_load(struct kvm_vcpu *vcpu);
+void vgic_v3_put(struct kvm_vcpu *vcpu);
+
int vgic_register_its_iodevs(struct kvm *kvm);
bool vgic_has_its(struct kvm *kvm);
int kvm_vgic_register_its_device(void);
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
index 4d28a9ddbee0..a8d540398bbd 100644
--- a/virt/kvm/eventfd.c
+++ b/virt/kvm/eventfd.c
@@ -490,7 +490,7 @@ void kvm_register_irq_ack_notifier(struct kvm *kvm,
mutex_lock(&kvm->irq_lock);
hlist_add_head_rcu(&kian->link, &kvm->irq_ack_notifier_list);
mutex_unlock(&kvm->irq_lock);
- kvm_vcpu_request_scan_ioapic(kvm);
+ kvm_arch_post_irq_ack_notifier_list_update(kvm);
}
void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
@@ -500,7 +500,7 @@ void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
hlist_del_init_rcu(&kian->link);
mutex_unlock(&kvm->irq_lock);
synchronize_srcu(&kvm->irq_srcu);
- kvm_vcpu_request_scan_ioapic(kvm);
+ kvm_arch_post_irq_ack_notifier_list_update(kvm);
}
#endif
diff --git a/virt/kvm/irqchip.c b/virt/kvm/irqchip.c
index 3bcc9990adf7..31e40c9e81df 100644
--- a/virt/kvm/irqchip.c
+++ b/virt/kvm/irqchip.c
@@ -142,8 +142,8 @@ static int setup_routing_entry(struct kvm *kvm,
struct kvm_kernel_irq_routing_entry *e,
const struct kvm_irq_routing_entry *ue)
{
- int r = -EINVAL;
struct kvm_kernel_irq_routing_entry *ei;
+ int r;
/*
* Do not allow GSI to be mapped to the same irqchip more than once.
@@ -153,26 +153,30 @@ static int setup_routing_entry(struct kvm *kvm,
if (ei->type != KVM_IRQ_ROUTING_IRQCHIP ||
ue->type != KVM_IRQ_ROUTING_IRQCHIP ||
ue->u.irqchip.irqchip == ei->irqchip.irqchip)
- return r;
+ return -EINVAL;
e->gsi = ue->gsi;
e->type = ue->type;
r = kvm_set_routing_entry(kvm, e, ue);
if (r)
- goto out;
+ return r;
if (e->type == KVM_IRQ_ROUTING_IRQCHIP)
rt->chip[e->irqchip.irqchip][e->irqchip.pin] = e->gsi;
hlist_add_head(&e->link, &rt->map[e->gsi]);
- r = 0;
-out:
- return r;
+
+ return 0;
}
void __attribute__((weak)) kvm_arch_irq_routing_update(struct kvm *kvm)
{
}
+bool __weak kvm_arch_can_set_irq_routing(struct kvm *kvm)
+{
+ return true;
+}
+
int kvm_set_irq_routing(struct kvm *kvm,
const struct kvm_irq_routing_entry *ue,
unsigned nr,
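
kvm_arch_can_set_irq_routing() above is given a __weak default that simply returns true, so an architecture that needs to veto userspace routing tables can supply a strong override while everyone else inherits the permissive behaviour. A minimal userspace illustration of the same weak-default linkage; the function name is hypothetical and the attribute is the GCC/Clang one the kernel's __weak expands to:

/*
 * Weak-default sketch: the weak definition is used unless some other
 * object file in the link provides a strong one.
 */
#include <stdbool.h>
#include <stdio.h>

__attribute__((weak)) bool arch_can_set_routing(void)
{
	return true;	/* default: allow routing updates */
}

int main(void)
{
	printf("routing allowed: %d\n", arch_can_set_routing());
	return 0;
}
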
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 88257b311cb5..b3d151ee2a67 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -165,6 +165,24 @@ void vcpu_put(struct kvm_vcpu *vcpu)
}
EXPORT_SYMBOL_GPL(vcpu_put);
+/* TODO: merge with kvm_arch_vcpu_should_kick */
+static bool kvm_request_needs_ipi(struct kvm_vcpu *vcpu, unsigned req)
+{
+ int mode = kvm_vcpu_exiting_guest_mode(vcpu);
+
+ /*
+ * We need to wait for the VCPU to reenable interrupts and get out of
+ * READING_SHADOW_PAGE_TABLES mode.
+ */
+ if (req & KVM_REQUEST_WAIT)
+ return mode != OUTSIDE_GUEST_MODE;
+
+ /*
+ * Need to kick a running VCPU, but otherwise there is nothing to do.
+ */
+ return mode == IN_GUEST_MODE;
+}
+
static void ack_flush(void *_completed)
{
}
@@ -174,6 +192,7 @@ bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req)
int i, cpu, me;
cpumask_var_t cpus;
bool called = true;
+ bool wait = req & KVM_REQUEST_WAIT;
struct kvm_vcpu *vcpu;
zalloc_cpumask_var(&cpus, GFP_ATOMIC);
@@ -183,17 +202,17 @@ bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req)
kvm_make_request(req, vcpu);
cpu = vcpu->cpu;
- /* Set ->requests bit before we read ->mode. */
- smp_mb__after_atomic();
+ if (!(req & KVM_REQUEST_NO_WAKEUP) && kvm_vcpu_wake_up(vcpu))
+ continue;
if (cpus != NULL && cpu != -1 && cpu != me &&
- kvm_vcpu_exiting_guest_mode(vcpu) != OUTSIDE_GUEST_MODE)
+ kvm_request_needs_ipi(vcpu, req))
cpumask_set_cpu(cpu, cpus);
}
if (unlikely(cpus == NULL))
- smp_call_function_many(cpu_online_mask, ack_flush, NULL, 1);
+ smp_call_function_many(cpu_online_mask, ack_flush, NULL, wait);
else if (!cpumask_empty(cpus))
- smp_call_function_many(cpus, ack_flush, NULL, 1);
+ smp_call_function_many(cpus, ack_flush, NULL, wait);
else
called = false;
put_cpu();
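
Taken together, kvm_request_needs_ipi() and the wait argument now passed to smp_call_function_many() mean that requests tagged KVM_REQUEST_WAIT chase a vCPU until it is fully outside guest mode (including READING_SHADOW_PAGE_TABLES), while ordinary requests only kick a vCPU that is actually in guest mode, and vCPUs that kvm_vcpu_wake_up() could rouse are skipped altogether. A tiny standalone model of just the mode/flag decision; the enum values and the flag bit are stand-ins, not the kernel's definitions:

/* Sketch of the request/IPI decision made by kvm_request_needs_ipi(). */
#include <stdbool.h>
#include <stdio.h>

enum guest_mode { OUTSIDE_GUEST_MODE, IN_GUEST_MODE, READING_SHADOW_PAGE_TABLES };

#define REQUEST_WAIT 0x100	/* hypothetical stand-in for KVM_REQUEST_WAIT */

static bool request_needs_ipi(enum guest_mode mode, unsigned int req)
{
	/*
	 * WAIT requests must also chase vCPUs still reading shadow page
	 * tables; ordinary requests only need to kick a running guest.
	 */
	if (req & REQUEST_WAIT)
		return mode != OUTSIDE_GUEST_MODE;
	return mode == IN_GUEST_MODE;
}

int main(void)
{
	printf("%d %d\n",
	       request_needs_ipi(READING_SHADOW_PAGE_TABLES, REQUEST_WAIT),
	       request_needs_ipi(READING_SHADOW_PAGE_TABLES, 0));
	return 0;
}
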
@@ -504,7 +523,7 @@ static struct kvm_memslots *kvm_alloc_memslots(void)
int i;
struct kvm_memslots *slots;
- slots = kvm_kvzalloc(sizeof(struct kvm_memslots));
+ slots = kvzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
if (!slots)
return NULL;
@@ -689,18 +708,6 @@ out_err_no_disable:
return ERR_PTR(r);
}
-/*
- * Avoid using vmalloc for a small buffer.
- * Should not be used when the size is statically known.
- */
-void *kvm_kvzalloc(unsigned long size)
-{
- if (size > PAGE_SIZE)
- return vzalloc(size);
- else
- return kzalloc(size, GFP_KERNEL);
-}
-
static void kvm_destroy_devices(struct kvm *kvm)
{
struct kvm_device *dev, *tmp;
@@ -782,7 +789,7 @@ static int kvm_create_dirty_bitmap(struct kvm_memory_slot *memslot)
{
unsigned long dirty_bytes = 2 * kvm_dirty_bitmap_bytes(memslot);
- memslot->dirty_bitmap = kvm_kvzalloc(dirty_bytes);
+ memslot->dirty_bitmap = kvzalloc(dirty_bytes, GFP_KERNEL);
if (!memslot->dirty_bitmap)
return -ENOMEM;
@@ -1008,7 +1015,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
goto out_free;
}
- slots = kvm_kvzalloc(sizeof(struct kvm_memslots));
+ slots = kvzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
if (!slots)
goto out_free;
memcpy(slots, __kvm_memslots(kvm, as_id), sizeof(struct kvm_memslots));
@@ -1019,8 +1026,6 @@ int __kvm_set_memory_region(struct kvm *kvm,
old_memslots = install_new_memslots(kvm, as_id, slots);
- /* slot was deleted or moved, clear iommu mapping */
- kvm_iommu_unmap_pages(kvm, &old);
/* From this point no new shadow pages pointing to a deleted,
* or moved, memslot will be created.
*
@@ -1055,21 +1060,6 @@ int __kvm_set_memory_region(struct kvm *kvm,
kvm_free_memslot(kvm, &old, &new);
kvfree(old_memslots);
-
- /*
- * IOMMU mapping: New slots need to be mapped. Old slots need to be
- * un-mapped and re-mapped if their base changes. Since base change
- * unmapping is handled above with slot deletion, mapping alone is
- * needed here. Anything else the iommu might care about for existing
- * slots (size changes, userspace addr changes and read-only flag
- * changes) is disallowed above, so any other attribute changes getting
- * here can be skipped.
- */
- if (as_id == 0 && (change == KVM_MR_CREATE || change == KVM_MR_MOVE)) {
- r = kvm_iommu_map_pages(kvm, &new);
- return r;
- }
-
return 0;
out_slots:
@@ -1973,18 +1963,18 @@ static int __kvm_gfn_to_hva_cache_init(struct kvm_memslots *slots,
return 0;
}
-int kvm_vcpu_gfn_to_hva_cache_init(struct kvm_vcpu *vcpu, struct gfn_to_hva_cache *ghc,
+int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
gpa_t gpa, unsigned long len)
{
- struct kvm_memslots *slots = kvm_vcpu_memslots(vcpu);
+ struct kvm_memslots *slots = kvm_memslots(kvm);
return __kvm_gfn_to_hva_cache_init(slots, ghc, gpa, len);
}
-EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_hva_cache_init);
+EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init);
-int kvm_vcpu_write_guest_offset_cached(struct kvm_vcpu *vcpu, struct gfn_to_hva_cache *ghc,
- void *data, int offset, unsigned long len)
+int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
+ void *data, int offset, unsigned long len)
{
- struct kvm_memslots *slots = kvm_vcpu_memslots(vcpu);
+ struct kvm_memslots *slots = kvm_memslots(kvm);
int r;
gpa_t gpa = ghc->gpa + offset;
@@ -1994,7 +1984,7 @@ int kvm_vcpu_write_guest_offset_cached(struct kvm_vcpu *vcpu, struct gfn_to_hva_
__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len);
if (unlikely(!ghc->memslot))
- return kvm_vcpu_write_guest(vcpu, gpa, data, len);
+ return kvm_write_guest(kvm, gpa, data, len);
if (kvm_is_error_hva(ghc->hva))
return -EFAULT;
@@ -2006,19 +1996,19 @@ int kvm_vcpu_write_guest_offset_cached(struct kvm_vcpu *vcpu, struct gfn_to_hva_
return 0;
}
-EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest_offset_cached);
+EXPORT_SYMBOL_GPL(kvm_write_guest_offset_cached);
-int kvm_vcpu_write_guest_cached(struct kvm_vcpu *vcpu, struct gfn_to_hva_cache *ghc,
- void *data, unsigned long len)
+int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
+ void *data, unsigned long len)
{
- return kvm_vcpu_write_guest_offset_cached(vcpu, ghc, data, 0, len);
+ return kvm_write_guest_offset_cached(kvm, ghc, data, 0, len);
}
-EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest_cached);
+EXPORT_SYMBOL_GPL(kvm_write_guest_cached);
-int kvm_vcpu_read_guest_cached(struct kvm_vcpu *vcpu, struct gfn_to_hva_cache *ghc,
- void *data, unsigned long len)
+int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
+ void *data, unsigned long len)
{
- struct kvm_memslots *slots = kvm_vcpu_memslots(vcpu);
+ struct kvm_memslots *slots = kvm_memslots(kvm);
int r;
BUG_ON(len > ghc->len);
@@ -2027,7 +2017,7 @@ int kvm_vcpu_read_guest_cached(struct kvm_vcpu *vcpu, struct gfn_to_hva_cache *g
__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len);
if (unlikely(!ghc->memslot))
- return kvm_vcpu_read_guest(vcpu, ghc->gpa, data, len);
+ return kvm_read_guest(kvm, ghc->gpa, data, len);
if (kvm_is_error_hva(ghc->hva))
return -EFAULT;
@@ -2038,7 +2028,7 @@ int kvm_vcpu_read_guest_cached(struct kvm_vcpu *vcpu, struct gfn_to_hva_cache *g
return 0;
}
-EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_cached);
+EXPORT_SYMBOL_GPL(kvm_read_guest_cached);
int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
{
@@ -2212,8 +2202,7 @@ out:
}
EXPORT_SYMBOL_GPL(kvm_vcpu_block);
-#ifndef CONFIG_S390
-void kvm_vcpu_wake_up(struct kvm_vcpu *vcpu)
+bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu)
{
struct swait_queue_head *wqp;
@@ -2221,11 +2210,14 @@ void kvm_vcpu_wake_up(struct kvm_vcpu *vcpu)
if (swait_active(wqp)) {
swake_up(wqp);
++vcpu->stat.halt_wakeup;
+ return true;
}
+ return false;
}
EXPORT_SYMBOL_GPL(kvm_vcpu_wake_up);
+#ifndef CONFIG_S390
/*
* Kick a sleeping VCPU, or a guest VCPU in guest mode, into host kernel mode.
*/
@@ -2234,7 +2226,9 @@ void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
int me;
int cpu = vcpu->cpu;
- kvm_vcpu_wake_up(vcpu);
+ if (kvm_vcpu_wake_up(vcpu))
+ return;
+
me = get_cpu();
if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu))
if (kvm_arch_vcpu_should_kick(vcpu))
@@ -2366,7 +2360,7 @@ static int kvm_vcpu_fault(struct vm_fault *vmf)
else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET)
page = virt_to_page(vcpu->arch.pio_data);
#endif
-#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
+#ifdef CONFIG_KVM_MMIO
else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET)
page = virt_to_page(vcpu->kvm->coalesced_mmio_ring);
#endif
@@ -2935,6 +2929,10 @@ static long kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg)
case KVM_CAP_IOEVENTFD_ANY_LENGTH:
case KVM_CAP_CHECK_EXTENSION_VM:
return 1;
+#ifdef CONFIG_KVM_MMIO
+ case KVM_CAP_COALESCED_MMIO:
+ return KVM_COALESCED_MMIO_PAGE_OFFSET;
+#endif
#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
case KVM_CAP_IRQ_ROUTING:
return KVM_MAX_IRQ_ROUTES;
@@ -2984,7 +2982,7 @@ static long kvm_vm_ioctl(struct file *filp,
r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
break;
}
-#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
+#ifdef CONFIG_KVM_MMIO
case KVM_REGISTER_COALESCED_MMIO: {
struct kvm_coalesced_mmio_zone zone;
@@ -3067,6 +3065,8 @@ static long kvm_vm_ioctl(struct file *filp,
if (copy_from_user(&routing, argp, sizeof(routing)))
goto out;
r = -EINVAL;
+ if (!kvm_arch_can_set_irq_routing(kvm))
+ goto out;
if (routing.nr > KVM_MAX_IRQ_ROUTES)
goto out;
if (routing.flags)
@@ -3176,7 +3176,7 @@ static int kvm_dev_ioctl_create_vm(unsigned long type)
kvm = kvm_create_vm(type);
if (IS_ERR(kvm))
return PTR_ERR(kvm);
-#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
+#ifdef CONFIG_KVM_MMIO
r = kvm_coalesced_mmio_init(kvm);
if (r < 0) {
kvm_put_kvm(kvm);
@@ -3229,7 +3229,7 @@ static long kvm_dev_ioctl(struct file *filp,
#ifdef CONFIG_X86
r += PAGE_SIZE; /* pio data page */
#endif
-#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
+#ifdef CONFIG_KVM_MMIO
r += PAGE_SIZE; /* coalesced mmio ring page */
#endif
break;
diff --git a/virt/kvm/vfio.c b/virt/kvm/vfio.c
index d32f239eb471..37d9118fd84b 100644
--- a/virt/kvm/vfio.c
+++ b/virt/kvm/vfio.c
@@ -20,6 +20,10 @@
#include <linux/vfio.h>
#include "vfio.h"
+#ifdef CONFIG_SPAPR_TCE_IOMMU
+#include <asm/kvm_ppc.h>
+#endif
+
struct kvm_vfio_group {
struct list_head node;
struct vfio_group *vfio_group;
@@ -89,6 +93,47 @@ static bool kvm_vfio_group_is_coherent(struct vfio_group *vfio_group)
return ret > 0;
}
+#ifdef CONFIG_SPAPR_TCE_IOMMU
+static int kvm_vfio_external_user_iommu_id(struct vfio_group *vfio_group)
+{
+ int (*fn)(struct vfio_group *);
+ int ret = -EINVAL;
+
+ fn = symbol_get(vfio_external_user_iommu_id);
+ if (!fn)
+ return ret;
+
+ ret = fn(vfio_group);
+
+ symbol_put(vfio_external_user_iommu_id);
+
+ return ret;
+}
+
+static struct iommu_group *kvm_vfio_group_get_iommu_group(
+ struct vfio_group *group)
+{
+ int group_id = kvm_vfio_external_user_iommu_id(group);
+
+ if (group_id < 0)
+ return NULL;
+
+ return iommu_group_get_by_id(group_id);
+}
+
+static void kvm_spapr_tce_release_vfio_group(struct kvm *kvm,
+ struct vfio_group *vfio_group)
+{
+ struct iommu_group *grp = kvm_vfio_group_get_iommu_group(vfio_group);
+
+ if (WARN_ON_ONCE(!grp))
+ return;
+
+ kvm_spapr_tce_release_iommu_group(kvm, grp);
+ iommu_group_put(grp);
+}
+#endif
+
/*
* Groups can use the same or different IOMMU domains. If the same then
* adding a new group may change the coherency of groups we've previously
@@ -211,6 +256,9 @@ static int kvm_vfio_set_group(struct kvm_device *dev, long attr, u64 arg)
mutex_unlock(&kv->lock);
+#ifdef CONFIG_SPAPR_TCE_IOMMU
+ kvm_spapr_tce_release_vfio_group(dev->kvm, vfio_group);
+#endif
kvm_vfio_group_set_kvm(vfio_group, NULL);
kvm_vfio_group_put_external_user(vfio_group);
@@ -218,6 +266,57 @@ static int kvm_vfio_set_group(struct kvm_device *dev, long attr, u64 arg)
kvm_vfio_update_coherency(dev);
return ret;
+
+#ifdef CONFIG_SPAPR_TCE_IOMMU
+ case KVM_DEV_VFIO_GROUP_SET_SPAPR_TCE: {
+ struct kvm_vfio_spapr_tce param;
+ struct kvm_vfio *kv = dev->private;
+ struct vfio_group *vfio_group;
+ struct kvm_vfio_group *kvg;
+ struct fd f;
+ struct iommu_group *grp;
+
+ if (copy_from_user(&param, (void __user *)arg,
+ sizeof(struct kvm_vfio_spapr_tce)))
+ return -EFAULT;
+
+ f = fdget(param.groupfd);
+ if (!f.file)
+ return -EBADF;
+
+ vfio_group = kvm_vfio_group_get_external_user(f.file);
+ fdput(f);
+
+ if (IS_ERR(vfio_group))
+ return PTR_ERR(vfio_group);
+
+ grp = kvm_vfio_group_get_iommu_group(vfio_group);
+ if (WARN_ON_ONCE(!grp)) {
+ kvm_vfio_group_put_external_user(vfio_group);
+ return -EIO;
+ }
+
+ ret = -ENOENT;
+
+ mutex_lock(&kv->lock);
+
+ list_for_each_entry(kvg, &kv->group_list, node) {
+ if (kvg->vfio_group != vfio_group)
+ continue;
+
+ ret = kvm_spapr_tce_attach_iommu_group(dev->kvm,
+ param.tablefd, grp);
+ break;
+ }
+
+ mutex_unlock(&kv->lock);
+
+ iommu_group_put(grp);
+ kvm_vfio_group_put_external_user(vfio_group);
+
+ return ret;
+ }
+#endif /* CONFIG_SPAPR_TCE_IOMMU */
}
return -ENXIO;
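
The SPAPR TCE glue added to this file resolves vfio_external_user_iommu_id() with symbol_get()/symbol_put(), so KVM only calls into VFIO when that module is actually loaded and drops the reference as soon as the call returns. A loose userspace analogue of that optional-symbol pattern, using dlopen()/dlsym() against libm purely for illustration (not the kernel API; link with -ldl on older glibc):

/* Look the symbol up, call it if present, then drop the reference. */
#include <dlfcn.h>
#include <stdio.h>

int main(void)
{
	void *handle = dlopen("libm.so.6", RTLD_NOW);	/* optional "module" */
	double (*fn)(double);

	if (!handle)
		return 1;	/* dependency absent: carry on without it */

	fn = (double (*)(double))dlsym(handle, "sqrt");	/* symbol_get() analogue */
	if (fn)
		printf("sqrt(2) = %f\n", fn(2.0));

	dlclose(handle);				/* symbol_put() analogue */
	return 0;
}
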
@@ -242,6 +341,9 @@ static int kvm_vfio_has_attr(struct kvm_device *dev,
switch (attr->attr) {
case KVM_DEV_VFIO_GROUP_ADD:
case KVM_DEV_VFIO_GROUP_DEL:
+#ifdef CONFIG_SPAPR_TCE_IOMMU
+ case KVM_DEV_VFIO_GROUP_SET_SPAPR_TCE:
+#endif
return 0;
}
@@ -257,6 +359,9 @@ static void kvm_vfio_destroy(struct kvm_device *dev)
struct kvm_vfio_group *kvg, *tmp;
list_for_each_entry_safe(kvg, tmp, &kv->group_list, node) {
+#ifdef CONFIG_SPAPR_TCE_IOMMU
+ kvm_spapr_tce_release_vfio_group(dev->kvm, kvg->vfio_group);
+#endif
kvm_vfio_group_set_kvm(kvg->vfio_group, NULL);
kvm_vfio_group_put_external_user(kvg->vfio_group);
list_del(&kvg->node);